From d49c45f1f7eb039245704a432353098a91d27b87 Mon Sep 17 00:00:00 2001 From: Ivan Ilves Date: Sat, 9 Jun 2018 07:32:12 +0200 Subject: [PATCH] chore: Add "/vendor" to version control --- .gitignore | 3 - .../github.com/Microsoft/go-winio/.gitignore | 1 + vendor/github.com/Microsoft/go-winio/LICENSE | 22 + .../github.com/Microsoft/go-winio/README.md | 22 + .../Microsoft/go-winio/archive/tar/LICENSE | 27 + .../Microsoft/go-winio/archive/tar/common.go | 344 + .../go-winio/archive/tar/example_test.go | 80 + .../Microsoft/go-winio/archive/tar/reader.go | 1002 + .../go-winio/archive/tar/reader_test.go | 1125 + .../go-winio/archive/tar/stat_atim.go | 20 + .../go-winio/archive/tar/stat_atimespec.go | 20 + .../go-winio/archive/tar/stat_unix.go | 32 + .../go-winio/archive/tar/tar_test.go | 325 + .../archive/tar/testdata/gnu-multi-hdrs.tar | Bin 0 -> 4608 bytes .../go-winio/archive/tar/testdata/gnu.tar | Bin 0 -> 3072 bytes .../archive/tar/testdata/hardlink.tar | Bin 0 -> 2560 bytes .../archive/tar/testdata/hdr-only.tar | Bin 0 -> 10240 bytes .../archive/tar/testdata/issue10968.tar | Bin 0 -> 512 bytes .../archive/tar/testdata/issue11169.tar | Bin 0 -> 602 bytes .../archive/tar/testdata/issue12435.tar | Bin 0 -> 512 bytes .../archive/tar/testdata/neg-size.tar | Bin 0 -> 512 bytes .../go-winio/archive/tar/testdata/nil-uid.tar | Bin 0 -> 1024 bytes .../archive/tar/testdata/pax-multi-hdrs.tar | Bin 0 -> 4608 bytes .../archive/tar/testdata/pax-path-hdr.tar | Bin 0 -> 1024 bytes .../go-winio/archive/tar/testdata/pax.tar | Bin 0 -> 10240 bytes .../go-winio/archive/tar/testdata/small.txt | 1 + .../go-winio/archive/tar/testdata/small2.txt | 1 + .../archive/tar/testdata/sparse-formats.tar | Bin 0 -> 17920 bytes .../go-winio/archive/tar/testdata/star.tar | Bin 0 -> 3072 bytes .../archive/tar/testdata/ustar-file-reg.tar | Bin 0 -> 1536 bytes .../go-winio/archive/tar/testdata/ustar.tar | Bin 0 -> 2048 bytes .../go-winio/archive/tar/testdata/v7.tar | Bin 0 -> 3584 bytes .../archive/tar/testdata/writer-big-long.tar | Bin 0 -> 4096 bytes .../archive/tar/testdata/writer-big.tar | Bin 0 -> 4096 bytes .../go-winio/archive/tar/testdata/writer.tar | Bin 0 -> 3584 bytes .../go-winio/archive/tar/testdata/xattrs.tar | Bin 0 -> 5120 bytes .../Microsoft/go-winio/archive/tar/writer.go | 444 + .../go-winio/archive/tar/writer_test.go | 739 + .../github.com/Microsoft/go-winio/backup.go | 280 + .../Microsoft/go-winio/backup_test.go | 255 + .../Microsoft/go-winio/backuptar/noop.go | 4 + .../Microsoft/go-winio/backuptar/tar.go | 439 + .../Microsoft/go-winio/backuptar/tar_test.go | 84 + vendor/github.com/Microsoft/go-winio/ea.go | 137 + .../github.com/Microsoft/go-winio/ea_test.go | 89 + vendor/github.com/Microsoft/go-winio/file.go | 307 + .../github.com/Microsoft/go-winio/fileinfo.go | 60 + vendor/github.com/Microsoft/go-winio/pipe.go | 424 + .../Microsoft/go-winio/pipe_test.go | 453 + .../Microsoft/go-winio/privilege.go | 202 + .../Microsoft/go-winio/privileges_test.go | 17 + .../github.com/Microsoft/go-winio/reparse.go | 128 + vendor/github.com/Microsoft/go-winio/sd.go | 98 + .../github.com/Microsoft/go-winio/sd_test.go | 26 + .../github.com/Microsoft/go-winio/syscall.go | 3 + .../go-winio/vhd/mksyscall_windows.go | 901 + .../github.com/Microsoft/go-winio/vhd/vhd.go | 82 + .../github.com/Microsoft/go-winio/vhd/zvhd.go | 64 + .../Microsoft/go-winio/wim/decompress.go | 138 + .../Microsoft/go-winio/wim/lzx/lzx.go | 606 + .../go-winio/wim/validate/validate.go | 51 + .../github.com/Microsoft/go-winio/wim/wim.go | 866 + .../Microsoft/go-winio/zsyscall_windows.go | 520 + vendor/github.com/davecgh/go-spew/.gitignore | 22 + vendor/github.com/davecgh/go-spew/.travis.yml | 14 + vendor/github.com/davecgh/go-spew/LICENSE | 15 + vendor/github.com/davecgh/go-spew/README.md | 205 + .../github.com/davecgh/go-spew/cov_report.sh | 22 + .../github.com/davecgh/go-spew/spew/bypass.go | 152 + .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 + .../davecgh/go-spew/spew/common_test.go | 298 + .../github.com/davecgh/go-spew/spew/config.go | 306 + vendor/github.com/davecgh/go-spew/spew/doc.go | 211 + .../github.com/davecgh/go-spew/spew/dump.go | 509 + .../davecgh/go-spew/spew/dump_test.go | 1042 + .../davecgh/go-spew/spew/dumpcgo_test.go | 99 + .../davecgh/go-spew/spew/dumpnocgo_test.go | 26 + .../davecgh/go-spew/spew/example_test.go | 226 + .../github.com/davecgh/go-spew/spew/format.go | 419 + .../davecgh/go-spew/spew/format_test.go | 1558 ++ .../davecgh/go-spew/spew/internal_test.go | 87 + .../go-spew/spew/internalunsafe_test.go | 102 + .../github.com/davecgh/go-spew/spew/spew.go | 148 + .../davecgh/go-spew/spew/spew_test.go | 320 + .../davecgh/go-spew/spew/testdata/dumpcgo.go | 82 + .../davecgh/go-spew/test_coverage.txt | 61 + .../github.com/docker/distribution/.gitignore | 37 + .../github.com/docker/distribution/.mailmap | 19 + vendor/github.com/docker/distribution/AUTHORS | 182 + .../docker/distribution/BUILDING.md | 119 + .../docker/distribution/CHANGELOG.md | 114 + .../docker/distribution/CONTRIBUTING.md | 140 + .../github.com/docker/distribution/Dockerfile | 18 + .../docker/distribution/Godeps/Godeps.json | 458 + .../docker/distribution/Godeps/Readme | 5 + vendor/github.com/docker/distribution/LICENSE | 202 + .../docker/distribution/MAINTAINERS | 58 + .../github.com/docker/distribution/Makefile | 109 + .../github.com/docker/distribution/README.md | 131 + .../docker/distribution/RELEASE-CHECKLIST.md | 36 + .../github.com/docker/distribution/ROADMAP.md | 267 + .../github.com/docker/distribution/blobs.go | 257 + .../github.com/docker/distribution/circle.yml | 93 + .../docker/distribution/cmd/digest/main.go | 97 + .../registry-api-descriptor-template/main.go | 131 + .../cmd/registry/config-cache.yml | 55 + .../distribution/cmd/registry/config-dev.yml | 66 + .../cmd/registry/config-example.yml | 18 + .../docker/distribution/cmd/registry/main.go | 25 + .../configuration/configuration.go | 643 + .../configuration/configuration_test.go | 529 + .../distribution/configuration/parser.go | 283 + .../docker/distribution/context/context.go | 85 + .../docker/distribution/context/doc.go | 89 + .../docker/distribution/context/http.go | 366 + .../docker/distribution/context/http_test.go | 285 + .../docker/distribution/context/logger.go | 116 + .../docker/distribution/context/trace.go | 104 + .../docker/distribution/context/trace_test.go | 85 + .../docker/distribution/context/util.go | 24 + .../docker/distribution/context/version.go | 16 + .../distribution/context/version_test.go | 19 + .../distribution/contrib/apache/README.MD | 36 + .../distribution/contrib/apache/apache.conf | 127 + .../distribution/contrib/compose/README.md | 147 + .../contrib/compose/docker-compose.yml | 15 + .../contrib/compose/nginx/Dockerfile | 6 + .../compose/nginx/docker-registry-v2.conf | 6 + .../compose/nginx/docker-registry.conf | 7 + .../contrib/compose/nginx/nginx.conf | 27 + .../contrib/compose/nginx/registry.conf | 41 + .../contrib/docker-integration/Dockerfile | 9 + .../contrib/docker-integration/README.md | 63 + .../docker-integration/docker-compose.yml | 91 + .../contrib/docker-integration/golem.conf | 18 + .../contrib/docker-integration/helpers.bash | 101 + .../docker-integration/install_certs.sh | 50 + .../malevolent-certs/localregistry.cert | 19 + .../malevolent-certs/localregistry.key | 27 + .../docker-integration/malevolent.bats | 192 + .../docker-integration/nginx/Dockerfile | 10 + .../nginx/docker-registry-v2.conf | 6 + .../docker-integration/nginx/nginx.conf | 61 + .../nginx/registry-basic.conf | 8 + .../nginx/registry-noauth.conf | 5 + .../docker-integration/nginx/registry.conf | 260 + .../docker-integration/nginx/test.passwd | 1 + .../docker-integration/nginx/v1/search.json | 1 + .../docker-integration/run_multiversion.sh | 64 + .../contrib/docker-integration/tls.bats | 109 + .../contrib/docker-integration/token.bats | 135 + .../tokenserver-oauth/.htpasswd | 1 + .../tokenserver-oauth/Dockerfile | 8 + .../certs/auth.localregistry.cert | 19 + .../certs/auth.localregistry.key | 27 + .../certs/localregistry.cert | 19 + .../tokenserver-oauth/certs/localregistry.key | 27 + .../tokenserver-oauth/certs/signing.cert | 18 + .../tokenserver-oauth/certs/signing.key | 27 + .../registry-config-notls.yml | 15 + .../tokenserver-oauth/registry-config.yml | 18 + .../docker-integration/tokenserver/.htpasswd | 1 + .../docker-integration/tokenserver/Dockerfile | 8 + .../tokenserver/certs/auth.localregistry.cert | 19 + .../tokenserver/certs/auth.localregistry.key | 27 + .../tokenserver/certs/localregistry.cert | 19 + .../tokenserver/certs/localregistry.key | 27 + .../tokenserver/certs/signing.cert | 18 + .../tokenserver/certs/signing.key | 27 + .../tokenserver/registry-config.yml | 18 + .../contrib/token-server/errors.go | 38 + .../distribution/contrib/token-server/main.go | 425 + .../contrib/token-server/token.go | 219 + .../docker/distribution/coverpkg.sh | 7 + .../docker/distribution/digest/digest.go | 139 + .../docker/distribution/digest/digest_test.go | 82 + .../docker/distribution/digest/digester.go | 155 + .../digest/digester_resumable_test.go | 21 + .../docker/distribution/digest/doc.go | 42 + .../docker/distribution/digest/set.go | 245 + .../docker/distribution/digest/set_test.go | 368 + .../docker/distribution/digest/verifiers.go | 44 + .../distribution/digest/verifiers_test.go | 49 + vendor/github.com/docker/distribution/doc.go | 7 + .../docker/distribution/docs/README.md | 16 + .../docker/distribution/docs/architecture.md | 52 + .../docker/distribution/docs/configuration.md | 1121 + .../docker/distribution/docs/spec/api.md | 5485 +++++ .../docker/distribution/docs/spec/api.md.tmpl | 1215 ++ .../distribution/docs/spec/auth/index.md | 12 + .../docker/distribution/docs/spec/auth/jwt.md | 329 + .../distribution/docs/spec/auth/oauth.md | 190 + .../distribution/docs/spec/auth/scope.md | 148 + .../distribution/docs/spec/auth/token.md | 250 + .../distribution/docs/spec/implementations.md | 30 + .../docker/distribution/docs/spec/index.md | 12 + .../docker/distribution/docs/spec/json.md | 90 + .../distribution/docs/spec/manifest-v2-1.md | 163 + .../distribution/docs/spec/manifest-v2-2.md | 295 + .../docker/distribution/docs/spec/menu.md | 7 + .../github.com/docker/distribution/errors.go | 115 + .../docker/distribution/health/api/api.go | 37 + .../distribution/health/api/api_test.go | 86 + .../distribution/health/checks/checks.go | 62 + .../distribution/health/checks/checks_test.go | 25 + .../docker/distribution/health/doc.go | 130 + .../docker/distribution/health/health.go | 306 + .../docker/distribution/health/health_test.go | 107 + .../docker/distribution/manifest/doc.go | 1 + .../manifest/manifestlist/manifestlist.go | 155 + .../manifestlist/manifestlist_test.go | 111 + .../manifest/schema1/config_builder.go | 282 + .../manifest/schema1/config_builder_test.go | 272 + .../distribution/manifest/schema1/manifest.go | 184 + .../manifest/schema1/manifest_test.go | 136 + .../manifest/schema1/reference_builder.go | 98 + .../schema1/reference_builder_test.go | 108 + .../distribution/manifest/schema1/sign.go | 68 + .../distribution/manifest/schema1/verify.go | 32 + .../distribution/manifest/schema2/builder.go | 80 + .../manifest/schema2/builder_test.go | 210 + .../distribution/manifest/schema2/manifest.go | 134 + .../manifest/schema2/manifest_test.go | 111 + .../docker/distribution/manifest/versioned.go | 12 + .../docker/distribution/manifests.go | 125 + .../distribution/notifications/bridge.go | 214 + .../distribution/notifications/bridge_test.go | 222 + .../distribution/notifications/endpoint.go | 93 + .../distribution/notifications/event.go | 160 + .../distribution/notifications/event_test.go | 157 + .../docker/distribution/notifications/http.go | 150 + .../distribution/notifications/http_test.go | 185 + .../distribution/notifications/listener.go | 215 + .../notifications/listener_test.go | 205 + .../distribution/notifications/metrics.go | 152 + .../distribution/notifications/sinks.go | 375 + .../distribution/notifications/sinks_test.go | 256 + .../distribution/project/dev-image/Dockerfile | 20 + .../distribution/project/hooks/README.md | 6 + .../project/hooks/configure-hooks.sh | 18 + .../distribution/project/hooks/pre-commit | 29 + .../distribution/reference/reference.go | 370 + .../distribution/reference/reference_test.go | 661 + .../docker/distribution/reference/regexp.go | 124 + .../distribution/reference/regexp_test.go | 489 + .../docker/distribution/registry.go | 97 + .../registry/api/errcode/errors.go | 267 + .../registry/api/errcode/errors_test.go | 185 + .../registry/api/errcode/handler.go | 44 + .../registry/api/errcode/register.go | 138 + .../registry/api/v2/descriptors.go | 1596 ++ .../distribution/registry/api/v2/doc.go | 9 + .../distribution/registry/api/v2/errors.go | 136 + .../registry/api/v2/headerparser.go | 161 + .../registry/api/v2/headerparser_test.go | 161 + .../distribution/registry/api/v2/routes.go | 49 + .../registry/api/v2/routes_test.go | 355 + .../distribution/registry/api/v2/urls.go | 263 + .../distribution/registry/api/v2/urls_test.go | 484 + .../docker/distribution/registry/auth/auth.go | 202 + .../registry/auth/htpasswd/access.go | 115 + .../registry/auth/htpasswd/access_test.go | 122 + .../registry/auth/htpasswd/htpasswd.go | 82 + .../registry/auth/htpasswd/htpasswd_test.go | 85 + .../registry/auth/silly/access.go | 97 + .../registry/auth/silly/access_test.go | 71 + .../registry/auth/token/accesscontroller.go | 272 + .../registry/auth/token/stringset.go | 35 + .../distribution/registry/auth/token/token.go | 378 + .../registry/auth/token/token_test.go | 510 + .../distribution/registry/auth/token/util.go | 58 + .../registry/client/auth/api_version.go | 58 + .../registry/client/auth/challenge/addr.go | 27 + .../client/auth/challenge/authchallenge.go | 237 + .../auth/challenge/authchallenge_test.go | 127 + .../registry/client/auth/session.go | 503 + .../registry/client/auth/session_test.go | 866 + .../registry/client/blob_writer.go | 162 + .../registry/client/blob_writer_test.go | 211 + .../distribution/registry/client/errors.go | 139 + .../registry/client/errors_test.go | 104 + .../registry/client/repository.go | 853 + .../registry/client/repository_test.go | 1226 ++ .../registry/client/transport/http_reader.go | 251 + .../registry/client/transport/transport.go | 147 + .../docker/distribution/registry/doc.go | 2 + .../registry/handlers/api_test.go | 2513 +++ .../distribution/registry/handlers/app.go | 1046 + .../registry/handlers/app_test.go | 279 + .../registry/handlers/basicauth.go | 11 + .../registry/handlers/basicauth_prego14.go | 41 + .../distribution/registry/handlers/blob.go | 99 + .../registry/handlers/blobupload.go | 368 + .../distribution/registry/handlers/catalog.go | 98 + .../distribution/registry/handlers/context.go | 152 + .../registry/handlers/health_test.go | 210 + .../distribution/registry/handlers/helpers.go | 71 + .../distribution/registry/handlers/hmac.go | 74 + .../registry/handlers/hmac_test.go | 117 + .../distribution/registry/handlers/hooks.go | 53 + .../distribution/registry/handlers/images.go | 462 + .../distribution/registry/handlers/mail.go | 45 + .../distribution/registry/handlers/tags.go | 62 + .../registry/listener/listener.go | 74 + .../middleware/registry/middleware.go | 54 + .../middleware/repository/middleware.go | 40 + .../distribution/registry/proxy/proxyauth.go | 87 + .../registry/proxy/proxyblobstore.go | 224 + .../registry/proxy/proxyblobstore_test.go | 416 + .../registry/proxy/proxymanifeststore.go | 95 + .../registry/proxy/proxymanifeststore_test.go | 275 + .../registry/proxy/proxymetrics.go | 74 + .../registry/proxy/proxyregistry.go | 249 + .../registry/proxy/proxytagservice.go | 65 + .../registry/proxy/proxytagservice_test.go | 182 + .../registry/proxy/scheduler/scheduler.go | 259 + .../proxy/scheduler/scheduler_test.go | 211 + .../docker/distribution/registry/registry.go | 356 + .../distribution/registry/registry_test.go | 30 + .../docker/distribution/registry/root.go | 84 + .../registry/storage/blob_test.go | 614 + .../registry/storage/blobcachemetrics.go | 60 + .../registry/storage/blobserver.go | 78 + .../registry/storage/blobstore.go | 223 + .../registry/storage/blobwriter.go | 399 + .../storage/blobwriter_nonresumable.go | 17 + .../registry/storage/blobwriter_resumable.go | 145 + .../registry/storage/cache/cache.go | 35 + .../storage/cache/cachecheck/suite.go | 180 + .../cache/cachedblobdescriptorstore.go | 101 + .../registry/storage/cache/memory/memory.go | 179 + .../storage/cache/memory/memory_test.go | 13 + .../registry/storage/cache/redis/redis.go | 268 + .../storage/cache/redis/redis_test.go | 53 + .../distribution/registry/storage/catalog.go | 153 + .../registry/storage/catalog_test.go | 324 + .../distribution/registry/storage/doc.go | 3 + .../registry/storage/driver/azure/azure.go | 483 + .../storage/driver/azure/azure_test.go | 63 + .../registry/storage/driver/base/base.go | 198 + .../registry/storage/driver/base/regulator.go | 145 + .../storage/driver/factory/factory.go | 64 + .../registry/storage/driver/fileinfo.go | 79 + .../storage/driver/filesystem/driver.go | 440 + .../storage/driver/filesystem/driver_test.go | 113 + .../registry/storage/driver/gcs/doc.go | 3 + .../registry/storage/driver/gcs/gcs.go | 873 + .../registry/storage/driver/gcs/gcs_test.go | 311 + .../storage/driver/inmemory/driver.go | 312 + .../storage/driver/inmemory/driver_test.go | 19 + .../registry/storage/driver/inmemory/mfs.go | 338 + .../middleware/cloudfront/middleware.go | 136 + .../driver/middleware/redirect/middleware.go | 50 + .../middleware/redirect/middleware_test.go | 58 + .../driver/middleware/storagemiddleware.go | 39 + .../registry/storage/driver/oss/doc.go | 3 + .../registry/storage/driver/oss/oss.go | 683 + .../registry/storage/driver/oss/oss_test.go | 144 + .../registry/storage/driver/s3-aws/s3.go | 1189 ++ .../registry/storage/driver/s3-aws/s3_test.go | 313 + .../storage/driver/s3-aws/s3_v2_signer.go | 219 + .../registry/storage/driver/s3-goamz/s3.go | 757 + .../storage/driver/s3-goamz/s3_test.go | 201 + .../registry/storage/driver/storagedriver.go | 165 + .../registry/storage/driver/swift/swift.go | 915 + .../storage/driver/swift/swift_test.go | 245 + .../storage/driver/testdriver/testdriver.go | 71 + .../storage/driver/testsuites/testsuites.go | 1273 ++ .../registry/storage/filereader.go | 177 + .../registry/storage/filereader_test.go | 198 + .../registry/storage/garbagecollect.go | 114 + .../registry/storage/garbagecollect_test.go | 377 + .../distribution/registry/storage/io.go | 71 + .../registry/storage/linkedblobstore.go | 470 + .../registry/storage/linkedblobstore_test.go | 217 + .../registry/storage/manifestlisthandler.go | 92 + .../registry/storage/manifeststore.go | 141 + .../registry/storage/manifeststore_test.go | 391 + .../distribution/registry/storage/paths.go | 490 + .../registry/storage/paths_test.go | 135 + .../registry/storage/purgeuploads.go | 139 + .../registry/storage/purgeuploads_test.go | 166 + .../distribution/registry/storage/registry.go | 306 + .../storage/schema2manifesthandler.go | 136 + .../storage/schema2manifesthandler_test.go | 136 + .../registry/storage/signedmanifesthandler.go | 141 + .../distribution/registry/storage/tagstore.go | 191 + .../registry/storage/tagstore_test.go | 209 + .../distribution/registry/storage/util.go | 21 + .../distribution/registry/storage/vacuum.go | 67 + .../distribution/registry/storage/walk.go | 59 + .../registry/storage/walk_test.go | 152 + vendor/github.com/docker/distribution/tags.go | 27 + .../docker/distribution/testutil/handler.go | 148 + .../docker/distribution/testutil/manifests.go | 87 + .../docker/distribution/testutil/tarfile.go | 115 + .../docker/distribution/uuid/uuid.go | 126 + .../docker/distribution/uuid/uuid_test.go | 48 + .../docker/distribution/version/print.go | 26 + .../docker/distribution/version/version.go | 11 + .../docker/distribution/version/version.sh | 22 + vendor/github.com/docker/docker/.dockerignore | 4 + .../docker/docker/.github/ISSUE_TEMPLATE.md | 64 + .../docker/.github/PULL_REQUEST_TEMPLATE.md | 30 + vendor/github.com/docker/docker/.gitignore | 33 + vendor/github.com/docker/docker/.mailmap | 275 + vendor/github.com/docker/docker/AUTHORS | 1652 ++ vendor/github.com/docker/docker/CHANGELOG.md | 3337 +++ .../github.com/docker/docker/CONTRIBUTING.md | 401 + vendor/github.com/docker/docker/Dockerfile | 246 + .../docker/docker/Dockerfile.aarch64 | 175 + .../github.com/docker/docker/Dockerfile.armhf | 182 + .../docker/docker/Dockerfile.ppc64le | 188 + .../github.com/docker/docker/Dockerfile.s390x | 190 + .../docker/docker/Dockerfile.simple | 73 + .../docker/docker/Dockerfile.solaris | 20 + .../docker/docker/Dockerfile.windows | 267 + vendor/github.com/docker/docker/LICENSE | 191 + vendor/github.com/docker/docker/MAINTAINERS | 376 + vendor/github.com/docker/docker/Makefile | 147 + vendor/github.com/docker/docker/NOTICE | 19 + vendor/github.com/docker/docker/README.md | 304 + vendor/github.com/docker/docker/ROADMAP.md | 118 + vendor/github.com/docker/docker/VENDORING.md | 45 + vendor/github.com/docker/docker/VERSION | 1 + vendor/github.com/docker/docker/api/README.md | 42 + vendor/github.com/docker/docker/api/common.go | 166 + .../docker/docker/api/common_test.go | 341 + .../docker/docker/api/common_unix.go | 6 + .../docker/docker/api/common_windows.go | 8 + .../docker/docker/api/errors/errors.go | 47 + .../docker/docker/api/fixtures/keyfile | 7 + .../docker/api/server/httputils/decoder.go | 16 + .../docker/api/server/httputils/errors.go | 101 + .../docker/api/server/httputils/form.go | 73 + .../docker/api/server/httputils/form_test.go | 105 + .../docker/api/server/httputils/httputils.go | 90 + .../server/httputils/httputils_write_json.go | 17 + .../httputils/httputils_write_json_go16.go | 16 + .../docker/docker/api/server/middleware.go | 24 + .../docker/api/server/middleware/cors.go | 37 + .../docker/api/server/middleware/debug.go | 76 + .../api/server/middleware/experimental.go | 29 + .../api/server/middleware/middleware.go | 13 + .../docker/api/server/middleware/version.go | 50 + .../api/server/middleware/version_test.go | 57 + .../docker/docker/api/server/profiler.go | 41 + .../docker/api/server/router/build/backend.go | 20 + .../docker/api/server/router/build/build.go | 29 + .../api/server/router/build/build_routes.go | 225 + .../api/server/router/checkpoint/backend.go | 10 + .../server/router/checkpoint/checkpoint.go | 36 + .../router/checkpoint/checkpoint_routes.go | 65 + .../api/server/router/container/backend.go | 79 + .../api/server/router/container/container.go | 77 + .../router/container/container_routes.go | 554 + .../api/server/router/container/copy.go | 119 + .../api/server/router/container/exec.go | 140 + .../api/server/router/container/inspect.go | 21 + .../docker/api/server/router/experimental.go | 67 + .../docker/api/server/router/image/backend.go | 45 + .../docker/api/server/router/image/image.go | 50 + .../api/server/router/image/image_routes.go | 344 + .../docker/docker/api/server/router/local.go | 96 + .../api/server/router/network/backend.go | 22 + .../api/server/router/network/filter.go | 96 + .../api/server/router/network/network.go | 44 + .../server/router/network/network_routes.go | 308 + .../api/server/router/plugin/backend.go | 25 + .../docker/api/server/router/plugin/plugin.go | 39 + .../api/server/router/plugin/plugin_routes.go | 314 + .../docker/docker/api/server/router/router.go | 19 + .../docker/api/server/router/swarm/backend.go | 36 + .../docker/api/server/router/swarm/cluster.go | 52 + .../api/server/router/swarm/cluster_routes.go | 418 + .../api/server/router/system/backend.go | 21 + .../docker/api/server/router/system/system.go | 39 + .../api/server/router/system/system_routes.go | 186 + .../api/server/router/volume/backend.go | 17 + .../docker/api/server/router/volume/volume.go | 36 + .../api/server/router/volume/volume_routes.go | 80 + .../docker/api/server/router_swapper.go | 30 + .../docker/docker/api/server/server.go | 210 + .../docker/docker/api/server/server_test.go | 46 + .../docker/docker/api/swagger-gen.yaml | 12 + .../github.com/docker/docker/api/swagger.yaml | 7785 +++++++ .../api/templates/server/operation.gotmpl | 26 + .../docker/docker/api/types/auth.go | 22 + .../docker/api/types/backend/backend.go | 84 + .../docker/docker/api/types/blkiodev/blkio.go | 23 + .../docker/docker/api/types/client.go | 378 + .../docker/docker/api/types/configs.go | 69 + .../docker/api/types/container/config.go | 62 + .../api/types/container/container_create.go | 21 + .../api/types/container/container_update.go | 17 + .../api/types/container/container_wait.go | 17 + .../docker/api/types/container/host_config.go | 333 + .../api/types/container/hostconfig_unix.go | 81 + .../api/types/container/hostconfig_windows.go | 87 + .../docker/docker/api/types/error_response.go | 13 + .../docker/docker/api/types/events/events.go | 42 + .../docker/docker/api/types/filters/parse.go | 310 + .../docker/api/types/filters/parse_test.go | 417 + .../docker/docker/api/types/id_response.go | 13 + .../docker/docker/api/types/image_summary.go | 49 + .../docker/docker/api/types/mount/mount.go | 113 + .../docker/api/types/network/network.go | 59 + .../docker/docker/api/types/plugin.go | 189 + .../docker/docker/api/types/plugin_device.go | 25 + .../docker/docker/api/types/plugin_env.go | 25 + .../docker/api/types/plugin_interface_type.go | 21 + .../docker/docker/api/types/plugin_mount.go | 37 + .../docker/api/types/plugin_responses.go | 64 + .../docker/docker/api/types/port.go | 23 + .../api/types/reference/image_reference.go | 34 + .../types/reference/image_reference_test.go | 72 + .../docker/api/types/registry/authenticate.go | 21 + .../docker/api/types/registry/registry.go | 104 + .../docker/docker/api/types/seccomp.go | 93 + .../api/types/service_update_response.go | 12 + .../docker/docker/api/types/stats.go | 178 + .../docker/api/types/strslice/strslice.go | 30 + .../api/types/strslice/strslice_test.go | 86 + .../docker/docker/api/types/swarm/common.go | 27 + .../docker/api/types/swarm/container.go | 46 + .../docker/docker/api/types/swarm/network.go | 111 + .../docker/docker/api/types/swarm/node.go | 114 + .../docker/docker/api/types/swarm/secret.go | 31 + .../docker/docker/api/types/swarm/service.go | 105 + .../docker/docker/api/types/swarm/swarm.go | 197 + .../docker/docker/api/types/swarm/task.go | 128 + .../docker/api/types/time/duration_convert.go | 12 + .../api/types/time/duration_convert_test.go | 26 + .../docker/docker/api/types/time/timestamp.go | 124 + .../docker/api/types/time/timestamp_test.go | 93 + .../docker/docker/api/types/types.go | 549 + .../docker/api/types/versions/README.md | 14 + .../docker/api/types/versions/compare.go | 62 + .../docker/api/types/versions/compare_test.go | 26 + .../docker/api/types/versions/v1p19/types.go | 35 + .../docker/api/types/versions/v1p20/types.go | 40 + .../docker/docker/api/types/volume.go | 58 + .../docker/api/types/volume/volumes_create.go | 29 + .../docker/api/types/volume/volumes_list.go | 23 + .../docker/docker/builder/builder.go | 169 + .../docker/docker/builder/context.go | 260 + .../docker/docker/builder/context_test.go | 307 + .../docker/docker/builder/context_unix.go | 11 + .../docker/docker/builder/context_windows.go | 17 + .../docker/docker/builder/dockerfile/bflag.go | 176 + .../docker/builder/dockerfile/bflag_test.go | 187 + .../docker/builder/dockerfile/builder.go | 370 + .../docker/builder/dockerfile/builder_unix.go | 5 + .../builder/dockerfile/builder_windows.go | 3 + .../builder/dockerfile/command/command.go | 46 + .../docker/builder/dockerfile/dispatchers.go | 821 + .../builder/dockerfile/dispatchers_test.go | 517 + .../builder/dockerfile/dispatchers_unix.go | 27 + .../dockerfile/dispatchers_unix_test.go | 33 + .../builder/dockerfile/dispatchers_windows.go | 86 + .../dockerfile/dispatchers_windows_test.go | 40 + .../docker/builder/dockerfile/envVarTest | 116 + .../docker/builder/dockerfile/evaluator.go | 244 + .../builder/dockerfile/evaluator_test.go | 197 + .../builder/dockerfile/evaluator_unix.go | 9 + .../builder/dockerfile/evaluator_windows.go | 13 + .../docker/builder/dockerfile/internals.go | 669 + .../builder/dockerfile/internals_test.go | 95 + .../builder/dockerfile/internals_unix.go | 38 + .../builder/dockerfile/internals_windows.go | 66 + .../dockerfile/internals_windows_test.go | 51 + .../builder/dockerfile/parser/dumper/main.go | 36 + .../builder/dockerfile/parser/json_test.go | 61 + .../builder/dockerfile/parser/line_parsers.go | 361 + .../builder/dockerfile/parser/parser.go | 221 + .../builder/dockerfile/parser/parser_test.go | 173 + .../parser/testfile-line/Dockerfile | 35 + .../env_no_value/Dockerfile | 3 + .../shykes-nested-json/Dockerfile | 1 + .../testfiles/ADD-COPY-with-JSON/Dockerfile | 11 + .../testfiles/ADD-COPY-with-JSON/result | 10 + .../testfiles/brimstone-consuldock/Dockerfile | 26 + .../testfiles/brimstone-consuldock/result | 5 + .../brimstone-docker-consul/Dockerfile | 52 + .../testfiles/brimstone-docker-consul/result | 9 + .../testfiles/continueIndent/Dockerfile | 36 + .../parser/testfiles/continueIndent/result | 10 + .../testfiles/cpuguy83-nagios/Dockerfile | 54 + .../parser/testfiles/cpuguy83-nagios/result | 40 + .../parser/testfiles/docker/Dockerfile | 103 + .../dockerfile/parser/testfiles/docker/result | 24 + .../parser/testfiles/env/Dockerfile | 23 + .../dockerfile/parser/testfiles/env/result | 16 + .../testfiles/escape-after-comment/Dockerfile | 9 + .../testfiles/escape-after-comment/result | 3 + .../testfiles/escape-nonewline/Dockerfile | 7 + .../parser/testfiles/escape-nonewline/result | 3 + .../parser/testfiles/escape/Dockerfile | 6 + .../dockerfile/parser/testfiles/escape/result | 3 + .../parser/testfiles/escapes/Dockerfile | 14 + .../parser/testfiles/escapes/result | 6 + .../parser/testfiles/flags/Dockerfile | 10 + .../dockerfile/parser/testfiles/flags/result | 10 + .../parser/testfiles/health/Dockerfile | 10 + .../dockerfile/parser/testfiles/health/result | 9 + .../parser/testfiles/influxdb/Dockerfile | 15 + .../parser/testfiles/influxdb/result | 11 + .../Dockerfile | 1 + .../result | 1 + .../Dockerfile | 1 + .../result | 1 + .../Dockerfile | 1 + .../jeztah-invalid-json-single-quotes/result | 1 + .../Dockerfile | 1 + .../result | 1 + .../Dockerfile | 1 + .../result | 1 + .../parser/testfiles/json/Dockerfile | 8 + .../dockerfile/parser/testfiles/json/result | 8 + .../kartar-entrypoint-oddities/Dockerfile | 7 + .../kartar-entrypoint-oddities/result | 7 + .../lk4d4-the-edge-case-generator/Dockerfile | 48 + .../lk4d4-the-edge-case-generator/result | 29 + .../parser/testfiles/mail/Dockerfile | 16 + .../dockerfile/parser/testfiles/mail/result | 14 + .../testfiles/multiple-volumes/Dockerfile | 3 + .../parser/testfiles/multiple-volumes/result | 2 + .../parser/testfiles/mumble/Dockerfile | 7 + .../dockerfile/parser/testfiles/mumble/result | 4 + .../parser/testfiles/nginx/Dockerfile | 14 + .../dockerfile/parser/testfiles/nginx/result | 11 + .../parser/testfiles/tf2/Dockerfile | 23 + .../dockerfile/parser/testfiles/tf2/result | 20 + .../parser/testfiles/weechat/Dockerfile | 9 + .../parser/testfiles/weechat/result | 6 + .../parser/testfiles/znc/Dockerfile | 7 + .../dockerfile/parser/testfiles/znc/result | 5 + .../docker/builder/dockerfile/parser/utils.go | 176 + .../docker/builder/dockerfile/shell_parser.go | 329 + .../builder/dockerfile/shell_parser_test.go | 155 + .../docker/builder/dockerfile/support.go | 19 + .../docker/builder/dockerfile/support_test.go | 65 + .../docker/builder/dockerfile/utils_test.go | 50 + .../docker/builder/dockerfile/wordsTest | 25 + .../docker/docker/builder/dockerignore.go | 48 + .../builder/dockerignore/dockerignore.go | 49 + .../builder/dockerignore/dockerignore_test.go | 57 + .../docker/builder/dockerignore_test.go | 95 + .../github.com/docker/docker/builder/git.go | 28 + .../docker/docker/builder/remote.go | 157 + .../docker/docker/builder/remote_test.go | 213 + .../docker/docker/builder/tarsum.go | 158 + .../docker/docker/builder/tarsum_test.go | 265 + .../docker/docker/builder/utils_test.go | 87 + vendor/github.com/docker/docker/cli/cobra.go | 139 + .../cli/command/bundlefile/bundlefile.go | 69 + .../cli/command/bundlefile/bundlefile_test.go | 77 + .../docker/cli/command/checkpoint/cmd.go | 24 + .../docker/cli/command/checkpoint/create.go | 58 + .../docker/cli/command/checkpoint/list.go | 62 + .../docker/cli/command/checkpoint/remove.go | 44 + .../docker/docker/cli/command/cli.go | 260 + .../docker/cli/command/commands/commands.go | 91 + .../docker/cli/command/container/attach.go | 130 + .../docker/cli/command/container/cmd.go | 46 + .../docker/cli/command/container/commit.go | 76 + .../docker/docker/cli/command/container/cp.go | 303 + .../docker/cli/command/container/create.go | 218 + .../docker/cli/command/container/diff.go | 58 + .../docker/cli/command/container/exec.go | 207 + .../docker/cli/command/container/exec_test.go | 116 + .../docker/cli/command/container/export.go | 59 + .../docker/cli/command/container/hijack.go | 116 + .../docker/cli/command/container/inspect.go | 47 + .../docker/cli/command/container/kill.go | 56 + .../docker/cli/command/container/list.go | 141 + .../docker/cli/command/container/logs.go | 87 + .../docker/cli/command/container/pause.go | 49 + .../docker/cli/command/container/port.go | 78 + .../docker/cli/command/container/prune.go | 75 + .../docker/cli/command/container/ps_test.go | 118 + .../docker/cli/command/container/rename.go | 51 + .../docker/cli/command/container/restart.go | 62 + .../docker/docker/cli/command/container/rm.go | 73 + .../docker/cli/command/container/run.go | 285 + .../docker/cli/command/container/start.go | 179 + .../docker/cli/command/container/stats.go | 243 + .../cli/command/container/stats_helpers.go | 226 + .../cli/command/container/stats_unit_test.go | 20 + .../docker/cli/command/container/stop.go | 67 + .../docker/cli/command/container/top.go | 58 + .../docker/cli/command/container/tty.go | 103 + .../docker/cli/command/container/unpause.go | 50 + .../docker/cli/command/container/update.go | 163 + .../docker/cli/command/container/utils.go | 143 + .../docker/cli/command/container/wait.go | 50 + .../docker/docker/cli/command/events_utils.go | 49 + .../docker/cli/command/formatter/container.go | 235 + .../cli/command/formatter/container_test.go | 398 + .../docker/cli/command/formatter/custom.go | 51 + .../cli/command/formatter/custom_test.go | 28 + .../cli/command/formatter/disk_usage.go | 334 + .../docker/cli/command/formatter/formatter.go | 123 + .../docker/cli/command/formatter/image.go | 259 + .../cli/command/formatter/image_test.go | 333 + .../docker/cli/command/formatter/network.go | 117 + .../cli/command/formatter/network_test.go | 208 + .../docker/cli/command/formatter/reflect.go | 65 + .../cli/command/formatter/reflect_test.go | 66 + .../docker/cli/command/formatter/service.go | 322 + .../docker/cli/command/formatter/stats.go | 211 + .../cli/command/formatter/stats_test.go | 228 + .../docker/cli/command/formatter/volume.go | 121 + .../cli/command/formatter/volume_test.go | 189 + .../cli/command/idresolver/idresolver.go | 90 + .../docker/docker/cli/command/image/build.go | 477 + .../docker/docker/cli/command/image/cmd.go | 33 + .../docker/cli/command/image/history.go | 99 + .../docker/docker/cli/command/image/import.go | 88 + .../docker/cli/command/image/inspect.go | 44 + .../docker/docker/cli/command/image/list.go | 96 + .../docker/docker/cli/command/image/load.go | 77 + .../docker/docker/cli/command/image/prune.go | 92 + .../docker/docker/cli/command/image/pull.go | 84 + .../docker/docker/cli/command/image/push.go | 61 + .../docker/docker/cli/command/image/remove.go | 77 + .../docker/docker/cli/command/image/save.go | 57 + .../docker/docker/cli/command/image/tag.go | 41 + .../docker/docker/cli/command/image/trust.go | 381 + .../docker/cli/command/image/trust_test.go | 57 + .../docker/docker/cli/command/in.go | 75 + .../docker/cli/command/inspect/inspector.go | 195 + .../cli/command/inspect/inspector_test.go | 221 + .../docker/docker/cli/command/network/cmd.go | 28 + .../docker/cli/command/network/connect.go | 64 + .../docker/cli/command/network/create.go | 226 + .../docker/cli/command/network/disconnect.go | 41 + .../docker/cli/command/network/inspect.go | 45 + .../docker/docker/cli/command/network/list.go | 76 + .../docker/cli/command/network/prune.go | 73 + .../docker/cli/command/network/remove.go | 43 + .../docker/docker/cli/command/node/cmd.go | 43 + .../docker/docker/cli/command/node/demote.go | 36 + .../docker/docker/cli/command/node/inspect.go | 144 + .../docker/docker/cli/command/node/list.go | 115 + .../docker/docker/cli/command/node/opts.go | 60 + .../docker/docker/cli/command/node/promote.go | 36 + .../docker/docker/cli/command/node/ps.go | 93 + .../docker/docker/cli/command/node/remove.go | 56 + .../docker/docker/cli/command/node/update.go | 121 + .../docker/docker/cli/command/out.go | 69 + .../docker/docker/cli/command/plugin/cmd.go | 31 + .../docker/cli/command/plugin/create.go | 125 + .../docker/cli/command/plugin/disable.go | 36 + .../docker/cli/command/plugin/enable.go | 47 + .../docker/cli/command/plugin/inspect.go | 42 + .../docker/cli/command/plugin/install.go | 208 + .../docker/docker/cli/command/plugin/list.go | 63 + .../docker/docker/cli/command/plugin/push.go | 71 + .../docker/cli/command/plugin/remove.go | 55 + .../docker/docker/cli/command/plugin/set.go | 22 + .../docker/cli/command/plugin/upgrade.go | 100 + .../docker/docker/cli/command/prune/prune.go | 50 + .../docker/docker/cli/command/registry.go | 186 + .../docker/cli/command/registry/login.go | 85 + .../docker/cli/command/registry/logout.go | 77 + .../docker/cli/command/registry/search.go | 126 + .../docker/docker/cli/command/secret/cmd.go | 25 + .../docker/cli/command/secret/create.go | 79 + .../docker/cli/command/secret/inspect.go | 45 + .../docker/docker/cli/command/secret/ls.go | 68 + .../docker/cli/command/secret/remove.go | 57 + .../docker/docker/cli/command/secret/utils.go | 76 + .../docker/docker/cli/command/service/cmd.go | 29 + .../docker/cli/command/service/create.go | 100 + .../docker/cli/command/service/inspect.go | 84 + .../cli/command/service/inspect_test.go | 129 + .../docker/docker/cli/command/service/list.go | 158 + .../docker/docker/cli/command/service/logs.go | 163 + .../docker/docker/cli/command/service/opts.go | 648 + .../docker/cli/command/service/opts_test.go | 107 + .../docker/cli/command/service/parse.go | 68 + .../docker/docker/cli/command/service/ps.go | 76 + .../docker/cli/command/service/remove.go | 47 + .../docker/cli/command/service/scale.go | 96 + .../docker/cli/command/service/trust.go | 96 + .../docker/cli/command/service/update.go | 849 + .../docker/cli/command/service/update_test.go | 384 + .../docker/docker/cli/command/stack/cmd.go | 35 + .../docker/docker/cli/command/stack/common.go | 60 + .../docker/docker/cli/command/stack/deploy.go | 357 + .../cli/command/stack/deploy_bundlefile.go | 83 + .../docker/docker/cli/command/stack/list.go | 113 + .../docker/docker/cli/command/stack/opts.go | 49 + .../docker/docker/cli/command/stack/ps.go | 61 + .../docker/docker/cli/command/stack/remove.go | 112 + .../docker/cli/command/stack/services.go | 79 + .../docker/docker/cli/command/swarm/cmd.go | 28 + .../docker/docker/cli/command/swarm/init.go | 85 + .../docker/docker/cli/command/swarm/join.go | 69 + .../docker/cli/command/swarm/join_token.go | 105 + .../docker/docker/cli/command/swarm/leave.go | 44 + .../docker/docker/cli/command/swarm/opts.go | 209 + .../docker/cli/command/swarm/opts_test.go | 37 + .../docker/docker/cli/command/swarm/unlock.go | 54 + .../docker/cli/command/swarm/unlock_key.go | 79 + .../docker/docker/cli/command/swarm/update.go | 72 + .../docker/docker/cli/command/system/cmd.go | 26 + .../docker/docker/cli/command/system/df.go | 56 + .../docker/cli/command/system/events.go | 140 + .../docker/docker/cli/command/system/info.go | 334 + .../docker/cli/command/system/inspect.go | 203 + .../docker/docker/cli/command/system/prune.go | 93 + .../docker/cli/command/system/version.go | 113 + .../docker/docker/cli/command/task/print.go | 161 + .../docker/docker/cli/command/trust.go | 39 + .../docker/docker/cli/command/utils.go | 87 + .../docker/docker/cli/command/volume/cmd.go | 45 + .../docker/cli/command/volume/create.go | 111 + .../docker/cli/command/volume/inspect.go | 55 + .../docker/docker/cli/command/volume/list.go | 91 + .../docker/docker/cli/command/volume/prune.go | 75 + .../docker/cli/command/volume/remove.go | 68 + .../docker/cli/compose/convert/compose.go | 116 + .../cli/compose/convert/compose_test.go | 122 + .../docker/cli/compose/convert/service.go | 416 + .../cli/compose/convert/service_test.go | 216 + .../docker/cli/compose/convert/volume.go | 128 + .../docker/cli/compose/convert/volume_test.go | 133 + .../compose/interpolation/interpolation.go | 90 + .../interpolation/interpolation_test.go | 59 + .../docker/cli/compose/loader/example1.env | 8 + .../docker/cli/compose/loader/example2.env | 1 + .../cli/compose/loader/full-example.yml | 287 + .../docker/cli/compose/loader/loader.go | 653 + .../docker/cli/compose/loader/loader_test.go | 800 + .../docker/cli/compose/schema/bindata.go | 260 + .../schema/data/config_schema_v3.0.json | 383 + .../schema/data/config_schema_v3.1.json | 428 + .../docker/cli/compose/schema/schema.go | 137 + .../docker/cli/compose/schema/schema_test.go | 52 + .../docker/cli/compose/template/template.go | 100 + .../cli/compose/template/template_test.go | 83 + .../docker/docker/cli/compose/types/types.go | 253 + vendor/github.com/docker/docker/cli/error.go | 33 + .../docker/docker/cli/flags/client.go | 13 + .../docker/docker/cli/flags/common.go | 120 + .../docker/docker/cli/flags/common_test.go | 42 + .../github.com/docker/docker/cli/required.go | 96 + .../docker/docker/cli/trust/trust.go | 232 + .../docker/docker/cliconfig/config.go | 120 + .../docker/docker/cliconfig/config_test.go | 621 + .../docker/cliconfig/configfile/file.go | 183 + .../docker/cliconfig/configfile/file_test.go | 27 + .../cliconfig/credentials/credentials.go | 17 + .../cliconfig/credentials/default_store.go | 22 + .../credentials/default_store_darwin.go | 3 + .../credentials/default_store_linux.go | 3 + .../credentials/default_store_unsupported.go | 5 + .../credentials/default_store_windows.go | 3 + .../cliconfig/credentials/file_store.go | 53 + .../cliconfig/credentials/file_store_test.go | 139 + .../cliconfig/credentials/native_store.go | 144 + .../credentials/native_store_test.go | 355 + .../github.com/docker/docker/client/README.md | 35 + .../docker/docker/client/checkpoint_create.go | 13 + .../docker/client/checkpoint_create_test.go | 73 + .../docker/docker/client/checkpoint_delete.go | 20 + .../docker/client/checkpoint_delete_test.go | 54 + .../docker/docker/client/checkpoint_list.go | 28 + .../docker/client/checkpoint_list_test.go | 57 + .../github.com/docker/docker/client/client.go | 246 + .../docker/docker/client/client_mock_test.go | 45 + .../docker/docker/client/client_test.go | 283 + .../docker/docker/client/client_unix.go | 6 + .../docker/docker/client/client_windows.go | 4 + .../docker/docker/client/container_attach.go | 37 + .../docker/docker/client/container_commit.go | 53 + .../docker/client/container_commit_test.go | 96 + .../docker/docker/client/container_copy.go | 97 + .../docker/client/container_copy_test.go | 244 + .../docker/docker/client/container_create.go | 50 + .../docker/client/container_create_test.go | 76 + .../docker/docker/client/container_diff.go | 23 + .../docker/client/container_diff_test.go | 61 + .../docker/docker/client/container_exec.go | 54 + .../docker/client/container_exec_test.go | 157 + .../docker/docker/client/container_export.go | 20 + .../docker/client/container_export_test.go | 50 + .../docker/docker/client/container_inspect.go | 54 + .../docker/client/container_inspect_test.go | 125 + .../docker/docker/client/container_kill.go | 17 + .../docker/client/container_kill_test.go | 46 + .../docker/docker/client/container_list.go | 56 + .../docker/client/container_list_test.go | 96 + .../docker/docker/client/container_logs.go | 52 + .../docker/client/container_logs_test.go | 133 + .../docker/docker/client/container_pause.go | 10 + .../docker/client/container_pause_test.go | 41 + .../docker/docker/client/container_prune.go | 36 + .../docker/docker/client/container_remove.go | 27 + .../docker/client/container_remove_test.go | 59 + .../docker/docker/client/container_rename.go | 16 + .../docker/client/container_rename_test.go | 46 + .../docker/docker/client/container_resize.go | 29 + .../docker/client/container_resize_test.go | 82 + .../docker/docker/client/container_restart.go | 22 + .../docker/client/container_restart_test.go | 48 + .../docker/docker/client/container_start.go | 24 + .../docker/client/container_start_test.go | 58 + .../docker/docker/client/container_stats.go | 26 + .../docker/client/container_stats_test.go | 70 + .../docker/docker/client/container_stop.go | 21 + .../docker/client/container_stop_test.go | 48 + .../docker/docker/client/container_top.go | 28 + .../docker/client/container_top_test.go | 74 + .../docker/docker/client/container_unpause.go | 10 + .../docker/client/container_unpause_test.go | 41 + .../docker/docker/client/container_update.go | 22 + .../docker/client/container_update_test.go | 58 + .../docker/docker/client/container_wait.go | 26 + .../docker/client/container_wait_test.go | 70 + .../docker/docker/client/disk_usage.go | 26 + .../github.com/docker/docker/client/errors.go | 278 + .../github.com/docker/docker/client/events.go | 102 + .../docker/docker/client/events_test.go | 165 + .../github.com/docker/docker/client/hijack.go | 177 + .../docker/docker/client/image_build.go | 123 + .../docker/docker/client/image_build_test.go | 233 + .../docker/docker/client/image_create.go | 34 + .../docker/docker/client/image_create_test.go | 76 + .../docker/docker/client/image_history.go | 22 + .../docker/client/image_history_test.go | 60 + .../docker/docker/client/image_import.go | 37 + .../docker/docker/client/image_import_test.go | 81 + .../docker/docker/client/image_inspect.go | 33 + .../docker/client/image_inspect_test.go | 71 + .../docker/docker/client/image_list.go | 45 + .../docker/docker/client/image_list_test.go | 159 + .../docker/docker/client/image_load.go | 30 + .../docker/docker/client/image_load_test.go | 95 + .../docker/docker/client/image_prune.go | 36 + .../docker/docker/client/image_pull.go | 46 + .../docker/docker/client/image_pull_test.go | 199 + .../docker/docker/client/image_push.go | 54 + .../docker/docker/client/image_push_test.go | 180 + .../docker/docker/client/image_remove.go | 31 + .../docker/docker/client/image_remove_test.go | 95 + .../docker/docker/client/image_save.go | 22 + .../docker/docker/client/image_save_test.go | 58 + .../docker/docker/client/image_search.go | 51 + .../docker/docker/client/image_search_test.go | 165 + .../docker/docker/client/image_tag.go | 34 + .../docker/docker/client/image_tag_test.go | 121 + .../github.com/docker/docker/client/info.go | 26 + .../docker/docker/client/info_test.go | 76 + .../docker/docker/client/interface.go | 171 + .../docker/client/interface_experimental.go | 17 + .../docker/docker/client/interface_stable.go | 10 + .../github.com/docker/docker/client/login.go | 29 + .../docker/docker/client/network_connect.go | 18 + .../docker/client/network_connect_test.go | 107 + .../docker/docker/client/network_create.go | 25 + .../docker/client/network_create_test.go | 72 + .../docker/client/network_disconnect.go | 14 + .../docker/client/network_disconnect_test.go | 64 + .../docker/docker/client/network_inspect.go | 38 + .../docker/client/network_inspect_test.go | 69 + .../docker/docker/client/network_list.go | 31 + .../docker/docker/client/network_list_test.go | 108 + .../docker/docker/client/network_prune.go | 36 + .../docker/docker/client/network_remove.go | 10 + .../docker/client/network_remove_test.go | 47 + .../docker/docker/client/node_inspect.go | 33 + .../docker/docker/client/node_inspect_test.go | 65 + .../docker/docker/client/node_list.go | 36 + .../docker/docker/client/node_list_test.go | 94 + .../docker/docker/client/node_remove.go | 21 + .../docker/docker/client/node_remove_test.go | 69 + .../docker/docker/client/node_update.go | 18 + .../docker/docker/client/node_update_test.go | 49 + .../github.com/docker/docker/client/ping.go | 30 + .../docker/docker/client/plugin_create.go | 26 + .../docker/docker/client/plugin_disable.go | 19 + .../docker/client/plugin_disable_test.go | 48 + .../docker/docker/client/plugin_enable.go | 19 + .../docker/client/plugin_enable_test.go | 48 + .../docker/docker/client/plugin_inspect.go | 32 + .../docker/client/plugin_inspect_test.go | 54 + .../docker/docker/client/plugin_install.go | 113 + .../docker/docker/client/plugin_list.go | 21 + .../docker/docker/client/plugin_list_test.go | 59 + .../docker/docker/client/plugin_push.go | 17 + .../docker/docker/client/plugin_push_test.go | 51 + .../docker/docker/client/plugin_remove.go | 20 + .../docker/client/plugin_remove_test.go | 49 + .../docker/docker/client/plugin_set.go | 12 + .../docker/docker/client/plugin_set_test.go | 47 + .../docker/docker/client/plugin_upgrade.go | 37 + .../docker/docker/client/request.go | 247 + .../docker/docker/client/request_test.go | 92 + .../docker/docker/client/secret_create.go | 24 + .../docker/client/secret_create_test.go | 57 + .../docker/docker/client/secret_inspect.go | 34 + .../docker/client/secret_inspect_test.go | 65 + .../docker/docker/client/secret_list.go | 35 + .../docker/docker/client/secret_list_test.go | 94 + .../docker/docker/client/secret_remove.go | 10 + .../docker/client/secret_remove_test.go | 47 + .../docker/docker/client/secret_update.go | 19 + .../docker/client/secret_update_test.go | 49 + .../docker/docker/client/service_create.go | 30 + .../docker/client/service_create_test.go | 57 + .../docker/docker/client/service_inspect.go | 33 + .../docker/client/service_inspect_test.go | 65 + .../docker/docker/client/service_list.go | 35 + .../docker/docker/client/service_list_test.go | 94 + .../docker/docker/client/service_logs.go | 52 + .../docker/docker/client/service_logs_test.go | 133 + .../docker/docker/client/service_remove.go | 10 + .../docker/client/service_remove_test.go | 47 + .../docker/docker/client/service_update.go | 41 + .../docker/client/service_update_test.go | 77 + .../docker/client/swarm_get_unlock_key.go | 21 + .../docker/docker/client/swarm_init.go | 21 + .../docker/docker/client/swarm_init_test.go | 54 + .../docker/docker/client/swarm_inspect.go | 21 + .../docker/client/swarm_inspect_test.go | 56 + .../docker/docker/client/swarm_join.go | 13 + .../docker/docker/client/swarm_join_test.go | 51 + .../docker/docker/client/swarm_leave.go | 18 + .../docker/docker/client/swarm_leave_test.go | 66 + .../docker/docker/client/swarm_unlock.go | 17 + .../docker/docker/client/swarm_update.go | 22 + .../docker/docker/client/swarm_update_test.go | 49 + .../docker/docker/client/task_inspect.go | 34 + .../docker/docker/client/task_inspect_test.go | 54 + .../docker/docker/client/task_list.go | 35 + .../docker/docker/client/task_list_test.go | 94 + .../docker/docker/client/testdata/ca.pem | 18 + .../docker/docker/client/testdata/cert.pem | 18 + .../docker/docker/client/testdata/key.pem | 27 + .../docker/docker/client/transport.go | 28 + .../github.com/docker/docker/client/utils.go | 33 + .../docker/docker/client/version.go | 21 + .../docker/docker/client/volume_create.go | 21 + .../docker/client/volume_create_test.go | 75 + .../docker/docker/client/volume_inspect.go | 38 + .../docker/client/volume_inspect_test.go | 76 + .../docker/docker/client/volume_list.go | 32 + .../docker/docker/client/volume_list_test.go | 98 + .../docker/docker/client/volume_prune.go | 36 + .../docker/docker/client/volume_remove.go | 21 + .../docker/client/volume_remove_test.go | 47 + .../docker/docker/cmd/docker/daemon_none.go | 27 + .../docker/cmd/docker/daemon_none_test.go | 17 + .../docker/cmd/docker/daemon_unit_test.go | 30 + .../docker/docker/cmd/docker/daemon_unix.go | 79 + .../docker/docker/cmd/docker/docker.go | 180 + .../docker/docker/cmd/docker/docker_test.go | 32 + .../docker/cmd/docker/docker_windows.go | 18 + .../docker/docker/cmd/dockerd/README.md | 3 + .../docker/docker/cmd/dockerd/daemon.go | 524 + .../docker/cmd/dockerd/daemon_freebsd.go | 5 + .../docker/docker/cmd/dockerd/daemon_linux.go | 11 + .../docker/cmd/dockerd/daemon_solaris.go | 85 + .../docker/docker/cmd/dockerd/daemon_test.go | 145 + .../docker/docker/cmd/dockerd/daemon_unix.go | 137 + .../docker/cmd/dockerd/daemon_unix_test.go | 114 + .../docker/cmd/dockerd/daemon_windows.go | 92 + .../docker/docker/cmd/dockerd/docker.go | 110 + .../docker/cmd/dockerd/docker_windows.go | 18 + .../dockerd/hack/malformed_host_override.go | 121 + .../hack/malformed_host_override_test.go | 124 + .../docker/docker/cmd/dockerd/metrics.go | 27 + .../docker/cmd/dockerd/service_unsupported.go | 14 + .../docker/cmd/dockerd/service_windows.go | 426 + .../docker/docker/container/archive.go | 76 + .../docker/docker/container/container.go | 1103 + .../docker/container/container_linux.go | 9 + .../docker/container/container_notlinux.go | 23 + .../docker/container/container_unit_test.go | 60 + .../docker/docker/container/container_unix.go | 448 + .../docker/container/container_windows.go | 111 + .../docker/docker/container/health.go | 49 + .../docker/docker/container/history.go | 30 + .../docker/docker/container/memory_store.go | 95 + .../docker/container/memory_store_test.go | 106 + .../docker/docker/container/monitor.go | 46 + .../docker/docker/container/mounts_unix.go | 12 + .../docker/docker/container/mounts_windows.go | 8 + .../docker/docker/container/state.go | 343 + .../docker/docker/container/state_solaris.go | 7 + .../docker/docker/container/state_test.go | 113 + .../docker/docker/container/state_unix.go | 10 + .../docker/docker/container/state_windows.go | 7 + .../docker/docker/container/store.go | 28 + .../docker/docker/container/stream/streams.go | 143 + .../docker/docker/contrib/README.md | 4 + .../docker/docker/contrib/REVIEWERS | 1 + .../docker/docker/contrib/apparmor/main.go | 56 + .../docker/contrib/apparmor/template.go | 268 + .../contrib/builder/deb/aarch64/build.sh | 10 + .../contrib/builder/deb/aarch64/generate.sh | 118 + .../deb/aarch64/ubuntu-trusty/Dockerfile | 24 + .../deb/aarch64/ubuntu-xenial/Dockerfile | 22 + .../contrib/builder/deb/amd64/README.md | 5 + .../docker/contrib/builder/deb/amd64/build.sh | 10 + .../deb/amd64/debian-jessie/Dockerfile | 20 + .../deb/amd64/debian-stretch/Dockerfile | 20 + .../deb/amd64/debian-wheezy/Dockerfile | 22 + .../contrib/builder/deb/amd64/generate.sh | 149 + .../deb/amd64/ubuntu-precise/Dockerfile | 16 + .../deb/amd64/ubuntu-trusty/Dockerfile | 16 + .../deb/amd64/ubuntu-xenial/Dockerfile | 16 + .../deb/amd64/ubuntu-yakkety/Dockerfile | 16 + .../deb/armhf/debian-jessie/Dockerfile | 20 + .../contrib/builder/deb/armhf/generate.sh | 158 + .../deb/armhf/raspbian-jessie/Dockerfile | 22 + .../deb/armhf/ubuntu-trusty/Dockerfile | 16 + .../deb/armhf/ubuntu-xenial/Dockerfile | 16 + .../deb/armhf/ubuntu-yakkety/Dockerfile | 16 + .../contrib/builder/deb/ppc64le/build.sh | 10 + .../contrib/builder/deb/ppc64le/generate.sh | 103 + .../deb/ppc64le/ubuntu-trusty/Dockerfile | 16 + .../deb/ppc64le/ubuntu-xenial/Dockerfile | 16 + .../deb/ppc64le/ubuntu-yakkety/Dockerfile | 16 + .../docker/contrib/builder/deb/s390x/build.sh | 10 + .../contrib/builder/deb/s390x/generate.sh | 96 + .../deb/s390x/ubuntu-xenial/Dockerfile | 16 + .../contrib/builder/rpm/amd64/README.md | 5 + .../docker/contrib/builder/rpm/amd64/build.sh | 10 + .../builder/rpm/amd64/centos-7/Dockerfile | 19 + .../builder/rpm/amd64/fedora-24/Dockerfile | 19 + .../builder/rpm/amd64/fedora-25/Dockerfile | 19 + .../contrib/builder/rpm/amd64/generate.sh | 189 + .../rpm/amd64/opensuse-13.2/Dockerfile | 18 + .../rpm/amd64/oraclelinux-6/Dockerfile | 28 + .../rpm/amd64/oraclelinux-7/Dockerfile | 18 + .../builder/rpm/amd64/photon-1.0/Dockerfile | 18 + .../docker/docker/contrib/check-config.sh | 354 + .../docker/contrib/completion/REVIEWERS | 2 + .../docker/contrib/completion/bash/docker | 4282 ++++ .../contrib/completion/fish/docker.fish | 405 + .../contrib/completion/powershell/readme.txt | 1 + .../docker/contrib/completion/zsh/REVIEWERS | 2 + .../docker/contrib/completion/zsh/_docker | 2787 +++ .../contrib/desktop-integration/README.md | 11 + .../desktop-integration/chromium/Dockerfile | 36 + .../desktop-integration/gparted/Dockerfile | 31 + .../contrib/docker-device-tool/README.md | 14 + .../contrib/docker-device-tool/device_tool.go | 176 + .../docker-device-tool/device_tool_windows.go | 4 + .../docker/docker/contrib/dockerize-disk.sh | 118 + .../contrib/download-frozen-image-v1.sh | 108 + .../contrib/download-frozen-image-v2.sh | 121 + .../docker/docker/contrib/editorconfig | 13 + .../docker/docker/contrib/gitdm/aliases | 148 + .../docker/docker/contrib/gitdm/domain-map | 39 + .../docker/contrib/gitdm/generate_aliases.sh | 16 + .../docker/docker/contrib/gitdm/gitdm.config | 17 + .../docker/contrib/httpserver/Dockerfile | 4 + .../contrib/httpserver/Dockerfile.solaris | 4 + .../docker/contrib/httpserver/server.go | 12 + .../docker/contrib/init/openrc/docker.confd | 13 + .../docker/contrib/init/openrc/docker.initd | 22 + .../docker/contrib/init/systemd/REVIEWERS | 3 + .../contrib/init/systemd/docker.service | 29 + .../contrib/init/systemd/docker.service.rpm | 28 + .../docker/contrib/init/systemd/docker.socket | 12 + .../contrib/init/sysvinit-debian/docker | 152 + .../init/sysvinit-debian/docker.default | 20 + .../contrib/init/sysvinit-redhat/docker | 153 + .../init/sysvinit-redhat/docker.sysconfig | 7 + .../docker/contrib/init/upstart/REVIEWERS | 2 + .../docker/contrib/init/upstart/docker.conf | 72 + .../docker/contrib/mac-install-bundle.sh | 45 + .../docker/docker/contrib/mkimage-alpine.sh | 87 + .../docker/contrib/mkimage-arch-pacman.conf | 92 + .../docker/docker/contrib/mkimage-arch.sh | 126 + .../contrib/mkimage-archarm-pacman.conf | 98 + .../docker/docker/contrib/mkimage-busybox.sh | 43 + .../docker/docker/contrib/mkimage-crux.sh | 75 + .../docker/contrib/mkimage-debootstrap.sh | 297 + .../docker/docker/contrib/mkimage-pld.sh | 73 + .../docker/docker/contrib/mkimage-rinse.sh | 123 + .../docker/docker/contrib/mkimage-yum.sh | 136 + .../docker/docker/contrib/mkimage.sh | 128 + .../contrib/mkimage/.febootstrap-minimize | 28 + .../docker/contrib/mkimage/busybox-static | 34 + .../docker/docker/contrib/mkimage/debootstrap | 226 + .../docker/contrib/mkimage/mageia-urpmi | 61 + .../docker/docker/contrib/mkimage/rinse | 25 + .../docker/docker/contrib/mkimage/solaris | 89 + .../docker/docker/contrib/nnp-test/Dockerfile | 9 + .../docker/docker/contrib/nnp-test/nnp-test.c | 10 + .../docker/contrib/nuke-graph-directory.sh | 64 + .../docker/docker/contrib/project-stats.sh | 22 + .../docker/docker/contrib/report-issue.sh | 105 + .../docker/docker/contrib/reprepro/suites.sh | 12 + .../docker-engine-selinux/LICENSE | 339 + .../docker-engine-selinux/Makefile | 23 + .../docker-engine-selinux/README.md | 1 + .../docker-engine-selinux/docker.fc | 29 + .../docker-engine-selinux/docker.if | 523 + .../docker-engine-selinux/docker.te | 399 + .../docker-engine-selinux/LICENSE | 339 + .../docker-engine-selinux/Makefile | 23 + .../docker-engine-selinux/README.md | 1 + .../docker-engine-selinux/docker.fc | 33 + .../docker-engine-selinux/docker.if | 659 + .../docker-engine-selinux/docker.te | 465 + .../selinux/docker-engine-selinux/LICENSE | 340 + .../selinux/docker-engine-selinux/Makefile | 16 + .../selinux/docker-engine-selinux/docker.fc | 18 + .../selinux/docker-engine-selinux/docker.if | 461 + .../selinux/docker-engine-selinux/docker.te | 407 + .../docker-engine-selinux/docker_selinux.8.gz | Bin 0 -> 2847 bytes .../contrib/syntax/nano/Dockerfile.nanorc | 26 + .../docker/contrib/syntax/nano/README.md | 32 + .../Preferences/Dockerfile.tmPreferences | 24 + .../Syntaxes/Dockerfile.tmLanguage | 143 + .../textmate/Docker.tmbundle/info.plist | 16 + .../docker/contrib/syntax/textmate/README.md | 17 + .../docker/contrib/syntax/textmate/REVIEWERS | 1 + .../docker/docker/contrib/syntax/vim/LICENSE | 22 + .../docker/contrib/syntax/vim/README.md | 26 + .../contrib/syntax/vim/doc/dockerfile.txt | 18 + .../syntax/vim/ftdetect/dockerfile.vim | 1 + .../contrib/syntax/vim/syntax/dockerfile.vim | 31 + .../docker/contrib/syscall-test/Dockerfile | 15 + .../docker/docker/contrib/syscall-test/acct.c | 16 + .../docker/contrib/syscall-test/exit32.s | 7 + .../docker/docker/contrib/syscall-test/ns.c | 63 + .../docker/docker/contrib/syscall-test/raw.c | 14 + .../docker/contrib/syscall-test/setgid.c | 11 + .../docker/contrib/syscall-test/setuid.c | 11 + .../docker/contrib/syscall-test/socket.c | 30 + .../docker/contrib/syscall-test/userns.c | 63 + .../docker/contrib/udev/80-docker.rules | 3 + .../docker/contrib/vagrant-docker/README.md | 50 + .../docker/docker/daemon/apparmor_default.go | 36 + .../daemon/apparmor_default_unsupported.go | 7 + .../docker/docker/daemon/archive.go | 436 + .../docker/docker/daemon/archive_unix.go | 58 + .../docker/docker/daemon/archive_windows.go | 18 + .../github.com/docker/docker/daemon/attach.go | 147 + .../github.com/docker/docker/daemon/auth.go | 13 + .../docker/docker/daemon/bindmount_solaris.go | 5 + .../docker/docker/daemon/bindmount_unix.go | 5 + .../github.com/docker/docker/daemon/cache.go | 254 + .../docker/docker/daemon/caps/utils_unix.go | 131 + .../docker/docker/daemon/changes.go | 31 + .../docker/docker/daemon/checkpoint.go | 110 + .../docker/docker/daemon/cluster.go | 12 + .../docker/docker/daemon/cluster/cluster.go | 1973 ++ .../daemon/cluster/convert/container.go | 235 + .../docker/daemon/cluster/convert/network.go | 210 + .../docker/daemon/cluster/convert/node.go | 89 + .../docker/daemon/cluster/convert/secret.go | 64 + .../docker/daemon/cluster/convert/service.go | 366 + .../docker/daemon/cluster/convert/swarm.go | 122 + .../docker/daemon/cluster/convert/task.go | 81 + .../docker/daemon/cluster/executor/backend.go | 61 + .../cluster/executor/container/adapter.go | 463 + .../cluster/executor/container/attachment.go | 81 + .../cluster/executor/container/container.go | 598 + .../cluster/executor/container/controller.go | 672 + .../cluster/executor/container/errors.go | 15 + .../cluster/executor/container/executor.go | 194 + .../cluster/executor/container/health_test.go | 102 + .../cluster/executor/container/validate.go | 39 + .../executor/container/validate_test.go | 141 + .../executor/container/validate_unix_test.go | 8 + .../container/validate_windows_test.go | 8 + .../docker/docker/daemon/cluster/filters.go | 116 + .../docker/docker/daemon/cluster/helpers.go | 108 + .../docker/daemon/cluster/listen_addr.go | 278 + .../daemon/cluster/listen_addr_linux.go | 91 + .../daemon/cluster/listen_addr_others.go | 9 + .../daemon/cluster/listen_addr_solaris.go | 57 + .../docker/daemon/cluster/provider/network.go | 37 + .../docker/docker/daemon/cluster/secrets.go | 133 + .../github.com/docker/docker/daemon/commit.go | 271 + .../github.com/docker/docker/daemon/config.go | 525 + .../docker/daemon/config_common_unix.go | 90 + .../docker/daemon/config_experimental.go | 8 + .../docker/docker/daemon/config_solaris.go | 47 + .../docker/docker/daemon/config_test.go | 229 + .../docker/docker/daemon/config_unix.go | 104 + .../docker/docker/daemon/config_unix_test.go | 80 + .../docker/docker/daemon/config_windows.go | 71 + .../docker/daemon/config_windows_test.go | 59 + .../docker/docker/daemon/container.go | 282 + .../docker/daemon/container_operations.go | 1049 + .../daemon/container_operations_solaris.go | 46 + .../daemon/container_operations_unix.go | 281 + .../daemon/container_operations_windows.go | 59 + .../github.com/docker/docker/daemon/create.go | 290 + .../docker/docker/daemon/create_unix.go | 81 + .../docker/docker/daemon/create_windows.go | 80 + .../github.com/docker/docker/daemon/daemon.go | 1321 ++ .../docker/daemon/daemon_experimental.go | 7 + .../docker/docker/daemon/daemon_linux.go | 80 + .../docker/docker/daemon/daemon_linux_test.go | 104 + .../docker/docker/daemon/daemon_solaris.go | 523 + .../docker/docker/daemon/daemon_test.go | 627 + .../docker/docker/daemon/daemon_unix.go | 1237 ++ .../docker/docker/daemon/daemon_unix_test.go | 283 + .../docker/daemon/daemon_unsupported.go | 5 + .../docker/docker/daemon/daemon_windows.go | 604 + .../docker/docker/daemon/debugtrap.go | 62 + .../docker/docker/daemon/debugtrap_unix.go | 33 + .../docker/daemon/debugtrap_unsupported.go | 7 + .../docker/docker/daemon/debugtrap_windows.go | 52 + .../github.com/docker/docker/daemon/delete.go | 168 + .../docker/docker/daemon/delete_test.go | 43 + .../docker/docker/daemon/discovery.go | 215 + .../docker/docker/daemon/discovery_test.go | 164 + .../docker/docker/daemon/disk_usage.go | 100 + .../github.com/docker/docker/daemon/errors.go | 57 + .../github.com/docker/docker/daemon/events.go | 132 + .../docker/docker/daemon/events/events.go | 158 + .../docker/daemon/events/events_test.go | 275 + .../docker/docker/daemon/events/filter.go | 110 + .../docker/docker/daemon/events/metrics.go | 15 + .../daemon/events/testutils/testutils.go | 76 + .../docker/docker/daemon/events_test.go | 94 + .../github.com/docker/docker/daemon/exec.go | 280 + .../docker/docker/daemon/exec/exec.go | 118 + .../docker/docker/daemon/exec_linux.go | 27 + .../docker/docker/daemon/exec_solaris.go | 11 + .../docker/docker/daemon/exec_windows.go | 14 + .../github.com/docker/docker/daemon/export.go | 60 + .../docker/docker/daemon/getsize_unix.go | 41 + .../docker/daemon/graphdriver/aufs/aufs.go | 669 + .../daemon/graphdriver/aufs/aufs_test.go | 802 + .../docker/daemon/graphdriver/aufs/dirs.go | 64 + .../docker/daemon/graphdriver/aufs/mount.go | 21 + .../daemon/graphdriver/aufs/mount_linux.go | 7 + .../graphdriver/aufs/mount_unsupported.go | 12 + .../docker/daemon/graphdriver/btrfs/btrfs.go | 530 + .../daemon/graphdriver/btrfs/btrfs_test.go | 63 + .../graphdriver/btrfs/dummy_unsupported.go | 3 + .../daemon/graphdriver/btrfs/version.go | 26 + .../daemon/graphdriver/btrfs/version_none.go | 14 + .../daemon/graphdriver/btrfs/version_test.go | 13 + .../docker/daemon/graphdriver/counter.go | 67 + .../daemon/graphdriver/devmapper/README.md | 96 + .../daemon/graphdriver/devmapper/deviceset.go | 2727 +++ .../graphdriver/devmapper/devmapper_doc.go | 106 + .../graphdriver/devmapper/devmapper_test.go | 110 + .../daemon/graphdriver/devmapper/driver.go | 231 + .../daemon/graphdriver/devmapper/mount.go | 89 + .../docker/daemon/graphdriver/driver.go | 270 + .../daemon/graphdriver/driver_freebsd.go | 19 + .../docker/daemon/graphdriver/driver_linux.go | 135 + .../daemon/graphdriver/driver_solaris.go | 97 + .../daemon/graphdriver/driver_unsupported.go | 15 + .../daemon/graphdriver/driver_windows.go | 14 + .../docker/daemon/graphdriver/fsdiff.go | 169 + .../graphdriver/graphtest/graphbench_unix.go | 259 + .../graphdriver/graphtest/graphtest_unix.go | 358 + .../graphtest/graphtest_windows.go | 1 + .../daemon/graphdriver/graphtest/testutil.go | 342 + .../graphdriver/graphtest/testutil_unix.go | 143 + .../docker/daemon/graphdriver/overlay/copy.go | 174 + .../daemon/graphdriver/overlay/overlay.go | 462 + .../graphdriver/overlay/overlay_test.go | 93 + .../overlay/overlay_unsupported.go | 3 + .../daemon/graphdriver/overlay2/check.go | 79 + .../daemon/graphdriver/overlay2/mount.go | 88 + .../daemon/graphdriver/overlay2/overlay.go | 662 + .../graphdriver/overlay2/overlay_test.go | 121 + .../overlay2/overlay_unsupported.go | 3 + .../daemon/graphdriver/overlay2/randomid.go | 80 + .../graphdriver/overlayutils/overlayutils.go | 18 + .../docker/daemon/graphdriver/plugin.go | 43 + .../docker/docker/daemon/graphdriver/proxy.go | 252 + .../daemon/graphdriver/quota/projectquota.go | 339 + .../graphdriver/register/register_aufs.go | 8 + .../graphdriver/register/register_btrfs.go | 8 + .../register/register_devicemapper.go | 8 + .../graphdriver/register/register_overlay.go | 9 + .../graphdriver/register/register_vfs.go | 6 + .../graphdriver/register/register_windows.go | 6 + .../graphdriver/register/register_zfs.go | 8 + .../docker/daemon/graphdriver/vfs/driver.go | 145 + .../docker/daemon/graphdriver/vfs/vfs_test.go | 37 + .../daemon/graphdriver/windows/windows.go | 886 + .../docker/daemon/graphdriver/zfs/MAINTAINERS | 2 + .../docker/daemon/graphdriver/zfs/zfs.go | 417 + .../daemon/graphdriver/zfs/zfs_freebsd.go | 38 + .../daemon/graphdriver/zfs/zfs_linux.go | 27 + .../daemon/graphdriver/zfs/zfs_solaris.go | 59 + .../docker/daemon/graphdriver/zfs/zfs_test.go | 35 + .../daemon/graphdriver/zfs/zfs_unsupported.go | 11 + .../github.com/docker/docker/daemon/health.go | 341 + .../docker/docker/daemon/health_test.go | 118 + .../github.com/docker/docker/daemon/image.go | 76 + .../docker/docker/daemon/image_delete.go | 412 + .../docker/docker/daemon/image_exporter.go | 25 + .../docker/docker/daemon/image_history.go | 84 + .../docker/docker/daemon/image_inspect.go | 82 + .../docker/docker/daemon/image_pull.go | 149 + .../docker/docker/daemon/image_push.go | 63 + .../docker/docker/daemon/image_tag.go | 37 + .../github.com/docker/docker/daemon/images.go | 331 + .../github.com/docker/docker/daemon/import.go | 135 + .../github.com/docker/docker/daemon/info.go | 180 + .../docker/docker/daemon/info_unix.go | 82 + .../docker/docker/daemon/info_windows.go | 10 + .../docker/daemon/initlayer/setup_solaris.go | 13 + .../docker/daemon/initlayer/setup_unix.go | 69 + .../docker/daemon/initlayer/setup_windows.go | 13 + .../docker/docker/daemon/inspect.go | 264 + .../docker/docker/daemon/inspect_solaris.go | 41 + .../docker/docker/daemon/inspect_unix.go | 92 + .../docker/docker/daemon/inspect_windows.go | 41 + .../github.com/docker/docker/daemon/keys.go | 59 + .../docker/docker/daemon/keys_unsupported.go | 8 + .../github.com/docker/docker/daemon/kill.go | 164 + .../github.com/docker/docker/daemon/links.go | 87 + .../docker/docker/daemon/links/links.go | 141 + .../docker/docker/daemon/links/links_test.go | 213 + .../docker/docker/daemon/links_linux.go | 72 + .../docker/docker/daemon/links_linux_test.go | 98 + .../docker/docker/daemon/links_notlinux.go | 10 + .../github.com/docker/docker/daemon/list.go | 660 + .../docker/docker/daemon/list_unix.go | 11 + .../docker/docker/daemon/list_windows.go | 20 + .../docker/docker/daemon/logdrivers_linux.go | 15 + .../docker/daemon/logdrivers_windows.go | 13 + .../daemon/logger/awslogs/cloudwatchlogs.go | 404 + .../logger/awslogs/cloudwatchlogs_test.go | 724 + .../logger/awslogs/cwlogsiface_mock_test.go | 77 + .../docker/docker/daemon/logger/context.go | 111 + .../docker/docker/daemon/logger/copier.go | 131 + .../docker/daemon/logger/copier_test.go | 296 + .../daemon/logger/etwlogs/etwlogs_windows.go | 170 + .../docker/docker/daemon/logger/factory.go | 104 + .../docker/daemon/logger/fluentd/fluentd.go | 246 + .../daemon/logger/gcplogs/gcplogging.go | 200 + .../docker/docker/daemon/logger/gelf/gelf.go | 209 + .../daemon/logger/gelf/gelf_unsupported.go | 3 + .../docker/daemon/logger/journald/journald.go | 122 + .../daemon/logger/journald/journald_test.go | 23 + .../logger/journald/journald_unsupported.go | 6 + .../docker/daemon/logger/journald/read.go | 401 + .../daemon/logger/journald/read_native.go | 6 + .../logger/journald/read_native_compat.go | 6 + .../logger/journald/read_unsupported.go | 7 + .../daemon/logger/jsonfilelog/jsonfilelog.go | 151 + .../logger/jsonfilelog/jsonfilelog_test.go | 248 + .../docker/daemon/logger/jsonfilelog/read.go | 319 + .../daemon/logger/logentries/logentries.go | 94 + .../docker/docker/daemon/logger/logger.go | 134 + .../docker/daemon/logger/logger_test.go | 26 + .../daemon/logger/loggerutils/log_tag.go | 31 + .../daemon/logger/loggerutils/log_tag_test.go | 47 + .../logger/loggerutils/rotatefilewriter.go | 124 + .../docker/daemon/logger/splunk/splunk.go | 621 + .../daemon/logger/splunk/splunk_test.go | 1302 ++ .../logger/splunk/splunkhecmock_test.go | 157 + .../docker/daemon/logger/syslog/syslog.go | 262 + .../daemon/logger/syslog/syslog_test.go | 62 + .../github.com/docker/docker/daemon/logs.go | 142 + .../docker/docker/daemon/logs_test.go | 15 + .../docker/docker/daemon/metrics.go | 42 + .../docker/docker/daemon/monitor.go | 132 + .../docker/docker/daemon/monitor_linux.go | 19 + .../docker/docker/daemon/monitor_solaris.go | 18 + .../docker/docker/daemon/monitor_windows.go | 46 + .../github.com/docker/docker/daemon/mounts.go | 48 + .../github.com/docker/docker/daemon/names.go | 116 + .../docker/docker/daemon/network.go | 498 + .../docker/docker/daemon/network/settings.go | 33 + .../docker/docker/daemon/oci_linux.go | 790 + .../docker/docker/daemon/oci_solaris.go | 188 + .../docker/docker/daemon/oci_windows.go | 122 + .../github.com/docker/docker/daemon/pause.go | 49 + .../github.com/docker/docker/daemon/prune.go | 236 + .../github.com/docker/docker/daemon/rename.go | 122 + .../github.com/docker/docker/daemon/resize.go | 40 + .../docker/docker/daemon/restart.go | 70 + .../github.com/docker/docker/daemon/search.go | 94 + .../docker/docker/daemon/search_test.go | 358 + .../docker/docker/daemon/seccomp_disabled.go | 19 + .../docker/docker/daemon/seccomp_linux.go | 55 + .../docker/daemon/seccomp_unsupported.go | 5 + .../docker/docker/daemon/secrets.go | 36 + .../docker/docker/daemon/secrets_linux.go | 7 + .../docker/daemon/secrets_unsupported.go | 7 + .../docker/docker/daemon/selinux_linux.go | 17 + .../docker/daemon/selinux_unsupported.go | 13 + .../github.com/docker/docker/daemon/start.go | 230 + .../docker/docker/daemon/start_unix.go | 31 + .../docker/docker/daemon/start_windows.go | 205 + .../github.com/docker/docker/daemon/stats.go | 158 + .../docker/docker/daemon/stats_collector.go | 132 + .../docker/daemon/stats_collector_solaris.go | 34 + .../docker/daemon/stats_collector_unix.go | 71 + .../docker/daemon/stats_collector_windows.go | 15 + .../docker/docker/daemon/stats_unix.go | 58 + .../docker/docker/daemon/stats_windows.go | 11 + .../github.com/docker/docker/daemon/stop.go | 83 + .../docker/docker/daemon/top_unix.go | 126 + .../docker/docker/daemon/top_unix_test.go | 76 + .../docker/docker/daemon/top_windows.go | 53 + .../docker/docker/daemon/unpause.go | 38 + .../github.com/docker/docker/daemon/update.go | 92 + .../docker/docker/daemon/update_linux.go | 25 + .../docker/docker/daemon/update_solaris.go | 11 + .../docker/docker/daemon/update_windows.go | 13 + .../docker/docker/daemon/volumes.go | 303 + .../docker/docker/daemon/volumes_unit_test.go | 39 + .../docker/docker/daemon/volumes_unix.go | 219 + .../docker/docker/daemon/volumes_windows.go | 47 + .../github.com/docker/docker/daemon/wait.go | 32 + .../docker/docker/daemon/workdir.go | 21 + .../docker/docker/distribution/config.go | 241 + .../docker/docker/distribution/errors.go | 159 + .../fixtures/validate_manifest/bad_manifest | 38 + .../validate_manifest/extra_data_manifest | 46 + .../fixtures/validate_manifest/good_manifest | 38 + .../docker/distribution/metadata/metadata.go | 75 + .../distribution/metadata/v1_id_service.go | 51 + .../metadata/v1_id_service_test.go | 83 + .../metadata/v2_metadata_service.go | 241 + .../metadata/v2_metadata_service_test.go | 115 + .../docker/docker/distribution/pull.go | 200 + .../docker/docker/distribution/pull_v1.go | 368 + .../docker/docker/distribution/pull_v2.go | 878 + .../docker/distribution/pull_v2_test.go | 183 + .../docker/distribution/pull_v2_unix.go | 13 + .../docker/distribution/pull_v2_windows.go | 49 + .../docker/docker/distribution/push.go | 186 + .../docker/docker/distribution/push_v1.go | 463 + .../docker/docker/distribution/push_v2.go | 697 + .../docker/distribution/push_v2_test.go | 579 + .../docker/docker/distribution/registry.go | 156 + .../docker/distribution/registry_unit_test.go | 136 + .../docker/distribution/utils/progress.go | 44 + .../docker/distribution/xfer/download.go | 452 + .../docker/distribution/xfer/download_test.go | 356 + .../docker/distribution/xfer/transfer.go | 401 + .../docker/distribution/xfer/transfer_test.go | 410 + .../docker/docker/distribution/xfer/upload.go | 168 + .../docker/distribution/xfer/upload_test.go | 134 + .../docker/docker/dockerversion/useragent.go | 74 + .../docker/dockerversion/version_lib.go | 16 + .../github.com/docker/docker/docs/README.md | 30 + .../docker/docker/docs/api/v1.18.md | 2156 ++ .../docker/docker/docs/api/v1.19.md | 2238 ++ .../docker/docker/docs/api/v1.20.md | 2391 +++ .../docker/docker/docs/api/v1.21.md | 2969 +++ .../docker/docker/docs/api/v1.22.md | 3307 +++ .../docker/docker/docs/api/v1.23.md | 3424 ++++ .../docker/docker/docs/api/v1.24.md | 5316 +++++ .../docker/docker/docs/api/version-history.md | 249 + .../docker/docker/docs/deprecated.md | 286 + .../docker/docker/docs/extend/EBS_volume.md | 164 + .../docker/docker/docs/extend/config.md | 225 + .../extend/images/authz_additional_info.png | Bin 0 -> 45916 bytes .../docker/docs/extend/images/authz_allow.png | Bin 0 -> 33505 bytes .../docs/extend/images/authz_chunked.png | Bin 0 -> 33168 bytes .../extend/images/authz_connection_hijack.png | Bin 0 -> 38780 bytes .../docker/docs/extend/images/authz_deny.png | Bin 0 -> 27099 bytes .../docker/docker/docs/extend/index.md | 222 + .../docker/docs/extend/legacy_plugins.md | 98 + .../docker/docker/docs/extend/plugin_api.md | 196 + .../docs/extend/plugins_authorization.md | 260 + .../docker/docs/extend/plugins_graphdriver.md | 376 + .../docker/docs/extend/plugins_network.md | 77 + .../docker/docs/extend/plugins_volume.md | 276 + .../docker/docker/docs/reference/builder.md | 1746 ++ .../docs/reference/commandline/attach.md | 131 + .../docs/reference/commandline/build.md | 451 + .../docker/docs/reference/commandline/cli.md | 249 + .../docs/reference/commandline/commit.md | 93 + .../reference/commandline/container_prune.md | 47 + .../docker/docs/reference/commandline/cp.md | 112 + .../docs/reference/commandline/create.md | 211 + .../docs/reference/commandline/deploy.md | 101 + .../docker/docs/reference/commandline/diff.md | 48 + .../reference/commandline/docker_images.gif | Bin 0 -> 35785 bytes .../docs/reference/commandline/dockerd.md | 1364 ++ .../docs/reference/commandline/events.md | 217 + .../docker/docs/reference/commandline/exec.md | 65 + .../docs/reference/commandline/export.md | 43 + .../docs/reference/commandline/history.md | 48 + .../docs/reference/commandline/image_prune.md | 71 + .../docs/reference/commandline/images.md | 304 + .../docs/reference/commandline/import.md | 75 + .../docs/reference/commandline/index.md | 178 + .../docker/docs/reference/commandline/info.md | 224 + .../docs/reference/commandline/inspect.md | 102 + .../docker/docs/reference/commandline/kill.md | 34 + .../docker/docs/reference/commandline/load.md | 53 + .../docs/reference/commandline/login.md | 122 + .../docs/reference/commandline/logout.md | 30 + .../docker/docs/reference/commandline/logs.md | 66 + .../docker/docs/reference/commandline/menu.md | 28 + .../reference/commandline/network_connect.md | 100 + .../reference/commandline/network_create.md | 202 + .../commandline/network_disconnect.md | 43 + .../reference/commandline/network_inspect.md | 192 + .../docs/reference/commandline/network_ls.md | 218 + .../reference/commandline/network_prune.md | 45 + .../docs/reference/commandline/network_rm.md | 59 + .../docs/reference/commandline/node_demote.md | 42 + .../reference/commandline/node_inspect.md | 137 + .../docs/reference/commandline/node_ls.md | 130 + .../reference/commandline/node_promote.md | 41 + .../docs/reference/commandline/node_ps.md | 107 + .../docs/reference/commandline/node_rm.md | 73 + .../docs/reference/commandline/node_update.md | 71 + .../docs/reference/commandline/pause.md | 40 + .../reference/commandline/plugin_create.md | 60 + .../reference/commandline/plugin_disable.md | 66 + .../reference/commandline/plugin_enable.md | 65 + .../reference/commandline/plugin_inspect.md | 164 + .../reference/commandline/plugin_install.md | 71 + .../docs/reference/commandline/plugin_ls.md | 53 + .../docs/reference/commandline/plugin_push.md | 50 + .../docs/reference/commandline/plugin_rm.md | 56 + .../docs/reference/commandline/plugin_set.md | 99 + .../reference/commandline/plugin_upgrade.md | 84 + .../docker/docs/reference/commandline/port.md | 41 + .../docker/docs/reference/commandline/ps.md | 384 + .../docker/docs/reference/commandline/pull.md | 252 + .../docker/docs/reference/commandline/push.md | 75 + .../docs/reference/commandline/rename.md | 27 + .../docs/reference/commandline/restart.md | 26 + .../docker/docs/reference/commandline/rm.md | 69 + .../docker/docs/reference/commandline/rmi.md | 83 + .../docker/docs/reference/commandline/run.md | 732 + .../docker/docs/reference/commandline/save.md | 45 + .../docs/reference/commandline/search.md | 134 + .../reference/commandline/secret_create.md | 90 + .../reference/commandline/secret_inspect.md | 85 + .../docs/reference/commandline/secret_ls.md | 43 + .../docs/reference/commandline/secret_rm.md | 48 + .../reference/commandline/service_create.md | 556 + .../reference/commandline/service_inspect.md | 162 + .../reference/commandline/service_logs.md | 77 + .../docs/reference/commandline/service_ls.md | 114 + .../docs/reference/commandline/service_ps.md | 161 + .../docs/reference/commandline/service_rm.md | 55 + .../reference/commandline/service_scale.md | 96 + .../reference/commandline/service_update.md | 181 + .../reference/commandline/stack_deploy.md | 98 + .../docs/reference/commandline/stack_ls.md | 47 + .../docs/reference/commandline/stack_ps.md | 51 + .../docs/reference/commandline/stack_rm.md | 38 + .../reference/commandline/stack_services.md | 70 + .../docs/reference/commandline/start.md | 28 + .../docs/reference/commandline/stats.md | 117 + .../docker/docs/reference/commandline/stop.md | 29 + .../docs/reference/commandline/swarm_init.md | 142 + .../docs/reference/commandline/swarm_join.md | 102 + .../reference/commandline/swarm_join_token.md | 105 + .../docs/reference/commandline/swarm_leave.md | 58 + .../reference/commandline/swarm_unlock.md | 41 + .../reference/commandline/swarm_unlock_key.md | 84 + .../reference/commandline/swarm_update.md | 45 + .../docs/reference/commandline/system_df.md | 76 + .../reference/commandline/system_prune.md | 79 + .../docker/docs/reference/commandline/tag.md | 74 + .../docker/docs/reference/commandline/top.md | 25 + .../docs/reference/commandline/unpause.md | 36 + .../docs/reference/commandline/update.md | 120 + .../docs/reference/commandline/version.md | 67 + .../reference/commandline/volume_create.md | 91 + .../reference/commandline/volume_inspect.md | 59 + .../docs/reference/commandline/volume_ls.md | 183 + .../reference/commandline/volume_prune.md | 54 + .../docs/reference/commandline/volume_rm.md | 42 + .../docker/docs/reference/commandline/wait.md | 25 + .../docker/docker/docs/reference/glossary.md | 286 + .../docker/docker/docs/reference/index.md | 21 + .../docker/docker/docs/reference/run.md | 1555 ++ .../docker/docs/static_files/contributors.png | Bin 0 -> 23100 bytes .../static_files/docker-logo-compressed.png | Bin 0 -> 4972 bytes .../docker/docker/experimental/README.md | 44 + .../docker/experimental/checkpoint-restore.md | 88 + .../experimental/docker-stacks-and-bundles.md | 202 + .../experimental/images/ipvlan-l3.gliffy | 1 + .../docker/experimental/images/ipvlan-l3.png | Bin 0 -> 18260 bytes .../docker/experimental/images/ipvlan-l3.svg | 1 + .../images/ipvlan_l2_simple.gliffy | 1 + .../experimental/images/ipvlan_l2_simple.png | Bin 0 -> 20145 bytes .../experimental/images/ipvlan_l2_simple.svg | 1 + .../images/macvlan-bridge-ipvlan-l2.gliffy | 1 + .../images/macvlan-bridge-ipvlan-l2.png | Bin 0 -> 14527 bytes .../images/macvlan-bridge-ipvlan-l2.svg | 1 + .../images/multi_tenant_8021q_vlans.gliffy | 1 + .../images/multi_tenant_8021q_vlans.png | Bin 0 -> 17879 bytes .../images/multi_tenant_8021q_vlans.svg | 1 + .../images/vlans-deeper-look.gliffy | 1 + .../experimental/images/vlans-deeper-look.png | Bin 0 -> 38837 bytes .../experimental/images/vlans-deeper-look.svg | 1 + .../docker/experimental/vlan-networks.md | 471 + .../docker/hack/Jenkins/W2L/postbuild.sh | 35 + .../docker/docker/hack/Jenkins/W2L/setup.sh | 309 + .../docker/docker/hack/Jenkins/readme.md | 3 + vendor/github.com/docker/docker/hack/dind | 33 + .../docker/hack/dockerfile/binaries-commits | 11 + .../hack/dockerfile/install-binaries.sh | 123 + .../docker/docker/hack/generate-authors.sh | 15 + .../docker/hack/generate-swagger-api.sh | 22 + .../github.com/docker/docker/hack/install.sh | 484 + vendor/github.com/docker/docker/hack/make.ps1 | 408 + vendor/github.com/docker/docker/hack/make.sh | 304 + .../docker/docker/hack/make/.binary | 48 + .../docker/docker/hack/make/.binary-setup | 10 + .../docker/docker/hack/make/.build-deb/compat | 1 + .../docker/hack/make/.build-deb/control | 29 + .../.build-deb/docker-engine.bash-completion | 1 + .../.build-deb/docker-engine.docker.default | 1 + .../make/.build-deb/docker-engine.docker.init | 1 + .../.build-deb/docker-engine.docker.upstart | 1 + .../make/.build-deb/docker-engine.install | 12 + .../make/.build-deb/docker-engine.manpages | 1 + .../make/.build-deb/docker-engine.postinst | 20 + .../hack/make/.build-deb/docker-engine.udev | 1 + .../docker/docker/hack/make/.build-deb/docs | 1 + .../docker/docker/hack/make/.build-deb/rules | 55 + .../.build-rpm/docker-engine-selinux.spec | 96 + .../hack/make/.build-rpm/docker-engine.spec | 254 + .../docker/hack/make/.detect-daemon-osarch | 69 + .../docker/docker/hack/make/.ensure-emptyfs | 23 + .../docker/docker/hack/make/.go-autogen | 86 + .../docker/docker/hack/make/.go-autogen.ps1 | 91 + .../hack/make/.integration-daemon-setup | 7 + .../hack/make/.integration-daemon-start | 116 + .../docker/hack/make/.integration-daemon-stop | 27 + .../hack/make/.integration-test-helpers | 79 + .../hack/make/.resources-windows/common.rc | 38 + .../.resources-windows/docker.exe.manifest | 18 + .../hack/make/.resources-windows/docker.ico | Bin 0 -> 370070 bytes .../hack/make/.resources-windows/docker.png | Bin 0 -> 658195 bytes .../hack/make/.resources-windows/docker.rc | 3 + .../hack/make/.resources-windows/dockerd.rc | 4 + .../make/.resources-windows/event_messages.mc | 39 + .../hack/make/.resources-windows/resources.go | 18 + .../docker/docker/hack/make/README.md | 17 + .../github.com/docker/docker/hack/make/binary | 15 + .../docker/docker/hack/make/binary-client | 12 + .../docker/docker/hack/make/binary-daemon | 13 + .../docker/docker/hack/make/build-deb | 91 + .../hack/make/build-integration-test-binary | 11 + .../docker/docker/hack/make/build-rpm | 148 + .../docker/docker/hack/make/clean-apt-repo | 43 + .../docker/docker/hack/make/clean-yum-repo | 20 + .../github.com/docker/docker/hack/make/cover | 15 + .../github.com/docker/docker/hack/make/cross | 46 + .../docker/docker/hack/make/dynbinary | 15 + .../docker/docker/hack/make/dynbinary-client | 12 + .../docker/docker/hack/make/dynbinary-daemon | 12 + .../docker/hack/make/generate-index-listing | 74 + .../docker/docker/hack/make/install-binary | 12 + .../docker/hack/make/install-binary-client | 10 + .../docker/hack/make/install-binary-daemon | 16 + .../docker/docker/hack/make/install-script | 63 + .../docker/docker/hack/make/release-deb | 163 + .../docker/docker/hack/make/release-rpm | 71 + vendor/github.com/docker/docker/hack/make/run | 44 + .../docker/docker/hack/make/sign-repos | 65 + .../docker/docker/hack/make/test-deb-install | 71 + .../docker/docker/hack/make/test-docker-py | 20 + .../docker/hack/make/test-install-script | 31 + .../docker/hack/make/test-integration-cli | 28 + .../docker/hack/make/test-integration-shell | 7 + .../docker/docker/hack/make/test-old-apt-repo | 29 + .../docker/docker/hack/make/test-unit | 55 + vendor/github.com/docker/docker/hack/make/tgz | 92 + .../github.com/docker/docker/hack/make/ubuntu | 190 + .../docker/docker/hack/make/update-apt-repo | 70 + vendor/github.com/docker/docker/hack/make/win | 20 + .../github.com/docker/docker/hack/release.sh | 325 + .../docker/hack/validate/.swagger-yamllint | 4 + .../docker/docker/hack/validate/.validate | 30 + .../docker/docker/hack/validate/all | 8 + .../docker/hack/validate/compose-bindata | 28 + .../docker/docker/hack/validate/dco | 55 + .../docker/docker/hack/validate/default | 16 + .../docker/hack/validate/default-seccomp | 28 + .../docker/docker/hack/validate/gofmt | 33 + .../docker/docker/hack/validate/lint | 31 + .../docker/docker/hack/validate/pkg-imports | 33 + .../docker/docker/hack/validate/swagger | 13 + .../docker/docker/hack/validate/swagger-gen | 29 + .../docker/docker/hack/validate/test-imports | 38 + .../docker/docker/hack/validate/toml | 31 + .../docker/docker/hack/validate/vendor | 30 + .../docker/docker/hack/validate/vet | 32 + .../github.com/docker/docker/hack/vendor.sh | 15 + vendor/github.com/docker/docker/image/fs.go | 173 + .../github.com/docker/docker/image/fs_test.go | 384 + .../github.com/docker/docker/image/image.go | 150 + .../docker/docker/image/image_test.go | 59 + .../github.com/docker/docker/image/rootfs.go | 44 + .../docker/docker/image/spec/v1.1.md | 637 + .../docker/docker/image/spec/v1.2.md | 696 + .../github.com/docker/docker/image/spec/v1.md | 573 + .../github.com/docker/docker/image/store.go | 295 + .../docker/docker/image/store_test.go | 300 + .../docker/docker/image/tarexport/load.go | 390 + .../docker/docker/image/tarexport/save.go | 355 + .../docker/image/tarexport/tarexport.go | 47 + .../docker/docker/image/v1/imagev1.go | 156 + .../docker/docker/image/v1/imagev1_test.go | 55 + .../docker/integration-cli/benchmark_test.go | 95 + .../docker/integration-cli/check_test.go | 383 + .../docker/docker/integration-cli/daemon.go | 608 + .../docker/integration-cli/daemon_swarm.go | 419 + .../integration-cli/daemon_swarm_hack.go | 20 + .../docker/integration-cli/daemon_unix.go | 35 + .../docker/integration-cli/daemon_windows.go | 53 + .../integration-cli/docker_api_attach_test.go | 210 + .../integration-cli/docker_api_auth_test.go | 25 + .../integration-cli/docker_api_build_test.go | 254 + .../docker_api_containers_test.go | 1961 ++ .../integration-cli/docker_api_create_test.go | 84 + .../integration-cli/docker_api_events_test.go | 73 + .../docker_api_exec_resize_test.go | 103 + .../integration-cli/docker_api_exec_test.go | 198 + .../integration-cli/docker_api_images_test.go | 165 + .../integration-cli/docker_api_info_test.go | 53 + .../docker_api_inspect_test.go | 183 + .../docker_api_inspect_unix_test.go | 35 + .../integration-cli/docker_api_logs_test.go | 87 + .../docker_api_network_test.go | 353 + .../integration-cli/docker_api_resize_test.go | 44 + .../docker_api_service_update_test.go | 39 + .../integration-cli/docker_api_stats_test.go | 310 + .../docker_api_stats_unix_test.go | 41 + .../integration-cli/docker_api_swarm_test.go | 1367 ++ .../docker/integration-cli/docker_api_test.go | 118 + .../docker_api_update_unix_test.go | 35 + .../docker_api_version_test.go | 23 + .../docker_api_volumes_test.go | 89 + .../integration-cli/docker_cli_attach_test.go | 168 + .../docker_cli_attach_unix_test.go | 237 + .../docker_cli_authz_plugin_v2_test.go | 133 + .../docker_cli_authz_unix_test.go | 477 + .../integration-cli/docker_cli_build_test.go | 7392 +++++++ .../docker_cli_build_unix_test.go | 207 + .../docker_cli_by_digest_test.go | 693 + .../integration-cli/docker_cli_commit_test.go | 157 + .../integration-cli/docker_cli_config_test.go | 140 + .../docker_cli_cp_from_container_test.go | 488 + .../integration-cli/docker_cli_cp_test.go | 660 + .../docker_cli_cp_to_container_test.go | 599 + .../docker_cli_cp_to_container_unix_test.go | 39 + .../integration-cli/docker_cli_cp_utils.go | 303 + .../integration-cli/docker_cli_create_test.go | 513 + .../docker_cli_daemon_plugins_test.go | 317 + .../integration-cli/docker_cli_daemon_test.go | 2988 +++ .../integration-cli/docker_cli_diff_test.go | 98 + .../integration-cli/docker_cli_events_test.go | 794 + .../docker_cli_events_unix_test.go | 486 + .../integration-cli/docker_cli_exec_test.go | 601 + .../docker_cli_exec_unix_test.go | 93 + .../docker_cli_experimental_test.go | 36 + .../docker_cli_export_import_test.go | 49 + ...cker_cli_external_graphdriver_unix_test.go | 405 + ...er_cli_external_volume_driver_unix_test.go | 627 + .../integration-cli/docker_cli_health_test.go | 169 + .../integration-cli/docker_cli_help_test.go | 321 + .../docker_cli_history_test.go | 121 + .../integration-cli/docker_cli_images_test.go | 364 + .../integration-cli/docker_cli_import_test.go | 150 + .../integration-cli/docker_cli_info_test.go | 234 + .../docker_cli_info_unix_test.go | 15 + .../docker_cli_inspect_test.go | 466 + .../integration-cli/docker_cli_kill_test.go | 134 + .../integration-cli/docker_cli_links_test.go | 240 + .../docker_cli_links_unix_test.go | 26 + .../integration-cli/docker_cli_login_test.go | 44 + .../integration-cli/docker_cli_logout_test.go | 100 + .../docker_cli_logs_bench_test.go | 32 + .../integration-cli/docker_cli_logs_test.go | 328 + .../integration-cli/docker_cli_nat_test.go | 93 + .../docker_cli_netmode_test.go | 94 + .../docker_cli_network_unix_test.go | 1791 ++ .../docker_cli_oom_killed_test.go | 30 + .../integration-cli/docker_cli_pause_test.go | 66 + .../docker_cli_plugins_test.go | 393 + .../integration-cli/docker_cli_port_test.go | 319 + .../integration-cli/docker_cli_proxy_test.go | 53 + .../docker_cli_prune_unix_test.go | 91 + .../integration-cli/docker_cli_ps_test.go | 952 + .../docker_cli_pull_local_test.go | 492 + .../integration-cli/docker_cli_pull_test.go | 274 + .../docker_cli_pull_trusted_test.go | 365 + .../integration-cli/docker_cli_push_test.go | 715 + .../docker_cli_registry_user_agent_test.go | 120 + .../integration-cli/docker_cli_rename_test.go | 138 + .../docker_cli_restart_test.go | 278 + .../integration-cli/docker_cli_rm_test.go | 86 + .../integration-cli/docker_cli_rmi_test.go | 352 + .../integration-cli/docker_cli_run_test.go | 4689 +++++ .../docker_cli_run_unix_test.go | 1592 ++ .../docker_cli_save_load_test.go | 383 + .../docker_cli_save_load_unix_test.go | 109 + .../integration-cli/docker_cli_search_test.go | 131 + .../docker_cli_secret_create_test.go | 131 + .../docker_cli_secret_inspect_test.go | 68 + .../docker_cli_service_create_test.go | 175 + .../docker_cli_service_health_test.go | 191 + ...cker_cli_service_logs_experimental_test.go | 96 + .../docker_cli_service_scale_test.go | 57 + .../docker_cli_service_update_test.go | 130 + .../integration-cli/docker_cli_sni_test.go | 44 + .../integration-cli/docker_cli_stack_test.go | 186 + .../integration-cli/docker_cli_start_test.go | 199 + .../integration-cli/docker_cli_stats_test.go | 159 + .../integration-cli/docker_cli_stop_test.go | 17 + .../integration-cli/docker_cli_swarm_test.go | 1254 ++ .../docker_cli_swarm_unix_test.go | 52 + .../integration-cli/docker_cli_tag_test.go | 225 + .../integration-cli/docker_cli_top_test.go | 73 + .../integration-cli/docker_cli_update_test.go | 41 + .../docker_cli_update_unix_test.go | 283 + .../integration-cli/docker_cli_userns_test.go | 98 + .../docker_cli_v2_only_test.go | 125 + .../docker_cli_version_test.go | 58 + .../integration-cli/docker_cli_volume_test.go | 427 + .../integration-cli/docker_cli_wait_test.go | 97 + .../docker_deprecated_api_v124_test.go | 227 + .../docker_deprecated_api_v124_unix_test.go | 30 + .../docker_experimental_network_test.go | 594 + .../docker_hub_pull_suite_test.go | 90 + .../integration-cli/docker_test_vars.go | 165 + .../docker/integration-cli/docker_utils.go | 1607 ++ .../docker/integration-cli/events_utils.go | 206 + .../docker/docker/integration-cli/fixtures.go | 69 + .../auth/docker-credential-shell-test | 55 + .../fixtures/credentialspecs/valid.json | 25 + .../fixtures/deploy/default.yaml | 9 + .../fixtures/deploy/remove.yaml | 11 + .../fixtures/deploy/secrets.yaml | 20 + .../integration-cli/fixtures/https/ca.pem | 23 + .../fixtures/https/client-cert.pem | 73 + .../fixtures/https/client-key.pem | 16 + .../fixtures/https/client-rogue-cert.pem | 73 + .../fixtures/https/client-rogue-key.pem | 16 + .../fixtures/https/server-cert.pem | 76 + .../fixtures/https/server-key.pem | 16 + .../fixtures/https/server-rogue-cert.pem | 76 + .../fixtures/https/server-rogue-key.pem | 16 + .../fixtures/load/emptyLayer.tar | Bin 0 -> 30720 bytes .../integration-cli/fixtures/load/frozen.go | 182 + .../fixtures/notary/delgkey1.crt | 21 + .../fixtures/notary/delgkey1.key | 27 + .../fixtures/notary/delgkey2.crt | 21 + .../fixtures/notary/delgkey2.key | 27 + .../fixtures/notary/delgkey3.crt | 21 + .../fixtures/notary/delgkey3.key | 27 + .../fixtures/notary/delgkey4.crt | 21 + .../fixtures/notary/delgkey4.key | 27 + .../integration-cli/fixtures/notary/gen.sh | 18 + .../fixtures/notary/localhost.cert | 19 + .../fixtures/notary/localhost.key | 27 + .../fixtures/registry/cert.pem | 21 + .../integration-cli/fixtures/secrets/default | 1 + .../integration-cli/fixtures_linux_daemon.go | 143 + .../docker/docker/integration-cli/npipe.go | 12 + .../docker/integration-cli/npipe_windows.go | 12 + .../docker/docker/integration-cli/registry.go | 177 + .../docker/integration-cli/registry_mock.go | 55 + .../docker/integration-cli/requirements.go | 243 + .../integration-cli/requirements_unix.go | 159 + .../docker/integration-cli/test_vars.go | 11 + .../docker/integration-cli/test_vars_exec.go | 8 + .../integration-cli/test_vars_noexec.go | 8 + .../integration-cli/test_vars_noseccomp.go | 8 + .../integration-cli/test_vars_seccomp.go | 8 + .../docker/integration-cli/test_vars_unix.go | 14 + .../integration-cli/test_vars_windows.go | 15 + .../docker/integration-cli/trust_server.go | 344 + .../docker/docker/integration-cli/utils.go | 79 + .../github.com/docker/docker/layer/empty.go | 56 + .../docker/docker/layer/empty_test.go | 46 + .../docker/docker/layer/filestore.go | 354 + .../docker/docker/layer/filestore_test.go | 104 + .../github.com/docker/docker/layer/layer.go | 275 + .../docker/docker/layer/layer_store.go | 684 + .../docker/layer/layer_store_windows.go | 11 + .../docker/docker/layer/layer_test.go | 771 + .../docker/docker/layer/layer_unix.go | 9 + .../docker/docker/layer/layer_unix_test.go | 71 + .../docker/docker/layer/layer_windows.go | 98 + .../docker/docker/layer/migration.go | 256 + .../docker/docker/layer/migration_test.go | 435 + .../docker/docker/layer/mount_test.go | 230 + .../docker/docker/layer/mounted_layer.go | 99 + .../docker/docker/layer/ro_layer.go | 192 + .../docker/docker/layer/ro_layer_windows.go | 9 + .../docker/docker/libcontainerd/client.go | 46 + .../docker/libcontainerd/client_linux.go | 605 + .../docker/libcontainerd/client_solaris.go | 101 + .../docker/libcontainerd/client_unix.go | 142 + .../docker/libcontainerd/client_windows.go | 631 + .../docker/docker/libcontainerd/container.go | 13 + .../docker/libcontainerd/container_unix.go | 250 + .../docker/libcontainerd/container_windows.go | 311 + .../docker/docker/libcontainerd/oom_linux.go | 31 + .../docker/libcontainerd/oom_solaris.go | 5 + .../docker/libcontainerd/pausemonitor_unix.go | 42 + .../docker/docker/libcontainerd/process.go | 18 + .../docker/libcontainerd/process_unix.go | 107 + .../docker/libcontainerd/process_windows.go | 51 + .../docker/docker/libcontainerd/queue_unix.go | 31 + .../docker/docker/libcontainerd/remote.go | 20 + .../docker/libcontainerd/remote_unix.go | 544 + .../docker/libcontainerd/remote_windows.go | 36 + .../docker/docker/libcontainerd/types.go | 75 + .../docker/libcontainerd/types_linux.go | 49 + .../docker/libcontainerd/types_solaris.go | 43 + .../docker/libcontainerd/types_windows.go | 79 + .../docker/libcontainerd/utils_linux.go | 62 + .../docker/libcontainerd/utils_solaris.go | 27 + .../docker/libcontainerd/utils_windows.go | 46 + .../libcontainerd/utils_windows_test.go | 13 + .../github.com/docker/docker/man/Dockerfile | 24 + .../docker/docker/man/Dockerfile.5.md | 474 + .../docker/docker/man/Dockerfile.aarch64 | 25 + .../docker/docker/man/Dockerfile.armhf | 43 + .../docker/docker/man/Dockerfile.ppc64le | 35 + .../docker/docker/man/Dockerfile.s390x | 35 + vendor/github.com/docker/docker/man/README.md | 15 + .../docker/docker/man/docker-attach.1.md | 99 + .../docker/docker/man/docker-build.1.md | 340 + .../docker/docker/man/docker-commit.1.md | 71 + .../docker/docker/man/docker-config-json.5.md | 72 + .../docker/docker/man/docker-cp.1.md | 175 + .../docker/docker/man/docker-create.1.md | 553 + .../docker/docker/man/docker-diff.1.md | 49 + .../docker/docker/man/docker-events.1.md | 180 + .../docker/docker/man/docker-exec.1.md | 71 + .../docker/docker/man/docker-export.1.md | 46 + .../docker/docker/man/docker-history.1.md | 52 + .../docker/docker/man/docker-images.1.md | 153 + .../docker/docker/man/docker-import.1.md | 72 + .../docker/docker/man/docker-info.1.md | 187 + .../docker/docker/man/docker-inspect.1.md | 323 + .../docker/docker/man/docker-kill.1.md | 28 + .../docker/docker/man/docker-load.1.md | 56 + .../docker/docker/man/docker-login.1.md | 53 + .../docker/docker/man/docker-logout.1.md | 32 + .../docker/docker/man/docker-logs.1.md | 71 + .../docker/man/docker-network-connect.1.md | 66 + .../docker/man/docker-network-create.1.md | 187 + .../docker/man/docker-network-disconnect.1.md | 36 + .../docker/man/docker-network-inspect.1.md | 112 + .../docker/docker/man/docker-network-ls.1.md | 188 + .../docker/docker/man/docker-network-rm.1.md | 43 + .../docker/docker/man/docker-pause.1.md | 32 + .../docker/docker/man/docker-port.1.md | 47 + .../docker/docker/man/docker-ps.1.md | 145 + .../docker/docker/man/docker-pull.1.md | 220 + .../docker/docker/man/docker-push.1.md | 63 + .../docker/docker/man/docker-rename.1.md | 15 + .../docker/docker/man/docker-restart.1.md | 26 + .../docker/docker/man/docker-rm.1.md | 72 + .../docker/docker/man/docker-rmi.1.md | 42 + .../docker/docker/man/docker-run.1.md | 1055 + .../docker/docker/man/docker-save.1.md | 45 + .../docker/docker/man/docker-search.1.md | 70 + .../docker/docker/man/docker-start.1.md | 39 + .../docker/docker/man/docker-stats.1.md | 57 + .../docker/docker/man/docker-stop.1.md | 30 + .../docker/docker/man/docker-tag.1.md | 76 + .../docker/docker/man/docker-top.1.md | 36 + .../docker/docker/man/docker-unpause.1.md | 28 + .../docker/docker/man/docker-update.1.md | 171 + .../docker/docker/man/docker-version.1.md | 62 + .../docker/docker/man/docker-wait.1.md | 30 + .../github.com/docker/docker/man/docker.1.md | 237 + .../github.com/docker/docker/man/dockerd.8.md | 710 + .../github.com/docker/docker/man/generate.go | 43 + .../github.com/docker/docker/man/generate.sh | 15 + .../github.com/docker/docker/man/glide.lock | 52 + .../github.com/docker/docker/man/glide.yaml | 12 + .../docker/docker/man/md2man-all.sh | 22 + .../docker/docker/migrate/v1/migratev1.go | 504 + .../docker/migrate/v1/migratev1_test.go | 438 + .../docker/docker/oci/defaults_linux.go | 168 + .../docker/docker/oci/defaults_solaris.go | 20 + .../docker/docker/oci/defaults_windows.go | 19 + .../docker/docker/oci/devices_linux.go | 86 + .../docker/docker/oci/devices_unsupported.go | 20 + .../docker/docker/oci/namespaces.go | 16 + vendor/github.com/docker/docker/opts/hosts.go | 151 + .../docker/docker/opts/hosts_test.go | 148 + .../docker/docker/opts/hosts_unix.go | 8 + .../docker/docker/opts/hosts_windows.go | 6 + vendor/github.com/docker/docker/opts/ip.go | 47 + .../github.com/docker/docker/opts/ip_test.go | 54 + vendor/github.com/docker/docker/opts/mount.go | 171 + .../docker/docker/opts/mount_test.go | 184 + vendor/github.com/docker/docker/opts/opts.go | 360 + .../docker/docker/opts/opts_test.go | 232 + .../docker/docker/opts/opts_unix.go | 6 + .../docker/docker/opts/opts_windows.go | 56 + vendor/github.com/docker/docker/opts/port.go | 146 + .../docker/docker/opts/port_test.go | 259 + .../docker/docker/opts/quotedstring.go | 37 + .../docker/docker/opts/quotedstring_test.go | 28 + .../github.com/docker/docker/opts/secret.go | 107 + .../docker/docker/opts/secret_test.go | 79 + vendor/github.com/docker/docker/pkg/README.md | 11 + .../docker/docker/pkg/aaparser/aaparser.go | 91 + .../docker/pkg/aaparser/aaparser_test.go | 73 + .../docker/docker/pkg/archive/README.md | 1 + .../docker/docker/pkg/archive/archive.go | 1175 ++ .../docker/pkg/archive/archive_linux.go | 95 + .../docker/pkg/archive/archive_linux_test.go | 187 + .../docker/pkg/archive/archive_other.go | 7 + .../docker/docker/pkg/archive/archive_test.go | 1162 ++ .../docker/docker/pkg/archive/archive_unix.go | 118 + .../docker/pkg/archive/archive_unix_test.go | 249 + .../docker/pkg/archive/archive_windows.go | 70 + .../pkg/archive/archive_windows_test.go | 91 + .../docker/docker/pkg/archive/changes.go | 446 + .../docker/pkg/archive/changes_linux.go | 312 + .../docker/pkg/archive/changes_other.go | 97 + .../docker/pkg/archive/changes_posix_test.go | 132 + .../docker/docker/pkg/archive/changes_test.go | 572 + .../docker/docker/pkg/archive/changes_unix.go | 36 + .../docker/pkg/archive/changes_windows.go | 30 + .../docker/docker/pkg/archive/copy.go | 458 + .../docker/docker/pkg/archive/copy_unix.go | 11 + .../docker/pkg/archive/copy_unix_test.go | 978 + .../docker/docker/pkg/archive/copy_windows.go | 9 + .../docker/docker/pkg/archive/diff.go | 279 + .../docker/docker/pkg/archive/diff_test.go | 386 + .../docker/pkg/archive/example_changes.go | 97 + .../docker/pkg/archive/testdata/broken.tar | Bin 0 -> 13824 bytes .../docker/docker/pkg/archive/time_linux.go | 16 + .../docker/pkg/archive/time_unsupported.go | 16 + .../docker/docker/pkg/archive/utils_test.go | 166 + .../docker/docker/pkg/archive/whiteouts.go | 23 + .../docker/docker/pkg/archive/wrap.go | 59 + .../docker/docker/pkg/archive/wrap_test.go | 98 + .../docker/docker/pkg/authorization/api.go | 88 + .../docker/docker/pkg/authorization/authz.go | 186 + .../pkg/authorization/authz_unix_test.go | 282 + .../docker/pkg/authorization/middleware.go | 84 + .../docker/docker/pkg/authorization/plugin.go | 112 + .../docker/pkg/authorization/response.go | 203 + .../docker/pkg/broadcaster/unbuffered.go | 49 + .../docker/pkg/broadcaster/unbuffered_test.go | 162 + .../docker/pkg/chrootarchive/archive.go | 97 + .../docker/pkg/chrootarchive/archive_test.go | 394 + .../docker/pkg/chrootarchive/archive_unix.go | 86 + .../pkg/chrootarchive/archive_windows.go | 22 + .../docker/pkg/chrootarchive/chroot_linux.go | 108 + .../docker/pkg/chrootarchive/chroot_unix.go | 12 + .../docker/docker/pkg/chrootarchive/diff.go | 23 + .../docker/pkg/chrootarchive/diff_unix.go | 130 + .../docker/pkg/chrootarchive/diff_windows.go | 45 + .../docker/pkg/chrootarchive/init_unix.go | 28 + .../docker/pkg/chrootarchive/init_windows.go | 4 + .../docker/pkg/devicemapper/devmapper.go | 828 + .../docker/pkg/devicemapper/devmapper_log.go | 35 + .../pkg/devicemapper/devmapper_wrapper.go | 251 + .../devmapper_wrapper_deferred_remove.go | 34 + .../devmapper_wrapper_no_deferred_remove.go | 15 + .../docker/docker/pkg/devicemapper/ioctl.go | 27 + .../docker/docker/pkg/devicemapper/log.go | 11 + .../docker/docker/pkg/directory/directory.go | 26 + .../docker/pkg/directory/directory_test.go | 192 + .../docker/pkg/directory/directory_unix.go | 48 + .../docker/pkg/directory/directory_windows.go | 37 + .../docker/docker/pkg/discovery/README.md | 41 + .../docker/docker/pkg/discovery/backends.go | 107 + .../docker/docker/pkg/discovery/discovery.go | 35 + .../docker/pkg/discovery/discovery_test.go | 137 + .../docker/docker/pkg/discovery/entry.go | 94 + .../docker/docker/pkg/discovery/file/file.go | 107 + .../docker/pkg/discovery/file/file_test.go | 114 + .../docker/docker/pkg/discovery/generator.go | 35 + .../docker/pkg/discovery/generator_test.go | 53 + .../docker/docker/pkg/discovery/kv/kv.go | 192 + .../docker/docker/pkg/discovery/kv/kv_test.go | 324 + .../docker/pkg/discovery/memory/memory.go | 93 + .../pkg/discovery/memory/memory_test.go | 48 + .../docker/pkg/discovery/nodes/nodes.go | 54 + .../docker/pkg/discovery/nodes/nodes_test.go | 51 + .../docker/pkg/filenotify/filenotify.go | 40 + .../docker/docker/pkg/filenotify/fsnotify.go | 18 + .../docker/docker/pkg/filenotify/poller.go | 204 + .../docker/pkg/filenotify/poller_test.go | 119 + .../docker/docker/pkg/fileutils/fileutils.go | 283 + .../docker/pkg/fileutils/fileutils_darwin.go | 27 + .../docker/pkg/fileutils/fileutils_solaris.go | 7 + .../docker/pkg/fileutils/fileutils_test.go | 585 + .../docker/pkg/fileutils/fileutils_unix.go | 22 + .../docker/pkg/fileutils/fileutils_windows.go | 7 + .../docker/pkg/fsutils/fsutils_linux.go | 89 + .../docker/pkg/fsutils/fsutils_linux_test.go | 91 + .../docker/docker/pkg/gitutils/gitutils.go | 100 + .../docker/pkg/gitutils/gitutils_test.go | 220 + .../docker/pkg/graphdb/conn_sqlite3_linux.go | 19 + .../docker/pkg/graphdb/graphdb_linux.go | 551 + .../docker/pkg/graphdb/graphdb_linux_test.go | 721 + .../docker/docker/pkg/graphdb/sort_linux.go | 27 + .../docker/pkg/graphdb/sort_linux_test.go | 29 + .../docker/docker/pkg/graphdb/unsupported.go | 3 + .../docker/docker/pkg/graphdb/utils_linux.go | 32 + .../docker/docker/pkg/homedir/homedir.go | 39 + .../docker/docker/pkg/homedir/homedir_test.go | 24 + .../docker/docker/pkg/httputils/httputils.go | 56 + .../docker/pkg/httputils/httputils_test.go | 115 + .../docker/docker/pkg/httputils/mimetype.go | 30 + .../docker/pkg/httputils/mimetype_test.go | 13 + .../pkg/httputils/resumablerequestreader.go | 95 + .../httputils/resumablerequestreader_test.go | 307 + .../docker/docker/pkg/idtools/idtools.go | 197 + .../docker/docker/pkg/idtools/idtools_unix.go | 207 + .../docker/pkg/idtools/idtools_unix_test.go | 271 + .../docker/pkg/idtools/idtools_windows.go | 25 + .../docker/pkg/idtools/usergroupadd_linux.go | 164 + .../pkg/idtools/usergroupadd_unsupported.go | 12 + .../docker/docker/pkg/idtools/utils_unix.go | 32 + .../docker/pkg/integration/checker/checker.go | 46 + .../docker/pkg/integration/cmd/command.go | 294 + .../pkg/integration/cmd/command_test.go | 118 + .../docker/docker/pkg/integration/utils.go | 227 + .../docker/pkg/integration/utils_test.go | 363 + .../docker/docker/pkg/ioutils/buffer.go | 51 + .../docker/docker/pkg/ioutils/buffer_test.go | 75 + .../docker/docker/pkg/ioutils/bytespipe.go | 186 + .../docker/pkg/ioutils/bytespipe_test.go | 159 + .../docker/docker/pkg/ioutils/fmt.go | 22 + .../docker/docker/pkg/ioutils/fmt_test.go | 17 + .../docker/docker/pkg/ioutils/fswriters.go | 162 + .../docker/pkg/ioutils/fswriters_test.go | 132 + .../docker/docker/pkg/ioutils/multireader.go | 223 + .../docker/pkg/ioutils/multireader_test.go | 211 + .../docker/docker/pkg/ioutils/readers.go | 154 + .../docker/docker/pkg/ioutils/readers_test.go | 94 + .../docker/docker/pkg/ioutils/temp_unix.go | 10 + .../docker/docker/pkg/ioutils/temp_windows.go | 18 + .../docker/docker/pkg/ioutils/writeflusher.go | 92 + .../docker/docker/pkg/ioutils/writers.go | 66 + .../docker/docker/pkg/ioutils/writers_test.go | 65 + .../docker/docker/pkg/jsonlog/jsonlog.go | 42 + .../docker/pkg/jsonlog/jsonlog_marshalling.go | 178 + .../pkg/jsonlog/jsonlog_marshalling_test.go | 34 + .../docker/docker/pkg/jsonlog/jsonlogbytes.go | 122 + .../docker/pkg/jsonlog/jsonlogbytes_test.go | 39 + .../docker/pkg/jsonlog/time_marshalling.go | 27 + .../pkg/jsonlog/time_marshalling_test.go | 47 + .../docker/pkg/jsonmessage/jsonmessage.go | 225 + .../pkg/jsonmessage/jsonmessage_test.go | 245 + .../docker/pkg/listeners/listeners_solaris.go | 31 + .../docker/pkg/listeners/listeners_unix.go | 94 + .../docker/pkg/listeners/listeners_windows.go | 54 + .../docker/docker/pkg/locker/README.md | 65 + .../docker/docker/pkg/locker/locker.go | 112 + .../docker/docker/pkg/locker/locker_test.go | 124 + .../docker/docker/pkg/longpath/longpath.go | 26 + .../docker/pkg/longpath/longpath_test.go | 22 + .../docker/pkg/loopback/attach_loopback.go | 137 + .../docker/docker/pkg/loopback/ioctl.go | 53 + .../docker/pkg/loopback/loop_wrapper.go | 52 + .../docker/docker/pkg/loopback/loopback.go | 63 + .../docker/docker/pkg/mount/flags.go | 149 + .../docker/docker/pkg/mount/flags_freebsd.go | 48 + .../docker/docker/pkg/mount/flags_linux.go | 85 + .../docker/pkg/mount/flags_unsupported.go | 30 + .../docker/docker/pkg/mount/mount.go | 74 + .../docker/pkg/mount/mount_unix_test.go | 162 + .../docker/pkg/mount/mounter_freebsd.go | 59 + .../docker/docker/pkg/mount/mounter_linux.go | 21 + .../docker/pkg/mount/mounter_solaris.go | 33 + .../docker/pkg/mount/mounter_unsupported.go | 11 + .../docker/docker/pkg/mount/mountinfo.go | 40 + .../docker/pkg/mount/mountinfo_freebsd.go | 41 + .../docker/pkg/mount/mountinfo_linux.go | 95 + .../docker/pkg/mount/mountinfo_linux_test.go | 476 + .../docker/pkg/mount/mountinfo_solaris.go | 37 + .../docker/pkg/mount/mountinfo_unsupported.go | 12 + .../docker/pkg/mount/mountinfo_windows.go | 6 + .../docker/pkg/mount/sharedsubtree_linux.go | 69 + .../pkg/mount/sharedsubtree_linux_test.go | 331 + .../docker/pkg/mount/sharedsubtree_solaris.go | 58 + .../cmd/names-generator/main.go | 11 + .../pkg/namesgenerator/names-generator.go | 590 + .../namesgenerator/names-generator_test.go | 27 + .../docker/pkg/parsers/kernel/kernel.go | 74 + .../pkg/parsers/kernel/kernel_darwin.go | 56 + .../docker/pkg/parsers/kernel/kernel_unix.go | 45 + .../pkg/parsers/kernel/kernel_unix_test.go | 96 + .../pkg/parsers/kernel/kernel_windows.go | 69 + .../docker/pkg/parsers/kernel/uname_linux.go | 19 + .../pkg/parsers/kernel/uname_solaris.go | 14 + .../pkg/parsers/kernel/uname_unsupported.go | 18 + .../operatingsystem/operatingsystem_linux.go | 77 + .../operatingsystem_solaris.go | 37 + .../operatingsystem/operatingsystem_unix.go | 25 + .../operatingsystem_unix_test.go | 247 + .../operatingsystem_windows.go | 49 + .../docker/docker/pkg/parsers/parsers.go | 69 + .../docker/docker/pkg/parsers/parsers_test.go | 70 + .../docker/docker/pkg/pidfile/pidfile.go | 56 + .../docker/pkg/pidfile/pidfile_darwin.go | 18 + .../docker/docker/pkg/pidfile/pidfile_test.go | 38 + .../docker/docker/pkg/pidfile/pidfile_unix.go | 16 + .../docker/pkg/pidfile/pidfile_windows.go | 23 + .../docker/pkg/platform/architecture_linux.go | 16 + .../docker/pkg/platform/architecture_unix.go | 20 + .../pkg/platform/architecture_windows.go | 60 + .../docker/docker/pkg/platform/platform.go | 23 + .../docker/pkg/platform/utsname_int8.go | 18 + .../docker/pkg/platform/utsname_uint8.go | 18 + .../docker/docker/pkg/plugingetter/getter.go | 35 + .../docker/docker/pkg/plugins/client.go | 205 + .../docker/docker/pkg/plugins/client_test.go | 134 + .../docker/docker/pkg/plugins/discovery.go | 131 + .../docker/pkg/plugins/discovery_test.go | 152 + .../docker/pkg/plugins/discovery_unix.go | 5 + .../docker/pkg/plugins/discovery_unix_test.go | 61 + .../docker/pkg/plugins/discovery_windows.go | 8 + .../docker/docker/pkg/plugins/errors.go | 33 + .../docker/docker/pkg/plugins/plugin_test.go | 44 + .../pkg/plugins/pluginrpc-gen/README.md | 58 + .../pkg/plugins/pluginrpc-gen/fixtures/foo.go | 89 + .../fixtures/otherfixture/spaceship.go | 4 + .../docker/pkg/plugins/pluginrpc-gen/main.go | 91 + .../pkg/plugins/pluginrpc-gen/parser.go | 263 + .../pkg/plugins/pluginrpc-gen/parser_test.go | 222 + .../pkg/plugins/pluginrpc-gen/template.go | 118 + .../docker/docker/pkg/plugins/plugins.go | 329 + .../docker/pkg/plugins/plugins_linux.go | 7 + .../docker/pkg/plugins/plugins_windows.go | 8 + .../docker/pkg/plugins/transport/http.go | 36 + .../docker/pkg/plugins/transport/transport.go | 36 + .../docker/docker/pkg/pools/pools.go | 116 + .../docker/docker/pkg/pools/pools_test.go | 161 + .../docker/docker/pkg/progress/progress.go | 84 + .../docker/pkg/progress/progressreader.go | 66 + .../pkg/progress/progressreader_test.go | 75 + .../docker/docker/pkg/promise/promise.go | 11 + .../docker/docker/pkg/pubsub/publisher.go | 111 + .../docker/pkg/pubsub/publisher_test.go | 142 + .../docker/docker/pkg/random/random.go | 71 + .../docker/docker/pkg/random/random_test.go | 22 + .../docker/docker/pkg/reexec/README.md | 5 + .../docker/docker/pkg/reexec/command_linux.go | 28 + .../docker/docker/pkg/reexec/command_unix.go | 23 + .../docker/pkg/reexec/command_unsupported.go | 12 + .../docker/pkg/reexec/command_windows.go | 23 + .../docker/docker/pkg/reexec/reexec.go | 47 + .../docker/docker/pkg/registrar/registrar.go | 127 + .../docker/pkg/registrar/registrar_test.go | 119 + .../docker/docker/pkg/signal/README.md | 1 + .../docker/docker/pkg/signal/signal.go | 54 + .../docker/docker/pkg/signal/signal_darwin.go | 41 + .../docker/pkg/signal/signal_freebsd.go | 43 + .../docker/docker/pkg/signal/signal_linux.go | 80 + .../docker/pkg/signal/signal_solaris.go | 42 + .../docker/docker/pkg/signal/signal_unix.go | 21 + .../docker/pkg/signal/signal_unsupported.go | 10 + .../docker/pkg/signal/signal_windows.go | 28 + .../docker/docker/pkg/signal/trap.go | 103 + .../docker/docker/pkg/stdcopy/stdcopy.go | 174 + .../docker/docker/pkg/stdcopy/stdcopy_test.go | 260 + .../pkg/streamformatter/streamformatter.go | 172 + .../streamformatter/streamformatter_test.go | 108 + .../docker/docker/pkg/stringid/README.md | 1 + .../docker/docker/pkg/stringid/stringid.go | 69 + .../docker/pkg/stringid/stringid_test.go | 72 + .../docker/docker/pkg/stringutils/README.md | 1 + .../docker/pkg/stringutils/stringutils.go | 101 + .../pkg/stringutils/stringutils_test.go | 121 + .../docker/docker/pkg/symlink/LICENSE.APACHE | 191 + .../docker/docker/pkg/symlink/LICENSE.BSD | 27 + .../docker/docker/pkg/symlink/README.md | 6 + .../docker/docker/pkg/symlink/fs.go | 144 + .../docker/docker/pkg/symlink/fs_unix.go | 15 + .../docker/docker/pkg/symlink/fs_unix_test.go | 407 + .../docker/docker/pkg/symlink/fs_windows.go | 169 + .../docker/docker/pkg/sysinfo/README.md | 1 + .../docker/docker/pkg/sysinfo/numcpu.go | 12 + .../docker/docker/pkg/sysinfo/numcpu_linux.go | 43 + .../docker/pkg/sysinfo/numcpu_windows.go | 37 + .../docker/docker/pkg/sysinfo/sysinfo.go | 144 + .../docker/pkg/sysinfo/sysinfo_linux.go | 259 + .../docker/pkg/sysinfo/sysinfo_linux_test.go | 58 + .../docker/pkg/sysinfo/sysinfo_solaris.go | 121 + .../docker/docker/pkg/sysinfo/sysinfo_test.go | 26 + .../docker/docker/pkg/sysinfo/sysinfo_unix.go | 9 + .../docker/pkg/sysinfo/sysinfo_windows.go | 9 + .../docker/docker/pkg/system/chtimes.go | 52 + .../docker/docker/pkg/system/chtimes_test.go | 94 + .../docker/docker/pkg/system/chtimes_unix.go | 14 + .../docker/pkg/system/chtimes_unix_test.go | 91 + .../docker/pkg/system/chtimes_windows.go | 27 + .../docker/pkg/system/chtimes_windows_test.go | 86 + .../docker/docker/pkg/system/errors.go | 10 + .../docker/pkg/system/events_windows.go | 85 + .../docker/docker/pkg/system/exitcode.go | 33 + .../docker/docker/pkg/system/filesys.go | 54 + .../docker/pkg/system/filesys_windows.go | 236 + .../docker/docker/pkg/system/lstat.go | 19 + .../docker/pkg/system/lstat_unix_test.go | 30 + .../docker/docker/pkg/system/lstat_windows.go | 25 + .../docker/docker/pkg/system/meminfo.go | 17 + .../docker/docker/pkg/system/meminfo_linux.go | 65 + .../docker/pkg/system/meminfo_solaris.go | 128 + .../docker/pkg/system/meminfo_unix_test.go | 40 + .../docker/pkg/system/meminfo_unsupported.go | 8 + .../docker/pkg/system/meminfo_windows.go | 45 + .../docker/docker/pkg/system/mknod.go | 22 + .../docker/docker/pkg/system/mknod_windows.go | 13 + .../docker/docker/pkg/system/path_unix.go | 14 + .../docker/docker/pkg/system/path_windows.go | 37 + .../docker/pkg/system/path_windows_test.go | 78 + .../docker/docker/pkg/system/stat.go | 53 + .../docker/docker/pkg/system/stat_darwin.go | 32 + .../docker/docker/pkg/system/stat_freebsd.go | 27 + .../docker/docker/pkg/system/stat_linux.go | 33 + .../docker/docker/pkg/system/stat_openbsd.go | 15 + .../docker/docker/pkg/system/stat_solaris.go | 34 + .../docker/pkg/system/stat_unix_test.go | 39 + .../docker/pkg/system/stat_unsupported.go | 17 + .../docker/docker/pkg/system/stat_windows.go | 43 + .../docker/docker/pkg/system/syscall_unix.go | 17 + .../docker/pkg/system/syscall_windows.go | 105 + .../docker/pkg/system/syscall_windows_test.go | 9 + .../docker/docker/pkg/system/umask.go | 13 + .../docker/docker/pkg/system/umask_windows.go | 9 + .../docker/pkg/system/utimes_freebsd.go | 22 + .../docker/docker/pkg/system/utimes_linux.go | 26 + .../docker/pkg/system/utimes_unix_test.go | 68 + .../docker/pkg/system/utimes_unsupported.go | 10 + .../docker/docker/pkg/system/xattrs_linux.go | 63 + .../docker/pkg/system/xattrs_unsupported.go | 13 + .../docker/docker/pkg/tailfile/tailfile.go | 66 + .../docker/pkg/tailfile/tailfile_test.go | 148 + .../docker/pkg/tarsum/builder_context.go | 21 + .../docker/pkg/tarsum/builder_context_test.go | 67 + .../docker/docker/pkg/tarsum/fileinfosums.go | 126 + .../docker/pkg/tarsum/fileinfosums_test.go | 62 + .../docker/docker/pkg/tarsum/tarsum.go | 295 + .../docker/docker/pkg/tarsum/tarsum_spec.md | 230 + .../docker/docker/pkg/tarsum/tarsum_test.go | 664 + .../json | 1 + .../layer.tar | Bin 0 -> 9216 bytes .../json | 1 + .../layer.tar | Bin 0 -> 1536 bytes .../tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes .../docker/pkg/tarsum/testdata/xattr/json | 1 + .../pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes .../docker/docker/pkg/tarsum/versioning.go | 150 + .../docker/pkg/tarsum/versioning_test.go | 98 + .../docker/docker/pkg/tarsum/writercloser.go | 22 + .../docker/docker/pkg/term/ascii.go | 66 + .../docker/docker/pkg/term/ascii_test.go | 43 + .../docker/docker/pkg/term/tc_linux_cgo.go | 50 + .../docker/docker/pkg/term/tc_other.go | 20 + .../docker/docker/pkg/term/tc_solaris_cgo.go | 63 + .../github.com/docker/docker/pkg/term/term.go | 123 + .../docker/docker/pkg/term/term_solaris.go | 41 + .../docker/docker/pkg/term/term_unix.go | 29 + .../docker/docker/pkg/term/term_windows.go | 233 + .../docker/docker/pkg/term/termios_darwin.go | 69 + .../docker/docker/pkg/term/termios_freebsd.go | 69 + .../docker/docker/pkg/term/termios_linux.go | 47 + .../docker/docker/pkg/term/termios_openbsd.go | 69 + .../docker/pkg/term/windows/ansi_reader.go | 263 + .../docker/pkg/term/windows/ansi_writer.go | 64 + .../docker/docker/pkg/term/windows/console.go | 35 + .../docker/docker/pkg/term/windows/windows.go | 33 + .../docker/pkg/term/windows/windows_test.go | 3 + .../docker/pkg/testutil/assert/assert.go | 97 + .../docker/docker/pkg/testutil/pkg.go | 1 + .../docker/pkg/testutil/tempfile/tempfile.go | 36 + .../docker/pkg/tlsconfig/tlsconfig_clone.go | 11 + .../pkg/tlsconfig/tlsconfig_clone_go16.go | 31 + .../pkg/tlsconfig/tlsconfig_clone_go17.go | 33 + .../docker/pkg/truncindex/truncindex.go | 137 + .../docker/pkg/truncindex/truncindex_test.go | 429 + .../docker/docker/pkg/urlutil/urlutil.go | 50 + .../docker/docker/pkg/urlutil/urlutil_test.go | 70 + .../docker/docker/pkg/useragent/README.md | 1 + .../docker/docker/pkg/useragent/useragent.go | 55 + .../docker/pkg/useragent/useragent_test.go | 31 + .../docker/docker/plugin/backend_linux.go | 790 + .../docker/plugin/backend_unsupported.go | 71 + .../docker/docker/plugin/blobstore.go | 181 + .../github.com/docker/docker/plugin/defs.go | 26 + .../docker/docker/plugin/manager.go | 347 + .../docker/docker/plugin/manager_linux.go | 284 + .../docker/docker/plugin/manager_solaris.go | 28 + .../docker/docker/plugin/manager_windows.go | 30 + .../github.com/docker/docker/plugin/store.go | 263 + .../docker/docker/plugin/store_test.go | 33 + .../docker/docker/plugin/v2/plugin.go | 244 + .../docker/docker/plugin/v2/plugin_linux.go | 121 + .../docker/plugin/v2/plugin_unsupported.go | 14 + .../docker/docker/plugin/v2/settable.go | 102 + .../docker/docker/plugin/v2/settable_test.go | 91 + vendor/github.com/docker/docker/poule.yml | 88 + .../docker/profiles/apparmor/apparmor.go | 122 + .../docker/profiles/apparmor/template.go | 46 + .../docker/profiles/seccomp/default.json | 698 + .../profiles/seccomp/fixtures/example.json | 27 + .../docker/profiles/seccomp/generate.go | 32 + .../docker/docker/profiles/seccomp/seccomp.go | 150 + .../profiles/seccomp/seccomp_default.go | 604 + .../docker/profiles/seccomp/seccomp_test.go | 32 + .../profiles/seccomp/seccomp_unsupported.go | 13 + .../github.com/docker/docker/project/ARM.md | 45 + .../docker/project/BRANCHES-AND-TAGS.md | 35 + .../docker/docker/project/CONTRIBUTORS.md | 1 + .../docker/docker/project/GOVERNANCE.md | 17 + .../docker/project/IRC-ADMINISTRATION.md | 37 + .../docker/docker/project/ISSUE-TRIAGE.md | 132 + .../project/PACKAGE-REPO-MAINTENANCE.md | 74 + .../docker/docker/project/PACKAGERS.md | 307 + .../docker/docker/project/PATCH-RELEASES.md | 68 + .../docker/docker/project/PRINCIPLES.md | 19 + .../docker/docker/project/README.md | 24 + .../docker/project/RELEASE-CHECKLIST.md | 518 + .../docker/docker/project/RELEASE-PROCESS.md | 78 + .../docker/docker/project/REVIEWING.md | 246 + .../github.com/docker/docker/project/TOOLS.md | 63 + .../docker/docker/reference/reference.go | 216 + .../docker/docker/reference/reference_test.go | 275 + .../docker/docker/reference/store.go | 286 + .../docker/docker/reference/store_test.go | 356 + .../github.com/docker/docker/registry/auth.go | 303 + .../docker/docker/registry/auth_test.go | 124 + .../docker/docker/registry/config.go | 305 + .../docker/docker/registry/config_test.go | 49 + .../docker/docker/registry/config_unix.go | 25 + .../docker/docker/registry/config_windows.go | 25 + .../docker/docker/registry/endpoint_test.go | 78 + .../docker/docker/registry/endpoint_v1.go | 198 + .../docker/docker/registry/registry.go | 191 + .../docker/registry/registry_mock_test.go | 478 + .../docker/docker/registry/registry_test.go | 875 + .../docker/docker/registry/service.go | 304 + .../docker/docker/registry/service_v1.go | 40 + .../docker/docker/registry/service_v1_test.go | 23 + .../docker/docker/registry/service_v2.go | 78 + .../docker/docker/registry/session.go | 783 + .../docker/docker/registry/types.go | 73 + .../docker/restartmanager/restartmanager.go | 128 + .../restartmanager/restartmanager_test.go | 34 + .../docker/docker/runconfig/compare.go | 61 + .../docker/docker/runconfig/compare_test.go | 126 + .../docker/docker/runconfig/config.go | 97 + .../docker/docker/runconfig/config_test.go | 139 + .../docker/docker/runconfig/config_unix.go | 59 + .../docker/docker/runconfig/config_windows.go | 19 + .../docker/docker/runconfig/errors.go | 46 + .../fixtures/unix/container_config_1_14.json | 30 + .../fixtures/unix/container_config_1_17.json | 50 + .../fixtures/unix/container_config_1_19.json | 58 + .../unix/container_hostconfig_1_14.json | 18 + .../unix/container_hostconfig_1_19.json | 30 + .../windows/container_config_1_19.json | 58 + .../docker/docker/runconfig/hostconfig.go | 35 + .../docker/runconfig/hostconfig_solaris.go | 41 + .../docker/runconfig/hostconfig_test.go | 283 + .../docker/runconfig/hostconfig_unix.go | 129 + .../docker/runconfig/hostconfig_windows.go | 68 + .../docker/docker/runconfig/opts/envfile.go | 81 + .../docker/runconfig/opts/envfile_test.go | 142 + .../docker/runconfig/opts/fixtures/utf16.env | Bin 0 -> 54 bytes .../runconfig/opts/fixtures/utf16be.env | Bin 0 -> 54 bytes .../docker/runconfig/opts/fixtures/utf8.env | 3 + .../docker/runconfig/opts/fixtures/valid.env | 1 + .../runconfig/opts/fixtures/valid.label | 1 + .../docker/docker/runconfig/opts/opts.go | 83 + .../docker/docker/runconfig/opts/opts_test.go | 113 + .../docker/docker/runconfig/opts/parse.go | 995 + .../docker/runconfig/opts/parse_test.go | 894 + .../docker/docker/runconfig/opts/runtime.go | 79 + .../docker/runconfig/opts/throttledevice.go | 111 + .../docker/docker/runconfig/opts/ulimit.go | 57 + .../docker/runconfig/opts/ulimit_test.go | 42 + .../docker/runconfig/opts/weightdevice.go | 89 + .../github.com/docker/docker/utils/debug.go | 26 + .../docker/docker/utils/debug_test.go | 43 + .../github.com/docker/docker/utils/names.go | 9 + .../docker/docker/utils/process_unix.go | 22 + .../docker/docker/utils/process_windows.go | 20 + .../docker/utils/templates/templates.go | 42 + .../docker/utils/templates/templates_test.go | 38 + .../github.com/docker/docker/utils/utils.go | 87 + .../docker/docker/utils/utils_test.go | 21 + vendor/github.com/docker/docker/vendor.conf | 140 + .../docker/docker/volume/drivers/adapter.go | 177 + .../docker/docker/volume/drivers/extpoint.go | 215 + .../docker/volume/drivers/extpoint_test.go | 23 + .../docker/docker/volume/drivers/proxy.go | 242 + .../docker/volume/drivers/proxy_test.go | 132 + .../docker/docker/volume/local/local.go | 364 + .../docker/docker/volume/local/local_test.go | 344 + .../docker/docker/volume/local/local_unix.go | 87 + .../docker/volume/local/local_windows.go | 34 + .../docker/docker/volume/store/db.go | 88 + .../docker/docker/volume/store/errors.go | 76 + .../docker/docker/volume/store/restore.go | 83 + .../docker/docker/volume/store/store.go | 649 + .../docker/docker/volume/store/store_test.go | 234 + .../docker/docker/volume/store/store_unix.go | 9 + .../docker/volume/store/store_windows.go | 12 + .../docker/volume/testutils/testutils.go | 116 + .../docker/docker/volume/validate.go | 125 + .../docker/docker/volume/validate_test.go | 43 + .../docker/volume/validate_test_unix.go | 8 + .../docker/volume/validate_test_windows.go | 6 + .../github.com/docker/docker/volume/volume.go | 323 + .../docker/docker/volume/volume_copy.go | 23 + .../docker/docker/volume/volume_copy_unix.go | 8 + .../docker/volume/volume_copy_windows.go | 6 + .../docker/docker/volume/volume_linux.go | 56 + .../docker/docker/volume/volume_linux_test.go | 51 + .../docker/volume/volume_propagation_linux.go | 47 + .../volume/volume_propagation_linux_test.go | 65 + .../volume/volume_propagation_unsupported.go | 24 + .../docker/docker/volume/volume_test.go | 269 + .../docker/docker/volume/volume_unix.go | 138 + .../docker/volume/volume_unsupported.go | 16 + .../docker/docker/volume/volume_windows.go | 201 + .../docker/go-connections/CONTRIBUTING.md | 55 + .../github.com/docker/go-connections/LICENSE | 191 + .../docker/go-connections/MAINTAINERS | 27 + .../docker/go-connections/README.md | 13 + .../docker/go-connections/circle.yml | 14 + .../github.com/docker/go-connections/doc.go | 3 + .../docker/go-connections/nat/nat.go | 242 + .../docker/go-connections/nat/nat_test.go | 583 + .../docker/go-connections/nat/parse.go | 57 + .../docker/go-connections/nat/parse_test.go | 54 + .../docker/go-connections/nat/sort.go | 96 + .../docker/go-connections/nat/sort_test.go | 85 + .../docker/go-connections/proxy/logger.go | 11 + .../proxy/network_proxy_test.go | 216 + .../docker/go-connections/proxy/proxy.go | 36 + .../docker/go-connections/proxy/stub_proxy.go | 31 + .../docker/go-connections/proxy/tcp_proxy.go | 105 + .../docker/go-connections/proxy/udp_proxy.go | 176 + .../docker/go-connections/sockets/README.md | 0 .../go-connections/sockets/inmem_socket.go | 81 + .../sockets/inmem_socket_test.go | 39 + .../docker/go-connections/sockets/proxy.go | 51 + .../docker/go-connections/sockets/sockets.go | 38 + .../go-connections/sockets/sockets_unix.go | 35 + .../go-connections/sockets/sockets_windows.go | 27 + .../go-connections/sockets/tcp_socket.go | 22 + .../go-connections/sockets/unix_socket.go | 32 + .../go-connections/tlsconfig/certpool_go17.go | 18 + .../tlsconfig/certpool_other.go | 14 + .../docker/go-connections/tlsconfig/config.go | 244 + .../tlsconfig/config_client_ciphers.go | 17 + .../tlsconfig/config_legacy_client_ciphers.go | 15 + .../go-connections/tlsconfig/config_test.go | 651 + .../tlsconfig/fixtures/cert.pem | 18 + .../fixtures/cert_of_encrypted_key.pem | 18 + .../tlsconfig/fixtures/encrypted_key.pem | 30 + .../go-connections/tlsconfig/fixtures/key.pem | 27 + .../tlsconfig/fixtures/multi.pem | 28 + .../docker/go-units/CONTRIBUTING.md | 67 + vendor/github.com/docker/go-units/LICENSE | 191 + vendor/github.com/docker/go-units/MAINTAINERS | 27 + vendor/github.com/docker/go-units/README.md | 16 + vendor/github.com/docker/go-units/circle.yml | 11 + vendor/github.com/docker/go-units/duration.go | 35 + .../docker/go-units/duration_test.go | 95 + vendor/github.com/docker/go-units/size.go | 108 + .../github.com/docker/go-units/size_test.go | 165 + vendor/github.com/docker/go-units/ulimit.go | 118 + .../github.com/docker/go-units/ulimit_test.go | 131 + .../github.com/jessevdk/go-flags/.travis.yml | 38 + vendor/github.com/jessevdk/go-flags/LICENSE | 26 + vendor/github.com/jessevdk/go-flags/README.md | 135 + vendor/github.com/jessevdk/go-flags/arg.go | 27 + .../github.com/jessevdk/go-flags/arg_test.go | 163 + .../jessevdk/go-flags/assert_test.go | 177 + .../jessevdk/go-flags/check_crosscompile.sh | 16 + .../github.com/jessevdk/go-flags/closest.go | 59 + .../github.com/jessevdk/go-flags/command.go | 455 + .../jessevdk/go-flags/command_test.go | 582 + .../jessevdk/go-flags/completion.go | 309 + .../jessevdk/go-flags/completion_test.go | 315 + .../github.com/jessevdk/go-flags/convert.go | 348 + .../jessevdk/go-flags/convert_test.go | 159 + vendor/github.com/jessevdk/go-flags/error.go | 134 + .../jessevdk/go-flags/example_test.go | 110 + .../jessevdk/go-flags/examples/add.go | 23 + .../go-flags/examples/bash-completion | 9 + .../jessevdk/go-flags/examples/main.go | 79 + .../jessevdk/go-flags/examples/rm.go | 23 + vendor/github.com/jessevdk/go-flags/flags.go | 258 + vendor/github.com/jessevdk/go-flags/group.go | 395 + .../jessevdk/go-flags/group_test.go | 255 + vendor/github.com/jessevdk/go-flags/help.go | 491 + .../github.com/jessevdk/go-flags/help_test.go | 538 + vendor/github.com/jessevdk/go-flags/ini.go | 597 + .../github.com/jessevdk/go-flags/ini_test.go | 1053 + .../github.com/jessevdk/go-flags/long_test.go | 85 + vendor/github.com/jessevdk/go-flags/man.go | 205 + .../jessevdk/go-flags/marshal_test.go | 119 + .../github.com/jessevdk/go-flags/multitag.go | 140 + vendor/github.com/jessevdk/go-flags/option.go | 461 + .../jessevdk/go-flags/options_test.go | 45 + .../jessevdk/go-flags/optstyle_other.go | 67 + .../jessevdk/go-flags/optstyle_windows.go | 108 + vendor/github.com/jessevdk/go-flags/parser.go | 700 + .../jessevdk/go-flags/parser_test.go | 612 + .../jessevdk/go-flags/pointer_test.go | 164 + .../jessevdk/go-flags/short_test.go | 234 + .../github.com/jessevdk/go-flags/tag_test.go | 38 + .../github.com/jessevdk/go-flags/termsize.go | 28 + .../jessevdk/go-flags/termsize_linux.go | 7 + .../jessevdk/go-flags/termsize_nosysioctl.go | 7 + .../jessevdk/go-flags/termsize_other.go | 7 + .../jessevdk/go-flags/termsize_unix.go | 7 + .../jessevdk/go-flags/unknown_test.go | 66 + vendor/github.com/moby/moby/.dockerignore | 4 + .../moby/moby/.github/ISSUE_TEMPLATE.md | 64 + .../moby/.github/PULL_REQUEST_TEMPLATE.md | 30 + vendor/github.com/moby/moby/.gitignore | 33 + vendor/github.com/moby/moby/.mailmap | 320 + vendor/github.com/moby/moby/AUTHORS | 1731 ++ vendor/github.com/moby/moby/CHANGELOG.md | 3437 ++++ vendor/github.com/moby/moby/CONTRIBUTING.md | 401 + vendor/github.com/moby/moby/Dockerfile | 246 + .../github.com/moby/moby/Dockerfile.aarch64 | 175 + vendor/github.com/moby/moby/Dockerfile.armhf | 182 + .../github.com/moby/moby/Dockerfile.ppc64le | 188 + vendor/github.com/moby/moby/Dockerfile.s390x | 190 + vendor/github.com/moby/moby/Dockerfile.simple | 73 + .../github.com/moby/moby/Dockerfile.solaris | 20 + .../github.com/moby/moby/Dockerfile.windows | 267 + vendor/github.com/moby/moby/LICENSE | 191 + vendor/github.com/moby/moby/MAINTAINERS | 376 + vendor/github.com/moby/moby/Makefile | 150 + vendor/github.com/moby/moby/NOTICE | 19 + vendor/github.com/moby/moby/README.md | 304 + vendor/github.com/moby/moby/ROADMAP.md | 118 + vendor/github.com/moby/moby/VENDORING.md | 45 + vendor/github.com/moby/moby/VERSION | 1 + vendor/github.com/moby/moby/api/README.md | 42 + vendor/github.com/moby/moby/api/common.go | 166 + .../github.com/moby/moby/api/common_test.go | 341 + .../github.com/moby/moby/api/common_unix.go | 6 + .../moby/moby/api/common_windows.go | 8 + .../github.com/moby/moby/api/errors/errors.go | 47 + .../github.com/moby/moby/api/fixtures/keyfile | 7 + .../moby/moby/api/server/httputils/decoder.go | 16 + .../moby/moby/api/server/httputils/errors.go | 103 + .../moby/moby/api/server/httputils/form.go | 73 + .../moby/api/server/httputils/form_test.go | 105 + .../moby/api/server/httputils/httputils.go | 90 + .../server/httputils/httputils_write_json.go | 17 + .../httputils/httputils_write_json_go16.go | 16 + .../moby/moby/api/server/middleware.go | 24 + .../moby/moby/api/server/middleware/cors.go | 37 + .../moby/moby/api/server/middleware/debug.go | 76 + .../api/server/middleware/experimental.go | 29 + .../moby/api/server/middleware/middleware.go | 13 + .../moby/api/server/middleware/version.go | 50 + .../api/server/middleware/version_test.go | 57 + .../moby/moby/api/server/profiler.go | 41 + .../moby/api/server/router/build/backend.go | 20 + .../moby/api/server/router/build/build.go | 29 + .../api/server/router/build/build_routes.go | 225 + .../api/server/router/checkpoint/backend.go | 10 + .../server/router/checkpoint/checkpoint.go | 36 + .../router/checkpoint/checkpoint_routes.go | 65 + .../api/server/router/container/backend.go | 79 + .../api/server/router/container/container.go | 77 + .../router/container/container_routes.go | 559 + .../moby/api/server/router/container/copy.go | 119 + .../moby/api/server/router/container/exec.go | 140 + .../api/server/router/container/inspect.go | 21 + .../moby/api/server/router/experimental.go | 67 + .../moby/api/server/router/image/backend.go | 45 + .../moby/api/server/router/image/image.go | 50 + .../api/server/router/image/image_routes.go | 344 + .../moby/moby/api/server/router/local.go | 96 + .../moby/api/server/router/network/backend.go | 22 + .../moby/api/server/router/network/filter.go | 96 + .../moby/api/server/router/network/network.go | 44 + .../server/router/network/network_routes.go | 308 + .../moby/api/server/router/plugin/backend.go | 25 + .../moby/api/server/router/plugin/plugin.go | 39 + .../api/server/router/plugin/plugin_routes.go | 314 + .../moby/moby/api/server/router/router.go | 19 + .../moby/api/server/router/swarm/backend.go | 36 + .../moby/api/server/router/swarm/cluster.go | 52 + .../api/server/router/swarm/cluster_routes.go | 423 + .../moby/api/server/router/system/backend.go | 21 + .../moby/api/server/router/system/system.go | 39 + .../api/server/router/system/system_routes.go | 186 + .../moby/api/server/router/volume/backend.go | 17 + .../moby/api/server/router/volume/volume.go | 36 + .../api/server/router/volume/volume_routes.go | 80 + .../moby/moby/api/server/router_swapper.go | 30 + .../github.com/moby/moby/api/server/server.go | 210 + .../moby/moby/api/server/server_test.go | 46 + .../github.com/moby/moby/api/swagger-gen.yaml | 12 + vendor/github.com/moby/moby/api/swagger.yaml | 7939 ++++++++ .../api/templates/server/operation.gotmpl | 26 + vendor/github.com/moby/moby/api/types/auth.go | 22 + .../moby/moby/api/types/backend/backend.go | 84 + .../moby/moby/api/types/blkiodev/blkio.go | 23 + .../github.com/moby/moby/api/types/client.go | 378 + .../github.com/moby/moby/api/types/configs.go | 69 + .../moby/moby/api/types/container/config.go | 62 + .../api/types/container/container_create.go | 21 + .../api/types/container/container_update.go | 17 + .../api/types/container/container_wait.go | 17 + .../moby/api/types/container/host_config.go | 333 + .../api/types/container/hostconfig_unix.go | 81 + .../api/types/container/hostconfig_windows.go | 87 + .../moby/moby/api/types/error_response.go | 13 + .../moby/moby/api/types/events/events.go | 42 + .../moby/moby/api/types/filters/parse.go | 310 + .../moby/moby/api/types/filters/parse_test.go | 417 + .../moby/moby/api/types/id_response.go | 13 + .../moby/moby/api/types/image_summary.go | 49 + .../moby/moby/api/types/mount/mount.go | 113 + .../moby/moby/api/types/network/network.go | 59 + .../github.com/moby/moby/api/types/plugin.go | 189 + .../moby/moby/api/types/plugin_device.go | 25 + .../moby/moby/api/types/plugin_env.go | 25 + .../moby/api/types/plugin_interface_type.go | 21 + .../moby/moby/api/types/plugin_mount.go | 37 + .../moby/moby/api/types/plugin_responses.go | 64 + vendor/github.com/moby/moby/api/types/port.go | 23 + .../api/types/reference/image_reference.go | 34 + .../types/reference/image_reference_test.go | 72 + .../moby/api/types/registry/authenticate.go | 21 + .../moby/moby/api/types/registry/registry.go | 104 + .../github.com/moby/moby/api/types/seccomp.go | 93 + .../moby/api/types/service_update_response.go | 12 + .../github.com/moby/moby/api/types/stats.go | 181 + .../moby/moby/api/types/strslice/strslice.go | 30 + .../moby/api/types/strslice/strslice_test.go | 86 + .../moby/moby/api/types/swarm/common.go | 27 + .../moby/moby/api/types/swarm/container.go | 46 + .../moby/moby/api/types/swarm/network.go | 111 + .../moby/moby/api/types/swarm/node.go | 114 + .../moby/moby/api/types/swarm/secret.go | 31 + .../moby/moby/api/types/swarm/service.go | 105 + .../moby/moby/api/types/swarm/swarm.go | 197 + .../moby/moby/api/types/swarm/task.go | 128 + .../moby/api/types/time/duration_convert.go | 12 + .../api/types/time/duration_convert_test.go | 26 + .../moby/moby/api/types/time/timestamp.go | 124 + .../moby/api/types/time/timestamp_test.go | 93 + .../github.com/moby/moby/api/types/types.go | 549 + .../moby/moby/api/types/versions/README.md | 14 + .../moby/moby/api/types/versions/compare.go | 62 + .../moby/api/types/versions/compare_test.go | 26 + .../moby/api/types/versions/v1p19/types.go | 35 + .../moby/api/types/versions/v1p20/types.go | 40 + .../github.com/moby/moby/api/types/volume.go | 58 + .../moby/api/types/volume/volumes_create.go | 29 + .../moby/api/types/volume/volumes_list.go | 23 + .../github.com/moby/moby/builder/builder.go | 169 + .../github.com/moby/moby/builder/context.go | 260 + .../moby/moby/builder/context_test.go | 307 + .../moby/moby/builder/context_unix.go | 11 + .../moby/moby/builder/context_windows.go | 17 + .../moby/moby/builder/dockerfile/bflag.go | 176 + .../moby/builder/dockerfile/bflag_test.go | 187 + .../moby/moby/builder/dockerfile/builder.go | 370 + .../moby/builder/dockerfile/builder_unix.go | 5 + .../builder/dockerfile/builder_windows.go | 3 + .../builder/dockerfile/command/command.go | 46 + .../moby/builder/dockerfile/dispatchers.go | 821 + .../builder/dockerfile/dispatchers_test.go | 517 + .../builder/dockerfile/dispatchers_unix.go | 27 + .../dockerfile/dispatchers_unix_test.go | 33 + .../builder/dockerfile/dispatchers_windows.go | 86 + .../dockerfile/dispatchers_windows_test.go | 40 + .../moby/moby/builder/dockerfile/envVarTest | 116 + .../moby/moby/builder/dockerfile/evaluator.go | 244 + .../moby/builder/dockerfile/evaluator_test.go | 197 + .../moby/builder/dockerfile/evaluator_unix.go | 9 + .../builder/dockerfile/evaluator_windows.go | 13 + .../moby/moby/builder/dockerfile/internals.go | 669 + .../moby/builder/dockerfile/internals_test.go | 95 + .../moby/builder/dockerfile/internals_unix.go | 38 + .../builder/dockerfile/internals_windows.go | 66 + .../dockerfile/internals_windows_test.go | 51 + .../builder/dockerfile/parser/dumper/main.go | 36 + .../builder/dockerfile/parser/json_test.go | 61 + .../builder/dockerfile/parser/line_parsers.go | 361 + .../moby/builder/dockerfile/parser/parser.go | 221 + .../builder/dockerfile/parser/parser_test.go | 173 + .../parser/testfile-line/Dockerfile | 35 + .../env_no_value/Dockerfile | 3 + .../shykes-nested-json/Dockerfile | 1 + .../testfiles/ADD-COPY-with-JSON/Dockerfile | 11 + .../testfiles/ADD-COPY-with-JSON/result | 10 + .../testfiles/brimstone-consuldock/Dockerfile | 26 + .../testfiles/brimstone-consuldock/result | 5 + .../brimstone-docker-consul/Dockerfile | 52 + .../testfiles/brimstone-docker-consul/result | 9 + .../testfiles/continueIndent/Dockerfile | 36 + .../parser/testfiles/continueIndent/result | 10 + .../testfiles/cpuguy83-nagios/Dockerfile | 54 + .../parser/testfiles/cpuguy83-nagios/result | 40 + .../parser/testfiles/docker/Dockerfile | 103 + .../dockerfile/parser/testfiles/docker/result | 24 + .../parser/testfiles/env/Dockerfile | 23 + .../dockerfile/parser/testfiles/env/result | 16 + .../testfiles/escape-after-comment/Dockerfile | 9 + .../testfiles/escape-after-comment/result | 3 + .../testfiles/escape-nonewline/Dockerfile | 7 + .../parser/testfiles/escape-nonewline/result | 3 + .../parser/testfiles/escape/Dockerfile | 6 + .../dockerfile/parser/testfiles/escape/result | 3 + .../parser/testfiles/escapes/Dockerfile | 14 + .../parser/testfiles/escapes/result | 6 + .../parser/testfiles/flags/Dockerfile | 10 + .../dockerfile/parser/testfiles/flags/result | 10 + .../parser/testfiles/health/Dockerfile | 10 + .../dockerfile/parser/testfiles/health/result | 9 + .../parser/testfiles/influxdb/Dockerfile | 15 + .../parser/testfiles/influxdb/result | 11 + .../Dockerfile | 1 + .../result | 1 + .../Dockerfile | 1 + .../result | 1 + .../Dockerfile | 1 + .../jeztah-invalid-json-single-quotes/result | 1 + .../Dockerfile | 1 + .../result | 1 + .../Dockerfile | 1 + .../result | 1 + .../parser/testfiles/json/Dockerfile | 8 + .../dockerfile/parser/testfiles/json/result | 8 + .../kartar-entrypoint-oddities/Dockerfile | 7 + .../kartar-entrypoint-oddities/result | 7 + .../lk4d4-the-edge-case-generator/Dockerfile | 48 + .../lk4d4-the-edge-case-generator/result | 29 + .../parser/testfiles/mail/Dockerfile | 16 + .../dockerfile/parser/testfiles/mail/result | 14 + .../testfiles/multiple-volumes/Dockerfile | 3 + .../parser/testfiles/multiple-volumes/result | 2 + .../parser/testfiles/mumble/Dockerfile | 7 + .../dockerfile/parser/testfiles/mumble/result | 4 + .../parser/testfiles/nginx/Dockerfile | 14 + .../dockerfile/parser/testfiles/nginx/result | 11 + .../parser/testfiles/tf2/Dockerfile | 23 + .../dockerfile/parser/testfiles/tf2/result | 20 + .../parser/testfiles/weechat/Dockerfile | 9 + .../parser/testfiles/weechat/result | 6 + .../parser/testfiles/znc/Dockerfile | 7 + .../dockerfile/parser/testfiles/znc/result | 5 + .../moby/builder/dockerfile/parser/utils.go | 176 + .../moby/builder/dockerfile/shell_parser.go | 329 + .../builder/dockerfile/shell_parser_test.go | 155 + .../moby/moby/builder/dockerfile/support.go | 19 + .../moby/builder/dockerfile/support_test.go | 65 + .../moby/builder/dockerfile/utils_test.go | 50 + .../moby/moby/builder/dockerfile/wordsTest | 25 + .../moby/moby/builder/dockerignore.go | 48 + .../moby/builder/dockerignore/dockerignore.go | 49 + .../builder/dockerignore/dockerignore_test.go | 57 + .../moby/moby/builder/dockerignore_test.go | 95 + vendor/github.com/moby/moby/builder/git.go | 28 + vendor/github.com/moby/moby/builder/remote.go | 157 + .../moby/moby/builder/remote_test.go | 213 + vendor/github.com/moby/moby/builder/tarsum.go | 159 + .../moby/moby/builder/tarsum_test.go | 265 + .../moby/moby/builder/utils_test.go | 87 + vendor/github.com/moby/moby/cli/cobra.go | 139 + .../moby/cli/command/bundlefile/bundlefile.go | 69 + .../cli/command/bundlefile/bundlefile_test.go | 77 + .../moby/moby/cli/command/checkpoint/cmd.go | 24 + .../moby/cli/command/checkpoint/create.go | 58 + .../moby/moby/cli/command/checkpoint/list.go | 62 + .../moby/cli/command/checkpoint/remove.go | 44 + .../github.com/moby/moby/cli/command/cli.go | 260 + .../moby/cli/command/commands/commands.go | 91 + .../moby/moby/cli/command/container/attach.go | 130 + .../moby/moby/cli/command/container/cmd.go | 46 + .../moby/moby/cli/command/container/commit.go | 76 + .../moby/moby/cli/command/container/cp.go | 303 + .../moby/moby/cli/command/container/create.go | 218 + .../moby/moby/cli/command/container/diff.go | 58 + .../moby/moby/cli/command/container/exec.go | 207 + .../moby/cli/command/container/exec_test.go | 116 + .../moby/moby/cli/command/container/export.go | 59 + .../moby/moby/cli/command/container/hijack.go | 116 + .../moby/cli/command/container/inspect.go | 47 + .../moby/moby/cli/command/container/kill.go | 56 + .../moby/moby/cli/command/container/list.go | 141 + .../moby/moby/cli/command/container/logs.go | 87 + .../moby/moby/cli/command/container/pause.go | 49 + .../moby/moby/cli/command/container/port.go | 78 + .../moby/moby/cli/command/container/prune.go | 75 + .../moby/cli/command/container/ps_test.go | 118 + .../moby/moby/cli/command/container/rename.go | 51 + .../moby/cli/command/container/restart.go | 62 + .../moby/moby/cli/command/container/rm.go | 73 + .../moby/moby/cli/command/container/run.go | 284 + .../moby/moby/cli/command/container/start.go | 179 + .../moby/moby/cli/command/container/stats.go | 243 + .../cli/command/container/stats_helpers.go | 230 + .../cli/command/container/stats_unit_test.go | 20 + .../moby/moby/cli/command/container/stop.go | 67 + .../moby/moby/cli/command/container/top.go | 58 + .../moby/moby/cli/command/container/tty.go | 103 + .../moby/cli/command/container/unpause.go | 50 + .../moby/moby/cli/command/container/update.go | 163 + .../moby/moby/cli/command/container/utils.go | 143 + .../moby/moby/cli/command/container/wait.go | 50 + .../moby/moby/cli/command/events_utils.go | 49 + .../moby/cli/command/formatter/container.go | 235 + .../cli/command/formatter/container_test.go | 398 + .../moby/moby/cli/command/formatter/custom.go | 51 + .../moby/cli/command/formatter/custom_test.go | 28 + .../moby/cli/command/formatter/disk_usage.go | 334 + .../moby/cli/command/formatter/formatter.go | 123 + .../moby/moby/cli/command/formatter/image.go | 259 + .../moby/cli/command/formatter/image_test.go | 333 + .../moby/cli/command/formatter/network.go | 117 + .../cli/command/formatter/network_test.go | 208 + .../moby/cli/command/formatter/reflect.go | 65 + .../cli/command/formatter/reflect_test.go | 66 + .../moby/cli/command/formatter/service.go | 322 + .../moby/moby/cli/command/formatter/stats.go | 213 + .../moby/cli/command/formatter/stats_test.go | 236 + .../moby/moby/cli/command/formatter/volume.go | 121 + .../moby/cli/command/formatter/volume_test.go | 189 + .../moby/cli/command/idresolver/idresolver.go | 90 + .../moby/moby/cli/command/image/build.go | 477 + .../moby/moby/cli/command/image/cmd.go | 33 + .../moby/moby/cli/command/image/history.go | 99 + .../moby/moby/cli/command/image/import.go | 88 + .../moby/moby/cli/command/image/inspect.go | 44 + .../moby/moby/cli/command/image/list.go | 96 + .../moby/moby/cli/command/image/load.go | 77 + .../moby/moby/cli/command/image/prune.go | 92 + .../moby/moby/cli/command/image/pull.go | 84 + .../moby/moby/cli/command/image/push.go | 61 + .../moby/moby/cli/command/image/remove.go | 77 + .../moby/moby/cli/command/image/save.go | 57 + .../moby/moby/cli/command/image/tag.go | 41 + .../moby/moby/cli/command/image/trust.go | 381 + .../moby/moby/cli/command/image/trust_test.go | 57 + vendor/github.com/moby/moby/cli/command/in.go | 75 + .../moby/cli/command/inspect/inspector.go | 195 + .../cli/command/inspect/inspector_test.go | 221 + .../moby/moby/cli/command/network/cmd.go | 28 + .../moby/moby/cli/command/network/connect.go | 64 + .../moby/moby/cli/command/network/create.go | 226 + .../moby/cli/command/network/disconnect.go | 41 + .../moby/moby/cli/command/network/inspect.go | 45 + .../moby/moby/cli/command/network/list.go | 76 + .../moby/moby/cli/command/network/prune.go | 73 + .../moby/moby/cli/command/network/remove.go | 43 + .../moby/moby/cli/command/node/cmd.go | 43 + .../moby/moby/cli/command/node/demote.go | 36 + .../moby/moby/cli/command/node/inspect.go | 144 + .../moby/moby/cli/command/node/list.go | 115 + .../moby/moby/cli/command/node/opts.go | 60 + .../moby/moby/cli/command/node/promote.go | 36 + .../moby/moby/cli/command/node/ps.go | 93 + .../moby/moby/cli/command/node/remove.go | 56 + .../moby/moby/cli/command/node/update.go | 121 + .../github.com/moby/moby/cli/command/out.go | 69 + .../moby/moby/cli/command/plugin/cmd.go | 31 + .../moby/moby/cli/command/plugin/create.go | 125 + .../moby/moby/cli/command/plugin/disable.go | 36 + .../moby/moby/cli/command/plugin/enable.go | 47 + .../moby/moby/cli/command/plugin/inspect.go | 42 + .../moby/moby/cli/command/plugin/install.go | 208 + .../moby/moby/cli/command/plugin/list.go | 63 + .../moby/moby/cli/command/plugin/push.go | 71 + .../moby/moby/cli/command/plugin/remove.go | 55 + .../moby/moby/cli/command/plugin/set.go | 22 + .../moby/moby/cli/command/plugin/upgrade.go | 100 + .../moby/moby/cli/command/prune/prune.go | 50 + .../moby/moby/cli/command/registry.go | 186 + .../moby/moby/cli/command/registry/login.go | 85 + .../moby/moby/cli/command/registry/logout.go | 77 + .../moby/moby/cli/command/registry/search.go | 126 + .../moby/moby/cli/command/secret/cmd.go | 25 + .../moby/moby/cli/command/secret/create.go | 79 + .../moby/moby/cli/command/secret/inspect.go | 45 + .../moby/moby/cli/command/secret/ls.go | 68 + .../moby/moby/cli/command/secret/remove.go | 57 + .../moby/moby/cli/command/secret/utils.go | 76 + .../moby/moby/cli/command/service/cmd.go | 29 + .../moby/moby/cli/command/service/create.go | 100 + .../moby/moby/cli/command/service/inspect.go | 84 + .../moby/cli/command/service/inspect_test.go | 129 + .../moby/moby/cli/command/service/list.go | 158 + .../moby/moby/cli/command/service/logs.go | 163 + .../moby/moby/cli/command/service/opts.go | 648 + .../moby/cli/command/service/opts_test.go | 107 + .../moby/moby/cli/command/service/parse.go | 68 + .../moby/moby/cli/command/service/ps.go | 76 + .../moby/moby/cli/command/service/remove.go | 47 + .../moby/moby/cli/command/service/scale.go | 96 + .../moby/moby/cli/command/service/trust.go | 96 + .../moby/moby/cli/command/service/update.go | 849 + .../moby/cli/command/service/update_test.go | 384 + .../moby/moby/cli/command/stack/cmd.go | 35 + .../moby/moby/cli/command/stack/common.go | 60 + .../moby/moby/cli/command/stack/deploy.go | 357 + .../cli/command/stack/deploy_bundlefile.go | 83 + .../moby/moby/cli/command/stack/list.go | 113 + .../moby/moby/cli/command/stack/opts.go | 49 + .../moby/moby/cli/command/stack/ps.go | 61 + .../moby/moby/cli/command/stack/remove.go | 112 + .../moby/moby/cli/command/stack/services.go | 79 + .../moby/moby/cli/command/swarm/cmd.go | 28 + .../moby/moby/cli/command/swarm/init.go | 85 + .../moby/moby/cli/command/swarm/join.go | 69 + .../moby/moby/cli/command/swarm/join_token.go | 105 + .../moby/moby/cli/command/swarm/leave.go | 44 + .../moby/moby/cli/command/swarm/opts.go | 209 + .../moby/moby/cli/command/swarm/opts_test.go | 37 + .../moby/moby/cli/command/swarm/unlock.go | 54 + .../moby/moby/cli/command/swarm/unlock_key.go | 79 + .../moby/moby/cli/command/swarm/update.go | 72 + .../moby/moby/cli/command/system/cmd.go | 26 + .../moby/moby/cli/command/system/df.go | 56 + .../moby/moby/cli/command/system/events.go | 140 + .../moby/moby/cli/command/system/info.go | 365 + .../moby/moby/cli/command/system/inspect.go | 203 + .../moby/moby/cli/command/system/prune.go | 93 + .../moby/moby/cli/command/system/version.go | 113 + .../moby/moby/cli/command/task/print.go | 161 + .../github.com/moby/moby/cli/command/trust.go | 39 + .../github.com/moby/moby/cli/command/utils.go | 87 + .../moby/moby/cli/command/volume/cmd.go | 45 + .../moby/moby/cli/command/volume/create.go | 111 + .../moby/moby/cli/command/volume/inspect.go | 55 + .../moby/moby/cli/command/volume/list.go | 91 + .../moby/moby/cli/command/volume/prune.go | 75 + .../moby/moby/cli/command/volume/remove.go | 68 + .../moby/moby/cli/compose/convert/compose.go | 116 + .../moby/cli/compose/convert/compose_test.go | 122 + .../moby/moby/cli/compose/convert/service.go | 423 + .../moby/cli/compose/convert/service_test.go | 240 + .../moby/moby/cli/compose/convert/volume.go | 128 + .../moby/cli/compose/convert/volume_test.go | 133 + .../compose/interpolation/interpolation.go | 90 + .../interpolation/interpolation_test.go | 59 + .../moby/moby/cli/compose/loader/example1.env | 8 + .../moby/moby/cli/compose/loader/example2.env | 1 + .../moby/cli/compose/loader/full-example.yml | 287 + .../moby/moby/cli/compose/loader/loader.go | 653 + .../moby/cli/compose/loader/loader_test.go | 800 + .../moby/moby/cli/compose/schema/bindata.go | 260 + .../schema/data/config_schema_v3.0.json | 383 + .../schema/data/config_schema_v3.1.json | 428 + .../moby/moby/cli/compose/schema/schema.go | 137 + .../moby/cli/compose/schema/schema_test.go | 52 + .../moby/cli/compose/template/template.go | 100 + .../cli/compose/template/template_test.go | 83 + .../moby/moby/cli/compose/types/types.go | 253 + vendor/github.com/moby/moby/cli/error.go | 33 + .../github.com/moby/moby/cli/flags/client.go | 13 + .../github.com/moby/moby/cli/flags/common.go | 120 + .../moby/moby/cli/flags/common_test.go | 42 + vendor/github.com/moby/moby/cli/required.go | 96 + .../github.com/moby/moby/cli/trust/trust.go | 232 + .../github.com/moby/moby/cliconfig/config.go | 120 + .../moby/moby/cliconfig/config_test.go | 621 + .../moby/moby/cliconfig/configfile/file.go | 183 + .../moby/cliconfig/configfile/file_test.go | 27 + .../moby/cliconfig/credentials/credentials.go | 17 + .../cliconfig/credentials/default_store.go | 22 + .../credentials/default_store_darwin.go | 3 + .../credentials/default_store_linux.go | 3 + .../credentials/default_store_unsupported.go | 5 + .../credentials/default_store_windows.go | 3 + .../moby/cliconfig/credentials/file_store.go | 53 + .../cliconfig/credentials/file_store_test.go | 139 + .../cliconfig/credentials/native_store.go | 144 + .../credentials/native_store_test.go | 355 + vendor/github.com/moby/moby/client/README.md | 35 + .../moby/moby/client/checkpoint_create.go | 13 + .../moby/client/checkpoint_create_test.go | 73 + .../moby/moby/client/checkpoint_delete.go | 20 + .../moby/client/checkpoint_delete_test.go | 54 + .../moby/moby/client/checkpoint_list.go | 28 + .../moby/moby/client/checkpoint_list_test.go | 57 + vendor/github.com/moby/moby/client/client.go | 246 + .../moby/moby/client/client_mock_test.go | 45 + .../moby/moby/client/client_test.go | 283 + .../moby/moby/client/client_unix.go | 6 + .../moby/moby/client/client_windows.go | 4 + .../moby/moby/client/container_attach.go | 37 + .../moby/moby/client/container_commit.go | 53 + .../moby/moby/client/container_commit_test.go | 96 + .../moby/moby/client/container_copy.go | 97 + .../moby/moby/client/container_copy_test.go | 244 + .../moby/moby/client/container_create.go | 56 + .../moby/moby/client/container_create_test.go | 118 + .../moby/moby/client/container_diff.go | 23 + .../moby/moby/client/container_diff_test.go | 61 + .../moby/moby/client/container_exec.go | 54 + .../moby/moby/client/container_exec_test.go | 157 + .../moby/moby/client/container_export.go | 20 + .../moby/moby/client/container_export_test.go | 50 + .../moby/moby/client/container_inspect.go | 54 + .../moby/client/container_inspect_test.go | 125 + .../moby/moby/client/container_kill.go | 17 + .../moby/moby/client/container_kill_test.go | 46 + .../moby/moby/client/container_list.go | 56 + .../moby/moby/client/container_list_test.go | 96 + .../moby/moby/client/container_logs.go | 52 + .../moby/moby/client/container_logs_test.go | 133 + .../moby/moby/client/container_pause.go | 10 + .../moby/moby/client/container_pause_test.go | 41 + .../moby/moby/client/container_prune.go | 36 + .../moby/moby/client/container_remove.go | 27 + .../moby/moby/client/container_remove_test.go | 59 + .../moby/moby/client/container_rename.go | 16 + .../moby/moby/client/container_rename_test.go | 46 + .../moby/moby/client/container_resize.go | 29 + .../moby/moby/client/container_resize_test.go | 82 + .../moby/moby/client/container_restart.go | 22 + .../moby/client/container_restart_test.go | 48 + .../moby/moby/client/container_start.go | 24 + .../moby/moby/client/container_start_test.go | 58 + .../moby/moby/client/container_stats.go | 26 + .../moby/moby/client/container_stats_test.go | 70 + .../moby/moby/client/container_stop.go | 21 + .../moby/moby/client/container_stop_test.go | 48 + .../moby/moby/client/container_top.go | 28 + .../moby/moby/client/container_top_test.go | 74 + .../moby/moby/client/container_unpause.go | 10 + .../moby/client/container_unpause_test.go | 41 + .../moby/moby/client/container_update.go | 22 + .../moby/moby/client/container_update_test.go | 58 + .../moby/moby/client/container_wait.go | 26 + .../moby/moby/client/container_wait_test.go | 70 + .../github.com/moby/moby/client/disk_usage.go | 26 + vendor/github.com/moby/moby/client/errors.go | 278 + vendor/github.com/moby/moby/client/events.go | 102 + .../moby/moby/client/events_test.go | 165 + vendor/github.com/moby/moby/client/hijack.go | 177 + .../moby/moby/client/image_build.go | 123 + .../moby/moby/client/image_build_test.go | 233 + .../moby/moby/client/image_create.go | 34 + .../moby/moby/client/image_create_test.go | 76 + .../moby/moby/client/image_history.go | 22 + .../moby/moby/client/image_history_test.go | 60 + .../moby/moby/client/image_import.go | 37 + .../moby/moby/client/image_import_test.go | 81 + .../moby/moby/client/image_inspect.go | 33 + .../moby/moby/client/image_inspect_test.go | 71 + .../github.com/moby/moby/client/image_list.go | 45 + .../moby/moby/client/image_list_test.go | 159 + .../github.com/moby/moby/client/image_load.go | 30 + .../moby/moby/client/image_load_test.go | 95 + .../moby/moby/client/image_prune.go | 36 + .../github.com/moby/moby/client/image_pull.go | 46 + .../moby/moby/client/image_pull_test.go | 199 + .../github.com/moby/moby/client/image_push.go | 54 + .../moby/moby/client/image_push_test.go | 180 + .../moby/moby/client/image_remove.go | 31 + .../moby/moby/client/image_remove_test.go | 95 + .../github.com/moby/moby/client/image_save.go | 22 + .../moby/moby/client/image_save_test.go | 58 + .../moby/moby/client/image_search.go | 51 + .../moby/moby/client/image_search_test.go | 165 + .../github.com/moby/moby/client/image_tag.go | 34 + .../moby/moby/client/image_tag_test.go | 121 + vendor/github.com/moby/moby/client/info.go | 26 + .../github.com/moby/moby/client/info_test.go | 76 + .../github.com/moby/moby/client/interface.go | 171 + .../moby/client/interface_experimental.go | 17 + .../moby/moby/client/interface_stable.go | 10 + vendor/github.com/moby/moby/client/login.go | 29 + .../moby/moby/client/network_connect.go | 18 + .../moby/moby/client/network_connect_test.go | 107 + .../moby/moby/client/network_create.go | 25 + .../moby/moby/client/network_create_test.go | 72 + .../moby/moby/client/network_disconnect.go | 14 + .../moby/client/network_disconnect_test.go | 64 + .../moby/moby/client/network_inspect.go | 38 + .../moby/moby/client/network_inspect_test.go | 69 + .../moby/moby/client/network_list.go | 31 + .../moby/moby/client/network_list_test.go | 108 + .../moby/moby/client/network_prune.go | 36 + .../moby/moby/client/network_remove.go | 10 + .../moby/moby/client/network_remove_test.go | 47 + .../moby/moby/client/node_inspect.go | 33 + .../moby/moby/client/node_inspect_test.go | 65 + .../github.com/moby/moby/client/node_list.go | 36 + .../moby/moby/client/node_list_test.go | 94 + .../moby/moby/client/node_remove.go | 21 + .../moby/moby/client/node_remove_test.go | 69 + .../moby/moby/client/node_update.go | 18 + .../moby/moby/client/node_update_test.go | 49 + vendor/github.com/moby/moby/client/ping.go | 30 + .../moby/moby/client/plugin_create.go | 26 + .../moby/moby/client/plugin_disable.go | 19 + .../moby/moby/client/plugin_disable_test.go | 48 + .../moby/moby/client/plugin_enable.go | 19 + .../moby/moby/client/plugin_enable_test.go | 48 + .../moby/moby/client/plugin_inspect.go | 32 + .../moby/moby/client/plugin_inspect_test.go | 54 + .../moby/moby/client/plugin_install.go | 113 + .../moby/moby/client/plugin_list.go | 21 + .../moby/moby/client/plugin_list_test.go | 59 + .../moby/moby/client/plugin_push.go | 17 + .../moby/moby/client/plugin_push_test.go | 51 + .../moby/moby/client/plugin_remove.go | 20 + .../moby/moby/client/plugin_remove_test.go | 49 + .../github.com/moby/moby/client/plugin_set.go | 12 + .../moby/moby/client/plugin_set_test.go | 47 + .../moby/moby/client/plugin_upgrade.go | 37 + vendor/github.com/moby/moby/client/request.go | 247 + .../moby/moby/client/request_test.go | 92 + .../moby/moby/client/secret_create.go | 24 + .../moby/moby/client/secret_create_test.go | 57 + .../moby/moby/client/secret_inspect.go | 34 + .../moby/moby/client/secret_inspect_test.go | 65 + .../moby/moby/client/secret_list.go | 35 + .../moby/moby/client/secret_list_test.go | 94 + .../moby/moby/client/secret_remove.go | 10 + .../moby/moby/client/secret_remove_test.go | 47 + .../moby/moby/client/secret_update.go | 19 + .../moby/moby/client/secret_update_test.go | 49 + .../moby/moby/client/service_create.go | 30 + .../moby/moby/client/service_create_test.go | 57 + .../moby/moby/client/service_inspect.go | 33 + .../moby/moby/client/service_inspect_test.go | 65 + .../moby/moby/client/service_list.go | 35 + .../moby/moby/client/service_list_test.go | 94 + .../moby/moby/client/service_logs.go | 52 + .../moby/moby/client/service_logs_test.go | 133 + .../moby/moby/client/service_remove.go | 10 + .../moby/moby/client/service_remove_test.go | 47 + .../moby/moby/client/service_update.go | 41 + .../moby/moby/client/service_update_test.go | 77 + .../moby/moby/client/swarm_get_unlock_key.go | 21 + .../github.com/moby/moby/client/swarm_init.go | 21 + .../moby/moby/client/swarm_init_test.go | 54 + .../moby/moby/client/swarm_inspect.go | 21 + .../moby/moby/client/swarm_inspect_test.go | 56 + .../github.com/moby/moby/client/swarm_join.go | 13 + .../moby/moby/client/swarm_join_test.go | 51 + .../moby/moby/client/swarm_leave.go | 18 + .../moby/moby/client/swarm_leave_test.go | 66 + .../moby/moby/client/swarm_unlock.go | 17 + .../moby/moby/client/swarm_update.go | 22 + .../moby/moby/client/swarm_update_test.go | 49 + .../moby/moby/client/task_inspect.go | 34 + .../moby/moby/client/task_inspect_test.go | 54 + .../github.com/moby/moby/client/task_list.go | 35 + .../moby/moby/client/task_list_test.go | 94 + .../moby/moby/client/testdata/ca.pem | 18 + .../moby/moby/client/testdata/cert.pem | 18 + .../moby/moby/client/testdata/key.pem | 27 + .../github.com/moby/moby/client/transport.go | 28 + vendor/github.com/moby/moby/client/utils.go | 33 + vendor/github.com/moby/moby/client/version.go | 21 + .../moby/moby/client/volume_create.go | 21 + .../moby/moby/client/volume_create_test.go | 75 + .../moby/moby/client/volume_inspect.go | 38 + .../moby/moby/client/volume_inspect_test.go | 76 + .../moby/moby/client/volume_list.go | 32 + .../moby/moby/client/volume_list_test.go | 98 + .../moby/moby/client/volume_prune.go | 36 + .../moby/moby/client/volume_remove.go | 21 + .../moby/moby/client/volume_remove_test.go | 47 + .../moby/moby/cmd/docker/daemon_none.go | 27 + .../moby/moby/cmd/docker/daemon_none_test.go | 17 + .../moby/moby/cmd/docker/daemon_unit_test.go | 30 + .../moby/moby/cmd/docker/daemon_unix.go | 79 + .../github.com/moby/moby/cmd/docker/docker.go | 180 + .../moby/moby/cmd/docker/docker_test.go | 32 + .../moby/moby/cmd/docker/docker_windows.go | 18 + .../moby/moby/cmd/dockerd/README.md | 3 + .../moby/moby/cmd/dockerd/daemon.go | 524 + .../moby/moby/cmd/dockerd/daemon_freebsd.go | 5 + .../moby/moby/cmd/dockerd/daemon_linux.go | 11 + .../moby/moby/cmd/dockerd/daemon_solaris.go | 85 + .../moby/moby/cmd/dockerd/daemon_test.go | 145 + .../moby/moby/cmd/dockerd/daemon_unix.go | 137 + .../moby/moby/cmd/dockerd/daemon_unix_test.go | 114 + .../moby/moby/cmd/dockerd/daemon_windows.go | 92 + .../moby/moby/cmd/dockerd/docker.go | 110 + .../moby/moby/cmd/dockerd/docker_windows.go | 18 + .../dockerd/hack/malformed_host_override.go | 121 + .../hack/malformed_host_override_test.go | 124 + .../moby/moby/cmd/dockerd/metrics.go | 27 + .../moby/cmd/dockerd/service_unsupported.go | 14 + .../moby/moby/cmd/dockerd/service_windows.go | 426 + .../github.com/moby/moby/container/archive.go | 76 + .../moby/moby/container/container.go | 1103 + .../moby/moby/container/container_linux.go | 9 + .../moby/moby/container/container_notlinux.go | 23 + .../moby/container/container_unit_test.go | 60 + .../moby/moby/container/container_unix.go | 448 + .../moby/moby/container/container_windows.go | 111 + .../github.com/moby/moby/container/health.go | 49 + .../github.com/moby/moby/container/history.go | 30 + .../moby/moby/container/memory_store.go | 95 + .../moby/moby/container/memory_store_test.go | 106 + .../github.com/moby/moby/container/monitor.go | 46 + .../moby/moby/container/mounts_unix.go | 12 + .../moby/moby/container/mounts_windows.go | 8 + .../github.com/moby/moby/container/state.go | 343 + .../moby/moby/container/state_solaris.go | 7 + .../moby/moby/container/state_test.go | 113 + .../moby/moby/container/state_unix.go | 10 + .../moby/moby/container/state_windows.go | 7 + .../github.com/moby/moby/container/store.go | 28 + .../moby/moby/container/stream/streams.go | 143 + vendor/github.com/moby/moby/contrib/README.md | 4 + vendor/github.com/moby/moby/contrib/REVIEWERS | 1 + .../moby/moby/contrib/apparmor/main.go | 56 + .../moby/moby/contrib/apparmor/template.go | 268 + .../moby/contrib/builder/deb/aarch64/build.sh | 10 + .../contrib/builder/deb/aarch64/generate.sh | 118 + .../deb/aarch64/ubuntu-trusty/Dockerfile | 24 + .../deb/aarch64/ubuntu-xenial/Dockerfile | 22 + .../moby/contrib/builder/deb/amd64/README.md | 5 + .../moby/contrib/builder/deb/amd64/build.sh | 10 + .../deb/amd64/debian-jessie/Dockerfile | 20 + .../deb/amd64/debian-stretch/Dockerfile | 20 + .../deb/amd64/debian-wheezy/Dockerfile | 22 + .../contrib/builder/deb/amd64/generate.sh | 149 + .../deb/amd64/ubuntu-precise/Dockerfile | 16 + .../deb/amd64/ubuntu-trusty/Dockerfile | 16 + .../deb/amd64/ubuntu-xenial/Dockerfile | 16 + .../deb/amd64/ubuntu-yakkety/Dockerfile | 16 + .../deb/armhf/debian-jessie/Dockerfile | 20 + .../contrib/builder/deb/armhf/generate.sh | 158 + .../deb/armhf/raspbian-jessie/Dockerfile | 22 + .../deb/armhf/ubuntu-trusty/Dockerfile | 16 + .../deb/armhf/ubuntu-xenial/Dockerfile | 16 + .../deb/armhf/ubuntu-yakkety/Dockerfile | 16 + .../moby/contrib/builder/deb/ppc64le/build.sh | 10 + .../contrib/builder/deb/ppc64le/generate.sh | 103 + .../deb/ppc64le/ubuntu-trusty/Dockerfile | 16 + .../deb/ppc64le/ubuntu-xenial/Dockerfile | 16 + .../deb/ppc64le/ubuntu-yakkety/Dockerfile | 16 + .../moby/contrib/builder/deb/s390x/build.sh | 10 + .../contrib/builder/deb/s390x/generate.sh | 96 + .../deb/s390x/ubuntu-xenial/Dockerfile | 16 + .../moby/contrib/builder/rpm/amd64/README.md | 5 + .../moby/contrib/builder/rpm/amd64/build.sh | 10 + .../builder/rpm/amd64/centos-7/Dockerfile | 19 + .../builder/rpm/amd64/fedora-24/Dockerfile | 19 + .../builder/rpm/amd64/fedora-25/Dockerfile | 19 + .../contrib/builder/rpm/amd64/generate.sh | 189 + .../rpm/amd64/opensuse-13.2/Dockerfile | 18 + .../rpm/amd64/oraclelinux-6/Dockerfile | 28 + .../rpm/amd64/oraclelinux-7/Dockerfile | 18 + .../builder/rpm/amd64/photon-1.0/Dockerfile | 18 + .../moby/moby/contrib/check-config.sh | 354 + .../moby/moby/contrib/completion/REVIEWERS | 2 + .../moby/moby/contrib/completion/bash/docker | 4316 ++++ .../moby/contrib/completion/fish/docker.fish | 405 + .../contrib/completion/powershell/readme.txt | 1 + .../moby/contrib/completion/zsh/REVIEWERS | 2 + .../moby/moby/contrib/completion/zsh/_docker | 2929 +++ .../contrib/desktop-integration/README.md | 11 + .../desktop-integration/chromium/Dockerfile | 36 + .../desktop-integration/gparted/Dockerfile | 31 + .../moby/contrib/docker-device-tool/README.md | 14 + .../contrib/docker-device-tool/device_tool.go | 176 + .../docker-device-tool/device_tool_windows.go | 4 + .../moby/moby/contrib/dockerize-disk.sh | 118 + .../moby/contrib/download-frozen-image-v1.sh | 108 + .../moby/contrib/download-frozen-image-v2.sh | 121 + .../github.com/moby/moby/contrib/editorconfig | 13 + .../moby/moby/contrib/gitdm/aliases | 148 + .../moby/moby/contrib/gitdm/domain-map | 39 + .../moby/contrib/gitdm/generate_aliases.sh | 16 + .../moby/moby/contrib/gitdm/gitdm.config | 17 + .../moby/moby/contrib/httpserver/Dockerfile | 4 + .../contrib/httpserver/Dockerfile.solaris | 4 + .../moby/moby/contrib/httpserver/server.go | 12 + .../moby/contrib/init/openrc/docker.confd | 13 + .../moby/contrib/init/openrc/docker.initd | 22 + .../moby/moby/contrib/init/systemd/REVIEWERS | 3 + .../moby/contrib/init/systemd/docker.service | 29 + .../contrib/init/systemd/docker.service.rpm | 28 + .../moby/contrib/init/systemd/docker.socket | 12 + .../moby/contrib/init/sysvinit-debian/docker | 152 + .../init/sysvinit-debian/docker.default | 20 + .../moby/contrib/init/sysvinit-redhat/docker | 153 + .../init/sysvinit-redhat/docker.sysconfig | 7 + .../moby/moby/contrib/init/upstart/REVIEWERS | 2 + .../moby/contrib/init/upstart/docker.conf | 72 + .../moby/moby/contrib/mac-install-bundle.sh | 45 + .../moby/moby/contrib/mkimage-alpine.sh | 87 + .../moby/contrib/mkimage-arch-pacman.conf | 92 + .../moby/moby/contrib/mkimage-arch.sh | 126 + .../moby/contrib/mkimage-archarm-pacman.conf | 98 + .../moby/moby/contrib/mkimage-busybox.sh | 43 + .../moby/moby/contrib/mkimage-crux.sh | 75 + .../moby/moby/contrib/mkimage-debootstrap.sh | 297 + .../moby/moby/contrib/mkimage-pld.sh | 73 + .../moby/moby/contrib/mkimage-rinse.sh | 123 + .../moby/moby/contrib/mkimage-yum.sh | 136 + .../github.com/moby/moby/contrib/mkimage.sh | 128 + .../contrib/mkimage/.febootstrap-minimize | 28 + .../moby/moby/contrib/mkimage/busybox-static | 34 + .../moby/moby/contrib/mkimage/debootstrap | 226 + .../moby/moby/contrib/mkimage/mageia-urpmi | 61 + .../moby/moby/contrib/mkimage/rinse | 25 + .../moby/moby/contrib/mkimage/solaris | 89 + .../moby/moby/contrib/nnp-test/Dockerfile | 9 + .../moby/moby/contrib/nnp-test/nnp-test.c | 10 + .../moby/moby/contrib/nuke-graph-directory.sh | 64 + .../moby/moby/contrib/project-stats.sh | 22 + .../moby/moby/contrib/report-issue.sh | 105 + .../moby/moby/contrib/reprepro/suites.sh | 12 + .../docker-engine-selinux/LICENSE | 339 + .../docker-engine-selinux/Makefile | 23 + .../docker-engine-selinux/README.md | 1 + .../docker-engine-selinux/docker.fc | 29 + .../docker-engine-selinux/docker.if | 523 + .../docker-engine-selinux/docker.te | 399 + .../docker-engine-selinux/LICENSE | 339 + .../docker-engine-selinux/Makefile | 23 + .../docker-engine-selinux/README.md | 1 + .../docker-engine-selinux/docker.fc | 33 + .../docker-engine-selinux/docker.if | 659 + .../docker-engine-selinux/docker.te | 465 + .../selinux/docker-engine-selinux/LICENSE | 340 + .../selinux/docker-engine-selinux/Makefile | 16 + .../selinux/docker-engine-selinux/docker.fc | 18 + .../selinux/docker-engine-selinux/docker.if | 461 + .../selinux/docker-engine-selinux/docker.te | 407 + .../docker-engine-selinux/docker_selinux.8.gz | Bin 0 -> 2847 bytes .../contrib/syntax/nano/Dockerfile.nanorc | 26 + .../moby/moby/contrib/syntax/nano/README.md | 32 + .../Preferences/Dockerfile.tmPreferences | 24 + .../Syntaxes/Dockerfile.tmLanguage | 143 + .../textmate/Docker.tmbundle/info.plist | 16 + .../moby/contrib/syntax/textmate/README.md | 17 + .../moby/contrib/syntax/textmate/REVIEWERS | 1 + .../moby/moby/contrib/syntax/vim/LICENSE | 22 + .../moby/moby/contrib/syntax/vim/README.md | 26 + .../contrib/syntax/vim/doc/dockerfile.txt | 18 + .../syntax/vim/ftdetect/dockerfile.vim | 1 + .../contrib/syntax/vim/syntax/dockerfile.vim | 31 + .../moby/moby/contrib/syscall-test/Dockerfile | 16 + .../moby/moby/contrib/syscall-test/acct.c | 16 + .../moby/contrib/syscall-test/appletalk.c | 12 + .../moby/moby/contrib/syscall-test/exit32.s | 7 + .../moby/moby/contrib/syscall-test/ns.c | 63 + .../moby/moby/contrib/syscall-test/raw.c | 14 + .../moby/moby/contrib/syscall-test/setgid.c | 11 + .../moby/moby/contrib/syscall-test/setuid.c | 11 + .../moby/moby/contrib/syscall-test/socket.c | 30 + .../moby/moby/contrib/syscall-test/userns.c | 63 + .../moby/moby/contrib/udev/80-docker.rules | 3 + .../moby/contrib/vagrant-docker/README.md | 50 + .../moby/moby/daemon/apparmor_default.go | 36 + .../daemon/apparmor_default_unsupported.go | 7 + vendor/github.com/moby/moby/daemon/archive.go | 436 + .../moby/moby/daemon/archive_unix.go | 58 + .../moby/moby/daemon/archive_windows.go | 18 + vendor/github.com/moby/moby/daemon/attach.go | 147 + vendor/github.com/moby/moby/daemon/auth.go | 13 + .../moby/moby/daemon/bindmount_solaris.go | 5 + .../moby/moby/daemon/bindmount_unix.go | 5 + vendor/github.com/moby/moby/daemon/cache.go | 254 + .../moby/moby/daemon/caps/utils_unix.go | 131 + vendor/github.com/moby/moby/daemon/changes.go | 31 + .../github.com/moby/moby/daemon/checkpoint.go | 110 + vendor/github.com/moby/moby/daemon/cluster.go | 12 + .../moby/moby/daemon/cluster/cluster.go | 2006 ++ .../moby/daemon/cluster/convert/container.go | 235 + .../moby/daemon/cluster/convert/network.go | 210 + .../moby/moby/daemon/cluster/convert/node.go | 89 + .../moby/daemon/cluster/convert/secret.go | 64 + .../moby/daemon/cluster/convert/service.go | 366 + .../moby/moby/daemon/cluster/convert/swarm.go | 122 + .../moby/moby/daemon/cluster/convert/task.go | 81 + .../moby/daemon/cluster/executor/backend.go | 61 + .../cluster/executor/container/adapter.go | 463 + .../cluster/executor/container/attachment.go | 81 + .../cluster/executor/container/container.go | 598 + .../cluster/executor/container/controller.go | 668 + .../cluster/executor/container/errors.go | 15 + .../cluster/executor/container/executor.go | 194 + .../cluster/executor/container/health_test.go | 102 + .../cluster/executor/container/validate.go | 39 + .../executor/container/validate_test.go | 141 + .../executor/container/validate_unix_test.go | 8 + .../container/validate_windows_test.go | 8 + .../moby/moby/daemon/cluster/filters.go | 116 + .../moby/moby/daemon/cluster/helpers.go | 112 + .../moby/moby/daemon/cluster/listen_addr.go | 278 + .../moby/daemon/cluster/listen_addr_linux.go | 91 + .../moby/daemon/cluster/listen_addr_others.go | 9 + .../daemon/cluster/listen_addr_solaris.go | 57 + .../moby/daemon/cluster/provider/network.go | 37 + .../moby/moby/daemon/cluster/secrets.go | 133 + vendor/github.com/moby/moby/daemon/commit.go | 271 + vendor/github.com/moby/moby/daemon/config.go | 525 + .../moby/moby/daemon/config_common_unix.go | 90 + .../moby/moby/daemon/config_experimental.go | 8 + .../moby/moby/daemon/config_solaris.go | 47 + .../moby/moby/daemon/config_test.go | 229 + .../moby/moby/daemon/config_unix.go | 104 + .../moby/moby/daemon/config_unix_test.go | 80 + .../moby/moby/daemon/config_windows.go | 71 + .../moby/moby/daemon/config_windows_test.go | 59 + .../github.com/moby/moby/daemon/container.go | 282 + .../moby/moby/daemon/container_operations.go | 1049 + .../daemon/container_operations_solaris.go | 46 + .../moby/daemon/container_operations_unix.go | 283 + .../daemon/container_operations_windows.go | 59 + vendor/github.com/moby/moby/daemon/create.go | 290 + .../moby/moby/daemon/create_unix.go | 81 + .../moby/moby/daemon/create_windows.go | 80 + vendor/github.com/moby/moby/daemon/daemon.go | 1321 ++ .../moby/moby/daemon/daemon_experimental.go | 7 + .../moby/moby/daemon/daemon_linux.go | 80 + .../moby/moby/daemon/daemon_linux_test.go | 104 + .../moby/moby/daemon/daemon_solaris.go | 523 + .../moby/moby/daemon/daemon_test.go | 627 + .../moby/moby/daemon/daemon_unix.go | 1237 ++ .../moby/moby/daemon/daemon_unix_test.go | 283 + .../moby/moby/daemon/daemon_unsupported.go | 5 + .../moby/moby/daemon/daemon_windows.go | 604 + .../github.com/moby/moby/daemon/debugtrap.go | 62 + .../moby/moby/daemon/debugtrap_unix.go | 33 + .../moby/moby/daemon/debugtrap_unsupported.go | 7 + .../moby/moby/daemon/debugtrap_windows.go | 52 + vendor/github.com/moby/moby/daemon/delete.go | 168 + .../moby/moby/daemon/delete_test.go | 43 + .../github.com/moby/moby/daemon/discovery.go | 215 + .../moby/moby/daemon/discovery_test.go | 164 + .../github.com/moby/moby/daemon/disk_usage.go | 100 + vendor/github.com/moby/moby/daemon/errors.go | 57 + vendor/github.com/moby/moby/daemon/events.go | 132 + .../moby/moby/daemon/events/events.go | 158 + .../moby/moby/daemon/events/events_test.go | 275 + .../moby/moby/daemon/events/filter.go | 110 + .../moby/moby/daemon/events/metrics.go | 15 + .../moby/daemon/events/testutils/testutils.go | 76 + .../moby/moby/daemon/events_test.go | 94 + vendor/github.com/moby/moby/daemon/exec.go | 280 + .../github.com/moby/moby/daemon/exec/exec.go | 118 + .../github.com/moby/moby/daemon/exec_linux.go | 50 + .../moby/moby/daemon/exec_solaris.go | 11 + .../moby/moby/daemon/exec_windows.go | 14 + vendor/github.com/moby/moby/daemon/export.go | 60 + .../moby/moby/daemon/getsize_unix.go | 41 + .../moby/moby/daemon/graphdriver/aufs/aufs.go | 669 + .../moby/daemon/graphdriver/aufs/aufs_test.go | 802 + .../moby/moby/daemon/graphdriver/aufs/dirs.go | 64 + .../moby/daemon/graphdriver/aufs/mount.go | 21 + .../daemon/graphdriver/aufs/mount_linux.go | 7 + .../graphdriver/aufs/mount_unsupported.go | 12 + .../moby/daemon/graphdriver/btrfs/btrfs.go | 530 + .../daemon/graphdriver/btrfs/btrfs_test.go | 63 + .../graphdriver/btrfs/dummy_unsupported.go | 3 + .../moby/daemon/graphdriver/btrfs/version.go | 26 + .../daemon/graphdriver/btrfs/version_none.go | 14 + .../daemon/graphdriver/btrfs/version_test.go | 13 + .../moby/moby/daemon/graphdriver/counter.go | 67 + .../daemon/graphdriver/devmapper/README.md | 96 + .../daemon/graphdriver/devmapper/deviceset.go | 2727 +++ .../graphdriver/devmapper/devmapper_doc.go | 106 + .../graphdriver/devmapper/devmapper_test.go | 110 + .../daemon/graphdriver/devmapper/driver.go | 231 + .../daemon/graphdriver/devmapper/mount.go | 89 + .../moby/moby/daemon/graphdriver/driver.go | 270 + .../moby/daemon/graphdriver/driver_freebsd.go | 19 + .../moby/daemon/graphdriver/driver_linux.go | 135 + .../moby/daemon/graphdriver/driver_solaris.go | 97 + .../daemon/graphdriver/driver_unsupported.go | 15 + .../moby/daemon/graphdriver/driver_windows.go | 14 + .../moby/moby/daemon/graphdriver/fsdiff.go | 169 + .../graphdriver/graphtest/graphbench_unix.go | 259 + .../graphdriver/graphtest/graphtest_unix.go | 358 + .../graphtest/graphtest_windows.go | 1 + .../daemon/graphdriver/graphtest/testutil.go | 342 + .../graphdriver/graphtest/testutil_unix.go | 143 + .../moby/daemon/graphdriver/overlay/copy.go | 174 + .../daemon/graphdriver/overlay/overlay.go | 462 + .../graphdriver/overlay/overlay_test.go | 93 + .../overlay/overlay_unsupported.go | 3 + .../moby/daemon/graphdriver/overlay2/check.go | 79 + .../moby/daemon/graphdriver/overlay2/mount.go | 88 + .../daemon/graphdriver/overlay2/overlay.go | 672 + .../graphdriver/overlay2/overlay_test.go | 121 + .../overlay2/overlay_unsupported.go | 3 + .../daemon/graphdriver/overlay2/randomid.go | 80 + .../graphdriver/overlayutils/overlayutils.go | 18 + .../moby/moby/daemon/graphdriver/plugin.go | 43 + .../moby/moby/daemon/graphdriver/proxy.go | 252 + .../daemon/graphdriver/quota/projectquota.go | 339 + .../graphdriver/register/register_aufs.go | 8 + .../graphdriver/register/register_btrfs.go | 8 + .../register/register_devicemapper.go | 8 + .../graphdriver/register/register_overlay.go | 9 + .../graphdriver/register/register_vfs.go | 6 + .../graphdriver/register/register_windows.go | 6 + .../graphdriver/register/register_zfs.go | 8 + .../moby/daemon/graphdriver/vfs/driver.go | 145 + .../moby/daemon/graphdriver/vfs/vfs_test.go | 37 + .../daemon/graphdriver/windows/windows.go | 903 + .../moby/daemon/graphdriver/zfs/MAINTAINERS | 2 + .../moby/moby/daemon/graphdriver/zfs/zfs.go | 417 + .../daemon/graphdriver/zfs/zfs_freebsd.go | 38 + .../moby/daemon/graphdriver/zfs/zfs_linux.go | 27 + .../daemon/graphdriver/zfs/zfs_solaris.go | 59 + .../moby/daemon/graphdriver/zfs/zfs_test.go | 35 + .../daemon/graphdriver/zfs/zfs_unsupported.go | 11 + vendor/github.com/moby/moby/daemon/health.go | 341 + .../moby/moby/daemon/health_test.go | 118 + vendor/github.com/moby/moby/daemon/image.go | 76 + .../moby/moby/daemon/image_delete.go | 412 + .../moby/moby/daemon/image_exporter.go | 25 + .../moby/moby/daemon/image_history.go | 84 + .../moby/moby/daemon/image_inspect.go | 82 + .../github.com/moby/moby/daemon/image_pull.go | 149 + .../github.com/moby/moby/daemon/image_push.go | 63 + .../github.com/moby/moby/daemon/image_tag.go | 37 + vendor/github.com/moby/moby/daemon/images.go | 331 + vendor/github.com/moby/moby/daemon/import.go | 135 + vendor/github.com/moby/moby/daemon/info.go | 180 + .../github.com/moby/moby/daemon/info_unix.go | 82 + .../moby/moby/daemon/info_windows.go | 10 + .../moby/daemon/initlayer/setup_solaris.go | 13 + .../moby/moby/daemon/initlayer/setup_unix.go | 69 + .../moby/daemon/initlayer/setup_windows.go | 13 + vendor/github.com/moby/moby/daemon/inspect.go | 264 + .../moby/moby/daemon/inspect_solaris.go | 41 + .../moby/moby/daemon/inspect_unix.go | 92 + .../moby/moby/daemon/inspect_windows.go | 41 + vendor/github.com/moby/moby/daemon/keys.go | 59 + .../moby/moby/daemon/keys_unsupported.go | 8 + vendor/github.com/moby/moby/daemon/kill.go | 164 + vendor/github.com/moby/moby/daemon/links.go | 87 + .../moby/moby/daemon/links/links.go | 141 + .../moby/moby/daemon/links/links_test.go | 213 + .../moby/moby/daemon/links_linux.go | 72 + .../moby/moby/daemon/links_linux_test.go | 98 + .../moby/moby/daemon/links_notlinux.go | 10 + vendor/github.com/moby/moby/daemon/list.go | 660 + .../github.com/moby/moby/daemon/list_unix.go | 11 + .../moby/moby/daemon/list_windows.go | 20 + .../moby/moby/daemon/logdrivers_linux.go | 15 + .../moby/moby/daemon/logdrivers_windows.go | 13 + .../daemon/logger/awslogs/cloudwatchlogs.go | 404 + .../logger/awslogs/cloudwatchlogs_test.go | 724 + .../logger/awslogs/cwlogsiface_mock_test.go | 77 + .../moby/moby/daemon/logger/context.go | 111 + .../moby/moby/daemon/logger/copier.go | 131 + .../moby/moby/daemon/logger/copier_test.go | 296 + .../daemon/logger/etwlogs/etwlogs_windows.go | 170 + .../moby/moby/daemon/logger/factory.go | 104 + .../moby/daemon/logger/fluentd/fluentd.go | 246 + .../moby/daemon/logger/gcplogs/gcplogging.go | 200 + .../moby/moby/daemon/logger/gelf/gelf.go | 209 + .../daemon/logger/gelf/gelf_unsupported.go | 3 + .../moby/daemon/logger/journald/journald.go | 122 + .../daemon/logger/journald/journald_test.go | 23 + .../logger/journald/journald_unsupported.go | 6 + .../moby/moby/daemon/logger/journald/read.go | 409 + .../daemon/logger/journald/read_native.go | 6 + .../logger/journald/read_native_compat.go | 6 + .../logger/journald/read_unsupported.go | 7 + .../daemon/logger/jsonfilelog/jsonfilelog.go | 151 + .../logger/jsonfilelog/jsonfilelog_test.go | 248 + .../moby/daemon/logger/jsonfilelog/read.go | 326 + .../daemon/logger/logentries/logentries.go | 94 + .../moby/moby/daemon/logger/logger.go | 134 + .../moby/moby/daemon/logger/logger_test.go | 26 + .../moby/daemon/logger/loggerutils/log_tag.go | 31 + .../daemon/logger/loggerutils/log_tag_test.go | 47 + .../logger/loggerutils/rotatefilewriter.go | 124 + .../moby/moby/daemon/logger/splunk/splunk.go | 621 + .../moby/daemon/logger/splunk/splunk_test.go | 1302 ++ .../logger/splunk/splunkhecmock_test.go | 157 + .../moby/moby/daemon/logger/syslog/syslog.go | 262 + .../moby/daemon/logger/syslog/syslog_test.go | 62 + vendor/github.com/moby/moby/daemon/logs.go | 146 + .../github.com/moby/moby/daemon/logs_test.go | 15 + vendor/github.com/moby/moby/daemon/metrics.go | 42 + vendor/github.com/moby/moby/daemon/monitor.go | 132 + .../moby/moby/daemon/monitor_linux.go | 19 + .../moby/moby/daemon/monitor_solaris.go | 18 + .../moby/moby/daemon/monitor_windows.go | 46 + vendor/github.com/moby/moby/daemon/mounts.go | 53 + vendor/github.com/moby/moby/daemon/names.go | 116 + vendor/github.com/moby/moby/daemon/network.go | 498 + .../moby/moby/daemon/network/settings.go | 33 + .../github.com/moby/moby/daemon/oci_linux.go | 790 + .../moby/moby/daemon/oci_solaris.go | 188 + .../moby/moby/daemon/oci_windows.go | 122 + vendor/github.com/moby/moby/daemon/pause.go | 49 + vendor/github.com/moby/moby/daemon/prune.go | 236 + vendor/github.com/moby/moby/daemon/rename.go | 122 + vendor/github.com/moby/moby/daemon/resize.go | 40 + vendor/github.com/moby/moby/daemon/restart.go | 70 + vendor/github.com/moby/moby/daemon/search.go | 94 + .../moby/moby/daemon/search_test.go | 358 + .../moby/moby/daemon/seccomp_disabled.go | 19 + .../moby/moby/daemon/seccomp_linux.go | 55 + .../moby/moby/daemon/seccomp_unsupported.go | 5 + vendor/github.com/moby/moby/daemon/secrets.go | 36 + .../moby/moby/daemon/secrets_linux.go | 7 + .../moby/moby/daemon/secrets_unsupported.go | 7 + .../moby/moby/daemon/selinux_linux.go | 17 + .../moby/moby/daemon/selinux_unsupported.go | 13 + vendor/github.com/moby/moby/daemon/start.go | 230 + .../github.com/moby/moby/daemon/start_unix.go | 31 + .../moby/moby/daemon/start_windows.go | 205 + vendor/github.com/moby/moby/daemon/stats.go | 160 + .../moby/moby/daemon/stats_collector.go | 146 + .../moby/daemon/stats_collector_solaris.go | 34 + .../moby/moby/daemon/stats_collector_unix.go | 84 + .../moby/daemon/stats_collector_windows.go | 19 + .../github.com/moby/moby/daemon/stats_unix.go | 58 + .../moby/moby/daemon/stats_windows.go | 11 + vendor/github.com/moby/moby/daemon/stop.go | 83 + .../github.com/moby/moby/daemon/top_unix.go | 126 + .../moby/moby/daemon/top_unix_test.go | 76 + .../moby/moby/daemon/top_windows.go | 53 + vendor/github.com/moby/moby/daemon/unpause.go | 38 + vendor/github.com/moby/moby/daemon/update.go | 92 + .../moby/moby/daemon/update_linux.go | 25 + .../moby/moby/daemon/update_solaris.go | 11 + .../moby/moby/daemon/update_windows.go | 13 + vendor/github.com/moby/moby/daemon/volumes.go | 389 + .../moby/moby/daemon/volumes_unit_test.go | 39 + .../moby/moby/daemon/volumes_unix.go | 219 + .../moby/moby/daemon/volumes_unix_test.go | 259 + .../moby/moby/daemon/volumes_windows.go | 47 + vendor/github.com/moby/moby/daemon/wait.go | 32 + vendor/github.com/moby/moby/daemon/workdir.go | 21 + .../moby/moby/distribution/config.go | 241 + .../moby/moby/distribution/errors.go | 159 + .../fixtures/validate_manifest/bad_manifest | 38 + .../validate_manifest/extra_data_manifest | 46 + .../fixtures/validate_manifest/good_manifest | 38 + .../moby/distribution/metadata/metadata.go | 75 + .../distribution/metadata/v1_id_service.go | 51 + .../metadata/v1_id_service_test.go | 83 + .../metadata/v2_metadata_service.go | 241 + .../metadata/v2_metadata_service_test.go | 115 + .../github.com/moby/moby/distribution/pull.go | 200 + .../moby/moby/distribution/pull_v1.go | 368 + .../moby/moby/distribution/pull_v2.go | 878 + .../moby/moby/distribution/pull_v2_test.go | 183 + .../moby/moby/distribution/pull_v2_unix.go | 13 + .../moby/moby/distribution/pull_v2_windows.go | 49 + .../github.com/moby/moby/distribution/push.go | 186 + .../moby/moby/distribution/push_v1.go | 463 + .../moby/moby/distribution/push_v2.go | 697 + .../moby/moby/distribution/push_v2_test.go | 579 + .../moby/moby/distribution/registry.go | 156 + .../moby/distribution/registry_unit_test.go | 136 + .../moby/moby/distribution/utils/progress.go | 44 + .../moby/moby/distribution/xfer/download.go | 452 + .../moby/distribution/xfer/download_test.go | 356 + .../moby/moby/distribution/xfer/transfer.go | 401 + .../moby/distribution/xfer/transfer_test.go | 410 + .../moby/moby/distribution/xfer/upload.go | 168 + .../moby/distribution/xfer/upload_test.go | 134 + .../moby/moby/dockerversion/useragent.go | 74 + .../moby/moby/dockerversion/version_lib.go | 16 + vendor/github.com/moby/moby/docs/README.md | 30 + vendor/github.com/moby/moby/docs/api/v1.18.md | 2158 ++ vendor/github.com/moby/moby/docs/api/v1.19.md | 2240 ++ vendor/github.com/moby/moby/docs/api/v1.20.md | 2393 +++ vendor/github.com/moby/moby/docs/api/v1.21.md | 2971 +++ vendor/github.com/moby/moby/docs/api/v1.22.md | 3309 +++ vendor/github.com/moby/moby/docs/api/v1.23.md | 3426 ++++ vendor/github.com/moby/moby/docs/api/v1.24.md | 5321 +++++ .../moby/moby/docs/api/version-history.md | 256 + .../github.com/moby/moby/docs/deprecated.md | 290 + .../moby/moby/docs/extend/EBS_volume.md | 164 + .../moby/moby/docs/extend/config.md | 225 + .../extend/images/authz_additional_info.png | Bin 0 -> 45916 bytes .../moby/docs/extend/images/authz_allow.png | Bin 0 -> 33505 bytes .../moby/docs/extend/images/authz_chunked.png | Bin 0 -> 33168 bytes .../extend/images/authz_connection_hijack.png | Bin 0 -> 38780 bytes .../moby/docs/extend/images/authz_deny.png | Bin 0 -> 27099 bytes .../github.com/moby/moby/docs/extend/index.md | 319 + .../moby/moby/docs/extend/legacy_plugins.md | 100 + .../moby/moby/docs/extend/plugin_api.md | 196 + .../moby/docs/extend/plugins_authorization.md | 260 + .../moby/docs/extend/plugins_graphdriver.md | 380 + .../moby/moby/docs/extend/plugins_network.md | 77 + .../moby/moby/docs/extend/plugins_volume.md | 360 + .../moby/moby/docs/reference/builder.md | 1763 ++ .../moby/docs/reference/commandline/attach.md | 152 + .../moby/docs/reference/commandline/build.md | 450 + .../moby/docs/reference/commandline/cli.md | 264 + .../moby/docs/reference/commandline/commit.md | 117 + .../docs/reference/commandline/container.md | 61 + .../reference/commandline/container_prune.md | 53 + .../moby/docs/reference/commandline/cp.md | 115 + .../moby/docs/reference/commandline/create.md | 234 + .../moby/docs/reference/commandline/deploy.md | 111 + .../moby/docs/reference/commandline/diff.md | 67 + .../docs/reference/commandline/dockerd.md | 1368 ++ .../moby/docs/reference/commandline/events.md | 349 + .../moby/docs/reference/commandline/exec.md | 91 + .../moby/docs/reference/commandline/export.md | 48 + .../docs/reference/commandline/history.md | 56 + .../moby/docs/reference/commandline/image.md | 47 + .../docs/reference/commandline/image_prune.md | 76 + .../moby/docs/reference/commandline/images.md | 342 + .../moby/docs/reference/commandline/import.md | 89 + .../moby/docs/reference/commandline/index.md | 184 + .../moby/docs/reference/commandline/info.md | 244 + .../docs/reference/commandline/inspect.md | 101 + .../moby/docs/reference/commandline/kill.md | 35 + .../moby/docs/reference/commandline/load.md | 62 + .../moby/docs/reference/commandline/login.md | 156 + .../moby/docs/reference/commandline/logout.md | 32 + .../moby/docs/reference/commandline/logs.md | 68 + .../docs/reference/commandline/network.md | 49 + .../reference/commandline/network_connect.md | 117 + .../reference/commandline/network_create.md | 206 + .../commandline/network_disconnect.md | 48 + .../reference/commandline/network_inspect.md | 212 + .../docs/reference/commandline/network_ls.md | 224 + .../reference/commandline/network_prune.md | 49 + .../docs/reference/commandline/network_rm.md | 68 + .../moby/docs/reference/commandline/node.md | 42 + .../docs/reference/commandline/node_demote.md | 47 + .../reference/commandline/node_inspect.md | 147 + .../docs/reference/commandline/node_ls.md | 134 + .../reference/commandline/node_promote.md | 45 + .../docs/reference/commandline/node_ps.md | 115 + .../docs/reference/commandline/node_rm.md | 80 + .../docs/reference/commandline/node_update.md | 77 + .../moby/docs/reference/commandline/pause.md | 48 + .../moby/docs/reference/commandline/plugin.md | 44 + .../reference/commandline/plugin_create.md | 65 + .../reference/commandline/plugin_disable.md | 69 + .../reference/commandline/plugin_enable.md | 68 + .../reference/commandline/plugin_inspect.md | 167 + .../reference/commandline/plugin_install.md | 75 + .../docs/reference/commandline/plugin_ls.md | 55 + .../docs/reference/commandline/plugin_push.md | 55 + .../docs/reference/commandline/plugin_rm.md | 61 + .../docs/reference/commandline/plugin_set.md | 114 + .../reference/commandline/plugin_upgrade.md | 88 + .../moby/docs/reference/commandline/port.md | 47 + .../moby/docs/reference/commandline/ps.md | 390 + .../moby/docs/reference/commandline/pull.md | 254 + .../moby/docs/reference/commandline/push.md | 82 + .../moby/docs/reference/commandline/rename.md | 35 + .../docs/reference/commandline/restart.md | 32 + .../moby/docs/reference/commandline/rm.md | 100 + .../moby/docs/reference/commandline/rmi.md | 105 + .../moby/docs/reference/commandline/run.md | 805 + .../moby/docs/reference/commandline/save.md | 62 + .../moby/docs/reference/commandline/search.md | 149 + .../moby/docs/reference/commandline/secret.md | 45 + .../reference/commandline/secret_create.md | 100 + .../reference/commandline/secret_inspect.md | 90 + .../docs/reference/commandline/secret_ls.md | 49 + .../docs/reference/commandline/secret_rm.md | 54 + .../docs/reference/commandline/service.md | 42 + .../reference/commandline/service_create.md | 748 + .../reference/commandline/service_inspect.md | 167 + .../reference/commandline/service_logs.md | 79 + .../docs/reference/commandline/service_ls.md | 120 + .../docs/reference/commandline/service_ps.md | 163 + .../docs/reference/commandline/service_rm.md | 60 + .../reference/commandline/service_scale.md | 104 + .../reference/commandline/service_update.md | 228 + .../moby/docs/reference/commandline/stack.md | 39 + .../reference/commandline/stack_deploy.md | 106 + .../docs/reference/commandline/stack_ls.md | 51 + .../docs/reference/commandline/stack_ps.md | 59 + .../docs/reference/commandline/stack_rm.md | 40 + .../reference/commandline/stack_services.md | 74 + .../moby/docs/reference/commandline/start.md | 34 + .../moby/docs/reference/commandline/stats.md | 127 + .../moby/docs/reference/commandline/stop.md | 37 + .../moby/docs/reference/commandline/swarm.md | 40 + .../docs/reference/commandline/swarm_init.md | 145 + .../docs/reference/commandline/swarm_join.md | 106 + .../reference/commandline/swarm_join_token.md | 109 + .../docs/reference/commandline/swarm_leave.md | 68 + .../reference/commandline/swarm_unlock.md | 44 + .../reference/commandline/swarm_unlock_key.md | 88 + .../reference/commandline/swarm_update.md | 48 + .../moby/docs/reference/commandline/system.md | 37 + .../docs/reference/commandline/system_df.md | 94 + .../reference/commandline/system_prune.md | 82 + .../moby/docs/reference/commandline/tag.md | 84 + .../moby/docs/reference/commandline/top.md | 25 + .../docs/reference/commandline/unpause.md | 44 + .../moby/docs/reference/commandline/update.md | 122 + .../docs/reference/commandline/version.md | 74 + .../moby/docs/reference/commandline/volume.md | 48 + .../reference/commandline/volume_create.md | 125 + .../reference/commandline/volume_inspect.md | 61 + .../docs/reference/commandline/volume_ls.md | 199 + .../reference/commandline/volume_prune.md | 57 + .../docs/reference/commandline/volume_rm.md | 48 + .../moby/docs/reference/commandline/wait.md | 58 + .../moby/moby/docs/reference/glossary.md | 374 + .../moby/moby/docs/reference/index.md | 21 + .../moby/moby/docs/reference/run.md | 1562 ++ .../moby/docs/static_files/contributors.png | Bin 0 -> 23100 bytes .../static_files/docker-logo-compressed.png | Bin 0 -> 4972 bytes .../github.com/moby/moby/docs/yaml/Dockerfile | 4 + .../moby/moby/docs/yaml/generate.go | 86 + vendor/github.com/moby/moby/docs/yaml/yaml.go | 212 + .../moby/moby/experimental/README.md | 54 + .../moby/experimental/checkpoint-restore.md | 88 + .../experimental/docker-stacks-and-bundles.md | 205 + .../moby/experimental/images/ipvlan-l3.gliffy | 1 + .../moby/experimental/images/ipvlan-l3.png | Bin 0 -> 18260 bytes .../moby/experimental/images/ipvlan-l3.svg | 1 + .../images/ipvlan_l2_simple.gliffy | 1 + .../experimental/images/ipvlan_l2_simple.png | Bin 0 -> 20145 bytes .../experimental/images/ipvlan_l2_simple.svg | 1 + .../images/macvlan-bridge-ipvlan-l2.gliffy | 1 + .../images/macvlan-bridge-ipvlan-l2.png | Bin 0 -> 14527 bytes .../images/macvlan-bridge-ipvlan-l2.svg | 1 + .../images/multi_tenant_8021q_vlans.gliffy | 1 + .../images/multi_tenant_8021q_vlans.png | Bin 0 -> 17879 bytes .../images/multi_tenant_8021q_vlans.svg | 1 + .../images/vlans-deeper-look.gliffy | 1 + .../experimental/images/vlans-deeper-look.png | Bin 0 -> 38837 bytes .../experimental/images/vlans-deeper-look.svg | 1 + .../moby/moby/experimental/vlan-networks.md | 475 + .../moby/moby/hack/Jenkins/W2L/postbuild.sh | 35 + .../moby/moby/hack/Jenkins/W2L/setup.sh | 309 + .../moby/moby/hack/Jenkins/readme.md | 3 + vendor/github.com/moby/moby/hack/dind | 33 + .../moby/hack/dockerfile/binaries-commits | 11 + .../moby/hack/dockerfile/install-binaries.sh | 123 + .../moby/moby/hack/generate-authors.sh | 15 + .../moby/moby/hack/generate-swagger-api.sh | 22 + vendor/github.com/moby/moby/hack/install.sh | 540 + vendor/github.com/moby/moby/hack/make.ps1 | 408 + vendor/github.com/moby/moby/hack/make.sh | 304 + vendor/github.com/moby/moby/hack/make/.binary | 48 + .../moby/moby/hack/make/.binary-setup | 10 + .../moby/moby/hack/make/.build-deb/compat | 1 + .../moby/moby/hack/make/.build-deb/control | 29 + .../.build-deb/docker-engine.bash-completion | 1 + .../.build-deb/docker-engine.docker.default | 1 + .../make/.build-deb/docker-engine.docker.init | 1 + .../.build-deb/docker-engine.docker.upstart | 1 + .../make/.build-deb/docker-engine.install | 12 + .../make/.build-deb/docker-engine.manpages | 1 + .../make/.build-deb/docker-engine.postinst | 20 + .../hack/make/.build-deb/docker-engine.udev | 1 + .../moby/moby/hack/make/.build-deb/docs | 1 + .../moby/moby/hack/make/.build-deb/rules | 55 + .../.build-rpm/docker-engine-selinux.spec | 96 + .../hack/make/.build-rpm/docker-engine.spec | 254 + .../moby/moby/hack/make/.detect-daemon-osarch | 69 + .../moby/moby/hack/make/.ensure-emptyfs | 23 + .../moby/moby/hack/make/.go-autogen | 86 + .../moby/moby/hack/make/.go-autogen.ps1 | 91 + .../moby/hack/make/.integration-daemon-setup | 7 + .../moby/hack/make/.integration-daemon-start | 116 + .../moby/hack/make/.integration-daemon-stop | 27 + .../moby/hack/make/.integration-test-helpers | 79 + .../hack/make/.resources-windows/common.rc | 38 + .../.resources-windows/docker.exe.manifest | 18 + .../hack/make/.resources-windows/docker.ico | Bin 0 -> 370070 bytes .../hack/make/.resources-windows/docker.png | Bin 0 -> 658195 bytes .../hack/make/.resources-windows/docker.rc | 3 + .../hack/make/.resources-windows/dockerd.rc | 4 + .../make/.resources-windows/event_messages.mc | 39 + .../hack/make/.resources-windows/resources.go | 18 + .../github.com/moby/moby/hack/make/README.md | 17 + vendor/github.com/moby/moby/hack/make/binary | 15 + .../moby/moby/hack/make/binary-client | 12 + .../moby/moby/hack/make/binary-daemon | 13 + .../github.com/moby/moby/hack/make/build-deb | 91 + .../hack/make/build-integration-test-binary | 11 + .../github.com/moby/moby/hack/make/build-rpm | 148 + .../moby/moby/hack/make/clean-apt-repo | 43 + .../moby/moby/hack/make/clean-yum-repo | 20 + vendor/github.com/moby/moby/hack/make/cover | 15 + vendor/github.com/moby/moby/hack/make/cross | 46 + .../github.com/moby/moby/hack/make/dynbinary | 15 + .../moby/moby/hack/make/dynbinary-client | 12 + .../moby/moby/hack/make/dynbinary-daemon | 12 + .../moby/hack/make/generate-index-listing | 74 + .../moby/moby/hack/make/install-binary | 12 + .../moby/moby/hack/make/install-binary-client | 10 + .../moby/moby/hack/make/install-binary-daemon | 16 + .../moby/moby/hack/make/install-script | 63 + .../moby/moby/hack/make/release-deb | 163 + .../moby/moby/hack/make/release-rpm | 71 + vendor/github.com/moby/moby/hack/make/run | 44 + .../github.com/moby/moby/hack/make/sign-repos | 65 + .../moby/moby/hack/make/test-deb-install | 71 + .../moby/moby/hack/make/test-docker-py | 20 + .../moby/moby/hack/make/test-install-script | 31 + .../moby/moby/hack/make/test-integration-cli | 28 + .../moby/hack/make/test-integration-shell | 7 + .../moby/moby/hack/make/test-old-apt-repo | 29 + .../github.com/moby/moby/hack/make/test-unit | 55 + vendor/github.com/moby/moby/hack/make/tgz | 92 + vendor/github.com/moby/moby/hack/make/ubuntu | 190 + .../moby/moby/hack/make/update-apt-repo | 70 + vendor/github.com/moby/moby/hack/make/win | 20 + .../moby/moby/hack/make/yaml-docs-generator | 12 + vendor/github.com/moby/moby/hack/release.sh | 325 + .../moby/moby/hack/validate/.swagger-yamllint | 4 + .../moby/moby/hack/validate/.validate | 30 + vendor/github.com/moby/moby/hack/validate/all | 8 + .../hack/validate/changelog-date-descending | 12 + .../moby/hack/validate/changelog-well-formed | 25 + .../moby/moby/hack/validate/compose-bindata | 28 + vendor/github.com/moby/moby/hack/validate/dco | 55 + .../moby/moby/hack/validate/default | 19 + .../moby/moby/hack/validate/default-seccomp | 28 + .../github.com/moby/moby/hack/validate/gofmt | 33 + .../github.com/moby/moby/hack/validate/lint | 31 + .../moby/moby/hack/validate/pkg-imports | 33 + .../moby/moby/hack/validate/swagger | 13 + .../moby/moby/hack/validate/swagger-gen | 29 + .../moby/moby/hack/validate/test-imports | 38 + .../github.com/moby/moby/hack/validate/toml | 31 + .../github.com/moby/moby/hack/validate/vendor | 30 + vendor/github.com/moby/moby/hack/validate/vet | 32 + vendor/github.com/moby/moby/hack/vendor.sh | 15 + vendor/github.com/moby/moby/hooks/post_build | 19 + vendor/github.com/moby/moby/image/fs.go | 173 + vendor/github.com/moby/moby/image/fs_test.go | 384 + vendor/github.com/moby/moby/image/image.go | 150 + .../github.com/moby/moby/image/image_test.go | 59 + vendor/github.com/moby/moby/image/rootfs.go | 44 + .../github.com/moby/moby/image/spec/v1.1.md | 637 + .../github.com/moby/moby/image/spec/v1.2.md | 696 + vendor/github.com/moby/moby/image/spec/v1.md | 573 + vendor/github.com/moby/moby/image/store.go | 295 + .../github.com/moby/moby/image/store_test.go | 300 + .../moby/moby/image/tarexport/load.go | 390 + .../moby/moby/image/tarexport/save.go | 355 + .../moby/moby/image/tarexport/tarexport.go | 47 + .../github.com/moby/moby/image/v1/imagev1.go | 156 + .../moby/moby/image/v1/imagev1_test.go | 55 + .../moby/integration-cli/benchmark_test.go | 95 + .../moby/moby/integration-cli/check_test.go | 383 + .../moby/moby/integration-cli/daemon.go | 608 + .../moby/moby/integration-cli/daemon_swarm.go | 419 + .../moby/integration-cli/daemon_swarm_hack.go | 20 + .../moby/moby/integration-cli/daemon_unix.go | 35 + .../moby/integration-cli/daemon_windows.go | 53 + .../integration-cli/docker_api_attach_test.go | 210 + .../integration-cli/docker_api_auth_test.go | 25 + .../integration-cli/docker_api_build_test.go | 254 + .../docker_api_containers_test.go | 1961 ++ .../integration-cli/docker_api_create_test.go | 84 + .../integration-cli/docker_api_events_test.go | 73 + .../docker_api_exec_resize_test.go | 103 + .../integration-cli/docker_api_exec_test.go | 198 + .../integration-cli/docker_api_images_test.go | 165 + .../integration-cli/docker_api_info_test.go | 53 + .../docker_api_inspect_test.go | 183 + .../docker_api_inspect_unix_test.go | 35 + .../integration-cli/docker_api_logs_test.go | 87 + .../docker_api_network_test.go | 353 + .../integration-cli/docker_api_resize_test.go | 44 + .../docker_api_service_update_test.go | 39 + .../integration-cli/docker_api_stats_test.go | 310 + .../docker_api_stats_unix_test.go | 41 + .../integration-cli/docker_api_swarm_test.go | 1391 ++ .../moby/integration-cli/docker_api_test.go | 118 + .../docker_api_update_unix_test.go | 35 + .../docker_api_version_test.go | 23 + .../docker_api_volumes_test.go | 89 + .../integration-cli/docker_cli_attach_test.go | 168 + .../docker_cli_attach_unix_test.go | 237 + .../docker_cli_authz_plugin_v2_test.go | 133 + .../docker_cli_authz_unix_test.go | 477 + .../integration-cli/docker_cli_build_test.go | 7420 +++++++ .../docker_cli_build_unix_test.go | 207 + .../docker_cli_by_digest_test.go | 693 + .../integration-cli/docker_cli_commit_test.go | 157 + .../integration-cli/docker_cli_config_test.go | 140 + .../docker_cli_cp_from_container_test.go | 488 + .../integration-cli/docker_cli_cp_test.go | 660 + .../docker_cli_cp_to_container_test.go | 599 + .../docker_cli_cp_to_container_unix_test.go | 39 + .../integration-cli/docker_cli_cp_utils.go | 303 + .../integration-cli/docker_cli_create_test.go | 513 + .../docker_cli_daemon_plugins_test.go | 317 + .../integration-cli/docker_cli_daemon_test.go | 2988 +++ .../integration-cli/docker_cli_diff_test.go | 98 + .../integration-cli/docker_cli_events_test.go | 794 + .../docker_cli_events_unix_test.go | 486 + .../integration-cli/docker_cli_exec_test.go | 601 + .../docker_cli_exec_unix_test.go | 93 + .../docker_cli_experimental_test.go | 36 + .../docker_cli_export_import_test.go | 49 + ...cker_cli_external_graphdriver_unix_test.go | 405 + ...er_cli_external_volume_driver_unix_test.go | 627 + .../integration-cli/docker_cli_health_test.go | 169 + .../integration-cli/docker_cli_help_test.go | 321 + .../docker_cli_history_test.go | 121 + .../integration-cli/docker_cli_images_test.go | 364 + .../integration-cli/docker_cli_import_test.go | 150 + .../integration-cli/docker_cli_info_test.go | 234 + .../docker_cli_info_unix_test.go | 15 + .../docker_cli_inspect_test.go | 466 + .../integration-cli/docker_cli_kill_test.go | 134 + .../integration-cli/docker_cli_links_test.go | 240 + .../docker_cli_links_unix_test.go | 26 + .../integration-cli/docker_cli_login_test.go | 44 + .../integration-cli/docker_cli_logout_test.go | 100 + .../docker_cli_logs_bench_test.go | 32 + .../integration-cli/docker_cli_logs_test.go | 328 + .../integration-cli/docker_cli_nat_test.go | 93 + .../docker_cli_netmode_test.go | 94 + .../docker_cli_network_unix_test.go | 1791 ++ .../docker_cli_oom_killed_test.go | 30 + .../integration-cli/docker_cli_pause_test.go | 66 + .../docker_cli_plugins_test.go | 393 + .../integration-cli/docker_cli_port_test.go | 319 + .../integration-cli/docker_cli_proxy_test.go | 53 + .../docker_cli_prune_unix_test.go | 91 + .../integration-cli/docker_cli_ps_test.go | 952 + .../docker_cli_pull_local_test.go | 492 + .../integration-cli/docker_cli_pull_test.go | 274 + .../docker_cli_pull_trusted_test.go | 365 + .../integration-cli/docker_cli_push_test.go | 715 + .../docker_cli_registry_user_agent_test.go | 120 + .../integration-cli/docker_cli_rename_test.go | 138 + .../docker_cli_restart_test.go | 278 + .../integration-cli/docker_cli_rm_test.go | 86 + .../integration-cli/docker_cli_rmi_test.go | 352 + .../integration-cli/docker_cli_run_test.go | 4713 +++++ .../docker_cli_run_unix_test.go | 1604 ++ .../docker_cli_save_load_test.go | 383 + .../docker_cli_save_load_unix_test.go | 109 + .../integration-cli/docker_cli_search_test.go | 131 + .../docker_cli_secret_create_test.go | 131 + .../docker_cli_secret_inspect_test.go | 68 + .../docker_cli_service_create_test.go | 175 + .../docker_cli_service_health_test.go | 191 + ...cker_cli_service_logs_experimental_test.go | 96 + .../docker_cli_service_scale_test.go | 57 + .../docker_cli_service_update_test.go | 130 + .../integration-cli/docker_cli_sni_test.go | 44 + .../integration-cli/docker_cli_stack_test.go | 186 + .../integration-cli/docker_cli_start_test.go | 199 + .../integration-cli/docker_cli_stats_test.go | 178 + .../integration-cli/docker_cli_stop_test.go | 17 + .../integration-cli/docker_cli_swarm_test.go | 1278 ++ .../docker_cli_swarm_unix_test.go | 52 + .../integration-cli/docker_cli_tag_test.go | 225 + .../integration-cli/docker_cli_top_test.go | 73 + .../integration-cli/docker_cli_update_test.go | 41 + .../docker_cli_update_unix_test.go | 283 + .../integration-cli/docker_cli_userns_test.go | 98 + .../docker_cli_v2_only_test.go | 125 + .../docker_cli_version_test.go | 58 + .../integration-cli/docker_cli_volume_test.go | 627 + .../integration-cli/docker_cli_wait_test.go | 97 + .../docker_deprecated_api_v124_test.go | 227 + .../docker_deprecated_api_v124_unix_test.go | 30 + .../docker_experimental_network_test.go | 594 + .../docker_hub_pull_suite_test.go | 90 + .../moby/integration-cli/docker_test_vars.go | 165 + .../moby/moby/integration-cli/docker_utils.go | 1607 ++ .../moby/moby/integration-cli/events_utils.go | 206 + .../moby/moby/integration-cli/fixtures.go | 69 + .../auth/docker-credential-shell-test | 55 + .../fixtures/credentialspecs/valid.json | 25 + .../fixtures/deploy/default.yaml | 9 + .../fixtures/deploy/remove.yaml | 11 + .../fixtures/deploy/secrets.yaml | 20 + .../integration-cli/fixtures/https/ca.pem | 23 + .../fixtures/https/client-cert.pem | 73 + .../fixtures/https/client-key.pem | 16 + .../fixtures/https/client-rogue-cert.pem | 73 + .../fixtures/https/client-rogue-key.pem | 16 + .../fixtures/https/server-cert.pem | 76 + .../fixtures/https/server-key.pem | 16 + .../fixtures/https/server-rogue-cert.pem | 76 + .../fixtures/https/server-rogue-key.pem | 16 + .../fixtures/load/emptyLayer.tar | Bin 0 -> 30720 bytes .../integration-cli/fixtures/load/frozen.go | 182 + .../fixtures/notary/delgkey1.crt | 21 + .../fixtures/notary/delgkey1.key | 27 + .../fixtures/notary/delgkey2.crt | 21 + .../fixtures/notary/delgkey2.key | 27 + .../fixtures/notary/delgkey3.crt | 21 + .../fixtures/notary/delgkey3.key | 27 + .../fixtures/notary/delgkey4.crt | 21 + .../fixtures/notary/delgkey4.key | 27 + .../integration-cli/fixtures/notary/gen.sh | 18 + .../fixtures/notary/localhost.cert | 19 + .../fixtures/notary/localhost.key | 27 + .../fixtures/registry/cert.pem | 21 + .../integration-cli/fixtures/secrets/default | 1 + .../integration-cli/fixtures_linux_daemon.go | 143 + .../moby/moby/integration-cli/npipe.go | 12 + .../moby/integration-cli/npipe_windows.go | 12 + .../moby/moby/integration-cli/registry.go | 177 + .../moby/integration-cli/registry_mock.go | 55 + .../moby/moby/integration-cli/requirements.go | 243 + .../moby/integration-cli/requirements_unix.go | 159 + .../moby/moby/integration-cli/test_vars.go | 11 + .../moby/integration-cli/test_vars_exec.go | 8 + .../moby/integration-cli/test_vars_noexec.go | 8 + .../integration-cli/test_vars_noseccomp.go | 8 + .../moby/integration-cli/test_vars_seccomp.go | 8 + .../moby/integration-cli/test_vars_unix.go | 14 + .../moby/integration-cli/test_vars_windows.go | 15 + .../moby/moby/integration-cli/trust_server.go | 344 + .../moby/moby/integration-cli/utils.go | 79 + .../moby/moby/keys/launchpad-ppa-zfs.asc | 13 + vendor/github.com/moby/moby/layer/empty.go | 56 + .../github.com/moby/moby/layer/empty_test.go | 46 + .../github.com/moby/moby/layer/filestore.go | 354 + .../moby/moby/layer/filestore_test.go | 104 + vendor/github.com/moby/moby/layer/layer.go | 275 + .../github.com/moby/moby/layer/layer_store.go | 684 + .../moby/moby/layer/layer_store_windows.go | 11 + .../github.com/moby/moby/layer/layer_test.go | 771 + .../github.com/moby/moby/layer/layer_unix.go | 9 + .../moby/moby/layer/layer_unix_test.go | 71 + .../moby/moby/layer/layer_windows.go | 98 + .../github.com/moby/moby/layer/migration.go | 256 + .../moby/moby/layer/migration_test.go | 435 + .../github.com/moby/moby/layer/mount_test.go | 230 + .../moby/moby/layer/mounted_layer.go | 99 + vendor/github.com/moby/moby/layer/ro_layer.go | 192 + .../moby/moby/layer/ro_layer_windows.go | 9 + .../moby/moby/libcontainerd/client.go | 46 + .../moby/moby/libcontainerd/client_linux.go | 605 + .../moby/moby/libcontainerd/client_solaris.go | 101 + .../moby/moby/libcontainerd/client_unix.go | 142 + .../moby/moby/libcontainerd/client_windows.go | 645 + .../moby/moby/libcontainerd/container.go | 13 + .../moby/moby/libcontainerd/container_unix.go | 250 + .../moby/libcontainerd/container_windows.go | 311 + .../moby/moby/libcontainerd/oom_linux.go | 31 + .../moby/moby/libcontainerd/oom_solaris.go | 5 + .../moby/libcontainerd/pausemonitor_unix.go | 42 + .../moby/moby/libcontainerd/process.go | 18 + .../moby/moby/libcontainerd/process_unix.go | 107 + .../moby/libcontainerd/process_windows.go | 51 + .../moby/moby/libcontainerd/queue_unix.go | 31 + .../moby/moby/libcontainerd/remote.go | 20 + .../moby/moby/libcontainerd/remote_unix.go | 544 + .../moby/moby/libcontainerd/remote_windows.go | 36 + .../moby/moby/libcontainerd/types.go | 75 + .../moby/moby/libcontainerd/types_linux.go | 49 + .../moby/moby/libcontainerd/types_solaris.go | 43 + .../moby/moby/libcontainerd/types_windows.go | 79 + .../moby/moby/libcontainerd/utils_linux.go | 62 + .../moby/moby/libcontainerd/utils_solaris.go | 27 + .../moby/moby/libcontainerd/utils_windows.go | 46 + .../moby/libcontainerd/utils_windows_test.go | 13 + vendor/github.com/moby/moby/man/Dockerfile | 24 + .../github.com/moby/moby/man/Dockerfile.5.md | 474 + .../moby/moby/man/Dockerfile.aarch64 | 25 + .../github.com/moby/moby/man/Dockerfile.armhf | 43 + .../moby/moby/man/Dockerfile.ppc64le | 35 + .../github.com/moby/moby/man/Dockerfile.s390x | 35 + vendor/github.com/moby/moby/man/README.md | 15 + .../moby/moby/man/docker-attach.1.md | 99 + .../moby/moby/man/docker-build.1.md | 340 + .../moby/moby/man/docker-commit.1.md | 71 + .../moby/moby/man/docker-config-json.5.md | 72 + .../github.com/moby/moby/man/docker-cp.1.md | 175 + .../moby/moby/man/docker-create.1.md | 553 + .../github.com/moby/moby/man/docker-diff.1.md | 61 + .../moby/moby/man/docker-events.1.md | 180 + .../github.com/moby/moby/man/docker-exec.1.md | 71 + .../moby/moby/man/docker-export.1.md | 46 + .../moby/moby/man/docker-history.1.md | 52 + .../moby/moby/man/docker-images.1.md | 154 + .../moby/moby/man/docker-import.1.md | 72 + .../github.com/moby/moby/man/docker-info.1.md | 187 + .../moby/moby/man/docker-inspect.1.md | 323 + .../github.com/moby/moby/man/docker-kill.1.md | 28 + .../github.com/moby/moby/man/docker-load.1.md | 56 + .../moby/moby/man/docker-login.1.md | 53 + .../moby/moby/man/docker-logout.1.md | 32 + .../github.com/moby/moby/man/docker-logs.1.md | 71 + .../moby/moby/man/docker-network-connect.1.md | 66 + .../moby/moby/man/docker-network-create.1.md | 187 + .../moby/man/docker-network-disconnect.1.md | 36 + .../moby/moby/man/docker-network-inspect.1.md | 112 + .../moby/moby/man/docker-network-ls.1.md | 188 + .../moby/moby/man/docker-network-rm.1.md | 43 + .../moby/moby/man/docker-pause.1.md | 32 + .../github.com/moby/moby/man/docker-port.1.md | 47 + .../github.com/moby/moby/man/docker-ps.1.md | 145 + .../github.com/moby/moby/man/docker-pull.1.md | 220 + .../github.com/moby/moby/man/docker-push.1.md | 63 + .../moby/moby/man/docker-rename.1.md | 15 + .../moby/moby/man/docker-restart.1.md | 26 + .../github.com/moby/moby/man/docker-rm.1.md | 72 + .../github.com/moby/moby/man/docker-rmi.1.md | 42 + .../github.com/moby/moby/man/docker-run.1.md | 1055 + .../github.com/moby/moby/man/docker-save.1.md | 45 + .../moby/moby/man/docker-search.1.md | 70 + .../moby/moby/man/docker-start.1.md | 39 + .../moby/moby/man/docker-stats.1.md | 57 + .../github.com/moby/moby/man/docker-stop.1.md | 30 + .../github.com/moby/moby/man/docker-tag.1.md | 77 + .../github.com/moby/moby/man/docker-top.1.md | 36 + .../moby/moby/man/docker-unpause.1.md | 28 + .../moby/moby/man/docker-update.1.md | 171 + .../moby/moby/man/docker-version.1.md | 62 + .../github.com/moby/moby/man/docker-wait.1.md | 30 + vendor/github.com/moby/moby/man/docker.1.md | 237 + vendor/github.com/moby/moby/man/dockerd.8.md | 710 + vendor/github.com/moby/moby/man/generate.go | 43 + vendor/github.com/moby/moby/man/generate.sh | 15 + vendor/github.com/moby/moby/man/glide.lock | 52 + vendor/github.com/moby/moby/man/glide.yaml | 12 + vendor/github.com/moby/moby/man/md2man-all.sh | 22 + .../moby/moby/migrate/v1/migratev1.go | 504 + .../moby/moby/migrate/v1/migratev1_test.go | 438 + .../moby/moby/oci/defaults_linux.go | 168 + .../moby/moby/oci/defaults_solaris.go | 20 + .../moby/moby/oci/defaults_windows.go | 19 + .../github.com/moby/moby/oci/devices_linux.go | 86 + .../moby/moby/oci/devices_unsupported.go | 20 + vendor/github.com/moby/moby/oci/namespaces.go | 16 + vendor/github.com/moby/moby/opts/hosts.go | 151 + .../github.com/moby/moby/opts/hosts_test.go | 148 + .../github.com/moby/moby/opts/hosts_unix.go | 8 + .../moby/moby/opts/hosts_windows.go | 6 + vendor/github.com/moby/moby/opts/ip.go | 47 + vendor/github.com/moby/moby/opts/ip_test.go | 54 + vendor/github.com/moby/moby/opts/mount.go | 171 + .../github.com/moby/moby/opts/mount_test.go | 184 + vendor/github.com/moby/moby/opts/opts.go | 360 + vendor/github.com/moby/moby/opts/opts_test.go | 232 + vendor/github.com/moby/moby/opts/opts_unix.go | 6 + .../github.com/moby/moby/opts/opts_windows.go | 56 + vendor/github.com/moby/moby/opts/port.go | 146 + vendor/github.com/moby/moby/opts/port_test.go | 259 + .../github.com/moby/moby/opts/quotedstring.go | 37 + .../moby/moby/opts/quotedstring_test.go | 28 + vendor/github.com/moby/moby/opts/secret.go | 107 + .../github.com/moby/moby/opts/secret_test.go | 79 + vendor/github.com/moby/moby/pkg/README.md | 11 + .../moby/moby/pkg/aaparser/aaparser.go | 91 + .../moby/moby/pkg/aaparser/aaparser_test.go | 73 + .../moby/moby/pkg/archive/README.md | 1 + .../moby/moby/pkg/archive/archive.go | 1175 ++ .../moby/moby/pkg/archive/archive_linux.go | 95 + .../moby/pkg/archive/archive_linux_test.go | 187 + .../moby/moby/pkg/archive/archive_other.go | 7 + .../moby/moby/pkg/archive/archive_test.go | 1162 ++ .../moby/moby/pkg/archive/archive_unix.go | 118 + .../moby/pkg/archive/archive_unix_test.go | 249 + .../moby/moby/pkg/archive/archive_windows.go | 70 + .../moby/pkg/archive/archive_windows_test.go | 91 + .../moby/moby/pkg/archive/changes.go | 446 + .../moby/moby/pkg/archive/changes_linux.go | 312 + .../moby/moby/pkg/archive/changes_other.go | 97 + .../moby/pkg/archive/changes_posix_test.go | 132 + .../moby/moby/pkg/archive/changes_test.go | 572 + .../moby/moby/pkg/archive/changes_unix.go | 36 + .../moby/moby/pkg/archive/changes_windows.go | 30 + .../github.com/moby/moby/pkg/archive/copy.go | 458 + .../moby/moby/pkg/archive/copy_unix.go | 11 + .../moby/moby/pkg/archive/copy_unix_test.go | 978 + .../moby/moby/pkg/archive/copy_windows.go | 9 + .../github.com/moby/moby/pkg/archive/diff.go | 279 + .../moby/moby/pkg/archive/diff_test.go | 386 + .../moby/moby/pkg/archive/example_changes.go | 97 + .../moby/moby/pkg/archive/testdata/broken.tar | Bin 0 -> 13824 bytes .../moby/moby/pkg/archive/time_linux.go | 16 + .../moby/moby/pkg/archive/time_unsupported.go | 16 + .../moby/moby/pkg/archive/utils_test.go | 166 + .../moby/moby/pkg/archive/whiteouts.go | 23 + .../github.com/moby/moby/pkg/archive/wrap.go | 59 + .../moby/moby/pkg/archive/wrap_test.go | 98 + .../moby/moby/pkg/authorization/api.go | 88 + .../moby/moby/pkg/authorization/authz.go | 186 + .../moby/pkg/authorization/authz_unix_test.go | 282 + .../moby/moby/pkg/authorization/middleware.go | 84 + .../moby/moby/pkg/authorization/plugin.go | 112 + .../moby/moby/pkg/authorization/response.go | 203 + .../moby/moby/pkg/broadcaster/unbuffered.go | 49 + .../moby/pkg/broadcaster/unbuffered_test.go | 162 + .../moby/moby/pkg/chrootarchive/archive.go | 97 + .../moby/pkg/chrootarchive/archive_test.go | 394 + .../moby/pkg/chrootarchive/archive_unix.go | 86 + .../moby/pkg/chrootarchive/archive_windows.go | 22 + .../moby/pkg/chrootarchive/chroot_linux.go | 108 + .../moby/pkg/chrootarchive/chroot_unix.go | 12 + .../moby/moby/pkg/chrootarchive/diff.go | 23 + .../moby/moby/pkg/chrootarchive/diff_unix.go | 130 + .../moby/pkg/chrootarchive/diff_windows.go | 45 + .../moby/moby/pkg/chrootarchive/init_unix.go | 28 + .../moby/pkg/chrootarchive/init_windows.go | 4 + .../moby/moby/pkg/devicemapper/devmapper.go | 828 + .../moby/pkg/devicemapper/devmapper_log.go | 35 + .../pkg/devicemapper/devmapper_wrapper.go | 251 + .../devmapper_wrapper_deferred_remove.go | 34 + .../devmapper_wrapper_no_deferred_remove.go | 15 + .../moby/moby/pkg/devicemapper/ioctl.go | 27 + .../moby/moby/pkg/devicemapper/log.go | 11 + .../moby/moby/pkg/directory/directory.go | 26 + .../moby/moby/pkg/directory/directory_test.go | 192 + .../moby/moby/pkg/directory/directory_unix.go | 48 + .../moby/pkg/directory/directory_windows.go | 37 + .../moby/moby/pkg/discovery/README.md | 41 + .../moby/moby/pkg/discovery/backends.go | 107 + .../moby/moby/pkg/discovery/discovery.go | 35 + .../moby/moby/pkg/discovery/discovery_test.go | 137 + .../moby/moby/pkg/discovery/entry.go | 94 + .../moby/moby/pkg/discovery/file/file.go | 107 + .../moby/moby/pkg/discovery/file/file_test.go | 114 + .../moby/moby/pkg/discovery/generator.go | 35 + .../moby/moby/pkg/discovery/generator_test.go | 53 + .../moby/moby/pkg/discovery/kv/kv.go | 192 + .../moby/moby/pkg/discovery/kv/kv_test.go | 324 + .../moby/moby/pkg/discovery/memory/memory.go | 93 + .../moby/pkg/discovery/memory/memory_test.go | 48 + .../moby/moby/pkg/discovery/nodes/nodes.go | 54 + .../moby/pkg/discovery/nodes/nodes_test.go | 51 + .../moby/moby/pkg/filenotify/filenotify.go | 40 + .../moby/moby/pkg/filenotify/fsnotify.go | 18 + .../moby/moby/pkg/filenotify/poller.go | 204 + .../moby/moby/pkg/filenotify/poller_test.go | 119 + .../moby/moby/pkg/fileutils/fileutils.go | 283 + .../moby/pkg/fileutils/fileutils_darwin.go | 27 + .../moby/pkg/fileutils/fileutils_solaris.go | 7 + .../moby/moby/pkg/fileutils/fileutils_test.go | 585 + .../moby/moby/pkg/fileutils/fileutils_unix.go | 22 + .../moby/pkg/fileutils/fileutils_windows.go | 7 + .../moby/moby/pkg/fsutils/fsutils_linux.go | 89 + .../moby/pkg/fsutils/fsutils_linux_test.go | 91 + .../moby/moby/pkg/gitutils/gitutils.go | 100 + .../moby/moby/pkg/gitutils/gitutils_test.go | 220 + .../moby/pkg/graphdb/conn_sqlite3_linux.go | 19 + .../moby/moby/pkg/graphdb/graphdb_linux.go | 551 + .../moby/pkg/graphdb/graphdb_linux_test.go | 721 + .../moby/moby/pkg/graphdb/sort_linux.go | 27 + .../moby/moby/pkg/graphdb/sort_linux_test.go | 29 + .../moby/moby/pkg/graphdb/unsupported.go | 3 + .../moby/moby/pkg/graphdb/utils_linux.go | 32 + .../moby/moby/pkg/homedir/homedir.go | 39 + .../moby/moby/pkg/homedir/homedir_test.go | 24 + .../moby/moby/pkg/httputils/httputils.go | 56 + .../moby/moby/pkg/httputils/httputils_test.go | 115 + .../moby/moby/pkg/httputils/mimetype.go | 30 + .../moby/moby/pkg/httputils/mimetype_test.go | 13 + .../pkg/httputils/resumablerequestreader.go | 95 + .../httputils/resumablerequestreader_test.go | 307 + .../moby/moby/pkg/idtools/idtools.go | 197 + .../moby/moby/pkg/idtools/idtools_unix.go | 207 + .../moby/pkg/idtools/idtools_unix_test.go | 271 + .../moby/moby/pkg/idtools/idtools_windows.go | 25 + .../moby/pkg/idtools/usergroupadd_linux.go | 164 + .../pkg/idtools/usergroupadd_unsupported.go | 12 + .../moby/moby/pkg/idtools/utils_unix.go | 32 + .../moby/pkg/integration/checker/checker.go | 46 + .../moby/moby/pkg/integration/cmd/command.go | 294 + .../moby/pkg/integration/cmd/command_test.go | 118 + .../moby/moby/pkg/integration/utils.go | 227 + .../moby/moby/pkg/integration/utils_test.go | 363 + .../moby/moby/pkg/ioutils/buffer.go | 51 + .../moby/moby/pkg/ioutils/buffer_test.go | 75 + .../moby/moby/pkg/ioutils/bytespipe.go | 186 + .../moby/moby/pkg/ioutils/bytespipe_test.go | 159 + .../github.com/moby/moby/pkg/ioutils/fmt.go | 22 + .../moby/moby/pkg/ioutils/fmt_test.go | 17 + .../moby/moby/pkg/ioutils/fswriters.go | 162 + .../moby/moby/pkg/ioutils/fswriters_test.go | 132 + .../moby/moby/pkg/ioutils/multireader.go | 223 + .../moby/moby/pkg/ioutils/multireader_test.go | 211 + .../moby/moby/pkg/ioutils/readers.go | 154 + .../moby/moby/pkg/ioutils/readers_test.go | 94 + .../moby/moby/pkg/ioutils/temp_unix.go | 10 + .../moby/moby/pkg/ioutils/temp_windows.go | 18 + .../moby/moby/pkg/ioutils/writeflusher.go | 92 + .../moby/moby/pkg/ioutils/writers.go | 66 + .../moby/moby/pkg/ioutils/writers_test.go | 65 + .../moby/moby/pkg/jsonlog/jsonlog.go | 42 + .../moby/pkg/jsonlog/jsonlog_marshalling.go | 178 + .../pkg/jsonlog/jsonlog_marshalling_test.go | 34 + .../moby/moby/pkg/jsonlog/jsonlogbytes.go | 122 + .../moby/pkg/jsonlog/jsonlogbytes_test.go | 39 + .../moby/moby/pkg/jsonlog/time_marshalling.go | 27 + .../moby/pkg/jsonlog/time_marshalling_test.go | 47 + .../moby/moby/pkg/jsonmessage/jsonmessage.go | 225 + .../moby/pkg/jsonmessage/jsonmessage_test.go | 245 + .../moby/pkg/listeners/listeners_solaris.go | 31 + .../moby/moby/pkg/listeners/listeners_unix.go | 94 + .../moby/pkg/listeners/listeners_windows.go | 54 + .../github.com/moby/moby/pkg/locker/README.md | 65 + .../github.com/moby/moby/pkg/locker/locker.go | 112 + .../moby/moby/pkg/locker/locker_test.go | 124 + .../moby/moby/pkg/longpath/longpath.go | 26 + .../moby/moby/pkg/longpath/longpath_test.go | 22 + .../moby/moby/pkg/loopback/attach_loopback.go | 137 + .../moby/moby/pkg/loopback/ioctl.go | 53 + .../moby/moby/pkg/loopback/loop_wrapper.go | 52 + .../moby/moby/pkg/loopback/loopback.go | 63 + .../github.com/moby/moby/pkg/mount/flags.go | 149 + .../moby/moby/pkg/mount/flags_freebsd.go | 48 + .../moby/moby/pkg/mount/flags_linux.go | 85 + .../moby/moby/pkg/mount/flags_unsupported.go | 30 + .../github.com/moby/moby/pkg/mount/mount.go | 74 + .../moby/moby/pkg/mount/mount_unix_test.go | 162 + .../moby/moby/pkg/mount/mounter_freebsd.go | 59 + .../moby/moby/pkg/mount/mounter_linux.go | 21 + .../moby/moby/pkg/mount/mounter_solaris.go | 33 + .../moby/pkg/mount/mounter_unsupported.go | 11 + .../moby/moby/pkg/mount/mountinfo.go | 40 + .../moby/moby/pkg/mount/mountinfo_freebsd.go | 41 + .../moby/moby/pkg/mount/mountinfo_linux.go | 95 + .../moby/pkg/mount/mountinfo_linux_test.go | 476 + .../moby/moby/pkg/mount/mountinfo_solaris.go | 37 + .../moby/pkg/mount/mountinfo_unsupported.go | 12 + .../moby/moby/pkg/mount/mountinfo_windows.go | 6 + .../moby/pkg/mount/sharedsubtree_linux.go | 69 + .../pkg/mount/sharedsubtree_linux_test.go | 331 + .../moby/pkg/mount/sharedsubtree_solaris.go | 58 + .../cmd/names-generator/main.go | 11 + .../pkg/namesgenerator/names-generator.go | 590 + .../namesgenerator/names-generator_test.go | 27 + .../moby/moby/pkg/parsers/kernel/kernel.go | 74 + .../moby/pkg/parsers/kernel/kernel_darwin.go | 56 + .../moby/pkg/parsers/kernel/kernel_unix.go | 45 + .../pkg/parsers/kernel/kernel_unix_test.go | 96 + .../moby/pkg/parsers/kernel/kernel_windows.go | 69 + .../moby/pkg/parsers/kernel/uname_linux.go | 19 + .../moby/pkg/parsers/kernel/uname_solaris.go | 14 + .../pkg/parsers/kernel/uname_unsupported.go | 18 + .../operatingsystem/operatingsystem_linux.go | 77 + .../operatingsystem_solaris.go | 37 + .../operatingsystem/operatingsystem_unix.go | 25 + .../operatingsystem_unix_test.go | 247 + .../operatingsystem_windows.go | 49 + .../moby/moby/pkg/parsers/parsers.go | 69 + .../moby/moby/pkg/parsers/parsers_test.go | 70 + .../moby/moby/pkg/pidfile/pidfile.go | 56 + .../moby/moby/pkg/pidfile/pidfile_darwin.go | 18 + .../moby/moby/pkg/pidfile/pidfile_test.go | 38 + .../moby/moby/pkg/pidfile/pidfile_unix.go | 16 + .../moby/moby/pkg/pidfile/pidfile_windows.go | 23 + .../moby/pkg/platform/architecture_linux.go | 16 + .../moby/pkg/platform/architecture_unix.go | 20 + .../moby/pkg/platform/architecture_windows.go | 60 + .../moby/moby/pkg/platform/platform.go | 23 + .../moby/moby/pkg/platform/utsname_int8.go | 18 + .../moby/moby/pkg/platform/utsname_uint8.go | 18 + .../moby/moby/pkg/plugingetter/getter.go | 35 + .../moby/moby/pkg/plugins/client.go | 205 + .../moby/moby/pkg/plugins/client_test.go | 134 + .../moby/moby/pkg/plugins/discovery.go | 131 + .../moby/moby/pkg/plugins/discovery_test.go | 152 + .../moby/moby/pkg/plugins/discovery_unix.go | 5 + .../moby/pkg/plugins/discovery_unix_test.go | 61 + .../moby/pkg/plugins/discovery_windows.go | 8 + .../moby/moby/pkg/plugins/errors.go | 33 + .../moby/moby/pkg/plugins/plugin_test.go | 44 + .../moby/pkg/plugins/pluginrpc-gen/README.md | 58 + .../pkg/plugins/pluginrpc-gen/fixtures/foo.go | 89 + .../fixtures/otherfixture/spaceship.go | 4 + .../moby/pkg/plugins/pluginrpc-gen/main.go | 91 + .../moby/pkg/plugins/pluginrpc-gen/parser.go | 263 + .../pkg/plugins/pluginrpc-gen/parser_test.go | 222 + .../pkg/plugins/pluginrpc-gen/template.go | 118 + .../moby/moby/pkg/plugins/plugins.go | 329 + .../moby/moby/pkg/plugins/plugins_linux.go | 7 + .../moby/moby/pkg/plugins/plugins_windows.go | 8 + .../moby/moby/pkg/plugins/transport/http.go | 36 + .../moby/pkg/plugins/transport/transport.go | 36 + .../github.com/moby/moby/pkg/pools/pools.go | 116 + .../moby/moby/pkg/pools/pools_test.go | 161 + .../moby/moby/pkg/progress/progress.go | 84 + .../moby/moby/pkg/progress/progressreader.go | 66 + .../moby/pkg/progress/progressreader_test.go | 75 + .../moby/moby/pkg/promise/promise.go | 11 + .../moby/moby/pkg/pubsub/publisher.go | 111 + .../moby/moby/pkg/pubsub/publisher_test.go | 142 + .../github.com/moby/moby/pkg/random/random.go | 71 + .../moby/moby/pkg/random/random_test.go | 22 + .../github.com/moby/moby/pkg/reexec/README.md | 5 + .../moby/moby/pkg/reexec/command_linux.go | 28 + .../moby/moby/pkg/reexec/command_unix.go | 23 + .../moby/pkg/reexec/command_unsupported.go | 12 + .../moby/moby/pkg/reexec/command_windows.go | 23 + .../github.com/moby/moby/pkg/reexec/reexec.go | 47 + .../moby/moby/pkg/registrar/registrar.go | 127 + .../moby/moby/pkg/registrar/registrar_test.go | 119 + .../github.com/moby/moby/pkg/signal/README.md | 1 + .../github.com/moby/moby/pkg/signal/signal.go | 54 + .../moby/moby/pkg/signal/signal_darwin.go | 41 + .../moby/moby/pkg/signal/signal_freebsd.go | 43 + .../moby/moby/pkg/signal/signal_linux.go | 80 + .../moby/moby/pkg/signal/signal_solaris.go | 42 + .../moby/moby/pkg/signal/signal_unix.go | 21 + .../moby/pkg/signal/signal_unsupported.go | 10 + .../moby/moby/pkg/signal/signal_windows.go | 28 + .../github.com/moby/moby/pkg/signal/trap.go | 103 + .../moby/moby/pkg/stdcopy/stdcopy.go | 174 + .../moby/moby/pkg/stdcopy/stdcopy_test.go | 260 + .../pkg/streamformatter/streamformatter.go | 172 + .../streamformatter/streamformatter_test.go | 108 + .../moby/moby/pkg/stringid/README.md | 1 + .../moby/moby/pkg/stringid/stringid.go | 69 + .../moby/moby/pkg/stringid/stringid_test.go | 72 + .../moby/moby/pkg/stringutils/README.md | 1 + .../moby/moby/pkg/stringutils/stringutils.go | 101 + .../moby/pkg/stringutils/stringutils_test.go | 121 + .../moby/moby/pkg/symlink/LICENSE.APACHE | 191 + .../moby/moby/pkg/symlink/LICENSE.BSD | 27 + .../moby/moby/pkg/symlink/README.md | 6 + vendor/github.com/moby/moby/pkg/symlink/fs.go | 144 + .../moby/moby/pkg/symlink/fs_unix.go | 15 + .../moby/moby/pkg/symlink/fs_unix_test.go | 407 + .../moby/moby/pkg/symlink/fs_windows.go | 169 + .../moby/moby/pkg/sysinfo/README.md | 1 + .../moby/moby/pkg/sysinfo/numcpu.go | 12 + .../moby/moby/pkg/sysinfo/numcpu_linux.go | 43 + .../moby/moby/pkg/sysinfo/numcpu_windows.go | 37 + .../moby/moby/pkg/sysinfo/sysinfo.go | 144 + .../moby/moby/pkg/sysinfo/sysinfo_linux.go | 259 + .../moby/pkg/sysinfo/sysinfo_linux_test.go | 58 + .../moby/moby/pkg/sysinfo/sysinfo_solaris.go | 121 + .../moby/moby/pkg/sysinfo/sysinfo_test.go | 26 + .../moby/moby/pkg/sysinfo/sysinfo_unix.go | 9 + .../moby/moby/pkg/sysinfo/sysinfo_windows.go | 9 + .../moby/moby/pkg/system/chtimes.go | 52 + .../moby/moby/pkg/system/chtimes_test.go | 94 + .../moby/moby/pkg/system/chtimes_unix.go | 14 + .../moby/moby/pkg/system/chtimes_unix_test.go | 91 + .../moby/moby/pkg/system/chtimes_windows.go | 27 + .../moby/pkg/system/chtimes_windows_test.go | 86 + .../github.com/moby/moby/pkg/system/errors.go | 10 + .../moby/moby/pkg/system/events_windows.go | 85 + .../moby/moby/pkg/system/exitcode.go | 33 + .../moby/moby/pkg/system/filesys.go | 54 + .../moby/moby/pkg/system/filesys_windows.go | 236 + .../github.com/moby/moby/pkg/system/lstat.go | 19 + .../moby/moby/pkg/system/lstat_unix_test.go | 30 + .../moby/moby/pkg/system/lstat_windows.go | 25 + .../moby/moby/pkg/system/meminfo.go | 17 + .../moby/moby/pkg/system/meminfo_linux.go | 65 + .../moby/moby/pkg/system/meminfo_solaris.go | 128 + .../moby/moby/pkg/system/meminfo_unix_test.go | 40 + .../moby/pkg/system/meminfo_unsupported.go | 8 + .../moby/moby/pkg/system/meminfo_windows.go | 45 + .../github.com/moby/moby/pkg/system/mknod.go | 22 + .../moby/moby/pkg/system/mknod_windows.go | 13 + .../moby/moby/pkg/system/path_unix.go | 14 + .../moby/moby/pkg/system/path_windows.go | 37 + .../moby/moby/pkg/system/path_windows_test.go | 78 + .../github.com/moby/moby/pkg/system/stat.go | 53 + .../moby/moby/pkg/system/stat_darwin.go | 32 + .../moby/moby/pkg/system/stat_freebsd.go | 27 + .../moby/moby/pkg/system/stat_linux.go | 33 + .../moby/moby/pkg/system/stat_openbsd.go | 15 + .../moby/moby/pkg/system/stat_solaris.go | 34 + .../moby/moby/pkg/system/stat_unix_test.go | 39 + .../moby/moby/pkg/system/stat_unsupported.go | 17 + .../moby/moby/pkg/system/stat_windows.go | 43 + .../moby/moby/pkg/system/syscall_unix.go | 17 + .../moby/moby/pkg/system/syscall_windows.go | 105 + .../moby/pkg/system/syscall_windows_test.go | 9 + .../github.com/moby/moby/pkg/system/umask.go | 13 + .../moby/moby/pkg/system/umask_windows.go | 9 + .../moby/moby/pkg/system/utimes_freebsd.go | 22 + .../moby/moby/pkg/system/utimes_linux.go | 26 + .../moby/moby/pkg/system/utimes_unix_test.go | 68 + .../moby/pkg/system/utimes_unsupported.go | 10 + .../moby/moby/pkg/system/xattrs_linux.go | 63 + .../moby/pkg/system/xattrs_unsupported.go | 13 + .../moby/moby/pkg/tailfile/tailfile.go | 66 + .../moby/moby/pkg/tailfile/tailfile_test.go | 148 + .../moby/moby/pkg/tarsum/builder_context.go | 21 + .../moby/pkg/tarsum/builder_context_test.go | 67 + .../moby/moby/pkg/tarsum/fileinfosums.go | 126 + .../moby/moby/pkg/tarsum/fileinfosums_test.go | 62 + .../github.com/moby/moby/pkg/tarsum/tarsum.go | 295 + .../moby/moby/pkg/tarsum/tarsum_spec.md | 230 + .../moby/moby/pkg/tarsum/tarsum_test.go | 664 + .../json | 1 + .../layer.tar | Bin 0 -> 9216 bytes .../json | 1 + .../layer.tar | Bin 0 -> 1536 bytes .../tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes .../moby/moby/pkg/tarsum/testdata/xattr/json | 1 + .../moby/pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes .../moby/moby/pkg/tarsum/versioning.go | 150 + .../moby/moby/pkg/tarsum/versioning_test.go | 98 + .../moby/moby/pkg/tarsum/writercloser.go | 22 + vendor/github.com/moby/moby/pkg/term/ascii.go | 66 + .../moby/moby/pkg/term/ascii_test.go | 43 + .../moby/moby/pkg/term/tc_linux_cgo.go | 50 + .../github.com/moby/moby/pkg/term/tc_other.go | 20 + .../moby/moby/pkg/term/tc_solaris_cgo.go | 63 + vendor/github.com/moby/moby/pkg/term/term.go | 123 + .../moby/moby/pkg/term/term_solaris.go | 41 + .../moby/moby/pkg/term/term_unix.go | 29 + .../moby/moby/pkg/term/term_windows.go | 233 + .../moby/moby/pkg/term/termios_darwin.go | 69 + .../moby/moby/pkg/term/termios_freebsd.go | 69 + .../moby/moby/pkg/term/termios_linux.go | 47 + .../moby/moby/pkg/term/termios_openbsd.go | 69 + .../moby/moby/pkg/term/windows/ansi_reader.go | 263 + .../moby/moby/pkg/term/windows/ansi_writer.go | 64 + .../moby/moby/pkg/term/windows/console.go | 35 + .../moby/moby/pkg/term/windows/windows.go | 33 + .../moby/pkg/term/windows/windows_test.go | 3 + .../moby/moby/pkg/testutil/assert/assert.go | 97 + .../github.com/moby/moby/pkg/testutil/pkg.go | 1 + .../moby/pkg/testutil/tempfile/tempfile.go | 36 + .../moby/pkg/tlsconfig/tlsconfig_clone.go | 11 + .../pkg/tlsconfig/tlsconfig_clone_go16.go | 31 + .../pkg/tlsconfig/tlsconfig_clone_go17.go | 33 + .../moby/moby/pkg/truncindex/truncindex.go | 137 + .../moby/pkg/truncindex/truncindex_test.go | 429 + .../moby/moby/pkg/urlutil/urlutil.go | 50 + .../moby/moby/pkg/urlutil/urlutil_test.go | 70 + .../moby/moby/pkg/useragent/README.md | 1 + .../moby/moby/pkg/useragent/useragent.go | 55 + .../moby/moby/pkg/useragent/useragent_test.go | 31 + .../moby/moby/plugin/backend_linux.go | 790 + .../moby/moby/plugin/backend_unsupported.go | 71 + .../github.com/moby/moby/plugin/blobstore.go | 181 + vendor/github.com/moby/moby/plugin/defs.go | 26 + vendor/github.com/moby/moby/plugin/manager.go | 347 + .../moby/moby/plugin/manager_linux.go | 292 + .../moby/moby/plugin/manager_solaris.go | 28 + .../moby/moby/plugin/manager_windows.go | 30 + vendor/github.com/moby/moby/plugin/store.go | 263 + .../github.com/moby/moby/plugin/store_test.go | 33 + .../github.com/moby/moby/plugin/v2/plugin.go | 244 + .../moby/moby/plugin/v2/plugin_linux.go | 121 + .../moby/moby/plugin/v2/plugin_unsupported.go | 14 + .../moby/moby/plugin/v2/settable.go | 102 + .../moby/moby/plugin/v2/settable_test.go | 91 + vendor/github.com/moby/moby/poule.yml | 88 + .../moby/moby/profiles/apparmor/apparmor.go | 122 + .../moby/moby/profiles/apparmor/template.go | 46 + .../moby/moby/profiles/seccomp/default.json | 929 + .../profiles/seccomp/fixtures/example.json | 27 + .../moby/moby/profiles/seccomp/generate.go | 32 + .../moby/moby/profiles/seccomp/seccomp.go | 150 + .../moby/profiles/seccomp/seccomp_default.go | 761 + .../moby/profiles/seccomp/seccomp_test.go | 32 + .../profiles/seccomp/seccomp_unsupported.go | 13 + vendor/github.com/moby/moby/project/ARM.md | 45 + .../moby/moby/project/BRANCHES-AND-TAGS.md | 35 + .../moby/moby/project/CONTRIBUTORS.md | 1 + .../moby/moby/project/GOVERNANCE.md | 17 + .../moby/moby/project/IRC-ADMINISTRATION.md | 37 + .../moby/moby/project/ISSUE-TRIAGE.md | 132 + .../moby/project/PACKAGE-REPO-MAINTENANCE.md | 74 + .../github.com/moby/moby/project/PACKAGERS.md | 307 + .../moby/moby/project/PATCH-RELEASES.md | 68 + .../moby/moby/project/PRINCIPLES.md | 19 + vendor/github.com/moby/moby/project/README.md | 24 + .../moby/moby/project/RELEASE-CHECKLIST.md | 518 + .../moby/moby/project/RELEASE-PROCESS.md | 78 + .../github.com/moby/moby/project/REVIEWING.md | 246 + vendor/github.com/moby/moby/project/TOOLS.md | 63 + .../moby/moby/reference/reference.go | 216 + .../moby/moby/reference/reference_test.go | 275 + .../github.com/moby/moby/reference/store.go | 286 + .../moby/moby/reference/store_test.go | 356 + vendor/github.com/moby/moby/registry/auth.go | 303 + .../moby/moby/registry/auth_test.go | 124 + .../github.com/moby/moby/registry/config.go | 305 + .../moby/moby/registry/config_test.go | 49 + .../moby/moby/registry/config_unix.go | 25 + .../moby/moby/registry/config_windows.go | 25 + .../moby/moby/registry/endpoint_test.go | 78 + .../moby/moby/registry/endpoint_v1.go | 198 + .../github.com/moby/moby/registry/registry.go | 191 + .../moby/moby/registry/registry_mock_test.go | 478 + .../moby/moby/registry/registry_test.go | 875 + .../github.com/moby/moby/registry/service.go | 304 + .../moby/moby/registry/service_v1.go | 40 + .../moby/moby/registry/service_v1_test.go | 23 + .../moby/moby/registry/service_v2.go | 78 + .../github.com/moby/moby/registry/session.go | 783 + vendor/github.com/moby/moby/registry/types.go | 73 + .../moby/restartmanager/restartmanager.go | 128 + .../restartmanager/restartmanager_test.go | 34 + .../github.com/moby/moby/runconfig/compare.go | 61 + .../moby/moby/runconfig/compare_test.go | 126 + .../github.com/moby/moby/runconfig/config.go | 97 + .../moby/moby/runconfig/config_test.go | 139 + .../moby/moby/runconfig/config_unix.go | 59 + .../moby/moby/runconfig/config_windows.go | 19 + .../github.com/moby/moby/runconfig/errors.go | 46 + .../fixtures/unix/container_config_1_14.json | 30 + .../fixtures/unix/container_config_1_17.json | 50 + .../fixtures/unix/container_config_1_19.json | 58 + .../unix/container_hostconfig_1_14.json | 18 + .../unix/container_hostconfig_1_19.json | 30 + .../windows/container_config_1_19.json | 58 + .../moby/moby/runconfig/hostconfig.go | 35 + .../moby/moby/runconfig/hostconfig_solaris.go | 41 + .../moby/moby/runconfig/hostconfig_test.go | 283 + .../moby/moby/runconfig/hostconfig_unix.go | 129 + .../moby/moby/runconfig/hostconfig_windows.go | 68 + .../moby/moby/runconfig/opts/envfile.go | 81 + .../moby/moby/runconfig/opts/envfile_test.go | 142 + .../moby/runconfig/opts/fixtures/utf16.env | Bin 0 -> 54 bytes .../moby/runconfig/opts/fixtures/utf16be.env | Bin 0 -> 54 bytes .../moby/runconfig/opts/fixtures/utf8.env | 3 + .../moby/runconfig/opts/fixtures/valid.env | 1 + .../moby/runconfig/opts/fixtures/valid.label | 1 + .../moby/moby/runconfig/opts/opts.go | 83 + .../moby/moby/runconfig/opts/opts_test.go | 113 + .../moby/moby/runconfig/opts/parse.go | 999 + .../moby/moby/runconfig/opts/parse_test.go | 902 + .../moby/moby/runconfig/opts/runtime.go | 79 + .../moby/runconfig/opts/throttledevice.go | 111 + .../moby/moby/runconfig/opts/ulimit.go | 57 + .../moby/moby/runconfig/opts/ulimit_test.go | 42 + .../moby/moby/runconfig/opts/weightdevice.go | 89 + vendor/github.com/moby/moby/utils/debug.go | 26 + .../github.com/moby/moby/utils/debug_test.go | 43 + vendor/github.com/moby/moby/utils/names.go | 9 + .../moby/moby/utils/process_unix.go | 22 + .../moby/moby/utils/process_windows.go | 20 + .../moby/moby/utils/templates/templates.go | 42 + .../moby/utils/templates/templates_test.go | 38 + vendor/github.com/moby/moby/utils/utils.go | 87 + .../github.com/moby/moby/utils/utils_test.go | 21 + vendor/github.com/moby/moby/vendor.conf | 140 + .../moby/moby/volume/drivers/adapter.go | 177 + .../moby/moby/volume/drivers/extpoint.go | 215 + .../moby/moby/volume/drivers/extpoint_test.go | 23 + .../moby/moby/volume/drivers/proxy.go | 242 + .../moby/moby/volume/drivers/proxy_test.go | 132 + .../moby/moby/volume/local/local.go | 389 + .../moby/moby/volume/local/local_test.go | 344 + .../moby/moby/volume/local/local_unix.go | 87 + .../moby/moby/volume/local/local_windows.go | 34 + .../moby/moby/volume/local/unmount_linux.go | 7 + .../moby/moby/volume/local/unmount_unix.go | 9 + .../moby/moby/volume/local/unmount_windows.go | 5 + .../github.com/moby/moby/volume/store/db.go | 88 + .../moby/moby/volume/store/errors.go | 76 + .../moby/moby/volume/store/restore.go | 83 + .../moby/moby/volume/store/store.go | 653 + .../moby/moby/volume/store/store_test.go | 234 + .../moby/moby/volume/store/store_unix.go | 9 + .../moby/moby/volume/store/store_windows.go | 12 + .../moby/moby/volume/testutils/testutils.go | 116 + .../github.com/moby/moby/volume/validate.go | 125 + .../moby/moby/volume/validate_test.go | 43 + .../moby/moby/volume/validate_test_unix.go | 8 + .../moby/moby/volume/validate_test_windows.go | 6 + vendor/github.com/moby/moby/volume/volume.go | 333 + .../moby/moby/volume/volume_copy.go | 23 + .../moby/moby/volume/volume_copy_unix.go | 8 + .../moby/moby/volume/volume_copy_windows.go | 6 + .../moby/moby/volume/volume_linux.go | 56 + .../moby/moby/volume/volume_linux_test.go | 51 + .../moby/volume/volume_propagation_linux.go | 47 + .../volume/volume_propagation_linux_test.go | 65 + .../volume/volume_propagation_unsupported.go | 24 + .../moby/moby/volume/volume_test.go | 269 + .../moby/moby/volume/volume_unix.go | 138 + .../moby/moby/volume/volume_unsupported.go | 16 + .../moby/moby/volume/volume_windows.go | 201 + vendor/github.com/pkg/errors/.gitignore | 24 + vendor/github.com/pkg/errors/.travis.yml | 11 + vendor/github.com/pkg/errors/LICENSE | 23 + vendor/github.com/pkg/errors/README.md | 52 + vendor/github.com/pkg/errors/appveyor.yml | 32 + vendor/github.com/pkg/errors/bench_test.go | 59 + vendor/github.com/pkg/errors/errors.go | 269 + vendor/github.com/pkg/errors/errors_test.go | 226 + vendor/github.com/pkg/errors/example_test.go | 205 + vendor/github.com/pkg/errors/format_test.go | 535 + vendor/github.com/pkg/errors/stack.go | 178 + vendor/github.com/pkg/errors/stack_test.go | 292 + .../github.com/pmezard/go-difflib/.travis.yml | 5 + vendor/github.com/pmezard/go-difflib/LICENSE | 27 + .../github.com/pmezard/go-difflib/README.md | 50 + .../pmezard/go-difflib/difflib/difflib.go | 772 + .../go-difflib/difflib/difflib_test.go | 426 + vendor/github.com/sirupsen/logrus/.gitignore | 1 + vendor/github.com/sirupsen/logrus/.travis.yml | 15 + .../github.com/sirupsen/logrus/CHANGELOG.md | 123 + vendor/github.com/sirupsen/logrus/LICENSE | 21 + vendor/github.com/sirupsen/logrus/README.md | 511 + vendor/github.com/sirupsen/logrus/alt_exit.go | 64 + .../sirupsen/logrus/alt_exit_test.go | 83 + .../github.com/sirupsen/logrus/appveyor.yml | 14 + vendor/github.com/sirupsen/logrus/doc.go | 26 + vendor/github.com/sirupsen/logrus/entry.go | 288 + .../github.com/sirupsen/logrus/entry_test.go | 115 + .../sirupsen/logrus/example_basic_test.go | 69 + .../sirupsen/logrus/example_hook_test.go | 35 + vendor/github.com/sirupsen/logrus/exported.go | 193 + .../github.com/sirupsen/logrus/formatter.go | 45 + .../sirupsen/logrus/formatter_bench_test.go | 101 + .../github.com/sirupsen/logrus/hook_test.go | 144 + vendor/github.com/sirupsen/logrus/hooks.go | 34 + .../sirupsen/logrus/hooks/syslog/README.md | 39 + .../sirupsen/logrus/hooks/syslog/syslog.go | 55 + .../logrus/hooks/syslog/syslog_test.go | 27 + .../sirupsen/logrus/hooks/test/test.go | 95 + .../sirupsen/logrus/hooks/test/test_test.go | 61 + .../sirupsen/logrus/json_formatter.go | 79 + .../sirupsen/logrus/json_formatter_test.go | 199 + vendor/github.com/sirupsen/logrus/logger.go | 323 + .../sirupsen/logrus/logger_bench_test.go | 61 + vendor/github.com/sirupsen/logrus/logrus.go | 143 + .../github.com/sirupsen/logrus/logrus_test.go | 386 + .../sirupsen/logrus/terminal_bsd.go | 10 + .../logrus/terminal_check_appengine.go | 11 + .../logrus/terminal_check_notappengine.go | 19 + .../sirupsen/logrus/terminal_linux.go | 14 + .../sirupsen/logrus/text_formatter.go | 178 + .../sirupsen/logrus/text_formatter_test.go | 141 + vendor/github.com/sirupsen/logrus/writer.go | 62 + vendor/github.com/stretchr/testify/.gitignore | 24 + .../stretchr/testify/.travis.gofmt.sh | 7 + .../stretchr/testify/.travis.gogenerate.sh | 13 + .../stretchr/testify/.travis.govet.sh | 10 + .../github.com/stretchr/testify/.travis.yml | 15 + vendor/github.com/stretchr/testify/Gopkg.lock | 27 + vendor/github.com/stretchr/testify/Gopkg.toml | 16 + vendor/github.com/stretchr/testify/LICENSE | 22 + vendor/github.com/stretchr/testify/README.md | 301 + .../stretchr/testify/_codegen/main.go | 316 + .../testify/assert/assertion_format.go | 349 + .../testify/assert/assertion_format.go.tmpl | 4 + .../testify/assert/assertion_forward.go | 686 + .../testify/assert/assertion_forward.go.tmpl | 4 + .../stretchr/testify/assert/assertions.go | 1256 ++ .../testify/assert/assertions_test.go | 1581 ++ .../github.com/stretchr/testify/assert/doc.go | 45 + .../stretchr/testify/assert/errors.go | 10 + .../testify/assert/forward_assertions.go | 16 + .../testify/assert/forward_assertions_test.go | 611 + .../testify/assert/http_assertions.go | 127 + .../testify/assert/http_assertions_test.go | 117 + vendor/github.com/stretchr/testify/doc.go | 22 + .../github.com/stretchr/testify/http/doc.go | 2 + .../testify/http/test_response_writer.go | 49 + .../testify/http/test_round_tripper.go | 17 + .../github.com/stretchr/testify/mock/doc.go | 44 + .../github.com/stretchr/testify/mock/mock.go | 815 + .../stretchr/testify/mock/mock_test.go | 1352 ++ .../stretchr/testify/package_test.go | 12 + .../stretchr/testify/require/doc.go | 28 + .../testify/require/forward_requirements.go | 16 + .../require/forward_requirements_test.go | 385 + .../stretchr/testify/require/require.go | 867 + .../stretchr/testify/require/require.go.tmpl | 6 + .../testify/require/require_forward.go | 687 + .../testify/require/require_forward.go.tmpl | 4 + .../stretchr/testify/require/requirements.go | 9 + .../testify/require/requirements_test.go | 369 + .../github.com/stretchr/testify/suite/doc.go | 65 + .../stretchr/testify/suite/interfaces.go | 46 + .../stretchr/testify/suite/suite.go | 136 + .../stretchr/testify/suite/suite_test.go | 294 + vendor/golang.org/x/crypto/.gitattributes | 10 + vendor/golang.org/x/crypto/.gitignore | 2 + vendor/golang.org/x/crypto/AUTHORS | 3 + vendor/golang.org/x/crypto/CONTRIBUTING.md | 26 + vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + vendor/golang.org/x/crypto/LICENSE | 27 + vendor/golang.org/x/crypto/PATENTS | 22 + vendor/golang.org/x/crypto/README.md | 21 + vendor/golang.org/x/crypto/acme/acme.go | 1065 + vendor/golang.org/x/crypto/acme/acme_test.go | 1380 ++ .../x/crypto/acme/autocert/autocert.go | 962 + .../x/crypto/acme/autocert/autocert_test.go | 757 + .../x/crypto/acme/autocert/cache.go | 130 + .../x/crypto/acme/autocert/cache_test.go | 58 + .../x/crypto/acme/autocert/example_test.go | 36 + .../x/crypto/acme/autocert/listener.go | 160 + .../x/crypto/acme/autocert/renewal.go | 126 + .../x/crypto/acme/autocert/renewal_test.go | 191 + vendor/golang.org/x/crypto/acme/jws.go | 153 + vendor/golang.org/x/crypto/acme/jws_test.go | 319 + vendor/golang.org/x/crypto/acme/types.go | 329 + vendor/golang.org/x/crypto/acme/types_test.go | 63 + vendor/golang.org/x/crypto/argon2/argon2.go | 285 + .../golang.org/x/crypto/argon2/argon2_test.go | 233 + vendor/golang.org/x/crypto/argon2/blake2b.go | 53 + .../x/crypto/argon2/blamka_amd64.go | 61 + .../golang.org/x/crypto/argon2/blamka_amd64.s | 252 + .../x/crypto/argon2/blamka_generic.go | 163 + .../golang.org/x/crypto/argon2/blamka_ref.go | 15 + vendor/golang.org/x/crypto/bcrypt/base64.go | 35 + vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 + .../golang.org/x/crypto/bcrypt/bcrypt_test.go | 243 + vendor/golang.org/x/crypto/blake2b/blake2b.go | 221 + .../x/crypto/blake2b/blake2bAVX2_amd64.go | 43 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 762 + .../x/crypto/blake2b/blake2b_amd64.go | 25 + .../x/crypto/blake2b/blake2b_amd64.s | 290 + .../x/crypto/blake2b/blake2b_generic.go | 179 + .../x/crypto/blake2b/blake2b_ref.go | 11 + .../x/crypto/blake2b/blake2b_test.go | 798 + vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 + .../golang.org/x/crypto/blake2b/register.go | 32 + vendor/golang.org/x/crypto/blake2s/blake2s.go | 187 + .../x/crypto/blake2s/blake2s_386.go | 35 + .../golang.org/x/crypto/blake2s/blake2s_386.s | 460 + .../x/crypto/blake2s/blake2s_amd64.go | 40 + .../x/crypto/blake2s/blake2s_amd64.s | 463 + .../x/crypto/blake2s/blake2s_generic.go | 174 + .../x/crypto/blake2s/blake2s_ref.go | 17 + .../x/crypto/blake2s/blake2s_test.go | 1002 + vendor/golang.org/x/crypto/blake2s/blake2x.go | 178 + .../golang.org/x/crypto/blake2s/register.go | 21 + vendor/golang.org/x/crypto/blowfish/block.go | 159 + .../x/crypto/blowfish/blowfish_test.go | 274 + vendor/golang.org/x/crypto/blowfish/cipher.go | 91 + vendor/golang.org/x/crypto/blowfish/const.go | 199 + vendor/golang.org/x/crypto/bn256/bn256.go | 408 + .../golang.org/x/crypto/bn256/bn256_test.go | 304 + vendor/golang.org/x/crypto/bn256/constants.go | 44 + vendor/golang.org/x/crypto/bn256/curve.go | 278 + .../golang.org/x/crypto/bn256/example_test.go | 43 + vendor/golang.org/x/crypto/bn256/gfp12.go | 200 + vendor/golang.org/x/crypto/bn256/gfp2.go | 219 + vendor/golang.org/x/crypto/bn256/gfp6.go | 296 + vendor/golang.org/x/crypto/bn256/optate.go | 395 + vendor/golang.org/x/crypto/bn256/twist.go | 249 + vendor/golang.org/x/crypto/cast5/cast5.go | 526 + .../golang.org/x/crypto/cast5/cast5_test.go | 106 + .../chacha20poly1305/chacha20poly1305.go | 83 + .../chacha20poly1305_amd64.go | 127 + .../chacha20poly1305/chacha20poly1305_amd64.s | 2714 +++ .../chacha20poly1305_generic.go | 70 + .../chacha20poly1305_noasm.go | 15 + .../chacha20poly1305/chacha20poly1305_test.go | 182 + .../chacha20poly1305_vectors_test.go | 332 + vendor/golang.org/x/crypto/codereview.cfg | 1 + vendor/golang.org/x/crypto/cryptobyte/asn1.go | 732 + .../x/crypto/cryptobyte/asn1/asn1.go | 46 + .../x/crypto/cryptobyte/asn1_test.go | 300 + .../golang.org/x/crypto/cryptobyte/builder.go | 309 + .../x/crypto/cryptobyte/cryptobyte_test.go | 428 + .../x/crypto/cryptobyte/example_test.go | 154 + .../golang.org/x/crypto/cryptobyte/string.go | 167 + .../x/crypto/curve25519/const_amd64.h | 8 + .../x/crypto/curve25519/const_amd64.s | 20 + .../x/crypto/curve25519/cswap_amd64.s | 65 + .../x/crypto/curve25519/curve25519.go | 834 + .../x/crypto/curve25519/curve25519_test.go | 39 + vendor/golang.org/x/crypto/curve25519/doc.go | 23 + .../x/crypto/curve25519/freeze_amd64.s | 73 + .../x/crypto/curve25519/ladderstep_amd64.s | 1377 ++ .../x/crypto/curve25519/mont25519_amd64.go | 240 + .../x/crypto/curve25519/mul_amd64.s | 169 + .../x/crypto/curve25519/square_amd64.s | 132 + vendor/golang.org/x/crypto/ed25519/ed25519.go | 188 + .../x/crypto/ed25519/ed25519_test.go | 207 + .../ed25519/internal/edwards25519/const.go | 1422 ++ .../internal/edwards25519/edwards25519.go | 1793 ++ .../x/crypto/ed25519/testdata/sign.input.gz | Bin 0 -> 50330 bytes .../golang.org/x/crypto/hkdf/example_test.go | 61 + vendor/golang.org/x/crypto/hkdf/hkdf.go | 75 + vendor/golang.org/x/crypto/hkdf/hkdf_test.go | 370 + .../internal/chacha20/chacha_generic.go | 198 + .../x/crypto/internal/chacha20/chacha_test.go | 33 + .../golang.org/x/crypto/md4/example_test.go | 20 + vendor/golang.org/x/crypto/md4/md4.go | 118 + vendor/golang.org/x/crypto/md4/md4_test.go | 71 + vendor/golang.org/x/crypto/md4/md4block.go | 89 + vendor/golang.org/x/crypto/nacl/auth/auth.go | 58 + .../x/crypto/nacl/auth/auth_test.go | 172 + .../x/crypto/nacl/auth/example_test.go | 36 + vendor/golang.org/x/crypto/nacl/box/box.go | 103 + .../golang.org/x/crypto/nacl/box/box_test.go | 78 + .../x/crypto/nacl/box/example_test.go | 95 + .../x/crypto/nacl/secretbox/example_test.go | 53 + .../x/crypto/nacl/secretbox/secretbox.go | 166 + .../x/crypto/nacl/secretbox/secretbox_test.go | 154 + vendor/golang.org/x/crypto/ocsp/ocsp.go | 778 + vendor/golang.org/x/crypto/ocsp/ocsp_test.go | 875 + .../x/crypto/openpgp/armor/armor.go | 219 + .../x/crypto/openpgp/armor/armor_test.go | 95 + .../x/crypto/openpgp/armor/encode.go | 160 + .../x/crypto/openpgp/canonical_text.go | 59 + .../x/crypto/openpgp/canonical_text_test.go | 52 + .../x/crypto/openpgp/clearsign/clearsign.go | 376 + .../openpgp/clearsign/clearsign_test.go | 210 + .../x/crypto/openpgp/elgamal/elgamal.go | 122 + .../x/crypto/openpgp/elgamal/elgamal_test.go | 49 + .../x/crypto/openpgp/errors/errors.go | 72 + vendor/golang.org/x/crypto/openpgp/keys.go | 641 + .../golang.org/x/crypto/openpgp/keys_test.go | 469 + .../x/crypto/openpgp/packet/compressed.go | 123 + .../crypto/openpgp/packet/compressed_test.go | 41 + .../x/crypto/openpgp/packet/config.go | 91 + .../x/crypto/openpgp/packet/encrypted_key.go | 206 + .../openpgp/packet/encrypted_key_test.go | 151 + .../x/crypto/openpgp/packet/literal.go | 89 + .../x/crypto/openpgp/packet/ocfb.go | 143 + .../x/crypto/openpgp/packet/ocfb_test.go | 46 + .../openpgp/packet/one_pass_signature.go | 73 + .../x/crypto/openpgp/packet/opaque.go | 162 + .../x/crypto/openpgp/packet/opaque_test.go | 67 + .../x/crypto/openpgp/packet/packet.go | 549 + .../x/crypto/openpgp/packet/packet_test.go | 255 + .../x/crypto/openpgp/packet/private_key.go | 380 + .../crypto/openpgp/packet/private_key_test.go | 270 + .../x/crypto/openpgp/packet/public_key.go | 753 + .../crypto/openpgp/packet/public_key_test.go | 228 + .../x/crypto/openpgp/packet/public_key_v3.go | 279 + .../openpgp/packet/public_key_v3_test.go | 82 + .../x/crypto/openpgp/packet/reader.go | 76 + .../x/crypto/openpgp/packet/signature.go | 731 + .../x/crypto/openpgp/packet/signature_test.go | 78 + .../x/crypto/openpgp/packet/signature_v3.go | 146 + .../openpgp/packet/signature_v3_test.go | 92 + .../openpgp/packet/symmetric_key_encrypted.go | 155 + .../packet/symmetric_key_encrypted_test.go | 117 + .../openpgp/packet/symmetrically_encrypted.go | 290 + .../packet/symmetrically_encrypted_test.go | 123 + .../x/crypto/openpgp/packet/userattribute.go | 91 + .../openpgp/packet/userattribute_test.go | 109 + .../x/crypto/openpgp/packet/userid.go | 160 + .../x/crypto/openpgp/packet/userid_test.go | 87 + vendor/golang.org/x/crypto/openpgp/read.go | 442 + .../golang.org/x/crypto/openpgp/read_test.go | 613 + vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 273 + .../x/crypto/openpgp/s2k/s2k_test.go | 137 + vendor/golang.org/x/crypto/openpgp/write.go | 378 + .../golang.org/x/crypto/openpgp/write_test.go | 273 + .../x/crypto/otr/libotr_test_helper.c | 197 + vendor/golang.org/x/crypto/otr/otr.go | 1415 ++ vendor/golang.org/x/crypto/otr/otr_test.go | 470 + vendor/golang.org/x/crypto/otr/smp.go | 572 + vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 + .../golang.org/x/crypto/pbkdf2/pbkdf2_test.go | 176 + .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 + .../x/crypto/pkcs12/bmp-string_test.go | 63 + vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 + .../golang.org/x/crypto/pkcs12/crypto_test.go | 125 + vendor/golang.org/x/crypto/pkcs12/errors.go | 23 + .../crypto/pkcs12/internal/rc2/bench_test.go | 27 + .../x/crypto/pkcs12/internal/rc2/rc2.go | 271 + .../x/crypto/pkcs12/internal/rc2/rc2_test.go | 92 + vendor/golang.org/x/crypto/pkcs12/mac.go | 45 + vendor/golang.org/x/crypto/pkcs12/mac_test.go | 42 + vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 + .../golang.org/x/crypto/pkcs12/pbkdf_test.go | 34 + vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 346 + .../golang.org/x/crypto/pkcs12/pkcs12_test.go | 138 + vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 + .../golang.org/x/crypto/poly1305/poly1305.go | 33 + .../x/crypto/poly1305/poly1305_test.go | 159 + .../golang.org/x/crypto/poly1305/sum_amd64.go | 22 + .../golang.org/x/crypto/poly1305/sum_amd64.s | 125 + .../golang.org/x/crypto/poly1305/sum_arm.go | 22 + vendor/golang.org/x/crypto/poly1305/sum_arm.s | 427 + .../golang.org/x/crypto/poly1305/sum_ref.go | 141 + .../x/crypto/ripemd160/ripemd160.go | 120 + .../x/crypto/ripemd160/ripemd160_test.go | 72 + .../x/crypto/ripemd160/ripemd160block.go | 165 + .../x/crypto/salsa20/salsa/hsalsa20.go | 144 + .../x/crypto/salsa20/salsa/salsa2020_amd64.s | 889 + .../x/crypto/salsa20/salsa/salsa208.go | 199 + .../x/crypto/salsa20/salsa/salsa20_amd64.go | 24 + .../x/crypto/salsa20/salsa/salsa20_ref.go | 234 + .../x/crypto/salsa20/salsa/salsa_test.go | 54 + vendor/golang.org/x/crypto/salsa20/salsa20.go | 54 + .../x/crypto/salsa20/salsa20_test.go | 139 + .../x/crypto/scrypt/example_test.go | 26 + vendor/golang.org/x/crypto/scrypt/scrypt.go | 244 + .../golang.org/x/crypto/scrypt/scrypt_test.go | 162 + vendor/golang.org/x/crypto/sha3/doc.go | 66 + vendor/golang.org/x/crypto/sha3/hashes.go | 65 + vendor/golang.org/x/crypto/sha3/keccakf.go | 412 + .../golang.org/x/crypto/sha3/keccakf_amd64.go | 13 + .../golang.org/x/crypto/sha3/keccakf_amd64.s | 390 + vendor/golang.org/x/crypto/sha3/register.go | 18 + vendor/golang.org/x/crypto/sha3/sha3.go | 192 + vendor/golang.org/x/crypto/sha3/sha3_test.go | 311 + vendor/golang.org/x/crypto/sha3/shake.go | 60 + .../sha3/testdata/keccakKats.json.deflate | Bin 0 -> 521342 bytes vendor/golang.org/x/crypto/sha3/xor.go | 16 + .../golang.org/x/crypto/sha3/xor_generic.go | 28 + .../golang.org/x/crypto/sha3/xor_unaligned.go | 58 + .../golang.org/x/crypto/ssh/agent/client.go | 683 + .../x/crypto/ssh/agent/client_test.go | 379 + .../x/crypto/ssh/agent/example_test.go | 41 + .../golang.org/x/crypto/ssh/agent/forward.go | 103 + .../golang.org/x/crypto/ssh/agent/keyring.go | 215 + .../x/crypto/ssh/agent/keyring_test.go | 76 + .../golang.org/x/crypto/ssh/agent/server.go | 523 + .../x/crypto/ssh/agent/server_test.go | 259 + .../x/crypto/ssh/agent/testdata_test.go | 64 + .../golang.org/x/crypto/ssh/benchmark_test.go | 123 + vendor/golang.org/x/crypto/ssh/buffer.go | 97 + vendor/golang.org/x/crypto/ssh/buffer_test.go | 87 + vendor/golang.org/x/crypto/ssh/certs.go | 521 + vendor/golang.org/x/crypto/ssh/certs_test.go | 335 + vendor/golang.org/x/crypto/ssh/channel.go | 633 + vendor/golang.org/x/crypto/ssh/cipher.go | 771 + vendor/golang.org/x/crypto/ssh/cipher_test.go | 131 + vendor/golang.org/x/crypto/ssh/client.go | 278 + vendor/golang.org/x/crypto/ssh/client_auth.go | 525 + .../x/crypto/ssh/client_auth_test.go | 628 + vendor/golang.org/x/crypto/ssh/client_test.go | 166 + vendor/golang.org/x/crypto/ssh/common.go | 383 + vendor/golang.org/x/crypto/ssh/connection.go | 143 + vendor/golang.org/x/crypto/ssh/doc.go | 21 + .../golang.org/x/crypto/ssh/example_test.go | 320 + vendor/golang.org/x/crypto/ssh/handshake.go | 646 + .../golang.org/x/crypto/ssh/handshake_test.go | 559 + vendor/golang.org/x/crypto/ssh/kex.go | 540 + vendor/golang.org/x/crypto/ssh/kex_test.go | 50 + vendor/golang.org/x/crypto/ssh/keys.go | 1032 + vendor/golang.org/x/crypto/ssh/keys_test.go | 500 + .../x/crypto/ssh/knownhosts/knownhosts.go | 546 + .../crypto/ssh/knownhosts/knownhosts_test.go | 329 + vendor/golang.org/x/crypto/ssh/mac.go | 61 + .../golang.org/x/crypto/ssh/mempipe_test.go | 110 + vendor/golang.org/x/crypto/ssh/messages.go | 766 + .../golang.org/x/crypto/ssh/messages_test.go | 288 + vendor/golang.org/x/crypto/ssh/mux.go | 330 + vendor/golang.org/x/crypto/ssh/mux_test.go | 505 + vendor/golang.org/x/crypto/ssh/server.go | 593 + vendor/golang.org/x/crypto/ssh/session.go | 647 + .../golang.org/x/crypto/ssh/session_test.go | 774 + vendor/golang.org/x/crypto/ssh/streamlocal.go | 115 + vendor/golang.org/x/crypto/ssh/tcpip.go | 465 + vendor/golang.org/x/crypto/ssh/tcpip_test.go | 20 + .../x/crypto/ssh/terminal/terminal.go | 951 + .../x/crypto/ssh/terminal/terminal_test.go | 350 + .../golang.org/x/crypto/ssh/terminal/util.go | 114 + .../x/crypto/ssh/terminal/util_bsd.go | 12 + .../x/crypto/ssh/terminal/util_linux.go | 10 + .../x/crypto/ssh/terminal/util_plan9.go | 58 + .../x/crypto/ssh/terminal/util_solaris.go | 124 + .../x/crypto/ssh/terminal/util_windows.go | 103 + .../x/crypto/ssh/test/agent_unix_test.go | 59 + .../x/crypto/ssh/test/banner_test.go | 32 + .../golang.org/x/crypto/ssh/test/cert_test.go | 77 + .../x/crypto/ssh/test/dial_unix_test.go | 128 + vendor/golang.org/x/crypto/ssh/test/doc.go | 7 + .../x/crypto/ssh/test/forward_unix_test.go | 194 + .../x/crypto/ssh/test/multi_auth_test.go | 144 + .../x/crypto/ssh/test/session_test.go | 443 + .../x/crypto/ssh/test/sshd_test_pw.c | 173 + .../x/crypto/ssh/test/test_unix_test.go | 361 + .../x/crypto/ssh/test/testdata_test.go | 64 + .../golang.org/x/crypto/ssh/testdata/doc.go | 8 + .../golang.org/x/crypto/ssh/testdata/keys.go | 198 + .../golang.org/x/crypto/ssh/testdata_test.go | 63 + vendor/golang.org/x/crypto/ssh/transport.go | 353 + .../golang.org/x/crypto/ssh/transport_test.go | 113 + vendor/golang.org/x/crypto/tea/cipher.go | 108 + vendor/golang.org/x/crypto/tea/tea_test.go | 93 + vendor/golang.org/x/crypto/twofish/twofish.go | 342 + .../x/crypto/twofish/twofish_test.go | 129 + vendor/golang.org/x/crypto/xtea/block.go | 66 + vendor/golang.org/x/crypto/xtea/cipher.go | 82 + vendor/golang.org/x/crypto/xtea/xtea_test.go | 229 + vendor/golang.org/x/crypto/xts/xts.go | 137 + vendor/golang.org/x/crypto/xts/xts_test.go | 105 + vendor/golang.org/x/net/.gitattributes | 10 + vendor/golang.org/x/net/.gitignore | 2 + vendor/golang.org/x/net/AUTHORS | 3 + vendor/golang.org/x/net/CONTRIBUTING.md | 26 + vendor/golang.org/x/net/CONTRIBUTORS | 3 + vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/README.md | 16 + vendor/golang.org/x/net/bpf/asm.go | 41 + vendor/golang.org/x/net/bpf/constants.go | 218 + vendor/golang.org/x/net/bpf/doc.go | 82 + vendor/golang.org/x/net/bpf/instructions.go | 704 + .../golang.org/x/net/bpf/instructions_test.go | 525 + vendor/golang.org/x/net/bpf/setter.go | 10 + .../x/net/bpf/testdata/all_instructions.bpf | 1 + .../x/net/bpf/testdata/all_instructions.txt | 79 + vendor/golang.org/x/net/bpf/vm.go | 140 + vendor/golang.org/x/net/bpf/vm_aluop_test.go | 512 + vendor/golang.org/x/net/bpf/vm_bpf_test.go | 192 + .../golang.org/x/net/bpf/vm_extension_test.go | 49 + .../golang.org/x/net/bpf/vm_instructions.go | 174 + vendor/golang.org/x/net/bpf/vm_jump_test.go | 380 + vendor/golang.org/x/net/bpf/vm_load_test.go | 246 + vendor/golang.org/x/net/bpf/vm_ret_test.go | 115 + .../golang.org/x/net/bpf/vm_scratch_test.go | 247 + vendor/golang.org/x/net/bpf/vm_test.go | 144 + vendor/golang.org/x/net/codereview.cfg | 1 + vendor/golang.org/x/net/context/context.go | 56 + .../golang.org/x/net/context/context_test.go | 583 + .../x/net/context/ctxhttp/ctxhttp.go | 74 + .../x/net/context/ctxhttp/ctxhttp_17_test.go | 29 + .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 + .../net/context/ctxhttp/ctxhttp_pre17_test.go | 79 + .../x/net/context/ctxhttp/ctxhttp_test.go | 105 + vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go17.go | 300 + vendor/golang.org/x/net/context/pre_go19.go | 109 + .../x/net/context/withtimeout_test.go | 31 + vendor/golang.org/x/net/dict/dict.go | 210 + .../x/net/dns/dnsmessage/example_test.go | 132 + .../x/net/dns/dnsmessage/message.go | 2103 ++ .../x/net/dns/dnsmessage/message_test.go | 1137 ++ vendor/golang.org/x/net/html/atom/atom.go | 78 + .../golang.org/x/net/html/atom/atom_test.go | 109 + vendor/golang.org/x/net/html/atom/gen.go | 710 + vendor/golang.org/x/net/html/atom/table.go | 779 + .../golang.org/x/net/html/atom/table_test.go | 374 + .../golang.org/x/net/html/charset/charset.go | 257 + .../x/net/html/charset/charset_test.go | 237 + .../html/charset/testdata/HTTP-charset.html | 48 + .../charset/testdata/HTTP-vs-UTF-8-BOM.html | 48 + .../testdata/HTTP-vs-meta-charset.html | 49 + .../testdata/HTTP-vs-meta-content.html | 49 + .../testdata/No-encoding-declaration.html | 47 + .../x/net/html/charset/testdata/README | 9 + .../html/charset/testdata/UTF-16BE-BOM.html | Bin 0 -> 2670 bytes .../html/charset/testdata/UTF-16LE-BOM.html | Bin 0 -> 2682 bytes .../testdata/UTF-8-BOM-vs-meta-charset.html | 49 + .../testdata/UTF-8-BOM-vs-meta-content.html | 48 + .../testdata/meta-charset-attribute.html | 48 + .../testdata/meta-content-attribute.html | 48 + vendor/golang.org/x/net/html/const.go | 104 + vendor/golang.org/x/net/html/doc.go | 106 + vendor/golang.org/x/net/html/doctype.go | 156 + vendor/golang.org/x/net/html/entity.go | 2253 ++ vendor/golang.org/x/net/html/entity_test.go | 29 + vendor/golang.org/x/net/html/escape.go | 258 + vendor/golang.org/x/net/html/escape_test.go | 97 + vendor/golang.org/x/net/html/example_test.go | 40 + vendor/golang.org/x/net/html/foreign.go | 226 + vendor/golang.org/x/net/html/node.go | 194 + vendor/golang.org/x/net/html/node_test.go | 146 + vendor/golang.org/x/net/html/parse.go | 2094 ++ vendor/golang.org/x/net/html/parse_test.go | 388 + vendor/golang.org/x/net/html/render.go | 271 + vendor/golang.org/x/net/html/render_test.go | 156 + .../golang.org/x/net/html/testdata/go1.html | 2237 ++ .../x/net/html/testdata/webkit/README | 28 + .../x/net/html/testdata/webkit/adoption01.dat | 194 + .../x/net/html/testdata/webkit/adoption02.dat | 31 + .../x/net/html/testdata/webkit/comments01.dat | 135 + .../x/net/html/testdata/webkit/doctype01.dat | 370 + .../x/net/html/testdata/webkit/entities01.dat | 603 + .../x/net/html/testdata/webkit/entities02.dat | 249 + .../html/testdata/webkit/html5test-com.dat | 246 + .../x/net/html/testdata/webkit/inbody01.dat | 43 + .../x/net/html/testdata/webkit/isindex.dat | 40 + ...pending-spec-changes-plain-text-unsafe.dat | Bin 0 -> 115 bytes .../testdata/webkit/pending-spec-changes.dat | 52 + .../testdata/webkit/plain-text-unsafe.dat | Bin 0 -> 4166 bytes .../net/html/testdata/webkit/scriptdata01.dat | 308 + .../testdata/webkit/scripted/adoption01.dat | 15 + .../testdata/webkit/scripted/webkit01.dat | 28 + .../x/net/html/testdata/webkit/tables01.dat | 212 + .../x/net/html/testdata/webkit/tests1.dat | 1952 ++ .../x/net/html/testdata/webkit/tests10.dat | 799 + .../x/net/html/testdata/webkit/tests11.dat | 482 + .../x/net/html/testdata/webkit/tests12.dat | 62 + .../x/net/html/testdata/webkit/tests14.dat | 74 + .../x/net/html/testdata/webkit/tests15.dat | 208 + .../x/net/html/testdata/webkit/tests16.dat | 2299 +++ .../x/net/html/testdata/webkit/tests17.dat | 153 + .../x/net/html/testdata/webkit/tests18.dat | 269 + .../x/net/html/testdata/webkit/tests19.dat | 1237 ++ .../x/net/html/testdata/webkit/tests2.dat | 763 + .../x/net/html/testdata/webkit/tests20.dat | 455 + .../x/net/html/testdata/webkit/tests21.dat | 221 + .../x/net/html/testdata/webkit/tests22.dat | 157 + .../x/net/html/testdata/webkit/tests23.dat | 155 + .../x/net/html/testdata/webkit/tests24.dat | 79 + .../x/net/html/testdata/webkit/tests25.dat | 219 + .../x/net/html/testdata/webkit/tests26.dat | 313 + .../x/net/html/testdata/webkit/tests3.dat | 305 + .../x/net/html/testdata/webkit/tests4.dat | 59 + .../x/net/html/testdata/webkit/tests5.dat | 191 + .../x/net/html/testdata/webkit/tests6.dat | 663 + .../x/net/html/testdata/webkit/tests7.dat | 390 + .../x/net/html/testdata/webkit/tests8.dat | 148 + .../x/net/html/testdata/webkit/tests9.dat | 457 + .../testdata/webkit/tests_innerHTML_1.dat | 741 + .../x/net/html/testdata/webkit/tricky01.dat | 261 + .../x/net/html/testdata/webkit/webkit01.dat | 610 + .../x/net/html/testdata/webkit/webkit02.dat | 159 + vendor/golang.org/x/net/html/token.go | 1219 ++ vendor/golang.org/x/net/html/token_test.go | 748 + .../x/net/http/httpproxy/export_test.go | 7 + .../x/net/http/httpproxy/go19_test.go | 13 + .../golang.org/x/net/http/httpproxy/proxy.go | 239 + .../x/net/http/httpproxy/proxy_test.go | 301 + vendor/golang.org/x/net/http2/.gitignore | 2 + vendor/golang.org/x/net/http2/Dockerfile | 51 + vendor/golang.org/x/net/http2/Makefile | 3 + vendor/golang.org/x/net/http2/README | 20 + vendor/golang.org/x/net/http2/ciphers.go | 641 + vendor/golang.org/x/net/http2/ciphers_test.go | 309 + .../x/net/http2/client_conn_pool.go | 256 + .../x/net/http2/configure_transport.go | 80 + vendor/golang.org/x/net/http2/databuffer.go | 146 + .../golang.org/x/net/http2/databuffer_test.go | 157 + vendor/golang.org/x/net/http2/errors.go | 133 + vendor/golang.org/x/net/http2/errors_test.go | 24 + vendor/golang.org/x/net/http2/flow.go | 50 + vendor/golang.org/x/net/http2/flow_test.go | 53 + vendor/golang.org/x/net/http2/frame.go | 1579 ++ vendor/golang.org/x/net/http2/frame_test.go | 1191 ++ vendor/golang.org/x/net/http2/go16.go | 16 + vendor/golang.org/x/net/http2/go17.go | 106 + vendor/golang.org/x/net/http2/go17_not18.go | 36 + vendor/golang.org/x/net/http2/go18.go | 56 + vendor/golang.org/x/net/http2/go18_test.go | 79 + vendor/golang.org/x/net/http2/go19.go | 16 + vendor/golang.org/x/net/http2/go19_test.go | 59 + vendor/golang.org/x/net/http2/gotrack.go | 170 + vendor/golang.org/x/net/http2/gotrack_test.go | 33 + .../golang.org/x/net/http2/h2demo/.gitignore | 6 + .../golang.org/x/net/http2/h2demo/Dockerfile | 11 + .../x/net/http2/h2demo/Dockerfile.0 | 134 + vendor/golang.org/x/net/http2/h2demo/Makefile | 55 + vendor/golang.org/x/net/http2/h2demo/README | 16 + .../x/net/http2/h2demo/deployment-prod.yaml | 28 + .../golang.org/x/net/http2/h2demo/h2demo.go | 543 + .../golang.org/x/net/http2/h2demo/launch.go | 302 + .../golang.org/x/net/http2/h2demo/rootCA.key | 27 + .../golang.org/x/net/http2/h2demo/rootCA.pem | 26 + .../golang.org/x/net/http2/h2demo/rootCA.srl | 1 + .../golang.org/x/net/http2/h2demo/server.crt | 20 + .../golang.org/x/net/http2/h2demo/server.key | 27 + .../x/net/http2/h2demo/service.yaml | 17 + vendor/golang.org/x/net/http2/h2demo/tmpl.go | 1991 ++ vendor/golang.org/x/net/http2/h2i/README.md | 97 + vendor/golang.org/x/net/http2/h2i/h2i.go | 522 + vendor/golang.org/x/net/http2/headermap.go | 78 + vendor/golang.org/x/net/http2/hpack/encode.go | 240 + .../x/net/http2/hpack/encode_test.go | 386 + vendor/golang.org/x/net/http2/hpack/hpack.go | 490 + .../x/net/http2/hpack/hpack_test.go | 722 + .../golang.org/x/net/http2/hpack/huffman.go | 212 + vendor/golang.org/x/net/http2/hpack/tables.go | 479 + .../x/net/http2/hpack/tables_test.go | 214 + vendor/golang.org/x/net/http2/http2.go | 391 + vendor/golang.org/x/net/http2/http2_test.go | 199 + vendor/golang.org/x/net/http2/not_go16.go | 21 + vendor/golang.org/x/net/http2/not_go17.go | 87 + vendor/golang.org/x/net/http2/not_go18.go | 29 + vendor/golang.org/x/net/http2/not_go19.go | 16 + vendor/golang.org/x/net/http2/pipe.go | 163 + vendor/golang.org/x/net/http2/pipe_test.go | 130 + vendor/golang.org/x/net/http2/server.go | 2888 +++ .../x/net/http2/server_push_test.go | 521 + vendor/golang.org/x/net/http2/server_test.go | 3725 ++++ .../testdata/draft-ietf-httpbis-http2.xml | 5021 +++++ vendor/golang.org/x/net/http2/transport.go | 2303 +++ .../golang.org/x/net/http2/transport_test.go | 3847 ++++ vendor/golang.org/x/net/http2/write.go | 365 + vendor/golang.org/x/net/http2/writesched.go | 242 + .../x/net/http2/writesched_priority.go | 452 + .../x/net/http2/writesched_priority_test.go | 541 + .../x/net/http2/writesched_random.go | 72 + .../x/net/http2/writesched_random_test.go | 44 + .../golang.org/x/net/http2/writesched_test.go | 125 + vendor/golang.org/x/net/http2/z_spec_test.go | 356 + vendor/golang.org/x/net/icmp/dstunreach.go | 41 + vendor/golang.org/x/net/icmp/echo.go | 45 + vendor/golang.org/x/net/icmp/endpoint.go | 113 + vendor/golang.org/x/net/icmp/example_test.go | 63 + vendor/golang.org/x/net/icmp/extension.go | 89 + .../golang.org/x/net/icmp/extension_test.go | 259 + vendor/golang.org/x/net/icmp/helper_posix.go | 75 + vendor/golang.org/x/net/icmp/interface.go | 236 + vendor/golang.org/x/net/icmp/ipv4.go | 61 + vendor/golang.org/x/net/icmp/ipv4_test.go | 83 + vendor/golang.org/x/net/icmp/ipv6.go | 23 + vendor/golang.org/x/net/icmp/listen_posix.go | 100 + vendor/golang.org/x/net/icmp/listen_stub.go | 33 + vendor/golang.org/x/net/icmp/message.go | 152 + vendor/golang.org/x/net/icmp/message_test.go | 134 + vendor/golang.org/x/net/icmp/messagebody.go | 41 + vendor/golang.org/x/net/icmp/mpls.go | 77 + vendor/golang.org/x/net/icmp/multipart.go | 109 + .../golang.org/x/net/icmp/multipart_test.go | 442 + vendor/golang.org/x/net/icmp/packettoobig.go | 43 + vendor/golang.org/x/net/icmp/paramprob.go | 63 + vendor/golang.org/x/net/icmp/ping_test.go | 200 + vendor/golang.org/x/net/icmp/sys_freebsd.go | 11 + vendor/golang.org/x/net/icmp/timeexceeded.go | 39 + vendor/golang.org/x/net/idna/example_test.go | 70 + vendor/golang.org/x/net/idna/idna.go | 732 + vendor/golang.org/x/net/idna/idna_test.go | 108 + vendor/golang.org/x/net/idna/punycode.go | 203 + vendor/golang.org/x/net/idna/punycode_test.go | 198 + vendor/golang.org/x/net/idna/tables.go | 4557 +++++ vendor/golang.org/x/net/idna/trie.go | 72 + vendor/golang.org/x/net/idna/trieval.go | 119 + .../golang.org/x/net/internal/iana/const.go | 180 + vendor/golang.org/x/net/internal/iana/gen.go | 293 + .../x/net/internal/nettest/helper_bsd.go | 53 + .../x/net/internal/nettest/helper_nobsd.go | 15 + .../x/net/internal/nettest/helper_posix.go | 31 + .../x/net/internal/nettest/helper_stub.go | 32 + .../x/net/internal/nettest/helper_unix.go | 29 + .../x/net/internal/nettest/helper_windows.go | 42 + .../x/net/internal/nettest/interface.go | 94 + .../x/net/internal/nettest/rlimit.go | 11 + .../x/net/internal/nettest/stack.go | 152 + .../x/net/internal/socket/cmsghdr.go | 11 + .../x/net/internal/socket/cmsghdr_bsd.go | 13 + .../internal/socket/cmsghdr_linux_32bit.go | 14 + .../internal/socket/cmsghdr_linux_64bit.go | 14 + .../internal/socket/cmsghdr_solaris_64bit.go | 14 + .../x/net/internal/socket/cmsghdr_stub.go | 17 + .../x/net/internal/socket/defs_darwin.go | 44 + .../x/net/internal/socket/defs_dragonfly.go | 44 + .../x/net/internal/socket/defs_freebsd.go | 44 + .../x/net/internal/socket/defs_linux.go | 49 + .../x/net/internal/socket/defs_netbsd.go | 47 + .../x/net/internal/socket/defs_openbsd.go | 44 + .../x/net/internal/socket/defs_solaris.go | 44 + .../x/net/internal/socket/error_unix.go | 31 + .../x/net/internal/socket/error_windows.go | 26 + .../x/net/internal/socket/iovec_32bit.go | 19 + .../x/net/internal/socket/iovec_64bit.go | 19 + .../internal/socket/iovec_solaris_64bit.go | 19 + .../x/net/internal/socket/iovec_stub.go | 11 + .../x/net/internal/socket/mmsghdr_stub.go | 21 + .../x/net/internal/socket/mmsghdr_unix.go | 42 + .../x/net/internal/socket/msghdr_bsd.go | 39 + .../x/net/internal/socket/msghdr_bsdvar.go | 16 + .../x/net/internal/socket/msghdr_linux.go | 36 + .../net/internal/socket/msghdr_linux_32bit.go | 24 + .../net/internal/socket/msghdr_linux_64bit.go | 24 + .../x/net/internal/socket/msghdr_openbsd.go | 14 + .../internal/socket/msghdr_solaris_64bit.go | 36 + .../x/net/internal/socket/msghdr_stub.go | 14 + .../x/net/internal/socket/rawconn.go | 66 + .../x/net/internal/socket/rawconn_mmsg.go | 74 + .../x/net/internal/socket/rawconn_msg.go | 77 + .../x/net/internal/socket/rawconn_nommsg.go | 18 + .../x/net/internal/socket/rawconn_nomsg.go | 18 + .../x/net/internal/socket/rawconn_stub.go | 25 + .../x/net/internal/socket/reflect.go | 62 + .../x/net/internal/socket/socket.go | 285 + .../net/internal/socket/socket_go1_9_test.go | 259 + .../x/net/internal/socket/socket_test.go | 46 + .../golang.org/x/net/internal/socket/sys.go | 33 + .../x/net/internal/socket/sys_bsd.go | 17 + .../x/net/internal/socket/sys_bsdvar.go | 14 + .../x/net/internal/socket/sys_darwin.go | 7 + .../x/net/internal/socket/sys_dragonfly.go | 7 + .../x/net/internal/socket/sys_linux.go | 27 + .../x/net/internal/socket/sys_linux_386.go | 55 + .../x/net/internal/socket/sys_linux_386.s | 11 + .../x/net/internal/socket/sys_linux_amd64.go | 10 + .../x/net/internal/socket/sys_linux_arm.go | 10 + .../x/net/internal/socket/sys_linux_arm64.go | 10 + .../x/net/internal/socket/sys_linux_mips.go | 10 + .../x/net/internal/socket/sys_linux_mips64.go | 10 + .../net/internal/socket/sys_linux_mips64le.go | 10 + .../x/net/internal/socket/sys_linux_mipsle.go | 10 + .../x/net/internal/socket/sys_linux_ppc64.go | 10 + .../net/internal/socket/sys_linux_ppc64le.go | 10 + .../x/net/internal/socket/sys_linux_s390x.go | 55 + .../x/net/internal/socket/sys_linux_s390x.s | 11 + .../x/net/internal/socket/sys_netbsd.go | 25 + .../x/net/internal/socket/sys_posix.go | 168 + .../x/net/internal/socket/sys_solaris.go | 71 + .../x/net/internal/socket/sys_solaris_amd64.s | 11 + .../x/net/internal/socket/sys_stub.go | 64 + .../x/net/internal/socket/sys_unix.go | 33 + .../x/net/internal/socket/sys_windows.go | 70 + .../x/net/internal/socket/zsys_darwin_386.go | 59 + .../net/internal/socket/zsys_darwin_amd64.go | 61 + .../x/net/internal/socket/zsys_darwin_arm.go | 59 + .../net/internal/socket/zsys_darwin_arm64.go | 61 + .../internal/socket/zsys_dragonfly_amd64.go | 61 + .../x/net/internal/socket/zsys_freebsd_386.go | 59 + .../net/internal/socket/zsys_freebsd_amd64.go | 61 + .../x/net/internal/socket/zsys_freebsd_arm.go | 59 + .../x/net/internal/socket/zsys_linux_386.go | 63 + .../x/net/internal/socket/zsys_linux_amd64.go | 66 + .../x/net/internal/socket/zsys_linux_arm.go | 63 + .../x/net/internal/socket/zsys_linux_arm64.go | 66 + .../x/net/internal/socket/zsys_linux_mips.go | 63 + .../net/internal/socket/zsys_linux_mips64.go | 66 + .../internal/socket/zsys_linux_mips64le.go | 66 + .../net/internal/socket/zsys_linux_mipsle.go | 63 + .../x/net/internal/socket/zsys_linux_ppc64.go | 66 + .../net/internal/socket/zsys_linux_ppc64le.go | 66 + .../x/net/internal/socket/zsys_linux_s390x.go | 66 + .../x/net/internal/socket/zsys_netbsd_386.go | 65 + .../net/internal/socket/zsys_netbsd_amd64.go | 68 + .../x/net/internal/socket/zsys_netbsd_arm.go | 65 + .../x/net/internal/socket/zsys_openbsd_386.go | 59 + .../net/internal/socket/zsys_openbsd_amd64.go | 61 + .../x/net/internal/socket/zsys_openbsd_arm.go | 59 + .../net/internal/socket/zsys_solaris_amd64.go | 60 + .../x/net/internal/timeseries/timeseries.go | 525 + .../internal/timeseries/timeseries_test.go | 170 + vendor/golang.org/x/net/ipv4/batch.go | 191 + vendor/golang.org/x/net/ipv4/bpf_test.go | 93 + vendor/golang.org/x/net/ipv4/control.go | 144 + vendor/golang.org/x/net/ipv4/control_bsd.go | 40 + .../golang.org/x/net/ipv4/control_pktinfo.go | 39 + vendor/golang.org/x/net/ipv4/control_stub.go | 13 + vendor/golang.org/x/net/ipv4/control_test.go | 21 + vendor/golang.org/x/net/ipv4/control_unix.go | 73 + .../golang.org/x/net/ipv4/control_windows.go | 16 + vendor/golang.org/x/net/ipv4/defs_darwin.go | 77 + .../golang.org/x/net/ipv4/defs_dragonfly.go | 38 + vendor/golang.org/x/net/ipv4/defs_freebsd.go | 75 + vendor/golang.org/x/net/ipv4/defs_linux.go | 122 + vendor/golang.org/x/net/ipv4/defs_netbsd.go | 37 + vendor/golang.org/x/net/ipv4/defs_openbsd.go | 37 + vendor/golang.org/x/net/ipv4/defs_solaris.go | 84 + vendor/golang.org/x/net/ipv4/dgramopt.go | 265 + vendor/golang.org/x/net/ipv4/doc.go | 244 + vendor/golang.org/x/net/ipv4/endpoint.go | 187 + vendor/golang.org/x/net/ipv4/example_test.go | 224 + vendor/golang.org/x/net/ipv4/gen.go | 199 + vendor/golang.org/x/net/ipv4/genericopt.go | 57 + vendor/golang.org/x/net/ipv4/header.go | 159 + vendor/golang.org/x/net/ipv4/header_test.go | 228 + vendor/golang.org/x/net/ipv4/helper.go | 63 + vendor/golang.org/x/net/ipv4/iana.go | 34 + vendor/golang.org/x/net/ipv4/icmp.go | 57 + vendor/golang.org/x/net/ipv4/icmp_linux.go | 25 + vendor/golang.org/x/net/ipv4/icmp_stub.go | 25 + vendor/golang.org/x/net/ipv4/icmp_test.go | 95 + .../golang.org/x/net/ipv4/multicast_test.go | 334 + .../x/net/ipv4/multicastlistener_test.go | 265 + .../x/net/ipv4/multicastsockopt_test.go | 195 + vendor/golang.org/x/net/ipv4/packet.go | 69 + vendor/golang.org/x/net/ipv4/packet_go1_8.go | 56 + vendor/golang.org/x/net/ipv4/packet_go1_9.go | 67 + vendor/golang.org/x/net/ipv4/payload.go | 23 + vendor/golang.org/x/net/ipv4/payload_cmsg.go | 36 + .../x/net/ipv4/payload_cmsg_go1_8.go | 59 + .../x/net/ipv4/payload_cmsg_go1_9.go | 67 + .../golang.org/x/net/ipv4/payload_nocmsg.go | 42 + .../x/net/ipv4/readwrite_go1_8_test.go | 248 + .../x/net/ipv4/readwrite_go1_9_test.go | 388 + .../golang.org/x/net/ipv4/readwrite_test.go | 140 + vendor/golang.org/x/net/ipv4/sockopt.go | 44 + vendor/golang.org/x/net/ipv4/sockopt_posix.go | 71 + vendor/golang.org/x/net/ipv4/sockopt_stub.go | 42 + vendor/golang.org/x/net/ipv4/sys_asmreq.go | 119 + .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 25 + vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 42 + .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_bpf.go | 23 + vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv4/sys_bsd.go | 37 + vendor/golang.org/x/net/ipv4/sys_darwin.go | 93 + vendor/golang.org/x/net/ipv4/sys_dragonfly.go | 35 + vendor/golang.org/x/net/ipv4/sys_freebsd.go | 76 + vendor/golang.org/x/net/ipv4/sys_linux.go | 59 + vendor/golang.org/x/net/ipv4/sys_solaris.go | 57 + vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_stub.go | 13 + vendor/golang.org/x/net/ipv4/sys_windows.go | 67 + vendor/golang.org/x/net/ipv4/unicast_test.go | 247 + .../x/net/ipv4/unicastsockopt_test.go | 148 + vendor/golang.org/x/net/ipv4/zsys_darwin.go | 99 + .../golang.org/x/net/ipv4/zsys_dragonfly.go | 31 + .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 93 + .../x/net/ipv4/zsys_freebsd_amd64.go | 95 + .../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 95 + .../golang.org/x/net/ipv4/zsys_linux_386.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_amd64.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_arm.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_mips.go | 148 + .../x/net/ipv4/zsys_linux_mips64.go | 150 + .../x/net/ipv4/zsys_linux_mips64le.go | 150 + .../x/net/ipv4/zsys_linux_mipsle.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_ppc.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 150 + .../x/net/ipv4/zsys_linux_ppc64le.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 150 + vendor/golang.org/x/net/ipv4/zsys_netbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_openbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_solaris.go | 100 + vendor/golang.org/x/net/ipv6/batch.go | 119 + vendor/golang.org/x/net/ipv6/bpf_test.go | 96 + vendor/golang.org/x/net/ipv6/control.go | 187 + .../x/net/ipv6/control_rfc2292_unix.go | 48 + .../x/net/ipv6/control_rfc3542_unix.go | 94 + vendor/golang.org/x/net/ipv6/control_stub.go | 13 + vendor/golang.org/x/net/ipv6/control_test.go | 21 + vendor/golang.org/x/net/ipv6/control_unix.go | 55 + .../golang.org/x/net/ipv6/control_windows.go | 16 + vendor/golang.org/x/net/ipv6/defs_darwin.go | 112 + .../golang.org/x/net/ipv6/defs_dragonfly.go | 84 + vendor/golang.org/x/net/ipv6/defs_freebsd.go | 105 + vendor/golang.org/x/net/ipv6/defs_linux.go | 147 + vendor/golang.org/x/net/ipv6/defs_netbsd.go | 80 + vendor/golang.org/x/net/ipv6/defs_openbsd.go | 89 + vendor/golang.org/x/net/ipv6/defs_solaris.go | 114 + vendor/golang.org/x/net/ipv6/dgramopt.go | 302 + vendor/golang.org/x/net/ipv6/doc.go | 243 + vendor/golang.org/x/net/ipv6/endpoint.go | 128 + vendor/golang.org/x/net/ipv6/example_test.go | 216 + vendor/golang.org/x/net/ipv6/gen.go | 199 + vendor/golang.org/x/net/ipv6/genericopt.go | 58 + vendor/golang.org/x/net/ipv6/header.go | 55 + vendor/golang.org/x/net/ipv6/header_test.go | 55 + vendor/golang.org/x/net/ipv6/helper.go | 57 + vendor/golang.org/x/net/ipv6/iana.go | 82 + vendor/golang.org/x/net/ipv6/icmp.go | 60 + vendor/golang.org/x/net/ipv6/icmp_bsd.go | 29 + vendor/golang.org/x/net/ipv6/icmp_linux.go | 27 + vendor/golang.org/x/net/ipv6/icmp_solaris.go | 27 + vendor/golang.org/x/net/ipv6/icmp_stub.go | 23 + vendor/golang.org/x/net/ipv6/icmp_test.go | 96 + vendor/golang.org/x/net/ipv6/icmp_windows.go | 22 + .../x/net/ipv6/mocktransponder_test.go | 32 + .../golang.org/x/net/ipv6/multicast_test.go | 264 + .../x/net/ipv6/multicastlistener_test.go | 261 + .../x/net/ipv6/multicastsockopt_test.go | 157 + vendor/golang.org/x/net/ipv6/payload.go | 23 + vendor/golang.org/x/net/ipv6/payload_cmsg.go | 35 + .../x/net/ipv6/payload_cmsg_go1_8.go | 55 + .../x/net/ipv6/payload_cmsg_go1_9.go | 57 + .../golang.org/x/net/ipv6/payload_nocmsg.go | 41 + .../x/net/ipv6/readwrite_go1_8_test.go | 242 + .../x/net/ipv6/readwrite_go1_9_test.go | 373 + .../golang.org/x/net/ipv6/readwrite_test.go | 148 + vendor/golang.org/x/net/ipv6/sockopt.go | 43 + vendor/golang.org/x/net/ipv6/sockopt_posix.go | 87 + vendor/golang.org/x/net/ipv6/sockopt_stub.go | 46 + vendor/golang.org/x/net/ipv6/sockopt_test.go | 133 + vendor/golang.org/x/net/ipv6/sys_asmreq.go | 24 + .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 17 + vendor/golang.org/x/net/ipv6/sys_bpf.go | 23 + vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv6/sys_bsd.go | 57 + vendor/golang.org/x/net/ipv6/sys_darwin.go | 106 + vendor/golang.org/x/net/ipv6/sys_freebsd.go | 92 + vendor/golang.org/x/net/ipv6/sys_linux.go | 74 + vendor/golang.org/x/net/ipv6/sys_solaris.go | 74 + vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv6/sys_stub.go | 13 + vendor/golang.org/x/net/ipv6/sys_windows.go | 75 + vendor/golang.org/x/net/ipv6/unicast_test.go | 184 + .../x/net/ipv6/unicastsockopt_test.go | 120 + vendor/golang.org/x/net/ipv6/zsys_darwin.go | 131 + .../golang.org/x/net/ipv6/zsys_dragonfly.go | 88 + .../golang.org/x/net/ipv6/zsys_freebsd_386.go | 122 + .../x/net/ipv6/zsys_freebsd_amd64.go | 124 + .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 124 + .../golang.org/x/net/ipv6/zsys_linux_386.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_arm.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_mips.go | 170 + .../x/net/ipv6/zsys_linux_mips64.go | 172 + .../x/net/ipv6/zsys_linux_mips64le.go | 172 + .../x/net/ipv6/zsys_linux_mipsle.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_ppc.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 172 + .../x/net/ipv6/zsys_linux_ppc64le.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 172 + vendor/golang.org/x/net/ipv6/zsys_netbsd.go | 84 + vendor/golang.org/x/net/ipv6/zsys_openbsd.go | 93 + vendor/golang.org/x/net/ipv6/zsys_solaris.go | 131 + .../golang.org/x/net/lex/httplex/httplex.go | 351 + .../x/net/lex/httplex/httplex_test.go | 119 + vendor/golang.org/x/net/lif/address.go | 105 + vendor/golang.org/x/net/lif/address_test.go | 123 + vendor/golang.org/x/net/lif/binary.go | 115 + vendor/golang.org/x/net/lif/defs_solaris.go | 90 + vendor/golang.org/x/net/lif/lif.go | 43 + vendor/golang.org/x/net/lif/link.go | 126 + vendor/golang.org/x/net/lif/link_test.go | 63 + vendor/golang.org/x/net/lif/sys.go | 21 + .../golang.org/x/net/lif/sys_solaris_amd64.s | 8 + vendor/golang.org/x/net/lif/syscall.go | 28 + .../x/net/lif/zsys_solaris_amd64.go | 103 + vendor/golang.org/x/net/nettest/conntest.go | 456 + .../golang.org/x/net/nettest/conntest_go16.go | 24 + .../golang.org/x/net/nettest/conntest_go17.go | 24 + .../golang.org/x/net/nettest/conntest_test.go | 76 + vendor/golang.org/x/net/netutil/listen.go | 48 + .../golang.org/x/net/netutil/listen_test.go | 101 + vendor/golang.org/x/net/proxy/direct.go | 18 + vendor/golang.org/x/net/proxy/per_host.go | 140 + .../golang.org/x/net/proxy/per_host_test.go | 55 + vendor/golang.org/x/net/proxy/proxy.go | 134 + vendor/golang.org/x/net/proxy/proxy_test.go | 215 + vendor/golang.org/x/net/proxy/socks5.go | 214 + vendor/golang.org/x/net/publicsuffix/gen.go | 713 + vendor/golang.org/x/net/publicsuffix/list.go | 135 + .../x/net/publicsuffix/list_test.go | 416 + vendor/golang.org/x/net/publicsuffix/table.go | 9534 +++++++++ .../x/net/publicsuffix/table_test.go | 16959 ++++++++++++++++ vendor/golang.org/x/net/route/address.go | 425 + .../x/net/route/address_darwin_test.go | 63 + vendor/golang.org/x/net/route/address_test.go | 103 + vendor/golang.org/x/net/route/binary.go | 90 + vendor/golang.org/x/net/route/defs_darwin.go | 114 + .../golang.org/x/net/route/defs_dragonfly.go | 113 + vendor/golang.org/x/net/route/defs_freebsd.go | 337 + vendor/golang.org/x/net/route/defs_netbsd.go | 112 + vendor/golang.org/x/net/route/defs_openbsd.go | 116 + vendor/golang.org/x/net/route/interface.go | 64 + .../x/net/route/interface_announce.go | 32 + .../x/net/route/interface_classic.go | 66 + .../x/net/route/interface_freebsd.go | 78 + .../x/net/route/interface_multicast.go | 30 + .../x/net/route/interface_openbsd.go | 90 + vendor/golang.org/x/net/route/message.go | 72 + .../x/net/route/message_darwin_test.go | 34 + .../x/net/route/message_freebsd_test.go | 92 + vendor/golang.org/x/net/route/message_test.go | 239 + vendor/golang.org/x/net/route/route.go | 123 + .../golang.org/x/net/route/route_classic.go | 75 + .../golang.org/x/net/route/route_openbsd.go | 65 + vendor/golang.org/x/net/route/route_test.go | 390 + vendor/golang.org/x/net/route/sys.go | 39 + vendor/golang.org/x/net/route/sys_darwin.go | 87 + .../golang.org/x/net/route/sys_dragonfly.go | 76 + vendor/golang.org/x/net/route/sys_freebsd.go | 155 + vendor/golang.org/x/net/route/sys_netbsd.go | 71 + vendor/golang.org/x/net/route/sys_openbsd.go | 80 + vendor/golang.org/x/net/route/syscall.go | 28 + vendor/golang.org/x/net/route/zsys_darwin.go | 99 + .../golang.org/x/net/route/zsys_dragonfly.go | 98 + .../x/net/route/zsys_freebsd_386.go | 126 + .../x/net/route/zsys_freebsd_amd64.go | 123 + .../x/net/route/zsys_freebsd_arm.go | 123 + vendor/golang.org/x/net/route/zsys_netbsd.go | 97 + vendor/golang.org/x/net/route/zsys_openbsd.go | 101 + vendor/golang.org/x/net/trace/events.go | 532 + vendor/golang.org/x/net/trace/histogram.go | 365 + .../golang.org/x/net/trace/histogram_test.go | 325 + vendor/golang.org/x/net/trace/trace.go | 1103 + vendor/golang.org/x/net/trace/trace_go16.go | 21 + vendor/golang.org/x/net/trace/trace_go17.go | 21 + vendor/golang.org/x/net/trace/trace_test.go | 178 + vendor/golang.org/x/net/webdav/file.go | 796 + vendor/golang.org/x/net/webdav/file_go1.6.go | 17 + vendor/golang.org/x/net/webdav/file_go1.7.go | 16 + vendor/golang.org/x/net/webdav/file_test.go | 1184 ++ vendor/golang.org/x/net/webdav/if.go | 173 + vendor/golang.org/x/net/webdav/if_test.go | 322 + .../x/net/webdav/internal/xml/README | 11 + .../x/net/webdav/internal/xml/atom_test.go | 56 + .../x/net/webdav/internal/xml/example_test.go | 151 + .../x/net/webdav/internal/xml/marshal.go | 1223 ++ .../x/net/webdav/internal/xml/marshal_test.go | 1939 ++ .../x/net/webdav/internal/xml/read.go | 692 + .../x/net/webdav/internal/xml/read_test.go | 744 + .../x/net/webdav/internal/xml/typeinfo.go | 371 + .../x/net/webdav/internal/xml/xml.go | 1998 ++ .../x/net/webdav/internal/xml/xml_test.go | 752 + .../x/net/webdav/litmus_test_server.go | 94 + vendor/golang.org/x/net/webdav/lock.go | 445 + vendor/golang.org/x/net/webdav/lock_test.go | 731 + vendor/golang.org/x/net/webdav/prop.go | 418 + vendor/golang.org/x/net/webdav/prop_test.go | 613 + vendor/golang.org/x/net/webdav/webdav.go | 702 + vendor/golang.org/x/net/webdav/webdav_test.go | 344 + vendor/golang.org/x/net/webdav/xml.go | 519 + vendor/golang.org/x/net/webdav/xml_test.go | 906 + vendor/golang.org/x/net/websocket/client.go | 106 + vendor/golang.org/x/net/websocket/dial.go | 24 + .../golang.org/x/net/websocket/dial_test.go | 43 + .../x/net/websocket/exampledial_test.go | 31 + .../x/net/websocket/examplehandler_test.go | 26 + vendor/golang.org/x/net/websocket/hybi.go | 583 + .../golang.org/x/net/websocket/hybi_test.go | 608 + vendor/golang.org/x/net/websocket/server.go | 113 + .../golang.org/x/net/websocket/websocket.go | 448 + .../x/net/websocket/websocket_test.go | 665 + vendor/golang.org/x/net/xsrftoken/xsrf.go | 94 + .../golang.org/x/net/xsrftoken/xsrf_test.go | 83 + vendor/golang.org/x/sys/.gitattributes | 10 + vendor/golang.org/x/sys/.gitignore | 2 + vendor/golang.org/x/sys/AUTHORS | 3 + vendor/golang.org/x/sys/CONTRIBUTING.md | 26 + vendor/golang.org/x/sys/CONTRIBUTORS | 3 + vendor/golang.org/x/sys/LICENSE | 27 + vendor/golang.org/x/sys/PATENTS | 22 + vendor/golang.org/x/sys/README.md | 18 + vendor/golang.org/x/sys/codereview.cfg | 1 + vendor/golang.org/x/sys/plan9/asm.s | 8 + vendor/golang.org/x/sys/plan9/asm_plan9_386.s | 30 + .../golang.org/x/sys/plan9/asm_plan9_amd64.s | 30 + vendor/golang.org/x/sys/plan9/asm_plan9_arm.s | 25 + vendor/golang.org/x/sys/plan9/const_plan9.go | 70 + vendor/golang.org/x/sys/plan9/dir_plan9.go | 212 + vendor/golang.org/x/sys/plan9/env_plan9.go | 31 + vendor/golang.org/x/sys/plan9/errors_plan9.go | 50 + vendor/golang.org/x/sys/plan9/mkall.sh | 138 + vendor/golang.org/x/sys/plan9/mkerrors.sh | 246 + vendor/golang.org/x/sys/plan9/mksyscall.pl | 319 + .../golang.org/x/sys/plan9/mksysnum_plan9.sh | 23 + .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 21 + vendor/golang.org/x/sys/plan9/pwd_plan9.go | 23 + vendor/golang.org/x/sys/plan9/race.go | 30 + vendor/golang.org/x/sys/plan9/race0.go | 25 + vendor/golang.org/x/sys/plan9/str.go | 22 + vendor/golang.org/x/sys/plan9/syscall.go | 74 + .../golang.org/x/sys/plan9/syscall_plan9.go | 349 + vendor/golang.org/x/sys/plan9/syscall_test.go | 33 + .../x/sys/plan9/zsyscall_plan9_386.go | 292 + .../x/sys/plan9/zsyscall_plan9_amd64.go | 292 + .../x/sys/plan9/zsyscall_plan9_arm.go | 284 + .../golang.org/x/sys/plan9/zsysnum_plan9.go | 49 + vendor/golang.org/x/sys/unix/.gitignore | 2 + vendor/golang.org/x/sys/unix/README.md | 173 + .../golang.org/x/sys/unix/affinity_linux.go | 124 + vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 + .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 + vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 + .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 + .../x/sys/unix/asm_dragonfly_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 + vendor/golang.org/x/sys/unix/asm_linux_386.s | 65 + .../golang.org/x/sys/unix/asm_linux_amd64.s | 57 + vendor/golang.org/x/sys/unix/asm_linux_arm.s | 56 + .../golang.org/x/sys/unix/asm_linux_arm64.s | 52 + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 + .../golang.org/x/sys/unix/asm_linux_mipsx.s | 54 + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 56 + .../golang.org/x/sys/unix/asm_linux_s390x.s | 56 + vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 + vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 + .../golang.org/x/sys/unix/bluetooth_linux.go | 35 + vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 + vendor/golang.org/x/sys/unix/constants.go | 13 + vendor/golang.org/x/sys/unix/creds_test.go | 152 + vendor/golang.org/x/sys/unix/dev_darwin.go | 24 + .../golang.org/x/sys/unix/dev_darwin_test.go | 51 + vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 + .../x/sys/unix/dev_dragonfly_test.go | 50 + vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 + vendor/golang.org/x/sys/unix/dev_linux.go | 42 + .../golang.org/x/sys/unix/dev_linux_test.go | 53 + vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 + .../golang.org/x/sys/unix/dev_netbsd_test.go | 50 + vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 + .../golang.org/x/sys/unix/dev_openbsd_test.go | 54 + .../golang.org/x/sys/unix/dev_solaris_test.go | 51 + vendor/golang.org/x/sys/unix/dirent.go | 17 + vendor/golang.org/x/sys/unix/endian_big.go | 9 + vendor/golang.org/x/sys/unix/endian_little.go | 9 + vendor/golang.org/x/sys/unix/env_unix.go | 31 + .../x/sys/unix/errors_freebsd_386.go | 227 + .../x/sys/unix/errors_freebsd_amd64.go | 227 + .../x/sys/unix/errors_freebsd_arm.go | 226 + vendor/golang.org/x/sys/unix/export_test.go | 9 + vendor/golang.org/x/sys/unix/flock.go | 22 + .../x/sys/unix/flock_linux_32bit.go | 13 + vendor/golang.org/x/sys/unix/gccgo.go | 61 + vendor/golang.org/x/sys/unix/gccgo_c.c | 47 + .../x/sys/unix/gccgo_linux_amd64.go | 20 + vendor/golang.org/x/sys/unix/linux/Dockerfile | 48 + vendor/golang.org/x/sys/unix/linux/mkall.go | 482 + .../golang.org/x/sys/unix/linux/mksysnum.pl | 85 + vendor/golang.org/x/sys/unix/linux/types.go | 696 + vendor/golang.org/x/sys/unix/mkall.sh | 188 + vendor/golang.org/x/sys/unix/mkerrors.sh | 581 + vendor/golang.org/x/sys/unix/mkpost.go | 97 + vendor/golang.org/x/sys/unix/mksyscall.pl | 341 + .../x/sys/unix/mksyscall_solaris.pl | 289 + .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 264 + .../golang.org/x/sys/unix/mksysnum_darwin.pl | 39 + .../x/sys/unix/mksysnum_dragonfly.pl | 50 + .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 50 + .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 58 + .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 50 + .../golang.org/x/sys/unix/mmap_unix_test.go | 35 + .../golang.org/x/sys/unix/openbsd_pledge.go | 38 + vendor/golang.org/x/sys/unix/openbsd_test.go | 113 + vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 + vendor/golang.org/x/sys/unix/race.go | 30 + vendor/golang.org/x/sys/unix/race0.go | 25 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 + vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 104 + vendor/golang.org/x/sys/unix/str.go | 26 + vendor/golang.org/x/sys/unix/syscall.go | 51 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 665 + .../golang.org/x/sys/unix/syscall_bsd_test.go | 93 + .../golang.org/x/sys/unix/syscall_darwin.go | 602 + .../x/sys/unix/syscall_darwin_386.go | 68 + .../x/sys/unix/syscall_darwin_amd64.go | 68 + .../x/sys/unix/syscall_darwin_arm.go | 66 + .../x/sys/unix/syscall_darwin_arm64.go | 68 + .../x/sys/unix/syscall_dragonfly.go | 522 + .../x/sys/unix/syscall_dragonfly_amd64.go | 52 + .../golang.org/x/sys/unix/syscall_freebsd.go | 760 + .../x/sys/unix/syscall_freebsd_386.go | 52 + .../x/sys/unix/syscall_freebsd_amd64.go | 52 + .../x/sys/unix/syscall_freebsd_arm.go | 52 + .../x/sys/unix/syscall_freebsd_test.go | 297 + vendor/golang.org/x/sys/unix/syscall_linux.go | 1503 ++ .../x/sys/unix/syscall_linux_386.go | 391 + .../x/sys/unix/syscall_linux_amd64.go | 144 + .../x/sys/unix/syscall_linux_amd64_gc.go | 13 + .../x/sys/unix/syscall_linux_arm.go | 255 + .../x/sys/unix/syscall_linux_arm64.go | 189 + .../golang.org/x/sys/unix/syscall_linux_gc.go | 14 + .../x/sys/unix/syscall_linux_gccgo.go | 21 + .../x/sys/unix/syscall_linux_mips64x.go | 209 + .../x/sys/unix/syscall_linux_mipsx.go | 231 + .../x/sys/unix/syscall_linux_ppc64x.go | 127 + .../x/sys/unix/syscall_linux_s390x.go | 320 + .../x/sys/unix/syscall_linux_sparc64.go | 143 + .../x/sys/unix/syscall_linux_test.go | 402 + .../golang.org/x/sys/unix/syscall_netbsd.go | 566 + .../x/sys/unix/syscall_netbsd_386.go | 33 + .../x/sys/unix/syscall_netbsd_amd64.go | 33 + .../x/sys/unix/syscall_netbsd_arm.go | 33 + .../golang.org/x/sys/unix/syscall_openbsd.go | 366 + .../x/sys/unix/syscall_openbsd_386.go | 33 + .../x/sys/unix/syscall_openbsd_amd64.go | 33 + .../x/sys/unix/syscall_openbsd_arm.go | 33 + .../golang.org/x/sys/unix/syscall_solaris.go | 718 + .../x/sys/unix/syscall_solaris_amd64.go | 28 + .../x/sys/unix/syscall_solaris_test.go | 55 + vendor/golang.org/x/sys/unix/syscall_test.go | 60 + vendor/golang.org/x/sys/unix/syscall_unix.go | 307 + .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 + .../x/sys/unix/syscall_unix_test.go | 521 + vendor/golang.org/x/sys/unix/timestruct.go | 82 + .../golang.org/x/sys/unix/timestruct_test.go | 54 + vendor/golang.org/x/sys/unix/types_darwin.go | 277 + .../golang.org/x/sys/unix/types_dragonfly.go | 280 + vendor/golang.org/x/sys/unix/types_freebsd.go | 402 + vendor/golang.org/x/sys/unix/types_netbsd.go | 270 + vendor/golang.org/x/sys/unix/types_openbsd.go | 282 + vendor/golang.org/x/sys/unix/types_solaris.go | 283 + .../x/sys/unix/zerrors_darwin_386.go | 1769 ++ .../x/sys/unix/zerrors_darwin_amd64.go | 1769 ++ .../x/sys/unix/zerrors_darwin_arm.go | 1769 ++ .../x/sys/unix/zerrors_darwin_arm64.go | 1769 ++ .../x/sys/unix/zerrors_dragonfly_amd64.go | 1578 ++ .../x/sys/unix/zerrors_freebsd_386.go | 1756 ++ .../x/sys/unix/zerrors_freebsd_amd64.go | 1757 ++ .../x/sys/unix/zerrors_freebsd_arm.go | 1765 ++ .../x/sys/unix/zerrors_linux_386.go | 2298 +++ .../x/sys/unix/zerrors_linux_amd64.go | 2299 +++ .../x/sys/unix/zerrors_linux_arm.go | 2306 +++ .../x/sys/unix/zerrors_linux_arm64.go | 2289 +++ .../x/sys/unix/zerrors_linux_mips.go | 2308 +++ .../x/sys/unix/zerrors_linux_mips64.go | 2308 +++ .../x/sys/unix/zerrors_linux_mips64le.go | 2308 +++ .../x/sys/unix/zerrors_linux_mipsle.go | 2308 +++ .../x/sys/unix/zerrors_linux_ppc64.go | 2361 +++ .../x/sys/unix/zerrors_linux_ppc64le.go | 2361 +++ .../x/sys/unix/zerrors_linux_s390x.go | 2360 +++ .../x/sys/unix/zerrors_linux_sparc64.go | 2142 ++ .../x/sys/unix/zerrors_netbsd_386.go | 1719 ++ .../x/sys/unix/zerrors_netbsd_amd64.go | 1709 ++ .../x/sys/unix/zerrors_netbsd_arm.go | 1698 ++ .../x/sys/unix/zerrors_openbsd_386.go | 1591 ++ .../x/sys/unix/zerrors_openbsd_amd64.go | 1590 ++ .../x/sys/unix/zerrors_openbsd_arm.go | 1593 ++ .../x/sys/unix/zerrors_solaris_amd64.go | 1489 ++ .../golang.org/x/sys/unix/zptrace386_linux.go | 80 + .../golang.org/x/sys/unix/zptracearm_linux.go | 41 + .../x/sys/unix/zptracemips_linux.go | 50 + .../x/sys/unix/zptracemipsle_linux.go | 50 + .../x/sys/unix/zsyscall_darwin_386.go | 1635 ++ .../x/sys/unix/zsyscall_darwin_amd64.go | 1635 ++ .../x/sys/unix/zsyscall_darwin_arm.go | 1635 ++ .../x/sys/unix/zsyscall_darwin_arm64.go | 1635 ++ .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1493 ++ .../x/sys/unix/zsyscall_freebsd_386.go | 1937 ++ .../x/sys/unix/zsyscall_freebsd_amd64.go | 1937 ++ .../x/sys/unix/zsyscall_freebsd_arm.go | 1937 ++ .../x/sys/unix/zsyscall_linux_386.go | 1994 ++ .../x/sys/unix/zsyscall_linux_amd64.go | 2187 ++ .../x/sys/unix/zsyscall_linux_arm.go | 2096 ++ .../x/sys/unix/zsyscall_linux_arm64.go | 2044 ++ .../x/sys/unix/zsyscall_linux_mips.go | 2152 ++ .../x/sys/unix/zsyscall_linux_mips64.go | 2135 ++ .../x/sys/unix/zsyscall_linux_mips64le.go | 2135 ++ .../x/sys/unix/zsyscall_linux_mipsle.go | 2152 ++ .../x/sys/unix/zsyscall_linux_ppc64.go | 2198 ++ .../x/sys/unix/zsyscall_linux_ppc64le.go | 2198 ++ .../x/sys/unix/zsyscall_linux_s390x.go | 1978 ++ .../x/sys/unix/zsyscall_linux_sparc64.go | 1833 ++ .../x/sys/unix/zsyscall_netbsd_386.go | 1399 ++ .../x/sys/unix/zsyscall_netbsd_amd64.go | 1399 ++ .../x/sys/unix/zsyscall_netbsd_arm.go | 1399 ++ .../x/sys/unix/zsyscall_openbsd_386.go | 1457 ++ .../x/sys/unix/zsyscall_openbsd_amd64.go | 1457 ++ .../x/sys/unix/zsyscall_openbsd_arm.go | 1457 ++ .../x/sys/unix/zsyscall_solaris_amd64.go | 1669 ++ .../x/sys/unix/zsysctl_openbsd_386.go | 270 + .../x/sys/unix/zsysctl_openbsd_amd64.go | 270 + .../x/sys/unix/zsysctl_openbsd_arm.go | 270 + .../x/sys/unix/zsysnum_darwin_386.go | 436 + .../x/sys/unix/zsysnum_darwin_amd64.go | 436 + .../x/sys/unix/zsysnum_darwin_arm.go | 436 + .../x/sys/unix/zsysnum_darwin_arm64.go | 436 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 + .../x/sys/unix/zsysnum_freebsd_386.go | 353 + .../x/sys/unix/zsysnum_freebsd_amd64.go | 353 + .../x/sys/unix/zsysnum_freebsd_arm.go | 353 + .../x/sys/unix/zsysnum_linux_386.go | 390 + .../x/sys/unix/zsysnum_linux_amd64.go | 342 + .../x/sys/unix/zsysnum_linux_arm.go | 362 + .../x/sys/unix/zsysnum_linux_arm64.go | 286 + .../x/sys/unix/zsysnum_linux_mips.go | 375 + .../x/sys/unix/zsysnum_linux_mips64.go | 335 + .../x/sys/unix/zsysnum_linux_mips64le.go | 335 + .../x/sys/unix/zsysnum_linux_mipsle.go | 375 + .../x/sys/unix/zsysnum_linux_ppc64.go | 370 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 370 + .../x/sys/unix/zsysnum_linux_s390x.go | 334 + .../x/sys/unix/zsysnum_linux_sparc64.go | 348 + .../x/sys/unix/zsysnum_netbsd_386.go | 274 + .../x/sys/unix/zsysnum_netbsd_amd64.go | 274 + .../x/sys/unix/zsysnum_netbsd_arm.go | 274 + .../x/sys/unix/zsysnum_openbsd_386.go | 207 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 207 + .../x/sys/unix/zsysnum_openbsd_arm.go | 213 + .../x/sys/unix/ztypes_darwin_386.go | 489 + .../x/sys/unix/ztypes_darwin_amd64.go | 499 + .../x/sys/unix/ztypes_darwin_arm.go | 490 + .../x/sys/unix/ztypes_darwin_arm64.go | 499 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 486 + .../x/sys/unix/ztypes_freebsd_386.go | 553 + .../x/sys/unix/ztypes_freebsd_amd64.go | 556 + .../x/sys/unix/ztypes_freebsd_arm.go | 556 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 897 + .../x/sys/unix/ztypes_linux_amd64.go | 915 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 886 + .../x/sys/unix/ztypes_linux_arm64.go | 894 + .../x/sys/unix/ztypes_linux_mips.go | 891 + .../x/sys/unix/ztypes_linux_mips64.go | 896 + .../x/sys/unix/ztypes_linux_mips64le.go | 896 + .../x/sys/unix/ztypes_linux_mipsle.go | 891 + .../x/sys/unix/ztypes_linux_ppc64.go | 904 + .../x/sys/unix/ztypes_linux_ppc64le.go | 904 + .../x/sys/unix/ztypes_linux_s390x.go | 921 + .../x/sys/unix/ztypes_linux_sparc64.go | 690 + .../x/sys/unix/ztypes_netbsd_386.go | 439 + .../x/sys/unix/ztypes_netbsd_amd64.go | 446 + .../x/sys/unix/ztypes_netbsd_arm.go | 444 + .../x/sys/unix/ztypes_openbsd_386.go | 484 + .../x/sys/unix/ztypes_openbsd_amd64.go | 491 + .../x/sys/unix/ztypes_openbsd_arm.go | 477 + .../x/sys/unix/ztypes_solaris_amd64.go | 459 + .../x/sys/windows/asm_windows_386.s | 13 + .../x/sys/windows/asm_windows_amd64.s | 13 + .../golang.org/x/sys/windows/dll_windows.go | 378 + .../golang.org/x/sys/windows/env_windows.go | 29 + vendor/golang.org/x/sys/windows/eventlog.go | 20 + .../golang.org/x/sys/windows/exec_windows.go | 97 + .../x/sys/windows/memory_windows.go | 26 + vendor/golang.org/x/sys/windows/mksyscall.go | 7 + vendor/golang.org/x/sys/windows/race.go | 30 + vendor/golang.org/x/sys/windows/race0.go | 25 + .../x/sys/windows/registry/export_test.go | 11 + .../golang.org/x/sys/windows/registry/key.go | 198 + .../x/sys/windows/registry/mksyscall.go | 7 + .../x/sys/windows/registry/registry_test.go | 756 + .../x/sys/windows/registry/syscall.go | 32 + .../x/sys/windows/registry/value.go | 384 + .../sys/windows/registry/zsyscall_windows.go | 120 + .../x/sys/windows/security_windows.go | 476 + vendor/golang.org/x/sys/windows/service.go | 164 + vendor/golang.org/x/sys/windows/str.go | 22 + .../golang.org/x/sys/windows/svc/debug/log.go | 56 + .../x/sys/windows/svc/debug/service.go | 45 + vendor/golang.org/x/sys/windows/svc/event.go | 48 + .../x/sys/windows/svc/eventlog/install.go | 80 + .../x/sys/windows/svc/eventlog/log.go | 70 + .../x/sys/windows/svc/eventlog/log_test.go | 51 + .../x/sys/windows/svc/example/beep.go | 22 + .../x/sys/windows/svc/example/install.go | 92 + .../x/sys/windows/svc/example/main.go | 76 + .../x/sys/windows/svc/example/manage.go | 62 + .../x/sys/windows/svc/example/service.go | 82 + vendor/golang.org/x/sys/windows/svc/go12.c | 24 + vendor/golang.org/x/sys/windows/svc/go12.go | 11 + vendor/golang.org/x/sys/windows/svc/go13.go | 31 + .../x/sys/windows/svc/mgr/config.go | 139 + .../golang.org/x/sys/windows/svc/mgr/mgr.go | 162 + .../x/sys/windows/svc/mgr/mgr_test.go | 169 + .../x/sys/windows/svc/mgr/service.go | 72 + .../golang.org/x/sys/windows/svc/security.go | 62 + .../golang.org/x/sys/windows/svc/service.go | 363 + .../golang.org/x/sys/windows/svc/svc_test.go | 118 + vendor/golang.org/x/sys/windows/svc/sys_386.s | 68 + .../golang.org/x/sys/windows/svc/sys_amd64.s | 42 + vendor/golang.org/x/sys/windows/syscall.go | 71 + .../golang.org/x/sys/windows/syscall_test.go | 53 + .../x/sys/windows/syscall_windows.go | 1153 ++ .../x/sys/windows/syscall_windows_test.go | 107 + .../golang.org/x/sys/windows/types_windows.go | 1333 ++ .../x/sys/windows/types_windows_386.go | 22 + .../x/sys/windows/types_windows_amd64.go | 22 + .../x/sys/windows/zsyscall_windows.go | 2687 +++ vendor/gopkg.in/yaml.v2/.travis.yml | 12 + vendor/gopkg.in/yaml.v2/LICENSE | 201 + vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + vendor/gopkg.in/yaml.v2/NOTICE | 13 + vendor/gopkg.in/yaml.v2/README.md | 133 + vendor/gopkg.in/yaml.v2/apic.go | 739 + vendor/gopkg.in/yaml.v2/decode.go | 775 + vendor/gopkg.in/yaml.v2/decode_test.go | 1326 ++ vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ++ vendor/gopkg.in/yaml.v2/encode.go | 362 + vendor/gopkg.in/yaml.v2/encode_test.go | 595 + .../gopkg.in/yaml.v2/example_embedded_test.go | 41 + vendor/gopkg.in/yaml.v2/go.mod | 5 + vendor/gopkg.in/yaml.v2/parserc.go | 1095 + vendor/gopkg.in/yaml.v2/readerc.go | 412 + vendor/gopkg.in/yaml.v2/resolve.go | 258 + vendor/gopkg.in/yaml.v2/scannerc.go | 2696 +++ vendor/gopkg.in/yaml.v2/sorter.go | 113 + vendor/gopkg.in/yaml.v2/suite_test.go | 12 + vendor/gopkg.in/yaml.v2/writerc.go | 26 + vendor/gopkg.in/yaml.v2/yaml.go | 466 + vendor/gopkg.in/yaml.v2/yamlh.go | 738 + vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 + 6361 files changed, 1180900 insertions(+), 3 deletions(-) create mode 100644 vendor/github.com/Microsoft/go-winio/.gitignore create mode 100644 vendor/github.com/Microsoft/go-winio/LICENSE create mode 100644 vendor/github.com/Microsoft/go-winio/README.md create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/common.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/reader.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/gnu-multi-hdrs.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/gnu.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/hardlink.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/hdr-only.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue10968.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue11169.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue12435.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/neg-size.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/nil-uid.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax-multi-hdrs.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax-path-hdr.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small.txt create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small2.txt create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/sparse-formats.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/star.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/ustar-file-reg.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/ustar.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/v7.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer-big-long.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer-big.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/xattrs.tar create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/writer.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/backup.go create mode 100644 vendor/github.com/Microsoft/go-winio/backup_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/noop.go create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/tar.go create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/ea.go create mode 100644 vendor/github.com/Microsoft/go-winio/ea_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/file.go create mode 100644 vendor/github.com/Microsoft/go-winio/fileinfo.go create mode 100644 vendor/github.com/Microsoft/go-winio/pipe.go create mode 100644 vendor/github.com/Microsoft/go-winio/pipe_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/privilege.go create mode 100644 vendor/github.com/Microsoft/go-winio/privileges_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/reparse.go create mode 100644 vendor/github.com/Microsoft/go-winio/sd.go create mode 100644 vendor/github.com/Microsoft/go-winio/sd_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/syscall.go create mode 100644 vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go create mode 100644 vendor/github.com/Microsoft/go-winio/vhd/vhd.go create mode 100644 vendor/github.com/Microsoft/go-winio/vhd/zvhd.go create mode 100644 vendor/github.com/Microsoft/go-winio/wim/decompress.go create mode 100644 vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go create mode 100644 vendor/github.com/Microsoft/go-winio/wim/validate/validate.go create mode 100644 vendor/github.com/Microsoft/go-winio/wim/wim.go create mode 100644 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go create mode 100644 vendor/github.com/davecgh/go-spew/.gitignore create mode 100644 vendor/github.com/davecgh/go-spew/.travis.yml create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/README.md create mode 100644 vendor/github.com/davecgh/go-spew/cov_report.sh create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/example_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/internal_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go create mode 100644 vendor/github.com/davecgh/go-spew/test_coverage.txt create mode 100644 vendor/github.com/docker/distribution/.gitignore create mode 100644 vendor/github.com/docker/distribution/.mailmap create mode 100644 vendor/github.com/docker/distribution/AUTHORS create mode 100644 vendor/github.com/docker/distribution/BUILDING.md create mode 100644 vendor/github.com/docker/distribution/CHANGELOG.md create mode 100644 vendor/github.com/docker/distribution/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/distribution/Dockerfile create mode 100644 vendor/github.com/docker/distribution/Godeps/Godeps.json create mode 100644 vendor/github.com/docker/distribution/Godeps/Readme create mode 100644 vendor/github.com/docker/distribution/LICENSE create mode 100644 vendor/github.com/docker/distribution/MAINTAINERS create mode 100644 vendor/github.com/docker/distribution/Makefile create mode 100644 vendor/github.com/docker/distribution/README.md create mode 100644 vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md create mode 100644 vendor/github.com/docker/distribution/ROADMAP.md create mode 100644 vendor/github.com/docker/distribution/blobs.go create mode 100644 vendor/github.com/docker/distribution/circle.yml create mode 100644 vendor/github.com/docker/distribution/cmd/digest/main.go create mode 100644 vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go create mode 100644 vendor/github.com/docker/distribution/cmd/registry/config-cache.yml create mode 100644 vendor/github.com/docker/distribution/cmd/registry/config-dev.yml create mode 100644 vendor/github.com/docker/distribution/cmd/registry/config-example.yml create mode 100644 vendor/github.com/docker/distribution/cmd/registry/main.go create mode 100644 vendor/github.com/docker/distribution/configuration/configuration.go create mode 100644 vendor/github.com/docker/distribution/configuration/configuration_test.go create mode 100644 vendor/github.com/docker/distribution/configuration/parser.go create mode 100644 vendor/github.com/docker/distribution/context/context.go create mode 100644 vendor/github.com/docker/distribution/context/doc.go create mode 100644 vendor/github.com/docker/distribution/context/http.go create mode 100644 vendor/github.com/docker/distribution/context/http_test.go create mode 100644 vendor/github.com/docker/distribution/context/logger.go create mode 100644 vendor/github.com/docker/distribution/context/trace.go create mode 100644 vendor/github.com/docker/distribution/context/trace_test.go create mode 100644 vendor/github.com/docker/distribution/context/util.go create mode 100644 vendor/github.com/docker/distribution/context/version.go create mode 100644 vendor/github.com/docker/distribution/context/version_test.go create mode 100644 vendor/github.com/docker/distribution/contrib/apache/README.MD create mode 100644 vendor/github.com/docker/distribution/contrib/apache/apache.conf create mode 100644 vendor/github.com/docker/distribution/contrib/compose/README.md create mode 100644 vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml create mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile create mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf create mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf create mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf create mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/README.md create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/install_certs.sh create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.cert create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.key create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/malevolent.bats create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/v1/search.json create mode 100755 vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/token.bats create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/.htpasswd create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/Dockerfile create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.cert create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.key create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config.yml create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/.htpasswd create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/Dockerfile create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.key create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.cert create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.key create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.cert create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.key create mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/registry-config.yml create mode 100644 vendor/github.com/docker/distribution/contrib/token-server/errors.go create mode 100644 vendor/github.com/docker/distribution/contrib/token-server/main.go create mode 100644 vendor/github.com/docker/distribution/contrib/token-server/token.go create mode 100755 vendor/github.com/docker/distribution/coverpkg.sh create mode 100644 vendor/github.com/docker/distribution/digest/digest.go create mode 100644 vendor/github.com/docker/distribution/digest/digest_test.go create mode 100644 vendor/github.com/docker/distribution/digest/digester.go create mode 100644 vendor/github.com/docker/distribution/digest/digester_resumable_test.go create mode 100644 vendor/github.com/docker/distribution/digest/doc.go create mode 100644 vendor/github.com/docker/distribution/digest/set.go create mode 100644 vendor/github.com/docker/distribution/digest/set_test.go create mode 100644 vendor/github.com/docker/distribution/digest/verifiers.go create mode 100644 vendor/github.com/docker/distribution/digest/verifiers_test.go create mode 100644 vendor/github.com/docker/distribution/doc.go create mode 100644 vendor/github.com/docker/distribution/docs/README.md create mode 100644 vendor/github.com/docker/distribution/docs/architecture.md create mode 100644 vendor/github.com/docker/distribution/docs/configuration.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/api.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/api.md.tmpl create mode 100644 vendor/github.com/docker/distribution/docs/spec/auth/index.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/auth/jwt.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/auth/oauth.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/auth/scope.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/auth/token.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/implementations.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/index.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/json.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md create mode 100644 vendor/github.com/docker/distribution/docs/spec/menu.md create mode 100644 vendor/github.com/docker/distribution/errors.go create mode 100644 vendor/github.com/docker/distribution/health/api/api.go create mode 100644 vendor/github.com/docker/distribution/health/api/api_test.go create mode 100644 vendor/github.com/docker/distribution/health/checks/checks.go create mode 100644 vendor/github.com/docker/distribution/health/checks/checks_test.go create mode 100644 vendor/github.com/docker/distribution/health/doc.go create mode 100644 vendor/github.com/docker/distribution/health/health.go create mode 100644 vendor/github.com/docker/distribution/health/health_test.go create mode 100644 vendor/github.com/docker/distribution/manifest/doc.go create mode 100644 vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go create mode 100644 vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/config_builder.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/manifest.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/sign.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/verify.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema2/builder.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema2/builder_test.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema2/manifest.go create mode 100644 vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go create mode 100644 vendor/github.com/docker/distribution/manifest/versioned.go create mode 100644 vendor/github.com/docker/distribution/manifests.go create mode 100644 vendor/github.com/docker/distribution/notifications/bridge.go create mode 100644 vendor/github.com/docker/distribution/notifications/bridge_test.go create mode 100644 vendor/github.com/docker/distribution/notifications/endpoint.go create mode 100644 vendor/github.com/docker/distribution/notifications/event.go create mode 100644 vendor/github.com/docker/distribution/notifications/event_test.go create mode 100644 vendor/github.com/docker/distribution/notifications/http.go create mode 100644 vendor/github.com/docker/distribution/notifications/http_test.go create mode 100644 vendor/github.com/docker/distribution/notifications/listener.go create mode 100644 vendor/github.com/docker/distribution/notifications/listener_test.go create mode 100644 vendor/github.com/docker/distribution/notifications/metrics.go create mode 100644 vendor/github.com/docker/distribution/notifications/sinks.go create mode 100644 vendor/github.com/docker/distribution/notifications/sinks_test.go create mode 100644 vendor/github.com/docker/distribution/project/dev-image/Dockerfile create mode 100644 vendor/github.com/docker/distribution/project/hooks/README.md create mode 100755 vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh create mode 100755 vendor/github.com/docker/distribution/project/hooks/pre-commit create mode 100644 vendor/github.com/docker/distribution/reference/reference.go create mode 100644 vendor/github.com/docker/distribution/reference/reference_test.go create mode 100644 vendor/github.com/docker/distribution/reference/regexp.go create mode 100644 vendor/github.com/docker/distribution/reference/regexp_test.go create mode 100644 vendor/github.com/docker/distribution/registry.go create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors.go create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/handler.go create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/register.go create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/descriptors.go create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/doc.go create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/errors.go create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/headerparser.go create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/routes.go create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/routes_test.go create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/urls.go create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/urls_test.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/auth.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/silly/access.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/silly/access_test.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/token/stringset.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/token/token.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/token/token_test.go create mode 100644 vendor/github.com/docker/distribution/registry/auth/token/util.go create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/api_version.go create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/session.go create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/session_test.go create mode 100644 vendor/github.com/docker/distribution/registry/client/blob_writer.go create mode 100644 vendor/github.com/docker/distribution/registry/client/blob_writer_test.go create mode 100644 vendor/github.com/docker/distribution/registry/client/errors.go create mode 100644 vendor/github.com/docker/distribution/registry/client/errors_test.go create mode 100644 vendor/github.com/docker/distribution/registry/client/repository.go create mode 100644 vendor/github.com/docker/distribution/registry/client/repository_test.go create mode 100644 vendor/github.com/docker/distribution/registry/client/transport/http_reader.go create mode 100644 vendor/github.com/docker/distribution/registry/client/transport/transport.go create mode 100644 vendor/github.com/docker/distribution/registry/doc.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/api_test.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/app.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/app_test.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/basicauth.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/blob.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/blobupload.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/catalog.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/context.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/health_test.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/helpers.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/hmac.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/hmac_test.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/hooks.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/images.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/mail.go create mode 100644 vendor/github.com/docker/distribution/registry/handlers/tags.go create mode 100644 vendor/github.com/docker/distribution/registry/listener/listener.go create mode 100644 vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go create mode 100644 vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxyauth.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxytagservice.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxytagservice_test.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go create mode 100644 vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go create mode 100644 vendor/github.com/docker/distribution/registry/registry.go create mode 100644 vendor/github.com/docker/distribution/registry/registry_test.go create mode 100644 vendor/github.com/docker/distribution/registry/root.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blob_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobserver.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobstore.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobwriter.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cache.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/catalog.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/catalog_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/doc.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/base/base.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/gcs/doc.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/testdriver/testdriver.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/filereader.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/filereader_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/garbagecollect.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/io.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/manifeststore.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/paths.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/paths_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/purgeuploads.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/registry.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/tagstore.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/tagstore_test.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/util.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/vacuum.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/walk.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/walk_test.go create mode 100644 vendor/github.com/docker/distribution/tags.go create mode 100644 vendor/github.com/docker/distribution/testutil/handler.go create mode 100644 vendor/github.com/docker/distribution/testutil/manifests.go create mode 100644 vendor/github.com/docker/distribution/testutil/tarfile.go create mode 100644 vendor/github.com/docker/distribution/uuid/uuid.go create mode 100644 vendor/github.com/docker/distribution/uuid/uuid_test.go create mode 100644 vendor/github.com/docker/distribution/version/print.go create mode 100644 vendor/github.com/docker/distribution/version/version.go create mode 100755 vendor/github.com/docker/distribution/version/version.sh create mode 100644 vendor/github.com/docker/docker/.dockerignore create mode 100644 vendor/github.com/docker/docker/.github/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/docker/docker/.gitignore create mode 100644 vendor/github.com/docker/docker/.mailmap create mode 100644 vendor/github.com/docker/docker/AUTHORS create mode 100644 vendor/github.com/docker/docker/CHANGELOG.md create mode 100644 vendor/github.com/docker/docker/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/docker/Dockerfile create mode 100644 vendor/github.com/docker/docker/Dockerfile.aarch64 create mode 100644 vendor/github.com/docker/docker/Dockerfile.armhf create mode 100644 vendor/github.com/docker/docker/Dockerfile.ppc64le create mode 100644 vendor/github.com/docker/docker/Dockerfile.s390x create mode 100644 vendor/github.com/docker/docker/Dockerfile.simple create mode 100644 vendor/github.com/docker/docker/Dockerfile.solaris create mode 100644 vendor/github.com/docker/docker/Dockerfile.windows create mode 100644 vendor/github.com/docker/docker/LICENSE create mode 100644 vendor/github.com/docker/docker/MAINTAINERS create mode 100644 vendor/github.com/docker/docker/Makefile create mode 100644 vendor/github.com/docker/docker/NOTICE create mode 100644 vendor/github.com/docker/docker/README.md create mode 100644 vendor/github.com/docker/docker/ROADMAP.md create mode 100644 vendor/github.com/docker/docker/VENDORING.md create mode 100644 vendor/github.com/docker/docker/VERSION create mode 100644 vendor/github.com/docker/docker/api/README.md create mode 100644 vendor/github.com/docker/docker/api/common.go create mode 100644 vendor/github.com/docker/docker/api/common_test.go create mode 100644 vendor/github.com/docker/docker/api/common_unix.go create mode 100644 vendor/github.com/docker/docker/api/common_windows.go create mode 100644 vendor/github.com/docker/docker/api/errors/errors.go create mode 100644 vendor/github.com/docker/docker/api/fixtures/keyfile create mode 100644 vendor/github.com/docker/docker/api/server/httputils/decoder.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/errors.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/form.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/form_test.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/cors.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/debug.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/experimental.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/middleware.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/version.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/version_test.go create mode 100644 vendor/github.com/docker/docker/api/server/profiler.go create mode 100644 vendor/github.com/docker/docker/api/server/router/build/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/build/build.go create mode 100644 vendor/github.com/docker/docker/api/server/router/build/build_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go create mode 100644 vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/container.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/container_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/copy.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/exec.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/inspect.go create mode 100644 vendor/github.com/docker/docker/api/server/router/experimental.go create mode 100644 vendor/github.com/docker/docker/api/server/router/image/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/image/image.go create mode 100644 vendor/github.com/docker/docker/api/server/router/image/image_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/filter.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/network.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/network_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/plugin/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/plugin/plugin.go create mode 100644 vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/router.go create mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/cluster.go create mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/system/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/system/system.go create mode 100644 vendor/github.com/docker/docker/api/server/router/system/system_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume.go create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router_swapper.go create mode 100644 vendor/github.com/docker/docker/api/server/server.go create mode 100644 vendor/github.com/docker/docker/api/server/server_test.go create mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml create mode 100644 vendor/github.com/docker/docker/api/swagger.yaml create mode 100644 vendor/github.com/docker/docker/api/templates/server/operation.gotmpl create mode 100644 vendor/github.com/docker/docker/api/types/auth.go create mode 100644 vendor/github.com/docker/docker/api/types/backend/backend.go create mode 100644 vendor/github.com/docker/docker/api/types/blkiodev/blkio.go create mode 100644 vendor/github.com/docker/docker/api/types/client.go create mode 100644 vendor/github.com/docker/docker/api/types/configs.go create mode 100644 vendor/github.com/docker/docker/api/types/container/config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_create.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_wait.go create mode 100644 vendor/github.com/docker/docker/api/types/container/host_config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go create mode 100644 vendor/github.com/docker/docker/api/types/error_response.go create mode 100644 vendor/github.com/docker/docker/api/types/events/events.go create mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go create mode 100644 vendor/github.com/docker/docker/api/types/filters/parse_test.go create mode 100644 vendor/github.com/docker/docker/api/types/id_response.go create mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go create mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go create mode 100644 vendor/github.com/docker/docker/api/types/network/network.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go create mode 100644 vendor/github.com/docker/docker/api/types/port.go create mode 100644 vendor/github.com/docker/docker/api/types/reference/image_reference.go create mode 100644 vendor/github.com/docker/docker/api/types/reference/image_reference_test.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go create mode 100644 vendor/github.com/docker/docker/api/types/seccomp.go create mode 100644 vendor/github.com/docker/docker/api/types/service_update_response.go create mode 100644 vendor/github.com/docker/docker/api/types/stats.go create mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go create mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice_test.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/common.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go create mode 100644 vendor/github.com/docker/docker/api/types/time/duration_convert.go create mode 100644 vendor/github.com/docker/docker/api/types/time/duration_convert_test.go create mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp.go create mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp_test.go create mode 100644 vendor/github.com/docker/docker/api/types/types.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md create mode 100644 vendor/github.com/docker/docker/api/types/versions/compare.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/compare_test.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/v1p19/types.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/v1p20/types.go create mode 100644 vendor/github.com/docker/docker/api/types/volume.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volumes_create.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volumes_list.go create mode 100644 vendor/github.com/docker/docker/builder/builder.go create mode 100644 vendor/github.com/docker/docker/builder/context.go create mode 100644 vendor/github.com/docker/docker/builder/context_test.go create mode 100644 vendor/github.com/docker/docker/builder/context_unix.go create mode 100644 vendor/github.com/docker/docker/builder/context_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/bflag.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/bflag_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/command/command.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/envVarTest create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/support.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/support_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/utils_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/wordsTest create mode 100644 vendor/github.com/docker/docker/builder/dockerignore.go create mode 100644 vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go create mode 100644 vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerignore_test.go create mode 100644 vendor/github.com/docker/docker/builder/git.go create mode 100644 vendor/github.com/docker/docker/builder/remote.go create mode 100644 vendor/github.com/docker/docker/builder/remote_test.go create mode 100644 vendor/github.com/docker/docker/builder/tarsum.go create mode 100644 vendor/github.com/docker/docker/builder/tarsum_test.go create mode 100644 vendor/github.com/docker/docker/builder/utils_test.go create mode 100644 vendor/github.com/docker/docker/cli/cobra.go create mode 100644 vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go create mode 100644 vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/cli.go create mode 100644 vendor/github.com/docker/docker/cli/command/commands/commands.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/attach.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/commit.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/cp.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/diff.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/exec.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/exec_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/export.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/hijack.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/kill.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/logs.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/pause.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/port.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/ps_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/rename.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/restart.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/rm.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/run.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/start.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/stats.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/stats_helpers.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/stop.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/top.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/tty.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/unpause.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/update.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/utils.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/wait.go create mode 100644 vendor/github.com/docker/docker/cli/command/events_utils.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/container.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/container_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/custom.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/custom_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/formatter.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/image.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/image_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/network.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/network_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/reflect.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/service.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/stats.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/stats_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/volume.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/volume_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/build.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/history.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/import.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/load.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/pull.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/push.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/save.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/tag.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/trust.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/trust_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/in.go create mode 100644 vendor/github.com/docker/docker/cli/command/inspect/inspector.go create mode 100644 vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/connect.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/disconnect.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/demote.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/opts.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/promote.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/ps.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/update.go create mode 100644 vendor/github.com/docker/docker/cli/command/out.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/disable.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/enable.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/install.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/push.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/set.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/upgrade.go create mode 100644 vendor/github.com/docker/docker/cli/command/prune/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/registry.go create mode 100644 vendor/github.com/docker/docker/cli/command/registry/login.go create mode 100644 vendor/github.com/docker/docker/cli/command/registry/logout.go create mode 100644 vendor/github.com/docker/docker/cli/command/registry/search.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/ls.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/utils.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/inspect_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/logs.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/opts.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/opts_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/parse.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/ps.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/scale.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/trust.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/update.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/update_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/common.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/deploy.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/opts.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/ps.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/services.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/init.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/join.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/join_token.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/leave.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/opts.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/opts_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/unlock.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/update.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/df.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/events.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/info.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/version.go create mode 100644 vendor/github.com/docker/docker/cli/command/task/print.go create mode 100644 vendor/github.com/docker/docker/cli/command/trust.go create mode 100644 vendor/github.com/docker/docker/cli/command/utils.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/remove.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/compose.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/compose_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/service.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/service_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/volume.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/volume_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go create mode 100644 vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/loader/example1.env create mode 100644 vendor/github.com/docker/docker/cli/compose/loader/example2.env create mode 100644 vendor/github.com/docker/docker/cli/compose/loader/full-example.yml create mode 100644 vendor/github.com/docker/docker/cli/compose/loader/loader.go create mode 100644 vendor/github.com/docker/docker/cli/compose/loader/loader_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/bindata.go create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/schema.go create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/schema_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/template/template.go create mode 100644 vendor/github.com/docker/docker/cli/compose/template/template_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/types/types.go create mode 100644 vendor/github.com/docker/docker/cli/error.go create mode 100644 vendor/github.com/docker/docker/cli/flags/client.go create mode 100644 vendor/github.com/docker/docker/cli/flags/common.go create mode 100644 vendor/github.com/docker/docker/cli/flags/common_test.go create mode 100644 vendor/github.com/docker/docker/cli/required.go create mode 100644 vendor/github.com/docker/docker/cli/trust/trust.go create mode 100644 vendor/github.com/docker/docker/cliconfig/config.go create mode 100644 vendor/github.com/docker/docker/cliconfig/config_test.go create mode 100644 vendor/github.com/docker/docker/cliconfig/configfile/file.go create mode 100644 vendor/github.com/docker/docker/cliconfig/configfile/file_test.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/credentials.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/file_store.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/native_store.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go create mode 100644 vendor/github.com/docker/docker/client/README.md create mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_create_test.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete_test.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_list_test.go create mode 100644 vendor/github.com/docker/docker/client/client.go create mode 100644 vendor/github.com/docker/docker/client/client_mock_test.go create mode 100644 vendor/github.com/docker/docker/client/client_test.go create mode 100644 vendor/github.com/docker/docker/client/client_unix.go create mode 100644 vendor/github.com/docker/docker/client/client_windows.go create mode 100644 vendor/github.com/docker/docker/client/container_attach.go create mode 100644 vendor/github.com/docker/docker/client/container_commit.go create mode 100644 vendor/github.com/docker/docker/client/container_commit_test.go create mode 100644 vendor/github.com/docker/docker/client/container_copy.go create mode 100644 vendor/github.com/docker/docker/client/container_copy_test.go create mode 100644 vendor/github.com/docker/docker/client/container_create.go create mode 100644 vendor/github.com/docker/docker/client/container_create_test.go create mode 100644 vendor/github.com/docker/docker/client/container_diff.go create mode 100644 vendor/github.com/docker/docker/client/container_diff_test.go create mode 100644 vendor/github.com/docker/docker/client/container_exec.go create mode 100644 vendor/github.com/docker/docker/client/container_exec_test.go create mode 100644 vendor/github.com/docker/docker/client/container_export.go create mode 100644 vendor/github.com/docker/docker/client/container_export_test.go create mode 100644 vendor/github.com/docker/docker/client/container_inspect.go create mode 100644 vendor/github.com/docker/docker/client/container_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/container_kill.go create mode 100644 vendor/github.com/docker/docker/client/container_kill_test.go create mode 100644 vendor/github.com/docker/docker/client/container_list.go create mode 100644 vendor/github.com/docker/docker/client/container_list_test.go create mode 100644 vendor/github.com/docker/docker/client/container_logs.go create mode 100644 vendor/github.com/docker/docker/client/container_logs_test.go create mode 100644 vendor/github.com/docker/docker/client/container_pause.go create mode 100644 vendor/github.com/docker/docker/client/container_pause_test.go create mode 100644 vendor/github.com/docker/docker/client/container_prune.go create mode 100644 vendor/github.com/docker/docker/client/container_remove.go create mode 100644 vendor/github.com/docker/docker/client/container_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/container_rename.go create mode 100644 vendor/github.com/docker/docker/client/container_rename_test.go create mode 100644 vendor/github.com/docker/docker/client/container_resize.go create mode 100644 vendor/github.com/docker/docker/client/container_resize_test.go create mode 100644 vendor/github.com/docker/docker/client/container_restart.go create mode 100644 vendor/github.com/docker/docker/client/container_restart_test.go create mode 100644 vendor/github.com/docker/docker/client/container_start.go create mode 100644 vendor/github.com/docker/docker/client/container_start_test.go create mode 100644 vendor/github.com/docker/docker/client/container_stats.go create mode 100644 vendor/github.com/docker/docker/client/container_stats_test.go create mode 100644 vendor/github.com/docker/docker/client/container_stop.go create mode 100644 vendor/github.com/docker/docker/client/container_stop_test.go create mode 100644 vendor/github.com/docker/docker/client/container_top.go create mode 100644 vendor/github.com/docker/docker/client/container_top_test.go create mode 100644 vendor/github.com/docker/docker/client/container_unpause.go create mode 100644 vendor/github.com/docker/docker/client/container_unpause_test.go create mode 100644 vendor/github.com/docker/docker/client/container_update.go create mode 100644 vendor/github.com/docker/docker/client/container_update_test.go create mode 100644 vendor/github.com/docker/docker/client/container_wait.go create mode 100644 vendor/github.com/docker/docker/client/container_wait_test.go create mode 100644 vendor/github.com/docker/docker/client/disk_usage.go create mode 100644 vendor/github.com/docker/docker/client/errors.go create mode 100644 vendor/github.com/docker/docker/client/events.go create mode 100644 vendor/github.com/docker/docker/client/events_test.go create mode 100644 vendor/github.com/docker/docker/client/hijack.go create mode 100644 vendor/github.com/docker/docker/client/image_build.go create mode 100644 vendor/github.com/docker/docker/client/image_build_test.go create mode 100644 vendor/github.com/docker/docker/client/image_create.go create mode 100644 vendor/github.com/docker/docker/client/image_create_test.go create mode 100644 vendor/github.com/docker/docker/client/image_history.go create mode 100644 vendor/github.com/docker/docker/client/image_history_test.go create mode 100644 vendor/github.com/docker/docker/client/image_import.go create mode 100644 vendor/github.com/docker/docker/client/image_import_test.go create mode 100644 vendor/github.com/docker/docker/client/image_inspect.go create mode 100644 vendor/github.com/docker/docker/client/image_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/image_list.go create mode 100644 vendor/github.com/docker/docker/client/image_list_test.go create mode 100644 vendor/github.com/docker/docker/client/image_load.go create mode 100644 vendor/github.com/docker/docker/client/image_load_test.go create mode 100644 vendor/github.com/docker/docker/client/image_prune.go create mode 100644 vendor/github.com/docker/docker/client/image_pull.go create mode 100644 vendor/github.com/docker/docker/client/image_pull_test.go create mode 100644 vendor/github.com/docker/docker/client/image_push.go create mode 100644 vendor/github.com/docker/docker/client/image_push_test.go create mode 100644 vendor/github.com/docker/docker/client/image_remove.go create mode 100644 vendor/github.com/docker/docker/client/image_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/image_save.go create mode 100644 vendor/github.com/docker/docker/client/image_save_test.go create mode 100644 vendor/github.com/docker/docker/client/image_search.go create mode 100644 vendor/github.com/docker/docker/client/image_search_test.go create mode 100644 vendor/github.com/docker/docker/client/image_tag.go create mode 100644 vendor/github.com/docker/docker/client/image_tag_test.go create mode 100644 vendor/github.com/docker/docker/client/info.go create mode 100644 vendor/github.com/docker/docker/client/info_test.go create mode 100644 vendor/github.com/docker/docker/client/interface.go create mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go create mode 100644 vendor/github.com/docker/docker/client/interface_stable.go create mode 100644 vendor/github.com/docker/docker/client/login.go create mode 100644 vendor/github.com/docker/docker/client/network_connect.go create mode 100644 vendor/github.com/docker/docker/client/network_connect_test.go create mode 100644 vendor/github.com/docker/docker/client/network_create.go create mode 100644 vendor/github.com/docker/docker/client/network_create_test.go create mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go create mode 100644 vendor/github.com/docker/docker/client/network_disconnect_test.go create mode 100644 vendor/github.com/docker/docker/client/network_inspect.go create mode 100644 vendor/github.com/docker/docker/client/network_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/network_list.go create mode 100644 vendor/github.com/docker/docker/client/network_list_test.go create mode 100644 vendor/github.com/docker/docker/client/network_prune.go create mode 100644 vendor/github.com/docker/docker/client/network_remove.go create mode 100644 vendor/github.com/docker/docker/client/network_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/node_inspect.go create mode 100644 vendor/github.com/docker/docker/client/node_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/node_list.go create mode 100644 vendor/github.com/docker/docker/client/node_list_test.go create mode 100644 vendor/github.com/docker/docker/client/node_remove.go create mode 100644 vendor/github.com/docker/docker/client/node_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/node_update.go create mode 100644 vendor/github.com/docker/docker/client/node_update_test.go create mode 100644 vendor/github.com/docker/docker/client/ping.go create mode 100644 vendor/github.com/docker/docker/client/plugin_create.go create mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_disable_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_enable_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go create mode 100644 vendor/github.com/docker/docker/client/plugin_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_install.go create mode 100644 vendor/github.com/docker/docker/client/plugin_list.go create mode 100644 vendor/github.com/docker/docker/client/plugin_list_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_push.go create mode 100644 vendor/github.com/docker/docker/client/plugin_push_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go create mode 100644 vendor/github.com/docker/docker/client/plugin_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_set.go create mode 100644 vendor/github.com/docker/docker/client/plugin_set_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go create mode 100644 vendor/github.com/docker/docker/client/request.go create mode 100644 vendor/github.com/docker/docker/client/request_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_create.go create mode 100644 vendor/github.com/docker/docker/client/secret_create_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go create mode 100644 vendor/github.com/docker/docker/client/secret_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_list.go create mode 100644 vendor/github.com/docker/docker/client/secret_list_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_remove.go create mode 100644 vendor/github.com/docker/docker/client/secret_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_update.go create mode 100644 vendor/github.com/docker/docker/client/secret_update_test.go create mode 100644 vendor/github.com/docker/docker/client/service_create.go create mode 100644 vendor/github.com/docker/docker/client/service_create_test.go create mode 100644 vendor/github.com/docker/docker/client/service_inspect.go create mode 100644 vendor/github.com/docker/docker/client/service_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/service_list.go create mode 100644 vendor/github.com/docker/docker/client/service_list_test.go create mode 100644 vendor/github.com/docker/docker/client/service_logs.go create mode 100644 vendor/github.com/docker/docker/client/service_logs_test.go create mode 100644 vendor/github.com/docker/docker/client/service_remove.go create mode 100644 vendor/github.com/docker/docker/client/service_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/service_update.go create mode 100644 vendor/github.com/docker/docker/client/service_update_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go create mode 100644 vendor/github.com/docker/docker/client/swarm_init.go create mode 100644 vendor/github.com/docker/docker/client/swarm_init_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go create mode 100644 vendor/github.com/docker/docker/client/swarm_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_join.go create mode 100644 vendor/github.com/docker/docker/client/swarm_join_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go create mode 100644 vendor/github.com/docker/docker/client/swarm_leave_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go create mode 100644 vendor/github.com/docker/docker/client/swarm_update.go create mode 100644 vendor/github.com/docker/docker/client/swarm_update_test.go create mode 100644 vendor/github.com/docker/docker/client/task_inspect.go create mode 100644 vendor/github.com/docker/docker/client/task_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/task_list.go create mode 100644 vendor/github.com/docker/docker/client/task_list_test.go create mode 100644 vendor/github.com/docker/docker/client/testdata/ca.pem create mode 100644 vendor/github.com/docker/docker/client/testdata/cert.pem create mode 100644 vendor/github.com/docker/docker/client/testdata/key.pem create mode 100644 vendor/github.com/docker/docker/client/transport.go create mode 100644 vendor/github.com/docker/docker/client/utils.go create mode 100644 vendor/github.com/docker/docker/client/version.go create mode 100644 vendor/github.com/docker/docker/client/volume_create.go create mode 100644 vendor/github.com/docker/docker/client/volume_create_test.go create mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go create mode 100644 vendor/github.com/docker/docker/client/volume_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/volume_list.go create mode 100644 vendor/github.com/docker/docker/client/volume_list_test.go create mode 100644 vendor/github.com/docker/docker/client/volume_prune.go create mode 100644 vendor/github.com/docker/docker/client/volume_remove.go create mode 100644 vendor/github.com/docker/docker/client/volume_remove_test.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_none.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/docker.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/docker_test.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/docker_windows.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/README.md create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/docker.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/metrics.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/service_windows.go create mode 100644 vendor/github.com/docker/docker/container/archive.go create mode 100644 vendor/github.com/docker/docker/container/container.go create mode 100644 vendor/github.com/docker/docker/container/container_linux.go create mode 100644 vendor/github.com/docker/docker/container/container_notlinux.go create mode 100644 vendor/github.com/docker/docker/container/container_unit_test.go create mode 100644 vendor/github.com/docker/docker/container/container_unix.go create mode 100644 vendor/github.com/docker/docker/container/container_windows.go create mode 100644 vendor/github.com/docker/docker/container/health.go create mode 100644 vendor/github.com/docker/docker/container/history.go create mode 100644 vendor/github.com/docker/docker/container/memory_store.go create mode 100644 vendor/github.com/docker/docker/container/memory_store_test.go create mode 100644 vendor/github.com/docker/docker/container/monitor.go create mode 100644 vendor/github.com/docker/docker/container/mounts_unix.go create mode 100644 vendor/github.com/docker/docker/container/mounts_windows.go create mode 100644 vendor/github.com/docker/docker/container/state.go create mode 100644 vendor/github.com/docker/docker/container/state_solaris.go create mode 100644 vendor/github.com/docker/docker/container/state_test.go create mode 100644 vendor/github.com/docker/docker/container/state_unix.go create mode 100644 vendor/github.com/docker/docker/container/state_windows.go create mode 100644 vendor/github.com/docker/docker/container/store.go create mode 100644 vendor/github.com/docker/docker/container/stream/streams.go create mode 100644 vendor/github.com/docker/docker/contrib/README.md create mode 100644 vendor/github.com/docker/docker/contrib/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/apparmor/main.go create mode 100644 vendor/github.com/docker/docker/contrib/apparmor/template.go create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md create mode 100755 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/check-config.sh create mode 100644 vendor/github.com/docker/docker/contrib/completion/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/completion/bash/docker create mode 100644 vendor/github.com/docker/docker/contrib/completion/fish/docker.fish create mode 100644 vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt create mode 100644 vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/completion/zsh/_docker create mode 100644 vendor/github.com/docker/docker/contrib/desktop-integration/README.md create mode 100644 vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/docker-device-tool/README.md create mode 100644 vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go create mode 100644 vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go create mode 100755 vendor/github.com/docker/docker/contrib/dockerize-disk.sh create mode 100755 vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh create mode 100755 vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh create mode 100644 vendor/github.com/docker/docker/contrib/editorconfig create mode 100644 vendor/github.com/docker/docker/contrib/gitdm/aliases create mode 100644 vendor/github.com/docker/docker/contrib/gitdm/domain-map create mode 100755 vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh create mode 100644 vendor/github.com/docker/docker/contrib/gitdm/gitdm.config create mode 100644 vendor/github.com/docker/docker/contrib/httpserver/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris create mode 100644 vendor/github.com/docker/docker/contrib/httpserver/server.go create mode 100644 vendor/github.com/docker/docker/contrib/init/openrc/docker.confd create mode 100644 vendor/github.com/docker/docker/contrib/init/openrc/docker.initd create mode 100644 vendor/github.com/docker/docker/contrib/init/systemd/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/init/systemd/docker.service create mode 100644 vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm create mode 100644 vendor/github.com/docker/docker/contrib/init/systemd/docker.socket create mode 100755 vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker create mode 100644 vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker.default create mode 100755 vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker create mode 100644 vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker.sysconfig create mode 100644 vendor/github.com/docker/docker/contrib/init/upstart/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/init/upstart/docker.conf create mode 100755 vendor/github.com/docker/docker/contrib/mac-install-bundle.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-alpine.sh create mode 100644 vendor/github.com/docker/docker/contrib/mkimage-arch-pacman.conf create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-arch.sh create mode 100644 vendor/github.com/docker/docker/contrib/mkimage-archarm-pacman.conf create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-busybox.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-crux.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-pld.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-rinse.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-yum.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/.febootstrap-minimize create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/busybox-static create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/debootstrap create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/mageia-urpmi create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/rinse create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/solaris create mode 100644 vendor/github.com/docker/docker/contrib/nnp-test/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c create mode 100755 vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh create mode 100755 vendor/github.com/docker/docker/contrib/project-stats.sh create mode 100755 vendor/github.com/docker/docker/contrib/report-issue.sh create mode 100755 vendor/github.com/docker/docker/contrib/reprepro/suites.sh create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz create mode 100644 vendor/github.com/docker/docker/contrib/syntax/nano/Dockerfile.nanorc create mode 100644 vendor/github.com/docker/docker/contrib/syntax/nano/README.md create mode 100644 vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences create mode 100644 vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage create mode 100644 vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/info.plist create mode 100644 vendor/github.com/docker/docker/contrib/syntax/textmate/README.md create mode 100644 vendor/github.com/docker/docker/contrib/syntax/textmate/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/README.md create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/doc/dockerfile.txt create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/syntax/dockerfile.vim create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/acct.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/exit32.s create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/ns.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/raw.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/setgid.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/setuid.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/socket.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/userns.c create mode 100644 vendor/github.com/docker/docker/contrib/udev/80-docker.rules create mode 100644 vendor/github.com/docker/docker/contrib/vagrant-docker/README.md create mode 100644 vendor/github.com/docker/docker/daemon/apparmor_default.go create mode 100644 vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/archive.go create mode 100644 vendor/github.com/docker/docker/daemon/archive_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/archive_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/attach.go create mode 100644 vendor/github.com/docker/docker/daemon/auth.go create mode 100644 vendor/github.com/docker/docker/daemon/bindmount_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/bindmount_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/cache.go create mode 100644 vendor/github.com/docker/docker/daemon/caps/utils_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/changes.go create mode 100644 vendor/github.com/docker/docker/daemon/checkpoint.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/cluster.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/container.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/network.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/node.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/secret.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/service.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/task.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/backend.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/filters.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/helpers.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/provider/network.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/secrets.go create mode 100644 vendor/github.com/docker/docker/daemon/commit.go create mode 100644 vendor/github.com/docker/docker/daemon/config.go create mode 100644 vendor/github.com/docker/docker/daemon/config_common_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/config_experimental.go create mode 100644 vendor/github.com/docker/docker/daemon/config_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/config_test.go create mode 100644 vendor/github.com/docker/docker/daemon/config_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/config_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/config_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/config_windows_test.go create mode 100644 vendor/github.com/docker/docker/daemon/container.go create mode 100644 vendor/github.com/docker/docker/daemon/container_operations.go create mode 100644 vendor/github.com/docker/docker/daemon/container_operations_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/container_operations_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/container_operations_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/create.go create mode 100644 vendor/github.com/docker/docker/daemon/create_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/create_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_experimental.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_linux_test.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_test.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/delete.go create mode 100644 vendor/github.com/docker/docker/daemon/delete_test.go create mode 100644 vendor/github.com/docker/docker/daemon/discovery.go create mode 100644 vendor/github.com/docker/docker/daemon/discovery_test.go create mode 100644 vendor/github.com/docker/docker/daemon/disk_usage.go create mode 100644 vendor/github.com/docker/docker/daemon/errors.go create mode 100644 vendor/github.com/docker/docker/daemon/events.go create mode 100644 vendor/github.com/docker/docker/daemon/events/events.go create mode 100644 vendor/github.com/docker/docker/daemon/events/events_test.go create mode 100644 vendor/github.com/docker/docker/daemon/events/filter.go create mode 100644 vendor/github.com/docker/docker/daemon/events/metrics.go create mode 100644 vendor/github.com/docker/docker/daemon/events/testutils/testutils.go create mode 100644 vendor/github.com/docker/docker/daemon/events_test.go create mode 100644 vendor/github.com/docker/docker/daemon/exec.go create mode 100644 vendor/github.com/docker/docker/daemon/exec/exec.go create mode 100644 vendor/github.com/docker/docker/daemon/exec_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/exec_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/exec_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/export.go create mode 100644 vendor/github.com/docker/docker/daemon/getsize_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/counter.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/plugin.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/proxy.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/health.go create mode 100644 vendor/github.com/docker/docker/daemon/health_test.go create mode 100644 vendor/github.com/docker/docker/daemon/image.go create mode 100644 vendor/github.com/docker/docker/daemon/image_delete.go create mode 100644 vendor/github.com/docker/docker/daemon/image_exporter.go create mode 100644 vendor/github.com/docker/docker/daemon/image_history.go create mode 100644 vendor/github.com/docker/docker/daemon/image_inspect.go create mode 100644 vendor/github.com/docker/docker/daemon/image_pull.go create mode 100644 vendor/github.com/docker/docker/daemon/image_push.go create mode 100644 vendor/github.com/docker/docker/daemon/image_tag.go create mode 100644 vendor/github.com/docker/docker/daemon/images.go create mode 100644 vendor/github.com/docker/docker/daemon/import.go create mode 100644 vendor/github.com/docker/docker/daemon/info.go create mode 100644 vendor/github.com/docker/docker/daemon/info_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/info_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/keys.go create mode 100644 vendor/github.com/docker/docker/daemon/keys_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/kill.go create mode 100644 vendor/github.com/docker/docker/daemon/links.go create mode 100644 vendor/github.com/docker/docker/daemon/links/links.go create mode 100644 vendor/github.com/docker/docker/daemon/links/links_test.go create mode 100644 vendor/github.com/docker/docker/daemon/links_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/links_linux_test.go create mode 100644 vendor/github.com/docker/docker/daemon/links_notlinux.go create mode 100644 vendor/github.com/docker/docker/daemon/list.go create mode 100644 vendor/github.com/docker/docker/daemon/list_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/list_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/logdrivers_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/logdrivers_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/context.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/copier.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/copier_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/factory.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/journald.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read_native.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/logger.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/logger_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logs.go create mode 100644 vendor/github.com/docker/docker/daemon/logs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/metrics.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/mounts.go create mode 100644 vendor/github.com/docker/docker/daemon/names.go create mode 100644 vendor/github.com/docker/docker/daemon/network.go create mode 100644 vendor/github.com/docker/docker/daemon/network/settings.go create mode 100644 vendor/github.com/docker/docker/daemon/oci_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/oci_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/oci_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/pause.go create mode 100644 vendor/github.com/docker/docker/daemon/prune.go create mode 100644 vendor/github.com/docker/docker/daemon/rename.go create mode 100644 vendor/github.com/docker/docker/daemon/resize.go create mode 100644 vendor/github.com/docker/docker/daemon/restart.go create mode 100644 vendor/github.com/docker/docker/daemon/search.go create mode 100644 vendor/github.com/docker/docker/daemon/search_test.go create mode 100644 vendor/github.com/docker/docker/daemon/seccomp_disabled.go create mode 100644 vendor/github.com/docker/docker/daemon/seccomp_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/seccomp_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/secrets.go create mode 100644 vendor/github.com/docker/docker/daemon/secrets_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/secrets_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/selinux_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/selinux_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/start.go create mode 100644 vendor/github.com/docker/docker/daemon/start_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/start_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/stats.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_collector.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_collector_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_collector_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_collector_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/stop.go create mode 100644 vendor/github.com/docker/docker/daemon/top_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/top_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/top_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/unpause.go create mode 100644 vendor/github.com/docker/docker/daemon/update.go create mode 100644 vendor/github.com/docker/docker/daemon/update_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/update_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/update_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_unit_test.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/wait.go create mode 100644 vendor/github.com/docker/docker/daemon/workdir.go create mode 100644 vendor/github.com/docker/docker/distribution/config.go create mode 100644 vendor/github.com/docker/docker/distribution/errors.go create mode 100644 vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest create mode 100644 vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest create mode 100644 vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest create mode 100644 vendor/github.com/docker/docker/distribution/metadata/metadata.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go create mode 100644 vendor/github.com/docker/docker/distribution/pull.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v1.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2_test.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2_unix.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2_windows.go create mode 100644 vendor/github.com/docker/docker/distribution/push.go create mode 100644 vendor/github.com/docker/docker/distribution/push_v1.go create mode 100644 vendor/github.com/docker/docker/distribution/push_v2.go create mode 100644 vendor/github.com/docker/docker/distribution/push_v2_test.go create mode 100644 vendor/github.com/docker/docker/distribution/registry.go create mode 100644 vendor/github.com/docker/docker/distribution/registry_unit_test.go create mode 100644 vendor/github.com/docker/docker/distribution/utils/progress.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/download.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/download_test.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/transfer.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/transfer_test.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/upload.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/upload_test.go create mode 100644 vendor/github.com/docker/docker/dockerversion/useragent.go create mode 100644 vendor/github.com/docker/docker/dockerversion/version_lib.go create mode 100644 vendor/github.com/docker/docker/docs/README.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.18.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.19.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.20.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.21.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.22.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.23.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.24.md create mode 100644 vendor/github.com/docker/docker/docs/api/version-history.md create mode 100644 vendor/github.com/docker/docker/docs/deprecated.md create mode 100644 vendor/github.com/docker/docker/docs/extend/EBS_volume.md create mode 100644 vendor/github.com/docker/docker/docs/extend/config.md create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_allow.png create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_chunked.png create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_deny.png create mode 100644 vendor/github.com/docker/docker/docs/extend/index.md create mode 100644 vendor/github.com/docker/docker/docs/extend/legacy_plugins.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugin_api.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_authorization.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_network.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_volume.md create mode 100644 vendor/github.com/docker/docker/docs/reference/builder.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/attach.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/build.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/cli.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/commit.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/cp.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/deploy.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/diff.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/docker_images.gif create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/dockerd.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/events.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/exec.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/export.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/history.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/image_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/images.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/import.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/index.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/info.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/kill.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/load.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/login.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/logout.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/logs.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/menu.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_update.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/pause.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/port.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/ps.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/pull.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/push.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/rename.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/restart.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/rmi.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/run.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/save.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/search.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_update.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/start.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stats.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stop.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/system_df.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/tag.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/top.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/unpause.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/update.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/version.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/wait.md create mode 100644 vendor/github.com/docker/docker/docs/reference/glossary.md create mode 100644 vendor/github.com/docker/docker/docs/reference/index.md create mode 100644 vendor/github.com/docker/docker/docs/reference/run.md create mode 100644 vendor/github.com/docker/docker/docs/static_files/contributors.png create mode 100644 vendor/github.com/docker/docker/docs/static_files/docker-logo-compressed.png create mode 100644 vendor/github.com/docker/docker/experimental/README.md create mode 100644 vendor/github.com/docker/docker/experimental/checkpoint-restore.md create mode 100644 vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan-l3.svg create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.svg create mode 100644 vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy create mode 100644 vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png create mode 100644 vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.svg create mode 100644 vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy create mode 100644 vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png create mode 100644 vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.svg create mode 100644 vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy create mode 100644 vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png create mode 100644 vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg create mode 100644 vendor/github.com/docker/docker/experimental/vlan-networks.md create mode 100644 vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh create mode 100644 vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh create mode 100644 vendor/github.com/docker/docker/hack/Jenkins/readme.md create mode 100755 vendor/github.com/docker/docker/hack/dind create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/binaries-commits create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh create mode 100755 vendor/github.com/docker/docker/hack/generate-authors.sh create mode 100755 vendor/github.com/docker/docker/hack/generate-swagger-api.sh create mode 100644 vendor/github.com/docker/docker/hack/install.sh create mode 100644 vendor/github.com/docker/docker/hack/make.ps1 create mode 100755 vendor/github.com/docker/docker/hack/make.sh create mode 100644 vendor/github.com/docker/docker/hack/make/.binary create mode 100644 vendor/github.com/docker/docker/hack/make/.binary-setup create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/compat create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/control create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion create mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default create mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init create mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst create mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docs create mode 100755 vendor/github.com/docker/docker/hack/make/.build-deb/rules create mode 100644 vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec create mode 100644 vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec create mode 100644 vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch create mode 100644 vendor/github.com/docker/docker/hack/make/.ensure-emptyfs create mode 100644 vendor/github.com/docker/docker/hack/make/.go-autogen create mode 100644 vendor/github.com/docker/docker/hack/make/.go-autogen.ps1 create mode 100644 vendor/github.com/docker/docker/hack/make/.integration-daemon-setup create mode 100644 vendor/github.com/docker/docker/hack/make/.integration-daemon-start create mode 100644 vendor/github.com/docker/docker/hack/make/.integration-daemon-stop create mode 100644 vendor/github.com/docker/docker/hack/make/.integration-test-helpers create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/docker.exe.manifest create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/docker.ico create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/docker.png create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/docker.rc create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/dockerd.rc create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/event_messages.mc create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/resources.go create mode 100644 vendor/github.com/docker/docker/hack/make/README.md create mode 100644 vendor/github.com/docker/docker/hack/make/binary create mode 100644 vendor/github.com/docker/docker/hack/make/binary-client create mode 100644 vendor/github.com/docker/docker/hack/make/binary-daemon create mode 100644 vendor/github.com/docker/docker/hack/make/build-deb create mode 100644 vendor/github.com/docker/docker/hack/make/build-integration-test-binary create mode 100644 vendor/github.com/docker/docker/hack/make/build-rpm create mode 100755 vendor/github.com/docker/docker/hack/make/clean-apt-repo create mode 100755 vendor/github.com/docker/docker/hack/make/clean-yum-repo create mode 100644 vendor/github.com/docker/docker/hack/make/cover create mode 100644 vendor/github.com/docker/docker/hack/make/cross create mode 100644 vendor/github.com/docker/docker/hack/make/dynbinary create mode 100644 vendor/github.com/docker/docker/hack/make/dynbinary-client create mode 100644 vendor/github.com/docker/docker/hack/make/dynbinary-daemon create mode 100755 vendor/github.com/docker/docker/hack/make/generate-index-listing create mode 100644 vendor/github.com/docker/docker/hack/make/install-binary create mode 100644 vendor/github.com/docker/docker/hack/make/install-binary-client create mode 100644 vendor/github.com/docker/docker/hack/make/install-binary-daemon create mode 100644 vendor/github.com/docker/docker/hack/make/install-script create mode 100755 vendor/github.com/docker/docker/hack/make/release-deb create mode 100755 vendor/github.com/docker/docker/hack/make/release-rpm create mode 100644 vendor/github.com/docker/docker/hack/make/run create mode 100755 vendor/github.com/docker/docker/hack/make/sign-repos create mode 100755 vendor/github.com/docker/docker/hack/make/test-deb-install create mode 100644 vendor/github.com/docker/docker/hack/make/test-docker-py create mode 100755 vendor/github.com/docker/docker/hack/make/test-install-script create mode 100755 vendor/github.com/docker/docker/hack/make/test-integration-cli create mode 100644 vendor/github.com/docker/docker/hack/make/test-integration-shell create mode 100755 vendor/github.com/docker/docker/hack/make/test-old-apt-repo create mode 100644 vendor/github.com/docker/docker/hack/make/test-unit create mode 100644 vendor/github.com/docker/docker/hack/make/tgz create mode 100644 vendor/github.com/docker/docker/hack/make/ubuntu create mode 100755 vendor/github.com/docker/docker/hack/make/update-apt-repo create mode 100644 vendor/github.com/docker/docker/hack/make/win create mode 100755 vendor/github.com/docker/docker/hack/release.sh create mode 100644 vendor/github.com/docker/docker/hack/validate/.swagger-yamllint create mode 100644 vendor/github.com/docker/docker/hack/validate/.validate create mode 100755 vendor/github.com/docker/docker/hack/validate/all create mode 100755 vendor/github.com/docker/docker/hack/validate/compose-bindata create mode 100755 vendor/github.com/docker/docker/hack/validate/dco create mode 100755 vendor/github.com/docker/docker/hack/validate/default create mode 100755 vendor/github.com/docker/docker/hack/validate/default-seccomp create mode 100755 vendor/github.com/docker/docker/hack/validate/gofmt create mode 100755 vendor/github.com/docker/docker/hack/validate/lint create mode 100755 vendor/github.com/docker/docker/hack/validate/pkg-imports create mode 100755 vendor/github.com/docker/docker/hack/validate/swagger create mode 100755 vendor/github.com/docker/docker/hack/validate/swagger-gen create mode 100755 vendor/github.com/docker/docker/hack/validate/test-imports create mode 100755 vendor/github.com/docker/docker/hack/validate/toml create mode 100755 vendor/github.com/docker/docker/hack/validate/vendor create mode 100755 vendor/github.com/docker/docker/hack/validate/vet create mode 100755 vendor/github.com/docker/docker/hack/vendor.sh create mode 100644 vendor/github.com/docker/docker/image/fs.go create mode 100644 vendor/github.com/docker/docker/image/fs_test.go create mode 100644 vendor/github.com/docker/docker/image/image.go create mode 100644 vendor/github.com/docker/docker/image/image_test.go create mode 100644 vendor/github.com/docker/docker/image/rootfs.go create mode 100644 vendor/github.com/docker/docker/image/spec/v1.1.md create mode 100644 vendor/github.com/docker/docker/image/spec/v1.2.md create mode 100644 vendor/github.com/docker/docker/image/spec/v1.md create mode 100644 vendor/github.com/docker/docker/image/store.go create mode 100644 vendor/github.com/docker/docker/image/store_test.go create mode 100644 vendor/github.com/docker/docker/image/tarexport/load.go create mode 100644 vendor/github.com/docker/docker/image/tarexport/save.go create mode 100644 vendor/github.com/docker/docker/image/tarexport/tarexport.go create mode 100644 vendor/github.com/docker/docker/image/v1/imagev1.go create mode 100644 vendor/github.com/docker/docker/image/v1/imagev1_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/benchmark_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/check_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_swarm.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_windows.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_logs_bench_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_test_vars.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_utils.go create mode 100644 vendor/github.com/docker/docker/integration-cli/events_utils.go create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures.go create mode 100755 vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/credentialspecs/valid.json create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-key.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-key.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/load/emptyLayer.tar create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key create mode 100755 vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/localhost.cert create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/localhost.key create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/registry/cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/secrets/default create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures_linux_daemon.go create mode 100644 vendor/github.com/docker/docker/integration-cli/npipe.go create mode 100644 vendor/github.com/docker/docker/integration-cli/npipe_windows.go create mode 100644 vendor/github.com/docker/docker/integration-cli/registry.go create mode 100644 vendor/github.com/docker/docker/integration-cli/registry_mock.go create mode 100644 vendor/github.com/docker/docker/integration-cli/requirements.go create mode 100644 vendor/github.com/docker/docker/integration-cli/requirements_unix.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_exec.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_unix.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_windows.go create mode 100644 vendor/github.com/docker/docker/integration-cli/trust_server.go create mode 100644 vendor/github.com/docker/docker/integration-cli/utils.go create mode 100644 vendor/github.com/docker/docker/layer/empty.go create mode 100644 vendor/github.com/docker/docker/layer/empty_test.go create mode 100644 vendor/github.com/docker/docker/layer/filestore.go create mode 100644 vendor/github.com/docker/docker/layer/filestore_test.go create mode 100644 vendor/github.com/docker/docker/layer/layer.go create mode 100644 vendor/github.com/docker/docker/layer/layer_store.go create mode 100644 vendor/github.com/docker/docker/layer/layer_store_windows.go create mode 100644 vendor/github.com/docker/docker/layer/layer_test.go create mode 100644 vendor/github.com/docker/docker/layer/layer_unix.go create mode 100644 vendor/github.com/docker/docker/layer/layer_unix_test.go create mode 100644 vendor/github.com/docker/docker/layer/layer_windows.go create mode 100644 vendor/github.com/docker/docker/layer/migration.go create mode 100644 vendor/github.com/docker/docker/layer/migration_test.go create mode 100644 vendor/github.com/docker/docker/layer/mount_test.go create mode 100644 vendor/github.com/docker/docker/layer/mounted_layer.go create mode 100644 vendor/github.com/docker/docker/layer/ro_layer.go create mode 100644 vendor/github.com/docker/docker/layer/ro_layer_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_solaris.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/container.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/container_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/container_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/oom_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/oom_solaris.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/process.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/process_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/process_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/queue_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types_solaris.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_solaris.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go create mode 100644 vendor/github.com/docker/docker/man/Dockerfile create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.5.md create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.aarch64 create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.armhf create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.ppc64le create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.s390x create mode 100644 vendor/github.com/docker/docker/man/README.md create mode 100644 vendor/github.com/docker/docker/man/docker-attach.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-build.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-commit.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-config-json.5.md create mode 100644 vendor/github.com/docker/docker/man/docker-cp.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-create.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-diff.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-events.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-exec.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-export.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-history.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-images.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-import.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-info.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-inspect.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-kill.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-load.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-login.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-logout.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-logs.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-connect.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-create.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-disconnect.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-inspect.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-ls.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-rm.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-pause.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-port.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-ps.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-pull.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-push.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-rename.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-restart.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-rm.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-rmi.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-run.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-save.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-search.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-start.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-stats.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-stop.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-tag.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-top.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-unpause.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-update.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-version.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-wait.1.md create mode 100644 vendor/github.com/docker/docker/man/docker.1.md create mode 100644 vendor/github.com/docker/docker/man/dockerd.8.md create mode 100644 vendor/github.com/docker/docker/man/generate.go create mode 100755 vendor/github.com/docker/docker/man/generate.sh create mode 100644 vendor/github.com/docker/docker/man/glide.lock create mode 100644 vendor/github.com/docker/docker/man/glide.yaml create mode 100755 vendor/github.com/docker/docker/man/md2man-all.sh create mode 100644 vendor/github.com/docker/docker/migrate/v1/migratev1.go create mode 100644 vendor/github.com/docker/docker/migrate/v1/migratev1_test.go create mode 100644 vendor/github.com/docker/docker/oci/defaults_linux.go create mode 100644 vendor/github.com/docker/docker/oci/defaults_solaris.go create mode 100644 vendor/github.com/docker/docker/oci/defaults_windows.go create mode 100644 vendor/github.com/docker/docker/oci/devices_linux.go create mode 100644 vendor/github.com/docker/docker/oci/devices_unsupported.go create mode 100644 vendor/github.com/docker/docker/oci/namespaces.go create mode 100644 vendor/github.com/docker/docker/opts/hosts.go create mode 100644 vendor/github.com/docker/docker/opts/hosts_test.go create mode 100644 vendor/github.com/docker/docker/opts/hosts_unix.go create mode 100644 vendor/github.com/docker/docker/opts/hosts_windows.go create mode 100644 vendor/github.com/docker/docker/opts/ip.go create mode 100644 vendor/github.com/docker/docker/opts/ip_test.go create mode 100644 vendor/github.com/docker/docker/opts/mount.go create mode 100644 vendor/github.com/docker/docker/opts/mount_test.go create mode 100644 vendor/github.com/docker/docker/opts/opts.go create mode 100644 vendor/github.com/docker/docker/opts/opts_test.go create mode 100644 vendor/github.com/docker/docker/opts/opts_unix.go create mode 100644 vendor/github.com/docker/docker/opts/opts_windows.go create mode 100644 vendor/github.com/docker/docker/opts/port.go create mode 100644 vendor/github.com/docker/docker/opts/port_test.go create mode 100644 vendor/github.com/docker/docker/opts/quotedstring.go create mode 100644 vendor/github.com/docker/docker/opts/quotedstring_test.go create mode 100644 vendor/github.com/docker/docker/opts/secret.go create mode 100644 vendor/github.com/docker/docker/opts/secret_test.go create mode 100644 vendor/github.com/docker/docker/pkg/README.md create mode 100644 vendor/github.com/docker/docker/pkg/aaparser/aaparser.go create mode 100644 vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/README.md create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/example_changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/testdata/broken.tar create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/utils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap_test.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/api.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/authz.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/middleware.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/plugin.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/response.go create mode 100644 vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go create mode 100644 vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/log.go create mode 100644 vendor/github.com/docker/docker/pkg/directory/directory.go create mode 100644 vendor/github.com/docker/docker/pkg/directory/directory_test.go create mode 100644 vendor/github.com/docker/docker/pkg/directory/directory_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/directory/directory_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/README.md create mode 100644 vendor/github.com/docker/docker/pkg/discovery/backends.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/discovery.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/discovery_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/entry.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/file/file.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/file/file_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/generator.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/generator_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/kv/kv.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/memory/memory.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go create mode 100644 vendor/github.com/docker/docker/pkg/filenotify/filenotify.go create mode 100644 vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go create mode 100644 vendor/github.com/docker/docker/pkg/filenotify/poller.go create mode 100644 vendor/github.com/docker/docker/pkg/filenotify/poller_test.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/gitutils/gitutils.go create mode 100644 vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_test.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/httputils.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/httputils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/mimetype.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/utils_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/checker/checker.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/cmd/command.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/utils.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/utils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fmt.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/multireader.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/readers.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/readers_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers_test.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go create mode 100644 vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/locker/README.md create mode 100644 vendor/github.com/docker/docker/pkg/locker/locker.go create mode 100644 vendor/github.com/docker/docker/pkg/locker/locker_test.go create mode 100644 vendor/github.com/docker/docker/pkg/longpath/longpath.go create mode 100644 vendor/github.com/docker/docker/pkg/longpath/longpath_test.go create mode 100644 vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go create mode 100644 vendor/github.com/docker/docker/pkg/loopback/ioctl.go create mode 100644 vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go create mode 100644 vendor/github.com/docker/docker/pkg/loopback/loopback.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mount.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go create mode 100644 vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go create mode 100644 vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/parsers.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/parsers_test.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/platform.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/utsname_int8.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go create mode 100644 vendor/github.com/docker/docker/pkg/plugingetter/getter.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/client.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/client_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/errors.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugin_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/transport/http.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/transport/transport.go create mode 100644 vendor/github.com/docker/docker/pkg/pools/pools.go create mode 100644 vendor/github.com/docker/docker/pkg/pools/pools_test.go create mode 100644 vendor/github.com/docker/docker/pkg/progress/progress.go create mode 100644 vendor/github.com/docker/docker/pkg/progress/progressreader.go create mode 100644 vendor/github.com/docker/docker/pkg/progress/progressreader_test.go create mode 100644 vendor/github.com/docker/docker/pkg/promise/promise.go create mode 100644 vendor/github.com/docker/docker/pkg/pubsub/publisher.go create mode 100644 vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go create mode 100644 vendor/github.com/docker/docker/pkg/random/random.go create mode 100644 vendor/github.com/docker/docker/pkg/random/random_test.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/README.md create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/reexec.go create mode 100644 vendor/github.com/docker/docker/pkg/registrar/registrar.go create mode 100644 vendor/github.com/docker/docker/pkg/registrar/registrar_test.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/README.md create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/trap.go create mode 100644 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go create mode 100644 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go create mode 100644 vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go create mode 100644 vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go create mode 100644 vendor/github.com/docker/docker/pkg/stringid/README.md create mode 100644 vendor/github.com/docker/docker/pkg/stringid/stringid.go create mode 100644 vendor/github.com/docker/docker/pkg/stringid/stringid_test.go create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/README.md create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/stringutils.go create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE create mode 100644 vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD create mode 100644 vendor/github.com/docker/docker/pkg/symlink/README.md create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/README.md create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/errors.go create mode 100644 vendor/github.com/docker/docker/pkg/system/events_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/exitcode.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_openbsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/umask.go create mode 100644 vendor/github.com/docker/docker/pkg/system/umask_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/tailfile/tailfile.go create mode 100644 vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/builder_context.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/tarsum.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/json create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/versioning.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/writercloser.go create mode 100644 vendor/github.com/docker/docker/pkg/term/ascii.go create mode 100644 vendor/github.com/docker/docker/pkg/term/ascii_test.go create mode 100644 vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go create mode 100644 vendor/github.com/docker/docker/pkg/term/tc_other.go create mode 100644 vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go create mode 100644 vendor/github.com/docker/docker/pkg/term/term.go create mode 100644 vendor/github.com/docker/docker/pkg/term/term_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/term/term_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/term/term_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_openbsd.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/console.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/windows.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/testutil/assert/assert.go create mode 100644 vendor/github.com/docker/docker/pkg/testutil/pkg.go create mode 100644 vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go create mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go create mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go create mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go create mode 100644 vendor/github.com/docker/docker/pkg/truncindex/truncindex.go create mode 100644 vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go create mode 100644 vendor/github.com/docker/docker/pkg/urlutil/urlutil.go create mode 100644 vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go create mode 100644 vendor/github.com/docker/docker/pkg/useragent/README.md create mode 100644 vendor/github.com/docker/docker/pkg/useragent/useragent.go create mode 100644 vendor/github.com/docker/docker/pkg/useragent/useragent_test.go create mode 100644 vendor/github.com/docker/docker/plugin/backend_linux.go create mode 100644 vendor/github.com/docker/docker/plugin/backend_unsupported.go create mode 100644 vendor/github.com/docker/docker/plugin/blobstore.go create mode 100644 vendor/github.com/docker/docker/plugin/defs.go create mode 100644 vendor/github.com/docker/docker/plugin/manager.go create mode 100644 vendor/github.com/docker/docker/plugin/manager_linux.go create mode 100644 vendor/github.com/docker/docker/plugin/manager_solaris.go create mode 100644 vendor/github.com/docker/docker/plugin/manager_windows.go create mode 100644 vendor/github.com/docker/docker/plugin/store.go create mode 100644 vendor/github.com/docker/docker/plugin/store_test.go create mode 100644 vendor/github.com/docker/docker/plugin/v2/plugin.go create mode 100644 vendor/github.com/docker/docker/plugin/v2/plugin_linux.go create mode 100644 vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go create mode 100644 vendor/github.com/docker/docker/plugin/v2/settable.go create mode 100644 vendor/github.com/docker/docker/plugin/v2/settable_test.go create mode 100644 vendor/github.com/docker/docker/poule.yml create mode 100644 vendor/github.com/docker/docker/profiles/apparmor/apparmor.go create mode 100644 vendor/github.com/docker/docker/profiles/apparmor/template.go create mode 100755 vendor/github.com/docker/docker/profiles/seccomp/default.json create mode 100755 vendor/github.com/docker/docker/profiles/seccomp/fixtures/example.json create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/generate.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go create mode 100644 vendor/github.com/docker/docker/project/ARM.md create mode 100644 vendor/github.com/docker/docker/project/BRANCHES-AND-TAGS.md create mode 120000 vendor/github.com/docker/docker/project/CONTRIBUTORS.md create mode 100644 vendor/github.com/docker/docker/project/GOVERNANCE.md create mode 100644 vendor/github.com/docker/docker/project/IRC-ADMINISTRATION.md create mode 100644 vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md create mode 100644 vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md create mode 100644 vendor/github.com/docker/docker/project/PACKAGERS.md create mode 100644 vendor/github.com/docker/docker/project/PATCH-RELEASES.md create mode 100644 vendor/github.com/docker/docker/project/PRINCIPLES.md create mode 100644 vendor/github.com/docker/docker/project/README.md create mode 100644 vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md create mode 100644 vendor/github.com/docker/docker/project/RELEASE-PROCESS.md create mode 100644 vendor/github.com/docker/docker/project/REVIEWING.md create mode 100644 vendor/github.com/docker/docker/project/TOOLS.md create mode 100644 vendor/github.com/docker/docker/reference/reference.go create mode 100644 vendor/github.com/docker/docker/reference/reference_test.go create mode 100644 vendor/github.com/docker/docker/reference/store.go create mode 100644 vendor/github.com/docker/docker/reference/store_test.go create mode 100644 vendor/github.com/docker/docker/registry/auth.go create mode 100644 vendor/github.com/docker/docker/registry/auth_test.go create mode 100644 vendor/github.com/docker/docker/registry/config.go create mode 100644 vendor/github.com/docker/docker/registry/config_test.go create mode 100644 vendor/github.com/docker/docker/registry/config_unix.go create mode 100644 vendor/github.com/docker/docker/registry/config_windows.go create mode 100644 vendor/github.com/docker/docker/registry/endpoint_test.go create mode 100644 vendor/github.com/docker/docker/registry/endpoint_v1.go create mode 100644 vendor/github.com/docker/docker/registry/registry.go create mode 100644 vendor/github.com/docker/docker/registry/registry_mock_test.go create mode 100644 vendor/github.com/docker/docker/registry/registry_test.go create mode 100644 vendor/github.com/docker/docker/registry/service.go create mode 100644 vendor/github.com/docker/docker/registry/service_v1.go create mode 100644 vendor/github.com/docker/docker/registry/service_v1_test.go create mode 100644 vendor/github.com/docker/docker/registry/service_v2.go create mode 100644 vendor/github.com/docker/docker/registry/session.go create mode 100644 vendor/github.com/docker/docker/registry/types.go create mode 100644 vendor/github.com/docker/docker/restartmanager/restartmanager.go create mode 100644 vendor/github.com/docker/docker/restartmanager/restartmanager_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/compare.go create mode 100644 vendor/github.com/docker/docker/runconfig/compare_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/config.go create mode 100644 vendor/github.com/docker/docker/runconfig/config_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/config_unix.go create mode 100644 vendor/github.com/docker/docker/runconfig/config_windows.go create mode 100644 vendor/github.com/docker/docker/runconfig/errors.go create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_unix.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_windows.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/envfile.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/envfile_test.go create mode 100755 vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16.env create mode 100755 vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16be.env create mode 100755 vendor/github.com/docker/docker/runconfig/opts/fixtures/utf8.env create mode 100644 vendor/github.com/docker/docker/runconfig/opts/fixtures/valid.env create mode 100644 vendor/github.com/docker/docker/runconfig/opts/fixtures/valid.label create mode 100644 vendor/github.com/docker/docker/runconfig/opts/opts.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/opts_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/parse.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/parse_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/runtime.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/throttledevice.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/ulimit.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/weightdevice.go create mode 100644 vendor/github.com/docker/docker/utils/debug.go create mode 100644 vendor/github.com/docker/docker/utils/debug_test.go create mode 100644 vendor/github.com/docker/docker/utils/names.go create mode 100644 vendor/github.com/docker/docker/utils/process_unix.go create mode 100644 vendor/github.com/docker/docker/utils/process_windows.go create mode 100644 vendor/github.com/docker/docker/utils/templates/templates.go create mode 100644 vendor/github.com/docker/docker/utils/templates/templates_test.go create mode 100644 vendor/github.com/docker/docker/utils/utils.go create mode 100644 vendor/github.com/docker/docker/utils/utils_test.go create mode 100644 vendor/github.com/docker/docker/vendor.conf create mode 100644 vendor/github.com/docker/docker/volume/drivers/adapter.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/extpoint.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/extpoint_test.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/proxy.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/proxy_test.go create mode 100644 vendor/github.com/docker/docker/volume/local/local.go create mode 100644 vendor/github.com/docker/docker/volume/local/local_test.go create mode 100644 vendor/github.com/docker/docker/volume/local/local_unix.go create mode 100644 vendor/github.com/docker/docker/volume/local/local_windows.go create mode 100644 vendor/github.com/docker/docker/volume/store/db.go create mode 100644 vendor/github.com/docker/docker/volume/store/errors.go create mode 100644 vendor/github.com/docker/docker/volume/store/restore.go create mode 100644 vendor/github.com/docker/docker/volume/store/store.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_test.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_unix.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_windows.go create mode 100644 vendor/github.com/docker/docker/volume/testutils/testutils.go create mode 100644 vendor/github.com/docker/docker/volume/validate.go create mode 100644 vendor/github.com/docker/docker/volume/validate_test.go create mode 100644 vendor/github.com/docker/docker/volume/validate_test_unix.go create mode 100644 vendor/github.com/docker/docker/volume/validate_test_windows.go create mode 100644 vendor/github.com/docker/docker/volume/volume.go create mode 100644 vendor/github.com/docker/docker/volume/volume_copy.go create mode 100644 vendor/github.com/docker/docker/volume/volume_copy_unix.go create mode 100644 vendor/github.com/docker/docker/volume/volume_copy_windows.go create mode 100644 vendor/github.com/docker/docker/volume/volume_linux.go create mode 100644 vendor/github.com/docker/docker/volume/volume_linux_test.go create mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_linux.go create mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go create mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go create mode 100644 vendor/github.com/docker/docker/volume/volume_test.go create mode 100644 vendor/github.com/docker/docker/volume/volume_unix.go create mode 100644 vendor/github.com/docker/docker/volume/volume_unsupported.go create mode 100644 vendor/github.com/docker/docker/volume/volume_windows.go create mode 100644 vendor/github.com/docker/go-connections/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/go-connections/LICENSE create mode 100644 vendor/github.com/docker/go-connections/MAINTAINERS create mode 100644 vendor/github.com/docker/go-connections/README.md create mode 100644 vendor/github.com/docker/go-connections/circle.yml create mode 100644 vendor/github.com/docker/go-connections/doc.go create mode 100644 vendor/github.com/docker/go-connections/nat/nat.go create mode 100644 vendor/github.com/docker/go-connections/nat/nat_test.go create mode 100644 vendor/github.com/docker/go-connections/nat/parse.go create mode 100644 vendor/github.com/docker/go-connections/nat/parse_test.go create mode 100644 vendor/github.com/docker/go-connections/nat/sort.go create mode 100644 vendor/github.com/docker/go-connections/nat/sort_test.go create mode 100644 vendor/github.com/docker/go-connections/proxy/logger.go create mode 100644 vendor/github.com/docker/go-connections/proxy/network_proxy_test.go create mode 100644 vendor/github.com/docker/go-connections/proxy/proxy.go create mode 100644 vendor/github.com/docker/go-connections/proxy/stub_proxy.go create mode 100644 vendor/github.com/docker/go-connections/proxy/tcp_proxy.go create mode 100644 vendor/github.com/docker/go-connections/proxy/udp_proxy.go create mode 100644 vendor/github.com/docker/go-connections/sockets/README.md create mode 100644 vendor/github.com/docker/go-connections/sockets/inmem_socket.go create mode 100644 vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go create mode 100644 vendor/github.com/docker/go-connections/sockets/proxy.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_unix.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_windows.go create mode 100644 vendor/github.com/docker/go-connections/sockets/tcp_socket.go create mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_test.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/fixtures/cert.pem create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/fixtures/cert_of_encrypted_key.pem create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/fixtures/encrypted_key.pem create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/fixtures/key.pem create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/fixtures/multi.pem create mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/go-units/LICENSE create mode 100644 vendor/github.com/docker/go-units/MAINTAINERS create mode 100644 vendor/github.com/docker/go-units/README.md create mode 100644 vendor/github.com/docker/go-units/circle.yml create mode 100644 vendor/github.com/docker/go-units/duration.go create mode 100644 vendor/github.com/docker/go-units/duration_test.go create mode 100644 vendor/github.com/docker/go-units/size.go create mode 100644 vendor/github.com/docker/go-units/size_test.go create mode 100644 vendor/github.com/docker/go-units/ulimit.go create mode 100644 vendor/github.com/docker/go-units/ulimit_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/.travis.yml create mode 100644 vendor/github.com/jessevdk/go-flags/LICENSE create mode 100644 vendor/github.com/jessevdk/go-flags/README.md create mode 100644 vendor/github.com/jessevdk/go-flags/arg.go create mode 100644 vendor/github.com/jessevdk/go-flags/arg_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/assert_test.go create mode 100755 vendor/github.com/jessevdk/go-flags/check_crosscompile.sh create mode 100644 vendor/github.com/jessevdk/go-flags/closest.go create mode 100644 vendor/github.com/jessevdk/go-flags/command.go create mode 100644 vendor/github.com/jessevdk/go-flags/command_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/completion.go create mode 100644 vendor/github.com/jessevdk/go-flags/completion_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/convert.go create mode 100644 vendor/github.com/jessevdk/go-flags/convert_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/error.go create mode 100644 vendor/github.com/jessevdk/go-flags/example_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/examples/add.go create mode 100644 vendor/github.com/jessevdk/go-flags/examples/bash-completion create mode 100644 vendor/github.com/jessevdk/go-flags/examples/main.go create mode 100644 vendor/github.com/jessevdk/go-flags/examples/rm.go create mode 100644 vendor/github.com/jessevdk/go-flags/flags.go create mode 100644 vendor/github.com/jessevdk/go-flags/group.go create mode 100644 vendor/github.com/jessevdk/go-flags/group_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/help.go create mode 100644 vendor/github.com/jessevdk/go-flags/help_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/ini.go create mode 100644 vendor/github.com/jessevdk/go-flags/ini_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/long_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/man.go create mode 100644 vendor/github.com/jessevdk/go-flags/marshal_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/multitag.go create mode 100644 vendor/github.com/jessevdk/go-flags/option.go create mode 100644 vendor/github.com/jessevdk/go-flags/options_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/optstyle_other.go create mode 100644 vendor/github.com/jessevdk/go-flags/optstyle_windows.go create mode 100644 vendor/github.com/jessevdk/go-flags/parser.go create mode 100644 vendor/github.com/jessevdk/go-flags/parser_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/pointer_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/short_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/tag_test.go create mode 100644 vendor/github.com/jessevdk/go-flags/termsize.go create mode 100644 vendor/github.com/jessevdk/go-flags/termsize_linux.go create mode 100644 vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go create mode 100644 vendor/github.com/jessevdk/go-flags/termsize_other.go create mode 100644 vendor/github.com/jessevdk/go-flags/termsize_unix.go create mode 100644 vendor/github.com/jessevdk/go-flags/unknown_test.go create mode 100644 vendor/github.com/moby/moby/.dockerignore create mode 100644 vendor/github.com/moby/moby/.github/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/moby/moby/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/moby/moby/.gitignore create mode 100644 vendor/github.com/moby/moby/.mailmap create mode 100644 vendor/github.com/moby/moby/AUTHORS create mode 100644 vendor/github.com/moby/moby/CHANGELOG.md create mode 100644 vendor/github.com/moby/moby/CONTRIBUTING.md create mode 100644 vendor/github.com/moby/moby/Dockerfile create mode 100644 vendor/github.com/moby/moby/Dockerfile.aarch64 create mode 100644 vendor/github.com/moby/moby/Dockerfile.armhf create mode 100644 vendor/github.com/moby/moby/Dockerfile.ppc64le create mode 100644 vendor/github.com/moby/moby/Dockerfile.s390x create mode 100644 vendor/github.com/moby/moby/Dockerfile.simple create mode 100644 vendor/github.com/moby/moby/Dockerfile.solaris create mode 100644 vendor/github.com/moby/moby/Dockerfile.windows create mode 100644 vendor/github.com/moby/moby/LICENSE create mode 100644 vendor/github.com/moby/moby/MAINTAINERS create mode 100644 vendor/github.com/moby/moby/Makefile create mode 100644 vendor/github.com/moby/moby/NOTICE create mode 100644 vendor/github.com/moby/moby/README.md create mode 100644 vendor/github.com/moby/moby/ROADMAP.md create mode 100644 vendor/github.com/moby/moby/VENDORING.md create mode 100644 vendor/github.com/moby/moby/VERSION create mode 100644 vendor/github.com/moby/moby/api/README.md create mode 100644 vendor/github.com/moby/moby/api/common.go create mode 100644 vendor/github.com/moby/moby/api/common_test.go create mode 100644 vendor/github.com/moby/moby/api/common_unix.go create mode 100644 vendor/github.com/moby/moby/api/common_windows.go create mode 100644 vendor/github.com/moby/moby/api/errors/errors.go create mode 100644 vendor/github.com/moby/moby/api/fixtures/keyfile create mode 100644 vendor/github.com/moby/moby/api/server/httputils/decoder.go create mode 100644 vendor/github.com/moby/moby/api/server/httputils/errors.go create mode 100644 vendor/github.com/moby/moby/api/server/httputils/form.go create mode 100644 vendor/github.com/moby/moby/api/server/httputils/form_test.go create mode 100644 vendor/github.com/moby/moby/api/server/httputils/httputils.go create mode 100644 vendor/github.com/moby/moby/api/server/httputils/httputils_write_json.go create mode 100644 vendor/github.com/moby/moby/api/server/httputils/httputils_write_json_go16.go create mode 100644 vendor/github.com/moby/moby/api/server/middleware.go create mode 100644 vendor/github.com/moby/moby/api/server/middleware/cors.go create mode 100644 vendor/github.com/moby/moby/api/server/middleware/debug.go create mode 100644 vendor/github.com/moby/moby/api/server/middleware/experimental.go create mode 100644 vendor/github.com/moby/moby/api/server/middleware/middleware.go create mode 100644 vendor/github.com/moby/moby/api/server/middleware/version.go create mode 100644 vendor/github.com/moby/moby/api/server/middleware/version_test.go create mode 100644 vendor/github.com/moby/moby/api/server/profiler.go create mode 100644 vendor/github.com/moby/moby/api/server/router/build/backend.go create mode 100644 vendor/github.com/moby/moby/api/server/router/build/build.go create mode 100644 vendor/github.com/moby/moby/api/server/router/build/build_routes.go create mode 100644 vendor/github.com/moby/moby/api/server/router/checkpoint/backend.go create mode 100644 vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint.go create mode 100644 vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint_routes.go create mode 100644 vendor/github.com/moby/moby/api/server/router/container/backend.go create mode 100644 vendor/github.com/moby/moby/api/server/router/container/container.go create mode 100644 vendor/github.com/moby/moby/api/server/router/container/container_routes.go create mode 100644 vendor/github.com/moby/moby/api/server/router/container/copy.go create mode 100644 vendor/github.com/moby/moby/api/server/router/container/exec.go create mode 100644 vendor/github.com/moby/moby/api/server/router/container/inspect.go create mode 100644 vendor/github.com/moby/moby/api/server/router/experimental.go create mode 100644 vendor/github.com/moby/moby/api/server/router/image/backend.go create mode 100644 vendor/github.com/moby/moby/api/server/router/image/image.go create mode 100644 vendor/github.com/moby/moby/api/server/router/image/image_routes.go create mode 100644 vendor/github.com/moby/moby/api/server/router/local.go create mode 100644 vendor/github.com/moby/moby/api/server/router/network/backend.go create mode 100644 vendor/github.com/moby/moby/api/server/router/network/filter.go create mode 100644 vendor/github.com/moby/moby/api/server/router/network/network.go create mode 100644 vendor/github.com/moby/moby/api/server/router/network/network_routes.go create mode 100644 vendor/github.com/moby/moby/api/server/router/plugin/backend.go create mode 100644 vendor/github.com/moby/moby/api/server/router/plugin/plugin.go create mode 100644 vendor/github.com/moby/moby/api/server/router/plugin/plugin_routes.go create mode 100644 vendor/github.com/moby/moby/api/server/router/router.go create mode 100644 vendor/github.com/moby/moby/api/server/router/swarm/backend.go create mode 100644 vendor/github.com/moby/moby/api/server/router/swarm/cluster.go create mode 100644 vendor/github.com/moby/moby/api/server/router/swarm/cluster_routes.go create mode 100644 vendor/github.com/moby/moby/api/server/router/system/backend.go create mode 100644 vendor/github.com/moby/moby/api/server/router/system/system.go create mode 100644 vendor/github.com/moby/moby/api/server/router/system/system_routes.go create mode 100644 vendor/github.com/moby/moby/api/server/router/volume/backend.go create mode 100644 vendor/github.com/moby/moby/api/server/router/volume/volume.go create mode 100644 vendor/github.com/moby/moby/api/server/router/volume/volume_routes.go create mode 100644 vendor/github.com/moby/moby/api/server/router_swapper.go create mode 100644 vendor/github.com/moby/moby/api/server/server.go create mode 100644 vendor/github.com/moby/moby/api/server/server_test.go create mode 100644 vendor/github.com/moby/moby/api/swagger-gen.yaml create mode 100644 vendor/github.com/moby/moby/api/swagger.yaml create mode 100644 vendor/github.com/moby/moby/api/templates/server/operation.gotmpl create mode 100644 vendor/github.com/moby/moby/api/types/auth.go create mode 100644 vendor/github.com/moby/moby/api/types/backend/backend.go create mode 100644 vendor/github.com/moby/moby/api/types/blkiodev/blkio.go create mode 100644 vendor/github.com/moby/moby/api/types/client.go create mode 100644 vendor/github.com/moby/moby/api/types/configs.go create mode 100644 vendor/github.com/moby/moby/api/types/container/config.go create mode 100644 vendor/github.com/moby/moby/api/types/container/container_create.go create mode 100644 vendor/github.com/moby/moby/api/types/container/container_update.go create mode 100644 vendor/github.com/moby/moby/api/types/container/container_wait.go create mode 100644 vendor/github.com/moby/moby/api/types/container/host_config.go create mode 100644 vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go create mode 100644 vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go create mode 100644 vendor/github.com/moby/moby/api/types/error_response.go create mode 100644 vendor/github.com/moby/moby/api/types/events/events.go create mode 100644 vendor/github.com/moby/moby/api/types/filters/parse.go create mode 100644 vendor/github.com/moby/moby/api/types/filters/parse_test.go create mode 100644 vendor/github.com/moby/moby/api/types/id_response.go create mode 100644 vendor/github.com/moby/moby/api/types/image_summary.go create mode 100644 vendor/github.com/moby/moby/api/types/mount/mount.go create mode 100644 vendor/github.com/moby/moby/api/types/network/network.go create mode 100644 vendor/github.com/moby/moby/api/types/plugin.go create mode 100644 vendor/github.com/moby/moby/api/types/plugin_device.go create mode 100644 vendor/github.com/moby/moby/api/types/plugin_env.go create mode 100644 vendor/github.com/moby/moby/api/types/plugin_interface_type.go create mode 100644 vendor/github.com/moby/moby/api/types/plugin_mount.go create mode 100644 vendor/github.com/moby/moby/api/types/plugin_responses.go create mode 100644 vendor/github.com/moby/moby/api/types/port.go create mode 100644 vendor/github.com/moby/moby/api/types/reference/image_reference.go create mode 100644 vendor/github.com/moby/moby/api/types/reference/image_reference_test.go create mode 100644 vendor/github.com/moby/moby/api/types/registry/authenticate.go create mode 100644 vendor/github.com/moby/moby/api/types/registry/registry.go create mode 100644 vendor/github.com/moby/moby/api/types/seccomp.go create mode 100644 vendor/github.com/moby/moby/api/types/service_update_response.go create mode 100644 vendor/github.com/moby/moby/api/types/stats.go create mode 100644 vendor/github.com/moby/moby/api/types/strslice/strslice.go create mode 100644 vendor/github.com/moby/moby/api/types/strslice/strslice_test.go create mode 100644 vendor/github.com/moby/moby/api/types/swarm/common.go create mode 100644 vendor/github.com/moby/moby/api/types/swarm/container.go create mode 100644 vendor/github.com/moby/moby/api/types/swarm/network.go create mode 100644 vendor/github.com/moby/moby/api/types/swarm/node.go create mode 100644 vendor/github.com/moby/moby/api/types/swarm/secret.go create mode 100644 vendor/github.com/moby/moby/api/types/swarm/service.go create mode 100644 vendor/github.com/moby/moby/api/types/swarm/swarm.go create mode 100644 vendor/github.com/moby/moby/api/types/swarm/task.go create mode 100644 vendor/github.com/moby/moby/api/types/time/duration_convert.go create mode 100644 vendor/github.com/moby/moby/api/types/time/duration_convert_test.go create mode 100644 vendor/github.com/moby/moby/api/types/time/timestamp.go create mode 100644 vendor/github.com/moby/moby/api/types/time/timestamp_test.go create mode 100644 vendor/github.com/moby/moby/api/types/types.go create mode 100644 vendor/github.com/moby/moby/api/types/versions/README.md create mode 100644 vendor/github.com/moby/moby/api/types/versions/compare.go create mode 100644 vendor/github.com/moby/moby/api/types/versions/compare_test.go create mode 100644 vendor/github.com/moby/moby/api/types/versions/v1p19/types.go create mode 100644 vendor/github.com/moby/moby/api/types/versions/v1p20/types.go create mode 100644 vendor/github.com/moby/moby/api/types/volume.go create mode 100644 vendor/github.com/moby/moby/api/types/volume/volumes_create.go create mode 100644 vendor/github.com/moby/moby/api/types/volume/volumes_list.go create mode 100644 vendor/github.com/moby/moby/builder/builder.go create mode 100644 vendor/github.com/moby/moby/builder/context.go create mode 100644 vendor/github.com/moby/moby/builder/context_test.go create mode 100644 vendor/github.com/moby/moby/builder/context_unix.go create mode 100644 vendor/github.com/moby/moby/builder/context_windows.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/bflag.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/bflag_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/builder.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/builder_unix.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/builder_windows.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/command/command.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/envVarTest create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/evaluator.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/evaluator_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/evaluator_unix.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/evaluator_windows.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals_unix.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals_windows.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals_windows_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/dumper/main.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/json_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/parser.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/parser_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfile-line/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/Dockerfile create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/result create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/utils.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/shell_parser.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/shell_parser_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/support.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/support_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/utils_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/wordsTest create mode 100644 vendor/github.com/moby/moby/builder/dockerignore.go create mode 100644 vendor/github.com/moby/moby/builder/dockerignore/dockerignore.go create mode 100644 vendor/github.com/moby/moby/builder/dockerignore/dockerignore_test.go create mode 100644 vendor/github.com/moby/moby/builder/dockerignore_test.go create mode 100644 vendor/github.com/moby/moby/builder/git.go create mode 100644 vendor/github.com/moby/moby/builder/remote.go create mode 100644 vendor/github.com/moby/moby/builder/remote_test.go create mode 100644 vendor/github.com/moby/moby/builder/tarsum.go create mode 100644 vendor/github.com/moby/moby/builder/tarsum_test.go create mode 100644 vendor/github.com/moby/moby/builder/utils_test.go create mode 100644 vendor/github.com/moby/moby/cli/cobra.go create mode 100644 vendor/github.com/moby/moby/cli/command/bundlefile/bundlefile.go create mode 100644 vendor/github.com/moby/moby/cli/command/bundlefile/bundlefile_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/checkpoint/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/checkpoint/create.go create mode 100644 vendor/github.com/moby/moby/cli/command/checkpoint/list.go create mode 100644 vendor/github.com/moby/moby/cli/command/checkpoint/remove.go create mode 100644 vendor/github.com/moby/moby/cli/command/cli.go create mode 100644 vendor/github.com/moby/moby/cli/command/commands/commands.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/attach.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/commit.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/cp.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/create.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/diff.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/exec.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/exec_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/export.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/hijack.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/inspect.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/kill.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/list.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/logs.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/pause.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/port.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/prune.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/ps_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/rename.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/restart.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/rm.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/run.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/start.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/stats.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/stats_helpers.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/stats_unit_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/stop.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/top.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/tty.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/unpause.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/update.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/utils.go create mode 100644 vendor/github.com/moby/moby/cli/command/container/wait.go create mode 100644 vendor/github.com/moby/moby/cli/command/events_utils.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/container.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/container_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/custom.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/custom_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/disk_usage.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/formatter.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/image.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/image_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/network.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/network_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/reflect.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/reflect_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/service.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/stats.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/stats_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/volume.go create mode 100644 vendor/github.com/moby/moby/cli/command/formatter/volume_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/idresolver/idresolver.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/build.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/history.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/import.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/inspect.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/list.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/load.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/prune.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/pull.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/push.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/remove.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/save.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/tag.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/trust.go create mode 100644 vendor/github.com/moby/moby/cli/command/image/trust_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/in.go create mode 100644 vendor/github.com/moby/moby/cli/command/inspect/inspector.go create mode 100644 vendor/github.com/moby/moby/cli/command/inspect/inspector_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/network/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/network/connect.go create mode 100644 vendor/github.com/moby/moby/cli/command/network/create.go create mode 100644 vendor/github.com/moby/moby/cli/command/network/disconnect.go create mode 100644 vendor/github.com/moby/moby/cli/command/network/inspect.go create mode 100644 vendor/github.com/moby/moby/cli/command/network/list.go create mode 100644 vendor/github.com/moby/moby/cli/command/network/prune.go create mode 100644 vendor/github.com/moby/moby/cli/command/network/remove.go create mode 100644 vendor/github.com/moby/moby/cli/command/node/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/node/demote.go create mode 100644 vendor/github.com/moby/moby/cli/command/node/inspect.go create mode 100644 vendor/github.com/moby/moby/cli/command/node/list.go create mode 100644 vendor/github.com/moby/moby/cli/command/node/opts.go create mode 100644 vendor/github.com/moby/moby/cli/command/node/promote.go create mode 100644 vendor/github.com/moby/moby/cli/command/node/ps.go create mode 100644 vendor/github.com/moby/moby/cli/command/node/remove.go create mode 100644 vendor/github.com/moby/moby/cli/command/node/update.go create mode 100644 vendor/github.com/moby/moby/cli/command/out.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/create.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/disable.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/enable.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/inspect.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/install.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/list.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/push.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/remove.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/set.go create mode 100644 vendor/github.com/moby/moby/cli/command/plugin/upgrade.go create mode 100644 vendor/github.com/moby/moby/cli/command/prune/prune.go create mode 100644 vendor/github.com/moby/moby/cli/command/registry.go create mode 100644 vendor/github.com/moby/moby/cli/command/registry/login.go create mode 100644 vendor/github.com/moby/moby/cli/command/registry/logout.go create mode 100644 vendor/github.com/moby/moby/cli/command/registry/search.go create mode 100644 vendor/github.com/moby/moby/cli/command/secret/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/secret/create.go create mode 100644 vendor/github.com/moby/moby/cli/command/secret/inspect.go create mode 100644 vendor/github.com/moby/moby/cli/command/secret/ls.go create mode 100644 vendor/github.com/moby/moby/cli/command/secret/remove.go create mode 100644 vendor/github.com/moby/moby/cli/command/secret/utils.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/create.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/inspect.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/inspect_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/list.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/logs.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/opts.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/opts_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/parse.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/ps.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/remove.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/scale.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/trust.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/update.go create mode 100644 vendor/github.com/moby/moby/cli/command/service/update_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/stack/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/stack/common.go create mode 100644 vendor/github.com/moby/moby/cli/command/stack/deploy.go create mode 100644 vendor/github.com/moby/moby/cli/command/stack/deploy_bundlefile.go create mode 100644 vendor/github.com/moby/moby/cli/command/stack/list.go create mode 100644 vendor/github.com/moby/moby/cli/command/stack/opts.go create mode 100644 vendor/github.com/moby/moby/cli/command/stack/ps.go create mode 100644 vendor/github.com/moby/moby/cli/command/stack/remove.go create mode 100644 vendor/github.com/moby/moby/cli/command/stack/services.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/init.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/join.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/join_token.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/leave.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/opts.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/opts_test.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/unlock.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/unlock_key.go create mode 100644 vendor/github.com/moby/moby/cli/command/swarm/update.go create mode 100644 vendor/github.com/moby/moby/cli/command/system/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/system/df.go create mode 100644 vendor/github.com/moby/moby/cli/command/system/events.go create mode 100644 vendor/github.com/moby/moby/cli/command/system/info.go create mode 100644 vendor/github.com/moby/moby/cli/command/system/inspect.go create mode 100644 vendor/github.com/moby/moby/cli/command/system/prune.go create mode 100644 vendor/github.com/moby/moby/cli/command/system/version.go create mode 100644 vendor/github.com/moby/moby/cli/command/task/print.go create mode 100644 vendor/github.com/moby/moby/cli/command/trust.go create mode 100644 vendor/github.com/moby/moby/cli/command/utils.go create mode 100644 vendor/github.com/moby/moby/cli/command/volume/cmd.go create mode 100644 vendor/github.com/moby/moby/cli/command/volume/create.go create mode 100644 vendor/github.com/moby/moby/cli/command/volume/inspect.go create mode 100644 vendor/github.com/moby/moby/cli/command/volume/list.go create mode 100644 vendor/github.com/moby/moby/cli/command/volume/prune.go create mode 100644 vendor/github.com/moby/moby/cli/command/volume/remove.go create mode 100644 vendor/github.com/moby/moby/cli/compose/convert/compose.go create mode 100644 vendor/github.com/moby/moby/cli/compose/convert/compose_test.go create mode 100644 vendor/github.com/moby/moby/cli/compose/convert/service.go create mode 100644 vendor/github.com/moby/moby/cli/compose/convert/service_test.go create mode 100644 vendor/github.com/moby/moby/cli/compose/convert/volume.go create mode 100644 vendor/github.com/moby/moby/cli/compose/convert/volume_test.go create mode 100644 vendor/github.com/moby/moby/cli/compose/interpolation/interpolation.go create mode 100644 vendor/github.com/moby/moby/cli/compose/interpolation/interpolation_test.go create mode 100644 vendor/github.com/moby/moby/cli/compose/loader/example1.env create mode 100644 vendor/github.com/moby/moby/cli/compose/loader/example2.env create mode 100644 vendor/github.com/moby/moby/cli/compose/loader/full-example.yml create mode 100644 vendor/github.com/moby/moby/cli/compose/loader/loader.go create mode 100644 vendor/github.com/moby/moby/cli/compose/loader/loader_test.go create mode 100644 vendor/github.com/moby/moby/cli/compose/schema/bindata.go create mode 100644 vendor/github.com/moby/moby/cli/compose/schema/data/config_schema_v3.0.json create mode 100644 vendor/github.com/moby/moby/cli/compose/schema/data/config_schema_v3.1.json create mode 100644 vendor/github.com/moby/moby/cli/compose/schema/schema.go create mode 100644 vendor/github.com/moby/moby/cli/compose/schema/schema_test.go create mode 100644 vendor/github.com/moby/moby/cli/compose/template/template.go create mode 100644 vendor/github.com/moby/moby/cli/compose/template/template_test.go create mode 100644 vendor/github.com/moby/moby/cli/compose/types/types.go create mode 100644 vendor/github.com/moby/moby/cli/error.go create mode 100644 vendor/github.com/moby/moby/cli/flags/client.go create mode 100644 vendor/github.com/moby/moby/cli/flags/common.go create mode 100644 vendor/github.com/moby/moby/cli/flags/common_test.go create mode 100644 vendor/github.com/moby/moby/cli/required.go create mode 100644 vendor/github.com/moby/moby/cli/trust/trust.go create mode 100644 vendor/github.com/moby/moby/cliconfig/config.go create mode 100644 vendor/github.com/moby/moby/cliconfig/config_test.go create mode 100644 vendor/github.com/moby/moby/cliconfig/configfile/file.go create mode 100644 vendor/github.com/moby/moby/cliconfig/configfile/file_test.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/credentials.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/default_store.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/default_store_darwin.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/default_store_linux.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/default_store_unsupported.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/default_store_windows.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/file_store.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/file_store_test.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/native_store.go create mode 100644 vendor/github.com/moby/moby/cliconfig/credentials/native_store_test.go create mode 100644 vendor/github.com/moby/moby/client/README.md create mode 100644 vendor/github.com/moby/moby/client/checkpoint_create.go create mode 100644 vendor/github.com/moby/moby/client/checkpoint_create_test.go create mode 100644 vendor/github.com/moby/moby/client/checkpoint_delete.go create mode 100644 vendor/github.com/moby/moby/client/checkpoint_delete_test.go create mode 100644 vendor/github.com/moby/moby/client/checkpoint_list.go create mode 100644 vendor/github.com/moby/moby/client/checkpoint_list_test.go create mode 100644 vendor/github.com/moby/moby/client/client.go create mode 100644 vendor/github.com/moby/moby/client/client_mock_test.go create mode 100644 vendor/github.com/moby/moby/client/client_test.go create mode 100644 vendor/github.com/moby/moby/client/client_unix.go create mode 100644 vendor/github.com/moby/moby/client/client_windows.go create mode 100644 vendor/github.com/moby/moby/client/container_attach.go create mode 100644 vendor/github.com/moby/moby/client/container_commit.go create mode 100644 vendor/github.com/moby/moby/client/container_commit_test.go create mode 100644 vendor/github.com/moby/moby/client/container_copy.go create mode 100644 vendor/github.com/moby/moby/client/container_copy_test.go create mode 100644 vendor/github.com/moby/moby/client/container_create.go create mode 100644 vendor/github.com/moby/moby/client/container_create_test.go create mode 100644 vendor/github.com/moby/moby/client/container_diff.go create mode 100644 vendor/github.com/moby/moby/client/container_diff_test.go create mode 100644 vendor/github.com/moby/moby/client/container_exec.go create mode 100644 vendor/github.com/moby/moby/client/container_exec_test.go create mode 100644 vendor/github.com/moby/moby/client/container_export.go create mode 100644 vendor/github.com/moby/moby/client/container_export_test.go create mode 100644 vendor/github.com/moby/moby/client/container_inspect.go create mode 100644 vendor/github.com/moby/moby/client/container_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/container_kill.go create mode 100644 vendor/github.com/moby/moby/client/container_kill_test.go create mode 100644 vendor/github.com/moby/moby/client/container_list.go create mode 100644 vendor/github.com/moby/moby/client/container_list_test.go create mode 100644 vendor/github.com/moby/moby/client/container_logs.go create mode 100644 vendor/github.com/moby/moby/client/container_logs_test.go create mode 100644 vendor/github.com/moby/moby/client/container_pause.go create mode 100644 vendor/github.com/moby/moby/client/container_pause_test.go create mode 100644 vendor/github.com/moby/moby/client/container_prune.go create mode 100644 vendor/github.com/moby/moby/client/container_remove.go create mode 100644 vendor/github.com/moby/moby/client/container_remove_test.go create mode 100644 vendor/github.com/moby/moby/client/container_rename.go create mode 100644 vendor/github.com/moby/moby/client/container_rename_test.go create mode 100644 vendor/github.com/moby/moby/client/container_resize.go create mode 100644 vendor/github.com/moby/moby/client/container_resize_test.go create mode 100644 vendor/github.com/moby/moby/client/container_restart.go create mode 100644 vendor/github.com/moby/moby/client/container_restart_test.go create mode 100644 vendor/github.com/moby/moby/client/container_start.go create mode 100644 vendor/github.com/moby/moby/client/container_start_test.go create mode 100644 vendor/github.com/moby/moby/client/container_stats.go create mode 100644 vendor/github.com/moby/moby/client/container_stats_test.go create mode 100644 vendor/github.com/moby/moby/client/container_stop.go create mode 100644 vendor/github.com/moby/moby/client/container_stop_test.go create mode 100644 vendor/github.com/moby/moby/client/container_top.go create mode 100644 vendor/github.com/moby/moby/client/container_top_test.go create mode 100644 vendor/github.com/moby/moby/client/container_unpause.go create mode 100644 vendor/github.com/moby/moby/client/container_unpause_test.go create mode 100644 vendor/github.com/moby/moby/client/container_update.go create mode 100644 vendor/github.com/moby/moby/client/container_update_test.go create mode 100644 vendor/github.com/moby/moby/client/container_wait.go create mode 100644 vendor/github.com/moby/moby/client/container_wait_test.go create mode 100644 vendor/github.com/moby/moby/client/disk_usage.go create mode 100644 vendor/github.com/moby/moby/client/errors.go create mode 100644 vendor/github.com/moby/moby/client/events.go create mode 100644 vendor/github.com/moby/moby/client/events_test.go create mode 100644 vendor/github.com/moby/moby/client/hijack.go create mode 100644 vendor/github.com/moby/moby/client/image_build.go create mode 100644 vendor/github.com/moby/moby/client/image_build_test.go create mode 100644 vendor/github.com/moby/moby/client/image_create.go create mode 100644 vendor/github.com/moby/moby/client/image_create_test.go create mode 100644 vendor/github.com/moby/moby/client/image_history.go create mode 100644 vendor/github.com/moby/moby/client/image_history_test.go create mode 100644 vendor/github.com/moby/moby/client/image_import.go create mode 100644 vendor/github.com/moby/moby/client/image_import_test.go create mode 100644 vendor/github.com/moby/moby/client/image_inspect.go create mode 100644 vendor/github.com/moby/moby/client/image_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/image_list.go create mode 100644 vendor/github.com/moby/moby/client/image_list_test.go create mode 100644 vendor/github.com/moby/moby/client/image_load.go create mode 100644 vendor/github.com/moby/moby/client/image_load_test.go create mode 100644 vendor/github.com/moby/moby/client/image_prune.go create mode 100644 vendor/github.com/moby/moby/client/image_pull.go create mode 100644 vendor/github.com/moby/moby/client/image_pull_test.go create mode 100644 vendor/github.com/moby/moby/client/image_push.go create mode 100644 vendor/github.com/moby/moby/client/image_push_test.go create mode 100644 vendor/github.com/moby/moby/client/image_remove.go create mode 100644 vendor/github.com/moby/moby/client/image_remove_test.go create mode 100644 vendor/github.com/moby/moby/client/image_save.go create mode 100644 vendor/github.com/moby/moby/client/image_save_test.go create mode 100644 vendor/github.com/moby/moby/client/image_search.go create mode 100644 vendor/github.com/moby/moby/client/image_search_test.go create mode 100644 vendor/github.com/moby/moby/client/image_tag.go create mode 100644 vendor/github.com/moby/moby/client/image_tag_test.go create mode 100644 vendor/github.com/moby/moby/client/info.go create mode 100644 vendor/github.com/moby/moby/client/info_test.go create mode 100644 vendor/github.com/moby/moby/client/interface.go create mode 100644 vendor/github.com/moby/moby/client/interface_experimental.go create mode 100644 vendor/github.com/moby/moby/client/interface_stable.go create mode 100644 vendor/github.com/moby/moby/client/login.go create mode 100644 vendor/github.com/moby/moby/client/network_connect.go create mode 100644 vendor/github.com/moby/moby/client/network_connect_test.go create mode 100644 vendor/github.com/moby/moby/client/network_create.go create mode 100644 vendor/github.com/moby/moby/client/network_create_test.go create mode 100644 vendor/github.com/moby/moby/client/network_disconnect.go create mode 100644 vendor/github.com/moby/moby/client/network_disconnect_test.go create mode 100644 vendor/github.com/moby/moby/client/network_inspect.go create mode 100644 vendor/github.com/moby/moby/client/network_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/network_list.go create mode 100644 vendor/github.com/moby/moby/client/network_list_test.go create mode 100644 vendor/github.com/moby/moby/client/network_prune.go create mode 100644 vendor/github.com/moby/moby/client/network_remove.go create mode 100644 vendor/github.com/moby/moby/client/network_remove_test.go create mode 100644 vendor/github.com/moby/moby/client/node_inspect.go create mode 100644 vendor/github.com/moby/moby/client/node_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/node_list.go create mode 100644 vendor/github.com/moby/moby/client/node_list_test.go create mode 100644 vendor/github.com/moby/moby/client/node_remove.go create mode 100644 vendor/github.com/moby/moby/client/node_remove_test.go create mode 100644 vendor/github.com/moby/moby/client/node_update.go create mode 100644 vendor/github.com/moby/moby/client/node_update_test.go create mode 100644 vendor/github.com/moby/moby/client/ping.go create mode 100644 vendor/github.com/moby/moby/client/plugin_create.go create mode 100644 vendor/github.com/moby/moby/client/plugin_disable.go create mode 100644 vendor/github.com/moby/moby/client/plugin_disable_test.go create mode 100644 vendor/github.com/moby/moby/client/plugin_enable.go create mode 100644 vendor/github.com/moby/moby/client/plugin_enable_test.go create mode 100644 vendor/github.com/moby/moby/client/plugin_inspect.go create mode 100644 vendor/github.com/moby/moby/client/plugin_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/plugin_install.go create mode 100644 vendor/github.com/moby/moby/client/plugin_list.go create mode 100644 vendor/github.com/moby/moby/client/plugin_list_test.go create mode 100644 vendor/github.com/moby/moby/client/plugin_push.go create mode 100644 vendor/github.com/moby/moby/client/plugin_push_test.go create mode 100644 vendor/github.com/moby/moby/client/plugin_remove.go create mode 100644 vendor/github.com/moby/moby/client/plugin_remove_test.go create mode 100644 vendor/github.com/moby/moby/client/plugin_set.go create mode 100644 vendor/github.com/moby/moby/client/plugin_set_test.go create mode 100644 vendor/github.com/moby/moby/client/plugin_upgrade.go create mode 100644 vendor/github.com/moby/moby/client/request.go create mode 100644 vendor/github.com/moby/moby/client/request_test.go create mode 100644 vendor/github.com/moby/moby/client/secret_create.go create mode 100644 vendor/github.com/moby/moby/client/secret_create_test.go create mode 100644 vendor/github.com/moby/moby/client/secret_inspect.go create mode 100644 vendor/github.com/moby/moby/client/secret_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/secret_list.go create mode 100644 vendor/github.com/moby/moby/client/secret_list_test.go create mode 100644 vendor/github.com/moby/moby/client/secret_remove.go create mode 100644 vendor/github.com/moby/moby/client/secret_remove_test.go create mode 100644 vendor/github.com/moby/moby/client/secret_update.go create mode 100644 vendor/github.com/moby/moby/client/secret_update_test.go create mode 100644 vendor/github.com/moby/moby/client/service_create.go create mode 100644 vendor/github.com/moby/moby/client/service_create_test.go create mode 100644 vendor/github.com/moby/moby/client/service_inspect.go create mode 100644 vendor/github.com/moby/moby/client/service_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/service_list.go create mode 100644 vendor/github.com/moby/moby/client/service_list_test.go create mode 100644 vendor/github.com/moby/moby/client/service_logs.go create mode 100644 vendor/github.com/moby/moby/client/service_logs_test.go create mode 100644 vendor/github.com/moby/moby/client/service_remove.go create mode 100644 vendor/github.com/moby/moby/client/service_remove_test.go create mode 100644 vendor/github.com/moby/moby/client/service_update.go create mode 100644 vendor/github.com/moby/moby/client/service_update_test.go create mode 100644 vendor/github.com/moby/moby/client/swarm_get_unlock_key.go create mode 100644 vendor/github.com/moby/moby/client/swarm_init.go create mode 100644 vendor/github.com/moby/moby/client/swarm_init_test.go create mode 100644 vendor/github.com/moby/moby/client/swarm_inspect.go create mode 100644 vendor/github.com/moby/moby/client/swarm_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/swarm_join.go create mode 100644 vendor/github.com/moby/moby/client/swarm_join_test.go create mode 100644 vendor/github.com/moby/moby/client/swarm_leave.go create mode 100644 vendor/github.com/moby/moby/client/swarm_leave_test.go create mode 100644 vendor/github.com/moby/moby/client/swarm_unlock.go create mode 100644 vendor/github.com/moby/moby/client/swarm_update.go create mode 100644 vendor/github.com/moby/moby/client/swarm_update_test.go create mode 100644 vendor/github.com/moby/moby/client/task_inspect.go create mode 100644 vendor/github.com/moby/moby/client/task_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/task_list.go create mode 100644 vendor/github.com/moby/moby/client/task_list_test.go create mode 100644 vendor/github.com/moby/moby/client/testdata/ca.pem create mode 100644 vendor/github.com/moby/moby/client/testdata/cert.pem create mode 100644 vendor/github.com/moby/moby/client/testdata/key.pem create mode 100644 vendor/github.com/moby/moby/client/transport.go create mode 100644 vendor/github.com/moby/moby/client/utils.go create mode 100644 vendor/github.com/moby/moby/client/version.go create mode 100644 vendor/github.com/moby/moby/client/volume_create.go create mode 100644 vendor/github.com/moby/moby/client/volume_create_test.go create mode 100644 vendor/github.com/moby/moby/client/volume_inspect.go create mode 100644 vendor/github.com/moby/moby/client/volume_inspect_test.go create mode 100644 vendor/github.com/moby/moby/client/volume_list.go create mode 100644 vendor/github.com/moby/moby/client/volume_list_test.go create mode 100644 vendor/github.com/moby/moby/client/volume_prune.go create mode 100644 vendor/github.com/moby/moby/client/volume_remove.go create mode 100644 vendor/github.com/moby/moby/client/volume_remove_test.go create mode 100644 vendor/github.com/moby/moby/cmd/docker/daemon_none.go create mode 100644 vendor/github.com/moby/moby/cmd/docker/daemon_none_test.go create mode 100644 vendor/github.com/moby/moby/cmd/docker/daemon_unit_test.go create mode 100644 vendor/github.com/moby/moby/cmd/docker/daemon_unix.go create mode 100644 vendor/github.com/moby/moby/cmd/docker/docker.go create mode 100644 vendor/github.com/moby/moby/cmd/docker/docker_test.go create mode 100644 vendor/github.com/moby/moby/cmd/docker/docker_windows.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/README.md create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_freebsd.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_linux.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_solaris.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_test.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_unix.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_unix_test.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_windows.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/docker.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/docker_windows.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override_test.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/metrics.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/service_unsupported.go create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/service_windows.go create mode 100644 vendor/github.com/moby/moby/container/archive.go create mode 100644 vendor/github.com/moby/moby/container/container.go create mode 100644 vendor/github.com/moby/moby/container/container_linux.go create mode 100644 vendor/github.com/moby/moby/container/container_notlinux.go create mode 100644 vendor/github.com/moby/moby/container/container_unit_test.go create mode 100644 vendor/github.com/moby/moby/container/container_unix.go create mode 100644 vendor/github.com/moby/moby/container/container_windows.go create mode 100644 vendor/github.com/moby/moby/container/health.go create mode 100644 vendor/github.com/moby/moby/container/history.go create mode 100644 vendor/github.com/moby/moby/container/memory_store.go create mode 100644 vendor/github.com/moby/moby/container/memory_store_test.go create mode 100644 vendor/github.com/moby/moby/container/monitor.go create mode 100644 vendor/github.com/moby/moby/container/mounts_unix.go create mode 100644 vendor/github.com/moby/moby/container/mounts_windows.go create mode 100644 vendor/github.com/moby/moby/container/state.go create mode 100644 vendor/github.com/moby/moby/container/state_solaris.go create mode 100644 vendor/github.com/moby/moby/container/state_test.go create mode 100644 vendor/github.com/moby/moby/container/state_unix.go create mode 100644 vendor/github.com/moby/moby/container/state_windows.go create mode 100644 vendor/github.com/moby/moby/container/store.go create mode 100644 vendor/github.com/moby/moby/container/stream/streams.go create mode 100644 vendor/github.com/moby/moby/contrib/README.md create mode 100644 vendor/github.com/moby/moby/contrib/REVIEWERS create mode 100644 vendor/github.com/moby/moby/contrib/apparmor/main.go create mode 100644 vendor/github.com/moby/moby/contrib/apparmor/template.go create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/build.sh create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/generate.sh create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/README.md create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/amd64/build.sh create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-jessie/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-stretch/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-wheezy/Dockerfile create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/amd64/generate.sh create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/debian-jessie/Dockerfile create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/armhf/generate.sh create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/build.sh create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/generate.sh create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/s390x/build.sh create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/s390x/generate.sh create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/README.md create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/build.sh create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/centos-7/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-24/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-25/Dockerfile create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/generate.sh create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/photon-1.0/Dockerfile create mode 100755 vendor/github.com/moby/moby/contrib/check-config.sh create mode 100644 vendor/github.com/moby/moby/contrib/completion/REVIEWERS create mode 100644 vendor/github.com/moby/moby/contrib/completion/bash/docker create mode 100644 vendor/github.com/moby/moby/contrib/completion/fish/docker.fish create mode 100644 vendor/github.com/moby/moby/contrib/completion/powershell/readme.txt create mode 100644 vendor/github.com/moby/moby/contrib/completion/zsh/REVIEWERS create mode 100644 vendor/github.com/moby/moby/contrib/completion/zsh/_docker create mode 100644 vendor/github.com/moby/moby/contrib/desktop-integration/README.md create mode 100644 vendor/github.com/moby/moby/contrib/desktop-integration/chromium/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/desktop-integration/gparted/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/docker-device-tool/README.md create mode 100644 vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool.go create mode 100644 vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool_windows.go create mode 100755 vendor/github.com/moby/moby/contrib/dockerize-disk.sh create mode 100755 vendor/github.com/moby/moby/contrib/download-frozen-image-v1.sh create mode 100755 vendor/github.com/moby/moby/contrib/download-frozen-image-v2.sh create mode 100644 vendor/github.com/moby/moby/contrib/editorconfig create mode 100644 vendor/github.com/moby/moby/contrib/gitdm/aliases create mode 100644 vendor/github.com/moby/moby/contrib/gitdm/domain-map create mode 100755 vendor/github.com/moby/moby/contrib/gitdm/generate_aliases.sh create mode 100644 vendor/github.com/moby/moby/contrib/gitdm/gitdm.config create mode 100644 vendor/github.com/moby/moby/contrib/httpserver/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/httpserver/Dockerfile.solaris create mode 100644 vendor/github.com/moby/moby/contrib/httpserver/server.go create mode 100644 vendor/github.com/moby/moby/contrib/init/openrc/docker.confd create mode 100644 vendor/github.com/moby/moby/contrib/init/openrc/docker.initd create mode 100644 vendor/github.com/moby/moby/contrib/init/systemd/REVIEWERS create mode 100644 vendor/github.com/moby/moby/contrib/init/systemd/docker.service create mode 100644 vendor/github.com/moby/moby/contrib/init/systemd/docker.service.rpm create mode 100644 vendor/github.com/moby/moby/contrib/init/systemd/docker.socket create mode 100755 vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker create mode 100644 vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker.default create mode 100755 vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker create mode 100644 vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker.sysconfig create mode 100644 vendor/github.com/moby/moby/contrib/init/upstart/REVIEWERS create mode 100644 vendor/github.com/moby/moby/contrib/init/upstart/docker.conf create mode 100755 vendor/github.com/moby/moby/contrib/mac-install-bundle.sh create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-alpine.sh create mode 100644 vendor/github.com/moby/moby/contrib/mkimage-arch-pacman.conf create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-arch.sh create mode 100644 vendor/github.com/moby/moby/contrib/mkimage-archarm-pacman.conf create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-busybox.sh create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-crux.sh create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-debootstrap.sh create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-pld.sh create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-rinse.sh create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-yum.sh create mode 100755 vendor/github.com/moby/moby/contrib/mkimage.sh create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/.febootstrap-minimize create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/busybox-static create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/debootstrap create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/mageia-urpmi create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/rinse create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/solaris create mode 100644 vendor/github.com/moby/moby/contrib/nnp-test/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/nnp-test/nnp-test.c create mode 100755 vendor/github.com/moby/moby/contrib/nuke-graph-directory.sh create mode 100755 vendor/github.com/moby/moby/contrib/project-stats.sh create mode 100755 vendor/github.com/moby/moby/contrib/report-issue.sh create mode 100755 vendor/github.com/moby/moby/contrib/reprepro/suites.sh create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/Makefile create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/README.md create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.if create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.te create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te create mode 100644 vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/Makefile create mode 100644 vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.fc create mode 100644 vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.if create mode 100644 vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.te create mode 100644 vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz create mode 100644 vendor/github.com/moby/moby/contrib/syntax/nano/Dockerfile.nanorc create mode 100644 vendor/github.com/moby/moby/contrib/syntax/nano/README.md create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/info.plist create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/README.md create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/REVIEWERS create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/LICENSE create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/README.md create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/doc/dockerfile.txt create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/ftdetect/dockerfile.vim create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/syntax/dockerfile.vim create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/Dockerfile create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/acct.c create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/appletalk.c create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/exit32.s create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/ns.c create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/raw.c create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/setgid.c create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/setuid.c create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/socket.c create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/userns.c create mode 100644 vendor/github.com/moby/moby/contrib/udev/80-docker.rules create mode 100644 vendor/github.com/moby/moby/contrib/vagrant-docker/README.md create mode 100644 vendor/github.com/moby/moby/daemon/apparmor_default.go create mode 100644 vendor/github.com/moby/moby/daemon/apparmor_default_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/archive.go create mode 100644 vendor/github.com/moby/moby/daemon/archive_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/archive_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/attach.go create mode 100644 vendor/github.com/moby/moby/daemon/auth.go create mode 100644 vendor/github.com/moby/moby/daemon/bindmount_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/bindmount_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/cache.go create mode 100644 vendor/github.com/moby/moby/daemon/caps/utils_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/changes.go create mode 100644 vendor/github.com/moby/moby/daemon/checkpoint.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/cluster.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/container.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/network.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/node.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/secret.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/service.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/swarm.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/task.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/backend.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/adapter.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/attachment.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/container.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/controller.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/errors.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/executor.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/health_test.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/validate.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_test.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_unix_test.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_windows_test.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/filters.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/helpers.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/listen_addr.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/listen_addr_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/listen_addr_others.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/listen_addr_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/provider/network.go create mode 100644 vendor/github.com/moby/moby/daemon/cluster/secrets.go create mode 100644 vendor/github.com/moby/moby/daemon/commit.go create mode 100644 vendor/github.com/moby/moby/daemon/config.go create mode 100644 vendor/github.com/moby/moby/daemon/config_common_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/config_experimental.go create mode 100644 vendor/github.com/moby/moby/daemon/config_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/config_test.go create mode 100644 vendor/github.com/moby/moby/daemon/config_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/config_unix_test.go create mode 100644 vendor/github.com/moby/moby/daemon/config_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/config_windows_test.go create mode 100644 vendor/github.com/moby/moby/daemon/container.go create mode 100644 vendor/github.com/moby/moby/daemon/container_operations.go create mode 100644 vendor/github.com/moby/moby/daemon/container_operations_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/container_operations_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/container_operations_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/create.go create mode 100644 vendor/github.com/moby/moby/daemon/create_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/create_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon_experimental.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon_linux_test.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon_test.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon_unix_test.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/daemon_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/debugtrap.go create mode 100644 vendor/github.com/moby/moby/daemon/debugtrap_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/debugtrap_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/debugtrap_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/delete.go create mode 100644 vendor/github.com/moby/moby/daemon/delete_test.go create mode 100644 vendor/github.com/moby/moby/daemon/discovery.go create mode 100644 vendor/github.com/moby/moby/daemon/discovery_test.go create mode 100644 vendor/github.com/moby/moby/daemon/disk_usage.go create mode 100644 vendor/github.com/moby/moby/daemon/errors.go create mode 100644 vendor/github.com/moby/moby/daemon/events.go create mode 100644 vendor/github.com/moby/moby/daemon/events/events.go create mode 100644 vendor/github.com/moby/moby/daemon/events/events_test.go create mode 100644 vendor/github.com/moby/moby/daemon/events/filter.go create mode 100644 vendor/github.com/moby/moby/daemon/events/metrics.go create mode 100644 vendor/github.com/moby/moby/daemon/events/testutils/testutils.go create mode 100644 vendor/github.com/moby/moby/daemon/events_test.go create mode 100644 vendor/github.com/moby/moby/daemon/exec.go create mode 100644 vendor/github.com/moby/moby/daemon/exec/exec.go create mode 100644 vendor/github.com/moby/moby/daemon/exec_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/exec_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/exec_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/export.go create mode 100644 vendor/github.com/moby/moby/daemon/getsize_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/dirs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/dummy_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_none.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/counter.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/README.md create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/deviceset.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_doc.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/driver.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/mount.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_freebsd.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/fsdiff.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphbench_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay/copy.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/check.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/mount.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/randomid.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlayutils/overlayutils.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/plugin.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/proxy.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/quota/projectquota.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_aufs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_btrfs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_devicemapper.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_overlay.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_vfs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_zfs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/vfs/driver.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/vfs/vfs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/windows/windows.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/MAINTAINERS create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_freebsd.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/health.go create mode 100644 vendor/github.com/moby/moby/daemon/health_test.go create mode 100644 vendor/github.com/moby/moby/daemon/image.go create mode 100644 vendor/github.com/moby/moby/daemon/image_delete.go create mode 100644 vendor/github.com/moby/moby/daemon/image_exporter.go create mode 100644 vendor/github.com/moby/moby/daemon/image_history.go create mode 100644 vendor/github.com/moby/moby/daemon/image_inspect.go create mode 100644 vendor/github.com/moby/moby/daemon/image_pull.go create mode 100644 vendor/github.com/moby/moby/daemon/image_push.go create mode 100644 vendor/github.com/moby/moby/daemon/image_tag.go create mode 100644 vendor/github.com/moby/moby/daemon/images.go create mode 100644 vendor/github.com/moby/moby/daemon/import.go create mode 100644 vendor/github.com/moby/moby/daemon/info.go create mode 100644 vendor/github.com/moby/moby/daemon/info_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/info_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/initlayer/setup_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/initlayer/setup_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/initlayer/setup_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/inspect.go create mode 100644 vendor/github.com/moby/moby/daemon/inspect_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/inspect_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/inspect_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/keys.go create mode 100644 vendor/github.com/moby/moby/daemon/keys_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/kill.go create mode 100644 vendor/github.com/moby/moby/daemon/links.go create mode 100644 vendor/github.com/moby/moby/daemon/links/links.go create mode 100644 vendor/github.com/moby/moby/daemon/links/links_test.go create mode 100644 vendor/github.com/moby/moby/daemon/links_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/links_linux_test.go create mode 100644 vendor/github.com/moby/moby/daemon/links_notlinux.go create mode 100644 vendor/github.com/moby/moby/daemon/list.go create mode 100644 vendor/github.com/moby/moby/daemon/list_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/list_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/logdrivers_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/logdrivers_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/awslogs/cwlogsiface_mock_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/context.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/copier.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/copier_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/etwlogs/etwlogs_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/factory.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/fluentd/fluentd.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/gelf/gelf.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/gelf/gelf_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/journald.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/journald_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/journald_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/read.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/read_native.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/read_native_compat.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/read_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/jsonfilelog/read.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/logentries/logentries.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/logger.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/logger_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/loggerutils/rotatefilewriter.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/splunk/splunk.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/splunk/splunk_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/splunk/splunkhecmock_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/syslog/syslog.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/syslog/syslog_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logs.go create mode 100644 vendor/github.com/moby/moby/daemon/logs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/metrics.go create mode 100644 vendor/github.com/moby/moby/daemon/monitor.go create mode 100644 vendor/github.com/moby/moby/daemon/monitor_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/monitor_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/monitor_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/mounts.go create mode 100644 vendor/github.com/moby/moby/daemon/names.go create mode 100644 vendor/github.com/moby/moby/daemon/network.go create mode 100644 vendor/github.com/moby/moby/daemon/network/settings.go create mode 100644 vendor/github.com/moby/moby/daemon/oci_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/oci_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/oci_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/pause.go create mode 100644 vendor/github.com/moby/moby/daemon/prune.go create mode 100644 vendor/github.com/moby/moby/daemon/rename.go create mode 100644 vendor/github.com/moby/moby/daemon/resize.go create mode 100644 vendor/github.com/moby/moby/daemon/restart.go create mode 100644 vendor/github.com/moby/moby/daemon/search.go create mode 100644 vendor/github.com/moby/moby/daemon/search_test.go create mode 100644 vendor/github.com/moby/moby/daemon/seccomp_disabled.go create mode 100644 vendor/github.com/moby/moby/daemon/seccomp_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/seccomp_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/secrets.go create mode 100644 vendor/github.com/moby/moby/daemon/secrets_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/secrets_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/selinux_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/selinux_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/start.go create mode 100644 vendor/github.com/moby/moby/daemon/start_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/start_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/stats.go create mode 100644 vendor/github.com/moby/moby/daemon/stats_collector.go create mode 100644 vendor/github.com/moby/moby/daemon/stats_collector_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/stats_collector_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/stats_collector_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/stats_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/stats_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/stop.go create mode 100644 vendor/github.com/moby/moby/daemon/top_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/top_unix_test.go create mode 100644 vendor/github.com/moby/moby/daemon/top_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/unpause.go create mode 100644 vendor/github.com/moby/moby/daemon/update.go create mode 100644 vendor/github.com/moby/moby/daemon/update_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/update_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/update_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes_unit_test.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes_unix_test.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/wait.go create mode 100644 vendor/github.com/moby/moby/daemon/workdir.go create mode 100644 vendor/github.com/moby/moby/distribution/config.go create mode 100644 vendor/github.com/moby/moby/distribution/errors.go create mode 100644 vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/bad_manifest create mode 100644 vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/extra_data_manifest create mode 100644 vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/good_manifest create mode 100644 vendor/github.com/moby/moby/distribution/metadata/metadata.go create mode 100644 vendor/github.com/moby/moby/distribution/metadata/v1_id_service.go create mode 100644 vendor/github.com/moby/moby/distribution/metadata/v1_id_service_test.go create mode 100644 vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service.go create mode 100644 vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service_test.go create mode 100644 vendor/github.com/moby/moby/distribution/pull.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v1.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v2.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v2_test.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v2_unix.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v2_windows.go create mode 100644 vendor/github.com/moby/moby/distribution/push.go create mode 100644 vendor/github.com/moby/moby/distribution/push_v1.go create mode 100644 vendor/github.com/moby/moby/distribution/push_v2.go create mode 100644 vendor/github.com/moby/moby/distribution/push_v2_test.go create mode 100644 vendor/github.com/moby/moby/distribution/registry.go create mode 100644 vendor/github.com/moby/moby/distribution/registry_unit_test.go create mode 100644 vendor/github.com/moby/moby/distribution/utils/progress.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/download.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/download_test.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/transfer.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/transfer_test.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/upload.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/upload_test.go create mode 100644 vendor/github.com/moby/moby/dockerversion/useragent.go create mode 100644 vendor/github.com/moby/moby/dockerversion/version_lib.go create mode 100644 vendor/github.com/moby/moby/docs/README.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.18.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.19.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.20.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.21.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.22.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.23.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.24.md create mode 100644 vendor/github.com/moby/moby/docs/api/version-history.md create mode 100644 vendor/github.com/moby/moby/docs/deprecated.md create mode 100644 vendor/github.com/moby/moby/docs/extend/EBS_volume.md create mode 100644 vendor/github.com/moby/moby/docs/extend/config.md create mode 100644 vendor/github.com/moby/moby/docs/extend/images/authz_additional_info.png create mode 100644 vendor/github.com/moby/moby/docs/extend/images/authz_allow.png create mode 100644 vendor/github.com/moby/moby/docs/extend/images/authz_chunked.png create mode 100644 vendor/github.com/moby/moby/docs/extend/images/authz_connection_hijack.png create mode 100644 vendor/github.com/moby/moby/docs/extend/images/authz_deny.png create mode 100644 vendor/github.com/moby/moby/docs/extend/index.md create mode 100644 vendor/github.com/moby/moby/docs/extend/legacy_plugins.md create mode 100644 vendor/github.com/moby/moby/docs/extend/plugin_api.md create mode 100644 vendor/github.com/moby/moby/docs/extend/plugins_authorization.md create mode 100644 vendor/github.com/moby/moby/docs/extend/plugins_graphdriver.md create mode 100644 vendor/github.com/moby/moby/docs/extend/plugins_network.md create mode 100644 vendor/github.com/moby/moby/docs/extend/plugins_volume.md create mode 100644 vendor/github.com/moby/moby/docs/reference/builder.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/attach.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/build.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/cli.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/commit.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/container.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/container_prune.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/cp.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/create.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/deploy.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/diff.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/dockerd.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/events.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/exec.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/export.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/history.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/image.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/image_prune.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/images.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/import.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/index.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/info.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/inspect.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/kill.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/load.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/login.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/logout.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/logs.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/network.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/network_connect.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/network_create.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/network_disconnect.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/network_inspect.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/network_ls.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/network_prune.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/network_rm.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/node.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/node_demote.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/node_inspect.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/node_ls.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/node_promote.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/node_ps.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/node_rm.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/node_update.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/pause.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_create.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_disable.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_enable.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_inspect.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_install.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_ls.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_push.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_rm.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_set.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/plugin_upgrade.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/port.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/ps.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/pull.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/push.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/rename.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/restart.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/rm.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/rmi.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/run.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/save.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/search.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/secret.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/secret_create.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/secret_inspect.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/secret_ls.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/secret_rm.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/service.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/service_create.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/service_inspect.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/service_logs.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/service_ls.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/service_ps.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/service_rm.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/service_scale.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/service_update.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/stack.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/stack_deploy.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/stack_ls.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/stack_ps.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/stack_rm.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/stack_services.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/start.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/stats.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/stop.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/swarm.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/swarm_init.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/swarm_join.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/swarm_join_token.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/swarm_leave.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/swarm_unlock.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/swarm_unlock_key.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/swarm_update.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/system.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/system_df.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/system_prune.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/tag.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/top.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/unpause.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/update.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/version.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/volume.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/volume_create.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/volume_inspect.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/volume_ls.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/volume_prune.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/volume_rm.md create mode 100644 vendor/github.com/moby/moby/docs/reference/commandline/wait.md create mode 100644 vendor/github.com/moby/moby/docs/reference/glossary.md create mode 100644 vendor/github.com/moby/moby/docs/reference/index.md create mode 100644 vendor/github.com/moby/moby/docs/reference/run.md create mode 100644 vendor/github.com/moby/moby/docs/static_files/contributors.png create mode 100644 vendor/github.com/moby/moby/docs/static_files/docker-logo-compressed.png create mode 100644 vendor/github.com/moby/moby/docs/yaml/Dockerfile create mode 100644 vendor/github.com/moby/moby/docs/yaml/generate.go create mode 100644 vendor/github.com/moby/moby/docs/yaml/yaml.go create mode 100644 vendor/github.com/moby/moby/experimental/README.md create mode 100644 vendor/github.com/moby/moby/experimental/checkpoint-restore.md create mode 100644 vendor/github.com/moby/moby/experimental/docker-stacks-and-bundles.md create mode 100644 vendor/github.com/moby/moby/experimental/images/ipvlan-l3.gliffy create mode 100644 vendor/github.com/moby/moby/experimental/images/ipvlan-l3.png create mode 100644 vendor/github.com/moby/moby/experimental/images/ipvlan-l3.svg create mode 100644 vendor/github.com/moby/moby/experimental/images/ipvlan_l2_simple.gliffy create mode 100644 vendor/github.com/moby/moby/experimental/images/ipvlan_l2_simple.png create mode 100644 vendor/github.com/moby/moby/experimental/images/ipvlan_l2_simple.svg create mode 100644 vendor/github.com/moby/moby/experimental/images/macvlan-bridge-ipvlan-l2.gliffy create mode 100644 vendor/github.com/moby/moby/experimental/images/macvlan-bridge-ipvlan-l2.png create mode 100644 vendor/github.com/moby/moby/experimental/images/macvlan-bridge-ipvlan-l2.svg create mode 100644 vendor/github.com/moby/moby/experimental/images/multi_tenant_8021q_vlans.gliffy create mode 100644 vendor/github.com/moby/moby/experimental/images/multi_tenant_8021q_vlans.png create mode 100644 vendor/github.com/moby/moby/experimental/images/multi_tenant_8021q_vlans.svg create mode 100644 vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.gliffy create mode 100644 vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.png create mode 100644 vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.svg create mode 100644 vendor/github.com/moby/moby/experimental/vlan-networks.md create mode 100644 vendor/github.com/moby/moby/hack/Jenkins/W2L/postbuild.sh create mode 100644 vendor/github.com/moby/moby/hack/Jenkins/W2L/setup.sh create mode 100644 vendor/github.com/moby/moby/hack/Jenkins/readme.md create mode 100755 vendor/github.com/moby/moby/hack/dind create mode 100755 vendor/github.com/moby/moby/hack/dockerfile/binaries-commits create mode 100755 vendor/github.com/moby/moby/hack/dockerfile/install-binaries.sh create mode 100755 vendor/github.com/moby/moby/hack/generate-authors.sh create mode 100755 vendor/github.com/moby/moby/hack/generate-swagger-api.sh create mode 100644 vendor/github.com/moby/moby/hack/install.sh create mode 100644 vendor/github.com/moby/moby/hack/make.ps1 create mode 100755 vendor/github.com/moby/moby/hack/make.sh create mode 100644 vendor/github.com/moby/moby/hack/make/.binary create mode 100644 vendor/github.com/moby/moby/hack/make/.binary-setup create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/compat create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/control create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.bash-completion create mode 120000 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.default create mode 120000 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.init create mode 120000 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.upstart create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.install create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.manpages create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.postinst create mode 120000 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.udev create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docs create mode 100755 vendor/github.com/moby/moby/hack/make/.build-deb/rules create mode 100644 vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine-selinux.spec create mode 100644 vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine.spec create mode 100644 vendor/github.com/moby/moby/hack/make/.detect-daemon-osarch create mode 100644 vendor/github.com/moby/moby/hack/make/.ensure-emptyfs create mode 100644 vendor/github.com/moby/moby/hack/make/.go-autogen create mode 100644 vendor/github.com/moby/moby/hack/make/.go-autogen.ps1 create mode 100644 vendor/github.com/moby/moby/hack/make/.integration-daemon-setup create mode 100644 vendor/github.com/moby/moby/hack/make/.integration-daemon-start create mode 100644 vendor/github.com/moby/moby/hack/make/.integration-daemon-stop create mode 100644 vendor/github.com/moby/moby/hack/make/.integration-test-helpers create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/common.rc create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/docker.exe.manifest create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/docker.ico create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/docker.png create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/docker.rc create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/dockerd.rc create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/event_messages.mc create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/resources.go create mode 100644 vendor/github.com/moby/moby/hack/make/README.md create mode 100644 vendor/github.com/moby/moby/hack/make/binary create mode 100644 vendor/github.com/moby/moby/hack/make/binary-client create mode 100644 vendor/github.com/moby/moby/hack/make/binary-daemon create mode 100644 vendor/github.com/moby/moby/hack/make/build-deb create mode 100644 vendor/github.com/moby/moby/hack/make/build-integration-test-binary create mode 100644 vendor/github.com/moby/moby/hack/make/build-rpm create mode 100755 vendor/github.com/moby/moby/hack/make/clean-apt-repo create mode 100755 vendor/github.com/moby/moby/hack/make/clean-yum-repo create mode 100644 vendor/github.com/moby/moby/hack/make/cover create mode 100644 vendor/github.com/moby/moby/hack/make/cross create mode 100644 vendor/github.com/moby/moby/hack/make/dynbinary create mode 100644 vendor/github.com/moby/moby/hack/make/dynbinary-client create mode 100644 vendor/github.com/moby/moby/hack/make/dynbinary-daemon create mode 100755 vendor/github.com/moby/moby/hack/make/generate-index-listing create mode 100644 vendor/github.com/moby/moby/hack/make/install-binary create mode 100644 vendor/github.com/moby/moby/hack/make/install-binary-client create mode 100644 vendor/github.com/moby/moby/hack/make/install-binary-daemon create mode 100644 vendor/github.com/moby/moby/hack/make/install-script create mode 100755 vendor/github.com/moby/moby/hack/make/release-deb create mode 100755 vendor/github.com/moby/moby/hack/make/release-rpm create mode 100644 vendor/github.com/moby/moby/hack/make/run create mode 100755 vendor/github.com/moby/moby/hack/make/sign-repos create mode 100755 vendor/github.com/moby/moby/hack/make/test-deb-install create mode 100644 vendor/github.com/moby/moby/hack/make/test-docker-py create mode 100755 vendor/github.com/moby/moby/hack/make/test-install-script create mode 100755 vendor/github.com/moby/moby/hack/make/test-integration-cli create mode 100644 vendor/github.com/moby/moby/hack/make/test-integration-shell create mode 100755 vendor/github.com/moby/moby/hack/make/test-old-apt-repo create mode 100644 vendor/github.com/moby/moby/hack/make/test-unit create mode 100644 vendor/github.com/moby/moby/hack/make/tgz create mode 100644 vendor/github.com/moby/moby/hack/make/ubuntu create mode 100755 vendor/github.com/moby/moby/hack/make/update-apt-repo create mode 100644 vendor/github.com/moby/moby/hack/make/win create mode 100644 vendor/github.com/moby/moby/hack/make/yaml-docs-generator create mode 100755 vendor/github.com/moby/moby/hack/release.sh create mode 100644 vendor/github.com/moby/moby/hack/validate/.swagger-yamllint create mode 100644 vendor/github.com/moby/moby/hack/validate/.validate create mode 100755 vendor/github.com/moby/moby/hack/validate/all create mode 100755 vendor/github.com/moby/moby/hack/validate/changelog-date-descending create mode 100755 vendor/github.com/moby/moby/hack/validate/changelog-well-formed create mode 100755 vendor/github.com/moby/moby/hack/validate/compose-bindata create mode 100755 vendor/github.com/moby/moby/hack/validate/dco create mode 100755 vendor/github.com/moby/moby/hack/validate/default create mode 100755 vendor/github.com/moby/moby/hack/validate/default-seccomp create mode 100755 vendor/github.com/moby/moby/hack/validate/gofmt create mode 100755 vendor/github.com/moby/moby/hack/validate/lint create mode 100755 vendor/github.com/moby/moby/hack/validate/pkg-imports create mode 100755 vendor/github.com/moby/moby/hack/validate/swagger create mode 100755 vendor/github.com/moby/moby/hack/validate/swagger-gen create mode 100755 vendor/github.com/moby/moby/hack/validate/test-imports create mode 100755 vendor/github.com/moby/moby/hack/validate/toml create mode 100755 vendor/github.com/moby/moby/hack/validate/vendor create mode 100755 vendor/github.com/moby/moby/hack/validate/vet create mode 100755 vendor/github.com/moby/moby/hack/vendor.sh create mode 100755 vendor/github.com/moby/moby/hooks/post_build create mode 100644 vendor/github.com/moby/moby/image/fs.go create mode 100644 vendor/github.com/moby/moby/image/fs_test.go create mode 100644 vendor/github.com/moby/moby/image/image.go create mode 100644 vendor/github.com/moby/moby/image/image_test.go create mode 100644 vendor/github.com/moby/moby/image/rootfs.go create mode 100644 vendor/github.com/moby/moby/image/spec/v1.1.md create mode 100644 vendor/github.com/moby/moby/image/spec/v1.2.md create mode 100644 vendor/github.com/moby/moby/image/spec/v1.md create mode 100644 vendor/github.com/moby/moby/image/store.go create mode 100644 vendor/github.com/moby/moby/image/store_test.go create mode 100644 vendor/github.com/moby/moby/image/tarexport/load.go create mode 100644 vendor/github.com/moby/moby/image/tarexport/save.go create mode 100644 vendor/github.com/moby/moby/image/tarexport/tarexport.go create mode 100644 vendor/github.com/moby/moby/image/v1/imagev1.go create mode 100644 vendor/github.com/moby/moby/image/v1/imagev1_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/benchmark_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/check_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon_swarm.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon_swarm_hack.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon_unix.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon_windows.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_attach_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_auth_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_build_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_containers_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_create_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_events_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_exec_resize_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_exec_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_images_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_info_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_inspect_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_inspect_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_logs_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_network_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_resize_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_service_update_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_stats_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_stats_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_swarm_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_update_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_version_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_volumes_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_attach_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_attach_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_authz_plugin_v2_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_authz_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_build_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_build_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_by_digest_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_commit_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_config_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_from_container_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_utils.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_create_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_plugins_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_diff_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_events_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_events_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_exec_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_exec_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_experimental_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_export_import_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_external_graphdriver_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_external_volume_driver_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_health_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_help_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_history_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_images_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_import_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_info_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_info_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_inspect_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_kill_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_links_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_links_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_login_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_logout_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_logs_bench_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_logs_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_nat_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_netmode_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_network_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_oom_killed_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_pause_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_port_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_proxy_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_prune_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_ps_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_pull_local_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_pull_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_pull_trusted_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_push_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_registry_user_agent_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_rename_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_restart_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_rm_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_rmi_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_run_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_run_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_search_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_secret_create_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_secret_inspect_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_service_create_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_service_health_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_service_logs_experimental_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_service_scale_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_service_update_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_sni_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_stack_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_start_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_stats_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_stop_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_tag_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_top_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_update_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_update_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_userns_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_v2_only_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_version_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_volume_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_wait_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_experimental_network_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_hub_pull_suite_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_test_vars.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_utils.go create mode 100644 vendor/github.com/moby/moby/integration-cli/events_utils.go create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures.go create mode 100755 vendor/github.com/moby/moby/integration-cli/fixtures/auth/docker-credential-shell-test create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/credentialspecs/valid.json create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/deploy/default.yaml create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/deploy/remove.yaml create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/deploy/secrets.yaml create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/https/ca.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/https/client-cert.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/https/client-key.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/https/client-rogue-cert.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/https/client-rogue-key.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/https/server-cert.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/https/server-key.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/https/server-rogue-cert.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/https/server-rogue-key.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/load/emptyLayer.tar create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/load/frozen.go create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.crt create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.key create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.crt create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.key create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.crt create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.key create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.crt create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.key create mode 100755 vendor/github.com/moby/moby/integration-cli/fixtures/notary/gen.sh create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/localhost.cert create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/localhost.key create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/registry/cert.pem create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/secrets/default create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures_linux_daemon.go create mode 100644 vendor/github.com/moby/moby/integration-cli/npipe.go create mode 100644 vendor/github.com/moby/moby/integration-cli/npipe_windows.go create mode 100644 vendor/github.com/moby/moby/integration-cli/registry.go create mode 100644 vendor/github.com/moby/moby/integration-cli/registry_mock.go create mode 100644 vendor/github.com/moby/moby/integration-cli/requirements.go create mode 100644 vendor/github.com/moby/moby/integration-cli/requirements_unix.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_exec.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_noexec.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_noseccomp.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_seccomp.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_unix.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_windows.go create mode 100644 vendor/github.com/moby/moby/integration-cli/trust_server.go create mode 100644 vendor/github.com/moby/moby/integration-cli/utils.go create mode 100644 vendor/github.com/moby/moby/keys/launchpad-ppa-zfs.asc create mode 100644 vendor/github.com/moby/moby/layer/empty.go create mode 100644 vendor/github.com/moby/moby/layer/empty_test.go create mode 100644 vendor/github.com/moby/moby/layer/filestore.go create mode 100644 vendor/github.com/moby/moby/layer/filestore_test.go create mode 100644 vendor/github.com/moby/moby/layer/layer.go create mode 100644 vendor/github.com/moby/moby/layer/layer_store.go create mode 100644 vendor/github.com/moby/moby/layer/layer_store_windows.go create mode 100644 vendor/github.com/moby/moby/layer/layer_test.go create mode 100644 vendor/github.com/moby/moby/layer/layer_unix.go create mode 100644 vendor/github.com/moby/moby/layer/layer_unix_test.go create mode 100644 vendor/github.com/moby/moby/layer/layer_windows.go create mode 100644 vendor/github.com/moby/moby/layer/migration.go create mode 100644 vendor/github.com/moby/moby/layer/migration_test.go create mode 100644 vendor/github.com/moby/moby/layer/mount_test.go create mode 100644 vendor/github.com/moby/moby/layer/mounted_layer.go create mode 100644 vendor/github.com/moby/moby/layer/ro_layer.go create mode 100644 vendor/github.com/moby/moby/layer/ro_layer_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client_linux.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client_solaris.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/container.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/container_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/container_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/oom_linux.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/oom_solaris.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/pausemonitor_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/process.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/process_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/process_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/queue_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/remote.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/remote_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/remote_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/types.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/types_linux.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/types_solaris.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/types_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/utils_linux.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/utils_solaris.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/utils_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/utils_windows_test.go create mode 100644 vendor/github.com/moby/moby/man/Dockerfile create mode 100644 vendor/github.com/moby/moby/man/Dockerfile.5.md create mode 100644 vendor/github.com/moby/moby/man/Dockerfile.aarch64 create mode 100644 vendor/github.com/moby/moby/man/Dockerfile.armhf create mode 100644 vendor/github.com/moby/moby/man/Dockerfile.ppc64le create mode 100644 vendor/github.com/moby/moby/man/Dockerfile.s390x create mode 100644 vendor/github.com/moby/moby/man/README.md create mode 100644 vendor/github.com/moby/moby/man/docker-attach.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-build.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-commit.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-config-json.5.md create mode 100644 vendor/github.com/moby/moby/man/docker-cp.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-create.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-diff.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-events.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-exec.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-export.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-history.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-images.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-import.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-info.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-inspect.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-kill.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-load.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-login.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-logout.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-logs.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-network-connect.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-network-create.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-network-disconnect.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-network-inspect.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-network-ls.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-network-rm.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-pause.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-port.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-ps.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-pull.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-push.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-rename.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-restart.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-rm.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-rmi.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-run.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-save.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-search.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-start.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-stats.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-stop.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-tag.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-top.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-unpause.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-update.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-version.1.md create mode 100644 vendor/github.com/moby/moby/man/docker-wait.1.md create mode 100644 vendor/github.com/moby/moby/man/docker.1.md create mode 100644 vendor/github.com/moby/moby/man/dockerd.8.md create mode 100644 vendor/github.com/moby/moby/man/generate.go create mode 100755 vendor/github.com/moby/moby/man/generate.sh create mode 100644 vendor/github.com/moby/moby/man/glide.lock create mode 100644 vendor/github.com/moby/moby/man/glide.yaml create mode 100755 vendor/github.com/moby/moby/man/md2man-all.sh create mode 100644 vendor/github.com/moby/moby/migrate/v1/migratev1.go create mode 100644 vendor/github.com/moby/moby/migrate/v1/migratev1_test.go create mode 100644 vendor/github.com/moby/moby/oci/defaults_linux.go create mode 100644 vendor/github.com/moby/moby/oci/defaults_solaris.go create mode 100644 vendor/github.com/moby/moby/oci/defaults_windows.go create mode 100644 vendor/github.com/moby/moby/oci/devices_linux.go create mode 100644 vendor/github.com/moby/moby/oci/devices_unsupported.go create mode 100644 vendor/github.com/moby/moby/oci/namespaces.go create mode 100644 vendor/github.com/moby/moby/opts/hosts.go create mode 100644 vendor/github.com/moby/moby/opts/hosts_test.go create mode 100644 vendor/github.com/moby/moby/opts/hosts_unix.go create mode 100644 vendor/github.com/moby/moby/opts/hosts_windows.go create mode 100644 vendor/github.com/moby/moby/opts/ip.go create mode 100644 vendor/github.com/moby/moby/opts/ip_test.go create mode 100644 vendor/github.com/moby/moby/opts/mount.go create mode 100644 vendor/github.com/moby/moby/opts/mount_test.go create mode 100644 vendor/github.com/moby/moby/opts/opts.go create mode 100644 vendor/github.com/moby/moby/opts/opts_test.go create mode 100644 vendor/github.com/moby/moby/opts/opts_unix.go create mode 100644 vendor/github.com/moby/moby/opts/opts_windows.go create mode 100644 vendor/github.com/moby/moby/opts/port.go create mode 100644 vendor/github.com/moby/moby/opts/port_test.go create mode 100644 vendor/github.com/moby/moby/opts/quotedstring.go create mode 100644 vendor/github.com/moby/moby/opts/quotedstring_test.go create mode 100644 vendor/github.com/moby/moby/opts/secret.go create mode 100644 vendor/github.com/moby/moby/opts/secret_test.go create mode 100644 vendor/github.com/moby/moby/pkg/README.md create mode 100644 vendor/github.com/moby/moby/pkg/aaparser/aaparser.go create mode 100644 vendor/github.com/moby/moby/pkg/aaparser/aaparser_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/README.md create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_other.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_other.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_posix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/copy.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/copy_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/diff.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/diff_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/example_changes.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/testdata/broken.tar create mode 100644 vendor/github.com/moby/moby/pkg/archive/time_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/time_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/utils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/wrap.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/wrap_test.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/api.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/authz.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/authz_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/middleware.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/plugin.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/response.go create mode 100644 vendor/github.com/moby/moby/pkg/broadcaster/unbuffered.go create mode 100644 vendor/github.com/moby/moby/pkg/broadcaster/unbuffered_test.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/archive.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/archive_test.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/archive_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/archive_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/chroot_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/chroot_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/diff.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/diff_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/diff_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/init_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/init_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper_log.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_deferred_remove.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/ioctl.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/log.go create mode 100644 vendor/github.com/moby/moby/pkg/directory/directory.go create mode 100644 vendor/github.com/moby/moby/pkg/directory/directory_test.go create mode 100644 vendor/github.com/moby/moby/pkg/directory/directory_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/directory/directory_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/README.md create mode 100644 vendor/github.com/moby/moby/pkg/discovery/backends.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/discovery.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/discovery_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/entry.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/file/file.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/file/file_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/generator.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/generator_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/kv/kv.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/kv/kv_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/memory/memory.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/memory/memory_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/nodes/nodes.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/nodes/nodes_test.go create mode 100644 vendor/github.com/moby/moby/pkg/filenotify/filenotify.go create mode 100644 vendor/github.com/moby/moby/pkg/filenotify/fsnotify.go create mode 100644 vendor/github.com/moby/moby/pkg/filenotify/poller.go create mode 100644 vendor/github.com/moby/moby/pkg/filenotify/poller_test.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/gitutils/gitutils.go create mode 100644 vendor/github.com/moby/moby/pkg/gitutils/gitutils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/graphdb/conn_sqlite3_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/graphdb/graphdb_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/graphdb/graphdb_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/graphdb/sort_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/graphdb/sort_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/graphdb/unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/graphdb/utils_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/homedir/homedir.go create mode 100644 vendor/github.com/moby/moby/pkg/homedir/homedir_test.go create mode 100644 vendor/github.com/moby/moby/pkg/httputils/httputils.go create mode 100644 vendor/github.com/moby/moby/pkg/httputils/httputils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/httputils/mimetype.go create mode 100644 vendor/github.com/moby/moby/pkg/httputils/mimetype_test.go create mode 100644 vendor/github.com/moby/moby/pkg/httputils/resumablerequestreader.go create mode 100644 vendor/github.com/moby/moby/pkg/httputils/resumablerequestreader_test.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/idtools.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/idtools_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/idtools_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/idtools_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/usergroupadd_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/usergroupadd_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/utils_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/integration/checker/checker.go create mode 100644 vendor/github.com/moby/moby/pkg/integration/cmd/command.go create mode 100644 vendor/github.com/moby/moby/pkg/integration/cmd/command_test.go create mode 100644 vendor/github.com/moby/moby/pkg/integration/utils.go create mode 100644 vendor/github.com/moby/moby/pkg/integration/utils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/buffer.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/buffer_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/bytespipe.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/bytespipe_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/fmt.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/fmt_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/fswriters.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/fswriters_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/multireader.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/multireader_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/readers.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/readers_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/temp_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/temp_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/writeflusher.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/writers.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/writers_test.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlog.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling_test.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes_test.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling_test.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage_test.go create mode 100644 vendor/github.com/moby/moby/pkg/listeners/listeners_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/listeners/listeners_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/listeners/listeners_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/locker/README.md create mode 100644 vendor/github.com/moby/moby/pkg/locker/locker.go create mode 100644 vendor/github.com/moby/moby/pkg/locker/locker_test.go create mode 100644 vendor/github.com/moby/moby/pkg/longpath/longpath.go create mode 100644 vendor/github.com/moby/moby/pkg/longpath/longpath_test.go create mode 100644 vendor/github.com/moby/moby/pkg/loopback/attach_loopback.go create mode 100644 vendor/github.com/moby/moby/pkg/loopback/ioctl.go create mode 100644 vendor/github.com/moby/moby/pkg/loopback/loop_wrapper.go create mode 100644 vendor/github.com/moby/moby/pkg/loopback/loopback.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/flags.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/flags_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/flags_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/flags_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mount.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mount_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mounter_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mounter_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mounter_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mounter_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/sharedsubtree_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/namesgenerator/cmd/names-generator/main.go create mode 100644 vendor/github.com/moby/moby/pkg/namesgenerator/names-generator.go create mode 100644 vendor/github.com/moby/moby/pkg/namesgenerator/names-generator_test.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/uname_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/uname_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/uname_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/parsers.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/parsers_test.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile_test.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/architecture_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/architecture_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/architecture_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/platform.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/utsname_int8.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/utsname_uint8.go create mode 100644 vendor/github.com/moby/moby/pkg/plugingetter/getter.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/client.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/client_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/errors.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/plugin_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/README.md create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/foo.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/main.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/template.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/plugins.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/plugins_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/plugins_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/transport/http.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/transport/transport.go create mode 100644 vendor/github.com/moby/moby/pkg/pools/pools.go create mode 100644 vendor/github.com/moby/moby/pkg/pools/pools_test.go create mode 100644 vendor/github.com/moby/moby/pkg/progress/progress.go create mode 100644 vendor/github.com/moby/moby/pkg/progress/progressreader.go create mode 100644 vendor/github.com/moby/moby/pkg/progress/progressreader_test.go create mode 100644 vendor/github.com/moby/moby/pkg/promise/promise.go create mode 100644 vendor/github.com/moby/moby/pkg/pubsub/publisher.go create mode 100644 vendor/github.com/moby/moby/pkg/pubsub/publisher_test.go create mode 100644 vendor/github.com/moby/moby/pkg/random/random.go create mode 100644 vendor/github.com/moby/moby/pkg/random/random_test.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/README.md create mode 100644 vendor/github.com/moby/moby/pkg/reexec/command_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/command_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/command_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/command_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/reexec.go create mode 100644 vendor/github.com/moby/moby/pkg/registrar/registrar.go create mode 100644 vendor/github.com/moby/moby/pkg/registrar/registrar_test.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/README.md create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/trap.go create mode 100644 vendor/github.com/moby/moby/pkg/stdcopy/stdcopy.go create mode 100644 vendor/github.com/moby/moby/pkg/stdcopy/stdcopy_test.go create mode 100644 vendor/github.com/moby/moby/pkg/streamformatter/streamformatter.go create mode 100644 vendor/github.com/moby/moby/pkg/streamformatter/streamformatter_test.go create mode 100644 vendor/github.com/moby/moby/pkg/stringid/README.md create mode 100644 vendor/github.com/moby/moby/pkg/stringid/stringid.go create mode 100644 vendor/github.com/moby/moby/pkg/stringid/stringid_test.go create mode 100644 vendor/github.com/moby/moby/pkg/stringutils/README.md create mode 100644 vendor/github.com/moby/moby/pkg/stringutils/stringutils.go create mode 100644 vendor/github.com/moby/moby/pkg/stringutils/stringutils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/symlink/LICENSE.APACHE create mode 100644 vendor/github.com/moby/moby/pkg/symlink/LICENSE.BSD create mode 100644 vendor/github.com/moby/moby/pkg/symlink/README.md create mode 100644 vendor/github.com/moby/moby/pkg/symlink/fs.go create mode 100644 vendor/github.com/moby/moby/pkg/symlink/fs_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/symlink/fs_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/symlink/fs_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/README.md create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/numcpu.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/numcpu_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/numcpu_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_test.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/errors.go create mode 100644 vendor/github.com/moby/moby/pkg/system/events_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/exitcode.go create mode 100644 vendor/github.com/moby/moby/pkg/system/filesys.go create mode 100644 vendor/github.com/moby/moby/pkg/system/filesys_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/lstat.go create mode 100644 vendor/github.com/moby/moby/pkg/system/lstat_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/lstat_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/mknod.go create mode 100644 vendor/github.com/moby/moby/pkg/system/mknod_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/path_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/path_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/path_windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_openbsd.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/syscall_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/syscall_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/syscall_windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/umask.go create mode 100644 vendor/github.com/moby/moby/pkg/system/umask_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/utimes_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/system/utimes_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/system/utimes_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/utimes_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/system/xattrs_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/system/xattrs_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/tailfile/tailfile.go create mode 100644 vendor/github.com/moby/moby/pkg/tailfile/tailfile_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/builder_context.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/builder_context_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/fileinfosums.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/fileinfosums_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/tarsum.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/tarsum_spec.md create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/tarsum_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-0.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-1.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-2.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-3.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/xattr/json create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/xattr/layer.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/versioning.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/versioning_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/writercloser.go create mode 100644 vendor/github.com/moby/moby/pkg/term/ascii.go create mode 100644 vendor/github.com/moby/moby/pkg/term/ascii_test.go create mode 100644 vendor/github.com/moby/moby/pkg/term/tc_linux_cgo.go create mode 100644 vendor/github.com/moby/moby/pkg/term/tc_other.go create mode 100644 vendor/github.com/moby/moby/pkg/term/tc_solaris_cgo.go create mode 100644 vendor/github.com/moby/moby/pkg/term/term.go create mode 100644 vendor/github.com/moby/moby/pkg/term/term_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/term/term_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/term/term_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/term/termios_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/term/termios_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/term/termios_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/term/termios_openbsd.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/ansi_reader.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/ansi_writer.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/console.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/windows.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/assert/assert.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/pkg.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/tempfile/tempfile.go create mode 100644 vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone.go create mode 100644 vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go16.go create mode 100644 vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go17.go create mode 100644 vendor/github.com/moby/moby/pkg/truncindex/truncindex.go create mode 100644 vendor/github.com/moby/moby/pkg/truncindex/truncindex_test.go create mode 100644 vendor/github.com/moby/moby/pkg/urlutil/urlutil.go create mode 100644 vendor/github.com/moby/moby/pkg/urlutil/urlutil_test.go create mode 100644 vendor/github.com/moby/moby/pkg/useragent/README.md create mode 100644 vendor/github.com/moby/moby/pkg/useragent/useragent.go create mode 100644 vendor/github.com/moby/moby/pkg/useragent/useragent_test.go create mode 100644 vendor/github.com/moby/moby/plugin/backend_linux.go create mode 100644 vendor/github.com/moby/moby/plugin/backend_unsupported.go create mode 100644 vendor/github.com/moby/moby/plugin/blobstore.go create mode 100644 vendor/github.com/moby/moby/plugin/defs.go create mode 100644 vendor/github.com/moby/moby/plugin/manager.go create mode 100644 vendor/github.com/moby/moby/plugin/manager_linux.go create mode 100644 vendor/github.com/moby/moby/plugin/manager_solaris.go create mode 100644 vendor/github.com/moby/moby/plugin/manager_windows.go create mode 100644 vendor/github.com/moby/moby/plugin/store.go create mode 100644 vendor/github.com/moby/moby/plugin/store_test.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/plugin.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/plugin_linux.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/plugin_unsupported.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/settable.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/settable_test.go create mode 100644 vendor/github.com/moby/moby/poule.yml create mode 100644 vendor/github.com/moby/moby/profiles/apparmor/apparmor.go create mode 100644 vendor/github.com/moby/moby/profiles/apparmor/template.go create mode 100755 vendor/github.com/moby/moby/profiles/seccomp/default.json create mode 100755 vendor/github.com/moby/moby/profiles/seccomp/fixtures/example.json create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/generate.go create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/seccomp.go create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/seccomp_default.go create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/seccomp_test.go create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/seccomp_unsupported.go create mode 100644 vendor/github.com/moby/moby/project/ARM.md create mode 100644 vendor/github.com/moby/moby/project/BRANCHES-AND-TAGS.md create mode 120000 vendor/github.com/moby/moby/project/CONTRIBUTORS.md create mode 100644 vendor/github.com/moby/moby/project/GOVERNANCE.md create mode 100644 vendor/github.com/moby/moby/project/IRC-ADMINISTRATION.md create mode 100644 vendor/github.com/moby/moby/project/ISSUE-TRIAGE.md create mode 100644 vendor/github.com/moby/moby/project/PACKAGE-REPO-MAINTENANCE.md create mode 100644 vendor/github.com/moby/moby/project/PACKAGERS.md create mode 100644 vendor/github.com/moby/moby/project/PATCH-RELEASES.md create mode 100644 vendor/github.com/moby/moby/project/PRINCIPLES.md create mode 100644 vendor/github.com/moby/moby/project/README.md create mode 100644 vendor/github.com/moby/moby/project/RELEASE-CHECKLIST.md create mode 100644 vendor/github.com/moby/moby/project/RELEASE-PROCESS.md create mode 100644 vendor/github.com/moby/moby/project/REVIEWING.md create mode 100644 vendor/github.com/moby/moby/project/TOOLS.md create mode 100644 vendor/github.com/moby/moby/reference/reference.go create mode 100644 vendor/github.com/moby/moby/reference/reference_test.go create mode 100644 vendor/github.com/moby/moby/reference/store.go create mode 100644 vendor/github.com/moby/moby/reference/store_test.go create mode 100644 vendor/github.com/moby/moby/registry/auth.go create mode 100644 vendor/github.com/moby/moby/registry/auth_test.go create mode 100644 vendor/github.com/moby/moby/registry/config.go create mode 100644 vendor/github.com/moby/moby/registry/config_test.go create mode 100644 vendor/github.com/moby/moby/registry/config_unix.go create mode 100644 vendor/github.com/moby/moby/registry/config_windows.go create mode 100644 vendor/github.com/moby/moby/registry/endpoint_test.go create mode 100644 vendor/github.com/moby/moby/registry/endpoint_v1.go create mode 100644 vendor/github.com/moby/moby/registry/registry.go create mode 100644 vendor/github.com/moby/moby/registry/registry_mock_test.go create mode 100644 vendor/github.com/moby/moby/registry/registry_test.go create mode 100644 vendor/github.com/moby/moby/registry/service.go create mode 100644 vendor/github.com/moby/moby/registry/service_v1.go create mode 100644 vendor/github.com/moby/moby/registry/service_v1_test.go create mode 100644 vendor/github.com/moby/moby/registry/service_v2.go create mode 100644 vendor/github.com/moby/moby/registry/session.go create mode 100644 vendor/github.com/moby/moby/registry/types.go create mode 100644 vendor/github.com/moby/moby/restartmanager/restartmanager.go create mode 100644 vendor/github.com/moby/moby/restartmanager/restartmanager_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/compare.go create mode 100644 vendor/github.com/moby/moby/runconfig/compare_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/config.go create mode 100644 vendor/github.com/moby/moby/runconfig/config_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/config_unix.go create mode 100644 vendor/github.com/moby/moby/runconfig/config_windows.go create mode 100644 vendor/github.com/moby/moby/runconfig/errors.go create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_14.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_17.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_19.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_14.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_19.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/windows/container_config_1_19.json create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig.go create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig_solaris.go create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig_unix.go create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig_windows.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/envfile.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/envfile_test.go create mode 100755 vendor/github.com/moby/moby/runconfig/opts/fixtures/utf16.env create mode 100755 vendor/github.com/moby/moby/runconfig/opts/fixtures/utf16be.env create mode 100755 vendor/github.com/moby/moby/runconfig/opts/fixtures/utf8.env create mode 100644 vendor/github.com/moby/moby/runconfig/opts/fixtures/valid.env create mode 100644 vendor/github.com/moby/moby/runconfig/opts/fixtures/valid.label create mode 100644 vendor/github.com/moby/moby/runconfig/opts/opts.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/opts_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/parse.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/parse_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/runtime.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/throttledevice.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/ulimit.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/ulimit_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/weightdevice.go create mode 100644 vendor/github.com/moby/moby/utils/debug.go create mode 100644 vendor/github.com/moby/moby/utils/debug_test.go create mode 100644 vendor/github.com/moby/moby/utils/names.go create mode 100644 vendor/github.com/moby/moby/utils/process_unix.go create mode 100644 vendor/github.com/moby/moby/utils/process_windows.go create mode 100644 vendor/github.com/moby/moby/utils/templates/templates.go create mode 100644 vendor/github.com/moby/moby/utils/templates/templates_test.go create mode 100644 vendor/github.com/moby/moby/utils/utils.go create mode 100644 vendor/github.com/moby/moby/utils/utils_test.go create mode 100644 vendor/github.com/moby/moby/vendor.conf create mode 100644 vendor/github.com/moby/moby/volume/drivers/adapter.go create mode 100644 vendor/github.com/moby/moby/volume/drivers/extpoint.go create mode 100644 vendor/github.com/moby/moby/volume/drivers/extpoint_test.go create mode 100644 vendor/github.com/moby/moby/volume/drivers/proxy.go create mode 100644 vendor/github.com/moby/moby/volume/drivers/proxy_test.go create mode 100644 vendor/github.com/moby/moby/volume/local/local.go create mode 100644 vendor/github.com/moby/moby/volume/local/local_test.go create mode 100644 vendor/github.com/moby/moby/volume/local/local_unix.go create mode 100644 vendor/github.com/moby/moby/volume/local/local_windows.go create mode 100644 vendor/github.com/moby/moby/volume/local/unmount_linux.go create mode 100644 vendor/github.com/moby/moby/volume/local/unmount_unix.go create mode 100644 vendor/github.com/moby/moby/volume/local/unmount_windows.go create mode 100644 vendor/github.com/moby/moby/volume/store/db.go create mode 100644 vendor/github.com/moby/moby/volume/store/errors.go create mode 100644 vendor/github.com/moby/moby/volume/store/restore.go create mode 100644 vendor/github.com/moby/moby/volume/store/store.go create mode 100644 vendor/github.com/moby/moby/volume/store/store_test.go create mode 100644 vendor/github.com/moby/moby/volume/store/store_unix.go create mode 100644 vendor/github.com/moby/moby/volume/store/store_windows.go create mode 100644 vendor/github.com/moby/moby/volume/testutils/testutils.go create mode 100644 vendor/github.com/moby/moby/volume/validate.go create mode 100644 vendor/github.com/moby/moby/volume/validate_test.go create mode 100644 vendor/github.com/moby/moby/volume/validate_test_unix.go create mode 100644 vendor/github.com/moby/moby/volume/validate_test_windows.go create mode 100644 vendor/github.com/moby/moby/volume/volume.go create mode 100644 vendor/github.com/moby/moby/volume/volume_copy.go create mode 100644 vendor/github.com/moby/moby/volume/volume_copy_unix.go create mode 100644 vendor/github.com/moby/moby/volume/volume_copy_windows.go create mode 100644 vendor/github.com/moby/moby/volume/volume_linux.go create mode 100644 vendor/github.com/moby/moby/volume/volume_linux_test.go create mode 100644 vendor/github.com/moby/moby/volume/volume_propagation_linux.go create mode 100644 vendor/github.com/moby/moby/volume/volume_propagation_linux_test.go create mode 100644 vendor/github.com/moby/moby/volume/volume_propagation_unsupported.go create mode 100644 vendor/github.com/moby/moby/volume/volume_test.go create mode 100644 vendor/github.com/moby/moby/volume/volume_unix.go create mode 100644 vendor/github.com/moby/moby/volume/volume_unsupported.go create mode 100644 vendor/github.com/moby/moby/volume/volume_windows.go create mode 100644 vendor/github.com/pkg/errors/.gitignore create mode 100644 vendor/github.com/pkg/errors/.travis.yml create mode 100644 vendor/github.com/pkg/errors/LICENSE create mode 100644 vendor/github.com/pkg/errors/README.md create mode 100644 vendor/github.com/pkg/errors/appveyor.yml create mode 100644 vendor/github.com/pkg/errors/bench_test.go create mode 100644 vendor/github.com/pkg/errors/errors.go create mode 100644 vendor/github.com/pkg/errors/errors_test.go create mode 100644 vendor/github.com/pkg/errors/example_test.go create mode 100644 vendor/github.com/pkg/errors/format_test.go create mode 100644 vendor/github.com/pkg/errors/stack.go create mode 100644 vendor/github.com/pkg/errors/stack_test.go create mode 100644 vendor/github.com/pmezard/go-difflib/.travis.yml create mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE create mode 100644 vendor/github.com/pmezard/go-difflib/README.md create mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go create mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go create mode 100644 vendor/github.com/sirupsen/logrus/.gitignore create mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/sirupsen/logrus/README.md create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit_test.go create mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml create mode 100644 vendor/github.com/sirupsen/logrus/doc.go create mode 100644 vendor/github.com/sirupsen/logrus/entry.go create mode 100644 vendor/github.com/sirupsen/logrus/entry_test.go create mode 100644 vendor/github.com/sirupsen/logrus/example_basic_test.go create mode 100644 vendor/github.com/sirupsen/logrus/example_hook_test.go create mode 100644 vendor/github.com/sirupsen/logrus/exported.go create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/formatter_bench_test.go create mode 100644 vendor/github.com/sirupsen/logrus/hook_test.go create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/README.md create mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go create mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go create mode 100644 vendor/github.com/sirupsen/logrus/hooks/test/test.go create mode 100644 vendor/github.com/sirupsen/logrus/hooks/test/test_test.go create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter_test.go create mode 100644 vendor/github.com/sirupsen/logrus/logger.go create mode 100644 vendor/github.com/sirupsen/logrus/logger_bench_test.go create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/sirupsen/logrus/logrus_test.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter_test.go create mode 100644 vendor/github.com/sirupsen/logrus/writer.go create mode 100644 vendor/github.com/stretchr/testify/.gitignore create mode 100755 vendor/github.com/stretchr/testify/.travis.gofmt.sh create mode 100755 vendor/github.com/stretchr/testify/.travis.gogenerate.sh create mode 100755 vendor/github.com/stretchr/testify/.travis.govet.sh create mode 100644 vendor/github.com/stretchr/testify/.travis.yml create mode 100644 vendor/github.com/stretchr/testify/Gopkg.lock create mode 100644 vendor/github.com/stretchr/testify/Gopkg.toml create mode 100644 vendor/github.com/stretchr/testify/LICENSE create mode 100644 vendor/github.com/stretchr/testify/README.md create mode 100644 vendor/github.com/stretchr/testify/_codegen/main.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl create mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertions_test.go create mode 100644 vendor/github.com/stretchr/testify/assert/doc.go create mode 100644 vendor/github.com/stretchr/testify/assert/errors.go create mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go create mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions_test.go create mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go create mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions_test.go create mode 100644 vendor/github.com/stretchr/testify/doc.go create mode 100644 vendor/github.com/stretchr/testify/http/doc.go create mode 100644 vendor/github.com/stretchr/testify/http/test_response_writer.go create mode 100644 vendor/github.com/stretchr/testify/http/test_round_tripper.go create mode 100644 vendor/github.com/stretchr/testify/mock/doc.go create mode 100644 vendor/github.com/stretchr/testify/mock/mock.go create mode 100644 vendor/github.com/stretchr/testify/mock/mock_test.go create mode 100644 vendor/github.com/stretchr/testify/package_test.go create mode 100644 vendor/github.com/stretchr/testify/require/doc.go create mode 100644 vendor/github.com/stretchr/testify/require/forward_requirements.go create mode 100644 vendor/github.com/stretchr/testify/require/forward_requirements_test.go create mode 100644 vendor/github.com/stretchr/testify/require/require.go create mode 100644 vendor/github.com/stretchr/testify/require/require.go.tmpl create mode 100644 vendor/github.com/stretchr/testify/require/require_forward.go create mode 100644 vendor/github.com/stretchr/testify/require/require_forward.go.tmpl create mode 100644 vendor/github.com/stretchr/testify/require/requirements.go create mode 100644 vendor/github.com/stretchr/testify/require/requirements_test.go create mode 100644 vendor/github.com/stretchr/testify/suite/doc.go create mode 100644 vendor/github.com/stretchr/testify/suite/interfaces.go create mode 100644 vendor/github.com/stretchr/testify/suite/suite.go create mode 100644 vendor/github.com/stretchr/testify/suite/suite_test.go create mode 100644 vendor/golang.org/x/crypto/.gitattributes create mode 100644 vendor/golang.org/x/crypto/.gitignore create mode 100644 vendor/golang.org/x/crypto/AUTHORS create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/README.md create mode 100644 vendor/golang.org/x/crypto/acme/acme.go create mode 100644 vendor/golang.org/x/crypto/acme/acme_test.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/autocert.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/autocert_test.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/cache.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/cache_test.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/example_test.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/listener.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/renewal.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/renewal_test.go create mode 100644 vendor/golang.org/x/crypto/acme/jws.go create mode 100644 vendor/golang.org/x/crypto/acme/jws_test.go create mode 100644 vendor/golang.org/x/crypto/acme/types.go create mode 100644 vendor/golang.org/x/crypto/acme/types_test.go create mode 100644 vendor/golang.org/x/crypto/argon2/argon2.go create mode 100644 vendor/golang.org/x/crypto/argon2/argon2_test.go create mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_test.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go create mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s.go create mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_386.go create mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_386.s create mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_generic.go create mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_test.go create mode 100644 vendor/golang.org/x/crypto/blake2s/blake2x.go create mode 100644 vendor/golang.org/x/crypto/blake2s/register.go create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go create mode 100644 vendor/golang.org/x/crypto/blowfish/blowfish_test.go create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go create mode 100644 vendor/golang.org/x/crypto/bn256/bn256.go create mode 100644 vendor/golang.org/x/crypto/bn256/bn256_test.go create mode 100644 vendor/golang.org/x/crypto/bn256/constants.go create mode 100644 vendor/golang.org/x/crypto/bn256/curve.go create mode 100644 vendor/golang.org/x/crypto/bn256/example_test.go create mode 100644 vendor/golang.org/x/crypto/bn256/gfp12.go create mode 100644 vendor/golang.org/x/crypto/bn256/gfp2.go create mode 100644 vendor/golang.org/x/crypto/bn256/gfp6.go create mode 100644 vendor/golang.org/x/crypto/bn256/optate.go create mode 100644 vendor/golang.org/x/crypto/bn256/twist.go create mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go create mode 100644 vendor/golang.org/x/crypto/cast5/cast5_test.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_test.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_vectors_test.go create mode 100644 vendor/golang.org/x/crypto/codereview.cfg create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1_test.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/cryptobyte_test.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/example_test.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go create mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.h create mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_test.go create mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go create mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go create mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/square_amd64.s create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_test.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go create mode 100644 vendor/golang.org/x/crypto/ed25519/testdata/sign.input.gz create mode 100644 vendor/golang.org/x/crypto/hkdf/example_test.go create mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go create mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf_test.go create mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go create mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_test.go create mode 100644 vendor/golang.org/x/crypto/md4/example_test.go create mode 100644 vendor/golang.org/x/crypto/md4/md4.go create mode 100644 vendor/golang.org/x/crypto/md4/md4_test.go create mode 100644 vendor/golang.org/x/crypto/md4/md4block.go create mode 100644 vendor/golang.org/x/crypto/nacl/auth/auth.go create mode 100644 vendor/golang.org/x/crypto/nacl/auth/auth_test.go create mode 100644 vendor/golang.org/x/crypto/nacl/auth/example_test.go create mode 100644 vendor/golang.org/x/crypto/nacl/box/box.go create mode 100644 vendor/golang.org/x/crypto/nacl/box/box_test.go create mode 100644 vendor/golang.org/x/crypto/nacl/box/example_test.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/example_test.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox_test.go create mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go create mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go create mode 100644 vendor/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 vendor/golang.org/x/crypto/openpgp/read_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k_test.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write_test.go create mode 100644 vendor/golang.org/x/crypto/otr/libotr_test_helper.c create mode 100644 vendor/golang.org/x/crypto/otr/otr.go create mode 100644 vendor/golang.org/x/crypto/otr/otr_test.go create mode 100644 vendor/golang.org/x/crypto/otr/smp.go create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string_test.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto_test.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/mac_test.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf_test.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12_test.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go create mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go create mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305_test.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ref.go create mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160.go create mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go create mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160block.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa_test.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa20_test.go create mode 100644 vendor/golang.org/x/crypto/scrypt/example_test.go create mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go create mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt_test.go create mode 100644 vendor/golang.org/x/crypto/sha3/doc.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.s create mode 100644 vendor/golang.org/x/crypto/sha3/register.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_test.go create mode 100644 vendor/golang.org/x/crypto/sha3/shake.go create mode 100644 vendor/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate create mode 100644 vendor/golang.org/x/crypto/sha3/xor.go create mode 100644 vendor/golang.org/x/crypto/sha3/xor_generic.go create mode 100644 vendor/golang.org/x/crypto/sha3/xor_unaligned.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/client.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/client_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/example_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/forward.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/server.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/server_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/testdata_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/benchmark_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go create mode 100644 vendor/golang.org/x/crypto/ssh/buffer_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/certs.go create mode 100644 vendor/golang.org/x/crypto/ssh/certs_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/channel.go create mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go create mode 100644 vendor/golang.org/x/crypto/ssh/cipher_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/client.go create mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go create mode 100644 vendor/golang.org/x/crypto/ssh/client_auth_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/client_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/common.go create mode 100644 vendor/golang.org/x/crypto/ssh/connection.go create mode 100644 vendor/golang.org/x/crypto/ssh/doc.go create mode 100644 vendor/golang.org/x/crypto/ssh/example_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go create mode 100644 vendor/golang.org/x/crypto/ssh/handshake_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/kex.go create mode 100644 vendor/golang.org/x/crypto/ssh/kex_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/keys.go create mode 100644 vendor/golang.org/x/crypto/ssh/keys_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go create mode 100644 vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/mac.go create mode 100644 vendor/golang.org/x/crypto/ssh/mempipe_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/messages.go create mode 100644 vendor/golang.org/x/crypto/ssh/messages_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/mux.go create mode 100644 vendor/golang.org/x/crypto/ssh/mux_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/server.go create mode 100644 vendor/golang.org/x/crypto/ssh/session.go create mode 100644 vendor/golang.org/x/crypto/ssh/session_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go create mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go create mode 100644 vendor/golang.org/x/crypto/ssh/tcpip_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/banner_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/cert_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/dial_unix_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/doc.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/multi_auth_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/session_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c create mode 100644 vendor/golang.org/x/crypto/ssh/test/test_unix_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/testdata_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/testdata/doc.go create mode 100644 vendor/golang.org/x/crypto/ssh/testdata/keys.go create mode 100644 vendor/golang.org/x/crypto/ssh/testdata_test.go create mode 100644 vendor/golang.org/x/crypto/ssh/transport.go create mode 100644 vendor/golang.org/x/crypto/ssh/transport_test.go create mode 100644 vendor/golang.org/x/crypto/tea/cipher.go create mode 100644 vendor/golang.org/x/crypto/tea/tea_test.go create mode 100644 vendor/golang.org/x/crypto/twofish/twofish.go create mode 100644 vendor/golang.org/x/crypto/twofish/twofish_test.go create mode 100644 vendor/golang.org/x/crypto/xtea/block.go create mode 100644 vendor/golang.org/x/crypto/xtea/cipher.go create mode 100644 vendor/golang.org/x/crypto/xtea/xtea_test.go create mode 100644 vendor/golang.org/x/crypto/xts/xts.go create mode 100644 vendor/golang.org/x/crypto/xts/xts_test.go create mode 100644 vendor/golang.org/x/net/.gitattributes create mode 100644 vendor/golang.org/x/net/.gitignore create mode 100644 vendor/golang.org/x/net/AUTHORS create mode 100644 vendor/golang.org/x/net/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/README.md create mode 100644 vendor/golang.org/x/net/bpf/asm.go create mode 100644 vendor/golang.org/x/net/bpf/constants.go create mode 100644 vendor/golang.org/x/net/bpf/doc.go create mode 100644 vendor/golang.org/x/net/bpf/instructions.go create mode 100644 vendor/golang.org/x/net/bpf/instructions_test.go create mode 100644 vendor/golang.org/x/net/bpf/setter.go create mode 100644 vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf create mode 100644 vendor/golang.org/x/net/bpf/testdata/all_instructions.txt create mode 100644 vendor/golang.org/x/net/bpf/vm.go create mode 100644 vendor/golang.org/x/net/bpf/vm_aluop_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_bpf_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_extension_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_instructions.go create mode 100644 vendor/golang.org/x/net/bpf/vm_jump_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_load_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_ret_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_scratch_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_test.go create mode 100644 vendor/golang.org/x/net/codereview.cfg create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/context_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/net/context/withtimeout_test.go create mode 100644 vendor/golang.org/x/net/dict/dict.go create mode 100644 vendor/golang.org/x/net/dns/dnsmessage/example_test.go create mode 100644 vendor/golang.org/x/net/dns/dnsmessage/message.go create mode 100644 vendor/golang.org/x/net/dns/dnsmessage/message_test.go create mode 100644 vendor/golang.org/x/net/html/atom/atom.go create mode 100644 vendor/golang.org/x/net/html/atom/atom_test.go create mode 100644 vendor/golang.org/x/net/html/atom/gen.go create mode 100644 vendor/golang.org/x/net/html/atom/table.go create mode 100644 vendor/golang.org/x/net/html/atom/table_test.go create mode 100644 vendor/golang.org/x/net/html/charset/charset.go create mode 100644 vendor/golang.org/x/net/html/charset/charset_test.go create mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/README create mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html create mode 100644 vendor/golang.org/x/net/html/const.go create mode 100644 vendor/golang.org/x/net/html/doc.go create mode 100644 vendor/golang.org/x/net/html/doctype.go create mode 100644 vendor/golang.org/x/net/html/entity.go create mode 100644 vendor/golang.org/x/net/html/entity_test.go create mode 100644 vendor/golang.org/x/net/html/escape.go create mode 100644 vendor/golang.org/x/net/html/escape_test.go create mode 100644 vendor/golang.org/x/net/html/example_test.go create mode 100644 vendor/golang.org/x/net/html/foreign.go create mode 100644 vendor/golang.org/x/net/html/node.go create mode 100644 vendor/golang.org/x/net/html/node_test.go create mode 100644 vendor/golang.org/x/net/html/parse.go create mode 100644 vendor/golang.org/x/net/html/parse_test.go create mode 100644 vendor/golang.org/x/net/html/render.go create mode 100644 vendor/golang.org/x/net/html/render_test.go create mode 100644 vendor/golang.org/x/net/html/testdata/go1.html create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/README create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/comments01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/entities01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/entities02.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/isindex.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tables01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests1.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests10.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests11.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests12.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests14.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests15.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests16.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests17.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests18.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests19.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests2.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests20.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests21.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests22.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests23.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests24.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests25.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests26.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests3.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests4.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests5.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests6.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests7.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests8.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests9.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat create mode 100644 vendor/golang.org/x/net/html/token.go create mode 100644 vendor/golang.org/x/net/html/token_test.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/export_test.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/go19_test.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/proxy.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/proxy_test.go create mode 100644 vendor/golang.org/x/net/http2/.gitignore create mode 100644 vendor/golang.org/x/net/http2/Dockerfile create mode 100644 vendor/golang.org/x/net/http2/Makefile create mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/net/http2/ciphers.go create mode 100644 vendor/golang.org/x/net/http2/ciphers_test.go create mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go create mode 100644 vendor/golang.org/x/net/http2/configure_transport.go create mode 100644 vendor/golang.org/x/net/http2/databuffer.go create mode 100644 vendor/golang.org/x/net/http2/databuffer_test.go create mode 100644 vendor/golang.org/x/net/http2/errors.go create mode 100644 vendor/golang.org/x/net/http2/errors_test.go create mode 100644 vendor/golang.org/x/net/http2/flow.go create mode 100644 vendor/golang.org/x/net/http2/flow_test.go create mode 100644 vendor/golang.org/x/net/http2/frame.go create mode 100644 vendor/golang.org/x/net/http2/frame_test.go create mode 100644 vendor/golang.org/x/net/http2/go16.go create mode 100644 vendor/golang.org/x/net/http2/go17.go create mode 100644 vendor/golang.org/x/net/http2/go17_not18.go create mode 100644 vendor/golang.org/x/net/http2/go18.go create mode 100644 vendor/golang.org/x/net/http2/go18_test.go create mode 100644 vendor/golang.org/x/net/http2/go19.go create mode 100644 vendor/golang.org/x/net/http2/go19_test.go create mode 100644 vendor/golang.org/x/net/http2/gotrack.go create mode 100644 vendor/golang.org/x/net/http2/gotrack_test.go create mode 100644 vendor/golang.org/x/net/http2/h2demo/.gitignore create mode 100644 vendor/golang.org/x/net/http2/h2demo/Dockerfile create mode 100644 vendor/golang.org/x/net/http2/h2demo/Dockerfile.0 create mode 100644 vendor/golang.org/x/net/http2/h2demo/Makefile create mode 100644 vendor/golang.org/x/net/http2/h2demo/README create mode 100644 vendor/golang.org/x/net/http2/h2demo/deployment-prod.yaml create mode 100644 vendor/golang.org/x/net/http2/h2demo/h2demo.go create mode 100644 vendor/golang.org/x/net/http2/h2demo/launch.go create mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.key create mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.pem create mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.srl create mode 100644 vendor/golang.org/x/net/http2/h2demo/server.crt create mode 100644 vendor/golang.org/x/net/http2/h2demo/server.key create mode 100644 vendor/golang.org/x/net/http2/h2demo/service.yaml create mode 100644 vendor/golang.org/x/net/http2/h2demo/tmpl.go create mode 100644 vendor/golang.org/x/net/http2/h2i/README.md create mode 100644 vendor/golang.org/x/net/http2/h2i/h2i.go create mode 100644 vendor/golang.org/x/net/http2/headermap.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode_test.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack_test.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables_test.go create mode 100644 vendor/golang.org/x/net/http2/http2.go create mode 100644 vendor/golang.org/x/net/http2/http2_test.go create mode 100644 vendor/golang.org/x/net/http2/not_go16.go create mode 100644 vendor/golang.org/x/net/http2/not_go17.go create mode 100644 vendor/golang.org/x/net/http2/not_go18.go create mode 100644 vendor/golang.org/x/net/http2/not_go19.go create mode 100644 vendor/golang.org/x/net/http2/pipe.go create mode 100644 vendor/golang.org/x/net/http2/pipe_test.go create mode 100644 vendor/golang.org/x/net/http2/server.go create mode 100644 vendor/golang.org/x/net/http2/server_push_test.go create mode 100644 vendor/golang.org/x/net/http2/server_test.go create mode 100644 vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml create mode 100644 vendor/golang.org/x/net/http2/transport.go create mode 100644 vendor/golang.org/x/net/http2/transport_test.go create mode 100644 vendor/golang.org/x/net/http2/write.go create mode 100644 vendor/golang.org/x/net/http2/writesched.go create mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go create mode 100644 vendor/golang.org/x/net/http2/writesched_priority_test.go create mode 100644 vendor/golang.org/x/net/http2/writesched_random.go create mode 100644 vendor/golang.org/x/net/http2/writesched_random_test.go create mode 100644 vendor/golang.org/x/net/http2/writesched_test.go create mode 100644 vendor/golang.org/x/net/http2/z_spec_test.go create mode 100644 vendor/golang.org/x/net/icmp/dstunreach.go create mode 100644 vendor/golang.org/x/net/icmp/echo.go create mode 100644 vendor/golang.org/x/net/icmp/endpoint.go create mode 100644 vendor/golang.org/x/net/icmp/example_test.go create mode 100644 vendor/golang.org/x/net/icmp/extension.go create mode 100644 vendor/golang.org/x/net/icmp/extension_test.go create mode 100644 vendor/golang.org/x/net/icmp/helper_posix.go create mode 100644 vendor/golang.org/x/net/icmp/interface.go create mode 100644 vendor/golang.org/x/net/icmp/ipv4.go create mode 100644 vendor/golang.org/x/net/icmp/ipv4_test.go create mode 100644 vendor/golang.org/x/net/icmp/ipv6.go create mode 100644 vendor/golang.org/x/net/icmp/listen_posix.go create mode 100644 vendor/golang.org/x/net/icmp/listen_stub.go create mode 100644 vendor/golang.org/x/net/icmp/message.go create mode 100644 vendor/golang.org/x/net/icmp/message_test.go create mode 100644 vendor/golang.org/x/net/icmp/messagebody.go create mode 100644 vendor/golang.org/x/net/icmp/mpls.go create mode 100644 vendor/golang.org/x/net/icmp/multipart.go create mode 100644 vendor/golang.org/x/net/icmp/multipart_test.go create mode 100644 vendor/golang.org/x/net/icmp/packettoobig.go create mode 100644 vendor/golang.org/x/net/icmp/paramprob.go create mode 100644 vendor/golang.org/x/net/icmp/ping_test.go create mode 100644 vendor/golang.org/x/net/icmp/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/icmp/timeexceeded.go create mode 100644 vendor/golang.org/x/net/idna/example_test.go create mode 100644 vendor/golang.org/x/net/idna/idna.go create mode 100644 vendor/golang.org/x/net/idna/idna_test.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/idna/punycode_test.go create mode 100644 vendor/golang.org/x/net/idna/tables.go create mode 100644 vendor/golang.org/x/net/idna/trie.go create mode 100644 vendor/golang.org/x/net/idna/trieval.go create mode 100644 vendor/golang.org/x/net/internal/iana/const.go create mode 100644 vendor/golang.org/x/net/internal/iana/gen.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_bsd.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_nobsd.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_posix.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_stub.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_unix.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_windows.go create mode 100644 vendor/golang.org/x/net/internal/nettest/interface.go create mode 100644 vendor/golang.org/x/net/internal/nettest/rlimit.go create mode 100644 vendor/golang.org/x/net/internal/nettest/stack.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_solaris.go create mode 100644 vendor/golang.org/x/net/internal/socket/error_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/error_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_msg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/reflect.go create mode 100644 vendor/golang.org/x/net/internal/socket/socket.go create mode 100644 vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go create mode 100644 vendor/golang.org/x/net/internal/socket/socket_test.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_netbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_posix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries_test.go create mode 100644 vendor/golang.org/x/net/ipv4/batch.go create mode 100644 vendor/golang.org/x/net/ipv4/bpf_test.go create mode 100644 vendor/golang.org/x/net/ipv4/control.go create mode 100644 vendor/golang.org/x/net/ipv4/control_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/control_pktinfo.go create mode 100644 vendor/golang.org/x/net/ipv4/control_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/control_test.go create mode 100644 vendor/golang.org/x/net/ipv4/control_unix.go create mode 100644 vendor/golang.org/x/net/ipv4/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_solaris.go create mode 100644 vendor/golang.org/x/net/ipv4/dgramopt.go create mode 100644 vendor/golang.org/x/net/ipv4/doc.go create mode 100644 vendor/golang.org/x/net/ipv4/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv4/example_test.go create mode 100644 vendor/golang.org/x/net/ipv4/gen.go create mode 100644 vendor/golang.org/x/net/ipv4/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv4/header.go create mode 100644 vendor/golang.org/x/net/ipv4/header_test.go create mode 100644 vendor/golang.org/x/net/ipv4/helper.go create mode 100644 vendor/golang.org/x/net/ipv4/iana.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_test.go create mode 100644 vendor/golang.org/x/net/ipv4/multicast_test.go create mode 100644 vendor/golang.org/x/net/ipv4/multicastlistener_test.go create mode 100644 vendor/golang.org/x/net/ipv4/multicastsockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv4/packet.go create mode 100644 vendor/golang.org/x/net/ipv4/packet_go1_8.go create mode 100644 vendor/golang.org/x/net/ipv4/packet_go1_9.go create mode 100644 vendor/golang.org/x/net/ipv4/payload.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go create mode 100644 vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go create mode 100644 vendor/golang.org/x/net/ipv4/readwrite_test.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/unicast_test.go create mode 100644 vendor/golang.org/x/net/ipv4/unicastsockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/batch.go create mode 100644 vendor/golang.org/x/net/ipv6/bpf_test.go create mode 100644 vendor/golang.org/x/net/ipv6/control.go create mode 100644 vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/control_test.go create mode 100644 vendor/golang.org/x/net/ipv6/control_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/dgramopt.go create mode 100644 vendor/golang.org/x/net/ipv6/doc.go create mode 100644 vendor/golang.org/x/net/ipv6/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv6/example_test.go create mode 100644 vendor/golang.org/x/net/ipv6/gen.go create mode 100644 vendor/golang.org/x/net/ipv6/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv6/header.go create mode 100644 vendor/golang.org/x/net/ipv6/header_test.go create mode 100644 vendor/golang.org/x/net/ipv6/helper.go create mode 100644 vendor/golang.org/x/net/ipv6/iana.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_test.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/mocktransponder_test.go create mode 100644 vendor/golang.org/x/net/ipv6/multicast_test.go create mode 100644 vendor/golang.org/x/net/ipv6/multicastlistener_test.go create mode 100644 vendor/golang.org/x/net/ipv6/multicastsockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv6/payload.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go create mode 100644 vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go create mode 100644 vendor/golang.org/x/net/ipv6/readwrite_test.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/unicast_test.go create mode 100644 vendor/golang.org/x/net/ipv6/unicastsockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_solaris.go create mode 100644 vendor/golang.org/x/net/lex/httplex/httplex.go create mode 100644 vendor/golang.org/x/net/lex/httplex/httplex_test.go create mode 100644 vendor/golang.org/x/net/lif/address.go create mode 100644 vendor/golang.org/x/net/lif/address_test.go create mode 100644 vendor/golang.org/x/net/lif/binary.go create mode 100644 vendor/golang.org/x/net/lif/defs_solaris.go create mode 100644 vendor/golang.org/x/net/lif/lif.go create mode 100644 vendor/golang.org/x/net/lif/link.go create mode 100644 vendor/golang.org/x/net/lif/link_test.go create mode 100644 vendor/golang.org/x/net/lif/sys.go create mode 100644 vendor/golang.org/x/net/lif/sys_solaris_amd64.s create mode 100644 vendor/golang.org/x/net/lif/syscall.go create mode 100644 vendor/golang.org/x/net/lif/zsys_solaris_amd64.go create mode 100644 vendor/golang.org/x/net/nettest/conntest.go create mode 100644 vendor/golang.org/x/net/nettest/conntest_go16.go create mode 100644 vendor/golang.org/x/net/nettest/conntest_go17.go create mode 100644 vendor/golang.org/x/net/nettest/conntest_test.go create mode 100644 vendor/golang.org/x/net/netutil/listen.go create mode 100644 vendor/golang.org/x/net/netutil/listen_test.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/per_host_test.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/proxy_test.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go create mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go create mode 100644 vendor/golang.org/x/net/publicsuffix/list.go create mode 100644 vendor/golang.org/x/net/publicsuffix/list_test.go create mode 100644 vendor/golang.org/x/net/publicsuffix/table.go create mode 100644 vendor/golang.org/x/net/publicsuffix/table_test.go create mode 100644 vendor/golang.org/x/net/route/address.go create mode 100644 vendor/golang.org/x/net/route/address_darwin_test.go create mode 100644 vendor/golang.org/x/net/route/address_test.go create mode 100644 vendor/golang.org/x/net/route/binary.go create mode 100644 vendor/golang.org/x/net/route/defs_darwin.go create mode 100644 vendor/golang.org/x/net/route/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/route/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/route/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/route/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/route/interface.go create mode 100644 vendor/golang.org/x/net/route/interface_announce.go create mode 100644 vendor/golang.org/x/net/route/interface_classic.go create mode 100644 vendor/golang.org/x/net/route/interface_freebsd.go create mode 100644 vendor/golang.org/x/net/route/interface_multicast.go create mode 100644 vendor/golang.org/x/net/route/interface_openbsd.go create mode 100644 vendor/golang.org/x/net/route/message.go create mode 100644 vendor/golang.org/x/net/route/message_darwin_test.go create mode 100644 vendor/golang.org/x/net/route/message_freebsd_test.go create mode 100644 vendor/golang.org/x/net/route/message_test.go create mode 100644 vendor/golang.org/x/net/route/route.go create mode 100644 vendor/golang.org/x/net/route/route_classic.go create mode 100644 vendor/golang.org/x/net/route/route_openbsd.go create mode 100644 vendor/golang.org/x/net/route/route_test.go create mode 100644 vendor/golang.org/x/net/route/sys.go create mode 100644 vendor/golang.org/x/net/route/sys_darwin.go create mode 100644 vendor/golang.org/x/net/route/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/route/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/route/sys_netbsd.go create mode 100644 vendor/golang.org/x/net/route/sys_openbsd.go create mode 100644 vendor/golang.org/x/net/route/syscall.go create mode 100644 vendor/golang.org/x/net/route/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/route/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/route/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/route/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/route/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/route/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/route/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/histogram_test.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/net/trace/trace_go16.go create mode 100644 vendor/golang.org/x/net/trace/trace_go17.go create mode 100644 vendor/golang.org/x/net/trace/trace_test.go create mode 100644 vendor/golang.org/x/net/webdav/file.go create mode 100644 vendor/golang.org/x/net/webdav/file_go1.6.go create mode 100644 vendor/golang.org/x/net/webdav/file_go1.7.go create mode 100644 vendor/golang.org/x/net/webdav/file_test.go create mode 100644 vendor/golang.org/x/net/webdav/if.go create mode 100644 vendor/golang.org/x/net/webdav/if_test.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/README create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/atom_test.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/example_test.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/marshal.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/read.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/read_test.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/xml.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/xml_test.go create mode 100644 vendor/golang.org/x/net/webdav/litmus_test_server.go create mode 100644 vendor/golang.org/x/net/webdav/lock.go create mode 100644 vendor/golang.org/x/net/webdav/lock_test.go create mode 100644 vendor/golang.org/x/net/webdav/prop.go create mode 100644 vendor/golang.org/x/net/webdav/prop_test.go create mode 100644 vendor/golang.org/x/net/webdav/webdav.go create mode 100644 vendor/golang.org/x/net/webdav/webdav_test.go create mode 100644 vendor/golang.org/x/net/webdav/xml.go create mode 100644 vendor/golang.org/x/net/webdav/xml_test.go create mode 100644 vendor/golang.org/x/net/websocket/client.go create mode 100644 vendor/golang.org/x/net/websocket/dial.go create mode 100644 vendor/golang.org/x/net/websocket/dial_test.go create mode 100644 vendor/golang.org/x/net/websocket/exampledial_test.go create mode 100644 vendor/golang.org/x/net/websocket/examplehandler_test.go create mode 100644 vendor/golang.org/x/net/websocket/hybi.go create mode 100644 vendor/golang.org/x/net/websocket/hybi_test.go create mode 100644 vendor/golang.org/x/net/websocket/server.go create mode 100644 vendor/golang.org/x/net/websocket/websocket.go create mode 100644 vendor/golang.org/x/net/websocket/websocket_test.go create mode 100644 vendor/golang.org/x/net/xsrftoken/xsrf.go create mode 100644 vendor/golang.org/x/net/xsrftoken/xsrf_test.go create mode 100644 vendor/golang.org/x/sys/.gitattributes create mode 100644 vendor/golang.org/x/sys/.gitignore create mode 100644 vendor/golang.org/x/sys/AUTHORS create mode 100644 vendor/golang.org/x/sys/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sys/LICENSE create mode 100644 vendor/golang.org/x/sys/PATENTS create mode 100644 vendor/golang.org/x/sys/README.md create mode 100644 vendor/golang.org/x/sys/codereview.cfg create mode 100644 vendor/golang.org/x/sys/plan9/asm.s create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_386.s create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_arm.s create mode 100644 vendor/golang.org/x/sys/plan9/const_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/dir_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/env_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/errors_plan9.go create mode 100755 vendor/golang.org/x/sys/plan9/mkall.sh create mode 100755 vendor/golang.org/x/sys/plan9/mkerrors.sh create mode 100755 vendor/golang.org/x/sys/plan9/mksyscall.pl create mode 100755 vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh create mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/pwd_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/race.go create mode 100644 vendor/golang.org/x/sys/plan9/race0.go create mode 100644 vendor/golang.org/x/sys/plan9/str.go create mode 100644 vendor/golang.org/x/sys/plan9/syscall.go create mode 100644 vendor/golang.org/x/sys/plan9/syscall_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/syscall_test.go create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go create mode 100644 vendor/golang.org/x/sys/plan9/zsysnum_plan9.go create mode 100644 vendor/golang.org/x/sys/unix/.gitignore create mode 100644 vendor/golang.org/x/sys/unix/README.md create mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go create mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/constants.go create mode 100644 vendor/golang.org/x/sys/unix/creds_test.go create mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/dev_darwin_test.go create mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly_test.go create mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go create mode 100644 vendor/golang.org/x/sys/unix/dev_linux_test.go create mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd_test.go create mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd_test.go create mode 100644 vendor/golang.org/x/sys/unix/dev_solaris_test.go create mode 100644 vendor/golang.org/x/sys/unix/dirent.go create mode 100644 vendor/golang.org/x/sys/unix/endian_big.go create mode 100644 vendor/golang.org/x/sys/unix/endian_little.go create mode 100644 vendor/golang.org/x/sys/unix/env_unix.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/export_test.go create mode 100644 vendor/golang.org/x/sys/unix/flock.go create mode 100644 vendor/golang.org/x/sys/unix/flock_linux_32bit.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c create mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/linux/Dockerfile create mode 100644 vendor/golang.org/x/sys/unix/linux/mkall.go create mode 100755 vendor/golang.org/x/sys/unix/linux/mksysnum.pl create mode 100644 vendor/golang.org/x/sys/unix/linux/types.go create mode 100755 vendor/golang.org/x/sys/unix/mkall.sh create mode 100755 vendor/golang.org/x/sys/unix/mkerrors.sh create mode 100644 vendor/golang.org/x/sys/unix/mkpost.go create mode 100755 vendor/golang.org/x/sys/unix/mksyscall.pl create mode 100755 vendor/golang.org/x/sys/unix/mksyscall_solaris.pl create mode 100755 vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl create mode 100755 vendor/golang.org/x/sys/unix/mksysnum_darwin.pl create mode 100755 vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl create mode 100755 vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl create mode 100755 vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl create mode 100755 vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl create mode 100644 vendor/golang.org/x/sys/unix/mmap_unix_test.go create mode 100644 vendor/golang.org/x/sys/unix/openbsd_pledge.go create mode 100644 vendor/golang.org/x/sys/unix/openbsd_test.go create mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go create mode 100644 vendor/golang.org/x/sys/unix/race.go create mode 100644 vendor/golang.org/x/sys/unix/race0.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go create mode 100644 vendor/golang.org/x/sys/unix/str.go create mode 100644 vendor/golang.org/x/sys/unix/syscall.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_test.go create mode 100644 vendor/golang.org/x/sys/unix/timestruct.go create mode 100644 vendor/golang.org/x/sys/unix/timestruct_test.go create mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace386_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptracearm_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptracemips_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptracemipsle_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_386.s create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/registry/export_test.go create mode 100644 vendor/golang.org/x/sys/windows/registry/key.go create mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/registry_test.go create mode 100644 vendor/golang.org/x/sys/windows/registry/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/value.go create mode 100644 vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/str.go create mode 100644 vendor/golang.org/x/sys/windows/svc/debug/log.go create mode 100644 vendor/golang.org/x/sys/windows/svc/debug/service.go create mode 100644 vendor/golang.org/x/sys/windows/svc/event.go create mode 100644 vendor/golang.org/x/sys/windows/svc/eventlog/install.go create mode 100644 vendor/golang.org/x/sys/windows/svc/eventlog/log.go create mode 100644 vendor/golang.org/x/sys/windows/svc/eventlog/log_test.go create mode 100644 vendor/golang.org/x/sys/windows/svc/example/beep.go create mode 100644 vendor/golang.org/x/sys/windows/svc/example/install.go create mode 100644 vendor/golang.org/x/sys/windows/svc/example/main.go create mode 100644 vendor/golang.org/x/sys/windows/svc/example/manage.go create mode 100644 vendor/golang.org/x/sys/windows/svc/example/service.go create mode 100644 vendor/golang.org/x/sys/windows/svc/go12.c create mode 100644 vendor/golang.org/x/sys/windows/svc/go12.go create mode 100644 vendor/golang.org/x/sys/windows/svc/go13.go create mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/config.go create mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/mgr.go create mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go create mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/service.go create mode 100644 vendor/golang.org/x/sys/windows/svc/security.go create mode 100644 vendor/golang.org/x/sys/windows/svc/service.go create mode 100644 vendor/golang.org/x/sys/windows/svc/svc_test.go create mode 100644 vendor/golang.org/x/sys/windows/svc/sys_386.s create mode 100644 vendor/golang.org/x/sys/windows/svc/sys_amd64.s create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_test.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows_test.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE create mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 vendor/gopkg.in/yaml.v2/apic.go create mode 100644 vendor/gopkg.in/yaml.v2/decode.go create mode 100644 vendor/gopkg.in/yaml.v2/decode_test.go create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v2/encode.go create mode 100644 vendor/gopkg.in/yaml.v2/encode_test.go create mode 100644 vendor/gopkg.in/yaml.v2/example_embedded_test.go create mode 100644 vendor/gopkg.in/yaml.v2/go.mod create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 vendor/gopkg.in/yaml.v2/suite_test.go create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go diff --git a/.gitignore b/.gitignore index 554e0e9..ff4449d 100644 --- a/.gitignore +++ b/.gitignore @@ -10,9 +10,6 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out -# Vendored dependencies -/vendor/* - # Built artifacts /lstags /dist/* diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore new file mode 100644 index 0000000..b883f1f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.gitignore @@ -0,0 +1 @@ +*.exe diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE new file mode 100644 index 0000000..b8b569d --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 0000000..5680010 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,22 @@ +# go-winio + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. + +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go new file mode 100644 index 0000000..0378401 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go @@ -0,0 +1,344 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// It aims to cover most of the variations, including those produced +// by GNU and BSD tars. +// +// References: +// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 +// http://www.gnu.org/software/tar/manual/html_node/Standard.html +// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +package tar + +import ( + "bytes" + "errors" + "fmt" + "os" + "path" + "time" +) + +const ( + blockSize = 512 + + // Types + TypeReg = '0' // regular file + TypeRegA = '\x00' // regular file + TypeLink = '1' // hard link + TypeSymlink = '2' // symbolic link + TypeChar = '3' // character device node + TypeBlock = '4' // block device node + TypeDir = '5' // directory + TypeFifo = '6' // fifo node + TypeCont = '7' // reserved + TypeXHeader = 'x' // extended header + TypeXGlobalHeader = 'g' // global extended header + TypeGNULongName = 'L' // Next file has a long name + TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name + TypeGNUSparse = 'S' // sparse file +) + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +type Header struct { + Name string // name of header file entry + Mode int64 // permission and mode bits + Uid int // user id of owner + Gid int // group id of owner + Size int64 // length in bytes + ModTime time.Time // modified time + Typeflag byte // type of header entry + Linkname string // target name of link + Uname string // user name of owner + Gname string // group name of owner + Devmajor int64 // major number of character or block device + Devminor int64 // minor number of character or block device + AccessTime time.Time // access time + ChangeTime time.Time // status change time + CreationTime time.Time // creation time + Xattrs map[string]string + Winheaders map[string]string +} + +// File name constants from the tar spec. +const ( + fileNameSize = 100 // Maximum number of bytes in a standard tar name. + fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. +) + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. + m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + // symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxCreationTime = "LIBARCHIVE.creationtime" + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxWindows = "MSWINDOWS." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. +func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +var zeroBlock = make([]byte, blockSize) + +// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. +// We compute and return both. +func checksum(header []byte) (unsigned int64, signed int64) { + for i := 0; i < len(header); i++ { + if i == 148 { + // The chksum field (header[148:156]) is special: it should be treated as space bytes. + unsigned += ' ' * 8 + signed += ' ' * 8 + i += 7 + continue + } + unsigned += int64(header[i]) + signed += int64(int8(header[i])) + } + return +} + +type slicer []byte + +func (sp *slicer) next(n int) (b []byte) { + s := *sp + b, *sp = s[0:n], s[n:] + return +} + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go b/vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go new file mode 100644 index 0000000..5f0ce2f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go @@ -0,0 +1,80 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar_test + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "log" + "os" +) + +func Example() { + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new tar archive. + tw := tar.NewWriter(buf) + + // Add some files to the archive. + var files = []struct { + Name, Body string + }{ + {"readme.txt", "This archive contains some text files."}, + {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, + {"todo.txt", "Get animal handling license."}, + } + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Mode: 0600, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + log.Fatalln(err) + } + if _, err := tw.Write([]byte(file.Body)); err != nil { + log.Fatalln(err) + } + } + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + log.Fatalln(err) + } + + // Open the tar archive for reading. + r := bytes.NewReader(buf.Bytes()) + tr := tar.NewReader(r) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + log.Fatalln(err) + } + fmt.Printf("Contents of %s:\n", hdr.Name) + if _, err := io.Copy(os.Stdout, tr); err != nil { + log.Fatalln(err) + } + fmt.Println() + } + + // Output: + // Contents of readme.txt: + // This archive contains some text files. + // Contents of gopher.txt: + // Gopher names: + // George + // Geoffrey + // Gonzo + // Contents of todo.txt: + // Get animal handling license. +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go new file mode 100644 index 0000000..e210c61 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go @@ -0,0 +1,1002 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - pax extensions + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "math" + "os" + "strconv" + "strings" + "time" +) + +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") +) + +const maxNanoSecondIntSize = 9 + +// A Reader provides sequential access to the contents of a tar archive. +// A tar archive consists of a sequence of files. +// The Next method advances to the next file in the archive (including the first), +// and then it can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + err error + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry + hdrBuff [blockSize]byte // buffer to use in readHeader +} + +type parser struct { + err error // Last error seen +} + +// A numBytesReader is an io.Reader with a numBytes method, returning the number +// of bytes remaining in the underlying encoded data. +type numBytesReader interface { + io.Reader + numBytes() int64 +} + +// A regFileReader is a numBytesReader for reading file data from a tar archive. +type regFileReader struct { + r io.Reader // underlying reader + nb int64 // number of unread bytes for current file entry +} + +// A sparseFileReader is a numBytesReader for reading sparse file data from a +// tar archive. +type sparseFileReader struct { + rfr numBytesReader // Reads the sparse-encoded file data + sp []sparseEntry // The sparse map for the file + pos int64 // Keeps track of file position + total int64 // Total size of the file +} + +// A sparseEntry holds a single entry in a sparse file's sparse map. +// +// Sparse files are represented using a series of sparseEntrys. +// Despite the name, a sparseEntry represents an actual data fragment that +// references data found in the underlying archive stream. All regions not +// covered by a sparseEntry are logically filled with zeros. +// +// For example, if the underlying raw file contains the 10-byte data: +// var compactData = "abcdefgh" +// +// And the sparse map has the following entries: +// var sp = []sparseEntry{ +// {offset: 2, numBytes: 5} // Data fragment for [2..7] +// {offset: 18, numBytes: 3} // Data fragment for [18..21] +// } +// +// Then the content of the resulting sparse file with a "real" size of 25 is: +// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 +type sparseEntry struct { + offset int64 // Starting position of the fragment + numBytes int64 // Length of the fragment +} + +// Keywords for GNU sparse files in a PAX extended header +const ( + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// Keywords for old GNU sparse headers +const ( + oldGNUSparseMainHeaderOffset = 386 + oldGNUSparseMainHeaderIsExtendedOffset = 482 + oldGNUSparseMainHeaderNumEntries = 4 + oldGNUSparseExtendedHeaderIsExtendedOffset = 504 + oldGNUSparseExtendedHeaderNumEntries = 21 + oldGNUSparseOffsetSize = 12 + oldGNUSparseNumBytesSize = 12 +) + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { return &Reader{r: r} } + +// Next advances to the next entry in the tar archive. +// +// io.EOF is returned at the end of the input. +func (tr *Reader) Next() (*Header, error) { + if tr.err != nil { + return nil, tr.err + } + + var hdr *Header + var extHdrs map[string]string + + // Externally, Next iterates through the tar archive as if it is a series of + // files. Internally, the tar format often uses fake "files" to add meta + // data that describes the next file. These meta data "files" should not + // normally be visible to the outside. As such, this loop iterates through + // one or more "header files" until it finds a "normal file". +loop: + for { + tr.err = tr.skipUnread() + if tr.err != nil { + return nil, tr.err + } + + hdr = tr.readHeader() + if tr.err != nil { + return nil, tr.err + } + + // Check for PAX/GNU special headers and files. + switch hdr.Typeflag { + case TypeXHeader: + extHdrs, tr.err = parsePAX(tr) + if tr.err != nil { + return nil, tr.err + } + continue loop // This is a meta header affecting the next header + case TypeGNULongName, TypeGNULongLink: + var realname []byte + realname, tr.err = ioutil.ReadAll(tr) + if tr.err != nil { + return nil, tr.err + } + + // Convert GNU extensions to use PAX headers. + if extHdrs == nil { + extHdrs = make(map[string]string) + } + var p parser + switch hdr.Typeflag { + case TypeGNULongName: + extHdrs[paxPath] = p.parseString(realname) + case TypeGNULongLink: + extHdrs[paxLinkpath] = p.parseString(realname) + } + if p.err != nil { + tr.err = p.err + return nil, tr.err + } + continue loop // This is a meta header affecting the next header + default: + mergePAX(hdr, extHdrs) + + // Check for a PAX format sparse file + sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) + if err != nil { + tr.err = err + return nil, err + } + if sp != nil { + // Current file is a PAX format GNU sparse file. + // Set the current file reader to a sparse file reader. + tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) + if tr.err != nil { + return nil, tr.err + } + } + break loop // This is a file, so stop + } + } + return hdr, nil +} + +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." + minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. + return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + +// mergePAX merges well known headers according to PAX standard. +// In general headers with the same name as those found +// in the header struct overwrite those found in the header +// struct with higher precision or longer values. Esp. useful +// for name and linkname fields. +func mergePAX(hdr *Header, headers map[string]string) error { + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxGname: + hdr.Gname = v + case paxUname: + hdr.Uname = v + case paxUid: + uid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Uid = int(uid) + case paxGid: + gid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Gid = int(gid) + case paxAtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.AccessTime = t + case paxMtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ModTime = t + case paxCtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ChangeTime = t + case paxCreationTime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.CreationTime = t + case paxSize: + size, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Size = int64(size) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } else if strings.HasPrefix(k, paxWindows) { + if hdr.Winheaders == nil { + hdr.Winheaders = make(map[string]string) + } + hdr.Winheaders[k[len(paxWindows):]] = v + } + } + } + return nil +} + +// parsePAXTime takes a string of the form %d.%d as described in +// the PAX specification. +func parsePAXTime(t string) (time.Time, error) { + buf := []byte(t) + pos := bytes.IndexByte(buf, '.') + var seconds, nanoseconds int64 + var err error + if pos == -1 { + seconds, err = strconv.ParseInt(t, 10, 0) + if err != nil { + return time.Time{}, err + } + } else { + seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) + if err != nil { + return time.Time{}, err + } + nano_buf := string(buf[pos+1:]) + // Pad as needed before converting to a decimal. + // For example .030 -> .030000000 -> 30000000 nanoseconds + if len(nano_buf) < maxNanoSecondIntSize { + // Right pad + nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) + } else if len(nano_buf) > maxNanoSecondIntSize { + // Right truncate + nano_buf = nano_buf[:maxNanoSecondIntSize] + } + nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) + if err != nil { + return time.Time{}, err + } + } + ts := time.Unix(seconds, nanoseconds) + return ts, nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + sbuf := string(buf) + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. + var sparseMap bytes.Buffer + + headers := make(map[string]string) + // Each record is constructed as + // "%d %s=%s\n", length, keyword, value + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { + return nil, ErrHeader + } + sbuf = residual + + keyStr := string(key) + if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { + // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. + sparseMap.WriteString(value) + sparseMap.Write([]byte{','}) + } else { + // Normal key. Set the value in the headers map. + headers[keyStr] = string(value) + } + } + if sparseMap.Len() != 0 { + // Add sparse info to headers, chopping off the extra comma + sparseMap.Truncate(sparseMap.Len() - 1) + headers[paxGNUSparseMap] = sparseMap.String() + } + return headers, nil +} + +// parsePAXRecord parses the input PAX record string into a key-value pair. +// If parsing is successful, it will slice off the currently read record and +// return the remainder as r. +// +// A PAX record is of the following form: +// "%d %s=%s\n" % (size, key, value) +func parsePAXRecord(s string) (k, v, r string, err error) { + // The size field ends at the first space. + sp := strings.IndexByte(s, ' ') + if sp == -1 { + return "", "", s, ErrHeader + } + + // Parse the first token as a decimal integer. + n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int + if perr != nil || n < 5 || int64(len(s)) < n { + return "", "", s, ErrHeader + } + + // Extract everything between the space and the final newline. + rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. + eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + return rec[:eq], rec[eq+1:], rem, nil +} + +// parseString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func (*parser) parseString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +// parseNumeric parses the input as being encoded in either base-256 or octal. +// This function may return negative numbers. +// If parsing fails or an integer overflow occurs, err will be set. +func (p *parser) parseNumeric(b []byte) int64 { + // Check for base-256 (binary) format first. + // If the first bit is set, then all following bits constitute a two's + // complement encoded number in big-endian byte order. + if len(b) > 0 && b[0]&0x80 != 0 { + // Handling negative numbers relies on the following identity: + // -a-1 == ^a + // + // If the number is negative, we use an inversion mask to invert the + // data bytes and treat the value as an unsigned number. + var inv byte // 0x00 if positive or zero, 0xff if negative + if b[0]&0x40 != 0 { + inv = 0xff + } + + var x uint64 + for i, c := range b { + c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing + if i == 0 { + c &= 0x7f // Ignore signal bit in first byte + } + if (x >> 56) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + x = x<<8 | uint64(c) + } + if (x >> 63) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + if inv == 0xff { + return ^int64(x) + } + return int64(x) + } + + // Normal case is base-8 (octal) format. + return p.parseOctal(b) +} + +func (p *parser) parseOctal(b []byte) int64 { + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure. + b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, perr := strconv.ParseUint(p.parseString(b), 8, 64) + if perr != nil { + p.err = ErrHeader + } + return int64(x) +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any +// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is +// encountered in the data portion; it is okay to hit io.EOF in the padding. +// +// Note that this function still works properly even when sparse files are being +// used since numBytes returns the bytes remaining in the underlying io.Reader. +func (tr *Reader) skipUnread() error { + dataSkip := tr.numBytes() // Number of data bytes to skip + totalSkip := dataSkip + tr.pad // Total number of bytes to skip + tr.curr, tr.pad = nil, 0 + + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the tar stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, os.SEEK_CUR) + if err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR) + if err != nil { + tr.err = err + return tr.err + } + seekSkipped = pos2 - pos1 + } + } + + var copySkipped int64 // Number of bytes skipped via CopyN + copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) + if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip { + tr.err = io.ErrUnexpectedEOF + } + return tr.err +} + +func (tr *Reader) verifyChecksum(header []byte) bool { + if tr.err != nil { + return false + } + + var p parser + given := p.parseOctal(header[148:156]) + unsigned, signed := checksum(header) + return p.err == nil && (given == unsigned || given == signed) +} + +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. +// +// The err will be set to io.EOF only when one of the following occurs: +// * Exactly 0 bytes are read and EOF is hit. +// * Exactly 1 block of zeros is read and EOF is hit. +// * At least 2 blocks of zeros are read. +func (tr *Reader) readHeader() *Header { + header := tr.hdrBuff[:] + copy(header, zeroBlock) + + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil // io.EOF is okay here + } + + // Two blocks of zero bytes marks the end of the archive. + if bytes.Equal(header, zeroBlock[0:blockSize]) { + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil // io.EOF is okay here + } + if bytes.Equal(header, zeroBlock[0:blockSize]) { + tr.err = io.EOF + } else { + tr.err = ErrHeader // zero block and then non-zero block + } + return nil + } + + if !tr.verifyChecksum(header) { + tr.err = ErrHeader + return nil + } + + // Unpack + var p parser + hdr := new(Header) + s := slicer(header) + + hdr.Name = p.parseString(s.next(100)) + hdr.Mode = p.parseNumeric(s.next(8)) + hdr.Uid = int(p.parseNumeric(s.next(8))) + hdr.Gid = int(p.parseNumeric(s.next(8))) + hdr.Size = p.parseNumeric(s.next(12)) + hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0) + s.next(8) // chksum + hdr.Typeflag = s.next(1)[0] + hdr.Linkname = p.parseString(s.next(100)) + + // The remainder of the header depends on the value of magic. + // The original (v7) version of tar had no explicit magic field, + // so its magic bytes, like the rest of the block, are NULs. + magic := string(s.next(8)) // contains version field as well. + var format string + switch { + case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988) + if string(header[508:512]) == "tar\x00" { + format = "star" + } else { + format = "posix" + } + case magic == "ustar \x00": // old GNU tar + format = "gnu" + } + + switch format { + case "posix", "gnu", "star": + hdr.Uname = p.parseString(s.next(32)) + hdr.Gname = p.parseString(s.next(32)) + devmajor := s.next(8) + devminor := s.next(8) + if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { + hdr.Devmajor = p.parseNumeric(devmajor) + hdr.Devminor = p.parseNumeric(devminor) + } + var prefix string + switch format { + case "posix", "gnu": + prefix = p.parseString(s.next(155)) + case "star": + prefix = p.parseString(s.next(131)) + hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0) + hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0) + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + + if p.err != nil { + tr.err = p.err + return nil + } + + nb := hdr.Size + if isHeaderOnlyType(hdr.Typeflag) { + nb = 0 + } + if nb < 0 { + tr.err = ErrHeader + return nil + } + + // Set the current file reader. + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + tr.curr = ®FileReader{r: tr.r, nb: nb} + + // Check for old GNU sparse format entry. + if hdr.Typeflag == TypeGNUSparse { + // Get the real size of the file. + hdr.Size = p.parseNumeric(header[483:495]) + if p.err != nil { + tr.err = p.err + return nil + } + + // Read the sparse map. + sp := tr.readOldGNUSparseMap(header) + if tr.err != nil { + return nil + } + + // Current file is a GNU sparse file. Update the current file reader. + tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) + if tr.err != nil { + return nil + } + } + + return hdr +} + +// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, +// then one or more extension headers are used to store the rest of the sparse map. +func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry { + var p parser + isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0 + spCap := oldGNUSparseMainHeaderNumEntries + if isExtended { + spCap += oldGNUSparseExtendedHeaderNumEntries + } + sp := make([]sparseEntry, 0, spCap) + s := slicer(header[oldGNUSparseMainHeaderOffset:]) + + // Read the four entries from the main tar header + for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ { + offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) + numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) + if p.err != nil { + tr.err = p.err + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + for isExtended { + // There are more entries. Read an extension header and parse its entries. + sparseHeader := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { + return nil + } + isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 + s = slicer(sparseHeader) + for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { + offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) + numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) + if p.err != nil { + tr.err = p.err + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + } + return sp +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format +// version 1.0. The format of the sparse map consists of a series of +// newline-terminated numeric fields. The first field is the number of entries +// and is always present. Following this are the entries, consisting of two +// fields (offset, numBytes). This function must stop reading at the end +// boundary of the block containing the last newline. +// +// Note that the GNU manual says that numeric values should be encoded in octal +// format. However, the GNU tar utility itself outputs these values in decimal. +// As such, this library treats values as being encoded in decimal. +func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + var cntNewline int64 + var buf bytes.Buffer + var blk = make([]byte, blockSize) + + // feedTokens copies data in numBlock chunks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + var feedTokens = func(cnt int64) error { + for cntNewline < cnt { + if _, err := io.ReadFull(r, blk); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + buf.Write(blk) + for _, c := range blk { + if c == '\n' { + cntNewline++ + } + } + } + return nil + } + + // nextToken gets the next token delimited by a newline. This assumes that + // at least one newline exists in the buffer. + var nextToken = func() string { + cntNewline-- + tok, _ := buf.ReadString('\n') + return tok[:len(tok)-1] // Cut off newline + } + + // Parse for the number of entries. + // Use integer overflow resistant math to check this. + if err := feedTokens(1); err != nil { + return nil, err + } + numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // Parse for all member entries. + // numEntries is trusted after this since a potential attacker must have + // committed resources proportional to what this library used. + if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format +// version 0.1. The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { + // Get number of entries. + // Use integer overflow resistant math to check this. + numEntriesStr := extHdrs[paxGNUSparseNumBlocks] + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // There should be two numbers in sparseMap for each entry. + sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map. + // numEntries is trusted now. + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +// +// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what +// the Header.Size claims. +func (tr *Reader) Read(b []byte) (n int, err error) { + if tr.err != nil { + return 0, tr.err + } + if tr.curr == nil { + return 0, io.EOF + } + + n, err = tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { + // file consumed + return 0, io.EOF + } + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] + } + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) + + if err == io.EOF && rfr.nb > 0 { + err = io.ErrUnexpectedEOF + } + return +} + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// newSparseFileReader creates a new sparseFileReader, but validates all of the +// sparse entries before doing so. +func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { + if total < 0 { + return nil, ErrHeader // Total size cannot be negative + } + + // Validate all sparse entries. These are the same checks as performed by + // the BSD tar utility. + for i, s := range sp { + switch { + case s.offset < 0 || s.numBytes < 0: + return nil, ErrHeader // Negative values are never okay + case s.offset > math.MaxInt64-s.numBytes: + return nil, ErrHeader // Integer overflow with large length + case s.offset+s.numBytes > total: + return nil, ErrHeader // Region extends beyond the "real" size + case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: + return nil, ErrHeader // Regions can't overlap and must be in order + } + } + return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil +} + +// readHole reads a sparse hole ending at endOffset. +func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { + n64 := endOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + // Skip past all empty fragments. + for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { + sfr.sp = sfr.sp[1:] + } + + // If there are no more fragments, then it is possible that there + // is one last sparse hole. + if len(sfr.sp) == 0 { + // This behavior matches the BSD tar utility. + // However, GNU tar stops returning data even if sfr.total is unmet. + if sfr.pos < sfr.total { + return sfr.readHole(b, sfr.total), nil + } + return 0, io.EOF + } + + // In front of a data fragment, so read a hole. + if sfr.pos < sfr.sp[0].offset { + return sfr.readHole(b, sfr.sp[0].offset), nil + } + + // In a data fragment, so read from it. + // This math is overflow free since we verify that offset and numBytes can + // be safely added when creating the sparseFileReader. + endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment + bytesLeft := endPos - sfr.pos // Bytes left in fragment + if int64(len(b)) > bytesLeft { + b = b[:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + if err == io.EOF { + if sfr.pos < endPos { + err = io.ErrUnexpectedEOF // There was supposed to be more data + } else if sfr.pos < sfr.total { + err = nil // There is still an implicit sparse hole at the end + } + } + + if sfr.pos == endPos { + sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it + } + return n, err +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. +func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.numBytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go b/vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go new file mode 100644 index 0000000..7b148b5 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go @@ -0,0 +1,1125 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "strings" + "testing" + "time" +) + +type untarTest struct { + file string // Test input file + headers []*Header // Expected output headers + chksums []string // MD5 checksum of files, leave as nil if not checked + err error // Expected error to occur +} + +var gnuTarTest = &untarTest{ + file: "testdata/gnu.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244428340, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244436044, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + }, + chksums: []string{ + "e38b27eaccb4391bdec553a7f3ae6b2f", + "c65bd2e50a56a2138bf1716f2fd56fe9", + }, +} + +var sparseTarTest = &untarTest{ + file: "testdata/sparse-formats.tar", + headers: []*Header{ + { + Name: "sparse-gnu", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392395740, 0), + Typeflag: 0x53, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392342187, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.1", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392340456, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-1.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392337404, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "end", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 4, + ModTime: time.Unix(1392398319, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + }, + chksums: []string{ + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "b0061974914468de549a2af8ced10316", + }, +} + +var untarTests = []*untarTest{ + gnuTarTest, + sparseTarTest, + { + file: "testdata/star.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + }, + }, + { + file: "testdata/v7.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + { + Name: "small2.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + }, + }, + { + file: "testdata/pax.tar", + headers: []*Header{ + { + Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + Mode: 0664, + Uid: 1000, + Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 7, + ModTime: time.Unix(1350244992, 23960108), + ChangeTime: time.Unix(1350244992, 23960108), + AccessTime: time.Unix(1350244992, 23960108), + Typeflag: TypeReg, + }, + { + Name: "a/b", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 0, + ModTime: time.Unix(1350266320, 910238425), + ChangeTime: time.Unix(1350266320, 910238425), + AccessTime: time.Unix(1350266320, 910238425), + Typeflag: TypeSymlink, + Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + }, + }, + }, + { + file: "testdata/nil-uid.tar", // golang.org/issue/5290 + headers: []*Header{ + { + Name: "P1050238.JPG.log", + Mode: 0664, + Uid: 0, + Gid: 0, + Size: 14, + ModTime: time.Unix(1365454838, 0), + Typeflag: TypeReg, + Linkname: "", + Uname: "eyefi", + Gname: "eyefi", + Devmajor: 0, + Devminor: 0, + }, + }, + }, + { + file: "testdata/xattrs.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 5, + ModTime: time.Unix(1386065770, 448252320), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1389782956, 794414986), + Xattrs: map[string]string{ + "user.key": "value", + "user.key2": "value2", + // Interestingly, selinux encodes the terminating null inside the xattr + "security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + }, + { + Name: "small2.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 11, + ModTime: time.Unix(1386065770, 449252304), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1386065770, 449252304), + Xattrs: map[string]string{ + "security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + }, + }, + }, + { + // Matches the behavior of GNU, BSD, and STAR tar utilities. + file: "testdata/gnu-multi-hdrs.tar", + headers: []*Header{ + { + Name: "GNU2/GNU2/long-path-name", + Linkname: "GNU4/GNU4/long-linkpath-name", + ModTime: time.Unix(0, 0), + Typeflag: '2', + }, + }, + }, + { + // Matches the behavior of GNU and BSD tar utilities. + file: "testdata/pax-multi-hdrs.tar", + headers: []*Header{ + { + Name: "bar", + Linkname: "PAX4/PAX4/long-linkpath-name", + ModTime: time.Unix(0, 0), + Typeflag: '2', + }, + }, + }, + { + file: "testdata/neg-size.tar", + err: ErrHeader, + }, + { + file: "testdata/issue10968.tar", + err: ErrHeader, + }, + { + file: "testdata/issue11169.tar", + err: ErrHeader, + }, + { + file: "testdata/issue12435.tar", + err: ErrHeader, + }, +} + +func TestReader(t *testing.T) { + for i, v := range untarTests { + f, err := os.Open(v.file) + if err != nil { + t.Errorf("file %s, test %d: unexpected error: %v", v.file, i, err) + continue + } + defer f.Close() + + // Capture all headers and checksums. + var ( + tr = NewReader(f) + hdrs []*Header + chksums []string + rdbuf = make([]byte, 8) + ) + for { + var hdr *Header + hdr, err = tr.Next() + if err != nil { + if err == io.EOF { + err = nil // Expected error + } + break + } + hdrs = append(hdrs, hdr) + + if v.chksums == nil { + continue + } + h := md5.New() + _, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read + if err != nil { + break + } + chksums = append(chksums, fmt.Sprintf("%x", h.Sum(nil))) + } + + for j, hdr := range hdrs { + if j >= len(v.headers) { + t.Errorf("file %s, test %d, entry %d: unexpected header:\ngot %+v", + v.file, i, j, *hdr) + continue + } + if !reflect.DeepEqual(*hdr, *v.headers[j]) { + t.Errorf("file %s, test %d, entry %d: incorrect header:\ngot %+v\nwant %+v", + v.file, i, j, *hdr, *v.headers[j]) + } + } + if len(hdrs) != len(v.headers) { + t.Errorf("file %s, test %d: got %d headers, want %d headers", + v.file, i, len(hdrs), len(v.headers)) + } + + for j, sum := range chksums { + if j >= len(v.chksums) { + t.Errorf("file %s, test %d, entry %d: unexpected sum: got %s", + v.file, i, j, sum) + continue + } + if sum != v.chksums[j] { + t.Errorf("file %s, test %d, entry %d: incorrect checksum: got %s, want %s", + v.file, i, j, sum, v.chksums[j]) + } + } + + if err != v.err { + t.Errorf("file %s, test %d: unexpected error: got %v, want %v", + v.file, i, err, v.err) + } + f.Close() + } +} + +func TestPartialRead(t *testing.T) { + f, err := os.Open("testdata/gnu.tar") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + // Read the first four bytes; Next() should skip the last byte. + hdr, err := tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get first file: %v", err) + } + buf := make([]byte, 4) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Kilt"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } + + // Second file + hdr, err = tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get second file: %v", err) + } + buf = make([]byte, 6) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Google"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } +} + +func TestParsePAXHeader(t *testing.T) { + paxTests := [][3]string{ + {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths + {"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length + {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}} + for _, test := range paxTests { + key, expected, raw := test[0], test[1], test[2] + reader := bytes.NewReader([]byte(raw)) + headers, err := parsePAX(reader) + if err != nil { + t.Errorf("Couldn't parse correctly formatted headers: %v", err) + continue + } + if strings.EqualFold(headers[key], expected) { + t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected) + continue + } + trailer := make([]byte, 100) + n, err := reader.Read(trailer) + if err != io.EOF || n != 0 { + t.Error("Buffer wasn't consumed") + } + } + badHeaderTests := [][]byte{ + []byte("3 somelongkey=\n"), + []byte("50 tooshort=\n"), + } + for _, test := range badHeaderTests { + if _, err := parsePAX(bytes.NewReader(test)); err != ErrHeader { + t.Fatal("Unexpected success when parsing bad header") + } + } +} + +func TestParsePAXTime(t *testing.T) { + // Some valid PAX time values + timestamps := map[string]time.Time{ + "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case + "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value + "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value + "1350244992": time.Unix(1350244992, 0), // Low precision value + } + for input, expected := range timestamps { + ts, err := parsePAXTime(input) + if err != nil { + t.Fatal(err) + } + if !ts.Equal(expected) { + t.Fatalf("Time parsing failure %s %s", ts, expected) + } + } +} + +func TestMergePAX(t *testing.T) { + hdr := new(Header) + // Test a string, integer, and time based value. + headers := map[string]string{ + "path": "a/b/c", + "uid": "1000", + "mtime": "1350244992.023960108", + } + err := mergePAX(hdr, headers) + if err != nil { + t.Fatal(err) + } + want := &Header{ + Name: "a/b/c", + Uid: 1000, + ModTime: time.Unix(1350244992, 23960108), + } + if !reflect.DeepEqual(hdr, want) { + t.Errorf("incorrect merge: got %+v, want %+v", hdr, want) + } +} + +func TestSparseFileReader(t *testing.T) { + var vectors = []struct { + realSize int64 // Real size of the output file + sparseMap []sparseEntry // Input sparse map + sparseData string // Input compact data + expected string // Expected output data + err error // Expected error outcome + }{{ + realSize: 8, + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + sparseData: "abcde", + expected: "ab\x00\x00\x00cde", + }, { + realSize: 10, + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + sparseData: "abcde", + expected: "ab\x00\x00\x00cde\x00\x00", + }, { + realSize: 8, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + sparseData: "abcde", + expected: "\x00abc\x00\x00de", + }, { + realSize: 8, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 0}, + {offset: 6, numBytes: 0}, + {offset: 6, numBytes: 2}, + }, + sparseData: "abcde", + expected: "\x00abc\x00\x00de", + }, { + realSize: 10, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + sparseData: "abcde", + expected: "\x00abc\x00\x00de\x00\x00", + }, { + realSize: 10, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + {offset: 8, numBytes: 0}, + {offset: 8, numBytes: 0}, + {offset: 8, numBytes: 0}, + {offset: 8, numBytes: 0}, + }, + sparseData: "abcde", + expected: "\x00abc\x00\x00de\x00\x00", + }, { + realSize: 2, + sparseMap: []sparseEntry{}, + sparseData: "", + expected: "\x00\x00", + }, { + realSize: -2, + sparseMap: []sparseEntry{}, + err: ErrHeader, + }, { + realSize: -10, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + sparseData: "abcde", + err: ErrHeader, + }, { + realSize: 10, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 5}, + }, + sparseData: "abcde", + err: ErrHeader, + }, { + realSize: 35, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 5}, + }, + sparseData: "abcde", + err: io.ErrUnexpectedEOF, + }, { + realSize: 35, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: -5}, + }, + sparseData: "abcde", + err: ErrHeader, + }, { + realSize: 35, + sparseMap: []sparseEntry{ + {offset: math.MaxInt64, numBytes: 3}, + {offset: 6, numBytes: -5}, + }, + sparseData: "abcde", + err: ErrHeader, + }, { + realSize: 10, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 2, numBytes: 2}, + }, + sparseData: "abcde", + err: ErrHeader, + }} + + for i, v := range vectors { + r := bytes.NewReader([]byte(v.sparseData)) + rfr := ®FileReader{r: r, nb: int64(len(v.sparseData))} + + var sfr *sparseFileReader + var err error + var buf []byte + + sfr, err = newSparseFileReader(rfr, v.sparseMap, v.realSize) + if err != nil { + goto fail + } + if sfr.numBytes() != int64(len(v.sparseData)) { + t.Errorf("test %d, numBytes() before reading: got %d, want %d", i, sfr.numBytes(), len(v.sparseData)) + } + buf, err = ioutil.ReadAll(sfr) + if err != nil { + goto fail + } + if string(buf) != v.expected { + t.Errorf("test %d, ReadAll(): got %q, want %q", i, string(buf), v.expected) + } + if sfr.numBytes() != 0 { + t.Errorf("test %d, numBytes() after reading: got %d, want %d", i, sfr.numBytes(), 0) + } + + fail: + if err != v.err { + t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err) + } + } +} + +func TestReadGNUSparseMap0x1(t *testing.T) { + const ( + maxUint = ^uint(0) + maxInt = int(maxUint >> 1) + ) + var ( + big1 = fmt.Sprintf("%d", int64(maxInt)) + big2 = fmt.Sprintf("%d", (int64(maxInt)/2)+1) + big3 = fmt.Sprintf("%d", (int64(maxInt) / 3)) + ) + + var vectors = []struct { + extHdrs map[string]string // Input data + sparseMap []sparseEntry // Expected sparse entries to be outputted + err error // Expected errors that may be raised + }{{ + extHdrs: map[string]string{paxGNUSparseNumBlocks: "-4"}, + err: ErrHeader, + }, { + extHdrs: map[string]string{paxGNUSparseNumBlocks: "fee "}, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: big1, + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: big2, + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: big3, + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0.5,5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,5.5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,fewafewa.5,fewafw,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + sparseMap: []sparseEntry{{0, 5}, {10, 5}, {20, 5}, {30, 5}}, + }} + + for i, v := range vectors { + sp, err := readGNUSparseMap0x1(v.extHdrs) + if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) { + t.Errorf("test %d, readGNUSparseMap0x1(...): got %v, want %v", i, sp, v.sparseMap) + } + if err != v.err { + t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err) + } + } +} + +func TestReadGNUSparseMap1x0(t *testing.T) { + var sp = []sparseEntry{{1, 2}, {3, 4}} + for i := 0; i < 98; i++ { + sp = append(sp, sparseEntry{54321, 12345}) + } + + var vectors = []struct { + input string // Input data + sparseMap []sparseEntry // Expected sparse entries to be outputted + cnt int // Expected number of bytes read + err error // Expected errors that may be raised + }{{ + input: "", + cnt: 0, + err: io.ErrUnexpectedEOF, + }, { + input: "ab", + cnt: 2, + err: io.ErrUnexpectedEOF, + }, { + input: strings.Repeat("\x00", 512), + cnt: 512, + err: io.ErrUnexpectedEOF, + }, { + input: strings.Repeat("\x00", 511) + "\n", + cnt: 512, + err: ErrHeader, + }, { + input: strings.Repeat("\n", 512), + cnt: 512, + err: ErrHeader, + }, { + input: "0\n" + strings.Repeat("\x00", 510) + strings.Repeat("a", 512), + sparseMap: []sparseEntry{}, + cnt: 512, + }, { + input: strings.Repeat("0", 512) + "0\n" + strings.Repeat("\x00", 510), + sparseMap: []sparseEntry{}, + cnt: 1024, + }, { + input: strings.Repeat("0", 1024) + "1\n2\n3\n" + strings.Repeat("\x00", 506), + sparseMap: []sparseEntry{{2, 3}}, + cnt: 1536, + }, { + input: strings.Repeat("0", 1024) + "1\n2\n\n" + strings.Repeat("\x00", 509), + cnt: 1536, + err: ErrHeader, + }, { + input: strings.Repeat("0", 1024) + "1\n2\n" + strings.Repeat("\x00", 508), + cnt: 1536, + err: io.ErrUnexpectedEOF, + }, { + input: "-1\n2\n\n" + strings.Repeat("\x00", 506), + cnt: 512, + err: ErrHeader, + }, { + input: "1\nk\n2\n" + strings.Repeat("\x00", 506), + cnt: 512, + err: ErrHeader, + }, { + input: "100\n1\n2\n3\n4\n" + strings.Repeat("54321\n0000000000000012345\n", 98) + strings.Repeat("\x00", 512), + cnt: 2560, + sparseMap: sp, + }} + + for i, v := range vectors { + r := strings.NewReader(v.input) + sp, err := readGNUSparseMap1x0(r) + if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) { + t.Errorf("test %d, readGNUSparseMap1x0(...): got %v, want %v", i, sp, v.sparseMap) + } + if numBytes := len(v.input) - r.Len(); numBytes != v.cnt { + t.Errorf("test %d, bytes read: got %v, want %v", i, numBytes, v.cnt) + } + if err != v.err { + t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err) + } + } +} + +func TestUninitializedRead(t *testing.T) { + test := gnuTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + _, err = tr.Read([]byte{}) + if err == nil || err != io.EOF { + t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF) + } + +} + +type reader struct{ io.Reader } +type readSeeker struct{ io.ReadSeeker } +type readBadSeeker struct{ io.ReadSeeker } + +func (rbs *readBadSeeker) Seek(int64, int) (int64, error) { return 0, fmt.Errorf("illegal seek") } + +// TestReadTruncation test the ending condition on various truncated files and +// that truncated files are still detected even if the underlying io.Reader +// satisfies io.Seeker. +func TestReadTruncation(t *testing.T) { + var ss []string + for _, p := range []string{ + "testdata/gnu.tar", + "testdata/ustar-file-reg.tar", + "testdata/pax-path-hdr.tar", + "testdata/sparse-formats.tar", + } { + buf, err := ioutil.ReadFile(p) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + ss = append(ss, string(buf)) + } + + data1, data2, pax, sparse := ss[0], ss[1], ss[2], ss[3] + data2 += strings.Repeat("\x00", 10*512) + trash := strings.Repeat("garbage ", 64) // Exactly 512 bytes + + var vectors = []struct { + input string // Input stream + cnt int // Expected number of headers read + err error // Expected error outcome + }{ + {"", 0, io.EOF}, // Empty file is a "valid" tar file + {data1[:511], 0, io.ErrUnexpectedEOF}, + {data1[:512], 1, io.ErrUnexpectedEOF}, + {data1[:1024], 1, io.EOF}, + {data1[:1536], 2, io.ErrUnexpectedEOF}, + {data1[:2048], 2, io.EOF}, + {data1, 2, io.EOF}, + {data1[:2048] + data2[:1536], 3, io.EOF}, + {data2[:511], 0, io.ErrUnexpectedEOF}, + {data2[:512], 1, io.ErrUnexpectedEOF}, + {data2[:1195], 1, io.ErrUnexpectedEOF}, + {data2[:1196], 1, io.EOF}, // Exact end of data and start of padding + {data2[:1200], 1, io.EOF}, + {data2[:1535], 1, io.EOF}, + {data2[:1536], 1, io.EOF}, // Exact end of padding + {data2[:1536] + trash[:1], 1, io.ErrUnexpectedEOF}, + {data2[:1536] + trash[:511], 1, io.ErrUnexpectedEOF}, + {data2[:1536] + trash, 1, ErrHeader}, + {data2[:2048], 1, io.EOF}, // Exactly 1 empty block + {data2[:2048] + trash[:1], 1, io.ErrUnexpectedEOF}, + {data2[:2048] + trash[:511], 1, io.ErrUnexpectedEOF}, + {data2[:2048] + trash, 1, ErrHeader}, + {data2[:2560], 1, io.EOF}, // Exactly 2 empty blocks (normal end-of-stream) + {data2[:2560] + trash[:1], 1, io.EOF}, + {data2[:2560] + trash[:511], 1, io.EOF}, + {data2[:2560] + trash, 1, io.EOF}, + {data2[:3072], 1, io.EOF}, + {pax, 0, io.EOF}, // PAX header without data is a "valid" tar file + {pax + trash[:1], 0, io.ErrUnexpectedEOF}, + {pax + trash[:511], 0, io.ErrUnexpectedEOF}, + {sparse[:511], 0, io.ErrUnexpectedEOF}, + // TODO(dsnet): This should pass, but currently fails. + // {sparse[:512], 0, io.ErrUnexpectedEOF}, + {sparse[:3584], 1, io.EOF}, + {sparse[:9200], 1, io.EOF}, // Terminate in padding of sparse header + {sparse[:9216], 1, io.EOF}, + {sparse[:9728], 2, io.ErrUnexpectedEOF}, + {sparse[:10240], 2, io.EOF}, + {sparse[:11264], 2, io.ErrUnexpectedEOF}, + {sparse, 5, io.EOF}, + {sparse + trash, 5, io.EOF}, + } + + for i, v := range vectors { + for j := 0; j < 6; j++ { + var tr *Reader + var s1, s2 string + + switch j { + case 0: + tr = NewReader(&reader{strings.NewReader(v.input)}) + s1, s2 = "io.Reader", "auto" + case 1: + tr = NewReader(&reader{strings.NewReader(v.input)}) + s1, s2 = "io.Reader", "manual" + case 2: + tr = NewReader(&readSeeker{strings.NewReader(v.input)}) + s1, s2 = "io.ReadSeeker", "auto" + case 3: + tr = NewReader(&readSeeker{strings.NewReader(v.input)}) + s1, s2 = "io.ReadSeeker", "manual" + case 4: + tr = NewReader(&readBadSeeker{strings.NewReader(v.input)}) + s1, s2 = "ReadBadSeeker", "auto" + case 5: + tr = NewReader(&readBadSeeker{strings.NewReader(v.input)}) + s1, s2 = "ReadBadSeeker", "manual" + } + + var cnt int + var err error + for { + if _, err = tr.Next(); err != nil { + break + } + cnt++ + if s2 == "manual" { + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + break + } + } + } + if err != v.err { + t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %v, want %v", + i, s1, s2, err, v.err) + } + if cnt != v.cnt { + t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %d headers, want %d headers", + i, s1, s2, cnt, v.cnt) + } + } + } +} + +// TestReadHeaderOnly tests that Reader does not attempt to read special +// header-only files. +func TestReadHeaderOnly(t *testing.T) { + f, err := os.Open("testdata/hdr-only.tar") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer f.Close() + + var hdrs []*Header + tr := NewReader(f) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Errorf("Next(): got %v, want %v", err, nil) + continue + } + hdrs = append(hdrs, hdr) + + // If a special flag, we should read nothing. + cnt, _ := io.ReadFull(tr, []byte{0}) + if cnt > 0 && hdr.Typeflag != TypeReg { + t.Errorf("ReadFull(...): got %d bytes, want 0 bytes", cnt) + } + } + + // File is crafted with 16 entries. The later 8 are identical to the first + // 8 except that the size is set. + if len(hdrs) != 16 { + t.Fatalf("len(hdrs): got %d, want %d", len(hdrs), 16) + } + for i := 0; i < 8; i++ { + var hdr1, hdr2 = hdrs[i+0], hdrs[i+8] + hdr1.Size, hdr2.Size = 0, 0 + if !reflect.DeepEqual(*hdr1, *hdr2) { + t.Errorf("incorrect header:\ngot %+v\nwant %+v", *hdr1, *hdr2) + } + } +} + +func TestParsePAXRecord(t *testing.T) { + var medName = strings.Repeat("CD", 50) + var longName = strings.Repeat("AB", 100) + + var vectors = []struct { + input string + residual string + outputKey string + outputVal string + ok bool + }{ + {"6 k=v\n\n", "\n", "k", "v", true}, + {"19 path=/etc/hosts\n", "", "path", "/etc/hosts", true}, + {"210 path=" + longName + "\nabc", "abc", "path", longName, true}, + {"110 path=" + medName + "\n", "", "path", medName, true}, + {"9 foo=ba\n", "", "foo", "ba", true}, + {"11 foo=bar\n\x00", "\x00", "foo", "bar", true}, + {"18 foo=b=\nar=\n==\x00\n", "", "foo", "b=\nar=\n==\x00", true}, + {"27 foo=hello9 foo=ba\nworld\n", "", "foo", "hello9 foo=ba\nworld", true}, + {"27 ☺☻☹=日a本b語ç\nmeow mix", "meow mix", "☺☻☹", "日a本b語ç", true}, + {"17 \x00hello=\x00world\n", "", "\x00hello", "\x00world", true}, + {"1 k=1\n", "1 k=1\n", "", "", false}, + {"6 k~1\n", "6 k~1\n", "", "", false}, + {"6_k=1\n", "6_k=1\n", "", "", false}, + {"6 k=1 ", "6 k=1 ", "", "", false}, + {"632 k=1\n", "632 k=1\n", "", "", false}, + {"16 longkeyname=hahaha\n", "16 longkeyname=hahaha\n", "", "", false}, + {"3 somelongkey=\n", "3 somelongkey=\n", "", "", false}, + {"50 tooshort=\n", "50 tooshort=\n", "", "", false}, + } + + for _, v := range vectors { + key, val, res, err := parsePAXRecord(v.input) + ok := (err == nil) + if v.ok != ok { + if v.ok { + t.Errorf("parsePAXRecord(%q): got parsing failure, want success", v.input) + } else { + t.Errorf("parsePAXRecord(%q): got parsing success, want failure", v.input) + } + } + if ok && (key != v.outputKey || val != v.outputVal) { + t.Errorf("parsePAXRecord(%q): got (%q: %q), want (%q: %q)", + v.input, key, val, v.outputKey, v.outputVal) + } + if res != v.residual { + t.Errorf("parsePAXRecord(%q): got residual %q, want residual %q", + v.input, res, v.residual) + } + } +} + +func TestParseNumeric(t *testing.T) { + var vectors = []struct { + input string + output int64 + ok bool + }{ + // Test base-256 (binary) encoded values. + {"", 0, true}, + {"\x80", 0, true}, + {"\x80\x00", 0, true}, + {"\x80\x00\x00", 0, true}, + {"\xbf", (1 << 6) - 1, true}, + {"\xbf\xff", (1 << 14) - 1, true}, + {"\xbf\xff\xff", (1 << 22) - 1, true}, + {"\xff", -1, true}, + {"\xff\xff", -1, true}, + {"\xff\xff\xff", -1, true}, + {"\xc0", -1 * (1 << 6), true}, + {"\xc0\x00", -1 * (1 << 14), true}, + {"\xc0\x00\x00", -1 * (1 << 22), true}, + {"\x87\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true}, + {"\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true}, + {"\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true}, + {"\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true}, + {"\x80\x7f\xff\xff\xff\xff\xff\xff\xff", math.MaxInt64, true}, + {"\x80\x80\x00\x00\x00\x00\x00\x00\x00", 0, false}, + {"\xff\x80\x00\x00\x00\x00\x00\x00\x00", math.MinInt64, true}, + {"\xff\x7f\xff\xff\xff\xff\xff\xff\xff", 0, false}, + {"\xf5\xec\xd1\xc7\x7e\x5f\x26\x48\x81\x9f\x8f\x9b", 0, false}, + + // Test base-8 (octal) encoded values. + {"0000000\x00", 0, true}, + {" \x0000000\x00", 0, true}, + {" \x0000003\x00", 3, true}, + {"00000000227\x00", 0227, true}, + {"032033\x00 ", 032033, true}, + {"320330\x00 ", 0320330, true}, + {"0000660\x00 ", 0660, true}, + {"\x00 0000660\x00 ", 0660, true}, + {"0123456789abcdef", 0, false}, + {"0123456789\x00abcdef", 0, false}, + {"01234567\x0089abcdef", 342391, true}, + {"0123\x7e\x5f\x264123", 0, false}, + } + + for _, v := range vectors { + var p parser + num := p.parseNumeric([]byte(v.input)) + ok := (p.err == nil) + if v.ok != ok { + if v.ok { + t.Errorf("parseNumeric(%q): got parsing failure, want success", v.input) + } else { + t.Errorf("parseNumeric(%q): got parsing success, want failure", v.input) + } + } + if ok && num != v.output { + t.Errorf("parseNumeric(%q): got %d, want %d", v.input, num, v.output) + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go new file mode 100644 index 0000000..cf9cc79 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go new file mode 100644 index 0000000..6f17dbe --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go new file mode 100644 index 0000000..cb843db --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go b/vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go new file mode 100644 index 0000000..d63c072 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go @@ -0,0 +1,325 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "io/ioutil" + "os" + "path" + "reflect" + "strings" + "testing" + "time" +) + +func TestFileInfoHeader(t *testing.T) { + fi, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "small.txt"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e { + t.Errorf("Mode = %#o; want %#o", g, e) + } + if g, e := h.Size, int64(5); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } + // FileInfoHeader should error when passing nil FileInfo + if _, err := FileInfoHeader(nil, ""); err == nil { + t.Fatalf("Expected error when passing nil to FileInfoHeader") + } +} + +func TestFileInfoHeaderDir(t *testing.T) { + fi, err := os.Stat("testdata") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "testdata/"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + // Ignoring c_ISGID for golang.org/issue/4867 + if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e { + t.Errorf("Mode = %#o; want %#o", g, e) + } + if g, e := h.Size, int64(0); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } +} + +func TestFileInfoHeaderSymlink(t *testing.T) { + h, err := FileInfoHeader(symlink{}, "some-target") + if err != nil { + t.Fatal(err) + } + if g, e := h.Name, "some-symlink"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Linkname, "some-target"; g != e { + t.Errorf("Linkname = %q; want %q", g, e) + } +} + +type symlink struct{} + +func (symlink) Name() string { return "some-symlink" } +func (symlink) Size() int64 { return 0 } +func (symlink) Mode() os.FileMode { return os.ModeSymlink } +func (symlink) ModTime() time.Time { return time.Time{} } +func (symlink) IsDir() bool { return false } +func (symlink) Sys() interface{} { return nil } + +func TestRoundTrip(t *testing.T) { + data := []byte("some file contents") + + var b bytes.Buffer + tw := NewWriter(&b) + hdr := &Header{ + Name: "file.txt", + Uid: 1 << 21, // too big for 8 octal digits + Size: int64(len(data)), + ModTime: time.Now(), + } + // tar only supports second precision. + hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond) + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("tw.WriteHeader: %v", err) + } + if _, err := tw.Write(data); err != nil { + t.Fatalf("tw.Write: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("tw.Close: %v", err) + } + + // Read it back. + tr := NewReader(&b) + rHdr, err := tr.Next() + if err != nil { + t.Fatalf("tr.Next: %v", err) + } + if !reflect.DeepEqual(rHdr, hdr) { + t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr) + } + rData, err := ioutil.ReadAll(tr) + if err != nil { + t.Fatalf("Read: %v", err) + } + if !bytes.Equal(rData, data) { + t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data) + } +} + +type headerRoundTripTest struct { + h *Header + fm os.FileMode +} + +func TestHeaderRoundTrip(t *testing.T) { + golden := []headerRoundTripTest{ + // regular file. + { + h: &Header{ + Name: "test.txt", + Mode: 0644 | c_ISREG, + Size: 12, + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeReg, + }, + fm: 0644, + }, + // symbolic link. + { + h: &Header{ + Name: "link.txt", + Mode: 0777 | c_ISLNK, + Size: 0, + ModTime: time.Unix(1360600852, 0), + Typeflag: TypeSymlink, + }, + fm: 0777 | os.ModeSymlink, + }, + // character device node. + { + h: &Header{ + Name: "dev/null", + Mode: 0666 | c_ISCHR, + Size: 0, + ModTime: time.Unix(1360578951, 0), + Typeflag: TypeChar, + }, + fm: 0666 | os.ModeDevice | os.ModeCharDevice, + }, + // block device node. + { + h: &Header{ + Name: "dev/sda", + Mode: 0660 | c_ISBLK, + Size: 0, + ModTime: time.Unix(1360578954, 0), + Typeflag: TypeBlock, + }, + fm: 0660 | os.ModeDevice, + }, + // directory. + { + h: &Header{ + Name: "dir/", + Mode: 0755 | c_ISDIR, + Size: 0, + ModTime: time.Unix(1360601116, 0), + Typeflag: TypeDir, + }, + fm: 0755 | os.ModeDir, + }, + // fifo node. + { + h: &Header{ + Name: "dev/initctl", + Mode: 0600 | c_ISFIFO, + Size: 0, + ModTime: time.Unix(1360578949, 0), + Typeflag: TypeFifo, + }, + fm: 0600 | os.ModeNamedPipe, + }, + // setuid. + { + h: &Header{ + Name: "bin/su", + Mode: 0755 | c_ISREG | c_ISUID, + Size: 23232, + ModTime: time.Unix(1355405093, 0), + Typeflag: TypeReg, + }, + fm: 0755 | os.ModeSetuid, + }, + // setguid. + { + h: &Header{ + Name: "group.txt", + Mode: 0750 | c_ISREG | c_ISGID, + Size: 0, + ModTime: time.Unix(1360602346, 0), + Typeflag: TypeReg, + }, + fm: 0750 | os.ModeSetgid, + }, + // sticky. + { + h: &Header{ + Name: "sticky.txt", + Mode: 0600 | c_ISREG | c_ISVTX, + Size: 7, + ModTime: time.Unix(1360602540, 0), + Typeflag: TypeReg, + }, + fm: 0600 | os.ModeSticky, + }, + // hard link. + { + h: &Header{ + Name: "hard.txt", + Mode: 0644 | c_ISREG, + Size: 0, + Linkname: "file.txt", + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeLink, + }, + fm: 0644, + }, + // More information. + { + h: &Header{ + Name: "info.txt", + Mode: 0600 | c_ISREG, + Size: 0, + Uid: 1000, + Gid: 1000, + ModTime: time.Unix(1360602540, 0), + Uname: "slartibartfast", + Gname: "users", + Typeflag: TypeReg, + }, + fm: 0600, + }, + } + + for i, g := range golden { + fi := g.h.FileInfo() + h2, err := FileInfoHeader(fi, "") + if err != nil { + t.Error(err) + continue + } + if strings.Contains(fi.Name(), "/") { + t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name()) + } + name := path.Base(g.h.Name) + if fi.IsDir() { + name += "/" + } + if got, want := h2.Name, name; got != want { + t.Errorf("i=%d: Name: got %v, want %v", i, got, want) + } + if got, want := h2.Size, g.h.Size; got != want { + t.Errorf("i=%d: Size: got %v, want %v", i, got, want) + } + if got, want := h2.Uid, g.h.Uid; got != want { + t.Errorf("i=%d: Uid: got %d, want %d", i, got, want) + } + if got, want := h2.Gid, g.h.Gid; got != want { + t.Errorf("i=%d: Gid: got %d, want %d", i, got, want) + } + if got, want := h2.Uname, g.h.Uname; got != want { + t.Errorf("i=%d: Uname: got %q, want %q", i, got, want) + } + if got, want := h2.Gname, g.h.Gname; got != want { + t.Errorf("i=%d: Gname: got %q, want %q", i, got, want) + } + if got, want := h2.Linkname, g.h.Linkname; got != want { + t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want) + } + if got, want := h2.Typeflag, g.h.Typeflag; got != want { + t.Logf("%#v %#v", g.h, fi.Sys()) + t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want) + } + if got, want := h2.Mode, g.h.Mode; got != want { + t.Errorf("i=%d: Mode: got %o, want %o", i, got, want) + } + if got, want := fi.Mode(), g.fm; got != want { + t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want) + } + if got, want := h2.AccessTime, g.h.AccessTime; got != want { + t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want) + } + if got, want := h2.ChangeTime, g.h.ChangeTime; got != want { + t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want) + } + if got, want := h2.ModTime, g.h.ModTime; got != want { + t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want) + } + if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h { + t.Errorf("i=%d: Sys didn't return original *Header", i) + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/gnu-multi-hdrs.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/gnu-multi-hdrs.tar new file mode 100644 index 0000000000000000000000000000000000000000..8bcad55d06e8f9fde3641d2a8df370503a582ce6 GIT binary patch literal 4608 zcmdPX*VA|K$%Afaj^QI J<`xZ33jpEZfW-g+ literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/gnu.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/gnu.tar new file mode 100644 index 0000000000000000000000000000000000000000..fc899dc8dc2ad9952f5c5f67a0c76ca2d87249e9 GIT binary patch literal 3072 zcmeHH%L>9U5Ztq0(Jv@FdDKtv;8zq|ijXv5BIw^6DOh^2UK)_HbK1>@VQ0c5`qsHR zJrb1zXEcV16&lMRW}rdtKd=NSXg->JkvP|Esp4`g&CK_h+FMmo7oR?iU7RP&svn2t z!9Ke4)upeR_aRYKtT+(g`B!B>fS>t?p7IZ9V9LKWlK+)w+iY|SVQ_tY3I4Ddrx1w) M;($0H4*b6ZFWOBnBLDyZ literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/hardlink.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/hardlink.tar new file mode 100644 index 0000000000000000000000000000000000000000..9cd1a26572e44150ded8a628fefb28fa089645d1 GIT binary patch literal 2560 zcmYex%t_TNsVHHfAus>}GZPaAAZ2K7Y5<}Q3?Y0F6C}!DXk=n;YGz~#VjCD58=09i zC>YStO>m=2i%SxVfKDn)N-QZUh6`gbN{dsA@JNF_1@sD>#xP)T3IyjQ7L{Zs0g1H4 z;u5aG>Bv!6(JTZq5{ps>JpTi;4Ql>3F*i14P%uoRL*X>S^FPfJ)~LawAut*OgFXZR DcLg^L literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/hdr-only.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/hdr-only.tar new file mode 100644 index 0000000000000000000000000000000000000000..f25034083de6e0176e429f939875def6eb78cc73 GIT binary patch literal 10240 zcmeI2ZE}J@42Ji2Pq95gv)>o#1+ajk2rWokd-`UnK%I_CXNW`V?jLp5$;Lb+o4jM3 zRS%4K0WN2N31Pu$!vOG|0DSEi6VfBdZFVz=+!#Geo7WlKr zRl;AI>}kUnRryx%w0!65X8WAPynIb6zQg@I`q=ZhT;AVZ14uaInh{tzn(ux&A2{mb)wBl`42&RQXR%ispcL7W$7F z`oDwzV^q+8Xow$MornI@^B?pdod1LVbIgk3(>1Qxw*Nb){{{Vr0_`Z9LH`*QrhogT zdFVfV{nxJ3f3W@s{fGXsn}`0>@$ct<6aa(%Lr=2_XU@0=FB1PuCY>1poj5 literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue10968.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue10968.tar new file mode 100644 index 0000000000000000000000000000000000000000..1cc837bcff14cd822a26e43034955c82e852ab29 GIT binary patch literal 512 zcmbVI!41MN47Ah*kg@;^fX)>lI!AWsgI^V-_Q4}k$6}2x&>iv*cG6Oc`at9n#lG|1 zIi>(iak!RTol#boyD`0c^v(cHJJuvHh-e39;{t(!nc@gWsV;O@FkUc{-h`pC817Ix zgh|QIatu;A!G^JZ7UC1V_vGb4bURuTWAy6SS-Fx(D=wcI#QP1Y#wzX?HAf0_+~lp> yN?iGbw2JFgJjd0vnp9WIo>K3V$tfee6;KE|`1A3J$tp?9B&Y7`+Gwrtzls-lP-;g2 literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue11169.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue11169.tar new file mode 100644 index 0000000000000000000000000000000000000000..4d71fa15260609ecee0c8c751cfebf49be8763ac GIT binary patch literal 602 zcmdPX4@j)=NKH&hEh^SCG%+zV)=x}KWS}ZA00J`;69y0s1n9JZp|KHzp^>Svp`nSX svAH3G0gzz?R8~P%SKu(Lw74X(2$wLeQpM#*<5EsKtJ9uH<6)~%E3&=F&Kzfi~{+q#!cOa=AzbjSO$1IxR;aN z*g3HiZShf(vs!BvbKPiG1!!GY>l3F=j$kqh!InX?lk@{OAsyh#2%!qzcGBC1;;FXq z6(OUFiyZLnA-?TXGNUXTUA0xix-DD8En}aw{C@tp7XnB4F0(23$NLG-y>mPO&s&?~ literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/nil-uid.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/nil-uid.tar new file mode 100644 index 0000000000000000000000000000000000000000..cc9cfaa33cc5de0a28b4183c1705d801f788c96a GIT binary patch literal 1024 zcmWGAG%z(VGPcn33UJrU$xmmX0WbgpGcywmlR@HOU}(l*Xk=(?U}j`)Zf3?{U^3;ld-=Ii;m6ILFVT3VXy>=Udb`;P z_AF}Ko(kwITGLdEM1G)5o<9H!jr=439(@V?ONRXCAu*5YPw-#9x&N1V|EJgyTG0B^ zUZ)s9!5N^&Ghph+I3UGBWYR$X|2zH<_}9R{M*cI=m|k}8li%1Drp7@Vny>jkUkM=z Sl}Br1`$oQ%|3`N;j=%%-SrC5! literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax-path-hdr.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax-path-hdr.tar new file mode 100644 index 0000000000000000000000000000000000000000..ab8fc325b26159f4fed6bfb59fe5f616d35fec74 GIT binary patch literal 1024 zcmXR&EXmL>$=5GRO-#v6r43~O0Sq{30|OI7m>ft6gMqP;fsrYLLIndIKxuJFViC}K xO07co9Hr*bNx!kNLIE%d*akR880v$Gocz3WU67b=USe)47oFTOYR$le004hRKE?n5 literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax.tar new file mode 100644 index 0000000000000000000000000000000000000000..9bc24b6587d726c7fca4e533d9c61a3801a34688 GIT binary patch literal 10240 zcmeH~&u-H|5XOD(Q}_vz`9HIV-Z}CL1|qeBRwxNlAD?kbq{vMJQj94uds*3I@2-Ed zpXb|Q{eF0Qw;4Wdw!4)@_!@~t&7&b8A|a!oqM>78BOoLqCLtvwr=Z5b$i&RT%Er#Y zO+ZjcSVUAHn~8MUp(~vBV+cg7LjpEKCCE6D7E@EwTcMv_>l+&bbg`j1Cv0A776ym5t@+ zSt9MDBFtXbKY&m4pMN0f`l~hhD>#q(-`x$5n+q@eEPmAevA;0XTM8XMYkTvSmQ-t5 zkihVw{(qQ#_JjT})&KMa&-FhG0c8or{CPvw|Jf69WL!B2Wa1KoKYcMW6^2fg(@@ia-%40!5$*6oDd81d2cr_`3;w E2V3|JA^-pY literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small.txt b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small.txt new file mode 100644 index 0000000..b249bfc --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small.txt @@ -0,0 +1 @@ +Kilts \ No newline at end of file diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small2.txt b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small2.txt new file mode 100644 index 0000000..394ee3e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small2.txt @@ -0,0 +1 @@ +Google.com diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/sparse-formats.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/sparse-formats.tar new file mode 100644 index 0000000000000000000000000000000000000000..8bd4e74d50f9c8961f80a887ab7d6449e032048b GIT binary patch literal 17920 zcmeHO!BXQ!5M{6a3g^xmb&sU64qQV{sZ?#{1DvdPiv%!*Aw}}_dEEjdi|Nre#)hpG zE{}(KnjXC;wbY|&t*;k1>*dFY-EbwpT`b+-m>u zXfjaoe5W44g1VMFbuvaLV|3acePf?HHj7T34f|}^XTyHz*zDR5hW%jJ$GN!K=dPX7 zuwNSXOT&I?*sl!xm0`a!>{o{UdfWbohf`t0wKm47jd5yYoVY#C#(p&HN5g(h+o$d^ z>C~x6+ovLJp9;gi;Rj^+0U3Tkh98jO2W0pG8Gb;9ACTb()boS>@h8I{`Aj1#H@B=dZfDAvtjWMmW;RoC~7Py0M z`m*5%-1CF}@n^#y*zgB7{DBRBV8b8S@CP>hfen8^_^{DnOAo^z5MmhHr;h_0e!zww zu;B-6_yHS!z=j{N;RkH^0r&ji+3`30fen9P!ynl22R8fx0blw!^!(v@`{T)$#0AMUzUr{%bWEKK}dPBZfAtotM&Q)$6}V4GkAAL?ydIxj}Q`3 zJOATY>UD~$8khO$y?3COY_Ib_T!Mz?cSHC~#(oEVI84ue{e9LR^x69SzvU?x#e`$G z`ReZSkBilxf3HuQYO>v9_2tWYd3#C|uKGRxyy zk#CN0vO|t>vO|t?vO|t@vV)g2dr7mGGDo)W_L8o>q-!tf+DkfmNk=c~=p`M!q@$Pg+)H}y zB|Z0&o_k5py`&p2>BdW1A|f+1N!@lEFX<*ndTZ#%;H1d0PWQ;sPWQ<1PWQ+WPxo*$ zCpU9)GbcB5ax*74^K5XIR5u%)rF*!UXXCT<7;fg-2rW5AHbhJJa5K*aY3VWC%(G!y za*S-8mhRzZo{iMfW4M`TW3}WM*$8{;Fcc4%{&{rCCA9dZs{Iw=Go{iJw}H4J9rlMBkscMKka?4V*dGW zC;x{dmiMq8gvD(vpG{xk(ev}2>9_pg&wuy1`g67#*MIt_+k5+eaQ%mN-{S%QM+!~( zxc)<2*YN+U4#l|sv%B)c7PePszGeL<)ZO)vtHtH=w09GsNfox9d|WQBPwAMB1HKi$ z5#I)1l17qNl4g>25`gi0%mT0gEC34-1PB5I0fGQQfKq@`fKq@`fKq@;fJ%T$fJ%T$ zfLefBfLefBfLeekKolSf5Cw<=%mtVWFc)Ahz+8YvfJT5ufJT5u0A#CaDG)Nzv=opE zMO*%@0IdS81gZg+MP*A>0a;*L*S;zQ^1P%)r9keM))iGXNaa8-mb9xN$g|SAj;op= zlS*1t6=X?iT~QSVc~H`#(jdo4>x!y6$YPQf)rV9dQiVt*BGrggBvO?~WSR`0jpG)F zR$z95<=;=bg;QIfR|BZ{k=7%FW59w-S{C9wpVT}I{Ao4pNVj%vb z{pbH+{)aiAzW>2BZdt7HACK|hLCzZHZZvnf_-l0|IXl~}=T~SgCPR@QPL^Kc(9Lpj zv56@U!e<=Br@-+2fA>qk!2KVorji&Q8CK+X>e0g#)6 z@dUuK3}6apYu09*vX znm!5vu=b8Z0L;v^6bQklmI7jCCS}XN6`)n1l|VJX%uKdX6)-c?y7pBeFf)@Dl>##} ztt+Z(U}h#Qst0CfT31vh!CNoVqM~4CrgcSC7r2GAs4|$DXJmbGf$=ejIaDU{qjK;EfgdABYgg9smFU literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/star.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/star.tar new file mode 100644 index 0000000000000000000000000000000000000000..59e2d4e604611eeac3e2a0f3d6f71d2623c50449 GIT binary patch literal 3072 zcmeHHT?)e>4DRzz;R#BjQ;)ERouag*479>@u-$$NbG3!-WyoMlUh?xvNIv}HZD&jy zuA!-C5KZlY0Y@bP833Zfm_JQ2M2yBQ2dTK6K{>VDLBV=D{z>IvVF` zUD#xgUGh?F1AikeIW6NnOIrMRGU4UU`62nAWxyx>^STEhN#m{lQEc_EhA!%P7hKg3OW)iVh<3F#{;S{=CgM+^V}b=Pyosep=F7x+*OI&?Q6F7LxR?fb`B^0 zttmJo<+m$~$Msw9KQ_wfqfW1sDJ#zsWq25)8&TsKn~+7*oF6~$0+nD?L2C$TBQM>$ zcQqo7Eo8w%PL1H9D9KY4`f7Ku6*oaxOv|7S1ZpTT=%sbGS#L2%*Uxm5P}U`mG$%9I zIRuF>849Ugh}k{mmgLJGtca9XnA{r2KBJ`fRXOnPqEZjWtz4z5Mq_`uZWX)VuFVko z#$DNd>@Saj1w+|ef@dPC=g!5Kb4c+mQ$bcu#R_I=;>D)V&agmv_`vpStP-sQh!z(| z5{9v2$DGKS|DrLqZ8y7?ox@|a-R^30zMeK388O@+r+cK=9NdZ)ow#}n{kwf`tD4=t zR9Oz?v}2QNv&rDR7u$%4%_>%5(k#={r!rZ(5WA5+U_Rz+w6{_kV7C!KK2e+5VuS-5 zWLRhpI#>Iq%|mhyZxDfRHCi7gC+R&sD4|k;~pXMNUD{^NPC5(KX~`*cXkwAurz(+XVNg z1P`yNv%FZy)9`uTGCWUJ^@>e2&T3O`W@re{ZN S9gn%XW0CXwKYp`+7X1&(X!Xnh literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/ustar.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/ustar.tar new file mode 100644 index 0000000000000000000000000000000000000000..29679d9a305fc0293f31212541335af824ab32c7 GIT binary patch literal 2048 zcmYex%t_TNsVHHfAus>}GZPaA5N&Q|3Z@N=AbgM*P?o{a$k4#V#K6eR)QrKv#MIE( zgh9c8hHiozU0Pg{SOj!ZaYkZZDqIwk0aTWjhA9jefq29K;yD8YhMfGo^t{B}RQ&;E gz@3MSk&&8{lh1`qc2s;c1V%$(Gz3ONV7P_=0FTf|dgec!sXT^AK{$jKc@-kWdUe(&uh-&e0L+xARj zwLzm>LI~3|1sT#R&XkBIzWbfCPrYEK7fr^Q@7vXO;&pw$QCTT3-?&yO+jq(<{6qS`FS_vP zIBhMBjnmsnS~{|C9LMN8#r!W{zj5l&zcE?^U_t*||1zJ{zqInH{-Zy}2$O|c?WSFx zxn8RtM3-UpAJiW`Z@Zar#$ojz)NjtWBfnULUzD=jj5!>iG>O2k{o(=ZAg=$-urC7q zVm{n!{kK`S@p|Vk`q%aFg#nw)bMB-40yAj*%7=F37m@ziFINBH7pTSD@Cfil^^9T6 zxL-iu+Aq)#ev#CF(l2&S@A^eC<`;^e4{ZQ#s9$Y4r}$iP3;;e3V;a&MNN*s$f%FFc H(;N5+1FUK9 literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer-big-long.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer-big-long.tar new file mode 100644 index 0000000000000000000000000000000000000000..5960ee824784ffeacb976a9c648be41b0281508b GIT binary patch literal 4096 zcmeIuJqp7x3tu-|!r}ytVByrmfae ipO37m$1T~NWs?FFpa2CZKmiI+fC3bt00k&;vcMnFf)<_t literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer.tar new file mode 100644 index 0000000000000000000000000000000000000000..e6d816ad0775d56d09242d6f5d1dbe56af310a32 GIT binary patch literal 3584 zcmeHIK@P$o5ajGDd_l9j6nKIMUt!cVjT92WM1L@81h#LhDgML6Bon)c?rO_kPgyt^3D0fH9$GJM`O*&4VCw= zv#H)UKC-TtzNwGuV$*%C{bm zsdIMLR{C5VZL^vBE!S4cfUeCYt@>GOiAt%sq7tp|_iN{x5cDreh9ME=K+wOCQm`$x j!znSk-v6Dy)}|V_!f*AilYjI7l|Jj-R%ReG@B;%+QQ}au literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/xattrs.tar b/vendor/github.com/Microsoft/go-winio/archive/tar/testdata/xattrs.tar new file mode 100644 index 0000000000000000000000000000000000000000..9701950edd1f0dc82858b7117136b37391be0b08 GIT binary patch literal 5120 zcmeHJv2KGf5M|~o_yWg1+khiw>d;i}P^nX=$R$ooYd`|ilD{uBAv6g^kxC>6-(uu< zHg^v_-l5r}td>fyRbC(vfcdOQq}Iq(#u+Ja9X?}Dv(|CCVoJF~09ZgF;2a!G7^%~| zYNYoMUQ-rE=5KzzBJ^EKyr-Mx-NQ4gq%k=v3zee}wOxElT`HH-ei(K*xV|_} zC{$GDvDuoW?o>&odUrVuVHkt_w?IH zW3PV_@V!Jxt@A^i>Yrj(>;K=H?5X8!tJS~MYVd#a^`?|QJKb&Uduf~MfN4M7$J!Lr zF40zZMF!9x{tqJ#0F5+;{2!=)=Knre|G(mAKU`hAc#r>!#{V(9d;sW1hxVv7@B_zF ze)#eKF~#1~>@WTI`#+&4`lkel_5U6!N8h^5vRAE8lqGgr9-Ul!p=H1_U>TS&1K)l2 B)fNB% literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go new file mode 100644 index 0000000..30d7e60 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go @@ -0,0 +1,444 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - catch more errors (no first header, etc.) + +import ( + "bytes" + "errors" + "fmt" + "io" + "path" + "sort" + "strconv" + "strings" + "time" +) + +var ( + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") +) + +// A Writer provides sequential writing of a tar archive in POSIX.1 format. +// A tar archive consists of a sequence of files. +// Call WriteHeader to begin a new file, and then call Write to supply that file's data, +// writing at most hdr.Size bytes in total. +type Writer struct { + w io.Writer + err error + nb int64 // number of unwritten bytes for current file entry + pad int64 // amount of padding to write after current file entry + closed bool + usedBinary bool // whether the binary numeric field extension was used + preferPax bool // use pax header instead of binary numeric header + hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header + paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header +} + +type formatter struct { + err error // Last error seen +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} } + +// Flush finishes writing the current file (optional). +func (tw *Writer) Flush() error { + if tw.nb > 0 { + tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) + return tw.err + } + + n := tw.nb + tw.pad + for n > 0 && tw.err == nil { + nr := n + if nr > blockSize { + nr = blockSize + } + var nw int + nw, tw.err = tw.w.Write(zeroBlock[0:nr]) + n -= int64(nw) + } + tw.nb = 0 + tw.pad = 0 + return tw.err +} + +// Write s into b, terminating it with a NUL if there is room. +func (f *formatter) formatString(b []byte, s string) { + if len(s) > len(b) { + f.err = ErrFieldTooLong + return + } + ascii := toASCII(s) + copy(b, ascii) + if len(ascii) < len(b) { + b[len(ascii)] = 0 + } +} + +// Encode x as an octal ASCII string and write it into b with leading zeros. +func (f *formatter) formatOctal(b []byte, x int64) { + s := strconv.FormatInt(x, 8) + // leading zeros, but leave room for a NUL. + for len(s)+1 < len(b) { + s = "0" + s + } + f.formatString(b, s) +} + +// fitsInBase256 reports whether x can be encoded into n bytes using base-256 +// encoding. Unlike octal encoding, base-256 encoding does not require that the +// string ends with a NUL character. Thus, all n bytes are available for output. +// +// If operating in binary mode, this assumes strict GNU binary mode; which means +// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is +// equivalent to the sign bit in two's complement form. +func fitsInBase256(n int, x int64) bool { + var binBits = uint(n-1) * 8 + return n >= 9 || (x >= -1<= 0; i-- { + b[i] = byte(x) + x >>= 8 + } + b[0] |= 0x80 // Highest bit indicates binary format + return + } + + f.formatOctal(b, 0) // Last resort, just write zero + f.err = ErrFieldTooLong +} + +var ( + minTime = time.Unix(0, 0) + // There is room for 11 octal digits (33 bits) of mtime. + maxTime = minTime.Add((1<<33 - 1) * time.Second) +) + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +func (tw *Writer) WriteHeader(hdr *Header) error { + return tw.writeHeader(hdr, true) +} + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +// As this method is called internally by writePax header to allow it to +// suppress writing the pax header. +func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { + if tw.closed { + return ErrWriteAfterClose + } + if tw.err == nil { + tw.Flush() + } + if tw.err != nil { + return tw.err + } + + // a map to hold pax header records, if any are needed + paxHeaders := make(map[string]string) + + // TODO(shanemhansen): we might want to use PAX headers for + // subsecond time resolution, but for now let's just capture + // too long fields or non ascii characters + + var f formatter + var header []byte + + // We need to select which scratch buffer to use carefully, + // since this method is called recursively to write PAX headers. + // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. + // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is + // already being used by the non-recursive call, so we must use paxHdrBuff. + header = tw.hdrBuff[:] + if !allowPax { + header = tw.paxHdrBuff[:] + } + copy(header, zeroBlock) + s := slicer(header) + + // Wrappers around formatter that automatically sets paxHeaders if the + // argument extends beyond the capacity of the input byte slice. + var formatString = func(b []byte, s string, paxKeyword string) { + needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) + if needsPaxHeader { + paxHeaders[paxKeyword] = s + return + } + f.formatString(b, s) + } + var formatNumeric = func(b []byte, x int64, paxKeyword string) { + // Try octal first. + s := strconv.FormatInt(x, 8) + if len(s) < len(b) { + f.formatOctal(b, x) + return + } + + // If it is too long for octal, and PAX is preferred, use a PAX header. + if paxKeyword != paxNone && tw.preferPax { + f.formatOctal(b, 0) + s := strconv.FormatInt(x, 10) + paxHeaders[paxKeyword] = s + return + } + + tw.usedBinary = true + f.formatNumeric(b, x) + } + var formatTime = func(b []byte, t time.Time, paxKeyword string) { + var unixTime int64 + if !t.Before(minTime) && !t.After(maxTime) { + unixTime = t.Unix() + } + formatNumeric(b, unixTime, paxNone) + + // Write a PAX header if the time didn't fit precisely. + if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) { + paxHeaders[paxKeyword] = formatPAXTime(t) + } + } + + // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + pathHeaderBytes := s.next(fileNameSize) + + formatString(pathHeaderBytes, hdr.Name, paxPath) + + f.formatOctal(s.next(8), hdr.Mode) // 100:108 + formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116 + formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124 + formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136 + formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148 + s.next(8) // chksum (148:156) + s.next(1)[0] = hdr.Typeflag // 156:157 + + formatString(s.next(100), hdr.Linkname, paxLinkpath) + + copy(s.next(8), []byte("ustar\x0000")) // 257:265 + formatString(s.next(32), hdr.Uname, paxUname) // 265:297 + formatString(s.next(32), hdr.Gname, paxGname) // 297:329 + formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337 + formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345 + + // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + prefixHeaderBytes := s.next(155) + formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix + + // Use the GNU magic instead of POSIX magic if we used any GNU extensions. + if tw.usedBinary { + copy(header[257:265], []byte("ustar \x00")) + } + + _, paxPathUsed := paxHeaders[paxPath] + // try to use a ustar header when only the name is too long + if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + prefix, suffix, ok := splitUSTARPath(hdr.Name) + if ok { + // Since we can encode in USTAR format, disable PAX header. + delete(paxHeaders, paxPath) + + // Update the path fields + formatString(pathHeaderBytes, suffix, paxNone) + formatString(prefixHeaderBytes, prefix, paxNone) + } + } + + // The chksum field is terminated by a NUL and a space. + // This is different from the other octal fields. + chksum, _ := checksum(header) + f.formatOctal(header[148:155], chksum) // Never fails + header[155] = ' ' + + // Check if there were any formatting errors. + if f.err != nil { + tw.err = f.err + return tw.err + } + + if allowPax { + if !hdr.AccessTime.IsZero() { + paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime) + } + if !hdr.ChangeTime.IsZero() { + paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime) + } + if !hdr.CreationTime.IsZero() { + paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime) + } + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + for k, v := range hdr.Winheaders { + paxHeaders[paxWindows+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = int64(hdr.Size) + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header) + return tw.err +} + +func formatPAXTime(t time.Time) string { + sec := t.Unix() + usec := t.Nanosecond() + s := strconv.FormatInt(sec, 10) + if usec != 0 { + s = fmt.Sprintf("%s.%09d", s, usec) + } + return s +} + +// splitUSTARPath splits a path according to USTAR prefix and suffix rules. +// If the path is not splittable, then it will return ("", "", false). +func splitUSTARPath(name string) (prefix, suffix string, ok bool) { + length := len(name) + if length <= fileNameSize || !isASCII(name) { + return "", "", false + } else if length > fileNamePrefixSize+1 { + length = fileNamePrefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + + i := strings.LastIndex(name[:length], "/") + nlen := len(name) - i - 1 // nlen is length of suffix + plen := i // plen is length of prefix + if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { + return "", "", false + } + return name[:i], name[i+1:], true +} + +// writePaxHeader writes an extended pax header to the +// archive. +func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { + // Prepare extended header + ext := new(Header) + ext.Typeflag = TypeXHeader + // Setting ModTime is required for reader parsing to + // succeed, and seems harmless enough. + ext.ModTime = hdr.ModTime + // The spec asks that we namespace our pseudo files + // with the current pid. However, this results in differing outputs + // for identical inputs. As such, the constant 0 is now used instead. + // golang.org/issue/12358 + dir, file := path.Split(hdr.Name) + fullName := path.Join(dir, "PaxHeaders.0", file) + + ascii := toASCII(fullName) + if len(ascii) > 100 { + ascii = ascii[:100] + } + ext.Name = ascii + // Construct the body + var buf bytes.Buffer + + // Keys are sorted before writing to body to allow deterministic output. + var keys []string + for k := range paxHeaders { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) + } + + ext.Size = int64(len(buf.Bytes())) + if err := tw.writeHeader(ext, false); err != nil { + return err + } + if _, err := tw.Write(buf.Bytes()); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil +} + +// formatPAXRecord formats a single PAX record, prefixing it with the +// appropriate length. +func formatPAXRecord(k, v string) string { + const padding = 3 // Extra padding for ' ', '=', and '\n' + size := len(k) + len(v) + padding + size += len(strconv.Itoa(size)) + record := fmt.Sprintf("%d %s=%s\n", size, k, v) + + // Final adjustment if adding size field increased the record size. + if len(record) != size { + size = len(record) + record = fmt.Sprintf("%d %s=%s\n", size, k, v) + } + return record +} + +// Write writes to the current entry in the tar archive. +// Write returns the error ErrWriteTooLong if more than +// hdr.Size bytes are written after WriteHeader. +func (tw *Writer) Write(b []byte) (n int, err error) { + if tw.closed { + err = ErrWriteAfterClose + return + } + overwrite := false + if int64(len(b)) > tw.nb { + b = b[0:tw.nb] + overwrite = true + } + n, err = tw.w.Write(b) + tw.nb -= int64(n) + if err == nil && overwrite { + err = ErrWriteTooLong + return + } + tw.err = err + return +} + +// Close closes the tar archive, flushing any unwritten +// data to the underlying writer. +func (tw *Writer) Close() error { + if tw.err != nil || tw.closed { + return tw.err + } + tw.Flush() + tw.closed = true + if tw.err != nil { + return tw.err + } + + // trailer: two zero blocks + for i := 0; i < 2; i++ { + _, tw.err = tw.w.Write(zeroBlock) + if tw.err != nil { + break + } + } + return tw.err +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go b/vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go new file mode 100644 index 0000000..a5c9382 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go @@ -0,0 +1,739 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "sort" + "strings" + "testing" + "testing/iotest" + "time" +) + +type writerTestEntry struct { + header *Header + contents string +} + +type writerTest struct { + file string // filename of expected output + entries []*writerTestEntry +} + +var writerTests = []*writerTest{ + // The writer test file was produced with this command: + // tar (GNU tar) 1.26 + // ln -s small.txt link.txt + // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt + { + file: "testdata/writer.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1246508266, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Kilts", + }, + { + header: &Header{ + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1245217492, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Google.com\n", + }, + { + header: &Header{ + Name: "link.txt", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Size: 0, + ModTime: time.Unix(1314603082, 0), + Typeflag: '2', + Linkname: "small.txt", + Uname: "strings", + Gname: "strings", + }, + // no contents + }, + }, + }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt + // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar + { + file: "testdata/writer-big.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "tmp/16gig.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 16 << 30, + ModTime: time.Unix(1254699560, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt + // tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar + { + file: "testdata/writer-big-long.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "16gig.txt", + Mode: 0644, + Uid: 1000, + Gid: 1000, + Size: 16 << 30, + ModTime: time.Unix(1399583047, 0), + Typeflag: '0', + Uname: "guillaume", + Gname: "guillaume", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, + // This file was produced using gnu tar 1.17 + // gnutar -b 4 --format=ustar (longname/)*15 + file.txt + { + file: "testdata/ustar.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "file.txt", + Mode: 0644, + Uid: 0765, + Gid: 024, + Size: 06, + ModTime: time.Unix(1360135598, 0), + Typeflag: '0', + Uname: "shane", + Gname: "staff", + }, + contents: "hello\n", + }, + }, + }, + // This file was produced using gnu tar 1.26 + // echo "Slartibartfast" > file.txt + // ln file.txt hard.txt + // tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt + { + file: "testdata/hardlink.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "file.txt", + Mode: 0644, + Uid: 1000, + Gid: 100, + Size: 15, + ModTime: time.Unix(1425484303, 0), + Typeflag: '0', + Uname: "vbatts", + Gname: "users", + }, + contents: "Slartibartfast\n", + }, + { + header: &Header{ + Name: "hard.txt", + Mode: 0644, + Uid: 1000, + Gid: 100, + Size: 0, + ModTime: time.Unix(1425484303, 0), + Typeflag: '1', + Linkname: "file.txt", + Uname: "vbatts", + Gname: "users", + }, + // no contents + }, + }, + }, +} + +// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection. +func bytestr(offset int, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("%04x ", offset) + for _, ch := range b { + switch { + case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z': + s += fmt.Sprintf(" %c", ch) + default: + s += fmt.Sprintf(" %02x", ch) + } + } + return s +} + +// Render a pseudo-diff between two blocks of bytes. +func bytediff(a []byte, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b)) + for offset := 0; len(a)+len(b) > 0; offset += rowLen { + na, nb := rowLen, rowLen + if na > len(a) { + na = len(a) + } + if nb > len(b) { + nb = len(b) + } + sa := bytestr(offset, a[0:na]) + sb := bytestr(offset, b[0:nb]) + if sa != sb { + s += fmt.Sprintf("-%v\n+%v\n", sa, sb) + } + a = a[na:] + b = b[nb:] + } + return s +} + +func TestWriter(t *testing.T) { +testLoop: + for i, test := range writerTests { + expected, err := ioutil.ReadFile(test.file) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + continue + } + + buf := new(bytes.Buffer) + tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB + big := false + for j, entry := range test.entries { + big = big || entry.header.Size > 1<<10 + if err := tw.WriteHeader(entry.header); err != nil { + t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err) + continue testLoop + } + if _, err := io.WriteString(tw, entry.contents); err != nil { + t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err) + continue testLoop + } + } + // Only interested in Close failures for the small tests. + if err := tw.Close(); err != nil && !big { + t.Errorf("test %d: Failed closing archive: %v", i, err) + continue testLoop + } + + actual := buf.Bytes() + if !bytes.Equal(expected, actual) { + t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v", + i, bytediff(expected, actual)) + } + if testing.Short() { // The second test is expensive. + break + } + } +} + +func TestPax(t *testing.T) { + // Create an archive with a large name + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + // Force a PAX long name to be written + longName := strings.Repeat("ab", 100) + contents := strings.Repeat(" ", int(hdr.Size)) + hdr.Name = longName + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long file name") + } +} + +func TestPaxSymlink(t *testing.T) { + // Create an archive with a large linkname + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + hdr.Typeflag = TypeSymlink + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + // Force a PAX long linkname to be written + longLinkname := strings.Repeat("1234567890/1234567890", 10) + hdr.Linkname = longLinkname + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Linkname != longLinkname { + t.Fatal("Couldn't recover long link name") + } +} + +func TestPaxNonAscii(t *testing.T) { + // Create an archive with non ascii. These should trigger a pax header + // because pax headers have a defined utf-8 encoding. + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + + // some sample data + chineseFilename := "文件名" + chineseGroupname := "組" + chineseUsername := "用戶名" + + hdr.Name = chineseFilename + hdr.Gname = chineseGroupname + hdr.Uname = chineseUsername + + contents := strings.Repeat(" ", int(hdr.Size)) + + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != chineseFilename { + t.Fatal("Couldn't recover unicode name") + } + if hdr.Gname != chineseGroupname { + t.Fatal("Couldn't recover unicode group") + } + if hdr.Uname != chineseUsername { + t.Fatal("Couldn't recover unicode user") + } +} + +func TestPaxXattrs(t *testing.T) { + xattrs := map[string]string{ + "user.key": "value", + } + + // Create an archive with an xattr + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + contents := "Kilts" + hdr.Xattrs = xattrs + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get the xattrs back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(hdr.Xattrs, xattrs) { + t.Fatalf("xattrs did not survive round trip: got %+v, want %+v", + hdr.Xattrs, xattrs) + } +} + +func TestPaxHeadersSorted(t *testing.T) { + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + contents := strings.Repeat(" ", int(hdr.Size)) + + hdr.Xattrs = map[string]string{ + "foo": "foo", + "bar": "bar", + "baz": "baz", + "qux": "qux", + } + + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { + t.Fatal("Expected at least one PAX header to be written.") + } + + // xattr bar should always appear before others + indices := []int{ + bytes.Index(buf.Bytes(), []byte("bar=bar")), + bytes.Index(buf.Bytes(), []byte("baz=baz")), + bytes.Index(buf.Bytes(), []byte("foo=foo")), + bytes.Index(buf.Bytes(), []byte("qux=qux")), + } + if !sort.IntsAreSorted(indices) { + t.Fatal("PAX headers are not sorted") + } +} + +func TestUSTARLongName(t *testing.T) { + // Create an archive with a path that failed to split with USTAR extension in previous versions. + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + hdr.Typeflag = TypeDir + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + // Force a PAX long name to be written. The name was taken from a practical example + // that fails and replaced ever char through numbers to anonymize the sample. + longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/" + hdr.Name = longName + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long name") + } +} + +func TestValidTypeflagWithPAXHeader(t *testing.T) { + var buffer bytes.Buffer + tw := NewWriter(&buffer) + + fileName := strings.Repeat("ab", 100) + + hdr := &Header{ + Name: fileName, + Size: 4, + Typeflag: 0, + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("Failed to write header: %s", err) + } + if _, err := tw.Write([]byte("fooo")); err != nil { + t.Fatalf("Failed to write the file's data: %s", err) + } + tw.Close() + + tr := NewReader(&buffer) + + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("Failed to read header: %s", err) + } + if header.Typeflag != 0 { + t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag) + } + } +} + +func TestWriteAfterClose(t *testing.T) { + var buffer bytes.Buffer + tw := NewWriter(&buffer) + + hdr := &Header{ + Name: "small.txt", + Size: 5, + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("Failed to write header: %s", err) + } + tw.Close() + if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose { + t.Fatalf("Write: got %v; want ErrWriteAfterClose", err) + } +} + +func TestSplitUSTARPath(t *testing.T) { + var sr = strings.Repeat + + var vectors = []struct { + input string // Input path + prefix string // Expected output prefix + suffix string // Expected output suffix + ok bool // Split success? + }{ + {"", "", "", false}, + {"abc", "", "", false}, + {"用戶名", "", "", false}, + {sr("a", fileNameSize), "", "", false}, + {sr("a", fileNameSize) + "/", "", "", false}, + {sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true}, + {sr("a", fileNamePrefixSize) + "/", "", "", false}, + {sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true}, + {sr("a", fileNameSize+1), "", "", false}, + {sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true}, + {sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize), + sr("a", fileNamePrefixSize), sr("b", fileNameSize), true}, + {sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false}, + {sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true}, + } + + for _, v := range vectors { + prefix, suffix, ok := splitUSTARPath(v.input) + if prefix != v.prefix || suffix != v.suffix || ok != v.ok { + t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)", + v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok) + } + } +} + +func TestFormatPAXRecord(t *testing.T) { + var medName = strings.Repeat("CD", 50) + var longName = strings.Repeat("AB", 100) + + var vectors = []struct { + inputKey string + inputVal string + output string + }{ + {"k", "v", "6 k=v\n"}, + {"path", "/etc/hosts", "19 path=/etc/hosts\n"}, + {"path", longName, "210 path=" + longName + "\n"}, + {"path", medName, "110 path=" + medName + "\n"}, + {"foo", "ba", "9 foo=ba\n"}, + {"foo", "bar", "11 foo=bar\n"}, + {"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"}, + {"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"}, + {"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"}, + {"\x00hello", "\x00world", "17 \x00hello=\x00world\n"}, + } + + for _, v := range vectors { + output := formatPAXRecord(v.inputKey, v.inputVal) + if output != v.output { + t.Errorf("formatPAXRecord(%q, %q): got %q, want %q", + v.inputKey, v.inputVal, output, v.output) + } + } +} + +func TestFitsInBase256(t *testing.T) { + var vectors = []struct { + input int64 + width int + ok bool + }{ + {+1, 8, true}, + {0, 8, true}, + {-1, 8, true}, + {1 << 56, 8, false}, + {(1 << 56) - 1, 8, true}, + {-1 << 56, 8, true}, + {(-1 << 56) - 1, 8, false}, + {121654, 8, true}, + {-9849849, 8, true}, + {math.MaxInt64, 9, true}, + {0, 9, true}, + {math.MinInt64, 9, true}, + {math.MaxInt64, 12, true}, + {0, 12, true}, + {math.MinInt64, 12, true}, + } + + for _, v := range vectors { + ok := fitsInBase256(v.width, v.input) + if ok != v.ok { + t.Errorf("checkNumeric(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok) + } + } +} + +func TestFormatNumeric(t *testing.T) { + var vectors = []struct { + input int64 + output string + ok bool + }{ + // Test base-256 (binary) encoded values. + {-1, "\xff", true}, + {-1, "\xff\xff", true}, + {-1, "\xff\xff\xff", true}, + {(1 << 0), "0", false}, + {(1 << 8) - 1, "\x80\xff", true}, + {(1 << 8), "0\x00", false}, + {(1 << 16) - 1, "\x80\xff\xff", true}, + {(1 << 16), "00\x00", false}, + {-1 * (1 << 0), "\xff", true}, + {-1*(1<<0) - 1, "0", false}, + {-1 * (1 << 8), "\xff\x00", true}, + {-1*(1<<8) - 1, "0\x00", false}, + {-1 * (1 << 16), "\xff\x00\x00", true}, + {-1*(1<<16) - 1, "00\x00", false}, + {537795476381659745, "0000000\x00", false}, + {537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true}, + {-615126028225187231, "0000000\x00", false}, + {-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true}, + {math.MaxInt64, "0000000\x00", false}, + {math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true}, + {math.MinInt64, "0000000\x00", false}, + {math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true}, + {math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true}, + {math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true}, + } + + for _, v := range vectors { + var f formatter + output := make([]byte, len(v.output)) + f.formatNumeric(output, v.input) + ok := (f.err == nil) + if ok != v.ok { + if v.ok { + t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input) + } else { + t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input) + } + } + if string(output) != v.output { + t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output) + } + } +} + +func TestFormatPAXTime(t *testing.T) { + t1 := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC) + t2 := time.Date(2000, 1, 1, 11, 0, 0, 100, time.UTC) + t3 := time.Date(1960, 1, 1, 11, 0, 0, 0, time.UTC) + t4 := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) + verify := func(time time.Time, s string) { + p := formatPAXTime(time) + if p != s { + t.Errorf("for %v, expected %s, got %s", time, s, p) + } + } + verify(t1, "946724400") + verify(t2, "946724400.000000100") + verify(t3, "-315579600") + verify(t4, "0") +} diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 0000000..2be34af --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,280 @@ +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +const ( + WRITE_DAC = 0x40000 + WRITE_OWNER = 0x80000 + ACCESS_SYSTEM_SECURITY = 0x1000000 +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamId struct { + StreamId uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. +func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). +func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{"BackupRead", r.f.Name(), err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. +func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. +func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). +func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/backup_test.go b/vendor/github.com/Microsoft/go-winio/backup_test.go new file mode 100644 index 0000000..cc5a0c5 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup_test.go @@ -0,0 +1,255 @@ +package winio + +import ( + "io" + "io/ioutil" + "os" + "syscall" + "testing" +) + +var testFileName string + +func TestMain(m *testing.M) { + f, err := ioutil.TempFile("", "tmp") + if err != nil { + panic(err) + } + testFileName = f.Name() + f.Close() + defer os.Remove(testFileName) + os.Exit(m.Run()) +} + +func makeTestFile(makeADS bool) error { + os.Remove(testFileName) + f, err := os.Create(testFileName) + if err != nil { + return err + } + defer f.Close() + _, err = f.Write([]byte("testing 1 2 3\n")) + if err != nil { + return err + } + if makeADS { + a, err := os.Create(testFileName + ":ads.txt") + if err != nil { + return err + } + defer a.Close() + _, err = a.Write([]byte("alternate data stream\n")) + if err != nil { + return err + } + } + return nil +} + +func TestBackupRead(t *testing.T) { + err := makeTestFile(true) + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := NewBackupFileReader(f, false) + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if len(b) == 0 { + t.Fatal("no data") + } +} + +func TestBackupStreamRead(t *testing.T) { + err := makeTestFile(true) + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := NewBackupFileReader(f, false) + defer r.Close() + + br := NewBackupStreamReader(r) + gotData := false + gotAltData := false + for { + hdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + + switch hdr.Id { + case BackupData: + if gotData { + t.Fatal("duplicate data") + } + if hdr.Name != "" { + t.Fatalf("unexpected name %s", hdr.Name) + } + b, err := ioutil.ReadAll(br) + if err != nil { + t.Fatal(err) + } + if string(b) != "testing 1 2 3\n" { + t.Fatalf("incorrect data %v", b) + } + gotData = true + case BackupAlternateData: + if gotAltData { + t.Fatal("duplicate alt data") + } + if hdr.Name != ":ads.txt:$DATA" { + t.Fatalf("incorrect name %s", hdr.Name) + } + b, err := ioutil.ReadAll(br) + if err != nil { + t.Fatal(err) + } + if string(b) != "alternate data stream\n" { + t.Fatalf("incorrect data %v", b) + } + gotAltData = true + default: + t.Fatalf("unknown stream ID %d", hdr.Id) + } + } + if !gotData || !gotAltData { + t.Fatal("missing stream") + } +} + +func TestBackupStreamWrite(t *testing.T) { + f, err := os.Create(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + w := NewBackupFileWriter(f, false) + defer w.Close() + + data := "testing 1 2 3\n" + altData := "alternate stream\n" + + br := NewBackupStreamWriter(w) + err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))}) + if err != nil { + t.Fatal(err) + } + n, err := br.Write([]byte(data)) + if err != nil { + t.Fatal(err) + } + if n != len(data) { + t.Fatal("short write") + } + + err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"}) + if err != nil { + t.Fatal(err) + } + n, err = br.Write([]byte(altData)) + if err != nil { + t.Fatal(err) + } + if n != len(altData) { + t.Fatal("short write") + } + + f.Close() + + b, err := ioutil.ReadFile(testFileName) + if err != nil { + t.Fatal(err) + } + if string(b) != data { + t.Fatalf("wrong data %v", b) + } + + b, err = ioutil.ReadFile(testFileName + ":ads.txt") + if err != nil { + t.Fatal(err) + } + if string(b) != altData { + t.Fatalf("wrong data %v", b) + } +} + +func makeSparseFile() error { + os.Remove(testFileName) + f, err := os.Create(testFileName) + if err != nil { + return err + } + defer f.Close() + + const ( + FSCTL_SET_SPARSE = 0x000900c4 + FSCTL_SET_ZERO_DATA = 0x000980c8 + ) + + err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil) + if err != nil { + return err + } + + _, err = f.Write([]byte("testing 1 2 3\n")) + if err != nil { + return err + } + + _, err = f.Seek(1000000, 0) + if err != nil { + return err + } + + _, err = f.Write([]byte("more data later\n")) + if err != nil { + return err + } + + return nil +} + +func TestBackupSparseFile(t *testing.T) { + err := makeSparseFile() + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := NewBackupFileReader(f, false) + defer r.Close() + + br := NewBackupStreamReader(r) + for { + hdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + + t.Log(hdr) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go new file mode 100644 index 0000000..d39eccf --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go @@ -0,0 +1,4 @@ +// +build !windows +// This file only exists to allow go get on non-Windows platforms. + +package backuptar diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go new file mode 100644 index 0000000..53da908 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -0,0 +1,439 @@ +// +build windows + +package backuptar + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface +) + +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +const ( + hdrFileAttributes = "fileattr" + hdrSecurityDescriptor = "sd" + hdrRawSecurityDescriptor = "rawsd" + hdrMountPoint = "mountpoint" + hdrEaPrefix = "xattr." +) + +func writeZeroes(w io.Writer, count int64) error { + buf := make([]byte, 8192) + c := len(buf) + for i := int64(0); i < count; i += int64(c) { + if int64(c) > count-i { + c = int(count - i) + } + _, err := w.Write(buf[:c]) + if err != nil { + return err + } + } + return nil +} + +func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { + curOffset := int64(0) + for { + bhdr, err := br.Next() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if bhdr.Id != winio.BackupSparseBlock { + return fmt.Errorf("unexpected stream %d", bhdr.Id) + } + + // archive/tar does not support writing sparse files + // so just write zeroes to catch up to the current offset. + err = writeZeroes(t, bhdr.Offset-curOffset) + if bhdr.Size == 0 { + break + } + n, err := io.Copy(t, br) + if err != nil { + return err + } + curOffset = bhdr.Offset + n + } + return nil +} + +// BasicInfoHeader creates a tar header from basic file information. +func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header { + hdr := &tar.Header{ + Name: filepath.ToSlash(name), + Size: size, + Typeflag: tar.TypeReg, + ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()), + ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()), + AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()), + CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()), + Winheaders: make(map[string]string), + } + hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) + + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + hdr.Mode |= c_ISDIR + hdr.Size = 0 + hdr.Typeflag = tar.TypeDir + } + return hdr +} + +// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. +// +// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. +// +// The additional Win32 metadata is: +// +// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value +// +// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format +// +// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) +func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { + name = filepath.ToSlash(name) + hdr := BasicInfoHeader(name, size, fileInfo) + + // If r can be seeked, then this function is two-pass: pass 1 collects the + // tar header data, and pass 2 copies the data stream. If r cannot be + // seeked, then some header data (in particular EAs) will be silently lost. + var ( + restartPos int64 + err error + ) + sr, readTwice := r.(io.Seeker) + if readTwice { + if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil { + readTwice = false + } + } + + br := winio.NewBackupStreamReader(r) + var dataHdr *winio.BackupHeader + for dataHdr == nil { + bhdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupData: + hdr.Mode |= c_ISREG + if !readTwice { + dataHdr = bhdr + } + case winio.BackupSecurity: + sd, err := ioutil.ReadAll(br) + if err != nil { + return err + } + hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) + + case winio.BackupReparseData: + hdr.Mode |= c_ISLNK + hdr.Typeflag = tar.TypeSymlink + reparseBuffer, err := ioutil.ReadAll(br) + rp, err := winio.DecodeReparsePoint(reparseBuffer) + if err != nil { + return err + } + if rp.IsMountPoint { + hdr.Winheaders[hdrMountPoint] = "1" + } + hdr.Linkname = rp.Target + + case winio.BackupEaData: + eab, err := ioutil.ReadAll(br) + if err != nil { + return err + } + eas, err := winio.DecodeExtendedAttributes(eab) + if err != nil { + return err + } + for _, ea := range eas { + // Use base64 encoding for the binary value. Note that there + // is no way to encode the EA's flags, since their use doesn't + // make any sense for persisted EAs. + hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value) + } + + case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: + // ignore these streams + default: + return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id) + } + } + + err = t.WriteHeader(hdr) + if err != nil { + return err + } + + if readTwice { + // Get back to the data stream. + if _, err = sr.Seek(restartPos, io.SeekStart); err != nil { + return err + } + for dataHdr == nil { + bhdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if bhdr.Id == winio.BackupData { + dataHdr = bhdr + } + } + } + + if dataHdr != nil { + // A data stream was found. Copy the data. + if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 { + if size != dataHdr.Size { + return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) + } + _, err = io.Copy(t, br) + if err != nil { + return err + } + } else { + err = copySparse(t, br) + if err != nil { + return err + } + } + } + + // Look for streams after the data stream. The only ones we handle are alternate data streams. + // Other streams may have metadata that could be serialized, but the tar header has already + // been written. In practice, this means that we don't get EA or TXF metadata. + for { + bhdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupAlternateData: + altName := bhdr.Name + if strings.HasSuffix(altName, ":$DATA") { + altName = altName[:len(altName)-len(":$DATA")] + } + if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 { + hdr = &tar.Header{ + Name: name + altName, + Mode: hdr.Mode, + Typeflag: tar.TypeReg, + Size: bhdr.Size, + ModTime: hdr.ModTime, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + err = t.WriteHeader(hdr) + if err != nil { + return err + } + _, err = io.Copy(t, br) + if err != nil { + return err + } + + } else { + // Unsupported for now, since the size of the alternate stream is not present + // in the backup stream until after the data has been read. + return errors.New("tar of sparse alternate data streams is unsupported") + } + case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: + // ignore these streams + default: + return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id) + } + } + return nil +} + +// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by +// WriteTarFileFromBackupStream. +func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) { + name = hdr.Name + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + size = hdr.Size + } + fileInfo = &winio.FileBasicInfo{ + LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()), + LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()), + ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()), + CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()), + } + if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok { + attr, err := strconv.ParseUint(attrStr, 10, 32) + if err != nil { + return "", 0, nil, err + } + fileInfo.FileAttributes = uintptr(attr) + } else { + if hdr.Typeflag == tar.TypeDir { + fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY + } + } + return +} + +// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple +// tar file entries in order to collect all the alternate data streams for the file, it returns the next +// tar file that was not processed, or io.EOF is there are no more. +func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { + bw := winio.NewBackupStreamWriter(w) + var sd []byte + var err error + // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written + // by this library will have raw binary for the security descriptor. + if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok { + sd, err = winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return nil, err + } + } + if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok { + sd, err = base64.StdEncoding.DecodeString(sdraw) + if err != nil { + return nil, err + } + } + if len(sd) != 0 { + bhdr := winio.BackupHeader{ + Id: winio.BackupSecurity, + Size: int64(len(sd)), + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(sd) + if err != nil { + return nil, err + } + } + var eas []winio.ExtendedAttribute + for k, v := range hdr.Winheaders { + if !strings.HasPrefix(k, hdrEaPrefix) { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, err + } + eas = append(eas, winio.ExtendedAttribute{ + Name: k[len(hdrEaPrefix):], + Value: data, + }) + } + if len(eas) != 0 { + eadata, err := winio.EncodeExtendedAttributes(eas) + if err != nil { + return nil, err + } + bhdr := winio.BackupHeader{ + Id: winio.BackupEaData, + Size: int64(len(eadata)), + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(eadata) + if err != nil { + return nil, err + } + } + if hdr.Typeflag == tar.TypeSymlink { + _, isMountPoint := hdr.Winheaders[hdrMountPoint] + rp := winio.ReparsePoint{ + Target: filepath.FromSlash(hdr.Linkname), + IsMountPoint: isMountPoint, + } + reparse := winio.EncodeReparsePoint(&rp) + bhdr := winio.BackupHeader{ + Id: winio.BackupReparseData, + Size: int64(len(reparse)), + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(reparse) + if err != nil { + return nil, err + } + } + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + bhdr := winio.BackupHeader{ + Id: winio.BackupData, + Size: hdr.Size, + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.Copy(bw, t) + if err != nil { + return nil, err + } + } + // Copy all the alternate data streams and return the next non-ADS header. + for { + ahdr, err := t.Next() + if err != nil { + return nil, err + } + if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { + return ahdr, nil + } + bhdr := winio.BackupHeader{ + Id: winio.BackupAlternateData, + Size: ahdr.Size, + Name: ahdr.Name[len(hdr.Name):] + ":$DATA", + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.Copy(bw, t) + if err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go new file mode 100644 index 0000000..e04d47f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go @@ -0,0 +1,84 @@ +package backuptar + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" +) + +func ensurePresent(t *testing.T, m map[string]string, keys ...string) { + for _, k := range keys { + if _, ok := m[k]; !ok { + t.Error(k, "not present in tar header") + } + } +} + +func TestRoundTrip(t *testing.T) { + f, err := ioutil.TempFile("", "tst") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.Remove(f.Name()) + + if _, err = f.Write([]byte("testing 1 2 3\n")); err != nil { + t.Fatal(err) + } + + if _, err = f.Seek(0, 0); err != nil { + t.Fatal(err) + } + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + bi, err := winio.GetFileBasicInfo(f) + if err != nil { + t.Fatal(err) + } + + br := winio.NewBackupFileReader(f, true) + defer br.Close() + + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + + err = WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi) + if err != nil { + t.Fatal(err) + } + + tr := tar.NewReader(&buf) + hdr, err := tr.Next() + if err != nil { + t.Fatal(err) + } + + name, size, bi2, err := FileInfoFromHeader(hdr) + if err != nil { + t.Fatal(err) + } + + if name != filepath.ToSlash(f.Name()) { + t.Errorf("got name %s, expected %s", name, filepath.ToSlash(f.Name())) + } + + if size != fi.Size() { + t.Errorf("got size %d, expected %d", size, fi.Size()) + } + + if !reflect.DeepEqual(*bi, *bi2) { + t.Errorf("got %#v, expected %#v", *bi, *bi2) + } + + ensurePresent(t, hdr.Winheaders, "fileattr", "rawsd") +} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 0000000..b37e930 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. +type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. +func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/ea_test.go b/vendor/github.com/Microsoft/go-winio/ea_test.go new file mode 100644 index 0000000..92d9d45 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/ea_test.go @@ -0,0 +1,89 @@ +package winio + +import ( + "io/ioutil" + "os" + "reflect" + "syscall" + "testing" + "unsafe" +) + +var ( + testEas = []ExtendedAttribute{ + {Name: "foo", Value: []byte("bar")}, + {Name: "fizz", Value: []byte("buzz")}, + } + + testEasEncoded = []byte{16, 0, 0, 0, 0, 3, 3, 0, 102, 111, 111, 0, 98, 97, 114, 0, 0, 0, 0, 0, 0, 4, 4, 0, 102, 105, 122, 122, 0, 98, 117, 122, 122, 0, 0, 0} + testEasNotPadded = testEasEncoded[0 : len(testEasEncoded)-3] + testEasTruncated = testEasEncoded[0:20] +) + +func Test_RoundTripEas(t *testing.T) { + b, err := EncodeExtendedAttributes(testEas) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEasEncoded, b) { + t.Fatalf("encoded mismatch %v %v", testEasEncoded, b) + } + eas, err := DecodeExtendedAttributes(b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func Test_EasDontNeedPaddingAtEnd(t *testing.T) { + eas, err := DecodeExtendedAttributes(testEasNotPadded) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func Test_TruncatedEasFailCorrectly(t *testing.T) { + _, err := DecodeExtendedAttributes(testEasTruncated) + if err == nil { + t.Fatal("expected error") + } +} + +func Test_NilEasEncodeAndDecodeAsNil(t *testing.T) { + b, err := EncodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(b) != 0 { + t.Fatal("expected empty") + } + eas, err := DecodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(eas) != 0 { + t.Fatal("expected empty") + } +} + +// Test_SetFileEa makes sure that the test buffer is actually parsable by NtSetEaFile. +func Test_SetFileEa(t *testing.T) { + f, err := ioutil.TempFile("", "winio") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + defer f.Close() + ntdll := syscall.MustLoadDLL("ntdll.dll") + ntSetEaFile := ntdll.MustFindProc("NtSetEaFile") + var iosb [2]uintptr + r, _, _ := ntSetEaFile.Call(f.Fd(), uintptr(unsafe.Pointer(&iosb[0])), uintptr(unsafe.Pointer(&testEasEncoded[0])), uintptr(len(testEasEncoded))) + if r != 0 { + t.Fatalf("NtSetEaFile failed with %08x", r) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 0000000..4334ff1 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,307 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. +type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return makeWin32File(h) +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// prepareIo prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing.isSet() { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing.isSet() { + err = ErrFileClosed + } + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. + timeoutIO() + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 0000000..b1d60ab --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,60 @@ +// +build windows + +package winio + +import ( + "os" + "runtime" + "syscall" + "unsafe" +) + +//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx +//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle + +const ( + fileBasicInfo = 0 + fileIDInfo = 0x12 +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + FileAttributes uintptr // includes padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. +func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return bi, nil +} + +// SetFileBasicInfo sets times and attributes for a file. +func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. +type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. +func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 0000000..82cbe7a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,424 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_NO_DATA = syscall.Errno(232) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cPIPE_ACCESS_DUPLEX = 0x3 + cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + + cPIPE_UNLIMITED_INSTANCES = 255 + + cNMPWAIT_USE_DEFAULT_WAIT = 0 + cNMPWAIT_NOWAIT = 1 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then the timeout +// is the default timeout established by the pipe server. +func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } + var err error + var h syscall.Handle + for { + h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != cERROR_PIPE_BUSY { + break + } + now := time.Now() + var ms uint32 + if absTimeout.IsZero() { + ms = cNMPWAIT_USE_DEFAULT_WAIT + } else if now.After(absTimeout) { + ms = cNMPWAIT_NOWAIT + } else { + ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) + } + err = waitNamedPipe(path, ms) + if err != nil { + if err == cERROR_SEM_TIMEOUT { + return nil, ErrTimeout + } + break + } + } + if err != nil { + return nil, &os.PathError{Op: "open", Path: path, Err: err} + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + var state uint32 + err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) + if err != nil { + return nil, err + } + + if state&cPIPE_READMODE_MESSAGE != 0 { + return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + securityDescriptor []byte + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED + if first { + flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE + } + + var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS + if c.MessageMode { + mode |= cPIPE_TYPE_MESSAGE + } + + sa := &syscall.SecurityAttributes{} + sa.Length = uint32(unsafe.Sizeof(*sa)) + if securityDescriptor != nil { + len := uint32(len(securityDescriptor)) + sa.SecurityDescriptor = localAlloc(0, len) + defer localFree(sa.SecurityDescriptor) + copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor) + } + h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. + ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. + p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { + err = ErrPipeListenerClosed + } + } + return p, err +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != cERROR_NO_DATA { + break + } + } + responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed + } + } + syscall.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contain configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. + MessageMode bool + + // InputBufferSize specifies the size the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size the input buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. +func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + // Immediately open and then close a client handle so that the named pipe is + // created but not currently accepting connections. + h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != nil { + syscall.Close(h) + return nil, err + } + syscall.Close(h2) + l := &win32PipeListener{ + firstHandle: h, + path: path, + securityDescriptor: sd, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, nil, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe_test.go b/vendor/github.com/Microsoft/go-winio/pipe_test.go new file mode 100644 index 0000000..c0d1a77 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe_test.go @@ -0,0 +1,453 @@ +package winio + +import ( + "bufio" + "io" + "net" + "os" + "syscall" + "testing" + "time" +) + +var testPipeName = `\\.\pipe\winiotestpipe` + +var aLongTimeAgo = time.Unix(1, 0) + +func TestDialUnknownFailsImmediately(t *testing.T) { + _, err := DialPipe(testPipeName, nil) + if err.(*os.PathError).Err != syscall.ENOENT { + t.Fatalf("expected ENOENT got %v", err) + } +} + +func TestDialListenerTimesOut(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + var d = time.Duration(10 * time.Millisecond) + _, err = DialPipe(testPipeName, &d) + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } +} + +func TestDialAccessDeniedWithRestrictedSD(t *testing.T) { + c := PipeConfig{ + SecurityDescriptor: "D:P(A;;0x1200FF;;;WD)", + } + l, err := ListenPipe(testPipeName, &c) + if err != nil { + t.Fatal(err) + } + defer l.Close() + _, err = DialPipe(testPipeName, nil) + if err.(*os.PathError).Err != syscall.ERROR_ACCESS_DENIED { + t.Fatalf("expected ERROR_ACCESS_DENIED, got %v", err) + } +} + +func getConnection(cfg *PipeConfig) (client net.Conn, server net.Conn, err error) { + l, err := ListenPipe(testPipeName, cfg) + if err != nil { + return + } + defer l.Close() + + type response struct { + c net.Conn + err error + } + ch := make(chan response) + go func() { + c, err := l.Accept() + ch <- response{c, err} + }() + + c, err := DialPipe(testPipeName, nil) + if err != nil { + return + } + + r := <-ch + if err = r.err; err != nil { + c.Close() + return + } + + client = c + server = r.c + return +} + +func TestReadTimeout(t *testing.T) { + c, s, err := getConnection(nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + + c.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) + + buf := make([]byte, 10) + _, err = c.Read(buf) + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } +} + +func server(l net.Listener, ch chan int) { + c, err := l.Accept() + if err != nil { + panic(err) + } + rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) + s, err := rw.ReadString('\n') + if err != nil { + panic(err) + } + _, err = rw.WriteString("got " + s) + if err != nil { + panic(err) + } + err = rw.Flush() + if err != nil { + panic(err) + } + c.Close() + ch <- 1 +} + +func TestFullListenDialReadWrite(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + ch := make(chan int) + go server(l, ch) + + c, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) + _, err = rw.WriteString("hello world\n") + if err != nil { + t.Fatal(err) + } + err = rw.Flush() + if err != nil { + t.Fatal(err) + } + + s, err := rw.ReadString('\n') + if err != nil { + t.Fatal(err) + } + ms := "got hello world\n" + if s != ms { + t.Errorf("expected '%s', got '%s'", ms, s) + } + + <-ch +} + +func TestCloseAbortsListen(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + + ch := make(chan error) + go func() { + _, err := l.Accept() + ch <- err + }() + + time.Sleep(30 * time.Millisecond) + l.Close() + + err = <-ch + if err != ErrPipeListenerClosed { + t.Fatalf("expected ErrPipeListenerClosed, got %v", err) + } +} + +func ensureEOFOnClose(t *testing.T, r io.Reader, w io.Closer) { + b := make([]byte, 10) + w.Close() + n, err := r.Read(b) + if n > 0 { + t.Errorf("unexpected byte count %d", n) + } + if err != io.EOF { + t.Errorf("expected EOF: %v", err) + } +} + +func TestCloseClientEOFServer(t *testing.T) { + c, s, err := getConnection(nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + ensureEOFOnClose(t, c, s) +} + +func TestCloseServerEOFClient(t *testing.T) { + c, s, err := getConnection(nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + ensureEOFOnClose(t, s, c) +} + +func TestCloseWriteEOF(t *testing.T) { + cfg := &PipeConfig{ + MessageMode: true, + } + c, s, err := getConnection(cfg) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + + type closeWriter interface { + CloseWrite() error + } + + err = c.(closeWriter).CloseWrite() + if err != nil { + t.Fatal(err) + } + + b := make([]byte, 10) + _, err = s.Read(b) + if err != io.EOF { + t.Fatal(err) + } +} + +func TestAcceptAfterCloseFails(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + l.Close() + _, err = l.Accept() + if err != ErrPipeListenerClosed { + t.Fatalf("expected ErrPipeListenerClosed, got %v", err) + } +} + +func TestDialTimesOutByDefault(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + _, err = DialPipe(testPipeName, nil) + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } +} + +func TestTimeoutPendingRead(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + serverDone := make(chan struct{}) + + go func() { + s, err := l.Accept() + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + s.Close() + close(serverDone) + }() + + client, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + clientErr := make(chan error) + go func() { + buf := make([]byte, 10) + _, err = client.Read(buf) + clientErr <- err + }() + + time.Sleep(100 * time.Millisecond) // make *sure* the pipe is reading before we set the deadline + client.SetReadDeadline(aLongTimeAgo) + + select { + case err = <-clientErr: + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out while waiting for read to cancel") + <-clientErr + } + <-serverDone +} + +func TestTimeoutPendingWrite(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + serverDone := make(chan struct{}) + + go func() { + s, err := l.Accept() + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + s.Close() + close(serverDone) + }() + + client, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + clientErr := make(chan error) + go func() { + _, err = client.Write([]byte("this should timeout")) + clientErr <- err + }() + + time.Sleep(100 * time.Millisecond) // make *sure* the pipe is writing before we set the deadline + client.SetWriteDeadline(aLongTimeAgo) + + select { + case err = <-clientErr: + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out while waiting for write to cancel") + <-clientErr + } + <-serverDone +} + +type CloseWriter interface { + CloseWrite() error +} + +func TestEchoWithMessaging(t *testing.T) { + c := PipeConfig{ + MessageMode: true, // Use message mode so that CloseWrite() is supported + InputBufferSize: 65536, // Use 64KB buffers to improve performance + OutputBufferSize: 65536, + } + l, err := ListenPipe(testPipeName, &c) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + listenerDone := make(chan bool) + clientDone := make(chan bool) + go func() { + // server echo + conn, e := l.Accept() + if e != nil { + t.Fatal(e) + } + defer conn.Close() + + time.Sleep(500 * time.Millisecond) // make *sure* we don't begin to read before eof signal is sent + io.Copy(conn, conn) + conn.(CloseWriter).CloseWrite() + close(listenerDone) + }() + timeout := 1 * time.Second + client, err := DialPipe(testPipeName, &timeout) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + go func() { + // client read back + bytes := make([]byte, 2) + n, e := client.Read(bytes) + if e != nil { + t.Fatal(e) + } + if n != 2 { + t.Fatalf("expected 2 bytes, got %v", n) + } + close(clientDone) + }() + + payload := make([]byte, 2) + payload[0] = 0 + payload[1] = 1 + + n, err := client.Write(payload) + if err != nil { + t.Fatal(err) + } + if n != 2 { + t.Fatalf("expected 2 bytes, got %v", n) + } + client.(CloseWriter).CloseWrite() + <-listenerDone + <-clientDone +} + +func TestConnectRace(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + go func() { + for { + s, err := l.Accept() + if err == ErrPipeListenerClosed { + return + } + + if err != nil { + t.Fatal(err) + } + s.Close() + } + }() + + for i := 0; i < 1000; i++ { + c, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + c.Close() + } +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 0000000..9c83d36 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,202 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. +func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. +func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. +func DisableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, 0) +} + +func enableDisableProcessPrivilege(names []string, action uint32) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + + p, _ := windows.GetCurrentProcess() + var token windows.Token + err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) + if err != nil { + return err + } + + defer token.Close() + return adjustPrivileges(token, privileges, action) +} + +func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + for _, p := range privileges { + binary.Write(&b, binary.LittleEndian, p) + binary.Write(&b, binary.LittleEndian, action) + } + prevState := make([]byte, b.Len()) + reqSize := uint32(0) + success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) + if !success { + return err + } + if err == ERROR_NOT_ALL_ASSIGNED { + return &PrivilegeError{privileges} + } + return nil +} + +func getPrivilegeName(luid uint64) string { + var nameBuffer [256]uint16 + bufSize := uint32(len(nameBuffer)) + err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) + if err != nil { + return fmt.Sprintf("", luid) + } + + var displayNameBuffer [256]uint16 + displayBufSize := uint32(len(displayNameBuffer)) + var langID uint32 + err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) + if err != nil { + return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) + } + + return string(utf16.Decode(displayNameBuffer[:displayBufSize])) +} + +func newThreadToken() (windows.Token, error) { + err := impersonateSelf(securityImpersonation) + if err != nil { + return 0, err + } + + var token windows.Token + err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + if err != nil { + rerr := revertToSelf() + if rerr != nil { + panic(rerr) + } + return 0, err + } + return token, nil +} + +func releaseThreadToken(h windows.Token) { + err := revertToSelf() + if err != nil { + panic(err) + } + h.Close() +} diff --git a/vendor/github.com/Microsoft/go-winio/privileges_test.go b/vendor/github.com/Microsoft/go-winio/privileges_test.go new file mode 100644 index 0000000..5e94c48 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privileges_test.go @@ -0,0 +1,17 @@ +package winio + +import "testing" + +func TestRunWithUnavailablePrivilege(t *testing.T) { + err := RunWithPrivilege("SeCreateTokenPrivilege", func() error { return nil }) + if _, ok := err.(*PrivilegeError); err == nil || !ok { + t.Fatal("expected PrivilegeError") + } +} + +func TestRunWithPrivileges(t *testing.T) { + err := RunWithPrivilege("SeShutdownPrivilege", func() error { return nil }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go new file mode 100644 index 0000000..fc1ee4d --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -0,0 +1,128 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "unicode/utf16" + "unsafe" +) + +const ( + reparseTagMountPoint = 0xA0000003 + reparseTagSymlink = 0xA000000C +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 +} + +// ReparsePoint describes a Win32 symlink or mount point. +type ReparsePoint struct { + Target string + IsMountPoint bool +} + +// UnsupportedReparsePointError is returned when trying to decode a non-symlink or +// mount point reparse point. +type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. +func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. + target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 0000000..db1b370 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const ( + cERROR_NONE_MAPPED = syscall.Errno(1332) +) + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch e.Err { + case cERROR_NONE_MAPPED: + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +// LookupSidByName looks up the SID of an account by name +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + localFree(uintptr(unsafe.Pointer(strBuffer))) + return sid, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + var sdBuffer uintptr + err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + if err != nil { + return nil, &SddlConversionError{sddl, err} + } + defer localFree(sdBuffer) + sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) + copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) + return sd, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + var sddl *uint16 + // The returned string length seems to including an aribtrary number of terminating NULs. + // Don't use it. + err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/sd_test.go b/vendor/github.com/Microsoft/go-winio/sd_test.go new file mode 100644 index 0000000..847db3c --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd_test.go @@ -0,0 +1,26 @@ +package winio + +import "testing" + +func TestLookupInvalidSid(t *testing.T) { + _, err := LookupSidByName(".\\weoifjdsklfj") + aerr, ok := err.(*AccountLookupError) + if !ok || aerr.Err != cERROR_NONE_MAPPED { + t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) + } +} + +func TestLookupValidSid(t *testing.T) { + sid, err := LookupSidByName("Everyone") + if err != nil || sid != "S-1-1-0" { + t.Fatal("expected S-1-1-0, got %s, %s", sid, err) + } +} + +func TestLookupEmptyNameFails(t *testing.T) { + _, err := LookupSidByName(".\\weoifjdsklfj") + aerr, ok := err.(*AccountLookupError) + if !ok || aerr.Err != cERROR_NONE_MAPPED { + t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 0000000..20d64cf --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go diff --git a/vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go new file mode 100644 index 0000000..977fef8 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go @@ -0,0 +1,901 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hard-coding unicode mode for VHD library. + +// +build ignore + +/* +mksyscall_windows generates windows system call bodies + +It parses all files specified on command line containing function +prototypes (like syscall_windows.go) and prints system call bodies +to standard output. + +The prototypes are marked by lines beginning with "//sys" and read +like func declarations if //sys is replaced by func, but: + +* The parameter lists must give a name for each argument. This + includes return parameters. + +* The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + +* If the return parameter is an error number, it must be named err. + +* If go func name needs to be different from it's winapi dll name, + the winapi name could be specified at the end, after "=" sign, like + //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA + +* Each function that returns err needs to supply a condition, that + return value of winapi will be tested against to detect failure. + This would set err to windows "last-error", otherwise it will be nil. + The value can be provided at end of //sys declaration, like + //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA + and is [failretval==0] by default. + +Usage: + mksyscall_windows [flags] [path ...] + +The flags are: + -output + Specify output file name (outputs to console if blank). + -trace + Generate print statement after every syscall. +*/ +package main + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "go/format" + "go/parser" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "text/template" +) + +var ( + filename = flag.String("output", "", "output file name (standard output if omitted)") + printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall") + systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory") +) + +func trim(s string) string { + return strings.Trim(s, " \t") +} + +var packageName string + +func packagename() string { + return packageName +} + +func syscalldot() string { + if packageName == "syscall" { + return "" + } + return "syscall." +} + +// Param is function parameter +type Param struct { + Name string + Type string + fn *Fn + tmpVarIdx int +} + +// tmpVar returns temp variable name that will be used to represent p during syscall. +func (p *Param) tmpVar() string { + if p.tmpVarIdx < 0 { + p.tmpVarIdx = p.fn.curTmpVarIdx + p.fn.curTmpVarIdx++ + } + return fmt.Sprintf("_p%d", p.tmpVarIdx) +} + +// BoolTmpVarCode returns source code for bool temp variable. +func (p *Param) BoolTmpVarCode() string { + const code = `var %s uint32 + if %s { + %s = 1 + } else { + %s = 0 + }` + tmp := p.tmpVar() + return fmt.Sprintf(code, tmp, p.Name, tmp, tmp) +} + +// SliceTmpVarCode returns source code for slice temp variable. +func (p *Param) SliceTmpVarCode() string { + const code = `var %s *%s + if len(%s) > 0 { + %s = &%s[0] + }` + tmp := p.tmpVar() + return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name) +} + +// StringTmpVarCode returns source code for string temp variable. +func (p *Param) StringTmpVarCode() string { + errvar := p.fn.Rets.ErrorVarName() + if errvar == "" { + errvar = "_" + } + tmp := p.tmpVar() + const code = `var %s %s + %s, %s = %s(%s)` + s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name) + if errvar == "-" { + return s + } + const morecode = ` + if %s != nil { + return + }` + return s + fmt.Sprintf(morecode, errvar) +} + +// TmpVarCode returns source code for temp variable. +func (p *Param) TmpVarCode() string { + switch { + case p.Type == "bool": + return p.BoolTmpVarCode() + case strings.HasPrefix(p.Type, "[]"): + return p.SliceTmpVarCode() + default: + return "" + } +} + +// TmpVarHelperCode returns source code for helper's temp variable. +func (p *Param) TmpVarHelperCode() string { + if p.Type != "string" { + return "" + } + return p.StringTmpVarCode() +} + +// SyscallArgList returns source code fragments representing p parameter +// in syscall. Slices are translated into 2 syscall parameters: pointer to +// the first element and length. +func (p *Param) SyscallArgList() []string { + t := p.HelperType() + var s string + switch { + case t[0] == '*': + s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name) + case t == "bool": + s = p.tmpVar() + case strings.HasPrefix(t, "[]"): + return []string{ + fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()), + fmt.Sprintf("uintptr(len(%s))", p.Name), + } + default: + s = p.Name + } + return []string{fmt.Sprintf("uintptr(%s)", s)} +} + +// IsError determines if p parameter is used to return error. +func (p *Param) IsError() bool { + return p.Name == "err" && p.Type == "error" +} + +// HelperType returns type of parameter p used in helper function. +func (p *Param) HelperType() string { + if p.Type == "string" { + return p.fn.StrconvType() + } + return p.Type +} + +// join concatenates parameters ps into a string with sep separator. +// Each parameter is converted into string by applying fn to it +// before conversion. +func join(ps []*Param, fn func(*Param) string, sep string) string { + if len(ps) == 0 { + return "" + } + a := make([]string, 0) + for _, p := range ps { + a = append(a, fn(p)) + } + return strings.Join(a, sep) +} + +// Rets describes function return parameters. +type Rets struct { + Name string + Type string + ReturnsError bool + FailCond string +} + +// ErrorVarName returns error variable name for r. +func (r *Rets) ErrorVarName() string { + if r.ReturnsError { + return "err" + } + if r.Type == "error" { + return r.Name + } + return "" +} + +// ToParams converts r into slice of *Param. +func (r *Rets) ToParams() []*Param { + ps := make([]*Param, 0) + if len(r.Name) > 0 { + ps = append(ps, &Param{Name: r.Name, Type: r.Type}) + } + if r.ReturnsError { + ps = append(ps, &Param{Name: "err", Type: "error"}) + } + return ps +} + +// List returns source code of syscall return parameters. +func (r *Rets) List() string { + s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ") + if len(s) > 0 { + s = "(" + s + ")" + } + return s +} + +// PrintList returns source code of trace printing part correspondent +// to syscall return values. +func (r *Rets) PrintList() string { + return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +} + +// SetReturnValuesCode returns source code that accepts syscall return values. +func (r *Rets) SetReturnValuesCode() string { + if r.Name == "" && !r.ReturnsError { + return "" + } + retvar := "r0" + if r.Name == "" { + retvar = "r1" + } + errvar := "_" + if r.ReturnsError { + errvar = "e1" + } + return fmt.Sprintf("%s, _, %s := ", retvar, errvar) +} + +func (r *Rets) useLongHandleErrorCode(retvar string) string { + const code = `if %s { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = %sEINVAL + } + }` + cond := retvar + " == 0" + if r.FailCond != "" { + cond = strings.Replace(r.FailCond, "failretval", retvar, 1) + } + return fmt.Sprintf(code, cond, syscalldot()) +} + +// SetErrorCode returns source code that sets return parameters. +func (r *Rets) SetErrorCode() string { + const code = `if r0 != 0 { + %s = %sErrno(r0) + }` + if r.Name == "" && !r.ReturnsError { + return "" + } + if r.Name == "" { + return r.useLongHandleErrorCode("r1") + } + if r.Type == "error" { + return fmt.Sprintf(code, r.Name, syscalldot()) + } + s := "" + switch { + case r.Type[0] == '*': + s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type) + case r.Type == "bool": + s = fmt.Sprintf("%s = r0 != 0", r.Name) + default: + s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type) + } + if !r.ReturnsError { + return s + } + return s + "\n\t" + r.useLongHandleErrorCode(r.Name) +} + +// Fn describes syscall function. +type Fn struct { + Name string + Params []*Param + Rets *Rets + PrintTrace bool + dllname string + dllfuncname string + src string + // TODO: get rid of this field and just use parameter index instead + curTmpVarIdx int // insure tmp variables have uniq names +} + +// extractParams parses s to extract function parameters. +func extractParams(s string, f *Fn) ([]*Param, error) { + s = trim(s) + if s == "" { + return nil, nil + } + a := strings.Split(s, ",") + ps := make([]*Param, len(a)) + for i := range ps { + s2 := trim(a[i]) + b := strings.Split(s2, " ") + if len(b) != 2 { + b = strings.Split(s2, "\t") + if len(b) != 2 { + return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"") + } + } + ps[i] = &Param{ + Name: trim(b[0]), + Type: trim(b[1]), + fn: f, + tmpVarIdx: -1, + } + } + return ps, nil +} + +// extractSection extracts text out of string s starting after start +// and ending just before end. found return value will indicate success, +// and prefix, body and suffix will contain correspondent parts of string s. +func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) { + s = trim(s) + if strings.HasPrefix(s, string(start)) { + // no prefix + body = s[1:] + } else { + a := strings.SplitN(s, string(start), 2) + if len(a) != 2 { + return "", "", s, false + } + prefix = a[0] + body = a[1] + } + a := strings.SplitN(body, string(end), 2) + if len(a) != 2 { + return "", "", "", false + } + return prefix, a[0], a[1], true +} + +// newFn parses string s and return created function Fn. +func newFn(s string) (*Fn, error) { + s = trim(s) + f := &Fn{ + Rets: &Rets{}, + src: s, + PrintTrace: *printTraceFlag, + } + // function name and args + prefix, body, s, found := extractSection(s, '(', ')') + if !found || prefix == "" { + return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"") + } + f.Name = prefix + var err error + f.Params, err = extractParams(body, f) + if err != nil { + return nil, err + } + // return values + _, body, s, found = extractSection(s, '(', ')') + if found { + r, err := extractParams(body, f) + if err != nil { + return nil, err + } + switch len(r) { + case 0: + case 1: + if r[0].IsError() { + f.Rets.ReturnsError = true + } else { + f.Rets.Name = r[0].Name + f.Rets.Type = r[0].Type + } + case 2: + if !r[1].IsError() { + return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"") + } + f.Rets.ReturnsError = true + f.Rets.Name = r[0].Name + f.Rets.Type = r[0].Type + default: + return nil, errors.New("Too many return values in \"" + f.src + "\"") + } + } + // fail condition + _, body, s, found = extractSection(s, '[', ']') + if found { + f.Rets.FailCond = body + } + // dll and dll function names + s = trim(s) + if s == "" { + return f, nil + } + if !strings.HasPrefix(s, "=") { + return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + } + s = trim(s[1:]) + a := strings.Split(s, ".") + switch len(a) { + case 1: + f.dllfuncname = a[0] + case 2: + f.dllname = a[0] + f.dllfuncname = a[1] + default: + return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + } + return f, nil +} + +// DLLName returns DLL name for function f. +func (f *Fn) DLLName() string { + if f.dllname == "" { + return "kernel32" + } + return f.dllname +} + +// DLLName returns DLL function name for function f. +func (f *Fn) DLLFuncName() string { + if f.dllfuncname == "" { + return f.Name + } + return f.dllfuncname +} + +// ParamList returns source code for function f parameters. +func (f *Fn) ParamList() string { + return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ") +} + +// HelperParamList returns source code for helper function f parameters. +func (f *Fn) HelperParamList() string { + return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ") +} + +// ParamPrintList returns source code of trace printing part correspondent +// to syscall input parameters. +func (f *Fn) ParamPrintList() string { + return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +} + +// ParamCount return number of syscall parameters for function f. +func (f *Fn) ParamCount() int { + n := 0 + for _, p := range f.Params { + n += len(p.SyscallArgList()) + } + return n +} + +// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/... +// to use. It returns parameter count for correspondent SyscallX function. +func (f *Fn) SyscallParamCount() int { + n := f.ParamCount() + switch { + case n <= 3: + return 3 + case n <= 6: + return 6 + case n <= 9: + return 9 + case n <= 12: + return 12 + case n <= 15: + return 15 + default: + panic("too many arguments to system call") + } +} + +// Syscall determines which SyscallX function to use for function f. +func (f *Fn) Syscall() string { + c := f.SyscallParamCount() + if c == 3 { + return syscalldot() + "Syscall" + } + return syscalldot() + "Syscall" + strconv.Itoa(c) +} + +// SyscallParamList returns source code for SyscallX parameters for function f. +func (f *Fn) SyscallParamList() string { + a := make([]string, 0) + for _, p := range f.Params { + a = append(a, p.SyscallArgList()...) + } + for len(a) < f.SyscallParamCount() { + a = append(a, "0") + } + return strings.Join(a, ", ") +} + +// HelperCallParamList returns source code of call into function f helper. +func (f *Fn) HelperCallParamList() string { + a := make([]string, 0, len(f.Params)) + for _, p := range f.Params { + s := p.Name + if p.Type == "string" { + s = p.tmpVar() + } + a = append(a, s) + } + return strings.Join(a, ", ") +} + +// IsUTF16 is true, if f is W (utf16) function. It is false +// for all A (ascii) functions. +func (f *Fn) IsUTF16() bool { + return true +} + +// StrconvFunc returns name of Go string to OS string function for f. +func (f *Fn) StrconvFunc() string { + if f.IsUTF16() { + return syscalldot() + "UTF16PtrFromString" + } + return syscalldot() + "BytePtrFromString" +} + +// StrconvType returns Go type name used for OS string for f. +func (f *Fn) StrconvType() string { + if f.IsUTF16() { + return "*uint16" + } + return "*byte" +} + +// HasStringParam is true, if f has at least one string parameter. +// Otherwise it is false. +func (f *Fn) HasStringParam() bool { + for _, p := range f.Params { + if p.Type == "string" { + return true + } + } + return false +} + +// HelperName returns name of function f helper. +func (f *Fn) HelperName() string { + if !f.HasStringParam() { + return f.Name + } + return "_" + f.Name +} + +// Source files and functions. +type Source struct { + Funcs []*Fn + Files []string + StdLibImports []string + ExternalImports []string +} + +func (src *Source) Import(pkg string) { + src.StdLibImports = append(src.StdLibImports, pkg) + sort.Strings(src.StdLibImports) +} + +func (src *Source) ExternalImport(pkg string) { + src.ExternalImports = append(src.ExternalImports, pkg) + sort.Strings(src.ExternalImports) +} + +// ParseFiles parses files listed in fs and extracts all syscall +// functions listed in sys comments. It returns source files +// and functions collection *Source if successful. +func ParseFiles(fs []string) (*Source, error) { + src := &Source{ + Funcs: make([]*Fn, 0), + Files: make([]string, 0), + StdLibImports: []string{ + "unsafe", + }, + ExternalImports: make([]string, 0), + } + for _, file := range fs { + if err := src.ParseFile(file); err != nil { + return nil, err + } + } + return src, nil +} + +// DLLs return dll names for a source set src. +func (src *Source) DLLs() []string { + uniq := make(map[string]bool) + r := make([]string, 0) + for _, f := range src.Funcs { + name := f.DLLName() + if _, found := uniq[name]; !found { + uniq[name] = true + r = append(r, name) + } + } + return r +} + +// ParseFile adds additional file path to a source set src. +func (src *Source) ParseFile(path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + s := bufio.NewScanner(file) + for s.Scan() { + t := trim(s.Text()) + if len(t) < 7 { + continue + } + if !strings.HasPrefix(t, "//sys") { + continue + } + t = t[5:] + if !(t[0] == ' ' || t[0] == '\t') { + continue + } + f, err := newFn(t[1:]) + if err != nil { + return err + } + src.Funcs = append(src.Funcs, f) + } + if err := s.Err(); err != nil { + return err + } + src.Files = append(src.Files, path) + + // get package name + fset := token.NewFileSet() + _, err = file.Seek(0, 0) + if err != nil { + return err + } + pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly) + if err != nil { + return err + } + packageName = pkg.Name.Name + + return nil +} + +// IsStdRepo returns true if src is part of standard library. +func (src *Source) IsStdRepo() (bool, error) { + if len(src.Files) == 0 { + return false, errors.New("no input files provided") + } + abspath, err := filepath.Abs(src.Files[0]) + if err != nil { + return false, err + } + goroot := runtime.GOROOT() + if runtime.GOOS == "windows" { + abspath = strings.ToLower(abspath) + goroot = strings.ToLower(goroot) + } + sep := string(os.PathSeparator) + if !strings.HasSuffix(goroot, sep) { + goroot += sep + } + return strings.HasPrefix(abspath, goroot), nil +} + +// Generate output source file from a source set src. +func (src *Source) Generate(w io.Writer) error { + const ( + pkgStd = iota // any package in std library + pkgXSysWindows // x/sys/windows package + pkgOther + ) + isStdRepo, err := src.IsStdRepo() + if err != nil { + return err + } + var pkgtype int + switch { + case isStdRepo: + pkgtype = pkgStd + case packageName == "windows": + // TODO: this needs better logic than just using package name + pkgtype = pkgXSysWindows + default: + pkgtype = pkgOther + } + if *systemDLL { + switch pkgtype { + case pkgStd: + src.Import("internal/syscall/windows/sysdll") + case pkgXSysWindows: + default: + src.ExternalImport("golang.org/x/sys/windows") + } + } + if packageName != "syscall" { + src.Import("syscall") + } + funcMap := template.FuncMap{ + "packagename": packagename, + "syscalldot": syscalldot, + "newlazydll": func(dll string) string { + arg := "\"" + dll + ".dll\"" + if !*systemDLL { + return syscalldot() + "NewLazyDLL(" + arg + ")" + } + switch pkgtype { + case pkgStd: + return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))" + case pkgXSysWindows: + return "NewLazySystemDLL(" + arg + ")" + default: + return "windows.NewLazySystemDLL(" + arg + ")" + } + }, + } + t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate)) + err = t.Execute(w, src) + if err != nil { + return errors.New("Failed to execute template: " + err.Error()) + } + return nil +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n") + flag.PrintDefaults() + os.Exit(1) +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + src, err := ParseFiles(flag.Args()) + if err != nil { + log.Fatal(err) + } + + var buf bytes.Buffer + if err := src.Generate(&buf); err != nil { + log.Fatal(err) + } + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + if *filename == "" { + _, err = os.Stdout.Write(data) + } else { + err = ioutil.WriteFile(*filename, data, 0644) + } + if err != nil { + log.Fatal(err) + } +} + +// TODO: use println instead to print in the following template +const srcTemplate = ` + +{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package {{packagename}} + +import ( +{{range .StdLibImports}}"{{.}}" +{{end}} + +{{range .ExternalImports}}"{{.}}" +{{end}} +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e {{syscalldot}}Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( +{{template "dlls" .}} +{{template "funcnames" .}}) +{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}} +{{end}} + +{{/* help functions */}} + +{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}} +{{end}}{{end}} + +{{define "funcnames"}}{{range .Funcs}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}") +{{end}}{{end}} + +{{define "helperbody"}} +func {{.Name}}({{.ParamList}}) {{template "results" .}}{ +{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}}) +} +{{end}} + +{{define "funcbody"}} +func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{ +{{template "tmpvars" .}} {{template "syscall" .}} +{{template "seterror" .}}{{template "printtrace" .}} return +} +{{end}} + +{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}} +{{end}}{{end}}{{end}} + +{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}} +{{end}}{{end}}{{end}} + +{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}} + +{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}} + +{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}} +{{end}}{{end}} + +{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n") +{{end}}{{end}} + +` diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go new file mode 100644 index 0000000..32f0701 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -0,0 +1,82 @@ +// +build windows + +package vhd + +import "syscall" + +//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go + +//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk + +type virtualStorageType struct { + DeviceID uint32 + VendorID [16]byte +} + +const virtualDiskAccessNONE uint32 = 0 +const virtualDiskAccessATTACHRO uint32 = 65536 +const virtualDiskAccessATTACHRW uint32 = 131072 +const virtualDiskAccessDETACH uint32 = 262144 +const virtualDiskAccessGETINFO uint32 = 524288 +const virtualDiskAccessCREATE uint32 = 1048576 +const virtualDiskAccessMETAOPS uint32 = 2097152 +const virtualDiskAccessREAD uint32 = 851968 +const virtualDiskAccessALL uint32 = 4128768 +const virtualDiskAccessWRITABLE uint32 = 3276800 + +const createVirtualDiskFlagNone uint32 = 0 +const createVirtualDiskFlagFullPhysicalAllocation uint32 = 1 +const createVirtualDiskFlagPreventWritesToSourceDisk uint32 = 2 +const createVirtualDiskFlagDoNotCopyMetadataFromParent uint32 = 4 + +type version2 struct { + UniqueID [16]byte // GUID + MaximumSize uint64 + BlockSizeInBytes uint32 + SectorSizeInBytes uint32 + ParentPath *uint16 // string + SourcePath *uint16 // string + OpenFlags uint32 + ParentVirtualStorageType virtualStorageType + SourceVirtualStorageType virtualStorageType + ResiliencyGUID [16]byte // GUID +} + +type createVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 version2 +} + +// CreateVhdx will create a simple vhdx file at the given path using default values. +func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { + var defaultType virtualStorageType + + parameters := createVirtualDiskParameters{ + Version: 2, + Version2: version2{ + MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, + BlockSizeInBytes: blockSizeInMb * 1024 * 1024, + }, + } + + var handle syscall.Handle + + if err := createVirtualDisk( + &defaultType, + path, + virtualDiskAccessNONE, + nil, + createVirtualDiskFlagNone, + 0, + ¶meters, + nil, + &handle); err != nil { + return err + } + + if err := syscall.CloseHandle(handle); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd.go new file mode 100644 index 0000000..c450955 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd.go @@ -0,0 +1,64 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package vhd + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll") + + procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk") +) + +func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle) +} + +func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/wim/decompress.go b/vendor/github.com/Microsoft/go-winio/wim/decompress.go new file mode 100644 index 0000000..f4e67f8 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/wim/decompress.go @@ -0,0 +1,138 @@ +package wim + +import ( + "encoding/binary" + "io" + "io/ioutil" + + "github.com/Microsoft/go-winio/wim/lzx" +) + +const chunkSize = 32768 // Compressed resource chunk size + +type compressedReader struct { + r *io.SectionReader + d io.ReadCloser + chunks []int64 + curChunk int + originalSize int64 +} + +func newCompressedReader(r *io.SectionReader, originalSize int64, offset int64) (*compressedReader, error) { + nchunks := (originalSize + chunkSize - 1) / chunkSize + var base int64 + chunks := make([]int64, nchunks) + if originalSize <= 0xffffffff { + // 32-bit chunk offsets + base = (nchunks - 1) * 4 + chunks32 := make([]uint32, nchunks-1) + err := binary.Read(r, binary.LittleEndian, chunks32) + if err != nil { + return nil, err + } + for i, n := range chunks32 { + chunks[i+1] = int64(n) + } + + } else { + // 64-bit chunk offsets + base = (nchunks - 1) * 8 + err := binary.Read(r, binary.LittleEndian, chunks[1:]) + if err != nil { + return nil, err + } + } + + for i, c := range chunks { + chunks[i] = c + base + } + + cr := &compressedReader{ + r: r, + chunks: chunks, + originalSize: originalSize, + } + + err := cr.reset(int(offset / chunkSize)) + if err != nil { + return nil, err + } + + suboff := offset % chunkSize + if suboff != 0 { + _, err := io.CopyN(ioutil.Discard, cr.d, suboff) + if err != nil { + return nil, err + } + } + return cr, nil +} + +func (r *compressedReader) chunkOffset(n int) int64 { + if n == len(r.chunks) { + return r.r.Size() + } + return r.chunks[n] +} + +func (r *compressedReader) chunkSize(n int) int { + return int(r.chunkOffset(n+1) - r.chunkOffset(n)) +} + +func (r *compressedReader) uncompressedSize(n int) int { + if n < len(r.chunks)-1 { + return chunkSize + } + size := int(r.originalSize % chunkSize) + if size == 0 { + size = chunkSize + } + return size +} + +func (r *compressedReader) reset(n int) error { + if n >= len(r.chunks) { + return io.EOF + } + if r.d != nil { + r.d.Close() + } + r.curChunk = n + size := r.chunkSize(n) + uncompressedSize := r.uncompressedSize(n) + section := io.NewSectionReader(r.r, r.chunkOffset(n), int64(size)) + if size != uncompressedSize { + d, err := lzx.NewReader(section, uncompressedSize) + if err != nil { + return err + } + r.d = d + } else { + r.d = ioutil.NopCloser(section) + } + + return nil +} + +func (r *compressedReader) Read(b []byte) (int, error) { + for { + n, err := r.d.Read(b) + if err != io.EOF { + return n, err + } + + err = r.reset(r.curChunk + 1) + if err != nil { + return n, err + } + } +} + +func (r *compressedReader) Close() error { + var err error + if r.d != nil { + err = r.d.Close() + r.d = nil + } + return err +} diff --git a/vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go b/vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go new file mode 100644 index 0000000..4deb0df --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go @@ -0,0 +1,606 @@ +// Package lzx implements a decompressor for the the WIM variant of the +// LZX compression algorithm. +// +// The LZX algorithm is an earlier variant of LZX DELTA, which is documented +// at https://msdn.microsoft.com/en-us/library/cc483133(v=exchg.80).aspx. +package lzx + +import ( + "bytes" + "encoding/binary" + "errors" + "io" +) + +const ( + maincodecount = 496 + maincodesplit = 256 + lencodecount = 249 + lenshift = 9 + codemask = 0x1ff + tablebits = 9 + tablesize = 1 << tablebits + + maxBlockSize = 32768 + windowSize = 32768 + + maxTreePathLen = 16 + + e8filesize = 12000000 + maxe8offset = 0x3fffffff + + verbatimBlock = 1 + alignedOffsetBlock = 2 + uncompressedBlock = 3 +) + +var footerBits = [...]byte{ + 0, 0, 0, 0, 1, 1, 2, 2, + 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, + 11, 11, 12, 12, 13, 13, 14, +} + +var basePosition = [...]uint16{ + 0, 1, 2, 3, 4, 6, 8, 12, + 16, 24, 32, 48, 64, 96, 128, 192, + 256, 384, 512, 768, 1024, 1536, 2048, 3072, + 4096, 6144, 8192, 12288, 16384, 24576, 32768, +} + +var ( + errCorrupt = errors.New("LZX data corrupt") +) + +// Reader is an interface used by the decompressor to access +// the input stream. If the provided io.Reader does not implement +// Reader, then a bufio.Reader is used. +type Reader interface { + io.Reader + io.ByteReader +} + +type decompressor struct { + r io.Reader + err error + unaligned bool + nbits byte + c uint32 + lru [3]uint16 + uncompressed int + windowReader *bytes.Reader + mainlens [maincodecount]byte + lenlens [lencodecount]byte + window [windowSize]byte + b []byte + bv int + bo int +} + +//go:noinline +func (f *decompressor) fail(err error) { + if f.err == nil { + f.err = err + } + f.bo = 0 + f.bv = 0 +} + +func (f *decompressor) ensureAtLeast(n int) error { + if f.bv-f.bo >= n { + return nil + } + + if f.err != nil { + return f.err + } + + if f.bv != f.bo { + copy(f.b[:f.bv-f.bo], f.b[f.bo:f.bv]) + } + n, err := io.ReadAtLeast(f.r, f.b[f.bv-f.bo:], n) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else { + f.fail(err) + } + return err + } + f.bv = f.bv - f.bo + n + f.bo = 0 + return nil +} + +// feed retrieves another 16-bit word from the stream and consumes +// it into f.c. It returns false if there are no more bytes available. +// Otherwise, on error, it sets f.err. +func (f *decompressor) feed() bool { + err := f.ensureAtLeast(2) + if err != nil { + if err == io.ErrUnexpectedEOF { + return false + } + } + f.c |= (uint32(f.b[f.bo+1])<<8 | uint32(f.b[f.bo])) << (16 - f.nbits) + f.nbits += 16 + f.bo += 2 + return true +} + +// getBits retrieves the next n bits from the byte stream. n +// must be <= 16. It sets f.err on error. +func (f *decompressor) getBits(n byte) uint16 { + if f.nbits < n { + if !f.feed() { + f.fail(io.ErrUnexpectedEOF) + } + } + c := uint16(f.c >> (32 - n)) + f.c <<= n + f.nbits -= n + return c +} + +type huffman struct { + extra [][]uint16 + maxbits byte + table [tablesize]uint16 +} + +// buildTable builds a huffman decoding table from a slice of code lengths, +// one per code, in order. Each code length must be <= maxTreePathLen. +// See https://en.wikipedia.org/wiki/Canonical_Huffman_code. +func buildTable(codelens []byte) *huffman { + // Determine the number of codes of each length, and the + // maximum length. + var count [maxTreePathLen + 1]uint + var max byte + for _, cl := range codelens { + count[cl]++ + if max < cl { + max = cl + } + } + + if max == 0 { + return &huffman{} + } + + // Determine the first code of each length. + var first [maxTreePathLen + 1]uint + code := uint(0) + for i := byte(1); i <= max; i++ { + code <<= 1 + first[i] = code + code += count[i] + } + + if code != 1< tablebits, split long codes into additional tables + // of suffixes of max-tablebits length. + h := &huffman{maxbits: max} + if max > tablebits { + core := first[tablebits+1] / 2 // Number of codes that fit without extra tables + nextra := 1<> (cl - tablebits) + suffix := code & (1<<(cl-tablebits) - 1) + extendedCode := suffix << (max - cl) + for j := uint(0); j < 1<<(max-cl); j++ { + h.extra[h.table[prefix]][extendedCode+j] = v + } + } + } + } + + return h +} + +// getCode retrieves the next code using the provided +// huffman tree. It sets f.err on error. +func (f *decompressor) getCode(h *huffman) uint16 { + if h.maxbits > 0 { + if f.nbits < maxTreePathLen { + f.feed() + } + + // For codes with length < tablebits, it doesn't matter + // what the remainder of the bits used for table lookup + // are, since entries with all possible suffixes were + // added to the table. + c := h.table[f.c>>(32-tablebits)] + if c >= 1<>(32-(h.maxbits-tablebits))] + } + + n := byte(c >> lenshift) + if f.nbits >= n { + // Only consume the length of the code, not the maximum + // code length. + f.c <<= n + f.nbits -= n + return c & codemask + } + + f.fail(io.ErrUnexpectedEOF) + return 0 + } + + // This is an empty tree. It should not be used. + f.fail(errCorrupt) + return 0 +} + +// readTree updates the huffman tree path lengths in lens by +// reading and decoding lengths from the byte stream. lens +// should be prepopulated with the previous block's tree's path +// lengths. For the first block, lens should be zero. +func (f *decompressor) readTree(lens []byte) error { + // Get the pre-tree for the main tree. + var pretreeLen [20]byte + for i := range pretreeLen { + pretreeLen[i] = byte(f.getBits(4)) + } + if f.err != nil { + return f.err + } + h := buildTable(pretreeLen[:]) + + // The lengths are encoded as a series of huffman codes + // encoded by the pre-tree. + for i := 0; i < len(lens); { + c := byte(f.getCode(h)) + if f.err != nil { + return f.err + } + switch { + case c <= 16: // length is delta from previous length + lens[i] = (lens[i] + 17 - c) % 17 + i++ + case c == 17: // next n + 4 lengths are zero + zeroes := int(f.getBits(4)) + 4 + if i+zeroes > len(lens) { + return errCorrupt + } + for j := 0; j < zeroes; j++ { + lens[i+j] = 0 + } + i += zeroes + case c == 18: // next n + 20 lengths are zero + zeroes := int(f.getBits(5)) + 20 + if i+zeroes > len(lens) { + return errCorrupt + } + for j := 0; j < zeroes; j++ { + lens[i+j] = 0 + } + i += zeroes + case c == 19: // next n + 4 lengths all have the same value + same := int(f.getBits(1)) + 4 + if i+same > len(lens) { + return errCorrupt + } + c = byte(f.getCode(h)) + if c > 16 { + return errCorrupt + } + l := (lens[i] + 17 - c) % 17 + for j := 0; j < same; j++ { + lens[i+j] = l + } + i += same + default: + return errCorrupt + } + } + + if f.err != nil { + return f.err + } + return nil +} + +func (f *decompressor) readBlockHeader() (byte, uint16, error) { + // If the previous block was an unaligned uncompressed block, restore + // 2-byte alignment. + if f.unaligned { + err := f.ensureAtLeast(1) + if err != nil { + return 0, 0, err + } + f.bo++ + f.unaligned = false + } + + blockType := f.getBits(3) + full := f.getBits(1) + var blockSize uint16 + if full != 0 { + blockSize = maxBlockSize + } else { + blockSize = f.getBits(16) + if blockSize > maxBlockSize { + return 0, 0, errCorrupt + } + } + + if f.err != nil { + return 0, 0, f.err + } + + switch blockType { + case verbatimBlock, alignedOffsetBlock: + // The caller will read the huffman trees. + case uncompressedBlock: + if f.nbits > 16 { + panic("impossible: more than one 16-bit word remains") + } + + // Drop the remaining bits in the current 16-bit word + // If there are no bits left, discard a full 16-bit word. + n := f.nbits + if n == 0 { + n = 16 + } + + f.getBits(n) + + // Read the LRU values for the next block. + err := f.ensureAtLeast(12) + if err != nil { + return 0, 0, err + } + + f.lru[0] = uint16(binary.LittleEndian.Uint32(f.b[f.bo : f.bo+4])) + f.lru[1] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+4 : f.bo+8])) + f.lru[2] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+8 : f.bo+12])) + f.bo += 12 + + default: + return 0, 0, errCorrupt + } + + return byte(blockType), blockSize, nil +} + +// readTrees reads the two or three huffman trees for the current block. +// readAligned specifies whether to read the aligned offset tree. +func (f *decompressor) readTrees(readAligned bool) (main *huffman, length *huffman, aligned *huffman, err error) { + // Aligned offset blocks start with a small aligned offset tree. + if readAligned { + var alignedLen [8]byte + for i := range alignedLen { + alignedLen[i] = byte(f.getBits(3)) + } + aligned = buildTable(alignedLen[:]) + if aligned == nil { + err = errors.New("corrupt") + return + } + } + + // The main tree is encoded in two parts. + err = f.readTree(f.mainlens[:maincodesplit]) + if err != nil { + return + } + err = f.readTree(f.mainlens[maincodesplit:]) + if err != nil { + return + } + + main = buildTable(f.mainlens[:]) + if main == nil { + err = errors.New("corrupt") + return + } + + // The length tree is encoding in a single part. + err = f.readTree(f.lenlens[:]) + if err != nil { + return + } + + length = buildTable(f.lenlens[:]) + if length == nil { + err = errors.New("corrupt") + return + } + + err = f.err + return +} + +// readCompressedBlock decodes a compressed block, writing into the window +// starting at start and ending at end, and using the provided huffman trees. +func (f *decompressor) readCompressedBlock(start, end uint16, hmain, hlength, haligned *huffman) (int, error) { + i := start + for i < end { + main := f.getCode(hmain) + if f.err != nil { + break + } + if main < 256 { + // Literal byte. + f.window[i] = byte(main) + i++ + continue + } + + // This is a match backward in the window. Determine + // the offset and dlength. + matchlen := (main - 256) % 8 + slot := (main - 256) / 8 + + // The length is either the low bits of the code, + // or if this is 7, is encoded with the length tree. + if matchlen == 7 { + matchlen += f.getCode(hlength) + } + matchlen += 2 + + var matchoffset uint16 + if slot < 3 { + // The offset is one of the LRU values. + matchoffset = f.lru[slot] + f.lru[slot] = f.lru[0] + f.lru[0] = matchoffset + } else { + // The offset is encoded as a combination of the + // slot and more bits from the bit stream. + offsetbits := footerBits[slot] + var verbatimbits, alignedbits uint16 + if offsetbits > 0 { + if haligned != nil && offsetbits >= 3 { + // This is an aligned offset block. Combine + // the bits written verbatim with the aligned + // offset tree code. + verbatimbits = f.getBits(offsetbits-3) * 8 + alignedbits = f.getCode(haligned) + } else { + // There are no aligned offset bits to read, + // only verbatim bits. + verbatimbits = f.getBits(offsetbits) + alignedbits = 0 + } + } + matchoffset = basePosition[slot] + verbatimbits + alignedbits - 2 + // Update the LRU cache. + f.lru[2] = f.lru[1] + f.lru[1] = f.lru[0] + f.lru[0] = matchoffset + } + + if matchoffset <= i && matchlen <= end-i { + copyend := i + matchlen + for ; i < copyend; i++ { + f.window[i] = f.window[i-matchoffset] + } + } else { + f.fail(errCorrupt) + break + } + } + return int(i - start), f.err +} + +// readBlock decodes the current block and returns the number of uncompressed bytes. +func (f *decompressor) readBlock(start uint16) (int, error) { + blockType, size, err := f.readBlockHeader() + if err != nil { + return 0, err + } + + if blockType == uncompressedBlock { + if size%2 == 1 { + // Remember to realign the byte stream at the next block. + f.unaligned = true + } + copied := 0 + if f.bo < f.bv { + copied = int(size) + s := int(start) + if copied > f.bv-f.bo { + copied = f.bv - f.bo + } + copy(f.window[s:s+copied], f.b[f.bo:f.bo+copied]) + f.bo += copied + } + n, err := io.ReadFull(f.r, f.window[start+uint16(copied):start+size]) + return copied + n, err + } + + hmain, hlength, haligned, err := f.readTrees(blockType == alignedOffsetBlock) + if err != nil { + return 0, err + } + + return f.readCompressedBlock(start, start+size, hmain, hlength, haligned) +} + +// decodeE8 reverses the 0xe8 x86 instruction encoding that was performed +// to the uncompressed data before it was compressed. +func decodeE8(b []byte, off int64) { + if off > maxe8offset || len(b) < 10 { + return + } + for i := 0; i < len(b)-10; i++ { + if b[i] == 0xe8 { + currentPtr := int32(off) + int32(i) + abs := int32(binary.LittleEndian.Uint32(b[i+1 : i+5])) + if abs >= -currentPtr && abs < e8filesize { + var rel int32 + if abs >= 0 { + rel = abs - currentPtr + } else { + rel = abs + e8filesize + } + binary.LittleEndian.PutUint32(b[i+1:i+5], uint32(rel)) + } + i += 4 + } + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + // Read and uncompress everything. + if f.windowReader == nil { + n := 0 + for n < f.uncompressed { + k, err := f.readBlock(uint16(n)) + if err != nil { + return 0, err + } + n += k + } + decodeE8(f.window[:f.uncompressed], 0) + f.windowReader = bytes.NewReader(f.window[:f.uncompressed]) + } + + // Just read directly from the window. + return f.windowReader.Read(b) +} + +func (f *decompressor) Close() error { + return nil +} + +// NewReader returns a new io.ReadCloser that decompresses a +// WIM LZX stream until uncompressedSize bytes have been returned. +func NewReader(r io.Reader, uncompressedSize int) (io.ReadCloser, error) { + if uncompressedSize > windowSize { + return nil, errors.New("uncompressed size is limited to 32KB") + } + f := &decompressor{ + lru: [3]uint16{1, 1, 1}, + uncompressed: uncompressedSize, + b: make([]byte, 4096), + r: r, + } + return f, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/wim/validate/validate.go b/vendor/github.com/Microsoft/go-winio/wim/validate/validate.go new file mode 100644 index 0000000..ba03fc9 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/wim/validate/validate.go @@ -0,0 +1,51 @@ +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/Microsoft/go-winio/wim" +) + +func main() { + flag.Parse() + f, err := os.Open(flag.Arg(0)) + if err != nil { + panic(err) + } + + w, err := wim.NewReader(f) + if err != nil { + panic(err) + + } + + fmt.Printf("%#v\n%#v\n", w.Image[0], w.Image[0].Windows) + + dir, err := w.Image[0].Open() + if err != nil { + panic(err) + } + + err = recur(dir) + if err != nil { + panic(err) + } +} + +func recur(d *wim.File) error { + files, err := d.Readdir() + if err != nil { + return fmt.Errorf("%s: %s", d.Name, err) + } + for _, f := range files { + if f.IsDir() { + err = recur(f) + if err != nil { + return fmt.Errorf("%s: %s", f.Name, err) + } + } + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/wim/wim.go b/vendor/github.com/Microsoft/go-winio/wim/wim.go new file mode 100644 index 0000000..1d02e92 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/wim/wim.go @@ -0,0 +1,866 @@ +// Package wim implements a WIM file parser. +// +// WIM files are used to distribute Windows file system and container images. +// They are documented at https://msdn.microsoft.com/en-us/library/windows/desktop/dd861280.aspx. +package wim + +import ( + "bytes" + "crypto/sha1" + "encoding/binary" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "strconv" + "sync" + "time" + "unicode/utf16" +) + +// File attribute constants from Windows. +const ( + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_DEVICE = 0x00000040 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_TEMPORARY = 0x00000100 + FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_ATTRIBUTE_COMPRESSED = 0x00000800 + FILE_ATTRIBUTE_OFFLINE = 0x00001000 + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 + FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 + FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 + FILE_ATTRIBUTE_VIRTUAL = 0x00010000 + FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 + FILE_ATTRIBUTE_EA = 0x00040000 +) + +// Windows processor architectures. +const ( + PROCESSOR_ARCHITECTURE_INTEL = 0 + PROCESSOR_ARCHITECTURE_MIPS = 1 + PROCESSOR_ARCHITECTURE_ALPHA = 2 + PROCESSOR_ARCHITECTURE_PPC = 3 + PROCESSOR_ARCHITECTURE_SHX = 4 + PROCESSOR_ARCHITECTURE_ARM = 5 + PROCESSOR_ARCHITECTURE_IA64 = 6 + PROCESSOR_ARCHITECTURE_ALPHA64 = 7 + PROCESSOR_ARCHITECTURE_MSIL = 8 + PROCESSOR_ARCHITECTURE_AMD64 = 9 + PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10 + PROCESSOR_ARCHITECTURE_NEUTRAL = 11 + PROCESSOR_ARCHITECTURE_ARM64 = 12 +) + +var wimImageTag = [...]byte{'M', 'S', 'W', 'I', 'M', 0, 0, 0} + +type guid struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +func (g guid) String() string { + return fmt.Sprintf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", g.Data1, g.Data2, g.Data3, g.Data4[0], g.Data4[1], g.Data4[2], g.Data4[3], g.Data4[4], g.Data4[5], g.Data4[6], g.Data4[7]) +} + +type resourceDescriptor struct { + FlagsAndCompressedSize uint64 + Offset int64 + OriginalSize int64 +} + +type resFlag byte + +const ( + resFlagFree resFlag = 1 << iota + resFlagMetadata + resFlagCompressed + resFlagSpanned +) + +const validate = false + +const supportedResFlags = resFlagMetadata | resFlagCompressed + +func (r *resourceDescriptor) Flags() resFlag { + return resFlag(r.FlagsAndCompressedSize >> 56) +} + +func (r *resourceDescriptor) CompressedSize() int64 { + return int64(r.FlagsAndCompressedSize & 0xffffffffffffff) +} + +func (r *resourceDescriptor) String() string { + s := fmt.Sprintf("%d bytes at %d", r.CompressedSize(), r.Offset) + if r.Flags()&4 != 0 { + s += fmt.Sprintf(" (uncompresses to %d)", r.OriginalSize) + } + return s +} + +// SHA1Hash contains the SHA1 hash of a file or stream. +type SHA1Hash [20]byte + +type streamDescriptor struct { + resourceDescriptor + PartNumber uint16 + RefCount uint32 + Hash SHA1Hash +} + +type hdrFlag uint32 + +const ( + hdrFlagReserved hdrFlag = 1 << iota + hdrFlagCompressed + hdrFlagReadOnly + hdrFlagSpanned + hdrFlagResourceOnly + hdrFlagMetadataOnly + hdrFlagWriteInProgress + hdrFlagRpFix +) + +const ( + hdrFlagCompressReserved hdrFlag = 1 << (iota + 16) + hdrFlagCompressXpress + hdrFlagCompressLzx +) + +const supportedHdrFlags = hdrFlagRpFix | hdrFlagReadOnly | hdrFlagCompressed | hdrFlagCompressLzx + +type wimHeader struct { + ImageTag [8]byte + Size uint32 + Version uint32 + Flags hdrFlag + CompressionSize uint32 + WIMGuid guid + PartNumber uint16 + TotalParts uint16 + ImageCount uint32 + OffsetTable resourceDescriptor + XMLData resourceDescriptor + BootMetadata resourceDescriptor + BootIndex uint32 + Padding uint32 + Integrity resourceDescriptor + Unused [60]byte +} + +type securityblockDisk struct { + TotalLength uint32 + NumEntries uint32 +} + +const securityblockDiskSize = 8 + +type direntry struct { + Attributes uint32 + SecurityID uint32 + SubdirOffset int64 + Unused1, Unused2 int64 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + Hash SHA1Hash + Padding uint32 + ReparseHardLink int64 + StreamCount uint16 + ShortNameLength uint16 + FileNameLength uint16 +} + +var direntrySize = int64(binary.Size(direntry{}) + 8) // includes an 8-byte length prefix + +type streamentry struct { + Unused int64 + Hash SHA1Hash + NameLength int16 +} + +var streamentrySize = int64(binary.Size(streamentry{}) + 8) // includes an 8-byte length prefix + +// Filetime represents a Windows time. +type Filetime struct { + LowDateTime uint32 + HighDateTime uint32 +} + +// Time returns the time as time.Time. +func (ft *Filetime) Time() time.Time { + // 100-nanosecond intervals since January 1, 1601 + nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) + // change starting time to the Epoch (00:00:00 UTC, January 1, 1970) + nsec -= 116444736000000000 + // convert into nanoseconds + nsec *= 100 + return time.Unix(0, nsec) +} + +// UnmarshalXML unmarshals the time from a WIM XML blob. +func (ft *Filetime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type time struct { + Low string `xml:"LOWPART"` + High string `xml:"HIGHPART"` + } + var t time + err := d.DecodeElement(&t, &start) + if err != nil { + return err + } + + low, err := strconv.ParseUint(t.Low, 0, 32) + if err != nil { + return err + } + high, err := strconv.ParseUint(t.High, 0, 32) + if err != nil { + return err + } + + ft.LowDateTime = uint32(low) + ft.HighDateTime = uint32(high) + return nil +} + +type info struct { + Image []ImageInfo `xml:"IMAGE"` +} + +// ImageInfo contains information about the image. +type ImageInfo struct { + Name string `xml:"NAME"` + Index int `xml:"INDEX,attr"` + CreationTime Filetime `xml:"CREATIONTIME"` + ModTime Filetime `xml:"LASTMODIFICATIONTIME"` + Windows *WindowsInfo `xml:"WINDOWS"` +} + +// WindowsInfo contains information about the Windows installation in the image. +type WindowsInfo struct { + Arch byte `xml:"ARCH"` + ProductName string `xml:"PRODUCTNAME"` + EditionID string `xml:"EDITIONID"` + InstallationType string `xml:"INSTALLATIONTYPE"` + ProductType string `xml:"PRODUCTTYPE"` + Languages []string `xml:"LANGUAGES>LANGUAGE"` + DefaultLanguage string `xml:"LANGUAGES>DEFAULT"` + Version Version `xml:"VERSION"` + SystemRoot string `xml:"SYSTEMROOT"` +} + +// Version represents a Windows build version. +type Version struct { + Major int `xml:"MAJOR"` + Minor int `xml:"MINOR"` + Build int `xml:"BUILD"` + SPBuild int `xml:"SPBUILD"` + SPLevel int `xml:"SPLEVEL"` +} + +// ParseError is returned when the WIM cannot be parsed. +type ParseError struct { + Oper string + Path string + Err error +} + +func (e *ParseError) Error() string { + if e.Path == "" { + return "WIM parse error at " + e.Oper + ": " + e.Err.Error() + } + return fmt.Sprintf("WIM parse error: %s %s: %s", e.Oper, e.Path, e.Err.Error()) +} + +// Reader provides functions to read a WIM file. +type Reader struct { + hdr wimHeader + r io.ReaderAt + fileData map[SHA1Hash]resourceDescriptor + + XMLInfo string // The XML information about the WIM. + Image []*Image // The WIM's images. +} + +// Image represents an image within a WIM file. +type Image struct { + wim *Reader + offset resourceDescriptor + sds [][]byte + rootOffset int64 + r io.ReadCloser + curOffset int64 + m sync.Mutex + + ImageInfo +} + +// StreamHeader contains alternate data stream metadata. +type StreamHeader struct { + Name string + Hash SHA1Hash + Size int64 +} + +// Stream represents an alternate data stream or reparse point data stream. +type Stream struct { + StreamHeader + wim *Reader + offset resourceDescriptor +} + +// FileHeader contains file metadata. +type FileHeader struct { + Name string + ShortName string + Attributes uint32 + SecurityDescriptor []byte + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + Hash SHA1Hash + Size int64 + LinkID int64 + ReparseTag uint32 + ReparseReserved uint32 +} + +// File represents a file or directory in a WIM image. +type File struct { + FileHeader + Streams []*Stream + offset resourceDescriptor + img *Image + subdirOffset int64 +} + +// NewReader returns a Reader that can be used to read WIM file data. +func NewReader(f io.ReaderAt) (*Reader, error) { + r := &Reader{r: f} + section := io.NewSectionReader(f, 0, 0xffff) + err := binary.Read(section, binary.LittleEndian, &r.hdr) + if err != nil { + return nil, err + } + + if r.hdr.ImageTag != wimImageTag { + return nil, &ParseError{Oper: "image tag", Err: errors.New("not a WIM file")} + } + + if r.hdr.Flags&^supportedHdrFlags != 0 { + return nil, fmt.Errorf("unsupported WIM flags %x", r.hdr.Flags&^supportedHdrFlags) + } + + if r.hdr.CompressionSize != 0x8000 { + return nil, fmt.Errorf("unsupported compression size %d", r.hdr.CompressionSize) + } + + if r.hdr.TotalParts != 1 { + return nil, errors.New("multi-part WIM not supported") + } + + fileData, images, err := r.readOffsetTable(&r.hdr.OffsetTable) + if err != nil { + return nil, err + } + + xmlinfo, err := r.readXML() + if err != nil { + return nil, err + } + + var info info + err = xml.Unmarshal([]byte(xmlinfo), &info) + if err != nil { + return nil, &ParseError{Oper: "XML info", Err: err} + } + + for i, img := range images { + for _, imgInfo := range info.Image { + if imgInfo.Index == i+1 { + img.ImageInfo = imgInfo + break + } + } + } + + r.fileData = fileData + r.Image = images + r.XMLInfo = xmlinfo + return r, nil +} + +// Close releases resources associated with the Reader. +func (r *Reader) Close() error { + for _, img := range r.Image { + img.reset() + } + return nil +} + +func (r *Reader) resourceReader(hdr *resourceDescriptor) (io.ReadCloser, error) { + return r.resourceReaderWithOffset(hdr, 0) +} + +func (r *Reader) resourceReaderWithOffset(hdr *resourceDescriptor, offset int64) (io.ReadCloser, error) { + var sr io.ReadCloser + section := io.NewSectionReader(r.r, hdr.Offset, hdr.CompressedSize()) + if hdr.Flags()&resFlagCompressed == 0 { + section.Seek(offset, 0) + sr = ioutil.NopCloser(section) + } else { + cr, err := newCompressedReader(section, hdr.OriginalSize, offset) + if err != nil { + return nil, err + } + sr = cr + } + + return sr, nil +} + +func (r *Reader) readResource(hdr *resourceDescriptor) ([]byte, error) { + rsrc, err := r.resourceReader(hdr) + if err != nil { + return nil, err + } + defer rsrc.Close() + return ioutil.ReadAll(rsrc) +} + +func (r *Reader) readXML() (string, error) { + if r.hdr.XMLData.CompressedSize() == 0 { + return "", nil + } + rsrc, err := r.resourceReader(&r.hdr.XMLData) + if err != nil { + return "", err + } + defer rsrc.Close() + + XMLData := make([]uint16, r.hdr.XMLData.OriginalSize/2) + err = binary.Read(rsrc, binary.LittleEndian, XMLData) + if err != nil { + return "", &ParseError{Oper: "XML data", Err: err} + } + + // The BOM will always indicate little-endian UTF-16. + if XMLData[0] != 0xfeff { + return "", &ParseError{Oper: "XML data", Err: errors.New("invalid BOM")} + } + return string(utf16.Decode(XMLData[1:])), nil +} + +func (r *Reader) readOffsetTable(res *resourceDescriptor) (map[SHA1Hash]resourceDescriptor, []*Image, error) { + fileData := make(map[SHA1Hash]resourceDescriptor) + var images []*Image + + offsetTable, err := r.readResource(res) + if err != nil { + return nil, nil, &ParseError{Oper: "offset table", Err: err} + } + + br := bytes.NewReader(offsetTable) + for i := 0; ; i++ { + var res streamDescriptor + err := binary.Read(br, binary.LittleEndian, &res) + if err == io.EOF { + break + } + if err != nil { + return nil, nil, &ParseError{Oper: "offset table", Err: err} + } + if res.Flags()&^supportedResFlags != 0 { + return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("unsupported resource flag")} + } + + // Validation for ad-hoc testing + if validate { + sec, err := r.resourceReader(&res.resourceDescriptor) + if err != nil { + panic(fmt.Sprint(i, err)) + } + hash := sha1.New() + _, err = io.Copy(hash, sec) + sec.Close() + if err != nil { + panic(fmt.Sprint(i, err)) + } + var cmphash SHA1Hash + copy(cmphash[:], hash.Sum(nil)) + if cmphash != res.Hash { + panic(fmt.Sprint(i, "hash mismatch")) + } + } + + if res.Flags()&resFlagMetadata != 0 { + image := &Image{ + wim: r, + offset: res.resourceDescriptor, + } + images = append(images, image) + } else { + fileData[res.Hash] = res.resourceDescriptor + } + } + + if len(images) != int(r.hdr.ImageCount) { + return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("mismatched image count")} + } + + return fileData, images, nil +} + +func (r *Reader) readSecurityDescriptors(rsrc io.Reader) (sds [][]byte, n int64, err error) { + var secBlock securityblockDisk + err = binary.Read(rsrc, binary.LittleEndian, &secBlock) + if err != nil { + err = &ParseError{Oper: "security table", Err: err} + return + } + + n += securityblockDiskSize + + secSizes := make([]int64, secBlock.NumEntries) + err = binary.Read(rsrc, binary.LittleEndian, &secSizes) + if err != nil { + err = &ParseError{Oper: "security table sizes", Err: err} + return + } + + n += int64(secBlock.NumEntries * 8) + + sds = make([][]byte, secBlock.NumEntries) + for i, size := range secSizes { + sd := make([]byte, size&0xffffffff) + _, err = io.ReadFull(rsrc, sd) + if err != nil { + err = &ParseError{Oper: "security descriptor", Err: err} + return + } + n += int64(len(sd)) + sds[i] = sd + } + + secsize := int64((secBlock.TotalLength + 7) &^ 7) + if n > secsize { + err = &ParseError{Oper: "security descriptor", Err: errors.New("security descriptor table too small")} + return + } + + _, err = io.CopyN(ioutil.Discard, rsrc, secsize-n) + if err != nil { + return + } + + n = secsize + return +} + +// Open parses the image and returns the root directory. +func (img *Image) Open() (*File, error) { + if img.sds == nil { + rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, img.rootOffset) + if err != nil { + return nil, err + } + sds, n, err := img.wim.readSecurityDescriptors(rsrc) + if err != nil { + rsrc.Close() + return nil, err + } + img.sds = sds + img.r = rsrc + img.rootOffset = n + img.curOffset = n + } + + f, err := img.readdir(img.rootOffset) + if err != nil { + return nil, err + } + if len(f) != 1 { + return nil, &ParseError{Oper: "root directory", Err: errors.New("expected exactly 1 root directory entry")} + } + return f[0], err +} + +func (img *Image) reset() { + if img.r != nil { + img.r.Close() + img.r = nil + } + img.curOffset = -1 +} + +func (img *Image) readdir(offset int64) ([]*File, error) { + img.m.Lock() + defer img.m.Unlock() + + if offset < img.curOffset || offset > img.curOffset+chunkSize { + // Reset to seek backward or to seek forward very far. + img.reset() + } + if img.r == nil { + rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, offset) + if err != nil { + return nil, err + } + img.r = rsrc + img.curOffset = offset + } + if offset > img.curOffset { + _, err := io.CopyN(ioutil.Discard, img.r, offset-img.curOffset) + if err != nil { + img.reset() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + } + + var entries []*File + for { + e, n, err := img.readNextEntry(img.r) + img.curOffset += n + if err == io.EOF { + break + } + if err != nil { + img.reset() + return nil, err + } + entries = append(entries, e) + } + return entries, nil +} + +func (img *Image) readNextEntry(r io.Reader) (*File, int64, error) { + var length int64 + err := binary.Read(r, binary.LittleEndian, &length) + if err != nil { + return nil, 0, &ParseError{Oper: "directory length check", Err: err} + } + + if length == 0 { + return nil, 8, io.EOF + } + + left := length + if left < direntrySize { + return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short")} + } + + var dentry direntry + err = binary.Read(r, binary.LittleEndian, &dentry) + if err != nil { + return nil, 0, &ParseError{Oper: "directory entry", Err: err} + } + + left -= direntrySize + + namesLen := int64(dentry.FileNameLength + 2 + dentry.ShortNameLength) + if left < namesLen { + return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short for names")} + } + + names := make([]uint16, namesLen/2) + err = binary.Read(r, binary.LittleEndian, names) + if err != nil { + return nil, 0, &ParseError{Oper: "file name", Err: err} + } + + left -= namesLen + + var name, shortName string + if dentry.FileNameLength > 0 { + name = string(utf16.Decode(names[:dentry.FileNameLength/2])) + } + + if dentry.ShortNameLength > 0 { + shortName = string(utf16.Decode(names[dentry.FileNameLength/2+1:])) + } + + var offset resourceDescriptor + zerohash := SHA1Hash{} + if dentry.Hash != zerohash { + var ok bool + offset, ok = img.wim.fileData[dentry.Hash] + if !ok { + return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %#v", dentry)} + } + } + + f := &File{ + FileHeader: FileHeader{ + Attributes: dentry.Attributes, + CreationTime: dentry.CreationTime, + LastAccessTime: dentry.LastAccessTime, + LastWriteTime: dentry.LastWriteTime, + Hash: dentry.Hash, + Size: offset.OriginalSize, + Name: name, + ShortName: shortName, + }, + + offset: offset, + img: img, + subdirOffset: dentry.SubdirOffset, + } + + isDir := false + + if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT == 0 { + f.LinkID = dentry.ReparseHardLink + if dentry.Attributes&FILE_ATTRIBUTE_DIRECTORY != 0 { + isDir = true + } + } else { + f.ReparseTag = uint32(dentry.ReparseHardLink) + f.ReparseReserved = uint32(dentry.ReparseHardLink >> 32) + } + + if isDir && f.subdirOffset == 0 { + return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("no subdirectory data for directory")} + } else if !isDir && f.subdirOffset != 0 { + return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("unexpected subdirectory data for non-directory")} + } + + if dentry.SecurityID != 0xffffffff { + f.SecurityDescriptor = img.sds[dentry.SecurityID] + } + + _, err = io.CopyN(ioutil.Discard, r, left) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, 0, err + } + + if dentry.StreamCount > 0 { + var streams []*Stream + for i := uint16(0); i < dentry.StreamCount; i++ { + s, n, err := img.readNextStream(r) + length += n + if err != nil { + return nil, 0, err + } + // The first unnamed stream should be treated as the file stream. + if i == 0 && s.Name == "" { + f.Hash = s.Hash + f.Size = s.Size + f.offset = s.offset + } else if s.Name != "" { + streams = append(streams, s) + } + } + f.Streams = streams + } + + if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT != 0 && f.Size == 0 { + return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("reparse point is missing reparse stream")} + } + + return f, length, nil +} + +func (img *Image) readNextStream(r io.Reader) (*Stream, int64, error) { + var length int64 + err := binary.Read(r, binary.LittleEndian, &length) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, 0, &ParseError{Oper: "stream length check", Err: err} + } + + left := length + if left < streamentrySize { + return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short")} + } + + var sentry streamentry + err = binary.Read(r, binary.LittleEndian, &sentry) + if err != nil { + return nil, 0, &ParseError{Oper: "stream entry", Err: err} + } + + left -= streamentrySize + + if left < int64(sentry.NameLength) { + return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short for name")} + } + + names := make([]uint16, sentry.NameLength/2) + err = binary.Read(r, binary.LittleEndian, names) + if err != nil { + return nil, 0, &ParseError{Oper: "file name", Err: err} + } + + left -= int64(sentry.NameLength) + name := string(utf16.Decode(names)) + + var offset resourceDescriptor + if sentry.Hash != (SHA1Hash{}) { + var ok bool + offset, ok = img.wim.fileData[sentry.Hash] + if !ok { + return nil, 0, &ParseError{Oper: "stream entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %v", sentry.Hash)} + } + } + + s := &Stream{ + StreamHeader: StreamHeader{ + Hash: sentry.Hash, + Size: offset.OriginalSize, + Name: name, + }, + wim: img.wim, + offset: offset, + } + + _, err = io.CopyN(ioutil.Discard, r, left) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, 0, err + } + + return s, length, nil +} + +// Open returns an io.ReadCloser that can be used to read the stream's contents. +func (s *Stream) Open() (io.ReadCloser, error) { + return s.wim.resourceReader(&s.offset) +} + +// Open returns an io.ReadCloser that can be used to read the file's contents. +func (f *File) Open() (io.ReadCloser, error) { + return f.img.wim.resourceReader(&f.offset) +} + +// Readdir reads the directory entries. +func (f *File) Readdir() ([]*File, error) { + if !f.IsDir() { + return nil, errors.New("not a directory") + } + return f.img.readdir(f.subdirOffset) +} + +// IsDir returns whether the given file is a directory. It returns false when it +// is a directory reparse point. +func (f *FileHeader) IsDir() bool { + return f.Attributes&(FILE_ATTRIBUTE_DIRECTORY|FILE_ATTRIBUTE_REPARSE_POINT) == FILE_ATTRIBUTE_DIRECTORY +} diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 0000000..3f52763 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,520 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") +) + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name string, timeout uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _waitNamedPipe(_p0, timeout) +} + +func _waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore new file mode 100644 index 0000000..0026861 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml new file mode 100644 index 0000000..984e073 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.5.4 + - 1.6.3 + - 1.7 +install: + - go get -v golang.org/x/tools/cmd/cover +script: + - go test -v -tags=safe ./spew + - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov +after_success: + - go get -v github.com/mattn/goveralls + - export PATH=$PATH:$HOME/gopath/bin + - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..c836416 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md new file mode 100644 index 0000000..2624304 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -0,0 +1,205 @@ +go-spew +======= + +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] +(https://travis-ci.org/davecgh/go-spew) [![ISC License] +(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] +(https://img.shields.io/coveralls/davecgh/go-spew.svg)] +(https://coveralls.io/r/davecgh/go-spew?branch=master) + + +Go-spew implements a deep pretty printer for Go data structures to aid in +debugging. A comprehensive suite of tests with 100% test coverage is provided +to ensure proper functionality. See `test_coverage.txt` for the gocov coverage +report. Go-spew is licensed under the liberal ISC license, so it may be used in +open source or commercial projects. + +If you're interested in reading about how this package came to life and some +of the challenges involved in providing a deep pretty printer, there is a blog +post about it +[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). + +## Documentation + +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] +(http://godoc.org/github.com/davecgh/go-spew/spew) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the excellent GoDoc site here: +http://godoc.org/github.com/davecgh/go-spew/spew + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/davecgh/go-spew/spew + +## Installation + +```bash +$ go get -u github.com/davecgh/go-spew/spew +``` + +## Quick Start + +Add this import line to the file you're working in: + +```Go +import "github.com/davecgh/go-spew/spew" +``` + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + +```Go +spew.Dump(myVar1, myVar2, ...) +spew.Fdump(someWriter, myVar1, myVar2, ...) +str := spew.Sdump(myVar1, myVar2, ...) +``` + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most +compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types +and pointer addresses): + +```Go +spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +``` + +## Debugging a Web Application Example + +Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. + +```Go +package main + +import ( + "fmt" + "html" + "net/http" + + "github.com/davecgh/go-spew/spew" +) + +func handler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:]) + fmt.Fprintf(w, "") +} + +func main() { + http.HandleFunc("/", handler) + http.ListenAndServe(":8080", nil) +} +``` + +## Sample Dump Output + +``` +(main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) { + (string) "one": (bool) true + } +} +([]uint8) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| +} +``` + +## Sample Formatter Output + +Double pointer to a uint8: +``` + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 +``` + +Pointer to circular struct with a uint8 field and a pointer to itself: +``` + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} +``` + +## Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available via the +spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +``` +* Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + +* MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + +* DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + +* DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. This option + relies on access to the unsafe package, so it will not have any effect when + running in environments without access to the unsafe package such as Google + App Engine or with the "safe" build tag specified. + Pointer method invocation is enabled by default. + +* DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + +* DisableCapacities + DisableCapacities specifies whether to disable the printing of capacities + for arrays, slices, maps and channels. This is useful when diffing data + structures in tests. + +* ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + +* SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are supported, + with other types sorted according to the reflect.Value.String() output + which guarantees display stability. Natural map order is used by + default. + +* SpewKeys + SpewKeys specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only considered + if SortKeys is true. + +``` + +## Unsafe Package Dependency + +This package relies on the unsafe package to perform some of the more advanced +features, however it also supports a "limited" mode which allows it to work in +environments where the unsafe package is not available. By default, it will +operate in this mode on Google App Engine and when compiled with GopherJS. The +"safe" build tag may also be specified to force the package to build without +using the unsafe package. + +## License + +Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh new file mode 100644 index 0000000..9579497 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/cov_report.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +if ! type gocov >/dev/null 2>&1; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi + +# Only run the cgo tests if gcc is installed. +if type gcc >/dev/null 2>&1; then + (cd spew && gocov test -tags testcgo | gocov report) +else + (cd spew && gocov test | gocov report) +fi diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000..8a4a658 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. + flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. + upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. + switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000..1fe3cf3 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000..7c519ff --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go new file mode 100644 index 0000000..0f5ce47 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common_test.go @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// custom type to test Stinger interface on non-pointer receiver. +type stringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with non-pointer receivers. +func (s stringer) String() string { + return "stringer " + string(s) +} + +// custom type to test Stinger interface on pointer receiver. +type pstringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with only pointer receivers. +func (s *pstringer) String() string { + return "stringer " + string(*s) +} + +// xref1 and xref2 are cross referencing structs for testing circular reference +// detection. +type xref1 struct { + ps2 *xref2 +} +type xref2 struct { + ps1 *xref1 +} + +// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular +// reference for testing detection. +type indirCir1 struct { + ps2 *indirCir2 +} +type indirCir2 struct { + ps3 *indirCir3 +} +type indirCir3 struct { + ps1 *indirCir1 +} + +// embed is used to test embedded structures. +type embed struct { + a string +} + +// embedwrap is used to test embedded structures. +type embedwrap struct { + *embed + e *embed +} + +// panicer is used to intentionally cause a panic for testing spew properly +// handles them +type panicer int + +func (p panicer) String() string { + panic("test panic") +} + +// customError is used to test custom error interface invocation. +type customError int + +func (e customError) Error() string { + return fmt.Sprintf("error: %d", int(e)) +} + +// stringizeWants converts a slice of wanted test output into a format suitable +// for a test error message. +func stringizeWants(wants []string) string { + s := "" + for i, want := range wants { + if i > 0 { + s += fmt.Sprintf("want%d: %s", i+1, want) + } else { + s += "want: " + want + } + } + return s +} + +// testFailed returns whether or not a test failed by checking if the result +// of the test is in the slice of wanted strings. +func testFailed(result string, wants []string) bool { + for _, want := range wants { + if result == want { + return false + } + } + return true +} + +type sortableStruct struct { + x int +} + +func (ss sortableStruct) String() string { + return fmt.Sprintf("ss.%d", ss.x) +} + +type unsortableStruct struct { + x int +} + +type sortTestCase struct { + input []reflect.Value + expected []reflect.Value +} + +func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) { + getInterfaces := func(values []reflect.Value) []interface{} { + interfaces := []interface{}{} + for _, v := range values { + interfaces = append(interfaces, v.Interface()) + } + return interfaces + } + + for _, test := range tests { + spew.SortValues(test.input, cs) + // reflect.DeepEqual cannot really make sense of reflect.Value, + // probably because of all the pointer tricks. For instance, + // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{} + // instead. + input := getInterfaces(test.input) + expected := getInterfaces(test.expected) + if !reflect.DeepEqual(input, expected) { + t.Errorf("Sort mismatch:\n %v != %v", input, expected) + } + } +} + +// TestSortValues ensures the sort functionality for relect.Value based sorting +// works as intended. +func TestSortValues(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + embedA := v(embed{"a"}) + embedB := v(embed{"b"}) + embedC := v(embed{"c"}) + tests := []sortTestCase{ + // No values. + { + []reflect.Value{}, + []reflect.Value{}, + }, + // Bools. + { + []reflect.Value{v(false), v(true), v(false)}, + []reflect.Value{v(false), v(false), v(true)}, + }, + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Uints. + { + []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))}, + []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))}, + }, + // Floats. + { + []reflect.Value{v(2.0), v(1.0), v(3.0)}, + []reflect.Value{v(1.0), v(2.0), v(3.0)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // Array + { + []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})}, + []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})}, + }, + // Uintptrs. + { + []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))}, + []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))}, + }, + // SortableStructs. + { + // Note: not sorted - DisableMethods is set. + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + // Invalid. + { + []reflect.Value{embedB, embedA, embedC}, + []reflect.Value{embedB, embedA, embedC}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithMethods ensures the sort functionality for relect.Value +// based sorting works as intended when using string methods. +func TestSortValuesWithMethods(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: false, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithSpew ensures the sort functionality for relect.Value +// based sorting works as intended when using spew to stringify keys. +func TestSortValuesWithSpew(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: true} + helpTestSortValues(tests, &cs, t) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000..aacaac6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000..df1d582 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go new file mode 100644 index 0000000..5aad9c7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go @@ -0,0 +1,1042 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. + +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Array containing bytes +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Slice containing bytes +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// dumpTest is used to describe a test to be performed against the Dump method. +type dumpTest struct { + in interface{} + wants []string +} + +// dumpTests houses all of the tests to be performed against the Dump method. +var dumpTests = make([]dumpTest, 0) + +// addDumpTest is a helper method to append the passed input and desired result +// to dumpTests +func addDumpTest(in interface{}, wants ...string) { + test := dumpTest{in, wants} + dumpTests = append(dumpTests, test) +} + +func addIntDumpTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max int64. + v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addUintDumpTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max uint. + v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addBoolDumpTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFloatDumpTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addComplexDumpTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard complex128. + v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addArrayDumpTests() { + // Array containing standard ints. + v := [3]int{1, 2, 3} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + + vt + ") 2,\n (" + vt + ") 3\n}" + addDumpTest(v, "([3]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[3]"+vt+")()\n") + + // Array containing type with custom formatter on pointer receiver only. + v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := [3]pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + + ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + + ") (len=" + v2i2Len + ") " + "stringer 3\n}" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + + v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + + ") " + "\"3\"\n}" + } + addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") + addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") + addDumpTest(nv2, "(*[3]"+v2t+")()\n") + + // Array containing interfaces. + v3i0 := "one" + v3 := [3]interface{}{v3i0, int(2), uint(3)} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Array containing bytes. + v4 := [34]byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[34]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[34]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addSliceDumpTests() { + // Slice containing standard float32 values. + v := []float32{3.14, 6.28, 12.56} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + + vt + ") 6.28,\n (" + vt + ") 12.56\n}" + addDumpTest(v, "([]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[]"+vt+")()\n") + + // Slice containing type with custom formatter on pointer receiver only. + v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := []pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + + v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + + ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + + "stringer 3\n}" + addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*[]"+v2t+")()\n") + + // Slice containing interfaces. + v3i0 := "one" + v3 := []interface{}{v3i0, int(2), uint(3), nil} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3,\n (" + v3t5 + ") \n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Slice containing bytes. + v4 := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Nil slice. + v5 := []int(nil) + nv5 := (*[]int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "[]int" + v5s := "" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addStringDumpTests() { + // Standard string. + v := "test" + vLen := fmt.Sprintf("%d", len(v)) + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "(len=" + vLen + ") \"test\"" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addInterfaceDumpTests() { + // Nil interface. + var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addMapDumpTests() { + // Map with string keys and int vals. + k := "one" + kk := "two" + m := map[string]int{k: 1, kk: 2} + klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up + kkLen := fmt.Sprintf("%d", len(kk)) + mLen := fmt.Sprintf("%d", len(m)) + nilMap := map[string]int(nil) + nm := (*map[string]int)(nil) + pm := &m + mAddr := fmt.Sprintf("%p", pm) + pmAddr := fmt.Sprintf("%p", &pm) + mt := "map[string]int" + mt1 := "string" + mt2 := "int" + ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " + + "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen + + ") \"two\": (" + mt2 + ") 2\n}" + ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " + + "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen + + ") \"one\": (" + mt2 + ") 1\n}" + addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n") + addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n", + "(*"+mt+")("+mAddr+")("+ms2+")\n") + addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n", + "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n") + addDumpTest(nm, "(*"+mt+")()\n") + addDumpTest(nilMap, "("+mt+") \n") + + // Map with custom formatter type on pointer receiver only keys and vals. + k2 := pstringer("one") + v2 := pstringer("1") + m2 := map[pstringer]pstringer{k2: v2} + k2Len := fmt.Sprintf("%d", len(k2)) + v2Len := fmt.Sprintf("%d", len(v2)) + m2Len := fmt.Sprintf("%d", len(m2)) + nilMap2 := map[pstringer]pstringer(nil) + nm2 := (*map[pstringer]pstringer)(nil) + pm2 := &m2 + m2Addr := fmt.Sprintf("%p", pm2) + pm2Addr := fmt.Sprintf("%p", &pm2) + m2t := "map[spew_test.pstringer]spew_test.pstringer" + m2t1 := "spew_test.pstringer" + m2t2 := "spew_test.pstringer" + m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + + "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" + if spew.UnsafeDisabled { + m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + + ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + + ") \"1\"\n}" + } + addDumpTest(m2, "("+m2t+") "+m2s+"\n") + addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") + addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") + addDumpTest(nm2, "(*"+m2t+")()\n") + addDumpTest(nilMap2, "("+m2t+") \n") + + // Map with interface keys and values. + k3 := "one" + k3Len := fmt.Sprintf("%d", len(k3)) + m3 := map[interface{}]interface{}{k3: 1} + m3Len := fmt.Sprintf("%d", len(m3)) + nilMap3 := map[interface{}]interface{}(nil) + nm3 := (*map[interface{}]interface{})(nil) + pm3 := &m3 + m3Addr := fmt.Sprintf("%p", pm3) + pm3Addr := fmt.Sprintf("%p", &pm3) + m3t := "map[interface {}]interface {}" + m3t1 := "string" + m3t2 := "int" + m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + + "\"one\": (" + m3t2 + ") 1\n}" + addDumpTest(m3, "("+m3t+") "+m3s+"\n") + addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") + addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") + addDumpTest(nm3, "(*"+m3t+")()\n") + addDumpTest(nilMap3, "("+m3t+") \n") + + // Map with nil interface value. + k4 := "nil" + k4Len := fmt.Sprintf("%d", len(k4)) + m4 := map[string]interface{}{k4: nil} + m4Len := fmt.Sprintf("%d", len(m4)) + nilMap4 := map[string]interface{}(nil) + nm4 := (*map[string]interface{})(nil) + pm4 := &m4 + m4Addr := fmt.Sprintf("%p", pm4) + pm4Addr := fmt.Sprintf("%p", &pm4) + m4t := "map[string]interface {}" + m4t1 := "string" + m4t2 := "interface {}" + m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + + " \"nil\": (" + m4t2 + ") \n}" + addDumpTest(m4, "("+m4t+") "+m4s+"\n") + addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") + addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") + addDumpTest(nm4, "(*"+m4t+")()\n") + addDumpTest(nilMap4, "("+m4t+") \n") +} + +func addStructDumpTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Struct that contains another struct. + type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + + v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. + type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + + ") (len=5) stringer test2\n}" + v3sp := v3s + if spew.UnsafeDisabled { + v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) \"test2\"\n}" + v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) stringer test2\n}" + } + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Struct that contains embedded struct and field to same struct. + e := embed{"embedstr"} + eLen := fmt.Sprintf("%d", len("embedstr")) + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + + ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + + ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + + " \"embedstr\"\n })\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addUintptrDumpTests() { + // Null pointer. + v := uintptr(0) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + nv2 := (*uintptr)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addUnsafePointerDumpTests() { + // Null pointer. + v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addChanDumpTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFuncDumpTests() { + // Function with no params and no returns. + v := addIntDumpTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Function with param and no returns. + v2 := TestDump + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") +} + +func addCircularDumpTests() { + // Struct that is circular through self referencing. + type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + + vAddr + ")()\n })\n}" + vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") + + // Structs that are circular through cross referencing. + v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + + ")()\n })\n })\n}" + v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")()\n })\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") + + // Structs that are indirectly circular. + v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + + ")()\n })\n })\n })\n}" + v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")()\n })\n })\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") +} + +func addPanicDumpTests() { + // Type that panics in its Stringer interface. + v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addErrorDumpTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +// TestDump executes all of the tests described by dumpTests. +func TestDump(t *testing.T) { + // Setup tests. + addIntDumpTests() + addUintDumpTests() + addBoolDumpTests() + addFloatDumpTests() + addComplexDumpTests() + addArrayDumpTests() + addSliceDumpTests() + addStringDumpTests() + addInterfaceDumpTests() + addMapDumpTests() + addStructDumpTests() + addUintptrDumpTests() + addUnsafePointerDumpTests() + addChanDumpTests() + addFuncDumpTests() + addCircularDumpTests() + addPanicDumpTests() + addErrorDumpTests() + addCgoDumpTests() + + t.Logf("Running %d tests", len(dumpTests)) + for i, test := range dumpTests { + buf := new(bytes.Buffer) + spew.Fdump(buf, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) + continue + } + } +} + +func TestDumpSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + + "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + + "(len=1) \"3\"\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "(map[spew_test.stringer]int) (len=3) {\n" + + "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if spew.UnsafeDisabled { + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + + "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + + "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + + "}\n" + } + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "(map[spew_test.customError]int) (len=3) {\n" + + "(spew_test.customError) error: 1: (int) 1,\n" + + "(spew_test.customError) error: 2: (int) 2,\n" + + "(spew_test.customError) error: 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go new file mode 100644 index 0000000..6ab1808 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go @@ -0,0 +1,99 @@ +// Copyright (c) 2013-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This means the cgo tests are only added (and hence run) when +// specifially requested. This configuration is used because spew itself +// does not require cgo to run even though it does handle certain cgo types +// specially. Rather than forcing all clients to require cgo and an external +// C compiler just to run the tests, this scheme makes them optional. +// +build cgo,testcgo + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew/testdata" +) + +func addCgoDumpTests() { + // C char pointer. + v := testdata.GetCgoCharPointer() + nv := testdata.GetCgoNullCharPointer() + pv := &v + vcAddr := fmt.Sprintf("%p", v) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "*testdata._Ctype_char" + vs := "116" + addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(nv, "("+vt+")()\n") + + // C char array. + v2, v2l, v2c := testdata.GetCgoCharArray() + v2Len := fmt.Sprintf("%d", v2l) + v2Cap := fmt.Sprintf("%d", v2c) + v2t := "[6]testdata._Ctype_char" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " + + "{\n 00000000 74 65 73 74 32 00 " + + " |test2.|\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + + // C unsigned char array. + v3, v3l, v3c := testdata.GetCgoUnsignedCharArray() + v3Len := fmt.Sprintf("%d", v3l) + v3Cap := fmt.Sprintf("%d", v3c) + v3t := "[6]testdata._Ctype_unsignedchar" + v3t2 := "[6]testdata._Ctype_uchar" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " + + "{\n 00000000 74 65 73 74 33 00 " + + " |test3.|\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n") + + // C signed char array. + v4, v4l, v4c := testdata.GetCgoSignedCharArray() + v4Len := fmt.Sprintf("%d", v4l) + v4Cap := fmt.Sprintf("%d", v4c) + v4t := "[6]testdata._Ctype_schar" + v4t2 := "testdata._Ctype_schar" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 + + ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 + + ") 0\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + + // C uint8_t array. + v5, v5l, v5c := testdata.GetCgoUint8tArray() + v5Len := fmt.Sprintf("%d", v5l) + v5Cap := fmt.Sprintf("%d", v5c) + v5t := "[6]testdata._Ctype_uint8_t" + v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " + + "{\n 00000000 74 65 73 74 35 00 " + + " |test5.|\n}" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + + // C typedefed unsigned char array. + v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() + v6Len := fmt.Sprintf("%d", v6l) + v6Cap := fmt.Sprintf("%d", v6c) + v6t := "[6]testdata._Ctype_custom_uchar_t" + v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + + "{\n 00000000 74 65 73 74 36 00 " + + " |test6.|\n}" + addDumpTest(v6, "("+v6t+") "+v6s+"\n") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go new file mode 100644 index 0000000..52a0971 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go @@ -0,0 +1,26 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when either cgo is not supported or "-tags testcgo" is not added to the go +// test command line. This file intentionally does not setup any cgo tests in +// this scenario. +// +build !cgo !testcgo + +package spew_test + +func addCgoDumpTests() { + // Don't add any tests for cgo since this file is only compiled when + // there should not be any cgo tests. +} diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go new file mode 100644 index 0000000..c6ec8c6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/example_test.go @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew" +) + +type Flag int + +const ( + flagOne Flag = iota + flagTwo +) + +var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", +} + +func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) +} + +type Bar struct { + data uintptr +} + +type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} +} + +// This example demonstrates how to use Dump to dump variables to stdout. +func ExampleDump() { + // The following package level declarations are assumed for this example: + /* + type Flag int + + const ( + flagOne Flag = iota + flagTwo + ) + + var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", + } + + func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) + } + + type Bar struct { + data uintptr + } + + type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} + } + */ + + // Setup some sample data structures for the example. + bar := Bar{uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + f := Flag(5) + b := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + + // Dump! + spew.Dump(s1, f, b) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // (spew_test.Flag) Unknown flag (5) + // ([]uint8) (len=34 cap=34) { + // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + // 00000020 31 32 |12| + // } + // +} + +// This example demonstrates how to use Printf to display a variable with a +// format string and inline formatting. +func ExamplePrintf() { + // Create a double pointer to a uint 8. + ui8 := uint8(5) + pui8 := &ui8 + ppui8 := &pui8 + + // Create a circular data type. + type circular struct { + ui8 uint8 + c *circular + } + c := circular{ui8: 1} + c.c = &c + + // Print! + spew.Printf("ppui8: %v\n", ppui8) + spew.Printf("circular: %v\n", c) + + // Output: + // ppui8: <**>5 + // circular: {1 <*>{1 <*>}} +} + +// This example demonstrates how to use a ConfigState. +func ExampleConfigState() { + // Modify the indent level of the ConfigState only. The global + // configuration is not modified. + scs := spew.ConfigState{Indent: "\t"} + + // Output using the ConfigState instance. + v := map[string]int{"one": 1} + scs.Printf("v: %v\n", v) + scs.Dump(v) + + // Output: + // v: map[one:1] + // (map[string]int) (len=1) { + // (string) (len=3) "one": (int) 1 + // } +} + +// This example demonstrates how to use ConfigState.Dump to dump variables to +// stdout +func ExampleConfigState_Dump() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances with different indentation. + scs := spew.ConfigState{Indent: "\t"} + scs2 := spew.ConfigState{Indent: " "} + + // Setup some sample data structures for the example. + bar := Bar{uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + + // Dump using the ConfigState instances. + scs.Dump(s1) + scs2.Dump(s1) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // +} + +// This example demonstrates how to use ConfigState.Printf to display a variable +// with a format string and inline formatting. +func ExampleConfigState_Printf() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances and modify the method handling of the + // first ConfigState only. + scs := spew.NewDefaultConfig() + scs2 := spew.NewDefaultConfig() + scs.DisableMethods = true + + // Alternatively + // scs := spew.ConfigState{Indent: " ", DisableMethods: true} + // scs2 := spew.ConfigState{Indent: " "} + + // This is of type Flag which implements a Stringer and has raw value 1. + f := flagTwo + + // Dump using the ConfigState instances. + scs.Printf("f: %v\n", f) + scs2.Printf("f: %v\n", f) + + // Output: + // f: 1 + // f: flagTwo +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..c49875b --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. +func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. + if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. +*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go new file mode 100644 index 0000000..f9b93ab --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go @@ -0,0 +1,1558 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. + +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +- Type that has a custom Error interface +- %x passthrough with uint +- %#x passthrough with uint +- %f passthrough with precision +- %f passthrough with width and precision +- %d passthrough with width +- %q passthrough with string +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// formatterTest is used to describe a test to be performed against NewFormatter. +type formatterTest struct { + format string + in interface{} + wants []string +} + +// formatterTests houses all of the tests to be performed against NewFormatter. +var formatterTests = make([]formatterTest, 0) + +// addFormatterTest is a helper method to append the passed input and desired +// result to formatterTests. +func addFormatterTest(format string, in interface{}, wants ...string) { + test := formatterTest{format, in, wants} + formatterTests = append(formatterTests, test) +} + +func addIntFormatterTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max int64. + v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") +} + +func addUintFormatterTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max uint. + v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") +} + +func addBoolFormatterTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFloatFormatterTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addComplexFormatterTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard complex128. + v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addArrayFormatterTests() { + // Array containing standard ints. + v := [3]int{1, 2, 3} + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[3]int" + vs := "[1 2 3]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Array containing type with custom formatter on pointer receiver only. + v2 := [3]pstringer{"1", "2", "3"} + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[3]spew_test.pstringer" + v2sp := "[stringer 1 stringer 2 stringer 3]" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "[1 2 3]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2sp) + addFormatterTest("%v", &pv2, "<**>"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Array containing interfaces. + v3 := [3]interface{}{"one", int(2), uint(3)} + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "[one 2 3]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addSliceFormatterTests() { + // Slice containing standard float32 values. + v := []float32{3.14, 6.28, 12.56} + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[]float32" + vs := "[3.14 6.28 12.56]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Slice containing type with custom formatter on pointer receiver only. + v2 := []pstringer{"1", "2", "3"} + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[]spew_test.pstringer" + v2s := "[stringer 1 stringer 2 stringer 3]" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Slice containing interfaces. + v3 := []interface{}{"one", int(2), uint(3), nil} + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "[one 2 3 ]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + + ")]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Nil slice. + var v4 []int + nv4 := (*[]int)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]int" + v4s := "" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStringFormatterTests() { + // Standard string. + v := "test" + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "test" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addInterfaceFormatterTests() { + // Nil interface. + var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addMapFormatterTests() { + // Map with string keys and int vals. + v := map[string]int{"one": 1, "two": 2} + nilMap := map[string]int(nil) + nv := (*map[string]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "map[string]int" + vs := "map[one:1 two:2]" + vs2 := "map[two:2 one:1]" + addFormatterTest("%v", v, vs, vs2) + addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, + "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) + addFormatterTest("%#v", nilMap, "("+vt+")"+"") + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, + "(*"+vt+")("+vAddr+")"+vs2) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, + "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%#+v", nilMap, "("+vt+")"+"") + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Map with custom formatter type on pointer receiver only keys and vals. + v2 := map[pstringer]pstringer{"one": "1"} + nv2 := (*map[pstringer]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "map[spew_test.pstringer]spew_test.pstringer" + v2s := "map[stringer one:stringer 1]" + if spew.UnsafeDisabled { + v2s = "map[one:1]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Map with interface keys and values. + v3 := map[interface{}]interface{}{"one": 1} + nv3 := (*map[interface{}]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "map[interface {}]interface {}" + v3t1 := "string" + v3t2 := "int" + v3s := "map[one:1]" + v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Map with nil interface value + v4 := map[string]interface{}{"nil": nil} + nv4 := (*map[string]interface{})(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "map[string]interface {}" + v4t1 := "interface {}" + v4s := "map[nil:]" + v4s2 := "map[nil:(" + v4t1 + ")]" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStructFormatterTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{127 255}" + vs2 := "{a:127 b:255}" + vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs3) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs3) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Struct that contains another struct. + type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{{127 255} true}" + v2s2 := "{s1:{a:127 b:255} b:true}" + v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + + v2t5 + ")true}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s2) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. + type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{stringer test stringer test2}" + v3sp := v3s + v3s2 := "{s:stringer test S:stringer test2}" + v3s2p := v3s2 + v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" + v3s3p := v3s3 + if spew.UnsafeDisabled { + v3s = "{test test2}" + v3sp = "{test stringer test2}" + v3s2 = "{s:test S:test2}" + v3s2p = "{s:test S:stringer test2}" + v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" + v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" + } + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3sp) + addFormatterTest("%v", &pv3, "<**>"+v3sp) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s2) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Struct that contains embedded struct and field to same struct. + e := embed{"embedstr"} + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{<*>{embedstr} <*>{embedstr}}" + v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + + "){a:embedstr}}" + v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + + "){a:(" + v4t3 + ")embedstr}}" + v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + + ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s2) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addUintptrFormatterTests() { + // Null pointer. + v := uintptr(0) + nv := (*uintptr)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addUnsafePointerFormatterTests() { + // Null pointer. + v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addChanFormatterTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFuncFormatterTests() { + // Function with no params and no returns. + v := addIntFormatterTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Function with param and no returns. + v2 := TestFormatter + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addCircularFormatterTests() { + // Struct that is circular through self referencing. + type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{<*>{<*>}}" + vs2 := "{<*>}" + vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" + vs4 := "{c:<*>(" + vAddr + ")}" + vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" + vs6 := "{c:(*" + vt + ")}" + vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + + ")}}" + vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs2) + addFormatterTest("%+v", v, vs3) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) + addFormatterTest("%#v", v, "("+vt+")"+vs5) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) + addFormatterTest("%#+v", v, "("+vt+")"+vs7) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) + + // Structs that are circular through cross referencing. + v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{<*>{<*>{<*>}}}" + v2s2 := "{<*>{<*>}}" + v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + + ts2Addr + ")}}}" + v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" + v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + + ")}}}" + v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" + v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + + ")}}}" + v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + ")}}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s2) + addFormatterTest("%v", &pv2, "<**>"+v2s2) + addFormatterTest("%+v", v2, v2s3) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) + + // Structs that are indirectly circular. + v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{<*>{<*>{<*>{<*>}}}}" + v3s2 := "{<*>{<*>{<*>}}}" + v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" + v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + ")}}}" + v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + "){ps2:(*" + v3t2 + ")}}}}" + v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + ")}}}" + v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + + ")(" + tic2Addr + ")}}}}" + v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s2) + addFormatterTest("%v", &pv3, "<**>"+v3s2) + addFormatterTest("%+v", v3, v3s3) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) + addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) +} + +func addPanicFormatterTests() { + // Type that panics in its Stringer interface. + v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addErrorFormatterTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addPassthroughFormatterTests() { + // %x passthrough with uint. + v := uint(4294967295) + pv := &v + vAddr := fmt.Sprintf("%x", pv) + pvAddr := fmt.Sprintf("%x", &pv) + vs := "ffffffff" + addFormatterTest("%x", v, vs) + addFormatterTest("%x", pv, vAddr) + addFormatterTest("%x", &pv, pvAddr) + + // %#x passthrough with uint. + v2 := int(2147483647) + pv2 := &v2 + v2Addr := fmt.Sprintf("%#x", pv2) + pv2Addr := fmt.Sprintf("%#x", &pv2) + v2s := "0x7fffffff" + addFormatterTest("%#x", v2, v2s) + addFormatterTest("%#x", pv2, v2Addr) + addFormatterTest("%#x", &pv2, pv2Addr) + + // %f passthrough with precision. + addFormatterTest("%.2f", 3.1415, "3.14") + addFormatterTest("%.3f", 3.1415, "3.142") + addFormatterTest("%.4f", 3.1415, "3.1415") + + // %f passthrough with width and precision. + addFormatterTest("%5.2f", 3.1415, " 3.14") + addFormatterTest("%6.3f", 3.1415, " 3.142") + addFormatterTest("%7.4f", 3.1415, " 3.1415") + + // %d passthrough with width. + addFormatterTest("%3d", 127, "127") + addFormatterTest("%4d", 127, " 127") + addFormatterTest("%5d", 127, " 127") + + // %q passthrough with string. + addFormatterTest("%q", "test", "\"test\"") +} + +// TestFormatter executes all of the tests described by formatterTests. +func TestFormatter(t *testing.T) { + // Setup tests. + addIntFormatterTests() + addUintFormatterTests() + addBoolFormatterTests() + addFloatFormatterTests() + addComplexFormatterTests() + addArrayFormatterTests() + addSliceFormatterTests() + addStringFormatterTests() + addInterfaceFormatterTests() + addMapFormatterTests() + addStructFormatterTests() + addUintptrFormatterTests() + addUnsafePointerFormatterTests() + addChanFormatterTests() + addFuncFormatterTests() + addCircularFormatterTests() + addPanicFormatterTests() + addErrorFormatterTests() + addPassthroughFormatterTests() + + t.Logf("Running %d tests", len(formatterTests)) + for i, test := range formatterTests { + buf := new(bytes.Buffer) + spew.Fprintf(buf, test.format, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, + stringizeWants(test.wants)) + continue + } + } +} + +type testStruct struct { + x int +} + +func (ts testStruct) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +type testStructP struct { + x int +} + +func (ts *testStructP) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +func TestPrintSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "map[1:1 2:2 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if spew.UnsafeDisabled { + expected = "map[1:1 2:2 3:3]" + } + if s != expected { + t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) + } + + if !spew.UnsafeDisabled { + s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) + } + } + + s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "map[error: 1:1 error: 2:2 error: 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go new file mode 100644 index 0000000..20a9cfe --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internal_test.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +This test file is part of the spew package rather than than the spew_test +package because it needs access to internals to properly test certain cases +which are not possible via the public interface since they should never happen. +*/ + +package spew + +import ( + "bytes" + "reflect" + "testing" +) + +// dummyFmtState implements a fake fmt.State to use for testing invalid +// reflect.Value handling. This is necessary because the fmt package catches +// invalid values before invoking the formatter on them. +type dummyFmtState struct { + bytes.Buffer +} + +func (dfs *dummyFmtState) Flag(f int) bool { + if f == int('+') { + return true + } + return false +} + +func (dfs *dummyFmtState) Precision() (int, bool) { + return 0, false +} + +func (dfs *dummyFmtState) Width() (int, bool) { + return 0, false +} + +// TestInvalidReflectValue ensures the dump and formatter code handles an +// invalid reflect value properly. This needs access to internal state since it +// should never happen in real code and therefore can't be tested via the public +// API. +func TestInvalidReflectValue(t *testing.T) { + i := 1 + + // Dump invalid reflect value. + v := new(reflect.Value) + buf := new(bytes.Buffer) + d := dumpState{w: buf, cs: &Config} + d.dump(*v) + s := buf.String() + want := "" + if s != want { + t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want) + } + i++ + + // Formatter invalid reflect value. + buf2 := new(dummyFmtState) + f := formatState{value: *v, cs: &Config, fs: buf2} + f.format(*v) + s = buf2.String() + want = "" + if s != want { + t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want) + } +} + +// SortValues makes the internal sortValues function available to the test +// package. +func SortValues(values []reflect.Value, cs *ConfigState) { + sortValues(values, cs) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go new file mode 100644 index 0000000..a0c612e --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go @@ -0,0 +1,102 @@ +// Copyright (c) 2013-2016 Dave Collins + +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. + +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +/* +This test file is part of the spew package rather than than the spew_test +package because it needs access to internals to properly test certain cases +which are not possible via the public interface since they should never happen. +*/ + +package spew + +import ( + "bytes" + "reflect" + "testing" + "unsafe" +) + +// changeKind uses unsafe to intentionally change the kind of a reflect.Value to +// the maximum kind value which does not exist. This is needed to test the +// fallback code which punts to the standard fmt library for new types that +// might get added to the language. +func changeKind(v *reflect.Value, readOnly bool) { + rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) + *rvf = *rvf | ((1< + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go new file mode 100644 index 0000000..b70466c --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew_test.go @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// spewFunc is used to identify which public function of the spew package or +// ConfigState a test applies to. +type spewFunc int + +const ( + fCSFdump spewFunc = iota + fCSFprint + fCSFprintf + fCSFprintln + fCSPrint + fCSPrintln + fCSSdump + fCSSprint + fCSSprintf + fCSSprintln + fCSErrorf + fCSNewFormatter + fErrorf + fFprint + fFprintln + fPrint + fPrintln + fSdump + fSprint + fSprintf + fSprintln +) + +// Map of spewFunc values to names for pretty printing. +var spewFuncStrings = map[spewFunc]string{ + fCSFdump: "ConfigState.Fdump", + fCSFprint: "ConfigState.Fprint", + fCSFprintf: "ConfigState.Fprintf", + fCSFprintln: "ConfigState.Fprintln", + fCSSdump: "ConfigState.Sdump", + fCSPrint: "ConfigState.Print", + fCSPrintln: "ConfigState.Println", + fCSSprint: "ConfigState.Sprint", + fCSSprintf: "ConfigState.Sprintf", + fCSSprintln: "ConfigState.Sprintln", + fCSErrorf: "ConfigState.Errorf", + fCSNewFormatter: "ConfigState.NewFormatter", + fErrorf: "spew.Errorf", + fFprint: "spew.Fprint", + fFprintln: "spew.Fprintln", + fPrint: "spew.Print", + fPrintln: "spew.Println", + fSdump: "spew.Sdump", + fSprint: "spew.Sprint", + fSprintf: "spew.Sprintf", + fSprintln: "spew.Sprintln", +} + +func (f spewFunc) String() string { + if s, ok := spewFuncStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) +} + +// spewTest is used to describe a test to be performed against the public +// functions of the spew package or ConfigState. +type spewTest struct { + cs *spew.ConfigState + f spewFunc + format string + in interface{} + want string +} + +// spewTests houses the tests to be performed against the public functions of +// the spew package and ConfigState. +// +// These tests are only intended to ensure the public functions are exercised +// and are intentionally not exhaustive of types. The exhaustive type +// tests are handled in the dump and format tests. +var spewTests []spewTest + +// redirStdout is a helper function to return the standard output from f as a +// byte slice. +func redirStdout(f func()) ([]byte, error) { + tempFile, err := ioutil.TempFile("", "ss-test") + if err != nil { + return nil, err + } + fileName := tempFile.Name() + defer os.Remove(fileName) // Ignore error + + origStdout := os.Stdout + os.Stdout = tempFile + f() + os.Stdout = origStdout + tempFile.Close() + + return ioutil.ReadFile(fileName) +} + +func initSpewTests() { + // Config states with various settings. + scsDefault := spew.NewDefaultConfig() + scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} + scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} + scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} + scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} + scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true} + scsNoCap := &spew.ConfigState{DisableCapacities: true} + + // Variables for tests on types which implement Stringer interface with and + // without a pointer receiver. + ts := stringer("test") + tps := pstringer("test") + + type ptrTester struct { + s *struct{} + } + tptr := &ptrTester{s: &struct{}{}} + + // depthTester is used to test max depth handling for structs, array, slices + // and maps. + type depthTester struct { + ic indirCir1 + arr [1]string + slice []string + m map[string]int + } + dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, + map[string]int{"one": 1}} + + // Variable for tests on types which implement error interface. + te := customError(10) + + spewTests = []spewTest{ + {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, + {scsDefault, fCSFprint, "", int16(32767), "32767"}, + {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, + {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, + {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, + {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, + {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, + {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, + {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, + {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, + {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, + {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, + {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, + {scsDefault, fFprint, "", float32(3.14), "3.14"}, + {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, + {scsDefault, fPrint, "", true, "true"}, + {scsDefault, fPrintln, "", false, "false\n"}, + {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, + {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, + {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, + {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, + {scsNoMethods, fCSFprint, "", ts, "test"}, + {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, + {scsNoMethods, fCSFprint, "", tps, "test"}, + {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, + {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, + {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, + {scsNoPmethods, fCSFprint, "", tps, "test"}, + {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, + {scsMaxDepth, fCSFprint, "", dt, "{{} [] [] map[]}"}, + {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + + " ic: (spew_test.indirCir1) {\n \n },\n" + + " arr: ([1]string) (len=1 cap=1) {\n \n },\n" + + " slice: ([]string) (len=1 cap=1) {\n \n },\n" + + " m: (map[string]int) (len=1) {\n \n }\n}\n"}, + {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, + {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + + "(len=4) (stringer test) \"test\"\n"}, + {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, + {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + + "(error: 10) 10\n"}, + {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"}, + {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"}, + {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"}, + {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"}, + } +} + +// TestSpew executes all of the tests described by spewTests. +func TestSpew(t *testing.T) { + initSpewTests() + + t.Logf("Running %d tests", len(spewTests)) + for i, test := range spewTests { + buf := new(bytes.Buffer) + switch test.f { + case fCSFdump: + test.cs.Fdump(buf, test.in) + + case fCSFprint: + test.cs.Fprint(buf, test.in) + + case fCSFprintf: + test.cs.Fprintf(buf, test.format, test.in) + + case fCSFprintln: + test.cs.Fprintln(buf, test.in) + + case fCSPrint: + b, err := redirStdout(func() { test.cs.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSPrintln: + b, err := redirStdout(func() { test.cs.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSSdump: + str := test.cs.Sdump(test.in) + buf.WriteString(str) + + case fCSSprint: + str := test.cs.Sprint(test.in) + buf.WriteString(str) + + case fCSSprintf: + str := test.cs.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fCSSprintln: + str := test.cs.Sprintln(test.in) + buf.WriteString(str) + + case fCSErrorf: + err := test.cs.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fCSNewFormatter: + fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in)) + + case fErrorf: + err := spew.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fFprint: + spew.Fprint(buf, test.in) + + case fFprintln: + spew.Fprintln(buf, test.in) + + case fPrint: + b, err := redirStdout(func() { spew.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fPrintln: + b, err := redirStdout(func() { spew.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fSdump: + str := spew.Sdump(test.in) + buf.WriteString(str) + + case fSprint: + str := spew.Sprint(test.in) + buf.WriteString(str) + + case fSprintf: + str := spew.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fSprintln: + str := spew.Sprintln(test.in) + buf.WriteString(str) + + default: + t.Errorf("%v #%d unrecognized function", test.f, i) + continue + } + s := buf.String() + if test.want != s { + t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) + continue + } + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go new file mode 100644 index 0000000..5c87dd4 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go @@ -0,0 +1,82 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This code should really only be in the dumpcgo_test.go file, +// but unfortunately Go will not allow cgo in test files, so this is a +// workaround to allow cgo types to be tested. This configuration is used +// because spew itself does not require cgo to run even though it does handle +// certain cgo types specially. Rather than forcing all clients to require cgo +// and an external C compiler just to run the tests, this scheme makes them +// optional. +// +build cgo,testcgo + +package testdata + +/* +#include +typedef unsigned char custom_uchar_t; + +char *ncp = 0; +char *cp = "test"; +char ca[6] = {'t', 'e', 's', 't', '2', '\0'}; +unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'}; +signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'}; +uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'}; +custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'}; +*/ +import "C" + +// GetCgoNullCharPointer returns a null char pointer via cgo. This is only +// used for tests. +func GetCgoNullCharPointer() interface{} { + return C.ncp +} + +// GetCgoCharPointer returns a char pointer via cgo. This is only used for +// tests. +func GetCgoCharPointer() interface{} { + return C.cp +} + +// GetCgoCharArray returns a char array via cgo and the array's len and cap. +// This is only used for tests. +func GetCgoCharArray() (interface{}, int, int) { + return C.ca, len(C.ca), cap(C.ca) +} + +// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the +// array's len and cap. This is only used for tests. +func GetCgoUnsignedCharArray() (interface{}, int, int) { + return C.uca, len(C.uca), cap(C.uca) +} + +// GetCgoSignedCharArray returns a signed char array via cgo and the array's len +// and cap. This is only used for tests. +func GetCgoSignedCharArray() (interface{}, int, int) { + return C.sca, len(C.sca), cap(C.sca) +} + +// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and +// cap. This is only used for tests. +func GetCgoUint8tArray() (interface{}, int, int) { + return C.ui8ta, len(C.ui8ta), cap(C.ui8ta) +} + +// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via +// cgo and the array's len and cap. This is only used for tests. +func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) { + return C.tuca, len(C.tuca), cap(C.tuca) +} diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt new file mode 100644 index 0000000..2cd087a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/test_coverage.txt @@ -0,0 +1,61 @@ + +github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) +github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) +github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) +github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) +github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) +github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) +github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) +github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) +github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) +github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) +github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) +github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) +github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) +github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) +github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) +github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) +github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) +github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) + diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore new file mode 100644 index 0000000..1c3ae0a --- /dev/null +++ b/vendor/github.com/docker/distribution/.gitignore @@ -0,0 +1,37 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# never checkin from the bin file (for now) +bin/* + +# Test key files +*.pem + +# Cover profiles +*.out + +# Editor/IDE specific files. +*.sublime-project +*.sublime-workspace diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap new file mode 100644 index 0000000..2d68669 --- /dev/null +++ b/vendor/github.com/docker/distribution/.mailmap @@ -0,0 +1,19 @@ +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland +Brian Bland Brian Bland +Josh Hawn Josh Hawn +Richard Scothern Richard +Richard Scothern Richard Scothern +Andrew Meredith Andrew Meredith +harche harche +Jessie Frazelle +Sharif Nassar Sharif Nassar +Sven Dowideit Sven Dowideit +Vincent Giersch Vincent Giersch +davidli davidli +Omer Cohen Omer Cohen +Eric Yang Eric Yang +Nikita Tarasov Nikita +Misty Stanley-Jones Misty Stanley-Jones diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS new file mode 100644 index 0000000..aaf0298 --- /dev/null +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -0,0 +1,182 @@ +Aaron Lehmann +Aaron Schlesinger +Aaron Vinson +Adam Duke +Adam Enger +Adrian Mouat +Ahmet Alp Balkan +Alex Chan +Alex Elman +Alexey Gladkov +allencloud +amitshukla +Amy Lindburg +Andrew Hsu +Andrew Meredith +Andrew T Nguyen +Andrey Kostov +Andy Goldstein +Anis Elleuch +Antonio Mercado +Antonio Murdaca +Anton Tiurin +Anusha Ragunathan +a-palchikov +Arien Holthuizen +Arnaud Porterie +Arthur Baars +Asuka Suzuki +Avi Miller +Ayose Cazorla +BadZen +Ben Bodenmiller +Ben Firshman +bin liu +Brian Bland +burnettk +Carson A +Cezar Sa Espinola +Charles Smith +Chris Dillon +cuiwei13 +cyli +Daisuke Fujita +Daniel Huhn +Darren Shepherd +Dave Trombley +Dave Tucker +David Lawrence +davidli +David Verhasselt +David Xia +Dejan Golja +Derek McGowan +Diogo Mónica +DJ Enriquez +Donald Huang +Doug Davis +Edgar Lee +Eric Yang +Fabio Berchtold +Fabio Huser +farmerworking +Felix Yan +Florentin Raud +Frank Chen +Frederick F. Kautz IV +gabriell nascimento +Gleb Schukin +harche +Henri Gomez +Hua Wang +Hu Keping +HuKeping +Ian Babrou +igayoso +Jack Griffin +James Findley +Jason Freidman +Jason Heiss +Jeff Nickoloff +Jess Frazelle +Jessie Frazelle +jhaohai +Jianqing Wang +Jihoon Chung +Joao Fernandes +John Mulhausen +John Starks +Jonathan Boulle +Jon Johnson +Jon Poler +Jordan Liggitt +Josh Chorlton +Josh Hawn +Julien Fernandez +Keerthan Mala +Kelsey Hightower +Kenneth Lim +Kenny Leung +Ke Xu +liuchang0812 +Liu Hua +Li Yi +Lloyd Ramey +Louis Kottmann +Luke Carpenter +Marcus Martins +Mary Anthony +Matt Bentley +Matt Duch +Matthew Green +Matt Moore +Matt Robenolt +Michael Prokop +Michal Minar +Michal Minář +Mike Brown +Miquel Sabaté +Misty Stanley-Jones +Morgan Bauer +moxiegirl +Nathan Sullivan +nevermosby +Nghia Tran +Nikita Tarasov +Noah Treuhaft +Nuutti Kotivuori +Oilbeater +Olivier Gambier +Olivier Jacques +Omer Cohen +Patrick Devine +Phil Estes +Philip Misiowiec +Pierre-Yves Ritschard +Qiao Anran +Randy Barlow +Richard Scothern +Rodolfo Carvalho +Rusty Conover +Sean Boran +Sebastiaan van Stijn +Sebastien Coavoux +Serge Dubrouski +Sharif Nassar +Shawn Falkner-Horine +Shreyas Karnik +Simon Thulbourn +spacexnice +Spencer Rinehart +Stan Hu +Stefan Majewsky +Stefan Weil +Stephen J Day +Sungho Moon +Sven Dowideit +Sylvain Baubeau +Ted Reed +tgic +Thomas Sjögren +Tianon Gravi +Tibor Vass +Tonis Tiigi +Tony Holdstock-Brown +Trevor Pounds +Troels Thomsen +Victoria Bialas +Victor Vieux +Vincent Batts +Vincent Demeester +Vincent Giersch +weiyuan.yl +W. Trevor King +xg.song +xiekeyang +Yann ROBERT +yaoyao.xyy +yixi zhang +yuexiao-wang +yuzou +zhouhaibing089 +姜继忠 diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md new file mode 100644 index 0000000..2d5a101 --- /dev/null +++ b/vendor/github.com/docker/distribution/BUILDING.md @@ -0,0 +1,119 @@ + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). + +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). + +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. + +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. + +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/tools/godep github.com/golang/lint/golint + +**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/Sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... + ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendored +Godeps directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry -version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md new file mode 100644 index 0000000..b1a5c68 --- /dev/null +++ b/vendor/github.com/docker/distribution/CHANGELOG.md @@ -0,0 +1,114 @@ +# Changelog + +## 2.6.1 (2017-04-05) + +#### Registry +- Fix `Forwarded` header handling, revert use of `X-Forwarded-Port` +- Use driver `Stat` for registry health check + +## 2.6.0 (2017-01-18) + +#### Storage +- S3: fixed bug in delete due to read-after-write inconsistency +- S3: allow EC2 IAM roles to be used when authorizing region endpoints +- S3: add Object ACL Support +- S3: fix delete method's notion of subpaths +- S3: use multipart upload API in `Move` method for performance +- S3: add v2 signature signing for legacy S3 clones +- Swift: add simple heuristic to detect incomplete DLOs during read ops +- Swift: support different user and tenant domains +- Swift: bulk deletes in chunks +- Aliyun OSS: fix delete method's notion of subpaths +- Aliyun OSS: optimize data copy after upload finishes +- Azure: close leaking response body +- Fix storage drivers dropping non-EOF errors when listing repositories +- Compare path properly when listing repositories in catalog +- Add a foreign layer URL host whitelist +- Improve catalog enumerate runtime + +#### Registry +- Export `storage.CreateOptions` in top-level package +- Enable notifications to endpoints that use self-signed certificates +- Properly validate multi-URL foreign layers +- Add control over validation of URLs in pushed manifests +- Proxy mode: fix socket leak when pull is cancelled +- Tag service: properly handle error responses on HEAD request +- Support for custom authentication URL in proxying registry +- Add configuration option to disable access logging +- Add notification filtering by target media type +- Manifest: `References()` returns all children +- Honor `X-Forwarded-Port` and Forwarded headers +- Reference: Preserve tag and digest in With* functions +- Add policy configuration for enforcing repository classes + +#### Client +- Changes the client Tags `All()` method to follow links +- Allow registry clients to connect via HTTP2 +- Better handling of OAuth errors in client + +#### Spec +- Manifest: clarify relationship between urls and foreign layers +- Authorization: add support for repository classes + +#### Manifest +- Override media type returned from `Stat()` for existing manifests +- Add plugin mediatype to distribution manifest + +#### Docs +- Document `TOOMANYREQUESTS` error code +- Document required Let's Encrypt port +- Improve documentation around implementation of OAuth2 +- Improve documentation for configuration + +#### Auth +- Add support for registry type in scope +- Add support for using v2 ping challenges for v1 +- Add leeway to JWT `nbf` and `exp` checking +- htpasswd: dynamically parse htpasswd file +- Fix missing auth headers with PATCH HTTP request when pushing to default port + +#### Dockerfile +- Update to go1.7 +- Reorder Dockerfile steps for better layer caching + +#### Notes + +Documentation has moved to the documentation repository at +`github.com/docker/docker.github.io/tree/master/registry` + +The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. + + +## 2.5.0 (2016-06-14) + +#### Storage +- Ensure uploads directory is cleaned after upload is committed +- Add ability to cap concurrent operations in filesystem driver +- S3: Add 'us-gov-west-1' to the valid region list +- Swift: Handle ceph not returning Last-Modified header for HEAD requests +- Add redirect middleware + +#### Registry +- Add support for blobAccessController middleware +- Add support for layers from foreign sources +- Remove signature store +- Add support for Let's Encrypt +- Correct yaml key names in configuration + +#### Client +- Add option to get content digest from manifest get + +#### Spec +- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported +- Clarify API documentation around catalog fetch behavior + +#### API +- Support returning HTTP 429 (Too Many Requests) + +#### Documentation +- Update auth documentation examples to show "expires in" as int + +#### Docker Image +- Use Alpine Linux as base image + + diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md new file mode 100644 index 0000000..7cc7aed --- /dev/null +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -0,0 +1,140 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... + + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is + +Then please do not open an issue here yet - you should first try one of the following support forums: + + - irc: #docker-distribution on freenode + - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. + + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of: + - `docker version` + - `docker info` + - `docker exec registry -version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing a patch for a known bug, or a small correction + +You should follow the basic GitHub workflow: + + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR + +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: + + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` + +Some simple rules to ensure quick merge: + + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits + +## Contributing new features + +You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. + +If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. + +Then you should submit your implementation, clearly linking to the issue (and possible proposal). + +Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. + +It's mandatory to: + + - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) + - address maintainers' comments and modify your submission accordingly + - write tests for any new code + +Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. + +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](http://golang.org/doc/effective_go.html). The +[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile new file mode 100644 index 0000000..426954a --- /dev/null +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -0,0 +1,18 @@ +FROM golang:1.7-alpine + +ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution +ENV DOCKER_BUILDTAGS include_oss include_gcs + +RUN set -ex \ + && apk add --no-cache make git + +WORKDIR $DISTRIBUTION_DIR +COPY . $DISTRIBUTION_DIR +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml + +RUN make PREFIX=/go clean binaries + +VOLUME ["/var/lib/registry"] +EXPOSE 5000 +ENTRYPOINT ["registry"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/Godeps/Godeps.json b/vendor/github.com/docker/distribution/Godeps/Godeps.json new file mode 100644 index 0000000..dbdd891 --- /dev/null +++ b/vendor/github.com/docker/distribution/Godeps/Godeps.json @@ -0,0 +1,458 @@ +{ + "ImportPath": "github.com/docker/distribution", + "GoVersion": "go1.6", + "GodepVersion": "v74", + "Packages": [ + "./..." + ], + "Deps": [ + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Comment": "v5.0.0-beta-6-g0b5fe2a", + "Rev": "0b5fe2abe0271ba07049eacaa65922d67c319543" + }, + { + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" + }, + { + "ImportPath": "github.com/Sirupsen/logrus/formatters/logstash", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/client", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/request", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/session", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudfront/sign", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/bugsnag/bugsnag-go", + "Comment": "v1.0.2-5-gb1d1530", + "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" + }, + { + "ImportPath": "github.com/bugsnag/bugsnag-go/errors", + "Comment": "v1.0.2-5-gb1d1530", + "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" + }, + { + "ImportPath": "github.com/bugsnag/osext", + "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" + }, + { + "ImportPath": "github.com/bugsnag/panicwrap", + "Comment": "1.0.0-2-ge2c2850", + "Rev": "e2c28503fcd0675329da73bf48b33404db873782" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/common", + "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/oss", + "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/util", + "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" + }, + { + "ImportPath": "github.com/docker/goamz/aws", + "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" + }, + { + "ImportPath": "github.com/docker/goamz/s3", + "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" + }, + { + "ImportPath": "github.com/docker/libtrust", + "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" + }, + { + "ImportPath": "github.com/garyburd/redigo/internal", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/garyburd/redigo/redis", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/golang/protobuf/proto", + "Rev": "8d92cf5fc15a4382f8964b08e1f42a75c0591aa3" + }, + { + "ImportPath": "github.com/gorilla/context", + "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" + }, + { + "ImportPath": "github.com/gorilla/handlers", + "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" + }, + { + "ImportPath": "github.com/gorilla/mux", + "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" + }, + { + "ImportPath": "github.com/inconshreveable/mousetrap", + "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" + }, + { + "ImportPath": "github.com/ncw/swift", + "Rev": "ce444d6d47c51d4dda9202cd38f5094dd8e27e86" + }, + { + "ImportPath": "github.com/ncw/swift/swifttest", + "Rev": "ce444d6d47c51d4dda9202cd38f5094dd8e27e86" + }, + { + "ImportPath": "github.com/spf13/cobra", + "Rev": "312092086bed4968099259622145a0c9ae280064" + }, + { + "ImportPath": "github.com/spf13/pflag", + "Rev": "5644820622454e71517561946e3d94b9f9db6842" + }, + { + "ImportPath": "github.com/stevvooe/resumable", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/stevvooe/resumable/sha256", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/stevvooe/resumable/sha512", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/yvasiyarov/go-metrics", + "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" + }, + { + "ImportPath": "github.com/yvasiyarov/gorelic", + "Comment": "v0.0.6-8-ga9bba5b", + "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128" + }, + { + "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", + "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" + }, + { + "ImportPath": "golang.org/x/crypto/bcrypt", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/crypto/blowfish", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/crypto/ocsp", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/context/ctxhttp", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/http2", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/http2/hpack", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/internal/timeseries", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/trace", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/google", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/internal", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/jws", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/jwt", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/time/rate", + "Rev": "a4bde12657593d5e90d0533a3e4fd95e635124cb" + }, + { + "ImportPath": "google.golang.org/api/gensupport", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/storage/v1", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/appengine", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/app_identity", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/base", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/datastore", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/log", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/modules", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/remote_api", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/cloud", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/compute/metadata", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/internal", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/internal/opts", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/storage", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/grpc", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/codes", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/credentials", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/grpclog", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/internal", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/metadata", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/naming", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/peer", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/transport", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "gopkg.in/check.v1", + "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" + }, + { + "ImportPath": "gopkg.in/yaml.v2", + "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" + }, + { + "ImportPath": "rsc.io/letsencrypt", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + } + ] +} diff --git a/vendor/github.com/docker/distribution/Godeps/Readme b/vendor/github.com/docker/distribution/Godeps/Readme new file mode 100644 index 0000000..4cdaa53 --- /dev/null +++ b/vendor/github.com/docker/distribution/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 0000000..e06d208 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS new file mode 100644 index 0000000..bda4001 --- /dev/null +++ b/vendor/github.com/docker/distribution/MAINTAINERS @@ -0,0 +1,58 @@ +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "dmcgowan", + "dmp42", + "richardscothern", + "shykes", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.richardscothern] + Name = "Richard Scothern" + Email = "richard.scothern@gmail.com" + GitHub = "richardscothern" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile new file mode 100644 index 0000000..47b8f1d --- /dev/null +++ b/vendor/github.com/docker/distribution/Makefile @@ -0,0 +1,109 @@ +# Set an output prefix, which is the local directory if not specified +PREFIX?=$(shell pwd) + + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) + +# Allow turning off function inlining and variable registerization +ifeq (${DISABLE_OPTIMIZATION},true) + GO_GCFLAGS=-gcflags "-N -l" + VERSION:="$(VERSION)-noopt" +endif + +GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" + +.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet +.DEFAULT: all +all: fmt vet lint build test binaries + +AUTHORS: .mailmap .git/HEAD + git log --format='%aN <%aE>' | sort -fu > $@ + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + ./version/version.sh > $@ + +# Required for go 1.5 to build +GO15VENDOREXPERIMENT := 1 + +# Go files +GOFILES=$(shell find . -type f -name '*.go') + +# Package list +PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) + +# Resolving binary dependencies for specific targets +GOLINT=$(shell which golint || echo '') +GODEP=$(shell which godep || echo '') + +${PREFIX}/bin/registry: $(GOFILES) + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry + +${PREFIX}/bin/digest: $(GOFILES) + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest + +${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) + @echo "+ $@" + @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template + +docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template + ./bin/registry-api-descriptor-template $< > $@ + +vet: + @echo "+ $@" + @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +fmt: + @echo "+ $@" + @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ + (echo >&2 "+ please format Go code with 'gofmt -s'" && false) + +lint: + @echo "+ $@" + $(if $(GOLINT), , \ + $(error Please install golint: `go get -u github.com/golang/lint/golint`)) + @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" + +build: + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) + +test: + @echo "+ $@" + @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +test-full: + @echo "+ $@" + @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template + @echo "+ $@" + +clean: + @echo "+ $@" + @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" + +dep-save: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) save $(PKGS) + +dep-restore: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) restore -v + +dep-validate: dep-restore + @echo "+ $@" + @rm -Rf .vendor.bak + @mv vendor .vendor.bak + @rm -Rf Godeps + @$(GODEP) save ./... + @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ + (echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) + @rm -Rf .vendor.bak diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md new file mode 100644 index 0000000..a6e8db0 --- /dev/null +++ b/vendor/github.com/docker/distribution/README.md @@ -0,0 +1,131 @@ +# Distribution + +The Docker toolset to pack, ship, store, and deliver content. + +This repository's main product is the Docker Registry 2.0 implementation +for storing and distributing Docker images. It supersedes the +[docker/docker-registry](https://github.com/docker/docker-registry) +project with a new API design, focused around security and performance. + + + +[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) +[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) + +This repository contains the following components: + +|**Component** |Description | +|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | + +### How does this integrate with Docker engine? + +This project should provide an implementation to a V2 API for use in the [Docker +core project](https://github.com/docker/docker). The API should be embeddable +and simplify the process of securely pulling and pushing content from `docker` +daemons. + +### What are the long term goals of the Distribution project? + +The _Distribution_ project has the further long term goal of providing a +secure tool chain for distributing content. The specifications, APIs and tools +should be as useful with Docker as they are without. + +Our goal is to design a professional grade and extensible content distribution +system that allow users to: + +* Enjoy an efficient, secured and reliable way to store, manage, package and + exchange content +* Hack/roll their own on top of healthy open-source components +* Implement their own home made solution through good specs, and solid + extensions mechanism. + +## More about Registry 2.0 + +The new registry implementation provides the following benefits: + +- faster push and pull +- new, more efficient implementation +- simplified deployment +- pluggable storage backend +- webhook notifications + +For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). + +### Who needs to deploy a registry? + +By default, Docker users pull images from Docker's public registry instance. +[Installing Docker](https://docs.docker.com/engine/installation/) gives users this +ability. Users can also push images to a repository on Docker's public registry, +if they have a [Docker Hub](https://hub.docker.com/) account. + +For some users and even companies, this default behavior is sufficient. For +others, it is not. + +For example, users with their own software products may want to maintain a +registry for private, company images. Also, you may wish to deploy your own +image repository for images used to test or in continuous integration. For these +use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) +may be the better choice. + +### Migration to Registry 2.0 + +For those who have previously deployed their own registry based on the Registry +1.0 implementation and wish to deploy a Registry 2.0 while retaining images, +data migration is required. A tool to assist with migration efforts has been +created. For more information see [docker/migrator] +(https://github.com/docker/migrator). + +## Contribute + +Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute +issues, fixes, and patches to this project. If you are contributing code, see +the instructions for [building a development environment](BUILDING.md). + +## Support + +If any issues are encountered while using the _Distribution_ project, several +avenues are available for support: + + + + + + + + + + + + + + + + + + +
+ IRC + + #docker-distribution on FreeNode +
+ Issue Tracker + + github.com/docker/distribution/issues +
+ Google Groups + + https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution +
+ Mailing List + + docker@dockerproject.org +
+ + +## License + +This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md new file mode 100644 index 0000000..49235ce --- /dev/null +++ b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md @@ -0,0 +1,36 @@ +## Registry Release Checklist + +10. Compile release notes detailing features and since the last release. Update the `CHANGELOG.md` file. + +20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` + +30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. + + ``` +make AUTHORS +``` + +40. Create a signed tag. + + Distribution uses semantic versioning. Tags are of the format `vx.y.z[-rcn]` +You will need PGP installed and a PGP key which has been added to your Github account. The comment for the tag should include the release notes. + +50. Push the signed tag + +60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. + +70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. + +80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. +e.g. to release `2.3.1` + + `2.3.1 (new)` + + `2.3.0 -> 2.3.0` can be removed + + `2 -> 2.3.1` + + `2.3 -> 2.3.1` + +90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. + diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md new file mode 100644 index 0000000..701127a --- /dev/null +++ b/vendor/github.com/docker/distribution/ROADMAP.md @@ -0,0 +1,267 @@ +# Roadmap + +The Distribution Project consists of several components, some of which are +still being defined. This document defines the high-level goals of the +project, identifies the current components, and defines the release- +relationship to the Docker Platform. + +* [Distribution Goals](#distribution-goals) +* [Distribution Components](#distribution-components) +* [Project Planning](#project-planning): release-relationship to the Docker Platform. + +This road map is a living document, providing an overview of the goals and +considerations made in respect of the future of the project. + +## Distribution Goals + +- Replace the existing [docker registry](github.com/docker/docker-registry) + implementation as the primary implementation. +- Replace the existing push and pull code in the docker engine with the + distribution package. +- Define a strong data model for distributing docker images +- Provide a flexible distribution tool kit for use in the docker platform +- Unlock new distribution models + +## Distribution Components + +Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming +features and bugfixes for a component will be added to the relevant milestone. If a feature or +bugfix is not part of a milestone, it is currently unscheduled for +implementation. + +* [Registry](#registry) +* [Distribution Package](#distribution-package) + +*** + +### Registry + +The new Docker registry is the main portion of the distribution repository. +Registry 2.0 is the first release of the next-generation registry. This was +primarily focused on implementing the [new registry +API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), +with a focus on security and performance. + +Following from the Distribution project goals above, we have a set of goals +for registry v2 that we would like to follow in the design. New features +should be compared against these goals. + +#### Data Storage and Distribution First + +The registry's first goal is to provide a reliable, consistent storage +location for Docker images. The registry should only provide the minimal +amount of indexing required to fetch image data and no more. + +This means we should be selective in new features and API additions, including +those that may require expensive, ever growing indexes. Requests should be +servable in "constant time". + +#### Content Addressability + +All data objects used in the registry API should be content addressable. +Content identifiers should be secure and verifiable. This provides a secure, +reliable base from which to build more advanced content distribution systems. + +#### Content Agnostic + +In the past, changes to the image format would require large changes in Docker +and the Registry. By decoupling the distribution and image format, we can +allow the formats to progress without having to coordinate between the two. +This means that we should be focused on decoupling Docker from the registry +just as much as decoupling the registry from Docker. Such an approach will +allow us to unlock new distribution models that haven't been possible before. + +We can take this further by saying that the new registry should be content +agnostic. The registry provides a model of names, tags, manifests and content +addresses and that model can be used to work with content. + +#### Simplicity + +The new registry should be closer to a microservice component than its +predecessor. This means it should have a narrower API and a low number of +service dependencies. It should be easy to deploy. + +This means that other solutions should be explored before changing the API or +adding extra dependencies. If functionality is required, can it be added as an +extension or companion service. + +#### Extensibility + +The registry should provide extension points to add functionality. By keeping +the scope narrow, but providing the ability to add functionality. + +Features like search, indexing, synchronization and registry explorers fall +into this category. No such feature should be added unless we've found it +impossible to do through an extension. + +#### Active Feature Discussions + +The following are feature discussions that are currently active. + +If you don't see your favorite, unimplemented feature, feel free to contact us +via IRC or the mailing list and we can talk about adding it. The goal here is +to make sure that new features go through a rigid design process before +landing in the registry. + +##### Proxying to other Registries + +A _pull-through caching_ mode exists for the registry, but is restricted from +within the docker client to only mirror the official Docker Hub. This functionality +can be expanded when image provenance has been specified and implemented in the +distribution project. + +##### Metadata storage + +Metadata for the registry is currently stored with the manifest and layer data on +the storage backend. While this is a big win for simplicity and reliably maintaining +state, it comes with the cost of consistency and high latency. The mutable registry +metadata operations should be abstracted behind an API which will allow ACID compliant +storage systems to handle metadata. + +##### Peer to Peer transfer + +Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit + +##### Indexing, Search and Discovery + +The original registry provided some implementation of search for use with +private registries. Support has been elided from V2 since we'd like to both +decouple search functionality from the registry. The makes the registry +simpler to deploy, especially in use cases where search is not needed, and +let's us decouple the image format from the registry. + +There are explorations into using the catalog API and notification system to +build external indexes. The current line of thought is that we will define a +common search API to index and query docker images. Such a system could be run +as a companion to a registry or set of registries to power discovery. + +The main issue with search and discovery is that there are so many ways to +accomplish it. There are two aspects to this project. The first is deciding on +how it will be done, including an API definition that can work with changing +data formats. The second is the process of integrating with `docker search`. +We expect that someone attempts to address the problem with the existing tools +and propose it as a standard search API or uses it to inform a standardization +process. Once this has been explored, we integrate with the docker client. + +Please see the following for more detail: + +- https://github.com/docker/distribution/issues/206 + +##### Deletes + +> __NOTE:__ Deletes are a much asked for feature. Before requesting this +feature or participating in discussion, we ask that you read this section in +full and understand the problems behind deletes. + +While, at first glance, implementing deleting seems simple, there are a number +mitigating factors that make many solutions not ideal or even pathological in +the context of a registry. The following paragraph discuss the background and +approaches that could be applied to arrive at a solution. + +The goal of deletes in any system is to remove unused or unneeded data. Only +data requested for deletion should be removed and no other data. Removing +unintended data is worse than _not_ removing data that was requested for +removal but ideally, both are supported. Generally, according to this rule, we +err on holding data longer than needed, ensuring that it is only removed when +we can be certain that it can be removed. With the current behavior, we opt to +hold onto the data forever, ensuring that data cannot be incorrectly removed. + +To understand the problems with implementing deletes, one must understand the +data model. All registry data is stored in a filesystem layout, implemented on +a "storage driver", effectively a _virtual file system_ (VFS). The storage +system must assume that this VFS layer will be eventually consistent and has +poor read- after-write consistency, since this is the lower common denominator +among the storage drivers. This is mitigated by writing values in reverse- +dependent order, but makes wider transactional operations unsafe. + +Layered on the VFS model is a content-addressable _directed, acyclic graph_ +(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. +Since the same data can be referenced by multiple manifests, we only store +data once, even if it is in different repositories. Thus, we have a set of +blobs, referenced by tags and manifests. If we want to delete a blob we need +to be certain that it is no longer referenced by another manifest or tag. When +we delete a manifest, we also can try to delete the referenced blobs. Deciding +whether or not a blob has an active reference is the crux of the problem. + +Conceptually, deleting a manifest and its resources is quite simple. Just find +all the manifests, enumerate the referenced blobs and delete the blobs not in +that set. An astute observer will recognize this as a garbage collection +problem. As with garbage collection in programming languages, this is very +simple when one always has a consistent view. When one adds parallelism and an +inconsistent view of data, it becomes very challenging. + +A simple example can demonstrate this. Let's say we are deleting a manifest +_A_ in one process. We scan the manifest and decide that all the blobs are +ready for deletion. Concurrently, we have another process accepting a new +manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ +is accepted and all the blobs are considered present, so the operation +proceeds. The original process then deletes the referenced blobs, assuming +they were unreferenced. The manifest _B_, which we thought had all of its data +present, can no longer be served by the registry, since the dependent data has +been deleted. + +Deleting data from the registry safely requires some way to coordinate this +operation. The following approaches are being considered: + +- _Reference Counting_ - Maintain a count of references to each blob. This is + challenging for a number of reasons: 1. maintaining a consistent consensus + of reference counts across a set of Registries and 2. Building the initial + list of reference counts for an existing registry. These challenges can be + met with a consensus protocol like Paxos or Raft in the first case and a + necessary but simple scan in the second.. +- _Lock the World GC_ - Halt all writes to the data store. Walk the data store + and find all blob references. Delete all unreferenced blobs. This approach + is very simple but requires disabling writes for a period of time while the + service reads all data. This is slow and expensive but very accurate and + effective. +- _Generational GC_ - Do something similar to above but instead of blocking + writes, writes are sent to another storage backend while reads are broadcast + to the new and old backends. GC is then performed on the read-only portion. + Because writes land in the new backend, the data in the read-only section + can be safely deleted. The main drawbacks of this approach are complexity + and coordination. +- _Centralized Oracle_ - Using a centralized, transactional database, we can + know exactly which data is referenced at any given time. This avoids + coordination problem by managing this data in a single location. We trade + off metadata scalability for simplicity and performance. This is a very good + option for most registry deployments. This would create a bottleneck for + registry metadata. However, metadata is generally not the main bottleneck + when serving images. + +Please let us know if other solutions exist that we have yet to enumerate. +Note that for any approach, implementation is a massive consideration. For +example, a mark-sweep based solution may seem simple but the amount of work in +coordination offset the extra work it might take to build a _Centralized +Oracle_. We'll accept proposals for any solution but please coordinate with us +before dropping code. + +At this time, we have traded off simplicity and ease of deployment for disk +space. Simplicity and ease of deployment tend to reduce developer involvement, +which is currently the most expensive resource in software engineering. Taking +on any solution for deletes will greatly effect these factors, trading off +very cheap disk space for a complex deployment and operational story. + +Please see the following issues for more detail: + +- https://github.com/docker/distribution/issues/422 +- https://github.com/docker/distribution/issues/461 +- https://github.com/docker/distribution/issues/462 + +### Distribution Package + +At its core, the Distribution Project is a set of Go packages that make up +Distribution Components. At this time, most of these packages make up the +Registry implementation. + +The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. + +For feature additions, please see the Registry section. In the future, we may break out a +separate Roadmap for distribution-specific features that apply to more than +just the registry. + +*** + +### Project Planning + +An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. + diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go new file mode 100644 index 0000000..1f91ae2 --- /dev/null +++ b/vendor/github.com/docker/distribution/blobs.go @@ -0,0 +1,257 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" +) + +var ( + // ErrBlobExists returned when blob already exists + ErrBlobExists = errors.New("blob exists") + + // ErrBlobDigestUnsupported when blob digest is an unsupported version. + ErrBlobDigestUnsupported = errors.New("unsupported blob digest") + + // ErrBlobUnknown when blob is not found. + ErrBlobUnknown = errors.New("unknown blob") + + // ErrBlobUploadUnknown returned when upload is not found. + ErrBlobUploadUnknown = errors.New("blob upload unknown") + + // ErrBlobInvalidLength returned when the blob has an expected length on + // commit, meaning mismatched with the descriptor or an invalid value. + ErrBlobInvalidLength = errors.New("blob invalid length") +) + +// ErrBlobInvalidDigest returned when digest check fails. +type ErrBlobInvalidDigest struct { + Digest digest.Digest + Reason error +} + +func (err ErrBlobInvalidDigest) Error() string { + return fmt.Sprintf("invalid digest for referenced layer: %v, %v", + err.Digest, err.Reason) +} + +// ErrBlobMounted returned when a blob is mounted from another repository +// instead of initiating an upload session. +type ErrBlobMounted struct { + From reference.Canonical + Descriptor Descriptor +} + +func (err ErrBlobMounted) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Descriptor) +} + +// Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. The struct also describes the wire protocol format. Fields should +// only be added but never changed. +type Descriptor struct { + // MediaType describe the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against against this digest. + Digest digest.Digest `json:"digest,omitempty"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // NOTE: Before adding a field here, please ensure that all + // other options have been exhausted. Much of the type relationships + // depend on the simplicity of this type. +} + +// Descriptor returns the descriptor, to make it satisfy the Describable +// interface. Note that implementations of Describable are generally objects +// which can be described, not simply descriptors; this exception is in place +// to make it more convenient to pass actual descriptors to functions that +// expect Describable objects. +func (d Descriptor) Descriptor() Descriptor { + return d +} + +// BlobStatter makes blob descriptors available by digest. The service may +// provide a descriptor of a different digest if the provided digest is not +// canonical. +type BlobStatter interface { + // Stat provides metadata about a blob identified by the digest. If the + // blob is unknown to the describer, ErrBlobUnknown will be returned. + Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) +} + +// BlobDeleter enables deleting blobs from storage. +type BlobDeleter interface { + Delete(ctx context.Context, dgst digest.Digest) error +} + +// BlobEnumerator enables iterating over blobs from storage +type BlobEnumerator interface { + Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error +} + +// BlobDescriptorService manages metadata about a blob by digest. Most +// implementations will not expose such an interface explicitly. Such mappings +// should be maintained by interacting with the BlobIngester. Hence, this is +// left off of BlobService and BlobStore. +type BlobDescriptorService interface { + BlobStatter + + // SetDescriptor assigns the descriptor to the digest. The provided digest and + // the digest in the descriptor must map to identical content but they may + // differ on their algorithm. The descriptor must have the canonical + // digest of the content and the digest algorithm must match the + // annotators canonical algorithm. + // + // Such a facility can be used to map blobs between digest domains, with + // the restriction that the algorithm of the descriptor must match the + // canonical algorithm (ie sha256) of the annotator. + SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error + + // Clear enables descriptors to be unlinked + Clear(ctx context.Context, dgst digest.Digest) error +} + +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. +type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + +// ReadSeekCloser is the primary reader type for blob data, combining +// io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// BlobProvider describes operations for getting blob data. +type BlobProvider interface { + // Get returns the entire blob identified by digest along with the descriptor. + Get(ctx context.Context, dgst digest.Digest) ([]byte, error) + + // Open provides a ReadSeekCloser to the blob identified by the provided + // descriptor. If the blob is not known to the service, an error will be + // returned. + Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) +} + +// BlobServer can serve blobs via http. +type BlobServer interface { + // ServeBlob attempts to serve the blob, identifed by dgst, via http. The + // service may decide to redirect the client elsewhere or serve the data + // directly. + // + // This handler only issues successful responses, such as 2xx or 3xx, + // meaning it serves data or issues a redirect. If the blob is not + // available, an error will be returned and the caller may still issue a + // response. + // + // The implementation may serve the same blob from a different digest + // domain. The appropriate headers will be set for the blob, unless they + // have already been set by the caller. + ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error +} + +// BlobIngester ingests blob data. +type BlobIngester interface { + // Put inserts the content p into the blob service, returning a descriptor + // or an error. + Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) + + // Create allocates a new blob writer to add a blob to this service. The + // returned handle can be written to and later resumed using an opaque + // identifier. With this approach, one can Close and Resume a BlobWriter + // multiple times until the BlobWriter is committed or cancelled. + Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) + + // Resume attempts to resume a write to a blob, identified by an id. + Resume(ctx context.Context, id string) (BlobWriter, error) +} + +// BlobCreateOption is a general extensible function argument for blob creation +// methods. A BlobIngester may choose to honor any or none of the given +// BlobCreateOptions, which can be specific to the implementation of the +// BlobIngester receiving them. +// TODO (brianbland): unify this with ManifestServiceOption in the future +type BlobCreateOption interface { + Apply(interface{}) error +} + +// CreateOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type CreateOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + // Stat allows to pass precalculated descriptor to link and return. + // Blob access check will be skipped if set. + Stat *Descriptor + } +} + +// BlobWriter provides a handle for inserting data into a blob store. +// Instances should be obtained from BlobWriteService.Writer and +// BlobWriteService.Resume. If supported by the store, a writer can be +// recovered with the id. +type BlobWriter interface { + io.WriteCloser + io.ReaderFrom + + // Size returns the number of bytes written to this blob. + Size() int64 + + // ID returns the identifier for this writer. The ID can be used with the + // Blob service to later resume the write. + ID() string + + // StartedAt returns the time this blob write was started. + StartedAt() time.Time + + // Commit completes the blob writer process. The content is verified + // against the provided provisional descriptor, which may result in an + // error. Depending on the implementation, written data may be validated + // against the provisional descriptor fields. If MediaType is not present, + // the implementation may reject the commit or assign "application/octet- + // stream" to the blob. The returned descriptor may have a different + // digest depending on the blob store, referred to as the canonical + // descriptor. + Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) + + // Cancel ends the blob write without storing any data and frees any + // associated resources. Any data written thus far will be lost. Cancel + // implementations should allow multiple calls even after a commit that + // result in a no-op. This allows use of Cancel in a defer statement, + // increasing the assurance that it is correctly called. + Cancel(ctx context.Context) error +} + +// BlobService combines the operations to access, read and write blobs. This +// can be used to describe remote blob services. +type BlobService interface { + BlobStatter + BlobProvider + BlobIngester +} + +// BlobStore represent the entire suite of blob related operations. Such an +// implementation can access, read, write, delete and serve blobs. +type BlobStore interface { + BlobService + BlobServer + BlobDeleter +} diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml new file mode 100644 index 0000000..61f8be0 --- /dev/null +++ b/vendor/github.com/docker/distribution/circle.yml @@ -0,0 +1,93 @@ +# Pony-up! +machine: + pre: + # Install gvm + - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) + # Install codecov for coverage + - pip install --user codecov + + post: + # go + - gvm install go1.7 --prefer-binary --name=stable + + environment: + # Convenient shortcuts to "common" locations + CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME + BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + # Trick circle brainflat "no absolute path" behavior + BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR + DOCKER_BUILDTAGS: "include_oss include_gcs" + # Workaround Circle parsing dumb bugs and/or YAML wonkyness + CIRCLE_PAIN: "mode: set" + + hosts: + # Not used yet + fancy: 127.0.0.1 + +dependencies: + pre: + # Copy the code to the gopath of all go versions + - > + gvm use stable && + mkdir -p "$(dirname $BASE_STABLE)" && + cp -R "$CHECKOUT" "$BASE_STABLE" + + override: + # Install dependencies for every copied clone/go version + - gvm use stable && go get github.com/tools/godep: + pwd: $BASE_STABLE + + post: + # For the stable go version, additionally install linting tools + - > + gvm use stable && + go get github.com/axw/gocov/gocov github.com/golang/lint/golint + +test: + pre: + # Output the go versions we are going to test + # - gvm use old && go version + - gvm use stable && go version + + # todo(richard): replace with a more robust vendoring solution. Removed due to a fundamental disagreement in godep philosophies. + # Ensure validation of dependencies + # - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: + # pwd: $BASE_STABLE + + # First thing: build everything. This will catch compile errors, and it's + # also necessary for go vet to work properly (see #807). + - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): + pwd: $BASE_STABLE + + # FMT + - gvm use stable && make fmt: + pwd: $BASE_STABLE + + # VET + - gvm use stable && make vet: + pwd: $BASE_STABLE + + # LINT + - gvm use stable && make lint: + pwd: $BASE_STABLE + + override: + # Test stable, and report + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + timeout: 1000 + pwd: $BASE_STABLE + + # Test stable with race + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': + timeout: 1000 + pwd: $BASE_STABLE + post: + # Report to codecov + - bash <(curl -s https://codecov.io/bash): + pwd: $BASE_STABLE + + ## Notes + # Do we want these as well? + # - go get code.google.com/p/go.tools/cmd/goimports + # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" + # http://labix.org/gocheck diff --git a/vendor/github.com/docker/distribution/cmd/digest/main.go b/vendor/github.com/docker/distribution/cmd/digest/main.go new file mode 100644 index 0000000..49426a8 --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/digest/main.go @@ -0,0 +1,97 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/version" +) + +var ( + algorithm = digest.Canonical + showVersion bool +) + +type job struct { + name string + reader io.Reader +} + +func init() { + flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)") + flag.Var(&algorithm, "algorithm", "select the digest algorithm") + flag.BoolVar(&showVersion, "version", false, "show the version and exit") + + log.SetFlags(0) + log.SetPrefix(os.Args[0] + ": ") +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0]) + fmt.Fprintf(os.Stderr, ` +Calculate the digest of one or more input files, emitting the result +to standard out. If no files are provided, the digest of stdin will +be calculated. + +`) + flag.PrintDefaults() +} + +func unsupported() { + log.Fatalf("unsupported digest algorithm: %v", algorithm) +} + +func main() { + var jobs []job + + flag.Usage = usage + flag.Parse() + if showVersion { + version.PrintVersion() + return + } + + var fail bool // if we fail on one item, foul the exit code + if flag.NArg() > 0 { + for _, path := range flag.Args() { + fp, err := os.Open(path) + + if err != nil { + log.Printf("%s: %v", path, err) + fail = true + continue + } + defer fp.Close() + + jobs = append(jobs, job{name: path, reader: fp}) + } + } else { + // just read stdin + jobs = append(jobs, job{name: "-", reader: os.Stdin}) + } + + digestFn := algorithm.FromReader + + if !algorithm.Available() { + unsupported() + } + + for _, job := range jobs { + dgst, err := digestFn(job.reader) + if err != nil { + log.Printf("%s: %v", job.name, err) + fail = true + continue + } + + fmt.Printf("%v\t%s\n", dgst, job.name) + } + + if fail { + os.Exit(1) + } +} diff --git a/vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go b/vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go new file mode 100644 index 0000000..e9cbc42 --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go @@ -0,0 +1,131 @@ +// registry-api-descriptor-template uses the APIDescriptor defined in the +// api/v2 package to execute templates passed to the command line. +// +// For example, to generate a new API specification, one would execute the +// following command from the repo root: +// +// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md +// +// The templates are passed in the api/v2.APIDescriptor object. Please see the +// package documentation for fields available on that object. The template +// syntax is from Go's standard library text/template package. For information +// on Go's template syntax, please see golang.org/pkg/text/template. +package main + +import ( + "log" + "net/http" + "os" + "path/filepath" + "regexp" + "text/template" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" +) + +var spaceRegex = regexp.MustCompile(`\n\s*`) + +func main() { + + if len(os.Args) != 2 { + log.Fatalln("please specify a template to execute.") + } + + path := os.Args[1] + filename := filepath.Base(path) + + funcMap := template.FuncMap{ + "removenewlines": func(s string) string { + return spaceRegex.ReplaceAllString(s, " ") + }, + "statustext": http.StatusText, + "prettygorilla": prettyGorillaMuxPath, + } + + tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) + + data := struct { + RouteDescriptors []v2.RouteDescriptor + ErrorDescriptors []errcode.ErrorDescriptor + }{ + RouteDescriptors: v2.APIDescriptor.RouteDescriptors, + ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"), + // The following are part of the specification but provided by errcode default. + errcode.ErrorCodeUnauthorized.Descriptor(), + errcode.ErrorCodeDenied.Descriptor(), + errcode.ErrorCodeUnsupported.Descriptor()), + } + + if err := tmpl.Execute(os.Stdout, data); err != nil { + log.Fatalln(err) + } +} + +// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux +// route string, making it suitable for documentation. +func prettyGorillaMuxPath(s string) string { + // Stateful parser that removes regular expressions from gorilla + // routes. It correctly handles balanced bracket pairs. + + var output string + var label string + var level int + +start: + if s[0] == '{' { + s = s[1:] + level++ + goto capture + } + + output += string(s[0]) + s = s[1:] + + goto end +capture: + switch s[0] { + case '{': + level++ + case '}': + level-- + + if level == 0 { + s = s[1:] + goto label + } + case ':': + s = s[1:] + goto skip + default: + label += string(s[0]) + } + s = s[1:] + goto capture +skip: + switch s[0] { + case '{': + level++ + case '}': + level-- + } + s = s[1:] + + if level == 0 { + goto label + } + + goto skip +label: + if label != "" { + output += "<" + label + ">" + label = "" + } +end: + if s != "" { + goto start + } + + return output + +} diff --git a/vendor/github.com/docker/distribution/cmd/registry/config-cache.yml b/vendor/github.com/docker/distribution/cmd/registry/config-cache.yml new file mode 100644 index 0000000..7a274ea --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry/config-cache.yml @@ -0,0 +1,55 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development +storage: + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry-cache + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-8082 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true +proxy: + remoteurl: https://registry-1.docker.io + username: username + password: password +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/vendor/github.com/docker/distribution/cmd/registry/config-dev.yml b/vendor/github.com/docker/distribution/cmd/registry/config-dev.yml new file mode 100644 index 0000000..b6438be --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry/config-dev.yml @@ -0,0 +1,66 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com +storage: + delete: + enabled: true + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-5003 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/vendor/github.com/docker/distribution/cmd/registry/config-example.yml b/vendor/github.com/docker/distribution/cmd/registry/config-example.yml new file mode 100644 index 0000000..3277f9a --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry/config-example.yml @@ -0,0 +1,18 @@ +version: 0.1 +log: + fields: + service: registry +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /var/lib/registry +http: + addr: :5000 + headers: + X-Content-Type-Options: [nosniff] +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/vendor/github.com/docker/distribution/cmd/registry/main.go b/vendor/github.com/docker/distribution/cmd/registry/main.go new file mode 100644 index 0000000..c077a0c --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry/main.go @@ -0,0 +1,25 @@ +package main + +import ( + _ "net/http/pprof" + + "github.com/docker/distribution/registry" + _ "github.com/docker/distribution/registry/auth/htpasswd" + _ "github.com/docker/distribution/registry/auth/silly" + _ "github.com/docker/distribution/registry/auth/token" + _ "github.com/docker/distribution/registry/proxy" + _ "github.com/docker/distribution/registry/storage/driver/azure" + _ "github.com/docker/distribution/registry/storage/driver/filesystem" + _ "github.com/docker/distribution/registry/storage/driver/gcs" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" + _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" + _ "github.com/docker/distribution/registry/storage/driver/middleware/redirect" + _ "github.com/docker/distribution/registry/storage/driver/oss" + _ "github.com/docker/distribution/registry/storage/driver/s3-aws" + _ "github.com/docker/distribution/registry/storage/driver/s3-goamz" + _ "github.com/docker/distribution/registry/storage/driver/swift" +) + +func main() { + registry.RootCmd.Execute() +} diff --git a/vendor/github.com/docker/distribution/configuration/configuration.go b/vendor/github.com/docker/distribution/configuration/configuration.go new file mode 100644 index 0000000..ec50a6b --- /dev/null +++ b/vendor/github.com/docker/distribution/configuration/configuration.go @@ -0,0 +1,643 @@ +package configuration + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "time" +) + +// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and +// optionally modified by environment variables. +// +// Note that yaml field names should never include _ characters, since this is the separator used +// in environment variable names. +type Configuration struct { + // Version is the version which defines the format of the rest of the configuration + Version Version `yaml:"version"` + + // Log supports setting various parameters related to the logging + // subsystem. + Log struct { + // AccessLog configures access logging. + AccessLog struct { + // Disabled disables access logging. + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"accesslog,omitempty"` + + // Level is the granularity at which registry operations are logged. + Level Loglevel `yaml:"level"` + + // Formatter overrides the default formatter with another. Options + // include "text", "json" and "logstash". + Formatter string `yaml:"formatter,omitempty"` + + // Fields allows users to specify static string fields to include in + // the logger context. + Fields map[string]interface{} `yaml:"fields,omitempty"` + + // Hooks allows users to configure the log hooks, to enabling the + // sequent handling behavior, when defined levels of log message emit. + Hooks []LogHook `yaml:"hooks,omitempty"` + } + + // Loglevel is the level at which registry operations are logged. This is + // deprecated. Please use Log.Level in the future. + Loglevel Loglevel `yaml:"loglevel,omitempty"` + + // Storage is the configuration for the registry's storage driver + Storage Storage `yaml:"storage"` + + // Auth allows configuration of various authorization methods that may be + // used to gate requests. + Auth Auth `yaml:"auth,omitempty"` + + // Middleware lists all middlewares to be used by the registry. + Middleware map[string][]Middleware `yaml:"middleware,omitempty"` + + // Reporting is the configuration for error reporting + Reporting Reporting `yaml:"reporting,omitempty"` + + // HTTP contains configuration parameters for the registry's http + // interface. + HTTP struct { + // Addr specifies the bind address for the registry instance. + Addr string `yaml:"addr,omitempty"` + + // Net specifies the net portion of the bind address. A default empty value means tcp. + Net string `yaml:"net,omitempty"` + + // Host specifies an externally-reachable address for the registry, as a fully + // qualified URL. + Host string `yaml:"host,omitempty"` + + Prefix string `yaml:"prefix,omitempty"` + + // Secret specifies the secret key which HMAC tokens are created with. + Secret string `yaml:"secret,omitempty"` + + // RelativeURLs specifies that relative URLs should be returned in + // Location headers + RelativeURLs bool `yaml:"relativeurls,omitempty"` + + // TLS instructs the http server to listen with a TLS configuration. + // This only support simple tls configuration with a cert and key. + // Mostly, this is useful for testing situations or simple deployments + // that require tls. If more complex configurations are required, use + // a proxy or make a proposal to add support here. + TLS struct { + // Certificate specifies the path to an x509 certificate file to + // be used for TLS. + Certificate string `yaml:"certificate,omitempty"` + + // Key specifies the path to the x509 key file, which should + // contain the private portion for the file specified in + // Certificate. + Key string `yaml:"key,omitempty"` + + // Specifies the CA certs for client authentication + // A file may contain multiple CA certificates encoded as PEM + ClientCAs []string `yaml:"clientcas,omitempty"` + + // LetsEncrypt is used to configuration setting up TLS through + // Let's Encrypt instead of manually specifying certificate and + // key. If a TLS certificate is specified, the Let's Encrypt + // section will not be used. + LetsEncrypt struct { + // CacheFile specifies cache file to use for lets encrypt + // certificates and keys. + CacheFile string `yaml:"cachefile,omitempty"` + + // Email is the email to use during Let's Encrypt registration + Email string `yaml:"email,omitempty"` + } `yaml:"letsencrypt,omitempty"` + } `yaml:"tls,omitempty"` + + // Headers is a set of headers to include in HTTP responses. A common + // use case for this would be security headers such as + // Strict-Transport-Security. The map keys are the header names, and + // the values are the associated header payloads. + Headers http.Header `yaml:"headers,omitempty"` + + // Debug configures the http debug interface, if specified. This can + // include services such as pprof, expvar and other data that should + // not be exposed externally. Left disabled by default. + Debug struct { + // Addr specifies the bind address for the debug server. + Addr string `yaml:"addr,omitempty"` + } `yaml:"debug,omitempty"` + + // HTTP2 configuration options + HTTP2 struct { + // Specifies wether the registry should disallow clients attempting + // to connect via http2. If set to true, only http/1.1 is supported. + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"http2,omitempty"` + } `yaml:"http,omitempty"` + + // Notifications specifies configuration about various endpoint to which + // registry events are dispatched. + Notifications Notifications `yaml:"notifications,omitempty"` + + // Redis configures the redis pool available to the registry webapp. + Redis struct { + // Addr specifies the the redis instance available to the application. + Addr string `yaml:"addr,omitempty"` + + // Password string to use when making a connection. + Password string `yaml:"password,omitempty"` + + // DB specifies the database to connect to on the redis instance. + DB int `yaml:"db,omitempty"` + + DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect + ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data + WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data + + // Pool configures the behavior of the redis connection pool. + Pool struct { + // MaxIdle sets the maximum number of idle connections. + MaxIdle int `yaml:"maxidle,omitempty"` + + // MaxActive sets the maximum number of connections that should be + // opened before blocking a connection request. + MaxActive int `yaml:"maxactive,omitempty"` + + // IdleTimeout sets the amount time to wait before closing + // inactive connections. + IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` + } `yaml:"pool,omitempty"` + } `yaml:"redis,omitempty"` + + Health Health `yaml:"health,omitempty"` + + Proxy Proxy `yaml:"proxy,omitempty"` + + // Compatibility is used for configurations of working with older or deprecated features. + Compatibility struct { + // Schema1 configures how schema1 manifests will be handled + Schema1 struct { + // TrustKey is the signing key to use for adding the signature to + // schema1 manifests. + TrustKey string `yaml:"signingkeyfile,omitempty"` + } `yaml:"schema1,omitempty"` + } `yaml:"compatibility,omitempty"` + + // Validation configures validation options for the registry. + Validation struct { + // Enabled enables the other options in this section. + Enabled bool `yaml:"enabled,omitempty"` + // Manifests configures manifest validation. + Manifests struct { + // URLs configures validation for URLs in pushed manifests. + URLs struct { + // Allow specifies regular expressions (https://godoc.org/regexp/syntax) + // that URLs in pushed manifests must match. + Allow []string `yaml:"allow,omitempty"` + // Deny specifies regular expressions (https://godoc.org/regexp/syntax) + // that URLs in pushed manifests must not match. + Deny []string `yaml:"deny,omitempty"` + } `yaml:"urls,omitempty"` + } `yaml:"manifests,omitempty"` + } `yaml:"validation,omitempty"` + + // Policy configures registry policy options. + Policy struct { + // Repository configures policies for repositories + Repository struct { + // Classes is a list of repository classes which the + // registry allows content for. This class is matched + // against the configuration media type inside uploaded + // manifests. When non-empty, the registry will enforce + // the class in authorized resources. + Classes []string `yaml:"classes"` + } `yaml:"repository,omitempty"` + } `yaml:"policy,omitempty"` +} + +// LogHook is composed of hook Level and Type. +// After hooks configuration, it can execute the next handling automatically, +// when defined levels of log message emitted. +// Example: hook can sending an email notification when error log happens in app. +type LogHook struct { + // Disable lets user select to enable hook or not. + Disabled bool `yaml:"disabled,omitempty"` + + // Type allows user to select which type of hook handler they want. + Type string `yaml:"type,omitempty"` + + // Levels set which levels of log message will let hook executed. + Levels []string `yaml:"levels,omitempty"` + + // MailOptions allows user to configurate email parameters. + MailOptions MailOptions `yaml:"options,omitempty"` +} + +// MailOptions provides the configuration sections to user, for specific handler. +type MailOptions struct { + SMTP struct { + // Addr defines smtp host address + Addr string `yaml:"addr,omitempty"` + + // Username defines user name to smtp host + Username string `yaml:"username,omitempty"` + + // Password defines password of login user + Password string `yaml:"password,omitempty"` + + // Insecure defines if smtp login skips the secure certification. + Insecure bool `yaml:"insecure,omitempty"` + } `yaml:"smtp,omitempty"` + + // From defines mail sending address + From string `yaml:"from,omitempty"` + + // To defines mail receiving address + To []string `yaml:"to,omitempty"` +} + +// FileChecker is a type of entry in the health section for checking files. +type FileChecker struct { + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // File is the path to check + File string `yaml:"file,omitempty"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// HTTPChecker is a type of entry in the health section for checking HTTP URIs. +type HTTPChecker struct { + // Timeout is the duration to wait before timing out the HTTP request + Timeout time.Duration `yaml:"timeout,omitempty"` + // StatusCode is the expected status code + StatusCode int + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // URI is the HTTP URI to check + URI string `yaml:"uri,omitempty"` + // Headers lists static headers that should be added to all requests + Headers http.Header `yaml:"headers"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// TCPChecker is a type of entry in the health section for checking TCP servers. +type TCPChecker struct { + // Timeout is the duration to wait before timing out the TCP connection + Timeout time.Duration `yaml:"timeout,omitempty"` + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // Addr is the TCP address to check + Addr string `yaml:"addr,omitempty"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// Health provides the configuration section for health checks. +type Health struct { + // FileCheckers is a list of paths to check + FileCheckers []FileChecker `yaml:"file,omitempty"` + // HTTPCheckers is a list of URIs to check + HTTPCheckers []HTTPChecker `yaml:"http,omitempty"` + // TCPCheckers is a list of URIs to check + TCPCheckers []TCPChecker `yaml:"tcp,omitempty"` + // StorageDriver configures a health check on the configured storage + // driver + StorageDriver struct { + // Enabled turns on the health check for the storage driver + Enabled bool `yaml:"enabled,omitempty"` + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` + } `yaml:"storagedriver,omitempty"` +} + +// v0_1Configuration is a Version 0.1 Configuration struct +// This is currently aliased to Configuration, as it is the current version +type v0_1Configuration Configuration + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints +func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var versionString string + err := unmarshal(&versionString) + if err != nil { + return err + } + + newVersion := Version(versionString) + if _, err := newVersion.major(); err != nil { + return err + } + + if _, err := newVersion.minor(); err != nil { + return err + } + + *version = newVersion + return nil +} + +// CurrentVersion is the most recent Version that can be parsed +var CurrentVersion = MajorMinorVersion(0, 1) + +// Loglevel is the level at which operations are logged +// This can be error, warn, info, or debug +type Loglevel string + +// UnmarshalYAML implements the yaml.Umarshaler interface +// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a +// valid loglevel +func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error { + var loglevelString string + err := unmarshal(&loglevelString) + if err != nil { + return err + } + + loglevelString = strings.ToLower(loglevelString) + switch loglevelString { + case "error", "warn", "info", "debug": + default: + return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString) + } + + *loglevel = Loglevel(loglevelString) + return nil +} + +// Parameters defines a key-value parameters mapping +type Parameters map[string]interface{} + +// Storage defines the configuration for registry object storage +type Storage map[string]Parameters + +// Type returns the storage driver type, such as filesystem or s3 +func (storage Storage) Type() string { + var storageType []string + + // Return only key in this map + for k := range storage { + switch k { + case "maintenance": + // allow configuration of maintenance + case "cache": + // allow configuration of caching + case "delete": + // allow configuration of delete + case "redirect": + // allow configuration of redirect + default: + storageType = append(storageType, k) + } + } + if len(storageType) > 1 { + panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", ")) + } + if len(storageType) == 1 { + return storageType[0] + } + return "" +} + +// Parameters returns the Parameters map for a Storage configuration +func (storage Storage) Parameters() Parameters { + return storage[storage.Type()] +} + +// setParameter changes the parameter at the provided key to the new value +func (storage Storage) setParameter(key string, value interface{}) { + storage[storage.Type()][key] = value +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters +func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { + var storageMap map[string]Parameters + err := unmarshal(&storageMap) + if err == nil { + if len(storageMap) > 1 { + types := make([]string, 0, len(storageMap)) + for k := range storageMap { + switch k { + case "maintenance": + // allow for configuration of maintenance + case "cache": + // allow configuration of caching + case "delete": + // allow configuration of delete + case "redirect": + // allow configuration of redirect + default: + types = append(types, k) + } + } + + if len(types) > 1 { + return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types) + } + } + *storage = storageMap + return nil + } + + var storageType string + err = unmarshal(&storageType) + if err == nil { + *storage = Storage{storageType: Parameters{}} + return nil + } + + return err +} + +// MarshalYAML implements the yaml.Marshaler interface +func (storage Storage) MarshalYAML() (interface{}, error) { + if storage.Parameters() == nil { + return storage.Type(), nil + } + return map[string]Parameters(storage), nil +} + +// Auth defines the configuration for registry authorization. +type Auth map[string]Parameters + +// Type returns the auth type, such as htpasswd or token +func (auth Auth) Type() string { + // Return only key in this map + for k := range auth { + return k + } + return "" +} + +// Parameters returns the Parameters map for an Auth configuration +func (auth Auth) Parameters() Parameters { + return auth[auth.Type()] +} + +// setParameter changes the parameter at the provided key to the new value +func (auth Auth) setParameter(key string, value interface{}) { + auth[auth.Type()][key] = value +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters +func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]Parameters + err := unmarshal(&m) + if err == nil { + if len(m) > 1 { + types := make([]string, 0, len(m)) + for k := range m { + types = append(types, k) + } + + // TODO(stevvooe): May want to change this slightly for + // authorization to allow multiple challenges. + return fmt.Errorf("must provide exactly one type. Provided: %v", types) + + } + *auth = m + return nil + } + + var authType string + err = unmarshal(&authType) + if err == nil { + *auth = Auth{authType: Parameters{}} + return nil + } + + return err +} + +// MarshalYAML implements the yaml.Marshaler interface +func (auth Auth) MarshalYAML() (interface{}, error) { + if auth.Parameters() == nil { + return auth.Type(), nil + } + return map[string]Parameters(auth), nil +} + +// Notifications configures multiple http endpoints. +type Notifications struct { + // Endpoints is a list of http configurations for endpoints that + // respond to webhook notifications. In the future, we may allow other + // kinds of endpoints, such as external queues. + Endpoints []Endpoint `yaml:"endpoints,omitempty"` +} + +// Endpoint describes the configuration of an http webhook notification +// endpoint. +type Endpoint struct { + Name string `yaml:"name"` // identifies the endpoint in the registry instance. + Disabled bool `yaml:"disabled"` // disables the endpoint + URL string `yaml:"url"` // post url for the endpoint. + Headers http.Header `yaml:"headers"` // static headers that should be added to all requests + Timeout time.Duration `yaml:"timeout"` // HTTP timeout + Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure + Backoff time.Duration `yaml:"backoff"` // backoff duration + IgnoredMediaTypes []string `yaml:"ignoredmediatypes"` // target media types to ignore +} + +// Reporting defines error reporting methods. +type Reporting struct { + // Bugsnag configures error reporting for Bugsnag (bugsnag.com). + Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"` + // NewRelic configures error reporting for NewRelic (newrelic.com) + NewRelic NewRelicReporting `yaml:"newrelic,omitempty"` +} + +// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com). +type BugsnagReporting struct { + // APIKey is the Bugsnag api key. + APIKey string `yaml:"apikey,omitempty"` + // ReleaseStage tracks where the registry is deployed. + // Examples: production, staging, development + ReleaseStage string `yaml:"releasestage,omitempty"` + // Endpoint is used for specifying an enterprise Bugsnag endpoint. + Endpoint string `yaml:"endpoint,omitempty"` +} + +// NewRelicReporting configures error reporting for NewRelic (newrelic.com) +type NewRelicReporting struct { + // LicenseKey is the NewRelic user license key + LicenseKey string `yaml:"licensekey,omitempty"` + // Name is the component name of the registry in NewRelic + Name string `yaml:"name,omitempty"` + // Verbose configures debug output to STDOUT + Verbose bool `yaml:"verbose,omitempty"` +} + +// Middleware configures named middlewares to be applied at injection points. +type Middleware struct { + // Name the middleware registers itself as + Name string `yaml:"name"` + // Flag to disable middleware easily + Disabled bool `yaml:"disabled,omitempty"` + // Map of parameters that will be passed to the middleware's initialization function + Options Parameters `yaml:"options"` +} + +// Proxy configures the registry as a pull through cache +type Proxy struct { + // RemoteURL is the URL of the remote registry + RemoteURL string `yaml:"remoteurl"` + + // Username of the hub user + Username string `yaml:"username"` + + // Password of the hub user + Password string `yaml:"password"` +} + +// Parse parses an input configuration yaml document into a Configuration struct +// This should generally be capable of handling old configuration format versions +// +// Environment variables may be used to override configuration parameters other than version, +// following the scheme below: +// Configuration.Abc may be replaced by the value of REGISTRY_ABC, +// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth +func Parse(rd io.Reader) (*Configuration, error) { + in, err := ioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + p := NewParser("registry", []VersionedParseInfo{ + { + Version: MajorMinorVersion(0, 1), + ParseAs: reflect.TypeOf(v0_1Configuration{}), + ConversionFunc: func(c interface{}) (interface{}, error) { + if v0_1, ok := c.(*v0_1Configuration); ok { + if v0_1.Loglevel == Loglevel("") { + v0_1.Loglevel = Loglevel("info") + } + if v0_1.Storage.Type() == "" { + return nil, fmt.Errorf("No storage configuration provided") + } + return (*Configuration)(v0_1), nil + } + return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) + }, + }, + }) + + config := new(Configuration) + err = p.Parse(in, config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/vendor/github.com/docker/distribution/configuration/configuration_test.go b/vendor/github.com/docker/distribution/configuration/configuration_test.go new file mode 100644 index 0000000..3e1583d --- /dev/null +++ b/vendor/github.com/docker/distribution/configuration/configuration_test.go @@ -0,0 +1,529 @@ +package configuration + +import ( + "bytes" + "net/http" + "os" + "reflect" + "strings" + "testing" + + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +// Hook up gocheck into the "go test" runner +func Test(t *testing.T) { TestingT(t) } + +// configStruct is a canonical example configuration, which should map to configYamlV0_1 +var configStruct = Configuration{ + Version: "0.1", + Log: struct { + AccessLog struct { + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"accesslog,omitempty"` + Level Loglevel `yaml:"level"` + Formatter string `yaml:"formatter,omitempty"` + Fields map[string]interface{} `yaml:"fields,omitempty"` + Hooks []LogHook `yaml:"hooks,omitempty"` + }{ + Fields: map[string]interface{}{"environment": "test"}, + }, + Loglevel: "info", + Storage: Storage{ + "s3": Parameters{ + "region": "us-east-1", + "bucket": "my-bucket", + "rootdirectory": "/registry", + "encrypt": true, + "secure": false, + "accesskey": "SAMPLEACCESSKEY", + "secretkey": "SUPERSECRET", + "host": nil, + "port": 42, + }, + }, + Auth: Auth{ + "silly": Parameters{ + "realm": "silly", + "service": "silly", + }, + }, + Reporting: Reporting{ + Bugsnag: BugsnagReporting{ + APIKey: "BugsnagApiKey", + }, + }, + Notifications: Notifications{ + Endpoints: []Endpoint{ + { + Name: "endpoint-1", + URL: "http://example.com", + Headers: http.Header{ + "Authorization": []string{"Bearer "}, + }, + IgnoredMediaTypes: []string{"application/octet-stream"}, + }, + }, + }, + HTTP: struct { + Addr string `yaml:"addr,omitempty"` + Net string `yaml:"net,omitempty"` + Host string `yaml:"host,omitempty"` + Prefix string `yaml:"prefix,omitempty"` + Secret string `yaml:"secret,omitempty"` + RelativeURLs bool `yaml:"relativeurls,omitempty"` + TLS struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + LetsEncrypt struct { + CacheFile string `yaml:"cachefile,omitempty"` + Email string `yaml:"email,omitempty"` + } `yaml:"letsencrypt,omitempty"` + } `yaml:"tls,omitempty"` + Headers http.Header `yaml:"headers,omitempty"` + Debug struct { + Addr string `yaml:"addr,omitempty"` + } `yaml:"debug,omitempty"` + HTTP2 struct { + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"http2,omitempty"` + }{ + TLS: struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + LetsEncrypt struct { + CacheFile string `yaml:"cachefile,omitempty"` + Email string `yaml:"email,omitempty"` + } `yaml:"letsencrypt,omitempty"` + }{ + ClientCAs: []string{"/path/to/ca.pem"}, + }, + Headers: http.Header{ + "X-Content-Type-Options": []string{"nosniff"}, + }, + HTTP2: struct { + Disabled bool `yaml:"disabled,omitempty"` + }{ + Disabled: false, + }, + }, +} + +// configYamlV0_1 is a Version 0.1 yaml document representing configStruct +var configYamlV0_1 = ` +version: 0.1 +log: + fields: + environment: test +loglevel: info +storage: + s3: + region: us-east-1 + bucket: my-bucket + rootdirectory: /registry + encrypt: true + secure: false + accesskey: SAMPLEACCESSKEY + secretkey: SUPERSECRET + host: ~ + port: 42 +auth: + silly: + realm: silly + service: silly +notifications: + endpoints: + - name: endpoint-1 + url: http://example.com + headers: + Authorization: [Bearer ] + ignoredmediatypes: + - application/octet-stream +reporting: + bugsnag: + apikey: BugsnagApiKey +http: + clientcas: + - /path/to/ca.pem + headers: + X-Content-Type-Options: [nosniff] +` + +// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory +// storage driver with no parameters +var inmemoryConfigYamlV0_1 = ` +version: 0.1 +loglevel: info +storage: inmemory +auth: + silly: + realm: silly + service: silly +notifications: + endpoints: + - name: endpoint-1 + url: http://example.com + headers: + Authorization: [Bearer ] + ignoredmediatypes: + - application/octet-stream +http: + headers: + X-Content-Type-Options: [nosniff] +` + +type ConfigSuite struct { + expectedConfig *Configuration +} + +var _ = Suite(new(ConfigSuite)) + +func (suite *ConfigSuite) SetUpTest(c *C) { + os.Clearenv() + suite.expectedConfig = copyConfig(configStruct) +} + +// TestMarshalRoundtrip validates that configStruct can be marshaled and +// unmarshaled without changing any parameters +func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + config, err := Parse(bytes.NewReader(configBytes)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseSimple validates that configYamlV0_1 can be parsed into a struct +// matching configStruct +func (suite *ConfigSuite) TestParseSimple(c *C) { + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInmemory validates that configuration yaml with storage provided as +// a string can be parsed into a Configuration struct with no storage parameters +func (suite *ConfigSuite) TestParseInmemory(c *C) { + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + suite.expectedConfig.Reporting = Reporting{} + suite.expectedConfig.Log.Fields = nil + + config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseIncomplete validates that an incomplete yaml configuration cannot +// be parsed without providing environment variables to fill in the missing +// components. +func (suite *ConfigSuite) TestParseIncomplete(c *C) { + incompleteConfigYaml := "version: 0.1" + _, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, NotNil) + + suite.expectedConfig.Log.Fields = nil + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}} + suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}} + suite.expectedConfig.Reporting = Reporting{} + suite.expectedConfig.Notifications = Notifications{} + suite.expectedConfig.HTTP.Headers = nil + + // Note: this also tests that REGISTRY_STORAGE and + // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + os.Setenv("REGISTRY_AUTH", "silly") + os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly") + + config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithSameEnvStorage validates that providing environment variables +// that match the given storage type will only include environment-defined +// parameters and remove yaml-defined parameters +func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { + suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}} + + os.Setenv("REGISTRY_STORAGE", "s3") + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change +// and add to the given storage parameters will change and add parameters to the parsed +// Configuration struct +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { + suite.expectedConfig.Storage.setParameter("region", "us-west-1") + suite.expectedConfig.Storage.setParameter("secure", true) + suite.expectedConfig.Storage.setParameter("newparam", "some Value") + + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") + os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") + os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageType validates that providing an environment variable that +// changes the storage type will be reflected in the parsed Configuration struct +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + + os.Setenv("REGISTRY_STORAGE", "inmemory") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable +// that changes the storage type will be reflected in the parsed Configuration struct and that +// environment storage parameters will also be included +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}} + suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot") + + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log +// level to the same as the one provided in the yaml will not change the parsed Configuration struct +func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { + os.Setenv("REGISTRY_LOGLEVEL", "info") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the +// log level will override the value provided in the yaml document +func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { + suite.expectedConfig.Loglevel = "error" + + os.Setenv("REGISTRY_LOGLEVEL", "error") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInvalidLoglevel validates that the parser will fail to parse a +// configuration if the loglevel is malformed +func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) { + invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory" + _, err := Parse(bytes.NewReader([]byte(invalidConfigYaml))) + c.Assert(err, NotNil) + + os.Setenv("REGISTRY_LOGLEVEL", "derp") + + _, err = Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) + +} + +// TestParseWithDifferentEnvReporting validates that environment variables +// properly override reporting parameters +func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) { + suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey" + suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" + suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey" + suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME" + + os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey") + os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration +// version than the CurrentVersion +func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { + suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1) + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + _, err = Parse(bytes.NewReader(configBytes)) + c.Assert(err, NotNil) +} + +// TestParseExtraneousVars validates that environment variables referring to +// nonexistent variables don't cause side effects. +func (suite *ConfigSuite) TestParseExtraneousVars(c *C) { + suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" + + // A valid environment variable + os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") + + // Environment variables which shouldn't set config items + os.Setenv("registry_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") + os.Setenv("REPORTING_NEWRELIC_NAME", "some NewRelic NAME") + os.Setenv("REGISTRY_DUCKS", "quack") + os.Setenv("REGISTRY_REPORTING_ASDF", "ghjk") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseEnvVarImplicitMaps validates that environment variables can set +// values in maps that don't already exist. +func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *C) { + readonly := make(map[string]interface{}) + readonly["enabled"] = true + + maintenance := make(map[string]interface{}) + maintenance["readonly"] = readonly + + suite.expectedConfig.Storage["maintenance"] = maintenance + + os.Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseEnvWrongTypeMap validates that incorrectly attempting to unmarshal a +// string over existing map fails. +func (suite *ConfigSuite) TestParseEnvWrongTypeMap(c *C) { + os.Setenv("REGISTRY_STORAGE_S3", "somestring") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) +} + +// TestParseEnvWrongTypeStruct validates that incorrectly attempting to +// unmarshal a string into a struct fails. +func (suite *ConfigSuite) TestParseEnvWrongTypeStruct(c *C) { + os.Setenv("REGISTRY_STORAGE_LOG", "somestring") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) +} + +// TestParseEnvWrongTypeSlice validates that incorrectly attempting to +// unmarshal a string into a slice fails. +func (suite *ConfigSuite) TestParseEnvWrongTypeSlice(c *C) { + os.Setenv("REGISTRY_LOG_HOOKS", "somestring") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) +} + +// TestParseEnvMany tests several environment variable overrides. +// The result is not checked - the goal of this test is to detect panics +// from misuse of reflection. +func (suite *ConfigSuite) TestParseEnvMany(c *C) { + os.Setenv("REGISTRY_VERSION", "0.1") + os.Setenv("REGISTRY_LOG_LEVEL", "debug") + os.Setenv("REGISTRY_LOG_FORMATTER", "json") + os.Setenv("REGISTRY_LOG_HOOKS", "json") + os.Setenv("REGISTRY_LOG_FIELDS", "abc: xyz") + os.Setenv("REGISTRY_LOG_HOOKS", "- type: asdf") + os.Setenv("REGISTRY_LOGLEVEL", "debug") + os.Setenv("REGISTRY_STORAGE", "s3") + os.Setenv("REGISTRY_AUTH_PARAMS", "param1: value1") + os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2") + os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) +} + +func checkStructs(c *C, t reflect.Type, structsChecked map[string]struct{}) { + for t.Kind() == reflect.Ptr || t.Kind() == reflect.Map || t.Kind() == reflect.Slice { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return + } + if _, present := structsChecked[t.String()]; present { + // Already checked this type + return + } + + structsChecked[t.String()] = struct{}{} + + byUpperCase := make(map[string]int) + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + + // Check that the yaml tag does not contain an _. + yamlTag := sf.Tag.Get("yaml") + if strings.Contains(yamlTag, "_") { + c.Fatalf("yaml field name includes _ character: %s", yamlTag) + } + upper := strings.ToUpper(sf.Name) + if _, present := byUpperCase[upper]; present { + c.Fatalf("field name collision in configuration object: %s", sf.Name) + } + byUpperCase[upper] = i + + checkStructs(c, sf.Type, structsChecked) + } +} + +// TestValidateConfigStruct makes sure that the config struct has no members +// with yaml tags that would be ambiguous to the environment variable parser. +func (suite *ConfigSuite) TestValidateConfigStruct(c *C) { + structsChecked := make(map[string]struct{}) + checkStructs(c, reflect.TypeOf(Configuration{}), structsChecked) +} + +func copyConfig(config Configuration) *Configuration { + configCopy := new(Configuration) + + configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor()) + configCopy.Loglevel = config.Loglevel + configCopy.Log = config.Log + configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields)) + for k, v := range config.Log.Fields { + configCopy.Log.Fields[k] = v + } + + configCopy.Storage = Storage{config.Storage.Type(): Parameters{}} + for k, v := range config.Storage.Parameters() { + configCopy.Storage.setParameter(k, v) + } + configCopy.Reporting = Reporting{ + Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint}, + NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose}, + } + + configCopy.Auth = Auth{config.Auth.Type(): Parameters{}} + for k, v := range config.Auth.Parameters() { + configCopy.Auth.setParameter(k, v) + } + + configCopy.Notifications = Notifications{Endpoints: []Endpoint{}} + for _, v := range config.Notifications.Endpoints { + configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v) + } + + configCopy.HTTP.Headers = make(http.Header) + for k, v := range config.HTTP.Headers { + configCopy.HTTP.Headers[k] = v + } + + return configCopy +} diff --git a/vendor/github.com/docker/distribution/configuration/parser.go b/vendor/github.com/docker/distribution/configuration/parser.go new file mode 100644 index 0000000..8b81dd5 --- /dev/null +++ b/vendor/github.com/docker/distribution/configuration/parser.go @@ -0,0 +1,283 @@ +package configuration + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +// Version is a major/minor version pair of the form Major.Minor +// Major version upgrades indicate structure or type changes +// Minor version upgrades should be strictly additive +type Version string + +// MajorMinorVersion constructs a Version from its Major and Minor components +func MajorMinorVersion(major, minor uint) Version { + return Version(fmt.Sprintf("%d.%d", major, minor)) +} + +func (version Version) major() (uint, error) { + majorPart := strings.Split(string(version), ".")[0] + major, err := strconv.ParseUint(majorPart, 10, 0) + return uint(major), err +} + +// Major returns the major version portion of a Version +func (version Version) Major() uint { + major, _ := version.major() + return major +} + +func (version Version) minor() (uint, error) { + minorPart := strings.Split(string(version), ".")[1] + minor, err := strconv.ParseUint(minorPart, 10, 0) + return uint(minor), err +} + +// Minor returns the minor version portion of a Version +func (version Version) Minor() uint { + minor, _ := version.minor() + return minor +} + +// VersionedParseInfo defines how a specific version of a configuration should +// be parsed into the current version +type VersionedParseInfo struct { + // Version is the version which this parsing information relates to + Version Version + // ParseAs defines the type which a configuration file of this version + // should be parsed into + ParseAs reflect.Type + // ConversionFunc defines a method for converting the parsed configuration + // (of type ParseAs) into the current configuration version + // Note: this method signature is very unclear with the absence of generics + ConversionFunc func(interface{}) (interface{}, error) +} + +type envVar struct { + name string + value string +} + +type envVars []envVar + +func (a envVars) Len() int { return len(a) } +func (a envVars) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name } + +// Parser can be used to parse a configuration file and environment of a defined +// version into a unified output structure +type Parser struct { + prefix string + mapping map[Version]VersionedParseInfo + env envVars +} + +// NewParser returns a *Parser with the given environment prefix which handles +// versioned configurations which match the given parseInfos +func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { + p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)} + + for _, parseInfo := range parseInfos { + p.mapping[parseInfo.Version] = parseInfo + } + + for _, env := range os.Environ() { + envParts := strings.SplitN(env, "=", 2) + p.env = append(p.env, envVar{envParts[0], envParts[1]}) + } + + // We must sort the environment variables lexically by name so that + // more specific variables are applied before less specific ones + // (i.e. REGISTRY_STORAGE before + // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a + // lot simpler and easier to get right than unmarshalling map entries + // into temporaries and merging with the existing entry. + sort.Sort(p.env) + + return &p +} + +// Parse reads in the given []byte and environment and writes the resulting +// configuration into the input v +// +// Environment variables may be used to override configuration parameters other +// than version, following the scheme below: +// v.Abc may be replaced by the value of PREFIX_ABC, +// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth +func (p *Parser) Parse(in []byte, v interface{}) error { + var versionedStruct struct { + Version Version + } + + if err := yaml.Unmarshal(in, &versionedStruct); err != nil { + return err + } + + parseInfo, ok := p.mapping[versionedStruct.Version] + if !ok { + return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) + } + + parseAs := reflect.New(parseInfo.ParseAs) + err := yaml.Unmarshal(in, parseAs.Interface()) + if err != nil { + return err + } + + for _, envVar := range p.env { + pathStr := envVar.name + if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") { + path := strings.Split(pathStr, "_") + + err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value) + if err != nil { + return err + } + } + } + + c, err := parseInfo.ConversionFunc(parseAs.Interface()) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) + return nil +} + +// overwriteFields replaces configuration values with alternate values specified +// through the environment. Precondition: an empty path slice must never be +// passed in. +func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + panic("encountered nil pointer while handling environment variable " + fullpath) + } + v = reflect.Indirect(v) + } + switch v.Kind() { + case reflect.Struct: + return p.overwriteStruct(v, fullpath, path, payload) + case reflect.Map: + return p.overwriteMap(v, fullpath, path, payload) + case reflect.Interface: + if v.NumMethod() == 0 { + if !v.IsNil() { + return p.overwriteFields(v.Elem(), fullpath, path, payload) + } + // Interface was empty; create an implicit map + var template map[string]interface{} + wrappedV := reflect.MakeMap(reflect.TypeOf(template)) + v.Set(wrappedV) + return p.overwriteMap(wrappedV, fullpath, path, payload) + } + } + return nil +} + +func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error { + // Generate case-insensitive map of struct fields + byUpperCase := make(map[string]int) + for i := 0; i < v.NumField(); i++ { + sf := v.Type().Field(i) + upper := strings.ToUpper(sf.Name) + if _, present := byUpperCase[upper]; present { + panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name)) + } + byUpperCase[upper] = i + } + + fieldIndex, present := byUpperCase[path[0]] + if !present { + logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath) + return nil + } + field := v.Field(fieldIndex) + sf := v.Type().Field(fieldIndex) + + if len(path) == 1 { + // Env var specifies this field directly + fieldVal := reflect.New(sf.Type) + err := yaml.Unmarshal([]byte(payload), fieldVal.Interface()) + if err != nil { + return err + } + field.Set(reflect.Indirect(fieldVal)) + return nil + } + + // If the field is nil, must create an object + switch sf.Type.Kind() { + case reflect.Map: + if field.IsNil() { + field.Set(reflect.MakeMap(sf.Type)) + } + case reflect.Ptr: + if field.IsNil() { + field.Set(reflect.New(sf.Type)) + } + } + + err := p.overwriteFields(field, fullpath, path[1:], payload) + if err != nil { + return err + } + + return nil +} + +func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error { + if m.Type().Key().Kind() != reflect.String { + // non-string keys unsupported + logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath) + return nil + } + + if len(path) > 1 { + // If a matching key exists, get its value and continue the + // overwriting process. + for _, k := range m.MapKeys() { + if strings.ToUpper(k.String()) == path[0] { + mapValue := m.MapIndex(k) + // If the existing value is nil, we want to + // recreate it instead of using this value. + if (mapValue.Kind() == reflect.Ptr || + mapValue.Kind() == reflect.Interface || + mapValue.Kind() == reflect.Map) && + mapValue.IsNil() { + break + } + return p.overwriteFields(mapValue, fullpath, path[1:], payload) + } + } + } + + // (Re)create this key + var mapValue reflect.Value + if m.Type().Elem().Kind() == reflect.Map { + mapValue = reflect.MakeMap(m.Type().Elem()) + } else { + mapValue = reflect.New(m.Type().Elem()) + } + if len(path) > 1 { + err := p.overwriteFields(mapValue, fullpath, path[1:], payload) + if err != nil { + return err + } + } else { + err := yaml.Unmarshal([]byte(payload), mapValue.Interface()) + if err != nil { + return err + } + } + + m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue)) + + return nil +} diff --git a/vendor/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go new file mode 100644 index 0000000..23cbf5b --- /dev/null +++ b/vendor/github.com/docker/distribution/context/context.go @@ -0,0 +1,85 @@ +package context + +import ( + "sync" + + "github.com/docker/distribution/uuid" + "golang.org/x/net/context" +) + +// Context is a copy of Context from the golang.org/x/net/context package. +type Context interface { + context.Context +} + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + Context + id string // id of context, logged as "instance.id" + once sync.Once // once protect generation of the id +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + ic.once.Do(func() { + // We want to lazy initialize the UUID such that we don't + // call a random generator from the package initialization + // code. For various reasons random could not be available + // https://github.com/docker/distribution/issues/782 + ic.id = uuid.Generate().String() + }) + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. +func Background() Context { + return background +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. Use context Values only for request-scoped data that transits processes +// and APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. +type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. + for k, v := range m { + mo[k] = v + } + + return stringMapContext{ + Context: ctx, + m: mo, + } +} + +func (smc stringMapContext) Value(key interface{}) interface{} { + if ks, ok := key.(string); ok { + if v, ok := smc.m[ks]; ok { + return v + } + } + + return smc.Context.Value(key) +} diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go new file mode 100644 index 0000000..3b4ab88 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/doc.go @@ -0,0 +1,89 @@ +// Package context provides several utilities for working with +// golang.org/x/net/context in http requests. Primarily, the focus is on +// logging relevant request information but this package is not limited to +// that purpose. +// +// The easiest way to get started is to get the background context: +// +// ctx := context.Background() +// +// The returned context should be passed around your application and be the +// root of all other context instances. If the application has a version, this +// line should be called before anything else: +// +// ctx := context.WithVersion(context.Background(), version) +// +// The above will store the version in the context and will be available to +// the logger. +// +// Logging +// +// The most useful aspect of this package is GetLogger. This function takes +// any context.Context interface and returns the current logger from the +// context. Canonical usage looks like this: +// +// GetLogger(ctx).Infof("something interesting happened") +// +// GetLogger also takes optional key arguments. The keys will be looked up in +// the context and reported with the logger. The following example would +// return a logger that prints the version with each log message: +// +// ctx := context.Context(context.Background(), "version", version) +// GetLogger(ctx, "version").Infof("this log message has a version field") +// +// The above would print out a log message like this: +// +// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m +// +// When used with WithLogger, we gain the ability to decorate the context with +// loggers that have information from disparate parts of the call stack. +// Following from the version example, we can build a new context with the +// configured logger such that we always print the version field: +// +// ctx = WithLogger(ctx, GetLogger(ctx, "version")) +// +// Since the logger has been pushed to the context, we can now get the version +// field for free with our log messages. Future calls to GetLogger on the new +// context will have the version field: +// +// GetLogger(ctx).Infof("this log message has a version field") +// +// This becomes more powerful when we start stacking loggers. Let's say we +// have the version logger from above but also want a request id. Using the +// context above, in our request scoped function, we place another logger in +// the context: +// +// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context +// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) +// +// When GetLogger is called on the new context, "http.request.id" will be +// included as a logger field, along with the original "version" field: +// +// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m +// +// Note that this only affects the new context, the previous context, with the +// version field, can be used independently. Put another way, the new logger, +// added to the request context, is unique to that context and can have +// request scoped varaibles. +// +// HTTP Requests +// +// This package also contains several methods for working with http requests. +// The concepts are very similar to those described above. We simply place the +// request in the context using WithRequest. This makes the request variables +// available. GetRequestLogger can then be called to get request specific +// variables in a log line: +// +// ctx = WithRequest(ctx, req) +// GetRequestLogger(ctx).Infof("request variables") +// +// Like above, if we want to include the request data in all log messages in +// the context, we push the logger to a new context and use that one: +// +// ctx = WithLogger(ctx, GetRequestLogger(ctx)) +// +// The concept is fairly powerful and ensures that calls throughout the stack +// can be traced in log messages. Using the fields like "http.request.id", one +// can analyze call flow for a particular request with a simple grep of the +// logs. +package context diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go new file mode 100644 index 0000000..7fe9b8a --- /dev/null +++ b/vendor/github.com/docker/distribution/context/http.go @@ -0,0 +1,366 @@ +package context + +import ( + "errors" + "net" + "net/http" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/gorilla/mux" +) + +// Common errors used with this package. +var ( + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + +// WithRequest places the request on the context. The context of the request +// is assigned a unique id, available at "http.request.id". The request itself +// is available at "http.request". Other common attributes are available under +// the prefix "http.request.". If a request is already present on the context, +// this method will panic. +func WithRequest(ctx Context, r *http.Request) Context { + if ctx.Value("http.request") != nil { + // NOTE(stevvooe): This needs to be considered a programming error. It + // is unlikely that we'd want to have more than one request in + // context. + panic("only one request per context") + } + + return &httpRequestContext{ + Context: ctx, + startedAt: time.Now(), + id: uuid.Generate().String(), + r: r, + } +} + +// GetRequest returns the http request in the given context. Returns +// ErrNoRequestContext if the context does not have an http request associated +// with it. +func GetRequest(ctx Context) (*http.Request, error) { + if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { + return r, nil + } + return nil, ErrNoRequestContext +} + +// GetRequestID attempts to resolve the current request id, if possible. An +// error is return if it is not available on the context. +func GetRequestID(ctx Context) string { + return GetStringValue(ctx, "http.request.id") +} + +// WithResponseWriter returns a new context and response writer that makes +// interesting response statistics available within the context. +func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { + if closeNotifier, ok := w.(http.CloseNotifier); ok { + irwCN := &instrumentedResponseWriterCN{ + instrumentedResponseWriter: instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + }, + CloseNotifier: closeNotifier, + } + + return irwCN, irwCN + } + + irw := instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + } + return &irw, &irw +} + +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + +// getVarsFromRequest let's us change request vars implementation for testing +// and maybe future changes. +var getVarsFromRequest = mux.Vars + +// WithVars extracts gorilla/mux vars and makes them available on the returned +// context. Variables are available at keys with the prefix "vars.". For +// example, if looking for the variable "name", it can be accessed as +// "vars.name". Implementations that are accessing values need not know that +// the underlying context is implemented with gorilla/mux vars. +func WithVars(ctx Context, r *http.Request) Context { + return &muxVarsContext{ + Context: ctx, + vars: getVarsFromRequest(r), + } +} + +// GetRequestLogger returns a logger that contains fields from the request in +// the current context. If the request is not available in the context, no +// fields will display. Request loggers can safely be pushed onto the context. +func GetRequestLogger(ctx Context) Logger { + return GetLogger(ctx, + "http.request.id", + "http.request.method", + "http.request.host", + "http.request.uri", + "http.request.referer", + "http.request.useragent", + "http.request.remoteaddr", + "http.request.contenttype") +} + +// GetResponseLogger reads the current response stats and builds a logger. +// Because the values are read at call time, pushing a logger returned from +// this function on the context will lead to missing or invalid data. Only +// call this at the end of a request, after the response has been written. +func GetResponseLogger(ctx Context) Logger { + l := getLogrusLogger(ctx, + "http.response.written", + "http.response.status", + "http.response.contenttype") + + duration := Since(ctx, "http.request.startedat") + + if duration > 0 { + l = l.WithField("http.response.duration", duration.String()) + } + + return l +} + +// httpRequestContext makes information about a request available to context. +type httpRequestContext struct { + Context + + startedAt time.Time + id string + r *http.Request +} + +// Value returns a keyed element of the request for use in the context. To get +// the request itself, query "request". For other components, access them as +// "request.". For example, r.RequestURI +func (ctx *httpRequestContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.request" { + return ctx.r + } + + if !strings.HasPrefix(keyStr, "http.request.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + switch parts[2] { + case "uri": + return ctx.r.RequestURI + case "remoteaddr": + return RemoteAddr(ctx.r) + case "method": + return ctx.r.Method + case "host": + return ctx.r.Host + case "referer": + referer := ctx.r.Referer() + if referer != "" { + return referer + } + case "useragent": + return ctx.r.UserAgent() + case "id": + return ctx.id + case "startedat": + return ctx.startedAt + case "contenttype": + ct := ctx.r.Header.Get("Content-Type") + if ct != "" { + return ct + } + } + } + +fallback: + return ctx.Context.Value(key) +} + +type muxVarsContext struct { + Context + vars map[string]string +} + +func (ctx *muxVarsContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "vars" { + return ctx.vars + } + + if strings.HasPrefix(keyStr, "vars.") { + keyStr = strings.TrimPrefix(keyStr, "vars.") + } + + if v, ok := ctx.vars[keyStr]; ok { + return v + } + } + + return ctx.Context.Value(key) +} + +// instrumentedResponseWriterCN provides response writer information in a +// context. It implements http.CloseNotifier so that users can detect +// early disconnects. +type instrumentedResponseWriterCN struct { + instrumentedResponseWriter + http.CloseNotifier +} + +// instrumentedResponseWriter provides response writer information in a +// context. This variant is only used in the case where CloseNotifier is not +// implemented by the parent ResponseWriter. +type instrumentedResponseWriter struct { + http.ResponseWriter + Context + + mu sync.Mutex + status int + written int64 +} + +func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { + n, err = irw.ResponseWriter.Write(p) + + irw.mu.Lock() + irw.written += int64(n) + + // Guess the likely status if not set. + if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + + if !strings.HasPrefix(keyStr, "http.response.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + irw.mu.Lock() + defer irw.mu.Unlock() + + switch parts[2] { + case "written": + return irw.written + case "status": + return irw.status + case "contenttype": + contentType := irw.Header().Get("Content-Type") + if contentType != "" { + return contentType + } + } + } + +fallback: + return irw.Context.Value(key) +} + +func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + } + + return irw.instrumentedResponseWriter.Value(key) +} diff --git a/vendor/github.com/docker/distribution/context/http_test.go b/vendor/github.com/docker/distribution/context/http_test.go new file mode 100644 index 0000000..3d4b3c8 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/http_test.go @@ -0,0 +1,285 @@ +package context + +import ( + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "reflect" + "testing" + "time" +) + +func TestWithRequest(t *testing.T) { + var req http.Request + + start := time.Now() + req.Method = "GET" + req.Host = "example.com" + req.RequestURI = "/test-test" + req.Header = make(http.Header) + req.Header.Set("Referer", "foo.com/referer") + req.Header.Set("User-Agent", "test/0.1") + + ctx := WithRequest(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "http.request", + expected: &req, + }, + { + key: "http.request.id", + }, + { + key: "http.request.method", + expected: req.Method, + }, + { + key: "http.request.host", + expected: req.Host, + }, + { + key: "http.request.uri", + expected: req.RequestURI, + }, + { + key: "http.request.referer", + expected: req.Referer(), + }, + { + key: "http.request.useragent", + expected: req.UserAgent(), + }, + { + key: "http.request.remoteaddr", + expected: req.RemoteAddr, + }, + { + key: "http.request.startedat", + }, + } { + v := ctx.Value(testcase.key) + + if v == nil { + t.Fatalf("value not found for %q", testcase.key) + } + + if testcase.expected != nil && v != testcase.expected { + t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected) + } + + // Key specific checks! + switch testcase.key { + case "http.request.id": + if _, ok := v.(string); !ok { + t.Fatalf("request id not a string: %v", v) + } + case "http.request.startedat": + vt, ok := v.(time.Time) + if !ok { + t.Fatalf("value not a time: %v", v) + } + + now := time.Now() + if vt.After(now) { + t.Fatalf("time generated too late: %v > %v", vt, now) + } + + if vt.Before(start) { + t.Fatalf("time generated too early: %v < %v", vt, start) + } + } + } +} + +type testResponseWriter struct { + flushed bool + status int + written int64 + header http.Header +} + +func (trw *testResponseWriter) Header() http.Header { + if trw.header == nil { + trw.header = make(http.Header) + } + + return trw.header +} + +func (trw *testResponseWriter) Write(p []byte) (n int, err error) { + if trw.status == 0 { + trw.status = http.StatusOK + } + + n = len(p) + trw.written += int64(n) + return +} + +func (trw *testResponseWriter) WriteHeader(status int) { + trw.status = status +} + +func (trw *testResponseWriter) Flush() { + trw.flushed = true +} + +func TestWithResponseWriter(t *testing.T) { + trw := testResponseWriter{} + ctx, rw := WithResponseWriter(Background(), &trw) + + if ctx.Value("http.response") != rw { + t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw) + } + + grw, err := GetResponseWriter(ctx) + if err != nil { + t.Fatalf("error getting response writer: %v", err) + } + + if grw != rw { + t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw) + } + + if ctx.Value("http.response.status") != 0 { + t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status")) + } + + if n, err := rw.Write(make([]byte, 1024)); err != nil { + t.Fatalf("unexpected error writing: %v", err) + } else if n != 1024 { + t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024) + } + + if ctx.Value("http.response.status") != http.StatusOK { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK) + } + + if ctx.Value("http.response.written") != int64(1024) { + t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024) + } + + // Make sure flush propagates + rw.(http.Flusher).Flush() + + if !trw.flushed { + t.Fatalf("response writer not flushed") + } + + // Write another status and make sure context is correct. This normally + // wouldn't work except for in this contrived testcase. + rw.WriteHeader(http.StatusBadRequest) + + if ctx.Value("http.response.status") != http.StatusBadRequest { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest) + } +} + +func TestWithVars(t *testing.T) { + var req http.Request + vars := map[string]string{ + "foo": "asdf", + "bar": "qwer", + } + + getVarsFromRequest = func(r *http.Request) map[string]string { + if r != &req { + t.Fatalf("unexpected request: %v != %v", r, req) + } + + return vars + } + + ctx := WithVars(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "vars", + expected: vars, + }, + { + key: "vars.foo", + expected: "asdf", + }, + { + key: "vars.bar", + expected: "qwer", + }, + } { + v := ctx.Value(testcase.key) + + if !reflect.DeepEqual(v, testcase.expected) { + t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected) + } + } +} + +// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test +// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten +// at the transport layer to 127.0.0.1: . However, as the X-Forwarded-For header +// just contains the IP address, it is different enough for testing. +func TestRemoteAddr(t *testing.T) { + var expectedRemote string + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + if r.RemoteAddr == expectedRemote { + t.Errorf("Unexpected matching remote addresses") + } + + actualRemote := RemoteAddr(r) + if expectedRemote != actualRemote { + t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) + } + + w.WriteHeader(200) + })) + + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + + proxy := httputil.NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxy) + defer frontend.Close() + + // X-Forwarded-For set by proxy + expectedRemote = "127.0.0.1" + proxyReq, err := http.NewRequest("GET", frontend.URL, nil) + if err != nil { + t.Fatal(err) + } + + _, err = http.DefaultClient.Do(proxyReq) + if err != nil { + t.Fatal(err) + } + + // RemoteAddr in X-Real-Ip + getReq, err := http.NewRequest("GET", backend.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedRemote = "1.2.3.4" + getReq.Header["X-Real-ip"] = []string{expectedRemote} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } + + // Valid X-Real-Ip and invalid X-Forwarded-For + getReq.Header["X-forwarded-for"] = []string{"1.2.3"} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go new file mode 100644 index 0000000..fbb6a05 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/logger.go @@ -0,0 +1,116 @@ +package context + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "runtime" +) + +// Logger provides a leveled-logging interface. +type Logger interface { + // standard logger methods + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) + + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + + Panic(args ...interface{}) + Panicf(format string, args ...interface{}) + Panicln(args ...interface{}) + + // Leveled methods, from logrus + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) + Debugln(args ...interface{}) + + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Errorln(args ...interface{}) + + Info(args ...interface{}) + Infof(format string, args ...interface{}) + Infoln(args ...interface{}) + + Warn(args ...interface{}) + Warnf(format string, args ...interface{}) + Warnln(args ...interface{}) +} + +// WithLogger creates a new context with provided logger. +func WithLogger(ctx Context, logger Logger) Context { + return WithValue(ctx, "logger", logger) +} + +// GetLoggerWithField returns a logger instance with the specified field key +// and value without affecting the context. Extra specified keys will be +// resolved from the context. +func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) +} + +// GetLoggerWithFields returns a logger instance with the specified fields +// without affecting the context. Extra specified keys will be resolved from +// the context. +func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { + // must convert from interface{} -> interface{} to string -> interface{} for logrus. + lfields := make(logrus.Fields, len(fields)) + for key, value := range fields { + lfields[fmt.Sprint(key)] = value + } + + return getLogrusLogger(ctx, keys...).WithFields(lfields) +} + +// GetLogger returns the logger from the current context, if present. If one +// or more keys are provided, they will be resolved on the context and +// included in the logger. While context.Value takes an interface, any key +// argument passed to GetLogger will be passed to fmt.Sprint when expanded as +// a logging key field. If context keys are integer constants, for example, +// its recommended that a String method is implemented. +func GetLogger(ctx Context, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...) +} + +// GetLogrusLogger returns the logrus logger for the context. If one more keys +// are provided, they will be resolved on the context and included in the +// logger. Only use this function if specific logrus functionality is +// required. +func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { + var logger *logrus.Entry + + // Get a logger, if it is present. + loggerInterface := ctx.Value("logger") + if loggerInterface != nil { + if lgr, ok := loggerInterface.(*logrus.Entry); ok { + logger = lgr + } + } + + if logger == nil { + fields := logrus.Fields{} + + // Fill in the instance id, if we have it. + instanceID := ctx.Value("instance.id") + if instanceID != nil { + fields["instance.id"] = instanceID + } + + fields["go.version"] = runtime.Version() + // If no logger is found, just return the standard logger. + logger = logrus.StandardLogger().WithFields(fields) + } + + fields := logrus.Fields{} + for _, key := range keys { + v := ctx.Value(key) + if v != nil { + fields[fmt.Sprint(key)] = v + } + } + + return logger.WithFields(fields) +} diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go new file mode 100644 index 0000000..721964a --- /dev/null +++ b/vendor/github.com/docker/distribution/context/trace.go @@ -0,0 +1,104 @@ +package context + +import ( + "runtime" + "time" + + "github.com/docker/distribution/uuid" +) + +// WithTrace allocates a traced timing span in a new context. This allows a +// caller to track the time between calling WithTrace and the returned done +// function. When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapsed time and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ... +// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... +// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.Generate().String(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. + } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/vendor/github.com/docker/distribution/context/trace_test.go b/vendor/github.com/docker/distribution/context/trace_test.go new file mode 100644 index 0000000..4b969fb --- /dev/null +++ b/vendor/github.com/docker/distribution/context/trace_test.go @@ -0,0 +1,85 @@ +package context + +import ( + "runtime" + "testing" + "time" +) + +// TestWithTrace ensures that tracing has the expected values in the context. +func TestWithTrace(t *testing.T) { + pc, file, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + + base := []valueTestCase{ + { + key: "trace.id", + notnilorempty: true, + }, + + { + key: "trace.file", + expected: file, + notnilorempty: true, + }, + { + key: "trace.line", + notnilorempty: true, + }, + { + key: "trace.start", + notnilorempty: true, + }, + } + + ctx, done := WithTrace(Background()) + defer done("this will be emitted at end of test") + + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + })) + + traced := func() { + parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. + + pc, _, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + ctx, done := WithTrace(ctx) + defer done("this should be subordinate to the other trace") + time.Sleep(time.Second) + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + }, valueTestCase{ + key: "trace.parent.id", + expected: parentID, + })) + } + traced() + + time.Sleep(time.Second) +} + +type valueTestCase struct { + key string + expected interface{} + notnilorempty bool // just check not empty/not nil +} + +func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { + + for _, testcase := range values { + v := ctx.Value(testcase.key) + if testcase.notnilorempty { + if v == nil || v == "" { + t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v) + } + continue + } + + if v != testcase.expected { + t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected) + } + } +} diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go new file mode 100644 index 0000000..cb9ef52 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/util.go @@ -0,0 +1,24 @@ +package context + +import ( + "time" +) + +// Since looks up key, which should be a time.Time, and returns the duration +// since that time. If the key is not found, the value returned will be zero. +// This is helpful when inferring metrics related to context execution times. +func Since(ctx Context, key interface{}) time.Duration { + if startedAt, ok := ctx.Value(key).(time.Time); ok { + return time.Since(startedAt) + } + return 0 +} + +// GetStringValue returns a string value from the context. The empty string +// will be returned if not found. +func GetStringValue(ctx Context, key interface{}) (value string) { + if valuev, ok := ctx.Value(key).(string); ok { + value = valuev + } + return value +} diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go new file mode 100644 index 0000000..746cda0 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/version.go @@ -0,0 +1,16 @@ +package context + +// WithVersion stores the application version in the context. The new context +// gets a logger to ensure log messages are marked with the application +// version. +func WithVersion(ctx Context, version string) Context { + ctx = WithValue(ctx, "version", version) + // push a new logger onto the stack + return WithLogger(ctx, GetLogger(ctx, "version")) +} + +// GetVersion returns the application version from the context. An empty +// string may returned if the version was not set on the context. +func GetVersion(ctx Context) string { + return GetStringValue(ctx, "version") +} diff --git a/vendor/github.com/docker/distribution/context/version_test.go b/vendor/github.com/docker/distribution/context/version_test.go new file mode 100644 index 0000000..b816526 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/version_test.go @@ -0,0 +1,19 @@ +package context + +import "testing" + +func TestVersionContext(t *testing.T) { + ctx := Background() + + if GetVersion(ctx) != "" { + t.Fatalf("context should not yet have a version") + } + + expected := "2.1-whatever" + ctx = WithVersion(ctx, expected) + version := GetVersion(ctx) + + if version != expected { + t.Fatalf("version was not set: %q != %q", version, expected) + } +} diff --git a/vendor/github.com/docker/distribution/contrib/apache/README.MD b/vendor/github.com/docker/distribution/contrib/apache/README.MD new file mode 100644 index 0000000..29f6bae --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/apache/README.MD @@ -0,0 +1,36 @@ +# Apache HTTPd sample for Registry v1, v2 and mirror + +3 containers involved + +* Docker Registry v1 (registry 0.9.1) +* Docker Registry v2 (registry 2.0.0) +* Docker Registry v1 in mirror mode + +HTTP for mirror and HTTPS for v1 & v2 + +* http://registry.example.com proxify Docker Registry 1.0 in Mirror mode +* https://registry.example.com proxify Docker Registry 1.0 or 2.0 in Hosting mode + +## 3 Docker containers should be started + +* Docker Registry 1.0 in Mirror mode : port 5001 +* Docker Registry 1.0 in Hosting mode : port 5000 +* Docker Registry 2.0 in Hosting mode : port 5002 + +### Registry v1 + + docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1" + +### Mirror + + docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \ + -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1" + +### Registry v2 + + docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2" + +# For Hosting mode access + +* users should have account (valid-user) to be able to fetch images +* only users using account docker-deployer will be allowed to push images diff --git a/vendor/github.com/docker/distribution/contrib/apache/apache.conf b/vendor/github.com/docker/distribution/contrib/apache/apache.conf new file mode 100644 index 0000000..3300a7c --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/apache/apache.conf @@ -0,0 +1,127 @@ +# +# Sample Apache 2.x configuration where : +# + + + + ServerName registry.example.com + ServerAlias www.registry.example.com + + ProxyRequests off + ProxyPreserveHost on + + # no proxy for /error/ (Apache HTTPd errors messages) + ProxyPass /error/ ! + + ProxyPass /_ping http://localhost:5001/_ping + ProxyPassReverse /_ping http://localhost:5001/_ping + + ProxyPass /v1 http://localhost:5001/v1 + ProxyPassReverse /v1 http://localhost:5001/v1 + + # Logs + ErrorLog ${APACHE_LOG_DIR}/mirror_error_log + CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog + + + + + + + ServerName registry.example.com + ServerAlias www.registry.example.com + + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt + SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key + + # Higher Strength SSL Ciphers + SSLProtocol all -SSLv2 -SSLv3 -TLSv1 + SSLCipherSuite RC4-SHA:HIGH + SSLHonorCipherOrder on + + # Logs + ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log + CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog + + Header always set "Docker-Distribution-Api-Version" "registry/2.0" + Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" + RequestHeader set X-Forwarded-Proto "https" + + ProxyRequests off + ProxyPreserveHost on + + # no proxy for /error/ (Apache HTTPd errors messages) + ProxyPass /error/ ! + + # + # Registry v1 + # + + ProxyPass /v1 http://localhost:5000/v1 + ProxyPassReverse /v1 http://localhost:5000/v1 + + ProxyPass /_ping http://localhost:5000/_ping + ProxyPassReverse /_ping http://localhost:5000/_ping + + # Authentication require for push + + Order deny,allow + Allow from all + AuthName "Registry Authentication" + AuthType basic + AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" + + # Read access to authentified users + + Require valid-user + + + # Write access to docker-deployer account only + + Require user docker-deployer + + + + + # Allow ping to run unauthenticated. + + Satisfy any + Allow from all + + + # Allow ping to run unauthenticated. + + Satisfy any + Allow from all + + + # + # Registry v2 + # + + ProxyPass /v2 http://localhost:5002/v2 + ProxyPassReverse /v2 http://localhost:5002/v2 + + + Order deny,allow + Allow from all + AuthName "Registry Authentication" + AuthType basic + AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" + + # Read access to authentified users + + Require valid-user + + + # Write access to docker-deployer only + + Require user docker-deployer + + + + + + + diff --git a/vendor/github.com/docker/distribution/contrib/compose/README.md b/vendor/github.com/docker/distribution/contrib/compose/README.md new file mode 100644 index 0000000..a9522fd --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/README.md @@ -0,0 +1,147 @@ +# Docker Compose V1 + V2 registry + +This compose configuration configures a `v1` and `v2` registry behind an `nginx` +proxy. By default, you can access the combined registry at `localhost:5000`. + +The configuration does not support pushing images to `v2` and pulling from `v1`. +If a `docker` client has a version less than 1.6, Nginx will route its requests +to the 1.0 registry. Requests from newer clients will route to the 2.0 registry. + +### Install Docker Compose + +1. Open a new terminal on the host with your `distribution` source. + +2. Get the `docker-compose` binary. + + $ sudo wget https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` -O /usr/local/bin/docker-compose + + This command installs the binary in the `/usr/local/bin` directory. + +3. Add executable permissions to the binary. + + $ sudo chmod +x /usr/local/bin/docker-compose + +## Build and run with Compose + +1. In your terminal, navigate to the `distribution/contrib/compose` directory + + This directory includes a single `docker-compose.yml` configuration. + + nginx: + build: "nginx" + ports: + - "5000:5000" + links: + - registryv1:registryv1 + - registryv2:registryv2 + registryv1: + image: registry + ports: + - "5000" + registryv2: + build: "../../" + ports: + - "5000" + + This configuration builds a new `nginx` image as specified by the + `nginx/Dockerfile` file. The 1.0 registry comes from Docker's official + public image. Finally, the registry 2.0 image is built from the + `distribution/Dockerfile` you've used previously. + +2. Get a registry 1.0 image. + + $ docker pull registry:0.9.1 + + The Compose configuration looks for this image locally. If you don't do this + step, later steps can fail. + +3. Build `nginx`, the registry 2.0 image, and + + $ docker-compose build + registryv1 uses an image, skipping + Building registryv2... + Step 0 : FROM golang:1.4 + + ... + + Removing intermediate container 9f5f5068c3f3 + Step 4 : COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf + ---> 74acc70fa106 + Removing intermediate container edb84c2b40cb + Successfully built 74acc70fa106 + + The commmand outputs its progress until it completes. + +4. Start your configuration with compose. + + $ docker-compose up + Recreating compose_registryv1_1... + Recreating compose_registryv2_1... + Recreating compose_nginx_1... + Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 + ... + + +5. In another terminal, display the running configuration. + + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 + 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 + aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 + +### Explore a bit + +1. Check for TLS on your `nginx` server. + + $ curl -v https://localhost:5000 + * Rebuilt URL to: https://localhost:5000/ + * Hostname was NOT found in DNS cache + * Trying 127.0.0.1... + * Connected to localhost (127.0.0.1) port 5000 (#0) + * successfully set certificate verify locations: + * CAfile: none + CApath: /etc/ssl/certs + * SSLv3, TLS handshake, Client hello (1): + * SSLv3, TLS handshake, Server hello (2): + * SSLv3, TLS handshake, CERT (11): + * SSLv3, TLS alert, Server hello (2): + * SSL certificate problem: self signed certificate + * Closing connection 0 + curl: (60) SSL certificate problem: self signed certificate + More details here: http://curl.haxx.se/docs/sslcerts.html + +2. Tag the `v1` registry image. + + $ docker tag registry:latest localhost:5000/registry_one:latest + +2. Push it to the localhost. + + $ docker push localhost:5000/registry_one:latest + + If you are using the 1.6 Docker client, this pushes the image the `v2 `registry. + +4. Use `curl` to list the image in the registry. + + $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list + * Hostname was NOT found in DNS cache + * Trying 127.0.0.1... + * Connected to localhost (127.0.0.1) port 32777 (#0) + > GET /v2/registry1/tags/list HTTP/1.1 + > User-Agent: curl/7.36.0 + > Host: localhost:32777 + > Accept: */* + > + < HTTP/1.1 200 OK + < Content-Type: application/json; charset=utf-8 + < Docker-Distribution-Api-Version: registry/2.0 + < Date: Tue, 14 Apr 2015 22:34:13 GMT + < Content-Length: 39 + < + {"name":"registry1","tags":["latest"]} + * Connection #0 to host localhost left intact + + This example refers to the specific port assigned to the 2.0 registry. You saw + this port earlier, when you used `docker ps` to show your running containers. + + diff --git a/vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml b/vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml new file mode 100644 index 0000000..5cd0485 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml @@ -0,0 +1,15 @@ +nginx: + build: "nginx" + ports: + - "5000:5000" + links: + - registryv1:registryv1 + - registryv2:registryv2 +registryv1: + image: registry + ports: + - "5000" +registryv2: + build: "../../" + ports: + - "5000" diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile b/vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile new file mode 100644 index 0000000..2b252ec --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile @@ -0,0 +1,6 @@ +FROM nginx:1.7 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry.conf /etc/nginx/docker-registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf new file mode 100644 index 0000000..65c4d77 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf new file mode 100644 index 0000000..7b039a5 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf @@ -0,0 +1,7 @@ +proxy_pass http://docker-registry; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line +proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf new file mode 100644 index 0000000..63cd180 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf @@ -0,0 +1,27 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} + diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf new file mode 100644 index 0000000..47ffd23 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf @@ -0,0 +1,41 @@ +# Docker registry proxy for api versions 1 and 2 + +upstream docker-registry { + server registryv1:5000; +} + +upstream docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + # To add basic authentication to v2 use auth_basic setting plus add_header + # auth_basic "registry.localhost"; + # auth_basic_user_file test.password; + # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + + include docker-registry-v2.conf; + } + + location / { + include docker-registry.conf; + } +} + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile new file mode 100644 index 0000000..7a047a6 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile @@ -0,0 +1,9 @@ +FROM distribution/golem:0.1 + +MAINTAINER Docker Distribution Team + +RUN apk add --no-cache git + +ENV TMPDIR /var/lib/docker/tmp + +WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/README.md b/vendor/github.com/docker/distribution/contrib/docker-integration/README.md new file mode 100644 index 0000000..bc5be9d --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/README.md @@ -0,0 +1,63 @@ +# Docker Registry Integration Testing + +These integration tests cover interactions between registry clients such as +the docker daemon and the registry server. All tests can be run using the +[golem integration test runner](https://github.com/docker/golem) + +The integration tests configure components using docker compose +(see docker-compose.yaml) and the runner can be using the golem +configuration file (see golem.conf). + +## Running integration tests + +### Run using multiversion script + +The integration tests in the `contrib/docker-integration` directory can be simply +run by executing the run script `./run_multiversion.sh`. If there is no running +daemon to connect to, run as `./run_multiversion.sh -d`. + +This command will build the distribution image from the locally checked out +version and run against multiple versions of docker defined in the script. To +run a specific version of the registry or docker, Golem will need to be +executed manually. + +### Run manually using Golem + +Using the golem tool directly allows running against multiple versions of +the registry and docker. Running against multiple versions of the registry +can be useful for testing changes in the docker daemon which are not +covered by the default run script. + +#### Installing Golem + +Golem is distributed as an executable binary which can be installed from +the [release page](https://github.com/docker/golem/releases/tag/v0.1). + +#### Running golem with docker + +Additionally golem can be run as a docker image requiring no additonal +installation. + +`docker run --privileged -v "$GOPATH/src/github.com/docker/distribution/contrib/docker-integration:/test" -w /test distribution/golem golem -rundaemon .` + +#### Golem custom images + +Golem tests version of software by defining the docker image to test. + +Run with registry 2.2.1 and docker 1.10.3 + +`golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .` + + +#### Use golem caching for developing tests + +Golem allows caching image configuration to reduce test start up time. +Using this cache will allow tests with the same set of images to start +up quickly. This can be useful when developing tests and needing the +test to run quickly. If there are changes which effect the image (such as +building a new registry image), then startup time will be slower. + +Run this command multiple times and after the first time test runs +should start much quicker. +`golem -cache ~/.cache/docker/golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .` + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml new file mode 100644 index 0000000..4d4f385 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml @@ -0,0 +1,91 @@ +nginx: + build: "nginx" + ports: + - "5000:5000" + - "5002:5002" + - "5440:5440" + - "5441:5441" + - "5442:5442" + - "5443:5443" + - "5444:5444" + - "5445:5445" + - "5446:5446" + - "5447:5447" + - "5448:5448" + - "5554:5554" + - "5555:5555" + - "5556:5556" + - "5557:5557" + - "5558:5558" + - "5559:5559" + - "5600:5600" + - "6666:6666" + links: + - registryv2:registryv2 + - malevolent:malevolent + - registryv2token:registryv2token + - tokenserver:tokenserver + - registryv2tokenoauth:registryv2tokenoauth + - registryv2tokenoauthnotls:registryv2tokenoauthnotls + - tokenserveroauth:tokenserveroauth +registryv2: + image: golem-distribution:latest + ports: + - "5000" +registryv2token: + image: golem-distribution:latest + ports: + - "5000" + volumes: + - ./tokenserver/registry-config.yml:/etc/docker/registry/config.yml + - ./tokenserver/certs/localregistry.cert:/etc/docker/registry/localregistry.cert + - ./tokenserver/certs/localregistry.key:/etc/docker/registry/localregistry.key + - ./tokenserver/certs/signing.cert:/etc/docker/registry/tokenbundle.pem +tokenserver: + build: "tokenserver" + command: "--debug -addr 0.0.0.0:5556 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5556" + ports: + - "5556" +registryv2tokenoauth: + image: golem-distribution:latest + ports: + - "5000" + volumes: + - ./tokenserver-oauth/registry-config.yml:/etc/docker/registry/config.yml + - ./tokenserver-oauth/certs/localregistry.cert:/etc/docker/registry/localregistry.cert + - ./tokenserver-oauth/certs/localregistry.key:/etc/docker/registry/localregistry.key + - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem +registryv2tokenoauthnotls: + image: golem-distribution:latest + ports: + - "5000" + volumes: + - ./tokenserver-oauth/registry-config-notls.yml:/etc/docker/registry/config.yml + - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem +tokenserveroauth: + build: "tokenserver-oauth" + command: "--debug -addr 0.0.0.0:5559 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5559" + ports: + - "5559" +malevolent: + image: "dmcgowan/malevolent:0.1.0" + command: "-l 0.0.0.0:6666 -r http://registryv2:5000 -c /certs/localregistry.cert -k /certs/localregistry.key" + links: + - registryv2:registryv2 + volumes: + - ./malevolent-certs:/certs:ro + ports: + - "6666" +docker: + image: golem-dind:latest + container_name: dockerdaemon + command: "docker daemon --debug -s $DOCKER_GRAPHDRIVER" + privileged: true + environment: + DOCKER_GRAPHDRIVER: + volumes: + - /etc/generated_certs.d:/etc/docker/certs.d + - /var/lib/docker + links: + - nginx:localregistry + - nginx:auth.localregistry diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf new file mode 100644 index 0000000..99c8d60 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf @@ -0,0 +1,18 @@ +[[suite]] + dind=true + images=[ "nginx:1.9", "dmcgowan/token-server:simple", "dmcgowan/token-server:oauth", "dmcgowan/malevolent:0.1.0" ] + + [[suite.pretest]] + command="sh ./install_certs.sh /etc/generated_certs.d" + [[suite.testrunner]] + command="bats -t ." + format="tap" + env=["TEST_REPO=hello-world", "TEST_TAG=latest", "TEST_USER=testuser", "TEST_PASSWORD=passpassword", "TEST_REGISTRY=localregistry", "TEST_SKIP_PULL=true"] + [[suite.customimage]] + tag="golem-distribution:latest" + default="registry:2.2.1" + [[suite.customimage]] + tag="golem-dind:latest" + default="docker:1.10.1-dind" + version="1.10.1" + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash b/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash new file mode 100644 index 0000000..e1813d3 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash @@ -0,0 +1,101 @@ +# has_digest enforces the last output line is "Digest: sha256:..." +# the input is the output from a docker push cli command +function has_digest() { + filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p') + [ "$filtered" != "" ] + # See http://wiki.alpinelinux.org/wiki/Regex#BREs before making changes to regex + digest=$(expr "$filtered" : ".*\(sha[0-9]\{3,3\}:[a-z0-9]*\)") +} + +# tempImage creates a new image using the provided name +# requires bats +function tempImage() { + dir=$(mktemp -d) + run dd if=/dev/urandom of="$dir/f" bs=1024 count=512 + cat < "$dir/Dockerfile" +FROM scratch +COPY f /f + +CMD [] +DockerFileContent + + cp_t $dir "/tmpbuild/" + exec_t "cd /tmpbuild/; docker build --no-cache -t $1 .; rm -rf /tmpbuild/" +} + +# skip basic auth tests with Docker 1.6, where they don't pass due to +# certificate issues, requires bats +function basic_auth_version_check() { + run sh -c 'docker version | fgrep -q "Client version: 1.6."' + if [ "$status" -eq 0 ]; then + skip "Basic auth tests don't support 1.6.x" + fi +} + +# login issues a login to docker to the provided server +# uses user, password, and email variables set outside of function +# requies bats +function login() { + rm -f /root/.docker/config.json + run docker_t login -u $user -p $password -e $email $1 + if [ "$status" -ne 0 ]; then + echo $output + fi + [ "$status" -eq 0 ] + # First line is WARNING about credential save or email deprecation (maybe both) + [ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ] +} + +function login_oauth() { + login $@ + + tmpFile=$(mktemp) + get_file_t /root/.docker/config.json $tmpFile + run awk -v RS="" "/\"$1\": \\{[[:space:]]+\"auth\": \"[[:alnum:]]+\",[[:space:]]+\"identitytoken\"/ {exit 3}" $tmpFile + [ "$status" -eq 3 ] +} + +function parse_version() { + version=$(echo "$1" | cut -d '-' -f1) # Strip anything after '-' + major=$(echo "$version" | cut -d . -f1) + minor=$(echo "$version" | cut -d . -f2) + rev=$(echo "$version" | cut -d . -f3) + + version=$((major * 1000 * 1000 + minor * 1000 + rev)) +} + +function version_check() { + name=$1 + checkv=$2 + minv=$3 + parse_version "$checkv" + v=$version + parse_version "$minv" + if [ "$v" -lt "$version" ]; then + skip "$name version \"$checkv\" does not meet required version \"$minv\"" + fi +} + +function get_file_t() { + docker cp dockerdaemon:$1 $2 +} + +function cp_t() { + docker cp $1 dockerdaemon:$2 +} + +function exec_t() { + docker exec dockerdaemon sh -c "$@" +} + +function docker_t() { + docker exec dockerdaemon docker $@ +} + +# build reates a new docker image id from another image +function build() { + docker exec -i dockerdaemon docker build --no-cache -t $1 - <> $2/ca.crt +} + +install_test_certs $installdir + +# Malevolent server +install_ca_file ./malevolent-certs/ca.pem $installdir/$hostname:6666 + +# Token server +install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5554 +install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5555 +install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5557 +install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5558 +append_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5600 + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.cert new file mode 100644 index 0000000..071e7a2 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfugAwIBAgIQZRKt7OeG+TlC2riszYwQQTALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDgyMDIz +MjE0OVoXDTE4MDgwNDIzMjE0OVowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDPdsUBStNMz4coXfQVIJIafG85VkngM4fV7hrg7AbiGLCWvq8cWOrYM50G9Wmo +twK1WeQ6bigYOjINgSfTxcy3adciVZIIJyXqboz6n2V0yRPWpakof939bvuAurAP +tSqQ2V5fGN0ZZn4J4IbXMSovKwo7sG3X6i4q/8DYHZ/mKjvCRMPC3MGWqunknpkm +dzyKbIFHaDKlAqIOwTsDhHvGzm/9n3D+h4sl5ZPBobuBEV2u5GR0H5ujak4+Kczt +thCWtRkzCfnjW0TEanheSYJGu8OgCGoFjQnHotgqvOO6iHZCsrB3gf8WQeou+y9e ++OyLZv3FmqdC9SXr3b0LGQTFAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV +HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL +A4IBAQC/PP2Y9QVhO8t4BXML1QpNRWqXG8Gg0P1XIh6M6FoxcGIodLdbzui828YB +wm9ZlyKars+nDdgLdQWawdV7hSd6s2NeQlHYQSGLsdTAVkgIxiD7D2Tw3kAZ6Zrj +dPikoVAc+rBMm/BXQLzy95IAbBVOHOpBkOOgF+TYxeLnOc3GzbUqBi1Pq97DMaxr +DaDuywH55P/6v7qt610UIsZ6+RZ78iiRx4Q+oRxEqGT0rXI76gVxOFabbJuFr1n1 +kEWa3u/BssJzX3KVAm7oUtaBnj2SH5fokFmvZ5lBXA4QO/5doOa8yZiFFvvQs7EY +SWDxLrvS33UCtsCcpPggjehnxKaC +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.key new file mode 100644 index 0000000..c5bf7ac --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAz3bFAUrTTM+HKF30FSCSGnxvOVZJ4DOH1e4a4OwG4hiwlr6v +HFjq2DOdBvVpqLcCtVnkOm4oGDoyDYEn08XMt2nXIlWSCCcl6m6M+p9ldMkT1qWp +KH/d/W77gLqwD7UqkNleXxjdGWZ+CeCG1zEqLysKO7Bt1+ouKv/A2B2f5io7wkTD +wtzBlqrp5J6ZJnc8imyBR2gypQKiDsE7A4R7xs5v/Z9w/oeLJeWTwaG7gRFdruRk +dB+bo2pOPinM7bYQlrUZMwn541tExGp4XkmCRrvDoAhqBY0Jx6LYKrzjuoh2QrKw +d4H/FkHqLvsvXvjsi2b9xZqnQvUl6929CxkExQIDAQABAoIBAQCZjCUI7NFwwxQc +m1UAogeglMJZJHUu+9SoUD8Sg34grvdbyqueBm1iMOkiclaOKU1W3b4eRNNmAwRy +nEnW4km+4hX48m5PnHHijYnIIFsd0YjeT+Pf9qtdXFvGjeWq6oIjjM3dAnD50LKu +KsCB2oCHQoqjXNQfftJGvt2C1oI2/WvdOR4prnGXElVfASswX4PkP5LCfLhIx+Fr +7ErfaRIKigLSaAWLKaw3IlL12Q/KkuGcnzYIzIRwY4VJ64ENN6M3+KknfGovQItL +sCxceSe61THDP9AAI3Mequm8z3H0CImOWhJCge5l7ttLLMXZXqGxDCVx+3zvqlCa +X0cgGSVBAoGBAOvTN3oJJx1vnh1mRj8+hqzFq1bjm4T/Wp314QWLeo++43II4uMM +5hxUlO5ViY1sKxQrGwK+9c9ddxAvm5OAFFkzgW9EhDCu0tXUb2/vAJQ93SgqbcRu +coXWJpk0eNW/ouk2s1X8dzs+sCs3a4H64fEEj8yhwoyovjfucspsn7t1AoGBAOE2 +ayLKx7CcWCiD/VGNvP7714MDst2isyq8reg8LEMmAaXR2IWWj5eGwKrImTQCsrjW +P37aBp1lcWuuYRKl/WEGBy6JLNdATyUoYc1Yo+8YdenekkOtOHHJerlK3OKi3ZVp +q4HJY9wzKg/wYLcbTmjjzKj+OBIZWwig73XUHwoRAoGBAJnuIrYbp1aFdvXFvnCl +xY6c8DwlEWx8qY+V4S2XX4bYmOnkdwSxdLplU1lGqCSRyIS/pj/imdyjK4Z7LNfY +sG+RORmB5a9JTgGZSqwLm5snzmXbXA7t8P7/S+6Q25baIeKMe/7SbplTT/bFk/0h +371MtvhhVfYuZwtnL7KFuLXJAoGBAMQ3UHKYsBC8tsZd8Pf8AL07mFHKiC04Etfa +Wb5rpri+RVM+mGITgnmnavehHHHHJAWMjPetZ3P8rSv/Ww4PVsoQoXM3Cr1jh1E9 +dLCfWPz4l8syIscaBYKF4wnLItXGxj3mOgoy93EjlrMaYHlILjGOv4JBM4L5WmoT +JW7IaF6xAoGAZ4K8MwU/cAah8VinMmLGxvWWuBSgTTebuY5zN603MvFLKv5necuc +BZfTTxD+gOnxRT6QAh++tOsbBmsgR9HmTSlQSSgw1L7cwGyXzLCDYw+5K/03KXSU +DaFdgtfcDDJO8WtjOgjyTRzEAOsqFta1ige4pIu5fTilNVMQlhts5Iw= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent.bats b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent.bats new file mode 100644 index 0000000..0112ff6 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent.bats @@ -0,0 +1,192 @@ +#!/usr/bin/env bats + +# This tests various expected error scenarios when pulling bad content + +load helpers + +host="localregistry:6666" +base="malevolent-test" + +function setup() { + tempImage $base:latest +} + +@test "Test malevolent proxy pass through" { + docker_t tag -f $base:latest $host/$base/nochange:latest + run docker_t push $host/$base/nochange:latest + echo $output + [ "$status" -eq 0 ] + has_digest "$output" + + run docker_t pull $host/$base/nochange:latest + echo "$output" + [ "$status" -eq 0 ] +} + +@test "Test malevolent image name change" { + imagename="$host/$base/rename" + image="$imagename:lastest" + docker_t tag -f $base:latest $image + run docker_t push $image + [ "$status" -eq 0 ] + has_digest "$output" + + # Pull attempt should fail to verify manifest digest + run docker_t pull "$imagename@$digest" + echo "$output" + [ "$status" -ne 0 ] +} + +@test "Test malevolent altered layer" { + image="$host/$base/addfile:latest" + tempImage $image + run docker_t push $image + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image + + run docker_t pull $image + echo "$output" + [ "$status" -ne 0 ] +} + +@test "Test malevolent altered layer (by digest)" { + imagename="$host/$base/addfile" + image="$imagename:latest" + tempImage $image + run docker_t push $image + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image + + run docker_t pull "$imagename@$digest" + echo "$output" + [ "$status" -ne 0 ] +} + +@test "Test malevolent poisoned images" { + truncid="777cf9284131" + poison="${truncid}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa32" + image1="$host/$base/image1/poison:$poison" + tempImage $image1 + run docker_t push $image1 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + image2="$host/$base/image2/poison:$poison" + tempImage $image2 + run docker_t push $image2 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + run docker_t pull $image1 + echo "$output" + [ "$status" -eq 0 ] + run docker_t pull $image2 + echo "$output" + [ "$status" -eq 0 ] + + # Test if there are multiple images + run docker_t images + echo "$output" + [ "$status" -eq 0 ] + + # Test images have same ID and not the poison + id1=$(docker_t inspect --format="{{.Id}}" $image1) + id2=$(docker_t inspect --format="{{.Id}}" $image2) + + # Remove old images + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + [ "$id1" != "$id2" ] + + [ "$id1" != "$truncid" ] + + [ "$id2" != "$truncid" ] +} + +@test "Test malevolent altered identical images" { + truncid1="777cf9284131" + poison1="${truncid1}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa32" + truncid2="888cf9284131" + poison2="${truncid2}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa64" + + image1="$host/$base/image1/alteredid:$poison1" + tempImage $image1 + run docker_t push $image1 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + image2="$host/$base/image2/alteredid:$poison2" + docker_t tag -f $image1 $image2 + run docker_t push $image2 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + run docker_t pull $image1 + echo "$output" + [ "$status" -eq 0 ] + run docker_t pull $image2 + echo "$output" + [ "$status" -eq 0 ] + + # Test if there are multiple images + run docker_t images + echo "$output" + [ "$status" -eq 0 ] + + # Test images have same ID and not the poison + id1=$(docker_t inspect --format="{{.Id}}" $image1) + id2=$(docker_t inspect --format="{{.Id}}" $image2) + + # Remove old images + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + [ "$id1" == "$id2" ] + + [ "$id1" != "$truncid1" ] + + [ "$id2" != "$truncid2" ] +} + +@test "Test malevolent resumeable pull" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + version_check registry "$GOLEM_DISTRIBUTION_VERSION" "2.3.0" + + imagename="$host/$base/resumeable" + image="$imagename:latest" + tempImage $image + run docker_t push $image + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image + + run docker_t pull "$imagename@$digest" + echo "$output" + [ "$status" -eq 0 ] +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile new file mode 100644 index 0000000..17f999d --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile @@ -0,0 +1,10 @@ +FROM nginx:1.9 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf +COPY registry-noauth.conf /etc/nginx/registry-noauth.conf +COPY registry-basic.conf /etc/nginx/registry-basic.conf +COPY test.passwd /etc/nginx/test.passwd +COPY ssl /etc/nginx/ssl +COPY v1 /var/www/html/v1 diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf new file mode 100644 index 0000000..65c4d77 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf new file mode 100644 index 0000000..543eab6 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf @@ -0,0 +1,61 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} + +# Setup TCP proxies +stream { + # Malevolent proxy + server { + listen 6666; + proxy_pass malevolent:6666; + } + + # Registry configured for token server + server { + listen 5554; + listen 5555; + proxy_pass registryv2token:5000; + } + + # Token server + server { + listen 5556; + proxy_pass tokenserver:5556; + } + + # Registry configured for token server with oauth + server { + listen 5557; + listen 5558; + proxy_pass registryv2tokenoauth:5000; + } + + # Token server with oauth + server { + listen 5559; + proxy_pass tokenserveroauth:5559; + } +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf new file mode 100644 index 0000000..117ea58 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf @@ -0,0 +1,8 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + auth_basic "registry.localhost"; + auth_basic_user_file test.passwd; + add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + include docker-registry-v2.conf; +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf new file mode 100644 index 0000000..6e182d4 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf @@ -0,0 +1,5 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + include docker-registry-v2.conf; +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf new file mode 100644 index 0000000..e693d56 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf @@ -0,0 +1,260 @@ +# Docker registry proxy for api version 2 + +upstream docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + include docker-registry-v2.conf; + } +} + +# No client auth or TLS (V2 Only) +server { + listen 5002; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + include docker-registry-v2.conf; + } +} + +# TLS Configuration chart +# Username/Password: testuser/passpassword +# | ca | client | basic | notes +# 5440 | yes | no | no | Tests CA certificate +# 5441 | yes | no | yes | Tests basic auth over TLS +# 5442 | yes | yes | no | Tests client auth with client CA +# 5443 | yes | yes | no | Tests client auth without client CA +# 5444 | yes | yes | yes | Tests using basic auth + tls auth +# 5445 | no | no | no | Tests insecure using TLS +# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS +# 5447 | no | yes | no | Tests client auth to insecure +# 5448 | yes | no | no | Bad SSL version + +server { + listen 5440; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + +# Add configuration for localregistry server_name +# Requires configuring /etc/hosts to use +# Set /etc/hosts entry to external IP, not 127.0.0.1 for testing +# Docker secure/insecure registry features +server { + listen 5440; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + + +# V1 search test +# Registry configured with token auth and no tls +# TLS termination done by nginx, search results +# served by nginx + +upstream docker-registry-v2-oauth { + server registryv2tokenoauthnotls:5000; +} + +server { + listen 5600; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + + root /var/www/html; + + client_max_body_size 0; + chunked_transfer_encoding on; + location /v2/ { + proxy_buffering off; + proxy_pass http://docker-registry-v2-oauth; + proxy_set_header Host $http_host; # required for docker client's sake + proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 900; + } + + location /v1/search { + if ($http_authorization !~ "Bearer [a-zA-Z0-9\._-]+") { + return 401; + } + try_files /v1/search.json =404; + add_header Content-Type application/json; + } +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd new file mode 100644 index 0000000..4e55de8 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd @@ -0,0 +1 @@ +testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/v1/search.json b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/v1/search.json new file mode 100644 index 0000000..3da8f1a --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/v1/search.json @@ -0,0 +1 @@ +{"num_pages":1,"num_results":2,"page":1,"page_size": 25,"query":"testsearch","results":[{"description":"","is_automated":false,"is_official":false,"is_trusted":false, "name":"dmcgowan/testsearch-1","star_count":1000},{"description":"Some automated build","is_automated":true,"is_official":false,"is_trusted":false,"name":"dmcgowan/testsearch-2","star_count":10}]} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh b/vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh new file mode 100755 index 0000000..b673d31 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +# Run the integration tests with multiple versions of the Docker engine + +set -e +set -x + +DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) + + +if [ "$TMPDIR" != "" ] && [ ! -d "$TMPDIR" ]; then + mkdir -p $TMPDIR +fi + +cachedir=`mktemp -t -d golem-cache.XXXXXX` +trap "rm -rf $cachedir" EXIT + +if [ "$1" == "-d" ]; then + # Drivers to use for Docker engines the tests are going to create. + STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} + + docker daemon --log-level=panic --storage-driver="$STORAGE_DRIVER" & + DOCKER_PID=$! + + # Wait for it to become reachable. + tries=10 + until docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + echo >&2 "error: daemon failed to start" + exit 1 + fi + sleep 1 + done + + trap "kill $DOCKER_PID" EXIT +fi + +distimage=$(docker build -q $DIR/../..) +fullversion=$(git describe --match 'v[0-9]*' --dirty='.m' --always) +distversion=${fullversion:1} + +echo "Testing image $distimage with distribution version $distversion" + +# Pull needed images before invoking golem to get pull time +# These images are defined in golem.conf +time docker pull nginx:1.9 +time docker pull golang:1.6 +time docker pull registry:0.9.1 +time docker pull dmcgowan/token-server:simple +time docker pull dmcgowan/token-server:oauth +time docker pull distribution/golem-runner:0.1-bats + +time docker pull docker:1.9.1-dind +time docker pull docker:1.10.3-dind +time docker pull docker:1.11.1-dind + +golem -cache $cachedir \ + -i "golem-distribution:latest,$distimage,$distversion" \ + -i "golem-dind:latest,docker:1.9.1-dind,1.9.1" \ + -i "golem-dind:latest,docker:1.10.3-dind,1.10.3" \ + -i "golem-dind:latest,docker:1.11.1-dind,1.11.1" \ + $DIR + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats b/vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats new file mode 100644 index 0000000..46f85e3 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats @@ -0,0 +1,109 @@ +#!/usr/bin/env bats + +# Registry host name, should be set to non-localhost address and match +# DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d + +load helpers + +hostname="localregistry" +base="hello-world" +image="${base}:latest" + +# Login information, should match values in nginx/test.passwd +user=${TEST_USER:-"testuser"} +password=${TEST_PASSWORD:-"passpassword"} +email="distribution@docker.com" + +function setup() { + tempImage $image +} + +@test "Test valid certificates" { + docker_t tag -f $image $hostname:5440/$image + run docker_t push $hostname:5440/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test basic auth" { + basic_auth_version_check + login $hostname:5441 + docker_t tag -f $image $hostname:5441/$image + run docker_t push $hostname:5441/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test basic auth with build" { + basic_auth_version_check + login $hostname:5441 + + image1=$hostname:5441/$image-build + image2=$hostname:5441/$image-build-2 + + tempImage $image1 + + run docker_t push $image1 + [ "$status" -eq 0 ] + has_digest "$output" + + docker_t rmi $image1 + + run build $image2 $image1 + echo $output + [ "$status" -eq 0 ] + + run docker_t push $image2 + echo $output + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client auth" { + docker_t tag -f $image $hostname:5442/$image + run docker_t push $hostname:5442/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client with invalid certificate authority fails" { + docker_t tag -f $image $hostname:5443/$image + run docker_t push $hostname:5443/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with TLS client auth" { + basic_auth_version_check + login $hostname:5444 + docker_t tag -f $image $hostname:5444/$image + run docker_t push $hostname:5444/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test unknown certificate authority fails" { + docker_t tag -f $image $hostname:5445/$image + run docker_t push $hostname:5445/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with unknown certificate authority fails" { + run login $hostname:5446 + [ "$status" -ne 0 ] + docker_t tag -f $image $hostname:5446/$image + run docker_t push $hostname:5446/$image + [ "$status" -ne 0 ] +} + +@test "Test TLS client auth to server with unknown certificate authority fails" { + docker_t tag -f $image $hostname:5447/$image + run docker_t push $hostname:5447/$image + [ "$status" -ne 0 ] +} + +@test "Test failure to connect to server fails to fallback to SSLv3" { + docker_t tag -f $image $hostname:5448/$image + run docker_t push $hostname:5448/$image + [ "$status" -ne 0 ] +} + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/token.bats b/vendor/github.com/docker/distribution/contrib/docker-integration/token.bats new file mode 100644 index 0000000..256885a --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/token.bats @@ -0,0 +1,135 @@ +#!/usr/bin/env bats + +# This tests contacting a registry using a token server + +load helpers + +user="testuser" +password="testpassword" +email="a@nowhere.com" +base="hello-world" + +@test "Test token server login" { + run docker_t login -u $user -p $password -e $email localregistry:5554 + echo $output + [ "$status" -eq 0 ] + + # First line is WARNING about credential save or email deprecation + [ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ] +} + +@test "Test token server bad login" { + run docker_t login -u "testuser" -p "badpassword" -e $email localregistry:5554 + [ "$status" -ne 0 ] + + run docker_t login -u "baduser" -p "testpassword" -e $email localregistry:5554 + [ "$status" -ne 0 ] +} + +@test "Test push and pull with token auth" { + login localregistry:5555 + image="localregistry:5555/testuser/token" + build $image "$base:latest" + + run docker_t push $image + echo $output + [ "$status" -eq 0 ] + + docker_t rmi $image + + docker_t pull $image +} + +@test "Test push and pull with token auth wrong namespace" { + login localregistry:5555 + image="localregistry:5555/notuser/token" + build $image "$base:latest" + + run docker_t push $image + [ "$status" -ne 0 ] +} + +@test "Test oauth token server login" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5557 +} + +@test "Test oauth token server bad login" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + run docker_t login -u "testuser" -p "badpassword" -e $email localregistry:5557 + [ "$status" -ne 0 ] + + run docker_t login -u "baduser" -p "testpassword" -e $email localregistry:5557 + [ "$status" -ne 0 ] +} + +@test "Test oauth push and pull with token auth" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5558 + image="localregistry:5558/testuser/token" + build $image "$base:latest" + + run docker_t push $image + echo $output + [ "$status" -eq 0 ] + + docker_t rmi $image + + docker_t pull $image +} + +@test "Test oauth push and build with token auth" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5558 + image="localregistry:5558/testuser/token-build" + tempImage $image + + run docker_t push $image + echo $output + [ "$status" -eq 0 ] + has_digest "$output" + + docker_t rmi $image + + image2="localregistry:5558/testuser/token-build-2" + run build $image2 $image + echo $output + [ "$status" -eq 0 ] + + run docker_t push $image2 + echo $output + [ "$status" -eq 0 ] + has_digest "$output" + +} + +@test "Test oauth push and pull with token auth wrong namespace" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5558 + image="localregistry:5558/notuser/token" + build $image "$base:latest" + + run docker_t push $image + [ "$status" -ne 0 ] +} + +@test "Test oauth with v1 search" { + version_check docker "$GOLEM_DIND_VERSION" "1.12.0" + + run docker_t search localregistry:5600/testsearch + [ "$status" -ne 0 ] + + login_oauth localregistry:5600 + + run docker_t search localregistry:5600/testsearch + echo $output + [ "$status" -eq 0 ] + + echo $output | grep "testsearch-1" + echo $output | grep "testsearch-2" +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/.htpasswd b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/.htpasswd new file mode 100644 index 0000000..0bbf574 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/.htpasswd @@ -0,0 +1 @@ +testuser:$2y$05$T2MlBvkN1R/yICNnLuf1leOlOfAY0DvybctbbWUFKlojfkShVgn4m diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/Dockerfile new file mode 100644 index 0000000..d8c7b5c --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/Dockerfile @@ -0,0 +1,8 @@ +FROM dmcgowan/token-server:oauth + +WORKDIR / + +COPY ./.htpasswd /.htpasswd +COPY ./certs/auth.localregistry.cert /tls.cert +COPY ./certs/auth.localregistry.key /tls.key +COPY ./certs/signing.key /sign.key diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert new file mode 100644 index 0000000..4144ca1 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHDCCAgagAwIBAgIRAKhhQMnqZx+hkOmoUYgPb+kwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzFaFw0xOTAxMTIwMDQyMzFaMDAxETAPBgNVBAoTCFF1aWNrVExTMRswGQYD +VQQDExJhdXRoLmxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD1tUf1EghBlIRrE83yF4zDgRu7vH2Jo0kygKJUWtQQe+DfXyjjE/fg +FdKnnoEjsIeF9hxNbTt0ldDz7/n97pbMhoiXULi9iq4jlgSzVL2XEAgrON0YSY/c +Lmmd1KSa/pOUZr2WMAYPZ+FdQfE1W7SMNbErPefBqYdFzpZ+esAtvbajYwIjl8Vy +9c4bidx4vgnNrR9GcFYibjC5sj8syh/OtbzzqiVGT8YcPpmMG6KNRkausa4gqpon +NKYG8C3WDaiPCLYKcvFrFfdEWF/m2oj14eXACXT9iwp8r4bsLgXrZwqcpKOWfVRu +qHC8aV476EYgxWCAOANExUdUaRt5wL/jAgMBAAGjPzA9MA4GA1UdDwEB/wQEAwIA +oDAMBgNVHRMBAf8EAjAAMB0GA1UdEQQWMBSCEmF1dGgubG9jYWxyZWdpc3RyeTAL +BgkqhkiG9w0BAQsDggEBABxPGK9FdGDxcLowNsExKnnZvmQT3H0u+Dux1gkp0AhH +KOrmx3LUENUKLSgotzx133tgOgR5lzAWVFy7bhLwlPhOslxf2oEfztsAMd/tY8rW +PrG2ZqYqlzEQQ9INbAc3woo5A3slN07uhP3F16jNqoMM4zRmw6Ba70CluGKT7x5+ +xVjKoWITLjWDXT5m35PnsN8CpBaFzXYcod/5p9XwCFp0s+aNxfpZECCV/3yqIr+J +ALzroPh43FAlG96o4NyYZ2Msp63newN19R2+TgpV4nXuw2mLVDpvetP7RRqnpvj/ +qwRgt5j4hFjJWb61M0ELL7A9fA71h1ImdGCvnArdBQs= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key new file mode 100644 index 0000000..4c499bb --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA9bVH9RIIQZSEaxPN8heMw4Ebu7x9iaNJMoCiVFrUEHvg318o +4xP34BXSp56BI7CHhfYcTW07dJXQ8+/5/e6WzIaIl1C4vYquI5YEs1S9lxAIKzjd +GEmP3C5pndSkmv6TlGa9ljAGD2fhXUHxNVu0jDWxKz3nwamHRc6WfnrALb22o2MC +I5fFcvXOG4nceL4Jza0fRnBWIm4wubI/LMofzrW886olRk/GHD6ZjBuijUZGrrGu +IKqaJzSmBvAt1g2ojwi2CnLxaxX3RFhf5tqI9eHlwAl0/YsKfK+G7C4F62cKnKSj +ln1UbqhwvGleO+hGIMVggDgDRMVHVGkbecC/4wIDAQABAoIBAQCrsjXKRwOF8CZo +PLqZBWPT6hBbK+f9miC4LbNBhwbRTf9hl7mWlImOCTHe95/+NIk/Ty+P21jEqzwM +ehETJPoziX9BXaL6sEHnlBlMx1aEjStoKKA3LJBeqAAdzk4IEQVHmlO4824IreqJ +pF7Njnunzo0zTlr4tWJVoXsAfv5z9tNtdkxYBbIa0fjfGtlqXU3gLq58FCON3mB/ +NGc0AyA1UFGp0FzpdEcwTGD4InsXbcmsl2l/VPBJuZbryITRqWs6BbK++80DRhNt +afMhP+IzKrWSCp0rBYrqqz6AevtlKdEfQK1yXPEjN/63QLMevt8mF/1JCp//TQnf +Z6bIQbAhAoGBAP7vFA0PcvoXt9MXvvAwrKY1s6pNw4nWPG27qY1/m+DkBwP8IQms +4AWGv1wscZzXJYTvaLO5/qjmGUj50ohcVEvyZJioh1pKXA8Chxvd6rBA/O/Lj5E0 +3MOSA5Q0gxJ0Mhv0zGbbyN5fY8D8zhxoqQP4LoW+UdZG2Oi6JxsQ9c9dAoGBAPa8 +U3bGuM5OGA9EWP7mkB/VnjDTL1aEIN3cOHbHIKwH/loxdYcNMBE7vwxV1CzgIzXT +wsL0iE15fQdK938u0+um8aH5QtbWNI8tdk1XVjEC/i3C7N6WVUutneCKUDb4QxiB +9OvWCbNNiN+xTKBBM93YlwO3GYfrW9Pmm9q1+hg/AoGBALJlUS22gun50PxaIJZq +KVcCO2DQnCYHki/j48mN4+HjD/m85M2lePrFCYIR48syTyIQer9SR5+frVAA6k/b +9G1VCQo+3MDVSkiCp1Nb3tBKGfYgB65ARMBinDiI6rPuNeaUTrkn0g+yxtaU0hLV +Nnj9omia/x+oYj+xjI4HN0xNAoGARy92dSJIV104m88ATip/EnAzP6ruUWu1f8z1 +jW9OAdQckjEK03f+kjpGmGx61qekAPejjVO3r4KJi/0ZAtyjz61OsYiUvB748wYO +x6mW+HUAmHtQk7eTzE2+6vV8xx9BXGTCIPiTu+N2xfMFRIcLS8odZ7j/6LMCv1Qd +SzCNg0kCgYBaNlEs4pK1VxZZpEWwVmFpgIxfEfxLIaGrek6wBTcCn/VA2M0oHuez +mlMio8VY0yWPBJz30JflDiTmYIvteLPMHT0N0J6isiXLhzJSFI4+cAMLE2Q5v8rz +W+W5/L8YZeierW0qJat1BrgStaf5ZLpiOc9pKBSwycydPH5BfVdK/A== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert new file mode 100644 index 0000000..105acc4 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfugAwIBAgIQN7rT95eAy75c4n6/AsDJODALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw +NDIzMloXDTE5MDExMjAwNDIzMlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDLi75QEkl/qekcoOJlNv9y1IXvrbU2ssl4ViJiZRjWx+/CkyCCOyf9YUpAgRLr +Pskqde2mwhuNP8yBlOBb17Sapz7N3+hJi5j9vLBAFcamPeF3PqxjFv7j5TKkRmSI +dFYQclREwMUd3qEH322KkqOnsEEfdmCgFqWORe+QR5AxzxQP3Pnd4OYH1yZCh0MQ +P2pJgrxxf2I5I/m1AUgoHV1cdBbCv9LGohJPpMtwPC0dJpgMFcnf6hT37At236AY +V437HiRruY7iPWkYFrSPWpwdslJ32MZvRN5RS163jZXjiZ7qWnQOiiDJfXe4evB/ +yQLN4m0qVQxsMz7rkY7OsqaXAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV +HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL +A4IBAQAyUb3EuMaOylBeV8+4KeBiE4lxykDOwLLSk3jXRsVVtfJpX3v8l5vwo/Jf +iG8tzzz+7uiskI96u3TsekUtVkUxujfKevMP+369K/59s7NRmwwlFMyB2fvL14B2 +oweVjWvM/8fZl6irtFdbJFXXRm7paKso5cmfImxhojAwohgcd4XTVLE/7juYa582 +AaBdRuIiyL71MU9qa1mC5+57AaSLPYaPKpahemgYYkV1Z403Kd6rXchxdQ8JIAL8 ++0oYTSC+svnz1tUU/V5E5id9LQaTmDN5iIVFhNpqAaZmR45UI86woWvnkMb8Ants +4aknwTwY3300PuTqBdQufvOFDRN5 +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key new file mode 100644 index 0000000..cb69a0f --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAy4u+UBJJf6npHKDiZTb/ctSF7621NrLJeFYiYmUY1sfvwpMg +gjsn/WFKQIES6z7JKnXtpsIbjT/MgZTgW9e0mqc+zd/oSYuY/bywQBXGpj3hdz6s +Yxb+4+UypEZkiHRWEHJURMDFHd6hB99tipKjp7BBH3ZgoBaljkXvkEeQMc8UD9z5 +3eDmB9cmQodDED9qSYK8cX9iOSP5tQFIKB1dXHQWwr/SxqIST6TLcDwtHSaYDBXJ +3+oU9+wLdt+gGFeN+x4ka7mO4j1pGBa0j1qcHbJSd9jGb0TeUUtet42V44me6lp0 +DoogyX13uHrwf8kCzeJtKlUMbDM+65GOzrKmlwIDAQABAoIBAF6vFMp+lz4RteSh +Wm8m1FGAVwWVUpStOlcGClynFpTi0L88XYT3K7UMStQSttBDlqRv0ysdZF+ia+lj +bbKLdvHyFp8CJzX/AB4YZgyJlKzEYFtuBhbaHZu5hIMyU5W+OELSTCznV0p7w4C8 +CGLLr+FTdhfCo1QU9NJn6fa9s2/XRdSClBBalAHYs0ZS7ZckaF/sPiC/VapfBMet +qjJXNYiO6pXYriGWKF9zdAMfk2CM0BVWbnwQZkMSEQirrTcJwm3ezyloXCv2nywK +/VzbUT1HJVyzo5oAwTd0MwDc2oEMiFzlfO028zY4LDltpia+SyWvFi5NaIqzFESc +yLgJacECgYEA3jvH+ZQHQf42Md8TCciokaYvwWIKJdk4WRjbvE5cBZekyXAm7/3b +/1VFDKsy2RPlfmfHP3wy9rlnjzsRveB5qaclgS8aI67AYsWd/yRgfRatl7Ve9bHl +LY6VM5L/DZTxykcqivwjc77XoDuBfUKs6tyuSLQku+FOTbLtNYlUCHECgYEA6nkR +lkXufyLmDhNb3093RsYvPcs1kGaIIGTnz3cxWNh485DgsyLBuYQ5ugupQkzM8YSt +ohDTmVpggqjlXQxCg0Zw8gkEV0v8KsLGjn1CuTJg/mBArXlelq1FEeRAYC9/YfOz +ocXegHV7wDKKtcraNZFsEc7Z0LwbC9wtzSFG44cCgYASkMX1CLPOhJE8e1lY0OWc +PVjx++HDJbF6aAQ7aARyBygiF/d4xylw3EvHcinuTqY2eC8CE7siN3z6T0H9Ldqc +HLWaZDf30SqLVd0MKprQ+GsKKIHFXtY5hxbZ1ybtmIrWjjl0oPnJOqFC5pW7xC0z +9bmtozcKZxkmjpMYjN9zUQKBgQCqV6KLRerqunPgLfhE1/qTlE+l2QflDFhBEI3I +j5NuNHZKnSphehK7sHAv1WD2Jc2OeRGb+BWCB8Ktqf5YBxwbOwW7EQnyUeW1OyP9 +SMs8uHj21P6oCNDLLr5LLUQHnPoyM1aBZLstICzziMR1JhY5bJjSpzBfEQmlKCSu +LkrN6QKBgQCRXrBJRUxeJj7wCnCSq0Clf9NhCpQnwo4bEx8sKlj8K8ku8MvwQwoM +3KfWc7bOl6A2/mM/k4yoHtBMM9X9xqYtsgeFhxuiWBcfTmTxWh73LQ48Kgbrgodt +6yTccnjr7OtBidD85c6lgjAUgcL43QY8mlw0OhzXAZ2R5HWFp4ht+w== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.cert new file mode 100644 index 0000000..45166f2 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.cert @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9TCCAd+gAwIBAgIRAJ6IIisIZxL86oe3oeoAgWUwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzNaFw0xOTAxMTIwMDQyMzNaMBMxETAPBgNVBAoTCFF1aWNrVExTMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVnt +QCQQWjkWVpOz8L2A29BRvv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40r +Xd/MNKAn0kFsSb6BIKmUwPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH3 +9mAmV+x0kTzFR/78ZDD5CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+K +IY8FQ6yN6l27MK56wlj4hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTA +NwpsIBfdoUVbI+j2ivn+ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQAB +ozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADALBgkqhkiG9w0BAQsDggEBAJq3JzTLrIWCF8rHLTTm1icE9PjOO0sV +a1wrmdJ6NwRbJ66dLZ/4G/NZjVOnce9WFHYLFSEG+wx5YVUPuJXpJaSdy0h8F0Uw +hiJwgeVsGg7vcf4G6mWHrsauDOhylnD31UtYPX1Ao/jcntyyf+gCQpY1J/B8l1yU +LNOwvWLVLpZwZ4ehbKA/UnDXgA+3uHvpzl//cPe0cnt+Mhrgzk5mIMwVR6zCZw1G +oVutAHpv2PXxRwTMu51J+QtSL2b2w3mGHxDLpmz8UdXOtkxdpmDT8kIOtX0T5yGL +29F3fa81iZPs02GWjSGOfOzmCCvaA4C5KJvY/WulF7OOgwvrBpQwqTI= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.key new file mode 100644 index 0000000..4756254 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVntQCQQWjkWVpOz8L2A29BR +vv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40rXd/MNKAn0kFsSb6BIKmU +wPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH39mAmV+x0kTzFR/78ZDD5 +CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+KIY8FQ6yN6l27MK56wlj4 +hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTANwpsIBfdoUVbI+j2ivn+ +ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQABAoIBAD2tiNZv6DImSXo+ +sq0qQomEf/OBvWPFMnWppd/NK/TXa+UPHO4I0MjoDJqIEC6zCU+fC4d2St1MmlrT +/X85vPFRw8mGwGxfHeRSLxEVj04I5GDYTWy0JQUrJUk/cTKp2/Bwm/RaylTyFAM0 +caYrSpvD69vjuTDFr7PDxM6iaqM53zK/vD8kCe81z+wN0UbAKsLlUOKztjH6SzL9 +uVOkekIT/j3L2xxyQhjmhfA3TuCP4uNK/+6/4ovl9Nj4pQsFomsCk4phgqy9SOm1 +4yufmVd8k7J3cppMlMPNc+7tqe2Xn593Y8QT95y3yhtkFECF70yBw64HMDDpA22p +5b/JV9ECgYEA9H4RBXOwbdjcpCa9H3mFjHqUQCqNme1vOSGiflZh9KBCDKgdqugm +KHpvAECADie0p6XRHpxRvufKnGFkJwedfeiKz51T+0dqgPxWncYT1TC+cAjOSzfM +wBpUOcAyvTTviwGbg4bLanHo4remzCbcnRvHQX4YfPFCjT9GhsU+XEUCgYEA5ubz +IlSu1wwFJpoO24ZykGUyqGUQXzR0NrXiLrpF0764qjmHyF8SPJPv1XegSxP/nUTz +SjVfJ7wye/X9qlOpBY8mzy9qQMMKc1cQBV1yVW8IRZ7pMYQZO7qmrZD/DWTa5qWt +pqSbIH2FKedELsKJA/SBtczKjspOdDKyh0UelsMCgYA7DyTfc0XAEy2hPXZb3wgC +mi2rnlvcPf2rCFPvPsCkzf2GfynDehaVmpWrsuj8Al1iTezI/yvD+Mv5oJEH2JAT +tROq+S8rOOIiTFJEBHAQBJlMCOSESPNdyD5mQOZAzEO9CWNejzYd/WwrL//Luut5 +zBcC3AngTIsuAYXw0j6xHQKBgQDamkAJep7k3W5q82OplgoUhpqFLtlnKSP1QBFZ +J+U/6Mqv7jONEeUUEQL42H6bVd2kqUikMw9ZcSVikquLfBUDPFoDwOIZWg4k0IJM +cgHyvGHad+5SgLva/oUawbGWnqtXvfc/U4vCINPXrimxE1/grLW4xp/mu8W24OCA +jIG/PQKBgD/Apl+sfqiB/6ONBjjIswA4yFkEXHSZNpAgcPwhA+cO5D0afEWz2HIx +VeOh5NjN1EL0hX8clFW4bfkK1Vr0kjvbMUXnBWaibUgpiVQl9O9WjaKQLZrp4sRu +x2kJ07Qn6ri7f/lsqOELZwBy95iHWRdePptaAKkRGxJstHI7dgUt +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml new file mode 100644 index 0000000..ed6b3ea --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml @@ -0,0 +1,15 @@ +version: 0.1 +loglevel: debug +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /tmp/registry-dev +http: + addr: 0.0.0.0:5000 +auth: + token: + realm: "https://auth.localregistry:5559/token/" + issuer: "registry-test" + service: "registry-test" + rootcertbundle: "/etc/docker/registry/tokenbundle.pem" diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config.yml new file mode 100644 index 0000000..630ef05 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config.yml @@ -0,0 +1,18 @@ +version: 0.1 +loglevel: debug +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /tmp/registry-dev +http: + addr: 0.0.0.0:5000 + tls: + certificate: "/etc/docker/registry/localregistry.cert" + key: "/etc/docker/registry/localregistry.key" +auth: + token: + realm: "https://auth.localregistry:5559/token/" + issuer: "registry-test" + service: "registry-test" + rootcertbundle: "/etc/docker/registry/tokenbundle.pem" diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/.htpasswd b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/.htpasswd new file mode 100644 index 0000000..0bbf574 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/.htpasswd @@ -0,0 +1 @@ +testuser:$2y$05$T2MlBvkN1R/yICNnLuf1leOlOfAY0DvybctbbWUFKlojfkShVgn4m diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/Dockerfile new file mode 100644 index 0000000..3452468 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/Dockerfile @@ -0,0 +1,8 @@ +FROM dmcgowan/token-server:simple + +WORKDIR / + +COPY ./.htpasswd /.htpasswd +COPY ./certs/auth.localregistry.cert /tls.cert +COPY ./certs/auth.localregistry.key /tls.key +COPY ./certs/signing.key /sign.key diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert new file mode 100644 index 0000000..4144ca1 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHDCCAgagAwIBAgIRAKhhQMnqZx+hkOmoUYgPb+kwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzFaFw0xOTAxMTIwMDQyMzFaMDAxETAPBgNVBAoTCFF1aWNrVExTMRswGQYD +VQQDExJhdXRoLmxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD1tUf1EghBlIRrE83yF4zDgRu7vH2Jo0kygKJUWtQQe+DfXyjjE/fg +FdKnnoEjsIeF9hxNbTt0ldDz7/n97pbMhoiXULi9iq4jlgSzVL2XEAgrON0YSY/c +Lmmd1KSa/pOUZr2WMAYPZ+FdQfE1W7SMNbErPefBqYdFzpZ+esAtvbajYwIjl8Vy +9c4bidx4vgnNrR9GcFYibjC5sj8syh/OtbzzqiVGT8YcPpmMG6KNRkausa4gqpon +NKYG8C3WDaiPCLYKcvFrFfdEWF/m2oj14eXACXT9iwp8r4bsLgXrZwqcpKOWfVRu +qHC8aV476EYgxWCAOANExUdUaRt5wL/jAgMBAAGjPzA9MA4GA1UdDwEB/wQEAwIA +oDAMBgNVHRMBAf8EAjAAMB0GA1UdEQQWMBSCEmF1dGgubG9jYWxyZWdpc3RyeTAL +BgkqhkiG9w0BAQsDggEBABxPGK9FdGDxcLowNsExKnnZvmQT3H0u+Dux1gkp0AhH +KOrmx3LUENUKLSgotzx133tgOgR5lzAWVFy7bhLwlPhOslxf2oEfztsAMd/tY8rW +PrG2ZqYqlzEQQ9INbAc3woo5A3slN07uhP3F16jNqoMM4zRmw6Ba70CluGKT7x5+ +xVjKoWITLjWDXT5m35PnsN8CpBaFzXYcod/5p9XwCFp0s+aNxfpZECCV/3yqIr+J +ALzroPh43FAlG96o4NyYZ2Msp63newN19R2+TgpV4nXuw2mLVDpvetP7RRqnpvj/ +qwRgt5j4hFjJWb61M0ELL7A9fA71h1ImdGCvnArdBQs= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.key new file mode 100644 index 0000000..4c499bb --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA9bVH9RIIQZSEaxPN8heMw4Ebu7x9iaNJMoCiVFrUEHvg318o +4xP34BXSp56BI7CHhfYcTW07dJXQ8+/5/e6WzIaIl1C4vYquI5YEs1S9lxAIKzjd +GEmP3C5pndSkmv6TlGa9ljAGD2fhXUHxNVu0jDWxKz3nwamHRc6WfnrALb22o2MC +I5fFcvXOG4nceL4Jza0fRnBWIm4wubI/LMofzrW886olRk/GHD6ZjBuijUZGrrGu +IKqaJzSmBvAt1g2ojwi2CnLxaxX3RFhf5tqI9eHlwAl0/YsKfK+G7C4F62cKnKSj +ln1UbqhwvGleO+hGIMVggDgDRMVHVGkbecC/4wIDAQABAoIBAQCrsjXKRwOF8CZo +PLqZBWPT6hBbK+f9miC4LbNBhwbRTf9hl7mWlImOCTHe95/+NIk/Ty+P21jEqzwM +ehETJPoziX9BXaL6sEHnlBlMx1aEjStoKKA3LJBeqAAdzk4IEQVHmlO4824IreqJ +pF7Njnunzo0zTlr4tWJVoXsAfv5z9tNtdkxYBbIa0fjfGtlqXU3gLq58FCON3mB/ +NGc0AyA1UFGp0FzpdEcwTGD4InsXbcmsl2l/VPBJuZbryITRqWs6BbK++80DRhNt +afMhP+IzKrWSCp0rBYrqqz6AevtlKdEfQK1yXPEjN/63QLMevt8mF/1JCp//TQnf +Z6bIQbAhAoGBAP7vFA0PcvoXt9MXvvAwrKY1s6pNw4nWPG27qY1/m+DkBwP8IQms +4AWGv1wscZzXJYTvaLO5/qjmGUj50ohcVEvyZJioh1pKXA8Chxvd6rBA/O/Lj5E0 +3MOSA5Q0gxJ0Mhv0zGbbyN5fY8D8zhxoqQP4LoW+UdZG2Oi6JxsQ9c9dAoGBAPa8 +U3bGuM5OGA9EWP7mkB/VnjDTL1aEIN3cOHbHIKwH/loxdYcNMBE7vwxV1CzgIzXT +wsL0iE15fQdK938u0+um8aH5QtbWNI8tdk1XVjEC/i3C7N6WVUutneCKUDb4QxiB +9OvWCbNNiN+xTKBBM93YlwO3GYfrW9Pmm9q1+hg/AoGBALJlUS22gun50PxaIJZq +KVcCO2DQnCYHki/j48mN4+HjD/m85M2lePrFCYIR48syTyIQer9SR5+frVAA6k/b +9G1VCQo+3MDVSkiCp1Nb3tBKGfYgB65ARMBinDiI6rPuNeaUTrkn0g+yxtaU0hLV +Nnj9omia/x+oYj+xjI4HN0xNAoGARy92dSJIV104m88ATip/EnAzP6ruUWu1f8z1 +jW9OAdQckjEK03f+kjpGmGx61qekAPejjVO3r4KJi/0ZAtyjz61OsYiUvB748wYO +x6mW+HUAmHtQk7eTzE2+6vV8xx9BXGTCIPiTu+N2xfMFRIcLS8odZ7j/6LMCv1Qd +SzCNg0kCgYBaNlEs4pK1VxZZpEWwVmFpgIxfEfxLIaGrek6wBTcCn/VA2M0oHuez +mlMio8VY0yWPBJz30JflDiTmYIvteLPMHT0N0J6isiXLhzJSFI4+cAMLE2Q5v8rz +W+W5/L8YZeierW0qJat1BrgStaf5ZLpiOc9pKBSwycydPH5BfVdK/A== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.cert new file mode 100644 index 0000000..105acc4 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfugAwIBAgIQN7rT95eAy75c4n6/AsDJODALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw +NDIzMloXDTE5MDExMjAwNDIzMlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDLi75QEkl/qekcoOJlNv9y1IXvrbU2ssl4ViJiZRjWx+/CkyCCOyf9YUpAgRLr +Pskqde2mwhuNP8yBlOBb17Sapz7N3+hJi5j9vLBAFcamPeF3PqxjFv7j5TKkRmSI +dFYQclREwMUd3qEH322KkqOnsEEfdmCgFqWORe+QR5AxzxQP3Pnd4OYH1yZCh0MQ +P2pJgrxxf2I5I/m1AUgoHV1cdBbCv9LGohJPpMtwPC0dJpgMFcnf6hT37At236AY +V437HiRruY7iPWkYFrSPWpwdslJ32MZvRN5RS163jZXjiZ7qWnQOiiDJfXe4evB/ +yQLN4m0qVQxsMz7rkY7OsqaXAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV +HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL +A4IBAQAyUb3EuMaOylBeV8+4KeBiE4lxykDOwLLSk3jXRsVVtfJpX3v8l5vwo/Jf +iG8tzzz+7uiskI96u3TsekUtVkUxujfKevMP+369K/59s7NRmwwlFMyB2fvL14B2 +oweVjWvM/8fZl6irtFdbJFXXRm7paKso5cmfImxhojAwohgcd4XTVLE/7juYa582 +AaBdRuIiyL71MU9qa1mC5+57AaSLPYaPKpahemgYYkV1Z403Kd6rXchxdQ8JIAL8 ++0oYTSC+svnz1tUU/V5E5id9LQaTmDN5iIVFhNpqAaZmR45UI86woWvnkMb8Ants +4aknwTwY3300PuTqBdQufvOFDRN5 +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.key new file mode 100644 index 0000000..cb69a0f --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAy4u+UBJJf6npHKDiZTb/ctSF7621NrLJeFYiYmUY1sfvwpMg +gjsn/WFKQIES6z7JKnXtpsIbjT/MgZTgW9e0mqc+zd/oSYuY/bywQBXGpj3hdz6s +Yxb+4+UypEZkiHRWEHJURMDFHd6hB99tipKjp7BBH3ZgoBaljkXvkEeQMc8UD9z5 +3eDmB9cmQodDED9qSYK8cX9iOSP5tQFIKB1dXHQWwr/SxqIST6TLcDwtHSaYDBXJ +3+oU9+wLdt+gGFeN+x4ka7mO4j1pGBa0j1qcHbJSd9jGb0TeUUtet42V44me6lp0 +DoogyX13uHrwf8kCzeJtKlUMbDM+65GOzrKmlwIDAQABAoIBAF6vFMp+lz4RteSh +Wm8m1FGAVwWVUpStOlcGClynFpTi0L88XYT3K7UMStQSttBDlqRv0ysdZF+ia+lj +bbKLdvHyFp8CJzX/AB4YZgyJlKzEYFtuBhbaHZu5hIMyU5W+OELSTCznV0p7w4C8 +CGLLr+FTdhfCo1QU9NJn6fa9s2/XRdSClBBalAHYs0ZS7ZckaF/sPiC/VapfBMet +qjJXNYiO6pXYriGWKF9zdAMfk2CM0BVWbnwQZkMSEQirrTcJwm3ezyloXCv2nywK +/VzbUT1HJVyzo5oAwTd0MwDc2oEMiFzlfO028zY4LDltpia+SyWvFi5NaIqzFESc +yLgJacECgYEA3jvH+ZQHQf42Md8TCciokaYvwWIKJdk4WRjbvE5cBZekyXAm7/3b +/1VFDKsy2RPlfmfHP3wy9rlnjzsRveB5qaclgS8aI67AYsWd/yRgfRatl7Ve9bHl +LY6VM5L/DZTxykcqivwjc77XoDuBfUKs6tyuSLQku+FOTbLtNYlUCHECgYEA6nkR +lkXufyLmDhNb3093RsYvPcs1kGaIIGTnz3cxWNh485DgsyLBuYQ5ugupQkzM8YSt +ohDTmVpggqjlXQxCg0Zw8gkEV0v8KsLGjn1CuTJg/mBArXlelq1FEeRAYC9/YfOz +ocXegHV7wDKKtcraNZFsEc7Z0LwbC9wtzSFG44cCgYASkMX1CLPOhJE8e1lY0OWc +PVjx++HDJbF6aAQ7aARyBygiF/d4xylw3EvHcinuTqY2eC8CE7siN3z6T0H9Ldqc +HLWaZDf30SqLVd0MKprQ+GsKKIHFXtY5hxbZ1ybtmIrWjjl0oPnJOqFC5pW7xC0z +9bmtozcKZxkmjpMYjN9zUQKBgQCqV6KLRerqunPgLfhE1/qTlE+l2QflDFhBEI3I +j5NuNHZKnSphehK7sHAv1WD2Jc2OeRGb+BWCB8Ktqf5YBxwbOwW7EQnyUeW1OyP9 +SMs8uHj21P6oCNDLLr5LLUQHnPoyM1aBZLstICzziMR1JhY5bJjSpzBfEQmlKCSu +LkrN6QKBgQCRXrBJRUxeJj7wCnCSq0Clf9NhCpQnwo4bEx8sKlj8K8ku8MvwQwoM +3KfWc7bOl6A2/mM/k4yoHtBMM9X9xqYtsgeFhxuiWBcfTmTxWh73LQ48Kgbrgodt +6yTccnjr7OtBidD85c6lgjAUgcL43QY8mlw0OhzXAZ2R5HWFp4ht+w== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.cert new file mode 100644 index 0000000..45166f2 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.cert @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9TCCAd+gAwIBAgIRAJ6IIisIZxL86oe3oeoAgWUwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzNaFw0xOTAxMTIwMDQyMzNaMBMxETAPBgNVBAoTCFF1aWNrVExTMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVnt +QCQQWjkWVpOz8L2A29BRvv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40r +Xd/MNKAn0kFsSb6BIKmUwPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH3 +9mAmV+x0kTzFR/78ZDD5CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+K +IY8FQ6yN6l27MK56wlj4hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTA +NwpsIBfdoUVbI+j2ivn+ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQAB +ozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADALBgkqhkiG9w0BAQsDggEBAJq3JzTLrIWCF8rHLTTm1icE9PjOO0sV +a1wrmdJ6NwRbJ66dLZ/4G/NZjVOnce9WFHYLFSEG+wx5YVUPuJXpJaSdy0h8F0Uw +hiJwgeVsGg7vcf4G6mWHrsauDOhylnD31UtYPX1Ao/jcntyyf+gCQpY1J/B8l1yU +LNOwvWLVLpZwZ4ehbKA/UnDXgA+3uHvpzl//cPe0cnt+Mhrgzk5mIMwVR6zCZw1G +oVutAHpv2PXxRwTMu51J+QtSL2b2w3mGHxDLpmz8UdXOtkxdpmDT8kIOtX0T5yGL +29F3fa81iZPs02GWjSGOfOzmCCvaA4C5KJvY/WulF7OOgwvrBpQwqTI= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.key new file mode 100644 index 0000000..4756254 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVntQCQQWjkWVpOz8L2A29BR +vv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40rXd/MNKAn0kFsSb6BIKmU +wPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH39mAmV+x0kTzFR/78ZDD5 +CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+KIY8FQ6yN6l27MK56wlj4 +hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTANwpsIBfdoUVbI+j2ivn+ +ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQABAoIBAD2tiNZv6DImSXo+ +sq0qQomEf/OBvWPFMnWppd/NK/TXa+UPHO4I0MjoDJqIEC6zCU+fC4d2St1MmlrT +/X85vPFRw8mGwGxfHeRSLxEVj04I5GDYTWy0JQUrJUk/cTKp2/Bwm/RaylTyFAM0 +caYrSpvD69vjuTDFr7PDxM6iaqM53zK/vD8kCe81z+wN0UbAKsLlUOKztjH6SzL9 +uVOkekIT/j3L2xxyQhjmhfA3TuCP4uNK/+6/4ovl9Nj4pQsFomsCk4phgqy9SOm1 +4yufmVd8k7J3cppMlMPNc+7tqe2Xn593Y8QT95y3yhtkFECF70yBw64HMDDpA22p +5b/JV9ECgYEA9H4RBXOwbdjcpCa9H3mFjHqUQCqNme1vOSGiflZh9KBCDKgdqugm +KHpvAECADie0p6XRHpxRvufKnGFkJwedfeiKz51T+0dqgPxWncYT1TC+cAjOSzfM +wBpUOcAyvTTviwGbg4bLanHo4remzCbcnRvHQX4YfPFCjT9GhsU+XEUCgYEA5ubz +IlSu1wwFJpoO24ZykGUyqGUQXzR0NrXiLrpF0764qjmHyF8SPJPv1XegSxP/nUTz +SjVfJ7wye/X9qlOpBY8mzy9qQMMKc1cQBV1yVW8IRZ7pMYQZO7qmrZD/DWTa5qWt +pqSbIH2FKedELsKJA/SBtczKjspOdDKyh0UelsMCgYA7DyTfc0XAEy2hPXZb3wgC +mi2rnlvcPf2rCFPvPsCkzf2GfynDehaVmpWrsuj8Al1iTezI/yvD+Mv5oJEH2JAT +tROq+S8rOOIiTFJEBHAQBJlMCOSESPNdyD5mQOZAzEO9CWNejzYd/WwrL//Luut5 +zBcC3AngTIsuAYXw0j6xHQKBgQDamkAJep7k3W5q82OplgoUhpqFLtlnKSP1QBFZ +J+U/6Mqv7jONEeUUEQL42H6bVd2kqUikMw9ZcSVikquLfBUDPFoDwOIZWg4k0IJM +cgHyvGHad+5SgLva/oUawbGWnqtXvfc/U4vCINPXrimxE1/grLW4xp/mu8W24OCA +jIG/PQKBgD/Apl+sfqiB/6ONBjjIswA4yFkEXHSZNpAgcPwhA+cO5D0afEWz2HIx +VeOh5NjN1EL0hX8clFW4bfkK1Vr0kjvbMUXnBWaibUgpiVQl9O9WjaKQLZrp4sRu +x2kJ07Qn6ri7f/lsqOELZwBy95iHWRdePptaAKkRGxJstHI7dgUt +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/registry-config.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/registry-config.yml new file mode 100644 index 0000000..bc26905 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/registry-config.yml @@ -0,0 +1,18 @@ +version: 0.1 +loglevel: debug +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /tmp/registry-dev +http: + addr: 0.0.0.0:5000 + tls: + certificate: "/etc/docker/registry/localregistry.cert" + key: "/etc/docker/registry/localregistry.key" +auth: + token: + realm: "https://auth.localregistry:5556/token/" + issuer: "registry-test" + service: "registry-test" + rootcertbundle: "/etc/docker/registry/tokenbundle.pem" diff --git a/vendor/github.com/docker/distribution/contrib/token-server/errors.go b/vendor/github.com/docker/distribution/contrib/token-server/errors.go new file mode 100644 index 0000000..bcac8ee --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/token-server/errors.go @@ -0,0 +1,38 @@ +package main + +import ( + "net/http" + + "github.com/docker/distribution/registry/api/errcode" +) + +var ( + errGroup = "tokenserver" + + // ErrorBadTokenOption is returned when a token parameter is invalid + ErrorBadTokenOption = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BAD_TOKEN_OPTION", + Message: "bad token option", + Description: `This error may be returned when a request for a + token contains an option which is not valid`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorMissingRequiredField is returned when a required form field is missing + ErrorMissingRequiredField = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MISSING_REQUIRED_FIELD", + Message: "missing required field", + Description: `This error may be returned when a request for a + token does not contain a required form field`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorUnsupportedValue is returned when a form field has an unsupported value + ErrorUnsupportedValue = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "UNSUPPORTED_VALUE", + Message: "unsupported value", + Description: `This error may be returned when a request for a + token contains a form field with an unsupported value`, + HTTPStatusCode: http.StatusBadRequest, + }) +) diff --git a/vendor/github.com/docker/distribution/contrib/token-server/main.go b/vendor/github.com/docker/distribution/contrib/token-server/main.go new file mode 100644 index 0000000..e9d6d64 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/token-server/main.go @@ -0,0 +1,425 @@ +package main + +import ( + "encoding/json" + "flag" + "math/rand" + "net/http" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/auth" + _ "github.com/docker/distribution/registry/auth/htpasswd" + "github.com/docker/libtrust" + "github.com/gorilla/mux" +) + +var ( + enforceRepoClass bool +) + +func main() { + var ( + issuer = &TokenIssuer{} + pkFile string + addr string + debug bool + err error + + passwdFile string + realm string + + cert string + certKey string + ) + + flag.StringVar(&issuer.Issuer, "issuer", "distribution-token-server", "Issuer string for token") + flag.StringVar(&pkFile, "key", "", "Private key file") + flag.StringVar(&addr, "addr", "localhost:8080", "Address to listen on") + flag.BoolVar(&debug, "debug", false, "Debug mode") + + flag.StringVar(&passwdFile, "passwd", ".htpasswd", "Passwd file") + flag.StringVar(&realm, "realm", "", "Authentication realm") + + flag.StringVar(&cert, "tlscert", "", "Certificate file for TLS") + flag.StringVar(&certKey, "tlskey", "", "Certificate key for TLS") + + flag.BoolVar(&enforceRepoClass, "enforce-class", false, "Enforce policy for single repository class") + + flag.Parse() + + if debug { + logrus.SetLevel(logrus.DebugLevel) + } + + if pkFile == "" { + issuer.SigningKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + logrus.Fatalf("Error generating private key: %v", err) + } + logrus.Debugf("Using newly generated key with id %s", issuer.SigningKey.KeyID()) + } else { + issuer.SigningKey, err = libtrust.LoadKeyFile(pkFile) + if err != nil { + logrus.Fatalf("Error loading key file %s: %v", pkFile, err) + } + logrus.Debugf("Loaded private key with id %s", issuer.SigningKey.KeyID()) + } + + if realm == "" { + logrus.Fatalf("Must provide realm") + } + + ac, err := auth.GetAccessController("htpasswd", map[string]interface{}{ + "realm": realm, + "path": passwdFile, + }) + if err != nil { + logrus.Fatalf("Error initializing access controller: %v", err) + } + + // TODO: Make configurable + issuer.Expiration = 15 * time.Minute + + ctx := context.Background() + + ts := &tokenServer{ + issuer: issuer, + accessController: ac, + refreshCache: map[string]refreshToken{}, + } + + router := mux.NewRouter() + router.Path("/token/").Methods("GET").Handler(handlerWithContext(ctx, ts.getToken)) + router.Path("/token/").Methods("POST").Handler(handlerWithContext(ctx, ts.postToken)) + + if cert == "" { + err = http.ListenAndServe(addr, router) + } else if certKey == "" { + logrus.Fatalf("Must provide certficate (-tlscert) and key (-tlskey)") + } else { + err = http.ListenAndServeTLS(addr, cert, certKey, router) + } + + if err != nil { + logrus.Infof("Error serving: %v", err) + } + +} + +// handlerWithContext wraps the given context-aware handler by setting up the +// request context from a base context. +func handlerWithContext(ctx context.Context, handler func(context.Context, http.ResponseWriter, *http.Request)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithRequest(ctx, r) + logger := context.GetRequestLogger(ctx) + ctx = context.WithLogger(ctx, logger) + + handler(ctx, w, r) + }) +} + +func handleError(ctx context.Context, err error, w http.ResponseWriter) { + ctx, w = context.WithResponseWriter(ctx, w) + + if serveErr := errcode.ServeJSON(w, err); serveErr != nil { + context.GetResponseLogger(ctx).Errorf("error sending error response: %v", serveErr) + return + } + + context.GetResponseLogger(ctx).Info("application error") +} + +var refreshCharacters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +const refreshTokenLength = 15 + +func newRefreshToken() string { + s := make([]rune, refreshTokenLength) + for i := range s { + s[i] = refreshCharacters[rand.Intn(len(refreshCharacters))] + } + return string(s) +} + +type refreshToken struct { + subject string + service string +} + +type tokenServer struct { + issuer *TokenIssuer + accessController auth.AccessController + refreshCache map[string]refreshToken +} + +type tokenResponse struct { + Token string `json:"access_token"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn int `json:"expires_in,omitempty"` +} + +var repositoryClassCache = map[string]string{} + +func filterAccessList(ctx context.Context, scope string, requestedAccessList []auth.Access) []auth.Access { + if !strings.HasSuffix(scope, "/") { + scope = scope + "/" + } + grantedAccessList := make([]auth.Access, 0, len(requestedAccessList)) + for _, access := range requestedAccessList { + if access.Type == "repository" { + if !strings.HasPrefix(access.Name, scope) { + context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name) + continue + } + if enforceRepoClass { + if class, ok := repositoryClassCache[access.Name]; ok { + if class != access.Class { + context.GetLogger(ctx).Debugf("Different repository class: %q, previously %q", access.Class, class) + continue + } + } else if strings.EqualFold(access.Action, "push") { + repositoryClassCache[access.Name] = access.Class + } + } + } else if access.Type == "registry" { + if access.Name != "catalog" { + context.GetLogger(ctx).Debugf("Unknown registry resource: %s", access.Name) + continue + } + // TODO: Limit some actions to "admin" users + } else { + context.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type) + continue + } + grantedAccessList = append(grantedAccessList, access) + } + return grantedAccessList +} + +type acctSubject struct{} + +func (acctSubject) String() string { return "acctSubject" } + +type requestedAccess struct{} + +func (requestedAccess) String() string { return "requestedAccess" } + +type grantedAccess struct{} + +func (grantedAccess) String() string { return "grantedAccess" } + +// getToken handles authenticating the request and authorizing access to the +// requested scopes. +func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *http.Request) { + context.GetLogger(ctx).Info("getToken") + + params := r.URL.Query() + service := params.Get("service") + scopeSpecifiers := params["scope"] + var offline bool + if offlineStr := params.Get("offline_token"); offlineStr != "" { + var err error + offline, err = strconv.ParseBool(offlineStr) + if err != nil { + handleError(ctx, ErrorBadTokenOption.WithDetail(err), w) + return + } + } + + requestedAccessList := ResolveScopeSpecifiers(ctx, scopeSpecifiers) + + authorizedCtx, err := ts.accessController.Authorized(ctx, requestedAccessList...) + if err != nil { + challenge, ok := err.(auth.Challenge) + if !ok { + handleError(ctx, err, w) + return + } + + // Get response context. + ctx, w = context.WithResponseWriter(ctx, w) + + challenge.SetHeaders(w) + handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail(challenge.Error()), w) + + context.GetResponseLogger(ctx).Info("get token authentication challenge") + + return + } + ctx = authorizedCtx + + username := context.GetStringValue(ctx, "auth.user.name") + + ctx = context.WithValue(ctx, acctSubject{}, username) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{})) + + context.GetLogger(ctx).Info("authenticated client") + + ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{})) + + grantedAccessList := filterAccessList(ctx, username, requestedAccessList) + ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{})) + + token, err := ts.issuer.CreateJWT(username, service, grantedAccessList) + if err != nil { + handleError(ctx, err, w) + return + } + + context.GetLogger(ctx).Info("authorized client") + + response := tokenResponse{ + Token: token, + ExpiresIn: int(ts.issuer.Expiration.Seconds()), + } + + if offline { + response.RefreshToken = newRefreshToken() + ts.refreshCache[response.RefreshToken] = refreshToken{ + subject: username, + service: service, + } + } + + ctx, w = context.WithResponseWriter(ctx, w) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + + context.GetResponseLogger(ctx).Info("get token complete") +} + +type postTokenResponse struct { + Token string `json:"access_token"` + Scope string `json:"scope,omitempty"` + ExpiresIn int `json:"expires_in,omitempty"` + IssuedAt string `json:"issued_at,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` +} + +// postToken handles authenticating the request and authorizing access to the +// requested scopes. +func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r *http.Request) { + grantType := r.PostFormValue("grant_type") + if grantType == "" { + handleError(ctx, ErrorMissingRequiredField.WithDetail("missing grant_type value"), w) + return + } + + service := r.PostFormValue("service") + if service == "" { + handleError(ctx, ErrorMissingRequiredField.WithDetail("missing service value"), w) + return + } + + clientID := r.PostFormValue("client_id") + if clientID == "" { + handleError(ctx, ErrorMissingRequiredField.WithDetail("missing client_id value"), w) + return + } + + var offline bool + switch r.PostFormValue("access_type") { + case "", "online": + case "offline": + offline = true + default: + handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown access_type value"), w) + return + } + + requestedAccessList := ResolveScopeList(ctx, r.PostFormValue("scope")) + + var subject string + var rToken string + switch grantType { + case "refresh_token": + rToken = r.PostFormValue("refresh_token") + if rToken == "" { + handleError(ctx, ErrorUnsupportedValue.WithDetail("missing refresh_token value"), w) + return + } + rt, ok := ts.refreshCache[rToken] + if !ok || rt.service != service { + handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid refresh token"), w) + return + } + subject = rt.subject + case "password": + ca, ok := ts.accessController.(auth.CredentialAuthenticator) + if !ok { + handleError(ctx, ErrorUnsupportedValue.WithDetail("password grant type not supported"), w) + return + } + subject = r.PostFormValue("username") + if subject == "" { + handleError(ctx, ErrorUnsupportedValue.WithDetail("missing username value"), w) + return + } + password := r.PostFormValue("password") + if password == "" { + handleError(ctx, ErrorUnsupportedValue.WithDetail("missing password value"), w) + return + } + if err := ca.AuthenticateUser(subject, password); err != nil { + handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid credentials"), w) + return + } + default: + handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown grant_type value"), w) + return + } + + ctx = context.WithValue(ctx, acctSubject{}, subject) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{})) + + context.GetLogger(ctx).Info("authenticated client") + + ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{})) + + grantedAccessList := filterAccessList(ctx, subject, requestedAccessList) + ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{})) + + token, err := ts.issuer.CreateJWT(subject, service, grantedAccessList) + if err != nil { + handleError(ctx, err, w) + return + } + + context.GetLogger(ctx).Info("authorized client") + + response := postTokenResponse{ + Token: token, + ExpiresIn: int(ts.issuer.Expiration.Seconds()), + IssuedAt: time.Now().UTC().Format(time.RFC3339), + Scope: ToScopeList(grantedAccessList), + } + + if offline { + rToken = newRefreshToken() + ts.refreshCache[rToken] = refreshToken{ + subject: subject, + service: service, + } + } + + if rToken != "" { + response.RefreshToken = rToken + } + + ctx, w = context.WithResponseWriter(ctx, w) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + + context.GetResponseLogger(ctx).Info("post token complete") +} diff --git a/vendor/github.com/docker/distribution/contrib/token-server/token.go b/vendor/github.com/docker/distribution/contrib/token-server/token.go new file mode 100644 index 0000000..b0c2abf --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/token-server/token.go @@ -0,0 +1,219 @@ +package main + +import ( + "crypto" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "regexp" + "strings" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/distribution/registry/auth/token" + "github.com/docker/libtrust" +) + +// ResolveScopeSpecifiers converts a list of scope specifiers from a token +// request's `scope` query parameters into a list of standard access objects. +func ResolveScopeSpecifiers(ctx context.Context, scopeSpecs []string) []auth.Access { + requestedAccessSet := make(map[auth.Access]struct{}, 2*len(scopeSpecs)) + + for _, scopeSpecifier := range scopeSpecs { + // There should be 3 parts, separated by a `:` character. + parts := strings.SplitN(scopeSpecifier, ":", 3) + + if len(parts) != 3 { + context.GetLogger(ctx).Infof("ignoring unsupported scope format %s", scopeSpecifier) + continue + } + + resourceType, resourceName, actions := parts[0], parts[1], parts[2] + + resourceType, resourceClass := splitResourceClass(resourceType) + if resourceType == "" { + continue + } + + // Actions should be a comma-separated list of actions. + for _, action := range strings.Split(actions, ",") { + requestedAccess := auth.Access{ + Resource: auth.Resource{ + Type: resourceType, + Class: resourceClass, + Name: resourceName, + }, + Action: action, + } + + // Add this access to the requested access set. + requestedAccessSet[requestedAccess] = struct{}{} + } + } + + requestedAccessList := make([]auth.Access, 0, len(requestedAccessSet)) + for requestedAccess := range requestedAccessSet { + requestedAccessList = append(requestedAccessList, requestedAccess) + } + + return requestedAccessList +} + +var typeRegexp = regexp.MustCompile(`^([a-z0-9]+)(\([a-z0-9]+\))?$`) + +func splitResourceClass(t string) (string, string) { + matches := typeRegexp.FindStringSubmatch(t) + if len(matches) < 2 { + return "", "" + } + if len(matches) == 2 || len(matches[2]) < 2 { + return matches[1], "" + } + return matches[1], matches[2][1 : len(matches[2])-1] +} + +// ResolveScopeList converts a scope list from a token request's +// `scope` parameter into a list of standard access objects. +func ResolveScopeList(ctx context.Context, scopeList string) []auth.Access { + scopes := strings.Split(scopeList, " ") + return ResolveScopeSpecifiers(ctx, scopes) +} + +func scopeString(a auth.Access) string { + if a.Class != "" { + return fmt.Sprintf("%s(%s):%s:%s", a.Type, a.Class, a.Name, a.Action) + } + return fmt.Sprintf("%s:%s:%s", a.Type, a.Name, a.Action) +} + +// ToScopeList converts a list of access to a +// scope list string +func ToScopeList(access []auth.Access) string { + var s []string + for _, a := range access { + s = append(s, scopeString(a)) + } + return strings.Join(s, ",") +} + +// TokenIssuer represents an issuer capable of generating JWT tokens +type TokenIssuer struct { + Issuer string + SigningKey libtrust.PrivateKey + Expiration time.Duration +} + +// CreateJWT creates and signs a JSON Web Token for the given subject and +// audience with the granted access. +func (issuer *TokenIssuer) CreateJWT(subject string, audience string, grantedAccessList []auth.Access) (string, error) { + // Make a set of access entries to put in the token's claimset. + resourceActionSets := make(map[auth.Resource]map[string]struct{}, len(grantedAccessList)) + for _, access := range grantedAccessList { + actionSet, exists := resourceActionSets[access.Resource] + if !exists { + actionSet = map[string]struct{}{} + resourceActionSets[access.Resource] = actionSet + } + actionSet[access.Action] = struct{}{} + } + + accessEntries := make([]*token.ResourceActions, 0, len(resourceActionSets)) + for resource, actionSet := range resourceActionSets { + actions := make([]string, 0, len(actionSet)) + for action := range actionSet { + actions = append(actions, action) + } + + accessEntries = append(accessEntries, &token.ResourceActions{ + Type: resource.Type, + Class: resource.Class, + Name: resource.Name, + Actions: actions, + }) + } + + randomBytes := make([]byte, 15) + _, err := io.ReadFull(rand.Reader, randomBytes) + if err != nil { + return "", err + } + randomID := base64.URLEncoding.EncodeToString(randomBytes) + + now := time.Now() + + signingHash := crypto.SHA256 + var alg string + switch issuer.SigningKey.KeyType() { + case "RSA": + alg = "RS256" + case "EC": + alg = "ES256" + default: + panic(fmt.Errorf("unsupported signing key type %q", issuer.SigningKey.KeyType())) + } + + joseHeader := token.Header{ + Type: "JWT", + SigningAlg: alg, + } + + if x5c := issuer.SigningKey.GetExtendedField("x5c"); x5c != nil { + joseHeader.X5c = x5c.([]string) + } else { + var jwkMessage json.RawMessage + jwkMessage, err = issuer.SigningKey.PublicKey().MarshalJSON() + if err != nil { + return "", err + } + joseHeader.RawJWK = &jwkMessage + } + + exp := issuer.Expiration + if exp == 0 { + exp = 5 * time.Minute + } + + claimSet := token.ClaimSet{ + Issuer: issuer.Issuer, + Subject: subject, + Audience: audience, + Expiration: now.Add(exp).Unix(), + NotBefore: now.Unix(), + IssuedAt: now.Unix(), + JWTID: randomID, + + Access: accessEntries, + } + + var ( + joseHeaderBytes []byte + claimSetBytes []byte + ) + + if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { + return "", fmt.Errorf("unable to encode jose header: %s", err) + } + if claimSetBytes, err = json.Marshal(claimSet); err != nil { + return "", fmt.Errorf("unable to encode claim set: %s", err) + } + + encodedJoseHeader := joseBase64Encode(joseHeaderBytes) + encodedClaimSet := joseBase64Encode(claimSetBytes) + encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) + + var signatureBytes []byte + if signatureBytes, _, err = issuer.SigningKey.Sign(strings.NewReader(encodingToSign), signingHash); err != nil { + return "", fmt.Errorf("unable to sign jwt payload: %s", err) + } + + signature := joseBase64Encode(signatureBytes) + + return fmt.Sprintf("%s.%s", encodingToSign, signature), nil +} + +func joseBase64Encode(data []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(data), "=") +} diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh new file mode 100755 index 0000000..25d419a --- /dev/null +++ b/vendor/github.com/docker/distribution/coverpkg.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# Given a subpackage and the containing package, figures out which packages +# need to be passed to `go test -coverpkg`: this includes all of the +# subpackage's dependencies within the containing package, as well as the +# subpackage itself. +DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" +echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go new file mode 100644 index 0000000..31d821b --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/digest.go @@ -0,0 +1,139 @@ +package digest + +import ( + "fmt" + "hash" + "io" + "regexp" + "strings" +) + +const ( + // DigestSha256EmptyTar is the canonical sha256 digest of empty data + DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) + +// Digest allows simple protection of hex formatted digest strings, prefixed +// by their algorithm. Strings of type Digest have some guarantee of being in +// the correct format and it provides quick access to the components of a +// digest string. +// +// The following is an example of the contents of Digest types: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// This allows to abstract the digest behind this type and work only in those +// terms. +type Digest string + +// NewDigest returns a Digest from alg and a hash.Hash object. +func NewDigest(alg Algorithm, h hash.Hash) Digest { + return NewDigestFromBytes(alg, h.Sum(nil)) +} + +// NewDigestFromBytes returns a new digest from the byte contents of p. +// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) +// functions. This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return Digest(fmt.Sprintf("%s:%x", alg, p)) +} + +// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. +func NewDigestFromHex(alg, hex string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, hex)) +} + +// DigestRegexp matches valid digest types. +var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) + +// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. +var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestInvalidLength returned when digest has invalid length. + ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// ParseDigest parses s and returns the validated digest object. An error will +// be returned if the format is invalid. +func ParseDigest(s string) (Digest, error) { + d := Digest(s) + + return d, d.Validate() +} + +// FromReader returns the most valid digest for the underlying content using +// the canonical digest algorithm. +func FromReader(rd io.Reader) (Digest, error) { + return Canonical.FromReader(rd) +} + +// FromBytes digests the input and returns a Digest. +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) +} + +// Validate checks that the contents of d is a valid digest, returning an +// error if not. +func (d Digest) Validate() error { + s := string(d) + + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } + + i := strings.Index(s, ":") + if i < 0 { + return ErrDigestInvalidFormat + } + + // case: "sha256:" with no hex. + if i+1 == len(s) { + return ErrDigestInvalidFormat + } + + switch algorithm := Algorithm(s[:i]); algorithm { + case SHA256, SHA384, SHA512: + if algorithm.Size()*2 != len(s[i+1:]) { + return ErrDigestInvalidLength + } + break + default: + return ErrDigestUnsupported + } + + return nil +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Hex returns the hex digest portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Hex() string { + return string(d[d.sepIndex()+1:]) +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic("could not find ':' in digest: " + d) + } + + return i +} diff --git a/vendor/github.com/docker/distribution/digest/digest_test.go b/vendor/github.com/docker/distribution/digest/digest_test.go new file mode 100644 index 0000000..afb4ebf --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/digest_test.go @@ -0,0 +1,82 @@ +package digest + +import ( + "testing" +) + +func TestParseDigest(t *testing.T) { + for _, testcase := range []struct { + input string + err error + algorithm Algorithm + hex string + }{ + { + input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + algorithm: "sha256", + hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + { + input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + algorithm: "sha384", + hex: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + }, + { + // empty hex + input: "sha256:", + err: ErrDigestInvalidFormat, + }, + { + // just hex + input: "d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestInvalidFormat, + }, + { + // not hex + input: "sha256:d41d8cd98f00b204e9800m98ecf8427e", + err: ErrDigestInvalidFormat, + }, + { + // too short + input: "sha256:abcdef0123456789", + err: ErrDigestInvalidLength, + }, + { + // too short (from different algorithm) + input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + err: ErrDigestInvalidLength, + }, + { + input: "foo:d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestUnsupported, + }, + } { + digest, err := ParseDigest(testcase.input) + if err != testcase.err { + t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err) + } + + if testcase.err != nil { + continue + } + + if digest.Algorithm() != testcase.algorithm { + t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm) + } + + if digest.Hex() != testcase.hex { + t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex) + } + + // Parse string return value and check equality + newParsed, err := ParseDigest(digest.String()) + + if err != nil { + t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err) + } + + if newParsed != digest { + t.Fatalf("expected equal: %q != %q", newParsed, digest) + } + } +} diff --git a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go new file mode 100644 index 0000000..f3105a4 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/digester.go @@ -0,0 +1,155 @@ +package digest + +import ( + "crypto" + "fmt" + "hash" + "io" +) + +// Algorithm identifies and implementation of a digester by an identifier. +// Note the that this defines both the hash algorithm used and the string +// encoding. +type Algorithm string + +// supported digest types +const ( + SHA256 Algorithm = "sha256" // sha256 with hex encoding + SHA384 Algorithm = "sha384" // sha384 with hex encoding + SHA512 Algorithm = "sha512" // sha512 with hex encoding + + // Canonical is the primary digest algorithm used with the distribution + // project. Other digests may be used but this one is the primary storage + // digest. + Canonical = SHA256 +) + +var ( + // TODO(stevvooe): Follow the pattern of the standard crypto package for + // registration of digests. Effectively, we are a registerable set and + // common symbol access. + + // algorithms maps values to hash.Hash implementations. Other algorithms + // may be available but they cannot be calculated by the digest package. + algorithms = map[Algorithm]crypto.Hash{ + SHA256: crypto.SHA256, + SHA384: crypto.SHA384, + SHA512: crypto.SHA512, + } +) + +// Available returns true if the digest type is available for use. If this +// returns false, New and Hash will return nil. +func (a Algorithm) Available() bool { + h, ok := algorithms[a] + if !ok { + return false + } + + // check availability of the hash, as well + return h.Available() +} + +func (a Algorithm) String() string { + return string(a) +} + +// Size returns number of bytes returned by the hash. +func (a Algorithm) Size() int { + h, ok := algorithms[a] + if !ok { + return 0 + } + return h.Size() +} + +// Set implemented to allow use of Algorithm as a command line flag. +func (a *Algorithm) Set(value string) error { + if value == "" { + *a = Canonical + } else { + // just do a type conversion, support is queried with Available. + *a = Algorithm(value) + } + + return nil +} + +// New returns a new digester for the specified algorithm. If the algorithm +// does not have a digester implementation, nil will be returned. This can be +// checked by calling Available before calling New. +func (a Algorithm) New() Digester { + return &digester{ + alg: a, + hash: a.Hash(), + } +} + +// Hash returns a new hash as used by the algorithm. If not available, the +// method will panic. Check Algorithm.Available() before calling. +func (a Algorithm) Hash() hash.Hash { + if !a.Available() { + // NOTE(stevvooe): A missing hash is usually a programming error that + // must be resolved at compile time. We don't import in the digest + // package to allow users to choose their hash implementation (such as + // when using stevvooe/resumable or a hardware accelerated package). + // + // Applications that may want to resolve the hash at runtime should + // call Algorithm.Available before call Algorithm.Hash(). + panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) + } + + return algorithms[a].New() +} + +// FromReader returns the digest of the reader using the algorithm. +func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { + digester := a.New() + + if _, err := io.Copy(digester.Hash(), rd); err != nil { + return "", err + } + + return digester.Digest(), nil +} + +// FromBytes digests the input and returns a Digest. +func (a Algorithm) FromBytes(p []byte) Digest { + digester := a.New() + + if _, err := digester.Hash().Write(p); err != nil { + // Writes to a Hash should never fail. None of the existing + // hash implementations in the stdlib or hashes vendored + // here can return errors from Write. Having a panic in this + // condition instead of having FromBytes return an error value + // avoids unnecessary error handling paths in all callers. + panic("write to hash function returned error: " + err.Error()) + } + + return digester.Digest() +} + +// TODO(stevvooe): Allow resolution of verifiers using the digest type and +// this registration system. + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/docker/distribution/digest/digester_resumable_test.go b/vendor/github.com/docker/distribution/digest/digester_resumable_test.go new file mode 100644 index 0000000..6ba21c8 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/digester_resumable_test.go @@ -0,0 +1,21 @@ +// +build !noresumabledigest + +package digest + +import ( + "testing" + + "github.com/stevvooe/resumable" + _ "github.com/stevvooe/resumable/sha256" +) + +// TestResumableDetection just ensures that the resumable capability of a hash +// is exposed through the digester type, which is just a hash plus a Digest +// method. +func TestResumableDetection(t *testing.T) { + d := Canonical.New() + + if _, ok := d.Hash().(resumable.Hash); !ok { + t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash()) + } +} diff --git a/vendor/github.com/docker/distribution/digest/doc.go b/vendor/github.com/docker/distribution/digest/doc.go new file mode 100644 index 0000000..f64b0db --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/doc.go @@ -0,0 +1,42 @@ +// Package digest provides a generalized type to opaquely represent message +// digests and their operations within the registry. The Digest type is +// designed to serve as a flexible identifier in a content-addressable system. +// More importantly, it provides tools and wrappers to work with +// hash.Hash-based digests with little effort. +// +// Basics +// +// The format of a digest is simply a string with two parts, dubbed the +// "algorithm" and the "digest", separated by a colon: +// +// : +// +// An example of a sha256 digest representation follows: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// In this case, the string "sha256" is the algorithm and the hex bytes are +// the "digest". +// +// Because the Digest type is simply a string, once a valid Digest is +// obtained, comparisons are cheap, quick and simple to express with the +// standard equality operator. +// +// Verification +// +// The main benefit of using the Digest type is simple verification against a +// given digest. The Verifier interface, modeled after the stdlib hash.Hash +// interface, provides a common write sink for digest verification. After +// writing is complete, calling the Verifier.Verified method will indicate +// whether or not the stream of bytes matches the target digest. +// +// Missing Features +// +// In addition to the above, we intend to add the following features to this +// package: +// +// 1. A Digester type that supports write sink digest calculation. +// +// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. +// +package digest diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go new file mode 100644 index 0000000..4b9313c --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/set.go @@ -0,0 +1,245 @@ +package digest + +import ( + "errors" + "sort" + "strings" + "sync" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by easily referenced by a string +// representation of the digest as well as short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value. +func (dst *Set) Lookup(d string) (Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg Algorithm + hex string + ) + dgst, err := ParseDigest(d) + if err == ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. +func ShortCodeTable(dst *Set, length int) map[Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg Algorithm + val string + digest Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/docker/distribution/digest/set_test.go b/vendor/github.com/docker/distribution/digest/set_test.go new file mode 100644 index 0000000..e9dab87 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/set_test.go @@ -0,0 +1,368 @@ +package digest + +import ( + "crypto/sha256" + "encoding/binary" + "math/rand" + "testing" +) + +func assertEqualDigests(t *testing.T, d1, d2 Digest) { + if d1 != d2 { + t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) + } +} + +func TestLookup(t *testing.T) { + digests := []Digest{ + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup("54") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[3]) + + dgst, err = dset.Lookup("1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("9876") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 9876") + } + if err != ErrDigestNotFound { + t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: sha256:1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[0]) + + dgst, err = dset.Lookup("sha256:12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[0]) +} + +func TestAddDuplication(t *testing.T) { + digests := []Digest{ + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + if len(dset.entries) != 8 { + t.Fatal("Invalid dset size") + } + + if err := dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 8 { + t.Fatal("Duplicate digest insert allowed") + } + + if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 9 { + t.Fatal("Insert with different algorithm not allowed") + } +} + +func TestRemove(t *testing.T) { + digests, err := createDigests(10) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup(digests[0].String()) + if err != nil { + t.Fatal(err) + } + if dgst != digests[0] { + t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst) + } + + if err := dset.Remove(digests[0]); err != nil { + t.Fatal(err) + } + + if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound { + t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err) + } +} + +func TestAll(t *testing.T) { + digests, err := createDigests(100) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + all := map[Digest]struct{}{} + for _, dgst := range dset.All() { + all[dgst] = struct{}{} + } + + if len(all) != len(digests) { + t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all)) + } + + for i, dgst := range digests { + if _, ok := all[dgst]; !ok { + t.Fatalf("Missing element at position %d: %s", i, dgst) + } + } + +} + +func assertEqualShort(t *testing.T, actual, expected string) { + if actual != expected { + t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) + } +} + +func TestShortCodeTable(t *testing.T) { + digests := []Digest{ + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dump := ShortCodeTable(dset, 2) + + if len(dump) < len(digests) { + t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) + } + assertEqualShort(t, dump[digests[0]], "12341") + assertEqualShort(t, dump[digests[1]], "12345") + assertEqualShort(t, dump[digests[2]], "12346") + assertEqualShort(t, dump[digests[3]], "54") + assertEqualShort(t, dump[digests[4]], "6543") + assertEqualShort(t, dump[digests[5]], "64") + assertEqualShort(t, dump[digests[6]], "6542") + assertEqualShort(t, dump[digests[7]], "653") +} + +func createDigests(count int) ([]Digest, error) { + r := rand.New(rand.NewSource(25823)) + digests := make([]Digest, count) + for i := range digests { + h := sha256.New() + if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { + return nil, err + } + digests[i] = NewDigest("sha256", h) + } + return digests, nil +} + +func benchAddNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchLookupNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + shorts := make([]string, 0, n) + for _, short := range ShortCodeTable(dset, shortLen) { + shorts = append(shorts, short) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err = dset.Lookup(shorts[i%n]); err != nil { + b.Fatal(err) + } + } +} + +func benchRemoveNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + b.StopTimer() + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for j := range digests { + if err = dset.Remove(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchShortCodeNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ShortCodeTable(dset, shortLen) + } +} + +func BenchmarkAdd10(b *testing.B) { + benchAddNTable(b, 10) +} + +func BenchmarkAdd100(b *testing.B) { + benchAddNTable(b, 100) +} + +func BenchmarkAdd1000(b *testing.B) { + benchAddNTable(b, 1000) +} + +func BenchmarkRemove10(b *testing.B) { + benchRemoveNTable(b, 10) +} + +func BenchmarkRemove100(b *testing.B) { + benchRemoveNTable(b, 100) +} + +func BenchmarkRemove1000(b *testing.B) { + benchRemoveNTable(b, 1000) +} + +func BenchmarkLookup10(b *testing.B) { + benchLookupNTable(b, 10, 12) +} + +func BenchmarkLookup100(b *testing.B) { + benchLookupNTable(b, 100, 12) +} + +func BenchmarkLookup1000(b *testing.B) { + benchLookupNTable(b, 1000, 12) +} + +func BenchmarkShortCode10(b *testing.B) { + benchShortCodeNTable(b, 10, 12) +} +func BenchmarkShortCode100(b *testing.B) { + benchShortCodeNTable(b, 100, 12) +} +func BenchmarkShortCode1000(b *testing.B) { + benchShortCodeNTable(b, 1000, 12) +} diff --git a/vendor/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/docker/distribution/digest/verifiers.go new file mode 100644 index 0000000..9af3be1 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/verifiers.go @@ -0,0 +1,44 @@ +package digest + +import ( + "hash" + "io" +) + +// Verifier presents a general verification interface to be used with message +// digests and other byte stream verifications. Users instantiate a Verifier +// from one of the various methods, write the data under test to it then check +// the result with the Verified method. +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. + Verified() bool +} + +// NewDigestVerifier returns a verifier that compares the written bytes +// against a passed in digest. +func NewDigestVerifier(d Digest) (Verifier, error) { + if err := d.Validate(); err != nil { + return nil, err + } + + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + }, nil +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/docker/distribution/digest/verifiers_test.go b/vendor/github.com/docker/distribution/digest/verifiers_test.go new file mode 100644 index 0000000..c342d6e --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/verifiers_test.go @@ -0,0 +1,49 @@ +package digest + +import ( + "bytes" + "crypto/rand" + "io" + "testing" +) + +func TestDigestVerifier(t *testing.T) { + p := make([]byte, 1<<20) + rand.Read(p) + digest := FromBytes(p) + + verifier, err := NewDigestVerifier(digest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + + io.Copy(verifier, bytes.NewReader(p)) + + if !verifier.Verified() { + t.Fatalf("bytes not verified") + } +} + +// TestVerifierUnsupportedDigest ensures that unsupported digest validation is +// flowing through verifier creation. +func TestVerifierUnsupportedDigest(t *testing.T) { + unsupported := Digest("bean:0123456789abcdef") + + _, err := NewDigestVerifier(unsupported) + if err == nil { + t.Fatalf("expected error when creating verifier") + } + + if err != ErrDigestUnsupported { + t.Fatalf("incorrect error for unsupported digest: %v", err) + } +} + +// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for +// DigestVerifier. +// +// The relevant benchmark for comparison can be run with the following +// commands: +// +// go test -bench . crypto/sha1 +// diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go new file mode 100644 index 0000000..bdd8cb7 --- /dev/null +++ b/vendor/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. More details are available in the +// README.md. +package distribution diff --git a/vendor/github.com/docker/distribution/docs/README.md b/vendor/github.com/docker/distribution/docs/README.md new file mode 100644 index 0000000..b26dc37 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/README.md @@ -0,0 +1,16 @@ +# The docs have been moved! + +The documentation for Registry has been merged into +[the general documentation repo](https://github.com/docker/docker.github.io). +Commit history has been preserved. + +The docs for Registry are now here: +https://github.com/docker/docker.github.io/tree/master/registry + +> Note: The definitive [./spec directory](spec/) directory and +[configuration.md](configuration.md) file will be maintained in this repository +and be refreshed periodically in +[the general documentation repo](https://github.com/docker/docker.github.io). + +As always, the docs in the general repo remain open-source and we appreciate +your feedback and pull requests! diff --git a/vendor/github.com/docker/distribution/docs/architecture.md b/vendor/github.com/docker/distribution/docs/architecture.md new file mode 100644 index 0000000..c2aaa9f --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/architecture.md @@ -0,0 +1,52 @@ +--- +published: false +--- + +# Architecture + +## Design +**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios. + +### Eventual Consistency + +> **NOTE:** This section belongs somewhere, perhaps in a design document. We +> are leaving this here so the information is not lost. + +Running the registry on eventually consistent backends has been part of the +design from the beginning. This section covers some of the approaches to +dealing with this reality. + +There are a few classes of issues that we need to worry about when +implementing something on top of the storage drivers: + +1. Read-After-Write consistency (see this [article on + s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)). +2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict). + +In reality, the registry must worry about these kinds of errors when doing the +following: + +1. Accepting data into a temporary upload file may not have latest data block + yet (read-after-write). +2. Moving uploaded data into its blob location (write-write race). +3. Modifying the "current" manifest for given tag (write-write race). +4. A whole slew of operations around deletes (read-after-write, delete-write + races, garbage collection, etc.). + +The backend path layout employs a few techniques to avoid these problems: + +1. Large writes are done to private upload directories. This alleviates most + of the corruption potential under multiple writers by avoiding multiple + writers. +2. Constraints in storage driver implementations, such as support for writing + after the end of a file to extend it. +3. Digest verification to avoid data corruption. +4. Manifest files are stored by digest and cannot change. +5. All other non-content files (links, hashes, etc.) are written as an atomic + unit. Anything that requires additions and deletions is broken out into + separate "files". Last writer still wins. + +Unfortunately, one must play this game when trying to build something like +this on top of eventually consistent storage systems. If we run into serious +problems, we can wrap the storagedrivers in a shared consistency layer but +that would increase complexity and hinder registry cluster performance. diff --git a/vendor/github.com/docker/distribution/docs/configuration.md b/vendor/github.com/docker/distribution/docs/configuration.md new file mode 100644 index 0000000..a1c66b2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/configuration.md @@ -0,0 +1,1121 @@ +--- +title: "Configuring a registry" +description: "Explains how to configure a registry" +keywords: registry, on-prem, images, tags, repository, distribution, configuration +--- + +The Registry configuration is based on a YAML file, detailed below. While it +comes with sane default values out of the box, you should review it exhaustively +before moving your systems to production. + +## Override specific configuration options + +In a typical setup where you run your Registry from the official image, you can +specify a configuration variable from the environment by passing `-e` arguments +to your `docker run` stanza or from within a Dockerfile using the `ENV` +instruction. + +To override a configuration option, create an environment variable named +`REGISTRY_variable` where `variable` is the name of the configuration option +and the `_` (underscore) represents indention levels. For example, you can +configure the `rootdirectory` of the `filesystem` storage backend: + +```none +storage: + filesystem: + rootdirectory: /var/lib/registry +``` + +To override this value, set an environment variable like this: + +```none +REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere +``` + +This variable overrides the `/var/lib/registry` value to the `/somewhere` +directory. + +> **Note**: Create a base configuration file with environment variables that can +> be configured to tweak individual values. Overriding configuration sections +> with environment variables is not recommended. + +## Overriding the entire configuration file + +If the default configuration is not a sound basis for your usage, or if you are +having issues overriding keys from the environment, you can specify an alternate +YAML configuration file by mounting it as a volume in the container. + +Typically, create a new configuration file from scratch,named `config.yml`, then +specify it in the `docker run` command: + +```bash +$ docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/config.yml:/etc/docker/registry/config.yml \ + registry:2 +``` + +Use this +[example YAML file](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml) +as a starting point. + +## List of configuration options + +These are all configuration options for the registry. Some options in the list +are mutually exclusive. Read the detailed reference information about each +option before finalizing your configuration. + +```none +version: 0.1 +log: + accesslog: + disabled: true + level: debug + formatter: text + fields: + service: registry + environment: staging + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com +loglevel: debug # deprecated: use "log" +storage: + filesystem: + rootdirectory: /var/lib/registry + maxthreads: 100 + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + gcs: + bucket: bucketname + keyfile: /path/to/keyfile + rootdirectory: /gcs/object/name/prefix + chunksize: 5242880 + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: us-west-1 + regionendpoint: http://myobjects.local + bucket: bucketname + encrypt: true + keyid: mykeyid + secure: true + v4auth: true + chunksize: 5242880 + multipartcopychunksize: 33554432 + multipartcopymaxconcurrency: 100 + multipartcopythresholdsize: 33554432 + rootdirectory: /s3/object/name/prefix + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + oss: + accesskeyid: accesskeyid + accesskeysecret: accesskeysecret + region: OSS region name + endpoint: optional endpoints + internal: optional internal endpoint + bucket: OSS bucket + encrypt: optional data encryption setting + secure: optional ssl setting + chunksize: optional size valye + rootdirectory: optional root directory + inmemory: # This driver takes no parameters + delete: + enabled: false + redirect: + disable: false + cache: + blobdescriptor: redis + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + readonly: + enabled: false +auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd +middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000s + storage: + - name: redirect + options: + baseurl: https://example.com/ +reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true +http: + addr: localhost:5000 + prefix: /my/nested/registry/ + host: https://myregistryaddress.org:5000 + secret: asecretforlocaldevelopment + relativeurls: false + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + letsencrypt: + cachefile: /path/to/cache-file + email: emailused@letsencrypt.com + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] + http2: + disabled: false +notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + ignoredmediatypes: + - application/octet-stream +redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + file: + - file: /path/to/checked/file + interval: 10s + http: + - uri: http://server.to.check/must/return/200 + headers: + Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] + statuscode: 200 + timeout: 3s + interval: 10s + threshold: 3 + tcp: + - addr: redis-server.domain.com:6379 + timeout: 3s + interval: 10s + threshold: 3 +proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] +compatibility: + schema1: + signingkeyfile: /etc/registry/key.json +validation: + enabled: true + manifests: + urls: + allow: + - ^https?://([^/]+\.)*example\.com/ + deny: + - ^https?://www\.example\.com/ +``` + +In some instances a configuration option is **optional** but it contains child +options marked as **required**. In these cases, you can omit the parent with +all its children. However, if the parent is included, you must also include all +the children marked **required**. + +## `version` + +```none +version: 0.1 +``` + +The `version` option is **required**. It specifies the configuration's version. +It is expected to remain a top-level field, to allow for a consistent version +check before parsing the remainder of the configuration file. + +## `log` + +The `log` subsection configures the behavior of the logging system. The logging +system outputs everything to stdout. You can adjust the granularity and format +with this configuration section. + +```none +log: + accesslog: + disabled: true + level: debug + formatter: text + fields: + service: registry + environment: staging +``` + +| Parameter | Required | Description | +|-------------|----------|-------------| +| `level` | no | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info`, and `debug`. The default is `info`. | +| `formatter` | no | This selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json`, and `logstash`. The default is `text`. | +| `fields` | no | A map of field names to values. These are added to every log line for the context. This is useful for identifying log messages source after being mixed in other systems. | + +### `accesslog` + +```none +accesslog: + disabled: true +``` + +Within `log`, `accesslog` configures the behavior of the access logging +system. By default, the access logging system outputs to stdout in +[Combined Log Format](https://httpd.apache.org/docs/2.4/logs.html#combined). +Access logging can be disabled by setting the boolean flag `disabled` to `true`. + +## `hooks` + +```none +hooks: + - type: mail + levels: + - panic + options: + smtp: + addr: smtp.sendhost.com:25 + username: sendername + password: password + insecure: true + from: name@sendhost.com + to: + - name@receivehost.com +``` + +The `hooks` subsection configures the logging hooks' behavior. This subsection +includes a sequence handler which you can use for sending mail, for example. +Refer to `loglevel` to configure the level of messages printed. + +## `loglevel` + +> **DEPRECATED:** Please use [log](#log) instead. + +```none +loglevel: debug +``` + +Permitted values are `error`, `warn`, `info` and `debug`. The default is +`info`. + +## `storage` + +```none +storage: + filesystem: + rootdirectory: /var/lib/registry + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + gcs: + bucket: bucketname + keyfile: /path/to/keyfile + rootdirectory: /gcs/object/name/prefix + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: us-west-1 + regionendpoint: http://myobjects.local + bucket: bucketname + encrypt: true + keyid: mykeyid + secure: true + v4auth: true + chunksize: 5242880 + multipartcopychunksize: 33554432 + multipartcopymaxconcurrency: 100 + multipartcopythresholdsize: 33554432 + rootdirectory: /s3/object/name/prefix + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + oss: + accesskeyid: accesskeyid + accesskeysecret: accesskeysecret + region: OSS region name + endpoint: optional endpoints + internal: optional internal endpoint + bucket: OSS bucket + encrypt: optional data encryption setting + secure: optional ssl setting + chunksize: optional size valye + rootdirectory: optional root directory + inmemory: + delete: + enabled: false + cache: + blobdescriptor: inmemory + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + readonly: + enabled: false + redirect: + disable: false +``` + +The `storage` option is **required** and defines which storage backend is in +use. You must configure exactly one backend. If you configure more, the registry +returns an error. You can choose any of these backend storage drivers: + +| Storage driver | Description | +|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `filesystem` | Uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/filesystem.md). | +| `azure` | Uses Microsoft Azure Blob Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/azure.md). | +| `gcs` | Uses Google Cloud Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/gcs.md). | +| `s3` | Uses Amazon Simple Storage Service (S3) and compatible Storage Services. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/s3.md). | +| `swift` | Uses Openstack Swift object storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/swift.md). | +| `oss` | Uses Aliyun OSS for object storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/oss.md). | + +For testing only, you can use the [`inmemory` storage +driver](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/inmemory.md). +If you would like to run a registry from volatile memory, use the +[`filesystem` driver](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/filesystem.md) +on a ramdisk. + +If you are deploying a registry on Windows, a Windows volume mounted from the +host is not recommended. Instead, you can use a S3 or Azure backing +data-store. If you do use a Windows volume, the length of the `PATH` to +the mount point must be within the `MAX_PATH` limits (typically 255 characters), +or this error will occur: + +```none +mkdir /XXX protocol error and your registry will not function properly. +``` + +### `maintenance` + +Currently, upload purging and read-only mode are the only `maintenance` +functions available. + +### `uploadpurging` + +Upload purging is a background process that periodically removes orphaned files +from the upload directories of the registry. Upload purging is enabled by +default. To configure upload directory purging, the following parameters must +be set. + + +| Parameter | Required | Description | +|------------|----------|----------------------------------------------------------------------------------------------------| +| `enabled` | yes | Set to `true` to enable upload purging. Defaults to `true`. | +| `age` | yes | Upload directories which are older than this age will be deleted.Defaults to `168h` (1 week). | +| `interval` | yes | The interval between upload directory purging. Defaults to `24h`. | +| `dryrun` | yes | Set `dryrun` to `true` to obtain a summary of what directories will be deleted. Defaults to `false`.| + +> **Note**: `age` and `interval` are strings containing a number with optional +fraction and a unit suffix. Some examples: `45m`, `2h10m`, `168h`. + +### `readonly` + +If the `readonly` section under `maintenance` has `enabled` set to `true`, +clients will not be allowed to write to the registry. This mode is useful to +temporarily prevent writes to the backend storage so a garbage collection pass +can be run. Before running garbage collection, the registry should be +restarted with readonly's `enabled` set to true. After the garbage collection +pass finishes, the registry may be restarted again, this time with `readonly` +removed from the configuration (or set to false). + +### `delete` + +Use the `delete` structure to enable the deletion of image blobs and manifests +by digest. It defaults to false, but it can be enabled by writing the following +on the configuration file: + +```none +delete: + enabled: true +``` + +### `cache` + +Use the `cache` structure to enable caching of data accessed in the storage +backend. Currently, the only available cache provides fast access to layer +metadata, which uses the `blobdescriptor` field if configured. + +You can set `blobdescriptor` field to `redis` or `inmemory`. If set to `redis`,a +Redis pool caches layer metadata. If set to `inmemory`, an in-memory map caches +layer metadata. + +> **NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these +> are equivalent, `layerinfo` has been deprecated. + +### `redirect` + +The `redirect` subsection provides configuration for managing redirects from +content backends. For backends that support it, redirecting is enabled by +default. In certain deployment scenarios, you may decide to route all data +through the Registry, rather than redirecting to the backend. This may be more +efficient when using a backend that is not co-located or when a registry +instance is aggressively caching. + +To disable redirects, add a single flag `disable`, set to `true` +under the `redirect` section: + +```none +redirect: + disable: true +``` + +## `auth` + +```none +auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd +``` + +The `auth` option is **optional**. Possible auth providers include: + +- [`silly`](#silly) +- [`token`](#token) +- [`htpasswd`](#token) + +You can configure only one authentication provider. + +### `silly` + +The `silly` authentication provider is only appropriate for development. It simply checks +for the existence of the `Authorization` header in the HTTP request. It does not +check the header's value. If the header does not exist, the `silly` auth +responds with a challenge response, echoing back the realm, service, and scope +for which access was denied. + +The following values are used to configure the response: + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `realm` | yes | The realm in which the registry server authenticates. | +| `service` | yes | The service being authenticated. | + +### `token` + +Token-based authentication allows you to decouple the authentication system from +the registry. It is an established authentication paradigm with a high degree of +security. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `realm` | yes | The realm in which the registry server authenticates. | +| `service` | yes | The service being authenticated. | +| `issuer` | yes | The name of the token issuer. The issuer inserts this into the token so it must match the value configured for the issuer. | +| `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. | + + +For more information about Token based authentication configuration, see the +[specification](spec/auth/token.md). + +### `htpasswd` + +The _htpasswd_ authentication backed allows you to configure basic +authentication using an +[Apache htpasswd file](https://httpd.apache.org/docs/2.4/programs/htpasswd.html). +The only supported password format is +[`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt). Entries with other hash types +are ignored. The `htpasswd` file is loaded once, at startup. If the file is +invalid, the registry will display an error and will not start. + +> **Warning**: Only use the `htpasswd` authentication scheme with TLS +> configured, since basic authentication sends passwords as part of the HTTP +> header. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `realm` | yes | The realm in which the registry server authenticates. | +| `path` | yes | The path to the `htpasswd` file to load at startup. | + +## `middleware` + +The `middleware` structure is **optional**. Use this option to inject middleware at +named hook points. Each middleware must implement the same interface as the +object it is wrapping. For instance, a registry middleware must implement the +`distribution.Namespace` interface, while a repository middleware must implement +`distribution.Repository`, and a storage middleware must implement +`driver.StorageDriver`. + +This is an example configuration of the `cloudfront` middleware, a storage +middleware: + +```none +middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000s +``` + +Each middleware entry has `name` and `options` entries. The `name` must +correspond to the name under which the middleware registers itself. The +`options` field is a map that details custom configuration required to +initialize the middleware. It is treated as a `map[string]interface{}`. As such, +it supports any interesting structures desired, leaving it up to the middleware +initialization function to best determine how to handle the specific +interpretation of the options. + +### `cloudfront` + + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `baseurl` | yes | The `SCHEME://HOST[/PATH]` at which Cloudfront is served. | +| `privatekey` | yes | The private key for Cloudfront, provided by AWS. | +| `keypairid` | yes | The key pair ID provided by AWS. | +| `duration` | no | An integer and unit for the duration of the Cloudfront session. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`. For example, `3000s` is valid, but `3000 s` is not. If you do not specify a `duration` or you specify an integer without a time unit, the duration defaults to `20m` (20 minutes).| + +### `redirect` + +You can use the `redirect` storage middleware to specify a custom URL to a +location of a proxy for the layer stored by the S3 storage driver. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------------------------------------------------------------| +| `baseurl` | yes | `SCHEME://HOST` at which layers are served. Can also contain port. For example, `https://example.com:5443`. | + +## `reporting` + +``` +reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true +``` + +The `reporting` option is **optional** and configures error and metrics +reporting tools. At the moment only two services are supported: + +- [Bugsnag](#bugsnag) +- [New Relic](#new-relic) + +A valid configuration may contain both. + +### `bugsnag` + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `apikey` | yes | The API Key provided by Bugsnag. | +| `releasestage` | no | Tracks where the registry is deployed, using a string like `production`, `staging`, or `development`.| +| `endpoint`| no | The enterprise Bugsnag endpoint. | + +### `newrelic` + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `licensekey` | yes | License key provided by New Relic. | +| `name` | no | New Relic application name. | +| `verbose`| no | Set to `true` to enable New Relic debugging output on `stdout`. | + +## `http` + +```none +http: + addr: localhost:5000 + net: tcp + prefix: /my/nested/registry/ + host: https://myregistryaddress.org:5000 + secret: asecretforlocaldevelopment + relativeurls: false + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + letsencrypt: + cachefile: /path/to/cache-file + email: emailused@letsencrypt.com + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] + http2: + disabled: false +``` + +The `http` option details the configuration for the HTTP server that hosts the +registry. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `addr` | yes | The address for which the server should accept connections. The form depends on a network type (see the `net` option). Use `HOST:PORT` for TCP and `FILE` for a UNIX socket. | +| `net` | no | The network used to create a listening socket. Known networks are `unix` and `tcp`. | +| `prefix` | no | If the server does not run at the root path, set this to the value of the prefix. The root path is the section before `v2`. It requires both preceding and trailing slashes, such as in the example `/path/`. | +| `host` | no | A fully-qualified URL for an externally-reachable address for the registry. If present, it is used when creating generated URLs. Otherwise, these URLs are derived from client requests. | +| `secret` | no | A random piece of data used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. If you omit the secret, the registry will automatically generate a secret when it starts. **If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries.**| +| `relativeurls`| no | If `true`, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. **This option is not compatible with Docker 1.7 and earlier.**| + + +### `tls` + +The `tls` structure within `http` is **optional**. Use this to configure TLS +for the server. If you already have a web server running on +the same host as the registry, you may prefer to configure TLS on that web server +and proxy connections to the registry server. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `certificate` | yes | Absolute path to the x509 certificate file. | +| `key` | yes | Absolute path to the x509 private key file. | +| `clientcas` | no | An array of absolute paths to x509 CA files. | + +### `letsencrypt` + +The `letsencrypt` structure within `tls` is **optional**. Use this to configure +TLS certificates provided by +[Let's Encrypt](https://letsencrypt.org/how-it-works/). + +>**NOTE**: When using Let's Encrypt, ensure that the outward-facing address is +> accessible on port `443`. The registry defaults to listening on port `5000`. +> If you run the registry as a container, consider adding the flag `-p 443:5000` +> to the `docker run` command or using a similar setting in a cloud +> configuration. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `cachefile` | yes | Absolute path to a file where the Let's Encrypt agent can cache data. | +| `email` | yes | The email address used to register with Let's Encrypt. | + +### `debug` + +The `debug` option is **optional** . Use it to configure a debug server that +can be helpful in diagnosing problems. The debug endpoint can be used for +monitoring registry metrics and health, as well as profiling. Sensitive +information may be available via the debug endpoint. Please be certain that +access to the debug endpoint is locked down in a production environment. + +The `debug` section takes a single required `addr` parameter, which specifies +the `HOST:PORT` on which the debug server should accept connections. + +### `headers` + +The `headers` option is **optional** . Use it to specify headers that the HTTP +server should include in responses. This can be used for security headers such +as `Strict-Transport-Security`. + +The `headers` option should contain an option for each header to include, where +the parameter name is the header's name, and the parameter value a list of the +header's payload values. + +Including `X-Content-Type-Options: [nosniff]` is recommended, so that browsers +will not interpret content as HTML if they are directed to load a page from the +registry. This header is included in the example configuration file. + +### `http2` + +The `http2` structure within `http` is **optional**. Use this to control http2 +settings for the registry. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `disabled` | no | If `true`, then `http2` support is disabled. | + +## `notifications` + +```none +notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + ignoredmediatypes: + - application/octet-stream +``` + +The notifications option is **optional** and currently may contain a single +option, `endpoints`. + +### `endpoints` + +The `endpoints` structure contains a list of named services (URLs) that can +accept event notifications. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `name` | yes | A human-readable name for the service. | +| `disabled` | no | If `true`, notifications are disabled for the service.| +| `url` | yes | The URL to which events should be published. | +| `headers` | yes | A list of static headers to add to each request. Each header's name is a key beneath `headers`, and each value is a list of payloads for that header name. Values must always be lists. | +| `timeout` | yes | A value for the HTTP timeout. A positive integer and an optional suffix indicating the unit of time, which may be `ns`, `us`, `ms`, `s`, `m`, or `h`. If you omit the unit of time, `ns` is used. | +| `threshold` | yes | An integer specifying how long to wait before backing off a failure. | +| `backoff` | yes | How long the system backs off before retrying after a failure. A positive integer and an optional suffix indicating the unit of time, which may be `ns`, `us`, `ms`, `s`, `m`, or `h`. If you omit the unit of time, `ns` is used. | +| `ignoredmediatypes`|no| A list of target media types to ignore. Events with these target media types are not published to the endpoint. | + +## `redis` + +```none +redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s +``` + +Declare parameters for constructing the `redis` connections. Registry instances +may use the Redis instance for several applications. Currently, it caches +information about immutable blobs. Most of the `redis` options control +how the registry connects to the `redis` instance. You can control the pool's +behavior with the [pool](#pool) subsection. + +You should configure Redis with the **allkeys-lru** eviction policy, because the +registry does not set an expiration value on keys. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `addr` | yes | The address (host and port) of the Redis instance. | +| `password`| no | A password used to authenticate to the Redis instance.| +| `db` | no | The name of the database to use for each connection. | +| `dialtimeout` | no | The timeout for connecting to the Redis instance. | +| `readtimeout` | no | The timeout for reading from the Redis instance. | +| `writetimeout` | no | The timeout for writing to the Redis instance. | + +### `pool` + +```none +pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s +``` + +Use these settings to configure the behavior of the Redis connection pool. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `maxidle` | no | The maximum number of idle connections in the pool. | +| `maxactive`| no | The maximum number of connections which can be open before blocking a connection request. | +| `idletimeout`| no | How long to wait before closing inactive connections. | + +## `health` + +```none +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + file: + - file: /path/to/checked/file + interval: 10s + http: + - uri: http://server.to.check/must/return/200 + headers: + Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] + statuscode: 200 + timeout: 3s + interval: 10s + threshold: 3 + tcp: + - addr: redis-server.domain.com:6379 + timeout: 3s + interval: 10s + threshold: 3 +``` + +The health option is **optional**, and contains preferences for a periodic +health check on the storage driver's backend storage, as well as optional +periodic checks on local files, HTTP URIs, and/or TCP servers. The results of +the health checks are available at the `/debug/health` endpoint on the debug +HTTP server if the debug HTTP server is enabled (see http section). + +### `storagedriver` + +The `storagedriver` structure contains options for a health check on the +configured storage driver's backend storage. The health check is only active +when `enabled` is set to `true`. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `enabled` | yes | Set to `true` to enable storage driver health checks or `false` to disable them. | +| `interval`| no | How long to wait between repetitions of the storage driver health check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | +| `threshold`| no | A positive integer which represents the number of times the check must fail before the state is marked as unhealthy. If not specified, a single failure marks the state as unhealthy. | + +### `file` + +The `file` structure includes a list of paths to be periodically checked for the\ +existence of a file. If a file exists at the given path, the health check will +fail. You can use this mechanism to bring a registry out of rotation by creating +a file. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `file` | yes | The path to check for existence of a file. | +| `interval`| no | How long to wait before repeating the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. | + +### `http` + +The `http` structure includes a list of HTTP URIs to periodically check with +`HEAD` requests. If a `HEAD` request does not complete or returns an unexpected +status code, the health check will fail. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `uri` | yes | The URI to check. | +| `headers` | no | Static headers to add to each request. Each header's name is a key beneath `headers`, and each value is a list of payloads for that header name. Values must always be lists. | +| `statuscode` | no | The expected status code from the HTTP URI. Defaults to `200`. | +| `timeout` | no | How long to wait before timing out the HTTP request. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | +| `interval`| no | How long to wait before repeating the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | +| `threshold`| no | The number of times the check must fail before the state is marked as unhealthy. If this field is not specified, a single failure marks the state as unhealthy. | + +### `tcp` + +The `tcp` structure includes a list of TCP addresses to periodically check using +TCP connection attempts. Addresses must include port numbers. If a connection +attempt fails, the health check will fail. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `addr` | yes | The TCP address and port to connect to. | +| `timeout` | no | How long to wait before timing out the TCP connection. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | +| `interval`| no | How long to wait between repetitions of the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | +| `threshold`| no | The number of times the check must fail before the state is marked as unhealthy. If this field is not specified, a single failure marks the state as unhealthy. | + + +## `proxy` + +``` +proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] +``` + +The `proxy` structure allows a registry to be configured as a pull-through cache +to Docker Hub. See +[mirror](https://github.com/docker/docker.github.io/tree/master/registry/recipes/mirror.md) +for more information. Pushing to a registry configured as a pull-through cache +is unsupported. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `remoteurl`| yes | The URL for the repository on Docker Hub. | +| `username` | no | The username registered with Docker Hub which has access to the repository. | +| `password` | no | The password used to authenticate to Docker Hub using the username specified in `username`. | + + +To enable pulling private repositories (e.g. `batman/robin`) specify the +username (such as `batman`) and the password for that username. + +> **Note**: These private repositories are stored in the proxy cache's storage. +> Take appropriate measures to protect access to the proxy cache. + +## `compatibility` + +```none +compatibility: + schema1: + signingkeyfile: /etc/registry/key.json +``` + +Use the `compatibility` structure to configure handling of older and deprecated +features. Each subsection defines such a feature with configurable behavior. + +### `schema1` + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `signingkeyfile` | no | The signing private key used to add signatures to `schema1` manifests. If no signing key is provided, a new ECDSA key is generated when the registry starts. | + +## `validation` + +```none +validation: + enabled: true + manifests: + urls: + allow: + - ^https?://([^/]+\.)*example\.com/ + deny: + - ^https?://www\.example\.com/ +``` + +### `enabled` + +Use the `enabled` flag to enable the other options in the `validation` +section. They are disabled by default. + +### `manifests` + +Use the `manifest` subsection to configure manifest validation. + +#### `urls` + +The `allow` and `deny` options are each a list of +[regular expressions](https://godoc.org/regexp/syntax) that restrict the URLs in +pushed manifests. + +If `allow` is unset, pushing a manifest containing URLs fails. + +If `allow` is set, pushing a manifest succeeds only if all URLs match +one of the `allow` regular expressions **and** one of the following holds: + +1. `deny` is unset. +2. `deny` is set but no URLs within the manifest match any of the `deny` regular + expressions. + +## Example: Development configuration + +You can use this simple example for local development: + +```none +version: 0.1 +log: + level: debug +storage: + filesystem: + rootdirectory: /var/lib/registry +http: + addr: localhost:5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 +``` + +This example configures the registry instance to run on port `5000`, binding to +`localhost`, with the `debug` server enabled. Registry data is stored in the +`/var/lib/registry` directory. Logging is set to `debug` mode, which is the most +verbose. + +See +[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml) +for another simple configuration. Both examples are generally useful for local +development. + + +## Example: Middleware configuration + +This example configures [Amazon Cloudfront](http://aws.amazon.com/cloudfront/) +as the storage middleware in a registry. Middleware allows the registry to serve +layers via a content delivery network (CDN). This reduces requests to the +storage layer. + +Cloudfront requires the S3 storage driver. + +This is the configuration expressed in YAML: + +```none +middleware: + storage: + - name: cloudfront + disabled: false + options: + baseurl: http://d111111abcdef8.cloudfront.net + privatekey: /path/to/asecret.pem + keypairid: asecret + duration: 60 +``` + +See the configuration reference for [Cloudfront](#cloudfront) for more +information about configuration options. + +> **Note**: Cloudfront keys exist separately from other AWS keys. See +> [the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) +> for more information. diff --git a/vendor/github.com/docker/distribution/docs/spec/api.md b/vendor/github.com/docker/distribution/docs/spec/api.md new file mode 100644 index 0000000..74ac8e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/api.md @@ -0,0 +1,5485 @@ +--- +title: "HTTP API V2" +description: "Specification for the Registry API." +keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"] +--- + +# Docker Registry HTTP API V2 + +## Introduction + +The _Docker Registry HTTP API_ is the protocol to facilitate distribution of +images to the docker engine. It interacts with instances of the docker +registry, which is a service to manage information about docker images and +enable their distribution. The specification covers the operation of version 2 +of this API, known as _Docker Registry HTTP API V2_. + +While the V1 registry protocol is usable, there are several problems with the +architecture that have led to this new version. The main driver of this +specification is a set of changes to the docker the image format, covered in +[docker/docker#8093](https://github.com/docker/docker/issues/8093). +The new, self-contained image manifest simplifies image definition and improves +security. This specification will build on that work, leveraging new properties +of the manifest format to improve performance, reduce bandwidth usage and +decrease the likelihood of backend corruption. + +For relevant details and history leading up to this specification, please see +the following issues: + +- [docker/docker#8093](https://github.com/docker/docker/issues/8093) +- [docker/docker#9015](https://github.com/docker/docker/issues/9015) +- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) + +### Scope + +This specification covers the URL layout and protocols of the interaction +between docker registry and docker core. This will affect the docker core +registry API and the rewrite of docker-registry. Docker registry +implementations may implement other API endpoints, but they are not covered by +this specification. + +This includes the following features: + +- Namespace-oriented URI Layout +- PUSH/PULL registry server for V2 image manifest format +- Resumable layer PUSH support +- V2 Client library implementation + +While authentication and authorization support will influence this +specification, details of the protocol will be left to a future specification. +Relevant header definitions and error codes are present to provide an +indication of what a client may encounter. + +#### Future + +There are features that have been discussed during the process of cutting this +specification. The following is an incomplete list: + +- Immutable image references +- Multiple architecture support +- Migration from v2compatibility representation + +These may represent features that are either out of the scope of this +specification, the purview of another specification or have been deferred to a +future version. + +### Use Cases + +For the most part, the use cases of the former registry API apply to the new +version. Differentiating use cases are covered below. + +#### Image Verification + +A docker engine instance would like to run verified image named +"library/ubuntu", with the tag "latest". The engine contacts the registry, +requesting the manifest for "library/ubuntu:latest". An untrusted registry +returns a manifest. Before proceeding to download the individual layers, the +engine verifies the manifest's signature, ensuring that the content was +produced from a trusted source and no tampering has occurred. After each layer +is downloaded, the engine verifies the digest of the layer, ensuring that the +content matches that specified by the manifest. + +#### Resumable Push + +Company X's build servers lose connectivity to docker registry before +completing an image layer transfer. After connectivity returns, the build +server attempts to re-upload the image. The registry notifies the build server +that the upload has already been partially attempted. The build server +responds by only sending the remaining data to complete the image file. + +#### Resumable Pull + +Company X is having more connectivity problems but this time in their +deployment datacenter. When downloading an image, the connection is +interrupted before completion. The client keeps the partial data and uses http +`Range` requests to avoid downloading repeated data. + +#### Layer Upload De-duplication + +Company Y's build system creates two identical docker layers from build +processes A and B. Build process A completes uploading the layer before B. +When process B attempts to upload the layer, the registry indicates that its +not necessary because the layer is already known. + +If process A and B upload the same layer at the same time, both operations +will proceed and the first to complete will be stored in the registry (Note: +we may modify this to prevent dogpile with some locking mechanism). + +### Changes + +The V2 specification has been written to work as a living document, specifying +only what is certain and leaving what is not specified open or to future +changes. Only non-conflicting additions should be made to the API and accepted +changes should avoid preventing future changes from happening. + +This section should be updated when changes are made to the specification, +indicating what is different. Optionally, we may start marking parts of the +specification to correspond with the versions enumerated here. + +Each set of changes is given a letter corresponding to a set of modifications +that were applied to the baseline specification. These are merely for +reference and shouldn't be used outside the specification other than to +identify a set of modifications. + +
+
l
+
+
    +
  • Document TOOMANYREQUESTS error code.
  • +
+
+ +
k
+
+
    +
  • Document use of Accept and Content-Type headers in manifests endpoint.
  • +
+
+ +
j
+
+
    +
  • Add ability to mount blobs across repositories.
  • +
+
+ +
i
+
+
    +
  • Clarified expected behavior response to manifest HEAD request.
  • +
+
+ +
h
+
+
    +
  • All mention of tarsum removed.
  • +
+
+ +
g
+
+
    +
  • Clarify behavior of pagination behavior with unspecified parameters.
  • +
+
+ +
f
+
+
    +
  • Specify the delete API for layers and manifests.
  • +
+
+ +
e
+
+
    +
  • Added support for listing registry contents.
  • +
  • Added pagination to tags API.
  • +
  • Added common approach to support pagination.
  • +
+
+ +
d
+
+
    +
  • Allow repository name components to be one character.
  • +
  • Clarified that single component names are allowed.
  • +
+
+ +
c
+
+
    +
  • Added section covering digest format.
  • +
  • Added more clarification that manifest cannot be deleted by tag.
  • +
+
+ +
b
+
+
    +
  • Added capability of doing streaming upload to PATCH blob upload.
  • +
  • Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
  • +
  • Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
  • +
+
+ +
a
+
+
    +
  • Added support for immutable manifest references in manifest endpoints.
  • +
  • Deleting a manifest by tag has been deprecated.
  • +
  • Specified `Docker-Content-Digest` header for appropriate entities.
  • +
  • Added error code for unsupported operations.
  • +
+
+
+ +## Overview + +This section covers client flows and details of the API endpoints. The URI +layout of the new API is structured to support a rich authentication and +authorization model by leveraging namespaces. All endpoints will be prefixed +by the API version and the repository name: + + /v2// + +For example, an API endpoint that will work with the `library/ubuntu` +repository, the URI prefix will be: + + /v2/library/ubuntu/ + +This scheme provides rich access control over various operations and methods +using the URI prefix and http methods that can be controlled in variety of +ways. + +Classically, repository names have always been two path components where each +path component is less than 30 characters. The V2 registry API does not +enforce this. The rules for a repository name are as follows: + +1. A repository name is broken up into _path components_. A component of a + repository name must be at least one lowercase, alpha-numeric characters, + optionally separated by periods, dashes or underscores. More strictly, it + must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. +2. If a repository name has two or more path components, they must be + separated by a forward slash ("/"). +3. The total length of a repository name, including slashes, must be less than + 256 characters. + +These name requirements _only_ apply to the registry API and should accept a +superset of what is supported by other docker ecosystem components. + +All endpoints should support aggressive http caching, compression and range +headers, where appropriate. The new API attempts to leverage HTTP semantics +where possible but may break from standards to implement targeted features. + +For detail on individual endpoints, please see the [_Detail_](#detail) +section. + +### Errors + +Actionable failure conditions, covered in detail in their relevant sections, +are reported as part of 4xx responses, in a json response body. One or more +errors will be returned in the following format: + + { + "errors:" [{ + "code": , + "message": , + "detail": + }, + ... + ] + } + +The `code` field will be a unique identifier, all caps with underscores by +convention. The `message` field will be a human readable string. The optional +`detail` field may contain arbitrary json data providing information the +client can use to resolve the issue. + +While the client can take action on certain error codes, the registry may add +new error codes over time. All client implementations should treat unknown +error codes as `UNKNOWN`, allowing future error codes to be added without +breaking API compatibility. For the purposes of the specification error codes +will only be added and never removed. + +For a complete account of all error codes, please see the [_Errors_](#errors-2) +section. + +### API Version Check + +A minimal endpoint, mounted at `/v2/` will provide version support information +based on its response statuses. The request format is as follows: + + GET /v2/ + +If a `200 OK` response is returned, the registry implements the V2(.1) +registry API and the client may proceed safely with other V2 operations. +Optionally, the response may contain information about the supported paths in +the response body. The client should be prepared to ignore this data. + +If a `401 Unauthorized` response is returned, the client should take action +based on the contents of the "WWW-Authenticate" header and try the endpoint +again. Depending on access control setup, the client may still have to +authenticate against different resources, even if this check succeeds. + +If `404 Not Found` response status, or other unexpected status, is returned, +the client should proceed with the assumption that the registry does not +implement V2 of the API. + +When a `200 OK` or `401 Unauthorized` response is returned, the +"Docker-Distribution-API-Version" header should be set to "registry/2.0". +Clients may require this header value to determine if the endpoint serves this +API. When this header is omitted, clients may fallback to an older API version. + +### Content Digests + +This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). +The core of this design is the concept of a content addressable identifier. It +uniquely identifies content by taking a collision-resistant hash of the bytes. +Such an identifier can be independently calculated and verified by selection +of a common _algorithm_. If such an identifier can be communicated in a secure +manner, one can retrieve the content from an insecure source, calculate it +independently and be certain that the correct content was obtained. Put simply, +the identifier is a property of the content. + +To disambiguate from other concepts, we call this identifier a _digest_. A +_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ +portion. The _algorithm_ identifies the methodology used to calculate the +digest. The _hex_ portion is the hex-encoded result of the hash. + +We define a _digest_ string to match the following grammar: +``` +digest := algorithm ":" hex +algorithm := /[A-Fa-f0-9_+.-]+/ +hex := /[A-Fa-f0-9]+/ +``` + +Some examples of _digests_ include the following: + +digest | description | +----------------------------------------------------------------------------------|------------------------------------------------ +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | + +While the _algorithm_ does allow one to implement a wide variety of +algorithms, compliant implementations should use sha256. Heavy processing of +input before calculating a hash is discouraged to avoid degrading the +uniqueness of the _digest_ but some canonicalization may be performed to +ensure consistent identifiers. + +Let's use a simple example in pseudo-code to demonstrate a digest calculation: +``` +let C = 'a small string' +let B = sha256(C) +let D = 'sha256:' + EncodeHex(B) +let ID(C) = D +``` + +Above, we have bytestring `C` passed into a function, `SHA256`, that returns a +bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated +with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` +as equal to `D`. A digest can be verified by independently calculating `D` and +comparing it with identifier `ID(C)`. + +#### Digest Header + +To provide verification of http content, any response may include a +`Docker-Content-Digest` header. This will include the digest of the target +entity returned in the response. For blobs, this is the entire blob content. For +manifests, this is the manifest body without the signature content, also known +as the JWS payload. Note that the commonly used canonicalization for digest +calculation may be dependent on the mediatype of the content, such as with +manifests. + +The client may choose to ignore the header or may verify it to ensure content +integrity and transport security. This is most important when fetching by a +digest. To ensure security, the content should be verified against the digest +used to fetch the content. At times, the returned digest may differ from that +used to initiate a request. Such digests are considered to be from different +_domains_, meaning they have different values for _algorithm_. In such a case, +the client may choose to verify the digests in both domains or ignore the +server's digest. To maintain security, the client _must_ always verify the +content against the _digest_ used to fetch the content. + +> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use +> the same digest used to fetch the content to verify it. The header +> `Docker-Content-Digest` should not be trusted over the "local" digest. + +### Pulling An Image + +An "image" is a combination of a JSON manifest and individual layer files. The +process of pulling an image centers around retrieving these two components. + +The first step in pulling an image is to retrieve the manifest. For reference, +the relevant manifest fields for the registry are the following: + + field | description | +----------|------------------------------------------------| +name | The name of the image. | +tag | The tag for this version of the image. | +fsLayers | A list of layer descriptors (including digest) | +signature | A JWS used to verify the manifest content | + +For more information about the manifest format, please see +[docker/docker#8093](https://github.com/docker/docker/issues/8093). + +When the manifest is in hand, the client must verify the signature to ensure +the names and layers are valid. Once confirmed, the client will then use the +digests to download the individual layers. Layers are stored in as blobs in +the V2 registry API, keyed by their digest. + +#### Pulling an Image Manifest + +The image manifest can be fetched with the following url: + +``` +GET /v2//manifests/ +``` + +The `name` and `reference` parameter identify the image and are required. The +reference may include a tag or digest. + +The client should include an Accept header indicating which manifest content +types it supports. For more details on the manifest formats and their content +types, see [manifest-v2-1.md](manifest-v2-1.md) and +[manifest-v2-2.md](manifest-v2-2.md). In a successful response, the Content-Type +header will indicate which manifest type is being returned. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful, the image +manifest will be returned, with the following format (see +[docker/docker#8093](https://github.com/docker/docker/issues/8093) for details): + + { + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": + } + +The client should verify the returned manifest signature for authenticity +before fetching layers. + +##### Existing Manifests + +The image manifest can be checked for existence with the following url: + +``` +HEAD /v2//manifests/ +``` + +The `name` and `reference` parameter identify the image and are required. The +reference may include a tag or digest. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful the response will +be as follows: + +``` +200 OK +Content-Length: +Docker-Content-Digest: +``` + + +#### Pulling a Layer + +Layers are stored in the blob portion of the registry, keyed by digest. +Pulling a layer is carried out by a standard http request. The URL is as +follows: + + GET /v2//blobs/ + +Access to a layer will be gated by the `name` of the repository but is +identified uniquely in the registry by `digest`. + +This endpoint may issue a 307 (302 for /blobs/uploads/ +``` + +The parameters of this request are the image namespace under which the layer +will be linked. Responses to this request are covered below. + +##### Existing Layers + +The existence of a layer can be checked via a `HEAD` request to the blob store +API. The request should be formatted as follows: + +``` +HEAD /v2//blobs/ +``` + +If the layer with the digest specified in `digest` is available, a 200 OK +response will be received, with no actual body content (this is according to +http specification). The response will look as follows: + +``` +200 OK +Content-Length: +Docker-Content-Digest: +``` + +When this response is received, the client can assume that the layer is +already available in the registry under the given name and should take no +further action to upload the layer. Note that the binary digests may differ +for the existing registry layer, but the digests will be guaranteed to match. + +##### Uploading the Layer + +If the POST request is successful, a `202 Accepted` response will be returned +with the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The rest of the upload process can be carried out with the returned url, +called the "Upload URL" from the `Location` header. All responses to the +upload url, whether sending data or getting status, will be in this format. +Though the URI format (`/v2//blobs/uploads/`) for the `Location` +header is specified, clients should treat it as an opaque url and should never +try to assemble it. While the `uuid` parameter may be an actual UUID, this +proposal imposes no constraints on the format and clients should never impose +any. + +If clients need to correlate local upload state with remote upload state, the +contents of the `Docker-Upload-UUID` header should be used. Such an id can be +used to key the last used location header when implementing resumable uploads. + +##### Upload Progress + +The progress and chunk coordination of the upload process will be coordinated +through the `Range` header. While this is a non-standard use of the `Range` +header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. +For an upload that just started, for an example with a 1000 byte layer file, +the `Range` header would be as follows: + +``` +Range: bytes=0-0 +``` + +To get the status of an upload, issue a GET request to the upload URL: + +``` +GET /v2//blobs/uploads/ +Host: +``` + +The response will be similar to the above, except will return 204 status: + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: bytes=0- +Docker-Upload-UUID: +``` + +Note that the HTTP `Range` header byte ranges are inclusive and that will be +honored, even in non-standard use cases. + +##### Monolithic Upload + +A monolithic upload is simply a chunked upload with a single chunk and may be +favored by clients that would like to avoided the complexity of chunking. To +carry out a "monolithic" upload, one can simply put the entire content blob to +the provided URL: + +``` +PUT /v2//blobs/uploads/?digest= +Content-Length: +Content-Type: application/octet-stream + + +``` + +The "digest" parameter must be included with the PUT request. Please see the +[_Completed Upload_](#completed-upload) section for details on the parameters +and expected responses. + +##### Chunked Upload + +To carry out an upload of a chunk, the client can specify a range header and +only include that part of the layer file: + +``` +PATCH /v2//blobs/uploads/ +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +There is no enforcement on layer chunk splits other than that the server must +receive them in order. The server may enforce a minimum chunk size. If the +server cannot accept the chunk, a `416 Requested Range Not Satisfiable` +response will be returned and will include a `Range` header indicating the +current status: + +``` +416 Requested Range Not Satisfiable +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +If this response is received, the client should resume from the "last valid +range" and upload the subsequent chunk. A 416 will be returned under the +following conditions: + +- Invalid Content-Range header format +- Out of order chunk: the range of the next chunk must start immediately after + the "last valid range" from the previous response. + +When a chunk is accepted as part of the upload, a `202 Accepted` response will +be returned, including a `Range` header with the current upload status: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +##### Completed Upload + +For an upload to be considered complete, the client must submit a `PUT` +request on the upload endpoint with a digest parameter. If it is not provided, +the upload will not be considered complete. The format for the final chunk +will be as follows: + +``` +PUT /v2//blob/uploads/?digest= +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +Optionally, if all chunks have already been uploaded, a `PUT` request with a +`digest` parameter and zero-length body may be sent to complete and validated +the upload. Multiple "digest" parameters may be provided with different +digests. The server may verify none or all of them but _must_ notify the +client if the content is rejected. + +When the last chunk is received and the layer has been validated, the client +will receive a `201 Created` response: + +``` +201 Created +Location: /v2//blobs/ +Content-Length: 0 +Docker-Content-Digest: +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob which may differ from the provided digest. Most clients may +ignore the value but if it is used, the client should verify the value against +the uploaded blob data. + +###### Digest Parameter + +The "digest" parameter is designed as an opaque parameter to support +verification of a successful transfer. For example, an HTTP URI parameter +might be as follows: + +``` +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b +``` + +Given this parameter, the registry will verify that the provided content does +match this digest. + +##### Canceling an Upload + +An upload can be cancelled by issuing a DELETE request to the upload endpoint. +The format will be as follows: + +``` +DELETE /v2//blobs/uploads/ +``` + +After this request is issued, the upload uuid will no longer be valid and the +registry server will dump all intermediate data. While uploads will time out +if not completed, clients should issue this request if they encounter a fatal +error but still have the ability to issue an http request. + +##### Cross Repository Blob Mount + +A blob may be mounted from another repository that the client has read access +to, removing the need to upload a blob already known to the registry. To issue +a blob mount instead of an upload, a POST request should be issued in the +following format: + +``` +POST /v2//blobs/uploads/?mount=&from= +Content-Length: 0 +``` + +If the blob is successfully mounted, the client will receive a `201 Created` +response: + +``` +201 Created +Location: /v2//blobs/ +Content-Length: 0 +Docker-Content-Digest: +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob which may differ from the provided digest. Most clients may +ignore the value but if it is used, the client should verify the value against +the uploaded blob data. + +If a mount fails due to invalid repository or digest arguments, the registry +will fall back to the standard upload behavior and return a `202 Accepted` with +the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +This behavior is consistent with older versions of the registry, which do not +recognize the repository mount query parameters. + +Note: a client may issue a HEAD request to check existence of a blob in a source +repository to distinguish between the registry not supporting blob mounts and +the blob not existing in the expected repository. + +##### Errors + +If an 502, 503 or 504 error is received, the client should assume that the +download can proceed due to a temporary condition, honoring the appropriate +retry mechanism. Other 5xx errors should be treated as terminal. + +If there is a problem with the upload, a 4xx error will be returned indicating +the problem. After receiving a 4xx response (except 416, as called out above), +the upload will be considered failed and the client should take appropriate +action. + +Note that the upload url will not be available forever. If the upload uuid is +unknown to the registry, a `404 Not Found` response will be returned and the +client must restart the upload process. + +### Deleting a Layer + +A layer may be deleted from the registry via its `name` and `digest`. A +delete may be issued with the following request format: + + DELETE /v2//blobs/ + +If the blob exists and has been successfully deleted, the following response +will be issued: + + 202 Accepted + Content-Length: None + +If the blob had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +If a layer is deleted which is referenced by a manifest in the registry, +then the complete images will not be resolvable. + +#### Pushing an Image Manifest + +Once all of the layers for an image are uploaded, the client can upload the +image manifest. An image can be pushed using the following request format: + + PUT /v2//manifests/ + Content-Type: + + { + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": , + ... + } + +The `name` and `reference` fields of the response body must match those +specified in the URL. The `reference` field may be a "tag" or a "digest". The +content type should match the type of the manifest being uploaded, as specified +in [manifest-v2-1.md](manifest-v2-1.md) and [manifest-v2-2.md](manifest-v2-2.md). + +If there is a problem with pushing the manifest, a relevant 4xx response will +be returned with a JSON error message. Please see the +[_PUT Manifest_](#put-manifest) section for details on possible error codes that +may be returned. + +If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are +returned. The `detail` field of the error response will have a `digest` field +identifying the missing blob. An error is returned for each unknown blob. The +response format is as follows: + + { + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": + } + }, + ... + ] + } + +### Listing Repositories + +Images are stored in collections, known as a _repository_, which is keyed by a +`name`, as seen throughout the API specification. A registry instance may +contain several repositories. The list of available repositories is made +available through the _catalog_. + +The catalog for a given registry can be retrieved with the following request: + +``` +GET /v2/_catalog +``` + +The response will be in the following format: + +``` +200 OK +Content-Type: application/json + +{ + "repositories": [ + , + ... + ] +} +``` + +Note that the contents of the response are specific to the registry +implementation. Some registries may opt to provide a full catalog output, +limit it based on the user's access level or omit upstream results, if +providing mirroring functionality. Subsequently, the presence of a repository +in the catalog listing only means that the registry *may* provide access to +the repository at the time of the request. Conversely, a missing entry does +*not* mean that the registry does not have the repository. More succinctly, +the presence of a repository only guarantees that it is there but not that it +is _not_ there. + +For registries with a large number of repositories, this response may be quite +large. If such a response is expected, one should use pagination. A registry +may also limit the amount of responses returned even if pagination was not +explicitly requested. In this case the `Link` header will be returned along +with the results, and subsequent results can be obtained by following the link +as if pagination had been initially requested. + +For details of the `Link` header, please see the [_Pagination_](#pagination) +section. + +#### Pagination + +Paginated catalog results can be retrieved by adding an `n` parameter to the +request URL, declaring that the response should be limited to `n` results. +Starting a paginated flow begins as follows: + +``` +GET /v2/_catalog?n= +``` + +The above specifies that a catalog response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "repositories": [ + , + ... + ] +} +``` + +The above includes the _first_ `n` entries from the result set. To get the +_next_ `n` entries, one can create a URL where the argument `last` has the +value from `repositories[len(repositories)-1]`. If there are indeed more +results, the URL for the next block is encoded in an +[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" +relation. The presence of the `Link` header communicates to the client that +the entire result set has not been returned and another request must be +issued. If the header is not present, the client can assume that all results +have been received. + +> __NOTE:__ In the request template above, note that the brackets +> are required. For example, if the url is +> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would +> be `; rel="next"`. Please see +> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. + +Compliant client implementations should always use the `Link` header +value when proceeding through results linearly. The client may construct URLs +to skip forward in the catalog. + +To get the next result set, a client would issue the request as follows, using +the URL encoded in the described `Link` header: + +``` +GET /v2/_catalog?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set. + +The catalog result set is represented abstractly as a lexically sorted list, +where the position in that list can be specified by the query term `last`. The +entries in the response start _after_ the term specified by `last`, up to `n` +entries. + +The behavior of `last` is quite simple when demonstrated with an example. Let +us say the registry has the following repositories: + +``` +a +b +c +d +``` + +If the value of `n` is 2, _a_ and _b_ will be returned on the first response. +The `Link` header returned on the response will have `n` set to 2 and last set +to _b_: + +``` +Link: <?n=2&last=b>; rel="next" +``` + +The client can then issue the request with the above value from the `Link` +header, receiving the values _c_ and _d_. Note that `n` may change on the second +to last response or be fully omitted, depending on the server implementation. + +### Listing Image Tags + +It may be necessary to list all of the tags under a given repository. The tags +for an image repository can be retrieved with the following request: + + GET /v2//tags/list + +The response will be in the following format: + + 200 OK + Content-Type: application/json + + { + "name": , + "tags": [ + , + ... + ] + } + +For repositories with a large number of tags, this response may be quite +large. If such a response is expected, one should use the pagination. + +#### Pagination + +Paginated tag results can be retrieved by adding the appropriate parameters to +the request URL described above. The behavior of tag pagination is identical +to that specified for catalog pagination. We cover a simple flow to highlight +any differences. + +Starting a paginated flow may begin as follows: + +``` +GET /v2//tags/list?n= +``` + +The above specifies that a tags response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +To get the next result set, a client would issue the request as follows, using +the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` +header: + +``` +GET /v2//tags/list?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set in the response. The behavior of the `last` parameter, the provided +response result, lexical ordering and encoding of the `Link` header are +identical to that of catalog pagination. + +### Deleting an Image + +An image may be deleted from the registry via its `name` and `reference`. A +delete may be issued with the following request format: + + DELETE /v2//manifests/ + +For deletes, `reference` *must* be a digest or the delete will fail. If the +image exists and has been successfully deleted, the following response will be +issued: + + 202 Accepted + Content-Length: None + +If the image had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +> **Note** When deleting a manifest from a registry version 2.3 or later, the +> following header must be used when `HEAD` or `GET`-ing the manifest to obtain +> the correct digest to delete: + + Accept: application/vnd.docker.distribution.manifest.v2+json + +> for more details, see: [compatibility.md](../compatibility.md#content-addressable-storage-cas) + +## Detail + +> **Note**: This section is still under construction. For the purposes of +> implementation, if any details below differ from the described request flows +> above, the section below should be corrected. When they match, this note +> should be removed. + +The behavior of the endpoints are covered in detail in this section, organized +by route and entity. All aspects of the request and responses are covered, +including headers, parameters and body formats. Examples of requests and their +corresponding responses, with success and failure, are enumerated. + +> **Note**: The sections on endpoint detail are arranged with an example +> request, a description of the request, followed by information about that +> request. + +A list of methods and URIs are covered in the table below: + +|Method|Path|Entity|Description| +|------|----|------|-----------| +| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. | +| GET | `/v2//tags/list` | Tags | Fetch the tags under the repository identified by `name`. | +| GET | `/v2//manifests/` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | +| PUT | `/v2//manifests/` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | +| DELETE | `/v2//manifests/` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. | +| GET | `/v2//blobs/` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | +| DELETE | `/v2//blobs/` | Blob | Delete the blob identified by `name` and `digest` | +| POST | `/v2//blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. | +| GET | `/v2//blobs/uploads/` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. | +| PATCH | `/v2//blobs/uploads/` | Blob Upload | Upload a chunk of data for the specified upload. | +| PUT | `/v2//blobs/uploads/` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. | +| DELETE | `/v2//blobs/uploads/` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. | +| GET | `/v2/_catalog` | Catalog | Retrieve a sorted, json list of repositories available in the registry. | + + +The detail for each endpoint is covered in the following sections. + +### Errors + +The error codes encountered via the API are enumerated in the following table: + +|Code|Message|Description| +|----|-------|-----------| + `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. + `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. + `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. + `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. + `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. + `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. + `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. + `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. + `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. + `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. + `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. + `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. + `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. + `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. + `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. + + + +### Base + +Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication. + + + +#### GET Base + +Check that the endpoint implements Docker Registry API V2. + + + +``` +GET /v2/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| + + + + +###### On Success: OK + +``` +200 OK +``` + +The API implements V2 protocol and is accessible. + + + + +###### On Failure: Not Found + +``` +404 Not Found +``` + +The registry does not implement the V2 API. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Tags + +Retrieve information about tags. + + + +#### GET Tags + +Fetch the tags under the repository identified by `name`. + + +##### Tags + +``` +GET /v2//tags/list +Host: +Authorization: +``` + +Return all tags for the repository + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +A list of tags for the named repository. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Tags Paginated + +``` +GET /v2//tags/list?n=&last= +``` + +Return a portion of the tags for the specified repository. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`name`|path|Name of the target repository.| +|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| +|`last`|query|Result set will include values lexically after last.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Link: <?n=&last=>; rel="next" +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ], +} +``` + +A list of tags for the named repository. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| +|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| + + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Manifest + +Create, update, delete and retrieve manifests. + + + +#### GET Manifest + +Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. + + + +``` +GET /v2//manifests/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: OK + +``` +200 OK +Docker-Content-Digest: +Content-Type: + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + +The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The name or reference was invalid. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### PUT Manifest + +Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. + + + +``` +PUT /v2//manifests/ +Host: +Authorization: +Content-Type: + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Content-Digest: +``` + +The manifest has been accepted by the registry and is stored under the specified `name` and `tag`. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The canonical location url of the uploaded manifest.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Manifest + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | +| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. | +| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +###### On Failure: Missing Layer(s) + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... + ] +} +``` + +One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + + +#### DELETE Manifest + +Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. + + + +``` +DELETE /v2//manifests/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: Accepted + +``` +202 Accepted +``` + + + + + + +###### On Failure: Invalid Name or Reference + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The specified `name` or `reference` were invalid and the delete was unable to proceed. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +###### On Failure: Unknown Manifest + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + + + +### Blob + +Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest. + + + +#### GET Blob + +Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. + + +##### Fetch Blob + +``` +GET /v2//blobs/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Docker-Content-Digest: +Content-Type: application/octet-stream + + +``` + +The blob identified by `digest` is available. The blob content will be present in the body of the request. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The length of the requested blob content.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + +###### On Success: Temporary Redirect + +``` +307 Temporary Redirect +Location: +Docker-Content-Digest: +``` + +The blob identified by `digest` is available at the provided location. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location where the layer should be accessible.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The blob, identified by `name` and `digest`, is unknown to the registry. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Fetch Blob Part + +``` +GET /v2//blobs/ +Host: +Authorization: +Range: bytes=- +``` + +This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Range`|header|HTTP Range header specifying blob chunk.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: Partial Content + +``` +206 Partial Content +Content-Length: +Content-Range: bytes -/ +Content-Type: application/octet-stream + + +``` + +The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The length of the requested blob chunk.| +|`Content-Range`|Content range of blob chunk.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Requested Range Not Satisfiable + +``` +416 Requested Range Not Satisfiable +``` + +The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### DELETE Blob + +Delete the blob identified by `name` and `digest` + + + +``` +DELETE /v2//blobs/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: Accepted + +``` +202 Accepted +Content-Length: 0 +Docker-Content-Digest: +``` + + + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|0| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The blob, identified by `name` and `digest`, is unknown to the registry. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Method Not Allowed + +``` +405 Method Not Allowed +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Initiate Blob Upload + +Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads. + + + +#### POST Initiate Blob Upload + +Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. + + +##### Initiate Monolithic Blob Upload + +``` +POST /v2//blobs/uploads/?digest= +Host: +Authorization: +Content-Length: +Content-Type: application/octect-stream + + +``` + +Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|| +|`name`|path|Name of the target repository.| +|`digest`|query|Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Upload-UUID: +``` + +The blob has been created in the registry and is available at the provided location. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Initiate Resumable Blob Upload + +``` +POST /v2//blobs/uploads/ +Host: +Authorization: +Content-Length: 0 +``` + +Initiate a resumable blob upload with an empty request body. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| + + + + +###### On Success: Accepted + +``` +202 Accepted +Content-Length: 0 +Location: /v2//blobs/uploads/ +Range: 0-0 +Docker-Upload-UUID: +``` + +The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Mount Blob + +``` +POST /v2//blobs/uploads/?mount=&from= +Host: +Authorization: +Content-Length: 0 +``` + +Mount a blob identified by the `mount` parameter from another repository. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| +|`mount`|query|Digest of blob to mount from the source repository.| +|`from`|query|Name of the source repository.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Upload-UUID: +``` + +The blob has been mounted in the repository and is available at the provided location. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Blob Upload + +Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls. + + + +#### GET Blob Upload + +Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. + + + +``` +GET /v2//blobs/uploads/ +Host: +Authorization: +``` + +Retrieve the progress of the current upload, as reported by the `Range` header. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Upload Progress + +``` +204 No Content +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The upload is known and in progress. The last received offset is available in the `Range` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### PATCH Blob Upload + +Upload a chunk of data for the specified upload. + + +##### Stream upload + +``` +PATCH /v2//blobs/uploads/ +Host: +Authorization: +Content-Type: application/octet-stream + + +``` + +Upload a stream of data to upload without completing the upload. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Data Accepted + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Chunked upload + +``` +PATCH /v2//blobs/uploads/ +Host: +Authorization: +Content-Range: - +Content-Length: +Content-Type: application/octet-stream + + +``` + +Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.| +|`Content-Length`|header|Length of the chunk being uploaded, corresponding the length of the request body.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Chunk Accepted + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Requested Range Not Satisfiable + +``` +416 Requested Range Not Satisfiable +``` + +The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### PUT Blob Upload + +Complete the upload specified by `uuid`, optionally appending the body as the final chunk. + + + +``` +PUT /v2//blobs/uploads/?digest= +Host: +Authorization: +Content-Length: +Content-Type: application/octet-stream + + +``` + +Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| +|`digest`|query|Digest of uploaded blob.| + + + + +###### On Success: Upload Complete + +``` +204 No Content +Location: +Content-Range: - +Content-Length: 0 +Docker-Content-Digest: +``` + +The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The canonical location of the blob for retrieval| +|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### DELETE Blob Upload + +Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. + + + +``` +DELETE /v2//blobs/uploads/ +Host: +Authorization: +Content-Length: 0 +``` + +Cancel the upload specified by `uuid`. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Upload Deleted + +``` +204 No Content +Content-Length: 0 +``` + +The upload has been successfully deleted. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +An error was encountered processing the delete. The client may ignore this error. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Catalog + +List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available. + + + +#### GET Catalog + +Retrieve a sorted, json list of repositories available in the registry. + + +##### Catalog Fetch + +``` +GET /v2/_catalog +``` + +Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links. + + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "repositories": [ + , + ... + ] +} +``` + +Returns the unabridged list of repositories as a json response. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +##### Catalog Fetch Paginated + +``` +GET /v2/_catalog?n=&last= +``` + +Return the specified portion of repositories. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| +|`last`|query|Result set will include values lexically after last.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Link: <?n=&last=>; rel="next" +Content-Type: application/json; charset=utf-8 + +{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +} +``` + + + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| +|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| + + + + + diff --git a/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl b/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl new file mode 100644 index 0000000..406eda5 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl @@ -0,0 +1,1215 @@ +--- +title: "HTTP API V2" +description: "Specification for the Registry API." +keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"] +--- + +# Docker Registry HTTP API V2 + +## Introduction + +The _Docker Registry HTTP API_ is the protocol to facilitate distribution of +images to the docker engine. It interacts with instances of the docker +registry, which is a service to manage information about docker images and +enable their distribution. The specification covers the operation of version 2 +of this API, known as _Docker Registry HTTP API V2_. + +While the V1 registry protocol is usable, there are several problems with the +architecture that have led to this new version. The main driver of this +specification is a set of changes to the docker the image format, covered in +[docker/docker#8093](https://github.com/docker/docker/issues/8093). +The new, self-contained image manifest simplifies image definition and improves +security. This specification will build on that work, leveraging new properties +of the manifest format to improve performance, reduce bandwidth usage and +decrease the likelihood of backend corruption. + +For relevant details and history leading up to this specification, please see +the following issues: + +- [docker/docker#8093](https://github.com/docker/docker/issues/8093) +- [docker/docker#9015](https://github.com/docker/docker/issues/9015) +- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) + +### Scope + +This specification covers the URL layout and protocols of the interaction +between docker registry and docker core. This will affect the docker core +registry API and the rewrite of docker-registry. Docker registry +implementations may implement other API endpoints, but they are not covered by +this specification. + +This includes the following features: + +- Namespace-oriented URI Layout +- PUSH/PULL registry server for V2 image manifest format +- Resumable layer PUSH support +- V2 Client library implementation + +While authentication and authorization support will influence this +specification, details of the protocol will be left to a future specification. +Relevant header definitions and error codes are present to provide an +indication of what a client may encounter. + +#### Future + +There are features that have been discussed during the process of cutting this +specification. The following is an incomplete list: + +- Immutable image references +- Multiple architecture support +- Migration from v2compatibility representation + +These may represent features that are either out of the scope of this +specification, the purview of another specification or have been deferred to a +future version. + +### Use Cases + +For the most part, the use cases of the former registry API apply to the new +version. Differentiating use cases are covered below. + +#### Image Verification + +A docker engine instance would like to run verified image named +"library/ubuntu", with the tag "latest". The engine contacts the registry, +requesting the manifest for "library/ubuntu:latest". An untrusted registry +returns a manifest. Before proceeding to download the individual layers, the +engine verifies the manifest's signature, ensuring that the content was +produced from a trusted source and no tampering has occurred. After each layer +is downloaded, the engine verifies the digest of the layer, ensuring that the +content matches that specified by the manifest. + +#### Resumable Push + +Company X's build servers lose connectivity to docker registry before +completing an image layer transfer. After connectivity returns, the build +server attempts to re-upload the image. The registry notifies the build server +that the upload has already been partially attempted. The build server +responds by only sending the remaining data to complete the image file. + +#### Resumable Pull + +Company X is having more connectivity problems but this time in their +deployment datacenter. When downloading an image, the connection is +interrupted before completion. The client keeps the partial data and uses http +`Range` requests to avoid downloading repeated data. + +#### Layer Upload De-duplication + +Company Y's build system creates two identical docker layers from build +processes A and B. Build process A completes uploading the layer before B. +When process B attempts to upload the layer, the registry indicates that its +not necessary because the layer is already known. + +If process A and B upload the same layer at the same time, both operations +will proceed and the first to complete will be stored in the registry (Note: +we may modify this to prevent dogpile with some locking mechanism). + +### Changes + +The V2 specification has been written to work as a living document, specifying +only what is certain and leaving what is not specified open or to future +changes. Only non-conflicting additions should be made to the API and accepted +changes should avoid preventing future changes from happening. + +This section should be updated when changes are made to the specification, +indicating what is different. Optionally, we may start marking parts of the +specification to correspond with the versions enumerated here. + +Each set of changes is given a letter corresponding to a set of modifications +that were applied to the baseline specification. These are merely for +reference and shouldn't be used outside the specification other than to +identify a set of modifications. + +
+
l
+
+
    +
  • Document TOOMANYREQUESTS error code.
  • +
+
+ +
k
+
+
    +
  • Document use of Accept and Content-Type headers in manifests endpoint.
  • +
+
+ +
j
+
+
    +
  • Add ability to mount blobs across repositories.
  • +
+
+ +
i
+
+
    +
  • Clarified expected behavior response to manifest HEAD request.
  • +
+
+ +
h
+
+
    +
  • All mention of tarsum removed.
  • +
+
+ +
g
+
+
    +
  • Clarify behavior of pagination behavior with unspecified parameters.
  • +
+
+ +
f
+
+
    +
  • Specify the delete API for layers and manifests.
  • +
+
+ +
e
+
+
    +
  • Added support for listing registry contents.
  • +
  • Added pagination to tags API.
  • +
  • Added common approach to support pagination.
  • +
+
+ +
d
+
+
    +
  • Allow repository name components to be one character.
  • +
  • Clarified that single component names are allowed.
  • +
+
+ +
c
+
+
    +
  • Added section covering digest format.
  • +
  • Added more clarification that manifest cannot be deleted by tag.
  • +
+
+ +
b
+
+
    +
  • Added capability of doing streaming upload to PATCH blob upload.
  • +
  • Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
  • +
  • Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
  • +
+
+ +
a
+
+
    +
  • Added support for immutable manifest references in manifest endpoints.
  • +
  • Deleting a manifest by tag has been deprecated.
  • +
  • Specified `Docker-Content-Digest` header for appropriate entities.
  • +
  • Added error code for unsupported operations.
  • +
+
+
+ +## Overview + +This section covers client flows and details of the API endpoints. The URI +layout of the new API is structured to support a rich authentication and +authorization model by leveraging namespaces. All endpoints will be prefixed +by the API version and the repository name: + + /v2// + +For example, an API endpoint that will work with the `library/ubuntu` +repository, the URI prefix will be: + + /v2/library/ubuntu/ + +This scheme provides rich access control over various operations and methods +using the URI prefix and http methods that can be controlled in variety of +ways. + +Classically, repository names have always been two path components where each +path component is less than 30 characters. The V2 registry API does not +enforce this. The rules for a repository name are as follows: + +1. A repository name is broken up into _path components_. A component of a + repository name must be at least one lowercase, alpha-numeric characters, + optionally separated by periods, dashes or underscores. More strictly, it + must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. +2. If a repository name has two or more path components, they must be + separated by a forward slash ("/"). +3. The total length of a repository name, including slashes, must be less than + 256 characters. + +These name requirements _only_ apply to the registry API and should accept a +superset of what is supported by other docker ecosystem components. + +All endpoints should support aggressive http caching, compression and range +headers, where appropriate. The new API attempts to leverage HTTP semantics +where possible but may break from standards to implement targeted features. + +For detail on individual endpoints, please see the [_Detail_](#detail) +section. + +### Errors + +Actionable failure conditions, covered in detail in their relevant sections, +are reported as part of 4xx responses, in a json response body. One or more +errors will be returned in the following format: + + { + "errors:" [{ + "code": , + "message": , + "detail": + }, + ... + ] + } + +The `code` field will be a unique identifier, all caps with underscores by +convention. The `message` field will be a human readable string. The optional +`detail` field may contain arbitrary json data providing information the +client can use to resolve the issue. + +While the client can take action on certain error codes, the registry may add +new error codes over time. All client implementations should treat unknown +error codes as `UNKNOWN`, allowing future error codes to be added without +breaking API compatibility. For the purposes of the specification error codes +will only be added and never removed. + +For a complete account of all error codes, please see the [_Errors_](#errors-2) +section. + +### API Version Check + +A minimal endpoint, mounted at `/v2/` will provide version support information +based on its response statuses. The request format is as follows: + + GET /v2/ + +If a `200 OK` response is returned, the registry implements the V2(.1) +registry API and the client may proceed safely with other V2 operations. +Optionally, the response may contain information about the supported paths in +the response body. The client should be prepared to ignore this data. + +If a `401 Unauthorized` response is returned, the client should take action +based on the contents of the "WWW-Authenticate" header and try the endpoint +again. Depending on access control setup, the client may still have to +authenticate against different resources, even if this check succeeds. + +If `404 Not Found` response status, or other unexpected status, is returned, +the client should proceed with the assumption that the registry does not +implement V2 of the API. + +When a `200 OK` or `401 Unauthorized` response is returned, the +"Docker-Distribution-API-Version" header should be set to "registry/2.0". +Clients may require this header value to determine if the endpoint serves this +API. When this header is omitted, clients may fallback to an older API version. + +### Content Digests + +This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). +The core of this design is the concept of a content addressable identifier. It +uniquely identifies content by taking a collision-resistant hash of the bytes. +Such an identifier can be independently calculated and verified by selection +of a common _algorithm_. If such an identifier can be communicated in a secure +manner, one can retrieve the content from an insecure source, calculate it +independently and be certain that the correct content was obtained. Put simply, +the identifier is a property of the content. + +To disambiguate from other concepts, we call this identifier a _digest_. A +_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ +portion. The _algorithm_ identifies the methodology used to calculate the +digest. The _hex_ portion is the hex-encoded result of the hash. + +We define a _digest_ string to match the following grammar: +``` +digest := algorithm ":" hex +algorithm := /[A-Fa-f0-9_+.-]+/ +hex := /[A-Fa-f0-9]+/ +``` + +Some examples of _digests_ include the following: + +digest | description | +----------------------------------------------------------------------------------|------------------------------------------------ +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | + +While the _algorithm_ does allow one to implement a wide variety of +algorithms, compliant implementations should use sha256. Heavy processing of +input before calculating a hash is discouraged to avoid degrading the +uniqueness of the _digest_ but some canonicalization may be performed to +ensure consistent identifiers. + +Let's use a simple example in pseudo-code to demonstrate a digest calculation: +``` +let C = 'a small string' +let B = sha256(C) +let D = 'sha256:' + EncodeHex(B) +let ID(C) = D +``` + +Above, we have bytestring `C` passed into a function, `SHA256`, that returns a +bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated +with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` +as equal to `D`. A digest can be verified by independently calculating `D` and +comparing it with identifier `ID(C)`. + +#### Digest Header + +To provide verification of http content, any response may include a +`Docker-Content-Digest` header. This will include the digest of the target +entity returned in the response. For blobs, this is the entire blob content. For +manifests, this is the manifest body without the signature content, also known +as the JWS payload. Note that the commonly used canonicalization for digest +calculation may be dependent on the mediatype of the content, such as with +manifests. + +The client may choose to ignore the header or may verify it to ensure content +integrity and transport security. This is most important when fetching by a +digest. To ensure security, the content should be verified against the digest +used to fetch the content. At times, the returned digest may differ from that +used to initiate a request. Such digests are considered to be from different +_domains_, meaning they have different values for _algorithm_. In such a case, +the client may choose to verify the digests in both domains or ignore the +server's digest. To maintain security, the client _must_ always verify the +content against the _digest_ used to fetch the content. + +> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use +> the same digest used to fetch the content to verify it. The header +> `Docker-Content-Digest` should not be trusted over the "local" digest. + +### Pulling An Image + +An "image" is a combination of a JSON manifest and individual layer files. The +process of pulling an image centers around retrieving these two components. + +The first step in pulling an image is to retrieve the manifest. For reference, +the relevant manifest fields for the registry are the following: + + field | description | +----------|------------------------------------------------| +name | The name of the image. | +tag | The tag for this version of the image. | +fsLayers | A list of layer descriptors (including digest) | +signature | A JWS used to verify the manifest content | + +For more information about the manifest format, please see +[docker/docker#8093](https://github.com/docker/docker/issues/8093). + +When the manifest is in hand, the client must verify the signature to ensure +the names and layers are valid. Once confirmed, the client will then use the +digests to download the individual layers. Layers are stored in as blobs in +the V2 registry API, keyed by their digest. + +#### Pulling an Image Manifest + +The image manifest can be fetched with the following url: + +``` +GET /v2//manifests/ +``` + +The `name` and `reference` parameter identify the image and are required. The +reference may include a tag or digest. + +The client should include an Accept header indicating which manifest content +types it supports. For more details on the manifest formats and their content +types, see [manifest-v2-1.md](manifest-v2-1.md) and +[manifest-v2-2.md](manifest-v2-2.md). In a successful response, the Content-Type +header will indicate which manifest type is being returned. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful, the image +manifest will be returned, with the following format (see +[docker/docker#8093](https://github.com/docker/docker/issues/8093) for details): + + { + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": + } + +The client should verify the returned manifest signature for authenticity +before fetching layers. + +##### Existing Manifests + +The image manifest can be checked for existence with the following url: + +``` +HEAD /v2//manifests/ +``` + +The `name` and `reference` parameter identify the image and are required. The +reference may include a tag or digest. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful the response will +be as follows: + +``` +200 OK +Content-Length: +Docker-Content-Digest: +``` + + +#### Pulling a Layer + +Layers are stored in the blob portion of the registry, keyed by digest. +Pulling a layer is carried out by a standard http request. The URL is as +follows: + + GET /v2//blobs/ + +Access to a layer will be gated by the `name` of the repository but is +identified uniquely in the registry by `digest`. + +This endpoint may issue a 307 (302 for /blobs/uploads/ +``` + +The parameters of this request are the image namespace under which the layer +will be linked. Responses to this request are covered below. + +##### Existing Layers + +The existence of a layer can be checked via a `HEAD` request to the blob store +API. The request should be formatted as follows: + +``` +HEAD /v2//blobs/ +``` + +If the layer with the digest specified in `digest` is available, a 200 OK +response will be received, with no actual body content (this is according to +http specification). The response will look as follows: + +``` +200 OK +Content-Length: +Docker-Content-Digest: +``` + +When this response is received, the client can assume that the layer is +already available in the registry under the given name and should take no +further action to upload the layer. Note that the binary digests may differ +for the existing registry layer, but the digests will be guaranteed to match. + +##### Uploading the Layer + +If the POST request is successful, a `202 Accepted` response will be returned +with the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The rest of the upload process can be carried out with the returned url, +called the "Upload URL" from the `Location` header. All responses to the +upload url, whether sending data or getting status, will be in this format. +Though the URI format (`/v2//blobs/uploads/`) for the `Location` +header is specified, clients should treat it as an opaque url and should never +try to assemble it. While the `uuid` parameter may be an actual UUID, this +proposal imposes no constraints on the format and clients should never impose +any. + +If clients need to correlate local upload state with remote upload state, the +contents of the `Docker-Upload-UUID` header should be used. Such an id can be +used to key the last used location header when implementing resumable uploads. + +##### Upload Progress + +The progress and chunk coordination of the upload process will be coordinated +through the `Range` header. While this is a non-standard use of the `Range` +header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. +For an upload that just started, for an example with a 1000 byte layer file, +the `Range` header would be as follows: + +``` +Range: bytes=0-0 +``` + +To get the status of an upload, issue a GET request to the upload URL: + +``` +GET /v2//blobs/uploads/ +Host: +``` + +The response will be similar to the above, except will return 204 status: + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: bytes=0- +Docker-Upload-UUID: +``` + +Note that the HTTP `Range` header byte ranges are inclusive and that will be +honored, even in non-standard use cases. + +##### Monolithic Upload + +A monolithic upload is simply a chunked upload with a single chunk and may be +favored by clients that would like to avoided the complexity of chunking. To +carry out a "monolithic" upload, one can simply put the entire content blob to +the provided URL: + +``` +PUT /v2//blobs/uploads/?digest= +Content-Length: +Content-Type: application/octet-stream + + +``` + +The "digest" parameter must be included with the PUT request. Please see the +[_Completed Upload_](#completed-upload) section for details on the parameters +and expected responses. + +##### Chunked Upload + +To carry out an upload of a chunk, the client can specify a range header and +only include that part of the layer file: + +``` +PATCH /v2//blobs/uploads/ +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +There is no enforcement on layer chunk splits other than that the server must +receive them in order. The server may enforce a minimum chunk size. If the +server cannot accept the chunk, a `416 Requested Range Not Satisfiable` +response will be returned and will include a `Range` header indicating the +current status: + +``` +416 Requested Range Not Satisfiable +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +If this response is received, the client should resume from the "last valid +range" and upload the subsequent chunk. A 416 will be returned under the +following conditions: + +- Invalid Content-Range header format +- Out of order chunk: the range of the next chunk must start immediately after + the "last valid range" from the previous response. + +When a chunk is accepted as part of the upload, a `202 Accepted` response will +be returned, including a `Range` header with the current upload status: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +##### Completed Upload + +For an upload to be considered complete, the client must submit a `PUT` +request on the upload endpoint with a digest parameter. If it is not provided, +the upload will not be considered complete. The format for the final chunk +will be as follows: + +``` +PUT /v2//blob/uploads/?digest= +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +Optionally, if all chunks have already been uploaded, a `PUT` request with a +`digest` parameter and zero-length body may be sent to complete and validated +the upload. Multiple "digest" parameters may be provided with different +digests. The server may verify none or all of them but _must_ notify the +client if the content is rejected. + +When the last chunk is received and the layer has been validated, the client +will receive a `201 Created` response: + +``` +201 Created +Location: /v2//blobs/ +Content-Length: 0 +Docker-Content-Digest: +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob which may differ from the provided digest. Most clients may +ignore the value but if it is used, the client should verify the value against +the uploaded blob data. + +###### Digest Parameter + +The "digest" parameter is designed as an opaque parameter to support +verification of a successful transfer. For example, an HTTP URI parameter +might be as follows: + +``` +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b +``` + +Given this parameter, the registry will verify that the provided content does +match this digest. + +##### Canceling an Upload + +An upload can be cancelled by issuing a DELETE request to the upload endpoint. +The format will be as follows: + +``` +DELETE /v2//blobs/uploads/ +``` + +After this request is issued, the upload uuid will no longer be valid and the +registry server will dump all intermediate data. While uploads will time out +if not completed, clients should issue this request if they encounter a fatal +error but still have the ability to issue an http request. + +##### Cross Repository Blob Mount + +A blob may be mounted from another repository that the client has read access +to, removing the need to upload a blob already known to the registry. To issue +a blob mount instead of an upload, a POST request should be issued in the +following format: + +``` +POST /v2//blobs/uploads/?mount=&from= +Content-Length: 0 +``` + +If the blob is successfully mounted, the client will receive a `201 Created` +response: + +``` +201 Created +Location: /v2//blobs/ +Content-Length: 0 +Docker-Content-Digest: +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob which may differ from the provided digest. Most clients may +ignore the value but if it is used, the client should verify the value against +the uploaded blob data. + +If a mount fails due to invalid repository or digest arguments, the registry +will fall back to the standard upload behavior and return a `202 Accepted` with +the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +This behavior is consistent with older versions of the registry, which do not +recognize the repository mount query parameters. + +Note: a client may issue a HEAD request to check existence of a blob in a source +repository to distinguish between the registry not supporting blob mounts and +the blob not existing in the expected repository. + +##### Errors + +If an 502, 503 or 504 error is received, the client should assume that the +download can proceed due to a temporary condition, honoring the appropriate +retry mechanism. Other 5xx errors should be treated as terminal. + +If there is a problem with the upload, a 4xx error will be returned indicating +the problem. After receiving a 4xx response (except 416, as called out above), +the upload will be considered failed and the client should take appropriate +action. + +Note that the upload url will not be available forever. If the upload uuid is +unknown to the registry, a `404 Not Found` response will be returned and the +client must restart the upload process. + +### Deleting a Layer + +A layer may be deleted from the registry via its `name` and `digest`. A +delete may be issued with the following request format: + + DELETE /v2//blobs/ + +If the blob exists and has been successfully deleted, the following response +will be issued: + + 202 Accepted + Content-Length: None + +If the blob had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +If a layer is deleted which is referenced by a manifest in the registry, +then the complete images will not be resolvable. + +#### Pushing an Image Manifest + +Once all of the layers for an image are uploaded, the client can upload the +image manifest. An image can be pushed using the following request format: + + PUT /v2//manifests/ + Content-Type: + + { + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": , + ... + } + +The `name` and `reference` fields of the response body must match those +specified in the URL. The `reference` field may be a "tag" or a "digest". The +content type should match the type of the manifest being uploaded, as specified +in [manifest-v2-1.md](manifest-v2-1.md) and [manifest-v2-2.md](manifest-v2-2.md). + +If there is a problem with pushing the manifest, a relevant 4xx response will +be returned with a JSON error message. Please see the +[_PUT Manifest_](#put-manifest) section for details on possible error codes that +may be returned. + +If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are +returned. The `detail` field of the error response will have a `digest` field +identifying the missing blob. An error is returned for each unknown blob. The +response format is as follows: + + { + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": + } + }, + ... + ] + } + +### Listing Repositories + +Images are stored in collections, known as a _repository_, which is keyed by a +`name`, as seen throughout the API specification. A registry instance may +contain several repositories. The list of available repositories is made +available through the _catalog_. + +The catalog for a given registry can be retrieved with the following request: + +``` +GET /v2/_catalog +``` + +The response will be in the following format: + +``` +200 OK +Content-Type: application/json + +{ + "repositories": [ + , + ... + ] +} +``` + +Note that the contents of the response are specific to the registry +implementation. Some registries may opt to provide a full catalog output, +limit it based on the user's access level or omit upstream results, if +providing mirroring functionality. Subsequently, the presence of a repository +in the catalog listing only means that the registry *may* provide access to +the repository at the time of the request. Conversely, a missing entry does +*not* mean that the registry does not have the repository. More succinctly, +the presence of a repository only guarantees that it is there but not that it +is _not_ there. + +For registries with a large number of repositories, this response may be quite +large. If such a response is expected, one should use pagination. A registry +may also limit the amount of responses returned even if pagination was not +explicitly requested. In this case the `Link` header will be returned along +with the results, and subsequent results can be obtained by following the link +as if pagination had been initially requested. + +For details of the `Link` header, please see the [_Pagination_](#pagination) +section. + +#### Pagination + +Paginated catalog results can be retrieved by adding an `n` parameter to the +request URL, declaring that the response should be limited to `n` results. +Starting a paginated flow begins as follows: + +``` +GET /v2/_catalog?n= +``` + +The above specifies that a catalog response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "repositories": [ + , + ... + ] +} +``` + +The above includes the _first_ `n` entries from the result set. To get the +_next_ `n` entries, one can create a URL where the argument `last` has the +value from `repositories[len(repositories)-1]`. If there are indeed more +results, the URL for the next block is encoded in an +[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" +relation. The presence of the `Link` header communicates to the client that +the entire result set has not been returned and another request must be +issued. If the header is not present, the client can assume that all results +have been received. + +> __NOTE:__ In the request template above, note that the brackets +> are required. For example, if the url is +> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would +> be `; rel="next"`. Please see +> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. + +Compliant client implementations should always use the `Link` header +value when proceeding through results linearly. The client may construct URLs +to skip forward in the catalog. + +To get the next result set, a client would issue the request as follows, using +the URL encoded in the described `Link` header: + +``` +GET /v2/_catalog?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set. + +The catalog result set is represented abstractly as a lexically sorted list, +where the position in that list can be specified by the query term `last`. The +entries in the response start _after_ the term specified by `last`, up to `n` +entries. + +The behavior of `last` is quite simple when demonstrated with an example. Let +us say the registry has the following repositories: + +``` +a +b +c +d +``` + +If the value of `n` is 2, _a_ and _b_ will be returned on the first response. +The `Link` header returned on the response will have `n` set to 2 and last set +to _b_: + +``` +Link: <?n=2&last=b>; rel="next" +``` + +The client can then issue the request with the above value from the `Link` +header, receiving the values _c_ and _d_. Note that `n` may change on the second +to last response or be fully omitted, depending on the server implementation. + +### Listing Image Tags + +It may be necessary to list all of the tags under a given repository. The tags +for an image repository can be retrieved with the following request: + + GET /v2//tags/list + +The response will be in the following format: + + 200 OK + Content-Type: application/json + + { + "name": , + "tags": [ + , + ... + ] + } + +For repositories with a large number of tags, this response may be quite +large. If such a response is expected, one should use the pagination. + +#### Pagination + +Paginated tag results can be retrieved by adding the appropriate parameters to +the request URL described above. The behavior of tag pagination is identical +to that specified for catalog pagination. We cover a simple flow to highlight +any differences. + +Starting a paginated flow may begin as follows: + +``` +GET /v2//tags/list?n= +``` + +The above specifies that a tags response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +To get the next result set, a client would issue the request as follows, using +the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` +header: + +``` +GET /v2//tags/list?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set in the response. The behavior of the `last` parameter, the provided +response result, lexical ordering and encoding of the `Link` header are +identical to that of catalog pagination. + +### Deleting an Image + +An image may be deleted from the registry via its `name` and `reference`. A +delete may be issued with the following request format: + + DELETE /v2//manifests/ + +For deletes, `reference` *must* be a digest or the delete will fail. If the +image exists and has been successfully deleted, the following response will be +issued: + + 202 Accepted + Content-Length: None + +If the image had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +> **Note** When deleting a manifest from a registry version 2.3 or later, the +> following header must be used when `HEAD` or `GET`-ing the manifest to obtain +> the correct digest to delete: + + Accept: application/vnd.docker.distribution.manifest.v2+json + +> for more details, see: [compatibility.md](../compatibility.md#content-addressable-storage-cas) + +## Detail + +> **Note**: This section is still under construction. For the purposes of +> implementation, if any details below differ from the described request flows +> above, the section below should be corrected. When they match, this note +> should be removed. + +The behavior of the endpoints are covered in detail in this section, organized +by route and entity. All aspects of the request and responses are covered, +including headers, parameters and body formats. Examples of requests and their +corresponding responses, with success and failure, are enumerated. + +> **Note**: The sections on endpoint detail are arranged with an example +> request, a description of the request, followed by information about that +> request. + +A list of methods and URIs are covered in the table below: + +|Method|Path|Entity|Description| +|------|----|------|-----------| +{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} | +{{end}}{{end}} + +The detail for each endpoint is covered in the following sections. + +### Errors + +The error codes encountered via the API are enumerated in the following table: + +|Code|Message|Description| +|----|-------|-----------| +{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} +{{end}} + +{{range $route := .RouteDescriptors}} +### {{.Entity}} + +{{.Description}} + +{{range $method := $route.Methods}} + +#### {{.Method}} {{$route.Entity}} + +{{.Description}} + +{{if .Requests}}{{range .Requests}}{{if .Name}} +##### {{.Name}}{{end}} + +``` +{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}} +{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} +Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} + +{{.Body.Format}}{{end}} +``` + +{{.Description}} + +{{if or .Headers .PathParameters .QueryParameters}} +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +{{range .Headers}}|`{{.Name}}`|header|{{.Description}}| +{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}| +{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}| +{{end}}{{end}} + +{{if .Successes}} +{{range .Successes}} +###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} + +``` +{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} +{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} +Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} + +{{.Body.Format}}{{end}} +``` + +{{.Description}} +{{if .Fields}}The following fields may be returned in the response body: + +|Name|Description| +|----|-----------| +{{range .Fields}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}}{{if .Headers}} +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +{{range .Headers}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}}{{end}}{{end}} + +{{if .Failures}} +{{range .Failures}} +###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} + +``` +{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} +{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} +Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} + +{{.Body.Format}}{{end}} +``` + +{{.Description}} +{{if .Headers}} +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +{{range .Headers}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}} + +{{if .ErrorCodes}} +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +{{range $err := .ErrorCodes}}| `{{$err.Descriptor.Value}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} | +{{end}} + +{{end}}{{end}}{{end}}{{end}}{{end}}{{end}} + +{{end}} diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/index.md b/vendor/github.com/docker/distribution/docs/spec/auth/index.md new file mode 100644 index 0000000..d1aa942 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/index.md @@ -0,0 +1,12 @@ +--- +title: "Docker Registry Token Authentication" +description: "Docker Registry v2 authentication schema" +keywords: ["registry, on-prem, images, tags, repository, distribution, authentication, advanced"] +--- + +# Docker Registry v2 authentication + +See the [Token Authentication Specification](token.md), +[Token Authentication Implementation](jwt.md), +[Token Scope Documentation](scope.md), +[OAuth2 Token Authentication](oauth.md) for more information. diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md b/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md new file mode 100644 index 0000000..aa9941b --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md @@ -0,0 +1,329 @@ +--- +title: "Token Authentication Implementation" +description: "Describe the reference implementation of the Docker Registry v2 authentication schema" +keywords: ["registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced"] +--- + +# Docker Registry v2 Bearer token specification + +This specification covers the `docker/distribution` implementation of the +v2 Registry's authentication schema. Specifically, it describes the JSON +Web Token schema that `docker/distribution` has adopted to implement the +client-opaque Bearer token issued by an authentication service and +understood by the registry. + +This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) + +## Getting a Bearer Token + +For this example, the client makes an HTTP GET request to the following URL: + +``` +https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push +``` + +The token server should first attempt to authenticate the client using any +authentication credentials provided with the request. As of Docker 1.8, the +registry client in the Docker Engine only supports Basic Authentication to +these token servers. If an attempt to authenticate to the token server fails, +the token server should return a `401 Unauthorized` response indicating that +the provided credentials are invalid. + +Whether the token server requires authentication is up to the policy of that +access control provider. Some requests may require authentication to determine +access (such as pushing or pulling a private repository) while others may not +(such as pulling from a public repository). + +After authenticating the client (which may simply be an anonymous client if +no attempt was made to authenticate), the token server must next query its +access control list to determine whether the client has the requested scope. In +this example request, if I have authenticated as user `jlhawn`, the token +server will determine what access I have to the repository `samalba/my-app` +hosted by the entity `registry.docker.io`. + +Once the token server has determined what access the client has to the +resources requested in the `scope` parameter, it will take the intersection of +the set of requested actions on each resource and the set of actions that the +client has in fact been granted. If the client only has a subset of the +requested access **it must not be considered an error** as it is not the +responsibility of the token server to indicate authorization errors as part of +this workflow. + +Continuing with the example request, the token server will find that the +client's set of granted access to the repository is `[pull, push]` which when +intersected with the requested access `[pull, push]` yields an equal set. If +the granted access set was found only to be `[pull]` then the intersected set +would only be `[pull]`. If the client has no access to the repository then the +intersected set would be empty, `[]`. + +It is this intersected set of access which is placed in the returned token. + +The server will now construct a JSON Web Token to sign and return. A JSON Web +Token has 3 main parts: + +1. Headers + + The header of a JSON Web Token is a standard JOSE header. The "typ" field + will be "JWT" and it will also contain the "alg" which identifies the + signing algorithm used to produce the signature. It also must have a "kid" + field, representing the ID of the key which was used to sign the token. + + The "kid" field has to be in a libtrust fingerprint compatible format. + Such a format can be generated by following steps: + + 1. Take the DER encoded public key which the JWT token was signed against. + + 2. Create a SHA256 hash out of it and truncate to 240bits. + + 3. Split the result into 12 base32 encoded groups with `:` as delimiter. + + Here is an example JOSE Header for a JSON Web Token (formatted with + whitespace for readability): + + ``` + { + "typ": "JWT", + "alg": "ES256", + "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6" + } + ``` + + It specifies that this object is going to be a JSON Web token signed using + the key with the given ID using the Elliptic Curve signature algorithm + using a SHA256 hash. + +2. Claim Set + + The Claim Set is a JSON struct containing these standard registered claim + name fields: + +
+
+ iss (Issuer) +
+
+ The issuer of the token, typically the fqdn of the authorization + server. +
+
+ sub (Subject) +
+
+ The subject of the token; the name or id of the client which + requested it. This should be empty (`""`) if the client did not + authenticate. +
+
+ aud (Audience) +
+
+ The intended audience of the token; the name or id of the service + which will verify the token to authorize the client/subject. +
+
+ exp (Expiration) +
+
+ The token should only be considered valid up to this specified date + and time. +
+
+ nbf (Not Before) +
+
+ The token should not be considered valid before this specified date + and time. +
+
+ iat (Issued At) +
+
+ Specifies the date and time which the Authorization server + generated this token. +
+
+ jti (JWT ID) +
+
+ A unique identifier for this token. Can be used by the intended + audience to prevent replays of the token. +
+
+ + The Claim Set will also contain a private claim name unique to this + authorization server specification: + +
+
+ access +
+
+ An array of access entry objects with the following fields: + +
+
+ type +
+
+ The type of resource hosted by the service. +
+
+ name +
+
+ The name of the resource of the given type hosted by the + service. +
+
+ actions +
+
+ An array of strings which give the actions authorized on + this resource. +
+
+
+
+ + Here is an example of such a JWT Claim Set (formatted with whitespace for + readability): + + ``` + { + "iss": "auth.docker.com", + "sub": "jlhawn", + "aud": "registry.docker.com", + "exp": 1415387315, + "nbf": 1415387015, + "iat": 1415387015, + "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws", + "access": [ + { + "type": "repository", + "name": "samalba/my-app", + "actions": [ + "pull", + "push" + ] + } + ] + } + ``` + +3. Signature + + The authorization server will produce a JOSE header and Claim Set with no + extraneous whitespace, i.e., the JOSE Header from above would be + + ``` + {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"} + ``` + + and the Claim Set from above would be + + ``` + {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push","pull"]}]} + ``` + + The utf-8 representation of this JOSE header and Claim Set are then + url-safe base64 encoded (sans trailing '=' buffer), producing: + + ``` + eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0 + ``` + + for the JOSE Header and + + ``` + eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 + ``` + + for the Claim Set. These two are concatenated using a '.' character, + yielding the string: + + ``` + eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 + ``` + + This is then used as the payload to a the `ES256` signature algorithm + specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA) + draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4) + + This example signature will use the following ECDSA key for the server: + + ``` + { + "kty": "EC", + "crv": "P-256", + "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6", + "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA", + "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q", + "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc" + } + ``` + + A resulting signature of the above payload using this key is: + + ``` + QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w + ``` + + Concatenating all of these together with a `.` character gives the + resulting JWT: + + ``` + eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w + ``` + +This can now be placed in an HTTP response and returned to the client to use to +authenticate to the audience service: + + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"} +``` + +## Using the signed token + +Once the client has a token, it will try the registry request again with the +token placed in the HTTP `Authorization` header like so: + +``` +Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw +``` + +This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) + +## Verifying the token + +The registry must now verify the token presented by the user by inspecting the +claim set within. The registry will: + +- Ensure that the issuer (`iss` claim) is an authority it trusts. +- Ensure that the registry identifies as the audience (`aud` claim). +- Check that the current time is between the `nbf` and `exp` claim times. +- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has + not been seen before. + - To enforce this, the registry may keep a record of `jti`s it has seen for + up to the `exp` time of the token to prevent token replays. +- Check the `access` claim value and use the identified resources and the list + of actions authorized to determine whether the token grants the required + level of access for the operation the client is attempting to perform. +- Verify that the signature of the token is valid. + +If any of these requirements are not met, the registry will return a +`403 Forbidden` response to indicate that the token is invalid. + +**Note**: it is only at this point in the workflow that an authorization error +may occur. The token server should *not* return errors when the user does not +have the requested authorization. Instead, the returned token should indicate +whatever of the requested scope the client does have (the intersection of +requested and granted access). If the token does not supply proper +authorization then the registry will return the appropriate error. + +At no point in this process should the registry need to call back to the +authorization server. The registry only needs to be supplied with the trusted +public keys to verify the token signatures. diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md b/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md new file mode 100644 index 0000000..d946da8 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md @@ -0,0 +1,190 @@ +--- +title: "Oauth2 Token Authentication" +description: "Specifies the Docker Registry v2 authentication" +keywords: ["registry, on-prem, images, tags, repository, distribution, oauth2, advanced"] +--- + +# Docker Registry v2 authentication using OAuth2 + +This document describes support for the OAuth2 protocol within the authorization +server. [RFC6749](https://tools.ietf.org/html/rfc6749) should be used as a +reference for the protocol and HTTP endpoints described here. + +**Note**: Not all token servers implement oauth2. If the request to the endpoint +returns `404` using the HTTP `POST` method, refer to +[Token Documentation](token.md) for using the HTTP `GET` method supported by all +token servers. + +## Refresh token format + +The format of the refresh token is completely opaque to the client and should be +determined by the authorization server. The authorization should ensure the +token is sufficiently long and is responsible for storing any information about +long-lived tokens which may be needed for revoking. Any information stored +inside the token will not be extracted and presented by clients. + +## Getting a token + +POST /token + +#### Headers +Content-Type: application/x-www-form-urlencoded + +#### Post parameters + +
+
+ grant_type +
+
+ (REQUIRED) Type of grant used to get token. When getting a refresh token + using credentials this type should be set to "password" and have the + accompanying username and password paramters. Type "authorization_code" + is reserved for future use for authenticating to an authorization server + without having to send credentials directly from the client. When + requesting an access token with a refresh token this should be set to + "refresh_token". +
+
+ service +
+
+ (REQUIRED) The name of the service which hosts the resource to get + access for. Refresh tokens will only be good for getting tokens for + this service. +
+
+ client_id +
+
+ (REQUIRED) String identifying the client. This client_id does not need + to be registered with the authorization server but should be set to a + meaningful value in order to allow auditing keys created by unregistered + clients. Accepted syntax is defined in + [RFC6749 Appendix A.1](https://tools.ietf.org/html/rfc6749#appendix-A.1) +
+
+ access_type +
+
+ (OPTIONAL) Access which is being requested. If "offline" is provided + then a refresh token will be returned. The default is "online" only + returning short lived access token. If the grant type is "refresh_token" + this will only return the same refresh token and not a new one. +
+
+ scope +
+
+ (OPTIONAL) The resource in question, formatted as one of the space-delimited + entries from the scope parameters from the WWW-Authenticate header + shown above. This query parameter should only be specified once but may + contain multiple scopes using the scope list format defined in the scope + grammar. If multiple scope is provided from + WWW-Authenticate header the scopes should first be + converted to a scope list before requesting the token. The above example + would be specified as: scope=repository:samalba/my-app:push. + When requesting a refresh token the scopes may be empty since the + refresh token will not be limited by this scope, only the provided short + lived access token will have the scope limitation. +
+
+ refresh_token +
+
+ (OPTIONAL) The refresh token to use for authentication when grant type "refresh_token" is used. +
+
+ username +
+
+ (OPTIONAL) The username to use for authentication when grant type "password" is used. +
+
+ password +
+
+ (OPTIONAL) The password to use for authentication when grant type "password" is used. +
+
+ +#### Response fields + +
+
+ access_token +
+
+ (REQUIRED) An opaque Bearer token that clients should + supply to subsequent requests in the Authorization header. + This token should not be attempted to be parsed or understood by the + client but treated as opaque string. +
+
+ scope +
+
+ (REQUIRED) The scope granted inside the access token. This may be the + same scope as requested or a subset. This requirement is stronger than + specified in [RFC6749 Section 4.2.2](https://tools.ietf.org/html/rfc6749#section-4.2.2) + by strictly requiring the scope in the return value. +
+
+ expires_in +
+
+ (REQUIRED) The duration in seconds since the token was issued that it + will remain valid. When omitted, this defaults to 60 seconds. For + compatibility with older clients, a token should never be returned with + less than 60 seconds to live. +
+
+ issued_at +
+
+ (Optional) The RFC3339-serialized UTC + standard time at which a given token was issued. If issued_at is omitted, the + expiration is from when the token exchange completed. +
+
+ refresh_token +
+
+ (Optional) Token which can be used to get additional access tokens for + the same subject with different scopes. This token should be kept secure + by the client and only sent to the authorization server which issues + bearer tokens. This field will only be set when `access_type=offline` is + provided in the request. +
+
+ + +#### Example getting refresh token + +``` +POST /token HTTP/1.1 +Host: auth.docker.io +Content-Type: application/x-www-form-urlencoded + +grant_type=password&username=johndoe&password=A3ddj3w&service=hub.docker.io&client_id=dockerengine&access_type=offline + +HTTP/1.1 200 OK +Content-Type: application/json + +{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":""} +``` + +#### Example refreshing an Access Token + +``` +POST /token HTTP/1.1 +Host: auth.docker.io +Content-Type: application/x-www-form-urlencoded + +grant_type=refresh_token&refresh_token=kas9Da81Dfa8&service=registry-1.docker.io&client_id=dockerengine&scope=repository:samalba/my-app:pull,push + +HTTP/1.1 200 OK +Content-Type: application/json + +{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5":"expires_in":900,"scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"} +``` diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/scope.md b/vendor/github.com/docker/distribution/docs/spec/auth/scope.md new file mode 100644 index 0000000..6ef61ed --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/scope.md @@ -0,0 +1,148 @@ +--- +title: "Token Scope Documentation" +description: "Describes the scope and access fields used for registry authorization tokens" +keywords: ["registry, on-prem, images, tags, repository, distribution, advanced, access, scope"] +--- + +# Docker Registry Token Scope and Access + +Tokens used by the registry are always restricted what resources they may +be used to access, where those resources may be accessed, and what actions +may be done on those resources. Tokens always have the context of a user which +the token was originally created for. This document describes how these +restrictions are represented and enforced by the authorization server and +resource providers. + +## Scope Components + +### Subject (Authenticated User) + +The subject represents the user for which a token is valid. Any actions +performed using an access token should be considered on behalf of the subject. +This is included in the `sub` field of access token JWT. A refresh token should +be limited to a single subject and only be able to give out access tokens for +that subject. + +### Audience (Resource Provider) + +The audience represents a resource provider which is intended to be able to +perform the actions specified in the access token. Any resource provider which +does not match the audience should not use that access token. The audience is +included in the `aud` field of the access token JWT. A refresh token should be +limited to a single audience and only be able to give out access tokens for that +audience. + +### Resource Type + +The resource type represents the type of resource which the resource name is +intended to represent. This type may be specific to a resource provider but must +be understood by the authorization server in order to validate the subject +is authorized for a specific resource. + +#### Resource Class + +The resource type might have a resource class which further classifies the +the resource name within the resource type. A class is not required and +is specific to the resource type. + +#### Example Resource Types + + - `repository` - represents a single repository within a registry. A +repository may represent many manifest or content blobs, but the resource type +is considered the collections of those items. Actions which may be performed on +a `repository` are `pull` for accessing the collection and `push` for adding to +it. By default the `repository` type has the class of `image`. + - `repository(plugin)` - represents a single repository of plugins within a +registry. A plugin repository has the same content and actions as a repository. + - `registry` - represents the entire registry. Used for administrative actions +or lookup operations that span an entire registry. + +### Resource Name + +The resource name represent the name which identifies a resource for a resource +provider. A resource is identified by this name and the provided resource type. +An example of a resource name would be the name component of an image tag, such +as "samalba/myapp" or "hostname/samalba/myapp". + +### Resource Actions + +The resource actions define the actions which the access token allows to be +performed on the identified resource. These actions are type specific but will +normally have actions identifying read and write access on the resource. Example +for the `repository` type are `pull` for read access and `push` for write +access. + +## Authorization Server Use + +Each access token request may include a scope and an audience. The subject is +always derived from the passed in credentials or refresh token. When using +a refresh token the passed in audience must match the audience defined for +the refresh token. The audience (resource provider) is provided using the +`service` field. Multiple resource scopes may be provided using multiple `scope` +fields on the `GET` request. The `POST` request only takes in a single +`scope` field but may use a space to separate a list of multiple resource +scopes. + +### Resource Scope Grammar + +``` +scope := resourcescope [ ' ' resourcescope ]* +resourcescope := resourcetype ":" resourcename ":" action [ ',' action ]* +resourcetype := resourcetypevalue [ '(' resourcetypevalue ')' ] +resourcetypevalue := /[a-z0-9]+/ +resourcename := [ hostname '/' ] component [ '/' component ]* +hostname := hostcomponent ['.' hostcomponent]* [':' port-number] +hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +port-number := /[0-9]+/ +action := /[a-z]*/ +component := alpha-numeric [ separator alpha-numeric ]* +alpha-numeric := /[a-z0-9]+/ +separator := /[_.]|__|[-]*/ +``` +Full reference grammar is defined +[here](https://godoc.org/github.com/docker/distribution/reference). Currently +the scope name grammar is a subset of the reference grammar. + +> **NOTE:** that the `resourcename` may contain one `:` due to a possible port +> number in the hostname component of the `resourcename`, so a naive +> implementation that interprets the first three `:`-delimited tokens of a +> `scope` to be the `resourcetype`, `resourcename`, and a list of `action` +> would be insufficient. + +## Resource Provider Use + +Once a resource provider has verified the authenticity of the scope through +JWT access token verification, the resource provider must ensure that scope +satisfies the request. The resource provider should match the given audience +according to name or URI the resource provider uses to identify itself. Any +denial based on subject is not defined here and is up to resource provider, the +subject is mainly provided for audit logs and any other user-specific rules +which may need to be provided but are not defined by the authorization server. + +The resource provider must ensure that ANY resource being accessed as the +result of a request has the appropriate access scope. Both the resource type +and resource name must match the accessed resource and an appropriate action +scope must be included. + +When appropriate authorization is not provided either due to lack of scope +or missing token, the resource provider to return a `WWW-AUTHENTICATE` HTTP +header with the `realm` as the authorization server, the `service` as the +expected audience identifying string, and a `scope` field for each required +resource scope to complete the request. + +## JWT Access Tokens + +Each JWT access token may only have a single subject and audience but multiple +resource scopes. The subject and audience are put into standard JWT fields +`sub` and `aud`. The resource scope is put into the `access` field. The +structure of the access field can be seen in the +[jwt documentation](jwt.md). + +## Refresh Tokens + +A refresh token must be defined for a single subject and audience. Further +restricting scope to specific type, name, and actions combinations should be +done by fetching an access token using the refresh token. Since the refresh +token is not scoped to specific resources for an audience, extra care should +be taken to only use the refresh token to negotiate new access tokens directly +with the authorization server, and never with a resource provider. diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/token.md b/vendor/github.com/docker/distribution/docs/spec/auth/token.md new file mode 100644 index 0000000..f8391bd --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/token.md @@ -0,0 +1,250 @@ +--- +title: "Token Authentication Specification" +description: "Specifies the Docker Registry v2 authentication" +keywords: ["registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced"] +--- + +# Docker Registry v2 authentication via central service + +This document outlines the v2 Docker registry authentication scheme: + +![v2 registry auth](../../images/v2-registry-auth.png) + +1. Attempt to begin a push/pull operation with the registry. +2. If the registry requires authorization it will return a `401 Unauthorized` + HTTP response with information on how to authenticate. +3. The registry client makes a request to the authorization service for a + Bearer token. +4. The authorization service returns an opaque Bearer token representing the + client's authorized access. +5. The client retries the original request with the Bearer token embedded in + the request's Authorization header. +6. The Registry authorizes the client by validating the Bearer token and the + claim set embedded within it and begins the push/pull session as usual. + +## Requirements + +- Registry clients which can understand and respond to token auth challenges + returned by the resource server. +- An authorization server capable of managing access controls to their + resources hosted by any given service (such as repositories in a Docker + Registry). +- A Docker Registry capable of trusting the authorization server to sign tokens + which clients can use for authorization and the ability to verify these + tokens for single use or for use during a sufficiently short period of time. + +## Authorization Server Endpoint Descriptions + +The described server is meant to serve as a standalone access control manager +for resources hosted by other services which wish to authenticate and manage +authorizations using a separate access control manager. + +A service like this is used by the official Docker Registry to authenticate +clients and verify their authorization to Docker image repositories. + +As of Docker 1.6, the registry client within the Docker Engine has been updated +to handle such an authorization workflow. + +## How to authenticate + +Registry V1 clients first contact the index to initiate a push or pull. Under +the Registry V2 workflow, clients should contact the registry first. If the +registry server requires authentication it will return a `401 Unauthorized` +response with a `WWW-Authenticate` header detailing how to authenticate to this +registry. + +For example, say I (username `jlhawn`) am attempting to push an image to the +repository `samalba/my-app`. For the registry to authorize this, I will need +`push` access to the `samalba/my-app` repository. The registry will first +return this response: + +``` +HTTP/1.1 401 Unauthorized +Content-Type: application/json; charset=utf-8 +Docker-Distribution-Api-Version: registry/2.0 +Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +Date: Thu, 10 Sep 2015 19:32:31 GMT +Content-Length: 235 +Strict-Transport-Security: max-age=31536000 + +{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":[{"Type":"repository","Name":"samalba/my-app","Action":"pull"},{"Type":"repository","Name":"samalba/my-app","Action":"push"}]}]} +``` + +Note the HTTP Response Header indicating the auth challenge: + +``` +Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +``` + +This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3) + +This challenge indicates that the registry requires a token issued by the +specified token server and that the request the client is attempting will +need to include sufficient access entries in its claim set. To respond to this +challenge, the client will need to make a `GET` request to the URL +`https://auth.docker.io/token` using the `service` and `scope` values from the +`WWW-Authenticate` header. + +## Requesting a Token + +Defines getting a bearer and refresh token using the token endpoint. + +#### Query Parameters + +
+
+ service +
+
+ The name of the service which hosts the resource. +
+
+ offline_token +
+
+ Whether to return a refresh token along with the bearer token. A refresh + token is capable of getting additional bearer tokens for the same + subject with different scopes. The refresh token does not have an + expiration and should be considered completely opaque to the client. +
+
+ client_id +
+
+ String identifying the client. This client_id does not need + to be registered with the authorization server but should be set to a + meaningful value in order to allow auditing keys created by unregistered + clients. Accepted syntax is defined in + [RFC6749 Appendix A.1](https://tools.ietf.org/html/rfc6749#appendix-A.1). +
+
+ scope +
+
+ The resource in question, formatted as one of the space-delimited + entries from the scope parameters from the WWW-Authenticate header + shown above. This query parameter should be specified multiple times if + there is more than one scope entry from the WWW-Authenticate + header. The above example would be specified as: + scope=repository:samalba/my-app:push. The scope field may + be empty to request a refresh token without providing any resource + permissions to the returned bearer token. +
+
+ + +#### Token Response Fields + +
+
+ token +
+
+ An opaque Bearer token that clients should supply to subsequent + requests in the Authorization header. +
+
+ access_token +
+
+ For compatibility with OAuth 2.0, we will also accept token under the name + access_token. At least one of these fields must be specified, but + both may also appear (for compatibility with older clients). When both are specified, + they should be equivalent; if they differ the client's choice is undefined. +
+
+ expires_in +
+
+ (Optional) The duration in seconds since the token was issued that it + will remain valid. When omitted, this defaults to 60 seconds. For + compatibility with older clients, a token should never be returned with + less than 60 seconds to live. +
+
+ issued_at +
+
+ (Optional) The RFC3339-serialized UTC + standard time at which a given token was issued. If issued_at is omitted, the + expiration is from when the token exchange completed. +
+
+ refresh_token +
+
+ (Optional) Token which can be used to get additional access tokens for + the same subject with different scopes. This token should be kept secure + by the client and only sent to the authorization server which issues + bearer tokens. This field will only be set when `offline_token=true` is + provided in the request. +
+
+ +#### Example + +For this example, the client makes an HTTP GET request to the following URL: + +``` +https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push +``` + +The token server should first attempt to authenticate the client using any +authentication credentials provided with the request. From Docker 1.11 the +Docker engine supports both Basic Authentication and [OAuth2](oauth.md) for +getting tokens. Docker 1.10 and before, the registry client in the Docker Engine +only supports Basic Authentication. If an attempt to authenticate to the token +server fails, the token server should return a `401 Unauthorized` response +indicating that the provided credentials are invalid. + +Whether the token server requires authentication is up to the policy of that +access control provider. Some requests may require authentication to determine +access (such as pushing or pulling a private repository) while others may not +(such as pulling from a public repository). + +After authenticating the client (which may simply be an anonymous client if +no attempt was made to authenticate), the token server must next query its +access control list to determine whether the client has the requested scope. In +this example request, if I have authenticated as user `jlhawn`, the token +server will determine what access I have to the repository `samalba/my-app` +hosted by the entity `registry.docker.io`. + +Once the token server has determined what access the client has to the +resources requested in the `scope` parameter, it will take the intersection of +the set of requested actions on each resource and the set of actions that the +client has in fact been granted. If the client only has a subset of the +requested access **it must not be considered an error** as it is not the +responsibility of the token server to indicate authorization errors as part of +this workflow. + +Continuing with the example request, the token server will find that the +client's set of granted access to the repository is `[pull, push]` which when +intersected with the requested access `[pull, push]` yields an equal set. If +the granted access set was found only to be `[pull]` then the intersected set +would only be `[pull]`. If the client has no access to the repository then the +intersected set would be empty, `[]`. + +It is this intersected set of access which is placed in the returned token. + +The server then constructs an implementation-specific token with this +intersected set of access, and returns it to the Docker client to use to +authenticate to the audience service (within the indicated window of time): + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": 3600,"issued_at": "2009-11-10T23:00:00Z"} +``` + + +## Using the Bearer token + +Once the client has a token, it will try the registry request again with the +token placed in the HTTP `Authorization` header like so: + +``` +Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw +``` + +This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) diff --git a/vendor/github.com/docker/distribution/docs/spec/implementations.md b/vendor/github.com/docker/distribution/docs/spec/implementations.md new file mode 100644 index 0000000..3474653 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/implementations.md @@ -0,0 +1,30 @@ +--- +published: false +--- + +# Distribution API Implementations + +This is a list of known implementations of the Distribution API spec. + +## [Docker Distribution Registry](https://github.com/docker/distribution) + +Docker distribution is the reference implementation of the distribution API +specification. It aims to fully implement the entire specification. + +### Releases +#### 2.0.1 (_in development_) +Implements API 2.0.1 + +_Known Issues_ + - No resumable push support + - Content ranges ignored + - Blob upload status will always return a starting range of 0 + +#### 2.0.0 +Implements API 2.0.0 + +_Known Issues_ + - No resumable push support + - No PATCH implementation for blob upload + - Content ranges ignored + diff --git a/vendor/github.com/docker/distribution/docs/spec/index.md b/vendor/github.com/docker/distribution/docs/spec/index.md new file mode 100644 index 0000000..952ebab --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/index.md @@ -0,0 +1,12 @@ +--- +title: "Reference Overview" +description: "Explains registry JSON objects" +keywords: ["registry, service, images, repository, json"] +--- + +# Docker Registry Reference + +* [HTTP API V2](api.md) +* [Storage Driver](../storage-drivers/index.md) +* [Token Authentication Specification](auth/token.md) +* [Token Authentication Implementation](auth/jwt.md) diff --git a/vendor/github.com/docker/distribution/docs/spec/json.md b/vendor/github.com/docker/distribution/docs/spec/json.md new file mode 100644 index 0000000..825b17a --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/json.md @@ -0,0 +1,90 @@ +--- +published: false +title: "Docker Distribution JSON Canonicalization" +description: "Explains registry JSON objects" +keywords: ["registry, service, images, repository, json"] +--- + + + +# Docker Distribution JSON Canonicalization + +To provide consistent content hashing of JSON objects throughout Docker +Distribution APIs, the specification defines a canonical JSON format. Adopting +such a canonicalization also aids in caching JSON responses. + +Note that protocols should not be designed to depend on identical JSON being +generated across different versions or clients. The canonicalization rules are +merely useful for caching and consistency. + +## Rules + +Compliant JSON should conform to the following rules: + +1. All generated JSON should comply with [RFC + 7159](http://www.ietf.org/rfc/rfc7159.txt). +2. Resulting "JSON text" shall always be encoded in UTF-8. +3. Unless a canonical key order is defined for a particular schema, object + keys shall always appear in lexically sorted order. +4. All whitespace between tokens should be removed. +5. No "trailing commas" are allowed in object or array definitions. +6. The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e". + Ampersand "&" is escaped to "\u0026". + +## Examples + +The following is a simple example of a canonicalized JSON string: + +```json +{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]} +``` + +## Reference + +### Other Canonicalizations + +The OLPC project specifies [Canonical +JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in +[TUF](http://theupdateframework.com/), which may be used with other +distribution-related protocols, this alternative format has been proposed in +case the original source changes. Specifications complying with either this +specification or an alternative should explicitly call out the +canonicalization format. Except for key ordering, this specification is mostly +compatible. + +### Go + +In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library +will emit canonical JSON by default. Simply using `json.Marshal` will suffice +in most cases: + +```go +incoming := map[string]interface{}{ + "asdf": 1, + "qwer": []interface{}{}, + "zxcv": []interface{}{ + map[string]interface{}{}, + true, + int(1e9), + "tyui", + }, +} + +canonical, err := json.Marshal(incoming) +if err != nil { + // ... handle error +} +``` + +To apply canonical JSON format spacing to an existing serialized JSON buffer, one +can use +[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65) +with the following arguments: + +```go +incoming := getBytes() +var canonical bytes.Buffer +if err := json.Indent(&canonical, incoming, "", ""); err != nil { + // ... handle error +} +``` diff --git a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md new file mode 100644 index 0000000..b06c2f2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md @@ -0,0 +1,163 @@ +--- +title: "Image Manifest V 2, Schema 1 " +description: "image manifest for the Registry." +keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"] +--- + +# Image Manifest Version 2, Schema 1 + +This document outlines the format of of the V2 image manifest. The image +manifest described herein was introduced in the Docker daemon in the [v1.3.0 +release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453). +It is a provisional manifest to provide a compatibility with the [V1 Image +format](https://github.com/docker/docker/blob/master/image/spec/v1.md), as the +requirements are defined for the [V2 Schema 2 +image](https://github.com/docker/distribution/pull/62). + + +Image manifests describe the various constituents of a docker image. Image +manifests can be serialized to JSON format with the following media types: + +Manifest Type | Media Type +------------- | ------------- +manifest | "application/vnd.docker.distribution.manifest.v1+json" +signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws" + +*Note that "application/json" will also be accepted for schema 1.* + +References: + + - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015) + - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093) + +## *Manifest* Field Descriptions + +Manifest provides the base accessible fields for working with V2 image format + in the registry. + +- **`name`** *string* + + name is the name of the image's repository + +- **`tag`** *string* + + tag is the tag of the image + +- **`architecture`** *string* + + architecture is the host architecture on which this image is intended to + run. This is for information purposes and not currently used by the engine + +- **`fsLayers`** *array* + + fsLayers is a list of filesystem layer blob sums contained in this image. + + An fsLayer is a struct consisting of the following fields + - **`blobSum`** *digest.Digest* + + blobSum is the digest of the referenced filesystem image layer. A + digest must be a sha256 hash. + + +- **`history`** *array* + + history is a list of unstructured historical data for v1 compatibility. It + contains ID of the image layer and ID of the layer's parent layers. + + history is a struct consisting of the following fields + - **`v1Compatibility`** string + + V1Compatibility is the raw V1 compatibility information. This will + contain the JSON object describing the V1 of this image. + +- **`schemaVersion`** *int* + + SchemaVersion is the image manifest schema that this image follows. + +>**Note**:the length of `history` must be equal to the length of `fsLayers` and +>entries in each are correlated by index. + +## Signed Manifests + +Signed manifests provides an envelope for a signed image manifest. A signed +manifest consists of an image manifest along with an additional field +containing the signature of the manifest. + +The docker client can verify signed manifests and displays a message to the user. + +### Signing Manifests + +Image manifests can be signed in two different ways: with a *libtrust* private + key or an x509 certificate chain. When signing with an x509 certificate chain, + the public key of the first element in the chain must be the public key + corresponding with the sign key. + +### Signed Manifest Field Description + +Signed manifests include an image manifest and a list of signatures generated +by *libtrust*. A signature consists of the following fields: + + +- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)* + + A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html) + +- **`signature`** *string* + + A signature for the image manifest, signed by a *libtrust* private key + +- **`protected`** *string* + + The signed protected header + +## Example Manifest + +*Example showing the official 'hello-world' image manifest.* + +``` +{ + "name": "hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + }, + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + }, + { + "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11" + }, + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + ], + "schemaVersion": 1, + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4", + "kty": "EC", + "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A", + "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010" + }, + "alg": "ES256" + }, + "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg", + "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ" + } + ] +} + +``` diff --git a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md new file mode 100644 index 0000000..4b41bb2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md @@ -0,0 +1,295 @@ +--- +title: "Image Manifest V 2, Schema 2 " +description: "image manifest for the Registry." +keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"] +--- + +# Image Manifest Version 2, Schema 2 + +This document outlines the format of of the V2 image manifest, schema version 2. +The original (and provisional) image manifest for V2 (schema 1), was introduced +in the Docker daemon in the [v1.3.0 +release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453) +and is specified in the [schema 1 manifest definition](manifest-v2-1.md) + +This second schema version has two primary goals. The first is to allow +multi-architecture images, through a "fat manifest" which references image +manifests for platform-specific versions of an image. The second is to +move the Docker engine towards content-addressable images, by supporting +an image model where the image's configuration can be hashed to generate +an ID for the image. + +# Media Types + +The following media types are used by the manifest formats described here, and +the resources they reference: + +- `application/vnd.docker.distribution.manifest.v1+json`: schema1 (existing manifest format) +- `application/vnd.docker.distribution.manifest.v2+json`: New image manifest format (schemaVersion = 2) +- `application/vnd.docker.distribution.manifest.list.v2+json`: Manifest list, aka "fat manifest" +- `application/vnd.docker.container.image.v1+json`: Container config JSON +- `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar +- `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip`: "Layer", as a gzipped tar that should never be pushed +- `application/vnd.docker.plugin.v1+json`: Plugin config JSON + +## Manifest List + +The manifest list is the "fat manifest" which points to specific image manifests +for one or more platforms. Its use is optional, and relatively few images will +use one of these manifests. A client will distinguish a manifest list from an +image manifest based on the Content-Type returned in the HTTP response. + +## *Manifest List* Field Descriptions + +- **`schemaVersion`** *int* + + This field specifies the image manifest schema version as an integer. This + schema uses the version `2`. + +- **`mediaType`** *string* + + The MIME type of the manifest list. This should be set to + `application/vnd.docker.distribution.manifest.list.v2+json`. + +- **`manifests`** *array* + + The manifests field contains a list of manifests for specific platforms. + + Fields of an object in the manifests list are: + + - **`mediaType`** *string* + + The MIME type of the referenced object. This will generally be + `application/vnd.docker.image.manifest.v2+json`, but it could also + be `application/vnd.docker.image.manifest.v1+json` if the manifest + list references a legacy schema-1 manifest. + + - **`size`** *int* + + The size in bytes of the object. This field exists so that a client + will have an expected size for the content before validating. If the + length of the retrieved content does not match the specified length, + the content should not be trusted. + + - **`digest`** *string* + + The digest of the content, as defined by the + [Registry V2 HTTP API Specificiation](api.md#digest-parameter). + + - **`platform`** *object* + + The platform object describes the platform which the image in the + manifest runs on. A full list of valid operating system and architecture + values are listed in the [Go language documentation for `$GOOS` and + `$GOARCH`](https://golang.org/doc/install/source#environment) + + - **`architecture`** *string* + + The architecture field specifies the CPU architecture, for example + `amd64` or `ppc64le`. + + - **`os`** *string* + + The os field specifies the operating system, for example + `linux` or `windows`. + + - **`os.version`** *string* + + The optional os.version field specifies the operating system version, + for example `10.0.10586`. + + - **`os.features`** *array* + + The optional os.features field specifies an array of strings, + each listing a required OS feature (for example on Windows + `win32k`). + + - **`variant`** *string* + + The optional variant field specifies a variant of the CPU, for + example `armv6l` to specify a particular CPU variant of the ARM CPU. + + - **`features`** *array* + + The optional features field specifies an array of strings, each + listing a required CPU feature (for example `sse4` or `aes`). + +## Example Manifest List + +*Example showing a simple manifest list pointing to image manifests for two platforms:* +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.image.manifest.v2+json", + "size": 7143, + "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + "platform": { + "architecture": "ppc64le", + "os": "linux", + } + }, + { + "mediaType": "application/vnd.docker.image.manifest.v2+json", + "size": 7682, + "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", + "platform": { + "architecture": "amd64", + "os": "linux", + "features": [ + "sse4" + ] + } + } + ] +} +``` + +# Image Manifest + +The image manifest provides a configuration and a set of layers for a container +image. It's the direct replacement for the schema-1 manifest. + +## *Image Manifest* Field Descriptions + +- **`schemaVersion`** *int* + + This field specifies the image manifest schema version as an integer. This + schema uses version `2`. + +- **`mediaType`** *string* + + The MIME type of the manifest. This should be set to + `application/vnd.docker.distribution.manifest.v2+json`. + +- **`config`** *object* + + The config field references a configuration object for a container, by + digest. This configuration item is a JSON blob that the runtime uses + to set up the container. This new schema uses a tweaked version + of this configuration to allow image content-addressability on the + daemon side. + + Fields of a config object are: + + - **`mediaType`** *string* + + The MIME type of the referenced object. This should generally be + `application/vnd.docker.container.image.v1+json`. + + - **`size`** *int* + + The size in bytes of the object. This field exists so that a client + will have an expected size for the content before validating. If the + length of the retrieved content does not match the specified length, + the content should not be trusted. + + - **`digest`** *string* + + The digest of the content, as defined by the + [Registry V2 HTTP API Specificiation](api.md#digest-parameter). + +- **`layers`** *array* + + The layer list is ordered starting from the base image (opposite order of schema1). + + Fields of an item in the layers list are: + + - **`mediaType`** *string* + + The MIME type of the referenced object. This should + generally be `application/vnd.docker.image.rootfs.diff.tar.gzip`. + Layers of type + `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip` may be + pulled from a remote location but they should never be pushed. + + - **`size`** *int* + + The size in bytes of the object. This field exists so that a client + will have an expected size for the content before validating. If the + length of the retrieved content does not match the specified length, + the content should not be trusted. + + - **`digest`** *string* + + The digest of the content, as defined by the + [Registry V2 HTTP API Specificiation](api.md#digest-parameter). + + - **`urls`** *array* + + Provides a list of URLs from which the content may be fetched. Content + should be verified against the `digest` and `size`. This field is + optional and uncommon. + +## Example Image Manifest + +*Example showing an image manifest:* +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 7023, + "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 32654, + "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 16724, + "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 73109, + "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" + } + ] +} +``` + +# Backward compatibility + +The registry will continue to accept uploads of manifests in both the old and +new formats. + +When pushing images, clients which support the new manifest format should first +construct a manifest in the new format. If uploading this manifest fails, +presumably because the registry only supports the old format, the client may +fall back to uploading a manifest in the old format. + +When pulling images, clients indicate support for this new version of the +manifest format by sending the +`application/vnd.docker.distribution.manifest.v2+json` and +`application/vnd.docker.distribution.manifest.list.v2+json` media types in an +`Accept` header when making a request to the `manifests` endpoint. Updated +clients should check the `Content-Type` header to see whether the manifest +returned from the endpoint is in the old format, or is an image manifest or +manifest list in the new format. + +If the manifest being requested uses the new format, and the appropriate media +type is not present in an `Accept` header, the registry will assume that the +client cannot handle the manifest as-is, and rewrite it on the fly into the old +format. If the object that would otherwise be returned is a manifest list, the +registry will look up the appropriate manifest for the amd64 platform and +linux OS, rewrite that manifest into the old format if necessary, and return +the result to the client. If no suitable manifest is found in the manifest +list, the registry will return a 404 error. + +One of the challenges in rewriting manifests to the old format is that the old +format involves an image configuration for each layer in the manifest, but the +new format only provides one image configuration. To work around this, the +registry will create synthetic image configurations for all layers except the +top layer. These image configurations will not result in runnable images on +their own, but only serve to fill in the parent chain in a compatible way. +The IDs in these synthetic configurations will be derived from hashes of their +respective blobs. The registry will create these configurations and their IDs +using the same scheme as Docker 1.10 when it creates a legacy manifest to push +to a registry which doesn't support the new format. diff --git a/vendor/github.com/docker/distribution/docs/spec/menu.md b/vendor/github.com/docker/distribution/docs/spec/menu.md new file mode 100644 index 0000000..9237e3c --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/menu.md @@ -0,0 +1,7 @@ +--- +title: "Reference" +description: "Explains registry JSON objects" +keywords: ["registry, service, images, repository, json"] +type: "menu" +identifier: "smn_registry_ref" +--- diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go new file mode 100644 index 0000000..c20f281 --- /dev/null +++ b/vendor/github.com/docker/distribution/errors.go @@ -0,0 +1,115 @@ +package distribution + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digest" +) + +// ErrAccessDenied is returned when an access to a requested resource is +// denied. +var ErrAccessDenied = errors.New("access denied") + +// ErrManifestNotModified is returned when a conditional manifest GetByTag +// returns nil due to the client indicating it has the latest version +var ErrManifestNotModified = errors.New("manifest not modified") + +// ErrUnsupported is returned when an unimplemented or unsupported action is +// performed +var ErrUnsupported = errors.New("operation unsupported") + +// ErrTagUnknown is returned if the given tag is not known by the tag service +type ErrTagUnknown struct { + Tag string +} + +func (err ErrTagUnknown) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + +// ErrRepositoryUnknown is returned if the named repository is not known by +// the registry. +type ErrRepositoryUnknown struct { + Name string +} + +func (err ErrRepositoryUnknown) Error() string { + return fmt.Sprintf("unknown repository name=%s", err.Name) +} + +// ErrRepositoryNameInvalid should be used to denote an invalid repository +// name. Reason may set, indicating the cause of invalidity. +type ErrRepositoryNameInvalid struct { + Name string + Reason error +} + +func (err ErrRepositoryNameInvalid) Error() string { + return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) +} + +// ErrManifestUnknown is returned if the manifest is not known by the +// registry. +type ErrManifestUnknown struct { + Name string + Tag string +} + +func (err ErrManifestUnknown) Error() string { + return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) +} + +// ErrManifestUnknownRevision is returned when a manifest cannot be found by +// revision within a repository. +type ErrManifestUnknownRevision struct { + Name string + Revision digest.Digest +} + +func (err ErrManifestUnknownRevision) Error() string { + return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) +} + +// ErrManifestUnverified is returned when the registry is unable to verify +// the manifest. +type ErrManifestUnverified struct{} + +func (ErrManifestUnverified) Error() string { + return fmt.Sprintf("unverified manifest") +} + +// ErrManifestVerification provides a type to collect errors encountered +// during manifest verification. Currently, it accepts errors of all types, +// but it may be narrowed to those involving manifest verification. +type ErrManifestVerification []error + +func (errs ErrManifestVerification) Error() string { + var parts []string + for _, err := range errs { + parts = append(parts, err.Error()) + } + + return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) +} + +// ErrManifestBlobUnknown returned when a referenced blob cannot be found. +type ErrManifestBlobUnknown struct { + Digest digest.Digest +} + +func (err ErrManifestBlobUnknown) Error() string { + return fmt.Sprintf("unknown blob %v on manifest", err.Digest) +} + +// ErrManifestNameInvalid should be used to denote an invalid manifest +// name. Reason may set, indicating the cause of invalidity. +type ErrManifestNameInvalid struct { + Name string + Reason error +} + +func (err ErrManifestNameInvalid) Error() string { + return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) +} diff --git a/vendor/github.com/docker/distribution/health/api/api.go b/vendor/github.com/docker/distribution/health/api/api.go new file mode 100644 index 0000000..73fcc45 --- /dev/null +++ b/vendor/github.com/docker/distribution/health/api/api.go @@ -0,0 +1,37 @@ +package api + +import ( + "errors" + "net/http" + + "github.com/docker/distribution/health" +) + +var ( + updater = health.NewStatusUpdater() +) + +// DownHandler registers a manual_http_status that always returns an Error +func DownHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + updater.Update(errors.New("Manual Check")) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +// UpHandler registers a manual_http_status that always returns nil +func UpHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + updater.Update(nil) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +// init sets up the two endpoints to bring the service up and down +func init() { + health.Register("manual_http_status", updater) + http.HandleFunc("/debug/health/down", DownHandler) + http.HandleFunc("/debug/health/up", UpHandler) +} diff --git a/vendor/github.com/docker/distribution/health/api/api_test.go b/vendor/github.com/docker/distribution/health/api/api_test.go new file mode 100644 index 0000000..ec82154 --- /dev/null +++ b/vendor/github.com/docker/distribution/health/api/api_test.go @@ -0,0 +1,86 @@ +package api + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/health" +) + +// TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint +// /debug/health/down with METHOD GET returns a 404 +func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + DownHandler(recorder, req) + + if recorder.Code != 404 { + t.Errorf("Did not get a 404.") + } +} + +// TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint +// /debug/health/down with METHOD GET returns a 404 +func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + DownHandler(recorder, req) + + if recorder.Code != 404 { + t.Errorf("Did not get a 404.") + } +} + +// TestPOSTDownHandlerChangeStatus ensures the endpoint /debug/health/down changes +// the status code of the response to 503 +// This test is order dependent, and should come before TestPOSTUpHandlerChangeStatus +func TestPOSTDownHandlerChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + DownHandler(recorder, req) + + if recorder.Code != 200 { + t.Errorf("Did not get a 200.") + } + + if len(health.CheckStatus()) != 1 { + t.Errorf("DownHandler didn't add an error check.") + } +} + +// TestPOSTUpHandlerChangeStatus ensures the endpoint /debug/health/up changes +// the status code of the response to 200 +func TestPOSTUpHandlerChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + UpHandler(recorder, req) + + if recorder.Code != 200 { + t.Errorf("Did not get a 200.") + } + + if len(health.CheckStatus()) != 0 { + t.Errorf("UpHandler didn't remove the error check.") + } +} diff --git a/vendor/github.com/docker/distribution/health/checks/checks.go b/vendor/github.com/docker/distribution/health/checks/checks.go new file mode 100644 index 0000000..e3c3b08 --- /dev/null +++ b/vendor/github.com/docker/distribution/health/checks/checks.go @@ -0,0 +1,62 @@ +package checks + +import ( + "errors" + "net" + "net/http" + "os" + "strconv" + "time" + + "github.com/docker/distribution/health" +) + +// FileChecker checks the existence of a file and returns an error +// if the file exists. +func FileChecker(f string) health.Checker { + return health.CheckFunc(func() error { + if _, err := os.Stat(f); err == nil { + return errors.New("file exists") + } + return nil + }) +} + +// HTTPChecker does a HEAD request and verifies that the HTTP status code +// returned matches statusCode. +func HTTPChecker(r string, statusCode int, timeout time.Duration, headers http.Header) health.Checker { + return health.CheckFunc(func() error { + client := http.Client{ + Timeout: timeout, + } + req, err := http.NewRequest("HEAD", r, nil) + if err != nil { + return errors.New("error creating request: " + r) + } + for headerName, headerValues := range headers { + for _, headerValue := range headerValues { + req.Header.Add(headerName, headerValue) + } + } + response, err := client.Do(req) + if err != nil { + return errors.New("error while checking: " + r) + } + if response.StatusCode != statusCode { + return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode)) + } + return nil + }) +} + +// TCPChecker attempts to open a TCP connection. +func TCPChecker(addr string, timeout time.Duration) health.Checker { + return health.CheckFunc(func() error { + conn, err := net.DialTimeout("tcp", addr, timeout) + if err != nil { + return errors.New("connection to " + addr + " failed") + } + conn.Close() + return nil + }) +} diff --git a/vendor/github.com/docker/distribution/health/checks/checks_test.go b/vendor/github.com/docker/distribution/health/checks/checks_test.go new file mode 100644 index 0000000..6b6dd14 --- /dev/null +++ b/vendor/github.com/docker/distribution/health/checks/checks_test.go @@ -0,0 +1,25 @@ +package checks + +import ( + "testing" +) + +func TestFileChecker(t *testing.T) { + if err := FileChecker("/tmp").Check(); err == nil { + t.Errorf("/tmp was expected as exists") + } + + if err := FileChecker("NoSuchFileFromMoon").Check(); err != nil { + t.Errorf("NoSuchFileFromMoon was expected as not exists, error:%v", err) + } +} + +func TestHTTPChecker(t *testing.T) { + if err := HTTPChecker("https://www.google.cybertron", 200, 0, nil).Check(); err == nil { + t.Errorf("Google on Cybertron was expected as not exists") + } + + if err := HTTPChecker("https://www.google.pt", 200, 0, nil).Check(); err != nil { + t.Errorf("Google at Portugal was expected as exists, error:%v", err) + } +} diff --git a/vendor/github.com/docker/distribution/health/doc.go b/vendor/github.com/docker/distribution/health/doc.go new file mode 100644 index 0000000..8c106b4 --- /dev/null +++ b/vendor/github.com/docker/distribution/health/doc.go @@ -0,0 +1,130 @@ +// Package health provides a generic health checking framework. +// The health package works expvar style. By importing the package the debug +// server is getting a "/debug/health" endpoint that returns the current +// status of the application. +// If there are no errors, "/debug/health" will return an HTTP 200 status, +// together with an empty JSON reply "{}". If there are any checks +// with errors, the JSON reply will include all the failed checks, and the +// response will be have an HTTP 503 status. +// +// A Check can either be run synchronously, or asynchronously. We recommend +// that most checks are registered as an asynchronous check, so a call to the +// "/debug/health" endpoint always returns immediately. This pattern is +// particularly useful for checks that verify upstream connectivity or +// database status, since they might take a long time to return/timeout. +// +// Installing +// +// To install health, just import it in your application: +// +// import "github.com/docker/distribution/health" +// +// You can also (optionally) import "health/api" that will add two convenience +// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add +// "manual" checks that allow the service to quickly be brought in/out of +// rotation. +// +// import _ "github.com/docker/distribution/registry/health/api" +// +// # curl localhost:5001/debug/health +// {} +// # curl -X POST localhost:5001/debug/health/down +// # curl localhost:5001/debug/health +// {"manual_http_status":"Manual Check"} +// +// After importing these packages to your main application, you can start +// registering checks. +// +// Registering Checks +// +// The recommended way of registering checks is using a periodic Check. +// PeriodicChecks run on a certain schedule and asynchronously update the +// status of the check. This allows CheckStatus to return without blocking +// on an expensive check. +// +// A trivial example of a check that runs every 5 seconds and shuts down our +// server if the current minute is even, could be added as follows: +// +// func currentMinuteEvenCheck() error { +// m := time.Now().Minute() +// if m%2 == 0 { +// return errors.New("Current minute is even!") +// } +// return nil +// } +// +// health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5) +// +// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to +// implement the exact same check, but add a threshold of failures after which +// the check will be unhealthy. This is particularly useful for flaky Checks, +// ensuring some stability of the service when handling them. +// +// health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4) +// +// The lowest-level way to interact with the health package is calling +// "Register" directly. Register allows you to pass in an arbitrary string and +// something that implements "Checker" and runs your check. If your method +// returns an error with nil, it is considered a healthy check, otherwise it +// will make the health check endpoint "/debug/health" start returning a 503 +// and list the specific check that failed. +// +// Assuming you wish to register a method called "currentMinuteEvenCheck() +// error" you could do that by doing: +// +// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck)) +// +// CheckFunc is a convenience type that implements Checker. +// +// Another way of registering a check could be by using an anonymous function +// and the convenience method RegisterFunc. An example that makes the status +// endpoint always return an error: +// +// health.RegisterFunc("my_check", func() error { +// return Errors.new("This is an error!") +// })) +// +// Examples +// +// You could also use the health checker mechanism to ensure your application +// only comes up if certain conditions are met, or to allow the developer to +// take the service out of rotation immediately. An example that checks +// database connectivity and immediately takes the server out of rotation on +// err: +// +// updater = health.NewStatusUpdater() +// health.RegisterFunc("database_check", func() error { +// return updater.Check() +// })) +// +// conn, err := Connect(...) // database call here +// if err != nil { +// updater.Update(errors.New("Error connecting to the database: " + err.Error())) +// } +// +// You can also use the predefined Checkers that come included with the health +// package. First, import the checks: +// +// import "github.com/docker/distribution/health/checks +// +// After that you can make use of any of the provided checks. An example of +// using a `FileChecker` to take the application out of rotation if a certain +// file exists can be done as follows: +// +// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5)) +// +// After registering the check, it is trivial to take an application out of +// rotation from the console: +// +// # curl localhost:5001/debug/health +// {} +// # touch /tmp/disable +// # curl localhost:5001/debug/health +// {"fileChecker":"file exists"} +// +// You could also test the connectivity to a downstream service by using a +// "HTTPChecker", but ensure that you only mark the test unhealthy if there +// are a minimum of two failures in a row: +// +// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2)) +package health diff --git a/vendor/github.com/docker/distribution/health/health.go b/vendor/github.com/docker/distribution/health/health.go new file mode 100644 index 0000000..220282d --- /dev/null +++ b/vendor/github.com/docker/distribution/health/health.go @@ -0,0 +1,306 @@ +package health + +import ( + "encoding/json" + "fmt" + "net/http" + "sync" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" +) + +// A Registry is a collection of checks. Most applications will use the global +// registry defined in DefaultRegistry. However, unit tests may need to create +// separate registries to isolate themselves from other tests. +type Registry struct { + mu sync.RWMutex + registeredChecks map[string]Checker +} + +// NewRegistry creates a new registry. This isn't necessary for normal use of +// the package, but may be useful for unit tests so individual tests have their +// own set of checks. +func NewRegistry() *Registry { + return &Registry{ + registeredChecks: make(map[string]Checker), + } +} + +// DefaultRegistry is the default registry where checks are registered. It is +// the registry used by the HTTP handler. +var DefaultRegistry *Registry + +// Checker is the interface for a Health Checker +type Checker interface { + // Check returns nil if the service is okay. + Check() error +} + +// CheckFunc is a convenience type to create functions that implement +// the Checker interface +type CheckFunc func() error + +// Check Implements the Checker interface to allow for any func() error method +// to be passed as a Checker +func (cf CheckFunc) Check() error { + return cf() +} + +// Updater implements a health check that is explicitly set. +type Updater interface { + Checker + + // Update updates the current status of the health check. + Update(status error) +} + +// updater implements Checker and Updater, providing an asynchronous Update +// method. +// This allows us to have a Checker that returns the Check() call immediately +// not blocking on a potentially expensive check. +type updater struct { + mu sync.Mutex + status error +} + +// Check implements the Checker interface +func (u *updater) Check() error { + u.mu.Lock() + defer u.mu.Unlock() + + return u.status +} + +// Update implements the Updater interface, allowing asynchronous access to +// the status of a Checker. +func (u *updater) Update(status error) { + u.mu.Lock() + defer u.mu.Unlock() + + u.status = status +} + +// NewStatusUpdater returns a new updater +func NewStatusUpdater() Updater { + return &updater{} +} + +// thresholdUpdater implements Checker and Updater, providing an asynchronous Update +// method. +// This allows us to have a Checker that returns the Check() call immediately +// not blocking on a potentially expensive check. +type thresholdUpdater struct { + mu sync.Mutex + status error + threshold int + count int +} + +// Check implements the Checker interface +func (tu *thresholdUpdater) Check() error { + tu.mu.Lock() + defer tu.mu.Unlock() + + if tu.count >= tu.threshold { + return tu.status + } + + return nil +} + +// thresholdUpdater implements the Updater interface, allowing asynchronous +// access to the status of a Checker. +func (tu *thresholdUpdater) Update(status error) { + tu.mu.Lock() + defer tu.mu.Unlock() + + if status == nil { + tu.count = 0 + } else if tu.count < tu.threshold { + tu.count++ + } + + tu.status = status +} + +// NewThresholdStatusUpdater returns a new thresholdUpdater +func NewThresholdStatusUpdater(t int) Updater { + return &thresholdUpdater{threshold: t} +} + +// PeriodicChecker wraps an updater to provide a periodic checker +func PeriodicChecker(check Checker, period time.Duration) Checker { + u := NewStatusUpdater() + go func() { + t := time.NewTicker(period) + for { + <-t.C + u.Update(check.Check()) + } + }() + + return u +} + +// PeriodicThresholdChecker wraps an updater to provide a periodic checker that +// uses a threshold before it changes status +func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker { + tu := NewThresholdStatusUpdater(threshold) + go func() { + t := time.NewTicker(period) + for { + <-t.C + tu.Update(check.Check()) + } + }() + + return tu +} + +// CheckStatus returns a map with all the current health check errors +func (registry *Registry) CheckStatus() map[string]string { // TODO(stevvooe) this needs a proper type + registry.mu.RLock() + defer registry.mu.RUnlock() + statusKeys := make(map[string]string) + for k, v := range registry.registeredChecks { + err := v.Check() + if err != nil { + statusKeys[k] = err.Error() + } + } + + return statusKeys +} + +// CheckStatus returns a map with all the current health check errors from the +// default registry. +func CheckStatus() map[string]string { + return DefaultRegistry.CheckStatus() +} + +// Register associates the checker with the provided name. +func (registry *Registry) Register(name string, check Checker) { + if registry == nil { + registry = DefaultRegistry + } + registry.mu.Lock() + defer registry.mu.Unlock() + _, ok := registry.registeredChecks[name] + if ok { + panic("Check already exists: " + name) + } + registry.registeredChecks[name] = check +} + +// Register associates the checker with the provided name in the default +// registry. +func Register(name string, check Checker) { + DefaultRegistry.Register(name, check) +} + +// RegisterFunc allows the convenience of registering a checker directly from +// an arbitrary func() error. +func (registry *Registry) RegisterFunc(name string, check func() error) { + registry.Register(name, CheckFunc(check)) +} + +// RegisterFunc allows the convenience of registering a checker in the default +// registry directly from an arbitrary func() error. +func RegisterFunc(name string, check func() error) { + DefaultRegistry.RegisterFunc(name, check) +} + +// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker +// from an arbitrary func() error. +func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { + registry.Register(name, PeriodicChecker(CheckFunc(check), period)) +} + +// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker +// in the default registry from an arbitrary func() error. +func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { + DefaultRegistry.RegisterPeriodicFunc(name, period, check) +} + +// RegisterPeriodicThresholdFunc allows the convenience of registering a +// PeriodicChecker from an arbitrary func() error. +func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { + registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) +} + +// RegisterPeriodicThresholdFunc allows the convenience of registering a +// PeriodicChecker in the default registry from an arbitrary func() error. +func RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { + DefaultRegistry.RegisterPeriodicThresholdFunc(name, period, threshold, check) +} + +// StatusHandler returns a JSON blob with all the currently registered Health Checks +// and their corresponding status. +// Returns 503 if any Error status exists, 200 otherwise +func StatusHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + checks := CheckStatus() + status := http.StatusOK + + // If there is an error, return 503 + if len(checks) != 0 { + status = http.StatusServiceUnavailable + } + + statusResponse(w, r, status, checks) + } else { + http.NotFound(w, r) + } +} + +// Handler returns a handler that will return 503 response code if the health +// checks have failed. If everything is okay with the health checks, the +// handler will pass through to the provided handler. Use this handler to +// disable a web application when the health checks fail. +func Handler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + checks := CheckStatus() + if len(checks) != 0 { + errcode.ServeJSON(w, errcode.ErrorCodeUnavailable. + WithDetail("health check failed: please see /debug/health")) + return + } + + handler.ServeHTTP(w, r) // pass through + }) +} + +// statusResponse completes the request with a response describing the health +// of the service. +func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks map[string]string) { + p, err := json.Marshal(checks) + if err != nil { + context.GetLogger(context.Background()).Errorf("error serializing health status: %v", err) + p, err = json.Marshal(struct { + ServerError string `json:"server_error"` + }{ + ServerError: "Could not parse error message", + }) + status = http.StatusInternalServerError + + if err != nil { + context.GetLogger(context.Background()).Errorf("error serializing health status failure message: %v", err) + return + } + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.WriteHeader(status) + if _, err := w.Write(p); err != nil { + context.GetLogger(context.Background()).Errorf("error writing health status response body: %v", err) + } +} + +// Registers global /debug/health api endpoint, creates default registry +func init() { + DefaultRegistry = NewRegistry() + http.HandleFunc("/debug/health", StatusHandler) +} diff --git a/vendor/github.com/docker/distribution/health/health_test.go b/vendor/github.com/docker/distribution/health/health_test.go new file mode 100644 index 0000000..766fe15 --- /dev/null +++ b/vendor/github.com/docker/distribution/health/health_test.go @@ -0,0 +1,107 @@ +package health + +import ( + "errors" + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +// TestReturns200IfThereAreNoChecks ensures that the result code of the health +// endpoint is 200 if there are not currently registered checks. +func TestReturns200IfThereAreNoChecks(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + StatusHandler(recorder, req) + + if recorder.Code != 200 { + t.Errorf("Did not get a 200.") + } +} + +// TestReturns500IfThereAreErrorChecks ensures that the result code of the +// health endpoint is 500 if there are health checks with errors +func TestReturns503IfThereAreErrorChecks(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + // Create a manual error + Register("some_check", CheckFunc(func() error { + return errors.New("This Check did not succeed") + })) + + StatusHandler(recorder, req) + + if recorder.Code != 503 { + t.Errorf("Did not get a 503.") + } +} + +// TestHealthHandler ensures that our handler implementation correct protects +// the web application when things aren't so healthy. +func TestHealthHandler(t *testing.T) { + // clear out existing checks. + DefaultRegistry = NewRegistry() + + // protect an http server + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + + // wrap it in our health handler + handler = Handler(handler) + + // use this swap check status + updater := NewStatusUpdater() + Register("test_check", updater) + + // now, create a test server + server := httptest.NewServer(handler) + + checkUp := func(t *testing.T, message string) { + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("error getting success status: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusNoContent) + } + // NOTE(stevvooe): we really don't care about the body -- the format is + // not standardized or supported, yet. + } + + checkDown := func(t *testing.T, message string) { + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("error getting down status: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusServiceUnavailable { + t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusServiceUnavailable) + } + } + + // server should be up + checkUp(t, "initial health check") + + // now, we fail the health check + updater.Update(fmt.Errorf("the server is now out of commission")) + checkDown(t, "server should be down") // should be down + + // bring server back up + updater.Update(nil) + checkUp(t, "when server is back up") // now we should be back up. +} diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go new file mode 100644 index 0000000..88367b0 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 0000000..a2082ec --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,155 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// MediaTypeManifestList specifies the mediaType for manifest lists. +const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + +// SchemaVersion provides a pre-initialized version structure for this +// packages version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returnes the distribution descriptors for the referenced image +// manifests. +func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: SchemaVersion, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier. +func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go new file mode 100644 index 0000000..09e6ed1 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go @@ -0,0 +1,111 @@ +package manifestlist + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/distribution" +) + +var expectedManifestListSerialization = []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + "platform": { + "architecture": "amd64", + "os": "linux", + "features": [ + "sse4" + ] + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 2392, + "digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608", + "platform": { + "architecture": "sun4m", + "os": "sunos" + } + } + ] +}`) + +func TestManifestList(t *testing.T) { + manifestDescriptors := []ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + }, + Platform: PlatformSpec{ + Architecture: "amd64", + OS: "linux", + Features: []string{"sse4"}, + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608", + Size: 2392, + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + }, + Platform: PlatformSpec{ + Architecture: "sun4m", + OS: "sunos", + }, + }, + } + + deserialized, err := FromDescriptors(manifestDescriptors) + if err != nil { + t.Fatalf("error creating DeserializedManifestList: %v", err) + } + + mediaType, canonical, err := deserialized.Payload() + + if mediaType != MediaTypeManifestList { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. + p, err := json.MarshalIndent(&deserialized.ManifestList, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest list: %v", err) + } + if !bytes.Equal(p, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) + } + + // Check that the canonical field has the expected value. + if !bytes.Equal(expectedManifestListSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestListSerialization)) + } + + var unmarshalled DeserializedManifestList + if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { + t.Fatalf("error unmarshaling manifest: %v", err) + } + + if !reflect.DeepEqual(&unmarshalled, deserialized) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) + } + + references := deserialized.References() + if len(references) != 2 { + t.Fatalf("unexpected number of references: %d", len(references)) + } + for i := range references { + if !reflect.DeepEqual(references[i], manifestDescriptors[i].Descriptor) { + t.Fatalf("unexpected value %d returned by References: %v", i, references[i]) + } + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go new file mode 100644 index 0000000..be01237 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -0,0 +1,282 @@ +package schema1 + +import ( + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // ref contains the name and optional tag provided to NewConfigManifestBuilder. + ref reference.Named + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. +func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + ref: ref, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors) + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + Author: h.Author, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + } + + fsLayerList[0] = FSLayer{BlobSum: blobsum} + dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + tag := "" + if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + mfst := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: mb.ref.Name(), + Tag: tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + } + + return Sign(&mfst, mb.pk) +} + +// emptyTar pushes a compressed empty tar to the blob store if one doesn't +// already exist, and returns its blobsum. +func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { + if mb.emptyTarDigest != "" { + // Already put an empty tar + return mb.emptyTarDigest, nil + } + + descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) + switch err { + case nil: + mb.emptyTarDigest = descriptor.Digest + return descriptor.Digest, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", err + } + + // Add gzipped empty tar to the blob store + descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) + if err != nil { + return "", err + } + + mb.emptyTarDigest = descriptor.Digest + + return descriptor.Digest, nil +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { + // todo: verification here? + mb.descriptors = append(mb.descriptors, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder +func (mb *configManifestBuilder) References() []distribution.Descriptor { + return mb.descriptors +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON +func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(configJSON, &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go new file mode 100644 index 0000000..5f9abaa --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go @@ -0,0 +1,272 @@ +package schema1 + +import ( + "bytes" + "compress/gzip" + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +type mockBlobService struct { + descriptors map[digest.Digest]distribution.Descriptor +} + +func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if descriptor, ok := bs.descriptors[dgst]; ok { + return descriptor, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} + +func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + d := distribution.Descriptor{ + Digest: digest.FromBytes(p), + Size: int64(len(p)), + MediaType: mediaType, + } + bs.descriptors[d.Digest] = d + return d, nil +} + +func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func TestEmptyTar(t *testing.T) { + // Confirm that gzippedEmptyTar expands to 1024 NULL bytes. + var decompressed [2048]byte + gzipReader, err := gzip.NewReader(bytes.NewReader(gzippedEmptyTar)) + if err != nil { + t.Fatalf("NewReader returned error: %v", err) + } + n, err := gzipReader.Read(decompressed[:]) + if n != 1024 { + t.Fatalf("read returned %d bytes; expected 1024", n) + } + n, err = gzipReader.Read(decompressed[1024:]) + if n != 0 { + t.Fatalf("read returned %d bytes; expected 0", n) + } + if err != io.EOF { + t.Fatal("read did not return io.EOF") + } + gzipReader.Close() + for _, b := range decompressed[:1024] { + if b != 0 { + t.Fatal("nonzero byte in decompressed tar") + } + } + + // Confirm that digestSHA256EmptyTar is the digest of gzippedEmptyTar. + dgst := digest.FromBytes(gzippedEmptyTar) + if dgst != digestSHA256GzippedEmptyTar { + t.Fatalf("digest mismatch for empty tar: expected %s got %s", digestSHA256GzippedEmptyTar, dgst) + } +} + +func TestConfigBuilder(t *testing.T) { + imgJSON := `{ + "architecture": "amd64", + "config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "author": "Alyssa P. Hacker \u003calyspdev@example.com\u003e", + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}` + + descriptors := []distribution.Descriptor{ + {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("could not generate key for testing: %v", err) + } + + bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} + + ref, err := reference.ParseNamed("testrepo:testtag") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + builder := NewConfigManifestBuilder(bs, pk, ref, []byte(imgJSON)) + + for _, d := range descriptors { + if err := builder.AppendReference(d); err != nil { + t.Fatalf("AppendReference returned error: %v", err) + } + } + + signed, err := builder.Build(context.Background()) + if err != nil { + t.Fatalf("Build returned error: %v", err) + } + + // Check that the gzipped empty layer tar was put in the blob store + _, err = bs.Stat(context.Background(), digestSHA256GzippedEmptyTar) + if err != nil { + t.Fatal("gzipped empty tar was not put in the blob store") + } + + manifest := signed.(*SignedManifest).Manifest + + if manifest.Versioned.SchemaVersion != 1 { + t.Fatal("SchemaVersion != 1") + } + if manifest.Name != "testrepo" { + t.Fatal("incorrect name in manifest") + } + if manifest.Tag != "testtag" { + t.Fatal("incorrect tag in manifest") + } + if manifest.Architecture != "amd64" { + t.Fatal("incorrect arch in manifest") + } + + expectedFSLayers := []FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if len(manifest.FSLayers) != len(expectedFSLayers) { + t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers)) + } + if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) { + t.Fatal("wrong FSLayers list") + } + + expectedV1Compatibility := []string{ + `{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"69e5c1bfadad697fdb6db59f6326648fa119e0c031a0eda33b8cfadcab54ba7f","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`, + `{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]},"author":"Alyssa P. Hacker \u003calyspdev@example.com\u003e"}`, + `{"id":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","parent":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`, + `{"id":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`, + `{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`, + `{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`, + } + + if len(manifest.History) != len(expectedV1Compatibility) { + t.Fatalf("wrong number of history entries: %d", len(manifest.History)) + } + for i := range expectedV1Compatibility { + if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] { + t.Errorf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility) + } + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go new file mode 100644 index 0000000..bff47bd --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -0,0 +1,184 @@ +package schema1 + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. Note + // that for schema version 1, the the media is optionally "application/json". + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeSignedManifest specifies the mediatype for current SignedManifest version + MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // MediaTypeManifestLayer specifies the media type for manifest layers + MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 1, + } +) + +func init() { + schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + sm := new(SignedManifest) + err := sm.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Digest: digest.FromBytes(sm.Canonical), + Size: int64(len(sm.Canonical)), + MediaType: MediaTypeSignedManifest, + } + return sm, desc, err + } + err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("application/json", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// History stores unstructured v1 compatibility information +type History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + +// Manifest provides the base accessible fields for working with V2 image +// format in the registry. +type Manifest struct { + manifest.Versioned + + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []History `json:"history"` +} + +// SignedManifest provides an envelope for a signed image manifest, including +// the format sensitive raw bytes. +type SignedManifest struct { + Manifest + + // Canonical is the canonical byte representation of the ImageManifest, + // without any attached signatures. The manifest byte + // representation cannot change or it will have to be re-signed. + Canonical []byte `json:"-"` + + // all contains the byte representation of the Manifest including signatures + // and is returned by Payload() + all []byte +} + +// UnmarshalJSON populates a new SignedManifest struct from JSON data. +func (sm *SignedManifest) UnmarshalJSON(b []byte) error { + sm.all = make([]byte, len(b), len(b)) + // store manifest and signatures in all + copy(sm.all, b) + + jsig, err := libtrust.ParsePrettySignature(b, "signatures") + if err != nil { + return err + } + + // Resolve the payload in the manifest. + bytes, err := jsig.Payload() + if err != nil { + return err + } + + // sm.Canonical stores the canonical manifest JSON + sm.Canonical = make([]byte, len(bytes), len(bytes)) + copy(sm.Canonical, bytes) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { + return err + } + + sm.Manifest = manifest + + return nil +} + +// References returnes the descriptors of this manifests references +func (sm SignedManifest) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) + for i, fsLayer := range sm.FSLayers { + dependencies[i] = distribution.Descriptor{ + MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", + Digest: fsLayer.BlobSum, + } + } + + return dependencies + +} + +// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner +// contents. Applications requiring a marshaled signed manifest should simply +// use Raw directly, since the the content produced by json.Marshal will be +// compacted and will fail signature checks. +func (sm *SignedManifest) MarshalJSON() ([]byte, error) { + if len(sm.all) > 0 { + return sm.all, nil + } + + // If the raw data is not available, just dump the inner content. + return json.Marshal(&sm.Manifest) +} + +// Payload returns the signed content of the signed manifest. +func (sm SignedManifest) Payload() (string, []byte, error) { + return MediaTypeSignedManifest, sm.all, nil +} + +// Signatures returns the signatures as provided by +// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws +// signatures. +func (sm *SignedManifest) Signatures() ([][]byte, error) { + jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + // Resolve the payload in the manifest. + return jsig.Signatures() +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go new file mode 100644 index 0000000..05bb8ec --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go @@ -0,0 +1,136 @@ +package schema1 + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/libtrust" +) + +type testEnv struct { + name, tag string + invalidSigned *SignedManifest + signed *SignedManifest + pk libtrust.PrivateKey +} + +func TestManifestMarshaling(t *testing.T) { + env := genEnv(t) + + // Check that the all field is the same as json.MarshalIndent with these + // parameters. + p, err := json.MarshalIndent(env.signed, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest: %v", err) + } + + if !bytes.Equal(p, env.signed.all) { + t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.all), string(p)) + } +} + +func TestManifestUnmarshaling(t *testing.T) { + env := genEnv(t) + + var signed SignedManifest + if err := json.Unmarshal(env.signed.all, &signed); err != nil { + t.Fatalf("error unmarshaling signed manifest: %v", err) + } + + if !reflect.DeepEqual(&signed, env.signed) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed) + } + +} + +func TestManifestVerification(t *testing.T) { + env := genEnv(t) + + publicKeys, err := Verify(env.signed) + if err != nil { + t.Fatalf("error verifying manifest: %v", err) + } + + if len(publicKeys) == 0 { + t.Fatalf("no public keys found in signature") + } + + var found bool + publicKey := env.pk.PublicKey() + // ensure that one of the extracted public keys matches the private key. + for _, candidate := range publicKeys { + if candidate.KeyID() == publicKey.KeyID() { + found = true + break + } + } + + if !found { + t.Fatalf("expected public key, %v, not found in verified keys: %v", publicKey, publicKeys) + } + + // Check that an invalid manifest fails verification + _, err = Verify(env.invalidSigned) + if err != nil { + t.Fatalf("Invalid manifest should not pass Verify()") + } +} + +func genEnv(t *testing.T) *testEnv { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("error generating test key: %v", err) + } + + name, tag := "foo/bar", "test" + + invalid := Manifest{ + Versioned: SchemaVersion, + Name: name, + Tag: tag, + FSLayers: []FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + } + + valid := Manifest{ + Versioned: SchemaVersion, + Name: name, + Tag: tag, + FSLayers: []FSLayer{ + { + BlobSum: "asdf", + }, + }, + History: []History{ + { + V1Compatibility: "", + }, + }, + } + + sm, err := Sign(&valid, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + invalidSigned, err := Sign(&invalid, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + return &testEnv{ + name: name, + tag: tag, + invalidSigned: invalidSigned, + signed: sm, + pk: pk, + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go new file mode 100644 index 0000000..fc1045f --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -0,0 +1,98 @@ +package schema1 + +import ( + "fmt" + + "errors" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// referenceManifestBuilder is a type for constructing manifests from schema1 +// dependencies. +type referenceManifestBuilder struct { + Manifest + pk libtrust.PrivateKey +} + +// NewReferenceManifestBuilder is used to build new manifests for the current +// schema version using schema1 dependencies. +func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { + tag := "" + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + return &referenceManifestBuilder{ + Manifest: Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: ref.Name(), + Tag: tag, + Architecture: architecture, + }, + pk: pk, + } +} + +func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { + m := mb.Manifest + if len(m.FSLayers) == 0 { + return nil, errors.New("cannot build manifest with zero layers or history") + } + + m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) + m.History = make([]History, len(mb.Manifest.History)) + copy(m.FSLayers, mb.Manifest.FSLayers) + copy(m.History, mb.Manifest.History) + + return Sign(&m, mb.pk) +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { + r, ok := d.(Reference) + if !ok { + return fmt.Errorf("Unable to add non-reference type to v1 builder") + } + + // Entries need to be prepended + mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) + mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) + return nil + +} + +// References returns the current references added to this builder +func (mb *referenceManifestBuilder) References() []distribution.Descriptor { + refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) + for i := range mb.Manifest.FSLayers { + layerDigest := mb.Manifest.FSLayers[i].BlobSum + history := mb.Manifest.History[i] + ref := Reference{layerDigest, 0, history} + refs[i] = ref.Descriptor() + } + return refs +} + +// Reference describes a manifest v2, schema version 1 dependency. +// An FSLayer associated with a history entry. +type Reference struct { + Digest digest.Digest + Size int64 // if we know it, set it for the descriptor. + History History +} + +// Descriptor describes a reference +func (r Reference) Descriptor() distribution.Descriptor { + return distribution.Descriptor{ + MediaType: MediaTypeManifestLayer, + Digest: r.Digest, + Size: r.Size, + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go new file mode 100644 index 0000000..35db28e --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go @@ -0,0 +1,108 @@ +package schema1 + +import ( + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +func makeSignedManifest(t *testing.T, pk libtrust.PrivateKey, refs []Reference) *SignedManifest { + u := &Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: "foo/bar", + Tag: "latest", + Architecture: "amd64", + } + + for i := len(refs) - 1; i >= 0; i-- { + u.FSLayers = append(u.FSLayers, FSLayer{ + BlobSum: refs[i].Digest, + }) + u.History = append(u.History, History{ + V1Compatibility: refs[i].History.V1Compatibility, + }) + } + + signedManifest, err := Sign(u, pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + return signedManifest +} + +func TestReferenceBuilder(t *testing.T) { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + r1 := Reference{ + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + Size: 1, + History: History{V1Compatibility: "{\"a\" : 1 }"}, + } + r2 := Reference{ + Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + Size: 2, + History: History{V1Compatibility: "{\"\a\" : 2 }"}, + } + + handCrafted := makeSignedManifest(t, pk, []Reference{r1, r2}) + + ref, err := reference.ParseNamed(handCrafted.Manifest.Name) + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + ref, err = reference.WithTag(ref, handCrafted.Manifest.Tag) + if err != nil { + t.Fatalf("could not add tag: %v", err) + } + + b := NewReferenceManifestBuilder(pk, ref, handCrafted.Manifest.Architecture) + _, err = b.Build(context.Background()) + if err == nil { + t.Fatal("Expected error building zero length manifest") + } + + err = b.AppendReference(r1) + if err != nil { + t.Fatal(err) + } + + err = b.AppendReference(r2) + if err != nil { + t.Fatal(err) + } + + refs := b.References() + if len(refs) != 2 { + t.Fatalf("Unexpected reference count : %d != %d", 2, len(refs)) + } + + // Ensure ordering + if refs[0].Digest != r2.Digest { + t.Fatalf("Unexpected reference : %v", refs[0]) + } + + m, err := b.Build(context.Background()) + if err != nil { + t.Fatal(err) + } + + built, ok := m.(*SignedManifest) + if !ok { + t.Fatalf("unexpected type from Build() : %T", built) + } + + d1 := digest.FromBytes(built.Canonical) + d2 := digest.FromBytes(handCrafted.Canonical) + if d1 != d2 { + t.Errorf("mismatching canonical JSON") + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/github.com/docker/distribution/manifest/schema1/sign.go new file mode 100644 index 0000000..c862dd8 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/sign.go @@ -0,0 +1,68 @@ +package schema1 + +import ( + "crypto/x509" + "encoding/json" + + "github.com/docker/libtrust" +) + +// Sign signs the manifest with the provided private key, returning a +// SignedManifest. This typically won't be used within the registry, except +// for testing. +func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.Sign(pk); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} + +// SignWithChain signs the manifest with the given private key and x509 chain. +// The public key of the first element in the chain must be the public key +// corresponding with the sign key. +func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.SignWithChain(key, chain); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/github.com/docker/distribution/manifest/schema1/verify.go new file mode 100644 index 0000000..fa8daa5 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/verify.go @@ -0,0 +1,32 @@ +package schema1 + +import ( + "crypto/x509" + + "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" +) + +// Verify verifies the signature of the signed manifest returning the public +// keys used during signing. +func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") + return nil, err + } + + return js.Verify() +} + +// VerifyChains verifies the signature of the signed manifest against the +// certificate pool returning the list of verified chains. Signatures without +// an x509 chain are not checked. +func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + return js.VerifyChains(ca) +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go new file mode 100644 index 0000000..ec0bf85 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go @@ -0,0 +1,80 @@ +package schema2 + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// builder is a type for constructing manifests. +type builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configJSON references + configJSON []byte + + // layers is a list of layer descriptors that gets built by successive + // calls to AppendReference. + layers []distribution.Descriptor +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. It takes a BlobService so it can publish the configuration blob +// as part of the Build process. +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { + mb := &builder{ + bs: bs, + configJSON: make([]byte, len(configJSON)), + } + copy(mb.configJSON, configJSON) + + return mb +} + +// Build produces a final manifest from the given references. +func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.layers)), + } + copy(m.Layers, mb.layers) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = MediaTypeConfig + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = MediaTypeConfig + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *builder) AppendReference(d distribution.Describable) error { + mb.layers = append(mb.layers, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *builder) References() []distribution.Descriptor { + return mb.layers +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go b/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go new file mode 100644 index 0000000..851f917 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go @@ -0,0 +1,210 @@ +package schema2 + +import ( + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +type mockBlobService struct { + descriptors map[digest.Digest]distribution.Descriptor +} + +func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if descriptor, ok := bs.descriptors[dgst]; ok { + return descriptor, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} + +func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + d := distribution.Descriptor{ + Digest: digest.FromBytes(p), + Size: int64(len(p)), + MediaType: "application/octet-stream", + } + bs.descriptors[d.Digest] = d + return d, nil +} + +func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func TestBuilder(t *testing.T) { + imgJSON := []byte(`{ + "architecture": "amd64", + "config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}`) + configDigest := digest.FromBytes(imgJSON) + + descriptors := []distribution.Descriptor{ + { + Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 5312, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + Size: 235231, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 639152, + MediaType: MediaTypeLayer, + }, + } + + bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} + builder := NewManifestBuilder(bs, imgJSON) + + for _, d := range descriptors { + if err := builder.AppendReference(d); err != nil { + t.Fatalf("AppendReference returned error: %v", err) + } + } + + built, err := builder.Build(context.Background()) + if err != nil { + t.Fatalf("Build returned error: %v", err) + } + + // Check that the config was put in the blob store + _, err = bs.Stat(context.Background(), configDigest) + if err != nil { + t.Fatal("config was not put in the blob store") + } + + manifest := built.(*DeserializedManifest).Manifest + + if manifest.Versioned.SchemaVersion != 2 { + t.Fatal("SchemaVersion != 2") + } + + target := manifest.Target() + if target.Digest != configDigest { + t.Fatalf("unexpected digest in target: %s", target.Digest.String()) + } + if target.MediaType != MediaTypeConfig { + t.Fatalf("unexpected media type in target: %s", target.MediaType) + } + if target.Size != 3153 { + t.Fatalf("unexpected size in target: %d", target.Size) + } + + references := manifest.References() + expected := append([]distribution.Descriptor{manifest.Target()}, descriptors...) + if !reflect.DeepEqual(references, expected) { + t.Fatal("References() does not match the descriptors added") + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go new file mode 100644 index 0000000..741998d --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -0,0 +1,134 @@ +package schema2 + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + + // MediaTypeConfig specifies the mediaType for the image configuration. + MediaTypeConfig = "application/vnd.docker.container.image.v1+json" + + // MediaTypePluginConfig specifies the mediaType for plugin configuration. + MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" + + // MediaTypeLayer is the mediaType used for layers referenced by the + // manifest. + MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" + + // MediaTypeForeignLayer is the mediaType used for layers that must be + // downloaded from foreign URLs. + MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifest, + } +) + +func init() { + schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a schema2 manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` +} + +// References returnes the descriptors of this manifests references. +func (m Manifest) References() []distribution.Descriptor { + references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) + references = append(references, m.Config) + references = append(references, m.Layers...) + return references +} + +// Target returns the target of this signed manifest. +func (m Manifest) Target() distribution.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. +func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(m.canonical, &manifest); err != nil { + return err + } + + m.Manifest = manifest + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +// Payload returns the raw content of the manifest. The contents can be used to +// calculate the content identifier. +func (m DeserializedManifest) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go new file mode 100644 index 0000000..f0003d1 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go @@ -0,0 +1,111 @@ +package schema2 + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/distribution" +) + +var expectedManifestSerialization = []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 153263, + "digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" + } + ] +}`) + +func TestManifest(t *testing.T) { + manifest := Manifest{ + Versioned: SchemaVersion, + Config: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: MediaTypeConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", + Size: 153263, + MediaType: MediaTypeLayer, + }, + }, + } + + deserialized, err := FromStruct(manifest) + if err != nil { + t.Fatalf("error creating DeserializedManifest: %v", err) + } + + mediaType, canonical, err := deserialized.Payload() + + if mediaType != MediaTypeManifest { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. + p, err := json.MarshalIndent(&manifest, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest: %v", err) + } + if !bytes.Equal(p, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) + } + + // Check that canonical field matches expected value. + if !bytes.Equal(expectedManifestSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestSerialization)) + } + + var unmarshalled DeserializedManifest + if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { + t.Fatalf("error unmarshaling manifest: %v", err) + } + + if !reflect.DeepEqual(&unmarshalled, deserialized) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) + } + + target := deserialized.Target() + if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" { + t.Fatalf("unexpected digest in target: %s", target.Digest.String()) + } + if target.MediaType != MediaTypeConfig { + t.Fatalf("unexpected media type in target: %s", target.MediaType) + } + if target.Size != 985 { + t.Fatalf("unexpected size in target: %d", target.Size) + } + + references := deserialized.References() + if len(references) != 2 { + t.Fatalf("unexpected number of references: %d", len(references)) + } + + if !reflect.DeepEqual(references[0], target) { + t.Fatalf("first reference should be target: %v != %v", references[0], target) + } + + // Test the second reference + if references[1].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" { + t.Fatalf("unexpected digest in reference: %s", references[0].Digest.String()) + } + if references[1].MediaType != MediaTypeLayer { + t.Fatalf("unexpected media type in reference: %s", references[0].MediaType) + } + if references[1].Size != 153263 { + t.Fatalf("unexpected size in reference: %d", references[0].Size) + } +} diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go new file mode 100644 index 0000000..caa6b14 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/versioned.go @@ -0,0 +1,12 @@ +package manifest + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go new file mode 100644 index 0000000..c4fb634 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifests.go @@ -0,0 +1,125 @@ +package distribution + +import ( + "fmt" + "mime" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// Manifest represents a registry object specifying a set of +// references and an optional target +type Manifest interface { + // References returns a list of objects which make up this manifest. + // A reference is anything which can be represented by a + // distribution.Descriptor. These can consist of layers, resources or other + // manifests. + // + // While no particular order is required, implementations should return + // them from highest to lowest priority. For example, one might want to + // return the base layer before the top layer. + References() []Descriptor + + // Payload provides the serialized format of the manifest, in addition to + // the mediatype. + Payload() (mediatype string, payload []byte, err error) +} + +// ManifestBuilder creates a manifest allowing one to include dependencies. +// Instances can be obtained from a version-specific manifest package. Manifest +// specific data is passed into the function which creates the builder. +type ManifestBuilder interface { + // Build creates the manifest from his builder. + Build(ctx context.Context) (Manifest, error) + + // References returns a list of objects which have been added to this + // builder. The dependencies are returned in the order they were added, + // which should be from base to head. + References() []Descriptor + + // AppendReference includes the given object in the manifest after any + // existing dependencies. If the add fails, such as when adding an + // unsupported dependency, an error may be returned. + // + // The destination of the reference is dependent on the manifest type and + // the dependency type. + AppendReference(dependency Describable) error +} + +// ManifestService describes operations on image manifests. +type ManifestService interface { + // Exists returns true if the manifest exists. + Exists(ctx context.Context, dgst digest.Digest) (bool, error) + + // Get retrieves the manifest specified by the given digest + Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest + Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) + + // Delete removes the manifest specified by the given digest. Deleting + // a manifest that doesn't exist will return ErrManifestNotFound + Delete(ctx context.Context, dgst digest.Digest) error +} + +// ManifestEnumerator enables iterating over manifests +type ManifestEnumerator interface { + // Enumerate calls ingester for each manifest. + Enumerate(ctx context.Context, ingester func(digest.Digest) error) error +} + +// Describable is an interface for descriptors +type Describable interface { + Descriptor() Descriptor +} + +// ManifestMediaTypes returns the supported media types for manifests. +func ManifestMediaTypes() (mediaTypes []string) { + for t := range mappings { + if t != "" { + mediaTypes = append(mediaTypes, t) + } + } + return +} + +// UnmarshalFunc implements manifest unmarshalling a given MediaType +type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) + +var mappings = make(map[string]UnmarshalFunc, 0) + +// UnmarshalManifest looks up manifest unmarshal functions based on +// MediaType +func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { + // Need to look up by the actual media type, not the raw contents of + // the header. Strip semicolons and anything following them. + var mediatype string + if ctHeader != "" { + var err error + mediatype, _, err = mime.ParseMediaType(ctHeader) + if err != nil { + return nil, Descriptor{}, err + } + } + + unmarshalFunc, ok := mappings[mediatype] + if !ok { + unmarshalFunc, ok = mappings[""] + if !ok { + return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) + } + } + + return unmarshalFunc(p) +} + +// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This +// should be called from specific +func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { + if _, ok := mappings[mediatype]; ok { + return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) + } + mappings[mediatype] = u + return nil +} diff --git a/vendor/github.com/docker/distribution/notifications/bridge.go b/vendor/github.com/docker/distribution/notifications/bridge.go new file mode 100644 index 0000000..502288a --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/bridge.go @@ -0,0 +1,214 @@ +package notifications + +import ( + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/uuid" +) + +type bridge struct { + ub URLBuilder + actor ActorRecord + source SourceRecord + request RequestRecord + sink Sink +} + +var _ Listener = &bridge{} + +// URLBuilder defines a subset of url builder to be used by the event listener. +type URLBuilder interface { + BuildManifestURL(name reference.Named) (string, error) + BuildBlobURL(ref reference.Canonical) (string, error) +} + +// NewBridge returns a notification listener that writes records to sink, +// using the actor and source. Any urls populated in the events created by +// this bridge will be created using the URLBuilder. +// TODO(stevvooe): Update this to simply take a context.Context object. +func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { + return &bridge{ + ub: ub, + actor: actor, + source: source, + request: request, + sink: sink, + } +} + +// NewRequestRecord builds a RequestRecord for use in NewBridge from an +// http.Request, associating it with a request id. +func NewRequestRecord(id string, r *http.Request) RequestRecord { + return RequestRecord{ + ID: id, + Addr: context.RemoteAddr(r), + Host: r.Host, + Method: r.Method, + UserAgent: r.UserAgent(), + } +} + +func (b *bridge) ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPush, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPull, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) ManifestDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createManifestDeleteEventAndWrite(EventActionDelete, repo, dgst) +} + +func (b *bridge) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPush, repo, desc) +} + +func (b *bridge) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPull, repo, desc) +} + +func (b *bridge) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { + event, err := b.createBlobEvent(EventActionMount, repo, desc) + if err != nil { + return err + } + event.Target.FromRepository = fromRepo.Name() + return b.sink.Write(*event) +} + +func (b *bridge) BlobDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createBlobDeleteEventAndWrite(EventActionDelete, repo, dgst) +} + +func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error { + manifestEvent, err := b.createManifestEvent(action, repo, sm) + if err != nil { + return err + } + + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) createManifestDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { + event := b.createEvent(action) + event.Target.Repository = repo.Name() + event.Target.Digest = dgst + + return b.sink.Write(*event) +} + +func (b *bridge) createManifestEvent(action string, repo reference.Named, sm distribution.Manifest) (*Event, error) { + event := b.createEvent(action) + event.Target.Repository = repo.Name() + + mt, p, err := sm.Payload() + if err != nil { + return nil, err + } + + // Ensure we have the canonical manifest descriptor here + _, desc, err := distribution.UnmarshalManifest(mt, p) + if err != nil { + return nil, err + } + + event.Target.MediaType = mt + event.Target.Length = desc.Size + event.Target.Size = desc.Size + event.Target.Digest = desc.Digest + + ref, err := reference.WithDigest(repo, event.Target.Digest) + if err != nil { + return nil, err + } + + event.Target.URL, err = b.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + return event, nil +} + +func (b *bridge) createBlobDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { + event := b.createEvent(action) + event.Target.Digest = dgst + event.Target.Repository = repo.Name() + + return b.sink.Write(*event) +} + +func (b *bridge) createBlobEventAndWrite(action string, repo reference.Named, desc distribution.Descriptor) error { + event, err := b.createBlobEvent(action, repo, desc) + if err != nil { + return err + } + + return b.sink.Write(*event) +} + +func (b *bridge) createBlobEvent(action string, repo reference.Named, desc distribution.Descriptor) (*Event, error) { + event := b.createEvent(action) + event.Target.Descriptor = desc + event.Target.Length = desc.Size + event.Target.Repository = repo.Name() + + ref, err := reference.WithDigest(repo, desc.Digest) + if err != nil { + return nil, err + } + + event.Target.URL, err = b.ub.BuildBlobURL(ref) + if err != nil { + return nil, err + } + + return event, nil +} + +// createEvent creates an event with actor and source populated. +func (b *bridge) createEvent(action string) *Event { + event := createEvent(action) + event.Source = b.source + event.Actor = b.actor + event.Request = b.request + + return event +} + +// createEvent returns a new event, timestamped, with the specified action. +func createEvent(action string) *Event { + return &Event{ + ID: uuid.Generate().String(), + Timestamp: time.Now(), + Action: action, + } +} diff --git a/vendor/github.com/docker/distribution/notifications/bridge_test.go b/vendor/github.com/docker/distribution/notifications/bridge_test.go new file mode 100644 index 0000000..0f85791 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/bridge_test.go @@ -0,0 +1,222 @@ +package notifications + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/uuid" + "github.com/docker/libtrust" +) + +var ( + // common environment for expected manifest events. + + repo = "test/repo" + source = SourceRecord{ + Addr: "remote.test", + InstanceID: uuid.Generate().String(), + } + ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/", false)) + + actor = ActorRecord{ + Name: "test", + } + request = RequestRecord{} + m = schema1.Manifest{ + Name: repo, + Tag: "latest", + } + + sm *schema1.SignedManifest + payload []byte + dgst digest.Digest +) + +func TestEventBridgeManifestPulled(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPull, events...) + + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPulled(repoRef, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestPushed(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPush, events...) + + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPushed(repoRef, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestPushedWithTag(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPush, events...) + if events[0].Target.Tag != "latest" { + t.Fatalf("missing or unexpected tag: %#v", events[0].Target) + } + + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPushed(repoRef, sm, distribution.WithTag(m.Tag)); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestPulledWithTag(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPull, events...) + if events[0].Target.Tag != "latest" { + t.Fatalf("missing or unexpected tag: %#v", events[0].Target) + } + + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPulled(repoRef, sm, distribution.WithTag(m.Tag)); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestDeleted(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkDeleted(t, EventActionDelete, events...) + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestDeleted(repoRef, dgst); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func createTestEnv(t *testing.T, fn testSinkFn) Listener { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("error generating private key: %v", err) + } + + sm, err = schema1.Sign(&m, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + payload = sm.Canonical + dgst = digest.FromBytes(payload) + + return NewBridge(ub, source, actor, request, fn) +} + +func checkDeleted(t *testing.T, action string, events ...Event) { + if len(events) != 1 { + t.Fatalf("unexpected number of events: %v != 1", len(events)) + } + + event := events[0] + + if event.Source != source { + t.Fatalf("source not equal: %#v != %#v", event.Source, source) + } + + if event.Request != request { + t.Fatalf("request not equal: %#v != %#v", event.Request, request) + } + + if event.Actor != actor { + t.Fatalf("request not equal: %#v != %#v", event.Actor, actor) + } + + if event.Target.Digest != dgst { + t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst) + } + + if event.Target.Repository != repo { + t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo) + } + +} + +func checkCommonManifest(t *testing.T, action string, events ...Event) { + checkCommon(t, events...) + + event := events[0] + if event.Action != action { + t.Fatalf("unexpected event action: %q != %q", event.Action, action) + } + + repoRef, _ := reference.ParseNamed(repo) + ref, _ := reference.WithDigest(repoRef, dgst) + u, err := ub.BuildManifestURL(ref) + if err != nil { + t.Fatalf("error building expected url: %v", err) + } + + if event.Target.URL != u { + t.Fatalf("incorrect url passed: \n%q != \n%q", event.Target.URL, u) + } +} + +func checkCommon(t *testing.T, events ...Event) { + if len(events) != 1 { + t.Fatalf("unexpected number of events: %v != 1", len(events)) + } + + event := events[0] + + if event.Source != source { + t.Fatalf("source not equal: %#v != %#v", event.Source, source) + } + + if event.Request != request { + t.Fatalf("request not equal: %#v != %#v", event.Request, request) + } + + if event.Actor != actor { + t.Fatalf("request not equal: %#v != %#v", event.Actor, actor) + } + + if event.Target.Digest != dgst { + t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst) + } + + if event.Target.Length != int64(len(payload)) { + t.Fatalf("unexpected target length: %v != %v", event.Target.Length, len(payload)) + } + + if event.Target.Repository != repo { + t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo) + } + +} + +type testSinkFn func(events ...Event) error + +func (tsf testSinkFn) Write(events ...Event) error { + return tsf(events...) +} + +func (tsf testSinkFn) Close() error { return nil } + +func mustUB(ub *v2.URLBuilder, err error) *v2.URLBuilder { + if err != nil { + panic(err) + } + + return ub +} diff --git a/vendor/github.com/docker/distribution/notifications/endpoint.go b/vendor/github.com/docker/distribution/notifications/endpoint.go new file mode 100644 index 0000000..29a9e27 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/endpoint.go @@ -0,0 +1,93 @@ +package notifications + +import ( + "net/http" + "time" +) + +// EndpointConfig covers the optional configuration parameters for an active +// endpoint. +type EndpointConfig struct { + Headers http.Header + Timeout time.Duration + Threshold int + Backoff time.Duration + IgnoredMediaTypes []string + Transport *http.Transport +} + +// defaults set any zero-valued fields to a reasonable default. +func (ec *EndpointConfig) defaults() { + if ec.Timeout <= 0 { + ec.Timeout = time.Second + } + + if ec.Threshold <= 0 { + ec.Threshold = 10 + } + + if ec.Backoff <= 0 { + ec.Backoff = time.Second + } + + if ec.Transport == nil { + ec.Transport = http.DefaultTransport.(*http.Transport) + } +} + +// Endpoint is a reliable, queued, thread-safe sink that notify external http +// services when events are written. Writes are non-blocking and always +// succeed for callers but events may be queued internally. +type Endpoint struct { + Sink + url string + name string + + EndpointConfig + + metrics *safeMetrics +} + +// NewEndpoint returns a running endpoint, ready to receive events. +func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { + var endpoint Endpoint + endpoint.name = name + endpoint.url = url + endpoint.EndpointConfig = config + endpoint.defaults() + endpoint.metrics = newSafeMetrics() + + // Configures the inmemory queue, retry, http pipeline. + endpoint.Sink = newHTTPSink( + endpoint.url, endpoint.Timeout, endpoint.Headers, + endpoint.Transport, endpoint.metrics.httpStatusListener()) + endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff) + endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener()) + endpoint.Sink = newIgnoredMediaTypesSink(endpoint.Sink, config.IgnoredMediaTypes) + + register(&endpoint) + return &endpoint +} + +// Name returns the name of the endpoint, generally used for debugging. +func (e *Endpoint) Name() string { + return e.name +} + +// URL returns the url of the endpoint. +func (e *Endpoint) URL() string { + return e.url +} + +// ReadMetrics populates em with metrics from the endpoint. +func (e *Endpoint) ReadMetrics(em *EndpointMetrics) { + e.metrics.Lock() + defer e.metrics.Unlock() + + *em = e.metrics.EndpointMetrics + // Map still need to copied in a threadsafe manner. + em.Statuses = make(map[string]int) + for k, v := range e.metrics.Statuses { + em.Statuses[k] = v + } +} diff --git a/vendor/github.com/docker/distribution/notifications/event.go b/vendor/github.com/docker/distribution/notifications/event.go new file mode 100644 index 0000000..b59a72b --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/event.go @@ -0,0 +1,160 @@ +package notifications + +import ( + "fmt" + "time" + + "github.com/docker/distribution" +) + +// EventAction constants used in action field of Event. +const ( + EventActionPull = "pull" + EventActionPush = "push" + EventActionMount = "mount" + EventActionDelete = "delete" +) + +const ( + // EventsMediaType is the mediatype for the json event envelope. If the + // Event, ActorRecord, SourceRecord or Envelope structs change, the version + // number should be incremented. + EventsMediaType = "application/vnd.docker.distribution.events.v1+json" + // LayerMediaType is the media type for image rootfs diffs (aka "layers") + // used by Docker. We don't expect this to change for quite a while. + layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar" +) + +// Envelope defines the fields of a json event envelope message that can hold +// one or more events. +type Envelope struct { + // Events make up the contents of the envelope. Events present in a single + // envelope are not necessarily related. + Events []Event `json:"events,omitempty"` +} + +// TODO(stevvooe): The event type should be separate from the json format. It +// should be defined as an interface. Leaving as is for now since we don't +// need that at this time. If we make this change, the struct below would be +// called "EventRecord". + +// Event provides the fields required to describe a registry event. +type Event struct { + // ID provides a unique identifier for the event. + ID string `json:"id,omitempty"` + + // Timestamp is the time at which the event occurred. + Timestamp time.Time `json:"timestamp,omitempty"` + + // Action indicates what action encompasses the provided event. + Action string `json:"action,omitempty"` + + // Target uniquely describes the target of the event. + Target struct { + // TODO(stevvooe): Use http.DetectContentType for layers, maybe. + + distribution.Descriptor + + // Length in bytes of content. Same as Size field in Descriptor. + // Provided for backwards compatibility. + Length int64 `json:"length,omitempty"` + + // Repository identifies the named repository. + Repository string `json:"repository,omitempty"` + + // FromRepository identifies the named repository which a blob was mounted + // from if appropriate. + FromRepository string `json:"fromRepository,omitempty"` + + // URL provides a direct link to the content. + URL string `json:"url,omitempty"` + + // Tag provides the tag + Tag string `json:"tag,omitempty"` + } `json:"target,omitempty"` + + // Request covers the request that generated the event. + Request RequestRecord `json:"request,omitempty"` + + // Actor specifies the agent that initiated the event. For most + // situations, this could be from the authorizaton context of the request. + Actor ActorRecord `json:"actor,omitempty"` + + // Source identifies the registry node that generated the event. Put + // differently, while the actor "initiates" the event, the source + // "generates" it. + Source SourceRecord `json:"source,omitempty"` +} + +// ActorRecord specifies the agent that initiated the event. For most +// situations, this could be from the authorizaton context of the request. +// Data in this record can refer to both the initiating client and the +// generating request. +type ActorRecord struct { + // Name corresponds to the subject or username associated with the + // request context that generated the event. + Name string `json:"name,omitempty"` + + // TODO(stevvooe): Look into setting a session cookie to get this + // without docker daemon. + // SessionID + + // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and + // get the actual command. + // Command +} + +// RequestRecord covers the request that generated the event. +type RequestRecord struct { + // ID uniquely identifies the request that initiated the event. + ID string `json:"id"` + + // Addr contains the ip or hostname and possibly port of the client + // connection that initiated the event. This is the RemoteAddr from + // the standard http request. + Addr string `json:"addr,omitempty"` + + // Host is the externally accessible host name of the registry instance, + // as specified by the http host header on incoming requests. + Host string `json:"host,omitempty"` + + // Method has the request method that generated the event. + Method string `json:"method"` + + // UserAgent contains the user agent header of the request. + UserAgent string `json:"useragent"` +} + +// SourceRecord identifies the registry node that generated the event. Put +// differently, while the actor "initiates" the event, the source "generates" +// it. +type SourceRecord struct { + // Addr contains the ip or hostname and the port of the registry node + // that generated the event. Generally, this will be resolved by + // os.Hostname() along with the running port. + Addr string `json:"addr,omitempty"` + + // InstanceID identifies a running instance of an application. Changes + // after each restart. + InstanceID string `json:"instanceID,omitempty"` +} + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("sink: closed") +) + +// Sink accepts and sends events. +type Sink interface { + // Write writes one or more events to the sink. If no error is returned, + // the caller will assume that all events have been committed and will not + // try to send them again. If an error is received, the caller may retry + // sending the event. The caller should cede the slice of memory to the + // sink and not modify it after calling this method. + Write(events ...Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/vendor/github.com/docker/distribution/notifications/event_test.go b/vendor/github.com/docker/distribution/notifications/event_test.go new file mode 100644 index 0000000..0981a7a --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/event_test.go @@ -0,0 +1,157 @@ +package notifications + +import ( + "encoding/json" + "strings" + "testing" + "time" + + "github.com/docker/distribution/manifest/schema1" +) + +// TestEventJSONFormat provides silly test to detect if the event format or +// envelope has changed. If this code fails, the revision of the protocol may +// need to be incremented. +func TestEventEnvelopeJSONFormat(t *testing.T) { + var expected = strings.TrimSpace(` +{ + "events": [ + { + "id": "asdf-asdf-asdf-asdf-0", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.distribution.manifest.v1+prettyjws", + "size": 1, + "digest": "sha256:0123456789abcdef0", + "length": 1, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-1", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "size": 2, + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + "length": 2, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-2", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "size": 3, + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6", + "length": 3, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + } + ] +} + `) + + tm, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) + if err != nil { + t.Fatalf("error creating time: %v", err) + } + + var prototype Event + prototype.Action = EventActionPush + prototype.Timestamp = tm + prototype.Actor.Name = "test-actor" + prototype.Request.ID = "asdfasdf" + prototype.Request.Addr = "client.local" + prototype.Request.Host = "registrycluster.local" + prototype.Request.Method = "PUT" + prototype.Request.UserAgent = "test/0.1" + prototype.Source.Addr = "hostname.local:port" + + var manifestPush Event + manifestPush = prototype + manifestPush.ID = "asdf-asdf-asdf-asdf-0" + manifestPush.Target.Digest = "sha256:0123456789abcdef0" + manifestPush.Target.Length = 1 + manifestPush.Target.Size = 1 + manifestPush.Target.MediaType = schema1.MediaTypeSignedManifest + manifestPush.Target.Repository = "library/test" + manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush0 Event + layerPush0 = prototype + layerPush0.ID = "asdf-asdf-asdf-asdf-1" + layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" + layerPush0.Target.Length = 2 + layerPush0.Target.Size = 2 + layerPush0.Target.MediaType = layerMediaType + layerPush0.Target.Repository = "library/test" + layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush1 Event + layerPush1 = prototype + layerPush1.ID = "asdf-asdf-asdf-asdf-2" + layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6" + layerPush1.Target.Length = 3 + layerPush1.Target.Size = 3 + layerPush1.Target.MediaType = layerMediaType + layerPush1.Target.Repository = "library/test" + layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var envelope Envelope + envelope.Events = append(envelope.Events, manifestPush, layerPush0, layerPush1) + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling envelope: %v", err) + } + if string(p) != expected { + t.Fatalf("format has changed\n%s\n != \n%s", string(p), expected) + } +} diff --git a/vendor/github.com/docker/distribution/notifications/http.go b/vendor/github.com/docker/distribution/notifications/http.go new file mode 100644 index 0000000..1575161 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/http.go @@ -0,0 +1,150 @@ +package notifications + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sync" + "time" +) + +// httpSink implements a single-flight, http notification endpoint. This is +// very lightweight in that it only makes an attempt at an http request. +// Reliability should be provided by the caller. +type httpSink struct { + url string + + mu sync.Mutex + closed bool + client *http.Client + listeners []httpStatusListener + + // TODO(stevvooe): Allow one to configure the media type accepted by this + // sink and choose the serialization based on that. +} + +// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other +// sinks for increased reliability. +func newHTTPSink(u string, timeout time.Duration, headers http.Header, transport *http.Transport, listeners ...httpStatusListener) *httpSink { + if transport == nil { + transport = http.DefaultTransport.(*http.Transport) + } + return &httpSink{ + url: u, + listeners: listeners, + client: &http.Client{ + Transport: &headerRoundTripper{ + Transport: transport, + headers: headers, + }, + Timeout: timeout, + }, + } +} + +// httpStatusListener is called on various outcomes of sending notifications. +type httpStatusListener interface { + success(status int, events ...Event) + failure(status int, events ...Event) + err(err error, events ...Event) +} + +// Accept makes an attempt to notify the endpoint, returning an error if it +// fails. It is the caller's responsibility to retry on error. The events are +// accepted or rejected as a group. +func (hs *httpSink) Write(events ...Event) error { + hs.mu.Lock() + defer hs.mu.Unlock() + defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections() + + if hs.closed { + return ErrSinkClosed + } + + envelope := Envelope{ + Events: events, + } + + // TODO(stevvooe): It is not ideal to keep re-encoding the request body on + // retry but we are going to do it to keep the code simple. It is likely + // we could change the event struct to manage its own buffer. + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) + } + + body := bytes.NewReader(p) + resp, err := hs.client.Post(hs.url, EventsMediaType, body) + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + + return fmt.Errorf("%v: error posting: %v", hs, err) + } + defer resp.Body.Close() + + // The notifier will treat any 2xx or 3xx response as accepted by the + // endpoint. + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + for _, listener := range hs.listeners { + listener.success(resp.StatusCode, events...) + } + + // TODO(stevvooe): This is a little accepting: we may want to support + // unsupported media type responses with retries using the correct + // media type. There may also be cases that will never work. + + return nil + default: + for _, listener := range hs.listeners { + listener.failure(resp.StatusCode, events...) + } + return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) + } +} + +// Close the endpoint +func (hs *httpSink) Close() error { + hs.mu.Lock() + defer hs.mu.Unlock() + + if hs.closed { + return fmt.Errorf("httpsink: already closed") + } + + hs.closed = true + return nil +} + +func (hs *httpSink) String() string { + return fmt.Sprintf("httpSink{%s}", hs.url) +} + +type headerRoundTripper struct { + *http.Transport // must be transport to support CancelRequest + headers http.Header +} + +func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var nreq http.Request + nreq = *req + nreq.Header = make(http.Header) + + merge := func(headers http.Header) { + for k, v := range headers { + nreq.Header[k] = append(nreq.Header[k], v...) + } + } + + merge(req.Header) + merge(hrt.headers) + + return hrt.Transport.RoundTrip(&nreq) +} diff --git a/vendor/github.com/docker/distribution/notifications/http_test.go b/vendor/github.com/docker/distribution/notifications/http_test.go new file mode 100644 index 0000000..e046936 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/http_test.go @@ -0,0 +1,185 @@ +package notifications + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "mime" + "net/http" + "net/http/httptest" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution/manifest/schema1" +) + +// TestHTTPSink mocks out an http endpoint and notifies it under a couple of +// conditions, ensuring correct behavior. +func TestHTTPSink(t *testing.T) { + serverHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + if r.Method != "POST" { + w.WriteHeader(http.StatusMethodNotAllowed) + t.Fatalf("unexpected request method: %v", r.Method) + return + } + + // Extract the content type and make sure it matches + contentType := r.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error parsing media type: %v, contenttype=%q", err, contentType) + return + } + + if mediaType != EventsMediaType { + w.WriteHeader(http.StatusUnsupportedMediaType) + t.Fatalf("incorrect media type: %q != %q", mediaType, EventsMediaType) + return + } + + var envelope Envelope + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&envelope); err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error decoding request body: %v", err) + return + } + + // Let caller choose the status + status, err := strconv.Atoi(r.FormValue("status")) + if err != nil { + t.Logf("error parsing status: %v", err) + + // May just be empty, set status to 200 + status = http.StatusOK + } + + w.WriteHeader(status) + }) + server := httptest.NewTLSServer(serverHandler) + + metrics := newSafeMetrics() + sink := newHTTPSink(server.URL, 0, nil, nil, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + + // first make sure that the default transport gives x509 untrusted cert error + events := []Event{} + err := sink.Write(events...) + if !strings.Contains(err.Error(), "x509") { + t.Fatal("TLS server with default transport should give unknown CA error") + } + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing http sink: %v", err) + } + + // make sure that passing in the transport no longer gives this error + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + sink = newHTTPSink(server.URL, 0, nil, tr, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + err = sink.Write(events...) + if err != nil { + t.Fatalf("unexpected error writing events: %v", err) + } + + // reset server to standard http server and sink to a basic sink + server = httptest.NewServer(serverHandler) + sink = newHTTPSink(server.URL, 0, nil, nil, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + var expectedMetrics EndpointMetrics + expectedMetrics.Statuses = make(map[string]int) + + for _, tc := range []struct { + events []Event // events to send + url string + failure bool // true if there should be a failure. + statusCode int // if not set, no status code should be incremented. + }{ + { + statusCode: http.StatusOK, + events: []Event{ + createTestEvent("push", "library/test", schema1.MediaTypeSignedManifest)}, + }, + { + statusCode: http.StatusOK, + events: []Event{ + createTestEvent("push", "library/test", schema1.MediaTypeSignedManifest), + createTestEvent("push", "library/test", layerMediaType), + createTestEvent("push", "library/test", layerMediaType), + }, + }, + { + statusCode: http.StatusTemporaryRedirect, + }, + { + statusCode: http.StatusBadRequest, + failure: true, + }, + { + // Case where connection never goes through. + url: "http://shoudlntresolve/", + failure: true, + }, + } { + + if tc.failure { + expectedMetrics.Failures += len(tc.events) + } else { + expectedMetrics.Successes += len(tc.events) + } + + if tc.statusCode > 0 { + expectedMetrics.Statuses[fmt.Sprintf("%d %s", tc.statusCode, http.StatusText(tc.statusCode))] += len(tc.events) + } + + url := tc.url + if url == "" { + url = server.URL + "/" + } + // setup endpoint to respond with expected status code. + url += fmt.Sprintf("?status=%v", tc.statusCode) + sink.url = url + + t.Logf("testcase: %v, fail=%v", url, tc.failure) + // Try a simple event emission. + err := sink.Write(tc.events...) + + if !tc.failure { + if err != nil { + t.Fatalf("unexpected error send event: %v", err) + } + } else { + if err == nil { + t.Fatalf("the endpoint should have rejected the request") + } + } + + if !reflect.DeepEqual(metrics.EndpointMetrics, expectedMetrics) { + t.Fatalf("metrics not as expected: %#v != %#v", metrics.EndpointMetrics, expectedMetrics) + } + } + + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing http sink: %v", err) + } + + // double close returns error + if err := sink.Close(); err == nil { + t.Fatalf("second close should have returned error: %v", err) + } + +} + +func createTestEvent(action, repo, typ string) Event { + event := createEvent(action) + + event.Target.MediaType = typ + event.Target.Repository = repo + + return *event +} diff --git a/vendor/github.com/docker/distribution/notifications/listener.go b/vendor/github.com/docker/distribution/notifications/listener.go new file mode 100644 index 0000000..c968b98 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/listener.go @@ -0,0 +1,215 @@ +package notifications + +import ( + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" +) + +// ManifestListener describes a set of methods for listening to events related to manifests. +type ManifestListener interface { + ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error + ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error + ManifestDeleted(repo reference.Named, dgst digest.Digest) error +} + +// BlobListener describes a listener that can respond to layer related events. +type BlobListener interface { + BlobPushed(repo reference.Named, desc distribution.Descriptor) error + BlobPulled(repo reference.Named, desc distribution.Descriptor) error + BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error + BlobDeleted(repo reference.Named, desc digest.Digest) error +} + +// Listener combines all repository events into a single interface. +type Listener interface { + ManifestListener + BlobListener +} + +type repositoryListener struct { + distribution.Repository + listener Listener +} + +// Listen dispatches events on the repository to the listener. +func Listen(repo distribution.Repository, listener Listener) distribution.Repository { + return &repositoryListener{ + Repository: repo, + listener: listener, + } +} + +func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifests, err := rl.Repository.Manifests(ctx, options...) + if err != nil { + return nil, err + } + return &manifestServiceListener{ + ManifestService: manifests, + parent: rl, + }, nil +} + +func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { + return &blobServiceListener{ + BlobStore: rl.Repository.Blobs(ctx), + parent: rl, + } +} + +type manifestServiceListener struct { + distribution.ManifestService + parent *repositoryListener +} + +func (msl *manifestServiceListener) Delete(ctx context.Context, dgst digest.Digest) error { + err := msl.ManifestService.Delete(ctx, dgst) + if err == nil { + if err := msl.parent.listener.ManifestDeleted(msl.parent.Repository.Named(), dgst); err != nil { + context.GetLogger(ctx).Errorf("error dispatching manifest delete to listener: %v", err) + } + } + + return err +} + +func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + sm, err := msl.ManifestService.Get(ctx, dgst, options...) + if err == nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Named(), sm, options...); err != nil { + context.GetLogger(ctx).Errorf("error dispatching manifest pull to listener: %v", err) + } + } + + return sm, err +} + +func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + dgst, err := msl.ManifestService.Put(ctx, sm, options...) + + if err == nil { + if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Named(), sm, options...); err != nil { + context.GetLogger(ctx).Errorf("error dispatching manifest push to listener: %v", err) + } + } + + return dgst, err +} + +type blobServiceListener struct { + distribution.BlobStore + parent *repositoryListener +} + +var _ distribution.BlobStore = &blobServiceListener{} + +func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + p, err := bsl.BlobStore.Get(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return p, err +} + +func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + rc, err := bsl.BlobStore.Open(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return rc, err +} + +func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return err +} + +func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + desc, err := bsl.BlobStore.Put(ctx, mediaType, p) + if err == nil { + if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Named(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err) + } + } + + return desc, err +} + +func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Create(ctx, options...) + switch err := err.(type) { + case distribution.ErrBlobMounted: + if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Named(), err.Descriptor, err.From); err != nil { + context.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err) + } + return nil, err + } + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) Delete(ctx context.Context, dgst digest.Digest) error { + err := bsl.BlobStore.Delete(ctx, dgst) + if err == nil { + if err := bsl.parent.listener.BlobDeleted(bsl.parent.Repository.Named(), dgst); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer delete to listener: %v", err) + } + } + + return err +} + +func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Resume(ctx, id) + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter { + return &blobWriterListener{ + BlobWriter: wr, + parent: bsl, + } +} + +type blobWriterListener struct { + distribution.BlobWriter + parent *blobServiceListener +} + +func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + committed, err := bwl.BlobWriter.Commit(ctx, desc) + if err == nil { + if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Named(), committed); err != nil { + context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) + } + } + + return committed, err +} diff --git a/vendor/github.com/docker/distribution/notifications/listener_test.go b/vendor/github.com/docker/distribution/notifications/listener_test.go new file mode 100644 index 0000000..c7db594 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/listener_test.go @@ -0,0 +1,205 @@ +package notifications + +import ( + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +func TestListener(t *testing.T) { + ctx := context.Background() + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + registry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect, storage.Schema1SigningKey(k)) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + tl := &testListener{ + ops: make(map[string]int), + } + + repoRef, _ := reference.ParseNamed("foo/bar") + repository, err := registry.Repository(ctx, repoRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + repository = Listen(repository, tl) + + // Now take the registry through a number of operations + checkExerciseRepository(t, repository) + + expectedOps := map[string]int{ + "manifest:push": 1, + "manifest:pull": 1, + "manifest:delete": 1, + "layer:push": 2, + "layer:pull": 2, + "layer:delete": 2, + } + + if !reflect.DeepEqual(tl.ops, expectedOps) { + t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps) + } + +} + +type testListener struct { + ops map[string]int +} + +func (tl *testListener) ManifestPushed(repo reference.Named, m distribution.Manifest, options ...distribution.ManifestServiceOption) error { + tl.ops["manifest:push"]++ + + return nil +} + +func (tl *testListener) ManifestPulled(repo reference.Named, m distribution.Manifest, options ...distribution.ManifestServiceOption) error { + tl.ops["manifest:pull"]++ + return nil +} + +func (tl *testListener) ManifestDeleted(repo reference.Named, d digest.Digest) error { + tl.ops["manifest:delete"]++ + return nil +} + +func (tl *testListener) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { + tl.ops["layer:push"]++ + return nil +} + +func (tl *testListener) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { + tl.ops["layer:pull"]++ + return nil +} + +func (tl *testListener) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { + tl.ops["layer:mount"]++ + return nil +} + +func (tl *testListener) BlobDeleted(repo reference.Named, d digest.Digest) error { + tl.ops["layer:delete"]++ + return nil +} + +// checkExerciseRegistry takes the registry through all of its operations, +// carrying out generic checks. +func checkExerciseRepository(t *testing.T, repository distribution.Repository) { + // TODO(stevvooe): This would be a nice testutil function. Basically, it + // takes the registry through a common set of operations. This could be + // used to make cross-cutting updates by changing internals that affect + // update counts. Basically, it would make writing tests a lot easier. + + ctx := context.Background() + tag := "thetag" + // todo: change this to use Builder + + m := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: repository.Named().Name(), + Tag: tag, + } + + var blobDigests []digest.Digest + blobs := repository.Blobs(ctx) + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating test layer: %v", err) + } + dgst := digest.Digest(ds) + blobDigests = append(blobDigests, dgst) + + wr, err := blobs.Create(ctx) + if err != nil { + t.Fatalf("error creating layer upload: %v", err) + } + + // Use the resumes, as well! + wr, err = blobs.Resume(ctx, wr.ID()) + if err != nil { + t.Fatalf("error resuming layer upload: %v", err) + } + + io.Copy(wr, rs) + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + + m.FSLayers = append(m.FSLayers, schema1.FSLayer{ + BlobSum: dgst, + }) + m.History = append(m.History, schema1.History{ + V1Compatibility: "", + }) + + // Then fetch the blobs + if rc, err := blobs.Open(ctx, dgst); err != nil { + t.Fatalf("error fetching layer: %v", err) + } else { + defer rc.Close() + } + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating key: %v", err) + } + + sm, err := schema1.Sign(&m, pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + manifests, err := repository.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + + var digestPut digest.Digest + if digestPut, err = manifests.Put(ctx, sm); err != nil { + t.Fatalf("unexpected error putting the manifest: %v", err) + } + + dgst := digest.FromBytes(sm.Canonical) + if dgst != digestPut { + t.Fatalf("mismatching digest from payload and put") + } + + _, err = manifests.Get(ctx, dgst) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + err = manifests.Delete(ctx, dgst) + if err != nil { + t.Fatalf("unexpected error deleting blob: %v", err) + } + + for _, d := range blobDigests { + err = blobs.Delete(ctx, d) + if err != nil { + t.Fatalf("unexpected error deleting blob: %v", err) + } + + } +} diff --git a/vendor/github.com/docker/distribution/notifications/metrics.go b/vendor/github.com/docker/distribution/notifications/metrics.go new file mode 100644 index 0000000..a20af16 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/metrics.go @@ -0,0 +1,152 @@ +package notifications + +import ( + "expvar" + "fmt" + "net/http" + "sync" +) + +// EndpointMetrics track various actions taken by the endpoint, typically by +// number of events. The goal of this to export it via expvar but we may find +// some other future solution to be better. +type EndpointMetrics struct { + Pending int // events pending in queue + Events int // total events incoming + Successes int // total events written successfully + Failures int // total events failed + Errors int // total events errored + Statuses map[string]int // status code histogram, per call event +} + +// safeMetrics guards the metrics implementation with a lock and provides a +// safe update function. +type safeMetrics struct { + EndpointMetrics + sync.Mutex // protects statuses map +} + +// newSafeMetrics returns safeMetrics with map allocated. +func newSafeMetrics() *safeMetrics { + var sm safeMetrics + sm.Statuses = make(map[string]int) + return &sm +} + +// httpStatusListener returns the listener for the http sink that updates the +// relevant counters. +func (sm *safeMetrics) httpStatusListener() httpStatusListener { + return &endpointMetricsHTTPStatusListener{ + safeMetrics: sm, + } +} + +// eventQueueListener returns a listener that maintains queue related counters. +func (sm *safeMetrics) eventQueueListener() eventQueueListener { + return &endpointMetricsEventQueueListener{ + safeMetrics: sm, + } +} + +// endpointMetricsHTTPStatusListener increments counters related to http sinks +// for the relevant events. +type endpointMetricsHTTPStatusListener struct { + *safeMetrics +} + +var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} + +func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Successes += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Failures += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Errors += len(events) +} + +// endpointMetricsEventQueueListener maintains the incoming events counter and +// the queues pending count. +type endpointMetricsEventQueueListener struct { + *safeMetrics +} + +func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Events += len(events) + eqc.Pending += len(events) +} + +func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Pending -= len(events) +} + +// endpoints is global registry of endpoints used to report metrics to expvar +var endpoints struct { + registered []*Endpoint + mu sync.Mutex +} + +// register places the endpoint into expvar so that stats are tracked. +func register(e *Endpoint) { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + endpoints.registered = append(endpoints.registered, e) +} + +func init() { + // NOTE(stevvooe): Setup registry metrics structure to report to expvar. + // Ideally, we do more metrics through logging but we need some nice + // realtime metrics for queue state for now. + + registry := expvar.Get("registry") + + if registry == nil { + registry = expvar.NewMap("registry") + } + + var notifications expvar.Map + notifications.Init() + notifications.Set("endpoints", expvar.Func(func() interface{} { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + var names []interface{} + for _, v := range endpoints.registered { + var epjson struct { + Name string `json:"name"` + URL string `json:"url"` + EndpointConfig + + Metrics EndpointMetrics + } + + epjson.Name = v.Name() + epjson.URL = v.URL() + epjson.EndpointConfig = v.EndpointConfig + + v.ReadMetrics(&epjson.Metrics) + + names = append(names, epjson) + } + + return names + })) + + registry.(*expvar.Map).Set("notifications", ¬ifications) +} diff --git a/vendor/github.com/docker/distribution/notifications/sinks.go b/vendor/github.com/docker/distribution/notifications/sinks.go new file mode 100644 index 0000000..549ba97 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/sinks.go @@ -0,0 +1,375 @@ +package notifications + +import ( + "container/list" + "fmt" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +// NOTE(stevvooe): This file contains definitions for several utility sinks. +// Typically, the broadcaster is the only sink that should be required +// externally, but others are suitable for export if the need arises. Albeit, +// the tight integration with endpoint metrics should be removed. + +// Broadcaster sends events to multiple, reliable Sinks. The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan []Event + closed chan chan struct{} +} + +// NewBroadcaster ... +// Add appends one or more sinks to the list of sinks. The broadcaster +// behavior will be affected by the properties of the sink. Generally, the +// sink should accept all messages and deal with reliability on its own. Use +// of EventQueue and RetryingSink should be used here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan []Event), + closed: make(chan chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts a block of events to be dispatched to all sinks. This method +// will never fail and should never block (hopefully!). The caller cedes the +// slice memory to the broadcaster and should not modify it after calling +// write. +func (b *Broadcaster) Write(events ...Event) error { + select { + case b.events <- events: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + logrus.Infof("broadcaster: closing") + select { + case <-b.closed: + // already closed + return fmt.Errorf("broadcaster: already closed") + default: + // do a little chan handoff dance to synchronize closing + closed := make(chan struct{}) + b.closed <- closed + close(b.closed) + <-closed + return nil + } +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + for { + select { + case block := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(block...); err != nil { + logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) + } + } + case closing := <-b.closed: + + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil { + logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) + } + } + closing <- struct{}{} + + logrus.Debugf("broadcaster: closed") + return + } + } +} + +// eventQueue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type eventQueue struct { + sink Sink + events *list.List + listeners []eventQueueListener + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// eventQueueListener is called when various events happen on the queue. +type eventQueueListener interface { + ingress(events ...Event) + egress(events ...Event) +} + +// newEventQueue returns a queue to the provided sink. If the updater is non- +// nil, it will be called to update pending metrics on ingress and egress. +func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { + eq := eventQueue{ + sink: sink, + events: list.New(), + listeners: listeners, + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// beend closed. +func (eq *eventQueue) Write(events ...Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + for _, listener := range eq.listeners { + listener.ingress(events...) + } + eq.events.PushBack(events) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shutsdown the event queue, flushing +func (eq *eventQueue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return fmt.Errorf("eventqueue: already closed") + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + + return eq.sink.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *eventQueue) run() { + for { + block := eq.next() + + if block == nil { + return // nil block means event queue is closed. + } + + if err := eq.sink.Write(block...); err != nil { + logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err) + } + + for _, listener := range eq.listeners { + listener.egress(block...) + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. When closed, a nil slice will be returned. +func (eq *eventQueue) next() []Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.([]Event) + eq.events.Remove(front) + + return block +} + +// ignoredMediaTypesSink discards events with ignored target media types and +// passes the rest along. +type ignoredMediaTypesSink struct { + Sink + ignored map[string]bool +} + +func newIgnoredMediaTypesSink(sink Sink, ignored []string) Sink { + if len(ignored) == 0 { + return sink + } + + ignoredMap := make(map[string]bool) + for _, mediaType := range ignored { + ignoredMap[mediaType] = true + } + + return &ignoredMediaTypesSink{ + Sink: sink, + ignored: ignoredMap, + } +} + +// Write discards events with ignored target media types and passes the rest +// along. +func (imts *ignoredMediaTypesSink) Write(events ...Event) error { + var kept []Event + for _, e := range events { + if !imts.ignored[e.Target.MediaType] { + kept = append(kept, e) + } + } + if len(kept) == 0 { + return nil + } + return imts.Sink.Write(kept...) +} + +// retryingSink retries the write until success or an ErrSinkClosed is +// returned. Underlying sink must have p > 0 of succeeding or the sink will +// block. Internally, it is a circuit breaker retries to manage reset. +// Concurrent calls to a retrying sink are serialized through the sink, +// meaning that if one is in-flight, another will not proceed. +type retryingSink struct { + mu sync.Mutex + sink Sink + closed bool + + // circuit breaker heuristics + failures struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + } +} + +type retryingSinkListener interface { + active(events ...Event) + retry(events ...Event) +} + +// TODO(stevvooe): We are using circuit break here, which actually doesn't +// make a whole lot of sense for this use case, since we always retry. Move +// this to use bounded exponential backoff. + +// newRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. Parameters threshold and backoff adjust the behavior of the +// circuit breaker. +func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink { + rs := &retryingSink{ + sink: sink, + } + rs.failures.threshold = threshold + rs.failures.backoff = backoff + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *retryingSink) Write(events ...Event) error { + rs.mu.Lock() + defer rs.mu.Unlock() + +retry: + + if rs.closed { + return ErrSinkClosed + } + + if !rs.proceed() { + logrus.Warnf("%v encountered too many errors, backing off", rs.sink) + rs.wait(rs.failures.backoff) + goto retry + } + + if err := rs.write(events...); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logrus.Errorf("retryingsink: error writing events: %v, retrying", err) + goto retry + } + + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *retryingSink) Close() error { + rs.mu.Lock() + defer rs.mu.Unlock() + + if rs.closed { + return fmt.Errorf("retryingsink: already closed") + } + + rs.closed = true + return rs.sink.Close() +} + +// write provides a helper that dispatches failure and success properly. Used +// by write as the single-flight write call. +func (rs *retryingSink) write(events ...Event) error { + if err := rs.sink.Write(events...); err != nil { + rs.failure() + return err + } + + rs.reset() + return nil +} + +// wait backoff time against the sink, unlocking so others can proceed. Should +// only be called by methods that currently have the mutex. +func (rs *retryingSink) wait(backoff time.Duration) { + rs.mu.Unlock() + defer rs.mu.Lock() + + // backoff here + time.Sleep(backoff) +} + +// reset marks a successful call. +func (rs *retryingSink) reset() { + rs.failures.recent = 0 + rs.failures.last = time.Time{} +} + +// failure records a failure. +func (rs *retryingSink) failure() { + rs.failures.recent++ + rs.failures.last = time.Now().UTC() +} + +// proceed returns true if the call should proceed based on circuit breaker +// heuristics. +func (rs *retryingSink) proceed() bool { + return rs.failures.recent < rs.failures.threshold || + time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) +} diff --git a/vendor/github.com/docker/distribution/notifications/sinks_test.go b/vendor/github.com/docker/distribution/notifications/sinks_test.go new file mode 100644 index 0000000..1bfa12c --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/sinks_test.go @@ -0,0 +1,256 @@ +package notifications + +import ( + "fmt" + "math/rand" + "reflect" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "testing" +) + +func TestBroadcaster(t *testing.T) { + const nEvents = 1000 + var sinks []Sink + + for i := 0; i < 10; i++ { + sinks = append(sinks, &testSink{}) + } + + b := NewBroadcaster(sinks...) + + var block []Event + var wg sync.WaitGroup + for i := 1; i <= nEvents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := b.Write(block...); err != nil { + t.Fatalf("error writing block of length %d: %v", len(block), err) + } + wg.Done() + }(block...) + + block = nil + } + } + + wg.Wait() // Wait until writes complete + checkClose(t, b) + + // Iterate through the sinks and check that they all have the expected length. + for _, sink := range sinks { + ts := sink.(*testSink) + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.events) != nEvents { + t.Fatalf("not all events ended up in testsink: len(testSink) == %d, not %d", len(ts.events), nEvents) + } + + if !ts.closed { + t.Fatalf("sink should have been closed") + } + } + +} + +func TestEventQueue(t *testing.T) { + const nevents = 1000 + var ts testSink + metrics := newSafeMetrics() + eq := newEventQueue( + // delayed sync simulates destination slower than channel comms + &delayedSink{ + Sink: &ts, + delay: time.Millisecond * 1, + }, metrics.eventQueueListener()) + + var wg sync.WaitGroup + var block []Event + for i := 1; i <= nevents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := eq.Write(block...); err != nil { + t.Fatalf("error writing event block: %v", err) + } + wg.Done() + }(block...) + + block = nil + } + } + + wg.Wait() + checkClose(t, eq) + + ts.mu.Lock() + defer ts.mu.Unlock() + metrics.Lock() + defer metrics.Unlock() + + if len(ts.events) != nevents { + t.Fatalf("events did not make it to the sink: %d != %d", len(ts.events), 1000) + } + + if !ts.closed { + t.Fatalf("sink should have been closed") + } + + if metrics.Events != nevents { + t.Fatalf("unexpected ingress count: %d != %d", metrics.Events, nevents) + } + + if metrics.Pending != 0 { + t.Fatalf("unexpected egress count: %d != %d", metrics.Pending, 0) + } +} + +func TestIgnoredMediaTypesSink(t *testing.T) { + blob := createTestEvent("push", "library/test", "blob") + manifest := createTestEvent("push", "library/test", "manifest") + + type testcase struct { + ignored []string + expected []Event + } + + cases := []testcase{ + {nil, []Event{blob, manifest}}, + {[]string{"other"}, []Event{blob, manifest}}, + {[]string{"blob"}, []Event{manifest}}, + {[]string{"blob", "manifest"}, nil}, + } + + for _, c := range cases { + ts := &testSink{} + s := newIgnoredMediaTypesSink(ts, c.ignored) + + if err := s.Write(blob, manifest); err != nil { + t.Fatalf("error writing event: %v", err) + } + + ts.mu.Lock() + if !reflect.DeepEqual(ts.events, c.expected) { + t.Fatalf("unexpected events: %#v != %#v", ts.events, c.expected) + } + ts.mu.Unlock() + } +} + +func TestRetryingSink(t *testing.T) { + + // Make a sync that fails most of the time, ensuring that all the events + // make it through. + var ts testSink + flaky := &flakySink{ + rate: 1.0, // start out always failing. + Sink: &ts, + } + s := newRetryingSink(flaky, 3, 10*time.Millisecond) + + var wg sync.WaitGroup + var block []Event + for i := 1; i <= 100; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + + // Above 50, set the failure rate lower + if i > 50 { + s.mu.Lock() + flaky.rate = 0.90 + s.mu.Unlock() + } + + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + defer wg.Done() + if err := s.Write(block...); err != nil { + t.Fatalf("error writing event block: %v", err) + } + }(block...) + + block = nil + } + } + + wg.Wait() + checkClose(t, s) + + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.events) != 100 { + t.Fatalf("events not propagated: %d != %d", len(ts.events), 100) + } +} + +type testSink struct { + events []Event + mu sync.Mutex + closed bool +} + +func (ts *testSink) Write(events ...Event) error { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.events = append(ts.events, events...) + return nil +} + +func (ts *testSink) Close() error { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.closed = true + + logrus.Infof("closing testSink") + return nil +} + +type delayedSink struct { + Sink + delay time.Duration +} + +func (ds *delayedSink) Write(events ...Event) error { + time.Sleep(ds.delay) + return ds.Sink.Write(events...) +} + +type flakySink struct { + Sink + rate float64 +} + +func (fs *flakySink) Write(events ...Event) error { + if rand.Float64() < fs.rate { + return fmt.Errorf("error writing %d events", len(events)) + } + + return fs.Sink.Write(events...) +} + +func checkClose(t *testing.T, sink Sink) { + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing: %v", err) + } + + // second close should not crash but should return an error. + if err := sink.Close(); err == nil { + t.Fatalf("no error on double close") + } + + // Write after closed should be an error + if err := sink.Write([]Event{}...); err == nil { + t.Fatalf("write after closed did not have an error") + } else if err != ErrSinkClosed { + t.Fatalf("error should be ErrSinkClosed") + } +} diff --git a/vendor/github.com/docker/distribution/project/dev-image/Dockerfile b/vendor/github.com/docker/distribution/project/dev-image/Dockerfile new file mode 100644 index 0000000..1e2a847 --- /dev/null +++ b/vendor/github.com/docker/distribution/project/dev-image/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:14.04 + +ENV GOLANG_VERSION 1.4rc1 +ENV GOPATH /var/cache/drone +ENV GOROOT /usr/local/go +ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin + +ENV LANG C +ENV LC_ALL C + +RUN apt-get update && apt-get install -y \ + wget ca-certificates git mercurial bzr \ + --no-install-recommends \ + && rm -rf /var/lib/apt/lists/* + +RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ + tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ + rm go${GOLANG_VERSION}.linux-amd64.tar.gz + +RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint diff --git a/vendor/github.com/docker/distribution/project/hooks/README.md b/vendor/github.com/docker/distribution/project/hooks/README.md new file mode 100644 index 0000000..eda8869 --- /dev/null +++ b/vendor/github.com/docker/distribution/project/hooks/README.md @@ -0,0 +1,6 @@ +Git Hooks +========= + +To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. + +As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh b/vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh new file mode 100755 index 0000000..6afea8a --- /dev/null +++ b/vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +cd $(dirname $0) + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + echo -e "Unable to resolve repository root. Error:\n$REPO_ROOT" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +set -e +set -x + +# Just in case the directory doesn't exist +mkdir -p $REPO_ROOT/.git/hooks + +ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/project/hooks/pre-commit b/vendor/github.com/docker/distribution/project/hooks/pre-commit new file mode 100755 index 0000000..3ee2e91 --- /dev/null +++ b/vendor/github.com/docker/distribution/project/hooks/pre-commit @@ -0,0 +1,29 @@ +#!/bin/sh + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +cd $REPO_ROOT + +GOFMT_ERRORS=$(gofmt -s -l . 2>&1) +if [ -n "$GOFMT_ERRORS" ]; then + printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr + exit 1 +fi + +GOLINT_ERRORS=$(golint ./... 2>&1) +if [ -n "$GOLINT_ERRORS" ]; then + printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr + exit 1 +fi + +GOVET_ERRORS=$(go vet ./... 2>&1) +GOVET_STATUS=$? +if [ "$GOVET_STATUS" -ne "0" ]; then + printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr + exit $GOVET_STATUS +fi diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go new file mode 100644 index 0000000..0278662 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,370 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [hostname '/'] component ['/' component]* +// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] +// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +package reference + +import ( + "errors" + "fmt" + "path" + "strings" + + "github.com/docker/distribution/digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +func SplitHostname(named Named) (string, string) { + name := named.Name() + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + ref := reference{ + name: matches[1], + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.ParseDigest(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + ref, err := Parse(s) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + if !anchoredNameRegexp.MatchString(name) { + return nil, ErrReferenceInvalidFormat + } + return repository(name), nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + if canonical, ok := name.(Canonical); ok { + return reference{ + name: name.Name(), + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + name: name.Name(), + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + if tagged, ok := name.(Tagged); ok { + return reference{ + name: name.Name(), + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + name: name.Name(), + digest: digest, + }, nil +} + +// Match reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func Match(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, ref.String()) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, namedRef.Name()) + } + return matched, err +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + return repository(ref.Name()) +} + +func getBestReferenceType(ref reference) Reference { + if ref.name == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + name: ref.name, + digest: ref.digest, + } + } + return repository(ref.name) + } + if ref.digest == "" { + return taggedReference{ + name: ref.name, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + name string + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.name + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Name() string { + return r.name +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository string + +func (r repository) String() string { + return string(r) +} + +func (r repository) Name() string { + return string(r) +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return d.String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + name string + tag string +} + +func (t taggedReference) String() string { + return t.name + ":" + t.tag +} + +func (t taggedReference) Name() string { + return t.name +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + name string + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.name + "@" + c.digest.String() +} + +func (c canonicalReference) Name() string { + return c.name +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go new file mode 100644 index 0000000..405c47c --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference_test.go @@ -0,0 +1,661 @@ +package reference + +import ( + "encoding/json" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestReferenceParse(t *testing.T) { + // referenceTestcases is a unified set of testcases for + // testing the parsing of references + referenceTestcases := []struct { + // input is the repository name or name component testcase + input string + // err is the error expected from Parse, or nil + err error + // repository is the string representation for the reference + repository string + // hostname is the hostname expected in the reference + hostname string + // tag is the tag for the reference + tag string + // digest is the digest for the reference (enforces digest reference) + digest string + }{ + { + input: "test_com", + repository: "test_com", + }, + { + input: "test.com:tag", + repository: "test.com", + tag: "tag", + }, + { + input: "test.com:5000", + repository: "test.com", + tag: "5000", + }, + { + input: "test.com/repo:tag", + hostname: "test.com", + repository: "test.com/repo", + tag: "tag", + }, + { + input: "test:5000/repo", + hostname: "test:5000", + repository: "test:5000/repo", + }, + { + input: "test:5000/repo:tag", + hostname: "test:5000", + repository: "test:5000/repo", + tag: "tag", + }, + { + input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + hostname: "test:5000", + repository: "test:5000/repo", + digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + hostname: "test:5000", + repository: "test:5000/repo", + tag: "tag", + digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo", + hostname: "test:5000", + repository: "test:5000/repo", + }, + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "repo@sha256:ffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestInvalidLength, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestUnsupported, + }, + { + input: "Uppercase:tag", + err: ErrNameContainsUppercase, + }, + // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes. + // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175 + //{ + // input: "Uppercase/lowercase:tag", + // err: ErrNameContainsUppercase, + //}, + { + input: "test:5000/Uppercase/lowercase:tag", + err: ErrNameContainsUppercase, + }, + { + input: "lowercase:Uppercase", + repository: "lowercase", + tag: "Uppercase", + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", + hostname: "a", + repository: strings.Repeat("a/", 127) + "a", + tag: "tag-puts-this-over-max", + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + { + input: "sub-dom1.foo.com/bar/baz/quux", + hostname: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + }, + { + input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", + hostname: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + tag: "some-long-tag", + }, + { + input: "b.gcr.io/test.example.com/my-app:test.example.com", + hostname: "b.gcr.io", + repository: "b.gcr.io/test.example.com/my-app", + tag: "test.example.com", + }, + { + input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode + hostname: "xn--n3h.com", + repository: "xn--n3h.com/myimage", + tag: "xn--n3h.com", + }, + { + input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode + hostname: "xn--7o8h.com", + repository: "xn--7o8h.com/myimage", + tag: "xn--7o8h.com", + digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "foo_bar.com:8080", + repository: "foo_bar.com", + tag: "8080", + }, + { + input: "foo/foo_bar.com:8080", + hostname: "foo", + repository: "foo/foo_bar.com", + tag: "8080", + }, + } + for _, testcase := range referenceTestcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + repo, err := Parse(testcase.input) + if testcase.err != nil { + if err == nil { + failf("missing expected error: %v", testcase.err) + } else if testcase.err != err { + failf("mismatched error: got %v, expected %v", err, testcase.err) + } + continue + } else if err != nil { + failf("unexpected parse error: %v", err) + continue + } + if repo.String() != testcase.input { + failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) + } + + if named, ok := repo.(Named); ok { + if named.Name() != testcase.repository { + failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) + } + hostname, _ := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + } + } else if testcase.repository != "" || testcase.hostname != "" { + failf("expected named type, got %T", repo) + } + + tagged, ok := repo.(Tagged) + if testcase.tag != "" { + if ok { + if tagged.Tag() != testcase.tag { + failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) + } + } else { + failf("expected tagged type, got %T", repo) + } + } else if ok { + failf("unexpected tagged type") + } + + digested, ok := repo.(Digested) + if testcase.digest != "" { + if ok { + if digested.Digest().String() != testcase.digest { + failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) + } + } else { + failf("expected digested type, got %T", repo) + } + } else if ok { + failf("unexpected digested type") + } + + } +} + +// TestWithNameFailure tests cases where WithName should fail. Cases where it +// should succeed are covered by TestSplitHostname, below. +func TestWithNameFailure(t *testing.T) { + testcases := []struct { + input string + err error + }{ + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + _, err := WithName(testcase.input) + if err == nil { + failf("no error parsing name. expected: %s", testcase.err) + } + } +} + +func TestSplitHostname(t *testing.T) { + testcases := []struct { + input string + hostname string + name string + }{ + { + input: "test.com/foo", + hostname: "test.com", + name: "foo", + }, + { + input: "test_com/foo", + hostname: "", + name: "test_com/foo", + }, + { + input: "test:8080/foo", + hostname: "test:8080", + name: "foo", + }, + { + input: "test.com:8080/foo", + hostname: "test.com:8080", + name: "foo", + }, + { + input: "test-com:8080/foo", + hostname: "test-com:8080", + name: "foo", + }, + { + input: "xn--n3h.com:18080/foo", + hostname: "xn--n3h.com:18080", + name: "foo", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + named, err := WithName(testcase.input) + if err != nil { + failf("error parsing name: %s", err) + } + hostname, name := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + } + if name != testcase.name { + failf("unexpected name: got %q, expected %q", name, testcase.name) + } + } +} + +type serializationType struct { + Description string + Field Field +} + +func TestSerialization(t *testing.T) { + testcases := []struct { + description string + input string + name string + tag string + digest string + err error + }{ + { + description: "empty value", + err: ErrNameEmpty, + }, + { + description: "just a name", + input: "example.com:8000/named", + name: "example.com:8000/named", + }, + { + description: "name with a tag", + input: "example.com:8000/named:tagged", + name: "example.com:8000/named", + tag: "tagged", + }, + { + description: "name with digest", + input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112", + name: "other.com/named", + digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + m := map[string]string{ + "Description": testcase.description, + "Field": testcase.input, + } + b, err := json.Marshal(m) + if err != nil { + failf("error marshalling: %v", err) + } + t := serializationType{} + + if err := json.Unmarshal(b, &t); err != nil { + if testcase.err == nil { + failf("error unmarshalling: %v", err) + } + if err != testcase.err { + failf("wrong error, expected %v, got %v", testcase.err, err) + } + + continue + } else if testcase.err != nil { + failf("expected error unmarshalling: %v", testcase.err) + } + + if t.Description != testcase.description { + failf("wrong description, expected %q, got %q", testcase.description, t.Description) + } + + ref := t.Field.Reference() + + if named, ok := ref.(Named); ok { + if named.Name() != testcase.name { + failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name) + } + } else if testcase.name != "" { + failf("expected named type, got %T", ref) + } + + tagged, ok := ref.(Tagged) + if testcase.tag != "" { + if ok { + if tagged.Tag() != testcase.tag { + failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) + } + } else { + failf("expected tagged type, got %T", ref) + } + } else if ok { + failf("unexpected tagged type") + } + + digested, ok := ref.(Digested) + if testcase.digest != "" { + if ok { + if digested.Digest().String() != testcase.digest { + failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) + } + } else { + failf("expected digested type, got %T", ref) + } + } else if ok { + failf("unexpected digested type") + } + + t = serializationType{ + Description: testcase.description, + Field: AsField(ref), + } + + b2, err := json.Marshal(t) + if err != nil { + failf("error marshing serialization type: %v", err) + } + + if string(b) != string(b2) { + failf("unexpected serialized value: expected %q, got %q", string(b), string(b2)) + } + + // Ensure t.Field is not implementing "Reference" directly, getting + // around the Reference type system + var fieldInterface interface{} = t.Field + if _, ok := fieldInterface.(Reference); ok { + failf("field should not implement Reference interface") + } + + } +} + +func TestWithTag(t *testing.T) { + testcases := []struct { + name string + digest digest.Digest + tag string + combined string + }{ + { + name: "test.com/foo", + tag: "tag", + combined: "test.com/foo:tag", + }, + { + name: "foo", + tag: "tag2", + combined: "foo:tag2", + }, + { + name: "test.com:8000/foo", + tag: "tag4", + combined: "test.com:8000/foo:tag4", + }, + { + name: "test.com:8000/foo", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) + t.Fail() + } + + named, err := WithName(testcase.name) + if err != nil { + failf("error parsing name: %s", err) + } + if testcase.digest != "" { + canonical, err := WithDigest(named, testcase.digest) + if err != nil { + failf("error adding digest") + } + named = canonical + } + + tagged, err := WithTag(named, testcase.tag) + if err != nil { + failf("WithTag failed: %s", err) + } + if tagged.String() != testcase.combined { + failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) + } + } +} + +func TestWithDigest(t *testing.T) { + testcases := []struct { + name string + digest digest.Digest + tag string + combined string + }{ + { + name: "test.com/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com/foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "latest", + combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) + t.Fail() + } + + named, err := WithName(testcase.name) + if err != nil { + failf("error parsing name: %s", err) + } + if testcase.tag != "" { + tagged, err := WithTag(named, testcase.tag) + if err != nil { + failf("error adding tag") + } + named = tagged + } + digested, err := WithDigest(named, testcase.digest) + if err != nil { + failf("WithDigest failed: %s", err) + } + if digested.String() != testcase.combined { + failf("unexpected: got %q, expected %q", digested.String(), testcase.combined) + } + } +} + +func TestMatchError(t *testing.T) { + named, err := Parse("foo") + if err != nil { + t.Fatal(err) + } + _, err = Match("[-x]", named) + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestMatch(t *testing.T) { + matchCases := []struct { + reference string + pattern string + expected bool + }{ + { + reference: "foo", + pattern: "foo/**/ba[rz]", + expected: false, + }, + { + reference: "foo/any/bat", + pattern: "foo/**/ba[rz]", + expected: false, + }, + { + reference: "foo/a/bar", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/b/baz", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/*/baz:tag", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/c/baz:tag", + expected: true, + }, + { + reference: "example.com/foo/c/baz:tag", + pattern: "*/foo/c/baz", + expected: true, + }, + { + reference: "example.com/foo/c/baz:tag", + pattern: "example.com/foo/c/baz", + expected: true, + }, + } + for _, c := range matchCases { + named, err := Parse(c.reference) + if err != nil { + t.Fatal(err) + } + actual, err := Match(c.pattern, named) + if err != nil { + t.Fatal(err) + } + if actual != c.expected { + t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual) + } + } +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 0000000..9a7d366 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,124 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // hostnameComponentRegexp restricts the registry hostname component of a + // repository name to start with a component as defined by hostnameRegexp + // and followed by an optional port. + hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // hostnameRegexp defines the structure of potential hostname components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + hostnameRegexp = expression( + hostnameComponentRegexp, + optional(repeated(literal(`.`), hostnameComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the hostname and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(hostnameRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // hostname and trailing components. + anchoredNameRegexp = anchored( + optional(capture(hostnameRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go new file mode 100644 index 0000000..2ec3937 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp_test.go @@ -0,0 +1,489 @@ +package reference + +import ( + "regexp" + "strings" + "testing" +) + +type regexpMatch struct { + input string + match bool + subs []string +} + +func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { + matches := r.FindStringSubmatch(m.input) + if m.match && matches != nil { + if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { + t.Fatalf("Bad match result %#v for %q", matches, m.input) + } + if len(matches) < (len(m.subs) + 1) { + t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) + } + for i := range m.subs { + if m.subs[i] != matches[i+1] { + t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) + } + } + } else if m.match { + t.Errorf("Expected match for %q", m.input) + } else if matches != nil { + t.Errorf("Unexpected match for %q", m.input) + } +} + +func TestHostRegexp(t *testing.T) { + hostcases := []regexpMatch{ + { + input: "test.com", + match: true, + }, + { + input: "test.com:10304", + match: true, + }, + { + input: "test.com:http", + match: false, + }, + { + input: "localhost", + match: true, + }, + { + input: "localhost:8080", + match: true, + }, + { + input: "a", + match: true, + }, + { + input: "a.b", + match: true, + }, + { + input: "ab.cd.com", + match: true, + }, + { + input: "a-b.com", + match: true, + }, + { + input: "-ab.com", + match: false, + }, + { + input: "ab-.com", + match: false, + }, + { + input: "ab.c-om", + match: true, + }, + { + input: "ab.-com", + match: false, + }, + { + input: "ab.com-", + match: false, + }, + { + input: "0101.com", + match: true, // TODO(dmcgowan): valid if this should be allowed + }, + { + input: "001a.com", + match: true, + }, + { + input: "b.gbc.io:443", + match: true, + }, + { + input: "b.gbc.io", + match: true, + }, + { + input: "xn--n3h.com", // ☃.com in punycode + match: true, + }, + { + input: "Asdf.com", // uppercase character + match: true, + }, + } + r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`) + for i := range hostcases { + checkRegexp(t, r, hostcases[i]) + } +} + +func TestFullNameRegexp(t *testing.T) { + if anchoredNameRegexp.NumSubexp() != 2 { + t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", + anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) + } + + testcases := []regexpMatch{ + { + input: "", + match: false, + }, + { + input: "short", + match: true, + subs: []string{"", "short"}, + }, + { + input: "simple/name", + match: true, + subs: []string{"simple", "name"}, + }, + { + input: "library/ubuntu", + match: true, + subs: []string{"library", "ubuntu"}, + }, + { + input: "docker/stevvooe/app", + match: true, + subs: []string{"docker", "stevvooe/app"}, + }, + { + input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", + match: true, + subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, + }, + { + input: "aa/aa/bb/bb/bb", + match: true, + subs: []string{"aa", "aa/bb/bb/bb"}, + }, + { + input: "a/a/a/a", + match: true, + subs: []string{"a", "a/a/a"}, + }, + { + input: "a/a/a/a/", + match: false, + }, + { + input: "a//a/a", + match: false, + }, + { + input: "a", + match: true, + subs: []string{"", "a"}, + }, + { + input: "a/aa", + match: true, + subs: []string{"a", "aa"}, + }, + { + input: "a/aa/a", + match: true, + subs: []string{"a", "aa/a"}, + }, + { + input: "foo.com", + match: true, + subs: []string{"", "foo.com"}, + }, + { + input: "foo.com/", + match: false, + }, + { + input: "foo.com:8080/bar", + match: true, + subs: []string{"foo.com:8080", "bar"}, + }, + { + input: "foo.com:http/bar", + match: false, + }, + { + input: "foo.com/bar", + match: true, + subs: []string{"foo.com", "bar"}, + }, + { + input: "foo.com/bar/baz", + match: true, + subs: []string{"foo.com", "bar/baz"}, + }, + { + input: "localhost:8080/bar", + match: true, + subs: []string{"localhost:8080", "bar"}, + }, + { + input: "sub-dom1.foo.com/bar/baz/quux", + match: true, + subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, + }, + { + input: "blog.foo.com/bar/baz", + match: true, + subs: []string{"blog.foo.com", "bar/baz"}, + }, + { + input: "a^a", + match: false, + }, + { + input: "aa/asdf$$^/aa", + match: false, + }, + { + input: "asdf$$^/aa", + match: false, + }, + { + input: "aa-a/a", + match: true, + subs: []string{"aa-a", "a"}, + }, + { + input: strings.Repeat("a/", 128) + "a", + match: true, + subs: []string{"a", strings.Repeat("a/", 127) + "a"}, + }, + { + input: "a-/a/a/a", + match: false, + }, + { + input: "foo.com/a-/a/a", + match: false, + }, + { + input: "-foo/bar", + match: false, + }, + { + input: "foo/bar-", + match: false, + }, + { + input: "foo-/bar", + match: false, + }, + { + input: "foo/-bar", + match: false, + }, + { + input: "_foo/bar", + match: false, + }, + { + input: "foo_bar", + match: true, + subs: []string{"", "foo_bar"}, + }, + { + input: "foo_bar.com", + match: true, + subs: []string{"", "foo_bar.com"}, + }, + { + input: "foo_bar.com:8080", + match: false, + }, + { + input: "foo_bar.com:8080/app", + match: false, + }, + { + input: "foo.com/foo_bar", + match: true, + subs: []string{"foo.com", "foo_bar"}, + }, + { + input: "____/____", + match: false, + }, + { + input: "_docker/_docker", + match: false, + }, + { + input: "docker_/docker_", + match: false, + }, + { + input: "b.gcr.io/test.example.com/my-app", + match: true, + subs: []string{"b.gcr.io", "test.example.com/my-app"}, + }, + { + input: "xn--n3h.com/myimage", // ☃.com in punycode + match: true, + subs: []string{"xn--n3h.com", "myimage"}, + }, + { + input: "xn--7o8h.com/myimage", // 🐳.com in punycode + match: true, + subs: []string{"xn--7o8h.com", "myimage"}, + }, + { + input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode + match: true, + subs: []string{"example.com", "xn--7o8h.com/myimage"}, + }, + { + input: "example.com/some_separator__underscore/myimage", + match: true, + subs: []string{"example.com", "some_separator__underscore/myimage"}, + }, + { + input: "example.com/__underscore/myimage", + match: false, + }, + { + input: "example.com/..dots/myimage", + match: false, + }, + { + input: "example.com/.dots/myimage", + match: false, + }, + { + input: "example.com/nodouble..dots/myimage", + match: false, + }, + { + input: "example.com/nodouble..dots/myimage", + match: false, + }, + { + input: "docker./docker", + match: false, + }, + { + input: ".docker/docker", + match: false, + }, + { + input: "docker-/docker", + match: false, + }, + { + input: "-docker/docker", + match: false, + }, + { + input: "do..cker/docker", + match: false, + }, + { + input: "do__cker:8080/docker", + match: false, + }, + { + input: "do__cker/docker", + match: true, + subs: []string{"", "do__cker/docker"}, + }, + { + input: "b.gcr.io/test.example.com/my-app", + match: true, + subs: []string{"b.gcr.io", "test.example.com/my-app"}, + }, + { + input: "registry.io/foo/project--id.module--name.ver---sion--name", + match: true, + subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, + }, + { + input: "Asdf.com/foo/bar", // uppercase character in hostname + match: true, + }, + { + input: "Foo/FarB", // uppercase characters in remote name + match: false, + }, + } + for i := range testcases { + checkRegexp(t, anchoredNameRegexp, testcases[i]) + } +} + +func TestReferenceRegexp(t *testing.T) { + if ReferenceRegexp.NumSubexp() != 3 { + t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3", + ReferenceRegexp, ReferenceRegexp.NumSubexp()) + } + + testcases := []regexpMatch{ + { + input: "registry.com:8080/myapp:tag", + match: true, + subs: []string{"registry.com:8080/myapp", "tag", ""}, + }, + { + input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "registry.com:8080/myapp@sha256:badbadbadbad", + match: false, + }, + { + input: "registry.com:8080/myapp:invalid~tag", + match: false, + }, + { + input: "bad_hostname.com:8080/myapp:tag", + match: false, + }, + { + input:// localhost treated as name, missing tag with 8080 as tag + "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: false, + }, + { + // localhost will be treated as an image name without a host + input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "registry.com:8080/myapp@bad", + match: false, + }, + { + input: "registry.com:8080/myapp@2bad", + match: false, // TODO(dmcgowan): Support this as valid + }, + } + + for i := range testcases { + checkRegexp(t, ReferenceRegexp, testcases[i]) + } + +} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go new file mode 100644 index 0000000..1ede31e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry.go @@ -0,0 +1,97 @@ +package distribution + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. + Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name reference.Named) (Repository, error) + + // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. + Repositories(ctx context.Context, repos []string, last string) (n int, err error) + + // Blobs returns a blob enumerator to access all blobs + Blobs() BlobEnumerator + + // BlobStatter returns a BlobStatter to control + BlobStatter() BlobStatter +} + +// RepositoryEnumerator describes an operation to enumerate repositories +type RepositoryEnumerator interface { + Enumerate(ctx context.Context, ingester func(string) error) error +} + +// ManifestServiceOption is a function argument for Manifest Service methods +type ManifestServiceOption interface { + Apply(ManifestService) error +} + +// WithTag allows a tag to be passed into Put +func WithTag(tag string) ManifestServiceOption { + return WithTagOption{tag} +} + +// WithTagOption holds a tag +type WithTagOption struct{ Tag string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithTagOption) Apply(m ManifestService) error { + // no implementation + return nil +} + +// Repository is a named collection of manifests and layers. +type Repository interface { + // Named returns the name of the repository. + Named() reference.Named + + // Manifests returns a reference to this repository's manifest service. + // with the supplied options applied. + Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) + + // Blobs returns a reference to this repository's blob service. + Blobs(ctx context.Context) BlobStore + + // TODO(stevvooe): The above BlobStore return can probably be relaxed to + // be a BlobService for use with clients. This will allow such + // implementations to avoid implementing ServeBlob. + + // Tags returns a reference to this repositories tag service + Tags(ctx context.Context) TagService +} + +// TODO(stevvooe): Must add close methods to all these. May want to change the +// way instances are created to better reflect internal dependency +// relationships. diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go new file mode 100644 index 0000000..6d9bb4b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -0,0 +1,267 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returned the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + +// WithDetail creates a new Error struct based on the passed-in info and +// set the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + +// Error returns a human readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often captilized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable decription of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the errors purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition. + HTTPStatusCode int +} + +// ParseErrorCode returns the value by the string error code. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors. +func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts slice of error, ErrorCode or Error into a +// slice of Error - then serializes +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr.(type) { + case ErrorCode: + err = daErr.(ErrorCode).WithDetail(nil) + case Error: + err = daErr.(Error) + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + // If the Error struct was setup and they forgot to set the + // Message field (meaning its "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Error's w/o details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Error's w/ details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go new file mode 100644 index 0000000..54e7a73 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go @@ -0,0 +1,185 @@ +package errcode + +import ( + "encoding/json" + "net/http" + "reflect" + "strings" + "testing" +) + +// TestErrorsManagement does a quick check of the Errors type to ensure that +// members are properly pushed and marshaled. +var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{ + Value: "TEST1", + Message: "test error 1", + Description: `Just a test message #1.`, + HTTPStatusCode: http.StatusInternalServerError, +}) + +var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{ + Value: "TEST2", + Message: "test error 2", + Description: `Just a test message #2.`, + HTTPStatusCode: http.StatusNotFound, +}) + +var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{ + Value: "TEST3", + Message: "Sorry %q isn't valid", + Description: `Just a test message #3.`, + HTTPStatusCode: http.StatusNotFound, +}) + +// TestErrorCodes ensures that error code format, mappings and +// marshaling/unmarshaling. round trips are stable. +func TestErrorCodes(t *testing.T) { + if len(errorCodeToDescriptors) == 0 { + t.Fatal("errors aren't loaded!") + } + + for ec, desc := range errorCodeToDescriptors { + if ec != desc.Code { + t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code) + } + + if idToDescriptors[desc.Value].Code != ec { + t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec) + } + + if ec.Message() != desc.Message { + t.Fatalf("ec.Message doesn't mtach desc.Message: %q != %q", ec.Message(), desc.Message) + } + + // Test (de)serializing the ErrorCode + p, err := json.Marshal(ec) + if err != nil { + t.Fatalf("couldn't marshal ec %v: %v", ec, err) + } + + if len(p) <= 0 { + t.Fatalf("expected content in marshaled before for error code %v", ec) + } + + // First, unmarshal to interface and ensure we have a string. + var ecUnspecified interface{} + if err := json.Unmarshal(p, &ecUnspecified); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", ec, err) + } + + if _, ok := ecUnspecified.(string); !ok { + t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified) + } + + // Now, unmarshal with the error code type and ensure they are equal + var ecUnmarshaled ErrorCode + if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", ec, err) + } + + if ecUnmarshaled != ec { + t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) + } + + expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1)) + if ec.Error() != expectedErrorString { + t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString) + } + } + +} + +func TestErrorsManagement(t *testing.T) { + var errs Errors + + errs = append(errs, ErrorCodeTest1) + errs = append(errs, ErrorCodeTest2.WithDetail( + map[string]interface{}{"digest": "sometestblobsumdoesntmatter"})) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE")) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data")) + + p, err := json.Marshal(errs) + + if err != nil { + t.Fatalf("error marashaling errors: %v", err) + } + + expectedJSON := `{"errors":[` + + `{"code":"TEST1","message":"test error 1"},` + + `{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` + + `]}` + + if string(p) != expectedJSON { + t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON) + } + + // Now test the reverse + var unmarshaled Errors + if err := json.Unmarshal(p, &unmarshaled); err != nil { + t.Fatalf("unexpected error unmarshaling error envelope: %v", err) + } + + if !reflect.DeepEqual(unmarshaled, errs) { + t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) + } + + // Test the arg substitution stuff + e1 := unmarshaled[3].(Error) + exp1 := `Sorry "BOOGIE" isn't valid` + if e1.Message != exp1 { + t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1) + } + + exp1 = "test3: " + exp1 + if e1.Error() != exp1 { + t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1) + } + + // Test again with a single value this time + errs = Errors{ErrorCodeUnknown} + expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" + p, err = json.Marshal(errs) + + if err != nil { + t.Fatalf("error marashaling errors: %v", err) + } + + if string(p) != expectedJSON { + t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) + } + + // Now test the reverse + unmarshaled = nil + if err := json.Unmarshal(p, &unmarshaled); err != nil { + t.Fatalf("unexpected error unmarshaling error envelope: %v", err) + } + + if !reflect.DeepEqual(unmarshaled, errs) { + t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) + } + + // Verify that calling WithArgs() more than once does the right thing. + // Meaning creates a new Error and uses the ErrorCode Message + e1 = ErrorCodeTest3.WithArgs("test1") + e2 := e1.WithArgs("test2") + if &e1 == &e2 { + t.Fatalf("args: e2 and e1 should not be the same, but they are") + } + if e2.Message != `Sorry "test2" isn't valid` { + t.Fatalf("e2 had wrong message: %q", e2.Message) + } + + // Verify that calling WithDetail() more than once does the right thing. + // Meaning creates a new Error and overwrites the old detail field + e1 = ErrorCodeTest3.WithDetail("stuff1") + e2 = e1.WithDetail("stuff2") + if &e1 == &e2 { + t.Fatalf("detail: e2 and e1 should not be the same, but they are") + } + if e2.Detail != `stuff2` { + t.Fatalf("e2 had wrong detail: %q", e2.Detail) + } + +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go new file mode 100644 index 0000000..49a64a8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go @@ -0,0 +1,44 @@ +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope. +func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. + err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + if err := json.NewEncoder(w).Encode(err); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 0000000..d1e8826 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. + ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 0000000..9979aba --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1596 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: reference.NameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: reference.TagRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. It not present, all entries will be returned.", + Format: "", + Required: false, + }, + { + Name: "last", + Type: "string", + Description: "Result set will include values lexically after last.", + Format: "", + Required: false, + }, + } + + unauthorizedResponseDescriptor = ResponseDescriptor{ + Name: "Authentication Required", + StatusCode: http.StatusUnauthorized, + Description: "The client is not authenticated.", + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, + }, + } + + repositoryNotFoundResponseDescriptor = ResponseDescriptor{ + Name: "No Such Repository Error", + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + } + + deniedResponseDescriptor = ResponseDescriptor{ + Name: "Access Denied", + StatusCode: http.StatusForbidden, + Description: "The client does not have required access to the repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeDenied, + }, + } + + tooManyRequestsDescriptor = ResponseDescriptor{ + Name: "Too Many Requests", + StatusCode: http.StatusTooManyRequests, + Description: "The client made too many requests within a time interval.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeTooManyRequests, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor +}{ + RouteDescriptors: routeDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. + // These names a should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readalbe description of the object + // targeted by the endpoint. + Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. + Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats. + Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. + Method string + + // Description should provide an overview of the functionality provided by + // the covered method, suitable for use in documentation. Use of markdown + // here is encouraged. + Description string + + // Requests is a slice of request descriptors enumerating how this + // endpoint may be used. + Requests []RequestDescriptor +} + +// RequestDescriptor covers a particular set of headers and parameters that +// can be carried out with the parent method. Its most helpful to have one +// RequestDescriptor per API use case. +type RequestDescriptor struct { + // Name provides a short identifier for the request, usable as a title or + // to provide quick context for the particular request. + Name string + + // Description should cover the requests purpose, covering any details for + // this particular use case. + Description string + + // Headers describes headers that must be used with the HTTP request. + Headers []ParameterDescriptor + + // PathParameters enumerate the parameterized path components for the + // given request, as defined in the route's regular expression. + PathParameters []ParameterDescriptor + + // QueryParameters provides a list of query parameters for the given + // request. + QueryParameters []ParameterDescriptor + + // Body describes the format of the request body. + Body BodyDescriptor + + // Successes enumerates the possible responses that are considered to be + // the result of a successful request. + Successes []ResponseDescriptor + + // Failures covers the possible failures from this particular request. + Failures []ResponseDescriptor +} + +// ResponseDescriptor describes the components of an API response. +type ResponseDescriptor struct { + // Name provides a short identifier for the response, usable as a title or + // to provide quick context for the particular response. + Name string + + // Description should provide a brief overview of the role of the + // response. + Description string + + // StatusCode specifies the status received by this particular response. + StatusCode int + + // Headers covers any headers that may be returned from the response. + Headers []ParameterDescriptor + + // Fields describes any fields that may be present in the response. + Fields []ParameterDescriptor + + // ErrorCodes enumerates the error codes that may be returned along with + // the response. + ErrorCodes []errcode.ErrorCode + + // Body describes the body of the response, if any. + Body BodyDescriptor +} + +// BodyDescriptor describes a request body and its expected content type. For +// the most part, it should be example json or some placeholder for body +// data in documentation. +type BodyDescriptor struct { + ContentType string + Format string +} + +// ParameterDescriptor describes the format of a request parameter, which may +// be a header, path parameter or query parameter. +type ParameterDescriptor struct { + // Name is the name of the parameter, either of the path component or + // query parameter. + Name string + + // Type specifies the type of the parameter, such as string, integer, etc. + Type string + + // Description provides a human-readable description of the parameter. + Description string + + // Required means the field is required when set. + Required bool + + // Format is a specifying the string format accepted by this parameter. + Format string + + // Regexp is a compiled regular expression that can be used to validate + // the contents of the parameter. + Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + unauthorizedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Name: "Tags", + Description: "Return all tags for the repository", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ], +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update, delete and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... + ] +}`, + }, + }, + { + Name: "Not allowed", + Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Not allowed", + Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octect-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been created in the registry and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Initiate Resumable Blob Upload", + Description: "Initiate a resumable blob upload with an empty request body.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Format: "0-0", + Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Required: true, + Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding the length of the request body.", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Chunk Accepted", + Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", + Requests: []RequestDescriptor{ + { + Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "string", + Format: "", + Regexp: digest.DigestRegexp, + Required: true, + Description: `Digest of uploaded blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Complete", + Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + Description: "The canonical location of the blob for retrieval", + }, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeUnsupported, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", + Requests: []RequestDescriptor{ + { + Description: "Cancel the upload specified by `uuid`.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Deleted", + Description: "The upload has been successfully deleted.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "An error was encountered processing the delete. The client may ignore this error.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameCatalog, + Path: "/v2/_catalog", + Entity: "Catalog", + Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve a sorted, json list of repositories available in the registry.", + Requests: []RequestDescriptor{ + { + Name: "Catalog Fetch", + Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", + Successes: []ResponseDescriptor{ + { + Description: "Returns the unabridged list of repositories as a json response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] +}`, + }, + }, + }, + }, + { + Name: "Catalog Fetch Paginated", + Description: "Return the specified portion of repositories.", + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +}`, + }, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + }, + }, + }, + }, + }, + }, + }, +} + +var routeDescriptorsMap map[string]RouteDescriptor + +func init() { + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) + + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] = descriptor + } +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go new file mode 100644 index 0000000..cde0119 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/doc.go @@ -0,0 +1,9 @@ +// Package v2 describes routes, urls and the error codes used in the Docker +// Registry JSON HTTP API V2. In addition to declarations, descriptors are +// provided for routes and error codes that can be used for implementation and +// automatically generating documentation. +// +// Definitions here are considered to be locked down for the V2 registry api. +// Any changes must be considered carefully and should not proceed without a +// change proposal in docker core. +package v2 diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go new file mode 100644 index 0000000..97d6923 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go @@ -0,0 +1,136 @@ +package v2 + +import ( + "net/http" + + "github.com/docker/distribution/registry/api/errcode" +) + +const errGroup = "registry.api.v2" + +var ( + // ErrorCodeDigestInvalid is returned when uploading a blob if the + // provided digest does not match the blob contents. + ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeSizeInvalid is returned when uploading a blob if the provided + ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeNameInvalid is returned when the name in the manifest does not + // match the provided name. + ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeTagInvalid is returned when the tag in the manifest does not + // match the provided tag. + ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeNameUnknown when the repository name is not known. + ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestUnknown returned when image manifest is unknown. + ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag is unknown to the repository.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestInvalid returned when an image manifest is invalid, + // typically during a PUT operation. This error encompasses all errors + // encountered during manifest validation that aren't signature errors. + ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + the failed validation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestUnverified is returned when the manifest fails + // signature verification. + ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. + ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, + }) +) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go new file mode 100644 index 0000000..9bc41a3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go @@ -0,0 +1,161 @@ +package v2 + +import ( + "fmt" + "regexp" + "strings" + "unicode" +) + +var ( + // according to rfc7230 + reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) + reQuotedValue = regexp.MustCompile(`^[^\\"]+`) + reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) +) + +// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains +// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The +// function parses only the first element of the list, which is set by the very first proxy. It returns a map +// of corresponding key-value pairs and an unparsed slice of the input string. +// +// Examples of Forwarded header values: +// +// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown +// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" +// +// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into +// {"for": "192.0.2.43:443", "host": "registry.example.org"}. +func parseForwardedHeader(forwarded string) (map[string]string, string, error) { + // Following are states of forwarded header parser. Any state could transition to a failure. + const ( + // terminating state; can transition to Parameter + stateElement = iota + // terminating state; can transition to KeyValueDelimiter + stateParameter + // can transition to Value + stateKeyValueDelimiter + // can transition to one of { QuotedValue, PairEnd } + stateValue + // can transition to one of { EscapedCharacter, PairEnd } + stateQuotedValue + // can transition to one of { QuotedValue } + stateEscapedCharacter + // terminating state; can transition to one of { Parameter, Element } + statePairEnd + ) + + var ( + parameter string + value string + parse = forwarded[:] + res = map[string]string{} + state = stateElement + ) + +Loop: + for { + // skip spaces unless in quoted value + if state != stateQuotedValue && state != stateEscapedCharacter { + parse = strings.TrimLeftFunc(parse, unicode.IsSpace) + } + + if len(parse) == 0 { + if state != stateElement && state != statePairEnd && state != stateParameter { + return nil, parse, fmt.Errorf("unexpected end of input") + } + // terminating + break + } + + switch state { + // terminate at list element delimiter + case stateElement: + if parse[0] == ',' { + parse = parse[1:] + break Loop + } + state = stateParameter + + // parse parameter (the key of key-value pair) + case stateParameter: + match := reToken.FindString(parse) + if len(match) == 0 { + return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) + } + parameter = strings.ToLower(match) + parse = parse[len(match):] + state = stateKeyValueDelimiter + + // parse '=' + case stateKeyValueDelimiter: + if parse[0] != '=' { + return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) + } + parse = parse[1:] + state = stateValue + + // parse value or quoted value + case stateValue: + if parse[0] == '"' { + parse = parse[1:] + state = stateQuotedValue + } else { + value = reToken.FindString(parse) + if len(value) == 0 { + return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) + } + if _, exists := res[parameter]; exists { + return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) + } + res[parameter] = value + parse = parse[len(value):] + value = "" + state = statePairEnd + } + + // parse a part of quoted value until the first backslash + case stateQuotedValue: + match := reQuotedValue.FindString(parse) + value += match + parse = parse[len(match):] + switch { + case len(parse) == 0: + return nil, parse, fmt.Errorf("unterminated quoted string") + case parse[0] == '"': + res[parameter] = value + value = "" + parse = parse[1:] + state = statePairEnd + case parse[0] == '\\': + parse = parse[1:] + state = stateEscapedCharacter + } + + // parse escaped character in a quoted string, ignore the backslash + // transition back to QuotedValue state + case stateEscapedCharacter: + c := reEscapedCharacter.FindString(parse) + if len(c) == 0 { + return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) + } + value += c + parse = parse[1:] + state = stateQuotedValue + + // expect either a new key-value pair, new list or end of input + case statePairEnd: + switch parse[0] { + case ';': + parse = parse[1:] + state = stateParameter + case ',': + state = stateElement + default: + return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) + } + } + } + + return res, parse, nil +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go new file mode 100644 index 0000000..b8c3749 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go @@ -0,0 +1,161 @@ +package v2 + +import ( + "testing" +) + +func TestParseForwardedHeader(t *testing.T) { + for _, tc := range []struct { + name string + raw string + expected map[string]string + expectedRest string + expectedError bool + }{ + { + name: "empty", + raw: "", + }, + { + name: "one pair", + raw: " key = value ", + expected: map[string]string{"key": "value"}, + }, + { + name: "two pairs", + raw: " key1 = value1; key2=value2", + expected: map[string]string{"key1": "value1", "key2": "value2"}, + }, + { + name: "uppercase parameter", + raw: "KeY=VaL", + expected: map[string]string{"key": "VaL"}, + }, + { + name: "missing key=value pair - be tolerant", + raw: "key=val;", + expected: map[string]string{"key": "val"}, + }, + { + name: "quoted values", + raw: `key="val";param = "[[ $((1 + 1)) == 3 ]] && echo panic!;" ; p=" abcd "`, + expected: map[string]string{"key": "val", "param": "[[ $((1 + 1)) == 3 ]] && echo panic!;", "p": " abcd "}, + }, + { + name: "empty quoted value", + raw: `key=""`, + expected: map[string]string{"key": ""}, + }, + { + name: "quoted double quotes", + raw: `key="\"value\""`, + expected: map[string]string{"key": `"value"`}, + }, + { + name: "quoted backslash", + raw: `key="\"\\\""`, + expected: map[string]string{"key": `"\"`}, + }, + { + name: "ignore subsequent elements", + raw: "key=a, param= b", + expected: map[string]string{"key": "a"}, + expectedRest: " param= b", + }, + { + name: "empty element - be tolerant", + raw: " , key=val", + expectedRest: " key=val", + }, + { + name: "obscure key", + raw: `ob₷C&r€ = value`, + expected: map[string]string{`ob₷c&r€`: "value"}, + }, + { + name: "duplicate parameter", + raw: "key=a; p=b; key=c", + expectedError: true, + }, + { + name: "empty parameter", + raw: "=value", + expectedError: true, + }, + { + name: "empty value", + raw: "key= ", + expectedError: true, + }, + { + name: "empty value before a new element ", + raw: "key=,", + expectedError: true, + }, + { + name: "empty value before a new pair", + raw: "key=;", + expectedError: true, + }, + { + name: "just parameter", + raw: "key", + expectedError: true, + }, + { + name: "missing key-value", + raw: "a=b;;", + expectedError: true, + }, + { + name: "unclosed quoted value", + raw: `key="value`, + expectedError: true, + }, + { + name: "escaped terminating dquote", + raw: `key="value\"`, + expectedError: true, + }, + { + name: "just a quoted value", + raw: `"key=val"`, + expectedError: true, + }, + { + name: "quoted key", + raw: `"key"=val`, + expectedError: true, + }, + } { + parsed, rest, err := parseForwardedHeader(tc.raw) + if err != nil && !tc.expectedError { + t.Errorf("[%s] got unexpected error: %v", tc.name, err) + } + if err == nil && tc.expectedError { + t.Errorf("[%s] got unexpected non-error", tc.name) + } + if err != nil || tc.expectedError { + continue + } + for key, value := range tc.expected { + v, exists := parsed[key] + if !exists { + t.Errorf("[%s] missing expected parameter %q", tc.name, key) + continue + } + if v != value { + t.Errorf("[%s] got unexpected value for parameter %q: %q != %q", tc.name, key, v, value) + } + } + for key, value := range parsed { + if _, exists := tc.expected[key]; !exists { + t.Errorf("[%s] got unexpected key/value pair: %q=%q", tc.name, key, value) + } + } + + if rest != tc.expectedRest { + t.Errorf("[%s] got unexpected unparsed string: %q != %q", tc.name, rest, tc.expectedRest) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go new file mode 100644 index 0000000..5b80d5b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go @@ -0,0 +1,49 @@ +package v2 + +import "github.com/gorilla/mux" + +// The following are definitions of the name under which all V2 routes are +// registered. These symbols can be used to look up a route based on the name. +const ( + RouteNameBase = "base" + RouteNameManifest = "manifest" + RouteNameTags = "tags" + RouteNameBlob = "blob" + RouteNameBlobUpload = "blob-upload" + RouteNameBlobUploadChunk = "blob-upload-chunk" + RouteNameCatalog = "catalog" +) + +var allEndpoints = []string{ + RouteNameManifest, + RouteNameCatalog, + RouteNameTags, + RouteNameBlob, + RouteNameBlobUpload, + RouteNameBlobUploadChunk, +} + +// Router builds a gorilla router with named routes for the various API +// methods. This can be used directly by both server implementations and +// clients. +func Router() *mux.Router { + return RouterWithPrefix("") +} + +// RouterWithPrefix builds a gorilla router with a configured prefix +// on all routes. +func RouterWithPrefix(prefix string) *mux.Router { + rootRouter := mux.NewRouter() + router := rootRouter + if prefix != "" { + router = router.PathPrefix(prefix).Subrouter() + } + + router.StrictSlash(true) + + for _, descriptor := range routeDescriptors { + router.Path(descriptor.Path).Name(descriptor.Name) + } + + return rootRouter +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go b/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go new file mode 100644 index 0000000..f632d98 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go @@ -0,0 +1,355 @@ +package v2 + +import ( + "encoding/json" + "fmt" + "math/rand" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + "time" + + "github.com/gorilla/mux" +) + +type routeTestCase struct { + RequestURI string + ExpectedURI string + Vars map[string]string + RouteName string + StatusCode int +} + +// TestRouter registers a test handler with all the routes and ensures that +// each route returns the expected path variables. Not method verification is +// present. This not meant to be exhaustive but as check to ensure that the +// expected variables are extracted. +// +// This may go away as the application structure comes together. +func TestRouter(t *testing.T) { + testCases := []routeTestCase{ + { + RouteName: RouteNameBase, + RequestURI: "/v2/", + Vars: map[string]string{}, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/manifests/bar", + Vars: map[string]string{ + "name": "foo", + "reference": "bar", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/tag", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "tag", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "sha256:abcdef01234567890", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/foo/bar/tags/list", + Vars: map[string]string{ + "name": "foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/baz/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar/baz", + }, + }, + { + RouteName: RouteNameBlob, + RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", + Vars: map[string]string{ + "name": "foo/bar", + "digest": "sha256:abcdef0919234", + }, + }, + { + RouteName: RouteNameBlobUpload, + RequestURI: "/v2/foo/bar/blobs/uploads/", + Vars: map[string]string{ + "name": "foo/bar", + }, + }, + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/uuid", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "uuid", + }, + }, + { + // support uuid proper + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + }, + }, + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + }, + }, + { + // supports urlsafe base64 + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + }, + }, + { + // does not match + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", + StatusCode: http.StatusNotFound, + }, + { + // Check ambiguity: ensure we can distinguish between tags for + // "foo/bar/image/image" and image for "foo/bar/image" with tag + // "tags" + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/manifests/tags", + Vars: map[string]string{ + "name": "foo/bar/manifests", + "reference": "tags", + }, + }, + { + // This case presents an ambiguity between foo/bar with tag="tags" + // and list tags for "foo/bar/manifest" + RouteName: RouteNameTags, + RequestURI: "/v2/foo/bar/manifests/tags/list", + Vars: map[string]string{ + "name": "foo/bar/manifests", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/locahost:8080/foo/bar/baz/manifests/tag", + Vars: map[string]string{ + "name": "locahost:8080/foo/bar/baz", + "reference": "tag", + }, + }, + } + + checkTestRouter(t, testCases, "", true) + checkTestRouter(t, testCases, "/prefix/", true) +} + +func TestRouterWithPathTraversals(t *testing.T) { + testCases := []routeTestCase{ + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/../bar/baz/tags/list", + ExpectedURI: "/v2/bar/baz/tags/list", + Vars: map[string]string{ + "name": "bar/baz", + }, + }, + } + checkTestRouter(t, testCases, "", false) +} + +func TestRouterWithBadCharacters(t *testing.T) { + if testing.Short() { + testCases := []routeTestCase{ + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/不bar/tags/list", + StatusCode: http.StatusNotFound, + }, + } + checkTestRouter(t, testCases, "", true) + } else { + // in the long version we're going to fuzz the router + // with random UTF8 characters not in the 128 bit ASCII range. + // These are not valid characters for the router and we expect + // 404s on every test. + rand.Seed(time.Now().UTC().UnixNano()) + testCases := make([]routeTestCase, 1000) + for idx := range testCases { + testCases[idx] = routeTestCase{ + RouteName: RouteNameTags, + RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)), + StatusCode: http.StatusNotFound, + } + } + checkTestRouter(t, testCases, "", true) + } +} + +func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) { + router := RouterWithPrefix(prefix) + + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + testCase := routeTestCase{ + RequestURI: r.RequestURI, + Vars: mux.Vars(r), + RouteName: mux.CurrentRoute(r).GetName(), + } + + enc := json.NewEncoder(w) + + if err := enc.Encode(testCase); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) + + // Startup test server + server := httptest.NewServer(router) + + for _, testcase := range testCases { + testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI + // Register the endpoint + route := router.GetRoute(testcase.RouteName) + if route == nil { + t.Fatalf("route for name %q not found", testcase.RouteName) + } + + route.Handler(testHandler) + + u := server.URL + testcase.RequestURI + + resp, err := http.Get(u) + + if err != nil { + t.Fatalf("error issuing get request: %v", err) + } + + if testcase.StatusCode == 0 { + // Override default, zero-value + testcase.StatusCode = http.StatusOK + } + if testcase.ExpectedURI == "" { + // Override default, zero-value + testcase.ExpectedURI = testcase.RequestURI + } + + if resp.StatusCode != testcase.StatusCode { + t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) + } + + if testcase.StatusCode != http.StatusOK { + resp.Body.Close() + // We don't care about json response. + continue + } + + dec := json.NewDecoder(resp.Body) + + var actualRouteInfo routeTestCase + if err := dec.Decode(&actualRouteInfo); err != nil { + t.Fatalf("error reading json response: %v", err) + } + // Needs to be set out of band + actualRouteInfo.StatusCode = resp.StatusCode + + if actualRouteInfo.RequestURI != testcase.ExpectedURI { + t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI) + } + + if actualRouteInfo.RouteName != testcase.RouteName { + t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) + } + + // when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want + // that to make the comparison fail. We're otherwise done with the testcase so empty the + // testcase.ExpectedURI + testcase.ExpectedURI = "" + if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { + t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) + } + + resp.Body.Close() + } + +} + +// -------------- START LICENSED CODE -------------- +// The following code is derivative of https://github.com/google/gofuzz +// gofuzz is licensed under the Apache License, Version 2.0, January 2004, +// a copy of which can be found in the LICENSE file at the root of this +// repository. + +// These functions allow us to generate strings containing only multibyte +// characters that are invalid in our URLs. They are used above for fuzzing +// to ensure we always get 404s on these invalid strings +type charRange struct { + first, last rune +} + +// choose returns a random unicode character from the given range, using the +// given randomness source. +func (r *charRange) choose() rune { + count := int64(r.last - r.first) + return r.first + rune(rand.Int63n(count)) +} + +var unicodeRanges = []charRange{ + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +func randomString(length int) string { + runes := make([]rune, length) + for i := range runes { + runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose() + } + return string(runes) +} + +// -------------- END LICENSED CODE -------------- diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go new file mode 100644 index 0000000..5e24ca9 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -0,0 +1,263 @@ +package v2 + +import ( + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/gorilla/mux" +) + +// URLBuilder creates registry API urls from a single base endpoint. It can be +// used to create urls for use in a registry client or server. +// +// All urls will be created from the given base, including the api version. +// For example, if a root of "/foo/" is provided, urls generated will be fall +// under "/foo/v2/...". Most application will only provide a schema, host and +// port, such as "https://localhost:5000/". +type URLBuilder struct { + root *url.URL // url root (ie http://localhost/) + router *mux.Router + relative bool +} + +// NewURLBuilder creates a URLBuilder with provided root url object. +func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { + return &URLBuilder{ + root: root, + router: Router(), + relative: relative, + } +} + +// NewURLBuilderFromString workes identically to NewURLBuilder except it takes +// a string argument for the root, returning an error if it is not a valid +// url. +func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { + u, err := url.Parse(root) + if err != nil { + return nil, err + } + + return NewURLBuilder(u, relative), nil +} + +// NewURLBuilderFromRequest uses information from an *http.Request to +// construct the root url. +func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { + var ( + scheme = "http" + host = r.Host + ) + + if r.TLS != nil { + scheme = "https" + } else if len(r.URL.Scheme) > 0 { + scheme = r.URL.Scheme + } + + // Handle fowarded headers + // Prefer "Forwarded" header as defined by rfc7239 if given + // see https://tools.ietf.org/html/rfc7239 + if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 { + forwardedHeader, _, err := parseForwardedHeader(forwarded) + if err == nil { + if fproto := forwardedHeader["proto"]; len(fproto) > 0 { + scheme = fproto + } + if fhost := forwardedHeader["host"]; len(fhost) > 0 { + host = fhost + } + } + } else { + if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 { + scheme = forwardedProto + } + if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 { + // According to the Apache mod_proxy docs, X-Forwarded-Host can be a + // comma-separated list of hosts, to which each proxy appends the + // requested host. We want to grab the first from this comma-separated + // list. + hosts := strings.SplitN(forwardedHost, ",", 2) + host = strings.TrimSpace(hosts[0]) + } + } + + basePath := routeDescriptorsMap[RouteNameBase].Path + + requestPath := r.URL.Path + index := strings.Index(requestPath, basePath) + + u := &url.URL{ + Scheme: scheme, + Host: host, + } + + if index > 0 { + // N.B. index+1 is important because we want to include the trailing / + u.Path = requestPath[0 : index+1] + } + + return NewURLBuilder(u, relative) +} + +// BuildBaseURL constructs a base url for the API, typically just "/v2/". +func (ub *URLBuilder) BuildBaseURL() (string, error) { + route := ub.cloneRoute(RouteNameBase) + + baseURL, err := route.URL() + if err != nil { + return "", err + } + + return baseURL.String(), nil +} + +// BuildCatalogURL constructs a url get a catalog of repositories +func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameCatalog) + + catalogURL, err := route.URL() + if err != nil { + return "", err + } + + return appendValuesURL(catalogURL, values...).String(), nil +} + +// BuildTagsURL constructs a url to list the tags in the named repository. +func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { + route := ub.cloneRoute(RouteNameTags) + + tagsURL, err := route.URL("name", name.Name()) + if err != nil { + return "", err + } + + return tagsURL.String(), nil +} + +// BuildManifestURL constructs a url for the manifest identified by name and +// reference. The argument reference may be either a tag or digest. +func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { + route := ub.cloneRoute(RouteNameManifest) + + tagOrDigest := "" + switch v := ref.(type) { + case reference.Tagged: + tagOrDigest = v.Tag() + case reference.Digested: + tagOrDigest = v.Digest().String() + } + + manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) + if err != nil { + return "", err + } + + return manifestURL.String(), nil +} + +// BuildBlobURL constructs the url for the blob identified by name and dgst. +func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { + route := ub.cloneRoute(RouteNameBlob) + + layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) + if err != nil { + return "", err + } + + return layerURL.String(), nil +} + +// BuildBlobUploadURL constructs a url to begin a blob upload in the +// repository identified by name. +func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameBlobUpload) + + uploadURL, err := route.URL("name", name.Name()) + if err != nil { + return "", err + } + + return appendValuesURL(uploadURL, values...).String(), nil +} + +// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, +// including any url values. This should generally not be used by clients, as +// this url is provided by server implementations during the blob upload +// process. +func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameBlobUploadChunk) + + uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) + if err != nil { + return "", err + } + + return appendValuesURL(uploadURL, values...).String(), nil +} + +// clondedRoute returns a clone of the named route from the router. Routes +// must be cloned to avoid modifying them during url generation. +func (ub *URLBuilder) cloneRoute(name string) clonedRoute { + route := new(mux.Route) + root := new(url.URL) + + *route = *ub.router.GetRoute(name) // clone the route + *root = *ub.root + + return clonedRoute{Route: route, root: root, relative: ub.relative} +} + +type clonedRoute struct { + *mux.Route + root *url.URL + relative bool +} + +func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { + routeURL, err := cr.Route.URL(pairs...) + if err != nil { + return nil, err + } + + if cr.relative { + return routeURL, nil + } + + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { + routeURL.Path = routeURL.Path[1:] + } + + url := cr.root.ResolveReference(routeURL) + url.Scheme = cr.root.Scheme + return url, nil +} + +// appendValuesURL appends the parameters to the url. +func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { + merged := u.Query() + + for _, v := range values { + for k, vv := range v { + merged[k] = append(merged[k], vv...) + } + } + + u.RawQuery = merged.Encode() + return u +} + +// appendValues appends the parameters to the url. Panics if the string is not +// a url. +func appendValues(u string, values ...url.Values) string { + up, err := url.Parse(u) + + if err != nil { + panic(err) // should never happen + } + + return appendValuesURL(up, values...).String() +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go new file mode 100644 index 0000000..6d8973f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go @@ -0,0 +1,484 @@ +package v2 + +import ( + "net/http" + "net/url" + "testing" + + "github.com/docker/distribution/reference" +) + +type urlBuilderTestCase struct { + description string + expectedPath string + build func() (string, error) +} + +func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { + fooBarRef, _ := reference.ParseNamed("foo/bar") + return []urlBuilderTestCase{ + { + description: "test base url", + expectedPath: "/v2/", + build: urlBuilder.BuildBaseURL, + }, + { + description: "test tags url", + expectedPath: "/v2/foo/bar/tags/list", + build: func() (string, error) { + return urlBuilder.BuildTagsURL(fooBarRef) + }, + }, + { + description: "test manifest url", + expectedPath: "/v2/foo/bar/manifests/tag", + build: func() (string, error) { + ref, _ := reference.WithTag(fooBarRef, "tag") + return urlBuilder.BuildManifestURL(ref) + }, + }, + { + description: "build blob url", + expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + build: func() (string, error) { + ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") + return urlBuilder.BuildBlobURL(ref) + }, + }, + { + description: "build blob upload url", + expectedPath: "/v2/foo/bar/blobs/uploads/", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL(fooBarRef) + }, + }, + { + description: "build blob upload url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ + "size": []string{"10000"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, + }) + }, + }, + { + description: "build blob upload chunk url", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") + }, + }, + { + description: "build blob upload chunk url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ + "size": []string{"10000"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, + }) + }, + }, + } +} + +// TestURLBuilder tests the various url building functions, ensuring they are +// returning the expected values. +func TestURLBuilder(t *testing.T) { + roots := []string{ + "http://example.com", + "https://example.com", + "http://localhost:5000", + "https://localhost:5443", + } + + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root + expectedURL + } + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } + } + doTest(true) + doTest(false) +} + +func TestURLBuilderWithPrefix(t *testing.T) { + roots := []string{ + "http://example.com/prefix/", + "https://example.com/prefix/", + "http://localhost:5000/prefix/", + "https://localhost:5443/prefix/", + } + + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root[0:len(root)-1] + expectedURL + } + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } + } + doTest(true) + doTest(false) +} + +type builderFromRequestTestCase struct { + request *http.Request + base string +} + +func TestBuilderFromRequest(t *testing.T) { + u, err := url.Parse("http://example.com") + if err != nil { + t.Fatal(err) + } + + testRequests := []struct { + name string + request *http.Request + base string + configHost url.URL + }{ + { + name: "no forwarded header", + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com", + }, + { + name: "https protocol forwarded with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Custom-Forwarded-Proto": []string{"https"}, + }}, + base: "http://example.com", + }, + { + name: "forwarded protocol is the same", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + }}, + base: "https://example.com", + }, + { + name: "forwarded host with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com"}, + }}, + base: "http://first.example.com", + }, + { + name: "forwarded multiple hosts a with non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"}, + }}, + base: "http://first.example.com", + }, + { + name: "host configured in config file takes priority", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"}, + }}, + base: "https://third.example.com:5000", + configHost: url.URL{ + Scheme: "https", + Host: "third.example.com:5000", + }, + }, + { + name: "forwarded host and port with just one non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com:443"}, + }}, + base: "http://first.example.com:443", + }, + { + name: "forwarded port with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"example.com:5000"}, + "X-Forwarded-Port": []string{"5000"}, + }}, + base: "http://example.com:5000", + }, + { + name: "forwarded multiple ports with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Port": []string{"443 , 5001"}, + }}, + base: "http://example.com", + }, + { + name: "forwarded standard port with non-standard headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + "X-Forwarded-Host": []string{"example.com"}, + "X-Forwarded-Port": []string{"443"}, + }}, + base: "https://example.com", + }, + { + name: "forwarded standard port with non-standard headers and explicit port", + request: &http.Request{URL: u, Host: u.Host + ":443", Header: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + "X-Forwarded-Host": []string{u.Host + ":443"}, + "X-Forwarded-Port": []string{"443"}, + }}, + base: "https://example.com:443", + }, + { + name: "several non-standard headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + "X-Forwarded-Host": []string{" first.example.com:12345 "}, + }}, + base: "https://first.example.com:12345", + }, + { + name: "forwarded host with port supplied takes priority", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com:5000"}, + "X-Forwarded-Port": []string{"80"}, + }}, + base: "http://first.example.com:5000", + }, + { + name: "malformed forwarded port", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com"}, + "X-Forwarded-Port": []string{"abcd"}, + }}, + base: "http://first.example.com", + }, + { + name: "forwarded protocol and addr using standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`proto=https;host="192.168.22.30:80"`}, + }}, + base: "https://192.168.22.30:80", + }, + { + name: "forwarded host takes priority over for", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host="reg.example.com:5000";for="192.168.22.30"`}, + }}, + base: "http://reg.example.com:5000", + }, + { + name: "forwarded host and protocol using standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host=reg.example.com;proto=https`}, + }}, + base: "https://reg.example.com", + }, + { + name: "process just the first standard forwarded header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host="reg.example.com:88";proto=http`, `host=reg.example.com;proto=https`}, + }}, + base: "http://reg.example.com:88", + }, + { + name: "process just the first list element of standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host="reg.example.com:443";proto=https, host="reg.example.com:80";proto=http`}, + }}, + base: "https://reg.example.com:443", + }, + { + name: "IPv6 address use host", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`for="2607:f0d0:1002:51::4";host="[2607:f0d0:1002:51::4]:5001"`}, + "X-Forwarded-Port": []string{"5002"}, + }}, + base: "http://[2607:f0d0:1002:51::4]:5001", + }, + { + name: "IPv6 address with port", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host="[2607:f0d0:1002:51::4]:4000"`}, + "X-Forwarded-Port": []string{"5001"}, + }}, + base: "http://[2607:f0d0:1002:51::4]:4000", + }, + { + name: "non-standard and standard forward headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{``}, + "Forwarded": []string{`host=first.example.com; proto=https`}, + }}, + base: "https://first.example.com", + }, + { + name: "standard header takes precedence over non-standard headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`http`}, + "Forwarded": []string{`host=second.example.com; proto=https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "https://second.example.com", + }, + { + name: "incomplete standard header uses default", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "Forwarded": []string{`for=127.0.0.1`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "http://" + u.Host, + }, + { + name: "standard with just proto", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "Forwarded": []string{`proto=https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "https://" + u.Host, + }, + } + + doTest := func(relative bool) { + for _, tr := range testRequests { + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost, relative) + } else { + builder = NewURLBuilderFromRequest(tr.request, relative) + } + + for _, testCase := range makeURLBuilderTestCases(builder) { + buildURL, err := testCase.build() + if err != nil { + t.Fatalf("[relative=%t, request=%q, case=%q]: error building url: %v", relative, tr.name, testCase.description, err) + } + + expectedURL := testCase.expectedPath + if !relative { + expectedURL = tr.base + expectedURL + } + + if buildURL != expectedURL { + t.Errorf("[relative=%t, request=%q, case=%q]: %q != %q", relative, tr.name, testCase.description, buildURL, expectedURL) + } + } + } + } + + doTest(true) + doTest(false) +} + +func TestBuilderFromRequestWithPrefix(t *testing.T) { + u, err := url.Parse("http://example.com/prefix/v2/") + if err != nil { + t.Fatal(err) + } + + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + testRequests := []struct { + request *http.Request + base string + configHost url.URL + }{ + { + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com/prefix/", + }, + + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com/prefix/", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://example.com/prefix/", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://subdomain.example.com/prefix/", + configHost: url.URL{ + Scheme: "https", + Host: "subdomain.example.com", + Path: "/prefix/", + }, + }, + } + + var relative bool + for _, tr := range testRequests { + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost, false) + } else { + builder = NewURLBuilderFromRequest(tr.request, false) + } + + for _, testCase := range makeURLBuilderTestCases(builder) { + buildURL, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = testCase.expectedPath + if !relative { + expectedURL = tr.base[0:len(tr.base)-1] + expectedURL + } + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = testCase.expectedPath + if !relative { + expectedURL = urlBase.String()[0:len(urlBase.String())-1] + expectedURL + } + + } + + if buildURL != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) + } + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/auth/auth.go b/vendor/github.com/docker/distribution/registry/auth/auth.go new file mode 100644 index 0000000..1c9af88 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/auth.go @@ -0,0 +1,202 @@ +// Package auth defines a standard interface for request access controllers. +// +// An access controller has a simple interface with a single `Authorized` +// method which checks that a given request is authorized to perform one or +// more actions on one or more resources. This method should return a non-nil +// error if the request is not authorized. +// +// An implementation registers its access controller by name with a constructor +// which accepts an options map for configuring the access controller. +// +// options := map[string]interface{}{"sillySecret": "whysosilly?"} +// accessController, _ := auth.GetAccessController("silly", options) +// +// This `accessController` can then be used in a request handler like so: +// +// func updateOrder(w http.ResponseWriter, r *http.Request) { +// orderNumber := r.FormValue("orderNumber") +// resource := auth.Resource{Type: "customerOrder", Name: orderNumber} +// access := auth.Access{Resource: resource, Action: "update"} +// +// if ctx, err := accessController.Authorized(ctx, access); err != nil { +// if challenge, ok := err.(auth.Challenge) { +// // Let the challenge write the response. +// challenge.SetHeaders(w) +// w.WriteHeader(http.StatusUnauthorized) +// return +// } else { +// // Some other error. +// } +// } +// } +// +package auth + +import ( + "errors" + "fmt" + "net/http" + + "github.com/docker/distribution/context" +) + +const ( + // UserKey is used to get the user object from + // a user context + UserKey = "auth.user" + + // UserNameKey is used to get the user name from + // a user context + UserNameKey = "auth.user.name" +) + +var ( + // ErrInvalidCredential is returned when the auth token does not authenticate correctly. + ErrInvalidCredential = errors.New("invalid authorization credential") + + // ErrAuthenticationFailure returned when authentication fails. + ErrAuthenticationFailure = errors.New("authentication failure") +) + +// UserInfo carries information about +// an autenticated/authorized client. +type UserInfo struct { + Name string +} + +// Resource describes a resource by type and name. +type Resource struct { + Type string + Class string + Name string +} + +// Access describes a specific action that is +// requested or allowed for a given resource. +type Access struct { + Resource + Action string +} + +// Challenge is a special error type which is used for HTTP 401 Unauthorized +// responses and is able to write the response with WWW-Authenticate challenge +// header values based on the error. +type Challenge interface { + error + + // SetHeaders prepares the request to conduct a challenge response by + // adding the an HTTP challenge header on the response message. Callers + // are expected to set the appropriate HTTP status code (e.g. 401) + // themselves. + SetHeaders(w http.ResponseWriter) +} + +// AccessController controls access to registry resources based on a request +// and required access levels for a request. Implementations can support both +// complete denial and http authorization challenges. +type AccessController interface { + // Authorized returns a non-nil error if the context is granted access and + // returns a new authorized context. If one or more Access structs are + // provided, the requested access will be compared with what is available + // to the context. The given context will contain a "http.request" key with + // a `*http.Request` value. If the error is non-nil, access should always + // be denied. The error may be of type Challenge, in which case the caller + // may have the Challenge handle the request or choose what action to take + // based on the Challenge header or response status. The returned context + // object should have a "auth.user" value set to a UserInfo struct. + Authorized(ctx context.Context, access ...Access) (context.Context, error) +} + +// CredentialAuthenticator is an object which is able to authenticate credentials +type CredentialAuthenticator interface { + AuthenticateUser(username, password string) error +} + +// WithUser returns a context with the authorized user info. +func WithUser(ctx context.Context, user UserInfo) context.Context { + return userInfoContext{ + Context: ctx, + user: user, + } +} + +type userInfoContext struct { + context.Context + user UserInfo +} + +func (uic userInfoContext) Value(key interface{}) interface{} { + switch key { + case UserKey: + return uic.user + case UserNameKey: + return uic.user.Name + } + + return uic.Context.Value(key) +} + +// WithResources returns a context with the authorized resources. +func WithResources(ctx context.Context, resources []Resource) context.Context { + return resourceContext{ + Context: ctx, + resources: resources, + } +} + +type resourceContext struct { + context.Context + resources []Resource +} + +type resourceKey struct{} + +func (rc resourceContext) Value(key interface{}) interface{} { + if key == (resourceKey{}) { + return rc.resources + } + + return rc.Context.Value(key) +} + +// AuthorizedResources returns the list of resources which have +// been authorized for this request. +func AuthorizedResources(ctx context.Context) []Resource { + if resources, ok := ctx.Value(resourceKey{}).([]Resource); ok { + return resources + } + + return nil +} + +// InitFunc is the type of an AccessController factory function and is used +// to register the constructor for different AccesController backends. +type InitFunc func(options map[string]interface{}) (AccessController, error) + +var accessControllers map[string]InitFunc + +func init() { + accessControllers = make(map[string]InitFunc) +} + +// Register is used to register an InitFunc for +// an AccessController backend with the given name. +func Register(name string, initFunc InitFunc) error { + if _, exists := accessControllers[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + accessControllers[name] = initFunc + + return nil +} + +// GetAccessController constructs an AccessController +// with the given options using the named backend. +func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { + if initFunc, exists := accessControllers[name]; exists { + return initFunc(options) + } + + return nil, fmt.Errorf("no access controller registered with name: %s", name) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go new file mode 100644 index 0000000..819b09c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go @@ -0,0 +1,115 @@ +// Package htpasswd provides a simple authentication scheme that checks for the +// user credential hash in an htpasswd formatted file in a configuration-determined +// location. +// +// This authentication method MUST be used under TLS, as simple token-replay attack is possible. +package htpasswd + +import ( + "fmt" + "net/http" + "os" + "sync" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +type accessController struct { + realm string + path string + modtime time.Time + mu sync.Mutex + htpasswd *htpasswd +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) + } + + path, present := options["path"] + if _, ok := path.(string); !present || !ok { + return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) + } + + return &accessController{realm: realm.(string), path: path.(string)}, nil +} + +func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + username, password, ok := req.BasicAuth() + if !ok { + return nil, &challenge{ + realm: ac.realm, + err: auth.ErrInvalidCredential, + } + } + + // Dynamically parsing the latest account list + fstat, err := os.Stat(ac.path) + if err != nil { + return nil, err + } + + lastModified := fstat.ModTime() + ac.mu.Lock() + if ac.htpasswd == nil || !ac.modtime.Equal(lastModified) { + ac.modtime = lastModified + + f, err := os.Open(ac.path) + if err != nil { + ac.mu.Unlock() + return nil, err + } + defer f.Close() + + h, err := newHTPasswd(f) + if err != nil { + ac.mu.Unlock() + return nil, err + } + ac.htpasswd = h + } + localHTPasswd := ac.htpasswd + ac.mu.Unlock() + + if err := localHTPasswd.authenticateUser(username, password); err != nil { + context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) + return nil, &challenge{ + realm: ac.realm, + err: auth.ErrAuthenticationFailure, + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil +} + +// challenge implements the auth.Challenge interface. +type challenge struct { + realm string + err error +} + +var _ auth.Challenge = challenge{} + +// SetHeaders sets the basic challenge header on the response. +func (ch challenge) SetHeaders(w http.ResponseWriter) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) +} + +func (ch challenge) Error() string { + return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err) +} + +func init() { + auth.Register("htpasswd", auth.InitFunc(newAccessController)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go new file mode 100644 index 0000000..553f05c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go @@ -0,0 +1,122 @@ +package htpasswd + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +func TestBasicAccessController(t *testing.T) { + testRealm := "The-Shire" + testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"} + testPasswords := []string{"baggins", "baggins", "새주", "공주님"} + testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= + frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W + MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 + DeokMan:공주님` + + tempFile, err := ioutil.TempFile("", "htpasswd-test") + if err != nil { + t.Fatal("could not create temporary htpasswd file") + } + if _, err = tempFile.WriteString(testHtpasswdContent); err != nil { + t.Fatal("could not write temporary htpasswd file") + } + + options := map[string]interface{}{ + "realm": testRealm, + "path": tempFile.Name(), + } + ctx := context.Background() + + accessController, err := newAccessController(options) + if err != nil { + t.Fatal("error creating access controller") + } + + tempFile.Close() + + var userNumber = 0 + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithRequest(ctx, r) + authCtx, err := accessController.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.SetHeaders(w) + w.WriteHeader(http.StatusUnauthorized) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) + if !ok { + t.Fatal("basic accessController did not set auth.user context") + } + + if userInfo.Name != testUsers[userNumber] { + t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + client := &http.Client{ + CheckRedirect: nil, + } + + req, _ := http.NewRequest("GET", server.URL, nil) + resp, err := client.Do(req) + + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + nonbcrypt := map[string]struct{}{ + "bilbo": {}, + "DeokMan": {}, + } + + for i := 0; i < len(testUsers); i++ { + userNumber = i + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("error allocating new request: %v", err) + } + + req.SetBasicAuth(testUsers[i], testPasswords[i]) + + resp, err = client.Do(req) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + if _, ok := nonbcrypt[testUsers[i]]; ok { + // these are not allowed. + // Request should be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i]) + } + } else { + // Request should be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) + } + } + } + +} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go new file mode 100644 index 0000000..b10b256 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go @@ -0,0 +1,82 @@ +package htpasswd + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/docker/distribution/registry/auth" + + "golang.org/x/crypto/bcrypt" +) + +// htpasswd holds a path to a system .htpasswd file and the machinery to parse +// it. Only bcrypt hash entries are supported. +type htpasswd struct { + entries map[string][]byte // maps username to password byte slice. +} + +// newHTPasswd parses the reader and returns an htpasswd or an error. +func newHTPasswd(rd io.Reader) (*htpasswd, error) { + entries, err := parseHTPasswd(rd) + if err != nil { + return nil, err + } + + return &htpasswd{entries: entries}, nil +} + +// AuthenticateUser checks a given user:password credential against the +// receiving HTPasswd's file. If the check passes, nil is returned. +func (htpasswd *htpasswd) authenticateUser(username string, password string) error { + credentials, ok := htpasswd.entries[username] + if !ok { + // timing attack paranoia + bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) + + return auth.ErrAuthenticationFailure + } + + err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) + if err != nil { + return auth.ErrAuthenticationFailure + } + + return nil +} + +// parseHTPasswd parses the contents of htpasswd. This will read all the +// entries in the file, whether or not they are needed. An error is returned +// if a syntax errors are encountered or if the reader fails. +func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { + entries := map[string][]byte{} + scanner := bufio.NewScanner(rd) + var line int + for scanner.Scan() { + line++ // 1-based line numbering + t := strings.TrimSpace(scanner.Text()) + + if len(t) < 1 { + continue + } + + // lines that *begin* with a '#' are considered comments + if t[0] == '#' { + continue + } + + i := strings.Index(t, ":") + if i < 0 || i >= len(t) { + return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text()) + } + + entries[t[:i]] = []byte(t[i+1:]) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return entries, nil +} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go new file mode 100644 index 0000000..309c359 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go @@ -0,0 +1,85 @@ +package htpasswd + +import ( + "fmt" + "reflect" + "strings" + "testing" +) + +func TestParseHTPasswd(t *testing.T) { + + for _, tc := range []struct { + desc string + input string + err error + entries map[string][]byte + }{ + { + desc: "basic example", + input: ` +# This is a comment in a basic example. +bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= +frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W +MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 +DeokMan:공주님 +`, + entries: map[string][]byte{ + "bilbo": []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="), + "frodo": []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"), + "MiShil": []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"), + "DeokMan": []byte("공주님"), + }, + }, + { + desc: "ensures comments are filtered", + input: ` +# asdf:asdf +`, + }, + { + desc: "ensure midline hash is not comment", + input: ` +asdf:as#df +`, + entries: map[string][]byte{ + "asdf": []byte("as#df"), + }, + }, + { + desc: "ensure midline hash is not comment", + input: ` +# A valid comment +valid:entry +asdf +`, + err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`), + }, + } { + + entries, err := parseHTPasswd(strings.NewReader(tc.input)) + if err != tc.err { + if tc.err == nil { + t.Fatalf("%s: unexpected error: %v", tc.desc, err) + } else { + if err.Error() != tc.err.Error() { // use string equality here. + t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err) + } + } + } + + if tc.err != nil { + continue // don't test output + } + + // allow empty and nil to be equal + if tc.entries == nil { + tc.entries = map[string][]byte{} + } + + if !reflect.DeepEqual(entries, tc.entries) { + t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries) + } + } + +} diff --git a/vendor/github.com/docker/distribution/registry/auth/silly/access.go b/vendor/github.com/docker/distribution/registry/auth/silly/access.go new file mode 100644 index 0000000..2b801d9 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/silly/access.go @@ -0,0 +1,97 @@ +// Package silly provides a simple authentication scheme that checks for the +// existence of an Authorization header and issues access if is present and +// non-empty. +// +// This package is present as an example implementation of a minimal +// auth.AccessController and for testing. This is not suitable for any kind of +// production security. +package silly + +import ( + "fmt" + "net/http" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +// accessController provides a simple implementation of auth.AccessController +// that simply checks for a non-empty Authorization header. It is useful for +// demonstration and testing. +type accessController struct { + realm string + service string +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for silly access controller`) + } + + service, present := options["service"] + if _, ok := service.(string); !present || !ok { + return nil, fmt.Errorf(`"service" must be set for silly access controller`) + } + + return &accessController{realm: realm.(string), service: service.(string)}, nil +} + +// Authorized simply checks for the existence of the authorization header, +// responding with a bearer challenge if it doesn't exist. +func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + if req.Header.Get("Authorization") == "" { + challenge := challenge{ + realm: ac.realm, + service: ac.service, + } + + if len(accessRecords) > 0 { + var scopes []string + for _, access := range accessRecords { + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) + } + challenge.scope = strings.Join(scopes, " ") + } + + return nil, &challenge + } + + return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil +} + +type challenge struct { + realm string + service string + scope string +} + +var _ auth.Challenge = challenge{} + +// SetHeaders sets a simple bearer challenge on the response. +func (ch challenge) SetHeaders(w http.ResponseWriter) { + header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) + + if ch.scope != "" { + header = fmt.Sprintf("%s,scope=%q", header, ch.scope) + } + + w.Header().Set("WWW-Authenticate", header) +} + +func (ch challenge) Error() string { + return fmt.Sprintf("silly authentication challenge: %#v", ch) +} + +// init registers the silly auth backend. +func init() { + auth.Register("silly", auth.InitFunc(newAccessController)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go b/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go new file mode 100644 index 0000000..0a5103e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go @@ -0,0 +1,71 @@ +package silly + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +func TestSillyAccessController(t *testing.T) { + ac := &accessController{ + realm: "test-realm", + service: "test-service", + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithRequest(context.Background(), r) + authCtx, err := ac.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.SetHeaders(w) + w.WriteHeader(http.StatusUnauthorized) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) + if !ok { + t.Fatal("silly accessController did not set auth.user context") + } + + if userInfo.Name != "silly" { + t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Authorization", "seriously, anything") + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) + } +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go new file mode 100644 index 0000000..4e8b7f1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go @@ -0,0 +1,272 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" +) + +// accessSet maps a typed, named resource to +// a set of actions requested or authorized. +type accessSet map[auth.Resource]actionSet + +// newAccessSet constructs an accessSet from +// a variable number of auth.Access items. +func newAccessSet(accessItems ...auth.Access) accessSet { + accessSet := make(accessSet, len(accessItems)) + + for _, access := range accessItems { + resource := auth.Resource{ + Type: access.Type, + Name: access.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + set.add(access.Action) + } + + return accessSet +} + +// contains returns whether or not the given access is in this accessSet. +func (s accessSet) contains(access auth.Access) bool { + actionSet, ok := s[access.Resource] + if ok { + return actionSet.contains(access.Action) + } + + return false +} + +// scopeParam returns a collection of scopes which can +// be used for a WWW-Authenticate challenge parameter. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (s accessSet) scopeParam() string { + scopes := make([]string, 0, len(s)) + + for resource, actionSet := range s { + actions := strings.Join(actionSet.keys(), ",") + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) + } + + return strings.Join(scopes, " ") +} + +// Errors used and exported by this package. +var ( + ErrInsufficientScope = errors.New("insufficient scope") + ErrTokenRequired = errors.New("authorization token required") +) + +// authChallenge implements the auth.Challenge interface. +type authChallenge struct { + err error + realm string + service string + accessSet accessSet +} + +var _ auth.Challenge = authChallenge{} + +// Error returns the internal error string for this authChallenge. +func (ac authChallenge) Error() string { + return ac.err.Error() +} + +// Status returns the HTTP Response Status Code for this authChallenge. +func (ac authChallenge) Status() int { + return http.StatusUnauthorized +} + +// challengeParams constructs the value to be used in +// the WWW-Authenticate response challenge header. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (ac authChallenge) challengeParams() string { + str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) + + if scope := ac.accessSet.scopeParam(); scope != "" { + str = fmt.Sprintf("%s,scope=%q", str, scope) + } + + if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { + str = fmt.Sprintf("%s,error=%q", str, "invalid_token") + } else if ac.err == ErrInsufficientScope { + str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") + } + + return str +} + +// SetChallenge sets the WWW-Authenticate value for the response. +func (ac authChallenge) SetHeaders(w http.ResponseWriter) { + w.Header().Add("WWW-Authenticate", ac.challengeParams()) +} + +// accessController implements the auth.AccessController interface. +type accessController struct { + realm string + issuer string + service string + rootCerts *x509.CertPool + trustedKeys map[string]libtrust.PublicKey +} + +// tokenAccessOptions is a convenience type for handling +// options to the contstructor of an accessController. +type tokenAccessOptions struct { + realm string + issuer string + service string + rootCertBundle string +} + +// checkOptions gathers the necessary options +// for an accessController from the given map. +func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { + var opts tokenAccessOptions + + keys := []string{"realm", "issuer", "service", "rootcertbundle"} + vals := make([]string, 0, len(keys)) + for _, key := range keys { + val, ok := options[key].(string) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option string: %q", key) + } + vals = append(vals, val) + } + + opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + + return opts, nil +} + +// newAccessController creates an accessController using the given options. +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + config, err := checkOptions(options) + if err != nil { + return nil, err + } + + fp, err := os.Open(config.rootCertBundle) + if err != nil { + return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + defer fp.Close() + + rawCertBundle, err := ioutil.ReadAll(fp) + if err != nil { + return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + + var rootCerts []*x509.Certificate + pemBlock, rawCertBundle := pem.Decode(rawCertBundle) + for pemBlock != nil { + if pemBlock.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) + } + + rootCerts = append(rootCerts, cert) + } + + pemBlock, rawCertBundle = pem.Decode(rawCertBundle) + } + + if len(rootCerts) == 0 { + return nil, errors.New("token auth requires at least one token signing root certificate") + } + + rootPool := x509.NewCertPool() + trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) + if err != nil { + return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) + } + trustedKeys[pubKey.KeyID()] = pubKey + } + + return &accessController{ + realm: config.realm, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, + }, nil +} + +// Authorized handles checking whether the given request is authorized +// for actions on resources described by the given access items. +func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { + challenge := &authChallenge{ + realm: ac.realm, + service: ac.service, + accessSet: newAccessSet(accessItems...), + } + + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { + challenge.err = ErrTokenRequired + return nil, challenge + } + + rawToken := parts[1] + + token, err := NewToken(rawToken) + if err != nil { + challenge.err = err + return nil, challenge + } + + verifyOpts := VerifyOptions{ + TrustedIssuers: []string{ac.issuer}, + AcceptedAudiences: []string{ac.service}, + Roots: ac.rootCerts, + TrustedKeys: ac.trustedKeys, + } + + if err = token.Verify(verifyOpts); err != nil { + challenge.err = err + return nil, challenge + } + + accessSet := token.accessSet() + for _, access := range accessItems { + if !accessSet.contains(access) { + challenge.err = ErrInsufficientScope + return nil, challenge + } + } + + ctx = auth.WithResources(ctx, token.resources()) + + return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil +} + +// init handles registering the token auth backend. +func init() { + auth.Register("token", auth.InitFunc(newAccessController)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/stringset.go b/vendor/github.com/docker/distribution/registry/auth/token/stringset.go new file mode 100644 index 0000000..1d04f10 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/stringset.go @@ -0,0 +1,35 @@ +package token + +// StringSet is a useful type for looking up strings. +type stringSet map[string]struct{} + +// NewStringSet creates a new StringSet with the given strings. +func newStringSet(keys ...string) stringSet { + ss := make(stringSet, len(keys)) + ss.add(keys...) + return ss +} + +// Add inserts the given keys into this StringSet. +func (ss stringSet) add(keys ...string) { + for _, key := range keys { + ss[key] = struct{}{} + } +} + +// Contains returns whether the given key is in this StringSet. +func (ss stringSet) contains(key string) bool { + _, ok := ss[key] + return ok +} + +// Keys returns a slice of all keys in this StringSet. +func (ss stringSet) keys() []string { + keys := make([]string, 0, len(ss)) + + for key := range ss { + keys = append(keys, key) + } + + return keys +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token.go b/vendor/github.com/docker/distribution/registry/auth/token/token.go new file mode 100644 index 0000000..850f581 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/token.go @@ -0,0 +1,378 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" + + "github.com/docker/distribution/registry/auth" +) + +const ( + // TokenSeparator is the value which separates the header, claims, and + // signature in the compact serialization of a JSON Web Token. + TokenSeparator = "." + // Leeway is the Duration that will be added to NBF and EXP claim + // checks to account for clock skew as per https://tools.ietf.org/html/rfc7519#section-4.1.5 + Leeway = 60 * time.Second +) + +// Errors used by token parsing and verification. +var ( + ErrMalformedToken = errors.New("malformed token") + ErrInvalidToken = errors.New("invalid token") +) + +// ResourceActions stores allowed actions on a named and typed resource. +type ResourceActions struct { + Type string `json:"type"` + Class string `json:"class,omitempty"` + Name string `json:"name"` + Actions []string `json:"actions"` +} + +// ClaimSet describes the main section of a JSON Web Token. +type ClaimSet struct { + // Public claims + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience string `json:"aud"` + Expiration int64 `json:"exp"` + NotBefore int64 `json:"nbf"` + IssuedAt int64 `json:"iat"` + JWTID string `json:"jti"` + + // Private claims + Access []*ResourceActions `json:"access"` +} + +// Header describes the header section of a JSON Web Token. +type Header struct { + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK *json.RawMessage `json:"jwk,omitempty"` +} + +// Token describes a JSON Web Token. +type Token struct { + Raw string + Header *Header + Claims *ClaimSet + Signature []byte +} + +// VerifyOptions is used to specify +// options when verifying a JSON Web Token. +type VerifyOptions struct { + TrustedIssuers []string + AcceptedAudiences []string + Roots *x509.CertPool + TrustedKeys map[string]libtrust.PublicKey +} + +// NewToken parses the given raw token string +// and constructs an unverified JSON Web Token. +func NewToken(rawToken string) (*Token, error) { + parts := strings.Split(rawToken, TokenSeparator) + if len(parts) != 3 { + return nil, ErrMalformedToken + } + + var ( + rawHeader, rawClaims = parts[0], parts[1] + headerJSON, claimsJSON []byte + err error + ) + + defer func() { + if err != nil { + log.Infof("error while unmarshalling raw token: %s", err) + } + }() + + if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { + err = fmt.Errorf("unable to decode header: %s", err) + return nil, ErrMalformedToken + } + + if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { + err = fmt.Errorf("unable to decode claims: %s", err) + return nil, ErrMalformedToken + } + + token := new(Token) + token.Header = new(Header) + token.Claims = new(ClaimSet) + + token.Raw = strings.Join(parts[:2], TokenSeparator) + if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { + err = fmt.Errorf("unable to decode signature: %s", err) + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(headerJSON, token.Header); err != nil { + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { + return nil, ErrMalformedToken + } + + return token, nil +} + +// Verify attempts to verify this token using the given options. +// Returns a nil error if the token is valid. +func (t *Token) Verify(verifyOpts VerifyOptions) error { + // Verify that the Issuer claim is a trusted authority. + if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { + log.Infof("token from untrusted issuer: %q", t.Claims.Issuer) + return ErrInvalidToken + } + + // Verify that the Audience claim is allowed. + if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { + log.Infof("token intended for another audience: %q", t.Claims.Audience) + return ErrInvalidToken + } + + // Verify that the token is currently usable and not expired. + currentTime := time.Now() + + ExpWithLeeway := time.Unix(t.Claims.Expiration, 0).Add(Leeway) + if currentTime.After(ExpWithLeeway) { + log.Infof("token not to be used after %s - currently %s", ExpWithLeeway, currentTime) + return ErrInvalidToken + } + + NotBeforeWithLeeway := time.Unix(t.Claims.NotBefore, 0).Add(-Leeway) + if currentTime.Before(NotBeforeWithLeeway) { + log.Infof("token not to be used before %s - currently %s", NotBeforeWithLeeway, currentTime) + return ErrInvalidToken + } + + // Verify the token signature. + if len(t.Signature) == 0 { + log.Info("token has no signature") + return ErrInvalidToken + } + + // Verify that the signing key is trusted. + signingKey, err := t.VerifySigningKey(verifyOpts) + if err != nil { + log.Info(err) + return ErrInvalidToken + } + + // Finally, verify the signature of the token using the key which signed it. + if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { + log.Infof("unable to verify token signature: %s", err) + return ErrInvalidToken + } + + return nil +} + +// VerifySigningKey attempts to get the key which was used to sign this token. +// The token header should contain either of these 3 fields: +// `x5c` - The x509 certificate chain for the signing key. Needs to be +// verified. +// `jwk` - The JSON Web Key representation of the signing key. +// May contain its own `x5c` field which needs to be verified. +// `kid` - The unique identifier for the key. This library interprets it +// as a libtrust fingerprint. The key itself can be looked up in +// the trustedKeys field of the given verify options. +// Each of these methods are tried in that order of preference until the +// signing key is found or an error is returned. +func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { + // First attempt to get an x509 certificate chain from the header. + var ( + x5c = t.Header.X5c + rawJWK = t.Header.RawJWK + keyID = t.Header.KeyID + ) + + switch { + case len(x5c) > 0: + signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) + case rawJWK != nil: + signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) + case len(keyID) > 0: + signingKey = verifyOpts.TrustedKeys[keyID] + if signingKey == nil { + err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) + } + default: + err = errors.New("unable to get token signing key") + } + + return +} + +func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { + if len(x5c) == 0 { + return nil, errors.New("empty x509 certificate chain") + } + + // Ensure the first element is encoded correctly. + leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) + if err != nil { + return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) + } + + // And that it is a valid x509 certificate. + leafCert, err := x509.ParseCertificate(leafCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) + } + + // The rest of the certificate chain are intermediate certificates. + intermediates := x509.NewCertPool() + for i := 1; i < len(x5c); i++ { + intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) + } + + intermediateCert, err := x509.ParseCertificate(intermediateCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) + } + + intermediates.AddCert(intermediateCert) + } + + verifyOpts := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: roots, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + + // TODO: this call returns certificate chains which we ignore for now, but + // we should check them for revocations if we have the ability later. + if _, err = leafCert.Verify(verifyOpts); err != nil { + return nil, fmt.Errorf("unable to verify certificate chain: %s", err) + } + + // Get the public key from the leaf certificate. + leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) + if !ok { + return nil, errors.New("unable to get leaf cert public key value") + } + + leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) + if err != nil { + return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) + } + + return +} + +func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK)) + if err != nil { + return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) + } + + // Check to see if the key includes a certificate chain. + x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) + if !ok { + // The JWK should be one of the trusted root keys. + if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { + return nil, errors.New("untrusted JWK with no certificate chain") + } + + // The JWK is one of the trusted keys. + return + } + + // Ensure each item in the chain is of the correct type. + x5c := make([]string, len(x5cVal)) + for i, val := range x5cVal { + certString, ok := val.(string) + if !ok || len(certString) == 0 { + return nil, errors.New("malformed certificate chain") + } + x5c[i] = certString + } + + // Ensure that the x509 certificate chain can + // be verified up to one of our trusted roots. + leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) + if err != nil { + return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) + } + + // Verify that the public key in the leaf cert *is* the signing key. + if pubKey.KeyID() != leafKey.KeyID() { + return nil, errors.New("leaf certificate public key ID does not match JWK key ID") + } + + return +} + +// accessSet returns a set of actions available for the resource +// actions listed in the `access` section of this token. +func (t *Token) accessSet() accessSet { + if t.Claims == nil { + return nil + } + + accessSet := make(accessSet, len(t.Claims.Access)) + + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Name: resourceActions.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + for _, action := range resourceActions.Actions { + set.add(action) + } + } + + return accessSet +} + +func (t *Token) resources() []auth.Resource { + if t.Claims == nil { + return nil + } + + resourceSet := map[auth.Resource]struct{}{} + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Class: resourceActions.Class, + Name: resourceActions.Name, + } + resourceSet[resource] = struct{}{} + } + + resources := make([]auth.Resource, 0, len(resourceSet)) + for resource := range resourceSet { + resources = append(resources, resource) + } + + return resources +} + +func (t *Token) compactRaw() string { + return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token_test.go b/vendor/github.com/docker/distribution/registry/auth/token/token_test.go new file mode 100644 index 0000000..cbfe2a6 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/token_test.go @@ -0,0 +1,510 @@ +package token + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" +) + +func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { + keys := make([]libtrust.PrivateKey, 0, numKeys) + + for i := 0; i < numKeys; i++ { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err + } + keys = append(keys, key) + } + + return keys, nil +} + +func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { + if depth == 0 { + // Don't need to build a chain. + return rootKey, nil + } + + var ( + x5c = make([]string, depth) + parentKey = rootKey + key libtrust.PrivateKey + cert *x509.Certificate + err error + ) + + for depth > 0 { + if key, err = libtrust.GenerateECP256PrivateKey(); err != nil { + return nil, err + } + + if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil { + return nil, err + } + + depth-- + x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw) + parentKey = key + } + + key.AddExtendedField("x5c", x5c) + + return key, nil +} + +func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) { + certs := make([]*x509.Certificate, 0, len(rootKeys)) + + for _, key := range rootKeys { + cert, err := libtrust.GenerateCACert(key, key) + if err != nil { + return nil, err + } + certs = append(certs, cert) + } + + return certs, nil +} + +func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey { + trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys)) + + for _, key := range rootKeys { + trustedKeys[key.KeyID()] = key.PublicKey() + } + + return trustedKeys +} + +func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int, now time.Time, exp time.Time) (*Token, error) { + signingKey, err := makeSigningKeyWithChain(rootKey, depth) + if err != nil { + return nil, fmt.Errorf("unable to make signing key with chain: %s", err) + } + + var rawJWK json.RawMessage + rawJWK, err = signingKey.PublicKey().MarshalJSON() + if err != nil { + return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) + } + + joseHeader := &Header{ + Type: "JWT", + SigningAlg: "ES256", + RawJWK: &rawJWK, + } + + randomBytes := make([]byte, 15) + if _, err = rand.Read(randomBytes); err != nil { + return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err) + } + + claimSet := &ClaimSet{ + Issuer: issuer, + Subject: "foo", + Audience: audience, + Expiration: exp.Unix(), + NotBefore: now.Unix(), + IssuedAt: now.Unix(), + JWTID: base64.URLEncoding.EncodeToString(randomBytes), + Access: access, + } + + var joseHeaderBytes, claimSetBytes []byte + + if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { + return nil, fmt.Errorf("unable to marshal jose header: %s", err) + } + if claimSetBytes, err = json.Marshal(claimSet); err != nil { + return nil, fmt.Errorf("unable to marshal claim set: %s", err) + } + + encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes) + encodedClaimSet := joseBase64UrlEncode(claimSetBytes) + encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) + + var signatureBytes []byte + if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil { + return nil, fmt.Errorf("unable to sign jwt payload: %s", err) + } + + signature := joseBase64UrlEncode(signatureBytes) + tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature) + + return NewToken(tokenString) +} + +// This test makes 4 tokens with a varying number of intermediate +// certificates ranging from no intermediate chain to a length of 3 +// intermediates. +func TestTokenVerify(t *testing.T) { + var ( + numTokens = 4 + issuer = "test-issuer" + audience = "test-audience" + access = []*ResourceActions{ + { + Type: "repository", + Name: "foo/bar", + Actions: []string{"pull", "push"}, + }, + } + ) + + rootKeys, err := makeRootKeys(numTokens) + if err != nil { + t.Fatal(err) + } + + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + t.Fatal(err) + } + + rootPool := x509.NewCertPool() + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + } + + trustedKeys := makeTrustedKeyMap(rootKeys) + + tokens := make([]*Token, 0, numTokens) + + for i := 0; i < numTokens; i++ { + token, err := makeTestToken(issuer, audience, access, rootKeys[i], i, time.Now(), time.Now().Add(5*time.Minute)) + if err != nil { + t.Fatal(err) + } + tokens = append(tokens, token) + } + + verifyOps := VerifyOptions{ + TrustedIssuers: []string{issuer}, + AcceptedAudiences: []string{audience}, + Roots: rootPool, + TrustedKeys: trustedKeys, + } + + for _, token := range tokens { + if err := token.Verify(verifyOps); err != nil { + t.Fatal(err) + } + } +} + +// This tests that we don't fail tokens with nbf within +// the defined leeway in seconds +func TestLeeway(t *testing.T) { + var ( + issuer = "test-issuer" + audience = "test-audience" + access = []*ResourceActions{ + { + Type: "repository", + Name: "foo/bar", + Actions: []string{"pull", "push"}, + }, + } + ) + + rootKeys, err := makeRootKeys(1) + if err != nil { + t.Fatal(err) + } + + trustedKeys := makeTrustedKeyMap(rootKeys) + + verifyOps := VerifyOptions{ + TrustedIssuers: []string{issuer}, + AcceptedAudiences: []string{audience}, + Roots: nil, + TrustedKeys: trustedKeys, + } + + // nbf verification should pass within leeway + futureNow := time.Now().Add(time.Duration(5) * time.Second) + token, err := makeTestToken(issuer, audience, access, rootKeys[0], 0, futureNow, futureNow.Add(5*time.Minute)) + if err != nil { + t.Fatal(err) + } + + if err := token.Verify(verifyOps); err != nil { + t.Fatal(err) + } + + // nbf verification should fail with a skew larger than leeway + futureNow = time.Now().Add(time.Duration(61) * time.Second) + token, err = makeTestToken(issuer, audience, access, rootKeys[0], 0, futureNow, futureNow.Add(5*time.Minute)) + if err != nil { + t.Fatal(err) + } + + if err = token.Verify(verifyOps); err == nil { + t.Fatal("Verification should fail for token with nbf in the future outside leeway") + } + + // exp verification should pass within leeway + token, err = makeTestToken(issuer, audience, access, rootKeys[0], 0, time.Now(), time.Now().Add(-59*time.Second)) + if err != nil { + t.Fatal(err) + } + + if err = token.Verify(verifyOps); err != nil { + t.Fatal(err) + } + + // exp verification should fail with a skew larger than leeway + token, err = makeTestToken(issuer, audience, access, rootKeys[0], 0, time.Now(), time.Now().Add(-60*time.Second)) + if err != nil { + t.Fatal(err) + } + + if err = token.Verify(verifyOps); err == nil { + t.Fatal("Verification should fail for token with exp in the future outside leeway") + } +} + +func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) { + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + return "", err + } + + tempFile, err := ioutil.TempFile("", "rootCertBundle") + if err != nil { + return "", err + } + defer tempFile.Close() + + for _, cert := range rootCerts { + if err = pem.Encode(tempFile, &pem.Block{ + Type: "CERTIFICATE", + Bytes: cert.Raw, + }); err != nil { + os.Remove(tempFile.Name()) + return "", err + } + } + + return tempFile.Name(), nil +} + +// TestAccessController tests complete integration of the token auth package. +// It starts by mocking the options for a token auth accessController which +// it creates. It then tries a few mock requests: +// - don't supply a token; should error with challenge +// - supply an invalid token; should error with challenge +// - supply a token with insufficient access; should error with challenge +// - supply a valid token; should not error +func TestAccessController(t *testing.T) { + // Make 2 keys; only the first is to be a trusted root key. + rootKeys, err := makeRootKeys(2) + if err != nil { + t.Fatal(err) + } + + rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1]) + if err != nil { + t.Fatal(err) + } + defer os.Remove(rootCertBundleFilename) + + realm := "https://auth.example.com/token/" + issuer := "test-issuer.example.com" + service := "test-service.example.com" + + options := map[string]interface{}{ + "realm": realm, + "issuer": issuer, + "service": service, + "rootcertbundle": rootCertBundleFilename, + } + + accessController, err := newAccessController(options) + if err != nil { + t.Fatal(err) + } + + // 1. Make a mock http.Request with no token. + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + + testAccess := auth.Access{ + Resource: auth.Resource{ + Type: "foo", + Name: "bar", + }, + Action: "baz", + } + + ctx := context.WithRequest(context.Background(), req) + authCtx, err := accessController.Authorized(ctx, testAccess) + challenge, ok := err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrTokenRequired.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 2. Supply an invalid token. + token, err := makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[1], 1, time.Now(), time.Now().Add(5*time.Minute), // Everything is valid except the key which signed it. + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInvalidToken.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 3. Supply a token with insufficient access. + token, err = makeTestToken( + issuer, service, + []*ResourceActions{}, // No access specified. + rootKeys[0], 1, time.Now(), time.Now().Add(5*time.Minute), + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInsufficientScope.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 4. Supply the token we need, or deserve, or whatever. + token, err = makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[0], 1, time.Now(), time.Now().Add(5*time.Minute), + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + if err != nil { + t.Fatalf("accessController returned unexpected error: %s", err) + } + + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) + if !ok { + t.Fatal("token accessController did not set auth.user context") + } + + if userInfo.Name != "foo" { + t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) + } +} + +// This tests that newAccessController can handle PEM blocks in the certificate +// file other than certificates, for example a private key. +func TestNewAccessControllerPemBlock(t *testing.T) { + rootKeys, err := makeRootKeys(2) + if err != nil { + t.Fatal(err) + } + + rootCertBundleFilename, err := writeTempRootCerts(rootKeys) + if err != nil { + t.Fatal(err) + } + defer os.Remove(rootCertBundleFilename) + + // Add something other than a certificate to the rootcertbundle + file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + t.Fatal(err) + } + keyBlock, err := rootKeys[0].PEMBlock() + if err != nil { + t.Fatal(err) + } + err = pem.Encode(file, keyBlock) + if err != nil { + t.Fatal(err) + } + err = file.Close() + if err != nil { + t.Fatal(err) + } + + realm := "https://auth.example.com/token/" + issuer := "test-issuer.example.com" + service := "test-service.example.com" + + options := map[string]interface{}{ + "realm": realm, + "issuer": issuer, + "service": service, + "rootcertbundle": rootCertBundleFilename, + } + + ac, err := newAccessController(options) + if err != nil { + t.Fatal(err) + } + + if len(ac.(*accessController).rootCerts.Subjects()) != 2 { + t.Fatal("accessController has the wrong number of certificates") + } +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/util.go b/vendor/github.com/docker/distribution/registry/auth/token/util.go new file mode 100644 index 0000000..d7f95be --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/util.go @@ -0,0 +1,58 @@ +package token + +import ( + "encoding/base64" + "errors" + "strings" +) + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +// actionSet is a special type of stringSet. +type actionSet struct { + stringSet +} + +func newActionSet(actions ...string) actionSet { + return actionSet{newStringSet(actions...)} +} + +// Contains calls StringSet.Contains() for +// either "*" or the given action string. +func (s actionSet) contains(action string) bool { + return s.stringSet.contains("*") || s.stringSet.contains(action) +} + +// contains returns true if q is found in ss. +func contains(ss []string, q string) bool { + for _, s := range ss { + if s == q { + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go new file mode 100644 index 0000000..7d8f1d9 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go @@ -0,0 +1,58 @@ +package auth + +import ( + "net/http" + "strings" +) + +// APIVersion represents a version of an API including its +// type and version number. +type APIVersion struct { + // Type refers to the name of a specific API specification + // such as "registry" + Type string + + // Version is the version of the API specification implemented, + // This may omit the revision number and only include + // the major and minor version, such as "2.0" + Version string +} + +// String returns the string formatted API Version +func (v APIVersion) String() string { + return v.Type + "/" + v.Version +} + +// APIVersions gets the API versions out of an HTTP response using the provided +// version header as the key for the HTTP header. +func APIVersions(resp *http.Response, versionHeader string) []APIVersion { + versions := []APIVersion{} + if versionHeader != "" { + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { + for _, version := range strings.Fields(supportedVersions) { + versions = append(versions, ParseAPIVersion(version)) + } + } + } + return versions +} + +// ParseAPIVersion parses an API version string into an APIVersion +// Format (Expected, not enforced): +// API version string = '/' +// API type = [a-z][a-z0-9]* +// API version = [0-9]+(\.[0-9]+)? +// TODO(dmcgowan): Enforce format, add error condition, remove unknown type +func ParseAPIVersion(versionStr string) APIVersion { + idx := strings.IndexRune(versionStr, '/') + if idx == -1 { + return APIVersion{ + Type: "unknown", + Version: versionStr, + } + } + return APIVersion{ + Type: strings.ToLower(versionStr[:idx]), + Version: versionStr[idx+1:], + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go new file mode 100644 index 0000000..2c3ebe1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go @@ -0,0 +1,27 @@ +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +// FROM: http://golang.org/src/net/http/transport.go +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go new file mode 100644 index 0000000..c9bdfc3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -0,0 +1,237 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // Scheme is the auth-scheme according to RFC 2617 + Scheme string + + // Parameters are the auth-params according to RFC 2617 + Parameters map[string]string +} + +// Manager manages the challenges for endpoints. +// The challenges are pulled out of HTTP responses. Only +// responses which expect challenges should be added to +// the manager, since a non-unauthorized request will be +// viewed as not requiring challenges. +type Manager interface { + // GetChallenges returns the challenges for the given + // endpoint URL. + GetChallenges(endpoint url.URL) ([]Challenge, error) + + // AddResponse adds the response to the challenge + // manager. The challenges will be parsed out of + // the WWW-Authenicate headers and added to the + // URL which was produced the response. If the + // response was authorized, any challenges for the + // endpoint will be cleared. + AddResponse(resp *http.Response) error +} + +// NewSimpleManager returns an instance of +// Manger which only maps endpoints to challenges +// based on the responses which have been added the +// manager. The simple manager will make no attempt to +// perform requests on the endpoints or cache the responses +// to a backend. +func NewSimpleManager() Manager { + return &simpleManager{ + Challanges: make(map[string][]Challenge), + } +} + +type simpleManager struct { + sync.RWMutex + Challanges map[string][]Challenge +} + +func normalizeURL(endpoint *url.URL) { + endpoint.Host = strings.ToLower(endpoint.Host) + endpoint.Host = canonicalAddr(endpoint) +} + +func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + normalizeURL(&endpoint) + + m.RLock() + defer m.RUnlock() + challenges := m.Challanges[endpoint.String()] + return challenges, nil +} + +func (m *simpleManager) AddResponse(resp *http.Response) error { + challenges := ResponseChallenges(resp) + if resp.Request == nil { + return fmt.Errorf("missing request reference") + } + urlCopy := url.URL{ + Path: resp.Request.URL.Path, + Host: resp.Request.URL.Host, + Scheme: resp.Request.URL.Scheme, + } + normalizeURL(&urlCopy) + + m.Lock() + defer m.Unlock() + m.Challanges[urlCopy.String()] = challenges + return nil +} + +// Octet types from RFC 2616. +type octetType byte + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// ResponseChallenges returns a list of authorization challenges +// for the given http Response. Challenges are only checked if +// the response status code was a 401. +func ResponseChallenges(resp *http.Response) []Challenge { + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object. + return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go new file mode 100644 index 0000000..d4986b3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go @@ -0,0 +1,127 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "testing" +) + +func TestAuthChallengeParse(t *testing.T) { + header := http.Header{} + header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`) + + challenges := parseAuthHeader(header) + if len(challenges) != 1 { + t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) + } + challenge := challenges[0] + + if expected := "bearer"; challenge.Scheme != expected { + t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) + } + + if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) + } + + if expected := "registry.example.com"; challenge.Parameters["service"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) + } + + if expected := "fun"; challenge.Parameters["other"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) + } + + if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected) + } + +} + +func TestAuthChallengeNormalization(t *testing.T) { + testAuthChallengeNormalization(t, "reg.EXAMPLE.com") + testAuthChallengeNormalization(t, "bɿɒʜɔiɿ-ɿɘƚƨim-ƚol-ɒ-ƨʞnɒʜƚ.com") + testAuthChallengeNormalization(t, "reg.example.com:80") + testAuthChallengeConcurrent(t, "reg.EXAMPLE.com") +} + +func testAuthChallengeNormalization(t *testing.T, host string) { + + scm := NewSimpleManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: http.StatusUnauthorized, + } + resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host)) + + err = scm.AddResponse(resp) + if err != nil { + t.Fatal(err) + } + + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + lowered.Host = canonicalAddr(&lowered) + c, err := scm.GetChallenges(lowered) + if err != nil { + t.Fatal(err) + } + + if len(c) == 0 { + t.Fatal("Expected challenge for lower-cased-host URL") + } +} + +func testAuthChallengeConcurrent(t *testing.T, host string) { + + scm := NewSimpleManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: http.StatusUnauthorized, + } + resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host)) + var s sync.WaitGroup + s.Add(2) + go func() { + defer s.Done() + for i := 0; i < 200; i++ { + err = scm.AddResponse(resp) + if err != nil { + t.Error(err) + } + } + }() + go func() { + defer s.Done() + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + for k := 0; k < 200; k++ { + _, err := scm.GetChallenges(lowered) + if err != nil { + t.Error(err) + } + } + }() + s.Wait() +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go new file mode 100644 index 0000000..d6d884f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -0,0 +1,503 @@ +package auth + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" +) + +var ( + // ErrNoBasicAuthCredentials is returned if a request can't be authorized with + // basic auth due to lack of credentials. + ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") + + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. + ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +const defaultClientID = "registry-client" + +// AuthenticationHandler is an interface for authorizing a request from +// params from a "WWW-Authenicate" header for a single scheme. +type AuthenticationHandler interface { + // Scheme returns the scheme as expected from the "WWW-Authenicate" header. + Scheme() string + + // AuthorizeRequest adds the authorization header to a request (if needed) + // using the parameters from "WWW-Authenticate" method. The parameters + // values depend on the scheme. + AuthorizeRequest(req *http.Request, params map[string]string) error +} + +// CredentialStore is an interface for getting credentials for +// a given URL +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) +} + +// NewAuthorizer creates an authorizer which can handle multiple authentication +// schemes. The handlers are tried in order, the higher priority authentication +// methods should be first. The challengeMap holds a list of challenges for +// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). +func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { + return &endpointAuthorizer{ + challenges: manager, + handlers: handlers, + } +} + +type endpointAuthorizer struct { + challenges challenge.Manager + handlers []AuthenticationHandler + transport http.RoundTripper +} + +func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { + pingPath := req.URL.Path + if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { + pingPath = pingPath[:v2Root+4] + } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { + pingPath = pingPath[:v1Root] + "/v2/" + } else { + return nil + } + + ping := url.URL{ + Host: req.URL.Host, + Scheme: req.URL.Scheme, + Path: pingPath, + } + + challenges, err := ea.challenges.GetChallenges(ping) + if err != nil { + return err + } + + if len(challenges) > 0 { + for _, handler := range ea.handlers { + for _, c := range challenges { + if c.Scheme != handler.Scheme() { + continue + } + if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { + return err + } + } + } + } + + return nil +} + +// This is the minimum duration a token can last (in seconds). +// A token must not live less than 60 seconds because older versions +// of the Docker client didn't read their expiration from the token +// response and assumed 60 seconds. So to remain compatible with +// those implementations, a token must live at least this long. +const minimumTokenLifetimeSeconds = 60 + +// Private interface for time used by this package to enable tests to provide their own implementation. +type clock interface { + Now() time.Time +} + +type tokenHandler struct { + header http.Header + creds CredentialStore + transport http.RoundTripper + clock clock + + offlineAccess bool + forceOAuth bool + clientID string + scopes []Scope + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time +} + +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string +} + +// RepositoryScope represents a token scope for access +// to a repository. +type RepositoryScope struct { + Repository string + Class string + Actions []string +} + +// String returns the string representation of the repository +// using the scope grammar +func (rs RepositoryScope) String() string { + repoType := "repository" + if rs.Class != "" { + repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) + } + return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) +} + +// RegistryScope represents a token scope for access +// to resources in the registry. +type RegistryScope struct { + Name string + Actions []string +} + +// String returns the string representation of the user +// using the scope grammar +func (rs RegistryScope) String() string { + return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) +} + +// TokenHandlerOptions is used to configure a new token handler +type TokenHandlerOptions struct { + Transport http.RoundTripper + Credentials CredentialStore + + OfflineAccess bool + ForceOAuth bool + ClientID string + Scopes []Scope +} + +// An implementation of clock for providing real time data. +type realClock struct{} + +// Now implements clock +func (realClock) Now() time.Time { return time.Now() } + +// NewTokenHandler creates a new AuthenicationHandler which supports +// fetching tokens from a remote token server. +func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { + // Create options... + return NewTokenHandlerWithOptions(TokenHandlerOptions{ + Transport: transport, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: scope, + Actions: actions, + }, + }, + }) +} + +// NewTokenHandlerWithOptions creates a new token handler using the provided +// options structure. +func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { + handler := &tokenHandler{ + transport: options.Transport, + creds: options.Credentials, + offlineAccess: options.OfflineAccess, + forceOAuth: options.ForceOAuth, + clientID: options.ClientID, + scopes: options.Scopes, + clock: realClock{}, + } + + return handler +} + +func (th *tokenHandler) client() *http.Client { + return &http.Client{ + Transport: th.transport, + Timeout: 15 * time.Second, + } +} + +func (th *tokenHandler) Scheme() string { + return "bearer" +} + +func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + var additionalScopes []string + if fromParam := req.URL.Query().Get("from"); fromParam != "" { + additionalScopes = append(additionalScopes, RepositoryScope{ + Repository: fromParam, + Actions: []string{"pull"}, + }.String()) + } + + token, err := th.getToken(params, additionalScopes...) + if err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + return nil +} + +func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { + th.tokenLock.Lock() + defer th.tokenLock.Unlock() + scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) + for _, scope := range th.scopes { + scopes = append(scopes, scope.String()) + } + var addedScopes bool + for _, scope := range additionalScopes { + scopes = append(scopes, scope) + addedScopes = true + } + + now := th.clock.Now() + if now.After(th.tokenExpiration) || addedScopes { + token, expiration, err := th.fetchToken(params, scopes) + if err != nil { + return "", err + } + + // do not update cache for added scope tokens + if !addedScopes { + th.tokenCache = token + th.tokenExpiration = expiration + } + + return token, nil + } + + return th.tokenCache, nil +} + +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { + form := url.Values{} + form.Set("scope", strings.Join(scopes, " ")) + form.Set("service", service) + + clientID := th.clientID + if clientID == "" { + // Use default client, this is a required field + clientID = defaultClientID + } + form.Set("client_id", clientID) + + if refreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", refreshToken) + } else if th.creds != nil { + form.Set("grant_type", "password") + username, password := th.creds.Basic(realm) + form.Set("username", username) + form.Set("password", password) + + // attempt to get a refresh token + form.Set("access_type", "offline") + } else { + // refuse to do oauth without a grant type + return "", time.Time{}, fmt.Errorf("no supported grant type") + } + + resp, err := th.client().PostForm(realm.String(), form) + if err != nil { + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. + tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +type getTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { + + req, err := http.NewRequest("GET", realm.String(), nil) + if err != nil { + return "", time.Time{}, err + } + + reqParams := req.URL.Query() + + if service != "" { + reqParams.Add("service", service) + } + + for _, scope := range scopes { + reqParams.Add("scope", scope) + } + + if th.offlineAccess { + reqParams.Add("offline_token", "true") + clientID := th.clientID + if clientID == "" { + clientID = defaultClientID + } + reqParams.Add("client_id", clientID) + } + + if th.creds != nil { + username, password := th.creds.Basic(realm) + if username != "" && password != "" { + reqParams.Add("account", username) + req.SetBasicAuth(username, password) + } + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := th.client().Do(req) + if err != nil { + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr getTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && th.creds != nil { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return "", time.Time{}, ErrNoToken + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. + tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { + realm, ok := params["realm"] + if !ok { + return "", time.Time{}, errors.New("no realm specified for token auth challenge") + } + + // TODO(dmcgowan): Handle empty scheme and relative realm + realmURL, err := url.Parse(realm) + if err != nil { + return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + service := params["service"] + + var refreshToken string + + if th.creds != nil { + refreshToken = th.creds.RefreshToken(realmURL, service) + } + + if refreshToken != "" || th.forceOAuth { + return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) + } + + return th.fetchTokenWithBasicAuth(realmURL, service, scopes) +} + +type basicHandler struct { + creds CredentialStore +} + +// NewBasicHandler creaters a new authentiation handler which adds +// basic authentication credentials to a request. +func NewBasicHandler(creds CredentialStore) AuthenticationHandler { + return &basicHandler{ + creds: creds, + } +} + +func (*basicHandler) Scheme() string { + return "basic" +} + +func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if bh.creds != nil { + username, password := bh.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return ErrNoBasicAuthCredentials +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session_test.go b/vendor/github.com/docker/distribution/registry/client/auth/session_test.go new file mode 100644 index 0000000..4f54c75 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/session_test.go @@ -0,0 +1,866 @@ +package auth + +import ( + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/testutil" +) + +// An implementation of clock for providing fake time data. +type fakeClock struct { + current time.Time +} + +// Now implements clock +func (fc *fakeClock) Now() time.Time { return fc.current } + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +type testAuthenticationWrapper struct { + headers http.Header + authCheck func(string) bool + next http.Handler +} + +func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + auth := r.Header.Get("Authorization") + if auth == "" || !w.authCheck(auth) { + h := rw.Header() + for k, values := range w.headers { + h[k] = values + } + rw.WriteHeader(http.StatusUnauthorized) + return + } + w.next.ServeHTTP(rw, r) +} + +func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { + h := testutil.NewHandler(rrm) + wrapper := &testAuthenticationWrapper{ + + headers: http.Header(map[string][]string{ + "X-API-Version": {"registry/2.0"}, + "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, + "WWW-Authenticate": {authenticate}, + }), + authCheck: authCheck, + next: h, + } + + s := httptest.NewServer(wrapper) + return s.URL, s.Close +} + +// ping pings the provided endpoint to determine its required authorization challenges. +// If a version header is provided, the versions will be returned. +func ping(manager challenge.Manager, endpoint, versionHeader string) ([]APIVersion, error) { + resp, err := http.Get(endpoint) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return nil, err + } + + return APIVersions(resp, versionHeader), err +} + +type testCredentialStore struct { + username string + password string + refreshTokens map[string]string +} + +func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { + return tcs.username, tcs.password +} + +func (tcs *testCredentialStore) RefreshToken(u *url.URL, service string) string { + return tcs.refreshTokens[service] +} + +func (tcs *testCredentialStore) SetRefreshToken(u *url.URL, service string, token string) { + if tcs.refreshTokens != nil { + tcs.refreshTokens[service] = token + } +} + +func TestEndpointAuthorizeToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken"}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"badtoken"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := challenge.NewSimpleManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + e2, c2 := testServerWithAuth(m, authenicate, validCheck) + defer c2() + + challengeManager2 := challenge.NewSimpleManager() + versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 3 { + t.Fatalf("Unexpected version count: %d, expected 3", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check) + } + if check := (APIVersion{Type: "trust", Version: "1.0"}); versions[2] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) + } + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + +func TestEndpointAuthorizeRefreshToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + refreshToken1 := "0123456790abcdef" + refreshToken2 := "0123456790fedcba" + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)), + }, + }, + { + // In the future this test may fail and require using basic auth to get a different refresh token + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken2)), + }, + }, + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken2, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"badtoken","refresh_token":"%s"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := challenge.NewSimpleManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + creds := &testCredentialStore{ + refreshTokens: map[string]string{ + service: refreshToken1, + }, + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, creds, repo1, "pull", "push"))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + // Try with refresh token setting + e2, c2 := testServerWithAuth(m, authenicate, validCheck) + defer c2() + + challengeManager2 := challenge.NewSimpleManager() + versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } + + if creds.refreshTokens[service] != refreshToken2 { + t.Fatalf("Refresh token not set after change") + } + + // Try with bad token + e3, c3 := testServerWithAuth(m, authenicate, validCheck) + defer c3() + + challengeManager3 := challenge.NewSimpleManager() + versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport3 := transport.NewTransport(nil, NewAuthorizer(challengeManager3, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client3 := &http.Client{Transport: transport3} + + req, _ = http.NewRequest("GET", e3+"/v2/hello", nil) + resp, err = client3.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + +func TestEndpointAuthorizeV2RefreshToken(t *testing.T) { + service := "localhost.localdomain" + scope1 := "registry:catalog:search" + refreshToken1 := "0123456790abcdef" + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v1/search", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := challenge.NewSimpleManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + tho := TokenHandlerOptions{ + Credentials: &testCredentialStore{ + refreshTokens: map[string]string{ + service: refreshToken1, + }, + }, + Scopes: []Scope{ + RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + }, + } + + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandlerWithOptions(tho))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v1/search", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func TestEndpointAuthorizeTokenBasic(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken"}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + basicCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} + +func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + clock := &fakeClock{current: time.Now()} + options := TokenHandlerOptions{ + Transport: nil, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: repo, + Actions: []string{"pull", "push"}, + }, + }, + } + tHandler := NewTokenHandlerWithOptions(options) + tHandler.(*tokenHandler).clock = clock + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + // First call should result in a token exchange + // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. + timeIncrement := 1000 * time.Second + for i := 0; i < 4; i++ { + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 1 { + t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) + } + clock.current = clock.current.Add(timeIncrement) + } + + // After we've exceeded the expiration, we should see a second token exchange. + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 2 { + t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) + } +} + +func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + // This test sets things up such that the token was issued one increment + // earlier than its sibling in TestEndpointAuthorizeTokenBasicWithExpiresIn. + // This will mean that the token expires after 3 increments instead of 4. + clock := &fakeClock{current: time.Now()} + timeIncrement := 1000 * time.Second + firstIssuedAt := clock.Now() + clock.current = clock.current.Add(timeIncrement) + secondIssuedAt := clock.current.Add(2 * timeIncrement) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "issued_at": "` + firstIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "issued_at": "` + secondIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + + options := TokenHandlerOptions{ + Transport: nil, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: repo, + Actions: []string{"pull", "push"}, + }, + }, + } + tHandler := NewTokenHandlerWithOptions(options) + tHandler.(*tokenHandler).clock = clock + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + // First call should result in a token exchange + // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. + // We shaved one increment off of the equivalent logic in TestEndpointAuthorizeTokenBasicWithExpiresIn + // so this loop should have one fewer iteration. + for i := 0; i < 3; i++ { + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 1 { + t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) + } + clock.current = clock.current.Add(timeIncrement) + } + + // After we've exceeded the expiration, we should see a second token exchange. + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 2 { + t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) + } +} + +func TestEndpointAuthorizeBasic(t *testing.T) { + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + username := "user1" + password := "funSecretPa$$word" + authenicate := fmt.Sprintf("Basic realm=localhost") + validCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go new file mode 100644 index 0000000..e3ffcb0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go @@ -0,0 +1,162 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type httpBlobUpload struct { + statter distribution.BlobStatter + client *http.Client + + uuid string + startedAt time.Time + + location string // always the last value of the location header. + offset int64 + closed bool +} + +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUploadUnknown + } + return HandleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset +} + +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid +} + +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt +} + +func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hbu.location, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + values := req.URL.Query() + values.Set("digest", desc.Digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hbu.client.Do(req) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if !SuccessStatus(resp.StatusCode) { + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + } + + return hbu.statter.Stat(ctx, desc.Digest) +} + +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { + req, err := http.NewRequest("DELETE", hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + return nil + } + return hbu.handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer_test.go b/vendor/github.com/docker/distribution/registry/client/blob_writer_test.go new file mode 100644 index 0000000..099dca4 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer_test.go @@ -0,0 +1,211 @@ +package client + +import ( + "bytes" + "fmt" + "net/http" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" +) + +// Test implements distribution.BlobWriter +var _ distribution.BlobWriter = &httpBlobUpload{} + +func TestUploadReadFrom(t *testing.T) { + _, b := newRandomBlob(64) + repo := "test/upload/readfrom" + locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo) + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Docker-Distribution-API-Version": {"registry/2.0"}, + }), + }, + }, + // Test Valid case + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {"0-63"}, + }), + }, + }, + // Test invalid range + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {""}, + }), + }, + }, + // Test 404 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusNotFound, + }, + }, + // Test 400 valid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte(` + { "errors": + [ + { + "code": "BLOB_UPLOAD_INVALID", + "message": "blob upload invalid", + "detail": "more detail" + } + ] + } `), + }, + }, + // Test 400 invalid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte("something bad happened"), + }, + }, + // Test 500 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusInternalServerError, + }, + }, + }) + + e, c := testServer(m) + defer c() + + blobUpload := &httpBlobUpload{ + client: &http.Client{}, + } + + // Valid case + blobUpload.location = e + locationPath + n, err := blobUpload.ReadFrom(bytes.NewReader(b)) + if err != nil { + t.Fatalf("Error calling ReadFrom: %s", err) + } + if n != 64 { + t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n) + } + + // Bad range + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when bad range received") + } + + // 404 + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("Wrong error thrown: %s, expected %s", err, distribution.ErrBlobUploadUnknown) + } + + // 400 valid json + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(errcode.Errors); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if len(uploadErr) != 1 { + t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr)) + } else { + v2Err, ok := uploadErr[0].(errcode.Error) + if !ok { + t.Fatalf("Not an 'Error' type: %#v", uploadErr[0]) + } + if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { + t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) + } + if expected := "blob upload invalid"; v2Err.Message != expected { + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected) + } + if expected := "more detail"; v2Err.Detail.(string) != expected { + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Detail.(string), expected) + } + } + + // 400 invalid json + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else { + respStr := string(uploadErr.Response) + if expected := "something bad happened"; respStr != expected { + t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected) + } + } + + // 500 + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if expected := "500 " + http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected { + t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected) + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go new file mode 100644 index 0000000..52d49d5 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/errors.go @@ -0,0 +1,139 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth/challenge" +) + +// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty +// errcode.Errors slice. +var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") + +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. +type UnexpectedHTTPStatusError struct { + Status string +} + +func (e *UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) +} + +// UnexpectedHTTPResponseError is returned when an expected HTTP status code +// is returned, but the content was unexpected and failed to be parsed. +type UnexpectedHTTPResponseError struct { + ParseErr error + StatusCode int + Response []byte +} + +func (e *UnexpectedHTTPResponseError) Error() string { + return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) +} + +func parseHTTPErrorResponse(statusCode int, r io.Reader) error { + var errors errcode.Errors + body, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + // For backward compatibility, handle irregularly formatted + // messages that contain a "details" field. + var detailsErr struct { + Details string `json:"details"` + } + err = json.Unmarshal(body, &detailsErr) + if err == nil && detailsErr.Details != "" { + switch statusCode { + case http.StatusUnauthorized: + return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) + case http.StatusTooManyRequests: + return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) + default: + return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) + } + } + + if err := json.Unmarshal(body, &errors); err != nil { + return &UnexpectedHTTPResponseError{ + ParseErr: err, + StatusCode: statusCode, + Response: body, + } + } + + if len(errors) == 0 { + // If there was no error specified in the body, return + // UnexpectedHTTPResponseError. + return &UnexpectedHTTPResponseError{ + ParseErr: ErrNoErrorsInBody, + StatusCode: statusCode, + Response: body, + } + } + + return errors +} + +func makeErrorList(err error) []error { + if errL, ok := err.(errcode.Errors); ok { + return []error(errL) + } + return []error{err} +} + +func mergeErrors(err1, err2 error) error { + return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) +} + +// HandleErrorResponse returns error parsed from HTTP response for an +// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An +// UnexpectedHTTPStatusError returned for response code outside of expected +// range. +func HandleErrorResponse(resp *http.Response) error { + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + // Check for OAuth errors within the `WWW-Authenticate` header first + // See https://tools.ietf.org/html/rfc6750#section-3 + for _, c := range challenge.ResponseChallenges(resp) { + if c.Scheme == "bearer" { + var err errcode.Error + // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 + switch c.Parameters["error"] { + case "invalid_token": + err.Code = errcode.ErrorCodeUnauthorized + case "insufficient_scope": + err.Code = errcode.ErrorCodeDenied + default: + continue + } + if description := c.Parameters["error_description"]; description != "" { + err.Message = description + } else { + err.Message = err.Code.Message() + } + + return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) + } + } + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) + } + return err + } + return &UnexpectedHTTPStatusError{Status: resp.Status} +} + +// SuccessStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). +func SuccessStatus(status int) bool { + return status >= 200 && status <= 399 +} diff --git a/vendor/github.com/docker/distribution/registry/client/errors_test.go b/vendor/github.com/docker/distribution/registry/client/errors_test.go new file mode 100644 index 0000000..ca9dddd --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/errors_test.go @@ -0,0 +1,104 @@ +package client + +import ( + "bytes" + "io" + "net/http" + "strings" + "testing" +) + +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { return nil } + +func TestHandleErrorResponse401ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: action requires authentication" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponse401WithInvalidBody(t *testing.T) { + json := "{invalid json}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: authentication required" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}" + response := &http.Response{ + Status: "400 Bad Request", + StatusCode: 400, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "digest invalid: provided digest does not match" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode404EmptyErrorSlice(t *testing.T) { + json := `{"randomkey": "randomvalue"}` + response := &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := `error parsing HTTP 404 response body: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"` + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { + json := "{invalid json}" + response := &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "error parsing HTTP 404 response body: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) { + response := &http.Response{ + Status: "501 Not Implemented", + StatusCode: 501, + Body: nopCloser{bytes.NewBufferString("{\"Error Encountered\" : \"Function not implemented.\"}")}, + } + err := HandleErrorResponse(response) + + expectedMsg := "received unexpected HTTP status: 501 Not Implemented" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go new file mode 100644 index 0000000..1ebd0b1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -0,0 +1,853 @@ +package client + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" +) + +// Registry provides an interface for calling Repositories, which returns a catalog of repositories. +type Registry interface { + Repositories(ctx context.Context, repos []string, last string) (n int, err error) +} + +// checkHTTPRedirect is a callback that can manipulate redirected HTTP +// requests. It is used to preserve Accept and Range headers. +func checkHTTPRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + + if len(via) > 0 { + for headerName, headerVals := range via[0].Header { + if headerName != "Accept" && headerName != "Range" { + continue + } + for _, val := range headerVals { + // Don't add to redirected request if redirected + // request already has a header with the same + // name and value. + hasValue := false + for _, existingVal := range req.Header[headerName] { + if existingVal == val { + hasValue = true + break + } + } + if !hasValue { + req.Header.Add(headerName, val) + } + } + } + } + + return nil +} + +// NewRegistry creates a registry namespace which can be used to get a listing of repositories +func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + Timeout: 1 * time.Minute, + CheckRedirect: checkHTTPRedirect, + } + + return ®istry{ + client: client, + ub: ub, + context: ctx, + }, nil +} + +type registry struct { + client *http.Client + ub *v2.URLBuilder + context context.Context +} + +// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size +// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there +// are no more entries +func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { + var numFilled int + var returnErr error + + values := buildCatalogValues(len(entries), last) + u, err := r.ub.BuildCatalogURL(values) + if err != nil { + return 0, err + } + + resp, err := r.client.Get(u) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&ctlg); err != nil { + return 0, err + } + + for cnt := range ctlg.Repositories { + entries[cnt] = ctlg.Repositories[cnt] + } + numFilled = len(ctlg.Repositories) + + link := resp.Header.Get("Link") + if link == "" { + returnErr = io.EOF + } + } else { + return 0, HandleErrorResponse(resp) + } + + return numFilled, returnErr +} + +// NewRepository creates a new Repository for the given repository name and base URL. +func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + CheckRedirect: checkHTTPRedirect, + // TODO(dmcgowan): create cookie jar + } + + return &repository{ + client: client, + ub: ub, + name: name, + context: ctx, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named +} + +func (r *repository) Named() reference.Named { + return r.name +} + +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + name: r.name, + ub: r.ub, + client: r.client, + } + return &blobs{ + name: r.name, + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), + } +} + +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire + return &manifests{ + name: r.name, + ub: r.ub, + client: r.client, + etags: make(map[string]string), + }, nil +} + +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + context: r.context, + name: r.Named(), + } +} + +// tags implements remote tagging operations. +type tags struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named +} + +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + u, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + for { + resp, err := t.client.Get(u) + if err != nil { + return tags, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = append(tags, tagsResponse.Tags...) + if link := resp.Header.Get("Link"); link != "" { + u = strings.Trim(strings.Split(link, ";")[0], "<>") + } else { + return tags, nil + } + } else { + return tags, HandleErrorResponse(resp) + } + } +} + +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.ParseDigest(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + +} + +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the tag. If the registry doesn't support HEADing +// a manifest, fallback to GET. +func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + ref, err := reference.WithTag(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := t.ub.BuildManifestURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + newRequest := func(method string) (*http.Response, error) { + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + resp, err := t.client.Do(req) + return resp, err + } + + resp, err := newRequest("HEAD") + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + return descriptorFromResponse(resp) + default: + // if the response is an error - there will be no body to decode. + // Issue a GET request: + // - for data from a server that does not handle HEAD + // - to get error details in case of a failure + resp, err = newRequest("GET") + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if resp.StatusCode >= 200 && resp.StatusCode < 400 { + return descriptorFromResponse(resp) + } + return distribution.Descriptor{}, HandleErrorResponse(resp) + } +} + +func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + panic("not implemented") +} + +func (t *tags) Untag(ctx context.Context, tag string) error { + panic("not implemented") +} + +type manifests struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + etags map[string]string +} + +func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return false, err + } + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return false, err + } + + resp, err := ms.client.Head(u) + if err != nil { + return false, err + } + + if SuccessStatus(resp.StatusCode) { + return true, nil + } else if resp.StatusCode == http.StatusNotFound { + return false, nil + } + return false, HandleErrorResponse(resp) +} + +// AddEtagToTag allows a client to supply an eTag to Get which will be +// used for a conditional HTTP request. If the eTag matches, a nil manifest +// and ErrManifestNotModified error will be returned. etag is automatically +// quoted when added to this map. +func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { + return etagOption{tag, etag} +} + +type etagOption struct{ tag, etag string } + +func (o etagOption) Apply(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifests); ok { + ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) + return nil + } + return fmt.Errorf("etag options is a client-only option") +} + +// ReturnContentDigest allows a client to set a the content digest on +// a successful request from the 'Docker-Content-Digest' header. This +// returned digest is represents the digest which the registry uses +// to refer to the content and can be used to delete the content. +func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { + return contentDigestOption{dgst} +} + +type contentDigestOption struct{ digest *digest.Digest } + +func (o contentDigestOption) Apply(ms distribution.ManifestService) error { + return nil +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + contentDgst *digest.Digest + ) + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) + if err != nil { + return nil, err + } + } else if opt, ok := option.(contentDigestOption); ok { + contentDgst = opt.digest + } else { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) + if err != nil { + return nil, err + } + } + + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) + } + + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, distribution.ErrManifestNotModified + } else if SuccessStatus(resp.StatusCode) { + if contentDgst != nil { + dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst + } + } + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil + } + return nil, HandleErrorResponse(resp) +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. +func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + ref := ms.name + var tagged bool + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.Tag) + if err != nil { + return "", err + } + tagged = true + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } + } + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + + resp, err := ms.client.Do(putRequest) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.ParseDigest(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil + } + + return "", HandleErrorResponse(resp) +} + +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + +type blobs struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + return baseURL.ResolveReference(locationURL).String(), nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + reader, err := bs.Open(ctx, dgst) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return nil, err + } + + return transport.NewHTTPReadSeeker(bs.client, blobURL, + func(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUnknown + } + return HandleErrorResponse(resp) + }), nil +} + +func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.Canonical.New() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + } + + desc := distribution.Descriptor{ + MediaType: mediaType, + Size: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) + if err != nil { + return nil, err + } + + resp, err := bs.client.Post(u, "", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + default: + return nil, HandleErrorResponse(resp) + } +} + +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + +type blobStatter struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client +} + +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Head(u) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) + } + + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil + } else if resp.StatusCode == http.StatusNotFound { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{}, HandleErrorResponse(resp) +} + +func buildCatalogValues(maxEntries int, last string) url.Values { + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + return values +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/repository_test.go b/vendor/github.com/docker/distribution/registry/client/repository_test.go new file mode 100644 index 0000000..a232e03 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/repository_test.go @@ -0,0 +1,1226 @@ +package client + +import ( + "bytes" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" + "github.com/docker/distribution/uuid" + "github.com/docker/libtrust" +) + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +func newRandomBlob(size int) (digest.Digest, []byte) { + b := make([]byte, size) + if n, err := rand.Read(b); err != nil { + panic(err) + } else if n != size { + panic("unable to read enough bytes") + } + + return digest.FromBytes(b), b +} + +func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) +} + +func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { + headers := map[string][]string{ + "Content-Length": {strconv.Itoa(len(content))}, + "Content-Type": {"application/json; charset=utf-8"}, + } + if link != "" { + headers["Link"] = append(headers["Link"], link) + } + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: route, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(headers), + }, + }) +} + +func TestBlobDelete(t *testing.T) { + dgst, _ := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.ParseNamed("test.example.com/repo1") + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + err = l.Delete(ctx, dgst) + if err != nil { + t.Errorf("Error deleting blob: %s", err.Error()) + } + +} + +func TestBlobFetch(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + repo, _ := reference.ParseNamed("test.example.com/repo1") + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + b, err := l.Get(ctx, d1) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(b, b1) != 0 { + t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) + } + + // TODO(dmcgowan): Test for unknown blob case +} + +func TestBlobExistsNoContentLength(t *testing.T) { + var m testutil.RequestResponseMap + + repo, _ := reference.ParseNamed("biff") + dgst, content := newRandomBlob(1024) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + // "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + // "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + _, err = l.Stat(ctx, dgst) + if err == nil { + t.Fatal(err) + } + if !strings.Contains(err.Error(), "missing content-length heade") { + t.Fatalf("Expected missing content-length error message") + } + +} + +func TestBlobExists(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + repo, _ := reference.ParseNamed("test.example.com/repo1") + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + stat, err := l.Stat(ctx, d1) + if err != nil { + t.Fatal(err) + } + + if stat.Digest != d1 { + t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) + } + + if stat.Size != int64(len(b1)) { + t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1)) + } + + // TODO(dmcgowan): Test error cases and ErrBlobUnknown case +} + +func TestBlobUploadChunked(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + chunks := [][]byte{ + b1[0:256], + b1[256:512], + b1[512:513], + b1[513:1024], + } + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") + uuids := []string{uuid.Generate().String()} + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[0]}, + "Docker-Upload-UUID": {uuids[0]}, + "Range": {"0-0"}, + }), + }, + }) + offset := 0 + for i, chunk := range chunks { + uuids = append(uuids, uuid.Generate().String()) + newOffset := offset + len(chunk) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i], + Body: chunk, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i+1]}, + "Docker-Upload-UUID": {uuids[i+1]}, + "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, + }), + }, + }) + offset = newOffset + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[len(uuids)-1], + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", offset-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(offset)}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uuids[0] { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) + } + + for _, chunk := range chunks { + n, err := upload.Write(chunk) + if err != nil { + t.Fatal(err) + } + if n != len(chunk) { + t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk)) + } + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func TestBlobUploadMonolithic(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") + uploadID := uuid.Generate().String() + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Range": {"0-0"}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, + Body: b1, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(b1))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uploadID { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) + } + + n, err := upload.ReadFrom(bytes.NewReader(b1)) + if err != nil { + t.Fatal(err) + } + if n != int64(len(b1)) { + t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func TestBlobMount(t *testing.T) { + dgst, content := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") + + sourceRepo, _ := reference.ParseNamed("test.example.com/sourcerepo") + canonicalRef, _ := reference.WithDigest(sourceRepo, dgst) + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + QueryParams: map[string][]string{"from": {sourceRepo.Name()}, "mount": {dgst.String()}}, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/" + dgst.String()}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + l := r.Blobs(ctx) + + bw, err := l.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatalf("Expected blob writer to be nil, was %v", bw) + } + + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if ebm.From.Digest() != dgst { + t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) + } + if ebm.From.Name() != sourceRepo.Name() { + t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) + } + } else { + t.Fatalf("Unexpected error: %v, expected an ErrBlobMounted", err) + } +} + +func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { + blobs := make([]schema1.FSLayer, blobCount) + history := make([]schema1.History, blobCount) + + for i := 0; i < blobCount; i++ { + dgst, blob := newRandomBlob((i % 5) * 16) + + blobs[i] = schema1.FSLayer{BlobSum: dgst} + history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} + } + + m := schema1.Manifest{ + Name: name.String(), + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + + sm, err := schema1.Sign(&m, pk) + if err != nil { + panic(err) + } + + return sm, digest.FromBytes(sm.Canonical), sm.Canonical +} + +func addTestManifestWithEtag(repo reference.Named, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { + actualDigest := digest.FromBytes(content) + getReqWithEtag := testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + Headers: http.Header(map[string][]string{ + "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, + }), + } + + var getRespWithEtag testutil.Response + if actualDigest.String() == dgst { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusNotModified, + Body: []byte{}, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, + }), + } + } else { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, + }), + } + + } + *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) +} + +func contentDigestString(mediatype string, content []byte) string { + if mediatype == schema1.MediaTypeSignedManifest { + m, _, _ := distribution.UnmarshalManifest(mediatype, content) + content = m.(*schema1.SignedManifest).Canonical + } + return digest.Canonical.FromBytes(content).String() +} + +func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + "Docker-Content-Digest": {contentDigestString(mediatype, content)}, + }), + }, + }) + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + "Docker-Content-Digest": {digest.Canonical.FromBytes(content).String()}, + }), + }, + }) + +} + +func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { + if m1.Name != m2.Name { + return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) + } + if m1.Tag != m2.Tag { + return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) + } + if len(m1.FSLayers) != len(m2.FSLayers) { + return fmt.Errorf("fs blob length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) + } + for i := range m1.FSLayers { + if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { + return fmt.Errorf("blobsum does not match %q != %q", m1.FSLayers[i].BlobSum, m2.FSLayers[i].BlobSum) + } + } + if len(m1.History) != len(m2.History) { + return fmt.Errorf("history length does not match %d != %d", len(m1.History), len(m2.History)) + } + for i := range m1.History { + if m1.History[i].V1Compatibility != m2.History[i].V1Compatibility { + return fmt.Errorf("blobsum does not match %q != %q", m1.History[i].V1Compatibility, m2.History[i].V1Compatibility) + } + } + return nil +} + +func TestV1ManifestFetch(t *testing.T) { + ctx := context.Background() + repo, _ := reference.ParseNamed("test.example.com/repo") + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + _, pl, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + addTestManifest(repo, dgst.String(), schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "latest", schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "badcontenttype", "text/html", pl, &m) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + ok, err := ms.Exists(ctx, dgst) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("Manifest does not exist") + } + + manifest, err := ms.Get(ctx, dgst) + if err != nil { + t.Fatal(err) + } + v1manifest, ok := manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err := checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + var contentDigest digest.Digest + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("latest"), ReturnContentDigest(&contentDigest)) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + if contentDigest != dgst { + t.Fatalf("Unexpected returned content digest %v, expected %v", contentDigest, dgst) + } + + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("badcontenttype")) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } +} + +func TestManifestFetchWithEtag(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo/by/tag") + _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + clientManifestService, ok := ms.(*manifests) + if !ok { + panic("wrong type for client manifest service") + } + _, err = clientManifestService.Get(ctx, d1, distribution.WithTag("latest"), AddEtagToTag("latest", d1.String())) + if err != distribution.ErrManifestNotModified { + t.Fatal(err) + } +} + +func TestManifestDelete(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo/delete") + _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo.Name() + "/manifests/" + dgst1.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + if err := ms.Delete(ctx, dgst1); err != nil { + t.Fatal(err) + } + if err := ms.Delete(ctx, dgst2); err == nil { + t.Fatal("Expected error deleting unknown manifest") + } + // TODO(dmcgowan): Check for specific unknown error +} + +func TestManifestPut(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo/delete") + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) + + _, payload, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/manifests/other", + Body: payload, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + + putDgst := digest.FromBytes(m1.Canonical) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/manifests/" + putDgst.String(), + Body: payload, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {putDgst.String()}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + if _, err := ms.Put(ctx, m1, distribution.WithTag(m1.Tag)); err != nil { + t.Fatal(err) + } + + if _, err := ms.Put(ctx, m1); err != nil { + t.Fatal(err) + } + + // TODO(dmcgowan): Check for invalid input error +} + +func TestManifestTags(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") + tagsList := []byte(strings.TrimSpace(` +{ + "name": "test.example.com/repo/tags/list", + "tags": [ + "tag1", + "tag2", + "funtag" + ] +} + `)) + var m testutil.RequestResponseMap + for i := 0; i < 3; i++ { + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/tags/list", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: tagsList, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(tagsList))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + } + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + tagService := r.Tags(ctx) + + tags, err := tagService.All(ctx) + if err != nil { + t.Fatal(err) + } + if len(tags) != 3 { + t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) + } + + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } + // TODO(dmcgowan): Check for error cases +} + +func TestObtainsErrorForMissingTag(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo") + + var m testutil.RequestResponseMap + var errors errcode.Errors + errors = append(errors, v2.ErrorCodeManifestUnknown.WithDetail("unknown manifest")) + errBytes, err := json.Marshal(errors) + if err != nil { + t.Fatal(err) + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/1.0.0", + }, + Response: testutil.Response{ + StatusCode: http.StatusNotFound, + Body: errBytes, + Headers: http.Header(map[string][]string{ + "Content-Type": {"application/json; charset=utf-8"}, + }), + }, + }) + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + tagService := r.Tags(ctx) + + _, err = tagService.Get(ctx, "1.0.0") + if err == nil { + t.Fatalf("Expected an error") + } + if !strings.Contains(err.Error(), "manifest unknown") { + t.Fatalf("Expected unknown manifest error message") + } +} + +func TestManifestTagsPaginated(t *testing.T) { + s := httptest.NewServer(http.NotFoundHandler()) + defer s.Close() + + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") + tagsList := []string{"tag1", "tag2", "funtag"} + var m testutil.RequestResponseMap + for i := 0; i < 3; i++ { + body, err := json.Marshal(map[string]interface{}{ + "name": "test.example.com/repo/tags/list", + "tags": []string{tagsList[i]}, + }) + if err != nil { + t.Fatal(err) + } + queryParams := make(map[string][]string) + if i > 0 { + queryParams["n"] = []string{"1"} + queryParams["last"] = []string{tagsList[i-1]} + } + headers := http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(body))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }) + if i < 2 { + headers.Set("Link", "<"+s.URL+"/v2/"+repo.Name()+"/tags/list?n=1&last="+tagsList[i]+`>; rel="next"`) + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/tags/list", + QueryParams: queryParams, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: body, + Headers: headers, + }, + }) + } + + s.Config.Handler = testutil.NewHandler(m) + + r, err := NewRepository(context.Background(), repo, s.URL, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + tagService := r.Tags(ctx) + + tags, err := tagService.All(ctx) + if err != nil { + t.Fatal(tags, err) + } + if len(tags) != 3 { + t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) + } + + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } +} + +func TestManifestUnauthorized(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo") + _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusUnauthorized, + Body: []byte("garbage"), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = ms.Get(ctx, dgst) + if err == nil { + t.Fatal("Expected error fetching manifest") + } + v2Err, ok := err.(errcode.Error) + if !ok { + t.Fatalf("Unexpected error type: %#v", err) + } + if v2Err.Code != errcode.ErrorCodeUnauthorized { + t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) + } + if expected := errcode.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { + t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) + } +} + +func TestCatalog(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=5", + []byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 5) + + r, err := NewRegistry(context.Background(), e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + numFilled, err := r.Repositories(ctx, entries, "") + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 3 { + t.Fatalf("Got wrong number of repos") + } +} + +func TestCatalogInParts(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=2", + []byte("{\"repositories\":[\"bar\", \"baz\"]}"), + "", &m) + addTestCatalog( + "/v2/_catalog?last=baz&n=2", + []byte("{\"repositories\":[\"foo\"]}"), + "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 2) + + r, err := NewRegistry(context.Background(), e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + numFilled, err := r.Repositories(ctx, entries, "") + if err != nil { + t.Fatal(err) + } + + if numFilled != 2 { + t.Fatalf("Got wrong number of repos") + } + + numFilled, err = r.Repositories(ctx, entries, "baz") + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 1 { + t.Fatalf("Got wrong number of repos") + } +} + +func TestSanitizeLocation(t *testing.T) { + for _, testcase := range []struct { + description string + location string + source string + expected string + err error + }{ + { + description: "ensure relative location correctly resolved", + location: "/v2/foo/baasdf", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf", + }, + { + description: "ensure parameters are preserved", + location: "/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + }, + { + description: "ensure new hostname overidden", + location: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + source: "http://blahalaja.com/v1", + expected: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + }, + } { + fatalf := func(format string, args ...interface{}) { + t.Fatalf(testcase.description+": "+format, args...) + } + + s, err := sanitizeLocation(testcase.location, testcase.source) + if err != testcase.err { + if testcase.err != nil { + fatalf("expected error: %v != %v", err, testcase) + } else { + fatalf("unexpected error sanitizing: %v", err) + } + } + + if s != testcase.expected { + fatalf("bad sanitize: %q != %q", s, testcase.expected) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go new file mode 100644 index 0000000..e5ff09d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -0,0 +1,251 @@ +package transport + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "regexp" + "strconv" +) + +var ( + contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) + + // ErrWrongCodeForByteRange is returned if the client sends a request + // with a Range header but the server returns a 2xx or 3xx code other + // than 206 Partial Content. + ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") +) + +// ReadSeekCloser combines io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET +// request. When seeking and starting a read from a non-zero offset +// the a "Range" header will be added which sets the offset. +// TODO(dmcgowan): Move this into a separate utility package +func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { + return &httpReadSeeker{ + client: client, + url: url, + errorHandler: errorHandler, + } +} + +type httpReadSeeker struct { + client *http.Client + url string + + // errorHandler creates an error from an unsuccessful HTTP response. + // This allows the error to be created with the HTTP response body + // without leaking the body through a returned error. + errorHandler func(*http.Response) error + + size int64 + + // rc is the remote read closer. + rc io.ReadCloser + // readerOffset tracks the offset as of the last read. + readerOffset int64 + // seekOffset allows Seek to override the offset. Seek changes + // seekOffset instead of changing readOffset directly so that + // connection resets can be delayed and possibly avoided if the + // seek is undone (i.e. seeking to the end and then back to the + // beginning). + seekOffset int64 + err error +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.err != nil { + return 0, hrs.err + } + + // If we sought to a different position, we need to reset the + // connection. This logic is here instead of Seek so that if + // a seek is undone before the next read, the connection doesn't + // need to be closed and reopened. A common example of this is + // seeking to the end to determine the length, and then seeking + // back to the original position. + if hrs.readerOffset != hrs.seekOffset { + hrs.reset() + } + + hrs.readerOffset = hrs.seekOffset + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.seekOffset += int64(n) + hrs.readerOffset += int64(n) + + return n, err +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.err != nil { + return 0, hrs.err + } + + lastReaderOffset := hrs.readerOffset + + if whence == os.SEEK_SET && hrs.rc == nil { + // If no request has been made yet, and we are seeking to an + // absolute position, set the read offset as well to avoid an + // unnecessary request. + hrs.readerOffset = offset + } + + _, err := hrs.reader() + if err != nil { + hrs.readerOffset = lastReaderOffset + return 0, err + } + + newOffset := hrs.seekOffset + + switch whence { + case os.SEEK_CUR: + newOffset += offset + case os.SEEK_END: + if hrs.size < 0 { + return 0, errors.New("content length not known") + } + newOffset = hrs.size + offset + case os.SEEK_SET: + newOffset = offset + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + hrs.seekOffset = newOffset + } + + return hrs.seekOffset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.rc, nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.readerOffset > 0 { + // If we are at different offset, issue a range request from there. + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) + // TODO: get context in here + // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) + } + + req.Header.Add("Accept-Encoding", "identity") + resp, err := hrs.client.Do(req) + if err != nil { + return nil, err + } + + // Normally would use client.SuccessStatus, but that would be a cyclic + // import + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } + hrs.rc = resp.Body + } else { + defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + return hrs.rc, nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go new file mode 100644 index 0000000..30e45fa --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/transport/transport.go @@ -0,0 +1,147 @@ +package transport + +import ( + "io" + "net/http" + "sync" +) + +// RequestModifier represents an object which will do an inplace +// modification of an HTTP request. +type RequestModifier interface { + ModifyRequest(*http.Request) error +} + +type headerModifier http.Header + +// NewHeaderRequestModifier returns a new RequestModifier which will +// add the given headers to a request. +func NewHeaderRequestModifier(header http.Header) RequestModifier { + return headerModifier(header) +} + +func (h headerModifier) ModifyRequest(req *http.Request) error { + for k, s := range http.Header(h) { + req.Header[k] = append(req.Header[k], s...) + } + + return nil +} + +// NewTransport creates a new transport which will apply modifiers to +// the request on a RoundTrip call. +func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { + return &transport{ + Modifiers: modifiers, + Base: base, + } +} + +// transport is an http.RoundTripper that makes HTTP requests after +// copying and modifying the request +type transport struct { + Modifiers []RequestModifier + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := cloneRequest(req) + for _, modifier := range t.Modifiers { + if err := modifier.ModifyRequest(req2); err != nil { + return nil, err + } + } + + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/github.com/docker/distribution/registry/doc.go b/vendor/github.com/docker/distribution/registry/doc.go new file mode 100644 index 0000000..a1ba7f3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/doc.go @@ -0,0 +1,2 @@ +// Package registry provides the main entrypoints for running a registry. +package registry diff --git a/vendor/github.com/docker/distribution/registry/handlers/api_test.go b/vendor/github.com/docker/distribution/registry/handlers/api_test.go new file mode 100644 index 0000000..9d64fbb --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/api_test.go @@ -0,0 +1,2513 @@ +package handlers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "path" + "reflect" + "regexp" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + _ "github.com/docker/distribution/registry/storage/driver/testdriver" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" + "github.com/gorilla/handlers" +) + +var headerConfig = http.Header{ + "X-Content-Type-Options": []string{"nosniff"}, +} + +// TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified +// 200 OK response. +func TestCheckAPI(t *testing.T) { + env := newTestEnv(t, false) + defer env.Shutdown() + baseURL, err := env.builder.BuildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Length": []string{"2"}, + }) + + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unexpected error reading response body: %v", err) + } + + if string(p) != "{}" { + t.Fatalf("unexpected response body: %v", string(p)) + } +} + +// TestCatalogAPI tests the /v2/_catalog endpoint +func TestCatalogAPI(t *testing.T) { + chunkLen := 2 + env := newTestEnv(t, false) + defer env.Shutdown() + + values := url.Values{ + "last": []string{""}, + "n": []string{strconv.Itoa(chunkLen)}} + + catalogURL, err := env.builder.BuildCatalogURL(values) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + // ----------------------------------- + // try to get an empty catalog + resp, err := http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + var ctlg struct { + Repositories []string `json:"repositories"` + } + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // we haven't pushed anything to the registry yet + if len(ctlg.Repositories) != 0 { + t.Fatalf("repositories has unexpected values") + } + + if resp.Header.Get("Link") != "" { + t.Fatalf("repositories has more data when none expected") + } + + // ----------------------------------- + // push something to the registry and try again + images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"} + + for _, image := range images { + createRepository(env, t, image, "sometag") + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != chunkLen { + t.Fatalf("repositories has unexpected values") + } + + for _, image := range images[:chunkLen] { + if !contains(ctlg.Repositories, image) { + t.Fatalf("didn't find our repository '%s' in the catalog", image) + } + } + + link := resp.Header.Get("Link") + if link == "" { + t.Fatalf("repositories has less data than expected") + } + + newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1]) + + // ----------------------------------- + // get the last chunk of data + + catalogURL, err = env.builder.BuildCatalogURL(newValues) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != 1 { + t.Fatalf("repositories has unexpected values") + } + + lastImage := images[len(images)-1] + if !contains(ctlg.Repositories, lastImage) { + t.Fatalf("didn't find our repository '%s' in the catalog", lastImage) + } + + link = resp.Header.Get("Link") + if link != "" { + t.Fatalf("catalog has unexpected data") + } +} + +func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Values { + re := regexp.MustCompile("<(/v2/_catalog.*)>; rel=\"next\"") + matches := re.FindStringSubmatch(urlStr) + + if len(matches) != 2 { + t.Fatalf("Catalog link address response was incorrect") + } + linkURL, _ := url.Parse(matches[1]) + urlValues := linkURL.Query() + + if urlValues.Get("n") != strconv.Itoa(numEntries) { + t.Fatalf("Catalog link entry size is incorrect") + } + + if urlValues.Get("last") != last { + t.Fatal("Catalog link last entry is incorrect") + } + + return urlValues +} + +func contains(elems []string, e string) bool { + for _, elem := range elems { + if elem == e { + return true + } + } + return false +} + +func TestURLPrefix(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + config.HTTP.Prefix = "/test/" + config.HTTP.Headers = headerConfig + + env := newTestEnvWithConfig(t, &config) + defer env.Shutdown() + + baseURL, err := env.builder.BuildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + parsed, _ := url.Parse(baseURL) + if !strings.HasPrefix(parsed.Path, config.HTTP.Prefix) { + t.Fatalf("Prefix %v not included in test url %v", config.HTTP.Prefix, baseURL) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Length": []string{"2"}, + }) +} + +type blobArgs struct { + imageName reference.Named + layerFile io.ReadSeeker + layerDigest digest.Digest +} + +func makeBlobArgs(t *testing.T) blobArgs { + layerFile, layerDigest, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + args := blobArgs{ + layerFile: layerFile, + layerDigest: layerDigest, + } + args.imageName, _ = reference.ParseNamed("foo/bar") + return args +} + +// TestBlobAPI conducts a full test of the of the blob api. +func TestBlobAPI(t *testing.T) { + deleteEnabled := false + env1 := newTestEnv(t, deleteEnabled) + defer env1.Shutdown() + args := makeBlobArgs(t) + testBlobAPI(t, env1, args) + + deleteEnabled = true + env2 := newTestEnv(t, deleteEnabled) + defer env2.Shutdown() + args = makeBlobArgs(t) + testBlobAPI(t, env2, args) + +} + +func TestBlobDelete(t *testing.T) { + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + + args := makeBlobArgs(t) + env = testBlobAPI(t, env, args) + testBlobDelete(t, env, args) +} + +func TestRelativeURL(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + config.HTTP.Headers = headerConfig + config.HTTP.RelativeURLs = false + env := newTestEnvWithConfig(t, &config) + defer env.Shutdown() + ref, _ := reference.WithName("foo/bar") + uploadURLBaseAbs, _ := startPushLayer(t, env, ref) + + u, err := url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + args := makeBlobArgs(t) + resp, err := doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload with non-relative configuration") + } + + config.HTTP.RelativeURLs = true + args = makeBlobArgs(t) + uploadURLBaseRelative, _ := startPushLayer(t, env, ref) + u, err = url.Parse(uploadURLBaseRelative) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Absolute URL returned from blob upload chunk with relative configuration") + } + + // Start a new upload in absolute mode to get a valid base URL + config.HTTP.RelativeURLs = false + uploadURLBaseAbs, _ = startPushLayer(t, env, ref) + u, err = url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + // Complete upload with relative URLs enabled to ensure the final location is relative + config.HTTP.RelativeURLs = true + resp, err = doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Relative URL returned from blob upload with non-relative configuration") + } +} + +func TestBlobDeleteDisabled(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + args := makeBlobArgs(t) + + imageName := args.imageName + layerDigest := args.layerDigest + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting when disabled: %v", err) + } + + checkResponse(t, "status of disabled delete", resp, http.StatusMethodNotAllowed) +} + +func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { + // TODO(stevvooe): This test code is complete junk but it should cover the + // complete flow. This must be broken down and checked against the + // specification *before* we submit the final to docker core. + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + + // ----------------------------------- + // Test fetch for non-existent content + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching non-existent layer: %v", err) + } + + checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) + + // ------------------------------------------ + // Test head request for non-existent content + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on non-existent layer: %v", err) + } + + checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) + + // ------------------------------------------ + // Start an upload, check the status then cancel + uploadURLBase, uploadUUID := startPushLayer(t, env, imageName) + + // A status check should work + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of deleted upload", resp, http.StatusNoContent) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Range": []string{"0-0"}, + "Docker-Upload-UUID": []string{uploadUUID}, + }) + + req, err := http.NewRequest("DELETE", uploadURLBase, nil) + if err != nil { + t.Fatalf("unexpected error creating delete request: %v", err) + } + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error sending delete request: %v", err) + } + + checkResponse(t, "deleting upload", resp, http.StatusNoContent) + + // A status check should result in 404 + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) + + // ----------------------------------------- + // Do layer push with an empty body and different digest + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) + if err != nil { + t.Fatalf("unexpected error doing bad layer push: %v", err) + } + + checkResponse(t, "bad layer push", resp, http.StatusBadRequest) + checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) + + // ----------------------------------------- + // Do layer push with an empty body and correct digest + zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{})) + if err != nil { + t.Fatalf("unexpected error digesting empty buffer: %v", err) + } + + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) + + // ----------------------------------------- + // Do layer push with an empty body and correct digest + + // This is a valid but empty tarfile! + emptyTar := bytes.Repeat([]byte("\x00"), 1024) + emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar)) + if err != nil { + t.Fatalf("unexpected error digesting empty tar: %v", err) + } + + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) + + // ------------------------------------------ + // Now, actually do successful upload. + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + layerFile.Seek(0, os.SEEK_SET) + + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + // ------------------------------------------ + // Now, push just a chunk + layerFile.Seek(0, 0) + + canonicalDigester := digest.Canonical.New() + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + layerFile.Seek(0, 0) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) + finishUpload(t, env.builder, imageName, uploadURLBase, dgst) + + // ------------------------ + // Use a head request to see if the layer exists. + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking head on existing layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) + + // ---------------- + // Fetch the layer! + resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) + + // Verify the body + verifier, err := digest.NewDigestVerifier(layerDigest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + io.Copy(verifier, resp.Body) + + if !verifier.Verified() { + t.Fatalf("response body did not pass verification") + } + + // ---------------- + // Fetch the layer with an invalid digest + badURL := strings.Replace(layerURL, "sha256", "sha257", 1) + resp, err = http.Get(badURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest) + + // Cache headers + resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, canonicalDigest)}, + "Cache-Control": []string{"max-age=31536000"}, + }) + + // Matching etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", layerURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + + // Non-matching etag, gives 200 + req, err = http.NewRequest("GET", layerURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", "") + resp, err = http.DefaultClient.Do(req) + checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) + + // Missing tests: + // - Upload the same tar file under and different repository and + // ensure the content remains uncorrupted. + return env +} + +func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { + // Upload a layer + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf(err.Error()) + } + // --------------- + // Delete a layer + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Try and get it back + // Use a head request to see if the layer exists. + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking existence of deleted layer", resp, http.StatusNotFound) + + // Delete already deleted layer + resp, err = httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusNotFound) + + // ---------------- + // Attempt to delete a layer with an invalid digest + badURL := strings.Replace(layerURL, "sha256", "sha257", 1) + resp, err = httpDelete(badURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "deleting layer bad digest", resp, http.StatusBadRequest) + + // ---------------- + // Reupload previously deleted blob + layerFile.Seek(0, os.SEEK_SET) + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + layerFile.Seek(0, os.SEEK_SET) + canonicalDigester := digest.Canonical.New() + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + // ------------------------ + // Use a head request to see if it exists + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) +} + +func TestDeleteDisabled(t *testing.T) { + env := newTestEnv(t, false) + defer env.Shutdown() + + imageName, _ := reference.ParseNamed("foo/bar") + // "build" our layer file + layerFile, layerDigest, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("Error building blob URL") + } + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) +} + +func TestDeleteReadOnly(t *testing.T) { + env := newTestEnv(t, true) + defer env.Shutdown() + + imageName, _ := reference.ParseNamed("foo/bar") + // "build" our layer file + layerFile, layerDigest, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("Error building blob URL") + } + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + env.app.readOnly = true + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer in read-only mode", resp, http.StatusMethodNotAllowed) +} + +func TestStartPushReadOnly(t *testing.T) { + env := newTestEnv(t, true) + defer env.Shutdown() + env.app.readOnly = true + + imageName, _ := reference.ParseNamed("foo/bar") + + layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "starting push in read-only mode", resp, http.StatusMethodNotAllowed) +} + +func httpDelete(url string) (*http.Response, error) { + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + // defer resp.Body.Close() + return resp, err +} + +type manifestArgs struct { + imageName reference.Named + mediaType string + manifest distribution.Manifest + dgst digest.Digest +} + +func TestManifestAPI(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := reference.ParseNamed("foo/schema2") + + deleteEnabled := false + env1 := newTestEnv(t, deleteEnabled) + defer env1.Shutdown() + testManifestAPISchema1(t, env1, schema1Repo) + schema2Args := testManifestAPISchema2(t, env1, schema2Repo) + testManifestAPIManifestList(t, env1, schema2Args) + + deleteEnabled = true + env2 := newTestEnv(t, deleteEnabled) + defer env2.Shutdown() + testManifestAPISchema1(t, env2, schema1Repo) + schema2Args = testManifestAPISchema2(t, env2, schema2Repo) + testManifestAPIManifestList(t, env2, schema2Args) +} + +func TestManifestDelete(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := reference.ParseNamed("foo/schema2") + + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + schema1Args := testManifestAPISchema1(t, env, schema1Repo) + testManifestDelete(t, env, schema1Args) + schema2Args := testManifestAPISchema2(t, env, schema2Repo) + testManifestDelete(t, env, schema2Args) +} + +func TestManifestDeleteDisabled(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + testManifestDeleteDisabled(t, env, schema1Repo) +} + +func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) { + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) + manifestURL, err := env.builder.BuildManifestURL(ref) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + resp, err := httpDelete(manifestURL) + if err != nil { + t.Fatalf("unexpected error deleting manifest %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) +} + +func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { + tag := "thetag" + args := manifestArgs{imageName: imageName} + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) + + // -------------------------------- + // Attempt to push unsigned manifest with missing layers + unsignedManifest := &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName.Name(), + Tag: tag, + FSLayers: []schema1.FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + History: []schema1.History{ + { + V1Compatibility: "", + }, + { + V1Compatibility: "", + }, + }, + } + + resp = putManifest(t, "putting unsigned manifest", manifestURL, "", unsignedManifest) + defer resp.Body.Close() + checkResponse(t, "putting unsigned manifest", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp, v2.ErrorCodeManifestInvalid) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestInvalid: 1, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // sign the manifest and still get some interesting errors. + sm, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + resp = putManifest(t, "putting signed manifest with errors", manifestURL, "", sm) + defer resp.Body.Close() + checkResponse(t, "putting signed manifest with errors", resp, http.StatusBadRequest) + _, p, counts = checkBodyHasErrorCodes(t, "putting signed manifest with errors", resp, + v2.ErrorCodeManifestBlobUnknown, v2.ErrorCodeDigestInvalid) + + expectedCounts = map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 2, + v2.ErrorCodeDigestInvalid: 2, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // TODO(stevvooe): Add a test case where we take a mostly valid registry, + // tamper with the content and ensure that we get an unverified manifest + // error. + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the signed manifest with all layers pushed. + signedManifest, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + dgst := digest.FromBytes(signedManifest.Canonical) + args.manifest = signedManifest + args.dgst = dgst + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting signed manifest no error", manifestURL, "", signedManifest) + checkResponse(t, "putting signed manifest no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifest schema1.SignedManifest + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifest.Canonical, signedManifest.Canonical) { + t.Fatalf("manifests do not match") + } + + // --------------- + // Fetch by digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestByDigest schema1.SignedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifestByDigest.Canonical, signedManifest.Canonical) { + t.Fatalf("manifests do not match") + } + + // check signature was roundtripped + signatures, err := fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 1 { + t.Fatalf("expected 1 signature from manifest, got: %d", len(signatures)) + } + + // Re-sign, push and pull the same digest + sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk) + if err != nil { + t.Fatal(err) + + } + + // Re-push with a few different Content-Types. The official schema1 + // content type should work, as should application/json with/without a + // charset. + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeSignedManifest, sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json; charset=utf-8", sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json", sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "re-fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "re-fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // check only 1 signature is returned + signatures, err = fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 1 { + t.Fatalf("expected 2 signature from manifest, got: %d", len(signatures)) + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting tags", resp, http.StatusOK) + dec = json.NewDecoder(resp.Body) + + var tagsResponse tagsAPIResponse + + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName.Name()) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // Attempt to put a manifest with mismatching FSLayer and History array cardinalities + + unsignedManifest.History = append(unsignedManifest.History, schema1.History{ + V1Compatibility: "", + }) + invalidSigned, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("error signing manifest") + } + + resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, "", invalidSigned) + checkResponse(t, "putting invalid signed manifest", resp, http.StatusBadRequest) + + return args +} + +func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { + tag := "schema2tag" + args := manifestArgs{ + imageName: imageName, + mediaType: schema2.MediaTypeManifest, + } + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) + + // -------------------------------- + // Attempt to push manifest with missing config and missing layers + manifest := &schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, + }, + Config: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: "sha256:463434349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeLayer, + }, + { + Digest: "sha256:630923423623623423352523525237238023652897356239852383652aaaaaaa", + Size: 6863, + MediaType: schema2.MediaTypeLayer, + }, + }, + } + + resp = putManifest(t, "putting missing config manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing config manifest", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing config manifest", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 3, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // Push a config, and reference it in the manifest + sampleConfig := []byte(`{ + "architecture": "amd64", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + } + ], + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + } + }`) + sampleConfigDigest := digest.FromBytes(sampleConfig) + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, sampleConfigDigest, uploadURLBase, bytes.NewReader(sampleConfig)) + manifest.Config.Digest = sampleConfigDigest + manifest.Config.Size = int64(len(sampleConfig)) + + // The manifest should still be invalid, because its layer doesn't exist + resp = putManifest(t, "putting missing layer manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing layer manifest", resp, http.StatusBadRequest) + _, p, counts = checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts = map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 2, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range manifest.Layers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + manifest.Layers[i].Digest = dgst + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the manifest with all layers pushed. + deserializedManifest, err := schema2.FromStruct(*manifest) + if err != nil { + t.Fatalf("could not create DeserializedManifest: %v", err) + } + _, canonical, err := deserializedManifest.Payload() + if err != nil { + t.Fatalf("could not get manifest payload: %v", err) + } + dgst := digest.FromBytes(canonical) + args.dgst = dgst + args.manifest = deserializedManifest + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting manifest no error", manifestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest by digest", manifestDigestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifest schema2.DeserializedManifest + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestByDigest schema2.DeserializedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err = fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) + dec = json.NewDecoder(resp.Body) + + var tagsResponse tagsAPIResponse + + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest as schema1: %v", err) + } + defer resp.Body.Close() + + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + + checkResponse(t, "fetching uploaded manifest as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) + + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName.Name() { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + for i := range manifest.Layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != manifest.Layers[len(manifest.Layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields because we're using randomly-generated + // layers. + + return args +} + +func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + tag := "manifestlisttag" + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // -------------------------------- + // Attempt to push manifest list that refers to an unknown manifest + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, + }, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "amd64", + OS: "linux", + }, + }, + }, + } + + resp := putManifest(t, "putting missing manifest manifestlist", manifestURL, manifestlist.MediaTypeManifestList, manifestList) + defer resp.Body.Close() + checkResponse(t, "putting missing manifest manifestlist", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing manifest manifestlist", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 1, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // ------------------- + // Push a manifest list that references an actual manifest + manifestList.Manifests[0].Digest = args.dgst + deserializedManifestList, err := manifestlist.FromDescriptors(manifestList.Manifests) + if err != nil { + t.Fatalf("could not create DeserializedManifestList: %v", err) + } + _, canonical, err := deserializedManifestList.Payload() + if err != nil { + t.Fatalf("could not get manifest list payload: %v", err) + } + dgst := digest.FromBytes(canonical) + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting manifest list no error", manifestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest list by digest", manifestDigestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + // multiple headers in mixed list format to ensure we parse correctly server-side + req.Header.Set("Accept", fmt.Sprintf(` %s ; q=0.8 , %s ; q=0.5 `, manifestlist.MediaTypeManifestList, schema1.MediaTypeSignedManifest)) + req.Header.Add("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error fetching manifest list: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestList manifestlist.DeserializedManifestList + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifestList); err != nil { + t.Fatalf("error decoding fetched manifest list: %v", err) + } + + _, fetchedCanonical, err := fetchedManifestList.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifest lists do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", manifestlist.MediaTypeManifestList) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest list by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestListByDigest manifestlist.DeserializedManifestList + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestListByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err = fetchedManifestListByDigest.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest list as schema1: %v", err) + } + defer resp.Body.Close() + + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + + checkResponse(t, "fetching uploaded manifest list as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) + + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName.Name() { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + layers := args.manifest.(*schema2.DeserializedManifest).Layers + for i := range layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != layers[len(layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields because we're using randomly-generated + // layers. +} + +func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + dgst := args.dgst + manifest := args.manifest + + ref, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(ref) + // --------------- + // Delete by digest + resp, err := httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Attempt to fetch deleted manifest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching deleted manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + + // --------------- + // Delete already deleted manifest by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "re-deleting manifest by digest") + + checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) + + // -------------------- + // Re-upload manifest by digest + resp = putManifest(t, "putting manifest", manifestDigestURL, args.mediaType, manifest) + checkResponse(t, "putting manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to fetch re-uploaded deleted digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching re-uploaded manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to delete an unknown manifest + unknownDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + unknownRef, _ := reference.WithDigest(imageName, unknownDigest) + unknownManifestDigestURL, err := env.builder.BuildManifestURL(unknownRef) + checkErr(t, err, "building unknown manifest url") + + resp, err = httpDelete(unknownManifestDigestURL) + checkErr(t, err, "delting unknown manifest by digest") + checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + + // -------------------- + // Upload manifest by tag + tag := "atag" + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := env.builder.BuildManifestURL(tagRef) + resp = putManifest(t, "putting manifest by tag", manifestTagURL, args.mediaType, manifest) + checkResponse(t, "putting manifest by tag", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec := json.NewDecoder(resp.Body) + var tagsResponse tagsAPIResponse + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // --------------- + // Delete by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest with tag", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // Ensure that the tag is not listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 0 { + t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags) + } + +} + +type testEnv struct { + pk libtrust.PrivateKey + ctx context.Context + config configuration.Configuration + app *App + server *httptest.Server + builder *v2.URLBuilder +} + +func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Proxy: configuration.Proxy{ + RemoteURL: "http://example.com", + }, + } + + return newTestEnvWithConfig(t, &config) + +} + +func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + + config.HTTP.Headers = headerConfig + + return newTestEnvWithConfig(t, &config) +} + +func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { + ctx := context.Background() + + app := NewApp(ctx, config) + server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) + builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false) + + if err != nil { + t.Fatalf("error creating url builder: %v", err) + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + return &testEnv{ + pk: pk, + ctx: ctx, + config: *config, + app: app, + server: server, + builder: builder, + } +} + +func (t *testEnv) Shutdown() { + t.server.CloseClientConnections() + t.server.Close() +} + +func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *http.Response { + var body []byte + + switch m := v.(type) { + case *schema1.SignedManifest: + _, pl, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl + case *manifestlist.DeserializedManifestList: + _, pl, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl + default: + var err error + body, err = json.MarshalIndent(v, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling %v: %v", v, err) + } + } + + req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) + if err != nil { + t.Fatalf("error creating request for %s: %v", msg, err) + } + + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("error doing put request while %s: %v", msg, err) + } + + return resp +} + +func startPushLayer(t *testing.T, env *testEnv, name reference.Named) (location string, uuid string) { + layerUploadURL, err := env.builder.BuildBlobUploadURL(name) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + u, err := url.Parse(layerUploadURL) + if err != nil { + t.Fatalf("error parsing layer upload URL: %v", err) + } + + base, err := url.Parse(env.server.URL) + if err != nil { + t.Fatalf("error parsing server URL: %v", err) + } + + layerUploadURL = base.ResolveReference(u).String() + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name.String()), resp, http.StatusAccepted) + + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatalf("error parsing location header: %v", err) + } + + uuid = path.Base(u.Path) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Content-Length": []string{"0"}, + "Docker-Upload-UUID": []string{uuid}, + }) + + return resp.Header.Get("Location"), uuid +} + +// doPushLayer pushes the layer content returning the url on success returning +// the response. If you're only expecting a successful response, use pushLayer. +func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ + "_state": u.Query()["_state"], + "digest": []string{dgst.String()}, + }.Encode() + + uploadURL := u.String() + + // Just do a monolithic upload + req, err := http.NewRequest("PUT", uploadURL, body) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + + return http.DefaultClient.Do(req) +} + +// pushLayer pushes the layer content returning the url on success. +func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) string { + digester := digest.Canonical.New() + + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + if err != nil { + t.Fatalf("error generating sha256 digest of body") + } + + sha256Dgst := digester.Digest() + + ref, _ := reference.WithDigest(name, sha256Dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{sha256Dgst.String()}, + }) + + return resp.Header.Get("Location") +} + +func finishUpload(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, dgst digest.Digest) string { + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + ref, _ := reference.WithDigest(name, dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + return resp.Header.Get("Location") +} + +func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Response, digest.Digest, error) { + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ + "_state": u.Query()["_state"], + }.Encode() + + uploadURL := u.String() + + digester := digest.Canonical.New() + + req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := http.DefaultClient.Do(req) + + return resp, digester.Digest(), err +} + +func pushChunk(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { + resp, dgst, err := doPushChunk(t, uploadURLBase, body) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting chunk", resp, http.StatusAccepted) + + if err != nil { + t.Fatalf("error generating sha256 digest of body") + } + + checkHeaders(t, resp, http.Header{ + "Range": []string{fmt.Sprintf("0-%d", length-1)}, + "Content-Length": []string{"0"}, + }) + + return resp.Header.Get("Location"), dgst +} + +func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { + if resp.StatusCode != expectedStatus { + t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) + maybeDumpResponse(t, resp) + + t.FailNow() + } + + // We expect the headers included in the configuration, unless the + // status code is 405 (Method Not Allowed), which means the handler + // doesn't even get called. + if resp.StatusCode != 405 && !reflect.DeepEqual(resp.Header["X-Content-Type-Options"], []string{"nosniff"}) { + t.Logf("missing or incorrect header X-Content-Type-Options %s", msg) + maybeDumpResponse(t, resp) + + t.FailNow() + } +} + +// checkBodyHasErrorCodes ensures the body is an error body and has the +// expected error codes, returning the error structure, the json slice and a +// count of the errors by code. +func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) { + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unexpected error reading body %s: %v", msg, err) + } + + var errs errcode.Errors + if err := json.Unmarshal(p, &errs); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if len(errs) == 0 { + t.Fatalf("expected errors in response") + } + + // TODO(stevvooe): Shoot. The error setup is not working out. The content- + // type headers are being set after writing the status code. + // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { + // t.Fatalf("unexpected content type: %v != 'application/json'", + // resp.Header.Get("Content-Type")) + // } + + expected := map[errcode.ErrorCode]struct{}{} + counts := map[errcode.ErrorCode]int{} + + // Initialize map with zeros for expected + for _, code := range errorCodes { + expected[code] = struct{}{} + counts[code] = 0 + } + + for _, e := range errs { + err, ok := e.(errcode.ErrorCoder) + if !ok { + t.Fatalf("not an ErrorCoder: %#v", e) + } + if _, ok := expected[err.ErrorCode()]; !ok { + t.Fatalf("unexpected error code %v encountered during %s: %s ", err.ErrorCode(), msg, string(p)) + } + counts[err.ErrorCode()]++ + } + + // Ensure that counts of expected errors were all non-zero + for code := range expected { + if counts[code] == 0 { + t.Fatalf("expected error code %v not encounterd during %s: %s", code, msg, string(p)) + } + } + + return errs, p, counts +} + +func maybeDumpResponse(t *testing.T, resp *http.Response) { + if d, err := httputil.DumpResponse(resp, true); err != nil { + t.Logf("error dumping response: %v", err) + } else { + t.Logf("response:\n%s", string(d)) + } +} + +// matchHeaders checks that the response has at least the headers. If not, the +// test will fail. If a passed in header value is "*", any non-zero value will +// suffice as a match. +func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { + for k, vs := range headers { + if resp.Header.Get(k) == "" { + t.Fatalf("response missing header %q", k) + } + + for _, v := range vs { + if v == "*" { + // Just ensure there is some value. + if len(resp.Header[http.CanonicalHeaderKey(k)]) > 0 { + continue + } + } + + for _, hv := range resp.Header[http.CanonicalHeaderKey(k)] { + if hv != v { + t.Fatalf("%+v %v header value not matched in response: %q != %q", resp.Header, k, hv, v) + } + } + } + } +} + +func checkErr(t *testing.T, err error, msg string) { + if err != nil { + t.Fatalf("unexpected error %s: %v", msg, err) + } +} + +func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest { + imageNameRef, err := reference.ParseNamed(imageName) + if err != nil { + t.Fatalf("unable to parse reference: %v", err) + } + + unsignedManifest := &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName, + Tag: tag, + FSLayers: []schema1.FSLayer{ + { + BlobSum: "asdf", + }, + }, + History: []schema1.History{ + { + V1Compatibility: "", + }, + }, + } + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + uploadURLBase, _ := startPushLayer(t, env, imageNameRef) + pushLayer(t, env.builder, imageNameRef, dgst, uploadURLBase, rs) + } + + signedManifest, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + dgst := digest.FromBytes(signedManifest.Canonical) + + // Create this repository by tag to ensure the tag mapping is made in the registry + tagRef, _ := reference.WithTag(imageNameRef, tag) + manifestDigestURL, err := env.builder.BuildManifestURL(tagRef) + checkErr(t, err, "building manifest url") + + digestRef, _ := reference.WithDigest(imageNameRef, dgst) + location, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building location URL") + + resp := putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{location}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + return dgst +} + +// Test mutation operations on a registry configured as a cache. Ensure that they return +// appropriate errors. +func TestRegistryAsCacheMutationAPIs(t *testing.T) { + deleteEnabled := true + env := newTestEnvMirror(t, deleteEnabled) + defer env.Shutdown() + + imageName, _ := reference.ParseNamed("foo/bar") + tag := "latest" + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + // Manifest upload + m := &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName.Name(), + Tag: tag, + FSLayers: []schema1.FSLayer{}, + History: []schema1.History{}, + } + + sm, err := schema1.Sign(m, env.pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + resp := putManifest(t, "putting unsigned manifest", manifestURL, "", sm) + checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Manifest Delete + resp, err = httpDelete(manifestURL) + checkResponse(t, "deleting signed manifest from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob upload initialization + layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err = http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob Delete + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) + blobURL, err := env.builder.BuildBlobURL(ref) + resp, err = httpDelete(blobURL) + checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + +} + +// TestCheckContextNotifier makes sure the API endpoints get a ResponseWriter +// that implements http.ContextNotifier. +func TestCheckContextNotifier(t *testing.T) { + env := newTestEnv(t, false) + defer env.Shutdown() + + // Register a new endpoint for testing + env.app.router.Handle("/unittest/{name}/", env.app.dispatcher(func(ctx *Context, r *http.Request) http.Handler { + return handlers.MethodHandler{ + "GET": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if _, ok := w.(http.CloseNotifier); !ok { + t.Fatal("could not cast ResponseWriter to CloseNotifier") + } + w.WriteHeader(200) + }), + } + })) + + resp, err := http.Get(env.server.URL + "/unittest/reponame/") + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + t.Fatalf("wrong status code - expected 200, got %d", resp.StatusCode) + } +} + +func TestProxyManifestGetByTag(t *testing.T) { + truthConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + truthConfig.HTTP.Headers = headerConfig + + imageName, _ := reference.ParseNamed("foo/bar") + tag := "latest" + + truthEnv := newTestEnvWithConfig(t, &truthConfig) + defer truthEnv.Shutdown() + // create a repository in the truth registry + dgst := createRepository(truthEnv, t, imageName.Name(), tag) + + proxyConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + }, + Proxy: configuration.Proxy{ + RemoteURL: truthEnv.server.URL, + }, + } + proxyConfig.HTTP.Headers = headerConfig + + proxyEnv := newTestEnvWithConfig(t, &proxyConfig) + defer proxyEnv.Shutdown() + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := proxyEnv.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp, err := http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest from proxy by digest") + defer resp.Body.Close() + + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := proxyEnv.builder.BuildManifestURL(tagRef) + checkErr(t, err, "building manifest url") + + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // Create another manifest in the remote with the same image/tag pair + newDigest := createRepository(truthEnv, t, imageName.Name(), tag) + if dgst == newDigest { + t.Fatalf("non-random test data") + } + + // fetch it with the same proxy URL as before. Ensure the updated content is at the same tag + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{newDigest.String()}, + }) +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/app.go b/vendor/github.com/docker/distribution/registry/handlers/app.go new file mode 100644 index 0000000..0f30603 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/app.go @@ -0,0 +1,1046 @@ +package handlers + +import ( + cryptorand "crypto/rand" + "expvar" + "fmt" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "regexp" + "runtime" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/health/checks" + "github.com/docker/distribution/notifications" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + registrymiddleware "github.com/docker/distribution/registry/middleware/registry" + repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" + "github.com/docker/distribution/registry/proxy" + "github.com/docker/distribution/registry/storage" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + rediscache "github.com/docker/distribution/registry/storage/cache/redis" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/docker/distribution/version" + "github.com/docker/libtrust" + "github.com/garyburd/redigo/redis" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// randomSecretSize is the number of random bytes to generate if no secret +// was specified. +const randomSecretSize = 32 + +// defaultCheckInterval is the default time in between health checks +const defaultCheckInterval = 10 * time.Second + +// App is a global registry application object. Shared resources can be placed +// on this object that will be accessible from all requests. Any writable +// fields should be protected. +type App struct { + context.Context + + Config *configuration.Configuration + + router *mux.Router // main application router, configured with dispatchers + driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. + registry distribution.Namespace // registry is the primary registry backend for the app instance. + accessController auth.AccessController // main access controller for application + + // httpHost is a parsed representation of the http.host parameter from + // the configuration. Only the Scheme and Host fields are used. + httpHost url.URL + + // events contains notification related configuration. + events struct { + sink notifications.Sink + source notifications.SourceRecord + } + + redis *redis.Pool + + // trustKey is a deprecated key used to sign manifests converted to + // schema1 for backward compatibility. It should not be used for any + // other purposes. + trustKey libtrust.PrivateKey + + // isCache is true if this registry is configured as a pull through cache + isCache bool + + // readOnly is true if the registry is in a read-only maintenance mode + readOnly bool +} + +// NewApp takes a configuration and returns a configured app, ready to serve +// requests. The app only implements ServeHTTP and can be wrapped in other +// handlers accordingly. +func NewApp(ctx context.Context, config *configuration.Configuration) *App { + app := &App{ + Config: config, + Context: ctx, + router: v2.RouterWithPrefix(config.HTTP.Prefix), + isCache: config.Proxy.RemoteURL != "", + } + + // Register the handler dispatchers. + app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { + return http.HandlerFunc(apiBase) + }) + app.register(v2.RouteNameManifest, imageManifestDispatcher) + app.register(v2.RouteNameCatalog, catalogDispatcher) + app.register(v2.RouteNameTags, tagsDispatcher) + app.register(v2.RouteNameBlob, blobDispatcher) + app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) + app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) + + // override the storage driver's UA string for registry outbound HTTP requests + storageParams := config.Storage.Parameters() + if storageParams == nil { + storageParams = make(configuration.Parameters) + } + storageParams["useragent"] = fmt.Sprintf("docker-distribution/%s %s", version.Version, runtime.Version()) + + var err error + app.driver, err = factory.Create(config.Storage.Type(), storageParams) + if err != nil { + // TODO(stevvooe): Move the creation of a service into a protected + // method, where this is created lazily. Its status can be queried via + // a health check. + panic(err) + } + + purgeConfig := uploadPurgeDefaultConfig() + if mc, ok := config.Storage["maintenance"]; ok { + if v, ok := mc["uploadpurging"]; ok { + purgeConfig, ok = v.(map[interface{}]interface{}) + if !ok { + panic("uploadpurging config key must contain additional keys") + } + } + if v, ok := mc["readonly"]; ok { + readOnly, ok := v.(map[interface{}]interface{}) + if !ok { + panic("readonly config key must contain additional keys") + } + if readOnlyEnabled, ok := readOnly["enabled"]; ok { + app.readOnly, ok = readOnlyEnabled.(bool) + if !ok { + panic("readonly's enabled config key must have a boolean value") + } + } + } + } + + startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) + + app.driver, err = applyStorageMiddleware(app.driver, config.Middleware["storage"]) + if err != nil { + panic(err) + } + + app.configureSecret(config) + app.configureEvents(config) + app.configureRedis(config) + app.configureLogHook(config) + + options := registrymiddleware.GetRegistryOptions() + if config.Compatibility.Schema1.TrustKey != "" { + app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey) + if err != nil { + panic(fmt.Sprintf(`could not load schema1 "signingkey" parameter: %v`, err)) + } + } else { + // Generate an ephemeral key to be used for signing converted manifests + // for clients that don't support schema2. + app.trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + } + + options = append(options, storage.Schema1SigningKey(app.trustKey)) + + if config.HTTP.Host != "" { + u, err := url.Parse(config.HTTP.Host) + if err != nil { + panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) + } + app.httpHost = *u + } + + if app.isCache { + options = append(options, storage.DisableDigestResumption) + } + + // configure deletion + if d, ok := config.Storage["delete"]; ok { + e, ok := d["enabled"] + if ok { + if deleteEnabled, ok := e.(bool); ok && deleteEnabled { + options = append(options, storage.EnableDelete) + } + } + } + + // configure redirects + var redirectDisabled bool + if redirectConfig, ok := config.Storage["redirect"]; ok { + v := redirectConfig["disable"] + switch v := v.(type) { + case bool: + redirectDisabled = v + default: + panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) + } + } + if redirectDisabled { + ctxu.GetLogger(app).Infof("backend redirection disabled") + } else { + options = append(options, storage.EnableRedirect) + } + + // configure validation + if config.Validation.Enabled { + if len(config.Validation.Manifests.URLs.Allow) == 0 && len(config.Validation.Manifests.URLs.Deny) == 0 { + // If Allow and Deny are empty, allow nothing. + options = append(options, storage.ManifestURLsAllowRegexp(regexp.MustCompile("^$"))) + } else { + if len(config.Validation.Manifests.URLs.Allow) > 0 { + for i, s := range config.Validation.Manifests.URLs.Allow { + // Validate via compilation. + if _, err := regexp.Compile(s); err != nil { + panic(fmt.Sprintf("validation.manifests.urls.allow: %s", err)) + } + // Wrap with non-capturing group. + config.Validation.Manifests.URLs.Allow[i] = fmt.Sprintf("(?:%s)", s) + } + re := regexp.MustCompile(strings.Join(config.Validation.Manifests.URLs.Allow, "|")) + options = append(options, storage.ManifestURLsAllowRegexp(re)) + } + if len(config.Validation.Manifests.URLs.Deny) > 0 { + for i, s := range config.Validation.Manifests.URLs.Deny { + // Validate via compilation. + if _, err := regexp.Compile(s); err != nil { + panic(fmt.Sprintf("validation.manifests.urls.deny: %s", err)) + } + // Wrap with non-capturing group. + config.Validation.Manifests.URLs.Deny[i] = fmt.Sprintf("(?:%s)", s) + } + re := regexp.MustCompile(strings.Join(config.Validation.Manifests.URLs.Deny, "|")) + options = append(options, storage.ManifestURLsDenyRegexp(re)) + } + } + } + + // configure storage caches + if cc, ok := config.Storage["cache"]; ok { + v, ok := cc["blobdescriptor"] + if !ok { + // Backwards compatible: "layerinfo" == "blobdescriptor" + v = cc["layerinfo"] + } + + switch v { + case "redis": + if app.redis == nil { + panic("redis configuration required to use for layerinfo cache") + } + cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis) + localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) + app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) + if err != nil { + panic("could not create registry: " + err.Error()) + } + ctxu.GetLogger(app).Infof("using redis blob descriptor cache") + case "inmemory": + cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider() + localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) + app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) + if err != nil { + panic("could not create registry: " + err.Error()) + } + ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") + default: + if v != "" { + ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"]) + } + } + } + + if app.registry == nil { + // configure the registry if no cache section is available. + app.registry, err = storage.NewRegistry(app.Context, app.driver, options...) + if err != nil { + panic("could not create registry: " + err.Error()) + } + } + + app.registry, err = applyRegistryMiddleware(app, app.registry, config.Middleware["registry"]) + if err != nil { + panic(err) + } + + authType := config.Auth.Type() + + if authType != "" { + accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters()) + if err != nil { + panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) + } + app.accessController = accessController + ctxu.GetLogger(app).Debugf("configured %q access controller", authType) + } + + // configure as a pull through cache + if config.Proxy.RemoteURL != "" { + app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, config.Proxy) + if err != nil { + panic(err.Error()) + } + app.isCache = true + ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL) + } + + return app +} + +// RegisterHealthChecks is an awful hack to defer health check registration +// control to callers. This should only ever be called once per registry +// process, typically in a main function. The correct way would be register +// health checks outside of app, since multiple apps may exist in the same +// process. Because the configuration and app are tightly coupled, +// implementing this properly will require a refactor. This method may panic +// if called twice in the same process. +func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { + if len(healthRegistries) > 1 { + panic("RegisterHealthChecks called with more than one registry") + } + healthRegistry := health.DefaultRegistry + if len(healthRegistries) == 1 { + healthRegistry = healthRegistries[0] + } + + if app.Config.Health.StorageDriver.Enabled { + interval := app.Config.Health.StorageDriver.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + storageDriverCheck := func() error { + _, err := app.driver.Stat(app, "/") // "/" should always exist + return err // any error will be treated as failure + } + + if app.Config.Health.StorageDriver.Threshold != 0 { + healthRegistry.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) + } else { + healthRegistry.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) + } + } + + for _, fileChecker := range app.Config.Health.FileCheckers { + interval := fileChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) + healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) + } + + for _, httpChecker := range app.Config.Health.HTTPCheckers { + interval := httpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + statusCode := httpChecker.StatusCode + if statusCode == 0 { + statusCode = 200 + } + + checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers) + + if httpChecker.Threshold != 0 { + ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) + healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold)) + } else { + ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) + healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval)) + } + } + + for _, tcpChecker := range app.Config.Health.TCPCheckers { + interval := tcpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout) + + if tcpChecker.Threshold != 0 { + ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold)) + } else { + ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval)) + } + } +} + +// register a handler with the application, by route name. The handler will be +// passed through the application filters and context will be constructed at +// request time. +func (app *App) register(routeName string, dispatch dispatchFunc) { + + // TODO(stevvooe): This odd dispatcher/route registration is by-product of + // some limitations in the gorilla/mux router. We are using it to keep + // routing consistent between the client and server, but we may want to + // replace it with manual routing and structure-based dispatch for better + // control over the request execution. + + app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) +} + +// configureEvents prepares the event sink for action. +func (app *App) configureEvents(configuration *configuration.Configuration) { + // Configure all of the endpoint sinks. + var sinks []notifications.Sink + for _, endpoint := range configuration.Notifications.Endpoints { + if endpoint.Disabled { + ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) + continue + } + + ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) + endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ + Timeout: endpoint.Timeout, + Threshold: endpoint.Threshold, + Backoff: endpoint.Backoff, + Headers: endpoint.Headers, + IgnoredMediaTypes: endpoint.IgnoredMediaTypes, + }) + + sinks = append(sinks, endpoint) + } + + // NOTE(stevvooe): Moving to a new queuing implementation is as easy as + // replacing broadcaster with a rabbitmq implementation. It's recommended + // that the registry instances also act as the workers to keep deployment + // simple. + app.events.sink = notifications.NewBroadcaster(sinks...) + + // Populate registry event source + hostname, err := os.Hostname() + if err != nil { + hostname = configuration.HTTP.Addr + } else { + // try to pick the port off the config + _, port, err := net.SplitHostPort(configuration.HTTP.Addr) + if err == nil { + hostname = net.JoinHostPort(hostname, port) + } + } + + app.events.source = notifications.SourceRecord{ + Addr: hostname, + InstanceID: ctxu.GetStringValue(app, "instance.id"), + } +} + +type redisStartAtKey struct{} + +func (app *App) configureRedis(configuration *configuration.Configuration) { + if configuration.Redis.Addr == "" { + ctxu.GetLogger(app).Infof("redis not configured") + return + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + // TODO(stevvooe): Yet another use case for contextual timing. + ctx := context.WithValue(app, redisStartAtKey{}, time.Now()) + + done := func(err error) { + logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", + ctxu.Since(ctx, redisStartAtKey{})) + if err != nil { + logger.Errorf("redis: error connecting: %v", err) + } else { + logger.Infof("redis: connect %v", configuration.Redis.Addr) + } + } + + conn, err := redis.DialTimeout("tcp", + configuration.Redis.Addr, + configuration.Redis.DialTimeout, + configuration.Redis.ReadTimeout, + configuration.Redis.WriteTimeout) + if err != nil { + ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", + configuration.Redis.Addr, err) + done(err) + return nil, err + } + + // authorize the connection + if configuration.Redis.Password != "" { + if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + // select the database to use + if configuration.Redis.DB != 0 { + if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + done(nil) + return conn, nil + }, + MaxIdle: configuration.Redis.Pool.MaxIdle, + MaxActive: configuration.Redis.Pool.MaxActive, + IdleTimeout: configuration.Redis.Pool.IdleTimeout, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + // TODO(stevvooe): We can probably do something more interesting + // here with the health package. + _, err := c.Do("PING") + return err + }, + Wait: false, // if a connection is not avialable, proceed without cache. + } + + app.redis = pool + + // setup expvar + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { + return map[string]interface{}{ + "Config": configuration.Redis, + "Active": app.redis.ActiveCount(), + } + })) +} + +// configureLogHook prepares logging hook parameters. +func (app *App) configureLogHook(configuration *configuration.Configuration) { + entry, ok := ctxu.GetLogger(app).(*log.Entry) + if !ok { + // somehow, we are not using logrus + return + } + + logger := entry.Logger + + for _, configHook := range configuration.Log.Hooks { + if !configHook.Disabled { + switch configHook.Type { + case "mail": + hook := &logHook{} + hook.LevelsParam = configHook.Levels + hook.Mail = &mailer{ + Addr: configHook.MailOptions.SMTP.Addr, + Username: configHook.MailOptions.SMTP.Username, + Password: configHook.MailOptions.SMTP.Password, + Insecure: configHook.MailOptions.SMTP.Insecure, + From: configHook.MailOptions.From, + To: configHook.MailOptions.To, + } + logger.Hooks.Add(hook) + default: + } + } + } +} + +// configureSecret creates a random secret if a secret wasn't included in the +// configuration. +func (app *App) configureSecret(configuration *configuration.Configuration) { + if configuration.HTTP.Secret == "" { + var secretBytes [randomSecretSize]byte + if _, err := cryptorand.Read(secretBytes[:]); err != nil { + panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) + } + configuration.HTTP.Secret = string(secretBytes[:]) + ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") + } +} + +func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() // ensure that request body is always closed. + + // Instantiate an http context here so we can track the error codes + // returned by the request router. + ctx := defaultContextManager.context(app, w, r) + + defer func() { + status, ok := ctx.Value("http.response.status").(int) + if ok && status >= 200 && status <= 399 { + ctxu.GetResponseLogger(ctx).Infof("response completed") + } + }() + defer defaultContextManager.release(ctx) + + // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. + var err error + w, err = ctxu.GetResponseWriter(ctx) + if err != nil { + ctxu.GetLogger(ctx).Warnf("response writer not found in context") + } + + // Set a header with the Docker Distribution API Version for all responses. + w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") + app.router.ServeHTTP(w, r) +} + +// dispatchFunc takes a context and request and returns a constructed handler +// for the route. The dispatcher will use this to dynamically create request +// specific handlers for each endpoint without creating a new router for each +// request. +type dispatchFunc func(ctx *Context, r *http.Request) http.Handler + +// TODO(stevvooe): dispatchers should probably have some validation error +// chain with proper error reporting. + +// dispatcher returns a handler that constructs a request specific context and +// handler, using the dispatch factory function. +func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for headerName, headerValues := range app.Config.HTTP.Headers { + for _, value := range headerValues { + w.Header().Add(headerName, value) + } + } + + context := app.context(w, r) + + if err := app.authorized(w, r, context); err != nil { + ctxu.GetLogger(context).Warnf("error authorizing context: %v", err) + return + } + + // Add username to request logging + context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, auth.UserNameKey)) + + if app.nameRequired(r) { + nameRef, err := reference.ParseNamed(getName(context)) + if err != nil { + ctxu.GetLogger(context).Errorf("error parsing reference from context: %v", err) + context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{ + Name: getName(context), + Reason: err, + }) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + repository, err := app.registry.Repository(context, nameRef) + + if err != nil { + ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) + + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) + case distribution.ErrRepositoryNameInvalid: + context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) + case errcode.Error: + context.Errors = append(context.Errors, err) + } + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + + // assign and decorate the authorized repository with an event bridge. + context.Repository = notifications.Listen( + repository, + app.eventBridge(context, r)) + + context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"]) + if err != nil { + ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) + context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + } + + dispatch(context, r).ServeHTTP(w, r) + // Automated error response handling here. Handlers may return their + // own errors if they need different behavior (such as range errors + // for layer upload). + if context.Errors.Len() > 0 { + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + + app.logError(context, context.Errors) + } + }) +} + +type errCodeKey struct{} + +func (errCodeKey) String() string { return "err.code" } + +type errMessageKey struct{} + +func (errMessageKey) String() string { return "err.message" } + +type errDetailKey struct{} + +func (errDetailKey) String() string { return "err.detail" } + +func (app *App) logError(context context.Context, errors errcode.Errors) { + for _, e1 := range errors { + var c ctxu.Context + + switch e1.(type) { + case errcode.Error: + e, _ := e1.(errcode.Error) + c = ctxu.WithValue(context, errCodeKey{}, e.Code) + c = ctxu.WithValue(c, errMessageKey{}, e.Code.Message()) + c = ctxu.WithValue(c, errDetailKey{}, e.Detail) + case errcode.ErrorCode: + e, _ := e1.(errcode.ErrorCode) + c = ctxu.WithValue(context, errCodeKey{}, e) + c = ctxu.WithValue(c, errMessageKey{}, e.Message()) + default: + // just normal go 'error' + c = ctxu.WithValue(context, errCodeKey{}, errcode.ErrorCodeUnknown) + c = ctxu.WithValue(c, errMessageKey{}, e1.Error()) + } + + c = ctxu.WithLogger(c, ctxu.GetLogger(c, + errCodeKey{}, + errMessageKey{}, + errDetailKey{})) + ctxu.GetResponseLogger(c).Errorf("response completed with error") + } +} + +// context constructs the context object for the application. This only be +// called once per request. +func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { + ctx := defaultContextManager.context(app, w, r) + ctx = ctxu.WithVars(ctx, r) + ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, + "vars.name", + "vars.reference", + "vars.digest", + "vars.uuid")) + + context := &Context{ + App: app, + Context: ctx, + } + + if app.httpHost.Scheme != "" && app.httpHost.Host != "" { + // A "host" item in the configuration takes precedence over + // X-Forwarded-Proto and X-Forwarded-Host headers, and the + // hostname in the request. + context.urlBuilder = v2.NewURLBuilder(&app.httpHost, false) + } else { + context.urlBuilder = v2.NewURLBuilderFromRequest(r, app.Config.HTTP.RelativeURLs) + } + + return context +} + +// authorized checks if the request can proceed with access to the requested +// repository. If it succeeds, the context may access the requested +// repository. An error will be returned if access is not available. +func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { + ctxu.GetLogger(context).Debug("authorizing request") + repo := getName(context) + + if app.accessController == nil { + return nil // access controller is not enabled. + } + + var accessRecords []auth.Access + + if repo != "" { + accessRecords = appendAccessRecords(accessRecords, r.Method, repo) + if fromRepo := r.FormValue("from"); fromRepo != "" { + // mounting a blob from one repository to another requires pull (GET) + // access to the source repository. + accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo) + } + } else { + // Only allow the name not to be set on the base route. + if app.nameRequired(r) { + // For this to be properly secured, repo must always be set for a + // resource that may make a modification. The only condition under + // which name is not set and we still allow access is when the + // base route is accessed. This section prevents us from making + // that mistake elsewhere in the code, allowing any operation to + // proceed. + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return fmt.Errorf("forbidden: no repository name") + } + accessRecords = appendCatalogAccessRecord(accessRecords, r) + } + + ctx, err := app.accessController.Authorized(context.Context, accessRecords...) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + // Add the appropriate WWW-Auth header + err.SetHeaders(w) + + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + default: + // This condition is a potential security problem either in + // the configuration or whatever is backing the access + // controller. Just return a bad request with no information + // to avoid exposure. The request should not proceed. + ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) + w.WriteHeader(http.StatusBadRequest) + } + + return err + } + + // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context + // should be replaced by another, rather than replacing the context on a + // mutable object. + context.Context = ctx + return nil +} + +// eventBridge returns a bridge for the current request, configured with the +// correct actor and source. +func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { + actor := notifications.ActorRecord{ + Name: getUserName(ctx, r), + } + request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) + + return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) +} + +// nameRequired returns true if the route requires a name. +func (app *App) nameRequired(r *http.Request) bool { + route := mux.CurrentRoute(r) + routeName := route.GetName() + return route == nil || (routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog) +} + +// apiBase implements a simple yes-man for doing overall checks against the +// api. This can support auth roundtrips to support docker login. +func apiBase(w http.ResponseWriter, r *http.Request) { + const emptyJSON = "{}" + // Provide a simple /v2/ 200 OK response with empty json response. + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) + + fmt.Fprint(w, emptyJSON) +} + +// appendAccessRecords checks the method and adds the appropriate Access records to the records list. +func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { + resource := auth.Resource{ + Type: "repository", + Name: repo, + } + + switch method { + case "GET", "HEAD": + records = append(records, + auth.Access{ + Resource: resource, + Action: "pull", + }) + case "POST", "PUT", "PATCH": + records = append(records, + auth.Access{ + Resource: resource, + Action: "pull", + }, + auth.Access{ + Resource: resource, + Action: "push", + }) + case "DELETE": + // DELETE access requires full admin rights, which is represented + // as "*". This may not be ideal. + records = append(records, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + return records +} + +// Add the access record for the catalog if it's our current route +func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access { + route := mux.CurrentRoute(r) + routeName := route.GetName() + + if routeName == v2.RouteNameCatalog { + resource := auth.Resource{ + Type: "registry", + Name: "catalog", + } + + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + return accessRecords +} + +// applyRegistryMiddleware wraps a registry instance with the configured middlewares +func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { + for _, mw := range middlewares { + rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry) + if err != nil { + return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) + } + registry = rmw + } + return registry, nil + +} + +// applyRepoMiddleware wraps a repository with the configured middlewares +func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { + for _, mw := range middlewares { + rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository) + if err != nil { + return nil, err + } + repository = rmw + } + return repository, nil +} + +// applyStorageMiddleware wraps a storage driver with the configured middlewares +func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { + for _, mw := range middlewares { + smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) + if err != nil { + return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) + } + driver = smw + } + return driver, nil +} + +// uploadPurgeDefaultConfig provides a default configuration for upload +// purging to be used in the absence of configuration in the +// confifuration file +func uploadPurgeDefaultConfig() map[interface{}]interface{} { + config := map[interface{}]interface{}{} + config["enabled"] = true + config["age"] = "168h" + config["interval"] = "24h" + config["dryrun"] = false + return config +} + +func badPurgeUploadConfig(reason string) { + panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason)) +} + +// startUploadPurger schedules a goroutine which will periodically +// check upload directories for old files and delete them +func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { + if config["enabled"] == false { + return + } + + var purgeAgeDuration time.Duration + var err error + purgeAge, ok := config["age"] + if ok { + ageStr, ok := purgeAge.(string) + if !ok { + badPurgeUploadConfig("age is not a string") + } + purgeAgeDuration, err = time.ParseDuration(ageStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error())) + } + } else { + badPurgeUploadConfig("age missing") + } + + var intervalDuration time.Duration + interval, ok := config["interval"] + if ok { + intervalStr, ok := interval.(string) + if !ok { + badPurgeUploadConfig("interval is not a string") + } + + intervalDuration, err = time.ParseDuration(intervalStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error())) + } + } else { + badPurgeUploadConfig("interval missing") + } + + var dryRunBool bool + dryRun, ok := config["dryrun"] + if ok { + dryRunBool, ok = dryRun.(bool) + if !ok { + badPurgeUploadConfig("cannot parse dryrun") + } + } else { + badPurgeUploadConfig("dryrun missing") + } + + go func() { + rand.Seed(time.Now().Unix()) + jitter := time.Duration(rand.Int()%60) * time.Minute + log.Infof("Starting upload purge in %s", jitter) + time.Sleep(jitter) + + for { + storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) + log.Infof("Starting upload purge in %s", intervalDuration) + time.Sleep(intervalDuration) + } + }() +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/app_test.go b/vendor/github.com/docker/distribution/registry/handlers/app_test.go new file mode 100644 index 0000000..385fa4c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/app_test.go @@ -0,0 +1,279 @@ +package handlers + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + _ "github.com/docker/distribution/registry/auth/silly" + "github.com/docker/distribution/registry/storage" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/testdriver" +) + +// TestAppDispatcher builds an application with a test dispatcher and ensures +// that requests are properly dispatched and the handlers are constructed. +// This only tests the dispatch mechanism. The underlying dispatchers must be +// tested individually. +func TestAppDispatcher(t *testing.T) { + driver := testdriver.New() + ctx := context.Background() + registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + app := &App{ + Config: &configuration.Configuration{}, + Context: ctx, + router: v2.Router(), + driver: driver, + registry: registry, + } + server := httptest.NewServer(app) + defer server.Close() + router := v2.Router() + + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("error parsing server url: %v", err) + } + + varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { + return func(ctx *Context, r *http.Request) http.Handler { + // Always checks the same name context + if ctx.Repository.Named().Name() != getName(ctx) { + t.Fatalf("unexpected name: %q != %q", ctx.Repository.Named().Name(), "foo/bar") + } + + // Check that we have all that is expected + for expectedK, expectedV := range expectedVars { + if ctx.Value(expectedK) != expectedV { + t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV) + } + } + + // Check that we only have variables that are expected + for k, v := range ctx.Value("vars").(map[string]string) { + _, ok := expectedVars[k] + + if !ok { // name is checked on context + // We have an unexpected key, fail + t.Fatalf("unexpected key %q in vars with value %q", k, v) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + } + } + + // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string + unflatten := func(vars []string) map[string]string { + m := make(map[string]string) + for i := 0; i < len(vars)-1; i = i + 2 { + m[vars[i]] = vars[i+1] + } + + return m + } + + for _, testcase := range []struct { + endpoint string + vars []string + }{ + { + endpoint: v2.RouteNameManifest, + vars: []string{ + "name", "foo/bar", + "reference", "sometag", + }, + }, + { + endpoint: v2.RouteNameTags, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlobUpload, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlobUploadChunk, + vars: []string{ + "name", "foo/bar", + "uuid", "theuuid", + }, + }, + } { + app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) + route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) + u, err := route.URL(testcase.vars...) + + if err != nil { + t.Fatal(err) + } + + resp, err := http.Get(u.String()) + + if err != nil { + t.Fatal(err) + } + + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK) + } + } +} + +// TestNewApp covers the creation of an application via NewApp with a +// configuration. +func TestNewApp(t *testing.T) { + ctx := context.Background() + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": nil, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Auth: configuration.Auth{ + // For now, we simply test that new auth results in a viable + // application. + "silly": { + "realm": "realm-test", + "service": "service-test", + }, + }, + } + + // Mostly, with this test, given a sane configuration, we are simply + // ensuring that NewApp doesn't panic. We might want to tweak this + // behavior. + app := NewApp(ctx, &config) + + server := httptest.NewServer(app) + defer server.Close() + builder, err := v2.NewURLBuilderFromString(server.URL, false) + if err != nil { + t.Fatalf("error creating urlbuilder: %v", err) + } + + baseURL, err := builder.BuildBaseURL() + if err != nil { + t.Fatalf("error creating baseURL: %v", err) + } + + // TODO(stevvooe): The rest of this test might belong in the API tests. + + // Just hit the app and make sure we get a 401 Unauthorized error. + req, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer req.Body.Close() + + if req.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected status code during request: %v", err) + } + + if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { + t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") + } + + expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" + if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a { + t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a) + } + + var errs errcode.Errors + dec := json.NewDecoder(req.Body) + if err := dec.Decode(&errs); err != nil { + t.Fatalf("error decoding error response: %v", err) + } + + err2, ok := errs[0].(errcode.ErrorCoder) + if !ok { + t.Fatalf("not an ErrorCoder: %#v", errs[0]) + } + if err2.ErrorCode() != errcode.ErrorCodeUnauthorized { + t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), errcode.ErrorCodeUnauthorized) + } +} + +// Test the access record accumulator +func TestAppendAccessRecords(t *testing.T) { + repo := "testRepo" + + expectedResource := auth.Resource{ + Type: "repository", + Name: repo, + } + + expectedPullRecord := auth.Access{ + Resource: expectedResource, + Action: "pull", + } + expectedPushRecord := auth.Access{ + Resource: expectedResource, + Action: "push", + } + expectedAllRecord := auth.Access{ + Resource: expectedResource, + Action: "*", + } + + records := []auth.Access{} + result := appendAccessRecords(records, "GET", repo) + expectedResult := []auth.Access{expectedPullRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "HEAD", repo) + expectedResult = []auth.Access{expectedPullRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "POST", repo) + expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "PUT", repo) + expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "PATCH", repo) + expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "DELETE", repo) + expectedResult = []auth.Access{expectedAllRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/basicauth.go b/vendor/github.com/docker/distribution/registry/handlers/basicauth.go new file mode 100644 index 0000000..8727a3c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/basicauth.go @@ -0,0 +1,11 @@ +// +build go1.4 + +package handlers + +import ( + "net/http" +) + +func basicAuth(r *http.Request) (username, password string, ok bool) { + return r.BasicAuth() +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go b/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go new file mode 100644 index 0000000..6cf10a2 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go @@ -0,0 +1,41 @@ +// +build !go1.4 + +package handlers + +import ( + "encoding/base64" + "net/http" + "strings" +) + +// NOTE(stevvooe): This is basic auth support from go1.4 present to ensure we +// can compile on go1.3 and earlier. + +// BasicAuth returns the username and password provided in the request's +// Authorization header, if the request uses HTTP Basic Authentication. +// See RFC 2617, Section 2. +func basicAuth(r *http.Request) (username, password string, ok bool) { + auth := r.Header.Get("Authorization") + if auth == "" { + return + } + return parseBasicAuth(auth) +} + +// parseBasicAuth parses an HTTP Basic Authentication string. +// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). +func parseBasicAuth(auth string) (username, password string, ok bool) { + if !strings.HasPrefix(auth, "Basic ") { + return + } + c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/blob.go b/vendor/github.com/docker/distribution/registry/handlers/blob.go new file mode 100644 index 0000000..fb250ac --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/blob.go @@ -0,0 +1,99 @@ +package handlers + +import ( + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// blobDispatcher uses the request context to build a blobHandler. +func blobDispatcher(ctx *Context, r *http.Request) http.Handler { + dgst, err := getDigest(ctx) + if err != nil { + + if err == errDigestNotAvailable { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + }) + } + + blobHandler := &blobHandler{ + Context: ctx, + Digest: dgst, + } + + mhandler := handlers.MethodHandler{ + "GET": http.HandlerFunc(blobHandler.GetBlob), + "HEAD": http.HandlerFunc(blobHandler.GetBlob), + } + + if !ctx.readOnly { + mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob) + } + + return mhandler +} + +// blobHandler serves http blob requests. +type blobHandler struct { + *Context + + Digest digest.Digest +} + +// GetBlob fetches the binary data from backend storage returns it in the +// response. +func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("GetBlob") + blobs := bh.Repository.Blobs(bh) + desc, err := blobs.Stat(bh, bh.Digest) + if err != nil { + if err == distribution.ErrBlobUnknown { + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) + } else { + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { + context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// DeleteBlob deletes a layer blob +func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("DeleteBlob") + + blobs := bh.Repository.Blobs(bh) + err := blobs.Delete(bh, bh.Digest) + if err != nil { + switch err { + case distribution.ErrUnsupported: + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) + return + case distribution.ErrBlobUnknown: + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) + return + default: + bh.Errors = append(bh.Errors, err) + context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error()) + return + } + } + + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusAccepted) +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/blobupload.go b/vendor/github.com/docker/distribution/registry/handlers/blobupload.go new file mode 100644 index 0000000..d8c7d88 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/blobupload.go @@ -0,0 +1,368 @@ +package handlers + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage" + "github.com/gorilla/handlers" +) + +// blobUploadDispatcher constructs and returns the blob upload handler for the +// given request context. +func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { + buh := &blobUploadHandler{ + Context: ctx, + UUID: getUploadUUID(ctx), + } + + handler := handlers.MethodHandler{ + "GET": http.HandlerFunc(buh.GetUploadStatus), + "HEAD": http.HandlerFunc(buh.GetUploadStatus), + } + + if !ctx.readOnly { + handler["POST"] = http.HandlerFunc(buh.StartBlobUpload) + handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData) + handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete) + handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload) + } + + if buh.UUID != "" { + state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) + if err != nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + buh.State = state + + if state.Name != ctx.Repository.Named().Name() { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name()) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + + if state.UUID != buh.UUID { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + + blobs := ctx.Repository.Blobs(buh) + upload, err := blobs.Resume(buh, buh.UUID) + if err != nil { + ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) + if err == distribution.ErrBlobUploadUnknown { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + }) + } + buh.Upload = upload + + if size := upload.Size(); size != buh.State.Offset { + defer upload.Close() + ctxu.GetLogger(ctx).Errorf("upload resumed at wrong offest: %d != %d", size, buh.State.Offset) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + upload.Cancel(buh) + }) + } + return closeResources(handler, buh.Upload) + } + + return handler +} + +// blobUploadHandler handles the http blob upload process. +type blobUploadHandler struct { + *Context + + // UUID identifies the upload instance for the current request. Using UUID + // to key blob writers since this implementation uses UUIDs. + UUID string + + Upload distribution.BlobWriter + + State blobUploadState +} + +// StartBlobUpload begins the blob upload process and allocates a server-side +// blob writer session, optionally mounting the blob from a separate repository. +func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { + var options []distribution.BlobCreateOption + + fromRepo := r.FormValue("from") + mountDigest := r.FormValue("mount") + + if mountDigest != "" && fromRepo != "" { + opt, err := buh.createBlobMountOption(fromRepo, mountDigest) + if opt != nil && err == nil { + options = append(options, opt) + } + } + + blobs := buh.Repository.Blobs(buh) + upload, err := blobs.Create(buh, options...) + + if err != nil { + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + } else if err == distribution.ErrUnsupported { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + } else { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + buh.Upload = upload + + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) + w.WriteHeader(http.StatusAccepted) +} + +// GetUploadStatus returns the status of a given upload, identified by id. +func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + // TODO(dmcgowan): Set last argument to false in blobUploadResponse when + // resumable upload is supported. This will enable returning a non-zero + // range for clients to begin uploading at an offset. + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.WriteHeader(http.StatusNoContent) +} + +// PatchBlobData writes data to an upload. +func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + ct := r.Header.Get("Content-Type") + if ct != "" && ct != "application/octet-stream" { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) + // TODO(dmcgowan): encode error + return + } + + // TODO(dmcgowan): support Content-Range header to seek and write range + + if err := copyFullPayload(w, r, buh.Upload, -1, buh, "blob PATCH"); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error())) + return + } + + if err := buh.blobUploadResponse(w, r, false); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +// PutBlobUploadComplete takes the final request of a blob upload. The +// request may include all the blob data or no blob data. Any data +// provided is received and verified. If successful, the blob is linked +// into the blob store and 201 Created is returned with the canonical +// url of the blob. +func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + + if dgstStr == "" { + // no digest? return error, but allow retry. + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) + return + } + + dgst, err := digest.ParseDigest(dgstStr) + if err != nil { + // no digest? return error, but allow retry. + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) + return + } + + if err := copyFullPayload(w, r, buh.Upload, -1, buh, "blob PUT"); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error())) + return + } + + desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ + Digest: dgst, + + // TODO(stevvooe): This isn't wildly important yet, but we should + // really set the mediatype. For now, we can let the backend take care + // of this. + }) + + if err != nil { + switch err := err.(type) { + case distribution.ErrBlobInvalidDigest: + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + case errcode.Error: + buh.Errors = append(buh.Errors, err) + default: + switch err { + case distribution.ErrAccessDenied: + buh.Errors = append(buh.Errors, errcode.ErrorCodeDenied) + case distribution.ErrUnsupported: + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + default: + ctxu.GetLogger(buh).Errorf("unknown error completing upload: %v", err) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + } + + // Clean up the backend blob data if there was an error. + if err := buh.Upload.Cancel(buh); err != nil { + // If the cleanup fails, all we can do is observe and report. + ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) + } + + return + } + if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// CancelBlobUpload cancels an in-progress upload of a blob. +func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + if err := buh.Upload.Cancel(buh); err != nil { + ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + w.WriteHeader(http.StatusNoContent) +} + +// blobUploadResponse provides a standard request for uploading blobs and +// chunk responses. This sets the correct headers but the response status is +// left to the caller. The fresh argument is used to ensure that new blob +// uploads always start at a 0 offset. This allows disabling resumable push by +// always returning a 0 offset on check status. +func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { + // TODO(stevvooe): Need a better way to manage the upload state automatically. + buh.State.Name = buh.Repository.Named().Name() + buh.State.UUID = buh.Upload.ID() + buh.Upload.Close() + buh.State.Offset = buh.Upload.Size() + buh.State.StartedAt = buh.Upload.StartedAt() + + token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) + return err + } + + uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( + buh.Repository.Named(), buh.Upload.ID(), + url.Values{ + "_state": []string{token}, + }) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload url: %s", err) + return err + } + + endRange := buh.Upload.Size() + if endRange > 0 { + endRange = endRange - 1 + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.Header().Set("Location", uploadURL) + + w.Header().Set("Content-Length", "0") + w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) + + return nil +} + +// mountBlob attempts to mount a blob from another repository by its digest. If +// successful, the blob is linked into the blob store and 201 Created is +// returned with the canonical url of the blob. +func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { + dgst, err := digest.ParseDigest(mountDigest) + if err != nil { + return nil, err + } + + ref, err := reference.ParseNamed(fromRepo) + if err != nil { + return nil, err + } + + canonical, err := reference.WithDigest(ref, dgst) + if err != nil { + return nil, err + } + + return storage.WithMountFrom(canonical), nil +} + +// writeBlobCreatedHeaders writes the standard headers describing a newly +// created blob. A 201 Created is written as well as the canonical URL and +// blob digest. +func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { + ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest) + if err != nil { + return err + } + blobURL, err := buh.urlBuilder.BuildBlobURL(ref) + if err != nil { + return err + } + + w.Header().Set("Location", blobURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + w.WriteHeader(http.StatusCreated) + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/catalog.go b/vendor/github.com/docker/distribution/registry/handlers/catalog.go new file mode 100644 index 0000000..eca9846 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/catalog.go @@ -0,0 +1,98 @@ +package handlers + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/storage/driver" + "github.com/gorilla/handlers" +) + +const maximumReturnedEntries = 100 + +func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { + catalogHandler := &catalogHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(catalogHandler.GetCatalog), + } +} + +type catalogHandler struct { + *Context +} + +type catalogAPIResponse struct { + Repositories []string `json:"repositories"` +} + +func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { + var moreEntries = true + + q := r.URL.Query() + lastEntry := q.Get("last") + maxEntries, err := strconv.Atoi(q.Get("n")) + if err != nil || maxEntries < 0 { + maxEntries = maximumReturnedEntries + } + + repos := make([]string, maxEntries) + + filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) + _, pathNotFound := err.(driver.PathNotFoundError) + + if err == io.EOF || pathNotFound { + moreEntries = false + } else if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + // Add a link header if there are more entries to retrieve + if moreEntries { + lastEntry = repos[len(repos)-1] + urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) + if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + w.Header().Set("Link", urlStr) + } + + enc := json.NewEncoder(w) + if err := enc.Encode(catalogAPIResponse{ + Repositories: repos[0:filled], + }); err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// Use the original URL from the request to create a new URL for +// the link header +func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { + calledURL, err := url.Parse(origURL) + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("n", strconv.Itoa(maxEntries)) + v.Add("last", lastEntry) + + calledURL.RawQuery = v.Encode() + + calledURL.Fragment = "" + urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) + + return urlStr, nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/context.go b/vendor/github.com/docker/distribution/registry/handlers/context.go new file mode 100644 index 0000000..552db2d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/context.go @@ -0,0 +1,152 @@ +package handlers + +import ( + "fmt" + "net/http" + "sync" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + "golang.org/x/net/context" +) + +// Context should contain the request specific context for use in across +// handlers. Resources that don't need to be shared across handlers should not +// be on this object. +type Context struct { + // App points to the application structure that created this context. + *App + context.Context + + // Repository is the repository for the current request. All requests + // should be scoped to a single repository. This field may be nil. + Repository distribution.Repository + + // Errors is a collection of errors encountered during the request to be + // returned to the client API. If errors are added to the collection, the + // handler *must not* start the response via http.ResponseWriter. + Errors errcode.Errors + + urlBuilder *v2.URLBuilder + + // TODO(stevvooe): The goal is too completely factor this context and + // dispatching out of the web application. Ideally, we should lean on + // context.Context for injection of these resources. +} + +// Value overrides context.Context.Value to ensure that calls are routed to +// correct context. +func (ctx *Context) Value(key interface{}) interface{} { + return ctx.Context.Value(key) +} + +func getName(ctx context.Context) (name string) { + return ctxu.GetStringValue(ctx, "vars.name") +} + +func getReference(ctx context.Context) (reference string) { + return ctxu.GetStringValue(ctx, "vars.reference") +} + +var errDigestNotAvailable = fmt.Errorf("digest not available in context") + +func getDigest(ctx context.Context) (dgst digest.Digest, err error) { + dgstStr := ctxu.GetStringValue(ctx, "vars.digest") + + if dgstStr == "" { + ctxu.GetLogger(ctx).Errorf("digest not available") + return "", errDigestNotAvailable + } + + d, err := digest.ParseDigest(dgstStr) + if err != nil { + ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) + return "", err + } + + return d, nil +} + +func getUploadUUID(ctx context.Context) (uuid string) { + return ctxu.GetStringValue(ctx, "vars.uuid") +} + +// getUserName attempts to resolve a username from the context and request. If +// a username cannot be resolved, the empty string is returned. +func getUserName(ctx context.Context, r *http.Request) string { + username := ctxu.GetStringValue(ctx, auth.UserNameKey) + + // Fallback to request user with basic auth + if username == "" { + var ok bool + uname, _, ok := basicAuth(r) + if ok { + username = uname + } + } + + return username +} + +// contextManager allows us to associate net/context.Context instances with a +// request, based on the memory identity of http.Request. This prepares http- +// level context, which is not application specific. If this is called, +// (*contextManager).release must be called on the context when the request is +// completed. +// +// Providing this circumvents a lot of necessity for dispatchers with the +// benefit of instantiating the request context much earlier. +// +// TODO(stevvooe): Consider making this facility a part of the context package. +type contextManager struct { + contexts map[*http.Request]context.Context + mu sync.Mutex +} + +// defaultContextManager is just a global instance to register request contexts. +var defaultContextManager = newContextManager() + +func newContextManager() *contextManager { + return &contextManager{ + contexts: make(map[*http.Request]context.Context), + } +} + +// context either returns a new context or looks it up in the manager. +func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { + cm.mu.Lock() + defer cm.mu.Unlock() + + ctx, ok := cm.contexts[r] + if ok { + return ctx + } + + if parent == nil { + parent = ctxu.Background() + } + + ctx = ctxu.WithRequest(parent, r) + ctx, w = ctxu.WithResponseWriter(ctx, w) + ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) + cm.contexts[r] = ctx + + return ctx +} + +// releases frees any associated with resources from request. +func (cm *contextManager) release(ctx context.Context) { + cm.mu.Lock() + defer cm.mu.Unlock() + + r, err := ctxu.GetRequest(ctx) + if err != nil { + ctxu.GetLogger(ctx).Errorf("no request found in context during release") + return + } + delete(cm.contexts, r) +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/health_test.go b/vendor/github.com/docker/distribution/registry/handlers/health_test.go new file mode 100644 index 0000000..0f38bd1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/health_test.go @@ -0,0 +1,210 @@ +package handlers + +import ( + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/health" +) + +func TestFileHealthCheck(t *testing.T) { + interval := time.Second + + tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck") + if err != nil { + t.Fatalf("could not create temporary file: %v", err) + } + defer tmpfile.Close() + + config := &configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Health: configuration.Health{ + FileCheckers: []configuration.FileChecker{ + { + Interval: interval, + File: tmpfile.Name(), + }, + }, + }, + } + + ctx := context.Background() + + app := NewApp(ctx, config) + healthRegistry := health.NewRegistry() + app.RegisterHealthChecks(healthRegistry) + + // Wait for health check to happen + <-time.After(2 * interval) + + status := healthRegistry.CheckStatus() + if len(status) != 1 { + t.Fatal("expected 1 item in health check results") + } + if status[tmpfile.Name()] != "file exists" { + t.Fatal(`did not get "file exists" result for health check`) + } + + os.Remove(tmpfile.Name()) + + <-time.After(2 * interval) + if len(healthRegistry.CheckStatus()) != 0 { + t.Fatal("expected 0 items in health check results") + } +} + +func TestTCPHealthCheck(t *testing.T) { + interval := time.Second + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("could not create listener: %v", err) + } + addrStr := ln.Addr().String() + + // Start accepting + go func() { + for { + conn, err := ln.Accept() + if err != nil { + // listener was closed + return + } + defer conn.Close() + } + }() + + config := &configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Health: configuration.Health{ + TCPCheckers: []configuration.TCPChecker{ + { + Interval: interval, + Addr: addrStr, + Timeout: 500 * time.Millisecond, + }, + }, + }, + } + + ctx := context.Background() + + app := NewApp(ctx, config) + healthRegistry := health.NewRegistry() + app.RegisterHealthChecks(healthRegistry) + + // Wait for health check to happen + <-time.After(2 * interval) + + if len(healthRegistry.CheckStatus()) != 0 { + t.Fatal("expected 0 items in health check results") + } + + ln.Close() + <-time.After(2 * interval) + + // Health check should now fail + status := healthRegistry.CheckStatus() + if len(status) != 1 { + t.Fatal("expected 1 item in health check results") + } + if status[addrStr] != "connection to "+addrStr+" failed" { + t.Fatal(`did not get "connection failed" result for health check`) + } +} + +func TestHTTPHealthCheck(t *testing.T) { + interval := time.Second + threshold := 3 + + stopFailing := make(chan struct{}) + + checkedServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "HEAD" { + t.Fatalf("expected HEAD request, got %s", r.Method) + } + select { + case <-stopFailing: + w.WriteHeader(http.StatusOK) + default: + w.WriteHeader(http.StatusInternalServerError) + } + })) + + config := &configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Health: configuration.Health{ + HTTPCheckers: []configuration.HTTPChecker{ + { + Interval: interval, + URI: checkedServer.URL, + Threshold: threshold, + }, + }, + }, + } + + ctx := context.Background() + + app := NewApp(ctx, config) + healthRegistry := health.NewRegistry() + app.RegisterHealthChecks(healthRegistry) + + for i := 0; ; i++ { + <-time.After(interval) + + status := healthRegistry.CheckStatus() + + if i < threshold-1 { + // definitely shouldn't have hit the threshold yet + if len(status) != 0 { + t.Fatal("expected 1 item in health check results") + } + continue + } + if i < threshold+1 { + // right on the threshold - don't expect a failure yet + continue + } + + if len(status) != 1 { + t.Fatal("expected 1 item in health check results") + } + if status[checkedServer.URL] != "downstream service returned unexpected status: 500" { + t.Fatal("did not get expected result for health check") + } + + break + } + + // Signal HTTP handler to start returning 200 + close(stopFailing) + + <-time.After(2 * interval) + + if len(healthRegistry.CheckStatus()) != 0 { + t.Fatal("expected 0 items in health check results") + } +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/helpers.go b/vendor/github.com/docker/distribution/registry/handlers/helpers.go new file mode 100644 index 0000000..dc3091a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/helpers.go @@ -0,0 +1,71 @@ +package handlers + +import ( + "errors" + "io" + "net/http" + + ctxu "github.com/docker/distribution/context" +) + +// closeResources closes all the provided resources after running the target +// handler. +func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for _, closer := range closers { + defer closer.Close() + } + handler.ServeHTTP(w, r) + }) +} + +// copyFullPayload copies the payload of an HTTP request to destWriter. If it +// receives less content than expected, and the client disconnected during the +// upload, it avoids sending a 400 error to keep the logs cleaner. +// +// The copy will be limited to `limit` bytes, if limit is greater than zero. +func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, context ctxu.Context, action string) error { + // Get a channel that tells us if the client disconnects + var clientClosed <-chan bool + if notifier, ok := responseWriter.(http.CloseNotifier); ok { + clientClosed = notifier.CloseNotify() + } else { + ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) + } + + var body = r.Body + if limit > 0 { + body = http.MaxBytesReader(responseWriter, body, limit) + } + + // Read in the data, if any. + copied, err := io.Copy(destWriter, body) + if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { + // Didn't receive as much content as expected. Did the client + // disconnect during the request? If so, avoid returning a 400 + // error to keep the logs cleaner. + select { + case <-clientClosed: + // Set the response code to "499 Client Closed Request" + // Even though the connection has already been closed, + // this causes the logger to pick up a 499 error + // instead of showing 0 for the HTTP status. + responseWriter.WriteHeader(499) + + ctxu.GetLoggerWithFields(context, map[interface{}]interface{}{ + "error": err, + "copied": copied, + "contentLength": r.ContentLength, + }, "error", "copied", "contentLength").Error("client disconnected during " + action) + return errors.New("client disconnected") + default: + } + } + + if err != nil { + ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/hmac.go b/vendor/github.com/docker/distribution/registry/handlers/hmac.go new file mode 100644 index 0000000..94ed9fd --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/hmac.go @@ -0,0 +1,74 @@ +package handlers + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "time" +) + +// blobUploadState captures the state serializable state of the blob upload. +type blobUploadState struct { + // name is the primary repository under which the blob will be linked. + Name string + + // UUID identifies the upload. + UUID string + + // offset contains the current progress of the upload. + Offset int64 + + // StartedAt is the original start time of the upload. + StartedAt time.Time +} + +type hmacKey string + +var errInvalidSecret = fmt.Errorf("invalid secret") + +// unpackUploadState unpacks and validates the blob upload state from the +// token, using the hmacKey secret. +func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) { + var state blobUploadState + + tokenBytes, err := base64.URLEncoding.DecodeString(token) + if err != nil { + return state, err + } + mac := hmac.New(sha256.New, []byte(secret)) + + if len(tokenBytes) < mac.Size() { + return state, errInvalidSecret + } + + macBytes := tokenBytes[:mac.Size()] + messageBytes := tokenBytes[mac.Size():] + + mac.Write(messageBytes) + if !hmac.Equal(mac.Sum(nil), macBytes) { + return state, errInvalidSecret + } + + if err := json.Unmarshal(messageBytes, &state); err != nil { + return state, err + } + + return state, nil +} + +// packUploadState packs the upload state signed with and hmac digest using +// the hmacKey secret, encoding to url safe base64. The resulting token can be +// used to share data with minimized risk of external tampering. +func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { + mac := hmac.New(sha256.New, []byte(secret)) + p, err := json.Marshal(lus) + if err != nil { + return "", err + } + + mac.Write(p) + + return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go b/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go new file mode 100644 index 0000000..366c727 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go @@ -0,0 +1,117 @@ +package handlers + +import "testing" + +var blobUploadStates = []blobUploadState{ + { + Name: "hello", + UUID: "abcd-1234-qwer-0987", + Offset: 0, + }, + { + Name: "hello-world", + UUID: "abcd-1234-qwer-0987", + Offset: 0, + }, + { + Name: "h3ll0_w0rld", + UUID: "abcd-1234-qwer-0987", + Offset: 1337, + }, + { + Name: "ABCDEFG", + UUID: "ABCD-1234-QWER-0987", + Offset: 1234567890, + }, + { + Name: "this-is-A-sort-of-Long-name-for-Testing", + UUID: "dead-1234-beef-0987", + Offset: 8675309, + }, +} + +var secrets = []string{ + "supersecret", + "12345", + "a", + "SuperSecret", + "Sup3r... S3cr3t!", + "This is a reasonably long secret key that is used for the purpose of testing.", + "\u2603+\u2744", // snowman+snowflake +} + +// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and +// validates that the tokens can be used to reconstruct the proper upload state. +func TestLayerUploadTokens(t *testing.T) { + secret := hmacKey("supersecret") + + for _, testcase := range blobUploadStates { + token, err := secret.packUploadState(testcase) + if err != nil { + t.Fatal(err) + } + + lus, err := secret.unpackUploadState(token) + if err != nil { + t.Fatal(err) + } + + assertBlobUploadStateEquals(t, testcase, lus) + } +} + +// TestHMACValidate ensures that any HMAC token providers are compatible if and +// only if they share the same secret. +func TestHMACValidation(t *testing.T) { + for _, secret := range secrets { + secret1 := hmacKey(secret) + secret2 := hmacKey(secret) + badSecret := hmacKey("DifferentSecret") + + for _, testcase := range blobUploadStates { + token, err := secret1.packUploadState(testcase) + if err != nil { + t.Fatal(err) + } + + lus, err := secret2.unpackUploadState(token) + if err != nil { + t.Fatal(err) + } + + assertBlobUploadStateEquals(t, testcase, lus) + + _, err = badSecret.unpackUploadState(token) + if err == nil { + t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token) + } + + badToken, err := badSecret.packUploadState(lus) + if err != nil { + t.Fatal(err) + } + + _, err = secret1.unpackUploadState(badToken) + if err == nil { + t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) + } + + _, err = secret2.unpackUploadState(badToken) + if err == nil { + t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) + } + } + } +} + +func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) { + if expected.Name != received.Name { + t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) + } + if expected.UUID != received.UUID { + t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID) + } + if expected.Offset != received.Offset { + t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset) + } +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/hooks.go b/vendor/github.com/docker/distribution/registry/handlers/hooks.go new file mode 100644 index 0000000..7bbab4f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/hooks.go @@ -0,0 +1,53 @@ +package handlers + +import ( + "bytes" + "errors" + "fmt" + "strings" + "text/template" + + "github.com/Sirupsen/logrus" +) + +// logHook is for hooking Panic in web application +type logHook struct { + LevelsParam []string + Mail *mailer +} + +// Fire forwards an error to LogHook +func (hook *logHook) Fire(entry *logrus.Entry) error { + addr := strings.Split(hook.Mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) + + html := ` + {{.Message}} + + {{range $key, $value := .Data}} + {{$key}}: {{$value}} + {{end}} + ` + b := bytes.NewBuffer(make([]byte, 0)) + t := template.Must(template.New("mail body").Parse(html)) + if err := t.Execute(b, entry); err != nil { + return err + } + body := fmt.Sprintf("%s", b) + + return hook.Mail.sendMail(subject, body) +} + +// Levels contains hook levels to be catched +func (hook *logHook) Levels() []logrus.Level { + levels := []logrus.Level{} + for _, v := range hook.LevelsParam { + lv, _ := logrus.ParseLevel(v) + levels = append(levels, lv) + } + return levels +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/images.go b/vendor/github.com/docker/distribution/registry/handlers/images.go new file mode 100644 index 0000000..3ee207b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/images.go @@ -0,0 +1,462 @@ +package handlers + +import ( + "bytes" + "fmt" + "net/http" + "strings" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + "github.com/gorilla/handlers" +) + +// These constants determine which architecture and OS to choose from a +// manifest list when downconverting it to a schema1 manifest. +const ( + defaultArch = "amd64" + defaultOS = "linux" + maxManifestBodySize = 4 << 20 +) + +// imageManifestDispatcher takes the request context and builds the +// appropriate handler for handling image manifest requests. +func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { + imageManifestHandler := &imageManifestHandler{ + Context: ctx, + } + reference := getReference(ctx) + dgst, err := digest.ParseDigest(reference) + if err != nil { + // We just have a tag + imageManifestHandler.Tag = reference + } else { + imageManifestHandler.Digest = dgst + } + + mhandler := handlers.MethodHandler{ + "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), + "HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest), + } + + if !ctx.readOnly { + mhandler["PUT"] = http.HandlerFunc(imageManifestHandler.PutImageManifest) + mhandler["DELETE"] = http.HandlerFunc(imageManifestHandler.DeleteImageManifest) + } + + return mhandler +} + +// imageManifestHandler handles http operations on image manifests. +type imageManifestHandler struct { + *Context + + // One of tag or digest gets set, depending on what is present in context. + Tag string + Digest digest.Digest +} + +// GetImageManifest fetches the image manifest from the storage backend, if it exists. +func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(imh).Debug("GetImageManifest") + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + var manifest distribution.Manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + desc, err := tags.Get(imh, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + imh.Digest = desc.Digest + } + + if etagMatch(r, imh.Digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.Tag)) + } + manifest, err = manifests.Get(imh, imh.Digest, options...) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + + supportsSchema2 := false + supportsManifestList := false + // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values + // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 + for _, acceptHeader := range r.Header["Accept"] { + // r.Header[...] is a slice in case the request contains the same header more than once + // if the header isn't set, we'll get the zero value, which "range" will handle gracefully + + // we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616) + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 + for _, mediaType := range strings.Split(acceptHeader, ",") { + // remove "; q=..." if present + if i := strings.Index(mediaType, ";"); i >= 0 { + mediaType = mediaType[:i] + } + + // it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f") + mediaType = strings.TrimSpace(mediaType) + + if mediaType == schema2.MediaTypeManifest { + supportsSchema2 = true + } + if mediaType == manifestlist.MediaTypeManifestList { + supportsManifestList = true + } + } + } + + schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) + manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) + + // Only rewrite schema2 manifests when they are being fetched by tag. + // If they are being fetched by digest, we can't return something not + // matching the digest. + if imh.Tag != "" && isSchema2 && !supportsSchema2 { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } else if imh.Tag != "" && isManifestList && !supportsManifestList { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) + + // Find the image manifest corresponding to the default + // platform + var manifestDigest digest.Digest + for _, manifestDescriptor := range manifestList.Manifests { + if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + } + + manifest, err = manifests.Get(imh, manifestDigest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + + // If necessary, convert the image manifest + if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } + } + + ct, p, err := manifest.Payload() + if err != nil { + return + } + + w.Header().Set("Content-Type", ct) + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) + w.Write(p) +} + +func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { + targetDescriptor := schema2Manifest.Target() + blobs := imh.Repository.Blobs(imh) + configJSON, err := blobs.Get(imh, targetDescriptor.Digest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + + ref := imh.Repository.Named() + + if imh.Tag != "" { + ref, err = reference.WithTag(ref, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err)) + return nil, err + } + } + + builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) + for _, d := range schema2Manifest.Layers { + if err := builder.AppendReference(d); err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + } + manifest, err := builder.Build(imh) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + imh.Digest = digest.FromBytes(manifest.(*schema1.SignedManifest).Canonical) + + return manifest, nil +} + +func etagMatch(r *http.Request, etag string) bool { + for _, headerVal := range r.Header["If-None-Match"] { + if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted + return true + } + } + return false +} + +// PutImageManifest validates and stores an image in the registry. +func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(imh).Debug("PutImageManifest") + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + var jsonBuf bytes.Buffer + if err := copyFullPayload(w, r, &jsonBuf, maxManifestBodySize, imh, "image manifest PUT"); err != nil { + // copyFullPayload reports the error if necessary + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err.Error())) + return + } + + mediaType := r.Header.Get("Content-Type") + manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes()) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return + } + + if imh.Digest != "" { + if desc.Digest != imh.Digest { + ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", desc.Digest, imh.Digest) + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + return + } + } else if imh.Tag != "" { + imh.Digest = desc.Digest + } else { + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified")) + return + } + + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.Tag)) + } + + if err := imh.applyResourcePolicy(manifest); err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + _, err = manifests.Put(imh, manifest, options...) + if err != nil { + // TODO(stevvooe): These error handling switches really need to be + // handled by an app global mapper. + if err == distribution.ErrUnsupported { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) + return + } + if err == distribution.ErrAccessDenied { + imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied) + return + } + switch err := err.(type) { + case distribution.ErrManifestVerification: + for _, verificationError := range err { + switch verificationError := verificationError.(type) { + case distribution.ErrManifestBlobUnknown: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest)) + case distribution.ErrManifestNameInvalid: + imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) + case distribution.ErrManifestUnverified: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) + default: + if verificationError == digest.ErrDigestInvalidFormat { + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError) + } + } + } + case errcode.Error: + imh.Errors = append(imh.Errors, err) + default: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + return + } + + // Tag this manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + err = tags.Tag(imh, imh.Tag, desc) + if err != nil { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + } + + // Construct a canonical url for the uploaded manifest. + ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest) + if err != nil { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + location, err := imh.urlBuilder.BuildManifestURL(ref) + if err != nil { + // NOTE(stevvooe): Given the behavior above, this absurdly unlikely to + // happen. We'll log the error here but proceed as if it worked. Worst + // case, we set an empty location header. + ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) + } + + w.Header().Set("Location", location) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.WriteHeader(http.StatusCreated) +} + +// applyResourcePolicy checks whether the resource class matches what has +// been authorized and allowed by the policy configuration. +func (imh *imageManifestHandler) applyResourcePolicy(manifest distribution.Manifest) error { + allowedClasses := imh.App.Config.Policy.Repository.Classes + if len(allowedClasses) == 0 { + return nil + } + + var class string + switch m := manifest.(type) { + case *schema1.SignedManifest: + class = "image" + case *schema2.DeserializedManifest: + switch m.Config.MediaType { + case schema2.MediaTypeConfig: + class = "image" + case schema2.MediaTypePluginConfig: + class = "plugin" + default: + message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType) + return errcode.ErrorCodeDenied.WithMessage(message) + } + } + + if class == "" { + return nil + } + + // Check to see if class is allowed in registry + var allowedClass bool + for _, c := range allowedClasses { + if class == c { + allowedClass = true + break + } + } + if !allowedClass { + message := fmt.Sprintf("registry does not allow %s manifest", class) + return errcode.ErrorCodeDenied.WithMessage(message) + } + + resources := auth.AuthorizedResources(imh) + n := imh.Repository.Named().Name() + + var foundResource bool + for _, r := range resources { + if r.Name == n { + if r.Class == "" { + r.Class = "image" + } + if r.Class == class { + return nil + } + foundResource = true + } + } + + // resource was found but no matching class was found + if foundResource { + message := fmt.Sprintf("repository not authorized for %s manifest", class) + return errcode.ErrorCodeDenied.WithMessage(message) + } + + return nil + +} + +// DeleteImageManifest removes the manifest with the given digest from the registry. +func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(imh).Debug("DeleteImageManifest") + + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + err = manifests.Delete(imh, imh.Digest) + if err != nil { + switch err { + case digest.ErrDigestUnsupported: + case digest.ErrDigestInvalidFormat: + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + return + case distribution.ErrBlobUnknown: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + case distribution.ErrUnsupported: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) + return + default: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) + return + } + } + + tagService := imh.Repository.Tags(imh) + referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest}) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + for _, tag := range referencedTags { + if err := tagService.Untag(imh, tag); err != nil { + imh.Errors = append(imh.Errors, err) + return + } + } + + w.WriteHeader(http.StatusAccepted) +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/mail.go b/vendor/github.com/docker/distribution/registry/handlers/mail.go new file mode 100644 index 0000000..3924490 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/mail.go @@ -0,0 +1,45 @@ +package handlers + +import ( + "errors" + "net/smtp" + "strings" +) + +// mailer provides fields of email configuration for sending. +type mailer struct { + Addr, Username, Password, From string + Insecure bool + To []string +} + +// sendMail allows users to send email, only if mail parameters is configured correctly. +func (mail *mailer) sendMail(subject, message string) error { + addr := strings.Split(mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + msg := []byte("To:" + strings.Join(mail.To, ";") + + "\r\nFrom: " + mail.From + + "\r\nSubject: " + subject + + "\r\nContent-Type: text/plain\r\n\r\n" + + message) + auth := smtp.PlainAuth( + "", + mail.Username, + mail.Password, + host, + ) + err := smtp.SendMail( + mail.Addr, + auth, + mail.From, + mail.To, + []byte(msg), + ) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/tags.go b/vendor/github.com/docker/distribution/registry/handlers/tags.go new file mode 100644 index 0000000..91f1031 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/tags.go @@ -0,0 +1,62 @@ +package handlers + +import ( + "encoding/json" + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// tagsDispatcher constructs the tags handler api endpoint. +func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { + tagsHandler := &tagsHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(tagsHandler.GetTags), + } +} + +// tagsHandler handles requests for lists of tags under a repository name. +type tagsHandler struct { + *Context +} + +type tagsAPIResponse struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// GetTags returns a json list of tags for a specific image name. +func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + tagService := th.Repository.Tags(th) + tags, err := tagService.All(th) + if err != nil { + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()})) + case errcode.Error: + th.Errors = append(th.Errors, err) + default: + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + enc := json.NewEncoder(w) + if err := enc.Encode(tagsAPIResponse{ + Name: th.Repository.Named().Name(), + Tags: tags, + }); err != nil { + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} diff --git a/vendor/github.com/docker/distribution/registry/listener/listener.go b/vendor/github.com/docker/distribution/registry/listener/listener.go new file mode 100644 index 0000000..b93a7a6 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/listener/listener.go @@ -0,0 +1,74 @@ +package listener + +import ( + "fmt" + "net" + "os" + "time" +) + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +// it is a plain copy-paste from net/http/server.go +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +// NewListener announces on laddr and net. Accepted values of the net are +// 'unix' and 'tcp' +func NewListener(net, laddr string) (net.Listener, error) { + switch net { + case "unix": + return newUnixListener(laddr) + case "tcp", "": // an empty net means tcp + return newTCPListener(laddr) + default: + return nil, fmt.Errorf("unknown address type %s", net) + } +} + +func newUnixListener(laddr string) (net.Listener, error) { + fi, err := os.Stat(laddr) + if err == nil { + // the file exists. + // try to remove it if it's a socket + if !isSocket(fi.Mode()) { + return nil, fmt.Errorf("file %s exists and is not a socket", laddr) + } + + if err := os.Remove(laddr); err != nil { + return nil, err + } + } else if !os.IsNotExist(err) { + // we can't do stat on the file. + // it means we can not remove it + return nil, err + } + + return net.Listen("unix", laddr) +} + +func isSocket(m os.FileMode) bool { + return m&os.ModeSocket != 0 +} + +func newTCPListener(laddr string) (net.Listener, error) { + ln, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil +} diff --git a/vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go b/vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go new file mode 100644 index 0000000..3e6e5cc --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go @@ -0,0 +1,54 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage" +) + +// InitFunc is the type of a RegistryMiddleware factory function and is +// used to register the constructor for different RegistryMiddleware backends. +type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) + +var middlewares map[string]InitFunc +var registryoptions []storage.RegistryOption + +// Register is used to register an InitFunc for +// a RegistryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RegistryMiddleware with the given options using the named backend. +func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, registry, options) + } + } + + return nil, fmt.Errorf("no registry middleware registered with name: %s", name) +} + +// RegisterOptions adds more options to RegistryOption list. Options get applied before +// any other configuration-based options. +func RegisterOptions(options ...storage.RegistryOption) error { + registryoptions = append(registryoptions, options...) + return nil +} + +// GetRegistryOptions returns list of RegistryOption. +func GetRegistryOptions() []storage.RegistryOption { + return registryoptions +} diff --git a/vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go b/vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go new file mode 100644 index 0000000..27b42ae --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go @@ -0,0 +1,40 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// InitFunc is the type of a RepositoryMiddleware factory function and is +// used to register the constructor for different RepositoryMiddleware backends. +type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) + +var middlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a RepositoryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RepositoryMiddleware with the given options using the named backend. +func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, repository, options) + } + } + + return nil, fmt.Errorf("no repository middleware registered with name: %s", name) +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go b/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go new file mode 100644 index 0000000..7b405af --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go @@ -0,0 +1,87 @@ +package proxy + +import ( + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" +) + +const challengeHeader = "Docker-Distribution-Api-Version" + +type userpass struct { + username string + password string +} + +type credentials struct { + creds map[string]userpass +} + +func (c credentials) Basic(u *url.URL) (string, string) { + up := c.creds[u.String()] + + return up.username, up.password +} + +func (c credentials) RefreshToken(u *url.URL, service string) string { + return "" +} + +func (c credentials) SetRefreshToken(u *url.URL, service, token string) { +} + +// configureAuth stores credentials for challenge responses +func configureAuth(username, password, remoteURL string) (auth.CredentialStore, error) { + creds := map[string]userpass{} + + authURLs, err := getAuthURLs(remoteURL) + if err != nil { + return nil, err + } + + for _, url := range authURLs { + context.GetLogger(context.Background()).Infof("Discovered token authentication URL: %s", url) + creds[url] = userpass{ + username: username, + password: password, + } + } + + return credentials{creds: creds}, nil +} + +func getAuthURLs(remoteURL string) ([]string, error) { + authURLs := []string{} + + resp, err := http.Get(remoteURL + "/v2/") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + for _, c := range challenge.ResponseChallenges(resp) { + if strings.EqualFold(c.Scheme, "bearer") { + authURLs = append(authURLs, c.Parameters["realm"]) + } + } + + return authURLs, nil +} + +func ping(manager challenge.Manager, endpoint, versionHeader string) error { + resp, err := http.Get(endpoint) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go new file mode 100644 index 0000000..6cd5721 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go @@ -0,0 +1,224 @@ +package proxy + +import ( + "io" + "net/http" + "strconv" + "sync" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config file +const blobTTL = time.Duration(24 * 7 * time.Hour) + +type proxyBlobStore struct { + localStore distribution.BlobStore + remoteStore distribution.BlobService + scheduler *scheduler.TTLExpirationScheduler + repositoryName reference.Named + authChallenger authChallenger +} + +var _ distribution.BlobStore = &proxyBlobStore{} + +// inflight tracks currently downloading blobs +var inflight = make(map[digest.Digest]struct{}) + +// mu protects inflight +var mu sync.Mutex + +func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) { + w.Header().Set("Content-Length", strconv.FormatInt(length, 10)) + w.Header().Set("Content-Type", mediaType) + w.Header().Set("Docker-Content-Digest", digest.String()) + w.Header().Set("Etag", digest.String()) +} + +func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) { + desc, err := pbs.remoteStore.Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + if w, ok := writer.(http.ResponseWriter); ok { + setResponseHeaders(w, desc.Size, desc.MediaType, dgst) + } + + remoteReader, err := pbs.remoteStore.Open(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + defer remoteReader.Close() + + _, err = io.CopyN(writer, remoteReader, desc.Size) + if err != nil { + return distribution.Descriptor{}, err + } + + proxyMetrics.BlobPush(uint64(desc.Size)) + + return desc, nil +} + +func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, error) { + localDesc, err := pbs.localStore.Stat(ctx, dgst) + if err != nil { + // Stat can report a zero sized file here if it's checked between creation + // and population. Return nil error, and continue + return false, nil + } + + if err == nil { + proxyMetrics.BlobPush(uint64(localDesc.Size)) + return true, pbs.localStore.ServeBlob(ctx, w, r, dgst) + } + + return false, nil + +} + +func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) error { + defer func() { + mu.Lock() + delete(inflight, dgst) + mu.Unlock() + }() + + var desc distribution.Descriptor + var err error + var bw distribution.BlobWriter + + bw, err = pbs.localStore.Create(ctx) + if err != nil { + return err + } + + desc, err = pbs.copyContent(ctx, dgst, bw) + if err != nil { + return err + } + + _, err = bw.Commit(ctx, desc) + if err != nil { + return err + } + + return nil +} + +func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + served, err := pbs.serveLocal(ctx, w, r, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) + return err + } + + if served { + return nil + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return err + } + + mu.Lock() + _, ok := inflight[dgst] + if ok { + mu.Unlock() + _, err := pbs.copyContent(ctx, dgst, w) + return err + } + inflight[dgst] = struct{}{} + mu.Unlock() + + go func(dgst digest.Digest) { + if err := pbs.storeLocal(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) + } + + blobRef, err := reference.WithDigest(pbs.repositoryName, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return + } + + pbs.scheduler.AddBlob(blobRef, repositoryTTL) + }(dgst) + + _, err = pbs.copyContent(ctx, dgst, w) + if err != nil { + return err + } + return nil +} + +func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := pbs.localStore.Stat(ctx, dgst) + if err == nil { + return desc, err + } + + if err != distribution.ErrBlobUnknown { + return distribution.Descriptor{}, err + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return distribution.Descriptor{}, err + } + + return pbs.remoteStore.Stat(ctx, dgst) +} + +func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + blob, err := pbs.localStore.Get(ctx, dgst) + if err == nil { + return blob, nil + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return []byte{}, err + } + + blob, err = pbs.remoteStore.Get(ctx, dgst) + if err != nil { + return []byte{}, err + } + + _, err = pbs.localStore.Put(ctx, "", blob) + if err != nil { + return []byte{}, err + } + return blob, nil +} + +// Unsupported functions +func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go new file mode 100644 index 0000000..8e3a069 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go @@ -0,0 +1,416 @@ +package proxy + +import ( + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/filesystem" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +var sbsMu sync.Mutex + +type statsBlobStore struct { + stats map[string]int + blobs distribution.BlobStore +} + +func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + sbsMu.Lock() + sbs.stats["put"]++ + sbsMu.Unlock() + + return sbs.blobs.Put(ctx, mediaType, p) +} + +func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + sbsMu.Lock() + sbs.stats["get"]++ + sbsMu.Unlock() + + return sbs.blobs.Get(ctx, dgst) +} + +func (sbs statsBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + sbsMu.Lock() + sbs.stats["create"]++ + sbsMu.Unlock() + + return sbs.blobs.Create(ctx, options...) +} + +func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + sbsMu.Lock() + sbs.stats["resume"]++ + sbsMu.Unlock() + + return sbs.blobs.Resume(ctx, id) +} + +func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + sbsMu.Lock() + sbs.stats["open"]++ + sbsMu.Unlock() + + return sbs.blobs.Open(ctx, dgst) +} + +func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + sbsMu.Lock() + sbs.stats["serveblob"]++ + sbsMu.Unlock() + + return sbs.blobs.ServeBlob(ctx, w, r, dgst) +} + +func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + + sbsMu.Lock() + sbs.stats["stat"]++ + sbsMu.Unlock() + + return sbs.blobs.Stat(ctx, dgst) +} + +func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + sbsMu.Lock() + sbs.stats["delete"]++ + sbsMu.Unlock() + + return sbs.blobs.Delete(ctx, dgst) +} + +type testEnv struct { + numUnique int + inRemote []distribution.Descriptor + store proxyBlobStore + ctx context.Context +} + +func (te *testEnv) LocalStats() *map[string]int { + sbsMu.Lock() + ls := te.store.localStore.(statsBlobStore).stats + sbsMu.Unlock() + return &ls +} + +func (te *testEnv) RemoteStats() *map[string]int { + sbsMu.Lock() + rs := te.store.remoteStore.(statsBlobStore).stats + sbsMu.Unlock() + return &rs +} + +// Populate remote store and record the digests +func makeTestEnv(t *testing.T, name string) *testEnv { + nameRef, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + + ctx := context.Background() + + truthDir, err := ioutil.TempDir("", "truth") + if err != nil { + t.Fatalf("unable to create tempdir: %s", err) + } + + cacheDir, err := ioutil.TempDir("", "cache") + if err != nil { + t.Fatalf("unable to create tempdir: %s", err) + } + + localDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": truthDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + + // todo: create a tempfile area here + localRegistry, err := storage.NewRegistry(ctx, localDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + localRepo, err := localRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + cacheDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": cacheDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + + truthRegistry, err := storage.NewRegistry(ctx, cacheDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + truthRepo, err := truthRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + truthBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: truthRepo.Blobs(ctx), + } + + localBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: localRepo.Blobs(ctx), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + + proxyBlobStore := proxyBlobStore{ + repositoryName: nameRef, + remoteStore: truthBlobs, + localStore: localBlobs, + scheduler: s, + authChallenger: &mockChallenger{}, + } + + te := &testEnv{ + store: proxyBlobStore, + ctx: ctx, + } + return te +} + +func makeBlob(size int) []byte { + blob := make([]byte, size, size) + for i := 0; i < size; i++ { + blob[i] = byte('A' + rand.Int()%48) + } + return blob +} + +func init() { + rand.Seed(42) +} + +func perm(m []distribution.Descriptor) []distribution.Descriptor { + for i := 0; i < len(m); i++ { + j := rand.Intn(i + 1) + tmp := m[i] + m[i] = m[j] + m[j] = tmp + } + return m +} + +func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { + var inRemote []distribution.Descriptor + + for i := 0; i < numUnique; i++ { + bytes := makeBlob(size) + for j := 0; j < blobCount/numUnique; j++ { + desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) + if err != nil { + t.Fatalf("Put in store") + } + + inRemote = append(inRemote, desc) + } + } + + te.inRemote = inRemote + te.numUnique = numUnique +} +func TestProxyStoreGet(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + populate(t, te, 1, 10, 1) + _, err := te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 1 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + + _, err = te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 2 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + +} + +func TestProxyStoreStat(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + remoteBlobCount := 1 + populate(t, te, remoteBlobCount, 10, 1) + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + // Stat - touches both stores + for _, d := range te.inRemote { + _, err := te.store.Stat(te.ctx, d.Digest) + if err != nil { + t.Fatalf("Error stating proxy store") + } + } + + if (*localStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected local stat count") + } + + if (*remoteStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected remote stat count") + } + + if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) { + t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger) + } + +} + +func TestProxyStoreServeHighConcurrency(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + blobSize := 200 + blobCount := 10 + numUnique := 1 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 16 + testProxyStoreServe(t, te, numClients) +} + +func TestProxyStoreServeMany(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + blobSize := 200 + blobCount := 10 + numUnique := 4 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 4 + testProxyStoreServe(t, te, numClients) +} + +// todo(richardscothern): blobCount must be smaller than num clients +func TestProxyStoreServeBig(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + blobSize := 2 << 20 + blobCount := 4 + numUnique := 2 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 4 + testProxyStoreServe(t, te, numClients) +} + +// testProxyStoreServe will create clients to consume all blobs +// populated in the truth store +func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + var wg sync.WaitGroup + + for i := 0; i < numClients; i++ { + // Serveblob - pulls through blobs + wg.Add(1) + go func() { + defer wg.Done() + for _, remoteBlob := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + bodyBytes := w.Body.Bytes() + localDigest := digest.FromBytes(bodyBytes) + if localDigest != remoteBlob.Digest { + t.Fatalf("Mismatching blob fetch from proxy") + } + } + }() + } + + wg.Wait() + + remoteBlobCount := len(te.inRemote) + sbsMu.Lock() + if (*localStats)["stat"] != remoteBlobCount*numClients && (*localStats)["create"] != te.numUnique { + sbsMu.Unlock() + t.Fatal("Expected: stat:", remoteBlobCount*numClients, "create:", remoteBlobCount) + } + sbsMu.Unlock() + + // Wait for any async storage goroutines to finish + time.Sleep(3 * time.Second) + + sbsMu.Lock() + remoteStatCount := (*remoteStats)["stat"] + remoteOpenCount := (*remoteStats)["open"] + sbsMu.Unlock() + + // Serveblob - blobs come from local + for _, dr := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + dl := digest.FromBytes(w.Body.Bytes()) + if dl != dr.Digest { + t.Errorf("Mismatching blob fetch from proxy") + } + } + + localStats = te.LocalStats() + remoteStats = te.RemoteStats() + + // Ensure remote unchanged + sbsMu.Lock() + defer sbsMu.Unlock() + if (*remoteStats)["stat"] != remoteStatCount && (*remoteStats)["open"] != remoteOpenCount { + t.Fatalf("unexpected remote stats: %#v", remoteStats) + } +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go new file mode 100644 index 0000000..f08e285 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go @@ -0,0 +1,95 @@ +package proxy + +import ( + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config +const repositoryTTL = time.Duration(24 * 7 * time.Hour) + +type proxyManifestStore struct { + ctx context.Context + localManifests distribution.ManifestService + remoteManifests distribution.ManifestService + repositoryName reference.Named + scheduler *scheduler.TTLExpirationScheduler + authChallenger authChallenger +} + +var _ distribution.ManifestService = &proxyManifestStore{} + +func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + exists, err := pms.localManifests.Exists(ctx, dgst) + if err != nil { + return false, err + } + if exists { + return true, nil + } + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { + return false, err + } + return pms.remoteManifests.Exists(ctx, dgst) +} + +func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + // At this point `dgst` was either specified explicitly, or returned by the + // tagstore with the most recent association. + var fromRemote bool + manifest, err := pms.localManifests.Get(ctx, dgst, options...) + if err != nil { + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { + return nil, err + } + + manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) + if err != nil { + return nil, err + } + fromRemote = true + } + + _, payload, err := manifest.Payload() + if err != nil { + return nil, err + } + + proxyMetrics.ManifestPush(uint64(len(payload))) + if fromRemote { + proxyMetrics.ManifestPull(uint64(len(payload))) + + _, err = pms.localManifests.Put(ctx, manifest) + if err != nil { + return nil, err + } + + // Schedule the manifest blob for removal + repoBlob, err := reference.WithDigest(pms.repositoryName, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return nil, err + } + + pms.scheduler.AddManifest(repoBlob, repositoryTTL) + // Ensure the manifest blob is cleaned up + //pms.scheduler.AddBlob(blobRef, repositoryTTL) + + } + + return manifest, err +} + +func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var d digest.Digest + return d, distribution.ErrUnsupported +} + +func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go new file mode 100644 index 0000000..067e845 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go @@ -0,0 +1,275 @@ +package proxy + +import ( + "io" + "sync" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +type statsManifest struct { + manifests distribution.ManifestService + stats map[string]int +} + +type manifestStoreTestEnv struct { + manifestDigest digest.Digest // digest of the signed manifest in the local storage + manifests proxyManifestStore +} + +func (te manifestStoreTestEnv) LocalStats() *map[string]int { + ls := te.manifests.localManifests.(statsManifest).stats + return &ls +} + +func (te manifestStoreTestEnv) RemoteStats() *map[string]int { + rs := te.manifests.remoteManifests.(statsManifest).stats + return &rs +} + +func (sm statsManifest) Delete(ctx context.Context, dgst digest.Digest) error { + sm.stats["delete"]++ + return sm.manifests.Delete(ctx, dgst) +} + +func (sm statsManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + sm.stats["exists"]++ + return sm.manifests.Exists(ctx, dgst) +} + +func (sm statsManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + sm.stats["get"]++ + return sm.manifests.Get(ctx, dgst) +} + +func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + sm.stats["put"]++ + return sm.manifests.Put(ctx, manifest) +} + +type mockChallenger struct { + sync.Mutex + count int +} + +// Called for remote operations only +func (m *mockChallenger) tryEstablishChallenges(context.Context) error { + m.Lock() + defer m.Unlock() + m.count++ + return nil +} + +func (m *mockChallenger) credentialStore() auth.CredentialStore { + return nil +} + +func (m *mockChallenger) challengeManager() challenge.Manager { + return nil +} + +func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { + nameRef, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), + storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), + storage.Schema1SigningKey(k)) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + truthRepo, err := truthRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + tr, err := truthRepo.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + truthManifests := statsManifest{ + manifests: tr, + stats: make(map[string]int), + } + + manifestDigest, err := populateRepo(ctx, t, truthRepo, name, tag) + if err != nil { + t.Fatalf(err.Error()) + } + + localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k)) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + localRepo, err := localRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + lr, err := localRepo.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + + localManifests := statsManifest{ + manifests: lr, + stats: make(map[string]int), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + return &manifestStoreTestEnv{ + manifestDigest: manifestDigest, + manifests: proxyManifestStore{ + ctx: ctx, + localManifests: localManifests, + remoteManifests: truthManifests, + scheduler: s, + repositoryName: nameRef, + authChallenger: &mockChallenger{}, + }, + } +} + +func populateRepo(ctx context.Context, t *testing.T, repository distribution.Repository, name, tag string) (digest.Digest, error) { + m := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: name, + Tag: tag, + } + + for i := 0; i < 2; i++ { + wr, err := repository.Blobs(ctx).Create(ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + rs, ts, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file") + } + dgst := digest.Digest(ts) + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm, err := schema1.Sign(&m, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + ms, err := repository.Manifests(ctx) + if err != nil { + t.Fatalf(err.Error()) + } + dgst, err := ms.Put(ctx, sm) + if err != nil { + t.Fatalf("unexpected errors putting manifest: %v", err) + } + + return dgst, nil +} + +// TestProxyManifests contains basic acceptance tests +// for the pull-through behavior +func TestProxyManifests(t *testing.T) { + name := "foo/bar" + env := newManifestStoreTestEnv(t, name, "latest") + + localStats := env.LocalStats() + remoteStats := env.RemoteStats() + + ctx := context.Background() + // Stat - must check local and remote + exists, err := env.manifests.Exists(ctx, env.manifestDigest) + if err != nil { + t.Fatalf("Error checking existence") + } + if !exists { + t.Errorf("Unexpected non-existant manifest") + } + + if (*localStats)["exists"] != 1 && (*remoteStats)["exists"] != 1 { + t.Errorf("Unexpected exists count : \n%v \n%v", localStats, remoteStats) + } + + if env.manifests.authChallenger.(*mockChallenger).count != 1 { + t.Fatalf("Expected 1 auth challenge, got %#v", env.manifests.authChallenger) + } + + // Get - should succeed and pull manifest into local + _, err = env.manifests.Get(ctx, env.manifestDigest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected get count") + } + + if (*localStats)["put"] != 1 { + t.Errorf("Expected local put") + } + + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) + } + + // Stat - should only go to local + exists, err = env.manifests.Exists(ctx, env.manifestDigest) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("Unexpected non-existant manifest") + } + + if (*localStats)["exists"] != 2 && (*remoteStats)["exists"] != 1 { + t.Errorf("Unexpected exists count") + } + + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) + } + + // Get proxied - won't require another authchallenge + _, err = env.manifests.Get(ctx, env.manifestDigest) + if err != nil { + t.Fatal(err) + } + + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) + } + +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go b/vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go new file mode 100644 index 0000000..d3d84d7 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go @@ -0,0 +1,74 @@ +package proxy + +import ( + "expvar" + "sync/atomic" +) + +// Metrics is used to hold metric counters +// related to the proxy +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 + BytesPulled uint64 + BytesPushed uint64 +} + +type proxyMetricsCollector struct { + blobMetrics Metrics + manifestMetrics Metrics +} + +// BlobPull tracks metrics about blobs pulled into the cache +func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) { + atomic.AddUint64(&pmc.blobMetrics.Misses, 1) + atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled) +} + +// BlobPush tracks metrics about blobs pushed to clients +func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) { + atomic.AddUint64(&pmc.blobMetrics.Requests, 1) + atomic.AddUint64(&pmc.blobMetrics.Hits, 1) + atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed) +} + +// ManifestPull tracks metrics related to Manifests pulled into the cache +func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) { + atomic.AddUint64(&pmc.manifestMetrics.Misses, 1) + atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled) +} + +// ManifestPush tracks metrics about manifests pushed to clients +func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) { + atomic.AddUint64(&pmc.manifestMetrics.Requests, 1) + atomic.AddUint64(&pmc.manifestMetrics.Hits, 1) + atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed) +} + +// proxyMetrics tracks metrics about the proxy cache. This is +// kept globally and made available via expvar. +var proxyMetrics = &proxyMetricsCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + pm := registry.(*expvar.Map).Get("proxy") + if pm == nil { + pm = &expvar.Map{} + pm.(*expvar.Map).Init() + registry.(*expvar.Map).Set("proxy", pm) + } + + pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { + return proxyMetrics.blobMetrics + })) + + pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { + return proxyMetrics.manifestMetrics + })) + +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go b/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go new file mode 100644 index 0000000..d64dcbb --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go @@ -0,0 +1,249 @@ +package proxy + +import ( + "fmt" + "net/http" + "net/url" + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver" +) + +// proxyingRegistry fetches content from a remote registry and caches it locally +type proxyingRegistry struct { + embedded distribution.Namespace // provides local registry functionality + scheduler *scheduler.TTLExpirationScheduler + remoteURL url.URL + authChallenger authChallenger +} + +// NewRegistryPullThroughCache creates a registry acting as a pull through cache +func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { + remoteURL, err := url.Parse(config.RemoteURL) + if err != nil { + return nil, err + } + + v := storage.NewVacuum(ctx, driver) + s := scheduler.New(ctx, driver, "/scheduler-state.json") + s.OnBlobExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + blobs := repo.Blobs(ctx) + + // Clear the repository reference and descriptor caches + err = blobs.Delete(ctx, r.Digest()) + if err != nil { + return err + } + + err = v.RemoveBlob(r.Digest().String()) + if err != nil { + return err + } + + return nil + }) + + s.OnManifestExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + manifests, err := repo.Manifests(ctx) + if err != nil { + return err + } + err = manifests.Delete(ctx, r.Digest()) + if err != nil { + return err + } + return nil + }) + + err = s.Start() + if err != nil { + return nil, err + } + + cs, err := configureAuth(config.Username, config.Password, config.RemoteURL) + if err != nil { + return nil, err + } + + return &proxyingRegistry{ + embedded: registry, + scheduler: s, + remoteURL: *remoteURL, + authChallenger: &remoteAuthChallenger{ + remoteURL: *remoteURL, + cm: challenge.NewSimpleManager(), + cs: cs, + }, + }, nil +} + +func (pr *proxyingRegistry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + return pr.embedded.Repositories(ctx, repos, last) +} + +func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) { + c := pr.authChallenger + + tr := transport.NewTransport(http.DefaultTransport, + auth.NewAuthorizer(c.challengeManager(), auth.NewTokenHandler(http.DefaultTransport, c.credentialStore(), name.Name(), "pull"))) + + localRepo, err := pr.embedded.Repository(ctx, name) + if err != nil { + return nil, err + } + localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification()) + if err != nil { + return nil, err + } + + remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL.String(), tr) + if err != nil { + return nil, err + } + + remoteManifests, err := remoteRepo.Manifests(ctx) + if err != nil { + return nil, err + } + + return &proxiedRepository{ + blobStore: &proxyBlobStore{ + localStore: localRepo.Blobs(ctx), + remoteStore: remoteRepo.Blobs(ctx), + scheduler: pr.scheduler, + repositoryName: name, + authChallenger: pr.authChallenger, + }, + manifests: &proxyManifestStore{ + repositoryName: name, + localManifests: localManifests, // Options? + remoteManifests: remoteManifests, + ctx: ctx, + scheduler: pr.scheduler, + authChallenger: pr.authChallenger, + }, + name: name, + tags: &proxyTagService{ + localTags: localRepo.Tags(ctx), + remoteTags: remoteRepo.Tags(ctx), + authChallenger: pr.authChallenger, + }, + }, nil +} + +func (pr *proxyingRegistry) Blobs() distribution.BlobEnumerator { + return pr.embedded.Blobs() +} + +func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter { + return pr.embedded.BlobStatter() +} + +// authChallenger encapsulates a request to the upstream to establish credential challenges +type authChallenger interface { + tryEstablishChallenges(context.Context) error + challengeManager() challenge.Manager + credentialStore() auth.CredentialStore +} + +type remoteAuthChallenger struct { + remoteURL url.URL + sync.Mutex + cm challenge.Manager + cs auth.CredentialStore +} + +func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore { + return r.cs +} + +func (r *remoteAuthChallenger) challengeManager() challenge.Manager { + return r.cm +} + +// tryEstablishChallenges will attempt to get a challenge type for the upstream if none currently exist +func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error { + r.Lock() + defer r.Unlock() + + remoteURL := r.remoteURL + remoteURL.Path = "/v2/" + challenges, err := r.cm.GetChallenges(remoteURL) + if err != nil { + return err + } + + if len(challenges) > 0 { + return nil + } + + // establish challenge type with upstream + if err := ping(r.cm, remoteURL.String(), challengeHeader); err != nil { + return err + } + + context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm) + return nil +} + +// proxiedRepository uses proxying blob and manifest services to serve content +// locally, or pulling it through from a remote and caching it locally if it doesn't +// already exist +type proxiedRepository struct { + blobStore distribution.BlobStore + manifests distribution.ManifestService + name reference.Named + tags distribution.TagService +} + +func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + return pr.manifests, nil +} + +func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { + return pr.blobStore +} + +func (pr *proxiedRepository) Named() reference.Named { + return pr.name +} + +func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService { + return pr.tags +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxytagservice.go b/vendor/github.com/docker/distribution/registry/proxy/proxytagservice.go new file mode 100644 index 0000000..a827303 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxytagservice.go @@ -0,0 +1,65 @@ +package proxy + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// proxyTagService supports local and remote lookup of tags. +type proxyTagService struct { + localTags distribution.TagService + remoteTags distribution.TagService + authChallenger authChallenger +} + +var _ distribution.TagService = proxyTagService{} + +// Get attempts to get the most recent digest for the tag by checking the remote +// tag service first and then caching it locally. If the remote is unavailable +// the local association is returned +func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + err := pt.authChallenger.tryEstablishChallenges(ctx) + if err == nil { + desc, err := pt.remoteTags.Get(ctx, tag) + if err == nil { + err := pt.localTags.Tag(ctx, tag, desc) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + } + + desc, err := pt.localTags.Get(ctx, tag) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil +} + +func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} + +func (pt proxyTagService) Untag(ctx context.Context, tag string) error { + err := pt.localTags.Untag(ctx, tag) + if err != nil { + return err + } + return nil +} + +func (pt proxyTagService) All(ctx context.Context) ([]string, error) { + err := pt.authChallenger.tryEstablishChallenges(ctx) + if err == nil { + tags, err := pt.remoteTags.All(ctx) + if err == nil { + return tags, err + } + } + return pt.localTags.All(ctx) +} + +func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + return []string{}, distribution.ErrUnsupported +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxytagservice_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxytagservice_test.go new file mode 100644 index 0000000..ce0fe78 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxytagservice_test.go @@ -0,0 +1,182 @@ +package proxy + +import ( + "reflect" + "sort" + "sync" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type mockTagStore struct { + mapping map[string]distribution.Descriptor + sync.Mutex +} + +var _ distribution.TagService = &mockTagStore{} + +func (m *mockTagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + m.Lock() + defer m.Unlock() + + if d, ok := m.mapping[tag]; ok { + return d, nil + } + return distribution.Descriptor{}, distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + m.Lock() + defer m.Unlock() + + m.mapping[tag] = desc + return nil +} + +func (m *mockTagStore) Untag(ctx context.Context, tag string) error { + m.Lock() + defer m.Unlock() + + if _, ok := m.mapping[tag]; ok { + delete(m.mapping, tag) + return nil + } + return distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) All(ctx context.Context) ([]string, error) { + m.Lock() + defer m.Unlock() + + var tags []string + for tag := range m.mapping { + tags = append(tags, tag) + } + + return tags, nil +} + +func (m *mockTagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func testProxyTagService(local, remote map[string]distribution.Descriptor) *proxyTagService { + if local == nil { + local = make(map[string]distribution.Descriptor) + } + if remote == nil { + remote = make(map[string]distribution.Descriptor) + } + return &proxyTagService{ + localTags: &mockTagStore{mapping: local}, + remoteTags: &mockTagStore{mapping: remote}, + authChallenger: &mockChallenger{}, + } +} + +func TestGet(t *testing.T) { + remoteDesc := distribution.Descriptor{Size: 42} + remoteTag := "remote" + proxyTags := testProxyTagService(map[string]distribution.Descriptor{remoteTag: remoteDesc}, nil) + + ctx := context.Background() + + // Get pre-loaded tag + d, err := proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + if proxyTags.authChallenger.(*mockChallenger).count != 1 { + t.Fatalf("Expected 1 auth challenge call, got %#v", proxyTags.authChallenger) + } + + if !reflect.DeepEqual(d, remoteDesc) { + t.Fatal("unable to get put tag") + } + + local, err := proxyTags.localTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("remote tag not pulled into store") + } + + if !reflect.DeepEqual(local, remoteDesc) { + t.Fatalf("unexpected descriptor pulled through") + } + + // Manually overwrite remote tag + newRemoteDesc := distribution.Descriptor{Size: 43} + err = proxyTags.remoteTags.Tag(ctx, remoteTag, newRemoteDesc) + if err != nil { + t.Fatal(err) + } + + d, err = proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + if proxyTags.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenge calls, got %#v", proxyTags.authChallenger) + } + + if !reflect.DeepEqual(d, newRemoteDesc) { + t.Fatal("unable to get put tag") + } + + _, err = proxyTags.localTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("remote tag not pulled into store") + } + + // untag, ensure it's removed locally, but present in remote + err = proxyTags.Untag(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + _, err = proxyTags.localTags.Get(ctx, remoteTag) + if err == nil { + t.Fatalf("Expected error getting Untag'd tag") + } + + _, err = proxyTags.remoteTags.Get(ctx, remoteTag) + if err != nil { + t.Fatalf("remote tag should not be untagged with proxyTag.Untag") + } + + _, err = proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("untagged tag should be pulled through") + } + + if proxyTags.authChallenger.(*mockChallenger).count != 3 { + t.Fatalf("Expected 3 auth challenge calls, got %#v", proxyTags.authChallenger) + } + + // Add another tag. Ensure both tags appear in 'All' + err = proxyTags.remoteTags.Tag(ctx, "funtag", distribution.Descriptor{Size: 42}) + if err != nil { + t.Fatal(err) + } + + all, err := proxyTags.All(ctx) + if err != nil { + t.Fatal(err) + } + + if len(all) != 2 { + t.Fatalf("Unexpected tag length returned from All() : %d ", len(all)) + } + + sort.Strings(all) + if all[0] != "funtag" && all[1] != "remote" { + t.Fatalf("Unexpected tags returned from All() : %v ", all) + } + + if proxyTags.authChallenger.(*mockChallenger).count != 4 { + t.Fatalf("Expected 4 auth challenge calls, got %#v", proxyTags.authChallenger) + } +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go new file mode 100644 index 0000000..bde9465 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go @@ -0,0 +1,259 @@ +package scheduler + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" +) + +// onTTLExpiryFunc is called when a repository's TTL expires +type expiryFunc func(reference.Reference) error + +const ( + entryTypeBlob = iota + entryTypeManifest + indexSaveFrequency = 5 * time.Second +) + +// schedulerEntry represents an entry in the scheduler +// fields are exported for serialization +type schedulerEntry struct { + Key string `json:"Key"` + Expiry time.Time `json:"ExpiryData"` + EntryType int `json:"EntryType"` + + timer *time.Timer +} + +// New returns a new instance of the scheduler +func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler { + return &TTLExpirationScheduler{ + entries: make(map[string]*schedulerEntry), + driver: driver, + pathToStateFile: path, + ctx: ctx, + stopped: true, + doneChan: make(chan struct{}), + saveTimer: time.NewTicker(indexSaveFrequency), + } +} + +// TTLExpirationScheduler is a scheduler used to perform actions +// when TTLs expire +type TTLExpirationScheduler struct { + sync.Mutex + + entries map[string]*schedulerEntry + + driver driver.StorageDriver + ctx context.Context + pathToStateFile string + + stopped bool + + onBlobExpire expiryFunc + onManifestExpire expiryFunc + + indexDirty bool + saveTimer *time.Ticker + doneChan chan struct{} +} + +// OnBlobExpire is called when a scheduled blob's TTL expires +func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + + ttles.onBlobExpire = f +} + +// OnManifestExpire is called when a scheduled manifest's TTL expires +func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + + ttles.onManifestExpire = f +} + +// AddBlob schedules a blob cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + + ttles.add(blobRef, ttl, entryTypeBlob) + return nil +} + +// AddManifest schedules a manifest cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + + ttles.add(manifestRef, ttl, entryTypeManifest) + return nil +} + +// Start starts the scheduler +func (ttles *TTLExpirationScheduler) Start() error { + ttles.Lock() + defer ttles.Unlock() + + err := ttles.readState() + if err != nil { + return err + } + + if !ttles.stopped { + return fmt.Errorf("Scheduler already started") + } + + context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") + ttles.stopped = false + + // Start timer for each deserialized entry + for _, entry := range ttles.entries { + entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) + } + + // Start a ticker to periodically save the entries index + + go func() { + for { + select { + case <-ttles.saveTimer.C: + ttles.Lock() + if !ttles.indexDirty { + ttles.Unlock() + continue + } + + err := ttles.writeState() + if err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } else { + ttles.indexDirty = false + } + ttles.Unlock() + + case <-ttles.doneChan: + return + } + } + }() + + return nil +} + +func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) { + entry := &schedulerEntry{ + Key: r.String(), + Expiry: time.Now().Add(ttl), + EntryType: eType, + } + context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) + if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil { + oldEntry.timer.Stop() + } + ttles.entries[entry.Key] = entry + entry.timer = ttles.startTimer(entry, ttl) + ttles.indexDirty = true +} + +func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { + return time.AfterFunc(ttl, func() { + ttles.Lock() + defer ttles.Unlock() + + var f expiryFunc + + switch entry.EntryType { + case entryTypeBlob: + f = ttles.onBlobExpire + case entryTypeManifest: + f = ttles.onManifestExpire + default: + f = func(reference.Reference) error { + return fmt.Errorf("scheduler entry type") + } + } + + ref, err := reference.Parse(entry.Key) + if err == nil { + if err := f(ref); err != nil { + context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) + } + } else { + context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) + } + + delete(ttles.entries, entry.Key) + ttles.indexDirty = true + }) +} + +// Stop stops the scheduler. +func (ttles *TTLExpirationScheduler) Stop() { + ttles.Lock() + defer ttles.Unlock() + + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + + for _, entry := range ttles.entries { + entry.timer.Stop() + } + + close(ttles.doneChan) + ttles.saveTimer.Stop() + ttles.stopped = true +} + +func (ttles *TTLExpirationScheduler) writeState() error { + jsonBytes, err := json.Marshal(ttles.entries) + if err != nil { + return err + } + + err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes) + if err != nil { + return err + } + + return nil +} + +func (ttles *TTLExpirationScheduler) readState() error { + if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil + default: + return err + } + } + + bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile) + if err != nil { + return err + } + + err = json.Unmarshal(bytes, &ttles.entries) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go new file mode 100644 index 0000000..4d69d5b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go @@ -0,0 +1,211 @@ +package scheduler + +import ( + "encoding/json" + "sync" + "testing" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) { + ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + return ref1, ref2, ref3 +} + +func TestSchedule(t *testing.T) { + ref1, ref2, ref3 := testRefs(t) + timeUnit := time.Millisecond + remainingRepos := map[string]bool{ + ref1.String(): true, + ref2.String(): true, + ref3.String(): true, + } + + var mu sync.Mutex + s := New(context.Background(), inmemory.New(), "/ttl") + deleteFunc := func(repoName reference.Reference) error { + if len(remainingRepos) == 0 { + t.Fatalf("Incorrect expiry count") + } + _, ok := remainingRepos[repoName.String()] + if !ok { + t.Fatalf("Trying to remove nonexistent repo: %s", repoName) + } + t.Log("removing", repoName) + mu.Lock() + delete(remainingRepos, repoName.String()) + mu.Unlock() + + return nil + } + s.onBlobExpire = deleteFunc + err := s.Start() + if err != nil { + t.Fatalf("Error starting ttlExpirationScheduler: %s", err) + } + + s.add(ref1, 3*timeUnit, entryTypeBlob) + s.add(ref2, 1*timeUnit, entryTypeBlob) + + func() { + s.Lock() + s.add(ref3, 1*timeUnit, entryTypeBlob) + s.Unlock() + + }() + + // Ensure all repos are deleted + <-time.After(50 * timeUnit) + + mu.Lock() + defer mu.Unlock() + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } +} + +func TestRestoreOld(t *testing.T) { + ref1, ref2, _ := testRefs(t) + remainingRepos := map[string]bool{ + ref1.String(): true, + ref2.String(): true, + } + + var wg sync.WaitGroup + wg.Add(len(remainingRepos)) + var mu sync.Mutex + deleteFunc := func(r reference.Reference) error { + mu.Lock() + defer mu.Unlock() + if r.String() == ref1.String() && len(remainingRepos) == 2 { + t.Errorf("ref1 should not be removed first") + } + _, ok := remainingRepos[r.String()] + if !ok { + t.Fatalf("Trying to remove nonexistent repo: %s", r) + } + delete(remainingRepos, r.String()) + wg.Done() + return nil + } + + timeUnit := time.Millisecond + serialized, err := json.Marshal(&map[string]schedulerEntry{ + ref1.String(): { + Expiry: time.Now().Add(10 * timeUnit), + Key: ref1.String(), + EntryType: 0, + }, + ref2.String(): { + Expiry: time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first + Key: ref2.String(), + EntryType: 0, + }, + }) + if err != nil { + t.Fatalf("Error serializing test data: %s", err.Error()) + } + + ctx := context.Background() + pathToStatFile := "/ttl" + fs := inmemory.New() + err = fs.PutContent(ctx, pathToStatFile, serialized) + if err != nil { + t.Fatal("Unable to write serialized data to fs") + } + s := New(context.Background(), fs, "/ttl") + s.OnBlobExpire(deleteFunc) + err = s.Start() + if err != nil { + t.Fatalf("Error starting ttlExpirationScheduler: %s", err) + } + defer s.Stop() + + wg.Wait() + mu.Lock() + defer mu.Unlock() + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } +} + +func TestStopRestore(t *testing.T) { + ref1, ref2, _ := testRefs(t) + + timeUnit := time.Millisecond + remainingRepos := map[string]bool{ + ref1.String(): true, + ref2.String(): true, + } + + var mu sync.Mutex + deleteFunc := func(r reference.Reference) error { + mu.Lock() + delete(remainingRepos, r.String()) + mu.Unlock() + return nil + } + + fs := inmemory.New() + pathToStateFile := "/ttl" + s := New(context.Background(), fs, pathToStateFile) + s.onBlobExpire = deleteFunc + + err := s.Start() + if err != nil { + t.Fatalf(err.Error()) + } + s.add(ref1, 300*timeUnit, entryTypeBlob) + s.add(ref2, 100*timeUnit, entryTypeBlob) + + // Start and stop before all operations complete + // state will be written to fs + s.Stop() + time.Sleep(10 * time.Millisecond) + + // v2 will restore state from fs + s2 := New(context.Background(), fs, pathToStateFile) + s2.onBlobExpire = deleteFunc + err = s2.Start() + if err != nil { + t.Fatalf("Error starting v2: %s", err.Error()) + } + + <-time.After(500 * timeUnit) + mu.Lock() + defer mu.Unlock() + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } + +} + +func TestDoubleStart(t *testing.T) { + s := New(context.Background(), inmemory.New(), "/ttl") + err := s.Start() + if err != nil { + t.Fatalf("Unable to start scheduler") + } + err = s.Start() + if err == nil { + t.Fatalf("Scheduler started twice without error") + } +} diff --git a/vendor/github.com/docker/distribution/registry/registry.go b/vendor/github.com/docker/distribution/registry/registry.go new file mode 100644 index 0000000..2adcb1e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/registry.go @@ -0,0 +1,356 @@ +package registry + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + "time" + + "rsc.io/letsencrypt" + + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/formatters/logstash" + "github.com/bugsnag/bugsnag-go" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/listener" + "github.com/docker/distribution/uuid" + "github.com/docker/distribution/version" + gorhandlers "github.com/gorilla/handlers" + "github.com/spf13/cobra" + "github.com/yvasiyarov/gorelic" +) + +// ServeCmd is a cobra command for running the registry. +var ServeCmd = &cobra.Command{ + Use: "serve ", + Short: "`serve` stores and distributes Docker images", + Long: "`serve` stores and distributes Docker images.", + Run: func(cmd *cobra.Command, args []string) { + + // setup context + ctx := context.WithVersion(context.Background(), version.Version) + + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + if config.HTTP.Debug.Addr != "" { + go func(addr string) { + log.Infof("debug server listening %v", addr) + if err := http.ListenAndServe(addr, nil); err != nil { + log.Fatalf("error listening on debug interface: %v", err) + } + }(config.HTTP.Debug.Addr) + } + + registry, err := NewRegistry(ctx, config) + if err != nil { + log.Fatalln(err) + } + + if err = registry.ListenAndServe(); err != nil { + log.Fatalln(err) + } + }, +} + +// A Registry represents a complete instance of the registry. +// TODO(aaronl): It might make sense for Registry to become an interface. +type Registry struct { + config *configuration.Configuration + app *handlers.App + server *http.Server +} + +// NewRegistry creates a new registry from a context and configuration struct. +func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { + var err error + ctx, err = configureLogging(ctx, config) + if err != nil { + return nil, fmt.Errorf("error configuring logger: %v", err) + } + + // inject a logger into the uuid library. warns us if there is a problem + // with uuid generation under low entropy. + uuid.Loggerf = context.GetLogger(ctx).Warnf + + app := handlers.NewApp(ctx, config) + // TODO(aaronl): The global scope of the health checks means NewRegistry + // can only be called once per process. + app.RegisterHealthChecks() + handler := configureReporting(app) + handler = alive("/", handler) + handler = health.Handler(handler) + handler = panicHandler(handler) + if !config.Log.AccessLog.Disabled { + handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) + } + + server := &http.Server{ + Handler: handler, + } + + return &Registry{ + app: app, + config: config, + server: server, + }, nil +} + +// ListenAndServe runs the registry's HTTP server. +func (registry *Registry) ListenAndServe() error { + config := registry.config + + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) + if err != nil { + return err + } + + if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + tlsConf := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: nextProtos(config), + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + if config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + if config.HTTP.TLS.Certificate != "" { + return fmt.Errorf("cannot specify both certificate and Let's Encrypt") + } + var m letsencrypt.Manager + if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil { + return err + } + if !m.Registered() { + if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil { + return err + } + } + tlsConf.GetCertificate = m.GetCertificate + } else { + tlsConf.Certificates = make([]tls.Certificate, 1) + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) + if err != nil { + return err + } + } + + if len(config.HTTP.TLS.ClientCAs) != 0 { + pool := x509.NewCertPool() + + for _, ca := range config.HTTP.TLS.ClientCAs { + caPem, err := ioutil.ReadFile(ca) + if err != nil { + return err + } + + if ok := pool.AppendCertsFromPEM(caPem); !ok { + return fmt.Errorf("Could not add CA to pool") + } + } + + for _, subj := range pool.Subjects() { + context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) + } + + tlsConf.ClientAuth = tls.RequireAndVerifyClientCert + tlsConf.ClientCAs = pool + } + + ln = tls.NewListener(ln, tlsConf) + context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) + } else { + context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) + } + + return registry.server.Serve(ln) +} + +func configureReporting(app *handlers.App) http.Handler { + var handler http.Handler = app + + if app.Config.Reporting.Bugsnag.APIKey != "" { + bugsnagConfig := bugsnag.Configuration{ + APIKey: app.Config.Reporting.Bugsnag.APIKey, + // TODO(brianbland): provide the registry version here + // AppVersion: "2.0", + } + if app.Config.Reporting.Bugsnag.ReleaseStage != "" { + bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage + } + if app.Config.Reporting.Bugsnag.Endpoint != "" { + bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint + } + bugsnag.Configure(bugsnagConfig) + + handler = bugsnag.Handler(handler) + } + + if app.Config.Reporting.NewRelic.LicenseKey != "" { + agent := gorelic.NewAgent() + agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey + if app.Config.Reporting.NewRelic.Name != "" { + agent.NewrelicName = app.Config.Reporting.NewRelic.Name + } + agent.CollectHTTPStat = true + agent.Verbose = app.Config.Reporting.NewRelic.Verbose + agent.Run() + + handler = agent.WrapHTTPHandler(handler) + } + + return handler +} + +// configureLogging prepares the context with a logger using the +// configuration. +func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { + if config.Log.Level == "" && config.Log.Formatter == "" { + // If no config for logging is set, fallback to deprecated "Loglevel". + log.SetLevel(logLevel(config.Loglevel)) + ctx = context.WithLogger(ctx, context.GetLogger(ctx)) + return ctx, nil + } + + log.SetLevel(logLevel(config.Log.Level)) + + formatter := config.Log.Formatter + if formatter == "" { + formatter = "text" // default formatter + } + + switch formatter { + case "json": + log.SetFormatter(&log.JSONFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "text": + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "logstash": + log.SetFormatter(&logstash.LogstashFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + default: + // just let the library use default on empty string. + if config.Log.Formatter != "" { + return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) + } + } + + if config.Log.Formatter != "" { + log.Debugf("using %q logging formatter", config.Log.Formatter) + } + + if len(config.Log.Fields) > 0 { + // build up the static fields, if present. + var fields []interface{} + for k := range config.Log.Fields { + fields = append(fields, k) + } + + ctx = context.WithValues(ctx, config.Log.Fields) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) + } + + return ctx, nil +} + +func logLevel(level configuration.Loglevel) log.Level { + l, err := log.ParseLevel(string(level)) + if err != nil { + l = log.InfoLevel + log.Warnf("error parsing level %q: %v, using %q ", level, err, l) + } + + return l +} + +// panicHandler add an HTTP handler to web app. The handler recover the happening +// panic. logrus.Panic transmits panic message to pre-config log hooks, which is +// defined in config.yml. +func panicHandler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + log.Panic(fmt.Sprintf("%v", err)) + } + }() + handler.ServeHTTP(w, r) + }) +} + +// alive simply wraps the handler with a route that always returns an http 200 +// response when the path is matched. If the path is not matched, the request +// is passed to the provided handler. There is no guarantee of anything but +// that the server is up. Wrap with other handlers (such as health.Handler) +// for greater affect. +func alive(path string, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == path { + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + return + } + + handler.ServeHTTP(w, r) + }) +} + +func resolveConfiguration(args []string) (*configuration.Configuration, error) { + var configurationPath string + + if len(args) > 0 { + configurationPath = args[0] + } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { + configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") + } + + if configurationPath == "" { + return nil, fmt.Errorf("configuration path unspecified") + } + + fp, err := os.Open(configurationPath) + if err != nil { + return nil, err + } + + defer fp.Close() + + config, err := configuration.Parse(fp) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) + } + + return config, nil +} + +func nextProtos(config *configuration.Configuration) []string { + switch config.HTTP.HTTP2.Disabled { + case true: + return []string{"http/1.1"} + default: + return []string{"h2", "http/1.1"} + } +} diff --git a/vendor/github.com/docker/distribution/registry/registry_test.go b/vendor/github.com/docker/distribution/registry/registry_test.go new file mode 100644 index 0000000..3467311 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/registry_test.go @@ -0,0 +1,30 @@ +package registry + +import ( + "reflect" + "testing" + + "github.com/docker/distribution/configuration" +) + +// Tests to ensure nextProtos returns the correct protocols when: +// * config.HTTP.HTTP2.Disabled is not explicitly set => [h2 http/1.1] +// * config.HTTP.HTTP2.Disabled is explicitly set to false [h2 http/1.1] +// * config.HTTP.HTTP2.Disabled is explicitly set to true [http/1.1] +func TestNextProtos(t *testing.T) { + config := &configuration.Configuration{} + protos := nextProtos(config) + if !reflect.DeepEqual(protos, []string{"h2", "http/1.1"}) { + t.Fatalf("expected protos to equal [h2 http/1.1], got %s", protos) + } + config.HTTP.HTTP2.Disabled = false + protos = nextProtos(config) + if !reflect.DeepEqual(protos, []string{"h2", "http/1.1"}) { + t.Fatalf("expected protos to equal [h2 http/1.1], got %s", protos) + } + config.HTTP.HTTP2.Disabled = true + protos = nextProtos(config) + if !reflect.DeepEqual(protos, []string{"http/1.1"}) { + t.Fatalf("expected protos to equal [http/1.1], got %s", protos) + } +} diff --git a/vendor/github.com/docker/distribution/registry/root.go b/vendor/github.com/docker/distribution/registry/root.go new file mode 100644 index 0000000..5d3005c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/root.go @@ -0,0 +1,84 @@ +package registry + +import ( + "fmt" + "os" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" + "github.com/docker/libtrust" + "github.com/spf13/cobra" +) + +var showVersion bool + +func init() { + RootCmd.AddCommand(ServeCmd) + RootCmd.AddCommand(GCCmd) + GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs") + RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") +} + +// RootCmd is the main command for the 'registry' binary. +var RootCmd = &cobra.Command{ + Use: "registry", + Short: "`registry`", + Long: "`registry`", + Run: func(cmd *cobra.Command, args []string) { + if showVersion { + version.PrintVersion() + return + } + cmd.Usage() + }, +} + +var dryRun bool + +// GCCmd is the cobra command that corresponds to the garbage-collect subcommand +var GCCmd = &cobra.Command{ + Use: "garbage-collect ", + Short: "`garbage-collect` deletes layers not referenced by any manifests", + Long: "`garbage-collect` deletes layers not referenced by any manifests", + Run: func(cmd *cobra.Command, args []string) { + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters()) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err) + os.Exit(1) + } + + ctx := context.Background() + ctx, err = configureLogging(ctx, config) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err) + os.Exit(1) + } + + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + + registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err) + os.Exit(1) + } + + err = storage.MarkAndSweep(ctx, driver, registry, dryRun) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) + os.Exit(1) + } + }, +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blob_test.go b/vendor/github.com/docker/distribution/registry/storage/blob_test.go new file mode 100644 index 0000000..767526b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blob_test.go @@ -0,0 +1,614 @@ +package storage + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/testdriver" + "github.com/docker/distribution/testutil" +) + +// TestWriteSeek tests that the current file size can be +// obtained using Seek +func TestWriteSeek(t *testing.T) { + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + blobUpload, err := bs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + contents := []byte{1, 2, 3} + blobUpload.Write(contents) + blobUpload.Close() + offset := blobUpload.Size() + if offset != int64(len(contents)) { + t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents)) + } + +} + +// TestSimpleBlobUpload covers the blob upload process, exercising common +// error paths that might be seen during an upload. +func TestSimpleBlobUpload(t *testing.T) { + randomDataReader, dgst, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + h := sha256.New() + rd := io.TeeReader(randomDataReader, h) + + blobUpload, err := bs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Cancel the upload then restart it + if err := blobUpload.Cancel(ctx); err != nil { + t.Fatalf("unexpected error during upload cancellation: %v", err) + } + + // get the enclosing directory + uploadPath := path.Dir(blobUpload.(*blobWriter).path) + + // ensure state was cleaned up + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after cleanup") + } + + // Do a resume, get unknown upload + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) + } + + // Restart! + blobUpload, err = bs.Create(ctx) + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(blobUpload, rd) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("layer data write incomplete") + } + + blobUpload.Close() + + offset := blobUpload.Size() + if offset != nn { + t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) + } + + // Do a resume, for good fun + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + if err != nil { + t.Fatalf("unexpected error resuming upload: %v", err) + } + + sha256Digest := digest.NewDigest("sha256", h) + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // ensure state was cleaned up + uploadPath = path.Dir(blobUpload.(*blobWriter).path) + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after commit") + } + + // After finishing an upload, it should no longer exist. + if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { + t.Fatalf("expected layer upload to be unknown, got %v", err) + } + + // Test for existence. + statDesc, err := bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) + } + + if !reflect.DeepEqual(statDesc, desc) { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + rc, err := bs.Open(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error opening blob for read: %v", err) + } + defer rc.Close() + + h.Reset() + nn, err = io.Copy(h, rc) + if err != nil { + t.Fatalf("error reading layer: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("incorrect read length") + } + + if digest.NewDigest("sha256", h) != sha256Digest { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) + } + + // Delete a blob + err = bs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err := bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } + + _, err = bs.Open(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected success opening deleted blob for read") + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type getting deleted manifest: %#v", err) + } + + // Re-upload the blob + randomBlob, err := ioutil.ReadAll(randomDataReader) + if err != nil { + t.Fatalf("Error reading all of blob %s", err.Error()) + } + expectedDigest := digest.FromBytes(randomBlob) + simpleUpload(t, bs, randomBlob, expectedDigest) + + d, err = bs.Stat(ctx, expectedDigest) + if err != nil { + t.Errorf("unexpected error stat-ing blob") + } + if d.Digest != expectedDigest { + t.Errorf("Mismatching digest with restored blob") + } + + _, err = bs.Open(ctx, expectedDigest) + if err != nil { + t.Errorf("Unexpected error opening blob") + } + + // Reuse state to test delete with a delete-disabled registry + registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err = registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs = repository.Blobs(ctx) + err = bs.Delete(ctx, desc.Digest) + if err == nil { + t.Errorf("Unexpected success deleting while disabled") + } +} + +// TestSimpleBlobRead just creates a simple blob file and ensures that basic +// open, read, seek, read works. More specific edge cases should be covered in +// other tests. +func TestSimpleBlobRead(t *testing.T) { + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. + if err != nil { + t.Fatalf("error creating random data: %v", err) + } + + // Test for existence. + desc, err := bs.Stat(ctx, dgst) + if err != distribution.ErrBlobUnknown { + t.Fatalf("expected not found error when testing for existence: %v", err) + } + + rc, err := bs.Open(ctx, dgst) + if err != distribution.ErrBlobUnknown { + t.Fatalf("expected not found error when opening non-existent blob: %v", err) + } + + randomLayerSize, err := seekerSize(randomLayerReader) + if err != nil { + t.Fatalf("error getting seeker size for random layer: %v", err) + } + + descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize} + t.Logf("desc: %v", descBefore) + + desc, err = addBlob(ctx, bs, descBefore, randomLayerReader) + if err != nil { + t.Fatalf("error adding blob to blobservice: %v", err) + } + + if desc.Size != randomLayerSize { + t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize) + } + + rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. + if err != nil { + t.Fatalf("error opening blob with %v: %v", dgst, err) + } + defer rc.Close() + + // Now check the sha digest and ensure its the same + h := sha256.New() + nn, err := io.Copy(h, rc) + if err != nil { + t.Fatalf("unexpected error copying to hash: %v", err) + } + + if nn != randomLayerSize { + t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize) + } + + sha256Digest := digest.NewDigest("sha256", h) + if sha256Digest != desc.Digest { + t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest) + } + + // Now seek back the blob, read the whole thing and check against randomLayerData + offset, err := rc.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking blob: %v", err) + } + + if offset != 0 { + t.Fatalf("seek failed: expected 0 offset, got %d", offset) + } + + p, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatalf("error reading all of blob: %v", err) + } + + if len(p) != int(randomLayerSize) { + t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize) + } + + // Reset the randomLayerReader and read back the buffer + _, err = randomLayerReader.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error resetting layer reader: %v", err) + } + + randomLayerData, err := ioutil.ReadAll(randomLayerReader) + if err != nil { + t.Fatalf("random layer read failed: %v", err) + } + + if !bytes.Equal(p, randomLayerData) { + t.Fatalf("layer data not equal") + } +} + +// TestBlobMount covers the blob mount process, exercising common +// error paths that might be seen during a mount. +func TestBlobMount(t *testing.T) { + randomDataReader, dgst, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + sourceImageName, _ := reference.ParseNamed("foo/source") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + sourceRepository, err := registry.Repository(ctx, sourceImageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + sbs := sourceRepository.Blobs(ctx) + + blobUpload, err := sbs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(blobUpload, randomDataReader) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // Test for existence. + statDesc, err := sbs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) + } + + if !reflect.DeepEqual(statDesc, desc) { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + bs := repository.Blobs(ctx) + // Test destination for existence. + statDesc, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) + } + + canonicalRef, err := reference.WithDigest(sourceRepository.Named(), desc.Digest) + if err != nil { + t.Fatal(err) + } + + bw, err := bs.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatal("unexpected blobwriter returned from Create call, should mount instead") + } + + ebm, ok := err.(distribution.ErrBlobMounted) + if !ok { + t.Fatalf("unexpected error mounting layer: %v", err) + } + + if !reflect.DeepEqual(ebm.Descriptor, desc) { + t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) + } + + // Test for existence. + statDesc, err = bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) + } + + if !reflect.DeepEqual(statDesc, desc) { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + rc, err := bs.Open(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error opening blob for read: %v", err) + } + defer rc.Close() + + h := sha256.New() + nn, err = io.Copy(h, rc) + if err != nil { + t.Fatalf("error reading layer: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("incorrect read length") + } + + if digest.NewDigest("sha256", h) != dgst { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst) + } + + // Delete the blob from the source repo + err = sbs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err := bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error stating blob deleted from source repository: %v", err) + } + + d, err = sbs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } + + // Delete the blob from the dest repo + err = bs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } +} + +// TestLayerUploadZeroLength uploads zero-length +func TestLayerUploadZeroLength(t *testing.T) { + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) +} + +func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) { + ctx := context.Background() + wr, err := bs.Create(ctx) + if err != nil { + t.Fatalf("unexpected error starting upload: %v", err) + } + + nn, err := io.Copy(wr, bytes.NewReader(blob)) + if err != nil { + t.Fatalf("error copying into blob writer: %v", err) + } + + if nn != 0 { + t.Fatalf("unexpected number of bytes copied: %v > 0", nn) + } + + dgst, err := digest.FromReader(bytes.NewReader(blob)) + if err != nil { + t.Fatalf("error getting digest: %v", err) + } + + if dgst != expectedDigest { + // sanity check on zero digest + t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest) + } + + desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error committing write: %v", err) + } + + if desc.Digest != dgst { + t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst) + } +} + +// seekerSize seeks to the end of seeker, checks the size and returns it to +// the original state, returning the size. The state of the seeker should be +// treated as unknown if an error is returned. +func seekerSize(seeker io.ReadSeeker) (int64, error) { + current, err := seeker.Seek(0, os.SEEK_CUR) + if err != nil { + return 0, err + } + + end, err := seeker.Seek(0, os.SEEK_END) + if err != nil { + return 0, err + } + + resumed, err := seeker.Seek(current, os.SEEK_SET) + if err != nil { + return 0, err + } + + if resumed != current { + return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location") + } + + return end, nil +} + +// addBlob simply consumes the reader and inserts into the blob service, +// returning a descriptor on success. +func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) { + wr, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + defer wr.Cancel(ctx) + + if nn, err := io.Copy(wr, rd); err != nil { + return distribution.Descriptor{}, err + } else if nn != desc.Size { + return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size) + } + + return wr.Commit(ctx, desc) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go b/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go new file mode 100644 index 0000000..fad0a77 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go @@ -0,0 +1,60 @@ +package storage + +import ( + "expvar" + "sync/atomic" + + "github.com/docker/distribution/registry/storage/cache" +) + +type blobStatCollector struct { + metrics cache.Metrics +} + +func (bsc *blobStatCollector) Hit() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Hits, 1) +} + +func (bsc *blobStatCollector) Miss() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Misses, 1) +} + +func (bsc *blobStatCollector) Metrics() cache.Metrics { + return bsc.metrics +} + +// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor +// cache requests. Note this is kept globally and made available via expvar. +// For more detailed metrics, its recommend to instrument a particular cache +// implementation. +var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + cache := registry.(*expvar.Map).Get("cache") + if cache == nil { + cache = &expvar.Map{} + cache.(*expvar.Map).Init() + registry.(*expvar.Map).Set("cache", cache) + } + + storage := cache.(*expvar.Map).Get("storage") + if storage == nil { + storage = &expvar.Map{} + storage.(*expvar.Map).Init() + cache.(*expvar.Map).Set("storage", storage) + } + + storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { + // no need for synchronous access: the increments are atomic and + // during reading, we don't care if the data is up to date. The + // numbers will always *eventually* be reported correctly. + return blobStatterCacheMetrics + })) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobserver.go b/vendor/github.com/docker/distribution/registry/storage/blobserver.go new file mode 100644 index 0000000..2655e01 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobserver.go @@ -0,0 +1,78 @@ +package storage + +import ( + "fmt" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// TODO(stevvooe): This should configurable in the future. +const blobCacheControlMaxAge = 365 * 24 * time.Hour + +// blobServer simply serves blobs from a driver instance using a path function +// to identify paths and a descriptor service to fill in metadata. +type blobServer struct { + driver driver.StorageDriver + statter distribution.BlobStatter + pathFn func(dgst digest.Digest) (string, error) + redirect bool // allows disabling URLFor redirects +} + +func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return err + } + + path, err := bs.pathFn(desc.Digest) + if err != nil { + return err + } + + if bs.redirect { + redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) + switch err.(type) { + case nil: + // Redirect to storage URL. + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + return err + + case driver.ErrUnsupportedMethod: + // Fallback to serving the content directly. + default: + // Some unexpected error. + return err + } + } + + br, err := newFileReader(ctx, bs.driver, path, desc.Size) + if err != nil { + return err + } + defer br.Close() + + w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) + + if w.Header().Get("Docker-Content-Digest") == "" { + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + } + + if w.Header().Get("Content-Type") == "" { + // Set the content type if not already set. + w.Header().Set("Content-Type", desc.MediaType) + } + + if w.Header().Get("Content-Length") == "" { + // Set the content length if not already set. + w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) + } + + http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobstore.go b/vendor/github.com/docker/distribution/registry/storage/blobstore.go new file mode 100644 index 0000000..2cbbc9b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobstore.go @@ -0,0 +1,223 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// blobStore implements the read side of the blob store interface over a +// driver without enforcing per-repository membership. This object is +// intentionally a leaky abstraction, providing utility methods that support +// creating and traversing backend links. +type blobStore struct { + driver driver.StorageDriver + statter distribution.BlobStatter +} + +var _ distribution.BlobProvider = &blobStore{} + +// Get implements the BlobReadService.Get call. +func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + bp, err := bs.path(dgst) + if err != nil { + return nil, err + } + + p, err := getContent(ctx, bs.driver, bp) + if err != nil { + switch err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUnknown + } + + return nil, err + } + + return p, nil +} + +func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return nil, err + } + + path, err := bs.path(desc.Digest) + if err != nil { + return nil, err + } + + return newFileReader(ctx, bs.driver, path, desc.Size) +} + +// Put stores the content p in the blob store, calculating the digest. If the +// content is already present, only the digest will be returned. This should +// only be used for small objects, such as manifests. This implemented as a convenience for other Put implementations +func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst := digest.FromBytes(p) + desc, err := bs.statter.Stat(ctx, dgst) + if err == nil { + // content already present + return desc, nil + } else if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err) + // real error, return it + return distribution.Descriptor{}, err + } + + bp, err := bs.path(dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype here, as well. + return distribution.Descriptor{ + Size: int64(len(p)), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + }, bs.driver.PutContent(ctx, bp, p) +} + +func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error { + + specPath, err := pathFor(blobsPathSpec{}) + if err != nil { + return err + } + + err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error { + // skip directories + if fileInfo.IsDir() { + return nil + } + + currentPath := fileInfo.Path() + // we only want to parse paths that end with /data + _, fileName := path.Split(currentPath) + if fileName != "data" { + return nil + } + + digest, err := digestFromPath(currentPath) + if err != nil { + return err + } + + return ingester(digest) + }) + return err +} + +// path returns the canonical path for the blob identified by digest. The blob +// may or may not exist. +func (bs *blobStore) path(dgst digest.Digest) (string, error) { + bp, err := pathFor(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return "", err + } + + return bp, nil +} + +// link links the path to the provided digest by writing the digest into the +// target file. Caller must ensure that the blob actually exists. +func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { + // The contents of the "link" file are the exact string contents of the + // digest, which is specified in that package. + return bs.driver.PutContent(ctx, path, []byte(dgst)) +} + +// readlink returns the linked digest at path. +func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { + content, err := bs.driver.GetContent(ctx, path) + if err != nil { + return "", err + } + + linked, err := digest.ParseDigest(string(content)) + if err != nil { + return "", err + } + + return linked, nil +} + +// resolve reads the digest link at path and returns the blob store path. +func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { + dgst, err := bs.readlink(ctx, path) + if err != nil { + return "", err + } + + return bs.path(dgst) +} + +type blobStatter struct { + driver driver.StorageDriver +} + +var _ distribution.BlobDescriptorService = &blobStatter{} + +// Stat implements BlobStatter.Stat by returning the descriptor for the blob +// in the main blob store. If this method returns successfully, there is +// strong guarantee that the blob exists and is available. +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + path, err := pathFor(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return distribution.Descriptor{}, err + } + + fi, err := bs.driver.Stat(ctx, path) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrBlobUnknown + default: + return distribution.Descriptor{}, err + } + } + + if fi.IsDir() { + // NOTE(stevvooe): This represents a corruption situation. Somehow, we + // calculated a blob path and then detected a directory. We log the + // error and then error on the side of not knowing about the blob. + context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + // TODO(stevvooe): Add method to resolve the mediatype. We can store and + // cache a "global" media type for the blob, even if a specific repo has a + // mediatype that overrides the main one. + + return distribution.Descriptor{ + Size: fi.Size(), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + }, nil +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go new file mode 100644 index 0000000..668a6fc --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go @@ -0,0 +1,399 @@ +package storage + +import ( + "errors" + "fmt" + "io" + "path" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +var ( + errResumableDigestNotAvailable = errors.New("resumable digest not available") +) + +// blobWriter is used to control the various aspects of resumable +// blob upload. +type blobWriter struct { + ctx context.Context + blobStore *linkedBlobStore + + id string + startedAt time.Time + digester digest.Digester + written int64 // track the contiguous write + + fileWriter storagedriver.FileWriter + driver storagedriver.StorageDriver + path string + + resumableDigestEnabled bool + committed bool +} + +var _ distribution.BlobWriter = &blobWriter{} + +// ID returns the identifier for this upload. +func (bw *blobWriter) ID() string { + return bw.id +} + +func (bw *blobWriter) StartedAt() time.Time { + return bw.startedAt +} + +// Commit marks the upload as completed, returning a valid descriptor. The +// final size and digest are checked against the first descriptor provided. +func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + context.GetLogger(ctx).Debug("(*blobWriter).Commit") + + if err := bw.fileWriter.Commit(); err != nil { + return distribution.Descriptor{}, err + } + + bw.Close() + desc.Size = bw.Size() + + canonical, err := bw.validateBlob(ctx, desc) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.moveBlob(ctx, canonical); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.removeResources(ctx); err != nil { + return distribution.Descriptor{}, err + } + + err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) + if err != nil { + return distribution.Descriptor{}, err + } + + bw.committed = true + return canonical, nil +} + +// Cancel the blob upload process, releasing any resources associated with +// the writer and canceling the operation. +func (bw *blobWriter) Cancel(ctx context.Context) error { + context.GetLogger(ctx).Debug("(*blobWriter).Cancel") + if err := bw.fileWriter.Cancel(); err != nil { + return err + } + + if err := bw.Close(); err != nil { + context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) + } + + if err := bw.removeResources(ctx); err != nil { + return err + } + + return nil +} + +func (bw *blobWriter) Size() int64 { + return bw.fileWriter.Size() +} + +func (bw *blobWriter) Write(p []byte) (int, error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p) + bw.written += int64(n) + + return n, err +} + +func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r) + bw.written += nn + + return nn, err +} + +func (bw *blobWriter) Close() error { + if bw.committed { + return errors.New("blobwriter close after commit") + } + + if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return err + } + + return bw.fileWriter.Close() +} + +// validateBlob checks the data against the digest, returning an error if it +// does not match. The canonical descriptor is returned. +func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + var ( + verified, fullHash bool + canonical digest.Digest + ) + + if desc.Digest == "" { + // if no descriptors are provided, we have nothing to validate + // against. We don't really want to support this for the registry. + return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ + Reason: fmt.Errorf("cannot validate against empty digest"), + } + } + + var size int64 + + // Stat the on disk file + if fi, err := bw.driver.Stat(ctx, bw.path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // NOTE(stevvooe): We really don't care if the file is + // not actually present for the reader. We now assume + // that the desc length is zero. + desc.Size = 0 + default: + // Any other error we want propagated up the stack. + return distribution.Descriptor{}, err + } + } else { + if fi.IsDir() { + return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) + } + + size = fi.Size() + } + + if desc.Size > 0 { + if desc.Size != size { + return distribution.Descriptor{}, distribution.ErrBlobInvalidLength + } + } else { + // if provided 0 or negative length, we can assume caller doesn't know or + // care about length. + desc.Size = size + } + + // TODO(stevvooe): This section is very meandering. Need to be broken down + // to be a lot more clear. + + if err := bw.resumeDigest(ctx); err == nil { + canonical = bw.digester.Digest() + + if canonical.Algorithm() == desc.Digest.Algorithm() { + // Common case: client and server prefer the same canonical digest + // algorithm - currently SHA256. + verified = desc.Digest == canonical + } else { + // The client wants to use a different digest algorithm. They'll just + // have to be patient and wait for us to download and re-hash the + // uploaded content using that digest algorithm. + fullHash = true + } + } else if err == errResumableDigestNotAvailable { + // Not using resumable digests, so we need to hash the entire layer. + fullHash = true + } else { + return distribution.Descriptor{}, err + } + + if fullHash { + // a fantastic optimization: if the the written data and the size are + // the same, we don't need to read the data from the backend. This is + // because we've written the entire file in the lifecycle of the + // current instance. + if bw.written == size && digest.Canonical == desc.Digest.Algorithm() { + canonical = bw.digester.Digest() + verified = desc.Digest == canonical + } + + // If the check based on size fails, we fall back to the slowest of + // paths. We may be able to make the size-based check a stronger + // guarantee, so this may be defensive. + if !verified { + digester := digest.Canonical.New() + + digestVerifier, err := digest.NewDigestVerifier(desc.Digest) + if err != nil { + return distribution.Descriptor{}, err + } + + // Read the file from the backend driver and validate it. + fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size) + if err != nil { + return distribution.Descriptor{}, err + } + defer fr.Close() + + tr := io.TeeReader(fr, digester.Hash()) + + if _, err := io.Copy(digestVerifier, tr); err != nil { + return distribution.Descriptor{}, err + } + + canonical = digester.Digest() + verified = digestVerifier.Verified() + } + } + + if !verified { + context.GetLoggerWithFields(ctx, + map[interface{}]interface{}{ + "canonical": canonical, + "provided": desc.Digest, + }, "canonical", "provided"). + Errorf("canonical digest does match provided digest") + return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ + Digest: desc.Digest, + Reason: fmt.Errorf("content does not match digest"), + } + } + + // update desc with canonical hash + desc.Digest = canonical + + if desc.MediaType == "" { + desc.MediaType = "application/octet-stream" + } + + return desc, nil +} + +// moveBlob moves the data into its final, hash-qualified destination, +// identified by dgst. The layer should be validated before commencing the +// move. +func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { + blobPath, err := pathFor(blobDataPathSpec{ + digest: desc.Digest, + }) + + if err != nil { + return err + } + + // Check for existence + if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // ensure that it doesn't exist. + default: + return err + } + } else { + // If the path exists, we can assume that the content has already + // been uploaded, since the blob storage is content-addressable. + // While it may be corrupted, detection of such corruption belongs + // elsewhere. + return nil + } + + // If no data was received, we may not actually have a file on disk. Check + // the size here and write a zero-length file to blobPath if this is the + // case. For the most part, this should only ever happen with zero-length + // tars. + if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // HACK(stevvooe): This is slightly dangerous: if we verify above, + // get a hash, then the underlying file is deleted, we risk moving + // a zero-length blob into a nonzero-length blob location. To + // prevent this horrid thing, we employ the hack of only allowing + // to this happen for the digest of an empty tar. + if desc.Digest == digest.DigestSha256EmptyTar { + return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) + } + + // We let this fail during the move below. + logrus. + WithField("upload.id", bw.ID()). + WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") + default: + return err // unrelated error + } + } + + // TODO(stevvooe): We should also write the mediatype when executing this move. + + return bw.blobStore.driver.Move(ctx, bw.path, blobPath) +} + +// removeResources should clean up all resources associated with the upload +// instance. An error will be returned if the clean up cannot proceed. If the +// resources are already not present, no error will be returned. +func (bw *blobWriter) removeResources(ctx context.Context) error { + dataPath, err := pathFor(uploadDataPathSpec{ + name: bw.blobStore.repository.Named().Name(), + id: bw.id, + }) + + if err != nil { + return err + } + + // Resolve and delete the containing directory, which should include any + // upload related files. + dirPath := path.Dir(dataPath) + if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // already gone! + default: + // This should be uncommon enough such that returning an error + // should be okay. At this point, the upload should be mostly + // complete, but perhaps the backend became unaccessible. + context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} + +func (bw *blobWriter) Reader() (io.ReadCloser, error) { + // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 + try := 1 + for try <= 5 { + _, err := bw.driver.Stat(bw.ctx, bw.path) + if err == nil { + break + } + switch err.(type) { + case storagedriver.PathNotFoundError: + context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) + time.Sleep(1 * time.Second) + try++ + default: + return nil, err + } + } + + readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0) + if err != nil { + return nil, err + } + + return readCloser, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go new file mode 100644 index 0000000..32f1309 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go @@ -0,0 +1,17 @@ +// +build noresumabledigest + +package storage + +import ( + "github.com/docker/distribution/context" +) + +// resumeHashAt is a noop when resumable digest support is disabled. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { + return errResumableDigestNotAvailable +} + +// storeHashState is a noop when resumable digest support is disabled. +func (bw *blobWriter) storeHashState(ctx context.Context) error { + return errResumableDigestNotAvailable +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go new file mode 100644 index 0000000..ff5482c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go @@ -0,0 +1,145 @@ +// +build !noresumabledigest + +package storage + +import ( + "fmt" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/stevvooe/resumable" + + // register resumable hashes with import + _ "github.com/stevvooe/resumable/sha256" + _ "github.com/stevvooe/resumable/sha512" +) + +// resumeDigest attempts to restore the state of the internal hash function +// by loading the most recent saved hash state equal to the current size of the blob. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + offset := bw.fileWriter.Size() + if offset == int64(h.Len()) { + // State of digester is already at the requested offset. + return nil + } + + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := bw.getStoredHashStates(ctx) + if err != nil { + return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) + } + + // Find the highest stored hashState with offset equal to + // the requested offset. + for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } + } + + if hashStateMatch.offset == 0 { + // No need to load any state, just reset the hasher. + h.Reset() + } else { + storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) + if err != nil { + return err + } + + if err = h.Restore(storedState); err != nil { + return err + } + } + + // Mind the gap. + if gapLen := offset - int64(h.Len()); gapLen > 0 { + return errResumableDigestNotAvailable + } + + return nil +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. +func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Named().String(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + list: true, + }) + + if err != nil { + return nil, err + } + + paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) + if err != nil { + if _, ok := err.(storagedriver.PathNotFoundError); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. + offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +func (bw *blobWriter) storeHashState(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Named().String(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + offset: int64(h.Len()), + }) + + if err != nil { + return err + } + + hashState, err := h.State() + if err != nil { + return err + } + + return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go new file mode 100644 index 0000000..10a3909 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go @@ -0,0 +1,35 @@ +// Package cache provides facilities to speed up access to the storage +// backend. +package cache + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. +type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService + + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) +} + +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria for admitting descriptors. +func ValidateDescriptor(desc distribution.Descriptor) error { + if err := desc.Digest.Validate(); err != nil { + return err + } + + if desc.Size < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) + } + + if desc.MediaType == "" { + return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go new file mode 100644 index 0000000..a563c02 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go @@ -0,0 +1,180 @@ +package cachecheck + +import ( + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache" +) + +// CheckBlobDescriptorCache takes a cache implementation through a common set +// of operations. If adding new tests, please add them here so new +// implementations get the benefit. This should be used for unit tests. +func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { + ctx := context.Background() + + checkBlobDescriptorCacheEmptyRepository(ctx, t, provider) + checkBlobDescriptorCacheSetAndRead(ctx, t, provider) + checkBlobDescriptorCacheClear(ctx, t, provider) +} + +func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { + if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty store: %v", err) + } + + cache, err := provider.RepositoryScoped("") + if err == nil { + t.Fatalf("expected an error when asking for invalid repo") + } + + cache, err = provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting repository: %v", err) + } + + if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ + Digest: "sha384:abc", + Size: 10, + MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error with invalid digest: %v", err) + } + + if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ + Digest: "", + Size: 10, + MediaType: "application/octet-stream"}); err == nil { + t.Fatalf("expected error setting value on invalid descriptor") + } + + if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error checking for cache item with empty digest: %v", err) + } + + if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty repo: %v", err) + } +} + +func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") + expected := distribution.Descriptor{ + Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if !reflect.DeepEqual(expected, desc) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // also check that we set the canonical key ("fake:abc") + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("descriptor not returned for canonical key: %v", err) + } + + if !reflect.DeepEqual(expected, desc) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // ensure that global gets extra descriptor mapping + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) + } + + if !reflect.DeepEqual(desc, expected) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // get at it through canonical descriptor + desc, err = provider.Stat(ctx, expected.Digest) + if err != nil { + t.Fatalf("unexpected error checking glboal descriptor: %v", err) + } + + if !reflect.DeepEqual(desc, expected) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // now, we set the repo local mediatype to something else and ensure it + // doesn't get changed in the provider cache. + expected.MediaType = "application/json" + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("unexpected error setting descriptor: %v", err) + } + + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting descriptor: %v", err) + } + + if !reflect.DeepEqual(desc, expected) { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } + + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting global descriptor: %v", err) + } + + expected.MediaType = "application/octet-stream" // expect original mediatype in global + + if !reflect.DeepEqual(desc, expected) { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } +} + +func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") + expected := distribution.Descriptor{ + Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if !reflect.DeepEqual(expected, desc) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + err = cache.Clear(ctx, localDigest) + if err != nil { + t.Error(err) + } + + desc, err = cache.Stat(ctx, localDigest) + if err == nil { + t.Fatalf("expected error statting deleted blob: %v", err) + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go new file mode 100644 index 0000000..94ca8a9 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -0,0 +1,101 @@ +package cache + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + "github.com/docker/distribution" +) + +// Metrics is used to hold metric counters +// related to the number of times a cache was +// hit or missed. +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 +} + +// MetricsTracker represents a metric tracker +// which simply counts the number of hits and misses. +type MetricsTracker interface { + Hit() + Miss() + Metrics() Metrics +} + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobDescriptorService + tracker MetricsTracker +} + +// NewCachedBlobStatter creates a new statter which prefers a cache and +// falls back to a backend. +func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + } +} + +// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and +// falls back to a backend. Hits and misses will send to the tracker. +func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + tracker: tracker, + } +} + +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := cbds.cache.Stat(ctx, dgst) + if err != nil { + if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + } + + goto fallback + } + + if cbds.tracker != nil { + cbds.tracker.Hit() + } + return desc, nil +fallback: + if cbds.tracker != nil { + cbds.tracker.Miss() + } + desc, err = cbds.backend.Stat(ctx, dgst) + if err != nil { + return desc, err + } + + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + + return desc, err + +} + +func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + err := cbds.cache.Clear(ctx, dgst) + if err != nil { + return err + } + + err = cbds.backend.Clear(ctx, dgst) + if err != nil { + return err + } + return nil +} + +func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go new file mode 100644 index 0000000..cf125e1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -0,0 +1,179 @@ +package memory + +import ( + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" +) + +type inMemoryBlobDescriptorCacheProvider struct { + global *mapBlobDescriptorCache + repositories map[string]*mapBlobDescriptorCache + mu sync.RWMutex +} + +// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for +// storing blob descriptor data. +func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNamed(repo); err != nil { + return nil, err + } + + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) + } + + // we already know it, do nothing + return err +} + +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. +type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return repo.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.ErrBlobUnknown + } + + return repo.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + if repo == nil { + // allocate map since we are setting it now. + var ok bool + // have to read back value since we may have allocated elsewhere. + repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + repo = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = repo + } + rsimbdcp.repository = repo + } + rsimbdcp.parent.mu.Unlock() + + if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. +type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go new file mode 100644 index 0000000..49c2b5c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go @@ -0,0 +1,13 @@ +package memory + +import ( + "testing" + + "github.com/docker/distribution/registry/storage/cache/cachecheck" +) + +// TestInMemoryBlobInfoCache checks the in memory implementation is working +// correctly. +func TestInMemoryBlobInfoCache(t *testing.T) { + cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go new file mode 100644 index 0000000..cb264b0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go @@ -0,0 +1,268 @@ +package redis + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" + "github.com/garyburd/redigo/redis" +) + +// redisBlobStatService provides an implementation of +// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in +// two parts. The first provide fast access to repository membership through a +// redis set for each repo. The second is a redis hash keyed by the digest of +// the layer, providing path, length and mediatype information. There is also +// a per-repository redis hash of the blob descriptor, allowing override of +// data. This is currently used to override the mediatype on a per-repository +// basis. +// +// Note that there is no implied relationship between these two caches. The +// layer may exist in one, both or none and the code must be written this way. +type redisBlobDescriptorService struct { + pool *redis.Pool + + // TODO(stevvooe): We use a pool because we don't have great control over + // the cache lifecycle to manage connections. A new connection if fetched + // for each operation. Once we have better lifecycle management of the + // request objects, we can change this to a connection. +} + +// NewRedisBlobDescriptorCacheProvider returns a new redis-based +// BlobDescriptorCacheProvider using the provided redis connection pool. +func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider { + return &redisBlobDescriptorService{ + pool: pool, + } +} + +// RepositoryScoped returns the scoped cache. +func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNamed(repo); err != nil { + return nil, err + } + + return &repositoryScopedRedisBlobDescriptorService{ + repo: repo, + upstream: rbds, + }, nil +} + +// Stat retrieves the descriptor data from the redis hash entry. +func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.stat(ctx, conn, dgst) +} + +func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + // Not atomic in redis <= 2.3 + reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype") + if err != nil { + return err + } + + if reply == 0 { + return distribution.ErrBlobUnknown + } + + return nil +} + +// stat provides an internal stat call that takes a connection parameter. This +// allows some internal management of the connection scope. +func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { + reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + // NOTE(stevvooe): The "size" field used to be "length". We treat a + // missing "size" field here as an unknown blob, which causes a cache + // miss, effectively migrating the field. + if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + var desc distribution.Descriptor + if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { + return distribution.Descriptor{}, err + } + + return desc, nil +} + +// SetDescriptor sets the descriptor data for the given digest using a redis +// hash. A hash is used here since we may store unrelated fields about a layer +// in the future. +func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), + "digest", desc.Digest, + "size", desc.Size); err != nil { + return err + } + + // Only set mediatype if not already set. + if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), + "mediatype", desc.MediaType); err != nil { + return err + } + + return nil +} + +func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "blobs::" + dgst.String() +} + +type repositoryScopedRedisBlobDescriptorService struct { + repo string + upstream *redisBlobDescriptorService +} + +var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} + +// Stat ensures that the digest is a member of the specified repository and +// forwards the descriptor request to the global blob store. If the media type +// differs for the repository, we override it. +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return distribution.Descriptor{}, err + } + + if !member { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // We allow a per repository mediatype, let's look it up here. + mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + if mediatype != "" { + upstream.MediaType = mediatype + } + + return upstream, nil +} + +// Clear removes the descriptor from the cache and forwards to the upstream descriptor store +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return err + } + + if !member { + return distribution.ErrBlobUnknown + } + + return rsrbds.upstream.Clear(ctx, dgst) +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + if dgst != desc.Digest { + if dgst.Algorithm() == desc.Digest.Algorithm() { + return fmt.Errorf("redis cache: digest for descriptors differ but algorthim does not: %q != %q", dgst, desc.Digest) + } + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + return rsrbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil { + return err + } + + if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil { + return err + } + + // Override repository mediatype. + if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil { + return err + } + + // Also set the values for the primary descriptor, if they differ by + // algorithm (ie sha256 vs sha512). + if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() { + if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil { + return err + } + } + + return nil +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "repository::" + rsrbds.repo + "::blobs::" + dgst.String() +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string { + return "repository::" + rsrbds.repo + "::blobs" +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go new file mode 100644 index 0000000..d324842 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go @@ -0,0 +1,53 @@ +package redis + +import ( + "flag" + "os" + "testing" + "time" + + "github.com/docker/distribution/registry/storage/cache/cachecheck" + "github.com/garyburd/redigo/redis" +) + +var redisAddr string + +func init() { + flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis") +} + +// TestRedisLayerInfoCache exercises a live redis instance using the cache +// implementation. +func TestRedisBlobDescriptorCacheProvider(t *testing.T) { + if redisAddr == "" { + // fallback to an environement variable + redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR") + } + + if redisAddr == "" { + // skip if still not set + t.Skip("please set -test.registry.storage.cache.redis.addr to test layer info cache against redis") + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + return redis.Dial("tcp", redisAddr) + }, + MaxIdle: 1, + MaxActive: 2, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + Wait: false, // if a connection is not avialable, proceed without cache. + } + + // Clear the database + conn := pool.Get() + if _, err := conn.Do("FLUSHDB"); err != nil { + t.Fatalf("unexpected error flushing redis db: %v", err) + } + conn.Close() + + cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog.go b/vendor/github.com/docker/distribution/registry/storage/catalog.go new file mode 100644 index 0000000..0b59a39 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/catalog.go @@ -0,0 +1,153 @@ +package storage + +import ( + "errors" + "io" + "path" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// errFinishedWalk signals an early exit to the walk when the current query +// is satisfied. +var errFinishedWalk = errors.New("finished walk") + +// Returns a list, or partial list, of repositories in the registry. +// Because it's a quite expensive operation, it should only be used when building up +// an initial set of repositories. +func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + var foundRepos []string + + if len(repos) == 0 { + return 0, errors.New("no space in slice") + } + + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return 0, err + } + + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + err := handleRepository(fileInfo, root, last, func(repoPath string) error { + foundRepos = append(foundRepos, repoPath) + return nil + }) + if err != nil { + return err + } + + // if we've filled our array, no need to walk any further + if len(foundRepos) == len(repos) { + return errFinishedWalk + } + + return nil + }) + + n = copy(repos, foundRepos) + + switch err { + case nil: + // nil means that we completed walk and didn't fill buffer. No more + // records are available. + err = io.EOF + case errFinishedWalk: + // more records are available. + err = nil + } + + return n, err +} + +// Enumerate applies ingester to each repository +func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error { + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return err + } + + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + return handleRepository(fileInfo, root, "", ingester) + }) + + return err +} + +// lessPath returns true if one path a is less than path b. +// +// A component-wise comparison is done, rather than the lexical comparison of +// strings. +func lessPath(a, b string) bool { + // we provide this behavior by making separator always sort first. + return compareReplaceInline(a, b, '/', '\x00') < 0 +} + +// compareReplaceInline modifies runtime.cmpstring to replace old with new +// during a byte-wise comparison. +func compareReplaceInline(s1, s2 string, old, new byte) int { + // TODO(stevvooe): We are missing an optimization when the s1 and s2 have + // the exact same slice header. It will make the code unsafe but can + // provide some extra performance. + + l := len(s1) + if len(s2) < l { + l = len(s2) + } + + for i := 0; i < l; i++ { + c1, c2 := s1[i], s2[i] + if c1 == old { + c1 = new + } + + if c2 == old { + c2 = new + } + + if c1 < c2 { + return -1 + } + + if c1 > c2 { + return +1 + } + } + + if len(s1) < len(s2) { + return -1 + } + + if len(s1) > len(s2) { + return +1 + } + + return 0 +} + +// handleRepository calls function fn with a repository path if fileInfo +// has a path of a repository under root and that it is lexographically +// after last. Otherwise, it will return ErrSkipDir. This should be used +// with Walk to do handling with repositories in a storage. +func handleRepository(fileInfo driver.FileInfo, root, last string, fn func(repoPath string) error) error { + filePath := fileInfo.Path() + + // lop the base path off + repo := filePath[len(root)+1:] + + _, file := path.Split(repo) + if file == "_layers" { + repo = strings.TrimSuffix(repo, "/_layers") + if lessPath(last, repo) { + if err := fn(repo); err != nil { + return err + } + } + return ErrSkipDir + } else if strings.HasPrefix(file, "_") { + return ErrSkipDir + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog_test.go b/vendor/github.com/docker/distribution/registry/storage/catalog_test.go new file mode 100644 index 0000000..c69c0cd --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/catalog_test.go @@ -0,0 +1,324 @@ +package storage + +import ( + "fmt" + "io" + "math/rand" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" +) + +type setupEnv struct { + ctx context.Context + driver driver.StorageDriver + expected []string + registry distribution.Namespace +} + +func setupFS(t *testing.T) *setupEnv { + d := inmemory.New() + ctx := context.Background() + registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repos := []string{ + "foo/a", + "foo/b", + "foo-bar/a", + "bar/c", + "bar/d", + "bar/e", + "foo/d/in", + "foo-bar/b", + "test", + } + + for _, repo := range repos { + makeRepo(ctx, t, repo, registry) + } + + expected := []string{ + "bar/c", + "bar/d", + "bar/e", + "foo/a", + "foo/b", + "foo/d/in", + "foo-bar/a", + "foo-bar/b", + "test", + } + + return &setupEnv{ + ctx: ctx, + driver: d, + expected: expected, + registry: registry, + } +} + +func makeRepo(ctx context.Context, t *testing.T, name string, reg distribution.Namespace) { + named, err := reference.ParseNamed(name) + if err != nil { + t.Fatal(err) + } + + repo, _ := reg.Repository(ctx, named) + manifests, _ := repo.Manifests(ctx) + + layers, err := testutil.CreateRandomLayers(1) + if err != nil { + t.Fatal(err) + } + + err = testutil.UploadBlobs(repo, layers) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + getKeys := func(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) { + for d := range digests { + ds = append(ds, d) + } + return + } + + manifest, err := testutil.MakeSchema1Manifest(getKeys(layers)) + if err != nil { + t.Fatal(err) + } + + _, err = manifests.Put(ctx, manifest) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + +} + +func TestCatalog(t *testing.T) { + env := setupFS(t) + + p := make([]string, 50) + + numFilled, err := env.registry.Repositories(env.ctx, p, "") + if numFilled != len(env.expected) { + t.Errorf("missing items in catalog") + } + + if !testEq(p, env.expected, len(env.expected)) { + t.Errorf("Expected catalog repos err") + } + + if err != io.EOF { + t.Errorf("Catalog has more values which we aren't expecting") + } +} + +func TestCatalogInParts(t *testing.T) { + env := setupFS(t) + + chunkLen := 3 + p := make([]string, chunkLen) + + numFilled, err := env.registry.Repositories(env.ctx, p, "") + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[0:chunkLen], numFilled) { + t.Errorf("Expected catalog first chunk err") + } + + lastRepo := p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) { + t.Errorf("Expected catalog second chunk err") + } + + lastRepo = p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err != io.EOF || numFilled != len(p) { + t.Errorf("Expected end of catalog") + } + + if !testEq(p, env.expected[chunkLen*2:chunkLen*3], numFilled) { + t.Errorf("Expected catalog third chunk err") + } + + lastRepo = p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err != io.EOF { + t.Errorf("Catalog has more values which we aren't expecting") + } + + if numFilled != 0 { + t.Errorf("Expected catalog fourth chunk err") + } +} + +func TestCatalogEnumerate(t *testing.T) { + env := setupFS(t) + + var repos []string + repositoryEnumerator := env.registry.(distribution.RepositoryEnumerator) + err := repositoryEnumerator.Enumerate(env.ctx, func(repoName string) error { + repos = append(repos, repoName) + return nil + }) + if err != nil { + t.Errorf("Expected catalog enumerate err") + } + + if len(repos) != len(env.expected) { + t.Errorf("Expected catalog enumerate doesn't have correct number of values") + } + + if !testEq(repos, env.expected, len(env.expected)) { + t.Errorf("Expected catalog enumerate not over all values") + } +} + +func testEq(a, b []string, size int) bool { + for cnt := 0; cnt < size-1; cnt++ { + if a[cnt] != b[cnt] { + return false + } + } + return true +} + +func setupBadWalkEnv(t *testing.T) *setupEnv { + d := newBadListDriver() + ctx := context.Background() + registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + return &setupEnv{ + ctx: ctx, + driver: d, + registry: registry, + } +} + +type badListDriver struct { + driver.StorageDriver +} + +var _ driver.StorageDriver = &badListDriver{} + +func newBadListDriver() *badListDriver { + return &badListDriver{StorageDriver: inmemory.New()} +} + +func (d *badListDriver) List(ctx context.Context, path string) ([]string, error) { + return nil, fmt.Errorf("List error") +} + +func TestCatalogWalkError(t *testing.T) { + env := setupBadWalkEnv(t) + p := make([]string, 1) + + _, err := env.registry.Repositories(env.ctx, p, "") + if err == io.EOF { + t.Errorf("Expected catalog driver list error") + } +} + +func BenchmarkPathCompareEqual(B *testing.B) { + B.StopTimer() + pp := randomPath(100) + // make a real copy + ppb := append([]byte{}, []byte(pp)...) + a, b := pp, string(ppb) + + B.StartTimer() + for i := 0; i < B.N; i++ { + lessPath(a, b) + } +} + +func BenchmarkPathCompareNotEqual(B *testing.B) { + B.StopTimer() + a, b := randomPath(100), randomPath(100) + B.StartTimer() + + for i := 0; i < B.N; i++ { + lessPath(a, b) + } +} + +func BenchmarkPathCompareNative(B *testing.B) { + B.StopTimer() + a, b := randomPath(100), randomPath(100) + B.StartTimer() + + for i := 0; i < B.N; i++ { + c := a < b + c = c && false + } +} + +func BenchmarkPathCompareNativeEqual(B *testing.B) { + B.StopTimer() + pp := randomPath(100) + a, b := pp, pp + B.StartTimer() + + for i := 0; i < B.N; i++ { + c := a < b + c = c && false + } +} + +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + path += chunk + remaining := length - int64(len(path)) + if remaining == 1 { + path += randomFilename(1) + } else if remaining > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { + b := make([]byte, length) + wasSeparator := true + for i := range b { + if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { + b[i] = separatorChars[rand.Intn(len(separatorChars))] + wasSeparator = true + } else { + b[i] = filenameChars[rand.Intn(len(filenameChars))] + wasSeparator = false + } + } + return string(b) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/doc.go b/vendor/github.com/docker/distribution/registry/storage/doc.go new file mode 100644 index 0000000..387d923 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/doc.go @@ -0,0 +1,3 @@ +// Package storage contains storage services for use in the registry +// application. It should be considered an internal package, as of Go 1.4. +package storage diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go new file mode 100644 index 0000000..930b76a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go @@ -0,0 +1,483 @@ +// Package azure provides a storagedriver.StorageDriver implementation to +// store blobs in Microsoft Azure Blob Storage Service. +package azure + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +const driverName = "azure" + +const ( + paramAccountName = "accountname" + paramAccountKey = "accountkey" + paramContainer = "container" + paramRealm = "realm" + maxChunkSize = 4 * 1024 * 1024 +) + +type driver struct { + client azure.BlobStorageClient + container string +} + +type baseEmbed struct{ base.Base } + +// Driver is a storagedriver.StorageDriver implementation backed by +// Microsoft Azure Blob Storage Service. +type Driver struct{ baseEmbed } + +func init() { + factory.Register(driverName, &azureDriverFactory{}) +} + +type azureDriverFactory struct{} + +func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// FromParameters constructs a new Driver with a given parameters map. +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + accountName, ok := parameters[paramAccountName] + if !ok || fmt.Sprint(accountName) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountName) + } + + accountKey, ok := parameters[paramAccountKey] + if !ok || fmt.Sprint(accountKey) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) + } + + container, ok := parameters[paramContainer] + if !ok || fmt.Sprint(container) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramContainer) + } + + realm, ok := parameters[paramRealm] + if !ok || fmt.Sprint(realm) == "" { + realm = azure.DefaultBaseURL + } + + return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) +} + +// New constructs a new Driver with the given Azure Storage Account credentials +func New(accountName, accountKey, container, realm string) (*Driver, error) { + api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) + if err != nil { + return nil, err + } + + blobClient := api.GetBlobService() + + // Create registry container + if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { + return nil, err + } + + d := &driver{ + client: blobClient, + container: container} + return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil +} + +// Implement the storagedriver.StorageDriver interface. +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + blob, err := d.client.GetBlob(d.container, path) + if err != nil { + if is404(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + defer blob.Close() + return ioutil.ReadAll(blob) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if _, err := d.client.DeleteBlobIfExists(d.container, path, nil); err != nil { + return err + } + writer, err := d.Writer(ctx, path, false) + if err != nil { + return err + } + defer writer.Close() + _, err = writer.Write(contents) + if err != nil { + return err + } + return writer.Commit() +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if !ok { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + info, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + size := int64(info.ContentLength) + if offset >= size { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + bytesRange := fmt.Sprintf("%v-", offset) + resp, err := d.client.GetBlobRange(d.container, path, bytesRange, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + blobExists, err := d.client.BlobExists(d.container, path) + if err != nil { + return nil, err + } + var size int64 + if blobExists { + if append { + blobProperties, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + size = blobProperties.ContentLength + } else { + err := d.client.DeleteBlob(d.container, path, nil) + if err != nil { + return nil, err + } + } + } else { + if append { + return nil, storagedriver.PathNotFoundError{Path: path} + } + err := d.client.PutAppendBlob(d.container, path, nil) + if err != nil { + return nil, err + } + } + + return d.newWriter(path, size), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + // Check if the path is a blob + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if ok { + blob, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + mtim, err := time.Parse(http.TimeFormat, blob.LastModified) + if err != nil { + return nil, err + } + + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: int64(blob.ContentLength), + ModTime: mtim, + IsDir: false, + }}, nil + } + + // Check if path is a virtual container + virtContainerPath := path + if !strings.HasSuffix(virtContainerPath, "/") { + virtContainerPath += "/" + } + blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ + Prefix: virtContainerPath, + MaxResults: 1, + }) + if err != nil { + return nil, err + } + if len(blobs.Blobs) > 0 { + // path is a virtual container + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + }}, nil + } + + // path is not a blob or virtual container + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + if path == "/" { + path = "" + } + + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return blobs, err + } + + list := directDescendants(blobs, path) + if path != "" && len(list) == 0 { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) + err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) + if err != nil { + if is404(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err + } + + return d.client.DeleteBlob(d.container, sourcePath, nil) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + ok, err := d.client.DeleteBlobIfExists(d.container, path, nil) + if err != nil { + return err + } + if ok { + return nil // was a blob and deleted, return + } + + // Not a blob, see if path is a virtual container with blobs + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return err + } + + for _, b := range blobs { + if err = d.client.DeleteBlob(d.container, b, nil); err != nil { + return err + } + } + + if len(blobs) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + return nil +} + +// URLFor returns a publicly accessible URL for the blob stored at given path +// for specified duration by making use of Azure Storage Shared Access Signatures (SAS). +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration + expires, ok := options["expiry"] + if ok { + t, ok := expires.(time.Time) + if ok { + expiresTime = t + } + } + return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") +} + +// directDescendants will find direct descendants (blobs or virtual containers) +// of from list of blob paths and will return their full paths. Elements in blobs +// list must be prefixed with a "/" and +// +// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is +// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"} +func directDescendants(blobs []string, prefix string) []string { + if !strings.HasPrefix(prefix, "/") { // add trailing '/' + prefix = "/" + prefix + } + if !strings.HasSuffix(prefix, "/") { // containerify the path + prefix += "/" + } + + out := make(map[string]bool) + for _, b := range blobs { + if strings.HasPrefix(b, prefix) { + rel := b[len(prefix):] + c := strings.Count(rel, "/") + if c == 0 { + out[b] = true + } else { + out[prefix+rel[:strings.Index(rel, "/")]] = true + } + } + } + + var keys []string + for k := range out { + keys = append(keys, k) + } + return keys +} + +func (d *driver) listBlobs(container, virtPath string) ([]string, error) { + if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path + virtPath += "/" + } + + out := []string{} + marker := "" + for { + resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ + Marker: marker, + Prefix: virtPath, + }) + + if err != nil { + return out, err + } + + for _, b := range resp.Blobs { + out = append(out, b.Name) + } + + if len(resp.Blobs) == 0 || resp.NextMarker == "" { + break + } + marker = resp.NextMarker + } + return out, nil +} + +func is404(err error) bool { + statusCodeErr, ok := err.(azure.AzureStorageServiceError) + return ok && statusCodeErr.StatusCode == http.StatusNotFound +} + +type writer struct { + driver *driver + path string + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(path string, size int64) storagedriver.FileWriter { + return &writer{ + driver: d, + path: path, + size: size, + bw: bufio.NewWriterSize(&blockWriter{ + client: d.client, + container: d.container, + path: path, + }, maxChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.bw.Flush() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.client.DeleteBlob(w.driver.container, w.path, nil) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return w.bw.Flush() +} + +type blockWriter struct { + client azure.BlobStorageClient + container string + path string +} + +func (bw *blockWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += maxChunkSize { + chunkSize := maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize], nil) + if err != nil { + return n, err + } + + n += chunkSize + } + + return n, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go new file mode 100644 index 0000000..4a0661b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go @@ -0,0 +1,63 @@ +package azure + +import ( + "fmt" + "os" + "strings" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + . "gopkg.in/check.v1" +) + +const ( + envAccountName = "AZURE_STORAGE_ACCOUNT_NAME" + envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY" + envContainer = "AZURE_STORAGE_CONTAINER" + envRealm = "AZURE_STORAGE_REALM" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +func init() { + var ( + accountName string + accountKey string + container string + realm string + ) + + config := []struct { + env string + value *string + }{ + {envAccountName, &accountName}, + {envAccountKey, &accountKey}, + {envContainer, &container}, + {envRealm, &realm}, + } + + missing := []string{} + for _, v := range config { + *v.value = os.Getenv(v.env) + if *v.value == "" { + missing = append(missing, v.env) + } + } + + azureDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(accountName, accountKey, container, realm) + } + + // Skip Azure storage driver tests if environment variable parameters are not provided + skipCheck := func() string { + if len(missing) > 0 { + return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) + } + return "" + } + + testsuites.RegisterSuite(azureDriverConstructor, skipCheck) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go b/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go new file mode 100644 index 0000000..e14f7ed --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go @@ -0,0 +1,198 @@ +// Package base provides a base implementation of the storage driver that can +// be used to implement common checks. The goal is to increase the amount of +// code sharing. +// +// The canonical approach to use this class is to embed in the exported driver +// struct such that calls are proxied through this implementation. First, +// declare the internal driver, as follows: +// +// type driver struct { ... internal ...} +// +// The resulting type should implement StorageDriver such that it can be the +// target of a Base struct. The exported type can then be declared as follows: +// +// type Driver struct { +// Base +// } +// +// Because Driver embeds Base, it effectively implements Base. If the driver +// needs to intercept a call, before going to base, Driver should implement +// that method. Effectively, Driver can intercept calls before coming in and +// driver implements the actual logic. +// +// To further shield the embed from other packages, it is recommended to +// employ a private embed struct: +// +// type baseEmbed struct { +// base.Base +// } +// +// Then, declare driver to embed baseEmbed, rather than Base directly: +// +// type Driver struct { +// baseEmbed +// } +// +// The type now implements StorageDriver, proxying through Base, without +// exporting an unnecessary field. +package base + +import ( + "io" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// Base provides a wrapper around a storagedriver implementation that provides +// common path and bounds checking. +type Base struct { + storagedriver.StorageDriver +} + +// Format errors received from the storage driver +func (base *Base) setDriverName(e error) error { + switch actual := e.(type) { + case nil: + return nil + case storagedriver.ErrUnsupportedMethod: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.PathNotFoundError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidPathError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidOffsetError: + actual.DriverName = base.StorageDriver.Name() + return actual + default: + storageError := storagedriver.Error{ + DriverName: base.StorageDriver.Name(), + Enclosed: e, + } + + return storageError + } +} + +// GetContent wraps GetContent of underlying storage driver. +func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.GetContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + b, e := base.StorageDriver.GetContent(ctx, path) + return b, base.setDriverName(e) +} + +// PutContent wraps PutContent of underlying storage driver. +func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.PutContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) +} + +// Reader wraps Reader of underlying storage driver. +func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.Reader(%q, %d)", base.Name(), path, offset) + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} + } + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + rc, e := base.StorageDriver.Reader(ctx, path, offset) + return rc, base.setDriverName(e) +} + +// Writer wraps Writer of underlying storage driver. +func (base *Base) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.Writer(%q, %v)", base.Name(), path, append) + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + writer, e := base.StorageDriver.Writer(ctx, path, append) + return writer, base.setDriverName(e) +} + +// Stat wraps Stat of underlying storage driver. +func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.Stat(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + fi, e := base.StorageDriver.Stat(ctx, path) + return fi, base.setDriverName(e) +} + +// List wraps List of underlying storage driver. +func (base *Base) List(ctx context.Context, path string) ([]string, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.List(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + str, e := base.StorageDriver.List(ctx, path) + return str, base.setDriverName(e) +} + +// Move wraps Move of underlying storage driver. +func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath) + + if !storagedriver.PathRegexp.MatchString(sourcePath) { + return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()} + } else if !storagedriver.PathRegexp.MatchString(destPath) { + return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath)) +} + +// Delete wraps Delete of underlying storage driver. +func (base *Base) Delete(ctx context.Context, path string) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.Delete(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.Delete(ctx, path)) +} + +// URLFor wraps URLFor of underlying storage driver. +func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.URLFor(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + str, e := base.StorageDriver.URLFor(ctx, path, options) + return str, base.setDriverName(e) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go b/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go new file mode 100644 index 0000000..185160a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go @@ -0,0 +1,145 @@ +package base + +import ( + "io" + "sync" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +type regulator struct { + storagedriver.StorageDriver + *sync.Cond + + available uint64 +} + +// NewRegulator wraps the given driver and is used to regulate concurrent calls +// to the given storage driver to a maximum of the given limit. This is useful +// for storage drivers that would otherwise create an unbounded number of OS +// threads if allowed to be called unregulated. +func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver { + return ®ulator{ + StorageDriver: driver, + Cond: sync.NewCond(&sync.Mutex{}), + available: limit, + } +} + +func (r *regulator) enter() { + r.L.Lock() + for r.available == 0 { + r.Wait() + } + r.available-- + r.L.Unlock() +} + +func (r *regulator) exit() { + r.L.Lock() + // We only need to signal to a waiting FS operation if we're already at the + // limit of threads used + if r.available == 0 { + r.Signal() + } + r.available++ + r.L.Unlock() +} + +// Name returns the human-readable "name" of the driver, useful in error +// messages and logging. By convention, this will just be the registration +// name, but drivers may provide other information here. +func (r *regulator) Name() string { + r.enter() + defer r.exit() + + return r.StorageDriver.Name() +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. +func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.GetContent(ctx, path) +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error { + r.enter() + defer r.exit() + + return r.StorageDriver.PutContent(ctx, path, content) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Reader(ctx, path, offset) +} + +// Writer stores the contents of the provided io.ReadCloser at a +// location designated by the given path. +// May be used to resume writing a stream by providing a nonzero offset. +// The offset must be no larger than the CurrentSize for this path. +func (r *regulator) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Writer(ctx, path, append) +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Stat(ctx, path) +} + +// List returns a list of the objects that are direct descendants of the +//given path. +func (r *regulator) List(ctx context.Context, path string) ([]string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.List(ctx, path) +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. +// Note: This may be no more efficient than a copy followed by a delete for +// many implementations. +func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Move(ctx, sourcePath, destPath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (r *regulator) Delete(ctx context.Context, path string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Delete(ctx, path) +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options. +// May return an ErrUnsupportedMethod in certain StorageDriver +// implementations. +func (r *regulator) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.URLFor(ctx, path, options) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go b/vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go new file mode 100644 index 0000000..a9c04ec --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go @@ -0,0 +1,64 @@ +package factory + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// driverFactories stores an internal mapping between storage driver names and their respective +// factories +var driverFactories = make(map[string]StorageDriverFactory) + +// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces +// Storage drivers should call Register() with a factory to make the driver available by name. +// Individual StorageDriver implementations generally register with the factory via the Register +// func (below) in their init() funcs, and as such they should be imported anonymously before use. +// See below for an example of how to register and get a StorageDriver for S3 +// +// import _ "github.com/docker/distribution/registry/storage/driver/s3-aws" +// s3Driver, err = factory.Create("s3", storageParams) +// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams +type StorageDriverFactory interface { + // Create returns a new storagedriver.StorageDriver with the given parameters + // Parameters will vary by driver and may be ignored + // Each parameter key must only consist of lowercase letters and numbers + Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) +} + +// Register makes a storage driver available by the provided name. +// If Register is called twice with the same name or if driver factory is nil, it panics. +// Additionally, it is not concurrency safe. Most Storage Drivers call this function +// in their init() functions. See the documentation for StorageDriverFactory for more. +func Register(name string, factory StorageDriverFactory) { + if factory == nil { + panic("Must not provide nil StorageDriverFactory") + } + _, registered := driverFactories[name] + if registered { + panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) + } + + driverFactories[name] = factory +} + +// Create a new storagedriver.StorageDriver with the given name and +// parameters. To use a driver, the StorageDriverFactory must first be +// registered with the given name. If no drivers are found, an +// InvalidStorageDriverError is returned +func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + driverFactory, ok := driverFactories[name] + if !ok { + return nil, InvalidStorageDriverError{name} + } + return driverFactory.Create(parameters) +} + +// InvalidStorageDriverError records an attempt to construct an unregistered storage driver +type InvalidStorageDriverError struct { + Name string +} + +func (err InvalidStorageDriverError) Error() string { + return fmt.Sprintf("StorageDriver not registered: %s", err.Name) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go b/vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go new file mode 100644 index 0000000..e506402 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go @@ -0,0 +1,79 @@ +package driver + +import "time" + +// FileInfo returns information about a given path. Inspired by os.FileInfo, +// it elides the base name method for a full path instead. +type FileInfo interface { + // Path provides the full path of the target of this file info. + Path() string + + // Size returns current length in bytes of the file. The return value can + // be used to write to the end of the file at path. The value is + // meaningless if IsDir returns true. + Size() int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime() time.Time + + // IsDir returns true if the path is a directory. + IsDir() bool +} + +// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal +// should only be used by storagedriver implementations. They should moved to +// a "driver" package, similar to database/sql. + +// FileInfoFields provides the exported fields for implementing FileInfo +// interface in storagedriver implementations. It should be used with +// InternalFileInfo. +type FileInfoFields struct { + // Path provides the full path of the target of this file info. + Path string + + // Size is current length in bytes of the file. The value of this field + // can be used to write to the end of the file at path. The value is + // meaningless if IsDir is set to true. + Size int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime time.Time + + // IsDir returns true if the path is a directory. + IsDir bool +} + +// FileInfoInternal implements the FileInfo interface. This should only be +// used by storagedriver implementations that don't have a specialized +// FileInfo type. +type FileInfoInternal struct { + FileInfoFields +} + +var _ FileInfo = FileInfoInternal{} +var _ FileInfo = &FileInfoInternal{} + +// Path provides the full path of the target of this file info. +func (fi FileInfoInternal) Path() string { + return fi.FileInfoFields.Path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi FileInfoInternal) Size() int64 { + return fi.FileInfoFields.Size +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi FileInfoInternal) ModTime() time.Time { + return fi.FileInfoFields.ModTime +} + +// IsDir returns true if the path is a directory. +func (fi FileInfoInternal) IsDir() bool { + return fi.FileInfoFields.IsDir +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go new file mode 100644 index 0000000..649e2bc --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go @@ -0,0 +1,440 @@ +package filesystem + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "reflect" + "strconv" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const ( + driverName = "filesystem" + defaultRootDirectory = "/var/lib/registry" + defaultMaxThreads = uint64(100) + + // minThreads is the minimum value for the maxthreads configuration + // parameter. If the driver's parameters are less than this we set + // the parameters to minThreads + minThreads = uint64(25) +) + +// DriverParameters represents all configuration options available for the +// filesystem driver +type DriverParameters struct { + RootDirectory string + MaxThreads uint64 +} + +func init() { + factory.Register(driverName, &filesystemDriverFactory{}) +} + +// filesystemDriverFactory implements the factory.StorageDriverFactory interface +type filesystemDriverFactory struct{} + +func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + rootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local +// filesystem. All provided paths will be subpaths of the RootDirectory. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Optional Parameters: +// - rootdirectory +// - maxthreads +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params, err := fromParametersImpl(parameters) + if err != nil || params == nil { + return nil, err + } + return New(*params), nil +} + +func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) { + var ( + err error + maxThreads = defaultMaxThreads + rootDirectory = defaultRootDirectory + ) + + if parameters != nil { + if rootDir, ok := parameters["rootdirectory"]; ok { + rootDirectory = fmt.Sprint(rootDir) + } + + // Get maximum number of threads for blocking filesystem operations, + // if specified + threads := parameters["maxthreads"] + switch v := threads.(type) { + case string: + if maxThreads, err = strconv.ParseUint(v, 0, 64); err != nil { + return nil, fmt.Errorf("maxthreads parameter must be an integer, %v invalid", threads) + } + case uint64: + maxThreads = v + case int, int32, int64: + val := reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int() + // If threads is negative casting to uint64 will wrap around and + // give you the hugest thread limit ever. Let's be sensible, here + if val > 0 { + maxThreads = uint64(val) + } + case uint, uint32: + maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for maxthreads: %#v", threads) + } + + if maxThreads < minThreads { + maxThreads = minThreads + } + } + + params := &DriverParameters{ + RootDirectory: rootDirectory, + MaxThreads: maxThreads, + } + return params, nil +} + +// New constructs a new Driver with a given rootDirectory +func New(params DriverParameters) *Driver { + fsDriver := &driver{rootDirectory: params.RootDirectory} + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads), + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { + writer, err := d.Writer(ctx, subPath, false) + if err != nil { + return err + } + defer writer.Close() + _, err = io.Copy(writer, bytes.NewReader(contents)) + if err != nil { + writer.Cancel() + return err + } + return writer.Commit() +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return nil, err + } + + seekPos, err := file.Seek(int64(offset), os.SEEK_SET) + if err != nil { + file.Close() + return nil, err + } else if seekPos < int64(offset) { + file.Close() + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + return file, nil +} + +func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) { + fullPath := d.fullPath(subPath) + parentDir := path.Dir(fullPath) + if err := os.MkdirAll(parentDir, 0777); err != nil { + return nil, err + } + + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + + var offset int64 + + if !append { + err := fp.Truncate(0) + if err != nil { + fp.Close() + return nil, err + } + } else { + n, err := fp.Seek(0, os.SEEK_END) + if err != nil { + fp.Close() + return nil, err + } + offset = int64(n) + } + + return newFileWriter(fp, offset), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { + fullPath := d.fullPath(subPath) + + fi, err := os.Stat(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + + return nil, err + } + + return fileInfo{ + path: subPath, + FileInfo: fi, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { + fullPath := d.fullPath(subPath) + + dir, err := os.Open(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + return nil, err + } + + defer dir.Close() + + fileNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(fileNames)) + for _, fileName := range fileNames { + keys = append(keys, path.Join(subPath, fileName)) + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + source := d.fullPath(sourcePath) + dest := d.fullPath(destPath) + + if _, err := os.Stat(source); os.IsNotExist(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + + if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { + return err + } + + err := os.Rename(source, dest) + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, subPath string) error { + fullPath := d.fullPath(subPath) + + _, err := os.Stat(fullPath) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + return storagedriver.PathNotFoundError{Path: subPath} + } + + err = os.RemoveAll(fullPath) + return err +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod{} +} + +// fullPath returns the absolute path of a key within the Driver's storage. +func (d *driver) fullPath(subPath string) string { + return path.Join(d.rootDirectory, subPath) +} + +type fileInfo struct { + os.FileInfo + path string +} + +var _ storagedriver.FileInfo = fileInfo{} + +// Path provides the full path of the target of this file info. +func (fi fileInfo) Path() string { + return fi.path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi fileInfo) Size() int64 { + if fi.IsDir() { + return 0 + } + + return fi.FileInfo.Size() +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi fileInfo) ModTime() time.Time { + return fi.FileInfo.ModTime() +} + +// IsDir returns true if the path is a directory. +func (fi fileInfo) IsDir() bool { + return fi.FileInfo.IsDir() +} + +type fileWriter struct { + file *os.File + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func newFileWriter(file *os.File, size int64) *fileWriter { + return &fileWriter{ + file: file, + size: size, + bw: bufio.NewWriter(file), + } +} + +func (fw *fileWriter) Write(p []byte) (int, error) { + if fw.closed { + return 0, fmt.Errorf("already closed") + } else if fw.committed { + return 0, fmt.Errorf("already committed") + } else if fw.cancelled { + return 0, fmt.Errorf("already cancelled") + } + n, err := fw.bw.Write(p) + fw.size += int64(n) + return n, err +} + +func (fw *fileWriter) Size() int64 { + return fw.size +} + +func (fw *fileWriter) Close() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + if err := fw.file.Close(); err != nil { + return err + } + fw.closed = true + return nil +} + +func (fw *fileWriter) Cancel() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + fw.cancelled = true + fw.file.Close() + return os.Remove(fw.file.Name()) +} + +func (fw *fileWriter) Commit() error { + if fw.closed { + return fmt.Errorf("already closed") + } else if fw.committed { + return fmt.Errorf("already committed") + } else if fw.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + fw.committed = true + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go new file mode 100644 index 0000000..3be8592 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go @@ -0,0 +1,113 @@ +package filesystem + +import ( + "io/ioutil" + "os" + "reflect" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +func init() { + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + driver, err := FromParameters(map[string]interface{}{ + "rootdirectory": root, + }) + if err != nil { + panic(err) + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return driver, nil + }, testsuites.NeverSkip) +} + +func TestFromParametersImpl(t *testing.T) { + + tests := []struct { + params map[string]interface{} // techincally the yaml can contain anything + expected DriverParameters + pass bool + }{ + // check we use default threads and root dirs + { + params: map[string]interface{}{}, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: defaultMaxThreads, + }, + pass: true, + }, + // Testing initiation with a string maxThreads which can't be parsed + { + params: map[string]interface{}{ + "maxthreads": "fail", + }, + expected: DriverParameters{}, + pass: false, + }, + { + params: map[string]interface{}{ + "maxthreads": "100", + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: uint64(100), + }, + pass: true, + }, + { + params: map[string]interface{}{ + "maxthreads": 100, + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: uint64(100), + }, + pass: true, + }, + // check that we use minimum thread counts + { + params: map[string]interface{}{ + "maxthreads": 1, + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: minThreads, + }, + pass: true, + }, + } + + for _, item := range tests { + params, err := fromParametersImpl(item.params) + + if !item.pass { + // We only need to assert that expected failures have an error + if err == nil { + t.Fatalf("expected error configuring filesystem driver with invalid param: %+v", item.params) + } + continue + } + + if err != nil { + t.Fatalf("unexpected error creating filesystem driver: %s", err) + } + // Note that we get a pointer to params back + if !reflect.DeepEqual(*params, item.expected) { + t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params) + } + } + +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/gcs/doc.go b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/doc.go new file mode 100644 index 0000000..0f23ea7 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/doc.go @@ -0,0 +1,3 @@ +// Package gcs implements the Google Cloud Storage driver backend. Support can be +// enabled by including the "include_gcs" build tag. +package gcs diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go new file mode 100644 index 0000000..1369c28 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go @@ -0,0 +1,873 @@ +// Package gcs provides a storagedriver.StorageDriver implementation to +// store blobs in Google cloud storage. +// +// This package leverages the google.golang.org/cloud/storage client library +//for interfacing with gcs. +// +// Because gcs is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Note that the contents of incomplete uploads are not accessible even though +// Stat returns their length +// +// +build include_gcs + +package gcs + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/api/googleapi" + "google.golang.org/cloud" + "google.golang.org/cloud/storage" + + "github.com/Sirupsen/logrus" + + ctx "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const ( + driverName = "gcs" + dummyProjectID = "" + + uploadSessionContentType = "application/x-docker-upload-session" + minChunkSize = 256 * 1024 + defaultChunkSize = 20 * minChunkSize + + maxTries = 5 +) + +var rangeHeader = regexp.MustCompile(`^bytes=([0-9])+-([0-9]+)$`) + +// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set +type driverParameters struct { + bucket string + config *jwt.Config + email string + privateKey []byte + client *http.Client + rootDirectory string + chunkSize int +} + +func init() { + factory.Register(driverName, &gcsDriverFactory{}) +} + +// gcsDriverFactory implements the factory.StorageDriverFactory interface +type gcsDriverFactory struct{} + +// Create StorageDriver from parameters +func (factory *gcsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// driver is a storagedriver.StorageDriver implementation backed by GCS +// Objects are stored at absolute keys in the provided bucket. +type driver struct { + client *http.Client + bucket string + email string + privateKey []byte + rootDirectory string + chunkSize int +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - bucket +func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + chunkSize := defaultChunkSize + chunkSizeParam, ok := parameters["chunksize"] + if ok { + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int, uint, int32, uint32, uint64, int64: + chunkSize = int(reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()) + default: + return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + + if chunkSize%minChunkSize != 0 { + return nil, fmt.Errorf("chunksize should be a multiple of %d", minChunkSize) + } + } + + var ts oauth2.TokenSource + jwtConf := new(jwt.Config) + if keyfile, ok := parameters["keyfile"]; ok { + jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile)) + if err != nil { + return nil, err + } + jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl) + if err != nil { + return nil, err + } + ts = jwtConf.TokenSource(context.Background()) + } else { + var err error + ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) + if err != nil { + return nil, err + } + } + + params := driverParameters{ + bucket: fmt.Sprint(bucket), + rootDirectory: fmt.Sprint(rootDirectory), + email: jwtConf.Email, + privateKey: jwtConf.PrivateKey, + client: oauth2.NewClient(context.Background(), ts), + chunkSize: chunkSize, + } + + return New(params) +} + +// New constructs a new driver +func New(params driverParameters) (storagedriver.StorageDriver, error) { + rootDirectory := strings.Trim(params.rootDirectory, "/") + if rootDirectory != "" { + rootDirectory += "/" + } + if params.chunkSize <= 0 || params.chunkSize%minChunkSize != 0 { + return nil, fmt.Errorf("Invalid chunksize: %d is not a positive multiple of %d", params.chunkSize, minChunkSize) + } + d := &driver{ + bucket: params.bucket, + rootDirectory: rootDirectory, + email: params.email, + privateKey: params.privateKey, + client: params.client, + chunkSize: params.chunkSize, + } + + return &base.Base{ + StorageDriver: d, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. +func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { + gcsContext := d.context(context) + name := d.pathToKey(path) + var rc io.ReadCloser + err := retry(func() error { + var err error + rc, err = storage.NewReader(gcsContext, d.bucket, name) + return err + }) + if err == storage.ErrObjectNotExist { + return nil, storagedriver.PathNotFoundError{Path: path} + } + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { + return retry(func() error { + wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) + wc.ContentType = "application/octet-stream" + return putContentsClose(wc, contents) + }) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (d *driver) Reader(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { + res, err := getObject(d.client, d.bucket, d.pathToKey(path), offset) + if err != nil { + if res != nil { + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + + if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { + res.Body.Close() + obj, err := storageStatObject(d.context(context), d.bucket, d.pathToKey(path)) + if err != nil { + return nil, err + } + if offset == int64(obj.Size) { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + } + return nil, err + } + if res.Header.Get("Content-Type") == uploadSessionContentType { + defer res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + return res.Body, nil +} + +func getObject(client *http.Client, bucket string, name string, offset int64) (*http.Response, error) { + // copied from google.golang.org/cloud/storage#NewReader : + // to set the additional "Range" header + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + if offset > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) + } + var res *http.Response + err = retry(func() error { + var err error + res, err = client.Do(req) + return err + }) + if err != nil { + return nil, err + } + return res, googleapi.CheckMediaResponse(res) +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(context ctx.Context, path string, append bool) (storagedriver.FileWriter, error) { + writer := &writer{ + client: d.client, + bucket: d.bucket, + name: d.pathToKey(path), + buffer: make([]byte, d.chunkSize), + } + + if append { + err := writer.init(path) + if err != nil { + return nil, err + } + } + return writer, nil +} + +type writer struct { + client *http.Client + bucket string + name string + size int64 + offset int64 + closed bool + sessionURI string + buffer []byte + buffSize int +} + +// Cancel removes any written content from this FileWriter. +func (w *writer) Cancel() error { + w.closed = true + err := storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + err = nil + } + } + } + return err +} + +func (w *writer) Close() error { + if w.closed { + return nil + } + w.closed = true + + err := w.writeChunk() + if err != nil { + return err + } + + // Copy the remaining bytes from the buffer to the upload session + // Normally buffSize will be smaller than minChunkSize. However, in the + // unlikely event that the upload session failed to start, this number could be higher. + // In this case we can safely clip the remaining bytes to the minChunkSize + if w.buffSize > minChunkSize { + w.buffSize = minChunkSize + } + + // commit the writes by updating the upload session + err = retry(func() error { + wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + wc.ContentType = uploadSessionContentType + wc.Metadata = map[string]string{ + "Session-URI": w.sessionURI, + "Offset": strconv.FormatInt(w.offset, 10), + } + return putContentsClose(wc, w.buffer[0:w.buffSize]) + }) + if err != nil { + return err + } + w.size = w.offset + int64(w.buffSize) + w.buffSize = 0 + return nil +} + +func putContentsClose(wc *storage.Writer, contents []byte) error { + size := len(contents) + var nn int + var err error + for nn < size { + n, err := wc.Write(contents[nn:size]) + nn += n + if err != nil { + break + } + } + if err != nil { + wc.CloseWithError(err) + return err + } + return wc.Close() +} + +// Commit flushes all content written to this FileWriter and makes it +// available for future calls to StorageDriver.GetContent and +// StorageDriver.Reader. +func (w *writer) Commit() error { + + if err := w.checkClosed(); err != nil { + return err + } + w.closed = true + + // no session started yet just perform a simple upload + if w.sessionURI == "" { + err := retry(func() error { + wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + wc.ContentType = "application/octet-stream" + return putContentsClose(wc, w.buffer[0:w.buffSize]) + }) + if err != nil { + return err + } + w.size = w.offset + int64(w.buffSize) + w.buffSize = 0 + return nil + } + size := w.offset + int64(w.buffSize) + var nn int + // loop must be performed at least once to ensure the file is committed even when + // the buffer is empty + for { + n, err := putChunk(w.client, w.sessionURI, w.buffer[nn:w.buffSize], w.offset, size) + nn += int(n) + w.offset += n + w.size = w.offset + if err != nil { + w.buffSize = copy(w.buffer, w.buffer[nn:w.buffSize]) + return err + } + if nn == w.buffSize { + break + } + } + w.buffSize = 0 + return nil +} + +func (w *writer) checkClosed() error { + if w.closed { + return fmt.Errorf("Writer already closed") + } + return nil +} + +func (w *writer) writeChunk() error { + var err error + // chunks can be uploaded only in multiples of minChunkSize + // chunkSize is a multiple of minChunkSize less than or equal to buffSize + chunkSize := w.buffSize - (w.buffSize % minChunkSize) + if chunkSize == 0 { + return nil + } + // if their is no sessionURI yet, obtain one by starting the session + if w.sessionURI == "" { + w.sessionURI, err = startSession(w.client, w.bucket, w.name) + } + if err != nil { + return err + } + nn, err := putChunk(w.client, w.sessionURI, w.buffer[0:chunkSize], w.offset, -1) + w.offset += nn + if w.offset > w.size { + w.size = w.offset + } + // shift the remaining bytes to the start of the buffer + w.buffSize = copy(w.buffer, w.buffer[int(nn):w.buffSize]) + + return err +} + +func (w *writer) Write(p []byte) (int, error) { + err := w.checkClosed() + if err != nil { + return 0, err + } + + var nn int + for nn < len(p) { + n := copy(w.buffer[w.buffSize:], p[nn:]) + w.buffSize += n + if w.buffSize == cap(w.buffer) { + err = w.writeChunk() + if err != nil { + break + } + } + nn += n + } + return nn, err +} + +// Size returns the number of bytes written to this FileWriter. +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) init(path string) error { + res, err := getObject(w.client, w.bucket, w.name, 0) + if err != nil { + return err + } + defer res.Body.Close() + if res.Header.Get("Content-Type") != uploadSessionContentType { + return storagedriver.PathNotFoundError{Path: path} + } + offset, err := strconv.ParseInt(res.Header.Get("X-Goog-Meta-Offset"), 10, 64) + if err != nil { + return err + } + buffer, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + w.sessionURI = res.Header.Get("X-Goog-Meta-Session-URI") + w.buffSize = copy(w.buffer, buffer) + w.offset = offset + w.size = offset + int64(w.buffSize) + return nil +} + +type request func() error + +func retry(req request) error { + backoff := time.Second + var err error + for i := 0; i < maxTries; i++ { + err = req() + if err == nil { + return nil + } + + status, ok := err.(*googleapi.Error) + if !ok || (status.Code != 429 && status.Code < http.StatusInternalServerError) { + return err + } + + time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond)) + if i <= 4 { + backoff = backoff * 2 + } + } + return err +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { + var fi storagedriver.FileInfoFields + //try to get as file + gcsContext := d.context(context) + obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) + if err == nil { + if obj.ContentType == uploadSessionContentType { + return nil, storagedriver.PathNotFoundError{Path: path} + } + fi = storagedriver.FileInfoFields{ + Path: path, + Size: obj.Size, + ModTime: obj.Updated, + IsDir: false, + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } + //try to get as folder + dirpath := d.pathToDirKey(path) + + var query *storage.Query + query = &storage.Query{} + query.Prefix = dirpath + query.MaxResults = 1 + + objects, err := storageListObjects(gcsContext, d.bucket, query) + if err != nil { + return nil, err + } + if len(objects.Results) < 1 { + return nil, storagedriver.PathNotFoundError{Path: path} + } + fi = storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + } + obj = objects.Results[0] + if obj.Name == dirpath { + fi.Size = obj.Size + fi.ModTime = obj.Updated + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the +//given path. +func (d *driver) List(context ctx.Context, path string) ([]string, error) { + var query *storage.Query + query = &storage.Query{} + query.Delimiter = "/" + query.Prefix = d.pathToDirKey(path) + list := make([]string, 0, 64) + for { + objects, err := storageListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, object := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operations. Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted + if object.Deleted.IsZero() && object.ContentType != uploadSessionContentType { + list = append(list, d.keyToPath(object.Name)) + } + } + for _, subpath := range objects.Prefixes { + subpath = d.keyToPath(subpath) + list = append(list, subpath) + } + query = objects.Next + if query == nil { + break + } + } + if path != "/" && len(list) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in Google Cloud Storage. + return nil, storagedriver.PathNotFoundError{Path: path} + } + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. +func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { + gcsContext := d.context(context) + _, err := storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + } + return err + } + err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) + // if deleting the file fails, log the error, but do not fail; the file was successfully copied, + // and the original should eventually be cleaned when purging the uploads folder. + if err != nil { + logrus.Infof("error deleting file: %v due to %v", sourcePath, err) + } + return nil +} + +// listAll recursively lists all names of objects stored at "prefix" and its subpaths. +func (d *driver) listAll(context context.Context, prefix string) ([]string, error) { + list := make([]string, 0, 64) + query := &storage.Query{} + query.Prefix = prefix + query.Versions = false + for { + objects, err := storageListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, obj := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operations. Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted + if obj.Deleted.IsZero() { + list = append(list, obj.Name) + } + } + query = objects.Next + if query == nil { + break + } + } + return list, nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(context ctx.Context, path string) error { + prefix := d.pathToDirKey(path) + gcsContext := d.context(context) + keys, err := d.listAll(gcsContext, prefix) + if err != nil { + return err + } + if len(keys) > 0 { + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + for _, key := range keys { + err := storageDeleteObject(gcsContext, d.bucket, key) + // GCS only guarantees eventual consistency, so listAll might return + // paths that no longer exist. If this happens, just ignore any not + // found error + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + err = nil + } + } + if err != nil { + return err + } + } + return nil + } + err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path)) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + } + } + return err +} + +func storageDeleteObject(context context.Context, bucket string, name string) error { + return retry(func() error { + return storage.DeleteObject(context, bucket, name) + }) +} + +func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) { + var obj *storage.Object + err := retry(func() error { + var err error + obj, err = storage.StatObject(context, bucket, name) + return err + }) + return obj, err +} + +func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { + var objs *storage.Objects + err := retry(func() error { + var err error + objs, err = storage.ListObjects(context, bucket, q) + return err + }) + return objs, err +} + +func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) { + var obj *storage.Object + err := retry(func() error { + var err error + obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, attrs) + return err + }) + return obj, err +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options. +// Returns ErrUnsupportedMethod if this driver has no privateKey +func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) { + if d.privateKey == nil { + return "", storagedriver.ErrUnsupportedMethod{} + } + + name := d.pathToKey(path) + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + opts := &storage.SignedURLOptions{ + GoogleAccessID: d.email, + PrivateKey: d.privateKey, + Method: methodString, + Expires: expiresTime, + } + return storage.SignedURL(d.bucket, name, opts) +} + +func startSession(client *http.Client, bucket string, name string) (uri string, err error) { + u := &url.URL{ + Scheme: "https", + Host: "www.googleapis.com", + Path: fmt.Sprintf("/upload/storage/v1/b/%v/o", bucket), + RawQuery: fmt.Sprintf("uploadType=resumable&name=%v", name), + } + err = retry(func() error { + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return err + } + req.Header.Set("X-Upload-Content-Type", "application/octet-stream") + req.Header.Set("Content-Length", "0") + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + uri = resp.Header.Get("Location") + return nil + }) + return uri, err +} + +func putChunk(client *http.Client, sessionURI string, chunk []byte, from int64, totalSize int64) (int64, error) { + bytesPut := int64(0) + err := retry(func() error { + req, err := http.NewRequest("PUT", sessionURI, bytes.NewReader(chunk)) + if err != nil { + return err + } + length := int64(len(chunk)) + to := from + length - 1 + size := "*" + if totalSize >= 0 { + size = strconv.FormatInt(totalSize, 10) + } + req.Header.Set("Content-Type", "application/octet-stream") + if from == to+1 { + req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", size)) + } else { + req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", from, to, size)) + } + req.Header.Set("Content-Length", strconv.FormatInt(length, 10)) + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if totalSize < 0 && resp.StatusCode == 308 { + groups := rangeHeader.FindStringSubmatch(resp.Header.Get("Range")) + end, err := strconv.ParseInt(groups[2], 10, 64) + if err != nil { + return err + } + bytesPut = end - from + 1 + return nil + } + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + bytesPut = to - from + 1 + return nil + }) + return bytesPut, err +} + +func (d *driver) context(context ctx.Context) context.Context { + return cloud.WithContext(context, dummyProjectID, d.client) +} + +func (d *driver) pathToKey(path string) string { + return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/") +} + +func (d *driver) pathToDirKey(path string) string { + return d.pathToKey(path) + "/" +} + +func (d *driver) keyToPath(key string) string { + return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/") +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go new file mode 100644 index 0000000..f2808d5 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go @@ -0,0 +1,311 @@ +// +build include_gcs + +package gcs + +import ( + "io/ioutil" + "os" + "testing" + + "fmt" + ctx "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + "google.golang.org/cloud/storage" + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error) +var skipGCS func() string + +func init() { + bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") + credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") + + // Skip GCS storage driver tests if environment variable parameters are not provided + skipGCS = func() string { + if bucket == "" || credentials == "" { + return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS" + } + return "" + } + + if skipGCS() != "" { + return + } + + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + var ts oauth2.TokenSource + var email string + var privateKey []byte + + ts, err = google.DefaultTokenSource(ctx.Background(), storage.ScopeFullControl) + if err != nil { + // Assume that the file contents are within the environment variable since it exists + // but does not contain a valid file path + jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl) + if err != nil { + panic(fmt.Sprintf("Error reading JWT config : %s", err)) + } + email = jwtConfig.Email + privateKey = []byte(jwtConfig.PrivateKey) + if len(privateKey) == 0 { + panic("Error reading JWT config : missing private_key property") + } + if email == "" { + panic("Error reading JWT config : missing client_email property") + } + ts = jwtConfig.TokenSource(ctx.Background()) + } + + gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { + parameters := driverParameters{ + bucket: bucket, + rootDirectory: root, + email: email, + privateKey: privateKey, + client: oauth2.NewClient(ctx.Background(), ts), + chunkSize: defaultChunkSize, + } + + return New(parameters) + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return gcsDriverConstructor(root) + }, skipGCS) +} + +// Test Committing a FileWriter without having called Write +func TestCommitEmpty(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := ctx.Background() + + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != 0 { + t.Fatalf("writer.Size: %d != 0", writer.Size()) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != 0 { + t.Fatalf("len(driver.GetContent(..)): %d != 0", len(readContents)) + } +} + +// Test Committing a FileWriter after having written exactly +// defaultChunksize bytes. +func TestCommit(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := ctx.Background() + + contents := make([]byte, defaultChunkSize) + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + _, err = writer.Write(contents) + if err != nil { + t.Fatalf("writer.Write: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != int64(len(contents)) { + t.Fatalf("writer.Size: %d != %d", writer.Size(), len(contents)) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != len(contents) { + t.Fatalf("len(driver.GetContent(..)): %d != %d", len(readContents), len(contents)) + } +} + +func TestRetry(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + assertError := func(expected string, observed error) { + observedMsg := "" + if observed != nil { + observedMsg = observed.Error() + } + if observedMsg != expected { + t.Fatalf("expected %v, observed %v\n", expected, observedMsg) + } + } + + err := retry(func() error { + return &googleapi.Error{ + Code: 503, + Message: "google api error", + } + }) + assertError("googleapi: Error 503: google api error", err) + + err = retry(func() error { + return &googleapi.Error{ + Code: 404, + Message: "google api error", + } + }) + assertError("googleapi: Error 404: google api error", err) + + err = retry(func() error { + return fmt.Errorf("error") + }) + assertError("error", err) +} + +func TestEmptyRootList(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := gcsDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := gcsDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := ctx.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer func() { + err := rootedDriver.Delete(ctx, filename) + if err != nil { + t.Fatalf("failed to remove %v due to %v\n", filename, err) + } + }() + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} + +// TestMoveDirectory checks that moving a directory returns an error. +func TestMoveDirectory(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + ctx := ctx.Background() + contents := []byte("contents") + // Create a regular file. + err = driver.PutContent(ctx, "/parent/dir/foo", contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer func() { + err := driver.Delete(ctx, "/parent") + if err != nil { + t.Fatalf("failed to remove /parent due to %v\n", err) + } + }() + + err = driver.Move(ctx, "/parent/dir", "/parent/other") + if err == nil { + t.Fatalf("Moving directory /parent/dir /parent/other should have return a non-nil error\n") + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go new file mode 100644 index 0000000..eb2fd1c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go @@ -0,0 +1,312 @@ +package inmemory + +import ( + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "inmemory" + +func init() { + factory.Register(driverName, &inMemoryDriverFactory{}) +} + +// inMemoryDriverFacotry implements the factory.StorageDriverFactory interface. +type inMemoryDriverFactory struct{} + +func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return New(), nil +} + +type driver struct { + root *dir + mutex sync.RWMutex +} + +// baseEmbed allows us to hide the Base embed. +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local map. +// Intended solely for example and testing purposes. +type Driver struct { + baseEmbed // embedded, hidden base driver. +} + +var _ storagedriver.StorageDriver = &Driver{} + +// New constructs a new Driver. +func New() *Driver { + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: &driver{ + root: &dir{ + common: common{ + p: "/", + mod: time.Now(), + }, + }, + }, + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface. + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + rc, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + return ioutil.ReadAll(rc) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(p) + + f, err := d.root.mkfile(normalized) + if err != nil { + // TODO(stevvooe): Again, we need to clarify when this is not a + // directory in StorageDriver API. + return fmt.Errorf("not a file") + } + + f.truncate() + f.WriteAt(contents, 0) + + return nil +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + normalized := normalize(path) + found := d.root.find(normalized) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + if found.isdir() { + return nil, fmt.Errorf("%q is a directory", path) + } + + return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(path) + + f, err := d.root.mkfile(normalized) + if err != nil { + return nil, fmt.Errorf("not a file") + } + + if !append { + f.truncate() + } + + return d.newWriter(f), nil +} + +// Stat returns info about the provided path. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + found := d.root.find(normalized) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + fi := storagedriver.FileInfoFields{ + Path: path, + IsDir: found.isdir(), + ModTime: found.modtime(), + } + + if !fi.IsDir { + fi.Size = int64(len(found.(*file).data)) + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + + found := d.root.find(normalized) + + if !found.isdir() { + return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... + } + + entries, err := found.(*dir).list(normalized) + + if err != nil { + switch err { + case errNotExists: + return nil, storagedriver.PathNotFoundError{Path: path} + case errIsNotDir: + return nil, fmt.Errorf("not a directory") + default: + return nil, err + } + } + + return entries, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) + + err := d.root.move(normalizedSrc, normalizedDst) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: destPath} + default: + return err + } +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(path) + + err := d.root.delete(normalized) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: path} + default: + return err + } +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod{} +} + +type writer struct { + d *driver + f *file + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(f *file) storagedriver.FileWriter { + return &writer{ + d: d, + f: f, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.f.WriteAt(p, int64(len(w.f.data))) +} + +func (w *writer) Size() int64 { + w.d.mutex.RLock() + defer w.d.mutex.RUnlock() + + return int64(len(w.f.data)) +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.d.root.delete(w.f.path()) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go new file mode 100644 index 0000000..dbc1916 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go @@ -0,0 +1,19 @@ +package inmemory + +import ( + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +func init() { + inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(), nil + } + testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go new file mode 100644 index 0000000..cdefacf --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go @@ -0,0 +1,338 @@ +package inmemory + +import ( + "fmt" + "io" + "path" + "sort" + "strings" + "time" +) + +var ( + errExists = fmt.Errorf("exists") + errNotExists = fmt.Errorf("notexists") + errIsNotDir = fmt.Errorf("notdir") + errIsDir = fmt.Errorf("isdir") +) + +type node interface { + name() string + path() string + isdir() bool + modtime() time.Time +} + +// dir is the central type for the memory-based storagedriver. All operations +// are dispatched from a root dir. +type dir struct { + common + + // TODO(stevvooe): Use sorted slice + search. + children map[string]node +} + +var _ node = &dir{} + +func (d *dir) isdir() bool { + return true +} + +// add places the node n into dir d. +func (d *dir) add(n node) { + if d.children == nil { + d.children = make(map[string]node) + } + + d.children[n.name()] = n + d.mod = time.Now() +} + +// find searches for the node, given path q in dir. If the node is found, it +// will be returned. If the node is not found, the closet existing parent. If +// the node is found, the returned (node).path() will match q. +func (d *dir) find(q string) node { + q = strings.Trim(q, "/") + i := strings.Index(q, "/") + + if q == "" { + return d + } + + if i == 0 { + panic("shouldn't happen, no root paths") + } + + var component string + if i < 0 { + // No more path components + component = q + } else { + component = q[:i] + } + + child, ok := d.children[component] + if !ok { + // Node was not found. Return p and the current node. + return d + } + + if child.isdir() { + // traverse down! + q = q[i+1:] + return child.(*dir).find(q) + } + + return child +} + +func (d *dir) list(p string) ([]string, error) { + n := d.find(p) + + if n.path() != p { + return nil, errNotExists + } + + if !n.isdir() { + return nil, errIsNotDir + } + + var children []string + for _, child := range n.(*dir).children { + children = append(children, child.path()) + } + + sort.Strings(children) + return children, nil +} + +// mkfile or return the existing one. returns an error if it exists and is a +// directory. Essentially, this is open or create. +func (d *dir) mkfile(p string) (*file, error) { + n := d.find(p) + if n.path() == p { + if n.isdir() { + return nil, errIsDir + } + + return n.(*file), nil + } + + dirpath, filename := path.Split(p) + // Make any non-existent directories + n, err := d.mkdirs(dirpath) + if err != nil { + return nil, err + } + + dd := n.(*dir) + n = &file{ + common: common{ + p: path.Join(dd.path(), filename), + mod: time.Now(), + }, + } + + dd.add(n) + return n.(*file), nil +} + +// mkdirs creates any missing directory entries in p and returns the result. +func (d *dir) mkdirs(p string) (*dir, error) { + p = normalize(p) + + n := d.find(p) + + if !n.isdir() { + // Found something there + return nil, errIsNotDir + } + + if n.path() == p { + return n.(*dir), nil + } + + dd := n.(*dir) + + relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/") + + if relative == "" { + return dd, nil + } + + components := strings.Split(relative, "/") + for _, component := range components { + d, err := dd.mkdir(component) + + if err != nil { + // This should actually never happen, since there are no children. + return nil, err + } + dd = d + } + + return dd, nil +} + +// mkdir creates a child directory under d with the given name. +func (d *dir) mkdir(name string) (*dir, error) { + if name == "" { + return nil, fmt.Errorf("invalid dirname") + } + + _, ok := d.children[name] + if ok { + return nil, errExists + } + + child := &dir{ + common: common{ + p: path.Join(d.path(), name), + mod: time.Now(), + }, + } + d.add(child) + d.mod = time.Now() + + return child, nil +} + +func (d *dir) move(src, dst string) error { + dstDirname, _ := path.Split(dst) + + dp, err := d.mkdirs(dstDirname) + if err != nil { + return err + } + + srcDirname, srcFilename := path.Split(src) + sp := d.find(srcDirname) + + if normalize(srcDirname) != normalize(sp.path()) { + return errNotExists + } + + spd, ok := sp.(*dir) + if !ok { + return errIsNotDir // paranoid. + } + + s, ok := spd.children[srcFilename] + if !ok { + return errNotExists + } + + delete(spd.children, srcFilename) + + switch n := s.(type) { + case *dir: + n.p = dst + case *file: + n.p = dst + } + + dp.add(s) + + return nil +} + +func (d *dir) delete(p string) error { + dirname, filename := path.Split(p) + parent := d.find(dirname) + + if normalize(dirname) != normalize(parent.path()) { + return errNotExists + } + + if _, ok := parent.(*dir).children[filename]; !ok { + return errNotExists + } + + delete(parent.(*dir).children, filename) + return nil +} + +// dump outputs a primitive directory structure to stdout. +func (d *dir) dump(indent string) { + fmt.Println(indent, d.name()+"/") + + for _, child := range d.children { + if child.isdir() { + child.(*dir).dump(indent + "\t") + } else { + fmt.Println(indent, child.name()) + } + + } +} + +func (d *dir) String() string { + return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) +} + +// file stores actual data in the fs tree. It acts like an open, seekable file +// where operations are conducted through ReadAt and WriteAt. Use it with +// SectionReader for the best effect. +type file struct { + common + data []byte +} + +var _ node = &file{} + +func (f *file) isdir() bool { + return false +} + +func (f *file) truncate() { + f.data = f.data[:0] +} + +func (f *file) sectionReader(offset int64) io.Reader { + return io.NewSectionReader(f, offset, int64(len(f.data))-offset) +} + +func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { + return copy(p, f.data[offset:]), nil +} + +func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { + off := int(offset) + if cap(f.data) < off+len(p) { + data := make([]byte, len(f.data), off+len(p)) + copy(data, f.data) + f.data = data + } + + f.mod = time.Now() + f.data = f.data[:off+len(p)] + + return copy(f.data[off:off+len(p)], p), nil +} + +func (f *file) String() string { + return fmt.Sprintf("&file{path: %q}", f.p) +} + +// common provides shared fields and methods for node implementations. +type common struct { + p string + mod time.Time +} + +func (c *common) name() string { + _, name := path.Split(c.p) + return name +} + +func (c *common) path() string { + return c.p +} + +func (c *common) modtime() time.Time { + return c.mod +} + +func normalize(p string) string { + return "/" + strings.Trim(p, "/") +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go new file mode 100644 index 0000000..b0618d1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go @@ -0,0 +1,136 @@ +// Package middleware - cloudfront wrapper for storage libs +// N.B. currently only works with S3, not arbitrary sites +// +package middleware + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/service/cloudfront/sign" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" +) + +// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that +// constructs temporary signed CloudFront URLs from the storagedriver layer URL, +// then issues HTTP Temporary Redirects to this CloudFront content URL. +type cloudFrontStorageMiddleware struct { + storagedriver.StorageDriver + urlSigner *sign.URLSigner + baseURL string + duration time.Duration +} + +var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} + +// newCloudFrontLayerHandler constructs and returns a new CloudFront +// LayerHandler implementation. +// Required options: baseurl, privatekey, keypairid +func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { + base, ok := options["baseurl"] + if !ok { + return nil, fmt.Errorf("no baseurl provided") + } + baseURL, ok := base.(string) + if !ok { + return nil, fmt.Errorf("baseurl must be a string") + } + if !strings.Contains(baseURL, "://") { + baseURL = "https://" + baseURL + } + if !strings.HasSuffix(baseURL, "/") { + baseURL += "/" + } + if _, err := url.Parse(baseURL); err != nil { + return nil, fmt.Errorf("invalid baseurl: %v", err) + } + pk, ok := options["privatekey"] + if !ok { + return nil, fmt.Errorf("no privatekey provided") + } + pkPath, ok := pk.(string) + if !ok { + return nil, fmt.Errorf("privatekey must be a string") + } + kpid, ok := options["keypairid"] + if !ok { + return nil, fmt.Errorf("no keypairid provided") + } + keypairID, ok := kpid.(string) + if !ok { + return nil, fmt.Errorf("keypairid must be a string") + } + + pkBytes, err := ioutil.ReadFile(pkPath) + if err != nil { + return nil, fmt.Errorf("failed to read privatekey file: %s", err) + } + + block, _ := pem.Decode([]byte(pkBytes)) + if block == nil { + return nil, fmt.Errorf("failed to decode private key as an rsa private key") + } + privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + + urlSigner := sign.NewURLSigner(keypairID, privateKey) + + duration := 20 * time.Minute + d, ok := options["duration"] + if ok { + switch d := d.(type) { + case time.Duration: + duration = d + case string: + dur, err := time.ParseDuration(d) + if err != nil { + return nil, fmt.Errorf("invalid duration: %s", err) + } + duration = dur + } + } + + return &cloudFrontStorageMiddleware{ + StorageDriver: storageDriver, + urlSigner: urlSigner, + baseURL: baseURL, + duration: duration, + }, nil +} + +// S3BucketKeyer is any type that is capable of returning the S3 bucket key +// which should be cached by AWS CloudFront. +type S3BucketKeyer interface { + S3BucketKey(path string) string +} + +// Resolve returns an http.Handler which can serve the contents of the given +// Layer, or an error if not supported by the storagedriver. +func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + // TODO(endophage): currently only supports S3 + keyer, ok := lh.StorageDriver.(S3BucketKeyer) + if !ok { + context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") + return lh.StorageDriver.URLFor(ctx, path, options) + } + + cfURL, err := lh.urlSigner.Sign(lh.baseURL+keyer.S3BucketKey(path), time.Now().Add(lh.duration)) + if err != nil { + return "", err + } + return cfURL, nil +} + +// init registers the cloudfront layerHandler backend. +func init() { + storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware.go new file mode 100644 index 0000000..20cd7da --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware.go @@ -0,0 +1,50 @@ +package middleware + +import ( + "fmt" + "net/url" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" +) + +type redirectStorageMiddleware struct { + storagedriver.StorageDriver + scheme string + host string +} + +var _ storagedriver.StorageDriver = &redirectStorageMiddleware{} + +func newRedirectStorageMiddleware(sd storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { + o, ok := options["baseurl"] + if !ok { + return nil, fmt.Errorf("no baseurl provided") + } + b, ok := o.(string) + if !ok { + return nil, fmt.Errorf("baseurl must be a string") + } + u, err := url.Parse(b) + if err != nil { + return nil, fmt.Errorf("unable to parse redirect baseurl: %s", b) + } + if u.Scheme == "" { + return nil, fmt.Errorf("no scheme specified for redirect baseurl") + } + if u.Host == "" { + return nil, fmt.Errorf("no host specified for redirect baseurl") + } + + return &redirectStorageMiddleware{StorageDriver: sd, scheme: u.Scheme, host: u.Host}, nil +} + +func (r *redirectStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + u := &url.URL{Scheme: r.scheme, Host: r.host, Path: path} + return u.String(), nil +} + +func init() { + storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware)) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware_test.go new file mode 100644 index 0000000..1eb6309 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware_test.go @@ -0,0 +1,58 @@ +package middleware + +import ( + "testing" + + check "gopkg.in/check.v1" +) + +func Test(t *testing.T) { check.TestingT(t) } + +type MiddlewareSuite struct{} + +var _ = check.Suite(&MiddlewareSuite{}) + +func (s *MiddlewareSuite) TestNoConfig(c *check.C) { + options := make(map[string]interface{}) + _, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.ErrorMatches, "no baseurl provided") +} + +func (s *MiddlewareSuite) TestMissingScheme(c *check.C) { + options := make(map[string]interface{}) + options["baseurl"] = "example.com" + _, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.ErrorMatches, "no scheme specified for redirect baseurl") +} + +func (s *MiddlewareSuite) TestHttpsPort(c *check.C) { + options := make(map[string]interface{}) + options["baseurl"] = "https://example.com:5443" + middleware, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.Equals, nil) + + m, ok := middleware.(*redirectStorageMiddleware) + c.Assert(ok, check.Equals, true) + c.Assert(m.scheme, check.Equals, "https") + c.Assert(m.host, check.Equals, "example.com:5443") + + url, err := middleware.URLFor(nil, "/rick/data", nil) + c.Assert(err, check.Equals, nil) + c.Assert(url, check.Equals, "https://example.com:5443/rick/data") +} + +func (s *MiddlewareSuite) TestHTTP(c *check.C) { + options := make(map[string]interface{}) + options["baseurl"] = "http://example.com" + middleware, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.Equals, nil) + + m, ok := middleware.(*redirectStorageMiddleware) + c.Assert(ok, check.Equals, true) + c.Assert(m.scheme, check.Equals, "http") + c.Assert(m.host, check.Equals, "example.com") + + url, err := middleware.URLFor(nil, "morty/data", nil) + c.Assert(err, check.Equals, nil) + c.Assert(url, check.Equals, "http://example.com/morty/data") +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go new file mode 100644 index 0000000..7e40a8d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go @@ -0,0 +1,39 @@ +package storagemiddleware + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// InitFunc is the type of a StorageMiddleware factory function and is +// used to register the constructor for different StorageMiddleware backends. +type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) + +var storageMiddlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a StorageMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if storageMiddlewares == nil { + storageMiddlewares = make(map[string]InitFunc) + } + if _, exists := storageMiddlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + storageMiddlewares[name] = initFunc + + return nil +} + +// Get constructs a StorageMiddleware with the given options using the named backend. +func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { + if storageMiddlewares != nil { + if initFunc, exists := storageMiddlewares[name]; exists { + return initFunc(storageDriver, options) + } + } + + return nil, fmt.Errorf("no storage middleware registered with name: %s", name) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go new file mode 100644 index 0000000..d1bc932 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go @@ -0,0 +1,3 @@ +// Package oss implements the Aliyun OSS Storage driver backend. Support can be +// enabled by including the "include_oss" build tag. +package oss diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go new file mode 100644 index 0000000..4d21592 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go @@ -0,0 +1,683 @@ +// Package oss provides a storagedriver.StorageDriver implementation to +// store blobs in Aliyun OSS cloud storage. +// +// This package leverages the denverdino/aliyungo client library for interfacing with +// oss. +// +// Because OSS is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// +build include_oss + +package oss + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/docker/distribution/context" + + "github.com/Sirupsen/logrus" + "github.com/denverdino/aliyungo/oss" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "oss" + +// minChunkSize defines the minimum multipart upload chunk size +// OSS API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize +const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk + +// listMax is the largest amount of objects you can request from OSS in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKeyID string + AccessKeySecret string + Bucket string + Region oss.Region + Internal bool + Encrypt bool + Secure bool + ChunkSize int64 + RootDirectory string + Endpoint string +} + +func init() { + factory.Register(driverName, &ossDriverFactory{}) +} + +// ossDriverFactory implements the factory.StorageDriverFactory interface +type ossDriverFactory struct{} + +func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Client *oss.Client + Bucket *oss.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS +// Objects are stored at absolute keys in the provided bucket. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + + accessKey, ok := parameters["accesskeyid"] + if !ok { + return nil, fmt.Errorf("No accesskeyid parameter provided") + } + secretKey, ok := parameters["accesskeysecret"] + if !ok { + return nil, fmt.Errorf("No accesskeysecret parameter provided") + } + + regionName, ok := parameters["region"] + if !ok || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + internalBool := false + internal, ok := parameters["internal"] + if ok { + internalBool, ok = internal.(bool) + if !ok { + return nil, fmt.Errorf("The internal parameter should be a boolean") + } + } + + encryptBool := false + encrypt, ok := parameters["encrypt"] + if ok { + encryptBool, ok = encrypt.(bool) + if !ok { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + } + + secureBool := true + secure, ok := parameters["secure"] + if ok { + secureBool, ok = secure.(bool) + if !ok { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam, ok := parameters["chunksize"] + if ok { + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + default: + return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + endpoint, ok := parameters["endpoint"] + if !ok { + endpoint = "" + } + + params := DriverParameters{ + AccessKeyID: fmt.Sprint(accessKey), + AccessKeySecret: fmt.Sprint(secretKey), + Bucket: fmt.Sprint(bucket), + Region: oss.Region(fmt.Sprint(regionName)), + ChunkSize: chunkSize, + RootDirectory: fmt.Sprint(rootDirectory), + Encrypt: encryptBool, + Secure: secureBool, + Internal: internalBool, + Endpoint: fmt.Sprint(endpoint), + } + + return New(params) +} + +// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + + client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) + client.SetEndpoint(params.Endpoint) + bucket := client.Bucket(params.Bucket) + client.SetDebug(false) + + // Validate that the given credentials have at least read permissions in the + // given bucket scope. + if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { + return nil, err + } + + // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new OSS client while another one is running on the same bucket. + + d := &driver{ + Client: client, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.ossPath(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) + if err != nil { + return nil, parseError(path, err) + } + + // Due to Aliyun OSS API, status 200 and whole object will be return instead of an + // InvalidRange error when range is invalid. + // + // OSS sever will always return http.StatusPartialContent if range is acceptable. + if resp.StatusCode != http.StatusPartialContent { + resp.Body.Close() + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return resp.Body, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.ossPath(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return nil, err + } + return d.newWriter(key, multi, nil), nil + } + multis, _, err := d.Bucket.ListMulti(key, "") + if err != nil { + return nil, parseError(path, err) + } + for _, multi := range multis { + if key != multi.Key { + continue + } + parts, err := multi.ListParts() + if err != nil { + return nil, parseError(path, err) + } + var multiSize int64 + for _, part := range parts { + multiSize += part.Size + } + return d.newWriter(key, multi, parts), nil + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.ossPath(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && opath[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.ossPath("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +const maxConcurrency = 10 + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath)) + err := d.Bucket.CopyLargeFileInParallel(d.ossPath(sourcePath), d.ossPath(destPath), + d.getContentType(), + getPermissions(), + oss.Options{}, + maxConcurrency) + if err != nil { + logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + ossPath := d.ossPath(path) + listResponse, err := d.Bucket.List(ossPath, "", "", listMax) + if err != nil || len(listResponse.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + ossObjects := make([]oss.Object, listMax) + + for len(listResponse.Contents) > 0 { + numOssObjects := len(listResponse.Contents) + for index, key := range listResponse.Contents { + // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). + if len(key.Key) > len(ossPath) && (key.Key)[len(ossPath)] != '/' { + numOssObjects = index + break + } + ossObjects[index].Key = key.Key + } + + err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:numOssObjects]}) + if err != nil { + return nil + } + + if numOssObjects < len(listResponse.Contents) { + return nil + } + + listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime) + signedURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) + logrus.Infof("signed URL: %s", signedURL) + return signedURL, nil +} + +func (d *driver) ossPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +func parseError(path string, err error) error { + if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func hasCode(err error, code string) bool { + ossErr, ok := err.(*oss.Error) + return ok && ossErr.Code == code +} + +func (d *driver) getOptions() oss.Options { + return oss.Options{ServerSideEncryption: d.Encrypt} +} + +func getPermissions() oss.ACL { + return oss.Private +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. +type writer struct { + driver *driver + key string + multi *oss.Multi + parts []oss.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key string, multi *oss.Multi, parts []oss.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += part.Size + } + return &writer{ + driver: d, + key: key, + multi: multi, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { + err := w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return 0, err + } + + multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) + if err != nil { + return 0, err + } + w.multi = multi + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + contents, err := w.driver.Bucket.Get(w.key) + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart = contents + } else { + // Otherwise we can use the old file as the new first part + _, part, err := multi.PutPartCopy(1, oss.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + if err != nil { + return 0, err + } + w.parts = []oss.Part{part} + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + err := w.multi.Abort() + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + err = w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. +// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) + if err != nil { + return err + } + w.parts = append(w.parts, part) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go new file mode 100644 index 0000000..fbae5d9 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go @@ -0,0 +1,144 @@ +// +build include_oss + +package oss + +import ( + "io/ioutil" + + alioss "github.com/denverdino/aliyungo/oss" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + //"log" + "os" + "strconv" + "testing" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var ossDriverConstructor func(rootDirectory string) (*Driver, error) + +var skipCheck func() string + +func init() { + accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") + secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") + bucket := os.Getenv("OSS_BUCKET") + region := os.Getenv("OSS_REGION") + internal := os.Getenv("OSS_INTERNAL") + encrypt := os.Getenv("OSS_ENCRYPT") + secure := os.Getenv("OSS_SECURE") + endpoint := os.Getenv("OSS_ENDPOINT") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + ossDriverConstructor = func(rootDirectory string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := false + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + internalBool := false + if internal != "" { + internalBool, err = strconv.ParseBool(internal) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + AccessKeyID: accessKey, + AccessKeySecret: secretKey, + Bucket: bucket, + Region: alioss.Region(region), + Internal: internalBool, + ChunkSize: minChunkSize, + RootDirectory: rootDirectory, + Encrypt: encryptBool, + Secure: secureBool, + Endpoint: endpoint, + } + + return New(parameters) + } + + // Skip OSS storage driver tests if environment variable parameters are not provided + skipCheck = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return ossDriverConstructor(root) + }, skipCheck) +} + +func TestEmptyRootList(t *testing.T) { + if skipCheck() != "" { + t.Skip(skipCheck()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := ossDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := ossDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := ossDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go new file mode 100644 index 0000000..c9d19c4 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go @@ -0,0 +1,1189 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. +// +// This package leverages the official aws client library for interfacing with +// S3. +// +// Because S3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. +package s3 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/transport" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3aws" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +// maxChunkSize defines the maximum multipart upload chunk size allowed by S3. +const maxChunkSize = 5 << 30 + +const defaultChunkSize = 2 * minChunkSize + +const ( + // defaultMultipartCopyChunkSize defines the default chunk size for all + // but the last Upload Part - Copy operation of a multipart copy. + // Empirically, 32 MB is optimal. + defaultMultipartCopyChunkSize = 32 << 20 + + // defaultMultipartCopyMaxConcurrency defines the default maximum number + // of concurrent Upload Part - Copy operations for a multipart copy. + defaultMultipartCopyMaxConcurrency = 100 + + // defaultMultipartCopyThresholdSize defines the default object size + // above which multipart copy will be used. (PUT Object - Copy is used + // for objects at or below this size.) Empirically, 32 MB is optimal. + defaultMultipartCopyThresholdSize = 32 << 20 +) + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +// noStorageClass defines the value to be used if storage class is not supported by the S3 endpoint +const noStorageClass = "NONE" + +// validRegions maps known s3 region identifiers to region descriptors +var validRegions = map[string]struct{}{} + +// validObjectACLs contains known s3 object Acls +var validObjectACLs = map[string]struct{}{} + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region string + RegionEndpoint string + Encrypt bool + KeyID string + Secure bool + V4Auth bool + ChunkSize int64 + MultipartCopyChunkSize int64 + MultipartCopyMaxConcurrency int64 + MultipartCopyThresholdSize int64 + RootDirectory string + StorageClass string + UserAgent string + ObjectACL string +} + +func init() { + for _, region := range []string{ + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-central-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ap-northeast-2", + "sa-east-1", + "cn-north-1", + "us-gov-west-1", + } { + validRegions[region] = struct{}{} + } + + for _, objectACL := range []string{ + s3.ObjectCannedACLPrivate, + s3.ObjectCannedACLPublicRead, + s3.ObjectCannedACLPublicReadWrite, + s3.ObjectCannedACLAuthenticatedRead, + s3.ObjectCannedACLAwsExecRead, + s3.ObjectCannedACLBucketOwnerRead, + s3.ObjectCannedACLBucketOwnerFullControl, + } { + validObjectACLs[objectACL] = struct{}{} + } + + // Register this as the default s3 driver in addition to s3aws + factory.Register("s3", &s3DriverFactory{}) + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket string + ChunkSize int64 + Encrypt bool + KeyID string + MultipartCopyChunkSize int64 + MultipartCopyMaxConcurrency int64 + MultipartCopyThresholdSize int64 + RootDirectory string + StorageClass string + ObjectACL string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey := parameters["accesskey"] + if accessKey == nil { + accessKey = "" + } + secretKey := parameters["secretkey"] + if secretKey == nil { + secretKey = "" + } + + regionEndpoint := parameters["regionendpoint"] + if regionEndpoint == nil { + regionEndpoint = "" + } + + regionName, ok := parameters["region"] + if regionName == nil || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := fmt.Sprint(regionName) + // Don't check the region value if a custom endpoint is provided. + if regionEndpoint == "" { + if _, ok = validRegions[region]; !ok { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } + } + + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encryptBool := false + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + + secureBool := true + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := strconv.ParseBool(secure) + if err != nil { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + + v4Bool := true + v4auth := parameters["v4auth"] + switch v4auth := v4auth.(type) { + case string: + b, err := strconv.ParseBool(v4auth) + if err != nil { + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + v4Bool = b + case bool: + v4Bool = v4auth + case nil: + // do nothing + default: + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + + keyID := parameters["keyid"] + if keyID == nil { + keyID = "" + } + + chunkSize, err := getParameterAsInt64(parameters, "chunksize", defaultChunkSize, minChunkSize, maxChunkSize) + if err != nil { + return nil, err + } + + multipartCopyChunkSize, err := getParameterAsInt64(parameters, "multipartcopychunksize", defaultMultipartCopyChunkSize, minChunkSize, maxChunkSize) + if err != nil { + return nil, err + } + + multipartCopyMaxConcurrency, err := getParameterAsInt64(parameters, "multipartcopymaxconcurrency", defaultMultipartCopyMaxConcurrency, 1, math.MaxInt64) + if err != nil { + return nil, err + } + + multipartCopyThresholdSize, err := getParameterAsInt64(parameters, "multipartcopythresholdsize", defaultMultipartCopyThresholdSize, 0, maxChunkSize) + if err != nil { + return nil, err + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { + rootDirectory = "" + } + + storageClass := s3.StorageClassStandard + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { + storageClassString, ok := storageClassParam.(string) + if !ok { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", + []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) + } + // All valid storage class parameters are UPPERCASE, so be a bit more flexible here + storageClassString = strings.ToUpper(storageClassString) + if storageClassString != noStorageClass && + storageClassString != s3.StorageClassStandard && + storageClassString != s3.StorageClassReducedRedundancy { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", + []string{noStorageClass, s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) + } + storageClass = storageClassString + } + + userAgent := parameters["useragent"] + if userAgent == nil { + userAgent = "" + } + + objectACL := s3.ObjectCannedACLPrivate + objectACLParam := parameters["objectacl"] + if objectACLParam != nil { + objectACLString, ok := objectACLParam.(string) + if !ok { + return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + } + + if _, ok = validObjectACLs[objectACLString]; !ok { + return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + } + objectACL = objectACLString + } + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + fmt.Sprint(regionEndpoint), + encryptBool, + fmt.Sprint(keyID), + secureBool, + v4Bool, + chunkSize, + multipartCopyChunkSize, + multipartCopyMaxConcurrency, + multipartCopyThresholdSize, + fmt.Sprint(rootDirectory), + storageClass, + fmt.Sprint(userAgent), + objectACL, + } + + return New(params) +} + +// getParameterAsInt64 converts paramaters[name] to an int64 value (using +// defaultt if nil), verifies it is no smaller than min, and returns it. +func getParameterAsInt64(parameters map[string]interface{}, name string, defaultt int64, min int64, max int64) (int64, error) { + rv := defaultt + param := parameters[name] + switch v := param.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return 0, fmt.Errorf("%s parameter must be an integer, %v invalid", name, param) + } + rv = vv + case int64: + rv = v + case int, uint, int32, uint32, uint64: + rv = reflect.ValueOf(v).Convert(reflect.TypeOf(rv)).Int() + case nil: + // do nothing + default: + return 0, fmt.Errorf("invalid value for %s: %#v", name, param) + } + + if rv < min || rv > max { + return 0, fmt.Errorf("The %s %#v parameter should be a number between %d and %d (inclusive)", name, rv, min, max) + } + + return rv, nil +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + if !params.V4Auth && + (params.RegionEndpoint == "" || + strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) { + return nil, fmt.Errorf("On Amazon S3 this storage driver can only be used with v4 authentication") + } + + awsConfig := aws.NewConfig() + creds := credentials.NewChainCredentials([]credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: params.AccessKey, + SecretAccessKey: params.SecretKey, + }, + }, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + }) + + if params.RegionEndpoint != "" { + awsConfig.WithS3ForcePathStyle(true) + awsConfig.WithEndpoint(params.RegionEndpoint) + } + + awsConfig.WithCredentials(creds) + awsConfig.WithRegion(params.Region) + awsConfig.WithDisableSSL(!params.Secure) + + if params.UserAgent != "" { + awsConfig.WithHTTPClient(&http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})), + }) + } + + s3obj := s3.New(session.New(awsConfig)) + + // enable S3 compatible signature v2 signing instead + if !params.V4Auth { + setv2Handlers(s3obj) + } + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + KeyID: params.KeyID, + MultipartCopyChunkSize: params.MultipartCopyChunkSize, + MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency, + MultipartCopyThresholdSize: params.MultipartCopyThresholdSize, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + ObjectACL: params.ObjectACL, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + reader, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + return ioutil.ReadAll(reader) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + _, err := d.S3.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + Body: bytes.NewReader(contents), + }) + return parseError(path, err) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + resp, err := d.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), + }) + + if err != nil { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.s3Path(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + }) + if err != nil { + return nil, err + } + return d.newWriter(key, *resp.UploadId, nil), nil + } + resp, err := d.S3.ListMultipartUploads(&s3.ListMultipartUploadsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(key), + }) + if err != nil { + return nil, parseError(path, err) + } + + for _, multi := range resp.Uploads { + if key != *multi.Key { + continue + } + resp, err := d.S3.ListParts(&s3.ListPartsInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + UploadId: multi.UploadId, + }) + if err != nil { + return nil, parseError(path, err) + } + var multiSize int64 + for _, part := range resp.Parts { + multiSize += *part.Size + } + return d.newWriter(key, *multi.UploadId, resp.Parts), nil + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + MaxKeys: aws.Int64(1), + }) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(resp.Contents) == 1 { + if *resp.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = *resp.Contents[0].Size + fi.ModTime = *resp.Contents[0].LastModified + } + } else if len(resp.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + }) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range resp.Contents { + files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range resp.CommonPrefixes { + commonPrefix := *commonPrefix.Prefix + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) + } + + if *resp.IsTruncated { + resp, err = d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + Marker: resp.NextMarker, + }) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + if err := d.copy(ctx, sourcePath, destPath); err != nil { + return err + } + return d.Delete(ctx, sourcePath) +} + +// copy copies an object stored at sourcePath to destPath. +func (d *driver) copy(ctx context.Context, sourcePath string, destPath string) error { + // S3 can copy objects up to 5 GB in size with a single PUT Object - Copy + // operation. For larger objects, the multipart upload API must be used. + // + // Empirically, multipart copy is fastest with 32 MB parts and is faster + // than PUT Object - Copy for objects larger than 32 MB. + + fileInfo, err := d.Stat(ctx, sourcePath) + if err != nil { + return parseError(sourcePath, err) + } + + if fileInfo.Size() <= d.MultipartCopyThresholdSize { + _, err := d.S3.CopyObject(&s3.CopyObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + }) + if err != nil { + return parseError(sourcePath, err) + } + return nil + } + + // Even in the worst case, a multipart copy should take no more + // than a few minutes, so 30 minutes is very conservative. + expires := time.Now().Add(time.Duration(30) * time.Minute) + createResp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + Expires: aws.Time(expires), + SSEKMSKeyId: d.getSSEKMSKeyID(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), + }) + if err != nil { + return err + } + + numParts := (fileInfo.Size() + d.MultipartCopyChunkSize - 1) / d.MultipartCopyChunkSize + completedParts := make([]*s3.CompletedPart, numParts) + errChan := make(chan error, numParts) + limiter := make(chan struct{}, d.MultipartCopyMaxConcurrency) + + for i := range completedParts { + i := int64(i) + go func() { + limiter <- struct{}{} + firstByte := i * d.MultipartCopyChunkSize + lastByte := firstByte + d.MultipartCopyChunkSize - 1 + if lastByte >= fileInfo.Size() { + lastByte = fileInfo.Size() - 1 + } + uploadResp, err := d.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(d.Bucket), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + Key: aws.String(d.s3Path(destPath)), + PartNumber: aws.Int64(i + 1), + UploadId: createResp.UploadId, + CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", firstByte, lastByte)), + }) + if err == nil { + completedParts[i] = &s3.CompletedPart{ + ETag: uploadResp.CopyPartResult.ETag, + PartNumber: aws.Int64(i + 1), + } + } + errChan <- err + <-limiter + }() + } + + for range completedParts { + err := <-errChan + if err != nil { + return err + } + } + + _, err = d.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + UploadId: createResp.UploadId, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts}, + }) + return err +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +// We must be careful since S3 does not guarantee read after delete consistency +func (d *driver) Delete(ctx context.Context, path string) error { + s3Objects := make([]*s3.ObjectIdentifier, 0, listMax) + s3Path := d.s3Path(path) + listObjectsInput := &s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(s3Path), + } +ListLoop: + for { + // list all the objects + resp, err := d.S3.ListObjects(listObjectsInput) + + // resp.Contents can only be empty on the first call + // if there were no more results to return after the first call, resp.IsTruncated would have been false + // and the loop would be exited without recalling ListObjects + if err != nil || len(resp.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + for _, key := range resp.Contents { + // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). + if len(*key.Key) > len(s3Path) && (*key.Key)[len(s3Path)] != '/' { + break ListLoop + } + s3Objects = append(s3Objects, &s3.ObjectIdentifier{ + Key: key.Key, + }) + } + + // resp.Contents must have at least one element or we would have returned not found + listObjectsInput.Marker = resp.Contents[len(resp.Contents)-1].Key + + // from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned" + // if everything has been returned, break + if resp.IsTruncated == nil || !*resp.IsTruncated { + break + } + } + + // need to chunk objects into groups of 1000 per s3 restrictions + total := len(s3Objects) + for i := 0; i < total; i += 1000 { + _, err := d.S3.DeleteObjects(&s3.DeleteObjectsInput{ + Bucket: aws.String(d.Bucket), + Delete: &s3.Delete{ + Objects: s3Objects[i:min(i+1000, total)], + Quiet: aws.Bool(false), + }, + }) + if err != nil { + return err + } + } + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresIn := 20 * time.Minute + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresIn = et.Sub(time.Now()) + } + } + + var req *request.Request + + switch methodString { + case "GET": + req, _ = d.S3.GetObjectRequest(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + case "HEAD": + req, _ = d.S3.HeadObjectRequest(&s3.HeadObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + default: + panic("unreachable") + } + + return req.Presign(expiresIn) +} + +func (d *driver) s3Path(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the s3 bucket key for the given storage driver path. +func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + +func parseError(path string, err error) error { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func (d *driver) getEncryptionMode() *string { + if !d.Encrypt { + return nil + } + if d.KeyID == "" { + return aws.String("AES256") + } + return aws.String("aws:kms") +} + +func (d *driver) getSSEKMSKeyID() *string { + if d.KeyID != "" { + return aws.String(d.KeyID) + } + return nil +} + +func (d *driver) getContentType() *string { + return aws.String("application/octet-stream") +} + +func (d *driver) getACL() *string { + return aws.String(d.ObjectACL) +} + +func (d *driver) getStorageClass() *string { + if d.StorageClass == noStorageClass { + return nil + } + return aws.String(d.StorageClass) +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. +type writer struct { + driver *driver + key string + uploadID string + parts []*s3.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key, uploadID string, parts []*s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += *part.Size + } + return &writer{ + driver: d, + key: key, + uploadID: uploadID, + parts: parts, + size: size, + } +} + +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize { + var completedUploadedParts completedParts + for _, part := range w.parts { + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + sort.Sort(completedUploadedParts) + + _, err := w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return 0, err + } + + resp, err := w.driver.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + ContentType: w.driver.getContentType(), + ACL: w.driver.getACL(), + ServerSideEncryption: w.driver.getEncryptionMode(), + StorageClass: w.driver.getStorageClass(), + }) + if err != nil { + return 0, err + } + w.uploadID = *resp.UploadId + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + resp, err := w.driver.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + }) + defer resp.Body.Close() + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart, err = ioutil.ReadAll(resp.Body) + if err != nil { + return 0, err + } + } else { + // Otherwise we can use the old file as the new first part + copyPartResp, err := w.driver.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(w.driver.Bucket), + CopySource: aws.String(w.driver.Bucket + "/" + w.key), + Key: aws.String(w.key), + PartNumber: aws.Int64(1), + UploadId: resp.UploadId, + }) + if err != nil { + return 0, err + } + w.parts = []*s3.Part{ + { + ETag: copyPartResp.CopyPartResult.ETag, + PartNumber: aws.Int64(1), + Size: aws.Int64(w.size), + }, + } + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + _, err := w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + + var completedUploadedParts completedParts + for _, part := range w.parts { + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + sort.Sort(completedUploadedParts) + + _, err = w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. +// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + partNumber := aws.Int64(int64(len(w.parts) + 1)) + resp, err := w.driver.S3.UploadPart(&s3.UploadPartInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + PartNumber: partNumber, + UploadId: aws.String(w.uploadID), + Body: bytes.NewReader(w.readyPart), + }) + if err != nil { + return err + } + w.parts = append(w.parts, &s3.Part{ + ETag: resp.ETag, + PartNumber: partNumber, + Size: aws.Int64(int64(len(w.readyPart))), + }) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go new file mode 100644 index 0000000..eb7ee51 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go @@ -0,0 +1,313 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "math/rand" + "os" + "strconv" + "testing" + + "gopkg.in/check.v1" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) +var skipS3 func() string + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + keyID := os.Getenv("S3_KEY_ID") + secure := os.Getenv("S3_SECURE") + v4Auth := os.Getenv("S3_V4_AUTH") + region := os.Getenv("AWS_REGION") + objectACL := os.Getenv("S3_OBJECT_ACL") + root, err := ioutil.TempDir("", "driver-") + regionEndpoint := os.Getenv("REGION_ENDPOINT") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor = func(rootDirectory, storageClass string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + v4Bool := true + if v4Auth != "" { + v4Bool, err = strconv.ParseBool(v4Auth) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + region, + regionEndpoint, + encryptBool, + keyID, + secureBool, + v4Bool, + minChunkSize, + defaultMultipartCopyChunkSize, + defaultMultipartCopyMaxConcurrency, + defaultMultipartCopyThresholdSize, + rootDirectory, + storageClass, + driverName + "-test", + objectACL, + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipS3 = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root, s3.StorageClassStandard) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := s3DriverConstructor(validRoot, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("", s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/", s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} + +func TestStorageClass(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + rrDriver, err := s3DriverConstructor(rootDir, s3.StorageClassReducedRedundancy) + if err != nil { + t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) + } + + if _, err = s3DriverConstructor(rootDir, noStorageClass); err != nil { + t.Fatalf("unexpected error creating driver without storage class: %v", err) + } + + standardFilename := "/test-standard" + rrFilename := "/test-rr" + contents := []byte("contents") + ctx := context.Background() + + err = standardDriver.PutContent(ctx, standardFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer standardDriver.Delete(ctx, standardFilename) + + err = rrDriver.PutContent(ctx, rrFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rrDriver.Delete(ctx, rrFilename) + + standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) + resp, err := standardDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(standardDriverUnwrapped.Bucket), + Key: aws.String(standardDriverUnwrapped.s3Path(standardFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving standard storage file: %v", err) + } + defer resp.Body.Close() + // Amazon only populates this header value for non-standard storage classes + if resp.StorageClass != nil { + t.Fatalf("unexpected storage class for standard file: %v", resp.StorageClass) + } + + rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) + resp, err = rrDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(rrDriverUnwrapped.Bucket), + Key: aws.String(rrDriverUnwrapped.s3Path(rrFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) + } + defer resp.Body.Close() + if resp.StorageClass == nil { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", s3.StorageClassStandard) + } else if *resp.StorageClass != s3.StorageClassReducedRedundancy { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", *resp.StorageClass) + } + +} + +func TestOverThousandBlobs(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + ctx := context.Background() + for i := 0; i < 1005; i++ { + filename := "/thousandfiletest/file" + strconv.Itoa(i) + contents := []byte("contents") + err = standardDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + } + + // cant actually verify deletion because read-after-delete is inconsistent, but can ensure no errors + err = standardDriver.Delete(ctx, "/thousandfiletest") + if err != nil { + t.Fatalf("unexpected error deleting thousand files: %v", err) + } +} + +func TestMoveWithMultipartCopy(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + d, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver: %v", err) + } + + ctx := context.Background() + sourcePath := "/source" + destPath := "/dest" + + defer d.Delete(ctx, sourcePath) + defer d.Delete(ctx, destPath) + + // An object larger than d's MultipartCopyThresholdSize will cause d.Move() to perform a multipart copy. + multipartCopyThresholdSize := d.baseEmbed.Base.StorageDriver.(*driver).MultipartCopyThresholdSize + contents := make([]byte, 2*multipartCopyThresholdSize) + rand.Read(contents) + + err = d.PutContent(ctx, sourcePath, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + err = d.Move(ctx, sourcePath, destPath) + if err != nil { + t.Fatalf("unexpected error moving file: %v", err) + } + + received, err := d.GetContent(ctx, destPath) + if err != nil { + t.Fatalf("unexpected error getting content: %v", err) + } + if !bytes.Equal(contents, received) { + t.Fatal("content differs") + } + + _, err = d.GetContent(ctx, sourcePath) + switch err.(type) { + case storagedriver.PathNotFoundError: + default: + t.Fatalf("unexpected error getting content: %v", err) + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go new file mode 100644 index 0000000..7cabe07 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go @@ -0,0 +1,219 @@ +package s3 + +// Source: https://github.com/pivotal-golang/s3cli + +// Copyright (c) 2013 Damien Le Berrigaud and Nick Wade + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "net/http" + "net/url" + "sort" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" +) + +const ( + signatureVersion = "2" + signatureMethod = "HmacSHA1" + timeFormat = "2006-01-02T15:04:05Z" +) + +type signer struct { + // Values that must be populated from the request + Request *http.Request + Time time.Time + Credentials *credentials.Credentials + Query url.Values + stringToSign string + signature string +} + +var s3ParamsToSign = map[string]bool{ + "acl": true, + "location": true, + "logging": true, + "notification": true, + "partNumber": true, + "policy": true, + "requestPayment": true, + "torrent": true, + "uploadId": true, + "uploads": true, + "versionId": true, + "versioning": true, + "versions": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "website": true, + "delete": true, +} + +// setv2Handlers will setup v2 signature signing on the S3 driver +func setv2Handlers(svc *s3.S3) { + svc.Handlers.Build.PushBack(func(r *request.Request) { + parsedURL, err := url.Parse(r.HTTPRequest.URL.String()) + if err != nil { + log.Fatalf("Failed to parse URL: %v", err) + } + r.HTTPRequest.URL.Opaque = parsedURL.Path + }) + + svc.Handlers.Sign.Clear() + svc.Handlers.Sign.PushBack(Sign) + svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) +} + +// Sign requests with signature version 2. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + v2 := signer{ + Request: req.HTTPRequest, + Time: req.Time, + Credentials: req.Config.Credentials, + } + v2.Sign() +} + +func (v2 *signer) Sign() error { + credValue, err := v2.Credentials.Get() + if err != nil { + return err + } + accessKey := credValue.AccessKeyID + var ( + md5, ctype, date, xamz string + xamzDate bool + sarray []string + smap map[string]string + sharray []string + ) + + headers := v2.Request.Header + params := v2.Request.URL.Query() + parsedURL, err := url.Parse(v2.Request.URL.String()) + if err != nil { + return err + } + host, canonicalPath := parsedURL.Host, parsedURL.Path + v2.Request.Header["Host"] = []string{host} + v2.Request.Header["date"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)} + + smap = make(map[string]string) + for k, v := range headers { + k = strings.ToLower(k) + switch k { + case "content-md5": + md5 = v[0] + case "content-type": + ctype = v[0] + case "date": + if !xamzDate { + date = v[0] + } + default: + if strings.HasPrefix(k, "x-amz-") { + vall := strings.Join(v, ",") + smap[k] = k + ":" + vall + if k == "x-amz-date" { + xamzDate = true + date = "" + } + sharray = append(sharray, k) + } + } + } + if len(sharray) > 0 { + sort.StringSlice(sharray).Sort() + for _, h := range sharray { + sarray = append(sarray, smap[h]) + } + xamz = strings.Join(sarray, "\n") + "\n" + } + + expires := false + if v, ok := params["Expires"]; ok { + expires = true + date = v[0] + params["AWSAccessKeyId"] = []string{accessKey} + } + + sarray = sarray[0:0] + for k, v := range params { + if s3ParamsToSign[k] { + for _, vi := range v { + if vi == "" { + sarray = append(sarray, k) + } else { + sarray = append(sarray, k+"="+vi) + } + } + } + } + if len(sarray) > 0 { + sort.StringSlice(sarray).Sort() + canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&") + } + + v2.stringToSign = strings.Join([]string{ + v2.Request.Method, + md5, + ctype, + date, + xamz + canonicalPath, + }, "\n") + hash := hmac.New(sha1.New, []byte(credValue.SecretAccessKey)) + hash.Write([]byte(v2.stringToSign)) + v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + + if expires { + params["Signature"] = []string{string(v2.signature)} + } else { + headers["Authorization"] = []string{"AWS " + accessKey + ":" + string(v2.signature)} + } + + log.WithFields(log.Fields{ + "string-to-sign": v2.stringToSign, + "signature": v2.signature, + }).Debugln("request signature") + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go new file mode 100644 index 0000000..33751c1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go @@ -0,0 +1,757 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. +// +// This package leverages the docker/goamz client library for interfacing with +// S3. It is intended to be deprecated in favor of the s3-aws driver +// implementation. +// +// Because S3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. +package s3 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/docker/goamz/aws" + "github.com/docker/goamz/s3" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/transport" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3goamz" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region aws.Region + Encrypt bool + Secure bool + V4Auth bool + ChunkSize int64 + RootDirectory string + StorageClass s3.StorageClass + UserAgent string +} + +func init() { + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket *s3.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + StorageClass s3.StorageClass +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey := parameters["accesskey"] + if accessKey == nil { + accessKey = "" + } + + secretKey := parameters["secretkey"] + if secretKey == nil { + secretKey = "" + } + + regionName := parameters["region"] + if regionName == nil || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := aws.GetRegion(fmt.Sprint(regionName)) + if region.Name == "" { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } + + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encryptBool := false + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + + secureBool := true + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := strconv.ParseBool(secure) + if err != nil { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + + v4AuthBool := false + v4Auth := parameters["v4auth"] + switch v4Auth := v4Auth.(type) { + case string: + b, err := strconv.ParseBool(v4Auth) + if err != nil { + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + v4AuthBool = b + case bool: + v4AuthBool = v4Auth + case nil: + // do nothing + default: + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam := parameters["chunksize"] + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { + rootDirectory = "" + } + + storageClass := s3.StandardStorage + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { + storageClassString, ok := storageClassParam.(string) + if !ok { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) + } + // All valid storage class parameters are UPPERCASE, so be a bit more flexible here + storageClassCasted := s3.StorageClass(strings.ToUpper(storageClassString)) + if storageClassCasted != s3.StandardStorage && storageClassCasted != s3.ReducedRedundancy { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) + } + storageClass = storageClassCasted + } + + userAgent := parameters["useragent"] + if userAgent == nil { + userAgent = "" + } + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + encryptBool, + secureBool, + v4AuthBool, + chunkSize, + fmt.Sprint(rootDirectory), + storageClass, + fmt.Sprint(userAgent), + } + + return New(params) +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) + if err != nil { + return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) + } + + if !params.Secure { + params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) + } + + s3obj := s3.New(auth, params.Region) + + if params.UserAgent != "" { + s3obj.Client = &http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, + transport.NewHeaderRequestModifier(http.Header{ + http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}, + }), + ), + } + } + + if params.V4Auth { + s3obj.Signature = aws.V4Signature + } else { + if params.Region.Name == "eu-central-1" { + return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication") + } + } + + bucket := s3obj.Bucket(params.Bucket) + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.s3Path(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) + if err != nil { + if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.s3Path(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return nil, err + } + return d.newWriter(key, multi, nil), nil + } + multis, _, err := d.Bucket.ListMulti(key, "") + if err != nil { + return nil, parseError(path, err) + } + for _, multi := range multis { + if key != multi.Key { + continue + } + parts, err := multi.ListParts() + if err != nil { + return nil, parseError(path, err) + } + var multiSize int64 + for _, part := range parts { + multiSize += part.Size + } + return d.newWriter(key, multi, parts), nil + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), + s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) + if err != nil { + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + s3Path := d.s3Path(path) + listResponse, err := d.Bucket.List(s3Path, "", "", listMax) + if err != nil || len(listResponse.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + s3Objects := make([]s3.Object, listMax) + + for len(listResponse.Contents) > 0 { + numS3Objects := len(listResponse.Contents) + for index, key := range listResponse.Contents { + // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). + if len(key.Key) > len(s3Path) && (key.Key)[len(s3Path)] != '/' { + numS3Objects = index + break + } + s3Objects[index].Key = key.Key + } + + err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:numS3Objects]}) + if err != nil { + return nil + } + + if numS3Objects < len(listResponse.Contents) { + return nil + } + + listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil +} + +func (d *driver) s3Path(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the s3 bucket key for the given storage driver path. +func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + +func parseError(path string, err error) error { + if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func hasCode(err error, code string) bool { + s3err, ok := err.(*aws.Error) + return ok && s3err.Code == code +} + +func (d *driver) getOptions() s3.Options { + return s3.Options{ + SSE: d.Encrypt, + StorageClass: d.StorageClass, + } +} + +func getPermissions() s3.ACL { + return s3.Private +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. +type writer struct { + driver *driver + key string + multi *s3.Multi + parts []s3.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key string, multi *s3.Multi, parts []s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += part.Size + } + return &writer{ + driver: d, + key: key, + multi: multi, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { + err := w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return 0, err + } + + multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) + if err != nil { + return 0, err + } + w.multi = multi + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + contents, err := w.driver.Bucket.Get(w.key) + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart = contents + } else { + // Otherwise we can use the old file as the new first part + _, part, err := multi.PutPartCopy(1, s3.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + if err != nil { + return 0, err + } + w.parts = []s3.Part{part} + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + err := w.multi.Abort() + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + err = w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. +// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) + if err != nil { + return err + } + w.parts = append(w.parts, part) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3_test.go new file mode 100644 index 0000000..352ec3f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3_test.go @@ -0,0 +1,201 @@ +package s3 + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "github.com/docker/goamz/aws" + "github.com/docker/goamz/s3" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var s3DriverConstructor func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) +var skipS3 func() string + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + secure := os.Getenv("S3_SECURE") + v4auth := os.Getenv("S3_USE_V4_AUTH") + region := os.Getenv("AWS_REGION") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor = func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + v4AuthBool := false + if v4auth != "" { + v4AuthBool, err = strconv.ParseBool(v4auth) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + aws.GetRegion(region), + encryptBool, + secureBool, + v4AuthBool, + minChunkSize, + rootDirectory, + storageClass, + driverName + "-test", + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipS3 = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root, s3.StandardStorage) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := s3DriverConstructor(validRoot, s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("", s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/", s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} + +func TestStorageClass(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + rrDriver, err := s3DriverConstructor(rootDir, s3.ReducedRedundancy) + if err != nil { + t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) + } + + standardFilename := "/test-standard" + rrFilename := "/test-rr" + contents := []byte("contents") + ctx := context.Background() + + err = standardDriver.PutContent(ctx, standardFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer standardDriver.Delete(ctx, standardFilename) + + err = rrDriver.PutContent(ctx, rrFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rrDriver.Delete(ctx, rrFilename) + + standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) + resp, err := standardDriverUnwrapped.Bucket.GetResponse(standardDriverUnwrapped.s3Path(standardFilename)) + if err != nil { + t.Fatalf("unexpected error retrieving standard storage file: %v", err) + } + defer resp.Body.Close() + // Amazon only populates this header value for non-standard storage classes + if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != "" { + t.Fatalf("unexpected storage class for standard file: %v", storageClass) + } + + rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) + resp, err = rrDriverUnwrapped.Bucket.GetResponse(rrDriverUnwrapped.s3Path(rrFilename)) + if err != nil { + t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) + } + defer resp.Body.Close() + if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != string(s3.ReducedRedundancy) { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", storageClass) + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go b/vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go new file mode 100644 index 0000000..548a17d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go @@ -0,0 +1,165 @@ +package driver + +import ( + "fmt" + "io" + "regexp" + "strconv" + "strings" + + "github.com/docker/distribution/context" +) + +// Version is a string representing the storage driver version, of the form +// Major.Minor. +// The registry must accept storage drivers with equal major version and greater +// minor version, but may not be compatible with older storage driver versions. +type Version string + +// Major returns the major (primary) component of a version. +func (version Version) Major() uint { + majorPart := strings.Split(string(version), ".")[0] + major, _ := strconv.ParseUint(majorPart, 10, 0) + return uint(major) +} + +// Minor returns the minor (secondary) component of a version. +func (version Version) Minor() uint { + minorPart := strings.Split(string(version), ".")[1] + minor, _ := strconv.ParseUint(minorPart, 10, 0) + return uint(minor) +} + +// CurrentVersion is the current storage driver Version. +const CurrentVersion Version = "0.1" + +// StorageDriver defines methods that a Storage Driver must implement for a +// filesystem-like key/value object storage. Storage Drivers are automatically +// registered via an internal registration mechanism, and generally created +// via the StorageDriverFactory interface (https://godoc.org/github.com/docker/distribution/registry/storage/driver/factory). +// Please see the aforementioned factory package for example code showing how to get an instance +// of a StorageDriver +type StorageDriver interface { + // Name returns the human-readable "name" of the driver, useful in error + // messages and logging. By convention, this will just be the registration + // name, but drivers may provide other information here. + Name() string + + // GetContent retrieves the content stored at "path" as a []byte. + // This should primarily be used for small objects. + GetContent(ctx context.Context, path string) ([]byte, error) + + // PutContent stores the []byte content at a location designated by "path". + // This should primarily be used for small objects. + PutContent(ctx context.Context, path string, content []byte) error + + // Reader retrieves an io.ReadCloser for the content stored at "path" + // with a given byte offset. + // May be used to resume reading a stream by providing a nonzero offset. + Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) + + // Writer returns a FileWriter which will store the content written to it + // at the location designated by "path" after the call to Commit. + Writer(ctx context.Context, path string, append bool) (FileWriter, error) + + // Stat retrieves the FileInfo for the given path, including the current + // size in bytes and the creation time. + Stat(ctx context.Context, path string) (FileInfo, error) + + // List returns a list of the objects that are direct descendants of the + //given path. + List(ctx context.Context, path string) ([]string, error) + + // Move moves an object stored at sourcePath to destPath, removing the + // original object. + // Note: This may be no more efficient than a copy followed by a delete for + // many implementations. + Move(ctx context.Context, sourcePath string, destPath string) error + + // Delete recursively deletes all objects stored at "path" and its subpaths. + Delete(ctx context.Context, path string) error + + // URLFor returns a URL which may be used to retrieve the content stored at + // the given path, possibly using the given options. + // May return an ErrUnsupportedMethod in certain StorageDriver + // implementations. + URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) +} + +// FileWriter provides an abstraction for an opened writable file-like object in +// the storage backend. The FileWriter must flush all content written to it on +// the call to Close, but is only required to make its content readable on a +// call to Commit. +type FileWriter interface { + io.WriteCloser + + // Size returns the number of bytes written to this FileWriter. + Size() int64 + + // Cancel removes any written content from this FileWriter. + Cancel() error + + // Commit flushes all content written to this FileWriter and makes it + // available for future calls to StorageDriver.GetContent and + // StorageDriver.Reader. + Commit() error +} + +// PathRegexp is the regular expression which each file path must match. A +// file path is absolute, beginning with a slash and containing a positive +// number of path components separated by slashes, where each component is +// restricted to alphanumeric characters or a period, underscore, or +// hyphen. +var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) + +// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. +type ErrUnsupportedMethod struct { + DriverName string +} + +func (err ErrUnsupportedMethod) Error() string { + return fmt.Sprintf("%s: unsupported method", err.DriverName) +} + +// PathNotFoundError is returned when operating on a nonexistent path. +type PathNotFoundError struct { + Path string + DriverName string +} + +func (err PathNotFoundError) Error() string { + return fmt.Sprintf("%s: Path not found: %s", err.DriverName, err.Path) +} + +// InvalidPathError is returned when the provided path is malformed. +type InvalidPathError struct { + Path string + DriverName string +} + +func (err InvalidPathError) Error() string { + return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path) +} + +// InvalidOffsetError is returned when attempting to read or write from an +// invalid offset. +type InvalidOffsetError struct { + Path string + Offset int64 + DriverName string +} + +func (err InvalidOffsetError) Error() string { + return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path) +} + +// Error is a catch-all error type which captures an error string and +// the driver type on which it occurred. +type Error struct { + DriverName string + Enclosed error +} + +func (err Error) Error() string { + return fmt.Sprintf("%s: %s", err.DriverName, err.Enclosed) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go new file mode 100644 index 0000000..4b7aa4e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go @@ -0,0 +1,915 @@ +// Package swift provides a storagedriver.StorageDriver implementation to +// store blobs in Openstack Swift object storage. +// +// This package leverages the ncw/swift client library for interfacing with +// Swift. +// +// It supports both TempAuth authentication and Keystone authentication +// (up to version 3). +// +// As Swift has a limit on the size of a single uploaded object (by default +// this is 5GB), the driver makes use of the Swift Large Object Support +// (http://docs.openstack.org/developer/swift/overview_large_objects.html). +// Only one container is used for both manifests and data objects. Manifests +// are stored in the 'files' pseudo directory, data objects are stored under +// 'segments'. +package swift + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "crypto/tls" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/ncw/swift" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" +) + +const driverName = "swift" + +// defaultChunkSize defines the default size of a segment +const defaultChunkSize = 20 * 1024 * 1024 + +// minChunkSize defines the minimum size of a segment +const minChunkSize = 1 << 20 + +// contentType defines the Content-Type header associated with stored segments +const contentType = "application/octet-stream" + +// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded +var readAfterWriteTimeout = 15 * time.Second + +// readAfterWriteWait defines the time to sleep between two retries +var readAfterWriteWait = 200 * time.Millisecond + +// Parameters A struct that encapsulates all of the driver parameters after all values have been set +type Parameters struct { + Username string + Password string + AuthURL string + Tenant string + TenantID string + Domain string + DomainID string + TenantDomain string + TenantDomainID string + TrustID string + Region string + AuthVersion int + Container string + Prefix string + EndpointType string + InsecureSkipVerify bool + ChunkSize int + SecretKey string + AccessKey string + TempURLContainerKey bool + TempURLMethods []string +} + +// swiftInfo maps the JSON structure returned by Swift /info endpoint +type swiftInfo struct { + Swift struct { + Version string `mapstructure:"version"` + } + Tempurl struct { + Methods []string `mapstructure:"methods"` + } + BulkDelete struct { + MaxDeletesPerRequest int `mapstructure:"max_deletes_per_request"` + } `mapstructure:"bulk_delete"` +} + +func init() { + factory.Register(driverName, &swiftDriverFactory{}) +} + +// swiftDriverFactory implements the factory.StorageDriverFactory interface +type swiftDriverFactory struct{} + +func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Conn *swift.Connection + Container string + Prefix string + BulkDeleteSupport bool + BulkDeleteMaxDeletes int + ChunkSize int + SecretKey string + AccessKey string + TempURLContainerKey bool + TempURLMethods []string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift +// Objects are stored at absolute keys in the provided container. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - username +// - password +// - authurl +// - container +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params := Parameters{ + ChunkSize: defaultChunkSize, + InsecureSkipVerify: false, + } + + if err := mapstructure.Decode(parameters, ¶ms); err != nil { + return nil, err + } + + if params.Username == "" { + return nil, fmt.Errorf("No username parameter provided") + } + + if params.Password == "" { + return nil, fmt.Errorf("No password parameter provided") + } + + if params.AuthURL == "" { + return nil, fmt.Errorf("No authurl parameter provided") + } + + if params.Container == "" { + return nil, fmt.Errorf("No container parameter provided") + } + + if params.ChunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) + } + + return New(params) +} + +// New constructs a new Driver with the given Openstack Swift credentials and container name +func New(params Parameters) (*Driver, error) { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: 2048, + TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, + } + + ct := &swift.Connection{ + UserName: params.Username, + ApiKey: params.Password, + AuthUrl: params.AuthURL, + Region: params.Region, + AuthVersion: params.AuthVersion, + UserAgent: "distribution/" + version.Version, + Tenant: params.Tenant, + TenantId: params.TenantID, + Domain: params.Domain, + DomainId: params.DomainID, + TenantDomain: params.TenantDomain, + TenantDomainId: params.TenantDomainID, + TrustId: params.TrustID, + EndpointType: swift.EndpointType(params.EndpointType), + Transport: transport, + ConnectTimeout: 60 * time.Second, + Timeout: 15 * 60 * time.Second, + } + err := ct.Authenticate() + if err != nil { + return nil, fmt.Errorf("Swift authentication failed: %s", err) + } + + if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound { + if err := ct.ContainerCreate(params.Container, nil); err != nil { + return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) + } + } else if err != nil { + return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err) + } + + d := &driver{ + Conn: ct, + Container: params.Container, + Prefix: params.Prefix, + ChunkSize: params.ChunkSize, + TempURLMethods: make([]string, 0), + AccessKey: params.AccessKey, + } + + info := swiftInfo{} + if config, err := d.Conn.QueryInfo(); err == nil { + _, d.BulkDeleteSupport = config["bulk_delete"] + + if err := mapstructure.Decode(config, &info); err == nil { + d.TempURLContainerKey = info.Swift.Version >= "2.3.0" + d.TempURLMethods = info.Tempurl.Methods + if d.BulkDeleteSupport { + d.BulkDeleteMaxDeletes = info.BulkDelete.MaxDeletesPerRequest + } + } + } else { + d.TempURLContainerKey = params.TempURLContainerKey + d.TempURLMethods = params.TempURLMethods + } + + if len(d.TempURLMethods) > 0 { + secretKey := params.SecretKey + if secretKey == "" { + secretKey, _ = generateSecret() + } + + // Since Swift 2.2.2, we can now set secret keys on containers + // in addition to the account secret keys. Use them in preference. + if d.TempURLContainerKey { + _, containerHeaders, err := d.Conn.Container(d.Container) + if err != nil { + return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err) + } + + d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"] + if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { + m := swift.Metadata{} + m["temp-url-key"] = secretKey + if d.Conn.ContainerUpdate(d.Container, m.ContainerHeaders()); err == nil { + d.SecretKey = secretKey + } + } + } else { + // Use the account secret key + _, accountHeaders, err := d.Conn.Account() + if err != nil { + return nil, fmt.Errorf("Failed to fetch account info (%s)", err) + } + + d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"] + if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { + m := swift.Metadata{} + m["temp-url-key"] = secretKey + if err := d.Conn.AccountUpdate(m.AccountHeaders()); err == nil { + d.SecretKey = secretKey + } + } + } + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return content, err +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, contentType) + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" + + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + file, headers, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + return file, err + } + + //if this is a DLO and it is clear that segments are still missing, + //wait until they show up + _, isDLO := headers["X-Object-Manifest"] + size, err := file.Length() + if err != nil { + return file, err + } + if isDLO && size == 0 { + if time.Now().Add(waitingTime).After(endTime) { + return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + } + time.Sleep(waitingTime) + waitingTime *= 2 + continue + } + + //if not, then this reader will be fine + return file, nil + } +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + var ( + segments []swift.Object + segmentsPath string + err error + ) + + if !append { + segmentsPath, err = d.swiftSegmentPath(path) + if err != nil { + return nil, err + } + } else { + info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } else if err != nil { + return nil, err + } + manifest, ok := headers["X-Object-Manifest"] + if !ok { + segmentsPath, err = d.swiftSegmentPath(path) + if err != nil { + return nil, err + } + if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegmentPath(segmentsPath, len(segments))); err != nil { + return nil, err + } + segments = []swift.Object{info} + } else { + _, segmentsPath = parseManifest(manifest) + if segments, err = d.getAllSegments(segmentsPath); err != nil { + return nil, err + } + } + } + + return d.newWriter(path, segmentsPath, segments), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + swiftPath := d.swiftPath(path) + opts := &swift.ObjectsOpts{ + Prefix: swiftPath, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), + } + + for _, obj := range objects { + if obj.PseudoDirectory && obj.Name == swiftPath+"/" { + fi.IsDir = true + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } else if obj.Name == swiftPath { + // The file exists. But on Swift 1.12, the 'bytes' field is always 0 so + // we need to do a separate HEAD request. + break + } + } + + //Don't trust an empty `objects` slice. A container listing can be + //outdated. For files, we can make a HEAD request on the object which + //reports existence (at least) much more reliably. + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + info, headers, err := d.Conn.Object(d.Container, swiftPath) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + //if this is a DLO and it is clear that segments are still missing, + //wait until they show up + _, isDLO := headers["X-Object-Manifest"] + if isDLO && info.Bytes == 0 { + if time.Now().Add(waitingTime).After(endTime) { + return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + } + time.Sleep(waitingTime) + waitingTime *= 2 + continue + } + + //otherwise, accept the result + fi.IsDir = false + fi.Size = info.Bytes + fi.ModTime = info.LastModified + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + var files []string + + prefix := d.swiftPath(path) + if prefix != "" { + prefix += "/" + } + + opts := &swift.ObjectsOpts{ + Prefix: prefix, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + for _, obj := range objects { + files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) + } + + if err == swift.ContainerNotFound || (len(objects) == 0 && path != "/") { + return files, storagedriver.PathNotFoundError{Path: path} + } + return files, err +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) + if err == nil { + if manifest, ok := headers["X-Object-Manifest"]; ok { + if err = d.createManifest(destPath, manifest); err != nil { + return err + } + err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) + } else { + err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) + } + } + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + opts := swift.ObjectsOpts{ + Prefix: d.swiftPath(path) + "/", + } + + objects, err := d.Conn.ObjectsAll(d.Container, &opts) + if err != nil { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + + for _, obj := range objects { + if obj.PseudoDirectory { + continue + } + if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { + manifest, ok := headers["X-Object-Manifest"] + if ok { + _, prefix := parseManifest(manifest) + segments, err := d.getAllSegments(prefix) + if err != nil { + return err + } + objects = append(objects, segments...) + } + } else { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + + if d.BulkDeleteSupport && len(objects) > 0 && d.BulkDeleteMaxDeletes > 0 { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj.Name + } + + chunks, err := chunkFilenames(filenames, d.BulkDeleteMaxDeletes) + if err != nil { + return err + } + for _, chunk := range chunks { + _, err := d.Conn.BulkDelete(d.Container, chunk) + // Don't fail on ObjectNotFound because eventual consistency + // makes this situation normal. + if err != nil && err != swift.Forbidden && err != swift.ObjectNotFound { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } + } else { + for _, obj := range objects { + if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + } + + _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } else if err == swift.ObjectNotFound { + if len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + } else { + return err + } + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + if d.SecretKey == "" { + return "", storagedriver.ErrUnsupportedMethod{} + } + + methodString := "GET" + method, ok := options["method"] + if ok { + if methodString, ok = method.(string); !ok { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + if methodString == "HEAD" { + // A "HEAD" request on a temporary URL is allowed if the + // signature was generated with "GET", "POST" or "PUT" + methodString = "GET" + } + + supported := false + for _, method := range d.TempURLMethods { + if method == methodString { + supported = true + break + } + } + + if !supported { + return "", storagedriver.ErrUnsupportedMethod{} + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + tempURL := d.Conn.ObjectTempUrl(d.Container, d.swiftPath(path), d.SecretKey, methodString, expiresTime) + + if d.AccessKey != "" { + // On HP Cloud, the signature must be in the form of tenant_id:access_key:signature + url, _ := url.Parse(tempURL) + query := url.Query() + query.Set("temp_url_sig", fmt.Sprintf("%s:%s:%s", d.Conn.TenantId, d.AccessKey, query.Get("temp_url_sig"))) + url.RawQuery = query.Encode() + tempURL = url.String() + } + + return tempURL, nil +} + +func (d *driver) swiftPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") +} + +func (d *driver) swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + //a simple container listing works 99.9% of the time + segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + //build a lookup table by object name + hasObjectName := make(map[string]struct{}) + for _, segment := range segments { + hasObjectName[segment.Name] = struct{}{} + } + + //The container listing might be outdated (i.e. not contain all existing + //segment objects yet) because of temporary inconsistency (Swift is only + //eventually consistent!). Check its completeness. + segmentNumber := 0 + for { + segmentNumber++ + segmentPath := getSegmentPath(path, segmentNumber) + + if _, seen := hasObjectName[segmentPath]; seen { + continue + } + + //This segment is missing in the container listing. Use a more reliable + //request to check its existence. (HEAD requests on segments are + //guaranteed to return the correct metadata, except for the pathological + //case of an outage of large parts of the Swift cluster or its network, + //since every segment is only written once.) + segment, _, err := d.Conn.Object(d.Container, segmentPath) + switch err { + case nil: + //found new segment -> keep going, more might be missing + segments = append(segments, segment) + continue + case swift.ObjectNotFound: + //This segment is missing. Since we upload segments sequentially, + //there won't be any more segments after it. + return segments, nil + default: + return nil, err //unexpected error + } + } +} + +func (d *driver) createManifest(path string, segments string) error { + headers := make(swift.Headers) + headers["X-Object-Manifest"] = segments + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", contentType, headers) + if err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + if err := manifest.Close(); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + return nil +} + +func chunkFilenames(slice []string, maxSize int) (chunks [][]string, err error) { + if maxSize > 0 { + for offset := 0; offset < len(slice); offset += maxSize { + chunkSize := maxSize + if offset+chunkSize > len(slice) { + chunkSize = len(slice) - offset + } + chunks = append(chunks, slice[offset:offset+chunkSize]) + } + } else { + return nil, fmt.Errorf("Max chunk size must be > 0") + } + return +} + +func parseManifest(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} + +func generateSecret() (string, error) { + var secretBytes [32]byte + if _, err := rand.Read(secretBytes[:]); err != nil { + return "", fmt.Errorf("could not generate random bytes for Swift secret key: %v", err) + } + return hex.EncodeToString(secretBytes[:]), nil +} + +func getSegmentPath(segmentsPath string, partNumber int) string { + return fmt.Sprintf("%s/%016d", segmentsPath, partNumber) +} + +type writer struct { + driver *driver + path string + segmentsPath string + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(path, segmentsPath string, segments []swift.Object) storagedriver.FileWriter { + var size int64 + for _, segment := range segments { + size += segment.Bytes + } + return &writer{ + driver: d, + path: path, + segmentsPath: segmentsPath, + size: size, + bw: bufio.NewWriterSize(&segmentWriter{ + conn: d.Conn, + container: d.Container, + segmentsPath: segmentsPath, + segmentNumber: len(segments) + 1, + maxChunkSize: d.ChunkSize, + }, d.ChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if !w.committed && !w.cancelled { + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + if err := w.waitForSegmentsToShowUp(); err != nil { + return err + } + } + w.closed = true + + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.Delete(context.Background(), w.path) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + + w.committed = true + return w.waitForSegmentsToShowUp() +} + +func (w *writer) waitForSegmentsToShowUp() error { + var err error + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + var info swift.Object + if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil { + if info.Bytes == w.size { + break + } + err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", w.path) + } + if time.Now().Add(waitingTime).After(endTime) { + break + } + time.Sleep(waitingTime) + waitingTime *= 2 + } + + return err +} + +type segmentWriter struct { + conn *swift.Connection + container string + segmentsPath string + segmentNumber int + maxChunkSize int +} + +func (sw *segmentWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += sw.maxChunkSize { + chunkSize := sw.maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + _, err := sw.conn.ObjectPut(sw.container, getSegmentPath(sw.segmentsPath, sw.segmentNumber), bytes.NewReader(p[offset:offset+chunkSize]), false, "", contentType, nil) + if err != nil { + return n, err + } + + sw.segmentNumber++ + n += chunkSize + } + + return n, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go new file mode 100644 index 0000000..dcd5e4f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go @@ -0,0 +1,245 @@ +package swift + +import ( + "io/ioutil" + "os" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/ncw/swift/swifttest" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var swiftDriverConstructor func(prefix string) (*Driver, error) + +func init() { + var ( + username string + password string + authURL string + tenant string + tenantID string + domain string + domainID string + tenantDomain string + tenantDomainID string + trustID string + container string + region string + AuthVersion int + endpointType string + insecureSkipVerify bool + secretKey string + accessKey string + containerKey bool + tempURLMethods []string + + swiftServer *swifttest.SwiftServer + err error + ) + username = os.Getenv("SWIFT_USERNAME") + password = os.Getenv("SWIFT_PASSWORD") + authURL = os.Getenv("SWIFT_AUTH_URL") + tenant = os.Getenv("SWIFT_TENANT_NAME") + tenantID = os.Getenv("SWIFT_TENANT_ID") + domain = os.Getenv("SWIFT_DOMAIN_NAME") + domainID = os.Getenv("SWIFT_DOMAIN_ID") + tenantDomain = os.Getenv("SWIFT_DOMAIN_NAME") + tenantDomainID = os.Getenv("SWIFT_DOMAIN_ID") + trustID = os.Getenv("SWIFT_TRUST_ID") + container = os.Getenv("SWIFT_CONTAINER_NAME") + region = os.Getenv("SWIFT_REGION_NAME") + AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) + endpointType = os.Getenv("SWIFT_ENDPOINT_TYPE") + insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) + secretKey = os.Getenv("SWIFT_SECRET_KEY") + accessKey = os.Getenv("SWIFT_ACCESS_KEY") + containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) + tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") + + if username == "" || password == "" || authURL == "" || container == "" { + if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { + panic(err) + } + username = "swifttest" + password = "swifttest" + authURL = swiftServer.AuthURL + container = "test" + } + + prefix, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(prefix) + + swiftDriverConstructor = func(root string) (*Driver, error) { + parameters := Parameters{ + username, + password, + authURL, + tenant, + tenantID, + domain, + domainID, + tenantDomain, + tenantDomainID, + trustID, + region, + AuthVersion, + container, + root, + endpointType, + insecureSkipVerify, + defaultChunkSize, + secretKey, + accessKey, + containerKey, + tempURLMethods, + } + + return New(parameters) + } + + driverConstructor := func() (storagedriver.StorageDriver, error) { + return swiftDriverConstructor(prefix) + } + + testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip) +} + +func TestEmptyRootList(t *testing.T) { + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := swiftDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := swiftDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := swiftDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + // Create an object with a path nested under the existing object + err = rootedDriver.PutContent(ctx, filename+"/file1", contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + err = rootedDriver.Delete(ctx, filename) + if err != nil { + t.Fatalf("failed to delete: %v", err) + } + + keys, err = rootedDriver.List(ctx, "/") + if err != nil { + t.Fatalf("failed to list objects after deletion: %v", err) + } + + if len(keys) != 0 { + t.Fatal("delete did not remove nested objects") + } +} + +func TestFilenameChunking(t *testing.T) { + // Test valid input and sizes + input := []string{"a", "b", "c", "d", "e"} + expecteds := [][][]string{ + { + {"a"}, + {"b"}, + {"c"}, + {"d"}, + {"e"}, + }, + { + {"a", "b"}, + {"c", "d"}, + {"e"}, + }, + { + {"a", "b", "c"}, + {"d", "e"}, + }, + { + {"a", "b", "c", "d"}, + {"e"}, + }, + { + {"a", "b", "c", "d", "e"}, + }, + { + {"a", "b", "c", "d", "e"}, + }, + } + for i, expected := range expecteds { + actual, err := chunkFilenames(input, i+1) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("chunk %v didn't match expected value %v", actual, expected) + } + if err != nil { + t.Fatalf("unexpected error chunking filenames: %v", err) + } + } + + // Test nil input + actual, err := chunkFilenames(nil, 5) + if len(actual) != 0 { + t.Fatal("chunks were returned when passed nil") + } + if err != nil { + t.Fatalf("unexpected error chunking filenames: %v", err) + } + + // Test 0 and < 0 sizes + actual, err = chunkFilenames(nil, 0) + if err == nil { + t.Fatal("expected error for size = 0") + } + actual, err = chunkFilenames(nil, -1) + if err == nil { + t.Fatal("expected error for size = -1") + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/testdriver/testdriver.go b/vendor/github.com/docker/distribution/registry/storage/driver/testdriver/testdriver.go new file mode 100644 index 0000000..988e5d3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/testdriver/testdriver.go @@ -0,0 +1,71 @@ +package testdriver + +import ( + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +const driverName = "testdriver" + +func init() { + factory.Register(driverName, &testDriverFactory{}) +} + +// testDriverFactory implements the factory.StorageDriverFactory interface. +type testDriverFactory struct{} + +func (factory *testDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return New(), nil +} + +// TestDriver is a StorageDriver for testing purposes. The Writer returned by this driver +// simulates the case where Write operations are buffered. This causes the value returned by Size to lag +// behind until Close (or Commit, or Cancel) is called. +type TestDriver struct { + storagedriver.StorageDriver +} + +type testFileWriter struct { + storagedriver.FileWriter + prevchunk []byte +} + +var _ storagedriver.StorageDriver = &TestDriver{} + +// New constructs a new StorageDriver for testing purposes. The Writer returned by this driver +// simulates the case where Write operations are buffered. This causes the value returned by Size to lag +// behind until Close (or Commit, or Cancel) is called. +func New() *TestDriver { + return &TestDriver{StorageDriver: inmemory.New()} +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (td *TestDriver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + fw, err := td.StorageDriver.Writer(ctx, path, append) + return &testFileWriter{FileWriter: fw}, err +} + +func (tfw *testFileWriter) Write(p []byte) (int, error) { + _, err := tfw.FileWriter.Write(tfw.prevchunk) + tfw.prevchunk = make([]byte, len(p)) + copy(tfw.prevchunk, p) + return len(p), err +} + +func (tfw *testFileWriter) Close() error { + tfw.Write(nil) + return tfw.FileWriter.Close() +} + +func (tfw *testFileWriter) Cancel() error { + tfw.Write(nil) + return tfw.FileWriter.Cancel() +} + +func (tfw *testFileWriter) Commit() error { + tfw.Write(nil) + return tfw.FileWriter.Commit() +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go new file mode 100644 index 0000000..d8afe0c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go @@ -0,0 +1,1273 @@ +package testsuites + +import ( + "bytes" + "crypto/sha1" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "path" + "sort" + "strings" + "sync" + "testing" + "time" + + "gopkg.in/check.v1" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// Test hooks up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +// RegisterSuite registers an in-process storage driver test suite with +// the go test runner. +func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { + check.Suite(&DriverSuite{ + Constructor: driverConstructor, + SkipCheck: skipCheck, + ctx: context.Background(), + }) +} + +// SkipCheck is a function used to determine if a test suite should be skipped. +// If a SkipCheck returns a non-empty skip reason, the suite is skipped with +// the given reason. +type SkipCheck func() (reason string) + +// NeverSkip is a default SkipCheck which never skips the suite. +var NeverSkip SkipCheck = func() string { return "" } + +// DriverConstructor is a function which returns a new +// storagedriver.StorageDriver. +type DriverConstructor func() (storagedriver.StorageDriver, error) + +// DriverTeardown is a function which cleans up a suite's +// storagedriver.StorageDriver. +type DriverTeardown func() error + +// DriverSuite is a gocheck test suite designed to test a +// storagedriver.StorageDriver. The intended way to create a DriverSuite is +// with RegisterSuite. +type DriverSuite struct { + Constructor DriverConstructor + Teardown DriverTeardown + SkipCheck + storagedriver.StorageDriver + ctx context.Context +} + +// SetUpSuite sets up the gocheck test suite. +func (suite *DriverSuite) SetUpSuite(c *check.C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } + d, err := suite.Constructor() + c.Assert(err, check.IsNil) + suite.StorageDriver = d +} + +// TearDownSuite tears down the gocheck test suite. +func (suite *DriverSuite) TearDownSuite(c *check.C) { + if suite.Teardown != nil { + err := suite.Teardown() + c.Assert(err, check.IsNil) + } +} + +// TearDownTest tears down the gocheck test. +// This causes the suite to abort if any files are left around in the storage +// driver. +func (suite *DriverSuite) TearDownTest(c *check.C) { + files, _ := suite.StorageDriver.List(suite.ctx, "/") + if len(files) > 0 { + c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) + } +} + +// TestRootExists ensures that all storage drivers have a root path by default. +func (suite *DriverSuite) TestRootExists(c *check.C) { + _, err := suite.StorageDriver.List(suite.ctx, "/") + if err != nil { + c.Fatalf(`the root path "/" should always exist: %v`, err) + } +} + +// TestValidPaths checks that various valid file paths are accepted by the +// storage driver. +func (suite *DriverSuite) TestValidPaths(c *check.C) { + contents := randomContents(64) + validFiles := []string{ + "/a", + "/2", + "/aa", + "/a.a", + "/0-9/abcdefg", + "/abcdefg/z.75", + "/abc/1.2.3.4.5-6_zyx/123.z/4", + "/docker/docker-registry", + "/123.abc", + "/abc./abc", + "/.abc", + "/a--b", + "/a-.b", + "/_.abc", + "/Docker/docker-registry", + "/Abc/Cba"} + + for _, filename := range validFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.deletePath(c, firstPart(filename)) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + } +} + +func (suite *DriverSuite) deletePath(c *check.C, path string) { + for tries := 2; tries > 0; tries-- { + err := suite.StorageDriver.Delete(suite.ctx, path) + if _, ok := err.(storagedriver.PathNotFoundError); ok { + err = nil + } + c.Assert(err, check.IsNil) + paths, err := suite.StorageDriver.List(suite.ctx, path) + if len(paths) == 0 { + break + } + time.Sleep(time.Second * 2) + } +} + +// TestInvalidPaths checks that various invalid file paths are rejected by the +// storage driver. +func (suite *DriverSuite) TestInvalidPaths(c *check.C) { + contents := randomContents(64) + invalidFiles := []string{ + "", + "/", + "abc", + "123.abc", + "//bcd", + "/abc_123/"} + + for _, filename := range invalidFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + // only delete if file was successfully written + if err == nil { + defer suite.deletePath(c, firstPart(filename)) + } + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + } +} + +// TestWriteRead1 tests a simple write-read workflow. +func (suite *DriverSuite) TestWriteRead1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead2 tests a simple write-read workflow with unicode data. +func (suite *DriverSuite) TestWriteRead2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead3 tests a simple write-read workflow with a small string. +func (suite *DriverSuite) TestWriteRead3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead4 tests a simple write-read workflow with 1MB of data. +func (suite *DriverSuite) TestWriteRead4(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompare(c, filename, contents) +} + +// TestTruncate tests that putting smaller contents than an original file does +// remove the excess contents. +func (suite *DriverSuite) TestTruncate(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) + + contents = randomContents(1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestReadNonexistent tests reading content from an empty path. +func (suite *DriverSuite) TestReadNonexistent(c *check.C) { + filename := randomPath(32) + _, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestWriteReadStreams1 tests a simple write-read streaming workflow. +func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams2 tests a simple write-read streaming workflow with +// unicode data. +func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams3 tests a simple write-read streaming workflow with a +// small amount of data. +func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB +// of data. +func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the +// storage driver safely. +func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { + if testing.Short() { + c.Skip("Skipping test in short mode") + } + + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + checksum := sha1.New() + var fileSize int64 = 5 * 1024 * 1024 * 1024 + + contents := newRandReader(fileSize) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + written, err := io.Copy(writer, io.TeeReader(contents, checksum)) + c.Assert(err, check.IsNil) + c.Assert(written, check.Equals, fileSize) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + writtenChecksum := sha1.New() + io.Copy(writtenChecksum, reader) + + c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) +} + +// TestReaderWithOffset tests that the appropriate data is streamed when +// reading with a given offset. +func (suite *DriverSuite) TestReaderWithOffset(c *check.C) { + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + chunkSize := int64(32) + + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) + + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err = ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...)) + + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*2) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err = ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contentsChunk3) + + // Ensure we get invalid offest for negative offsets. + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, -1) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) + c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) + c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) + c.Assert(reader, check.IsNil) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + // Read past the end of the content and make sure we get a reader that + // returns 0 bytes and io.EOF + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3) + c.Assert(err, check.IsNil) + defer reader.Close() + + buf := make([]byte, chunkSize) + n, err := reader.Read(buf) + c.Assert(err, check.Equals, io.EOF) + c.Assert(n, check.Equals, 0) + + // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF. + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3-1) + c.Assert(err, check.IsNil) + defer reader.Close() + + n, err = reader.Read(buf) + c.Assert(n, check.Equals, 1) + + // We don't care whether the io.EOF comes on the this read or the first + // zero read, but the only error acceptable here is io.EOF. + if err != nil { + c.Assert(err, check.Equals, io.EOF) + } + + // Any more reads should result in zero bytes and io.EOF + n, err = reader.Read(buf) + c.Assert(n, check.Equals, 0) + c.Assert(err, check.Equals, io.EOF) +} + +// TestContinueStreamAppendLarge tests that a stream write can be appended to without +// corrupting the data with a large chunk size. +func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) { + suite.testContinueStreamAppend(c, int64(10*1024*1024)) +} + +// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only +// with a tiny chunk size in order to test corner cases for some cloud storage drivers. +func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { + suite.testContinueStreamAppend(c, int64(32)) +} + +func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + + fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, bytes.NewReader(contentsChunk1)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk1))) + + err = writer.Close() + c.Assert(err, check.IsNil) + + curSize := writer.Size() + c.Assert(curSize, check.Equals, int64(len(contentsChunk1))) + + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) + c.Assert(err, check.IsNil) + c.Assert(writer.Size(), check.Equals, curSize) + + nn, err = io.Copy(writer, bytes.NewReader(contentsChunk2)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk2))) + + err = writer.Close() + c.Assert(err, check.IsNil) + + curSize = writer.Size() + c.Assert(curSize, check.Equals, 2*chunkSize) + + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) + c.Assert(err, check.IsNil) + c.Assert(writer.Size(), check.Equals, curSize) + + nn, err = io.Copy(writer, bytes.NewReader(fullContents[curSize:])) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(fullContents[curSize:]))) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, fullContents) +} + +// TestReadNonexistentStream tests that reading a stream for a nonexistent path +// fails. +func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { + filename := randomPath(32) + + _, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.Reader(suite.ctx, filename, 64) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestList checks the returned list of keys after populating a directory tree. +func (suite *DriverSuite) TestList(c *check.C) { + rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) + defer suite.deletePath(c, rootDirectory) + + doesnotexist := path.Join(rootDirectory, "nonexistent") + _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) + c.Assert(err, check.Equals, storagedriver.PathNotFoundError{ + Path: doesnotexist, + DriverName: suite.StorageDriver.Name(), + }) + + parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles := make([]string, 50) + for i := 0; i < len(childFiles); i++ { + childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles[i] = childFile + err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) + c.Assert(err, check.IsNil) + } + sort.Strings(childFiles) + + keys, err := suite.StorageDriver.List(suite.ctx, "/") + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{rootDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{parentDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) + c.Assert(err, check.IsNil) + + sort.Strings(keys) + c.Assert(keys, check.DeepEquals, childFiles) + + // A few checks to add here (check out #819 for more discussion on this): + // 1. Ensure that all paths are absolute. + // 2. Ensure that listings only include direct children. + // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). +} + +// TestMove checks that a moved object no longer exists at the source path and +// does exist at the destination. +func (suite *DriverSuite) TestMove(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.deletePath(c, firstPart(sourcePath)) + defer suite.deletePath(c, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestMoveOverwrite checks that a moved object no longer exists at the source +// path and overwrites the contents at the destination. +func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { + sourcePath := randomPath(32) + destPath := randomPath(32) + sourceContents := randomContents(32) + destContents := randomContents(64) + + defer suite.deletePath(c, firstPart(sourcePath)) + defer suite.deletePath(c, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, sourceContents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestMoveNonexistent checks that moving a nonexistent key fails and does not +// delete the data at the destination path. +func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.deletePath(c, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) +} + +// TestMoveInvalid provides various checks for invalid moves. +func (suite *DriverSuite) TestMoveInvalid(c *check.C) { + contents := randomContents(32) + + // Create a regular file. + err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) + c.Assert(err, check.IsNil) + defer suite.deletePath(c, "/notadir") + + // Now try to move a non-existent file under it. + err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") + c.Assert(err, check.NotNil) // non-nil error +} + +// TestDelete checks that the delete operation removes data from the storage +// driver +func (suite *DriverSuite) TestDelete(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, filename) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestURLFor checks that the URLFor method functions properly, but only if it +// is implemented +func (suite *DriverSuite) TestURLFor(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) + if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { + return + } + c.Assert(err, check.IsNil) + + response, err := http.Get(url) + c.Assert(err, check.IsNil) + defer response.Body.Close() + + read, err := ioutil.ReadAll(response.Body) + c.Assert(err, check.IsNil) + c.Assert(read, check.DeepEquals, contents) + + url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) + if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { + return + } + c.Assert(err, check.IsNil) + + response, err = http.Head(url) + c.Assert(response.StatusCode, check.Equals, 200) + c.Assert(response.ContentLength, check.Equals, int64(32)) +} + +// TestDeleteNonexistent checks that removing a nonexistent key fails. +func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { + filename := randomPath(32) + err := suite.StorageDriver.Delete(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestDeleteFolder checks that deleting a folder removes all child elements. +func (suite *DriverSuite) TestDeleteFolder(c *check.C) { + dirname := randomPath(32) + filename1 := randomPath(32) + filename2 := randomPath(32) + filename3 := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(dirname)) + + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, dirname) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestDeleteOnlyDeletesSubpaths checks that deleting path A does not +// delete path B when A is a prefix of B but B is not a subpath of A (so that +// deleting "/a" does not delete "/ab"). This matters for services like S3 that +// do not implement directories. +func (suite *DriverSuite) TestDeleteOnlyDeletesSubpaths(c *check.C) { + dirname := randomPath(32) + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(dirname)) + + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename+"suffix"), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname, filename), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename+"suffix")) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, dirname)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname, filename)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename)) + c.Assert(err, check.IsNil) +} + +// TestStatCall runs verifies the implementation of the storagedriver's Stat call. +func (suite *DriverSuite) TestStatCall(c *check.C) { + content := randomContents(4096) + dirPath := randomPath(32) + fileName := randomFilename(32) + filePath := path.Join(dirPath, fileName) + + defer suite.deletePath(c, firstPart(dirPath)) + + // Call on non-existent file/dir, check error. + fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + c.Assert(fi, check.IsNil) + + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + c.Assert(fi, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) + c.Assert(err, check.IsNil) + + // Call on regular file, check results + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Path(), check.Equals, filePath) + c.Assert(fi.Size(), check.Equals, int64(len(content))) + c.Assert(fi.IsDir(), check.Equals, false) + createdTime := fi.ModTime() + + // Sleep and modify the file + time.Sleep(time.Second * 10) + content = randomContents(4096) + err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) + c.Assert(err, check.IsNil) + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency) + + // Check if the modification time is after the creation time. + // In case of cloud storage services, storage frontend nodes might have + // time drift between them, however that should be solved with sleeping + // before update. + modTime := fi.ModTime() + if !modTime.After(createdTime) { + c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime) + } + + // Call on directory (do not check ModTime as dirs don't need to support it) + fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Path(), check.Equals, dirPath) + c.Assert(fi.Size(), check.Equals, int64(0)) + c.Assert(fi.IsDir(), check.Equals, true) +} + +// TestPutContentMultipleTimes checks that if storage driver can overwrite the content +// in the subsequent puts. Validates that PutContent does not have to work +// with an offset like Writer does and overwrites the file entirely +// rather than writing the data to the [0,len(data)) of the file. +func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { + filename := randomPath(32) + contents := randomContents(4096) + + defer suite.deletePath(c, firstPart(filename)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + contents = randomContents(2048) // upload a different, smaller file + err = suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contents) +} + +// TestConcurrentStreamReads checks that multiple clients can safely read from +// the same file simultaneously with various offsets. +func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { + var filesize int64 = 128 * 1024 * 1024 + + if testing.Short() { + filesize = 10 * 1024 * 1024 + c.Log("Reducing file size to 10MB for short mode") + } + + filename := randomPath(32) + contents := randomContents(filesize) + + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + var wg sync.WaitGroup + + readContents := func() { + defer wg.Done() + offset := rand.Int63n(int64(len(contents))) + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) + c.Assert(err, check.IsNil) + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contents[offset:]) + } + + wg.Add(10) + for i := 0; i < 10; i++ { + go readContents() + } + wg.Wait() +} + +// TestConcurrentFileStreams checks that multiple *os.File objects can be passed +// in to Writer concurrently without hanging. +func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { + numStreams := 32 + + if testing.Short() { + numStreams = 8 + c.Log("Reducing number of streams to 8 for short mode") + } + + var wg sync.WaitGroup + + testStream := func(size int64) { + defer wg.Done() + suite.testFileStreams(c, size) + } + + wg.Add(numStreams) + for i := numStreams; i > 0; i-- { + go testStream(int64(numStreams) * 1024 * 1024) + } + + wg.Wait() +} + +// TODO (brianbland): evaluate the relevancy of this test +// TestEventualConsistency checks that if stat says that a file is a certain size, then +// you can freely read from the file (this is the only guarantee that the driver needs to provide) +// func (suite *DriverSuite) TestEventualConsistency(c *check.C) { +// if testing.Short() { +// c.Skip("Skipping test in short mode") +// } +// +// filename := randomPath(32) +// defer suite.deletePath(c, firstPart(filename)) +// +// var offset int64 +// var misswrites int +// var chunkSize int64 = 32 +// +// for i := 0; i < 1024; i++ { +// contents := randomContents(chunkSize) +// read, err := suite.StorageDriver.Writer(suite.ctx, filename, offset, bytes.NewReader(contents)) +// c.Assert(err, check.IsNil) +// +// fi, err := suite.StorageDriver.Stat(suite.ctx, filename) +// c.Assert(err, check.IsNil) +// +// // We are most concerned with being able to read data as soon as Stat declares +// // it is uploaded. This is the strongest guarantee that some drivers (that guarantee +// // at best eventual consistency) absolutely need to provide. +// if fi.Size() == offset+chunkSize { +// reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) +// c.Assert(err, check.IsNil) +// +// readContents, err := ioutil.ReadAll(reader) +// c.Assert(err, check.IsNil) +// +// c.Assert(readContents, check.DeepEquals, contents) +// +// reader.Close() +// offset += read +// } else { +// misswrites++ +// } +// } +// +// if misswrites > 0 { +// c.Log("There were " + string(misswrites) + " occurrences of a write not being instantly available.") +// } +// +// c.Assert(misswrites, check.Not(check.Equals), 1024) +// } + +// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files +func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 0) +} + +// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files +func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024) +} + +// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files +func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024*1024) +} + +// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files +func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024*1024*1024) +} + +func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { + c.SetBytes(size) + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := 0; i < c.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + } +} + +// BenchmarkStreamEmptyFiles benchmarks Writer/Reader for 0B files +func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 0) +} + +// BenchmarkStream1KBFiles benchmarks Writer/Reader for 1KB files +func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024) +} + +// BenchmarkStream1MBFiles benchmarks Writer/Reader for 1MB files +func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024) +} + +// BenchmarkStream1GBFiles benchmarks Writer/Reader for 1GB files +func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024*1024) +} + +func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { + c.SetBytes(size) + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := 0; i < c.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + written, err := io.Copy(writer, bytes.NewReader(randomContents(size))) + c.Assert(err, check.IsNil) + c.Assert(written, check.Equals, size) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + rc, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + rc.Close() + } +} + +// BenchmarkList5Files benchmarks List for 5 small files +func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { + suite.benchmarkListFiles(c, 5) +} + +// BenchmarkList50Files benchmarks List for 50 small files +func (suite *DriverSuite) BenchmarkList50Files(c *check.C) { + suite.benchmarkListFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) { + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := int64(0); i < numFiles; i++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + + c.ResetTimer() + for i := 0; i < c.N; i++ { + files, err := suite.StorageDriver.List(suite.ctx, parentDir) + c.Assert(err, check.IsNil) + c.Assert(int64(len(files)), check.Equals, numFiles) + } +} + +// BenchmarkDelete5Files benchmarks Delete for 5 small files +func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 5) +} + +// BenchmarkDelete50Files benchmarks Delete for 50 small files +func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { + for i := 0; i < c.N; i++ { + parentDir := randomPath(8) + defer suite.deletePath(c, firstPart(parentDir)) + + c.StopTimer() + for j := int64(0); j < numFiles; j++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + c.StartTimer() + + // This is the operation we're benchmarking + err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + c.Assert(err, check.IsNil) + } +} + +func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { + tf, err := ioutil.TempFile("", "tf") + c.Assert(err, check.IsNil) + defer os.Remove(tf.Name()) + defer tf.Close() + + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + contents := randomContents(size) + + _, err = tf.Write(contents) + c.Assert(err, check.IsNil) + + tf.Sync() + tf.Seek(0, os.SEEK_SET) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, tf) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, size) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { + defer suite.deletePath(c, firstPart(filename)) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, bytes.NewReader(contents)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contents))) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + path += chunk + remaining := length - int64(len(path)) + if remaining == 1 { + path += randomFilename(1) + } else if remaining > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { + b := make([]byte, length) + wasSeparator := true + for i := range b { + if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { + b[i] = separatorChars[rand.Intn(len(separatorChars))] + wasSeparator = true + } else { + b[i] = filenameChars[rand.Intn(len(filenameChars))] + wasSeparator = false + } + } + return string(b) +} + +// randomBytes pre-allocates all of the memory sizes needed for the test. If +// anything panics while accessing randomBytes, just make this number bigger. +var randomBytes = make([]byte, 128<<20) + +func init() { + _, _ = rand.Read(randomBytes) // always returns len(randomBytes) and nil error +} + +func randomContents(length int64) []byte { + return randomBytes[:length] +} + +type randReader struct { + r int64 + m sync.Mutex +} + +func (rr *randReader) Read(p []byte) (n int, err error) { + rr.m.Lock() + defer rr.m.Unlock() + + toread := int64(len(p)) + if toread > rr.r { + toread = rr.r + } + n = copy(p, randomContents(toread)) + rr.r -= int64(n) + + if rr.r <= 0 { + err = io.EOF + } + + return +} + +func newRandReader(n int64) *randReader { + return &randReader{r: n} +} + +func firstPart(filePath string) string { + if filePath == "" { + return "/" + } + for { + if filePath[len(filePath)-1] == '/' { + filePath = filePath[:len(filePath)-1] + } + + dir, file := path.Split(filePath) + if dir == "" && file == "" { + return "/" + } + if dir == "/" || dir == "" { + return "/" + file + } + if file == "" { + return dir + } + filePath = dir + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/filereader.go b/vendor/github.com/docker/distribution/registry/storage/filereader.go new file mode 100644 index 0000000..3b06c81 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/filereader.go @@ -0,0 +1,177 @@ +package storage + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// TODO(stevvooe): Set an optimal buffer size here. We'll have to +// understand the latency characteristics of the underlying network to +// set this correctly, so we may want to leave it to the driver. For +// out of process drivers, we'll have to optimize this buffer size for +// local communication. +const fileReaderBufferSize = 4 << 20 + +// remoteFileReader provides a read seeker interface to files stored in +// storagedriver. Used to implement part of layer interface and will be used +// to implement read side of LayerUpload. +type fileReader struct { + driver storagedriver.StorageDriver + + ctx context.Context + + // identifying fields + path string + size int64 // size is the total size, must be set. + + // mutable fields + rc io.ReadCloser // remote read closer + brd *bufio.Reader // internal buffered io + offset int64 // offset is the current read offset + err error // terminal error, if set, reader is closed +} + +// newFileReader initializes a file reader for the remote file. The reader +// takes on the size and path that must be determined externally with a stat +// call. The reader operates optimistically, assuming that the file is already +// there. +func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { + return &fileReader{ + ctx: ctx, + driver: driver, + path: path, + size: size, + }, nil +} + +func (fr *fileReader) Read(p []byte) (n int, err error) { + if fr.err != nil { + return 0, fr.err + } + + rd, err := fr.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + fr.offset += int64(n) + + // Simulate io.EOR error if we reach filesize. + if err == nil && fr.offset >= fr.size { + err = io.EOF + } + + return n, err +} + +func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { + if fr.err != nil { + return 0, fr.err + } + + var err error + newOffset := fr.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset = fr.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = fmt.Errorf("cannot seek to negative position") + } else { + if fr.offset != newOffset { + fr.reset() + } + + // No problems, set the offset. + fr.offset = newOffset + } + + return fr.offset, err +} + +func (fr *fileReader) Close() error { + return fr.closeWithErr(fmt.Errorf("fileReader: closed")) +} + +// reader prepares the current reader at the lrs offset, ensuring its buffered +// and ready to go. +func (fr *fileReader) reader() (io.Reader, error) { + if fr.err != nil { + return nil, fr.err + } + + if fr.rc != nil { + return fr.brd, nil + } + + // If we don't have a reader, open one up. + rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // NOTE(stevvooe): If the path is not found, we simply return a + // reader that returns io.EOF. However, we do not set fr.rc, + // allowing future attempts at getting a reader to possibly + // succeed if the file turns up later. + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + default: + return nil, err + } + } + + fr.rc = rc + + if fr.brd == nil { + fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) + } else { + fr.brd.Reset(fr.rc) + } + + return fr.brd, nil +} + +// resetReader resets the reader, forcing the read method to open up a new +// connection and rebuild the buffered reader. This should be called when the +// offset and the reader will become out of sync, such as during a seek +// operation. +func (fr *fileReader) reset() { + if fr.err != nil { + return + } + if fr.rc != nil { + fr.rc.Close() + fr.rc = nil + } +} + +func (fr *fileReader) closeWithErr(err error) error { + if fr.err != nil { + return fr.err + } + + fr.err = err + + // close and release reader chain + if fr.rc != nil { + fr.rc.Close() + } + + fr.rc = nil + fr.brd = nil + + return fr.err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/filereader_test.go b/vendor/github.com/docker/distribution/registry/storage/filereader_test.go new file mode 100644 index 0000000..5926020 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/filereader_test.go @@ -0,0 +1,198 @@ +package storage + +import ( + "bytes" + "io" + mrand "math/rand" + "os" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func TestSimpleRead(t *testing.T) { + ctx := context.Background() + content := make([]byte, 1<<20) + n, err := mrand.Read(content) + if err != nil { + t.Fatalf("unexpected error building random data: %v", err) + } + + if n != len(content) { + t.Fatalf("random read didn't fill buffer") + } + + dgst, err := digest.FromReader(bytes.NewReader(content)) + if err != nil { + t.Fatalf("unexpected error digesting random content: %v", err) + } + + driver := inmemory.New() + path := "/random" + + if err := driver.PutContent(ctx, path, content); err != nil { + t.Fatalf("error putting patterned content: %v", err) + } + + fr, err := newFileReader(ctx, driver, path, int64(len(content))) + if err != nil { + t.Fatalf("error allocating file reader: %v", err) + } + + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + t.Fatalf("error getting digest verifier: %s", err) + } + + io.Copy(verifier, fr) + + if !verifier.Verified() { + t.Fatalf("unable to verify read data") + } +} + +func TestFileReaderSeek(t *testing.T) { + driver := inmemory.New() + pattern := "01234567890ab" // prime length block + repititions := 1024 + path := "/patterned" + content := bytes.Repeat([]byte(pattern), repititions) + ctx := context.Background() + + if err := driver.PutContent(ctx, path, content); err != nil { + t.Fatalf("error putting patterned content: %v", err) + } + + fr, err := newFileReader(ctx, driver, path, int64(len(content))) + + if err != nil { + t.Fatalf("unexpected error creating file reader: %v", err) + } + + // Seek all over the place, in blocks of pattern size and make sure we get + // the right data. + for _, repitition := range mrand.Perm(repititions - 1) { + targetOffset := int64(len(pattern) * repitition) + // Seek to a multiple of pattern size and read pattern size bytes + offset, err := fr.Seek(targetOffset, os.SEEK_SET) + if err != nil { + t.Fatalf("unexpected error seeking: %v", err) + } + + if offset != targetOffset { + t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset) + } + + p := make([]byte, len(pattern)) + + n, err := fr.Read(p) + if err != nil { + t.Fatalf("error reading pattern: %v", err) + } + + if n != len(pattern) { + t.Fatalf("incorrect read length: %d != %d", n, len(pattern)) + } + + if string(p) != pattern { + t.Fatalf("incorrect read content: %q != %q", p, pattern) + } + + // Check offset + current, err := fr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("error checking current offset: %v", err) + } + + if current != targetOffset+int64(len(pattern)) { + t.Fatalf("unexpected offset after read: %v", err) + } + } + + start, err := fr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking to start: %v", err) + } + + if start != 0 { + t.Fatalf("expected to seek to start: %v != 0", start) + } + + end, err := fr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatalf("error checking current offset: %v", err) + } + + if end != int64(len(content)) { + t.Fatalf("expected to seek to end: %v != %v", end, len(content)) + } + + // 4. Seek before start, ensure error. + + // seek before start + before, err := fr.Seek(-1, os.SEEK_SET) + if err == nil { + t.Fatalf("error expected, returned offset=%v", before) + } + + // 5. Seek after end, + after, err := fr.Seek(1, os.SEEK_END) + if err != nil { + t.Fatalf("unexpected error expected, returned offset=%v", after) + } + + p := make([]byte, 16) + n, err := fr.Read(p) + + if n != 0 { + t.Fatalf("bytes reads %d != %d", n, 0) + } + + if err != io.EOF { + t.Fatalf("expected io.EOF, got %v", err) + } +} + +// TestFileReaderNonExistentFile ensures the reader behaves as expected with a +// missing or zero-length remote file. While the file may not exist, the +// reader should not error out on creation and should return 0-bytes from the +// read method, with an io.EOF error. +func TestFileReaderNonExistentFile(t *testing.T) { + driver := inmemory.New() + fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10) + if err != nil { + t.Fatalf("unexpected error initializing reader: %v", err) + } + + var buf [1024]byte + + n, err := fr.Read(buf[:]) + if n != 0 { + t.Fatalf("non-zero byte read reported: %d != 0", n) + } + + if err != io.EOF { + t.Fatalf("read on missing file should return io.EOF, got %v", err) + } +} + +// TestLayerReadErrors covers the various error return type for different +// conditions that can arise when reading a layer. +func TestFileReaderErrors(t *testing.T) { + // TODO(stevvooe): We need to cover error return types, driven by the + // errors returned via the HTTP API. For now, here is an incomplete list: + // + // 1. Layer Not Found: returned when layer is not found or access is + // denied. + // 2. Layer Unavailable: returned when link references are unresolved, + // but layer is known to the registry. + // 3. Layer Invalid: This may more split into more errors, but should be + // returned when name or tarsum does not reference a valid error. We + // may also need something to communication layer verification errors + // for the inline tarsum check. + // 4. Timeout: timeouts to backend. Need to better understand these + // failure cases and how the storage driver propagates these errors + // up the stack. +} diff --git a/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go b/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go new file mode 100644 index 0000000..7cf0298 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go @@ -0,0 +1,114 @@ +package storage + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" +) + +func emit(format string, a ...interface{}) { + fmt.Printf(format+"\n", a...) +} + +// MarkAndSweep performs a mark and sweep of registry data +func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error { + repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) + if !ok { + return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") + } + + // mark + markSet := make(map[digest.Digest]struct{}) + err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { + emit(repoName) + + var err error + named, err := reference.ParseNamed(repoName) + if err != nil { + return fmt.Errorf("failed to parse repo name %s: %v", repoName, err) + } + repository, err := registry.Repository(ctx, named) + if err != nil { + return fmt.Errorf("failed to construct repository: %v", err) + } + + manifestService, err := repository.Manifests(ctx) + if err != nil { + return fmt.Errorf("failed to construct manifest service: %v", err) + } + + manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) + if !ok { + return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator") + } + + err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + // Mark the manifest's blob + emit("%s: marking manifest %s ", repoName, dgst) + markSet[dgst] = struct{}{} + + manifest, err := manifestService.Get(ctx, dgst) + if err != nil { + return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err) + } + + descriptors := manifest.References() + for _, descriptor := range descriptors { + markSet[descriptor.Digest] = struct{}{} + emit("%s: marking blob %s", repoName, descriptor.Digest) + } + + return nil + }) + + if err != nil { + // In certain situations such as unfinished uploads, deleting all + // tags in S3 or removing the _manifests folder manually, this + // error may be of type PathNotFound. + // + // In these cases we can continue marking other manifests safely. + if _, ok := err.(driver.PathNotFoundError); ok { + return nil + } + } + + return err + }) + + if err != nil { + return fmt.Errorf("failed to mark: %v", err) + } + + // sweep + blobService := registry.Blobs() + deleteSet := make(map[digest.Digest]struct{}) + err = blobService.Enumerate(ctx, func(dgst digest.Digest) error { + // check if digest is in markSet. If not, delete it! + if _, ok := markSet[dgst]; !ok { + deleteSet[dgst] = struct{}{} + } + return nil + }) + if err != nil { + return fmt.Errorf("error enumerating blobs: %v", err) + } + emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) + // Construct vacuum + vacuum := NewVacuum(ctx, storageDriver) + for dgst := range deleteSet { + emit("blob eligible for deletion: %s", dgst) + if dryRun { + continue + } + err = vacuum.RemoveBlob(string(dgst)) + if err != nil { + return fmt.Errorf("failed to delete blob %s: %v", dgst, err) + } + } + + return err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go b/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go new file mode 100644 index 0000000..925f1a1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go @@ -0,0 +1,377 @@ +package storage + +import ( + "io" + "path" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +type image struct { + manifest distribution.Manifest + manifestDigest digest.Digest + layers map[digest.Digest]io.ReadSeeker +} + +func createRegistry(t *testing.T, driver driver.StorageDriver, options ...RegistryOption) distribution.Namespace { + ctx := context.Background() + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + options = append([]RegistryOption{EnableDelete, Schema1SigningKey(k)}, options...) + registry, err := NewRegistry(ctx, driver, options...) + if err != nil { + t.Fatalf("Failed to construct namespace") + } + return registry +} + +func makeRepository(t *testing.T, registry distribution.Namespace, name string) distribution.Repository { + ctx := context.Background() + + // Initialize a dummy repository + named, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("Failed to parse name %s: %v", name, err) + } + + repo, err := registry.Repository(ctx, named) + if err != nil { + t.Fatalf("Failed to construct repository: %v", err) + } + return repo +} + +func makeManifestService(t *testing.T, repository distribution.Repository) distribution.ManifestService { + ctx := context.Background() + + manifestService, err := repository.Manifests(ctx) + if err != nil { + t.Fatalf("Failed to construct manifest store: %v", err) + } + return manifestService +} + +func allBlobs(t *testing.T, registry distribution.Namespace) map[digest.Digest]struct{} { + ctx := context.Background() + blobService := registry.Blobs() + allBlobsMap := make(map[digest.Digest]struct{}) + err := blobService.Enumerate(ctx, func(dgst digest.Digest) error { + allBlobsMap[dgst] = struct{}{} + return nil + }) + if err != nil { + t.Fatalf("Error getting all blobs: %v", err) + } + return allBlobsMap +} + +func uploadImage(t *testing.T, repository distribution.Repository, im image) digest.Digest { + // upload layers + err := testutil.UploadBlobs(repository, im.layers) + if err != nil { + t.Fatalf("layer upload failed: %v", err) + } + + // upload manifest + ctx := context.Background() + manifestService := makeManifestService(t, repository) + manifestDigest, err := manifestService.Put(ctx, im.manifest) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + return manifestDigest +} + +func uploadRandomSchema1Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema1Manifest(digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func uploadRandomSchema2Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema2Manifest(repository, digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func TestNoDeletionNoEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "palailogos") + manifestService, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + uploadRandomSchema2Image(t, repo) + + // construct manifestlist for fun. + blobstatter := registry.BlobStatter() + manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{ + image1.manifestDigest, image2.manifestDigest}) + if err != nil { + t.Fatalf("Failed to make manifest list: %v", err) + } + + _, err = manifestService.Put(ctx, manifestList) + if err != nil { + t.Fatalf("Failed to add manifest list: %v", err) + } + + before := allBlobs(t, registry) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + after := allBlobs(t, registry) + if len(before) != len(after) { + t.Fatalf("Garbage collection affected storage: %d != %d", len(before), len(after)) + } +} + +func TestGCWithMissingManifests(t *testing.T) { + ctx := context.Background() + d := inmemory.New() + + registry := createRegistry(t, d) + repo := makeRepository(t, registry, "testrepo") + uploadRandomSchema1Image(t, repo) + + // Simulate a missing _manifests directory + revPath, err := pathFor(manifestRevisionsPathSpec{"testrepo"}) + if err != nil { + t.Fatal(err) + } + + _manifestsPath := path.Dir(revPath) + err = d.Delete(ctx, _manifestsPath) + if err != nil { + t.Fatal(err) + } + + err = MarkAndSweep(context.Background(), d, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + if len(blobs) > 0 { + t.Errorf("unexpected blobs after gc") + } +} + +func TestDeletionHasEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "komnenos") + manifests, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + image3 := uploadRandomSchema2Image(t, repo) + + manifests.Delete(ctx, image2.manifestDigest) + manifests.Delete(ctx, image3.manifestDigest) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that the image1 manifest and all the layers are still in blobs + if _, ok := blobs[image1.manifestDigest]; !ok { + t.Fatalf("First manifest is missing") + } + + for layer := range image1.layers { + if _, ok := blobs[layer]; !ok { + t.Fatalf("manifest 1 layer is missing: %v", layer) + } + } + + // check that image2 and image3 layers are not still around + for layer := range image2.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 2 layer is present: %v", layer) + } + } + + for layer := range image3.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 3 layer is present: %v", layer) + } + } +} + +func getAnyKey(digests map[digest.Digest]io.ReadSeeker) (d digest.Digest) { + for d = range digests { + break + } + return +} + +func getKeys(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) { + for d := range digests { + ds = append(ds, d) + } + return +} + +func TestDeletionWithSharedLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "tzimiskes") + + // Create random layers + randomLayers1, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + randomLayers2, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + // Upload all layers + err = testutil.UploadBlobs(repo, randomLayers1) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + err = testutil.UploadBlobs(repo, randomLayers2) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + // Construct manifests + manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + sharedKey := getAnyKey(randomLayers1) + manifest2, err := testutil.MakeSchema2Manifest(repo, append(getKeys(randomLayers2), sharedKey)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + manifestService := makeManifestService(t, repo) + + // Upload manifests + _, err = manifestService.Put(ctx, manifest1) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + manifestDigest2, err := manifestService.Put(ctx, manifest2) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + // delete + err = manifestService.Delete(ctx, manifestDigest2) + if err != nil { + t.Fatalf("manifest deletion failed: %v", err) + } + + // check that all of the layers in layer 1 are still there + blobs := allBlobs(t, registry) + for dgst := range randomLayers1 { + if _, ok := blobs[dgst]; !ok { + t.Fatalf("random layer 1 blob missing: %v", dgst) + } + } +} + +func TestOrphanBlobDeleted(t *testing.T) { + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "michael_z_doukas") + + digests, err := testutil.CreateRandomLayers(1) + if err != nil { + t.Fatalf("Failed to create random digest: %v", err) + } + + if err = testutil.UploadBlobs(repo, digests); err != nil { + t.Fatalf("Failed to upload blob: %v", err) + } + + // formality to create the necessary directories + uploadRandomSchema2Image(t, repo) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that orphan blob layers are not still around + for dgst := range digests { + if _, ok := blobs[dgst]; ok { + t.Fatalf("Orphan layer is present: %v", dgst) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/io.go b/vendor/github.com/docker/distribution/registry/storage/io.go new file mode 100644 index 0000000..c1be3b7 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/io.go @@ -0,0 +1,71 @@ +package storage + +import ( + "errors" + "io" + "io/ioutil" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +const ( + maxBlobGetSize = 4 << 20 +) + +func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) { + r, err := driver.Reader(ctx, p, 0) + if err != nil { + return nil, err + } + + return readAllLimited(r, maxBlobGetSize) +} + +func readAllLimited(r io.Reader, limit int64) ([]byte, error) { + r = limitReader(r, limit) + return ioutil.ReadAll(r) +} + +// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader, +// this returns an error when the limit reached. +func limitReader(r io.Reader, n int64) io.Reader { + return &limitedReader{r: r, n: n} +} + +// limitedReader implements a reader that errors when the limit is reached. +// +// Partially cribbed from net/http.MaxBytesReader. +type limitedReader struct { + r io.Reader // underlying reader + n int64 // max bytes remaining + err error // sticky error +} + +func (l *limitedReader) Read(p []byte) (n int, err error) { + if l.err != nil { + return 0, l.err + } + if len(p) == 0 { + return 0, nil + } + // If they asked for a 32KB byte read but only 5 bytes are + // remaining, no need to read 32KB. 6 bytes will answer the + // question of the whether we hit the limit or go past it. + if int64(len(p)) > l.n+1 { + p = p[:l.n+1] + } + n, err = l.r.Read(p) + + if int64(n) <= l.n { + l.n -= int64(n) + l.err = err + return n, err + } + + n = int(l.n) + l.n = 0 + + l.err = errors.New("storage: read exceeds limit") + return n, l.err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go new file mode 100644 index 0000000..6a5e8d0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go @@ -0,0 +1,470 @@ +package storage + +import ( + "fmt" + "net/http" + "path" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" +) + +// linkPathFunc describes a function that can resolve a link based on the +// repository name and digest. +type linkPathFunc func(name string, dgst digest.Digest) (string, error) + +// linkedBlobStore provides a full BlobService that namespaces the blobs to a +// given repository. Effectively, it manages the links in a given repository +// that grant access to the global blob store. +type linkedBlobStore struct { + *blobStore + registry *registry + blobServer distribution.BlobServer + blobAccessController distribution.BlobDescriptorService + repository distribution.Repository + ctx context.Context // only to be used where context can't come through method args + deleteEnabled bool + resumableDigestEnabled bool + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. This is required because manifest and layer blobs have not + // yet been fully merged. At some point, this functionality should be + // removed the blob links folder should be merged. The first entry is + // treated as the "canonical" link location and will be used for writes. + linkPathFns []linkPathFunc + + // linkDirectoryPathSpec locates the root directories in which one might find links + linkDirectoryPathSpec pathSpec +} + +var _ distribution.BlobStore = &linkedBlobStore{} + +func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return lbs.blobAccessController.Stat(ctx, dgst) +} + +func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Get(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Open(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return err + } + + if canonical.MediaType != "" { + // Set the repository local content type. + w.Header().Set("Content-Type", canonical.MediaType) + } + + return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) +} + +func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst := digest.FromBytes(p) + // Place the data in the blob store first. + desc, err := lbs.blobStore.Put(ctx, mediaType, p) + if err != nil { + context.GetLogger(ctx).Errorf("error putting into main store: %v", err) + return distribution.Descriptor{}, err + } + + if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype if incoming differs from what is + // returned by Put above. Note that we should allow updates for a given + // repository. + + return desc, lbs.linkBlob(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +// Writer begins a blob write session, returning a handle. +func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + if opts.Mount.ShouldMount { + desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest(), opts.Mount.Stat) + if err == nil { + // Mount successful, no need to initiate an upload session + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + } + } + + uuid := uuid.Generate().String() + startedAt := time.Now().UTC() + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Named().Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Named().Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + // Write a startedat file for this upload + if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, uuid, path, startedAt, false) +} + +func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Named().Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUploadUnknown + default: + return nil, err + } + } + + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return nil, err + } + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Named().Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, id, path, startedAt, true) +} + +func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + if !lbs.deleteEnabled { + return distribution.ErrUnsupported + } + + // Ensure the blob is available for deletion + _, err := lbs.blobAccessController.Stat(ctx, dgst) + if err != nil { + return err + } + + err = lbs.blobAccessController.Clear(ctx, dgst) + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error { + rootPath, err := pathFor(lbs.linkDirectoryPathSpec) + if err != nil { + return err + } + err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error { + // exit early if directory... + if fileInfo.IsDir() { + return nil + } + filePath := fileInfo.Path() + + // check if it's a link + _, fileName := path.Split(filePath) + if fileName != "link" { + return nil + } + + // read the digest found in link + digest, err := lbs.blobStore.readlink(ctx, filePath) + if err != nil { + return err + } + + // ensure this conforms to the linkPathFns + _, err = lbs.Stat(ctx, digest) + if err != nil { + // we expect this error to occur so we move on + if err == distribution.ErrBlobUnknown { + return nil + } + return err + } + + err = ingestor(digest) + if err != nil { + return err + } + + return nil + }) + + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest, sourceStat *distribution.Descriptor) (distribution.Descriptor, error) { + var stat distribution.Descriptor + if sourceStat == nil { + // look up the blob info from the sourceRepo if not already provided + repo, err := lbs.registry.Repository(ctx, sourceRepo) + if err != nil { + return distribution.Descriptor{}, err + } + stat, err = repo.Blobs(ctx).Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + } else { + // use the provided blob info + stat = *sourceStat + } + + desc := distribution.Descriptor{ + Size: stat.Size, + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + } + return desc, lbs.linkBlob(ctx, desc) +} + +// newBlobUpload allocates a new upload controller with the given state. +func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) { + fw, err := lbs.driver.Writer(ctx, path, append) + if err != nil { + return nil, err + } + + bw := &blobWriter{ + ctx: ctx, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.New(), + fileWriter: fw, + driver: lbs.driver, + path: path, + resumableDigestEnabled: lbs.resumableDigestEnabled, + } + + return bw, nil +} + +// linkBlob links a valid, written blob into the registry under the named +// repository for the upload controller. +func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { + dgsts := append([]digest.Digest{canonical.Digest}, aliases...) + + // TODO(stevvooe): Need to write out mediatype for only canonical hash + // since we don't care about the aliases. They are generally unused except + // for tarsum but those versions don't care about mediatype. + + // Don't make duplicate links. + seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) + + // only use the first link + linkPathFn := lbs.linkPathFns[0] + + for _, dgst := range dgsts { + if _, seen := seenDigests[dgst]; seen { + continue + } + seenDigests[dgst] = struct{}{} + + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return err + } + + if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { + return err + } + } + + return nil +} + +type linkedBlobStatter struct { + *blobStore + repository distribution.Repository + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. This is required because manifest and layer blobs have not + // yet been fully merged. At some point, this functionality should be + // removed an the blob links folder should be merged. The first entry is + // treated as the "canonical" link location and will be used for writes. + linkPathFns []linkPathFunc +} + +var _ distribution.BlobDescriptorService = &linkedBlobStatter{} + +func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + var ( + found bool + target digest.Digest + ) + + // try the many link path functions until we get success or an error that + // is not PathNotFoundError. + for _, linkPathFn := range lbs.linkPathFns { + var err error + target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) + + if err == nil { + found = true + break // success! + } + + switch err := err.(type) { + case driver.PathNotFoundError: + // do nothing, just move to the next linkPathFn + default: + return distribution.Descriptor{}, err + } + } + + if !found { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + if target != dgst { + // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. + context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) + } + + // TODO(stevvooe): Look up repository local mediatype and replace that on + // the returned descriptor. + + return lbs.blobStore.statter.Stat(ctx, target) +} + +func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { + // clear any possible existence of a link described in linkPathFns + for _, linkPathFn := range lbs.linkPathFns { + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return err + } + + err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + continue // just ignore this error and continue + default: + return err + } + } + } + + return nil +} + +// resolveTargetWithFunc allows us to read a link to a resource with different +// linkPathFuncs to let us try a few different paths before returning not +// found. +func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return "", err + } + + return lbs.blobStore.readlink(ctx, blobLinkPath) +} + +func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + // The canonical descriptor for a blob is set at the commit phase of upload + return nil +} + +// blobLinkPath provides the path to the blob link, also known as layers. +func blobLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(layerLinkPathSpec{name: name, digest: dgst}) +} + +// manifestRevisionLinkPath provides the path to the manifest revision link. +func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go new file mode 100644 index 0000000..f0f63d8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go @@ -0,0 +1,217 @@ +package storage + +import ( + "fmt" + "io" + "reflect" + "strconv" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/testutil" +) + +func TestLinkedBlobStoreCreateWithMountFrom(t *testing.T) { + fooRepoName, _ := reference.ParseNamed("nm/foo") + fooEnv := newManifestStoreTestEnv(t, fooRepoName, "thetag") + ctx := context.Background() + stats, err := mockRegistry(t, fooEnv.registry) + if err != nil { + t.Fatal(err) + } + + // Build up some test layers and add them to the manifest, saving the + // readseekers for upload later. + testLayers := map[digest.Digest]io.ReadSeeker{} + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file") + } + dgst := digest.Digest(ds) + + testLayers[digest.Digest(dgst)] = rs + } + + // upload the layers to foo/bar + for dgst, rs := range testLayers { + wr, err := fooEnv.repository.Blobs(fooEnv.ctx).Create(fooEnv.ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(fooEnv.ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + // create another repository nm/bar + barRepoName, _ := reference.ParseNamed("nm/bar") + barRepo, err := fooEnv.registry.Repository(ctx, barRepoName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + // cross-repo mount the test layers into a nm/bar + for dgst := range testLayers { + fooCanonical, _ := reference.WithDigest(fooRepoName, dgst) + option := WithMountFrom(fooCanonical) + // ensure we can instrospect it + createOpts := distribution.CreateOptions{} + if err := option.Apply(&createOpts); err != nil { + t.Fatalf("failed to apply MountFrom option: %v", err) + } + if !createOpts.Mount.ShouldMount || createOpts.Mount.From.String() != fooCanonical.String() { + t.Fatalf("unexpected create options: %#+v", createOpts.Mount) + } + + _, err := barRepo.Blobs(ctx).Create(ctx, WithMountFrom(fooCanonical)) + if err == nil { + t.Fatalf("unexpected non-error while mounting from %q: %v", fooRepoName.String(), err) + } + if _, ok := err.(distribution.ErrBlobMounted); !ok { + t.Fatalf("expected ErrMountFrom error, not %T: %v", err, err) + } + } + for dgst := range testLayers { + fooCanonical, _ := reference.WithDigest(fooRepoName, dgst) + count, exists := stats[fooCanonical.String()] + if !exists { + t.Errorf("expected entry %q not found among handled stat calls", fooCanonical.String()) + } else if count != 1 { + t.Errorf("expected exactly one stat call for entry %q, not %d", fooCanonical.String(), count) + } + } + + clearStats(stats) + + // create yet another repository nm/baz + bazRepoName, _ := reference.ParseNamed("nm/baz") + bazRepo, err := fooEnv.registry.Repository(ctx, bazRepoName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + // cross-repo mount them into a nm/baz and provide a prepopulated blob descriptor + for dgst := range testLayers { + fooCanonical, _ := reference.WithDigest(fooRepoName, dgst) + size, err := strconv.ParseInt("0x"+dgst.Hex()[:8], 0, 64) + if err != nil { + t.Fatal(err) + } + prepolutatedDescriptor := distribution.Descriptor{ + Digest: dgst, + Size: size, + MediaType: "application/octet-stream", + } + _, err = bazRepo.Blobs(ctx).Create(ctx, WithMountFrom(fooCanonical), &statCrossMountCreateOption{ + desc: prepolutatedDescriptor, + }) + blobMounted, ok := err.(distribution.ErrBlobMounted) + if !ok { + t.Errorf("expected ErrMountFrom error, not %T: %v", err, err) + continue + } + if !reflect.DeepEqual(blobMounted.Descriptor, prepolutatedDescriptor) { + t.Errorf("unexpected descriptor: %#+v != %#+v", blobMounted.Descriptor, prepolutatedDescriptor) + } + } + // this time no stat calls will be made + if len(stats) != 0 { + t.Errorf("unexpected number of stats made: %d != %d", len(stats), len(testLayers)) + } +} + +func clearStats(stats map[string]int) { + for k := range stats { + delete(stats, k) + } +} + +// mockRegistry sets a mock blob descriptor service factory that overrides +// statter's Stat method to note each attempt to stat a blob in any repository. +// Returned stats map contains canonical references to blobs with a number of +// attempts. +func mockRegistry(t *testing.T, nm distribution.Namespace) (map[string]int, error) { + registry, ok := nm.(*registry) + if !ok { + return nil, fmt.Errorf("not an expected type of registry: %T", nm) + } + stats := make(map[string]int) + + registry.blobDescriptorServiceFactory = &mockBlobDescriptorServiceFactory{ + t: t, + stats: stats, + } + + return stats, nil +} + +type mockBlobDescriptorServiceFactory struct { + t *testing.T + stats map[string]int +} + +func (f *mockBlobDescriptorServiceFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &mockBlobDescriptorService{ + BlobDescriptorService: svc, + t: f.t, + stats: f.stats, + } +} + +type mockBlobDescriptorService struct { + distribution.BlobDescriptorService + t *testing.T + stats map[string]int +} + +var _ distribution.BlobDescriptorService = &mockBlobDescriptorService{} + +func (bs *mockBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + statter, ok := bs.BlobDescriptorService.(*linkedBlobStatter) + if !ok { + return distribution.Descriptor{}, fmt.Errorf("unexpected blob descriptor service: %T", bs.BlobDescriptorService) + } + + name := statter.repository.Named() + canonical, err := reference.WithDigest(name, dgst) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("failed to make canonical reference: %v", err) + } + + bs.stats[canonical.String()]++ + bs.t.Logf("calling Stat on %s", canonical.String()) + + return bs.BlobDescriptorService.Stat(ctx, dgst) +} + +// statCrossMountCreateOptions ensures the expected options type is passed, and optionally pre-fills the cross-mount stat info +type statCrossMountCreateOption struct { + desc distribution.Descriptor +} + +var _ distribution.BlobCreateOption = statCrossMountCreateOption{} + +func (f statCrossMountCreateOption) Apply(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("Unexpected create options: %#v", v) + } + + if !opts.Mount.ShouldMount { + return nil + } + + opts.Mount.Stat = &f.desc + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go new file mode 100644 index 0000000..e24062c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go @@ -0,0 +1,92 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" +) + +// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. +type manifestListHandler struct { + repository distribution.Repository + blobStore distribution.BlobStore + ctx context.Context +} + +var _ ManifestHandler = &manifestListHandler{} + +func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") + + var m manifestlist.DeserializedManifestList + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") + + m, ok := manifestList.(*manifestlist.DeserializedManifestList) + if !ok { + return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to +// store valid content, leaving trust policies of that content up to +// consumers. +func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + // This manifest service is different from the blob service + // returned by Blob. It uses a linked blob store to ensure that + // only manifests are accessible. + + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + for _, manifestDescriptor := range mnfst.References() { + exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) + if err != nil && err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + if err != nil || !exists { + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go new file mode 100644 index 0000000..9e8065b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go @@ -0,0 +1,141 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" +) + +// A ManifestHandler gets and puts manifests of a particular type. +type ManifestHandler interface { + // Unmarshal unmarshals the manifest from a byte slice. + Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest. + Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) +} + +// SkipLayerVerification allows a manifest to be Put before its +// layers are on the filesystem +func SkipLayerVerification() distribution.ManifestServiceOption { + return skipLayerOption{} +} + +type skipLayerOption struct{} + +func (o skipLayerOption) Apply(m distribution.ManifestService) error { + if ms, ok := m.(*manifestStore); ok { + ms.skipDependencyVerification = true + return nil + } + return fmt.Errorf("skip layer verification only valid for manifestStore") +} + +type manifestStore struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context + + skipDependencyVerification bool + + schema1Handler ManifestHandler + schema2Handler ManifestHandler + manifestListHandler ManifestHandler +} + +var _ distribution.ManifestService = &manifestStore{} + +func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") + + _, err := ms.blobStore.Stat(ms.ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return false, nil + } + + return false, err + } + + return true, nil +} + +func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") + + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. + + content, err := ms.blobStore.Get(ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: ms.repository.Named().Name(), + Revision: dgst, + } + } + + return nil, err + } + + var versioned manifest.Versioned + if err = json.Unmarshal(content, &versioned); err != nil { + return nil, err + } + + switch versioned.SchemaVersion { + case 1: + return ms.schema1Handler.Unmarshal(ctx, dgst, content) + case 2: + // This can be an image manifest or a manifest list + switch versioned.MediaType { + case schema2.MediaTypeManifest: + return ms.schema2Handler.Unmarshal(ctx, dgst, content) + case manifestlist.MediaTypeManifestList: + return ms.manifestListHandler.Unmarshal(ctx, dgst, content) + default: + return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} + } + } + + return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) +} + +func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") + + switch manifest.(type) { + case *schema1.SignedManifest: + return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *schema2.DeserializedManifest: + return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *manifestlist.DeserializedManifestList: + return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) + } + + return "", fmt.Errorf("unrecognized manifest type %T", manifest) +} + +// Delete removes the revision of the specified manifest. +func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") + return ms.blobStore.Delete(ctx, dgst) +} + +func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { + err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error { + err := ingester(dgst) + if err != nil { + return err + } + return nil + }) + return err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go new file mode 100644 index 0000000..cbd30c0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go @@ -0,0 +1,391 @@ +package storage + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +type manifestStoreTestEnv struct { + ctx context.Context + driver driver.StorageDriver + registry distribution.Namespace + repository distribution.Repository + name reference.Named + tag string +} + +func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string, options ...RegistryOption) *manifestStoreTestEnv { + ctx := context.Background() + driver := inmemory.New() + registry, err := NewRegistry(ctx, driver, options...) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repo, err := registry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + return &manifestStoreTestEnv{ + ctx: ctx, + driver: driver, + registry: registry, + repository: repo, + name: name, + tag: tag, + } +} + +func TestManifestStorage(t *testing.T) { + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, Schema1SigningKey(k)) +} + +func testManifestStorage(t *testing.T, options ...RegistryOption) { + repoName, _ := reference.ParseNamed("foo/bar") + env := newManifestStoreTestEnv(t, repoName, "thetag", options...) + ctx := context.Background() + ms, err := env.repository.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + m := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: env.name.Name(), + Tag: env.tag, + } + + // Build up some test layers and add them to the manifest, saving the + // readseekers for upload later. + testLayers := map[digest.Digest]io.ReadSeeker{} + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file") + } + dgst := digest.Digest(ds) + + testLayers[digest.Digest(dgst)] = rs + m.FSLayers = append(m.FSLayers, schema1.FSLayer{ + BlobSum: dgst, + }) + m.History = append(m.History, schema1.History{ + V1Compatibility: "", + }) + + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm, merr := schema1.Sign(&m, pk) + if merr != nil { + t.Fatalf("error signing manifest: %v", err) + } + + _, err = ms.Put(ctx, sm) + if err == nil { + t.Fatalf("expected errors putting manifest with full verification") + } + + switch err := err.(type) { + case distribution.ErrManifestVerification: + if len(err) != 2 { + t.Fatalf("expected 2 verification errors: %#v", err) + } + + for _, err := range err { + if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok { + t.Fatalf("unexpected error type: %v", err) + } + } + default: + t.Fatalf("unexpected error verifying manifest: %v", err) + } + + // Now, upload the layers that were missing! + for dgst, rs := range testLayers { + wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + var manifestDigest digest.Digest + if manifestDigest, err = ms.Put(ctx, sm); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + exists, err := ms.Exists(ctx, manifestDigest) + if err != nil { + t.Fatalf("unexpected error checking manifest existence: %#v", err) + } + + if !exists { + t.Fatalf("manifest should exist") + } + + fromStore, err := ms.Get(ctx, manifestDigest) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + fetchedManifest, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + + if !bytes.Equal(fetchedManifest.Canonical, sm.Canonical) { + t.Fatalf("fetched payload does not match original payload: %q != %q", fetchedManifest.Canonical, sm.Canonical) + } + + _, pl, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") + if err != nil { + t.Fatalf("unexpected error parsing jws: %v", err) + } + + payload, err := fetchedJWS.Payload() + if err != nil { + t.Fatalf("unexpected error extracting payload: %v", err) + } + + // Now that we have a payload, take a moment to check that the manifest is + // return by the payload digest. + + dgst := digest.FromBytes(payload) + exists, err = ms.Exists(ctx, dgst) + if err != nil { + t.Fatalf("error checking manifest existence by digest: %v", err) + } + + if !exists { + t.Fatalf("manifest %s should exist", dgst) + } + + fetchedByDigest, err := ms.Get(ctx, dgst) + if err != nil { + t.Fatalf("unexpected error fetching manifest by digest: %v", err) + } + + byDigestManifest, ok := fetchedByDigest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + + if !bytes.Equal(byDigestManifest.Canonical, fetchedManifest.Canonical) { + t.Fatalf("fetched manifest not equal: %q != %q", byDigestManifest.Canonical, fetchedManifest.Canonical) + } + + sigs, err := fetchedJWS.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) + } + + // Now, push the same manifest with a different key + pk2, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm2, err := schema1.Sign(&m, pk2) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + _, pl, err = sm2.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + jws2, err := libtrust.ParsePrettySignature(pl, "signatures") + if err != nil { + t.Fatalf("error parsing signature: %v", err) + } + + sigs2, err := jws2.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs2) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) + } + + if manifestDigest, err = ms.Put(ctx, sm2); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + fromStore, err = ms.Get(ctx, manifestDigest) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + fetched, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected type from signed manifeststore : %T", fetched) + } + + if _, err := schema1.Verify(fetched); err != nil { + t.Fatalf("unexpected error verifying manifest: %v", err) + } + + _, pl, err = fetched.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") + if err != nil { + t.Fatalf("unexpected error parsing jws: %v", err) + } + + receivedPayload, err := receivedJWS.Payload() + if err != nil { + t.Fatalf("unexpected error extracting received payload: %v", err) + } + + if !bytes.Equal(receivedPayload, payload) { + t.Fatalf("payloads are not equal") + } + + // Test deleting manifests + err = ms.Delete(ctx, dgst) + if err != nil { + t.Fatalf("unexpected an error deleting manifest by digest: %v", err) + } + + exists, err = ms.Exists(ctx, dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if exists { + t.Errorf("Deleted manifest should not exist") + } + + deletedManifest, err := ms.Get(ctx, dgst) + if err == nil { + t.Errorf("Unexpected success getting deleted manifest") + } + switch err.(type) { + case distribution.ErrManifestUnknownRevision: + break + default: + t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type()) + } + + if deletedManifest != nil { + t.Errorf("Deleted manifest get returned non-nil") + } + + // Re-upload should restore manifest to a good state + _, err = ms.Put(ctx, sm) + if err != nil { + t.Errorf("Error re-uploading deleted manifest") + } + + exists, err = ms.Exists(ctx, dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if !exists { + t.Errorf("Restored manifest should exist") + } + + deletedManifest, err = ms.Get(ctx, dgst) + if err != nil { + t.Errorf("Unexpected error getting manifest") + } + if deletedManifest == nil { + t.Errorf("Deleted manifest get returned non-nil") + } + + r, err := NewRegistry(ctx, env.driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repo, err := r.Repository(ctx, env.name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + ms, err = repo.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + err = ms.Delete(ctx, dgst) + if err == nil { + t.Errorf("Unexpected success deleting while disabled") + } +} + +// TestLinkPathFuncs ensures that the link path functions behavior are locked +// down and implemented as expected. +func TestLinkPathFuncs(t *testing.T) { + for _, testcase := range []struct { + repo string + digest digest.Digest + linkPathFn linkPathFunc + expected string + }{ + { + repo: "foo/bar", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + linkPathFn: blobLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", + }, + { + repo: "foo/bar", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + linkPathFn: manifestRevisionLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", + }, + } { + p, err := testcase.linkPathFn(testcase.repo, testcase.digest) + if err != nil { + t.Fatalf("unexpected error calling linkPathFn(pm, %q, %q): %v", testcase.repo, testcase.digest, err) + } + + if p != testcase.expected { + t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected) + } + } + +} diff --git a/vendor/github.com/docker/distribution/registry/storage/paths.go b/vendor/github.com/docker/distribution/registry/storage/paths.go new file mode 100644 index 0000000..1b142b8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/paths.go @@ -0,0 +1,490 @@ +package storage + +import ( + "fmt" + "path" + "strings" + + "github.com/docker/distribution/digest" +) + +const ( + storagePathVersion = "v2" // fixed storage layout version + storagePathRoot = "/docker/registry/" // all driver paths have a prefix + + // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we though + // storage path root would configurable for all drivers through this + // package. In reality, we've found it simpler to do this on a per driver + // basis. +) + +// pathFor maps paths based on "object names" and their ids. The "object +// names" mapped by are internal to the storage system. +// +// The path layout in the storage backend is roughly as follows: +// +// /v2 +// -> repositories/ +// ->/ +// -> _manifests/ +// revisions +// -> +// -> link +// tags/ +// -> current/link +// -> index +// -> //link +// -> _layers/ +// +// -> _uploads/ +// data +// startedat +// hashstates// +// -> blob/ +// +// +// The storage backend layout is broken up into a content-addressable blob +// store and repositories. The content-addressable blob store holds most data +// throughout the backend, keyed by algorithm and digests of the underlying +// content. Access to the blob store is controlled through links from the +// repository to blobstore. +// +// A repository is made up of layers, manifests and tags. The layers component +// is just a directory of layers which are "linked" into a repository. A layer +// can only be accessed through a qualified repository name if it is linked in +// the repository. Uploads of layers are managed in the uploads directory, +// which is key by upload id. When all data for an upload is received, the +// data is moved into the blob store and the upload directory is deleted. +// Abandoned uploads can be garbage collected by reading the startedat file +// and removing uploads that have been active for longer than a certain time. +// +// The third component of the repository directory is the manifests store, +// which is made up of a revision store and tag store. Manifests are stored in +// the blob store and linked into the revision store. +// While the registry can save all revisions of a manifest, no relationship is +// implied as to the ordering of changes to a manifest. The tag store provides +// support for name, tag lookups of manifests, using "current/link" under a +// named tag directory. An index is maintained to support deletions of all +// revisions of a given manifest tag. +// +// We cover the path formats implemented by this path mapper below. +// +// Manifests: +// +// manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/ +// manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// +// manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link +// +// Tags: +// +// manifestTagsPathSpec: /v2/repositories//_manifests/tags/ +// manifestTagPathSpec: /v2/repositories//_manifests/tags// +// manifestTagCurrentPathSpec: /v2/repositories//_manifests/tags//current/link +// manifestTagIndexPathSpec: /v2/repositories//_manifests/tags//index/ +// manifestTagIndexEntryPathSpec: /v2/repositories//_manifests/tags//index/// +// manifestTagIndexEntryLinkPathSpec: /v2/repositories//_manifests/tags//index///link +// +// Blobs: +// +// layerLinkPathSpec: /v2/repositories//_layers///link +// +// Uploads: +// +// uploadDataPathSpec: /v2/repositories//_uploads//data +// uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat +// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// +// +// Blob Store: +// +// blobsPathSpec: /v2/blobs/ +// blobPathSpec: /v2/blobs/// +// blobDataPathSpec: /v2/blobs////data +// blobMediaTypePathSpec: /v2/blobs////data +// +// For more information on the semantic meaning of each path and their +// contents, please see the path spec documentation. +func pathFor(spec pathSpec) (string, error) { + + // Switch on the path object type and return the appropriate path. At + // first glance, one may wonder why we don't use an interface to + // accomplish this. By keep the formatting separate from the pathSpec, we + // keep separate the path generation componentized. These specs could be + // passed to a completely different mapper implementation and generate a + // different set of paths. + // + // For example, imagine migrating from one backend to the other: one could + // build a filesystem walker that converts a string path in one version, + // to an intermediate path object, than can be consumed and mapped by the + // other version. + + rootPrefix := []string{storagePathRoot, storagePathVersion} + repoPrefix := append(rootPrefix, "repositories") + + switch v := spec.(type) { + + case manifestRevisionsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil + + case manifestRevisionPathSpec: + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil + case manifestRevisionLinkPathSpec: + root, err := pathFor(manifestRevisionPathSpec{ + name: v.name, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil + case manifestTagPathSpec: + root, err := pathFor(manifestTagsPathSpec{ + name: v.name, + }) + + if err != nil { + return "", err + } + + return path.Join(root, v.tag), nil + case manifestTagCurrentPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "current", "link"), nil + case manifestTagIndexPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "index"), nil + case manifestTagIndexEntryLinkPathSpec: + root, err := pathFor(manifestTagIndexEntryPathSpec{ + name: v.name, + tag: v.tag, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagIndexEntryPathSpec: + root, err := pathFor(manifestTagIndexPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(root, path.Join(components...)), nil + case layerLinkPathSpec: + components, err := digestPathComponents(v.digest, false) + if err != nil { + return "", err + } + + // TODO(stevvooe): Right now, all blobs are linked under "_layers". If + // we have future migrations, we may want to rename this to "_blobs". + // A migration strategy would simply leave existing items in place and + // write the new paths, commit a file then delete the old files. + + blobLinkPathComponents := append(repoPrefix, v.name, "_layers") + + return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil + case blobsPathSpec: + blobsPathPrefix := append(rootPrefix, "blobs") + return path.Join(blobsPathPrefix...), nil + case blobPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + case blobDataPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + components = append(components, "data") + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + + case uploadDataPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil + case uploadStartedAtPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil + case uploadHashStatePathSpec: + offset := fmt.Sprintf("%d", v.offset) + if v.list { + offset = "" // Limit to the prefix for listing offsets. + } + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil + case repositoriesRootPathSpec: + return path.Join(repoPrefix...), nil + default: + // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). + return "", fmt.Errorf("unknown path spec: %#v", v) + } +} + +// pathSpec is a type to mark structs as path specs. There is no +// implementation because we'd like to keep the specs and the mappers +// decoupled. +type pathSpec interface { + pathSpec() +} + +// manifestRevisionsPathSpec describes the directory path for +// a manifest revision. +type manifestRevisionsPathSpec struct { + name string +} + +func (manifestRevisionsPathSpec) pathSpec() {} + +// manifestRevisionPathSpec describes the components of the directory path for +// a manifest revision. +type manifestRevisionPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionPathSpec) pathSpec() {} + +// manifestRevisionLinkPathSpec describes the path components required to look +// up the data link for a revision of a manifest. If this file is not present, +// the manifest blob is not available in the given repo. The contents of this +// file should just be the digest. +type manifestRevisionLinkPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionLinkPathSpec) pathSpec() {} + +// manifestTagsPathSpec describes the path elements required to point to the +// manifest tags directory. +type manifestTagsPathSpec struct { + name string +} + +func (manifestTagsPathSpec) pathSpec() {} + +// manifestTagPathSpec describes the path elements required to point to the +// manifest tag links files under a repository. These contain a blob id that +// can be used to look up the data and signatures. +type manifestTagPathSpec struct { + name string + tag string +} + +func (manifestTagPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the current revision for a +// given tag. +type manifestTagCurrentPathSpec struct { + name string + tag string +} + +func (manifestTagCurrentPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the index of revisions +// with the given tag. +type manifestTagIndexPathSpec struct { + name string + tag string +} + +func (manifestTagIndexPathSpec) pathSpec() {} + +// manifestTagIndexEntryPathSpec contains the entries of the index by revision. +type manifestTagIndexEntryPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryPathSpec) pathSpec() {} + +// manifestTagIndexEntryLinkPathSpec describes the link to a revisions of a +// manifest with given tag within the index. +type manifestTagIndexEntryLinkPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} + +// blobLinkPathSpec specifies a path for a blob link, which is a file with a +// blob id. The blob link will contain a content addressable blob id reference +// into the blob store. The format of the contents is as follows: +// +// : +// +// The following example of the file contents is more illustrative: +// +// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 +// +// This indicates that there is a blob with the id/digest, calculated via +// sha256 that can be fetched from the blob store. +type layerLinkPathSpec struct { + name string + digest digest.Digest +} + +func (layerLinkPathSpec) pathSpec() {} + +// blobAlgorithmReplacer does some very simple path sanitization for user +// input. Paths should be "safe" before getting this far due to strict digest +// requirements but we can add further path conversion here, if needed. +var blobAlgorithmReplacer = strings.NewReplacer( + "+", "/", + ".", "/", + ";", "/", +) + +// blobsPathSpec contains the path for the blobs directory +type blobsPathSpec struct{} + +func (blobsPathSpec) pathSpec() {} + +// blobPathSpec contains the path for the registry global blob store. +type blobPathSpec struct { + digest digest.Digest +} + +func (blobPathSpec) pathSpec() {} + +// blobDataPathSpec contains the path for the registry global blob store. For +// now, this contains layer data, exclusively. +type blobDataPathSpec struct { + digest digest.Digest +} + +func (blobDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters of the data file for +// uploads. +type uploadDataPathSpec struct { + name string + id string +} + +func (uploadDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters for the file that stores the +// start time of an uploads. If it is missing, the upload is considered +// unknown. Admittedly, the presence of this file is an ugly hack to make sure +// we have a way to cleanup old or stalled uploads that doesn't rely on driver +// FileInfo behavior. If we come up with a more clever way to do this, we +// should remove this file immediately and rely on the startetAt field from +// the client to enforce time out policies. +type uploadStartedAtPathSpec struct { + name string + id string +} + +func (uploadStartedAtPathSpec) pathSpec() {} + +// uploadHashStatePathSpec defines the path parameters for the file that stores +// the hash function state of an upload at a specific byte offset. If `list` is +// set, then the path mapper will generate a list prefix for all hash state +// offsets for the upload identified by the name, id, and alg. +type uploadHashStatePathSpec struct { + name string + id string + alg digest.Algorithm + offset int64 + list bool +} + +func (uploadHashStatePathSpec) pathSpec() {} + +// repositoriesRootPathSpec returns the root of repositories +type repositoriesRootPathSpec struct { +} + +func (repositoriesRootPathSpec) pathSpec() {} + +// digestPathComponents provides a consistent path breakdown for a given +// digest. For a generic digest, it will be as follows: +// +// / +// +// If multilevel is true, the first two bytes of the digest will separate +// groups of digest folder. It will be as follows: +// +// // +// +func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { + if err := dgst.Validate(); err != nil { + return nil, err + } + + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) + hex := dgst.Hex() + prefix := []string{algorithm} + + var suffix []string + + if multilevel { + suffix = append(suffix, hex[:2]) + } + + suffix = append(suffix, hex) + + return append(prefix, suffix...), nil +} + +// Reconstructs a digest from a path +func digestFromPath(digestPath string) (digest.Digest, error) { + + digestPath = strings.TrimSuffix(digestPath, "/data") + dir, hex := path.Split(digestPath) + dir = path.Dir(dir) + dir, next := path.Split(dir) + + // next is either the algorithm OR the first two characters in the hex string + var algo string + if next == hex[:2] { + algo = path.Base(dir) + } else { + algo = next + } + + dgst := digest.NewDigestFromHex(algo, hex) + return dgst, dgst.Validate() +} diff --git a/vendor/github.com/docker/distribution/registry/storage/paths_test.go b/vendor/github.com/docker/distribution/registry/storage/paths_test.go new file mode 100644 index 0000000..f739552 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/paths_test.go @@ -0,0 +1,135 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution/digest" +) + +func TestPathMapper(t *testing.T) { + for _, testcase := range []struct { + spec pathSpec + expected string + err error + }{ + { + spec: manifestRevisionPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + { + spec: manifestRevisionLinkPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", + }, + { + spec: manifestTagsPathSpec{ + name: "foo/bar", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags", + }, + { + spec: manifestTagPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag", + }, + { + spec: manifestTagCurrentPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link", + }, + { + spec: manifestTagIndexPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index", + }, + { + spec: manifestTagIndexEntryPathSpec{ + name: "foo/bar", + tag: "thetag", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + { + spec: manifestTagIndexEntryLinkPathSpec{ + name: "foo/bar", + tag: "thetag", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", + }, + + { + spec: uploadDataPathSpec{ + name: "foo/bar", + id: "asdf-asdf-asdf-adsf", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", + }, + { + spec: uploadStartedAtPathSpec{ + name: "foo/bar", + id: "asdf-asdf-asdf-adsf", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", + }, + } { + p, err := pathFor(testcase.spec) + if err != nil { + t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) + } + + if p != testcase.expected { + t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected) + } + } + + // Add a few test cases to ensure we cover some errors + + // Specify a path that requires a revision and get a digest validation error. + badpath, err := pathFor(manifestRevisionPathSpec{ + name: "foo/bar", + }) + + if err == nil { + t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) + } + +} + +func TestDigestFromPath(t *testing.T) { + for _, testcase := range []struct { + path string + expected digest.Digest + multilevel bool + err error + }{ + { + path: "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/data", + multilevel: true, + expected: "sha256:9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86", + err: nil, + }, + } { + result, err := digestFromPath(testcase.path) + if err != testcase.err { + t.Fatalf("Unexpected error value %v when we wanted %v", err, testcase.err) + } + + if result != testcase.expected { + t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected) + + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go new file mode 100644 index 0000000..7576b18 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -0,0 +1,139 @@ +package storage + +import ( + "path" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" +) + +// uploadData stored the location of temporary files created during a layer upload +// along with the date the upload was started +type uploadData struct { + containingDir string + startedAt time.Time +} + +func newUploadData() uploadData { + return uploadData{ + containingDir: "", + // default to far in future to protect against missing startedat + startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), + } +} + +// PurgeUploads deletes files from the upload directory +// created before olderThan. The list of files deleted and errors +// encountered are returned +func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { + log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) + uploadData, errors := getOutstandingUploads(ctx, driver) + var deleted []string + for _, uploadData := range uploadData { + if uploadData.startedAt.Before(olderThan) { + var err error + log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", + uploadData.containingDir, uploadData.startedAt, olderThan) + if actuallyDelete { + err = driver.Delete(ctx, uploadData.containingDir) + } + if err == nil { + deleted = append(deleted, uploadData.containingDir) + } else { + errors = append(errors, err) + } + } + } + + log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) + return deleted, errors +} + +// getOutstandingUploads walks the upload directory, collecting files +// which could be eligible for deletion. The only reliable way to +// classify the age of a file is with the date stored in the startedAt +// file, so gather files by UUID with a date from startedAt. +func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { + var errors []error + uploads := make(map[string]uploadData, 0) + + inUploadDir := false + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return uploads, append(errors, err) + } + + err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + if file[0] == '_' { + // Reserved directory + inUploadDir = (file == "_uploads") + + if fileInfo.IsDir() && !inUploadDir { + return ErrSkipDir + } + + } + + uuid, isContainingDir := uUIDFromPath(filePath) + if uuid == "" { + // Cannot reliably delete + return nil + } + ud, ok := uploads[uuid] + if !ok { + ud = newUploadData() + } + if isContainingDir { + ud.containingDir = filePath + } + if file == "startedat" { + if t, err := readStartedAtFile(driver, filePath); err == nil { + ud.startedAt = t + } else { + errors = pushError(errors, filePath, err) + } + + } + + uploads[uuid] = ud + return nil + }) + + if err != nil { + errors = pushError(errors, root, err) + } + return uploads, errors +} + +// uUIDFromPath extracts the upload UUID from a given path +// If the UUID is the last path component, this is the containing +// directory for all upload files +func uUIDFromPath(path string) (string, bool) { + components := strings.Split(path, "/") + for i := len(components) - 1; i >= 0; i-- { + if u, err := uuid.Parse(components[i]); err == nil { + return u.String(), i == len(components)-1 + } + } + return "", false +} + +// readStartedAtFile reads the date from an upload's startedAtFile +func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { + // todo:(richardscothern) - pass in a context + startedAtBytes, err := driver.GetContent(context.Background(), path) + if err != nil { + return time.Now(), err + } + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return time.Now(), err + } + return startedAt, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go new file mode 100644 index 0000000..3b70f72 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go @@ -0,0 +1,166 @@ +package storage + +import ( + "path" + "strings" + "testing" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/uuid" +) + +func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) { + d := inmemory.New() + ctx := context.Background() + for i := 0; i < numUploads; i++ { + addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt) + } + return d, ctx +} + +func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { + dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID}) + if err != nil { + t.Fatalf("Unable to resolve path") + } + if err := d.PutContent(ctx, dataPath, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID}) + if err != nil { + t.Fatalf("Unable to resolve path") + } + + if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + t.Fatalf("Unable to write startedAt file") + } + +} + +func TestPurgeGather(t *testing.T) { + uploadCount := 5 + fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now()) + uploadData, errs := getOutstandingUploads(ctx, fs) + if len(errs) != 0 { + t.Errorf("Unexepected errors: %q", errs) + } + if len(uploadData) != uploadCount { + t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData)) + } +} + +func TestPurgeNone(t *testing.T) { + fs, ctx := testUploadFS(t, 10, "test-repo", time.Now()) + oneHourAgo := time.Now().Add(-1 * time.Hour) + deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + if len(deleted) != 0 { + t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo) + } +} + +func TestPurgeAll(t *testing.T) { + uploadCount := 10 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) + + // Ensure > 1 repos are purged + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo) + uploadCount++ + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + fileCount := uploadCount + if len(deleted) != fileCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), fileCount) + } +} + +func TestPurgeSome(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) + + newUploadCount := 4 + + for i := 0; i < newUploadCount; i++ { + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour)) + } + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + if len(deleted) != oldUploadCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), oldUploadCount) + } +} + +func TestPurgeOnlyUploads(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) + + // Create a directory tree outside _uploads and ensure + // these files aren't deleted. + dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) + if err != nil { + t.Fatalf(err.Error()) + } + nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) + if strings.Index(nonUploadPath, "_upload") != -1 { + t.Fatalf("Non-upload path not created correctly") + } + + nonUploadFile := path.Join(nonUploadPath, "file") + if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + for _, file := range deleted { + if strings.Index(file, "_upload") == -1 { + t.Errorf("Non-upload file deleted") + } + } +} + +func TestPurgeMissingStartedAt(t *testing.T) { + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo) + + err := Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + + if file == "startedat" { + if err := fs.Delete(ctx, filePath); err != nil { + t.Fatalf("Unable to delete startedat file: %s", filePath) + } + } + return nil + }) + if err != nil { + t.Fatalf("Unexpected error during Walk: %s ", err.Error()) + } + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) > 0 { + t.Errorf("Unexpected errors") + } + if len(deleted) > 0 { + t.Errorf("Files unexpectedly deleted: %s", deleted) + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/registry.go b/vendor/github.com/docker/distribution/registry/storage/registry.go new file mode 100644 index 0000000..20525ff --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/registry.go @@ -0,0 +1,306 @@ +package storage + +import ( + "regexp" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/libtrust" +) + +// registry is the top-level implementation of Registry for use in the storage +// package. All instances should descend from this object. +type registry struct { + blobStore *blobStore + blobServer *blobServer + statter *blobStatter // global statter service. + blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool + resumableDigestEnabled bool + schema1SigningKey libtrust.PrivateKey + blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory + manifestURLs manifestURLs +} + +// manifestURLs holds regular expressions for controlling manifest URL whitelisting +type manifestURLs struct { + allow *regexp.Regexp + deny *regexp.Regexp +} + +// RegistryOption is the type used for functional options for NewRegistry. +type RegistryOption func(*registry) error + +// EnableRedirect is a functional option for NewRegistry. It causes the backend +// blob server to attempt using (StorageDriver).URLFor to serve all blobs. +func EnableRedirect(registry *registry) error { + registry.blobServer.redirect = true + return nil +} + +// EnableDelete is a functional option for NewRegistry. It enables deletion on +// the registry. +func EnableDelete(registry *registry) error { + registry.deleteEnabled = true + return nil +} + +// DisableDigestResumption is a functional option for NewRegistry. It should be +// used if the registry is acting as a caching proxy. +func DisableDigestResumption(registry *registry) error { + registry.resumableDigestEnabled = false + return nil +} + +// ManifestURLsAllowRegexp is a functional option for NewRegistry. +func ManifestURLsAllowRegexp(r *regexp.Regexp) RegistryOption { + return func(registry *registry) error { + registry.manifestURLs.allow = r + return nil + } +} + +// ManifestURLsDenyRegexp is a functional option for NewRegistry. +func ManifestURLsDenyRegexp(r *regexp.Regexp) RegistryOption { + return func(registry *registry) error { + registry.manifestURLs.deny = r + return nil + } +} + +// Schema1SigningKey returns a functional option for NewRegistry. It sets the +// key for signing all schema1 manifests. +func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption { + return func(registry *registry) error { + registry.schema1SigningKey = key + return nil + } +} + +// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the +// factory to create BlobDescriptorServiceFactory middleware. +func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption { + return func(registry *registry) error { + registry.blobDescriptorServiceFactory = factory + return nil + } +} + +// BlobDescriptorCacheProvider returns a functional option for +// NewRegistry. It creates a cached blob statter for use by the +// registry. +func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { + // TODO(aaronl): The duplication of statter across several objects is + // ugly, and prevents us from using interface types in the registry + // struct. Ideally, blobStore and blobServer should be lazily + // initialized, and use the current value of + // blobDescriptorCacheProvider. + return func(registry *registry) error { + if blobDescriptorCacheProvider != nil { + statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) + registry.blobStore.statter = statter + registry.blobServer.statter = statter + registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider + } + return nil + } +} + +// NewRegistry creates a new registry instance from the provided driver. The +// resulting registry may be shared by multiple goroutines but is cheap to +// allocate. If the Redirect option is specified, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. +func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { + // create global statter + statter := &blobStatter{ + driver: driver, + } + + bs := &blobStore{ + driver: driver, + statter: statter, + } + + registry := ®istry{ + blobStore: bs, + blobServer: &blobServer{ + driver: driver, + statter: statter, + pathFn: bs.path, + }, + statter: statter, + resumableDigestEnabled: true, + } + + for _, option := range options { + if err := option(registry); err != nil { + return nil, err + } + } + + return registry, nil +} + +// Scope returns the namespace scope for a registry. The registry +// will only serve repositories contained within this scope. +func (reg *registry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +// Repository returns an instance of the repository tied to the registry. +// Instances should not be shared between goroutines but are cheap to +// allocate. In general, they should be request scoped. +func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { + var descriptorCache distribution.BlobDescriptorService + if reg.blobDescriptorCacheProvider != nil { + var err error + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) + if err != nil { + return nil, err + } + } + + return &repository{ + ctx: ctx, + registry: reg, + name: canonicalName, + descriptorCache: descriptorCache, + }, nil +} + +func (reg *registry) Blobs() distribution.BlobEnumerator { + return reg.blobStore +} + +func (reg *registry) BlobStatter() distribution.BlobStatter { + return reg.statter +} + +// repository provides name-scoped access to various services. +type repository struct { + *registry + ctx context.Context + name reference.Named + descriptorCache distribution.BlobDescriptorService +} + +// Name returns the name of the repository. +func (repo *repository) Named() reference.Named { + return repo.name +} + +func (repo *repository) Tags(ctx context.Context) distribution.TagService { + tags := &tagStore{ + repository: repo, + blobStore: repo.registry.blobStore, + } + + return tags +} + +// Manifests returns an instance of ManifestService. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifestLinkPathFns := []linkPathFunc{ + // NOTE(stevvooe): Need to search through multiple locations since + // 2.1.0 unintentionally linked into _layers. + manifestRevisionLinkPath, + blobLinkPath, + } + + manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()} + + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + + blobStore := &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: statter, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. + linkPathFns: manifestLinkPathFns, + linkDirectoryPathSpec: manifestDirectoryPathSpec, + } + + ms := &manifestStore{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + schema1Handler: &signedManifestHandler{ + ctx: ctx, + schema1SigningKey: repo.schema1SigningKey, + repository: repo, + blobStore: blobStore, + }, + schema2Handler: &schema2ManifestHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + manifestURLs: repo.registry.manifestURLs, + }, + manifestListHandler: &manifestListHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + }, + } + + // Apply options + for _, option := range options { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + + return ms, nil +} + +// Blobs returns an instance of the BlobStore. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: []linkPathFunc{blobLinkPath}, + } + + if repo.descriptorCache != nil { + statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + + return &linkedBlobStore{ + registry: repo.registry, + blobStore: repo.blobStore, + blobServer: repo.blobServer, + blobAccessController: statter, + repository: repo, + ctx: ctx, + + // TODO(stevvooe): linkPath limits this blob store to only layers. + // This instance cannot be used for manifest checks. + linkPathFns: []linkPathFunc{blobLinkPath}, + deleteEnabled: repo.registry.deleteEnabled, + resumableDigestEnabled: repo.resumableDigestEnabled, + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go new file mode 100644 index 0000000..9fe71bb --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go @@ -0,0 +1,136 @@ +package storage + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" +) + +var ( + errUnexpectedURL = errors.New("unexpected URL on layer") + errMissingURL = errors.New("missing URL on layer") + errInvalidURL = errors.New("invalid URL on layer") +) + +//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. +type schema2ManifestHandler struct { + repository distribution.Repository + blobStore distribution.BlobStore + ctx context.Context + manifestURLs manifestURLs +} + +var _ ManifestHandler = &schema2ManifestHandler{} + +func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") + + var m schema2.DeserializedManifest + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") + + m, ok := manifest.(*schema2.DeserializedManifest) + if !ok { + return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to store +// valid content, leaving trust policies of that content up to consumers. +func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if skipDependencyVerification { + return nil + } + + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + blobsService := ms.repository.Blobs(ctx) + + for _, descriptor := range mnfst.References() { + var err error + + switch descriptor.MediaType { + case schema2.MediaTypeForeignLayer: + // Clients download this layer from an external URL, so do not check for + // its presense. + if len(descriptor.URLs) == 0 { + err = errMissingURL + } + allow := ms.manifestURLs.allow + deny := ms.manifestURLs.deny + for _, u := range descriptor.URLs { + var pu *url.URL + pu, err = url.Parse(u) + if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) { + err = errInvalidURL + break + } + } + case schema2.MediaTypeManifest, schema1.MediaTypeManifest: + var exists bool + exists, err = manifestService.Exists(ctx, descriptor.Digest) + if err != nil || !exists { + err = distribution.ErrBlobUnknown // just coerce to unknown. + } + + fallthrough // double check the blob store. + default: + // forward all else to blob storage + if len(descriptor.URLs) == 0 { + _, err = blobsService.Stat(ctx, descriptor.Digest) + } + } + + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest}) + } + } + + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go new file mode 100644 index 0000000..5051fa3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go @@ -0,0 +1,136 @@ +package storage + +import ( + "regexp" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func TestVerifyManifestForeignLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + registry := createRegistry(t, inmemoryDriver, + ManifestURLsAllowRegexp(regexp.MustCompile("^https?://foo")), + ManifestURLsDenyRegexp(regexp.MustCompile("^https?://foo/nope"))) + repo := makeRepository(t, registry, "test") + manifestService := makeManifestService(t, repo) + + config, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeConfig, nil) + if err != nil { + t.Fatal(err) + } + + layer, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeLayer, nil) + if err != nil { + t.Fatal(err) + } + + foreignLayer := distribution.Descriptor{ + Digest: "sha256:463435349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeForeignLayer, + } + + template := schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, + }, + Config: config, + } + + type testcase struct { + BaseLayer distribution.Descriptor + URLs []string + Err error + } + + cases := []testcase{ + { + foreignLayer, + nil, + errMissingURL, + }, + { + // regular layers may have foreign urls + layer, + []string{"http://foo/bar"}, + nil, + }, + { + foreignLayer, + []string{"file:///local/file"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar#baz"}, + errInvalidURL, + }, + { + foreignLayer, + []string{""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"https://foo/bar", ""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"", "https://foo/bar"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://nope/bar"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/nope"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar"}, + nil, + }, + { + foreignLayer, + []string{"https://foo/bar"}, + nil, + }, + } + + for _, c := range cases { + m := template + l := c.BaseLayer + l.URLs = c.URLs + m.Layers = []distribution.Descriptor{l} + dm, err := schema2.FromStruct(m) + if err != nil { + t.Error(err) + continue + } + + _, err = manifestService.Put(ctx, dm) + if verr, ok := err.(distribution.ErrManifestVerification); ok { + // Extract the first error + if len(verr) == 2 { + if _, ok = verr[1].(distribution.ErrManifestBlobUnknown); ok { + err = verr[0] + } + } + } + if err != c.Err { + t.Errorf("%#v: expected %v, got %v", l, c.Err, err) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go new file mode 100644 index 0000000..30d3308 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go @@ -0,0 +1,141 @@ +package storage + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It +// can unmarshal and put schema1 manifests that have been signed by libtrust. +type signedManifestHandler struct { + repository distribution.Repository + schema1SigningKey libtrust.PrivateKey + blobStore distribution.BlobStore + ctx context.Context +} + +var _ ManifestHandler = &signedManifestHandler{} + +func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") + + var ( + signatures [][]byte + err error + ) + + jsig, err := libtrust.NewJSONSignature(content, signatures...) + if err != nil { + return nil, err + } + + if ms.schema1SigningKey != nil { + if err := jsig.Sign(ms.schema1SigningKey); err != nil { + return nil, err + } + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm schema1.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + return &sm, nil +} + +func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") + + sm, ok := manifest.(*schema1.SignedManifest) + if !ok { + return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { + return "", err + } + + mt := schema1.MediaTypeManifest + payload := sm.Canonical + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumers. +func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if len(mnfst.Name) > reference.NameTotalLengthMax { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), + }) + } + + if !reference.NameRegexp.MatchString(mnfst.Name) { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("invalid manifest name format"), + }) + } + + if len(mnfst.History) != len(mnfst.FSLayers) { + errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", + len(mnfst.History), len(mnfst.FSLayers))) + } + + if _, err := schema1.Verify(&mnfst); err != nil { + switch err { + case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: + errs = append(errs, distribution.ErrManifestUnverified{}) + default: + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, distribution.ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } + } + } + + if !skipDependencyVerification { + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore.go b/vendor/github.com/docker/distribution/registry/storage/tagstore.go new file mode 100644 index 0000000..4386ffc --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore.go @@ -0,0 +1,191 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +var _ distribution.TagService = &tagStore{} + +// tagStore provides methods to manage manifest tags in a backend storage driver. +// This implementation uses the same on-disk layout as the (now deleted) tag +// store. This provides backward compatibility with current registry deployments +// which only makes use of the Digest field of the returned distribution.Descriptor +// but does not enable full roundtripping of Descriptor objects +type tagStore struct { + repository *repository + blobStore *blobStore +} + +// All returns all tags +func (ts *tagStore) All(ctx context.Context) ([]string, error) { + var tags []string + + pathSpec, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + }) + if err != nil { + return tags, err + } + + entries, err := ts.blobStore.driver.List(ctx, pathSpec) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()} + default: + return tags, err + } + } + + for _, entry := range entries { + _, filename := path.Split(entry) + tags = append(tags, filename) + } + + return tags, nil +} + +// exists returns true if the specified manifest tag exists in the repository. +func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { + tagPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return false, err + } + + exists, err := exists(ctx, ts.blobStore.driver, tagPath) + if err != nil { + return false, err + } + + return exists, nil +} + +// Tag tags the digest with the given tag, updating the the store to point at +// the current tag. The digest must point to a manifest. +func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return err + } + + lbs := ts.linkedBlobStore(ctx, tag) + + // Link into the index + if err := lbs.linkBlob(ctx, desc); err != nil { + return err + } + + // Overwrite the current link + return ts.blobStore.link(ctx, currentPath, desc.Digest) +} + +// resolve the current revision for name and tag. +func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return distribution.Descriptor{}, err + } + + revision, err := ts.blobStore.readlink(ctx, currentPath) + if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} + } + + return distribution.Descriptor{}, err + } + + return distribution.Descriptor{Digest: revision}, nil +} + +// Untag removes the tag association +func (ts *tagStore) Untag(ctx context.Context, tag string) error { + tagPath, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.ErrTagUnknown{Tag: tag} + case nil: + break + default: + return err + } + + return ts.blobStore.driver.Delete(ctx, tagPath) +} + +// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one +// to index manifest blobs by tag name. While the tag store doesn't map +// precisely to the linked blob store, using this ensures the links are +// managed via the same code path. +func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { + return &linkedBlobStore{ + blobStore: ts.blobStore, + repository: ts.repository, + ctx: ctx, + linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestTagIndexEntryLinkPathSpec{ + name: name, + tag: tag, + revision: dgst, + }) + + }}, + } +} + +// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by +// digest, tag entries which point to it need to be recovered to avoid dangling tags. +func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { + allTags, err := ts.All(ctx) + switch err.(type) { + case distribution.ErrRepositoryUnknown: + // This tag store has been initialized but not yet populated + break + case nil: + break + default: + return nil, err + } + + var tags []string + for _, tag := range allTags { + tagLinkPathSpec := manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + } + + tagLinkPath, err := pathFor(tagLinkPathSpec) + tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) + if err != nil { + return nil, err + } + + if tagDigest == desc.Digest { + tags = append(tags, tag) + } + } + + return tags, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go b/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go new file mode 100644 index 0000000..554a46b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go @@ -0,0 +1,209 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type tagsTestEnv struct { + ts distribution.TagService + ctx context.Context +} + +func testTagStore(t *testing.T) *tagsTestEnv { + ctx := context.Background() + d := inmemory.New() + reg, err := NewRegistry(ctx, d) + if err != nil { + t.Fatal(err) + } + + repoRef, _ := reference.ParseNamed("a/b") + repo, err := reg.Repository(ctx, repoRef) + if err != nil { + t.Fatal(err) + } + + return &tagsTestEnv{ + ctx: ctx, + ts: repo.Tags(ctx), + } +} + +func TestTagStoreTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + + d := distribution.Descriptor{} + err := tags.Tag(ctx, "latest", d) + if err == nil { + t.Errorf("unexpected error putting malformed descriptor : %s", err) + } + + d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err := tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } + + // Overwrite existing + d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err = tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } +} + +func TestTagStoreUnTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} + + err := tags.Untag(ctx, "latest") + if err == nil { + t.Errorf("Expected error untagging non-existant tag") + } + + err = tags.Tag(ctx, "latest", desc) + if err != nil { + t.Error(err) + } + + err = tags.Untag(ctx, "latest") + if err != nil { + t.Error(err) + } + + errExpect := distribution.ErrTagUnknown{Tag: "latest"}.Error() + _, err = tags.Get(ctx, "latest") + if err == nil || err.Error() != errExpect { + t.Error("Expected error getting untagged tag") + } +} + +func TestTagStoreAll(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + alpha := "abcdefghijklmnopqrstuvwxyz" + for i := 0; i < len(alpha); i++ { + tag := alpha[i] + desc := distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"} + err := tagStore.Tag(ctx, string(tag), desc) + if err != nil { + t.Error(err) + } + } + + all, err := tagStore.All(ctx) + if err != nil { + t.Error(err) + } + if len(all) != len(alpha) { + t.Errorf("Unexpected count returned from enumerate") + } + + for i, c := range all { + if c != string(alpha[i]) { + t.Errorf("unexpected tag in enumerate %s", c) + } + } + + removed := "a" + err = tagStore.Untag(ctx, removed) + if err != nil { + t.Error(err) + } + + all, err = tagStore.All(ctx) + if err != nil { + t.Error(err) + } + for _, tag := range all { + if tag == removed { + t.Errorf("unexpected tag in enumerate %s", removed) + } + } + +} + +func TestTagLookup(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} + desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"} + + tags, err := tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + if len(tags) != 0 { + t.Fatalf("Lookup returned > 0 tags from empty store") + } + + err = tagStore.Tag(ctx, "a", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "b", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "0", desc0) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "1", desc0) + if err != nil { + t.Fatal(err) + } + + tags, err = tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags)) + } + + tags, err = tagStore.Lookup(ctx, desc0) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descB returned %d tags, expected 2", len(tags)) + } + +} diff --git a/vendor/github.com/docker/distribution/registry/storage/util.go b/vendor/github.com/docker/distribution/registry/storage/util.go new file mode 100644 index 0000000..773d7ba --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/util.go @@ -0,0 +1,21 @@ +package storage + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// Exists provides a utility method to test whether or not a path exists in +// the given driver. +func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { + if _, err := drv.Stat(ctx, path); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return false, nil + default: + return false, err + } + } + + return true, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/vacuum.go b/vendor/github.com/docker/distribution/registry/storage/vacuum.go new file mode 100644 index 0000000..3bdfebf --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/vacuum.go @@ -0,0 +1,67 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// vacuum contains functions for cleaning up repositories and blobs +// These functions will only reliably work on strongly consistent +// storage systems. +// https://en.wikipedia.org/wiki/Consistency_model + +// NewVacuum creates a new Vacuum +func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { + return Vacuum{ + ctx: ctx, + driver: driver, + } +} + +// Vacuum removes content from the filesystem +type Vacuum struct { + driver driver.StorageDriver + ctx context.Context +} + +// RemoveBlob removes a blob from the filesystem +func (v Vacuum) RemoveBlob(dgst string) error { + d, err := digest.ParseDigest(dgst) + if err != nil { + return err + } + + blobPath, err := pathFor(blobPathSpec{digest: d}) + if err != nil { + return err + } + + context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + + err = v.driver.Delete(v.ctx, blobPath) + if err != nil { + return err + } + + return nil +} + +// RemoveRepository removes a repository directory from the +// filesystem +func (v Vacuum) RemoveRepository(repoName string) error { + rootForRepository, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return err + } + repoDir := path.Join(rootForRepository, repoName) + context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) + err = v.driver.Delete(v.ctx, repoDir) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/walk.go b/vendor/github.com/docker/distribution/registry/storage/walk.go new file mode 100644 index 0000000..d979796 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/walk.go @@ -0,0 +1,59 @@ +package storage + +import ( + "errors" + "fmt" + "sort" + + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" +) + +// ErrSkipDir is used as a return value from onFileFunc to indicate that +// the directory named in the call is to be skipped. It is not returned +// as an error by any function. +var ErrSkipDir = errors.New("skip this directory") + +// WalkFn is called once per file by Walk +// If the returned error is ErrSkipDir and fileInfo refers +// to a directory, the directory will not be entered and Walk +// will continue the traversal. Otherwise Walk will return +type WalkFn func(fileInfo storageDriver.FileInfo) error + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { + children, err := driver.List(ctx, from) + if err != nil { + return err + } + sort.Stable(sort.StringSlice(children)) + for _, child := range children { + // TODO(stevvooe): Calling driver.Stat for every entry is quite + // expensive when running against backends with a slow Stat + // implementation, such as s3. This is very likely a serious + // performance bottleneck. + fileInfo, err := driver.Stat(ctx, child) + if err != nil { + return err + } + err = f(fileInfo) + skipDir := (err == ErrSkipDir) + if err != nil && !skipDir { + return err + } + + if fileInfo.IsDir() && !skipDir { + if err := Walk(ctx, driver, child, f); err != nil { + return err + } + } + } + return nil +} + +// pushError formats an error type given a path and an error +// and pushes it to a slice of errors +func pushError(errors []error, path string, err error) []error { + return append(errors, fmt.Errorf("%s: %s", path, err)) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/walk_test.go b/vendor/github.com/docker/distribution/registry/storage/walk_test.go new file mode 100644 index 0000000..3d7a4b1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/walk_test.go @@ -0,0 +1,152 @@ +package storage + +import ( + "fmt" + "sort" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) { + d := inmemory.New() + ctx := context.Background() + + expected := map[string]string{ + "/a": "dir", + "/a/b": "dir", + "/a/b/c": "dir", + "/a/b/c/d": "file", + "/a/b/c/e": "file", + "/a/b/f": "dir", + "/a/b/f/g": "file", + "/a/b/f/h": "file", + "/a/b/f/i": "file", + "/z": "dir", + "/z/y": "file", + } + + for p, typ := range expected { + if typ != "file" { + continue + } + + if err := d.PutContent(ctx, p, []byte(p)); err != nil { + t.Fatalf("unable to put content into fixture: %v", err) + } + } + + return d, expected, ctx +} + +func TestWalkErrors(t *testing.T) { + d, expected, ctx := testFS(t) + fileCount := len(expected) + err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error { + return nil + }) + if err == nil { + t.Error("Expected invalid root err") + } + + errEarlyExpected := fmt.Errorf("Early termination") + + err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { + // error on the 2nd file + if fileInfo.Path() == "/a/b" { + return errEarlyExpected + } + + delete(expected, fileInfo.Path()) + return nil + }) + if len(expected) != fileCount-1 { + t.Error("Walk failed to terminate with error") + } + if err != errEarlyExpected { + if err == nil { + t.Fatalf("expected an error due to early termination") + } else { + t.Error(err.Error()) + } + } + + err = Walk(ctx, d, "/nonexistent", func(fileInfo driver.FileInfo) error { + return nil + }) + if err == nil { + t.Errorf("Expected missing file err") + } + +} + +func TestWalk(t *testing.T) { + d, expected, ctx := testFS(t) + var traversed []string + err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + filetype, ok := expected[filePath] + if !ok { + t.Fatalf("Unexpected file in walk: %q", filePath) + } + + if fileInfo.IsDir() { + if filetype != "dir" { + t.Errorf("Unexpected file type: %q", filePath) + } + } else { + if filetype != "file" { + t.Errorf("Unexpected file type: %q", filePath) + } + + // each file has its own path as the contents. If the length + // doesn't match the path length, fail. + if fileInfo.Size() != int64(len(fileInfo.Path())) { + t.Fatalf("unexpected size for %q: %v != %v", + fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path())) + } + } + delete(expected, filePath) + traversed = append(traversed, filePath) + return nil + }) + if len(expected) > 0 { + t.Errorf("Missed files in walk: %q", expected) + } + + if !sort.StringsAreSorted(traversed) { + t.Errorf("result should be sorted: %v", traversed) + } + + if err != nil { + t.Fatalf(err.Error()) + } +} + +func TestWalkSkipDir(t *testing.T) { + d, expected, ctx := testFS(t) + err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + if filePath == "/a/b" { + // skip processing /a/b/c and /a/b/c/d + return ErrSkipDir + } + delete(expected, filePath) + return nil + }) + if err != nil { + t.Fatalf(err.Error()) + } + if _, ok := expected["/a/b/c"]; !ok { + t.Errorf("/a/b/c not skipped") + } + if _, ok := expected["/a/b/c/d"]; !ok { + t.Errorf("/a/b/c/d not skipped") + } + if _, ok := expected["/a/b/c/e"]; !ok { + t.Errorf("/a/b/c/e not skipped") + } + +} diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go new file mode 100644 index 0000000..5030565 --- /dev/null +++ b/vendor/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "github.com/docker/distribution/context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. + Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/vendor/github.com/docker/distribution/testutil/handler.go b/vendor/github.com/docker/distribution/testutil/handler.go new file mode 100644 index 0000000..00cd8a6 --- /dev/null +++ b/vendor/github.com/docker/distribution/testutil/handler.go @@ -0,0 +1,148 @@ +package testutil + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strings" +) + +// RequestResponseMap is an ordered mapping from Requests to Responses +type RequestResponseMap []RequestResponseMapping + +// RequestResponseMapping defines a Response to be sent in response to a given +// Request +type RequestResponseMapping struct { + Request Request + Response Response +} + +// Request is a simplified http.Request object +type Request struct { + // Method is the http method of the request, for example GET + Method string + + // Route is the http route of this request + Route string + + // QueryParams are the query parameters of this request + QueryParams map[string][]string + + // Body is the byte contents of the http request + Body []byte + + // Headers are the header for this request + Headers http.Header +} + +func (r Request) String() string { + queryString := "" + if len(r.QueryParams) > 0 { + keys := make([]string, 0, len(r.QueryParams)) + queryParts := make([]string, 0, len(r.QueryParams)) + for k := range r.QueryParams { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + for _, val := range r.QueryParams[k] { + queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) + } + } + queryString = "?" + strings.Join(queryParts, "&") + } + var headers []string + if len(r.Headers) > 0 { + var headerKeys []string + for k := range r.Headers { + headerKeys = append(headerKeys, k) + } + sort.Strings(headerKeys) + + for _, k := range headerKeys { + for _, val := range r.Headers[k] { + headers = append(headers, fmt.Sprintf("%s:%s", k, val)) + } + } + + } + return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) +} + +// Response is a simplified http.Response object +type Response struct { + // Statuscode is the http status code of the Response + StatusCode int + + // Headers are the http headers of this Response + Headers http.Header + + // Body is the response body + Body []byte +} + +// testHandler is an http.Handler with a defined mapping from Request to an +// ordered list of Response objects +type testHandler struct { + responseMap map[string][]Response +} + +// NewHandler returns a new test handler that responds to defined requests +// with specified responses +// Each time a Request is received, the next Response is returned in the +// mapping, until no Responses are defined, at which point a 404 is sent back +func NewHandler(requestResponseMap RequestResponseMap) http.Handler { + responseMap := make(map[string][]Response) + for _, mapping := range requestResponseMap { + responses, ok := responseMap[mapping.Request.String()] + if ok { + responseMap[mapping.Request.String()] = append(responses, mapping.Response) + } else { + responseMap[mapping.Request.String()] = []Response{mapping.Response} + } + } + return &testHandler{responseMap: responseMap} +} + +func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + requestBody, _ := ioutil.ReadAll(r.Body) + request := Request{ + Method: r.Method, + Route: r.URL.Path, + QueryParams: r.URL.Query(), + Body: requestBody, + Headers: make(map[string][]string), + } + + // Add headers of interest here + for k, v := range r.Header { + if k == "If-None-Match" { + request.Headers[k] = v + } + } + + responses, ok := app.responseMap[request.String()] + + if !ok || len(responses) == 0 { + http.NotFound(w, r) + return + } + + response := responses[0] + app.responseMap[request.String()] = responses[1:] + + responseHeader := w.Header() + for k, v := range response.Headers { + responseHeader[k] = v + } + + w.WriteHeader(response.StatusCode) + + io.Copy(w, bytes.NewReader(response.Body)) +} diff --git a/vendor/github.com/docker/distribution/testutil/manifests.go b/vendor/github.com/docker/distribution/testutil/manifests.go new file mode 100644 index 0000000..c4f9fef --- /dev/null +++ b/vendor/github.com/docker/distribution/testutil/manifests.go @@ -0,0 +1,87 @@ +package testutil + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/libtrust" +) + +// MakeManifestList constructs a manifest list out of a list of manifest digests +func MakeManifestList(blobstatter distribution.BlobStatter, manifestDigests []digest.Digest) (*manifestlist.DeserializedManifestList, error) { + ctx := context.Background() + + var manifestDescriptors []manifestlist.ManifestDescriptor + for _, manifestDigest := range manifestDigests { + descriptor, err := blobstatter.Stat(ctx, manifestDigest) + if err != nil { + return nil, err + } + platformSpec := manifestlist.PlatformSpec{ + Architecture: "atari2600", + OS: "CP/M", + Variant: "ternary", + Features: []string{"VLIW", "superscalaroutoforderdevnull"}, + } + manifestDescriptor := manifestlist.ManifestDescriptor{ + Descriptor: descriptor, + Platform: platformSpec, + } + manifestDescriptors = append(manifestDescriptors, manifestDescriptor) + } + + return manifestlist.FromDescriptors(manifestDescriptors) +} + +// MakeSchema1Manifest constructs a schema 1 manifest from a given list of digests and returns +// the digest of the manifest +func MakeSchema1Manifest(digests []digest.Digest) (distribution.Manifest, error) { + manifest := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: "who", + Tag: "cares", + } + + for _, digest := range digests { + manifest.FSLayers = append(manifest.FSLayers, schema1.FSLayer{BlobSum: digest}) + manifest.History = append(manifest.History, schema1.History{V1Compatibility: ""}) + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("unexpected error generating private key: %v", err) + } + + signedManifest, err := schema1.Sign(&manifest, pk) + if err != nil { + return nil, fmt.Errorf("error signing manifest: %v", err) + } + + return signedManifest, nil +} + +// MakeSchema2Manifest constructs a schema 2 manifest from a given list of digests and returns +// the digest of the manifest +func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Digest) (distribution.Manifest, error) { + ctx := context.Background() + blobStore := repository.Blobs(ctx) + builder := schema2.NewManifestBuilder(blobStore, []byte{}) + for _, digest := range digests { + builder.AppendReference(distribution.Descriptor{Digest: digest}) + } + + manifest, err := builder.Build(ctx) + if err != nil { + return nil, fmt.Errorf("unexpected error generating manifest: %v", err) + } + + return manifest, nil +} diff --git a/vendor/github.com/docker/distribution/testutil/tarfile.go b/vendor/github.com/docker/distribution/testutil/tarfile.go new file mode 100644 index 0000000..a8ba015 --- /dev/null +++ b/vendor/github.com/docker/distribution/testutil/tarfile.go @@ -0,0 +1,115 @@ +package testutil + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + mrand "math/rand" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// CreateRandomTarFile creates a random tarfile, returning it as an +// io.ReadSeeker along with its digest. An error is returned if there is a +// problem generating valid content. +func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) { + nFiles := mrand.Intn(10) + 10 + target := &bytes.Buffer{} + wr := tar.NewWriter(target) + + // Perturb this on each iteration of the loop below. + header := &tar.Header{ + Mode: 0644, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Uname: "randocalrissian", + Gname: "cloudcity", + AccessTime: time.Now(), + ChangeTime: time.Now(), + } + + for fileNumber := 0; fileNumber < nFiles; fileNumber++ { + fileSize := mrand.Int63n(1<<20) + 1<<20 + + header.Name = fmt.Sprint(fileNumber) + header.Size = fileSize + + if err := wr.WriteHeader(header); err != nil { + return nil, "", err + } + + randomData := make([]byte, fileSize) + + // Fill up the buffer with some random data. + n, err := mrand.Read(randomData) + + if n != len(randomData) { + return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) + } + + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(wr, bytes.NewReader(randomData)) + if nn != fileSize { + return nil, "", fmt.Errorf("short copy writing random file to tar") + } + + if err != nil { + return nil, "", err + } + + if err := wr.Flush(); err != nil { + return nil, "", err + } + } + + if err := wr.Close(); err != nil { + return nil, "", err + } + + dgst = digest.FromBytes(target.Bytes()) + + return bytes.NewReader(target.Bytes()), dgst, nil +} + +// CreateRandomLayers returns a map of n digests. We don't particularly care +// about the order of said digests (since they're all random anyway). +func CreateRandomLayers(n int) (map[digest.Digest]io.ReadSeeker, error) { + digestMap := map[digest.Digest]io.ReadSeeker{} + for i := 0; i < n; i++ { + rs, ds, err := CreateRandomTarFile() + if err != nil { + return nil, fmt.Errorf("unexpected error generating test layer file: %v", err) + } + + dgst := digest.Digest(ds) + digestMap[dgst] = rs + } + return digestMap, nil +} + +// UploadBlobs lets you upload blobs to a repository +func UploadBlobs(repository distribution.Repository, layers map[digest.Digest]io.ReadSeeker) error { + ctx := context.Background() + for digest, rs := range layers { + wr, err := repository.Blobs(ctx).Create(ctx) + if err != nil { + return fmt.Errorf("unexpected error creating upload: %v", err) + } + + if _, err := io.Copy(wr, rs); err != nil { + return fmt.Errorf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: digest}); err != nil { + return fmt.Errorf("unexpected error committinng upload: %v", err) + } + } + return nil +} diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go new file mode 100644 index 0000000..d433cca --- /dev/null +++ b/vendor/github.com/docker/distribution/uuid/uuid.go @@ -0,0 +1,126 @@ +// Package uuid provides simple UUID generation. Only version 4 style UUIDs +// can be generated. +// +// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. +package uuid + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x-%04x-%04x-%04x-%012x" +) + +var ( + // ErrUUIDInvalid indicates a parsed string is not a valid uuid. + ErrUUIDInvalid = fmt.Errorf("invalid uuid") + + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// Generate creates a new, version 4 uuid. +func Generate() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +// Parse attempts to extract a uuid from the string or returns an error. +func Parse(s string) (u UUID, err error) { + if len(s) != 36 { + return UUID{}, ErrUUIDInvalid + } + + // create stack addresses for each section of the uuid. + p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/distribution/uuid/uuid_test.go b/vendor/github.com/docker/distribution/uuid/uuid_test.go new file mode 100644 index 0000000..09c3a7b --- /dev/null +++ b/vendor/github.com/docker/distribution/uuid/uuid_test.go @@ -0,0 +1,48 @@ +package uuid + +import ( + "testing" +) + +const iterations = 1000 + +func TestUUID4Generation(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + if u[6]&0xf0 != 0x40 { + t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) + } + + if u[8]&0xc0 != 0x80 { + t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) + } + } +} + +func TestParseAndEquality(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + parsed, err := Parse(u.String()) + if err != nil { + t.Fatalf("error parsing uuid %v: %v", u, err) + } + + if parsed != u { + t.Fatalf("parsing round trip failed: %v != %v", parsed, u) + } + } + + for _, c := range []string{ + "bad", + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // correct length, incorrect format + " 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space + "20cc7775-2671-43c7-8742-51d1cfa23258 ", // trailing space + "00000000-0000-0000-0000-x00000000000", // out of range character + } { + if _, err := Parse(c); err == nil { + t.Fatalf("parsing %q should have failed", c) + } + } +} diff --git a/vendor/github.com/docker/distribution/version/print.go b/vendor/github.com/docker/distribution/version/print.go new file mode 100644 index 0000000..a82bce3 --- /dev/null +++ b/vendor/github.com/docker/distribution/version/print.go @@ -0,0 +1,26 @@ +package version + +import ( + "fmt" + "io" + "os" +) + +// FprintVersion outputs the version string to the writer, in the following +// format, followed by a newline: +// +// +// +// For example, a binary "registry" built from github.com/docker/distribution +// with version "v2.0" would print the following: +// +// registry github.com/docker/distribution v2.0 +// +func FprintVersion(w io.Writer) { + fmt.Fprintln(w, os.Args[0], Package, Version) +} + +// PrintVersion outputs the version information, from Fprint, to stdout. +func PrintVersion() { + FprintVersion(os.Stdout) +} diff --git a/vendor/github.com/docker/distribution/version/version.go b/vendor/github.com/docker/distribution/version/version.go new file mode 100644 index 0000000..3b72105 --- /dev/null +++ b/vendor/github.com/docker/distribution/version/version.go @@ -0,0 +1,11 @@ +package version + +// Package is the overall, canonical project import path under which the +// package was built. +var Package = "github.com/docker/distribution" + +// Version indicates which version of the binary is running. This is set to +// the latest release tag by hand, always suffixed by "+unknown". During +// build, it will be replaced by the actual version. The value here will be +// used if the registry is run after a go get based install. +var Version = "v2.6.2+unknown" diff --git a/vendor/github.com/docker/distribution/version/version.sh b/vendor/github.com/docker/distribution/version/version.sh new file mode 100755 index 0000000..53e29ce --- /dev/null +++ b/vendor/github.com/docker/distribution/version/version.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This bash script outputs the current, desired content of version.go, using +# git describe. For best effect, pipe this to the target file. Generally, this +# only needs to updated for releases. The actual value of will be replaced +# during build time if the makefile is used. + +set -e + +cat < + +**Description** + + + +**Steps to reproduce the issue:** +1. +2. +3. + +**Describe the results you received:** + + +**Describe the results you expected:** + + +**Additional information you deem important (e.g. issue happens only occasionally):** + +**Output of `docker version`:** + +``` +(paste your output here) +``` + +**Output of `docker info`:** + +``` +(paste your output here) +``` + +**Additional environment details (AWS, VirtualBox, physical, etc.):** diff --git a/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..4269818 --- /dev/null +++ b/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,30 @@ + + +**- What I did** + +**- How I did it** + +**- How to verify it** + +**- Description for the changelog** + + + +**- A picture of a cute animal (not mandatory but encouraged)** + diff --git a/vendor/github.com/docker/docker/.gitignore b/vendor/github.com/docker/docker/.gitignore new file mode 100644 index 0000000..be8b03d --- /dev/null +++ b/vendor/github.com/docker/docker/.gitignore @@ -0,0 +1,33 @@ +# Docker project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +*.exe +*.exe~ +*.orig +*.test +.*.swp +.DS_Store +# a .bashrc may be added to customize the build environment +.bashrc +.editorconfig +.gopath/ +.go-pkg-cache/ +autogen/ +bundles/ +cmd/dockerd/dockerd +cmd/docker/docker +dockerversion/version_autogen.go +dockerversion/version_autogen_unix.go +docs/AWS_S3_BUCKET +docs/GITCOMMIT +docs/GIT_BRANCH +docs/VERSION +docs/_build +docs/_static +docs/_templates +docs/changed-files +# generated by man/md2man-all.sh +man/man1 +man/man5 +man/man8 +vendor/pkg/ diff --git a/vendor/github.com/docker/docker/.mailmap b/vendor/github.com/docker/docker/.mailmap new file mode 100644 index 0000000..fe99e20 --- /dev/null +++ b/vendor/github.com/docker/docker/.mailmap @@ -0,0 +1,275 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + + + + +Guillaume J. Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni jpetazzo +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit +Sven Dowideit + +Alexander Morozov +Alexander Morozov + +O.S. Tezer + +Roberto G. Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + + +Francisco Carriedo + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle + + + + +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Jacob Atzen +Jeff Nickoloff +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +Madhu Venugopal +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin +Allen Sun +Adrien Gallouët + +Anuj Bahuguna +Anusha Ragunathan +Avi Miller +Brent Salisbury +Chander G +Chun Chen +Ying Li +Daehyeok Mun + +Daniel, Dao Quang Minh +Daniel Nephin +Dave Tucker +Doug Tangren +Frederick F. Kautz IV +Ben Golub +Harold Cooper +hsinko <21551195@zju.edu.cn> +Josh Hawn +Justin Cormack + + +Kamil Domański +Lei Jitang + +Linus Heckemann + +Lynda O'Leary + +Marianna Tessel +Michael Huettermann +Moysés Borges + +Nigel Poulton +Qiang Huang + +Boaz Shuster +Shuwei Hao + +Soshi Katsuta + +Stefan Berger + +Stephen Day + +Toli Kuznets +Tristan Carel + +Vincent Demeester + +Vishnu Kannan +xlgao-zju xlgao +yuchangchun y00277921 + + + + +Hao Shu Wei + + + + + + + +Shengbo Song mYmNeo +Shengbo Song + +Sylvain Bellemare + + + +Arnaud Porterie + +David M. Karr + + + +Kenfe-Mickaël Laventure + + + + + +Runshen Zhu +Tom Barlow +Xianlu Bird +Dan Feldman +Harry Zhang diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 0000000..246e2a3 --- /dev/null +++ b/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,1652 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Avilla +Adam Kunk +Adam Miller +Adam Mills +Adam Singer +Adam Walz +Aditi Rajagopal +Aditya +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Suda +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Ali Dehghani +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Richards +amangoel +Amen Belayneh +Amit Bakshi +Amit Krishnan +Amit Shukla +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew Munsell +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bilal Amarni +Bill W +bin liu +Blake Geno +Boaz Shuster +bobby abbott +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Hanxiao +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Hiltgen +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Davanum Srinivas +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +Eric Barch +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Windisch +Eric Yang +Eric-Olivier Lamey +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Everett Toews +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Fabiano Rosas +Fabio Falci +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +fonglh +fortinux +Francesc Campoy +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +frosforever +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Monroy +GabrielNicolasAvellaneda +Galen Sampson +Gareth Rushgrove +Garrett Barboza +Gaurav +gautam, prasanna +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +gs11 +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +He Simei +heartlock <21521209@zju.edu.cn> +Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou <1187766782@qq.com> +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Lee +Ian Main +Ian Truslove +Iavael +Icaro Seara +Igor Dolzhikov +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +J Bruni +J. Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jake Champlin +Jake Moshenko +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Paul Calderone +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Ji.Zhilong +Jian Zhang +jianbosun +Jilles Oldenbeuving +Jim Alateras +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John OBrien III +John Starks +John Tims +John Warwick +John Willis +johnharris85 +Jon Wedaman +Jonas Pfenniger +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +kayrus +Ke Xu +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Menard +Kevin P. Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill Kolyshkin +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin L +Konstantin Pelykh +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +lalyos +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Laszlo Meszaros +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Marshall +Lewis Peckover +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liaoqingwei +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +lixiaobing10051267 +LIZAO LI +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +lukemarsden +Lynda O'Leary +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mahesh Tiyyagura +malnick +Malte Janduda +manchoz +Manfred Touron +Manfred Zabarauskas +Mansi Nahar +mansinahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Robenolt +Matthew Heon +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minar +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Mickaël FORTUNATO +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +nick +Nick DeCoursin +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +Nicola Kabar +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +NikolaMandic +nikolas +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +orkaa +Oskar Niburski +Otto Kekäläinen +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +perhapszzy@sina.com +pestophagous +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Prayag Verma +Przemek Hejman +pysqz +qg <1373319223@qq.com> +qhuang +Qiang Huang +qq690388648 <690388648@qq.com> +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Regan McCooey +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +sakeven +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +Satoshi Tagomori +scaleoutsean +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean OMeara +Sean P. Kane +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Serhat Gülçiçek +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +skaasten +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom X. Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonis Tiigi +Tonny Xu +Tony Daws +Tony Miller +toogley +Torstein Husebø +tpng +tracylihui <793912329@qq.com> +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +Tõnis Tiigi +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Algaze +Victor Coisne +Victor Costan +Victor I. Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Xing +Ward Vandewege +WarheadsSE +Wayne Chang +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenkai Yin +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wes Morgan +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +Wonjun Kim +xamyzhao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +yangshukui +Yanqiang Miao +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +Youcef YEKHLEF +Yuan Sun +yuchangchun +yuchengxia +yuexiao-wang +YuPengZTE +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +Zhenan Ye <21551168@zju.edu.cn> +zhouhao +Zhu Guihua +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +搏通 diff --git a/vendor/github.com/docker/docker/CHANGELOG.md b/vendor/github.com/docker/docker/CHANGELOG.md new file mode 100644 index 0000000..36bb880 --- /dev/null +++ b/vendor/github.com/docker/docker/CHANGELOG.md @@ -0,0 +1,3337 @@ +# Changelog + +Items starting with `DEPRECATE` are important deprecation notices. For more +information on the list of deprecated flags and APIs please have a look at +https://docs.docker.com/engine/deprecated/ where target removal dates can also +be found. + +## 1.13.1 (2017-02-08) + +**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, +the `overlay2`, or `overlay` is now used by default (if the kernel supports it). +To use devicemapper, you can manually configure the storage driver to use through +the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` +configuration file. + +**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental +version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12 +_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. + +If you have already upgraded to Docker 1.13 without uninstalling +previously-installed plugins, you may see this message when the Docker daemon +starts: + + Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv + +To manually remove all plugins and resolve this problem, take the following steps: + +1. Remove plugins.json from: `/var/lib/docker/plugins/`. +2. Restart Docker. Verify that the Docker daemon starts with no errors. +3. Reinstall your plugins. + +### Contrib + +* Do not require a custom build of tini [#28454](https://github.com/docker/docker/pull/28454) +* Upgrade to Go 1.7.5 [#30489](https://github.com/docker/docker/pull/30489) + +### Remote API (v1.26) & Client + ++ Support secrets in docker stack deploy with compose file [#30144](https://github.com/docker/docker/pull/30144) + +### Runtime + +* Fix size issue in `docker system df` [#30378](https://github.com/docker/docker/pull/30378) +* Fix error on `docker inspect` when Swarm certificates were expired. [#29246](https://github.com/docker/docker/pull/29246) +* Fix deadlock on v1 plugin with activate error [#30408](https://github.com/docker/docker/pull/30408) +* Fix SELinux regression [#30649](https://github.com/docker/docker/pull/30649) + +### Plugins + +* Support global scoped network plugins (v2) in swarm mode [#30332](https://github.com/docker/docker/pull/30332) ++ Add `docker plugin upgrade` [#29414](https://github.com/docker/docker/pull/29414) + +### Windows + +* Fix small regression with old plugins in Windows [#30150](https://github.com/docker/docker/pull/30150) +* Fix warning on Windows [#30730](https://github.com/docker/docker/pull/30730) + +## 1.13.0 (2017-01-18) + +**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, +the `overlay2`, or `overlay` is now used by default (if the kernel supports it). +To use devicemapper, you can manually configure the storage driver to use through +the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` +configuration file. + +**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental +version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12 +_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. + +If you have already upgraded to Docker 1.13 without uninstalling +previously-installed plugins, you may see this message when the Docker daemon +starts: + + Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv + +To manually remove all plugins and resolve this problem, take the following steps: + +1. Remove plugins.json from: `/var/lib/docker/plugins/`. +2. Restart Docker. Verify that the Docker daemon starts with no errors. +3. Reinstall your plugins. + +### Builder + ++ Add capability to specify images used as a cache source on build. These images do not need to have local parent chain and can be pulled from other registries [#26839](https://github.com/docker/docker/pull/26839) ++ (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641) +* Fix dockerfile parser with empty line after escape [#24725](https://github.com/docker/docker/pull/24725) +- Add step number on `docker build` [#24978](https://github.com/docker/docker/pull/24978) ++ Add support for compressing build context during image build [#25837](https://github.com/docker/docker/pull/25837) ++ add `--network` to `docker build` [#27702](https://github.com/docker/docker/pull/27702) +- Fix inconsistent behavior between `--label` flag on `docker build` and `docker run` [#26027](https://github.com/docker/docker/issues/26027) +- Fix image layer inconsistencies when using the overlay storage driver [#27209](https://github.com/docker/docker/pull/27209) +* Unused build-args are now allowed. A warning is presented instead of an error and failed build [#27412](https://github.com/docker/docker/pull/27412) +- Fix builder cache issue on Windows [#27805](https://github.com/docker/docker/pull/27805) ++ Allow `USER` in builder on Windows [#28415](https://github.com/docker/docker/pull/28415) ++ Handle env case-insensitive on Windows [#28725](https://github.com/docker/docker/pull/28725) + +### Contrib + ++ Add support for building docker debs for Ubuntu 16.04 Xenial on PPC64LE [#23438](https://github.com/docker/docker/pull/23438) ++ Add support for building docker debs for Ubuntu 16.04 Xenial on s390x [#26104](https://github.com/docker/docker/pull/26104) ++ Add support for building docker debs for Ubuntu 16.10 Yakkety Yak on PPC64LE [#28046](https://github.com/docker/docker/pull/28046) +- Add RPM builder for VMWare Photon OS [#24116](https://github.com/docker/docker/pull/24116) ++ Add shell completions to tgz [#27735](https://github.com/docker/docker/pull/27735) +* Update the install script to allow using the mirror in China [#27005](https://github.com/docker/docker/pull/27005) ++ Add DEB builder for Ubuntu 16.10 Yakkety Yak [#27993](https://github.com/docker/docker/pull/27993) ++ Add RPM builder for Fedora 25 [#28222](https://github.com/docker/docker/pull/28222) ++ Add `make deb` support for aarch64 [#27625](https://github.com/docker/docker/pull/27625) + +### Distribution + +* Update notary dependency to 0.4.2 (full changelogs [here](https://github.com/docker/notary/releases/tag/v0.4.2)) [#27074](https://github.com/docker/docker/pull/27074) + - Support for compilation on windows [docker/notary#970](https://github.com/docker/notary/pull/970) + - Improved error messages for client authentication errors [docker/notary#972](https://github.com/docker/notary/pull/972) + - Support for finding keys that are anywhere in the `~/.docker/trust/private` directory, not just under `~/.docker/trust/private/root_keys` or `~/.docker/trust/private/tuf_keys` [docker/notary#981](https://github.com/docker/notary/pull/981) + - Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. [docker/notary#982](https://github.com/docker/notary/pull/982) + - Improve root validation and yubikey debug logging [docker/notary#858](https://github.com/docker/notary/pull/858) [docker/notary#891](https://github.com/docker/notary/pull/891) + - Warn if certificates for root or delegations are near expiry [docker/notary#802](https://github.com/docker/notary/pull/802) + - Warn if role metadata is near expiry [docker/notary#786](https://github.com/docker/notary/pull/786) + - Fix passphrase retrieval attempt counting and terminal detection [docker/notary#906](https://github.com/docker/notary/pull/906) +- Avoid unnecessary blob uploads when different users push same layers to authenticated registry [#26564](https://github.com/docker/docker/pull/26564) +* Allow external storage for registry credentials [#26354](https://github.com/docker/docker/pull/26354) + +### Logging + +* Standardize the default logging tag value in all logging drivers [#22911](https://github.com/docker/docker/pull/22911) +- Improve performance and memory use when logging of long log lines [#22982](https://github.com/docker/docker/pull/22982) ++ Enable syslog driver for windows [#25736](https://github.com/docker/docker/pull/25736) ++ Add Logentries Driver [#27471](https://github.com/docker/docker/pull/27471) ++ Update of AWS log driver to support tags [#27707](https://github.com/docker/docker/pull/27707) ++ Unix socket support for fluentd [#26088](https://github.com/docker/docker/pull/26088) +* Enable fluentd logging driver on Windows [#28189](https://github.com/docker/docker/pull/28189) +- Sanitize docker labels when used as journald field names [#23725](https://github.com/docker/docker/pull/23725) +- Fix an issue where `docker logs --tail` returned less lines than expected [#28203](https://github.com/docker/docker/pull/28203) +- Splunk Logging Driver: performance and reliability improvements [#26207](https://github.com/docker/docker/pull/26207) +- Splunk Logging Driver: configurable formats and skip for verifying connection [#25786](https://github.com/docker/docker/pull/25786) + +### Networking + ++ Add `--attachable` network support to enable `docker run` to work in swarm-mode overlay network [#25962](https://github.com/docker/docker/pull/25962) ++ Add support for host port PublishMode in services using the `--publish` option in `docker service create` [#27917](https://github.com/docker/docker/pull/27917) and [#28943](https://github.com/docker/docker/pull/28943) ++ Add support for Windows server 2016 overlay network driver (requires upcoming ws2016 update) [#28182](https://github.com/docker/docker/pull/28182) +* Change the default `FORWARD` policy to `DROP` [#28257](https://github.com/docker/docker/pull/28257) ++ Add support for specifying static IP addresses for predefined network on windows [#22208](https://github.com/docker/docker/pull/22208) +- Fix `--publish` flag on `docker run` not working with IPv6 addresses [#27860](https://github.com/docker/docker/pull/27860) +- Fix inspect network show gateway with mask [#25564](https://github.com/docker/docker/pull/25564) +- Fix an issue where multiple addresses in a bridge may cause `--fixed-cidr` to not have the correct addresses [#26659](https://github.com/docker/docker/pull/26659) ++ Add creation timestamp to `docker network inspect` [#26130](https://github.com/docker/docker/pull/26130) +- Show peer nodes in `docker network inspect` for swarm overlay networks [#28078](https://github.com/docker/docker/pull/28078) +- Enable ping for service VIP address [#28019](https://github.com/docker/docker/pull/28019) + +### Plugins + +- Move plugins out of experimental [#28226](https://github.com/docker/docker/pull/28226) +- Add `--force` on `docker plugin remove` [#25096](https://github.com/docker/docker/pull/25096) +* Add support for dynamically reloading authorization plugins [#22770](https://github.com/docker/docker/pull/22770) ++ Add description in `docker plugin ls` [#25556](https://github.com/docker/docker/pull/25556) ++ Add `-f`/`--format` to `docker plugin inspect` [#25990](https://github.com/docker/docker/pull/25990) ++ Add `docker plugin create` command [#28164](https://github.com/docker/docker/pull/28164) +* Send request's TLS peer certificates to authorization plugins [#27383](https://github.com/docker/docker/pull/27383) +* Support for global-scoped network and ipam plugins in swarm-mode [#27287](https://github.com/docker/docker/pull/27287) +* Split `docker plugin install` into two API call `/privileges` and `/pull` [#28963](https://github.com/docker/docker/pull/28963) + +### Remote API (v1.25) & Client + ++ Support `docker stack deploy` from a Compose file [#27998](https://github.com/docker/docker/pull/27998) ++ (experimental) Implement checkpoint and restore [#22049](https://github.com/docker/docker/pull/22049) ++ Add `--format` flag to `docker info` [#23808](https://github.com/docker/docker/pull/23808) +* Remove `--name` from `docker volume create` [#23830](https://github.com/docker/docker/pull/23830) ++ Add `docker stack ls` [#23886](https://github.com/docker/docker/pull/23886) ++ Add a new `is-task` ps filter [#24411](https://github.com/docker/docker/pull/24411) ++ Add `--env-file` flag to `docker service create` [#24844](https://github.com/docker/docker/pull/24844) ++ Add `--format` on `docker stats` [#24987](https://github.com/docker/docker/pull/24987) ++ Make `docker node ps` default to `self` in swarm node [#25214](https://github.com/docker/docker/pull/25214) ++ Add `--group` in `docker service create` [#25317](https://github.com/docker/docker/pull/25317) ++ Add `--no-trunc` to service/node/stack ps output [#25337](https://github.com/docker/docker/pull/25337) ++ Add Logs to `ContainerAttachOptions` so go clients can request to retrieve container logs as part of the attach process [#26718](https://github.com/docker/docker/pull/26718) ++ Allow client to talk to an older server [#27745](https://github.com/docker/docker/pull/27745) +* Inform user client-side that a container removal is in progress [#26074](https://github.com/docker/docker/pull/26074) ++ Add `Isolation` to the /info endpoint [#26255](https://github.com/docker/docker/pull/26255) ++ Add `userns` to the /info endpoint [#27840](https://github.com/docker/docker/pull/27840) +- Do not allow more than one mode be requested at once in the services endpoint [#26643](https://github.com/docker/docker/pull/26643) ++ Add capability to /containers/create API to specify mounts in a more granular and safer way [#22373](https://github.com/docker/docker/pull/22373) ++ Add `--format` flag to `network ls` and `volume ls` [#23475](https://github.com/docker/docker/pull/23475) +* Allow the top-level `docker inspect` command to inspect any kind of resource [#23614](https://github.com/docker/docker/pull/23614) ++ Add --cpus flag to control cpu resources for `docker run` and `docker create`, and add `NanoCPUs` to `HostConfig` [#27958](https://github.com/docker/docker/pull/27958) +- Allow unsetting the `--entrypoint` in `docker run` or `docker create` [#23718](https://github.com/docker/docker/pull/23718) +* Restructure CLI commands by adding `docker image` and `docker container` commands for more consistency [#26025](https://github.com/docker/docker/pull/26025) +- Remove `COMMAND` column from `service ls` output [#28029](https://github.com/docker/docker/pull/28029) ++ Add `--format` to `docker events` [#26268](https://github.com/docker/docker/pull/26268) +* Allow specifying multiple nodes on `docker node ps` [#26299](https://github.com/docker/docker/pull/26299) +* Restrict fractional digits to 2 decimals in `docker images` output [#26303](https://github.com/docker/docker/pull/26303) ++ Add `--dns-option` to `docker run` [#28186](https://github.com/docker/docker/pull/28186) ++ Add Image ID to container commit event [#28128](https://github.com/docker/docker/pull/28128) ++ Add external binaries version to docker info [#27955](https://github.com/docker/docker/pull/27955) ++ Add information for `Manager Addresses` in the output of `docker info` [#28042](https://github.com/docker/docker/pull/28042) ++ Add a new reference filter for `docker images` [#27872](https://github.com/docker/docker/pull/27872) + +### Runtime + ++ Add `--experimental` daemon flag to enable experimental features, instead of shipping them in a separate build [#27223](https://github.com/docker/docker/pull/27223) ++ Add a `--shutdown-timeout` daemon flag to specify the default timeout (in seconds) to stop containers gracefully before daemon exit [#23036](https://github.com/docker/docker/pull/23036) ++ Add `--stop-timeout` to specify the timeout value (in seconds) for individual containers to stop [#22566](https://github.com/docker/docker/pull/22566) ++ Add a new daemon flag `--userland-proxy-path` to allow configuring the userland proxy instead of using the hardcoded `docker-proxy` from `$PATH` [#26882](https://github.com/docker/docker/pull/26882) ++ Add boolean flag `--init` on `dockerd` and on `docker run` to use [tini](https://github.com/krallin/tini) a zombie-reaping init process as PID 1 [#26061](https://github.com/docker/docker/pull/26061) [#28037](https://github.com/docker/docker/pull/28037) ++ Add a new daemon flag `--init-path` to allow configuring the path to the `docker-init` binary [#26941](https://github.com/docker/docker/pull/26941) ++ Add support for live reloading insecure registry in configuration [#22337](https://github.com/docker/docker/pull/22337) ++ Add support for storage-opt size on Windows daemons [#23391](https://github.com/docker/docker/pull/23391) +* Improve reliability of `docker run --rm` by moving it from the client to the daemon [#20848](https://github.com/docker/docker/pull/20848) ++ Add support for `--cpu-rt-period` and `--cpu-rt-runtime` flags, allowing containers to run real-time threads when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel [#23430](https://github.com/docker/docker/pull/23430) +* Allow parallel stop, pause, unpause [#24761](https://github.com/docker/docker/pull/24761) / [#26778](https://github.com/docker/docker/pull/26778) +* Implement XFS quota for overlay2 [#24771](https://github.com/docker/docker/pull/24771) +- Fix partial/full filter issue in `service tasks --filter` [#24850](https://github.com/docker/docker/pull/24850) +- Allow engine to run inside a user namespace [#25672](https://github.com/docker/docker/pull/25672) +- Fix a race condition between device deferred removal and resume device, when using the devicemapper graphdriver [#23497](https://github.com/docker/docker/pull/23497) +- Add `docker stats` support in Windows [#25737](https://github.com/docker/docker/pull/25737) +- Allow using `--pid=host` and `--net=host` when `--userns=host` [#25771](https://github.com/docker/docker/pull/25771) ++ (experimental) Add metrics (Prometheus) output for basic `container`, `image`, and `daemon` operations [#25820](https://github.com/docker/docker/pull/25820) +- Fix issue in `docker stats` with `NetworkDisabled=true` [#25905](https://github.com/docker/docker/pull/25905) ++ Add `docker top` support in Windows [#25891](https://github.com/docker/docker/pull/25891) ++ Record pid of exec'd process [#27470](https://github.com/docker/docker/pull/27470) ++ Add support for looking up user/groups via `getent` [#27599](https://github.com/docker/docker/pull/27599) ++ Add new `docker system` command with `df` and `prune` subcommands for system resource management, as well as `docker {container,image,volume,network} prune` subcommands [#26108](https://github.com/docker/docker/pull/26108) [#27525](https://github.com/docker/docker/pull/27525) / [#27525](https://github.com/docker/docker/pull/27525) +- Fix an issue where containers could not be stopped or killed by setting xfs max_retries to 0 upon ENOSPC with devicemapper [#26212](https://github.com/docker/docker/pull/26212) +- Fix `docker cp` failing to copy to a container's volume dir on CentOS with devicemapper [#28047](https://github.com/docker/docker/pull/28047) +* Promote overlay(2) graphdriver [#27932](https://github.com/docker/docker/pull/27932) ++ Add `--seccomp-profile` daemon flag to specify a path to a seccomp profile that overrides the default [#26276](https://github.com/docker/docker/pull/26276) +- Fix ulimits in `docker inspect` when `--default-ulimit` is set on daemon [#26405](https://github.com/docker/docker/pull/26405) +- Add workaround for overlay issues during build in older kernels [#28138](https://github.com/docker/docker/pull/28138) ++ Add `TERM` environment variable on `docker exec -t` [#26461](https://github.com/docker/docker/pull/26461) +* Honor a container’s `--stop-signal` setting upon `docker kill` [#26464](https://github.com/docker/docker/pull/26464) + +### Swarm Mode + ++ Add secret management [#27794](https://github.com/docker/docker/pull/27794) ++ Add support for templating service options (hostname, mounts, and environment variables) [#28025](https://github.com/docker/docker/pull/28025) +* Display the endpoint mode in the output of `docker service inspect --pretty` [#26906](https://github.com/docker/docker/pull/26906) +* Make `docker service ps` output more bearable by shortening service IDs in task names [#28088](https://github.com/docker/docker/pull/28088) +* Make `docker node ps` default to the current node [#25214](https://github.com/docker/docker/pull/25214) ++ Add `--dns`, -`-dns-opt`, and `--dns-search` to service create. [#27567](https://github.com/docker/docker/pull/27567) ++ Add `--force` to `docker service update` [#27596](https://github.com/docker/docker/pull/27596) ++ Add `--health-*` and `--no-healthcheck` flags to `docker service create` and `docker service update` [#27369](https://github.com/docker/docker/pull/27369) ++ Add `-q` to `docker service ps` [#27654](https://github.com/docker/docker/pull/27654) +* Display number of global services in `docker service ls` [#27710](https://github.com/docker/docker/pull/27710) +- Remove `--name` flag from `docker service update`. This flag is only functional on `docker service create`, so was removed from the `update` command [#26988](https://github.com/docker/docker/pull/26988) +- Fix worker nodes failing to recover because of transient networking issues [#26646](https://github.com/docker/docker/issues/26646) +* Add support for health aware load balancing and DNS records [#27279](https://github.com/docker/docker/pull/27279) ++ Add `--hostname` to `docker service create` [#27857](https://github.com/docker/docker/pull/27857) ++ Add `--host` to `docker service create`, and `--host-add`, `--host-rm` to `docker service update` [#28031](https://github.com/docker/docker/pull/28031) ++ Add `--tty` flag to `docker service create`/`update` [#28076](https://github.com/docker/docker/pull/28076) +* Autodetect, store, and expose node IP address as seen by the manager [#27910](https://github.com/docker/docker/pull/27910) +* Encryption at rest of manager keys and raft data [#27967](https://github.com/docker/docker/pull/27967) ++ Add `--update-max-failure-ratio`, `--update-monitor` and `--rollback` flags to `docker service update` [#26421](https://github.com/docker/docker/pull/26421) +- Fix an issue with address autodiscovery on `docker swarm init` running inside a container [#26457](https://github.com/docker/docker/pull/26457) ++ (experimental) Add `docker service logs` command to view logs for a service [#28089](https://github.com/docker/docker/pull/28089) ++ Pin images by digest for `docker service create` and `update` [#28173](https://github.com/docker/docker/pull/28173) +* Add short (`-f`) flag for `docker node rm --force` and `docker swarm leave --force` [#28196](https://github.com/docker/docker/pull/28196) ++ Add options to customize Raft snapshots (`--max-snapshots`, `--snapshot-interval`) [#27997](https://github.com/docker/docker/pull/27997) +- Don't repull image if pinned by digest [#28265](https://github.com/docker/docker/pull/28265) ++ Swarm-mode support for Windows [#27838](https://github.com/docker/docker/pull/27838) ++ Allow hostname to be updated on service [#28771](https://github.com/docker/docker/pull/28771) ++ Support v2 plugins [#29433](https://github.com/docker/docker/pull/29433) ++ Add content trust for services [#29469](https://github.com/docker/docker/pull/29469) + +### Volume + ++ Add support for labels on volumes [#21270](https://github.com/docker/docker/pull/21270) ++ Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628) +* Add a `--force` flag in `docker volume rm` to forcefully purge the data of the volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436) +* Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671) +* Add support for local NFS volumes to resolve hostnames [#27329](https://github.com/docker/docker/pull/27329) + +### Security + +- Fix selinux labeling of volumes shared in a container [#23024](https://github.com/docker/docker/pull/23024) +- Prohibit `/sys/firmware/**` from being accessed with apparmor [#26618](https://github.com/docker/docker/pull/26618) + +### DEPRECATION + +- Marked the `docker daemon` command as deprecated. The daemon is moved to a separate binary (`dockerd`), and should be used instead [#26834](https://github.com/docker/docker/pull/26834) +- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208) +- Remove Ubuntu 15.10 (Wily Werewolf) as supported platform. Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042) +- Remove Fedora 22 as supported platform. Fedora 22 is EOL, and no longer receives updates [#27432](https://github.com/docker/docker/pull/27432) +- Remove Fedora 23 as supported platform. Fedora 23 is EOL, and no longer receives updates [#29455](https://github.com/docker/docker/pull/29455) +- Deprecate the `repo:shortid` syntax on `docker pull` [#27207](https://github.com/docker/docker/pull/27207) +- Deprecate backing filesystem without `d_type` for overlay and overlay2 storage drivers [#27433](https://github.com/docker/docker/pull/27433) +- Deprecate `MAINTAINER` in Dockerfile [#25466](https://github.com/docker/docker/pull/25466) +- Deprecate `filter` param for endpoint `/images/json` [#27872](https://github.com/docker/docker/pull/27872) +- Deprecate setting duplicate engine labels [#24533](https://github.com/docker/docker/pull/24533) +- Deprecate "top-level" network information in `NetworkSettings` [#28437](https://github.com/docker/docker/pull/28437) + +## 1.12.6 (2017-01-10) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + +**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or +that the IPAM driver can provide one when you specify the `--ipv6` option. + +If you are currently using the `--ipv6` option _without_ specifying the +`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the +following message: + +```none +Error starting daemon: Error initializing network controller: Error creating + default "bridge" network: failed to parse pool request + for address space "LocalDefault" pool " subpool ": + could not find an available, non-overlapping IPv6 address + pool among the defaults to assign to the network +``` + +To resolve this error, either remove the `--ipv6` flag (to preserve the same +behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the +value of the `--fixed-cidr-v6` flag. + +In a similar way, if you specify the `--ipv6` flag when creating a network +with the default IPAM driver, without providing an IPv6 `--subnet`, network +creation will fail with the following message: + +```none +Error response from daemon: failed to parse pool request for address space + "LocalDefault" pool "" subpool "": could not find an + available, non-overlapping IPv6 address pool among + the defaults to assign to the network +``` + +To resolve this, either remove the `--ipv6` flag (to preserve the same behavior +as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the +`--subnet` flag. + +The network network creation will instead succeed if you use an external IPAM driver +which supports automatic allocation of IPv6 subnets. + +### Runtime + +- Fix runC privilege escalation (CVE-2016-9962) + +## 1.12.5 (2016-12-15) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + +**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or +that the IPAM driver can provide one when you specify the `--ipv6` option. + +If you are currently using the `--ipv6` option _without_ specifying the +`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the +following message: + +```none +Error starting daemon: Error initializing network controller: Error creating + default "bridge" network: failed to parse pool request + for address space "LocalDefault" pool " subpool ": + could not find an available, non-overlapping IPv6 address + pool among the defaults to assign to the network +``` + +To resolve this error, either remove the `--ipv6` flag (to preserve the same +behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the +value of the `--fixed-cidr-v6` flag. + +In a similar way, if you specify the `--ipv6` flag when creating a network +with the default IPAM driver, without providing an IPv6 `--subnet`, network +creation will fail with the following message: + +```none +Error response from daemon: failed to parse pool request for address space + "LocalDefault" pool "" subpool "": could not find an + available, non-overlapping IPv6 address pool among + the defaults to assign to the network +``` + +To resolve this, either remove the `--ipv6` flag (to preserve the same behavior +as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the +`--subnet` flag. + +The network network creation will instead succeed if you use an external IPAM driver +which supports automatic allocation of IPv6 subnets. + +### Runtime + +- Fix race on sending stdin close event [#29424](https://github.com/docker/docker/pull/29424) + +### Networking + +- Fix panic in docker network ls when a network was created with `--ipv6` and no ipv6 `--subnet` in older docker versions [#29416](https://github.com/docker/docker/pull/29416) + +### Contrib + +- Fix compilation on Darwin [#29370](https://github.com/docker/docker/pull/29370) + +## 1.12.4 (2016-12-12) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + + +### Runtime + +- Fix issue where volume metadata was not removed [#29083](https://github.com/docker/docker/pull/29083) +- Asynchronously close streams to prevent holding container lock [#29050](https://github.com/docker/docker/pull/29050) +- Fix selinux labels for newly created container volumes [#29050](https://github.com/docker/docker/pull/29050) +- Remove hostname validation [#28990](https://github.com/docker/docker/pull/28990) +- Fix deadlocks caused by IO races [#29095](https://github.com/docker/docker/pull/29095) [#29141](https://github.com/docker/docker/pull/29141) +- Return an empty stats if the container is restarting [#29150](https://github.com/docker/docker/pull/29150) +- Fix volume store locking [#29151](https://github.com/docker/docker/pull/29151) +- Ensure consistent status code in API [#29150](https://github.com/docker/docker/pull/29150) +- Fix incorrect opaque directory permission in overlay2 [#29093](https://github.com/docker/docker/pull/29093) +- Detect plugin content and error out on `docker pull` [#29297](https://github.com/docker/docker/pull/29297) + +### Swarm Mode + +* Update Swarmkit [#29047](https://github.com/docker/docker/pull/29047) + - orchestrator/global: Fix deadlock on updates [docker/swarmkit#1760](https://github.com/docker/swarmkit/pull/1760) + - on leader switchover preserve the vxlan id for existing networks [docker/swarmkit#1773](https://github.com/docker/swarmkit/pull/1773) +- Refuse swarm spec not named "default" [#29152](https://github.com/docker/docker/pull/29152) + +### Networking + +* Update libnetwork [#29004](https://github.com/docker/docker/pull/29004) [#29146](https://github.com/docker/docker/pull/29146) + - Fix panic in embedded DNS [docker/libnetwork#1561](https://github.com/docker/libnetwork/pull/1561) + - Fix unmarhalling panic when passing --link-local-ip on global scope network [docker/libnetwork#1564](https://github.com/docker/libnetwork/pull/1564) + - Fix panic when network plugin returns nil StaticRoutes [docker/libnetwork#1563](https://github.com/docker/libnetwork/pull/1563) + - Fix panic in osl.(*networkNamespace).DeleteNeighbor [docker/libnetwork#1555](https://github.com/docker/libnetwork/pull/1555) + - Fix panic in swarm networking concurrent map read/write [docker/libnetwork#1570](https://github.com/docker/libnetwork/pull/1570) + * Allow encrypted networks when running docker inside a container [docker/libnetwork#1502](https://github.com/docker/libnetwork/pull/1502) + - Do not block autoallocation of IPv6 pool [docker/libnetwork#1538](https://github.com/docker/libnetwork/pull/1538) + - Set timeout for netlink calls [docker/libnetwork#1557](https://github.com/docker/libnetwork/pull/1557) + - Increase networking local store timeout to one minute [docker/libkv#140](https://github.com/docker/libkv/pull/140) + - Fix a panic in libnetwork.(*sandbox).execFunc [docker/libnetwork#1556](https://github.com/docker/libnetwork/pull/1556) + - Honor icc=false for internal networks [docker/libnetwork#1525](https://github.com/docker/libnetwork/pull/1525) + +### Logging + +* Update syslog log driver [#29150](https://github.com/docker/docker/pull/29150) + +### Contrib + +- Run "dnf upgrade" before installing in fedora [#29150](https://github.com/docker/docker/pull/29150) +- Add build-date back to RPM packages [#29150](https://github.com/docker/docker/pull/29150) +- deb package filename changed to include distro to distinguish between distro code names [#27829](https://github.com/docker/docker/pull/27829) + +## 1.12.3 (2016-10-26) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + + +### Runtime + +- Fix ambient capability usage in containers (CVE-2016-8867) [#27610](https://github.com/docker/docker/pull/27610) +- Prevent a deadlock in libcontainerd for Windows [#27136](https://github.com/docker/docker/pull/27136) +- Fix error reporting in CopyFileWithTar [#27075](https://github.com/docker/docker/pull/27075) +* Reset health status to starting when a container is restarted [#27387](https://github.com/docker/docker/pull/27387) +* Properly handle shared mount propagation in storage directory [#27609](https://github.com/docker/docker/pull/27609) +- Fix docker exec [#27610](https://github.com/docker/docker/pull/27610) +- Fix backward compatibility with containerd’s events log [#27693](https://github.com/docker/docker/pull/27693) + +### Swarm Mode + +- Fix conversion of restart-policy [#27062](https://github.com/docker/docker/pull/27062) +* Update Swarmkit [#27554](https://github.com/docker/docker/pull/27554) + * Avoid restarting a task that has already been restarted [docker/swarmkit#1305](https://github.com/docker/swarmkit/pull/1305) + * Allow duplicate published ports when they use different protocols [docker/swarmkit#1632](https://github.com/docker/swarmkit/pull/1632) + * Allow multiple randomly assigned published ports on service [docker/swarmkit#1657](https://github.com/docker/swarmkit/pull/1657) + - Fix panic when allocations happen at init time [docker/swarmkit#1651](https://github.com/docker/swarmkit/pull/1651) + +### Networking + +* Update libnetwork [#27559](https://github.com/docker/docker/pull/27559) + - Fix race in serializing sandbox to string [docker/libnetwork#1495](https://github.com/docker/libnetwork/pull/1495) + - Fix race during deletion [docker/libnetwork#1503](https://github.com/docker/libnetwork/pull/1503) + * Reset endpoint port info on connectivity revoke in bridge driver [docker/libnetwork#1504](https://github.com/docker/libnetwork/pull/1504) + - Fix a deadlock in networking code [docker/libnetwork#1507](https://github.com/docker/libnetwork/pull/1507) + - Fix a race in load balancer state [docker/libnetwork#1512](https://github.com/docker/libnetwork/pull/1512) + +### Logging + +* Update fluent-logger-golang to v1.2.1 [#27474](https://github.com/docker/docker/pull/27474) + +### Contrib + +* Update buildtags for armhf ubuntu-trusty [#27327](https://github.com/docker/docker/pull/27327) +* Add AppArmor to runc buildtags for armhf [#27421](https://github.com/docker/docker/pull/27421) + +## 1.12.2 (2016-10-11) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + + +### Runtime + +- Fix a panic due to a race condition filtering `docker ps` [#26049](https://github.com/docker/docker/pull/26049) +* Implement retry logic to prevent "Unable to remove filesystem" errors when using the aufs storage driver [#26536](https://github.com/docker/docker/pull/26536) +* Prevent devicemapper from removing device symlinks if `dm.use_deferred_removal` is enabled [#24740](https://github.com/docker/docker/pull/24740) +- Fix an issue where the CLI did not return correct exit codes if a command was run with invalid options [#26777](https://github.com/docker/docker/pull/26777) +- Fix a panic due to a bug in stdout / stderr processing in health checks [#26507](https://github.com/docker/docker/pull/26507) +- Fix exec's children handling [#26874](https://github.com/docker/docker/pull/26874) +- Fix exec form of HEALTHCHECK CMD [#26208](https://github.com/docker/docker/pull/26208) + +### Networking + +- Fix a daemon start panic on armv5 [#24315](https://github.com/docker/docker/issues/24315) +* Vendor libnetwork [#26879](https://github.com/docker/docker/pull/26879) [#26953](https://github.com/docker/docker/pull/26953) + * Avoid returning early on agent join failures [docker/libnetwork#1473](https://github.com/docker/libnetwork/pull/1473) + - Fix service published port cleanup issues [docker/libetwork#1432](https://github.com/docker/libnetwork/pull/1432) [docker/libnetwork#1433](https://github.com/docker/libnetwork/pull/1433) + * Recover properly from transient gossip failures [docker/libnetwork#1446](https://github.com/docker/libnetwork/pull/1446) + * Disambiguate node names known to gossip cluster to avoid node name collision [docker/libnetwork#1451](https://github.com/docker/libnetwork/pull/1451) + * Honor user provided listen address for gossip [docker/libnetwork#1460](https://github.com/docker/libnetwork/pull/1460) + * Allow reachability via published port across services on the same host [docker/libnetwork#1398](https://github.com/docker/libnetwork/pull/1398) + * Change the ingress sandbox name from random id to just `ingress_sbox` [docker/libnetwork#1449](https://github.com/docker/libnetwork/pull/1449) + - Disable service discovery in ingress network [docker/libnetwork#1489](https://github.com/docker/libnetwork/pull/1489) + +### Swarm Mode + +* Fix remote detection of a node's address when it joins the cluster [#26211](https://github.com/docker/docker/pull/26211) +* Vendor SwarmKit [#26765](https://github.com/docker/docker/pull/26765) + * Bounce session after failed status update [docker/swarmkit#1539](https://github.com/docker/swarmkit/pull/1539) + - Fix possible raft deadlocks [docker/swarmkit#1537](https://github.com/docker/swarmkit/pull/1537) + - Fix panic and endpoint leak when a service is updated with no endpoints [docker/swarmkit#1481](https://github.com/docker/swarmkit/pull/1481) + * Produce an error if the same port is published twice on `service create` or `service update` [docker/swarmkit#1495](https://github.com/docker/swarmkit/pull/1495) + - Fix an issue where changes to a service were not detected, resulting in the service not being updated [docker/swarmkit#1497](https://github.com/docker/swarmkit/pull/1497) + - Do not allow service creation on ingress network [docker/swarmkit#1600](https://github.com/docker/swarmkit/pull/1600) + +### Contrib + +* Update the debian sysv-init script to use `dockerd` instead of `docker daemon` [#25869](https://github.com/docker/docker/pull/25869) +* Improve stability when running the docker client on MacOS Sierra [#26875](https://github.com/docker/docker/pull/26875) +- Fix installation on debian stretch [#27184](https://github.com/docker/docker/pull/27184) + +### Windows + +- Fix an issue where arrow-navigation did not work when running the docker client in ConEmu [#25578](https://github.com/docker/docker/pull/25578) + +## 1.12.1 (2016-08-18) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + + +### Client + +* Add `Joined at` information in `node inspect --pretty` [#25512](https://github.com/docker/docker/pull/25512) +- Fix a crash on `service inspect` [#25454](https://github.com/docker/docker/pull/25454) +- Fix issue preventing `service update --env-add` to work as intended [#25427](https://github.com/docker/docker/pull/25427) +- Fix issue preventing `service update --publish-add` to work as intended [#25428](https://github.com/docker/docker/pull/25428) +- Remove `service update --network-add` and `service update --network-rm` flags + because this feature is not yet implemented in 1.12, but was inadvertently added + to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646) + +### Contrib + ++ Official ARM installation for Debian Jessie, Ubuntu Trusty, and Raspbian Jessie [#24815](https://github.com/docker/docker/pull/24815) [#25591](https://github.com/docker/docker/pull/25637) +- Add selinux policy per distro/version, fixing issue preventing successful installation on Fedora 24, and Oracle Linux [#25334](https://github.com/docker/docker/pull/25334) [#25593](https://github.com/docker/docker/pull/25593) + +### Networking + +- Fix issue that prevented containers to be accessed by hostname with Docker overlay driver in Swarm Mode [#25603](https://github.com/docker/docker/pull/25603) [#25648](https://github.com/docker/docker/pull/25648) +- Fix random network issues on service with published port [#25603](https://github.com/docker/docker/pull/25603) +- Fix unreliable inter-service communication after scaling down and up [#25603](https://github.com/docker/docker/pull/25603) +- Fix issue where removing all tasks on a node and adding them back breaks connectivity with other services [#25603](https://github.com/docker/docker/pull/25603) +- Fix issue where a task that fails to start results in a race, causing a `network xxx not found` error that masks the actual error [#25550](https://github.com/docker/docker/pull/25550) +- Relax validation of SRV records for external services that use SRV records not formatted according to RFC 2782 [#25739](https://github.com/docker/docker/pull/25739) + +### Plugins (experimental) + +* Make daemon events listen for plugin lifecycle events [#24760](https://github.com/docker/docker/pull/24760) +* Check for plugin state before enabling plugin [#25033](https://github.com/docker/docker/pull/25033) +- Remove plugin root from filesystem on `plugin rm` [#25187](https://github.com/docker/docker/pull/25187) +- Prevent deadlock when more than one plugin is installed [#25384](https://github.com/docker/docker/pull/25384) + +### Runtime + +* Mask join tokens in daemon logs [#25346](https://github.com/docker/docker/pull/25346) +- Fix `docker ps --filter` causing the results to no longer be sorted by creation time [#25387](https://github.com/docker/docker/pull/25387) +- Fix various crashes [#25053](https://github.com/docker/docker/pull/25053) + +### Security + +* Add `/proc/timer_list` to the masked paths list to prevent information leak from the host [#25630](https://github.com/docker/docker/pull/25630) +* Allow systemd to run with only `--cap-add SYS_ADMIN` rather than having to also add `--cap-add DAC_READ_SEARCH` or disabling seccomp filtering [#25567](https://github.com/docker/docker/pull/25567) + +### Swarm + +- Fix an issue where the swarm can get stuck electing a new leader after quorum is lost [#25055](https://github.com/docker/docker/issues/25055) +- Fix unwanted rescheduling of containers after a leader failover [#25017](https://github.com/docker/docker/issues/25017) +- Change swarm root CA key to P256 curve [swarmkit#1376](https://github.com/docker/swarmkit/pull/1376) +- Allow forced removal of a node from a swarm [#25159](https://github.com/docker/docker/pull/25159) +- Fix connection leak when a node leaves a swarm [swarmkit/#1277](https://github.com/docker/swarmkit/pull/1277) +- Backdate swarm certificates by one hour to tolerate more clock skew [swarmkit/#1243](https://github.com/docker/swarmkit/pull/1243) +- Avoid high CPU use with many unschedulable tasks [swarmkit/#1287](https://github.com/docker/swarmkit/pull/1287) +- Fix issue with global tasks not starting up [swarmkit/#1295](https://github.com/docker/swarmkit/pull/1295) +- Garbage collect raft logs [swarmkit/#1327](https://github.com/docker/swarmkit/pull/1327) + +### Volume + +- Persist local volume options after a daemon restart [#25316](https://github.com/docker/docker/pull/25316) +- Fix an issue where the mount ID was not returned on volume unmount [#25333](https://github.com/docker/docker/pull/25333) +- Fix an issue where a volume mount could inadvertently create a bind mount [#25309](https://github.com/docker/docker/pull/25309) +- `docker service create --mount type=bind,...` now correctly validates if the source path exists, instead of creating it [#25494](https://github.com/docker/docker/pull/25494) + +## 1.12.0 (2016-07-28) + + +**IMPORTANT**: Docker 1.12.0 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + +**IMPORTANT**: With Docker 1.12, a Linux docker installation now has two +additional binaries; `dockerd`, and `docker-proxy`. If you have scripts for +installing docker, please make sure to update them accordingly. + +### Builder + ++ New `HEALTHCHECK` Dockerfile instruction to support user-defined healthchecks [#23218](https://github.com/docker/docker/pull/23218) ++ New `SHELL` Dockerfile instruction to specify the default shell when using the shell form for commands in a Dockerfile [#22489](https://github.com/docker/docker/pull/22489) ++ Add `#escape=` Dockerfile directive to support platform-specific parsing of file paths in Dockerfile [#22268](https://github.com/docker/docker/pull/22268) ++ Add support for comments in `.dockerignore` [#23111](https://github.com/docker/docker/pull/23111) +* Support for UTF-8 in Dockerfiles [#23372](https://github.com/docker/docker/pull/23372) +* Skip UTF-8 BOM bytes from `Dockerfile` and `.dockerignore` if exist [#23234](https://github.com/docker/docker/pull/23234) +* Windows: support for `ARG` to match Linux [#22508](https://github.com/docker/docker/pull/22508) +- Fix error message when building using a daemon with the bridge network disabled [#22932](https://github.com/docker/docker/pull/22932) + +### Contrib + +* Enable seccomp for Centos 7 and Oracle Linux 7 [#22344](https://github.com/docker/docker/pull/22344) +- Remove MountFlags in systemd unit to allow shared mount propagation [#22806](https://github.com/docker/docker/pull/22806) + +### Distribution + ++ Add `--max-concurrent-downloads` and `--max-concurrent-uploads` daemon flags useful for situations where network connections don't support multiple downloads/uploads [#22445](https://github.com/docker/docker/pull/22445) +* Registry operations now honor the `ALL_PROXY` environment variable [#22316](https://github.com/docker/docker/pull/22316) +* Provide more information to the user on `docker load` [#23377](https://github.com/docker/docker/pull/23377) +* Always save registry digest metadata about images pushed and pulled [#23996](https://github.com/docker/docker/pull/23996) + +### Logging + ++ Syslog logging driver now supports DGRAM sockets [#21613](https://github.com/docker/docker/pull/21613) ++ Add `--details` option to `docker logs` to also display log tags [#21889](https://github.com/docker/docker/pull/21889) ++ Enable syslog logger to have access to env and labels [#21724](https://github.com/docker/docker/pull/21724) ++ An additional syslog-format option `rfc5424micro` to allow microsecond resolution in syslog timestamp [#21844](https://github.com/docker/docker/pull/21844) +* Inherit the daemon log options when creating containers [#21153](https://github.com/docker/docker/pull/21153) +* Remove `docker/` prefix from log messages tag and replace it with `{{.DaemonName}}` so that users have the option of changing the prefix [#22384](https://github.com/docker/docker/pull/22384) + +### Networking + ++ Built-in Virtual-IP based internal and ingress load-balancing using IPVS [#23361](https://github.com/docker/docker/pull/23361) ++ Routing Mesh using ingress overlay network [#23361](https://github.com/docker/docker/pull/23361) ++ Secured multi-host overlay networking using encrypted control-plane and Data-plane [#23361](https://github.com/docker/docker/pull/23361) ++ MacVlan driver is out of experimental [#23524](https://github.com/docker/docker/pull/23524) ++ Add `driver` filter to `network ls` [#22319](https://github.com/docker/docker/pull/22319) ++ Adding `network` filter to `docker ps --filter` [#23300](https://github.com/docker/docker/pull/23300) ++ Add `--link-local-ip` flag to `create`, `run` and `network connect` to specify a container's link-local address [#23415](https://github.com/docker/docker/pull/23415) ++ Add network label filter support [#21495](https://github.com/docker/docker/pull/21495) +* Removed dependency on external KV-Store for Overlay networking in Swarm-Mode [#23361](https://github.com/docker/docker/pull/23361) +* Add container's short-id as default network alias [#21901](https://github.com/docker/docker/pull/21901) +* `run` options `--dns` and `--net=host` are no longer mutually exclusive [#22408](https://github.com/docker/docker/pull/22408) +- Fix DNS issue when renaming containers with generated names [#22716](https://github.com/docker/docker/pull/22716) +- Allow both `network inspect -f {{.Id}}` and `network inspect -f {{.ID}}` to address inconsistency with inspect output [#23226](https://github.com/docker/docker/pull/23226) + +### Plugins (experimental) + ++ New `plugin` command to manager plugins with `install`, `enable`, `disable`, `rm`, `inspect`, `set` subcommands [#23446](https://github.com/docker/docker/pull/23446) + +### Remote API (v1.24) & Client + ++ Split the binary into two: `docker` (client) and `dockerd` (daemon) [#20639](https://github.com/docker/docker/pull/20639) ++ Add `before` and `since` filters to `docker images --filter` [#22908](https://github.com/docker/docker/pull/22908) ++ Add `--limit` option to `docker search` [#23107](https://github.com/docker/docker/pull/23107) ++ Add `--filter` option to `docker search` [#22369](https://github.com/docker/docker/pull/22369) ++ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) [#23520](https://github.com/docker/docker/pull/23520) ++ Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410) ++ Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556) ++ devicemapper: expose Mininum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945) +* API now returns a JSON object when an error occurs making it more consistent [#22880](https://github.com/docker/docker/pull/22880) +- Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777) +- Fix API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641) +- Fix discrepancy in the format of sizes in `stats` from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773) +- authz: when request is denied return forbbiden exit code (403) [#22448](https://github.com/docker/docker/pull/22448) +- Windows: fix tty-related displaying issues [#23878](https://github.com/docker/docker/pull/23878) + +### Runtime + ++ Split the userland proxy to a separate binary (`docker-proxy`) [#23312](https://github.com/docker/docker/pull/23312) ++ Add `--live-restore` daemon flag to keep containers running when daemon shuts down, and regain control on startup [#23213](https://github.com/docker/docker/pull/23213) ++ Ability to add OCI-compatible runtimes (via `--add-runtime` daemon flag) and select one with `--runtime` on `create` and `run` [#22983](https://github.com/docker/docker/pull/22983) ++ New `overlay2` graphdriver for Linux 4.0+ with multiple lower directory support [#22126](https://github.com/docker/docker/pull/22126) ++ New load/save image events [#22137](https://github.com/docker/docker/pull/22137) ++ Add support for reloading daemon configuration through systemd [#22446](https://github.com/docker/docker/pull/22446) ++ Add disk quota support for btrfs [#19651](https://github.com/docker/docker/pull/19651) ++ Add disk quota support for zfs [#21946](https://github.com/docker/docker/pull/21946) ++ Add support for `docker run --pid=container:` [#22481](https://github.com/docker/docker/pull/22481) ++ Align default seccomp profile with selected capabilities [#22554](https://github.com/docker/docker/pull/22554) ++ Add a `daemon reload` event when the daemon reloads its configuration [#22590](https://github.com/docker/docker/pull/22590) ++ Add `trace` capability in the pprof profiler to show execution traces in binary form [#22715](https://github.com/docker/docker/pull/22715) ++ Add a `detach` event [#22898](https://github.com/docker/docker/pull/22898) ++ Add support for setting sysctls with `--sysctl` [#19265](https://github.com/docker/docker/pull/19265) ++ Add `--storage-opt` flag to `create` and `run` allowing to set `size` on devicemapper [#19367](https://github.com/docker/docker/pull/19367) ++ Add `--oom-score-adjust` daemon flag with a default value of `-500` making the daemon less likely to be killed before containers [#24516](https://github.com/docker/docker/pull/24516) +* Undeprecate the `-c` short alias of `--cpu-shares` on `run`, `build`, `create`, `update` [#22621](https://github.com/docker/docker/pull/22621) +* Prevent from using aufs and overlay graphdrivers on an eCryptfs mount [#23121](https://github.com/docker/docker/pull/23121) +- Fix issues with tmpfs mount ordering [#22329](https://github.com/docker/docker/pull/22329) +- Created containers are no longer listed on `docker ps -a -f exited=0` [#21947](https://github.com/docker/docker/pull/21947) +- Fix an issue where containers are stuck in a "Removal In Progress" state [#22423](https://github.com/docker/docker/pull/22423) +- Fix bug that was returning an HTTP 500 instead of a 400 when not specifying a command on run/create [#22762](https://github.com/docker/docker/pull/22762) +- Fix bug with `--detach-keys` whereby input matching a prefix of the detach key was not preserved [#22943](https://github.com/docker/docker/pull/22943) +- SELinux labeling is now disabled when using `--privileged` mode [#22993](https://github.com/docker/docker/pull/22993) +- If volume-mounted into a container, `/etc/hosts`, `/etc/resolv.conf`, `/etc/hostname` are no longer SELinux-relabeled [#22993](https://github.com/docker/docker/pull/22993) +- Fix inconsistency in `--tmpfs` behavior regarding mount options [#22438](https://github.com/docker/docker/pull/22438) +- Fix an issue where daemon hangs at startup [#23148](https://github.com/docker/docker/pull/23148) +- Ignore SIGPIPE events to prevent journald restarts to crash docker in some cases [#22460](https://github.com/docker/docker/pull/22460) +- Containers are not removed from stats list on error [#20835](https://github.com/docker/docker/pull/20835) +- Fix `on-failure` restart policy when daemon restarts [#20853](https://github.com/docker/docker/pull/20853) +- Fix an issue with `stats` when a container is using another container's network [#21904](https://github.com/docker/docker/pull/21904) + +### Swarm Mode + ++ New `swarm` command to manage swarms with `init`, `join`, `join-token`, `leave`, `update` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#24823](https://github.com/docker/docker/pull/24823) ++ New `service` command to manage swarm-wide services with `create`, `inspect`, `update`, `rm`, `ps` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140) ++ New `node` command to manage nodes with `accept`, `promote`, `demote`, `inspect`, `update`, `ps`, `ls` and `rm` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140) ++ (experimental) New `stack` and `deploy` commands to manage and deploy multi-service applications [#23522](https://github.com/docker/docker/pull/23522) [#25140](https://github.com/docker/docker/pull/25140) + +### Volume + ++ Add support for local and global volume scopes (analogous to network scopes) [#22077](https://github.com/docker/docker/pull/22077) ++ Allow volume drivers to provide a `Status` field [#21006](https://github.com/docker/docker/pull/21006) ++ Add name/driver filter support for volume [#21361](https://github.com/docker/docker/pull/21361) +* Mount/Unmount operations now receives an opaque ID to allow volume drivers to differentiate between two callers [#21015](https://github.com/docker/docker/pull/21015) +- Fix issue preventing to remove a volume in a corner case [#22103](https://github.com/docker/docker/pull/22103) +- Windows: Enable auto-creation of host-path to match Linux [#22094](https://github.com/docker/docker/pull/22094) + + +### DEPRECATION +* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed + to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574) +* Remove deprecated `syslog-tag`, `gelf-tag`, `fluentd-tag` log option in favor of the more generic `tag` one [#22620](https://github.com/docker/docker/pull/22620) +* Remove deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570) +* Remove deprecated `-f`/`--force` flag on docker tag [#23090](https://github.com/docker/docker/pull/23090) +* Remove deprecated `/containers//copy` endpoint [#22149](https://github.com/docker/docker/pull/22149) +* Remove deprecated `docker ps` flags `--since` and `--before` [#22138](https://github.com/docker/docker/pull/22138) +* Deprecate the old 3-args form of `docker import` [#23273](https://github.com/docker/docker/pull/23273) + +## 1.11.2 (2016-05-31) + +### Networking + +- Fix a stale endpoint issue on overlay networks during ungraceful restart ([#23015](https://github.com/docker/docker/pull/23015)) +- Fix an issue where the wrong port could be reported by `docker inspect/ps/port` ([#22997](https://github.com/docker/docker/pull/22997)) + +### Runtime + +- Fix a potential panic when running `docker build` ([#23032](https://github.com/docker/docker/pull/23032)) +- Fix interpretation of `--user` parameter ([#22998](https://github.com/docker/docker/pull/22998)) +- Fix a bug preventing container statistics to be correctly reported ([#22955](https://github.com/docker/docker/pull/22955)) +- Fix an issue preventing container to be restarted after daemon restart ([#22947](https://github.com/docker/docker/pull/22947)) +- Fix issues when running 32 bit binaries on Ubuntu 16.04 ([#22922](https://github.com/docker/docker/pull/22922)) +- Fix a possible deadlock on image deletion and container attach ([#22918](https://github.com/docker/docker/pull/22918)) +- Fix an issue where containers fail to start after a daemon restart if they depend on a containerized cluster store ([#22561](https://github.com/docker/docker/pull/22561)) +- Fix an issue causing `docker ps` to hang on CentOS when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) +- Fix a bug preventing to `docker exec` into a container when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) + + +## 1.11.1 (2016-04-26) + +### Distribution + +- Fix schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949)) + +### Documentation + ++ Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048)) + +### Builder + +* Append label passed to `docker build` as arguments as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184)) + +### Networking + +- Fix a panic that would occur when forwarding DNS query ([#22261](https://github.com/docker/docker/pull/22261)) +- Fix an issue where OS threads could end up within an incorrect network namespace when using user defined networks ([#22261](https://github.com/docker/docker/pull/22261)) + +### Runtime + +- Fix a bug preventing labels configuration to be reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299)) +- Fix a regression where container mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256)) +- Fix an issue where it would be impossible to update both `memory-swap` and `memory` value together ([#22255](https://github.com/docker/docker/pull/22255)) +- Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254)) +- Add missing cleanup of container temporary files when cancelling a schedule restart ([#22237](https://github.com/docker/docker/pull/22237)) +- Remove scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993)) +- Fix a panic that would occur when the plugins were activated via the json spec ([#22191](https://github.com/docker/docker/pull/22191)) +- Fix restart backoff logic to correctly reset delay if container ran for at least 10secs ([#22125](https://github.com/docker/docker/pull/22125)) +- Remove error message when a container restart get cancelled ([#22123](https://github.com/docker/docker/pull/22123)) +- Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121)) +- Fix a panic that could occur when serving concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120))` +- Revert deprecation of non-existent host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065)) +- Hide misleading rpc error on daemon shutdown ([#22058](https://github.com/docker/docker/pull/22058)) + +## 1.11.0 (2016-04-13) + +**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binaries, please make sure to update them. Interaction with the daemon stay the same otherwise, the usage of the other binaries should be transparent. A Windows docker installation remains a single binary, `docker.exe`. + +### Builder + +- Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033)) +- Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162)) + +### Client + +* Usage of the `:` separator for security option has been deprecated. `=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232)) ++ The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373)) +* Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200)) +* Docker info will now warn users if it can not detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128)) +- Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803)) +- Fix a bug where some newly started container would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792)) +* Post processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587)) +- Values to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566)) ++ Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373)) ++ Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107)) +* `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017)) +* `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986)) +- Docker now prohibits login in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806)) +* Docker events attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761)) +* `docker ps` no longer shows exported port for stopped containers ([#19483](https://github.com/docker/docker/pull/19483)) +- Docker now cleans after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849)) +* Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#120078](https://github.com/docker/docker/pull/20078)) + +### Distribution + +- Fix a panic that occurred when pulling an image with 0 layers ([#21222](https://github.com/docker/docker/pull/21222)) +- Fix a panic that could occur on error while pushing to a registry with a misconfigured token service ([#21212](https://github.com/docker/docker/pull/21212)) ++ All first-level delegation roles are now signed when doing a trusted push ([#21046](https://github.com/docker/docker/pull/21046)) ++ OAuth support for registries was added ([#20970](https://github.com/docker/docker/pull/20970)) +* `docker login` now handles token using the implementation found in [docker/distribution](https://github.com/docker/distribution) ([#20832](https://github.com/docker/docker/pull/20832)) +* `docker login` will no longer prompt for an email ([#20565](https://github.com/docker/docker/pull/20565)) +* Docker will now fallback to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241)) +* Docker will now try to resume layer download where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840)) +- Fix generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509)) +- Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382)) + +### Logging + +- Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311)) +* Docker syslog driver now uses the RFC-5424 format when emitting logs ([#20121](https://github.com/docker/docker/pull/20121)) +* Docker GELF log driver now allows to specify the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options ([#19831](https://github.com/docker/docker/pull/19831)) +* Docker daemon learned to output uncolorized logs via the `--raw-logs` options ([#19794](https://github.com/docker/docker/pull/19794)) ++ Docker, on Windows platform, now includes an ETW (Event Tracing in Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689)) +* Journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564)) ++ The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` ([#19439](https://github.com/docker/docker/pull/19439)) ++ Docker learned to send log to Google Cloud via the new `gcplogs` logging driver. ([#18766](https://github.com/docker/docker/pull/18766)) + + +### Misc + ++ When saving linked images together with `docker save` a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385)) ++ Support for building the Docker cli for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325)) ++ Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270)) +* The `dockremap` is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266)) +- Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258)) +- Docker, when run as a service with systemd, will now properly manage its processes cgroups ([#20633](https://github.com/docker/docker/pull/20633)) +* `docker info` now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863)) +* `docker info` now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388)) +* Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894)) +* `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490),[#19851](https://github.com/docker/docker/pull/19851)) ++ Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013)) ++ Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348)) + +### Networking + +- Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671)) +- Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671)) +* `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160)) ++ Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122)) +* Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383)) +- Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431)) +* `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357)) ++ Control IPv6 via explicit option when creating a network (`docker network create --ipv6`). This shows up as a new `EnableIPv6` field in `docker network inspect` ([#17513](https://github.com/docker/docker/pull/17513)) +* Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396)) +- Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396)) +* Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix endpoint count inconsistency after an ungraceful dameon restart ([#21261](https://github.com/docker/docker/pull/21261)) +- Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed a bug which prevents docker reload when host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019)) +- Added inbuilt nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed bug in iptables.Exists() logic [#21019](https://github.com/docker/docker/pull/21019) +- Fixed a Veth interface leak when using overlay network ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed a bug which prevents docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214)) +- Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419)) +- Allow to pass global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419)) +- For anonymous containers use the alias name for IP to name mapping, ie:DNS PTR record ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix a panic when deleting an entry from /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019)) +- Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix to retain the network internal mode config for bridge networks on daemon reload ([#21780] (https://github.com/docker/docker/pull/21780)) +- Fix to retain IPAM driver option configs on daemon reload ([#21914] (https://github.com/docker/docker/pull/21914)) + +### Plugins + +- Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686)) +- Fix an issue where Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602)) + +### Runtime + +- Fix a panic that could occur when cleanup after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716)) +- Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692)) +- Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677)) +- Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in ([#21666](https://github.com/docker/docker/pull/21666)) + Docker 1.9, but was decided to be too much of a backward-incompatible change, so it was decided to keep the feature. ++ It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383)) ++ `docker inspect ` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370)) ++ Docker Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354)) +* Docker learned to report the faulty exe when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345)) +* Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097)) +- Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089)) +- Fix a hang that would happen on attach if initial start was to fail ([#21048](https://github.com/docker/docker/pull/21048)) +- Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045)) +- Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022)) +- Fix an issue where nanoseconds were not correctly taken in account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013)) +- Fix the handling of Docker command when passed a 64 bytes id ([#21002](https://github.com/docker/docker/pull/21002)) +* Docker will now return a `204` (i.e http.StatusNoContent) code when it successfully deleted a network ([#20977](https://github.com/docker/docker/pull/20977)) +- Fix a bug where the daemon would wait indefinitely in case the process it was about to killed had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967) +* The devmapper driver learned the `dm.min_free_space` option. If the mapped device free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786)) ++ Docker can now prevent processes in container to gain new privileges via the `--security-opt=no-new-privileges` flag ([#20727](https://github.com/docker/docker/pull/20727)) +- Starting a container with the `--device` option will now correctly resolves symlinks ([#20684](https://github.com/docker/docker/pull/20684)) ++ Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers. ([#20662](https://github.com/docker/docker/pull/20662)) +- Fix docker configuration reloading to only alter value present in the given config file ([#20604](https://github.com/docker/docker/pull/20604)) ++ Docker now allows setting a container hostname via the `--hostname` flag when `--net=host` ([#20177](https://github.com/docker/docker/pull/20177)) ++ Docker now allows executing privileged container while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111)) +- Fix Docker not cleaning up correctly old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679)) +* Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517)) +- Fix container loading, on daemon startup, when they depends on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500)) +* `docker update` learned how to change a container restart policy ([#19116](https://github.com/docker/docker/pull/19116)) +* `docker inspect` now also returns a new `State` field containing the container state in a human readable way (i.e. one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`)([#18966](https://github.com/docker/docker/pull/18966)) ++ Docker learned to limit the number of active pids (i.e. processes) within the container via the `pids-limit` flags. NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697)) +- `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078)) +- Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842)) +- Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802)) +- Fix a situation where a container cannot be stopped if the terminal is closed ([#21840](https://github.com/docker/docker/pull/21840)) + +### Security + +* Object with the `pcp_pmcd_t` selinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370)) +* `restart_syscall`, `copy_file_range`, `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262)) +* `send`, `recv` and `x32` were added to the list of allowed syscalls and arch in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432)) +* Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046)) +* Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591)) + +### Volumes + +* Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389)) +* Local volumes can now accept options similar to the unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262)) +- Fix an issue where one letter directory name could not be used as source for volumes ([#21106](https://github.com/docker/docker/pull/21106)) ++ `docker run -v` now accepts a new flag `nocopy`. This tells the runtime not to copy the container path content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223)) + +## 1.10.3 (2016-03-10) + +### Runtime + +- Fix Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706) +- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647) + +### Distribution + +- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831) +- Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030) + +### Plugin system + +- Fix issue preventing volume plugins to start when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834) +- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682) +- Fix plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680) + +### Security + +- Fix linux32 emulation to fail during docker build [#20672](https://github.com/docker/docker/pull/20672) + It was due to the `personality` syscall being blocked by the default seccomp profile. +- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981) + It was due to the `ipc` syscall being blocked by the default seccomp profile. +- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685) +- Fix issue preventing daemon to start if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725) + +## 1.10.2 (2016-02-22) + +### Runtime + +- Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518) +- Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333) +- Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446) +- Fix configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471) +- Fix occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522) + +### Distribution + +- Keep layer reference if deletion failed to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513) +- Handle gracefully a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372) +- Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367) +- Fix tar-split files corruption during migration that later cause docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458) + +### Networking + +- Fix daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510) + +### Volumes + +- Fix issue with multiple volume references with same name [#20381](https://github.com/docker/docker/pull/20381) + +### Security + +- Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523) + +## 1.10.1 (2016-02-11) + +### Runtime + +* Do not stop daemon on migration hard failure [#20156](https://github.com/docker/docker/pull/20156) +- Fix various issues with migration to content-addressable images [#20058](https://github.com/docker/docker/pull/20058) +- Fix ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045) +- Do not leak /dev/mqueue from the host to all containers, keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133) +- Fix `docker ps --filter before=...` to not show stopped containers without providing `-a` flag [#20135](https://github.com/docker/docker/pull/20135) + +### Security + +- Fix issue preventing docker events to work properly with authorization plugin [#20002](https://github.com/docker/docker/pull/20002) + +### Distribution + +* Add additional verifications and prevent from uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164) +- Fix regression preventing uppercase characters in image reference hostname [#20175](https://github.com/docker/docker/pull/20175) + +### Networking + +- Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060) +- Fix issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235) +- Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181) +- Fix port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181) + +### Logging + +- Fix bug where tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109) + +### Volumes + +- Fix issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983) + +### Misc + +- Remove TasksMax from Docker systemd service [#20167](https://github.com/docker/docker/pull/20167) + +## 1.10.0 (2016-02-04) + +**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers. +A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present. +Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration +We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime. +Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/ + +### Runtime + ++ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078) ++ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587) ++ Add `--format` flag to `docker images` command [#17692](https://github.com/docker/docker/pull/17692) ++ Allow to set daemon configuration in a file and hot-reload it with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587) ++ Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888) + This change is backward compatible in the API, but not on the CLI. ++ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959) ++ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466) ++ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879) ++ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277) ++ Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666) ++ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168) ++ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249) ++ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478) ++ Add `--cgroup-parent` flag on `daemon` to set cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062) ++ Add `-L` flag to docker cp to follow symlinks [#16613](https://github.com/docker/docker/pull/16613) ++ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908) +* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012) +* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495) +* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742) +* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704) +* Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115) +* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208) +* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780) +* Permit `OPTIONS` request against any url, thus fixing issue with CORS [#19569](https://github.com/docker/docker/pull/19569) +- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428) +- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326) +- Fix race condition causing autorestart turning off on restart [#17629](https://github.com/docker/docker/pull/17629) +- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216) +- Fix obscure bug preventing to start containers [#19751](https://github.com/docker/docker/pull/19751) +- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722) +- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123) + +### Security + ++ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187) ++ Add support for custom seccomp profiles in `--security-opt` [#17989](https://github.com/docker/docker/pull/17989) ++ Add default seccomp profile [#18780](https://github.com/docker/docker/pull/18780) ++ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365) ++ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887) + This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled. + Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled. +* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452) + +### Distribution + +* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924) + Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. + Images no longer depend on the parent chain but contain a list of layer references. + `docker load`/`docker save` tarballs now also contain content-addressable image configurations. + For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration +* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785) +* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109), [#18353](https://github.com/docker/docker/pull/18353) +* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590) +- Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743) + +### Networking + ++ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198) ++ Support for network-scoped alias using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242) ++ Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a container in a network [#19001](https://github.com/docker/docker/pull/19001) ++ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316) ++ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276) ++ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167) ++ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204) ++ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481) ++ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229) ++ Support for Capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775) ++ Add `--force` to `network disconnect` to force container to be disconnected from network [#19317](https://github.com/docker/docker/pull/19317) +* Support for multi-host networking using built-in overlay driver for all engine supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775) +* `--link` is now supported on `docker run` for containers in user-defined network [#19229](https://github.com/docker/docker/pull/19229) +* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489) +* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615) +* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316) +* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782) +* Add support for network connect/disconnect to stopped containers [#18906](https://github.com/docker/docker/pull/18906) +* Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323) +- Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108) +- Fix duplicate IP address for containers [#18106](https://github.com/docker/docker/pull/18106) +- Fix issue preventing sometimes docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338) +- Do not substitute 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573) + +### Logging + ++ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488) ++ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998) +* Enhance `docker logs --since` and `--until` to support nanoseconds and time [#17495](https://github.com/docker/docker/pull/17495) +* Enhance AWS logs to auto-detect region [#16640](https://github.com/docker/docker/pull/16640) + +### Volumes + ++ Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034) +* Add `ls` and `inspect` endpoints to volume plugin API [#16534](https://github.com/docker/docker/pull/16534) + Existing plugins need to make use of these new APIs to satisfy users' expectation + For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549) +- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175) +- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500) +- Fix `docker volumes ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671) +- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568) +- Allow external volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190) + +### Builder + ++ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090) +- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055) +- Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283) + +### Client + ++ Add support for overriding the API version to use via an `DOCKER_API_VERSION` environment-variable [#15964](https://github.com/docker/docker/pull/15964) +- Fix a bug preventing Windows clients to log in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891) + +### Misc + +* systemd: Set TasksMax in addition to LimitNPROC in systemd service file [#19391](https://github.com/docker/docker/pull/19391) + +### Deprecations + +* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700) +* Remove `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700) +* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724) +* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799) +* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809) +* Deprecate `-f` flag for docker tag [#18350](https://github.com/docker/docker/pull/18350) + +## 1.9.1 (2015-11-21) + +### Runtime + +- Do not prevent daemon from booting if images could not be restored (#17695) +- Force IPC mount to unmount on daemon shutdown/init (#17539) +- Turn IPC unmount errors into warnings (#17554) +- Fix `docker stats` performance regression (#17638) +- Clarify cryptic error message upon `docker logs` if `--log-driver=none` (#17767) +- Fix seldom panics (#17639, #17634, #17703) +- Fix opq whiteouts problems for files with dot prefix (#17819) +- devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918) +- devicemapper: fix displayed fs in docker info (#17974) +- selinux: only relabel if user requested so with the `z` option (#17450, #17834) +- Do not make network calls when normalizing names (#18014) + +### Client + +- Fix `docker login` on windows (#17738) +- Fix bug with `docker inspect` output when not connected to daemon (#17715) +- Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680) + +### Builder + +- Fix regression with symlink behavior in ADD/COPY (#17710) + +### Networking + +- Allow passing a network ID as an argument for `--net` (#17558) +- Fix connect to host and prevent disconnect from host for `host` network (#17476) +- Fix `--fixed-cidr` issue when gateway ip falls in ip-range and ip-range is + not the first block in the network (#17853) +- Restore deterministic `IPv6` generation from `MAC` address on default `bridge` network (#17890) +- Allow port-mapping only for endpoints created on docker run (#17858) +- Fixed an endpoint delete issue with a possible stale sbox (#18102) + +### Distribution + +- Correct parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047) + +## 1.9.0 (2015-11-03) + +### Runtime + ++ `docker stats` now returns block IO metrics (#15005) ++ `docker stats` now details network stats per interface (#15786) ++ Add `ancestor=` filter to `docker ps --filter` flag to filter +containers based on their ancestor images (#14570) ++ Add `label=` filter to `docker ps --filter` to filter containers +based on label (#16530) ++ Add `--kernel-memory` flag to `docker run` (#14006) ++ Add `--message` flag to `docker import` allowing to specify an optional +message (#15711) ++ Add `--privileged` flag to `docker exec` (#14113) ++ Add `--stop-signal` flag to `docker run` allowing to replace the container +process stopping signal (#15307) ++ Add a new `unless-stopped` restart policy (#15348) ++ Inspecting an image now returns tags (#13185) ++ Add container size information to `docker inspect` (#15796) ++ Add `RepoTags` and `RepoDigests` field to `/images/{name:.*}/json` (#17275) +- Remove the deprecated `/container/ps` endpoint from the API (#15972) +- Send and document correct HTTP codes for `/exec//start` (#16250) +- Share shm and mqueue between containers sharing IPC namespace (#15862) +- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235) +- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted +with `ro` option (#14965) +- Improve `rmi` performance (#16890) +- Do not update /etc/hosts for the default bridge network, except for links (#17325) +- Fix conflict with duplicate container names (#17389) +- Fix an issue with incorrect template execution in `docker inspect` (#17284) +- DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run (#16271) + +### Client + ++ Allow `docker import` to import from local files (#11907) + +### Builder + ++ Add a `STOPSIGNAL` Dockerfile instruction allowing to set a different +stop-signal for the container process (#15307) ++ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` +that allows to add build-time environment variables (#15182) +- Improve cache miss performance (#16890) + +### Storage + +- devicemapper: Implement deferred deletion capability (#16381) + +## Networking + ++ `docker network` exits experimental and is part of standard release (#16645) ++ New network top-level concept, with associated subcommands and API (#16645) + WARNING: the API is different from the experimental API ++ Support for multiple isolated/micro-segmented networks (#16645) ++ Built-in multihost networking using VXLAN based overlay driver (#14071) ++ Support for third-party network plugins (#13424) ++ Ability to dynamically connect containers to multiple networks (#16645) ++ Support for user-defined IP address management via pluggable IPAM drivers (#16910) ++ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in nodes discovery (#16229) ++ Add `--cluster-store-opt` for setting up TLS settings (#16644) ++ Add `--dns-opt` to the daemon (#16031) +- DEPRECATE following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, + `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`. + Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect + the networking settings of a container per network. + +### Volumes + ++ New top-level `volume` subcommand and API (#14242) +- Move API volume driver settings to host-specific config (#15798) +- Print an error message if volume name is not unique (#16009) +- Ensure volumes created from Dockerfiles always use the local volume driver +(#15507) +- DEPRECATE auto-creating missing host paths for bind mounts (#16349) + +### Logging + ++ Add `awslogs` logging driver for Amazon CloudWatch (#15495) ++ Add generic `tag` log option to allow customizing container/image +information passed to driver (e.g. show container names) (#15384) +- Implement the `docker logs` endpoint for the journald driver (#13707) +- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) (#15384) + +### Distribution + ++ `docker search` now works with partial names (#16509) +- Push optimization: avoid buffering to file (#15493) +- The daemon will display progress for images that were already being pulled +by another client (#15489) +- Only permissions required for the current action being performed are requested (#) ++ Renaming trust keys (and respective environment variables) from `offline` to +`root` and `tagging` to `repository` (#16894) +- DEPRECATE trust key environment variables +`DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and +`DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894) + +### Security + ++ Add SELinux profiles to the rpm package (#15832) +- Fix various issues with AppArmor profiles provided in the deb package +(#14609) +- Add AppArmor policy that prevents writing to /proc (#15571) + +## 1.8.3 (2015-10-12) + +### Distribution + +- Fix layer IDs lead to local graph poisoning (CVE-2014-8178) +- Fix manifest validation and parsing logic errors allow pull-by-digest validation bypass (CVE-2014-8179) ++ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry + +## 1.8.2 (2015-09-10) + +### Distribution + +- Fixes rare edge case of handling GNU LongLink and LongName entries. +- Fix ^C on docker pull. +- Fix docker pull issues on client disconnection. +- Fix issue that caused the daemon to panic when loggers weren't configured properly. +- Fix goroutine leak pulling images from registry V2. + +### Runtime + +- Fix a bug mounting cgroups for docker daemons running inside docker containers. +- Initialize log configuration properly. + +### Client: + +- Handle `-q` flag in `docker ps` properly when there is a default format. + +### Networking + +- Fix several corner cases with netlink. + +### Contrib + +- Fix several issues with bash completion. + +## 1.8.1 (2015-08-12) + +### Distribution + +* Fix a bug where pushing multiple tags would result in invalid images + +## 1.8.0 (2015-08-11) + +### Distribution + ++ Trusted pull, push and build, disabled by default +* Make tar layers deterministic between registries +* Don't allow deleting the image of running containers +* Check if a tag name to load is a valid digest +* Allow one character repository names +* Add a more accurate error description for invalid tag name +* Make build cache ignore mtime + +### Cli + ++ Add support for DOCKER_CONFIG/--config to specify config file dir ++ Add --type flag for docker inspect command ++ Add formatting options to `docker ps` with `--format` ++ Replace `docker -d` with new subcommand `docker daemon` +* Zsh completion updates and improvements +* Add some missing events to bash completion +* Support daemon urls with base paths in `docker -H` +* Validate status= filter to docker ps +* Display when a container is in --net=host in docker ps +* Extend docker inspect to export image metadata related to graph driver +* Restore --default-gateway{,-v6} daemon options +* Add missing unpublished ports in docker ps +* Allow duration strings in `docker events` as --since/--until +* Expose more mounts information in `docker inspect` + +### Runtime + ++ Add new Fluentd logging driver ++ Allow `docker import` to load from local files ++ Add logging driver for GELF via UDP ++ Allow to copy files from host to containers with `docker cp` ++ Promote volume drivers from experimental to master ++ Add rollover options to json-file log driver, and --log-driver-opts flag ++ Add memory swappiness tuning options +* Remove cgroup read-only flag when privileged +* Make /proc, /sys, & /dev readonly for readonly containers +* Add cgroup bind mount by default +* Overlay: Export metadata for container and image in `docker inspect` +* Devicemapper: external device activation +* Devicemapper: Compare uuid of base device on startup +* Remove RC4 from the list of registry cipher suites +* Add syslog-facility option +* LXC execdriver compatibility with recent LXC versions +* Mark LXC execriver as deprecated (to be removed with the migration to runc) + +### Plugins + +* Separate plugin sockets and specs locations +* Allow TLS connections to plugins + +### Bug fixes + +- Add missing 'Names' field to /containers/json API output +- Make `docker rmi` of dangling images safe while pulling +- Devicemapper: Change default basesize to 100G +- Go Scheduler issue with sync.Mutex and gcc +- Fix issue where Search API endpoint would panic due to empty AuthConfig +- Set image canonical names correctly +- Check dockerinit only if lxc driver is used +- Fix ulimit usage of nproc +- Always attach STDIN if -i,--interactive is specified +- Show error messages when saving container state fails +- Fixed incorrect assumption on --bridge=none treated as disable network +- Check for invalid port specifications in host configuration +- Fix endpoint leave failure for --net=host mode +- Fix goroutine leak in the stats API if the container is not running +- Check for apparmor file before reading it +- Fix DOCKER_TLS_VERIFY being ignored +- Set umask to the default on startup +- Correct the message of pause and unpause a non-running container +- Adjust disallowed CpuShares in container creation +- ZFS: correctly apply selinux context +- Display empty string instead of when IP opt is nil +- `docker kill` returns error when container is not running +- Fix COPY/ADD quoted/json form +- Fix goroutine leak on logs -f with no output +- Remove panic in nat package on invalid hostport +- Fix container linking in Fedora 22 +- Fix error caused using default gateways outside of the allocated range +- Format times in inspect command with a template as RFC3339Nano +- Make registry client to accept 2xx and 3xx http status responses as successful +- Fix race issue that caused the daemon to crash with certain layer downloads failed in a specific order. +- Fix error when the docker ps format was not valid. +- Remove redundant ip forward check. +- Fix issue trying to push images to repository mirrors. +- Fix error cleaning up network entrypoints when there is an initialization issue. + +## 1.7.1 (2015-07-14) + +#### Runtime + +- Fix default user spawning exec process with `docker exec` +- Make `--bridge=none` not to configure the network bridge +- Publish networking stats properly +- Fix implicit devicemapper selection with static binaries +- Fix socket connections that hung intermittently +- Fix bridge interface creation on CentOS/RHEL 6.6 +- Fix local dns lookups added to resolv.conf +- Fix copy command mounting volumes +- Fix read/write privileges in volumes mounted with --volumes-from + +#### Remote API + +- Fix unmarshalling of Command and Entrypoint +- Set limit for minimum client version supported +- Validate port specification +- Return proper errors when attach/reattach fail + +#### Distribution + +- Fix pulling private images +- Fix fallback between registry V2 and V1 + +## 1.7.0 (2015-06-16) + +#### Runtime ++ Experimental feature: support for out-of-process volume plugins +* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag +* The `exec` command supports the `-u|--user` flag to specify the new process owner ++ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags ++ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota` ++ Container block IO can be controlled in `docker run` using`--blkio-weight` ++ ZFS support ++ The `docker logs` command supports a `--since` argument ++ UTS namespace can be shared with the host with `docker run --uts=host` + +#### Quality +* Networking stack was entirely rewritten as part of the libnetwork effort +* Engine internals refactoring +* Volumes code was entirely rewritten to support the plugins effort ++ Sending SIGUSR1 to a daemon will dump all goroutines stacks without exiting + +#### Build ++ Support ${variable:-value} and ${variable:+value} syntax for environment variables ++ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems` ++ git context changes with branches and directories +* The .dockerignore file support exclusion rules + +#### Distribution ++ Client support for v2 mirroring support for the official registry + +#### Bugfixes +* Firewalld is now supported and will automatically be used when available +* mounting --device recursively + +## 1.6.2 (2015-05-13) + +#### Runtime +- Revert change prohibiting mounting into /sys + +## 1.6.1 (2015-05-07) + +#### Security +- Fix read/write /proc paths (CVE-2015-3630) +- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631) +- Fix opening of file-descriptor 1 (CVE-2015-3627) +- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629) +- Prohibit mount of /sys + +#### Runtime +- Update AppArmor policy to not allow mounts + +## 1.6.0 (2015-04-07) + +#### Builder ++ Building images from an image ID ++ Build containers with resource constraints, ie `docker build --cpu-shares=100 --memory=1024m...` ++ `commit --change` to apply specified Dockerfile instructions while committing the image ++ `import --change` to apply specified Dockerfile instructions while importing the image ++ Builds no longer continue in the background when canceled with CTRL-C + +#### Client ++ Windows Support + +#### Runtime ++ Container and image Labels ++ `--cgroup-parent` for specifying a parent cgroup to place container cgroup within ++ Logging drivers, `json-file`, `syslog`, or `none` ++ Pulling images by ID ++ `--ulimit` to set the ulimit on a container ++ `--default-ulimit` option on the daemon which applies to all created containers (and overwritten by `--ulimit` on run) + +## 1.5.0 (2015-02-10) + +#### Builder ++ Dockerfile to use for a given `docker build` can be specified with the `-f` flag +* Dockerfile and .dockerignore files can be themselves excluded as part of the .dockerignore file, thus preventing modifications to these files invalidating ADD or COPY instructions cache +* ADD and COPY instructions accept relative paths +* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier +* Improve performance when exposing a large number of ports + +#### Hack ++ Allow client-side only integration tests for Windows +* Include docker-py integration tests against Docker daemon as part of our test suites + +#### Packaging ++ Support for the new version of the registry HTTP API +* Speed up `docker push` for images with a majority of already existing layers +- Fixed contacting a private registry through a proxy + +#### Remote API ++ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command ++ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command +* Container `inspect` endpoint show the ID of `exec` commands running in this container +* Container `inspect` endpoint show the number of times Docker auto-restarted the container +* New types of event can be streamed by the `events` endpoint: ‘OOM’ (container died with out of memory), ‘exec_create’, and ‘exec_start' +- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes + +#### Runtime ++ Docker daemon has full IPv6 support ++ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools ++ The `docker run` command can take the `--read-only` flag to make the container’s root filesystem mounted as readonly, which can be used in combination with volumes to force a container’s processes to only write to locations that will be persisted ++ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag +* Major stability improvements for devicemapper storage driver +* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted +* Better integration with host system: per-container iptable rules are moved to the DOCKER chain +- Fixed container exiting on out of memory to return an invalid exit code + +#### Other +* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon + +## 1.4.1 (2014-12-15) + +#### Runtime +- Fix issue with volumes-from and bind mounts not being honored after create + +## 1.4.0 (2014-12-11) + +#### Notable Features since 1.3.0 ++ Set key=value labels to the daemon (displayed in `docker info`), applied with + new `-label` daemon flag ++ Add support for `ENV` in Dockerfile of the form: + `ENV name=value name2=value2...` ++ New Overlayfs Storage Driver ++ `docker info` now returns an `ID` and `Name` field ++ Filter events by event name, container, or image ++ `docker cp` now supports copying from container volumes +- Fixed `docker tag`, so it honors `--force` when overriding a tag for existing + image. + +## 1.3.3 (2014-12-11) + +#### Security +- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356) +- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357) +- Validate image IDs (CVE-2014-9358) + +#### Runtime +- Fix an issue when image archives are being read slowly + +#### Client +- Fix a regression related to stdin redirection +- Fix a regression with `docker cp` when destination is the current directory + +## 1.3.2 (2014-11-20) + +#### Security +- Fix tar breakout vulnerability +* Extractions are now sandboxed chroot +- Security options are no longer committed to images + +#### Runtime +- Fix deadlock in `docker ps -f exited=1` +- Fix a bug when `--volumes-from` references a container that failed to start + +#### Registry ++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 +* Private registries whose IPs fall in the 127.0.0.0/8 range do no need the `--insecure-registry` flag +- Skip the experimental registry v2 API when mirroring is enabled + +## 1.3.1 (2014-10-28) + +#### Security +* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry ++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified + +#### Runtime +- Fix issue where volumes would not be shared + +#### Client +- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false` +- Fix docker run output to non-TTY stdout + +#### Builder +- Fix escaping `$` for environment variables +- Fix issue with lowercase `onbuild` Dockerfile instruction +- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` + +## 1.3.0 (2014-10-14) + +#### Notable features since 1.2.0 ++ Docker `exec` allows you to run additional processes inside existing containers ++ Docker `create` gives you the ability to create a container via the CLI without executing a process ++ `--security-opts` options to allow user to customize container labels and apparmor profiles ++ Docker `ps` filters +- Wildcard support to COPY/ADD ++ Move production URLs to get.docker.com from get.docker.io ++ Allocate IP address on the bridge inside a valid CIDR ++ Use drone.io for PR and CI testing ++ Ability to setup an official registry mirror ++ Ability to save multiple images with docker `save` + +## 1.2.0 (2014-08-20) + +#### Runtime ++ Make /etc/hosts /etc/resolv.conf and /etc/hostname editable at runtime ++ Auto-restart containers using policies ++ Use /var/lib/docker/tmp for large temporary files ++ `--cap-add` and `--cap-drop` to tweak what linux capability you want ++ `--device` to use devices in containers + +#### Client ++ `docker search` on private registries ++ Add `exited` filter to `docker ps --filter` +* `docker rm -f` now kills instead of stop ++ Support for IPv6 addresses in `--dns` flag + +#### Proxy ++ Proxy instances in separate processes +* Small bug fix on UDP proxy + +## 1.1.2 (2014-07-23) + +#### Runtime ++ Fix port allocation for existing containers ++ Fix containers restart on daemon restart + +#### Packaging ++ Fix /etc/init.d/docker issue on Debian + +## 1.1.1 (2014-07-09) + +#### Builder +* Fix issue with ADD + +## 1.1.0 (2014-07-03) + +#### Notable features since 1.0.1 ++ Add `.dockerignore` support ++ Pause containers during `docker commit` ++ Add `--tail` to `docker logs` + +#### Builder ++ Allow a tar file as context for `docker build` +* Fix issue with white-spaces and multi-lines in `Dockerfiles` + +#### Runtime +* Overall performance improvements +* Allow `/` as source of `docker run -v` +* Fix port allocation +* Fix bug in `docker save` +* Add links information to `docker inspect` + +#### Client +* Improve command line parsing for `docker commit` + +#### Remote API +* Improve status code for the `start` and `stop` endpoints + +## 1.0.1 (2014-06-19) + +#### Notable features since 1.0.0 +* Enhance security for the LXC driver + +#### Builder +* Fix `ONBUILD` instruction passed to grandchildren + +#### Runtime +* Fix events subscription +* Fix /etc/hostname file with host networking +* Allow `-h` and `--net=none` +* Fix issue with hotplug devices in `--privileged` + +#### Client +* Fix artifacts with events +* Fix a panic with empty flags +* Fix `docker cp` on Mac OS X + +#### Miscellaneous +* Fix compilation on Mac OS X +* Fix several races + +## 1.0.0 (2014-06-09) + +#### Notable features since 0.12.0 +* Production support + +## 0.12.0 (2014-06-05) + +#### Notable features since 0.11.0 +* 40+ various improvements to stability, performance and usability +* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file +* Inherit file permissions from the host on `ADD` +* New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer +* The `images` command has a `-f`/`--filter` option to filter the list of images +* Add `--force-rm` to clean up after a failed build +* Standardize JSON keys in Remote API to CamelCase +* Pull from a docker run now assumes `latest` tag if not specified +* Enhance security on Linux capabilities and device nodes + +## 0.11.1 (2014-05-07) + +#### Registry +- Fix push and pull to private registry + +## 0.11.0 (2014-05-07) + +#### Notable features since 0.10.0 + +* SELinux support for mount and process labels +* Linked containers can be accessed by hostname +* Use the net `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces +* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon +* Logs can now be returned with an optional timestamp +* Docker now works with registries that support SHA-512 +* Multiple registry endpoints are supported to allow registry mirrors + +## 0.10.0 (2014-04-08) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. +- Follow symlinks inside container's root for ADD build instructions. +- Fix EXPOSE caching. + +#### Documentation +- Add the new options of `docker ps` to the documentation. +- Add the options of `docker restart` to the documentation. +- Update daemon docs and help messages for --iptables and --ip-forward. +- Updated apt-cacher-ng docs example. +- Remove duplicate description of --mtu from docs. +- Add missing -t and -v for `docker images` to the docs. +- Add fixes to the cli docs. +- Update libcontainer docs. +- Update images in docs to remove references to AUFS and LXC. +- Update the nodejs_web_app in the docs to use the new epel RPM address. +- Fix external link on security of containers. +- Update remote API docs. +- Add image size to history docs. +- Be explicit about binding to all interfaces in redis example. +- Document DisableNetwork flag in the 1.10 remote api. +- Document that `--lxc-conf` is lxc only. +- Add chef usage documentation. +- Add example for an image with multiple for `docker load`. +- Explain what `docker run -a` does in the docs. + +#### Contrib +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. +- Add check-config script to contrib. +- Fix fish shell completion. + +#### Hack +* Clean up "go test" output from "make test" to be much more readable/scannable. +* Exclude more "definitely not unit tested Go source code" directories from hack/make/test. ++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. +- Include contributed completions in Ubuntu PPA. ++ Add cli integration tests. +* Add tweaks to the hack scripts to make them simpler. + +#### Remote API ++ Add TLS auth support for API. +* Move git clone from daemon to client. +- Fix content-type detection in docker cp. +* Split API into 2 go packages. + +#### Runtime +* Support hairpin NAT without going through Docker server. +- devicemapper: succeed immediately when removing non-existent devices. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). +- devicemapper: increase timeout in waitClose to 10 seconds. +- devicemapper: ensure we shut down thin pool cleanly. +- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. +- devicemapper: avoid AB-BA deadlock. +- devicemapper: make shutdown better/faster. +- improve alpha sorting in mflag. +- Remove manual http cookie management because the cookiejar is being used. +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Add FreeBSD support for the client. +- Merge auth package into registry. +- Add deprecation warning for -t on `docker pull`. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. +- Fix attach exit on darwin. +- Improve deprecation message. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Only unshare the mount namespace for execin. +- Merge existing config when committing. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Mount cgroups automatically if they're not mounted already. +- Use mock for search tests. +- Update to double-dash everywhere. +- Move .dockerenv parsing to lxc driver. +- Move all bind-mounts in the container inside the namespace. +- Don't use separate bind mount for container. +- Always symlink /dev/ptmx for libcontainer. +- Don't kill by pid for other drivers. +- Add initial logging to libcontainer. +* Sort by port in `docker ps`. +- Move networking drivers into runtime top level package. ++ Add --no-prune to `docker rmi`. ++ Add time since exit in `docker ps`. +- graphdriver: add build tags. +- Prevent allocation of previously allocated ports & prevent improve port allocation. +* Add support for --since/--before in `docker ps`. +- Clean up container stop. ++ Add support for configurable dns search domains. +- Add support for relative WORKDIR instructions. +- Add --output flag for docker save. +- Remove duplication of DNS entries in config merging. +- Add cpuset.cpus to cgroups and native driver options. +- Remove docker-ci. +- Promote btrfs. btrfs is no longer considered experimental. +- Add --input flag to `docker load`. +- Return error when existing bridge doesn't match IP address. +- Strip comments before parsing line continuations to avoid interpreting instructions as comments. +- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. +- Add systemd implementation of cgroups and make containers show up as systemd units. +- Fix commit and import when no repository is specified. +- Remount /var/lib/docker as --private to fix scaling issue. +- Use the environment's proxy when pinging the remote registry. +- Reduce error level from harmless errors. +* Allow --volumes-from to be individual files. +- Fix expanding buffer in StdCopy. +- Set error regardless of attach or stdin. This fixes #3364. +- Add support for --env-file to load environment variables from files. +- Symlink /etc/mtab and /proc/mounts. +- Allow pushing a single tag. +- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. +- Don't throw error when starting an already running container. +- Fix dynamic port allocation limit. +- remove setupDev from libcontainer. +- Add API version to `docker version`. +- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. +- Fix --volumes-from mount failure. +- Allow non-privileged containers to create device nodes. +- Skip login tests because of external dependency on a hosted service. +- Deprecate `docker images --tree` and `docker images --viz`. +- Deprecate `docker insert`. +- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. +- Add specific error message when hitting 401 over HTTP on push. +- Fix absolute volume check. +- Remove volumes-from from the config. +- Move DNS options to hostconfig. +- Update the apparmor profile for libcontainer. +- Add deprecation notice for `docker commit -run`. + +## 0.9.1 (2014-03-24) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. + +#### Documentation +- Fix external link on security of containers. + +#### Contrib +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. + +#### Hack +- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. + +#### Remote API +- Fix content-type detection in `docker cp`. + +#### Runtime +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Only unshare the mount namespace for execin. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Merge existing config when committing. +- Fix panic in monitor. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Improve deprecation message. +- Fix attach exit on darwin. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). +- devicemapper: succeed immediately when removing non-existent devices. +- devicemapper: increase timeout in waitClose to 10 seconds. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. + +## 0.9.0 (2014-03-10) + +#### Builder +- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. +- Add error to docker build --rm. This adds missing error handling. +- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. +- Make `--rm` the default for `docker build`. + +#### Documentation +- Download the docker client binary for Mac over https. +- Update the titles of the install instructions & descriptions. +* Add instructions for upgrading boot2docker. +* Add port forwarding example in OS X install docs. +- Attempt to disentangle repository and registry. +- Update docs to explain more about `docker ps`. +- Update sshd example to use a Dockerfile. +- Rework some examples, including the Python examples. +- Update docs to include instructions for a container's lifecycle. +- Update docs documentation to discuss the docs branch. +- Don't skip cert check for an example & use HTTPS. +- Bring back the memory and swap accounting section which was lost when the kernel page was removed. +- Explain DNS warnings and how to fix them on systems running and using a local nameserver. + +#### Contrib +- Add Tanglu support for mkimage-debootstrap. +- Add SteamOS support for mkimage-debootstrap. + +#### Hack +- Get package coverage when running integration tests. +- Remove the Vagrantfile. This is being replaced with boot2docker. +- Fix tests on systems where aufs isn't available. +- Update packaging instructions and remove the dependency on lxc. + +#### Remote API +* Move code specific to the API to the api package. +- Fix header content type for the API. Makes all endpoints use proper content type. +- Fix registry auth & remove ping calls from CmdPush and CmdPull. +- Add newlines to the JSON stream functions. + +#### Runtime +* Do not ping the registry from the CLI. All requests to registries flow through the daemon. +- Check for nil information return in the lxc driver. This fixes panics with older lxc versions. +- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. +- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. +* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. +- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. +- Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`. +- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. +- Fix `--run` in `docker commit`. This makes `docker commit --run` work again. +- Fix custom bridge related options. This makes custom bridges work again. ++ Mount-bind the PTY as container console. This allows tmux/screen to run. ++ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. ++ Add native exec driver which uses libcontainer and make it the default exec driver. +- Add support for handling extended attributes in archives. +* Set the container MTU to be the same as the host MTU. ++ Add simple sha256 checksums for layers to speed up `docker push`. +* Improve kernel version parsing. +* Allow flag grouping (`docker run -it`). +- Remove chroot exec driver. +- Fix divide by zero to fix panic. +- Rewrite `docker rmi`. +- Fix docker info with lxc 1.0.0. +- Fix fedora tty with apparmor. +* Don't always append env vars, replace defaults with vars from config. +* Fix a goroutine leak. +* Switch to Go 1.2.1. +- Fix unique constraint error checks. +* Handle symlinks for Docker's data directory and for TMPDIR. +- Add deprecation warnings for flags (-flag is deprecated in favor of --flag) +- Add apparmor profile for the native execution driver. +* Move system specific code from archive to pkg/system. +- Fix duplicate signal for `docker run -i -t` (issue #3336). +- Return correct process pid for lxc. +- Add a -G option to specify the group which unix sockets belong to. ++ Add `-f` flag to `docker rm` to force removal of running containers. ++ Kill ghost containers and restart all ghost containers when the docker daemon restarts. ++ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. + +## 0.8.1 (2014-02-18) + +#### Builder + +- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system +- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported + +#### Documentation + +* Update issue filing instructions +* Warn against the use of symlinks for Docker's storage folder +* Replace the Firefox example with an IceWeasel example +* Rewrite the PostgreSQL example using a Dockerfile and add more details to it +* Improve the OS X documentation + +#### Remote API + +- Fix broken images API for version less than 1.7 +- Use the right encoding for all API endpoints which return JSON +- Move remote api client to api/ +- Queue calls to the API using generic socket wait + +#### Runtime + +- Fix the use of custom settings for bridges and custom bridges +- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures +- Remove two panics which could make Docker crash in some situations +- Don't ping registry from the CLI client +- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks +- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration +- Remove directory when removing devicemapper device. This cleans up leftover mount directories +- Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration +- Ensure `docker cp` stream is closed properly +- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Stop allowing tcp:// as a default tcp bin address which binds to 127.0.0.1:4243 and remove the default port ++ Mount-bind the PTY as container console. This allows tmux and screen to run in a container +- Clean up archive closing. This fixes and improves archive handling +- Fix engine tests on systems where temp directories are symlinked +- Add test methods for save and load +- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart +- Support submodules when building from a GitHub repository +- Quote volume path to allow spaces +- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs + +## 0.8.0 (2014-02-04) + +#### Notable features since 0.7.0 + +* Images and containers can be removed much faster +* Building an image from source with docker build is now much faster +* The Docker daemon starts and stops much faster +* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations +* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations +* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar +* Docker can now create, remove and modify larger numbers of containers and images graciously thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers +With the ongoing changes to the networking and execution subsystems of docker testing these areas have been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages +* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change + +* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed +* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build +* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write +* Docker is officially supported on Mac OS X +* The Docker daemon supports systemd socket activation + +## 0.7.6 (2014-01-14) + +#### Builder + +* Do not follow symlink outside of build context + +#### Runtime + +- Remount bind mounts when ro is specified +* Use https for fetching docker version + +#### Other + +* Inline the test.docker.io fingerprint +* Add ca-certificates to packaging documentation + +## 0.7.5 (2014-01-09) + +#### Builder + +* Disable compression for build. More space usage but a much faster upload +- Fix ADD caching for certain paths +- Do not compress archive from git build + +#### Documentation + +- Fix error in GROUP add example +* Make sure the GPG fingerprint is inline in the documentation +* Give more specific advice on setting up signing of commits for DCO + +#### Runtime + +- Fix misspelled container names +- Do not add hostname when networking is disabled +* Return most recent image from the cache by date +- Return all errors from docker wait +* Add Content-Type Header "application/json" to GET /version and /info responses + +#### Other + +* Update DCO to version 1.1 ++ Update Makefile to use "docker:GIT_BRANCH" as the generated image name +* Update Travis to check for new 1.1 DCO version + +## 0.7.4 (2014-01-07) + +#### Builder + +- Fix ADD caching issue with . prefixed path +- Fix docker build on devicemapper by reverting sparse file tar option +- Fix issue with file caching and prevent wrong cache hit +* Use same error handling while unmarshalling CMD and ENTRYPOINT + +#### Documentation + +* Simplify and streamline Amazon Quickstart +* Install instructions use unprefixed Fedora image +* Update instructions for mtu flag for Docker on GCE ++ Add Ubuntu Saucy to installation +- Fix for wrong version warning on master instead of latest + +#### Runtime + +- Only get the image's rootfs when we need to calculate the image size +- Correctly handle unmapping UDP ports +* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build +- Fix login message to say pull instead of push +- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN +* Make blank -H option default to the same as no -H was sent +* Extract cgroups utilities to own submodule + +#### Other + ++ Add Travis CI configuration to validate DCO and gofmt requirements ++ Add Developer Certificate of Origin Text +* Upgrade VBox Guest Additions +* Check standalone header when pinging a registry server + +## 0.7.3 (2014-01-02) + +#### Builder + ++ Update ADD to use the image cache, based on a hash of the added content +* Add error message for empty Dockerfile + +#### Documentation + +- Fix outdated link to the "Introduction" on www.docker.io ++ Update the docs to get wider when the screen does +- Add information about needing to install LXC when using raw binaries +* Update Fedora documentation to disentangle the docker and docker.io conflict +* Add a note about using the new `-mtu` flag in several GCE zones ++ Add FrugalWare installation instructions ++ Add a more complete example of `docker run` +- Fix API documentation for creating and starting Privileged containers +- Add missing "name" parameter documentation on "/containers/create" +* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration +- Update the 1.8 API documentation with some additions that were added to the docs for 1.7 + +#### Hack + +- Add missing libdevmapper dependency to the packagers documentation +* Update minimum Go requirement to a hard line at Go 1.2+ +* Many minor improvements to the Vagrantfile ++ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) ++ Add coverprofile generation reporting +- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually +* Update Dockerfile to be more canonical and have less spurious warnings during build +- Fix some miscellaneous `docker pull` progress bar display issues +* Migrate more miscellaneous packages under the "pkg" folder +* Update TextMate highlighting to automatically be enabled for files named "Dockerfile" +* Reorganize syntax highlighting files under a common "contrib/syntax" directory +* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation +* Add support for container names in bash completion + +#### Packaging + ++ Add an official Docker client binary for Darwin (Mac OS X) +* Remove empty "Vendor" string and added "License" on deb package ++ Add a stubbed version of "/etc/default/docker" in the deb package + +#### Runtime + +* Update layer application to extract tars in place, avoiding file churn while handling whiteouts +- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) +* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) ++ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions +- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files +* Update container name validation to include '.' +- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected +* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler +* Update to use proper box-drawing characters everywhere in `docker images -tree` +* Move MTU setting from LXC configuration to directly use netlink +* Add `-S` option to external tar invocation for more efficient spare file handling ++ Add arch/os info to User-Agent string, especially for registry requests ++ Add `-mtu` option to Docker daemon for configuring MTU +- Fix `docker build` to exit with a non-zero exit code on error ++ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation + +## 0.7.2 (2013-12-16) + +#### Runtime + ++ Validate container names on creation with standard regex +* Increase maximum image depth to 127 from 42 +* Continue to move api endpoints to the job api ++ Add -bip flag to allow specification of dynamic bridge IP via CIDR +- Allow bridge creation when ipv6 is not enabled on certain systems +* Set hostname and IP address from within dockerinit +* Drop capabilities from within dockerinit +- Fix volumes on host when symlink is present the image +- Prevent deletion of image if ANY container is depending on it even if the container is not running +* Update docker push to use new progress display +* Use os.Lstat to allow mounting unix sockets when inspecting volumes +- Adjust handling of inactive user login +- Add missing defines in devicemapper for older kernels +- Allow untag operations with no container validation +- Add auth config to docker build + +#### Documentation + +* Add more information about Docker logging ++ Add RHEL documentation +* Add a direct example for changing the CMD that is run in a container +* Update Arch installation documentation ++ Add section on Trusted Builds ++ Add Network documentation page + +#### Other + ++ Add new cover bundle for providing code coverage reporting +* Separate integration tests in bundles +* Make Tianon the hack maintainer +* Update mkimage-debootstrap with more tweaks for keeping images small +* Use https to get the install script +* Remove vendored dotcloud/tar now that Go 1.2 has been released + +## 0.7.1 (2013-12-05) + +#### Documentation + ++ Add @SvenDowideit as documentation maintainer ++ Add links example ++ Add documentation regarding ambassador pattern ++ Add Google Cloud Platform docs ++ Add dockerfile best practices +* Update doc for RHEL +* Update doc for registry +* Update Postgres examples +* Update doc for Ubuntu install +* Improve remote api doc + +#### Runtime + ++ Add hostconfig to docker inspect ++ Implement `docker log -f` to stream logs ++ Add env variable to disable kernel version warning ++ Add -format to `docker inspect` ++ Support bind-mount for files +- Fix bridge creation on RHEL +- Fix image size calculation +- Make sure iptables are called even if the bridge already exists +- Fix issue with stderr only attach +- Remove init layer when destroying a container +- Fix same port binding on different interfaces +- `docker build` now returns the correct exit code +- Fix `docker port` to display correct port +- `docker build` now check that the dockerfile exists client side +- `docker attach` now returns the correct exit code +- Remove the name entry when the container does not exist + +#### Registry + +* Improve progress bars, add ETA for downloads +* Simultaneous pulls now waits for the first to finish instead of failing +- Tag only the top-layer image when pushing to registry +- Fix issue with offline image transfer +- Fix issue preventing using ':' in password for registry + +#### Other + ++ Add pprof handler for debug ++ Create a Makefile +* Use stdlib tar that now includes fix +* Improve make.sh test script +* Handle SIGQUIT on the daemon +* Disable verbose during tests +* Upgrade to go1.2 for official build +* Improve unit tests +* The test suite now runs all tests even if one fails +* Refactor C in Go (Devmapper) +- Fix OS X compilation + +## 0.7.0 (2013-11-25) + +#### Notable features since 0.6.0 + +* Storage drivers: choose from aufs, device-mapper, or vfs. +* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. +* Links: compose complex software stacks by connecting containers to each other. +* Container naming: organize your containers by giving them memorable names. +* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. +* Offline transfer: push and pull images to the filesystem without losing information. +* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. + +## 0.6.7 (2013-11-21) + +#### Runtime + +* Improve stability, fixes some race conditions +* Skip the volumes mounted when deleting the volumes of container. +* Fix layer size computation: handle hard links correctly +* Use the work Path for docker cp CONTAINER:PATH +* Fix tmp dir never cleanup +* Speedup docker ps +* More informative error message on name collisions +* Fix nameserver regex +* Always return long id's +* Fix container restart race condition +* Keep published ports on docker stop;docker start +* Fix container networking on Fedora +* Correctly express "any address" to iptables +* Fix network setup when reconnecting to ghost container +* Prevent deletion if image is used by a running container +* Lock around read operations in graph + +#### RemoteAPI + +* Return full ID on docker rmi + +#### Client + ++ Add -tree option to images ++ Offline image transfer +* Exit with status 2 on usage error and display usage on stderr +* Do not forward SIGCHLD to container +* Use string timestamp for docker events -since + +#### Other + +* Update to go 1.2rc5 ++ Add /etc/default/docker support to upstart + +## 0.6.6 (2013-11-06) + +#### Runtime + +* Ensure container name on register +* Fix regression in /etc/hosts ++ Add lock around write operations in graph +* Check if port is valid +* Fix restart runtime error with ghost container networking ++ Add some more colors and animals to increase the pool of generated names +* Fix issues in docker inspect ++ Escape apparmor confinement ++ Set environment variables using a file. +* Prevent docker insert to erase something ++ Prevent DNS server conflicts in CreateBridgeIface ++ Validate bind mounts on the server side ++ Use parent image config in docker build +* Fix regression in /etc/hosts + +#### Client + ++ Add -P flag to publish all exposed ports ++ Add -notrunc and -q flags to docker history +* Fix docker commit, tag and import usage ++ Add stars, trusted builds and library flags in docker search +* Fix docker logs with tty + +#### RemoteAPI + +* Make /events API send headers immediately +* Do not split last column docker top ++ Add size to history + +#### Other + ++ Contrib: Desktop integration. Firefox usecase. ++ Dockerfile: bump to go1.2rc3 + +## 0.6.5 (2013-10-29) + +#### Runtime + ++ Containers can now be named ++ Containers can now be linked together for service discovery ++ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors ++ Automatically start crashed containers after a reboot ++ Expose IP, port, and proto as separate environment vars for container links +* Allow ports to be published to specific ips +* Prohibit inter-container communication by default +- Ignore ErrClosedPipe for stdin in Container.Attach +- Remove unused field kernelVersion +* Fix issue when mounting subdirectories of /mnt in container +- Fix untag during removal of images +* Check return value of syscall.Chdir when changing working directory inside dockerinit + +#### Client + +- Only pass stdin to hijack when needed to avoid closed pipe errors +* Use less reflection in command-line method invocation +- Monitor the tty size after starting the container, not prior +- Remove useless os.Exit() calls after log.Fatal + +#### Hack + ++ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian +* Add -p option to invoke debootstrap with http_proxy +- Update install.sh with $sh_c to get sudo/su for modprobe +* Update all the mkimage scripts to use --numeric-owner as a tar argument +* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues + +#### Other + +* Documentation: Fix the flags for nc in example +* Testing: Remove warnings and prevent mount issues +- Testing: Change logic for tty resize to avoid warning in tests +- Builder: Fix race condition in docker build with verbose output +- Registry: Fix content-type for PushImageJSONIndex method +* Contrib: Improve helper tools to generate debian and Arch linux server images + +## 0.6.4 (2013-10-16) + +#### Runtime + +- Add cleanup of container when Start() fails +* Add better comments to utils/stdcopy.go +* Add utils.Errorf for error logging ++ Add -rm to docker run for removing a container on exit +- Remove error messages which are not actually errors +- Fix `docker rm` with volumes +- Fix some error cases where an HTTP body might not be closed +- Fix panic with wrong dockercfg file +- Fix the attach behavior with -i +* Record termination time in state. +- Use empty string so TempDir uses the OS's temp dir automatically +- Make sure to close the network allocators ++ Autorestart containers by default +* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` +* lxc: Allow set_file_cap capability in container +- Move run -rm to the cli only +* Split stdout stderr +* Always create a new session for the container + +#### Testing + +- Add aggregated docker-ci email report +- Add cleanup to remove leftover containers +* Add nightly release to docker-ci +* Add more tests around auth.ResolveAuthConfig +- Remove a few errors in tests +- Catch errClosing error when TCP and UDP proxies are terminated +* Only run certain tests with TESTFLAGS='-run TestName' make.sh +* Prevent docker-ci to test closing PRs +* Replace panic by log.Fatal in tests +- Increase TestRunDetach timeout + +#### Documentation + +* Add initial draft of the Docker infrastructure doc +* Add devenvironment link to CONTRIBUTING.md +* Add `apt-get install curl` to Ubuntu docs +* Add explanation for export restrictions +* Add .dockercfg doc +* Remove Gentoo install notes about #1422 workaround +* Fix help text for -v option +* Fix Ping endpoint documentation +- Fix parameter names in docs for ADD command +- Fix ironic typo in changelog +* Various command fixes in postgres example +* Document how to edit and release docs +- Minor updates to `postgresql_service.rst` +* Clarify LGTM process to contributors +- Corrected error in the package name +* Document what `vagrant up` is actually doing ++ improve doc search results +* Cleanup whitespace in API 1.5 docs +* use angle brackets in MAINTAINER example email +* Update archlinux.rst ++ Changes to a new style for the docs. Includes version switcher. +* Formatting, add information about multiline json +* Improve registry and index REST API documentation +- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 +* Update Gentoo installation documentation now that we're in the portage tree proper +* Cleanup and reorganize docs and tooling for contributors and maintainers +- Minor spelling correction of protocoll -> protocol + +#### Contrib + +* Add vim syntax highlighting for Dockerfiles from @honza +* Add mkimage-arch.sh +* Reorganize contributed completion scripts to add zsh completion + +#### Hack + +* Add vagrant user to the docker group +* Add proper bash completion for "docker push" +* Add xz utils as a runtime dep +* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates ++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link +* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly ++ Add @tianon to hack/MAINTAINERS +* Improve network performance for VirtualBox +* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) +- Fix contrib/mkimage-debian.sh apt caching prevention ++ Add Dockerfile.tmLanguage to contrib +* Configured FPM to make /etc/init/docker.conf a config file +* Enable SSH Agent forwarding in Vagrant VM +* Several small tweaks/fixes for contrib/mkimage-debian.sh + +#### Other + +- Builder: Abort build if mergeConfig returns an error and fix duplicate error message +- Packaging: Remove deprecated packaging directory +- Registry: Use correct auth config when logging in. +- Registry: Fix the error message so it is the same as the regex + +## 0.6.3 (2013-09-23) + +#### Packaging + +* Add 'docker' group on install for ubuntu package +* Update tar vendor dependency +* Download apt key over HTTPS + +#### Runtime + +- Only copy and change permissions on non-bindmount volumes +* Allow multiple volumes-from +- Fix HTTP imports from STDIN + +#### Documentation + +* Update section on extracting the docker binary after build +* Update development environment docs for new build process +* Remove 'base' image from documentation + +#### Other + +- Client: Fix detach issue +- Registry: Update regular expression to match index + +## 0.6.2 (2013-09-17) + +#### Runtime + ++ Add domainname support ++ Implement image filtering with path.Match +* Remove unnecessary warnings +* Remove os/user dependency +* Only mount the hostname file when the config exists +* Handle signals within the `docker login` command +- UID and GID are now also applied to volumes +- `docker start` set error code upon error +- `docker run` set the same error code as the process started + +#### Builder + ++ Add -rm option in order to remove intermediate containers +* Allow multiline for the RUN instruction + +#### Registry + +* Implement login with private registry +- Fix push issues + +#### Other + ++ Hack: Vendor all dependencies +* Remote API: Bump to v1.5 +* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. +* Documentation: General improvements + +## 0.6.1 (2013-08-23) + +#### Registry + +* Pass "meta" headers in API calls to the registry + +#### Packaging + +- Use correct upstart script with new build tool +- Use libffi-dev, don`t build it from sources +- Remove duplicate mercurial install command + +## 0.6.0 (2013-08-22) + +#### Runtime + ++ Add lxc-conf flag to allow custom lxc options ++ Add an option to set the working directory +* Add Image name to LogEvent tests ++ Add -privileged flag and relevant tests, docs, and examples +* Add websocket support to /container//attach/ws +* Add warning when net.ipv4.ip_forwarding = 0 +* Add hostname to environment +* Add last stable version in `docker version` +- Fix race conditions in parallel pull +- Fix Graph ByParent() to generate list of child images per parent image. +- Fix typo: fmt.Sprint -> fmt.Sprintf +- Fix small \n error un docker build +* Fix to "Inject dockerinit at /.dockerinit" +* Fix #910. print user name to docker info output +* Use Go 1.1.2 for dockerbuilder +* Use ranged for loop on channels +- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete +- Improve CMD, ENTRYPOINT, and attach docs. +- Improve connect message with socket error +- Load authConfig only when needed and fix useless WARNING +- Show tag used when image is missing +* Apply volumes-from before creating volumes +- Make docker run handle SIGINT/SIGTERM +- Prevent crash when .dockercfg not readable +- Install script should be fetched over https, not http. +* API, issue 1471: Use groups for socket permissions +- Correctly detect IPv4 forwarding +* Mount /dev/shm as a tmpfs +- Switch from http to https for get.docker.io +* Let userland proxy handle container-bound traffic +* Update the Docker CLI to specify a value for the "Host" header. +- Change network range to avoid conflict with EC2 DNS +- Reduce connect and read timeout when pinging the registry +* Parallel pull +- Handle ip route showing mask-less IP addresses +* Allow ENTRYPOINT without CMD +- Always consider localhost as a domain name when parsing the FQN repos name +* Refactor checksum + +#### Documentation + +* Add MongoDB image example +* Add instructions for creating and using the docker group +* Add sudo to examples and installation to documentation +* Add ufw doc +* Add a reference to ps -a +* Add information about Docker`s high level tools over LXC. +* Fix typo in docs for docker run -dns +* Fix a typo in the ubuntu installation guide +* Fix to docs regarding adding docker groups +* Update default -H docs +* Update readme with dependencies for building +* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 +* PostgreSQL service example in documentation +* Suggest installing linux-headers by default. +* Change the twitter handle +* Clarify Amazon EC2 installation +* 'Base' image is deprecated and should no longer be referenced in the docs. +* Move note about officially supported kernel +- Solved the logo being squished in Safari + +#### Builder + ++ Add USER instruction do Dockerfile ++ Add workdir support for the Buildfile +* Add no cache for docker build +- Fix docker build and docker events output +- Only count known instructions as build steps +- Make sure ENV instruction within build perform a commit each time +- Forbid certain paths within docker build ADD +- Repository name (and optionally a tag) in build usage +- Make sure ADD will create everything in 0755 + +#### Remote API + +* Sort Images by most recent creation date. +* Reworking opaque requests in registry module +* Add image name in /events +* Use mime pkg to parse Content-Type +* 650 http utils and user agent field + +#### Hack + ++ Bash Completion: Limit commands to containers of a relevant state +* Add docker dependencies coverage testing into docker-ci + +#### Packaging + ++ Docker-brew 0.5.2 support and memory footprint reduction +* Add new docker dependencies into docker-ci +- Revert "docker.upstart: avoid spawning a `sh` process" ++ Docker-brew and Docker standard library ++ Release docker with docker +* Fix the upstart script generated by get.docker.io +* Enabled the docs to generate manpages. +* Revert Bind daemon to 0.0.0.0 in Vagrant. + +#### Register + +* Improve auth push +* Registry unit tests + mock registry + +#### Tests + +* Improve TestKillDifferentUser to prevent timeout on buildbot +- Fix typo in TestBindMounts (runContainer called without image) +* Improve TestGetContainersTop so it does not rely on sleep +* Relax the lo interface test to allow iface index != 1 +* Add registry functional test to docker-ci +* Add some tests in server and utils + +#### Other + +* Contrib: bash completion script +* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host +* Don`t read from stdout when only attached to stdin + +## 0.5.3 (2013-08-13) + +#### Runtime + +* Use docker group for socket permissions +- Spawn shell within upstart script +- Handle ip route showing mask-less IP addresses +- Add hostname to environment + +#### Builder + +- Make sure ENV instruction within build perform a commit each time + +## 0.5.2 (2013-08-08) + +* Builder: Forbid certain paths within docker build ADD +- Runtime: Change network range to avoid conflict with EC2 DNS +* API: Change daemon to listen on unix socket by default + +## 0.5.1 (2013-07-30) + +#### Runtime + ++ Add `ps` args to `docker top` ++ Add support for container ID files (pidfile like) ++ Add container=lxc in default env ++ Support networkless containers with `docker run -n` and `docker -d -b=none` +* Stdout/stderr logs are now stored in the same file as JSON +* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3. +* Change .dockercfg format to json and support multiple auth remote +- Do not override volumes from config +- Fix issue with EXPOSE override + +#### API + ++ Docker client now sets useragent (RFC 2616) ++ Add /events endpoint + +#### Builder + ++ ADD command now understands URLs ++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables +- Create directories with 755 instead of 700 within ADD instruction + +#### Hack + +* Simplify unit tests with helpers +* Improve docker.upstart event +* Add coverage testing into docker-ci + +## 0.5.0 (2013-07-17) + +#### Runtime + ++ List all processes running inside a container with 'docker top' ++ Host directories can be mounted as volumes with 'docker run -v' ++ Containers can expose public UDP ports (eg, '-p 123/udp') ++ Optionally specify an exact public port (eg. '-p 80:4500') +* 'docker login' supports additional options +- Don't save a container`s hostname when committing an image. + +#### Registry + ++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries +- Fix issues when uploading images to a private registry + +#### Builder + ++ ENTRYPOINT instruction sets a default binary entry point to a container ++ VOLUME instruction marks a part of the container as persistent data +* 'docker build' displays the full output of a build by default + +## 0.4.8 (2013-07-01) + ++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container. - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID. +- Tests: Fix issues in the test suite + +## 0.4.7 (2013-06-28) + +#### Remote API + +* The progress bar updates faster when downloading and uploading large files +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version ++ Host directories can be mounted as volumes with 'docker run -b' +- fix an issue when only attaching to stdin +* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts + +#### Hack + +* Improve test suite and dev environment +* Remove dependency on unit tests on 'os/user' + +#### Other + +* Registry: easier push/pull to a custom registry ++ Documentation: add terminology section + +## 0.4.6 (2013-06-22) + +- Runtime: fix a bug which caused creation of empty images (and volumes) to crash. + +## 0.4.5 (2013-06-21) + ++ Builder: 'docker build git://URL' fetches and builds a remote git repository +* Runtime: 'docker ps -s' optionally prints container size +* Tests: improved and simplified +- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. +- Builder: fix a regression when using ADD with single regular file. + +## 0.4.4 (2013-06-19) + +- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. + +## 0.4.3 (2013-06-19) + +#### Builder + ++ ADD of a local file will detect tar archives and unpack them +* ADD improvements: use tar for copy + automatically unpack local archives +* ADD uses tar/untar for copies instead of calling 'cp -ar' +* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. +- Fix a bug which caused builds to fail if ADD was the first command +* Nicer output for 'docker build' + +#### Runtime + +* Remove bsdtar dependency +* Add unix socket and multiple -H support +* Prevent rm of running containers +* Use go1.1 cookiejar +- Fix issue detaching from running TTY container +- Forbid parallel push/pull for a single image/repo. Fixes #311 +- Fix race condition within Run command when attaching. + +#### Client + +* HumanReadable ProgressBar sizes in pull +* Fix docker version`s git commit output + +#### API + +* Send all tags on History API call +* Add tag lookup to history command. Fixes #882 + +#### Documentation + +- Fix missing command in irc bouncer example + +## 0.4.2 (2013-06-17) + +- Packaging: Bumped version to work around an Ubuntu bug + +## 0.4.1 (2013-06-17) + +#### Remote Api + ++ Add flag to enable cross domain requests ++ Add images and containers sizes in docker ps and docker images + +#### Runtime + ++ Configure dns configuration host-wide with 'docker -d -dns' ++ Detect faulty DNS configuration and replace it with a public default ++ Allow docker run : ++ You can now specify public port (ex: -p 80:4500) +* Improve image removal to garbage-collect unreferenced parents + +#### Client + +* Allow multiple params in inspect +* Print the container id before the hijack in `docker run` + +#### Registry + +* Add regexp check on repo`s name +* Move auth to the client +- Remove login check on pull + +#### Other + +* Vagrantfile: Add the rest api port to vagrantfile`s port_forward +* Upgrade to Go 1.1 +- Builder: don`t ignore last line in Dockerfile when it doesn`t end with \n + +## 0.4.0 (2013-06-03) + +#### Builder + ++ Introducing Builder ++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile + +#### Remote API + ++ Introducing Remote API ++ control Docker programmatically using a simple HTTP/json API + +#### Runtime + +* Various reliability and usability improvements + +## 0.3.4 (2013-05-30) + +#### Builder + ++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile ++ 'docker build -t FOO' applies the tag FOO to the newly built container. + +#### Runtime + ++ Interactive TTYs correctly handle window resize +* Fix how configuration is merged between layers + +#### Remote API + ++ Split stdout and stderr on 'docker run' ++ Optionally listen on a different IP and port (use at your own risk) + +#### Documentation + +* Improve install instructions. + +## 0.3.3 (2013-05-23) + +- Registry: Fix push regression +- Various bugfixes + +## 0.3.2 (2013-05-09) + +#### Registry + +* Improve the checksum process +* Use the size to have a good progress bar while pushing +* Use the actual archive if it exists in order to speed up the push +- Fix error 400 on push + +#### Runtime + +* Store the actual archive on commit + +## 0.3.1 (2013-05-08) + +#### Builder + ++ Implement the autorun capability within docker builder ++ Add caching to docker builder ++ Add support for docker builder with native API as top level command ++ Implement ENV within docker builder +- Check the command existence prior create and add Unit tests for the case +* use any whitespaces instead of tabs + +#### Runtime + ++ Add go version to debug infos +* Kernel version - don`t show the dash if flavor is empty + +#### Registry + ++ Add docker search top level command in order to search a repository +- Fix pull for official images with specific tag +- Fix issue when login in with a different user and trying to push +* Improve checksum - async calculation + +#### Images + ++ Output graph of images to dot (graphviz) +- Fix ByParent function + +#### Documentation + ++ New introduction and high-level overview ++ Add the documentation for docker builder +- CSS fix for docker documentation to make REST API docs look better. +- Fix CouchDB example page header mistake +- Fix README formatting +* Update www.docker.io website. + +#### Other + ++ Website: new high-level overview +- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc +* Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker + +## 0.3.0 (2013-05-06) + +#### Runtime + +- Fix the command existence check +- strings.Split may return an empty string on no match +- Fix an index out of range crash if cgroup memory is not + +#### Documentation + +* Various improvements ++ New example: sharing data between 2 couchdb databases + +#### Other + +* Vagrant: Use only one deb line in /etc/apt ++ Registry: Implement the new registry + +## 0.2.2 (2013-05-03) + ++ Support for data volumes ('docker run -v=PATH') ++ Share data volumes between containers ('docker run -volumes-from') ++ Improve documentation +* Upgrade to Go 1.0.3 +* Various upgrades to the dev environment for contributors + +## 0.2.1 (2013-05-01) + ++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc. +* Improve install process on Vagrant ++ New Dockerfile operation: "maintainer" ++ New Dockerfile operation: "expose" ++ New Dockerfile operation: "cmd" ++ Contrib script to build a Debian base layer ++ 'docker -d -r': restart crashed containers at daemon startup +* Runtime: improve test coverage + +## 0.2.0 (2013-04-23) + +- Runtime: ghost containers can be killed and waited for +* Documentation: update install instructions +- Packaging: fix Vagrantfile +- Development: automate releasing binaries and ubuntu packages ++ Add a changelog +- Various bugfixes + +## 0.1.8 (2013-04-22) + +- Dynamically detect cgroup capabilities +- Issue stability warning on kernels <3.8 +- 'docker push' buffers on disk instead of memory +- Fix 'docker diff' for removed files +- Fix 'docker stop' for ghost containers +- Fix handling of pidfile +- Various bugfixes and stability improvements + +## 0.1.7 (2013-04-18) + +- Container ports are available on localhost +- 'docker ps' shows allocated TCP ports +- Contributors can run 'make hack' to start a continuous integration VM +- Streamline ubuntu packaging & uploading +- Various bugfixes and stability improvements + +## 0.1.6 (2013-04-17) + +- Record the author an image with 'docker commit -author' + +## 0.1.5 (2013-04-17) + +- Disable standalone mode +- Use a custom DNS resolver with 'docker -d -dns' +- Detect ghost containers +- Improve diagnosis of missing system capabilities +- Allow disabling memory limits at compile time +- Add debian packaging +- Documentation: installing on Arch Linux +- Documentation: running Redis on docker +- Fix lxc 0.9 compatibility +- Automatically load aufs module +- Various bugfixes and stability improvements + +## 0.1.4 (2013-04-09) + +- Full support for TTY emulation +- Detach from a TTY session with the escape sequence `C-p C-q` +- Various bugfixes and stability improvements +- Minor UI improvements +- Automatically create our own bridge interface 'docker0' + +## 0.1.3 (2013-04-04) + +- Choose TCP frontend port with '-p :PORT' +- Layer format is versioned +- Major reliability improvements to the process manager +- Various bugfixes and stability improvements + +## 0.1.2 (2013-04-03) + +- Set container hostname with 'docker run -h' +- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' +- Various bugfixes and stability improvements +- UI polish +- Progress bar on push/pull +- Use XZ compression by default +- Make IP allocator lazy + +## 0.1.1 (2013-03-31) + +- Display shorthand IDs for convenience +- Stabilize process management +- Layers can include a commit message +- Simplified 'docker attach' +- Fix support for re-attaching +- Various bugfixes and stability improvements +- Auto-download at run +- Auto-login on push +- Beefed up documentation + +## 0.1.0 (2013-03-23) + +Initial public release + +- Implement registry in order to push/pull images +- TCP port allocation +- Fix termcaps on Linux +- Add documentation +- Add Vagrant support with Vagrantfile +- Add unit tests +- Add repository/tags to ease image management +- Improve the layer implementation diff --git a/vendor/github.com/docker/docker/CONTRIBUTING.md b/vendor/github.com/docker/docker/CONTRIBUTING.md new file mode 100644 index 0000000..eb5f8ab --- /dev/null +++ b/vendor/github.com/docker/docker/CONTRIBUTING.md @@ -0,0 +1,401 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! We have a contributor's guide that explains +[setting up a Docker development environment and the contribution +process](https://docs.docker.com/opensource/project/who-written-for/). + +[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/) + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-other-issues) +* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + + +## Reporting other issues + +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + +Check that [our issue database](https://github.com/docker/docker/issues) +doesn't already include that problem or suggestion before submitting an issue. +If you find a match, you can use the "subscribe" button to get notified on +updates. Do *not* leave random "+1" or "I have this too" comments, as they +only clutter the discussion, and don't help resolving it. However, if you +have ways to reproduce the issue or have additional information that may help +resolving the issue, please leave a comment. + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). +Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). + +## Quick contribution tips and guidelines + +This section gives the experienced contributor some tips and guidelines. + +### Pull requests are always welcome + +Not sure if that typo is worth a pull request? Found a bug and know how to fix +it? Do it! We will appreciate it. Any significant improvement should be +documented as [a GitHub issue](https://github.com/docker/docker/issues) before +anybody starts working on it. + +We are always thrilled to receive pull requests. We do our best to process them +quickly. If your pull request is not accepted on the first try, +don't get discouraged! Our contributor's guide explains [the review process we +use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). + +### Design and cleanup proposals + +You can propose new designs for existing Docker features. You can also design +entirely new features. We really appreciate contributors who want to refactor or +otherwise cleanup our project. For information on making these types of +contributions, see [the advanced contribution +section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in +the contributors guide. + +We try hard to keep Docker lean and focused. Docker can't do everything for +everybody. This means that we might decide against incorporating a new feature. +However, there might be a way to implement that feature *on top of* Docker. + +### Talking to other Docker users and contributors + + + + + + + + + + + + + + + + + + + + + + + + +
Forums + A public forum for users to discuss questions and explore current design patterns and + best practices about Docker and related projects in the Docker Ecosystem. To participate, + just log in with your Docker Hub account on https://forums.docker.com. +
Internet Relay Chat (IRC) +

+ IRC a direct line to our most knowledgeable Docker users; we have + both the #docker and #docker-dev group on + irc.freenode.net. + IRC is a rich chat protocol but it can overwhelm new users. You can search + our chat archives. +

+

+ Read our IRC quickstart guide + for an easy way to get started. +

+
Google Group + The docker-dev + group is for contributors and other people contributing to the Docker project. + You can join them without a google account by sending an email to + docker-dev+subscribe@googlegroups.com. + After receiving the join-request message, you can simply reply to that to confirm the subscription. +
Twitter + You can follow Docker's Twitter feed + to get updates on our products. You can also tweet us questions or just + share blogs or stories. +
Stack Overflow + Stack Overflow has over 17000 Docker questions listed. We regularly + monitor Docker questions + and so do many other knowledgeable Docker users. +
+ + +### Conventions + +Fork the repository and make changes on your fork in a feature branch: + +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of + the issue. +- If it's a feature branch, create an enhancement issue to announce + your intentions, and name it XXXX-something where XXXX is the number of the + issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. [Run the full test +suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before +submitting a pull request. + +Update the documentation when creating or modifying features. Test your +documentation changes for clarity, concision, and correctness, as well as a +clean documentation build. See our contributors guide for [our style +guide](https://docs.docker.com/opensource/doc-style) and instructions on [building +the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plug-ins that do this automatically. + +Pull request descriptions should be as clear as possible and include a reference +to all the issues that they address. + +Commit messages must start with a capitalized and short summary (max. 50 chars) +written in the imperative, followed by an optional, more detailed explanatory +text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Post +a comment after pushing. New commits show up in the pull request automatically, +but the reviewers are notified only when you comment. + +Pull requests must be cleanly rebased on top of master without multiple branches +mixed into the PR. + +**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your +feature branch to update your pull request rather than `merge master`. + +Before you make a pull request, squash your commits into logical units of work +using `git rebase -i` and `git push -f`. A logical unit of work is a consistent +set of patches that should be reviewed together: for example, upgrading the +version of a vendored dependency and taking advantage of its now available new +feature constitute two separate units of work. Implementing a new function and +calling it in another file constitute a single logical unit of work. The very +high majority of submissions should have a single commit, so if in doubt: squash +down to one. + +After every commit, [make sure the test suite passes] +(https://docs.docker.com/opensource/project/test-and-docs/). Include documentation +changes in the same pull request so that a revert would remove all traces of +the feature or fix. + +Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that +close an issue. Including references automatically closes the issue on a merge. + +Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly +from the Git history. + +Please see the [Coding Style](#coding-style) for further guidelines. + +### Merge approval + +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to +indicate acceptance. + +A change requires LGTMs from an absolute majority of the maintainers of each +component affected. For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. + +For more details, see the [MAINTAINERS](MAINTAINERS) page. + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +### How can I become a maintainer? + +The procedures for adding new maintainers are explained in the +global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS) +file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/) +repository. + +Don't forget: being a maintainer is a time investment. Make sure you +will have time to make yourself available. You don't have to be a +maintainer to make a difference on the project! + +## Docker community guidelines + +We want to keep the Docker community awesome, growing and collaborative. We need +your help to keep it that way. To help with this we've come up with some general +guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members: + no regional, racial, gender, or other abuse will be tolerated. We like + nice people way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community feel + welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break + the law. + +* Stay on topic: Make sure that you are posting to the correct channel and + avoid off-topic discussions. Remember when you update an issue or respond + to an email you are potentially sending to a large number of people. Please + consider this before you update. Also remember that nobody likes spam. + +* Don't send email to the maintainers: There's no need to send email to the + maintainers to ask them to investigate an issue or to take a look at a + pull request. Instead of sending an email, GitHub mentions should be + used to ping maintainers to review a pull request, a proposal or an + issue. + +### Guideline violations — 3 strikes method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't hold a + grudge. + +* People who commit minor infractions will get some education, rather than + hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how much + you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or forgiveness. + +* Contact abuse@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with a + fair solution if there has been a misunderstanding. + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](https://golang.org/doc/effective_go.html). The +[Go Blog](https://blog.golang.org) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile new file mode 100644 index 0000000..ce2d702 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile @@ -0,0 +1,246 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Add zfs ppa +RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 \ + || apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 +RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + binutils-mingw-w64 \ + bsdmainutils \ + btrfs-tools \ + build-essential \ + clang \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + gcc-mingw-w64 \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libnl-3-dev \ + libprotobuf-c0-dev \ + libprotobuf-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + net-tools \ + pkg-config \ + protobuf-compiler \ + protobuf-c-compiler \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + ubuntu-zfs \ + xfsprogs \ + vim-common \ + libzfs-dev \ + tar \ + zip \ + --no-install-recommends \ + && pip install awscli==1.10.15 +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Configure the container for OSX cross compilation +ENV OSX_SDK MacOSX10.11.sdk +ENV OSX_CROSS_COMMIT a9317c18a3a457ca0a657f08cc4d0d43c6cf8953 +RUN set -x \ + && export OSXCROSS_PATH="/osxcross" \ + && git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ + && ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \ + && curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \ + && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh +ENV PATH /osxcross/target/bin:$PATH + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 \ + freebsd/amd64 freebsd/386 freebsd/arm \ + windows/amd64 windows/386 \ + solaris/amd64 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install CRIU for checkpoint/restore support +ENV CRIU_VERSION 2.2 +RUN mkdir -p /usr/src/criu \ + && curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \ + && cd /usr/src/criu \ + && make \ + && make install-criu + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Install yamllint for validating swagger.yaml +RUN pip install yamllint==1.5.0 + +# Install go-swagger for validating swagger.yaml +ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb +RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \ + && (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \ + && go install -v github.com/go-swagger/go-swagger/cmd/swagger + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc +# Add integration helps to bashrc +RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \ + busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \ + debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \ + hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.aarch64 b/vendor/github.com/docker/docker/Dockerfile.aarch64 new file mode 100644 index 0000000..6112f80 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.aarch64 @@ -0,0 +1,175 @@ +# This file describes the standard way to build Docker on aarch64, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.aarch64 . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM aarch64/ubuntu:wily + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + g++ \ + gcc \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libc6-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-dev \ + mercurial \ + net-tools \ + parallel \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + gccgo \ + iproute2 \ + iputils-ping \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support aarch64 properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# We don't have official binary tarballs for ARM64, eigher for Go or bootstrap, +# so we use gccgo as bootstrap to build Go from source code. +# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because +# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH +ENV GOPATH /go + +# Only install one version of the registry, because old version which support +# schema1 manifests is not working on ARM64, we should skip integration-cli +# tests for schema1 manifests on ARM64. +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \ + aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \ + aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \ + aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.armhf b/vendor/github.com/docker/docker/Dockerfile.armhf new file mode 100644 index 0000000..1aebc16 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.armhf @@ -0,0 +1,182 @@ +# This file describes the standard way to build Docker on ARMv7, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.armhf . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + createrepo \ + curl \ + cmake \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends \ + && pip install awscli==1.10.15 + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \ + armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \ + armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \ + armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.ppc64le b/vendor/github.com/docker/docker/Dockerfile.ppc64le new file mode 100644 index 0000000..1f9f500 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.ppc64le @@ -0,0 +1,188 @@ +# This file describes the standard way to build Docker on ppc64le, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.ppc64le . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM ppc64le/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support ppc64le properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped in jessie is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + + +# Install Go +# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \ + ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \ + ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \ + ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.s390x b/vendor/github.com/docker/docker/Dockerfile.s390x new file mode 100644 index 0000000..ba94bc7 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.s390x @@ -0,0 +1,190 @@ +# This file describes the standard way to build Docker on s390x, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.s390x . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM s390x/gcc:6.1 + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# glibc in Debian has a bug specific to s390x that won't be fixed until Debian 8.6 is released +# - https://github.com/docker/docker/issues/24748 +# - https://sourceware.org/git/?p=glibc.git;a=commit;h=890b7a4b33d482b5c768ab47d70758b80227e9bc +# - https://sourceware.org/git/?p=glibc.git;a=commit;h=2e807f29595eb5b1e5d0decc6e356a3562ecc58e +RUN echo 'deb http://httpredir.debian.org/debian jessie-proposed-updates main' >> /etc/apt/sources.list.d/pu.list \ + && apt-get update \ + && apt-get install -y libc6 \ + && rm -rf /var/lib/apt/lists/* + +# Install seccomp: the version shipped in jessie is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support s390x properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux seccomp + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \ + s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \ + s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \ + s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.simple b/vendor/github.com/docker/docker/Dockerfile.simple new file mode 100644 index 0000000..8eeb3d9 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.simple @@ -0,0 +1,73 @@ +# docker build -t docker:simple -f Dockerfile.simple . +# docker run --rm docker:simple hack/make.sh dynbinary +# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit +# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli + +# This represents the bare minimum required to build and test Docker. + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Compile and runtime deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + btrfs-tools \ + build-essential \ + curl \ + cmake \ + gcc \ + git \ + libapparmor-dev \ + libdevmapper-dev \ + libsqlite3-dev \ + \ + ca-certificates \ + e2fsprogs \ + iptables \ + procps \ + xfsprogs \ + xz-utils \ + \ + aufs-tools \ + vim-common \ + && rm -rf /var/lib/apt/lists/* + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go +ENV CGO_LDFLAGS -L/lib + +# Install runc, containerd, tini and docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh runc containerd tini proxy + +ENV AUTO_GOPATH 1 +WORKDIR /usr/src/docker +COPY . /usr/src/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.solaris b/vendor/github.com/docker/docker/Dockerfile.solaris new file mode 100644 index 0000000..bb342e5 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.solaris @@ -0,0 +1,20 @@ +# Defines an image that hosts a native Docker build environment for Solaris +# TODO: Improve stub + +FROM solaris:latest + +# compile and runtime deps +RUN pkg install --accept \ + git \ + gnu-coreutils \ + gnu-make \ + gnu-tar \ + diagnostic/top \ + golang \ + library/golang/* \ + developer/gcc-* + +ENV GOPATH /go/:/usr/lib/gocode/1.5/ +ENV DOCKER_CROSSPLATFORMS solaris/amd64 +WORKDIR /go/src/github.com/docker/docker +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.windows b/vendor/github.com/docker/docker/Dockerfile.windows new file mode 100644 index 0000000..652d072 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.windows @@ -0,0 +1,267 @@ +# escape=` + +# ----------------------------------------------------------------------------------------- +# This file describes the standard way to build Docker in a container on Windows +# Server 2016 or Windows 10. +# +# Maintainer: @jhowardmsft +# ----------------------------------------------------------------------------------------- + + +# Prerequisites: +# -------------- +# +# 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major +# build number must be at least 14393. This can be confirmed, for example, by +# running the following from an elevated PowerShell prompt - this sample output +# is from a fully up to date machine as at mid-November 2016: +# +# >> PS C:\> $(gin).WindowsBuildLabEx +# >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 +# +# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. +# +# 3. The machine must be configured to run containers. For example, by following +# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or +# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md +# +# 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server +# containers as the default option, it is recommended you have at least 1GB +# of memory assigned; For Windows 10 where Hyper-V Containers are employed, you +# should have at least 4GB of memory assigned. Note also, to run Hyper-V +# containers in a VM, it is necessary to configure the VM for nested virtualization. + +# ----------------------------------------------------------------------------------------- + + +# Usage: +# ----- +# +# The following steps should be run from an (elevated*) Windows PowerShell prompt. +# +# (*In a default installation of containers on Windows following the quick-start guidance at +# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, +# the docker.exe client must run elevated to be able to connect to the daemon). +# +# 1. Clone the sources from github.com: +# +# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker +# >> Cloning into 'C:\go\src\github.com\docker\docker'... +# >> remote: Counting objects: 186216, done. +# >> remote: Compressing objects: 100% (21/21), done. +# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 +# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. +# >> Resolving deltas: 100% (123139/123139), done. +# >> Checking connectivity... done. +# >> Checking out files: 100% (3912/3912), done. +# >> PS C:\> +# +# +# 2. Change directory to the cloned docker sources: +# +# >> cd C:\go\src\github.com\docker\docker +# +# +# 3. Build a docker image with the components required to build the docker binaries from source +# by running one of the following: +# +# >> docker build -t nativebuildimage -f Dockerfile.windows . +# >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers) +# +# +# 4. Build the docker executable binaries by running one of the following: +# +# >> docker run --name binaries nativebuildimage hack\make.ps1 -Binary +# >> docker run --name binaries -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers) +# +# +# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination +# folder on the host system where you want the binaries to be located. +# +# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe +# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe +# +# +# 6. (Optional) Remove the interim container holding the built executable binaries: +# +# >> docker rm binaries +# +# +# 7. (Optional) Remove the image used for the container in which the executable +# binaries are build. Tip - it may be useful to keep this image around if you need to +# build multiple times. Then you can take advantage of the builder cache to have an +# image which has all the components required to build the binaries already installed. +# +# >> docker rmi nativebuildimage +# + +# ----------------------------------------------------------------------------------------- + + +# The validation tests can either run in a container, or directly on the host. To run in a +# container, ensure you have created the nativebuildimage above. Then run one of the +# following from an (elevated) Windows PowerShell prompt: +# +# >> docker run --rm nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat +# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat (if using Hyper-V containers) + +# To run the validation tests on the host, from the root of the repository, run the +# following from a Windows PowerShell prompt (elevation is not required): (Note Go +# must be installed to run these tests) +# +# >> hack\make.ps1 -DCO -PkgImports -GoFormat + +# ----------------------------------------------------------------------------------------- + + +# To run unit tests, ensure you have created the nativebuildimage above. Then run one of +# the following from an (elevated) Windows PowerShell prompt: +# +# >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit +# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers) + + +# ----------------------------------------------------------------------------------------- + + +# To run all tests and binary build, ensure you have created the nativebuildimage above. Then +# run one of the following from an (elevated) Windows PowerShell prompt: +# +# >> docker run nativebuildimage hack\make.ps1 -All +# >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers) + +# ----------------------------------------------------------------------------------------- + + +# Important notes: +# --------------- +# +# Don't attempt to use a bind-mount to pass a local directory as the bundles target +# directory. It does not work (golang attempts for follow a mapped folder incorrectly). +# Instead, use docker cp as per the example. +# +# go.zip is not removed from the image as it is used by the Windows CI servers +# to ensure the host and image are running consistent versions of go. +# +# Nanoserver support is a work in progress. Although the image will build if the +# FROM statement is updated, it will not work when running autogen through hack\make.ps1. +# It is suspected that the required GCC utilities (eg gcc, windres, windmc) silently +# quit due to the use of console hooks which are not available. +# +# The docker integration tests do not currently run in a container on Windows, predominantly +# due to Windows not supporting privileged mode, so anything using a volume would fail. +# They (along with the rest of the docker CI suite) can be run using +# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1. +# +# ----------------------------------------------------------------------------------------- + + +# The number of build steps below are explicitly minimised to improve performance. +FROM microsoft/windowsservercore + +# Use PowerShell as the default shell +SHELL ["powershell", "-command"] + +# Environment variable notes: +# - GO_VERSION must be consistent with 'Dockerfile' used by Linux. +# - FROM_DOCKERFILE is used for detection of building within a container. +ENV GO_VERSION=1.7.5 ` + GIT_VERSION=2.11.0 ` + GOPATH=C:\go ` + FROM_DOCKERFILE=1 + +RUN ` + $ErrorActionPreference = 'Stop'; ` + $ProgressPreference = 'SilentlyContinue'; ` + ` + Function Test-Nano() { ` + $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; ` + return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); ` + }` + ` + Function Download-File([string] $source, [string] $target) { ` + if (Test-Nano) { ` + $handler = New-Object System.Net.Http.HttpClientHandler; ` + $client = New-Object System.Net.Http.HttpClient($handler); ` + $client.Timeout = New-Object System.TimeSpan(0, 30, 0); ` + $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); ` + $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); ` + $responseMsg.Wait(); ` + if (!$responseMsg.IsCanceled) { ` + $response = $responseMsg.Result; ` + if ($response.IsSuccessStatusCode) { ` + $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); ` + $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); ` + $copyStreamOp.Wait(); ` + $downloadedFileStream.Close(); ` + if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } ` + } ` + } else { ` + Throw ("Failed to download " + $source) ` + }` + } else { ` + $webClient = New-Object System.Net.WebClient; ` + $webClient.DownloadFile($source, $target); ` + } ` + } ` + ` + setx /M PATH $('C:\git\bin;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); ` + ` + Write-Host INFO: Downloading git...; ` + $location='https://github.com/git-for-windows/git/releases/download/v'+$env:GIT_VERSION+'.windows.1/PortableGit-'+$env:GIT_VERSION+'-64-bit.7z.exe'; ` + Download-File $location C:\gitsetup.7z.exe; ` + ` + Write-Host INFO: Downloading go...; ` + Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; ` + ` + Write-Host INFO: Downloading compiler 1 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; ` + ` + Write-Host INFO: Downloading compiler 2 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` + ` + Write-Host INFO: Downloading compiler 3 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; ` + ` + Write-Host INFO: Installing PS7Zip package...; ` + Install-Package PS7Zip -Force | Out-Null; ` + Write-Host INFO: Importing PS7Zip...; ` + Import-Module PS7Zip -Force; ` + New-Item C:\git -ItemType Directory | Out-Null ; ` + cd C:\git; ` + Write-Host INFO: Extracting git...; ` + Expand-7Zip C:\gitsetup.7z.exe | Out-Null; ` + cd C:\; ` + ` + Write-Host INFO: Expanding go...; ` + Expand-Archive C:\go.zip -DestinationPath C:\; ` + ` + Write-Host INFO: Expanding compiler 1 of 3...; ` + Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 2 of 3...; ` + Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 3 of 3...; ` + Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` + ` + Write-Host INFO: Removing downloaded files...; ` + Remove-Item C:\gcc.zip; ` + Remove-Item C:\runtime.zip; ` + Remove-Item C:\binutils.zip; ` + Remove-Item C:\gitsetup.7z.exe; ` + ` + Write-Host INFO: Creating source directory...; ` + New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; ` + ` + Write-Host INFO: Configuring git core.autocrlf...; ` + C:\git\bin\git config --global core.autocrlf true; ` + ` + Write-Host INFO: Completed + +# Make PowerShell the default entrypoint +ENTRYPOINT ["powershell.exe"] + +# Set the working directory to the location of the sources +WORKDIR C:\go\src\github.com\docker\docker + +# Copy the sources into the container +COPY . . diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 0000000..8f3fee6 --- /dev/null +++ b/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/MAINTAINERS b/vendor/github.com/docker/docker/MAINTAINERS new file mode 100644 index 0000000..39bb8c1 --- /dev/null +++ b/vendor/github.com/docker/docker/MAINTAINERS @@ -0,0 +1,376 @@ +# Docker maintainers file +# +# This file describes who runs the docker/docker project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + # For each release (including minor releases), a "release captain" is assigned from the + # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure + # the release process is clear and up-to-date. + + people = [ + "aaronlehmann", + "akihirosuda", + "aluzzardi", + "anusha", + "coolljt0725", + "cpuguy83", + "crosbymichael", + "dnephin", + "duglin", + "estesp", + "icecrime", + "jhowardmsft", + "justincormack", + "lk4d4", + "mavenugo", + "mhbauer", + "mlaventure", + "mrjana", + "runcom", + "stevvooe", + "tianon", + "tibor", + "tonistiigi", + "unclejack", + "vdemeester", + "vieux" + ] + + [Org."Docs maintainers"] + + # TODO Describe the docs maintainers role. + + people = [ + "jamtur01", + "misty", + "sven", + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "aboch", + "andrewhsu", + "ehazlett", + "mgoelzer", + "programmerq", + "thajeztah" + ] + + [Org.Alumni] + + # This list contains maintainers that are no longer active on the project. + # It is thanks to these people that the project has become what it is today. + # Thank you! + + people = [ + # David Calavera contributed many features to Docker, such as an improved + # event system, dynamic configuration reloading, volume plugins, fancy + # new templating options, and an external client credential store. As a + # maintainer, David was release captain for Docker 1.8, and competing + # with Jess Frazelle to be "top dream killer". + # David is now doing amazing stuff as CTO for https://www.netlify.com, + # and tweets as @calavera. + "calavera", + + # As a maintainer, Erik was responsible for the "builder", and + # started the first designs for the new networking model in + # Docker. Erik is now working on all kinds of plugins for Docker + # (https://github.com/contiv) and various open source projects + # in his own repository https://github.com/erikh. You may + # still stumble into him in our issue tracker, or on IRC. + "erikh", + + # Jessica Frazelle, also known as the "Keyser Söze of containers", + # runs *everything* in containers. She started contributing to + # Docker with a (fun fun) change involving both iptables and regular + # expressions (coz, YOLO!) on July 10, 2014 + # https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995. + # Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed + # many features and improvement, among which "seccomp profiles" (making + # containers a lot more secure). Besides being a maintainer, she + # set up the CI infrastructure for the project, giving everyone + # something to shout at if a PR failed ("noooo Janky!"). + # Jess is currently working on the DCOS security team at Mesosphere, + # and contributing to various open source projects. + # Be sure you don't miss her talks at a conference near you (a must-see), + # read her blog at https://blog.jessfraz.com (a must-read), and + # check out her open source projects on GitHub https://github.com/jessfraz (a must-try). + "jessfraz", + + # As a docs maintainer, Mary Anthony contributed greatly to the Docker + # docs. She wrote the Docker Contributor Guide and Getting Started + # Guides. She helped create a doc build system independent of + # docker/docker project, and implemented a new docs.docker.com theme and + # nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub + # public repositories was originally referenced in + # maryatdocker/docker-whale back in May 2015. + "moxiegirl", + + # Vincent "vbatts!" Batts made his first contribution to the project + # in November 2013, to become a maintainer a few months later, on + # May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778). + # As a maintainer, Vincent made important contributions to core elements + # of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper). + # He also contributed the "tar-split" library, an important element + # for the content-addressable store. + # Vincent is currently a member of the Open Containers Initiative + # Technical Oversight Board (TOB), besides his work at Red Hat and + # Project Atomic. You can still find him regularly hanging out in + # our repository and the #docker-dev and #docker-maintainers IRC channels + # for a chat, as he's always a lot of fun. + "vbatts", + + # Vishnu became a maintainer to help out on the daemon codebase and + # libcontainer integration. He's currently involved in the + # Open Containers Initiative, working on the specifications, + # besides his work on cAdvisor and Kubernetes for Google. + "vishh" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aboch] + Name = "Alessandro Boch" + Email = "aboch@docker.com" + GitHub = "aboch" + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "suda.akihiro@lab.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.andrewhsu] + Name = "Andrew Hsu" + Email = "andrewhsu@docker.com" + GitHub = "andrewhsu" + + [people.anusha] + Name = "Anusha Ragunathan" + Email = "anusha@docker.com" + GitHub = "anusha-ragunathan" + + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" + + [people.coolljt0725] + Name = "Lei Jitang" + Email = "leijitang@huawei.com" + GitHub = "coolljt0725" + + [people.cpuguy83] + Name = "Brian Goff" + Email = "cpuguy83@gmail.com" + Github = "cpuguy83" + + [people.crosbymichael] + Name = "Michael Crosby" + Email = "crosbymichael@gmail.com" + GitHub = "crosbymichael" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.duglin] + Name = "Doug Davis" + Email = "dug@us.ibm.com" + GitHub = "duglin" + + [people.ehazlett] + Name = "Evan Hazlett" + Email = "ejhazlett@gmail.com" + GitHub = "ehazlett" + + [people.erikh] + Name = "Erik Hollensbe" + Email = "erik@docker.com" + GitHub = "erikh" + + [people.estesp] + Name = "Phil Estes" + Email = "estesp@linux.vnet.ibm.com" + GitHub = "estesp" + + [people.icecrime] + Name = "Arnaud Porterie" + Email = "arnaud@docker.com" + GitHub = "icecrime" + + [people.jamtur01] + Name = "James Turnbull" + Email = "james@lovedthanlost.net" + GitHub = "jamtur01" + + [people.jhowardmsft] + Name = "John Howard" + Email = "jhoward@microsoft.com" + GitHub = "jhowardmsft" + + [people.jessfraz] + Name = "Jessie Frazelle" + Email = "jess@linux.com" + GitHub = "jessfraz" + + [people.justincormack] + Name = "Justin Cormack" + Email = "justin.cormack@docker.com" + GitHub = "justincormack" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.mavenugo] + Name = "Madhu Venugopal" + Email = "madhu@docker.com" + GitHub = "mavenugo" + + [people.mgoelzer] + Name = "Mike Goelzer" + Email = "mike.goelzer@docker.com" + GitHub = "mgoelzer" + + [people.mhbauer] + Name = "Morgan Bauer" + Email = "mbauer@us.ibm.com" + GitHub = "mhbauer" + + [people.misty] + Name = "Misty Stanley-Jones" + Email = "misty@docker.com" + GitHub = "mstanleyjones" + + [people.mlaventure] + Name = "Kenfe-Mickaël Laventure" + Email = "mickael.laventure@docker.com" + GitHub = "mlaventure" + + [people.moxiegirl] + Name = "Mary Anthony" + Email = "mary.anthony@docker.com" + GitHub = "moxiegirl" + + [people.mrjana] + Name = "Jana Radhakrishnan" + Email = "mrjana@docker.com" + GitHub = "mrjana" + + [people.programmerq] + Name = "Jeff Anderson" + Email = "jeff@docker.com" + GitHub = "programmerq" + + [people.runcom] + Name = "Antonio Murdaca" + Email = "runcom@redhat.com" + GitHub = "runcom" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" + + [people.sven] + Name = "Sven Dowideit" + Email = "SvenDowideit@home.org.au" + GitHub = "SvenDowideit" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.tianon] + Name = "Tianon Gravi" + Email = "admwiggin@gmail.com" + GitHub = "tianon" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + GitHub = "tiborvass" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.unclejack] + Name = "Cristian Staretu" + Email = "cristian.staretu@gmail.com" + GitHub = "unclejack" + + [people.vbatts] + Name = "Vincent Batts" + Email = "vbatts@redhat.com" + GitHub = "vbatts" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + + [people.vishh] + Name = "Vishnu Kannan" + Email = "vishnuk@google.com" + GitHub = "vishh" diff --git a/vendor/github.com/docker/docker/Makefile b/vendor/github.com/docker/docker/Makefile new file mode 100644 index 0000000..81bde6b --- /dev/null +++ b/vendor/github.com/docker/docker/Makefile @@ -0,0 +1,147 @@ +.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win + +# set the graph driver as the current graphdriver if not set +DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) + +# get OS/Arch of docker engine +DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}') +DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') + +# env vars passed through directly to Docker's build scripts +# to allow things like `make KEEPBUNDLE=1 binary` easily +# `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILD_APT_MIRROR \ + -e BUILDFLAGS \ + -e KEEPBUNDLE \ + -e DOCKER_BUILD_ARGS \ + -e DOCKER_BUILD_GOGC \ + -e DOCKER_BUILD_PKGS \ + -e DOCKER_DEBUG \ + -e DOCKER_EXPERIMENTAL \ + -e DOCKER_GITCOMMIT \ + -e DOCKER_GRAPHDRIVER=$(DOCKER_GRAPHDRIVER) \ + -e DOCKER_INCREMENTAL_BINARY \ + -e DOCKER_PORT \ + -e DOCKER_REMAP_ROOT \ + -e DOCKER_STORAGE_OPTS \ + -e DOCKER_USERLANDPROXY \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT \ + -e HTTP_PROXY \ + -e HTTPS_PROXY \ + -e NO_PROXY \ + -e http_proxy \ + -e https_proxy \ + -e no_proxy +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + +# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test` +# (default to no bind mount if DOCKER_HOST is set) +# note: BINDDIR is supported for backwards-compatibility here +BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) +DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") + +# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs. +# The volume will be cleaned up when the container is removed due to `--rm`. +# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set. +DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) + +# enable .go-pkg-cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set +PKGCACHE_DIR := $(if $(PKGCACHE_DIR),$(PKGCACHE_DIR),.go-pkg-cache) +PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo +DOCKER_MOUNT := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_MOUNT) $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(CURDIR)/$(PKGCACHE_DIR)/\1"@g'),$(DOCKER_MOUNT)) + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") +DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) +DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",) + +DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) +BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR)) +export BUILD_APT_MIRROR + +# if this session isn't interactive, then we don't want to allocate a +# TTY, which would fail, but if it is interactive, we do want to attach +# so that the user can send e.g. ^C through. +INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif + +DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" + +default: binary + +all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives + $(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh' + +binary: build ## build the linux binaries + $(DOCKER_RUN_DOCKER) hack/make.sh binary + +build: bundles init-go-pkg-cache + docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . + +bundles: + mkdir bundles + +cross: build ## cross build the binaries for darwin, freebsd and\nwindows + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross + +deb: build ## build the deb packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb + + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +init-go-pkg-cache: + mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g') + +install: ## install the linux binaries + KEEPBUNDLE=1 hack/make.sh install-binary + +manpages: ## Generate man pages from go source and markdown + docker build -t docker-manpage-dev -f "man/$(DOCKERFILE)" ./man + docker run --rm \ + -v $(PWD):/go/src/github.com/docker/docker/ \ + docker-manpage-dev + +rpm: build ## build the rpm packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm + +run: build ## run the docker daemon in a container + $(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run" + +shell: build ## start a shell inside the build env + $(DOCKER_RUN_DOCKER) bash + +test: build ## run the unit, integration and docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py + +test-docker-py: build ## run the docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py + +test-integration-cli: build ## run the integration tests + $(DOCKER_RUN_DOCKER) hack/make.sh build-integration-test-binary dynbinary test-integration-cli + +test-unit: build ## run the unit tests + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit + +tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz + +validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor + $(DOCKER_RUN_DOCKER) hack/validate/all + +win: build ## cross build the binary for windows + $(DOCKER_RUN_DOCKER) hack/make.sh win + +.PHONY: swagger-gen +swagger-gen: + docker run --rm -v $(PWD):/go/src/github.com/docker/docker \ + -w /go/src/github.com/docker/docker \ + --entrypoint hack/generate-swagger-api.sh \ + -e GOPATH=/go \ + quay.io/goswagger/swagger:0.7.4 diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 0000000..8a37c1c --- /dev/null +++ b/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2016 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md new file mode 100644 index 0000000..0b33bdc --- /dev/null +++ b/vendor/github.com/docker/docker/README.md @@ -0,0 +1,304 @@ +Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) +============================ + +Docker is an open source project to pack, ship and run any application +as a lightweight container. + +Docker containers are both *hardware-agnostic* and *platform-agnostic*. +This means they can run anywhere, from your laptop to the largest +cloud compute instance and everything in between - and they don't require +you to use a particular language, framework or packaging system. That +makes them great building blocks for deploying and scaling web apps, +databases, and backend services without depending on a particular stack +or provider. + +Docker began as an open-source implementation of the deployment engine which +powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/), +a popular Platform-as-a-Service. It benefits directly from the experience +accumulated over several years of large-scale operation and support of hundreds +of thousands of applications and databases. + +![Docker logo](docs/static_files/docker-logo-compressed.png "Docker") + +## Security Disclosure + +Security is very important to us. If you have any issue regarding security, +please disclose the information responsibly by sending an email to +security@docker.com and not by creating a GitHub issue. + +## Better than VMs + +A common method for distributing applications and sandboxing their +execution is to use virtual machines, or VMs. Typical VM formats are +VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory +these formats should allow every developer to automatically package +their application into a "machine" for easy distribution and deployment. +In practice, that almost never happens, for a few reasons: + + * *Size*: VMs are very large which makes them impractical to store + and transfer. + * *Performance*: running VMs consumes significant CPU and memory, + which makes them impractical in many scenarios, for example local + development of multi-tier applications, and large-scale deployment + of cpu and memory-intensive applications on large numbers of + machines. + * *Portability*: competing VM environments don't play well with each + other. Although conversion tools do exist, they are limited and + add even more overhead. + * *Hardware-centric*: VMs were designed with machine operators in + mind, not software developers. As a result, they offer very + limited tooling for what developers need most: building, testing + and running their software. For example, VMs offer no facilities + for application versioning, monitoring, configuration, logging or + service discovery. + +By contrast, Docker relies on a different sandboxing method known as +*containerization*. Unlike traditional virtualization, containerization +takes place at the kernel level. Most modern operating system kernels +now support the primitives necessary for containerization, including +Linux with [openvz](https://openvz.org), +[vserver](http://linux-vserver.org) and more recently +[lxc](https://linuxcontainers.org/), Solaris with +[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), +and FreeBSD with +[Jails](https://www.freebsd.org/doc/handbook/jails.html). + +Docker builds on top of these low-level primitives to offer developers a +portable format and runtime environment that solves all four problems. +Docker containers are small (and their transfer can be optimized with +layers), they have basically zero memory and cpu overhead, they are +completely portable, and are designed from the ground up with an +application-centric design. + +Perhaps best of all, because Docker operates at the OS level, it can still be +run inside a VM! + +## Plays well with others + +Docker does not require you to buy into a particular programming +language, framework, packaging system, or configuration language. + +Is your application a Unix process? Does it use files, tcp connections, +environment variables, standard Unix streams and command-line arguments +as inputs and outputs? Then Docker can run it. + +Can your application's build be expressed as a sequence of such +commands? Then Docker can build it. + +## Escape dependency hell + +A common problem for developers is the difficulty of managing all +their application's dependencies in a simple and automated way. + +This is usually difficult for several reasons: + + * *Cross-platform dependencies*. Modern applications often depend on + a combination of system libraries and binaries, language-specific + packages, framework-specific modules, internal components + developed for another project, etc. These dependencies live in + different "worlds" and require different tools - these tools + typically don't work well with each other, requiring awkward + custom integrations. + + * *Conflicting dependencies*. Different applications may depend on + different versions of the same dependency. Packaging tools handle + these situations with various degrees of ease - but they all + handle them in different and incompatible ways, which again forces + the developer to do extra work. + + * *Custom dependencies*. A developer may need to prepare a custom + version of their application's dependency. Some packaging systems + can handle custom versions of a dependency, others can't - and all + of them handle it differently. + + +Docker solves the problem of dependency hell by giving the developer a simple +way to express *all* their application's dependencies in one place, while +streamlining the process of assembling them. If this makes you think of +[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't +*replace* your favorite packaging systems. It simply orchestrates +their use in a simple and repeatable way. How does it do that? With +layers. + +Docker defines a build as running a sequence of Unix commands, one +after the other, in the same container. Build commands modify the +contents of the container (usually by installing new files on the +filesystem), the next command modifies it some more, etc. Since each +build command inherits the result of the previous commands, the +*order* in which the commands are executed expresses *dependencies*. + +Here's a typical Docker build process: + +```bash +FROM ubuntu:12.04 +RUN apt-get update && apt-get install -y python python-pip curl +RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv +RUN cd helloflask-master && pip install -r requirements.txt +``` + +Note that Docker doesn't care *how* dependencies are built - as long +as they can be built by running a Unix command in a container. + + +Getting started +=============== + +Docker can be installed either on your computer for building applications or +on servers for running them. To get started, [check out the installation +instructions in the +documentation](https://docs.docker.com/engine/installation/). + +Usage examples +============== + +Docker can be used to run short-lived commands, long-running daemons +(app servers, databases, etc.), interactive shell sessions, etc. + +You can find a [list of real-world +examples](https://docs.docker.com/engine/examples/) in the +documentation. + +Under the hood +-------------- + +Under the hood, Docker is built on the following components: + +* The + [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt) + and + [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) + capabilities of the Linux kernel +* The [Go](https://golang.org) programming language +* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) +* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md) + +Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) +====================== + +| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** | +|------------------|----------------------|---------|---------| +| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) | + +Want to hack on Docker? Awesome! We have [instructions to help you get +started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/). + +These instructions are probably not perfect, please let us know if anything +feels wrong or incomplete. Better yet, submit a PR and improve them yourself. + +Getting the development builds +============================== + +Want to run Docker from a master build? You can download +master builds at [master.dockerproject.org](https://master.dockerproject.org). +They are updated with each commit merged into the master branch. + +Don't know how to use that super cool new feature in the master build? Check +out the master docs at +[docs.master.dockerproject.org](http://docs.master.dockerproject.org). + +How the project is run +====================== + +Docker is a very, very active project. If you want to learn more about how it is run, +or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project). + +We are always open to suggestions on process improvements, and are always looking for more maintainers. + +### Talking to other Docker users and contributors + + + + + + + + + + + + + + + + + + + + + + + + +
Internet Relay Chat (IRC) +

+ IRC is a direct line to our most knowledgeable Docker users; we have + both the #docker and #docker-dev group on + irc.freenode.net. + IRC is a rich chat protocol but it can overwhelm new users. You can search + our chat archives. +

+ Read our IRC quickstart guide for an easy way to get started. +
Docker Community Forums + The Docker Engine + group is for users of the Docker Engine project. +
Google Groups + The docker-dev group is for contributors and other people + contributing to the Docker project. You can join this group without a + Google account by sending an email to docker-dev+subscribe@googlegroups.com. + You'll receive a join-request message; simply reply to the message to + confirm your subscription. +
Twitter + You can follow Docker's Twitter feed + to get updates on our products. You can also tweet us questions or just + share blogs or stories. +
Stack Overflow + Stack Overflow has over 7000 Docker questions listed. We regularly + monitor Docker questions + and so do many other knowledgeable Docker users. +
+ +### Legal + +*Brought to you courtesy of our legal counsel. For more context, +please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.* + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. + +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + + +Licensing +========= +Docker is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +license text. + +Other Docker Related Projects +============================= +There are a number of projects under development that are based on Docker's +core technology. These projects expand the tooling built around the +Docker platform to broaden its application and utility. + +* [Docker Registry](https://github.com/docker/distribution): Registry +server for Docker (hosting/delivery of repositories and images) +* [Docker Machine](https://github.com/docker/machine): Machine management +for a container-centric world +* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering +system +* [Docker Compose](https://github.com/docker/compose) (formerly Fig): +Define and run multi-container apps +* [Kitematic](https://github.com/docker/kitematic): The easiest way to use +Docker on Mac and Windows + +If you know of another project underway that should be listed here, please help +us keep this list up-to-date by submitting a PR. + +Awesome-Docker +============== +You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there. diff --git a/vendor/github.com/docker/docker/ROADMAP.md b/vendor/github.com/docker/docker/ROADMAP.md new file mode 100644 index 0000000..21fe06d --- /dev/null +++ b/vendor/github.com/docker/docker/ROADMAP.md @@ -0,0 +1,118 @@ +Docker Engine Roadmap +===================== + +### How should I use this document? + +This document provides description of items that the project decided to prioritize. This should +serve as a reference point for Docker contributors to understand where the project is going, and +help determine if a contribution could be conflicting with some longer terms plans. + +The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be +refused (except for those mentioned as "frozen features" below)! We are always happy to receive +patches for new cool features we haven't thought about, or didn't judge priority. Please however +understand that such patches might take longer for us to review. + +### How can I help? + +Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described +in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our +goal is to split down the workload in such way that anybody can jump in and help. Please comment on +issues if you want to take it to avoid duplicating effort! Similarly, if a maintainer is already +assigned on an issue you'd like to participate in, pinging him on IRC or GitHub to offer your help is +the best way to go. + +### How can I add something to the roadmap? + +The roadmap process is new to the Docker Engine: we are only beginning to structure and document the +project objectives. Our immediate goal is to be more transparent, and work with our community to +focus our efforts on fewer prioritized topics. + +We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but +we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we +won't be accepting pull requests adding or removing items from this file. + +# 1. Features and refactoring + +## 1.1 Runtime improvements + +We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container +execution. The initial goal was to integrate runC as a replacement in the Engine for the traditional +default libcontainer `execdriver`, but the Engine internals were not ready for this. + +As runC continued evolving, and the OCI specification along with it, we created +[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC`. This is +the new target for Engine integration, as it can entirely replace the whole `execdriver` +architecture, and container monitoring along with it. + +Docker Engine will rely on a long-running `containerd` companion daemon for all container execution +related operations. This could open the door in the future for Engine restarts without interrupting +running containers. + +## 1.2 Plugins improvements + +Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volumes and networks +extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real +world feedback before optimizing for any particular workflow. + +In the future, we'd like plugins to become first class citizens, and encourage an ecosystem of +plugins. This implies in particular making it trivially easy to distribute plugins as containers +through any Registry instance, as well as solving the commonly heard pain points of plugins needing +to be treated as somewhat special (being active at all time, started before any other user +containers, and not as easily dismissed). + +## 1.3 Internal decoupling + +A lot of work has been done in trying to decouple the Docker Engine's internals. In particular, the +API implementation has been refactored, and the Builder side of the daemon is now +[fully independent](https://github.com/docker/docker/tree/master/builder) while still residing in +the same repository. + +We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the +runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support +with the concept of "special" containers opens the door for bootstrapping more Engine internals +using the same facilities. + +## 1.4 Cluster capable Engine + +The community has been pushing for a more cluster capable Docker Engine, and a huge effort was spent +adding features such as multihost networking, and node discovery down at the Engine level. Yet, the +Engine is currently incapable of taking scheduling decisions alone, and continues relying on Swarm +for that. + +We plan to complete this effort and make Engine fully cluster capable. Multiple instances of the +Docker Engine being already capable of discovering each other and establish overlay networking for +their container to communicate, the next step is for a given Engine to gain ability to dispatch work +to another node in the cluster. This will be introduced in a backward compatible way, such that a +`docker run` invocation on a particular node remains fully deterministic. + +# 2 Frozen features + +## 2.1 Docker exec + +We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a +*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort. + +## 2.2 Remote Registry Operations + +A large amount of work is ongoing in the area of image distribution and provenance. This includes +moving to the V2 Registry API and heavily refactoring the code that powers these features. The +desired result is more secure, reliable and easier to use image distribution. + +Part of the problem with this part of the code base is the lack of a stable and flexible interface. +If new features are added that access the registry without solidifying these interfaces, achieving +feature parity will continue to be elusive. While we get a handle on this situation, we are imposing +a moratorium on new code that accesses the Registry API in commands that don't already make remote +calls. + +Currently, only the following commands cause interaction with a remote registry: + + - push + - pull + - run + - build + - search + - login + +In the interest of stabilizing the registry access model during this ongoing work, we are not +accepting additions to other commands that will cause remote interaction with the Registry API. This +moratorium will lift when the goals of the distribution project have been met. diff --git a/vendor/github.com/docker/docker/VENDORING.md b/vendor/github.com/docker/docker/VENDORING.md new file mode 100644 index 0000000..3086f9d --- /dev/null +++ b/vendor/github.com/docker/docker/VENDORING.md @@ -0,0 +1,45 @@ +# Vendoring policies + +This document outlines recommended Vendoring policies for Docker repositories. +(Example, libnetwork is a Docker repo and logrus is not.) + +## Vendoring using tags + +Commit ID based vendoring provides little/no information about the updates +vendored. To fix this, vendors will now require that repositories use annotated +tags along with commit ids to snapshot commits. Annotated tags by themselves +are not sufficient, since the same tag can be force updated to reference +different commits. + +Each tag should: +- Follow Semantic Versioning rules (refer to section on "Semantic Versioning") +- Have a corresponding entry in the change tracking document. + +Each repo should: +- Have a change tracking document between tags/releases. Ex: CHANGELOG.md, +github releases file. + +The goal here is for consuming repos to be able to use the tag version and +changelog updates to determine whether the vendoring will cause any breaking or +backward incompatible changes. This also means that repos can specify having +dependency on a package of a specific version or greater up to the next major +release, without encountering breaking changes. + +## Semantic Versioning +Annotated version tags should follow Schema Versioning policies. +According to http://semver.org: + +"Given a version number MAJOR.MINOR.PATCH, increment the: + MAJOR version when you make incompatible API changes, + MINOR version when you add functionality in a backwards-compatible manner, and + PATCH version when you make backwards-compatible bug fixes. +Additional labels for pre-release and build metadata are available as extensions +to the MAJOR.MINOR.PATCH format." + +## Vendoring cadence +In order to avoid huge vendoring changes, it is recommended to have a regular +cadence for vendoring updates. e.g. monthly. + +## Pre-merge vendoring tests +All related repos will be vendored into docker/docker. +CI on docker/docker should catch any breaking changes involving multiple repos. diff --git a/vendor/github.com/docker/docker/VERSION b/vendor/github.com/docker/docker/VERSION new file mode 100644 index 0000000..b50dd27 --- /dev/null +++ b/vendor/github.com/docker/docker/VERSION @@ -0,0 +1 @@ +1.13.1 diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md new file mode 100644 index 0000000..464e056 --- /dev/null +++ b/vendor/github.com/docker/docker/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. +- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. To automatically generate documentation. +2. To automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919) + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful for when you are making edits to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. + +All the documentation generation is done in the documentation repository, [docker/docker.github.io](https://github.com/docker/docker.github.io). The Swagger definition is vendored periodically into this repository, but you can manually copy over the Swagger definition to test changes. + +Copy `api/swagger.yaml` in this repository to `engine/api/[VERSION_NUMBER]/swagger.yaml` in the documentation repository, overwriting what is already there. Then, run `docker-compose up` in the documentation repository and browse to [http://localhost:4000/engine/api/](http://localhost:4000/engine/api/) when it finishes rendering. diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 0000000..fd065d5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,166 @@ +package api + +import ( + "encoding/json" + "encoding/pem" + "fmt" + "mime" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion string = "1.26" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier string = "scratch" +) + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first uint16 + last uint16 + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) + return strings.Join(result, ", ") +} + +func formGroup(key string, start, last uint16) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(int(start)) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// MatchesContentType validates the content type against the expected one +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) + if err != nil { + return nil, fmt.Errorf("Error serializing key: %s", err) + } + if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} + +func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { + if ext == ".json" || ext == ".jwk" { + encoded, err = json.Marshal(key) + if err != nil { + return nil, fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + pemBlock, err := key.PEMBlock() + if err != nil { + return nil, fmt.Errorf("unable to encode private key PEM: %s", err) + } + encoded = pem.EncodeToMemory(pemBlock) + } + return +} diff --git a/vendor/github.com/docker/docker/api/common_test.go b/vendor/github.com/docker/docker/api/common_test.go new file mode 100644 index 0000000..31d6f58 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_test.go @@ -0,0 +1,341 @@ +package api + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "os" + + "github.com/docker/docker/api/types" +) + +type ports struct { + ports []types.Port + expected string +} + +// DisplayablePorts +func TestDisplayablePorts(t *testing.T) { + cases := []ports{ + { + []types.Port{ + { + PrivatePort: 9988, + Type: "tcp", + }, + }, + "9988/tcp"}, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp", + }, + { + []types.Port{ + { + IP: "0.0.0.0", + PrivatePort: 9988, + Type: "tcp", + }, + }, + "0.0.0.0:0->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "4.3.2.1:8899->9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 9988, + Type: "tcp", + }, + }, + "4.3.2.1:9988->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp, 9988/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 9998, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 9999, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:9998-9999->9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 8887, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 8888, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", + }, + { + []types.Port{ + { + PrivatePort: 9998, + Type: "udp", + }, { + PrivatePort: 9999, + Type: "udp", + }, + }, + "9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, + }, + "9988/udp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + PrivatePort: 80, + Type: "tcp", + }, { + PrivatePort: 1024, + Type: "tcp", + }, { + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, + }, + "80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", + }, + } + + for _, port := range cases { + actual := DisplayablePorts(port.ports) + if port.expected != actual { + t.Fatalf("Expected %s, got %s.", port.expected, actual) + } + } +} + +// MatchesContentType +func TestJsonContentType(t *testing.T) { + if !MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} + +// LoadOrCreateTrustKey +func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") + if err != nil { + t.Fatal(err) + } + + if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { + t.Fatalf("expected an error, got nothing.") + } + +} + +func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + // Without the need to create the folder hierarchy + tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With the need to create the folder hierarchy as tmpKeyFie is in a path + // where some folders do not exist. + tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With no path at all + defer os.Remove("keyfile") + if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat("keyfile"); err != nil { + t.Fatalf("Expected to find a file keyfile, got %v", err) + } +} + +func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { + tmpKeyFile := filepath.Join("fixtures", "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a key file, got : %v and %v", err, key) + } +} diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 0000000..081e61c --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api + +// MinVersion represents Minimum REST API version supported +const MinVersion string = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 0000000..d930fa0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (eg docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/errors/errors.go b/vendor/github.com/docker/docker/api/errors/errors.go new file mode 100644 index 0000000..29fd254 --- /dev/null +++ b/vendor/github.com/docker/docker/api/errors/errors.go @@ -0,0 +1,47 @@ +package errors + +import "net/http" + +// apiError is an error wrapper that also +// holds information about response status codes. +type apiError struct { + error + statusCode int +} + +// HTTPErrorStatusCode returns a status code. +func (e apiError) HTTPErrorStatusCode() int { + return e.statusCode +} + +// NewErrorWithStatusCode allows you to associate +// a specific HTTP Status Code to an error. +// The Server will take that code and set +// it as the response status. +func NewErrorWithStatusCode(err error, code int) error { + return apiError{err, code} +} + +// NewBadRequestError creates a new API error +// that has the 400 HTTP status code associated to it. +func NewBadRequestError(err error) error { + return NewErrorWithStatusCode(err, http.StatusBadRequest) +} + +// NewRequestForbiddenError creates a new API error +// that has the 403 HTTP status code associated to it. +func NewRequestForbiddenError(err error) error { + return NewErrorWithStatusCode(err, http.StatusForbidden) +} + +// NewRequestNotFoundError creates a new API error +// that has the 404 HTTP status code associated to it. +func NewRequestNotFoundError(err error) error { + return NewErrorWithStatusCode(err, http.StatusNotFound) +} + +// NewRequestConflictError creates a new API error +// that has the 409 HTTP status code associated to it. +func NewRequestConflictError(err error) error { + return NewErrorWithStatusCode(err, http.StatusConflict) +} diff --git a/vendor/github.com/docker/docker/api/fixtures/keyfile b/vendor/github.com/docker/docker/api/fixtures/keyfile new file mode 100644 index 0000000..322f254 --- /dev/null +++ b/vendor/github.com/docker/docker/api/fixtures/keyfile @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY + +MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 +AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky +NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/api/server/httputils/decoder.go b/vendor/github.com/docker/docker/api/server/httputils/decoder.go new file mode 100644 index 0000000..458eac5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/decoder.go @@ -0,0 +1,16 @@ +package httputils + +import ( + "io" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// ContainerDecoder specifies how +// to translate an io.Reader into +// container configuration. +type ContainerDecoder interface { + DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) + DecodeHostConfig(src io.Reader) (*container.HostConfig, error) +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/errors.go b/vendor/github.com/docker/docker/api/server/httputils/errors.go new file mode 100644 index 0000000..59098a9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/errors.go @@ -0,0 +1,101 @@ +package httputils + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/gorilla/mux" + "google.golang.org/grpc" +) + +// httpStatusError is an interface +// that errors with custom status codes +// implement to tell the api layer +// which response status to set. +type httpStatusError interface { + HTTPErrorStatusCode() int +} + +// inputValidationError is an interface +// that errors generated by invalid +// inputs can implement to tell the +// api layer to set a 400 status code +// in the response. +type inputValidationError interface { + IsValidationError() bool +} + +// GetHTTPErrorStatusCode retrieves status code from error message +func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + errMsg := err.Error() + + switch e := err.(type) { + case httpStatusError: + statusCode = e.HTTPErrorStatusCode() + case inputValidationError: + statusCode = http.StatusBadRequest + default: + // FIXME: this is brittle and should not be necessary, but we still need to identify if + // there are errors falling back into this logic. + // If we need to differentiate between different possible error types, + // we should create appropriate error types that implement the httpStatusError interface. + errStr := strings.ToLower(errMsg) + for _, status := range []struct { + keyword string + code int + }{ + {"not found", http.StatusNotFound}, + {"no such", http.StatusNotFound}, + {"bad parameter", http.StatusBadRequest}, + {"no command", http.StatusBadRequest}, + {"conflict", http.StatusConflict}, + {"impossible", http.StatusNotAcceptable}, + {"wrong login/password", http.StatusUnauthorized}, + {"unauthorized", http.StatusUnauthorized}, + {"hasn't been activated", http.StatusForbidden}, + {"this node", http.StatusServiceUnavailable}, + } { + if strings.Contains(errStr, status.keyword) { + statusCode = status.code + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + return statusCode +} + +func apiVersionSupportsJSONErrors(version string) bool { + const firstAPIVersionWithJSONErrors = "1.23" + return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors) +} + +// MakeErrorHandler makes an HTTP handler that decodes a Docker error and +// returns it in the response. +func MakeErrorHandler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + statusCode := GetHTTPErrorStatusCode(err) + vars := mux.Vars(r) + if apiVersionSupportsJSONErrors(vars["version"]) { + response := &types.ErrorResponse{ + Message: err.Error(), + } + WriteJSON(w, statusCode, response) + } else { + http.Error(w, grpc.ErrorDesc(err), statusCode) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/form.go b/vendor/github.com/docker/docker/api/server/httputils/form.go new file mode 100644 index 0000000..20188c1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/form.go @@ -0,0 +1,73 @@ +package httputils + +import ( + "fmt" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. +func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + if err != nil { + return value, err + } + return value, nil + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. +// It fails if the archive name and path are not in the request. +func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/form_test.go b/vendor/github.com/docker/docker/api/server/httputils/form_test.go new file mode 100644 index 0000000..c56f7c1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/form_test.go @@ -0,0 +1,105 @@ +package httputils + +import ( + "net/http" + "net/url" + "testing" +) + +func TestBoolValue(t *testing.T) { + cases := map[string]bool{ + "": false, + "0": false, + "no": false, + "false": false, + "none": false, + "1": true, + "yes": true, + "true": true, + "one": true, + "100": true, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := BoolValue(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestBoolValueOrDefault(t *testing.T) { + r, _ := http.NewRequest("GET", "", nil) + if !BoolValueOrDefault(r, "queryparam", true) { + t.Fatal("Expected to get true default value, got false") + } + + v := url.Values{} + v.Set("param", "") + r, _ = http.NewRequest("GET", "", nil) + r.Form = v + if BoolValueOrDefault(r, "param", true) { + t.Fatal("Expected not to get true") + } +} + +func TestInt64ValueOrZero(t *testing.T) { + cases := map[string]int64{ + "": 0, + "asdf": 0, + "0": 0, + "1": 1, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := Int64ValueOrZero(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestInt64ValueOrDefault(t *testing.T) { + cases := map[string]int64{ + "": -1, + "-1": -1, + "42": 42, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a, err := Int64ValueOrDefault(r, "test", -1) + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + if err != nil { + t.Fatalf("Error should be nil, but received: %s", err) + } + } +} + +func TestInt64ValueOrDefaultWithError(t *testing.T) { + v := url.Values{} + v.Set("test", "invalid") + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + _, err := Int64ValueOrDefault(r, "test", -1) + if err == nil { + t.Fatalf("Expected an error.") + } +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils.go b/vendor/github.com/docker/docker/api/server/httputils/httputils.go new file mode 100644 index 0000000..7930ff7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/httputils.go @@ -0,0 +1,90 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api" +) + +// APIVersionKey is the client's requested API version. +const APIVersionKey = "api-version" + +// UAStringKey is used as key type for user-agent string in net/context struct +const UAStringKey = "upstream-user-agent" + +// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). +type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +// HijackConnection interrupts the http response writer to get the +// underlying connection and operate with it. +func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// CloseStreams ensures that a list for http streams are properly closed. +func CloseStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// CheckForJSON makes sure that the request's Content-Type is application/json. +func CheckForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +// ParseForm ensures the request form is parsed even with invalid content types. +// If we don't do this, POST method without Content-type (even with empty body) will fail. +func ParseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// VersionFromContext returns an API version from the context using APIVersionKey. +// It panics if the context value does not have version.Version type. +func VersionFromContext(ctx context.Context) (ver string) { + if ctx == nil { + return + } + val := ctx.Value(APIVersionKey) + if val == nil { + return + } + return val.(string) +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go b/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go new file mode 100644 index 0000000..4787cc3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go @@ -0,0 +1,17 @@ +// +build go1.7 + +package httputils + +import ( + "encoding/json" + "net/http" +) + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + return enc.Encode(v) +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go b/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go new file mode 100644 index 0000000..bdc6981 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go @@ -0,0 +1,16 @@ +// +build go1.6,!go1.7 + +package httputils + +import ( + "encoding/json" + "net/http" +) + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + return enc.Encode(v) +} diff --git a/vendor/github.com/docker/docker/api/server/middleware.go b/vendor/github.com/docker/docker/api/server/middleware.go new file mode 100644 index 0000000..537ce80 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware.go @@ -0,0 +1,24 @@ +package server + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" +) + +// handlerWithGlobalMiddlewares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + next := handler + + for _, m := range s.middlewares { + next = m.WrapHandler(next) + } + + if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { + next = middleware.DebugRequestMiddleware(next) + } + + return next +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/cors.go b/vendor/github.com/docker/docker/api/server/middleware/cors.go new file mode 100644 index 0000000..ea725db --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/cors.go @@ -0,0 +1,37 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" +) + +// CORSMiddleware injects CORS headers to each request +// when it's configured. +type CORSMiddleware struct { + defaultHeaders string +} + +// NewCORSMiddleware creates a new CORSMiddleware with default headers. +func NewCORSMiddleware(d string) CORSMiddleware { + return CORSMiddleware{defaultHeaders: d} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all head values will be passed to HTTP handler + corsHeaders := c.defaultHeaders + if corsHeaders == "" { + corsHeaders = "*" + } + + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/debug.go b/vendor/github.com/docker/docker/api/server/middleware/debug.go new file mode 100644 index 0000000..8c85676 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/debug.go @@ -0,0 +1,76 @@ +package middleware + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +// DebugRequestMiddleware dumps the request to logger +func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) + + if r.Method != "POST" { + return handler(ctx, w, r, vars) + } + if err := httputils.CheckForJSON(r); err != nil { + return handler(ctx, w, r, vars) + } + maxBodySize := 4096 // 4KB + if r.ContentLength > int64(maxBodySize) { + return handler(ctx, w, r, vars) + } + + body := r.Body + bufReader := bufio.NewReaderSize(body, maxBodySize) + r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + b, err := bufReader.Peek(maxBodySize) + if err != io.EOF { + // either there was an error reading, or the buffer is full (in which case the request is too large) + return handler(ctx, w, r, vars) + } + + var postForm map[string]interface{} + if err := json.Unmarshal(b, &postForm); err == nil { + maskSecretKeys(postForm) + formStr, errMarshal := json.Marshal(postForm) + if errMarshal == nil { + logrus.Debugf("form data: %s", string(formStr)) + } else { + logrus.Debugf("form data: %q", postForm) + } + } + + return handler(ctx, w, r, vars) + } +} + +func maskSecretKeys(inp interface{}) { + if arr, ok := inp.([]interface{}); ok { + for _, f := range arr { + maskSecretKeys(f) + } + return + } + if form, ok := inp.(map[string]interface{}); ok { + loop0: + for k, v := range form { + for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} { + if strings.EqualFold(m, k) { + form[k] = "*****" + continue loop0 + } + } + maskSecretKeys(v) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/experimental.go b/vendor/github.com/docker/docker/api/server/middleware/experimental.go new file mode 100644 index 0000000..b8f56e8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/experimental.go @@ -0,0 +1,29 @@ +package middleware + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// ExperimentalMiddleware is a the middleware in charge of adding the +// 'Docker-Experimental' header to every outgoing request +type ExperimentalMiddleware struct { + experimental string +} + +// NewExperimentalMiddleware creates a new ExperimentalMiddleware +func NewExperimentalMiddleware(experimentalEnabled bool) ExperimentalMiddleware { + if experimentalEnabled { + return ExperimentalMiddleware{"true"} + } + return ExperimentalMiddleware{"false"} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (e ExperimentalMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Docker-Experimental", e.experimental) + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/middleware.go b/vendor/github.com/docker/docker/api/server/middleware/middleware.go new file mode 100644 index 0000000..dc1f5bf --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/middleware.go @@ -0,0 +1,13 @@ +package middleware + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// Middleware is an interface to allow the use of ordinary functions as Docker API filters. +// Any struct that has the appropriate signature can be registered as a middleware. +type Middleware interface { + WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/version.go b/vendor/github.com/docker/docker/api/server/middleware/version.go new file mode 100644 index 0000000..1101465 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/version.go @@ -0,0 +1,50 @@ +package middleware + +import ( + "fmt" + "net/http" + "runtime" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VersionMiddleware is a middleware that +// validates the client and server versions. +type VersionMiddleware struct { + serverVersion string + defaultVersion string + minVersion string +} + +// NewVersionMiddleware creates a new VersionMiddleware +// with the default versions. +func NewVersionMiddleware(s, d, m string) VersionMiddleware { + return VersionMiddleware{ + serverVersion: s, + defaultVersion: d, + minVersion: m, + } +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := vars["version"] + if apiVersion == "" { + apiVersion = v.defaultVersion + } + + if versions.LessThan(apiVersion, v.minVersion) { + return errors.NewBadRequestError(fmt.Errorf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion)) + } + + header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS) + w.Header().Set("Server", header) + w.Header().Set("API-Version", v.defaultVersion) + ctx = context.WithValue(ctx, "api-version", apiVersion) + return handler(ctx, w, r, vars) + } + +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/version_test.go b/vendor/github.com/docker/docker/api/server/middleware/version_test.go new file mode 100644 index 0000000..9e72efd --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/version_test.go @@ -0,0 +1,57 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +func TestVersionMiddleware(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + if err := h(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} + +func TestVersionMiddlewareWithErrors(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + vars := map[string]string{"version": "0.1"} + err := h(ctx, resp, req, vars) + + if !strings.Contains(err.Error(), "client version 0.1 is too old. Minimum supported API version is 1.2.0") { + t.Fatalf("Expected too old client error, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/api/server/profiler.go b/vendor/github.com/docker/docker/api/server/profiler.go new file mode 100644 index 0000000..8bf8384 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/profiler.go @@ -0,0 +1,41 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +const debugPathPrefix = "/debug/" + +func profilerSetup(mainRouter *mux.Router) { + var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/trace", pprof.Trace) + r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) + r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) + r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +// Replicated from expvar.go as not public. +func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/backend.go b/vendor/github.com/docker/docker/api/server/router/build/backend.go new file mode 100644 index 0000000..0f01c11 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/backend.go @@ -0,0 +1,20 @@ +package build + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "golang.org/x/net/context" +) + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. +type Backend interface { + // Build builds a Docker image referenced by an imageID string. + // + // Note: Tagging an image should not be done by a Builder, it should instead be done + // by the caller. + // + // TODO: make this return a reference instead of string + BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build.go b/vendor/github.com/docker/docker/api/server/router/build/build.go new file mode 100644 index 0000000..959498e --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build.go @@ -0,0 +1,29 @@ +package build + +import "github.com/docker/docker/api/server/router" + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend) router.Router { + r := &buildRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + router.Cancellable(router.NewPostRoute("/build", r.postBuild)), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build_routes.go b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go new file mode 100644 index 0000000..75425b1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go @@ -0,0 +1,225 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + options.NetworkMode = r.FormValue("networkmode") + options.Tags = r.Form["t"] + options.SecurityOpt = r.Form["securityopt"] + options.Squash = httputils.BoolValue(r, "squash") + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.Isolation(r.FormValue("isolation")); i != "" { + if !container.Isolation.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.Isolation = i + } + + if runtime.GOOS != "windows" && options.SecurityOpt != nil { + return nil, fmt.Errorf("the daemon on this platform does not support --security-opt to build") + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + var buildArgs = map[string]*string{} + buildArgsJSON := r.FormValue("buildargs") + + // Note that there are two ways a --build-arg might appear in the + // json of the query param: + // "foo":"bar" + // and "foo":nil + // The first is the normal case, ie. --build-arg foo=bar + // or --build-arg foo + // where foo's value was picked up from an env var. + // The second ("foo":nil) is where they put --build-arg foo + // but "foo" isn't set as an env var. In that case we can't just drop + // the fact they mentioned it, we need to pass that along to the builder + // so that it can print a warning about "foo" being unused if there is + // no "ARG foo" in the Dockerfile. + if buildArgsJSON != "" { + if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + + var labels = map[string]string{} + labelsJSON := r.FormValue("labels") + if labelsJSON != "" { + if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil { + return nil, err + } + options.Labels = labels + } + + var cacheFrom = []string{} + cacheFromJSON := r.FormValue("cachefrom") + if cacheFromJSON != "" { + if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil { + return nil, err + } + options.CacheFrom = cacheFrom + } + + return options, nil +} + +type syncWriter struct { + w io.Writer + mu sync.Mutex +} + +func (s *syncWriter) Write(b []byte) (count int, err error) { + s.mu.Lock() + count, err = s.w.Write(b) + s.mu.Unlock() + return +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfigs = map[string]types.AuthConfig{} + authConfigsEncoded = r.Header.Get("X-Registry-Config") + notVerboseBuffer = bytes.NewBuffer(nil) + ) + + if authConfigsEncoded != "" { + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting + // to be empty. + } + } + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + sf := streamformatter.NewJSONStreamFormatter() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an internal error. + if !output.Flushed() { + return err + } + _, err = w.Write(sf.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + buildOptions.AuthConfigs = authConfigs + + remoteURL := r.FormValue("remote") + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. + createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := sf.NewProgressOutput(output, true) + if buildOptions.SuppressOutput { + progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) + } + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) + } + + out := io.Writer(output) + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + out = &syncWriter{w: out} + stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf} + stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf} + + pg := backend.ProgressWriter{ + Output: out, + StdoutFormatter: stdout, + StderrFormatter: stderr, + ProgressReaderFunc: createProgressReader, + } + + imgID, err := br.backend.BuildFromContext(ctx, r.Body, remoteURL, buildOptions, pg) + if err != nil { + return errf(err) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if buildOptions.SuppressOutput { + stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} + fmt.Fprintf(stdout, "%s\n", string(imgID)) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go b/vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go new file mode 100644 index 0000000..8810f88 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go @@ -0,0 +1,10 @@ +package checkpoint + +import "github.com/docker/docker/api/types" + +// Backend for Checkpoint +type Backend interface { + CheckpointCreate(container string, config types.CheckpointCreateOptions) error + CheckpointDelete(container string, config types.CheckpointDeleteOptions) error + CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go new file mode 100644 index 0000000..c1e9392 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go @@ -0,0 +1,36 @@ +package checkpoint + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// checkpointRouter is a router to talk with the checkpoint controller +type checkpointRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new checkpoint router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &checkpointRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the checkpoint controller +func (r *checkpointRouter) Routes() []router.Route { + return r.routes +} + +func (r *checkpointRouter) initRoutes() { + r.routes = []router.Route{ + router.Experimental(router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints)), + router.Experimental(router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint)), + router.Experimental(router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint)), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go new file mode 100644 index 0000000..f988431 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go @@ -0,0 +1,65 @@ +package checkpoint + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var options types.CheckpointCreateOptions + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&options); err != nil { + return err + } + + err := s.backend.CheckpointCreate(vars["name"], options) + if err != nil { + return err + } + + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{ + CheckpointDir: r.Form.Get("dir"), + }) + + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, checkpoints) +} + +func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{ + CheckpointDir: r.Form.Get("dir"), + CheckpointID: vars["checkpoint"], + }) + + if err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/backend.go b/vendor/github.com/docker/docker/api/server/router/container/backend.go new file mode 100644 index 0000000..0d20188 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/backend.go @@ -0,0 +1,79 @@ +package container + +import ( + "io" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/archive" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(name string, config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*backend.ExecInspect, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. +type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. +type stateBackend interface { + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds *int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(name string, timeout time.Duration) (int, error) +} + +// monitorBackend includes functions to implement to provide containers monitoring functionality. +type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version string) (interface{}, error) + ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error + ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) + + Containers(config *types.ContainerListOptions) ([]*types.Container, error) +} + +// attachBackend includes function to implement to provide container attaching functionality. +type attachBackend interface { + ContainerAttach(name string, c *backend.ContainerAttachConfig) error +} + +// systemBackend includes functions to implement to provide system wide containers functionality +type systemBackend interface { + ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. +type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend + systemBackend +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container.go b/vendor/github.com/docker/docker/api/server/router/container/container.go new file mode 100644 index 0000000..bbed7e9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container.go @@ -0,0 +1,77 @@ +package container + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &containerRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + router.NewGetRoute("/containers/json", r.getContainersJSON), + router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + router.Cancellable(router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs)), + router.Cancellable(router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats)), + router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), + // POST + router.NewPostRoute("/containers/create", r.postContainersCreate), + router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), + router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12 + router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + router.NewPostRoute("/containers/prune", r.postContainersPrune), + // PUT + router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container_routes.go b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go new file mode 100644 index 0000000..9c9bc0f --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go @@ -0,0 +1,554 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + config := &types.ContainerListOptions{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: filter, + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + if !stream { + w.Header().Set("Content-Type", "application/json") + } + + config := &backend.ContainerStatsConfig{ + Stream: stream, + OutStream: w, + Version: string(httputils.VersionFromContext(ctx)), + } + + return s.backend.ContainerStats(ctx, vars["name"], config) +} + +func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + containerName := vars["name"] + logsConfig := &backend.ContainerLogsConfig{ + ContainerLogsOptions: types.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + }, + OutStream: w, + } + + chStarted := make(chan struct{}) + if err := s.backend.ContainerLogs(ctx, containerName, logsConfig, chStarted); err != nil { + select { + case <-chStarted: + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that if needed. + fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %v\n", err) + default: + return err + } + } + + return nil +} + +func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.backend.ContainerExport(vars["name"], w) +} + +func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assumed chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + + version := httputils.VersionFromContext(ctx) + var hostConfig *container.HostConfig + // A non-nil json object is at least 7 characters. + if r.ContentLength > 7 || r.ContentLength == -1 { + if versions.GreaterThanOrEqualTo(version, "1.24") { + return validationError{fmt.Errorf("starting container with non-empty request body was deprecated since v1.10 and removed in v1.12")} + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := s.decoder.DecodeHostConfig(r.Body) + if err != nil { + return err + } + hostConfig = c + } + + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoint := r.Form.Get("checkpoint") + checkpointDir := r.Form.Get("checkpoint-dir") + if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +type errContainerIsRunning interface { + ContainerIsRunning() bool +} + +func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it. Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { + var isStopped bool + if e, ok := err.(errContainerIsRunning); ok { + isStopped = !e.ContainerIsRunning() + } + + // Return error that's not caused because the container is stopped. + // Return error if the container is not running and the api is >= 1.20 + // to keep backwards compatibility. + version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &container.ContainerWaitOKBody{ + StatusCode: int64(status), + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + RestartPolicy: updateConfig.RestartPolicy, + } + + name := vars["name"] + resp, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := versions.LessThan(version, "1.19") + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + detachKeys := r.FormValue("detachKeys") + + hijacker, ok := w.(http.Hijacker) + if !ok { + return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) + } + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + conn, _, err := hijacker.Hijack() + if err != nil { + return nil, nil, nil, err + } + + // set raw mode + conn.Write([]byte{}) + + if upgrade { + fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + closer := func() error { + httputils.CloseStreams(conn) + return nil + } + return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + MuxStreams: true, + } + + if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + // Remember to close stream if error happens + conn, _, errHijack := hijacker.Hijack() + if errHijack == nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + statusText := http.StatusText(statusCode) + fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error()) + httputils.CloseStreams(conn) + } else { + logrus.Errorf("Error Hijacking: %v", err) + } + } + return nil +} + +func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + var err error + detachKeys := r.FormValue("detachKeys") + + done := make(chan struct{}) + started := make(chan struct{}) + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + wsChan := make(chan *websocket.Conn) + h := func(conn *websocket.Conn) { + wsChan <- conn + <-done + } + + srv := websocket.Server{Handler: h, Handshake: nil} + go func() { + close(started) + srv.ServeHTTP(w, r) + }() + + conn := <-wsChan + return conn, conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + UseStdin: true, + UseStdout: true, + UseStderr: true, + MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr + } + + err = s.backend.ContainerAttach(containerName, attachConfig) + close(done) + select { + case <-started: + logrus.Errorf("Error attaching websocket: %s", err) + return nil + default: + } + return err +} + +func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ContainersPrune(pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/copy.go b/vendor/github.com/docker/docker/api/server/router/container/copy.go new file mode 100644 index 0000000..ede6dff --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/copy.go @@ -0,0 +1,119 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of getContainersArchive. +func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // Deprecated since 1.8, Errors out since 1.12 + version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.24") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + if _, err := io.Copy(w, data); err != nil { + return err + } + + return nil +} + +// // Encode the stat to JSON, base64 encode, and place in a header. +func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/exec.go b/vendor/github.com/docker/docker/api/server/router/container/exec.go new file mode 100644 index 0000000..1134a0e --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/exec.go @@ -0,0 +1,140 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/stdcopy" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.backend.ContainerExecCreate(name, execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %v", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. +func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if versions.GreaterThan(version, "1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n") + } else { + fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n") + } + + // copy headers that were removed as part of hijack + if err := w.Header().WriteSubset(outStream, nil); err != nil { + return err + } + fmt.Fprint(outStream, "\r\n") + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } + + // Now run the user process in container. + // Maybe we should we pass ctx here if we're not detaching? + if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + stdout.Write([]byte(err.Error() + "\r\n")) + logrus.Errorf("Error running exec in container: %v", err) + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/inspect.go b/vendor/github.com/docker/docker/api/server/router/container/inspect.go new file mode 100644 index 0000000..dbbced7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects container's configuration and serializes it as json. +func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/vendor/github.com/docker/docker/api/server/router/experimental.go b/vendor/github.com/docker/docker/api/server/router/experimental.go new file mode 100644 index 0000000..51385c2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/experimental.go @@ -0,0 +1,67 @@ +package router + +import ( + "errors" + "net/http" + + "golang.org/x/net/context" + + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" +) + +var ( + errExperimentalFeature = errors.New("This experimental feature is disabled by default. Start the Docker daemon with --experimental in order to enable it.") +) + +// ExperimentalRoute defines an experimental API route that can be enabled or disabled. +type ExperimentalRoute interface { + Route + + Enable() + Disable() +} + +// experimentalRoute defines an experimental API route that can be enabled or disabled. +// It implements ExperimentalRoute +type experimentalRoute struct { + local Route + handler httputils.APIFunc +} + +// Enable enables this experimental route +func (r *experimentalRoute) Enable() { + r.handler = r.local.Handler() +} + +// Disable disables the experimental route +func (r *experimentalRoute) Disable() { + r.handler = experimentalHandler +} + +func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return apierrors.NewErrorWithStatusCode(errExperimentalFeature, http.StatusNotImplemented) +} + +// Handler returns returns the APIFunc to let the server wrap it in middlewares. +func (r *experimentalRoute) Handler() httputils.APIFunc { + return r.handler +} + +// Method returns the http method that the route responds to. +func (r *experimentalRoute) Method() string { + return r.local.Method() +} + +// Path returns the subpath where the route responds to. +func (r *experimentalRoute) Path() string { + return r.local.Path() +} + +// Experimental will mark a route as experimental. +func Experimental(r Route) Route { + return &experimentalRoute{ + local: r, + handler: experimentalHandler, + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/backend.go b/vendor/github.com/docker/docker/api/server/router/image/backend.go new file mode 100644 index 0000000..19a67a5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/backend.go @@ -0,0 +1,45 @@ +package image + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. +type Backend interface { + containerBackend + imageBackend + importExportBackend + registryBackend +} + +type containerBackend interface { + Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) +} + +type imageBackend interface { + ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) + ImageHistory(imageName string) ([]*types.ImageHistory, error) + Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) + LookupImage(name string) (*types.ImageInspect, error) + TagImage(imageName, repository, tag string) error + ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) +} + +type importExportBackend interface { + LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ExportImage(names []string, outStream io.Writer) error +} + +type registryBackend interface { + PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image.go b/vendor/github.com/docker/docker/api/server/router/image/image.go new file mode 100644 index 0000000..54a4d51 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image.go @@ -0,0 +1,50 @@ +package image + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// imageRouter is a router to talk with the image controller +type imageRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new image router +func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router { + r := &imageRouter{ + backend: backend, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the image controller +func (r *imageRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the image router +func (r *imageRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/images/json", r.getImagesJSON), + router.NewGetRoute("/images/search", r.getImagesSearch), + router.NewGetRoute("/images/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + router.NewPostRoute("/commit", r.postCommit), + router.NewPostRoute("/images/load", r.postImagesLoad), + router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)), + router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)), + router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + router.NewPostRoute("/images/prune", r.postImagesPrune), + // DELETE + router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image_routes.go b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go new file mode 100644 index 0000000..6940365 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go @@ -0,0 +1,344 @@ +package image + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") { + pause = true + } + + c, _, _, err := s.decoder.DecodeConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. + return err + } + if c == nil { + c = &container.Config{} + } + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Config: c, + MergeConfigs: true, + }, + Changes: r.Form["changes"], + } + + imgID, err := s.backend.Commit(cname, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } + + err = s.backend.PullImage(ctx, image, tag, metaHeaders, authConfig, output) + } else { //import + src := r.Form.Get("fromSrc") + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + err = s.backend.ImportImage(src, repo, tag, message, r.Body, output, r.Form["changes"]) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil +} + +func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &types.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + image := vars["name"] + tag := r.Form.Get("tag") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.backend.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + quiet := httputils.BoolValueOrDefault(r, "quiet", true) + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + return nil +} + +func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.backend.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.backend.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + imageFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + filterParam := r.Form.Get("filter") + if versions.LessThan(version, "1.28") && filterParam != "" { + imageFilters.Add("reference", filterParam) + } + + images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.backend.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + var ( + config *types.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + limit := registry.DefaultSearchLimit + if r.Form.Get("limit") != "" { + limitValue, err := strconv.Atoi(r.Form.Get("limit")) + if err != nil { + return err + } + limit = limitValue + } + query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} + +func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ImagesPrune(pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local.go b/vendor/github.com/docker/docker/api/server/router/local.go new file mode 100644 index 0000000..7cb2a5a --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local.go @@ -0,0 +1,96 @@ +package router + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// localRoute defines an individual API route to connect +// with the docker daemon. It implements Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath where the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc) Route { + return localRoute{method, path, handler} +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("GET", path, handler) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("POST", path, handler) +} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("PUT", path, handler) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("DELETE", path, handler) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS. +func NewOptionsRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("OPTIONS", path, handler) +} + +// NewHeadRoute initializes a new route with the http method HEAD. +func NewHeadRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("HEAD", path, handler) +} + +func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if notifier, ok := w.(http.CloseNotifier); ok { + notify := notifier.CloseNotify() + notifyCtx, cancel := context.WithCancel(ctx) + finished := make(chan struct{}) + defer close(finished) + ctx = notifyCtx + go func() { + select { + case <-notify: + cancel() + case <-finished: + } + }() + } + return h(ctx, w, r, vars) + } +} + +// Cancellable makes new route which embeds http.CloseNotifier feature to +// context.Context of handler. +func Cancellable(r Route) Route { + return localRoute{ + method: r.Method(), + path: r.Path(), + handler: cancellableHandler(r.Handler()), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/backend.go b/vendor/github.com/docker/docker/api/server/router/network/backend.go new file mode 100644 index 0000000..0d1dfb0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/backend.go @@ -0,0 +1,22 @@ +package network + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/libnetwork" +) + +// Backend is all the methods that need to be implemented +// to provide network specific functionality. +type Backend interface { + FindNetwork(idName string) (libnetwork.Network, error) + GetNetworkByName(idName string) (libnetwork.Network, error) + GetNetworksByID(partialID string) []libnetwork.Network + GetNetworks() []libnetwork.Network + CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error + DeleteNetwork(name string) error + NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/filter.go b/vendor/github.com/docker/docker/api/server/router/network/filter.go new file mode 100644 index 0000000..94affb8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/filter.go @@ -0,0 +1,96 @@ +package network + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/runconfig" +) + +var ( + // AcceptedFilters is an acceptable filters for validation + AcceptedFilters = map[string]bool{ + "driver": true, + "type": true, + "name": true, + "id": true, + "label": true, + } +) + +func filterNetworkByType(nws []types.NetworkResource, netType string) (retNws []types.NetworkResource, err error) { + switch netType { + case "builtin": + for _, nw := range nws { + if runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + case "custom": + for _, nw := range nws { + if !runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + default: + return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) + } + return retNws, nil +} + +// filterNetworks filters network list according to user specified filter +// and returns user chosen networks +func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) { + // if filter is empty, return original network list + if filter.Len() == 0 { + return nws, nil + } + + if err := filter.Validate(AcceptedFilters); err != nil { + return nil, err + } + + displayNet := []types.NetworkResource{} + for _, nw := range nws { + if filter.Include("driver") { + if !filter.ExactMatch("driver", nw.Driver) { + continue + } + } + if filter.Include("name") { + if !filter.Match("name", nw.Name) { + continue + } + } + if filter.Include("id") { + if !filter.Match("id", nw.ID) { + continue + } + } + if filter.Include("label") { + if !filter.MatchKVList("label", nw.Labels) { + continue + } + } + displayNet = append(displayNet, nw) + } + + if filter.Include("type") { + var typeNet []types.NetworkResource + errFilter := filter.WalkValues("type", func(fval string) error { + passList, err := filterNetworkByType(displayNet, fval) + if err != nil { + return err + } + typeNet = append(typeNet, passList...) + return nil + }) + if errFilter != nil { + return nil, errFilter + } + displayNet = typeNet + } + + return displayNet, nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/network.go b/vendor/github.com/docker/docker/api/server/router/network/network.go new file mode 100644 index 0000000..08a5c8c --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/network.go @@ -0,0 +1,44 @@ +package network + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon/cluster" +) + +// networkRouter is a router to talk with the network controller +type networkRouter struct { + backend Backend + clusterProvider *cluster.Cluster + routes []router.Route +} + +// NewRouter initializes a new network router +func NewRouter(b Backend, c *cluster.Cluster) router.Router { + r := &networkRouter{ + backend: b, + clusterProvider: c, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the network controller +func (r *networkRouter) Routes() []router.Route { + return r.routes +} + +func (r *networkRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/networks", r.getNetworksList), + router.NewGetRoute("/networks/", r.getNetworksList), + router.NewGetRoute("/networks/{id:.+}", r.getNetwork), + // POST + router.NewPostRoute("/networks/create", r.postNetworkCreate), + router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect), + router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect), + router.NewPostRoute("/networks/prune", r.postNetworksPrune), + // DELETE + router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/network_routes.go b/vendor/github.com/docker/docker/api/server/router/network/network_routes.go new file mode 100644 index 0000000..7bfc499 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/network_routes.go @@ -0,0 +1,308 @@ +package network + +import ( + "encoding/json" + "net/http" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/networkdb" +) + +func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + filter := r.Form.Get("filters") + netFilters, err := filters.FromParam(filter) + if err != nil { + return err + } + + list := []types.NetworkResource{} + + if nr, err := n.clusterProvider.GetNetworks(); err == nil { + list = append(list, nr...) + } + + // Combine the network list returned by Docker daemon if it is not already + // returned by the cluster manager +SKIP: + for _, nw := range n.backend.GetNetworks() { + for _, nl := range list { + if nl.ID == nw.ID() { + continue SKIP + } + } + list = append(list, *n.buildNetworkResource(nw)) + } + + list, err = filterNetworks(list, netFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + if nr, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { + return httputils.WriteJSON(w, http.StatusOK, nr) + } + return err + } + return httputils.WriteJSON(w, http.StatusOK, n.buildNetworkResource(nw)) +} + +func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var create types.NetworkCreateRequest + + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&create); err != nil { + return err + } + + if nws, err := n.clusterProvider.GetNetworksByName(create.Name); err == nil && len(nws) > 0 { + return libnetwork.NetworkNameError(create.Name) + } + + nw, err := n.backend.CreateNetwork(create) + if err != nil { + if _, ok := err.(libnetwork.ManagerRedirectError); !ok { + return err + } + id, err := n.clusterProvider.CreateNetwork(create) + if err != nil { + return err + } + nw = &types.NetworkCreateResponse{ID: id} + } + + return httputils.WriteJSON(w, http.StatusCreated, nw) +} + +func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var connect types.NetworkConnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { + return err + } + + return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig) +} + +func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var disconnect types.NetworkDisconnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { + return err + } + + return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force) +} + +func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if _, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { + if err = n.clusterProvider.RemoveNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil + } + if err := n.backend.DeleteNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { + r := &types.NetworkResource{} + if nw == nil { + return r + } + + info := nw.Info() + r.Name = nw.Name() + r.ID = nw.ID() + r.Created = info.Created() + r.Scope = info.Scope() + if n.clusterProvider.IsManager() { + if _, err := n.clusterProvider.GetNetwork(nw.ID()); err == nil { + r.Scope = "swarm" + } + } else if info.Dynamic() { + r.Scope = "swarm" + } + r.Driver = nw.Type() + r.EnableIPv6 = info.IPv6Enabled() + r.Internal = info.Internal() + r.Attachable = info.Attachable() + r.Options = info.DriverOptions() + r.Containers = make(map[string]types.EndpointResource) + buildIpamResources(r, info) + r.Labels = info.Labels() + + peers := info.Peers() + if len(peers) != 0 { + r.Peers = buildPeerInfoResources(peers) + } + + epl := nw.Endpoints() + for _, e := range epl { + ei := e.Info() + if ei == nil { + continue + } + sb := ei.Sandbox() + tmpID := e.ID() + key := "ep-" + tmpID + if sb != nil { + key = sb.ContainerID() + } + + r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei) + } + return r +} + +func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo { + peerInfo := make([]network.PeerInfo, 0, len(peers)) + for _, peer := range peers { + peerInfo = append(peerInfo, network.PeerInfo{ + Name: peer.Name, + IP: peer.IP, + }) + } + return peerInfo +} + +func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) { + id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig() + + ipv4Info, ipv6Info := nwInfo.IpamInfo() + + r.IPAM.Driver = id + + r.IPAM.Options = opts + + r.IPAM.Config = []network.IPAMConfig{} + for _, ip4 := range ipv4conf { + if ip4.PreferredPool == "" { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip4.PreferredPool + iData.IPRange = ip4.SubPool + iData.Gateway = ip4.Gateway + iData.AuxAddress = ip4.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if len(r.IPAM.Config) == 0 { + for _, ip4Info := range ipv4Info { + iData := network.IPAMConfig{} + iData.Subnet = ip4Info.IPAMData.Pool.String() + iData.Gateway = ip4Info.IPAMData.Gateway.IP.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } + + hasIpv6Conf := false + for _, ip6 := range ipv6conf { + if ip6.PreferredPool == "" { + continue + } + hasIpv6Conf = true + iData := network.IPAMConfig{} + iData.Subnet = ip6.PreferredPool + iData.IPRange = ip6.SubPool + iData.Gateway = ip6.Gateway + iData.AuxAddress = ip6.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if !hasIpv6Conf { + for _, ip6Info := range ipv6Info { + if ip6Info.IPAMData.Pool == nil { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip6Info.IPAMData.Pool.String() + iData.Gateway = ip6Info.IPAMData.Gateway.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } +} + +func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource { + er := types.EndpointResource{} + + er.EndpointID = id + er.Name = name + ei := info + if ei == nil { + return er + } + + if iface := ei.Iface(); iface != nil { + if mac := iface.MacAddress(); mac != nil { + er.MacAddress = mac.String() + } + if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { + er.IPv4Address = ip.String() + } + + if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { + er.IPv6Address = ipv6.String() + } + } + return er +} + +func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneReport, err := n.backend.NetworksPrune(filters.Args{}) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/docker/docker/api/server/router/plugin/backend.go b/vendor/github.com/docker/docker/api/server/router/plugin/backend.go new file mode 100644 index 0000000..ab006b2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/plugin/backend.go @@ -0,0 +1,25 @@ +package plugin + +import ( + "io" + "net/http" + + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +// Backend for Plugin +type Backend interface { + Disable(name string, config *enginetypes.PluginDisableConfig) error + Enable(name string, config *enginetypes.PluginEnableConfig) error + List() ([]enginetypes.Plugin, error) + Inspect(name string) (*enginetypes.Plugin, error) + Remove(name string, config *enginetypes.PluginRmConfig) error + Set(name string, args []string) error + Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error + Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error +} diff --git a/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go b/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go new file mode 100644 index 0000000..e4ea9e2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go @@ -0,0 +1,39 @@ +package plugin + +import "github.com/docker/docker/api/server/router" + +// pluginRouter is a router to talk with the plugin controller +type pluginRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new plugin router +func NewRouter(b Backend) router.Router { + r := &pluginRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the plugin controller +func (r *pluginRouter) Routes() []router.Route { + return r.routes +} + +func (r *pluginRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/plugins", r.listPlugins), + router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin), + router.NewGetRoute("/plugins/privileges", r.getPrivileges), + router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), + router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH? + router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), + router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)), + router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)), + router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin)), + router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), + router.NewPostRoute("/plugins/create", r.createPlugin), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go b/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go new file mode 100644 index 0000000..693fa95 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go @@ -0,0 +1,314 @@ +package plugin + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strconv" + "strings" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) { + + metaHeaders := map[string][]string{} + for k, v := range headers { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + // Get X-Registry-Auth + authEncoded := headers.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + authConfig = &types.AuthConfig{} + } + } + + return metaHeaders, authConfig +} + +// parseRemoteRef parses the remote reference into a reference.Named +// returning the tag associated with the reference. In the case the +// given reference string includes both digest and tag, the returned +// reference will have the digest without the tag, but the tag will +// be returned. +func parseRemoteRef(remote string) (reference.Named, string, error) { + // Parse remote reference, supporting remotes with name and tag + // NOTE: Using distribution reference to handle references + // containing both a name and digest + remoteRef, err := distreference.ParseNamed(remote) + if err != nil { + return nil, "", err + } + + var tag string + if t, ok := remoteRef.(distreference.Tagged); ok { + tag = t.Tag() + } + + // Convert distribution reference to docker reference + // TODO: remove when docker reference changes reconciled upstream + ref, err := reference.WithName(remoteRef.Name()) + if err != nil { + return nil, "", err + } + if d, ok := remoteRef.(distreference.Digested); ok { + ref, err = reference.WithDigest(ref, d.Digest()) + if err != nil { + return nil, "", err + } + } else if tag != "" { + ref, err = reference.WithTag(ref, tag) + if err != nil { + return nil, "", err + } + } else { + ref = reference.WithDefaultTag(ref) + } + + return ref, tag, nil +} + +func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + ref, _, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, privileges) +} + +func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, vars["name"]) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Upgrade(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + + return nil +} + +func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, r.FormValue("name")) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + + return nil +} + +func getName(ref reference.Named, tag, name string) (string, error) { + if name == "" { + if _, ok := ref.(reference.Canonical); ok { + trimmed := reference.TrimNamed(ref) + if tag != "" { + nt, err := reference.WithTag(trimmed, tag) + if err != nil { + return "", err + } + name = nt.String() + } else { + name = reference.WithDefaultTag(trimmed).String() + } + } else { + name = ref.String() + } + } else { + localRef, err := reference.ParseNamed(name) + if err != nil { + return "", err + } + if _, ok := localRef.(reference.Canonical); ok { + return "", errors.New("cannot use digest in plugin tag") + } + if distreference.IsNameOnly(localRef) { + // TODO: log change in name to out stream + name = reference.WithDefaultTag(localRef).String() + } + } + return name, nil +} + +func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + options := &types.PluginCreateOptions{ + RepoName: r.FormValue("name")} + + if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil { + return err + } + //TODO: send progress bar + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + timeout, err := strconv.Atoi(r.Form.Get("timeout")) + if err != nil { + return err + } + config := &types.PluginEnableConfig{Timeout: timeout} + + return pr.backend.Enable(name, config) +} + +func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginDisableConfig{ + ForceDisable: httputils.BoolValue(r, "force"), + } + + return pr.backend.Disable(name, config) +} + +func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + } + return pr.backend.Remove(name, config) +} + +func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + return nil +} + +func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var args []string + if err := json.NewDecoder(r.Body).Decode(&args); err != nil { + return err + } + if err := pr.backend.Set(vars["name"], args); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + l, err := pr.backend.List() + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, l) +} + +func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + result, err := pr.backend.Inspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, result) +} diff --git a/vendor/github.com/docker/docker/api/server/router/router.go b/vendor/github.com/docker/docker/api/server/router/router.go new file mode 100644 index 0000000..2de25c2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/router.go @@ -0,0 +1,19 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + // Routes returns the list of routes to add to the docker server. + Routes() []Route +} + +// Route defines an individual API route in the docker server. +type Route interface { + // Handler returns the raw function to create the http handler. + Handler() httputils.APIFunc + // Method returns the http method that the route responds to. + Method() string + // Path returns the subpath where the route responds to. + Path() string +} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/backend.go b/vendor/github.com/docker/docker/api/server/router/swarm/backend.go new file mode 100644 index 0000000..33840f0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/swarm/backend.go @@ -0,0 +1,36 @@ +package swarm + +import ( + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + types "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// Backend abstracts an swarm commands manager. +type Backend interface { + Init(req types.InitRequest) (string, error) + Join(req types.JoinRequest) error + Leave(force bool) error + Inspect() (types.Swarm, error) + Update(uint64, types.Spec, types.UpdateFlags) error + GetUnlockKey() (string, error) + UnlockSwarm(req types.UnlockRequest) error + GetServices(basictypes.ServiceListOptions) ([]types.Service, error) + GetService(string) (types.Service, error) + CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error) + UpdateService(string, uint64, types.ServiceSpec, string, string) (*basictypes.ServiceUpdateResponse, error) + RemoveService(string) error + ServiceLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error + GetNodes(basictypes.NodeListOptions) ([]types.Node, error) + GetNode(string) (types.Node, error) + UpdateNode(string, uint64, types.NodeSpec) error + RemoveNode(string, bool) error + GetTasks(basictypes.TaskListOptions) ([]types.Task, error) + GetTask(string) (types.Task, error) + GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error) + CreateSecret(s types.SecretSpec) (string, error) + RemoveSecret(id string) error + GetSecret(id string) (types.Secret, error) + UpdateSecret(id string, version uint64, spec types.SecretSpec) error +} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go b/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go new file mode 100644 index 0000000..e2d5ad1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go @@ -0,0 +1,52 @@ +package swarm + +import "github.com/docker/docker/api/server/router" + +// swarmRouter is a router to talk with the build controller +type swarmRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend) router.Router { + r := &swarmRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the swarm controller +func (sr *swarmRouter) Routes() []router.Route { + return sr.routes +} + +func (sr *swarmRouter) initRoutes() { + sr.routes = []router.Route{ + router.NewPostRoute("/swarm/init", sr.initCluster), + router.NewPostRoute("/swarm/join", sr.joinCluster), + router.NewPostRoute("/swarm/leave", sr.leaveCluster), + router.NewGetRoute("/swarm", sr.inspectCluster), + router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey), + router.NewPostRoute("/swarm/update", sr.updateCluster), + router.NewPostRoute("/swarm/unlock", sr.unlockCluster), + router.NewGetRoute("/services", sr.getServices), + router.NewGetRoute("/services/{id}", sr.getService), + router.NewPostRoute("/services/create", sr.createService), + router.NewPostRoute("/services/{id}/update", sr.updateService), + router.NewDeleteRoute("/services/{id}", sr.removeService), + router.Experimental(router.Cancellable(router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs))), + router.NewGetRoute("/nodes", sr.getNodes), + router.NewGetRoute("/nodes/{id}", sr.getNode), + router.NewDeleteRoute("/nodes/{id}", sr.removeNode), + router.NewPostRoute("/nodes/{id}/update", sr.updateNode), + router.NewGetRoute("/tasks", sr.getTasks), + router.NewGetRoute("/tasks/{id}", sr.getTask), + router.NewGetRoute("/secrets", sr.getSecrets), + router.NewPostRoute("/secrets/create", sr.createSecret), + router.NewDeleteRoute("/secrets/{id}", sr.removeSecret), + router.NewGetRoute("/secrets/{id}", sr.getSecret), + router.NewPostRoute("/secrets/{id}/update", sr.updateSecret), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go b/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go new file mode 100644 index 0000000..fe97643 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go @@ -0,0 +1,418 @@ +package swarm + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.InitRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + nodeID, err := sr.backend.Init(req) + if err != nil { + logrus.Errorf("Error initializing swarm: %v", err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, nodeID) +} + +func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.JoinRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + return sr.backend.Join(req) +} + +func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + return sr.backend.Leave(force) +} + +func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + swarm, err := sr.backend.Inspect() + if err != nil { + logrus.Errorf("Error getting swarm: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, swarm) +} + +func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var swarm types.Spec + if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error()) + } + + var flags types.UpdateFlags + + if value := r.URL.Query().Get("rotateWorkerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for rotateWorkerToken: %s", value) + } + + flags.RotateWorkerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for rotateManagerToken: %s", value) + } + + flags.RotateManagerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerUnlockKey"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value) + } + + flags.RotateManagerUnlockKey = rot + } + + if err := sr.backend.Update(version, swarm, flags); err != nil { + logrus.Errorf("Error configuring swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.UnlockRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + if err := sr.backend.UnlockSwarm(req); err != nil { + logrus.Errorf("Error unlocking swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + unlockKey, err := sr.backend.GetUnlockKey() + if err != nil { + logrus.WithError(err).Errorf("Error retrieving swarm unlock key") + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + }) +} + +func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting services: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, services) +} + +func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + service, err := sr.backend.GetService(vars["id"]) + if err != nil { + logrus.Errorf("Error getting service %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, service) +} + +func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + // Get returns "" if the header does not exist + encodedAuth := r.Header.Get("X-Registry-Auth") + + resp, err := sr.backend.CreateService(service, encodedAuth) + if err != nil { + logrus.Errorf("Error creating service %s: %v", service.Name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, resp) +} + +func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return fmt.Errorf("Invalid service version '%s': %s", rawVersion, err.Error()) + } + + // Get returns "" if the header does not exist + encodedAuth := r.Header.Get("X-Registry-Auth") + + registryAuthFrom := r.URL.Query().Get("registryAuthFrom") + + resp, err := sr.backend.UpdateService(vars["id"], version, service, encodedAuth, registryAuthFrom) + if err != nil { + logrus.Errorf("Error updating service %s: %v", vars["id"], err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveService(vars["id"]); err != nil { + logrus.Errorf("Error removing service %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + serviceName := vars["id"] + logsConfig := &backend.ContainerLogsConfig{ + ContainerLogsOptions: basictypes.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + }, + OutStream: w, + } + + if logsConfig.Details { + return fmt.Errorf("Bad parameters: details is not currently supported") + } + + chStarted := make(chan struct{}) + if err := sr.backend.ServiceLogs(ctx, serviceName, logsConfig, chStarted); err != nil { + select { + case <-chStarted: + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that if needed. + fmt.Fprintf(logsConfig.OutStream, "Error grabbing service logs: %v\n", err) + default: + return err + } + } + + return nil +} + +func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting nodes: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, nodes) +} + +func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + node, err := sr.backend.GetNode(vars["id"]) + if err != nil { + logrus.Errorf("Error getting node %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, node) +} + +func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var node types.NodeSpec + if err := json.NewDecoder(r.Body).Decode(&node); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return fmt.Errorf("Invalid node version '%s': %s", rawVersion, err.Error()) + } + + if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { + logrus.Errorf("Error updating node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + + if err := sr.backend.RemoveNode(vars["id"], force); err != nil { + logrus.Errorf("Error removing node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting tasks: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, tasks) +} + +func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + task, err := sr.backend.GetTask(vars["id"]) + if err != nil { + logrus.Errorf("Error getting task %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, task) +} + +func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secrets) +} + +func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return err + } + + id, err := sr.backend.CreateSecret(secret) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.SecretCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveSecret(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + secret, err := sr.backend.GetSecret(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secret) +} + +func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid secret version")) + } + + id := vars["id"] + if err := sr.backend.UpdateSecret(id, version, secret); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/backend.go b/vendor/github.com/docker/docker/api/server/router/system/backend.go new file mode 100644 index 0000000..6946c4e --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/backend.go @@ -0,0 +1,21 @@ +package system + +import ( + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// Backend is the methods that need to be implemented to provide +// system specific functionality. +type Backend interface { + SystemInfo() (*types.Info, error) + SystemVersion() types.Version + SystemDiskUsage() (*types.DiskUsage, error) + SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(chan interface{}) + AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system.go b/vendor/github.com/docker/docker/api/server/router/system/system.go new file mode 100644 index 0000000..ed23d3b --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system.go @@ -0,0 +1,39 @@ +package system + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon/cluster" +) + +// systemRouter provides information about the Docker system overall. +// It gathers information about host, daemon and container events. +type systemRouter struct { + backend Backend + clusterProvider *cluster.Cluster + routes []router.Route +} + +// NewRouter initializes a new system router +func NewRouter(b Backend, c *cluster.Cluster) router.Router { + r := &systemRouter{ + backend: b, + clusterProvider: c, + } + + r.routes = []router.Route{ + router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), + router.NewGetRoute("/_ping", pingHandler), + router.Cancellable(router.NewGetRoute("/events", r.getEvents)), + router.NewGetRoute("/info", r.getInfo), + router.NewGetRoute("/version", r.getVersion), + router.NewGetRoute("/system/df", r.getDiskUsage), + router.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system +func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system_routes.go b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go new file mode 100644 index 0000000..0d851b6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go @@ -0,0 +1,186 @@ +package system + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + if s.clusterProvider != nil { + info.Swarm = s.clusterProvider.Info() + } + + if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") { + // TODO: handle this conversion in engine-api + type oldInfo struct { + *types.Info + ExecutionDriver string + } + old := &oldInfo{ + Info: info, + ExecutionDriver: "", + } + nameOnlySecurityOptions := []string{} + kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) + if err != nil { + return err + } + for _, s := range kvSecOpts { + nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) + } + old.SecurityOptions = nameOnlySecurityOptions + return httputils.WriteJSON(w, http.StatusOK, old) + } + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info := s.backend.SystemVersion() + info.APIVersion = api.DefaultVersion + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + du, err := s.backend.SystemDiskUsage() + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, du) +} + +func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + since, err := eventTime(r.Form.Get("since")) + if err != nil { + return err + } + until, err := eventTime(r.Form.Get("until")) + if err != nil { + return err + } + + var ( + timeout <-chan time.Time + onlyPastEvents bool + ) + if !until.IsZero() { + if until.Before(since) { + return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))) + } + + now := time.Now() + + onlyPastEvents = until.Before(now) + + if !onlyPastEvents { + dur := until.Sub(now) + timeout = time.NewTimer(dur).C + } + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + defer output.Close() + output.Flush() + + enc := json.NewEncoder(output) + + buffered, l := s.backend.SubscribeToEvents(since, until, ef) + defer s.backend.UnsubscribeFromEvents(l) + + for _, ev := range buffered { + if err := enc.Encode(ev); err != nil { + return err + } + } + + if onlyPastEvents { + return nil + } + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + logrus.Warnf("unexpected event message: %q", ev) + continue + } + if err := enc.Encode(jev); err != nil { + return err + } + case <-timeout: + return nil + case <-ctx.Done(): + logrus.Debug("Client context cancelled, stop sending events") + return nil + } + } +} + +func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *types.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, token, err := s.backend.AuthenticateToRegistry(ctx, config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, ®istry.AuthenticateOKBody{ + Status: status, + IdentityToken: token, + }) +} + +func eventTime(formTime string) (time.Time, error) { + t, tNano, err := timetypes.ParseTimestamps(formTime, -1) + if err != nil { + return time.Time{}, err + } + if t == -1 { + return time.Time{}, nil + } + return time.Unix(t, tNano), nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/backend.go b/vendor/github.com/docker/docker/api/server/router/volume/backend.go new file mode 100644 index 0000000..180c06e --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/backend.go @@ -0,0 +1,17 @@ +package volume + +import ( + // TODO return types need to be refactored into pkg + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// Backend is the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + VolumeRm(name string, force bool) error + VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume.go b/vendor/github.com/docker/docker/api/server/router/volume/volume.go new file mode 100644 index 0000000..4e9f972 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/volume.go @@ -0,0 +1,36 @@ +package volume + +import "github.com/docker/docker/api/server/router" + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volume router +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/volumes", r.getVolumesList), + router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + router.NewPostRoute("/volumes/create", r.postVolumesCreate), + router.NewPostRoute("/volumes/prune", r.postVolumesPrune), + // DELETE + router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go new file mode 100644 index 0000000..cfd4618 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go @@ -0,0 +1,80 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &volumetypes.VolumesListOKBody{Volumes: volumes, Warnings: warnings}) +} + +func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volume, err := v.backend.VolumeInspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, volume) +} + +func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var req volumetypes.VolumesCreateBody + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusCreated, volume) +} + +func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + force := httputils.BoolValue(r, "force") + if err := v.backend.VolumeRm(vars["name"], force); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneReport, err := v.backend.VolumesPrune(filters.Args{}) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/docker/docker/api/server/router_swapper.go b/vendor/github.com/docker/docker/api/server/router_swapper.go new file mode 100644 index 0000000..1ecc7a7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router_swapper.go @@ -0,0 +1,30 @@ +package server + +import ( + "net/http" + "sync" + + "github.com/gorilla/mux" +) + +// routerSwapper is an http.Handler that allows you to swap +// mux routers. +type routerSwapper struct { + mu sync.Mutex + router *mux.Router +} + +// Swap changes the old router with the new one. +func (rs *routerSwapper) Swap(newRouter *mux.Router) { + rs.mu.Lock() + rs.router = newRouter + rs.mu.Unlock() +} + +// ServeHTTP makes the routerSwapper to implement the http.Handler interface. +func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { + rs.mu.Lock() + router := rs.router + rs.mu.Unlock() + router.ServeHTTP(w, r) +} diff --git a/vendor/github.com/docker/docker/api/server/server.go b/vendor/github.com/docker/docker/api/server/server.go new file mode 100644 index 0000000..60ee075 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/server.go @@ -0,0 +1,210 @@ +package server + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// versionMatcher defines a variable matcher to be parsed by the router +// when a request is about to be served. +const versionMatcher = "/v{version:[0-9.]+}" + +// Config provides the configuration for the API server +type Config struct { + Logging bool + EnableCors bool + CorsHeaders string + Version string + SocketGroup string + TLSConfig *tls.Config +} + +// Server contains instance details for the server +type Server struct { + cfg *Config + servers []*HTTPServer + routers []router.Router + routerSwapper *routerSwapper + middlewares []middleware.Middleware +} + +// New returns a new instance of the server based on the specified configuration. +// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). +func New(cfg *Config) *Server { + return &Server{ + cfg: cfg, + } +} + +// UseMiddleware appends a new middleware to the request chain. +// This needs to be called before the API routes are configured. +func (s *Server) UseMiddleware(m middleware.Middleware) { + s.middlewares = append(s.middlewares, m) +} + +// Accept sets a listener the server accepts connections into. +func (s *Server) Accept(addr string, listeners ...net.Listener) { + for _, listener := range listeners { + httpServer := &HTTPServer{ + srv: &http.Server{ + Addr: addr, + }, + l: listener, + } + s.servers = append(s.servers, httpServer) + } +} + +// Close closes servers and thus stop receiving requests +func (s *Server) Close() { + for _, srv := range s.servers { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + } +} + +// serveAPI loops through all initialized servers and spawns goroutine +// with Serve method for each. It sets createMux() as Handler also. +func (s *Server) serveAPI() error { + var chErrors = make(chan error, len(s.servers)) + for _, srv := range s.servers { + srv.srv.Handler = s.routerSwapper + go func(srv *HTTPServer) { + var err error + logrus.Infof("API listen on %s", srv.l.Addr()) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(srv) + } + + for i := 0; i < len(s.servers); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +// HTTPServer contains an instance of http server and the listener. +// srv *http.Server, contains configuration to create an http server and a mux router with all api end points. +// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. +type HTTPServer struct { + srv *http.Server + l net.Listener +} + +// Serve starts listening for inbound requests. +func (s *HTTPServer) Serve() error { + return s.srv.Serve(s.l) +} + +// Close closes the HTTPServer from listening for the inbound requests. +func (s *HTTPServer) Close() error { + return s.l.Close() +} + +func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Define the context that we'll pass around to share info + // like the docker-request-id. + // + // The 'context' will be used for global data that should + // apply to all requests. Data that is specific to the + // immediate function being called should still be passed + // as 'args' on the function call. + ctx := context.WithValue(context.Background(), httputils.UAStringKey, r.Header.Get("User-Agent")) + handlerFunc := s.handlerWithGlobalMiddlewares(handler) + + vars := mux.Vars(r) + if vars == nil { + vars = make(map[string]string) + } + + if err := handlerFunc(ctx, w, r, vars); err != nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + errFormat := "%v" + if statusCode == http.StatusInternalServerError { + errFormat = "%+v" + } + logrus.Errorf("Handler for %s %s returned error: "+errFormat, r.Method, r.URL.Path, err) + httputils.MakeErrorHandler(err)(w, r) + } + } +} + +// InitRouter initializes the list of routers for the server. +// This method also enables the Go profiler if enableProfiler is true. +func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) { + s.routers = append(s.routers, routers...) + + m := s.createMux() + if enableProfiler { + profilerSetup(m) + } + s.routerSwapper = &routerSwapper{ + router: m, + } +} + +// createMux initializes the main router the server uses. +func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + + logrus.Debug("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + err := errors.NewRequestNotFoundError(fmt.Errorf("page not found")) + notFoundHandler := httputils.MakeErrorHandler(err) + m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) + m.NotFoundHandler = notFoundHandler + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution. +func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} + +// DisableProfiler reloads the server mux without adding the profiler routes. +func (s *Server) DisableProfiler() { + s.routerSwapper.Swap(s.createMux()) +} + +// EnableProfiler reloads the server mux adding the profiler routes. +func (s *Server) EnableProfiler() { + m := s.createMux() + profilerSetup(m) + s.routerSwapper.Swap(m) +} diff --git a/vendor/github.com/docker/docker/api/server/server_test.go b/vendor/github.com/docker/docker/api/server/server_test.go new file mode 100644 index 0000000..11831c1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/server_test.go @@ -0,0 +1,46 @@ +package server + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + + "golang.org/x/net/context" +) + +func TestMiddlewares(t *testing.T) { + cfg := &Config{ + Version: "0.1omega2", + } + srv := &Server{ + cfg: cfg, + } + + srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion)) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + + if sv := w.Header().Get("Server"); !strings.Contains(sv, "Docker/0.1omega2") { + t.Fatalf("Expected server version in the header `Docker/0.1omega2`, got %s", sv) + } + + return nil + } + + handlerFunc := srv.handlerWithGlobalMiddlewares(localHandler) + if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 0000000..f07a027 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 0000000..d19e8c9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,7785 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.26" +info: + title: "Docker Engine API" + version: "1.26" + x-logo: + url: "https://docs.docker.com/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. + + For Docker Engine >= 1.13.1, the API version is 1.26. To lock to this version, you prefix the URL with `/v1.26`. For example, calling `/info` is the same as calling `/v1.26/info`. + + Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. + + In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated will be removed in a future version of Docker. + + The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. + + This documentation is for version 1.26 of the API, which was introduced with Docker 1.13.1. Use this table to find documentation for previous versions of the API: + + Docker version | API version | Changes + ----------------|-------------|--------- + 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes) + 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes) + 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes) + 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes) + 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes) + 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes) + 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes) + 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes) + + # Authentication + + Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldly. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + + To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. + - name: "Secret" + x-displayName: "Secrets" + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + default: {} + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DiskQuota: + description: "Disk limit (in bytes)." + type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + PidsLimit: + description: "Tune a container's pids limit. Set -1 for unlimited." + type: "integer" + format: "int64" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: "Maximum IO in bytes per second for the container system drive (Windows only)" + type: "integer" + format: "int64" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. + - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to." + PortBindings: + type: "object" + description: "A map of exposed container ports and the host port they should map to." + additionalProperties: + type: "object" + properties: + HostIp: + type: "string" + description: "The host IP address" + HostPort: + type: "string" + description: "The host port number, as a string" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `[:]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container." + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container." + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: "IPC namespace to use for the container." + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: "Allocates a random host port for all of a container's exposed ports." + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + ShmSize: + type: "integer" + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array. (Windows only)" + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: "Isolation technology of the container. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + + Config: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]` + type: "array" + items: + type: "string" + Cmd: + description: "Command to run specified as a string or an array of strings." + type: + - "array" + - "string" + items: + type: "string" + Healthcheck: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `{}` inherit healthcheck from image or parent image + - `{"NONE"}` disable healthcheck + - `{"CMD", args...}` exec arguments directly + - `{"CMD-SHELL", command}` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: "The time to wait between checks in nanoseconds. 0 means inherit." + type: "integer" + Timeout: + description: "The time to wait before considering the check to have hung. 0 means inherit." + type: "integer" + Retries: + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + type: "integer" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + properties: + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: + - "array" + - "string" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." + type: "array" + items: + type: "string" + + NetworkConfig: + description: "TODO: check is correct" + type: "object" + properties: + Bridge: + type: "string" + Gateway: + type: "string" + Address: + type: "string" + IPPrefixLen: + type: "integer" + MacAddress: + type: "string" + PortMapping: + type: "string" + Ports: + type: "array" + items: + $ref: "#/definitions/Port" + + GraphDriver: + description: "Information about this container's graph driver." + type: "object" + properties: + Name: + type: "string" + Data: + type: "object" + additionalProperties: + type: "string" + + Image: + type: "object" + properties: + Id: + type: "string" + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + Comment: + type: "string" + Created: + type: "string" + Container: + type: "string" + ContainerConfig: + $ref: "#/definitions/Config" + DockerVersion: + type: "string" + Author: + type: "string" + Config: + $ref: "#/definitions/Config" + Architecture: + type: "string" + Os: + type: "string" + Size: + type: "integer" + format: "int64" + VirtualSize: + type: "integer" + format: "int64" + GraphDriver: + $ref: "#/definitions/GraphDriver" + RootFS: + type: "object" + properties: + Type: + type: "string" + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver specific options used when creating the volume." + additionalProperties: + type: "string" + UsageData: + type: "object" + required: [Size, RefCount] + properties: + Size: + type: "integer" + description: "The disk space used by the volume (local driver only)" + default: -1 + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: "The number of containers referencing this volume." + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: + type: "object" + properties: + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + CreateImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + ProgressDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + IPAMConfig: + description: "IPAM configurations for the endpoint" + type: "object" + properties: + IPv4Address: + type: "string" + IPv6Address: + type: "string" + LinkLocalIPs: + type: "array" + items: + type: "string" + Links: + type: "array" + items: + type: "string" + Aliases: + type: "array" + items: + type: "string" + NetworkID: + type: "string" + EndpointID: + type: "string" + Gateway: + type: "string" + IPAddress: + type: "string" + IPPrefixLen: + type: "integer" + IPv6Gateway: + type: "string" + GlobalIPv6Address: + type: "string" + GlobalIPv6PrefixLen: + type: "integer" + format: "int64" + MacAddress: + type: "string" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + Destination: + type: "string" + x-nullable: false + Type: + type: "string" + x-nullable: false + Options: + type: "array" + items: + type: "string" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + Name: + type: "string" + x-nullable: false + Enabled: + description: "True when the plugin is running. False when the plugin is not running, only installed." + type: "boolean" + x-nullable: false + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PropagatedMount + - Mounts + - Env + - Args + properties: + Description: + type: "string" + x-nullable: false + Documentation: + type: "string" + x-nullable: false + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + Socket: + type: "string" + x-nullable: false + Entrypoint: + type: "array" + items: + type: "string" + WorkDir: + type: "string" + x-nullable: false + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + GID: + type: "integer" + format: "uint32" + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + AllowAllDevices: + type: "boolean" + x-nullable: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + diff_ids: + type: "array" + items: + type: "string" + example: + Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + Node: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + type: "object" + properties: + Hostname: + type: "string" + Platform: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + Resources: + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + MemoryBytes: + type: "integer" + format: "int64" + Engine: + type: "object" + properties: + EngineVersion: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + ID: "24ifsmvkjbyhk" + Version: + Index: 8 + CreatedAt: "2016-06-07T20:31:11.853781916Z" + UpdatedAt: "2016-06-07T20:31:11.999868824Z" + Spec: + Name: "my-node" + Role: "manager" + Availability: "active" + Labels: + foo: "bar" + Description: + Hostname: "bf3067039e47" + Platform: + Architecture: "x86_64" + OS: "linux" + Resources: + NanoCPUs: 4000000000 + MemoryBytes: 8272408576 + Engine: + EngineVersion: "1.13.0" + Labels: + foo: "bar" + Plugins: + - Type: "Volume" + Name: "local" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + Status: + State: "ready" + Addr: "172.17.0.2" + ManagerStatus: + Leader: true + Reachability: "reachable" + Addr: "172.17.0.2:2377" + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Orchestration: + description: "Orchestration configuration." + type: "object" + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "int64" + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "int64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." + type: "integer" + format: "int64" + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + Dispatcher: + description: "Dispatcher configuration." + type: "object" + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + CAConfig: + description: "CA configuration." + type: "object" + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." + type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: "If set, generate a key and use it to lock data stored on the managers." + type: "boolean" + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if unspecified by a service. + + Updating this value will only have an affect on new tasks. Old tasks will continue use their previously configured log driver until recreated. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + example: + Name: "default" + Orchestration: + TaskHistoryRetentionLimit: 10 + Raft: + SnapshotInterval: 10000 + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + ElectionTick: 3 + Dispatcher: + HeartbeatPeriod: 5000000000 + CAConfig: + NodeCertExpiry: 7776000000000000 + JoinTokens: + Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + EncryptionConfig: + AutoLockManagers: false + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/SwarmSpec" + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + ContainerSpec: + type: "object" + properties: + Image: + description: "The image name to use for the container." + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + Mounts: + description: "Specification for mounts to be added to containers created as part of the service." + type: "array" + items: + $ref: "#/definitions/Mount" + StopGracePeriod: + description: "Amount of time to wait for the container to terminate before forcefully killing it." + type: "integer" + format: "int64" + DNSConfig: + description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." + type: "array" + items: + type: "string" + Resources: + description: "Resource requirements which apply to each individual container created as part of the service." + type: "object" + properties: + Limits: + description: "Define resources limits." + type: "object" + properties: + NanoCPUs: + description: "CPU limit in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory limit in Bytes." + type: "integer" + format: "int64" + Reservation: + description: "Define resources reservation." + properties: + NanoCPUs: + description: "CPU reservation in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory reservation in Bytes." + type: "integer" + format: "int64" + RestartPolicy: + description: "Specification for the restart policy which applies to containers created as part of this service." + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + type: "integer" + format: "int64" + default: 0 + Window: + description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: "An array of constraints." + type: "array" + items: + type: "string" + ForceUpdate: + description: "A counter that triggers an update even if no relevant parameters have been changed." + type: "integer" + Networks: + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + LogDriver: + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an updated task fails to run, or stops running during the update." + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: "Amount of time to monitor each updated task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Networks: + description: "Array of network names or IDs to attach the service to." + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: "The mode of resolution to use for internal load balancing + between tasks." + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + Service: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + ImageDeleteResponse: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: "Base64-url-safe-encoded secret data" + type: "array" + items: + type: "string" + Secret: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" +paths: + /containers/json: + get: + summary: "List containers" + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: "Return all containers. By default, only running containers are shown" + type: "boolean" + default: false + - name: "limit" + in: "query" + description: "Return this number of most recently created containers, including non-running ones." + type: "integer" + - name: "size" + in: "query" + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. + + Available filters: + - `exited=` containers with exit code of `` + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `id=` a container's ID + - `name=` a container's name + - `is-task=`(`true`|`false`) + - `ancestor`=(`[:]`, ``, or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + - `network`=(`` or ``) + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + type: "string" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/Config" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: -1 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: "object" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 406: + description: "impossible to attach" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: "The status of the container. For example, `running` or `exited`." + type: "string" + Running: + description: "Whether this container is running." + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." + type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." + type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + type: "string" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriver" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." + type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/Config" + NetworkSettings: + $ref: "#/definitions/NetworkConfig" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + SecondaryIPAddresses: null + SecondaryIPv6Addresses: null + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. + operationId: "ContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + type: "object" + properties: + Path: + description: "Path to file that has changed" + type: "string" + Kind: + description: "Kind of change" + type: "integer" + enum: + - 0 + - 1 + - 2 + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage statistics. + + The `precpu_stats` is the CPU statistic of last read, which is used for calculating the CPU usage percentage. It is not the same as the `cpu_stats` field. + operationId: "ContainerStats" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." + type: "boolean" + default: true + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing to the container." + operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Stream attached streams from the the time the request was made onwards" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`." + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." + operationId: "ContainerArchiveHead" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "TODO" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get an tar archive of a resource in the filesystem of container id." + operationId: "ContainerGetArchive" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "ContainerPutArchive" + consumes: + - "application/x-tar" + - "application/octet-stream" + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. + + Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `before`=(`[:]`, `` or ``) + - `since`=(`[:]`, `` or ``) + - `reference`=(`[:]`) + type: "string" + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)" + type: "integer" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:`. Any other value is taken as a custom network's + name to which this container should connect to." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/tar" + default: "application/tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." + operationId: "ImageHistory" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + Id: + type: "string" + Created: + type: "integer" + format: "int64" + CreatedBy: + type: "string" + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + Comment: + type: "string" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were referenced by that image. + + Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `stars=` + - `is-automated=(true|false)` + - `is-official=(true|false)` + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + Architecture: + type: "string" + Containers: + type: "integer" + ContainersRunning: + type: "integer" + ContainersStopped: + type: "integer" + ContainersPaused: + type: "integer" + CpuCfsPeriod: + type: "boolean" + CpuCfsQuota: + type: "boolean" + Debug: + type: "boolean" + DiscoveryBackend: + type: "string" + DockerRootDir: + type: "string" + Driver: + type: "string" + DriverStatus: + type: "array" + items: + type: "array" + items: + type: "string" + SystemStatus: + type: "array" + items: + type: "array" + items: + type: "string" + Plugins: + type: "object" + properties: + Volume: + type: "array" + items: + type: "string" + Network: + type: "array" + items: + type: "string" + ExperimentalBuild: + type: "boolean" + HttpProxy: + type: "string" + HttpsProxy: + type: "string" + ID: + type: "string" + IPv4Forwarding: + type: "boolean" + Images: + type: "integer" + IndexServerAddress: + type: "string" + InitPath: + type: "string" + InitSha1: + type: "string" + KernelVersion: + type: "string" + Labels: + type: "array" + items: + type: "string" + MemTotal: + type: "integer" + MemoryLimit: + type: "boolean" + NCPU: + type: "integer" + NEventsListener: + type: "integer" + NFd: + type: "integer" + NGoroutines: + type: "integer" + Name: + type: "string" + NoProxy: + type: "string" + OomKillDisable: + type: "boolean" + OSType: + type: "string" + OomScoreAdj: + type: "integer" + OperatingSystem: + type: "string" + RegistryConfig: + type: "object" + properties: + IndexConfigs: + type: "object" + additionalProperties: + type: "object" + properties: + Mirrors: + type: "array" + items: + type: "string" + Name: + type: "string" + Official: + type: "boolean" + Secure: + type: "boolean" + InsecureRegistryCIDRs: + type: "array" + items: + type: "string" + SwapLimit: + type: "boolean" + SystemTime: + type: "string" + ServerVersion: + type: "string" + examples: + application/json: + Architecture: "x86_64" + ClusterStore: "etcd://localhost:2379" + CgroupDriver: "cgroupfs" + Containers: 11 + ContainersRunning: 7 + ContainersStopped: 3 + ContainersPaused: 1 + CpuCfsPeriod: true + CpuCfsQuota: true + Debug: false + DockerRootDir: "/var/lib/docker" + Driver: "btrfs" + DriverStatus: + - + - "" + ExperimentalBuild: false + HttpProxy: "http://test:test@localhost:8080" + HttpsProxy: "https://test:test@localhost:8080" + ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + IPv4Forwarding: true + Images: 16 + IndexServerAddress: "https://index.docker.io/v1/" + InitPath: "/usr/bin/docker" + InitSha1: "" + KernelMemory: true + KernelVersion: "3.12.0-1-amd64" + Labels: + - "storage=ssd" + MemTotal: 2099236864 + MemoryLimit: true + NCPU: 1 + NEventsListener: 0 + NFd: 11 + NGoroutines: 21 + Name: "prod-server-42" + NoProxy: "9.81.1.160" + OomKillDisable: true + OSType: "linux" + OperatingSystem: "Boot2Docker" + Plugins: + Volume: + - "local" + Network: + - "null" + - "host" + - "bridge" + RegistryConfig: + IndexConfigs: + docker.io: + Name: "docker.io" + Official: true + Secure: true + InsecureRegistryCIDRs: + - "127.0.0.0/8" + SecurityOptions: + - Key: "Name" + Value: "seccomp" + - Key: "Profile" + Value: "default" + - Key: "Name" + Value: "apparmor" + - Key: "Name" + Value: "selinux" + - Key: "Name" + Value: "userns" + ServerVersion: "1.9.0" + SwapLimit: false + SystemStatus: + - + - "State" + - "Healthy" + SystemTime: "2015-03-10T11:11:23.730591467-07:00" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "1.13.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.6.3" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.25" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: + - "text/plain" + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/Config" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update` + + Images report these events: `delete, import, load, pull, push, save, tag, untag` + + Volumes report these events: `create, mount, unmount, destroy` + + Networks report these events: `create, connect, disconnect, destroy` + + The Docker daemon reports these events: `reload` + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `container=` container name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon` + - `volume=` volume name or ID + - `network=` network name or ID + - `daemon=` daemon name or ID + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "" + Labels: null + Scope: "" + Options: null + UsageData: + Size: 0 + RefCount: 0 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." + operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `name=` Matches all or part of a volume name. + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches all or part of a volume + driver name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Containers: + 39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867: + EndpointID: "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda" + MacAddress: "02:42:ac:11:00:02" + IPv4Address: "172.17.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + consumes: + - "application/json" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + example: + - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: "The version number of the node object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + description: "The tokens workers and managers need to join the swarm." + type: "object" + properties: + Worker: + description: "The token workers can use to join the swarm." + type: "string" + Manager: + description: "The token managers can use to join the swarm." + type: "string" + example: + CreatedAt: "2016-08-15T16:00:20.349727406Z" + Spec: + Dispatcher: + HeartbeatPeriod: 5000000000 + Orchestration: + TaskHistoryRetentionLimit: 10 + CAConfig: + NodeCertExpiry: 7776000000000000 + Raft: + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + SnapshotInterval: 10000 + ElectionTick: 3 + TaskDefaults: {} + EncryptionConfig: + AutoLockManagers: false + Name: "default" + JoinTokens: + Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" + Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + ID: "70ilmkj2f6sp2137c753w2nmt" + UpdatedAt: "2016-08-15T16:32:09.623207604Z" + Version: + Index: 51 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 406: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or that it will break the cluster." + in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=` + - `name=` + - `label=` + tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created service." + type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "server error or node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Delay: 30000000000 + Parallelism: 2 + FailureAction: "pause" + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ImageDeleteResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: "The version number of the service object being updated. This is required to avoid conflicting writes." + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + type: "string" + description: "If the X-Registry-Auth header is not specified, this + parameter indicates where to find registry authorization credentials. The + valid values are `spec` and `previous-spec`." + default: "spec" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + operationId: "ServiceLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "details" + in: "query" + description: "Show extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters: + + - `id=` + - `name=` + - `service=` + - `node=` + - `label=key` or `label="key=value"` + - `desired-state=(running | shutdown | accepted)` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created secret." + type: "string" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + 406: + description: "server error or node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 406: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + - name: "version" + in: "query" + description: "The version number of the secret object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Secret"] diff --git a/vendor/github.com/docker/docker/api/templates/server/operation.gotmpl b/vendor/github.com/docker/docker/api/templates/server/operation.gotmpl new file mode 100644 index 0000000..3a3d752 --- /dev/null +++ b/vendor/github.com/docker/docker/api/templates/server/operation.gotmpl @@ -0,0 +1,26 @@ +package {{ .Package }} + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +import ( + "net/http" + + context "golang.org/x/net/context" + + {{ range .DefaultImports }}{{ printf "%q" . }} + {{ end }} + {{ range $key, $value := .Imports }}{{ $key }} {{ printf "%q" $value }} + {{ end }} +) + + +{{ range .ExtraSchemas }} +// {{ .Name }} {{ template "docstring" . }} +// swagger:model {{ .Name }} +{{ template "schema" . }} +{{ end }} diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go new file mode 100644 index 0000000..056af6b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/auth.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go new file mode 100644 index 0000000..abc0bba --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -0,0 +1,84 @@ +// Package backend includes types to send information to server backends. +package backend + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/streamformatter" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys string + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/sderr messages accordingly. + // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... + // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// ContainerLogsConfig holds configs for logging operations. Exists +// for users of the backend to to pass it a logging configuration. +type ContainerLogsConfig struct { + types.ContainerLogsOptions + OutStream io.Writer +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. +type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Pid int +} + +// ExecProcessConfig holds information about the exec process +// running on the host. +type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} + +// ContainerCommitConfig is a wrapper around +// types.ContainerCommitConfig that also +// transports configuration changes for a container. +type ContainerCommitConfig struct { + types.ContainerCommitConfig + Changes []string +} + +// ProgressWriter is an interface +// to transport progress streams. +type ProgressWriter struct { + Output io.Writer + StdoutFormatter *streamformatter.StdoutFormatter + StderrFormatter *streamformatter.StderrFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go new file mode 100644 index 0000000..931ae10 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go new file mode 100644 index 0000000..7900d64 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -0,0 +1,378 @@ +package types + +import ( + "bufio" + "io" + "net" + "os" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // See the parsing of buildArgs in api/server/router/build/build_routes.go + // for an explaination of why BuildArgs needs to use *string instead of + // just a string + BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) + SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +//ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// VersionResponse holds version information for the client and the server +type VersionResponse struct { + Client *Version + Server *Version +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v VersionResponse) ServerOK() bool { + return v.Server != nil +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SecretRequestOption is a type for requesting secrets +type SecretRequestOption struct { + Source string + Target string + UID string + GID string + Mode os.FileMode +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go new file mode 100644 index 0000000..20c19f2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -0,0 +1,69 @@ +package types + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. +type PluginDisableConfig struct { + ForceDisable bool +} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go new file mode 100644 index 0000000..fc050e5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -0,0 +1,62 @@ +package container + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go new file mode 100644 index 0000000..d028e3b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody container create created body +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go new file mode 100644 index 0000000..81ee12c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody container update o k body +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go new file mode 100644 index 0000000..16cf335 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBody container wait o k body +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go new file mode 100644 index 0000000..0c82d62 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -0,0 +1,333 @@ +package container + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + +// NetworkMode represents the container network stack. +type NetworkMode string + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its private ipc stack. +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's ipc stack. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's ipc stack. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the ipc stack is valid. +func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses the a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart of exiting with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless user has put it to stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DiskQuota int64 // Disk limit (in bytes) + KernelMemory int64 // Kernel memory limit (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit int64 // Setting pids limit for a container + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig the non-portable Config structure of a container. +// Here, "non-portable" means "dependent of the host we are running on". +// Portable information *should* appear in Config. +type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + ConsoleSize [2]uint // Initial console size (height,width) + Isolation Isolation // Isolation technology of the container (eg default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` + + // Custom init path + InitPath string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go new file mode 100644 index 0000000..9fb79be --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package container + +import "strings" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go new file mode 100644 index 0000000..0ee332b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -0,0 +1,87 @@ +package container + +import ( + "strings" +) + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsContainer indicates whether container uses a container network stack. +// Returns false as windows doesn't support this mode +func (n NetworkMode) IsContainer() bool { + return false +} + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether container uses the host network stack. +// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// ConnectedContainer is the id of the container which network this container is connected to. +// Returns blank string on windows +func (n NetworkMode) ConnectedContainer() string { + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 0000000..dc942d9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go new file mode 100644 index 0000000..7129a65 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -0,0 +1,42 @@ +package events + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // DaemonEventType is the event type that daemon generate + DaemonEventType = "daemon" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" + // PluginEventType is the event type that plugins generate + PluginEventType = "plugin" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set or attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + + Type string + Action string + Actor Actor + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go new file mode 100644 index 0000000..e01a41d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -0,0 +1,310 @@ +// Package filters provides helper function to parse and handle command line +// filter, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" +) + +// Args stores filter arguments as map key:{map key: bool}. +// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values. +// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +type Args struct { + fields map[string]map[string]bool +} + +// NewArgs initializes a new Args struct. +func NewArgs() Args { + return Args{fields: map[string]map[string]bool{}} +} + +// ParseFlag parses the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into a string for easy transport from client to server. +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + buf, err := json.Marshal(a.fields) + if err != nil { + return "", err + } + return string(buf), nil +} + +// ToParamWithVersion packs the Args into a string for easy transport from client to server. +// The generated string will depend on the specified version (corresponding to the API version). +func ToParamWithVersion(version string, a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + // for daemons older than v1.10, filter must be of the form map[string][]string + buf := []byte{} + err := errors.New("") + if version != "" && versions.LessThan(version, "1.22") { + buf, err = json.Marshal(convertArgsToSlice(a.fields)) + } else { + buf, err = json.Marshal(a.fields) + } + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + if len(p) == 0 { + return NewArgs(), nil + } + + r := strings.NewReader(p) + d := json.NewDecoder(r) + + m := map[string]map[string]bool{} + if err := d.Decode(&m); err != nil { + r.Seek(0, 0) + + // Allow parsing old arguments in slice format. + // Because other libraries might be sending them in this format. + deprecated := map[string][]string{} + if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { + m = deprecatedArgs(deprecated) + } else { + return NewArgs(), err + } + } + return Args{m}, nil +} + +// Get returns the list of values associates with a field. +// It returns a slice of strings to keep backwards compatibility with old code. +func (filters Args) Get(field string) []string { + values := filters.fields[field] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add adds a new value to a filter field. +func (filters Args) Add(name, value string) { + if _, ok := filters.fields[name]; ok { + filters.fields[name][value] = true + } else { + filters.fields[name] = map[string]bool{value: true} + } +} + +// Del removes a value from a filter field. +func (filters Args) Del(name, value string) { + if _, ok := filters.fields[name]; ok { + delete(filters.fields[name], value) + if len(filters.fields[name]) == 0 { + delete(filters.fields, name) + } + } +} + +// Len returns the number of fields in the arguments. +func (filters Args) Len() int { + return len(filters.fields) +} + +// MatchKVList returns true if the values for the specified field matches the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'label' and sources are {'label1': '1', 'label2': '2'} +// it returns true. +func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters.fields[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if the values for the specified field matches the source string +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. +func (filters Args) Match(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the filters. +func (filters Args) ExactMatch(field, source string) bool { + fieldValues, ok := filters.fields[field] + //do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. +func (filters Args) UniqueExactMatch(field, source string) bool { + fieldValues := filters.fields[field] + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + if len(filters.fields[field]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// FuzzyMatch returns true if the source matches exactly one of the filters, +// or the source has one of the filters as a prefix. +func (filters Args) FuzzyMatch(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Include returns true if the name of the field to filter is in the filters. +func (filters Args) Include(field string) bool { + _, ok := filters.fields[field] + return ok +} + +// Validate ensures that all the fields in the filter are valid. +// It returns an error as soon as it finds an invalid field. +func (filters Args) Validate(accepted map[string]bool) error { + for name := range filters.fields { + if !accepted[name] { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + return nil +} + +// WalkValues iterates over the list of filtered values for a field. +// It stops the iteration if it finds an error and it returns that error. +func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { + return nil + } + for v := range filters.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse_test.go b/vendor/github.com/docker/docker/api/types/filters/parse_test.go new file mode 100644 index 0000000..b2ed27b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/parse_test.go @@ -0,0 +1,417 @@ +package filters + +import ( + "fmt" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = NewArgs() + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args.Get("created")) != 1 { + t.Errorf("failed to set this arg") + } + if len(args.Get("image.name")) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args.Len() != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat { + t.Fatalf("Expected ErrBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func TestToParamWithVersion(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + str1, err := ToParamWithVersion("1.21", a) + if err != nil { + t.Errorf("failed to marshal the filters with version < 1.22: %s", err) + } + str2, err := ToParamWithVersion("1.22", a) + if err != nil { + t.Errorf("failed to marshal the filters with version >= 1.22: %s", err) + } + if str1 != `{"created":["today"],"image.name":["*untu","ubuntu*"]}` && + str1 != `{"created":["today"],"image.name":["ubuntu*","*untu"]}` { + t.Errorf("incorrectly marshaled the filters: %s", str1) + } + if str2 != `{"created":{"today":true},"image.name":{"*untu":true,"ubuntu*":true}}` && + str2 != `{"created":{"today":true},"image.name":{"ubuntu*":true,"*untu":true}}` { + t.Errorf("incorrectly marshaled the filters: %s", str2) + } +} + +func TestFromParam(t *testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valid := map[*Args][]string{ + &Args{fields: map[string]map[string]bool{"key": {"value": true}}}: { + `{"key": ["value"]}`, + `{"key": {"value": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { + `{"key": ["value1", "value2"]}`, + `{"key": {"value1": true, "value2": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { + `{"key1": ["value1"], "key2": ["value2"]}`, + `{"key1": {"value1": true}, "key2": {"value2": true}}`, + }, + } + + for _, invalid := range invalids { + if _, err := FromParam(invalid); err == nil { + t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + + for expectedArgs, matchers := range valid { + for _, json := range matchers { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if args.Len() != expectedArgs.Len() { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + for key, expectedValues := range expectedArgs.fields { + values := args.Get(key) + + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + + for _, v := range values { + if !expectedValues[v] { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if a.Len() != v1.Len() { + t.Errorf("these should both be empty sets") + } +} + +func TestArgsMatchKVListEmptySources(t *testing.T) { + args := NewArgs() + if !args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected true for (%v,created), got true", args) + } + + args = Args{map[string]map[string]bool{"created": {"today": true}}} + if args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected false for (%v,created), got true", args) + } +} + +func TestArgsMatchKVList(t *testing.T) { + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value1": true}}, + }: "labels", + } + + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key4": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value3": true}}, + }: "labels", + } + + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "today", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to*": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tod": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"anyting": true, "to*": true}}, + }: "created", + } + + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tomorrow": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(day": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today1": true}, + "labels": map[string]bool{"today": true}}, + }: "created", + } + + for args, field := range differs { + if args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} + +func TestAdd(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + v := f.fields["status"] + if len(v) != 1 || !v["running"] { + t.Fatalf("Expected to include a running status, got %v", v) + } + + f.Add("status", "paused") + if len(v) != 2 || !v["paused"] { + t.Fatalf("Expected to include a paused status, got %v", v) + } +} + +func TestDel(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Del("status", "running") + v := f.fields["status"] + if v["running"] { + t.Fatalf("Expected to not include a running status filter, got true") + } +} + +func TestLen(t *testing.T) { + f := NewArgs() + if f.Len() != 0 { + t.Fatalf("Expected to not include any field") + } + f.Add("status", "running") + if f.Len() != 1 { + t.Fatalf("Expected to include one field") + } +} + +func TestExactMatch(t *testing.T) { + f := NewArgs() + + if !f.ExactMatch("status", "running") { + t.Fatalf("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + f.Add("status", "pause*") + + if !f.ExactMatch("status", "running") { + t.Fatalf("Expected to match `running` with one of the filters, got false") + } + + if f.ExactMatch("status", "paused") { + t.Fatalf("Expected to not match `paused` with one of the filters, got true") + } +} + +func TestOnlyOneExactMatch(t *testing.T) { + f := NewArgs() + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` with one of the filters, got false") + } + + if f.UniqueExactMatch("status", "paused") { + t.Fatalf("Expected to not match `paused` with one of the filters, got true") + } + + f.Add("status", "pause") + if f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to not match only `running` with two filters, got true") + } +} + +func TestInclude(t *testing.T) { + f := NewArgs() + if f.Include("status") { + t.Fatalf("Expected to not include a status key, got true") + } + f.Add("status", "running") + if !f.Include("status") { + t.Fatalf("Expected to include a status key, got false") + } +} + +func TestValidate(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + + valid := map[string]bool{ + "status": true, + "dangling": true, + } + + if err := f.Validate(valid); err != nil { + t.Fatal(err) + } + + f.Add("bogus", "running") + if err := f.Validate(valid); err == nil { + t.Fatalf("Expected to return an error, got nil") + } +} + +func TestWalkValues(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Add("status", "paused") + + f.WalkValues("status", func(value string) error { + if value != "running" && value != "paused" { + t.Fatalf("Unexpected value %s", value) + } + return nil + }) + + err := f.WalkValues("status", func(value string) error { + return fmt.Errorf("return") + }) + if err == nil { + t.Fatalf("Expected to get an error, got nil") + } + + err = f.WalkValues("foo", func(value string) error { + return fmt.Errorf("return") + }) + if err != nil { + t.Fatalf("Expected to not iterate when the field doesn't exist, got %v", err) + } +} + +func TestFuzzyMatch(t *testing.T) { + f := NewArgs() + f.Add("container", "foo") + + cases := map[string]bool{ + "foo": true, + "foobar": true, + "barfoo": false, + "bar": false, + } + for source, match := range cases { + got := f.FuzzyMatch("container", source) + if got != match { + t.Fatalf("Expected %v, got %v: %s", match, got, source) + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 0000000..7592d2f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 0000000..e145b3d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 0000000..31f2365 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,113 @@ +package mount + +import ( + "os" +) + +// Type represents the type of a mount. +type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be convered to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 0000000..832b3ed --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,59 @@ +package network + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string //Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// PeerInfo represents one peer of a overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 0000000..6cc7a23 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,189 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True when the plugin is running. False when the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. +// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. +// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 0000000..5699010 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 0000000..32962dc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 0000000..c82f204 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 0000000..5c031cf --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 0000000..d6f7553 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,64 @@ +package types + +import ( + "encoding/json" + "fmt" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = "NetworkDriver" + volumeDriver = "VolumeDriver" +) + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 0000000..ad52d46 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // IP + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/reference/image_reference.go b/vendor/github.com/docker/docker/api/types/reference/image_reference.go new file mode 100644 index 0000000..be9cf8e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/reference/image_reference.go @@ -0,0 +1,34 @@ +package reference + +import ( + distreference "github.com/docker/distribution/reference" +) + +// Parse parses the given references and returns the repository and +// tag (if present) from it. If there is an error during parsing, it will +// return an error. +func Parse(ref string) (string, string, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return "", "", err + } + + tag := GetTagFromNamedRef(distributionRef) + return distributionRef.Name(), tag, nil +} + +// GetTagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api makes the distinction between repository +// and tags. +func GetTagFromNamedRef(ref distreference.Named) string { + var tag string + switch x := ref.(type) { + case distreference.Digested: + tag = x.Digest().String() + case distreference.NamedTagged: + tag = x.Tag() + default: + tag = "latest" + } + return tag +} diff --git a/vendor/github.com/docker/docker/api/types/reference/image_reference_test.go b/vendor/github.com/docker/docker/api/types/reference/image_reference_test.go new file mode 100644 index 0000000..61fb676 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/reference/image_reference_test.go @@ -0,0 +1,72 @@ +package reference + +import ( + "testing" +) + +func TestParse(t *testing.T) { + testCases := []struct { + ref string + expectedName string + expectedTag string + expectedError bool + }{ + { + ref: "", + expectedName: "", + expectedTag: "", + expectedError: true, + }, + { + ref: "repository", + expectedName: "repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "repository:tag", + expectedName: "repository", + expectedTag: "tag", + expectedError: false, + }, + { + ref: "test.com/repository", + expectedName: "test.com/repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "test.com:5000/test/repository", + expectedName: "test.com:5000/test/repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "test.com:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedName: "test.com:5000/repo", + expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedError: false, + }, + { + ref: "test.com:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedName: "test.com:5000/repo", + expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedError: false, + }, + } + + for _, c := range testCases { + name, tag, err := Parse(c.ref) + if err != nil && c.expectedError { + continue + } else if err != nil { + t.Fatalf("error with %s: %s", c.ref, err.Error()) + } + if name != c.expectedName { + t.Fatalf("expected name %s, got %s", c.expectedName, name) + } + if tag != c.expectedTag { + t.Fatalf("expected tag %s, got %s", c.expectedTag, tag) + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 0000000..5e37d19 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 0000000..28fafab --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,104 @@ +package registry + +import ( + "encoding/json" + "net" +) + +// ServiceConfig stores daemon registry services configuration. +type ServiceConfig struct { + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. + IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go new file mode 100644 index 0000000..4f02ef3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/seccomp.go @@ -0,0 +1,93 @@ +package types + +// Seccomp represents the config for a seccomp profile for syscall restriction. +type Seccomp struct { + DefaultAction Action `json:"defaultAction"` + // Architectures is kept to maintain backward compatibility with the old + // seccomp profile. + Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Architecture is used to represent an specific architecture +// and its sub-architectures +type Architecture struct { + Arch Arch `json:"architecture"` + SubArches []Arch `json:"subArchitectures"` +} + +// Arch used for architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + ActKill Action = "SCMP_ACT_KILL" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = "SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Filter is used to conditionally apply Seccomp rules +type Filter struct { + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` +} + +// Syscall is used to match a group of syscalls in Seccomp +type Syscall struct { + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Action Action `json:"action"` + Args []*Arg `json:"args"` + Comment string `json:"comment"` + Includes Filter `json:"includes"` + Excludes Filter `json:"excludes"` +} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go new file mode 100644 index 0000000..74ea64b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go new file mode 100644 index 0000000..9bf1928 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -0,0 +1,178 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). + // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. Note that we dont `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. + RxDropped uint64 `json:"rx_dropped"` + // Bytes sent. Windows and Linux. + TxBytes uint64 `json:"tx_bytes"` + // Packets sent. Windows and Linux. + TxPackets uint64 `json:"tx_packets"` + // Sent errors. Not used on Windows. Note that we dont `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + TxErrors uint64 `json:"tx_errors"` + // Outgoing packets dropped. Windows and Linux. + TxDropped uint64 `json:"tx_dropped"` + // Endpoint ID. Not used on Linux. + EndpointID string `json:"endpoint_id,omitempty"` + // Instance ID. Not used on Linux. + InstanceID string `json:"instance_id,omitempty"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// Stats is Ultimate struct aggregating all types of stats of one container +type Stats struct { + // Common stats + Read time.Time `json:"read"` + PreRead time.Time `json:"preread"` + + // Linux specific stats, not populated on Windows. + PidsStats PidsStats `json:"pids_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + + // Windows specific stats, not populated on Linux. + NumProcs uint32 `json:"num_procs"` + StorageStats StorageStats `json:"storage_stats,omitempty"` + + // Shared stats + CPUStats CPUStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" + MemoryStats MemoryStats `json:"memory_stats,omitempty"` +} + +// StatsJSON is newly used Networks +type StatsJSON struct { + Stats + + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + + // Networks request version >=1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go new file mode 100644 index 0000000..bad493f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go b/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go new file mode 100644 index 0000000..1163b36 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go @@ -0,0 +1,86 @@ +package strslice + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestStrSliceMarshalJSON(t *testing.T) { + for _, testcase := range []struct { + input StrSlice + expected string + }{ + // MADNESS(stevvooe): No clue why nil would be "" but empty would be + // "null". Had to make a change here that may affect compatibility. + {input: nil, expected: "null"}, + {StrSlice{}, "[]"}, + {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`}, + } { + data, err := json.Marshal(testcase.input) + if err != nil { + t.Fatal(err) + } + if string(data) != testcase.expected { + t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data)) + } + } +} + +func TestStrSliceUnmarshalJSON(t *testing.T) { + parts := map[string][]string{ + "": {"default", "values"}, + "[]": {}, + `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, + } + for json, expectedParts := range parts { + strs := StrSlice{"default", "values"} + if err := strs.UnmarshalJSON([]byte(json)); err != nil { + t.Fatal(err) + } + + actualParts := []string(strs) + if !reflect.DeepEqual(actualParts, expectedParts) { + t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts) + } + + } +} + +func TestStrSliceUnmarshalString(t *testing.T) { + var e StrSlice + echo, err := json.Marshal("echo") + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + if len(e) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", e) + } + + if e[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", e[0]) + } +} + +func TestStrSliceUnmarshalSlice(t *testing.T) { + var e StrSlice + echo, err := json.Marshal([]string{"echo"}) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + if len(e) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", e) + } + + if e[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", e[0]) + } +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go new file mode 100644 index 0000000..64a648b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -0,0 +1,27 @@ +package swarm + +import "time" + +// Version represents the internal object version. +type Version struct { + Index uint64 `json:",omitempty"` +} + +// Meta is a base object inherited by most of the other once. +type Meta struct { + Version Version `json:",omitempty"` + CreatedAt time.Time `json:",omitempty"` + UpdatedAt time.Time `json:",omitempty"` +} + +// Annotations represents how to describe an object. +type Annotations struct { + Name string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` +} + +// Driver represents a driver (network, logging). +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go new file mode 100644 index 0000000..4ab476c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -0,0 +1,46 @@ +package swarm + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// ContainerSpec represents the spec of a container. +type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 0000000..5a5e11b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,111 @@ +package swarm + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. +type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 0000000..379e17a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,114 @@ +package swarm + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. +type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 0000000..fdb2388 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,31 @@ +package swarm + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 0000000..2cf2642 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,105 @@ +package swarm + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus UpdateStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt time.Time `json:",omitempty"` + CompletedAt time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update failures. + FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 0000000..0b42219 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,197 @@ +package swarm + +import "time" + +// ClusterInfo represents info about the cluster for outputing in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an affect on new tasks. Old tasks + // will continue use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` +} + +// ExternalCAProtocol represents type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + ForceNewCluster bool + Spec Spec + AutoLockManagers bool +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + RemoteAddrs []string + JoinToken string // accept by secret +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int + Managers int + + Cluster ClusterInfo +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 0000000..ace12cc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,128 @@ +package swarm + +import "time" + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + ContainerSpec ContainerSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 +} + +// Resources represents resources (CPU/Memory). +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Resources `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. +type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. +type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + ContainerID string `json:",omitempty"` + PID int `json:",omitempty"` + ExitCode int `json:",omitempty"` +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go new file mode 100644 index 0000000..63e1eec --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/duration_convert.go @@ -0,0 +1,12 @@ +package time + +import ( + "strconv" + "time" +) + +// DurationToSecondsString converts the specified duration to the number +// seconds it represents, formatted as a string. +func DurationToSecondsString(duration time.Duration) string { + return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) +} diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go b/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go new file mode 100644 index 0000000..869c08f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go @@ -0,0 +1,26 @@ +package time + +import ( + "testing" + "time" +) + +func TestDurationToSecondsString(t *testing.T) { + cases := []struct { + in time.Duration + expected string + }{ + {0 * time.Second, "0"}, + {1 * time.Second, "1"}, + {1 * time.Minute, "60"}, + {24 * time.Hour, "86400"}, + } + + for _, c := range cases { + s := DurationToSecondsString(c.in) + if s != c.expected { + t.Errorf("wrong value for input `%v`: expected `%s`, got `%s`", c.in, c.expected, s) + t.Fail() + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go new file mode 100644 index 0000000..d3695ba --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -0,0 +1,124 @@ +package time + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. +func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + var parseInLocation bool + + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation + parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) +// if the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// seconds will be used to create a time variable. For example: +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// returns seconds as def(aultSeconds) if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseonds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp_test.go b/vendor/github.com/docker/docker/api/types/time/timestamp_test.go new file mode 100644 index 0000000..a165130 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/timestamp_test.go @@ -0,0 +1,93 @@ +package time + +import ( + "fmt" + "testing" + "time" +) + +func TestGetTimestamp(t *testing.T) { + now := time.Now().In(time.UTC) + cases := []struct { + in, expected string + expectedErr bool + }{ + // Partial RFC3339 strings get parsed with second precision + {"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false}, + {"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false}, + {"2006-01-02T15:04:05.999999999", "1136214245.999999999", false}, + {"2006-01-02T15:04:05Z", "1136214245.000000000", false}, + {"2006-01-02T15:04:05", "1136214245.000000000", false}, + {"2006-01-02T15:04:0Z", "", true}, + {"2006-01-02T15:04:0", "", true}, + {"2006-01-02T15:04Z", "1136214240.000000000", false}, + {"2006-01-02T15:04+00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04-00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04", "1136214240.000000000", false}, + {"2006-01-02T15:0Z", "", true}, + {"2006-01-02T15:0", "", true}, + {"2006-01-02T15Z", "1136214000.000000000", false}, + {"2006-01-02T15+00:00", "1136214000.000000000", false}, + {"2006-01-02T15-00:00", "1136214000.000000000", false}, + {"2006-01-02T15", "1136214000.000000000", false}, + {"2006-01-02T1Z", "1136163600.000000000", false}, + {"2006-01-02T1", "1136163600.000000000", false}, + {"2006-01-02TZ", "", true}, + {"2006-01-02T", "", true}, + {"2006-01-02+00:00", "1136160000.000000000", false}, + {"2006-01-02-00:00", "1136160000.000000000", false}, + {"2006-01-02-00:01", "1136160060.000000000", false}, + {"2006-01-02Z", "1136160000.000000000", false}, + {"2006-01-02", "1136160000.000000000", false}, + {"2015-05-13T20:39:09Z", "1431549549.000000000", false}, + + // unix timestamps returned as is + {"1136073600", "1136073600", false}, + {"1136073600.000000001", "1136073600.000000001", false}, + // Durations + {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false}, + {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + + // String fallback + {"invalid", "invalid", false}, + } + + for _, c := range cases { + o, err := GetTimestamp(c.in, now) + if o != c.expected || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong value for '%s'. expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err) + t.Fail() + } + } +} + +func TestParseTimestamps(t *testing.T) { + cases := []struct { + in string + def, expectedS, expectedN int64 + expectedErr bool + }{ + // unix timestamps + {"1136073600", 0, 1136073600, 0, false}, + {"1136073600.000000001", 0, 1136073600, 1, false}, + {"1136073600.0000000010", 0, 1136073600, 1, false}, + {"1136073600.00000001", 0, 1136073600, 10, false}, + {"foo.bar", 0, 0, 0, true}, + {"1136073600.bar", 0, 1136073600, 0, true}, + {"", -1, -1, 0, false}, + } + + for _, c := range cases { + s, n, err := ParseTimestamps(c.in, c.def) + if s != c.expectedS || + n != c.expectedN || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err) + t.Fail() + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go new file mode 100644 index 0000000..a82c3e8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -0,0 +1,549 @@ +package types + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// ContainerChange contains response of Engine API: +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// ImageHistory contains response of Engine API: +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// ImageDelete contains response of Engine API: +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// GraphDriverData returns Image's graph driver config info +// when calling inspect command +type GraphDriverData struct { + Name string + Data map[string]string +} + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. +type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// ContainerProcessList contains response of Engine API: +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + Experimental bool +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit records a external tool actual commit id version along the +// one expect by dockerd as set at build time +type Commit struct { + ID string + Expected string +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. It's only available in Docker Swarm +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. +type MountPoint struct { + Type mount.Type `json:",omitempty"` + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + CheckDuplicate bool + Driver string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. +type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDelete + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md new file mode 100644 index 0000000..cdac50a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/README.md @@ -0,0 +1,14 @@ +## Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. + +### Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go new file mode 100644 index 0000000..611d4fe --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -0,0 +1,62 @@ +package versions + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. +func compare(v1, v2 string) int { + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/docker/docker/api/types/versions/compare_test.go b/vendor/github.com/docker/docker/api/types/versions/compare_test.go new file mode 100644 index 0000000..c2b9686 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/compare_test.go @@ -0,0 +1,26 @@ +package versions + +import ( + "testing" +) + +func assertVersion(t *testing.T, a, b string, result int) { + if r := compare(a, b); r != result { + t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) +} diff --git a/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go b/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go new file mode 100644 index 0000000..dc13150 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go @@ -0,0 +1,35 @@ +// Package v1p19 provides specific API types for the API version 1, patch 19. +package v1p19 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. +// Note this is not used by the Windows daemon. +type ContainerJSON struct { + *types.ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig + NetworkSettings *v1p20.NetworkSettings +} + +// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string + Memory int64 + MemorySwap int64 + CPUShares int64 `json:"CpuShares"` + CPUSet string `json:"Cpuset"` +} diff --git a/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go b/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go new file mode 100644 index 0000000..94a06d7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go @@ -0,0 +1,40 @@ +// Package v1p20 provides specific API types for the API version 1, patch 20. +package v1p20 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go new file mode 100644 index 0000000..da4f8eb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -0,0 +1,58 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData volume usage data +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. + // Required: true + RefCount int64 `json:"RefCount"` + + // The disk space used by the volume (local driver only) + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go new file mode 100644 index 0000000..679c160 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go @@ -0,0 +1,29 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// VolumesCreateBody volumes create body +// swagger:model VolumesCreateBody +type VolumesCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go new file mode 100644 index 0000000..7770bcb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go @@ -0,0 +1,23 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +import "github.com/docker/docker/api/types" + +// VolumesListOKBody volumes list o k body +// swagger:model VolumesListOKBody +type VolumesListOKBody struct { + + // List of volumes + // Required: true + Volumes []*types.Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go new file mode 100644 index 0000000..ced19e8 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/builder.go @@ -0,0 +1,169 @@ +// Package builder defines interfaces for any Docker builder to implement. +// +// Historically, only server-side Dockerfile interpreters existed. +// This package allows for other implementations of Docker builders. +package builder + +import ( + "io" + "os" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/image" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +const ( + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) + +// Context represents a file system tree. +type Context interface { + // Close allows to signal that the filesystem tree won't be used anymore. + // For Context implementations using a temporary directory, it is recommended to + // delete the temporary directory in Close(). + Close() error + // Stat returns an entry corresponding to path if any. + // It is recommended to return an error if path was not found. + // If path is a symlink it also returns the path to the target file. + Stat(path string) (string, FileInfo, error) + // Open opens path from the context and returns a readable stream of it. + Open(path string) (io.ReadCloser, error) + // Walk walks the tree of the context with the function passed to it. + Walk(root string, walkFn WalkFunc) error +} + +// WalkFunc is the type of the function called for each file or directory visited by Context.Walk(). +type WalkFunc func(path string, fi FileInfo, err error) error + +// ModifiableContext represents a modifiable Context. +// TODO: remove this interface once we can get rid of Remove() +type ModifiableContext interface { + Context + // Remove deletes the entry specified by `path`. + // It is usual for directory entries to delete all its subentries. + Remove(path string) error +} + +// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file. +// TODO: remove this interface once pkg/archive exposes a walk function that Context can use. +type FileInfo interface { + os.FileInfo + Path() string +} + +// PathFileInfo is a convenience struct that implements the FileInfo interface. +type PathFileInfo struct { + os.FileInfo + // FilePath holds the absolute path to the file. + FilePath string + // Name holds the basename for the file. + FileName string +} + +// Path returns the absolute path to the file. +func (fi PathFileInfo) Path() string { + return fi.FilePath +} + +// Name returns the basename of the file. +func (fi PathFileInfo) Name() string { + if fi.FileName != "" { + return fi.FileName + } + return fi.FileInfo.Name() +} + +// Hashed defines an extra method intended for implementations of os.FileInfo. +type Hashed interface { + // Hash returns the hash of a file. + Hash() string + SetHash(string) +} + +// HashedFileInfo is a convenient struct that augments FileInfo with a field. +type HashedFileInfo struct { + FileInfo + // FileHash represents the hash of a file. + FileHash string +} + +// Hash returns the hash of a file. +func (fi HashedFileInfo) Hash() string { + return fi.FileHash +} + +// SetHash sets the hash of a file. +func (fi *HashedFileInfo) SetHash(h string) { + fi.FileHash = h +} + +// Backend abstracts calls to a Docker Daemon. +type Backend interface { + // TODO: use digest reference instead of name + + // GetImageOnBuild looks up a Docker image referenced by `name`. + GetImageOnBuild(name string) (Image, error) + // TagImage tags an image with newTag + TagImageWithReference(image.ID, reference.Named) error + // PullOnBuild tells Docker to pull image referenced by `name`. + PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error) + // ContainerAttachRaw attaches to container. + ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error + // ContainerCreate creates a new Docker container and returns potential warnings + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + // ContainerRm removes a container specified by `id`. + ContainerRm(name string, config *types.ContainerRmConfig) error + // Commit creates a new Docker image from an existing Docker container. + Commit(string, *backend.ContainerCommitConfig) (string, error) + // ContainerKill stops the container execution abruptly. + ContainerKill(containerID string, sig uint64) error + // ContainerStart starts a new container + ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + // ContainerWait stops processing until the given container is stopped. + ContainerWait(containerID string, timeout time.Duration) (int, error) + // ContainerUpdateCmdOnBuild updates container.Path and container.Args + ContainerUpdateCmdOnBuild(containerID string, cmd []string) error + // ContainerCreateWorkdir creates the workdir (currently only used on Windows) + ContainerCreateWorkdir(containerID string) error + + // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container + // specified by a container object. + // TODO: make an Extract method instead of passing `decompress` + // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used + // with Context.Walk + // ContainerCopy(name string, res string) (io.ReadCloser, error) + // TODO: use copyBackend api + CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error + + // HasExperimental checks if the backend supports experimental features + HasExperimental() bool + + // SquashImage squashes the fs layers from the provided image down to the specified `to` image + SquashImage(from string, to string) (string, error) +} + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config +} + +// ImageCacheBuilder represents a generator for stateful image cache. +type ImageCacheBuilder interface { + // MakeImageCache creates a stateful image cache. + MakeImageCache(cacheFrom []string) ImageCache +} + +// ImageCache abstracts an image cache. +// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. + GetCache(parentID string, cfg *container.Config) (imageID string, err error) +} diff --git a/vendor/github.com/docker/docker/builder/context.go b/vendor/github.com/docker/docker/builder/context.go new file mode 100644 index 0000000..600f423 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/context.go @@ -0,0 +1,260 @@ +package builder + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/gitutils" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// ValidateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read +// symlinks which point to non-existing files don't trigger an error +func ValidateContextDirectory(srcPath string, excludes []string) error { + contextRoot, err := getContextRoot(srcPath) + if err != nil { + return err + } + return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { + return err + } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hanging on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +// GetContextFromReader will read the contents of the given reader as either a +// Dockerfile or tar archive. Returns a tar archive used as a context and a +// path to the Dockerfile inside the tar. +func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { + buf := bufio.NewReader(r) + + magic, err := buf.Peek(archive.HeaderSize) + if err != nil && err != io.EOF { + return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) + } + + if archive.IsArchive(magic) { + return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil + } + + // Input should be read as a Dockerfile. + tmpDir, err := ioutil.TempDir("", "docker-build-context-") + if err != nil { + return nil, "", fmt.Errorf("unbale to create temporary context directory: %v", err) + } + + f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName)) + if err != nil { + return nil, "", err + } + _, err = io.Copy(f, buf) + if err != nil { + f.Close() + return nil, "", err + } + + if err := f.Close(); err != nil { + return nil, "", err + } + if err := r.Close(); err != nil { + return nil, "", err + } + + tar, err := archive.Tar(tmpDir, archive.Uncompressed) + if err != nil { + return nil, "", err + } + + return ioutils.NewReadCloserWrapper(tar, func() error { + err := tar.Close() + os.RemoveAll(tmpDir) + return err + }), DefaultDockerfileName, nil + +} + +// GetContextFromGitURL uses a Git URL as context for a `docker build`. The +// git repo is cloned into a temporary directory used as the context directory. +// Returns the absolute path to the temporary context directory, the relative +// path of the dockerfile in that context directory, and a non-nil error on +// success. +func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { + if _, err := exec.LookPath("git"); err != nil { + return "", "", fmt.Errorf("unable to find 'git': %v", err) + } + if absContextDir, err = gitutils.Clone(gitURL); err != nil { + return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) + } + + return getDockerfileRelPath(absContextDir, dockerfileName) +} + +// GetContextFromURL uses a remote URL as context for a `docker build`. The +// remote resource is downloaded as either a Dockerfile or a tar archive. +// Returns the tar archive used for the context and a path of the +// dockerfile inside the tar. +func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { + response, err := httputils.Download(remoteURL) + if err != nil { + return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) + } + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) + + // Pass the response body through a progress reader. + progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) + + return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) +} + +// GetContextFromLocalDir uses the given local directory as context for a +// `docker build`. Returns the absolute path to the local context directory, +// the relative path of the dockerfile in that context directory, and a non-nil +// error on success. +func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { + // When using a local context directory, when the Dockerfile is specified + // with the `-f/--file` option then it is considered relative to the + // current directory and not the context directory. + if dockerfileName != "" { + if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { + return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) + } + } + + return getDockerfileRelPath(localDir, dockerfileName) +} + +// getDockerfileRelPath uses the given context directory for a `docker build` +// and returns the absolute path to the context directory, the relative path of +// the dockerfile in that context directory, and a non-nil error on success. +func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { + if absContextDir, err = filepath.Abs(givenContextDir); err != nil { + return "", "", fmt.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) + } + + // The context dir might be a symbolic link, so follow it to the actual + // target directory. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. + if !isUNC(absContextDir) { + absContextDir, err = filepath.EvalSymlinks(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) + } + } + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) + } + + if !stat.IsDir() { + return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) + } + + absDockerfile := givenDockerfile + if absDockerfile == "" { + // No -f/--file was specified so use the default relative to the + // context directory. + absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) + + // Just to be nice ;-) look for 'dockerfile' too but only + // use it if we found it, otherwise ignore this check + if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { + altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) + if _, err = os.Lstat(altPath); err == nil { + absDockerfile = altPath + } + } + } + + // If not already an absolute path, the Dockerfile path should be joined to + // the base directory. + if !filepath.IsAbs(absDockerfile) { + absDockerfile = filepath.Join(absContextDir, absDockerfile) + } + + // Evaluate symlinks in the path to the Dockerfile too. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. + if !isUNC(absDockerfile) { + absDockerfile, err = filepath.EvalSymlinks(absDockerfile) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) + } + } + + if _, err := os.Lstat(absDockerfile); err != nil { + if os.IsNotExist(err) { + return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) + } + return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) + } + + if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { + return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) + } + + if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { + return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) + } + + return absContextDir, relDockerfile, nil +} + +// isUNC returns true if the path is UNC (one starting \\). It always returns +// false on Linux. +func isUNC(path string) bool { + return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) +} diff --git a/vendor/github.com/docker/docker/builder/context_test.go b/vendor/github.com/docker/docker/builder/context_test.go new file mode 100644 index 0000000..27d29d7 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/context_test.go @@ -0,0 +1,307 @@ +package builder + +import ( + "archive/tar" + "bytes" + "io" + "io/ioutil" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +var prepareEmpty = func(t *testing.T) (string, func()) { + return "", func() {} +} + +var prepareNoFiles = func(t *testing.T) (string, func()) { + return createTestTempDir(t, "", "builder-context-test") +} + +var prepareOneFile = func(t *testing.T) (string, func()) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + return contextDir, cleanup +} + +func testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) (string, func()), excludes []string) { + contextDir, cleanup := prepare(t) + defer cleanup() + + err := ValidateContextDirectory(contextDir, excludes) + + if err != nil { + t.Fatalf("Error should be nil, got: %s", err) + } +} + +func TestGetContextFromLocalDirNoDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirNotExistingDir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + absContextDir, relDockerfile, err := GetContextFromLocalDir(fakePath, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirNotExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, fakePath) + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirWithNoDirectory(t *testing.T) { + contextDir, dirCleanup := createTestTempDir(t, "", "builder-context-test") + defer dirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + + if err != nil { + t.Fatalf("Error when getting context from local dir: %s", err) + } + + if absContextDir != contextDir { + t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromLocalDirWithDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + + if err != nil { + t.Fatalf("Error when getting context from local dir: %s", err) + } + + if absContextDir != contextDir { + t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromLocalDirLocalFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + testFilename := createTestTempFile(t, contextDir, "tmpTest", "test", 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(testFilename, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirWithCustomDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, DefaultDockerfileName) + + if err != nil { + t.Fatalf("Error when getting context from local dir: %s", err) + } + + if absContextDir != contextDir { + t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) + } + +} + +func TestGetContextFromReaderString(t *testing.T) { + tarArchive, relDockerfile, err := GetContextFromReader(ioutil.NopCloser(strings.NewReader(dockerfileContents)), "") + + if err != nil { + t.Fatalf("Error when executing GetContextFromReader: %s", err) + } + + tarReader := tar.NewReader(tarArchive) + + _, err = tarReader.Next() + + if err != nil { + t.Fatalf("Error when reading tar archive: %s", err) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + if err = tarArchive.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromReaderTar(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar: %s", err) + } + + tarArchive, relDockerfile, err := GetContextFromReader(tarStream, DefaultDockerfileName) + + if err != nil { + t.Fatalf("Error when executing GetContextFromReader: %s", err) + } + + tarReader := tar.NewReader(tarArchive) + + header, err := tarReader.Next() + + if err != nil { + t.Fatalf("Error when reading tar archive: %s", err) + } + + if header.Name != DefaultDockerfileName { + t.Fatalf("Dockerfile name should be: %s, got: %s", DefaultDockerfileName, header.Name) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + if err = tarArchive.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestValidateContextDirectoryEmptyContext(t *testing.T) { + // This isn't a valid test on Windows. See https://play.golang.org/p/RR6z6jxR81. + // The test will ultimately end up calling filepath.Abs(""). On Windows, + // golang will error. On Linux, golang will return /. Due to there being + // drive letters on Windows, this is probably the correct behaviour for + // Windows. + if runtime.GOOS == "windows" { + t.Skip("Invalid test on Windows") + } + testValidateContextDirectory(t, prepareEmpty, []string{}) +} + +func TestValidateContextDirectoryContextWithNoFiles(t *testing.T) { + testValidateContextDirectory(t, prepareNoFiles, []string{}) +} + +func TestValidateContextDirectoryWithOneFile(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{}) +} + +func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{DefaultDockerfileName}) +} diff --git a/vendor/github.com/docker/docker/builder/context_unix.go b/vendor/github.com/docker/docker/builder/context_unix.go new file mode 100644 index 0000000..d1f72e0 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/context_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package builder + +import ( + "path/filepath" +) + +func getContextRoot(srcPath string) (string, error) { + return filepath.Join(srcPath, "."), nil +} diff --git a/vendor/github.com/docker/docker/builder/context_windows.go b/vendor/github.com/docker/docker/builder/context_windows.go new file mode 100644 index 0000000..b8ba2ba --- /dev/null +++ b/vendor/github.com/docker/docker/builder/context_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package builder + +import ( + "path/filepath" + + "github.com/docker/docker/pkg/longpath" +) + +func getContextRoot(srcPath string) (string, error) { + cr, err := filepath.Abs(srcPath) + if err != nil { + return "", err + } + return longpath.AddPrefix(cr), nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/bflag.go b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go new file mode 100644 index 0000000..1e03693 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go @@ -0,0 +1,176 @@ +package dockerfile + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType FlagType = iota + stringType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} + +// NewBFlags returns the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +// addFlag is a generic func used by the other AddXXX() func +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid. +// Any error noticed during the AddXXX() funcs will be generated/returned +// here. We do this because an error during AddXXX() is more like a +// compile time error so it doesn't matter too much when we stop our +// processing as long as we do stop it, so this allows the code +// around AddXXX() to be just: +// defFlag := AddString("description", "") +// w/o needing to add an if-statement around each one. +func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value + + default: + panic(fmt.Errorf("No idea what kind of flag we have! Should never get here!")) + } + + } + + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/bflag_test.go b/vendor/github.com/docker/docker/builder/dockerfile/bflag_test.go new file mode 100644 index 0000000..65cfcea --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/bflag_test.go @@ -0,0 +1,187 @@ +package dockerfile + +import ( + "testing" +) + +func TestBuilderFlags(t *testing.T) { + var expected string + var err error + + // --- + + bf := NewBFlags() + bf.Args = []string{} + if err := bf.Parse(); err != nil { + t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + bf.Args = []string{"--"} + if err := bf.Parse(); err != nil { + t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + flStr1 := bf.AddString("str1", "") + flBool1 := bf.AddBool("bool1", false) + bf.Args = []string{} + if err = bf.Parse(); err != nil { + t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.IsUsed() == true { + t.Fatalf("Test3 - str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatalf("Test3 - bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "HI" { + t.Fatalf("Str1 was supposed to default to: HI") + } + if flBool1.IsTrue() { + t.Fatalf("Bool1 was supposed to default to: false") + } + if flStr1.IsUsed() == true { + t.Fatalf("Str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatalf("Bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1="} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "BYE" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatalf("Test-b1 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=true"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatalf("Test-b2 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flBool1.IsTrue() { + t.Fatalf("Test-b3 Bool1 was supposed to be false") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool2"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1", "--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "BYE" { + t.Fatalf("Teset %s, str1 should be BYE", bf.Args) + } + if !flBool1.IsTrue() { + t.Fatalf("Teset %s, bool1 should be true", bf.Args) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/vendor/github.com/docker/docker/builder/dockerfile/builder.go new file mode 100644 index 0000000..da43513 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -0,0 +1,370 @@ +package dockerfile + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + perrors "github.com/pkg/errors" + "golang.org/x/net/context" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "healthcheck": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +// BuiltinAllowedBuildArgs is list of built-in allowed build args +var BuiltinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. +type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Output io.Writer + + docker builder.Backend + context builder.Context + clientCtx context.Context + cancel context.CancelFunc + + dockerfile *parser.Node + runConfig *container.Config // runconfig for cmd, run, entrypoint etc. + flags *BFlags + tmpContainers map[string]struct{} + image string // imageID + noBaseImage bool + maintainer string + cmdSet bool + disableCommit bool + cacheBusted bool + allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. + directive parser.Directive + + // TODO: remove once docker.Commit can receive a tag + id string + + imageCache builder.ImageCache + from builder.Image +} + +// BuildManager implements builder.Backend and is shared across all Builder objects. +type BuildManager struct { + backend builder.Backend +} + +// NewBuildManager creates a BuildManager. +func NewBuildManager(b builder.Backend) (bm *BuildManager) { + return &BuildManager{backend: b} +} + +// BuildFromContext builds a new image from a given context. +func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) { + if buildOptions.Squash && !bm.backend.HasExperimental() { + return "", apierrors.NewBadRequestError(errors.New("squash is only supported with experimental mode")) + } + buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc) + if err != nil { + return "", err + } + defer func() { + if err := buildContext.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + }() + + if len(dockerfileName) > 0 { + buildOptions.Dockerfile = dockerfileName + } + b, err := NewBuilder(ctx, buildOptions, bm.backend, builder.DockerIgnoreContext{ModifiableContext: buildContext}, nil) + if err != nil { + return "", err + } + return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output) +} + +// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. +// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, +// will be read from the Context passed to Build(). +func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, buildContext builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) { + if config == nil { + config = new(types.ImageBuildOptions) + } + if config.BuildArgs == nil { + config.BuildArgs = make(map[string]*string) + } + ctx, cancel := context.WithCancel(clientCtx) + b = &Builder{ + clientCtx: ctx, + cancel: cancel, + options: config, + Stdout: os.Stdout, + Stderr: os.Stderr, + docker: backend, + context: buildContext, + runConfig: new(container.Config), + tmpContainers: map[string]struct{}{}, + id: stringid.GenerateNonCryptoID(), + allowedBuildArgs: make(map[string]bool), + directive: parser.Directive{ + EscapeSeen: false, + LookingForDirectives: true, + }, + } + if icb, ok := backend.(builder.ImageCacheBuilder); ok { + b.imageCache = icb.MakeImageCache(config.CacheFrom) + } + + parser.SetEscapeToken(parser.DefaultEscapeToken, &b.directive) // Assume the default token for escape + + if dockerfile != nil { + b.dockerfile, err = parser.Parse(dockerfile, &b.directive) + if err != nil { + return nil, err + } + } + + return b, nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. + uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNamed(repo) + if err != nil { + return nil, err + } + + ref = reference.WithDefaultTag(ref) + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + if _, isTagged := ref.(reference.NamedTagged); !isTagged { + ref, err = reference.WithTag(ref, reference.DefaultTag) + if err != nil { + return nil, err + } + } + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} + +// build runs the Dockerfile builder from a context and a docker object that allows to make calls +// to Docker. +// +// This will (barring errors): +// +// * read the dockerfile from context +// * parse the dockerfile if not already parsed +// * walk the AST and execute it by dispatching to handlers. If Remove +// or ForceRemove is set, additional cleanup around containers happens after +// processing. +// * Tag image, if applicable. +// * Print a happy message and return the image ID. +// +func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) { + b.Stdout = stdout + b.Stderr = stderr + b.Output = out + + // If Dockerfile was not parsed yet, extract it from the Context + if b.dockerfile == nil { + if err := b.readDockerfile(); err != nil { + return "", err + } + } + + repoAndTags, err := sanitizeRepoAndTags(b.options.Tags) + if err != nil { + return "", err + } + + if len(b.options.Labels) > 0 { + line := "LABEL " + for k, v := range b.options.Labels { + line += fmt.Sprintf("%q='%s' ", k, v) + } + _, node, err := parser.ParseLine(line, &b.directive, false) + if err != nil { + return "", err + } + b.dockerfile.Children = append(b.dockerfile.Children, node) + } + + var shortImgID string + total := len(b.dockerfile.Children) + for _, n := range b.dockerfile.Children { + if err := b.checkDispatch(n, false); err != nil { + return "", err + } + } + + for i, n := range b.dockerfile.Children { + select { + case <-b.clientCtx.Done(): + logrus.Debug("Builder: build cancelled!") + fmt.Fprintf(b.Stdout, "Build cancelled") + return "", fmt.Errorf("Build cancelled") + default: + // Not cancelled yet, keep going... + } + + if err := b.dispatch(i, total, n); err != nil { + if b.options.ForceRemove { + b.clearTmp() + } + return "", err + } + + shortImgID = stringid.TruncateID(b.image) + fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) + if b.options.Remove { + b.clearTmp() + } + } + + // check if there are any leftover build-args that were passed but not + // consumed during build. Return a warning, if there are any. + leftoverArgs := []string{} + for arg := range b.options.BuildArgs { + if !b.isBuildArgAllowed(arg) { + leftoverArgs = append(leftoverArgs, arg) + } + } + + if len(leftoverArgs) > 0 { + fmt.Fprintf(b.Stderr, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } + + if b.image == "" { + return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") + } + + if b.options.Squash { + var fromID string + if b.from != nil { + fromID = b.from.ImageID() + } + b.image, err = b.docker.SquashImage(b.image, fromID) + if err != nil { + return "", perrors.Wrap(err, "error squashing image") + } + } + + imageID := image.ID(b.image) + for _, rt := range repoAndTags { + if err := b.docker.TagImageWithReference(imageID, rt); err != nil { + return "", err + } + } + + fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) + return b.image, nil +} + +// Cancel cancels an ongoing Dockerfile build. +func (b *Builder) Cancel() { + b.cancel() +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? +func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { + b, err := NewBuilder(context.Background(), nil, nil, nil, nil) + if err != nil { + return nil, err + } + + ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")), &b.directive) + if err != nil { + return nil, err + } + + // ensure that the commands are valid + for _, n := range ast.Children { + if !validCommitCommands[n.Value] { + return nil, fmt.Errorf("%s is not a valid change command", n.Value) + } + } + + b.runConfig = config + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + total := len(ast.Children) + for _, n := range ast.Children { + if err := b.checkDispatch(n, false); err != nil { + return nil, err + } + } + + for i, n := range ast.Children { + if err := b.dispatch(i, total, n); err != nil { + return nil, err + } + } + + return b.runConfig, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go new file mode 100644 index 0000000..76a7ce7 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package dockerfile + +var defaultShell = []string{"/bin/sh", "-c"} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go new file mode 100644 index 0000000..37e9fbc --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go @@ -0,0 +1,3 @@ +package dockerfile + +var defaultShell = []string{"cmd", "/S", "/C"} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/command/command.go b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go new file mode 100644 index 0000000..f23c687 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go @@ -0,0 +1,46 @@ +// Package command contains the set of Dockerfile commands. +package command + +// Define constants for the command strings +const ( + Add = "add" + Arg = "arg" + Cmd = "cmd" + Copy = "copy" + Entrypoint = "entrypoint" + Env = "env" + Expose = "expose" + From = "from" + Healthcheck = "healthcheck" + Label = "label" + Maintainer = "maintainer" + Onbuild = "onbuild" + Run = "run" + Shell = "shell" + StopSignal = "stopsignal" + User = "user" + Volume = "volume" + Workdir = "workdir" +) + +// Commands is list of all Dockerfile commands +var Commands = map[string]struct{}{ + Add: {}, + Arg: {}, + Cmd: {}, + Copy: {}, + Entrypoint: {}, + Env: {}, + Expose: {}, + From: {}, + Healthcheck: {}, + Label: {}, + Maintainer: {}, + Onbuild: {}, + Run: {}, + Shell: {}, + StopSignal: {}, + User: {}, + Volume: {}, + Workdir: {}, +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go new file mode 100644 index 0000000..3e78abd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -0,0 +1,821 @@ +package dockerfile + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "fmt" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/signal" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. +// +func env(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ENV") + } + + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("ENV") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + // TODO/FIXME/NOT USED + // Just here to show how to use the builder flags stuff within the + // context of a builder command. Will remove once we actually add + // a builder command to something! + /* + flBool1 := b.flags.AddBool("bool1", false) + flStr1 := b.flags.AddString("str1", "HI") + + if err := b.flags.Parse(); err != nil { + return err + } + + fmt.Printf("Bool1:%v\n", flBool1) + fmt.Printf("Str1:%v\n", flStr1) + */ + + commitStr := "ENV" + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + + if len(args[j]) == 0 { + return errBlankCommandNames("ENV") + } + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + gotOne := false + for i, envVar := range b.runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + compareFrom := envParts[0] + compareTo := args[j] + if runtime.GOOS == "windows" { + // Case insensitive environment variables on Windows + compareFrom = strings.ToUpper(compareFrom) + compareTo = strings.ToUpper(compareTo) + } + if compareFrom == compareTo { + b.runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + b.runConfig.Env = append(b.runConfig.Env, newVar) + } + j++ + } + + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. +func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("MAINTAINER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.maintainer = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("LABEL") + } + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("LABEL") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + + if b.runConfig.Labels == nil { + b.runConfig.Labels = map[string]string{} + } + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + + if len(args[j]) == 0 { + return errBlankCommandNames("LABEL") + } + + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + b.runConfig.Labels[args[j]] = args[j+1] + j++ + } + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastTwoArguments("ADD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, true, true, "ADD") +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastTwoArguments("COPY") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, false, false, "COPY") +} + +// FROM imagename +// +// This sets the image the dockerfile will build on top of. +// +func from(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("FROM") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + name := args[0] + + var ( + image builder.Image + err error + ) + + // Windows cannot support a container with no base image. + if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + return fmt.Errorf("Windows does not support FROM scratch") + } + b.image = "" + b.noBaseImage = true + } else { + // TODO: don't use `name`, instead resolve it to a digest + if !b.options.PullParent { + image, err = b.docker.GetImageOnBuild(name) + // TODO: shouldn't we error out if error is different from "not found" ? + } + if image == nil { + image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) + if err != nil { + return err + } + } + } + b.from = image + + return b.processImageFrom(image) +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. +// +func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ONBUILD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") + + b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("WORKDIR") + } + + err := b.flags.Parse() + if err != nil { + return err + } + + // This is from the Dockerfile and will not necessarily be in platform + // specific semantics, hence ensure it is converted. + b.runConfig.WorkingDir, err = normaliseWorkdir(b.runConfig.WorkingDir, args[0]) + if err != nil { + return err + } + + // For performance reasons, we explicitly do a create/mkdir now + // This avoids having an unnecessary expensive mount/unmount calls + // (on Windows in particular) during each container create. + // Prior to 1.13, the mkdir was deferred and not executed at this step. + if b.disableCommit { + // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". + // We've already updated the runConfig and that's enough. + return nil + } + b.runConfig.Image = b.image + + cmd := b.runConfig.Cmd + comment := "WORKDIR " + b.runConfig.WorkingDir + // reset the command for cache detection + b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) "+comment)) + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + if hit, err := b.probeCache(); err != nil { + return err + } else if hit { + return nil + } + + container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + if err := b.docker.ContainerCreateWorkdir(container.ID); err != nil { + return err + } + + return b.commit(container.ID, cmd, comment) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under +// Windows, in the event there is only one argument The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func run(b *Builder, args []string, attributes map[string]bool, original string) error { + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + args = handleJSONArgs(args, attributes) + + if !attributes["json"] { + args = append(getShell(b.runConfig), args...) + } + config := &container.Config{ + Cmd: strslice.StrSlice(args), + Image: b.image, + } + + // stash the cmd + cmd := b.runConfig.Cmd + if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { + b.runConfig.Cmd = config.Cmd + } + + // stash the config environment + env := b.runConfig.Env + + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + defer func(env []string) { b.runConfig.Env = env }(env) + + // derive the net build-time environment for this run. We let config + // environment override the build time environment. + // This means that we take the b.buildArgs list of env vars and remove + // any of those variables that are defined as part of the container. In other + // words, anything in b.Config.Env. What's left is the list of build-time env + // vars that we need to add to each RUN command - note the list could be empty. + // + // We don't persist the build time environment with container's config + // environment, but just sort and prepend it to the command string at time + // of commit. + // This helps with tracing back the image's actual environment at the time + // of RUN, without leaking it to the final image. It also aids cache + // lookup for same image built with same build time environment. + cmdBuildEnv := []string{} + configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + if _, ok := configEnv[key]; !ok && val != nil { + cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, *val)) + } + } + + // derive the command to use for probeCache() and to commit in this container. + // Note that we only do this if there are any build-time env vars. Also, we + // use the special argument "|#" at the start of the args array. This will + // avoid conflicts with any RUN command since commands can not + // start with | (vertical bar). The "#" (number of build envs) is there to + // help ensure proper cache matches. We don't want a RUN command + // that starts with "foo=abc" to be considered part of a build-time env var. + saveCmd := config.Cmd + if len(cmdBuildEnv) > 0 { + sort.Strings(cmdBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) + saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...)) + } + + b.runConfig.Cmd = saveCmd + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + // set Cmd manually, this is special case only for Dockerfiles + b.runConfig.Cmd = config.Cmd + // set build-time environment for 'run'. + b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) + // set config as already being escaped, this prevents double escaping on windows + b.runConfig.ArgsEscaped = true + + logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) + + cID, err := b.create() + if err != nil { + return err + } + + if err := b.run(cID); err != nil { + return err + } + + // revert to original config environment and set the command string to + // have the build-time env vars in it (if any) so that future cache look-ups + // properly match it. + b.runConfig.Env = env + b.runConfig.Cmd = saveCmd + return b.commit(cID, cmd, "run") +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + cmdSlice := handleJSONArgs(args, attributes) + + if !attributes["json"] { + cmdSlice = append(getShell(b.runConfig), cmdSlice...) + } + + b.runConfig.Cmd = strslice.StrSlice(cmdSlice) + // set config as already being escaped, this prevents double escaping on windows + b.runConfig.ArgsEscaped = true + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + return err + } + + if len(args) != 0 { + b.cmdSet = true + } + + return nil +} + +// parseOptInterval(flag) is the duration of flag.Value, or 0 if +// empty. An error is reported if the value is given and is not positive. +func parseOptInterval(f *Flag) (time.Duration, error) { + s := f.Value + if s == "" { + return 0, nil + } + d, err := time.ParseDuration(s) + if err != nil { + return 0, err + } + if d <= 0 { + return 0, fmt.Errorf("Interval %#v must be positive", f.name) + } + return d, nil +} + +// HEALTHCHECK foo +// +// Set the default healthcheck command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func healthcheck(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("HEALTHCHECK") + } + typ := strings.ToUpper(args[0]) + args = args[1:] + if typ == "NONE" { + if len(args) != 0 { + return fmt.Errorf("HEALTHCHECK NONE takes no arguments") + } + test := strslice.StrSlice{typ} + b.runConfig.Healthcheck = &container.HealthConfig{ + Test: test, + } + } else { + if b.runConfig.Healthcheck != nil { + oldCmd := b.runConfig.Healthcheck.Test + if len(oldCmd) > 0 && oldCmd[0] != "NONE" { + fmt.Fprintf(b.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) + } + } + + healthcheck := container.HealthConfig{} + + flInterval := b.flags.AddString("interval", "") + flTimeout := b.flags.AddString("timeout", "") + flRetries := b.flags.AddString("retries", "") + + if err := b.flags.Parse(); err != nil { + return err + } + + switch typ { + case "CMD": + cmdSlice := handleJSONArgs(args, attributes) + if len(cmdSlice) == 0 { + return fmt.Errorf("Missing command after HEALTHCHECK CMD") + } + + if !attributes["json"] { + typ = "CMD-SHELL" + } + + healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) + default: + return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) + } + + interval, err := parseOptInterval(flInterval) + if err != nil { + return err + } + healthcheck.Interval = interval + + timeout, err := parseOptInterval(flTimeout) + if err != nil { + return err + } + healthcheck.Timeout = timeout + + if flRetries.Value != "" { + retries, err := strconv.ParseInt(flRetries.Value, 10, 32) + if err != nil { + return err + } + if retries < 1 { + return fmt.Errorf("--retries must be at least 1 (not %d)", retries) + } + healthcheck.Retries = int(retries) + } else { + healthcheck.Retries = 0 + } + + b.runConfig.Healthcheck = &healthcheck + } + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("HEALTHCHECK %q", b.runConfig.Healthcheck)) +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments +// to /usr/sbin/nginx. Uses the default shell if not in JSON format. +// +// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint +// is initialized at NewBuilder time instead of through argument parsing. +// +func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + parsed := handleJSONArgs(args, attributes) + + switch { + case attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + b.runConfig.Entrypoint = strslice.StrSlice(parsed) + case len(parsed) == 0: + // ENTRYPOINT [] + b.runConfig.Entrypoint = nil + default: + // ENTRYPOINT echo hi + b.runConfig.Entrypoint = strslice.StrSlice(append(getShell(b.runConfig), parsed[0])) + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !b.cmdSet { + b.runConfig.Cmd = nil + } + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { + return err + } + + return nil +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// b.runConfig.ExposedPorts for runconfig. +// +func expose(b *Builder, args []string, attributes map[string]bool, original string) error { + portsTab := args + + if len(args) == 0 { + return errAtLeastOneArgument("EXPOSE") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.ExposedPorts == nil { + b.runConfig.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(portsTab) + if err != nil { + return err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + if _, exists := b.runConfig.ExposedPorts[port]; !exists { + b.runConfig.ExposedPorts[port] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func user(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("USER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.runConfig.User = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func volume(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("VOLUME") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.Volumes == nil { + b.runConfig.Volumes = map[string]struct{}{} + } + for _, v := range args { + v = strings.TrimSpace(v) + if v == "" { + return fmt.Errorf("VOLUME specified can not be an empty string") + } + b.runConfig.Volumes[v] = struct{}{} + } + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { + return err + } + return nil +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. +func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("STOPSIGNAL") + } + + sig := args[0] + _, err := signal.ParseSignal(sig) + if err != nil { + return err + } + + b.runConfig.StopSignal = sig + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) +} + +// ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. +// Dockerfile author may optionally set a default value of this variable. +func arg(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("ARG") + } + + var ( + name string + newValue string + hasDefault bool + ) + + arg := args[0] + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + if len(parts[0]) == 0 { + return errBlankCommandNames("ARG") + } + + name = parts[0] + newValue = parts[1] + hasDefault = true + } else { + name = arg + hasDefault = false + } + // add the arg to allowed list of build-time args from this step on. + b.allowedBuildArgs[name] = true + + // If there is a default value associated with this arg then add it to the + // b.buildArgs if one is not already passed to the builder. The args passed + // to builder override the default value of 'arg'. Note that a 'nil' for + // a value means that the user specified "--build-arg FOO" and "FOO" wasn't + // defined as an env var - and in that case we DO want to use the default + // value specified in the ARG cmd. + if baValue, ok := b.options.BuildArgs[name]; (!ok || baValue == nil) && hasDefault { + b.options.BuildArgs[name] = &newValue + } + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) +} + +// SHELL powershell -command +// +// Set the non-default shell to use. +func shell(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + shellSlice := handleJSONArgs(args, attributes) + switch { + case len(shellSlice) == 0: + // SHELL [] + return errAtLeastOneArgument("SHELL") + case attributes["json"]: + // SHELL ["powershell", "-command"] + b.runConfig.Shell = strslice.StrSlice(shellSlice) + default: + // SHELL powershell -command - not JSON + return errNotJSON("SHELL", original) + } + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("SHELL %v", shellSlice)) +} + +func errAtLeastOneArgument(command string) error { + return fmt.Errorf("%s requires at least one argument", command) +} + +func errExactlyOneArgument(command string) error { + return fmt.Errorf("%s requires exactly one argument", command) +} + +func errAtLeastTwoArguments(command string) error { + return fmt.Errorf("%s requires at least two arguments", command) +} + +func errBlankCommandNames(command string) error { + return fmt.Errorf("%s names can not be blank", command) +} + +func errTooManyArguments(command string) error { + return fmt.Errorf("Bad input to %s, too many arguments", command) +} + +// getShell is a helper function which gets the right shell for prefixing the +// shell-form of RUN, ENTRYPOINT and CMD instructions +func getShell(c *container.Config) []string { + if 0 == len(c.Shell) { + return defaultShell[:] + } + return c.Shell[:] +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go new file mode 100644 index 0000000..f7c57f7 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go @@ -0,0 +1,517 @@ +package dockerfile + +import ( + "fmt" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +type commandWithFunction struct { + name string + function func(args []string) error +} + +func TestCommandsExactlyOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"MAINTAINER", func(args []string) error { return maintainer(nil, args, nil, "") }}, + {"FROM", func(args []string) error { return from(nil, args, nil, "") }}, + {"WORKDIR", func(args []string) error { return workdir(nil, args, nil, "") }}, + {"USER", func(args []string) error { return user(nil, args, nil, "") }}, + {"STOPSIGNAL", func(args []string) error { return stopSignal(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errExactlyOneArgument(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandsAtLeastOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, + {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}, + {"ONBUILD", func(args []string) error { return onbuild(nil, args, nil, "") }}, + {"HEALTHCHECK", func(args []string) error { return healthcheck(nil, args, nil, "") }}, + {"EXPOSE", func(args []string) error { return expose(nil, args, nil, "") }}, + {"VOLUME", func(args []string) error { return volume(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errAtLeastOneArgument(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandsAtLeastTwoArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ADD", func(args []string) error { return add(nil, args, nil, "") }}, + {"COPY", func(args []string) error { return dispatchCopy(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{"arg1"}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errAtLeastTwoArguments(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandsTooManyArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, + {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{"arg1", "arg2", "arg3"}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errTooManyArguments(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandseBlankNames(t *testing.T) { + bflags := &BFlags{} + config := &container.Config{} + + b := &Builder{flags: bflags, runConfig: config, disableCommit: true} + + commands := []commandWithFunction{ + {"ENV", func(args []string) error { return env(b, args, nil, "") }}, + {"LABEL", func(args []string) error { return label(b, args, nil, "") }}, + } + + for _, command := range commands { + err := command.function([]string{"", ""}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errBlankCommandNames(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestEnv2Variables(t *testing.T) { + variables := []string{"var1", "val1", "var2", "val2"} + + bflags := &BFlags{} + config := &container.Config{} + + b := &Builder{flags: bflags, runConfig: config, disableCommit: true} + + if err := env(b, variables, nil, ""); err != nil { + t.Fatalf("Error when executing env: %s", err.Error()) + } + + expectedVar1 := fmt.Sprintf("%s=%s", variables[0], variables[1]) + expectedVar2 := fmt.Sprintf("%s=%s", variables[2], variables[3]) + + if b.runConfig.Env[0] != expectedVar1 { + t.Fatalf("Wrong env output for first variable. Got: %s. Should be: %s", b.runConfig.Env[0], expectedVar1) + } + + if b.runConfig.Env[1] != expectedVar2 { + t.Fatalf("Wrong env output for second variable. Got: %s, Should be: %s", b.runConfig.Env[1], expectedVar2) + } +} + +func TestMaintainer(t *testing.T) { + maintainerEntry := "Some Maintainer " + + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + if err := maintainer(b, []string{maintainerEntry}, nil, ""); err != nil { + t.Fatalf("Error when executing maintainer: %s", err.Error()) + } + + if b.maintainer != maintainerEntry { + t.Fatalf("Maintainer in builder should be set to %s. Got: %s", maintainerEntry, b.maintainer) + } +} + +func TestLabel(t *testing.T) { + labelName := "label" + labelValue := "value" + + labelEntry := []string{labelName, labelValue} + + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + if err := label(b, labelEntry, nil, ""); err != nil { + t.Fatalf("Error when executing label: %s", err.Error()) + } + + if val, ok := b.runConfig.Labels[labelName]; ok { + if val != labelValue { + t.Fatalf("Label %s should have value %s, had %s instead", labelName, labelValue, val) + } + } else { + t.Fatalf("Label %s should be present but it is not", labelName) + } +} + +func TestFrom(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + err := from(b, []string{"scratch"}, nil, "") + + if runtime.GOOS == "windows" { + if err == nil { + t.Fatalf("Error not set on Windows") + } + + expectedError := "Windows does not support FROM scratch" + + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Error message not correct on Windows. Should be: %s, got: %s", expectedError, err.Error()) + } + } else { + if err != nil { + t.Fatalf("Error when executing from: %s", err.Error()) + } + + if b.image != "" { + t.Fatalf("Image shoule be empty, got: %s", b.image) + } + + if b.noBaseImage != true { + t.Fatalf("Image should not have any base image, got: %v", b.noBaseImage) + } + } +} + +func TestOnbuildIllegalTriggers(t *testing.T) { + triggers := []struct{ command, expectedError string }{ + {"ONBUILD", "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed"}, + {"MAINTAINER", "MAINTAINER isn't allowed as an ONBUILD trigger"}, + {"FROM", "FROM isn't allowed as an ONBUILD trigger"}} + + for _, trigger := range triggers { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + err := onbuild(b, []string{trigger.command}, nil, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if !strings.Contains(err.Error(), trigger.expectedError) { + t.Fatalf("Error message not correct. Should be: %s, got: %s", trigger.expectedError, err.Error()) + } + } +} + +func TestOnbuild(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + err := onbuild(b, []string{"ADD", ".", "/app/src"}, nil, "ONBUILD ADD . /app/src") + + if err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + expectedOnbuild := "ADD . /app/src" + + if b.runConfig.OnBuild[0] != expectedOnbuild { + t.Fatalf("Wrong ONBUILD command. Expected: %s, got: %s", expectedOnbuild, b.runConfig.OnBuild[0]) + } +} + +func TestWorkdir(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + workingDir := "/app" + + if runtime.GOOS == "windows" { + workingDir = "C:\app" + } + + err := workdir(b, []string{workingDir}, nil, "") + + if err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.WorkingDir != workingDir { + t.Fatalf("WorkingDir should be set to %s, got %s", workingDir, b.runConfig.WorkingDir) + } + +} + +func TestCmd(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + command := "./executable" + + err := cmd(b, []string{command}, nil, "") + + if err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + var expectedCommand strslice.StrSlice + + if runtime.GOOS == "windows" { + expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command)) + } else { + expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command)) + } + + if !compareStrSlice(b.runConfig.Cmd, expectedCommand) { + t.Fatalf("Command should be set to %s, got %s", command, b.runConfig.Cmd) + } + + if !b.cmdSet { + t.Fatalf("Command should be marked as set") + } +} + +func compareStrSlice(slice1, slice2 strslice.StrSlice) bool { + if len(slice1) != len(slice2) { + return false + } + + for i := range slice1 { + if slice1[i] != slice2[i] { + return false + } + } + + return true +} + +func TestHealthcheckNone(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + if err := healthcheck(b, []string{"NONE"}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Healthcheck == nil { + t.Fatal("Healthcheck should be set, got nil") + } + + expectedTest := strslice.StrSlice(append([]string{"NONE"})) + + if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) { + t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) + } +} + +func TestHealthcheckCmd(t *testing.T) { + b := &Builder{flags: &BFlags{flags: make(map[string]*Flag)}, runConfig: &container.Config{}, disableCommit: true} + + if err := healthcheck(b, []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Healthcheck == nil { + t.Fatal("Healthcheck should be set, got nil") + } + + expectedTest := strslice.StrSlice(append([]string{"CMD-SHELL"}, "curl -f http://localhost/ || exit 1")) + + if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) { + t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) + } +} + +func TestEntrypoint(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + entrypointCmd := "/usr/sbin/nginx" + + if err := entrypoint(b, []string{entrypointCmd}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Entrypoint == nil { + t.Fatalf("Entrypoint should be set") + } + + var expectedEntrypoint strslice.StrSlice + + if runtime.GOOS == "windows" { + expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd)) + } else { + expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd)) + } + + if !compareStrSlice(expectedEntrypoint, b.runConfig.Entrypoint) { + t.Fatalf("Entrypoint command should be set to %s, got %s", expectedEntrypoint, b.runConfig.Entrypoint) + } +} + +func TestExpose(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + exposedPort := "80" + + if err := expose(b, []string{exposedPort}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.ExposedPorts == nil { + t.Fatalf("ExposedPorts should be set") + } + + if len(b.runConfig.ExposedPorts) != 1 { + t.Fatalf("ExposedPorts should contain only 1 element. Got %s", b.runConfig.ExposedPorts) + } + + portsMapping, err := nat.ParsePortSpec(exposedPort) + + if err != nil { + t.Fatalf("Error when parsing port spec: %s", err.Error()) + } + + if _, ok := b.runConfig.ExposedPorts[portsMapping[0].Port]; !ok { + t.Fatalf("Port %s should be present. Got %s", exposedPort, b.runConfig.ExposedPorts) + } +} + +func TestUser(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + userCommand := "foo" + + if err := user(b, []string{userCommand}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.User != userCommand { + t.Fatalf("User should be set to %s, got %s", userCommand, b.runConfig.User) + } +} + +func TestVolume(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + exposedVolume := "/foo" + + if err := volume(b, []string{exposedVolume}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Volumes == nil { + t.Fatalf("Volumes should be set") + } + + if len(b.runConfig.Volumes) != 1 { + t.Fatalf("Volumes should contain only 1 element. Got %s", b.runConfig.Volumes) + } + + if _, ok := b.runConfig.Volumes[exposedVolume]; !ok { + t.Fatalf("Volume %s should be present. Got %s", exposedVolume, b.runConfig.Volumes) + } +} + +func TestStopSignal(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + signal := "SIGKILL" + + if err := stopSignal(b, []string{signal}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.StopSignal != signal { + t.Fatalf("StopSignal should be set to %s, got %s", signal, b.runConfig.StopSignal) + } +} + +func TestArg(t *testing.T) { + buildOptions := &types.ImageBuildOptions{BuildArgs: make(map[string]*string)} + + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true, allowedBuildArgs: make(map[string]bool), options: buildOptions} + + argName := "foo" + argVal := "bar" + argDef := fmt.Sprintf("%s=%s", argName, argVal) + + if err := arg(b, []string{argDef}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + allowed, ok := b.allowedBuildArgs[argName] + + if !ok { + t.Fatalf("%s argument should be allowed as a build arg", argName) + } + + if !allowed { + t.Fatalf("%s argument was present in map but disallowed as a build arg", argName) + } + + val, ok := b.options.BuildArgs[argName] + + if !ok { + t.Fatalf("%s argument should be a build arg", argName) + } + + if *val != "bar" { + t.Fatalf("%s argument should have default value 'bar', got %s", argName, val) + } +} + +func TestShell(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + shellCmd := "powershell" + + attrs := make(map[string]bool) + attrs["json"] = true + + if err := shell(b, []string{shellCmd}, attrs, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Shell == nil { + t.Fatalf("Shell should be set") + } + + expectedShell := strslice.StrSlice([]string{shellCmd}) + + if !compareStrSlice(expectedShell, b.runConfig.Shell) { + t.Fatalf("Shell should be set to %s, got %s", expectedShell, b.runConfig.Shell) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go new file mode 100644 index 0000000..8b0dfc3 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" +) + +// normaliseWorkdir normalises a user requested working directory in a +// platform sematically consistent way. +func normaliseWorkdir(current string, requested string) (string, error) { + if requested == "" { + return "", fmt.Errorf("cannot normalise nothing") + } + current = filepath.FromSlash(current) + requested = filepath.FromSlash(requested) + if !filepath.IsAbs(requested) { + return filepath.Join(string(os.PathSeparator), current, requested), nil + } + return requested, nil +} + +func errNotJSON(command, _ string) error { + return fmt.Errorf("%s requires the arguments to be in JSON form", command) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go new file mode 100644 index 0000000..4aae6b4 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go @@ -0,0 +1,33 @@ +// +build !windows + +package dockerfile + +import ( + "testing" +) + +func TestNormaliseWorkdir(t *testing.T) { + testCases := []struct{ current, requested, expected, expectedError string }{ + {``, ``, ``, `cannot normalise nothing`}, + {``, `foo`, `/foo`, ``}, + {``, `/foo`, `/foo`, ``}, + {`/foo`, `bar`, `/foo/bar`, ``}, + {`/foo`, `/bar`, `/bar`, ``}, + } + + for _, test := range testCases { + normalised, err := normaliseWorkdir(test.current, test.requested) + + if test.expectedError != "" && err == nil { + t.Fatalf("NormaliseWorkdir should return an error %s, got nil", test.expectedError) + } + + if test.expectedError != "" && err.Error() != test.expectedError { + t.Fatalf("NormaliseWorkdir returned wrong error. Expected %s, got %s", test.expectedError, err.Error()) + } + + if normalised != test.expected { + t.Fatalf("NormaliseWorkdir error. Expected %s for current %s and requested %s, got %s", test.expected, test.current, test.requested, normalised) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go new file mode 100644 index 0000000..e890c3a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go @@ -0,0 +1,86 @@ +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/pkg/system" +) + +var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`) + +// normaliseWorkdir normalises a user requested working directory in a +// platform sematically consistent way. +func normaliseWorkdir(current string, requested string) (string, error) { + if requested == "" { + return "", fmt.Errorf("cannot normalise nothing") + } + + // `filepath.Clean` will replace "" with "." so skip in that case + if current != "" { + current = filepath.Clean(current) + } + if requested != "" { + requested = filepath.Clean(requested) + } + + // If either current or requested in Windows is: + // C: + // C:. + // then an error will be thrown as the definition for the above + // refers to `current directory on drive C:` + // Since filepath.Clean() will automatically normalize the above + // to `C:.`, we only need to check the last format + if pattern.MatchString(current) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current) + } + if pattern.MatchString(requested) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested) + } + + // Target semantics is C:\somefolder, specifically in the format: + // UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already + // guaranteed that `current`, if set, is consistent. This allows us to + // cope correctly with any of the following in a Dockerfile: + // WORKDIR a --> C:\a + // WORKDIR c:\\foo --> C:\foo + // WORKDIR \\foo --> C:\foo + // WORKDIR /foo --> C:\foo + // WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar + // WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar + if len(current) == 0 || system.IsAbs(requested) { + if (requested[0] == os.PathSeparator) || + (len(requested) > 1 && string(requested[1]) != ":") || + (len(requested) == 1) { + requested = filepath.Join(`C:\`, requested) + } + } else { + requested = filepath.Join(current, requested) + } + // Upper-case drive letter + return (strings.ToUpper(string(requested[0])) + requested[1:]), nil +} + +func errNotJSON(command, original string) error { + // For Windows users, give a hint if it looks like it might contain + // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], + // as JSON must be escaped. Unfortunate... + // + // Specifically looking for quote-driveletter-colon-backslash, there's no + // double backslash and a [] pair. No, this is not perfect, but it doesn't + // have to be. It's simply a hint to make life a little easier. + extra := "" + original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) + if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && + !strings.Contains(original, `\\`) && + strings.Contains(original, "[") && + strings.Contains(original, "]") { + extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) + } + return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go new file mode 100644 index 0000000..3319c06 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go @@ -0,0 +1,40 @@ +// +build windows + +package dockerfile + +import "testing" + +func TestNormaliseWorkdir(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, ``, ``, `cannot normalise nothing`}, + {``, `C:`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {``, `C:.`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:.`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {``, `a`, `C:\a`, ``}, + {``, `c:\foo`, `C:\foo`, ``}, + {``, `c:\\foo`, `C:\foo`, ``}, + {``, `\foo`, `C:\foo`, ``}, + {``, `\\foo`, `C:\foo`, ``}, + {``, `/foo`, `C:\foo`, ``}, + {``, `C:/foo`, `C:\foo`, ``}, + {`C:\foo`, `bar`, `C:\foo\bar`, ``}, + {`C:\foo`, `/bar`, `C:\bar`, ``}, + {`C:\foo`, `\bar`, `C:\bar`, ``}, + } + for _, i := range tests { + r, e := normaliseWorkdir(i.current, i.requested) + + if i.etext != "" && e == nil { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got no error", i.etext, i.current, i.requested) + } + + if i.etext != "" && e.Error() != i.etext { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got %s", i.etext, i.current, i.requested, e.Error()) + } + + if r != i.expected { + t.Fatalf("TestNormaliseWorkingDir Expected '%s' for '%s' '%s', got '%s'", i.expected, i.current, i.requested, r) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/envVarTest b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest new file mode 100644 index 0000000..067dca9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest @@ -0,0 +1,116 @@ +A|hello | hello +A|he'll'o | hello +A|he'llo | hello +A|he\'llo | he'llo +A|he\\'llo | he\llo +A|abc\tdef | abctdef +A|"abc\tdef" | abc\tdef +A|'abc\tdef' | abc\tdef +A|hello\ | hello +A|hello\\ | hello\ +A|"hello | hello +A|"hello\" | hello" +A|"hel'lo" | hel'lo +A|'hello | hello +A|'hello\' | hello\ +A|"''" | '' +A|$. | $. +A|$1 | +A|he$1x | hex +A|he$.x | he$.x +# Next one is different on Windows as $pwd==$PWD +U|he$pwd. | he. +W|he$pwd. | he/home. +A|he$PWD | he/home +A|he\$PWD | he$PWD +A|he\\$PWD | he\/home +A|he\${} | he${} +A|he\${}xx | he${}xx +A|he${} | he +A|he${}xx | hexx +A|he${hi} | he +A|he${hi}xx | hexx +A|he${PWD} | he/home +A|he${.} | error +A|he${XXX:-000}xx | he000xx +A|he${PWD:-000}xx | he/homexx +A|he${XXX:-$PWD}xx | he/homexx +A|he${XXX:-${PWD:-yyy}}xx | he/homexx +A|he${XXX:-${YYY:-yyy}}xx | heyyyxx +A|he${XXX:YYY} | error +A|he${XXX:+${PWD}}xx | hexx +A|he${PWD:+${XXX}}xx | hexx +A|he${PWD:+${SHELL}}xx | hebashxx +A|he${XXX:+000}xx | hexx +A|he${PWD:+000}xx | he000xx +A|'he${XX}' | he${XX} +A|"he${PWD}" | he/home +A|"he'$PWD'" | he'/home' +A|"$PWD" | /home +A|'$PWD' | $PWD +A|'\$PWD' | \$PWD +A|'"hello"' | "hello" +A|he\$PWD | he$PWD +A|"he\$PWD" | he$PWD +A|'he\$PWD' | he\$PWD +A|he${PWD | error +A|he${PWD:=000}xx | error +A|he${PWD:+${PWD}:}xx | he/home:xx +A|he${XXX:-\$PWD:}xx | he$PWD:xx +A|he${XXX:-\${PWD}z}xx | he${PWDz}xx +A|안녕하세요 | 안녕하세요 +A|안'녕'하세요 | 안녕하세요 +A|안'녕하세요 | 안녕하세요 +A|안녕\'하세요 | 안녕'하세요 +A|안\\'녕하세요 | 안\녕하세요 +A|안녕\t하세요 | 안녕t하세요 +A|"안녕\t하세요" | 안녕\t하세요 +A|'안녕\t하세요 | 안녕\t하세요 +A|안녕하세요\ | 안녕하세요 +A|안녕하세요\\ | 안녕하세요\ +A|"안녕하세요 | 안녕하세요 +A|"안녕하세요\" | 안녕하세요" +A|"안녕'하세요" | 안녕'하세요 +A|'안녕하세요 | 안녕하세요 +A|'안녕하세요\' | 안녕하세요\ +A|안녕$1x | 안녕x +A|안녕$.x | 안녕$.x +# Next one is different on Windows as $pwd==$PWD +U|안녕$pwd. | 안녕. +W|안녕$pwd. | 안녕/home. +A|안녕$PWD | 안녕/home +A|안녕\$PWD | 안녕$PWD +A|안녕\\$PWD | 안녕\/home +A|안녕\${} | 안녕${} +A|안녕\${}xx | 안녕${}xx +A|안녕${} | 안녕 +A|안녕${}xx | 안녕xx +A|안녕${hi} | 안녕 +A|안녕${hi}xx | 안녕xx +A|안녕${PWD} | 안녕/home +A|안녕${.} | error +A|안녕${XXX:-000}xx | 안녕000xx +A|안녕${PWD:-000}xx | 안녕/homexx +A|안녕${XXX:-$PWD}xx | 안녕/homexx +A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +A|안녕${XXX:YYY} | error +A|안녕${XXX:+${PWD}}xx | 안녕xx +A|안녕${PWD:+${XXX}}xx | 안녕xx +A|안녕${PWD:+${SHELL}}xx | 안녕bashxx +A|안녕${XXX:+000}xx | 안녕xx +A|안녕${PWD:+000}xx | 안녕000xx +A|'안녕${XX}' | 안녕${XX} +A|"안녕${PWD}" | 안녕/home +A|"안녕'$PWD'" | 안녕'/home' +A|'"안녕"' | "안녕" +A|안녕\$PWD | 안녕$PWD +A|"안녕\$PWD" | 안녕$PWD +A|'안녕\$PWD' | 안녕\$PWD +A|안녕${PWD | error +A|안녕${PWD:=000}xx | error +A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx +A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +A|$KOREAN | 한국어 +A|안녕$KOREAN | 안녕한국어 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go new file mode 100644 index 0000000..f5997c9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -0,0 +1,244 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling NewBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). +package dockerfile + +import ( + "fmt" + "strings" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]bool{ + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, +} + +// Certain commands are allowed to have their args split into more +// words after env var replacements. Meaning: +// ENV foo="123 456" +// EXPOSE $foo +// should result in the same thing as: +// EXPOSE 123 456 +// and not treat "123 456" as a single word. +// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. +// Quotes will cause it to still be treated as single word. +var allowWordExpansion = map[string]bool{ + command.Expose: true, +} + +var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error + +func init() { + evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ + command.Add: add, + command.Arg: arg, + command.Cmd: cmd, + command.Copy: dispatchCopy, // copy() is a go builtin + command.Entrypoint: entrypoint, + command.Env: env, + command.Expose: expose, + command.From: from, + command.Healthcheck: healthcheck, + command.Label: label, + command.Maintainer: maintainer, + command.Onbuild: onbuild, + command.Run: run, + command.Shell: shell, + command.StopSignal: stopSignal, + command.User: user, + command.Volume: volume, + command.Workdir: workdir, + } +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. +func (b *Builder) dispatch(stepN int, stepTotal int, ast *parser.Node) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + attrs := ast.Attributes + original := ast.Original + flags := ast.Flags + strList := []string{} + msg := fmt.Sprintf("Step %d/%d : %s", stepN+1, stepTotal, upperCasedCmd) + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + if cmd == "onbuild" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + ast = ast.Next.Children[0] + strList = append(strList, ast.Value) + msg += " " + ast.Value + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + } + + // count the number of nodes that we are going to traverse first + // so we can pre-create the argument and message array. This speeds up the + // allocation of those list a lot when they have a lot of arguments + cursor := ast + var n int + for cursor.Next != nil { + cursor = cursor.Next + n++ + } + msgList := make([]string, n) + + var i int + // Append the build-time args to config-environment. + // This allows builder config to override the variables, making the behavior similar to + // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build + // context. But `ENV foo $foo` will use the value from build context if one + // isn't already been defined by a previous ENV primitive. + // Note, we get this behavior because we know that ProcessWord() will + // stop on the first occurrence of a variable name and not notice + // a subsequent one. So, putting the buildArgs list after the Config.Env + // list, in 'envs', is safe. + envs := b.runConfig.Env + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + envs = append(envs, fmt.Sprintf("%s=%s", key, *val)) + } + for ast.Next != nil { + ast = ast.Next + var str string + str = ast.Value + if replaceEnvAllowed[cmd] { + var err error + var words []string + + if allowWordExpansion[cmd] { + words, err = ProcessWords(str, envs, b.directive.EscapeToken) + if err != nil { + return err + } + strList = append(strList, words...) + } else { + str, err = ProcessWord(str, envs, b.directive.EscapeToken) + if err != nil { + return err + } + strList = append(strList, str) + } + } else { + strList = append(strList, str) + } + msgList[i] = ast.Value + i++ + } + + msg += " " + strings.Join(msgList, " ") + fmt.Fprintln(b.Stdout, msg) + + // XXX yes, we skip any cmds that are not valid; the parser should have + // picked these out already. + if f, ok := evaluateTable[cmd]; ok { + b.flags = NewBFlags() + b.flags.Args = flags + return f(b, strList, attrs, original) + } + + return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +} + +// checkDispatch does a simple check for syntax errors of the Dockerfile. +// Because some of the instructions can only be validated through runtime, +// arg, env, etc., this syntax check will not be complete and could not replace +// the runtime check. Instead, this function is only a helper that allows +// user to find out the obvious error in Dockerfile earlier on. +// onbuild bool: indicate if instruction XXX is part of `ONBUILD XXX` trigger +func (b *Builder) checkDispatch(ast *parser.Node, onbuild bool) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + // The instruction itself is ONBUILD, we will make sure it follows with at + // least one argument + if upperCasedCmd == "ONBUILD" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + } + + // The instruction is part of ONBUILD trigger (not the instruction itself) + if onbuild { + switch upperCasedCmd { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd) + } + } + + if _, ok := evaluateTable[cmd]; ok { + return nil + } + + return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go new file mode 100644 index 0000000..4340a2f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go @@ -0,0 +1,197 @@ +package dockerfile + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +type dispatchTestCase struct { + name, dockerfile, expectedError string + files map[string]string +} + +func init() { + reexec.Init() +} + +func initDispatchTestCases() []dispatchTestCase { + dispatchTestCases := []dispatchTestCase{{ + name: "copyEmptyWhitespace", + dockerfile: `COPY + quux \ + bar`, + expectedError: "COPY requires at least two arguments", + }, + { + name: "ONBUILD forbidden FROM", + dockerfile: "ONBUILD FROM scratch", + expectedError: "FROM isn't allowed as an ONBUILD trigger", + files: nil, + }, + { + name: "ONBUILD forbidden MAINTAINER", + dockerfile: "ONBUILD MAINTAINER docker.io", + expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger", + files: nil, + }, + { + name: "ARG two arguments", + dockerfile: "ARG foo bar", + expectedError: "ARG requires exactly one argument", + files: nil, + }, + { + name: "MAINTAINER unknown flag", + dockerfile: "MAINTAINER --boo joe@example.com", + expectedError: "Unknown flag: boo", + files: nil, + }, + { + name: "ADD multiple files to file", + dockerfile: "ADD file1.txt file2.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON ADD multiple files to file", + dockerfile: `ADD ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard ADD multiple files to file", + dockerfile: "ADD file*.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard JSON ADD multiple files to file", + dockerfile: `ADD ["file*.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file", + dockerfile: "COPY file1.txt file2.txt test", + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON COPY multiple files to file", + dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "ADD multiple files to file with whitespace", + dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file with whitespace", + dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY wildcard no files", + dockerfile: `COPY file*.txt /tmp/`, + expectedError: "No source files were specified", + files: nil, + }, + { + name: "COPY url", + dockerfile: `COPY https://index.docker.io/robots.txt /`, + expectedError: "Source can't be a URL for COPY", + files: nil, + }, + { + name: "Chaining ONBUILD", + dockerfile: `ONBUILD ONBUILD RUN touch foobar`, + expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", + files: nil, + }, + { + name: "Invalid instruction", + dockerfile: `foo bar`, + expectedError: "Unknown instruction: FOO", + files: nil, + }} + + return dispatchTestCases +} + +func TestDispatch(t *testing.T) { + testCases := initDispatchTestCases() + + for _, testCase := range testCases { + executeTestCase(t, testCase) + } +} + +func executeTestCase(t *testing.T, testCase dispatchTestCase) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + for filename, content := range testCase.files { + createTestTempFile(t, contextDir, filename, content, 0777) + } + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar stream: %s", err) + } + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + context, err := builder.MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when creating tar context: %s", err) + } + + defer func() { + if err = context.Close(); err != nil { + t.Fatalf("Error when closing tar context: %s", err) + } + }() + + r := strings.NewReader(testCase.dockerfile) + d := parser.Directive{} + parser.SetEscapeToken(parser.DefaultEscapeToken, &d) + n, err := parser.Parse(r, &d) + + if err != nil { + t.Fatalf("Error when parsing Dockerfile: %s", err) + } + + config := &container.Config{} + options := &types.ImageBuildOptions{} + + b := &Builder{runConfig: config, options: options, Stdout: ioutil.Discard, context: context} + + err = b.dispatch(0, len(n.Children), n.Children[0]) + + if err == nil { + t.Fatalf("No error when executing test %s", testCase.name) + } + + if !strings.Contains(err.Error(), testCase.expectedError) { + t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", testCase.expectedError, err.Error()) + } + +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go new file mode 100644 index 0000000..28fd5b1 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package dockerfile + +// platformSupports is a short-term function to give users a quality error +// message if a Dockerfile uses a command not supported on the platform. +func platformSupports(command string) error { + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go new file mode 100644 index 0000000..72483a2 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go @@ -0,0 +1,13 @@ +package dockerfile + +import "fmt" + +// platformSupports is gives users a quality error message if a Dockerfile uses +// a command not supported on the platform. +func platformSupports(command string) error { + switch command { + case "stopsignal": + return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go new file mode 100644 index 0000000..6f0a367 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -0,0 +1,669 @@ +package dockerfile + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. + +import ( + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/runconfig/opts" +) + +func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { + if b.disableCommit { + return nil + } + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.runConfig.Image = b.image + + if id == "" { + cmd := b.runConfig.Cmd + b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) ", comment)) + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } else if hit { + return nil + } + id, err = b.create() + if err != nil { + return err + } + } + + // Note: Actually copy the struct + autoConfig := *b.runConfig + autoConfig.Cmd = autoCmd + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Author: b.maintainer, + Pause: true, + Config: &autoConfig, + }, + } + + // Commit the container + imageID, err := b.docker.Commit(id, commitCfg) + if err != nil { + return err + } + + b.image = imageID + return nil +} + +type copyInfo struct { + builder.FileInfo + decompress bool +} + +func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { + if b.context == nil { + return fmt.Errorf("No context given. Impossible to use %s", cmdName) + } + + if len(args) < 2 { + return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + // Work in daemon-specific filepath semantics + dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest + + b.runConfig.Image = b.image + + var infos []copyInfo + + // Loop through each src file and calculate the info we need to + // do the copy (e.g. hash value if cached). Don't actually do + // the copy until we've looked at all src files + var err error + for _, orig := range args[0 : len(args)-1] { + var fi builder.FileInfo + decompress := allowLocalDecompression + if urlutil.IsURL(orig) { + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + fi, err = b.download(orig) + if err != nil { + return err + } + defer os.RemoveAll(filepath.Dir(fi.Path())) + decompress = false + infos = append(infos, copyInfo{fi, decompress}) + continue + } + // not a URL + subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) + if err != nil { + return err + } + + infos = append(infos, subInfos...) + } + + if len(infos) == 0 { + return fmt.Errorf("No source files were specified") + } + if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { + return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + + // For backwards compat, if there's just one info then use it as the + // cache look-up string, otherwise hash 'em all into one + var srcHash string + var origPaths string + + if len(infos) == 1 { + fi := infos[0].FileInfo + origPaths = fi.Name() + if hfi, ok := fi.(builder.Hashed); ok { + srcHash = hfi.Hash() + } + } else { + var hashs []string + var origs []string + for _, info := range infos { + fi := info.FileInfo + origs = append(origs, fi.Name()) + if hfi, ok := fi.(builder.Hashed); ok { + hashs = append(hashs, hfi.Hash()) + } + } + hasher := sha256.New() + hasher.Write([]byte(strings.Join(hashs, ","))) + srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) + origPaths = strings.Join(origs, " ") + } + + cmd := b.runConfig.Cmd + b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), fmt.Sprintf("#(nop) %s %s in %s ", cmdName, srcHash, dest))) + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + if hit, err := b.probeCache(); err != nil { + return err + } else if hit { + return nil + } + + container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + + comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) + + // Twiddle the destination when its a relative path - meaning, make it + // relative to the WORKINGDIR + if dest, err = normaliseDest(cmdName, b.runConfig.WorkingDir, dest); err != nil { + return err + } + + for _, info := range infos { + if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil { + return err + } + } + + return b.commit(container.ID, cmd, comment) +} + +func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { + // get filename from URL + u, err := url.Parse(srcURL) + if err != nil { + return + } + path := filepath.FromSlash(u.Path) // Ensure in platform semantics + if strings.HasSuffix(path, string(os.PathSeparator)) { + path = path[:len(path)-1] + } + parts := strings.Split(path, string(os.PathSeparator)) + filename := parts[len(parts)-1] + if filename == "" { + err = fmt.Errorf("cannot determine filename from url: %s", u) + return + } + + // Initiate the download + resp, err := httputils.Download(srcURL) + if err != nil { + return + } + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + tmpFileName := filepath.Join(tmpDir, filename) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) + progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + fmt.Fprintln(b.Stdout) + // ignoring error because the file was already opened successfully + tmpFileSt, err := tmpFile.Stat() + if err != nil { + tmpFile.Close() + return + } + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + tmpFile.Close() + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + // Calc the checksum, even if we're using the cache + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + if err != nil { + return + } + if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { + return + } + hash := tarSum.Sum(nil) + r.Close() + return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil +} + +func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { + + // Work in daemon-specific OS filepath semantics + origPath = filepath.FromSlash(origPath) + + if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + var copyInfos []copyInfo + if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + if info.Name() == "" { + // Why are we doing this check? + return nil + } + if match, _ := filepath.Match(origPath, path); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) + return nil + }); err != nil { + return nil, err + } + return copyInfos, nil + } + + // Must be a dir or a file + + statPath, fi, err := b.context.Stat(origPath) + if err != nil { + return nil, err + } + + copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} + + hfi, handleHash := fi.(builder.Hashed) + if !handleHash { + return copyInfos, nil + } + + // Deal with the single file case + if !fi.IsDir() { + hfi.SetHash("file:" + hfi.Hash()) + return copyInfos, nil + } + // Must be a dir + var subfiles []string + err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + // we already checked handleHash above + subfiles = append(subfiles, info.(builder.Hashed).Hash()) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) + + return copyInfos, nil +} + +func (b *Builder) processImageFrom(img builder.Image) error { + if img != nil { + b.image = img.ImageID() + + if img.RunConfig() != nil { + b.runConfig = img.RunConfig() + } + } + + // Check to see if we have a default PATH, note that windows won't + // have one as its set by HCS + if system.DefaultPathEnv != "" { + // Convert the slice of strings that represent the current list + // of env vars into a map so we can see if PATH is already set. + // If its not set then go ahead and give it our default value + configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) + if _, ok := configEnv["PATH"]; !ok { + b.runConfig.Env = append(b.runConfig.Env, + "PATH="+system.DefaultPathEnv) + } + } + + if img == nil { + // Typically this means they used "FROM scratch" + return nil + } + + // Process ONBUILD triggers if they exist + if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := b.runConfig.OnBuild + b.runConfig.OnBuild = []string{} + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + ast, err := parser.Parse(strings.NewReader(step), &b.directive) + if err != nil { + return err + } + + total := len(ast.Children) + for _, n := range ast.Children { + if err := b.checkDispatch(n, true); err != nil { + return err + } + } + for i, n := range ast.Children { + if err := b.dispatch(i, total, n); err != nil { + return err + } + } + } + + return nil +} + +// probeCache checks if cache match can be found for current build instruction. +// If an image is found, probeCache returns `(true, nil)`. +// If no image is found, it returns `(false, nil)`. +// If there is any error, it returns `(false, err)`. +func (b *Builder) probeCache() (bool, error) { + c := b.imageCache + if c == nil || b.options.NoCache || b.cacheBusted { + return false, nil + } + cache, err := c.GetCache(b.image, b.runConfig) + if err != nil { + return false, err + } + if len(cache) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) + b.cacheBusted = true + return false, nil + } + + fmt.Fprintf(b.Stdout, " ---> Using cache\n") + logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) + b.image = string(cache) + + return true, nil +} + +func (b *Builder) create() (string, error) { + if b.image == "" && !b.noBaseImage { + return "", fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.runConfig.Image = b.image + + resources := container.Resources{ + CgroupParent: b.options.CgroupParent, + CPUShares: b.options.CPUShares, + CPUPeriod: b.options.CPUPeriod, + CPUQuota: b.options.CPUQuota, + CpusetCpus: b.options.CPUSetCPUs, + CpusetMems: b.options.CPUSetMems, + Memory: b.options.Memory, + MemorySwap: b.options.MemorySwap, + Ulimits: b.options.Ulimits, + } + + // TODO: why not embed a hostconfig in builder? + hostConfig := &container.HostConfig{ + SecurityOpt: b.options.SecurityOpt, + Isolation: b.options.Isolation, + ShmSize: b.options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(b.options.NetworkMode), + } + + config := *b.runConfig + + // Create the container + c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ + Config: b.runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return "", err + } + for _, warning := range c.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + + b.tmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { + return "", err + } + + return c.ID, nil +} + +var errCancelled = errors.New("build cancelled") + +func (b *Builder) run(cID string) (err error) { + errCh := make(chan error) + go func() { + errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) + }() + + finished := make(chan struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-b.clientCtx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + b.docker.ContainerKill(cID, 0) + b.removeContainer(cID) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() + + if err := b.docker.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got an error from ContainerStart: %v", + cancelErr, err) + } + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got an error from errCh: %v", + cancelErr, err) + } + return err + } + + if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d", + cancelErr, ret) + } + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + return &jsonmessage.JSONError{ + Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), + Code: ret, + } + } + close(finished) + return <-cancelErrCh +} + +func (b *Builder) removeContainer(c string) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := b.docker.ContainerRm(c, rmConfig); err != nil { + fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) + return err + } + return nil +} + +func (b *Builder) clearTmp() { + for c := range b.tmpContainers { + if err := b.removeContainer(c); err != nil { + return + } + delete(b.tmpContainers, c) + fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) + } +} + +// readDockerfile reads a Dockerfile from the current context. +func (b *Builder) readDockerfile() error { + // If no -f was specified then look for 'Dockerfile'. If we can't find + // that then look for 'dockerfile'. If neither are found then default + // back to 'Dockerfile' and use that in the error message. + if b.options.Dockerfile == "" { + b.options.Dockerfile = builder.DefaultDockerfileName + if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { + lowercase := strings.ToLower(b.options.Dockerfile) + if _, _, err := b.context.Stat(lowercase); err == nil { + b.options.Dockerfile = lowercase + } + } + } + + err := b.parseDockerfile() + + if err != nil { + return err + } + + // After the Dockerfile has been parsed, we need to check the .dockerignore + // file for either "Dockerfile" or ".dockerignore", and if either are + // present then erase them from the build context. These files should never + // have been sent from the client but we did send them to make sure that + // we had the Dockerfile to actually parse, and then we also need the + // .dockerignore file to know whether either file should be removed. + // Note that this assumes the Dockerfile has been read into memory and + // is now safe to be removed. + if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { + dockerIgnore.Process([]string{b.options.Dockerfile}) + } + return nil +} + +func (b *Builder) parseDockerfile() error { + f, err := b.context.Open(b.options.Dockerfile) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) + } + return err + } + defer f.Close() + if f, ok := f.(*os.File); ok { + // ignoring error because Open already succeeded + fi, err := f.Stat() + if err != nil { + return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) + } + if fi.Size() == 0 { + return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) + } + } + b.dockerfile, err = parser.Parse(f, &b.directive) + if err != nil { + return err + } + + return nil +} + +// determine if build arg is part of built-in args or user +// defined args in Dockerfile at any point in time. +func (b *Builder) isBuildArgAllowed(arg string) bool { + if _, ok := BuiltinAllowedBuildArgs[arg]; ok { + return true + } + if _, ok := b.allowedBuildArgs[arg]; ok { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go new file mode 100644 index 0000000..d170d8e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go @@ -0,0 +1,95 @@ +package dockerfile + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" +) + +func TestEmptyDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) + + readAndCheckDockerfile(t, "emptyDockefile", contextDir, "", "The Dockerfile (Dockerfile) cannot be empty") +} + +func TestSymlinkDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestSymlink(t, contextDir, builder.DefaultDockerfileName, "/etc/passwd") + + // The reason the error is "Cannot locate specified Dockerfile" is because + // in the builder, the symlink is resolved within the context, therefore + // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is + // a nonexistent file. + expectedError := fmt.Sprintf("Cannot locate specified Dockerfile: %s", builder.DefaultDockerfileName) + + readAndCheckDockerfile(t, "symlinkDockerfile", contextDir, builder.DefaultDockerfileName, expectedError) +} + +func TestDockerfileOutsideTheBuildContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Forbidden path outside the build context" + + readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError) +} + +func TestNonExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Cannot locate specified Dockerfile: Dockerfile" + + readAndCheckDockerfile(t, "NonExistingDockerfile", contextDir, "Dockerfile", expectedError) +} + +func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar stream: %s", err) + } + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + context, err := builder.MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when creating tar context: %s", err) + } + + defer func() { + if err = context.Close(); err != nil { + t.Fatalf("Error when closing tar context: %s", err) + } + }() + + options := &types.ImageBuildOptions{ + Dockerfile: dockerfilePath, + } + + b := &Builder{options: options, context: context} + + err = b.readDockerfile() + + if err == nil { + t.Fatalf("No error when executing test: %s", testName) + } + + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", expectedError, err.Error()) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go new file mode 100644 index 0000000..a8a47c3 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go @@ -0,0 +1,38 @@ +// +build !windows + +package dockerfile + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normaliseDest(cmdName, workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) + if !system.IsAbs(requested) { + dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go new file mode 100644 index 0000000..f60b112 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go @@ -0,0 +1,66 @@ +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normaliseDest(cmdName, workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) + + // We are guaranteed that the working directory is already consistent, + // However, Windows also has, for now, the limitation that ADD/COPY can + // only be done to the system drive, not any drives that might be present + // as a result of a bind mount. + // + // So... if the path requested is Linux-style absolute (/foo or \\foo), + // we assume it is the system drive. If it is a Windows-style absolute + // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we + // strip any configured working directories drive letter so that it + // can be subsequently legitimately converted to a Windows volume-style + // pathname. + + // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as + // we only want to validate where the DriveColon part has been supplied. + if filepath.IsAbs(dest) { + if strings.ToUpper(string(dest[0])) != "C" { + return "", fmt.Errorf("Windows does not support %s with a destinations not on the system drive (C:)", cmdName) + } + dest = dest[2:] // Strip the drive letter + } + + // Cannot handle relative where WorkingDir is not the system drive. + if len(workingDir) > 0 { + if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { + return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) + } + if !system.IsAbs(dest) { + if string(workingDir[0]) != "C" { + return "", fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName) + } + dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go new file mode 100644 index 0000000..868a667 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go @@ -0,0 +1,51 @@ +// +build windows + +package dockerfile + +import "testing" + +func TestNormaliseDest(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, `D:\`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, + {``, `e:/`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, + {`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`}, + {`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`}, + {`C`, ``, ``, `Current WorkingDir C is not platform consistent`}, + {`D:\`, `.`, ``, "Windows does not support TEST with relative paths when WORKDIR is not the system drive"}, + {``, `D`, `D`, ``}, + {``, `./a1`, `.\a1`, ``}, + {``, `.\b1`, `.\b1`, ``}, + {``, `/`, `\`, ``}, + {``, `\`, `\`, ``}, + {``, `c:/`, `\`, ``}, + {``, `c:\`, `\`, ``}, + {``, `.`, `.`, ``}, + {`C:\wdd`, `./a1`, `\wdd\a1`, ``}, + {`C:\wde`, `.\b1`, `\wde\b1`, ``}, + {`C:\wdf`, `/`, `\`, ``}, + {`C:\wdg`, `\`, `\`, ``}, + {`C:\wdh`, `c:/`, `\`, ``}, + {`C:\wdi`, `c:\`, `\`, ``}, + {`C:\wdj`, `.`, `\wdj`, ``}, + {`C:\wdk`, `foo/bar`, `\wdk\foo\bar`, ``}, + {`C:\wdl`, `foo\bar`, `\wdl\foo\bar`, ``}, + {`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``}, + {`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``}, + } + for _, i := range tests { + got, err := normaliseDest("TEST", i.current, i.requested) + if err != nil && i.etext == "" { + t.Fatalf("TestNormaliseDest Got unexpected error %q for %s %s. ", err.Error(), i.current, i.requested) + } + if i.etext != "" && ((err == nil) || (err != nil && err.Error() != i.etext)) { + if err == nil { + t.Fatalf("TestNormaliseDest Expected an error for %s %s but didn't get one", i.current, i.requested) + } else { + t.Fatalf("TestNormaliseDest Wrong error text for %s %s - %s", i.current, i.requested, err.Error()) + } + } + if i.etext == "" && got != i.expected { + t.Fatalf("TestNormaliseDest Expected %q for %q and %q. Got %q", i.expected, i.current, i.requested, got) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go new file mode 100644 index 0000000..fff3046 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/builder/dockerfile/parser" +) + +func main() { + var f *os.File + var err error + + if len(os.Args) < 2 { + fmt.Println("please supply filename(s)") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err = os.Open(fn) + if err != nil { + panic(err) + } + defer f.Close() + + d := parser.Directive{LookingForDirectives: true} + parser.SetEscapeToken(parser.DefaultEscapeToken, &d) + + ast, err := parser.Parse(f, &d) + if err != nil { + panic(err) + } else { + fmt.Println(ast.Dump()) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go new file mode 100644 index 0000000..60d74d9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go @@ -0,0 +1,61 @@ +package parser + +import ( + "testing" +) + +var invalidJSONArraysOfStrings = []string{ + `["a",42,"b"]`, + `["a",123.456,"b"]`, + `["a",{},"b"]`, + `["a",{"c": "d"},"b"]`, + `["a",["c"],"b"]`, + `["a",true,"b"]`, + `["a",false,"b"]`, + `["a",null,"b"]`, +} + +var validJSONArraysOfStrings = map[string][]string{ + `[]`: {}, + `[""]`: {""}, + `["a"]`: {"a"}, + `["a","b"]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + ` [ "a", "b" ] `: {"a", "b"}, + `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, +} + +func TestJSONArraysOfStrings(t *testing.T) { + for json, expected := range validJSONArraysOfStrings { + d := Directive{} + SetEscapeToken(DefaultEscapeToken, &d) + + if node, _, err := parseJSON(json, &d); err != nil { + t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) + } else { + i := 0 + for node != nil { + if i >= len(expected) { + t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) + } + if node.Value != expected[i] { + t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) + } + node = node.Next + i++ + } + if i != len(expected) { + t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) + } + } + } + for _, json := range invalidJSONArraysOfStrings { + d := Directive{} + SetEscapeToken(DefaultEscapeToken, &d) + + if _, _, err := parseJSON(json, &d); err != errDockerfileNotStringArray { + t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go new file mode 100644 index 0000000..d2bf2b0 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go @@ -0,0 +1,361 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + _, child, err := ParseLine(rest, d, false) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. +// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). +func parseWords(rest string, d *Directive) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + var chWidth int + + for pos := 0; pos <= len(rest); pos += chWidth { + if pos != len(rest) { + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == d.EscapeToken { + if pos+chWidth == len(rest) { + continue // just skip an escape token at end of line + } + // If we're not quoted and we see an escape token, then always just + // add the escape token plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos += chWidth + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // The escape token is special except for ' quotes - can't escape anything for ' + if ch == d.EscapeToken && quote != '\'' { + if pos+chWidth == len(rest) { + phase = inWord + continue // just skip the escape token at end + } + pos += chWidth + word += string(ch) + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string, d *Directive) (*Node, map[string]bool, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var rootnode *Node + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + node := &Node{} + rootnode = node + strs := tokenWhitespace.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf(key + " must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + } else { + var prevNode *Node + for i, word := range words { + if !strings.Contains(word, "=") { + return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) + } + parts := strings.SplitN(word, "=", 2) + + name := &Node{} + value := &Node{} + + name.Next = value + name.Value = parts[0] + value.Value = parts[1] + + if i == 0 { + rootnode = name + } else { + prevNode.Next = name + } + prevNode = value + } + } + + return rootnode, nil, nil +} + +func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { + return parseNameVal(rest, "ENV", d) +} + +func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { + return parseNameVal(rest, "LABEL", d) +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. +// Note that this is a stricter format than the old format of assignment, +// allowed by parseNameVal(), in a way that this only allows assignment of the +// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. +// In addition, a keyword definition alone is of the form `keyword` like `name1` +// above. And the assignments `name2=` and `name3=""` are equivalent and +// assign an empty value to the respective keywords. +func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var ( + rootnode *Node + prevNode *Node + ) + for i, word := range words { + node := &Node{} + node.Value = word + if i == 0 { + rootnode = node + } else { + prevNode.Next = node + } + prevNode = node + } + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node := &Node{} + rootnode := node + prevnode := node + for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parsestring just wraps the string in quotes and returns a working node. +func parseString(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + rest = strings.TrimLeftFunc(rest, unicode.IsSpace) + if !strings.HasPrefix(rest, "[") { + return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) + } + + var myJSON []interface{} + if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { + return nil, nil, err + } + + var top, prev *Node + for _, str := range myJSON { + s, ok := str.(string) + if !ok { + return nil, nil, errDockerfileNotStringArray + } + + node := &Node{Value: s} + if prev == nil { + top = node + } else { + prev.Next = node + } + prev = node + } + + return top, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. +func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string. +func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) { + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest, d) +} + +// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. +func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { + // Find end of first argument + var sep int + for ; sep < len(rest); sep++ { + if unicode.IsSpace(rune(rest[sep])) { + break + } + } + next := sep + for ; next < len(rest); next++ { + if !unicode.IsSpace(rune(rest[next])) { + break + } + } + + if sep == 0 { + return nil, nil, nil + } + + typ := rest[:sep] + cmd, attrs, err := parseMaybeJSON(rest[next:], d) + if err != nil { + return nil, nil, err + } + + return &Node{Value: typ, Next: cmd}, attrs, err +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go new file mode 100644 index 0000000..e534644 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go @@ -0,0 +1,221 @@ +// Package parser implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strings" + "unicode" + + "github.com/docker/docker/builder/dockerfile/command" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. +// +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing + Flags []string // only top Node should have this set + StartLine int // the line in the original dockerfile where the node begins + EndLine int // the line in the original dockerfile where the node ends +} + +// Directive is the structure used during a build run to hold the state of +// parsing directives. +type Directive struct { + EscapeToken rune // Current escape token + LineContinuationRegex *regexp.Regexp // Current line contination regex + LookingForDirectives bool // Whether we are currently looking for directives + EscapeSeen bool // Whether the escape directive has been seen +} + +var ( + dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) + tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) + tokenComment = regexp.MustCompile(`^#.*$`) +) + +// DefaultEscapeToken is the default escape token +const DefaultEscapeToken = "\\" + +// SetEscapeToken sets the default token for escaping characters in a Dockerfile. +func SetEscapeToken(s string, d *Directive) error { + if s != "`" && s != "\\" { + return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s) + } + d.EscapeToken = rune(s[0]) + d.LineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) + return nil +} + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ + command.Add: parseMaybeJSONToList, + command.Arg: parseNameOrNameVal, + command.Cmd: parseMaybeJSON, + command.Copy: parseMaybeJSONToList, + command.Entrypoint: parseMaybeJSON, + command.Env: parseEnv, + command.Expose: parseStringsWhitespaceDelimited, + command.From: parseString, + command.Healthcheck: parseHealthConfig, + command.Label: parseLabel, + command.Maintainer: parseString, + command.Onbuild: parseSubCommand, + command.Run: parseMaybeJSON, + command.Shell: parseMaybeJSON, + command.StopSignal: parseString, + command.User: parseString, + command.Volume: parseMaybeJSONToList, + command.Workdir: parseString, + } +} + +// ParseLine parses a line and returns the remainder. +func ParseLine(line string, d *Directive, ignoreCont bool) (string, *Node, error) { + // Handle the parser directive '# escape=. Parser directives must precede + // any builder instruction or other comments, and cannot be repeated. + if d.LookingForDirectives { + tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tecMatch) > 0 { + if d.EscapeSeen == true { + return "", nil, fmt.Errorf("only one escape parser directive can be used") + } + for i, n := range tokenEscapeCommand.SubexpNames() { + if n == "escapechar" { + if err := SetEscapeToken(tecMatch[i], d); err != nil { + return "", nil, err + } + d.EscapeSeen = true + return "", nil, nil + } + } + } + } + + d.LookingForDirectives = false + + if line = stripComments(line); line == "" { + return "", nil, nil + } + + if !ignoreCont && d.LineContinuationRegex.MatchString(line) { + line = d.LineContinuationRegex.ReplaceAllString(line, "") + return line, nil, nil + } + + cmd, flags, args, err := splitCommand(line) + if err != nil { + return "", nil, err + } + + node := &Node{} + node.Value = cmd + + sexp, attrs, err := fullDispatch(cmd, args, d) + if err != nil { + return "", nil, err + } + + node.Next = sexp + node.Attributes = attrs + node.Original = line + node.Flags = flags + + return "", node, nil +} + +// Parse is the main parse routine. +// It handles an io.ReadWriteCloser and returns the root of the AST. +func Parse(rwc io.Reader, d *Directive) (*Node, error) { + currentLine := 0 + root := &Node{} + root.StartLine = -1 + scanner := bufio.NewScanner(rwc) + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + scannedLine := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) + currentLine++ + line, child, err := ParseLine(scannedLine, d, false) + if err != nil { + return nil, err + } + startLine := currentLine + + if line != "" && child == nil { + for scanner.Scan() { + newline := scanner.Text() + currentLine++ + + if stripComments(strings.TrimSpace(newline)) == "" { + continue + } + + line, child, err = ParseLine(line+newline, d, false) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + if child == nil && line != "" { + // When we call ParseLine we'll pass in 'true' for + // the ignoreCont param if we're at the EOF. This will + // prevent the func from returning immediately w/o + // parsing the line thinking that there's more input + // to come. + + _, child, err = ParseLine(line, d, scanner.Err() == nil) + if err != nil { + return nil, err + } + } + } + + if child != nil { + // Update the line information for the current child. + child.StartLine = startLine + child.EndLine = currentLine + // Update the line information for the root. The starting line of the root is always the + // starting line of the first child and the ending line is the ending line of the last child. + if root.StartLine < 0 { + root.StartLine = currentLine + } + root.EndLine = currentLine + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go new file mode 100644 index 0000000..e8e2696 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go @@ -0,0 +1,173 @@ +package parser + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" +) + +const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" +const testFileLineInfo = "testfile-line/Dockerfile" + +func getDirs(t *testing.T, dir string) []string { + f, err := os.Open(dir) + if err != nil { + t.Fatal(err) + } + + defer f.Close() + + dirs, err := f.Readdirnames(0) + if err != nil { + t.Fatal(err) + } + + return dirs +} + +func TestTestNegative(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", dir, err) + } + defer df.Close() + + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + _, err = Parse(df, &d) + if err == nil { + t.Fatalf("No error parsing broken dockerfile for %s", dir) + } + } +} + +func TestTestData(t *testing.T) { + for _, dir := range getDirs(t, testDir) { + dockerfile := filepath.Join(testDir, dir, "Dockerfile") + resultfile := filepath.Join(testDir, dir, "result") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", dir, err) + } + defer df.Close() + + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + ast, err := Parse(df, &d) + if err != nil { + t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) + } + + content, err := ioutil.ReadFile(resultfile) + if err != nil { + t.Fatalf("Error reading %s's result file: %v", dir, err) + } + + if runtime.GOOS == "windows" { + // CRLF --> CR to match Unix behavior + content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) + } + + if ast.Dump()+"\n" != string(content) { + fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) + fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) + t.Fatalf("%s: AST dump of dockerfile does not match result", dir) + } + } +} + +func TestParseWords(t *testing.T) { + tests := []map[string][]string{ + { + "input": {"foo"}, + "expect": {"foo"}, + }, + { + "input": {"foo bar"}, + "expect": {"foo", "bar"}, + }, + { + "input": {"foo\\ bar"}, + "expect": {"foo\\ bar"}, + }, + { + "input": {"foo=bar"}, + "expect": {"foo=bar"}, + }, + { + "input": {"foo bar 'abc xyz'"}, + "expect": {"foo", "bar", "'abc xyz'"}, + }, + { + "input": {`foo bar "abc xyz"`}, + "expect": {"foo", "bar", `"abc xyz"`}, + }, + { + "input": {"àöû"}, + "expect": {"àöû"}, + }, + { + "input": {`föo bàr "âbc xÿz"`}, + "expect": {"föo", "bàr", `"âbc xÿz"`}, + }, + } + + for _, test := range tests { + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + words := parseWords(test["input"][0], &d) + if len(words) != len(test["expect"]) { + t.Fatalf("length check failed. input: %v, expect: %q, output: %q", test["input"][0], test["expect"], words) + } + for i, word := range words { + if word != test["expect"][i] { + t.Fatalf("word check failed for word: %q. input: %q, expect: %q, output: %q", word, test["input"][0], test["expect"], words) + } + } + } +} + +func TestLineInformation(t *testing.T) { + df, err := os.Open(testFileLineInfo) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", testFileLineInfo, err) + } + defer df.Close() + + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + ast, err := Parse(df, &d) + if err != nil { + t.Fatalf("Error parsing dockerfile %s: %v", testFileLineInfo, err) + } + + if ast.StartLine != 5 || ast.EndLine != 31 { + fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.EndLine) + t.Fatalf("Root line information doesn't match result.") + } + if len(ast.Children) != 3 { + fmt.Fprintf(os.Stderr, "Wrong number of child: expected(%d), actual(%d)\n", 3, len(ast.Children)) + t.Fatalf("Root line information doesn't match result for %s", testFileLineInfo) + } + expected := [][]int{ + {5, 5}, + {11, 12}, + {17, 31}, + } + for i, child := range ast.Children { + if child.StartLine != expected[i][0] || child.EndLine != expected[i][1] { + t.Logf("Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n", + i, expected[i][0], expected[i][1], child.StartLine, child.EndLine) + t.Fatalf("Root line information doesn't match result.") + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile new file mode 100644 index 0000000..c7601c9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile @@ -0,0 +1,35 @@ +# ESCAPE=\ + + + +FROM brimstone/ubuntu:14.04 + + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + + +ENV GOPATH \ +/go + + + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + + + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH + + + + diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile new file mode 100644 index 0000000..1d65578 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +ENV PATH diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile new file mode 100644 index 0000000..d1be459 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile @@ -0,0 +1 @@ +CMD [ "echo", [ "nested json" ] ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile new file mode 100644 index 0000000..00b444c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile @@ -0,0 +1,11 @@ +FROM ubuntu:14.04 +MAINTAINER Seongyeol Lim + +COPY . /go/src/github.com/docker/docker +ADD . / +ADD null / +COPY nullfile /tmp +ADD [ "vimrc", "/tmp" ] +COPY [ "bashrc", "/tmp" ] +COPY [ "test file", "/tmp" ] +ADD [ "test file", "/tmp/test file" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result new file mode 100644 index 0000000..85aee64 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(maintainer "Seongyeol Lim ") +(copy "." "/go/src/github.com/docker/docker") +(add "." "/") +(add "null" "/") +(copy "nullfile" "/tmp") +(add "vimrc" "/tmp") +(copy "bashrc" "/tmp") +(copy "test file" "/tmp") +(add "test file" "/tmp/test file") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile new file mode 100644 index 0000000..0364ef9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile @@ -0,0 +1,26 @@ +#escape=\ +FROM brimstone/ubuntu:14.04 + +MAINTAINER brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result new file mode 100644 index 0000000..227f748 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(maintainer "brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile new file mode 100644 index 0000000..25ae352 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result new file mode 100644 index 0000000..16492e5 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile new file mode 100644 index 0000000..42b324e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:14.04 + +RUN echo hello\ + world\ + goodnight \ + moon\ + light\ +ning +RUN echo hello \ + world +RUN echo hello \ +world +RUN echo hello \ +goodbye\ +frog +RUN echo hello \ +world +RUN echo hi \ + \ + world \ +\ + good\ +\ +night +RUN echo goodbye\ +frog +RUN echo good\ +bye\ +frog + +RUN echo hello \ +# this is a comment + +# this is a comment with a blank line surrounding it + +this is some more useful stuff diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result new file mode 100644 index 0000000..268ae07 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(run "echo hello world goodnight moon lightning") +(run "echo hello world") +(run "echo hello world") +(run "echo hello goodbyefrog") +(run "echo hello world") +(run "echo hi world goodnight") +(run "echo goodbyefrog") +(run "echo goodbyefrog") +(run "echo hello this is some more useful stuff") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile new file mode 100644 index 0000000..8ccb71a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf +ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init /etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result new file mode 100644 index 0000000..25dd3dd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env "APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile new file mode 100644 index 0000000..99fbe55 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile @@ -0,0 +1,103 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get golang.org/x/tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result new file mode 100644 index 0000000..d032f9b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result @@ -0,0 +1,24 @@ +(from "ubuntu:14.04") +(maintainer "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get golang.org/x/tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." "/go/src/github.com/docker/docker") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile new file mode 100644 index 0000000..08fa18a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu +ENV name value +ENV name=value +ENV name=value name2=value2 +ENV name="value value1" +ENV name=value\ value2 +ENV name="value'quote space'value2" +ENV name='value"double quote"value2' +ENV name=value\ value2 name2=value2\ value3 +ENV name="a\"b" +ENV name="a\'b" +ENV name='a\'b' +ENV name='a\'b'' +ENV name='a\"b' +ENV name="''" +# don't put anything after the next line - it must be the last line of the +# Dockerfile and it must end with \ +ENV name=value \ + name1=value1 \ + name2="value2a \ + value2b" \ + name3="value3a\n\"value3b\"" \ + name4="value4a\\nvalue4b" \ diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result new file mode 100644 index 0000000..ba0a6dd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result @@ -0,0 +1,16 @@ +(from "ubuntu") +(env "name" "value") +(env "name" "value") +(env "name" "value" "name2" "value2") +(env "name" "\"value value1\"") +(env "name" "value\\ value2") +(env "name" "\"value'quote space'value2\"") +(env "name" "'value\"double quote\"value2'") +(env "name" "value\\ value2" "name2" "value2\\ value3") +(env "name" "\"a\\\"b\"") +(env "name" "\"a\\'b\"") +(env "name" "'a\\'b'") +(env "name" "'a\\'b''") +(env "name" "'a\\\"b'") +(env "name" "\"''\"") +(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile new file mode 100644 index 0000000..6def7ef --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile @@ -0,0 +1,9 @@ +# Comment here. Should not be looking for the following parser directive. +# Hence the following line will be ignored, and the subsequent backslash +# continuation will be the default. +# escape = ` + +FROM image +MAINTAINER foo@bar.com +ENV GOPATH \ +\go \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result new file mode 100644 index 0000000..21522a8 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result @@ -0,0 +1,3 @@ +(from "image") +(maintainer "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile new file mode 100644 index 0000000..08a8cc4 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile @@ -0,0 +1,7 @@ +# escape = `` +# There is no white space line after the directives. This still succeeds, but goes +# against best practices. +FROM image +MAINTAINER foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result new file mode 100644 index 0000000..21522a8 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result @@ -0,0 +1,3 @@ +(from "image") +(maintainer "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile new file mode 100644 index 0000000..ef30414 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile @@ -0,0 +1,6 @@ +#escape = ` + +FROM image +MAINTAINER foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result new file mode 100644 index 0000000..21522a8 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result @@ -0,0 +1,3 @@ +(from "image") +(maintainer "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile new file mode 100644 index 0000000..1ffb17e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +RUN foo \ + +bar \ + +baz + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result new file mode 100644 index 0000000..13e409c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(maintainer "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(run "foo bar baz") +(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile new file mode 100644 index 0000000..2418e0f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile @@ -0,0 +1,10 @@ +FROM scratch +COPY foo /tmp/ +COPY --user=me foo /tmp/ +COPY --doit=true foo /tmp/ +COPY --user=me --doit=true foo /tmp/ +COPY --doit=true -- foo /tmp/ +COPY -- foo /tmp/ +CMD --doit [ "a", "b" ] +CMD --doit=true -- [ "a", "b" ] +CMD --doit -- [ ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result new file mode 100644 index 0000000..4578f4c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result @@ -0,0 +1,10 @@ +(from "scratch") +(copy "foo" "/tmp/") +(copy ["--user=me"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy ["--user=me" "--doit=true"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy "foo" "/tmp/") +(cmd ["--doit"] "a" "b") +(cmd ["--doit=true"] "a" "b") +(cmd ["--doit"]) diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile new file mode 100644 index 0000000..081e442 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile @@ -0,0 +1,10 @@ +FROM debian +ADD check.sh main.sh /app/ +CMD /app/main.sh +HEALTHCHECK +HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ + CMD /app/check.sh --quiet +HEALTHCHECK CMD +HEALTHCHECK CMD a b +HEALTHCHECK --timeout=3s CMD ["foo"] +HEALTHCHECK CONNECT TCP 7000 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result new file mode 100644 index 0000000..092924f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result @@ -0,0 +1,9 @@ +(from "debian") +(add "check.sh" "main.sh" "/app/") +(cmd "/app/main.sh") +(healthcheck) +(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") +(healthcheck "CMD") +(healthcheck "CMD" "a b") +(healthcheck ["--timeout=3s"] "CMD" "foo") +(healthcheck "CONNECT" "TCP 7000") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 0000000..587fb9b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result new file mode 100644 index 0000000..0998e87 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 0000000..39fe27d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 0000000..afc220c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 0000000..eaae081 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 0000000..484804e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 0000000..c3ac63c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 0000000..6147891 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 0000000..5fd4afa --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 0000000..1ffbb8f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 0000000..30cc4bb --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 0000000..3204814 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile new file mode 100644 index 0000000..a586917 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile @@ -0,0 +1,8 @@ +CMD [] +CMD [""] +CMD ["a"] +CMD ["a","b"] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result new file mode 100644 index 0000000..c6553e6 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result @@ -0,0 +1,8 @@ +(cmd) +(cmd "") +(cmd "a") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 0000000..35f9c24 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 0000000..b5ac6fe --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(maintainer "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 0000000..188395f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +MAINTAINER docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . / + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . copy diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 0000000..6f7d57a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(maintainer "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." "copy") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile new file mode 100644 index 0000000..f64c116 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result new file mode 100644 index 0000000..a0efcf0 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile new file mode 100644 index 0000000..57bb597 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile @@ -0,0 +1,3 @@ +FROM foo + +VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result new file mode 100644 index 0000000..18dbdee --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result @@ -0,0 +1,2 @@ +(from "foo") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile new file mode 100644 index 0000000..5b9ec06 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result new file mode 100644 index 0000000..a0036a9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile new file mode 100644 index 0000000..bf8368e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result new file mode 100644 index 0000000..56ddb6f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile new file mode 100644 index 0000000..72b79bd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result new file mode 100644 index 0000000..d4f94cd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") +(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile new file mode 100644 index 0000000..4842088 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD zsh -c weechat diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result new file mode 100644 index 0000000..c3abb4c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile new file mode 100644 index 0000000..3a4da6e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install znc -y +ADD conf /.znc + +CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result new file mode 100644 index 0000000..5493b25 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install znc -y") +(add "conf" "/.znc") +(cmd "/usr/bin/znc" "-f" "-r") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go new file mode 100644 index 0000000..cd7af75 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go @@ -0,0 +1,176 @@ +package parser + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +// Dump dumps the AST defined by `node` as a list of sexps. +// Returns a string suitable for printing. +func (node *Node) Dump() string { + str := "" + str += node.Value + + if len(node.Flags) > 0 { + str += fmt.Sprintf(" %q", node.Flags) + } + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + strconv.Quote(n.Value) + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string, d *Directive) (*Node, map[string]bool, error) { + fn := dispatch[cmd] + + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + + sexp, attrs, err := fn(args, d) + if err != nil { + return nil, nil, err + } + + return sexp, attrs, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +// covers comments and empty lines. Lines should be trimmed before passing to +// this function. +func stripComments(line string) string { + // string is already trimmed at this point + if tokenComment.MatchString(line) { + return tokenComment.ReplaceAllString(line, "") + } + + return line +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found someting with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go new file mode 100644 index 0000000..189afd1 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go @@ -0,0 +1,329 @@ +package dockerfile + +// This will take a single word and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. +// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section + +import ( + "fmt" + "runtime" + "strings" + "text/scanner" + "unicode" +) + +type shellWord struct { + word string + scanner scanner.Scanner + envs []string + pos int + escapeToken rune +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. +func ProcessWord(word string, env []string, escapeToken rune) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + escapeToken: escapeToken, + } + sw.scanner.Init(strings.NewReader(word)) + word, _, err := sw.process() + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word' then it will also +// return a slice of strings which represents the 'word' +// split up based on spaces - taking into account quotes. Note that +// this splitting is done **after** the env var substitutions are done. +// Note, each one is trimmed to remove leading and trailing spaces (unless +// they are quoted", but ProcessWord retains spaces between words. +func ProcessWords(word string, env []string, escapeToken rune) ([]string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + escapeToken: escapeToken, + } + sw.scanner.Init(strings.NewReader(word)) + _, words, err := sw.process() + return words, err +} + +func (sw *shellWord) process() (string, []string, error) { + return sw.processStopOn(scanner.EOF) +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + var scan scanner.Scanner + scan.Init(strings.NewReader(str)) + for scan.Peek() != scanner.EOF { + w.addChar(scan.Next()) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return w.words +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result string + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } + result += tmp + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just add it to the result + ch = sw.scanner.Next() + + if ch == sw.escapeToken { + // '\' (default escape token, but ` allowed) escapes, except end of line + + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result += string(ch) + } + } + + return result, words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.scanner.Next() + + for { + ch := sw.scanner.Next() + if ch == '\'' || ch == scanner.EOF { + break + } + result += string(ch) + } + + return result, nil +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ (or ` if escape token set accordingly) + var result string + + sw.scanner.Next() + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if ch == '"' { + sw.scanner.Next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } + result += tmp + } else { + ch = sw.scanner.Next() + if ch == sw.escapeToken { + chNext := sw.scanner.Peek() + + if chNext == scanner.EOF { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.scanner.Next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + ch := sw.scanner.Peek() + if ch == '{' { + sw.scanner.Next() + name := sw.processName() + ch = sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) + } + } + return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name += string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + if runtime.GOOS == "windows" { + // Case-insensitive environment variables on Windows + name = strings.ToUpper(name) + } + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if runtime.GOOS == "windows" { + env = strings.ToUpper(env) + } + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + compareName := env[:i] + if runtime.GOOS == "windows" { + compareName = strings.ToUpper(compareName) + } + if name != compareName { + continue + } + return env[i+1:] + } + return "" +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go new file mode 100644 index 0000000..6cf691c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go @@ -0,0 +1,155 @@ +package dockerfile + +import ( + "bufio" + "os" + "runtime" + "strings" + "testing" +) + +func TestShellParser4EnvVars(t *testing.T) { + fn := "envVarTest" + lineCount := 0 + + file, err := os.Open(fn) + if err != nil { + t.Fatalf("Can't open '%s': %s", err, fn) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} + for scanner.Scan() { + line := scanner.Text() + lineCount++ + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + if len(words) != 3 { + t.Fatalf("Error in '%s' - should be exactly one | in:%q", fn, line) + } + + words[0] = strings.TrimSpace(words[0]) + words[1] = strings.TrimSpace(words[1]) + words[2] = strings.TrimSpace(words[2]) + + // Key W=Windows; A=All; U=Unix + if (words[0] != "W") && (words[0] != "A") && (words[0] != "U") { + t.Fatalf("Invalid tag %s at line %d of %s. Must be W, A or U", words[0], lineCount, fn) + } + + if ((words[0] == "W" || words[0] == "A") && runtime.GOOS == "windows") || + ((words[0] == "U" || words[0] == "A") && runtime.GOOS != "windows") { + newWord, err := ProcessWord(words[1], envs, '\\') + + if err != nil { + newWord = "error" + } + + if newWord != words[2] { + t.Fatalf("Error. Src: %s Calc: %s Expected: %s at line %d", words[1], newWord, words[2], lineCount) + } + } + } +} + +func TestShellParser4Words(t *testing.T) { + fn := "wordsTest" + + file, err := os.Open(fn) + if err != nil { + t.Fatalf("Can't open '%s': %s", err, fn) + } + defer file.Close() + + envs := []string{} + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + + if strings.HasPrefix(line, "#") { + continue + } + + if strings.HasPrefix(line, "ENV ") { + line = strings.TrimLeft(line[3:], " ") + envs = append(envs, line) + continue + } + + words := strings.Split(line, "|") + if len(words) != 2 { + t.Fatalf("Error in '%s' - should be exactly one | in: %q", fn, line) + } + test := strings.TrimSpace(words[0]) + expected := strings.Split(strings.TrimLeft(words[1], " "), ",") + + result, err := ProcessWords(test, envs, '\\') + + if err != nil { + result = []string{"error"} + } + + if len(result) != len(expected) { + t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) + } + for i, w := range expected { + if w != result[i] { + t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) + } + } + } +} + +func TestGetEnv(t *testing.T) { + sw := &shellWord{ + word: "", + envs: nil, + pos: 0, + } + + sw.envs = []string{} + if sw.getEnv("foo") != "" { + t.Fatalf("2 - 'foo' should map to ''") + } + + sw.envs = []string{"foo"} + if sw.getEnv("foo") != "" { + t.Fatalf("3 - 'foo' should map to ''") + } + + sw.envs = []string{"foo="} + if sw.getEnv("foo") != "" { + t.Fatalf("4 - 'foo' should map to ''") + } + + sw.envs = []string{"foo=bar"} + if sw.getEnv("foo") != "bar" { + t.Fatalf("5 - 'foo' should map to 'bar'") + } + + sw.envs = []string{"foo=bar", "car=hat"} + if sw.getEnv("foo") != "bar" { + t.Fatalf("6 - 'foo' should map to 'bar'") + } + if sw.getEnv("car") != "hat" { + t.Fatalf("7 - 'car' should map to 'hat'") + } + + // Make sure we grab the first 'car' in the list + sw.envs = []string{"foo=bar", "car=hat", "car=bike"} + if sw.getEnv("car") != "hat" { + t.Fatalf("8 - 'car' should map to 'hat'") + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/support.go b/vendor/github.com/docker/docker/builder/dockerfile/support.go new file mode 100644 index 0000000..e875889 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/support.go @@ -0,0 +1,19 @@ +package dockerfile + +import "strings" + +// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile +// for exec form it returns untouched args slice +// for shell form it returns concatenated args as the first element of a slice +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/support_test.go b/vendor/github.com/docker/docker/builder/dockerfile/support_test.go new file mode 100644 index 0000000..7cc6fe9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/support_test.go @@ -0,0 +1,65 @@ +package dockerfile + +import "testing" + +type testCase struct { + name string + args []string + attributes map[string]bool + expected []string +} + +func initTestCases() []testCase { + testCases := []testCase{} + + testCases = append(testCases, testCase{ + name: "empty args", + args: []string{}, + attributes: make(map[string]bool), + expected: []string{}, + }) + + jsonAttributes := make(map[string]bool) + jsonAttributes["json"] = true + + testCases = append(testCases, testCase{ + name: "json attribute with one element", + args: []string{"foo"}, + attributes: jsonAttributes, + expected: []string{"foo"}, + }) + + testCases = append(testCases, testCase{ + name: "json attribute with two elements", + args: []string{"foo", "bar"}, + attributes: jsonAttributes, + expected: []string{"foo", "bar"}, + }) + + testCases = append(testCases, testCase{ + name: "no attributes", + args: []string{"foo", "bar"}, + attributes: nil, + expected: []string{"foo bar"}, + }) + + return testCases +} + +func TestHandleJSONArgs(t *testing.T) { + testCases := initTestCases() + + for _, test := range testCases { + arguments := handleJSONArgs(test.args, test.attributes) + + if len(arguments) != len(test.expected) { + t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) + } + + for i := range test.expected { + if arguments[i] != test.expected[i] { + t.Fatalf("In test \"%s\": element as position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) + } + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go b/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go new file mode 100644 index 0000000..80a3f1b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go @@ -0,0 +1,50 @@ +package dockerfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} + +// createTestSymlink creates a symlink file within dir which points to oldname +func createTestSymlink(t *testing.T, dir, filename, oldname string) string { + filePath := filepath.Join(dir, filename) + if err := os.Symlink(oldname, filePath); err != nil { + t.Fatalf("Error when creating %s symlink to %s: %s", filename, oldname, err) + } + + return filePath +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/wordsTest b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest new file mode 100644 index 0000000..fa916c6 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest @@ -0,0 +1,25 @@ +hello | hello +hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! +hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | hello there +hello\" there | hello",there diff --git a/vendor/github.com/docker/docker/builder/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore.go new file mode 100644 index 0000000..3da7913 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore.go @@ -0,0 +1,48 @@ +package builder + +import ( + "os" + + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" +) + +// DockerIgnoreContext wraps a ModifiableContext to add a method +// for handling the .dockerignore file at the root of the context. +type DockerIgnoreContext struct { + ModifiableContext +} + +// Process reads the .dockerignore file at the root of the embedded context. +// If .dockerignore does not exist in the context, then nil is returned. +// +// It can take a list of files to be removed after .dockerignore is removed. +// This is used for server-side implementations of builders that need to send +// the .dockerignore file as well as the special files specified in filesToRemove, +// but expect them to be excluded from the context after they were processed. +// +// For example, server-side Dockerfile builders are expected to pass in the name +// of the Dockerfile to be removed after it was parsed. +// +// TODO: Don't require a ModifiableContext (use Context instead) and don't remove +// files, instead handle a list of files to be excluded from the context. +func (c DockerIgnoreContext) Process(filesToRemove []string) error { + f, err := c.Open(".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + excludes, _ := dockerignore.ReadAll(f) + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) + for _, fileToRemove := range filesToRemove { + rm, _ := fileutils.Matches(fileToRemove, excludes) + if rm { + c.Remove(fileToRemove) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go new file mode 100644 index 0000000..2db67be --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go @@ -0,0 +1,49 @@ +package dockerignore + +import ( + "bufio" + "bytes" + "fmt" + "io" + "path/filepath" + "strings" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use GO's "clean" func to get the shortest/cleanest path for each. +func ReadAll(reader io.Reader) ([]string, error) { + if reader == nil { + return nil, nil + } + + scanner := bufio.NewScanner(reader) + var excludes []string + currentLine := 0 + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + pattern := string(scannedBytes) + currentLine++ + // Lines starting with # (comments) are ignored before processing + if strings.HasPrefix(pattern, "#") { + continue + } + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + excludes = append(excludes, pattern) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading .dockerignore: %v", err) + } + return excludes, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go new file mode 100644 index 0000000..612a139 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go @@ -0,0 +1,57 @@ +package dockerignore + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestReadAll(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "dockerignore-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + di, err := ReadAll(nil) + if err != nil { + t.Fatalf("Expected not to have error, got %v", err) + } + + if diLen := len(di); diLen != 0 { + t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) + } + + diName := filepath.Join(tmpDir, ".dockerignore") + content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") + err = ioutil.WriteFile(diName, []byte(content), 0777) + if err != nil { + t.Fatal(err) + } + + diFd, err := os.Open(diName) + if err != nil { + t.Fatal(err) + } + defer diFd.Close() + + di, err = ReadAll(diFd) + if err != nil { + t.Fatal(err) + } + + if di[0] != "test1" { + t.Fatalf("First element is not test1") + } + if di[1] != "/test2" { + t.Fatalf("Second element is not /test2") + } + if di[2] != "/a/file/here" { + t.Fatalf("Third element is not /a/file/here") + } + if di[3] != "lastfile" { + t.Fatalf("Fourth element is not lastfile") + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerignore_test.go b/vendor/github.com/docker/docker/builder/dockerignore_test.go new file mode 100644 index 0000000..3c0ceda --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore_test.go @@ -0,0 +1,95 @@ +package builder + +import ( + "io/ioutil" + "log" + "os" + "sort" + "testing" +) + +const shouldStayFilename = "should_stay" + +func extractFilenames(files []os.FileInfo) []string { + filenames := make([]string, len(files), len(files)) + + for i, file := range files { + filenames[i] = file.Name() + } + + return filenames +} + +func checkDirectory(t *testing.T, dir string, expectedFiles []string) { + files, err := ioutil.ReadDir(dir) + + if err != nil { + t.Fatalf("Could not read directory: %s", err) + } + + if len(files) != len(expectedFiles) { + log.Fatalf("Directory should contain exactly %d file(s), got %d", len(expectedFiles), len(files)) + } + + filenames := extractFilenames(files) + sort.Strings(filenames) + sort.Strings(expectedFiles) + + for i, filename := range filenames { + if filename != expectedFiles[i] { + t.Fatalf("File %s should be in the directory, got: %s", expectedFiles[i], filename) + } + } +} + +func executeProcess(t *testing.T, contextDir string) { + modifiableCtx := &tarSumContext{root: contextDir} + ctx := DockerIgnoreContext{ModifiableContext: modifiableCtx} + + err := ctx.Process([]string{DefaultDockerfileName}) + + if err != nil { + t.Fatalf("Error when executing Process: %s", err) + } +} + +func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename}) + +} + +func TestProcessNoDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName}) + +} + +func TestProcessShouldLeaveAllFiles(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName, dockerignoreFilename}) + +} diff --git a/vendor/github.com/docker/docker/builder/git.go b/vendor/github.com/docker/docker/builder/git.go new file mode 100644 index 0000000..74df244 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/git.go @@ -0,0 +1,28 @@ +package builder + +import ( + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/gitutils" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. +func MakeGitContext(gitURL string) (ModifiableContext, error) { + root, err := gitutils.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + // TODO: print errors? + c.Close() + os.RemoveAll(root) + }() + return MakeTarSumContext(c) +} diff --git a/vendor/github.com/docker/docker/builder/remote.go b/vendor/github.com/docker/docker/builder/remote.go new file mode 100644 index 0000000..f3a4329 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remote.go @@ -0,0 +1,157 @@ +package builder + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "regexp" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/urlutil" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// MakeRemoteContext downloads a context from remoteURL and returns it. +// +// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of +// maxPreambleLength bytes from the body to help detecting the MIME type. +// Look at acceptableRemoteMIME for more details. +// +// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected +// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). +// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. +func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { + f, err := httputils.Download(remoteURL) + if err != nil { + return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) + } + defer f.Body.Close() + + var contextReader io.ReadCloser + if contentTypeHandlers != nil { + contentType := f.Header.Get("Content-Type") + clen := f.ContentLength + + contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) + if err != nil { + return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err) + } + defer contextReader.Close() + + // This loop tries to find a content-type handler for the detected content-type. + // If it could not find one from the caller-supplied map, it tries the empty content-type `""` + // which is interpreted as a fallback handler (usually used for raw tar contexts). + for _, ct := range []string{contentType, ""} { + if fn, ok := contentTypeHandlers[ct]; ok { + defer contextReader.Close() + if contextReader, err = fn(contextReader); err != nil { + return nil, err + } + break + } + } + } + + // Pass through - this is a pre-packaged context, presumably + // with a Dockerfile with the right name inside it. + return MakeTarSumContext(contextReader) +} + +// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used +// irrespective of user input. +// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). +func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) { + switch { + case remoteURL == "": + context, err = MakeTarSumContext(r) + case urlutil.IsGitURL(remoteURL): + context, err = MakeGitContext(remoteURL) + case urlutil.IsURL(remoteURL): + context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller + // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input. + dockerfileName = DefaultDockerfileName + + // TODO: return a context without tarsum + r, err := archive.Generate(dockerfileName, string(dockerfile)) + if err != nil { + return nil, err + } + + return ioutil.NopCloser(r), nil + }, + // fallback handler (tar context) + "": func(rc io.ReadCloser) (io.ReadCloser, error) { + return createProgressReader(rc), nil + }, + }) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. +// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. +func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble) + bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g. 'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { + contentType, _, err = httputils.DetectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/vendor/github.com/docker/docker/builder/remote_test.go b/vendor/github.com/docker/docker/builder/remote_test.go new file mode 100644 index 0000000..691a084 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remote_test.go @@ -0,0 +1,213 @@ +package builder + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" +) + +var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic + +func TestSelectAcceptableMIME(t *testing.T) { + validMimeStrings := []string{ + "application/x-bzip2", + "application/bzip2", + "application/gzip", + "application/x-gzip", + "application/x-xz", + "application/xz", + "application/tar", + "application/x-tar", + "application/octet-stream", + "text/plain", + } + + invalidMimeStrings := []string{ + "", + "application/octet", + "application/json", + } + + for _, m := range invalidMimeStrings { + if len(selectAcceptableMIME(m)) > 0 { + t.Fatalf("Should not have accepted %q", m) + } + } + + for _, m := range validMimeStrings { + if str := selectAcceptableMIME(m); str == "" { + t.Fatalf("Should have accepted %q", m) + } + } +} + +func TestInspectEmptyResponse(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader([]byte(""))) + contentType, bReader, err := inspectResponse(ct, br, 0) + if err == nil { + t.Fatalf("Should have generated an error for an empty response") + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != 0 { + t.Fatal("response body should remain empty") + } +} + +func TestInspectResponseBinary(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader(binaryContext)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) + if err != nil { + t.Fatal(err) + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != len(binaryContext) { + t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) + } + for i := range body { + if body[i] != binaryContext[i] { + t.Fatalf("Corrupted response body at byte index %d", i) + } + } +} + +func TestResponseUnsupportedContentType(t *testing.T) { + content := []byte(dockerfileContents) + ct := "application/json" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(dockerfileContents))) + + if err == nil { + t.Fatal("Should have returned an error on content-type 'application/json'") + } + if contentType != ct { + t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseTextSimple(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseEmptyContentType(t *testing.T) { + content := []byte(dockerfileContents) + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bodyReader, err := inspectResponse("", br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bodyReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestMakeRemoteContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/" + DefaultDockerfileName + remoteURL := serverURL.String() + + mux.Handle("/", http.FileServer(http.Dir(contextDir))) + + remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + r, err := archive.Generate(DefaultDockerfileName, string(dockerfile)) + if err != nil { + return nil, err + } + return ioutil.NopCloser(r), nil + }, + }) + + if err != nil { + t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err) + } + + if remoteContext == nil { + t.Fatalf("Remote context should not be nil") + } + + tarSumCtx, ok := remoteContext.(*tarSumContext) + + if !ok { + t.Fatalf("Cast error, remote context should be casted to tarSumContext") + } + + fileInfoSums := tarSumCtx.sums + + if fileInfoSums.Len() != 1 { + t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len()) + } + + fileInfo := fileInfoSums.GetFile(DefaultDockerfileName) + + if fileInfo == nil { + t.Fatalf("There should be file named %s in fileInfoSums", DefaultDockerfileName) + } + + if fileInfo.Pos() != 0 { + t.Fatalf("File %s should have position 0, got %d", DefaultDockerfileName, fileInfo.Pos()) + } +} diff --git a/vendor/github.com/docker/docker/builder/tarsum.go b/vendor/github.com/docker/docker/builder/tarsum.go new file mode 100644 index 0000000..35054dc --- /dev/null +++ b/vendor/github.com/docker/docker/builder/tarsum.go @@ -0,0 +1,158 @@ +package builder + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/tarsum" +) + +type tarSumContext struct { + root string + sums tarsum.FileInfoSums +} + +func (c *tarSumContext) Close() error { + return os.RemoveAll(c.root) +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { + cleanpath, fullpath, err := c.normalize(path) + if err != nil { + return nil, err + } + r, err := os.Open(fullpath) + if err != nil { + return nil, convertPathError(err, cleanpath) + } + return r, nil +} + +func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { + cleanpath, fullpath, err := c.normalize(path) + if err != nil { + return "", nil, err + } + + st, err := os.Lstat(fullpath) + if err != nil { + return "", nil, convertPathError(err, cleanpath) + } + + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return "", nil, convertPathError(err, cleanpath) + } + + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is if relative path is empty. + sum := path + // Use the checksum of the followed path(not the possible symlink) because + // this is the file that is actually copied. + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + sum = tsInfo.Sum() + } + fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} + return rel, fi, nil +} + +// MakeTarSumContext returns a build Context from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. +// +// Closing tarStream has to be done by the caller. +func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + tsc := &tarSumContext{root: root} + + // Make sure we clean-up upon error. In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + if err := chrootarchive.Untar(sum, root, nil); err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + + return tsc, nil +} + +func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) { + cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) + if err != nil { + return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) + } + _, err = os.Lstat(fullpath) + if err != nil { + return "", "", convertPathError(err, path) + } + return +} + +func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { + root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) + return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return err + } + if rel == "." { + return nil + } + + sum := rel + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + sum = tsInfo.Sum() + } + fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} + if err := walkFn(rel, fi, nil); err != nil { + return err + } + return nil + }) +} + +func (c *tarSumContext) Remove(path string) error { + _, fullpath, err := c.normalize(path) + if err != nil { + return err + } + return os.RemoveAll(fullpath) +} diff --git a/vendor/github.com/docker/docker/builder/tarsum_test.go b/vendor/github.com/docker/docker/builder/tarsum_test.go new file mode 100644 index 0000000..278e583 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/tarsum_test.go @@ -0,0 +1,265 @@ +package builder + +import ( + "bufio" + "bytes" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +const ( + filename = "test" + contents = "contents test" +) + +func init() { + reexec.Init() +} + +func TestCloseRootDirectory(t *testing.T) { + contextDir, err := ioutil.TempDir("", "builder-tarsum-test") + + if err != nil { + t.Fatalf("Error with creating temporary directory: %s", err) + } + + tarsum := &tarSumContext{root: contextDir} + + err = tarsum.Close() + + if err != nil { + t.Fatalf("Error while executing Close: %s", err) + } + + _, err = os.Stat(contextDir) + + if !os.IsNotExist(err) { + t.Fatalf("Directory should not exist at this point") + defer os.RemoveAll(contextDir) + } +} + +func TestOpenFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + file, err := tarSum.Open(filename) + + if err != nil { + t.Fatalf("Error when executing Open: %s", err) + } + + defer file.Close() + + scanner := bufio.NewScanner(file) + buff := bytes.NewBufferString("") + + for scanner.Scan() { + buff.WriteString(scanner.Text()) + } + + if contents != buff.String() { + t.Fatalf("Contents are not equal. Expected: %s, got: %s", contents, buff.String()) + } + +} + +func TestOpenNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + tarSum := &tarSumContext{root: contextDir} + + file, err := tarSum.Open("not-existing") + + if file != nil { + t.Fatal("Opened file should be nil") + } + + if !os.IsNotExist(err) { + t.Fatalf("Error when executing Open: %s", err) + } +} + +func TestStatFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + testFilename := createTestTempFile(t, contextDir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + relPath, fileInfo, err := tarSum.Stat(filename) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if relPath != filename { + t.Fatalf("Relative path should be equal to %s, got %s", filename, relPath) + } + + if fileInfo.Path() != testFilename { + t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) + } +} + +func TestStatSubdir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + relativePath, err := filepath.Rel(contextDir, testFilename) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + relPath, fileInfo, err := tarSum.Stat(relativePath) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if relPath != relativePath { + t.Fatalf("Relative path should be equal to %s, got %s", relativePath, relPath) + } + + if fileInfo.Path() != testFilename { + t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) + } +} + +func TestStatNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + tarSum := &tarSumContext{root: contextDir} + + relPath, fileInfo, err := tarSum.Stat("not-existing") + + if relPath != "" { + t.Fatal("Relative path should be nil") + } + + if fileInfo != nil { + t.Fatalf("File info should be nil") + } + + if !os.IsNotExist(err) { + t.Fatalf("This file should not exist: %s", err) + } +} + +func TestRemoveDirectory(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + relativePath, err := filepath.Rel(contextDir, contextSubdir) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + tarSum := &tarSumContext{root: contextDir} + + err = tarSum.Remove(relativePath) + + if err != nil { + t.Fatalf("Error when executing Remove: %s", err) + } + + _, err = os.Stat(contextSubdir) + + if !os.IsNotExist(err) { + t.Fatalf("Directory should not exist at this point") + } +} + +func TestMakeTarSumContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, filename, contents, 0777) + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("error: %s", err) + } + + defer tarStream.Close() + + tarSum, err := MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when executing MakeTarSumContext: %s", err) + } + + if tarSum == nil { + t.Fatalf("Tar sum context should not be nil") + } +} + +func TestWalkWithoutError(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + createTestTempFile(t, contextSubdir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + walkFun := func(path string, fi FileInfo, err error) error { + return nil + } + + err := tarSum.Walk(contextSubdir, walkFun) + + if err != nil { + t.Fatalf("Error when executing Walk: %s", err) + } +} + +type WalkError struct { +} + +func (we WalkError) Error() string { + return "Error when executing Walk" +} + +func TestWalkWithError(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + tarSum := &tarSumContext{root: contextDir} + + walkFun := func(path string, fi FileInfo, err error) error { + return WalkError{} + } + + err := tarSum.Walk(contextSubdir, walkFun) + + if err == nil { + t.Fatalf("Error should not be nil") + } +} diff --git a/vendor/github.com/docker/docker/builder/utils_test.go b/vendor/github.com/docker/docker/builder/utils_test.go new file mode 100644 index 0000000..1101ff1 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/utils_test.go @@ -0,0 +1,87 @@ +package builder + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const ( + dockerfileContents = "FROM busybox" + dockerignoreFilename = ".dockerignore" + testfileContents = "test" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempSubdir creates a temporary directory for testing. +// It returns the created path but doesn't provide a cleanup function, +// so createTestTempSubdir should be used only for creating temporary subdirectories +// whose parent directories are properly cleaned up. +// When an error occurs, it terminates the test. +func createTestTempSubdir(t *testing.T, dir, prefix string) string { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} + +// chdir changes current working directory to dir. +// It returns a function which changes working directory back to the previous one. +// This function is meant to be executed as a deferred call. +// When an error occurs, it terminates the test. +func chdir(t *testing.T, dir string) func() { + workingDirectory, err := os.Getwd() + + if err != nil { + t.Fatalf("Error when retrieving working directory: %s", err) + } + + err = os.Chdir(dir) + + if err != nil { + t.Fatalf("Error when changing directory to %s: %s", dir, err) + } + + return func() { + err = os.Chdir(workingDirectory) + + if err != nil { + t.Fatalf("Error when changing back to working directory (%s): %s", workingDirectory, err) + } + } +} diff --git a/vendor/github.com/docker/docker/cli/cobra.go b/vendor/github.com/docker/docker/cli/cobra.go new file mode 100644 index 0000000..139845c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/cobra.go @@ -0,0 +1,139 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. +func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/docker/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return err + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return fmt.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{.Flags.FlagUsages | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. +{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go new file mode 100644 index 0000000..7fd1e4f --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go @@ -0,0 +1,69 @@ +package bundlefile + +import ( + "encoding/json" + "fmt" + "io" +) + +// Bundlefile stores the contents of a bundlefile +type Bundlefile struct { + Version string + Services map[string]Service +} + +// Service is a service from a bundlefile +type Service struct { + Image string + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Env []string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Ports []Port `json:",omitempty"` + WorkingDir *string `json:",omitempty"` + User *string `json:",omitempty"` + Networks []string `json:",omitempty"` +} + +// Port is a port as defined in a bundlefile +type Port struct { + Protocol string + Port uint32 +} + +// LoadFile loads a bundlefile from a path to the file +func LoadFile(reader io.Reader) (*Bundlefile, error) { + bundlefile := &Bundlefile{} + + decoder := json.NewDecoder(reader) + if err := decoder.Decode(bundlefile); err != nil { + switch jsonErr := err.(type) { + case *json.SyntaxError: + return nil, fmt.Errorf( + "JSON syntax error at byte %v: %s", + jsonErr.Offset, + jsonErr.Error()) + case *json.UnmarshalTypeError: + return nil, fmt.Errorf( + "Unexpected type at byte %v. Expected %s but received %s.", + jsonErr.Offset, + jsonErr.Type, + jsonErr.Value) + } + return nil, err + } + + return bundlefile, nil +} + +// Print writes the contents of the bundlefile to the output writer +// as human readable json +func Print(out io.Writer, bundle *Bundlefile) error { + bytes, err := json.MarshalIndent(*bundle, "", " ") + if err != nil { + return err + } + + _, err = out.Write(bytes) + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go new file mode 100644 index 0000000..c343410 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go @@ -0,0 +1,77 @@ +package bundlefile + +import ( + "bytes" + "strings" + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestLoadFileV01Success(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "redis": { + "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", + "Networks": ["default"] + }, + "web": { + "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", + "Networks": ["default"], + "User": "web" + } + } + }`) + + bundle, err := LoadFile(reader) + assert.NilError(t, err) + assert.Equal(t, bundle.Version, "0.1") + assert.Equal(t, len(bundle.Services), 2) +} + +func TestLoadFileSyntaxError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": unquoted string + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "syntax error at byte 37: invalid character 'u'") +} + +func TestLoadFileTypeError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "web": { + "Image": "redis", + "Networks": "none" + } + } + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "Unexpected type at byte 94. Expected []string but received string") +} + +func TestPrint(t *testing.T) { + var buffer bytes.Buffer + bundle := &Bundlefile{ + Version: "0.1", + Services: map[string]Service{ + "web": { + Image: "image", + Command: []string{"echo", "something"}, + }, + }, + } + assert.NilError(t, Print(&buffer, bundle)) + output := buffer.String() + assert.Contains(t, output, "\"Image\": \"image\"") + assert.Contains(t, output, + `"Command": [ + "echo", + "something" + ]`) +} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go b/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go new file mode 100644 index 0000000..d5705a4 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go @@ -0,0 +1,24 @@ +package checkpoint + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +// NewCheckpointCommand returns the `checkpoint` subcommand (only in experimental) +func NewCheckpointCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "checkpoint", + Short: "Manage checkpoints", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + Tags: map[string]string{"experimental": "", "version": "1.25"}, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/create.go b/vendor/github.com/docker/docker/cli/command/checkpoint/create.go new file mode 100644 index 0000000..473a941 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/checkpoint/create.go @@ -0,0 +1,58 @@ +package checkpoint + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type createOptions struct { + container string + checkpoint string + checkpointDir string + leaveRunning bool +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts createOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] CONTAINER CHECKPOINT", + Short: "Create a checkpoint from a running container", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + opts.checkpoint = args[1] + return runCreate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.leaveRunning, "leave-running", false, "Leave the container running after checkpoint") + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts createOptions) error { + client := dockerCli.Client() + + checkpointOpts := types.CheckpointCreateOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + Exit: !opts.leaveRunning, + } + + err := client.CheckpointCreate(context.Background(), opts.container, checkpointOpts) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/list.go b/vendor/github.com/docker/docker/cli/command/checkpoint/list.go new file mode 100644 index 0000000..daf8349 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/checkpoint/list.go @@ -0,0 +1,62 @@ +package checkpoint + +import ( + "fmt" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type listOptions struct { + checkpointDir string +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts listOptions + + cmd := &cobra.Command{ + Use: "ls [OPTIONS] CONTAINER", + Aliases: []string{"list"}, + Short: "List checkpoints for a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, args[0], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd + +} + +func runList(dockerCli *command.DockerCli, container string, opts listOptions) error { + client := dockerCli.Client() + + listOpts := types.CheckpointListOptions{ + CheckpointDir: opts.checkpointDir, + } + + checkpoints, err := client.CheckpointList(context.Background(), container, listOpts) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintf(w, "CHECKPOINT NAME") + fmt.Fprintf(w, "\n") + + for _, checkpoint := range checkpoints { + fmt.Fprintf(w, "%s\t", checkpoint.Name) + fmt.Fprint(w, "\n") + } + + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go b/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go new file mode 100644 index 0000000..ec39fa7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go @@ -0,0 +1,44 @@ +package checkpoint + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + checkpointDir string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER CHECKPOINT", + Aliases: []string{"remove"}, + Short: "Remove a checkpoint", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args[0], args[1], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runRemove(dockerCli *command.DockerCli, container string, checkpoint string, opts removeOptions) error { + client := dockerCli.Client() + + removeOpts := types.CheckpointDeleteOptions{ + CheckpointID: checkpoint, + CheckpointDir: opts.checkpointDir, + } + + return client.CheckpointDelete(context.Background(), container, removeOpts) +} diff --git a/vendor/github.com/docker/docker/cli/command/cli.go b/vendor/github.com/docker/docker/cli/command/cli.go new file mode 100644 index 0000000..6d1dd74 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/cli.go @@ -0,0 +1,260 @@ +package command + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/cliconfig/credentials" + "github.com/docker/docker/client" + "github.com/docker/docker/dockerversion" + dopts "github.com/docker/docker/opts" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// Streams is an interface which exposes the standard input and output streams +type Streams interface { + In() *InStream + Out() *OutStream + Err() io.Writer +} + +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + configFile *configfile.ConfigFile + in *InStream + out *OutStream + err io.Writer + keyFile string + client client.APIClient + hasExperimental bool + defaultVersion string +} + +// HasExperimental returns true if experimental features are accessible. +func (cli *DockerCli) HasExperimental() bool { + return cli.hasExperimental +} + +// DefaultVersion returns api.defaultVersion of DOCKER_API_VERSION if specified. +func (cli *DockerCli) DefaultVersion() string { + return cli.defaultVersion +} + +// Client returns the APIClient +func (cli *DockerCli) Client() client.APIClient { + return cli.client +} + +// Out returns the writer used for stdout +func (cli *DockerCli) Out() *OutStream { + return cli.out +} + +// Err returns the writer used for stderr +func (cli *DockerCli) Err() io.Writer { + return cli.err +} + +// In returns the reader used for stdin +func (cli *DockerCli) In() *InStream { + return cli.in +} + +// ShowHelp shows the command help. +func (cli *DockerCli) ShowHelp(cmd *cobra.Command, args []string) error { + cmd.SetOutput(cli.err) + cmd.HelpFunc()(cmd, args) + return nil +} + +// ConfigFile returns the ConfigFile +func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { + return cli.configFile +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. +func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + for registry := range cli.configFile.CredentialHelpers { + helper := cli.CredentialsStore(registry) + newAuths, err := helper.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + } + defaultStore := cli.CredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + return auths, nil +} + +func addAll(to, from map[string]types.AuthConfig) { + for reg, ac := range from { + to[reg] = ac + } +} + +// CredentialsStore returns a new credentials store based +// on the settings provided in the configuration file. Empty string returns +// the default credential store. +func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store { + if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" { + return credentials.NewNativeStore(cli.configFile, helper) + } + return credentials.NewFileStore(cli.configFile) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string { + if c.CredentialHelpers != nil && serverAddress != "" { + if helper, exists := c.CredentialHelpers[serverAddress]; exists { + return helper + } + } + return c.CredentialsStore +} + +// Initialize the dockerCli runs initialization that must happen after command +// line flags are parsed. +func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { + cli.configFile = LoadDefaultConfigFile(cli.err) + + var err error + cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) + if err != nil { + return err + } + + cli.defaultVersion = cli.client.ClientVersion() + + if opts.Common.TrustKey == "" { + cli.keyFile = filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) + } else { + cli.keyFile = opts.Common.TrustKey + } + + if ping, err := cli.client.Ping(context.Background()); err == nil { + cli.hasExperimental = ping.Experimental + + // since the new header was added in 1.25, assume server is 1.24 if header is not present. + if ping.APIVersion == "" { + ping.APIVersion = "1.24" + } + + // if server version is lower than the current cli, downgrade + if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) { + cli.client.UpdateClientVersion(ping.APIVersion) + } + } + return nil +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { + return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. +func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { + configFile, e := cliconfig.Load(cliconfig.ConfigDir()) + if e != nil { + fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) + } + if !configFile.ContainsAuth() { + credentials.DetectDefaultStore(configFile) + } + return configFile +} + +// NewAPIClientFromFlags creates a new APIClient from command line flags +func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { + host, err := getServerHost(opts.Hosts, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + customHeaders := configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = UserAgent() + + verStr := api.DefaultVersion + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + httpClient, err := newHTTPClient(host, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + return client.NewClient(host, verStr, httpClient, customHeaders) +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + host, err = dopts.ParseHost(tlsOptions != nil, host) + return +} + +func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { + if tlsOptions == nil { + // let the api client configure the default transport. + return nil, nil + } + + config, err := tlsconfig.Client(*tlsOptions) + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: config, + } + proto, addr, _, err := client.ParseHost(host) + if err != nil { + return nil, err + } + + sockets.ConfigureTransport(tr, proto, addr) + + return &http.Client{ + Transport: tr, + }, nil +} + +// UserAgent returns the user agent string used for making API requests +func UserAgent() string { + return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" +} diff --git a/vendor/github.com/docker/docker/cli/command/commands/commands.go b/vendor/github.com/docker/docker/cli/command/commands/commands.go new file mode 100644 index 0000000..d64d568 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/commands/commands.go @@ -0,0 +1,91 @@ +package commands + +import ( + "os" + + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/checkpoint" + "github.com/docker/docker/cli/command/container" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/cli/command/network" + "github.com/docker/docker/cli/command/node" + "github.com/docker/docker/cli/command/plugin" + "github.com/docker/docker/cli/command/registry" + "github.com/docker/docker/cli/command/secret" + "github.com/docker/docker/cli/command/service" + "github.com/docker/docker/cli/command/stack" + "github.com/docker/docker/cli/command/swarm" + "github.com/docker/docker/cli/command/system" + "github.com/docker/docker/cli/command/volume" + "github.com/spf13/cobra" +) + +// AddCommands adds all the commands from cli/command to the root command +func AddCommands(cmd *cobra.Command, dockerCli *command.DockerCli) { + cmd.AddCommand( + node.NewNodeCommand(dockerCli), + service.NewServiceCommand(dockerCli), + swarm.NewSwarmCommand(dockerCli), + secret.NewSecretCommand(dockerCli), + container.NewContainerCommand(dockerCli), + image.NewImageCommand(dockerCli), + system.NewSystemCommand(dockerCli), + container.NewRunCommand(dockerCli), + image.NewBuildCommand(dockerCli), + network.NewNetworkCommand(dockerCli), + hide(system.NewEventsCommand(dockerCli)), + registry.NewLoginCommand(dockerCli), + registry.NewLogoutCommand(dockerCli), + registry.NewSearchCommand(dockerCli), + system.NewVersionCommand(dockerCli), + volume.NewVolumeCommand(dockerCli), + hide(system.NewInfoCommand(dockerCli)), + hide(container.NewAttachCommand(dockerCli)), + hide(container.NewCommitCommand(dockerCli)), + hide(container.NewCopyCommand(dockerCli)), + hide(container.NewCreateCommand(dockerCli)), + hide(container.NewDiffCommand(dockerCli)), + hide(container.NewExecCommand(dockerCli)), + hide(container.NewExportCommand(dockerCli)), + hide(container.NewKillCommand(dockerCli)), + hide(container.NewLogsCommand(dockerCli)), + hide(container.NewPauseCommand(dockerCli)), + hide(container.NewPortCommand(dockerCli)), + hide(container.NewPsCommand(dockerCli)), + hide(container.NewRenameCommand(dockerCli)), + hide(container.NewRestartCommand(dockerCli)), + hide(container.NewRmCommand(dockerCli)), + hide(container.NewStartCommand(dockerCli)), + hide(container.NewStatsCommand(dockerCli)), + hide(container.NewStopCommand(dockerCli)), + hide(container.NewTopCommand(dockerCli)), + hide(container.NewUnpauseCommand(dockerCli)), + hide(container.NewUpdateCommand(dockerCli)), + hide(container.NewWaitCommand(dockerCli)), + hide(image.NewHistoryCommand(dockerCli)), + hide(image.NewImagesCommand(dockerCli)), + hide(image.NewImportCommand(dockerCli)), + hide(image.NewLoadCommand(dockerCli)), + hide(image.NewPullCommand(dockerCli)), + hide(image.NewPushCommand(dockerCli)), + hide(image.NewRemoveCommand(dockerCli)), + hide(image.NewSaveCommand(dockerCli)), + hide(image.NewTagCommand(dockerCli)), + hide(system.NewInspectCommand(dockerCli)), + stack.NewStackCommand(dockerCli), + stack.NewTopLevelDeployCommand(dockerCli), + checkpoint.NewCheckpointCommand(dockerCli), + plugin.NewPluginCommand(dockerCli), + ) + +} + +func hide(cmd *cobra.Command) *cobra.Command { + if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" { + return cmd + } + cmdCopy := *cmd + cmdCopy.Hidden = true + cmdCopy.Aliases = []string{} + return &cmdCopy +} diff --git a/vendor/github.com/docker/docker/cli/command/container/attach.go b/vendor/github.com/docker/docker/cli/command/container/attach.go new file mode 100644 index 0000000..31bb109 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/attach.go @@ -0,0 +1,130 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/signal" + "github.com/spf13/cobra" +) + +type attachOptions struct { + noStdin bool + proxy bool + detachKeys string + + container string +} + +// NewAttachCommand creates a new cobra.Command for `docker attach` +func NewAttachCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts attachOptions + + cmd := &cobra.Command{ + Use: "attach [OPTIONS] CONTAINER", + Short: "Attach to a running container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runAttach(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") + flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + return cmd +} + +func runAttach(dockerCli *command.DockerCli, opts *attachOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + c, err := client.ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if !c.State.Running { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + if c.State.Paused { + return fmt.Errorf("You cannot attach to a paused container, unpause it first") + } + + if err := dockerCli.In().CheckTty(!opts.noStdin, c.Config.Tty); err != nil { + return err + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: !opts.noStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = dockerCli.In() + } + + if opts.proxy && !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, opts.container) + defer signal.StopCatch(sigc) + } + + resp, errAttach := client.ContainerAttach(ctx, opts.container, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return errAttach + } + defer resp.Close() + + if c.Config.Tty && dockerCli.Out().IsTerminal() { + height, width := dockerCli.Out().GetTtySize() + // To handle the case where a user repeatedly attaches/detaches without resizing their + // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially + // resize it, then go back to normal. Without this, every attach after the first will + // require the user to manually resize or hit enter. + resizeTtyTo(ctx, client, opts.container, height+1, width+1, false) + + // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back + // to the actual size. + if err := MonitorTtySize(ctx, dockerCli, opts.container, false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } + } + if err := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil { + return err + } + + if errAttach != nil { + return errAttach + } + + _, status, err := getExitCode(ctx, dockerCli, opts.container) + if err != nil { + return err + } + if status != 0 { + return cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/cmd.go b/vendor/github.com/docker/docker/cli/command/container/cmd.go new file mode 100644 index 0000000..3e9b488 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/cmd.go @@ -0,0 +1,46 @@ +package container + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewContainerCommand returns a cobra command for `container` subcommands +func NewContainerCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "container", + Short: "Manage containers", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + NewAttachCommand(dockerCli), + NewCommitCommand(dockerCli), + NewCopyCommand(dockerCli), + NewCreateCommand(dockerCli), + NewDiffCommand(dockerCli), + NewExecCommand(dockerCli), + NewExportCommand(dockerCli), + NewKillCommand(dockerCli), + NewLogsCommand(dockerCli), + NewPauseCommand(dockerCli), + NewPortCommand(dockerCli), + NewRenameCommand(dockerCli), + NewRestartCommand(dockerCli), + NewRmCommand(dockerCli), + NewRunCommand(dockerCli), + NewStartCommand(dockerCli), + NewStatsCommand(dockerCli), + NewStopCommand(dockerCli), + NewTopCommand(dockerCli), + NewUnpauseCommand(dockerCli), + NewUpdateCommand(dockerCli), + NewWaitCommand(dockerCli), + newListCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/container/commit.go b/vendor/github.com/docker/docker/cli/command/container/commit.go new file mode 100644 index 0000000..cf8d010 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/commit.go @@ -0,0 +1,76 @@ +package container + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + dockeropts "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type commitOptions struct { + container string + reference string + + pause bool + comment string + author string + changes dockeropts.ListOpts +} + +// NewCommitCommand creates a new cobra.Command for `docker commit` +func NewCommitCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts commitOptions + + cmd := &cobra.Command{ + Use: "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]", + Short: "Create a new image from a container's changes", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + if len(args) > 1 { + opts.reference = args[1] + } + return runCommit(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.BoolVarP(&opts.pause, "pause", "p", true, "Pause container during commit") + flags.StringVarP(&opts.comment, "message", "m", "", "Commit message") + flags.StringVarP(&opts.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith \")") + + opts.changes = dockeropts.NewListOpts(nil) + flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") + + return cmd +} + +func runCommit(dockerCli *command.DockerCli, opts *commitOptions) error { + ctx := context.Background() + + name := opts.container + reference := opts.reference + + options := types.ContainerCommitOptions{ + Reference: reference, + Comment: opts.comment, + Author: opts.author, + Changes: opts.changes.GetAll(), + Pause: opts.pause, + } + + response, err := dockerCli.Client().ContainerCommit(ctx, name, options) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/cp.go b/vendor/github.com/docker/docker/cli/command/container/cp.go new file mode 100644 index 0000000..17ab2ac --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/cp.go @@ -0,0 +1,303 @@ +package container + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/spf13/cobra" +) + +type copyOptions struct { + source string + destination string + followLink bool +} + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool +} + +// NewCopyCommand creates a new `docker cp` command +func NewCopyCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts copyOptions + + cmd := &cobra.Command{ + Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`, + Short: "Copy files/folders between a container and the local filesystem", + Long: strings.Join([]string{ + "Copy files/folders between a container and the local filesystem\n", + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + if args[0] == "" { + return fmt.Errorf("source can not be empty") + } + if args[1] == "" { + return fmt.Errorf("destination can not be empty") + } + opts.source = args[0] + opts.destination = args[1] + return runCopy(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH") + + return cmd +} + +func runCopy(dockerCli *command.DockerCli, opts copyOptions) error { + srcContainer, srcPath := splitCpArg(opts.source) + dstContainer, dstPath := splitCpArg(opts.destination) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + cpParam := &cpConfig{ + followLink: opts.followLink, + } + + ctx := context.Background() + + switch direction { + case fromContainer: + return copyFromContainer(ctx, dockerCli, srcContainer, srcPath, dstPath, cpParam) + case toContainer: + return copyToContainer(ctx, dockerCli, srcPath, dstContainer, dstPath, cpParam) + case acrossContainers: + // Copying between containers isn't supported. + return fmt.Errorf("copying between containers is not supported") + default: + // User didn't specify any container. + return fmt.Errorf("must specify at least one container source") + } +} + +func statContainerPath(ctx context.Context, dockerCli *command.DockerCli, containerName, path string) (types.ContainerPathStat, error) { + return dockerCli.Client().ContainerStatPath(ctx, containerName, path) +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + + return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil +} + +func copyFromContainer(ctx context.Context, dockerCli *command.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + // if client requests to follow symbol link, then must decide target file to be copied + var rebaseName string + if cpParam.followLink { + srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath) + + // If the destination is a symbolic link, we should follow it. + if err == nil && srcStat.Mode&os.ModeSymlink != 0 { + linkTarget := srcStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + srcParent, _ := archive.SplitPathDirEntry(srcPath) + linkTarget = filepath.Join(srcParent, linkTarget) + } + + linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) + srcPath = linkTarget + } + + } + + content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath) + if err != nil { + return err + } + defer content.Close() + + if dstPath == "-" { + // Send the response to STDOUT. + _, err = io.Copy(os.Stdout, content) + + return err + } + + // Prepare source copy info. + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + RebaseName: rebaseName, + } + + preArchive := content + if len(srcInfo.RebaseName) != 0 { + _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) + preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) + } + // See comments in the implementation of `archive.CopyTo` for exactly what + // goes into deciding how and whether the source archive needs to be + // altered for the correct copy behavior. + return archive.CopyTo(preArchive, srcInfo, dstPath) +} + +func copyToContainer(ctx context.Context, dockerCli *command.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + // In order to get the copy behavior right, we need to know information + // about both the source and destination. The API is a simple tar + // archive/extract API but we can use the stat info header about the + // destination to be more informed about exactly what the destination is. + + // Prepare destination copy info by stat-ing the container path. + dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget) + } + + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed. If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + // Use STDIN. + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) + } + } else { + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + } + + return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options) +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately proceeded by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`. + return "", arg + } + + return parts[0], parts[1] +} diff --git a/vendor/github.com/docker/docker/cli/command/container/create.go b/vendor/github.com/docker/docker/cli/command/container/create.go new file mode 100644 index 0000000..d5e63bd --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/create.go @@ -0,0 +1,218 @@ +package container + +import ( + "fmt" + "io" + "os" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/pkg/jsonmessage" + // FIXME migrate to docker/distribution/reference + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + apiclient "github.com/docker/docker/client" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type createOptions struct { + name string +} + +// NewCreateCommand creates a new cobra.Command for `docker create` +func NewCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts createOptions + var copts *runconfigopts.ContainerOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runCreate(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddTrustedFlags(flags, true) + copts = runconfigopts.AddFlags(flags) + return cmd +} + +func runCreate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *createOptions, copts *runconfigopts.ContainerOptions) error { + config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) + if err != nil { + reportError(dockerCli.Err(), "create", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + response, err := createContainer(context.Background(), dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) + return nil +} + +func pullImage(ctx context.Context, dockerCli *command.DockerCli, image string, out io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageCreateOptions{ + RegistryAuth: encodedAuth, + } + + responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream( + responseBody, + out, + dockerCli.Out().FD(), + dockerCli.Out().IsTerminal(), + nil) +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func createContainer(ctx context.Context, dockerCli *command.DockerCli, config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*container.ContainerCreateCreatedBody, error) { + stderr := dockerCli.Err() + + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + var trustedRef reference.Canonical + _, ref, err := reference.ParseIDOrReference(config.Image) + if err != nil { + return nil, err + } + if ref != nil { + ref = reference.WithDefaultTag(ref) + + if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { + var err error + trustedRef, err = image.TrustedReference(ctx, dockerCli, ref, nil) + if err != nil { + return nil, err + } + config.Image = trustedRef.String() + } + } + + //create the container + response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) + + //if image not found try to pull it + if err != nil { + if apiclient.IsErrImageNotFound(err) && ref != nil { + fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", ref.String()) + + // we don't want to write to stdout anything apart from container.ID + if err = pullImage(ctx, dockerCli, config.Image, stderr); err != nil { + return nil, err + } + if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil { + if err := image.TagTrusted(ctx, dockerCli, trustedRef, ref); err != nil { + return nil, err + } + } + // Retry + var retryErr error + response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(stderr, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/diff.go b/vendor/github.com/docker/docker/cli/command/container/diff.go new file mode 100644 index 0000000..12d6591 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/diff.go @@ -0,0 +1,58 @@ +package container + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/archive" + "github.com/spf13/cobra" +) + +type diffOptions struct { + container string +} + +// NewDiffCommand creates a new cobra.Command for `docker diff` +func NewDiffCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts diffOptions + + return &cobra.Command{ + Use: "diff CONTAINER", + Short: "Inspect changes on a container's filesystem", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runDiff(dockerCli, &opts) + }, + } +} + +func runDiff(dockerCli *command.DockerCli, opts *diffOptions) error { + if opts.container == "" { + return fmt.Errorf("Container name cannot be empty") + } + ctx := context.Background() + + changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) + if err != nil { + return err + } + + for _, change := range changes { + var kind string + switch change.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(dockerCli.Out(), "%s %s\n", kind, change.Path) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/exec.go b/vendor/github.com/docker/docker/cli/command/container/exec.go new file mode 100644 index 0000000..f038149 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/exec.go @@ -0,0 +1,207 @@ +package container + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + apiclient "github.com/docker/docker/client" + options "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type execOptions struct { + detachKeys string + interactive bool + tty bool + detach bool + user string + privileged bool + env *options.ListOpts +} + +func newExecOptions() *execOptions { + var values []string + return &execOptions{ + env: options.NewListOptsRef(&values, runconfigopts.ValidateEnv), + } +} + +// NewExecCommand creats a new cobra.Command for `docker exec` +func NewExecCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := newExecOptions() + + cmd := &cobra.Command{ + Use: "exec [OPTIONS] CONTAINER COMMAND [ARG...]", + Short: "Run a command in a running container", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + container := args[0] + execCmd := args[1:] + return runExec(dockerCli, opts, container, execCmd) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVarP(&opts.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching a container") + flags.BoolVarP(&opts.interactive, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.BoolVarP(&opts.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.BoolVarP(&opts.detach, "detach", "d", false, "Detached mode: run command in the background") + flags.StringVarP(&opts.user, "user", "u", "", "Username or UID (format: [:])") + flags.BoolVarP(&opts.privileged, "privileged", "", false, "Give extended privileges to the command") + flags.VarP(opts.env, "env", "e", "Set environment variables") + flags.SetAnnotation("env", "version", []string{"1.25"}) + + return cmd +} + +func runExec(dockerCli *command.DockerCli, opts *execOptions, container string, execCmd []string) error { + execConfig, err := parseExec(opts, execCmd) + // just in case the ParseExec does not exit + if container == "" || err != nil { + return cli.StatusError{StatusCode: 1} + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + // Send client escape keys + execConfig.DetachKeys = dockerCli.ConfigFile().DetachKeys + + ctx := context.Background() + client := dockerCli.Client() + + response, err := client.ContainerExecCreate(ctx, container, *execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + fmt.Fprintf(dockerCli.Out(), "exec ID empty") + return nil + } + + //Temp struct for execStart so that we don't need to transfer all the execConfig + if !execConfig.Detach { + if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + if err := client.ContainerExecStart(ctx, execID, execStartCheck); err != nil { + return err + } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(dockerCli.Out(), "%s\n", execID) + return nil + } + + // Interactive exec requested. + var ( + out, stderr io.Writer + in io.ReadCloser + errCh chan error + ) + + if execConfig.AttachStdin { + in = dockerCli.In() + } + if execConfig.AttachStdout { + out = dockerCli.Out() + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = dockerCli.Out() + } else { + stderr = dockerCli.Err() + } + } + + resp, err := client.ContainerExecAttach(ctx, execID, *execConfig) + if err != nil { + return err + } + defer resp.Close() + errCh = promise.Go(func() error { + return holdHijackedConnection(ctx, dockerCli, execConfig.Tty, in, out, stderr, resp) + }) + + if execConfig.Tty && dockerCli.In().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil { + fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(ctx, client, execID); err != nil { + return err + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + + return nil +} + +// getExecExitCode perform an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(ctx context.Context, client apiclient.ContainerAPIClient, execID string) (bool, int, error) { + resp, err := client.ContainerExecInspect(ctx, execID) + if err != nil { + // If we can't connect, then the daemon probably died. + if !apiclient.IsErrConnectionFailed(err) { + return false, -1, err + } + return false, -1, nil + } + + return resp.Running, resp.ExitCode, nil +} + +// parseExec parses the specified args for the specified command and generates +// an ExecConfig from it. +func parseExec(opts *execOptions, execCmd []string) (*types.ExecConfig, error) { + execConfig := &types.ExecConfig{ + User: opts.user, + Privileged: opts.privileged, + Tty: opts.tty, + Cmd: execCmd, + Detach: opts.detach, + } + + // If -d is not set, attach to everything by default + if !opts.detach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if opts.interactive { + execConfig.AttachStdin = true + } + } + + if opts.env != nil { + execConfig.Env = opts.env.GetAll() + } + + return execConfig, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/exec_test.go b/vendor/github.com/docker/docker/cli/command/container/exec_test.go new file mode 100644 index 0000000..baeeaf1 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/exec_test.go @@ -0,0 +1,116 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +type arguments struct { + options execOptions + execCmd []string +} + +func TestParseExec(t *testing.T) { + valids := map[*arguments]*types.ExecConfig{ + &arguments{ + execCmd: []string{"command"}, + }: { + Cmd: []string{"command"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + execCmd: []string{"command1", "command2"}, + }: { + Cmd: []string{"command1", "command2"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + options: execOptions{ + interactive: true, + tty: true, + user: "uid", + }, + execCmd: []string{"command"}, + }: { + User: "uid", + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + Tty: true, + Cmd: []string{"command"}, + }, + &arguments{ + options: execOptions{ + detach: true, + }, + execCmd: []string{"command"}, + }: { + AttachStdin: false, + AttachStdout: false, + AttachStderr: false, + Detach: true, + Cmd: []string{"command"}, + }, + &arguments{ + options: execOptions{ + tty: true, + interactive: true, + detach: true, + }, + execCmd: []string{"command"}, + }: { + AttachStdin: false, + AttachStdout: false, + AttachStderr: false, + Detach: true, + Tty: true, + Cmd: []string{"command"}, + }, + } + + for valid, expectedExecConfig := range valids { + execConfig, err := parseExec(&valid.options, valid.execCmd) + if err != nil { + t.Fatal(err) + } + if !compareExecConfig(expectedExecConfig, execConfig) { + t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) + } + } +} + +func compareExecConfig(config1 *types.ExecConfig, config2 *types.ExecConfig) bool { + if config1.AttachStderr != config2.AttachStderr { + return false + } + if config1.AttachStdin != config2.AttachStdin { + return false + } + if config1.AttachStdout != config2.AttachStdout { + return false + } + if config1.Detach != config2.Detach { + return false + } + if config1.Privileged != config2.Privileged { + return false + } + if config1.Tty != config2.Tty { + return false + } + if config1.User != config2.User { + return false + } + if len(config1.Cmd) != len(config2.Cmd) { + return false + } + for index, value := range config1.Cmd { + if value != config2.Cmd[index] { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/cli/command/container/export.go b/vendor/github.com/docker/docker/cli/command/container/export.go new file mode 100644 index 0000000..8fa2e5d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/export.go @@ -0,0 +1,59 @@ +package container + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type exportOptions struct { + container string + output string +} + +// NewExportCommand creates a new `docker export` command +func NewExportCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts exportOptions + + cmd := &cobra.Command{ + Use: "export [OPTIONS] CONTAINER", + Short: "Export a container's filesystem as a tar archive", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runExport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runExport(dockerCli *command.DockerCli, opts exportOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ContainerExport(context.Background(), opts.container) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/vendor/github.com/docker/docker/cli/command/container/hijack.go b/vendor/github.com/docker/docker/cli/command/container/hijack.go new file mode 100644 index 0000000..ca136f0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/hijack.go @@ -0,0 +1,116 @@ +package container + +import ( + "io" + "runtime" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stdcopy" + "golang.org/x/net/context" +) + +// holdHijackedConnection handles copying input to and output from streams to the +// connection +func holdHijackedConnection(ctx context.Context, streams command.Streams, tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { + var ( + err error + restoreOnce sync.Once + ) + if inputStream != nil && tty { + if err := setRawTerminal(streams); err != nil { + return err + } + defer func() { + restoreOnce.Do(func() { + restoreTerminal(streams, inputStream) + }) + }() + } + + receiveStdout := make(chan error, 1) + if outputStream != nil || errorStream != nil { + go func() { + // When TTY is ON, use regular copy + if tty && outputStream != nil { + _, err = io.Copy(outputStream, resp.Reader) + // we should restore the terminal as soon as possible once connection end + // so any following print messages will be in normal type. + if inputStream != nil { + restoreOnce.Do(func() { + restoreTerminal(streams, inputStream) + }) + } + } else { + _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) + } + + logrus.Debug("[hijack] End of stdout") + receiveStdout <- err + }() + } + + stdinDone := make(chan struct{}) + go func() { + if inputStream != nil { + io.Copy(resp.Conn, inputStream) + // we should restore the terminal as soon as possible once connection end + // so any following print messages will be in normal type. + if tty { + restoreOnce.Do(func() { + restoreTerminal(streams, inputStream) + }) + } + logrus.Debug("[hijack] End of stdin") + } + + if err := resp.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + close(stdinDone) + }() + + select { + case err := <-receiveStdout: + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + case <-stdinDone: + if outputStream != nil || errorStream != nil { + select { + case err := <-receiveStdout: + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + case <-ctx.Done(): + } + } + case <-ctx.Done(): + } + + return nil +} + +func setRawTerminal(streams command.Streams) error { + if err := streams.In().SetRawTerminal(); err != nil { + return err + } + return streams.Out().SetRawTerminal() +} + +func restoreTerminal(streams command.Streams, in io.Closer) error { + streams.In().RestoreTerminal() + streams.Out().RestoreTerminal() + // WARNING: DO NOT REMOVE THE OS CHECK !!! + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. + if in != nil && runtime.GOOS != "darwin" { + return in.Close() + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/inspect.go b/vendor/github.com/docker/docker/cli/command/container/inspect.go new file mode 100644 index 0000000..08a8d24 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/inspect.go @@ -0,0 +1,47 @@ +package container + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + size bool + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker container inspect` +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Display detailed information on one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ContainerInspectWithRaw(ctx, ref, opts.size) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/container/kill.go b/vendor/github.com/docker/docker/cli/command/container/kill.go new file mode 100644 index 0000000..6da91a4 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/kill.go @@ -0,0 +1,56 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type killOptions struct { + signal string + + containers []string +} + +// NewKillCommand creates a new cobra.Command for `docker kill` +func NewKillCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts killOptions + + cmd := &cobra.Command{ + Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Kill one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runKill(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") + return cmd +} + +func runKill(dockerCli *command.DockerCli, opts *killOptions) error { + var errs []string + ctx := context.Background() + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + return dockerCli.Client().ContainerKill(ctx, container, opts.signal) + }) + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/list.go b/vendor/github.com/docker/docker/cli/command/container/list.go new file mode 100644 index 0000000..5bbf419 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/list.go @@ -0,0 +1,141 @@ +package container + +import ( + "io/ioutil" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/docker/docker/utils/templates" + "github.com/spf13/cobra" +) + +type psOptions struct { + quiet bool + size bool + all bool + noTrunc bool + nLatest bool + last int + format string + filter opts.FilterOpt +} + +// NewPsCommand creates a new cobra.Command for `docker ps` +func NewPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS]", + Short: "List containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPs(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display numeric IDs") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") + flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") + flags.IntVarP(&opts.last, "last", "n", -1, "Show n last created containers (includes all states)") + flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := *NewPsCommand(dockerCli) + cmd.Aliases = []string{"ps", "list"} + cmd.Use = "ls [OPTIONS]" + return &cmd +} + +// listOptionsProcessor is used to set any container list options which may only +// be embedded in the format template. +// This is passed directly into tmpl.Execute in order to allow the preprocessor +// to set any list options that were not provided by flags (e.g. `.Size`). +// It is using a `map[string]bool` so that unknown fields passed into the +// template format do not cause errors. These errors will get picked up when +// running through the actual template processor. +type listOptionsProcessor map[string]bool + +// Size sets the size of the map when called by a template execution. +func (o listOptionsProcessor) Size() bool { + o["size"] = true + return true +} + +// Label is needed here as it allows the correct pre-processing +// because Label() is a method with arguments +func (o listOptionsProcessor) Label(name string) string { + return "" +} + +func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) { + options := &types.ContainerListOptions{ + All: opts.all, + Limit: opts.last, + Size: opts.size, + Filters: opts.filter.Value(), + } + + if opts.nLatest && opts.last == -1 { + options.Limit = 1 + } + + tmpl, err := templates.Parse(opts.format) + + if err != nil { + return nil, err + } + + optionsProcessor := listOptionsProcessor{} + // This shouldn't error out but swallowing the error makes it harder + // to track down if preProcessor issues come up. Ref #24696 + if err := tmpl.Execute(ioutil.Discard, optionsProcessor); err != nil { + return nil, err + } + // At the moment all we need is to capture .Size for preprocessor + options.Size = opts.size || optionsProcessor["size"] + + return options, nil +} + +func runPs(dockerCli *command.DockerCli, opts *psOptions) error { + ctx := context.Background() + + listOptions, err := buildContainerListOptions(opts) + if err != nil { + return err + } + + containers, err := dockerCli.Client().ContainerList(ctx, *listOptions) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().PsFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().PsFormat + } else { + format = formatter.TableFormatKey + } + } + + containerCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewContainerFormat(format, opts.quiet, listOptions.Size), + Trunc: !opts.noTrunc, + } + return formatter.ContainerWrite(containerCtx, containers) +} diff --git a/vendor/github.com/docker/docker/cli/command/container/logs.go b/vendor/github.com/docker/docker/cli/command/container/logs.go new file mode 100644 index 0000000..3a37ced --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/logs.go @@ -0,0 +1,87 @@ +package container + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stdcopy" + "github.com/spf13/cobra" +) + +var validDrivers = map[string]bool{ + "json-file": true, + "journald": true, +} + +type logsOptions struct { + follow bool + since string + timestamps bool + details bool + tail string + + container string +} + +// NewLogsCommand creates a new cobra.Command for `docker logs` +func NewLogsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] CONTAINER", + Short: "Fetch the logs of a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runLogs(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if !validDrivers[c.HostConfig.LogConfig.Type] { + return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) + } + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + Details: opts.details, + } + responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options) + if err != nil { + return err + } + defer responseBody.Close() + + if c.Config.Tty { + _, err = io.Copy(dockerCli.Out(), responseBody) + } else { + _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) + } + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/container/pause.go b/vendor/github.com/docker/docker/cli/command/container/pause.go new file mode 100644 index 0000000..6817cf6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/pause.go @@ -0,0 +1,49 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type pauseOptions struct { + containers []string +} + +// NewPauseCommand creates a new cobra.Command for `docker pause` +func NewPauseCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pauseOptions + + return &cobra.Command{ + Use: "pause CONTAINER [CONTAINER...]", + Short: "Pause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runPause(dockerCli, &opts) + }, + } +} + +func runPause(dockerCli *command.DockerCli, opts *pauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerPause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/port.go b/vendor/github.com/docker/docker/cli/command/container/port.go new file mode 100644 index 0000000..ea15290 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/port.go @@ -0,0 +1,78 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/go-connections/nat" + "github.com/spf13/cobra" +) + +type portOptions struct { + container string + + port string +} + +// NewPortCommand creates a new cobra.Command for `docker port` +func NewPortCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts portOptions + + cmd := &cobra.Command{ + Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", + Short: "List port mappings or a specific mapping for the container", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + if len(args) > 1 { + opts.port = args[1] + } + return runPort(dockerCli, &opts) + }, + } + return cmd +} + +func runPort(dockerCli *command.DockerCli, opts *portOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if opts.port != "" { + port := opts.port + proto := "tcp" + parts := strings.SplitN(port, "/", 2) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/prune.go b/vendor/github.com/docker/docker/cli/command/container/prune.go new file mode 100644 index 0000000..064f4c0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/prune.go @@ -0,0 +1,75 @@ +package container + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool +} + +// NewPruneCommand returns a new cobra prune command for containers +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all stopped containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + + return cmd +} + +const warning = `WARNING! This will remove all stopped containers. +Are you sure you want to continue?` + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().ContainersPrune(context.Background(), filters.Args{}) + if err != nil { + return + } + + if len(report.ContainersDeleted) > 0 { + output = "Deleted Containers:\n" + for _, id := range report.ContainersDeleted { + output += id + "\n" + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Container Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true}) +} diff --git a/vendor/github.com/docker/docker/cli/command/container/ps_test.go b/vendor/github.com/docker/docker/cli/command/container/ps_test.go new file mode 100644 index 0000000..62b0545 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/ps_test.go @@ -0,0 +1,118 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestBuildContainerListOptions(t *testing.T) { + filters := opts.NewFilterOpt() + assert.NilError(t, filters.Set("foo=bar")) + assert.NilError(t, filters.Set("baz=foo")) + + contexts := []struct { + psOpts *psOptions + expectedAll bool + expectedSize bool + expectedLimit int + expectedFilters map[string]string + }{ + { + psOpts: &psOptions{ + all: true, + size: true, + last: 5, + filter: filters, + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: true, + last: -1, + nLatest: true, + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 1, + expectedFilters: make(map[string]string), + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}} {{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // Without .Size, size should be false + format: "{{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: false, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + } + + for _, c := range contexts { + options, err := buildContainerListOptions(c.psOpts) + assert.NilError(t, err) + + assert.Equal(t, c.expectedAll, options.All) + assert.Equal(t, c.expectedSize, options.Size) + assert.Equal(t, c.expectedLimit, options.Limit) + assert.Equal(t, options.Filters.Len(), len(c.expectedFilters)) + + for k, v := range c.expectedFilters { + f := options.Filters + if !f.ExactMatch(k, v) { + t.Fatalf("Expected filter with key %s to be %s but got %s", k, v, f.Get(k)) + } + } + } +} diff --git a/vendor/github.com/docker/docker/cli/command/container/rename.go b/vendor/github.com/docker/docker/cli/command/container/rename.go new file mode 100644 index 0000000..346fb7b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/rename.go @@ -0,0 +1,51 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type renameOptions struct { + oldName string + newName string +} + +// NewRenameCommand creates a new cobra.Command for `docker rename` +func NewRenameCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts renameOptions + + cmd := &cobra.Command{ + Use: "rename CONTAINER NEW_NAME", + Short: "Rename a container", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.oldName = args[0] + opts.newName = args[1] + return runRename(dockerCli, &opts) + }, + } + return cmd +} + +func runRename(dockerCli *command.DockerCli, opts *renameOptions) error { + ctx := context.Background() + + oldName := strings.TrimSpace(opts.oldName) + newName := strings.TrimSpace(opts.newName) + + if oldName == "" || newName == "" { + return fmt.Errorf("Error: Neither old nor new names may be empty") + } + + if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + return fmt.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/restart.go b/vendor/github.com/docker/docker/cli/command/container/restart.go new file mode 100644 index 0000000..fc3ba93 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/restart.go @@ -0,0 +1,62 @@ +package container + +import ( + "fmt" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type restartOptions struct { + nSeconds int + nSecondsChanged bool + + containers []string +} + +// NewRestartCommand creates a new cobra.Command for `docker restart` +func NewRestartCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts restartOptions + + cmd := &cobra.Command{ + Use: "restart [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Restart one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.nSecondsChanged = cmd.Flags().Changed("time") + return runRestart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") + return cmd +} + +func runRestart(dockerCli *command.DockerCli, opts *restartOptions) error { + ctx := context.Background() + var errs []string + var timeout *time.Duration + if opts.nSecondsChanged { + timeoutValue := time.Duration(opts.nSeconds) * time.Second + timeout = &timeoutValue + } + + for _, name := range opts.containers { + if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/rm.go b/vendor/github.com/docker/docker/cli/command/container/rm.go new file mode 100644 index 0000000..60724f1 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/rm.go @@ -0,0 +1,73 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type rmOptions struct { + rmVolumes bool + rmLink bool + force bool + + containers []string +} + +// NewRmCommand creates a new cobra.Command for `docker rm` +func NewRmCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Remove one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runRm(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") + flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)") + return cmd +} + +func runRm(dockerCli *command.DockerCli, opts *rmOptions) error { + ctx := context.Background() + + var errs []string + options := types.ContainerRemoveOptions{ + RemoveVolumes: opts.rmVolumes, + RemoveLinks: opts.rmLink, + Force: opts.force, + } + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + if container == "" { + return fmt.Errorf("Container name cannot be empty") + } + container = strings.Trim(container, "/") + return dockerCli.Client().ContainerRemove(ctx, container, options) + }) + + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/run.go b/vendor/github.com/docker/docker/cli/command/container/run.go new file mode 100644 index 0000000..0fad93e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/run.go @@ -0,0 +1,285 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + "os" + "runtime" + "strings" + "syscall" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + opttypes "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/libnetwork/resolvconf/dns" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type runOptions struct { + detach bool + sigProxy bool + name string + detachKeys string +} + +// NewRunCommand create a new `docker run` command +func NewRunCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts runOptions + var copts *runconfigopts.ContainerOptions + + cmd := &cobra.Command{ + Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Run a command in a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runRun(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + // These are flags not stored in Config/HostConfig + flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") + flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddTrustedFlags(flags, true) + copts = runconfigopts.AddFlags(flags) + return cmd +} + +func runRun(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *runconfigopts.ContainerOptions) error { + stdout, stderr, stdin := dockerCli.Out(), dockerCli.Err(), dockerCli.In() + client := dockerCli.Client() + // TODO: pass this as an argument + cmdPath := "run" + + var ( + flAttach *opttypes.ListOpts + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") + ) + + config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) + + // just in case the Parse does not exit + if err != nil { + reportError(stderr, cmdPath, err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + + if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return ErrConflictRestartPolicyAndAutoRemove + } + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintf(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") + } + + if len(hostConfig.DNS) > 0 { + // check the DNS settings passed via --dns against + // localhost regexp to warn if they are trying to + // set a DNS to a localhost address + for _, dnsIP := range hostConfig.DNS { + if dns.IsLocalhost(dnsIP) { + fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) + break + } + } + } + + config.ArgsEscaped = false + + if !opts.detach { + if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if fl := flags.Lookup("attach"); fl != nil { + flAttach = fl.Value.(*opttypes.ListOpts) + if flAttach.Len() != 0 { + return ErrConflictAttachDetach + } + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable sigProxy when in TTY mode + if config.Tty { + opts.sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. + if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize() + } + + ctx, cancelFun := context.WithCancel(context.Background()) + + createResponse, err := createContainer(ctx, dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) + if err != nil { + reportError(stderr, cmdPath, err.Error(), true) + return runStartContainerErr(err) + } + if opts.sigProxy { + sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID) + defer signal.StopCatch(sigc) + } + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintf(stdout, "%s\n", createResponse.ID) + }() + } + attach := config.AttachStdin || config.AttachStdout || config.AttachStderr + if attach { + var ( + out, cerr io.Writer + in io.ReadCloser + ) + if config.AttachStdin { + in = stdin + } + if config.AttachStdout { + out = stdout + } + if config.AttachStderr { + if config.Tty { + cerr = stdout + } else { + cerr = stderr + } + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: config.AttachStdin, + Stdout: config.AttachStdout, + Stderr: config.AttachStderr, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + resp, errAttach := client.ContainerAttach(ctx, createResponse.ID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return errAttach + } + defer resp.Close() + + errCh = promise.Go(func() error { + errHijack := holdHijackedConnection(ctx, dockerCli, config.Tty, in, out, cerr, resp) + if errHijack == nil { + return errAttach + } + return errHijack + }) + } + + statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, hostConfig.AutoRemove) + + //start the container + if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil { + // If we have holdHijackedConnection, we should notify + // holdHijackedConnection we are going to exit and wait + // to avoid the terminal are not restored. + if attach { + cancelFun() + <-errCh + } + + reportError(stderr, cmdPath, err.Error(), false) + if hostConfig.AutoRemove { + // wait container to be removed + <-statusChan + } + return runStartContainerErr(err) + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil { + fmt.Fprintf(stderr, "Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. + if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + status := <-statusChan + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +// reportError is a utility method that prints a user-friendly message +// containing the error that occurred during parsing and a suggestion to get help +func reportError(stderr io.Writer, name string, str string, withHelp bool) { + if withHelp { + str += ".\nSee '" + os.Args[0] + " " + name + " --help'" + } + fmt.Fprintf(stderr, "%s: %s.\n", os.Args[0], str) +} + +// if container start fails with 'not found'/'no such' error, return 127 +// if container start fails with 'permission denied' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") + statusError := cli.StatusError{StatusCode: 125} + if strings.Contains(trimmedErr, "executable file not found") || + strings.Contains(trimmedErr, "no such file or directory") || + strings.Contains(trimmedErr, "system cannot find the file specified") { + statusError = cli.StatusError{StatusCode: 127} + } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { + statusError = cli.StatusError{StatusCode: 126} + } + + return statusError +} diff --git a/vendor/github.com/docker/docker/cli/command/container/start.go b/vendor/github.com/docker/docker/cli/command/container/start.go new file mode 100644 index 0000000..3521a41 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/start.go @@ -0,0 +1,179 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/spf13/cobra" +) + +type startOptions struct { + attach bool + openStdin bool + detachKeys string + checkpoint string + checkpointDir string + + containers []string +} + +// NewStartCommand creates a new cobra.Command for `docker start` +func NewStartCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts startOptions + + cmd := &cobra.Command{ + Use: "start [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Start one or more stopped containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") + flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + flags.StringVar(&opts.checkpoint, "checkpoint", "", "Restore from this checkpoint") + flags.SetAnnotation("checkpoint", "experimental", nil) + flags.StringVar(&opts.checkpointDir, "checkpoint-dir", "", "Use a custom checkpoint storage directory") + flags.SetAnnotation("checkpoint-dir", "experimental", nil) + return cmd +} + +func runStart(dockerCli *command.DockerCli, opts *startOptions) error { + ctx, cancelFun := context.WithCancel(context.Background()) + + if opts.attach || opts.openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if len(opts.containers) > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + // 2. Attach to the container. + container := opts.containers[0] + c, err := dockerCli.Client().ContainerInspect(ctx, container) + if err != nil { + return err + } + + // We always use c.ID instead of container to maintain consistency during `docker start` + if !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, c.ID) + defer signal.StopCatch(sigc) + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: opts.openStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + + if options.Stdin { + in = dockerCli.In() + } + + resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach return an ErrPersistEOF (connection closed) + // means server met an error and already put it in Hijacked connection, + // we would keep the error and read the detailed error message from hijacked connection + return errAttach + } + defer resp.Close() + cErr := promise.Go(func() error { + errHijack := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp) + if errHijack == nil { + return errAttach + } + return errHijack + }) + + // 3. We should open a channel for receiving status code of the container + // no matter it's detached, removed on daemon side(--rm) or exit normally. + statusChan := waitExitOrRemoved(ctx, dockerCli, c.ID, c.HostConfig.AutoRemove) + startOptions := types.ContainerStartOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + } + + // 4. Start the container. + if err := dockerCli.Client().ContainerStart(ctx, c.ID, startOptions); err != nil { + cancelFun() + <-cErr + if c.HostConfig.AutoRemove { + // wait container to be removed + <-statusChan + } + return err + } + + // 5. Wait for attachment to break. + if c.Config.Tty && dockerCli.Out().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, c.ID, false); err != nil { + fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) + } + } + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + + if status := <-statusChan; status != 0 { + return cli.StatusError{StatusCode: status} + } + } else if opts.checkpoint != "" { + if len(opts.containers) > 1 { + return fmt.Errorf("You cannot restore multiple containers at once.") + } + container := opts.containers[0] + startOptions := types.ContainerStartOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + } + return dockerCli.Client().ContainerStart(ctx, container, startOptions) + + } else { + // We're not going to attach to anything. + // Start as many containers as we want. + return startContainersWithoutAttachments(ctx, dockerCli, opts.containers) + } + + return nil +} + +func startContainersWithoutAttachments(ctx context.Context, dockerCli *command.DockerCli, containers []string) error { + var failedContainers []string + for _, container := range containers { + if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + failedContainers = append(failedContainers, container) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + + if len(failedContainers) > 0 { + return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/stats.go b/vendor/github.com/docker/docker/cli/command/container/stats.go new file mode 100644 index 0000000..12d5c68 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/stats.go @@ -0,0 +1,243 @@ +package container + +import ( + "fmt" + "io" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/spf13/cobra" +) + +type statsOptions struct { + all bool + noStream bool + format string + containers []string +} + +// NewStatsCommand creates a new cobra.Command for `docker stats` +func NewStatsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts statsOptions + + cmd := &cobra.Command{ + Use: "stats [OPTIONS] [CONTAINER...]", + Short: "Display a live stream of container(s) resource usage statistics", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStats(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + return cmd +} + +// runStats displays a live stream of resource usage statistics for one or more containers. +// This shows real-time information on CPU usage, memory usage, and network I/O. +func runStats(dockerCli *command.DockerCli, opts *statsOptions) error { + showAll := len(opts.containers) == 0 + closeChan := make(chan error) + + ctx := context.Background() + + // monitorContainerEvents watches for container creation and removal (only + // used when calling `docker stats` without arguments). + monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + + eventq, errq := dockerCli.Client().Events(ctx, options) + + // Whether we successfully subscribed to eventq or not, we can now + // unblock the main goroutine. + close(started) + + for { + select { + case event := <-eventq: + c <- event + case err := <-errq: + closeChan <- err + return + } + } + } + + // Get the daemonOSType if not set already + if daemonOSType == "" { + svctx := context.Background() + sv, err := dockerCli.Client().ServerVersion(svctx) + if err != nil { + return err + } + daemonOSType = sv.Os + } + + // waitFirst is a WaitGroup to wait first stat data's reach for each container + waitFirst := &sync.WaitGroup{} + + cStats := stats{} + // getContainerList simulates creation event for all previously existing + // containers (only used when calling `docker stats` without arguments). + getContainerList := func() { + options := types.ContainerListOptions{ + All: opts.all, + } + cs, err := dockerCli.Client().ContainerList(ctx, options) + if err != nil { + closeChan <- err + } + for _, container := range cs { + s := formatter.NewContainerStats(container.ID[:12], daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + } + + if showAll { + // If no names were specified, start a long running goroutine which + // monitors container events. We make sure we're subscribed before + // retrieving the list of running containers to avoid a race where we + // would "miss" a creation. + started := make(chan struct{}) + eh := command.InitEventHandler() + eh.Handle("create", func(e events.Message) { + if opts.all { + s := formatter.NewContainerStats(e.ID[:12], daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + }) + + eh.Handle("start", func(e events.Message) { + s := formatter.NewContainerStats(e.ID[:12], daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + }) + + eh.Handle("die", func(e events.Message) { + if !opts.all { + cStats.remove(e.ID[:12]) + } + }) + + eventChan := make(chan events.Message) + go eh.Watch(eventChan) + go monitorContainerEvents(started, eventChan) + defer close(eventChan) + <-started + + // Start a short-lived goroutine to retrieve the initial list of + // containers. + getContainerList() + } else { + // Artificially send creation events for the containers we were asked to + // monitor (same code path than we use when monitoring all containers). + for _, name := range opts.containers { + s := formatter.NewContainerStats(name, daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + + // We don't expect any asynchronous errors: closeChan can be closed. + close(closeChan) + + // Do a quick pause to detect any error with the provided list of + // container names. + time.Sleep(1500 * time.Millisecond) + var errs []string + cStats.mu.Lock() + for _, c := range cStats.cs { + cErr := c.GetError() + if cErr != nil { + errs = append(errs, fmt.Sprintf("%s: %v", c.Name, cErr)) + } + } + cStats.mu.Unlock() + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, ", ")) + } + } + + // before print to screen, make sure each container get at least one valid stat data + waitFirst.Wait() + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().StatsFormat) > 0 { + format = dockerCli.ConfigFile().StatsFormat + } else { + format = formatter.TableFormatKey + } + } + statsCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewStatsFormat(format, daemonOSType), + } + cleanScreen := func() { + if !opts.noStream { + fmt.Fprint(dockerCli.Out(), "\033[2J") + fmt.Fprint(dockerCli.Out(), "\033[H") + } + } + + var err error + for range time.Tick(500 * time.Millisecond) { + cleanScreen() + ccstats := []formatter.StatsEntry{} + cStats.mu.Lock() + for _, c := range cStats.cs { + ccstats = append(ccstats, c.GetStatistics()) + } + cStats.mu.Unlock() + if err = formatter.ContainerStatsWrite(statsCtx, ccstats); err != nil { + break + } + if len(cStats.cs) == 0 && !showAll { + break + } + if opts.noStream { + break + } + select { + case err, ok := <-closeChan: + if ok { + if err != nil { + // this is suppressing "unexpected EOF" in the cli when the + // daemon restarts so it shutdowns cleanly + if err == io.ErrUnexpectedEOF { + return nil + } + return err + } + } + default: + // just skip + } + } + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go b/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go new file mode 100644 index 0000000..4b57e3f --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go @@ -0,0 +1,226 @@ +package container + +import ( + "encoding/json" + "errors" + "io" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +type stats struct { + ostype string + mu sync.Mutex + cs []*formatter.ContainerStats +} + +// daemonOSType is set once we have at least one stat for a container +// from the daemon. It is used to ensure we print the right header based +// on the daemon platform. +var daemonOSType string + +func (s *stats) add(cs *formatter.ContainerStats) bool { + s.mu.Lock() + defer s.mu.Unlock() + if _, exists := s.isKnownContainer(cs.Container); !exists { + s.cs = append(s.cs, cs) + return true + } + return false +} + +func (s *stats) remove(id string) { + s.mu.Lock() + if i, exists := s.isKnownContainer(id); exists { + s.cs = append(s.cs[:i], s.cs[i+1:]...) + } + s.mu.Unlock() +} + +func (s *stats) isKnownContainer(cid string) (int, bool) { + for i, c := range s.cs { + if c.Container == cid { + return i, true + } + } + return -1, false +} + +func collect(ctx context.Context, s *formatter.ContainerStats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { + logrus.Debugf("collecting stats for %s", s.Container) + var ( + getFirst bool + previousCPU uint64 + previousSystem uint64 + u = make(chan error, 1) + ) + + defer func() { + // if error happens and we get nothing of stats, release wait group whatever + if !getFirst { + getFirst = true + waitFirst.Done() + } + }() + + response, err := cli.ContainerStats(ctx, s.Container, streamStats) + if err != nil { + s.SetError(err) + return + } + defer response.Body.Close() + + dec := json.NewDecoder(response.Body) + go func() { + for { + var ( + v *types.StatsJSON + memPercent = 0.0 + cpuPercent = 0.0 + blkRead, blkWrite uint64 // Only used on Linux + mem = 0.0 + memLimit = 0.0 + memPerc = 0.0 + pidsStatsCurrent uint64 + ) + + if err := dec.Decode(&v); err != nil { + dec = json.NewDecoder(io.MultiReader(dec.Buffered(), response.Body)) + u <- err + if err == io.EOF { + break + } + time.Sleep(100 * time.Millisecond) + continue + } + + daemonOSType = response.OSType + + if daemonOSType != "windows" { + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if v.MemoryStats.Limit != 0 { + memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 + } + previousCPU = v.PreCPUStats.CPUUsage.TotalUsage + previousSystem = v.PreCPUStats.SystemUsage + cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v) + blkRead, blkWrite = calculateBlockIO(v.BlkioStats) + mem = float64(v.MemoryStats.Usage) + memLimit = float64(v.MemoryStats.Limit) + memPerc = memPercent + pidsStatsCurrent = v.PidsStats.Current + } else { + cpuPercent = calculateCPUPercentWindows(v) + blkRead = v.StorageStats.ReadSizeBytes + blkWrite = v.StorageStats.WriteSizeBytes + mem = float64(v.MemoryStats.PrivateWorkingSet) + } + netRx, netTx := calculateNetwork(v.Networks) + s.SetStatistics(formatter.StatsEntry{ + Name: v.Name, + ID: v.ID, + CPUPercentage: cpuPercent, + Memory: mem, + MemoryPercentage: memPerc, + MemoryLimit: memLimit, + NetworkRx: netRx, + NetworkTx: netTx, + BlockRead: float64(blkRead), + BlockWrite: float64(blkWrite), + PidsCurrent: pidsStatsCurrent, + }) + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the specified duration. + s.SetErrorAndReset(errors.New("timeout waiting for stats")) + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + case err := <-u: + if err != nil { + s.SetError(err) + continue + } + s.SetError(nil) + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + } + if !streamStats { + return + } + } +} + +func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) + ) + + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } + return cpuPercent +} + +func calculateCPUPercentWindows(v *types.StatsJSON) float64 { + // Max number of 100ns intervals between the previous time read and now + possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals + possIntervals /= 100 // Convert to number of 100ns intervals + possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors + + // Intervals used + intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage + + // Percentage avoiding divide-by-zero + if possIntervals > 0 { + return float64(intervalsUsed) / float64(possIntervals) * 100.0 + } + return 0.00 +} + +func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { + for _, bioEntry := range blkio.IoServiceBytesRecursive { + switch strings.ToLower(bioEntry.Op) { + case "read": + blkRead = blkRead + bioEntry.Value + case "write": + blkWrite = blkWrite + bioEntry.Value + } + } + return +} + +func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { + var rx, tx float64 + + for _, v := range network { + rx += float64(v.RxBytes) + tx += float64(v.TxBytes) + } + return rx, tx +} diff --git a/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go b/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go new file mode 100644 index 0000000..828d634 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go @@ -0,0 +1,20 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestCalculateBlockIO(t *testing.T) { + blkio := types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}}, + } + blkRead, blkWrite := calculateBlockIO(blkio) + if blkRead != 5801 { + t.Fatalf("blkRead = %d, want 5801", blkRead) + } + if blkWrite != 579 { + t.Fatalf("blkWrite = %d, want 579", blkWrite) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/container/stop.go b/vendor/github.com/docker/docker/cli/command/container/stop.go new file mode 100644 index 0000000..c68ede5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/stop.go @@ -0,0 +1,67 @@ +package container + +import ( + "fmt" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type stopOptions struct { + time int + timeChanged bool + + containers []string +} + +// NewStopCommand creates a new cobra.Command for `docker stop` +func NewStopCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts stopOptions + + cmd := &cobra.Command{ + Use: "stop [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Stop one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.timeChanged = cmd.Flags().Changed("time") + return runStop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it") + return cmd +} + +func runStop(dockerCli *command.DockerCli, opts *stopOptions) error { + ctx := context.Background() + + var timeout *time.Duration + if opts.timeChanged { + timeoutValue := time.Duration(opts.time) * time.Second + timeout = &timeoutValue + } + + var errs []string + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error { + return dockerCli.Client().ContainerStop(ctx, id, timeout) + }) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/top.go b/vendor/github.com/docker/docker/cli/command/container/top.go new file mode 100644 index 0000000..160153b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/top.go @@ -0,0 +1,58 @@ +package container + +import ( + "fmt" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type topOptions struct { + container string + + args []string +} + +// NewTopCommand creates a new cobra.Command for `docker top` +func NewTopCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts topOptions + + cmd := &cobra.Command{ + Use: "top CONTAINER [ps OPTIONS]", + Short: "Display the running processes of a container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + opts.args = args[1:] + return runTop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTop(dockerCli *command.DockerCli, opts *topOptions) error { + ctx := context.Background() + + procList, err := dockerCli.Client().ContainerTop(ctx, opts.container, opts.args) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/tty.go b/vendor/github.com/docker/docker/cli/command/container/tty.go new file mode 100644 index 0000000..6af8e2b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/tty.go @@ -0,0 +1,103 @@ +package container + +import ( + "fmt" + "os" + gosignal "os/signal" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" +) + +// resizeTtyTo resizes tty to specific height and width +func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) { + if height == 0 && width == 0 { + return + } + + options := types.ResizeOptions{ + Height: height, + Width: width, + } + + var err error + if isExec { + err = client.ContainerExecResize(ctx, id, options) + } else { + err = client.ContainerResize(ctx, id, options) + } + + if err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +// MonitorTtySize updates the container tty size when the terminal tty changes size +func MonitorTtySize(ctx context.Context, cli *command.DockerCli, id string, isExec bool) error { + resizeTty := func() { + height, width := cli.Out().GetTtySize() + resizeTtyTo(ctx, cli.Client(), id, height, width, isExec) + } + + resizeTty() + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := cli.Out().GetTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.Out().GetTtySize() + + if prevW != w || prevH != h { + resizeTty() + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + resizeTty() + } + }() + } + return nil +} + +// ForwardAllSignals forwards signals to the container +func ForwardAllSignals(ctx context.Context, cli *command.DockerCli, cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD || s == signal.SIGPIPE { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.Err(), "Unsupported signal: %v. Discarding.\n", s) + continue + } + + if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} diff --git a/vendor/github.com/docker/docker/cli/command/container/unpause.go b/vendor/github.com/docker/docker/cli/command/container/unpause.go new file mode 100644 index 0000000..c4d8d48 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/unpause.go @@ -0,0 +1,50 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type unpauseOptions struct { + containers []string +} + +// NewUnpauseCommand creates a new cobra.Command for `docker unpause` +func NewUnpauseCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts unpauseOptions + + cmd := &cobra.Command{ + Use: "unpause CONTAINER [CONTAINER...]", + Short: "Unpause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runUnpause(dockerCli, &opts) + }, + } + return cmd +} + +func runUnpause(dockerCli *command.DockerCli, opts *unpauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerUnpause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/update.go b/vendor/github.com/docker/docker/cli/command/container/update.go new file mode 100644 index 0000000..7576585 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/update.go @@ -0,0 +1,163 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type updateOptions struct { + blkioWeight uint16 + cpuPeriod int64 + cpuQuota int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpusetCpus string + cpusetMems string + cpuShares int64 + memoryString string + memoryReservation string + memorySwap string + kernelMemory string + restartPolicy string + + nFlag int + + containers []string +} + +// NewUpdateCommand creates a new cobra.Command for `docker update` +func NewUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts updateOptions + + cmd := &cobra.Command{ + Use: "update [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Update configuration of one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.nFlag = cmd.Flags().NFlag() + return runUpdate(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.Uint16Var(&opts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Int64Var(&opts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&opts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&opts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&opts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&opts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&opts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64VarP(&opts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.StringVarP(&opts.memoryString, "memory", "m", "", "Memory limit") + flags.StringVar(&opts.memoryReservation, "memory-reservation", "", "Memory soft limit") + flags.StringVar(&opts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.StringVar(&opts.kernelMemory, "kernel-memory", "", "Kernel memory limit") + flags.StringVar(&opts.restartPolicy, "restart", "", "Restart policy to apply when a container exits") + + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, opts *updateOptions) error { + var err error + + if opts.nFlag == 0 { + return fmt.Errorf("You must provide one or more flags when using this command.") + } + + var memory int64 + if opts.memoryString != "" { + memory, err = units.RAMInBytes(opts.memoryString) + if err != nil { + return err + } + } + + var memoryReservation int64 + if opts.memoryReservation != "" { + memoryReservation, err = units.RAMInBytes(opts.memoryReservation) + if err != nil { + return err + } + } + + var memorySwap int64 + if opts.memorySwap != "" { + if opts.memorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(opts.memorySwap) + if err != nil { + return err + } + } + } + + var kernelMemory int64 + if opts.kernelMemory != "" { + kernelMemory, err = units.RAMInBytes(opts.kernelMemory) + if err != nil { + return err + } + } + + var restartPolicy containertypes.RestartPolicy + if opts.restartPolicy != "" { + restartPolicy, err = runconfigopts.ParseRestartPolicy(opts.restartPolicy) + if err != nil { + return err + } + } + + resources := containertypes.Resources{ + BlkioWeight: opts.blkioWeight, + CpusetCpus: opts.cpusetCpus, + CpusetMems: opts.cpusetMems, + CPUShares: opts.cpuShares, + Memory: memory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + KernelMemory: kernelMemory, + CPUPeriod: opts.cpuPeriod, + CPUQuota: opts.cpuQuota, + CPURealtimePeriod: opts.cpuRealtimePeriod, + CPURealtimeRuntime: opts.cpuRealtimeRuntime, + } + + updateConfig := containertypes.UpdateConfig{ + Resources: resources, + RestartPolicy: restartPolicy, + } + + ctx := context.Background() + + var ( + warns []string + errs []string + ) + for _, container := range opts.containers { + r, err := dockerCli.Client().ContainerUpdate(ctx, container, updateConfig) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + warns = append(warns, r.Warnings...) + } + if len(warns) > 0 { + fmt.Fprintf(dockerCli.Out(), "%s", strings.Join(warns, "\n")) + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/utils.go b/vendor/github.com/docker/docker/cli/command/container/utils.go new file mode 100644 index 0000000..6bef924 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/utils.go @@ -0,0 +1,143 @@ +package container + +import ( + "strconv" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/cli/command" + clientapi "github.com/docker/docker/client" +) + +func waitExitOrRemoved(ctx context.Context, dockerCli *command.DockerCli, containerID string, waitRemove bool) chan int { + if len(containerID) == 0 { + // containerID can never be empty + panic("Internal Error: waitExitOrRemoved needs a containerID as parameter") + } + + var removeErr error + statusChan := make(chan int) + exitCode := 125 + + // Get events via Events API + f := filters.NewArgs() + f.Add("type", "container") + f.Add("container", containerID) + options := types.EventsOptions{ + Filters: f, + } + eventCtx, cancel := context.WithCancel(ctx) + eventq, errq := dockerCli.Client().Events(eventCtx, options) + + eventProcessor := func(e events.Message) bool { + stopProcessing := false + switch e.Status { + case "die": + if v, ok := e.Actor.Attributes["exitCode"]; ok { + code, cerr := strconv.Atoi(v) + if cerr != nil { + logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr) + } else { + exitCode = code + } + } + if !waitRemove { + stopProcessing = true + } else { + // If we are talking to an older daemon, `AutoRemove` is not supported. + // We need to fall back to the old behavior, which is client-side removal + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") { + go func() { + removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true}) + if removeErr != nil { + logrus.Errorf("error removing container: %v", removeErr) + cancel() // cancel the event Q + } + }() + } + } + case "detach": + exitCode = 0 + stopProcessing = true + case "destroy": + stopProcessing = true + } + return stopProcessing + } + + go func() { + defer func() { + statusChan <- exitCode // must always send an exit code or the caller will block + cancel() + }() + + for { + select { + case <-eventCtx.Done(): + if removeErr != nil { + return + } + case evt := <-eventq: + if eventProcessor(evt) { + return + } + case err := <-errq: + logrus.Errorf("error getting events from daemon: %v", err) + return + } + } + }() + + return statusChan +} + +// getExitCode performs an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(ctx context.Context, dockerCli *command.DockerCli, containerID string) (bool, int, error) { + c, err := dockerCli.Client().ContainerInspect(ctx, containerID) + if err != nil { + // If we can't connect, then the daemon probably died. + if !clientapi.IsErrConnectionFailed(err) { + return false, -1, err + } + return false, -1, nil + } + return c.State.Running, c.State.ExitCode, nil +} + +func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error { + if len(containers) == 0 { + return nil + } + const defaultParallel int = 50 + sem := make(chan struct{}, defaultParallel) + errChan := make(chan error) + + // make sure result is printed in correct order + output := map[string]chan error{} + for _, c := range containers { + output[c] = make(chan error, 1) + } + go func() { + for _, c := range containers { + err := <-output[c] + errChan <- err + } + }() + + go func() { + for _, c := range containers { + sem <- struct{}{} // Wait for active queue sem to drain. + go func(container string) { + output[container] <- op(ctx, container) + <-sem + }(c) + } + }() + return errChan +} diff --git a/vendor/github.com/docker/docker/cli/command/container/wait.go b/vendor/github.com/docker/docker/cli/command/container/wait.go new file mode 100644 index 0000000..19ccf7a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/wait.go @@ -0,0 +1,50 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type waitOptions struct { + containers []string +} + +// NewWaitCommand creates a new cobra.Command for `docker wait` +func NewWaitCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts waitOptions + + cmd := &cobra.Command{ + Use: "wait CONTAINER [CONTAINER...]", + Short: "Block until one or more containers stop, then print their exit codes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runWait(dockerCli, &opts) + }, + } + return cmd +} + +func runWait(dockerCli *command.DockerCli, opts *waitOptions) error { + ctx := context.Background() + + var errs []string + for _, container := range opts.containers { + status, err := dockerCli.Client().ContainerWait(ctx, container) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%d\n", status) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/events_utils.go b/vendor/github.com/docker/docker/cli/command/events_utils.go new file mode 100644 index 0000000..e710c97 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/events_utils.go @@ -0,0 +1,49 @@ +package command + +import ( + "sync" + + "github.com/Sirupsen/logrus" + eventtypes "github.com/docker/docker/api/types/events" +) + +type eventProcessor func(eventtypes.Message, error) error + +// EventHandler is abstract interface for user to customize +// own handle functions of each type of events +type EventHandler interface { + Handle(action string, h func(eventtypes.Message)) + Watch(c <-chan eventtypes.Message) +} + +// InitEventHandler initializes and returns an EventHandler +func InitEventHandler() EventHandler { + return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} +} + +type eventHandler struct { + handlers map[string]func(eventtypes.Message) + mu sync.Mutex +} + +func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { + w.mu.Lock() + w.handlers[action] = h + w.mu.Unlock() +} + +// Watch ranges over the passed in event chan and processes the events based on the +// handlers created for a given action. +// To stop watching, close the event chan. +func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/container.go b/vendor/github.com/docker/docker/cli/command/formatter/container.go new file mode 100644 index 0000000..6273453 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/container.go @@ -0,0 +1,235 @@ +package formatter + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + units "github.com/docker/go-units" +) + +const ( + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + + containerIDHeader = "CONTAINER ID" + namesHeader = "NAMES" + commandHeader = "COMMAND" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + mountsHeader = "MOUNTS" + localVolumes = "LOCAL VOLUMES" + networksHeader = "NETWORKS" +) + +// NewContainerFormat returns a Format for rendering using a Context +func NewContainerFormat(source string, quiet bool, size bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + format := defaultContainerTableFormat + if size { + format += `\t{{.Size}}` + } + return Format(format) + case RawFormatKey: + if quiet { + return `container_id: {{.ID}}` + } + format := `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{- pad .Status 1 0}} +names: {{.Names}} +labels: {{- pad .Labels 1 0}} +ports: {{- pad .Ports 1 0}} +` + if size { + format += `size: {{.Size}}\n` + } + return Format(format) + } + return Format(source) +} + +// ContainerWrite renders the context for a list of containers +func ContainerWrite(ctx Context, containers []types.Container) error { + render := func(format func(subContext subContext) error) error { + for _, container := range containers { + err := format(&containerContext{trunc: ctx.Trunc, c: container}) + if err != nil { + return err + } + } + return nil + } + return ctx.Write(&containerContext{}, render) +} + +type containerContext struct { + HeaderContext + trunc bool + c types.Container +} + +func (c *containerContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *containerContext) ID() string { + c.AddHeader(containerIDHeader) + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + c.AddHeader(namesHeader) + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + c.AddHeader(imageHeader) + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + } + return c.c.Image +} + +func (c *containerContext) Command() string { + c.AddHeader(commandHeader) + command := c.c.Command + if c.trunc { + command = stringutils.Ellipsis(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + c.AddHeader(createdAtHeader) + return time.Unix(int64(c.c.Created), 0).String() +} + +func (c *containerContext) RunningFor() string { + c.AddHeader(runningForHeader) + createdAt := time.Unix(int64(c.c.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *containerContext) Ports() string { + c.AddHeader(portsHeader) + return api.DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + c.AddHeader(statusHeader) + return c.c.Status +} + +func (c *containerContext) Size() string { + c.AddHeader(sizeHeader) + srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3) + sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + c.AddHeader(labelsHeader) + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.AddHeader(h) + + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) Mounts() string { + c.AddHeader(mountsHeader) + + var name string + var mounts []string + for _, m := range c.c.Mounts { + if m.Name == "" { + name = m.Source + } else { + name = m.Name + } + if c.trunc { + name = stringutils.Ellipsis(name, 15) + } + mounts = append(mounts, name) + } + return strings.Join(mounts, ",") +} + +func (c *containerContext) LocalVolumes() string { + c.AddHeader(localVolumes) + + count := 0 + for _, m := range c.c.Mounts { + if m.Driver == "local" { + count++ + } + } + + return fmt.Sprintf("%d", count) +} + +func (c *containerContext) Networks() string { + c.AddHeader(networksHeader) + + if c.c.NetworkSettings == nil { + return "" + } + + networks := []string{} + for k := range c.c.NetworkSettings.Networks { + networks = append(networks, k) + } + + return strings.Join(networks, ",") +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/container_test.go b/vendor/github.com/docker/docker/cli/command/formatter/container_test.go new file mode 100644 index 0000000..1613789 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/container_test.go @@ -0,0 +1,398 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestContainerPsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + unix := time.Now().Add(-65 * time.Second).Unix() + + var ctx containerContext + cases := []struct { + container types.Container + trunc bool + expValue string + expHeader string + call func() string + }{ + {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), containerIDHeader, ctx.ID}, + {types.Container{ID: containerID}, false, containerID, containerIDHeader, ctx.ID}, + {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, + {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, + {types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image}, + {types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image}, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + true, + "a5a665ff33ec", + imageHeader, + ctx.Image, + }, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + false, + "a5a665ff33eced1e0803148700880edab4", + imageHeader, + ctx.Image, + }, + {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, + {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, + {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, + {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports}, + {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, + {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, + {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, + {types.Container{}, true, "", labelsHeader, ctx.Labels}, + {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, + {types.Container{Created: unix}, true, "About a minute", runningForHeader, ctx.RunningFor}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: "this-is-a-long-volume-name-and-will-be-truncated-if-trunc-is-set", + Driver: "local", + Source: "/a/path", + }, + }, + }, true, "this-is-a-lo...", mountsHeader, ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Driver: "local", + Source: "/a/path", + }, + }, + }, false, "/a/path", mountsHeader, ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", + Driver: "local", + Source: "/a/path", + }, + }, + }, false, "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", mountsHeader, ctx.Mounts}, + } + + for _, c := range cases { + ctx = containerContext{c: c.container, trunc: c.trunc} + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } + + c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} + ctx = containerContext{c: c1, trunc: true} + + sid := ctx.Label("com.docker.swarm.swarm-id") + node := ctx.Label("com.docker.swarm.node_name") + if sid != "33" { + t.Fatalf("Expected 33, was %s\n", sid) + } + + if node != "ubuntu" { + t.Fatalf("Expected ubuntu, was %s\n", node) + } + + h := ctx.FullHeader() + if h != "SWARM ID\tNODE NAME" { + t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) + + } + + c2 := types.Container{} + ctx = containerContext{c: c2, trunc: true} + + label := ctx.Label("anything.really") + if label != "" { + t.Fatalf("Expected an empty string, was %s", label) + } + + ctx = containerContext{c: c2, trunc: true} + FullHeader := ctx.FullHeader() + if FullHeader != "" { + t.Fatalf("Expected FullHeader to be empty, was %s", FullHeader) + } + +} + +func TestContainerContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + Context{Format: NewContainerFormat("table", false, true)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE +containerID1 ubuntu "" 24 hours ago foobar_baz 0 B +containerID2 ubuntu "" 24 hours ago foobar_bar 0 B +`, + }, + { + Context{Format: NewContainerFormat("table", false, false)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +containerID1 ubuntu "" 24 hours ago foobar_baz +containerID2 ubuntu "" 24 hours ago foobar_bar +`, + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, true)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", true, false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table", true, false)}, + "containerID1\ncontainerID2\n", + }, + // Raw Format + { + Context{Format: NewContainerFormat("raw", false, false)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", false, true)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: +size: 0 B + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: +size: 0 B + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", true, false)}, + "container_id: containerID1\ncontainer_id: containerID2\n", + }, + // Custom Format + { + Context{Format: "{{.Image}}"}, + "ubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("{{.Image}}", false, true)}, + "ubuntu\nubuntu\n", + }, + } + + for _, testcase := range cases { + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ContainerWrite(testcase.context, containers) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestContainerContextWriteWithNoContainers(t *testing.T) { + out := bytes.NewBufferString("") + containers := []types.Container{} + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Image}}", + Output: out, + }, + "", + }, + { + Context{ + Format: "table {{.Image}}", + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: NewContainerFormat("{{.Image}}", false, true), + Output: out, + }, + "", + }, + { + Context{ + Format: NewContainerFormat("table {{.Image}}", false, true), + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: "table {{.Image}}\t{{.Size}}", + Output: out, + }, + "IMAGE SIZE\n", + }, + { + Context{ + Format: NewContainerFormat("table {{.Image}}\t{{.Size}}", false, true), + Output: out, + }, + "IMAGE SIZE\n", + }, + } + + for _, context := range contexts { + ContainerWrite(context.context, containers) + assert.Equal(t, context.expected, out.String()) + // Clean buffer + out.Reset() + } +} + +func TestContainerContextWriteJSON(t *testing.T) { + unix := time.Now().Add(-65 * time.Second).Unix() + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unix}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unix}, + } + expectedCreated := time.Unix(unix, 0).String() + expectedJSONs := []map[string]interface{}{ + {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID1", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_baz", "Networks": "", "Ports": "", "RunningFor": "About a minute", "Size": "0 B", "Status": ""}, + {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID2", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_bar", "Networks": "", "Ports": "", "RunningFor": "About a minute", "Size": "0 B", "Status": ""}, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, m, expectedJSONs[i]) + } +} + +func TestContainerContextWriteJSONField(t *testing.T) { + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu"}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"}, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .ID}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Equal(t, s, containers[i].ID) + } +} + +func TestContainerBackCompat(t *testing.T) { + containers := []types.Container{{ID: "brewhaha"}} + cases := []string{ + "ID", + "Names", + "Image", + "Command", + "CreatedAt", + "RunningFor", + "Ports", + "Status", + "Size", + "Labels", + "Mounts", + } + buf := bytes.NewBuffer(nil) + for _, c := range cases { + ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf} + if err := ContainerWrite(ctx, containers); err != nil { + t.Logf("could not render template for field '%s': %v", c, err) + t.Fail() + } + buf.Reset() + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/custom.go b/vendor/github.com/docker/docker/cli/command/formatter/custom.go new file mode 100644 index 0000000..df32684 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/custom.go @@ -0,0 +1,51 @@ +package formatter + +import ( + "strings" +) + +const ( + imageHeader = "IMAGE" + createdSinceHeader = "CREATED" + createdAtHeader = "CREATED AT" + sizeHeader = "SIZE" + labelsHeader = "LABELS" + nameHeader = "NAME" + driverHeader = "DRIVER" + scopeHeader = "SCOPE" +) + +type subContext interface { + FullHeader() string + AddHeader(header string) +} + +// HeaderContext provides the subContext interface for managing headers +type HeaderContext struct { + header []string +} + +// FullHeader returns the header as a string +func (c *HeaderContext) FullHeader() string { + if c.header == nil { + return "" + } + return strings.Join(c.header, "\t") +} + +// AddHeader adds another column to the header +func (c *HeaderContext) AddHeader(header string) { + if c.header == nil { + c.header = []string{} + } + c.header = append(c.header, strings.ToUpper(header)) +} + +func stripNamePrefix(ss []string) []string { + sss := make([]string, len(ss)) + for i, s := range ss { + sss[i] = s[1:] + } + + return sss +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go b/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go new file mode 100644 index 0000000..da42039 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go @@ -0,0 +1,28 @@ +package formatter + +import ( + "reflect" + "strings" + "testing" +) + +func compareMultipleValues(t *testing.T, value, expected string) { + // comma-separated values means probably a map input, which won't + // be guaranteed to have the same order as our expected value + // We'll create maps and use reflect.DeepEquals to check instead: + entriesMap := make(map[string]string) + expMap := make(map[string]string) + entries := strings.Split(value, ",") + expectedEntries := strings.Split(expected, ",") + for _, entry := range entries { + keyval := strings.Split(entry, "=") + entriesMap[keyval[0]] = keyval[1] + } + for _, expected := range expectedEntries { + keyval := strings.Split(expected, "=") + expMap[keyval[0]] = keyval[1] + } + if !reflect.DeepEqual(expMap, entriesMap) { + t.Fatalf("Expected entries: %v, got: %v", expected, value) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go b/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go new file mode 100644 index 0000000..5309d88 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go @@ -0,0 +1,334 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" +) + +const ( + defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" + defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}" + defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" + defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" + + typeHeader = "TYPE" + totalHeader = "TOTAL" + activeHeader = "ACTIVE" + reclaimableHeader = "RECLAIMABLE" + containersHeader = "CONTAINERS" + sharedSizeHeader = "SHARED SIZE" + uniqueSizeHeader = "UNIQUE SiZE" +) + +// DiskUsageContext contains disk usage specific information required by the formater, encapsulate a Context struct. +type DiskUsageContext struct { + Context + Verbose bool + LayersSize int64 + Images []*types.ImageSummary + Containers []*types.Container + Volumes []*types.Volume +} + +func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) { + ctx.buffer = bytes.NewBufferString("") + ctx.header = "" + ctx.Format = Format(format) + ctx.preFormat() + + return ctx.parseFormat() +} + +func (ctx *DiskUsageContext) Write() { + if ctx.Verbose == false { + ctx.buffer = bytes.NewBufferString("") + ctx.Format = defaultDiskUsageTableFormat + ctx.preFormat() + + tmpl, err := ctx.parseFormat() + if err != nil { + return + } + + err = ctx.contextFormat(tmpl, &diskUsageImagesContext{ + totalSize: ctx.LayersSize, + images: ctx.Images, + }) + if err != nil { + return + } + err = ctx.contextFormat(tmpl, &diskUsageContainersContext{ + containers: ctx.Containers, + }) + if err != nil { + return + } + + err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{ + volumes: ctx.Volumes, + }) + if err != nil { + return + } + + ctx.postFormat(tmpl, &diskUsageContainersContext{containers: []*types.Container{}}) + + return + } + + // First images + tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat) + if err != nil { + return + } + + ctx.Output.Write([]byte("Images space usage:\n\n")) + for _, i := range ctx.Images { + repo := "" + tag := "" + if len(i.RepoTags) > 0 && !isDangling(*i) { + // Only show the first tag + ref, err := reference.ParseNamed(i.RepoTags[0]) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + repo = ref.Name() + tag = nt.Tag() + } + } + + err = ctx.contextFormat(tmpl, &imageContext{ + repo: repo, + tag: tag, + trunc: true, + i: *i, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, &imageContext{}) + + // Now containers + ctx.Output.Write([]byte("\nContainers space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat) + if err != nil { + return + } + for _, c := range ctx.Containers { + // Don't display the virtual size + c.SizeRootFs = 0 + err = ctx.contextFormat(tmpl, &containerContext{ + trunc: true, + c: *c, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, &containerContext{}) + + // And volumes + ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat) + if err != nil { + return + } + for _, v := range ctx.Volumes { + err = ctx.contextFormat(tmpl, &volumeContext{ + v: *v, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, &volumeContext{v: types.Volume{}}) +} + +type diskUsageImagesContext struct { + HeaderContext + totalSize int64 + images []*types.ImageSummary +} + +func (c *diskUsageImagesContext) Type() string { + c.AddHeader(typeHeader) + return "Images" +} + +func (c *diskUsageImagesContext) TotalCount() string { + c.AddHeader(totalHeader) + return fmt.Sprintf("%d", len(c.images)) +} + +func (c *diskUsageImagesContext) Active() string { + c.AddHeader(activeHeader) + used := 0 + for _, i := range c.images { + if i.Containers > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageImagesContext) Size() string { + c.AddHeader(sizeHeader) + return units.HumanSize(float64(c.totalSize)) + +} + +func (c *diskUsageImagesContext) Reclaimable() string { + var used int64 + + c.AddHeader(reclaimableHeader) + for _, i := range c.images { + if i.Containers != 0 { + if i.VirtualSize == -1 || i.SharedSize == -1 { + continue + } + used += i.VirtualSize - i.SharedSize + } + } + + reclaimable := c.totalSize - used + if c.totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize) + } + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} + +type diskUsageContainersContext struct { + HeaderContext + verbose bool + containers []*types.Container +} + +func (c *diskUsageContainersContext) Type() string { + c.AddHeader(typeHeader) + return "Containers" +} + +func (c *diskUsageContainersContext) TotalCount() string { + c.AddHeader(totalHeader) + return fmt.Sprintf("%d", len(c.containers)) +} + +func (c *diskUsageContainersContext) isActive(container types.Container) bool { + return strings.Contains(container.State, "running") || + strings.Contains(container.State, "paused") || + strings.Contains(container.State, "restarting") +} + +func (c *diskUsageContainersContext) Active() string { + c.AddHeader(activeHeader) + used := 0 + for _, container := range c.containers { + if c.isActive(*container) { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageContainersContext) Size() string { + var size int64 + + c.AddHeader(sizeHeader) + for _, container := range c.containers { + size += container.SizeRw + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageContainersContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + c.AddHeader(reclaimableHeader) + for _, container := range c.containers { + if !c.isActive(*container) { + reclaimable += container.SizeRw + } + totalSize += container.SizeRw + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} + +type diskUsageVolumesContext struct { + HeaderContext + verbose bool + volumes []*types.Volume +} + +func (c *diskUsageVolumesContext) Type() string { + c.AddHeader(typeHeader) + return "Local Volumes" +} + +func (c *diskUsageVolumesContext) TotalCount() string { + c.AddHeader(totalHeader) + return fmt.Sprintf("%d", len(c.volumes)) +} + +func (c *diskUsageVolumesContext) Active() string { + c.AddHeader(activeHeader) + + used := 0 + for _, v := range c.volumes { + if v.UsageData.RefCount > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageVolumesContext) Size() string { + var size int64 + + c.AddHeader(sizeHeader) + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + size += v.UsageData.Size + } + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageVolumesContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + c.AddHeader(reclaimableHeader) + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + if v.UsageData.RefCount == 0 { + reclaimable += v.UsageData.Size + } + totalSize += v.UsageData.Size + } + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/formatter.go b/vendor/github.com/docker/docker/cli/command/formatter/formatter.go new file mode 100644 index 0000000..e859a1c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/formatter.go @@ -0,0 +1,123 @@ +package formatter + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/tabwriter" + "text/template" + + "github.com/docker/docker/utils/templates" +) + +// Format keys used to specify certain kinds of output formats +const ( + TableFormatKey = "table" + RawFormatKey = "raw" + PrettyFormatKey = "pretty" + + defaultQuietFormat = "{{.ID}}" +) + +// Format is the format string rendered using the Context +type Format string + +// IsTable returns true if the format is a table-type format +func (f Format) IsTable() bool { + return strings.HasPrefix(string(f), TableFormatKey) +} + +// Contains returns true if the format contains the substring +func (f Format) Contains(sub string) bool { + return strings.Contains(string(f), sub) +} + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format Format + // Trunc when set to true will truncate the output of certain fields such as Container ID. + Trunc bool + + // internal element + finalFormat string + header string + buffer *bytes.Buffer +} + +func (c *Context) preFormat() { + c.finalFormat = string(c.Format) + + // TODO: handle this in the Format type + if c.Format.IsTable() { + c.finalFormat = c.finalFormat[len(TableFormatKey):] + } + + c.finalFormat = strings.Trim(c.finalFormat, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + c.finalFormat = r.Replace(c.finalFormat) +} + +func (c *Context) parseFormat() (*template.Template, error) { + tmpl, err := templates.Parse(c.finalFormat) + if err != nil { + return tmpl, fmt.Errorf("Template parsing error: %v\n", err) + } + return tmpl, err +} + +func (c *Context) postFormat(tmpl *template.Template, subContext subContext) { + if c.Format.IsTable() { + if len(c.header) == 0 { + // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template + tmpl.Execute(bytes.NewBufferString(""), subContext) + c.header = subContext.FullHeader() + } + + t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) + t.Write([]byte(c.header)) + t.Write([]byte("\n")) + c.buffer.WriteTo(t) + t.Flush() + } else { + c.buffer.WriteTo(c.Output) + } +} + +func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { + if err := tmpl.Execute(c.buffer, subContext); err != nil { + return fmt.Errorf("Template parsing error: %v\n", err) + } + if c.Format.IsTable() && len(c.header) == 0 { + c.header = subContext.FullHeader() + } + c.buffer.WriteString("\n") + return nil +} + +// SubFormat is a function type accepted by Write() +type SubFormat func(func(subContext) error) error + +// Write the template to the buffer using this Context +func (c *Context) Write(sub subContext, f SubFormat) error { + c.buffer = bytes.NewBufferString("") + c.preFormat() + + tmpl, err := c.parseFormat() + if err != nil { + return err + } + + subFormat := func(subContext subContext) error { + return c.contextFormat(tmpl, subContext) + } + if err := f(subFormat); err != nil { + return err + } + + c.postFormat(tmpl, sub) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/image.go b/vendor/github.com/docker/docker/cli/command/formatter/image.go new file mode 100644 index 0000000..5c7de82 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/image.go @@ -0,0 +1,259 @@ +package formatter + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + units "github.com/docker/go-units" +) + +const ( + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" +) + +// ImageContext contains image specific information required by the formater, encapsulate a Context struct. +type ImageContext struct { + Context + Digest bool +} + +func isDangling(image types.ImageSummary) bool { + return len(image.RepoTags) == 1 && image.RepoTags[0] == ":" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "@" +} + +// NewImageFormat returns a format for rendering an ImageContext +func NewImageFormat(source string, quiet bool, digest bool) Format { + switch source { + case TableFormatKey: + switch { + case quiet: + return defaultQuietFormat + case digest: + return defaultImageTableFormatWithDigest + default: + return defaultImageTableFormat + } + case RawFormatKey: + switch { + case quiet: + return `image_id: {{.ID}}` + case digest: + return `repository: {{ .Repository }} +tag: {{.Tag}} +digest: {{.Digest}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + default: + return `repository: {{ .Repository }} +tag: {{.Tag}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } + } + + format := Format(source) + if format.IsTable() && digest && !format.Contains("{{.Digest}}") { + format += "\t{{.Digest}}" + } + return format +} + +// ImageWrite writes the formatter images using the ImageContext +func ImageWrite(ctx ImageContext, images []types.ImageSummary) error { + render := func(format func(subContext subContext) error) error { + return imageFormat(ctx, images, format) + } + return ctx.Write(&imageContext{}, render) +} + +func imageFormat(ctx ImageContext, images []types.ImageSummary, format func(subContext subContext) error) error { + for _, image := range images { + images := []*imageContext{} + if isDangling(image) { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: "", + tag: "", + digest: "", + }) + } else { + repoTags := map[string][]string{} + repoDigests := map[string][]string{} + + for _, refString := range append(image.RepoTags) { + ref, err := reference.ParseNamed(refString) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + repoTags[ref.Name()] = append(repoTags[ref.Name()], nt.Tag()) + } + } + for _, refString := range append(image.RepoDigests) { + ref, err := reference.ParseNamed(refString) + if err != nil { + continue + } + if c, ok := ref.(reference.Canonical); ok { + repoDigests[ref.Name()] = append(repoDigests[ref.Name()], c.Digest().String()) + } + } + + for repo, tags := range repoTags { + digests := repoDigests[repo] + + // Do not display digests as their own row + delete(repoDigests, repo) + + if !ctx.Digest { + // Ignore digest references, just show tag once + digests = nil + } + + for _, tag := range tags { + if len(digests) == 0 { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: "", + }) + continue + } + // Display the digests for each tag + for _, dgst := range digests { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: dgst, + }) + } + + } + } + + // Show rows for remaining digest only references + for repo, digests := range repoDigests { + // If digests are displayed, show row per digest + if ctx.Digest { + for _, dgst := range digests { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: "", + digest: dgst, + }) + } + } else { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: "", + }) + } + } + } + for _, imageCtx := range images { + if err := format(imageCtx); err != nil { + return err + } + } + } + return nil +} + +type imageContext struct { + HeaderContext + trunc bool + i types.ImageSummary + repo string + tag string + digest string +} + +func (c *imageContext) ID() string { + c.AddHeader(imageIDHeader) + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + c.AddHeader(repositoryHeader) + return c.repo +} + +func (c *imageContext) Tag() string { + c.AddHeader(tagHeader) + return c.tag +} + +func (c *imageContext) Digest() string { + c.AddHeader(digestHeader) + return c.digest +} + +func (c *imageContext) CreatedSince() string { + c.AddHeader(createdSinceHeader) + createdAt := time.Unix(int64(c.i.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *imageContext) CreatedAt() string { + c.AddHeader(createdAtHeader) + return time.Unix(int64(c.i.Created), 0).String() +} + +func (c *imageContext) Size() string { + c.AddHeader(sizeHeader) + return units.HumanSizeWithPrecision(float64(c.i.Size), 3) +} + +func (c *imageContext) Containers() string { + c.AddHeader(containersHeader) + if c.i.Containers == -1 { + return "N/A" + } + return fmt.Sprintf("%d", c.i.Containers) +} + +func (c *imageContext) VirtualSize() string { + c.AddHeader(sizeHeader) + return units.HumanSize(float64(c.i.VirtualSize)) +} + +func (c *imageContext) SharedSize() string { + c.AddHeader(sharedSizeHeader) + if c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.SharedSize)) +} + +func (c *imageContext) UniqueSize() string { + c.AddHeader(uniqueSizeHeader) + if c.i.VirtualSize == -1 || c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.VirtualSize - c.i.SharedSize)) +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/image_test.go b/vendor/github.com/docker/docker/cli/command/formatter/image_test.go new file mode 100644 index 0000000..ffe77f6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/image_test.go @@ -0,0 +1,333 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestImageContext(t *testing.T) { + imageID := stringid.GenerateRandomID() + unix := time.Now().Unix() + + var ctx imageContext + cases := []struct { + imageCtx imageContext + expValue string + expHeader string + call func() string + }{ + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: true, + }, stringid.TruncateID(imageID), imageIDHeader, ctx.ID}, + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: false, + }, imageID, imageIDHeader, ctx.ID}, + {imageContext{ + i: types.ImageSummary{Size: 10, VirtualSize: 10}, + trunc: true, + }, "10 B", sizeHeader, ctx.Size}, + {imageContext{ + i: types.ImageSummary{Created: unix}, + trunc: true, + }, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, + // FIXME + // {imageContext{ + // i: types.ImageSummary{Created: unix}, + // trunc: true, + // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince}, + {imageContext{ + i: types.ImageSummary{}, + repo: "busybox", + }, "busybox", repositoryHeader, ctx.Repository}, + {imageContext{ + i: types.ImageSummary{}, + tag: "latest", + }, "latest", tagHeader, ctx.Tag}, + {imageContext{ + i: types.ImageSummary{}, + digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", + }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", digestHeader, ctx.Digest}, + } + + for _, c := range cases { + ctx = c.imageCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } +} + +func TestImageContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context ImageContext + expected string + }{ + // Errors + { + ImageContext{ + Context: Context{ + Format: "{{InvalidFunction}}", + }, + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + ImageContext{ + Context: Context{ + Format: "{{nil}}", + }, + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, false), + }, + }, + `REPOSITORY TAG IMAGE ID CREATED SIZE +image tag1 imageID1 24 hours ago 0 B +image tag2 imageID2 24 hours ago 0 B + imageID3 24 hours ago 0 B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, false), + }, + }, + "REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + }, + Digest: true, + }, + `REPOSITORY DIGEST +image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image + +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", true, false), + }, + }, + "REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, false), + }, + }, + "imageID1\nimageID2\nimageID3\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, true), + }, + Digest: true, + }, + `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0 B +image tag2 imageID2 24 hours ago 0 B + imageID3 24 hours ago 0 B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, true), + }, + Digest: true, + }, + "imageID1\nimageID2\nimageID3\n", + }, + // Raw Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", false, false), + }, + }, + fmt.Sprintf(`repository: image +tag: tag1 +image_id: imageID1 +created_at: %s +virtual_size: 0 B + +repository: image +tag: tag2 +image_id: imageID2 +created_at: %s +virtual_size: 0 B + +repository: +tag: +image_id: imageID3 +created_at: %s +virtual_size: 0 B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", false, true), + }, + Digest: true, + }, + fmt.Sprintf(`repository: image +tag: tag1 +digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image_id: imageID1 +created_at: %s +virtual_size: 0 B + +repository: image +tag: tag2 +digest: +image_id: imageID2 +created_at: %s +virtual_size: 0 B + +repository: +tag: +digest: +image_id: imageID3 +created_at: %s +virtual_size: 0 B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", true, false), + }, + }, + `image_id: imageID1 +image_id: imageID2 +image_id: imageID3 +`, + }, + // Custom Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + }, + }, + "image\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + }, + Digest: true, + }, + "image\nimage\n\n", + }, + } + + for _, testcase := range cases { + images := []types.ImageSummary{ + {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, + {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, + {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ImageWrite(testcase.context, images) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestImageContextWriteWithNoImage(t *testing.T) { + out := bytes.NewBufferString("") + images := []types.ImageSummary{} + + contexts := []struct { + context ImageContext + expected string + }{ + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + Output: out, + }, + }, + "", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, false), + Output: out, + }, + }, + "REPOSITORY\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + Output: out, + }, + }, + "", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + Output: out, + }, + }, + "REPOSITORY DIGEST\n", + }, + } + + for _, context := range contexts { + ImageWrite(context.context, images) + assert.Equal(t, out.String(), context.expected) + // Clean buffer + out.Reset() + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/network.go b/vendor/github.com/docker/docker/cli/command/formatter/network.go new file mode 100644 index 0000000..7fbad7d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/network.go @@ -0,0 +1,117 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +const ( + defaultNetworkTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}" + + networkIDHeader = "NETWORK ID" + ipv6Header = "IPV6" + internalHeader = "INTERNAL" +) + +// NewNetworkFormat returns a Format for rendering using a network Context +func NewNetworkFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultNetworkTableFormat + case RawFormatKey: + if quiet { + return `network_id: {{.ID}}` + } + return `network_id: {{.ID}}\nname: {{.Name}}\ndriver: {{.Driver}}\nscope: {{.Scope}}\n` + } + return Format(source) +} + +// NetworkWrite writes the context +func NetworkWrite(ctx Context, networks []types.NetworkResource) error { + render := func(format func(subContext subContext) error) error { + for _, network := range networks { + networkCtx := &networkContext{trunc: ctx.Trunc, n: network} + if err := format(networkCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(&networkContext{}, render) +} + +type networkContext struct { + HeaderContext + trunc bool + n types.NetworkResource +} + +func (c *networkContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *networkContext) ID() string { + c.AddHeader(networkIDHeader) + if c.trunc { + return stringid.TruncateID(c.n.ID) + } + return c.n.ID +} + +func (c *networkContext) Name() string { + c.AddHeader(nameHeader) + return c.n.Name +} + +func (c *networkContext) Driver() string { + c.AddHeader(driverHeader) + return c.n.Driver +} + +func (c *networkContext) Scope() string { + c.AddHeader(scopeHeader) + return c.n.Scope +} + +func (c *networkContext) IPv6() string { + c.AddHeader(ipv6Header) + return fmt.Sprintf("%v", c.n.EnableIPv6) +} + +func (c *networkContext) Internal() string { + c.AddHeader(internalHeader) + return fmt.Sprintf("%v", c.n.Internal) +} + +func (c *networkContext) Labels() string { + c.AddHeader(labelsHeader) + if c.n.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.n.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *networkContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.AddHeader(h) + + if c.n.Labels == nil { + return "" + } + return c.n.Labels[name] +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/network_test.go b/vendor/github.com/docker/docker/cli/command/formatter/network_test.go new file mode 100644 index 0000000..b40a534 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/network_test.go @@ -0,0 +1,208 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestNetworkContext(t *testing.T) { + networkID := stringid.GenerateRandomID() + + var ctx networkContext + cases := []struct { + networkCtx networkContext + expValue string + expHeader string + call func() string + }{ + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: false, + }, networkID, networkIDHeader, ctx.ID}, + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: true, + }, stringid.TruncateID(networkID), networkIDHeader, ctx.ID}, + {networkContext{ + n: types.NetworkResource{Name: "network_name"}, + }, "network_name", nameHeader, ctx.Name}, + {networkContext{ + n: types.NetworkResource{Driver: "driver_name"}, + }, "driver_name", driverHeader, ctx.Driver}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: true}, + }, "true", ipv6Header, ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: false}, + }, "false", ipv6Header, ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{Internal: true}, + }, "true", internalHeader, ctx.Internal}, + {networkContext{ + n: types.NetworkResource{Internal: false}, + }, "false", internalHeader, ctx.Internal}, + {networkContext{ + n: types.NetworkResource{}, + }, "", labelsHeader, ctx.Labels}, + {networkContext{ + n: types.NetworkResource{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", labelsHeader, ctx.Labels}, + } + + for _, c := range cases { + ctx = c.networkCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } +} + +func TestNetworkContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewNetworkFormat("table", false)}, + `NETWORK ID NAME DRIVER SCOPE +networkID1 foobar_baz foo local +networkID2 foobar_bar bar local +`, + }, + { + Context{Format: NewNetworkFormat("table", true)}, + `networkID1 +networkID2 +`, + }, + { + Context{Format: NewNetworkFormat("table {{.Name}}", false)}, + `NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewNetworkFormat("table {{.Name}}", true)}, + `NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewNetworkFormat("raw", false)}, + `network_id: networkID1 +name: foobar_baz +driver: foo +scope: local + +network_id: networkID2 +name: foobar_bar +driver: bar +scope: local + +`, + }, + { + Context{Format: NewNetworkFormat("raw", true)}, + `network_id: networkID1 +network_id: networkID2 +`, + }, + // Custom Format + { + Context{Format: NewNetworkFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz", Driver: "foo", Scope: "local"}, + {ID: "networkID2", Name: "foobar_bar", Driver: "bar", Scope: "local"}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := NetworkWrite(testcase.context, networks) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestNetworkContextWriteJSON(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Driver": "", "ID": "networkID1", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_baz", "Scope": ""}, + {"Driver": "", "ID": "networkID2", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_bar", "Scope": ""}, + } + + out := bytes.NewBufferString("") + err := NetworkWrite(Context{Format: "{{json .}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, m, expectedJSONs[i]) + } +} + +func TestNetworkContextWriteJSONField(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := NetworkWrite(Context{Format: "{{json .ID}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Equal(t, s, networks[i].ID) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/reflect.go b/vendor/github.com/docker/docker/cli/command/formatter/reflect.go new file mode 100644 index 0000000..d1d8737 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/reflect.go @@ -0,0 +1,65 @@ +package formatter + +import ( + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +func marshalJSON(x interface{}) ([]byte, error) { + m, err := marshalMap(x) + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// marshalMap marshals x to map[string]interface{} +func marshalMap(x interface{}) (map[string]interface{}, error) { + val := reflect.ValueOf(x) + if val.Kind() != reflect.Ptr { + return nil, fmt.Errorf("expected a pointer to a struct, got %v", val.Kind()) + } + if val.IsNil() { + return nil, fmt.Errorf("expxected a pointer to a struct, got nil pointer") + } + valElem := val.Elem() + if valElem.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) + } + typ := val.Type() + m := make(map[string]interface{}) + for i := 0; i < val.NumMethod(); i++ { + k, v, err := marshalForMethod(typ.Method(i), val.Method(i)) + if err != nil { + return nil, err + } + if k != "" { + m[k] = v + } + } + return m, nil +} + +var unmarshallableNames = map[string]struct{}{"FullHeader": {}} + +// marshalForMethod returns the map key and the map value for marshalling the method. +// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. "unexportedFunc()") +func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) { + if val.Kind() != reflect.Func { + return "", nil, fmt.Errorf("expected func, got %v", val.Kind()) + } + name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut() + _, blackListed := unmarshallableNames[name] + // FIXME: In text/template, (numOut == 2) is marshallable, + // if the type of the second param is error. + marshallable := unicode.IsUpper(rune(name[0])) && !blackListed && + numIn == 0 && numOut == 1 + if !marshallable { + return "", nil, nil + } + result := val.Call(make([]reflect.Value, numIn)) + intf := result[0].Interface() + return name, intf, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go b/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go new file mode 100644 index 0000000..e547b18 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go @@ -0,0 +1,66 @@ +package formatter + +import ( + "reflect" + "testing" +) + +type dummy struct { +} + +func (d *dummy) Func1() string { + return "Func1" +} + +func (d *dummy) func2() string { + return "func2(should not be marshalled)" +} + +func (d *dummy) Func3() (string, int) { + return "Func3(should not be marshalled)", -42 +} + +func (d *dummy) Func4() int { + return 4 +} + +type dummyType string + +func (d *dummy) Func5() dummyType { + return dummyType("Func5") +} + +func (d *dummy) FullHeader() string { + return "FullHeader(should not be marshalled)" +} + +var dummyExpected = map[string]interface{}{ + "Func1": "Func1", + "Func4": 4, + "Func5": dummyType("Func5"), +} + +func TestMarshalMap(t *testing.T) { + d := dummy{} + m, err := marshalMap(&d) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(dummyExpected, m) { + t.Fatalf("expected %+v, got %+v", + dummyExpected, m) + } +} + +func TestMarshalMapBad(t *testing.T) { + if _, err := marshalMap(nil); err == nil { + t.Fatal("expected an error (argument is nil)") + } + if _, err := marshalMap(dummy{}); err == nil { + t.Fatal("expected an error (argument is non-pointer)") + } + x := 42 + if _, err := marshalMap(&x); err == nil { + t.Fatal("expected an error (argument is a pointer to non-struct)") + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/service.go b/vendor/github.com/docker/docker/cli/command/formatter/service.go new file mode 100644 index 0000000..aaa7838 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/service.go @@ -0,0 +1,322 @@ +package formatter + +import ( + "fmt" + "strings" + "time" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command/inspect" + units "github.com/docker/go-units" +) + +const serviceInspectPrettyTemplate Format = ` +ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Service Mode: +{{- if .IsModeGlobal }} Global +{{- else if .IsModeReplicated }} Replicated +{{- if .ModeReplicatedReplicas }} + Replicas: {{ .ModeReplicatedReplicas }} +{{- end }}{{ end }} +{{- if .HasUpdateStatus }} +UpdateStatus: + State: {{ .UpdateStatusState }} + Started: {{ .UpdateStatusStarted }} +{{- if .UpdateIsCompleted }} + Completed: {{ .UpdateStatusCompleted }} +{{- end }} + Message: {{ .UpdateStatusMessage }} +{{- end }} +Placement: +{{- if .TaskPlacementConstraints -}} + Contraints: {{ .TaskPlacementConstraints }} +{{- end }} +{{- if .HasUpdateConfig }} +UpdateConfig: + Parallelism: {{ .UpdateParallelism }} +{{- if .HasUpdateDelay}} + Delay: {{ .UpdateDelay }} +{{- end }} + On failure: {{ .UpdateOnFailure }} +{{- if .HasUpdateMonitor}} + Monitoring Period: {{ .UpdateMonitor }} +{{- end }} + Max failure ratio: {{ .UpdateMaxFailureRatio }} +{{- end }} +ContainerSpec: + Image: {{ .ContainerImage }} +{{- if .ContainerArgs }} + Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }} +{{- end -}} +{{- if .ContainerEnv }} + Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }} +{{- end -}} +{{- if .ContainerWorkDir }} + Dir: {{ .ContainerWorkDir }} +{{- end -}} +{{- if .ContainerUser }} + User: {{ .ContainerUser }} +{{- end }} +{{- if .ContainerMounts }} +Mounts: +{{- end }} +{{- range $mount := .ContainerMounts }} + Target = {{ $mount.Target }} + Source = {{ $mount.Source }} + ReadOnly = {{ $mount.ReadOnly }} + Type = {{ $mount.Type }} +{{- end -}} +{{- if .HasResources }} +Resources: +{{- if .HasResourceReservations }} + Reservations: +{{- if gt .ResourceReservationNanoCPUs 0.0 }} + CPU: {{ .ResourceReservationNanoCPUs }} +{{- end }} +{{- if .ResourceReservationMemory }} + Memory: {{ .ResourceReservationMemory }} +{{- end }}{{ end }} +{{- if .HasResourceLimits }} + Limits: +{{- if gt .ResourceLimitsNanoCPUs 0.0 }} + CPU: {{ .ResourceLimitsNanoCPUs }} +{{- end }} +{{- if .ResourceLimitMemory }} + Memory: {{ .ResourceLimitMemory }} +{{- end }}{{ end }}{{ end }} +{{- if .Networks }} +Networks: +{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }} +Endpoint Mode: {{ .EndpointMode }} +{{- if .Ports }} +Ports: +{{- range $port := .Ports }} + PublishedPort {{ $port.PublishedPort }} + Protocol = {{ $port.Protocol }} + TargetPort = {{ $port.TargetPort }} +{{- end }} {{ end -}} +` + +// NewServiceFormat returns a Format for rendering using a Context +func NewServiceFormat(source string) Format { + switch source { + case PrettyFormatKey: + return serviceInspectPrettyTemplate + default: + return Format(strings.TrimPrefix(source, RawFormatKey)) + } +} + +// ServiceInspectWrite renders the context for a list of services +func ServiceInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != serviceInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + serviceI, _, err := getRef(ref) + if err != nil { + return err + } + service, ok := serviceI.(swarm.Service) + if !ok { + return fmt.Errorf("got wrong object to inspect") + } + if err := format(&serviceInspectContext{Service: service}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&serviceInspectContext{}, render) +} + +type serviceInspectContext struct { + swarm.Service + subContext +} + +func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) { + return marshalJSON(ctx) +} + +func (ctx *serviceInspectContext) ID() string { + return ctx.Service.ID +} + +func (ctx *serviceInspectContext) Name() string { + return ctx.Service.Spec.Name +} + +func (ctx *serviceInspectContext) Labels() map[string]string { + return ctx.Service.Spec.Labels +} + +func (ctx *serviceInspectContext) IsModeGlobal() bool { + return ctx.Service.Spec.Mode.Global != nil +} + +func (ctx *serviceInspectContext) IsModeReplicated() bool { + return ctx.Service.Spec.Mode.Replicated != nil +} + +func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 { + return ctx.Service.Spec.Mode.Replicated.Replicas +} + +func (ctx *serviceInspectContext) HasUpdateStatus() bool { + return ctx.Service.UpdateStatus.State != "" +} + +func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState { + return ctx.Service.UpdateStatus.State +} + +func (ctx *serviceInspectContext) UpdateStatusStarted() string { + return units.HumanDuration(time.Since(ctx.Service.UpdateStatus.StartedAt)) +} + +func (ctx *serviceInspectContext) UpdateIsCompleted() bool { + return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted +} + +func (ctx *serviceInspectContext) UpdateStatusCompleted() string { + return units.HumanDuration(time.Since(ctx.Service.UpdateStatus.CompletedAt)) +} + +func (ctx *serviceInspectContext) UpdateStatusMessage() string { + return ctx.Service.UpdateStatus.Message +} + +func (ctx *serviceInspectContext) TaskPlacementConstraints() []string { + if ctx.Service.Spec.TaskTemplate.Placement != nil { + return ctx.Service.Spec.TaskTemplate.Placement.Constraints + } + return nil +} + +func (ctx *serviceInspectContext) HasUpdateConfig() bool { + return ctx.Service.Spec.UpdateConfig != nil +} + +func (ctx *serviceInspectContext) UpdateParallelism() uint64 { + return ctx.Service.Spec.UpdateConfig.Parallelism +} + +func (ctx *serviceInspectContext) HasUpdateDelay() bool { + return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateDelay() time.Duration { + return ctx.Service.Spec.UpdateConfig.Delay +} + +func (ctx *serviceInspectContext) UpdateOnFailure() string { + return ctx.Service.Spec.UpdateConfig.FailureAction +} + +func (ctx *serviceInspectContext) HasUpdateMonitor() bool { + return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateMonitor() time.Duration { + return ctx.Service.Spec.UpdateConfig.Monitor +} + +func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 { + return ctx.Service.Spec.UpdateConfig.MaxFailureRatio +} + +func (ctx *serviceInspectContext) ContainerImage() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image +} + +func (ctx *serviceInspectContext) ContainerArgs() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args +} + +func (ctx *serviceInspectContext) ContainerEnv() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env +} + +func (ctx *serviceInspectContext) ContainerWorkDir() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir +} + +func (ctx *serviceInspectContext) ContainerUser() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.User +} + +func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts +} + +func (ctx *serviceInspectContext) HasResources() bool { + return ctx.Service.Spec.TaskTemplate.Resources != nil +} + +func (ctx *serviceInspectContext) HasResourceReservations() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceReservationNanoCPUs() float64 { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 { + return float64(0) + } + return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceReservationMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes)) +} + +func (ctx *serviceInspectContext) HasResourceLimits() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 { + return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceLimitMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes)) +} + +func (ctx *serviceInspectContext) Networks() []string { + var out []string + for _, n := range ctx.Service.Spec.Networks { + out = append(out, n.Target) + } + return out +} + +func (ctx *serviceInspectContext) EndpointMode() string { + if ctx.Service.Spec.EndpointSpec == nil { + return "" + } + + return string(ctx.Service.Spec.EndpointSpec.Mode) +} + +func (ctx *serviceInspectContext) Ports() []swarm.PortConfig { + return ctx.Service.Endpoint.Ports +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/stats.go b/vendor/github.com/docker/docker/cli/command/formatter/stats.go new file mode 100644 index 0000000..7997f99 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/stats.go @@ -0,0 +1,211 @@ +package formatter + +import ( + "fmt" + "sync" + + units "github.com/docker/go-units" +) + +const ( + winOSType = "windows" + defaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" + winDefaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" + + containerHeader = "CONTAINER" + cpuPercHeader = "CPU %" + netIOHeader = "NET I/O" + blockIOHeader = "BLOCK I/O" + memPercHeader = "MEM %" // Used only on Linux + winMemUseHeader = "PRIV WORKING SET" // Used only on Windows + memUseHeader = "MEM USAGE / LIMIT" // Used only on Linux + pidsHeader = "PIDS" // Used only on Linux +) + +// StatsEntry represents represents the statistics data collected from a container +type StatsEntry struct { + Container string + Name string + ID string + CPUPercentage float64 + Memory float64 // On Windows this is the private working set + MemoryLimit float64 // Not used on Windows + MemoryPercentage float64 // Not used on Windows + NetworkRx float64 + NetworkTx float64 + BlockRead float64 + BlockWrite float64 + PidsCurrent uint64 // Not used on Windows + IsInvalid bool + OSType string +} + +// ContainerStats represents an entity to store containers statistics synchronously +type ContainerStats struct { + mutex sync.Mutex + StatsEntry + err error +} + +// GetError returns the container statistics error. +// This is used to determine whether the statistics are valid or not +func (cs *ContainerStats) GetError() error { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.err +} + +// SetErrorAndReset zeroes all the container statistics and store the error. +// It is used when receiving time out error during statistics collecting to reduce lock overhead +func (cs *ContainerStats) SetErrorAndReset(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.CPUPercentage = 0 + cs.Memory = 0 + cs.MemoryPercentage = 0 + cs.MemoryLimit = 0 + cs.NetworkRx = 0 + cs.NetworkTx = 0 + cs.BlockRead = 0 + cs.BlockWrite = 0 + cs.PidsCurrent = 0 + cs.err = err + cs.IsInvalid = true +} + +// SetError sets container statistics error +func (cs *ContainerStats) SetError(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.err = err + if err != nil { + cs.IsInvalid = true + } +} + +// SetStatistics set the container statistics +func (cs *ContainerStats) SetStatistics(s StatsEntry) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + s.Container = cs.Container + s.OSType = cs.OSType + cs.StatsEntry = s +} + +// GetStatistics returns container statistics with other meta data such as the container name +func (cs *ContainerStats) GetStatistics() StatsEntry { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.StatsEntry +} + +// NewStatsFormat returns a format for rendering an CStatsContext +func NewStatsFormat(source, osType string) Format { + if source == TableFormatKey { + if osType == winOSType { + return Format(winDefaultStatsTableFormat) + } + return Format(defaultStatsTableFormat) + } + return Format(source) +} + +// NewContainerStats returns a new ContainerStats entity and sets in it the given name +func NewContainerStats(container, osType string) *ContainerStats { + return &ContainerStats{ + StatsEntry: StatsEntry{Container: container, OSType: osType}, + } +} + +// ContainerStatsWrite renders the context for a list of containers statistics +func ContainerStatsWrite(ctx Context, containerStats []StatsEntry) error { + render := func(format func(subContext subContext) error) error { + for _, cstats := range containerStats { + containerStatsCtx := &containerStatsContext{ + s: cstats, + } + if err := format(containerStatsCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(&containerStatsContext{}, render) +} + +type containerStatsContext struct { + HeaderContext + s StatsEntry +} + +func (c *containerStatsContext) Container() string { + c.AddHeader(containerHeader) + return c.s.Container +} + +func (c *containerStatsContext) Name() string { + c.AddHeader(nameHeader) + name := c.s.Name[1:] + return name +} + +func (c *containerStatsContext) ID() string { + c.AddHeader(containerIDHeader) + return c.s.ID +} + +func (c *containerStatsContext) CPUPerc() string { + c.AddHeader(cpuPercHeader) + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.CPUPercentage) +} + +func (c *containerStatsContext) MemUsage() string { + header := memUseHeader + if c.s.OSType == winOSType { + header = winMemUseHeader + } + c.AddHeader(header) + if c.s.IsInvalid { + return fmt.Sprintf("-- / --") + } + if c.s.OSType == winOSType { + return fmt.Sprintf("%s", units.BytesSize(c.s.Memory)) + } + return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit)) +} + +func (c *containerStatsContext) MemPerc() string { + header := memPercHeader + c.AddHeader(header) + if c.s.IsInvalid || c.s.OSType == winOSType { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage) +} + +func (c *containerStatsContext) NetIO() string { + c.AddHeader(netIOHeader) + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.NetworkRx, 3), units.HumanSizeWithPrecision(c.s.NetworkTx, 3)) +} + +func (c *containerStatsContext) BlockIO() string { + c.AddHeader(blockIOHeader) + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.BlockRead, 3), units.HumanSizeWithPrecision(c.s.BlockWrite, 3)) +} + +func (c *containerStatsContext) PIDs() string { + c.AddHeader(pidsHeader) + if c.s.IsInvalid || c.s.OSType == winOSType { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%d", c.s.PidsCurrent) +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go b/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go new file mode 100644 index 0000000..d5a17cc --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go @@ -0,0 +1,228 @@ +package formatter + +import ( + "bytes" + "testing" + + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestContainerStatsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + + var ctx containerStatsContext + tt := []struct { + stats StatsEntry + expValue string + expHeader string + call func() string + }{ + {StatsEntry{Container: containerID}, containerID, containerHeader, ctx.Container}, + {StatsEntry{CPUPercentage: 5.5}, "5.50%", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{CPUPercentage: 5.5, IsInvalid: true}, "--", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3}, "0.31 B / 12.3 B", netIOHeader, ctx.NetIO}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3, IsInvalid: true}, "--", netIOHeader, ctx.NetIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3}, "0.1 B / 2.3 B", blockIOHeader, ctx.BlockIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3, IsInvalid: true}, "--", blockIOHeader, ctx.BlockIO}, + {StatsEntry{MemoryPercentage: 10.2}, "10.20%", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2, IsInvalid: true}, "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2, OSType: "windows"}, "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{Memory: 24, MemoryLimit: 30}, "24 B / 30 B", memUseHeader, ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30, IsInvalid: true}, "-- / --", memUseHeader, ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30, OSType: "windows"}, "24 B", winMemUseHeader, ctx.MemUsage}, + {StatsEntry{PidsCurrent: 10}, "10", pidsHeader, ctx.PIDs}, + {StatsEntry{PidsCurrent: 10, IsInvalid: true}, "--", pidsHeader, ctx.PIDs}, + {StatsEntry{PidsCurrent: 10, OSType: "windows"}, "--", pidsHeader, ctx.PIDs}, + } + + for _, te := range tt { + ctx = containerStatsContext{s: te.stats} + if v := te.call(); v != te.expValue { + t.Fatalf("Expected %q, got %q", te.expValue, v) + } + + h := ctx.FullHeader() + if h != te.expHeader { + t.Fatalf("Expected %q, got %q", te.expHeader, h) + } + } +} + +func TestContainerStatsContextWrite(t *testing.T) { + tt := []struct { + context Context + expected string + }{ + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + { + Context{Format: "table {{.MemUsage}}"}, + `MEM USAGE / LIMIT +20 B / 20 B +-- / -- +`, + }, + { + Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + OSType: "linux", + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + OSType: "linux", + }, + } + var out bytes.Buffer + te.context.Output = &out + err := ContainerStatsWrite(te.context, stats) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Equal(t, out.String(), te.expected) + } + } +} + +func TestContainerStatsContextWriteWindows(t *testing.T) { + tt := []struct { + context Context + expected string + }{ + { + Context{Format: "table {{.MemUsage}}"}, + `PRIV WORKING SET +20 B +-- / -- +`, + }, + { + Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + { + Context{Format: "{{.Container}} {{.MemPerc}} {{.PIDs}}"}, + `container1 -- -- +container2 -- -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + OSType: "windows", + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + OSType: "windows", + }, + } + var out bytes.Buffer + te.context.Output = &out + err := ContainerStatsWrite(te.context, stats) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Equal(t, out.String(), te.expected) + } + } +} + +func TestContainerStatsContextWriteWithNoStats(t *testing.T) { + var out bytes.Buffer + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Container}}", + Output: &out, + }, + "", + }, + { + Context{ + Format: "table {{.Container}}", + Output: &out, + }, + "CONTAINER\n", + }, + { + Context{ + Format: "table {{.Container}}\t{{.CPUPerc}}", + Output: &out, + }, + "CONTAINER CPU %\n", + }, + } + + for _, context := range contexts { + ContainerStatsWrite(context.context, []StatsEntry{}) + assert.Equal(t, context.expected, out.String()) + // Clean buffer + out.Reset() + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/volume.go b/vendor/github.com/docker/docker/cli/command/formatter/volume.go new file mode 100644 index 0000000..90c9b13 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/volume.go @@ -0,0 +1,121 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" +) + +const ( + defaultVolumeQuietFormat = "{{.Name}}" + defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}" + + volumeNameHeader = "VOLUME NAME" + mountpointHeader = "MOUNTPOINT" + linksHeader = "LINKS" + // Status header ? +) + +// NewVolumeFormat returns a format for use with a volume Context +func NewVolumeFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultVolumeQuietFormat + } + return defaultVolumeTableFormat + case RawFormatKey: + if quiet { + return `name: {{.Name}}` + } + return `name: {{.Name}}\ndriver: {{.Driver}}\n` + } + return Format(source) +} + +// VolumeWrite writes formatted volumes using the Context +func VolumeWrite(ctx Context, volumes []*types.Volume) error { + render := func(format func(subContext subContext) error) error { + for _, volume := range volumes { + if err := format(&volumeContext{v: *volume}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&volumeContext{}, render) +} + +type volumeContext struct { + HeaderContext + v types.Volume +} + +func (c *volumeContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *volumeContext) Name() string { + c.AddHeader(volumeNameHeader) + return c.v.Name +} + +func (c *volumeContext) Driver() string { + c.AddHeader(driverHeader) + return c.v.Driver +} + +func (c *volumeContext) Scope() string { + c.AddHeader(scopeHeader) + return c.v.Scope +} + +func (c *volumeContext) Mountpoint() string { + c.AddHeader(mountpointHeader) + return c.v.Mountpoint +} + +func (c *volumeContext) Labels() string { + c.AddHeader(labelsHeader) + if c.v.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.v.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *volumeContext) Label(name string) string { + + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.AddHeader(h) + + if c.v.Labels == nil { + return "" + } + return c.v.Labels[name] +} + +func (c *volumeContext) Links() string { + c.AddHeader(linksHeader) + if c.v.UsageData == nil { + return "N/A" + } + return fmt.Sprintf("%d", c.v.UsageData.RefCount) +} + +func (c *volumeContext) Size() string { + c.AddHeader(sizeHeader) + if c.v.UsageData == nil { + return "N/A" + } + return units.HumanSize(float64(c.v.UsageData.Size)) +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go b/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go new file mode 100644 index 0000000..9ec18b6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go @@ -0,0 +1,189 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestVolumeContext(t *testing.T) { + volumeName := stringid.GenerateRandomID() + + var ctx volumeContext + cases := []struct { + volumeCtx volumeContext + expValue string + expHeader string + call func() string + }{ + {volumeContext{ + v: types.Volume{Name: volumeName}, + }, volumeName, volumeNameHeader, ctx.Name}, + {volumeContext{ + v: types.Volume{Driver: "driver_name"}, + }, "driver_name", driverHeader, ctx.Driver}, + {volumeContext{ + v: types.Volume{Scope: "local"}, + }, "local", scopeHeader, ctx.Scope}, + {volumeContext{ + v: types.Volume{Mountpoint: "mountpoint"}, + }, "mountpoint", mountpointHeader, ctx.Mountpoint}, + {volumeContext{ + v: types.Volume{}, + }, "", labelsHeader, ctx.Labels}, + {volumeContext{ + v: types.Volume{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", labelsHeader, ctx.Labels}, + } + + for _, c := range cases { + ctx = c.volumeCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } +} + +func TestVolumeContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewVolumeFormat("table", false)}, + `DRIVER VOLUME NAME +foo foobar_baz +bar foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table", true)}, + `foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", false)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", true)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewVolumeFormat("raw", false)}, + `name: foobar_baz +driver: foo + +name: foobar_bar +driver: bar + +`, + }, + { + Context{Format: NewVolumeFormat("raw", true)}, + `name: foobar_baz +name: foobar_bar +`, + }, + // Custom Format + { + Context{Format: NewVolumeFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + volumes := []*types.Volume{ + {Name: "foobar_baz", Driver: "foo"}, + {Name: "foobar_bar", Driver: "bar"}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := VolumeWrite(testcase.context, volumes) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestVolumeContextWriteJSON(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"}, + {"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A"}, + } + out := bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, m, expectedJSONs[i]) + } +} + +func TestVolumeContextWriteJSONField(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .Name}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Equal(t, s, volumes[i].Name) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go b/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go new file mode 100644 index 0000000..511b1a8 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go @@ -0,0 +1,90 @@ +package idresolver + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stringid" +) + +// IDResolver provides ID to Name resolution. +type IDResolver struct { + client client.APIClient + noResolve bool + cache map[string]string +} + +// New creates a new IDResolver. +func New(client client.APIClient, noResolve bool) *IDResolver { + return &IDResolver{ + client: client, + noResolve: noResolve, + cache: make(map[string]string), + } +} + +func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) { + switch t := t.(type) { + case swarm.Node: + node, _, err := r.client.NodeInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + if node.Spec.Annotations.Name != "" { + return node.Spec.Annotations.Name, nil + } + if node.Description.Hostname != "" { + return node.Description.Hostname, nil + } + return id, nil + case swarm.Service: + service, _, err := r.client.ServiceInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + return service.Spec.Annotations.Name, nil + case swarm.Task: + // If the caller passes the full task there's no need to do a lookup. + if t.ID == "" { + var err error + + t, _, err = r.client.TaskInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + } + taskID := stringid.TruncateID(t.ID) + if t.ServiceID == "" { + return taskID, nil + } + service, err := r.Resolve(ctx, swarm.Service{}, t.ServiceID) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%d.%s", service, t.Slot, taskID), nil + default: + return "", fmt.Errorf("unsupported type") + } + +} + +// Resolve will attempt to resolve an ID to a Name by querying the manager. +// Results are stored into a cache. +// If the `-n` flag is used in the command-line, resolution is disabled. +func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) { + if r.noResolve { + return id, nil + } + if name, ok := r.cache[id]; ok { + return name, nil + } + name, err := r.get(ctx, t, id) + if err != nil { + return "", err + } + r.cache[id] = name + return name, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/image/build.go b/vendor/github.com/docker/docker/cli/command/image/build.go new file mode 100644 index 0000000..0c88af5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/build.go @@ -0,0 +1,477 @@ +package image + +import ( + "archive/tar" + "bufio" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + + "golang.org/x/net/context" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type buildOptions struct { + context string + dockerfileName string + tags opts.ListOpts + labels opts.ListOpts + buildArgs opts.ListOpts + ulimits *runconfigopts.UlimitOpt + memory string + memorySwap string + shmSize string + cpuShares int64 + cpuPeriod int64 + cpuQuota int64 + cpuSetCpus string + cpuSetMems string + cgroupParent string + isolation string + quiet bool + noCache bool + rm bool + forceRm bool + pull bool + cacheFrom []string + compress bool + securityOpt []string + networkMode string + squash bool +} + +// NewBuildCommand creates a new `docker build` command +func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command { + ulimits := make(map[string]*units.Ulimit) + options := buildOptions{ + tags: opts.NewListOpts(validateTag), + buildArgs: opts.NewListOpts(runconfigopts.ValidateEnv), + ulimits: runconfigopts.NewUlimitOpt(&ulimits), + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "build [OPTIONS] PATH | URL | -", + Short: "Build an image from a Dockerfile", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.context = args[0] + return runBuild(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") + flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") + flags.Var(options.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flags.StringVarP(&options.memory, "memory", "m", "", "Memory limit") + flags.StringVar(&options.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.StringVar(&options.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") + flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") + flags.Var(&options.labels, "label", "Set metadata for an image") + flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") + flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") + flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") + flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") + flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources") + flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip") + flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options") + flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build") + + command.AddTrustedFlags(flags, true) + + flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer") + flags.SetAnnotation("squash", "experimental", nil) + flags.SetAnnotation("squash", "version", []string{"1.25"}) + + return cmd +} + +// lastProgressOutput is the same as progress.Output except +// that it only output with the last update. It is used in +// non terminal scenarios to depresss verbose messages +type lastProgressOutput struct { + output progress.Output +} + +// WriteProgress formats progress information from a ProgressReader. +func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { + if !prog.LastUpdate { + return nil + } + + return out.output.WriteProgress(prog) +} + +func runBuild(dockerCli *command.DockerCli, options buildOptions) error { + + var ( + buildCtx io.ReadCloser + err error + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + ) + + specifiedContext := options.context + progBuff = dockerCli.Out() + buildBuff = dockerCli.Out() + if options.quiet { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + + switch { + case specifiedContext == "-": + buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName) + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName) + case urlutil.IsURL(specifiedContext): + buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) + default: + contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName) + } + + if err != nil { + if options.quiet && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(dockerCli.Err(), progBuff) + } + return fmt.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + contextDir = tempDir + } + + if buildCtx == nil { + // And canonicalize dockerfile name to a platform-independent one + relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) + if err != nil { + return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) + } + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return err + } + defer f.Close() + + var excludes []string + if err == nil { + excludes, err = dockerignore.ReadAll(f) + if err != nil { + return err + } + } + + if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil { + return fmt.Errorf("Error checking context: '%s'.", err) + } + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The daemon will remove them for us, if needed, after it + // parses the Dockerfile. Ignore errors here, as they will have been + // caught by validateContextDirectory above. + var includes = []string{"."} + keepThem1, _ := fileutils.Matches(".dockerignore", excludes) + keepThem2, _ := fileutils.Matches(relDockerfile, excludes) + if keepThem1 || keepThem2 { + includes = append(includes, ".dockerignore", relDockerfile) + } + + compression := archive.Uncompressed + if options.compress { + compression = archive.Gzip + } + buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + Compression: compression, + ExcludePatterns: excludes, + IncludeFiles: includes, + }) + if err != nil { + return err + } + } + + ctx := context.Background() + + var resolvedTags []*resolvedTag + if command.IsTrusted() { + translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { + return TrustedReference(ctx, dockerCli, ref, nil) + } + // Wrap the tar archive to replace the Dockerfile entry with the rewritten + // Dockerfile which uses trusted pulls. + buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags) + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) + if !dockerCli.Out().IsTerminal() { + progressOutput = &lastProgressOutput{output: progressOutput} + } + + var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") + + var memory int64 + if options.memory != "" { + parsedMemory, err := units.RAMInBytes(options.memory) + if err != nil { + return err + } + memory = parsedMemory + } + + var memorySwap int64 + if options.memorySwap != "" { + if options.memorySwap == "-1" { + memorySwap = -1 + } else { + parsedMemorySwap, err := units.RAMInBytes(options.memorySwap) + if err != nil { + return err + } + memorySwap = parsedMemorySwap + } + } + + var shmSize int64 + if options.shmSize != "" { + shmSize, err = units.RAMInBytes(options.shmSize) + if err != nil { + return err + } + } + + authConfigs, _ := dockerCli.GetAllCredentials() + buildOptions := types.ImageBuildOptions{ + Memory: memory, + MemorySwap: memorySwap, + Tags: options.tags.GetAll(), + SuppressOutput: options.quiet, + NoCache: options.noCache, + Remove: options.rm, + ForceRemove: options.forceRm, + PullParent: options.pull, + Isolation: container.Isolation(options.isolation), + CPUSetCPUs: options.cpuSetCpus, + CPUSetMems: options.cpuSetMems, + CPUShares: options.cpuShares, + CPUQuota: options.cpuQuota, + CPUPeriod: options.cpuPeriod, + CgroupParent: options.cgroupParent, + Dockerfile: relDockerfile, + ShmSize: shmSize, + Ulimits: options.ulimits.GetList(), + BuildArgs: runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()), + AuthConfigs: authConfigs, + Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), + CacheFrom: options.cacheFrom, + SecurityOpt: options.securityOpt, + NetworkMode: options.networkMode, + Squash: options.squash, + } + + response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) + if err != nil { + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s", progBuff) + } + return err + } + defer response.Body.Close() + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), nil) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) + } + return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { + fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if options.quiet { + fmt.Fprintf(dockerCli.Out(), "%s", buildBuff) + } + + if command.IsTrusted() { + // Since the build was successful, now we must tag any of the resolved + // images from the above Dockerfile rewrite. + for _, resolved := range resolvedTags { + if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil { + return err + } + } + } + + return nil +} + +type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) + +// validateTag checks if the given image name can be resolved. +func validateTag(rawRepo string) (string, error) { + _, err := reference.ParseNamed(rawRepo) + if err != nil { + return "", err + } + + return rawRepo, nil +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite. +type resolvedTag struct { + digestRef reference.Canonical + tagRef reference.NamedTagged +} + +// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in +// "FROM " instructions to a digest reference. `translator` is a +// function that takes a repository name and tag reference and returns a +// trusted digest reference. +func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { + scanner := bufio.NewScanner(dockerfile) + buf := bytes.NewBuffer(nil) + + // Scan the lines of the Dockerfile, looking for a "FROM" line. + for scanner.Scan() { + line := scanner.Text() + + matches := dockerfileFromLinePattern.FindStringSubmatch(line) + if matches != nil && matches[1] != api.NoBaseImageSpecifier { + // Replace the line with a resolved "FROM repo@digest" + ref, err := reference.ParseNamed(matches[1]) + if err != nil { + return nil, nil, err + } + ref = reference.WithDefaultTag(ref) + if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { + trustedRef, err := translator(ctx, ref) + if err != nil { + return nil, nil, err + } + + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String())) + resolvedTags = append(resolvedTags, &resolvedTag{ + digestRef: trustedRef, + tagRef: ref, + }) + } + } + + _, err := fmt.Fprintln(buf, line) + if err != nil { + return nil, nil, err + } + } + + return buf.Bytes(), resolvedTags, scanner.Err() +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + content := io.Reader(tarReader) + if hdr.Name == dockerfileName { + // This entry is the Dockerfile. Since the tar archive was + // generated from a directory on the local filesystem, the + // Dockerfile will only appear once in the archive. + var newDockerfile []byte + newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) + if err != nil { + pipeWriter.CloseWithError(err) + return + } + hdr.Size = int64(len(newDockerfile)) + content = bytes.NewBuffer(newDockerfile) + } + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/vendor/github.com/docker/docker/cli/command/image/cmd.go b/vendor/github.com/docker/docker/cli/command/image/cmd.go new file mode 100644 index 0000000..c3ca61f --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/cmd.go @@ -0,0 +1,33 @@ +package image + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewImageCommand returns a cobra command for `image` subcommands +func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "image", + Short: "Manage images", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + NewBuildCommand(dockerCli), + NewHistoryCommand(dockerCli), + NewImportCommand(dockerCli), + NewLoadCommand(dockerCli), + NewPullCommand(dockerCli), + NewPushCommand(dockerCli), + NewSaveCommand(dockerCli), + NewTagCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/image/history.go b/vendor/github.com/docker/docker/cli/command/image/history.go new file mode 100644 index 0000000..91c8f75 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/history.go @@ -0,0 +1,99 @@ +package image + +import ( + "fmt" + "strconv" + "strings" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type historyOptions struct { + image string + + human bool + quiet bool + noTrunc bool +} + +// NewHistoryCommand creates a new `docker history` command +func NewHistoryCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts historyOptions + + cmd := &cobra.Command{ + Use: "history [OPTIONS] IMAGE", + Short: "Show the history of an image", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + return runHistory(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + + return cmd +} + +func runHistory(dockerCli *command.DockerCli, opts historyOptions) error { + ctx := context.Background() + + history, err := dockerCli.Client().ImageHistory(ctx, opts.image) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + + if opts.quiet { + for _, entry := range history { + if opts.noTrunc { + fmt.Fprintf(w, "%s\n", entry.ID) + } else { + fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) + } + } + w.Flush() + return nil + } + + var imageID string + var createdBy string + var created string + var size string + + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") + for _, entry := range history { + imageID = entry.ID + createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) + if !opts.noTrunc { + createdBy = stringutils.Ellipsis(createdBy, 45) + imageID = stringid.TruncateID(entry.ID) + } + + if opts.human { + created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" + size = units.HumanSizeWithPrecision(float64(entry.Size), 3) + } else { + created = time.Unix(entry.Created, 0).Format(time.RFC3339) + size = strconv.FormatInt(entry.Size, 10) + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/image/import.go b/vendor/github.com/docker/docker/cli/command/image/import.go new file mode 100644 index 0000000..60024fb --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/import.go @@ -0,0 +1,88 @@ +package image + +import ( + "io" + "os" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + dockeropts "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/urlutil" + "github.com/spf13/cobra" +) + +type importOptions struct { + source string + reference string + changes dockeropts.ListOpts + message string +} + +// NewImportCommand creates a new `docker import` command +func NewImportCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts importOptions + + cmd := &cobra.Command{ + Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", + Short: "Import the contents from a tarball to create a filesystem image", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.source = args[0] + if len(args) > 1 { + opts.reference = args[1] + } + return runImport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + opts.changes = dockeropts.NewListOpts(nil) + flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") + flags.StringVarP(&opts.message, "message", "m", "", "Set commit message for imported image") + + return cmd +} + +func runImport(dockerCli *command.DockerCli, opts importOptions) error { + var ( + in io.Reader + srcName = opts.source + ) + + if opts.source == "-" { + in = dockerCli.In() + } else if !urlutil.IsURL(opts.source) { + srcName = "-" + file, err := os.Open(opts.source) + if err != nil { + return err + } + defer file.Close() + in = file + } + + source := types.ImageImportSource{ + Source: in, + SourceName: srcName, + } + + options := types.ImageImportOptions{ + Message: opts.message, + Changes: opts.changes.GetAll(), + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ImageImport(context.Background(), source, opts.reference, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/inspect.go b/vendor/github.com/docker/docker/cli/command/image/inspect.go new file mode 100644 index 0000000..217863c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/inspect.go @@ -0,0 +1,44 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker image inspect` +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] IMAGE [IMAGE...]", + Short: "Display detailed information on one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ImageInspectWithRaw(ctx, ref) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/list.go b/vendor/github.com/docker/docker/cli/command/image/list.go new file mode 100644 index 0000000..679604f --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/list.go @@ -0,0 +1,96 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type imagesOptions struct { + matchName string + + quiet bool + all bool + noTrunc bool + showDigests bool + format string + filter opts.FilterOpt +} + +// NewImagesCommand creates a new `docker images` command +func NewImagesCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := imagesOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "images [OPTIONS] [REPOSITORY[:TAG]]", + Short: "List images", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + opts.matchName = args[0] + } + return runImages(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVarP(&opts.all, "all", "a", false, "Show all images (default hides intermediate images)") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVar(&opts.showDigests, "digests", false, "Show digests") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := *NewImagesCommand(dockerCli) + cmd.Aliases = []string{"images", "list"} + cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]" + return &cmd +} + +func runImages(dockerCli *command.DockerCli, opts imagesOptions) error { + ctx := context.Background() + + filters := opts.filter.Value() + if opts.matchName != "" { + filters.Add("reference", opts.matchName) + } + + options := types.ImageListOptions{ + All: opts.all, + Filters: filters, + } + + images, err := dockerCli.Client().ImageList(ctx, options) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().ImagesFormat + } else { + format = formatter.TableFormatKey + } + } + + imageCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewImageFormat(format, opts.quiet, opts.showDigests), + Trunc: !opts.noTrunc, + }, + Digest: opts.showDigests, + } + return formatter.ImageWrite(imageCtx, images) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/load.go b/vendor/github.com/docker/docker/cli/command/image/load.go new file mode 100644 index 0000000..988f510 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/load.go @@ -0,0 +1,77 @@ +package image + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/system" + "github.com/spf13/cobra" +) + +type loadOptions struct { + input string + quiet bool +} + +// NewLoadCommand creates a new `docker load` command +func NewLoadCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts loadOptions + + cmd := &cobra.Command{ + Use: "load [OPTIONS]", + Short: "Load an image from a tar archive or STDIN", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLoad(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") + + return cmd +} + +func runLoad(dockerCli *command.DockerCli, opts loadOptions) error { + + var input io.Reader = dockerCli.In() + if opts.input != "" { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(opts.input) + if err != nil { + return err + } + defer file.Close() + input = file + } + + // To avoid getting stuck, verify that a tar file is given either in + // the input flag or through stdin and if not display an error message and exit. + if opts.input == "" && dockerCli.In().IsTerminal() { + return fmt.Errorf("requested load from stdin, but stdin is empty") + } + + if !dockerCli.Out().IsTerminal() { + opts.quiet = true + } + response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) + } + + _, err = io.Copy(dockerCli.Out(), response.Body) + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/image/prune.go b/vendor/github.com/docker/docker/cli/command/image/prune.go new file mode 100644 index 0000000..82c28fc --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/prune.go @@ -0,0 +1,92 @@ +package image + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool +} + +// NewPruneCommand returns a new cobra prune command for images +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused images", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images, not just dangling ones") + + return cmd +} + +const ( + allImageWarning = `WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue?` + danglingWarning = `WARNING! This will remove all dangling images. +Are you sure you want to continue?` +) + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { + pruneFilters := filters.NewArgs() + pruneFilters.Add("dangling", fmt.Sprintf("%v", !opts.all)) + + warning := danglingWarning + if opts.all { + warning = allImageWarning + } + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) + if err != nil { + return + } + + if len(report.ImagesDeleted) > 0 { + output = "Deleted Images:\n" + for _, st := range report.ImagesDeleted { + if st.Untagged != "" { + output += fmt.Sprintln("untagged:", st.Untagged) + } else { + output += fmt.Sprintln("deleted:", st.Deleted) + } + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Image Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true, all: all}) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/pull.go b/vendor/github.com/docker/docker/cli/command/image/pull.go new file mode 100644 index 0000000..24933fe --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/pull.go @@ -0,0 +1,84 @@ +package image + +import ( + "errors" + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type pullOptions struct { + remote string + all bool +} + +// NewPullCommand creates a new `docker pull` command +func NewPullCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pullOptions + + cmd := &cobra.Command{ + Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", + Short: "Pull an image or a repository from a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runPull(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") + command.AddTrustedFlags(flags, true) + + return cmd +} + +func runPull(dockerCli *command.DockerCli, opts pullOptions) error { + distributionRef, err := reference.ParseNamed(opts.remote) + if err != nil { + return err + } + if opts.all && !reference.IsNameOnly(distributionRef) { + return errors.New("tag can't be used with --all-tags/-a") + } + + if !opts.all && reference.IsNameOnly(distributionRef) { + distributionRef = reference.WithDefaultTag(distributionRef) + fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", reference.DefaultTag) + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "pull") + + // Check if reference has a digest + _, isCanonical := distributionRef.(reference.Canonical) + if command.IsTrusted() && !isCanonical { + err = trustedPull(ctx, dockerCli, repoInfo, distributionRef, authConfig, requestPrivilege) + } else { + err = imagePullPrivileged(ctx, dockerCli, authConfig, distributionRef.String(), requestPrivilege, opts.all) + } + if err != nil { + if strings.Contains(err.Error(), "target is plugin") { + return errors.New(err.Error() + " - Use `docker plugin install`") + } + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/image/push.go b/vendor/github.com/docker/docker/cli/command/image/push.go new file mode 100644 index 0000000..a8ce494 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/push.go @@ -0,0 +1,61 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewPushCommand creates a new `docker push` command +func NewPushCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "push [OPTIONS] NAME[:TAG]", + Short: "Push an image or a repository to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPush(dockerCli, args[0]) + }, + } + + flags := cmd.Flags() + + command.AddTrustedFlags(flags, true) + + return cmd +} + +func runPush(dockerCli *command.DockerCli, remote string) error { + ref, err := reference.ParseNamed(remote) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + ctx := context.Background() + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push") + + if command.IsTrusted() { + return trustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) + } + + responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref.String(), requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/remove.go b/vendor/github.com/docker/docker/cli/command/image/remove.go new file mode 100644 index 0000000..c79ceba --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/remove.go @@ -0,0 +1,77 @@ +package image + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool + noPrune bool +} + +// NewRemoveCommand creates a new `docker remove` command +func NewRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rmi [OPTIONS] IMAGE [IMAGE...]", + Short: "Remove one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, opts, args) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") + flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") + + return cmd +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := *NewRemoveCommand(dockerCli) + cmd.Aliases = []string{"rmi", "remove"} + cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]" + return &cmd +} + +func runRemove(dockerCli *command.DockerCli, opts removeOptions, images []string) error { + client := dockerCli.Client() + ctx := context.Background() + + options := types.ImageRemoveOptions{ + Force: opts.force, + PruneChildren: !opts.noPrune, + } + + var errs []string + for _, image := range images { + dels, err := client.ImageRemove(ctx, image, options) + if err != nil { + errs = append(errs, err.Error()) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) + } + } + } + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/image/save.go b/vendor/github.com/docker/docker/cli/command/image/save.go new file mode 100644 index 0000000..bbe82d2 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/save.go @@ -0,0 +1,57 @@ +package image + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type saveOptions struct { + images []string + output string +} + +// NewSaveCommand creates a new `docker save` command +func NewSaveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts saveOptions + + cmd := &cobra.Command{ + Use: "save [OPTIONS] IMAGE [IMAGE...]", + Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.images = args + return runSave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runSave(dockerCli *command.DockerCli, opts saveOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/tag.go b/vendor/github.com/docker/docker/cli/command/image/tag.go new file mode 100644 index 0000000..fb2b703 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/tag.go @@ -0,0 +1,41 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type tagOptions struct { + image string + name string +} + +// NewTagCommand creates a new `docker tag` command +func NewTagCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts tagOptions + + cmd := &cobra.Command{ + Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", + Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + opts.name = args[1] + return runTag(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTag(dockerCli *command.DockerCli, opts tagOptions) error { + ctx := context.Background() + + return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/trust.go b/vendor/github.com/docker/docker/cli/command/image/trust.go new file mode 100644 index 0000000..5136a22 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/trust.go @@ -0,0 +1,381 @@ +package image + +import ( + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "path" + "sort" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/trust" + "github.com/docker/docker/distribution" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/notary/client" + "github.com/docker/notary/tuf/data" +) + +type target struct { + name string + digest digest.Digest + size int64 +} + +// trustedPush handles content trust pushing of an image +func trustedPush(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref.String(), requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) +} + +// PushTrustedReference pushes a canonical reference to the trust server. +func PushTrustedReference(cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { + // If it is a trusted push we would like to find the target entry which match the + // tag provided in the function and then do an AddTarget later. + target := &client.Target{} + // Count the times of calling for handleTarget, + // if it is called more that once, that should be considered an error in a trusted push. + cnt := 0 + handleTarget := func(aux *json.RawMessage) { + cnt++ + if cnt > 1 { + // handleTarget should only be called one. This will be treated as an error. + return + } + + var pushResult distribution.PushResult + err := json.Unmarshal(*aux, &pushResult) + if err == nil && pushResult.Tag != "" && pushResult.Digest.Validate() == nil { + h, err := hex.DecodeString(pushResult.Digest.Hex()) + if err != nil { + target = nil + return + } + target.Name = pushResult.Tag + target.Hashes = data.Hashes{string(pushResult.Digest.Algorithm()): h} + target.Length = int64(pushResult.Size) + } + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + default: + // We want trust signatures to always take an explicit tag, + // otherwise it will act as an untrusted push. + if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), nil); err != nil { + return err + } + fmt.Fprintln(cli.Out(), "No tag specified, skipping trust metadata push") + return nil + } + + if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), handleTarget); err != nil { + return err + } + + if cnt > 1 { + return fmt.Errorf("internal error: only one call to handleTarget expected") + } + + if target == nil { + fmt.Fprintln(cli.Out(), "No targets found, please provide a specific tag in order to sign it") + return nil + } + + fmt.Fprintln(cli.Out(), "Signing and pushing trust metadata") + + repo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "push", "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to notary repository: %s\n", err) + return err + } + + // get the latest repository metadata so we can figure out which roles to sign + err = repo.Update(false) + + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) + var rootKeyID string + // always select the first root key + if len(keys) > 0 { + sort.Strings(keys) + rootKeyID = keys[0] + } else { + rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) + if err != nil { + return err + } + rootKeyID = rootPublicKey.ID() + } + + // Initialize the notary repository with a remotely managed snapshot key + if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { + return trust.NotaryError(repoInfo.FullName(), err) + } + fmt.Fprintf(cli.Out(), "Finished initializing %q\n", repoInfo.FullName()) + err = repo.AddTarget(target, data.CanonicalTargetsRole) + case nil: + // already initialized and we have successfully downloaded the latest metadata + err = addTargetToAllSignableRoles(repo, target) + default: + return trust.NotaryError(repoInfo.FullName(), err) + } + + if err == nil { + err = repo.Publish() + } + + if err != nil { + fmt.Fprintf(cli.Out(), "Failed to sign %q:%s - %s\n", repoInfo.FullName(), tag, err.Error()) + return trust.NotaryError(repoInfo.FullName(), err) + } + + fmt.Fprintf(cli.Out(), "Successfully signed %q:%s\n", repoInfo.FullName(), tag) + return nil +} + +// Attempt to add the image target to all the top level delegation roles we can +// (based on whether we have the signing key and whether the role's path allows +// us to). +// If there are no delegation roles, we add to the targets role. +func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { + var signableRoles []string + + // translate the full key names, which includes the GUN, into just the key IDs + allCanonicalKeyIDs := make(map[string]struct{}) + for fullKeyID := range repo.CryptoService.ListAllKeys() { + allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} + } + + allDelegationRoles, err := repo.GetDelegationRoles() + if err != nil { + return err + } + + // if there are no delegation roles, then just try to sign it into the targets role + if len(allDelegationRoles) == 0 { + return repo.AddTarget(target, data.CanonicalTargetsRole) + } + + // there are delegation roles, find every delegation role we have a key for, and + // attempt to sign into into all those roles. + for _, delegationRole := range allDelegationRoles { + // We do not support signing any delegation role that isn't a direct child of the targets role. + // Also don't bother checking the keys if we can't add the target + // to this role due to path restrictions + if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { + continue + } + + for _, canonicalKeyID := range delegationRole.KeyIDs { + if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { + signableRoles = append(signableRoles, delegationRole.Name) + break + } + } + } + + if len(signableRoles) == 0 { + return fmt.Errorf("no valid signing keys for delegation roles") + } + + return repo.AddTarget(target, signableRoles...) +} + +// imagePushPrivileged push the image +func imagePushPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + } + + return cli.Client().ImagePush(ctx, ref, options) +} + +// trustedPull handles content trust pulling of an image +func trustedPull(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + var refs []target + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return err + } + + if tagged, isTagged := ref.(reference.NamedTagged); !isTagged { + // List all targets + targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(repoInfo.FullName(), err) + } + for _, tgt := range targets { + t, err := convertTarget(tgt.Target) + if err != nil { + fmt.Fprintf(cli.Out(), "Skipping target for %q\n", repoInfo.Name()) + continue + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { + continue + } + refs = append(refs, t) + } + if len(refs) == 0 { + return trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trusted tags for %s", repoInfo.FullName())) + } + } else { + t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(repoInfo.FullName(), err) + } + // Only get the tag if it's in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", tagged.Tag())) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + r, err := convertTarget(t.Target) + if err != nil { + return err + + } + refs = append(refs, r) + } + + for i, r := range refs { + displayTag := r.name + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.Name(), displayTag, r.digest) + + ref, err := reference.WithDigest(reference.TrimNamed(repoInfo), r.digest) + if err != nil { + return err + } + if err := imagePullPrivileged(ctx, cli, authConfig, ref.String(), requestPrivilege, false); err != nil { + return err + } + + tagged, err := reference.WithTag(repoInfo, r.name) + if err != nil { + return err + } + trustedRef, err := reference.WithDigest(reference.TrimNamed(repoInfo), r.digest) + if err != nil { + return err + } + if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { + return err + } + } + return nil +} + +// imagePullPrivileged pulls the image and displays it to the output +func imagePullPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + options := types.ImagePullOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + All: all, + } + + responseBody, err := cli.Client().ImagePull(ctx, ref, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil) +} + +// TrustedReference returns the canonical trusted reference for an image reference +func TrustedReference(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { + var ( + repoInfo *registry.RepositoryInfo + err error + ) + if rs != nil { + repoInfo, err = rs.ResolveRepository(ref) + } else { + repoInfo, err = registry.ParseRepositoryInfo(ref) + } + if err != nil { + return nil, err + } + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return nil, err + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.FullName(), err) + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.Tag())) + } + r, err := convertTarget(t.Target) + if err != nil { + return nil, err + + } + + return reference.WithDigest(reference.TrimNamed(ref), r.digest) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + name: t.Name, + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +// TagTrusted tags a trusted ref +func TagTrusted(ctx context.Context, cli *command.DockerCli, trustedRef reference.Canonical, ref reference.NamedTagged) error { + fmt.Fprintf(cli.Out(), "Tagging %s as %s\n", trustedRef.String(), ref.String()) + + return cli.Client().ImageTag(ctx, trustedRef.String(), ref.String()) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/trust_test.go b/vendor/github.com/docker/docker/cli/command/image/trust_test.go new file mode 100644 index 0000000..7814646 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/trust_test.go @@ -0,0 +1,57 @@ +package image + +import ( + "os" + "testing" + + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli/trust" + "github.com/docker/docker/registry" +) + +func unsetENV() { + os.Unsetenv("DOCKER_CONTENT_TRUST") + os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") +} + +func TestENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + output, err := trust.Server(indexInfo) + expectedStr := "https://notary-test.com:5000" + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} + +func TestHTTPENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + _, err := trust.Server(indexInfo) + if err == nil { + t.Fatal("Expected error with invalid scheme") + } +} + +func TestOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: true} + output, err := trust.Server(indexInfo) + if err != nil || output != registry.NotaryServer { + t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output) + } +} + +func TestNonOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: false} + output, err := trust.Server(indexInfo) + expectedStr := "https://" + indexInfo.Name + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/in.go b/vendor/github.com/docker/docker/cli/command/in.go new file mode 100644 index 0000000..7204b7a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/in.go @@ -0,0 +1,75 @@ +package command + +import ( + "errors" + "io" + "os" + "runtime" + + "github.com/docker/docker/pkg/term" +) + +// InStream is an input stream used by the DockerCli to read user input +type InStream struct { + in io.ReadCloser + fd uintptr + isTerminal bool + state *term.State +} + +func (i *InStream) Read(p []byte) (int, error) { + return i.in.Read(p) +} + +// Close implements the Closer interface +func (i *InStream) Close() error { + return i.in.Close() +} + +// FD returns the file descriptor number for this stream +func (i *InStream) FD() uintptr { + return i.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (i *InStream) IsTerminal() bool { + return i.isTerminal +} + +// SetRawTerminal sets raw mode on the input terminal +func (i *InStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !i.isTerminal { + return nil + } + i.state, err = term.SetRawTerminal(i.fd) + return err +} + +// RestoreTerminal restores normal mode to the terminal +func (i *InStream) RestoreTerminal() { + if i.state != nil { + term.RestoreTerminal(i.fd, i.state) + } +} + +// CheckTty checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !i.isTerminal { + eText := "the input device is not a TTY" + if runtime.GOOS == "windows" { + return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") + } + return errors.New(eText) + } + return nil +} + +// NewInStream returns a new InStream object from a ReadCloser +func NewInStream(in io.ReadCloser) *InStream { + fd, isTerminal := term.GetFdInfo(in) + return &InStream{in: in, fd: fd, isTerminal: isTerminal} +} diff --git a/vendor/github.com/docker/docker/cli/command/inspect/inspector.go b/vendor/github.com/docker/docker/cli/command/inspect/inspector.go new file mode 100644 index 0000000..1d81643 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/inspect/inspector.go @@ -0,0 +1,195 @@ +package inspect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/template" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/utils/templates" +) + +// Inspector defines an interface to implement to process elements +type Inspector interface { + Inspect(typedElement interface{}, rawElement []byte) error + Flush() error +} + +// TemplateInspector uses a text template to inspect elements. +type TemplateInspector struct { + outputStream io.Writer + buffer *bytes.Buffer + tmpl *template.Template +} + +// NewTemplateInspector creates a new inspector with a template. +func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { + return &TemplateInspector{ + outputStream: outputStream, + buffer: new(bytes.Buffer), + tmpl: tmpl, + } +} + +// NewTemplateInspectorFromString creates a new TemplateInspector from a string +// which is compiled into a template. +func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { + if tmplStr == "" { + return NewIndentedInspector(out), nil + } + + tmpl, err := templates.Parse(tmplStr) + if err != nil { + return nil, fmt.Errorf("Template parsing error: %s", err) + } + return NewTemplateInspector(out, tmpl), nil +} + +// GetRefFunc is a function which used by Inspect to fetch an object from a +// reference +type GetRefFunc func(ref string) (interface{}, []byte, error) + +// Inspect fetches objects by reference using GetRefFunc and writes the json +// representation to the output writer. +func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { + inspector, err := NewTemplateInspectorFromString(out, tmplStr) + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + var inspectErr error + for _, ref := range references { + element, raw, err := getRef(ref) + if err != nil { + inspectErr = err + break + } + + if err := inspector.Inspect(element, raw); err != nil { + inspectErr = err + break + } + } + + if err := inspector.Flush(); err != nil { + logrus.Errorf("%s\n", err) + } + + if inspectErr != nil { + return cli.StatusError{StatusCode: 1, Status: inspectErr.Error()} + } + return nil +} + +// Inspect executes the inspect template. +// It decodes the raw element into a map if the initial execution fails. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { + buffer := new(bytes.Buffer) + if err := i.tmpl.Execute(buffer, typedElement); err != nil { + if rawElement == nil { + return fmt.Errorf("Template parsing error: %v", err) + } + return i.tryRawInspectFallback(rawElement) + } + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// tryRawInspectFallback executes the inspect template with a raw interface. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error { + var raw interface{} + buffer := new(bytes.Buffer) + rdr := bytes.NewReader(rawElement) + dec := json.NewDecoder(rdr) + + if rawErr := dec.Decode(&raw); rawErr != nil { + return fmt.Errorf("unable to read inspect data: %v", rawErr) + } + + tmplMissingKey := i.tmpl.Option("missingkey=error") + if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { + return fmt.Errorf("Template parsing error: %v", rawErr) + } + + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. +func (i *TemplateInspector) Flush() error { + if i.buffer.Len() == 0 { + _, err := io.WriteString(i.outputStream, "\n") + return err + } + _, err := io.Copy(i.outputStream, i.buffer) + return err +} + +// IndentedInspector uses a buffer to stop the indented representation of an element. +type IndentedInspector struct { + outputStream io.Writer + elements []interface{} + rawElements [][]byte +} + +// NewIndentedInspector generates a new IndentedInspector. +func NewIndentedInspector(outputStream io.Writer) Inspector { + return &IndentedInspector{ + outputStream: outputStream, + } +} + +// Inspect writes the raw element with an indented json format. +func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { + if rawElement != nil { + i.rawElements = append(i.rawElements, rawElement) + } else { + i.elements = append(i.elements, typedElement) + } + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. +func (i *IndentedInspector) Flush() error { + if len(i.elements) == 0 && len(i.rawElements) == 0 { + _, err := io.WriteString(i.outputStream, "[]\n") + return err + } + + var buffer io.Reader + if len(i.rawElements) > 0 { + bytesBuffer := new(bytes.Buffer) + bytesBuffer.WriteString("[") + for idx, r := range i.rawElements { + bytesBuffer.Write(r) + if idx < len(i.rawElements)-1 { + bytesBuffer.WriteString(",") + } + } + bytesBuffer.WriteString("]") + indented := new(bytes.Buffer) + if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { + return err + } + buffer = indented + } else { + b, err := json.MarshalIndent(i.elements, "", " ") + if err != nil { + return err + } + buffer = bytes.NewReader(b) + } + + if _, err := io.Copy(i.outputStream, buffer); err != nil { + return err + } + _, err := io.WriteString(i.outputStream, "\n") + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go b/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go new file mode 100644 index 0000000..1ce1593 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go @@ -0,0 +1,221 @@ +package inspect + +import ( + "bytes" + "strings" + "testing" + + "github.com/docker/docker/utils/templates" +) + +type testElement struct { + DNS string `json:"Dns"` +} + +func TestTemplateInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "\n" { + t.Fatalf("Expected `\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorTemplateError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Foo}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + err = i.Inspect(testElement{"0.0.0.0"}, nil) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorRawFallback(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorRawFallbackError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n1.1.1.1\n" { + t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, got `%s`", b.String()) + } +} + +func TestIndentedInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + }, + { + "Dns": "1.1.1.1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := "[]\n" + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorRawElements(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0", + "Node": "0" + }, + { + "Dns": "1.1.1.1", + "Node": "1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/network/cmd.go b/vendor/github.com/docker/docker/cli/command/network/cmd.go new file mode 100644 index 0000000..ab8393c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/cmd.go @@ -0,0 +1,28 @@ +package network + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewNetworkCommand returns a cobra command for `network` subcommands +func NewNetworkCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "network", + Short: "Manage networks", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newConnectCommand(dockerCli), + newCreateCommand(dockerCli), + newDisconnectCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/network/connect.go b/vendor/github.com/docker/docker/cli/command/network/connect.go new file mode 100644 index 0000000..c4b676e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/connect.go @@ -0,0 +1,64 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type connectOptions struct { + network string + container string + ipaddress string + ipv6address string + links opts.ListOpts + aliases []string + linklocalips []string +} + +func newConnectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := connectOptions{ + links: opts.NewListOpts(runconfigopts.ValidateLink), + } + + cmd := &cobra.Command{ + Use: "connect [OPTIONS] NETWORK CONTAINER", + Short: "Connect a container to a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.network = args[0] + opts.container = args[1] + return runConnect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.ipaddress, "ip", "", "IP Address") + flags.StringVar(&opts.ipv6address, "ip6", "", "IPv6 Address") + flags.Var(&opts.links, "link", "Add link to another container") + flags.StringSliceVar(&opts.aliases, "alias", []string{}, "Add network-scoped alias for the container") + flags.StringSliceVar(&opts.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") + + return cmd +} + +func runConnect(dockerCli *command.DockerCli, opts connectOptions) error { + client := dockerCli.Client() + + epConfig := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: opts.ipaddress, + IPv6Address: opts.ipv6address, + LinkLocalIPs: opts.linklocalips, + }, + Links: opts.links.GetAll(), + Aliases: opts.aliases, + } + + return client.NetworkConnect(context.Background(), opts.network, opts.container, epConfig) +} diff --git a/vendor/github.com/docker/docker/cli/command/network/create.go b/vendor/github.com/docker/docker/cli/command/network/create.go new file mode 100644 index 0000000..abc494e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/create.go @@ -0,0 +1,226 @@ +package network + +import ( + "fmt" + "net" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts + internal bool + ipv6 bool + attachable bool + + ipamDriver string + ipamSubnet []string + ipamIPRange []string + ipamGateway []string + ipamAux opts.MapOpts + ipamOpt opts.MapOpts +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + ipamAux: *opts.NewMapOpts(nil, nil), + ipamOpt: *opts.NewMapOpts(nil, nil), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] NETWORK", + Short: "Create a network", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.name = args[0] + return runCreate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.driver, "driver", "d", "bridge", "Driver to manage the Network") + flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&opts.labels, "label", "Set metadata on a network") + flags.BoolVar(&opts.internal, "internal", false, "Restrict external access to the network") + flags.BoolVar(&opts.ipv6, "ipv6", false, "Enable IPv6 networking") + flags.BoolVar(&opts.attachable, "attachable", false, "Enable manual container attachment") + + flags.StringVar(&opts.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") + flags.StringSliceVar(&opts.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") + flags.StringSliceVar(&opts.ipamIPRange, "ip-range", []string{}, "Allocate container ip from a sub-range") + flags.StringSliceVar(&opts.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 Gateway for the master subnet") + + flags.Var(&opts.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") + flags.Var(&opts.ipamOpt, "ipam-opt", "Set IPAM driver specific options") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts createOptions) error { + client := dockerCli.Client() + + ipamCfg, err := consolidateIpam(opts.ipamSubnet, opts.ipamIPRange, opts.ipamGateway, opts.ipamAux.GetAll()) + if err != nil { + return err + } + + // Construct network create request body + nc := types.NetworkCreate{ + Driver: opts.driver, + Options: opts.driverOpts.GetAll(), + IPAM: &network.IPAM{ + Driver: opts.ipamDriver, + Config: ipamCfg, + Options: opts.ipamOpt.GetAll(), + }, + CheckDuplicate: true, + Internal: opts.internal, + EnableIPv6: opts.ipv6, + Attachable: opts.attachable, + Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), + } + + resp, err := client.NetworkCreate(context.Background(), opts.name, nc) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) + return nil +} + +// Consolidates the ipam configuration as a group from different related configurations +// user can configure network with multiple non-overlapping subnets and hence it is +// possible to correlate the various related parameters and consolidate them. +// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into +// structured ipam data. +func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { + if len(subnets) < len(ranges) || len(subnets) < len(gateways) { + return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") + } + iData := map[string]*network.IPAMConfig{} + + // Populate non-overlapping subnets into consolidation map + for _, s := range subnets { + for k := range iData { + ok1, err := subnetMatches(s, k) + if err != nil { + return nil, err + } + ok2, err := subnetMatches(k, s) + if err != nil { + return nil, err + } + if ok1 || ok2 { + return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") + } + } + iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} + } + + // Validate and add valid ip ranges + for _, r := range ranges { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, r) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].IPRange != "" { + return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) + } + d := iData[s] + d.IPRange = r + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for range %s", r) + } + } + + // Validate and add valid gateways + for _, g := range gateways { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, g) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].Gateway != "" { + return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) + } + d := iData[s] + d.Gateway = g + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for gateway %s", g) + } + } + + // Validate and add aux-addresses + for key, aa := range auxaddrs { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, aa) + if err != nil { + return nil, err + } + if !ok { + continue + } + iData[s].AuxAddress[key] = aa + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) + } + } + + idl := []network.IPAMConfig{} + for _, v := range iData { + idl = append(idl, *v) + } + return idl, nil +} + +func subnetMatches(subnet, data string) (bool, error) { + var ( + ip net.IP + ) + + _, s, err := net.ParseCIDR(subnet) + if err != nil { + return false, fmt.Errorf("Invalid subnet %s : %v", s, err) + } + + if strings.Contains(data, "/") { + ip, _, err = net.ParseCIDR(data) + if err != nil { + return false, fmt.Errorf("Invalid cidr %s : %v", data, err) + } + } else { + ip = net.ParseIP(data) + } + + return s.Contains(ip), nil +} diff --git a/vendor/github.com/docker/docker/cli/command/network/disconnect.go b/vendor/github.com/docker/docker/cli/command/network/disconnect.go new file mode 100644 index 0000000..c9d9c14 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/disconnect.go @@ -0,0 +1,41 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type disconnectOptions struct { + network string + container string + force bool +} + +func newDisconnectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := disconnectOptions{} + + cmd := &cobra.Command{ + Use: "disconnect [OPTIONS] NETWORK CONTAINER", + Short: "Disconnect a container from a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.network = args[0] + opts.container = args[1] + return runDisconnect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") + + return cmd +} + +func runDisconnect(dockerCli *command.DockerCli, opts disconnectOptions) error { + client := dockerCli.Client() + + return client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) +} diff --git a/vendor/github.com/docker/docker/cli/command/network/inspect.go b/vendor/github.com/docker/docker/cli/command/network/inspect.go new file mode 100644 index 0000000..1a86855 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/inspect.go @@ -0,0 +1,45 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NETWORK [NETWORK...]", + Short: "Display detailed information on one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getNetFunc := func(name string) (interface{}, []byte, error) { + return client.NetworkInspectWithRaw(ctx, name) + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/network/list.go b/vendor/github.com/docker/docker/cli/command/network/list.go new file mode 100644 index 0000000..1a5d285 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/list.go @@ -0,0 +1,76 @@ +package network + +import ( + "sort" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type byNetworkName []types.NetworkResource + +func (r byNetworkName) Len() int { return len(r) } +func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } + +type listOptions struct { + quiet bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display network IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate the output") + flags.StringVar(&opts.format, "format", "", "Pretty-print networks using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 'driver=bridge')") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + options := types.NetworkListOptions{Filters: opts.filter.Value()} + networkResources, err := client.NetworkList(context.Background(), options) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().NetworksFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byNetworkName(networkResources)) + + networksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewNetworkFormat(format, opts.quiet), + Trunc: !opts.noTrunc, + } + return formatter.NetworkWrite(networksCtx, networkResources) +} diff --git a/vendor/github.com/docker/docker/cli/command/network/prune.go b/vendor/github.com/docker/docker/cli/command/network/prune.go new file mode 100644 index 0000000..9f1979e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/prune.go @@ -0,0 +1,73 @@ +package network + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool +} + +// NewPruneCommand returns a new cobra prune command for networks +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + + return cmd +} + +const warning = `WARNING! This will remove all networks not used by at least one container. +Are you sure you want to continue?` + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (output string, err error) { + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().NetworksPrune(context.Background(), filters.Args{}) + if err != nil { + return + } + + if len(report.NetworksDeleted) > 0 { + output = "Deleted Networks:\n" + for _, id := range report.NetworksDeleted { + output += id + "\n" + } + } + + return +} + +// RunPrune calls the Network Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { + output, err := runPrune(dockerCli, pruneOptions{force: true}) + return 0, output, err +} diff --git a/vendor/github.com/docker/docker/cli/command/network/remove.go b/vendor/github.com/docker/docker/cli/command/network/remove.go new file mode 100644 index 0000000..2034b87 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/remove.go @@ -0,0 +1,43 @@ +package network + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "rm NETWORK [NETWORK...]", + Aliases: []string{"remove"}, + Short: "Remove one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } +} + +func runRemove(dockerCli *command.DockerCli, networks []string) error { + client := dockerCli.Client() + ctx := context.Background() + status := 0 + + for _, name := range networks { + if err := client.NetworkRemove(ctx, name); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/cmd.go b/vendor/github.com/docker/docker/cli/command/node/cmd.go new file mode 100644 index 0000000..e71b919 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/cmd.go @@ -0,0 +1,43 @@ +package node + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// NewNodeCommand returns a cobra command for `node` subcommands +func NewNodeCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "node", + Short: "Manage Swarm nodes", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newDemoteCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newPromoteCommand(dockerCli), + newRemoveCommand(dockerCli), + newPsCommand(dockerCli), + newUpdateCommand(dockerCli), + ) + return cmd +} + +// Reference returns the reference of a node. The special value "self" for a node +// reference is mapped to the current node, hence the node ID is retrieved using +// the `/info` endpoint. +func Reference(ctx context.Context, client apiclient.APIClient, ref string) (string, error) { + if ref == "self" { + info, err := client.Info(ctx) + if err != nil { + return "", err + } + return info.Swarm.NodeID, nil + } + return ref, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/demote.go b/vendor/github.com/docker/docker/cli/command/node/demote.go new file mode 100644 index 0000000..33f86c6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/demote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newDemoteCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "demote NODE [NODE...]", + Short: "Demote one or more nodes from manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDemote(dockerCli, args) + }, + } +} + +func runDemote(dockerCli *command.DockerCli, nodes []string) error { + demote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleWorker { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a worker.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleWorker + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, demote, success) +} diff --git a/vendor/github.com/docker/docker/cli/command/node/inspect.go b/vendor/github.com/docker/docker/cli/command/node/inspect.go new file mode 100644 index 0000000..fde7018 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/inspect.go @@ -0,0 +1,144 @@ +package node + +import ( + "fmt" + "io" + "sort" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + nodeIds []string + format string + pretty bool +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] self|NODE [NODE...]", + Short: "Display detailed information on one or more nodes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.nodeIds = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + getRef := func(ref string) (interface{}, []byte, error) { + nodeRef, err := Reference(ctx, client, ref) + if err != nil { + return nil, nil, err + } + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + return node, nil, err + } + + if !opts.pretty { + return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef) + } + return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef) +} + +func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error { + for idx, ref := range refs { + obj, _, err := getRef(ref) + if err != nil { + return err + } + printNode(out, obj.(swarm.Node)) + + // TODO: better way to do this? + // print extra space between objects, but not after the last one + if idx+1 != len(refs) { + fmt.Fprintf(out, "\n\n") + } else { + fmt.Fprintf(out, "\n") + } + } + return nil +} + +// TODO: use a template +func printNode(out io.Writer, node swarm.Node) { + fmt.Fprintf(out, "ID:\t\t\t%s\n", node.ID) + ioutils.FprintfIfNotEmpty(out, "Name:\t\t\t%s\n", node.Spec.Name) + if node.Spec.Labels != nil { + fmt.Fprintln(out, "Labels:") + for k, v := range node.Spec.Labels { + fmt.Fprintf(out, " - %s = %s\n", k, v) + } + } + + ioutils.FprintfIfNotEmpty(out, "Hostname:\t\t%s\n", node.Description.Hostname) + fmt.Fprintf(out, "Joined at:\t\t%s\n", command.PrettyPrint(node.CreatedAt)) + fmt.Fprintln(out, "Status:") + fmt.Fprintf(out, " State:\t\t\t%s\n", command.PrettyPrint(node.Status.State)) + ioutils.FprintfIfNotEmpty(out, " Message:\t\t%s\n", command.PrettyPrint(node.Status.Message)) + fmt.Fprintf(out, " Availability:\t\t%s\n", command.PrettyPrint(node.Spec.Availability)) + ioutils.FprintfIfNotEmpty(out, " Address:\t\t%s\n", command.PrettyPrint(node.Status.Addr)) + + if node.ManagerStatus != nil { + fmt.Fprintln(out, "Manager Status:") + fmt.Fprintf(out, " Address:\t\t%s\n", node.ManagerStatus.Addr) + fmt.Fprintf(out, " Raft Status:\t\t%s\n", command.PrettyPrint(node.ManagerStatus.Reachability)) + leader := "No" + if node.ManagerStatus.Leader { + leader = "Yes" + } + fmt.Fprintf(out, " Leader:\t\t%s\n", leader) + } + + fmt.Fprintln(out, "Platform:") + fmt.Fprintf(out, " Operating System:\t%s\n", node.Description.Platform.OS) + fmt.Fprintf(out, " Architecture:\t\t%s\n", node.Description.Platform.Architecture) + + fmt.Fprintln(out, "Resources:") + fmt.Fprintf(out, " CPUs:\t\t\t%d\n", node.Description.Resources.NanoCPUs/1e9) + fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(node.Description.Resources.MemoryBytes))) + + var pluginTypes []string + pluginNamesByType := map[string][]string{} + for _, p := range node.Description.Engine.Plugins { + // append to pluginTypes only if not done previously + if _, ok := pluginNamesByType[p.Type]; !ok { + pluginTypes = append(pluginTypes, p.Type) + } + pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name) + } + + if len(pluginTypes) > 0 { + fmt.Fprintln(out, "Plugins:") + sort.Strings(pluginTypes) // ensure stable output + for _, pluginType := range pluginTypes { + fmt.Fprintf(out, " %s:\t\t%s\n", pluginType, strings.Join(pluginNamesByType[pluginType], ", ")) + } + } + fmt.Fprintf(out, "Engine Version:\t\t%s\n", node.Description.Engine.EngineVersion) + + if len(node.Description.Engine.Labels) != 0 { + fmt.Fprintln(out, "Engine Labels:") + for k, v := range node.Description.Engine.Labels { + fmt.Fprintf(out, " - %s = %s\n", k, v) + } + } +} diff --git a/vendor/github.com/docker/docker/cli/command/node/list.go b/vendor/github.com/docker/docker/cli/command/node/list.go new file mode 100644 index 0000000..9cacdcf --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/list.go @@ -0,0 +1,115 @@ +package node + +import ( + "fmt" + "io" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +const ( + listItemFmt = "%s\t%s\t%s\t%s\t%s\n" +) + +type listOptions struct { + quiet bool + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List nodes in the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + out := dockerCli.Out() + ctx := context.Background() + + nodes, err := client.NodeList( + ctx, + types.NodeListOptions{Filters: opts.filter.Value()}) + if err != nil { + return err + } + + if len(nodes) > 0 && !opts.quiet { + // only non-empty nodes and not quiet, should we call /info api + info, err := client.Info(ctx) + if err != nil { + return err + } + printTable(out, nodes, info) + } else if !opts.quiet { + // no nodes and not quiet, print only one line with columns ID, HOSTNAME, ... + printTable(out, nodes, types.Info{}) + } else { + printQuiet(out, nodes) + } + + return nil +} + +func printTable(out io.Writer, nodes []swarm.Node, info types.Info) { + writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + + fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "STATUS", "AVAILABILITY", "MANAGER STATUS") + for _, node := range nodes { + name := node.Description.Hostname + availability := string(node.Spec.Availability) + + reachability := "" + if node.ManagerStatus != nil { + if node.ManagerStatus.Leader { + reachability = "Leader" + } else { + reachability = string(node.ManagerStatus.Reachability) + } + } + + ID := node.ID + if node.ID == info.Swarm.NodeID { + ID = ID + " *" + } + + fmt.Fprintf( + writer, + listItemFmt, + ID, + name, + command.PrettyPrint(string(node.Status.State)), + command.PrettyPrint(availability), + command.PrettyPrint(reachability)) + } +} + +func printQuiet(out io.Writer, nodes []swarm.Node) { + for _, node := range nodes { + fmt.Fprintln(out, node.ID) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/node/opts.go b/vendor/github.com/docker/docker/cli/command/node/opts.go new file mode 100644 index 0000000..7e6c55d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/opts.go @@ -0,0 +1,60 @@ +package node + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" +) + +type nodeOptions struct { + annotations + role string + availability string +} + +type annotations struct { + name string + labels opts.ListOpts +} + +func newNodeOptions() *nodeOptions { + return &nodeOptions{ + annotations: annotations{ + labels: opts.NewListOpts(nil), + }, + } +} + +func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) { + var spec swarm.NodeSpec + + spec.Annotations.Name = opts.annotations.name + spec.Annotations.Labels = runconfigopts.ConvertKVStringsToMap(opts.annotations.labels.GetAll()) + + switch swarm.NodeRole(strings.ToLower(opts.role)) { + case swarm.NodeRoleWorker: + spec.Role = swarm.NodeRoleWorker + case swarm.NodeRoleManager: + spec.Role = swarm.NodeRoleManager + case "": + default: + return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role) + } + + switch swarm.NodeAvailability(strings.ToLower(opts.availability)) { + case swarm.NodeAvailabilityActive: + spec.Availability = swarm.NodeAvailabilityActive + case swarm.NodeAvailabilityPause: + spec.Availability = swarm.NodeAvailabilityPause + case swarm.NodeAvailabilityDrain: + spec.Availability = swarm.NodeAvailabilityDrain + case "": + default: + return swarm.NodeSpec{}, fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) + } + + return spec, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/promote.go b/vendor/github.com/docker/docker/cli/command/node/promote.go new file mode 100644 index 0000000..f47d783 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/promote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newPromoteCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "promote NODE [NODE...]", + Short: "Promote one or more nodes to manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPromote(dockerCli, args) + }, + } +} + +func runPromote(dockerCli *command.DockerCli, nodes []string) error { + promote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleManager { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a manager.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleManager + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, promote, success) +} diff --git a/vendor/github.com/docker/docker/cli/command/node/ps.go b/vendor/github.com/docker/docker/cli/command/node/ps.go new file mode 100644 index 0000000..a034721 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/ps.go @@ -0,0 +1,93 @@ +package node + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/cli/command/task" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type psOptions struct { + nodeIDs []string + noResolve bool + noTrunc bool + filter opts.FilterOpt +} + +func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] [NODE...]", + Short: "List tasks running on one or more nodes, defaults to current node", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + opts.nodeIDs = []string{"self"} + + if len(args) != 0 { + opts.nodeIDs = args + } + + return runPs(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPs(dockerCli *command.DockerCli, opts psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var ( + errs []string + tasks []swarm.Task + ) + + for _, nodeID := range opts.nodeIDs { + nodeRef, err := Reference(ctx, client, nodeID) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + filter := opts.filter.Value() + filter.Add("node", node.ID) + + nodeTasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + tasks = append(tasks, nodeTasks...) + } + + if err := task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc); err != nil { + errs = append(errs, err.Error()) + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/remove.go b/vendor/github.com/docker/docker/cli/command/node/remove.go new file mode 100644 index 0000000..19b4a96 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/remove.go @@ -0,0 +1,56 @@ +package node + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := removeOptions{} + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] NODE [NODE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more nodes from the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force remove a node from the swarm") + return cmd +} + +func runRemove(dockerCli *command.DockerCli, args []string, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + + for _, nodeID := range args { + err := client.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: opts.force}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID) + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/update.go b/vendor/github.com/docker/docker/cli/command/node/update.go new file mode 100644 index 0000000..65339e1 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/update.go @@ -0,0 +1,121 @@ +package node + +import ( + "errors" + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/net/context" +) + +var ( + errNoRoleChange = errors.New("role was already set to the requested value") +) + +func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + nodeOpts := newNodeOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] NODE", + Short: "Update a node", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), args[0]) + }, + } + + flags := cmd.Flags() + flags.StringVar(&nodeOpts.role, flagRole, "", "Role of the node (worker/manager)") + flags.StringVar(&nodeOpts.availability, flagAvailability, "", "Availability of the node (active/pause/drain)") + flags.Var(&nodeOpts.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)") + labelKeys := opts.NewListOpts(nil) + flags.Var(&labelKeys, flagLabelRemove, "Remove a node label if exists") + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, nodeID string) error { + success := func(_ string) { + fmt.Fprintln(dockerCli.Out(), nodeID) + } + return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success) +} + +func updateNodes(dockerCli *command.DockerCli, nodes []string, mergeNode func(node *swarm.Node) error, success func(nodeID string)) error { + client := dockerCli.Client() + ctx := context.Background() + + for _, nodeID := range nodes { + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + err = mergeNode(&node) + if err != nil { + if err == errNoRoleChange { + continue + } + return err + } + err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec) + if err != nil { + return err + } + success(nodeID) + } + return nil +} + +func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error { + return func(node *swarm.Node) error { + spec := &node.Spec + + if flags.Changed(flagRole) { + str, err := flags.GetString(flagRole) + if err != nil { + return err + } + spec.Role = swarm.NodeRole(str) + } + if flags.Changed(flagAvailability) { + str, err := flags.GetString(flagAvailability) + if err != nil { + return err + } + spec.Availability = swarm.NodeAvailability(str) + } + if spec.Annotations.Labels == nil { + spec.Annotations.Labels = make(map[string]string) + } + if flags.Changed(flagLabelAdd) { + labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for k, v := range runconfigopts.ConvertKVStringsToMap(labels) { + spec.Annotations.Labels[k] = v + } + } + if flags.Changed(flagLabelRemove) { + keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, k := range keys { + // if a key doesn't exist, fail the command explicitly + if _, exists := spec.Annotations.Labels[k]; !exists { + return fmt.Errorf("key %s doesn't exist in node's labels", k) + } + delete(spec.Annotations.Labels, k) + } + } + return nil + } +} + +const ( + flagRole = "role" + flagAvailability = "availability" + flagLabelAdd = "label-add" + flagLabelRemove = "label-rm" +) diff --git a/vendor/github.com/docker/docker/cli/command/out.go b/vendor/github.com/docker/docker/cli/command/out.go new file mode 100644 index 0000000..85718d7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/out.go @@ -0,0 +1,69 @@ +package command + +import ( + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/term" +) + +// OutStream is an output stream used by the DockerCli to write normal program +// output. +type OutStream struct { + out io.Writer + fd uintptr + isTerminal bool + state *term.State +} + +func (o *OutStream) Write(p []byte) (int, error) { + return o.out.Write(p) +} + +// FD returns the file descriptor number for this stream +func (o *OutStream) FD() uintptr { + return o.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (o *OutStream) IsTerminal() bool { + return o.isTerminal +} + +// SetRawTerminal sets raw mode on the output terminal +func (o *OutStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !o.isTerminal { + return nil + } + o.state, err = term.SetRawTerminalOutput(o.fd) + return err +} + +// RestoreTerminal restores normal mode to the terminal +func (o *OutStream) RestoreTerminal() { + if o.state != nil { + term.RestoreTerminal(o.fd, o.state) + } +} + +// GetTtySize returns the height and width in characters of the tty +func (o *OutStream) GetTtySize() (uint, uint) { + if !o.isTerminal { + return 0, 0 + } + ws, err := term.GetWinsize(o.fd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return uint(ws.Height), uint(ws.Width) +} + +// NewOutStream returns a new OutStream object from a Writer +func NewOutStream(out io.Writer) *OutStream { + fd, isTerminal := term.GetFdInfo(out) + return &OutStream{out: out, fd: fd, isTerminal: isTerminal} +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/cmd.go b/vendor/github.com/docker/docker/cli/command/plugin/cmd.go new file mode 100644 index 0000000..92c990a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/cmd.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +// NewPluginCommand returns a cobra command for `plugin` subcommands +func NewPluginCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "plugin", + Short: "Manage plugins", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + + cmd.AddCommand( + newDisableCommand(dockerCli), + newEnableCommand(dockerCli), + newInspectCommand(dockerCli), + newInstallCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newSetCommand(dockerCli), + newPushCommand(dockerCli), + newCreateCommand(dockerCli), + newUpgradeCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/create.go b/vendor/github.com/docker/docker/cli/command/plugin/create.go new file mode 100644 index 0000000..2aab1e9 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/create.go @@ -0,0 +1,125 @@ +package plugin + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/reference" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// validateTag checks if the given repoName can be resolved. +func validateTag(rawRepo string) error { + _, err := reference.ParseNamed(rawRepo) + + return err +} + +// validateConfig ensures that a valid config.json is available in the given path +func validateConfig(path string) error { + dt, err := os.Open(filepath.Join(path, "config.json")) + if err != nil { + return err + } + + m := types.PluginConfig{} + err = json.NewDecoder(dt).Decode(&m) + dt.Close() + + return err +} + +// validateContextDir validates the given dir and returns abs path on success. +func validateContextDir(contextDir string) (string, error) { + absContextDir, err := filepath.Abs(contextDir) + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", err + } + + if !stat.IsDir() { + return "", fmt.Errorf("context must be a directory") + } + + return absContextDir, nil +} + +type pluginCreateOptions struct { + repoName string + context string + compress bool +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + options := pluginCreateOptions{} + + cmd := &cobra.Command{ + Use: "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR", + Short: "Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.repoName = args[0] + options.context = args[1] + return runCreate(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&options.compress, "compress", false, "Compress the context using gzip") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, options pluginCreateOptions) error { + var ( + createCtx io.ReadCloser + err error + ) + + if err := validateTag(options.repoName); err != nil { + return err + } + + absContextDir, err := validateContextDir(options.context) + if err != nil { + return err + } + + if err := validateConfig(options.context); err != nil { + return err + } + + compression := archive.Uncompressed + if options.compress { + logrus.Debugf("compression enabled") + compression = archive.Gzip + } + + createCtx, err = archive.TarWithOptions(absContextDir, &archive.TarOptions{ + Compression: compression, + }) + + if err != nil { + return err + } + + ctx := context.Background() + + createOptions := types.PluginCreateOptions{RepoName: options.repoName} + if err = dockerCli.Client().PluginCreate(ctx, createCtx, createOptions); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), options.repoName) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/disable.go b/vendor/github.com/docker/docker/cli/command/plugin/disable.go new file mode 100644 index 0000000..07b0ec2 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/disable.go @@ -0,0 +1,36 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +func newDisableCommand(dockerCli *command.DockerCli) *cobra.Command { + var force bool + + cmd := &cobra.Command{ + Use: "disable [OPTIONS] PLUGIN", + Short: "Disable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDisable(dockerCli, args[0], force) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&force, "force", "f", false, "Force the disable of an active plugin") + return cmd +} + +func runDisable(dockerCli *command.DockerCli, name string, force bool) error { + if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/enable.go b/vendor/github.com/docker/docker/cli/command/plugin/enable.go new file mode 100644 index 0000000..77762f4 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/enable.go @@ -0,0 +1,47 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type enableOpts struct { + timeout int + name string +} + +func newEnableCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts enableOpts + + cmd := &cobra.Command{ + Use: "enable [OPTIONS] PLUGIN", + Short: "Enable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.name = args[0] + return runEnable(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVar(&opts.timeout, "timeout", 0, "HTTP client timeout (in seconds)") + return cmd +} + +func runEnable(dockerCli *command.DockerCli, opts *enableOpts) error { + name := opts.name + if opts.timeout < 0 { + return fmt.Errorf("negative timeout %d is invalid", opts.timeout) + } + + if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/inspect.go b/vendor/github.com/docker/docker/cli/command/plugin/inspect.go new file mode 100644 index 0000000..c2c7a0d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/inspect.go @@ -0,0 +1,42 @@ +package plugin + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + pluginNames []string + format string +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Display detailed information on one or more plugins", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.pluginNames = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + getRef := func(ref string) (interface{}, []byte, error) { + return client.PluginInspectWithRaw(ctx, ref) + } + + return inspect.Inspect(dockerCli.Out(), opts.pluginNames, opts.format, getRef) +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/install.go b/vendor/github.com/docker/docker/cli/command/plugin/install.go new file mode 100644 index 0000000..2c3170c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/install.go @@ -0,0 +1,208 @@ +package plugin + +import ( + "bufio" + "errors" + "fmt" + "strings" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/net/context" +) + +type pluginOptions struct { + remote string + localName string + grantPerms bool + disable bool + args []string + skipRemoteCheck bool +} + +func loadPullFlags(opts *pluginOptions, flags *pflag.FlagSet) { + flags.BoolVar(&opts.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin") + command.AddTrustedFlags(flags, true) +} + +func newInstallCommand(dockerCli *command.DockerCli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "install [OPTIONS] PLUGIN [KEY=VALUE...]", + Short: "Install a plugin", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.remote = args[0] + if len(args) > 1 { + options.args = args[1:] + } + return runInstall(dockerCli, options) + }, + } + + flags := cmd.Flags() + loadPullFlags(&options, flags) + flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install") + flags.StringVar(&options.localName, "alias", "", "Local name for plugin") + return cmd +} + +func getRepoIndexFromUnnormalizedRef(ref distreference.Named) (*registrytypes.IndexInfo, error) { + named, err := reference.ParseNamed(ref.Name()) + if err != nil { + return nil, err + } + + repoInfo, err := registry.ParseRepositoryInfo(named) + if err != nil { + return nil, err + } + + return repoInfo.Index, nil +} + +type pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { + repoInfo, err = s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return +} + +func newRegistryService() registry.Service { + return pluginRegistryService{ + Service: registry.NewService(registry.ServiceOptions{V2Only: true}), + } +} + +func buildPullConfig(ctx context.Context, dockerCli *command.DockerCli, opts pluginOptions, cmdName string) (types.PluginInstallOptions, error) { + // Parse name using distribution reference package to support name + // containing both tag and digest. Names with both tag and digest + // will be treated by the daemon as a pull by digest with + // an alias for the tag (if no alias is provided). + ref, err := distreference.ParseNamed(opts.remote) + if err != nil { + return types.PluginInstallOptions{}, err + } + + index, err := getRepoIndexFromUnnormalizedRef(ref) + if err != nil { + return types.PluginInstallOptions{}, err + } + + repoInfoIndex, err := getRepoIndexFromUnnormalizedRef(ref) + if err != nil { + return types.PluginInstallOptions{}, err + } + remote := ref.String() + + _, isCanonical := ref.(distreference.Canonical) + if command.IsTrusted() && !isCanonical { + var nt reference.NamedTagged + named, err := reference.ParseNamed(ref.Name()) + if err != nil { + return types.PluginInstallOptions{}, err + } + if tagged, ok := ref.(distreference.Tagged); ok { + nt, err = reference.WithTag(named, tagged.Tag()) + if err != nil { + return types.PluginInstallOptions{}, err + } + } else { + named = reference.WithDefaultTag(named) + nt = named.(reference.NamedTagged) + } + + ctx := context.Background() + trusted, err := image.TrustedReference(ctx, dockerCli, nt, newRegistryService()) + if err != nil { + return types.PluginInstallOptions{}, err + } + remote = trusted.String() + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return types.PluginInstallOptions{}, err + } + + registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfoIndex, cmdName) + + options := types.PluginInstallOptions{ + RegistryAuth: encodedAuth, + RemoteRef: remote, + Disabled: opts.disable, + AcceptAllPermissions: opts.grantPerms, + AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.remote), + // TODO: Rename PrivilegeFunc, it has nothing to do with privileges + PrivilegeFunc: registryAuthFunc, + Args: opts.args, + } + return options, nil +} + +func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error { + var localName string + if opts.localName != "" { + aref, err := reference.ParseNamed(opts.localName) + if err != nil { + return err + } + aref = reference.WithDefaultTag(aref) + if _, ok := aref.(reference.NamedTagged); !ok { + return fmt.Errorf("invalid name: %s", opts.localName) + } + localName = aref.String() + } + + ctx := context.Background() + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin install") + if err != nil { + return err + } + responseBody, err := dockerCli.Client().PluginInstall(ctx, localName, options) + if err != nil { + if strings.Contains(err.Error(), "target is image") { + return errors.New(err.Error() + " - Use `docker image pull`") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.remote) // todo: return proper values from the API for this result + return nil +} + +func acceptPrivileges(dockerCli *command.DockerCli, name string) func(privileges types.PluginPrivileges) (bool, error) { + return func(privileges types.PluginPrivileges) (bool, error) { + fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name) + for _, privilege := range privileges { + fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value) + } + + fmt.Fprint(dockerCli.Out(), "Do you grant the above permissions? [y/N] ") + reader := bufio.NewReader(dockerCli.In()) + line, _, err := reader.ReadLine() + if err != nil { + return false, err + } + return strings.ToLower(string(line)) == "y", nil + } +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/list.go b/vendor/github.com/docker/docker/cli/command/plugin/list.go new file mode 100644 index 0000000..8fd16da --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/list.go @@ -0,0 +1,63 @@ +package plugin + +import ( + "fmt" + "strings" + "text/tabwriter" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type listOptions struct { + noTrunc bool +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts listOptions + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Short: "List plugins", + Aliases: []string{"list"}, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + plugins, err := dockerCli.Client().PluginList(context.Background()) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintf(w, "ID \tNAME \tDESCRIPTION\tENABLED") + fmt.Fprintf(w, "\n") + + for _, p := range plugins { + id := p.ID + desc := strings.Replace(p.Config.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !opts.noTrunc { + id = stringid.TruncateID(p.ID) + desc = stringutils.Ellipsis(desc, 45) + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%v\n", id, p.Name, desc, p.Enabled) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/push.go b/vendor/github.com/docker/docker/cli/command/plugin/push.go new file mode 100644 index 0000000..9abb38e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/push.go @@ -0,0 +1,71 @@ +package plugin + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +func newPushCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "push [OPTIONS] PLUGIN[:TAG]", + Short: "Push a plugin to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPush(dockerCli, args[0]) + }, + } + + flags := cmd.Flags() + + command.AddTrustedFlags(flags, true) + + return cmd +} + +func runPush(dockerCli *command.DockerCli, name string) error { + named, err := reference.ParseNamed(name) // FIXME: validate + if err != nil { + return err + } + if reference.IsNameOnly(named) { + named = reference.WithDefaultTag(named) + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid name: %s", named.String()) + } + + ctx := context.Background() + + repoInfo, err := registry.ParseRepositoryInfo(named) + if err != nil { + return err + } + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + responseBody, err := dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth) + if err != nil { + return err + } + defer responseBody.Close() + + if command.IsTrusted() { + repoInfo.Class = "plugin" + return image.PushTrustedReference(dockerCli, repoInfo, named, authConfig, responseBody) + } + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/remove.go b/vendor/github.com/docker/docker/cli/command/plugin/remove.go new file mode 100644 index 0000000..9f3aba9 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/remove.go @@ -0,0 +1,55 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type rmOptions struct { + force bool + + plugins []string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Remove one or more plugins", + Aliases: []string{"remove"}, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.plugins = args + return runRemove(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of an active plugin") + return cmd +} + +func runRemove(dockerCli *command.DockerCli, opts *rmOptions) error { + ctx := context.Background() + + var errs cli.Errors + for _, name := range opts.plugins { + // TODO: pass names to api instead of making multiple api calls + if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil { + errs = append(errs, err) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + // Do not simplify to `return errs` because even if errs == nil, it is not a nil-error interface value. + if errs != nil { + return errs + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/set.go b/vendor/github.com/docker/docker/cli/command/plugin/set.go new file mode 100644 index 0000000..52b09fb --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/set.go @@ -0,0 +1,22 @@ +package plugin + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newSetCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "set PLUGIN KEY=VALUE [KEY=VALUE...]", + Short: "Change settings for a plugin", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:]) + }, + } + + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go b/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go new file mode 100644 index 0000000..d212cd7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go @@ -0,0 +1,100 @@ +package plugin + +import ( + "bufio" + "context" + "fmt" + "strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +func newUpgradeCommand(dockerCli *command.DockerCli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "upgrade [OPTIONS] PLUGIN [REMOTE]", + Short: "Upgrade an existing plugin", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.localName = args[0] + if len(args) == 2 { + options.remote = args[1] + } + return runUpgrade(dockerCli, options) + }, + } + + flags := cmd.Flags() + loadPullFlags(&options, flags) + flags.BoolVar(&options.skipRemoteCheck, "skip-remote-check", false, "Do not check if specified remote plugin matches existing plugin image") + return cmd +} + +func runUpgrade(dockerCli *command.DockerCli, opts pluginOptions) error { + ctx := context.Background() + p, _, err := dockerCli.Client().PluginInspectWithRaw(ctx, opts.localName) + if err != nil { + return fmt.Errorf("error reading plugin data: %v", err) + } + + if p.Enabled { + return fmt.Errorf("the plugin must be disabled before upgrading") + } + + opts.localName = p.Name + if opts.remote == "" { + opts.remote = p.PluginReference + } + remote, err := reference.ParseNamed(opts.remote) + if err != nil { + return errors.Wrap(err, "error parsing remote upgrade image reference") + } + remote = reference.WithDefaultTag(remote) + + old, err := reference.ParseNamed(p.PluginReference) + if err != nil { + return errors.Wrap(err, "error parsing current image reference") + } + old = reference.WithDefaultTag(old) + + fmt.Fprintf(dockerCli.Out(), "Upgrading plugin %s from %s to %s\n", p.Name, old, remote) + if !opts.skipRemoteCheck && remote.String() != old.String() { + _, err := fmt.Fprint(dockerCli.Out(), "Plugin images do not match, are you sure? ") + if err != nil { + return errors.Wrap(err, "error writing to stdout") + } + + rdr := bufio.NewReader(dockerCli.In()) + line, _, err := rdr.ReadLine() + if err != nil { + return errors.Wrap(err, "error reading from stdin") + } + if strings.ToLower(string(line)) != "y" { + return errors.New("canceling upgrade request") + } + } + + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin upgrade") + if err != nil { + return err + } + + responseBody, err := dockerCli.Client().PluginUpgrade(ctx, opts.localName, options) + if err != nil { + if strings.Contains(err.Error(), "target is image") { + return errors.New(err.Error() + " - Use `docker image pull`") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Upgraded plugin %s to %s\n", opts.localName, opts.remote) // todo: return proper values from the API for this result + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/prune/prune.go b/vendor/github.com/docker/docker/cli/command/prune/prune.go new file mode 100644 index 0000000..a022487 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/prune/prune.go @@ -0,0 +1,50 @@ +package prune + +import ( + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/container" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/cli/command/network" + "github.com/docker/docker/cli/command/volume" + "github.com/spf13/cobra" +) + +// NewContainerPruneCommand returns a cobra prune command for containers +func NewContainerPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return container.NewPruneCommand(dockerCli) +} + +// NewVolumePruneCommand returns a cobra prune command for volumes +func NewVolumePruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return volume.NewPruneCommand(dockerCli) +} + +// NewImagePruneCommand returns a cobra prune command for images +func NewImagePruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return image.NewPruneCommand(dockerCli) +} + +// NewNetworkPruneCommand returns a cobra prune command for Networks +func NewNetworkPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return network.NewPruneCommand(dockerCli) +} + +// RunContainerPrune executes a prune command for containers +func RunContainerPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return container.RunPrune(dockerCli) +} + +// RunVolumePrune executes a prune command for volumes +func RunVolumePrune(dockerCli *command.DockerCli) (uint64, string, error) { + return volume.RunPrune(dockerCli) +} + +// RunImagePrune executes a prune command for images +func RunImagePrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) { + return image.RunPrune(dockerCli, all) +} + +// RunNetworkPrune executes a prune command for networks +func RunNetworkPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return network.RunPrune(dockerCli) +} diff --git a/vendor/github.com/docker/docker/cli/command/registry.go b/vendor/github.com/docker/docker/cli/command/registry.go new file mode 100644 index 0000000..65f6b33 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/registry.go @@ -0,0 +1,186 @@ +package command + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" +) + +// ElectAuthServer returns the default registry to use (by asking the daemon) +func ElectAuthServer(ctx context.Context, cli *DockerCli) string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.Client().Info(ctx); err != nil { + fmt.Fprintf(cli.Out(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + +// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info +// for the given command. +func RegistryAuthenticationPrivilegedFunc(cli *DockerCli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) + authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry) + if err != nil { + return "", err + } + return EncodeAuthToBase64(authConfig) + } +} + +// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. +func ResolveAuthConfig(ctx context.Context, cli *DockerCli, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = ElectAuthServer(ctx, cli) + } + + a, _ := cli.CredentialsStore(configKey).Get(configKey) + return a +} + +// ConfigureAuth returns an AuthConfig from the specified user, password and server. +func ConfigureAuth(cli *DockerCli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { + // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.in = NewInStream(os.Stdin) + } + + if !isDefaultRegistry { + serverAddress = registry.ConvertToHostname(serverAddress) + } + + authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress) + if err != nil { + return authconfig, err + } + + // Some links documenting this: + // - https://code.google.com/archive/p/mintty/issues/56 + // - https://github.com/docker/docker/issues/15272 + // - https://mintty.github.io/ (compatibility) + // Linux will hit this if you attempt `cat | docker login`, and Windows + // will hit this if you attempt docker login from mintty where stdin + // is a pipe, not a character based console. + if flPassword == "" && !cli.In().IsTerminal() { + return authconfig, fmt.Errorf("Error: Cannot perform an interactive login from a non TTY device") + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a default registry (docker hub), then display the following message. + fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + promptWithDefault(cli.Out(), "Username", authconfig.Username) + flUser = readInput(cli.In(), cli.Out()) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + if flUser == "" { + return authconfig, fmt.Errorf("Error: Non-null Username Required") + } + if flPassword == "" { + oldState, err := term.SaveState(cli.In().FD()) + if err != nil { + return authconfig, err + } + fmt.Fprintf(cli.Out(), "Password: ") + term.DisableEcho(cli.In().FD(), oldState) + + flPassword = readInput(cli.In(), cli.Out()) + fmt.Fprint(cli.Out(), "\n") + + term.RestoreTerminal(cli.In().FD(), oldState) + if flPassword == "" { + return authconfig, fmt.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + + return authconfig, nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +func promptWithDefault(out io.Writer, prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(out, "%s: ", prompt) + } else { + fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) + } +} + +// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image +func RetrieveAuthTokenFromImage(ctx context.Context, cli *DockerCli, image string) (string, error) { + // Retrieve encoded auth token from the image reference + authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) + if err != nil { + return "", err + } + encodedAuth, err := EncodeAuthToBase64(authConfig) + if err != nil { + return "", err + } + return encodedAuth, nil +} + +// resolveAuthConfigFromImage retrieves that AuthConfig using the image string +func resolveAuthConfigFromImage(ctx context.Context, cli *DockerCli, image string) (types.AuthConfig, error) { + registryRef, err := reference.ParseNamed(image) + if err != nil { + return types.AuthConfig{}, err + } + repoInfo, err := registry.ParseRepositoryInfo(registryRef) + if err != nil { + return types.AuthConfig{}, err + } + return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil +} diff --git a/vendor/github.com/docker/docker/cli/command/registry/login.go b/vendor/github.com/docker/docker/cli/command/registry/login.go new file mode 100644 index 0000000..05b3bb0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/registry/login.go @@ -0,0 +1,85 @@ +package registry + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type loginOptions struct { + serverAddress string + user string + password string + email string +} + +// NewLoginCommand creates a new `docker login` command +func NewLoginCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts loginOptions + + cmd := &cobra.Command{ + Use: "login [OPTIONS] [SERVER]", + Short: "Log in to a Docker registry", + Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + opts.serverAddress = args[0] + } + return runLogin(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.user, "username", "u", "", "Username") + flags.StringVarP(&opts.password, "password", "p", "", "Password") + + // Deprecated in 1.11: Should be removed in docker 1.14 + flags.StringVarP(&opts.email, "email", "e", "", "Email") + flags.MarkDeprecated("email", "will be removed in 1.14.") + + return cmd +} + +func runLogin(dockerCli *command.DockerCli, opts loginOptions) error { + ctx := context.Background() + clnt := dockerCli.Client() + + var ( + serverAddress string + authServer = command.ElectAuthServer(ctx, dockerCli) + ) + if opts.serverAddress != "" { + serverAddress = opts.serverAddress + } else { + serverAddress = authServer + } + + isDefaultRegistry := serverAddress == authServer + + authConfig, err := command.ConfigureAuth(dockerCli, opts.user, opts.password, serverAddress, isDefaultRegistry) + if err != nil { + return err + } + response, err := clnt.RegistryLogin(ctx, authConfig) + if err != nil { + return err + } + if response.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = response.IdentityToken + } + if err := dockerCli.CredentialsStore(serverAddress).Store(authConfig); err != nil { + return fmt.Errorf("Error saving credentials: %v", err) + } + + if response.Status != "" { + fmt.Fprintln(dockerCli.Out(), response.Status) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/registry/logout.go b/vendor/github.com/docker/docker/cli/command/registry/logout.go new file mode 100644 index 0000000..877e60e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/registry/logout.go @@ -0,0 +1,77 @@ +package registry + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewLogoutCommand creates a new `docker login` command +func NewLogoutCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "logout [SERVER]", + Short: "Log out from a Docker registry", + Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var serverAddress string + if len(args) > 0 { + serverAddress = args[0] + } + return runLogout(dockerCli, serverAddress) + }, + } + + return cmd +} + +func runLogout(dockerCli *command.DockerCli, serverAddress string) error { + ctx := context.Background() + var isDefaultRegistry bool + + if serverAddress == "" { + serverAddress = command.ElectAuthServer(ctx, dockerCli) + isDefaultRegistry = true + } + + var ( + loggedIn bool + regsToLogout []string + hostnameAddress = serverAddress + regsToTry = []string{serverAddress} + ) + if !isDefaultRegistry { + hostnameAddress = registry.ConvertToHostname(serverAddress) + // the tries below are kept for backward compatibility where a user could have + // saved the registry in one of the following format. + regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress) + } + + // check if we're logged in based on the records in the config file + // which means it couldn't have user/pass cause they may be in the creds store + for _, s := range regsToTry { + if _, ok := dockerCli.ConfigFile().AuthConfigs[s]; ok { + loggedIn = true + regsToLogout = append(regsToLogout, s) + } + } + + if !loggedIn { + fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", hostnameAddress) + return nil + } + + fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress) + for _, r := range regsToLogout { + if err := dockerCli.CredentialsStore(r).Erase(r); err != nil { + fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/registry/search.go b/vendor/github.com/docker/docker/cli/command/registry/search.go new file mode 100644 index 0000000..124b4ae --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/registry/search.go @@ -0,0 +1,126 @@ +package registry + +import ( + "fmt" + "sort" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type searchOptions struct { + term string + noTrunc bool + limit int + filter opts.FilterOpt + + // Deprecated + stars uint + automated bool +} + +// NewSearchCommand creates a new `docker search` command +func NewSearchCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := searchOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "search [OPTIONS] TERM", + Short: "Search the Docker Hub for images", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.term = args[0] + return runSearch(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + flags.IntVar(&opts.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") + + flags.BoolVar(&opts.automated, "automated", false, "Only show automated builds") + flags.UintVarP(&opts.stars, "stars", "s", 0, "Only displays with at least x stars") + + flags.MarkDeprecated("automated", "use --filter=automated=true instead") + flags.MarkDeprecated("stars", "use --filter=stars=3 instead") + + return cmd +} + +func runSearch(dockerCli *command.DockerCli, opts searchOptions) error { + indexInfo, err := registry.ParseSearchIndexInfo(opts.term) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, indexInfo) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, indexInfo, "search") + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageSearchOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + Filters: opts.filter.Value(), + Limit: opts.limit, + } + + clnt := dockerCli.Client() + + unorderedResults, err := clnt.ImageSearch(ctx, opts.term, options) + if err != nil { + return err + } + + results := searchResultsByStars(unorderedResults) + sort.Sort(results) + + w := tabwriter.NewWriter(dockerCli.Out(), 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + // --automated and -s, --stars are deprecated since Docker 1.12 + if (opts.automated && !res.IsAutomated) || (int(opts.stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !opts.noTrunc { + desc = stringutils.Ellipsis(desc, 45) + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if res.IsAutomated { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// SearchResultsByStars sorts search results in descending order by number of stars. +type searchResultsByStars []registrytypes.SearchResult + +func (r searchResultsByStars) Len() int { return len(r) } +func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/vendor/github.com/docker/docker/cli/command/secret/cmd.go b/vendor/github.com/docker/docker/cli/command/secret/cmd.go new file mode 100644 index 0000000..79e6698 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/cmd.go @@ -0,0 +1,25 @@ +package secret + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewSecretCommand returns a cobra command for `secret` subcommands +func NewSecretCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "secret", + Short: "Manage Docker secrets", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newSecretListCommand(dockerCli), + newSecretCreateCommand(dockerCli), + newSecretInspectCommand(dockerCli), + newSecretRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/create.go b/vendor/github.com/docker/docker/cli/command/secret/create.go new file mode 100644 index 0000000..f4683a6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/create.go @@ -0,0 +1,79 @@ +package secret + +import ( + "fmt" + "io" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/system" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type createOptions struct { + name string + file string + labels opts.ListOpts +} + +func newSecretCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + createOpts := createOptions{ + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] SECRET file|-", + Short: "Create a secret from a file or STDIN as content", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + createOpts.name = args[0] + createOpts.file = args[1] + return runSecretCreate(dockerCli, createOpts) + }, + } + flags := cmd.Flags() + flags.VarP(&createOpts.labels, "label", "l", "Secret labels") + + return cmd +} + +func runSecretCreate(dockerCli *command.DockerCli, options createOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var in io.Reader = dockerCli.In() + if options.file != "-" { + file, err := system.OpenSequential(options.file) + if err != nil { + return err + } + in = file + defer file.Close() + } + + secretData, err := ioutil.ReadAll(in) + if err != nil { + return fmt.Errorf("Error reading content from %q: %v", options.file, err) + } + + spec := swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: options.name, + Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), + }, + Data: secretData, + } + + r, err := client.SecretCreate(ctx, spec) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), r.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/inspect.go b/vendor/github.com/docker/docker/cli/command/secret/inspect.go new file mode 100644 index 0000000..0a8bd4a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/inspect.go @@ -0,0 +1,45 @@ +package secret + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + names []string + format string +} + +func newSecretInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := inspectOptions{} + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SECRET [SECRET...]", + Short: "Display detailed information on one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runSecretInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runSecretInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + ids, err := getCliRequestedSecretIDs(ctx, client, opts.names) + if err != nil { + return err + } + getRef := func(id string) (interface{}, []byte, error) { + return client.SecretInspectWithRaw(ctx, id) + } + + return inspect.Inspect(dockerCli.Out(), ids, opts.format, getRef) +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/ls.go b/vendor/github.com/docker/docker/cli/command/secret/ls.go new file mode 100644 index 0000000..faeab31 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/ls.go @@ -0,0 +1,68 @@ +package secret + +import ( + "fmt" + "text/tabwriter" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type listOptions struct { + quiet bool +} + +func newSecretListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List secrets", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runSecretList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + + return cmd +} + +func runSecretList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + secrets, err := client.SecretList(ctx, types.SecretListOptions{}) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + if opts.quiet { + for _, s := range secrets { + fmt.Fprintf(w, "%s\n", s.ID) + } + } else { + fmt.Fprintf(w, "ID\tNAME\tCREATED\tUPDATED") + fmt.Fprintf(w, "\n") + + for _, s := range secrets { + created := units.HumanDuration(time.Now().UTC().Sub(s.Meta.CreatedAt)) + " ago" + updated := units.HumanDuration(time.Now().UTC().Sub(s.Meta.UpdatedAt)) + " ago" + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", s.ID, s.Spec.Annotations.Name, created, updated) + } + } + + w.Flush() + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/remove.go b/vendor/github.com/docker/docker/cli/command/secret/remove.go new file mode 100644 index 0000000..f45a619 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/remove.go @@ -0,0 +1,57 @@ +package secret + +import ( + "fmt" + "strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type removeOptions struct { + names []string +} + +func newSecretRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "rm SECRET [SECRET...]", + Aliases: []string{"remove"}, + Short: "Remove one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts := removeOptions{ + names: args, + } + return runSecretRemove(dockerCli, opts) + }, + } +} + +func runSecretRemove(dockerCli *command.DockerCli, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + ids, err := getCliRequestedSecretIDs(ctx, client, opts.names) + if err != nil { + return err + } + + var errs []string + + for _, id := range ids { + if err := client.SecretRemove(ctx, id); err != nil { + errs = append(errs, err.Error()) + continue + } + + fmt.Fprintln(dockerCli.Out(), id) + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/utils.go b/vendor/github.com/docker/docker/cli/command/secret/utils.go new file mode 100644 index 0000000..11d31ff --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/utils.go @@ -0,0 +1,76 @@ +package secret + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +// GetSecretsByNameOrIDPrefixes returns secrets given a list of ids or names +func GetSecretsByNameOrIDPrefixes(ctx context.Context, client client.APIClient, terms []string) ([]swarm.Secret, error) { + args := filters.NewArgs() + for _, n := range terms { + args.Add("names", n) + args.Add("id", n) + } + + return client.SecretList(ctx, types.SecretListOptions{ + Filters: args, + }) +} + +func getCliRequestedSecretIDs(ctx context.Context, client client.APIClient, terms []string) ([]string, error) { + secrets, err := GetSecretsByNameOrIDPrefixes(ctx, client, terms) + if err != nil { + return nil, err + } + + if len(secrets) > 0 { + found := make(map[string]struct{}) + next: + for _, term := range terms { + // attempt to lookup secret by full ID + for _, s := range secrets { + if s.ID == term { + found[s.ID] = struct{}{} + continue next + } + } + // attempt to lookup secret by full name + for _, s := range secrets { + if s.Spec.Annotations.Name == term { + found[s.ID] = struct{}{} + continue next + } + } + // attempt to lookup secret by partial ID (prefix) + // return error if more than one matches found (ambiguous) + n := 0 + for _, s := range secrets { + if strings.HasPrefix(s.ID, term) { + found[s.ID] = struct{}{} + n++ + } + } + if n > 1 { + return nil, fmt.Errorf("secret %s is ambiguous (%d matches found)", term, n) + } + } + + // We already collected all the IDs found. + // Now we will remove duplicates by converting the map to slice + ids := []string{} + for id := range found { + ids = append(ids, id) + } + + return ids, nil + } + + return terms, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/cmd.go b/vendor/github.com/docker/docker/cli/command/service/cmd.go new file mode 100644 index 0000000..796fe92 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/cmd.go @@ -0,0 +1,29 @@ +package service + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewServiceCommand returns a cobra command for `service` subcommands +func NewServiceCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "service", + Short: "Manage services", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newPsCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newScaleCommand(dockerCli), + newUpdateCommand(dockerCli), + newLogsCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/service/create.go b/vendor/github.com/docker/docker/cli/command/service/create.go new file mode 100644 index 0000000..1355c19 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/create.go @@ -0,0 +1,100 @@ +package service + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := newServiceOptions() + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new service", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + if len(args) > 1 { + opts.args = args[1:] + } + return runCreate(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)") + flags.StringVar(&opts.name, flagName, "", "Service name") + + addServiceFlags(cmd, opts) + + flags.VarP(&opts.labels, flagLabel, "l", "Service labels") + flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels") + flags.VarP(&opts.env, flagEnv, "e", "Set environment variables") + flags.Var(&opts.envFile, flagEnvFile, "Read in a file of environment variables") + flags.Var(&opts.mounts, flagMount, "Attach a filesystem mount to the service") + flags.Var(&opts.constraints, flagConstraint, "Placement constraints") + flags.Var(&opts.networks, flagNetwork, "Network attachments") + flags.Var(&opts.secrets, flagSecret, "Specify secrets to expose to the service") + flags.VarP(&opts.endpoint.publishPorts, flagPublish, "p", "Publish a port as a node port") + flags.Var(&opts.groups, flagGroup, "Set one or more supplementary user groups for the container") + flags.Var(&opts.dns, flagDNS, "Set custom DNS servers") + flags.Var(&opts.dnsOption, flagDNSOption, "Set DNS options") + flags.Var(&opts.dnsSearch, flagDNSSearch, "Set custom DNS search domains") + flags.Var(&opts.hosts, flagHost, "Set one or more custom host-to-IP mappings (host:ip)") + + flags.SetInterspersed(false) + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts *serviceOptions) error { + apiClient := dockerCli.Client() + createOpts := types.ServiceCreateOptions{} + + service, err := opts.ToService() + if err != nil { + return err + } + + specifiedSecrets := opts.secrets.Value() + if len(specifiedSecrets) > 0 { + // parse and validate secrets + secrets, err := ParseSecrets(apiClient, specifiedSecrets) + if err != nil { + return err + } + service.TaskTemplate.ContainerSpec.Secrets = secrets + + } + + ctx := context.Background() + + if err := resolveServiceImageDigest(dockerCli, &service); err != nil { + return err + } + + // only send auth if flag was set + if opts.registryAuth { + // Retrieve encoded auth token from the image reference + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, opts.image) + if err != nil { + return err + } + createOpts.EncodedRegistryAuth = encodedAuth + } + + response, err := apiClient.ServiceCreate(ctx, service, createOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/inspect.go b/vendor/github.com/docker/docker/cli/command/service/inspect.go new file mode 100644 index 0000000..deb701b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/inspect.go @@ -0,0 +1,84 @@ +package service + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + refs []string + format string + pretty bool +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SERVICE [SERVICE...]", + Short: "Display detailed information on one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + + if opts.pretty && len(opts.format) > 0 { + return fmt.Errorf("--format is incompatible with human friendly format") + } + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.pretty { + opts.format = "pretty" + } + + getRef := func(ref string) (interface{}, []byte, error) { + service, _, err := client.ServiceInspectWithRaw(ctx, ref) + if err == nil || !apiclient.IsErrServiceNotFound(err) { + return service, nil, err + } + return nil, nil, fmt.Errorf("Error: no such service: %s", ref) + } + + f := opts.format + if len(f) == 0 { + f = "raw" + if len(dockerCli.ConfigFile().ServiceInspectFormat) > 0 { + f = dockerCli.ConfigFile().ServiceInspectFormat + } + } + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return fmt.Errorf("Cannot supply extra formatting options to the pretty template") + } + + serviceCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewServiceFormat(f), + } + + if err := formatter.ServiceInspectWrite(serviceCtx, opts.refs, getRef); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/inspect_test.go b/vendor/github.com/docker/docker/cli/command/service/inspect_test.go new file mode 100644 index 0000000..04a6508 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/inspect_test.go @@ -0,0 +1,129 @@ +package service + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/pkg/testutil/assert" +) + +func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string { + b := new(bytes.Buffer) + + endpointSpec := &swarm.EndpointSpec{ + Mode: "vip", + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + }, + }, + } + + two := uint64(2) + + s := swarm.Service{ + ID: "de179gar9d0o7ltdybungplod", + Meta: swarm.Meta{ + Version: swarm.Version{Index: 315}, + CreatedAt: now, + UpdatedAt: now, + }, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "my_service", + Labels: map[string]string{"com.label": "foo"}, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "foo/bar@sha256:this_is_a_test", + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &two, + }, + }, + UpdateConfig: nil, + Networks: []swarm.NetworkAttachmentConfig{ + { + Target: "5vpyomhb6ievnk0i0o60gcnei", + Aliases: []string{"web"}, + }, + }, + EndpointSpec: endpointSpec, + }, + Endpoint: swarm.Endpoint{ + Spec: *endpointSpec, + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + PublishedPort: 30000, + }, + }, + VirtualIPs: []swarm.EndpointVirtualIP{ + { + NetworkID: "6o4107cj2jx9tihgb0jyts6pj", + Addr: "10.255.0.4/16", + }, + }, + }, + UpdateStatus: swarm.UpdateStatus{ + StartedAt: now, + CompletedAt: now, + }, + } + + ctx := formatter.Context{ + Output: b, + Format: format, + } + + err := formatter.ServiceInspectWrite(ctx, []string{"de179gar9d0o7ltdybungplod"}, func(ref string) (interface{}, []byte, error) { + return s, nil, nil + }) + if err != nil { + t.Fatal(err) + } + return b.String() +} + +func TestPrettyPrintWithNoUpdateConfig(t *testing.T) { + s := formatServiceInspect(t, formatter.NewServiceFormat("pretty"), time.Now()) + if strings.Contains(s, "UpdateStatus") { + t.Fatal("Pretty print failed before parsing UpdateStatus") + } +} + +func TestJSONFormatWithNoUpdateConfig(t *testing.T) { + now := time.Now() + // s1: [{"ID":..}] + // s2: {"ID":..} + s1 := formatServiceInspect(t, formatter.NewServiceFormat(""), now) + t.Log("// s1") + t.Logf("%s", s1) + s2 := formatServiceInspect(t, formatter.NewServiceFormat("{{json .}}"), now) + t.Log("// s2") + t.Logf("%s", s2) + var m1Wrap []map[string]interface{} + if err := json.Unmarshal([]byte(s1), &m1Wrap); err != nil { + t.Fatal(err) + } + if len(m1Wrap) != 1 { + t.Fatalf("strange s1=%s", s1) + } + m1 := m1Wrap[0] + t.Logf("m1=%+v", m1) + var m2 map[string]interface{} + if err := json.Unmarshal([]byte(s2), &m2); err != nil { + t.Fatal(err) + } + t.Logf("m2=%+v", m2) + assert.DeepEqual(t, m2, m1) +} diff --git a/vendor/github.com/docker/docker/cli/command/service/list.go b/vendor/github.com/docker/docker/cli/command/service/list.go new file mode 100644 index 0000000..7241260 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/list.go @@ -0,0 +1,158 @@ +package service + +import ( + "fmt" + "io" + "text/tabwriter" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/stringid" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +const ( + listItemFmt = "%s\t%s\t%s\t%s\t%s\n" +) + +type listOptions struct { + quiet bool + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List services", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + ctx := context.Background() + client := dockerCli.Client() + out := dockerCli.Out() + + services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: opts.filter.Value()}) + if err != nil { + return err + } + + if len(services) > 0 && !opts.quiet { + // only non-empty services and not quiet, should we call TaskList and NodeList api + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + + PrintNotQuiet(out, services, nodes, tasks) + } else if !opts.quiet { + // no services and not quiet, print only one line with columns ID, NAME, MODE, REPLICAS... + PrintNotQuiet(out, services, []swarm.Node{}, []swarm.Task{}) + } else { + PrintQuiet(out, services) + } + + return nil +} + +// PrintNotQuiet shows service list in a non-quiet way. +// Besides this, command `docker stack services xxx` will call this, too. +func PrintNotQuiet(out io.Writer, services []swarm.Service, nodes []swarm.Node, tasks []swarm.Task) { + activeNodes := make(map[string]struct{}) + for _, n := range nodes { + if n.Status.State != swarm.NodeStateDown { + activeNodes[n.ID] = struct{}{} + } + } + + running := map[string]int{} + tasksNoShutdown := map[string]int{} + + for _, task := range tasks { + if task.DesiredState != swarm.TaskStateShutdown { + tasksNoShutdown[task.ServiceID]++ + } + + if _, nodeActive := activeNodes[task.NodeID]; nodeActive && task.Status.State == swarm.TaskStateRunning { + running[task.ServiceID]++ + } + } + + printTable(out, services, running, tasksNoShutdown) +} + +func printTable(out io.Writer, services []swarm.Service, running, tasksNoShutdown map[string]int) { + writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + + fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "MODE", "REPLICAS", "IMAGE") + + for _, service := range services { + mode := "" + replicas := "" + if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + mode = "replicated" + replicas = fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas) + } else if service.Spec.Mode.Global != nil { + mode = "global" + replicas = fmt.Sprintf("%d/%d", running[service.ID], tasksNoShutdown[service.ID]) + } + image := service.Spec.TaskTemplate.ContainerSpec.Image + ref, err := distreference.ParseNamed(image) + if err == nil { + // update image string for display + namedTagged, ok := ref.(distreference.NamedTagged) + if ok { + image = namedTagged.Name() + ":" + namedTagged.Tag() + } + } + + fmt.Fprintf( + writer, + listItemFmt, + stringid.TruncateID(service.ID), + service.Spec.Name, + mode, + replicas, + image) + } +} + +// PrintQuiet shows service list in a quiet way. +// Besides this, command `docker stack services xxx` will call this, too. +func PrintQuiet(out io.Writer, services []swarm.Service) { + for _, service := range services { + fmt.Fprintln(out, service.ID) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/service/logs.go b/vendor/github.com/docker/docker/cli/command/service/logs.go new file mode 100644 index 0000000..19d3d9a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/logs.go @@ -0,0 +1,163 @@ +package service + +import ( + "bytes" + "fmt" + "io" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/pkg/stdcopy" + "github.com/spf13/cobra" +) + +type logsOptions struct { + noResolve bool + follow bool + since string + timestamps bool + details bool + tail string + + service string +} + +func newLogsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] SERVICE", + Short: "Fetch the logs of a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.service = args[0] + return runLogs(dockerCli, &opts) + }, + Tags: map[string]string{"experimental": ""}, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { + ctx := context.Background() + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + Details: opts.details, + } + + client := dockerCli.Client() + responseBody, err := client.ServiceLogs(ctx, opts.service, options) + if err != nil { + return err + } + defer responseBody.Close() + + resolver := idresolver.New(client, opts.noResolve) + + stdout := &logWriter{ctx: ctx, opts: opts, r: resolver, w: dockerCli.Out()} + stderr := &logWriter{ctx: ctx, opts: opts, r: resolver, w: dockerCli.Err()} + + // TODO(aluzzardi): Do an io.Copy for services with TTY enabled. + _, err = stdcopy.StdCopy(stdout, stderr, responseBody) + return err +} + +type logWriter struct { + ctx context.Context + opts *logsOptions + r *idresolver.IDResolver + w io.Writer +} + +func (lw *logWriter) Write(buf []byte) (int, error) { + contextIndex := 0 + numParts := 2 + if lw.opts.timestamps { + contextIndex++ + numParts++ + } + + parts := bytes.SplitN(buf, []byte(" "), numParts) + if len(parts) != numParts { + return 0, fmt.Errorf("invalid context in log message: %v", string(buf)) + } + + taskName, nodeName, err := lw.parseContext(string(parts[contextIndex])) + if err != nil { + return 0, err + } + + output := []byte{} + for i, part := range parts { + // First part doesn't get space separation. + if i > 0 { + output = append(output, []byte(" ")...) + } + + if i == contextIndex { + // TODO(aluzzardi): Consider constant padding. + output = append(output, []byte(fmt.Sprintf("%s@%s |", taskName, nodeName))...) + } else { + output = append(output, part...) + } + } + _, err = lw.w.Write(output) + if err != nil { + return 0, err + } + + return len(buf), nil +} + +func (lw *logWriter) parseContext(input string) (string, string, error) { + context := make(map[string]string) + + components := strings.Split(input, ",") + for _, component := range components { + parts := strings.SplitN(component, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid context: %s", input) + } + context[parts[0]] = parts[1] + } + + taskID, ok := context["com.docker.swarm.task.id"] + if !ok { + return "", "", fmt.Errorf("missing task id in context: %s", input) + } + taskName, err := lw.r.Resolve(lw.ctx, swarm.Task{}, taskID) + if err != nil { + return "", "", err + } + + nodeID, ok := context["com.docker.swarm.node.id"] + if !ok { + return "", "", fmt.Errorf("missing node id in context: %s", input) + } + nodeName, err := lw.r.Resolve(lw.ctx, swarm.Node{}, nodeID) + if err != nil { + return "", "", err + } + + return taskName, nodeName, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/opts.go b/vendor/github.com/docker/docker/cli/command/service/opts.go new file mode 100644 index 0000000..cbe544a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/opts.go @@ -0,0 +1,648 @@ +package service + +import ( + "encoding/csv" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type int64Value interface { + Value() int64 +} + +type memBytes int64 + +func (m *memBytes) String() string { + return units.BytesSize(float64(m.Value())) +} + +func (m *memBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = memBytes(val) + return err +} + +func (m *memBytes) Type() string { + return "bytes" +} + +func (m *memBytes) Value() int64 { + return int64(*m) +} + +// PositiveDurationOpt is an option type for time.Duration that uses a pointer. +// It bahave similarly to DurationOpt but only allows positive duration values. +type PositiveDurationOpt struct { + DurationOpt +} + +// Set a new value on the option. Setting a negative duration value will cause +// an error to be returned. +func (d *PositiveDurationOpt) Set(s string) error { + err := d.DurationOpt.Set(s) + if err != nil { + return err + } + if *d.DurationOpt.value < 0 { + return fmt.Errorf("duration cannot be negative") + } + return nil +} + +// DurationOpt is an option type for time.Duration that uses a pointer. This +// allows us to get nil values outside, instead of defaulting to 0 +type DurationOpt struct { + value *time.Duration +} + +// Set a new value on the option +func (d *DurationOpt) Set(s string) error { + v, err := time.ParseDuration(s) + d.value = &v + return err +} + +// Type returns the type of this option, which will be displayed in `--help` output +func (d *DurationOpt) Type() string { + return "duration" +} + +// String returns a string repr of this option +func (d *DurationOpt) String() string { + if d.value != nil { + return d.value.String() + } + return "" +} + +// Value returns the time.Duration +func (d *DurationOpt) Value() *time.Duration { + return d.value +} + +// Uint64Opt represents a uint64. +type Uint64Opt struct { + value *uint64 +} + +// Set a new value on the option +func (i *Uint64Opt) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + i.value = &v + return err +} + +// Type returns the type of this option, which will be displayed in `--help` output +func (i *Uint64Opt) Type() string { + return "uint" +} + +// String returns a string repr of this option +func (i *Uint64Opt) String() string { + if i.value != nil { + return fmt.Sprintf("%v", *i.value) + } + return "" +} + +// Value returns the uint64 +func (i *Uint64Opt) Value() *uint64 { + return i.value +} + +type floatValue float32 + +func (f *floatValue) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = floatValue(v) + return err +} + +func (f *floatValue) Type() string { + return "float" +} + +func (f *floatValue) String() string { + return strconv.FormatFloat(float64(*f), 'g', -1, 32) +} + +func (f *floatValue) Value() float32 { + return float32(*f) +} + +// SecretRequestSpec is a type for requesting secrets +type SecretRequestSpec struct { + source string + target string + uid string + gid string + mode os.FileMode +} + +// SecretOpt is a Value type for parsing secrets +type SecretOpt struct { + values []*SecretRequestSpec +} + +// Set a new secret value +func (o *SecretOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + spec := &SecretRequestSpec{ + source: "", + target: "", + uid: "0", + gid: "0", + mode: 0444, + } + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "source", "src": + spec.source = value + case "target": + tDir, _ := filepath.Split(value) + if tDir != "" { + return fmt.Errorf("target must not have a path") + } + spec.target = value + case "uid": + spec.uid = value + case "gid": + spec.gid = value + case "mode": + m, err := strconv.ParseUint(value, 0, 32) + if err != nil { + return fmt.Errorf("invalid mode specified: %v", err) + } + + spec.mode = os.FileMode(m) + default: + return fmt.Errorf("invalid field in secret request: %s", key) + } + } + + if spec.source == "" { + return fmt.Errorf("source is required") + } + + o.values = append(o.values, spec) + return nil +} + +// Type returns the type of this option +func (o *SecretOpt) Type() string { + return "secret" +} + +// String returns a string repr of this option +func (o *SecretOpt) String() string { + secrets := []string{} + for _, secret := range o.values { + repr := fmt.Sprintf("%s -> %s", secret.source, secret.target) + secrets = append(secrets, repr) + } + return strings.Join(secrets, ", ") +} + +// Value returns the secret requests +func (o *SecretOpt) Value() []*SecretRequestSpec { + return o.values +} + +type updateOptions struct { + parallelism uint64 + delay time.Duration + monitor time.Duration + onFailure string + maxFailureRatio floatValue +} + +type resourceOptions struct { + limitCPU opts.NanoCPUs + limitMemBytes memBytes + resCPU opts.NanoCPUs + resMemBytes memBytes +} + +func (r *resourceOptions) ToResourceRequirements() *swarm.ResourceRequirements { + return &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: r.limitCPU.Value(), + MemoryBytes: r.limitMemBytes.Value(), + }, + Reservations: &swarm.Resources{ + NanoCPUs: r.resCPU.Value(), + MemoryBytes: r.resMemBytes.Value(), + }, + } +} + +type restartPolicyOptions struct { + condition string + delay DurationOpt + maxAttempts Uint64Opt + window DurationOpt +} + +func (r *restartPolicyOptions) ToRestartPolicy() *swarm.RestartPolicy { + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyCondition(r.condition), + Delay: r.delay.Value(), + MaxAttempts: r.maxAttempts.Value(), + Window: r.window.Value(), + } +} + +func convertNetworks(networks []string) []swarm.NetworkAttachmentConfig { + nets := []swarm.NetworkAttachmentConfig{} + for _, network := range networks { + nets = append(nets, swarm.NetworkAttachmentConfig{Target: network}) + } + return nets +} + +type endpointOptions struct { + mode string + publishPorts opts.PortOpt +} + +func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec { + return &swarm.EndpointSpec{ + Mode: swarm.ResolutionMode(strings.ToLower(e.mode)), + Ports: e.publishPorts.Value(), + } +} + +type logDriverOptions struct { + name string + opts opts.ListOpts +} + +func newLogDriverOptions() logDriverOptions { + return logDriverOptions{opts: opts.NewListOpts(runconfigopts.ValidateEnv)} +} + +func (ldo *logDriverOptions) toLogDriver() *swarm.Driver { + if ldo.name == "" { + return nil + } + + // set the log driver only if specified. + return &swarm.Driver{ + Name: ldo.name, + Options: runconfigopts.ConvertKVStringsToMap(ldo.opts.GetAll()), + } +} + +type healthCheckOptions struct { + cmd string + interval PositiveDurationOpt + timeout PositiveDurationOpt + retries int + noHealthcheck bool +} + +func (opts *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) { + var healthConfig *container.HealthConfig + haveHealthSettings := opts.cmd != "" || + opts.interval.Value() != nil || + opts.timeout.Value() != nil || + opts.retries != 0 + if opts.noHealthcheck { + if haveHealthSettings { + return nil, fmt.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + healthConfig = &container.HealthConfig{Test: []string{"NONE"}} + } else if haveHealthSettings { + var test []string + if opts.cmd != "" { + test = []string{"CMD-SHELL", opts.cmd} + } + var interval, timeout time.Duration + if ptr := opts.interval.Value(); ptr != nil { + interval = *ptr + } + if ptr := opts.timeout.Value(); ptr != nil { + timeout = *ptr + } + healthConfig = &container.HealthConfig{ + Test: test, + Interval: interval, + Timeout: timeout, + Retries: opts.retries, + } + } + return healthConfig, nil +} + +// ValidatePort validates a string is in the expected format for a port definition +func ValidatePort(value string) (string, error) { + portMappings, err := nat.ParsePortSpec(value) + for _, portMapping := range portMappings { + if portMapping.Binding.HostIP != "" { + return "", fmt.Errorf("HostIP is not supported by a service.") + } + } + return value, err +} + +// convertExtraHostsToSwarmHosts converts an array of extra hosts in cli +// : +// into a swarmkit host format: +// IP_address canonical_hostname [aliases...] +// This assumes input value (:) has already been validated +func convertExtraHostsToSwarmHosts(extraHosts []string) []string { + hosts := []string{} + for _, extraHost := range extraHosts { + parts := strings.SplitN(extraHost, ":", 2) + hosts = append(hosts, fmt.Sprintf("%s %s", parts[1], parts[0])) + } + return hosts +} + +type serviceOptions struct { + name string + labels opts.ListOpts + containerLabels opts.ListOpts + image string + args []string + hostname string + env opts.ListOpts + envFile opts.ListOpts + workdir string + user string + groups opts.ListOpts + tty bool + mounts opts.MountOpt + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOption opts.ListOpts + hosts opts.ListOpts + + resources resourceOptions + stopGrace DurationOpt + + replicas Uint64Opt + mode string + + restartPolicy restartPolicyOptions + constraints opts.ListOpts + update updateOptions + networks opts.ListOpts + endpoint endpointOptions + + registryAuth bool + + logDriver logDriverOptions + + healthcheck healthCheckOptions + secrets opts.SecretOpt +} + +func newServiceOptions() *serviceOptions { + return &serviceOptions{ + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + constraints: opts.NewListOpts(nil), + containerLabels: opts.NewListOpts(runconfigopts.ValidateEnv), + env: opts.NewListOpts(runconfigopts.ValidateEnv), + envFile: opts.NewListOpts(nil), + groups: opts.NewListOpts(nil), + logDriver: newLogDriverOptions(), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOption: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + hosts: opts.NewListOpts(runconfigopts.ValidateExtraHost), + networks: opts.NewListOpts(nil), + } +} + +func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) { + var service swarm.ServiceSpec + + envVariables, err := runconfigopts.ReadKVStrings(opts.envFile.GetAll(), opts.env.GetAll()) + if err != nil { + return service, err + } + + currentEnv := make([]string, 0, len(envVariables)) + for _, env := range envVariables { // need to process each var, in order + k := strings.SplitN(env, "=", 2)[0] + for i, current := range currentEnv { // remove duplicates + if current == env { + continue // no update required, may hide this behind flag to preserve order of envVariables + } + if strings.HasPrefix(current, k+"=") { + currentEnv = append(currentEnv[:i], currentEnv[i+1:]...) + } + } + currentEnv = append(currentEnv, env) + } + + service = swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: opts.name, + Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: opts.image, + Args: opts.args, + Env: currentEnv, + Hostname: opts.hostname, + Labels: runconfigopts.ConvertKVStringsToMap(opts.containerLabels.GetAll()), + Dir: opts.workdir, + User: opts.user, + Groups: opts.groups.GetAll(), + TTY: opts.tty, + Mounts: opts.mounts.Value(), + DNSConfig: &swarm.DNSConfig{ + Nameservers: opts.dns.GetAll(), + Search: opts.dnsSearch.GetAll(), + Options: opts.dnsOption.GetAll(), + }, + Hosts: convertExtraHostsToSwarmHosts(opts.hosts.GetAll()), + StopGracePeriod: opts.stopGrace.Value(), + Secrets: nil, + }, + Networks: convertNetworks(opts.networks.GetAll()), + Resources: opts.resources.ToResourceRequirements(), + RestartPolicy: opts.restartPolicy.ToRestartPolicy(), + Placement: &swarm.Placement{ + Constraints: opts.constraints.GetAll(), + }, + LogDriver: opts.logDriver.toLogDriver(), + }, + Networks: convertNetworks(opts.networks.GetAll()), + Mode: swarm.ServiceMode{}, + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: opts.update.parallelism, + Delay: opts.update.delay, + Monitor: opts.update.monitor, + FailureAction: opts.update.onFailure, + MaxFailureRatio: opts.update.maxFailureRatio.Value(), + }, + EndpointSpec: opts.endpoint.ToEndpointSpec(), + } + + healthConfig, err := opts.healthcheck.toHealthConfig() + if err != nil { + return service, err + } + service.TaskTemplate.ContainerSpec.Healthcheck = healthConfig + + switch opts.mode { + case "global": + if opts.replicas.Value() != nil { + return service, fmt.Errorf("replicas can only be used with replicated mode") + } + + service.Mode.Global = &swarm.GlobalService{} + case "replicated": + service.Mode.Replicated = &swarm.ReplicatedService{ + Replicas: opts.replicas.Value(), + } + default: + return service, fmt.Errorf("Unknown mode: %s", opts.mode) + } + return service, nil +} + +// addServiceFlags adds all flags that are common to both `create` and `update`. +// Any flags that are not common are added separately in the individual command +func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) { + flags := cmd.Flags() + + flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container") + flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: [:])") + flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname") + + flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs") + flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory") + flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs") + flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory") + flags.Var(&opts.stopGrace, flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)") + + flags.Var(&opts.replicas, flagReplicas, "Number of tasks") + + flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", "Restart when condition is met (none, on-failure, or any)") + flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)") + flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up") + flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)") + + flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, 1, "Maximum number of tasks updated simultaneously (0 to update all at once)") + flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates (ns|us|ms|s|m|h) (default 0s)") + flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, time.Duration(0), "Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s)") + flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "pause", "Action on update failure (pause|continue)") + flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update") + + flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode (vip or dnsrr)") + + flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents") + + flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service") + flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options") + + flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health") + flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check (ns|us|ms|s|m|h)") + flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ns|us|ms|s|m|h)") + flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy") + flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK") + + flags.BoolVarP(&opts.tty, flagTTY, "t", false, "Allocate a pseudo-TTY") +} + +const ( + flagConstraint = "constraint" + flagConstraintRemove = "constraint-rm" + flagConstraintAdd = "constraint-add" + flagContainerLabel = "container-label" + flagContainerLabelRemove = "container-label-rm" + flagContainerLabelAdd = "container-label-add" + flagDNS = "dns" + flagDNSRemove = "dns-rm" + flagDNSAdd = "dns-add" + flagDNSOption = "dns-option" + flagDNSOptionRemove = "dns-option-rm" + flagDNSOptionAdd = "dns-option-add" + flagDNSSearch = "dns-search" + flagDNSSearchRemove = "dns-search-rm" + flagDNSSearchAdd = "dns-search-add" + flagEndpointMode = "endpoint-mode" + flagHost = "host" + flagHostAdd = "host-add" + flagHostRemove = "host-rm" + flagHostname = "hostname" + flagEnv = "env" + flagEnvFile = "env-file" + flagEnvRemove = "env-rm" + flagEnvAdd = "env-add" + flagGroup = "group" + flagGroupAdd = "group-add" + flagGroupRemove = "group-rm" + flagLabel = "label" + flagLabelRemove = "label-rm" + flagLabelAdd = "label-add" + flagLimitCPU = "limit-cpu" + flagLimitMemory = "limit-memory" + flagMode = "mode" + flagMount = "mount" + flagMountRemove = "mount-rm" + flagMountAdd = "mount-add" + flagName = "name" + flagNetwork = "network" + flagPublish = "publish" + flagPublishRemove = "publish-rm" + flagPublishAdd = "publish-add" + flagReplicas = "replicas" + flagReserveCPU = "reserve-cpu" + flagReserveMemory = "reserve-memory" + flagRestartCondition = "restart-condition" + flagRestartDelay = "restart-delay" + flagRestartMaxAttempts = "restart-max-attempts" + flagRestartWindow = "restart-window" + flagStopGracePeriod = "stop-grace-period" + flagTTY = "tty" + flagUpdateDelay = "update-delay" + flagUpdateFailureAction = "update-failure-action" + flagUpdateMaxFailureRatio = "update-max-failure-ratio" + flagUpdateMonitor = "update-monitor" + flagUpdateParallelism = "update-parallelism" + flagUser = "user" + flagWorkdir = "workdir" + flagRegistryAuth = "with-registry-auth" + flagLogDriver = "log-driver" + flagLogOpt = "log-opt" + flagHealthCmd = "health-cmd" + flagHealthInterval = "health-interval" + flagHealthRetries = "health-retries" + flagHealthTimeout = "health-timeout" + flagNoHealthcheck = "no-healthcheck" + flagSecret = "secret" + flagSecretAdd = "secret-add" + flagSecretRemove = "secret-rm" +) diff --git a/vendor/github.com/docker/docker/cli/command/service/opts_test.go b/vendor/github.com/docker/docker/cli/command/service/opts_test.go new file mode 100644 index 0000000..78b956a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/opts_test.go @@ -0,0 +1,107 @@ +package service + +import ( + "reflect" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestMemBytesString(t *testing.T) { + var mem memBytes = 1048576 + assert.Equal(t, mem.String(), "1 MiB") +} + +func TestMemBytesSetAndValue(t *testing.T) { + var mem memBytes + assert.NilError(t, mem.Set("5kb")) + assert.Equal(t, mem.Value(), int64(5120)) +} + +func TestNanoCPUsString(t *testing.T) { + var cpus opts.NanoCPUs = 6100000000 + assert.Equal(t, cpus.String(), "6.100") +} + +func TestNanoCPUsSetAndValue(t *testing.T) { + var cpus opts.NanoCPUs + assert.NilError(t, cpus.Set("0.35")) + assert.Equal(t, cpus.Value(), int64(350000000)) +} + +func TestDurationOptString(t *testing.T) { + dur := time.Duration(300 * 10e8) + duration := DurationOpt{value: &dur} + assert.Equal(t, duration.String(), "5m0s") +} + +func TestDurationOptSetAndValue(t *testing.T) { + var duration DurationOpt + assert.NilError(t, duration.Set("300s")) + assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) + assert.NilError(t, duration.Set("-300s")) + assert.Equal(t, *duration.Value(), time.Duration(-300*10e8)) +} + +func TestPositiveDurationOptSetAndValue(t *testing.T) { + var duration PositiveDurationOpt + assert.NilError(t, duration.Set("300s")) + assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) + assert.Error(t, duration.Set("-300s"), "cannot be negative") +} + +func TestUint64OptString(t *testing.T) { + value := uint64(2345678) + opt := Uint64Opt{value: &value} + assert.Equal(t, opt.String(), "2345678") + + opt = Uint64Opt{} + assert.Equal(t, opt.String(), "") +} + +func TestUint64OptSetAndValue(t *testing.T) { + var opt Uint64Opt + assert.NilError(t, opt.Set("14445")) + assert.Equal(t, *opt.Value(), uint64(14445)) +} + +func TestHealthCheckOptionsToHealthConfig(t *testing.T) { + dur := time.Second + opt := healthCheckOptions{ + cmd: "curl", + interval: PositiveDurationOpt{DurationOpt{value: &dur}}, + timeout: PositiveDurationOpt{DurationOpt{value: &dur}}, + retries: 10, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ + Test: []string{"CMD-SHELL", "curl"}, + Interval: time.Second, + Timeout: time.Second, + Retries: 10, + }), true) +} + +func TestHealthCheckOptionsToHealthConfigNoHealthcheck(t *testing.T) { + opt := healthCheckOptions{ + noHealthcheck: true, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ + Test: []string{"NONE"}, + }), true) +} + +func TestHealthCheckOptionsToHealthConfigConflict(t *testing.T) { + opt := healthCheckOptions{ + cmd: "curl", + noHealthcheck: true, + } + _, err := opt.toHealthConfig() + assert.Error(t, err, "--no-healthcheck conflicts with --health-* options") +} diff --git a/vendor/github.com/docker/docker/cli/command/service/parse.go b/vendor/github.com/docker/docker/cli/command/service/parse.go new file mode 100644 index 0000000..ce9b454 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/parse.go @@ -0,0 +1,68 @@ +package service + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +// ParseSecrets retrieves the secrets from the requested names and converts +// them to secret references to use with the spec +func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*types.SecretRequestOption) ([]*swarmtypes.SecretReference, error) { + secretRefs := make(map[string]*swarmtypes.SecretReference) + ctx := context.Background() + + for _, secret := range requestedSecrets { + if _, exists := secretRefs[secret.Target]; exists { + return nil, fmt.Errorf("duplicate secret target for %s not allowed", secret.Source) + } + secretRef := &swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + Name: secret.Target, + UID: secret.UID, + GID: secret.GID, + Mode: secret.Mode, + }, + SecretName: secret.Source, + } + + secretRefs[secret.Target] = secretRef + } + + args := filters.NewArgs() + for _, s := range secretRefs { + args.Add("names", s.SecretName) + } + + secrets, err := client.SecretList(ctx, types.SecretListOptions{ + Filters: args, + }) + if err != nil { + return nil, err + } + + foundSecrets := make(map[string]string) + for _, secret := range secrets { + foundSecrets[secret.Spec.Annotations.Name] = secret.ID + } + + addedSecrets := []*swarmtypes.SecretReference{} + + for _, ref := range secretRefs { + id, ok := foundSecrets[ref.SecretName] + if !ok { + return nil, fmt.Errorf("secret not found: %s", ref.SecretName) + } + + // set the id for the ref to properly assign in swarm + // since swarm needs the ID instead of the name + ref.SecretID = id + addedSecrets = append(addedSecrets, ref) + } + + return addedSecrets, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/ps.go b/vendor/github.com/docker/docker/cli/command/service/ps.go new file mode 100644 index 0000000..cf94ad7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/ps.go @@ -0,0 +1,76 @@ +package service + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/cli/command/node" + "github.com/docker/docker/cli/command/task" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type psOptions struct { + serviceID string + quiet bool + noResolve bool + noTrunc bool + filter opts.FilterOpt +} + +func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] SERVICE", + Short: "List the tasks of a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.serviceID = args[0] + return runPS(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display task IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPS(dockerCli *command.DockerCli, opts psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + service, _, err := client.ServiceInspectWithRaw(ctx, opts.serviceID) + if err != nil { + return err + } + + filter := opts.filter.Value() + filter.Add("service", service.ID) + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + nodeReference, err := node.Reference(ctx, client, nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", nodeReference) + } + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + if opts.quiet { + return task.PrintQuiet(dockerCli, tasks) + } + return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/service/remove.go b/vendor/github.com/docker/docker/cli/command/service/remove.go new file mode 100644 index 0000000..c3fbbab --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/remove.go @@ -0,0 +1,47 @@ +package service + +import ( + "fmt" + "strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + + cmd := &cobra.Command{ + Use: "rm SERVICE [SERVICE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } + cmd.Flags() + + return cmd +} + +func runRemove(dockerCli *command.DockerCli, sids []string) error { + client := dockerCli.Client() + + ctx := context.Background() + + var errs []string + for _, sid := range sids { + err := client.ServiceRemove(ctx, sid) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", sid) + } + if len(errs) > 0 { + return fmt.Errorf(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/scale.go b/vendor/github.com/docker/docker/cli/command/service/scale.go new file mode 100644 index 0000000..cf89e90 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/scale.go @@ -0,0 +1,96 @@ +package service + +import ( + "fmt" + "strconv" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newScaleCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]", + Short: "Scale one or multiple replicated services", + Args: scaleArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runScale(dockerCli, args) + }, + } +} + +func scaleArgs(cmd *cobra.Command, args []string) error { + if err := cli.RequiresMinArgs(1)(cmd, args); err != nil { + return err + } + for _, arg := range args { + if parts := strings.SplitN(arg, "=", 2); len(parts) != 2 { + return fmt.Errorf( + "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s", + arg, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } + } + return nil +} + +func runScale(dockerCli *command.DockerCli, args []string) error { + var errors []string + for _, arg := range args { + parts := strings.SplitN(arg, "=", 2) + serviceID, scaleStr := parts[0], parts[1] + + // validate input arg scale number + scale, err := strconv.ParseUint(scaleStr, 10, 64) + if err != nil { + errors = append(errors, fmt.Sprintf("%s: invalid replicas value %s: %v", serviceID, scaleStr, err)) + continue + } + + if err := runServiceScale(dockerCli, serviceID, scale); err != nil { + errors = append(errors, fmt.Sprintf("%s: %v", serviceID, err)) + } + } + + if len(errors) == 0 { + return nil + } + return fmt.Errorf(strings.Join(errors, "\n")) +} + +func runServiceScale(dockerCli *command.DockerCli, serviceID string, scale uint64) error { + client := dockerCli.Client() + ctx := context.Background() + + service, _, err := client.ServiceInspectWithRaw(ctx, serviceID) + if err != nil { + return err + } + + serviceMode := &service.Spec.Mode + if serviceMode.Replicated == nil { + return fmt.Errorf("scale can only be used with replicated mode") + } + + serviceMode.Replicated.Replicas = &scale + + response, err := client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s scaled to %d\n", serviceID, scale) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/trust.go b/vendor/github.com/docker/docker/cli/command/service/trust.go new file mode 100644 index 0000000..052d49c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/trust.go @@ -0,0 +1,96 @@ +package service + +import ( + "encoding/hex" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/trust" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/notary/tuf/data" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func resolveServiceImageDigest(dockerCli *command.DockerCli, service *swarm.ServiceSpec) error { + if !command.IsTrusted() { + // Digests are resolved by the daemon when not using content + // trust. + return nil + } + + image := service.TaskTemplate.ContainerSpec.Image + + // We only attempt to resolve the digest if the reference + // could be parsed as a digest reference. Specifying an image ID + // is valid but not resolvable. There is no warning message for + // an image ID because it's valid to use one. + if _, err := digest.ParseDigest(image); err == nil { + return nil + } + + ref, err := reference.ParseNamed(image) + if err != nil { + return fmt.Errorf("Could not parse image reference %s", service.TaskTemplate.ContainerSpec.Image) + } + if _, ok := ref.(reference.Canonical); !ok { + ref = reference.WithDefaultTag(ref) + + taggedRef, ok := ref.(reference.NamedTagged) + if !ok { + // This should never happen because a reference either + // has a digest, or WithDefaultTag would give it a tag. + return errors.New("Failed to resolve image digest using content trust: reference is missing a tag") + } + + resolvedImage, err := trustedResolveDigest(context.Background(), dockerCli, taggedRef) + if err != nil { + return fmt.Errorf("Failed to resolve image digest using content trust: %v", err) + } + logrus.Debugf("resolved image tag to %s using content trust", resolvedImage.String()) + service.TaskTemplate.ContainerSpec.Image = resolvedImage.String() + } + return nil +} + +func trustedResolveDigest(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged) (distreference.Canonical, error) { + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return nil, err + } + + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + return nil, errors.Wrap(err, "error establishing connection to trust repository") + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.FullName(), err) + } + // Only get the tag if it's in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.String())) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + h, ok := t.Hashes["sha256"] + if !ok { + return nil, errors.New("no valid hash, expecting sha256") + } + + dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h)) + + // Using distribution reference package to make sure that adding a + // digest does not erase the tag. When the two reference packages + // are unified, this will no longer be an issue. + return distreference.WithDigest(ref, dgst) +} diff --git a/vendor/github.com/docker/docker/cli/command/service/update.go b/vendor/github.com/docker/docker/cli/command/service/update.go new file mode 100644 index 0000000..d56de10 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/update.go @@ -0,0 +1,849 @@ +package service + +import ( + "fmt" + "sort" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" + shlex "github.com/flynn-archive/go-shlex" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + serviceOpts := newServiceOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] SERVICE", + Short: "Update a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), args[0]) + }, + } + + flags := cmd.Flags() + flags.String("image", "", "Service image tag") + flags.String("args", "", "Service command args") + flags.Bool("rollback", false, "Rollback to previous specification") + flags.Bool("force", false, "Force update even if no changes require it") + addServiceFlags(cmd, serviceOpts) + + flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") + flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container") + flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") + flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key") + flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") + // flags.Var(newListOptsVar().WithValidator(validatePublishRemove), flagPublishRemove, "Remove a published port by its target port") + flags.Var(&opts.PortOpt{}, flagPublishRemove, "Remove a published port by its target port") + flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") + flags.Var(newListOptsVar(), flagDNSRemove, "Remove a custom DNS server") + flags.Var(newListOptsVar(), flagDNSOptionRemove, "Remove a DNS option") + flags.Var(newListOptsVar(), flagDNSSearchRemove, "Remove a DNS search domain") + flags.Var(newListOptsVar(), flagHostRemove, "Remove a custom host-to-IP mapping (host:ip)") + flags.Var(&serviceOpts.labels, flagLabelAdd, "Add or update a service label") + flags.Var(&serviceOpts.containerLabels, flagContainerLabelAdd, "Add or update a container label") + flags.Var(&serviceOpts.env, flagEnvAdd, "Add or update an environment variable") + flags.Var(newListOptsVar(), flagSecretRemove, "Remove a secret") + flags.Var(&serviceOpts.secrets, flagSecretAdd, "Add or update a secret on a service") + flags.Var(&serviceOpts.mounts, flagMountAdd, "Add or update a mount on a service") + flags.Var(&serviceOpts.constraints, flagConstraintAdd, "Add or update a placement constraint") + flags.Var(&serviceOpts.endpoint.publishPorts, flagPublishAdd, "Add or update a published port") + flags.Var(&serviceOpts.groups, flagGroupAdd, "Add an additional supplementary user group to the container") + flags.Var(&serviceOpts.dns, flagDNSAdd, "Add or update a custom DNS server") + flags.Var(&serviceOpts.dnsOption, flagDNSOptionAdd, "Add or update a DNS option") + flags.Var(&serviceOpts.dnsSearch, flagDNSSearchAdd, "Add or update a custom DNS search domain") + flags.Var(&serviceOpts.hosts, flagHostAdd, "Add or update a custom host-to-IP mapping (host:ip)") + + return cmd +} + +func newListOptsVar() *opts.ListOpts { + return opts.NewListOptsRef(&[]string{}, nil) +} + +func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, serviceID string) error { + apiClient := dockerCli.Client() + ctx := context.Background() + updateOpts := types.ServiceUpdateOptions{} + + service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID) + if err != nil { + return err + } + + rollback, err := flags.GetBool("rollback") + if err != nil { + return err + } + + spec := &service.Spec + if rollback { + spec = service.PreviousSpec + if spec == nil { + return fmt.Errorf("service does not have a previous specification to roll back to") + } + } + + err = updateService(flags, spec) + if err != nil { + return err + } + + if flags.Changed("image") { + if err := resolveServiceImageDigest(dockerCli, spec); err != nil { + return err + } + } + + updatedSecrets, err := getUpdatedSecrets(apiClient, flags, spec.TaskTemplate.ContainerSpec.Secrets) + if err != nil { + return err + } + + spec.TaskTemplate.ContainerSpec.Secrets = updatedSecrets + + // only send auth if flag was set + sendAuth, err := flags.GetBool(flagRegistryAuth) + if err != nil { + return err + } + if sendAuth { + // Retrieve encoded auth token from the image reference + // This would be the old image if it didn't change in this update + image := spec.TaskTemplate.ContainerSpec.Image + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err != nil { + return err + } + updateOpts.EncodedRegistryAuth = encodedAuth + } else if rollback { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec + } else { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec + } + + response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) + return nil +} + +func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { + updateString := func(flag string, field *string) { + if flags.Changed(flag) { + *field, _ = flags.GetString(flag) + } + } + + updateInt64Value := func(flag string, field *int64) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(int64Value).Value() + } + } + + updateFloatValue := func(flag string, field *float32) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(*floatValue).Value() + } + } + + updateDuration := func(flag string, field *time.Duration) { + if flags.Changed(flag) { + *field, _ = flags.GetDuration(flag) + } + } + + updateDurationOpt := func(flag string, field **time.Duration) { + if flags.Changed(flag) { + val := *flags.Lookup(flag).Value.(*DurationOpt).Value() + *field = &val + } + } + + updateUint64 := func(flag string, field *uint64) { + if flags.Changed(flag) { + *field, _ = flags.GetUint64(flag) + } + } + + updateUint64Opt := func(flag string, field **uint64) { + if flags.Changed(flag) { + val := *flags.Lookup(flag).Value.(*Uint64Opt).Value() + *field = &val + } + } + + cspec := &spec.TaskTemplate.ContainerSpec + task := &spec.TaskTemplate + + taskResources := func() *swarm.ResourceRequirements { + if task.Resources == nil { + task.Resources = &swarm.ResourceRequirements{} + } + return task.Resources + } + + updateLabels(flags, &spec.Labels) + updateContainerLabels(flags, &cspec.Labels) + updateString("image", &cspec.Image) + updateStringToSlice(flags, "args", &cspec.Args) + updateEnvironment(flags, &cspec.Env) + updateString(flagWorkdir, &cspec.Dir) + updateString(flagUser, &cspec.User) + updateString(flagHostname, &cspec.Hostname) + if err := updateMounts(flags, &cspec.Mounts); err != nil { + return err + } + + if flags.Changed(flagLimitCPU) || flags.Changed(flagLimitMemory) { + taskResources().Limits = &swarm.Resources{} + updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) + updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) + } + if flags.Changed(flagReserveCPU) || flags.Changed(flagReserveMemory) { + taskResources().Reservations = &swarm.Resources{} + updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) + updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) + } + + updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod) + + if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { + if task.RestartPolicy == nil { + task.RestartPolicy = &swarm.RestartPolicy{} + } + + if flags.Changed(flagRestartCondition) { + value, _ := flags.GetString(flagRestartCondition) + task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) + } + updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay) + updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts) + updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window) + } + + if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { + if task.Placement == nil { + task.Placement = &swarm.Placement{} + } + updatePlacement(flags, task.Placement) + } + + if err := updateReplicas(flags, &spec.Mode); err != nil { + return err + } + + if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio) { + if spec.UpdateConfig == nil { + spec.UpdateConfig = &swarm.UpdateConfig{} + } + updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) + updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) + updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor) + updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction) + updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio) + } + + if flags.Changed(flagEndpointMode) { + value, _ := flags.GetString(flagEndpointMode) + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + spec.EndpointSpec.Mode = swarm.ResolutionMode(value) + } + + if anyChanged(flags, flagGroupAdd, flagGroupRemove) { + if err := updateGroups(flags, &cspec.Groups); err != nil { + return err + } + } + + if anyChanged(flags, flagPublishAdd, flagPublishRemove) { + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil { + return err + } + } + + if anyChanged(flags, flagDNSAdd, flagDNSRemove, flagDNSOptionAdd, flagDNSOptionRemove, flagDNSSearchAdd, flagDNSSearchRemove) { + if cspec.DNSConfig == nil { + cspec.DNSConfig = &swarm.DNSConfig{} + } + if err := updateDNSConfig(flags, &cspec.DNSConfig); err != nil { + return err + } + } + + if anyChanged(flags, flagHostAdd, flagHostRemove) { + if err := updateHosts(flags, &cspec.Hosts); err != nil { + return err + } + } + + if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil { + return err + } + + force, err := flags.GetBool("force") + if err != nil { + return err + } + + if force { + spec.TaskTemplate.ForceUpdate++ + } + + if err := updateHealthcheck(flags, cspec); err != nil { + return err + } + + if flags.Changed(flagTTY) { + tty, err := flags.GetBool(flagTTY) + if err != nil { + return err + } + cspec.TTY = tty + } + + return nil +} + +func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) error { + if !flags.Changed(flag) { + return nil + } + + value, _ := flags.GetString(flag) + valueSlice, err := shlex.Split(value) + *field = valueSlice + return err +} + +func anyChanged(flags *pflag.FlagSet, fields ...string) bool { + for _, flag := range fields { + if flags.Changed(flag) { + return true + } + } + return false +} + +func updatePlacement(flags *pflag.FlagSet, placement *swarm.Placement) { + if flags.Changed(flagConstraintAdd) { + values := flags.Lookup(flagConstraintAdd).Value.(*opts.ListOpts).GetAll() + placement.Constraints = append(placement.Constraints, values...) + } + toRemove := buildToRemoveSet(flags, flagConstraintRemove) + + newConstraints := []string{} + for _, constraint := range placement.Constraints { + if _, exists := toRemove[constraint]; !exists { + newConstraints = append(newConstraints, constraint) + } + } + // Sort so that result is predictable. + sort.Strings(newConstraints) + + placement.Constraints = newConstraints +} + +func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagContainerLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range runconfigopts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagContainerLabelRemove) { + toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } +} + +func updateLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range runconfigopts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagLabelRemove) { + toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } +} + +func updateEnvironment(flags *pflag.FlagSet, field *[]string) { + envSet := map[string]string{} + for _, v := range *field { + envSet[envKey(v)] = v + } + if flags.Changed(flagEnvAdd) { + value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts) + for _, v := range value.GetAll() { + envSet[envKey(v)] = v + } + } + + *field = []string{} + for _, v := range envSet { + *field = append(*field, v) + } + + toRemove := buildToRemoveSet(flags, flagEnvRemove) + *field = removeItems(*field, toRemove, envKey) +} + +func getUpdatedSecrets(apiClient client.SecretAPIClient, flags *pflag.FlagSet, secrets []*swarm.SecretReference) ([]*swarm.SecretReference, error) { + if flags.Changed(flagSecretAdd) { + values := flags.Lookup(flagSecretAdd).Value.(*opts.SecretOpt).Value() + + addSecrets, err := ParseSecrets(apiClient, values) + if err != nil { + return nil, err + } + secrets = append(secrets, addSecrets...) + } + toRemove := buildToRemoveSet(flags, flagSecretRemove) + newSecrets := []*swarm.SecretReference{} + for _, secret := range secrets { + if _, exists := toRemove[secret.SecretName]; !exists { + newSecrets = append(newSecrets, secret) + } + } + + return newSecrets, nil +} + +func envKey(value string) string { + kv := strings.SplitN(value, "=", 2) + return kv[0] +} + +func itemKey(value string) string { + return value +} + +func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} { + var empty struct{} + toRemove := make(map[string]struct{}) + + if !flags.Changed(flag) { + return toRemove + } + + toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll() + for _, key := range toRemoveSlice { + toRemove[key] = empty + } + return toRemove +} + +func removeItems( + seq []string, + toRemove map[string]struct{}, + keyFunc func(string) string, +) []string { + newSeq := []string{} + for _, item := range seq { + if _, exists := toRemove[keyFunc(item)]; !exists { + newSeq = append(newSeq, item) + } + } + return newSeq +} + +type byMountSource []mounttypes.Mount + +func (m byMountSource) Len() int { return len(m) } +func (m byMountSource) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byMountSource) Less(i, j int) bool { + a, b := m[i], m[j] + + if a.Source == b.Source { + return a.Target < b.Target + } + + return a.Source < b.Source +} + +func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error { + + mountsByTarget := map[string]mounttypes.Mount{} + + if flags.Changed(flagMountAdd) { + values := flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value() + for _, mount := range values { + if _, ok := mountsByTarget[mount.Target]; ok { + return fmt.Errorf("duplicate mount target") + } + mountsByTarget[mount.Target] = mount + } + } + + // Add old list of mount points minus updated one. + for _, mount := range *mounts { + if _, ok := mountsByTarget[mount.Target]; !ok { + mountsByTarget[mount.Target] = mount + } + } + + newMounts := []mounttypes.Mount{} + + toRemove := buildToRemoveSet(flags, flagMountRemove) + + for _, mount := range mountsByTarget { + if _, exists := toRemove[mount.Target]; !exists { + newMounts = append(newMounts, mount) + } + } + sort.Sort(byMountSource(newMounts)) + *mounts = newMounts + return nil +} + +func updateGroups(flags *pflag.FlagSet, groups *[]string) error { + if flags.Changed(flagGroupAdd) { + values := flags.Lookup(flagGroupAdd).Value.(*opts.ListOpts).GetAll() + *groups = append(*groups, values...) + } + toRemove := buildToRemoveSet(flags, flagGroupRemove) + + newGroups := []string{} + for _, group := range *groups { + if _, exists := toRemove[group]; !exists { + newGroups = append(newGroups, group) + } + } + // Sort so that result is predictable. + sort.Strings(newGroups) + + *groups = newGroups + return nil +} + +func removeDuplicates(entries []string) []string { + hit := map[string]bool{} + newEntries := []string{} + for _, v := range entries { + if !hit[v] { + newEntries = append(newEntries, v) + hit[v] = true + } + } + return newEntries +} + +func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error { + newConfig := &swarm.DNSConfig{} + + nameservers := (*config).Nameservers + if flags.Changed(flagDNSAdd) { + values := flags.Lookup(flagDNSAdd).Value.(*opts.ListOpts).GetAll() + nameservers = append(nameservers, values...) + } + nameservers = removeDuplicates(nameservers) + toRemove := buildToRemoveSet(flags, flagDNSRemove) + for _, nameserver := range nameservers { + if _, exists := toRemove[nameserver]; !exists { + newConfig.Nameservers = append(newConfig.Nameservers, nameserver) + + } + } + // Sort so that result is predictable. + sort.Strings(newConfig.Nameservers) + + search := (*config).Search + if flags.Changed(flagDNSSearchAdd) { + values := flags.Lookup(flagDNSSearchAdd).Value.(*opts.ListOpts).GetAll() + search = append(search, values...) + } + search = removeDuplicates(search) + toRemove = buildToRemoveSet(flags, flagDNSSearchRemove) + for _, entry := range search { + if _, exists := toRemove[entry]; !exists { + newConfig.Search = append(newConfig.Search, entry) + } + } + // Sort so that result is predictable. + sort.Strings(newConfig.Search) + + options := (*config).Options + if flags.Changed(flagDNSOptionAdd) { + values := flags.Lookup(flagDNSOptionAdd).Value.(*opts.ListOpts).GetAll() + options = append(options, values...) + } + options = removeDuplicates(options) + toRemove = buildToRemoveSet(flags, flagDNSOptionRemove) + for _, option := range options { + if _, exists := toRemove[option]; !exists { + newConfig.Options = append(newConfig.Options, option) + } + } + // Sort so that result is predictable. + sort.Strings(newConfig.Options) + + *config = newConfig + return nil +} + +type byPortConfig []swarm.PortConfig + +func (r byPortConfig) Len() int { return len(r) } +func (r byPortConfig) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortConfig) Less(i, j int) bool { + // We convert PortConfig into `port/protocol`, e.g., `80/tcp` + // In updatePorts we already filter out with map so there is duplicate entries + return portConfigToString(&r[i]) < portConfigToString(&r[j]) +} + +func portConfigToString(portConfig *swarm.PortConfig) string { + protocol := portConfig.Protocol + mode := portConfig.PublishMode + return fmt.Sprintf("%v:%v/%s/%s", portConfig.PublishedPort, portConfig.TargetPort, protocol, mode) +} + +// FIXME(vdemeester) port to opts.PortOpt +// This validation is only used for `--publish-rm`. +// The `--publish-rm` takes: +// [/] (e.g., 80, 80/tcp, 53/udp) +func validatePublishRemove(val string) (string, error) { + proto, port := nat.SplitProtoPort(val) + if proto != "tcp" && proto != "udp" { + return "", fmt.Errorf("invalid protocol '%s' for %s", proto, val) + } + if strings.Contains(port, ":") { + return "", fmt.Errorf("invalid port format: '%s', should be [/] (e.g., 80, 80/tcp, 53/udp)", port) + } + if _, err := nat.ParsePort(port); err != nil { + return "", err + } + return val, nil +} + +func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error { + // The key of the map is `port/protocol`, e.g., `80/tcp` + portSet := map[string]swarm.PortConfig{} + + // Build the current list of portConfig + for _, entry := range *portConfig { + if _, ok := portSet[portConfigToString(&entry)]; !ok { + portSet[portConfigToString(&entry)] = entry + } + } + + newPorts := []swarm.PortConfig{} + + // Clean current ports + toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value() +portLoop: + for _, port := range portSet { + for _, pConfig := range toRemove { + if equalProtocol(port.Protocol, pConfig.Protocol) && + port.TargetPort == pConfig.TargetPort && + equalPublishMode(port.PublishMode, pConfig.PublishMode) { + continue portLoop + } + } + + newPorts = append(newPorts, port) + } + + // Check to see if there are any conflict in flags. + if flags.Changed(flagPublishAdd) { + ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value() + + for _, port := range ports { + if v, ok := portSet[portConfigToString(&port)]; ok { + if v != port { + fmt.Println("v", v) + return fmt.Errorf("conflicting port mapping between %v:%v/%s and %v:%v/%s", port.PublishedPort, port.TargetPort, port.Protocol, v.PublishedPort, v.TargetPort, v.Protocol) + } + continue + } + //portSet[portConfigToString(&port)] = port + newPorts = append(newPorts, port) + } + } + + // Sort the PortConfig to avoid unnecessary updates + sort.Sort(byPortConfig(newPorts)) + *portConfig = newPorts + return nil +} + +func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool { + return prot1 == prot2 || + (prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) || + (prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP) +} + +func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool { + return mode1 == mode2 || + (mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) || + (mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress) +} + +func equalPort(targetPort nat.Port, port swarm.PortConfig) bool { + return (string(port.Protocol) == targetPort.Proto() && + port.TargetPort == uint32(targetPort.Int())) +} + +func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error { + if !flags.Changed(flagReplicas) { + return nil + } + + if serviceMode == nil || serviceMode.Replicated == nil { + return fmt.Errorf("replicas can only be used with replicated mode") + } + serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value() + return nil +} + +func updateHosts(flags *pflag.FlagSet, hosts *[]string) error { + // Combine existing Hosts (in swarmkit format) with the host to add (convert to swarmkit format) + if flags.Changed(flagHostAdd) { + values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) + *hosts = append(*hosts, values...) + } + // Remove duplicate + *hosts = removeDuplicates(*hosts) + + keysToRemove := make(map[string]struct{}) + if flags.Changed(flagHostRemove) { + var empty struct{} + extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll() + for _, entry := range extraHostsToRemove { + key := strings.SplitN(entry, ":", 2)[0] + keysToRemove[key] = empty + } + } + + newHosts := []string{} + for _, entry := range *hosts { + // Since this is in swarmkit format, we need to find the key, which is canonical_hostname of: + // IP_address canonical_hostname [aliases...] + parts := strings.Fields(entry) + if len(parts) > 1 { + key := parts[1] + if _, exists := keysToRemove[key]; !exists { + newHosts = append(newHosts, entry) + } + } else { + newHosts = append(newHosts, entry) + } + } + + // Sort so that result is predictable. + sort.Strings(newHosts) + + *hosts = newHosts + return nil +} + +// updateLogDriver updates the log driver only if the log driver flag is set. +// All options will be replaced with those provided on the command line. +func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error { + if !flags.Changed(flagLogDriver) { + return nil + } + + name, err := flags.GetString(flagLogDriver) + if err != nil { + return err + } + + if name == "" { + return nil + } + + taskTemplate.LogDriver = &swarm.Driver{ + Name: name, + Options: runconfigopts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()), + } + + return nil +} + +func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) error { + if !anyChanged(flags, flagNoHealthcheck, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout) { + return nil + } + if containerSpec.Healthcheck == nil { + containerSpec.Healthcheck = &container.HealthConfig{} + } + noHealthcheck, err := flags.GetBool(flagNoHealthcheck) + if err != nil { + return err + } + if noHealthcheck { + if !anyChanged(flags, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout) { + containerSpec.Healthcheck = &container.HealthConfig{ + Test: []string{"NONE"}, + } + return nil + } + return fmt.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + if len(containerSpec.Healthcheck.Test) > 0 && containerSpec.Healthcheck.Test[0] == "NONE" { + containerSpec.Healthcheck.Test = nil + } + if flags.Changed(flagHealthInterval) { + val := *flags.Lookup(flagHealthInterval).Value.(*PositiveDurationOpt).Value() + containerSpec.Healthcheck.Interval = val + } + if flags.Changed(flagHealthTimeout) { + val := *flags.Lookup(flagHealthTimeout).Value.(*PositiveDurationOpt).Value() + containerSpec.Healthcheck.Timeout = val + } + if flags.Changed(flagHealthRetries) { + containerSpec.Healthcheck.Retries, _ = flags.GetInt(flagHealthRetries) + } + if flags.Changed(flagHealthCmd) { + cmd, _ := flags.GetString(flagHealthCmd) + if cmd != "" { + containerSpec.Healthcheck.Test = []string{"CMD-SHELL", cmd} + } else { + containerSpec.Healthcheck.Test = nil + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/update_test.go b/vendor/github.com/docker/docker/cli/command/service/update_test.go new file mode 100644 index 0000000..08fe248 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/update_test.go @@ -0,0 +1,384 @@ +package service + +import ( + "reflect" + "sort" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestUpdateServiceArgs(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("args", "the \"new args\"") + + spec := &swarm.ServiceSpec{} + cspec := &spec.TaskTemplate.ContainerSpec + cspec.Args = []string{"old", "args"} + + updateService(flags, spec) + assert.EqualStringSlice(t, cspec.Args, []string{"the", "new args"}) +} + +func TestUpdateLabels(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-add", "toadd=newlabel") + flags.Set("label-rm", "toremove") + + labels := map[string]string{ + "toremove": "thelabeltoremove", + "tokeep": "value", + } + + updateLabels(flags, &labels) + assert.Equal(t, len(labels), 2) + assert.Equal(t, labels["tokeep"], "value") + assert.Equal(t, labels["toadd"], "newlabel") +} + +func TestUpdateLabelsRemoveALabelThatDoesNotExist(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-rm", "dne") + + labels := map[string]string{"foo": "theoldlabel"} + updateLabels(flags, &labels) + assert.Equal(t, len(labels), 1) +} + +func TestUpdatePlacement(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("constraint-add", "node=toadd") + flags.Set("constraint-rm", "node!=toremove") + + placement := &swarm.Placement{ + Constraints: []string{"node!=toremove", "container=tokeep"}, + } + + updatePlacement(flags, placement) + assert.Equal(t, len(placement.Constraints), 2) + assert.Equal(t, placement.Constraints[0], "container=tokeep") + assert.Equal(t, placement.Constraints[1], "node=toadd") +} + +func TestUpdateEnvironment(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "toadd=newenv") + flags.Set("env-rm", "toremove") + + envs := []string{"toremove=theenvtoremove", "tokeep=value"} + + updateEnvironment(flags, &envs) + assert.Equal(t, len(envs), 2) + // Order has been removed in updateEnvironment (map) + sort.Strings(envs) + assert.Equal(t, envs[0], "toadd=newenv") + assert.Equal(t, envs[1], "tokeep=value") +} + +func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "foo=newenv") + flags.Set("env-add", "foo=dupe") + flags.Set("env-rm", "foo") + + envs := []string{"foo=value"} + + updateEnvironment(flags, &envs) + assert.Equal(t, len(envs), 0) +} + +func TestUpdateEnvironmentWithDuplicateKeys(t *testing.T) { + // Test case for #25404 + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "A=b") + + envs := []string{"A=c"} + + updateEnvironment(flags, &envs) + assert.Equal(t, len(envs), 1) + assert.Equal(t, envs[0], "A=b") +} + +func TestUpdateGroups(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("group-add", "wheel") + flags.Set("group-add", "docker") + flags.Set("group-rm", "root") + flags.Set("group-add", "foo") + flags.Set("group-rm", "docker") + + groups := []string{"bar", "root"} + + updateGroups(flags, &groups) + assert.Equal(t, len(groups), 3) + assert.Equal(t, groups[0], "bar") + assert.Equal(t, groups[1], "foo") + assert.Equal(t, groups[2], "wheel") +} + +func TestUpdateDNSConfig(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + + // IPv4, with duplicates + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "2.2.2.2") + flags.Set("dns-rm", "3.3.3.3") + flags.Set("dns-rm", "2.2.2.2") + // IPv6 + flags.Set("dns-add", "2001:db8:abc8::1") + // Invalid dns record + assert.Error(t, flags.Set("dns-add", "x.y.z.w"), "x.y.z.w is not an ip address") + + // domains with duplicates + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.org") + flags.Set("dns-search-rm", "example.org") + // Invalid dns search domain + assert.Error(t, flags.Set("dns-search-add", "example$com"), "example$com is not a valid domain") + + flags.Set("dns-option-add", "ndots:9") + flags.Set("dns-option-rm", "timeout:3") + + config := &swarm.DNSConfig{ + Nameservers: []string{"3.3.3.3", "5.5.5.5"}, + Search: []string{"localdomain"}, + Options: []string{"timeout:3"}, + } + + updateDNSConfig(flags, &config) + + assert.Equal(t, len(config.Nameservers), 3) + assert.Equal(t, config.Nameservers[0], "1.1.1.1") + assert.Equal(t, config.Nameservers[1], "2001:db8:abc8::1") + assert.Equal(t, config.Nameservers[2], "5.5.5.5") + + assert.Equal(t, len(config.Search), 2) + assert.Equal(t, config.Search[0], "example.com") + assert.Equal(t, config.Search[1], "localdomain") + + assert.Equal(t, len(config.Options), 1) + assert.Equal(t, config.Options[0], "ndots:9") +} + +func TestUpdateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol2,target=/toadd") + flags.Set("mount-rm", "/toremove") + + mounts := []mounttypes.Mount{ + {Target: "/toremove", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/tokeep", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Equal(t, len(mounts), 2) + assert.Equal(t, mounts[0].Target, "/toadd") + assert.Equal(t, mounts[1].Target, "/tokeep") + +} + +func TestUpdateMountsWithDuplicateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol4,target=/toadd") + + mounts := []mounttypes.Mount{ + {Target: "/tokeep1", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/toadd", Source: "vol2", Type: mounttypes.TypeBind}, + {Target: "/tokeep2", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Equal(t, len(mounts), 3) + assert.Equal(t, mounts[0].Target, "/tokeep1") + assert.Equal(t, mounts[1].Target, "/tokeep2") + assert.Equal(t, mounts[2].Target, "/toadd") +} + +func TestUpdatePorts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "1000:1000") + flags.Set("publish-rm", "333/udp") + + portConfigs := []swarm.PortConfig{ + {TargetPort: 333, Protocol: swarm.PortConfigProtocolUDP}, + {TargetPort: 555}, + } + + err := updatePorts(flags, &portConfigs) + assert.Equal(t, err, nil) + assert.Equal(t, len(portConfigs), 2) + // Do a sort to have the order (might have changed by map) + targetPorts := []int{int(portConfigs[0].TargetPort), int(portConfigs[1].TargetPort)} + sort.Ints(targetPorts) + assert.Equal(t, targetPorts[0], 555) + assert.Equal(t, targetPorts[1], 1000) +} + +func TestUpdatePortsDuplicate(t *testing.T) { + // Test case for #25375 + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "80:80") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 80, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.Equal(t, err, nil) + assert.Equal(t, len(portConfigs), 1) + assert.Equal(t, portConfigs[0].TargetPort, uint32(80)) +} + +func TestUpdateHealthcheckTable(t *testing.T) { + type test struct { + flags [][2]string + initial *container.HealthConfig + expected *container.HealthConfig + err string + } + testCases := []test{ + { + flags: [][2]string{{"no-healthcheck", "true"}}, + initial: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"NONE"}}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}}, + }, + { + flags: [][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + }, + { + flags: [][2]string{{"health-interval", "1m"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Interval: time.Minute}, + }, + { + flags: [][2]string{{"health-cmd", ""}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "0"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-interval", "10m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-timeout", "1m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + } + for i, c := range testCases { + flags := newUpdateCommand(nil).Flags() + for _, flag := range c.flags { + flags.Set(flag[0], flag[1]) + } + cspec := &swarm.ContainerSpec{ + Healthcheck: c.initial, + } + err := updateHealthcheck(flags, cspec) + if c.err != "" { + assert.Error(t, err, c.err) + } else { + assert.NilError(t, err) + if !reflect.DeepEqual(cspec.Healthcheck, c.expected) { + t.Errorf("incorrect result for test %d, expected health config:\n\t%#v\ngot:\n\t%#v", i, c.expected, cspec.Healthcheck) + } + } + } +} + +func TestUpdateHosts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "example.net:2.2.2.2") + flags.Set("host-add", "ipv6.net:2001:db8:abc8::1") + // remove with ipv6 should work + flags.Set("host-rm", "example.net:2001:db8:abc8::1") + // just hostname should work as well + flags.Set("host-rm", "example.net") + // bad format error + assert.Error(t, flags.Set("host-add", "$example.com$"), "bad format for add-host:") + + hosts := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2001:db8:abc8::1 example.net"} + + updateHosts(flags, &hosts) + assert.Equal(t, len(hosts), 3) + assert.Equal(t, hosts[0], "1.2.3.4 example.com") + assert.Equal(t, hosts[1], "2001:db8:abc8::1 ipv6.net") + assert.Equal(t, hosts[2], "4.3.2.1 example.org") +} + +func TestUpdatePortsRmWithProtocol(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "8081:81") + flags.Set("publish-add", "8082:82") + flags.Set("publish-rm", "80") + flags.Set("publish-rm", "81/tcp") + flags.Set("publish-rm", "82/udp") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.Equal(t, err, nil) + assert.Equal(t, len(portConfigs), 2) + assert.Equal(t, portConfigs[0].TargetPort, uint32(81)) + assert.Equal(t, portConfigs[1].TargetPort, uint32(82)) +} + +// FIXME(vdemeester) port to opts.PortOpt +func TestValidatePort(t *testing.T) { + validPorts := []string{"80/tcp", "80", "80/udp"} + invalidPorts := map[string]string{ + "9999999": "out of range", + "80:80/tcp": "invalid port format", + "53:53/udp": "invalid port format", + "80:80": "invalid port format", + "80/xyz": "invalid protocol", + "tcp": "invalid syntax", + "udp": "invalid syntax", + "": "invalid protocol", + } + for _, port := range validPorts { + _, err := validatePublishRemove(port) + assert.Equal(t, err, nil) + } + for port, e := range invalidPorts { + _, err := validatePublishRemove(port) + assert.Error(t, err, e) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/cmd.go b/vendor/github.com/docker/docker/cli/command/stack/cmd.go new file mode 100644 index 0000000..860bfed --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/cmd.go @@ -0,0 +1,35 @@ +package stack + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +// NewStackCommand returns a cobra command for `stack` subcommands +func NewStackCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "stack", + Short: "Manage Docker stacks", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + Tags: map[string]string{"version": "1.25"}, + } + cmd.AddCommand( + newDeployCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newServicesCommand(dockerCli), + newPsCommand(dockerCli), + ) + return cmd +} + +// NewTopLevelDeployCommand returns a command for `docker deploy` +func NewTopLevelDeployCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := newDeployCommand(dockerCli) + // Remove the aliases at the top level + cmd.Aliases = []string{} + cmd.Tags = map[string]string{"experimental": "", "version": "1.25"} + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/common.go b/vendor/github.com/docker/docker/cli/command/stack/common.go new file mode 100644 index 0000000..72719f9 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/common.go @@ -0,0 +1,60 @@ +package stack + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/compose/convert" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" +) + +func getStackFilter(namespace string) filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getStackFilterFromOpt(namespace string, opt opts.FilterOpt) filters.Args { + filter := opt.Value() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getAllStacksFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace) + return filter +} + +func getServices( + ctx context.Context, + apiclient client.APIClient, + namespace string, +) ([]swarm.Service, error) { + return apiclient.ServiceList( + ctx, + types.ServiceListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackNetworks( + ctx context.Context, + apiclient client.APIClient, + namespace string, +) ([]types.NetworkResource, error) { + return apiclient.NetworkList( + ctx, + types.NetworkListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackSecrets( + ctx context.Context, + apiclient client.APIClient, + namespace string, +) ([]swarm.Secret, error) { + return apiclient.SecretList( + ctx, + types.SecretListOptions{Filters: getStackFilter(namespace)}) +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/deploy.go b/vendor/github.com/docker/docker/cli/command/stack/deploy.go new file mode 100644 index 0000000..980876a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/deploy.go @@ -0,0 +1,357 @@ +package stack + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + secretcli "github.com/docker/docker/cli/command/secret" + "github.com/docker/docker/cli/compose/convert" + "github.com/docker/docker/cli/compose/loader" + composetypes "github.com/docker/docker/cli/compose/types" + dockerclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +const ( + defaultNetworkDriver = "overlay" +) + +type deployOptions struct { + bundlefile string + composefile string + namespace string + sendRegistryAuth bool +} + +func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts deployOptions + + cmd := &cobra.Command{ + Use: "deploy [OPTIONS] STACK", + Aliases: []string{"up"}, + Short: "Deploy a new stack or update an existing stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runDeploy(dockerCli, opts) + }, + } + + flags := cmd.Flags() + addBundlefileFlag(&opts.bundlefile, flags) + addComposefileFlag(&opts.composefile, flags) + addRegistryAuthFlag(&opts.sendRegistryAuth, flags) + return cmd +} + +func runDeploy(dockerCli *command.DockerCli, opts deployOptions) error { + ctx := context.Background() + + switch { + case opts.bundlefile == "" && opts.composefile == "": + return fmt.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).") + case opts.bundlefile != "" && opts.composefile != "": + return fmt.Errorf("You cannot specify both a bundle file and a Compose file.") + case opts.bundlefile != "": + return deployBundle(ctx, dockerCli, opts) + default: + return deployCompose(ctx, dockerCli, opts) + } +} + +// checkDaemonIsSwarmManager does an Info API call to verify that the daemon is +// a swarm manager. This is necessary because we must create networks before we +// create services, but the API call for creating a network does not return a +// proper status code when it can't create a network in the "global" scope. +func checkDaemonIsSwarmManager(ctx context.Context, dockerCli *command.DockerCli) error { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if !info.Swarm.ControlAvailable { + return errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + return nil +} + +func deployCompose(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { + configDetails, err := getConfigDetails(opts) + if err != nil { + return err + } + + config, err := loader.Load(configDetails) + if err != nil { + if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { + return fmt.Errorf("Compose file contains unsupported options:\n\n%s\n", + propertyWarnings(fpe.Properties)) + } + + return err + } + + unsupportedProperties := loader.GetUnsupportedProperties(configDetails) + if len(unsupportedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n", + strings.Join(unsupportedProperties, ", ")) + } + + deprecatedProperties := loader.GetDeprecatedProperties(configDetails) + if len(deprecatedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n", + propertyWarnings(deprecatedProperties)) + } + + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.namespace) + + serviceNetworks := getServicesDeclaredNetworks(config.Services) + networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks) + if err := validateExternalNetworks(ctx, dockerCli, externalNetworks); err != nil { + return err + } + if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { + return err + } + + secrets, err := convert.Secrets(namespace, config.Secrets) + if err != nil { + return err + } + if err := createSecrets(ctx, dockerCli, namespace, secrets); err != nil { + return err + } + + services, err := convert.Services(namespace, config, dockerCli.Client()) + if err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) +} +func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} { + serviceNetworks := map[string]struct{}{} + for _, serviceConfig := range serviceConfigs { + if len(serviceConfig.Networks) == 0 { + serviceNetworks["default"] = struct{}{} + continue + } + for network := range serviceConfig.Networks { + serviceNetworks[network] = struct{}{} + } + } + return serviceNetworks +} + +func propertyWarnings(properties map[string]string) string { + var msgs []string + for name, description := range properties { + msgs = append(msgs, fmt.Sprintf("%s: %s", name, description)) + } + sort.Strings(msgs) + return strings.Join(msgs, "\n\n") +} + +func getConfigDetails(opts deployOptions) (composetypes.ConfigDetails, error) { + var details composetypes.ConfigDetails + var err error + + details.WorkingDir, err = os.Getwd() + if err != nil { + return details, err + } + + configFile, err := getConfigFile(opts.composefile) + if err != nil { + return details, err + } + // TODO: support multiple files + details.ConfigFiles = []composetypes.ConfigFile{*configFile} + return details, nil +} + +func getConfigFile(filename string) (*composetypes.ConfigFile, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + config, err := loader.ParseYAML(bytes) + if err != nil { + return nil, err + } + return &composetypes.ConfigFile{ + Filename: filename, + Config: config, + }, nil +} + +func validateExternalNetworks( + ctx context.Context, + dockerCli *command.DockerCli, + externalNetworks []string) error { + client := dockerCli.Client() + + for _, networkName := range externalNetworks { + network, err := client.NetworkInspect(ctx, networkName) + if err != nil { + if dockerclient.IsErrNetworkNotFound(err) { + return fmt.Errorf("network %q is declared as external, but could not be found. You need to create the network before the stack is deployed (with overlay driver)", networkName) + } + return err + } + if network.Scope != "swarm" { + return fmt.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of %q", networkName, network.Scope, "swarm") + } + } + + return nil +} + +func createSecrets( + ctx context.Context, + dockerCli *command.DockerCli, + namespace convert.Namespace, + secrets []swarm.SecretSpec, +) error { + client := dockerCli.Client() + + for _, secretSpec := range secrets { + // TODO: fix this after https://github.com/docker/docker/pull/29218 + secrets, err := secretcli.GetSecretsByNameOrIDPrefixes(ctx, client, []string{secretSpec.Name}) + switch { + case err != nil: + return err + case len(secrets) > 1: + return errors.Errorf("ambiguous secret name: %s", secretSpec.Name) + case len(secrets) == 0: + fmt.Fprintf(dockerCli.Out(), "Creating secret %s\n", secretSpec.Name) + _, err = client.SecretCreate(ctx, secretSpec) + default: + secret := secrets[0] + // Update secret to ensure that the local data hasn't changed + err = client.SecretUpdate(ctx, secret.ID, secret.Meta.Version, secretSpec) + } + if err != nil { + return err + } + } + return nil +} + +func createNetworks( + ctx context.Context, + dockerCli *command.DockerCli, + namespace convert.Namespace, + networks map[string]types.NetworkCreate, +) error { + client := dockerCli.Client() + + existingNetworks, err := getStackNetworks(ctx, client, namespace.Name()) + if err != nil { + return err + } + + existingNetworkMap := make(map[string]types.NetworkResource) + for _, network := range existingNetworks { + existingNetworkMap[network.Name] = network + } + + for internalName, createOpts := range networks { + name := namespace.Scope(internalName) + if _, exists := existingNetworkMap[name]; exists { + continue + } + + if createOpts.Driver == "" { + createOpts.Driver = defaultNetworkDriver + } + + fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name) + if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil { + return err + } + } + + return nil +} + +func deployServices( + ctx context.Context, + dockerCli *command.DockerCli, + services map[string]swarm.ServiceSpec, + namespace convert.Namespace, + sendAuth bool, +) error { + apiClient := dockerCli.Client() + out := dockerCli.Out() + + existingServices, err := getServices(ctx, apiClient, namespace.Name()) + if err != nil { + return err + } + + existingServiceMap := make(map[string]swarm.Service) + for _, service := range existingServices { + existingServiceMap[service.Spec.Name] = service + } + + for internalName, serviceSpec := range services { + name := namespace.Scope(internalName) + + encodedAuth := "" + if sendAuth { + // Retrieve encoded auth token from the image reference + image := serviceSpec.TaskTemplate.ContainerSpec.Image + encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err != nil { + return err + } + } + + if service, exists := existingServiceMap[name]; exists { + fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID) + + updateOpts := types.ServiceUpdateOptions{} + if sendAuth { + updateOpts.EncodedRegistryAuth = encodedAuth + } + response, err := apiClient.ServiceUpdate( + ctx, + service.ID, + service.Version, + serviceSpec, + updateOpts, + ) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + } else { + fmt.Fprintf(out, "Creating service %s\n", name) + + createOpts := types.ServiceCreateOptions{} + if sendAuth { + createOpts.EncodedRegistryAuth = encodedAuth + } + if _, err := apiClient.ServiceCreate(ctx, serviceSpec, createOpts); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go b/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go new file mode 100644 index 0000000..5a178c4 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go @@ -0,0 +1,83 @@ +package stack + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/compose/convert" +) + +func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { + bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile) + if err != nil { + return err + } + + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.namespace) + + networks := make(map[string]types.NetworkCreate) + for _, service := range bundle.Services { + for _, networkName := range service.Networks { + networks[networkName] = types.NetworkCreate{ + Labels: convert.AddStackLabel(namespace, nil), + } + } + } + + services := make(map[string]swarm.ServiceSpec) + for internalName, service := range bundle.Services { + name := namespace.Scope(internalName) + + var ports []swarm.PortConfig + for _, portSpec := range service.Ports { + ports = append(ports, swarm.PortConfig{ + Protocol: swarm.PortConfigProtocol(portSpec.Protocol), + TargetPort: portSpec.Port, + }) + } + + nets := []swarm.NetworkAttachmentConfig{} + for _, networkName := range service.Networks { + nets = append(nets, swarm.NetworkAttachmentConfig{ + Target: namespace.Scope(networkName), + Aliases: []string{networkName}, + }) + } + + serviceSpec := swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: convert.AddStackLabel(namespace, service.Labels), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: service.Image, + Command: service.Command, + Args: service.Args, + Env: service.Env, + // Service Labels will not be copied to Containers + // automatically during the deployment so we apply + // it here. + Labels: convert.AddStackLabel(namespace, nil), + }, + }, + EndpointSpec: &swarm.EndpointSpec{ + Ports: ports, + }, + Networks: nets, + } + + services[internalName] = serviceSpec + } + + if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/list.go b/vendor/github.com/docker/docker/cli/command/stack/list.go new file mode 100644 index 0000000..9b6c645 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/list.go @@ -0,0 +1,113 @@ +package stack + +import ( + "fmt" + "io" + "strconv" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/compose/convert" + "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +const ( + listItemFmt = "%s\t%s\n" +) + +type listOptions struct { +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{} + + cmd := &cobra.Command{ + Use: "ls", + Aliases: []string{"list"}, + Short: "List stacks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + stacks, err := getStacks(ctx, client) + if err != nil { + return err + } + + out := dockerCli.Out() + printTable(out, stacks) + return nil +} + +func printTable(out io.Writer, stacks []*stack) { + writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + + fmt.Fprintf(writer, listItemFmt, "NAME", "SERVICES") + for _, stack := range stacks { + fmt.Fprintf( + writer, + listItemFmt, + stack.Name, + strconv.Itoa(stack.Services), + ) + } +} + +type stack struct { + // Name is the name of the stack + Name string + // Services is the number of the services + Services int +} + +func getStacks( + ctx context.Context, + apiclient client.APIClient, +) ([]*stack, error) { + services, err := apiclient.ServiceList( + ctx, + types.ServiceListOptions{Filters: getAllStacksFilter()}) + if err != nil { + return nil, err + } + m := make(map[string]*stack, 0) + for _, service := range services { + labels := service.Spec.Labels + name, ok := labels[convert.LabelNamespace] + if !ok { + return nil, fmt.Errorf("cannot get label %s for service %s", + convert.LabelNamespace, service.ID) + } + ztack, ok := m[name] + if !ok { + m[name] = &stack{ + Name: name, + Services: 1, + } + } else { + ztack.Services++ + } + } + var stacks []*stack + for _, stack := range m { + stacks = append(stacks, stack) + } + return stacks, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/opts.go b/vendor/github.com/docker/docker/cli/command/stack/opts.go new file mode 100644 index 0000000..74fe4f5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/opts.go @@ -0,0 +1,49 @@ +package stack + +import ( + "fmt" + "io" + "os" + + "github.com/docker/docker/cli/command/bundlefile" + "github.com/spf13/pflag" +) + +func addComposefileFlag(opt *string, flags *pflag.FlagSet) { + flags.StringVarP(opt, "compose-file", "c", "", "Path to a Compose file") +} + +func addBundlefileFlag(opt *string, flags *pflag.FlagSet) { + flags.StringVar(opt, "bundle-file", "", "Path to a Distributed Application Bundle file") + flags.SetAnnotation("bundle-file", "experimental", nil) +} + +func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) { + flags.BoolVar(opt, "with-registry-auth", false, "Send registry authentication details to Swarm agents") +} + +func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { + defaultPath := fmt.Sprintf("%s.dab", namespace) + + if path == "" { + path = defaultPath + } + if _, err := os.Stat(path); err != nil { + return nil, fmt.Errorf( + "Bundle %s not found. Specify the path with --file", + path) + } + + fmt.Fprintf(stderr, "Loading bundle from %s\n", path) + reader, err := os.Open(path) + if err != nil { + return nil, err + } + defer reader.Close() + + bundle, err := bundlefile.LoadFile(reader) + if err != nil { + return nil, fmt.Errorf("Error reading %s: %v\n", path, err) + } + return bundle, err +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/ps.go b/vendor/github.com/docker/docker/cli/command/stack/ps.go new file mode 100644 index 0000000..e4351bf --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/ps.go @@ -0,0 +1,61 @@ +package stack + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/cli/command/task" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type psOptions struct { + filter opts.FilterOpt + noTrunc bool + namespace string + noResolve bool +} + +func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] STACK", + Short: "List the tasks in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runPS(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPS(dockerCli *command.DockerCli, opts psOptions) error { + namespace := opts.namespace + client := dockerCli.Client() + ctx := context.Background() + + filter := getStackFilterFromOpt(opts.namespace, opts.filter) + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + if len(tasks) == 0 { + fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) + return nil + } + + return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/remove.go b/vendor/github.com/docker/docker/cli/command/stack/remove.go new file mode 100644 index 0000000..966c1aa --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/remove.go @@ -0,0 +1,112 @@ +package stack + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type removeOptions struct { + namespace string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm STACK", + Aliases: []string{"remove", "down"}, + Short: "Remove the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runRemove(dockerCli, opts) + }, + } + return cmd +} + +func runRemove(dockerCli *command.DockerCli, opts removeOptions) error { + namespace := opts.namespace + client := dockerCli.Client() + ctx := context.Background() + + services, err := getServices(ctx, client, namespace) + if err != nil { + return err + } + + networks, err := getStackNetworks(ctx, client, namespace) + if err != nil { + return err + } + + secrets, err := getStackSecrets(ctx, client, namespace) + if err != nil { + return err + } + + if len(services)+len(networks)+len(secrets) == 0 { + fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) + return nil + } + + hasError := removeServices(ctx, dockerCli, services) + hasError = removeSecrets(ctx, dockerCli, secrets) || hasError + hasError = removeNetworks(ctx, dockerCli, networks) || hasError + + if hasError { + return fmt.Errorf("Failed to remove some resources") + } + return nil +} + +func removeServices( + ctx context.Context, + dockerCli *command.DockerCli, + services []swarm.Service, +) bool { + var err error + for _, service := range services { + fmt.Fprintf(dockerCli.Err(), "Removing service %s\n", service.Spec.Name) + if err = dockerCli.Client().ServiceRemove(ctx, service.ID); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to remove service %s: %s", service.ID, err) + } + } + return err != nil +} + +func removeNetworks( + ctx context.Context, + dockerCli *command.DockerCli, + networks []types.NetworkResource, +) bool { + var err error + for _, network := range networks { + fmt.Fprintf(dockerCli.Err(), "Removing network %s\n", network.Name) + if err = dockerCli.Client().NetworkRemove(ctx, network.ID); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to remove network %s: %s", network.ID, err) + } + } + return err != nil +} + +func removeSecrets( + ctx context.Context, + dockerCli *command.DockerCli, + secrets []swarm.Secret, +) bool { + var err error + for _, secret := range secrets { + fmt.Fprintf(dockerCli.Err(), "Removing secret %s\n", secret.Spec.Name) + if err = dockerCli.Client().SecretRemove(ctx, secret.ID); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to remove secret %s: %s", secret.ID, err) + } + } + return err != nil +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/services.go b/vendor/github.com/docker/docker/cli/command/stack/services.go new file mode 100644 index 0000000..a46652d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/services.go @@ -0,0 +1,79 @@ +package stack + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/service" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type servicesOptions struct { + quiet bool + filter opts.FilterOpt + namespace string +} + +func newServicesCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := servicesOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "services [OPTIONS] STACK", + Short: "List the services in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runServices(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runServices(dockerCli *command.DockerCli, opts servicesOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + filter := getStackFilterFromOpt(opts.namespace, opts.filter) + services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) + if err != nil { + return err + } + + out := dockerCli.Out() + + // if no services in this stack, print message and exit 0 + if len(services) == 0 { + fmt.Fprintf(out, "Nothing found in stack: %s\n", opts.namespace) + return nil + } + + if opts.quiet { + service.PrintQuiet(out, services) + } else { + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + service.PrintNotQuiet(out, services, nodes, tasks) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/cmd.go b/vendor/github.com/docker/docker/cli/command/swarm/cmd.go new file mode 100644 index 0000000..632679c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/cmd.go @@ -0,0 +1,28 @@ +package swarm + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewSwarmCommand returns a cobra command for `swarm` subcommands +func NewSwarmCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "swarm", + Short: "Manage Swarm", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newInitCommand(dockerCli), + newJoinCommand(dockerCli), + newJoinTokenCommand(dockerCli), + newUnlockKeyCommand(dockerCli), + newUpdateCommand(dockerCli), + newLeaveCommand(dockerCli), + newUnlockCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/init.go b/vendor/github.com/docker/docker/cli/command/swarm/init.go new file mode 100644 index 0000000..2550fee --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/init.go @@ -0,0 +1,85 @@ +package swarm + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type initOptions struct { + swarmOptions + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. + advertiseAddr string + forceNewCluster bool +} + +func newInitCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := initOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "init [OPTIONS]", + Short: "Initialize a swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInit(dockerCli, cmd.Flags(), opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state") + flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)") + addSwarmFlags(flags, &opts.swarmOptions) + return cmd +} + +func runInit(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts initOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + req := swarm.InitRequest{ + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + ForceNewCluster: opts.forceNewCluster, + Spec: opts.swarmOptions.ToSpec(flags), + AutoLockManagers: opts.swarmOptions.autolock, + } + + nodeID, err := client.SwarmInit(ctx, req) + if err != nil { + if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") { + return errors.New(err.Error() + " - specify one with --advertise-addr") + } + return err + } + + fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID) + + if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil { + return err + } + + fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n") + + if req.AutoLockManagers { + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/join.go b/vendor/github.com/docker/docker/cli/command/swarm/join.go new file mode 100644 index 0000000..004313b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/join.go @@ -0,0 +1,69 @@ +package swarm + +import ( + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type joinOptions struct { + remote string + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. + advertiseAddr string + token string +} + +func newJoinCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := joinOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "join [OPTIONS] HOST:PORT", + Short: "Join a swarm as a node and/or manager", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runJoin(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm") + return cmd +} + +func runJoin(dockerCli *command.DockerCli, opts joinOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + req := swarm.JoinRequest{ + JoinToken: opts.token, + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + RemoteAddrs: []string{opts.remote}, + } + err := client.SwarmJoin(ctx, req) + if err != nil { + return err + } + + info, err := client.Info(ctx) + if err != nil { + return err + } + + if info.Swarm.ControlAvailable { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.") + } else { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.") + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/join_token.go b/vendor/github.com/docker/docker/cli/command/swarm/join_token.go new file mode 100644 index 0000000..3a17a80 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/join_token.go @@ -0,0 +1,105 @@ +package swarm + +import ( + "errors" + "fmt" + + "github.com/spf13/cobra" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "golang.org/x/net/context" +) + +func newJoinTokenCommand(dockerCli *command.DockerCli) *cobra.Command { + var rotate, quiet bool + + cmd := &cobra.Command{ + Use: "join-token [OPTIONS] (worker|manager)", + Short: "Manage join tokens", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + worker := args[0] == "worker" + manager := args[0] == "manager" + + if !worker && !manager { + return errors.New("unknown role " + args[0]) + } + + client := dockerCli.Client() + ctx := context.Background() + + if rotate { + var flags swarm.UpdateFlags + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + flags.RotateWorkerToken = worker + flags.RotateManagerToken = manager + + err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags) + if err != nil { + return err + } + if !quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", args[0]) + } + } + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if quiet { + if worker { + fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Worker) + } else { + fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Manager) + } + } else { + info, err := client.Info(ctx) + if err != nil { + return err + } + return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, worker, manager) + } + return nil + }, + } + + flags := cmd.Flags() + flags.BoolVar(&rotate, flagRotate, false, "Rotate join token") + flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func printJoinCommand(ctx context.Context, dockerCli *command.DockerCli, nodeID string, worker bool, manager bool) error { + client := dockerCli.Client() + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + if node.ManagerStatus != nil { + if worker { + fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Worker, node.ManagerStatus.Addr) + } + if manager { + fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Manager, node.ManagerStatus.Addr) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/leave.go b/vendor/github.com/docker/docker/cli/command/swarm/leave.go new file mode 100644 index 0000000..e2cfa0a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/leave.go @@ -0,0 +1,44 @@ +package swarm + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type leaveOptions struct { + force bool +} + +func newLeaveCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := leaveOptions{} + + cmd := &cobra.Command{ + Use: "leave [OPTIONS]", + Short: "Leave the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLeave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force this node to leave the swarm, ignoring warnings") + return cmd +} + +func runLeave(dockerCli *command.DockerCli, opts leaveOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if err := client.SwarmLeave(ctx, opts.force); err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Node left the swarm.") + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/opts.go b/vendor/github.com/docker/docker/cli/command/swarm/opts.go new file mode 100644 index 0000000..9db46dc --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/opts.go @@ -0,0 +1,209 @@ +package swarm + +import ( + "encoding/csv" + "errors" + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" +) + +const ( + defaultListenAddr = "0.0.0.0:2377" + + flagCertExpiry = "cert-expiry" + flagDispatcherHeartbeat = "dispatcher-heartbeat" + flagListenAddr = "listen-addr" + flagAdvertiseAddr = "advertise-addr" + flagQuiet = "quiet" + flagRotate = "rotate" + flagToken = "token" + flagTaskHistoryLimit = "task-history-limit" + flagExternalCA = "external-ca" + flagMaxSnapshots = "max-snapshots" + flagSnapshotInterval = "snapshot-interval" + flagLockKey = "lock-key" + flagAutolock = "autolock" +) + +type swarmOptions struct { + taskHistoryLimit int64 + dispatcherHeartbeat time.Duration + nodeCertExpiry time.Duration + externalCA ExternalCAOption + maxSnapshots uint64 + snapshotInterval uint64 + autolock bool +} + +// NodeAddrOption is a pflag.Value for listening addresses +type NodeAddrOption struct { + addr string +} + +// String prints the representation of this flag +func (a *NodeAddrOption) String() string { + return a.Value() +} + +// Set the value for this flag +func (a *NodeAddrOption) Set(value string) error { + addr, err := opts.ParseTCPAddr(value, a.addr) + if err != nil { + return err + } + a.addr = addr + return nil +} + +// Type returns the type of this flag +func (a *NodeAddrOption) Type() string { + return "node-addr" +} + +// Value returns the value of this option as addr:port +func (a *NodeAddrOption) Value() string { + return strings.TrimPrefix(a.addr, "tcp://") +} + +// NewNodeAddrOption returns a new node address option +func NewNodeAddrOption(addr string) NodeAddrOption { + return NodeAddrOption{addr} +} + +// NewListenAddrOption returns a NodeAddrOption with default values +func NewListenAddrOption() NodeAddrOption { + return NewNodeAddrOption(defaultListenAddr) +} + +// ExternalCAOption is a Value type for parsing external CA specifications. +type ExternalCAOption struct { + values []*swarm.ExternalCA +} + +// Set parses an external CA option. +func (m *ExternalCAOption) Set(value string) error { + parsed, err := parseExternalCA(value) + if err != nil { + return err + } + + m.values = append(m.values, parsed) + return nil +} + +// Type returns the type of this option. +func (m *ExternalCAOption) Type() string { + return "external-ca" +} + +// String returns a string repr of this option. +func (m *ExternalCAOption) String() string { + externalCAs := []string{} + for _, externalCA := range m.values { + repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) + externalCAs = append(externalCAs, repr) + } + return strings.Join(externalCAs, ", ") +} + +// Value returns the external CAs +func (m *ExternalCAOption) Value() []*swarm.ExternalCA { + return m.values +} + +// parseExternalCA parses an external CA specification from the command line, +// such as protocol=cfssl,url=https://example.com. +func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) { + csvReader := csv.NewReader(strings.NewReader(caSpec)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + + externalCA := swarm.ExternalCA{ + Options: make(map[string]string), + } + + var ( + hasProtocol bool + hasURL bool + ) + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + + if len(parts) != 2 { + return nil, fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key, value := parts[0], parts[1] + + switch strings.ToLower(key) { + case "protocol": + hasProtocol = true + if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) { + externalCA.Protocol = swarm.ExternalCAProtocolCFSSL + } else { + return nil, fmt.Errorf("unrecognized external CA protocol %s", value) + } + case "url": + hasURL = true + externalCA.URL = value + default: + externalCA.Options[key] = value + } + } + + if !hasProtocol { + return nil, errors.New("the external-ca option needs a protocol= parameter") + } + if !hasURL { + return nil, errors.New("the external-ca option needs a url= parameter") + } + + return &externalCA, nil +} + +func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) { + flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit") + flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period (ns|us|ms|s|m|h)") + flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates (ns|us|ms|s|m|h)") + flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints") + flags.Uint64Var(&opts.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain") + flags.Uint64Var(&opts.snapshotInterval, flagSnapshotInterval, 10000, "Number of log entries between Raft snapshots") +} + +func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet) { + if flags.Changed(flagTaskHistoryLimit) { + spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit + } + if flags.Changed(flagDispatcherHeartbeat) { + spec.Dispatcher.HeartbeatPeriod = opts.dispatcherHeartbeat + } + if flags.Changed(flagCertExpiry) { + spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry + } + if flags.Changed(flagExternalCA) { + spec.CAConfig.ExternalCAs = opts.externalCA.Value() + } + if flags.Changed(flagMaxSnapshots) { + spec.Raft.KeepOldSnapshots = &opts.maxSnapshots + } + if flags.Changed(flagSnapshotInterval) { + spec.Raft.SnapshotInterval = opts.snapshotInterval + } + if flags.Changed(flagAutolock) { + spec.EncryptionConfig.AutoLockManagers = opts.autolock + } +} + +func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec { + var spec swarm.Spec + opts.mergeSwarmSpec(&spec, flags) + return spec +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go b/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go new file mode 100644 index 0000000..568dc87 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go @@ -0,0 +1,37 @@ +package swarm + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestNodeAddrOptionSetHostAndPort(t *testing.T) { + opt := NewNodeAddrOption("old:123") + addr := "newhost:5555" + assert.NilError(t, opt.Set(addr)) + assert.Equal(t, opt.Value(), addr) +} + +func TestNodeAddrOptionSetHostOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("newhost")) + assert.Equal(t, opt.Value(), "newhost:2377") +} + +func TestNodeAddrOptionSetHostOnlyIPv6(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("::1")) + assert.Equal(t, opt.Value(), "[::1]:2377") +} + +func TestNodeAddrOptionSetPortOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set(":4545")) + assert.Equal(t, opt.Value(), "0.0.0.0:4545") +} + +func TestNodeAddrOptionSetInvalidFormat(t *testing.T) { + opt := NewListenAddrOption() + assert.Error(t, opt.Set("http://localhost:4545"), "Invalid") +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/unlock.go b/vendor/github.com/docker/docker/cli/command/swarm/unlock.go new file mode 100644 index 0000000..048fb56 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/unlock.go @@ -0,0 +1,54 @@ +package swarm + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/spf13/cobra" + "golang.org/x/crypto/ssh/terminal" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "golang.org/x/net/context" +) + +func newUnlockCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "unlock", + Short: "Unlock swarm", + Args: cli.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + client := dockerCli.Client() + ctx := context.Background() + + key, err := readKey(dockerCli.In(), "Please enter unlock key: ") + if err != nil { + return err + } + req := swarm.UnlockRequest{ + UnlockKey: key, + } + + return client.SwarmUnlock(ctx, req) + }, + } + + return cmd +} + +func readKey(in *command.InStream, prompt string) (string, error) { + if in.IsTerminal() { + fmt.Print(prompt) + dt, err := terminal.ReadPassword(int(in.FD())) + fmt.Println() + return string(dt), err + } + key, err := bufio.NewReader(in).ReadString('\n') + if err == io.EOF { + err = nil + } + return strings.TrimSpace(key), err +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go b/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go new file mode 100644 index 0000000..96450f5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go @@ -0,0 +1,79 @@ +package swarm + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func newUnlockKeyCommand(dockerCli *command.DockerCli) *cobra.Command { + var rotate, quiet bool + + cmd := &cobra.Command{ + Use: "unlock-key [OPTIONS]", + Short: "Manage the unlock key", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client := dockerCli.Client() + ctx := context.Background() + + if rotate { + flags := swarm.UpdateFlags{RotateManagerUnlockKey: true} + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if !swarm.Spec.EncryptionConfig.AutoLockManagers { + return errors.New("cannot rotate because autolock is not turned on") + } + + err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags) + if err != nil { + return err + } + if !quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated manager unlock key.\n\n") + } + } + + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + + if unlockKeyResp.UnlockKey == "" { + return errors.New("no unlock key is set") + } + + if quiet { + fmt.Fprintln(dockerCli.Out(), unlockKeyResp.UnlockKey) + } else { + printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) + } + return nil + }, + } + + flags := cmd.Flags() + flags.BoolVar(&rotate, flagRotate, false, "Rotate unlock key") + flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func printUnlockCommand(ctx context.Context, dockerCli *command.DockerCli, unlockKey string) { + if len(unlockKey) == 0 { + return + } + + fmt.Fprintf(dockerCli.Out(), "To unlock a swarm manager after it restarts, run the `docker swarm unlock`\ncommand and provide the following key:\n\n %s\n\nPlease remember to store this key in a password manager, since without it you\nwill not be able to restart the manager.\n", unlockKey) + return +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/update.go b/vendor/github.com/docker/docker/cli/command/swarm/update.go new file mode 100644 index 0000000..dbbd268 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/update.go @@ -0,0 +1,72 @@ +package swarm + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := swarmOptions{} + + cmd := &cobra.Command{ + Use: "update [OPTIONS]", + Short: "Update the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), opts) + }, + PreRunE: func(cmd *cobra.Command, args []string) error { + if cmd.Flags().NFlag() == 0 { + return pflag.ErrHelp + } + return nil + }, + } + + cmd.Flags().BoolVar(&opts.autolock, flagAutolock, false, "Change manager autolocking setting (true|false)") + addSwarmFlags(cmd.Flags(), &opts) + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var updateFlags swarm.UpdateFlags + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + prevAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers + + opts.mergeSwarmSpec(&swarm.Spec, flags) + + curAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers + + err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, updateFlags) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Swarm updated.") + + if curAutoLock && !prevAutoLock { + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/system/cmd.go b/vendor/github.com/docker/docker/cli/command/system/cmd.go new file mode 100644 index 0000000..ab3beb8 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/cmd.go @@ -0,0 +1,26 @@ +package system + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewSystemCommand returns a cobra command for `system` subcommands +func NewSystemCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "system", + Short: "Manage Docker", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + NewEventsCommand(dockerCli), + NewInfoCommand(dockerCli), + NewDiskUsageCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/system/df.go b/vendor/github.com/docker/docker/cli/command/system/df.go new file mode 100644 index 0000000..9f71248 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/df.go @@ -0,0 +1,56 @@ +package system + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type diskUsageOptions struct { + verbose bool +} + +// NewDiskUsageCommand creates a new cobra.Command for `docker df` +func NewDiskUsageCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts diskUsageOptions + + cmd := &cobra.Command{ + Use: "df [OPTIONS]", + Short: "Show docker disk usage", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runDiskUsage(dockerCli, opts) + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Show detailed information on space usage") + + return cmd +} + +func runDiskUsage(dockerCli *command.DockerCli, opts diskUsageOptions) error { + du, err := dockerCli.Client().DiskUsage(context.Background()) + if err != nil { + return err + } + + duCtx := formatter.DiskUsageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + }, + LayersSize: du.LayersSize, + Images: du.Images, + Containers: du.Containers, + Volumes: du.Volumes, + Verbose: opts.verbose, + } + + duCtx.Write() + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/system/events.go b/vendor/github.com/docker/docker/cli/command/system/events.go new file mode 100644 index 0000000..0875230 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/events.go @@ -0,0 +1,140 @@ +package system + +import ( + "fmt" + "io" + "io/ioutil" + "sort" + "strings" + "text/template" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/utils/templates" + "github.com/spf13/cobra" +) + +type eventsOptions struct { + since string + until string + filter opts.FilterOpt + format string +} + +// NewEventsCommand creates a new cobra.Command for `docker events` +func NewEventsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := eventsOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "events [OPTIONS]", + Short: "Get real time events from the server", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runEvents(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.since, "since", "", "Show all events created since timestamp") + flags.StringVar(&opts.until, "until", "", "Stream events until this timestamp") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + flags.StringVar(&opts.format, "format", "", "Format the output using the given Go template") + + return cmd +} + +func runEvents(dockerCli *command.DockerCli, opts *eventsOptions) error { + tmpl, err := makeTemplate(opts.format) + if err != nil { + return cli.StatusError{ + StatusCode: 64, + Status: "Error parsing format: " + err.Error()} + } + options := types.EventsOptions{ + Since: opts.since, + Until: opts.until, + Filters: opts.filter.Value(), + } + + ctx, cancel := context.WithCancel(context.Background()) + events, errs := dockerCli.Client().Events(ctx, options) + defer cancel() + + out := dockerCli.Out() + + for { + select { + case event := <-events: + if err := handleEvent(out, event, tmpl); err != nil { + return err + } + case err := <-errs: + if err == io.EOF { + return nil + } + return err + } + } +} + +func handleEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + if tmpl == nil { + return prettyPrintEvent(out, event) + } + + return formatEvent(out, event, tmpl) +} + +func makeTemplate(format string) (*template.Template, error) { + if format == "" { + return nil, nil + } + tmpl, err := templates.Parse(format) + if err != nil { + return tmpl, err + } + // we execute the template for an empty message, so as to validate + // a bad template like "{{.badFieldString}}" + return tmpl, tmpl.Execute(ioutil.Discard, &eventtypes.Message{}) +} + +// prettyPrintEvent prints all types of event information. +// Each output includes the event type, actor id, name and action. +// Actor attributes are printed at the end if the actor has any. +func prettyPrintEvent(out io.Writer, event eventtypes.Message) error { + if event.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if event.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + + fmt.Fprintf(out, "%s %s %s", event.Type, event.Action, event.Actor.ID) + + if len(event.Actor.Attributes) > 0 { + var attrs []string + var keys []string + for k := range event.Actor.Attributes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := event.Actor.Attributes[k] + attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) + } + fmt.Fprintf(out, " (%s)", strings.Join(attrs, ", ")) + } + fmt.Fprint(out, "\n") + return nil +} + +func formatEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + defer out.Write([]byte{'\n'}) + return tmpl.Execute(out, event) +} diff --git a/vendor/github.com/docker/docker/cli/command/system/info.go b/vendor/github.com/docker/docker/cli/command/system/info.go new file mode 100644 index 0000000..e0b8767 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/info.go @@ -0,0 +1,334 @@ +package system + +import ( + "fmt" + "sort" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/utils" + "github.com/docker/docker/utils/templates" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type infoOptions struct { + format string +} + +// NewInfoCommand creates a new cobra.Command for `docker info` +func NewInfoCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts infoOptions + + cmd := &cobra.Command{ + Use: "info [OPTIONS]", + Short: "Display system-wide information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInfo(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInfo(dockerCli *command.DockerCli, opts *infoOptions) error { + ctx := context.Background() + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if opts.format == "" { + return prettyPrintInfo(dockerCli, info) + } + return formatInfo(dockerCli, info, opts.format) +} + +func prettyPrintInfo(dockerCli *command.DockerCli, info types.Info) error { + fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers) + fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning) + fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused) + fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped) + fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) + + // print a warning if devicemapper is using a loopback file + if pair[0] == "Data loop file" { + fmt.Fprintln(dockerCli.Err(), " WARNING: Usage of loopback devices is strongly discouraged for production use. Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.") + } + } + + } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1]) + } + } + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver) + + fmt.Fprintf(dockerCli.Out(), "Plugins: \n") + fmt.Fprintf(dockerCli.Out(), " Volume:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + fmt.Fprintf(dockerCli.Out(), " Network:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintf(dockerCli.Out(), " Authorization:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + } + + fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState) + if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive && info.Swarm.LocalNodeState != swarm.LocalNodeStateLocked { + fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID) + if info.Swarm.Error != "" { + fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error) + } + fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable) + if info.Swarm.ControlAvailable { + fmt.Fprintf(dockerCli.Out(), " ClusterID: %s\n", info.Swarm.Cluster.ID) + fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers) + fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes) + fmt.Fprintf(dockerCli.Out(), " Orchestration:\n") + taskHistoryRetentionLimit := int64(0) + if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil { + taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit + } + fmt.Fprintf(dockerCli.Out(), " Task History Retention Limit: %d\n", taskHistoryRetentionLimit) + fmt.Fprintf(dockerCli.Out(), " Raft:\n") + fmt.Fprintf(dockerCli.Out(), " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval) + if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil { + fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots) + } + fmt.Fprintf(dockerCli.Out(), " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick) + fmt.Fprintf(dockerCli.Out(), " Election Tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick) + fmt.Fprintf(dockerCli.Out(), " Dispatcher:\n") + fmt.Fprintf(dockerCli.Out(), " Heartbeat Period: %s\n", units.HumanDuration(time.Duration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod))) + fmt.Fprintf(dockerCli.Out(), " CA Configuration:\n") + fmt.Fprintf(dockerCli.Out(), " Expiry Duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry)) + if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 { + fmt.Fprintf(dockerCli.Out(), " External CAs:\n") + for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL) + } + } + } + fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr) + managers := []string{} + for _, entry := range info.Swarm.RemoteManagers { + managers = append(managers, entry.Addr) + } + if len(managers) > 0 { + sort.Strings(managers) + fmt.Fprintf(dockerCli.Out(), " Manager Addresses:\n") + for _, entry := range managers { + fmt.Fprintf(dockerCli.Out(), " %s\n", entry) + } + } + } + + if len(info.Runtimes) > 0 { + fmt.Fprintf(dockerCli.Out(), "Runtimes:") + for name := range info.Runtimes { + fmt.Fprintf(dockerCli.Out(), " %s", name) + } + fmt.Fprint(dockerCli.Out(), "\n") + fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime) + } + + if info.OSType == "linux" { + fmt.Fprintf(dockerCli.Out(), "Init Binary: %v\n", info.InitBinary) + + for _, ci := range []struct { + Name string + Commit types.Commit + }{ + {"containerd", info.ContainerdCommit}, + {"runc", info.RuncCommit}, + {"init", info.InitCommit}, + } { + fmt.Fprintf(dockerCli.Out(), "%s version: %s", ci.Name, ci.Commit.ID) + if ci.Commit.ID != ci.Commit.Expected { + fmt.Fprintf(dockerCli.Out(), " (expected: %s)", ci.Commit.Expected) + } + fmt.Fprintf(dockerCli.Out(), "\n") + } + if len(info.SecurityOptions) != 0 { + kvs, err := types.DecodeSecurityOptions(info.SecurityOptions) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Security Options:\n") + for _, so := range kvs { + fmt.Fprintf(dockerCli.Out(), " %s\n", so.Name) + for _, o := range so.Options { + switch o.Key { + case "profile": + if o.Value != "default" { + fmt.Fprintf(dockerCli.Err(), " WARNING: You're not using the default seccomp profile\n") + } + fmt.Fprintf(dockerCli.Out(), " Profile: %s\n", o.Value) + } + } + } + } + } + + // Isolation only has meaning on a Windows daemon. + if info.OSType == "windows" { + fmt.Fprintf(dockerCli.Out(), "Default Isolation: %v\n", info.Isolation) + } + + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture) + fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU) + fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID) + fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir) + fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", utils.IsDebugEnabled()) + fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug) + + if info.Debug { + fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd) + fmt.Fprintf(dockerCli.Out(), " Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime) + fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener) + } + + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u) + } + fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress) + } + + // Only output these warnings if the server does not support these features + if info.OSType != "windows" { + if !info.MemoryLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support") + } + if !info.SwapLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support") + } + if !info.KernelMemory { + fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support") + } + if !info.CPUCfsPeriod { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled") + } + } + + if info.Labels != nil { + fmt.Fprintln(dockerCli.Out(), "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(dockerCli.Out(), " %s\n", attribute) + } + // TODO: Engine labels with duplicate keys has been deprecated in 1.13 and will be error out + // after 3 release cycles (1.16). For now, a WARNING will be generated. The following will + // be removed eventually. + labelMap := map[string]string{} + for _, label := range info.Labels { + stringSlice := strings.SplitN(label, "=", 2) + if len(stringSlice) > 1 { + // If there is a conflict we will throw out an warning + if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { + fmt.Fprintln(dockerCli.Err(), "WARNING: labels with duplicate keys and conflicting values have been deprecated") + break + } + labelMap[stringSlice[0]] = stringSlice[1] + } + } + } + + fmt.Fprintf(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild) + if info.ClusterStore != "" { + fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore) + } + + if info.ClusterAdvertise != "" { + fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise) + } + + if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { + fmt.Fprintln(dockerCli.Out(), "Insecure Registries:") + for _, registry := range info.RegistryConfig.IndexConfigs { + if registry.Secure == false { + fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name) + } + } + + for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { + mask, _ := registry.Mask.Size() + fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask) + } + } + + if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 { + fmt.Fprintln(dockerCli.Out(), "Registry Mirrors:") + for _, mirror := range info.RegistryConfig.Mirrors { + fmt.Fprintf(dockerCli.Out(), " %s\n", mirror) + } + } + + fmt.Fprintf(dockerCli.Out(), "Live Restore Enabled: %v\n", info.LiveRestoreEnabled) + + return nil +} + +func formatInfo(dockerCli *command.DockerCli, info types.Info, format string) error { + tmpl, err := templates.Parse(format) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + err = tmpl.Execute(dockerCli.Out(), info) + dockerCli.Out().Write([]byte{'\n'}) + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/system/inspect.go b/vendor/github.com/docker/docker/cli/command/system/inspect.go new file mode 100644 index 0000000..c86e858 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/inspect.go @@ -0,0 +1,203 @@ +package system + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + inspectType string + size bool + ids []string +} + +// NewInspectCommand creates a new cobra.Command for `docker inspect` +func NewInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NAME|ID [NAME|ID...]", + Short: "Return low-level information on Docker objects", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.ids = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.StringVar(&opts.inspectType, "type", "", "Return JSON for specified type") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes if the type is container") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + var elementSearcher inspect.GetRefFunc + switch opts.inspectType { + case "", "container", "image", "node", "network", "service", "volume", "task", "plugin": + elementSearcher = inspectAll(context.Background(), dockerCli, opts.size, opts.inspectType) + default: + return fmt.Errorf("%q is not a valid value for --type", opts.inspectType) + } + return inspect.Inspect(dockerCli.Out(), opts.ids, opts.format, elementSearcher) +} + +func inspectContainers(ctx context.Context, dockerCli *command.DockerCli, getSize bool) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ContainerInspectWithRaw(ctx, ref, getSize) + } +} + +func inspectImages(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ImageInspectWithRaw(ctx, ref) + } +} + +func inspectNetwork(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NetworkInspectWithRaw(ctx, ref) + } +} + +func inspectNode(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NodeInspectWithRaw(ctx, ref) + } +} + +func inspectService(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ServiceInspectWithRaw(ctx, ref) + } +} + +func inspectTasks(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().TaskInspectWithRaw(ctx, ref) + } +} + +func inspectVolume(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().VolumeInspectWithRaw(ctx, ref) + } +} + +func inspectPlugin(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().PluginInspectWithRaw(ctx, ref) + } +} + +func inspectAll(ctx context.Context, dockerCli *command.DockerCli, getSize bool, typeConstraint string) inspect.GetRefFunc { + var inspectAutodetect = []struct { + objectType string + isSizeSupported bool + isSwarmObject bool + objectInspector func(string) (interface{}, []byte, error) + }{ + { + objectType: "container", + isSizeSupported: true, + objectInspector: inspectContainers(ctx, dockerCli, getSize), + }, + { + objectType: "image", + objectInspector: inspectImages(ctx, dockerCli), + }, + { + objectType: "network", + objectInspector: inspectNetwork(ctx, dockerCli), + }, + { + objectType: "volume", + objectInspector: inspectVolume(ctx, dockerCli), + }, + { + objectType: "service", + isSwarmObject: true, + objectInspector: inspectService(ctx, dockerCli), + }, + { + objectType: "task", + isSwarmObject: true, + objectInspector: inspectTasks(ctx, dockerCli), + }, + { + objectType: "node", + isSwarmObject: true, + objectInspector: inspectNode(ctx, dockerCli), + }, + { + objectType: "plugin", + objectInspector: inspectPlugin(ctx, dockerCli), + }, + } + + // isSwarmManager does an Info API call to verify that the daemon is + // a swarm manager. + isSwarmManager := func() bool { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return false + } + return info.Swarm.ControlAvailable + } + + isErrNotSupported := func(err error) bool { + return strings.Contains(err.Error(), "not supported") + } + + return func(ref string) (interface{}, []byte, error) { + const ( + swarmSupportUnknown = iota + swarmSupported + swarmUnsupported + ) + + isSwarmSupported := swarmSupportUnknown + + for _, inspectData := range inspectAutodetect { + if typeConstraint != "" && inspectData.objectType != typeConstraint { + continue + } + if typeConstraint == "" && inspectData.isSwarmObject { + if isSwarmSupported == swarmSupportUnknown { + if isSwarmManager() { + isSwarmSupported = swarmSupported + } else { + isSwarmSupported = swarmUnsupported + } + } + if isSwarmSupported == swarmUnsupported { + continue + } + } + v, raw, err := inspectData.objectInspector(ref) + if err != nil { + if typeConstraint == "" && (apiclient.IsErrNotFound(err) || isErrNotSupported(err)) { + continue + } + return v, raw, err + } + if getSize && !inspectData.isSizeSupported { + fmt.Fprintf(dockerCli.Err(), "WARNING: --size ignored for %s\n", inspectData.objectType) + } + return v, raw, err + } + return nil, nil, fmt.Errorf("Error: No such object: %s", ref) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/system/prune.go b/vendor/github.com/docker/docker/cli/command/system/prune.go new file mode 100644 index 0000000..92dddbd --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/prune.go @@ -0,0 +1,93 @@ +package system + +import ( + "fmt" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/prune" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool +} + +// NewPruneCommand creates a new cobra.Command for `docker prune` +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused data", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPrune(dockerCli, opts) + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images not just dangling ones") + + return cmd +} + +const ( + warning = `WARNING! This will remove: + - all stopped containers + - all volumes not used by at least one container + - all networks not used by at least one container + %s +Are you sure you want to continue?` + + danglingImageDesc = "- all dangling images" + allImageDesc = `- all images without at least one container associated to them` +) + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) error { + var message string + + if opts.all { + message = fmt.Sprintf(warning, allImageDesc) + } else { + message = fmt.Sprintf(warning, danglingImageDesc) + } + + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), message) { + return nil + } + + var spaceReclaimed uint64 + + for _, pruneFn := range []func(dockerCli *command.DockerCli) (uint64, string, error){ + prune.RunContainerPrune, + prune.RunVolumePrune, + prune.RunNetworkPrune, + } { + spc, output, err := pruneFn(dockerCli) + if err != nil { + return err + } + spaceReclaimed += spc + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + } + + spc, output, err := prune.RunImagePrune(dockerCli, opts.all) + if err != nil { + return err + } + if spc > 0 { + spaceReclaimed += spc + fmt.Fprintln(dockerCli.Out(), output) + } + + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/system/version.go b/vendor/github.com/docker/docker/cli/command/system/version.go new file mode 100644 index 0000000..ded4f4d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/version.go @@ -0,0 +1,113 @@ +package system + +import ( + "fmt" + "runtime" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/utils/templates" + "github.com/spf13/cobra" +) + +var versionTemplate = `Client: + Version: {{.Client.Version}} + API version: {{.Client.APIVersion}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.APIVersion}} (minimum version {{.Server.MinAPIVersion}}) + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}} + Experimental: {{.Server.Experimental}}{{end}}` + +type versionOptions struct { + format string +} + +// NewVersionCommand creates a new cobra.Command for `docker version` +func NewVersionCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts versionOptions + + cmd := &cobra.Command{ + Use: "version [OPTIONS]", + Short: "Show the Docker version information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runVersion(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runVersion(dockerCli *command.DockerCli, opts *versionOptions) error { + ctx := context.Background() + + templateFormat := versionTemplate + if opts.format != "" { + templateFormat = opts.format + } + + tmpl, err := templates.Parse(templateFormat) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + APIVersion := dockerCli.Client().ClientVersion() + if defaultAPIVersion := dockerCli.DefaultVersion(); APIVersion != defaultAPIVersion { + APIVersion = fmt.Sprintf("%s (downgraded from %s)", APIVersion, defaultAPIVersion) + } + + vd := types.VersionResponse{ + Client: &types.Version{ + Version: dockerversion.Version, + APIVersion: APIVersion, + GoVersion: runtime.Version(), + GitCommit: dockerversion.GitCommit, + BuildTime: dockerversion.BuildTime, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + + serverVersion, err := dockerCli.Client().ServerVersion(ctx) + if err == nil { + vd.Server = &serverVersion + } + + // first we need to make BuildTime more human friendly + t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) + if errTime == nil { + vd.Client.BuildTime = t.Format(time.ANSIC) + } + + if vd.ServerOK() { + t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) + if errTime == nil { + vd.Server.BuildTime = t.Format(time.ANSIC) + } + } + + if err2 := tmpl.Execute(dockerCli.Out(), vd); err2 != nil && err == nil { + err = err2 + } + dockerCli.Out().Write([]byte{'\n'}) + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/task/print.go b/vendor/github.com/docker/docker/cli/command/task/print.go new file mode 100644 index 0000000..0f1c2cf --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/task/print.go @@ -0,0 +1,161 @@ +package task + +import ( + "fmt" + "io" + "sort" + "strings" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +const ( + psTaskItemFmt = "%s\t%s\t%s\t%s\t%s\t%s %s ago\t%s\t%s\n" + maxErrLength = 30 +) + +type portStatus swarm.PortStatus + +func (ps portStatus) String() string { + if len(ps.Ports) == 0 { + return "" + } + + str := fmt.Sprintf("*:%d->%d/%s", ps.Ports[0].PublishedPort, ps.Ports[0].TargetPort, ps.Ports[0].Protocol) + for _, pConfig := range ps.Ports[1:] { + str += fmt.Sprintf(",*:%d->%d/%s", pConfig.PublishedPort, pConfig.TargetPort, pConfig.Protocol) + } + + return str +} + +type tasksBySlot []swarm.Task + +func (t tasksBySlot) Len() int { + return len(t) +} + +func (t tasksBySlot) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +func (t tasksBySlot) Less(i, j int) bool { + // Sort by slot. + if t[i].Slot != t[j].Slot { + return t[i].Slot < t[j].Slot + } + + // If same slot, sort by most recent. + return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) +} + +// Print task information in a table format. +// Besides this, command `docker node ps ` +// and `docker stack ps` will call this, too. +func Print(dockerCli *command.DockerCli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error { + sort.Stable(tasksBySlot(tasks)) + + writer := tabwriter.NewWriter(dockerCli.Out(), 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "IMAGE", "NODE", "DESIRED STATE", "CURRENT STATE", "ERROR", "PORTS"}, "\t")) + + if err := print(writer, ctx, tasks, resolver, noTrunc); err != nil { + return err + } + + return nil +} + +// PrintQuiet shows task list in a quiet way. +func PrintQuiet(dockerCli *command.DockerCli, tasks []swarm.Task) error { + sort.Stable(tasksBySlot(tasks)) + + out := dockerCli.Out() + + for _, task := range tasks { + fmt.Fprintln(out, task.ID) + } + + return nil +} + +func print(out io.Writer, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error { + prevName := "" + for _, task := range tasks { + id := task.ID + if !noTrunc { + id = stringid.TruncateID(id) + } + + serviceName, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID) + if err != nil { + return err + } + + nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID) + if err != nil { + return err + } + + name := "" + if task.Slot != 0 { + name = fmt.Sprintf("%v.%v", serviceName, task.Slot) + } else { + name = fmt.Sprintf("%v.%v", serviceName, task.NodeID) + } + + // Indent the name if necessary + indentedName := name + if name == prevName { + indentedName = fmt.Sprintf(" \\_ %s", indentedName) + } + prevName = name + + // Trim and quote the error message. + taskErr := task.Status.Err + if !noTrunc && len(taskErr) > maxErrLength { + taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1]) + } + if len(taskErr) > 0 { + taskErr = fmt.Sprintf("\"%s\"", taskErr) + } + + image := task.Spec.ContainerSpec.Image + if !noTrunc { + ref, err := distreference.ParseNamed(image) + if err == nil { + // update image string for display + namedTagged, ok := ref.(distreference.NamedTagged) + if ok { + image = namedTagged.Name() + ":" + namedTagged.Tag() + } + } + } + + fmt.Fprintf( + out, + psTaskItemFmt, + id, + indentedName, + image, + nodeValue, + command.PrettyPrint(task.DesiredState), + command.PrettyPrint(task.Status.State), + strings.ToLower(units.HumanDuration(time.Since(task.Status.Timestamp))), + taskErr, + portStatus(task.Status.PortStatus), + ) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/trust.go b/vendor/github.com/docker/docker/cli/command/trust.go new file mode 100644 index 0000000..b4c8a84 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/trust.go @@ -0,0 +1,39 @@ +package command + +import ( + "os" + "strconv" + + "github.com/spf13/pflag" +) + +var ( + // TODO: make this not global + untrusted bool +) + +// AddTrustedFlags adds content trust flags to the current command flagset +func AddTrustedFlags(fs *pflag.FlagSet, verify bool) { + trusted, message := setupTrustedFlag(verify) + fs.BoolVar(&untrusted, "disable-content-trust", !trusted, message) +} + +func setupTrustedFlag(verify bool) (bool, string) { + var trusted bool + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + trusted = true + } + } + message := "Skip image signing" + if verify { + message = "Skip image verification" + } + return trusted, message +} + +// IsTrusted returns true if content trust is enabled +func IsTrusted() bool { + return !untrusted +} diff --git a/vendor/github.com/docker/docker/cli/command/utils.go b/vendor/github.com/docker/docker/cli/command/utils.go new file mode 100644 index 0000000..1837ca4 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/utils.go @@ -0,0 +1,87 @@ +package command + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" +) + +// CopyToFile writes the content of the reader to the specified file +func CopyToFile(outfile string, r io.Reader) error { + tmpFile, err := ioutil.TempFile(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// capitalizeFirst capitalizes the first character of string +func capitalizeFirst(s string) string { + switch l := len(s); l { + case 0: + return s + case 1: + return strings.ToLower(s) + default: + return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) + } +} + +// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. +func PrettyPrint(i interface{}) string { + switch t := i.(type) { + case nil: + return "None" + case string: + return capitalizeFirst(t) + default: + return capitalizeFirst(fmt.Sprintf("%s", t)) + } +} + +// PromptForConfirmation requests and checks confirmation from user. +// This will display the provided message followed by ' [y/N] '. If +// the user input 'y' or 'Y' it returns true other false. If no +// message is provided "Are you sure you want to proceed? [y/N] " +// will be used instead. +func PromptForConfirmation(ins *InStream, outs *OutStream, message string) bool { + if message == "" { + message = "Are you sure you want to proceed?" + } + message += " [y/N] " + + fmt.Fprintf(outs, message) + + // On Windows, force the use of the regular OS stdin stream. + if runtime.GOOS == "windows" { + ins = NewInStream(os.Stdin) + } + + answer := "" + n, _ := fmt.Fscan(ins, &answer) + if n != 1 || (answer != "y" && answer != "Y") { + return false + } + + return true +} diff --git a/vendor/github.com/docker/docker/cli/command/volume/cmd.go b/vendor/github.com/docker/docker/cli/command/volume/cmd.go new file mode 100644 index 0000000..40862f2 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/cmd.go @@ -0,0 +1,45 @@ +package volume + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewVolumeCommand returns a cobra command for `volume` subcommands +func NewVolumeCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "volume COMMAND", + Short: "Manage volumes", + Long: volumeDescription, + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} + +var volumeDescription = ` +The **docker volume** command has subcommands for managing data volumes. A data +volume is a specially-designated directory that by-passes storage driver +management. + +Data volumes persist data independent of a container's life cycle. When you +delete a container, the Docker daemon does not delete any data volumes. You can +share volumes across multiple containers. Moreover, you can share data volumes +with other computing resources in your system. + +To see help for a subcommand, use: + + docker volume COMMAND --help + +For full details on using docker volume visit Docker's online documentation. + +` diff --git a/vendor/github.com/docker/docker/cli/command/volume/create.go b/vendor/github.com/docker/docker/cli/command/volume/create.go new file mode 100644 index 0000000..7b2a7e3 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/create.go @@ -0,0 +1,111 @@ +package volume + +import ( + "fmt" + + "golang.org/x/net/context" + + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] [VOLUME]", + Short: "Create a volume", + Long: createDescription, + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 1 { + if opts.name != "" { + fmt.Fprint(dockerCli.Err(), "Conflicting options: either specify --name or provide positional arg, not both\n") + return cli.StatusError{StatusCode: 1} + } + opts.name = args[0] + } + return runCreate(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.StringVarP(&opts.driver, "driver", "d", "local", "Specify volume driver name") + flags.StringVar(&opts.name, "name", "", "Specify volume name") + flags.Lookup("name").Hidden = true + flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&opts.labels, "label", "Set metadata for a volume") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts createOptions) error { + client := dockerCli.Client() + + volReq := volumetypes.VolumesCreateBody{ + Driver: opts.driver, + DriverOpts: opts.driverOpts.GetAll(), + Name: opts.name, + Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), + } + + vol, err := client.VolumeCreate(context.Background(), volReq) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name) + return nil +} + +var createDescription = ` +Creates a new volume that containers can consume and store data in. If a name +is not specified, Docker generates a random name. You create a volume and then +configure the container to use it, for example: + + $ docker volume create hello + hello + $ docker run -d -v hello:/world busybox ls /world + +The mount is created inside the container's **/src** directory. Docker doesn't +not support relative paths for mount points inside the container. + +Multiple containers can use the same volume in the same time period. This is +useful if two containers need access to shared data. For example, if one +container writes and the other reads the data. + +## Driver specific options + +Some volume drivers may take options to customize the volume creation. Use the +**-o** or **--opt** flags to pass driver options: + + $ docker volume create --driver fake --opt tardis=blue --opt timey=wimey + +These options are passed directly to the volume driver. Options for different +volume drivers may do different things (or nothing at all). + +The built-in **local** driver on Windows does not support any options. + +The built-in **local** driver on Linux accepts options similar to the linux +**mount** command: + + $ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 + +Another example: + + $ docker volume create --driver local --opt type=btrfs --opt device=/dev/sda2 + +` diff --git a/vendor/github.com/docker/docker/cli/command/volume/inspect.go b/vendor/github.com/docker/docker/cli/command/volume/inspect.go new file mode 100644 index 0000000..5eb8ad2 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/inspect.go @@ -0,0 +1,55 @@ +package volume + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] VOLUME [VOLUME...]", + Short: "Display detailed information on one or more volumes", + Long: inspectDescription, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getVolFunc := func(name string) (interface{}, []byte, error) { + i, err := client.VolumeInspect(ctx, name) + return i, nil, err + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc) +} + +var inspectDescription = ` +Returns information about one or more volumes. By default, this command renders +all results in a JSON array. You can specify an alternate format to execute a +given template is executed for each result. Go's https://golang.org/pkg/text/template/ +package describes all the details of the format. + +` diff --git a/vendor/github.com/docker/docker/cli/command/volume/list.go b/vendor/github.com/docker/docker/cli/command/volume/list.go new file mode 100644 index 0000000..d76006a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/list.go @@ -0,0 +1,91 @@ +package volume + +import ( + "sort" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type byVolumeName []*types.Volume + +func (r byVolumeName) Len() int { return len(r) } +func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byVolumeName) Less(i, j int) bool { + return r[i].Name < r[j].Name +} + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List volumes", + Long: listDescription, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display volume names") + flags.StringVar(&opts.format, "format", "", "Pretty-print volumes using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 'dangling=true')") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + volumes, err := client.VolumeList(context.Background(), opts.filter.Value()) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().VolumesFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byVolumeName(volumes.Volumes)) + + volumeCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewVolumeFormat(format, opts.quiet), + } + return formatter.VolumeWrite(volumeCtx, volumes.Volumes) +} + +var listDescription = ` + +Lists all the volumes Docker manages. You can filter using the **-f** or +**--filter** flag. The filtering format is a **key=value** pair. To specify +more than one filter, pass multiple flags (for example, +**--filter "foo=bar" --filter "bif=baz"**) + +The currently supported filters are: + +* **dangling** (boolean - **true** or **false**, **1** or **0**) +* **driver** (a volume driver's name) +* **label** (**label=** or **label==**) +* **name** (a volume's name) + +` diff --git a/vendor/github.com/docker/docker/cli/command/volume/prune.go b/vendor/github.com/docker/docker/cli/command/volume/prune.go new file mode 100644 index 0000000..405fbeb --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/prune.go @@ -0,0 +1,75 @@ +package volume + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool +} + +// NewPruneCommand returns a new cobra prune command for volumes +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused volumes", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + + return cmd +} + +const warning = `WARNING! This will remove all volumes not used by at least one container. +Are you sure you want to continue?` + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().VolumesPrune(context.Background(), filters.Args{}) + if err != nil { + return + } + + if len(report.VolumesDeleted) > 0 { + output = "Deleted Volumes:\n" + for _, id := range report.VolumesDeleted { + output += id + "\n" + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Volume Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true}) +} diff --git a/vendor/github.com/docker/docker/cli/command/volume/remove.go b/vendor/github.com/docker/docker/cli/command/volume/remove.go new file mode 100644 index 0000000..f464bb3 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/remove.go @@ -0,0 +1,68 @@ +package volume + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool + + volumes []string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] VOLUME [VOLUME...]", + Aliases: []string{"remove"}, + Short: "Remove one or more volumes", + Long: removeDescription, + Example: removeExample, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.volumes = args + return runRemove(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of one or more volumes") + flags.SetAnnotation("force", "version", []string{"1.25"}) + return cmd +} + +func runRemove(dockerCli *command.DockerCli, opts *removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + status := 0 + + for _, name := range opts.volumes { + if err := client.VolumeRemove(ctx, name, opts.force); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +var removeDescription = ` +Remove one or more volumes. You cannot remove a volume that is in use by a container. +` + +var removeExample = ` +$ docker volume rm hello +hello +` diff --git a/vendor/github.com/docker/docker/cli/compose/convert/compose.go b/vendor/github.com/docker/docker/cli/compose/convert/compose.go new file mode 100644 index 0000000..8122326 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/compose.go @@ -0,0 +1,116 @@ +package convert + +import ( + "io/ioutil" + + "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/swarm" + composetypes "github.com/docker/docker/cli/compose/types" +) + +const ( + // LabelNamespace is the label used to track stack resources + LabelNamespace = "com.docker.stack.namespace" +) + +// Namespace mangles names by prepending the name +type Namespace struct { + name string +} + +// Scope prepends the namespace to a name +func (n Namespace) Scope(name string) string { + return n.name + "_" + name +} + +// Name returns the name of the namespace +func (n Namespace) Name() string { + return n.name +} + +// NewNamespace returns a new Namespace for scoping of names +func NewNamespace(name string) Namespace { + return Namespace{name: name} +} + +// AddStackLabel returns labels with the namespace label added +func AddStackLabel(namespace Namespace, labels map[string]string) map[string]string { + if labels == nil { + labels = make(map[string]string) + } + labels[LabelNamespace] = namespace.name + return labels +} + +type networkMap map[string]composetypes.NetworkConfig + +// Networks converts networks from the compose-file type to the engine API type +func Networks( + namespace Namespace, + networks networkMap, + servicesNetworks map[string]struct{}, +) (map[string]types.NetworkCreate, []string) { + if networks == nil { + networks = make(map[string]composetypes.NetworkConfig) + } + + externalNetworks := []string{} + result := make(map[string]types.NetworkCreate) + + for internalName := range servicesNetworks { + network := networks[internalName] + if network.External.External { + externalNetworks = append(externalNetworks, network.External.Name) + continue + } + + createOpts := types.NetworkCreate{ + Labels: AddStackLabel(namespace, network.Labels), + Driver: network.Driver, + Options: network.DriverOpts, + Internal: network.Internal, + } + + if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 { + createOpts.IPAM = &networktypes.IPAM{} + } + + if network.Ipam.Driver != "" { + createOpts.IPAM.Driver = network.Ipam.Driver + } + for _, ipamConfig := range network.Ipam.Config { + config := networktypes.IPAMConfig{ + Subnet: ipamConfig.Subnet, + } + createOpts.IPAM.Config = append(createOpts.IPAM.Config, config) + } + result[internalName] = createOpts + } + + return result, externalNetworks +} + +// Secrets converts secrets from the Compose type to the engine API type +func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) ([]swarm.SecretSpec, error) { + result := []swarm.SecretSpec{} + for name, secret := range secrets { + if secret.External.External { + continue + } + + data, err := ioutil.ReadFile(secret.File) + if err != nil { + return nil, err + } + + result = append(result, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: namespace.Scope(name), + Labels: AddStackLabel(namespace, secret.Labels), + }, + Data: data, + }) + } + return result, nil +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go b/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go new file mode 100644 index 0000000..f333d73 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go @@ -0,0 +1,122 @@ +package convert + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil/tempfile" +) + +func TestNamespaceScope(t *testing.T) { + scoped := Namespace{name: "foo"}.Scope("bar") + assert.Equal(t, scoped, "foo_bar") +} + +func TestAddStackLabel(t *testing.T) { + labels := map[string]string{ + "something": "labeled", + } + actual := AddStackLabel(Namespace{name: "foo"}, labels) + expected := map[string]string{ + "something": "labeled", + LabelNamespace: "foo", + } + assert.DeepEqual(t, actual, expected) +} + +func TestNetworks(t *testing.T) { + namespace := Namespace{name: "foo"} + source := networkMap{ + "normal": composetypes.NetworkConfig{ + Driver: "overlay", + DriverOpts: map[string]string{ + "opt": "value", + }, + Ipam: composetypes.IPAMConfig{ + Driver: "driver", + Config: []*composetypes.IPAMPool{ + { + Subnet: "10.0.0.0", + }, + }, + }, + Labels: map[string]string{ + "something": "labeled", + }, + }, + "outside": composetypes.NetworkConfig{ + External: composetypes.External{ + External: true, + Name: "special", + }, + }, + } + expected := map[string]types.NetworkCreate{ + "default": { + Labels: map[string]string{ + LabelNamespace: "foo", + }, + }, + "normal": { + Driver: "overlay", + IPAM: &network.IPAM{ + Driver: "driver", + Config: []network.IPAMConfig{ + { + Subnet: "10.0.0.0", + }, + }, + }, + Options: map[string]string{ + "opt": "value", + }, + Labels: map[string]string{ + LabelNamespace: "foo", + "something": "labeled", + }, + }, + } + + serviceNetworks := map[string]struct{}{ + "default": {}, + "normal": {}, + "outside": {}, + } + networks, externals := Networks(namespace, source, serviceNetworks) + assert.DeepEqual(t, networks, expected) + assert.DeepEqual(t, externals, []string{"special"}) +} + +func TestSecrets(t *testing.T) { + namespace := Namespace{name: "foo"} + + secretText := "this is the first secret" + secretFile := tempfile.NewTempFile(t, "convert-secrets", secretText) + defer secretFile.Remove() + + source := map[string]composetypes.SecretConfig{ + "one": { + File: secretFile.Name(), + Labels: map[string]string{"monster": "mash"}, + }, + "ext": { + External: composetypes.External{ + External: true, + }, + }, + } + + specs, err := Secrets(namespace, source) + assert.NilError(t, err) + assert.Equal(t, len(specs), 1) + secret := specs[0] + assert.Equal(t, secret.Name, "foo_one") + assert.DeepEqual(t, secret.Labels, map[string]string{ + "monster": "mash", + LabelNamespace: "foo", + }) + assert.DeepEqual(t, secret.Data, []byte(secretText)) +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/service.go b/vendor/github.com/docker/docker/cli/compose/convert/service.go new file mode 100644 index 0000000..4a54895 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/service.go @@ -0,0 +1,416 @@ +package convert + +import ( + "fmt" + "os" + "sort" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + servicecli "github.com/docker/docker/cli/command/service" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" +) + +// Services from compose-file types to engine API types +// TODO: fix secrets API so that SecretAPIClient is not required here +func Services( + namespace Namespace, + config *composetypes.Config, + client client.SecretAPIClient, +) (map[string]swarm.ServiceSpec, error) { + result := make(map[string]swarm.ServiceSpec) + + services := config.Services + volumes := config.Volumes + networks := config.Networks + + for _, service := range services { + + secrets, err := convertServiceSecrets(client, namespace, service.Secrets, config.Secrets) + if err != nil { + return nil, err + } + serviceSpec, err := convertService(namespace, service, networks, volumes, secrets) + if err != nil { + return nil, err + } + result[service.Name] = serviceSpec + } + + return result, nil +} + +func convertService( + namespace Namespace, + service composetypes.ServiceConfig, + networkConfigs map[string]composetypes.NetworkConfig, + volumes map[string]composetypes.VolumeConfig, + secrets []*swarm.SecretReference, +) (swarm.ServiceSpec, error) { + name := namespace.Scope(service.Name) + + endpoint, err := convertEndpointSpec(service.Ports) + if err != nil { + return swarm.ServiceSpec{}, err + } + + mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas) + if err != nil { + return swarm.ServiceSpec{}, err + } + + mounts, err := Volumes(service.Volumes, volumes, namespace) + if err != nil { + // TODO: better error message (include service name) + return swarm.ServiceSpec{}, err + } + + resources, err := convertResources(service.Deploy.Resources) + if err != nil { + return swarm.ServiceSpec{}, err + } + + restartPolicy, err := convertRestartPolicy( + service.Restart, service.Deploy.RestartPolicy) + if err != nil { + return swarm.ServiceSpec{}, err + } + + healthcheck, err := convertHealthcheck(service.HealthCheck) + if err != nil { + return swarm.ServiceSpec{}, err + } + + networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name) + if err != nil { + return swarm.ServiceSpec{}, err + } + + var logDriver *swarm.Driver + if service.Logging != nil { + logDriver = &swarm.Driver{ + Name: service.Logging.Driver, + Options: service.Logging.Options, + } + } + + serviceSpec := swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: AddStackLabel(namespace, service.Deploy.Labels), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: service.Image, + Command: service.Entrypoint, + Args: service.Command, + Hostname: service.Hostname, + Hosts: sortStrings(convertExtraHosts(service.ExtraHosts)), + Healthcheck: healthcheck, + Env: sortStrings(convertEnvironment(service.Environment)), + Labels: AddStackLabel(namespace, service.Labels), + Dir: service.WorkingDir, + User: service.User, + Mounts: mounts, + StopGracePeriod: service.StopGracePeriod, + TTY: service.Tty, + OpenStdin: service.StdinOpen, + Secrets: secrets, + }, + LogDriver: logDriver, + Resources: resources, + RestartPolicy: restartPolicy, + Placement: &swarm.Placement{ + Constraints: service.Deploy.Placement.Constraints, + }, + }, + EndpointSpec: endpoint, + Mode: mode, + Networks: networks, + UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig), + } + + return serviceSpec, nil +} + +func sortStrings(strs []string) []string { + sort.Strings(strs) + return strs +} + +type byNetworkTarget []swarm.NetworkAttachmentConfig + +func (a byNetworkTarget) Len() int { return len(a) } +func (a byNetworkTarget) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byNetworkTarget) Less(i, j int) bool { return a[i].Target < a[j].Target } + +func convertServiceNetworks( + networks map[string]*composetypes.ServiceNetworkConfig, + networkConfigs networkMap, + namespace Namespace, + name string, +) ([]swarm.NetworkAttachmentConfig, error) { + if len(networks) == 0 { + return []swarm.NetworkAttachmentConfig{ + { + Target: namespace.Scope("default"), + Aliases: []string{name}, + }, + }, nil + } + + nets := []swarm.NetworkAttachmentConfig{} + for networkName, network := range networks { + networkConfig, ok := networkConfigs[networkName] + if !ok { + return []swarm.NetworkAttachmentConfig{}, fmt.Errorf( + "service %q references network %q, which is not declared", name, networkName) + } + var aliases []string + if network != nil { + aliases = network.Aliases + } + target := namespace.Scope(networkName) + if networkConfig.External.External { + target = networkConfig.External.Name + } + nets = append(nets, swarm.NetworkAttachmentConfig{ + Target: target, + Aliases: append(aliases, name), + }) + } + + sort.Sort(byNetworkTarget(nets)) + + return nets, nil +} + +// TODO: fix secrets API so that SecretAPIClient is not required here +func convertServiceSecrets( + client client.SecretAPIClient, + namespace Namespace, + secrets []composetypes.ServiceSecretConfig, + secretSpecs map[string]composetypes.SecretConfig, +) ([]*swarm.SecretReference, error) { + opts := []*types.SecretRequestOption{} + for _, secret := range secrets { + target := secret.Target + if target == "" { + target = secret.Source + } + + source := namespace.Scope(secret.Source) + secretSpec := secretSpecs[secret.Source] + if secretSpec.External.External { + source = secretSpec.External.Name + } + + uid := secret.UID + gid := secret.GID + if uid == "" { + uid = "0" + } + if gid == "" { + gid = "0" + } + + opts = append(opts, &types.SecretRequestOption{ + Source: source, + Target: target, + UID: uid, + GID: gid, + Mode: os.FileMode(secret.Mode), + }) + } + + return servicecli.ParseSecrets(client, opts) +} + +func convertExtraHosts(extraHosts map[string]string) []string { + hosts := []string{} + for host, ip := range extraHosts { + hosts = append(hosts, fmt.Sprintf("%s %s", ip, host)) + } + return hosts +} + +func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) { + if healthcheck == nil { + return nil, nil + } + var ( + err error + timeout, interval time.Duration + retries int + ) + if healthcheck.Disable { + if len(healthcheck.Test) != 0 { + return nil, fmt.Errorf("test and disable can't be set at the same time") + } + return &container.HealthConfig{ + Test: []string{"NONE"}, + }, nil + + } + if healthcheck.Timeout != "" { + timeout, err = time.ParseDuration(healthcheck.Timeout) + if err != nil { + return nil, err + } + } + if healthcheck.Interval != "" { + interval, err = time.ParseDuration(healthcheck.Interval) + if err != nil { + return nil, err + } + } + if healthcheck.Retries != nil { + retries = int(*healthcheck.Retries) + } + return &container.HealthConfig{ + Test: healthcheck.Test, + Timeout: timeout, + Interval: interval, + Retries: retries, + }, nil +} + +func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) { + // TODO: log if restart is being ignored + if source == nil { + policy, err := runconfigopts.ParseRestartPolicy(restart) + if err != nil { + return nil, err + } + switch { + case policy.IsNone(): + return nil, nil + case policy.IsAlways(), policy.IsUnlessStopped(): + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionAny, + }, nil + case policy.IsOnFailure(): + attempts := uint64(policy.MaximumRetryCount) + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionOnFailure, + MaxAttempts: &attempts, + }, nil + default: + return nil, fmt.Errorf("unknown restart policy: %s", restart) + } + } + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyCondition(source.Condition), + Delay: source.Delay, + MaxAttempts: source.MaxAttempts, + Window: source.Window, + }, nil +} + +func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig { + if source == nil { + return nil + } + parallel := uint64(1) + if source.Parallelism != nil { + parallel = *source.Parallelism + } + return &swarm.UpdateConfig{ + Parallelism: parallel, + Delay: source.Delay, + FailureAction: source.FailureAction, + Monitor: source.Monitor, + MaxFailureRatio: source.MaxFailureRatio, + } +} + +func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) { + resources := &swarm.ResourceRequirements{} + var err error + if source.Limits != nil { + var cpus int64 + if source.Limits.NanoCPUs != "" { + cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs) + if err != nil { + return nil, err + } + } + resources.Limits = &swarm.Resources{ + NanoCPUs: cpus, + MemoryBytes: int64(source.Limits.MemoryBytes), + } + } + if source.Reservations != nil { + var cpus int64 + if source.Reservations.NanoCPUs != "" { + cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs) + if err != nil { + return nil, err + } + } + resources.Reservations = &swarm.Resources{ + NanoCPUs: cpus, + MemoryBytes: int64(source.Reservations.MemoryBytes), + } + } + return resources, nil + +} + +type byPublishedPort []swarm.PortConfig + +func (a byPublishedPort) Len() int { return len(a) } +func (a byPublishedPort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPublishedPort) Less(i, j int) bool { return a[i].PublishedPort < a[j].PublishedPort } + +func convertEndpointSpec(source []string) (*swarm.EndpointSpec, error) { + portConfigs := []swarm.PortConfig{} + ports, portBindings, err := nat.ParsePortSpecs(source) + if err != nil { + return nil, err + } + + for port := range ports { + portConfigs = append( + portConfigs, + opts.ConvertPortToPortConfig(port, portBindings)...) + } + + // Sorting to make sure these are always in the same order + sort.Sort(byPublishedPort(portConfigs)) + + return &swarm.EndpointSpec{Ports: portConfigs}, nil +} + +func convertEnvironment(source map[string]string) []string { + var output []string + + for name, value := range source { + output = append(output, fmt.Sprintf("%s=%s", name, value)) + } + + return output +} + +func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) { + serviceMode := swarm.ServiceMode{} + + switch mode { + case "global": + if replicas != nil { + return serviceMode, fmt.Errorf("replicas can only be used with replicated mode") + } + serviceMode.Global = &swarm.GlobalService{} + case "replicated", "": + serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas} + default: + return serviceMode, fmt.Errorf("Unknown mode: %s", mode) + } + return serviceMode, nil +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/service_test.go b/vendor/github.com/docker/docker/cli/compose/convert/service_test.go new file mode 100644 index 0000000..2e614d7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/service_test.go @@ -0,0 +1,216 @@ +package convert + +import ( + "sort" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestConvertRestartPolicyFromNone(t *testing.T) { + policy, err := convertRestartPolicy("no", nil) + assert.NilError(t, err) + assert.Equal(t, policy, (*swarm.RestartPolicy)(nil)) +} + +func TestConvertRestartPolicyFromUnknown(t *testing.T) { + _, err := convertRestartPolicy("unknown", nil) + assert.Error(t, err, "unknown restart policy: unknown") +} + +func TestConvertRestartPolicyFromAlways(t *testing.T) { + policy, err := convertRestartPolicy("always", nil) + expected := &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionAny, + } + assert.NilError(t, err) + assert.DeepEqual(t, policy, expected) +} + +func TestConvertRestartPolicyFromFailure(t *testing.T) { + policy, err := convertRestartPolicy("on-failure:4", nil) + attempts := uint64(4) + expected := &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionOnFailure, + MaxAttempts: &attempts, + } + assert.NilError(t, err) + assert.DeepEqual(t, policy, expected) +} + +func TestConvertEnvironment(t *testing.T) { + source := map[string]string{ + "foo": "bar", + "key": "value", + } + env := convertEnvironment(source) + sort.Strings(env) + assert.DeepEqual(t, env, []string{"foo=bar", "key=value"}) +} + +func TestConvertResourcesFull(t *testing.T) { + source := composetypes.Resources{ + Limits: &composetypes.Resource{ + NanoCPUs: "0.003", + MemoryBytes: composetypes.UnitBytes(300000000), + }, + Reservations: &composetypes.Resource{ + NanoCPUs: "0.002", + MemoryBytes: composetypes.UnitBytes(200000000), + }, + } + resources, err := convertResources(source) + assert.NilError(t, err) + + expected := &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: 3000000, + MemoryBytes: 300000000, + }, + Reservations: &swarm.Resources{ + NanoCPUs: 2000000, + MemoryBytes: 200000000, + }, + } + assert.DeepEqual(t, resources, expected) +} + +func TestConvertResourcesOnlyMemory(t *testing.T) { + source := composetypes.Resources{ + Limits: &composetypes.Resource{ + MemoryBytes: composetypes.UnitBytes(300000000), + }, + Reservations: &composetypes.Resource{ + MemoryBytes: composetypes.UnitBytes(200000000), + }, + } + resources, err := convertResources(source) + assert.NilError(t, err) + + expected := &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + MemoryBytes: 300000000, + }, + Reservations: &swarm.Resources{ + MemoryBytes: 200000000, + }, + } + assert.DeepEqual(t, resources, expected) +} + +func TestConvertHealthcheck(t *testing.T) { + retries := uint64(10) + source := &composetypes.HealthCheckConfig{ + Test: []string{"EXEC", "touch", "/foo"}, + Timeout: "30s", + Interval: "2ms", + Retries: &retries, + } + expected := &container.HealthConfig{ + Test: source.Test, + Timeout: 30 * time.Second, + Interval: 2 * time.Millisecond, + Retries: 10, + } + + healthcheck, err := convertHealthcheck(source) + assert.NilError(t, err) + assert.DeepEqual(t, healthcheck, expected) +} + +func TestConvertHealthcheckDisable(t *testing.T) { + source := &composetypes.HealthCheckConfig{Disable: true} + expected := &container.HealthConfig{ + Test: []string{"NONE"}, + } + + healthcheck, err := convertHealthcheck(source) + assert.NilError(t, err) + assert.DeepEqual(t, healthcheck, expected) +} + +func TestConvertHealthcheckDisableWithTest(t *testing.T) { + source := &composetypes.HealthCheckConfig{ + Disable: true, + Test: []string{"EXEC", "touch"}, + } + _, err := convertHealthcheck(source) + assert.Error(t, err, "test and disable can't be set") +} + +func TestConvertServiceNetworksOnlyDefault(t *testing.T) { + networkConfigs := networkMap{} + networks := map[string]*composetypes.ServiceNetworkConfig{} + + configs, err := convertServiceNetworks( + networks, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: "foo_default", + Aliases: []string{"service"}, + }, + } + + assert.NilError(t, err) + assert.DeepEqual(t, configs, expected) +} + +func TestConvertServiceNetworks(t *testing.T) { + networkConfigs := networkMap{ + "front": composetypes.NetworkConfig{ + External: composetypes.External{ + External: true, + Name: "fronttier", + }, + }, + "back": composetypes.NetworkConfig{}, + } + networks := map[string]*composetypes.ServiceNetworkConfig{ + "front": { + Aliases: []string{"something"}, + }, + "back": { + Aliases: []string{"other"}, + }, + } + + configs, err := convertServiceNetworks( + networks, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: "foo_back", + Aliases: []string{"other", "service"}, + }, + { + Target: "fronttier", + Aliases: []string{"something", "service"}, + }, + } + + sortedConfigs := byTargetSort(configs) + sort.Sort(&sortedConfigs) + + assert.NilError(t, err) + assert.DeepEqual(t, []swarm.NetworkAttachmentConfig(sortedConfigs), expected) +} + +type byTargetSort []swarm.NetworkAttachmentConfig + +func (s byTargetSort) Len() int { + return len(s) +} + +func (s byTargetSort) Less(i, j int) bool { + return strings.Compare(s[i].Target, s[j].Target) < 0 +} + +func (s byTargetSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/volume.go b/vendor/github.com/docker/docker/cli/compose/convert/volume.go new file mode 100644 index 0000000..24442d4 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/volume.go @@ -0,0 +1,128 @@ +package convert + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/mount" + composetypes "github.com/docker/docker/cli/compose/types" +) + +type volumes map[string]composetypes.VolumeConfig + +// Volumes from compose-file types to engine api types +func Volumes(serviceVolumes []string, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) { + var mounts []mount.Mount + + for _, volumeSpec := range serviceVolumes { + mount, err := convertVolumeToMount(volumeSpec, stackVolumes, namespace) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + return mounts, nil +} + +func convertVolumeToMount(volumeSpec string, stackVolumes volumes, namespace Namespace) (mount.Mount, error) { + var source, target string + var mode []string + + // TODO: split Windows path mappings properly + parts := strings.SplitN(volumeSpec, ":", 3) + + for _, part := range parts { + if strings.TrimSpace(part) == "" { + return mount.Mount{}, fmt.Errorf("invalid volume: %s", volumeSpec) + } + } + + switch len(parts) { + case 3: + source = parts[0] + target = parts[1] + mode = strings.Split(parts[2], ",") + case 2: + source = parts[0] + target = parts[1] + case 1: + target = parts[0] + } + + if source == "" { + // Anonymous volume + return mount.Mount{ + Type: mount.TypeVolume, + Target: target, + }, nil + } + + // TODO: catch Windows paths here + if strings.HasPrefix(source, "/") { + return mount.Mount{ + Type: mount.TypeBind, + Source: source, + Target: target, + ReadOnly: isReadOnly(mode), + BindOptions: getBindOptions(mode), + }, nil + } + + stackVolume, exists := stackVolumes[source] + if !exists { + return mount.Mount{}, fmt.Errorf("undefined volume: %s", source) + } + + var volumeOptions *mount.VolumeOptions + if stackVolume.External.Name != "" { + source = stackVolume.External.Name + } else { + volumeOptions = &mount.VolumeOptions{ + Labels: AddStackLabel(namespace, stackVolume.Labels), + NoCopy: isNoCopy(mode), + } + + if stackVolume.Driver != "" { + volumeOptions.DriverConfig = &mount.Driver{ + Name: stackVolume.Driver, + Options: stackVolume.DriverOpts, + } + } + source = namespace.Scope(source) + } + return mount.Mount{ + Type: mount.TypeVolume, + Source: source, + Target: target, + ReadOnly: isReadOnly(mode), + VolumeOptions: volumeOptions, + }, nil +} + +func modeHas(mode []string, field string) bool { + for _, item := range mode { + if item == field { + return true + } + } + return false +} + +func isReadOnly(mode []string) bool { + return modeHas(mode, "ro") +} + +func isNoCopy(mode []string) bool { + return modeHas(mode, "nocopy") +} + +func getBindOptions(mode []string) *mount.BindOptions { + for _, item := range mode { + for _, propagation := range mount.Propagations { + if mount.Propagation(item) == propagation { + return &mount.BindOptions{Propagation: mount.Propagation(item)} + } + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go b/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go new file mode 100644 index 0000000..113ab1e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go @@ -0,0 +1,133 @@ +package convert + +import ( + "testing" + + "github.com/docker/docker/api/types/mount" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestIsReadOnly(t *testing.T) { + assert.Equal(t, isReadOnly([]string{"foo", "bar", "ro"}), true) + assert.Equal(t, isReadOnly([]string{"ro"}), true) + assert.Equal(t, isReadOnly([]string{}), false) + assert.Equal(t, isReadOnly([]string{"foo", "rw"}), false) + assert.Equal(t, isReadOnly([]string{"foo"}), false) +} + +func TestIsNoCopy(t *testing.T) { + assert.Equal(t, isNoCopy([]string{"foo", "bar", "nocopy"}), true) + assert.Equal(t, isNoCopy([]string{"nocopy"}), true) + assert.Equal(t, isNoCopy([]string{}), false) + assert.Equal(t, isNoCopy([]string{"foo", "rw"}), false) +} + +func TestGetBindOptions(t *testing.T) { + opts := getBindOptions([]string{"slave"}) + expected := mount.BindOptions{Propagation: mount.PropagationSlave} + assert.Equal(t, *opts, expected) +} + +func TestGetBindOptionsNone(t *testing.T) { + opts := getBindOptions([]string{"ro"}) + assert.Equal(t, opts, (*mount.BindOptions)(nil)) +} + +func TestConvertVolumeToMountNamedVolume(t *testing.T) { + stackVolumes := volumes{ + "normal": composetypes.VolumeConfig{ + Driver: "glusterfs", + DriverOpts: map[string]string{ + "opt": "value", + }, + Labels: map[string]string{ + "something": "labeled", + }, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "foo_normal", + Target: "/foo", + ReadOnly: true, + VolumeOptions: &mount.VolumeOptions{ + Labels: map[string]string{ + LabelNamespace: "foo", + "something": "labeled", + }, + DriverConfig: &mount.Driver{ + Name: "glusterfs", + Options: map[string]string{ + "opt": "value", + }, + }, + }, + } + mount, err := convertVolumeToMount("normal:/foo:ro", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mount, expected) +} + +func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) { + stackVolumes := volumes{ + "outside": composetypes.VolumeConfig{ + External: composetypes.External{ + External: true, + Name: "special", + }, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "special", + Target: "/foo", + } + mount, err := convertVolumeToMount("outside:/foo", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mount, expected) +} + +func TestConvertVolumeToMountBind(t *testing.T) { + stackVolumes := volumes{} + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeBind, + Source: "/bar", + Target: "/foo", + ReadOnly: true, + BindOptions: &mount.BindOptions{Propagation: mount.PropagationShared}, + } + mount, err := convertVolumeToMount("/bar:/foo:ro,shared", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mount, expected) +} + +func TestConvertVolumeToMountVolumeDoesNotExist(t *testing.T) { + namespace := NewNamespace("foo") + _, err := convertVolumeToMount("unknown:/foo:ro", volumes{}, namespace) + assert.Error(t, err, "undefined volume: unknown") +} + +func TestConvertVolumeToMountAnonymousVolume(t *testing.T) { + stackVolumes := map[string]composetypes.VolumeConfig{} + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Target: "/foo/bar", + } + mnt, err := convertVolumeToMount("/foo/bar", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mnt, expected) +} + +func TestConvertVolumeToMountInvalidFormat(t *testing.T) { + namespace := NewNamespace("foo") + invalids := []string{"::", "::cc", ":bb:", "aa::", "aa::cc", "aa:bb:", " : : ", " : :cc", " :bb: ", "aa: : ", "aa: :cc", "aa:bb: "} + for _, vol := range invalids { + _, err := convertVolumeToMount(vol, map[string]composetypes.VolumeConfig{}, namespace) + assert.Error(t, err, "invalid volume: "+vol) + } +} diff --git a/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go new file mode 100644 index 0000000..734f28e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go @@ -0,0 +1,90 @@ +package interpolation + +import ( + "fmt" + + "github.com/docker/docker/cli/compose/template" + "github.com/docker/docker/cli/compose/types" +) + +// Interpolate replaces variables in a string with the values from a mapping +func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) { + out := types.Dict{} + + for name, item := range config { + if item == nil { + out[name] = nil + continue + } + interpolatedItem, err := interpolateSectionItem(name, item.(types.Dict), section, mapping) + if err != nil { + return nil, err + } + out[name] = interpolatedItem + } + + return out, nil +} + +func interpolateSectionItem( + name string, + item types.Dict, + section string, + mapping template.Mapping, +) (types.Dict, error) { + + out := types.Dict{} + + for key, value := range item { + interpolatedValue, err := recursiveInterpolate(value, mapping) + if err != nil { + return nil, fmt.Errorf( + "Invalid interpolation format for %#v option in %s %#v: %#v", + key, section, name, err.Template, + ) + } + out[key] = interpolatedValue + } + + return out, nil + +} + +func recursiveInterpolate( + value interface{}, + mapping template.Mapping, +) (interface{}, *template.InvalidTemplateError) { + + switch value := value.(type) { + + case string: + return template.Substitute(value, mapping) + + case types.Dict: + out := types.Dict{} + for key, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, mapping) + if err != nil { + return nil, err + } + out[key] = interpolatedElem + } + return out, nil + + case []interface{}: + out := make([]interface{}, len(value)) + for i, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, mapping) + if err != nil { + return nil, err + } + out[i] = interpolatedElem + } + return out, nil + + default: + return value, nil + + } + +} diff --git a/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go new file mode 100644 index 0000000..c392170 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go @@ -0,0 +1,59 @@ +package interpolation + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/docker/docker/cli/compose/types" +) + +var defaults = map[string]string{ + "USER": "jenny", + "FOO": "bar", +} + +func defaultMapping(name string) (string, bool) { + val, ok := defaults[name] + return val, ok +} + +func TestInterpolate(t *testing.T) { + services := types.Dict{ + "servicea": types.Dict{ + "image": "example:${USER}", + "volumes": []interface{}{"$FOO:/target"}, + "logging": types.Dict{ + "driver": "${FOO}", + "options": types.Dict{ + "user": "$USER", + }, + }, + }, + } + expected := types.Dict{ + "servicea": types.Dict{ + "image": "example:jenny", + "volumes": []interface{}{"bar:/target"}, + "logging": types.Dict{ + "driver": "bar", + "options": types.Dict{ + "user": "jenny", + }, + }, + }, + } + result, err := Interpolate(services, "service", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, expected, result) +} + +func TestInvalidInterpolation(t *testing.T) { + services := types.Dict{ + "servicea": types.Dict{ + "image": "${", + }, + } + _, err := Interpolate(services, "service", defaultMapping) + assert.EqualError(t, err, `Invalid interpolation format for "image" option in service "servicea": "${"`) +} diff --git a/vendor/github.com/docker/docker/cli/compose/loader/example1.env b/vendor/github.com/docker/docker/cli/compose/loader/example1.env new file mode 100644 index 0000000..3e7a059 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/example1.env @@ -0,0 +1,8 @@ +# passed through +FOO=1 + +# overridden in example2.env +BAR=1 + +# overridden in full-example.yml +BAZ=1 diff --git a/vendor/github.com/docker/docker/cli/compose/loader/example2.env b/vendor/github.com/docker/docker/cli/compose/loader/example2.env new file mode 100644 index 0000000..0920d5a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/example2.env @@ -0,0 +1 @@ +BAR=2 diff --git a/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml b/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml new file mode 100644 index 0000000..fb5686a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml @@ -0,0 +1,287 @@ +version: "3" + +services: + foo: + cap_add: + - ALL + + cap_drop: + - NET_ADMIN + - SYS_ADMIN + + cgroup_parent: m-executor-abcd + + # String or list + command: bundle exec thin -p 3000 + # command: ["bundle", "exec", "thin", "-p", "3000"] + + container_name: my-web-container + + depends_on: + - db + - redis + + deploy: + mode: replicated + replicas: 6 + labels: [FOO=BAR] + update_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + resources: + limits: + cpus: '0.001' + memory: 50M + reservations: + cpus: '0.0001' + memory: 20M + restart_policy: + condition: on_failure + delay: 5s + max_attempts: 3 + window: 120s + placement: + constraints: [node=foo] + + devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + + # String or list + # dns: 8.8.8.8 + dns: + - 8.8.8.8 + - 9.9.9.9 + + # String or list + # dns_search: example.com + dns_search: + - dc1.example.com + - dc2.example.com + + domainname: foo.com + + # String or list + # entrypoint: /code/entrypoint.sh -p 3000 + entrypoint: ["/code/entrypoint.sh", "-p", "3000"] + + # String or list + # env_file: .env + env_file: + - ./example1.env + - ./example2.env + + # Mapping or list + # Mapping values can be strings, numbers or null + # Booleans are not allowed - must be quoted + environment: + RACK_ENV: development + SHOW: 'true' + SESSION_SECRET: + BAZ: 3 + # environment: + # - RACK_ENV=development + # - SHOW=true + # - SESSION_SECRET + + # Items can be strings or numbers + expose: + - "3000" + - 8000 + + external_links: + - redis_1 + - project_db_1:mysql + - project_db_1:postgresql + + # Mapping or list + # Mapping values must be strings + # extra_hosts: + # somehost: "162.242.195.82" + # otherhost: "50.31.209.229" + extra_hosts: + - "somehost:162.242.195.82" + - "otherhost:50.31.209.229" + + hostname: foo + + healthcheck: + test: echo "hello world" + interval: 10s + timeout: 1s + retries: 5 + + # Any valid image reference - repo, tag, id, sha + image: redis + # image: ubuntu:14.04 + # image: tutum/influxdb + # image: example-registry.com:4000/postgresql + # image: a4bc65fd + # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d + + ipc: host + + # Mapping or list + # Mapping values can be strings, numbers or null + labels: + com.example.description: "Accounting webapp" + com.example.number: 42 + com.example.empty-label: + # labels: + # - "com.example.description=Accounting webapp" + # - "com.example.number=42" + # - "com.example.empty-label" + + links: + - db + - db:database + - redis + + logging: + driver: syslog + options: + syslog-address: "tcp://192.168.0.42:123" + + mac_address: 02:42:ac:11:65:43 + + # network_mode: "bridge" + # network_mode: "host" + # network_mode: "none" + # Use the network mode of an arbitrary container from another service + # network_mode: "service:db" + # Use the network mode of another container, specified by name or id + # network_mode: "container:some-container" + network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b" + + networks: + some-network: + aliases: + - alias1 + - alias3 + other-network: + ipv4_address: 172.16.238.10 + ipv6_address: 2001:3984:3989::10 + other-other-network: + + pid: "host" + + ports: + - 3000 + - "3000-3005" + - "8000:8000" + - "9090-9091:8080-8081" + - "49100:22" + - "127.0.0.1:8001:8001" + - "127.0.0.1:5000-5010:5000-5010" + + privileged: true + + read_only: true + + restart: always + + security_opt: + - label=level:s0:c100,c200 + - label=type:svirt_apache_t + + stdin_open: true + + stop_grace_period: 20s + + stop_signal: SIGUSR1 + + # String or list + # tmpfs: /run + tmpfs: + - /run + - /tmp + + tty: true + + ulimits: + # Single number or mapping with soft + hard limits + nproc: 65535 + nofile: + soft: 20000 + hard: 40000 + + user: someone + + volumes: + # Just specify a path and let the Engine create a volume + - /var/lib/mysql + # Specify an absolute path mapping + - /opt/data:/var/lib/mysql + # Path on the host, relative to the Compose file + - .:/code + - ./static:/var/www/html + # User-relative path + - ~/configs:/etc/configs/:ro + # Named volume + - datavolume:/var/lib/mysql + + working_dir: /code + +networks: + # Entries can be null, which specifies simply that a network + # called "{project name}_some-network" should be created and + # use the default driver + some-network: + + other-network: + driver: overlay + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + ipam: + driver: overlay + # driver_opts: + # # Values can be strings or numbers + # com.docker.network.enable_ipv6: "true" + # com.docker.network.numeric_value: 1 + config: + - subnet: 172.16.238.0/24 + # gateway: 172.16.238.1 + - subnet: 2001:3984:3989::/64 + # gateway: 2001:3984:3989::1 + + external-network: + # Specifies that a pre-existing network called "external-network" + # can be referred to within this file as "external-network" + external: true + + other-external-network: + # Specifies that a pre-existing network called "my-cool-network" + # can be referred to within this file as "other-external-network" + external: + name: my-cool-network + +volumes: + # Entries can be null, which specifies simply that a volume + # called "{project name}_some-volume" should be created and + # use the default driver + some-volume: + + other-volume: + driver: flocker + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + external-volume: + # Specifies that a pre-existing volume called "external-volume" + # can be referred to within this file as "external-volume" + external: true + + other-external-volume: + # Specifies that a pre-existing volume called "my-cool-volume" + # can be referred to within this file as "other-external-volume" + external: + name: my-cool-volume diff --git a/vendor/github.com/docker/docker/cli/compose/loader/loader.go b/vendor/github.com/docker/docker/cli/compose/loader/loader.go new file mode 100644 index 0000000..39f69a0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/loader.go @@ -0,0 +1,653 @@ +package loader + +import ( + "fmt" + "os" + "path" + "reflect" + "regexp" + "sort" + "strings" + + "github.com/docker/docker/cli/compose/interpolation" + "github.com/docker/docker/cli/compose/schema" + "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/runconfig/opts" + units "github.com/docker/go-units" + shellwords "github.com/mattn/go-shellwords" + "github.com/mitchellh/mapstructure" + yaml "gopkg.in/yaml.v2" +) + +var ( + fieldNameRegexp = regexp.MustCompile("[A-Z][a-z0-9]+") +) + +// ParseYAML reads the bytes from a file, parses the bytes into a mapping +// structure, and returns it. +func ParseYAML(source []byte) (types.Dict, error) { + var cfg interface{} + if err := yaml.Unmarshal(source, &cfg); err != nil { + return nil, err + } + cfgMap, ok := cfg.(map[interface{}]interface{}) + if !ok { + return nil, fmt.Errorf("Top-level object must be a mapping") + } + converted, err := convertToStringKeysRecursive(cfgMap, "") + if err != nil { + return nil, err + } + return converted.(types.Dict), nil +} + +// Load reads a ConfigDetails and returns a fully loaded configuration +func Load(configDetails types.ConfigDetails) (*types.Config, error) { + if len(configDetails.ConfigFiles) < 1 { + return nil, fmt.Errorf("No files specified") + } + if len(configDetails.ConfigFiles) > 1 { + return nil, fmt.Errorf("Multiple files are not yet supported") + } + + configDict := getConfigDict(configDetails) + + if services, ok := configDict["services"]; ok { + if servicesDict, ok := services.(types.Dict); ok { + forbidden := getProperties(servicesDict, types.ForbiddenProperties) + + if len(forbidden) > 0 { + return nil, &ForbiddenPropertiesError{Properties: forbidden} + } + } + } + + if err := schema.Validate(configDict, schema.Version(configDict)); err != nil { + return nil, err + } + + cfg := types.Config{} + if services, ok := configDict["services"]; ok { + servicesConfig, err := interpolation.Interpolate(services.(types.Dict), "service", os.LookupEnv) + if err != nil { + return nil, err + } + + servicesList, err := loadServices(servicesConfig, configDetails.WorkingDir) + if err != nil { + return nil, err + } + + cfg.Services = servicesList + } + + if networks, ok := configDict["networks"]; ok { + networksConfig, err := interpolation.Interpolate(networks.(types.Dict), "network", os.LookupEnv) + if err != nil { + return nil, err + } + + networksMapping, err := loadNetworks(networksConfig) + if err != nil { + return nil, err + } + + cfg.Networks = networksMapping + } + + if volumes, ok := configDict["volumes"]; ok { + volumesConfig, err := interpolation.Interpolate(volumes.(types.Dict), "volume", os.LookupEnv) + if err != nil { + return nil, err + } + + volumesMapping, err := loadVolumes(volumesConfig) + if err != nil { + return nil, err + } + + cfg.Volumes = volumesMapping + } + + if secrets, ok := configDict["secrets"]; ok { + secretsConfig, err := interpolation.Interpolate(secrets.(types.Dict), "secret", os.LookupEnv) + if err != nil { + return nil, err + } + + secretsMapping, err := loadSecrets(secretsConfig, configDetails.WorkingDir) + if err != nil { + return nil, err + } + + cfg.Secrets = secretsMapping + } + + return &cfg, nil +} + +// GetUnsupportedProperties returns the list of any unsupported properties that are +// used in the Compose files. +func GetUnsupportedProperties(configDetails types.ConfigDetails) []string { + unsupported := map[string]bool{} + + for _, service := range getServices(getConfigDict(configDetails)) { + serviceDict := service.(types.Dict) + for _, property := range types.UnsupportedProperties { + if _, isSet := serviceDict[property]; isSet { + unsupported[property] = true + } + } + } + + return sortedKeys(unsupported) +} + +func sortedKeys(set map[string]bool) []string { + var keys []string + for key := range set { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +// GetDeprecatedProperties returns the list of any deprecated properties that +// are used in the compose files. +func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string { + return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties) +} + +func getProperties(services types.Dict, propertyMap map[string]string) map[string]string { + output := map[string]string{} + + for _, service := range services { + if serviceDict, ok := service.(types.Dict); ok { + for property, description := range propertyMap { + if _, isSet := serviceDict[property]; isSet { + output[property] = description + } + } + } + } + + return output +} + +// ForbiddenPropertiesError is returned when there are properties in the Compose +// file that are forbidden. +type ForbiddenPropertiesError struct { + Properties map[string]string +} + +func (e *ForbiddenPropertiesError) Error() string { + return "Configuration contains forbidden properties" +} + +// TODO: resolve multiple files into a single config +func getConfigDict(configDetails types.ConfigDetails) types.Dict { + return configDetails.ConfigFiles[0].Config +} + +func getServices(configDict types.Dict) types.Dict { + if services, ok := configDict["services"]; ok { + if servicesDict, ok := services.(types.Dict); ok { + return servicesDict + } + } + + return types.Dict{} +} + +func transform(source map[string]interface{}, target interface{}) error { + data := mapstructure.Metadata{} + config := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + transformHook, + mapstructure.StringToTimeDurationHookFunc()), + Result: target, + Metadata: &data, + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + err = decoder.Decode(source) + // TODO: log unused keys + return err +} + +func transformHook( + source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + switch target { + case reflect.TypeOf(types.External{}): + return transformExternal(data) + case reflect.TypeOf(make(map[string]string, 0)): + return transformMapStringString(source, target, data) + case reflect.TypeOf(types.UlimitsConfig{}): + return transformUlimits(data) + case reflect.TypeOf(types.UnitBytes(0)): + return loadSize(data) + case reflect.TypeOf(types.ServiceSecretConfig{}): + return transformServiceSecret(data) + } + switch target.Kind() { + case reflect.Struct: + return transformStruct(source, target, data) + } + return data, nil +} + +// keys needs to be converted to strings for jsonschema +// TODO: don't use types.Dict +func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) { + if mapping, ok := value.(map[interface{}]interface{}); ok { + dict := make(types.Dict) + for key, entry := range mapping { + str, ok := key.(string) + if !ok { + var location string + if keyPrefix == "" { + location = "at top level" + } else { + location = fmt.Sprintf("in %s", keyPrefix) + } + return nil, fmt.Errorf("Non-string key %s: %#v", location, key) + } + var newKeyPrefix string + if keyPrefix == "" { + newKeyPrefix = str + } else { + newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str) + } + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + dict[str] = convertedEntry + } + return dict, nil + } + if list, ok := value.([]interface{}); ok { + var convertedList []interface{} + for index, entry := range list { + newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index) + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + convertedList = append(convertedList, convertedEntry) + } + return convertedList, nil + } + return value, nil +} + +func loadServices(servicesDict types.Dict, workingDir string) ([]types.ServiceConfig, error) { + var services []types.ServiceConfig + + for name, serviceDef := range servicesDict { + serviceConfig, err := loadService(name, serviceDef.(types.Dict), workingDir) + if err != nil { + return nil, err + } + services = append(services, *serviceConfig) + } + + return services, nil +} + +func loadService(name string, serviceDict types.Dict, workingDir string) (*types.ServiceConfig, error) { + serviceConfig := &types.ServiceConfig{} + if err := transform(serviceDict, serviceConfig); err != nil { + return nil, err + } + serviceConfig.Name = name + + if err := resolveEnvironment(serviceConfig, serviceDict, workingDir); err != nil { + return nil, err + } + + if err := resolveVolumePaths(serviceConfig.Volumes, workingDir); err != nil { + return nil, err + } + + return serviceConfig, nil +} + +func resolveEnvironment(serviceConfig *types.ServiceConfig, serviceDict types.Dict, workingDir string) error { + environment := make(map[string]string) + + if envFileVal, ok := serviceDict["env_file"]; ok { + envFiles := loadStringOrListOfStrings(envFileVal) + + var envVars []string + + for _, file := range envFiles { + filePath := absPath(workingDir, file) + fileVars, err := opts.ParseEnvFile(filePath) + if err != nil { + return err + } + envVars = append(envVars, fileVars...) + } + + for k, v := range opts.ConvertKVStringsToMap(envVars) { + environment[k] = v + } + } + + for k, v := range serviceConfig.Environment { + environment[k] = v + } + + serviceConfig.Environment = environment + + return nil +} + +func resolveVolumePaths(volumes []string, workingDir string) error { + for i, mapping := range volumes { + parts := strings.SplitN(mapping, ":", 2) + if len(parts) == 1 { + continue + } + + if strings.HasPrefix(parts[0], ".") { + parts[0] = absPath(workingDir, parts[0]) + } + parts[0] = expandUser(parts[0]) + + volumes[i] = strings.Join(parts, ":") + } + + return nil +} + +// TODO: make this more robust +func expandUser(path string) string { + if strings.HasPrefix(path, "~") { + return strings.Replace(path, "~", os.Getenv("HOME"), 1) + } + return path +} + +func transformUlimits(data interface{}) (interface{}, error) { + switch value := data.(type) { + case int: + return types.UlimitsConfig{Single: value}, nil + case types.Dict: + ulimit := types.UlimitsConfig{} + ulimit.Soft = value["soft"].(int) + ulimit.Hard = value["hard"].(int) + return ulimit, nil + default: + return data, fmt.Errorf("invalid type %T for ulimits", value) + } +} + +func loadNetworks(source types.Dict) (map[string]types.NetworkConfig, error) { + networks := make(map[string]types.NetworkConfig) + err := transform(source, &networks) + if err != nil { + return networks, err + } + for name, network := range networks { + if network.External.External && network.External.Name == "" { + network.External.Name = name + networks[name] = network + } + } + return networks, nil +} + +func loadVolumes(source types.Dict) (map[string]types.VolumeConfig, error) { + volumes := make(map[string]types.VolumeConfig) + err := transform(source, &volumes) + if err != nil { + return volumes, err + } + for name, volume := range volumes { + if volume.External.External && volume.External.Name == "" { + volume.External.Name = name + volumes[name] = volume + } + } + return volumes, nil +} + +// TODO: remove duplicate with networks/volumes +func loadSecrets(source types.Dict, workingDir string) (map[string]types.SecretConfig, error) { + secrets := make(map[string]types.SecretConfig) + if err := transform(source, &secrets); err != nil { + return secrets, err + } + for name, secret := range secrets { + if secret.External.External && secret.External.Name == "" { + secret.External.Name = name + secrets[name] = secret + } + if secret.File != "" { + secret.File = absPath(workingDir, secret.File) + } + } + return secrets, nil +} + +func absPath(workingDir string, filepath string) string { + if path.IsAbs(filepath) { + return filepath + } + return path.Join(workingDir, filepath) +} + +func transformStruct( + source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + structValue, ok := data.(map[string]interface{}) + if !ok { + // FIXME: this is necessary because of convertToStringKeysRecursive + structValue, ok = data.(types.Dict) + if !ok { + panic(fmt.Sprintf( + "transformStruct called with non-map type: %T, %s", data, data)) + } + } + + var err error + for i := 0; i < target.NumField(); i++ { + field := target.Field(i) + fieldTag := field.Tag.Get("compose") + + yamlName := toYAMLName(field.Name) + value, ok := structValue[yamlName] + if !ok { + continue + } + + structValue[yamlName], err = convertField( + fieldTag, reflect.TypeOf(value), field.Type, value) + if err != nil { + return nil, fmt.Errorf("field %s: %s", yamlName, err.Error()) + } + } + return structValue, nil +} + +func transformMapStringString( + source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + switch value := data.(type) { + case map[string]interface{}: + return toMapStringString(value), nil + case types.Dict: + return toMapStringString(value), nil + case map[string]string: + return value, nil + default: + return data, fmt.Errorf("invalid type %T for map[string]string", value) + } +} + +func convertField( + fieldTag string, + source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + switch fieldTag { + case "": + return data, nil + case "healthcheck": + return loadHealthcheck(data) + case "list_or_dict_equals": + return loadMappingOrList(data, "="), nil + case "list_or_dict_colon": + return loadMappingOrList(data, ":"), nil + case "list_or_struct_map": + return loadListOrStructMap(data, target) + case "string_or_list": + return loadStringOrListOfStrings(data), nil + case "list_of_strings_or_numbers": + return loadListOfStringsOrNumbers(data), nil + case "shell_command": + return loadShellCommand(data) + case "size": + return loadSize(data) + case "-": + return nil, nil + } + return data, nil +} + +func transformExternal(data interface{}) (interface{}, error) { + switch value := data.(type) { + case bool: + return map[string]interface{}{"external": value}, nil + case types.Dict: + return map[string]interface{}{"external": true, "name": value["name"]}, nil + case map[string]interface{}: + return map[string]interface{}{"external": true, "name": value["name"]}, nil + default: + return data, fmt.Errorf("invalid type %T for external", value) + } +} + +func transformServiceSecret(data interface{}) (interface{}, error) { + switch value := data.(type) { + case string: + return map[string]interface{}{"source": value}, nil + case types.Dict: + return data, nil + case map[string]interface{}: + return data, nil + default: + return data, fmt.Errorf("invalid type %T for external", value) + } + +} + +func toYAMLName(name string) string { + nameParts := fieldNameRegexp.FindAllString(name, -1) + for i, p := range nameParts { + nameParts[i] = strings.ToLower(p) + } + return strings.Join(nameParts, "_") +} + +func loadListOrStructMap(value interface{}, target reflect.Type) (interface{}, error) { + if list, ok := value.([]interface{}); ok { + mapValue := map[interface{}]interface{}{} + for _, name := range list { + mapValue[name] = nil + } + return mapValue, nil + } + + return value, nil +} + +func loadListOfStringsOrNumbers(value interface{}) []string { + list := value.([]interface{}) + result := make([]string, len(list)) + for i, item := range list { + result[i] = fmt.Sprint(item) + } + return result +} + +func loadStringOrListOfStrings(value interface{}) []string { + if list, ok := value.([]interface{}); ok { + result := make([]string, len(list)) + for i, item := range list { + result[i] = fmt.Sprint(item) + } + return result + } + return []string{value.(string)} +} + +func loadMappingOrList(mappingOrList interface{}, sep string) map[string]string { + if mapping, ok := mappingOrList.(types.Dict); ok { + return toMapStringString(mapping) + } + if list, ok := mappingOrList.([]interface{}); ok { + result := make(map[string]string) + for _, value := range list { + parts := strings.SplitN(value.(string), sep, 2) + if len(parts) == 1 { + result[parts[0]] = "" + } else { + result[parts[0]] = parts[1] + } + } + return result + } + panic(fmt.Errorf("expected a map or a slice, got: %#v", mappingOrList)) +} + +func loadShellCommand(value interface{}) (interface{}, error) { + if str, ok := value.(string); ok { + return shellwords.Parse(str) + } + return value, nil +} + +func loadHealthcheck(value interface{}) (interface{}, error) { + if str, ok := value.(string); ok { + return append([]string{"CMD-SHELL"}, str), nil + } + return value, nil +} + +func loadSize(value interface{}) (int64, error) { + switch value := value.(type) { + case int: + return int64(value), nil + case string: + return units.RAMInBytes(value) + } + panic(fmt.Errorf("invalid type for size %T", value)) +} + +func toMapStringString(value map[string]interface{}) map[string]string { + output := make(map[string]string) + for key, value := range value { + output[key] = toString(value) + } + return output +} + +func toString(value interface{}) string { + if value == nil { + return "" + } + return fmt.Sprint(value) +} diff --git a/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go b/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go new file mode 100644 index 0000000..f7fee89 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go @@ -0,0 +1,800 @@ +package loader + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "testing" + "time" + + "github.com/docker/docker/cli/compose/types" + "github.com/stretchr/testify/assert" +) + +func buildConfigDetails(source types.Dict) types.ConfigDetails { + workingDir, err := os.Getwd() + if err != nil { + panic(err) + } + + return types.ConfigDetails{ + WorkingDir: workingDir, + ConfigFiles: []types.ConfigFile{ + {Filename: "filename.yml", Config: source}, + }, + Environment: nil, + } +} + +var sampleYAML = ` +version: "3" +services: + foo: + image: busybox + networks: + with_me: + bar: + image: busybox + environment: + - FOO=1 + networks: + - with_ipam +volumes: + hello: + driver: default + driver_opts: + beep: boop +networks: + default: + driver: bridge + driver_opts: + beep: boop + with_ipam: + ipam: + driver: default + config: + - subnet: 172.28.0.0/16 +` + +var sampleDict = types.Dict{ + "version": "3", + "services": types.Dict{ + "foo": types.Dict{ + "image": "busybox", + "networks": types.Dict{"with_me": nil}, + }, + "bar": types.Dict{ + "image": "busybox", + "environment": []interface{}{"FOO=1"}, + "networks": []interface{}{"with_ipam"}, + }, + }, + "volumes": types.Dict{ + "hello": types.Dict{ + "driver": "default", + "driver_opts": types.Dict{ + "beep": "boop", + }, + }, + }, + "networks": types.Dict{ + "default": types.Dict{ + "driver": "bridge", + "driver_opts": types.Dict{ + "beep": "boop", + }, + }, + "with_ipam": types.Dict{ + "ipam": types.Dict{ + "driver": "default", + "config": []interface{}{ + types.Dict{ + "subnet": "172.28.0.0/16", + }, + }, + }, + }, + }, +} + +var sampleConfig = types.Config{ + Services: []types.ServiceConfig{ + { + Name: "foo", + Image: "busybox", + Environment: map[string]string{}, + Networks: map[string]*types.ServiceNetworkConfig{ + "with_me": nil, + }, + }, + { + Name: "bar", + Image: "busybox", + Environment: map[string]string{"FOO": "1"}, + Networks: map[string]*types.ServiceNetworkConfig{ + "with_ipam": nil, + }, + }, + }, + Networks: map[string]types.NetworkConfig{ + "default": { + Driver: "bridge", + DriverOpts: map[string]string{ + "beep": "boop", + }, + }, + "with_ipam": { + Ipam: types.IPAMConfig{ + Driver: "default", + Config: []*types.IPAMPool{ + { + Subnet: "172.28.0.0/16", + }, + }, + }, + }, + }, + Volumes: map[string]types.VolumeConfig{ + "hello": { + Driver: "default", + DriverOpts: map[string]string{ + "beep": "boop", + }, + }, + }, +} + +func TestParseYAML(t *testing.T) { + dict, err := ParseYAML([]byte(sampleYAML)) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, sampleDict, dict) +} + +func TestLoad(t *testing.T) { + actual, err := Load(buildConfigDetails(sampleDict)) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) + assert.Equal(t, sampleConfig.Networks, actual.Networks) + assert.Equal(t, sampleConfig.Volumes, actual.Volumes) +} + +func TestLoadV31(t *testing.T) { + actual, err := loadYAML(` +version: "3.1" +services: + foo: + image: busybox + secrets: [super] +secrets: + super: + external: true +`) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, len(actual.Services), 1) + assert.Equal(t, len(actual.Secrets), 1) +} + +func TestParseAndLoad(t *testing.T) { + actual, err := loadYAML(sampleYAML) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) + assert.Equal(t, sampleConfig.Networks, actual.Networks) + assert.Equal(t, sampleConfig.Volumes, actual.Volumes) +} + +func TestInvalidTopLevelObjectType(t *testing.T) { + _, err := loadYAML("1") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") + + _, err = loadYAML("\"hello\"") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") + + _, err = loadYAML("[\"hello\"]") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") +} + +func TestNonStringKeys(t *testing.T) { + _, err := loadYAML(` +version: "3" +123: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key at top level: 123") + + _, err = loadYAML(` +version: "3" +services: + foo: + image: busybox + 123: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in services: 123") + + _, err = loadYAML(` +version: "3" +services: + foo: + image: busybox +networks: + default: + ipam: + config: + - 123: oh dear +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in networks.default.ipam.config[0]: 123") + + _, err = loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + 1: FOO +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in services.dict-env.environment: 1") +} + +func TestSupportedVersion(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: busybox +`) + assert.NoError(t, err) + + _, err = loadYAML(` +version: "3.0" +services: + foo: + image: busybox +`) + assert.NoError(t, err) +} + +func TestUnsupportedVersion(t *testing.T) { + _, err := loadYAML(` +version: "2" +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version") + + _, err = loadYAML(` +version: "2.0" +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version") +} + +func TestInvalidVersion(t *testing.T) { + _, err := loadYAML(` +version: 3 +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version must be a string") +} + +func TestV1Unsupported(t *testing.T) { + _, err := loadYAML(` +foo: + image: busybox +`) + assert.Error(t, err) +} + +func TestNonMappingObject(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + - foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services must be a mapping") + + _, err = loadYAML(` +version: "3" +services: + foo: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.foo must be a mapping") + + _, err = loadYAML(` +version: "3" +networks: + - default: + driver: bridge +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "networks must be a mapping") + + _, err = loadYAML(` +version: "3" +networks: + default: bridge +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "networks.default must be a mapping") + + _, err = loadYAML(` +version: "3" +volumes: + - data: + driver: local +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "volumes must be a mapping") + + _, err = loadYAML(` +version: "3" +volumes: + data: local +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "volumes.data must be a mapping") +} + +func TestNonStringImage(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: ["busybox", "latest"] +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.foo.image must be a string") +} + +func TestValidEnvironment(t *testing.T) { + config, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + FOO: "1" + BAR: 2 + BAZ: 2.5 + QUUX: + list-env: + image: busybox + environment: + - FOO=1 + - BAR=2 + - BAZ=2.5 + - QUUX= +`) + assert.NoError(t, err) + + expected := map[string]string{ + "FOO": "1", + "BAR": "2", + "BAZ": "2.5", + "QUUX": "", + } + + assert.Equal(t, 2, len(config.Services)) + + for _, service := range config.Services { + assert.Equal(t, expected, service.Environment) + } +} + +func TestInvalidEnvironmentValue(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + FOO: ["1"] +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.dict-env.environment.FOO must be a string, number or null") +} + +func TestInvalidEnvironmentObject(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: "FOO=1" +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.dict-env.environment must be a mapping") +} + +func TestEnvironmentInterpolation(t *testing.T) { + config, err := loadYAML(` +version: "3" +services: + test: + image: busybox + labels: + - home1=$HOME + - home2=${HOME} + - nonexistent=$NONEXISTENT + - default=${NONEXISTENT-default} +networks: + test: + driver: $HOME +volumes: + test: + driver: $HOME +`) + + assert.NoError(t, err) + + home := os.Getenv("HOME") + + expectedLabels := map[string]string{ + "home1": home, + "home2": home, + "nonexistent": "", + "default": "default", + } + + assert.Equal(t, expectedLabels, config.Services[0].Labels) + assert.Equal(t, home, config.Networks["test"].Driver) + assert.Equal(t, home, config.Volumes["test"].Driver) +} + +func TestUnsupportedProperties(t *testing.T) { + dict, err := ParseYAML([]byte(` +version: "3" +services: + web: + image: web + build: ./web + links: + - bar + db: + image: db + build: ./db +`)) + assert.NoError(t, err) + + configDetails := buildConfigDetails(dict) + + _, err = Load(configDetails) + assert.NoError(t, err) + + unsupported := GetUnsupportedProperties(configDetails) + assert.Equal(t, []string{"build", "links"}, unsupported) +} + +func TestDeprecatedProperties(t *testing.T) { + dict, err := ParseYAML([]byte(` +version: "3" +services: + web: + image: web + container_name: web + db: + image: db + container_name: db + expose: ["5434"] +`)) + assert.NoError(t, err) + + configDetails := buildConfigDetails(dict) + + _, err = Load(configDetails) + assert.NoError(t, err) + + deprecated := GetDeprecatedProperties(configDetails) + assert.Equal(t, 2, len(deprecated)) + assert.Contains(t, deprecated, "container_name") + assert.Contains(t, deprecated, "expose") +} + +func TestForbiddenProperties(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: busybox + volumes: + - /data + volume_driver: some-driver + bar: + extends: + service: foo +`) + + assert.Error(t, err) + assert.IsType(t, &ForbiddenPropertiesError{}, err) + fmt.Println(err) + forbidden := err.(*ForbiddenPropertiesError).Properties + + assert.Equal(t, 2, len(forbidden)) + assert.Contains(t, forbidden, "volume_driver") + assert.Contains(t, forbidden, "extends") +} + +func durationPtr(value time.Duration) *time.Duration { + return &value +} + +func int64Ptr(value int64) *int64 { + return &value +} + +func uint64Ptr(value uint64) *uint64 { + return &value +} + +func TestFullExample(t *testing.T) { + bytes, err := ioutil.ReadFile("full-example.yml") + assert.NoError(t, err) + + config, err := loadYAML(string(bytes)) + if !assert.NoError(t, err) { + return + } + + workingDir, err := os.Getwd() + assert.NoError(t, err) + + homeDir := os.Getenv("HOME") + stopGracePeriod := time.Duration(20 * time.Second) + + expectedServiceConfig := types.ServiceConfig{ + Name: "foo", + + CapAdd: []string{"ALL"}, + CapDrop: []string{"NET_ADMIN", "SYS_ADMIN"}, + CgroupParent: "m-executor-abcd", + Command: []string{"bundle", "exec", "thin", "-p", "3000"}, + ContainerName: "my-web-container", + DependsOn: []string{"db", "redis"}, + Deploy: types.DeployConfig{ + Mode: "replicated", + Replicas: uint64Ptr(6), + Labels: map[string]string{"FOO": "BAR"}, + UpdateConfig: &types.UpdateConfig{ + Parallelism: uint64Ptr(3), + Delay: time.Duration(10 * time.Second), + FailureAction: "continue", + Monitor: time.Duration(60 * time.Second), + MaxFailureRatio: 0.3, + }, + Resources: types.Resources{ + Limits: &types.Resource{ + NanoCPUs: "0.001", + MemoryBytes: 50 * 1024 * 1024, + }, + Reservations: &types.Resource{ + NanoCPUs: "0.0001", + MemoryBytes: 20 * 1024 * 1024, + }, + }, + RestartPolicy: &types.RestartPolicy{ + Condition: "on_failure", + Delay: durationPtr(5 * time.Second), + MaxAttempts: uint64Ptr(3), + Window: durationPtr(2 * time.Minute), + }, + Placement: types.Placement{ + Constraints: []string{"node=foo"}, + }, + }, + Devices: []string{"/dev/ttyUSB0:/dev/ttyUSB0"}, + DNS: []string{"8.8.8.8", "9.9.9.9"}, + DNSSearch: []string{"dc1.example.com", "dc2.example.com"}, + DomainName: "foo.com", + Entrypoint: []string{"/code/entrypoint.sh", "-p", "3000"}, + Environment: map[string]string{ + "RACK_ENV": "development", + "SHOW": "true", + "SESSION_SECRET": "", + "FOO": "1", + "BAR": "2", + "BAZ": "3", + }, + Expose: []string{"3000", "8000"}, + ExternalLinks: []string{ + "redis_1", + "project_db_1:mysql", + "project_db_1:postgresql", + }, + ExtraHosts: map[string]string{ + "otherhost": "50.31.209.229", + "somehost": "162.242.195.82", + }, + HealthCheck: &types.HealthCheckConfig{ + Test: []string{ + "CMD-SHELL", + "echo \"hello world\"", + }, + Interval: "10s", + Timeout: "1s", + Retries: uint64Ptr(5), + }, + Hostname: "foo", + Image: "redis", + Ipc: "host", + Labels: map[string]string{ + "com.example.description": "Accounting webapp", + "com.example.number": "42", + "com.example.empty-label": "", + }, + Links: []string{ + "db", + "db:database", + "redis", + }, + Logging: &types.LoggingConfig{ + Driver: "syslog", + Options: map[string]string{ + "syslog-address": "tcp://192.168.0.42:123", + }, + }, + MacAddress: "02:42:ac:11:65:43", + NetworkMode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b", + Networks: map[string]*types.ServiceNetworkConfig{ + "some-network": { + Aliases: []string{"alias1", "alias3"}, + Ipv4Address: "", + Ipv6Address: "", + }, + "other-network": { + Ipv4Address: "172.16.238.10", + Ipv6Address: "2001:3984:3989::10", + }, + "other-other-network": nil, + }, + Pid: "host", + Ports: []string{ + "3000", + "3000-3005", + "8000:8000", + "9090-9091:8080-8081", + "49100:22", + "127.0.0.1:8001:8001", + "127.0.0.1:5000-5010:5000-5010", + }, + Privileged: true, + ReadOnly: true, + Restart: "always", + SecurityOpt: []string{ + "label=level:s0:c100,c200", + "label=type:svirt_apache_t", + }, + StdinOpen: true, + StopSignal: "SIGUSR1", + StopGracePeriod: &stopGracePeriod, + Tmpfs: []string{"/run", "/tmp"}, + Tty: true, + Ulimits: map[string]*types.UlimitsConfig{ + "nproc": { + Single: 65535, + }, + "nofile": { + Soft: 20000, + Hard: 40000, + }, + }, + User: "someone", + Volumes: []string{ + "/var/lib/mysql", + "/opt/data:/var/lib/mysql", + fmt.Sprintf("%s:/code", workingDir), + fmt.Sprintf("%s/static:/var/www/html", workingDir), + fmt.Sprintf("%s/configs:/etc/configs/:ro", homeDir), + "datavolume:/var/lib/mysql", + }, + WorkingDir: "/code", + } + + assert.Equal(t, []types.ServiceConfig{expectedServiceConfig}, config.Services) + + expectedNetworkConfig := map[string]types.NetworkConfig{ + "some-network": {}, + + "other-network": { + Driver: "overlay", + DriverOpts: map[string]string{ + "foo": "bar", + "baz": "1", + }, + Ipam: types.IPAMConfig{ + Driver: "overlay", + Config: []*types.IPAMPool{ + {Subnet: "172.16.238.0/24"}, + {Subnet: "2001:3984:3989::/64"}, + }, + }, + }, + + "external-network": { + External: types.External{ + Name: "external-network", + External: true, + }, + }, + + "other-external-network": { + External: types.External{ + Name: "my-cool-network", + External: true, + }, + }, + } + + assert.Equal(t, expectedNetworkConfig, config.Networks) + + expectedVolumeConfig := map[string]types.VolumeConfig{ + "some-volume": {}, + "other-volume": { + Driver: "flocker", + DriverOpts: map[string]string{ + "foo": "bar", + "baz": "1", + }, + }, + "external-volume": { + External: types.External{ + Name: "external-volume", + External: true, + }, + }, + "other-external-volume": { + External: types.External{ + Name: "my-cool-volume", + External: true, + }, + }, + } + + assert.Equal(t, expectedVolumeConfig, config.Volumes) +} + +func loadYAML(yaml string) (*types.Config, error) { + dict, err := ParseYAML([]byte(yaml)) + if err != nil { + return nil, err + } + + return Load(buildConfigDetails(dict)) +} + +func serviceSort(services []types.ServiceConfig) []types.ServiceConfig { + sort.Sort(servicesByName(services)) + return services +} + +type servicesByName []types.ServiceConfig + +func (sbn servicesByName) Len() int { return len(sbn) } +func (sbn servicesByName) Swap(i, j int) { sbn[i], sbn[j] = sbn[j], sbn[i] } +func (sbn servicesByName) Less(i, j int) bool { return sbn[i].Name < sbn[j].Name } diff --git a/vendor/github.com/docker/docker/cli/compose/schema/bindata.go b/vendor/github.com/docker/docker/cli/compose/schema/bindata.go new file mode 100644 index 0000000..9486e91 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/bindata.go @@ -0,0 +1,260 @@ +// Code generated by go-bindata. +// sources: +// data/config_schema_v3.0.json +// data/config_schema_v3.1.json +// DO NOT EDIT! + +package schema + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _dataConfig_schema_v30Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5a\x4f\x8f\xdb\xb8\x0e\xbf\xe7\x53\x18\x6e\x6f\xcd\xcc\x14\x78\xc5\x03\x5e\x6f\xef\xb8\xa7\xdd\xf3\x0e\x5c\x43\xb1\x99\x44\x1d\x59\x52\x29\x39\x9d\xb4\xc8\x77\x5f\xc8\xff\x22\x2b\x92\xe5\x24\xee\xb6\x87\x9e\x66\x62\x91\x14\xff\xe9\x47\x8a\xf6\xf7\x55\x92\xa4\x6f\x55\xb1\x87\x8a\xa4\x1f\x93\x74\xaf\xb5\xfc\xf8\xf4\xf4\x59\x09\xfe\xd0\x3e\x7d\x14\xb8\x7b\x2a\x91\x6c\xf5\xc3\xfb\x0f\x4f\xed\xb3\x37\xe9\xda\xf0\xd1\xd2\xb0\x14\x82\x6f\xe9\x2e\x6f\x57\xf2\xc3\x7f\x1e\xdf\x3f\x1a\xf6\x96\x44\x1f\x25\x18\x22\xb1\xf9\x0c\x85\x6e\x9f\x21\x7c\xa9\x29\x82\x61\x7e\x4e\x0f\x80\x8a\x0a\x9e\x66\xeb\x95\x59\x93\x28\x24\xa0\xa6\xa0\xd2\x8f\x89\x51\x2e\x49\x06\x92\xfe\x81\x25\x56\x69\xa4\x7c\x97\x36\x8f\x4f\x8d\x84\x24\x49\x15\xe0\x81\x16\x96\x84\x41\xd5\x37\x4f\x67\xf9\x4f\x03\xd9\xda\x95\x6a\x29\xdb\x3c\x97\x44\x6b\x40\xfe\xd7\xa5\x6e\xcd\xf2\xa7\x67\xf2\xf0\xed\xff\x0f\x7f\xbf\x7f\xf8\xdf\x63\xfe\x90\xbd\x7b\x3b\x5a\x36\xfe\x45\xd8\xb6\xdb\x97\xb0\xa5\x9c\x6a\x2a\xf8\xb0\x7f\x3a\x50\x9e\xba\xff\x4e\xc3\xc6\xa4\x2c\x1b\x62\xc2\x46\x7b\x6f\x09\x53\x30\xb6\x99\x83\xfe\x2a\xf0\x25\x66\xf3\x40\xf6\x93\x6c\xee\xf6\xf7\xd8\x3c\x36\xe7\x20\x58\x5d\x45\x23\xd8\x53\xfd\x24\x63\xda\xed\xef\x8b\xdf\xaa\x37\x7a\x92\xb6\xa5\xb0\xf6\x6e\x14\x1c\x65\xbb\xcf\x55\xbe\x6c\x0b\xfb\x6a\x70\x56\xc0\x4b\x25\x48\x26\x8e\xe6\x59\xc0\x1f\x2d\x41\x05\x5c\xa7\x83\x0b\x92\x24\xdd\xd4\x94\x95\xae\x47\x05\x87\x3f\x8d\x88\x67\xeb\x61\x92\x7c\x77\x0f\xb6\x25\xa7\x59\x1f\xfd\x0a\x07\x7c\x58\x0f\xd8\x32\xac\x17\x82\x6b\x78\xd5\x8d\x51\xd3\x5b\xb7\x2e\x10\xc5\x0b\xe0\x96\x32\x98\xcb\x41\x70\xa7\x26\x5c\xc6\xa8\xd2\xb9\xc0\xbc\xa4\x85\x4e\x4f\x0e\xfb\x85\xbc\x78\x3e\x0d\xac\xd6\xaf\x6c\xe5\x11\x98\x16\x44\xe6\xa4\x2c\x47\x76\x10\x44\x72\x4c\xd7\x49\x4a\x35\x54\xca\x6f\x62\x92\xd6\x9c\x7e\xa9\xe1\x8f\x8e\x44\x63\x0d\xae\xdc\x12\x85\x5c\x5e\xf0\x0e\x45\x2d\x73\x49\xd0\x24\xd8\xb4\xfb\xd3\x42\x54\x15\xe1\x4b\x65\xdd\x35\x76\xcc\xf0\xbc\xe0\x9a\x50\x0e\x98\x73\x52\xc5\x12\xc9\x9c\x3a\xe0\xa5\xca\xdb\xfa\x37\x99\x46\xdb\xbc\xe5\x57\x8e\x80\xa1\x18\x2e\x1a\x8f\x92\x4f\x25\x76\x2b\xc6\xa4\xb6\xd1\x2d\x75\x18\x73\x05\x04\x8b\xfd\x8d\xfc\xa2\x22\x94\xcf\xf1\x1d\x70\x8d\x47\x29\x68\x9b\x2f\xbf\x5c\x22\x00\x3f\xe4\x03\x96\x5c\xed\x06\xe0\x07\x8a\x82\x57\xfd\x69\x98\x03\x30\x03\xc8\x1b\xfe\x57\x29\x14\xb8\x8e\x71\x0c\xb4\x97\x06\x53\x47\x3e\xe9\x39\x9e\x7b\xc3\xd7\x49\xca\xeb\x6a\x03\x68\x5a\xba\x11\xe5\x56\x60\x45\x8c\xb2\xfd\xde\xd6\xf2\xc8\xd3\x9e\xcc\xb3\x1d\x68\xdb\x60\xca\x3a\x61\x39\xa3\xfc\x65\xf9\x14\x87\x57\x8d\x24\xdf\x0b\xa5\xe7\x63\xb8\xc5\xbe\x07\xc2\xf4\xbe\xd8\x43\xf1\x32\xc1\x6e\x53\x8d\xb8\x85\xd2\x73\x92\x9c\x56\x64\x17\x27\x92\x45\x8c\x84\x91\x0d\xb0\x9b\xec\x5c\xd4\xf9\x96\x58\xb1\xdb\x19\xd2\x50\xc6\x5d\x74\x2e\xdd\x72\xac\xe6\x97\x48\x0f\x80\x73\x0b\xb8\x90\xe7\x86\xcb\x5d\x8c\x37\x20\x49\xbc\xfb\x1c\x91\x7e\x7a\x6c\x9b\xcf\x89\x53\xd5\xfc\xc7\x58\x9a\xb9\xed\x42\xe2\xd4\x7d\xdf\x13\xc7\xc2\x79\x0d\xc5\x28\x2a\x15\x29\x4c\xdf\x80\xa0\x02\x71\x3d\x93\x76\xcd\x7e\x5e\x89\x32\x94\xa0\x17\xc4\xae\x6f\x82\x48\x7d\x75\x21\x4c\x6e\xea\x1f\x67\x85\x2e\x7a\x81\x88\x58\x13\x52\x6f\xae\x9a\x67\x75\xe3\x29\xd6\xd0\x11\x46\x89\x82\xf8\x61\x0f\x3a\x72\x24\x8d\xca\xc3\x87\x99\x39\xe1\xe3\xfd\xef\x24\x6f\x80\x35\x28\x73\x7e\x8f\x1c\x11\x75\x56\xa5\x39\x6e\x3e\x45\xb2\xc8\x69\xfb\xc1\x2d\xbc\xa4\x65\x18\x2b\x1a\x84\xb0\x0f\x98\x14\xa8\x2f\x4e\xd7\xbf\x53\xee\xdb\xad\xef\xae\xf6\x12\xe9\x81\x32\xd8\xc1\xf8\xd6\xb2\x11\x82\x01\xe1\x23\xe8\x41\x20\x65\x2e\x38\x3b\xce\xa0\x54\x9a\x60\xf4\x42\xa1\xa0\xa8\x91\xea\x63\x2e\xa4\x5e\xbc\xcf\x50\xfb\x2a\x57\xf4\x1b\x8c\xa3\x79\xc6\xfb\x4e\x50\x36\xe2\x39\xaa\x42\xdf\x56\xaf\x95\x2e\x29\xcf\x85\x04\x1e\xf5\x8e\xd2\x42\xe6\x3b\x24\x05\xe4\x12\x90\x8a\xd2\x67\xe0\xda\x8e\x75\x59\x23\x31\xfb\x5f\x8a\x51\x74\xc7\x09\x8b\x39\x5a\x57\x72\x7b\xe3\xc5\x42\xeb\x78\xb8\x6b\x46\x2b\x1a\x3e\x07\x1e\x80\x9d\x51\x03\x5a\xfc\xf7\xc3\xfe\x04\xe4\x9f\x35\xa5\x5c\xc3\x0e\xd0\x87\x94\x13\x5d\xc7\x74\xd3\x31\xa3\xdb\xd8\x13\x1c\x07\x74\x42\x8f\x86\x41\x89\xad\xf6\x33\xf8\x7a\x11\xaf\x5e\xa3\xe1\x6f\x23\x6f\xdd\x29\x92\x79\xe9\xaf\x82\x73\x57\x8d\x2c\x88\xa8\x27\x2f\xa2\xd6\x2a\xda\x18\x36\x34\x5c\x4d\x35\x35\x03\xa9\x35\xc5\x5c\x14\x2f\x4c\xa3\x64\x0e\x41\x49\xfd\xda\xae\x1c\xcb\xae\x98\x23\x3b\x77\x96\x5e\x80\x6f\xa2\x68\x93\x46\x27\xb0\xd3\xd3\xcd\x8e\x28\x38\x79\xa4\x8a\x6c\x9c\x99\x9b\xef\x70\x9b\x6c\xc4\x43\x1c\x63\x10\x34\x52\x27\x2e\x1d\xda\x8e\xf0\x04\xd4\xaf\x39\x38\xd0\xb4\x02\x51\xfb\x6b\xd6\xca\xce\xef\x8e\x29\xb5\x26\xb3\x91\xa0\x5a\x94\x6e\x4c\x9f\x87\xa0\xf6\xfd\x45\x34\x70\x73\x0e\x09\x82\x64\xb4\x20\x2a\x06\x44\x77\x5c\x50\x6b\x59\x12\x0d\x79\xfb\xa2\xea\x2a\xe8\x9f\xc0\x7c\x49\x90\x30\x06\x8c\xaa\x6a\x0e\x86\xa6\x25\x30\x72\xbc\xa9\x7c\x36\xec\x5b\x42\x59\x8d\x90\x93\x42\x77\xef\xc2\x22\x39\x97\x56\x82\x53\x2d\xbc\x08\x31\x6f\xcb\x8a\xbc\xe6\xfd\xb6\x0d\x89\xf7\xc0\x04\xdb\xba\xb9\x77\x4b\x2b\x13\x94\xa8\xb1\xb8\x70\xf6\xcd\x21\x3a\xd7\xfa\x40\xc6\xf4\x3b\x5e\x98\x8e\xa0\x0c\x92\x0c\x57\xff\x28\x7f\xb4\xb4\x74\x7d\x66\x2e\x05\xa3\xc5\x71\x29\x0b\x0b\xc1\x5b\x27\xcf\x49\x88\x3b\x33\xd0\xa4\x83\x69\x85\x2a\xa9\xa3\x87\xb5\x61\xf8\x4a\x79\x29\xbe\x5e\xb1\xe1\x72\xa9\x24\x19\x29\xc0\xc1\xbb\x7b\x1d\xad\x34\x12\xca\xf5\xd5\xe5\xfc\x5e\xb3\xee\xa8\xe6\x43\x7e\x46\x50\x7f\xa0\x8b\xbf\x49\x0d\x20\x7d\x21\xeb\xe8\x3c\xa8\x82\x4a\xa0\x37\x01\x17\x78\xf3\x1d\x33\xb1\x27\x5b\xa0\xaa\xcd\x1a\x20\x76\x54\xe6\xbe\xb8\xf8\x6d\x23\x3e\x24\xcc\xe2\x80\x44\x25\xa9\x96\x3a\x1d\xb3\x47\xaa\xa9\xb7\x06\x27\xd3\xa3\x88\x24\x3c\x8e\x88\x69\x1d\xd7\xbd\xa3\x50\xf5\x86\xc3\x64\x47\x65\xf9\xd3\xf7\x9e\x77\xfe\x35\xe5\x14\xbe\x94\xdc\x07\x7a\xfd\xdb\x90\x40\x54\x9f\x87\x9e\x79\x3d\xf8\x2a\x9b\x1d\xe2\xe0\xab\x88\xe5\xf4\xbf\xb2\xc1\xbb\x03\x33\xba\x2f\x37\x22\x90\xd1\x51\xfd\x46\x8c\xdf\xf9\x75\x65\x7e\x39\x43\x2a\x2b\xcf\x2e\xef\x8f\x53\x29\x31\x7b\x3a\xdf\x71\x64\x63\x35\x5c\x32\xcf\x07\x74\x63\xb4\x9d\x1a\x4a\xf4\x24\x81\x69\xad\xb3\x69\xe7\xc4\x69\xcb\x17\xcc\xf0\xc7\x77\x13\x35\x65\xea\x2d\xda\x0f\x02\xe3\x05\x06\x3e\xfe\x98\x3a\x8d\x68\xef\xdd\xcb\xaf\xc0\x02\xa0\x66\xf1\x5f\x7c\x13\x66\xec\xe4\xc7\x8b\xf9\xc6\xf7\xf1\xd0\xae\xfd\x9e\x2b\x1b\xf9\xc7\x21\x69\xdf\x49\x5b\x90\x92\xd9\xbd\x79\x28\x8c\xde\x2f\xc5\xdc\x91\x61\xff\xc5\x56\xe6\x87\xab\x95\xfd\xb7\xf9\xba\x6e\x75\x5a\xfd\x13\x00\x00\xff\xff\x46\xf7\x7b\x23\xe5\x2a\x00\x00") + +func dataConfig_schema_v30JsonBytes() ([]byte, error) { + return bindataRead( + _dataConfig_schema_v30Json, + "data/config_schema_v3.0.json", + ) +} + +func dataConfig_schema_v30Json() (*asset, error) { + bytes, err := dataConfig_schema_v30JsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/config_schema_v3.0.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataConfig_schema_v31Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x1a\xcb\x8e\xdb\x36\xf0\xee\xaf\x10\x94\xdc\xe2\xdd\x4d\xd1\xa0\x40\x73\xeb\xb1\xa7\xf6\xdc\x85\x23\xd0\xd2\x58\x66\x96\x22\x19\x92\x72\xd6\x09\xfc\xef\x05\xf5\x32\x45\x91\x22\x6d\x2b\xd9\x45\xd1\xd3\xae\xc5\x99\xe1\xbc\x67\x38\xe4\xf7\x55\x92\xa4\x6f\x65\xbe\x87\x0a\xa5\x1f\x93\x74\xaf\x14\xff\xf8\xf0\xf0\x59\x32\x7a\xd7\x7e\xbd\x67\xa2\x7c\x28\x04\xda\xa9\xbb\xf7\x1f\x1e\xda\x6f\x6f\xd2\xb5\xc6\xc3\x85\x46\xc9\x19\xdd\xe1\x32\x6b\x57\xb2\xc3\xaf\xf7\xbf\xdc\x6b\xf4\x16\x44\x1d\x39\x68\x20\xb6\xfd\x0c\xb9\x6a\xbf\x09\xf8\x52\x63\x01\x1a\xf9\x31\x3d\x80\x90\x98\xd1\x74\xb3\x5e\xe9\x35\x2e\x18\x07\xa1\x30\xc8\xf4\x63\xa2\x99\x4b\x92\x01\xa4\xff\x60\x90\x95\x4a\x60\x5a\xa6\xcd\xe7\x53\x43\x21\x49\x52\x09\xe2\x80\x73\x83\xc2\xc0\xea\x9b\x87\x33\xfd\x87\x01\x6c\x6d\x53\x35\x98\x6d\xbe\x73\xa4\x14\x08\xfa\xf7\x94\xb7\x66\xf9\xd3\x23\xba\xfb\xf6\xc7\xdd\x3f\xef\xef\x7e\xbf\xcf\xee\x36\xef\xde\x8e\x96\xb5\x7e\x05\xec\xda\xed\x0b\xd8\x61\x8a\x15\x66\x74\xd8\x3f\x1d\x20\x4f\xdd\x7f\xa7\x61\x63\x54\x14\x0d\x30\x22\xa3\xbd\x77\x88\x48\x18\xcb\x4c\x41\x7d\x65\xe2\x29\x24\xf3\x00\xf6\x42\x32\x77\xfb\x3b\x64\x1e\x8b\x73\x60\xa4\xae\x82\x16\xec\xa1\x5e\x48\x98\x76\xfb\x65\xec\x27\x21\x17\xa0\xc2\x2e\xdb\x42\xbd\x98\xc7\xea\xed\x6f\x13\x78\xd5\x0b\x3d\x0b\xdb\x42\x18\x7b\x37\x0c\x8e\xc2\xdb\xa5\x2a\x57\x78\xf9\x75\x35\x28\xcb\xa3\xa5\x02\x38\x61\x47\xfd\xcd\xa3\x8f\x16\xa0\x02\xaa\xd2\x41\x05\x49\x92\x6e\x6b\x4c\x0a\x5b\xa3\x8c\xc2\x5f\x9a\xc4\xa3\xf1\x31\x49\xbe\xdb\x99\xcc\xa0\xd3\xac\x8f\x7e\xf9\x0d\x3e\xac\x7b\x64\x19\xd6\x73\x46\x15\x3c\xab\x46\xa8\xf9\xad\x5b\x15\xb0\xfc\x09\xc4\x0e\x13\x88\xc5\x40\xa2\x94\x33\x2a\x23\x58\xaa\x8c\x89\xac\xc0\xb9\x4a\x4f\x16\xfa\x84\x5e\xd8\x9f\x06\x54\xe3\xd7\x66\xe5\x20\x98\xe6\x88\x67\xa8\x28\x46\x72\x20\x21\xd0\x31\x5d\x27\x29\x56\x50\x49\xb7\x88\x49\x5a\x53\xfc\xa5\x86\x3f\x3b\x10\x25\x6a\xb0\xe9\x16\x82\xf1\xe5\x09\x97\x82\xd5\x3c\xe3\x48\x68\x07\x9b\x57\x7f\x9a\xb3\xaa\x42\x74\x29\xaf\xbb\x44\x8e\x08\xcd\x33\xaa\x10\xa6\x20\x32\x8a\xaa\x90\x23\xe9\xa8\x03\x5a\xc8\xac\x2d\xf8\xb3\x6e\xb4\xcb\x5a\x7c\x69\x11\x18\xaa\xff\xa2\xf6\x28\xe8\x9c\x63\xb7\x64\xb4\x6b\x6b\xde\x52\x0b\x31\x93\x80\x44\xbe\xbf\x12\x9f\x55\x08\xd3\x18\xdd\x01\x55\xe2\xc8\x19\x6e\xfd\xe5\xd5\x39\x02\xd0\x43\x36\xe4\x92\x8b\xd5\x00\xf4\x80\x05\xa3\x55\x1f\x0d\x31\x09\x66\x48\xf2\x1a\xff\x99\x33\x09\xb6\x62\x2c\x01\xcd\xa5\x41\xd4\x91\x4e\x7a\x8c\xc7\x5e\xf0\x75\x92\xd2\xba\xda\x82\xd0\x3d\xec\x08\x72\xc7\x44\x85\x34\xb3\xfd\xde\xc6\xf2\x48\xd3\x0e\xcf\x33\x15\x68\xca\xa0\xcb\x3a\x22\x19\xc1\xf4\x69\x79\x17\x87\x67\x25\x50\xb6\x67\x52\xc5\xe7\x70\x03\x7d\x0f\x88\xa8\x7d\xbe\x87\xfc\x69\x06\xdd\x84\x1a\x61\x33\xa9\x62\x9c\x1c\x57\xa8\x0c\x03\xf1\x3c\x04\x42\xd0\x16\xc8\x55\x72\x2e\xaa\x7c\x83\x2c\x2b\x4b\x0d\xea\xf3\xb8\x49\xe7\xd2\x2d\x87\x6a\x7e\x21\xf0\x01\x44\x6c\x01\x67\xfc\xdc\x70\xd9\x8b\xe1\x06\x24\x09\x77\x9f\x23\xd0\x4f\xf7\x6d\xf3\x39\x13\x55\xcd\x7f\x84\xa4\x1b\xbb\x5d\x48\xac\xba\xef\xfa\x62\x49\x18\xd7\x50\x8c\xac\x52\xa1\x5c\xf7\x0d\x02\xa4\xc7\xae\x67\xd0\xee\x74\x93\x55\xac\xf0\x39\xe8\x04\xd8\xd6\x8d\x37\x53\x5f\x5c\x08\x93\xab\xfa\xc7\x28\xd3\x05\x0f\x10\x01\x69\x7c\xec\xc5\xb2\x79\x66\x37\xec\x62\x0d\x1c\x22\x18\x49\x08\x07\xbb\x57\x91\x23\x6a\x98\x1f\x3e\x44\xfa\x84\x0b\xf7\xb7\x59\x5c\x0f\xaa\x97\x66\x7c\x8f\x1c\x20\x75\x66\xa5\x09\x37\x17\x23\x9b\x40\xb4\xfd\xe0\x16\x9e\xe3\xc2\x9f\x2b\x9a\x0c\x61\x06\x18\x67\x42\x4d\xa2\xeb\xe7\x94\xfb\x76\xeb\x9b\xab\x3d\x17\xf8\x80\x09\x94\x30\x3e\xb5\x6c\x19\x23\x80\xe8\x28\xf5\x08\x40\x45\xc6\x28\x39\x46\x40\x4a\x85\x44\xf0\x40\x21\x21\xaf\x05\x56\xc7\x8c\x71\xb5\x78\x9f\x21\xf7\x55\x26\xf1\x37\x18\x5b\xf3\x9c\xef\x3b\x42\x1b\x8b\x21\x6b\x42\x72\xa5\x41\x7d\x29\x29\x1c\xc6\x8e\x44\x18\x4c\x54\xe1\x14\x95\x4a\x56\x8b\x3c\xf6\x80\xad\xf7\x44\xa2\x84\xd8\x23\xbc\x76\xb7\x71\xd8\xcc\x03\x97\x97\x00\x4f\x0a\x5d\x67\xc2\x50\x55\xb6\x7f\x9b\x79\xe5\xe4\x0c\x7d\x79\x94\xb9\xba\xae\x5b\x93\xaa\xc0\x34\x63\x1c\x68\x30\x36\xa4\x62\x3c\x2b\x05\xca\x21\xe3\x20\x30\x73\xaa\x62\x6d\x46\x7a\x51\x0b\xa4\xf7\x9f\x92\x91\xb8\xa4\x88\x84\xc2\x4c\x55\x7c\x77\xe5\xb1\x52\xa9\x70\xb0\xd7\x04\x57\xd8\x1f\x34\x0e\xaf\x8d\xe8\x00\xda\xea\xef\x2e\xfa\x33\x05\xff\xcc\x29\xa6\x0a\x4a\xed\x26\x53\xa7\x9a\xe9\x39\xe7\x5b\xce\x88\x5e\x73\x8f\xc4\xd8\xa0\x33\x7c\x24\x6d\x60\xee\x94\x1b\xc1\xd5\x89\x3a\xf9\x1a\xdd\x75\x34\xf4\xd6\x1d\x23\x1b\x27\xfc\x45\xc5\xdc\x66\x63\xe3\xad\xa7\xee\xa0\xaa\x65\xf0\x58\xd0\xc0\x50\x39\xd7\xd2\x0e\xa0\xc6\xd0\x7e\xd1\x6a\xa1\xdb\x64\x1d\x04\x05\x76\x73\xbb\xb2\x24\xbb\x60\xec\x6e\x9d\x58\x7b\x02\xae\x79\xb2\x09\x1a\x9c\xbf\xcf\xcf\xb6\x3b\x20\xef\xdc\x19\x4b\xb4\xb5\x26\xae\xae\xe0\xd6\xde\x28\x0e\xe1\x1c\x23\x40\x09\x6c\xd9\xa5\x4f\xd4\x66\x3e\x01\xf9\x3a\xc7\x46\x0a\x57\xc0\x6a\x77\xc1\x5b\x99\xfe\xdd\x21\xa5\xc6\x5c\x3e\x60\x54\x03\xd2\xb6\xe9\xe3\x60\xd4\xbe\xbb\x0c\x1a\x2e\x26\x48\x04\x70\x82\x73\x24\x43\x89\xe8\x86\xf1\x44\xcd\x0b\xa4\x20\x6b\xef\x65\x2f\x4a\xfd\x33\x39\x9f\x23\x81\x08\x01\x82\x65\x15\x93\x43\xd3\x02\x08\x3a\x5e\x55\x3e\x1b\xf4\x1d\xc2\xa4\x16\x90\xa1\x5c\x75\x57\xbf\x01\x9f\x4b\x2b\x46\xb1\x62\xce\x0c\x11\xb7\x65\x85\x9e\xb3\x7e\xdb\x06\x24\xd4\xd9\x8c\x9b\xfa\xd8\xc9\x82\xe1\x09\x6d\xe3\x77\x59\x75\x9e\x31\xd1\xb9\xd6\x7b\x3c\xa6\xdf\x71\x22\xba\x00\xa9\x33\xc9\x30\xf8\x09\xe2\x07\x4b\x4b\x77\xca\xc8\x38\x23\x38\x3f\x2e\x25\x61\xce\x68\xab\xe4\x18\x87\xb8\xd1\x03\xb5\x3b\xe8\x56\xa8\xe2\x2a\x18\xac\x0d\xc2\x57\x4c\x0b\xf6\xf5\x82\x0d\x97\x73\x25\x4e\x50\x0e\x56\xbe\xbb\x55\xd1\x52\x09\x84\xa9\xba\xb8\x9c\xdf\x2a\xd6\x0d\xd5\x7c\xf0\xcf\x40\xd6\x1f\xe0\xc2\xf7\xe8\x9e\x4c\x9f\xf3\x3a\x38\x0d\xac\xa0\x62\xc2\xe9\x80\x0b\x3c\xf4\x08\x89\xd8\x83\x2d\x50\xd5\xa2\xc6\xc7\x1d\x54\xc6\xf8\xf2\xa7\x8d\xf0\x88\x78\x13\x4e\x48\x98\xa3\x6a\xa9\xe8\x88\x1e\xa8\xa7\xce\x1a\x9c\xcc\xcf\x2d\x12\xff\xec\x22\xc4\x75\x98\xf7\x0e\x42\xd6\x5b\xea\x19\x21\x4c\x4f\x19\xae\x5b\xfe\xf8\x63\xca\xc9\x7f\x28\xb9\x2d\xe9\xf5\x77\x61\x1e\xab\x3e\x0e\x3d\xf3\x7a\xd0\xd5\x26\xda\xc4\xde\x8b\xa8\xe5\xf8\x6f\xda\x77\x7b\x44\xe0\xea\xf3\x2f\xec\x04\x6f\x48\x2e\xdd\x8b\xa6\x40\x6e\xe9\xa0\xfe\x4f\x2d\xff\x11\x47\xfc\x79\xfe\xd5\x3d\x20\x0b\xbe\xdc\x6a\xa0\xae\x2e\xce\x11\xcf\x95\x5e\x81\xcd\x5e\xda\x14\xe3\xc1\xa2\x61\x92\xe9\x99\x7f\x4e\x93\xd1\xf7\x69\x1d\xc6\x66\xcc\x86\x0d\xe6\x78\xe3\x3b\xae\x90\x73\x83\xa4\x1e\xc4\x73\xbf\x62\x6d\xda\x29\x71\x5e\xf2\x05\x93\xcd\xfd\xbb\x99\x3e\x60\xee\xde\xfb\x07\x15\xd0\x05\x86\x74\x6e\x9b\x5a\x87\x87\x5e\xbb\xd3\x77\x9b\x9e\xf8\x37\xf0\x27\xaf\x38\xb5\x9c\xf4\x38\x99\x49\x7d\x1f\x0f\x5a\xdb\x17\x98\x9b\x91\x7e\x2c\x90\xf6\x15\x89\x91\xdd\x37\xe6\x79\xca\x67\x46\xe7\xdb\x4e\x7b\xcc\xdb\xbf\xb1\xf4\xdc\x6a\xac\xcc\xbf\xcd\x7b\xd8\xd5\x69\xf5\x6f\x00\x00\x00\xff\xff\xfc\xf3\x11\x6a\x88\x2f\x00\x00") + +func dataConfig_schema_v31JsonBytes() ([]byte, error) { + return bindataRead( + _dataConfig_schema_v31Json, + "data/config_schema_v3.1.json", + ) +} + +func dataConfig_schema_v31Json() (*asset, error) { + bytes, err := dataConfig_schema_v31JsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/config_schema_v3.1.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "data/config_schema_v3.0.json": dataConfig_schema_v30Json, + "data/config_schema_v3.1.json": dataConfig_schema_v31Json, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "data": &bintree{nil, map[string]*bintree{ + "config_schema_v3.0.json": &bintree{dataConfig_schema_v30Json, map[string]*bintree{}}, + "config_schema_v3.1.json": &bintree{dataConfig_schema_v31Json, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} + diff --git a/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json new file mode 100644 index 0000000..fbcd8bb --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json @@ -0,0 +1,383 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v3.0.json", + "type": "object", + "required": ["version"], + + "properties": { + "version": { + "type": "string" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + } + }, + + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "deploy": {"$ref": "#/definitions/deployment"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + } + ] + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "depends_on": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false + }, + + "mac_address": {"type": "string"}, + "network_mode": {"type": "string"}, + + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"} + }, + "additionalProperties": false + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "ports" + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "sysctls": {"$ref": "#/definitions/list_or_dict"}, + "stdin_open": {"type": "boolean"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "stop_signal": {"type": "string"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false + } + ] + } + } + }, + "user": {"type": "string"}, + "userns_mode": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": "object", + "additionalProperties": false, + "properties": { + "disable": {"type": "boolean"}, + "interval": {"type": "string"}, + "retries": {"type": "number"}, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "timeout": {"type": "string"} + } + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"} + }, + "additionalProperties": false + }, + "resources": { + "type": "object", + "properties": { + "limits": {"$ref": "#/definitions/resource"}, + "reservations": {"$ref": "#/definitions/resource"} + } + }, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + + "resource": { + "id": "#/definitions/resource", + "type": "object", + "properties": { + "cpus": {"type": "string"}, + "memory": {"type": "string"} + }, + "additionalProperties": false + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string"} + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "internal": {"type": "boolean"}, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json new file mode 100644 index 0000000..b703748 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json @@ -0,0 +1,428 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v3.1.json", + "type": "object", + "required": ["version"], + + "properties": { + "version": { + "type": "string" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + }, + + "secrets": { + "id": "#/properties/secrets", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/secret" + } + }, + "additionalProperties": false + } + }, + + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "deploy": {"$ref": "#/definitions/deployment"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + } + ] + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "depends_on": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false + }, + + "mac_address": {"type": "string"}, + "network_mode": {"type": "string"}, + + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"} + }, + "additionalProperties": false + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "ports" + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "secrets": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "source": {"type": "string"}, + "target": {"type": "string"}, + "uid": {"type": "string"}, + "gid": {"type": "string"}, + "mode": {"type": "number"} + } + } + ] + } + }, + "sysctls": {"$ref": "#/definitions/list_or_dict"}, + "stdin_open": {"type": "boolean"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "stop_signal": {"type": "string"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false + } + ] + } + } + }, + "user": {"type": "string"}, + "userns_mode": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": "object", + "additionalProperties": false, + "properties": { + "disable": {"type": "boolean"}, + "interval": {"type": "string"}, + "retries": {"type": "number"}, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "timeout": {"type": "string"} + } + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"} + }, + "additionalProperties": false + }, + "resources": { + "type": "object", + "properties": { + "limits": {"$ref": "#/definitions/resource"}, + "reservations": {"$ref": "#/definitions/resource"} + } + }, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + + "resource": { + "id": "#/definitions/resource", + "type": "object", + "properties": { + "cpus": {"type": "string"}, + "memory": {"type": "string"} + }, + "additionalProperties": false + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string"} + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "internal": {"type": "boolean"}, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "secret": { + "id": "#/definitions/secret", + "type": "object", + "properties": { + "file": {"type": "string"}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/cli/compose/schema/schema.go b/vendor/github.com/docker/docker/cli/compose/schema/schema.go new file mode 100644 index 0000000..ae33c77 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/schema.go @@ -0,0 +1,137 @@ +package schema + +//go:generate go-bindata -pkg schema -nometadata data + +import ( + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/xeipuuv/gojsonschema" +) + +const ( + defaultVersion = "1.0" + versionField = "version" +) + +type portsFormatChecker struct{} + +func (checker portsFormatChecker) IsFormat(input string) bool { + // TODO: implement this + return true +} + +type durationFormatChecker struct{} + +func (checker durationFormatChecker) IsFormat(input string) bool { + _, err := time.ParseDuration(input) + return err == nil +} + +func init() { + gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{}) +} + +// Version returns the version of the config, defaulting to version 1.0 +func Version(config map[string]interface{}) string { + version, ok := config[versionField] + if !ok { + return defaultVersion + } + return normalizeVersion(fmt.Sprintf("%v", version)) +} + +func normalizeVersion(version string) string { + switch version { + case "3": + return "3.0" + default: + return version + } +} + +// Validate uses the jsonschema to validate the configuration +func Validate(config map[string]interface{}, version string) error { + schemaData, err := Asset(fmt.Sprintf("data/config_schema_v%s.json", version)) + if err != nil { + return errors.Errorf("unsupported Compose file version: %s", version) + } + + schemaLoader := gojsonschema.NewStringLoader(string(schemaData)) + dataLoader := gojsonschema.NewGoLoader(config) + + result, err := gojsonschema.Validate(schemaLoader, dataLoader) + if err != nil { + return err + } + + if !result.Valid() { + return toError(result) + } + + return nil +} + +func toError(result *gojsonschema.Result) error { + err := getMostSpecificError(result.Errors()) + description := getDescription(err) + return fmt.Errorf("%s %s", err.Field(), description) +} + +func getDescription(err gojsonschema.ResultError) string { + if err.Type() == "invalid_type" { + if expectedType, ok := err.Details()["expected"].(string); ok { + return fmt.Sprintf("must be a %s", humanReadableType(expectedType)) + } + } + + return err.Description() +} + +func humanReadableType(definition string) string { + if definition[0:1] == "[" { + allTypes := strings.Split(definition[1:len(definition)-1], ",") + for i, t := range allTypes { + allTypes[i] = humanReadableType(t) + } + return fmt.Sprintf( + "%s or %s", + strings.Join(allTypes[0:len(allTypes)-1], ", "), + allTypes[len(allTypes)-1], + ) + } + if definition == "object" { + return "mapping" + } + if definition == "array" { + return "list" + } + return definition +} + +func getMostSpecificError(errors []gojsonschema.ResultError) gojsonschema.ResultError { + var mostSpecificError gojsonschema.ResultError + + for _, err := range errors { + if mostSpecificError == nil { + mostSpecificError = err + } else if specificity(err) > specificity(mostSpecificError) { + mostSpecificError = err + } else if specificity(err) == specificity(mostSpecificError) { + // Invalid type errors win in a tie-breaker for most specific field name + if err.Type() == "invalid_type" && mostSpecificError.Type() != "invalid_type" { + mostSpecificError = err + } + } + } + + return mostSpecificError +} + +func specificity(err gojsonschema.ResultError) int { + return len(strings.Split(err.Field(), ".")) +} diff --git a/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go b/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go new file mode 100644 index 0000000..0935d40 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go @@ -0,0 +1,52 @@ +package schema + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type dict map[string]interface{} + +func TestValidate(t *testing.T) { + config := dict{ + "version": "3.0", + "services": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + assert.NoError(t, Validate(config, "3.0")) +} + +func TestValidateUndefinedTopLevelOption(t *testing.T) { + config := dict{ + "version": "3.0", + "helicopters": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + err := Validate(config, "3.0") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Additional property helicopters is not allowed") +} + +func TestValidateInvalidVersion(t *testing.T) { + config := dict{ + "version": "2.1", + "services": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + err := Validate(config, "2.1") + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported Compose file version: 2.1") +} diff --git a/vendor/github.com/docker/docker/cli/compose/template/template.go b/vendor/github.com/docker/docker/cli/compose/template/template.go new file mode 100644 index 0000000..28495ba --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/template/template.go @@ -0,0 +1,100 @@ +package template + +import ( + "fmt" + "regexp" + "strings" +) + +var delimiter = "\\$" +var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?" + +var patternString = fmt.Sprintf( + "%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))", + delimiter, delimiter, substitution, substitution, +) + +var pattern = regexp.MustCompile(patternString) + +// InvalidTemplateError is returned when a variable template is not in a valid +// format +type InvalidTemplateError struct { + Template string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("Invalid template: %#v", e.Template) +} + +// Mapping is a user-supplied function which maps from variable names to values. +// Returns the value as a string and a bool indicating whether +// the value is present, to distinguish between an empty string +// and the absence of a value. +type Mapping func(string) (string, bool) + +// Substitute variables in the string with their values +func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) { + result = pattern.ReplaceAllStringFunc(template, func(substring string) string { + matches := pattern.FindStringSubmatch(substring) + groups := make(map[string]string) + for i, name := range pattern.SubexpNames() { + if i != 0 { + groups[name] = matches[i] + } + } + + substitution := groups["named"] + if substitution == "" { + substitution = groups["braced"] + } + if substitution != "" { + // Soft default (fall back if unset or empty) + if strings.Contains(substitution, ":-") { + name, defaultValue := partition(substitution, ":-") + value, ok := mapping(name) + if !ok || value == "" { + return defaultValue + } + return value + } + + // Hard default (fall back if-and-only-if empty) + if strings.Contains(substitution, "-") { + name, defaultValue := partition(substitution, "-") + value, ok := mapping(name) + if !ok { + return defaultValue + } + return value + } + + // No default (fall back to empty string) + value, ok := mapping(substitution) + if !ok { + return "" + } + return value + } + + if escaped := groups["escaped"]; escaped != "" { + return escaped + } + + err = &InvalidTemplateError{Template: template} + return "" + }) + + return result, err +} + +// Split the string at the first occurrence of sep, and return the part before the separator, +// and the part after the separator. +// +// If the separator is not found, return the string itself, followed by an empty string. +func partition(s, sep string) (string, string) { + if strings.Contains(s, sep) { + parts := strings.SplitN(s, sep, 2) + return parts[0], parts[1] + } + return s, "" +} diff --git a/vendor/github.com/docker/docker/cli/compose/template/template_test.go b/vendor/github.com/docker/docker/cli/compose/template/template_test.go new file mode 100644 index 0000000..6b81bf0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/template/template_test.go @@ -0,0 +1,83 @@ +package template + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var defaults = map[string]string{ + "FOO": "first", + "BAR": "", +} + +func defaultMapping(name string) (string, bool) { + val, ok := defaults[name] + return val, ok +} + +func TestEscaped(t *testing.T) { + result, err := Substitute("$${foo}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "${foo}", result) +} + +func TestInvalid(t *testing.T) { + invalidTemplates := []string{ + "${", + "$}", + "${}", + "${ }", + "${ foo}", + "${foo }", + "${foo!}", + } + + for _, template := range invalidTemplates { + _, err := Substitute(template, defaultMapping) + assert.Error(t, err) + assert.IsType(t, &InvalidTemplateError{}, err) + } +} + +func TestNoValueNoDefault(t *testing.T) { + for _, template := range []string{"This ${missing} var", "This ${BAR} var"} { + result, err := Substitute(template, defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "This var", result) + } +} + +func TestValueNoDefault(t *testing.T) { + for _, template := range []string{"This $FOO var", "This ${FOO} var"} { + result, err := Substitute(template, defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "This first var", result) + } +} + +func TestNoValueWithDefault(t *testing.T) { + for _, template := range []string{"ok ${missing:-def}", "ok ${missing-def}"} { + result, err := Substitute(template, defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok def", result) + } +} + +func TestEmptyValueWithSoftDefault(t *testing.T) { + result, err := Substitute("ok ${BAR:-def}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok def", result) +} + +func TestEmptyValueWithHardDefault(t *testing.T) { + result, err := Substitute("ok ${BAR-def}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok ", result) +} + +func TestNonAlphanumericDefault(t *testing.T) { + result, err := Substitute("ok ${BAR:-/non:-alphanumeric}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok /non:-alphanumeric", result) +} diff --git a/vendor/github.com/docker/docker/cli/compose/types/types.go b/vendor/github.com/docker/docker/cli/compose/types/types.go new file mode 100644 index 0000000..cae7b4a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/types/types.go @@ -0,0 +1,253 @@ +package types + +import ( + "time" +) + +// UnsupportedProperties not yet supported by this implementation of the compose file +var UnsupportedProperties = []string{ + "build", + "cap_add", + "cap_drop", + "cgroup_parent", + "devices", + "dns", + "dns_search", + "domainname", + "external_links", + "ipc", + "links", + "mac_address", + "network_mode", + "privileged", + "read_only", + "restart", + "security_opt", + "shm_size", + "stop_signal", + "sysctls", + "tmpfs", + "userns_mode", +} + +// DeprecatedProperties that were removed from the v3 format, but their +// use should not impact the behaviour of the application. +var DeprecatedProperties = map[string]string{ + "container_name": "Setting the container name is not supported.", + "expose": "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.", +} + +// ForbiddenProperties that are not supported in this implementation of the +// compose file. +var ForbiddenProperties = map[string]string{ + "extends": "Support for `extends` is not implemented yet. Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.", + "volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.", + "volumes_from": "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.", + "cpu_quota": "Set resource limits using deploy.resources", + "cpu_shares": "Set resource limits using deploy.resources", + "cpuset": "Set resource limits using deploy.resources", + "mem_limit": "Set resource limits using deploy.resources", + "memswap_limit": "Set resource limits using deploy.resources", +} + +// Dict is a mapping of strings to interface{} +type Dict map[string]interface{} + +// ConfigFile is a filename and the contents of the file as a Dict +type ConfigFile struct { + Filename string + Config Dict +} + +// ConfigDetails are the details about a group of ConfigFiles +type ConfigDetails struct { + WorkingDir string + ConfigFiles []ConfigFile + Environment map[string]string +} + +// Config is a full compose file configuration +type Config struct { + Services []ServiceConfig + Networks map[string]NetworkConfig + Volumes map[string]VolumeConfig + Secrets map[string]SecretConfig +} + +// ServiceConfig is the configuration of one service +type ServiceConfig struct { + Name string + + CapAdd []string `mapstructure:"cap_add"` + CapDrop []string `mapstructure:"cap_drop"` + CgroupParent string `mapstructure:"cgroup_parent"` + Command []string `compose:"shell_command"` + ContainerName string `mapstructure:"container_name"` + DependsOn []string `mapstructure:"depends_on"` + Deploy DeployConfig + Devices []string + DNS []string `compose:"string_or_list"` + DNSSearch []string `mapstructure:"dns_search" compose:"string_or_list"` + DomainName string `mapstructure:"domainname"` + Entrypoint []string `compose:"shell_command"` + Environment map[string]string `compose:"list_or_dict_equals"` + Expose []string `compose:"list_of_strings_or_numbers"` + ExternalLinks []string `mapstructure:"external_links"` + ExtraHosts map[string]string `mapstructure:"extra_hosts" compose:"list_or_dict_colon"` + Hostname string + HealthCheck *HealthCheckConfig + Image string + Ipc string + Labels map[string]string `compose:"list_or_dict_equals"` + Links []string + Logging *LoggingConfig + MacAddress string `mapstructure:"mac_address"` + NetworkMode string `mapstructure:"network_mode"` + Networks map[string]*ServiceNetworkConfig `compose:"list_or_struct_map"` + Pid string + Ports []string `compose:"list_of_strings_or_numbers"` + Privileged bool + ReadOnly bool `mapstructure:"read_only"` + Restart string + Secrets []ServiceSecretConfig + SecurityOpt []string `mapstructure:"security_opt"` + StdinOpen bool `mapstructure:"stdin_open"` + StopGracePeriod *time.Duration `mapstructure:"stop_grace_period"` + StopSignal string `mapstructure:"stop_signal"` + Tmpfs []string `compose:"string_or_list"` + Tty bool `mapstructure:"tty"` + Ulimits map[string]*UlimitsConfig + User string + Volumes []string + WorkingDir string `mapstructure:"working_dir"` +} + +// LoggingConfig the logging configuration for a service +type LoggingConfig struct { + Driver string + Options map[string]string +} + +// DeployConfig the deployment configuration for a service +type DeployConfig struct { + Mode string + Replicas *uint64 + Labels map[string]string `compose:"list_or_dict_equals"` + UpdateConfig *UpdateConfig `mapstructure:"update_config"` + Resources Resources + RestartPolicy *RestartPolicy `mapstructure:"restart_policy"` + Placement Placement +} + +// HealthCheckConfig the healthcheck configuration for a service +type HealthCheckConfig struct { + Test []string `compose:"healthcheck"` + Timeout string + Interval string + Retries *uint64 + Disable bool +} + +// UpdateConfig the service update configuration +type UpdateConfig struct { + Parallelism *uint64 + Delay time.Duration + FailureAction string `mapstructure:"failure_action"` + Monitor time.Duration + MaxFailureRatio float32 `mapstructure:"max_failure_ratio"` +} + +// Resources the resource limits and reservations +type Resources struct { + Limits *Resource + Reservations *Resource +} + +// Resource is a resource to be limited or reserved +type Resource struct { + // TODO: types to convert from units and ratios + NanoCPUs string `mapstructure:"cpus"` + MemoryBytes UnitBytes `mapstructure:"memory"` +} + +// UnitBytes is the bytes type +type UnitBytes int64 + +// RestartPolicy the service restart policy +type RestartPolicy struct { + Condition string + Delay *time.Duration + MaxAttempts *uint64 `mapstructure:"max_attempts"` + Window *time.Duration +} + +// Placement constraints for the service +type Placement struct { + Constraints []string +} + +// ServiceNetworkConfig is the network configuration for a service +type ServiceNetworkConfig struct { + Aliases []string + Ipv4Address string `mapstructure:"ipv4_address"` + Ipv6Address string `mapstructure:"ipv6_address"` +} + +// ServiceSecretConfig is the secret configuration for a service +type ServiceSecretConfig struct { + Source string + Target string + UID string + GID string + Mode uint32 +} + +// UlimitsConfig the ulimit configuration +type UlimitsConfig struct { + Single int + Soft int + Hard int +} + +// NetworkConfig for a network +type NetworkConfig struct { + Driver string + DriverOpts map[string]string `mapstructure:"driver_opts"` + Ipam IPAMConfig + External External + Internal bool + Labels map[string]string `compose:"list_or_dict_equals"` +} + +// IPAMConfig for a network +type IPAMConfig struct { + Driver string + Config []*IPAMPool +} + +// IPAMPool for a network +type IPAMPool struct { + Subnet string +} + +// VolumeConfig for a volume +type VolumeConfig struct { + Driver string + DriverOpts map[string]string `mapstructure:"driver_opts"` + External External + Labels map[string]string `compose:"list_or_dict_equals"` +} + +// External identifies a Volume or Network as a reference to a resource that is +// not managed, and should already exist. +type External struct { + Name string + External bool +} + +// SecretConfig for a secret +type SecretConfig struct { + File string + External External + Labels map[string]string `compose:"list_or_dict_equals"` +} diff --git a/vendor/github.com/docker/docker/cli/error.go b/vendor/github.com/docker/docker/cli/error.go new file mode 100644 index 0000000..62f6243 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/error.go @@ -0,0 +1,33 @@ +package cli + +import ( + "fmt" + "strings" +) + +// Errors is a list of errors. +// Useful in a loop if you don't want to return the error right away and you want to display after the loop, +// all the errors that happened during the loop. +type Errors []error + +func (errList Errors) Error() string { + if len(errList) < 1 { + return "" + } + + out := make([]string, len(errList)) + for i := range errList { + out[i] = errList[i].Error() + } + return strings.Join(out, ", ") +} + +// StatusError reports an unsuccessful exit by a command. +type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/docker/docker/cli/flags/client.go b/vendor/github.com/docker/docker/cli/flags/client.go new file mode 100644 index 0000000..9b6940f --- /dev/null +++ b/vendor/github.com/docker/docker/cli/flags/client.go @@ -0,0 +1,13 @@ +package flags + +// ClientOptions are the options used to configure the client cli +type ClientOptions struct { + Common *CommonOptions + ConfigDir string + Version bool +} + +// NewClientOptions returns a new ClientOptions +func NewClientOptions() *ClientOptions { + return &ClientOptions{Common: NewCommonOptions()} +} diff --git a/vendor/github.com/docker/docker/cli/flags/common.go b/vendor/github.com/docker/docker/cli/flags/common.go new file mode 100644 index 0000000..e2f9da0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/flags/common.go @@ -0,0 +1,120 @@ +package flags + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/pflag" +) + +const ( + // DefaultTrustKeyFile is the default filename for the trust key + DefaultTrustKeyFile = "key.json" + // DefaultCaFile is the default filename for the CA pem file + DefaultCaFile = "ca.pem" + // DefaultKeyFile is the default filename for the key pem file + DefaultKeyFile = "key.pem" + // DefaultCertFile is the default filename for the cert pem file + DefaultCertFile = "cert.pem" + // FlagTLSVerify is the flag name for the tls verification option + FlagTLSVerify = "tlsverify" +) + +var ( + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +// CommonOptions are options common to both the client and the daemon. +type CommonOptions struct { + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options + TrustKey string +} + +// NewCommonOptions returns a new CommonOptions +func NewCommonOptions() *CommonOptions { + return &CommonOptions{} +} + +// InstallFlags adds flags for the common options on the FlagSet +func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) { + if dockerCertPath == "" { + dockerCertPath = cliconfig.ConfigDir() + } + + flags.BoolVarP(&commonOpts.Debug, "debug", "D", false, "Enable debug mode") + flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", "Set the logging level (\"debug\", \"info\", \"warn\", \"error\", \"fatal\")") + flags.BoolVar(&commonOpts.TLS, "tls", false, "Use TLS; implied by --tlsverify") + flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") + + // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") + + commonOpts.TLSOptions = &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, DefaultCaFile), + CertFile: filepath.Join(dockerCertPath, DefaultCertFile), + KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), + } + tlsOptions := commonOpts.TLSOptions + flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") + flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") + flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") + + hostOpt := opts.NewNamedListOptsRef("hosts", &commonOpts.Hosts, opts.ValidateHost) + flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") +} + +// SetDefaultOptions sets default values for options after flag parsing is +// complete +func (commonOpts *CommonOptions) SetDefaultOptions(flags *pflag.FlagSet) { + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on tls + // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need + // to check that here as well + if flags.Changed(FlagTLSVerify) || commonOpts.TLSVerify { + commonOpts.TLS = true + } + + if !commonOpts.TLS { + commonOpts.TLSOptions = nil + } else { + tlsOptions := commonOpts.TLSOptions + tlsOptions.InsecureSkipVerify = !commonOpts.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. + if !flags.Changed("tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !flags.Changed("tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +// SetLogLevel sets the logrus logging level +func SetLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/vendor/github.com/docker/docker/cli/flags/common_test.go b/vendor/github.com/docker/docker/cli/flags/common_test.go new file mode 100644 index 0000000..81eaa38 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/flags/common_test.go @@ -0,0 +1,42 @@ +package flags + +import ( + "path/filepath" + "testing" + + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/spf13/pflag" +) + +func TestCommonOptionsInstallFlags(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := NewCommonOptions() + opts.InstallFlags(flags) + + err := flags.Parse([]string{ + "--tlscacert=\"/foo/cafile\"", + "--tlscert=\"/foo/cert\"", + "--tlskey=\"/foo/key\"", + }) + assert.NilError(t, err) + assert.Equal(t, opts.TLSOptions.CAFile, "/foo/cafile") + assert.Equal(t, opts.TLSOptions.CertFile, "/foo/cert") + assert.Equal(t, opts.TLSOptions.KeyFile, "/foo/key") +} + +func defaultPath(filename string) string { + return filepath.Join(cliconfig.ConfigDir(), filename) +} + +func TestCommonOptionsInstallFlagsWithDefaults(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := NewCommonOptions() + opts.InstallFlags(flags) + + err := flags.Parse([]string{}) + assert.NilError(t, err) + assert.Equal(t, opts.TLSOptions.CAFile, defaultPath("ca.pem")) + assert.Equal(t, opts.TLSOptions.CertFile, defaultPath("cert.pem")) + assert.Equal(t, opts.TLSOptions.KeyFile, defaultPath("key.pem")) +} diff --git a/vendor/github.com/docker/docker/cli/required.go b/vendor/github.com/docker/docker/cli/required.go new file mode 100644 index 0000000..8ee02c8 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/required.go @@ -0,0 +1,96 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// NoArgs validates args and returns an error if there are any args +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + + if cmd.HasSubCommands() { + return fmt.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + } + + return fmt.Errorf( + "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) +} + +// RequiresMinArgs returns an error if there is not at least min args +func RequiresMinArgs(min int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min { + return nil + } + return fmt.Errorf( + "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresMaxArgs returns an error if there is not at most max args +func RequiresMaxArgs(max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) <= max { + return nil + } + return fmt.Errorf( + "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresRangeArgs returns an error if there is not at least min args and at most max args +func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min && len(args) <= max { + return nil + } + return fmt.Errorf( + "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// ExactArgs returns an error if there is not the exact number of args +func ExactArgs(number int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) == number { + return nil + } + return fmt.Errorf( + "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + number, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} diff --git a/vendor/github.com/docker/docker/cli/trust/trust.go b/vendor/github.com/docker/docker/cli/trust/trust.go new file mode 100644 index 0000000..51914f7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/trust/trust.go @@ -0,0 +1,232 @@ +package trust + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/notary" + "github.com/docker/notary/client" + "github.com/docker/notary/passphrase" + "github.com/docker/notary/storage" + "github.com/docker/notary/trustmanager" + "github.com/docker/notary/trustpinning" + "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/signed" +) + +var ( + // ReleasesRole is the role named "releases" + ReleasesRole = path.Join(data.CanonicalTargetsRole, "releases") +) + +func trustDirectory() string { + return filepath.Join(cliconfig.ConfigDir(), "trust") +} + +// certificateDirectory returns the directory containing +// TLS certificates for the given server. An error is +// returned if there was an error parsing the server string. +func certificateDirectory(server string) (string, error) { + u, err := url.Parse(server) + if err != nil { + return "", err + } + + return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil +} + +// Server returns the base URL for the trust server. +func Server(index *registrytypes.IndexInfo) (string, error) { + if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { + urlObj, err := url.Parse(s) + if err != nil || urlObj.Scheme != "https" { + return "", fmt.Errorf("valid https URL required for trust server, got %s", s) + } + + return s, nil + } + if index.Official { + return registry.NotaryServer, nil + } + return "https://" + index.Name, nil +} + +type simpleCredentialStore struct { + auth types.AuthConfig +} + +func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { + return scs.auth.Username, scs.auth.Password +} + +func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string { + return scs.auth.IdentityToken +} + +func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +// GetNotaryRepository returns a NotaryRepository which stores all the +// information needed to operate on a notary repository. +// It creates an HTTP transport providing authentication support. +func GetNotaryRepository(streams command.Streams, repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, actions ...string) (*client.NotaryRepository, error) { + server, err := Server(repoInfo.Index) + if err != nil { + return nil, err + } + + var cfg = tlsconfig.ClientDefault() + cfg.InsecureSkipVerify = !repoInfo.Index.Secure + + // Get certificate base directory + certDir, err := certificateDirectory(server) + if err != nil { + return nil, err + } + logrus.Debugf("reading certificate directory: %s", certDir) + + if err := registry.ReadCertsDirectory(cfg, certDir); err != nil { + return nil, err + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + DisableKeepAlives: true, + } + + // Skip configuration headers since request is not going to Docker daemon + modifiers := registry.DockerHeaders(command.UserAgent(), http.Header{}) + authTransport := transport.NewTransport(base, modifiers...) + pingClient := &http.Client{ + Transport: authTransport, + Timeout: 5 * time.Second, + } + endpointStr := server + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, err + } + + challengeManager := challenge.NewSimpleManager() + + resp, err := pingClient.Do(req) + if err != nil { + // Ignore error on ping to operate in offline mode + logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) + } else { + defer resp.Body.Close() + + // Add response to the challenge manager to parse out + // authentication header and register authentication method + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } + } + + scope := auth.RepositoryScope{ + Repository: repoInfo.FullName(), + Actions: actions, + Class: repoInfo.Class, + } + creds := simpleCredentialStore{auth: authConfig} + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) + tr := transport.NewTransport(base, modifiers...) + + return client.NewNotaryRepository( + trustDirectory(), + repoInfo.FullName(), + server, + tr, + getPassphraseRetriever(streams), + trustpinning.TrustPinConfig{}) +} + +func getPassphraseRetriever(streams command.Streams) notary.PassRetriever { + aliasMap := map[string]string{ + "root": "root", + "snapshot": "repository", + "targets": "repository", + "default": "repository", + } + baseRetriever := passphrase.PromptRetrieverWithInOut(streams.In(), streams.Out(), aliasMap) + env := map[string]string{ + "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), + "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + } + + return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { + if v := env[alias]; v != "" { + return v, numAttempts > 1, nil + } + // For non-root roles, we can also try the "default" alias if it is specified + if v := env["default"]; v != "" && alias != data.CanonicalRootRole { + return v, numAttempts > 1, nil + } + return baseRetriever(keyName, alias, createNew, numAttempts) + } +} + +// NotaryError formats an error message received from the notary service +func NotaryError(repoName string, err error) error { + switch err.(type) { + case *json.SyntaxError: + logrus.Debugf("Notary syntax error: %s", err) + return fmt.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) + case signed.ErrExpired: + return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) + case trustmanager.ErrKeyNotFound: + return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) + case storage.NetworkError: + return fmt.Errorf("Error: error contacting notary server: %v", err) + case storage.ErrMetaNotFound: + return fmt.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) + case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType: + return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) + case signed.ErrNoKeys: + return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) + case signed.ErrLowVersion: + return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) + case signed.ErrRoleThreshold: + return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) + case client.ErrRepositoryNotExist: + return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) + case signed.ErrInsufficientSignatures: + return fmt.Errorf("Error: could not produce valid signature for %s. If Yubikey was used, was touch input provided?: %v", repoName, err) + } + + return err +} diff --git a/vendor/github.com/docker/docker/cliconfig/config.go b/vendor/github.com/docker/docker/cliconfig/config.go new file mode 100644 index 0000000..d81bf86 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/config.go @@ -0,0 +1,120 @@ +package cliconfig + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/pkg/homedir" +) + +const ( + // ConfigFileName is the name of config file + ConfigFileName = "config.json" + configFileDir = ".docker" + oldConfigfile = ".dockercfg" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") +) + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } +} + +// ConfigDir returns the directory the configuration file is stored in +func ConfigDir() string { + return configDir +} + +// SetConfigDir sets the directory the configuration file is stored in +func SetConfigDir(dir string) { + configDir = dir +} + +// NewConfigFile initializes an empty configuration file for the given filename 'fn' +func NewConfigFile(fn string) *configfile.ConfigFile { + return &configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + HTTPHeaders: make(map[string]string), + Filename: fn, + } +} + +// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from +// a non-nested reader +func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LegacyLoadFromReader(configData) + return &configFile, err +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// Load reads the configuration files in the given directory, and sets up +// the auth config information and returns values. +// FIXME: use the internal golang config parser +func Load(configDir string) (*configfile.ConfigFile, error) { + if configDir == "" { + configDir = ConfigDir() + } + + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + Filename: filepath.Join(configDir, ConfigFileName), + } + + // Try happy path first - latest config file + if _, err := os.Stat(configFile.Filename); err == nil { + file, err := os.Open(configFile.Filename) + if err != nil { + return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) + } + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = fmt.Errorf("%s - %v", configFile.Filename, err) + } + return &configFile, err + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) + } + + // Can't find latest config file so check for the old one + confFile := filepath.Join(homedir.Get(), oldConfigfile) + if _, err := os.Stat(confFile); err != nil { + return &configFile, nil //missing file is not an error + } + file, err := os.Open(confFile) + if err != nil { + return &configFile, fmt.Errorf("%s - %v", confFile, err) + } + defer file.Close() + err = configFile.LegacyLoadFromReader(file) + if err != nil { + return &configFile, fmt.Errorf("%s - %v", confFile, err) + } + + if configFile.HTTPHeaders == nil { + configFile.HTTPHeaders = map[string]string{} + } + return &configFile, nil +} diff --git a/vendor/github.com/docker/docker/cliconfig/config_test.go b/vendor/github.com/docker/docker/cliconfig/config_test.go new file mode 100644 index 0000000..d8a099a --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/config_test.go @@ -0,0 +1,621 @@ +package cliconfig + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/pkg/homedir" +) + +func TestEmptyConfigDir(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + SetConfigDir(tmpHome) + + config, err := Load("") + if err != nil { + t.Fatalf("Failed loading on empty config dir: %q", err) + } + + expectedConfigFilename := filepath.Join(tmpHome, ConfigFileName) + if config.Filename != expectedConfigFilename { + t.Fatalf("Expected config filename %s, got %s", expectedConfigFilename, config.Filename) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestMissingFile(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestSaveFileToDirs(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + tmpHome += "/.docker" + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestEmptyFile(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + if err := ioutil.WriteFile(fn, []byte(""), 0600); err != nil { + t.Fatal(err) + } + + _, err = Load(tmpHome) + if err == nil { + t.Fatalf("Was supposed to fail") + } +} + +func TestEmptyJSON(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + if err := ioutil.WriteFile(fn, []byte("{}"), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestOldInvalidsAuth(t *testing.T) { + invalids := map[string]string{ + `username = test`: "The Auth config file is empty", + `username +password`: "Invalid Auth config file", + `username = test +email`: "Invalid auth configuration file", + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + for content, expectedError := range invalids { + fn := filepath.Join(tmpHome, oldConfigfile) + if err := ioutil.WriteFile(fn, []byte(content), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + // Use Contains instead of == since the file name will change each time + if err == nil || !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Should have failed\nConfig: %v\nGot: %v\nExpected: %v", config, err, expectedError) + } + + } +} + +func TestOldValidAuth(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `username = am9lam9lOmhlbGxv + email = user@example.com` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatal(err) + } + + // defaultIndexserver is https://index.docker.io/v1/ + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) + } +} + +func TestOldJSONInvalid(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `{"https://index.docker.io/v1/":{"auth":"test","email":"user@example.com"}}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + // Use Contains instead of == since the file name will change each time + if err == nil || !strings.Contains(err.Error(), "Invalid auth configuration file") { + t.Fatalf("Expected an error got : %v, %v", config, err) + } +} + +func TestOldJSON(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv", + "email": "user@example.com" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n'%s'\n not \n'%s'\n", configStr, expConfStr) + } +} + +func TestNewJSON(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) + } +} + +func TestNewJSONNoEmail(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) + } +} + +func TestJSONWithPsFormat(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { + t.Fatalf("Unknown ps format: %s\n", config.PsFormat) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"psFormat":`) || + !strings.Contains(configStr, "{{.ID}}") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +func TestJSONWithCredentialStore(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "credsStore": "crazy-secure-storage" +}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.CredentialsStore != "crazy-secure-storage" { + t.Fatalf("Unknown credential store: %s\n", config.CredentialsStore) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"credsStore":`) || + !strings.Contains(configStr, "crazy-secure-storage") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +func TestJSONWithCredentialHelpers(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "credHelpers": { "images.io": "images-io", "containers.com": "crazy-secure-storage" } +}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.CredentialHelpers == nil { + t.Fatal("config.CredentialHelpers was nil") + } else if config.CredentialHelpers["images.io"] != "images-io" || + config.CredentialHelpers["containers.com"] != "crazy-secure-storage" { + t.Fatalf("Credential helpers not deserialized properly: %v\n", config.CredentialHelpers) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"credHelpers":`) || + !strings.Contains(configStr, "images.io") || + !strings.Contains(configStr, "images-io") || + !strings.Contains(configStr, "containers.com") || + !strings.Contains(configStr, "crazy-secure-storage") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +// Save it and make sure it shows up in new form +func saveConfigAndValidateNewFormat(t *testing.T, config *configfile.ConfigFile, homeFolder string) string { + if err := config.Save(); err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(homeFolder, ConfigFileName)) + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } + return string(buf) +} + +func TestConfigDir(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + if ConfigDir() == tmpHome { + t.Fatalf("Expected ConfigDir to be different than %s by default, but was the same", tmpHome) + } + + // Update configDir + SetConfigDir(tmpHome) + + if ConfigDir() != tmpHome { + t.Fatalf("Expected ConfigDir to %s, but was %s", tmpHome, ConfigDir()) + } +} + +func TestConfigFile(t *testing.T) { + configFilename := "configFilename" + configFile := NewConfigFile(configFilename) + + if configFile.Filename != configFilename { + t.Fatalf("Expected %s, got %s", configFilename, configFile.Filename) + } +} + +func TestJSONReaderNoFile(t *testing.T) { + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` + + config, err := LoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + +} + +func TestOldJSONReaderNoFile(t *testing.T) { + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + + config, err := LegacyLoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } +} + +func TestJSONWithPsFormatNoFile(t *testing.T) { + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + config, err := LoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { + t.Fatalf("Unknown ps format: %s\n", config.PsFormat) + } + +} + +func TestJSONSaveWithNoFile(t *testing.T) { + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + config, err := LoadFromReader(strings.NewReader(js)) + err = config.Save() + if err == nil { + t.Fatalf("Expected error. File should not have been able to save with no file name.") + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatalf("Failed to create a temp dir: %q", err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + defer f.Close() + + err = config.SaveToWriter(f) + if err != nil { + t.Fatalf("Failed saving to file: %q", err) + } + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if err != nil { + t.Fatal(err) + } + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + if string(buf) != expConfStr { + t.Fatalf("Should have save in new form: \n%s\nnot \n%s", string(buf), expConfStr) + } +} + +func TestLegacyJSONSaveWithNoFile(t *testing.T) { + + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + config, err := LegacyLoadFromReader(strings.NewReader(js)) + err = config.Save() + if err == nil { + t.Fatalf("Expected error. File should not have been able to save with no file name.") + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatalf("Failed to create a temp dir: %q", err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + defer f.Close() + + if err = config.SaveToWriter(f); err != nil { + t.Fatalf("Failed saving to file: %q", err) + } + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if err != nil { + t.Fatal(err) + } + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv", + "email": "user@example.com" + } + } +}` + + if string(buf) != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", string(buf), expConfStr) + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/configfile/file.go b/vendor/github.com/docker/docker/cliconfig/configfile/file.go new file mode 100644 index 0000000..3909713 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/configfile/file.go @@ -0,0 +1,183 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" +) + +const ( + // This constant is only used for really old config files when the + // URL wasn't saved as part of the config file and it was just + // assumed to be this value. + defaultIndexserver = "https://index.docker.io/v1/" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` +} + +// LegacyLoadFromReader reads the non-nested configuration data given and sets up the +// auth config information with given directory and populates the receiver object +func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { + b, err := ioutil.ReadAll(configData) + if err != nil { + return err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return fmt.Errorf("The Auth config file is empty") + } + authConfig := types.AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return err + } + authConfig.ServerAddress = defaultIndexserver + configFile.AuthConfigs[defaultIndexserver] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return err + } + authConfig.Auth = "" + authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return nil +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(&configFile); err != nil { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return nil +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. +func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + len(configFile.CredentialHelpers) > 0 || + len(configFile.AuthConfigs) > 0 +} + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() error { + if configFile.Filename == "" { + return fmt.Errorf("Can't save config with empty filename") + } + + if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil { + return err + } + f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + return configFile.SaveToWriter(f) +} + +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", fmt.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} diff --git a/vendor/github.com/docker/docker/cliconfig/configfile/file_test.go b/vendor/github.com/docker/docker/cliconfig/configfile/file_test.go new file mode 100644 index 0000000..435797f --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/configfile/file_test.go @@ -0,0 +1,27 @@ +package configfile + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestEncodeAuth(t *testing.T) { + newAuthConfig := &types.AuthConfig{Username: "ken", Password: "test"} + authStr := encodeAuth(newAuthConfig) + decAuthConfig := &types.AuthConfig{} + var err error + decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) + if err != nil { + t.Fatal(err) + } + if newAuthConfig.Username != decAuthConfig.Username { + t.Fatal("Encode Username doesn't match decoded Username") + } + if newAuthConfig.Password != decAuthConfig.Password { + t.Fatal("Encode Password doesn't match decoded Password") + } + if authStr != "a2VuOnRlc3Q=" { + t.Fatal("AuthString encoding isn't correct.") + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go new file mode 100644 index 0000000..ca874ca --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/docker/api/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. + GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go new file mode 100644 index 0000000..b473370 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go @@ -0,0 +1,22 @@ +package credentials + +import ( + "os/exec" + + "github.com/docker/docker/cliconfig/configfile" +) + +// DetectDefaultStore sets the default credentials store +// if the host includes the default store helper program. +func DetectDefaultStore(c *configfile.ConfigFile) { + if c.CredentialsStore != "" { + // user defined + return + } + + if defaultCredentialsStore != "" { + if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { + c.CredentialsStore = defaultCredentialsStore + } + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go new file mode 100644 index 0000000..63e8ed4 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "osxkeychain" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go new file mode 100644 index 0000000..864c540 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "secretservice" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go new file mode 100644 index 0000000..519ef53 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go @@ -0,0 +1,5 @@ +// +build !windows,!darwin,!linux + +package credentials + +const defaultCredentialsStore = "" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go new file mode 100644 index 0000000..fb6a974 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "wincred" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go new file mode 100644 index 0000000..ca73a38 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go @@ -0,0 +1,53 @@ +package credentials + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/registry" +) + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file *configfile.ConfigFile +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file *configfile.ConfigFile) Store { + return &fileStore{ + file: file, + } +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.AuthConfigs, serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. +func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.AuthConfigs[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for r, ac := range c.file.AuthConfigs { + if serverAddress == registry.ConvertToHostname(r) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.AuthConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *fileStore) Store(authConfig types.AuthConfig) error { + c.file.AuthConfigs[authConfig.ServerAddress] = authConfig + return c.file.Save() +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go new file mode 100644 index 0000000..efed4e9 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go @@ -0,0 +1,139 @@ +package credentials + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/configfile" +) + +func newConfigFile(auths map[string]types.AuthConfig) *configfile.ConfigFile { + tmp, _ := ioutil.TempFile("", "docker-test") + name := tmp.Name() + tmp.Close() + + c := cliconfig.NewConfigFile(name) + c.AuthConfigs = auths + return c +} + +func TestFileStoreAddCredentials(t *testing.T) { + f := newConfigFile(make(map[string]types.AuthConfig)) + + s := NewFileStore(f) + err := s.Store(types.AuthConfig{ + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }) + + if err != nil { + t.Fatal(err) + } + + if len(f.AuthConfigs) != 1 { + t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) + } + + a, ok := f.AuthConfigs["https://example.com"] + if !ok { + t.Fatalf("expected auth for https://example.com, got %v", f.AuthConfigs) + } + if a.Auth != "super_secret_token" { + t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestFileStoreGet(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + "https://example.com": { + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }, + }) + + s := NewFileStore(f) + a, err := s.Get("https://example.com") + if err != nil { + t.Fatal(err) + } + if a.Auth != "super_secret_token" { + t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestFileStoreGetAll(t *testing.T) { + s1 := "https://example.com" + s2 := "https://example2.com" + f := newConfigFile(map[string]types.AuthConfig{ + s1: { + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }, + s2: { + Auth: "super_secret_token2", + Email: "foo@example2.com", + ServerAddress: "https://example2.com", + }, + }) + + s := NewFileStore(f) + as, err := s.GetAll() + if err != nil { + t.Fatal(err) + } + if len(as) != 2 { + t.Fatalf("wanted 2, got %d", len(as)) + } + if as[s1].Auth != "super_secret_token" { + t.Fatalf("expected auth `super_secret_token`, got %s", as[s1].Auth) + } + if as[s1].Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", as[s1].Email) + } + if as[s2].Auth != "super_secret_token2" { + t.Fatalf("expected auth `super_secret_token2`, got %s", as[s2].Auth) + } + if as[s2].Email != "foo@example2.com" { + t.Fatalf("expected email `foo@example2.com`, got %s", as[s2].Email) + } +} + +func TestFileStoreErase(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + "https://example.com": { + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }, + }) + + s := NewFileStore(f) + err := s.Erase("https://example.com") + if err != nil { + t.Fatal(err) + } + + // file store never returns errors, check that the auth config is empty + a, err := s.Get("https://example.com") + if err != nil { + t.Fatal(err) + } + + if a.Auth != "" { + t.Fatalf("expected empty auth token, got %s", a.Auth) + } + if a.Email != "" { + t.Fatalf("expected empty email, got %s", a.Email) + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go new file mode 100644 index 0000000..dec2dbc --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go @@ -0,0 +1,144 @@ +package credentials + +import ( + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig/configfile" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + tokenUsername = "" +) + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + programFunc client.ProgramFunc + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file *configfile.ConfigFile, helperSuffix string) Store { + name := remoteCredentialsPrefix + helperSuffix + return &nativeStore{ + programFunc: client.NewShellProgramFunc(name), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := client.Erase(c.programFunc, serverAddress); err != nil { + return err + } + + // Fallback to plain text store to remove email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load user email if it exist or an empty auth config. + auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store. +func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, err := c.listCredentialsInStore() + if err != nil { + return nil, err + } + + // Emails are only stored in the file store. + // This call can be safely eliminated when emails are removed. + fileConfigs, _ := c.fileStore.GetAll() + + authConfigs := make(map[string]types.AuthConfig) + for registry := range auths { + creds, err := c.getCredentialsFromStore(registry) + if err != nil { + return nil, err + } + ac, _ := fileConfigs[registry] // might contain Email + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + authConfigs[registry] = ac + } + + return authConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fallback to old credential in plain text to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. +func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + creds := &credentials.Credentials{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + return client.Store(c.programFunc, creds) +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + creds, err := client.Get(c.programFunc, serverAddress) + if err != nil { + if credentials.IsErrCredentialsNotFound(err) { + // do not return an error if the credentials are not + // in the keyckain. Let docker ask for new credentials. + return ret, nil + } + return ret, err + } + + if creds.Username == tokenUsername { + ret.IdentityToken = creds.Secret + } else { + ret.Password = creds.Secret + ret.Username = creds.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// listCredentialsInStore returns a listing of stored credentials as a map of +// URL -> username. +func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { + return client.List(c.programFunc) +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go b/vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go new file mode 100644 index 0000000..7664faf --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go @@ -0,0 +1,355 @@ +package credentials + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" + "github.com/docker/docker/api/types" +) + +const ( + validServerAddress = "https://index.docker.io/v1" + validServerAddress2 = "https://example.com:5002" + invalidServerAddress = "https://foobar.example.com" + missingCredsAddress = "https://missing.docker.io/v1" +) + +var errCommandExited = fmt.Errorf("exited 1") + +// mockCommand simulates interactions between the docker client and a remote +// credentials helper. +// Unit tests inject this mocked command into the remote to control execution. +type mockCommand struct { + arg string + input io.Reader +} + +// Output returns responses from the remote credentials helper. +// It mocks those responses based in the input in the mock. +func (m *mockCommand) Output() ([]byte, error) { + in, err := ioutil.ReadAll(m.input) + if err != nil { + return nil, err + } + inS := string(in) + + switch m.arg { + case "erase": + switch inS { + case validServerAddress: + return nil, nil + default: + return []byte("program failed"), errCommandExited + } + case "get": + switch inS { + case validServerAddress: + return []byte(`{"Username": "foo", "Secret": "bar"}`), nil + case validServerAddress2: + return []byte(`{"Username": "", "Secret": "abcd1234"}`), nil + case missingCredsAddress: + return []byte(credentials.NewErrCredentialsNotFound().Error()), errCommandExited + case invalidServerAddress: + return []byte("program failed"), errCommandExited + } + case "store": + var c credentials.Credentials + err := json.NewDecoder(strings.NewReader(inS)).Decode(&c) + if err != nil { + return []byte("program failed"), errCommandExited + } + switch c.ServerURL { + case validServerAddress: + return nil, nil + default: + return []byte("program failed"), errCommandExited + } + case "list": + return []byte(fmt.Sprintf(`{"%s": "%s", "%s": "%s"}`, validServerAddress, "foo", validServerAddress2, "")), nil + } + + return []byte(fmt.Sprintf("unknown argument %q with %q", m.arg, inS)), errCommandExited +} + +// Input sets the input to send to a remote credentials helper. +func (m *mockCommand) Input(in io.Reader) { + m.input = in +} + +func mockCommandFn(args ...string) client.Program { + return &mockCommand{ + arg: args[0], + } +} + +func TestNativeStoreAddCredentials(t *testing.T) { + f := newConfigFile(make(map[string]types.AuthConfig)) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Store(types.AuthConfig{ + Username: "foo", + Password: "bar", + Email: "foo@example.com", + ServerAddress: validServerAddress, + }) + + if err != nil { + t.Fatal(err) + } + + if len(f.AuthConfigs) != 1 { + t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) + } + + a, ok := f.AuthConfigs[validServerAddress] + if !ok { + t.Fatalf("expected auth for %s, got %v", validServerAddress, f.AuthConfigs) + } + if a.Auth != "" { + t.Fatalf("expected auth to be empty, got %s", a.Auth) + } + if a.Username != "" { + t.Fatalf("expected username to be empty, got %s", a.Username) + } + if a.Password != "" { + t.Fatalf("expected password to be empty, got %s", a.Password) + } + if a.IdentityToken != "" { + t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestNativeStoreAddInvalidCredentials(t *testing.T) { + f := newConfigFile(make(map[string]types.AuthConfig)) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Store(types.AuthConfig{ + Username: "foo", + Password: "bar", + Email: "foo@example.com", + ServerAddress: invalidServerAddress, + }) + + if err == nil { + t.Fatal("expected error, got nil") + } + + if !strings.Contains(err.Error(), "program failed") { + t.Fatalf("expected `program failed`, got %v", err) + } + + if len(f.AuthConfigs) != 0 { + t.Fatalf("expected 0 auth config, got %d", len(f.AuthConfigs)) + } +} + +func TestNativeStoreGet(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + a, err := s.Get(validServerAddress) + if err != nil { + t.Fatal(err) + } + + if a.Username != "foo" { + t.Fatalf("expected username `foo`, got %s", a.Username) + } + if a.Password != "bar" { + t.Fatalf("expected password `bar`, got %s", a.Password) + } + if a.IdentityToken != "" { + t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestNativeStoreGetIdentityToken(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress2: { + Email: "foo@example2.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + a, err := s.Get(validServerAddress2) + if err != nil { + t.Fatal(err) + } + + if a.Username != "" { + t.Fatalf("expected username to be empty, got %s", a.Username) + } + if a.Password != "" { + t.Fatalf("expected password to be empty, got %s", a.Password) + } + if a.IdentityToken != "abcd1234" { + t.Fatalf("expected identity token `abcd1234`, got %s", a.IdentityToken) + } + if a.Email != "foo@example2.com" { + t.Fatalf("expected email `foo@example2.com`, got %s", a.Email) + } +} + +func TestNativeStoreGetAll(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + as, err := s.GetAll() + if err != nil { + t.Fatal(err) + } + + if len(as) != 2 { + t.Fatalf("wanted 2, got %d", len(as)) + } + + if as[validServerAddress].Username != "foo" { + t.Fatalf("expected username `foo` for %s, got %s", validServerAddress, as[validServerAddress].Username) + } + if as[validServerAddress].Password != "bar" { + t.Fatalf("expected password `bar` for %s, got %s", validServerAddress, as[validServerAddress].Password) + } + if as[validServerAddress].IdentityToken != "" { + t.Fatalf("expected identity to be empty for %s, got %s", validServerAddress, as[validServerAddress].IdentityToken) + } + if as[validServerAddress].Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com` for %s, got %s", validServerAddress, as[validServerAddress].Email) + } + if as[validServerAddress2].Username != "" { + t.Fatalf("expected username to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Username) + } + if as[validServerAddress2].Password != "" { + t.Fatalf("expected password to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Password) + } + if as[validServerAddress2].IdentityToken != "abcd1234" { + t.Fatalf("expected identity token `abcd1324` for %s, got %s", validServerAddress2, as[validServerAddress2].IdentityToken) + } + if as[validServerAddress2].Email != "" { + t.Fatalf("expected no email for %s, got %s", validServerAddress2, as[validServerAddress2].Email) + } +} + +func TestNativeStoreGetMissingCredentials(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + _, err := s.Get(missingCredsAddress) + if err != nil { + // missing credentials do not produce an error + t.Fatal(err) + } +} + +func TestNativeStoreGetInvalidAddress(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + _, err := s.Get(invalidServerAddress) + if err == nil { + t.Fatal("expected error, got nil") + } + + if !strings.Contains(err.Error(), "program failed") { + t.Fatalf("expected `program failed`, got %v", err) + } +} + +func TestNativeStoreErase(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Erase(validServerAddress) + if err != nil { + t.Fatal(err) + } + + if len(f.AuthConfigs) != 0 { + t.Fatalf("expected 0 auth configs, got %d", len(f.AuthConfigs)) + } +} + +func TestNativeStoreEraseInvalidAddress(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Erase(invalidServerAddress) + if err == nil { + t.Fatal("expected error, got nil") + } + + if !strings.Contains(err.Error(), "program failed") { + t.Fatalf("expected `program failed`, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md new file mode 100644 index 0000000..059dfb3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. + +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go new file mode 100644 index 0000000..0effe49 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create_test.go b/vendor/github.com/docker/docker/client/checkpoint_create_test.go new file mode 100644 index 0000000..96e5187 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create_test.go @@ -0,0 +1,73 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CheckpointCreate(context.Background(), "nothing", types.CheckpointCreateOptions{ + CheckpointID: "noting", + Exit: true, + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointCreate(t *testing.T) { + expectedContainerID := "container_id" + expectedCheckpointID := "checkpoint_id" + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + createOptions := &types.CheckpointCreateOptions{} + if err := json.NewDecoder(req.Body).Decode(createOptions); err != nil { + return nil, err + } + + if createOptions.CheckpointID != expectedCheckpointID { + return nil, fmt.Errorf("expected CheckpointID to be 'checkpoint_id', got %v", createOptions.CheckpointID) + } + + if !createOptions.Exit { + return nil, fmt.Errorf("expected Exit to be true") + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointCreate(context.Background(), expectedContainerID, types.CheckpointCreateOptions{ + CheckpointID: expectedCheckpointID, + Exit: true, + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go new file mode 100644 index 0000000..e6e7558 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete_test.go b/vendor/github.com/docker/docker/client/checkpoint_delete_test.go new file mode 100644 index 0000000..a78b050 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointDeleteError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointDelete(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints/checkpoint_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go new file mode 100644 index 0000000..8eb720a --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointList returns the volumes configured in the docker host. +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + if err != nil { + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + ensureReaderClosed(resp) + return checkpoints, err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list_test.go b/vendor/github.com/docker/docker/client/checkpoint_list_test.go new file mode 100644 index 0000000..6c90f61 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_list_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointList(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]types.Checkpoint{ + { + Name: "checkpoint", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + checkpoints, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(checkpoints) != 1 { + t.Fatalf("expected 1 checkpoint, got %v", checkpoints) + } +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go new file mode 100644 index 0000000..a9bdab6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client.go @@ -0,0 +1,246 @@ +/* +Package client is a Go client for the Docker Engine API. + +The "docker" command uses this package to communicate with the daemon. It can also +be used by your own Go applications to do anything the command-line interface does +– running containers, pulling images, managing swarms, etc. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/reference/api/ + +Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewEnvClient, or +configured manually with NewClient. + +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } + +*/ +package client + +import ( + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +// DefaultVersion is the version of the current stable API +const DefaultVersion string = "1.25" + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool +} + +// NewEnvClient initializes a new API client based on environment variables. +// Use DOCKER_HOST to set the url to the docker server. +// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// Use DOCKER_CERT_PATH to load the tls certificates from. +// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +func NewEnvClient() (*Client, error) { + var client *http.Client + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + } + } + + host := os.Getenv("DOCKER_HOST") + if host == "" { + host = DefaultDockerHost + } + version := os.Getenv("DOCKER_API_VERSION") + if version == "" { + version = DefaultVersion + } + + cli, err := NewClient(host, version, client, nil) + if err != nil { + return cli, err + } + if os.Getenv("DOCKER_API_VERSION") != "" { + cli.manualOverride = true + } + return cli, nil +} + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + proto, addr, basePath, err := ParseHost(host) + if err != nil { + return nil, err + } + + if client != nil { + if _, ok := client.Transport.(*http.Transport); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) + } + } else { + transport := new(http.Transport) + sockets.ConfigureTransport(transport, proto, addr) + client = &http.Client{ + Transport: transport, + } + } + + scheme := "http" + tlsConfig := resolveTLSConfig(client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + scheme = "https" + } + + return &Client{ + scheme: scheme, + host: host, + proto: proto, + addr: addr, + basePath: basePath, + client: client, + version: version, + customHTTPHeaders: httpHeaders, + }, nil +} + +// Close ensures that transport.Client is closed +// especially needed while using NewClient with *http.Client = nil +// for example +// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"}) +func (cli *Client) Close() error { + + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + + return nil +} + +// getAPIPath returns the versioned request path to call the api. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(p string, query url.Values) string { + var apiPath string + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) + } else { + apiPath = fmt.Sprintf("%s%s", cli.basePath, p) + } + + u := &url.URL{ + Path: apiPath, + } + if len(query) > 0 { + u.RawQuery = query.Encode() + } + return u.String() +} + +// ClientVersion returns the version string associated with this +// instance of the Client. Note that this value can be changed +// via the DOCKER_API_VERSION env var. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// UpdateClientVersion updates the version string associated with this +// instance of the Client. +func (cli *Client) UpdateClientVersion(v string) { + if !cli.manualOverride { + cli.version = v + } + +} + +// ParseHost verifies that the given host strings is valid. +func ParseHost(host string) (string, string, string, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return "", "", "", err + } + addr = parsed.Host + basePath = parsed.Path + } + return proto, addr, basePath, nil +} diff --git a/vendor/github.com/docker/docker/client/client_mock_test.go b/vendor/github.com/docker/docker/client/client_mock_test.go new file mode 100644 index 0000000..0ab935d --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_mock_test.go @@ -0,0 +1,45 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" +) + +func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client { + return &http.Client{ + Transport: transportFunc(doer), + } +} + +func errorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", "application/json") + + body, err := json.Marshal(&types.ErrorResponse{ + Message: message, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader(body)), + Header: header, + }, nil + } +} + +func plainTextErrorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader([]byte(message))), + }, nil + } +} diff --git a/vendor/github.com/docker/docker/client/client_test.go b/vendor/github.com/docker/docker/client/client_test.go new file mode 100644 index 0000000..ee199c2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_test.go @@ -0,0 +1,283 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNewEnvClient(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping unix only test for windows") + } + cases := []struct { + envs map[string]string + expectedError string + expectedVersion string + }{ + { + envs: map[string]string{}, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "invalid/path", + }, + expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory. Make sure the key is not encrypted", + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_TLS_VERIFY": "1", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_HOST": "https://notaunixsocket", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_HOST": "host", + }, + expectedError: "unable to parse docker host `host`", + }, + { + envs: map[string]string{ + "DOCKER_HOST": "invalid://url", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "anything", + }, + expectedVersion: "anything", + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "1.22", + }, + expectedVersion: "1.22", + }, + } + for _, c := range cases { + recoverEnvs := setupEnvs(t, c.envs) + apiclient, err := NewEnvClient() + if c.expectedError != "" { + if err == nil { + t.Errorf("expected an error for %v", c) + } else if err.Error() != c.expectedError { + t.Errorf("expected an error %s, got %s, for %v", c.expectedError, err.Error(), c) + } + } else { + if err != nil { + t.Error(err) + } + version := apiclient.ClientVersion() + if version != c.expectedVersion { + t.Errorf("expected %s, got %s, for %v", c.expectedVersion, version, c) + } + } + + if c.envs["DOCKER_TLS_VERIFY"] != "" { + // pedantic checking that this is handled correctly + tr := apiclient.client.Transport.(*http.Transport) + if tr.TLSClientConfig == nil { + t.Errorf("no tls config found when DOCKER_TLS_VERIFY enabled") + } + + if tr.TLSClientConfig.InsecureSkipVerify { + t.Errorf("tls verification should be enabled") + } + } + + recoverEnvs(t) + } +} + +func setupEnvs(t *testing.T, envs map[string]string) func(*testing.T) { + oldEnvs := map[string]string{} + for key, value := range envs { + oldEnv := os.Getenv(key) + oldEnvs[key] = oldEnv + err := os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + return func(t *testing.T) { + for key, value := range oldEnvs { + err := os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + } +} + +func TestGetAPIPath(t *testing.T) { + cases := []struct { + v string + p string + q url.Values + e string + }{ + {"", "/containers/json", nil, "/containers/json"}, + {"", "/containers/json", url.Values{}, "/containers/json"}, + {"", "/containers/json", url.Values{"s": []string{"c"}}, "/containers/json?s=c"}, + {"1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, + } + + for _, cs := range cases { + c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) + if err != nil { + t.Fatal(err) + } + g := c.getAPIPath(cs.p, cs.q) + if g != cs.e { + t.Fatalf("Expected %s, got %s", cs.e, g) + } + + err = c.Close() + if nil != err { + t.Fatalf("close client failed, error message: %s", err) + } + } +} + +func TestParseHost(t *testing.T) { + cases := []struct { + host string + proto string + addr string + base string + err bool + }{ + {"", "", "", "", true}, + {"foobar", "", "", "", true}, + {"foo://bar", "foo", "bar", "", false}, + {"tcp://localhost:2476", "tcp", "localhost:2476", "", false}, + {"tcp://localhost:2476/path", "tcp", "localhost:2476", "/path", false}, + } + + for _, cs := range cases { + p, a, b, e := ParseHost(cs.host) + if cs.err && e == nil { + t.Fatalf("expected error, got nil") + } + if !cs.err && e != nil { + t.Fatal(e) + } + if cs.proto != p { + t.Fatalf("expected proto %s, got %s", cs.proto, p) + } + if cs.addr != a { + t.Fatalf("expected addr %s, got %s", cs.addr, a) + } + if cs.base != b { + t.Fatalf("expected base %s, got %s", cs.base, b) + } + } +} + +func TestUpdateClientVersion(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + splitQuery := strings.Split(req.URL.Path, "/") + queryVersion := splitQuery[1] + b, err := json.Marshal(types.Version{ + APIVersion: queryVersion, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + cases := []struct { + v string + }{ + {"1.20"}, + {"v1.21"}, + {"1.22"}, + {"v1.22"}, + } + + for _, cs := range cases { + client.UpdateClientVersion(cs.v) + r, err := client.ServerVersion(context.Background()) + if err != nil { + t.Fatal(err) + } + if strings.TrimPrefix(r.APIVersion, "v") != strings.TrimPrefix(cs.v, "v") { + t.Fatalf("Expected %s, got %s", cs.v, r.APIVersion) + } + } +} + +func TestNewEnvClientSetsDefaultVersion(t *testing.T) { + // Unset environment variables + envVarKeys := []string{ + "DOCKER_HOST", + "DOCKER_API_VERSION", + "DOCKER_TLS_VERIFY", + "DOCKER_CERT_PATH", + } + envVarValues := make(map[string]string) + for _, key := range envVarKeys { + envVarValues[key] = os.Getenv(key) + os.Setenv(key, "") + } + + client, err := NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != DefaultVersion { + t.Fatalf("Expected %s, got %s", DefaultVersion, client.version) + } + + expected := "1.22" + os.Setenv("DOCKER_API_VERSION", expected) + client, err = NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != expected { + t.Fatalf("Expected %s, got %s", expected, client.version) + } + + // Restore environment variables + for _, key := range envVarKeys { + os.Setenv(key, envVarValues[key]) + } +} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go new file mode 100644 index 0000000..89de892 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -0,0 +1,6 @@ +// +build linux freebsd solaris openbsd darwin + +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go new file mode 100644 index 0000000..07c0c7a --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -0,0 +1,4 @@ +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go new file mode 100644 index 0000000..eea4682 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -0,0 +1,37 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerAttach attaches a connection to a container in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. +func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{"Content-Type": {"text/plain"}} + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go new file mode 100644 index 0000000..c766d62 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -0,0 +1,53 @@ +package client + +import ( + "encoding/json" + "errors" + "net/url" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" + "golang.org/x/net/context" +) + +// ContainerCommit applies changes into a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + distributionRef, err := distreference.ParseNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + + tag = reference.GetTagFromNamedRef(distributionRef) + repository = distributionRef.Name() + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if options.Pause != true { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_commit_test.go b/vendor/github.com/docker/docker/client/container_commit_test.go new file mode 100644 index 0000000..a844675 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerCommitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCommit(context.Background(), "nothing", types.ContainerCommitOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerCommit(t *testing.T) { + expectedURL := "/commit" + expectedContainerID := "container_id" + specifiedReference := "repository_name:tag" + expectedRepositoryName := "repository_name" + expectedTag := "tag" + expectedComment := "comment" + expectedAuthor := "author" + expectedChanges := []string{"change1", "change2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + containerID := query.Get("container") + if containerID != expectedContainerID { + return nil, fmt.Errorf("container id not set in URL query properly. Expected '%s', got %s", expectedContainerID, containerID) + } + repo := query.Get("repo") + if repo != expectedRepositoryName { + return nil, fmt.Errorf("container repo not set in URL query properly. Expected '%s', got %s", expectedRepositoryName, repo) + } + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("container tag not set in URL query properly. Expected '%s', got %s'", expectedTag, tag) + } + comment := query.Get("comment") + if comment != expectedComment { + return nil, fmt.Errorf("container comment not set in URL query properly. Expected '%s', got %s'", expectedComment, comment) + } + author := query.Get("author") + if author != expectedAuthor { + return nil, fmt.Errorf("container author not set in URL query properly. Expected '%s', got %s'", expectedAuthor, author) + } + pause := query.Get("pause") + if pause != "0" { + return nil, fmt.Errorf("container pause not set in URL query properly. Expected 'true', got %v'", pause) + } + changes := query["changes"] + if len(changes) != len(expectedChanges) { + return nil, fmt.Errorf("expected container changes size to be '%d', got %d", len(expectedChanges), len(changes)) + } + b, err := json.Marshal(types.IDResponse{ + ID: "new_container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCommit(context.Background(), expectedContainerID, types.ContainerCommitOptions{ + Reference: specifiedReference, + Comment: expectedComment, + Author: expectedAuthor, + Changes: expectedChanges, + Pause: false, + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "new_container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go new file mode 100644 index 0000000..8380eea --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -0,0 +1,97 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns Stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + + urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + response, err := cli.head(ctx, urlStr, query, nil) + if err != nil { + return types.ContainerPathStat{}, err + } + defer ensureReaderClosed(response) + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + if err != nil { + return err + } + defer ensureReaderClosed(response) + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. + stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/docker/docker/client/container_copy_test.go b/vendor/github.com/docker/docker/client/container_copy_test.go new file mode 100644 index 0000000..706a20c --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy_test.go @@ -0,0 +1,244 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStatPathError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestContainerStatPathNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestContainerStatPath(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "HEAD" { + return nil, fmt.Errorf("expected HEAD method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly") + } + content, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(content) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + stat, err := client.ContainerStatPath(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } +} + +func TestCopyToContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyToContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyToContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "PUT" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + noOverwriteDirNonDir := query.Get("noOverwriteDirNonDir") + if noOverwriteDirNonDir != "true" { + return nil, fmt.Errorf("noOverwriteDirNonDir not set in URL query properly, expected true, got %s", noOverwriteDirNonDir) + } + + content, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + if err := req.Body.Close(); err != nil { + return nil, err + } + if string(content) != "content" { + return nil, fmt.Errorf("expected content to be 'content', got %s", string(content)) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestCopyFromContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyFromContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyFromContainerNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestCopyFromContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + + headercontent, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(headercontent) + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("content"))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + r, stat, err := client.CopyFromContainer(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } + content, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if err := r.Close(); err != nil { + t.Fatal(err) + } + if string(content) != "content" { + t.Fatalf("expected content to be 'content', got %s", string(content)) + } +} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go new file mode 100644 index 0000000..9f627aa --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -0,0 +1,50 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based in the given configuration. +// It can be associated with a name, but it's not mandatory. +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { + var response container.ContainerCreateCreatedBody + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + + query := url.Values{} + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + if err != nil { + if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { + return response, imageNotFoundError{config.Image} + } + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_create_test.go b/vendor/github.com/docker/docker/client/container_create_test.go new file mode 100644 index 0000000..15dbd5e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusInternalServerError, got %v", err) + } + + // 404 doesn't automagitally means an unknown image + client = &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + _, err = client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusNotFound, got %v", err) + } +} + +func TestContainerCreateImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "No such image")), + } + _, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestContainerCreateWithName(t *testing.T) { + expectedURL := "/containers/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "container_name" { + return nil, fmt.Errorf("container name not set in URL query properly. Expected `container_name`, got %s", name) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCreate(context.Background(), nil, nil, nil, "container_name") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go new file mode 100644 index 0000000..1e3e554 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -0,0 +1,23 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerDiff shows differences in a container filesystem since it was started. +func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { + var changes []types.ContainerChange + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + ensureReaderClosed(serverResp) + return changes, err +} diff --git a/vendor/github.com/docker/docker/client/container_diff_test.go b/vendor/github.com/docker/docker/client/container_diff_test.go new file mode 100644 index 0000000..1ce1117 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff_test.go @@ -0,0 +1,61 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerDiffError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerDiff(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + +} + +func TestContainerDiff(t *testing.T) { + expectedURL := "/containers/container_id/changes" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal([]types.ContainerChange{ + { + Kind: 0, + Path: "/path/1", + }, + { + Kind: 1, + Path: "/path/2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + changes, err := client.ContainerDiff(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if len(changes) != 2 { + t.Fatalf("expected an array of 2 changes, got %v", changes) + } +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go new file mode 100644 index 0000000..0665c54 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -0,0 +1,54 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + if err != nil { + return response, err + } + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} + +// ContainerExecStart starts an exec process already created in the docker host. +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) + ensureReaderClosed(resp) + return err +} + +// ContainerExecAttach attaches a connection to an exec process in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { + headers := map[string][]string{"Content-Type": {"application/json"}} + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_exec_test.go b/vendor/github.com/docker/docker/client/container_exec_test.go new file mode 100644 index 0000000..0e296a5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_exec_test.go @@ -0,0 +1,157 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerExecCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecCreate(t *testing.T) { + expectedURL := "/containers/container_id/exec" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + // FIXME validate the content is the given ExecConfig ? + if err := req.ParseForm(); err != nil { + return nil, err + } + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil { + return nil, err + } + if execConfig.User != "user" { + return nil, fmt.Errorf("expected an execConfig with User == 'user', got %v", execConfig) + } + b, err := json.Marshal(types.IDResponse{ + ID: "exec_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{ + User: "user", + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "exec_id" { + t.Fatalf("expected `exec_id`, got %s", r.ID) + } +} + +func TestContainerExecStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecStart(t *testing.T) { + expectedURL := "/exec/exec_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if err := req.ParseForm(); err != nil { + return nil, err + } + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil { + return nil, err + } + if execStartCheck.Tty || !execStartCheck.Detach { + return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{ + Detach: true, + Tty: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecInspect(t *testing.T) { + expectedURL := "/exec/exec_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(types.ContainerExecInspect{ + ExecID: "exec_id", + ContainerID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + inspect, err := client.ContainerExecInspect(context.Background(), "exec_id") + if err != nil { + t.Fatal(err) + } + if inspect.ExecID != "exec_id" { + t.Fatalf("expected ExecID to be `exec_id`, got %s", inspect.ExecID) + } + if inspect.ContainerID != "container_id" { + t.Fatalf("expected ContainerID `container_id`, got %s", inspect.ContainerID) + } +} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go new file mode 100644 index 0000000..52194f3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -0,0 +1,20 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_export_test.go b/vendor/github.com/docker/docker/client/container_export_test.go new file mode 100644 index 0000000..5849fe9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export_test.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerExportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExport(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExport(t *testing.T) { + expectedURL := "/containers/container_id/export" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerExport(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } +} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go new file mode 100644 index 0000000..17f1809 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. +func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, nil, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/container_inspect_test.go b/vendor/github.com/docker/docker/client/container_inspect_test.go new file mode 100644 index 0000000..f1a6f4a --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect_test.go @@ -0,0 +1,125 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "unknown") + if err == nil || !IsErrContainerNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} + +func TestContainerInspect(t *testing.T) { + expectedURL := "/containers/container_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.ID) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.ID) + } +} + +func TestContainerInspectNode(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + Node: &types.ContainerNode{ + ID: "container_node_id", + Addr: "container_node", + Labels: map[string]string{"foo": "bar"}, + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.ID) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.ID) + } + if r.Node.ID != "container_node_id" { + t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) + } + if r.Node.Addr != "container_node" { + t.Fatalf("expected `container_node`, got %s", r.Node.Addr) + } + foo, ok := r.Node.Labels["foo"] + if foo != "bar" || !ok { + t.Fatalf("expected `bar` for label `foo`") + } +} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go new file mode 100644 index 0000000..29f80c7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -0,0 +1,17 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_kill_test.go b/vendor/github.com/docker/docker/client/container_kill_test.go new file mode 100644 index 0000000..9477b0a --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerKillError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerKill(context.Background(), "nothing", "SIGKILL") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerKill(t *testing.T) { + expectedURL := "/containers/container_id/kill" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + signal := req.URL.Query().Get("signal") + if signal != "SIGKILL" { + return nil, fmt.Errorf("signal not set in URL query properly. Expected 'SIGKILL', got %s", signal) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerKill(context.Background(), "container_id", "SIGKILL") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go new file mode 100644 index 0000000..4398912 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainerList returns the list of containers in the docker host. +func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + ensureReaderClosed(resp) + return containers, err +} diff --git a/vendor/github.com/docker/docker/client/container_list_test.go b/vendor/github.com/docker/docker/client/container_list_test.go new file mode 100644 index 0000000..e41c687 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestContainerListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerList(t *testing.T) { + expectedURL := "/containers/json" + expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}` + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + all := query.Get("all") + if all != "1" { + return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all) + } + limit := query.Get("limit") + if limit != "0" { + return nil, fmt.Errorf("limit should have not be present in query. Expected '0', got %s", limit) + } + since := query.Get("since") + if since != "container" { + return nil, fmt.Errorf("since not set in URL query properly. Expected 'container', got %s", since) + } + before := query.Get("before") + if before != "" { + return nil, fmt.Errorf("before should have not be present in query, go %s", before) + } + size := query.Get("size") + if size != "1" { + return nil, fmt.Errorf("size not set in URL query properly. Expected '1', got %s", size) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("expected filters incoherent '%v' with actual filters %v", expectedFilters, filters) + } + + b, err := json.Marshal([]types.Container{ + { + ID: "container_id1", + }, + { + ID: "container_id2", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("before", "container") + containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{ + Size: true, + All: true, + Since: "container", + Filters: filters, + }) + if err != nil { + t.Fatal(err) + } + if len(containers) != 2 { + t.Fatalf("expected 2 containers, got %v", containers) + } +} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go new file mode 100644 index 0000000..69056b6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_logs_test.go b/vendor/github.com/docker/docker/client/container_logs_test.go new file mode 100644 index 0000000..99e3184 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestContainerLogsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + _, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{ + Since: "2006-01-02TZ", + }) + if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { + t.Fatalf("expected a 'parsing time' error, got %v", err) + } +} + +func TestContainerLogs(t *testing.T) { + expectedURL := "/containers/container_id/logs" + cases := []struct { + options types.ContainerLogsOptions + expectedQueryParams map[string]string + }{ + { + expectedQueryParams: map[string]string{ + "tail": "", + }, + }, + { + options: types.ContainerLogsOptions{ + Tail: "any", + }, + expectedQueryParams: map[string]string{ + "tail": "any", + }, + }, + { + options: types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, + Follow: true, + }, + expectedQueryParams: map[string]string{ + "tail": "", + "stdout": "1", + "stderr": "1", + "timestamps": "1", + "details": "1", + "follow": "1", + }, + }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date, timestamp or go duration will be + // passed as is + Since: "invalid but valid", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "invalid but valid", + }, + }, + } + for _, logCase := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check query parameters + query := r.URL.Query() + for key, expected := range logCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerLogs(context.Background(), "container_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ContainerLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ContainerLogs(ctx, "container_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go new file mode 100644 index 0000000..412067a --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_pause_test.go b/vendor/github.com/docker/docker/client/container_pause_test.go new file mode 100644 index 0000000..0ee2f05 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerPauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerPause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerPause(t *testing.T) { + expectedURL := "/containers/container_id/pause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerPause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go new file mode 100644 index 0000000..b582170 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go new file mode 100644 index 0000000..3a79590 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -0,0 +1,27 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerRemove kills and removes a container from the docker host. +func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_remove_test.go b/vendor/github.com/docker/docker/client/container_remove_test.go new file mode 100644 index 0000000..798c08b --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRemove(t *testing.T) { + expectedURL := "/containers/container_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + volume := query.Get("v") + if volume != "1" { + return nil, fmt.Errorf("v (volume) not set in URL query properly. Expected '1', got %s", volume) + } + force := query.Get("force") + if force != "1" { + return nil, fmt.Errorf("force not set in URL query properly. Expected '1', got %s", force) + } + link := query.Get("link") + if link != "" { + return nil, fmt.Errorf("link should have not be present in query, go %s", link) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go new file mode 100644 index 0000000..0e718da --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -0,0 +1,16 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_rename_test.go b/vendor/github.com/docker/docker/client/container_rename_test.go new file mode 100644 index 0000000..732ebff --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_rename_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerRenameError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRename(context.Background(), "nothing", "newNothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRename(t *testing.T) { + expectedURL := "/containers/container_id/rename" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "newName" { + return nil, fmt.Errorf("name not set in URL query properly. Expected 'newName', got %s", name) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRename(context.Background(), "container_id", "newName") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go new file mode 100644 index 0000000..66c3cc1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -0,0 +1,29 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_resize_test.go b/vendor/github.com/docker/docker/client/container_resize_test.go new file mode 100644 index 0000000..5b2efec --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize_test.go @@ -0,0 +1,82 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/containers/container_id/resize")), + } + + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/exec/exec_id/resize")), + } + + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func resizeTransport(expectedURL string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + h := query.Get("h") + if h != "500" { + return nil, fmt.Errorf("h not set in URL query properly. Expected '500', got %s", h) + } + w := query.Get("w") + if w != "600" { + return nil, fmt.Errorf("w not set in URL query properly. Expected '600', got %s", w) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + } +} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go new file mode 100644 index 0000000..74d7455 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -0,0 +1,22 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerRestart stops and starts a container again. +// It makes the daemon to wait for the container to be up again for +// a specific amount of time, given the timeout. +func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_restart_test.go b/vendor/github.com/docker/docker/client/container_restart_test.go new file mode 100644 index 0000000..8c3cfd6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_restart_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerRestartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerRestart(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRestart(t *testing.T) { + expectedURL := "/containers/container_id/restart" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerRestart(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go new file mode 100644 index 0000000..b1f08de --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -0,0 +1,24 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_start_test.go b/vendor/github.com/docker/docker/client/container_start_test.go new file mode 100644 index 0000000..5826fa8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerStart(context.Background(), "nothing", types.ContainerStartOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStart(t *testing.T) { + expectedURL := "/containers/container_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + // we're not expecting any payload, but if one is supplied, check it is valid. + if req.Header.Get("Content-Type") == "application/json" { + var startConfig interface{} + if err := json.NewDecoder(req.Body).Decode(&startConfig); err != nil { + return nil, fmt.Errorf("Unable to parse json: %s", err) + } + } + + checkpoint := req.URL.Query().Get("checkpoint") + if checkpoint != "checkpoint_id" { + return nil, fmt.Errorf("checkpoint not set in URL query properly. Expected 'checkpoint_id', got %s", checkpoint) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerStart(context.Background(), "container_id", types.ContainerStartOptions{CheckpointID: "checkpoint_id"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go new file mode 100644 index 0000000..4758c66 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -0,0 +1,26 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. +func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/docker/docker/client/container_stats_test.go b/vendor/github.com/docker/docker/client/container_stats_test.go new file mode 100644 index 0000000..7414f13 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerStatsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStats(context.Background(), "nothing", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStats(t *testing.T) { + expectedURL := "/containers/container_id/stats" + cases := []struct { + stream bool + expectedStream string + }{ + { + expectedStream: "0", + }, + { + stream: true, + expectedStream: "1", + }, + } + for _, c := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + query := r.URL.Query() + stream := query.Get("stream") + if stream != c.expectedStream { + return nil, fmt.Errorf("stream not set in URL query properly. Expected '%s', got %s", c.expectedStream, stream) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + resp, err := client.ContainerStats(context.Background(), "container_id", c.stream) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go new file mode 100644 index 0000000..b5418ae --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerStop stops a container without terminating the process. +// The process is blocked until the container stops or the timeout expires. +func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_stop_test.go b/vendor/github.com/docker/docker/client/container_stop_test.go new file mode 100644 index 0000000..c32cd69 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerStopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerStop(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStop(t *testing.T) { + expectedURL := "/containers/container_id/stop" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerStop(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go new file mode 100644 index 0000000..4e7270e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerTop shows process information from within a container. +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { + var response types.ContainerProcessList + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_top_test.go b/vendor/github.com/docker/docker/client/container_top_test.go new file mode 100644 index 0000000..7802be0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerTopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerTop(context.Background(), "nothing", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerTop(t *testing.T) { + expectedURL := "/containers/container_id/top" + expectedProcesses := [][]string{ + {"p1", "p2"}, + {"p3"}, + } + expectedTitles := []string{"title1", "title2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + args := query.Get("ps_args") + if args != "arg1 arg2" { + return nil, fmt.Errorf("args not set in URL query properly. Expected 'arg1 arg2', got %v", args) + } + + b, err := json.Marshal(types.ContainerProcessList{ + Processes: [][]string{ + {"p1", "p2"}, + {"p3"}, + }, + Titles: []string{"title1", "title2"}, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expectedProcesses, processList.Processes) { + t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes) + } + if !reflect.DeepEqual(expectedTitles, processList.Titles) { + t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles) + } +} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go new file mode 100644 index 0000000..5c76211 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_unpause_test.go b/vendor/github.com/docker/docker/client/container_unpause_test.go new file mode 100644 index 0000000..2c42727 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerUnpauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerUnpause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUnpause(t *testing.T) { + expectedURL := "/containers/container_id/unpause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerUnpause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go new file mode 100644 index 0000000..5082f22 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerUpdate updates resources of a container +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_update_test.go b/vendor/github.com/docker/docker/client/container_update_test.go new file mode 100644 index 0000000..715bb7c --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerUpdate(context.Background(), "nothing", container.UpdateConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUpdate(t *testing.T) { + expectedURL := "/containers/container_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + b, err := json.Marshal(container.ContainerUpdateOKBody{}) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + _, err := client.ContainerUpdate(context.Background(), "container_id", container.UpdateConfig{ + Resources: container.Resources{ + CPUPeriod: 1, + }, + RestartPolicy: container.RestartPolicy{ + Name: "always", + }, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go new file mode 100644 index 0000000..93212c7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/container" +) + +// ContainerWait pauses execution until a container exits. +// It returns the API status code as response of its readiness. +func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int64, error) { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + return -1, err + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + return -1, err + } + + return res.StatusCode, nil +} diff --git a/vendor/github.com/docker/docker/client/container_wait_test.go b/vendor/github.com/docker/docker/client/container_wait_test.go new file mode 100644 index 0000000..9300bc0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + + "golang.org/x/net/context" +) + +func TestContainerWaitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + code, err := client.ContainerWait(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + if code != -1 { + t.Fatalf("expected a status code equal to '-1', got %d", code) + } +} + +func TestContainerWait(t *testing.T) { + expectedURL := "/containers/container_id/wait" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(container.ContainerWaitOKBody{ + StatusCode: 15, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + code, err := client.ContainerWait(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if code != 15 { + t.Fatalf("expected a status code equal to '15', got %d", code) + } +} + +func ExampleClient_ContainerWait_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + _, err := client.ContainerWait(ctx, "container_id") + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go new file mode 100644 index 0000000..03c80b3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + if err != nil { + return du, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go new file mode 100644 index 0000000..bf6923f --- /dev/null +++ b/vendor/github.com/docker/docker/client/errors.go @@ -0,0 +1,278 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// errConnectionFailed implements an error returned when connection failed. +type errConnectionFailed struct { + host string +} + +// Error returns a string representation of an errConnectionFailed +func (err errConnectionFailed) Error() string { + if err.host == "" { + return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" + } + return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +} + +// IsErrConnectionFailed returns true if the error is caused by connection failed. +func IsErrConnectionFailed(err error) bool { + _, ok := errors.Cause(err).(errConnectionFailed) + return ok +} + +// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +func ErrorConnectionFailed(host string) error { + return errConnectionFailed{host: host} +} + +type notFound interface { + error + NotFound() bool // Is the error a NotFound error +} + +// IsErrNotFound returns true if the error is caused with an +// object (image, container, network, volume, …) is not found in the docker host. +func IsErrNotFound(err error) bool { + te, ok := err.(notFound) + return ok && te.NotFound() +} + +// imageNotFoundError implements an error returned when an image is not in the docker host. +type imageNotFoundError struct { + imageID string +} + +// NotFound indicates that this error type is of NotFound +func (e imageNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of an imageNotFoundError +func (e imageNotFoundError) Error() string { + return fmt.Sprintf("Error: No such image: %s", e.imageID) +} + +// IsErrImageNotFound returns true if the error is caused +// when an image is not found in the docker host. +func IsErrImageNotFound(err error) bool { + return IsErrNotFound(err) +} + +// containerNotFoundError implements an error returned when a container is not in the docker host. +type containerNotFoundError struct { + containerID string +} + +// NotFound indicates that this error type is of NotFound +func (e containerNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a containerNotFoundError +func (e containerNotFoundError) Error() string { + return fmt.Sprintf("Error: No such container: %s", e.containerID) +} + +// IsErrContainerNotFound returns true if the error is caused +// when a container is not found in the docker host. +func IsErrContainerNotFound(err error) bool { + return IsErrNotFound(err) +} + +// networkNotFoundError implements an error returned when a network is not in the docker host. +type networkNotFoundError struct { + networkID string +} + +// NotFound indicates that this error type is of NotFound +func (e networkNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a networkNotFoundError +func (e networkNotFoundError) Error() string { + return fmt.Sprintf("Error: No such network: %s", e.networkID) +} + +// IsErrNetworkNotFound returns true if the error is caused +// when a network is not found in the docker host. +func IsErrNetworkNotFound(err error) bool { + return IsErrNotFound(err) +} + +// volumeNotFoundError implements an error returned when a volume is not in the docker host. +type volumeNotFoundError struct { + volumeID string +} + +// NotFound indicates that this error type is of NotFound +func (e volumeNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a volumeNotFoundError +func (e volumeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such volume: %s", e.volumeID) +} + +// IsErrVolumeNotFound returns true if the error is caused +// when a volume is not found in the docker host. +func IsErrVolumeNotFound(err error) bool { + return IsErrNotFound(err) +} + +// unauthorizedError represents an authorization error in a remote registry. +type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +func IsErrUnauthorized(err error) bool { + _, ok := err.(unauthorizedError) + return ok +} + +// nodeNotFoundError implements an error returned when a node is not found. +type nodeNotFoundError struct { + nodeID string +} + +// Error returns a string representation of a nodeNotFoundError +func (e nodeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such node: %s", e.nodeID) +} + +// NotFound indicates that this error type is of NotFound +func (e nodeNotFoundError) NotFound() bool { + return true +} + +// IsErrNodeNotFound returns true if the error is caused +// when a node is not found. +func IsErrNodeNotFound(err error) bool { + _, ok := err.(nodeNotFoundError) + return ok +} + +// serviceNotFoundError implements an error returned when a service is not found. +type serviceNotFoundError struct { + serviceID string +} + +// Error returns a string representation of a serviceNotFoundError +func (e serviceNotFoundError) Error() string { + return fmt.Sprintf("Error: No such service: %s", e.serviceID) +} + +// NotFound indicates that this error type is of NotFound +func (e serviceNotFoundError) NotFound() bool { + return true +} + +// IsErrServiceNotFound returns true if the error is caused +// when a service is not found. +func IsErrServiceNotFound(err error) bool { + _, ok := err.(serviceNotFoundError) + return ok +} + +// taskNotFoundError implements an error returned when a task is not found. +type taskNotFoundError struct { + taskID string +} + +// Error returns a string representation of a taskNotFoundError +func (e taskNotFoundError) Error() string { + return fmt.Sprintf("Error: No such task: %s", e.taskID) +} + +// NotFound indicates that this error type is of NotFound +func (e taskNotFoundError) NotFound() bool { + return true +} + +// IsErrTaskNotFound returns true if the error is caused +// when a task is not found. +func IsErrTaskNotFound(err error) bool { + _, ok := err.(taskNotFoundError) + return ok +} + +type pluginPermissionDenied struct { + name string +} + +func (e pluginPermissionDenied) Error() string { + return "Permission denied while installing plugin " + e.name +} + +// IsErrPluginPermissionDenied returns true if the error is caused +// when a user denies a plugin's permissions +func IsErrPluginPermissionDenied(err error) bool { + _, ok := err.(pluginPermissionDenied) + return ok +} + +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker server is version %s", feature, APIrequired, cli.version) + } + return nil +} + +// secretNotFoundError implements an error returned when a secret is not found. +type secretNotFoundError struct { + name string +} + +// Error returns a string representation of a secretNotFoundError +func (e secretNotFoundError) Error() string { + return fmt.Sprintf("Error: no such secret: %s", e.name) +} + +// NoFound indicates that this error type is of NotFound +func (e secretNotFoundError) NotFound() bool { + return true +} + +// IsErrSecretNotFound returns true if the error is caused +// when a secret is not found. +func IsErrSecretNotFound(err error) bool { + _, ok := err.(secretNotFoundError) + return ok +} + +// pluginNotFoundError implements an error returned when a plugin is not in the docker host. +type pluginNotFoundError struct { + name string +} + +// NotFound indicates that this error type is of NotFound +func (e pluginNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a pluginNotFoundError +func (e pluginNotFoundError) Error() string { + return fmt.Sprintf("Error: No such plugin: %s", e.name) +} + +// IsErrPluginNotFound returns true if the error is caused +// when a plugin is not found in the docker host. +func IsErrPluginNotFound(err error) bool { + return IsErrNotFound(err) +} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go new file mode 100644 index 0000000..af47aef --- /dev/null +++ b/vendor/github.com/docker/docker/client/events.go @@ -0,0 +1,102 @@ +package client + +import ( + "encoding/json" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" +) + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an io.EOF error will +// be sent over the error channel. If an error is sent all processing will be stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. +func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/events_test.go b/vendor/github.com/docker/docker/client/events_test.go new file mode 100644 index 0000000..ba82d2f --- /dev/null +++ b/vendor/github.com/docker/docker/client/events_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" +) + +func TestEventsErrorInOptions(t *testing.T) { + errorCases := []struct { + options types.EventsOptions + expectedError string + }{ + { + options: types.EventsOptions{ + Since: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + { + options: types.EventsOptions{ + Until: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + } + for _, e := range errorCases { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), e.options) + err := <-errs + if err == nil || !strings.Contains(err.Error(), e.expectedError) { + t.Fatalf("expected an error %q, got %v", e.expectedError, err) + } + } +} + +func TestEventsErrorFromServer(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), types.EventsOptions{}) + err := <-errs + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestEvents(t *testing.T) { + + expectedURL := "/events" + + filters := filters.NewArgs() + filters.Add("type", events.ContainerEventType) + expectedFiltersJSON := fmt.Sprintf(`{"type":{"%s":true}}`, events.ContainerEventType) + + eventsCases := []struct { + options types.EventsOptions + events []events.Message + expectedEvents map[string]bool + expectedQueryParams map[string]string + }{ + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{}, + expectedEvents: make(map[string]bool), + }, + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{ + { + Type: "container", + ID: "1", + Action: "create", + }, + { + Type: "container", + ID: "2", + Action: "die", + }, + { + Type: "container", + ID: "3", + Action: "create", + }, + }, + expectedEvents: map[string]bool{ + "1": true, + "2": true, + "3": true, + }, + }, + } + + for _, eventsCase := range eventsCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + + for key, expected := range eventsCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + buffer := new(bytes.Buffer) + + for _, e := range eventsCase.events { + b, _ := json.Marshal(e) + buffer.Write(b) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(buffer), + }, nil + }), + } + + messages, errs := client.Events(context.Background(), eventsCase.options) + + loop: + for { + select { + case err := <-errs: + if err != nil && err != io.EOF { + t.Fatal(err) + } + + break loop + case e := <-messages: + _, ok := eventsCase.expectedEvents[e.ID] + if !ok { + t.Fatalf("event received not expected with action %s & id %s", e.Action, e.ID) + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go new file mode 100644 index 0000000..74c53f5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -0,0 +1,177 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// tlsClientCon holds tls information and a dialed connection. +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if conn, ok := c.rawConn.(types.CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + apiPath := cli.getAPIPath(path, query) + req, err := http.NewRequest("POST", apiPath, bodyEncoded) + if err != nil { + return types.HijackedResponse{}, err + } + req = cli.addHeaders(req, headers) + + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") + + conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + return types.HijackedResponse{}, err + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + _, err = clientconn.Do(req) + + rwc, br := clientconn.Hijack() + + return types.HijackedResponse{Conn: rwc, Reader: br}, err +} + +func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { + return tlsDialWithDialer(new(net.Dialer), network, addr, config) +} + +// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in +// order to return our custom tlsClientCon struct which holds both the tls.Conn +// object _and_ its underlying raw connection. The rationale for this is that +// we need to be able to close the write end of the connection when attaching, +// which tls.Conn does not provide. +func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { + // We want the Timeout and Deadline values from dialer to cover the + // whole process: TCP connection and TLS handshake. This means that we + // also need to start our own timers now. + timeout := dialer.Timeout + + if !dialer.Deadline.IsZero() { + deadlineTimeout := dialer.Deadline.Sub(time.Now()) + if timeout == 0 || deadlineTimeout < timeout { + timeout = deadlineTimeout + } + } + + var errChannel chan error + + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- errors.New("") + }) + } + + proxyDialer, err := sockets.DialerFromEnvironment(dialer) + if err != nil { + return nil, err + } + + rawConn, err := proxyDialer.Dial(network, addr) + if err != nil { + return nil, err + } + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := rawConn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + config = tlsconfig.Clone(config) + config.ServerName = hostname + } + + conn := tls.Client(rawConn, config) + + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + + err = <-errChannel + } + + if err != nil { + rawConn.Close() + return nil, err + } + + // This is Docker difference with standard's crypto/tls package: returned a + // wrapper which holds both the TLS and raw connections. + return &tlsClientCon{conn, rawConn}, nil +} + +func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + // Notice this isn't Go standard's tls.Dial function + return tlsDial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go new file mode 100644 index 0000000..6fde75d --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -0,0 +1,123 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ImageBuild sends request to the daemon to build images. +// The Body in the response implement an io.ReadCloser and it's up to the caller to +// close it. +func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers.Set("Content-Type", "application/tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/image_build_test.go b/vendor/github.com/docker/docker/client/image_build_test.go new file mode 100644 index 0000000..b9d04f8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_build_test.go @@ -0,0 +1,233 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-units" +) + +func TestImageBuildError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageBuild(context.Background(), nil, types.ImageBuildOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageBuild(t *testing.T) { + v1 := "value1" + v2 := "value2" + emptyRegistryConfig := "bnVsbA==" + buildCases := []struct { + buildOptions types.ImageBuildOptions + expectedQueryParams map[string]string + expectedTags []string + expectedRegistryConfig string + }{ + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: true, + NoCache: true, + Remove: true, + ForceRemove: true, + PullParent: true, + }, + expectedQueryParams: map[string]string{ + "q": "1", + "nocache": "1", + "rm": "1", + "forcerm": "1", + "pull": "1", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: false, + NoCache: false, + Remove: false, + ForceRemove: false, + PullParent: false, + }, + expectedQueryParams: map[string]string{ + "q": "", + "nocache": "", + "rm": "0", + "forcerm": "", + "pull": "", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + RemoteContext: "remoteContext", + Isolation: container.Isolation("isolation"), + CPUSetCPUs: "2", + CPUSetMems: "12", + CPUShares: 20, + CPUQuota: 10, + CPUPeriod: 30, + Memory: 256, + MemorySwap: 512, + ShmSize: 10, + CgroupParent: "cgroup_parent", + Dockerfile: "Dockerfile", + }, + expectedQueryParams: map[string]string{ + "remote": "remoteContext", + "isolation": "isolation", + "cpusetcpus": "2", + "cpusetmems": "12", + "cpushares": "20", + "cpuquota": "10", + "cpuperiod": "30", + "memory": "256", + "memswap": "512", + "shmsize": "10", + "cgroupparent": "cgroup_parent", + "dockerfile": "Dockerfile", + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + BuildArgs: map[string]*string{ + "ARG1": &v1, + "ARG2": &v2, + "ARG3": nil, + }, + }, + expectedQueryParams: map[string]string{ + "buildargs": `{"ARG1":"value1","ARG2":"value2","ARG3":null}`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + Ulimits: []*units.Ulimit{ + { + Name: "nproc", + Hard: 65557, + Soft: 65557, + }, + { + Name: "nofile", + Hard: 20000, + Soft: 40000, + }, + }, + }, + expectedQueryParams: map[string]string{ + "ulimits": `[{"Name":"nproc","Hard":65557,"Soft":65557},{"Name":"nofile","Hard":20000,"Soft":40000}]`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + AuthConfigs: map[string]types.AuthConfig{ + "https://index.docker.io/v1/": { + Auth: "dG90bwo=", + }, + }, + }, + expectedQueryParams: map[string]string{ + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289In19", + }, + } + for _, buildCase := range buildCases { + expectedURL := "/build" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check request headers + registryConfig := r.Header.Get("X-Registry-Config") + if registryConfig != buildCase.expectedRegistryConfig { + return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig) + } + contentType := r.Header.Get("Content-Type") + if contentType != "application/tar" { + return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/tar', got %s", contentType) + } + + // Check query parameters + query := r.URL.Query() + for key, expected := range buildCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + // Check tags + if len(buildCase.expectedTags) > 0 { + tags := query["t"] + if !reflect.DeepEqual(tags, buildCase.expectedTags) { + return nil, fmt.Errorf("t (tags) not set in URL query properly. Expected '%s', got %s", buildCase.expectedTags, tags) + } + } + + headers := http.Header{} + headers.Add("Server", "Docker/v1.23 (MyOS)") + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + Header: headers, + }, nil + }), + } + buildResponse, err := client.ImageBuild(context.Background(), nil, buildCase.buildOptions) + if err != nil { + t.Fatal(err) + } + if buildResponse.OSType != "MyOS" { + t.Fatalf("expected OSType to be 'MyOS', got %s", buildResponse.OSType) + } + response, err := ioutil.ReadAll(buildResponse.Body) + if err != nil { + t.Fatal(err) + } + buildResponse.Body.Close() + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } + } +} + +func TestGetDockerOS(t *testing.T) { + cases := map[string]string{ + "Docker/v1.22 (linux)": "linux", + "Docker/v1.22 (windows)": "windows", + "Foo/v1.22 (bar)": "", + } + for header, os := range cases { + g := getDockerOS(header) + if g != os { + t.Fatalf("Expected %s, got %s", os, g) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go new file mode 100644 index 0000000..cf023a7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" +) + +// ImageCreate creates a new image based in the parent options. +// It returns the JSON content in the response body. +func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + repository, tag, err := reference.Parse(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", repository) + query.Set("tag", tag) + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_create_test.go b/vendor/github.com/docker/docker/client/image_create_test.go new file mode 100644 index 0000000..5c2edd2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImageCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageCreate(t *testing.T) { + expectedURL := "/images/create" + expectedImage := "test:5000/my_image" + expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag) + expectedRegistryAuth := "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0=" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + registryAuth := r.Header.Get("X-Registry-Auth") + if registryAuth != expectedRegistryAuth { + return nil, fmt.Errorf("X-Registry-Auth header not properly set in the request. Expected '%s', got %s", expectedRegistryAuth, registryAuth) + } + + query := r.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != expectedImage { + return nil, fmt.Errorf("fromImage not set in URL query properly. Expected '%s', got %s", expectedImage, fromImage) + } + + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", expectedTag, tag) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{ + RegistryAuth: expectedRegistryAuth, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(createResponse) + if err != nil { + t.Fatal(err) + } + if err = createResponse.Close(); err != nil { + t.Fatal(err) + } + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } +} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go new file mode 100644 index 0000000..acb1ee9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageHistory returns the changes in an image in history format. +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { + var history []types.ImageHistory + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + ensureReaderClosed(serverResp) + return history, err +} diff --git a/vendor/github.com/docker/docker/client/image_history_test.go b/vendor/github.com/docker/docker/client/image_history_test.go new file mode 100644 index 0000000..729edb1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageHistoryError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageHistory(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageHistory(t *testing.T) { + expectedURL := "/images/image_id/history" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + b, err := json.Marshal([]types.ImageHistory{ + { + ID: "image_id1", + Tags: []string{"tag1", "tag2"}, + }, + { + ID: "image_id2", + Tags: []string{"tag1", "tag2"}, + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageHistories, err := client.ImageHistory(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if len(imageHistories) != 2 { + t.Fatalf("expected 2 containers, got %v", imageHistories) + } +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go new file mode 100644 index 0000000..c6f154b --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -0,0 +1,37 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based in the source options. +// It returns the JSON content in the response body. +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_import_test.go b/vendor/github.com/docker/docker/client/image_import_test.go new file mode 100644 index 0000000..e309be7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import_test.go @@ -0,0 +1,81 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageImportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageImport(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + fromSrc := query.Get("fromSrc") + if fromSrc != "image_source" { + return nil, fmt.Errorf("fromSrc not set in URL query properly. Expected 'image_source', got %s", fromSrc) + } + repo := query.Get("repo") + if repo != "repository_name:imported" { + return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name', got %s", repo) + } + tag := query.Get("tag") + if tag != "imported" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected 'imported', got %s", tag) + } + message := query.Get("message") + if message != "A message" { + return nil, fmt.Errorf("message not set in URL query properly. Expected 'A message', got %s", message) + } + changes := query["changes"] + expectedChanges := []string{"change1", "change2"} + if !reflect.DeepEqual(expectedChanges, changes) { + return nil, fmt.Errorf("changes not set in URL query properly. Expected %v, got %v", expectedChanges, changes) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{ + Source: strings.NewReader("source"), + SourceName: "image_source", + }, "repository_name:imported", types.ImageImportOptions{ + Tag: "imported", + Message: "A message", + Changes: []string{"change1", "change2"}, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(importResponse) + if err != nil { + t.Fatal(err) + } + importResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go new file mode 100644 index 0000000..b3a64ce --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ImageInspect{}, nil, imageNotFoundError{imageID} + } + return types.ImageInspect{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/image_inspect_test.go b/vendor/github.com/docker/docker/client/image_inspect_test.go new file mode 100644 index 0000000..74a4e49 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect_test.go @@ -0,0 +1,71 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageInspectImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestImageInspect(t *testing.T) { + expectedURL := "/images/image_id/json" + expectedTags := []string{"tag1", "tag2"} + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ImageInspect{ + ID: "image_id", + RepoTags: expectedTags, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + imageInspect, _, err := client.ImageInspectWithRaw(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if imageInspect.ID != "image_id" { + t.Fatalf("expected `image_id`, got %s", imageInspect.ID) + } + if !reflect.DeepEqual(imageInspect.RepoTags, expectedTags) { + t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags) + } +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go new file mode 100644 index 0000000..f26464f --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -0,0 +1,45 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// ImageList returns a list of images in the docker host. +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/vendor/github.com/docker/docker/client/image_list_test.go b/vendor/github.com/docker/docker/client/image_list_test.go new file mode 100644 index 0000000..7c4a464 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list_test.go @@ -0,0 +1,159 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestImageListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageList(context.Background(), types.ImageListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageList(t *testing.T) { + expectedURL := "/images/json" + + noDanglingfilters := filters.NewArgs() + noDanglingfilters.Add("dangling", "false") + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("dangling", "true") + + listCases := []struct { + options types.ImageListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ImageListOptions{}, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + options: types.ImageListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1":true,"label2":true}}`, + }, + }, + { + options: types.ImageListOptions{ + Filters: noDanglingfilters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + images, err := client.ImageList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } + } +} + +func TestImageListApiBefore125(t *testing.T) { + expectedFilter := "image:tag" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + query := req.URL.Query() + actualFilter := query.Get("filter") + if actualFilter != expectedFilter { + return nil, fmt.Errorf("filter not set in URL query properly. Expected '%s', got %s", expectedFilter, actualFilter) + } + actualFilters := query.Get("filters") + if actualFilters != "" { + return nil, fmt.Errorf("filters should have not been present, were with value: %s", actualFilters) + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.24", + } + + filters := filters.NewArgs() + filters.Add("reference", "image:tag") + + options := types.ImageListOptions{ + Filters: filters, + } + + images, err := client.ImageList(context.Background(), options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } +} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go new file mode 100644 index 0000000..77aaf1a --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -0,0 +1,30 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/docker/docker/client/image_load_test.go b/vendor/github.com/docker/docker/client/image_load_test.go new file mode 100644 index 0000000..68dc14f --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageLoadError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageLoad(context.Background(), nil, true) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageLoad(t *testing.T) { + expectedURL := "/images/load" + expectedInput := "inputBody" + expectedOutput := "outputBody" + loadCases := []struct { + quiet bool + responseContentType string + expectedResponseJSON bool + expectedQueryParams map[string]string + }{ + { + quiet: false, + responseContentType: "text/plain", + expectedResponseJSON: false, + expectedQueryParams: map[string]string{ + "quiet": "0", + }, + }, + { + quiet: true, + responseContentType: "application/json", + expectedResponseJSON: true, + expectedQueryParams: map[string]string{ + "quiet": "1", + }, + }, + } + for _, loadCase := range loadCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + contentType := req.Header.Get("Content-Type") + if contentType != "application/x-tar" { + return nil, fmt.Errorf("content-type not set in URL headers properly. Expected 'application/x-tar', got %s", contentType) + } + query := req.URL.Query() + for key, expected := range loadCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + headers := http.Header{} + headers.Add("Content-Type", loadCase.responseContentType) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + Header: headers, + }, nil + }), + } + + input := bytes.NewReader([]byte(expectedInput)) + imageLoadResponse, err := client.ImageLoad(context.Background(), input, loadCase.quiet) + if err != nil { + t.Fatal(err) + } + if imageLoadResponse.JSON != loadCase.expectedResponseJSON { + t.Fatalf("expected a JSON response, was not.") + } + body, err := ioutil.ReadAll(imageLoadResponse.Body) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected %s, got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go new file mode 100644 index 0000000..5ef98b7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go new file mode 100644 index 0000000..3bffdb7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -0,0 +1,46 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + repository, tag, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", repository) + if tag != "" && !options.All { + query.Set("tag", tag) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull_test.go b/vendor/github.com/docker/docker/client/image_pull_test.go new file mode 100644 index 0000000..fe6bafe --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull_test.go @@ -0,0 +1,199 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePullReferenceParseError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) + if err == nil || err.Error() != "repository name must have at least one component" { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePullAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePullStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != "myimage" { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage) + } + tag := query.Get("tag") + if tag != "latest" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "latest", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePullWithoutErrors(t *testing.T) { + expectedURL := "/images/create" + expectedOutput := "hello world" + pullCases := []struct { + all bool + reference string + expectedImage string + expectedTag string + }{ + { + all: false, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "latest", + }, + { + all: false, + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + { + all: true, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + all: true, + reference: "myimage:anything", + expectedImage: "myimage", + expectedTag: "", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != pullCase.expectedImage { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage) + } + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{ + All: pullCase.all, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go new file mode 100644 index 0000000..8e73d28 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -0,0 +1,54 @@ +package client + +import ( + "errors" + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return nil, err + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + var tag = "" + if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { + tag = nameTaggedRef.Tag() + } + + query := url.Values{} + query.Set("tag", tag) + + resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_push_test.go b/vendor/github.com/docker/docker/client/image_push_test.go new file mode 100644 index 0000000..b52da8b --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push_test.go @@ -0,0 +1,180 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePushReferenceError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{}) + if err == nil || err.Error() != "repository name must have at least one component" { + t.Fatalf("expected an error, got %v", err) + } + // An canonical reference cannot be pushed + _, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{}) + if err == nil || err.Error() != "cannot push a digest reference" { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePushAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePushStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/myimage/push" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != "tag" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "tag", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePushWithoutErrors(t *testing.T) { + expectedOutput := "hello world" + expectedURLFormat := "/images/%s/push" + pullCases := []struct { + reference string + expectedImage string + expectedTag string + }{ + { + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + expectedURL := fmt.Sprintf(expectedURLFormat, pullCase.expectedImage) + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePush(context.Background(), pullCase.reference, types.ImagePushOptions{}) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go new file mode 100644 index 0000000..839e531 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + if err != nil { + return nil, err + } + + var dels []types.ImageDelete + err = json.NewDecoder(resp.body).Decode(&dels) + ensureReaderClosed(resp) + return dels, err +} diff --git a/vendor/github.com/docker/docker/client/image_remove_test.go b/vendor/github.com/docker/docker/client/image_remove_test.go new file mode 100644 index 0000000..7b004f7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageRemove(t *testing.T) { + expectedURL := "/images/image_id" + removeCases := []struct { + force bool + pruneChildren bool + expectedQueryParams map[string]string + }{ + { + force: false, + pruneChildren: false, + expectedQueryParams: map[string]string{ + "force": "", + "noprune": "1", + }, + }, { + force: true, + pruneChildren: true, + expectedQueryParams: map[string]string{ + "force": "1", + "noprune": "", + }, + }, + } + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range removeCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + b, err := json.Marshal([]types.ImageDelete{ + { + Untagged: "image_id1", + }, + { + Deleted: "image_id", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{ + Force: removeCase.force, + PruneChildren: removeCase.pruneChildren, + }) + if err != nil { + t.Fatal(err) + } + if len(imageDeletes) != 2 { + t.Fatalf("expected 2 deleted images, got %v", imageDeletes) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go new file mode 100644 index 0000000..ecac880 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -0,0 +1,22 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. +func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_save_test.go b/vendor/github.com/docker/docker/client/image_save_test.go new file mode 100644 index 0000000..8f0cf88 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "testing" + + "golang.org/x/net/context" + + "strings" +) + +func TestImageSaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSave(context.Background(), []string{"nothing"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageSave(t *testing.T) { + expectedURL := "/images/get" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + names := query["names"] + expectedNames := []string{"image_id1", "image_id2"} + if !reflect.DeepEqual(names, expectedNames) { + return nil, fmt.Errorf("names not set in URL query properly. Expected %v, got %v", names, expectedNames) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + saveResponse, err := client.ImageSave(context.Background(), []string{"image_id1", "image_id2"}) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(saveResponse) + if err != nil { + t.Fatal(err) + } + saveResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go new file mode 100644 index 0000000..b0fcd5c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -0,0 +1,51 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// ImageSearch makes the docker host to search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_search_test.go b/vendor/github.com/docker/docker/client/image_search_test.go new file mode 100644 index 0000000..b17bbd8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "encoding/json" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" +) + +func TestImageSearchAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageSearchStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/search" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected 'IAmValid', got %s", auth) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected 1 result, got %v", results) + } +} + +func TestImageSearchWithoutErrors(t *testing.T) { + expectedURL := "/images/search" + filterArgs := filters.NewArgs() + filterArgs.Add("is-automated", "true") + filterArgs.Add("stars", "3") + + expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}` + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", expectedFilters, filters) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + Filters: filterArgs, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected a result, got %v", results) + } +} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go new file mode 100644 index 0000000..bdbf94a --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -0,0 +1,34 @@ +package client + +import ( + "errors" + "fmt" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/reference" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, imageID, ref string) error { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + tag := reference.GetTagFromNamedRef(distributionRef) + + query := url.Values{} + query.Set("repo", distributionRef.Name()) + query.Set("tag", tag) + + resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/image_tag_test.go b/vendor/github.com/docker/docker/client/image_tag_test.go new file mode 100644 index 0000000..7925db9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag_test.go @@ -0,0 +1,121 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageTagError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "repo:tag") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +// Note: this is not testing all the InvalidReference as it's the reponsability +// of distribution/reference package. +func TestImageTagInvalidReference(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa") + if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag` { + t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) + } +} + +func TestImageTag(t *testing.T) { + expectedURL := "/images/image_id/tag" + tagCases := []struct { + reference string + expectedQueryParams map[string]string + }{ + { + reference: "repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "repository", + "tag": "tag1", + }, + }, { + reference: "another_repository:latest", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "another_repository", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "latest", + }, + }, + } + for _, tagCase := range tagCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range tagCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ImageTag(context.Background(), "image_id", tagCase.reference) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go new file mode 100644 index 0000000..ac07961 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/docker/docker/client/info_test.go b/vendor/github.com/docker/docker/client/info_test.go new file mode 100644 index 0000000..79f23c8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestInfoServerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.Info(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestInfoInvalidResponseJSONError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("invalid json"))), + }, nil + }), + } + _, err := client.Info(context.Background()) + if err == nil || !strings.Contains(err.Error(), "invalid character") { + t.Fatalf("expected a 'invalid character' error, got %v", err) + } +} + +func TestInfo(t *testing.T) { + expectedURL := "/info" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + info := &types.Info{ + ID: "daemonID", + Containers: 3, + } + b, err := json.Marshal(info) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + info, err := client.Info(context.Background()) + if err != nil { + t.Fatal(err) + } + + if info.ID != "daemonID" { + t.Fatalf("expected daemonID, got %s", info.ID) + } + + if info.Containers != 3 { + t.Fatalf("expected 3 containers, got %d", info.Containers) + } +} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go new file mode 100644 index 0000000..0597803 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface.go @@ -0,0 +1,171 @@ +package client + +import ( + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. +type CommonAPIClient interface { + ContainerAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + ServerVersion(ctx context.Context) (types.Version, error) + UpdateClientVersion(v string) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string) (int64, error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go new file mode 100644 index 0000000..51da98e --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go new file mode 100644 index 0000000..cc90a3c --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -0,0 +1,10 @@ +package client + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go new file mode 100644 index 0000000..600dc71 --- /dev/null +++ b/vendor/github.com/docker/docker/client/login.go @@ -0,0 +1,29 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns UnauthorizerError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp.statusCode == http.StatusUnauthorized { + return registry.AuthenticateOKBody{}, unauthorizedError{err} + } + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go new file mode 100644 index 0000000..c022c17 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_connect_test.go b/vendor/github.com/docker/docker/client/network_connect_test.go new file mode 100644 index 0000000..d472f45 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +func TestNetworkConnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkConnectEmptyNilEndpointSettings(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig != nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be nil, got %v", connect.EndpointConfig) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err != nil { + t.Fatal(err) + } +} + +func TestNetworkConnect(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig.NetworkID != "NetworkID" { + return nil, fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", &network.EndpointSettings{ + NetworkID: "NetworkID", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go new file mode 100644 index 0000000..4067a54 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + if err != nil { + return response, err + } + + json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_create_test.go b/vendor/github.com/docker/docker/client/network_create_test.go new file mode 100644 index 0000000..0e2457f --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create_test.go @@ -0,0 +1,72 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkCreate(t *testing.T) { + expectedURL := "/networks/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkCreateResponse{ + ID: "network_id", + Warning: "warning", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResponse, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{ + CheckDuplicate: true, + Driver: "mydriver", + EnableIPv6: true, + Internal: true, + Options: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if networkResponse.ID != "network_id" { + t.Fatalf("expected networkResponse.ID to be 'network_id', got %s", networkResponse.ID) + } + if networkResponse.Warning != "warning" { + t.Fatalf("expected networkResponse.Warning to be 'warning', got %s", networkResponse.Warning) + } +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go new file mode 100644 index 0000000..24b58e3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -0,0 +1,14 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect_test.go b/vendor/github.com/docker/docker/client/network_disconnect_test.go new file mode 100644 index 0000000..b54a2b1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect_test.go @@ -0,0 +1,64 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkDisconnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkDisconnect(t *testing.T) { + expectedURL := "/networks/network_id/disconnect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var disconnect types.NetworkDisconnect + if err := json.NewDecoder(req.Body).Decode(&disconnect); err != nil { + return nil, err + } + + if disconnect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", disconnect.Container) + } + + if !disconnect.Force { + return nil, fmt.Errorf("expected Force to be true, got %v", disconnect.Force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", true) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go new file mode 100644 index 0000000..5ad4ea5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) { + var networkResource types.NetworkResource + resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return networkResource, nil, networkNotFoundError{networkID} + } + return networkResource, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/docker/docker/client/network_inspect_test.go b/vendor/github.com/docker/docker/client/network_inspect_test.go new file mode 100644 index 0000000..1f926d6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "unknown") + if err == nil || !IsErrNetworkNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} + +func TestNetworkInspect(t *testing.T) { + expectedURL := "/networks/network_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkResource{ + Name: "mynetwork", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.NetworkInspect(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } +} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go new file mode 100644 index 0000000..e566a93 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworkList returns the list of networks configured in the docker host. +func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/vendor/github.com/docker/docker/client/network_list_test.go b/vendor/github.com/docker/docker/client/network_list_test.go new file mode 100644 index 0000000..4d44349 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list_test.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestNetworkListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ + Filters: filters.NewArgs(), + }) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkList(t *testing.T) { + expectedURL := "/networks" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + options types.NetworkListOptions + expectedFilters string + }{ + { + options: types.NetworkListOptions{ + Filters: filters.NewArgs(), + }, + expectedFilters: "", + }, { + options: types.NetworkListOptions{ + Filters: noDanglingFilters, + }, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: danglingFilters, + }, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: labelFilters, + }, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal([]types.NetworkResource{ + { + Name: "network", + Driver: "bridge", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResources, err := client.NetworkList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(networkResources) != 1 { + t.Fatalf("expected 1 network resource, got %v", networkResources) + } + } +} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 0000000..7352a7f --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go new file mode 100644 index 0000000..6bd6748 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_remove_test.go b/vendor/github.com/docker/docker/client/network_remove_test.go new file mode 100644 index 0000000..2a7b964 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestNetworkRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkRemove(t *testing.T) { + expectedURL := "/networks/network_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go new file mode 100644 index 0000000..abf505d --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeInspectWithRaw returns the node information. +func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Node{}, nil, nodeNotFoundError{nodeID} + } + return swarm.Node{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/node_inspect_test.go b/vendor/github.com/docker/docker/client/node_inspect_test.go new file mode 100644 index 0000000..fc13283 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeInspectNodeNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrNodeNotFound(err) { + t.Fatalf("expected an nodeNotFoundError error, got %v", err) + } +} + +func TestNodeInspect(t *testing.T) { + expectedURL := "/nodes/node_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Node{ + ID: "node_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodeInspect, _, err := client.NodeInspectWithRaw(context.Background(), "node_id") + if err != nil { + t.Fatal(err) + } + if nodeInspect.ID != "node_id" { + t.Fatalf("expected `node_id`, got %s", nodeInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go new file mode 100644 index 0000000..3e8440f --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + ensureReaderClosed(resp) + return nodes, err +} diff --git a/vendor/github.com/docker/docker/client/node_list_test.go b/vendor/github.com/docker/docker/client/node_list_test.go new file mode 100644 index 0000000..0251b5c --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NodeList(context.Background(), types.NodeListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeList(t *testing.T) { + expectedURL := "/nodes" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.NodeListOptions + expectedQueryParams map[string]string + }{ + { + options: types.NodeListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.NodeListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Node{ + { + ID: "node_id1", + }, + { + ID: "node_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodes, err := client.NodeList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %v", nodes) + } + } +} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go new file mode 100644 index 0000000..0a77f3d --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_remove_test.go b/vendor/github.com/docker/docker/client/node_remove_test.go new file mode 100644 index 0000000..f2f8adc --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestNodeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: false}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeRemove(t *testing.T) { + expectedURL := "/nodes/node_id" + + removeCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != removeCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", removeCase.expectedForce, force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: removeCase.force}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go new file mode 100644 index 0000000..3ca9760 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeUpdate updates a Node. +func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_update_test.go b/vendor/github.com/docker/docker/client/node_update_test.go new file mode 100644 index 0000000..613ff10 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestNodeUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeUpdate(t *testing.T) { + expectedURL := "/nodes/node_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 0000000..22dcda2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,30 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Ping pings the server and return the value of the "Docker-Experimental" & "API-Version" headers +func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err != nil { + return ping, err + } + defer ensureReaderClosed(serverResp) + + ping.APIVersion = serverResp.header.Get("API-Version") + + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + + return ping, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 0000000..a660ba5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,26 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + if err != nil { + return err + } + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go new file mode 100644 index 0000000..30467db --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable_test.go b/vendor/github.com/docker/docker/client/plugin_disable_test.go new file mode 100644 index 0000000..a4de45b --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginDisableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginDisable(t *testing.T) { + expectedURL := "/plugins/plugin_name/disable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go new file mode 100644 index 0000000..95517c4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable_test.go b/vendor/github.com/docker/docker/client/plugin_enable_test.go new file mode 100644 index 0000000..b276813 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginEnableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginEnable(t *testing.T) { + expectedURL := "/plugins/plugin_name/enable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go new file mode 100644 index 0000000..89f39ee --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -0,0 +1,32 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return nil, nil, pluginNotFoundError{name} + } + return nil, nil, err + } + + defer ensureReaderClosed(resp) + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect_test.go b/vendor/github.com/docker/docker/client/plugin_inspect_test.go new file mode 100644 index 0000000..fae407e --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.PluginInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginInspect(t *testing.T) { + expectedURL := "/plugins/plugin_name" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.Plugin{ + ID: "plugin_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + pluginInspect, _, err := client.PluginInspectWithRaw(context.Background(), "plugin_name") + if err != nil { + t.Fatal(err) + } + if pluginInspect.ID != "plugin_id" { + t.Fatalf("expected `plugin_id`, got %s", pluginInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go new file mode 100644 index 0000000..3217c4c --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -0,0 +1,113 @@ +package client + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(err) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go new file mode 100644 index 0000000..88c480a --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + resp, err := cli.get(ctx, "/plugins", nil, nil) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + ensureReaderClosed(resp) + return plugins, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_list_test.go b/vendor/github.com/docker/docker/client/plugin_list_test.go new file mode 100644 index 0000000..173e4b8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_list_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginList(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginList(t *testing.T) { + expectedURL := "/plugins" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]*types.Plugin{ + { + ID: "plugin_id1", + }, + { + ID: "plugin_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + plugins, err := client.PluginList(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(plugins) != 2 { + t.Fatalf("expected 2 plugins, got %v", plugins) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go new file mode 100644 index 0000000..1e5f963 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -0,0 +1,17 @@ +package client + +import ( + "io" + + "golang.org/x/net/context" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_push_test.go b/vendor/github.com/docker/docker/client/plugin_push_test.go new file mode 100644 index 0000000..d9f70cd --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginPushError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginPush(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + auth := req.Header.Get("X-Registry-Auth") + if auth != "authtoken" { + return nil, fmt.Errorf("Invalid auth header : expected 'authtoken', got %s", auth) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "authtoken") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go new file mode 100644 index 0000000..b017e4d --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_remove_test.go b/vendor/github.com/docker/docker/client/plugin_remove_test.go new file mode 100644 index 0000000..a15f166 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_remove_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestPluginRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginRemove(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go new file mode 100644 index 0000000..3260d2a --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -0,0 +1,12 @@ +package client + +import ( + "golang.org/x/net/context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_set_test.go b/vendor/github.com/docker/docker/client/plugin_set_test.go new file mode 100644 index 0000000..2450254 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginSetError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginSet(t *testing.T) { + expectedURL := "/plugins/plugin_name/set" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{"arg1"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go new file mode 100644 index 0000000..95a4356 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers) +} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go new file mode 100644 index 0000000..ac05363 --- /dev/null +++ b/vendor/github.com/docker/docker/client/request.go @@ -0,0 +1,247 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) +} + +// getWithContext sends an http request to the docker API using the method GET with a specific go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "GET", path, query, nil, headers) +} + +// postWithContext sends an http request to the docker API using the method POST with a specific go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. (See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + return cli.doRequest(ctx, req) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1} + + resp, err := ctxhttp.Do(ctx, cli.client, req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. + switch err { + case context.Canceled, context.DeadlineExceeded: + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if !err.Temporary() { + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.25/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. + // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.") + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + } + + if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return serverResp, err + } + if len(body) == 0 { + return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && + resp.Header.Get("Content-Type") == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return serverResp, fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) + } + + serverResp.body = resp.Body + serverResp.header = resp.Header + return serverResp, nil +} + +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + continue + } + req.Header.Set(k, v) + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if body := response.body; body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(ioutil.Discard, body, 512) + response.body.Close() + } +} diff --git a/vendor/github.com/docker/docker/client/request_test.go b/vendor/github.com/docker/docker/client/request_test.go new file mode 100644 index 0000000..63908ae --- /dev/null +++ b/vendor/github.com/docker/docker/client/request_test.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// TestSetHostHeader should set fake host for local communications, set real host +// for normal communications. +func TestSetHostHeader(t *testing.T) { + testURL := "/test" + testCases := []struct { + host string + expectedHost string + expectedURLHost string + }{ + { + "unix:///var/run/docker.sock", + "docker", + "/var/run/docker.sock", + }, + { + "npipe:////./pipe/docker_engine", + "docker", + "//./pipe/docker_engine", + }, + { + "tcp://0.0.0.0:4243", + "", + "0.0.0.0:4243", + }, + { + "tcp://localhost:4243", + "", + "localhost:4243", + }, + } + + for c, test := range testCases { + proto, addr, basePath, err := ParseHost(test.host) + if err != nil { + t.Fatal(err) + } + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, testURL) { + return nil, fmt.Errorf("Test Case #%d: Expected URL %q, got %q", c, testURL, req.URL) + } + if req.Host != test.expectedHost { + return nil, fmt.Errorf("Test Case #%d: Expected host %q, got %q", c, test.expectedHost, req.Host) + } + if req.URL.Host != test.expectedURLHost { + return nil, fmt.Errorf("Test Case #%d: Expected URL host %q, got %q", c, test.expectedURLHost, req.URL.Host) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), + }, nil + }), + + proto: proto, + addr: addr, + basePath: basePath, + } + + _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) + if err != nil { + t.Fatal(err) + } + } +} + +// TestPlainTextError tests the server returning an error in plain text for +// backwards compatibility with API versions <1.24. All other tests use +// errors returned as JSON +func TestPlainTextError(t *testing.T) { + client := &Client{ + client: newMockClient(plainTextErrorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go new file mode 100644 index 0000000..de8b041 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -0,0 +1,24 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretCreate creates a new Secret. +func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var headers map[string][]string + + var response types.SecretCreateResponse + resp, err := cli.post(ctx, "/secrets/create", nil, secret, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/secret_create_test.go b/vendor/github.com/docker/docker/client/secret_create_test.go new file mode 100644 index 0000000..cb378c7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretCreate(t *testing.T) { + expectedURL := "/secrets/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.SecretCreateResponse{ + ID: "test_secret", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_secret" { + t.Fatalf("expected `test_secret`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go new file mode 100644 index 0000000..f774576 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Secret{}, nil, secretNotFoundError{id} + } + return swarm.Secret{}, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect_test.go b/vendor/github.com/docker/docker/client/secret_inspect_test.go new file mode 100644 index 0000000..423d986 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretInspectSecretNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrSecretNotFound(err) { + t.Fatalf("expected an secretNotFoundError error, got %v", err) + } +} + +func TestSecretInspect(t *testing.T) { + expectedURL := "/secrets/secret_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Secret{ + ID: "secret_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secretInspect, _, err := client.SecretInspectWithRaw(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } + if secretInspect.ID != "secret_id" { + t.Fatalf("expected `secret_id`, got %s", secretInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go new file mode 100644 index 0000000..7e9d5ec --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretList returns the list of secrets. +func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + ensureReaderClosed(resp) + return secrets, err +} diff --git a/vendor/github.com/docker/docker/client/secret_list_test.go b/vendor/github.com/docker/docker/client/secret_list_test.go new file mode 100644 index 0000000..1ac11cd --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SecretList(context.Background(), types.SecretListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretList(t *testing.T) { + expectedURL := "/secrets" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.SecretListOptions + expectedQueryParams map[string]string + }{ + { + options: types.SecretListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.SecretListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Secret{ + { + ID: "secret_id1", + }, + { + ID: "secret_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secrets, err := client.SecretList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(secrets) != 2 { + t.Fatalf("expected 2 secrets, got %v", secrets) + } + } +} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 0000000..1955b98 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// SecretRemove removes a Secret. +func (cli *Client) SecretRemove(ctx context.Context, id string) error { + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/secret_remove_test.go b/vendor/github.com/docker/docker/client/secret_remove_test.go new file mode 100644 index 0000000..f269f78 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSecretRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretRemove(t *testing.T) { + expectedURL := "/secrets/secret_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go new file mode 100644 index 0000000..b94e24a --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretUpdate updates a Secret. Currently, the only part of a secret spec +// which can be updated is Labels. +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/secret_update_test.go b/vendor/github.com/docker/docker/client/secret_update_test.go new file mode 100644 index 0000000..c620985 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSecretUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretUpdate(t *testing.T) { + expectedURL := "/secrets/secret_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go new file mode 100644 index 0000000..3d1be22 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -0,0 +1,30 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceCreate creates a new Service. +func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var headers map[string][]string + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {options.EncodedRegistryAuth}, + } + } + + var response types.ServiceCreateResponse + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/service_create_test.go b/vendor/github.com/docker/docker/client/service_create_test.go new file mode 100644 index 0000000..1e07382 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceCreate(t *testing.T) { + expectedURL := "/services/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go new file mode 100644 index 0000000..ca71cbd --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceInspectWithRaw returns the service information and the raw data. +func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) { + serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Service{}, nil, serviceNotFoundError{serviceID} + } + return swarm.Service{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/service_inspect_test.go b/vendor/github.com/docker/docker/client/service_inspect_test.go new file mode 100644 index 0000000..e235cf0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceInspectServiceNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrServiceNotFound(err) { + t.Fatalf("expected an serviceNotFoundError error, got %v", err) + } +} + +func TestServiceInspect(t *testing.T) { + expectedURL := "/services/service_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Service{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } + if serviceInspect.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", serviceInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go new file mode 100644 index 0000000..c29e6d4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/services", query, nil) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + ensureReaderClosed(resp) + return services, err +} diff --git a/vendor/github.com/docker/docker/client/service_list_test.go b/vendor/github.com/docker/docker/client/service_list_test.go new file mode 100644 index 0000000..213981e --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceList(t *testing.T) { + expectedURL := "/services" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ServiceListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ServiceListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ServiceListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Service{ + { + ID: "service_id1", + }, + { + ID: "service_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + services, err := client.ServiceList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(services) != 2 { + t.Fatalf("expected 2 services, got %v", services) + } + } +} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 0000000..24384e3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/service_logs_test.go b/vendor/github.com/docker/docker/client/service_logs_test.go new file mode 100644 index 0000000..a6d002b --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestServiceLogsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + _, err = client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{ + Since: "2006-01-02TZ", + }) + if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { + t.Fatalf("expected a 'parsing time' error, got %v", err) + } +} + +func TestServiceLogs(t *testing.T) { + expectedURL := "/services/service_id/logs" + cases := []struct { + options types.ContainerLogsOptions + expectedQueryParams map[string]string + }{ + { + expectedQueryParams: map[string]string{ + "tail": "", + }, + }, + { + options: types.ContainerLogsOptions{ + Tail: "any", + }, + expectedQueryParams: map[string]string{ + "tail": "any", + }, + }, + { + options: types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, + Follow: true, + }, + expectedQueryParams: map[string]string{ + "tail": "", + "stdout": "1", + "stderr": "1", + "timestamps": "1", + "details": "1", + "follow": "1", + }, + }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date, timestamp or go duration will be + // passed as is + Since: "invalid but valid", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "invalid but valid", + }, + }, + } + for _, logCase := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check query parameters + query := r.URL.Query() + for key, expected := range logCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ServiceLogs(context.Background(), "service_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ServiceLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ServiceLogs(ctx, "service_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go new file mode 100644 index 0000000..a9331f9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_remove_test.go b/vendor/github.com/docker/docker/client/service_remove_test.go new file mode 100644 index 0000000..8e2ac25 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestServiceRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceRemove(t *testing.T) { + expectedURL := "/services/service_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go new file mode 100644 index 0000000..afa94d4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -0,0 +1,41 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceUpdate updates a Service. +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + headers map[string][]string + query = url.Values{} + ) + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {options.EncodedRegistryAuth}, + } + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + var response types.ServiceUpdateResponse + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/service_update_test.go b/vendor/github.com/docker/docker/client/service_update_test.go new file mode 100644 index 0000000..76bea17 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update_test.go @@ -0,0 +1,77 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +func TestServiceUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", swarm.Version{}, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceUpdate(t *testing.T) { + expectedURL := "/services/service_id/update" + + updateCases := []struct { + swarmVersion swarm.Version + expectedVersion string + }{ + { + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 0, + }, + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 10, + }, + expectedVersion: "10", + }, + } + + for _, updateCase := range updateCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + version := req.URL.Query().Get("version") + if version != updateCase.expectedVersion { + return nil, fmt.Errorf("version not set in URL query properly, expected '%s', got %s", updateCase.expectedVersion, version) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + }, nil + }), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", updateCase.swarmVersion, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 0000000..be28d32 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go new file mode 100644 index 0000000..fd45d06 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInit initializes the Swarm. +func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init_test.go b/vendor/github.com/docker/docker/client/swarm_init_test.go new file mode 100644 index 0000000..811155a --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmInitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInit(context.Background(), swarm.InitRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInit(t *testing.T) { + expectedURL := "/swarm/init" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(`"body"`))), + }, nil + }), + } + + resp, err := client.SwarmInit(context.Background(), swarm.InitRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } + if resp != "body" { + t.Fatalf("Expected 'body', got %s", resp) + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go new file mode 100644 index 0000000..6d95cfc --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInspect inspects the Swarm. +func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect_test.go b/vendor/github.com/docker/docker/client/swarm_inspect_test.go new file mode 100644 index 0000000..6432d17 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect_test.go @@ -0,0 +1,56 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSwarmInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInspect(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInspect(t *testing.T) { + expectedURL := "/swarm" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + ID: "swarm_id", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + swarmInspect, err := client.SwarmInspect(context.Background()) + if err != nil { + t.Fatal(err) + } + if swarmInspect.ID != "swarm_id" { + t.Fatalf("expected `swarm_id`, got %s", swarmInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go new file mode 100644 index 0000000..cda9993 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmJoin joins the Swarm. +func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_join_test.go b/vendor/github.com/docker/docker/client/swarm_join_test.go new file mode 100644 index 0000000..31ef2a7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmJoinError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmJoin(t *testing.T) { + expectedURL := "/swarm/join" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go new file mode 100644 index 0000000..a4df732 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// SwarmLeave leaves the Swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave_test.go b/vendor/github.com/docker/docker/client/swarm_leave_test.go new file mode 100644 index 0000000..c96dac8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave_test.go @@ -0,0 +1,66 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSwarmLeaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmLeave(context.Background(), false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmLeave(t *testing.T) { + expectedURL := "/swarm/leave" + + leaveCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, leaveCase := range leaveCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != leaveCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", leaveCase.expectedForce, force) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmLeave(context.Background(), leaveCase.force) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 0000000..addfb59 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUnlock unlockes locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + if err != nil { + return err + } + + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go new file mode 100644 index 0000000..cc8eeb6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUpdate updates the Swarm. +func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) + query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update_test.go b/vendor/github.com/docker/docker/client/swarm_update_test.go new file mode 100644 index 0000000..3b23db0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmUpdate(t *testing.T) { + expectedURL := "/swarm/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go new file mode 100644 index 0000000..bc8058f --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + + "golang.org/x/net/context" +) + +// TaskInspectWithRaw returns the task information and its raw representation.. +func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Task{}, nil, taskNotFoundError{taskID} + } + return swarm.Task{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/task_inspect_test.go b/vendor/github.com/docker/docker/client/task_inspect_test.go new file mode 100644 index 0000000..148cdad --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.TaskInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskInspect(t *testing.T) { + expectedURL := "/tasks/task_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Task{ + ID: "task_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + taskInspect, _, err := client.TaskInspectWithRaw(context.Background(), "task_id") + if err != nil { + t.Fatal(err) + } + if taskInspect.ID != "task_id" { + t.Fatalf("expected `task_id`, got %s", taskInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go new file mode 100644 index 0000000..66324da --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// TaskList returns the list of tasks. +func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + ensureReaderClosed(resp) + return tasks, err +} diff --git a/vendor/github.com/docker/docker/client/task_list_test.go b/vendor/github.com/docker/docker/client/task_list_test.go new file mode 100644 index 0000000..2a9a4c4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.TaskList(context.Background(), types.TaskListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskList(t *testing.T) { + expectedURL := "/tasks" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.TaskListOptions + expectedQueryParams map[string]string + }{ + { + options: types.TaskListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.TaskListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Task{ + { + ID: "task_id1", + }, + { + ID: "task_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + tasks, err := client.TaskList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(tasks) != 2 { + t.Fatalf("expected 2 tasks, got %v", tasks) + } + } +} diff --git a/vendor/github.com/docker/docker/client/testdata/ca.pem b/vendor/github.com/docker/docker/client/testdata/ca.pem new file mode 100644 index 0000000..ad14d47 --- /dev/null +++ b/vendor/github.com/docker/docker/client/testdata/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC0jCCAbqgAwIBAgIRAILlP5WWLaHkQ/m2ASHP7SowDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMBIxEDAOBgNVBAoTB3ZpbmNlbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD0yZPKAGncoaxaU/QW9tWEHbrvDoGVF/65L8Si/jBrlAgLjhmmV1di +vKG9QPzuU8snxHro3/uCwyA6kTqw0U8bGwHxJq2Bpa6JBYj8N2jMJ+M+sjXgSo2t +E0zIzjTW2Pir3C8qwfrVL6NFp9xClwMD23SFZ0UsEH36NkfyrKBVeM8IOjJd4Wjs +xIcuvF3BTVkji84IJBW2JIKf9ZrzJwUlSCPgptRp4Evdbyp5d+UPxtwxD7qjW4lM +yQQ8vfcC4lKkVx5s/RNJ4fzd5uEgLdEbZ20qt7Zt/bLcxFHpUhH2teA0QjmrOWFh +gbL83s95/+hbSVhsO4hoFW7vTeiCCY4xAgMBAAGjIzAhMA4GA1UdDwEB/wQEAwIC +rDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBY51RHajuDuhO2 +tcm26jeNROzfffnjhvbOVPjSEdo9vI3JpMU/RuQw+nbNcLwJrdjL6UH7tD/36Y+q +NXH+xSIjWFH0zXGxrIUsVrvt6f8CbOvw7vD+gygOG+849PDQMbL6czP8rvXY7vZV +9pdpQfrENk4b5kePRW/6HaGSTvtgN7XOrYD9fp3pm/G534T2e3IxgYMRNwdB9Ul9 +bLwMqQqf4eiqqMs6x4IVmZUkGVMKiFKcvkNg9a+Ozx5pMizHeAezWMcZ5V+QJZVT +8lElSCKZ2Yy2xkcl7aeQMLwcAeZwfTp+Yu9dVzlqXiiBTLd1+LtAQCuKHzmw4Q8k +EvD5m49l +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/client/testdata/cert.pem b/vendor/github.com/docker/docker/client/testdata/cert.pem new file mode 100644 index 0000000..9000ffb --- /dev/null +++ b/vendor/github.com/docker/docker/client/testdata/cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC8DCCAdigAwIBAgIRAJAS1glgcke4q7eCaretwgUwDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMB4xHDAaBgNVBAoME3ZpbmNlbnQuPGJvb3RzdHJhcD4wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQClpvG442dGEvrRgmCrqY4kBml1LVlw2Y7ZDn6B +TKa52+MuGDmfXbO1UhclNqTXjLgAwKjPz/OvnPRxNEUoQEDbBd+Xev7rxTY5TvYI +27YH3fMH2LL2j62jum649abfhZ6ekD5eD8tCn3mnrEOgqRIlK7efPIVixq/ZqU1H +7ez0ggB7dmWHlhnUaxyQOCSnAX/7nKYQXqZgVvGhDeR2jp7GcnhbK/qPrZ/mOm83 +2IjCeYN145opYlzTSp64GYIZz7uqMNcnDKK37ZbS8MYcTjrRaHEiqZVVdIC+ghbx +qYqzbZRVfgztI9jwmifn0mYrN4yt+nhNYwBcRJ4Pv3uLFbo7AgMBAAGjNTAzMA4G +A1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA +MA0GCSqGSIb3DQEBCwUAA4IBAQDg1r7nksjYgDFYEcBbrRrRHddIoK+RVmSBTTrq +8giC77m0srKdh9XTVWK1PUbGfODV1oD8m9QhPE8zPDyYQ8jeXNRSU5wXdkrTRmmY +w/T3SREqmE7CObMtusokHidjYFuqqCR07sJzqBKRlzr3o0EGe3tuEhUlF5ARY028 +eipaDcVlT5ChGcDa6LeJ4e05u4cVap0dd6Rp1w3Rx1AYAecdgtgBMnw1iWdl/nrC +sp26ZXNaAhFOUovlY9VY257AMd9hQV7WvAK4yNEHcckVu3uXTBmDgNSOPtl0QLsL +Kjlj75ksCx8nCln/hCut/0+kGTsGZqdV5c6ktgcGYRir/5Hs +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/client/testdata/key.pem b/vendor/github.com/docker/docker/client/testdata/key.pem new file mode 100644 index 0000000..c0869df --- /dev/null +++ b/vendor/github.com/docker/docker/client/testdata/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApabxuONnRhL60YJgq6mOJAZpdS1ZcNmO2Q5+gUymudvjLhg5 +n12ztVIXJTak14y4AMCoz8/zr5z0cTRFKEBA2wXfl3r+68U2OU72CNu2B93zB9iy +9o+to7puuPWm34WenpA+Xg/LQp95p6xDoKkSJSu3nzyFYsav2alNR+3s9IIAe3Zl +h5YZ1GsckDgkpwF/+5ymEF6mYFbxoQ3kdo6exnJ4Wyv6j62f5jpvN9iIwnmDdeOa +KWJc00qeuBmCGc+7qjDXJwyit+2W0vDGHE460WhxIqmVVXSAvoIW8amKs22UVX4M +7SPY8Jon59JmKzeMrfp4TWMAXESeD797ixW6OwIDAQABAoIBAHfyAAleL8NfrtnR +S+pApbmUIvxD0AWUooispBE/zWG6xC72P5MTqDJctIGvpYCmVf3Fgvamns7EGYN2 +07Sngc6V3Ca1WqyhaffpIuGbJZ1gqr89u6gotRRexBmNVj13ZTlvPJmjWgxtqQsu +AvHsOkVL+HOGwRaaw24Z1umEcBVCepl7PGTqsLeJUtBUZBiqdJTu4JYLAB6BggBI +OxhHoTWvlNWwzezo2C/IXkXcXD/tp3i5vTn5rAXHSMQkdMAUh7/xJ73Fl36gxZhp +W7NoPKaS9qNh8jhs6p54S7tInb6+mrKtvRFKl5XAR3istXrXteT5UaukpuBbQ/5d +qf4BXuECgYEAzoOKxMee5tG/G9iC6ImNq5xGAZm0OnmteNgIEQj49If1Q68av525 +FioqdC9zV+blfHQqXEIUeum4JAou4xqmB8Lw2H0lYwOJ1IkpUy3QJjU1IrI+U5Qy +ryZuA9cxSTLf1AJFbROsoZDpjaBh0uUQkD/4PHpwXMgHu/3CaJ4nTEkCgYEAzVjE +VWgczWJGyRxmHSeR51ft1jrlChZHEd3HwgLfo854JIj+MGUH4KPLSMIkYNuyiwNQ +W7zdXCB47U8afSL/lPTv1M5+ZsWY6sZAT6gtp/IeU0Va943h9cj10fAOBJaz1H6M +jnZS4jjWhVInE7wpCDVCwDRoHHJ84kb6JeflamMCgYBDQDcKie9HP3q6uLE4xMKr +5gIuNz2n5UQGnGNUGNXp2/SVDArr55MEksqsd19aesi01KeOz74XoNDke6R1NJJo +6KTB+08XhWl3GwuoGL02FBGvsNf3I8W1oBAnlAZqzfRx+CNfuA55ttU318jDgvD3 +6L0QBNdef411PNf4dbhacQKBgAd/e0PHFm4lbYJAaDYeUMSKwGN3KQ/SOmwblgSu +iC36BwcGfYmU1tHMCUsx05Q50W4kA9Ylskt/4AqCPexdz8lHnE4/7/uesXO5I3YF +JQ2h2Jufx6+MXbjUyq0Mv+ZI/m3+5PD6vxIFk0ew9T5SO4lSMIrGHxsSzx6QCuhB +bG4TAoGBAJ5PWG7d2CyCjLtfF8J4NxykRvIQ8l/3kDvDdNrXiXbgonojo2lgRYaM +5LoK9ApN8KHdedpTRipBaDA22Sp5SjMcUE7A6q42PJCL9r+BRYF0foFQx/rqpCff +pVWKgwIPoKnfxDqN1RUgyFcx1jbA3XVJZCuT+wbMuDQ9nlvulD1W +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go new file mode 100644 index 0000000..f04e601 --- /dev/null +++ b/vendor/github.com/docker/docker/client/transport.go @@ -0,0 +1,28 @@ +package client + +import ( + "crypto/tls" + "errors" + "net/http" +) + +var errTLSConfigUnavailable = errors.New("TLSConfig unavailable") + +// transportFunc allows us to inject a mock transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. +type transportFunc func(*http.Request) (*http.Response, error) + +func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +// resolveTLSConfig attempts to resolve the tls configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 0000000..23d520e --- /dev/null +++ b/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,33 @@ +package client + +import ( + "github.com/docker/docker/api/types/filters" + "net/url" + "regexp" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. +func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToParam(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go new file mode 100644 index 0000000..933ceb4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/version.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ServerVersion returns information of the docker client and server host. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go new file mode 100644 index 0000000..9620c87 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create_test.go b/vendor/github.com/docker/docker/client/volume_create_test.go new file mode 100644 index 0000000..9f1b254 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create_test.go @@ -0,0 +1,75 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeCreate(t *testing.T) { + expectedURL := "/volumes/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.Volume{ + Name: "volume", + Driver: "local", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volume, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{ + Name: "myvolume", + Driver: "mydriver", + DriverOpts: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if volume.Name != "volume" { + t.Fatalf("expected volume.Name to be 'volume', got %s", volume.Name) + } + if volume.Driver != "local" { + t.Fatalf("expected volume.Driver to be 'local', got %s", volume.Driver) + } + if volume.Mountpoint != "mountpoint" { + t.Fatalf("expected volume.Mountpoint to be 'mountpoint', got %s", volume.Mountpoint) + } +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go new file mode 100644 index 0000000..3860e9b --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, nil, volumeNotFoundError{volumeID} + } + return volume, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect_test.go b/vendor/github.com/docker/docker/client/volume_inspect_test.go new file mode 100644 index 0000000..0d1d118 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestVolumeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeInspectNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "unknown") + if err == nil || !IsErrVolumeNotFound(err) { + t.Fatalf("expected a volumeNotFound error, got %v", err) + } +} + +func TestVolumeInspect(t *testing.T) { + expectedURL := "/volumes/volume_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + content, err := json.Marshal(types.Volume{ + Name: "name", + Driver: "driver", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + v, err := client.VolumeInspect(context.Background(), "volume_id") + if err != nil { + t.Fatal(err) + } + if v.Name != "name" { + t.Fatalf("expected `name`, got %s", v.Name) + } + if v.Driver != "driver" { + t.Fatalf("expected `driver`, got %s", v.Driver) + } + if v.Mountpoint != "mountpoint" { + t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) + } +} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go new file mode 100644 index 0000000..32247ce --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { + var volumes volumetypes.VolumesListOKBody + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/vendor/github.com/docker/docker/client/volume_list_test.go b/vendor/github.com/docker/docker/client/volume_list_test.go new file mode 100644 index 0000000..f29639b --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list_test.go @@ -0,0 +1,98 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeList(t *testing.T) { + expectedURL := "/volumes" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + filters filters.Args + expectedFilters string + }{ + { + filters: filters.NewArgs(), + expectedFilters: "", + }, { + filters: noDanglingFilters, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + filters: danglingFilters, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + filters: labelFilters, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal(volumetypes.VolumesListOKBody{ + Volumes: []*types.Volume{ + { + Name: "volume", + Driver: "local", + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volumeResponse, err := client.VolumeList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(volumeResponse.Volumes) != 1 { + t.Fatalf("expected 1 volume, got %v", volumeResponse.Volumes) + } + } +} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go new file mode 100644 index 0000000..a07e4ce --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go new file mode 100644 index 0000000..6c26575 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/volume_remove_test.go b/vendor/github.com/docker/docker/client/volume_remove_test.go new file mode 100644 index 0000000..1fe6573 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestVolumeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeRemove(t *testing.T) { + expectedURL := "/volumes/volume_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_none.go b/vendor/github.com/docker/docker/cmd/docker/daemon_none.go new file mode 100644 index 0000000..65f9f37 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/daemon_none.go @@ -0,0 +1,27 @@ +// +build !daemon + +package main + +import ( + "fmt" + "runtime" + "strings" + + "github.com/spf13/cobra" +) + +func newDaemonCommand() *cobra.Command { + return &cobra.Command{ + Use: "daemon", + Hidden: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDaemon() + }, + } +} + +func runDaemon() error { + return fmt.Errorf( + "`docker daemon` is not supported on %s. Please run `dockerd` directly", + strings.Title(runtime.GOOS)) +} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go b/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go new file mode 100644 index 0000000..32032fe --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go @@ -0,0 +1,17 @@ +// +build !daemon + +package main + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestDaemonCommand(t *testing.T) { + cmd := newDaemonCommand() + cmd.SetArgs([]string{"--help"}) + err := cmd.Execute() + + assert.Error(t, err, "Please run `dockerd`") +} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go b/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go new file mode 100644 index 0000000..26348a8 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go @@ -0,0 +1,30 @@ +// +build daemon + +package main + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/assert" + "github.com/spf13/cobra" +) + +func stubRun(cmd *cobra.Command, args []string) error { + return nil +} + +func TestDaemonCommandHelp(t *testing.T) { + cmd := newDaemonCommand() + cmd.RunE = stubRun + cmd.SetArgs([]string{"--help"}) + err := cmd.Execute() + assert.NilError(t, err) +} + +func TestDaemonCommand(t *testing.T) { + cmd := newDaemonCommand() + cmd.RunE = stubRun + cmd.SetArgs([]string{"--containerd", "/foo"}) + err := cmd.Execute() + assert.NilError(t, err) +} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go b/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go new file mode 100644 index 0000000..f68d220 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go @@ -0,0 +1,79 @@ +// +build daemon + +package main + +import ( + "fmt" + + "os" + "os/exec" + "path/filepath" + "syscall" + + "github.com/spf13/cobra" +) + +const daemonBinary = "dockerd" + +func newDaemonCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "daemon", + Hidden: true, + Args: cobra.ArbitraryArgs, + DisableFlagParsing: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDaemon() + }, + Deprecated: "and will be removed in Docker 1.16. Please run `dockerd` directly.", + } + cmd.SetHelpFunc(helpFunc) + return cmd +} + +// CmdDaemon execs dockerd with the same flags +func runDaemon() error { + // Use os.Args[1:] so that "global" args are passed to dockerd + return execDaemon(stripDaemonArg(os.Args[1:])) +} + +func execDaemon(args []string) error { + binaryPath, err := findDaemonBinary() + if err != nil { + return err + } + + return syscall.Exec( + binaryPath, + append([]string{daemonBinary}, args...), + os.Environ()) +} + +func helpFunc(cmd *cobra.Command, args []string) { + if err := execDaemon([]string{"--help"}); err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err.Error()) + } +} + +// findDaemonBinary looks for the path to the dockerd binary starting with +// the directory of the current executable (if one exists) and followed by $PATH +func findDaemonBinary() (string, error) { + execDirname := filepath.Dir(os.Args[0]) + if execDirname != "" { + binaryPath := filepath.Join(execDirname, daemonBinary) + if _, err := os.Stat(binaryPath); err == nil { + return binaryPath, nil + } + } + + return exec.LookPath(daemonBinary) +} + +// stripDaemonArg removes the `daemon` argument from the list +func stripDaemonArg(args []string) []string { + for i, arg := range args { + if arg == "daemon" { + return append(args[:i], args[i+1:]...) + } + } + return args +} diff --git a/vendor/github.com/docker/docker/cmd/docker/docker.go b/vendor/github.com/docker/docker/cmd/docker/docker.go new file mode 100644 index 0000000..d4847a9 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/docker.go @@ -0,0 +1,180 @@ +package main + +import ( + "errors" + "fmt" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/commands" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newDockerCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := cliflags.NewClientOptions() + var flags *pflag.FlagSet + + cmd := &cobra.Command{ + Use: "docker [OPTIONS] COMMAND [ARG...]", + Short: "A self-sufficient runtime for containers", + SilenceUsage: true, + SilenceErrors: true, + TraverseChildren: true, + Args: noArgs, + RunE: func(cmd *cobra.Command, args []string) error { + if opts.Version { + showVersion() + return nil + } + return dockerCli.ShowHelp(cmd, args) + }, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + // daemon command is special, we redirect directly to another binary + if cmd.Name() == "daemon" { + return nil + } + // flags must be the top-level command flags, not cmd.Flags() + opts.Common.SetDefaultOptions(flags) + dockerPreRun(opts) + if err := dockerCli.Initialize(opts); err != nil { + return err + } + return isSupported(cmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()) + }, + } + cli.SetupRootCommand(cmd) + + cmd.SetHelpFunc(func(ccmd *cobra.Command, args []string) { + if dockerCli.Client() == nil { // when using --help, PersistenPreRun is not called, so initialization is needed. + // flags must be the top-level command flags, not cmd.Flags() + opts.Common.SetDefaultOptions(flags) + dockerPreRun(opts) + dockerCli.Initialize(opts) + } + + if err := isSupported(ccmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()); err != nil { + ccmd.Println(err) + return + } + + hideUnsupportedFeatures(ccmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()) + + if err := ccmd.Help(); err != nil { + ccmd.Println(err) + } + }) + + flags = cmd.Flags() + flags.BoolVarP(&opts.Version, "version", "v", false, "Print version information and quit") + flags.StringVar(&opts.ConfigDir, "config", cliconfig.ConfigDir(), "Location of client config files") + opts.Common.InstallFlags(flags) + + cmd.SetOutput(dockerCli.Out()) + cmd.AddCommand(newDaemonCommand()) + commands.AddCommands(cmd, dockerCli) + + return cmd +} + +func noArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + return fmt.Errorf( + "docker: '%s' is not a docker command.\nSee 'docker --help'", args[0]) +} + +func main() { + // Set terminal emulation based on platform as required. + stdin, stdout, stderr := term.StdStreams() + logrus.SetOutput(stderr) + + dockerCli := command.NewDockerCli(stdin, stdout, stderr) + cmd := newDockerCommand(dockerCli) + + if err := cmd.Execute(); err != nil { + if sterr, ok := err.(cli.StatusError); ok { + if sterr.Status != "" { + fmt.Fprintln(stderr, sterr.Status) + } + // StatusError should only be used for errors, and all errors should + // have a non-zero exit status, so never exit with 0 + if sterr.StatusCode == 0 { + os.Exit(1) + } + os.Exit(sterr.StatusCode) + } + fmt.Fprintln(stderr, err) + os.Exit(1) + } +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) +} + +func dockerPreRun(opts *cliflags.ClientOptions) { + cliflags.SetLogLevel(opts.Common.LogLevel) + + if opts.ConfigDir != "" { + cliconfig.SetConfigDir(opts.ConfigDir) + } + + if opts.Common.Debug { + utils.EnableDebug() + } +} + +func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) { + cmd.Flags().VisitAll(func(f *pflag.Flag) { + // hide experimental flags + if !hasExperimental { + if _, ok := f.Annotations["experimental"]; ok { + f.Hidden = true + } + } + + // hide flags not supported by the server + if flagVersion, ok := f.Annotations["version"]; ok && len(flagVersion) == 1 && versions.LessThan(clientVersion, flagVersion[0]) { + f.Hidden = true + } + + }) + + for _, subcmd := range cmd.Commands() { + // hide experimental subcommands + if !hasExperimental { + if _, ok := subcmd.Tags["experimental"]; ok { + subcmd.Hidden = true + } + } + + // hide subcommands not supported by the server + if subcmdVersion, ok := subcmd.Tags["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) { + subcmd.Hidden = true + } + } +} + +func isSupported(cmd *cobra.Command, clientVersion string, hasExperimental bool) error { + if !hasExperimental { + if _, ok := cmd.Tags["experimental"]; ok { + return errors.New("only supported with experimental daemon") + } + } + + if cmdVersion, ok := cmd.Tags["version"]; ok && versions.LessThan(clientVersion, cmdVersion) { + return fmt.Errorf("only supported with daemon version >= %s", cmdVersion) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cmd/docker/docker_test.go b/vendor/github.com/docker/docker/cmd/docker/docker_test.go new file mode 100644 index 0000000..8738f60 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/docker_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/utils" +) + +func TestClientDebugEnabled(t *testing.T) { + defer utils.DisableDebug() + + cmd := newDockerCommand(&command.DockerCli{}) + cmd.Flags().Set("debug", "true") + + err := cmd.PersistentPreRunE(cmd, []string{}) + assert.NilError(t, err) + assert.Equal(t, os.Getenv("DEBUG"), "1") + assert.Equal(t, logrus.GetLevel(), logrus.DebugLevel) +} + +func TestExitStatusForInvalidSubcommandWithHelpFlag(t *testing.T) { + discard := ioutil.Discard + cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard)) + cmd.SetArgs([]string{"help", "invalid"}) + err := cmd.Execute() + assert.Error(t, err, "unknown help topic: invalid") +} diff --git a/vendor/github.com/docker/docker/cmd/docker/docker_windows.go b/vendor/github.com/docker/docker/cmd/docker/docker_windows.go new file mode 100644 index 0000000..9bc507e --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/docker_windows.go @@ -0,0 +1,18 @@ +package main + +import ( + "sync/atomic" + + _ "github.com/docker/docker/autogen/winresources/docker" +) + +//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" + +var dummy uintptr + +func init() { + // Ensure that this import is not removed by the linker. This is used to + // ensure that shell32.dll is loaded by the system loader, preventing + // go#15286 from triggering on Nano Server TP5. + atomic.LoadUintptr(&dummy) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/README.md b/vendor/github.com/docker/docker/cmd/dockerd/README.md new file mode 100644 index 0000000..a8c20b3 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker daemon's main function. + +This file provides first line CLI argument parsing and environment variable setting. diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon.go new file mode 100644 index 0000000..2f099e0 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon.go @@ -0,0 +1,524 @@ +package main + +import ( + "crypto/tls" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/docker/docker/api" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" + "github.com/docker/docker/api/server/router/container" + "github.com/docker/docker/api/server/router/image" + "github.com/docker/docker/api/server/router/network" + pluginrouter "github.com/docker/docker/api/server/router/plugin" + swarmrouter "github.com/docker/docker/api/server/router/swarm" + systemrouter "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/builder/dockerfile" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/cluster" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/libcontainerd" + dopts "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/listeners" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/pflag" +) + +const ( + flagDaemonConfigFile = "config-file" +) + +// DaemonCli represents the daemon CLI. +type DaemonCli struct { + *daemon.Config + configFile *string + flags *pflag.FlagSet + + api *apiserver.Server + d *daemon.Daemon + authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins +} + +// NewDaemonCli returns a daemon CLI +func NewDaemonCli() *DaemonCli { + return &DaemonCli{} +} + +func migrateKey(config *daemon.Config) (err error) { + // No migration necessary on Windows + if runtime.GOOS == "windows" { + return nil + } + + // Migrate trust key if exists at ~/.docker/key.json and owned by current user + oldPath := filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) + newPath := filepath.Join(getDaemonConfDir(config.Root), cliflags.DefaultTrustKeyFile) + if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) { + defer func() { + // Ensure old path is removed if no error occurred + if err == nil { + err = os.Remove(oldPath) + } else { + logrus.Warnf("Key migration failed, key file not removed at %s", oldPath) + os.Remove(newPath) + } + }() + + if err := system.MkdirAll(getDaemonConfDir(config.Root), os.FileMode(0644)); err != nil { + return fmt.Errorf("Unable to create daemon configuration directory: %s", err) + } + + newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return fmt.Errorf("error creating key file %q: %s", newPath, err) + } + defer newFile.Close() + + oldFile, err := os.Open(oldPath) + if err != nil { + return fmt.Errorf("error opening key file %q: %s", oldPath, err) + } + defer oldFile.Close() + + if _, err := io.Copy(newFile, oldFile); err != nil { + return fmt.Errorf("error copying key: %s", err) + } + + logrus.Infof("Migrated key from %s to %s", oldPath, newPath) + } + + return nil +} + +func (cli *DaemonCli) start(opts daemonOptions) (err error) { + stopc := make(chan bool) + defer close(stopc) + + // warn from uuid package when running the daemon + uuid.Loggerf = logrus.Warnf + + opts.common.SetDefaultOptions(opts.flags) + + if cli.Config, err = loadDaemonCliConfig(opts); err != nil { + return err + } + cli.configFile = &opts.configFile + cli.flags = opts.flags + + if opts.common.TrustKey == "" { + opts.common.TrustKey = filepath.Join( + getDaemonConfDir(cli.Config.Root), + cliflags.DefaultTrustKeyFile) + } + + if cli.Config.Debug { + utils.EnableDebug() + } + + if cli.Config.Experimental { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: jsonlog.RFC3339NanoFixed, + DisableColors: cli.Config.RawLogs, + }) + + if err := setDefaultUmask(); err != nil { + return fmt.Errorf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + return fmt.Errorf("Failed to set log opts: %v", err) + } + } + + // Create the daemon root before we create ANY other files (PID, or migrate keys) + // to ensure the appropriate ACL is set (particularly relevant on Windows) + if err := daemon.CreateDaemonRoot(cli.Config); err != nil { + return err + } + + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + defer func() { + if err := pf.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + serverConfig := &apiserver.Config{ + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + EnableCors: cli.Config.EnableCors, + CorsHeaders: cli.Config.CorsHeaders, + } + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + return err + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + api := apiserver.New(serverConfig) + cli.api = api + + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + return fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) + } + + proto := protoAddrParts[0] + addr := protoAddrParts[1] + + // It's a bad idea to bind to TCP without tlsverify. + if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { + logrus.Warn("[!] DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") + } + ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + return err + } + ls = wrapListeners(proto, ls) + // If we're binding to a TCP port, make sure that a container doesn't try to use it. + if proto == "tcp" { + if err := allocateDaemonPort(addr); err != nil { + return err + } + } + logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) + api.Accept(addr, ls...) + } + + if err := migrateKey(cli.Config); err != nil { + return err + } + + // FIXME: why is this down here instead of with the other TrustKey logic above? + cli.TrustKeyPath = opts.common.TrustKey + + registryService := registry.NewService(cli.Config.ServiceOptions) + containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) + if err != nil { + return err + } + signal.Trap(func() { + cli.stop() + <-stopc // wait for daemonCli.start() to return + }) + + d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + + if cli.Config.MetricsAddress != "" { + if !d.HasExperimental() { + return fmt.Errorf("metrics-addr is only supported when experimental is enabled") + } + if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { + return err + } + } + + name, _ := os.Hostname() + + c, err := cluster.New(cluster.Config{ + Root: cli.Config.Root, + Name: name, + Backend: d, + NetworkSubnetsProvider: d, + DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, + RuntimeRoot: cli.getSwarmRunRoot(), + }) + if err != nil { + logrus.Fatalf("Error creating cluster component: %v", err) + } + + // Restart all autostart containers which has a swarm endpoint + // and is not yet running now that we have successfully + // initialized the cluster. + d.RestartSwarmContainers() + + logrus.Info("Daemon has completed initialization") + + logrus.WithFields(logrus.Fields{ + "version": dockerversion.Version, + "commit": dockerversion.GitCommit, + "graphdriver": d.GraphDriverName(), + }).Info("Docker daemon") + + cli.d = d + + // initMiddlewares needs cli.d to be populated. Dont change this init order. + if err := cli.initMiddlewares(api, serverConfig); err != nil { + logrus.Fatalf("Error creating middlewares: %v", err) + } + d.SetCluster(c) + initRouter(api, d, c) + + cli.setupConfigReloadTrap() + + // The serve API routine never exits unless an error occurs + // We need to start it as a goroutine and wait on it so + // daemon doesn't exit + serveAPIWait := make(chan error) + go api.Wait(serveAPIWait) + + // after the daemon is done setting up we can notify systemd api + notifySystem() + + // Daemon is fully initialized and handling API traffic + // Wait for serve API to complete + errAPI := <-serveAPIWait + c.Cleanup() + shutdownDaemon(d) + containerdRemote.Cleanup() + if errAPI != nil { + return fmt.Errorf("Shutting down due to ServeAPI error: %v", errAPI) + } + + return nil +} + +func (cli *DaemonCli) reloadConfig() { + reload := func(config *daemon.Config) { + + // Revalidate and reload the authorization plugins + if err := validateAuthzPlugins(config.AuthorizationPlugins, cli.d.PluginStore); err != nil { + logrus.Fatalf("Error validating authorization plugin: %v", err) + return + } + cli.authzMiddleware.SetPlugins(config.AuthorizationPlugins) + + if err := cli.d.Reload(config); err != nil { + logrus.Errorf("Error reconfiguring the daemon: %v", err) + return + } + + if config.IsValueSet("debug") { + debugEnabled := utils.IsDebugEnabled() + switch { + case debugEnabled && !config.Debug: // disable debug + utils.DisableDebug() + cli.api.DisableProfiler() + case config.Debug && !debugEnabled: // enable debug + utils.EnableDebug() + cli.api.EnableProfiler() + } + + } + } + + if err := daemon.ReloadConfiguration(*cli.configFile, cli.flags, reload); err != nil { + logrus.Error(err) + } +} + +func (cli *DaemonCli) stop() { + cli.api.Close() +} + +// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case +// d.Shutdown() is waiting too long to kill container or worst it's +// blocked there +func shutdownDaemon(d *daemon.Daemon) { + shutdownTimeout := d.ShutdownTimeout() + ch := make(chan struct{}) + go func() { + d.Shutdown() + close(ch) + }() + if shutdownTimeout < 0 { + <-ch + logrus.Debug("Clean shutdown succeeded") + return + } + select { + case <-ch: + logrus.Debug("Clean shutdown succeeded") + case <-time.After(time.Duration(shutdownTimeout) * time.Second): + logrus.Error("Force shutdown daemon") + } +} + +func loadDaemonCliConfig(opts daemonOptions) (*daemon.Config, error) { + config := opts.daemonConfig + flags := opts.flags + config.Debug = opts.common.Debug + config.Hosts = opts.common.Hosts + config.LogLevel = opts.common.LogLevel + config.TLS = opts.common.TLS + config.TLSVerify = opts.common.TLSVerify + config.CommonTLSOptions = daemon.CommonTLSOptions{} + + if opts.common.TLSOptions != nil { + config.CommonTLSOptions.CAFile = opts.common.TLSOptions.CAFile + config.CommonTLSOptions.CertFile = opts.common.TLSOptions.CertFile + config.CommonTLSOptions.KeyFile = opts.common.TLSOptions.KeyFile + } + + if opts.configFile != "" { + c, err := daemon.MergeDaemonConfigurations(config, flags, opts.configFile) + if err != nil { + if flags.Changed(flagDaemonConfigFile) || !os.IsNotExist(err) { + return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", opts.configFile, err) + } + } + // the merged configuration can be nil if the config file didn't exist. + // leave the current configuration as it is if when that happens. + if c != nil { + config = c + } + } + + if err := daemon.ValidateConfiguration(config); err != nil { + return nil, err + } + + // Labels of the docker engine used to allow multiple values associated with the same key. + // This is deprecated in 1.13, and, be removed after 3 release cycles. + // The following will check the conflict of labels, and report a warning for deprecation. + // + // TODO: After 3 release cycles (1.16) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := daemon.GetConflictFreeLabels(config.Labels) + // if err != nil { + // return nil, err + // } + // config.Labels = newLabels + // + if _, err := daemon.GetConflictFreeLabels(config.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + // Regardless of whether the user sets it to true or false, if they + // specify TLSVerify at all then we need to turn on TLS + if config.IsValueSet(cliflags.FlagTLSVerify) { + config.TLS = true + } + + // ensure that the log level is the one set after merging configurations + cliflags.SetLogLevel(config.LogLevel) + + return config, nil +} + +func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { + decoder := runconfig.ContainerDecoder{} + + routers := []router.Router{ + // we need to add the checkpoint router before the container router or the DELETE gets masked + checkpointrouter.NewRouter(d, decoder), + container.NewRouter(d, decoder), + image.NewRouter(d, decoder), + systemrouter.NewRouter(d, c), + volume.NewRouter(d), + build.NewRouter(dockerfile.NewBuildManager(d)), + swarmrouter.NewRouter(c), + pluginrouter.NewRouter(d.PluginManager()), + } + + if d.NetworkControllerEnabled() { + routers = append(routers, network.NewRouter(d, c)) + } + + if d.HasExperimental() { + for _, r := range routers { + for _, route := range r.Routes() { + if experimental, ok := route.(router.ExperimentalRoute); ok { + experimental.Enable() + } + } + } + } + + s.InitRouter(utils.IsDebugEnabled(), routers...) +} + +func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config) error { + v := cfg.Version + + exp := middleware.NewExperimentalMiddleware(cli.d.HasExperimental()) + s.UseMiddleware(exp) + + vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) + s.UseMiddleware(vm) + + if cfg.EnableCors { + c := middleware.NewCORSMiddleware(cfg.CorsHeaders) + s.UseMiddleware(c) + } + + if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, cli.d.PluginStore); err != nil { + return fmt.Errorf("Error validating authorization plugin: %v", err) + } + cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, cli.d.PluginStore) + s.UseMiddleware(cli.authzMiddleware) + return nil +} + +// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver +// plugins present on the host and available to the daemon +func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { + for _, reqPlugin := range requestedPlugins { + if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.LOOKUP); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go new file mode 100644 index 0000000..623aaf4 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go @@ -0,0 +1,5 @@ +package main + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go new file mode 100644 index 0000000..a556daa --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go @@ -0,0 +1,11 @@ +// +build linux + +package main + +import systemdDaemon "github.com/coreos/go-systemd/daemon" + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { + // Tell the init daemon we are accepting requests + go systemdDaemon.SdNotify("READY=1") +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go new file mode 100644 index 0000000..974ba16 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go @@ -0,0 +1,85 @@ +// +build solaris + +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" +) + +const defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir(_ string) string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{} + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return filepath.Join(cli.Config.ExecRoot, "swarm") +} + +func allocateDaemonPort(addr string) error { + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go new file mode 100644 index 0000000..b364f87 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go @@ -0,0 +1,145 @@ +package main + +import ( + "testing" + + "github.com/Sirupsen/logrus" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/spf13/pflag" +) + +func defaultOptions(configFile string) daemonOptions { + opts := daemonOptions{ + daemonConfig: &daemon.Config{}, + flags: &pflag.FlagSet{}, + common: cliflags.NewCommonOptions(), + } + opts.common.InstallFlags(opts.flags) + opts.daemonConfig.InstallFlags(opts.flags) + opts.flags.StringVar(&opts.configFile, flagDaemonConfigFile, defaultDaemonConfigFile, "") + opts.configFile = configFile + return opts +} + +func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { + opts := defaultOptions("") + opts.common.Debug = true + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + if !loadedConfig.Debug { + t.Fatalf("expected debug to be copied from the common flags, got false") + } +} + +func TestLoadDaemonCliConfigWithTLS(t *testing.T) { + opts := defaultOptions("") + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + opts.common.TLS = true + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.CommonTLSOptions.CAFile, "/tmp/ca.pem") +} + +func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"labels": ["l3=foo"]}`) + defer tempFile.Remove() + configFile := tempFile.Name() + + opts := defaultOptions(configFile) + flags := opts.flags + + assert.NilError(t, flags.Set(flagDaemonConfigFile, configFile)) + assert.NilError(t, flags.Set("label", "l1=bar")) + assert.NilError(t, flags.Set("label", "l2=baz")) + + _, err := loadDaemonCliConfig(opts) + assert.Error(t, err, "as a flag and in the configuration file: labels") +} + +func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": true}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, true) +} + +func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": false}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, true) +} + +func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, false) +} + +func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"log-level": "warn"}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.LogLevel, "warn") + assert.Equal(t, logrus.GetLevel(), logrus.WarnLevel) +} + +func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { + content := `{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.CommonTLSOptions.CAFile, "/etc/certs/ca.pem") + assert.Equal(t, loadedConfig.LogConfig.Type, "syslog") +} + +func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { + content := `{ + "registry-mirrors": ["https://mirrors.docker.com"], + "insecure-registries": ["https://insecure.docker.com"] + }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + + assert.Equal(t, len(loadedConfig.Mirrors), 1) + assert.Equal(t, len(loadedConfig.InsecureRegistries), 1) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go new file mode 100644 index 0000000..bdce98b --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go @@ -0,0 +1,137 @@ +// +build !windows,!solaris + +package main + +import ( + "fmt" + "net" + "os" + "os/signal" + "path/filepath" + "strconv" + "syscall" + + "github.com/docker/docker/cmd/dockerd/hack" + "github.com/docker/docker/daemon" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" + "github.com/docker/libnetwork/portallocator" +) + +const defaultDaemonConfigFile = "/etc/docker/daemon.json" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir(_ string) string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + go func() { + for range c { + cli.reloadConfig() + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{ + libcontainerd.WithDebugLog(cli.Config.Debug), + libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust), + } + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + if daemon.UsingSystemd(cli.Config) { + args := []string{"--systemd-cgroup=true"} + opts = append(opts, libcontainerd.WithRuntimeArgs(args)) + } + if cli.Config.LiveRestoreEnabled { + opts = append(opts, libcontainerd.WithLiveRestore(true)) + } + opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary)) + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return filepath.Join(cli.Config.ExecRoot, "swarm") +} + +// allocateDaemonPort ensures that there are no containers +// that try to use any port allocated for the docker server. +func allocateDaemonPort(addr string) error { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + intPort, err := strconv.Atoi(port) + if err != nil { + return err + } + + var hostIPs []net.IP + if parsedIP := net.ParseIP(host); parsedIP != nil { + hostIPs = append(hostIPs, parsedIP) + } else if hostIPs, err = net.LookupIP(host); err != nil { + return fmt.Errorf("failed to lookup %s address in host specification", host) + } + + pa := portallocator.Get() + for _, hostIP := range hostIPs { + if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { + return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) + } + } + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + switch proto { + case "unix": + ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} + case "fd": + for i := range ls { + ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} + } + } + return ls +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go new file mode 100644 index 0000000..d66dba7 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go @@ -0,0 +1,114 @@ +// +build !windows,!solaris + +// TODO: Create new file for Solaris which tests config parameters +// as described in daemon/config_solaris.go + +package main + +import ( + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil/tempfile" + "testing" +) + +func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { + content := `{"log-opts": {"max-size": "1k"}}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.Debug = true + opts.common.LogLevel = "info" + assert.NilError(t, opts.flags.Set("selinux-enabled", "true")) + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + + assert.Equal(t, loadedConfig.Debug, true) + assert.Equal(t, loadedConfig.LogLevel, "info") + assert.Equal(t, loadedConfig.EnableSelinuxSupport, true) + assert.Equal(t, loadedConfig.LogConfig.Type, "json-file") + assert.Equal(t, loadedConfig.LogConfig.Config["max-size"], "1k") +} + +func TestLoadDaemonConfigWithNetwork(t *testing.T) { + content := `{"bip": "127.0.0.2", "ip": "127.0.0.1"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + + assert.Equal(t, loadedConfig.IP, "127.0.0.2") + assert.Equal(t, loadedConfig.DefaultIP.String(), "127.0.0.1") +} + +func TestLoadDaemonConfigWithMapOptions(t *testing.T) { + content := `{ + "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, + "log-opts": {"tag": "test"} +}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + expectedPath := "/var/lib/docker/discovery_certs/ca.pem" + assert.Equal(t, loadedConfig.ClusterOpts["kv.cacertfile"], expectedPath) + assert.NotNil(t, loadedConfig.LogConfig.Config) + assert.Equal(t, loadedConfig.LogConfig.Config["tag"], "test") +} + +func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { + content := `{ "userland-proxy": false }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + assert.Equal(t, loadedConfig.EnableUserlandProxy, false) + + // make sure reloading doesn't generate configuration + // conflicts after normalizing boolean values. + reload := func(reloadedConfig *daemon.Config) { + assert.Equal(t, reloadedConfig.EnableUserlandProxy, false) + } + assert.NilError(t, daemon.ReloadConfiguration(opts.configFile, opts.flags, reload)) +} + +func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + assert.Equal(t, loadedConfig.EnableUserlandProxy, true) +} + +func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) { + content := `{"disable-legacy-registry": true}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.V2Only, true) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go new file mode 100644 index 0000000..4cccd32 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go @@ -0,0 +1,92 @@ +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" +) + +var defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + return false +} + +// setDefaultUmask doesn't do anything on windows +func setDefaultUmask() error { + return nil +} + +func getDaemonConfDir(root string) string { + return filepath.Join(root, `\config`) +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { + if service != nil { + err := service.started() + if err != nil { + logrus.Fatal(err) + } + } +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { + if service != nil { + if err != nil { + logrus.Fatal(err) + } + service.stopped(err) + } +} + +// setupConfigReloadTrap configures a Win32 event to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + go func() { + sa := syscall.SecurityAttributes{ + Length: 0, + } + ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) + if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { + logrus.Debugf("Config reload - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + cli.reloadConfig() + } + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + return nil +} + +// getLibcontainerdRoot gets the root directory for libcontainerd to store its +// state. The Windows libcontainerd implementation does not need to write a spec +// or state to disk, so this is a no-op. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return "" +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return "" +} + +func allocateDaemonPort(addr string) error { + return nil +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker.go b/vendor/github.com/docker/docker/cmd/dockerd/docker.go new file mode 100644 index 0000000..60742ae --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker.go @@ -0,0 +1,110 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/daemon" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type daemonOptions struct { + version bool + configFile string + daemonConfig *daemon.Config + common *cliflags.CommonOptions + flags *pflag.FlagSet +} + +func newDaemonCommand() *cobra.Command { + opts := daemonOptions{ + daemonConfig: daemon.NewConfig(), + common: cliflags.NewCommonOptions(), + } + + cmd := &cobra.Command{ + Use: "dockerd [OPTIONS]", + Short: "A self-sufficient runtime for containers.", + SilenceUsage: true, + SilenceErrors: true, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + opts.flags = cmd.Flags() + return runDaemon(opts) + }, + } + cli.SetupRootCommand(cmd) + + flags := cmd.Flags() + flags.BoolVarP(&opts.version, "version", "v", false, "Print version information and quit") + flags.StringVar(&opts.configFile, flagDaemonConfigFile, defaultDaemonConfigFile, "Daemon configuration file") + opts.common.InstallFlags(flags) + opts.daemonConfig.InstallFlags(flags) + installServiceFlags(flags) + + return cmd +} + +func runDaemon(opts daemonOptions) error { + if opts.version { + showVersion() + return nil + } + + daemonCli := NewDaemonCli() + + // Windows specific settings as these are not defaulted. + if runtime.GOOS == "windows" { + if opts.daemonConfig.Pidfile == "" { + opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") + } + if opts.configFile == "" { + opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) + } + } + + // On Windows, this may be launching as a service or with an option to + // register the service. + stop, err := initService(daemonCli) + if err != nil { + logrus.Fatal(err) + } + + if stop { + return nil + } + + err = daemonCli.start(opts) + notifyShutdown(err) + return err +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) +} + +func main() { + if reexec.Init() { + return + } + + // Set terminal emulation based on platform as required. + _, stdout, stderr := term.StdStreams() + logrus.SetOutput(stderr) + + cmd := newDaemonCommand() + cmd.SetOutput(stdout) + if err := cmd.Execute(); err != nil { + fmt.Fprintf(stderr, "%s\n", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go new file mode 100644 index 0000000..19c5587 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go @@ -0,0 +1,18 @@ +package main + +import ( + "sync/atomic" + + _ "github.com/docker/docker/autogen/winresources/dockerd" +) + +//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" + +var dummy uintptr + +func init() { + // Ensure that this import is not removed by the linker. This is used to + // ensure that shell32.dll is loaded by the system loader, preventing + // go#15286 from triggering on Nano Server TP5. + atomic.LoadUintptr(&dummy) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go new file mode 100644 index 0000000..d4aa3dd --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go @@ -0,0 +1,121 @@ +// +build !windows + +package hack + +import "net" + +// MalformedHostHeaderOverride is a wrapper to be able +// to overcome the 400 Bad request coming from old docker +// clients that send an invalid Host header. +type MalformedHostHeaderOverride struct { + net.Listener +} + +// MalformedHostHeaderOverrideConn wraps the underlying unix +// connection and keeps track of the first read from http.Server +// which just reads the headers. +type MalformedHostHeaderOverrideConn struct { + net.Conn + first bool +} + +var closeConnHeader = []byte("\r\nConnection: close\r") + +// Read reads the first *read* request from http.Server to inspect +// the Host header. If the Host starts with / then we're talking to +// an old docker client which send an invalid Host header. To not +// error out in http.Server we rewrite the first bytes of the request +// to sanitize the Host header itself. +// In case we're not dealing with old docker clients the data is just passed +// to the server w/o modification. +func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) { + // http.Server uses a 4k buffer + if l.first && len(b) == 4096 { + // This keeps track of the first read from http.Server which just reads + // the headers + l.first = false + // The first read of the connection by http.Server is done limited to + // DefaultMaxHeaderBytes (usually 1 << 20) + 4096. + // Here we do the first read which gets us all the http headers to + // be inspected and modified below. + c, err := l.Conn.Read(b) + if err != nil { + return c, err + } + + var ( + start, end int + firstLineFeed = -1 + buf []byte + ) + for i := 0; i <= c-1-7; i++ { + if b[i] == '\n' && firstLineFeed == -1 { + firstLineFeed = i + } + if b[i] != '\n' { + continue + } + + if b[i+1] == '\r' && b[i+2] == '\n' { + return c, nil + } + + if b[i+1] != 'H' { + continue + } + if b[i+2] != 'o' { + continue + } + if b[i+3] != 's' { + continue + } + if b[i+4] != 't' { + continue + } + if b[i+5] != ':' { + continue + } + if b[i+6] != ' ' { + continue + } + if b[i+7] != '/' { + continue + } + // ensure clients other than the docker clients do not get this hack + if i != firstLineFeed { + return c, nil + } + start = i + 7 + // now find where the value ends + for ii, bbb := range b[start:c] { + if bbb == '\n' { + end = start + ii + break + } + } + buf = make([]byte, 0, c+len(closeConnHeader)-(end-start)) + // strip the value of the host header and + // inject `Connection: close` to ensure we don't reuse this connection + buf = append(buf, b[:start]...) + buf = append(buf, closeConnHeader...) + buf = append(buf, b[end:c]...) + copy(b, buf) + break + } + if len(buf) == 0 { + return c, nil + } + return len(buf), nil + } + return l.Conn.Read(b) +} + +// Accept makes the listener accepts connections and wraps the connection +// in a MalformedHostHeaderOverrideConn initilizing first to true. +func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return c, err + } + return &MalformedHostHeaderOverrideConn{c, true}, nil +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go new file mode 100644 index 0000000..1a0a60b --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go @@ -0,0 +1,124 @@ +// +build !windows + +package hack + +import ( + "bytes" + "io" + "net" + "strings" + "testing" +) + +type bufConn struct { + net.Conn + buf *bytes.Buffer +} + +func (bc *bufConn) Read(b []byte) (int, error) { + return bc.buf.Read(b) +} + +func TestHeaderOverrideHack(t *testing.T) { + tests := [][2][]byte{ + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), + }, + { + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + }, + } + + // Test for https://github.com/docker/docker/issues/23045 + h0 := "GET /foo\nUser-Agent: Docker\r\n\r\n" + h0 = h0 + strings.Repeat("a", 4096-len(h0)-1) + "\n" + tests = append(tests, [2][]byte{[]byte(h0), []byte(h0)}) + + for _, pair := range tests { + read := make([]byte, 4096) + client := &bufConn{ + buf: bytes.NewBuffer(pair[0]), + } + l := MalformedHostHeaderOverrideConn{client, true} + + n, err := l.Read(read) + if err != nil && err != io.EOF { + t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n])) + } + if !bytes.Equal(read[:n], pair[1][:n]) { + t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n]) + } + } +} + +func BenchmarkWithHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + l := MalformedHostHeaderOverrideConn{client, true} + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + l.first = true // make sure each subsequent run uses the hack parsing + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if n, err := l.Read(read); err != nil && err != io.EOF { + b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n])) + } + } + } + l.Close() + <-done +} + +func BenchmarkNoHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if _, err := client.Read(read); err != nil && err != io.EOF { + b.Fatal(err) + } + } + } + client.Close() + <-done +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/metrics.go b/vendor/github.com/docker/docker/cmd/dockerd/metrics.go new file mode 100644 index 0000000..0c88604 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/metrics.go @@ -0,0 +1,27 @@ +package main + +import ( + "net" + "net/http" + + "github.com/Sirupsen/logrus" + metrics "github.com/docker/go-metrics" +) + +func startMetricsServer(addr string) error { + if err := allocateDaemonPort(addr); err != nil { + return err + } + l, err := net.Listen("tcp", addr) + if err != nil { + return err + } + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + if err := http.Serve(l, mux); err != nil { + logrus.Errorf("serve metrics api: %s", err) + } + }() + return nil +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go b/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go new file mode 100644 index 0000000..64ad7fc --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +import ( + "github.com/spf13/pflag" +) + +func initService(daemonCli *DaemonCli) (bool, error) { + return false, nil +} + +func installServiceFlags(flags *pflag.FlagSet) { +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go new file mode 100644 index 0000000..dd37abc --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go @@ -0,0 +1,426 @@ +package main + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/spf13/pflag" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/debug" + "golang.org/x/sys/windows/svc/eventlog" + "golang.org/x/sys/windows/svc/mgr" +) + +var ( + flServiceName *string + flRegisterService *bool + flUnregisterService *bool + flRunService *bool + + setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle") + oldStderr syscall.Handle + panicFile *os.File + + service *handler +) + +const ( + // These should match the values in event_messages.mc. + eventInfo = 1 + eventWarn = 1 + eventError = 1 + eventDebug = 2 + eventPanic = 3 + eventFatal = 4 + + eventExtraOffset = 10 // Add this to any event to get a string that supports extended data +) + +func installServiceFlags(flags *pflag.FlagSet) { + flServiceName = flags.String("service-name", "docker", "Set the Windows service name") + flRegisterService = flags.Bool("register-service", false, "Register the service and exit") + flUnregisterService = flags.Bool("unregister-service", false, "Unregister the service and exit") + flRunService = flags.Bool("run-service", false, "") + flags.MarkHidden("run-service") +} + +type handler struct { + tosvc chan bool + fromsvc chan error + daemonCli *DaemonCli +} + +type etwHook struct { + log *eventlog.Log +} + +func (h *etwHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} + +func (h *etwHook) Fire(e *logrus.Entry) error { + var ( + etype uint16 + eid uint32 + ) + + switch e.Level { + case logrus.PanicLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventPanic + case logrus.FatalLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventFatal + case logrus.ErrorLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventError + case logrus.WarnLevel: + etype = windows.EVENTLOG_WARNING_TYPE + eid = eventWarn + case logrus.InfoLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventInfo + case logrus.DebugLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventDebug + default: + return errors.New("unknown level") + } + + // If there is additional data, include it as a second string. + exts := "" + if len(e.Data) > 0 { + fs := bytes.Buffer{} + for k, v := range e.Data { + fs.WriteString(k) + fs.WriteByte('=') + fmt.Fprint(&fs, v) + fs.WriteByte(' ') + } + + exts = fs.String()[:fs.Len()-1] + eid += eventExtraOffset + } + + if h.log == nil { + fmt.Fprintf(os.Stderr, "%s [%s]\n", e.Message, exts) + return nil + } + + var ( + ss [2]*uint16 + err error + ) + + ss[0], err = syscall.UTF16PtrFromString(e.Message) + if err != nil { + return err + } + + count := uint16(1) + if exts != "" { + ss[1], err = syscall.UTF16PtrFromString(exts) + if err != nil { + return err + } + + count++ + } + + return windows.ReportEvent(h.log.Handle, etype, 0, eid, 0, count, 0, &ss[0], nil) +} + +func getServicePath() (string, error) { + p, err := exec.LookPath(os.Args[0]) + if err != nil { + return "", err + } + return filepath.Abs(p) +} + +func registerService() error { + p, err := getServicePath() + if err != nil { + return err + } + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + depends := []string{} + + // This dependency is required on build 14393 (RS1) + // it is added to the platform in newer builds + if system.GetOSVersion().Build == 14393 { + depends = append(depends, "ConDrv") + } + + c := mgr.Config{ + ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, + StartType: mgr.StartAutomatic, + ErrorControl: mgr.ErrorNormal, + Dependencies: depends, + DisplayName: "Docker Engine", + } + + // Configure the service to launch with the arguments that were just passed. + args := []string{"--run-service"} + for _, a := range os.Args[1:] { + if a != "--register-service" && a != "--unregister-service" { + args = append(args, a) + } + } + + s, err := m.CreateService(*flServiceName, p, c, args...) + if err != nil { + return err + } + defer s.Close() + + // See http://stackoverflow.com/questions/35151052/how-do-i-configure-failure-actions-of-a-windows-service-written-in-go + const ( + scActionNone = 0 + scActionRestart = 1 + scActionReboot = 2 + scActionRunCommand = 3 + + serviceConfigFailureActions = 2 + ) + + type serviceFailureActions struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions uintptr + } + + type scAction struct { + Type uint32 + Delay uint32 + } + t := []scAction{ + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionNone}, + } + lpInfo := serviceFailureActions{ResetPeriod: uint32(24 * time.Hour / time.Second), ActionsCount: uint32(3), Actions: uintptr(unsafe.Pointer(&t[0]))} + err = windows.ChangeServiceConfig2(s.Handle, serviceConfigFailureActions, (*byte)(unsafe.Pointer(&lpInfo))) + if err != nil { + return err + } + + err = eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) + if err != nil { + return err + } + + return nil +} + +func unregisterService() error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + s, err := m.OpenService(*flServiceName) + if err != nil { + return err + } + defer s.Close() + + eventlog.Remove(*flServiceName) + err = s.Delete() + if err != nil { + return err + } + return nil +} + +func initService(daemonCli *DaemonCli) (bool, error) { + if *flUnregisterService { + if *flRegisterService { + return true, errors.New("--register-service and --unregister-service cannot be used together") + } + return true, unregisterService() + } + + if *flRegisterService { + return true, registerService() + } + + if !*flRunService { + return false, nil + } + + interactive, err := svc.IsAnInteractiveSession() + if err != nil { + return false, err + } + + h := &handler{ + tosvc: make(chan bool), + fromsvc: make(chan error), + daemonCli: daemonCli, + } + + var log *eventlog.Log + if !interactive { + log, err = eventlog.Open(*flServiceName) + if err != nil { + return false, err + } + } + + logrus.AddHook(&etwHook{log}) + logrus.SetOutput(ioutil.Discard) + + service = h + go func() { + if interactive { + err = debug.Run(*flServiceName, h) + } else { + err = svc.Run(*flServiceName, h) + } + + h.fromsvc <- err + }() + + // Wait for the first signal from the service handler. + err = <-h.fromsvc + if err != nil { + return false, err + } + return false, nil +} + +func (h *handler) started() error { + // This must be delayed until daemonCli initializes Config.Root + err := initPanicFile(filepath.Join(h.daemonCli.Config.Root, "panic.log")) + if err != nil { + return err + } + + h.tosvc <- false + return nil +} + +func (h *handler) stopped(err error) { + logrus.Debugf("Stopping service: %v", err) + h.tosvc <- err != nil + <-h.fromsvc +} + +func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { + s <- svc.Status{State: svc.StartPending, Accepts: 0} + // Unblock initService() + h.fromsvc <- nil + + // Wait for initialization to complete. + failed := <-h.tosvc + if failed { + logrus.Debug("Aborting service start due to failure during initialization") + return true, 1 + } + + s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} + logrus.Debug("Service running") +Loop: + for { + select { + case failed = <-h.tosvc: + break Loop + case c := <-r: + switch c.Cmd { + case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE): + h.daemonCli.reloadConfig() + case svc.Interrogate: + s <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + s <- svc.Status{State: svc.StopPending, Accepts: 0} + h.daemonCli.stop() + } + } + } + + removePanicFile() + if failed { + return true, 1 + } + return false, 0 +} + +func initPanicFile(path string) error { + var err error + panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) + if err != nil { + return err + } + + st, err := panicFile.Stat() + if err != nil { + return err + } + + // If there are contents in the file already, move the file out of the way + // and replace it. + if st.Size() > 0 { + panicFile.Close() + os.Rename(path, path+".old") + panicFile, err = os.Create(path) + if err != nil { + return err + } + } + + // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to + // it when it panics. Remember the old stderr to restore it before removing + // the panic file. + sh := syscall.STD_ERROR_HANDLE + h, err := syscall.GetStdHandle(sh) + if err != nil { + return err + } + + oldStderr = h + + r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd())) + if r == 0 && err != nil { + return err + } + + return nil +} + +func removePanicFile() { + if st, err := panicFile.Stat(); err == nil { + if st.Size() == 0 { + sh := syscall.STD_ERROR_HANDLE + setStdHandle.Call(uintptr(sh), uintptr(oldStderr)) + panicFile.Close() + os.Remove(panicFile.Name()) + } + } +} diff --git a/vendor/github.com/docker/docker/container/archive.go b/vendor/github.com/docker/docker/container/archive.go new file mode 100644 index 0000000..56e6598 --- /dev/null +++ b/vendor/github.com/docker/docker/container/archive.go @@ -0,0 +1,76 @@ +package container + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" +) + +// ResolvePath resolves the given path in the container to a resource on the +// host. Returns a resolved path (absolute path to the resource on the host), +// the absolute path to the resource relative to the container's rootfs, and +// an error if the path points to outside the container's rootfs. +func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + if err != nil { + return "", "", err + } + + // Consider the given path as an absolute path in the container. + absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // Split the absPath into its Directory and Base components. We will + // resolve the dir in the scope of the container then append the base. + dirPath, basePath := filepath.Split(absPath) + + resolvedDirPath, err := container.GetResourcePath(dirPath) + if err != nil { + return "", "", err + } + + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + + return resolvedPath, absPath, nil +} + +// StatPath is the unexported version of StatPath. Locks and mounts should +// be acquired before calling this method and the given path should be fully +// resolved to a path on the host corresponding to the given absolute path +// inside the container. +func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + lstat, err := os.Lstat(resolvedPath) + if err != nil { + return nil, err + } + + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs. + hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = filepath.Rel(container.BaseFS, hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go new file mode 100644 index 0000000..fc4fe27 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container.go @@ -0,0 +1,1103 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + agentexec "github.com/docker/swarmkit/agent/exec" + "github.com/opencontainers/runc/libcontainer/label" +) + +const configFileName = "config.v2.json" + +const ( + // DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container. + DefaultStopTimeout = 10 +) + +var ( + errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") + errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") +) + +// DetachError is special error which returned in case of container detach. +type DetachError struct{} + +func (DetachError) Error() string { + return "detached from container" +} + +// CommonContainer holds the fields for a container which are +// applicable across all platforms supported by the daemon. +type CommonContainer struct { + StreamConfig *stream.Config + // embed for Container to support states directly. + *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. + BaseFS string `json:"-"` // Path to the graphdriver mountpoint + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Managed bool + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + SecretStore agentexec.SecretGetter `json:"-"` + SecretReferences []*swarmtypes.SecretReference + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext +} + +// NewBaseContainer creates a new container with its +// basic configuration. +func NewBaseContainer(id, root string) *Container { + return &Container{ + CommonContainer: CommonContainer{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volume.MountPoint), + StreamConfig: stream.NewConfig(), + attachContext: &attachContext{}, + }, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +// ToDisk saves the container configuration on disk. +func (container *Container) ToDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return err + } + defer jsonSource.Close() + + enc := json.NewEncoder(jsonSource) + + // Save container settings + if err := enc.Encode(container); err != nil { + return err + } + + return container.WriteHostConfig() +} + +// ToDiskLocking saves the container configuration on disk in a thread safe way. +func (container *Container) ToDiskLocking() error { + container.Lock() + err := container.ToDisk() + container.Unlock() + return err +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container. +func (container *Container) WriteHostConfig() error { + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(&container.HostConfig) +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { + if container.Config.WorkingDir == "" { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + + cleanPath := cleanResourcePath(path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + + // Log this here on the daemon side as there's otherwise no indication apart + // from the error being propagated all the way back to the client. This makes + // debugging significantly easier and clearly indicates the error comes from the daemon. + if e != nil { + logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) + } + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + container.RestartManager().Cancel() +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// CheckpointDir returns the directory checkpoints are stored in +func (container *Container) CheckpointDir() string { + return filepath.Join(container.Root, "checkpoints") +} + +// StartLogger starts a new logger driver for the container. +func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { + c, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("Failed to get logging factory: %v", err) + } + ctx := logger.Context{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + DaemonName: "docker", + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + return c(ctx) +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// Attach connects to the container's TTY, delegating to standard +// streams or websockets depending on the configuration. +func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + ctx := container.InitAttachContext() + return AttachStreams(ctx, container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) +} + +// AttachStreams connects streams to a TTY. +// Used by exec too. Should this move somewhere else? +func AttachStreams(ctx context.Context, streamConfig *stream.Config, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + var ( + cStdout, cStderr io.ReadCloser + cStdin io.WriteCloser + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if stdin != nil && openStdin { + cStdin = streamConfig.StdinPipe() + wg.Add(1) + } + + if stdout != nil { + cStdout = streamConfig.StdoutPipe() + wg.Add(1) + } + + if stderr != nil { + cStderr = streamConfig.StderrPipe() + wg.Add(1) + } + + // Connect stdin of container to the http conn. + go func() { + if stdin == nil || !openStdin { + return + } + logrus.Debug("attach: stdin: begin") + + var err error + if tty { + _, err = copyEscapable(cStdin, stdin, keys) + } else { + _, err = io.Copy(cStdin, stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if stdinOnce && !tty { + cStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + } + logrus.Debug("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := io.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if stdin != nil { + stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", stdout, cStdout) + go attachStream("stderr", stderr, cStderr) + + return promise.Go(func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cStdin != nil { + cStdin.Close() + } + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +// Code c/c from io.Copy() modified to handle escape sequence +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + // Default keys : ctrl-p ctrl-q + keys = []byte{16, 17} + } + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + preservBuf := []byte{} + for i, key := range keys { + preservBuf = append(preservBuf, buf[0:nr]...) + if nr != 1 || buf[0] != key { + break + } + if i == len(keys)-1 { + src.Close() + return 0, DetachError{} + } + nr, er = src.Read(buf) + } + var nw int + var ew error + if len(preservBuf) > 0 { + nw, ew = dst.Write(preservBuf) + nr = len(preservBuf) + } else { + // ---- End of docker + nw, ew = dst.Write(buf[0:nr]) + } + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} + +// ShouldRestart decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestart() bool { + shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) + return shouldRestart +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. +func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + CopyData: volume.DefaultCopyMode, + } +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error { + var errors []string + for _, volumeMount := range container.MountPoints { + // Check if the mounpoint has an ID, this is currently the best way to tell if it's actually mounted + // TODO(cpuguyh83): there should be a better way to handle this + if volumeMount.Volume != nil && volumeMount.ID != "" { + if err := volumeMount.Volume.Unmount(volumeMount.ID); err != nil { + errors = append(errors, err.Error()) + continue + } + volumeMount.ID = "" + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + } + if len(errors) > 0 { + return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; ")) + } + return nil +} + +// IsDestinationMounted checks whether a path is mounted on the container or not. +func (container *Container) IsDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +// StopSignal returns the signal used to stop the container. +func (container *Container) StopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) +} + +// StopTimeout returns the timeout (in seconds) used to stop the container. +func (container *Container) StopTimeout() int { + if container.Config.StopTimeout != nil { + return *container.Config.StopTimeout + } + return DefaultStopTimeout +} + +// InitDNSHostConfig ensures that the dns fields are never nil. +// New containers don't ever have those fields nil, +// but pre created containers can still have those nil values. +// The non-recommended host configuration in the start api can +// make these fields nil again, this corrects that issue until +// we remove that behavior for good. +// See https://github.com/docker/docker/pull/17779 +// for a more detailed explanation on why we don't want that. +func (container *Container) InitDNSHostConfig() { + container.Lock() + defer container.Unlock() + if container.HostConfig.DNS == nil { + container.HostConfig.DNS = make([]string, 0) + } + + if container.HostConfig.DNSSearch == nil { + container.HostConfig.DNSSearch = make([]string, 0) + } + + if container.HostConfig.DNSOptions == nil { + container.HostConfig.DNSOptions = make([]string, 0) + } +} + +// GetEndpointInNetwork returns the container's endpoint to the provided network. +func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { + endpointName := strings.TrimPrefix(container.Name, "/") + return n.EndpointByName(endpointName) +} + +func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + if len(networkSettings.Ports) == 0 { + pm, err := getEndpointPortMapInfo(ep) + if err != nil { + return err + } + networkSettings.Ports = pm + } + return nil +} + +func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { + pm := nat.PortMap{} + driverInfo, err := ep.DriverInfo() + if err != nil { + return pm, err + } + + if driverInfo == nil { + // It is not an error for epInfo to be nil + return pm, nil + } + + if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { + if exposedPorts, ok := expData.([]types.TransportPort); ok { + for _, tp := range exposedPorts { + natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) + if err != nil { + return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) + } + pm[natPort] = nil + } + } + } + + mapData, ok := driverInfo[netlabel.PortMap] + if !ok { + return pm, nil + } + + if portMapping, ok := mapData.([]types.PortBinding); ok { + for _, pp := range portMapping { + natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) + if err != nil { + return pm, err + } + natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} + pm[natPort] = append(pm[natPort], natBndg) + } + } + + return pm, nil +} + +// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox +func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { + pm := nat.PortMap{} + if sb == nil { + return pm + } + + for _, ep := range sb.Endpoints() { + pm, _ = getEndpointPortMapInfo(ep) + if len(pm) > 0 { + break + } + } + return pm +} + +// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. +func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + + if _, ok := networkSettings.Networks[n.Name()]; !ok { + networkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + networkSettings.Networks[n.Name()].NetworkID = n.ID() + networkSettings.Networks[n.Name()].EndpointID = ep.ID() + + iface := epInfo.Iface() + if iface == nil { + return nil + } + + if iface.MacAddress() != nil { + networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() + } + + if iface.Address() != nil { + ones, _ := iface.Address().Mask.Size() + networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() + networkSettings.Networks[n.Name()].IPPrefixLen = ones + } + + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 + } + + return nil +} + +// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. +func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.buildPortMapInfo(ep); err != nil { + return err + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + if epInfo.Gateway() != nil { + container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() + } + if epInfo.GatewayIPv6().To16() != nil { + container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() + } + + return nil +} + +// UpdateSandboxNetworkSettings updates the sandbox ID and Key. +func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { + container.NetworkSettings.SandboxID = sb.ID() + container.NetworkSettings.SandboxKey = sb.Key() + return nil +} + +// BuildJoinOptions builds endpoint Join options from a given network. +func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { + var joinOptions []libnetwork.EndpointOption + if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { + for _, str := range epConfig.Links { + name, alias, err := runconfigopts.ParseLink(str) + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) + } + } + return joinOptions, nil +} + +// BuildCreateEndpointOptions builds endpoint options from a given network. +func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) { + var ( + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + createOptions []libnetwork.EndpointOption + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + + if (!container.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) || + container.NetworkSettings.IsAnonymousEndpoint { + createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) + } + + if epConfig != nil { + ipam := epConfig.IPAMConfig + if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "" || len(ipam.LinkLocalIPs) > 0) { + var ipList []net.IP + for _, ips := range ipam.LinkLocalIPs { + if ip := net.ParseIP(ips); ip != nil { + ipList = append(ipList, ip) + } + } + createOptions = append(createOptions, + libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), ipList, nil)) + } + + for _, alias := range epConfig.Aliases { + createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) + } + } + + if container.NetworkSettings.Service != nil { + svcCfg := container.NetworkSettings.Service + + var vip string + if svcCfg.VirtualAddresses[n.ID()] != nil { + vip = svcCfg.VirtualAddresses[n.ID()].IPv4 + } + + var portConfigs []*libnetwork.PortConfig + for _, portConfig := range svcCfg.ExposedPorts { + portConfigs = append(portConfigs, &libnetwork.PortConfig{ + Name: portConfig.Name, + Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + + createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()])) + } + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) + } + + // configs that are applicable only for the endpoint in the network + // to which container was connected to on docker run. + // Ideally all these network-specific endpoint configurations must be moved under + // container.NetworkSettings.Networks[n.Name()] + if n.Name() == container.HostConfig.NetworkMode.NetworkName() || + (n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) { + if container.Config.MacAddress != "" { + mac, err := net.ParseMAC(container.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + } + + // Port-mapping rules belong to the container & applicable only to non-internal networks + portmaps := GetSandboxPortMapInfo(sb) + if n.Info().Internal() || len(portmaps) > 0 { + return createOptions, nil + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + var dns []string + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemonDNS) > 0 { + dns = daemonDNS + } + + if len(dns) > 0 { + createOptions = append(createOptions, + libnetwork.CreateOptionDNS(dns)) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionPortMapping(pbList), + libnetwork.CreateOptionExposedPorts(exposeList)) + + return createOptions, nil +} + +// UpdateMonitor updates monitor configure for running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager().(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns hostname and optional domain appended to it. +func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instance connected to container. +func (container *Container) RestartManager() restartmanager.RestartManager { + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) + } + return container.restartManager +} + +// ResetRestartManager initializes new restartmanager based on container config +func (container *Container) ResetRestartManager(resetCount bool) { + if container.restartManager != nil { + container.restartManager.Cancel() + } + if resetCount { + container.RestartCount = 0 + } + container.restartManager = nil +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initializes or returns existing context for attach calls to +// track container liveness. +func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancels attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} + +func (container *Container) startLogging() error { + if container.HostConfig.LogConfig.Type == "none" { + return nil // do not start logging routines + } + + l, err := container.StartLogger(container.HostConfig.LogConfig) + if err != nil { + return fmt.Errorf("Failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +// StdinPipe gets the stdin stream of the container +func (container *Container) StdinPipe() io.WriteCloser { + return container.StreamConfig.StdinPipe() +} + +// StdoutPipe gets the stdout stream of the container +func (container *Container) StdoutPipe() io.ReadCloser { + return container.StreamConfig.StdoutPipe() +} + +// StderrPipe gets the stderr stream of the container +func (container *Container) StderrPipe() io.ReadCloser { + return container.StreamConfig.StderrPipe() +} + +// CloseStreams closes the container's stdio streams +func (container *Container) CloseStreams() error { + return container.StreamConfig.CloseStreams() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error { + if err := container.startLogging(); err != nil { + container.Reset(false) + return err + } + + container.StreamConfig.CopyToPipe(iop) + + if container.StreamConfig.Stdin() == nil && !container.Config.Tty { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("error closing stdin: %+v", err) + } + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/container/container_linux.go b/vendor/github.com/docker/docker/container/container_linux.go new file mode 100644 index 0000000..4d4c16b --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_linux.go @@ -0,0 +1,9 @@ +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + return unix.Unmount(path, unix.MNT_DETACH) +} diff --git a/vendor/github.com/docker/docker/container/container_notlinux.go b/vendor/github.com/docker/docker/container/container_notlinux.go new file mode 100644 index 0000000..f65653e --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_notlinux.go @@ -0,0 +1,23 @@ +// +build solaris freebsd + +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + //Solaris and FreeBSD do not support the lazy unmount or MNT_DETACH feature. + // Therefore there are separate definitions for this. + return unix.Unmount(path, 0) +} + +// SecretMount returns the mount for the secret path +func (container *Container) SecretMount() *Mount { + return nil +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + return nil +} diff --git a/vendor/github.com/docker/docker/container/container_unit_test.go b/vendor/github.com/docker/docker/container/container_unit_test.go new file mode 100644 index 0000000..f301f25 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_unit_test.go @@ -0,0 +1,60 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/signal" +) + +func TestContainerStopSignal(t *testing.T) { + c := &Container{ + CommonContainer: CommonContainer{ + Config: &container.Config{}, + }, + } + + def, err := signal.ParseSignal(signal.DefaultStopSignal) + if err != nil { + t.Fatal(err) + } + + s := c.StopSignal() + if s != int(def) { + t.Fatalf("Expected %v, got %v", def, s) + } + + c = &Container{ + CommonContainer: CommonContainer{ + Config: &container.Config{StopSignal: "SIGKILL"}, + }, + } + s = c.StopSignal() + if s != 9 { + t.Fatalf("Expected 9, got %v", s) + } +} + +func TestContainerStopTimeout(t *testing.T) { + c := &Container{ + CommonContainer: CommonContainer{ + Config: &container.Config{}, + }, + } + + s := c.StopTimeout() + if s != DefaultStopTimeout { + t.Fatalf("Expected %v, got %v", DefaultStopTimeout, s) + } + + stopTimeout := 15 + c = &Container{ + CommonContainer: CommonContainer{ + Config: &container.Config{StopTimeout: &stopTimeout}, + }, + } + s = c.StopSignal() + if s != 15 { + t.Fatalf("Expected 15, got %v", s) + } +} diff --git a/vendor/github.com/docker/docker/container/container_unix.go b/vendor/github.com/docker/docker/container/container_unix.go new file mode 100644 index 0000000..4f6b795 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_unix.go @@ -0,0 +1,448 @@ +// +build linux freebsd solaris + +package container + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/label" + "golang.org/x/sys/unix" +) + +const ( + // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container + DefaultSHMSize int64 = 67108864 + containerSecretMountPath = "/run/secrets" +) + +// Container holds the fields specific to unixen implementations. +// See CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. + OOMKilled bool +} + +// CreateDaemonEnvironment returns the list of all environment variables given the list of +// environment variables related to links. +// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. +// The defaults set here do not override the values in container.Config.Env +func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { + // Setup environment + env := []string{ + "PATH=" + system.DefaultPathEnv, + "HOSTNAME=" + container.Config.Hostname, + } + if tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// NetworkMounts returns the list of network mounts. +func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + if !container.HasMountFor("/etc/resolv.conf") { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + if !container.HasMountFor("/etc/hostname") { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + if !container.HasMountFor("/etc/hosts") { + label.Relabel(container.HostsPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + return mounts +} + +// SecretMountPath returns the path of the secret mount for the container +func (container *Container) SecretMountPath() string { + return filepath.Join(container.Root, "secrets") +} + +// CopyImagePathContent copies files in destination to the volume. +func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + id := stringid.GenerateNonCryptoID() + path, err := v.Mount(id) + if err != nil { + return err + } + + defer func() { + if err := v.Unmount(id); err != nil { + logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + } + }() + if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP { + return err + } + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.GetRootResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + return exists +} + +// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { + if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { + return + } + + var warnings []string + + if !container.HasMountFor("/dev/shm") { + shmPath, err := container.ShmResourcePath() + if err != nil { + logrus.Error(err) + warnings = append(warnings, err.Error()) + } else if shmPath != "" { + if err := unmount(shmPath); err != nil && !os.IsNotExist(err) { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) + } + + } + } + + if len(warnings) > 0 { + logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + } +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + + if !container.HasMountFor("/dev/shm") { + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: string(volume.DefaultPropagationMode), + }) + } + + return mounts +} + +// SecretMount returns the mount for the secret path +func (container *Container) SecretMount() *Mount { + if len(container.SecretReferences) > 0 { + return &Mount{ + Source: container.SecretMountPath(), + Destination: containerSecretMountPath, + Writable: false, + } + } + + return nil +} + +// UnmountSecrets unmounts the local tmpfs for secrets +func (container *Container) UnmountSecrets() error { + if _, err := os.Stat(container.SecretMountPath()); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return detachMounted(container.SecretMountPath()) +} + +// UpdateContainer updates configuration of a container. +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + + // update resources of container + resources := hostConfig.Resources + cResources := &container.HostConfig.Resources + if resources.BlkioWeight != 0 { + cResources.BlkioWeight = resources.BlkioWeight + } + if resources.CPUShares != 0 { + cResources.CPUShares = resources.CPUShares + } + if resources.CPUPeriod != 0 { + cResources.CPUPeriod = resources.CPUPeriod + } + if resources.CPUQuota != 0 { + cResources.CPUQuota = resources.CPUQuota + } + if resources.CpusetCpus != "" { + cResources.CpusetCpus = resources.CpusetCpus + } + if resources.CpusetMems != "" { + cResources.CpusetMems = resources.CpusetMems + } + if resources.Memory != 0 { + // if memory limit smaller than already set memoryswap limit and doesn't + // update the memoryswap limit, then error out. + if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 { + return fmt.Errorf("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time") + } + cResources.Memory = resources.Memory + } + if resources.MemorySwap != 0 { + cResources.MemorySwap = resources.MemorySwap + } + if resources.MemoryReservation != 0 { + cResources.MemoryReservation = resources.MemoryReservation + } + if resources.KernelMemory != 0 { + cResources.KernelMemory = resources.KernelMemory + } + + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving updated container: %v", err) + return err + } + + return nil +} + +// DetachAndUnmount uses a detached mount on all mount destinations, then +// unmounts each volume normally. +// This is used from daemon/archive for `docker cp` +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + networkMounts := container.NetworkMounts() + mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts)) + + for _, mntPoint := range container.MountPoints { + dest, err := container.GetResourcePath(mntPoint.Destination) + if err != nil { + logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err) + continue + } + mountPaths = append(mountPaths, dest) + } + + for _, m := range networkMounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err) + continue + } + mountPaths = append(mountPaths, dest) + } + + for _, mountPath := range mountPaths { + if err := detachMounted(mountPath); err != nil { + logrus.Warnf("%s unmountVolumes: Failed to do lazy umount fo volume '%s': %v", container.ID, mountPath, err) + } + } + return container.UnmountVolumes(volumeEventLog) +} + +// copyExistingContents copies from the source to the destination and +// ensures the ownership is appropriately set. +func copyExistingContents(source, destination string) error { + volList, err := ioutil.ReadDir(source) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the source volume is empty, copies files from the root into the volume + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + } + } + return copyOwnership(source, destination) +} + +// copyOwnership copies the permissions and uid:gid of the source file +// to the destination file +func copyOwnership(source, destination string) error { + stat, err := system.Stat(source) + if err != nil { + return err + } + + if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { + return err + } + + return os.Chmod(destination, os.FileMode(stat.Mode())) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + for dest, data := range container.HostConfig.Tmpfs { + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + for dest, mnt := range container.MountPoints { + if mnt.Type == mounttypes.TypeTmpfs { + data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + } + return mounts, nil +} + +// cleanResourcePath cleans a resource path and prepares to combine with mnt path +func cleanResourcePath(path string) string { + return filepath.Join(string(os.PathSeparator), path) +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return false +} diff --git a/vendor/github.com/docker/docker/container/container_windows.go b/vendor/github.com/docker/docker/container/container_windows.go new file mode 100644 index 0000000..1025836 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_windows.go @@ -0,0 +1,111 @@ +// +build windows + +package container + +import ( + "fmt" + "os" + "path/filepath" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/utils" +) + +// Container holds fields specific to the Windows implementation. See +// CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int +} + +// CreateDaemonEnvironment creates a new environment variable slice for this container. +func (container *Container) CreateDaemonEnvironment(_ bool, linkedEnv []string) []string { + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + return utils.ReplaceOrAppendEnvValues(linkedEnv, container.Config.Env) +} + +// UnmountIpcMounts unmounts Ipc related mounts. +// This is a NOOP on windows. +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { +} + +// IpcMounts returns the list of Ipc related mounts. +func (container *Container) IpcMounts() []Mount { + return nil +} + +// SecretMount returns the mount for the secret path +func (container *Container) SecretMount() *Mount { + return nil +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + return nil +} + +// DetachAndUnmount unmounts all volumes. +// On Windows it only delegates to `UnmountVolumes` since there is nothing to +// force unmount. +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + return container.UnmountVolumes(volumeEventLog) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + return mounts, nil +} + +// UpdateContainer updates configuration of a container +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + resources := hostConfig.Resources + if resources.BlkioWeight != 0 || resources.CPUShares != 0 || + resources.CPUPeriod != 0 || resources.CPUQuota != 0 || + resources.CpusetCpus != "" || resources.CpusetMems != "" || + resources.Memory != 0 || resources.MemorySwap != 0 || + resources.MemoryReservation != 0 || resources.KernelMemory != 0 { + return fmt.Errorf("Resource updating isn't supported on Windows") + } + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + return nil +} + +// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares +// to combine with a volume path +func cleanResourcePath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(os.PathSeparator), path) +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + return nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return true +} diff --git a/vendor/github.com/docker/docker/container/health.go b/vendor/github.com/docker/docker/container/health.go new file mode 100644 index 0000000..6e3cd12 --- /dev/null +++ b/vendor/github.com/docker/docker/container/health.go @@ -0,0 +1,49 @@ +package container + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" +) + +// Health holds the current container health-check state +type Health struct { + types.Health + stop chan struct{} // Write struct{} to stop the monitor +} + +// String returns a human-readable description of the health-check state +func (s *Health) String() string { + // This happens when the container is being shutdown and the monitor has stopped + // or the monitor has yet to be setup. + if s.stop == nil { + return types.Unhealthy + } + + switch s.Status { + case types.Starting: + return "health: starting" + default: // Healthy and Unhealthy are clear on their own + return s.Status + } +} + +// OpenMonitorChannel creates and returns a new monitor channel. If there already is one, +// it returns nil. +func (s *Health) OpenMonitorChannel() chan struct{} { + if s.stop == nil { + logrus.Debug("OpenMonitorChannel") + s.stop = make(chan struct{}) + return s.stop + } + return nil +} + +// CloseMonitorChannel closes any existing monitor channel. +func (s *Health) CloseMonitorChannel() { + if s.stop != nil { + logrus.Debug("CloseMonitorChannel: waiting for probe to stop") + close(s.stop) + s.stop = nil + logrus.Debug("CloseMonitorChannel done") + } +} diff --git a/vendor/github.com/docker/docker/container/history.go b/vendor/github.com/docker/docker/container/history.go new file mode 100644 index 0000000..c80c2aa --- /dev/null +++ b/vendor/github.com/docker/docker/container/history.go @@ -0,0 +1,30 @@ +package container + +import "sort" + +// History is a convenience type for storing a list of containers, +// sorted by creation date in descendant order. +type History []*Container + +// Len returns the number of containers in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two containers and returns true if the second one +// was created before the first one. +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +// Swap switches containers i and j positions in the history. +func (history *History) Swap(i, j int) { + containers := *history + containers[i], containers[j] = containers[j], containers[i] +} + +// sort orders the history by creation date in descendant order. +func (history *History) sort() { + sort.Sort(history) +} diff --git a/vendor/github.com/docker/docker/container/memory_store.go b/vendor/github.com/docker/docker/container/memory_store.go new file mode 100644 index 0000000..706407a --- /dev/null +++ b/vendor/github.com/docker/docker/container/memory_store.go @@ -0,0 +1,95 @@ +package container + +import ( + "sync" +) + +// memoryStore implements a Store in memory. +type memoryStore struct { + s map[string]*Container + sync.RWMutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Store { + return &memoryStore{ + s: make(map[string]*Container), + } +} + +// Add appends a new container to the memory store. +// It overrides the id if it existed before. +func (c *memoryStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a container from the store by id. +func (c *memoryStore) Get(id string) *Container { + var res *Container + c.RLock() + res = c.s[id] + c.RUnlock() + return res +} + +// Delete removes a container from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of containers from the store. +// The containers are ordered by creation date. +func (c *memoryStore) List() []*Container { + containers := History(c.all()) + containers.sort() + return containers +} + +// Size returns the number of containers in the store. +func (c *memoryStore) Size() int { + c.RLock() + defer c.RUnlock() + return len(c.s) +} + +// First returns the first container found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Container { + for _, cont := range c.all() { + if filter(cont) { + return cont + } + } + return nil +} + +// ApplyAll calls the reducer function with every container in the store. +// This operation is asynchronous in the memory store. +// NOTE: Modifications to the store MUST NOT be done by the StoreReducer. +func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Container { + c.RLock() + containers := make([]*Container, 0, len(c.s)) + for _, cont := range c.s { + containers = append(containers, cont) + } + c.RUnlock() + return containers +} + +var _ Store = &memoryStore{} diff --git a/vendor/github.com/docker/docker/container/memory_store_test.go b/vendor/github.com/docker/docker/container/memory_store_test.go new file mode 100644 index 0000000..f81738f --- /dev/null +++ b/vendor/github.com/docker/docker/container/memory_store_test.go @@ -0,0 +1,106 @@ +package container + +import ( + "testing" + "time" +) + +func TestNewMemoryStore(t *testing.T) { + s := NewMemoryStore() + m, ok := s.(*memoryStore) + if !ok { + t.Fatalf("store is not a memory store %v", s) + } + if m.s == nil { + t.Fatal("expected store map to not be nil") + } +} + +func TestAddContainers(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + if s.Size() != 1 { + t.Fatalf("expected store size 1, got %v", s.Size()) + } +} + +func TestGetContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + c := s.Get("id") + if c == nil { + t.Fatal("expected container to not be nil") + } +} + +func TestDeleteContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + s.Delete("id") + if c := s.Get("id"); c != nil { + t.Fatalf("expected container to be nil after removal, got %v", c) + } + + if s.Size() != 0 { + t.Fatalf("expected store size to be 0, got %v", s.Size()) + } +} + +func TestListContainers(t *testing.T) { + s := NewMemoryStore() + + cont := NewBaseContainer("id", "root") + cont.Created = time.Now() + cont2 := NewBaseContainer("id2", "root") + cont2.Created = time.Now().Add(24 * time.Hour) + + s.Add("id", cont) + s.Add("id2", cont2) + + list := s.List() + if len(list) != 2 { + t.Fatalf("expected list size 2, got %v", len(list)) + } + if list[0].ID != "id2" { + t.Fatalf("expected older container to be first, got %v", list[0].ID) + } +} + +func TestFirstContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + first := s.First(func(cont *Container) bool { + return cont.ID == "id2" + }) + + if first == nil { + t.Fatal("expected container to not be nil") + } + if first.ID != "id2" { + t.Fatalf("expected id2, got %v", first) + } +} + +func TestApplyAllContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + s.ApplyAll(func(cont *Container) { + if cont.ID == "id2" { + cont.ID = "newID" + } + }) + + cont := s.Get("id2") + if cont == nil { + t.Fatal("expected container to not be nil") + } + if cont.ID != "newID" { + t.Fatalf("expected newID, got %v", cont) + } +} diff --git a/vendor/github.com/docker/docker/container/monitor.go b/vendor/github.com/docker/docker/container/monitor.go new file mode 100644 index 0000000..f05e72b --- /dev/null +++ b/vendor/github.com/docker/docker/container/monitor.go @@ -0,0 +1,46 @@ +package container + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + loggerCloseTimeout = 10 * time.Second +) + +// Reset puts a container into a state where it can be restarted again. +func (container *Container) Reset(lock bool) { + if lock { + container.Lock() + defer container.Unlock() + } + + if err := container.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", container.ID, err) + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.StreamConfig.NewInputPipes() + } + + if container.LogDriver != nil { + if container.LogCopier != nil { + exit := make(chan struct{}) + go func() { + container.LogCopier.Wait() + close(exit) + }() + select { + case <-time.After(loggerCloseTimeout): + logrus.Warn("Logger didn't exit in time: logs may be truncated") + case <-exit: + } + } + container.LogDriver.Close() + container.LogCopier = nil + container.LogDriver = nil + } +} diff --git a/vendor/github.com/docker/docker/container/mounts_unix.go b/vendor/github.com/docker/docker/container/mounts_unix.go new file mode 100644 index 0000000..c52abed --- /dev/null +++ b/vendor/github.com/docker/docker/container/mounts_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package container + +// Mount contains information for a mount operation. +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Data string `json:"data"` + Propagation string `json:"mountpropagation"` +} diff --git a/vendor/github.com/docker/docker/container/mounts_windows.go b/vendor/github.com/docker/docker/container/mounts_windows.go new file mode 100644 index 0000000..01b327f --- /dev/null +++ b/vendor/github.com/docker/docker/container/mounts_windows.go @@ -0,0 +1,8 @@ +package container + +// Mount contains information for a mount operation. +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` +} diff --git a/vendor/github.com/docker/docker/container/state.go b/vendor/github.com/docker/docker/container/state.go new file mode 100644 index 0000000..4dd2ece --- /dev/null +++ b/vendor/github.com/docker/docker/container/state.go @@ -0,0 +1,343 @@ +package container + +import ( + "fmt" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/go-units" +) + +// State holds the current container state, and has methods to get and +// set the state. Container has an embed, which allows all of the +// functions defined against State to run against Container. +type State struct { + sync.Mutex + // FIXME: Why do we have both paused and running if a + // container cannot be paused and running at the same time? + Running bool + Paused bool + Restarting bool + OOMKilled bool + RemovalInProgress bool // Not need for this to be persistent on disk. + Dead bool + Pid int + ExitCodeValue int `json:"ExitCode"` + ErrorMsg string `json:"Error"` // contains last known error when starting the container + StartedAt time.Time + FinishedAt time.Time + waitChan chan struct{} + Health *Health +} + +// StateStatus is used to return an error type implementing both +// exec.ExitCode and error. +// This type is needed as State include a sync.Mutex field which make +// copying it unsafe. +type StateStatus struct { + exitCode int + error string +} + +func newStateStatus(ec int, err string) *StateStatus { + return &StateStatus{ + exitCode: ec, + error: err, + } +} + +// ExitCode returns current exitcode for the state. +func (ss *StateStatus) ExitCode() int { + return ss.exitCode +} + +// Error returns current error for the state. +func (ss *StateStatus) Error() string { + return ss.error +} + +// NewState creates a default state object with a fresh channel for state changes. +func NewState() *State { + return &State{ + waitChan: make(chan struct{}), + } +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + if h := s.Health; h != nil { + return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String()) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.RemovalInProgress { + return "Removal In Progress" + } + + if s.Dead { + return "Dead" + } + + if s.StartedAt.IsZero() { + return "Created" + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// HealthString returns a single string to describe health status. +func (s *State) HealthString() string { + if s.Health == nil { + return types.NoHealthcheck + } + + return s.Health.String() +} + +// IsValidHealthString checks if the provided string is a valid container health status or not. +func IsValidHealthString(s string) bool { + return s == types.Starting || + s == types.Healthy || + s == types.Unhealthy || + s == types.NoHealthcheck +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + + if s.RemovalInProgress { + return "removing" + } + + if s.Dead { + return "dead" + } + + if s.StartedAt.IsZero() { + return "created" + } + + return "exited" +} + +// IsValidStateString checks if the provided string is a valid container state or not. +func IsValidStateString(s string) bool { + if s != "paused" && + s != "restarting" && + s != "removing" && + s != "running" && + s != "dead" && + s != "created" && + s != "exited" { + return false + } + return true +} + +func wait(waitChan <-chan struct{}, timeout time.Duration) error { + if timeout < 0 { + <-waitChan + return nil + } + select { + case <-time.After(timeout): + return fmt.Errorf("Timed out: %v", timeout) + case <-waitChan: + return nil + } +} + +// WaitStop waits until state is stopped. If state already stopped it returns +// immediately. If you want wait forever you must supply negative timeout. +// Returns exit code, that was passed to SetStopped +func (s *State) WaitStop(timeout time.Duration) (int, error) { + s.Lock() + if !s.Running { + exitCode := s.ExitCodeValue + s.Unlock() + return exitCode, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + s.Lock() + defer s.Unlock() + return s.ExitCode(), nil +} + +// WaitWithContext waits for the container to stop. Optional context can be +// passed for canceling the request. +func (s *State) WaitWithContext(ctx context.Context) error { + // todo(tonistiigi): make other wait functions use this + s.Lock() + if !s.Running { + state := newStateStatus(s.ExitCode(), s.Error()) + defer s.Unlock() + if state.ExitCode() == 0 { + return nil + } + return state + } + waitChan := s.waitChan + s.Unlock() + select { + case <-waitChan: + s.Lock() + state := newStateStatus(s.ExitCode(), s.Error()) + s.Unlock() + if state.ExitCode() == 0 { + return nil + } + return state + case <-ctx.Done(): + return ctx.Err() + } +} + +// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. +func (s *State) IsRunning() bool { + s.Lock() + res := s.Running + s.Unlock() + return res +} + +// GetPID holds the process id of a container. +func (s *State) GetPID() int { + s.Lock() + res := s.Pid + s.Unlock() + return res +} + +// ExitCode returns current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) ExitCode() int { + return s.ExitCodeValue +} + +// SetExitCode sets current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) SetExitCode(ec int) { + s.ExitCodeValue = ec +} + +// SetRunning sets the state of the container to "running". +func (s *State) SetRunning(pid int, initial bool) { + s.ErrorMsg = "" + s.Running = true + s.Restarting = false + s.ExitCodeValue = 0 + s.Pid = pid + if initial { + s.StartedAt = time.Now().UTC() + } +} + +// SetStopped sets the container state to "stopped" without locking. +func (s *State) SetStopped(exitStatus *ExitStatus) { + s.Running = false + s.Paused = false + s.Restarting = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetRestarting sets the container state to "restarting" without locking. +// It also sets the container PID to 0. +func (s *State) SetRestarting(exitStatus *ExitStatus) { + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetError sets the container's error state. This is useful when we want to +// know the error that occurred when container transits to another state +// when inspecting it +func (s *State) SetError(err error) { + s.ErrorMsg = err.Error() +} + +// IsPaused returns whether the container is paused or not. +func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} + +// IsRestarting returns whether the container is restarting or not. +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + +// SetRemovalInProgress sets the container state as being removed. +// It returns true if the container was already in that state. +func (s *State) SetRemovalInProgress() bool { + s.Lock() + defer s.Unlock() + if s.RemovalInProgress { + return true + } + s.RemovalInProgress = true + return false +} + +// ResetRemovalInProgress makes the RemovalInProgress state to false. +func (s *State) ResetRemovalInProgress() { + s.Lock() + s.RemovalInProgress = false + s.Unlock() +} + +// SetDead sets the container state to "dead" +func (s *State) SetDead() { + s.Lock() + s.Dead = true + s.Unlock() +} + +// Error returns current error for the state. +func (s *State) Error() string { + return s.ErrorMsg +} diff --git a/vendor/github.com/docker/docker/container/state_solaris.go b/vendor/github.com/docker/docker/container/state_solaris.go new file mode 100644 index 0000000..1229650 --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_solaris.go @@ -0,0 +1,7 @@ +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. +func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCodeValue = exitStatus.ExitCode +} diff --git a/vendor/github.com/docker/docker/container/state_test.go b/vendor/github.com/docker/docker/container/state_test.go new file mode 100644 index 0000000..c9a7bb4 --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_test.go @@ -0,0 +1,113 @@ +package container + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/api/types" +) + +func TestIsValidHealthString(t *testing.T) { + contexts := []struct { + Health string + Expected bool + }{ + {types.Healthy, true}, + {types.Unhealthy, true}, + {types.Starting, true}, + {types.NoHealthcheck, true}, + {"fail", false}, + } + + for _, c := range contexts { + v := IsValidHealthString(c.Health) + if v != c.Expected { + t.Fatalf("Expected %t, but got %t", c.Expected, v) + } + } +} + +func TestStateRunStop(t *testing.T) { + s := NewState() + for i := 1; i < 3; i++ { // full lifecycle two times + s.Lock() + s.SetRunning(i+100, false) + s.Unlock() + + if !s.IsRunning() { + t.Fatal("State not running") + } + if s.Pid != i+100 { + t.Fatalf("Pid %v, expected %v", s.Pid, i+100) + } + if s.ExitCode() != 0 { + t.Fatalf("ExitCode %v, expected 0", s.ExitCode()) + } + + stopped := make(chan struct{}) + var exit int64 + go func() { + exitCode, _ := s.WaitStop(-1 * time.Second) + atomic.StoreInt64(&exit, int64(exitCode)) + close(stopped) + }() + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: i}) + s.Unlock() + if s.IsRunning() { + t.Fatal("State is running") + } + if s.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i) + } + if s.Pid != 0 { + t.Fatalf("Pid %v, expected 0", s.Pid) + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + exitCode := int(atomic.LoadInt64(&exit)) + if exitCode != i { + t.Fatalf("ExitCode %v, expected %v", exitCode, i) + } + if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { + t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) + } + } +} + +func TestStateTimeoutWait(t *testing.T) { + s := NewState() + stopped := make(chan struct{}) + go func() { + s.WaitStop(100 * time.Millisecond) + close(stopped) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 200 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: 1}) + s.Unlock() + + stopped = make(chan struct{}) + go func() { + s.WaitStop(100 * time.Millisecond) + close(stopped) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + +} diff --git a/vendor/github.com/docker/docker/container/state_unix.go b/vendor/github.com/docker/docker/container/state_unix.go new file mode 100644 index 0000000..a2fa5af --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_unix.go @@ -0,0 +1,10 @@ +// +build linux freebsd + +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. +func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCodeValue = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled +} diff --git a/vendor/github.com/docker/docker/container/state_windows.go b/vendor/github.com/docker/docker/container/state_windows.go new file mode 100644 index 0000000..1229650 --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_windows.go @@ -0,0 +1,7 @@ +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. +func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCodeValue = exitStatus.ExitCode +} diff --git a/vendor/github.com/docker/docker/container/store.go b/vendor/github.com/docker/docker/container/store.go new file mode 100644 index 0000000..042fb1a --- /dev/null +++ b/vendor/github.com/docker/docker/container/store.go @@ -0,0 +1,28 @@ +package container + +// StoreFilter defines a function to filter +// container in the store. +type StoreFilter func(*Container) bool + +// StoreReducer defines a function to +// manipulate containers in the store +type StoreReducer func(*Container) + +// Store defines an interface that +// any container store must implement. +type Store interface { + // Add appends a new container to the store. + Add(string, *Container) + // Get returns a container from the store by the identifier it was stored with. + Get(string) *Container + // Delete removes a container from the store by the identifier it was stored with. + Delete(string) + // List returns a list of containers from the store. + List() []*Container + // Size returns the number of containers in the store. + Size() int + // First returns the first container found in the store by a given filter. + First(StoreFilter) *Container + // ApplyAll calls the reducer function with every container in the store. + ApplyAll(StoreReducer) +} diff --git a/vendor/github.com/docker/docker/container/stream/streams.go b/vendor/github.com/docker/docker/container/stream/streams.go new file mode 100644 index 0000000..79f366a --- /dev/null +++ b/vendor/github.com/docker/docker/container/stream/streams.go @@ -0,0 +1,143 @@ +package stream + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" +) + +// Config holds information about I/O streams managed together. +// +// config.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// config.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type Config struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. +func NewConfig() *Config { + return &Config{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (c *Config) Stdout() *broadcaster.Unbuffered { + return c.stdout +} + +// Stderr returns the standard error in the configuration. +func (c *Config) Stderr() *broadcaster.Unbuffered { + return c.stderr +} + +// Stdin returns the standard input in the configuration. +func (c *Config) Stdin() io.ReadCloser { + return c.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (c *Config) StdinPipe() io.WriteCloser { + return c.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +func (c *Config) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. +func (c *Config) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (c *Config) NewInputPipes() { + c.stdin, c.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (c *Config) NewNopInputPipe() { + c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. +func (c *Config) CloseStreams() error { + var errors []string + + if c.stdin != nil { + if err := c.stdin.Close(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + } + } + + if err := c.stdout.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + } + + if err := c.stderr.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + + return nil +} + +// CopyToPipe connects streamconfig with a libcontainerd.IOPipe +func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) { + copyFunc := func(w io.Writer, r io.Reader) { + c.Add(1) + go func() { + if _, err := pools.Copy(w, r); err != nil { + logrus.Errorf("stream copy error: %+v", err) + } + c.Done() + }() + } + + if iop.Stdout != nil { + copyFunc(c.Stdout(), iop.Stdout) + } + if iop.Stderr != nil { + copyFunc(c.Stderr(), iop.Stderr) + } + + if stdin := c.Stdin(); stdin != nil { + if iop.Stdin != nil { + go func() { + pools.Copy(iop.Stdin, stdin) + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("failed to close stdin: %+v", err) + } + }() + } + } +} diff --git a/vendor/github.com/docker/docker/contrib/README.md b/vendor/github.com/docker/docker/contrib/README.md new file mode 100644 index 0000000..92b1d94 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/README.md @@ -0,0 +1,4 @@ +The `contrib` directory contains scripts, images, and other helpful things +which are not part of the core docker distribution. Please note that they +could be out of date, since they do not receive the same attention as the +rest of the repository. diff --git a/vendor/github.com/docker/docker/contrib/REVIEWERS b/vendor/github.com/docker/docker/contrib/REVIEWERS new file mode 100644 index 0000000..18e05a3 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/REVIEWERS @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff --git a/vendor/github.com/docker/docker/contrib/apparmor/main.go b/vendor/github.com/docker/docker/contrib/apparmor/main.go new file mode 100644 index 0000000..f4a2978 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/apparmor/main.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "log" + "os" + "path" + "text/template" + + "github.com/docker/docker/pkg/aaparser" +) + +type profileData struct { + Version int +} + +func main() { + if len(os.Args) < 2 { + log.Fatal("pass a filename to save the profile in.") + } + + // parse the arg + apparmorProfilePath := os.Args[1] + + version, err := aaparser.GetVersion() + if err != nil { + log.Fatal(err) + } + data := profileData{ + Version: version, + } + fmt.Printf("apparmor_parser is of version %+v\n", data) + + // parse the template + compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate) + if err != nil { + log.Fatalf("parsing template failed: %v", err) + } + + // make sure /etc/apparmor.d exists + if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil { + log.Fatal(err) + } + + f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + log.Fatal(err) + } + defer f.Close() + + if err := compiled.Execute(f, data); err != nil { + log.Fatalf("executing template failed: %v", err) + } + + fmt.Printf("created apparmor profile for version %+v at %q\n", data, apparmorProfilePath) +} diff --git a/vendor/github.com/docker/docker/contrib/apparmor/template.go b/vendor/github.com/docker/docker/contrib/apparmor/template.go new file mode 100644 index 0000000..e5e1c8b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/apparmor/template.go @@ -0,0 +1,268 @@ +package main + +const dockerProfileTemplate = `@{DOCKER_GRAPH_PATH}=/var/lib/docker + +profile /usr/bin/docker (attach_disconnected, complain) { + # Prevent following links to these files during container setup. + deny /etc/** mkl, + deny /dev/** kl, + deny /sys/** mkl, + deny /proc/** mkl, + + mount -> @{DOCKER_GRAPH_PATH}/**, + mount -> /, + mount -> /proc/**, + mount -> /sys/**, + mount -> /run/docker/netns/**, + mount -> /.pivot_root[0-9]*/, + + / r, + + umount, + pivot_root, +{{if ge .Version 209000}} + signal (receive) peer=@{profile_name}, + signal (receive) peer=unconfined, + signal (send), +{{end}} + network, + capability, + owner /** rw, + @{DOCKER_GRAPH_PATH}/** rwl, + @{DOCKER_GRAPH_PATH}/linkgraph.db k, + @{DOCKER_GRAPH_PATH}/network/files/boltdb.db k, + @{DOCKER_GRAPH_PATH}/network/files/local-kv.db k, + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/linkgraph.db k, + + # For non-root client use: + /dev/urandom r, + /dev/null rw, + /dev/pts/[0-9]* rw, + /run/docker.sock rw, + /proc/** r, + /proc/[0-9]*/attr/exec w, + /sys/kernel/mm/hugepages/ r, + /etc/localtime r, + /etc/ld.so.cache r, + /etc/passwd r, + +{{if ge .Version 209000}} + ptrace peer=@{profile_name}, + ptrace (read) peer=docker-default, + deny ptrace (trace) peer=docker-default, + deny ptrace peer=/usr/bin/docker///bin/ps, +{{end}} + + /usr/lib/** rm, + /lib/** rm, + + /usr/bin/docker pix, + /sbin/xtables-multi rCx, + /sbin/iptables rCx, + /sbin/modprobe rCx, + /sbin/auplink rCx, + /sbin/mke2fs rCx, + /sbin/tune2fs rCx, + /sbin/blkid rCx, + /bin/kmod rCx, + /usr/bin/xz rCx, + /bin/ps rCx, + /bin/tar rCx, + /bin/cat rCx, + /sbin/zfs rCx, + /sbin/apparmor_parser rCx, + +{{if ge .Version 209000}} + # Transitions + change_profile -> docker-*, + change_profile -> unconfined, +{{end}} + + profile /bin/cat (complain) { + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /proc r, + /bin/cat mr, + + # For reading in 'docker stats': + /proc/[0-9]*/net/dev r, + } + profile /bin/ps (complain) { + /etc/ld.so.cache r, + /etc/localtime r, + /etc/passwd r, + /etc/nsswitch.conf r, + /lib/** rm, + /proc/[0-9]*/** r, + /dev/null rw, + /bin/ps mr, + +{{if ge .Version 209000}} + # We don't need ptrace so we'll deny and ignore the error. + deny ptrace (read, trace), +{{end}} + + # Quiet dac_override denials + deny capability dac_override, + deny capability dac_read_search, + deny capability sys_ptrace, + + /dev/tty r, + /proc/stat r, + /proc/cpuinfo r, + /proc/meminfo r, + /proc/uptime r, + /sys/devices/system/cpu/online r, + /proc/sys/kernel/pid_max r, + /proc/ r, + /proc/tty/drivers r, + } + profile /sbin/iptables (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability net_admin, + } + profile /sbin/auplink flags=(attach_disconnected, complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_admin, + capability dac_override, + + @{DOCKER_GRAPH_PATH}/aufs/** rw, + @{DOCKER_GRAPH_PATH}/tmp/** rw, + # For user namespaces: + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, + + /sys/fs/aufs/** r, + /lib/** rm, + /apparmor/.null r, + /dev/null rw, + /etc/ld.so.cache r, + /sbin/auplink rm, + /proc/fs/aufs/** rw, + /proc/[0-9]*/mounts rw, + } + profile /sbin/modprobe /bin/kmod (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_module, + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /apparmor/.null rw, + /sbin/modprobe rm, + /bin/kmod rm, + /proc/cmdline r, + /sys/module/** r, + /etc/modprobe.d{/,/**} r, + } + # xz works via pipes, so we do not need access to the filesystem. + profile /usr/bin/xz (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + /etc/ld.so.cache r, + /lib/** rm, + /usr/bin/xz rm, + deny /proc/** rw, + deny /sys/** rw, + } + profile /sbin/xtables-multi (attach_disconnected, complain) { + /etc/ld.so.cache r, + /lib/** rm, + /sbin/xtables-multi rm, + /apparmor/.null w, + /dev/null rw, + + /proc r, + + capability net_raw, + capability net_admin, + network raw, + } + profile /sbin/zfs (attach_disconnected, complain) { + file, + capability, + } + profile /sbin/mke2fs (complain) { + /sbin/mke2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/mke2fs.conf r, + /etc/mtab r, + + /dev/dm-* rw, + /dev/urandom r, + /dev/null rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/tune2fs (complain) { + /sbin/tune2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/blkid.conf r, + /etc/mtab r, + /etc/ld.so.cache r, + + /dev/null rw, + /dev/.blkid.tab r, + /dev/dm-* rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/blkid (complain) { + /sbin/blkid rm, + + /lib/** rm, + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/blkid.conf r, + + /dev/null rw, + /dev/.blkid.tab rl, + /dev/.blkid.tab* rwl, + /dev/dm-* r, + + /sys/devices/virtual/block/** r, + + capability mknod, + + mount -> @{DOCKER_GRAPH_PATH}/**, + } + profile /sbin/apparmor_parser (complain) { + /sbin/apparmor_parser rm, + + /lib/** rm, + + /etc/ld.so.cache r, + /etc/apparmor/** r, + /etc/apparmor.d/** r, + /etc/apparmor.d/cache/** w, + + /dev/null rw, + + /sys/kernel/security/apparmor/** r, + /sys/kernel/security/apparmor/.replace w, + + /proc/[0-9]*/mounts r, + /proc/sys/kernel/osrelease r, + /proc r, + + capability mac_admin, + } +}` diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh new file mode 100755 index 0000000..8271d9d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh new file mode 100755 index 0000000..b5040b7 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh @@ -0,0 +1,118 @@ +#!/bin/bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-trusty +# to only update ubuntu-trusty/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it +# +# Note: non-LTS versions are not guaranteed to work. + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="aarch64/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! + # + + FROM $from + + EOF + + dockerBuildTags='apparmor pkcs11 selinux' + runcBuildTags='apparmor selinux' + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + # aarch64 doesn't have an official downloadable binary for go. + # And gccgo for trusty only includes Go 1.2 implementation which + # is too old to build current go source, fortunately trusty has + # golang-1.6-go package can be used as bootstrap. + packages+=( golang-1.6-go ) + ;; + xenial) + packages+=( libsystemd-dev ) + packages+=( golang-go libseccomp-dev) + + dockerBuildTags="$dockerBuildTags seccomp" + runcBuildTags="$runcBuildTags seccomp" + ;; + *) + echo "Unsupported distro:" $distro:$suite + rm -fr "$version" + exit 1 + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$suite" in + trusty) + echo 'RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) + ;; + esac + + echo "# Install Go" >> "$version/Dockerfile" + echo "# aarch64 doesn't have official go binaries, so use the version of go installed from" >> "$version/Dockerfile" + echo "# the image to build go from source." >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.aarch64 >> "$version/Dockerfile" + echo 'RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \' >> "$version/Dockerfile" + echo ' && cd /usr/src/go/src \' >> "$version/Dockerfile" + echo ' && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV PATH $PATH:/usr/src/go/bin' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV AUTO_GOPATH 1" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV DOCKER_BUILDTAGS $dockerBuildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..d04860c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile @@ -0,0 +1,24 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go --no-install-recommends && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH $PATH:/usr/src/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..3cd8442 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH $PATH:/usr/src/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md new file mode 100644 index 0000000..20a0ff1 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-deb` + +This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. + +To add new tags, see [`contrib/builder/deb/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh new file mode 100755 index 0000000..8271d9d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile new file mode 100644 index 0000000..42aaa56 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile new file mode 100644 index 0000000..c052be5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:stretch + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile new file mode 100644 index 0000000..bcedb47 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:wheezy-backports + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + +RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh new file mode 100755 index 0000000..765db5d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh @@ -0,0 +1,149 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + debian:wheezy) + # add -backports, like our users have to + from+='-backports' + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! + # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + if [ "$distro" = "debian" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + # allow replacing httpredir or deb mirror + ARG APT_MIRROR=deb.debian.org + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + EOF + + if [ "$suite" = "wheezy" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + EOF + fi + + echo "" >> "$version/Dockerfile" + fi + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + # packaging for "sd-journal.h" and libraries varies + case "$suite" in + precise|wheezy) ;; + jessie|trusty) packages+=( libsystemd-journal-dev );; + *) packages+=( libsystemd-dev );; + esac + + # debian wheezy & ubuntu precise do not have the right libseccomp libs + # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( + case "$suite" in + precise|wheezy|jessie|trusty) + packages=( "${packages[@]/libseccomp-dev}" ) + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + + if [ "$suite" = 'precise' ]; then + # precise has a few package issues + + # - dh-systemd doesn't exist at all + packages=( "${packages[@]/dh-systemd}" ) + + # - libdevmapper-dev is missing critical structs (too old) + packages=( "${packages[@]/libdevmapper-dev}" ) + extraBuildTags+=' exclude_graphdriver_devicemapper' + + # - btrfs-tools is missing "ioctl.h" (too old), so it's useless + # (since kernels on precise are old too, just skip btrfs entirely) + packages=( "${packages[@]/btrfs-tools}" ) + extraBuildTags+=' exclude_graphdriver_btrfs' + fi + + if [ "$suite" = 'wheezy' ]; then + # pull a couple packages from backports explicitly + # (build failures otherwise) + backportsPackages=( btrfs-tools ) + for pkg in "${backportsPackages[@]}"; do + packages=( "${packages[@]/$pkg}" ) + done + echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + fi + + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile new file mode 100644 index 0000000..aa027f8 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:precise + +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor git libapparmor-dev libltdl-dev libsqlite3-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor exclude_graphdriver_btrfs exclude_graphdriver_devicemapper pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..b03a853 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..af03f62 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..5ac1edf --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile new file mode 100644 index 0000000..a4ac781 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh new file mode 100755 index 0000000..e110a21 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh @@ -0,0 +1,158 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + raspbian:jessie) + from="resin/rpi-raspbian:jessie" + ;; + *) + from="armhf/$from" + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! + # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + if [[ "$distro" = "debian" || "$distro" = "raspbian" ]]; then + cat >> "$version/Dockerfile" <<-'EOF' + # allow replacing httpredir or deb mirror + ARG APT_MIRROR=deb.debian.org + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + EOF + + if [ "$suite" = "wheezy" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + EOF + fi + + echo "" >> "$version/Dockerfile" + fi + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + # packaging for "sd-journal.h" and libraries varies + case "$suite" in + precise|wheezy) ;; + jessie|trusty) packages+=( libsystemd-journal-dev );; + *) packages+=( libsystemd-dev );; + esac + + # debian wheezy & ubuntu precise do not have the right libseccomp libs + # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( + case "$suite" in + precise|wheezy|jessie|trusty) + packages=( "${packages[@]/libseccomp-dev}" ) + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + + if [ "$suite" = 'precise' ]; then + # precise has a few package issues + + # - dh-systemd doesn't exist at all + packages=( "${packages[@]/dh-systemd}" ) + + # - libdevmapper-dev is missing critical structs (too old) + packages=( "${packages[@]/libdevmapper-dev}" ) + extraBuildTags+=' exclude_graphdriver_devicemapper' + + # - btrfs-tools is missing "ioctl.h" (too old), so it's useless + # (since kernels on precise are old too, just skip btrfs entirely) + packages=( "${packages[@]/btrfs-tools}" ) + extraBuildTags+=' exclude_graphdriver_btrfs' + fi + + if [ "$suite" = 'wheezy' ]; then + # pull a couple packages from backports explicitly + # (build failures otherwise) + backportsPackages=( btrfs-tools ) + for pkg in "${backportsPackages[@]}"; do + packages=( "${packages[@]/$pkg}" ) + done + echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + fi + + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + if [ "$distro" == 'raspbian' ]; + then + cat <> "$version/Dockerfile" +# GOARM is the ARM architecture version which is unrelated to the above Golang version +ENV GOARM 6 +EOF + fi + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile new file mode 100644 index 0000000..4dbfd09 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM resin/rpi-raspbian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +# GOARM is the ARM architecture version which is unrelated to the above Golang version +ENV GOARM 6 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..b36c1da --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..b5e55ad --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..69c2e7f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh new file mode 100755 index 0000000..7d22e8c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh new file mode 100755 index 0000000..0e20b9c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh @@ -0,0 +1,103 @@ +#!/bin/bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="ppc64le/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! + # + + FROM $from + + EOF + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + ;; + *) + # libseccomp isn't available until ubuntu xenial and is required for "seccomp.h" & "libseccomp.so" + packages+=( libseccomp-dev ) + packages+=( libsystemd-dev ) + ;; + esac + + # buildtags + case "$suite" in + # trusty has no seccomp package + trusty) + runcBuildTags="apparmor selinux" + ;; + # ppc64le support was backported into libseccomp 2.2.3-2, + # so enable seccomp by default + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..4182d68 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! +# + +FROM ppc64le/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..f1521db --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! +# + +FROM ppc64le/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..4f8cc66 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! +# + +FROM ppc64le/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh new file mode 100755 index 0000000..8271d9d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh new file mode 100755 index 0000000..b8f5860 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh @@ -0,0 +1,96 @@ +#!/bin/bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="s390x/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! + # + + FROM $from + + EOF + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + libsystemd-dev + vim-common # tini dep + ) + + case "$suite" in + # s390x needs libseccomp 2.3.1 + xenial) + # Ubuntu Xenial has libseccomp 2.2.3 + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor selinux seccomp" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..6d7e4c5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! +# + +FROM s390x/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md new file mode 100644 index 0000000..5f2e888 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-rpm` + +This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. + +To add new tags, see [`contrib/builder/rpm/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh new file mode 100755 index 0000000..558f7ee --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile new file mode 100644 index 0000000..1f84163 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM centos:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile new file mode 100644 index 0000000..af040c5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM fedora:24 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile new file mode 100644 index 0000000..98e57a9 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM fedora:25 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh new file mode 100755 index 0000000..6f93afa --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh @@ -0,0 +1,189 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + if [[ "$distro" == "photon" ]]; then + installer=tdnf + fi + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! + # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags='pkcs11' + runcBuildTags= + + case "$from" in + oraclelinux:6) + # We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version + # This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo + echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile" + echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + fedora:*) + echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" + ;; + *) ;; + esac + + case "$from" in + centos:*) + # get "Development Tools" packages dependencies + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + + if [[ "$version" == "centos-7" ]]; then + echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" + fi + ;; + oraclelinux:*) + # get "Development Tools" packages and dependencies + # we also need yum-utils for yum-config-manager to pull the latest repo file + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + ;; + opensuse:*) + # get rpm-build and curl packages and dependencies + echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" + ;; + photon:*) + echo "RUN ${installer} install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + libtool-ltdl-devel # for pkcs11 "ltdl.h" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + case "$from" in + oraclelinux:7) + # Enable the optional repository + packages=( --enablerepo=ol7_optional_latest "${packages[*]}" ) + ;; + esac + + case "$from" in + oraclelinux:6) + # doesn't use systemd, doesn't have a devel package for it + packages=( "${packages[@]/systemd-devel}" ) + ;; + esac + + # opensuse & oraclelinx:6 do not have the right libseccomp libs + case "$from" in + opensuse:*|oraclelinux:6) + packages=( "${packages[@]/libseccomp-devel}" ) + runcBuildTags="selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + ;; + esac + + case "$from" in + opensuse:*) + packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + packages=( "${packages[@]/vim-common/vim}" ) + if [[ "$from" == "opensuse:13."* ]]; then + packages+=( systemd-rpm-macros ) + fi + + # use zypper + echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" + ;; + photon:*) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$from" in + oraclelinux:6) + # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. + # The ordering is very important and should not be changed. + echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) ;; + esac + + +done diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile new file mode 100644 index 0000000..addd431 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM opensuse:13.2 + +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim systemd-rpm-macros + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 selinux +ENV RUNC_BUILDTAGS selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile new file mode 100644 index 0000000..c34d304 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile @@ -0,0 +1,28 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM oraclelinux:6 + +RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 +RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 selinux +ENV RUNC_BUILDTAGS selinux + +ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile new file mode 100644 index 0000000..378536b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM oraclelinux:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile new file mode 100644 index 0000000..b77d573 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM photon:1.0 + +RUN tdnf install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils +RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/check-config.sh b/vendor/github.com/docker/docker/contrib/check-config.sh new file mode 100755 index 0000000..d07e4ce --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/check-config.sh @@ -0,0 +1,354 @@ +#!/usr/bin/env bash +set -e + +EXITCODE=0 + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) + +if [ $# -gt 0 ]; then + CONFIG="$1" +else + : ${CONFIG:="${possibleConfigs[0]}"} +fi + +if ! command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi + +kernelVersion="$(uname -r)" +kernelMajor="${kernelVersion%%.*}" +kernelMinor="${kernelVersion#$kernelMajor.}" +kernelMinor="${kernelMinor%%.*}" + +is_set() { + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null +} +is_set_in_kernel() { + zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null +} +is_set_as_module() { + zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null +} + +color() { + local codes=() + if [ "$1" = 'bold' ]; then + codes=( "${codes[@]}" '1' ) + shift + fi + if [ "$#" -gt 0 ]; then + local code= + case "$1" in + # see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors + black) code=30 ;; + red) code=31 ;; + green) code=32 ;; + yellow) code=33 ;; + blue) code=34 ;; + magenta) code=35 ;; + cyan) code=36 ;; + white) code=37 ;; + esac + if [ "$code" ]; then + codes=( "${codes[@]}" "$code" ) + fi + fi + local IFS=';' + echo -en '\033['"${codes[*]}"'m' +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red +} + +check_flag() { + if is_set_in_kernel "$1"; then + wrap_good "CONFIG_$1" 'enabled' + elif is_set_as_module "$1"; then + wrap_good "CONFIG_$1" 'enabled (as module)' + else + wrap_bad "CONFIG_$1" 'missing' + EXITCODE=1 + fi +} + +check_flags() { + for flag in "$@"; do + echo -n "- "; check_flag "$flag" + done +} + +check_command() { + if command -v "$1" >/dev/null 2>&1; then + wrap_good "$1 command" 'available' + else + wrap_bad "$1 command" 'missing' + EXITCODE=1 + fi +} + +check_device() { + if [ -c "$1" ]; then + wrap_good "$1" 'present' + else + wrap_bad "$1" 'missing' + EXITCODE=1 + fi +} + +check_distro_userns() { + source /etc/os-release 2>/dev/null || /bin/true + if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then + # this is a CentOS7 or RHEL7 system + grep -q "user_namespace.enable=1" /proc/cmdline || { + # no user namespace support enabled + wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)" + EXITCODE=1 + } + fi +} + +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..." + for tryConfig in "${possibleConfigs[@]}"; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! -e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" + exit 1 + fi +fi + +wrap_color "info: reading kernel config from $CONFIG ..." white +echo + +echo 'Generally Necessary:' + +echo -n '- ' +cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" +cgroupDir="$(dirname "$cgroupSubsystemDir")" +if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + if [ "$cgroupSubsystemDir" ]; then + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" + else + echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" + fi + EXITCODE=1 + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + EXITCODE=1 + fi +fi + +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG + KEYS + VETH BRIDGE BRIDGE_NETFILTER + NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE + NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS} + IP_NF_NAT NF_NAT NF_NAT_NEEDED + + # required for bind-mounting /dev/mqueue into containers + POSIX_MQUEUE +) +check_flags "${flags[@]}" +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -lt 8 ]; then + check_flags DEVPTS_MULTIPLE_INSTANCES +fi + +echo + +echo 'Optional Features:' +{ + check_flags USER_NS + check_distro_userns +} +{ + check_flags SECCOMP +} +{ + check_flags CGROUP_PIDS +} +{ + check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED + if is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then + echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)" + fi +} +{ + if is_set LEGACY_VSYSCALL_NATIVE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled' + echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)" + elif is_set LEGACY_VSYSCALL_EMULATE; then + echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled' + elif is_set LEGACY_VSYSCALL_NONE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled' + echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)" + echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)" + echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)" + echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)" + # else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do + # not have these LEGACY_VSYSCALL options and are effectively + # LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably + # effectively LEGACY_VSYSCALL_NATIVE. + fi +} + +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 5 ]; then + check_flags MEMCG_KMEM +fi + +if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then + check_flags RESOURCE_COUNTERS +fi + +if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then + netprio=NETPRIO_CGROUP +else + netprio=CGROUP_NET_PRIO +fi + +flags=( + BLK_CGROUP BLK_DEV_THROTTLING IOSCHED_CFQ CFQ_GROUP_IOSCHED + CGROUP_PERF + CGROUP_HUGETLB + NET_CLS_CGROUP $netprio + CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED + IP_VS + IP_VS_NFCT + IP_VS_RR +) +check_flags "${flags[@]}" + +if ! is_set EXT4_USE_FOR_EXT2; then + check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY + if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then + echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)" + fi +fi + +check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY +if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then + if is_set EXT4_USE_FOR_EXT2; then + echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)" + else + echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)" + fi +fi + +echo '- Network Drivers:' +echo ' - "'$(wrap_color 'overlay' blue)'":' +check_flags VXLAN | sed 's/^/ /' +echo ' Optional (for encrypted networks):' +check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \ + XFRM XFRM_USER XFRM_ALGO INET_ESP INET_XFRM_MODE_TRANSPORT | sed 's/^/ /' +echo ' - "'$(wrap_color 'ipvlan' blue)'":' +check_flags IPVLAN | sed 's/^/ /' +echo ' - "'$(wrap_color 'macvlan' blue)'":' +check_flags MACVLAN DUMMY | sed 's/^/ /' + +# only fail if no storage drivers available +CODE=${EXITCODE} +EXITCODE=0 +STORAGE=1 + +echo '- Storage Drivers:' +echo ' - "'$(wrap_color 'aufs' blue)'":' +check_flags AUFS_FS | sed 's/^/ /' +if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then + echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" +fi +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'btrfs' blue)'":' +check_flags BTRFS_FS | sed 's/^/ /' +check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'devicemapper' blue)'":' +check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'overlay' blue)'":' +check_flags OVERLAY_FS | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'zfs' blue)'":' +echo -n " - "; check_device /dev/zfs +echo -n " - "; check_command zfs +echo -n " - "; check_command zpool +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +EXITCODE=$CODE +[ "$STORAGE" = 1 ] && EXITCODE=1 + +echo + +check_limit_over() +{ + if [ $(cat "$1") -le "$2" ]; then + wrap_bad "- $1" "$(cat $1)" + wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black + EXITCODE=1 + else + wrap_good "- $1" "$(cat $1)" + fi +} + +echo 'Limits:' +check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000 +echo + +exit $EXITCODE diff --git a/vendor/github.com/docker/docker/contrib/completion/REVIEWERS b/vendor/github.com/docker/docker/contrib/completion/REVIEWERS new file mode 100644 index 0000000..03ee2dd --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/completion/bash/docker b/vendor/github.com/docker/docker/contrib/completion/bash/docker new file mode 100644 index 0000000..7ea5d9a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/bash/docker @@ -0,0 +1,4282 @@ +#!/bin/bash +# +# bash completion file for core docker commands +# +# This script provides completion of: +# - commands and their options +# - container ids and names +# - image repos and tags +# - filepaths +# +# To enable the completions either: +# - place this file in /etc/bash_completion.d +# or +# - copy this file to e.g. ~/.docker-completion.sh and add the line +# below to your .bashrc after bash completion features are loaded +# . ~/.docker-completion.sh +# +# Configuration: +# +# For several commands, the amount of completions can be configured by +# setting environment variables. +# +# DOCKER_COMPLETION_SHOW_CONTAINER_IDS +# DOCKER_COMPLETION_SHOW_NETWORK_IDS +# DOCKER_COMPLETION_SHOW_NODE_IDS +# DOCKER_COMPLETION_SHOW_PLUGIN_IDS +# DOCKER_COMPLETION_SHOW_SECRET_IDS +# DOCKER_COMPLETION_SHOW_SERVICE_IDS +# "no" - Show names only (default) +# "yes" - Show names and ids +# +# You can tailor completion for the "events", "history", "inspect", "run", +# "rmi" and "save" commands by settings the following environment +# variables: +# +# DOCKER_COMPLETION_SHOW_IMAGE_IDS +# "none" - Show names only (default) +# "non-intermediate" - Show names and ids, but omit intermediate image IDs +# "all" - Show names and ids, including intermediate image IDs +# +# DOCKER_COMPLETION_SHOW_TAGS +# "yes" - include tags in completion options (default) +# "no" - don't include tags in completion options + +# +# Note: +# Currently, the completions will not work if the docker daemon is not +# bound to the default communication port/socket +# If the docker daemon is using a unix socket for communication your user +# must have access to the socket for the completions to function correctly +# +# Note for developers: +# Please arrange options sorted alphabetically by long name with the short +# options immediately following their corresponding long form. +# This order should be applied to lists, alternatives and code blocks. + +__docker_previous_extglob_setting=$(shopt -p extglob) +shopt -s extglob + +__docker_q() { + docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@" +} + +# __docker_containers returns a list of containers. Additional options to +# `docker ps` may be specified in order to filter the list, e.g. +# `__docker_containers --filter status=running` +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_CONTAINER_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_containers() { + local format + if [ "$1" = "--id" ] ; then + format='{{.ID}}' + shift + elif [ "$1" = "--name" ] ; then + format='{{.Names}}' + shift + elif [ "${DOCKER_COMPLETION_SHOW_CONTAINER_IDS}" = yes ] ; then + format='{{.ID}} {{.Names}}' + else + format='{{.Names}}' + fi + __docker_q ps --format "$format" "$@" +} + +# __docker_complete_containers applies completion of containers based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_containers`. +__docker_complete_containers() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_containers "$@")" -- "$current") ) +} + +__docker_complete_containers_all() { + __docker_complete_containers "$@" --all +} + +__docker_complete_containers_running() { + __docker_complete_containers "$@" --filter status=running +} + +__docker_complete_containers_stopped() { + __docker_complete_containers "$@" --filter status=exited +} + +__docker_complete_containers_unpauseable() { + __docker_complete_containers "$@" --filter status=paused +} + +__docker_complete_container_names() { + local containers=( $(__docker_q ps -aq --no-trunc) ) + local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) + names=( "${names[@]#/}" ) # trim off the leading "/" from the container names + COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) +} + +__docker_complete_container_ids() { + local containers=( $(__docker_q ps -aq) ) + COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") ) +} + +__docker_images() { + local images_args="" + + case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in + all) + images_args="--no-trunc -a" + ;; + non-intermediate) + images_args="--no-trunc" + ;; + esac + + local repo_print_command + if [ "${DOCKER_COMPLETION_SHOW_TAGS:-yes}" = "yes" ]; then + repo_print_command='print $1; print $1":"$2' + else + repo_print_command='print $1' + fi + + local awk_script + case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in + all|non-intermediate) + awk_script='NR>1 { print $3; if ($1 != "") { '"$repo_print_command"' } }' + ;; + none|*) + awk_script='NR>1 && $1 != "" { '"$repo_print_command"' }' + ;; + esac + + __docker_q images $images_args | awk "$awk_script" | grep -v '$' +} + +__docker_complete_images() { + COMPREPLY=( $(compgen -W "$(__docker_images)" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +__docker_complete_image_repos() { + local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" + COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) +} + +__docker_complete_image_repos_and_tags() { + local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" + COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +# __docker_networks returns a list of all networks. Additional options to +# `docker network ls` may be specified in order to filter the list, e.g. +# `__docker_networks --filter type=custom` +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_NETWORK_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_networks() { + local format + if [ "$1" = "--id" ] ; then + format='{{.ID}}' + shift + elif [ "$1" = "--name" ] ; then + format='{{.Name}}' + shift + elif [ "${DOCKER_COMPLETION_SHOW_NETWORK_IDS}" = yes ] ; then + format='{{.ID}} {{.Name}}' + else + format='{{.Name}}' + fi + __docker_q network ls --format "$format" "$@" +} + +# __docker_complete_networks applies completion of networks based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_networks`. +__docker_complete_networks() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_networks "$@")" -- "$current") ) +} + +__docker_complete_containers_in_network() { + local containers=$(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1") + COMPREPLY=( $(compgen -W "$containers" -- "$cur") ) +} + +# __docker_volumes returns a list of all volumes. Additional options to +# `docker volume ls` may be specified in order to filter the list, e.g. +# `__docker_volumes --filter dangling=true` +# Because volumes do not have IDs, this function does not distinguish between +# IDs and names. +__docker_volumes() { + __docker_q volume ls -q "$@" +} + +# __docker_complete_volumes applies completion of volumes based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_volumes`. +__docker_complete_volumes() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_volumes "$@")" -- "$current") ) +} + +# __docker_plugins_bundled returns a list of all plugins of a given type. +# The type has to be specified with the mandatory option `--type`. +# Valid types are: Network, Volume, Authorization. +# Completions may be added or removed with `--add` and `--remove` +# This function only deals with plugins that come bundled with Docker. +# For plugins managed by `docker plugin`, see `__docker_plugins_installed`. +__docker_plugins_bundled() { + local type add=() remove=() + while true ; do + case "$1" in + --type) + type="$2" + shift 2 + ;; + --add) + add+=("$2") + shift 2 + ;; + --remove) + remove+=("$2") + shift 2 + ;; + *) + break + ;; + esac + done + + local plugins=($(__docker_q info | sed -n "/^Plugins/,/^[^ ]/s/ $type: //p")) + for del in "${remove[@]}" ; do + plugins=(${plugins[@]/$del/}) + done + echo "${plugins[@]} ${add[@]}" +} + +# __docker_complete_plugins_bundled applies completion of plugins based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# The plugin type has to be specified with the next option `--type`. +# This function only deals with plugins that come bundled with Docker. +# For completion of plugins managed by `docker plugin`, see +# `__docker_complete_plugins_installed`. +__docker_complete_plugins_bundled() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_plugins_bundled "$@")" -- "$current") ) +} + +# __docker_plugins_installed returns a list of all plugins that were installed with +# the Docker plugin API. +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_PLUGIN_IDS=yes to also complete IDs. +# For built-in pugins, see `__docker_plugins_bundled`. +__docker_plugins_installed() { + local fields + if [ "$DOCKER_COMPLETION_SHOW_PLUGIN_IDS" = yes ] ; then + fields='$1,$2' + else + fields='$2' + fi + __docker_q plugin ls | awk "NR>1 {print $fields}" +} + +# __docker_complete_plugins_installed applies completion of plugins that were installed +# with the Docker plugin API, based on the current value of `$cur` or the value of +# the optional first option `--cur`, if given. +# For completion of built-in pugins, see `__docker_complete_plugins_bundled`. +__docker_complete_plugins_installed() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_plugins_installed "$@")" -- "$current") ) +} + +__docker_runtimes() { + __docker_q info | sed -n 's/^Runtimes: \(.*\)/\1/p' +} + +__docker_complete_runtimes() { + COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") ) +} + +# __docker_secrets returns a list of all secrets. +# By default, only names of secrets are returned. +# Set DOCKER_COMPLETION_SHOW_SECRET_IDS=yes to also complete IDs of secrets. +__docker_secrets() { + local fields='$2' # default: name only + [ "${DOCKER_COMPLETION_SHOW_SECRET_IDS}" = yes ] && fields='$1,$2' # ID and name + + __docker_q secret ls | awk "NR>1 {print $fields}" +} + +# __docker_complete_secrets applies completion of secrets based on the current value +# of `$cur`. +__docker_complete_secrets() { + COMPREPLY=( $(compgen -W "$(__docker_secrets)" -- "$cur") ) +} + +# __docker_stacks returns a list of all stacks. +__docker_stacks() { + __docker_q stack ls | awk 'NR>1 {print $1}' +} + +# __docker_complete_stacks applies completion of stacks based on the current value +# of `$cur` or the value of the optional first option `--cur`, if given. +__docker_complete_stacks() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_stacks "$@")" -- "$current") ) +} + +# __docker_nodes returns a list of all nodes. Additional options to +# `docker node ls` may be specified in order to filter the list, e.g. +# `__docker_nodes --filter role=manager` +# By default, only node names are returned. +# Set DOCKER_COMPLETION_SHOW_NODE_IDS=yes to also complete node IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +# Completions may be added with `--add`, e.g. `--add self`. +__docker_nodes() { + local add=() + local fields='$2' # default: node name only + [ "${DOCKER_COMPLETION_SHOW_NODE_IDS}" = yes ] && fields='$1,$2' # ID and name + + while true ; do + case "$1" in + --id) + fields='$1' # IDs only + shift + ;; + --name) + fields='$2' # names only + shift + ;; + --add) + add+=("$2") + shift 2 + ;; + *) + break + ;; + esac + done + + echo $(__docker_q node ls "$@" | tr -d '*' | awk "NR>1 {print $fields}") "${add[@]}" +} + +# __docker_complete_nodes applies completion of nodes based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_nodes`. +__docker_complete_nodes() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_nodes "$@")" -- "$current") ) +} + +__docker_complete_nodes_plus_self() { + __docker_complete_nodes --add self "$@" +} + +# __docker_services returns a list of all services. Additional options to +# `docker service ls` may be specified in order to filter the list, e.g. +# `__docker_services --filter name=xxx` +# By default, only node names are returned. +# Set DOCKER_COMPLETION_SHOW_SERVICE_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_services() { + local fields='$2' # default: service name only + [ "${DOCKER_COMPLETION_SHOW_SERVICE_IDS}" = yes ] && fields='$1,$2' # ID & name + + if [ "$1" = "--id" ] ; then + fields='$1' # IDs only + shift + elif [ "$1" = "--name" ] ; then + fields='$2' # names only + shift + fi + __docker_q service ls "$@" | awk "NR>1 {print $fields}" +} + +# __docker_complete_services applies completion of services based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_services`. +__docker_complete_services() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") ) +} + +# __docker_append_to_completions appends the word passed as an argument to every +# word in `$COMPREPLY`. +# Normally you do this with `compgen -S` while generating the completions. +# This function allows you to append a suffix later. It allows you to use +# the __docker_complete_XXX functions in cases where you need a suffix. +__docker_append_to_completions() { + COMPREPLY=( ${COMPREPLY[@]/%/"$1"} ) +} + +# __docker_is_experimental tests whether the currently configured Docker daemon +# runs in experimental mode. If so, the function exits with 0 (true). +# Otherwise, or if the result cannot be determined, the exit value is 1 (false). +__docker_is_experimental() { + [ "$(__docker_q version -f '{{.Server.Experimental}}')" = "true" ] +} + +# __docker_pos_first_nonflag finds the position of the first word that is neither +# option nor an option's argument. If there are options that require arguments, +# you should pass a glob describing those options, e.g. "--option1|-o|--option2" +# Use this function to restrict completions to exact positions after the argument list. +__docker_pos_first_nonflag() { + local argument_flags=$1 + + local counter=$((${subcommand_pos:-${command_pos}} + 1)) + while [ $counter -le $cword ]; do + if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then + (( counter++ )) + # eat "=" in case of --option=arg syntax + [ "${words[$counter]}" = "=" ] && (( counter++ )) + else + case "${words[$counter]}" in + -*) + ;; + *) + break + ;; + esac + fi + + # Bash splits words at "=", retaining "=" as a word, examples: + # "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words + while [ "${words[$counter + 1]}" = "=" ] ; do + counter=$(( counter + 2)) + done + + (( counter++ )) + done + + echo $counter +} + +# __docker_map_key_of_current_option returns `key` if we are currently completing the +# value of a map option (`key=value`) which matches the extglob given as an argument. +# This function is needed for key-specific completions. +__docker_map_key_of_current_option() { + local glob="$1" + + local key glob_pos + if [ "$cur" = "=" ] ; then # key= case + key="$prev" + glob_pos=$((cword - 2)) + elif [[ $cur == *=* ]] ; then # key=value case (OSX) + key=${cur%=*} + glob_pos=$((cword - 1)) + elif [ "$prev" = "=" ] ; then + key=${words[$cword - 2]} # key=value case + glob_pos=$((cword - 3)) + else + return + fi + + [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax + + [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key" +} + +# __docker_value_of_option returns the value of the first option matching `option_glob`. +# Valid values for `option_glob` are option names like `--log-level` and globs like +# `--log-level|-l` +# Only positions between the command and the current word are considered. +__docker_value_of_option() { + local option_extglob=$(__docker_to_extglob "$1") + + local counter=$((command_pos + 1)) + while [ $counter -lt $cword ]; do + case ${words[$counter]} in + $option_extglob ) + echo ${words[$counter + 1]} + break + ;; + esac + (( counter++ )) + done +} + +# __docker_to_alternatives transforms a multiline list of strings into a single line +# string with the words separated by `|`. +# This is used to prepare arguments to __docker_pos_first_nonflag(). +__docker_to_alternatives() { + local parts=( $1 ) + local IFS='|' + echo "${parts[*]}" +} + +# __docker_to_extglob transforms a multiline list of options into an extglob pattern +# suitable for use in case statements. +__docker_to_extglob() { + local extglob=$( __docker_to_alternatives "$1" ) + echo "@($extglob)" +} + +# __docker_subcommands processes subcommands +# Locates the first occurrence of any of the subcommands contained in the +# first argument. In case of a match, calls the corresponding completion +# function and returns 0. +# If no match is found, 1 is returned. The calling function can then +# continue processing its completion. +# +# TODO if the preceding command has options that accept arguments and an +# argument is equal ot one of the subcommands, this is falsely detected as +# a match. +__docker_subcommands() { + local subcommands="$1" + + local counter=$(($command_pos + 1)) + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + $(__docker_to_extglob "$subcommands") ) + subcommand_pos=$counter + local subcommand=${words[$counter]} + local completions_func=_docker_${command}_${subcommand} + declare -F $completions_func >/dev/null && $completions_func + return 0 + ;; + esac + (( counter++ )) + done + return 1 +} + +# __docker_nospace suppresses trailing whitespace +__docker_nospace() { + # compopt is not available in ancient bash versions + type compopt &>/dev/null && compopt -o nospace +} + +__docker_complete_resolved_hostname() { + command -v host >/dev/null 2>&1 || return + COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) +} + +__docker_local_interfaces() { + command -v ip >/dev/null 2>&1 || return + ip addr show scope global 2>/dev/null | sed -n 's| \+inet \([0-9.]\+\).* \([^ ]\+\)|\1 \2|p' +} + +__docker_complete_local_interfaces() { + local additional_interface + if [ "$1" = "--add" ] ; then + additional_interface="$2" + fi + + COMPREPLY=( $( compgen -W "$(__docker_local_interfaces) $additional_interface" -- "$cur" ) ) +} + +__docker_complete_capabilities() { + # The list of capabilities is defined in types.go, ALL was added manually. + COMPREPLY=( $( compgen -W " + ALL + AUDIT_CONTROL + AUDIT_WRITE + AUDIT_READ + BLOCK_SUSPEND + CHOWN + DAC_OVERRIDE + DAC_READ_SEARCH + FOWNER + FSETID + IPC_LOCK + IPC_OWNER + KILL + LEASE + LINUX_IMMUTABLE + MAC_ADMIN + MAC_OVERRIDE + MKNOD + NET_ADMIN + NET_BIND_SERVICE + NET_BROADCAST + NET_RAW + SETFCAP + SETGID + SETPCAP + SETUID + SYS_ADMIN + SYS_BOOT + SYS_CHROOT + SYSLOG + SYS_MODULE + SYS_NICE + SYS_PACCT + SYS_PTRACE + SYS_RAWIO + SYS_RESOURCE + SYS_TIME + SYS_TTY_CONFIG + WAKE_ALARM + " -- "$cur" ) ) +} + +__docker_complete_detach-keys() { + case "$prev" in + --detach-keys) + case "$cur" in + *,) + COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) ) + ;; + esac + + __docker_nospace + return + ;; + esac + return 1 +} + +__docker_complete_isolation() { + COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) ) +} + +__docker_complete_log_drivers() { + COMPREPLY=( $( compgen -W " + awslogs + etwlogs + fluentd + gcplogs + gelf + journald + json-file + logentries + none + splunk + syslog + " -- "$cur" ) ) +} + +__docker_complete_log_options() { + # see docs/reference/logging/index.md + local awslogs_options="awslogs-region awslogs-group awslogs-stream" + local fluentd_options="env fluentd-address fluentd-async-connect fluentd-buffer-limit fluentd-retry-wait fluentd-max-retries labels tag" + local gcplogs_options="env gcp-log-cmd gcp-project labels" + local gelf_options="env gelf-address gelf-compression-level gelf-compression-type labels tag" + local journald_options="env labels tag" + local json_file_options="env labels max-file max-size" + local logentries_options="logentries-token" + local syslog_options="env labels syslog-address syslog-facility syslog-format syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify tag" + local splunk_options="env labels splunk-caname splunk-capath splunk-format splunk-gzip splunk-gzip-level splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url splunk-verify-connection tag" + + local all_options="$fluentd_options $gcplogs_options $gelf_options $journald_options $logentries_options $json_file_options $syslog_options $splunk_options" + + case $(__docker_value_of_option --log-driver) in + '') + COMPREPLY=( $( compgen -W "$all_options" -S = -- "$cur" ) ) + ;; + awslogs) + COMPREPLY=( $( compgen -W "$awslogs_options" -S = -- "$cur" ) ) + ;; + fluentd) + COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) ) + ;; + gcplogs) + COMPREPLY=( $( compgen -W "$gcplogs_options" -S = -- "$cur" ) ) + ;; + gelf) + COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) ) + ;; + journald) + COMPREPLY=( $( compgen -W "$journald_options" -S = -- "$cur" ) ) + ;; + json-file) + COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) ) + ;; + logentries) + COMPREPLY=( $( compgen -W "$logentries_options" -S = -- "$cur" ) ) + ;; + syslog) + COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) ) + ;; + splunk) + COMPREPLY=( $( compgen -W "$splunk_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + + __docker_nospace +} + +__docker_complete_log_driver_options() { + local key=$(__docker_map_key_of_current_option '--log-opt') + case "$key" in + fluentd-async-connect) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + gelf-address) + COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur##*=}" ) ) + __docker_nospace + return + ;; + gelf-compression-level) + COMPREPLY=( $( compgen -W "1 2 3 4 5 6 7 8 9" -- "${cur##*=}" ) ) + return + ;; + gelf-compression-type) + COMPREPLY=( $( compgen -W "gzip none zlib" -- "${cur##*=}" ) ) + return + ;; + syslog-address) + COMPREPLY=( $( compgen -W "tcp:// tcp+tls:// udp:// unix://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + syslog-facility) + COMPREPLY=( $( compgen -W " + auth + authpriv + cron + daemon + ftp + kern + local0 + local1 + local2 + local3 + local4 + local5 + local6 + local7 + lpr + mail + news + syslog + user + uucp + " -- "${cur##*=}" ) ) + return + ;; + syslog-format) + COMPREPLY=( $( compgen -W "rfc3164 rfc5424 rfc5424micro" -- "${cur##*=}" ) ) + return + ;; + syslog-tls-ca-cert|syslog-tls-cert|syslog-tls-key) + _filedir + return + ;; + syslog-tls-skip-verify) + COMPREPLY=( $( compgen -W "true" -- "${cur##*=}" ) ) + return + ;; + splunk-url) + COMPREPLY=( $( compgen -W "http:// https://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + splunk-gzip|splunk-insecureskipverify|splunk-verify-connection) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + splunk-format) + COMPREPLY=( $( compgen -W "inline json raw" -- "${cur##*=}" ) ) + return + ;; + esac + return 1 +} + +__docker_complete_log_levels() { + COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) +} + +__docker_complete_restart() { + case "$prev" in + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "always no on-failure on-failure: unless-stopped" -- "$cur") ) + ;; + esac + return + ;; + esac + return 1 +} + +# __docker_complete_signals returns a subset of the available signals that is most likely +# relevant in the context of docker containers +__docker_complete_signals() { + local signals=( + SIGCONT + SIGHUP + SIGINT + SIGKILL + SIGQUIT + SIGSTOP + SIGTERM + SIGUSR1 + SIGUSR2 + ) + COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) +} + +__docker_complete_user_group() { + if [[ $cur == *:* ]] ; then + COMPREPLY=( $(compgen -g -- "${cur#*:}") ) + else + COMPREPLY=( $(compgen -u -S : -- "$cur") ) + __docker_nospace + fi +} + +_docker_docker() { + # global options that may appear after the docker command + local boolean_options=" + $global_boolean_options + --help + --version -v + " + + case "$prev" in + --config) + _filedir -d + return + ;; + --log-level|-l) + __docker_complete_log_levels + return + ;; + $(__docker_to_extglob "$global_options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag "$(__docker_to_extglob "$global_options_with_args")" ) + if [ $cword -eq $counter ]; then + __docker_is_experimental && commands+=(${experimental_commands[*]}) + COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_attach() { + _docker_container_attach +} + +_docker_build() { + _docker_image_build +} + + +_docker_checkpoint() { + local subcommands=" + create + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_checkpoint_create() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help --leave-running" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_checkpoint_ls() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_checkpoint_rm() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + elif [ $cword -eq $(($counter + 1)) ]; then + COMPREPLY=( $( compgen -W "$(__docker_q checkpoint ls "$prev" | sed 1d)" -- "$cur" ) ) + fi + ;; + esac +} + + +_docker_container() { + local subcommands=" + attach + commit + cp + create + diff + exec + export + inspect + kill + logs + ls + pause + port + prune + rename + restart + rm + run + start + stats + stop + top + unpause + update + wait + " + local aliases=" + list + ps + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_container_attach() { + __docker_complete_detach-keys && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy=false" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--detach-keys') + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_container_commit() { + case "$prev" in + --author|-a|--change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause=false -p=false" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m') + + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_container_cp() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + case "$cur" in + *:) + return + ;; + *) + # combined container and filename completion + _filedir + local files=( ${COMPREPLY[@]} ) + + __docker_complete_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + local containers=( ${COMPREPLY[@]} ) + + COMPREPLY=( $( compgen -W "${files[*]} ${containers[*]}" -- "$cur" ) ) + if [[ "$COMPREPLY" == *: ]]; then + __docker_nospace + fi + return + ;; + esac + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + if [ -e "$prev" ]; then + __docker_complete_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + else + _filedir + fi + return + fi + ;; + esac +} + +_docker_container_create() { + _docker_container_run +} + +_docker_container_diff() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_exec() { + __docker_complete_detach-keys && return + + case "$prev" in + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--detach -d --detach-keys --env -e --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_export() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_inspect() { + _docker_inspect --type container +} + +_docker_container_kill() { + case "$prev" in + --signal|-s) + __docker_complete_signals + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--details --follow -f --help --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_list() { + _docker_container_ls +} + +_docker_container_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + ancestor) + cur="${cur##*=}" + __docker_complete_images + return + ;; + before) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + id) + __docker_complete_containers_all --cur "${cur##*=}" --id + return + ;; + health) + COMPREPLY=( $( compgen -W "healthy starting none unhealthy" -- "${cur##*=}" ) ) + return + ;; + is-task) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_containers_all --cur "${cur##*=}" --name + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + since) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + status) + COMPREPLY=( $( compgen -W "created dead exited paused restarting running removing" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "ancestor before exited health id is-task label name network since status volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --format|--last|-n) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --format --help --last -n --latest -l --no-trunc --quiet -q --size -s" -- "$cur" ) ) + ;; + esac +} + +_docker_container_pause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_port() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_container_ps() { + _docker_container_ls +} + +_docker_container_rename() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_restart() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) + ;; + *) + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --force|-f) + __docker_complete_containers_all + return + ;; + esac + done + __docker_complete_containers_stopped + ;; + esac +} + +_docker_container_run() { + local options_with_args=" + --add-host + --attach -a + --blkio-weight + --blkio-weight-device + --cap-add + --cap-drop + --cgroup-parent + --cidfile + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpuset-cpus + --cpus + --cpuset-mems + --cpu-shares -c + --device + --device-read-bps + --device-read-iops + --device-write-bps + --device-write-iops + --dns + --dns-option + --dns-search + --entrypoint + --env -e + --env-file + --expose + --group-add + --hostname -h + --init-path + --ip + --ip6 + --ipc + --isolation + --kernel-memory + --label-file + --label -l + --link + --link-local-ip + --log-driver + --log-opt + --mac-address + --memory -m + --memory-swap + --memory-swappiness + --memory-reservation + --name + --network + --network-alias + --oom-score-adj + --pid + --pids-limit + --publish -p + --restart + --runtime + --security-opt + --shm-size + --stop-signal + --stop-timeout + --storage-opt + --tmpfs + --sysctl + --ulimit + --user -u + --userns + --uts + --volume-driver + --volumes-from + --volume -v + --workdir -w + " + + local boolean_options=" + --disable-content-trust=false + --help + --init + --interactive -i + --oom-kill-disable + --privileged + --publish-all -P + --read-only + --tty -t + " + + if [ "$command" = "run" -o "$subcommand" = "run" ] ; then + options_with_args="$options_with_args + --detach-keys + --health-cmd + --health-interval + --health-retries + --health-timeout + " + boolean_options="$boolean_options + --detach -d + --no-healthcheck + --rm + --sig-proxy=false + " + __docker_complete_detach-keys && return + fi + + local all_options="$options_with_args $boolean_options" + + + __docker_complete_log_driver_options && return + __docker_complete_restart && return + + local key=$(__docker_map_key_of_current_option '--security-opt') + case "$key" in + label) + [[ $cur == *: ]] && return + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "${cur##*=}") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; then + __docker_nospace + fi + return + ;; + seccomp) + local cur=${cur##*=} + _filedir + COMPREPLY+=( $( compgen -W "unconfined" -- "$cur" ) ) + return + ;; + esac + + case "$prev" in + --add-host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --attach|-a) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cap-add|--cap-drop) + __docker_complete_capabilities + return + ;; + --cidfile|--env-file|--init-path|--label-file) + _filedir + return + ;; + --device|--tmpfs|--volume|-v) + case "$cur" in + *:*) + # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) + ;; + '') + COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) + __docker_nospace + ;; + /*) + _filedir + __docker_nospace + ;; + esac + return + ;; + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --ipc) + case "$cur" in + *:*) + cur="${cur#*:}" + __docker_complete_containers_running + ;; + *) + COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) + if [ "$COMPREPLY" = "container:" ]; then + __docker_nospace + fi + ;; + esac + return + ;; + --isolation) + __docker_complete_isolation + return + ;; + --link) + case "$cur" in + *:*) + ;; + *) + __docker_complete_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + ;; + esac + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --network) + case "$cur" in + container:*) + __docker_complete_containers_all --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + __docker_nospace + fi + ;; + esac + return + ;; + --pid) + case "$cur" in + *:*) + __docker_complete_containers_running --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) + if [ "$COMPREPLY" = "container:" ]; then + __docker_nospace + fi + ;; + esac + return + ;; + --runtime) + __docker_complete_runtimes + return + ;; + --security-opt) + COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp=" -- "$cur") ) + if [ "${COMPREPLY[*]}" != "no-new-privileges" ] ; then + __docker_nospace + fi + return + ;; + --storage-opt) + COMPREPLY=( $( compgen -W "size" -S = -- "$cur") ) + __docker_nospace + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + --userns) + COMPREPLY=( $( compgen -W "host" -- "$cur" ) ) + return + ;; + --volume-driver) + __docker_complete_plugins_bundled --type Volume + return + ;; + --volumes-from) + __docker_complete_containers_all + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + ;; + esac +} + +_docker_container_start() { + __docker_complete_detach-keys && return + + case "$prev" in + --checkpoint) + if [ __docker_is_experimental ] ; then + return + fi + ;; + --checkpoint-dir) + if [ __docker_is_experimental ] ; then + _filedir -d + return + fi + ;; + esac + + case "$cur" in + -*) + local options="--attach -a --detach-keys --help --interactive -i" + __docker_is_experimental && options+=" --checkpoint --checkpoint-dir" + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_stopped + ;; + esac +} + +_docker_container_stats() { + case "$prev" in + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --format --help --no-stream" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_stop() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_top() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_container_unpause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_unpauseable + fi + ;; + esac +} + +_docker_container_update() { + local options_with_args=" + --blkio-weight + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --kernel-memory + --memory -m + --memory-reservation + --memory-swap + --restart + " + + local boolean_options=" + --help + " + + local all_options="$options_with_args $boolean_options" + + __docker_complete_restart && return + + case "$prev" in + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_wait() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + + +_docker_commit() { + _docker_container_commit +} + +_docker_cp() { + _docker_container_cp +} + +_docker_create() { + _docker_container_run +} + +_docker_daemon() { + local boolean_options=" + $global_boolean_options + --disable-legacy-registry + --experimental + --help + --icc=false + --init + --ip-forward=false + --ip-masq=false + --iptables=false + --ipv6 + --live-restore + --raw-logs + --selinux-enabled + --userland-proxy=false + " + local options_with_args=" + $global_options_with_args + --add-runtime + --api-cors-header + --authorization-plugin + --bip + --bridge -b + --cgroup-parent + --cluster-advertise + --cluster-store + --cluster-store-opt + --config-file + --containerd + --default-gateway + --default-gateway-v6 + --default-ulimit + --dns + --dns-search + --dns-opt + --exec-opt + --exec-root + --fixed-cidr + --fixed-cidr-v6 + --graph -g + --group -G + --init-path + --insecure-registry + --ip + --label + --log-driver + --log-opt + --max-concurrent-downloads + --max-concurrent-uploads + --mtu + --oom-score-adjust + --pidfile -p + --registry-mirror + --seccomp-profile + --shutdown-timeout + --storage-driver -s + --storage-opt + --userland-proxy-path + --userns-remap + " + + __docker_complete_log_driver_options && return + + key=$(__docker_map_key_of_current_option '--cluster-store-opt') + case "$key" in + kv.*file) + cur=${cur##*=} + _filedir + return + ;; + esac + + local key=$(__docker_map_key_of_current_option '--storage-opt') + case "$key" in + dm.blkdiscard|dm.override_udev_sync_check|dm.use_deferred_removal|dm.use_deferred_deletion) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + dm.fs) + COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur##*=}" ) ) + return + ;; + dm.thinpooldev) + cur=${cur##*=} + _filedir + return + ;; + esac + + case "$prev" in + --authorization-plugin) + __docker_complete_plugins_bundled --type Authorization + return + ;; + --cluster-store) + COMPREPLY=( $( compgen -W "consul etcd zk" -S "://" -- "$cur" ) ) + __docker_nospace + return + ;; + --cluster-store-opt) + COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --config-file|--containerd|--init-path|--pidfile|-p|--tlscacert|--tlscert|--tlskey|--userland-proxy-path) + _filedir + return + ;; + --exec-root|--graph|-g) + _filedir -d + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --storage-driver|-s) + COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay overlay2 vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) + return + ;; + --storage-opt) + local btrfs_options="btrfs.min_space" + local devicemapper_options=" + dm.basesize + dm.blkdiscard + dm.blocksize + dm.fs + dm.loopdatasize + dm.loopmetadatasize + dm.min_free_space + dm.mkfsarg + dm.mountopt + dm.override_udev_sync_check + dm.thinpooldev + dm.use_deferred_deletion + dm.use_deferred_removal + " + local zfs_options="zfs.fsname" + + case $(__docker_value_of_option '--storage-driver|-s') in + '') + COMPREPLY=( $( compgen -W "$btrfs_options $devicemapper_options $zfs_options" -S = -- "$cur" ) ) + ;; + btrfs) + COMPREPLY=( $( compgen -W "$btrfs_options" -S = -- "$cur" ) ) + ;; + devicemapper) + COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) ) + ;; + zfs) + COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + __docker_nospace + return + ;; + --log-level|-l) + __docker_complete_log_levels + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --seccomp-profile) + _filedir json + return + ;; + --userns-remap) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + esac +} + +_docker_deploy() { + __docker_is_experimental && _docker_stack_deploy +} + +_docker_diff() { + _docker_container_diff +} + +_docker_events() { + _docker_system_events +} + +_docker_exec() { + _docker_container_exec +} + +_docker_export() { + _docker_container_export +} + +_docker_help() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) + fi +} + +_docker_history() { + _docker_image_history +} + + +_docker_image() { + local subcommands=" + build + history + import + inspect + load + ls + prune + pull + push + rm + save + tag + " + local aliases=" + images + list + remove + rmi + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_image_build() { + local options_with_args=" + --build-arg + --cache-from + --cgroup-parent + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --cpu-period + --cpu-quota + --file -f + --isolation + --label + --memory -m + --memory-swap + --network + --shm-size + --tag -t + --ulimit + " + + local boolean_options=" + --compress + --disable-content-trust=false + --force-rm + --help + --no-cache + --pull + --quiet -q + --rm + " + __docker_is_experimental && boolean_options+="--squash" + + local all_options="$options_with_args $boolean_options" + + case "$prev" in + --build-arg) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --cache-from) + __docker_complete_image_repos_and_tags + return + ;; + --file|-f) + _filedir + return + ;; + --isolation) + __docker_complete_isolation + return + ;; + --network) + case "$cur" in + container:*) + __docker_complete_containers_all --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W "$(__docker_plugins --type Network) $(__docker_networks) container:" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + __docker_nospace + fi + ;; + esac + return + ;; + --tag|-t) + __docker_complete_image_repos_and_tags + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + _filedir -d + fi + ;; + esac +} + +_docker_image_history() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --human=false -H=false --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + ;; + esac +} + +_docker_image_images() { + _docker_image_ls +} + +_docker_image_import() { + case "$prev" in + --change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m') + if [ $cword -eq $counter ]; then + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_image_inspect() { + _docker_inspect --type image +} + +_docker_image_load() { + case "$prev" in + --input|-i) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --input -i --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_image_list() { + _docker_image_ls +} + +_docker_image_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + before|since|reference) + cur="${cur##*=}" + __docker_complete_images + return + ;; + dangling) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + label) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "before dangling label reference since" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + =) + return + ;; + *) + __docker_complete_image_repos + ;; + esac +} + +_docker_image_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_image_pull() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all-tags -a --disable-content-trust=false --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --all-tags|-a) + __docker_complete_image_repos + return + ;; + esac + done + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--disable-content-trust=false --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_remove() { + _docker_image_rm +} + +_docker_image_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_rmi() { + _docker_image_rm +} + +_docker_image_save() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_tag() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + + +_docker_images() { + _docker_image_ls +} + +_docker_import() { + _docker_image_import +} + +_docker_info() { + _docker_system_info +} + +_docker_inspect() { + local preselected_type + local type + + if [ "$1" = "--type" ] ; then + preselected_type=yes + type="$2" + else + type=$(__docker_value_of_option --type) + fi + + case "$prev" in + --format|-f) + return + ;; + --type) + if [ -z "$preselected_type" ] ; then + COMPREPLY=( $( compgen -W "container image network node plugin service volume" -- "$cur" ) ) + return + fi + ;; + esac + + case "$cur" in + -*) + local options="--format -f --help --size -s" + if [ -z "$preselected_type" ] ; then + options+=" --type" + fi + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + *) + case "$type" in + '') + COMPREPLY=( $( compgen -W " + $(__docker_containers --all) + $(__docker_images) + $(__docker_networks) + $(__docker_nodes) + $(__docker_plugins_installed) + $(__docker_services) + $(__docker_volumes) + " -- "$cur" ) ) + ;; + container) + __docker_complete_containers_all + ;; + image) + __docker_complete_images + ;; + network) + __docker_complete_networks + ;; + node) + __docker_complete_nodes + ;; + plugin) + __docker_complete_plugins_installed + ;; + service) + __docker_complete_services + ;; + volume) + __docker_complete_volumes + ;; + esac + esac +} + +_docker_kill() { + _docker_container_kill +} + +_docker_load() { + _docker_image_load +} + +_docker_login() { + case "$prev" in + --password|-p|--username|-u) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --password -p --username -u" -- "$cur" ) ) + ;; + esac +} + +_docker_logout() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_logs() { + _docker_container_logs +} + +_docker_network_connect() { + local options_with_args=" + --alias + --ip + --ip6 + --link + --link-local-ip + " + + local boolean_options=" + --help + " + + case "$prev" in + --link) + case "$cur" in + *:*) + ;; + *) + __docker_complete_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + ;; + esac + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_network_create() { + case "$prev" in + --aux-address|--gateway|--internal|--ip-range|--ipam-opt|--ipv6|--opt|-o|--subnet) + return + ;; + --ipam-driver) + COMPREPLY=( $( compgen -W "default" -- "$cur" ) ) + return + ;; + --driver|-d) + # remove drivers that allow one instance only, add drivers missing in `docker info` + __docker_complete_plugins_bundled --type Network --remove host --remove null --add macvlan + return + ;; + --label) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--attachable --aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt --ipv6 --label --opt -o --subnet" -- "$cur" ) ) + ;; + esac +} + +_docker_network_disconnect() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_in_network "$prev" + fi + ;; + esac +} + +_docker_network_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_networks + esac +} + +_docker_network_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Network --add macvlan + return + ;; + id) + __docker_complete_networks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_networks --cur "${cur##*=}" --name + return + ;; + type) + COMPREPLY=( $( compgen -W "builtin custom" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "driver id label name type" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_network_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_network_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_networks --filter type=custom + esac +} + +_docker_network() { + local subcommands=" + connect + create + disconnect + inspect + ls + prune + rm + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_service() { + local subcommands=" + create + inspect + ls list + rm remove + scale + ps + update + " + __docker_daemon_is_experimental && subcommands+="logs" + + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_service_create() { + _docker_service_update +} + +_docker_service_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--details --follow -f --help --no-resolve --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + ;; + esac +} + +_docker_service_list() { + _docker_service_ls +} + +_docker_service_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_service_remove() { + _docker_service_rm +} + +_docker_service_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_scale() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + __docker_append_to_completions "=" + __docker_nospace + ;; + esac +} + +_docker_service_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + node) + __docker_complete_nodes_plus_self --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id name node" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + ;; + esac +} + +_docker_service_update() { + local $subcommand="${words[$subcommand_pos]}" + + local options_with_args=" + --constraint + --endpoint-mode + --env -e + --force + --health-cmd + --health-interval + --health-retries + --health-timeout + --hostname + --label -l + --limit-cpu + --limit-memory + --log-driver + --log-opt + --mount + --network + --no-healthcheck + --replicas + --reserve-cpu + --reserve-memory + --restart-condition + --restart-delay + --restart-max-attempts + --restart-window + --rollback + --stop-grace-period + --update-delay + --update-failure-action + --update-max-failure-ratio + --update-monitor + --update-parallelism + --user -u + --workdir -w + " + + local boolean_options=" + --help + --tty -t + --with-registry-auth + " + + __docker_complete_log_driver_options && return + + if [ "$subcommand" = "create" ] ; then + options_with_args="$options_with_args + --container-label + --dns + --dns-option + --dns-search + --env-file + --group + --host + --mode + --name + --publish -p + --secret + " + + case "$prev" in + --env-file) + _filedir + return + ;; + --host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --mode) + COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) ) + return + ;; + --secret) + __docker_complete_secrets + return + ;; + --group) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + esac + fi + if [ "$subcommand" = "update" ] ; then + options_with_args="$options_with_args + --arg + --container-label-add + --container-label-rm + --dns-add + --dns-option-add + --dns-option-rm + --dns-rm + --dns-search-add + --dns-search-rm + --group-add + --group-rm + --host-add + --host-rm + --image + --publish-add + --publish-rm + --secret-add + --secret-rm + " + + case "$prev" in + --group-add) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + --group-rm) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + --host-add|--host-rm) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --image) + __docker_complete_image_repos_and_tags + return + ;; + --secret-add|--secret-rm) + __docker_complete_secrets + return + ;; + esac + fi + + case "$prev" in + --endpoint-mode) + COMPREPLY=( $( compgen -W "dnsrr vip" -- "$cur" ) ) + return + ;; + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --network) + __docker_complete_networks + return + ;; + --restart-condition) + COMPREPLY=( $( compgen -W "any none on-failure" -- "$cur" ) ) + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ "$subcommand" = "update" ] ; then + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + else + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + fi + ;; + esac +} + +_docker_swarm() { + local subcommands=" + init + join + join-token + leave + unlock + unlock-key + update + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_init() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --autolock --availability --cert-expiry --dispatcher-heartbeat --external-ca --force-new-cluster --help --listen-addr --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_join() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + --token) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --help --listen-addr --token" -- "$cur" ) ) + ;; + *:) + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + ;; + esac +} + +_docker_swarm_join-token() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag ) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_swarm_leave() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock-key() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_update() { + case "$prev" in + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--autolock --cert-expiry --dispatcher-heartbeat --external-ca --help --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_node() { + local subcommands=" + demote + inspect + ls list + promote + rm remove + ps + update + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_node_demote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=manager + esac +} + +_docker_node_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes_plus_self + esac +} + +_docker_node_list() { + _docker_node_ls +} + +_docker_node_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_nodes --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_nodes --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_node_promote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=worker + esac +} + +_docker_node_remove() { + _docker_node_rm +} + +_docker_node_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes + esac +} + +_docker_node_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --no-resolve --no-trunc" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes_plus_self + ;; + esac +} + +_docker_node_update() { + case "$prev" in + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --role) + COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) + return + ;; + --label-add|--label-rm) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--availability --help --label-add --label-rm --role" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes + esac +} + +_docker_pause() { + _docker_container_pause +} + +_docker_plugin() { + local subcommands=" + create + disable + enable + inspect + install + ls + push + rm + set + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_create() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--compress --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + # reponame + return + elif [ $cword -eq $((counter + 1)) ]; then + _filedir -d + fi + ;; + esac +} + +_docker_plugin_disable() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_enable() { + case "$prev" in + --timeout) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --timeout" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--timeout') + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_inspect() { + case "$prev" in + --format|f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_plugins_installed + ;; + esac +} + +_docker_plugin_install() { + case "$prev" in + --alias) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--alias --disable --disable-content-trust=false --grant-all-permissions --help" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_list() { + _docker_plugin_ls +} + +_docker_plugin_ls() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --no-trunc" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_remove() { + _docker_plugin_rm +} + +_docker_plugin_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_plugins_installed + ;; + esac +} + +_docker_plugin_set() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + + +_docker_port() { + _docker_container_port +} + +_docker_ps() { + _docker_container_ls +} + +_docker_pull() { + _docker_image_pull +} + +_docker_push() { + _docker_image_push +} + +_docker_rename() { + _docker_container_rename +} + +_docker_restart() { + _docker_container_restart +} + +_docker_rm() { + _docker_container_rm +} + +_docker_rmi() { + _docker_image_rm +} + +_docker_run() { + _docker_container_run +} + +_docker_save() { + _docker_image_save +} + + +_docker_secret() { + local subcommands=" + create + inspect + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_create() { + case "$prev" in + --label|-l) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --label -l" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_list() { + _docker_secret_ls +} + +_docker_secret_ls() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_remove() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_rm() { + _docker_secret_remove +} + + + +_docker_search() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + is-automated) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + is-official) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "is-automated is-official stars" -- "$cur" ) ) + __docker_nospace + return + ;; + --limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter --help --limit --no-trunc" -- "$cur" ) ) + ;; + esac +} + + +_docker_stack() { + local subcommands=" + deploy + ls + ps + rm + services + " + local aliases=" + down + list + remove + up + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_deploy() { + case "$prev" in + --bundle-file) + if __docker_is_experimental ; then + _filedir dab + return + fi + ;; + --compose-file|-c) + _filedir yml + return + ;; + esac + + case "$cur" in + -*) + local options="--compose-file -c --help --with-registry-auth" + __docker_is_experimental && options+=" --bundle-file" + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_down() { + _docker_stack_rm +} + +_docker_stack_list() { + _docker_stack_ls +} + +_docker_stack_ls() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) + return + ;; + id) + __docker_complete_stacks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_stacks --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id name desired-state" -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve --no-trunc" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_remove() { + _docker_stack_rm +} + +_docker_stack_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_services() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + label) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id label name" -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_up() { + _docker_stack_deploy +} + + +_docker_start() { + _docker_container_start +} + +_docker_stats() { + _docker_container_stats +} + +_docker_stop() { + _docker_container_stop +} + + +_docker_system() { + local subcommands=" + df + events + info + prune + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_system_df() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --verbose -v" -- "$cur" ) ) + ;; + esac +} + +_docker_system_events() { + local key=$(__docker_map_key_of_current_option '-f|--filter') + case "$key" in + container) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + daemon) + local name=$(__docker_q info | sed -n 's/^\(ID\|Name\): //p') + COMPREPLY=( $( compgen -W "$name" -- "${cur##*=}" ) ) + return + ;; + event) + COMPREPLY=( $( compgen -W " + attach + commit + connect + copy + create + delete + destroy + detach + die + disconnect + exec_create + exec_detach + exec_start + export + health_status + import + kill + load + mount + oom + pause + pull + push + reload + rename + resize + restart + save + start + stop + tag + top + unmount + unpause + untag + update + " -- "${cur##*=}" ) ) + return + ;; + image) + cur="${cur##*=}" + __docker_complete_images + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + type) + COMPREPLY=( $( compgen -W "container daemon image network volume" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --since|--until) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --since --until --format" -- "$cur" ) ) + ;; + esac +} + +_docker_system_info() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_system_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --help" -- "$cur" ) ) + ;; + esac +} + + +_docker_tag() { + _docker_image_tag +} + +_docker_unpause() { + _docker_container_unpause +} + +_docker_update() { + _docker_container_update +} + +_docker_top() { + _docker_container_top +} + +_docker_version() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_create() { + case "$prev" in + --driver|-d) + __docker_complete_plugins_bundled --type Volume + return + ;; + --label|--opt|-o) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--driver -d --help --label --opt -o" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + dangling) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Volume + return + ;; + name) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "dangling driver label name" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume() { + local subcommands=" + create + inspect + ls + prune + rm + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_wait() { + _docker_container_wait +} + +_docker() { + local previous_extglob_setting=$(shopt -p extglob) + shopt -s extglob + + local management_commands=( + container + image + network + node + plugin + secret + service + stack + system + volume + ) + + local top_level_commands=( + build + login + logout + run + search + version + ) + + local legacy_commands=( + commit + cp + create + diff + events + exec + export + history + images + import + info + inspect + kill + load + logs + pause + port + ps + pull + push + rename + restart + rm + rmi + save + start + stats + stop + swarm + tag + top + unpause + update + wait + ) + + local experimental_commands=( + checkpoint + deploy + ) + + local commands=(${management_commands[*]} ${top_level_commands[*]}) + [ -z "$DOCKER_HIDE_LEGACY_COMMANDS" ] && commands+=(${legacy_commands[*]}) + + # These options are valid as global options for all client commands + # and valid as command options for `docker daemon` + local global_boolean_options=" + --debug -D + --tls + --tlsverify + " + local global_options_with_args=" + --config + --host -H + --log-level -l + --tlscacert + --tlscert + --tlskey + " + + local host config + + COMPREPLY=() + local cur prev words cword + _get_comp_words_by_ref -n : cur prev words cword + + local command='docker' command_pos=0 subcommand_pos + local counter=1 + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + # save host so that completion can use custom daemon + --host|-H) + (( counter++ )) + host="${words[$counter]}" + ;; + # save config so that completion can use custom configuration directories + --config) + (( counter++ )) + config="${words[$counter]}" + ;; + $(__docker_to_extglob "$global_options_with_args") ) + (( counter++ )) + ;; + -*) + ;; + =) + (( counter++ )) + ;; + *) + command="${words[$counter]}" + command_pos=$counter + break + ;; + esac + (( counter++ )) + done + + local binary="${words[0]}" + if [[ $binary == ?(*/)dockerd ]] ; then + # for the dockerd binary, we reuse completion of `docker daemon`. + # dockerd does not have subcommands and global options. + command=daemon + command_pos=0 + fi + + local completions_func=_docker_${command} + declare -F $completions_func >/dev/null && $completions_func + + eval "$previous_extglob_setting" + return 0 +} + +eval "$__docker_previous_extglob_setting" +unset __docker_previous_extglob_setting + +complete -F _docker docker dockerd diff --git a/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish b/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish new file mode 100644 index 0000000..2715cb1 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish @@ -0,0 +1,405 @@ +# docker.fish - docker completions for fish shell +# +# This file is generated by gen_docker_fish_completions.py from: +# https://github.com/barnybug/docker-fish-completion +# +# To install the completions: +# mkdir -p ~/.config/fish/completions +# cp docker.fish ~/.config/fish/completions +# +# Completion supported: +# - parameters +# - commands +# - containers +# - images +# - repositories + +function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' + for i in (commandline -opc) + if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats + return 1 + end + end + return 0 +end + +function __fish_print_docker_containers --description 'Print a list of docker containers' -a select + switch $select + case running + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF)}' | tr ',' '\n' + case stopped + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF)}' | tr ',' '\n' + case all + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF)}' | tr ',' '\n' + end +end + +function __fish_print_docker_images --description 'Print a list of docker images' + docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1":"$2}' +end + +function __fish_print_docker_repositories --description 'Print a list of docker repositories' + docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1}' | command sort | command uniq +end + +# common options +complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the Engine API. Default is cors disabled" +complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge' +complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" +complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains' +complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' +complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' +complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" +complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" +complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' +complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level ("debug", "info", "warn", "error", "fatal")' +complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' +complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' +complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. SELinux does not presently support the BTRFS storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' + +# subcommands +# attach +complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" + +# build +complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile(Default is 'Dockerfile' at context root)" +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the build output and print image ID on success' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' + +# commit +complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith ")' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" + +# cp +complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders between a container and the local filesystem" +complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' + +# create +complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of :alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image" + +# diff +complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" + +# events +complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l format -d 'Format the output using the given go template' + +# exec +complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" + +# export +complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" + +# history +complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" + +# images +complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" + +# import +complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' +complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' + +# info +complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' +complete -c docker -A -f -n '__fish_seen_subcommand_from info' -s f -l format -d 'Format the output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from info' -l help -d 'Print usage' + +# inspect +complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s s -l size -d 'Display total file sizes if the type is container.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" + +# kill +complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" + +# load +complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' + +# login +complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Log in to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' + +# logout +complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' + +# logs +complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" + +# port +complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" + +# pause +complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container" + +# ps +complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' + +# pull +complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" + +# push +complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" + +# rename +complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' + +# restart +complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" + +# rm +complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -a '(__fish_print_docker_containers all)' -d "Container" + +# rmi +complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" + +# run +complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of :alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l stop-signal -d 'Signal to kill a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l tmpfs -d 'Mount tmpfs on a directory' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" + +# save +complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to an file, instead of STDOUT' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" + +# search +complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least x stars' + +# start +complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" + +# stats +complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" + +# stop +complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" + +# tag +complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' + +# top +complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" + +# unpause +complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' +complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" + +# version +complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -s f -l format -d 'Format the output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -l help -d 'Print usage' + +# wait +complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" diff --git a/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt b/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt new file mode 100644 index 0000000..18e1b53 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt @@ -0,0 +1 @@ +See https://github.com/samneirinck/posh-docker \ No newline at end of file diff --git a/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS b/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS new file mode 100644 index 0000000..03ee2dd --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/completion/zsh/_docker b/vendor/github.com/docker/docker/contrib/completion/zsh/_docker new file mode 100644 index 0000000..ecae826 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/zsh/_docker @@ -0,0 +1,2787 @@ +#compdef docker dockerd +# +# zsh completion for docker (http://docker.com) +# +# version: 0.3.0 +# github: https://github.com/felixr/docker-zsh-completion +# +# contributors: +# - Felix Riedel +# - Steve Durrheimer +# - Vincent Bernat +# +# license: +# +# Copyright (c) 2013, Felix Riedel +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# Short-option stacking can be enabled with: +# zstyle ':completion:*:*:docker:*' option-stacking yes +# zstyle ':completion:*:*:docker-*:*' option-stacking yes +__docker_arguments() { + if zstyle -t ":completion:${curcontext}:" option-stacking; then + print -- -s + fi +} + +__docker_get_containers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local kind type line s + declare -a running stopped lines args names + + kind=$1; shift + type=$1; shift + [[ $kind = (stopped|all) ]] && args=($args -a) + + lines=(${(f)${:-"$(_call_program commands docker $docker_options ps --format 'table' --no-trunc $args)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 # Last column, should go to the end of the line + lines=(${lines[2,-1]}) + + # Container ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}[0,12]}" + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + fi + + # Names: we only display the one without slash. All other names + # are generated and may clutter the completion. However, with + # Swarm, all names may be prefixed by the swarm node name. + if [[ $type = (names|all) ]]; then + for line in $lines; do + names=(${(ps:,:)${${line[${begin[NAMES]},${end[NAMES]}]}%% *}}) + # First step: find a common prefix and strip it (swarm node case) + (( ${#${(u)names%%/*}} == 1 )) && names=${names#${names[1]%%/*}/} + # Second step: only keep the first name without a / + s=${${names:#*/*}[1]} + # If no name, well give up. + (( $#s != 0 )) || continue + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + fi + + [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0 + [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0 + return ret +} + +__docker_complete_stopped_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers stopped all "$@" +} + +__docker_complete_running_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers running all "$@" +} + +__docker_complete_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all all "$@" +} + +__docker_complete_containers_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all ids "$@" +} + +__docker_complete_containers_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all names "$@" +} + +__docker_complete_info_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + emulate -L zsh + setopt extendedglob + local -a plugins + plugins=(${(ps: :)${(M)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Plugins:}%%$'\n'^ *}}:# $1: *}## $1: }) + _describe -t plugins "$1 plugins" plugins && ret=0 + return ret +} + +__docker_complete_images() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a images + images=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) + _describe -t docker-images "images" images && ret=0 + __docker_complete_repositories_with_tags && ret=0 + return ret +} + +__docker_complete_repositories() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos + repos=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}%% *}[2,-1]}) + repos=(${repos#}) + _describe -t docker-repos "repositories" repos && ret=0 + return ret +} + +__docker_complete_repositories_with_tags() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos onlyrepos matched + declare m + repos=(${${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/ ##/:::}%% *}) + repos=(${${repos%:::}#}) + # Check if we have a prefix-match for the current prefix. + onlyrepos=(${repos%::*}) + for m in $onlyrepos; do + [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && { + # Yes, complete with tags + repos=(${${repos/:::/:}/:/\\:}) + _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0 + return ret + } + done + # No, only complete repositories + onlyrepos=(${${repos%:::*}/:/\\:}) + _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0 + + return ret +} + +__docker_search() { + [[ $PREFIX = -* ]] && return 1 + local cache_policy + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + local searchterm cachename + searchterm="${words[$CURRENT]%/}" + cachename=_docker-search-$searchterm + + local expl + local -a result + if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ + && ! _retrieve_cache ${cachename#_}; then + _message "Searching for ${searchterm}..." + result=(${${${(f)${:-"$(_call_program commands docker $docker_options search $searchterm)"$'\n'}}%% *}[2,-1]}) + _store_cache ${cachename#_} result + fi + _wanted dockersearch expl 'available images' compadd -a result +} + +__docker_get_log_options() { + [[ $PREFIX = -* ]] && return 1 + + integer ret=1 + local log_driver=${opt_args[--log-driver]:-"all"} + local -a awslogs_options fluentd_options gelf_options journald_options json_file_options logentries_options syslog_options splunk_options + + awslogs_options=("awslogs-region" "awslogs-group" "awslogs-stream") + fluentd_options=("env" "fluentd-address" "fluentd-async-connect" "fluentd-buffer-limit" "fluentd-retry-wait" "fluentd-max-retries" "labels" "tag") + gcplogs_options=("env" "gcp-log-cmd" "gcp-project" "labels") + gelf_options=("env" "gelf-address" "gelf-compression-level" "gelf-compression-type" "labels" "tag") + journald_options=("env" "labels" "tag") + json_file_options=("env" "labels" "max-file" "max-size") + logentries_options=("logentries-token") + syslog_options=("env" "labels" "syslog-address" "syslog-facility" "syslog-format" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "tag") + splunk_options=("env" "labels" "splunk-caname" "splunk-capath" "splunk-format" "splunk-gzip" "splunk-gzip-level" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "splunk-verify-connection" "tag") + + [[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0 + [[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0 + [[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0 + [[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0 + [[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0 + [[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0 + [[ $log_driver = (logentries|all) ]] && _describe -t logentries-options "logentries options" logentries_options "$@" && ret=0 + [[ $log_driver = (syslog|all) ]] && _describe -t syslog-options "syslog options" syslog_options "$@" && ret=0 + [[ $log_driver = (splunk|all) ]] && _describe -t splunk-options "splunk options" splunk_options "$@" && ret=0 + + return ret +} + +__docker_complete_log_drivers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + drivers=(awslogs etwlogs fluentd gcplogs gelf journald json-file none splunk syslog) + _describe -t log-drivers "log drivers" drivers && ret=0 + return ret +} + +__docker_complete_log_options() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (syslog-format) + syslog_format_opts=('rfc3164' 'rfc5424' 'rfc5424micro') + _describe -t syslog-format-opts "Syslog format Options" syslog_format_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + __docker_get_log_options -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_detach_keys() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + compset -P "*," + keys=(${:-{a-z}}) + ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}}) + _describe -t detach_keys "[a-z]" keys -qS "," && ret=0 + _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0 +} + +__docker_complete_pid() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local -a opts vopts + + opts=('host') + vopts=('container') + + if compset -P '*:'; then + case "${${words[-1]%:*}#*=}" in + (container) + __docker_complete_running_containers && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t pid-value-opts "PID Options with value" vopts -qS ":" && ret=0 + _describe -t pid-opts "PID Options" opts && ret=0 + fi + + return ret +} + +__docker_complete_runtimes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + emulate -L zsh + setopt extendedglob + local -a runtimes_opts + runtimes_opts=(${(ps: :)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Runtimes: }%%$'\n'^ *}}}) + _describe -t runtimes-opts "runtimes options" runtimes_opts && ret=0 +} + +__docker_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (ancestor) + __docker_complete_images && ret=0 + ;; + (before|since) + __docker_complete_containers && ret=0 + ;; + (health) + health_opts=('healthy' 'none' 'starting' 'unhealthy') + _describe -t health-filter-opts "health filter options" health_opts && ret=0 + ;; + (id) + __docker_complete_containers_ids && ret=0 + ;; + (is-task) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + (name) + __docker_complete_containers_names && ret=0 + ;; + (network) + __docker_complete_networks && ret=0 + ;; + (status) + status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running' 'removing') + _describe -t status-filter-opts "status filter options" status_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('ancestor' 'before' 'exited' 'health' 'id' 'label' 'name' 'network' 'since' 'status' 'volume') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_search_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('is-automated' 'is-official' 'stars') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (is-automated|is-official) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_images_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('before' 'dangling' 'label' 'reference' 'since') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (before|reference|since) + __docker_complete_images && ret=0 + ;; + (dangling) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_events_filter() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('container' 'daemon' 'event' 'image' 'label' 'network' 'type' 'volume') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (container) + __docker_complete_containers && ret=0 + ;; + (daemon) + emulate -L zsh + setopt extendedglob + local -a daemon_opts + daemon_opts=( + ${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Name: }%%$'\n'^ *}} + ${${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'ID: }%%$'\n'^ *}}//:/\\:} + ) + _describe -t daemon-filter-opts "daemon filter options" daemon_opts && ret=0 + ;; + (event) + local -a event_opts + event_opts=('attach' 'commit' 'connect' 'copy' 'create' 'delete' 'destroy' 'detach' 'die' 'disconnect' 'exec_create' 'exec_detach' + 'exec_start' 'export' 'health_status' 'import' 'kill' 'load' 'mount' 'oom' 'pause' 'pull' 'push' 'reload' 'rename' 'resize' 'restart' 'save' 'start' + 'stop' 'tag' 'top' 'unmount' 'unpause' 'untag' 'update') + _describe -t event-filter-opts "event filter options" event_opts && ret=0 + ;; + (image) + __docker_complete_images && ret=0 + ;; + (network) + __docker_complete_networks && ret=0 + ;; + (type) + local -a type_opts + type_opts=('container' 'daemon' 'image' 'network' 'volume') + _describe -t type-filter-opts "type filter options" type_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_prune_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('until') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +# BO container + +__docker_container_commands() { + local -a _docker_container_subcommands + _docker_container_subcommands=( + "attach:Attach to a running container" + "commit:Create a new image from a container's changes" + "cp:Copy files/folders between a container and the local filesystem" + "create:Create a new container" + "diff:Inspect changes on a container's filesystem" + "exec:Run a command in a running container" + "export:Export a container's filesystem as a tar archive" + "inspect:Display detailed information on one or more containers" + "kill:Kill one or more running containers" + "logs:Fetch the logs of a container" + "ls:List containers" + "pause:Pause all processes within one or more containers" + "port:List port mappings or a specific mapping for the container" + "prune:Remove all stopped containers" + "rename:Rename a container" + "restart:Restart one or more containers" + "rm:Remove one or more containers" + "run:Run a command in a new container" + "start:Start one or more stopped containers" + "stats:Display a live stream of container(s) resource usage statistics" + "stop:Stop one or more running containers" + "top:Display the running processes of a container" + "unpause:Unpause all processes within one or more containers" + "update:Update configuration of one or more containers" + "wait:Block until one or more containers stop, then print their exit codes" + ) + _describe -t docker-container-commands "docker container command" _docker_container_subcommands +} + +__docker_container_subcommand() { + local -a _command_args opts_help opts_attach_exec_run_start opts_create_run opts_create_run_update + local expl help="--help" + integer ret=1 + + opts_attach_exec_run_start=( + "($help)--detach-keys=[Escape key sequence used to detach a container]:sequence:__docker_complete_detach_keys" + ) + opts_create_run=( + "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)" + "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " + "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: " + "($help)*--cap-add=[Add Linux capabilities]:capability: " + "($help)*--cap-drop=[Drop Linux capabilities]:capability: " + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " + "($help)--cidfile=[Write the container ID to the file]:CID file:_files" + "($help)--cpus=[Number of CPUs (default 0.000)]:cpus: " + "($help)*--device=[Add a host device to the container]:device:_files" + "($help)*--device-read-bps=[Limit the read rate (bytes per second) from a device]:device:IO rate: " + "($help)*--device-read-iops=[Limit the read rate (IO per second) from a device]:device:IO rate: " + "($help)*--device-write-bps=[Limit the write rate (bytes per second) to a device]:device:IO rate: " + "($help)*--device-write-iops=[Limit the write rate (IO per second) to a device]:device:IO rate: " + "($help)--disable-content-trust[Skip image verification]" + "($help)*--dns=[Custom DNS servers]:DNS server: " + "($help)*--dns-option=[Custom DNS options]:DNS option: " + "($help)*--dns-search=[Custom DNS search domains]:DNS domains: " + "($help)*"{-e=,--env=}"[Environment variables]:environment variable: " + "($help)--entrypoint=[Overwrite the default entrypoint of the image]:entry point: " + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" + "($help)*--expose=[Expose a port from the container without publishing it]: " + "($help)*--group=[Set one or more supplementary user groups for the container]:group:_groups" + "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts" + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" + "($help)--init[Run an init inside the container that forwards signals and reaps processes]" + "($help)--ip=[Container IPv4 address]:IPv4: " + "($help)--ip6=[Container IPv6 address]:IPv6: " + "($help)--ipc=[IPC namespace to use]:IPC namespace: " + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" + "($help)*--link=[Add link to another container]:link:->link" + "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " + "($help)*"{-l=,--label=}"[Container metadata]:label: " + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" + "($help)*--log-opt=[Log driver specific options]:log driver options:__docker_complete_log_options" + "($help)--mac-address=[Container MAC address]:MAC address: " + "($help)--name=[Container name]:name: " + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" + "($help)*--network-alias=[Add network-scoped alias for the container]:alias: " + "($help)--oom-kill-disable[Disable OOM Killer]" + "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]" + "($help)--pids-limit[Tune container pids limit (set -1 for unlimited)]" + "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" + "($help)*"{-p=,--publish=}"[Expose a container's port to the host]:port:_ports" + "($help)--pid=[PID namespace to use]:PID namespace:__docker_complete_pid" + "($help)--privileged[Give extended privileges to this container]" + "($help)--read-only[Mount the container's root filesystem as read only]" + "($help)*--security-opt=[Security options]:security option: " + "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " + "($help)--stop-timeout=[Timeout (in seconds) to stop a container]:time: " + "($help)*--sysctl=-[sysctl options]:sysctl: " + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" + "($help)*--ulimit=[ulimit options]:ulimit: " + "($help)--userns=[Container user namespace]:user namespace:(host)" + "($help)--tmpfs[mount tmpfs]" + "($help)*-v[Bind mount a volume]:volume: " + "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)" + "($help)*--volumes-from=[Mount volumes from the specified container]:volume: " + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + opts_create_run_update=( + "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)" + "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " + "($help)--kernel-memory=[Kernel memory limit in bytes]:Memory limit: " + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " + "($help)--memory-reservation=[Memory soft limit]:Memory limit: " + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " + "($help)--restart=[Restart policy]:restart policy:(no on-failure always unless-stopped)" + ) + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help)--no-stdin[Do not attach stdin]" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help -):containers:__docker_complete_running_containers" && ret=0 + ;; + (commit) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --author)"{-a=,--author=}"[Author]:author: " \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message]:message: " \ + "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \ + "($help -):container:__docker_complete_containers" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (cp) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbol link]" \ + "($help -)1:container:->container" \ + "($help -)2:hostpath:_files" && ret=0 + case $state in + (container) + if compset -P "*:"; then + _files && ret=0 + else + __docker_complete_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (diff) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (exec) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)*"{-e=,--env=}"[Set environment variables]:environment variable: " \ + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ + "($help)--privileged[Give extended Linux capabilities to the command]" \ + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \ + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" \ + "($help -):containers:__docker_complete_running_containers" \ + "($help -)*::command:->anycommand" && ret=0 + case $state in + (anycommand) + shift 1 words + (( CURRENT-- )) + _normal && ret=0 + ;; + esac + ;; + (export) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to a file, instead of stdout]:output file:_files" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (kill) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -s --signal)"{-s=,--signal=}"[Signal to send]:signal:_signals" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (logs) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--details[Show extra details provided to logs]" \ + "($help -f --follow)"{-f,--follow}"[Follow log output]" \ + "($help -s --since)"{-s=,--since=}"[Show logs since this timestamp]:timestamp: " \ + "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ + "($help)--tail=[Output the last K lines]:lines:(1 10 20 50 all)" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers]" \ + "($help)--before=[Show only container created before...]:containers:__docker_complete_containers" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_ps_filters" \ + "($help)--format=[Pretty-print containers using a Go template]:template: " \ + "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \ + "($help -n --last)"{-n=,--last=}"[Show n last created containers (includes all states)]:n:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help)--since=[Show only containers created since...]:containers:__docker_complete_containers" && ret=0 + ;; + (pause|unpause) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (port) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)2:port:_ports" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rename) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):old name:__docker_complete_containers" \ + "($help -):new name: " && ret=0 + ;; + (restart) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_containers_ids" && ret=0 + ;; + (rm) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \ + "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \ + "($help -)*:containers:->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)-f]} == -f || ${words[(r)--force]} == --force ]]; then + __docker_complete_containers && ret=0 + else + __docker_complete_stopped_containers && ret=0 + fi + ;; + esac + ;; + (run) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)--health-cmd=[Command to run to check health]:command: " \ + "($help)--health-interval=[Time between running the check]:time: " \ + "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" \ + "($help)--health-timeout=[Maximum time to allow one check to run]:time: " \ + "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \ + "($help)--rm[Remove intermediate containers when it exits]" \ + "($help)--runtime=[Name of the runtime to be used for that container]:runtime:__docker_complete_runtimes" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help)--stop-signal=[Signal to kill a container]:signal:_signals" \ + "($help)--storage-opt=[Storage driver options for the container]:storage options:->storage-opt" \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + (storage-opt) + if compset -P "*="; then + _message "value" && ret=0 + else + opts=('size') + _describe -t filter-opts "storage options" opts -qS "=" && ret=0 + fi + ;; + esac + ;; + (start) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \ + "($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \ + "($help -)*:containers:__docker_complete_stopped_containers" && ret=0 + ;; + (stats) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers (default shows just running)]" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-stream[Disable streaming stats and only pull the first result]" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (stop) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (top) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)*:: :->ps-arguments" && ret=0 + case $state in + (ps-arguments) + _ps && ret=0 + ;; + esac + ;; + (update) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + opts_create_run_update \ + "($help -)*: :->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then + __docker_complete_stopped_containers && ret=0 + else + __docker_complete_containers && ret=0 + fi + ;; + esac + ;; + (wait) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO container + +# BO image + +__docker_image_commands() { + local -a _docker_image_subcommands + _docker_image_subcommands=( + "build:Build an image from a Dockerfile" + "history:Show the history of an image" + "import:Import the contents from a tarball to create a filesystem image" + "inspect:Display detailed information on one or more images" + "load:Load an image from a tar archive or STDIN" + "ls:List images" + "prune:Remove unused images" + "pull:Pull an image or a repository from a registry" + "push:Push an image or a repository to a registry" + "rm:Remove one or more images" + "save:Save one or more images to a tar archive (streamed to STDOUT by default)" + "tag:Tag an image into a repository" + ) + _describe -t docker-image-commands "docker image command" _docker_image_subcommands +} + +__docker_image_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (build) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--build-arg=[Build-time variables]:=: " \ + "($help)*--cache-from=[Images to consider as cache sources]: :__docker_complete_repositories_with_tags" \ + "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" \ + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " \ + "($help)--compress[Compress the build context using gzip]" \ + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " \ + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " \ + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " \ + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " \ + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " \ + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \ + "($help)--force-rm[Always remove intermediate containers]" \ + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" \ + "($help)*--label=[Set metadata for an image]:label=value: " \ + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " \ + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " \ + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" \ + "($help)--no-cache[Do not use cache when building the image]" \ + "($help)--pull[Attempt to pull a newer version of the image]" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \ + "($help)--rm[Remove intermediate containers after a successful build]" \ + "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " \ + "($help -t --tag)*"{-t=,--tag=}"[Repository, name and tag for the image]: :__docker_complete_repositories_with_tags" \ + "($help)*--ulimit=[ulimit options]:ulimit: " \ + "($help)--userns=[Container user namespace]:user namespace:(host)" \ + "($help -):path or URL:_directories" && ret=0 + ;; + (history) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (import) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message for imported image]:message: " \ + "($help -):URL:(- http:// file://)" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)*:images:__docker_complete_images" && ret=0 + ;; + (load) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -i --input)"{-i=,--input=}"[Read from tar archive file]:archive file:_files -g \"*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)\"" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress the load output]" && ret=0 + ;; + (ls|list) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all images]" \ + "($help)--digests[Show digests]" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -): :__docker_complete_repositories" && ret=0 + case $state in + (filter-options) + __docker_complete_images_filters && ret=0 + ;; + esac + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Remove all unused images, not just dangling ones]" \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (pull) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -):name:__docker_search" && ret=0 + ;; + (push) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image signing]" \ + "($help -): :__docker_complete_images" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help)--no-prune[Do not delete untagged parents]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (save) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to file]:file:_files" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (tag) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):source:__docker_complete_images"\ + "($help -):destination:__docker_complete_repositories_with_tags" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO image + +# BO network + +__docker_network_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (driver) + __docker_complete_info_plugins Network && ret=0 + ;; + (id) + __docker_complete_networks_ids && ret=0 + ;; + (name) + __docker_complete_networks_names && ret=0 + ;; + (type) + type_opts=('builtin' 'custom') + _describe -t type-filter-opts "Type Filter Options" type_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('driver' 'id' 'label' 'name' 'type') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_get_networks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines networks + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options network ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Network ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + _describe -t networks-list "networks" networks "$@" && ret=0 + return ret +} + +__docker_complete_networks() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks all "$@" +} + +__docker_complete_networks_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks ids "$@" +} + +__docker_complete_networks_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks names "$@" +} + +__docker_network_commands() { + local -a _docker_network_subcommands + _docker_network_subcommands=( + "connect:Connect a container to a network" + "create:Creates a new network with a name specified by the user" + "disconnect:Disconnects a container from a network" + "inspect:Displays detailed information on a network" + "ls:Lists all the networks created by the user" + "prune:Remove all unused networks" + "rm:Deletes one or more networks" + ) + _describe -t docker-network-commands "docker network command" _docker_network_subcommands +} + +__docker_network_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (connect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--alias=[Add network-scoped alias for the container]:alias: " \ + "($help)--ip=[Container IPv4 address]:IPv4: " \ + "($help)--ip6=[Container IPv6 address]:IPv6: " \ + "($help)*--link=[Add a link to another container]:link:->link" \ + "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " \ + "($help -)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--attachable[Enable manual container attachment]" \ + "($help)*--aux-address[Auxiliary IPv4 or IPv6 addresses used by network driver]:key=IP: " \ + "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ + "($help)*--gateway=[IPv4 or IPv6 Gateway for the master subnet]:IP: " \ + "($help)--internal[Restricts external access to the network]" \ + "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ + "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \ + "($help)*--ipam-opt=[Custom IPAM plugin options]:opt=value: " \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help)*--label=[Set metadata on a network]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:opt=value: " \ + "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ + "($help -)1:Network Name: " && ret=0 + ;; + (disconnect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--no-trunc[Do not truncate the output]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--format=[Pretty-print networks using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display numeric IDs]" && ret=0 + case $state in + (filter-options) + __docker_network_complete_ls_filters && ret=0 + ;; + esac + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 + ;; + esac + + return ret +} + +# EO network + +# BO node + +__docker_node_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_nodes_ids && ret=0 + ;; + (membership) + membership_opts=('accepted' 'pending' 'rejected') + _describe -t membership-opts "membership options" membership_opts && ret=0 + ;; + (name) + __docker_complete_nodes_names && ret=0 + ;; + (role) + role_opts=('manager' 'worker') + _describe -t role-opts "role options" role_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'membership' 'name' 'role') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_node_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_nodes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines nodes args + + type=$1; shift + filter=$1; shift + [[ $filter != "none" ]] && args=("-f $filter") + + lines=(${(f)${:-"$(_call_program commands docker $docker_options node ls $args)"$'\n'}}) + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Node ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + nodes=($nodes $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + nodes=($nodes $s) + done + fi + + _describe -t nodes-list "nodes" nodes "$@" && ret=0 + return ret +} + +__docker_complete_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all none "$@" +} + +__docker_complete_nodes_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes ids none "$@" +} + +__docker_complete_nodes_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes names none "$@" +} + +__docker_complete_pending_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "membership=pending" "$@" +} + +__docker_complete_manager_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=manager" "$@" +} + +__docker_complete_worker_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=worker" "$@" +} + +__docker_node_commands() { + local -a _docker_node_subcommands + _docker_node_subcommands=( + "demote:Demote a node as manager in the swarm" + "inspect:Display detailed information on one or more nodes" + "ls:List nodes in the swarm" + "promote:Promote a node as manager in the swarm" + "rm:Remove one or more nodes from the swarm" + "ps:List tasks running on one or more nodes, defaults to current node" + "update:Update a node" + ) + _describe -t docker-node-commands "docker node command" _docker_node_subcommands +} + +__docker_node_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force remove a node from the swarm]" \ + "($help -)*:node:__docker_complete_pending_nodes" && ret=0 + ;; + (demote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_manager_nodes" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + case $state in + (filter-options) + __docker_node_complete_ls_filters && ret=0 + ;; + esac + ;; + (promote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_worker_nodes" && ret=0 + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all instances]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + case $state in + (filter-options) + __docker_node_complete_ps_filters && ret=0 + ;; + esac + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--availability=[Availability of the node]:availability:(active pause drain)" \ + "($help)*--label-add=[Add or update a node label]:key=value: " \ + "($help)*--label-rm=[Remove a node label if exists]:label: " \ + "($help)--role=[Role of the node]:role:(manager worker)" \ + "($help -)1:node:__docker_complete_nodes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_node_commands" && ret=0 + ;; + esac + + return ret +} + +# EO node + +# BO plugin + +__docker_complete_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines plugins + + lines=(${(f)${:-"$(_call_program commands docker $docker_options plugin ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Name + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[TAG]},${end[TAG]}]}%% ##}}" + plugins=($plugins $s) + done + + _describe -t plugins-list "plugins" plugins "$@" && ret=0 + return ret +} + +__docker_plugin_commands() { + local -a _docker_plugin_subcommands + _docker_plugin_subcommands=( + "disable:Disable a plugin" + "enable:Enable a plugin" + "inspect:Return low-level information about a plugin" + "install:Install a plugin" + "ls:List plugins" + "push:Push a plugin" + "rm:Remove a plugin" + "set:Change settings for a plugin" + ) + _describe -t docker-plugin-commands "docker plugin command" _docker_plugin_subcommands +} + +__docker_plugin_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (disable|enable|inspect|ls|push|rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:plugin:__docker_complete_plugins" && ret=0 + ;; + (install) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--alias=[Local name for plugin]:alias: " \ + "($help -)1:plugin:__docker_complete_plugins" && ret=0 + ;; + (set) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help-)*:key=value: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_plugin_commands" && ret=0 + ;; + esac + + return ret +} + +# EO plugin + +# BO secret + +__docker_secrets() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines secrets + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options secret ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + secrets=($secrets $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + secrets=($secrets $s) + done + fi + + _describe -t secrets-list "secrets" secrets "$@" && ret=0 + return ret +} + +__docker_complete_secrets() { + [[ $PREFIX = -* ]] && return 1 + __docker_secrets all "$@" +} + +__docker_secret_commands() { + local -a _docker_secret_subcommands + _docker_secret_subcommands=( + "create:Create a secret using stdin as content" + "inspect:Display detailed information on one or more secrets" + "ls:List secrets" + "rm:Remove one or more secrets" + ) + _describe -t docker-secret-commands "docker secret command" _docker_secret_subcommands +} + +__docker_secret_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-l=,--label=}"[Secret labels]:label: " \ + "($help -):secret: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_secret_commands" && ret=0 + ;; + esac + + return ret +} + +# EO secret + +# BO service + +__docker_service_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_services_ids && ret=0 + ;; + (name) + __docker_complete_services_names && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_service_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_services() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines services + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options service ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Service ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + _describe -t services-list "services" services "$@" && ret=0 + return ret +} + +__docker_complete_services() { + [[ $PREFIX = -* ]] && return 1 + __docker_services all "$@" +} + +__docker_complete_services_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_services ids "$@" +} + +__docker_complete_services_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_services names "$@" +} + +__docker_service_commands() { + local -a _docker_service_subcommands + _docker_service_subcommands=( + "create:Create a new service" + "inspect:Display detailed information on one or more services" + "ls:List services" + "rm:Remove one or more services" + "scale:Scale one or multiple replicated services" + "ps:List the tasks of a service" + "update:Update a service" + ) + _describe -t docker-service-commands "docker service command" _docker_service_subcommands +} + +__docker_service_subcommand() { + local -a _command_args opts_help opts_create_update + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + opts_create_update=( + "($help)*--constraint=[Placement constraints]:constraint: " + "($help)--endpoint-mode=[Placement constraints]:mode:(dnsrr vip)" + "($help)*"{-e=,--env=}"[Set environment variables]:env: " + "($help)--health-cmd=[Command to run to check health]:command: " + "($help)--health-interval=[Time between running the check]:time: " + "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" + "($help)--health-timeout=[Maximum time to allow one check to run]:time: " + "($help)--hostname=[Service container hostname]:hostname: " \ + "($help)*--label=[Service labels]:label: " + "($help)--limit-cpu=[Limit CPUs]:value: " + "($help)--limit-memory=[Limit Memory]:value: " + "($help)--log-driver=[Logging driver for service]:logging driver:__docker_complete_log_drivers" + "($help)*--log-opt=[Logging driver options]:log driver options:__docker_complete_log_options" + "($help)*--mount=[Attach a filesystem mount to the service]:mount: " + "($help)*--network=[Network attachments]:network: " + "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" + "($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: " + "($help)--replicas=[Number of tasks]:replicas: " + "($help)--reserve-cpu=[Reserve CPUs]:value: " + "($help)--reserve-memory=[Reserve Memory]:value: " + "($help)--restart-condition=[Restart when condition is met]:mode:(any none on-failure)" + "($help)--restart-delay=[Delay between restart attempts]:delay: " + "($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: " + "($help)--restart-window=[Window used to evaluate the restart policy]:window: " + "($help)*--secret=[Specify secrets to expose to the service]:secret:__docker_complete_secrets" + "($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: " + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-TTY]" + "($help)--update-delay=[Delay between updates]:delay: " + "($help)--update-failure-action=[Action on update failure]:mode:(pause continue)" + "($help)--update-max-failure-ratio=[Failure rate to tolerate during an update]:fraction: " + "($help)--update-monitor=[Duration after each task update to monitor for failure]:window: " + "($help)--update-parallelism=[Maximum number of tasks updated simultaneously]:number: " + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" + "($help)--with-registry-auth[Send registry authentication details to swarm agents]" + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)*--container-label=[Container labels]:label: " \ + "($help)*--dns=[Set custom DNS servers]:DNS: " \ + "($help)*--dns-option=[Set DNS options]:DNS option: " \ + "($help)*--dns-search=[Set custom DNS search domains]:DNS search: " \ + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" \ + "($help)--mode=[Service Mode]:mode:(global replicated)" \ + "($help)--name=[Service name]:name: " \ + "($help)*--publish=[Publish a port]:port: " \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:->filter-options" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + case $state in + (filter-options) + __docker_service_complete_ls_filters && ret=0 + ;; + esac + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (scale) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:->values" && ret=0 + case $state in + (values) + if compset -P '*='; then + _message 'replicas' && ret=0 + else + __docker_complete_services -qS "=" + fi + ;; + esac + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display task IDs]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + case $state in + (filter-options) + __docker_service_complete_ps_filters && ret=0 + ;; + esac + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)--arg=[Service command args]:arguments: _normal" \ + "($help)*--container-label-add=[Add or update container labels]:label: " \ + "($help)*--container-label-rm=[Remove a container label by its key]:label: " \ + "($help)*--dns-add=[Add or update custom DNS servers]:DNS: " \ + "($help)*--dns-rm=[Remove custom DNS servers]:DNS: " \ + "($help)*--dns-option-add=[Add or update DNS options]:DNS option: " \ + "($help)*--dns-option-rm=[Remove DNS options]:DNS option: " \ + "($help)*--dns-search-add=[Add or update custom DNS search domains]:DNS search: " \ + "($help)*--dns-search-rm=[Remove DNS search domains]:DNS search: " \ + "($help)--force[Force update]" \ + "($help)*--group-add=[Add additional supplementary user groups to the container]:group:_groups" \ + "($help)*--group-rm=[Remove previously added supplementary user groups from the container]:group:_groups" \ + "($help)--image=[Service image tag]:image:__docker_complete_repositories" \ + "($help)*--publish-add=[Add or update a port]:port: " \ + "($help)*--publish-rm=[Remove a port(target-port mandatory)]:port: " \ + "($help)--rollback[Rollback to previous specification]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_service_commands" && ret=0 + ;; + esac + + return ret +} + +# EO service + +# BO stack + +__docker_stack_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stack_complete_services_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stacks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines stacks + + lines=(${(f)${:-"$(_call_program commands docker $docker_options stack ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Service ID + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + stacks=($stacks $s) + done + + _describe -t stacks-list "stacks" stacks "$@" && ret=0 + return ret +} + +__docker_complete_stacks() { + [[ $PREFIX = -* ]] && return 1 + __docker_stacks "$@" +} + +__docker_stack_commands() { + local -a _docker_stack_subcommands + _docker_stack_subcommands=( + "deploy:Deploy a new stack or update an existing stack" + "ls:List stacks" + "ps:List the tasks in the stack" + "rm:Remove the stack" + "services:List the services in the stack" + ) + _describe -t docker-stack-commands "docker stack command" _docker_stack_subcommands +} + +__docker_stack_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (deploy|up) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--bundle-file=[Path to a Distributed Application Bundle file]:dab:_files -g \"*.dab\"" \ + "($help -c --compose-file)"{-c=,--compose-file=}"[Path to a Compose file]:compose file:_files -g \"*.(yml|yaml)\"" \ + "($help)--with-registry-auth[Send registry authentication details to Swarm agents]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help && ret=0 + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all tasks]" \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_ps_filters" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (rm|remove|down) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (services) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_services_filters" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_stack_commands" && ret=0 + ;; + esac + + return ret +} + +# EO stack + +# BO swarm + +__docker_swarm_commands() { + local -a _docker_swarm_subcommands + _docker_swarm_subcommands=( + "init:Initialize a swarm" + "join:Join a swarm as a node and/or manager" + "join-token:Manage join tokens" + "leave:Leave a swarm" + "update:Update the swarm" + ) + _describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands +} + +__docker_swarm_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (init) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--advertise-addr[Advertised address]:ip\:port: " \ + "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ + "($help)--force-new-cluster[Force create a new cluster from current state]" \ + "($help)--listen-addr=[Listen address]:ip\:port: " \ + "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ + "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ + "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 + ;; + (join) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--advertise-addr=[Advertised address]:ip\:port: " \ + "($help)--availability=[Availability of the node]:availability:(active drain pause)" \ + "($help)--listen-addr=[Listen address]:ip\:port: " \ + "($help)--token=[Token for entry into the swarm]:secret: " \ + "($help -):host\:port: " && ret=0 + ;; + (join-token) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display token]" \ + "($help)--rotate[Rotate join token]" \ + "($help -):role:(manager worker)" && ret=0 + ;; + (leave) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force this node to leave the swarm, ignoring warnings]" && ret=0 + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--cert-expiry=[Validity period for node certificates]:duration: " \ + "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ + "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \ + "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ + "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ + "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 + ;; + esac + + return ret +} + +# EO swarm + +# BO system + +__docker_system_commands() { + local -a _docker_system_subcommands + _docker_system_subcommands=( + "df:Show docker filesystem usage" + "events:Get real time events from the server" + "info:Display system-wide information" + "prune:Remove unused data" + ) + _describe -t docker-system-commands "docker system command" _docker_system_subcommands +} + +__docker_system_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (df) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -v --verbose)"{-v,--verbose}"[Show detailed information on space usage]" && ret=0 + ;; + (events) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_events_filter" \ + "($help)--since=[Events created since this timestamp]:timestamp: " \ + "($help)--until=[Events created until this timestamp]:timestamp: " \ + "($help)--format=[Format the output using the given go template]:template: " && ret=0 + ;; + (info) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Remove all unused data, not just dangling ones]" \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 + ;; + esac + + return ret +} + +# EO system + +# BO volume + +__docker_volume_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (dangling) + dangling_opts=('true' 'false') + _describe -t dangling-filter-opts "Dangling Filter Options" dangling_opts && ret=0 + ;; + (driver) + __docker_complete_info_plugins Volume && ret=0 + ;; + (name) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('dangling' 'driver' 'label' 'name') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_volumes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a lines volumes + + lines=(${(f)${:-"$(_call_program commands docker $docker_options volume ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Names + local line s + for line in $lines; do + s="${line[${begin[VOLUME NAME]},${end[VOLUME NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + volumes=($volumes $s) + done + + _describe -t volumes-list "volumes" volumes && ret=0 + return ret +} + +__docker_volume_commands() { + local -a _docker_volume_subcommands + _docker_volume_subcommands=( + "create:Create a volume" + "inspect:Display detailed information on one or more volumes" + "ls:List volumes" + "prune:Remove all unused volumes" + "rm:Remove one or more volumes" + ) + _describe -t docker-volume-commands "docker volume command" _docker_volume_subcommands +} + +__docker_volume_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -d --driver)"{-d=,--driver=}"[Volume driver name]:Driver name:(local)" \ + "($help)*--label=[Set metadata for a volume]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:Driver option: " \ + "($help -)1:Volume name: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)1:volume:__docker_complete_volumes" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--format=[Pretty-print volumes using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display volume names]" && ret=0 + case $state in + (filter-options) + __docker_volume_complete_ls_filters && ret=0 + ;; + esac + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the removal of one or more volumes]" \ + "($help -):volume:__docker_complete_volumes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 + ;; + esac + + return ret +} + +# EO volume + +__docker_caching_policy() { + oldp=( "$1"(Nmh+1) ) # 1 hour + (( $#oldp )) +} + +__docker_commands() { + local cache_policy + + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ + && ! _retrieve_cache docker_subcommands; + then + local -a lines + lines=(${(f)"$(_call_program commands docker 2>&1)"}) + _docker_subcommands=(${${${(M)${lines[$((${lines[(i)*Commands:]} + 1)),-1]}:# *}## #}/ ##/:}) + _docker_subcommands=($_docker_subcommands 'daemon:Enable daemon mode' 'help:Show help for a command') + (( $#_docker_subcommands > 2 )) && _store_cache docker_subcommands _docker_subcommands + fi + _describe -t docker-commands "docker command" _docker_subcommands +} + +__docker_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach|commit|cp|create|diff|exec|export|kill|logs|pause|unpause|port|rename|restart|rm|run|start|stats|stop|top|update|wait) + __docker_container_subcommand && ret=0 + ;; + (build|history|import|load|pull|push|save|tag) + __docker_image_subcommand && ret=0 + ;; + (container) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_container_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_container_subcommand && ret=0 + ;; + esac + ;; + (daemon) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--add-runtime=[Register an additional OCI compatible runtime]:runtime:__docker_complete_runtimes" \ + "($help)--api-cors-header=[CORS headers in the Engine API]:CORS headers: " \ + "($help)*--authorization-plugin=[Authorization plugins to load]" \ + "($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \ + "($help)--bip=[Network bridge IP]:IP address: " \ + "($help)--cgroup-parent=[Parent cgroup for all containers]:cgroup: " \ + "($help)--cluster-advertise=[Address or interface name to advertise]:Instance to advertise (host\:port): " \ + "($help)--cluster-store=[URL of the distributed storage backend]:Cluster Store:->cluster-store" \ + "($help)*--cluster-store-opt=[Cluster store options]:Cluster options:->cluster-store-options" \ + "($help)--config-file=[Path to daemon configuration file]:Config File:_files" \ + "($help)--containerd=[Path to containerd socket]:socket:_files -g \"*.sock\"" \ + "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ + "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \ + "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \ + "($help)*--default-ulimit=[Default ulimits for containers]:ulimit: " \ + "($help)--disable-legacy-registry[Disable contacting legacy registries]" \ + "($help)*--dns=[DNS server to use]:DNS: " \ + "($help)*--dns-opt=[DNS options to use]:DNS option: " \ + "($help)*--dns-search=[DNS search domains to use]:DNS search: " \ + "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \ + "($help)--exec-root=[Root directory for execution state files]:path:_directories" \ + "($help)--experimental[Enable experimental features]" \ + "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \ + "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \ + "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \ + "($help -g --graph)"{-g=,--graph=}"[Root of the Docker runtime]:path:_directories" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help)--icc[Enable inter-container communication]" \ + "($help)--init[Run an init inside containers to forward signals and reap processes]" \ + "($help)--init-path=[Path to the docker-init binary]:docker-init binary:_files" \ + "($help)*--insecure-registry=[Enable insecure registry communication]:registry: " \ + "($help)--ip=[Default IP when binding container ports]" \ + "($help)--ip-forward[Enable net.ipv4.ip_forward]" \ + "($help)--ip-masq[Enable IP masquerading]" \ + "($help)--iptables[Enable addition of iptables rules]" \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)*--label=[Key=value labels]:label: " \ + "($help)--live-restore[Enable live restore of docker when containers are still running]" \ + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" \ + "($help)*--log-opt=[Default log driver options for containers]:log driver options:__docker_complete_log_options" \ + "($help)--max-concurrent-downloads[Set the max concurrent downloads for each pull]" \ + "($help)--max-concurrent-uploads[Set the max concurrent uploads for each push]" \ + "($help)--mtu=[Network MTU]:mtu:(0 576 1420 1500 9000)" \ + "($help)--oom-score-adjust=[Set the oom_score_adj for the daemon]:oom-score:(-500)" \ + "($help -p --pidfile)"{-p=,--pidfile=}"[Path to use for daemon PID file]:PID file:_files" \ + "($help)--raw-logs[Full timestamps without ANSI coloring]" \ + "($help)*--registry-mirror=[Preferred Docker registry mirror]:registry mirror: " \ + "($help)--seccomp-profile=[Path to seccomp profile]:path:_files -g \"*.json\"" \ + "($help -s --storage-driver)"{-s=,--storage-driver=}"[Storage driver to use]:driver:(aufs btrfs devicemapper overlay overlay2 vfs zfs)" \ + "($help)--selinux-enabled[Enable selinux support]" \ + "($help)--shutdown-timeout=[Set the shutdown timeout value in seconds]:time: " \ + "($help)*--storage-opt=[Storage driver options]:storage driver options: " \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help)--userland-proxy-path=[Path to the userland proxy binary]:binary:_files" && ret=0 + + case $state in + (cluster-store) + if compset -P '*://'; then + _message 'host:port' && ret=0 + else + store=('consul' 'etcd' 'zk') + _describe -t cluster-store "Cluster Store" store -qS "://" && ret=0 + fi + ;; + (cluster-store-options) + if compset -P '*='; then + _files && ret=0 + else + opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path') + _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0 + fi + ;; + (users-groups) + if compset -P '*:'; then + _groups && ret=0 + else + _describe -t userns-default "default Docker user management" '(default)' && ret=0 + _users && ret=0 + fi + ;; + esac + ;; + (events|info) + __docker_system_subcommand && ret=0 + ;; + (image) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_image_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_image_subcommand && ret=0 + ;; + esac + ;; + (images) + words[1]='ls' + __docker_image_subcommand && ret=0 + ;; + (inspect) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \ + "($help)--type=[Return JSON for specified type]:type:(container image network node plugin service volume)" \ + "($help -)*: :->values" && ret=0 + + case $state in + (values) + if [[ ${words[(r)--type=container]} == --type=container ]]; then + __docker_complete_containers && ret=0 + elif [[ ${words[(r)--type=image]} == --type=image ]]; then + __docker_complete_images && ret=0 + elif [[ ${words[(r)--type=network]} == --type=network ]]; then + __docker_complete_networks && ret=0 + elif [[ ${words[(r)--type=node]} == --type=node ]]; then + __docker_complete_nodes && ret=0 + elif [[ ${words[(r)--type=plugin]} == --type=plugin ]]; then + __docker_complete_plugins && ret=0 + elif [[ ${words[(r)--type=service]} == --type=service ]]; then + __docker_complete_services && ret=0 + elif [[ ${words[(r)--type=volume]} == --type=volume ]]; then + __docker_complete_volumes && ret=0 + else + __docker_complete_containers + __docker_complete_images + __docker_complete_networks + __docker_complete_nodes + __docker_complete_plugins + __docker_complete_services + __docker_complete_volumes && ret=0 + fi + ;; + esac + ;; + (login) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -p --password)"{-p=,--password=}"[Password]:password: " \ + "($help -u --user)"{-u=,--user=}"[Username]:username: " \ + "($help -)1:server: " && ret=0 + ;; + (logout) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -)1:server: " && ret=0 + ;; + (network) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_network_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_network_subcommand && ret=0 + ;; + esac + ;; + (node) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_node_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_node_subcommand && ret=0 + ;; + esac + ;; + (plugin) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_plugin_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_plugin_subcommand && ret=0 + ;; + esac + ;; + (ps) + words[1]='ls' + __docker_container_subcommand && ret=0 + ;; + (rmi) + words[1]='rm' + __docker_image_subcommand && ret=0 + ;; + (search) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ + "($help)--limit=[Maximum returned search results]:limit:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -):term: " && ret=0 + + case $state in + (filter-options) + __docker_complete_search_filters && ret=0 + ;; + esac + ;; + (secret) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_secret_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_secret_subcommand && ret=0 + ;; + esac + ;; + (service) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_service_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_service_subcommand && ret=0 + ;; + esac + ;; + (stack) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_stack_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_stack_subcommand && ret=0 + ;; + esac + ;; + (swarm) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_swarm_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_swarm_subcommand && ret=0 + ;; + esac + ;; + (system) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_system_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_system_subcommand && ret=0 + ;; + esac + ;; + (version) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 + ;; + (volume) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_volume_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_volume_subcommand && ret=0 + ;; + esac + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_commands" && ret=0 + ;; + esac + + return ret +} + +_docker() { + # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. + # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. + if [[ $service != docker ]]; then + _call_function - _$service + return + fi + + local curcontext="$curcontext" state line help="-h --help" + integer ret=1 + typeset -A opt_args + + _arguments $(__docker_arguments) -C \ + "(: -)"{-h,--help}"[Print usage]" \ + "($help)--config[Location of client config files]:path:_directories" \ + "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help -v --version)"{-v,--version}"[Print version information and quit]" \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + local host=${opt_args[-H]}${opt_args[--host]} + local config=${opt_args[--config]} + local docker_options="${host:+--host $host} ${config:+--config $config}" + + case $state in + (command) + __docker_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-$words[1]: + __docker_subcommand && ret=0 + ;; + esac + + return ret +} + +_dockerd() { + integer ret=1 + words[1]='daemon' + __docker_subcommand && ret=0 + return ret +} + +_docker "$@" + +# Local Variables: +# mode: Shell-Script +# sh-indentation: 4 +# indent-tabs-mode: nil +# sh-basic-offset: 4 +# End: +# vim: ft=zsh sw=4 ts=4 et diff --git a/vendor/github.com/docker/docker/contrib/desktop-integration/README.md b/vendor/github.com/docker/docker/contrib/desktop-integration/README.md new file mode 100644 index 0000000..85a01b9 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/desktop-integration/README.md @@ -0,0 +1,11 @@ +Desktop Integration +=================== + +The ./contrib/desktop-integration contains examples of typical dockerized +desktop applications. + +Examples +======== + +* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application +* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application w devices diff --git a/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile b/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile new file mode 100644 index 0000000..5cacd1f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile @@ -0,0 +1,36 @@ +# VERSION: 0.1 +# DESCRIPTION: Create chromium container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a Chromium container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download Chromium Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile +# +# # Build chromium image +# docker build -t chromium . +# +# # Run stateful data-on-host chromium. For ephemeral, remove -v /data/chromium:/data +# docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# # To run stateful dockerized data containers +# docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Chromium +RUN apt-get update && apt-get install -y \ + chromium \ + chromium-l10n \ + libcanberra-gtk-module \ + libexif-dev \ + --no-install-recommends + +# Autorun chromium +CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] diff --git a/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile b/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile new file mode 100644 index 0000000..3ddb232 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile @@ -0,0 +1,31 @@ +# VERSION: 0.1 +# DESCRIPTION: Create gparted container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a gparted container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download gparted Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile +# +# # Build gparted image +# docker build -t gparted . +# +# docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ +# --device=/dev/sda:/dev/sda \ +# -e DISPLAY=unix$DISPLAY gparted +# + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Gparted and its dependencies +RUN apt-get update && apt-get install -y \ + gparted \ + libcanberra-gtk-module \ + --no-install-recommends + +# Autorun gparted +CMD ["/usr/sbin/gparted"] diff --git a/vendor/github.com/docker/docker/contrib/docker-device-tool/README.md b/vendor/github.com/docker/docker/contrib/docker-device-tool/README.md new file mode 100644 index 0000000..6c54d59 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/docker-device-tool/README.md @@ -0,0 +1,14 @@ +Docker device tool for devicemapper storage driver backend +=================== + +The ./contrib/docker-device-tool contains a tool to manipulate devicemapper thin-pool. + +Compile +======== + + $ make shell + ## inside build container + $ go build contrib/docker-device-tool/device_tool.go + + # if devicemapper version is old and compilation fails, compile with `libdm_no_deferred_remove` tag + $ go build -tags libdm_no_deferred_remove contrib/docker-device-tool/device_tool.go diff --git a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go new file mode 100644 index 0000000..906d064 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go @@ -0,0 +1,176 @@ +// +build !windows,!solaris + +package main + +import ( + "flag" + "fmt" + "os" + "path" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/pkg/devicemapper" +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(1) +} + +func byteSizeFromString(arg string) (int64, error) { + digits := "" + rest := "" + last := strings.LastIndexAny(arg, "0123456789") + if last >= 0 { + digits = arg[:last+1] + rest = arg[last+1:] + } + + val, err := strconv.ParseInt(digits, 10, 64) + if err != nil { + return val, err + } + + rest = strings.ToLower(strings.TrimSpace(rest)) + + var multiplier int64 = 1 + switch rest { + case "": + multiplier = 1 + case "k", "kb": + multiplier = 1024 + case "m", "mb": + multiplier = 1024 * 1024 + case "g", "gb": + multiplier = 1024 * 1024 * 1024 + case "t", "tb": + multiplier = 1024 * 1024 * 1024 * 1024 + default: + return 0, fmt.Errorf("Unknown size unit: %s", rest) + } + + return val * multiplier, nil +} + +func main() { + root := flag.String("r", "/var/lib/docker", "Docker root dir") + flDebug := flag.Bool("D", false, "Debug mode") + + flag.Parse() + + if *flDebug { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) + } + + if flag.NArg() < 1 { + usage() + } + + args := flag.Args() + + home := path.Join(*root, "devicemapper") + devices, err := devmapper.NewDeviceSet(home, false, nil, nil, nil) + if err != nil { + fmt.Println("Can't initialize device mapper: ", err) + os.Exit(1) + } + + switch args[0] { + case "status": + status := devices.Status() + fmt.Printf("Pool name: %s\n", status.PoolName) + fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) + fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) + fmt.Printf("Sector size: %d\n", status.SectorSize) + fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) + fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) + break + case "list": + ids := devices.List() + sort.Strings(ids) + for _, id := range ids { + fmt.Println(id) + } + break + case "device": + if flag.NArg() < 2 { + usage() + } + status, err := devices.GetDeviceStatus(args[1]) + if err != nil { + fmt.Println("Can't get device info: ", err) + os.Exit(1) + } + fmt.Printf("Id: %d\n", status.DeviceID) + fmt.Printf("Size: %d\n", status.Size) + fmt.Printf("Transaction Id: %d\n", status.TransactionID) + fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) + fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) + fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) + break + case "resize": + if flag.NArg() < 2 { + usage() + } + + size, err := byteSizeFromString(args[1]) + if err != nil { + fmt.Println("Invalid size: ", err) + os.Exit(1) + } + + err = devices.ResizePool(size) + if err != nil { + fmt.Println("Error resizing pool: ", err) + os.Exit(1) + } + + break + case "snap": + if flag.NArg() < 3 { + usage() + } + + err := devices.AddDevice(args[1], args[2], nil) + if err != nil { + fmt.Println("Can't create snap device: ", err) + os.Exit(1) + } + break + case "remove": + if flag.NArg() < 2 { + usage() + } + + err := devicemapper.RemoveDevice(args[1]) + if err != nil { + fmt.Println("Can't remove device: ", err) + os.Exit(1) + } + break + case "mount": + if flag.NArg() < 3 { + usage() + } + + err := devices.MountDevice(args[1], args[2], "") + if err != nil { + fmt.Println("Can't create snap device: ", err) + os.Exit(1) + } + break + default: + fmt.Printf("Unknown command %s\n", args[0]) + usage() + + os.Exit(1) + } + + return +} diff --git a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go new file mode 100644 index 0000000..da29a2c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go @@ -0,0 +1,4 @@ +package main + +func main() { +} diff --git a/vendor/github.com/docker/docker/contrib/dockerize-disk.sh b/vendor/github.com/docker/docker/contrib/dockerize-disk.sh new file mode 100755 index 0000000..444e243 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/dockerize-disk.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +set -e + +if ! command -v qemu-nbd &> /dev/null; then + echo >&2 'error: "qemu-nbd" not found!' + exit 1 +fi + +usage() { + echo "Convert disk image to docker image" + echo "" + echo "usage: $0 image-name disk-image-file [ base-image ]" + echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img" + echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04" +} + +if [ "$#" -lt 2 ]; then + usage + exit 1 +fi + +CURDIR=$(pwd) + +image_name="${1%:*}" +image_tag="${1#*:}" +if [ "$image_tag" == "$1" ]; then + image_tag="latest" +fi + +disk_image_file="$2" +docker_base_image="$3" + +block_device=/dev/nbd0 + +builddir=$(mktemp -d) + +cleanup() { + umount "$builddir/disk_image" || true + umount "$builddir/workdir" || true + qemu-nbd -d $block_device &> /dev/null || true + rm -rf $builddir +} +trap cleanup EXIT + +# Mount disk image +modprobe nbd max_part=63 +qemu-nbd -rc ${block_device} -P 1 "$disk_image_file" +mkdir "$builddir/disk_image" +mount -o ro ${block_device} "$builddir/disk_image" + +mkdir "$builddir/workdir" +mkdir "$builddir/diff" + +base_image_mounts="" + +# Unpack base image +if [ -n "$docker_base_image" ]; then + mkdir -p "$builddir/base" + docker pull "$docker_base_image" + docker save "$docker_base_image" | tar -xC "$builddir/base" + + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + while [ -n "$image_id" ]; do + mkdir -p "$builddir/base/$image_id/layer" + tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer" + + base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh" + image_id=$(docker inspect -f "{{.Parent}}" "$image_id") + done +fi + +# Mount work directory +mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir" + +# Update files +cd $builddir +LC_ALL=C diff -rq disk_image workdir \ + | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \ + | while read action entry; do + case "$action" in + ADD|UPDATE) + cp -a "disk_image$entry" "workdir$entry" + ;; + DEL) + rm -rf "workdir$entry" + ;; + *) + echo "Error: unknown diff line: $action $entry" >&2 + ;; + esac + done + +# Pack new image +new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)" +mkdir -p $builddir/result/$new_image_id +cd diff +tar -cf $builddir/result/$new_image_id/layer.tar * +echo "1.0" > $builddir/result/$new_image_id/VERSION +cat > $builddir/result/$new_image_id/json <<-EOS +{ "docker_version": "1.4.1" +, "id": "$new_image_id" +, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)" +EOS + +if [ -n "$docker_base_image" ]; then + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json +fi + +echo "}" >> $builddir/result/$new_image_id/json + +echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories + +cd $builddir/result + +# mkdir -p $CURDIR/$image_name +# cp -r * $CURDIR/$image_name +tar -c * | docker load diff --git a/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh b/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh new file mode 100755 index 0000000..29d7ff5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh @@ -0,0 +1,108 @@ +#!/bin/bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@image-id] ..." + echo " ie: $0 /tmp/hello-world hello-world" + echo " $0 /tmp/debian-jessie debian:jessie" + echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" + echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + tag="${imageTag#*:}" + imageId="${tag##*@}" + [ "$imageId" != "$tag" ] || imageId= + [ "$tag" != "$imageTag" ] || tag='latest' + tag="${tag%@*}" + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" + + if [ -z "$imageId" ]; then + imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" + imageId="${imageId//\"/}" + fi + + ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" + if [ "${ancestryJson:0:1}" != '[' ]; then + echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" + echo >&2 " $ancestryJson" + exit 1 + fi + + IFS=',' + ancestry=( ${ancestryJson//[\[\] \"]/} ) + unset IFS + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '$imageTag' (${#ancestry[@]} layers)..." + for imageId in "${ancestry[@]}"; do + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . | docker load" diff --git a/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh b/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh new file mode 100755 index 0000000..111e3fa --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh @@ -0,0 +1,121 @@ +#!/bin/bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@digest] ..." + echo " $0 /tmp/old-hello-world hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + imageTag="${imageTag#*:}" + digest="${imageTag##*@}" + tag="${imageTag%%@*}" + + # add prefix library if passed official image + if [[ "$image" != *"/"* ]]; then + image="library/$image" + fi + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" + + manifestJson="$(curl -sSL -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/manifests/$digest")" + if [ "${manifestJson:0:1}" != '{' ]; then + echo >&2 "error: /v2/$image/manifests/$digest returned something unexpected:" + echo >&2 " $manifestJson" + exit 1 + fi + + layersFs=$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum') + + IFS=$'\n' + # bash v4 on Windows CI requires CRLF separator + if [ "$(go env GOHOSTOS)" = 'windows' ]; then + major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. -f1) + if [ "$major" -ge 4 ]; then + IFS=$'\r\n' + fi + fi + layers=( ${layersFs} ) + unset IFS + + history=$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]') + imageId=$(echo "$history" | jq --raw-output .[0] | jq --raw-output .id) + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '${image}:${tag}@${digest}' (${#layers[@]} layers)..." + for i in "${!layers[@]}"; do + imageJson=$(echo "$history" | jq --raw-output .[${i}]) + imageId=$(echo "$imageJson" | jq --raw-output .id) + imageLayer=${layers[$i]} + + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + echo "$imageJson" > "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" + curl -SL --progress -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/blobs/$imageLayer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + image="${image#library\/}" + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . | docker load" diff --git a/vendor/github.com/docker/docker/contrib/editorconfig b/vendor/github.com/docker/docker/contrib/editorconfig new file mode 100644 index 0000000..97eda89 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = tab +indent_size = 4 +trim_trailing_whitespace = true + +[*.md] +indent_size = 2 +indent_style = space diff --git a/vendor/github.com/docker/docker/contrib/gitdm/aliases b/vendor/github.com/docker/docker/contrib/gitdm/aliases new file mode 100644 index 0000000..dd5dd34 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/gitdm/aliases @@ -0,0 +1,148 @@ +Danny.Yates@mailonline.co.uk danny@codeaholics.org +KenCochrane@gmail.com kencochrane@gmail.com +LÉVEIL thomasleveil@gmail.com +Vincent.Bernat@exoscale.ch bernat@luffy.cx +acidburn@docker.com jess@docker.com +admin@jtlebi.fr jt@yadutaf.fr +ahmetalpbalkan@gmail.com ahmetb@microsoft.com +aj@gandi.net aj@gandi.net +albers@users.noreply.github.com github@albersweb.de +alexander.larsson@gmail.com alexl@redhat.com +amurdaca@redhat.com antonio.murdaca@gmail.com +amy@gandi.net aj@gandi.net +andrew.weiss@microsoft.com andrew.weiss@outlook.com +angt@users.noreply.github.com adrien@gallouet.fr +ankushagarwal@users.noreply.github.com ankushagarwal11@gmail.com +anonymouse2048@gmail.com lheckemann@twig-world.com +anusha@docker.com anusha.ragunathan@docker.com +asarai@suse.com asarai@suse.de +avi.miller@gmail.com avi.miller@oracle.com +bernat@luffy.cx Vincent.Bernat@exoscale.ch +bgoff@cpuguy83-mbp.home cpuguy83@gmail.com +brandon@ifup.co brandon@ifup.org +brent@docker.com brent.salisbury@docker.com +charmes.guillaume@gmail.com guillaume.charmes@docker.com +chenchun.feed@gmail.com ramichen@tencent.com +chooper@plumata.com charles.hooper@dotcloud.com +crosby.michael@gmail.com michael@docker.com +crosbymichael@gmail.com michael@docker.com +cyphar@cyphar.com asarai@suse.de +daehyeok@daehyeok-ui-MacBook-Air.local daehyeok@gmail.com +daehyeok@daehyeokui-MacBook-Air.local daehyeok@gmail.com +daniel.norberg@gmail.com dano@spotify.com +daniel@dotcloud.com daniel.mizyrycki@dotcloud.com +darren@rancher.com darren.s.shepherd@gmail.com +dave@dtucker.co.uk dt@docker.com +dev@vvieux.com victor.vieux@docker.com +dgasienica@zynga.com daniel@gasienica.ch +dnephin@gmail.com dnephin@docker.com +dominikh@fork-bomb.org dominik@honnef.co +dqminh89@gmail.com dqminh@cloudflare.com +dsxiao@dataman-inc.com dxiao@redhat.com +duglin@users.noreply.github.com dug@us.ibm.com +eric.hanchrow@gmail.com ehanchrow@ine.com +erik+github@hollensbe.org github@hollensbe.org +estesp@gmail.com estesp@linux.vnet.ibm.com +ewindisch@docker.com eric@windisch.us +f.joffrey@gmail.com joffrey@docker.com +fkautz@alumni.cmu.edu fkautz@redhat.com +frank.rosquin@gmail.com frank.rosquin+github@gmail.com +gh@mattyw.net mattyw@me.com +git@julienbordellier.com julienbordellier@gmail.com +github@metaliveblog.com github@developersupport.net +github@srid.name sridharr@activestate.com +guillaume.charmes@dotcloud.com guillaume.charmes@docker.com +guillaume@charmes.net guillaume.charmes@docker.com +guillaume@docker.com guillaume.charmes@docker.com +guillaume@dotcloud.com guillaume.charmes@docker.com +haoshuwei24@gmail.com haosw@cn.ibm.com +hollie.teal@docker.com hollie@docker.com +hollietealok@users.noreply.github.com hollie@docker.com +hsinko@users.noreply.github.com 21551195@zju.edu.cn +iamironbob@gmail.com altsysrq@gmail.com +icecrime@gmail.com arnaud.porterie@docker.com +jatzen@gmail.com jacob@jacobatzen.dk +jeff@allingeek.com jeff.nickoloff@gmail.com +jefferya@programmerq.net jeff@docker.com +jerome.petazzoni@dotcloud.com jerome.petazzoni@dotcloud.com +jfrazelle@users.noreply.github.com jess@docker.com +jhoward@microsoft.com John.Howard@microsoft.com +jlhawn@berkeley.edu josh.hawn@docker.com +joffrey@dotcloud.com joffrey@docker.com +john.howard@microsoft.com John.Howard@microsoft.com +jp@enix.org jerome.petazzoni@dotcloud.com +justin.cormack@unikernel.com justin.cormack@docker.com +justin.simonelis@PTS-JSIMON2.toronto.exclamation.com justin.p.simonelis@gmail.com +justin@specialbusservice.com justin.cormack@docker.com +katsuta_soshi@cyberagent.co.jp soshi.katsuta@gmail.com +kuehnle@online.de git.nivoc@neverbox.com +kwk@users.noreply.github.com konrad.wilhelm.kleine@gmail.com +leijitang@gmail.com leijitang@huawei.com +liubin0329@gmail.com liubin0329@users.noreply.github.com +lk4d4math@gmail.com lk4d4@docker.com +louis@dotcloud.com kalessin@kalessin.fr +lsm5@redhat.com lsm5@fedoraproject.org +lyndaoleary@hotmail.com lyndaoleary29@gmail.com +madhu@socketplane.io madhu@docker.com +martins@noironetworks.com aanm90@gmail.com +mary@docker.com mary.anthony@docker.com +mastahyeti@users.noreply.github.com mastahyeti@gmail.com +maztaim@users.noreply.github.com taim@bosboot.org +me@runcom.ninja antonio.murdaca@gmail.com +mheon@mheonlaptop.redhat.com mheon@redhat.com +michael@crosbymichael.com michael@docker.com +mohitsoni1989@gmail.com mosoni@ebay.com +moxieandmore@gmail.com mary.anthony@docker.com +moyses.furtado@wplex.com.br moysesb@gmail.com +msabramo@gmail.com marc@marc-abramowitz.com +mzdaniel@glidelink.net daniel.mizyrycki@dotcloud.com +nathan.leclaire@gmail.com nathan.leclaire@docker.com +nathanleclaire@gmail.com nathan.leclaire@docker.com +ostezer@users.noreply.github.com ostezer@gmail.com +peter@scraperwiki.com p@pwaller.net +princess@docker.com jess@docker.com +proppy@aminche.com proppy@google.com +qhuang@10.0.2.15 h.huangqiang@huawei.com +resouer@gmail.com resouer@163.com +roberto_hashioka@hotmail.com roberto.hashioka@docker.com +root@vagrant-ubuntu-12.10.vagrantup.com daniel.mizyrycki@dotcloud.com +runcom@linux.com antonio.murdaca@gmail.com +runcom@redhat.com antonio.murdaca@gmail.com +runcom@users.noreply.github.com antonio.murdaca@gmail.com +s@docker.com solomon@docker.com +shawnlandden@gmail.com shawn@churchofgit.com +singh.gurjeet@gmail.com gurjeet@singh.im +sjoerd@byte.nl sjoerd-github@linuxonly.nl +smahajan@redhat.com shishir.mahajan@redhat.com +solomon.hykes@dotcloud.com solomon@docker.com +solomon@dotcloud.com solomon@docker.com +stefanb@us.ibm.com stefanb@linux.vnet.ibm.com +stevvooe@users.noreply.github.com stephen.day@docker.com +superbaloo+registrations.github@superbaloo.net baloo@gandi.net +tangicolin@gmail.com tangicolin@gmail.com +thaJeztah@users.noreply.github.com github@gone.nl +thatcher@dotcloud.com thatcher@docker.com +thatcher@gmx.net thatcher@docker.com +tibor@docker.com teabee89@gmail.com +tiborvass@users.noreply.github.com teabee89@gmail.com +timruffles@googlemail.com oi@truffles.me.uk +tintypemolly@Ohui-MacBook-Pro.local tintypemolly@gmail.com +tj@init.me tejesh.mehta@gmail.com +tristan.carel@gmail.com tristan@cogniteev.com +unclejack@users.noreply.github.com cristian.staretu@gmail.com +unclejacksons@gmail.com cristian.staretu@gmail.com +vbatts@hashbangbash.com vbatts@redhat.com +victor.vieux@dotcloud.com victor.vieux@docker.com +victor@docker.com victor.vieux@docker.com +victor@dotcloud.com victor.vieux@docker.com +victorvieux@gmail.com victor.vieux@docker.com +vieux@docker.com victor.vieux@docker.com +vincent+github@demeester.fr vincent@sbr.pm +vincent@bernat.im bernat@luffy.cx +vojnovski@gmail.com viktor.vojnovski@amadeus.com +whoshuu@gmail.com huu@prismskylabs.com +xiaods@gmail.com dxiao@redhat.com +xlgao@zju.edu.cn xlgao@zju.edu.cn +yestin.sun@polyera.com sunyi0804@gmail.com +yuchangchun1@huawei.com yuchangchun1@huawei.com +zjaffee@us.ibm.com zij@case.edu diff --git a/vendor/github.com/docker/docker/contrib/gitdm/domain-map b/vendor/github.com/docker/docker/contrib/gitdm/domain-map new file mode 100644 index 0000000..1f1849e --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/gitdm/domain-map @@ -0,0 +1,39 @@ +# +# Docker +# + +docker.com Docker +dotcloud.com Docker + +aluzzardi@gmail.com Docker +cpuguy83@gmail.com Docker +derek@mcgstyle.net Docker +github@gone.nl Docker +kencochrane@gmail.com Docker +mickael.laventure@gmail.com Docker +sam.alba@gmail.com Docker +svendowideit@fosiki.com Docker +svendowideit@home.org.au Docker +tonistiigi@gmail.com Docker + +cristian.staretu@gmail.com Docker < 2015-01-01 +cristian.staretu@gmail.com Cisco + +github@hollensbe.org Docker < 2015-01-01 +github@hollensbe.org Cisco + +david.calavera@gmail.com Docker < 2016-04-01 +david.calavera@gmail.com Netlify + +# +# Others +# + +cisco.com Cisco +google.com Google +ibm.com IBM +huawei.com Huawei +microsoft.com Microsoft + +redhat.com Red Hat +mrunalp@gmail.com Red Hat diff --git a/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh b/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh new file mode 100755 index 0000000..dd6a564 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# +# This script generates a gitdm compatible email aliases file from a git +# formatted .mailmap file. +# +# Usage: +# $> ./generate_aliases > aliases +# + +cat $1 | \ + grep -v '^#' | \ + sed 's/^[^<]*<\([^>]*\)>/\1/' | \ + grep '<.*>' | sed -e 's/[<>]/ /g' | \ + awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' | \ + sort | uniq diff --git a/vendor/github.com/docker/docker/contrib/gitdm/gitdm.config b/vendor/github.com/docker/docker/contrib/gitdm/gitdm.config new file mode 100644 index 0000000..d9b62b0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/gitdm/gitdm.config @@ -0,0 +1,17 @@ +# +# EmailAliases lets us cope with developers who use more +# than one address. +# +EmailAliases aliases + +# +# EmailMap does the main work of mapping addresses onto +# employers. +# +EmailMap domain-map + +# +# Use GroupMap to map a file full of addresses to the +# same employer +# +# GroupMap company-Docker Docker diff --git a/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile b/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile new file mode 100644 index 0000000..747dc91 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile @@ -0,0 +1,4 @@ +FROM busybox +EXPOSE 80/tcp +COPY httpserver . +CMD ["./httpserver"] diff --git a/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris b/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris new file mode 100644 index 0000000..3d0d691 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris @@ -0,0 +1,4 @@ +FROM solaris +EXPOSE 80/tcp +COPY httpserver . +CMD ["./httpserver"] diff --git a/vendor/github.com/docker/docker/contrib/httpserver/server.go b/vendor/github.com/docker/docker/contrib/httpserver/server.go new file mode 100644 index 0000000..a75d5ab --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/httpserver/server.go @@ -0,0 +1,12 @@ +package main + +import ( + "log" + "net/http" +) + +func main() { + fs := http.FileServer(http.Dir("/static")) + http.Handle("/", fs) + log.Panic(http.ListenAndServe(":80", nil)) +} diff --git a/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd b/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd new file mode 100644 index 0000000..2444031 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd @@ -0,0 +1,13 @@ +# /etc/conf.d/docker: config file for /etc/init.d/docker + +# where the docker daemon output gets piped +#DOCKER_LOGFILE="/var/log/docker.log" + +# where docker's pid get stored +#DOCKER_PIDFILE="/run/docker.pid" + +# where the docker daemon itself is run from +#DOCKERD_BINARY="/usr/bin/dockerd" + +# any other random options you want to pass to docker +DOCKER_OPTS="" diff --git a/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd b/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd new file mode 100644 index 0000000..5d31603 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd @@ -0,0 +1,22 @@ +#!/sbin/openrc-run +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +command="${DOCKERD_BINARY:-/usr/bin/dockerd}" +pidfile="${DOCKER_PIDFILE:-/run/${RC_SVCNAME}.pid}" +command_args="-p \"${pidfile}\" ${DOCKER_OPTS}" +DOCKER_LOGFILE="${DOCKER_LOGFILE:-/var/log/${RC_SVCNAME}.log}" +start_stop_daemon_args="--background \ + --stderr \"${DOCKER_LOGFILE}\" --stdout \"${DOCKER_LOGFILE}\"" + +start_pre() { + checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + ulimit -u unlimited + + return 0 +} diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/REVIEWERS b/vendor/github.com/docker/docker/contrib/init/systemd/REVIEWERS new file mode 100644 index 0000000..b9ba55b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/systemd/REVIEWERS @@ -0,0 +1,3 @@ +Lokesh Mandvekar (@lsm5) +Brandon Philips (@philips) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/docker.service b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service new file mode 100644 index 0000000..8bfed93 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service @@ -0,0 +1,29 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network.target docker.socket firewalld.service +Requires=docker.socket + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd -H fd:// +ExecReload=/bin/kill -s HUP $MAINPID +LimitNOFILE=1048576 +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. +#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm new file mode 100644 index 0000000..6e41892 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm @@ -0,0 +1,28 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network.target firewalld.service + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd +ExecReload=/bin/kill -s HUP $MAINPID +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. +#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/docker.socket b/vendor/github.com/docker/docker/contrib/init/systemd/docker.socket new file mode 100644 index 0000000..7dd9509 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/systemd/docker.socket @@ -0,0 +1,12 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker.service + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker new file mode 100755 index 0000000..4f9d38d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker @@ -0,0 +1,152 @@ +#!/bin/sh +set -e + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. +### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=docker + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKERD=/usr/bin/dockerd +# This is the pid file managed by docker itself +DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# Check docker is present +if [ ! -x $DOCKERD ]; then + log_failure_msg "$DOCKERD not present or not executable" + exit 1 +fi + +check_init() { + # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly) + if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 + fi +} + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + check_init + + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + if [ "$BASH" ]; then + ulimit -u unlimited + else + ulimit -p unlimited + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKERD" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ + -- \ + -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + check_init + fail_unless_root + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 + log_end_msg $? + ;; + + restart) + check_init + fail_unless_root + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + check_init + fail_unless_root + $0 restart + ;; + + status) + check_init + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKERD" "$DOCKER_DESC" + ;; + + *) + echo "Usage: service docker {start|stop|restart|status}" + exit 1 + ;; +esac diff --git a/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker.default b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker.default new file mode 100644 index 0000000..c4e9319 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker.default @@ -0,0 +1,20 @@ +# Docker Upstart and SysVinit configuration file + +# +# THIS FILE DOES NOT APPLY TO SYSTEMD +# +# Please see the documentation for "systemd drop-ins": +# https://docs.docker.com/engine/admin/systemd/ +# + +# Customize location of Docker binary (especially for development testing). +#DOCKERD="/usr/local/bin/dockerd" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export DOCKER_TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker b/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker new file mode 100755 index 0000000..df9b02a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker @@ -0,0 +1,153 @@ +#!/bin/sh +# +# /etc/rc.d/init.d/docker +# +# Daemon for docker.com +# +# chkconfig: 2345 95 95 +# description: Daemon for docker.com + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $network cgconfig +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: start and stop docker +# Description: Daemon for docker.com +### END INIT INFO + +# Source function library. +. /etc/rc.d/init.d/functions + +prog="docker" +unshare=/usr/bin/unshare +exec="/usr/bin/dockerd" +pidfile="/var/run/$prog.pid" +lockfile="/var/lock/subsys/$prog" +logfile="/var/log/$prog" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +prestart() { + service cgconfig status > /dev/null + + if [[ $? != 0 ]]; then + service cgconfig start + fi + +} + +start() { + if [ ! -x $exec ]; then + if [ ! -e $exec ]; then + echo "Docker executable $exec not found" + else + echo "You do not have permission to execute the Docker executable $exec" + fi + exit 5 + fi + + check_for_cleanup + + if ! [ -f $pidfile ]; then + prestart + printf "Starting $prog:\t" + echo "\n$(date)\n" >> $logfile + "$unshare" -m -- $exec $other_args >> $logfile 2>&1 & + pid=$! + touch $lockfile + # wait up to 10 seconds for the pidfile to exist. see + # https://github.com/docker/docker/issues/5359 + tries=0 + while [ ! -f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + echo -n '.' + done + if [ ! -f $pidfile ]; then + failure + echo + exit 1 + fi + success + echo + else + failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile -d 300 $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +check_for_cleanup() { + if [ -f ${pidfile} ]; then + /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} + fi +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? diff --git a/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker.sysconfig b/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker.sysconfig new file mode 100644 index 0000000..0864b3d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker.sysconfig @@ -0,0 +1,7 @@ +# /etc/sysconfig/docker +# +# Other arguments to pass to the docker daemon process +# These will be parsed by the sysv initscript and appended +# to the arguments list passed to docker daemon + +other_args="" diff --git a/vendor/github.com/docker/docker/contrib/init/upstart/REVIEWERS b/vendor/github.com/docker/docker/contrib/init/upstart/REVIEWERS new file mode 100644 index 0000000..03ee2dd --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/upstart/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/init/upstart/docker.conf b/vendor/github.com/docker/docker/contrib/init/upstart/docker.conf new file mode 100644 index 0000000..d58f7d6 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/upstart/docker.conf @@ -0,0 +1,72 @@ +description "Docker daemon" + +start on (filesystem and net-device-up IFACE!=lo) +stop on runlevel [!2345] + +limit nofile 524288 1048576 + +# Having non-zero limits causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +limit nproc unlimited unlimited + +respawn + +kill timeout 20 + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKERD=/usr/bin/dockerd + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + exec "$DOCKERD" $DOCKER_OPTS --raw-logs +end script + +# Don't emit "started" event until docker.sock is ready. +# See https://github.com/docker/docker/issues/6647 +post-start script + DOCKER_OPTS= + DOCKER_SOCKET= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + + if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then + DOCKER_SOCKET=/var/run/docker.sock + else + DOCKER_SOCKET=$(printf "%s" "$DOCKER_OPTS" | grep -oP -e '(-H|--host)\W*unix://\K(\S+)' | sed 1q) + fi + + if [ -n "$DOCKER_SOCKET" ]; then + while ! [ -e "$DOCKER_SOCKET" ]; do + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 + echo "Waiting for $DOCKER_SOCKET" + sleep 0.1 + done + echo "$DOCKER_SOCKET is up" + fi +end script diff --git a/vendor/github.com/docker/docker/contrib/mac-install-bundle.sh b/vendor/github.com/docker/docker/contrib/mac-install-bundle.sh new file mode 100755 index 0000000..2110d04 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mac-install-bundle.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +set -e + +errexit() { + echo "$1" + exit 1 +} + +[ "$(uname -s)" == "Darwin" ] || errexit "This script can only be used on a Mac" + +[ $# -eq 1 ] || errexit "Usage: $0 install|undo" + +BUNDLE="bundles/$(cat VERSION)" +BUNDLE_PATH="$PWD/$BUNDLE" +CLIENT_PATH="$BUNDLE_PATH/cross/darwin/amd64/docker" +DATABASE="$HOME/Library/Containers/com.docker.docker/Data/database" +DATABASE_KEY="$DATABASE/com.docker.driver.amd64-linux/bundle" + +[ -d "$DATABASE" ] || errexit "Docker for Mac must be installed for this script" + +case "$1" in +"install") + [ -d "$BUNDLE" ] || errexit "cannot find bundle $BUNDLE" + [ -e "$CLIENT_PATH" ] || errexit "you need to run make cross first" + [ -e "$BUNDLE/binary-daemon/dockerd" ] || errexit "you need to build binaries first" + [ -f "$BUNDLE/binary-client/docker" ] || errexit "you need to build binaries first" + git -C "$DATABASE" reset --hard >/dev/null + echo "$BUNDLE_PATH" > "$DATABASE_KEY" + git -C "$DATABASE" add "$DATABASE_KEY" + git -C "$DATABASE" commit -m "update bundle to $BUNDLE_PATH" + rm -f /usr/local/bin/docker + cp "$CLIENT_PATH" /usr/local/bin + echo "Bundle installed. Restart Docker to use. To uninstall, reset Docker to factory defaults." + ;; +"undo") + git -C "$DATABASE" reset --hard >/dev/null + [ -f "$DATABASE_KEY" ] || errexit "bundle not set" + git -C "$DATABASE" rm "$DATABASE_KEY" + git -C "$DATABASE" commit -m "remove bundle" + rm -f /usr/local/bin/docker + ln -s "$HOME/Library/Group Containers/group.com.docker/bin/docker" /usr/local/bin + echo "Bundle removed. Using dev versions may cause issues, a reset to factory defaults is recommended." + ;; +esac diff --git a/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh b/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh new file mode 100755 index 0000000..47cd35c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh @@ -0,0 +1,87 @@ +#!/bin/sh + +set -e + +[ $(id -u) -eq 0 ] || { + printf >&2 '%s requires root\n' "$0" + exit 1 +} + +usage() { + printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository]\n' "$0" + exit 1 +} + +tmp() { + TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) + ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) + trap "rm -rf $TMP $ROOTFS" EXIT TERM INT +} + +apkv() { + curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | + grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 +} + +getapk() { + curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk | + tar -xz -C $TMP sbin/apk.static +} + +mkbase() { + $TMP/sbin/apk.static --repository $MAINREPO --update-cache --allow-untrusted \ + --root $ROOTFS --initdb add alpine-base +} + +conf() { + printf '%s\n' $MAINREPO > $ROOTFS/etc/apk/repositories + printf '%s\n' $ADDITIONALREPO >> $ROOTFS/etc/apk/repositories +} + +pack() { + local id + id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) + + docker tag $id alpine:latest + docker run -i -t --rm alpine printf 'alpine:%s with id=%s created!\n' $REL $id +} + +save() { + [ $SAVE -eq 1 ] || return + + tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz +} + +while getopts "hr:m:s" opt; do + case $opt in + r) + REL=$OPTARG + ;; + m) + MIRROR=$OPTARG + ;; + s) + SAVE=1 + ;; + c) + ADDITIONALREPO=community + ;; + *) + usage + ;; + esac +done + +REL=${REL:-edge} +MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} +SAVE=${SAVE:-0} +MAINREPO=$MIRROR/$REL/main +ADDITIONALREPO=$MIRROR/$REL/community +ARCH=${ARCH:-$(uname -m)} + +tmp +getapk +mkbase +conf +pack +save diff --git a/vendor/github.com/docker/docker/contrib/mkimage-arch-pacman.conf b/vendor/github.com/docker/docker/contrib/mkimage-arch-pacman.conf new file mode 100644 index 0000000..45fe03d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-arch-pacman.conf @@ -0,0 +1,92 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = auto + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/vendor/github.com/docker/docker/contrib/mkimage-arch.sh b/vendor/github.com/docker/docker/contrib/mkimage-arch.sh new file mode 100755 index 0000000..f941177 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-arch.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for archlinux and load it into the local +# docker as "archlinux" +# requires root +set -e + +hash pacstrap &>/dev/null || { + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 +} + +hash expect &>/dev/null || { + echo "Could not find expect. Run pacman -S expect" + exit 1 +} + + +export LANG="C.UTF-8" + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) +chmod 755 $ROOTFS + +# packages to ignore for space savings +PKGIGNORE=( + cryptsetup + device-mapper + dhcpcd + iproute2 + jfsutils + linux + lvm2 + man-db + man-pages + mdadm + nano + netctl + openresolv + pciutils + pcmciautils + reiserfsprogs + s-nail + systemd-sysvcompat + usbutils + vi + xfsprogs +) +IFS=',' +PKGIGNORE="${PKGIGNORE[*]}" +unset IFS + +arch="$(uname -m)" +case "$arch" in + armv*) + if pacman -Q archlinuxarm-keyring >/dev/null 2>&1; then + pacman-key --init + pacman-key --populate archlinuxarm + else + echo "Could not find archlinuxarm-keyring. Please, install it and run pacman-key --populate archlinuxarm" + exit 1 + fi + PACMAN_CONF=$(mktemp ${TMPDIR:-/var/tmp}/pacman-conf-archlinux-XXXXXXXXX) + version="$(echo $arch | cut -c 5)" + sed "s/Architecture = armv/Architecture = armv${version}h/g" './mkimage-archarm-pacman.conf' > "${PACMAN_CONF}" + PACMAN_MIRRORLIST='Server = http://mirror.archlinuxarm.org/$arch/$repo' + PACMAN_EXTRA_PKGS='archlinuxarm-keyring' + EXPECT_TIMEOUT=1800 # Most armv* based devices can be very slow (e.g. RPiv1) + ARCH_KEYRING=archlinuxarm + DOCKER_IMAGE_NAME="armv${version}h/archlinux" + ;; + *) + PACMAN_CONF='./mkimage-arch-pacman.conf' + PACMAN_MIRRORLIST='Server = https://mirrors.kernel.org/archlinux/$repo/os/$arch' + PACMAN_EXTRA_PKGS='' + EXPECT_TIMEOUT=60 + ARCH_KEYRING=archlinux + DOCKER_IMAGE_NAME=archlinux + ;; +esac + +export PACMAN_MIRRORLIST + +expect < $ROOTFS/etc/locale.gen +arch-chroot $ROOTFS locale-gen +arch-chroot $ROOTFS /bin/sh -c 'echo $PACMAN_MIRRORLIST > /etc/pacman.d/mirrorlist' + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 +ln -sf /proc/self/fd $DEV/fd + +tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - $DOCKER_IMAGE_NAME +docker run --rm -t $DOCKER_IMAGE_NAME echo Success. +rm -rf $ROOTFS diff --git a/vendor/github.com/docker/docker/contrib/mkimage-archarm-pacman.conf b/vendor/github.com/docker/docker/contrib/mkimage-archarm-pacman.conf new file mode 100644 index 0000000..f4b45f5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-archarm-pacman.conf @@ -0,0 +1,98 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = armv + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +[alarm] +Include = /etc/pacman.d/mirrorlist + +[aur] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh b/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh new file mode 100755 index 0000000..b11a6bb --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Generate a very minimal filesystem based on busybox-static, +# and load it into the local docker under the name "busybox". + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' +echo >&2 + +BUSYBOX=$(which busybox) +[ "$BUSYBOX" ] || { + echo "Sorry, I could not locate busybox." + echo "Try 'apt-get install busybox-static'?" + exit 1 +} + +set -e +ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM +mkdir $ROOTFS +cd $ROOTFS + +mkdir bin etc dev dev/pts lib proc sys tmp +touch etc/resolv.conf +cp /etc/nsswitch.conf etc/nsswitch.conf +echo root:x:0:0:root:/:/bin/sh > etc/passwd +echo root:x:0: > etc/group +ln -s lib lib64 +ln -s bin sbin +cp $BUSYBOX bin +for X in $(busybox --list) +do + ln -s busybox bin/$X +done +rm bin/init +ln bin/busybox bin/init +cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib +cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib +for X in console null ptmx random stdin stdout stderr tty urandom zero +do + cp -a /dev/$X dev +done + +tar --numeric-owner -cf- . | docker import - busybox +docker run -i -u root busybox /bin/echo Success. diff --git a/vendor/github.com/docker/docker/contrib/mkimage-crux.sh b/vendor/github.com/docker/docker/contrib/mkimage-crux.sh new file mode 100755 index 0000000..3f0bdca --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-crux.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for CRUX/Linux and load it into the local +# docker as "cruxlinux" +# requires root and the crux iso (http://crux.nu) + +set -e + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" + +ISO=${1} + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) +TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) + +VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') + +# Mount the ISO +mount -o ro,loop $ISO $CRUX + +# Extract pkgutils +tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz + +# Put pkgadd in the $PATH +export PATH="$TMP/usr/bin:$PATH" + +# Install core packages +mkdir -p $ROOTFS/var/lib/pkg +touch $ROOTFS/var/lib/pkg/db +for pkg in $CRUX/crux/core/*; do + pkgadd -r $ROOTFS $pkg +done + +# Remove agetty and inittab config +if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then + echo "Removing agetty from /etc/inittab ..." + chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab +fi + +# Remove kernel source +rm -rf $ROOTFS/usr/src/* + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 + +IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) +docker tag $IMAGE_ID crux:latest +docker run -i -t crux echo Success. + +# Cleanup +umount $CRUX +rm -rf $ROOTFS +rm -rf $CRUX +rm -rf $TMP diff --git a/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh b/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh new file mode 100755 index 0000000..412a5ce --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh @@ -0,0 +1,297 @@ +#!/usr/bin/env bash +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' +echo >&2 + +variant='minbase' +include='iproute,iputils-ping' +arch='amd64' # intentionally undocumented for now +skipDetection= +strictDebootstrap= +justTar= + +usage() { + echo >&2 + + echo >&2 "usage: $0 [options] repo suite [mirror]" + + echo >&2 + echo >&2 'options: (not recommended)' + echo >&2 " -p set an http_proxy for debootstrap" + echo >&2 " -v $variant # change default debootstrap variant" + echo >&2 " -i $include # change default package includes" + echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" + echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" + echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" + echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" + + echo >&2 + echo >&2 " ie: $0 username/debian squeeze" + echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" + + echo >&2 + echo >&2 " ie: $0 username/ubuntu precise" + echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" + + echo >&2 + echo >&2 " ie: $0 -t precise.tar.bz2 precise" + echo >&2 " $0 -t wheezy.tgz wheezy" + echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" + + echo >&2 +} + +# these should match the names found at http://www.debian.org/releases/ +debianStable=wheezy +debianUnstable=sid +# this should match the name found at http://releases.ubuntu.com/ +ubuntuLatestLTS=trusty +# this should match the name found at http://releases.tanglu.org/ +tangluLatest=aequorea + +while getopts v:i:a:p:dst name; do + case "$name" in + p) + http_proxy="$OPTARG" + ;; + v) + variant="$OPTARG" + ;; + i) + include="$OPTARG" + ;; + a) + arch="$OPTARG" + ;; + d) + strictDebootstrap=1 + ;; + s) + skipDetection=1 + ;; + t) + justTar=1 + ;; + ?) + usage + exit 0 + ;; + esac +done +shift $(($OPTIND - 1)) + +repo="$1" +suite="$2" +mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided + +if [ ! "$repo" ] || [ ! "$suite" ]; then + usage + exit 1 +fi + +# some rudimentary detection for whether we need to "sudo" our docker calls +docker='' +if docker version > /dev/null 2>&1; then + docker='docker' +elif sudo docker version > /dev/null 2>&1; then + docker='sudo docker' +elif command -v docker > /dev/null 2>&1; then + docker='docker' +else + echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" + echo >&2 " this script is not likely to work as expected" + sleep 3 + docker='docker' # give us a command-not-found later +fi + +# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory +if [ "$justTar" ]; then + if [ ! -d "$(dirname "$repo")" ]; then + echo >&2 "error: $(dirname "$repo") does not exist" + exit 1 + fi + repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" +fi + +# will be filled in later, if [ -z "$skipDetection" ] +lsbDist='' + +target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +if [ "$suite" = 'lucid' ]; then + # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails + include+=',gpgv' +fi + +set -x + +# bootstrap +mkdir -p "$target" +sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" + +cd "$target" + +if [ -z "$strictDebootstrap" ]; then + # prevent init scripts from running during install/update + # policy-rc.d (for most scripts) + echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null + sudo chmod +x usr/sbin/policy-rc.d + # initctl (for some pesky upstart scripts) + sudo chroot . dpkg-divert --local --rename --add /sbin/initctl + sudo ln -sf /bin/true sbin/initctl + # see https://github.com/docker/docker/issues/446#issuecomment-16953173 + + # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) + sudo chroot . apt-get clean + + if strings usr/bin/dpkg | grep -q unsafe-io; then + # while we're at it, apt is unnecessarily slow inside containers + # this forces dpkg not to call sync() after package extraction and speeds up install + # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization + echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null + # we have this wrapped up in an "if" because the "force-unsafe-io" + # option was added in dpkg 1.15.8.6 + # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), + # and ubuntu lucid/10.04 only has 1.15.5.6 + fi + + # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) + { + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo "DPkg::Post-Invoke { ${aptGetClean} };" + echo "APT::Update::Post-Invoke { ${aptGetClean} };" + echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' + } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null + + # and remove the translations, too + echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null + + # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): + # rm /usr/sbin/policy-rc.d + # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl + # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup + # rm /etc/apt/apt.conf.d/no-cache + # rm /etc/apt/apt.conf.d/no-languages + + if [ -z "$skipDetection" ]; then + # see also rudimentary platform detection in hack/install.sh + lsbDist='' + if [ -r etc/lsb-release ]; then + lsbDist="$(. etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then + lsbDist='Debian' + fi + + case "$lsbDist" in + Debian) + # add the updates and security repositories + if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then + # ${suite}-updates only applies to non-unstable + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + + # same for security updates + echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null + fi + ;; + Ubuntu) + # add the universe, updates, and security repositories + sudo sed -i " + s/ $suite main$/ $suite main universe/; p; + s/ $suite main/ ${suite}-updates main/; p; + s/ $suite-updates main/ ${suite}-security main/ + " etc/apt/sources.list + ;; + Tanglu) + # add the updates repository + if [ "$suite" = "$tangluLatest" ]; then + # ${suite}-updates only applies to stable Tanglu versions + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + fi + ;; + SteamOS) + # add contrib and non-free + sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list + ;; + esac + fi + + # make sure our packages lists are as up to date as we can get them + sudo chroot . apt-get update + sudo chroot . apt-get dist-upgrade -y +fi + +if [ "$justTar" ]; then + # create the tarball file so it has the right permissions (ie, not root) + touch "$repo" + + # fill the tarball + sudo tar --numeric-owner -caf "$repo" . +else + # create the image (and tag $repo:$suite) + sudo tar --numeric-owner -c . | $docker import - $repo:$suite + + # test the image + $docker run -i -t $repo:$suite echo success + + if [ -z "$skipDetection" ]; then + case "$lsbDist" in + Debian) + if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + + if [ -r etc/debian_version ]; then + # tag the specific debian release version (which is only reasonable to tag on debian stable) + ver=$(cat etc/debian_version) + $docker tag $repo:$suite $repo:$ver + fi + fi + ;; + Ubuntu) + if [ "$suite" = "$ubuntuLatestLTS" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Ubuntu version number, if available (12.04, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + Tanglu) + if [ "$suite" = "$tangluLatest" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Tanglu version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + SteamOS) + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific SteamOS version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + esac + fi +fi + +# cleanup +cd "$returnTo" +sudo rm -rf "$target" diff --git a/vendor/github.com/docker/docker/contrib/mkimage-pld.sh b/vendor/github.com/docker/docker/contrib/mkimage-pld.sh new file mode 100755 index 0000000..615c203 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-pld.sh @@ -0,0 +1,73 @@ +#!/bin/sh +# +# Generate a minimal filesystem for PLD Linux and load it into the local docker as "pld". +# https://www.pld-linux.org/packages/docker +# +set -e + +if [ "$(id -u)" != "0" ]; then + echo >&2 "$0: requires root" + exit 1 +fi + +image_name=pld + +tmpdir=$(mktemp -d ${TMPDIR:-/var/tmp}/pld-docker-XXXXXX) +root=$tmpdir/rootfs +install -d -m 755 $root + +# to clean up: +docker rmi $image_name || : + +# build +rpm -r $root --initdb + +set +e +install -d $root/dev/pts +mknod $root/dev/random c 1 8 -m 644 +mknod $root/dev/urandom c 1 9 -m 644 +mknod $root/dev/full c 1 7 -m 666 +mknod $root/dev/null c 1 3 -m 666 +mknod $root/dev/zero c 1 5 -m 666 +mknod $root/dev/console c 5 1 -m 660 +set -e + +poldek -r $root --up --noask -u \ + --noignore \ + -O 'rpmdef=_install_langs C' \ + -O 'rpmdef=_excludedocs 1' \ + vserver-packages \ + bash iproute2 coreutils grep poldek + +# fix netsharedpath, so containers would be able to install when some paths are mounted +sed -i -e 's;^#%_netsharedpath.*;%_netsharedpath /dev/shm:/sys:/proc:/dev:/etc/hostname;' $root/etc/rpm/macros + +# no need for alternatives +poldek-config -c $root/etc/poldek/poldek.conf ignore systemd-init + +# this makes initscripts to believe network is up +touch $root/var/lock/subsys/network + +# cleanup large optional packages +remove_packages="ca-certificates" +for pkg in $remove_packages; do + rpm -r $root -q $pkg && rpm -r $root -e $pkg --nodeps +done + +# cleanup more +rm -v $root/etc/ld.so.cache +rm -rfv $root/var/cache/hrmib/* +rm -rfv $root/usr/share/man/man?/* +rm -rfv $root/usr/share/locale/*/ +rm -rfv $root/usr/share/help/*/ +rm -rfv $root/usr/share/doc/* +rm -rfv $root/usr/src/examples/* +rm -rfv $root/usr/share/pixmaps/* + +# and import +tar --numeric-owner --xattrs --acls -C $root -c . | docker import - $image_name + +# and test +docker run -i -u root $image_name /bin/echo Success. + +rm -r $tmpdir diff --git a/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh b/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh new file mode 100755 index 0000000..7e09350 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. + +# This script is useful on systems with rinse available (e.g., +# building a CentOS image on Debian). See contrib/mkimage-yum.sh for +# a way to build CentOS images on systems with yum installed. + +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' +echo >&2 + +repo="$1" +distro="$2" +mirror="$3" + +if [ ! "$repo" ] || [ ! "$distro" ]; then + self="$(basename $0)" + echo >&2 "usage: $self repo distro [mirror]" + echo >&2 + echo >&2 " ie: $self username/centos centos-5" + echo >&2 " $self username/centos centos-6" + echo >&2 + echo >&2 " ie: $self username/slc slc-5" + echo >&2 " $self username/slc slc-6" + echo >&2 + echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" + echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" + echo >&2 + echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' + echo >&2 ' expected values of "mirror".' + echo >&2 + echo >&2 'This script is tested to work with the original upstream version of rinse,' + echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' + echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' + echo >&2 + exit 1 +fi + +target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" ) +if [ "$mirror" ]; then + rinseArgs+=( --mirror "$mirror" ) +fi + +set -x + +mkdir -p "$target" + +sudo rinse "${rinseArgs[@]}" + +cd "$target" + +# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own +sudo rm -rf dev +sudo mkdir -m 755 dev +( + cd dev + sudo ln -sf /proc/self/fd ./ + sudo mkdir -m 755 pts + sudo mkdir -m 1777 shm + sudo mknod -m 600 console c 5 1 + sudo mknod -m 600 initctl p + sudo mknod -m 666 full c 1 7 + sudo mknod -m 666 null c 1 3 + sudo mknod -m 666 ptmx c 5 2 + sudo mknod -m 666 random c 1 8 + sudo mknod -m 666 tty c 5 0 + sudo mknod -m 666 tty0 c 4 0 + sudo mknod -m 666 urandom c 1 9 + sudo mknod -m 666 zero c 1 5 +) + +# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" +# locales +sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} +# docs and man pages +sudo rm -rf usr/share/{man,doc,info,gnome/help} +# cracklib +sudo rm -rf usr/share/cracklib +# i18n +sudo rm -rf usr/share/i18n +# yum cache +sudo rm -rf var/cache/yum +sudo mkdir -p --mode=0755 var/cache/yum +# sln +sudo rm -rf sbin/sln +# ldconfig +#sudo rm -rf sbin/ldconfig +sudo rm -rf etc/ld.so.cache var/cache/ldconfig +sudo mkdir -p --mode=0755 var/cache/ldconfig + +# allow networking init scripts inside the container to work without extra steps +echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null + +# to restore locales later: +# yum reinstall glibc-common + +version= +if [ -r etc/redhat-release ]; then + version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)" +elif [ -r etc/SuSE-release ]; then + version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)" +fi + +if [ -z "$version" ]; then + echo >&2 "warning: cannot autodetect OS version, using $distro as tag" + sleep 20 + version="$distro" +fi + +sudo tar --numeric-owner -c . | docker import - $repo:$version + +docker run -i -t $repo:$version echo success + +cd "$returnTo" +sudo rm -rf "$target" diff --git a/vendor/github.com/docker/docker/contrib/mkimage-yum.sh b/vendor/github.com/docker/docker/contrib/mkimage-yum.sh new file mode 100755 index 0000000..29da170 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-yum.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. +# +# This script is useful on systems with yum installed (e.g., building +# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way +# to build CentOS images on other systems. + +set -e + +usage() { + cat < +OPTIONS: + -p "" The list of packages to install in the container. + The default is blank. + -g "" The groups of packages to install in the container. + The default is "Core". + -y The path to the yum config to install packages from. The + default is /etc/yum.conf for Centos/RHEL and /etc/dnf/dnf.conf for Fedora +EOOPTS + exit 1 +} + +# option defaults +yum_config=/etc/yum.conf +if [ -f /etc/dnf/dnf.conf ] && command -v dnf &> /dev/null; then + yum_config=/etc/dnf/dnf.conf + alias yum=dnf +fi +install_groups="Core" +while getopts ":y:p:g:h" opt; do + case $opt in + y) + yum_config=$OPTARG + ;; + h) + usage + ;; + p) + install_packages="$OPTARG" + ;; + g) + install_groups="$OPTARG" + ;; + \?) + echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +mkdir -m 755 "$target"/dev +mknod -m 600 "$target"/dev/console c 5 1 +mknod -m 600 "$target"/dev/initctl p +mknod -m 666 "$target"/dev/full c 1 7 +mknod -m 666 "$target"/dev/null c 1 3 +mknod -m 666 "$target"/dev/ptmx c 5 2 +mknod -m 666 "$target"/dev/random c 1 8 +mknod -m 666 "$target"/dev/tty c 5 0 +mknod -m 666 "$target"/dev/tty0 c 4 0 +mknod -m 666 "$target"/dev/urandom c 1 9 +mknod -m 666 "$target"/dev/zero c 1 5 + +# amazon linux yum will fail without vars set +if [ -d /etc/yum/vars ]; then + mkdir -p -m 755 "$target"/etc/yum + cp -a /etc/yum/vars "$target"/etc/yum/ +fi + +if [[ -n "$install_groups" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall $install_groups +fi + +if [[ -n "$install_packages" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y install $install_packages +fi + +yum -c "$yum_config" --installroot="$target" -y clean all + +cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . | docker import - $name:$version + +docker run -i -t --rm $name:$version /bin/bash -c 'echo success' + +rm -rf "$target" diff --git a/vendor/github.com/docker/docker/contrib/mkimage.sh b/vendor/github.com/docker/docker/contrib/mkimage.sh new file mode 100755 index 0000000..13298c8 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] [--compression algo| --no-compression] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" + echo >&2 " $mkimg -t someuser/solaris solaris" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +os= +os=$(uname -o) + +# set up path to gnu tools if solaris +[[ $os == "Solaris" ]] && export PATH=/usr/gnu/bin:$PATH +# TODO check for gnu-tar, gnu-getopt + +# TODO requires root/sudo due to some pkg operations. sigh. +[[ $os == "Solaris" && $EUID != "0" ]] && echo >&2 "image create on Solaris requires superuser privilege" + +optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +compression="auto" +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + --compression) compression="$2" ; shift 2 ;; + --no-compression) compression="none" ; shift 1 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ "$compression" == 'auto' ] || [ -z "$compression" ] +then + compression='xz' +fi + +[ "$compression" == 'none' ] && compression='' + +if [ ! -x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar${compression:+.$compression}" +touch "$tarFile" + +( + set -x + tar --numeric-owner --create --auto-compress --file "$tarFile" --directory "$rootfsDir" --transform='s,^./,,' . +) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/vendor/github.com/docker/docker/contrib/mkimage/.febootstrap-minimize b/vendor/github.com/docker/docker/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 0000000..7749e63 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs and man pages + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/vendor/github.com/docker/docker/contrib/mkimage/busybox-static b/vendor/github.com/docker/docker/contrib/mkimage/busybox-static new file mode 100755 index 0000000..e15322b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/vendor/github.com/docker/docker/contrib/mkimage/debootstrap b/vendor/github.com/docker/docker/contrib/mkimage/debootstrap new file mode 100755 index 0000000..7d56d8e --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/debootstrap @@ -0,0 +1,226 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +shift + +# get path to "chroot" in our current PATH +chrootPath="$(type -P chroot)" +rootfs_chroot() { + # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! + + # set PATH and chroot away! + PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ + "$chrootPath" "$rootfsDir" "$@" +} + +# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... +: ${DEBOOTSTRAP:=debootstrap} + +( + set -x + $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF' + #!/bin/sh + + # For most Docker users, "apt-get install" only happens during "docker build", + # where starting services doesn't work and often fails in humorous ways. This + # prevents those failures by stopping the services from attempting to start. + + exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl + cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" + sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" +) + +# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) +( set -x; rootfs_chroot apt-get clean ) + +# this file is one APT creates to make sure we don't "autoremove" our currently +# in-use kernel, which doesn't really apply to debootstraps/Docker images that +# don't even have kernels installed +rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" + +# Ubuntu 10.04 sucks... :) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' + # For most Docker users, package installs happen during "docker build", which + # doesn't survive power loss and gets restarted clean afterwards anyhow, so + # this minor tweak gives us a nice speedup (much nicer on spinning disks, + # obviously). + + force-unsafe-io + EOF +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + # Since for most Docker users, package installs happen in "docker build" steps, + # they essentially become individual layers due to the way Docker handles + # layering, especially using CoW filesystems. What this means for us is that + # the caches that APT keeps end up just wasting space in those layers, making + # our layers unnecessarily large (especially since we'll normally never use + # these caches again and will instead just "docker build" again and make a brand + # new image). + + # Ideally, these would just be invoking "apt-get clean", but in our testing, + # that ended up being cyclic and we got stuck on APT's lock, so we get this fun + # creation that's essentially just "apt-get clean". + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + + # Note that we do realize this isn't the ideal way to do this, and are always + # open to better suggestions (https://github.com/docker/docker/issues). + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' + # In Docker, we don't often need the "Translations" files, so we're just wasting + # time and space by downloading them, and this inhibits that. For users that do + # need them, it's a simple matter to delete this file and "apt-get update". :) + + Acquire::Languages "none"; + EOF + + echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. + + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". + + Acquire::GzipIndexes "true"; + Acquire::CompressionTypes::Order:: "gz"; + EOF + + # update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed + echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF' + # Since Docker users are looking for the smallest possible final images, the + # following emerges as a very common pattern: + + # RUN apt-get update \ + # && apt-get install -y \ + # && \ + # && apt-get purge -y --auto-remove + + # By default, APT will actually _keep_ packages installed via Recommends or + # Depends if another package Suggests them, even and including if the package + # that originally caused them to be installed is removed. Setting this to + # "false" ensures that APT is appropriately aggressive about removing the + # packages it added. + + # https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant + Apt::AutoRemove::SuggestsImportant "false"; + EOF +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + # normalize to lowercase for easier matching + lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" + case "$lsbDist" in + debian) + # updates and security! + if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + ubuntu) + # add the updates and security repositories + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates /; p; + s/ $suite-updates / ${suite}-security / + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos) + # add contrib and non-free if "main" is the only component + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +( + set -x + + # make sure we're fully up-to-date + rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y' + + # delete all the apt list files since they're big and get stale quickly + rm -rf "$rootfsDir/var/lib/apt/lists"/* + # this forces "apt-get update" in dependent images, which is also good + + mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." +) diff --git a/vendor/github.com/docker/docker/contrib/mkimage/mageia-urpmi b/vendor/github.com/docker/docker/contrib/mkimage/mageia-urpmi new file mode 100755 index 0000000..93fb289 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/mageia-urpmi @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# +# Needs to be run from Mageia 4 or greater for kernel support for docker. +# +# Mageia 4 does not have docker available in official repos, so please +# install and run the docker binary manually. +# +# Tested working versions are for Mageia 2 onwards (inc. cauldron). +# +set -e + +rootfsDir="$1" +shift + +optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@") +eval set -- "$optTemp" +unset optTemp + +installversion= +mirror= +while true; do + case "$1" in + -v|--version) installversion="$2" ; shift 2 ;; + -m|--mirror) mirror="$2" ; shift 2 ;; + --) shift ; break ;; + esac +done + +if [ -z $installversion ]; then + # Attempt to match host version + if [ -r /etc/mageia-release ]; then + installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)" + else + echo "Error: no version supplied and unable to detect host mageia version" + exit 1 + fi +fi + +if [ -z $mirror ]; then + # No mirror provided, default to mirrorlist + mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list" +fi + +( + set -x + urpmi.addmedia --distrib \ + $mirror \ + --urpmi-root "$rootfsDir" + urpmi basesystem-minimal urpmi \ + --auto \ + --no-suggests \ + --urpmi-root "$rootfsDir" \ + --root "$rootfsDir" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi diff --git a/vendor/github.com/docker/docker/contrib/mkimage/rinse b/vendor/github.com/docker/docker/contrib/mkimage/rinse new file mode 100755 index 0000000..75eb4f0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/rinse @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# specifying --arch below is safe because "$@" can override it and the "latest" one wins :) + +( + set -x + rinse --directory "$rootfsDir" --arch amd64 "$@" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" yum update -y +) diff --git a/vendor/github.com/docker/docker/contrib/mkimage/solaris b/vendor/github.com/docker/docker/contrib/mkimage/solaris new file mode 100755 index 0000000..158970e --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/solaris @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# +# Solaris 12 base image build script. +# +set -e + +# TODO add optional package publisher origin + +rootfsDir="$1" +shift + +# base install +( + set -x + + pkg image-create --full --zone \ + --facet facet.locale.*=false \ + --facet facet.locale.POSIX=true \ + --facet facet.doc=false \ + --facet facet.doc.*=false \ + "$rootfsDir" + + pkg -R "$rootfsDir" set-property use-system-repo true + + pkg -R "$rootfsDir" set-property flush-content-cache-on-success true + + pkg -R "$rootfsDir" install core-os +) + +# Lay in stock configuration, set up milestone +# XXX This all may become optional in a base image +( + # faster to build repository database on tmpfs + REPO_DB=/system/volatile/repository.$$ + export SVCCFG_REPOSITORY=${REPO_DB} + export SVCCFG_DOOR_PATH=$rootfsDir/system/volatile/tmp_repo_door + + # Import base manifests. NOTE These are a combination of basic requirement + # and gleaned from container milestone manifest. They may change. + for m in $rootfsDir/lib/svc/manifest/system/environment.xml \ + $rootfsDir/lib/svc/manifest/system/svc/global.xml \ + $rootfsDir/lib/svc/manifest/system/svc/restarter.xml \ + $rootfsDir/lib/svc/manifest/network/dns/client.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/switch.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/cache.xml \ + $rootfsDir/lib/svc/manifest/milestone/container.xml ; do + svccfg import $m + done + + # Apply system layer profile, deleting unnecessary dependencies + svccfg apply $rootfsDir/etc/svc/profile/generic_container.xml + + # XXX Even if we keep a repo in the base image, this is definitely optional + svccfg apply $rootfsDir/etc/svc/profile/sysconfig/container_sc.xml + + for s in svc:/system/svc/restarter \ + svc:/system/environment \ + svc:/network/dns/client \ + svc:/system/name-service/switch \ + svc:/system/name-service/cache \ + svc:/system/svc/global \ + svc:/milestone/container ;do + svccfg -s $s refresh + done + + # now copy the built up repository into the base rootfs + mv $REPO_DB $rootfsDir/etc/svc/repository.db +) + +# pkg(1) needs the zoneproxy-client running in the container. +# use a simple wrapper to run it as needed. +# XXX maybe we go back to running this in SMF? +mv "$rootfsDir/usr/bin/pkg" "$rootfsDir/usr/bin/wrapped_pkg" +cat > "$rootfsDir/usr/bin/pkg" <<-'EOF' +#!/bin/sh +# +# THIS FILE CREATED DURING DOCKER BASE IMAGE CREATION +# +# The Solaris base image uses the sysrepo proxy mechanism. The +# IPS client pkg(1) requires the zoneproxy-client to reach the +# remote publisher origins through the host. This wrapper script +# enables and disables the proxy client as needed. This is a +# temporary solution. + +/usr/lib/zones/zoneproxy-client -s localhost:1008 +PKG_SYSREPO_URL=http://localhost:1008 /usr/bin/wrapped_pkg "$@" +pkill -9 zoneproxy-client +EOF +chmod +x "$rootfsDir/usr/bin/pkg" diff --git a/vendor/github.com/docker/docker/contrib/nnp-test/Dockerfile b/vendor/github.com/docker/docker/contrib/nnp-test/Dockerfile new file mode 100644 index 0000000..026d869 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/nnp-test/Dockerfile @@ -0,0 +1,9 @@ +FROM buildpack-deps:jessie + +COPY . /usr/src/ + +WORKDIR /usr/src/ + +RUN gcc -g -Wall -static nnp-test.c -o /usr/bin/nnp-test + +RUN chmod +s /usr/bin/nnp-test diff --git a/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c new file mode 100644 index 0000000..b767da7 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c @@ -0,0 +1,10 @@ +#include +#include +#include + +int main(int argc, char *argv[]) +{ + printf("EUID=%d\n", geteuid()); + return 0; +} + diff --git a/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh b/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh new file mode 100755 index 0000000..5eeb45c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh @@ -0,0 +1,64 @@ +#!/bin/sh +set -e + +dir="$1" + +if [ -z "$dir" ]; then + { + echo 'This script is for destroying old /var/lib/docker directories more safely than' + echo ' "rm -rf", which can cause data loss or other serious issues.' + echo + echo "usage: $0 directory" + echo " ie: $0 /var/lib/docker" + } >&2 + exit 1 +fi + +if [ "$(id -u)" != 0 ]; then + echo >&2 "error: $0 must be run as root" + exit 1 +fi + +if [ ! -d "$dir" ]; then + echo >&2 "error: $dir is not a directory" + exit 1 +fi + +dir="$(readlink -f "$dir")" + +echo +echo "Nuking $dir ..." +echo ' (if this is wrong, press Ctrl+C NOW!)' +echo + +( set -x; sleep 10 ) +echo + +dir_in_dir() { + inner="$1" + outer="$2" + [ "${inner#$outer}" != "$inner" ] +} + +# let's start by unmounting any submounts in $dir +# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) +for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do + mount="$(readlink -f "$mount" || true)" + if [ "$dir" != "$mount" ] && dir_in_dir "$mount" "$dir"; then + ( set -x; umount -f "$mount" ) + fi +done + +# now, let's go destroy individual btrfs subvolumes, if any exist +if command -v btrfs > /dev/null 2>&1; then + # Find btrfs subvolumes under $dir checking for inode 256 + # Source: http://stackoverflow.com/a/32865333 + for subvol in $(find "$dir" -type d -inum 256 | sort -r); do + if [ "$dir" != "$subvol" ]; then + ( set -x; btrfs subvolume delete "$subvol" ) + fi + done +fi + +# finally, DESTROY ALL THINGS +( shopt -s dotglob; set -x; rm -rf "$dir"/* ) diff --git a/vendor/github.com/docker/docker/contrib/project-stats.sh b/vendor/github.com/docker/docker/contrib/project-stats.sh new file mode 100755 index 0000000..2691c72 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/project-stats.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +## Run this script from the root of the docker repository +## to query project stats useful to the maintainers. +## You will need to install `pulls` and `issues` from +## https://github.com/crosbymichael/pulls + +set -e + +echo -n "Open pulls: " +PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 +echo $PULLS + +echo -n "Pulls alru: " +pulls alru + +echo -n "Open issues: " +ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 +echo $ISSUES + +echo -n "Issues alru: " +issues alru diff --git a/vendor/github.com/docker/docker/contrib/report-issue.sh b/vendor/github.com/docker/docker/contrib/report-issue.sh new file mode 100755 index 0000000..cb54f1a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/report-issue.sh @@ -0,0 +1,105 @@ +#!/bin/sh + +# This is a convenience script for reporting issues that include a base +# template of information. See https://github.com/docker/docker/pull/8845 + +set -e + +DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"} +DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "} +DOCKER=${DOCKER:-"docker"} +DOCKER_COMMAND="${DOCKER}" +export DOCKER_COMMAND + +# pulled from https://gist.github.com/cdown/1163649 +function urlencode() { + # urlencode + + local length="${#1}" + for (( i = 0; i < length; i++ )); do + local c="${1:i:1}" + case $c in + [a-zA-Z0-9.~_-]) printf "$c" ;; + *) printf '%%%02X' "'$c" + esac + done +} + +function template() { +# this should always match the template from CONTRIBUTING.md + cat <<- EOM + Description of problem: + + + \`docker version\`: + `${DOCKER_COMMAND} -D version` + + + \`docker info\`: + `${DOCKER_COMMAND} -D info` + + + \`uname -a\`: + `uname -a` + + + Environment details (AWS, VirtualBox, physical, etc.): + + + How reproducible: + + + Steps to Reproduce: + 1. + 2. + 3. + + + Actual Results: + + + Expected Results: + + + Additional info: + + + EOM +} + +function format_issue_url() { + if [ ${#@} -ne 2 ] ; then + return 1 + fi + local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}") + local issue_body=$(urlencode "${2}") + echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}" +} + + +echo -ne "Do you use \`sudo\` to call docker? [y|N]: " +read -r -n 1 use_sudo +echo "" + +if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then + export DOCKER_COMMAND="sudo ${DOCKER}" +fi + +echo -ne "Title of new issue?: " +read -r issue_title +echo "" + +issue_url=$(format_issue_url "${issue_title}" "$(template)") + +if which xdg-open 2>/dev/null >/dev/null ; then + echo -ne "Would like to launch this report in your browser? [Y|n]: " + read -r -n 1 launch_now + echo "" + + if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then + xdg-open "${issue_url}" + fi +fi + +echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}" + diff --git a/vendor/github.com/docker/docker/contrib/reprepro/suites.sh b/vendor/github.com/docker/docker/contrib/reprepro/suites.sh new file mode 100755 index 0000000..9ecf99d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/reprepro/suites.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +cd "$(dirname "$BASH_SOURCE")/../.." + +targets_from() { + git fetch -q https://github.com/docker/docker.git "$1" + git ls-tree -r --name-only "$(git rev-parse FETCH_HEAD)" contrib/builder/deb/ | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|^contrib/builder/deb/amd64/|-debootstrap|/Dockerfile$!!g' | grep -v / +} + +release_branch=$(git ls-remote --heads https://github.com/docker/docker.git | awk -F 'refs/heads/' '$2 ~ /^release/ { print $2 }' | sort -V | tail -1) +{ targets_from master; targets_from "$release_branch"; } | sort -u diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE new file mode 100644 index 0000000..d511905 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile new file mode 100644 index 0000000..16df33e --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile @@ -0,0 +1,23 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz + +man: install + sepolicy manpage --domain ${TARGETS}_t + +install: + semodule -i ${TARGETS} + diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md new file mode 100644 index 0000000..7ea3117 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc new file mode 100644 index 0000000..d6cb0e5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc @@ -0,0 +1,29 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) +/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) +/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) +/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) + +/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) + +/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) + +/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if new file mode 100644 index 0000000..e087e8b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if @@ -0,0 +1,523 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + list_dirs_pattern($1, docker_share_t, docker_share_t) + read_files_pattern($1, docker_share_t, docker_share_t) + read_lnk_files_pattern($1, docker_share_t, docker_share_t) +') + +###################################### +## +## Allow the specified domain to execute apache +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`apache_exec',` + gen_require(` + type httpd_exec_t; + ') + + can_exec($1, httpd_exec_t) +') + +###################################### +## +## Allow the specified domain to execute docker shared files +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_share_files',` + gen_require(` + type docker_share_t; + ') + + can_exec($1, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + logging_log_filetrans($1, docker_log_t, dir, "lxc") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +######################################## +## +## Execute docker_auth_exec_t in the docker_auth domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_auth_domtrans',` + gen_require(` + type docker_auth_t, docker_auth_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) +') + +###################################### +## +## Execute docker_auth in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_exec',` + gen_require(` + type docker_auth_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_auth_exec_t) +') + +######################################## +## +## Connect to docker_auth over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_stream_connect',` + gen_require(` + type docker_auth_t, docker_plugin_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) +') + +######################################## +## +## docker domain typebounds calling domain. +## +## +## +## Domain to be typebound. +## +## +# +interface(`docker_typebounds',` + gen_require(` + type docker_t; + ') + + typebounds docker_t $1; +') + +######################################## +## +## Allow any docker_exec_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`docker_entrypoint',` + gen_require(` + type docker_exec_t; + ') + allow $1 docker_exec_t:file entrypoint; +') diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te new file mode 100644 index 0000000..4231688 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te @@ -0,0 +1,399 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +##

+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type docker_auth_t; +type docker_auth_exec_t; +init_daemon_domain(docker_auth_t, docker_auth_exec_t) + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_plugin_var_run_t; +files_pid_file(docker_plugin_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +docker_auth_stream_connect(docker_t) + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) +files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +# +# lxc rules +# + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + systemd_dbus_chat_machined(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + unconfined_domain(docker_t) + unconfined_typebounds(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + # for lxc + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; + virt_sandbox_entrypoint(docker_t) +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; +role system_r types spc_t; + +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; +filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") + +optional_policy(` + systemd_dbus_chat_machined(spc_t) +') + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) + virt_sandbox_entrypoint(spc_t) +') + +######################################## +# +# docker_auth local policy +# +allow docker_auth_t self:fifo_file rw_fifo_file_perms; +allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; +dontaudit docker_auth_t self:capability net_admin; + +docker_stream_connect(docker_auth_t) + +manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) + +domain_use_interactive_fds(docker_auth_t) + +kernel_read_net_sysctls(docker_auth_t) + +auth_use_nsswitch(docker_auth_t) + +files_read_etc_files(docker_auth_t) + +miscfiles_read_localization(docker_auth_t) + +sysnet_dns_name_resolve(docker_auth_t) diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE new file mode 100644 index 0000000..d511905 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile new file mode 100644 index 0000000..16df33e --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile @@ -0,0 +1,23 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz + +man: install + sepolicy manpage --domain ${TARGETS}_t + +install: + semodule -i ${TARGETS} + diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md new file mode 100644 index 0000000..7ea3117 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc new file mode 100644 index 0000000..10b7d52 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc @@ -0,0 +1,33 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) +/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) +/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) +/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) + +/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) + +/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) + +/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) + +# OL7.2 systemd selinux update +/var/run/systemd/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_run_t,s0) +/var/lib/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_lib_t,s0) diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if new file mode 100644 index 0000000..4780af0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if @@ -0,0 +1,659 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + list_dirs_pattern($1, docker_share_t, docker_share_t) + read_files_pattern($1, docker_share_t, docker_share_t) + read_lnk_files_pattern($1, docker_share_t, docker_share_t) +') + +###################################### +## +## Allow the specified domain to execute docker shared files +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_share_files',` + gen_require(` + type docker_share_t; + ') + + can_exec($1, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + logging_log_filetrans($1, docker_log_t, dir, "lxc") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +######################################## +## +## Execute docker_auth_exec_t in the docker_auth domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_auth_domtrans',` + gen_require(` + type docker_auth_t, docker_auth_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) +') + +###################################### +## +## Execute docker_auth in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_exec',` + gen_require(` + type docker_auth_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_auth_exec_t) +') + +######################################## +## +## Connect to docker_auth over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_stream_connect',` + gen_require(` + type docker_auth_t, docker_plugin_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) +') + +######################################## +## +## docker domain typebounds calling domain. +## +## +## +## Domain to be typebound. +## +## +# +interface(`docker_typebounds',` + gen_require(` + type docker_t; + ') + + typebounds docker_t $1; +') + +######################################## +## +## Allow any docker_exec_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`docker_entrypoint',` + gen_require(` + type docker_exec_t; + ') + allow $1 docker_exec_t:file entrypoint; +') + +######################################## +## +## Send and receive messages from +## systemd machined over dbus. +## +## +## +## Domain allowed access. +## +## +# +interface(`systemd_dbus_chat_machined',` + gen_require(` + type systemd_machined_t; + class dbus send_msg; + ') + + allow $1 systemd_machined_t:dbus send_msg; + allow systemd_machined_t $1:dbus send_msg; + ps_process_pattern(systemd_machined_t, $1) +') + +######################################## +## +## Allow any svirt_sandbox_file_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`virt_sandbox_entrypoint',` + gen_require(` + type svirt_sandbox_file_t; + ') + allow $1 svirt_sandbox_file_t:file entrypoint; +') + +######################################## +## +## Send and receive messages from +## virt over dbus. +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_dbus_chat',` + gen_require(` + type virtd_t; + class dbus send_msg; + ') + + allow $1 virtd_t:dbus send_msg; + allow virtd_t $1:dbus send_msg; + ps_process_pattern(virtd_t, $1) +') + +####################################### +## +## Read the process state of virt sandbox containers +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_sandbox_read_state',` + gen_require(` + attribute svirt_sandbox_domain; + ') + + ps_process_pattern($1, svirt_sandbox_domain) +') + +###################################### +## +## Send a signal to sandbox domains +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_signal_sandbox',` + gen_require(` + attribute svirt_sandbox_domain; + ') + + allow $1 svirt_sandbox_domain:process signal; +') + +####################################### +## +## Getattr Sandbox File systems +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_getattr_sandbox_filesystem',` + gen_require(` + type svirt_sandbox_file_t; + ') + + allow $1 svirt_sandbox_file_t:filesystem getattr; +') + +####################################### +## +## Read Sandbox Files +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_read_sandbox_files',` + gen_require(` + type svirt_sandbox_file_t; + ') + + list_dirs_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) + read_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) + read_lnk_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) +') + +####################################### +## +## Read the process state of spc containers +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_read_state',` + gen_require(` + type spc_t; + ') + + ps_process_pattern($1, spc_t) +') + diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te new file mode 100644 index 0000000..d4de36f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te @@ -0,0 +1,465 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +##

+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type docker_auth_t; +type docker_auth_exec_t; +init_daemon_domain(docker_auth_t, docker_auth_exec_t) + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_plugin_var_run_t; +files_pid_file(docker_plugin_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +# OL7 systemd selinux update +type systemd_machined_t; +type systemd_machined_exec_t; +init_daemon_domain(systemd_machined_t, systemd_machined_exec_t) + +# /run/systemd/machines +type systemd_machined_var_run_t; +files_pid_file(systemd_machined_var_run_t) + +# /var/lib/machines +type systemd_machined_var_lib_t; +files_type(systemd_machined_var_lib_t) + + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +docker_auth_stream_connect(docker_t) + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) +files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +# +# lxc rules +# + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + systemd_dbus_chat_machined(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + unconfined_domain(docker_t) + # unconfined_typebounds(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + # for lxc + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; + virt_sandbox_entrypoint(docker_t) +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; +role system_r types spc_t; + +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; +filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") + +optional_policy(` + systemd_dbus_chat_machined(spc_t) +') + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) + virt_sandbox_entrypoint(spc_t) +') + +######################################## +# +# docker_auth local policy +# +allow docker_auth_t self:fifo_file rw_fifo_file_perms; +allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; +dontaudit docker_auth_t self:capability net_admin; + +docker_stream_connect(docker_auth_t) + +manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) + +domain_use_interactive_fds(docker_auth_t) + +kernel_read_net_sysctls(docker_auth_t) + +auth_use_nsswitch(docker_auth_t) + +files_read_etc_files(docker_auth_t) + +miscfiles_read_localization(docker_auth_t) + +sysnet_dns_name_resolve(docker_auth_t) + +######################################## +# +# OL7.2 systemd selinux update +# systemd_machined local policy +# +allow systemd_machined_t self:capability { dac_override setgid sys_admin sys_chroot sys_ptrace }; +allow systemd_machined_t systemd_unit_file_t:service { status start }; +allow systemd_machined_t self:unix_dgram_socket create_socket_perms; + +manage_dirs_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) +manage_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) +manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) +init_pid_filetrans(systemd_machined_t, systemd_machined_var_run_t, dir, "machines") + +manage_dirs_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) +manage_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) +manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) +init_var_lib_filetrans(systemd_machined_t, systemd_machined_var_lib_t, dir, "machines") + +kernel_dgram_send(systemd_machined_t) +# This is a bug, but need for now. +kernel_read_unlabeled_state(systemd_machined_t) + +init_dbus_chat(systemd_machined_t) +init_status(systemd_machined_t) + +userdom_dbus_send_all_users(systemd_machined_t) + +term_use_ptmx(systemd_machined_t) + +optional_policy(` + dbus_connect_system_bus(systemd_machined_t) + dbus_system_bus_client(systemd_machined_t) +') + +optional_policy(` + docker_read_share_files(systemd_machined_t) + docker_spc_read_state(systemd_machined_t) +') + +optional_policy(` + virt_dbus_chat(systemd_machined_t) + virt_sandbox_read_state(systemd_machined_t) + virt_signal_sandbox(systemd_machined_t) + virt_stream_connect_sandbox(systemd_machined_t) + virt_rw_svirt_dev(systemd_machined_t) + virt_getattr_sandbox_filesystem(systemd_machined_t) + virt_read_sandbox_files(systemd_machined_t) +') + + diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE new file mode 100644 index 0000000..5b6e7c6 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile new file mode 100644 index 0000000..1bdc695 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile @@ -0,0 +1,16 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc new file mode 100644 index 0000000..467d659 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc @@ -0,0 +1,18 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/dockerd -- gen_context(system_u:object_r:docker_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) + +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if new file mode 100644 index 0000000..ca075c0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if @@ -0,0 +1,461 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_share_t, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +interface(`domain_stub_named_filetrans_domain',` + gen_require(` + attribute named_filetrans_domain; + ') +') + +interface(`lvm_stub',` + gen_require(` + type lvm_t; + ') +') +interface(`staff_stub',` + gen_require(` + type staff_t; + ') +') +interface(`virt_stub_svirt_sandbox_domain',` + gen_require(` + attribute svirt_sandbox_domain; + ') +') +interface(`virt_stub_svirt_sandbox_file',` + gen_require(` + type svirt_sandbox_file_t; + ') +') +interface(`fs_dontaudit_remount_tmpfs',` + gen_require(` + type tmpfs_t; + ') + + dontaudit $1 tmpfs_t:filesystem remount; +') +interface(`dev_dontaudit_list_all_dev_nodes',` + gen_require(` + type device_t; + ') + + dontaudit $1 device_t:dir list_dir_perms; +') +interface(`kernel_unlabeled_entry_type',` + gen_require(` + type unlabeled_t; + ') + + domain_entry_file($1, unlabeled_t) +') +interface(`kernel_unlabeled_domtrans',` + gen_require(` + type unlabeled_t; + ') + + read_lnk_files_pattern($1, unlabeled_t, unlabeled_t) + domain_transition_pattern($1, unlabeled_t, $2) + type_transition $1 unlabeled_t:process $2; +') +interface(`files_write_all_pid_sockets',` + gen_require(` + attribute pidfile; + ') + + allow $1 pidfile:sock_file write_sock_file_perms; +') +interface(`dev_dontaudit_mounton_sysfs',` + gen_require(` + type sysfs_t; + ') + + dontaudit $1 sysfs_t:dir mounton; +') diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te new file mode 100644 index 0000000..bad0bb6 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te @@ -0,0 +1,407 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +##

+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +role system_r types spc_t; + +domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + unconfined_domain(docker_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) +') + +######################################## +# +# docker upstream policy +# + +optional_policy(` +# domain_stub_named_filetrans_domain() + gen_require(` + attribute named_filetrans_domain; + ') + + docker_filetrans_named_content(named_filetrans_domain) +') + +optional_policy(` + lvm_stub() + docker_rw_sem(lvm_t) +') + +optional_policy(` + staff_stub() + docker_stream_connect(staff_t) + docker_exec(staff_t) +') + +optional_policy(` + virt_stub_svirt_sandbox_domain() + virt_stub_svirt_sandbox_file() + allow svirt_sandbox_domain self:netlink_kobject_uevent_socket create_socket_perms; + docker_read_share_files(svirt_sandbox_domain) + docker_lib_filetrans(svirt_sandbox_domain,svirt_sandbox_file_t, sock_file) + docker_use_ptys(svirt_sandbox_domain) + docker_spc_stream_connect(svirt_sandbox_domain) + fs_list_tmpfs(svirt_sandbox_domain) + fs_rw_hugetlbfs_files(svirt_sandbox_domain) + fs_dontaudit_remount_tmpfs(svirt_sandbox_domain) + dev_dontaudit_mounton_sysfs(svirt_sandbox_domain) + + tunable_policy(`virt_sandbox_use_fusefs',` + fs_manage_fusefs_dirs(svirt_sandbox_domain) + fs_manage_fusefs_files(svirt_sandbox_domain) + fs_manage_fusefs_symlinks(svirt_sandbox_domain) + ') + gen_require(` + attribute domain; + ') + + dontaudit svirt_sandbox_domain domain:key {search link}; +') + +optional_policy(` + gen_require(` + type pcp_pmcd_t; + ') + docker_manage_lib_files(pcp_pmcd_t) +') diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz new file mode 100644 index 0000000000000000000000000000000000000000..ab5d59445ac1601ca378aaa3e71fb9cff43a1592 GIT binary patch literal 2847 zcmV+)3*hu0iwFo7v)okz17vSwYh`j@b7gF4ZgqGrH~__3TaVke5`I4V6@*`!6l=S| zL4lsU6wa>G7)ZQ6Yo}<61q526ZDJ)-B`NQ^I6wZ(@S=+?ue_UwK6D$4GsAB#9L|h1 zT74p9kjmtNshEi^7cAB+)14KO+rU$c!fk;-5#O zmV?vz9JoRUq(p7=UrB&Q;!Mydm$39gew3ZrB;ilS8)GkXHjhLJ~Z zb`9~dA;BW%P_PmCCQFh~L6RLy9thu%13cK#JwqnV8WL401Q%PfK6v5y10~;YJ{0bIQB&IB4h8PX!L;;nhe>W6UN2*vY){HP=m;wW%^*n~)VL%-lM6=;wQLDcf$Tqah4DzZ&A-OQ5 zpk}9!d<;9LGN)V+s;qrrJQSwpk3}bnLm)E<+bWW&HyOZUN+Z8! zrYvwTu1*6Pt*!k*0iAMYb~43Bh141ajx6w1(;G*YMQ=Hqr`EP^4~)I(9~ggC#Eqs? zD{L+egeI(L1_4dCa15BrIqU}t4{6QdV-7S)QIVWJI5#x+uY;!+G9tCH0HBZ%Sxi)C z8$>lWY$jj8Y28@j&axe-pr4r%%d53zgn1bWHS|f79H5xC)H0jgI zs0uU)bj^^I3>RHe-bFS9yYM?pRYwLc4Vmq2q_6juX;@({ab1JGR{4 zfw)WDORWuVA|@%o>a>8w)TaS@706>x{ypfAMZF9;#_*bFSi{*fL({Pf9G43qVP2w% zIefPU=Gn9DQa}6`GQB;xg;6xIw?0G;TbJ9dJ-0w6?P0F2$a6Y?)YuAX#LrY*3cta9 znbBSK62e9L9P1w1f-7Y@QM`a6_IzSR@_3V?)m{U-#s5;+q3R`&mIcd5CTWU?x6D`% zV8;+6L+lw|cO#sY_EKF!`4838h8Nl%`!hOJ>#s0)&D)<2@%Y(r-jGtktqviMNwER^ z48UzB*EEZ@e$}nh;O;eIRpUakf#QQ=iR`XhC_rrG0lrx?CC@<(%RcL-uQ2I}h+fpL z9caOv&z5Hp3f=+ka%(o(UvEx4okAy2e(WgrYB{7zbvTC@2yGVCyZlv@2@b z=9Ay1H{|2&^EC99RZZMk!DF%LI|5gCWOU6CMOBv8JxJALYABUav}-9d4&F+u4l=Z! zt$tIpHaGSo&AtLQ{yMwy<-K68`LOBhW^!G%4q$9F$y%XFlC6?ufnD{##t<;$jUKy4 zmY|~YRRVg>(K3^a{nIz&(T{I`?WEsR6=!_ySm4JPevFGmrwyKZ;Z$C|CJP8Jt~=KX zH>2s6DdEf(n*uUl@{rz-3Z5RXd1H00sjUle)ye1-ZW8H-mPzWaX2Z92 z2)V}{CiL_>nKMVNq%`CEQ5d3}lA>0PK!ac7>?t`f8d{Df`Sy8gn~~aa>{ftd?6kTc zF|j|25>LYg?~WqBj^i1)>7ay0aXYDvzLZeVoOJ<)sByEhPdb$d6PF5P+?nTA#c=PA@scfsdyQ}Y5_8NS&t1%dCZ80_lCjU~9q zjg5~^n4io*sRMUjw&e-&PXRHliX zY20}XrJnZnS;I@=y@9J_Lt)mmQkSDND_Fue2MBz)TLn7JCJNW?r(tTxn%2Mxv7ZB5 zT8-6m%Jsu2I&X4wlF-Qy)}Z;JzP3%3&VP8`3&%{68=F@ZwA>hn8)wGbGNbs`rvOw+Ii6L#f+~(-Xebl9tVa4Q=RZL4J-Fm?7Us&zIL%JG!WlDgL{mh6HshYGQfieib=4EQndS1#f<%XMS1_3R~RknrMThpB*A zCYMWHDccRA1lxy7yBA1<_@xnpti)d@-AL;Gr58s<`kYA`A6_^qr^Q>}F{(R=iy*ms z_mz-vzy~*6=s#M}x=+}dR_%&(wP_tsZHrdF6ek~>)suhy9Ri#~Hp*p+-+449V#y8* z2Vd{B>(20^n+gC1%*l?5Ejz8!n$?sqc{{6|sNQ9TW$Yu)$1I|Q6&gyDs=UJFgs-`Q z0ehv#<~$8HY8O8d4mOJ-J2c8J|4Myuef#ALRG`bj8C+l}nrYeoSfF~{9fp7{rG2HY zM<;c3{cS*>;P9>+Vg|o4pzX0HSg7$y!pS!7-9zUVZUj6|-4GUXvo>%SjTOt~zIt=- z-(4J4q<(_iha62Dz7?pde3zq!?w%O>PqiXYgOcCA&On092;Ebjh1w>3&(N6b`qqva z_kj(bC9a^$msVm=VkX>UOUv6-Ye@!LXc8$>j6$ xb`W`pZ+>}u<]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!" + +## Strings, double-quoted +color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!" + +## Single and double quotes +color brightyellow "('|\")" diff --git a/vendor/github.com/docker/docker/contrib/syntax/nano/README.md b/vendor/github.com/docker/docker/contrib/syntax/nano/README.md new file mode 100644 index 0000000..5985208 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/nano/README.md @@ -0,0 +1,32 @@ +Dockerfile.nanorc +================= + +Dockerfile syntax highlighting for nano + +Single User Installation +------------------------ +1. Create a nano syntax directory in your home directory: + * `mkdir -p ~/.nano/syntax` + +2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/` + * `cp Dockerfile.nanorc ~/.nano/syntax/` + +3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file + ``` +## Dockerfile files +include "~/.nano/syntax/Dockerfile.nanorc" + ``` + +System Wide Installation +------------------------ +1. Create a nano syntax directory: + * `mkdir /usr/local/share/nano` + +2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano` + * `cp Dockerfile.nanorc /usr/local/share/nano/` + +3. Add the following to your `/etc/nanorc`: + ``` +## Dockerfile files +include "/usr/local/share/nano/Dockerfile.nanorc" + ``` diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences new file mode 100644 index 0000000..20f0d04 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences @@ -0,0 +1,24 @@ + + + + + name + Comments + scope + source.dockerfile + settings + + shellVariables + + + name + TM_COMMENT_START + value + # + + + + uuid + 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 + + diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage new file mode 100644 index 0000000..948a9bf --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage @@ -0,0 +1,143 @@ + + + + + fileTypes + + Dockerfile + + name + Dockerfile + patterns + + + captures + + 1 + + name + keyword.control.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s + + + captures + + 1 + + name + keyword.operator.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s + + + begin + " + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + " + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.double.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + begin + ' + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + ' + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.single.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + captures + + 1 + + name + punctuation.whitespace.comment.leading.dockerfile + + 2 + + name + comment.line.number-sign.dockerfile + + 3 + + name + punctuation.definition.comment.dockerfile + + + comment + comment.line + match + ^(\s*)((#).*$\n?) + + + scopeName + source.dockerfile + uuid + a39d8795-59d2-49af-aa00-fe74ee29576e + + diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/info.plist b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/info.plist new file mode 100644 index 0000000..239f4b0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/info.plist @@ -0,0 +1,16 @@ + + + + + contactEmailRot13 + germ@andz.com.ar + contactName + GermanDZ + description + Helpers for Docker. + name + Docker + uuid + 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 + + diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/README.md b/vendor/github.com/docker/docker/contrib/syntax/textmate/README.md new file mode 100644 index 0000000..ce61101 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/README.md @@ -0,0 +1,17 @@ +# Docker.tmbundle + +Dockerfile syntax highlighting for TextMate and Sublime Text. + +## Install + +### Sublime Text + +Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). +Search for *Dockerfile Syntax Highlighting* + +### TextMate 2 + +You can install this bundle in TextMate by opening the preferences and going to the bundles tab. After installation it will be automatically updated for you. + +enjoy. + diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/REVIEWERS b/vendor/github.com/docker/docker/contrib/syntax/textmate/REVIEWERS new file mode 100644 index 0000000..965743d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/REVIEWERS @@ -0,0 +1 @@ +Asbjorn Enge (@asbjornenge) diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE new file mode 100644 index 0000000..e67cdab --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/README.md b/vendor/github.com/docker/docker/contrib/syntax/vim/README.md new file mode 100644 index 0000000..5aa9bd8 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/README.md @@ -0,0 +1,26 @@ +dockerfile.vim +============== + +Syntax highlighting for Dockerfiles + +Installation +------------ +With [pathogen](https://github.com/tpope/vim-pathogen), the usual way... + +With [Vundle](https://github.com/gmarik/Vundle.vim) + + Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'} + +Features +-------- + +The syntax highlighting includes: + +* The directives (e.g. `FROM`) +* Strings +* Comments + +License +------- + +BSD, short and sweet diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/doc/dockerfile.txt b/vendor/github.com/docker/docker/contrib/syntax/vim/doc/dockerfile.txt new file mode 100644 index 0000000..e69e2b7 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/doc/dockerfile.txt @@ -0,0 +1,18 @@ +*dockerfile.txt* Syntax highlighting for Dockerfiles + +Author: Honza Pokorny +License: BSD + +INSTALLATION *installation* + +Drop it on your Pathogen path and you're all set. + +FEATURES *features* + +The syntax highlighting includes: + +* The directives (e.g. FROM) +* Strings +* Comments + + vim:tw=78:et:ft=help:norl: diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim b/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim new file mode 100644 index 0000000..ee10e5d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim @@ -0,0 +1 @@ +au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/syntax/dockerfile.vim b/vendor/github.com/docker/docker/contrib/syntax/vim/syntax/dockerfile.vim new file mode 100644 index 0000000..a067e6a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/syntax/dockerfile.vim @@ -0,0 +1,31 @@ +" dockerfile.vim - Syntax highlighting for Dockerfiles +" Maintainer: Honza Pokorny +" Version: 0.5 + + +if exists("b:current_syntax") + finish +endif + +let b:current_syntax = "dockerfile" + +syntax case ignore + +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s/ +highlight link dockerfileKeyword Keyword + +syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ +highlight link dockerfileString String + +syntax match dockerfileComment "\v^\s*#.*$" +highlight link dockerfileComment Comment + +set commentstring=#\ %s + +" match "RUN", "CMD", and "ENTRYPOINT" lines, and parse them as shell +let s:current_syntax = b:current_syntax +unlet b:current_syntax +syntax include @SH syntax/sh.vim +let b:current_syntax = s:current_syntax +syntax region shLine matchgroup=dockerfileKeyword start=/\v^\s*(RUN|CMD|ENTRYPOINT)\s/ end=/\v$/ contains=@SH +" since @SH will handle "\" as part of the same line automatically, this "just works" for line continuation too, but with the caveat that it will highlight "RUN echo '" followed by a newline as if it were a block because the "'" is shell line continuation... not sure how to fix that just yet (TODO) diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile b/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile new file mode 100644 index 0000000..f95f175 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile @@ -0,0 +1,15 @@ +FROM buildpack-deps:jessie + +COPY . /usr/src/ + +WORKDIR /usr/src/ + +RUN gcc -g -Wall -static userns.c -o /usr/bin/userns-test \ + && gcc -g -Wall -static ns.c -o /usr/bin/ns-test \ + && gcc -g -Wall -static acct.c -o /usr/bin/acct-test \ + && gcc -g -Wall -static setuid.c -o /usr/bin/setuid-test \ + && gcc -g -Wall -static setgid.c -o /usr/bin/setgid-test \ + && gcc -g -Wall -static socket.c -o /usr/bin/socket-test \ + && gcc -g -Wall -static raw.c -o /usr/bin/raw-test + +RUN [ "$(uname -m)" = "x86_64" ] && gcc -s -m32 -nostdlib exit32.s -o /usr/bin/exit32-test || true diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/acct.c b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c new file mode 100644 index 0000000..88ac287 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c @@ -0,0 +1,16 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + int err = acct("/tmp/t"); + if (err == -1) { + fprintf(stderr, "acct failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s new file mode 100644 index 0000000..8bbb5c5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s @@ -0,0 +1,7 @@ +.globl _start +.text +_start: + xorl %eax, %eax + incl %eax + movb $0, %bl + int $0x80 diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c new file mode 100644 index 0000000..33684e1 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c @@ -0,0 +1,63 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */ + +struct clone_args { + char **argv; +}; + +// child_exec is the func that will be executed as the result of clone +static int child_exec(void *stuff) +{ + struct clone_args *args = (struct clone_args *)stuff; + if (execvp(args->argv[0], args->argv) != 0) { + fprintf(stderr, "failed to execvp argments %s\n", + strerror(errno)); + exit(-1); + } + // we should never reach here! + exit(EXIT_FAILURE); +} + +int main(int argc, char **argv) +{ + struct clone_args args; + args.argv = &argv[1]; + + int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD; + + // allocate stack for child + char *stack; /* Start of stack buffer */ + char *child_stack; /* End of stack buffer */ + stack = + mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) { + fprintf(stderr, "mmap failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ + + // the result of this call is that our child_exec will be run in another + // process returning its pid + pid_t pid = clone(child_exec, child_stack, clone_flags, &args); + if (pid < 0) { + fprintf(stderr, "clone failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + // lets wait on our child process here before we, the parent, exits + if (waitpid(pid, NULL, 0) == -1) { + fprintf(stderr, "failed to wait pid %d\n", pid); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/raw.c b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c new file mode 100644 index 0000000..7995a0d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c @@ -0,0 +1,14 @@ +#include +#include +#include +#include +#include + +int main() { + if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) { + perror("socket"); + return 1; + } + + return 0; +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c new file mode 100644 index 0000000..df9680c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c @@ -0,0 +1,11 @@ +#include +#include +#include + +int main() { + if (setgid(1) == -1) { + perror("setgid"); + return 1; + } + return 0; +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c new file mode 100644 index 0000000..5b93967 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c @@ -0,0 +1,11 @@ +#include +#include +#include + +int main() { + if (setuid(1) == -1) { + perror("setuid"); + return 1; + } + return 0; +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/socket.c b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c new file mode 100644 index 0000000..d26c82f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c @@ -0,0 +1,30 @@ +#include +#include +#include +#include +#include +#include + +int main() { + int s; + struct sockaddr_in sin; + + s = socket(AF_INET, SOCK_STREAM, 0); + if (s == -1) { + perror("socket"); + return 1; + } + + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = INADDR_ANY; + sin.sin_port = htons(80); + + if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) { + perror("bind"); + return 1; + } + + close(s); + + return 0; +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c new file mode 100644 index 0000000..2af36f4 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c @@ -0,0 +1,63 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */ + +struct clone_args { + char **argv; +}; + +// child_exec is the func that will be executed as the result of clone +static int child_exec(void *stuff) +{ + struct clone_args *args = (struct clone_args *)stuff; + if (execvp(args->argv[0], args->argv) != 0) { + fprintf(stderr, "failed to execvp argments %s\n", + strerror(errno)); + exit(-1); + } + // we should never reach here! + exit(EXIT_FAILURE); +} + +int main(int argc, char **argv) +{ + struct clone_args args; + args.argv = &argv[1]; + + int clone_flags = CLONE_NEWUSER | SIGCHLD; + + // allocate stack for child + char *stack; /* Start of stack buffer */ + char *child_stack; /* End of stack buffer */ + stack = + mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) { + fprintf(stderr, "mmap failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ + + // the result of this call is that our child_exec will be run in another + // process returning its pid + pid_t pid = clone(child_exec, child_stack, clone_flags, &args); + if (pid < 0) { + fprintf(stderr, "clone failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + // lets wait on our child process here before we, the parent, exits + if (waitpid(pid, NULL, 0) == -1) { + fprintf(stderr, "failed to wait pid %d\n", pid); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/docker/docker/contrib/udev/80-docker.rules b/vendor/github.com/docker/docker/contrib/udev/80-docker.rules new file mode 100644 index 0000000..f934c01 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/udev/80-docker.rules @@ -0,0 +1,3 @@ +# hide docker's loopback devices from udisks, and thus from user desktops +SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" +SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" diff --git a/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md b/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md new file mode 100644 index 0000000..286a985 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md @@ -0,0 +1,50 @@ +# Vagrant integration + +Currently there are at least 4 different projects that we are aware of that deals +with integration with [Vagrant](http://vagrantup.com/) at different levels. One +approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html) +which means you can create containers and pull base images on VMs using Docker's +CLI and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html), +meaning you can use Vagrant to control Docker containers. + + +### Provisioners + +* [Vocker](https://github.com/fgrehm/vocker) +* [Ventriloquist](https://github.com/fgrehm/ventriloquist) + +### Providers + +* [docker-provider](https://github.com/fgrehm/docker-provider) +* [vagrant-shell](https://github.com/destructuring/vagrant-shell) + +## Setting up Vagrant-docker with the Engine API + +The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to connect to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this: + +``` +description "Docker daemon" + +start on filesystem +stop on runlevel [!2345] + +respawn + +script + /usr/bin/docker daemon -H=tcp://0.0.0.0:2375 +end script +``` + +Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: + +``` +ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost +``` + +(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) + +Note that because the port has been changed, to run docker commands from within the command line you must run them like this: + +``` +sudo docker -H 0.0.0.0:2375 < commands for docker > +``` diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default.go b/vendor/github.com/docker/docker/daemon/apparmor_default.go new file mode 100644 index 0000000..09dd054 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default.go @@ -0,0 +1,36 @@ +// +build linux + +package daemon + +import ( + "fmt" + + aaprofile "github.com/docker/docker/profiles/apparmor" + "github.com/opencontainers/runc/libcontainer/apparmor" +) + +// Define constants for native driver +const ( + defaultApparmorProfile = "docker-default" +) + +func ensureDefaultAppArmorProfile() error { + if apparmor.IsEnabled() { + loaded, err := aaprofile.IsLoaded(defaultApparmorProfile) + if err != nil { + return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", defaultApparmorProfile, err) + } + + // Nothing to do. + if loaded { + return nil + } + + // Load the profile. + if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { + return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", defaultApparmorProfile) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go new file mode 100644 index 0000000..cd2dd97 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package daemon + +func ensureDefaultAppArmorProfile() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/archive.go b/vendor/github.com/docker/docker/daemon/archive.go new file mode 100644 index 0000000..1999f12 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive.go @@ -0,0 +1,436 @@ +package daemon + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" +) + +// ErrExtractPointNotDirectory is used to convey that the operation to extract +// a tar archive to a directory in a container has failed because the specified +// path does not refer to a directory. +var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") + +// ContainerCopy performs a deprecated operation of archiving the resource at +// the specified path in the container identified by the given name. +func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if res[0] == '/' || res[0] == '\\' { + res = res[1:] + } + + return daemon.containerCopy(container, res) +} + +// ContainerStatPath stats the filesystem resource at the specified path in the +// container identified by the given name. +func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + return daemon.containerStatPath(container, path) +} + +// ContainerArchivePath creates an archive of the filesystem resource at the +// specified path in the container identified by the given name. Returns a +// tar archive of the resource and whether it was a directory or a single file. +func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, nil, err + } + + return daemon.containerArchivePath(container, path) +} + +// ContainerExtractToDir extracts the given archive to the specified location +// in the filesystem of the container identified by the given name. The given +// path must be of a directory in the container. If it is not, the error will +// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will +// be an error if unpacking the given content would cause an existing directory +// to be replaced with a non-directory and vice versa. +func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) +} + +// containerStatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. +func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return nil, err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, err + } + + return container.StatPath(resolvedPath, absPath) +} + +// containerArchivePath creates an archive of the filesystem resource at the specified +// path in this container. Returns a tar archive of the resource and stat info +// about the resource. +func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err = daemon.Mount(container); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err = daemon.mountVolumes(container); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.StatPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + + daemon.LogContainerEvent(container, "archive-path") + + return content, stat, nil +} + +// containerExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. +func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return err + } + + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.ResolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. + resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.BaseFS) { + baseRel = resolvedPath[len(container.BaseFS):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.HostConfig.ReadonlyRootfs { + return ErrRootFSReadOnly + } + + uid, gid := daemon.GetRemappedUIDGID() + options := &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &archive.TarChownOptions{ + UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? + }, + } + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + daemon.LogContainerEvent(container, "extract-to-dir") + + return nil +} + +func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} + +// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container +// specified by a container object. +// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). +// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. +func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { + srcPath := src.Path() + destExists := true + destDir := false + rootUID, rootGID := daemon.GetRemappedUIDGID() + + // Work in daemon-local OS specific file paths + destPath = filepath.FromSlash(destPath) + + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(c) + if err != nil { + return err + } + defer daemon.Unmount(c) + + dest, err := c.GetResourcePath(destPath) + if err != nil { + return err + } + + // Preserve the trailing slash + // TODO: why are we appending another path separator if there was already one? + if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { + destDir = true + dest += string(os.PathSeparator) + } + + destPath = dest + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) + return err + } + destExists = false + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archiver := &archive.Archiver{ + Untar: chrootarchive.Untar, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + + if src.IsDir() { + // copy as directory + if err := archiver.CopyWithTar(srcPath, destPath); err != nil { + return err + } + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) + } + if decompress && archive.IsArchivePath(srcPath) { + // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) + + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in slash + tarDest := destPath + if strings.HasSuffix(tarDest, string(os.PathSeparator)) { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + err := archiver.UntarPath(srcPath, tarDest) + /* + if err != nil { + logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) + } + */ + return err + } + + // only needed for fixPermissions, but might as well put it before CopyFileWithTar + if destDir || (destExists && destStat.IsDir()) { + destPath = filepath.Join(destPath, src.Name()) + } + + if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { + return err + } + if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { + return err + } + + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) +} diff --git a/vendor/github.com/docker/docker/daemon/archive_unix.go b/vendor/github.com/docker/docker/daemon/archive_unix.go new file mode 100644 index 0000000..47666fe --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package daemon + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/container" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.HasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. + return err + } + doChownDestination := !destExisted || !destStat.IsDir() + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { + return nil + } + + // Path is prefixed by source: substitute with destination instead. + cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) + }) +} diff --git a/vendor/github.com/docker/docker/daemon/archive_windows.go b/vendor/github.com/docker/docker/daemon/archive_windows.go new file mode 100644 index 0000000..b3a1045 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_windows.go @@ -0,0 +1,18 @@ +package daemon + +import "github.com/docker/docker/container" + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +// +// This is a no-op on Windows which does not support read-only volumes, or +// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5 +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + return false, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // chown is not supported on Windows + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/attach.go b/vendor/github.com/docker/docker/daemon/attach.go new file mode 100644 index 0000000..917237d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/attach.go @@ -0,0 +1,147 @@ +package daemon + +import ( + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" +) + +// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. +func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { + keys := []byte{} + var err error + if c.DetachKeys != "" { + keys, err = term.ToBytes(c.DetachKeys) + if err != nil { + return fmt.Errorf("Invalid escape keys (%s) provided", c.DetachKeys) + } + } + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + if container.IsPaused() { + err := fmt.Errorf("Container %s is paused. Unpause the container before attach", prefixOrName) + return errors.NewRequestConflictError(err) + } + + inStream, outStream, errStream, err := c.GetStreams() + if err != nil { + return err + } + defer inStream.Close() + + if !container.Config.Tty && c.MuxStreams { + errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + var stdin io.ReadCloser + var stdout, stderr io.Writer + + if c.UseStdin { + stdin = inStream + } + if c.UseStdout { + stdout = outStream + } + if c.UseStderr { + stderr = errStream + } + + if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, keys); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + return nil +} + +// ContainerAttachRaw attaches the provided streams to the container's stdio +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + return daemon.containerAttach(container, stdin, stdout, stderr, false, stream, nil) +} + +func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { + if logs { + logDriver, err := daemon.getLogger(c) + if err != nil { + return err + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && stdout != nil { + stdout.Write(msg.Line) + } + if msg.Source == "stderr" && stderr != nil { + stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(c, "attach") + + //stream + if stream { + var stdinPipe io.ReadCloser + if stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + io.Copy(w, stdin) + }() + stdinPipe = r + } + + waitChan := make(chan struct{}) + if c.Config.StdinOnce && !c.Config.Tty { + go func() { + c.WaitStop(-1 * time.Second) + close(waitChan) + }() + } + + err := <-c.Attach(stdinPipe, stdout, stderr, keys) + if err != nil { + if _, ok := err.(container.DetachError); ok { + daemon.LogContainerEvent(c, "detach") + } else { + logrus.Errorf("attach failed with error: %v", err) + } + } + + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if c.Config.StdinOnce && !c.Config.Tty { + <-waitChan + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/auth.go b/vendor/github.com/docker/docker/daemon/auth.go new file mode 100644 index 0000000..f5f4d7b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/auth.go @@ -0,0 +1,13 @@ +package daemon + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" +) + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { + return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx)) +} diff --git a/vendor/github.com/docker/docker/daemon/bindmount_solaris.go b/vendor/github.com/docker/docker/daemon/bindmount_solaris.go new file mode 100644 index 0000000..87bf3ef --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/bindmount_solaris.go @@ -0,0 +1,5 @@ +// +build solaris + +package daemon + +const bindMountType = "lofs" diff --git a/vendor/github.com/docker/docker/daemon/bindmount_unix.go b/vendor/github.com/docker/docker/daemon/bindmount_unix.go new file mode 100644 index 0000000..3966bab --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/bindmount_unix.go @@ -0,0 +1,5 @@ +// +build linux freebsd + +package daemon + +const bindMountType = "bind" diff --git a/vendor/github.com/docker/docker/daemon/cache.go b/vendor/github.com/docker/docker/daemon/cache.go new file mode 100644 index 0000000..a2c2c13 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cache.go @@ -0,0 +1,254 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/runconfig" + "github.com/pkg/errors" +) + +// getLocalCachedImage returns the most recent created image that is a child +// of the image with imgID, that had the same config when it was +// created. nil is returned if a child cannot be found. An error is +// returned if the parent image cannot be found. +func (daemon *Daemon) getLocalCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { + // Loop on the children of the given image and check the config + getMatch := func(siblings []image.ID) (*image.Image, error) { + var match *image.Image + for _, id := range siblings { + img, err := daemon.imageStore.Get(id) + if err != nil { + return nil, fmt.Errorf("unable to find image %q", id) + } + + if runconfig.Compare(&img.ContainerConfig, config) { + // check for the most up to date match + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil + } + + // In this case, this is `FROM scratch`, which isn't an actual image. + if imgID == "" { + images := daemon.imageStore.Map() + var siblings []image.ID + for id, img := range images { + if img.Parent == imgID { + siblings = append(siblings, id) + } + } + return getMatch(siblings) + } + + // find match from child images + siblings := daemon.imageStore.Children(imgID) + return getMatch(siblings) +} + +// MakeImageCache creates a stateful image cache. +func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache { + if len(sourceRefs) == 0 { + return &localImageCache{daemon} + } + + cache := &imageCache{daemon: daemon, localImageCache: &localImageCache{daemon}} + + for _, ref := range sourceRefs { + img, err := daemon.GetImage(ref) + if err != nil { + logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) + continue + } + cache.sources = append(cache.sources, img) + } + + return cache +} + +// localImageCache is cache based on parent chain. +type localImageCache struct { + daemon *Daemon +} + +func (lic *localImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) { + return getImageIDAndError(lic.daemon.getLocalCachedImage(image.ID(imgID), config)) +} + +// imageCache is cache based on history objects. Requires initial set of images. +type imageCache struct { + sources []*image.Image + daemon *Daemon + localImageCache *localImageCache +} + +func (ic *imageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) { + var history []image.History + rootFS := image.NewRootFS() + lenHistory := 0 + if parent != nil { + history = parent.History + rootFS = parent.RootFS + lenHistory = len(parent.History) + } + history = append(history, target.History[lenHistory]) + if layer := getLayerForHistoryIndex(target, lenHistory); layer != "" { + rootFS.Append(layer) + } + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: cfg, + Architecture: target.Architecture, + OS: target.OS, + Author: target.Author, + Created: history[len(history)-1].Created, + }, + RootFS: rootFS, + History: history, + OSFeatures: target.OSFeatures, + OSVersion: target.OSVersion, + }) + if err != nil { + return "", errors.Wrap(err, "failed to marshal image config") + } + + imgID, err := ic.daemon.imageStore.Create(config) + if err != nil { + return "", errors.Wrap(err, "failed to create cache image") + } + + if parent != nil { + if err := ic.daemon.imageStore.SetParent(imgID, parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return imgID, nil +} + +func (ic *imageCache) isParent(imgID, parentID image.ID) bool { + nextParent, err := ic.daemon.imageStore.GetParent(imgID) + if err != nil { + return false + } + if nextParent == parentID { + return true + } + return ic.isParent(nextParent, parentID) +} + +func (ic *imageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) { + imgID, err := ic.localImageCache.GetCache(parentID, cfg) + if err != nil { + return "", err + } + if imgID != "" { + for _, s := range ic.sources { + if ic.isParent(s.ID(), image.ID(imgID)) { + return imgID, nil + } + } + } + + var parent *image.Image + lenHistory := 0 + if parentID != "" { + parent, err = ic.daemon.imageStore.Get(image.ID(parentID)) + if err != nil { + return "", errors.Wrapf(err, "unable to find image %v", parentID) + } + lenHistory = len(parent.History) + } + + for _, target := range ic.sources { + if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) { + continue + } + + if len(target.History)-1 == lenHistory { // last + if parent != nil { + if err := ic.daemon.imageStore.SetParent(target.ID(), parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return target.ID().String(), nil + } + + imgID, err := ic.restoreCachedImage(parent, target, cfg) + if err != nil { + return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID()) + } + + ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm + return imgID.String(), nil + } + + return "", nil +} + +func getImageIDAndError(img *image.Image, err error) (string, error) { + if img == nil || err != nil { + return "", err + } + return img.ID().String(), nil +} + +func isValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 { + return false + } + if parent == nil || len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0 { + return true + } + if len(parent.History) >= len(img.History) { + return false + } + if len(parent.RootFS.DiffIDs) >= len(img.RootFS.DiffIDs) { + return false + } + + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + for i, d := range parent.RootFS.DiffIDs { + if d != img.RootFS.DiffIDs[i] { + return false + } + } + return true +} + +func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID { + layerIndex := 0 + for i, h := range image.History { + if i == index { + if h.EmptyLayer { + return "" + } + break + } + if !h.EmptyLayer { + layerIndex++ + } + } + return image.RootFS.DiffIDs[layerIndex] // validate? +} + +func isValidConfig(cfg *containertypes.Config, h image.History) bool { + // todo: make this format better than join that loses data + return strings.Join(cfg.Cmd, " ") == h.CreatedBy +} diff --git a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go new file mode 100644 index 0000000..c99485f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go @@ -0,0 +1,131 @@ +// +build !windows + +package caps + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/stringutils" + "github.com/syndtr/gocapability/capability" +) + +var capabilityList Capabilities + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capabilityList = append(capabilityList, + &CapabilityMapping{ + Key: "CAP_" + strings.ToUpper(cap.String()), + Value: cap, + }, + ) + } +} + +type ( + // CapabilityMapping maps linux capability name to its value of capability.Cap type + // Capabilities is one of the security systems in Linux Security Module (LSM) + // framework provided by the kernel. + // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + // Capabilities contains all CapabilityMapping + Capabilities []*CapabilityMapping +) + +// String returns of CapabilityMapping +func (c *CapabilityMapping) String() string { + return c.Key +} + +// GetCapability returns CapabilityMapping which contains specific key +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +// GetAllCapabilities returns all of the capabilities +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// TweakCapabilities can tweak capabilities by adding or dropping capabilities +// based on the basics capabilities. +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = GetAllCapabilities() + ) + + // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix + // Currently they are mixed in here. We should do conversion in one place. + + // look for invalid cap in the drop list + for _, cap := range drops { + if strings.ToLower(cap) == "all" { + continue + } + + if !stringutils.InSlice(allCaps, "CAP_"+cap) { + return nil, fmt.Errorf("Unknown capability drop: %q", cap) + } + } + + // handle --cap-add=all + if stringutils.InSlice(adds, "all") { + basics = allCaps + } + + if !stringutils.InSlice(drops, "all") { + for _, cap := range basics { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + // if we don't drop `all`, add back all the non-dropped caps + if !stringutils.InSlice(drops, cap[4:]) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + } + + for _, cap := range adds { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + cap = "CAP_" + cap + + if !stringutils.InSlice(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability to add: %q", cap) + } + + // add cap if not already in the list + if !stringutils.InSlice(newCaps, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + return newCaps, nil +} diff --git a/vendor/github.com/docker/docker/daemon/changes.go b/vendor/github.com/docker/docker/daemon/changes.go new file mode 100644 index 0000000..fc8cd27 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/changes.go @@ -0,0 +1,31 @@ +package daemon + +import ( + "errors" + "runtime" + "time" + + "github.com/docker/docker/pkg/archive" +) + +// ContainerChanges returns a list of container fs changes +func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if runtime.GOOS == "windows" && container.IsRunning() { + return nil, errors.New("Windows does not support diff of a running container") + } + + container.Lock() + defer container.Unlock() + c, err := container.RWLayer.Changes() + if err != nil { + return nil, err + } + containerActions.WithValues("changes").UpdateSince(start) + return c, nil +} diff --git a/vendor/github.com/docker/docker/daemon/checkpoint.go b/vendor/github.com/docker/docker/daemon/checkpoint.go new file mode 100644 index 0000000..2718174 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/checkpoint.go @@ -0,0 +1,110 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/utils" +) + +var ( + validCheckpointNameChars = utils.RestrictedNameChars + validCheckpointNamePattern = utils.RestrictedNamePattern +) + +// CheckpointCreate checkpoints the process running in a container with CRIU +func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreateOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return fmt.Errorf("Container %s not running", name) + } + + var checkpointDir string + if config.CheckpointDir != "" { + checkpointDir = config.CheckpointDir + } else { + checkpointDir = container.CheckpointDir() + } + + if !validCheckpointNamePattern.MatchString(config.CheckpointID) { + return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars) + } + + err = daemon.containerd.CreateCheckpoint(container.ID, config.CheckpointID, checkpointDir, config.Exit) + if err != nil { + return fmt.Errorf("Cannot checkpoint container %s: %s", name, err) + } + + daemon.LogContainerEvent(container, "checkpoint") + + return nil +} + +// CheckpointDelete deletes the specified checkpoint +func (daemon *Daemon) CheckpointDelete(name string, config types.CheckpointDeleteOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + var checkpointDir string + if config.CheckpointDir != "" { + checkpointDir = config.CheckpointDir + } else { + checkpointDir = container.CheckpointDir() + } + + return os.RemoveAll(filepath.Join(checkpointDir, config.CheckpointID)) +} + +// CheckpointList lists all checkpoints of the specified container +func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOptions) ([]types.Checkpoint, error) { + var out []types.Checkpoint + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + var checkpointDir string + if config.CheckpointDir != "" { + checkpointDir = config.CheckpointDir + } else { + checkpointDir = container.CheckpointDir() + } + + if err := os.MkdirAll(checkpointDir, 0755); err != nil { + return nil, err + } + + dirs, err := ioutil.ReadDir(checkpointDir) + if err != nil { + return nil, err + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + path := filepath.Join(checkpointDir, d.Name(), "config.json") + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var cpt types.Checkpoint + if err := json.Unmarshal(data, &cpt); err != nil { + return nil, err + } + out = append(out, cpt) + } + + return out, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster.go b/vendor/github.com/docker/docker/daemon/cluster.go new file mode 100644 index 0000000..98b2aa1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster.go @@ -0,0 +1,12 @@ +package daemon + +import ( + apitypes "github.com/docker/docker/api/types" +) + +// Cluster is the interface for github.com/docker/docker/daemon/cluster.(*Cluster). +type Cluster interface { + GetNetwork(input string) (apitypes.NetworkResource, error) + GetNetworks() ([]apitypes.NetworkResource, error) + RemoveNetwork(input string) error +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/cluster.go b/vendor/github.com/docker/docker/daemon/cluster/cluster.go new file mode 100644 index 0000000..4af035b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/cluster.go @@ -0,0 +1,1973 @@ +package cluster + +import ( + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/daemon/cluster/executor/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" + swarmnode "github.com/docker/swarmkit/node" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +const swarmDirName = "swarm" +const controlSocket = "control.sock" +const swarmConnectTimeout = 20 * time.Second +const swarmRequestTimeout = 20 * time.Second +const stateFile = "docker-state.json" +const defaultAddr = "0.0.0.0:2377" + +const ( + initialReconnectDelay = 100 * time.Millisecond + maxReconnectDelay = 30 * time.Second + contextPrefix = "com.docker.swarm" +) + +// ErrNoSwarm is returned on leaving a cluster that was never initialized +var ErrNoSwarm = fmt.Errorf("This node is not part of a swarm") + +// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated +var ErrSwarmExists = fmt.Errorf("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.") + +// ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet. +var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.") + +// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. +var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.") + +// ErrSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it. +var ErrSwarmLocked = fmt.Errorf("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.") + +// ErrSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically. +var ErrSwarmCertificatesExpired = errors.New("Swarm certificates have expired. To replace them, leave the swarm and join again.") + +// NetworkSubnetsProvider exposes functions for retrieving the subnets +// of networks managed by Docker, so they can be filtered. +type NetworkSubnetsProvider interface { + V4Subnets() []net.IPNet + V6Subnets() []net.IPNet +} + +// Config provides values for Cluster. +type Config struct { + Root string + Name string + Backend executorpkg.Backend + NetworkSubnetsProvider NetworkSubnetsProvider + + // DefaultAdvertiseAddr is the default host/IP or network interface to use + // if no AdvertiseAddr value is specified. + DefaultAdvertiseAddr string + + // path to store runtime state, such as the swarm control socket + RuntimeRoot string +} + +// Cluster provides capabilities to participate in a cluster as a worker or a +// manager. +type Cluster struct { + sync.RWMutex + *node + root string + runtimeRoot string + config Config + configEvent chan struct{} // todo: make this array and goroutine safe + actualLocalAddr string // after resolution, not persisted + stop bool + err error + cancelDelay func() + attachers map[string]*attacher + locked bool + lastNodeConfig *nodeStartConfig +} + +// attacher manages the in-memory attachment state of a container +// attachment to a global scope network managed by swarm manager. It +// helps in identifying the attachment ID via the taskID and the +// corresponding attachment configuration obtained from the manager. +type attacher struct { + taskID string + config *network.NetworkingConfig + attachWaitCh chan *network.NetworkingConfig + attachCompleteCh chan struct{} + detachWaitCh chan struct{} +} + +type node struct { + *swarmnode.Node + done chan struct{} + ready bool + conn *grpc.ClientConn + client swarmapi.ControlClient + logs swarmapi.LogsClient + reconnectDelay time.Duration + config nodeStartConfig +} + +// nodeStartConfig holds configuration needed to start a new node. Exported +// fields of this structure are saved to disk in json. Unexported fields +// contain data that shouldn't be persisted between daemon reloads. +type nodeStartConfig struct { + // LocalAddr is this machine's local IP or hostname, if specified. + LocalAddr string + // RemoteAddr is the address that was given to "swarm join". It is used + // to find LocalAddr if necessary. + RemoteAddr string + // ListenAddr is the address we bind to, including a port. + ListenAddr string + // AdvertiseAddr is the address other nodes should connect to, + // including a port. + AdvertiseAddr string + joinAddr string + forceNewCluster bool + joinToken string + lockKey []byte + autolock bool +} + +// New creates a new Cluster instance using provided config. +func New(config Config) (*Cluster, error) { + root := filepath.Join(config.Root, swarmDirName) + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + if config.RuntimeRoot == "" { + config.RuntimeRoot = root + } + if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil { + return nil, err + } + c := &Cluster{ + root: root, + config: config, + configEvent: make(chan struct{}, 10), + runtimeRoot: config.RuntimeRoot, + attachers: make(map[string]*attacher), + } + + nodeConfig, err := c.loadState() + if err != nil { + if os.IsNotExist(err) { + return c, nil + } + return nil, err + } + + n, err := c.startNewNode(*nodeConfig) + if err != nil { + return nil, err + } + + select { + case <-time.After(swarmConnectTimeout): + logrus.Error("swarm component could not be started before timeout was reached") + case <-n.Ready(): + case <-n.done: + if errors.Cause(c.err) == ErrSwarmLocked { + return c, nil + } + if err, ok := errors.Cause(c.err).(x509.CertificateInvalidError); ok && err.Reason == x509.Expired { + c.err = ErrSwarmCertificatesExpired + return c, nil + } + return nil, fmt.Errorf("swarm component could not be started: %v", c.err) + } + go c.reconnectOnFailure(n) + return c, nil +} + +func (c *Cluster) loadState() (*nodeStartConfig, error) { + dt, err := ioutil.ReadFile(filepath.Join(c.root, stateFile)) + if err != nil { + return nil, err + } + // missing certificate means no actual state to restore from + if _, err := os.Stat(filepath.Join(c.root, "certificates/swarm-node.crt")); err != nil { + if os.IsNotExist(err) { + c.clearState() + } + return nil, err + } + var st nodeStartConfig + if err := json.Unmarshal(dt, &st); err != nil { + return nil, err + } + return &st, nil +} + +func (c *Cluster) saveState(config nodeStartConfig) error { + dt, err := json.Marshal(config) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600) +} + +func (c *Cluster) reconnectOnFailure(n *node) { + for { + <-n.done + c.Lock() + if c.stop || c.node != nil { + c.Unlock() + return + } + n.reconnectDelay *= 2 + if n.reconnectDelay > maxReconnectDelay { + n.reconnectDelay = maxReconnectDelay + } + logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) + delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) + c.cancelDelay = cancel + c.Unlock() + <-delayCtx.Done() + if delayCtx.Err() != context.DeadlineExceeded { + return + } + c.Lock() + if c.node != nil { + c.Unlock() + return + } + var err error + config := n.config + config.RemoteAddr = c.getRemoteAddress() + config.joinAddr = config.RemoteAddr + n, err = c.startNewNode(config) + if err != nil { + c.err = err + close(n.done) + } + c.Unlock() + } +} + +func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) { + if err := c.config.Backend.IsSwarmCompatible(); err != nil { + return nil, err + } + + actualLocalAddr := conf.LocalAddr + if actualLocalAddr == "" { + // If localAddr was not specified, resolve it automatically + // based on the route to joinAddr. localAddr can only be left + // empty on "join". + listenHost, _, err := net.SplitHostPort(conf.ListenAddr) + if err != nil { + return nil, fmt.Errorf("could not parse listen address: %v", err) + } + + listenAddrIP := net.ParseIP(listenHost) + if listenAddrIP == nil || !listenAddrIP.IsUnspecified() { + actualLocalAddr = listenHost + } else { + if conf.RemoteAddr == "" { + // Should never happen except using swarms created by + // old versions that didn't save remoteAddr. + conf.RemoteAddr = "8.8.8.8:53" + } + conn, err := net.Dial("udp", conf.RemoteAddr) + if err != nil { + return nil, fmt.Errorf("could not find local IP address: %v", err) + } + localHostPort := conn.LocalAddr().String() + actualLocalAddr, _, _ = net.SplitHostPort(localHostPort) + conn.Close() + } + } + + var control string + if runtime.GOOS == "windows" { + control = `\\.\pipe\` + controlSocket + } else { + control = filepath.Join(c.runtimeRoot, controlSocket) + } + + c.node = nil + c.cancelDelay = nil + c.stop = false + n, err := swarmnode.New(&swarmnode.Config{ + Hostname: c.config.Name, + ForceNewCluster: conf.forceNewCluster, + ListenControlAPI: control, + ListenRemoteAPI: conf.ListenAddr, + AdvertiseRemoteAPI: conf.AdvertiseAddr, + JoinAddr: conf.joinAddr, + StateDir: c.root, + JoinToken: conf.joinToken, + Executor: container.NewExecutor(c.config.Backend), + HeartbeatTick: 1, + ElectionTick: 3, + UnlockKey: conf.lockKey, + AutoLockManagers: conf.autolock, + PluginGetter: c.config.Backend.PluginGetter(), + }) + + if err != nil { + return nil, err + } + ctx := context.Background() + if err := n.Start(ctx); err != nil { + return nil, err + } + node := &node{ + Node: n, + done: make(chan struct{}), + reconnectDelay: initialReconnectDelay, + config: conf, + } + c.node = node + c.actualLocalAddr = actualLocalAddr // not saved + c.saveState(conf) + + c.config.Backend.DaemonJoinsCluster(c) + go func() { + err := detectLockedError(n.Err(ctx)) + if err != nil { + logrus.Errorf("cluster exited with error: %v", err) + } + c.Lock() + c.node = nil + c.err = err + if errors.Cause(err) == ErrSwarmLocked { + c.locked = true + confClone := conf + c.lastNodeConfig = &confClone + } + c.Unlock() + close(node.done) + }() + + go func() { + select { + case <-n.Ready(): + c.Lock() + node.ready = true + c.err = nil + c.Unlock() + case <-ctx.Done(): + } + c.configEvent <- struct{}{} + }() + + go func() { + for conn := range n.ListenControlSocket(ctx) { + c.Lock() + if node.conn != conn { + if conn == nil { + node.client = nil + node.logs = nil + } else { + node.client = swarmapi.NewControlClient(conn) + node.logs = swarmapi.NewLogsClient(conn) + } + } + node.conn = conn + c.Unlock() + c.configEvent <- struct{}{} + } + }() + + return node, nil +} + +// Init initializes new cluster from user provided request. +func (c *Cluster) Init(req types.InitRequest) (string, error) { + c.Lock() + if c.swarmExists() { + if !req.ForceNewCluster { + c.Unlock() + return "", ErrSwarmExists + } + if err := c.stopNode(); err != nil { + c.Unlock() + return "", err + } + } + + if err := validateAndSanitizeInitRequest(&req); err != nil { + c.Unlock() + return "", err + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + c.Unlock() + return "", err + } + + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + if err != nil { + c.Unlock() + return "", err + } + + localAddr := listenHost + + // If the local address is undetermined, the advertise address + // will be used as local address, if it belongs to this system. + // If the advertise address is not local, then we try to find + // a system address to use as local address. If this fails, + // we give up and ask user to pass the listen address. + if net.ParseIP(localAddr).IsUnspecified() { + advertiseIP := net.ParseIP(advertiseHost) + + found := false + for _, systemIP := range listSystemIPs() { + if systemIP.Equal(advertiseIP) { + localAddr = advertiseIP.String() + found = true + break + } + } + + if !found { + ip, err := c.resolveSystemAddr() + if err != nil { + c.Unlock() + logrus.Warnf("Could not find a local address: %v", err) + return "", errMustSpecifyListenAddr + } + localAddr = ip.String() + } + } + + // todo: check current state existing + n, err := c.startNewNode(nodeStartConfig{ + forceNewCluster: req.ForceNewCluster, + autolock: req.AutoLockManagers, + LocalAddr: localAddr, + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), + }) + if err != nil { + c.Unlock() + return "", err + } + c.Unlock() + + select { + case <-n.Ready(): + if err := initClusterSpec(n, req.Spec); err != nil { + return "", err + } + go c.reconnectOnFailure(n) + return n.NodeID(), nil + case <-n.done: + c.RLock() + defer c.RUnlock() + if !req.ForceNewCluster { // if failure on first attempt don't keep state + if err := c.clearState(); err != nil { + return "", err + } + } + return "", c.err + } +} + +// Join makes current Cluster part of an existing swarm cluster. +func (c *Cluster) Join(req types.JoinRequest) error { + c.Lock() + if c.swarmExists() { + c.Unlock() + return ErrSwarmExists + } + if err := validateAndSanitizeJoinRequest(&req); err != nil { + c.Unlock() + return err + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + c.Unlock() + return err + } + + var advertiseAddr string + if req.AdvertiseAddr != "" { + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + // For joining, we don't need to provide an advertise address, + // since the remote side can detect it. + if err == nil { + advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) + } + } + + // todo: check current state existing + n, err := c.startNewNode(nodeStartConfig{ + RemoteAddr: req.RemoteAddrs[0], + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: advertiseAddr, + joinAddr: req.RemoteAddrs[0], + joinToken: req.JoinToken, + }) + if err != nil { + c.Unlock() + return err + } + c.Unlock() + + select { + case <-time.After(swarmConnectTimeout): + // attempt to connect will continue in background, but reconnect only if it didn't fail + go func() { + select { + case <-n.Ready(): + c.reconnectOnFailure(n) + case <-n.done: + logrus.Errorf("failed to join the cluster: %+v", c.err) + } + }() + return ErrSwarmJoinTimeoutReached + case <-n.Ready(): + go c.reconnectOnFailure(n) + return nil + case <-n.done: + c.RLock() + defer c.RUnlock() + return c.err + } +} + +// GetUnlockKey returns the unlock key for the swarm. +func (c *Cluster) GetUnlockKey() (string, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return "", c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + client := swarmapi.NewCAClient(c.conn) + + r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{}) + if err != nil { + return "", err + } + + if len(r.UnlockKey) == 0 { + // no key + return "", nil + } + + return encryption.HumanReadableKey(r.UnlockKey), nil +} + +// UnlockSwarm provides a key to decrypt data that is encrypted at rest. +func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error { + c.RLock() + if !c.isActiveManager() { + if err := c.errNoManager(); err != ErrSwarmLocked { + c.RUnlock() + return err + } + } + + if c.node != nil || c.locked != true { + c.RUnlock() + return errors.New("swarm is not locked") + } + c.RUnlock() + + key, err := encryption.ParseHumanReadableKey(req.UnlockKey) + if err != nil { + return err + } + + c.Lock() + config := *c.lastNodeConfig + config.lockKey = key + n, err := c.startNewNode(config) + if err != nil { + c.Unlock() + return err + } + c.Unlock() + select { + case <-n.Ready(): + case <-n.done: + if errors.Cause(c.err) == ErrSwarmLocked { + return errors.New("swarm could not be unlocked: invalid key provided") + } + return fmt.Errorf("swarm component could not be started: %v", c.err) + } + go c.reconnectOnFailure(n) + return nil +} + +// stopNode is a helper that stops the active c.node and waits until it has +// shut down. Call while keeping the cluster lock. +func (c *Cluster) stopNode() error { + if c.node == nil { + return nil + } + c.stop = true + if c.cancelDelay != nil { + c.cancelDelay() + c.cancelDelay = nil + } + node := c.node + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + // TODO: can't hold lock on stop because it calls back to network + c.Unlock() + defer c.Lock() + if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { + return err + } + <-node.done + return nil +} + +func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool { + return reachable-2 <= unreachable +} + +func isLastManager(reachable, unreachable int) bool { + return reachable == 1 && unreachable == 0 +} + +// Leave shuts down Cluster and removes current state. +func (c *Cluster) Leave(force bool) error { + c.Lock() + node := c.node + if node == nil { + if c.locked { + c.locked = false + c.lastNodeConfig = nil + c.Unlock() + } else if c.err == ErrSwarmCertificatesExpired { + c.err = nil + c.Unlock() + } else { + c.Unlock() + return ErrNoSwarm + } + } else { + if node.Manager() != nil && !force { + msg := "You are attempting to leave the swarm on a node that is participating as a manager. " + if c.isActiveManager() { + active, reachable, unreachable, err := c.managerStats() + if err == nil { + if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { + if isLastManager(reachable, unreachable) { + msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " + c.Unlock() + return fmt.Errorf(msg) + } + msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) + } + } + } else { + msg += "Doing so may lose the consensus of your cluster. " + } + + msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." + c.Unlock() + return fmt.Errorf(msg) + } + if err := c.stopNode(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") + c.Unlock() + return err + } + c.Unlock() + if nodeID := node.NodeID(); nodeID != "" { + nodeContainers, err := c.listContainerForNode(nodeID) + if err != nil { + return err + } + for _, id := range nodeContainers { + if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("error removing %v: %v", id, err) + } + } + } + } + c.configEvent <- struct{}{} + // todo: cleanup optional? + if err := c.clearState(); err != nil { + return err + } + + return nil +} + +func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { + var ids []string + filters := filters.NewArgs() + filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) + containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ + Filters: filters, + }) + if err != nil { + return []string{}, err + } + for _, c := range containers { + ids = append(ids, c.ID) + } + return ids, nil +} + +func (c *Cluster) clearState() error { + // todo: backup this data instead of removing? + if err := os.RemoveAll(c.root); err != nil { + return err + } + if err := os.MkdirAll(c.root, 0700); err != nil { + return err + } + c.config.Backend.DaemonLeavesCluster() + return nil +} + +func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on qourum lost + return context.WithTimeout(context.Background(), swarmRequestTimeout) +} + +// Inspect retrieves the configuration properties of a managed swarm cluster. +func (c *Cluster) Inspect() (types.Swarm, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Swarm{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + swarm, err := getSwarm(ctx, c.client) + if err != nil { + return types.Swarm{}, err + } + + return convert.SwarmFromGRPC(*swarm), nil +} + +// Update updates configuration of a managed swarm cluster. +func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + swarm, err := getSwarm(ctx, c.client) + if err != nil { + return err + } + + // In update, client should provide the complete spec of the swarm, including + // Name and Labels. If a field is specified with 0 or nil, then the default value + // will be used to swarmkit. + clusterSpec, err := convert.SwarmSpecToGRPC(spec) + if err != nil { + return err + } + + _, err = c.client.UpdateCluster( + ctx, + &swarmapi.UpdateClusterRequest{ + ClusterID: swarm.ID, + Spec: &clusterSpec, + ClusterVersion: &swarmapi.Version{ + Index: version, + }, + Rotation: swarmapi.KeyRotation{ + WorkerJoinToken: flags.RotateWorkerToken, + ManagerJoinToken: flags.RotateManagerToken, + ManagerUnlockKey: flags.RotateManagerUnlockKey, + }, + }, + ) + return err +} + +// IsManager returns true if Cluster is participating as a manager. +func (c *Cluster) IsManager() bool { + c.RLock() + defer c.RUnlock() + return c.isActiveManager() +} + +// IsAgent returns true if Cluster is participating as a worker/agent. +func (c *Cluster) IsAgent() bool { + c.RLock() + defer c.RUnlock() + return c.node != nil && c.ready +} + +// GetLocalAddress returns the local address. +func (c *Cluster) GetLocalAddress() string { + c.RLock() + defer c.RUnlock() + return c.actualLocalAddr +} + +// GetListenAddress returns the listen address. +func (c *Cluster) GetListenAddress() string { + c.RLock() + defer c.RUnlock() + if c.node != nil { + return c.node.config.ListenAddr + } + return "" +} + +// GetAdvertiseAddress returns the remotely reachable address of this node. +func (c *Cluster) GetAdvertiseAddress() string { + c.RLock() + defer c.RUnlock() + if c.node != nil && c.node.config.AdvertiseAddr != "" { + advertiseHost, _, _ := net.SplitHostPort(c.node.config.AdvertiseAddr) + return advertiseHost + } + return c.actualLocalAddr +} + +// GetRemoteAddress returns a known advertise address of a remote manager if +// available. +// todo: change to array/connect with info +func (c *Cluster) GetRemoteAddress() string { + c.RLock() + defer c.RUnlock() + return c.getRemoteAddress() +} + +func (c *Cluster) getRemoteAddress() string { + if c.node == nil { + return "" + } + nodeID := c.node.NodeID() + for _, r := range c.node.Remotes() { + if r.NodeID != nodeID { + return r.Addr + } + } + return "" +} + +// ListenClusterEvents returns a channel that receives messages on cluster +// participation changes. +// todo: make cancelable and accessible to multiple callers +func (c *Cluster) ListenClusterEvents() <-chan struct{} { + return c.configEvent +} + +// Info returns information about the current cluster state. +func (c *Cluster) Info() types.Info { + info := types.Info{ + NodeAddr: c.GetAdvertiseAddress(), + } + + c.RLock() + defer c.RUnlock() + + if c.node == nil { + info.LocalNodeState = types.LocalNodeStateInactive + if c.cancelDelay != nil { + info.LocalNodeState = types.LocalNodeStateError + } + if c.locked { + info.LocalNodeState = types.LocalNodeStateLocked + } else if c.err == ErrSwarmCertificatesExpired { + info.LocalNodeState = types.LocalNodeStateError + } + } else { + info.LocalNodeState = types.LocalNodeStatePending + if c.ready == true { + info.LocalNodeState = types.LocalNodeStateActive + } else if c.locked { + info.LocalNodeState = types.LocalNodeStateLocked + } + } + if c.err != nil { + info.Error = c.err.Error() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + if c.isActiveManager() { + info.ControlAvailable = true + swarm, err := c.Inspect() + if err != nil { + info.Error = err.Error() + } + + // Strip JoinTokens + info.Cluster = swarm.ClusterInfo + + if r, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err == nil { + info.Nodes = len(r.Nodes) + for _, n := range r.Nodes { + if n.ManagerStatus != nil { + info.Managers = info.Managers + 1 + } + } + } + } + + if c.node != nil { + for _, r := range c.node.Remotes() { + info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) + } + info.NodeID = c.node.NodeID() + } + + return info +} + +// isActiveManager should not be called without a read lock +func (c *Cluster) isActiveManager() bool { + return c.node != nil && c.conn != nil +} + +// swarmExists should not be called without a read lock +func (c *Cluster) swarmExists() bool { + return c.node != nil || c.locked || c.err == ErrSwarmCertificatesExpired +} + +// errNoManager returns error describing why manager commands can't be used. +// Call with read lock. +func (c *Cluster) errNoManager() error { + if c.node == nil { + if c.locked { + return ErrSwarmLocked + } + if c.err == ErrSwarmCertificatesExpired { + return ErrSwarmCertificatesExpired + } + return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + if c.node.Manager() != nil { + return fmt.Errorf("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.") + } + return fmt.Errorf("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.") +} + +// GetServices returns all services of a managed swarm cluster. +func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + filters, err := newListServicesFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListServices( + ctx, + &swarmapi.ListServicesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + services := []types.Service{} + + for _, service := range r.Services { + services = append(services, convert.ServiceFromGRPC(*service)) + } + + return services, nil +} + +// imageWithDigestString takes an image such as name or name:tag +// and returns the image pinned to a digest, such as name@sha256:34234... +// Due to the difference between the docker/docker/reference, and the +// docker/distribution/reference packages, we're parsing the image twice. +// As the two packages converge, this function should be simplified. +// TODO(nishanttotla): After the packages converge, the function must +// convert distreference.Named -> distreference.Canonical, and the logic simplified. +func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) { + if _, err := digest.ParseDigest(image); err == nil { + return "", errors.New("image reference is an image ID") + } + ref, err := distreference.ParseNamed(image) + if err != nil { + return "", err + } + // only query registry if not a canonical reference (i.e. with digest) + if _, ok := ref.(distreference.Canonical); !ok { + // create a docker/docker/reference Named object because GetRepository needs it + dockerRef, err := reference.ParseNamed(image) + if err != nil { + return "", err + } + dockerRef = reference.WithDefaultTag(dockerRef) + namedTaggedRef, ok := dockerRef.(reference.NamedTagged) + if !ok { + return "", fmt.Errorf("unable to cast image to NamedTagged reference object") + } + + repo, _, err := c.config.Backend.GetRepository(ctx, namedTaggedRef, authConfig) + if err != nil { + return "", err + } + dscrptr, err := repo.Tags(ctx).Get(ctx, namedTaggedRef.Tag()) + if err != nil { + return "", err + } + + namedDigestedRef, err := distreference.WithDigest(distreference.EnsureTagged(ref), dscrptr.Digest) + if err != nil { + return "", err + } + return namedDigestedRef.String(), nil + } + // reference already contains a digest, so just return it + return ref.String(), nil +} + +// CreateService creates a new service in a managed swarm cluster. +func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apitypes.ServiceCreateResponse, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + err := c.populateNetworkID(ctx, c.client, &s) + if err != nil { + return nil, err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(s) + if err != nil { + return nil, err + } + + ctnr := serviceSpec.Task.GetContainer() + if ctnr == nil { + return nil, fmt.Errorf("service does not use container tasks") + } + + if encodedAuth != "" { + ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + resp := &apitypes.ServiceCreateResponse{} + + // pin image by digest + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) + resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())) + } else if ctnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) + ctnr.Image = digestImage + } else { + logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + } + } + + r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return nil, err + } + + resp.ID = r.Service.ID + return resp, nil +} + +// GetService returns a service based on an ID or name. +func (c *Cluster) GetService(input string) (types.Service, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Service{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + service, err := getService(ctx, c.client, input) + if err != nil { + return types.Service{}, err + } + return convert.ServiceFromGRPC(*service), nil +} + +// UpdateService updates existing service to match new properties. +func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, encodedAuth string, registryAuthFrom string) (*apitypes.ServiceUpdateResponse, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + err := c.populateNetworkID(ctx, c.client, &spec) + if err != nil { + return nil, err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(spec) + if err != nil { + return nil, err + } + + currentService, err := getService(ctx, c.client, serviceIDOrName) + if err != nil { + return nil, err + } + + newCtnr := serviceSpec.Task.GetContainer() + if newCtnr == nil { + return nil, fmt.Errorf("service does not use container tasks") + } + + if encodedAuth != "" { + newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } else { + // this is needed because if the encodedAuth isn't being updated then we + // shouldn't lose it, and continue to use the one that was already present + var ctnr *swarmapi.ContainerSpec + switch registryAuthFrom { + case apitypes.RegistryAuthFromSpec, "": + ctnr = currentService.Spec.Task.GetContainer() + case apitypes.RegistryAuthFromPreviousSpec: + if currentService.PreviousSpec == nil { + return nil, fmt.Errorf("service does not have a previous spec") + } + ctnr = currentService.PreviousSpec.Task.GetContainer() + default: + return nil, fmt.Errorf("unsupported registryAuthFromValue") + } + if ctnr == nil { + return nil, fmt.Errorf("service does not use container tasks") + } + newCtnr.PullOptions = ctnr.PullOptions + // update encodedAuth so it can be used to pin image by digest + if ctnr.PullOptions != nil { + encodedAuth = ctnr.PullOptions.RegistryAuth + } + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + resp := &apitypes.ServiceUpdateResponse{} + + // pin image by digest + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) + resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())) + } else if newCtnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) + newCtnr.Image = digestImage + } else { + logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + } + } + + _, err = c.client.UpdateService( + ctx, + &swarmapi.UpdateServiceRequest{ + ServiceID: currentService.ID, + Spec: &serviceSpec, + ServiceVersion: &swarmapi.Version{ + Index: version, + }, + }, + ) + + return resp, err +} + +// RemoveService removes a service from a managed swarm cluster. +func (c *Cluster) RemoveService(input string) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + service, err := getService(ctx, c.client, input) + if err != nil { + return err + } + + if _, err := c.client.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil { + return err + } + return nil +} + +// ServiceLogs collects service logs and writes them back to `config.OutStream` +func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend.ContainerLogsConfig, started chan struct{}) error { + c.RLock() + if !c.isActiveManager() { + c.RUnlock() + return c.errNoManager() + } + + service, err := getService(ctx, c.client, input) + if err != nil { + c.RUnlock() + return err + } + + stream, err := c.logs.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{ + Selector: &swarmapi.LogSelector{ + ServiceIDs: []string{service.ID}, + }, + Options: &swarmapi.LogSubscriptionOptions{ + Follow: config.Follow, + }, + }) + if err != nil { + c.RUnlock() + return err + } + + wf := ioutils.NewWriteFlusher(config.OutStream) + defer wf.Close() + close(started) + wf.Flush() + + outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout) + errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr) + + // Release the lock before starting the stream. + c.RUnlock() + for { + // Check the context before doing anything. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + subscribeMsg, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + for _, msg := range subscribeMsg.Messages { + data := []byte{} + + if config.Timestamps { + ts, err := ptypes.Timestamp(msg.Timestamp) + if err != nil { + return err + } + data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...) + } + + data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ", + contextPrefix, msg.Context.NodeID, + contextPrefix, msg.Context.ServiceID, + contextPrefix, msg.Context.TaskID, + ))...) + + data = append(data, msg.Data...) + + switch msg.Stream { + case swarmapi.LogStreamStdout: + outStream.Write(data) + case swarmapi.LogStreamStderr: + errStream.Write(data) + } + } + } +} + +// GetNodes returns a list of all nodes known to a cluster. +func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + filters, err := newListNodesFilters(options.Filters) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListNodes( + ctx, + &swarmapi.ListNodesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + nodes := []types.Node{} + + for _, node := range r.Nodes { + nodes = append(nodes, convert.NodeFromGRPC(*node)) + } + return nodes, nil +} + +// GetNode returns a node based on an ID or name. +func (c *Cluster) GetNode(input string) (types.Node, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Node{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + node, err := getNode(ctx, c.client, input) + if err != nil { + return types.Node{}, err + } + return convert.NodeFromGRPC(*node), nil +} + +// UpdateNode updates existing nodes properties. +func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + nodeSpec, err := convert.NodeSpecToGRPC(spec) + if err != nil { + return err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + currentNode, err := getNode(ctx, c.client, input) + if err != nil { + return err + } + + _, err = c.client.UpdateNode( + ctx, + &swarmapi.UpdateNodeRequest{ + NodeID: currentNode.ID, + Spec: &nodeSpec, + NodeVersion: &swarmapi.Version{ + Index: version, + }, + }, + ) + return err +} + +// RemoveNode removes a node from a cluster +func (c *Cluster) RemoveNode(input string, force bool) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + node, err := getNode(ctx, c.client, input) + if err != nil { + return err + } + + if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}); err != nil { + return err + } + return nil +} + +// GetTasks returns a list of tasks matching the filter options. +func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + byName := func(filter filters.Args) error { + if filter.Include("service") { + serviceFilters := filter.Get("service") + for _, serviceFilter := range serviceFilters { + service, err := c.GetService(serviceFilter) + if err != nil { + return err + } + filter.Del("service", serviceFilter) + filter.Add("service", service.ID) + } + } + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + node, err := c.GetNode(nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", node.ID) + } + } + return nil + } + + filters, err := newListTasksFilters(options.Filters, byName) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListTasks( + ctx, + &swarmapi.ListTasksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + tasks := []types.Task{} + + for _, task := range r.Tasks { + if task.Spec.GetContainer() != nil { + tasks = append(tasks, convert.TaskFromGRPC(*task)) + } + } + return tasks, nil +} + +// GetTask returns a task by an ID. +func (c *Cluster) GetTask(input string) (types.Task, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Task{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + task, err := getTask(ctx, c.client, input) + if err != nil { + return types.Task{}, err + } + return convert.TaskFromGRPC(*task), nil +} + +// GetNetwork returns a cluster network by an ID. +func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return apitypes.NetworkResource{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + network, err := getNetwork(ctx, c.client, input) + if err != nil { + return apitypes.NetworkResource{}, err + } + return convert.BasicNetworkFromGRPC(*network), nil +} + +func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + var networks []apitypes.NetworkResource + + for _, network := range r.Networks { + networks = append(networks, convert.BasicNetworkFromGRPC(*network)) + } + + return networks, nil +} + +// GetNetworks returns all current cluster managed networks. +func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { + return c.getNetworks(nil) +} + +// GetNetworksByName returns cluster managed networks by name. +// It is ok to have multiple networks here. #18864 +func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) { + // Note that swarmapi.GetNetworkRequest.Name is not functional. + // So we cannot just use that with c.GetNetwork. + return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{ + Names: []string{name}, + }) +} + +func attacherKey(target, containerID string) string { + return containerID + ":" + target +} + +// UpdateAttachment signals the attachment config to the attachment +// waiter who is trying to start or attach the container to the +// network. +func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error { + c.RLock() + attacher, ok := c.attachers[attacherKey(target, containerID)] + c.RUnlock() + if !ok || attacher == nil { + return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target) + } + + attacher.attachWaitCh <- config + close(attacher.attachWaitCh) + return nil +} + +// WaitForDetachment waits for the container to stop or detach from +// the network. +func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + c.RLock() + attacher, ok := c.attachers[attacherKey(networkName, containerID)] + if !ok { + attacher, ok = c.attachers[attacherKey(networkID, containerID)] + } + if c.node == nil || c.node.Agent() == nil { + c.RUnlock() + return fmt.Errorf("invalid cluster node while waiting for detachment") + } + + agent := c.node.Agent() + c.RUnlock() + + if ok && attacher != nil && + attacher.detachWaitCh != nil && + attacher.attachCompleteCh != nil { + // Attachment may be in progress still so wait for + // attachment to complete. + select { + case <-attacher.attachCompleteCh: + case <-ctx.Done(): + return ctx.Err() + } + + if attacher.taskID == taskID { + select { + case <-attacher.detachWaitCh: + case <-ctx.Done(): + return ctx.Err() + } + } + } + + return agent.ResourceAllocator().DetachNetwork(ctx, taskID) +} + +// AttachNetwork generates an attachment request towards the manager. +func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) { + aKey := attacherKey(target, containerID) + c.Lock() + if c.node == nil || c.node.Agent() == nil { + c.Unlock() + return nil, fmt.Errorf("invalid cluster node while attaching to network") + } + if attacher, ok := c.attachers[aKey]; ok { + c.Unlock() + return attacher.config, nil + } + + agent := c.node.Agent() + attachWaitCh := make(chan *network.NetworkingConfig) + detachWaitCh := make(chan struct{}) + attachCompleteCh := make(chan struct{}) + c.attachers[aKey] = &attacher{ + attachWaitCh: attachWaitCh, + attachCompleteCh: attachCompleteCh, + detachWaitCh: detachWaitCh, + } + c.Unlock() + + ctx, cancel := c.getRequestContext() + defer cancel() + + taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses) + if err != nil { + c.Lock() + delete(c.attachers, aKey) + c.Unlock() + return nil, fmt.Errorf("Could not attach to network %s: %v", target, err) + } + + c.Lock() + c.attachers[aKey].taskID = taskID + close(attachCompleteCh) + c.Unlock() + + logrus.Debugf("Successfully attached to network %s with tid %s", target, taskID) + + var config *network.NetworkingConfig + select { + case config = <-attachWaitCh: + case <-ctx.Done(): + return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err()) + } + + c.Lock() + c.attachers[aKey].config = config + c.Unlock() + return config, nil +} + +// DetachNetwork unblocks the waiters waiting on WaitForDetachment so +// that a request to detach can be generated towards the manager. +func (c *Cluster) DetachNetwork(target string, containerID string) error { + aKey := attacherKey(target, containerID) + + c.Lock() + attacher, ok := c.attachers[aKey] + delete(c.attachers, aKey) + c.Unlock() + + if !ok { + return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target) + } + + close(attacher.detachWaitCh) + return nil +} + +// CreateNetwork creates a new cluster managed network. +func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return "", c.errNoManager() + } + + if runconfig.IsPreDefinedNetwork(s.Name) { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name) + return "", apierrors.NewRequestForbiddenError(err) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + networkSpec := convert.BasicNetworkCreateToGRPC(s) + r, err := c.client.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec}) + if err != nil { + return "", err + } + + return r.Network.ID, nil +} + +// RemoveNetwork removes a cluster network. +func (c *Cluster) RemoveNetwork(input string) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + network, err := getNetwork(ctx, c.client, input) + if err != nil { + return err + } + + if _, err := c.client.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil { + return err + } + return nil +} + +func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { + // Always prefer NetworkAttachmentConfigs from TaskTemplate + // but fallback to service spec for backward compatibility + networks := s.TaskTemplate.Networks + if len(networks) == 0 { + networks = s.Networks + } + + for i, n := range networks { + apiNetwork, err := getNetwork(ctx, client, n.Target) + if err != nil { + if ln, _ := c.config.Backend.FindNetwork(n.Target); ln != nil && !ln.Info().Dynamic() { + err = fmt.Errorf("The network %s cannot be used with services. Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) + return apierrors.NewRequestForbiddenError(err) + } + return err + } + networks[i].Target = apiNetwork.ID + } + return nil +} + +func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { + // GetNetwork to match via full ID. + rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}) + if err != nil { + // If any error (including NotFound), ListNetworks to match via ID prefix and full name. + rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}}) + if err != nil || len(rl.Networks) == 0 { + rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Networks) == 0 { + return nil, fmt.Errorf("network %s not found", input) + } + + if l := len(rl.Networks); l > 1 { + return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l) + } + + return rl.Networks[0], nil + } + return rg.Network, nil +} + +// Cleanup stops active swarm node. This is run before daemon shutdown. +func (c *Cluster) Cleanup() { + c.Lock() + node := c.node + if node == nil { + c.Unlock() + return + } + defer c.Unlock() + if c.isActiveManager() { + active, reachable, unreachable, err := c.managerStats() + if err == nil { + singlenode := active && isLastManager(reachable, unreachable) + if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) { + logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable) + } + } + } + c.stopNode() +} + +func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}) + if err != nil { + return false, 0, 0, err + } + for _, n := range nodes.Nodes { + if n.ManagerStatus != nil { + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE { + reachable++ + if n.ID == c.node.NodeID() { + current = true + } + } + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE { + unreachable++ + } + } + } + return +} + +func validateAndSanitizeInitRequest(req *types.InitRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + + if req.Spec.Annotations.Name == "" { + req.Spec.Annotations.Name = "default" + } else if req.Spec.Annotations.Name != "default" { + return errors.New(`swarm spec must be named "default"`) + } + + return nil +} + +func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + if len(req.RemoteAddrs) == 0 { + return fmt.Errorf("at least 1 RemoteAddr is required to join") + } + for i := range req.RemoteAddrs { + req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) + if err != nil { + return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) + } + } + return nil +} + +func validateAddr(addr string) (string, error) { + if addr == "" { + return addr, fmt.Errorf("invalid empty address") + } + newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) + if err != nil { + return addr, nil + } + return strings.TrimPrefix(newaddr, "tcp://"), nil +} + +func initClusterSpec(node *node, spec types.Spec) error { + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + for conn := range node.ListenControlSocket(ctx) { + if ctx.Err() != nil { + return ctx.Err() + } + if conn != nil { + client := swarmapi.NewControlClient(conn) + var cluster *swarmapi.Cluster + for i := 0; ; i++ { + lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return fmt.Errorf("error on listing clusters: %v", err) + } + if len(lcr.Clusters) == 0 { + if i < 10 { + time.Sleep(200 * time.Millisecond) + continue + } + return fmt.Errorf("empty list of clusters was returned") + } + cluster = lcr.Clusters[0] + break + } + // In init, we take the initial default values from swarmkit, and merge + // any non nil or 0 value from spec to GRPC spec. This will leave the + // default value alone. + // Note that this is different from Update(), as in Update() we expect + // user to specify the complete spec of the cluster (as they already know + // the existing one and knows which field to update) + clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: &clusterSpec, + }) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + return nil + } + } + return ctx.Err() +} + +func detectLockedError(err error) error { + if err == swarmnode.ErrInvalidUnlockKey { + return errors.WithStack(ErrSwarmLocked) + } + return err +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/container.go b/vendor/github.com/docker/docker/daemon/cluster/convert/container.go new file mode 100644 index 0000000..10383f7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/container.go @@ -0,0 +1,235 @@ +package convert + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + container "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { + containerSpec := types.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + Hosts: c.Hosts, + Secrets: secretReferencesFromGRPC(c.Secrets), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &types.DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + // Mounts + for _, m := range c.Mounts { + mount := mounttypes.Mount{ + Target: m.Target, + Source: m.Source, + Type: mounttypes.Type(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])), + ReadOnly: m.ReadOnly, + } + + if m.BindOptions != nil { + mount.BindOptions = &mounttypes.BindOptions{ + Propagation: mounttypes.Propagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])), + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &mounttypes.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.StopGracePeriod != nil { + grace, _ := ptypes.Duration(c.StopGracePeriod) + containerSpec.StopGracePeriod = &grace + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigFromGRPC(c.Healthcheck) + } + + return containerSpec +} + +func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference { + refs := make([]*swarmapi.SecretReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.SecretReference{ + SecretID: s.SecretID, + SecretName: s.SecretName, + } + if s.File != nil { + ref.Target = &swarmapi.SecretReference_File{ + File: &swarmapi.SecretReference_FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} +func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { + refs := make([]*types.SecretReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("secret target not a file: secret=%s", s.SecretID) + continue + } + refs = append(refs, &types.SecretReference{ + File: &types.SecretReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + SecretID: s.SecretID, + SecretName: s.SecretName, + }) + } + + return refs +} + +func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { + containerSpec := &swarmapi.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + Hosts: c.Hosts, + Secrets: secretReferencesToGRPC(c.Secrets), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &swarmapi.ContainerSpec_DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + if c.StopGracePeriod != nil { + containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod) + } + + // Mounts + for _, m := range c.Mounts { + mount := swarmapi.Mount{ + Target: m.Target, + Source: m.Source, + ReadOnly: m.ReadOnly, + } + + if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok { + mount.Type = swarmapi.Mount_MountType(mountType) + } else if string(m.Type) != "" { + return nil, fmt.Errorf("invalid MountType: %q", m.Type) + } + + if m.BindOptions != nil { + if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok { + mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)} + } else if string(m.BindOptions.Propagation) != "" { + return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation) + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &swarmapi.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &swarmapi.Mount_TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigToGRPC(c.Healthcheck) + } + + return containerSpec, nil +} + +func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig { + interval, _ := ptypes.Duration(h.Interval) + timeout, _ := ptypes.Duration(h.Timeout) + return &container.HealthConfig{ + Test: h.Test, + Interval: interval, + Timeout: timeout, + Retries: int(h.Retries), + } +} + +func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig { + return &swarmapi.HealthConfig{ + Test: h.Test, + Interval: ptypes.DurationProto(h.Interval), + Timeout: ptypes.DurationProto(h.Timeout), + Retries: int32(h.Retries), + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/network.go b/vendor/github.com/docker/docker/daemon/cluster/convert/network.go new file mode 100644 index 0000000..4d21b4d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/network.go @@ -0,0 +1,210 @@ +package convert + +import ( + "strings" + + basictypes "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +func networkAttachementFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { + if na != nil { + return types.NetworkAttachment{ + Network: networkFromGRPC(na.Network), + Addresses: na.Addresses, + } + } + return types.NetworkAttachment{} +} + +func networkFromGRPC(n *swarmapi.Network) types.Network { + if n != nil { + network := types.Network{ + ID: n.ID, + Spec: types.NetworkSpec{ + IPv6Enabled: n.Spec.Ipv6Enabled, + Internal: n.Spec.Internal, + Attachable: n.Spec.Attachable, + IPAMOptions: ipamFromGRPC(n.Spec.IPAM), + }, + IPAMOptions: ipamFromGRPC(n.IPAM), + } + + // Meta + network.Version.Index = n.Meta.Version.Index + network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) + network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) + + //Annotations + network.Spec.Name = n.Spec.Annotations.Name + network.Spec.Labels = n.Spec.Annotations.Labels + + //DriverConfiguration + if n.Spec.DriverConfig != nil { + network.Spec.DriverConfiguration = &types.Driver{ + Name: n.Spec.DriverConfig.Name, + Options: n.Spec.DriverConfig.Options, + } + } + + //DriverState + if n.DriverState != nil { + network.DriverState = types.Driver{ + Name: n.DriverState.Name, + Options: n.DriverState.Options, + } + } + + return network + } + return types.Network{} +} + +func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions { + var ipam *types.IPAMOptions + if i != nil { + ipam = &types.IPAMOptions{} + if i.Driver != nil { + ipam.Driver.Name = i.Driver.Name + ipam.Driver.Options = i.Driver.Options + } + + for _, config := range i.Configs { + ipam.Configs = append(ipam.Configs, types.IPAMConfig{ + Subnet: config.Subnet, + Range: config.Range, + Gateway: config.Gateway, + }) + } + } + return ipam +} + +func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { + var endpointSpec *types.EndpointSpec + if es != nil { + endpointSpec = &types.EndpointSpec{} + endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) + + for _, portState := range es.Ports { + endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{ + Name: portState.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])), + TargetPort: portState.TargetPort, + PublishedPort: portState.PublishedPort, + }) + } + } + return endpointSpec +} + +func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { + endpoint := types.Endpoint{} + if e != nil { + if espec := endpointSpecFromGRPC(e.Spec); espec != nil { + endpoint.Spec = *espec + } + + for _, portState := range e.Ports { + endpoint.Ports = append(endpoint.Ports, types.PortConfig{ + Name: portState.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])), + TargetPort: portState.TargetPort, + PublishedPort: portState.PublishedPort, + }) + } + + for _, v := range e.VirtualIPs { + endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{ + NetworkID: v.NetworkID, + Addr: v.Addr}) + } + + } + + return endpoint +} + +// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. +func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { + spec := n.Spec + var ipam networktypes.IPAM + if spec.IPAM != nil { + if spec.IPAM.Driver != nil { + ipam.Driver = spec.IPAM.Driver.Name + ipam.Options = spec.IPAM.Driver.Options + } + ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs)) + for _, ic := range spec.IPAM.Configs { + ipamConfig := networktypes.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + AuxAddress: ic.Reserved, + } + ipam.Config = append(ipam.Config, ipamConfig) + } + } + + nr := basictypes.NetworkResource{ + ID: n.ID, + Name: n.Spec.Annotations.Name, + Scope: "swarm", + EnableIPv6: spec.Ipv6Enabled, + IPAM: ipam, + Internal: spec.Internal, + Attachable: spec.Attachable, + Labels: n.Spec.Annotations.Labels, + } + + if n.DriverState != nil { + nr.Driver = n.DriverState.Name + nr.Options = n.DriverState.Options + } + + return nr +} + +// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec. +func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec { + ns := swarmapi.NetworkSpec{ + Annotations: swarmapi.Annotations{ + Name: create.Name, + Labels: create.Labels, + }, + DriverConfig: &swarmapi.Driver{ + Name: create.Driver, + Options: create.Options, + }, + Ipv6Enabled: create.EnableIPv6, + Internal: create.Internal, + Attachable: create.Attachable, + } + if create.IPAM != nil { + driver := create.IPAM.Driver + if driver == "" { + driver = "default" + } + ns.IPAM = &swarmapi.IPAMOptions{ + Driver: &swarmapi.Driver{ + Name: driver, + Options: create.IPAM.Options, + }, + } + ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config)) + for _, ipamConfig := range create.IPAM.Config { + ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{ + Subnet: ipamConfig.Subnet, + Range: ipamConfig.IPRange, + Gateway: ipamConfig.Gateway, + }) + } + ns.IPAM.Configs = ipamSpec + } + return ns +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/node.go b/vendor/github.com/docker/docker/daemon/cluster/convert/node.go new file mode 100644 index 0000000..306f34e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/node.go @@ -0,0 +1,89 @@ +package convert + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// NodeFromGRPC converts a grpc Node to a Node. +func NodeFromGRPC(n swarmapi.Node) types.Node { + node := types.Node{ + ID: n.ID, + Spec: types.NodeSpec{ + Role: types.NodeRole(strings.ToLower(n.Spec.Role.String())), + Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), + }, + Status: types.NodeStatus{ + State: types.NodeState(strings.ToLower(n.Status.State.String())), + Message: n.Status.Message, + Addr: n.Status.Addr, + }, + } + + // Meta + node.Version.Index = n.Meta.Version.Index + node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) + node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) + + //Annotations + node.Spec.Name = n.Spec.Annotations.Name + node.Spec.Labels = n.Spec.Annotations.Labels + + //Description + if n.Description != nil { + node.Description.Hostname = n.Description.Hostname + if n.Description.Platform != nil { + node.Description.Platform.Architecture = n.Description.Platform.Architecture + node.Description.Platform.OS = n.Description.Platform.OS + } + if n.Description.Resources != nil { + node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs + node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes + } + if n.Description.Engine != nil { + node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion + node.Description.Engine.Labels = n.Description.Engine.Labels + for _, plugin := range n.Description.Engine.Plugins { + node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) + } + } + } + + //Manager + if n.ManagerStatus != nil { + node.ManagerStatus = &types.ManagerStatus{ + Leader: n.ManagerStatus.Leader, + Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())), + Addr: n.ManagerStatus.Addr, + } + } + + return node +} + +// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec. +func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { + spec := swarmapi.NodeSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + } + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { + spec.Role = swarmapi.NodeRole(role) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) + } + + if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok { + spec.Availability = swarmapi.NodeSpec_Availability(availability) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability) + } + + return spec, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go b/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go new file mode 100644 index 0000000..3e96687 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go @@ -0,0 +1,64 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// SecretFromGRPC converts a grpc Secret to a Secret. +func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret { + secret := swarmtypes.Secret{ + ID: s.ID, + Spec: swarmtypes.SecretSpec{ + Annotations: swarmtypes.Annotations{ + Name: s.Spec.Annotations.Name, + Labels: s.Spec.Annotations.Labels, + }, + Data: s.Spec.Data, + }, + } + + secret.Version.Index = s.Meta.Version.Index + // Meta + secret.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt) + secret.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt) + + return secret +} + +// SecretSpecToGRPC converts Secret to a grpc Secret. +func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec { + return swarmapi.SecretSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } +} + +// SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference +func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference { + refs := []*swarmtypes.SecretReference{} + + for _, r := range s { + ref := &swarmtypes.SecretReference{ + SecretID: r.SecretID, + SecretName: r.SecretName, + } + + if t, ok := r.Target.(*swarmapi.SecretReference_File); ok { + ref.File = &swarmtypes.SecretReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/service.go b/vendor/github.com/docker/docker/daemon/cluster/convert/service.go new file mode 100644 index 0000000..aa68e01 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/service.go @@ -0,0 +1,366 @@ +package convert + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/namesgenerator" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// ServiceFromGRPC converts a grpc Service to a Service. +func ServiceFromGRPC(s swarmapi.Service) types.Service { + service := types.Service{ + ID: s.ID, + Spec: *serviceSpecFromGRPC(&s.Spec), + PreviousSpec: serviceSpecFromGRPC(s.PreviousSpec), + + Endpoint: endpointFromGRPC(s.Endpoint), + } + + // Meta + service.Version.Index = s.Meta.Version.Index + service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt) + service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt) + + // UpdateStatus + service.UpdateStatus = types.UpdateStatus{} + if s.UpdateStatus != nil { + switch s.UpdateStatus.State { + case swarmapi.UpdateStatus_UPDATING: + service.UpdateStatus.State = types.UpdateStateUpdating + case swarmapi.UpdateStatus_PAUSED: + service.UpdateStatus.State = types.UpdateStatePaused + case swarmapi.UpdateStatus_COMPLETED: + service.UpdateStatus.State = types.UpdateStateCompleted + } + + service.UpdateStatus.StartedAt, _ = ptypes.Timestamp(s.UpdateStatus.StartedAt) + service.UpdateStatus.CompletedAt, _ = ptypes.Timestamp(s.UpdateStatus.CompletedAt) + service.UpdateStatus.Message = s.UpdateStatus.Message + } + + return service +} + +func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) *types.ServiceSpec { + if spec == nil { + return nil + } + + serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) + for _, n := range spec.Networks { + serviceNetworks = append(serviceNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Task.Networks)) + for _, n := range spec.Task.Networks { + taskNetworks = append(taskNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container + convertedSpec := &types.ServiceSpec{ + Annotations: types.Annotations{ + Name: spec.Annotations.Name, + Labels: spec.Annotations.Labels, + }, + + TaskTemplate: types.TaskSpec{ + ContainerSpec: containerSpecFromGRPC(containerConfig), + Resources: resourcesFromGRPC(spec.Task.Resources), + RestartPolicy: restartPolicyFromGRPC(spec.Task.Restart), + Placement: placementFromGRPC(spec.Task.Placement), + LogDriver: driverFromGRPC(spec.Task.LogDriver), + Networks: taskNetworks, + ForceUpdate: spec.Task.ForceUpdate, + }, + + Networks: serviceNetworks, + EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), + } + + // UpdateConfig + if spec.Update != nil { + convertedSpec.UpdateConfig = &types.UpdateConfig{ + Parallelism: spec.Update.Parallelism, + MaxFailureRatio: spec.Update.MaxFailureRatio, + } + + convertedSpec.UpdateConfig.Delay, _ = ptypes.Duration(&spec.Update.Delay) + if spec.Update.Monitor != nil { + convertedSpec.UpdateConfig.Monitor, _ = ptypes.Duration(spec.Update.Monitor) + } + + switch spec.Update.FailureAction { + case swarmapi.UpdateConfig_PAUSE: + convertedSpec.UpdateConfig.FailureAction = types.UpdateFailureActionPause + case swarmapi.UpdateConfig_CONTINUE: + convertedSpec.UpdateConfig.FailureAction = types.UpdateFailureActionContinue + } + } + + // Mode + switch t := spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + convertedSpec.Mode.Global = &types.GlobalService{} + case *swarmapi.ServiceSpec_Replicated: + convertedSpec.Mode.Replicated = &types.ReplicatedService{ + Replicas: &t.Replicated.Replicas, + } + } + + return convertedSpec +} + +// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. +func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { + name := s.Name + if name == "" { + name = namesgenerator.GetRandomName(0) + } + + serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) + for _, n := range s.Networks { + serviceNetworks = append(serviceNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) + for _, n := range s.TaskTemplate.Networks { + taskNetworks = append(taskNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + spec := swarmapi.ServiceSpec{ + Annotations: swarmapi.Annotations{ + Name: name, + Labels: s.Labels, + }, + Task: swarmapi.TaskSpec{ + Resources: resourcesToGRPC(s.TaskTemplate.Resources), + LogDriver: driverToGRPC(s.TaskTemplate.LogDriver), + Networks: taskNetworks, + ForceUpdate: s.TaskTemplate.ForceUpdate, + }, + Networks: serviceNetworks, + } + + containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} + + restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Restart = restartPolicy + + if s.TaskTemplate.Placement != nil { + spec.Task.Placement = &swarmapi.Placement{ + Constraints: s.TaskTemplate.Placement.Constraints, + } + } + + if s.UpdateConfig != nil { + var failureAction swarmapi.UpdateConfig_FailureAction + switch s.UpdateConfig.FailureAction { + case types.UpdateFailureActionPause, "": + failureAction = swarmapi.UpdateConfig_PAUSE + case types.UpdateFailureActionContinue: + failureAction = swarmapi.UpdateConfig_CONTINUE + default: + return swarmapi.ServiceSpec{}, fmt.Errorf("unrecongized update failure action %s", s.UpdateConfig.FailureAction) + } + spec.Update = &swarmapi.UpdateConfig{ + Parallelism: s.UpdateConfig.Parallelism, + Delay: *ptypes.DurationProto(s.UpdateConfig.Delay), + FailureAction: failureAction, + MaxFailureRatio: s.UpdateConfig.MaxFailureRatio, + } + if s.UpdateConfig.Monitor != 0 { + spec.Update.Monitor = ptypes.DurationProto(s.UpdateConfig.Monitor) + } + } + + if s.EndpointSpec != nil { + if s.EndpointSpec.Mode != "" && + s.EndpointSpec.Mode != types.ResolutionModeVIP && + s.EndpointSpec.Mode != types.ResolutionModeDNSRR { + return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode) + } + + spec.Endpoint = &swarmapi.EndpointSpec{} + + spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))]) + + for _, portConfig := range s.EndpointSpec.Ports { + spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{ + Name: portConfig.Name, + Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]), + PublishMode: swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + } + + // Mode + if s.Mode.Global != nil && s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode") + } + + if s.Mode.Global != nil { + spec.Mode = &swarmapi.ServiceSpec_Global{ + Global: &swarmapi.GlobalService{}, + } + } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas}, + } + } else { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: 1}, + } + } + + return spec, nil +} + +func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements { + var resources *types.ResourceRequirements + if res != nil { + resources = &types.ResourceRequirements{} + if res.Limits != nil { + resources.Limits = &types.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + resources.Reservations = &types.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + } + } + + return resources +} + +func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { + var reqs *swarmapi.ResourceRequirements + if res != nil { + reqs = &swarmapi.ResourceRequirements{} + if res.Limits != nil { + reqs.Limits = &swarmapi.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + reqs.Reservations = &swarmapi.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + + } + } + return reqs +} + +func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { + var rp *types.RestartPolicy + if p != nil { + rp = &types.RestartPolicy{} + + switch p.Condition { + case swarmapi.RestartOnNone: + rp.Condition = types.RestartPolicyConditionNone + case swarmapi.RestartOnFailure: + rp.Condition = types.RestartPolicyConditionOnFailure + case swarmapi.RestartOnAny: + rp.Condition = types.RestartPolicyConditionAny + default: + rp.Condition = types.RestartPolicyConditionAny + } + + if p.Delay != nil { + delay, _ := ptypes.Duration(p.Delay) + rp.Delay = &delay + } + if p.Window != nil { + window, _ := ptypes.Duration(p.Window) + rp.Window = &window + } + + rp.MaxAttempts = &p.MaxAttempts + } + return rp +} + +func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) { + var rp *swarmapi.RestartPolicy + if p != nil { + rp = &swarmapi.RestartPolicy{} + + switch p.Condition { + case types.RestartPolicyConditionNone: + rp.Condition = swarmapi.RestartOnNone + case types.RestartPolicyConditionOnFailure: + rp.Condition = swarmapi.RestartOnFailure + case types.RestartPolicyConditionAny: + rp.Condition = swarmapi.RestartOnAny + default: + if string(p.Condition) != "" { + return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition) + } + rp.Condition = swarmapi.RestartOnAny + } + + if p.Delay != nil { + rp.Delay = ptypes.DurationProto(*p.Delay) + } + if p.Window != nil { + rp.Window = ptypes.DurationProto(*p.Window) + } + if p.MaxAttempts != nil { + rp.MaxAttempts = *p.MaxAttempts + + } + } + return rp, nil +} + +func placementFromGRPC(p *swarmapi.Placement) *types.Placement { + var r *types.Placement + if p != nil { + r = &types.Placement{} + r.Constraints = p.Constraints + } + + return r +} + +func driverFromGRPC(p *swarmapi.Driver) *types.Driver { + if p == nil { + return nil + } + + return &types.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func driverToGRPC(p *types.Driver) *swarmapi.Driver { + if p == nil { + return nil + } + + return &swarmapi.Driver{ + Name: p.Name, + Options: p.Options, + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go b/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go new file mode 100644 index 0000000..606e00a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go @@ -0,0 +1,122 @@ +package convert + +import ( + "fmt" + "strings" + "time" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// SwarmFromGRPC converts a grpc Cluster to a Swarm. +func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { + swarm := types.Swarm{ + ClusterInfo: types.ClusterInfo{ + ID: c.ID, + Spec: types.Spec{ + Orchestration: types.OrchestrationConfig{ + TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit, + }, + Raft: types.RaftConfig{ + SnapshotInterval: c.Spec.Raft.SnapshotInterval, + KeepOldSnapshots: &c.Spec.Raft.KeepOldSnapshots, + LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers, + HeartbeatTick: int(c.Spec.Raft.HeartbeatTick), + ElectionTick: int(c.Spec.Raft.ElectionTick), + }, + EncryptionConfig: types.EncryptionConfig{ + AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, + }, + }, + }, + JoinTokens: types.JoinTokens{ + Worker: c.RootCA.JoinTokens.Worker, + Manager: c.RootCA.JoinTokens.Manager, + }, + } + + heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod) + swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod + + swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry) + + for _, ca := range c.Spec.CAConfig.ExternalCAs { + swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ + Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), + URL: ca.URL, + Options: ca.Options, + }) + } + + // Meta + swarm.Version.Index = c.Meta.Version.Index + swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt) + swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt) + + // Annotations + swarm.Spec.Name = c.Spec.Annotations.Name + swarm.Spec.Labels = c.Spec.Annotations.Labels + + return swarm +} + +// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec. +func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) { + return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{}) +} + +// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec +func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) { + // We take the initSpec (either created from scratch, or returned by swarmkit), + // and will only change the value if the one taken from types.Spec is not nil or 0. + // In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo. + if s.Annotations.Name != "" { + spec.Annotations.Name = s.Annotations.Name + } + if len(s.Annotations.Labels) != 0 { + spec.Annotations.Labels = s.Annotations.Labels + } + + if s.Orchestration.TaskHistoryRetentionLimit != nil { + spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit + } + if s.Raft.SnapshotInterval != 0 { + spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval + } + if s.Raft.KeepOldSnapshots != nil { + spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots + } + if s.Raft.LogEntriesForSlowFollowers != 0 { + spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers + } + if s.Raft.HeartbeatTick != 0 { + spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick) + } + if s.Raft.ElectionTick != 0 { + spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick) + } + if s.Dispatcher.HeartbeatPeriod != 0 { + spec.Dispatcher.HeartbeatPeriod = ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)) + } + if s.CAConfig.NodeCertExpiry != 0 { + spec.CAConfig.NodeCertExpiry = ptypes.DurationProto(s.CAConfig.NodeCertExpiry) + } + + for _, ca := range s.CAConfig.ExternalCAs { + protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] + if !ok { + return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol) + } + spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{ + Protocol: swarmapi.ExternalCA_CAProtocol(protocol), + URL: ca.URL, + Options: ca.Options, + }) + } + + spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers + + return spec, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/task.go b/vendor/github.com/docker/docker/daemon/cluster/convert/task.go new file mode 100644 index 0000000..d0cf89c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/task.go @@ -0,0 +1,81 @@ +package convert + +import ( + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// TaskFromGRPC converts a grpc Task to a Task. +func TaskFromGRPC(t swarmapi.Task) types.Task { + if t.Spec.GetAttachment() != nil { + return types.Task{} + } + containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container + containerStatus := t.Status.GetContainer() + networks := make([]types.NetworkAttachmentConfig, 0, len(t.Spec.Networks)) + for _, n := range t.Spec.Networks { + networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + task := types.Task{ + ID: t.ID, + Annotations: types.Annotations{ + Name: t.Annotations.Name, + Labels: t.Annotations.Labels, + }, + ServiceID: t.ServiceID, + Slot: int(t.Slot), + NodeID: t.NodeID, + Spec: types.TaskSpec{ + ContainerSpec: containerSpecFromGRPC(containerConfig), + Resources: resourcesFromGRPC(t.Spec.Resources), + RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart), + Placement: placementFromGRPC(t.Spec.Placement), + LogDriver: driverFromGRPC(t.Spec.LogDriver), + Networks: networks, + }, + Status: types.TaskStatus{ + State: types.TaskState(strings.ToLower(t.Status.State.String())), + Message: t.Status.Message, + Err: t.Status.Err, + }, + DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), + } + + // Meta + task.Version.Index = t.Meta.Version.Index + task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt) + task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt) + + task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp) + + if containerStatus != nil { + task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID + task.Status.ContainerStatus.PID = int(containerStatus.PID) + task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode) + } + + // NetworksAttachments + for _, na := range t.Networks { + task.NetworksAttachments = append(task.NetworksAttachments, networkAttachementFromGRPC(na)) + } + + if t.Status.PortStatus == nil { + return task + } + + for _, p := range t.Status.PortStatus.Ports { + task.Status.PortStatus.Ports = append(task.Status.PortStatus.Ports, types.PortConfig{ + Name: p.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(p.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(p.PublishMode)])), + TargetPort: p.TargetPort, + PublishedPort: p.PublishedPort, + }) + } + + return task +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go b/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go new file mode 100644 index 0000000..0f1da38 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go @@ -0,0 +1,61 @@ +package executor + +import ( + "io" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/plugin" + "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/cluster" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent/exec" + "golang.org/x/net/context" +) + +// Backend defines the executor component for a swarm agent. +type Backend interface { + CreateManagedNetwork(clustertypes.NetworkCreateRequest) error + DeleteManagedNetwork(name string) error + FindNetwork(idName string) (libnetwork.Network, error) + SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error + PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + ActivateContainerServiceBinding(containerName string) error + DeactivateContainerServiceBinding(containerName string) error + UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error + ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) + ContainerWaitWithContext(ctx context.Context, name string) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerKill(name string, sig uint64) error + SetContainerSecretStore(name string, store exec.SecretGetter) error + SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error + SystemInfo() (*types.Info, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + Containers(config *types.ContainerListOptions) ([]*types.Container, error) + SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error + DaemonJoinsCluster(provider cluster.Provider) + DaemonLeavesCluster() + IsSwarmCompatible() error + SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(listener chan interface{}) + UpdateAttachment(string, string, string, *network.NetworkingConfig) error + WaitForDetachment(context.Context, string, string, string, string) error + GetRepository(context.Context, reference.NamedTagged, *types.AuthConfig) (distribution.Repository, bool, error) + LookupImage(name string) (*types.ImageInspect, error) + PluginManager() *plugin.Manager + PluginGetter() *plugin.Store +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go new file mode 100644 index 0000000..f82f8b5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go @@ -0,0 +1,463 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/protobuf/ptypes" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// containerAdapter conducts remote operations for a container. All calls +// are mostly naked calls to the client API, seeded with information from +// containerConfig. +type containerAdapter struct { + backend executorpkg.Backend + container *containerConfig + secrets exec.SecretGetter +} + +func newContainerAdapter(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) { + ctnr, err := newContainerConfig(task) + if err != nil { + return nil, err + } + + return &containerAdapter{ + container: ctnr, + backend: b, + secrets: secrets, + }, nil +} + +func (c *containerAdapter) pullImage(ctx context.Context) error { + spec := c.container.spec() + + // Skip pulling if the image is referenced by image ID. + if _, err := digest.ParseDigest(spec.Image); err == nil { + return nil + } + + // Skip pulling if the image is referenced by digest and already + // exists locally. + named, err := reference.ParseNamed(spec.Image) + if err == nil { + if _, ok := named.(reference.Canonical); ok { + _, err := c.backend.LookupImage(spec.Image) + if err == nil { + return nil + } + } + } + + // if the image needs to be pulled, the auth config will be retrieved and updated + var encodedAuthConfig string + if spec.PullOptions != nil { + encodedAuthConfig = spec.PullOptions.RegistryAuth + } + + authConfig := &types.AuthConfig{} + if encodedAuthConfig != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + pr, pw := io.Pipe() + metaHeaders := map[string][]string{} + go func() { + err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw) + pw.CloseWithError(err) + }() + + dec := json.NewDecoder(pr) + dec.UseNumber() + m := map[string]interface{}{} + spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1) + + lastStatus := "" + for { + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return err + } + l := log.G(ctx) + // limit pull progress logs unless the status changes + if spamLimiter.Allow() || lastStatus != m["status"] { + // if we have progress details, we have everything we need + if progress, ok := m["progressDetail"].(map[string]interface{}); ok { + // first, log the image and status + l = l.WithFields(logrus.Fields{ + "image": c.container.image(), + "status": m["status"], + }) + // then, if we have progress, log the progress + if progress["current"] != nil && progress["total"] != nil { + l = l.WithFields(logrus.Fields{ + "current": progress["current"], + "total": progress["total"], + }) + } + } + l.Debug("pull in progress") + } + // sometimes, we get no useful information at all, and add no fields + if status, ok := m["status"].(string); ok { + lastStatus = status + } + } + + // if the final stream object contained an error, return it + if errMsg, ok := m["error"]; ok { + return fmt.Errorf("%v", errMsg) + } + return nil +} + +func (c *containerAdapter) createNetworks(ctx context.Context) error { + for _, network := range c.container.networks() { + ncr, err := c.container.networkCreateRequest(network) + if err != nil { + return err + } + + if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing + if _, ok := err.(libnetwork.NetworkNameError); ok { + continue + } + + return err + } + } + + return nil +} + +func (c *containerAdapter) removeNetworks(ctx context.Context) error { + for _, nid := range c.container.networks() { + if err := c.backend.DeleteManagedNetwork(nid); err != nil { + switch err.(type) { + case *libnetwork.ActiveEndpointsError: + continue + case libnetwork.ErrNoSuchNetwork: + continue + default: + log.G(ctx).Errorf("network %s remove failed: %v", nid, err) + return err + } + } + } + + return nil +} + +func (c *containerAdapter) networkAttach(ctx context.Context) error { + config := c.container.createNetworkingConfig() + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config) +} + +func (c *containerAdapter) waitForDetach(ctx context.Context) error { + config := c.container.createNetworkingConfig() + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id()) +} + +func (c *containerAdapter) create(ctx context.Context) error { + var cr containertypes.ContainerCreateCreatedBody + var err error + + if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ + Name: c.container.name(), + Config: c.container.config(), + HostConfig: c.container.hostConfig(), + // Use the first network in container create + NetworkingConfig: c.container.createNetworkingConfig(), + }); err != nil { + return err + } + + // Docker daemon currently doesn't support multiple networks in container create + // Connect to all other networks + nc := c.container.connectNetworkingConfig() + + if nc != nil { + for n, ep := range nc.EndpointsConfig { + if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil { + return err + } + } + } + + container := c.container.task.Spec.GetContainer() + if container == nil { + return fmt.Errorf("unable to get container from task spec") + } + + // configure secrets + if err := c.backend.SetContainerSecretStore(cr.ID, c.secrets); err != nil { + return err + } + + refs := convert.SecretReferencesFromGRPC(container.Secrets) + if err := c.backend.SetContainerSecretReferences(cr.ID, refs); err != nil { + return err + } + + if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil { + return err + } + + return nil +} + +// checkMounts ensures that the provided mounts won't have any host-specific +// problems at start up. For example, we disallow bind mounts without an +// existing path, which slightly different from the container API. +func (c *containerAdapter) checkMounts() error { + spec := c.container.spec() + for _, mount := range spec.Mounts { + switch mount.Type { + case api.MountTypeBind: + if _, err := os.Stat(mount.Source); os.IsNotExist(err) { + return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source) + } + } + } + + return nil +} + +func (c *containerAdapter) start(ctx context.Context) error { + if err := c.checkMounts(); err != nil { + return err + } + + return c.backend.ContainerStart(c.container.name(), nil, "", "") +} + +func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { + cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) + if ctx.Err() != nil { + return types.ContainerJSON{}, ctx.Err() + } + if err != nil { + return types.ContainerJSON{}, err + } + return *cs, nil +} + +// events issues a call to the events API and returns a channel with all +// events. The stream of events can be shutdown by cancelling the context. +func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { + log.G(ctx).Debugf("waiting on events") + buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter()) + eventsq := make(chan events.Message, len(buffer)) + + for _, event := range buffer { + eventsq <- event + } + + go func() { + defer c.backend.UnsubscribeFromEvents(l) + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + log.G(ctx).Warnf("unexpected event message: %q", ev) + continue + } + select { + case eventsq <- jev: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + + return eventsq +} + +func (c *containerAdapter) wait(ctx context.Context) error { + return c.backend.ContainerWaitWithContext(ctx, c.container.nameOrID()) +} + +func (c *containerAdapter) shutdown(ctx context.Context) error { + // Default stop grace period to nil (daemon will use the stopTimeout of the container) + var stopgrace *int + spec := c.container.spec() + if spec.StopGracePeriod != nil { + stopgraceValue := int(spec.StopGracePeriod.Seconds) + stopgrace = &stopgraceValue + } + return c.backend.ContainerStop(c.container.name(), stopgrace) +} + +func (c *containerAdapter) terminate(ctx context.Context) error { + return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL)) +} + +func (c *containerAdapter) remove(ctx context.Context) error { + return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{ + RemoveVolume: true, + ForceRemove: true, + }) +} + +func (c *containerAdapter) createVolumes(ctx context.Context) error { + // Create plugin volumes that are embedded inside a Mount + for _, mount := range c.container.task.Spec.GetContainer().Mounts { + if mount.Type != api.MountTypeVolume { + continue + } + + if mount.VolumeOptions == nil { + continue + } + + if mount.VolumeOptions.DriverConfig == nil { + continue + } + + req := c.container.volumeCreateRequest(&mount) + + // Check if this volume exists on the engine + if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil { + // TODO(amitshukla): Today, volume create through the engine api does not return an error + // when the named volume with the same parameters already exists. + // It returns an error if the driver name is different - that is a valid error + return err + } + + } + + return nil +} + +func (c *containerAdapter) activateServiceBinding() error { + return c.backend.ActivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) deactivateServiceBinding() error { + return c.backend.DeactivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) { + reader, writer := io.Pipe() + + apiOptions := &backend.ContainerLogsConfig{ + ContainerLogsOptions: types.ContainerLogsOptions{ + Follow: options.Follow, + + // TODO(stevvooe): Parse timestamp out of message. This + // absolutely needs to be done before going to production with + // this, at it is completely redundant. + Timestamps: true, + Details: false, // no clue what to do with this, let's just deprecate it. + }, + OutStream: writer, + } + + if options.Since != nil { + since, err := ptypes.Timestamp(options.Since) + if err != nil { + return nil, err + } + apiOptions.Since = since.Format(time.RFC3339Nano) + } + + if options.Tail < 0 { + // See protobuf documentation for details of how this works. + apiOptions.Tail = fmt.Sprint(-options.Tail - 1) + } else if options.Tail > 0 { + return nil, fmt.Errorf("tail relative to start of logs not supported via docker API") + } + + if len(options.Streams) == 0 { + // empty == all + apiOptions.ShowStdout, apiOptions.ShowStderr = true, true + } else { + for _, stream := range options.Streams { + switch stream { + case api.LogStreamStdout: + apiOptions.ShowStdout = true + case api.LogStreamStderr: + apiOptions.ShowStderr = true + } + } + } + + chStarted := make(chan struct{}) + go func() { + defer writer.Close() + c.backend.ContainerLogs(ctx, c.container.name(), apiOptions, chStarted) + }() + + return reader, nil +} + +// todo: typed/wrapped errors +func isContainerCreateNameConflict(err error) bool { + return strings.Contains(err.Error(), "Conflict. The name") +} + +func isUnknownContainer(err error) bool { + return strings.Contains(err.Error(), "No such container:") +} + +func isStoppedContainer(err error) bool { + return strings.Contains(err.Error(), "is already stopped") +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go new file mode 100644 index 0000000..e0ee81a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go @@ -0,0 +1,81 @@ +package container + +import ( + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// networkAttacherController implements agent.Controller against docker's API. +// +// networkAttacherController manages the lifecycle of network +// attachment of a docker unmanaged container managed as a task from +// agent point of view. It provides network attachment information to +// the unmanaged container for it to attach to the network and run. +type networkAttacherController struct { + backend executorpkg.Backend + task *api.Task + adapter *containerAdapter + closed chan struct{} +} + +func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*networkAttacherController, error) { + adapter, err := newContainerAdapter(b, task, secrets) + if err != nil { + return nil, err + } + + return &networkAttacherController{ + backend: b, + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) error { + return nil +} + +func (nc *networkAttacherController) Prepare(ctx context.Context) error { + // Make sure all the networks that the task needs are created. + if err := nc.adapter.createNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Start(ctx context.Context) error { + return nc.adapter.networkAttach(ctx) +} + +func (nc *networkAttacherController) Wait(pctx context.Context) error { + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + return nc.adapter.waitForDetach(ctx) +} + +func (nc *networkAttacherController) Shutdown(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Terminate(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Remove(ctx context.Context) error { + // Try removing the network referenced in this task in case this + // task is the last one referencing it + if err := nc.adapter.removeNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Close() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go new file mode 100644 index 0000000..f033ad5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go @@ -0,0 +1,598 @@ +package container + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/api/types" + enginecontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + enginemount "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + volumetypes "github.com/docker/docker/api/types/volume" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/reference" + "github.com/docker/go-connections/nat" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/docker/swarmkit/template" +) + +const ( + // Explicitly use the kernel's default setting for CPU quota of 100ms. + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + cpuQuotaPeriod = 100 * time.Millisecond + + // systemLabelPrefix represents the reserved namespace for system labels. + systemLabelPrefix = "com.docker.swarm" +) + +// containerConfig converts task properties into docker container compatible +// components. +type containerConfig struct { + task *api.Task + networksAttachments map[string]*api.NetworkAttachment +} + +// newContainerConfig returns a validated container config. No methods should +// return an error if this function returns without error. +func newContainerConfig(t *api.Task) (*containerConfig, error) { + var c containerConfig + return &c, c.setTask(t) +} + +func (c *containerConfig) setTask(t *api.Task) error { + if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil { + return exec.ErrRuntimeUnsupported + } + + container := t.Spec.GetContainer() + if container != nil { + if container.Image == "" { + return ErrImageRequired + } + + if err := validateMounts(container.Mounts); err != nil { + return err + } + } + + // index the networks by name + c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) + for _, attachment := range t.Networks { + c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment + } + + c.task = t + + if t.Spec.GetContainer() != nil { + preparedSpec, err := template.ExpandContainerSpec(t) + if err != nil { + return err + } + c.task.Spec.Runtime = &api.TaskSpec_Container{ + Container: preparedSpec, + } + } + + return nil +} + +func (c *containerConfig) id() string { + attachment := c.task.Spec.GetAttachment() + if attachment == nil { + return "" + } + + return attachment.ContainerID +} + +func (c *containerConfig) taskID() string { + return c.task.ID +} + +func (c *containerConfig) endpoint() *api.Endpoint { + return c.task.Endpoint +} + +func (c *containerConfig) spec() *api.ContainerSpec { + return c.task.Spec.GetContainer() +} + +func (c *containerConfig) nameOrID() string { + if c.task.Spec.GetContainer() != nil { + return c.name() + } + + return c.id() +} + +func (c *containerConfig) name() string { + if c.task.Annotations.Name != "" { + // if set, use the container Annotations.Name field, set in the orchestrator. + return c.task.Annotations.Name + } + + slot := fmt.Sprint(c.task.Slot) + if slot == "" || c.task.Slot == 0 { + slot = c.task.NodeID + } + + // fallback to service.slot.id. + return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID) +} + +func (c *containerConfig) image() string { + raw := c.spec().Image + ref, err := reference.ParseNamed(raw) + if err != nil { + return raw + } + return reference.WithDefaultTag(ref).String() +} + +func (c *containerConfig) portBindings() nat.PortMap { + portBindings := nat.PortMap{} + if c.task.Endpoint == nil { + return portBindings + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + binding := []nat.PortBinding{ + {}, + } + + if portConfig.PublishedPort != 0 { + binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort)) + } + portBindings[port] = binding + } + + return portBindings +} + +func (c *containerConfig) exposedPorts() map[nat.Port]struct{} { + exposedPorts := make(map[nat.Port]struct{}) + if c.task.Endpoint == nil { + return exposedPorts + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + exposedPorts[port] = struct{}{} + } + + return exposedPorts +} + +func (c *containerConfig) config() *enginecontainer.Config { + config := &enginecontainer.Config{ + Labels: c.labels(), + Tty: c.spec().TTY, + OpenStdin: c.spec().OpenStdin, + User: c.spec().User, + Env: c.spec().Env, + Hostname: c.spec().Hostname, + WorkingDir: c.spec().Dir, + Image: c.image(), + ExposedPorts: c.exposedPorts(), + Healthcheck: c.healthcheck(), + } + + if len(c.spec().Command) > 0 { + // If Command is provided, we replace the whole invocation with Command + // by replacing Entrypoint and specifying Cmd. Args is ignored in this + // case. + config.Entrypoint = append(config.Entrypoint, c.spec().Command...) + config.Cmd = append(config.Cmd, c.spec().Args...) + } else if len(c.spec().Args) > 0 { + // In this case, we assume the image has an Entrypoint and Args + // specifies the arguments for that entrypoint. + config.Cmd = c.spec().Args + } + + return config +} + +func (c *containerConfig) labels() map[string]string { + var ( + system = map[string]string{ + "task": "", // mark as cluster task + "task.id": c.task.ID, + "task.name": c.name(), + "node.id": c.task.NodeID, + "service.id": c.task.ServiceID, + "service.name": c.task.ServiceAnnotations.Name, + } + labels = make(map[string]string) + ) + + // base labels are those defined in the spec. + for k, v := range c.spec().Labels { + labels[k] = v + } + + // we then apply the overrides from the task, which may be set via the + // orchestrator. + for k, v := range c.task.Annotations.Labels { + labels[k] = v + } + + // finally, we apply the system labels, which override all labels. + for k, v := range system { + labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v + } + + return labels +} + +func (c *containerConfig) mounts() []enginemount.Mount { + var r []enginemount.Mount + for _, mount := range c.spec().Mounts { + r = append(r, convertMount(mount)) + } + return r +} + +func convertMount(m api.Mount) enginemount.Mount { + mount := enginemount.Mount{ + Source: m.Source, + Target: m.Target, + ReadOnly: m.ReadOnly, + } + + switch m.Type { + case api.MountTypeBind: + mount.Type = enginemount.TypeBind + case api.MountTypeVolume: + mount.Type = enginemount.TypeVolume + case api.MountTypeTmpfs: + mount.Type = enginemount.TypeTmpfs + } + + if m.BindOptions != nil { + mount.BindOptions = &enginemount.BindOptions{} + switch m.BindOptions.Propagation { + case api.MountPropagationRPrivate: + mount.BindOptions.Propagation = enginemount.PropagationRPrivate + case api.MountPropagationPrivate: + mount.BindOptions.Propagation = enginemount.PropagationPrivate + case api.MountPropagationRSlave: + mount.BindOptions.Propagation = enginemount.PropagationRSlave + case api.MountPropagationSlave: + mount.BindOptions.Propagation = enginemount.PropagationSlave + case api.MountPropagationRShared: + mount.BindOptions.Propagation = enginemount.PropagationRShared + case api.MountPropagationShared: + mount.BindOptions.Propagation = enginemount.PropagationShared + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &enginemount.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + } + if m.VolumeOptions.Labels != nil { + mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels)) + for k, v := range m.VolumeOptions.Labels { + mount.VolumeOptions.Labels[k] = v + } + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &enginemount.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + } + if m.VolumeOptions.DriverConfig.Options != nil { + mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options)) + for k, v := range m.VolumeOptions.DriverConfig.Options { + mount.VolumeOptions.DriverConfig.Options[k] = v + } + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &enginemount.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + return mount +} + +func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig { + hcSpec := c.spec().Healthcheck + if hcSpec == nil { + return nil + } + interval, _ := ptypes.Duration(hcSpec.Interval) + timeout, _ := ptypes.Duration(hcSpec.Timeout) + return &enginecontainer.HealthConfig{ + Test: hcSpec.Test, + Interval: interval, + Timeout: timeout, + Retries: int(hcSpec.Retries), + } +} + +func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { + hc := &enginecontainer.HostConfig{ + Resources: c.resources(), + GroupAdd: c.spec().Groups, + PortBindings: c.portBindings(), + Mounts: c.mounts(), + } + + if c.spec().DNSConfig != nil { + hc.DNS = c.spec().DNSConfig.Nameservers + hc.DNSSearch = c.spec().DNSConfig.Search + hc.DNSOptions = c.spec().DNSConfig.Options + } + + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // However, the format of ExtraHosts in HostConfig is + // : + // We need to do the conversion here + // (Alias is ignored for now) + for _, entry := range c.spec().Hosts { + parts := strings.Fields(entry) + if len(parts) > 1 { + hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0])) + } + } + + if c.task.LogDriver != nil { + hc.LogConfig = enginecontainer.LogConfig{ + Type: c.task.LogDriver.Name, + Config: c.task.LogDriver.Options, + } + } + + return hc +} + +// This handles the case of volumes that are defined inside a service Mount +func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumesCreateBody { + var ( + driverName string + driverOpts map[string]string + labels map[string]string + ) + + if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { + driverName = mount.VolumeOptions.DriverConfig.Name + driverOpts = mount.VolumeOptions.DriverConfig.Options + labels = mount.VolumeOptions.Labels + } + + if mount.VolumeOptions != nil { + return &volumetypes.VolumesCreateBody{ + Name: mount.Source, + Driver: driverName, + DriverOpts: driverOpts, + Labels: labels, + } + } + return nil +} + +func (c *containerConfig) resources() enginecontainer.Resources { + resources := enginecontainer.Resources{} + + // If no limits are specified let the engine use its defaults. + // + // TODO(aluzzardi): We might want to set some limits anyway otherwise + // "unlimited" tasks will step over the reservation of other tasks. + r := c.task.Spec.Resources + if r == nil || r.Limits == nil { + return resources + } + + if r.Limits.MemoryBytes > 0 { + resources.Memory = r.Limits.MemoryBytes + } + + if r.Limits.NanoCPUs > 0 { + // CPU Period must be set in microseconds. + resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) + resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 + } + + return resources +} + +// Docker daemon supports just 1 network during container create. +func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil { + networks = c.task.Networks + } + + epConfig := make(map[string]*network.EndpointSettings) + if len(networks) > 0 { + epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0]) + } + + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create +func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil { + networks = c.task.Networks + } + + // First network is used during container create. Other networks are used in "docker network connect" + if len(networks) < 2 { + return nil + } + + epConfig := make(map[string]*network.EndpointSettings) + for _, na := range networks[1:] { + epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na) + } + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings { + var ipv4, ipv6 string + for _, addr := range na.Addresses { + ip, _, err := net.ParseCIDR(addr) + if err != nil { + continue + } + + if ip.To4() != nil { + ipv4 = ip.String() + continue + } + + if ip.To16() != nil { + ipv6 = ip.String() + } + } + + return &network.EndpointSettings{ + NetworkID: na.Network.ID, + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: ipv4, + IPv6Address: ipv6, + }, + } +} + +func (c *containerConfig) virtualIP(networkID string) string { + if c.task.Endpoint == nil { + return "" + } + + for _, eVip := range c.task.Endpoint.VirtualIPs { + // We only support IPv4 VIPs for now. + if eVip.NetworkID == networkID { + vip, _, err := net.ParseCIDR(eVip.Addr) + if err != nil { + return "" + } + + return vip.String() + } + } + + return "" +} + +func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { + if len(c.task.Networks) == 0 { + return nil + } + + logrus.Debugf("Creating service config in agent for t = %+v", c.task) + svcCfg := &clustertypes.ServiceConfig{ + Name: c.task.ServiceAnnotations.Name, + Aliases: make(map[string][]string), + ID: c.task.ServiceID, + VirtualAddresses: make(map[string]*clustertypes.VirtualAddress), + } + + for _, na := range c.task.Networks { + svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{ + // We support only IPv4 virtual IP for now. + IPv4: c.virtualIP(na.Network.ID), + } + if len(na.Aliases) > 0 { + svcCfg.Aliases[na.Network.ID] = na.Aliases + } + } + + if c.task.Endpoint != nil { + for _, ePort := range c.task.Endpoint.Ports { + if ePort.PublishMode != api.PublishModeIngress { + continue + } + + svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{ + Name: ePort.Name, + Protocol: int32(ePort.Protocol), + TargetPort: ePort.TargetPort, + PublishedPort: ePort.PublishedPort, + }) + } + } + + return svcCfg +} + +// networks returns a list of network names attached to the container. The +// returned name can be used to lookup the corresponding network create +// options. +func (c *containerConfig) networks() []string { + var networks []string + + for name := range c.networksAttachments { + networks = append(networks, name) + } + + return networks +} + +func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) { + na, ok := c.networksAttachments[name] + if !ok { + return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced") + } + + options := types.NetworkCreate{ + // ID: na.Network.ID, + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + Options: na.Network.IPAM.Driver.Options, + }, + Options: na.Network.DriverState.Options, + Labels: na.Network.Spec.Annotations.Labels, + Internal: na.Network.Spec.Internal, + Attachable: na.Network.Spec.Attachable, + EnableIPv6: na.Network.Spec.Ipv6Enabled, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil +} + +func (c containerConfig) eventFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("type", events.ContainerEventType) + filter.Add("name", c.name()) + filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) + return filter +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go new file mode 100644 index 0000000..75f286a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go @@ -0,0 +1,672 @@ +package container + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// controller implements agent.Controller against docker's API. +// +// Most operations against docker's API are done through the container name, +// which is unique to the task. +type controller struct { + task *api.Task + adapter *containerAdapter + closed chan struct{} + err error + + pulled chan struct{} // closed after pull + cancelPull func() // cancels pull context if not nil + pullErr error // pull error, only read after pulled closed +} + +var _ exec.Controller = &controller{} + +// NewController returns a docker exec runner for the provided task. +func newController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*controller, error) { + adapter, err := newContainerAdapter(b, task, secrets) + if err != nil { + return nil, err + } + + return &controller{ + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (r *controller) Task() (*api.Task, error) { + return r.task, nil +} + +// ContainerStatus returns the container-specific status for the task. +func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + return nil, err + } + return parseContainerStatus(ctnr) +} + +func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + + return nil, err + } + + return parsePortStatus(ctnr) +} + +// Update tasks a recent task update and applies it to the container. +func (r *controller) Update(ctx context.Context, t *api.Task) error { + // TODO(stevvooe): While assignment of tasks is idempotent, we do allow + // updates of metadata, such as labelling, as well as any other properties + // that make sense. + return nil +} + +// Prepare creates a container and ensures the image is pulled. +// +// If the container has already be created, exec.ErrTaskPrepared is returned. +func (r *controller) Prepare(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + // Make sure all the networks that the task needs are created. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + // Make sure all the volumes that the task needs are created. + if err := r.adapter.createVolumes(ctx); err != nil { + return err + } + + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + if r.pulled == nil { + // Fork the pull to a different context to allow pull to continue + // on re-entrant calls to Prepare. This ensures that Prepare can be + // idempotent and not incur the extra cost of pulling when + // cancelled on updates. + var pctx context.Context + + r.pulled = make(chan struct{}) + pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller. + + go func() { + defer close(r.pulled) + r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled + }() + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-r.pulled: + if r.pullErr != nil { + // NOTE(stevvooe): We always try to pull the image to make sure we have + // the most up to date version. This will return an error, but we only + // log it. If the image truly doesn't exist, the create below will + // error out. + // + // This gives us some nice behavior where we use up to date versions of + // mutable tags, but will still run if the old image is available but a + // registry is down. + // + // If you don't want this behavior, lock down your image to an + // immutable tag or digest. + log.G(ctx).WithError(r.pullErr).Error("pulling image failed") + } + } + } + + if err := r.adapter.create(ctx); err != nil { + if isContainerCreateNameConflict(err) { + if _, err := r.adapter.inspect(ctx); err != nil { + return err + } + + // container is already created. success! + return exec.ErrTaskPrepared + } + + return err + } + + return nil +} + +// Start the container. An error will be returned if the container is already started. +func (r *controller) Start(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return err + } + + // Detect whether the container has *ever* been started. If so, we don't + // issue the start. + // + // TODO(stevvooe): This is very racy. While reading inspect, another could + // start the process and we could end up starting it twice. + if ctnr.State.Status != "created" { + return exec.ErrTaskStarted + } + + for { + if err := r.adapter.start(ctx); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + // Retry network creation again if we + // failed because some of the networks + // were not found. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + continue + } + + return errors.Wrap(err, "starting container failed") + } + + break + } + + // no health check + if ctnr.Config == nil || ctnr.Config.Healthcheck == nil { + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name()) + return err + } + return nil + } + + healthCmd := ctnr.Config.Healthcheck.Test + + if len(healthCmd) == 0 || healthCmd[0] == "NONE" { + return nil + } + + // wait for container to be healthy + eventq := r.adapter.events(ctx) + + var healthErr error + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "die": // exit on terminal events + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return errors.Wrap(err, "die event received") + } else if ctnr.State.ExitCode != 0 { + return &exitError{code: ctnr.State.ExitCode, cause: healthErr} + } + + return nil + case "destroy": + // If we get here, something has gone wrong but we want to exit + // and report anyways. + return ErrContainerDestroyed + case "health_status: unhealthy": + // in this case, we stop the container and report unhealthy status + if err := r.Shutdown(ctx); err != nil { + return errors.Wrap(err, "unhealthy container shutdown failed") + } + // set health check error, and wait for container to fully exit ("die" event) + healthErr = ErrContainerUnhealthy + case "health_status: healthy": + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name()) + return err + } + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +// Wait on the container to exit. +func (r *controller) Wait(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + healthErr := make(chan error, 1) + go func() { + ectx, cancel := context.WithCancel(ctx) // cancel event context on first event + defer cancel() + if err := r.checkHealth(ectx); err == ErrContainerUnhealthy { + healthErr <- ErrContainerUnhealthy + if err := r.Shutdown(ectx); err != nil { + log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy") + } + } + }() + + err := r.adapter.wait(ctx) + if ctx.Err() != nil { + return ctx.Err() + } + + if err != nil { + ee := &exitError{} + if ec, ok := err.(exec.ExitCoder); ok { + ee.code = ec.ExitCode() + } + select { + case e := <-healthErr: + ee.cause = e + default: + if err.Error() != "" { + ee.cause = err + } + } + return ee + } + + return nil +} + +// Shutdown the container cleanly. +func (r *controller) Shutdown(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + // remove container from service binding + if err := r.adapter.deactivateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to deactivate service binding for container %s", r.adapter.container.name()) + return err + } + + if err := r.adapter.shutdown(ctx); err != nil { + if isUnknownContainer(err) || isStoppedContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Terminate the container, with force. +func (r *controller) Terminate(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + if err := r.adapter.terminate(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Remove the container and its resources. +func (r *controller) Remove(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + // It may be necessary to shut down the task before removing it. + if err := r.Shutdown(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + // This may fail if the task was already shut down. + log.G(ctx).WithError(err).Debug("shutdown failed on removal") + } + + // Try removing networks referenced in this task in case this + // task is the last one referencing it + if err := r.adapter.removeNetworks(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + return err + } + + if err := r.adapter.remove(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + return nil +} + +// waitReady waits for a container to be "ready". +// Ready means it's past the started state. +func (r *controller) waitReady(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + eventq := r.adapter.events(ctx) + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if !isUnknownContainer(err) { + return errors.Wrap(err, "inspect container failed") + } + } else { + switch ctnr.State.Status { + case "running", "exited", "dead": + return nil + } + } + + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "start": + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error { + if err := r.checkClosed(); err != nil { + return err + } + + if err := r.waitReady(ctx); err != nil { + return errors.Wrap(err, "container not ready for logs") + } + + rc, err := r.adapter.logs(ctx, options) + if err != nil { + return errors.Wrap(err, "failed getting container logs") + } + defer rc.Close() + + var ( + // use a rate limiter to keep things under control but also provides some + // ability coalesce messages. + limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s + msgctx = api.LogContext{ + NodeID: r.task.NodeID, + ServiceID: r.task.ServiceID, + TaskID: r.task.ID, + } + ) + + brd := bufio.NewReader(rc) + for { + // so, message header is 8 bytes, treat as uint64, pull stream off MSB + var header uint64 + if err := binary.Read(brd, binary.BigEndian, &header); err != nil { + if err == io.EOF { + return nil + } + + return errors.Wrap(err, "failed reading log header") + } + + stream, size := (header>>(7<<3))&0xFF, header & ^(uint64(0xFF)<<(7<<3)) + + // limit here to decrease allocation back pressure. + if err := limiter.WaitN(ctx, int(size)); err != nil { + return errors.Wrap(err, "failed rate limiter") + } + + buf := make([]byte, size) + _, err := io.ReadFull(brd, buf) + if err != nil { + return errors.Wrap(err, "failed reading buffer") + } + + // Timestamp is RFC3339Nano with 1 space after. Lop, parse, publish + parts := bytes.SplitN(buf, []byte(" "), 2) + if len(parts) != 2 { + return fmt.Errorf("invalid timestamp in log message: %v", buf) + } + + ts, err := time.Parse(time.RFC3339Nano, string(parts[0])) + if err != nil { + return errors.Wrap(err, "failed to parse timestamp") + } + + tsp, err := ptypes.TimestampProto(ts) + if err != nil { + return errors.Wrap(err, "failed to convert timestamp") + } + + if err := publisher.Publish(ctx, api.LogMessage{ + Context: msgctx, + Timestamp: tsp, + Stream: api.LogStream(stream), + + Data: parts[1], + }); err != nil { + return errors.Wrap(err, "failed to publish log message") + } + } +} + +// Close the runner and clean up any ephemeral resources. +func (r *controller) Close() error { + select { + case <-r.closed: + return r.err + default: + if r.cancelPull != nil { + r.cancelPull() + } + + r.err = exec.ErrControllerClosed + close(r.closed) + } + return nil +} + +func (r *controller) matchevent(event events.Message) bool { + if event.Type != events.ContainerEventType { + return false + } + + // TODO(stevvooe): Filter based on ID matching, in addition to name. + + // Make sure the events are for this container. + if event.Actor.Attributes["name"] != r.adapter.container.name() { + return false + } + + return true +} + +func (r *controller) checkClosed() error { + select { + case <-r.closed: + return r.err + default: + return nil + } +} + +func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) { + status := &api.ContainerStatus{ + ContainerID: ctnr.ID, + PID: int32(ctnr.State.Pid), + ExitCode: int32(ctnr.State.ExitCode), + } + + return status, nil +} + +func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) { + status := &api.PortStatus{} + + if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 { + exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports) + if err != nil { + return nil, err + } + status.Ports = exposedPorts + } + + return status, nil +} + +func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) { + exposedPorts := make([]*api.PortConfig, 0, len(portMap)) + + for portProtocol, mapping := range portMap { + parts := strings.SplitN(string(portProtocol), "/", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid port mapping: %s", portProtocol) + } + + port, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return nil, err + } + + protocol := api.ProtocolTCP + switch strings.ToLower(parts[1]) { + case "tcp": + protocol = api.ProtocolTCP + case "udp": + protocol = api.ProtocolUDP + default: + return nil, fmt.Errorf("invalid protocol: %s", parts[1]) + } + + for _, binding := range mapping { + hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) + if err != nil { + return nil, err + } + + // TODO(aluzzardi): We're losing the port `name` here since + // there's no way to retrieve it back from the Engine. + exposedPorts = append(exposedPorts, &api.PortConfig{ + PublishMode: api.PublishModeHost, + Protocol: protocol, + TargetPort: uint32(port), + PublishedPort: uint32(hostPort), + }) + } + } + + return exposedPorts, nil +} + +type exitError struct { + code int + cause error +} + +func (e *exitError) Error() string { + if e.cause != nil { + return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) + } + + return fmt.Sprintf("task: non-zero exit (%v)", e.code) +} + +func (e *exitError) ExitCode() int { + return int(e.code) +} + +func (e *exitError) Cause() error { + return e.cause +} + +// checkHealth blocks until unhealthy container is detected or ctx exits +func (r *controller) checkHealth(ctx context.Context) error { + eventq := r.adapter.events(ctx) + + for { + select { + case <-ctx.Done(): + return nil + case <-r.closed: + return nil + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "health_status: unhealthy": + return ErrContainerUnhealthy + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go new file mode 100644 index 0000000..63e1233 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go @@ -0,0 +1,15 @@ +package container + +import "fmt" + +var ( + // ErrImageRequired returned if a task is missing the image definition. + ErrImageRequired = fmt.Errorf("dockerexec: image required") + + // ErrContainerDestroyed returned when a container is prematurely destroyed + // during a wait call. + ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed") + + // ErrContainerUnhealthy returned if controller detects the health check failure + ErrContainerUnhealthy = fmt.Errorf("dockerexec: unhealthy container") +) diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go new file mode 100644 index 0000000..f0dedd4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go @@ -0,0 +1,194 @@ +package container + +import ( + "sort" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/agent/secrets" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +type executor struct { + backend executorpkg.Backend + secrets exec.SecretsManager +} + +// NewExecutor returns an executor from the docker client. +func NewExecutor(b executorpkg.Backend) exec.Executor { + return &executor{ + backend: b, + secrets: secrets.NewManager(), + } +} + +// Describe returns the underlying node description from the docker client. +func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { + info, err := e.backend.SystemInfo() + if err != nil { + return nil, err + } + + plugins := map[api.PluginDescription]struct{}{} + addPlugins := func(typ string, names []string) { + for _, name := range names { + plugins[api.PluginDescription{ + Type: typ, + Name: name, + }] = struct{}{} + } + } + + // add v1 plugins + addPlugins("Volume", info.Plugins.Volume) + // Add builtin driver "overlay" (the only builtin multi-host driver) to + // the plugin list by default. + addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) + addPlugins("Authorization", info.Plugins.Authorization) + + // add v2 plugins + v2Plugins, err := e.backend.PluginManager().List() + if err == nil { + for _, plgn := range v2Plugins { + for _, typ := range plgn.Config.Interface.Types { + if typ.Prefix != "docker" || !plgn.Enabled { + continue + } + plgnTyp := typ.Capability + if typ.Capability == "volumedriver" { + plgnTyp = "Volume" + } else if typ.Capability == "networkdriver" { + plgnTyp = "Network" + } + plugins[api.PluginDescription{ + Type: plgnTyp, + Name: plgn.Name, + }] = struct{}{} + } + } + } + + pluginFields := make([]api.PluginDescription, 0, len(plugins)) + for k := range plugins { + pluginFields = append(pluginFields, k) + } + + sort.Sort(sortedPlugins(pluginFields)) + + // parse []string labels into a map[string]string + labels := map[string]string{} + for _, l := range info.Labels { + stringSlice := strings.SplitN(l, "=", 2) + // this will take the last value in the list for a given key + // ideally, one shouldn't assign multiple values to the same key + if len(stringSlice) > 1 { + labels[stringSlice[0]] = stringSlice[1] + } + } + + description := &api.NodeDescription{ + Hostname: info.Name, + Platform: &api.Platform{ + Architecture: info.Architecture, + OS: info.OSType, + }, + Engine: &api.EngineDescription{ + EngineVersion: info.ServerVersion, + Labels: labels, + Plugins: pluginFields, + }, + Resources: &api.Resources{ + NanoCPUs: int64(info.NCPU) * 1e9, + MemoryBytes: info.MemTotal, + }, + } + + return description, nil +} + +func (e *executor) Configure(ctx context.Context, node *api.Node) error { + na := node.Attachment + if na == nil { + return nil + } + + options := types.NetworkCreate{ + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + }, + Options: na.Network.DriverState.Options, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + return e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ + na.Network.ID, + types.NetworkCreateRequest{ + Name: na.Network.Spec.Annotations.Name, + NetworkCreate: options, + }, + }, na.Addresses[0]) +} + +// Controller returns a docker container runner. +func (e *executor) Controller(t *api.Task) (exec.Controller, error) { + if t.Spec.GetAttachment() != nil { + return newNetworkAttacherController(e.backend, t, e.secrets) + } + + ctlr, err := newController(e.backend, t, e.secrets) + if err != nil { + return nil, err + } + + return ctlr, nil +} + +func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { + nwKeys := []*networktypes.EncryptionKey{} + for _, key := range keys { + nwKey := &networktypes.EncryptionKey{ + Subsystem: key.Subsystem, + Algorithm: int32(key.Algorithm), + Key: make([]byte, len(key.Key)), + LamportTime: key.LamportTime, + } + copy(nwKey.Key, key.Key) + nwKeys = append(nwKeys, nwKey) + } + e.backend.SetNetworkBootstrapKeys(nwKeys) + + return nil +} + +func (e *executor) Secrets() exec.SecretsManager { + return e.secrets +} + +type sortedPlugins []api.PluginDescription + +func (sp sortedPlugins) Len() int { return len(sp) } + +func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } + +func (sp sortedPlugins) Less(i, j int) bool { + if sp[i].Type != sp[j].Type { + return sp[i].Type < sp[j].Type + } + return sp[i].Name < sp[j].Name +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go new file mode 100644 index 0000000..99cf750 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go @@ -0,0 +1,102 @@ +// +build !windows + +package container + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/events" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func TestHealthStates(t *testing.T) { + + // set up environment: events, task, container .... + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + task := &api.Task{ + ID: "id", + ServiceID: "sid", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + }, + }, + Annotations: api.Annotations{Name: "name"}, + } + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "id", + Name: "name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + }, + } + + daemon := &daemon.Daemon{ + EventsService: e, + } + + controller, err := newController(daemon, task, nil) + if err != nil { + t.Fatalf("create controller fail %v", err) + } + + errChan := make(chan error, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // fire checkHealth + go func() { + err := controller.checkHealth(ctx) + select { + case errChan <- err: + case <-ctx.Done(): + } + }() + + // send an event and expect to get expectedErr + // if expectedErr is nil, shouldn't get any error + logAndExpect := func(msg string, expectedErr error) { + daemon.LogContainerEvent(c, msg) + + timer := time.NewTimer(1 * time.Second) + defer timer.Stop() + + select { + case err := <-errChan: + if err != expectedErr { + t.Fatalf("expect error %v, but get %v", expectedErr, err) + } + case <-timer.C: + if expectedErr != nil { + t.Fatalf("time limit exceeded, didn't get expected error") + } + } + } + + // events that are ignored by checkHealth + logAndExpect("health_status: running", nil) + logAndExpect("health_status: healthy", nil) + logAndExpect("die", nil) + + // unhealthy event will be caught by checkHealth + logAndExpect("health_status: unhealthy", ErrContainerUnhealthy) +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go new file mode 100644 index 0000000..5fda1f2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go @@ -0,0 +1,39 @@ +package container + +import ( + "fmt" + "path/filepath" + + "github.com/docker/swarmkit/api" +) + +func validateMounts(mounts []api.Mount) error { + for _, mount := range mounts { + // Target must always be absolute + if !filepath.IsAbs(mount.Target) { + return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target) + } + + switch mount.Type { + // The checks on abs paths are required due to the container API confusing + // volume mounts as bind mounts when the source is absolute (and vice-versa) + // See #25253 + // TODO: This is probably not necessary once #22373 is merged + case api.MountTypeBind: + if !filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source) + } + case api.MountTypeVolume: + if filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source) + } + case api.MountTypeTmpfs: + if mount.Source != "" { + return fmt.Errorf("invalid tmpfs source, source must be empty") + } + default: + return fmt.Errorf("invalid mount type: %s", mount.Type) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go new file mode 100644 index 0000000..9d98e2c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go @@ -0,0 +1,141 @@ +package container + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/swarmkit/api" +) + +func newTestControllerWithMount(m api.Mount) (*controller, error) { + return newController(&daemon.Daemon{}, &api.Task{ + ID: stringid.GenerateRandomID(), + ServiceID: stringid.GenerateRandomID(), + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + Mounts: []api.Mount{m}, + }, + }, + }, + }, nil) +} + +func TestControllerValidateMountBind(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid bind mount source") { + t.Fatalf("expected error, got: %v", err) + } + + // with non-existing source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsNonExistent, + Target: testAbsPath, + }); err != nil { + t.Fatalf("controller should not error at creation: %v", err) + } + + // with proper source + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountBind") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected error, got: %v", err) + } +} + +func TestControllerValidateMountVolume(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: testAbsPath, + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid volume mount source") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: "foo", + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected error, got: %v", err) + } +} + +func TestControllerValidateMountTarget(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountTarget") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsPath, + Target: "foo", + }); err == nil || !strings.Contains(err.Error(), "invalid mount target") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountTmpfs(t *testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid tmpfs source") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountInvalidType(t *testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.Mount_MountType(9999), + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid mount type") { + t.Fatalf("expected error, got: %v", err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go new file mode 100644 index 0000000..c616eee --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go @@ -0,0 +1,8 @@ +// +build !windows + +package container + +const ( + testAbsPath = "/foo" + testAbsNonExistent = "/some-non-existing-host-path/" +) diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go new file mode 100644 index 0000000..c346451 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go @@ -0,0 +1,8 @@ +// +build windows + +package container + +const ( + testAbsPath = `c:\foo` + testAbsNonExistent = `c:\some-non-existing-host-path\` +) diff --git a/vendor/github.com/docker/docker/daemon/cluster/filters.go b/vendor/github.com/docker/docker/daemon/cluster/filters.go new file mode 100644 index 0000000..88668ed --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/filters.go @@ -0,0 +1,116 @@ +package cluster + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/filters" + runconfigopts "github.com/docker/docker/runconfig/opts" + swarmapi "github.com/docker/swarmkit/api" +) + +func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "role": true, + "membership": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + f := &swarmapi.ListNodesRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + } + + for _, r := range filter.Get("role") { + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok { + f.Roles = append(f.Roles, swarmapi.NodeRole(role)) + } else if r != "" { + return nil, fmt.Errorf("Invalid role filter: '%s'", r) + } + } + + for _, a := range filter.Get("membership") { + if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok { + f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership)) + } else if a != "" { + return nil, fmt.Errorf("Invalid membership filter: '%s'", a) + } + } + + return f, nil +} + +func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListServicesRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} + +func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "service": true, + "node": true, + "desired-state": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + if transformFunc != nil { + if err := transformFunc(filter); err != nil { + return nil, err + } + } + f := &swarmapi.ListTasksRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + ServiceIDs: filter.Get("service"), + NodeIDs: filter.Get("node"), + } + + for _, s := range filter.Get("desired-state") { + if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { + f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) + } else if s != "" { + return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) + } + } + + return f, nil +} + +func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Filters, error) { + accepted := map[string]bool{ + "names": true, + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListSecretsRequest_Filters{ + Names: filter.Get("names"), + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/helpers.go b/vendor/github.com/docker/docker/daemon/cluster/helpers.go new file mode 100644 index 0000000..be5bf56 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/helpers.go @@ -0,0 +1,108 @@ +package cluster + +import ( + "fmt" + + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) { + rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return nil, err + } + + if len(rl.Clusters) == 0 { + return nil, fmt.Errorf("swarm not found") + } + + // TODO: assume one cluster only + return rl.Clusters[0], nil +} + +func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) { + // GetNode to match via full ID. + rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}) + if err != nil { + // If any error (including NotFound), ListNodes to match via full name. + rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{Names: []string{input}}}) + + if err != nil || len(rl.Nodes) == 0 { + // If any error or 0 result, ListNodes to match via ID prefix. + rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Nodes) == 0 { + return nil, fmt.Errorf("node %s not found", input) + } + + if l := len(rl.Nodes); l > 1 { + return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l) + } + + return rl.Nodes[0], nil + } + return rg.Node, nil +} + +func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) { + // GetService to match via full ID. + rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input}) + if err != nil { + // If any error (including NotFound), ListServices to match via full name. + rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{Names: []string{input}}}) + if err != nil || len(rl.Services) == 0 { + // If any error or 0 result, ListServices to match via ID prefix. + rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Services) == 0 { + return nil, fmt.Errorf("service %s not found", input) + } + + if l := len(rl.Services); l > 1 { + return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) + } + + return rl.Services[0], nil + } + return rg.Service, nil +} + +func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { + // GetTask to match via full ID. + rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}) + if err != nil { + // If any error (including NotFound), ListTasks to match via full name. + rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{Names: []string{input}}}) + + if err != nil || len(rl.Tasks) == 0 { + // If any error or 0 result, ListTasks to match via ID prefix. + rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Tasks) == 0 { + return nil, fmt.Errorf("task %s not found", input) + } + + if l := len(rl.Tasks); l > 1 { + return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l) + } + + return rl.Tasks[0], nil + } + return rg.Task, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go new file mode 100644 index 0000000..c24d486 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go @@ -0,0 +1,278 @@ +package cluster + +import ( + "errors" + "fmt" + "net" +) + +var ( + errNoSuchInterface = errors.New("no such interface") + errNoIP = errors.New("could not find the system's IP address") + errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified") + errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)") + errBadAdvertiseAddr = errors.New("advertise address must be a non-zero IP address or network interface (with optional port number)") + errBadDefaultAdvertiseAddr = errors.New("default advertise address must be a non-zero IP address or network interface (without a port number)") +) + +func resolveListenAddr(specifiedAddr string) (string, string, error) { + specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr) + if err != nil { + return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) + } + + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. + interfaceAddr, err := resolveInterfaceAddr(specifiedHost) + if err == nil { + return interfaceAddr.String(), specifiedPort, nil + } + if err != errNoSuchInterface { + return "", "", err + } + + // If it's not an interface, it must be an IP (for now) + if net.ParseIP(specifiedHost) == nil { + return "", "", errBadListenAddr + } + + return specifiedHost, specifiedPort, nil +} + +func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) { + // Approach: + // - If an advertise address is specified, use that. Resolve the + // interface's address if an interface was specified in + // advertiseAddr. Fill in the port from listenAddrPort if necessary. + // - If DefaultAdvertiseAddr is not empty, use that with the port from + // listenAddrPort. Resolve the interface's address from + // if an interface name was specified in DefaultAdvertiseAddr. + // - Otherwise, try to autodetect the system's address. Use the port in + // listenAddrPort with this address if autodetection succeeds. + + if advertiseAddr != "" { + advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr) + if err != nil { + // Not a host:port specification + advertiseHost = advertiseAddr + advertisePort = listenAddrPort + } + + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. + interfaceAddr, err := resolveInterfaceAddr(advertiseHost) + if err == nil { + return interfaceAddr.String(), advertisePort, nil + } + if err != errNoSuchInterface { + return "", "", err + } + + // If it's not an interface, it must be an IP (for now) + if ip := net.ParseIP(advertiseHost); ip == nil || ip.IsUnspecified() { + return "", "", errBadAdvertiseAddr + } + + return advertiseHost, advertisePort, nil + } + + if c.config.DefaultAdvertiseAddr != "" { + // Does the default advertise address component match any of the + // interface names on the system? If so, use the address from + // that interface. + interfaceAddr, err := resolveInterfaceAddr(c.config.DefaultAdvertiseAddr) + if err == nil { + return interfaceAddr.String(), listenAddrPort, nil + } + if err != errNoSuchInterface { + return "", "", err + } + + // If it's not an interface, it must be an IP (for now) + if ip := net.ParseIP(c.config.DefaultAdvertiseAddr); ip == nil || ip.IsUnspecified() { + return "", "", errBadDefaultAdvertiseAddr + } + + return c.config.DefaultAdvertiseAddr, listenAddrPort, nil + } + + systemAddr, err := c.resolveSystemAddr() + if err != nil { + return "", "", err + } + return systemAddr.String(), listenAddrPort, nil +} + +func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { + // Use a specific interface's IP address. + intf, err := net.InterfaceByName(specifiedInterface) + if err != nil { + return nil, errNoSuchInterface + } + + addrs, err := intf.Addrs() + if err != nil { + return nil, err + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + if ipAddr.IP.To4() != nil { + // IPv4 + if interfaceAddr4 != nil { + return nil, fmt.Errorf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP) + } + interfaceAddr4 = ipAddr.IP + } else { + // IPv6 + if interfaceAddr6 != nil { + return nil, fmt.Errorf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP) + } + interfaceAddr6 = ipAddr.IP + } + } + } + + if interfaceAddr4 == nil && interfaceAddr6 == nil { + return nil, fmt.Errorf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface) + } + + // In the case that there's exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + return interfaceAddr4, nil + } + return interfaceAddr6, nil +} + +func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) { + // Use the system's only IP address, or fail if there are + // multiple addresses to choose from. Skip interfaces which + // are managed by docker via subnet check. + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + var systemAddr net.IP + var systemInterface string + + // List Docker-managed subnets + v4Subnets := c.config.NetworkSubnetsProvider.V4Subnets() + v6Subnets := c.config.NetworkSubnetsProvider.V6Subnets() + +ifaceLoop: + for _, intf := range interfaces { + // Skip inactive interfaces and loopback interfaces + if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 { + continue + } + + addrs, err := intf.Addrs() + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + // Skip loopback and link-local addresses + if !ok || !ipAddr.IP.IsGlobalUnicast() { + continue + } + + if ipAddr.IP.To4() != nil { + // IPv4 + + // Ignore addresses in subnets that are managed by Docker. + for _, subnet := range v4Subnets { + if subnet.Contains(ipAddr.IP) { + continue ifaceLoop + } + } + + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr4, ipAddr.IP) + } + + interfaceAddr4 = ipAddr.IP + } else { + // IPv6 + + // Ignore addresses in subnets that are managed by Docker. + for _, subnet := range v6Subnets { + if subnet.Contains(ipAddr.IP) { + continue ifaceLoop + } + } + + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr6, ipAddr.IP) + } + + interfaceAddr6 = ipAddr.IP + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Name + } + } + + if systemAddr == nil { + return nil, errNoIP + } + + return systemAddr, nil +} + +func listSystemIPs() []net.IP { + interfaces, err := net.Interfaces() + if err != nil { + return nil + } + + var systemAddrs []net.IP + + for _, intf := range interfaces { + addrs, err := intf.Addrs() + if err != nil { + continue + } + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + systemAddrs = append(systemAddrs, ipAddr.IP) + } + } + } + + return systemAddrs +} + +func errMultipleIPs(interfaceA, interfaceB string, addrA, addrB net.IP) error { + if interfaceA == interfaceB { + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB) + } + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB) +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go new file mode 100644 index 0000000..3d4f239 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go @@ -0,0 +1,91 @@ +// +build linux + +package cluster + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + // Use the system's only device IP address, or fail if there are + // multiple addresses to choose from. + interfaces, err := netlink.LinkList() + if err != nil { + return nil, err + } + + var ( + systemAddr net.IP + systemInterface string + deviceFound bool + ) + + for _, intf := range interfaces { + // Skip non device or inactive interfaces + if intf.Type() != "device" || intf.Attrs().Flags&net.FlagUp == 0 { + continue + } + + addrs, err := netlink.AddrList(intf, netlink.FAMILY_ALL) + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr := addr.IPNet.IP + + // Skip loopback and link-local addresses + if !ipAddr.IsGlobalUnicast() { + continue + } + + // At least one non-loopback device is found and it is administratively up + deviceFound = true + + if ipAddr.To4() != nil { + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr4, ipAddr) + } + interfaceAddr4 = ipAddr + } else { + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr6, ipAddr) + } + interfaceAddr6 = ipAddr + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Attrs().Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Attrs().Name + } + } + + if systemAddr == nil { + if !deviceFound { + // If no non-loopback device type interface is found, + // fall back to the regular auto-detection mechanism. + // This is to cover the case where docker is running + // inside a container (eths are in fact veths). + return c.resolveSystemAddrViaSubnetCheck() + } + return nil, errNoIP + } + + return systemAddr, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go new file mode 100644 index 0000000..4e845f5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris + +package cluster + +import "net" + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + return c.resolveSystemAddrViaSubnetCheck() +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go new file mode 100644 index 0000000..57a894b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go @@ -0,0 +1,57 @@ +package cluster + +import ( + "bufio" + "fmt" + "net" + "os/exec" + "strings" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + defRouteCmd := "/usr/sbin/ipadm show-addr -p -o addr " + + "`/usr/sbin/route get default | /usr/bin/grep interface | " + + "/usr/bin/awk '{print $2}'`" + out, err := exec.Command("/usr/bin/bash", "-c", defRouteCmd).Output() + if err != nil { + return nil, fmt.Errorf("cannot get default route: %v", err) + } + + defInterface := strings.SplitN(string(out), "/", 2) + defInterfaceIP := net.ParseIP(defInterface[0]) + + return defInterfaceIP, nil +} + +func listSystemIPs() []net.IP { + var systemAddrs []net.IP + cmd := exec.Command("/usr/sbin/ipadm", "show-addr", "-p", "-o", "addr") + cmdReader, err := cmd.StdoutPipe() + if err != nil { + return nil + } + + if err := cmd.Start(); err != nil { + return nil + } + + scanner := bufio.NewScanner(cmdReader) + go func() { + for scanner.Scan() { + text := scanner.Text() + nameAddrPair := strings.SplitN(text, "/", 2) + // Let go of loopback interfaces and docker interfaces + systemAddrs = append(systemAddrs, net.ParseIP(nameAddrPair[0])) + } + }() + + if err := scanner.Err(); err != nil { + fmt.Printf("scan underwent err: %+v\n", err) + } + + if err := cmd.Wait(); err != nil { + fmt.Printf("run command wait: %+v\n", err) + } + + return systemAddrs +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/provider/network.go b/vendor/github.com/docker/docker/daemon/cluster/provider/network.go new file mode 100644 index 0000000..f4c72ae --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/provider/network.go @@ -0,0 +1,37 @@ +package provider + +import "github.com/docker/docker/api/types" + +// NetworkCreateRequest is a request when creating a network. +type NetworkCreateRequest struct { + ID string + types.NetworkCreateRequest +} + +// NetworkCreateResponse is a response when creating a network. +type NetworkCreateResponse struct { + ID string `json:"Id"` +} + +// VirtualAddress represents a virtual address. +type VirtualAddress struct { + IPv4 string + IPv6 string +} + +// PortConfig represents a port configuration. +type PortConfig struct { + Name string + Protocol int32 + TargetPort uint32 + PublishedPort uint32 +} + +// ServiceConfig represents a service configuration. +type ServiceConfig struct { + ID string + Name string + Aliases map[string][]string + VirtualAddresses map[string]*VirtualAddress + ExposedPorts []*PortConfig +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/secrets.go b/vendor/github.com/docker/docker/daemon/cluster/secrets.go new file mode 100644 index 0000000..2b9eb5d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/secrets.go @@ -0,0 +1,133 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" +) + +// GetSecret returns a secret from a managed swarm cluster +func (c *Cluster) GetSecret(id string) (types.Secret, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Secret{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.node.client.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: id}) + if err != nil { + return types.Secret{}, err + } + + return convert.SecretFromGRPC(r.Secret), nil +} + +// GetSecrets returns all secrets of a managed swarm cluster. +func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + filters, err := newListSecretsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.node.client.ListSecrets(ctx, + &swarmapi.ListSecretsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + secrets := []types.Secret{} + + for _, secret := range r.Secrets { + secrets = append(secrets, convert.SecretFromGRPC(secret)) + } + + return secrets, nil +} + +// CreateSecret creates a new secret in a managed swarm cluster. +func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return "", c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + secretSpec := convert.SecretSpecToGRPC(s) + + r, err := c.node.client.CreateSecret(ctx, + &swarmapi.CreateSecretRequest{Spec: &secretSpec}) + if err != nil { + return "", err + } + + return r.Secret.ID, nil +} + +// RemoveSecret removes a secret from a managed swarm cluster. +func (c *Cluster) RemoveSecret(id string) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + req := &swarmapi.RemoveSecretRequest{ + SecretID: id, + } + + if _, err := c.node.client.RemoveSecret(ctx, req); err != nil { + return err + } + return nil +} + +// UpdateSecret updates a secret in a managed swarm cluster. +// Note: this is not exposed to the CLI but is available from the API only +func (c *Cluster) UpdateSecret(id string, version uint64, spec types.SecretSpec) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + secretSpec := convert.SecretSpecToGRPC(spec) + + if _, err := c.client.UpdateSecret(ctx, + &swarmapi.UpdateSecretRequest{ + SecretID: id, + SecretVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &secretSpec, + }); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/commit.go b/vendor/github.com/docker/docker/daemon/commit.go new file mode 100644 index 0000000..1e7bffb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/commit.go @@ -0,0 +1,271 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/reference" +) + +// merge merges two Config, the image container configuration (defaults values), +// and the user container configuration, either passed by the API or generated +// by the cli. +// It will mutate the specified user configuration (userConf) with the image +// configuration where the user configuration is incomplete. +func merge(userConf, imageConf *containertypes.Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if runtime.GOOS == "windows" { + // Case insensitive environment variables on Windows + imageEnvKey = strings.ToUpper(imageEnvKey) + userEnvKey = strings.ToUpper(userEnvKey) + } + if imageEnvKey == userEnvKey { + found = true + break + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if userConf.Labels == nil { + userConf.Labels = map[string]string{} + } + for l, v := range imageConf.Labels { + if _, ok := userConf.Labels[l]; !ok { + userConf.Labels[l] = v + } + } + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + userConf.ArgsEscaped = imageConf.ArgsEscaped + } + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } + } + if imageConf.Healthcheck != nil { + if userConf.Healthcheck == nil { + userConf.Healthcheck = imageConf.Healthcheck + } else { + if len(userConf.Healthcheck.Test) == 0 { + userConf.Healthcheck.Test = imageConf.Healthcheck.Test + } + if userConf.Healthcheck.Interval == 0 { + userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval + } + if userConf.Healthcheck.Timeout == 0 { + userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout + } + if userConf.Healthcheck.Retries == 0 { + userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries + } + } + } + + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + + if userConf.StopSignal == "" { + userConf.StopSignal = imageConf.StopSignal + } + return nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository. +func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return "", err + } + + // It is not possible to commit a running container on Windows and on Solaris. + if (runtime.GOOS == "windows" || runtime.GOOS == "solaris") && container.IsRunning() { + return "", fmt.Errorf("%+v does not support commit of a running container", runtime.GOOS) + } + + if c.Pause && !container.IsPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes) + if err != nil { + return "", err + } + + if c.MergeConfigs { + if err := merge(newConfig, container.Config); err != nil { + return "", err + } + } + + rwTar, err := daemon.exportContainerRw(container) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var history []image.History + rootFS := image.NewRootFS() + osVersion := "" + var osFeatures []string + + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + history = img.History + rootFS = img.RootFS + osVersion = img.OSVersion + osFeatures = img.OSFeatures + } + + l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + h := image.History{ + Author: c.Author, + Created: time.Now().UTC(), + CreatedBy: strings.Join(container.Config.Cmd, " "), + Comment: c.Comment, + EmptyLayer: true, + } + + if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { + h.EmptyLayer = false + rootFS.Append(diffID) + } + + history = append(history, h) + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: newConfig, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Container: container.ID, + ContainerConfig: *container.Config, + Author: c.Author, + Created: h.Created, + }, + RootFS: rootFS, + History: history, + OSFeatures: osFeatures, + OSVersion: osVersion, + }) + + if err != nil { + return "", err + } + + id, err := daemon.imageStore.Create(config) + if err != nil { + return "", err + } + + if container.ImageID != "" { + if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } + } + + imageRef := "" + if c.Repo != "" { + newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImageWithReference(id, newTag); err != nil { + return "", err + } + imageRef = newTag.String() + } + + attributes := map[string]string{ + "comment": c.Comment, + "imageID": id.String(), + "imageRef": imageRef, + } + daemon.LogContainerEventWithAttributes(container, "commit", attributes) + containerActions.WithValues("commit").UpdateSince(start) + return id.String(), nil +} + +func (daemon *Daemon) exportContainerRw(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.RWLayer.TarStream() + if err != nil { + daemon.Unmount(container) // logging is already handled in the `Unmount` function + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + return container.RWLayer.Unmount() + }), + nil +} diff --git a/vendor/github.com/docker/docker/daemon/config.go b/vendor/github.com/docker/docker/daemon/config.go new file mode 100644 index 0000000..42ef18f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config.go @@ -0,0 +1,525 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/docker/registry" + "github.com/imdario/mergo" + "github.com/spf13/pflag" +) + +const ( + // defaultMaxConcurrentDownloads is the default value for + // maximum number of downloads that + // may take place at a time for each pull. + defaultMaxConcurrentDownloads = 3 + // defaultMaxConcurrentUploads is the default value for + // maximum number of uploads that + // may take place at a time for each push. + defaultMaxConcurrentUploads = 5 + // stockRuntimeName is the reserved name/alias used to represent the + // OCI runtime being shipped with the docker daemon package. + stockRuntimeName = "runc" +) + +const ( + defaultNetworkMtu = 1500 + disableNetworkBridge = "none" +) + +const ( + defaultShutdownTimeout = 15 +) + +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// with others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, + "runtimes": true, + "default-ulimits": true, +} + +// LogConfig represents the default log configuration. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type LogConfig struct { + Type string `json:"log-driver,omitempty"` + Config map[string]string `json:"log-opts,omitempty"` +} + +// commonBridgeConfig stores all the platform-common bridge driver specific +// configuration. +type commonBridgeConfig struct { + Iface string `json:"bridge,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` +} + +// CommonTLSOptions defines TLS configuration for the daemon server. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonTLSOptions struct { + CAFile string `json:"tlscacert,omitempty"` + CertFile string `json:"tlscert,omitempty"` + KeyFile string `json:"tlskey,omitempty"` +} + +// CommonConfig defines the configuration of a docker daemon which is +// common across platforms. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonConfig struct { + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + Root string `json:"graph,omitempty"` + SocketGroup string `json:"group,omitempty"` + TrustKeyPath string `json:"-"` + CorsHeaders string `json:"api-cors-header,omitempty"` + EnableCors bool `json:"api-enable-cors,omitempty"` + + // LiveRestoreEnabled determines whether we should keep containers + // alive upon daemon shutdown/start + LiveRestoreEnabled bool `json:"live-restore,omitempty"` + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string `json:"cluster-store,omitempty"` + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. + ClusterAdvertise string `json:"cluster-advertise,omitempty"` + + // MaxConcurrentDownloads is the maximum number of downloads that + // may take place at a time for each pull. + MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"` + + // MaxConcurrentUploads is the maximum number of uploads that + // may take place at a time for each push. + MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"` + + // ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container + // to stop when daemon is being shutdown + ShutdownTimeout int `json:"shutdown-timeout,omitempty"` + + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. + CommonTLSOptions + + // SwarmDefaultAdvertiseAddr is the default host/IP or network interface + // to use if a wildcard address is specified in the ListenAddr value + // given to the /swarm/init endpoint and no advertise address is + // specified. + SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"` + MetricsAddress string `json:"metrics-addr"` + + LogConfig + bridgeConfig // bridgeConfig holds bridge network specific configuration. + registry.ServiceOptions + + reloadLock sync.Mutex + valuesSet map[string]interface{} + + Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not +} + +// InstallCommonFlags adds flags to the pflag.FlagSet to configure the daemon +func (config *Config) InstallCommonFlags(flags *pflag.FlagSet) { + var maxConcurrentDownloads, maxConcurrentUploads int + + config.ServiceOptions.InstallCliFlags(flags) + + flags.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), "storage-opt", "Storage driver options") + flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") + flags.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), "exec-opt", "Runtime execution options") + flags.StringVarP(&config.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") + flags.StringVarP(&config.Root, "graph", "g", defaultGraph, "Root of the Docker runtime") + flags.BoolVarP(&config.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") + flags.MarkDeprecated("restart", "Please use a restart policy on docker run") + flags.StringVarP(&config.GraphDriver, "storage-driver", "s", "", "Storage driver to use") + flags.IntVar(&config.Mtu, "mtu", 0, "Set the containers network MTU") + flags.BoolVar(&config.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") + // FIXME: why the inconsistency between "hosts" and "sockets"? + flags.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") + flags.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), "dns-opt", "DNS options to use") + flags.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") + flags.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") + flags.StringVar(&config.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") + flags.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") + flags.StringVar(&config.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") + flags.StringVar(&config.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") + flags.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") + flags.StringVar(&config.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") + flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", defaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") + flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", defaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") + flags.IntVar(&config.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") + + flags.StringVar(&config.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") + flags.BoolVar(&config.Experimental, "experimental", false, "Enable experimental features") + + flags.StringVar(&config.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") + + config.MaxConcurrentDownloads = &maxConcurrentDownloads + config.MaxConcurrentUploads = &maxConcurrentUploads +} + +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (config *Config) IsValueSet(name string) bool { + if config.valuesSet == nil { + return false + } + _, ok := config.valuesSet[name] + return ok +} + +// NewConfig returns a new fully initialized Config struct +func NewConfig() *Config { + config := Config{} + config.LogConfig.Config = make(map[string]string) + config.ClusterOpts = make(map[string]string) + + if runtime.GOOS != "linux" { + config.V2Only = true + } + return &config +} + +func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { + if runtime.GOOS == "solaris" && (clusterAdvertise != "" || clusterStore != "") { + return "", errors.New("Cluster Advertise Settings not supported on Solaris") + } + if clusterAdvertise == "" { + return "", errDiscoveryDisabled + } + if clusterStore == "" { + return "", fmt.Errorf("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration") + } + + advertise, err := discovery.ParseAdvertise(clusterAdvertise) + if err != nil { + return "", fmt.Errorf("discovery advertise parsing failed (%v)", err) + } + return advertise, nil +} + +// GetConflictFreeLabels validate Labels for conflict +// In swarm the duplicates for labels are removed +// so we only take same values here, no conflict values +// If the key-value is the same we will only take the last label +func GetConflictFreeLabels(labels []string) ([]string, error) { + labelMap := map[string]string{} + for _, label := range labels { + stringSlice := strings.SplitN(label, "=", 2) + if len(stringSlice) > 1 { + // If there is a conflict we will return an error + if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { + return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v) + } + labelMap[stringSlice[0]] = stringSlice[1] + } + } + + newLabels := []string{} + for k, v := range labelMap { + newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v)) + } + return newLabels, nil +} + +// ReloadConfiguration reads the configuration in the host and reloads the daemon and server. +func ReloadConfiguration(configFile string, flags *pflag.FlagSet, reload func(*Config)) error { + logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) + newConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return err + } + + if err := ValidateConfiguration(newConfig); err != nil { + return fmt.Errorf("file configuration validation failed (%v)", err) + } + + // Labels of the docker engine used to allow multiple values associated with the same key. + // This is deprecated in 1.13, and, be removed after 3 release cycles. + // The following will check the conflict of labels, and report a warning for deprecation. + // + // TODO: After 3 release cycles (1.16) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := GetConflictFreeLabels(newConfig.Labels) + // if err != nil { + // return err + // } + // newConfig.Labels = newLabels + // + if _, err := GetConflictFreeLabels(newConfig.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + reload(newConfig) + return nil +} + +// boolValue is an interface that boolean value flags implement +// to tell the command line how to make -name equivalent to -name=true. +type boolValue interface { + IsBoolFlag() bool +} + +// MergeDaemonConfigurations reads a configuration file, +// loads the file configuration in an isolated structure, +// and merges the configuration provided from flags on top +// if there are no conflicts. +func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) { + fileConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return nil, err + } + + if err := ValidateConfiguration(fileConfig); err != nil { + return nil, fmt.Errorf("file configuration validation failed (%v)", err) + } + + // merge flags configuration on top of the file configuration + if err := mergo.Merge(fileConfig, flagsConfig); err != nil { + return nil, err + } + + // We need to validate again once both fileConfig and flagsConfig + // have been merged + if err := ValidateConfiguration(fileConfig); err != nil { + return nil, fmt.Errorf("file configuration validation failed (%v)", err) + } + + return fileConfig, nil +} + +// getConflictFreeConfiguration loads the configuration from a JSON file. +// It compares that configuration with the one provided by the flags, +// and returns an error if there are conflicts. +func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) { + b, err := ioutil.ReadFile(configFile) + if err != nil { + return nil, err + } + + var config Config + var reader io.Reader + if flags != nil { + var jsonConfig map[string]interface{} + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { + return nil, err + } + + configSet := configValuesSet(jsonConfig) + + if err := findConfigurationConflicts(configSet, flags); err != nil { + return nil, err + } + + // Override flag values to make sure the values set in the config file with nullable values, like `false`, + // are not overridden by default truthy values from the flags that were not explicitly set. + // See https://github.com/docker/docker/issues/20289 for an example. + // + // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. + namedOptions := make(map[string]interface{}) + for key, value := range configSet { + f := flags.Lookup(key) + if f == nil { // ignore named flags that don't match + namedOptions[key] = value + continue + } + + if _, ok := f.Value.(boolValue); ok { + f.Value.Set(fmt.Sprintf("%v", value)) + } + } + if len(namedOptions) > 0 { + // set also default for mergeVal flags that are boolValue at the same time. + flags.VisitAll(func(f *pflag.Flag) { + if opt, named := f.Value.(opts.NamedOption); named { + v, set := namedOptions[opt.Name()] + _, boolean := f.Value.(boolValue) + if set && boolean { + f.Value.Set(fmt.Sprintf("%v", v)) + } + } + }) + } + + config.valuesSet = configSet + } + + reader = bytes.NewReader(b) + err = json.NewDecoder(reader).Decode(&config) + return &config, err +} + +// configValuesSet returns the configuration values explicitly set in the file. +func configValuesSet(config map[string]interface{}) map[string]interface{} { + flatten := make(map[string]interface{}) + for k, v := range config { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { + for km, vm := range m { + flatten[km] = vm + } + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + if flag := flags.Lookup(key); flag == nil { + unknownKeys[key] = value + } + } + + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + if len(unknownKeys) > 0 { + unknownNamedConflicts := func(f *pflag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + } + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string + printConflict := func(name string, flagValue, fileValue interface{}) string { + return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) + } + + // 3. Search keys that are present as a flag and as a file option. + duplicatedConflicts := func(f *pflag.Flag) { + // search option name in the json configuration payload if the value is a named option + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if optsValue, ok := config[namedOption.Name()]; ok { + conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) + } + } else { + // search flag name in the json configuration payload + for _, name := range []string{f.Name, f.Shorthand} { + if value, ok := config[name]; ok { + conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) + break + } + } + } + } + + flags.Visit(duplicatedConflicts) + + if len(conflicts) > 0 { + return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + } + return nil +} + +// ValidateConfiguration validates some specific configs. +// such as config.DNS, config.Labels, config.DNSSearch, +// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. +func ValidateConfiguration(config *Config) error { + // validate DNS + for _, dns := range config.DNS { + if _, err := opts.ValidateIPAddress(dns); err != nil { + return err + } + } + + // validate DNSSearch + for _, dnsSearch := range config.DNSSearch { + if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { + return err + } + } + + // validate Labels + for _, label := range config.Labels { + if _, err := opts.ValidateLabel(label); err != nil { + return err + } + } + + // validate MaxConcurrentDownloads + if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { + return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) + } + + // validate MaxConcurrentUploads + if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { + return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) + } + + // validate that "default" runtime is not reset + if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { + if _, ok := runtimes[stockRuntimeName]; ok { + return fmt.Errorf("runtime name '%s' is reserved", stockRuntimeName) + } + } + + if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != stockRuntimeName { + runtimes := config.GetAllRuntimes() + if _, ok := runtimes[defaultRuntime]; !ok { + return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_common_unix.go b/vendor/github.com/docker/docker/daemon/config_common_unix.go new file mode 100644 index 0000000..ab76fe7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_common_unix.go @@ -0,0 +1,90 @@ +// +build solaris linux freebsd + +package daemon + +import ( + "net" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/pflag" +) + +// CommonUnixConfig defines configuration of a docker daemon that is +// common across Unix platforms. +type CommonUnixConfig struct { + ExecRoot string `json:"exec-root,omitempty"` + ContainerdAddr string `json:"containerd,omitempty"` + Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` + DefaultRuntime string `json:"default-runtime,omitempty"` +} + +type commonUnixBridgeConfig struct { + DefaultIP net.IP `json:"ip,omitempty"` + IP string `json:"bip,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` +} + +// InstallCommonUnixFlags adds command-line options to the top-level flag parser for +// the current process that are common across Unix platforms. +func (config *Config) InstallCommonUnixFlags(flags *pflag.FlagSet) { + config.Runtimes = make(map[string]types.Runtime) + + flags.StringVarP(&config.SocketGroup, "group", "G", "docker", "Group for the unix socket") + flags.StringVar(&config.bridgeConfig.IP, "bip", "", "Specify network bridge IP") + flags.StringVarP(&config.bridgeConfig.Iface, "bridge", "b", "", "Attach containers to a network bridge") + flags.StringVar(&config.bridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), "default-gateway", "Container default gateway IPv4 address") + flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), "default-gateway-v6", "Container default gateway IPv6 address") + flags.BoolVar(&config.bridgeConfig.InterContainerCommunication, "icc", true, "Enable inter-container communication") + flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), "ip", "Default IP when binding container ports") + flags.Var(runconfigopts.NewNamedRuntimeOpt("runtimes", &config.Runtimes, stockRuntimeName), "add-runtime", "Register an additional OCI compatible runtime") + flags.StringVar(&config.DefaultRuntime, "default-runtime", stockRuntimeName, "Default OCI runtime for containers") + +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (config *Config) GetRuntime(name string) *types.Runtime { + config.reloadLock.Lock() + defer config.reloadLock.Unlock() + if rt, ok := config.Runtimes[name]; ok { + return &rt + } + return nil +} + +// GetDefaultRuntimeName returns the current default runtime +func (config *Config) GetDefaultRuntimeName() string { + config.reloadLock.Lock() + rt := config.DefaultRuntime + config.reloadLock.Unlock() + + return rt +} + +// GetAllRuntimes returns a copy of the runtimes map +func (config *Config) GetAllRuntimes() map[string]types.Runtime { + config.reloadLock.Lock() + rts := config.Runtimes + config.reloadLock.Unlock() + return rts +} + +// GetExecRoot returns the user configured Exec-root +func (config *Config) GetExecRoot() string { + return config.ExecRoot +} + +// GetInitPath returns the configure docker-init path +func (config *Config) GetInitPath() string { + config.reloadLock.Lock() + defer config.reloadLock.Unlock() + if config.InitPath != "" { + return config.InitPath + } + return DefaultInitBinary +} diff --git a/vendor/github.com/docker/docker/daemon/config_experimental.go b/vendor/github.com/docker/docker/daemon/config_experimental.go new file mode 100644 index 0000000..963a51e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_experimental.go @@ -0,0 +1,8 @@ +package daemon + +import ( + "github.com/spf13/pflag" +) + +func (config *Config) attachExperimentalFlags(cmd *pflag.FlagSet) { +} diff --git a/vendor/github.com/docker/docker/daemon/config_solaris.go b/vendor/github.com/docker/docker/daemon/config_solaris.go new file mode 100644 index 0000000..bc18ccd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_solaris.go @@ -0,0 +1,47 @@ +package daemon + +import ( + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/system/volatile/docker/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExec = "zones" +) + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig +} + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig + + // Fields below here are platform specific. + commonUnixBridgeConfig +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +func (config *Config) InstallFlags(flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(flags) + + // Then install flags common to unix platforms + config.InstallCommonUnixFlags(flags) + + // Then platform-specific install flags + config.attachExperimentalFlags(flags) +} + +func (config *Config) isSwarmCompatible() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_test.go b/vendor/github.com/docker/docker/daemon/config_test.go new file mode 100644 index 0000000..90f6a12 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_test.go @@ -0,0 +1,229 @@ +package daemon + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/spf13/pflag" +) + +func TestDaemonConfigurationNotFound(t *testing.T) { + _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("expected does not exist error, got %v", err) + } +} + +func TestDaemonBrokenConfiguration(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"Debug": tru`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatalf("expected error, got %v", err) + } +} + +func TestParseClusterAdvertiseSettings(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ClusterSettings not supported on Solaris\n") + } + _, err := parseClusterAdvertiseSettings("something", "") + if err != errDiscoveryDisabled { + t.Fatalf("expected discovery disabled error, got %v\n", err) + } + + _, err = parseClusterAdvertiseSettings("", "something") + if err == nil { + t.Fatalf("expected discovery store error, got %v\n", err) + } + + _, err = parseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") + if err != nil { + t.Fatal(err) + } +} + +func TestFindConfigurationConflicts(t *testing.T) { + config := map[string]interface{}{"authorization-plugins": "foobar"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.String("authorization-plugins", "", "") + assert.NilError(t, flags.Set("authorization-plugins", "asdf")) + + assert.Error(t, + findConfigurationConflicts(config, flags), + "authorization-plugins: (from flag: asdf, from file: foobar)") +} + +func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { + config := map[string]interface{}{"hosts": []string{"qwer"}} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var hosts []string + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") + assert.NilError(t, flags.Set("host", "tcp://127.0.0.1:4444")) + assert.NilError(t, flags.Set("host", "unix:///var/run/docker.sock")) + + assert.Error(t, findConfigurationConflicts(config, flags), "hosts") +} + +func TestDaemonConfigurationMergeConflicts(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"debug": true}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.Bool("debug", false, "") + flags.Set("debug", "false") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "debug") { + t.Fatalf("expected debug conflict, got %v", err) + } +} + +func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("tlscacert", "", "") + flags.Set("tlscacert", "~/.docker/ca.pem") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "tlscacert") { + t.Fatalf("expected tlscacert conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { + config := map[string]interface{}{"tls-verify": "true"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.Bool("tlsverify", false, "") + err := findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { + t.Fatalf("expected tls-verify conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { + var hosts []string + config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} + flags := pflag.NewFlagSet("base", pflag.ContinueOnError) + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, nil), "host", "H", "") + + err := findConfigurationConflicts(config, flags) + if err != nil { + t.Fatal(err) + } + + flags.Set("host", "unix:///var/run/docker.sock") + err = findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { + t.Fatalf("expected hosts conflict, got %v", err) + } +} + +func TestValidateConfiguration(t *testing.T) { + c1 := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one"}, + }, + } + + err := ValidateConfiguration(c1) + if err == nil { + t.Fatal("expected error, got nil") + } + + c2 := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one=two"}, + }, + } + + err = ValidateConfiguration(c2) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + + c3 := &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1"}, + }, + } + + err = ValidateConfiguration(c3) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + + c4 := &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1o"}, + }, + } + + err = ValidateConfiguration(c4) + if err == nil { + t.Fatal("expected error, got nil") + } + + c5 := &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c"}, + }, + } + + err = ValidateConfiguration(c5) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + + c6 := &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"123456"}, + }, + } + + err = ValidateConfiguration(c6) + if err == nil { + t.Fatal("expected error, got nil") + } +} diff --git a/vendor/github.com/docker/docker/daemon/config_unix.go b/vendor/github.com/docker/docker/daemon/config_unix.go new file mode 100644 index 0000000..d095788 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_unix.go @@ -0,0 +1,104 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + + runconfigopts "github.com/docker/docker/runconfig/opts" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig + + // Fields below here are platform specific. + CgroupParent string `json:"cgroup-parent,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"` + CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"` + OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` + Init bool `json:"init,omitempty"` + InitPath string `json:"init-path,omitempty"` + SeccompProfile string `json:"seccomp-profile,omitempty"` +} + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig + + // These fields are common to all unix platforms. + commonUnixBridgeConfig + + // Fields below here are platform specific. + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-masq,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + UserlandProxyPath string `json:"userland-proxy-path,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` +} + +// InstallFlags adds flags to the pflag.FlagSet to configure the daemon +func (config *Config) InstallFlags(flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(flags) + + // Then install flags common to unix platforms + config.InstallCommonUnixFlags(flags) + + config.Ulimits = make(map[string]*units.Ulimit) + + // Then platform-specific install flags + flags.BoolVar(&config.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support") + flags.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), "default-ulimit", "Default ulimits for containers") + flags.BoolVar(&config.bridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules") + flags.BoolVar(&config.bridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward") + flags.BoolVar(&config.bridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading") + flags.BoolVar(&config.bridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking") + flags.StringVar(&config.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") + flags.StringVar(&config.bridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs") + flags.BoolVar(&config.bridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic") + flags.StringVar(&config.bridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary") + flags.BoolVar(&config.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header") + flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header") + flags.StringVar(&config.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers") + flags.StringVar(&config.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces") + flags.StringVar(&config.ContainerdAddr, "containerd", "", "Path to containerd socket") + flags.BoolVar(&config.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running") + flags.IntVar(&config.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon") + flags.BoolVar(&config.Init, "init", false, "Run an init in the container to forward signals and reap processes") + flags.StringVar(&config.InitPath, "init-path", "", "Path to the docker-init binary") + flags.Int64Var(&config.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&config.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&config.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile") + + config.attachExperimentalFlags(flags) +} + +func (config *Config) isSwarmCompatible() error { + if config.ClusterStore != "" || config.ClusterAdvertise != "" { + return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + } + if config.LiveRestoreEnabled { + return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_unix_test.go b/vendor/github.com/docker/docker/daemon/config_unix_test.go new file mode 100644 index 0000000..86c16f5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_unix_test.go @@ -0,0 +1,80 @@ +// +build !windows + +package daemon + +import ( + "io/ioutil" + "testing" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + cc, err := MergeDaemonConfigurations(c, nil, configFile) + if err != nil { + t.Fatal(err) + } + if !cc.Debug { + t.Fatalf("expected %v, got %v\n", true, cc.Debug) + } + if !cc.AutoRestart { + t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) + } + if cc.LogConfig.Type != "syslog" { + t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + } + + if configValue, OK := cc.LogConfig.Config["tag"]; !OK { + t.Fatal("expected syslog config attributes, got nil\n") + } else { + if configValue != "test_tag" { + t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) + } + } + + if cc.Ulimits == nil { + t.Fatal("expected default ulimit config, got nil\n") + } else { + if _, OK := cc.Ulimits["nofile"]; OK { + if cc.Ulimits["nofile"].Name != "nofile" || + cc.Ulimits["nofile"].Hard != 2048 || + cc.Ulimits["nofile"].Soft != 1024 { + t.Fatalf("expected default ulimit name, hard and soft are nofile, 2048, 1024, got %s, %d, %d\n", cc.Ulimits["nofile"].Name, cc.Ulimits["nofile"].Hard, cc.Ulimits["nofile"].Soft) + } + } else { + t.Fatal("expected default ulimit name nofile, got nil\n") + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/config_windows.go b/vendor/github.com/docker/docker/daemon/config_windows.go new file mode 100644 index 0000000..df59dcf --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_windows.go @@ -0,0 +1,71 @@ +package daemon + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile string + defaultGraph = filepath.Join(os.Getenv("programdata"), "docker") +) + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig +} + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker daemon -e windows` +type Config struct { + CommonConfig + + // Fields below here are platform specific. (There are none presently + // for the Windows daemon.) +} + +// InstallFlags adds flags to the pflag.FlagSet to configure the daemon +func (config *Config) InstallFlags(flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(flags) + + // Then platform-specific install flags. + flags.StringVar(&config.bridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.StringVarP(&config.bridgeConfig.Iface, "bridge", "b", "", "Attach containers to a virtual switch") + flags.StringVarP(&config.SocketGroup, "group", "G", "", "Users or groups that can access the named pipe") +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (config *Config) GetRuntime(name string) *types.Runtime { + return nil +} + +// GetInitPath returns the configure docker-init path +func (config *Config) GetInitPath() string { + return "" +} + +// GetDefaultRuntimeName returns the current default runtime +func (config *Config) GetDefaultRuntimeName() string { + return stockRuntimeName +} + +// GetAllRuntimes returns a copy of the runtimes map +func (config *Config) GetAllRuntimes() map[string]types.Runtime { + return map[string]types.Runtime{} +} + +// GetExecRoot returns the user configured Exec-root +func (config *Config) GetExecRoot() string { + return "" +} + +func (config *Config) isSwarmCompatible() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_windows_test.go b/vendor/github.com/docker/docker/daemon/config_windows_test.go new file mode 100644 index 0000000..4a7b95c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_windows_test.go @@ -0,0 +1,59 @@ +// +build windows + +package daemon + +import ( + "io/ioutil" + "testing" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + cc, err := MergeDaemonConfigurations(c, nil, configFile) + if err != nil { + t.Fatal(err) + } + if !cc.Debug { + t.Fatalf("expected %v, got %v\n", true, cc.Debug) + } + if !cc.AutoRestart { + t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) + } + if cc.LogConfig.Type != "syslog" { + t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + } + + if configValue, OK := cc.LogConfig.Config["tag"]; !OK { + t.Fatal("expected syslog config attributes, got nil\n") + } else { + if configValue != "test_tag" { + t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/container.go b/vendor/github.com/docker/docker/daemon/container.go new file mode 100644 index 0000000..2a44800 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container.go @@ -0,0 +1,282 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" +) + +// GetContainer looks for a container using the provided information, which could be +// one of the following inputs from the caller: +// - A full container ID, which will exact match a container in daemon's list +// - A container name, which will only exact match via the GetByName() function +// - A partial container ID prefix (e.g. short ID) of any length that is +// unique enough to only return a single container object +// If none of these searches succeed, an error is returned +func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { + if len(prefixOrName) == 0 { + return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied")) + } + + if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { + // prefix is an exact match to a full container ID + return containerByID, nil + } + + // GetByName will match only an exact name provided; we ignore errors + if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { + // prefix is an exact match to a full container Name + return containerByName, nil + } + + containerID, indexError := daemon.idIndex.Get(prefixOrName) + if indexError != nil { + // When truncindex defines an error type, use that instead + if indexError == truncindex.ErrNotExist { + err := fmt.Errorf("No such container: %s", prefixOrName) + return nil, errors.NewRequestNotFoundError(err) + } + return nil, indexError + } + return daemon.containers.Get(containerID), nil +} + +// Exists returns a true if a container of the specified ID or name exists, +// false otherwise. +func (daemon *Daemon) Exists(id string) bool { + c, _ := daemon.GetContainer(id) + return c != nil +} + +// IsPaused returns a bool indicating if the specified container is paused. +func (daemon *Daemon) IsPaused(id string) bool { + c, _ := daemon.GetContainer(id) + return c.State.IsPaused() +} + +func (daemon *Daemon) containerRoot(id string) string { + return filepath.Join(daemon.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup. +func (daemon *Daemon) load(id string) (*container.Container, error) { + container := daemon.newBaseContainer(id) + + if err := container.FromDisk(); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + return container, nil +} + +// Register makes a container object usable by the daemon as +func (daemon *Daemon) Register(c *container.Container) error { + // Attach to stdout and stderr + if c.Config.OpenStdin { + c.StreamConfig.NewInputPipes() + } else { + c.StreamConfig.NewNopInputPipe() + } + + daemon.containers.Add(c.ID, c) + daemon.idIndex.Add(c.ID) + + return nil +} + +func (daemon *Daemon) newContainer(name string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { + var ( + id string + err error + noExplicitName = name == "" + ) + id, name, err = daemon.generateIDAndName(name) + if err != nil { + return nil, err + } + + if hostConfig.NetworkMode.IsHost() { + if config.Hostname == "" { + config.Hostname, err = os.Hostname() + if err != nil { + return nil, err + } + } + } else { + daemon.generateHostname(id, config) + } + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + base := daemon.newBaseContainer(id) + base.Created = time.Now().UTC() + base.Managed = managed + base.Path = entrypoint + base.Args = args //FIXME: de-duplicate from config + base.Config = config + base.HostConfig = &containertypes.HostConfig{} + base.ImageID = imgID + base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} + base.Name = name + base.Driver = daemon.GraphDriverName() + + return base, err +} + +// GetByName returns a container given a name. +func (daemon *Daemon) GetByName(name string) (*container.Container, error) { + if len(name) == 0 { + return nil, fmt.Errorf("No container name supplied") + } + fullName := name + if name[0] != '/' { + fullName = "/" + name + } + id, err := daemon.nameIndex.Get(fullName) + if err != nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(id) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", id) + } + return e, nil +} + +// newBaseContainer creates a new container with its initial +// configuration based on the root storage from the daemon. +func (daemon *Daemon) newBaseContainer(id string) *container.Container { + return container.NewBaseContainer(id, daemon.containerRoot(id)) +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) { + if len(configEntrypoint) != 0 { + return configEntrypoint[0], append(configEntrypoint[1:], configCmd...) + } + return configCmd[0], configCmd[1:] +} + +func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { + // Generate default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + return parseSecurityOpt(container, hostConfig) +} + +func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + // Register any links from the host config before starting the container + if err := daemon.registerLinks(container, hostConfig); err != nil { + return err + } + + // make sure links is not nil + // this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links + if hostConfig.Links == nil { + hostConfig.Links = []string{} + } + + container.HostConfig = hostConfig + return container.ToDisk() +} + +// verifyContainerSettings performs validation of the hostconfig and config +// structures. +func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + + // First perform verification of settings common across all platforms. + if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } + } + + // Validate if Env contains empty variable or not (e.g., ``, `=foo`) + for _, env := range config.Env { + if _, err := opts.ValidateEnv(env); err != nil { + return nil, err + } + } + } + + if hostConfig == nil { + return nil, nil + } + + if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy") + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("invalid port specification: %q", portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort) + } + } + } + + p := hostConfig.RestartPolicy + + switch p.Name { + case "always", "unless-stopped", "no": + if p.MaximumRetryCount != 0 { + return nil, fmt.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name) + } + case "on-failure": + if p.MaximumRetryCount < 0 { + return nil, fmt.Errorf("maximum retry count cannot be negative") + } + case "": + // do nothing + default: + return nil, fmt.Errorf("invalid restart policy '%s'", p.Name) + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config, update) +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations.go b/vendor/github.com/docker/docker/daemon/container_operations.go new file mode 100644 index 0000000..c302506 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations.go @@ -0,0 +1,1049 @@ +package daemon + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" +) + +var ( + // ErrRootFSReadOnly is returned when a container + // rootfs is marked readonly. + ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") + getPortMapInfo = container.GetSandboxPortMapInfo +) + +func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { + var ( + sboxOptions []libnetwork.SandboxOption + err error + dns []string + dnsSearch []string + dnsOptions []string + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), + libnetwork.OptionDomainname(container.Config.Domainname)) + + if container.HostConfig.NetworkMode.IsHost() { + sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) + if len(container.HostConfig.ExtraHosts) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) + } + if len(container.HostConfig.DNS) == 0 && len(daemon.configStore.DNS) == 0 && + len(container.HostConfig.DNSSearch) == 0 && len(daemon.configStore.DNSSearch) == 0 && + len(container.HostConfig.DNSOptions) == 0 && len(daemon.configStore.DNSOptions) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) + } + } else { + // OptionUseExternalKey is mandatory for userns support. + // But optional for non-userns support + sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) + } + + if err = setupPathsAndSandboxOptions(container, &sboxOptions); err != nil { + return nil, err + } + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemon.configStore.DNS) > 0 { + dns = daemon.configStore.DNS + } + + for _, d := range dns { + sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) + } + + if len(container.HostConfig.DNSSearch) > 0 { + dnsSearch = container.HostConfig.DNSSearch + } else if len(daemon.configStore.DNSSearch) > 0 { + dnsSearch = daemon.configStore.DNSSearch + } + + for _, ds := range dnsSearch { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) + } + + if len(container.HostConfig.DNSOptions) > 0 { + dnsOptions = container.HostConfig.DNSOptions + } else if len(daemon.configStore.DNSOptions) > 0 { + dnsOptions = daemon.configStore.DNSOptions + } + + for _, ds := range dnsOptions { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) + } + + if container.NetworkSettings.SecondaryIPAddresses != nil { + name := container.Config.Hostname + if container.Config.Domainname != "" { + name = name + "." + container.Config.Domainname + } + + for _, a := range container.NetworkSettings.SecondaryIPAddresses { + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) + } + } + + for _, extraHost := range container.HostConfig.ExtraHosts { + // allow IPv6 addresses in extra hosts; only split on first ":" + parts := strings.SplitN(extraHost, ":", 2) + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + sboxOptions = append(sboxOptions, + libnetwork.OptionPortMapping(pbList), + libnetwork.OptionExposedPorts(exposeList)) + + // Legacy Link feature is supported only for the default bridge network. + // return if this call to build join options is not for default bridge network + // Legacy Link is only supported by docker run --link + bridgeSettings, ok := container.NetworkSettings.Networks[defaultNetName] + if !ok || bridgeSettings.EndpointSettings == nil { + return sboxOptions, nil + } + + if bridgeSettings.EndpointID == "" { + return sboxOptions, nil + } + + var ( + childEndpoints, parentEndpoints []string + cEndpointID string + ) + + children := daemon.children(container) + for linkAlias, child := range children { + if !isLinkable(child) { + return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) + } + _, alias := path.Split(linkAlias) + // allow access to the linked container via the alias, real name, and container hostname + aliasList := alias + " " + child.Config.Hostname + // only add the name if alias isn't equal to the name + if alias != child.Name[1:] { + aliasList = aliasList + " " + child.Name[1:] + } + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks[defaultNetName].IPAddress)) + cEndpointID = child.NetworkSettings.Networks[defaultNetName].EndpointID + if cEndpointID != "" { + childEndpoints = append(childEndpoints, cEndpointID) + } + } + + for alias, parent := range daemon.parents(container) { + if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { + continue + } + + _, alias = path.Split(alias) + logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) + sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( + parent.ID, + alias, + bridgeSettings.IPAddress, + )) + if cEndpointID != "" { + parentEndpoints = append(parentEndpoints, cEndpointID) + } + } + + linkOptions := options.Generic{ + netlabel.GenericData: options.Generic{ + "ParentEndpoints": parentEndpoints, + "ChildEndpoints": childEndpoints, + }, + } + + sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) + return sboxOptions, nil +} + +func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings) error { + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)} + } + + if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + for s := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(s) + if err != nil { + continue + } + + if sn.Name() == n.Name() { + // Avoid duplicate config + return nil + } + if !containertypes.NetworkMode(sn.Type()).IsPrivate() || + !containertypes.NetworkMode(n.Type()).IsPrivate() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(sn.Name()).IsNone() || + containertypes.NetworkMode(n.Name()).IsNone() { + return runconfig.ErrConflictNoNetwork + } + } + + if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + + return nil +} + +func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.BuildEndpointInfo(n, ep); err != nil { + return err + } + + if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() { + container.NetworkSettings.Bridge = daemon.configStore.bridgeConfig.Iface + } + + return nil +} + +// UpdateNetwork is used to update the container's network (e.g. when linked containers +// get removed/unlinked). +func (daemon *Daemon) updateNetwork(container *container.Container) error { + var ( + start = time.Now() + ctrl = daemon.netController + sid = container.NetworkSettings.SandboxID + ) + + sb, err := ctrl.SandboxByID(sid) + if err != nil { + return fmt.Errorf("error locating sandbox id %s: %v", sid, err) + } + + // Find if container is connected to the default bridge network + var n libnetwork.Network + for name := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(name) + if err != nil { + continue + } + if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() { + n = sn + break + } + } + + if n == nil { + // Not connected to the default bridge network; Nothing to do + return nil + } + + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return fmt.Errorf("Update network failed: %v", err) + } + + if err := sb.Refresh(options...); err != nil { + return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err) + } + + networkActions.WithValues("update").UpdateSince(start) + + return nil +} + +func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrName string, epConfig *networktypes.EndpointSettings) (libnetwork.Network, *networktypes.NetworkingConfig, error) { + n, err := daemon.FindNetwork(idOrName) + if err != nil { + // We should always be able to find the network for a + // managed container. + if container.Managed { + return nil, nil, err + } + } + + // If we found a network and if it is not dynamically created + // we should never attempt to attach to that network here. + if n != nil { + if container.Managed || !n.Info().Dynamic() { + return n, nil, nil + } + } + + var addresses []string + if epConfig != nil && epConfig.IPAMConfig != nil { + if epConfig.IPAMConfig.IPv4Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv4Address) + } + + if epConfig.IPAMConfig.IPv6Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv6Address) + } + } + + var ( + config *networktypes.NetworkingConfig + retryCount int + ) + + for { + // In all other cases, attempt to attach to the network to + // trigger attachment in the swarm cluster manager. + if daemon.clusterProvider != nil { + var err error + config, err = daemon.clusterProvider.AttachNetwork(idOrName, container.ID, addresses) + if err != nil { + return nil, nil, err + } + } + + n, err = daemon.FindNetwork(idOrName) + if err != nil { + if daemon.clusterProvider != nil { + if err := daemon.clusterProvider.DetachNetwork(idOrName, container.ID); err != nil { + logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) + } + } + + // Retry network attach again if we failed to + // find the network after successfull + // attachment because the only reason that + // would happen is if some other container + // attached to the swarm scope network went down + // and removed the network while we were in + // the process of attaching. + if config != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + if retryCount >= 5 { + return nil, nil, fmt.Errorf("could not find network %s after successful attachment", idOrName) + } + retryCount++ + continue + } + } + + return nil, nil, err + } + + break + } + + // This container has attachment to a swarm scope + // network. Update the container network settings accordingly. + container.NetworkSettings.HasSwarmEndpoint = true + return n, config, nil +} + +// updateContainerNetworkSettings update the network settings +func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) { + var n libnetwork.Network + + mode := container.HostConfig.NetworkMode + if container.Config.NetworkDisabled || mode.IsContainer() { + return + } + + networkName := mode.NetworkName() + if mode.IsDefault() { + networkName = daemon.netController.Config().Daemon.DefaultNetwork + } + + if mode.IsUserDefined() { + var err error + + n, err = daemon.FindNetwork(networkName) + if err == nil { + networkName = n.Name() + } + } + + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{} + } + + if len(endpointsConfig) > 0 { + if container.NetworkSettings.Networks == nil { + container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) + } + + for name, epConfig := range endpointsConfig { + container.NetworkSettings.Networks[name] = &network.EndpointSettings{ + EndpointSettings: epConfig, + } + } + } + + if container.NetworkSettings.Networks == nil { + container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) + container.NetworkSettings.Networks[networkName] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + + // Convert any settings added by client in default name to + // engine's default network name key + if mode.IsDefault() { + if nConf, ok := container.NetworkSettings.Networks[mode.NetworkName()]; ok { + container.NetworkSettings.Networks[networkName] = nConf + delete(container.NetworkSettings.Networks, mode.NetworkName()) + } + } + + if !mode.IsUserDefined() { + return + } + // Make sure to internally store the per network endpoint config by network name + if _, ok := container.NetworkSettings.Networks[networkName]; ok { + return + } + + if n != nil { + if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok { + container.NetworkSettings.Networks[networkName] = nwConfig + delete(container.NetworkSettings.Networks, n.ID()) + return + } + } +} + +func (daemon *Daemon) allocateNetwork(container *container.Container) error { + start := time.Now() + controller := daemon.netController + + if daemon.netController == nil { + return nil + } + + // Cleanup any stale sandbox left over due to ungraceful daemon shutdown + if err := controller.SandboxDestroy(container.ID); err != nil { + logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) + } + + updateSettings := false + if len(container.NetworkSettings.Networks) == 0 { + if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { + return nil + } + + daemon.updateContainerNetworkSettings(container, nil) + updateSettings = true + } + + // always connect default network first since only default + // network mode support link and we need do some setting + // on sandbox initialize for link, but the sandbox only be initialized + // on first network connecting. + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok { + cleanOperationalData(nConf) + if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil { + return err + } + + } + + // the intermediate map is necessary because "connectToNetwork" modifies "container.NetworkSettings.Networks" + networks := make(map[string]*network.EndpointSettings) + for n, epConf := range container.NetworkSettings.Networks { + if n == defaultNetName { + continue + } + + networks[n] = epConf + } + + for netName, epConf := range networks { + cleanOperationalData(epConf) + if err := daemon.connectToNetwork(container, netName, epConf.EndpointSettings, updateSettings); err != nil { + return err + } + } + + if err := container.WriteHostConfig(); err != nil { + return err + } + networkActions.WithValues("allocate").UpdateSince(start) + return nil +} + +func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox { + var sb libnetwork.Sandbox + daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { + if s.ContainerID() == container.ID { + sb = s + return true + } + return false + }) + return sb +} + +// hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration +func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { + return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0) +} + +// User specified ip address is acceptable only for networks with user specified subnets. +func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { + if n == nil || epConfig == nil { + return nil + } + if !hasUserDefinedIPAddress(epConfig) { + return nil + } + _, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig() + for _, s := range []struct { + ipConfigured bool + subnetConfigs []*libnetwork.IpamConf + }{ + { + ipConfigured: len(epConfig.IPAMConfig.IPv4Address) > 0, + subnetConfigs: nwIPv4Configs, + }, + { + ipConfigured: len(epConfig.IPAMConfig.IPv6Address) > 0, + subnetConfigs: nwIPv6Configs, + }, + } { + if s.ipConfigured { + foundSubnet := false + for _, cfg := range s.subnetConfigs { + if len(cfg.PreferredPool) > 0 { + foundSubnet = true + break + } + } + if !foundSubnet { + return runconfig.ErrUnsupportedNetworkNoSubnetAndIP + } + } + } + + return nil +} + +// cleanOperationalData resets the operational data from the passed endpoint settings +func cleanOperationalData(es *network.EndpointSettings) { + es.EndpointID = "" + es.Gateway = "" + es.IPAddress = "" + es.IPPrefixLen = 0 + es.IPv6Gateway = "" + es.GlobalIPv6Address = "" + es.GlobalIPv6PrefixLen = 0 + es.MacAddress = "" + if es.IPAMOperational { + es.IPAMConfig = nil + } +} + +func (daemon *Daemon) updateNetworkConfig(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error { + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + if hasUserDefinedIPAddress(endpointConfig) && !enableIPOnPredefinedNetwork() { + return runconfig.ErrUnsupportedNetworkAndIP + } + if endpointConfig != nil && len(endpointConfig.Aliases) > 0 && !container.EnableServiceDiscoveryOnDefaultNetwork() { + return runconfig.ErrUnsupportedNetworkAndAlias + } + } else { + addShortID := true + shortID := stringid.TruncateID(container.ID) + for _, alias := range endpointConfig.Aliases { + if alias == shortID { + addShortID = false + break + } + } + if addShortID { + endpointConfig.Aliases = append(endpointConfig.Aliases, shortID) + } + } + + if err := validateNetworkingConfig(n, endpointConfig); err != nil { + return err + } + + if updateSettings { + if err := daemon.updateNetworkSettings(container, n, endpointConfig); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { + start := time.Now() + if container.HostConfig.NetworkMode.IsContainer() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(idOrName).IsBridge() && + daemon.configStore.DisableBridge { + container.Config.NetworkDisabled = true + return nil + } + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + + n, config, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig) + if err != nil { + return err + } + if n == nil { + return nil + } + + var operIPAM bool + if config != nil { + if epConfig, ok := config.EndpointsConfig[n.Name()]; ok { + if endpointConfig.IPAMConfig == nil || + (endpointConfig.IPAMConfig.IPv4Address == "" && + endpointConfig.IPAMConfig.IPv6Address == "" && + len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) { + operIPAM = true + } + + // copy IPAMConfig and NetworkID from epConfig via AttachNetwork + endpointConfig.IPAMConfig = epConfig.IPAMConfig + endpointConfig.NetworkID = epConfig.NetworkID + } + } + + err = daemon.updateNetworkConfig(container, n, endpointConfig, updateSettings) + if err != nil { + return err + } + + controller := daemon.netController + sb := daemon.getNetworkSandbox(container) + createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb, daemon.configStore.DNS) + if err != nil { + return err + } + + endpointName := strings.TrimPrefix(container.Name, "/") + ep, err := n.CreateEndpoint(endpointName, createOptions...) + if err != nil { + return err + } + defer func() { + if err != nil { + if e := ep.Delete(false); e != nil { + logrus.Warnf("Could not rollback container connection to network %s", idOrName) + } + } + }() + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + IPAMOperational: operIPAM, + } + if _, ok := container.NetworkSettings.Networks[n.ID()]; ok { + delete(container.NetworkSettings.Networks, n.ID()) + } + + if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { + return err + } + + if sb == nil { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err = controller.NewSandbox(container.ID, options...) + if err != nil { + return err + } + + container.UpdateSandboxNetworkSettings(sb) + } + + joinOptions, err := container.BuildJoinOptions(n) + if err != nil { + return err + } + + if err := ep.Join(sb, joinOptions...); err != nil { + return err + } + + if !container.Managed { + // add container name/alias to DNS + if err := daemon.ActivateContainerServiceBinding(container.Name); err != nil { + return fmt.Errorf("Activate container service binding for %s failed: %v", container.Name, err) + } + } + + if err := container.UpdateJoinInfo(n, ep); err != nil { + return fmt.Errorf("Updating join info failed: %v", err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sb) + + daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID}) + networkActions.WithValues("connect").UpdateSince(start) + return nil +} + +// ForceEndpointDelete deletes an endpoint from a network forcefully +func (daemon *Daemon) ForceEndpointDelete(name string, networkName string) error { + n, err := daemon.FindNetwork(networkName) + if err != nil { + return err + } + + ep, err := n.EndpointByName(name) + if err != nil { + return err + } + return ep.Delete(true) +} + +func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { + var ( + ep libnetwork.Endpoint + sbox libnetwork.Sandbox + ) + + s := func(current libnetwork.Endpoint) bool { + epInfo := current.Info() + if epInfo == nil { + return false + } + if sb := epInfo.Sandbox(); sb != nil { + if sb.ContainerID() == container.ID { + ep = current + sbox = sb + return true + } + } + return false + } + n.WalkEndpoints(s) + + if ep == nil && force { + epName := strings.TrimPrefix(container.Name, "/") + ep, err := n.EndpointByName(epName) + if err != nil { + return err + } + return ep.Delete(force) + } + + if ep == nil { + return fmt.Errorf("container %s is not connected to the network", container.ID) + } + + if err := ep.Leave(sbox); err != nil { + return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sbox) + + if err := ep.Delete(false); err != nil { + return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) + } + + delete(container.NetworkSettings.Networks, n.Name()) + + if daemon.clusterProvider != nil && n.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(n.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", n.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(n.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", n.ID(), err) + } + } + } + + return nil +} + +func (daemon *Daemon) initializeNetworking(container *container.Container) error { + var err error + + if container.HostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + initializeNetworkingPaths(container, nc) + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.HostConfig.NetworkMode.IsHost() { + if container.Config.Hostname == "" { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + } + } + + if err := daemon.allocateNetwork(container); err != nil { + return err + } + + return container.BuildHostnameFile() +} + +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { + nc, err := daemon.GetContainer(connectedContainerID) + if err != nil { + return nil, err + } + if containerID == nc.ID { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) + return nil, derr.NewRequestConflictError(err) + } + if nc.IsRestarting() { + return nil, errContainerIsRestarting(connectedContainerID) + } + return nc, nil +} + +func (daemon *Daemon) releaseNetwork(container *container.Container) { + start := time.Now() + if daemon.netController == nil { + return + } + if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } + + sid := container.NetworkSettings.SandboxID + settings := container.NetworkSettings.Networks + container.NetworkSettings.Ports = nil + + if sid == "" || len(settings) == 0 { + return + } + + var networks []libnetwork.Network + for n, epSettings := range settings { + if nw, err := daemon.FindNetwork(n); err == nil { + networks = append(networks, nw) + } + + if epSettings.EndpointSettings == nil { + continue + } + + cleanOperationalData(epSettings) + } + + sb, err := daemon.netController.SandboxByID(sid) + if err != nil { + logrus.Warnf("error locating sandbox id %s: %v", sid, err) + return + } + + if err := sb.Delete(); err != nil { + logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) + } + + for _, nw := range networks { + if daemon.clusterProvider != nil && nw.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(nw.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", nw.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(nw.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", nw.ID(), err) + } + } + } + + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(nw, "disconnect", attributes) + } + networkActions.WithValues("release").UpdateSince(start) +} + +func errRemovalContainer(containerID string) error { + return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) +} + +// ConnectToNetwork connects a container to a network +func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + if !container.Running { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + + n, err := daemon.FindNetwork(idOrName) + if err == nil && n != nil { + if err := daemon.updateNetworkConfig(container, n, endpointConfig, true); err != nil { + return err + } + } else { + container.NetworkSettings.Networks[idOrName] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + } else if !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else { + if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { + return err + } + } + if err := container.ToDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + return nil +} + +// DisconnectFromNetwork disconnects container from network n. +func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { + n, err := daemon.FindNetwork(networkName) + if !container.Running || (err != nil && force) { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + // In case networkName is resolved we will use n.Name() + // this will cover the case where network id is passed. + if n != nil { + networkName = n.Name() + } + if _, ok := container.NetworkSettings.Networks[networkName]; !ok { + return fmt.Errorf("container %s is not connected to the network %s", container.ID, networkName) + } + delete(container.NetworkSettings.Networks, networkName) + } else if err == nil && !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else if err == nil { + if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + if err := daemon.disconnectFromNetwork(container, n, false); err != nil { + return err + } + } else { + return err + } + + if err := container.ToDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + + if n != nil { + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) + } + return nil +} + +// ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response +func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + return fmt.Errorf("network sandbox does not exist for container %s", containerName) + } + return sb.EnableService() +} + +// DeactivateContainerServiceBinding remove this container fromload balancer active rotation, and DNS response +func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + return fmt.Errorf("network sandbox does not exist for container %s", containerName) + } + return sb.DisableService() +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_solaris.go b/vendor/github.com/docker/docker/daemon/container_operations_solaris.go new file mode 100644 index 0000000..1653948 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations_solaris.go @@ -0,0 +1,46 @@ +// +build solaris + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_unix.go b/vendor/github.com/docker/docker/daemon/container_operations_unix.go new file mode 100644 index 0000000..2296045 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations_unix.go @@ -0,0 +1,281 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/cloudflare/cfssl/log" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/links" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/pkg/errors" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + var env []string + children := daemon.children(container) + + bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if bridgeSettings == nil || bridgeSettings.EndpointSettings == nil { + return nil, nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if childBridgeSettings == nil || childBridgeSettings.EndpointSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) + } + + link := links.NewLink( + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + env = append(env, link.ToEnv()...) + } + + return env, nil +} + +func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.IpcMode.Container() + c, err := daemon.GetContainer(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + } + if c.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return c, nil +} + +func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.PidMode.Container() + c, err := daemon.GetContainer(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join PID of a non running container: %s", containerID) + } + if c.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return c, nil +} + +func (daemon *Daemon) setupIpcDirs(c *container.Container) error { + var err error + + c.ShmPath, err = c.ShmResourcePath() + if err != nil { + return err + } + + if c.HostConfig.IpcMode.IsContainer() { + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + c.ShmPath = ic.ShmPath + } else if c.HostConfig.IpcMode.IsHost() { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + } else { + rootUID, rootGID := daemon.GetRemappedUIDGID() + if !c.HasMountFor("/dev/shm") { + shmPath, err := c.ShmResourcePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + return err + } + + shmSize := container.DefaultSHMSize + if c.HostConfig.ShmSize != 0 { + shmSize = c.HostConfig.ShmSize + } + shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) + if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + return err + } + } + + } + + return nil +} + +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + defer func() { + if setupErr != nil { + // cleanup + _ = detachMounted(localMountPath) + + if err := os.RemoveAll(localMountPath); err != nil { + log.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + // retrieve possible remapped range start for root UID, GID + rootUID, rootGID := daemon.GetRemappedUIDGID() + // create tmpfs + if err := idtools.MkdirAllAs(localMountPath, 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating secret local mount path") + } + tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootUID, rootGID) + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to setup secret mount") + } + + for _, s := range c.SecretReferences { + if c.SecretStore == nil { + return fmt.Errorf("secret store is not initialized") + } + + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + return fmt.Errorf("secret target type is not a file target") + } + + targetPath := filepath.Clean(s.File.Name) + // ensure that the target is a filename only; no paths allowed + if targetPath != filepath.Base(targetPath) { + return fmt.Errorf("error creating secret: secret must not be a path") + } + + fPath := filepath.Join(localMountPath, targetPath) + if err := idtools.MkdirAllAs(filepath.Dir(fPath), 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating secret mount path") + } + + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret := c.SecretStore.Get(s.SecretID) + if secret == nil { + return fmt.Errorf("unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + + uid, err := strconv.Atoi(s.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(s.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootUID+uid, rootGID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for secret") + } + } + + // remount secrets ro + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to remount secret dir as readonly") + } + + return nil +} + +func killProcessDirectly(container *container.Container) error { + if _, err := container.WaitStop(10 * time.Second); err != nil { + // Ensure that we don't kill ourselves + if pid := container.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + if err != syscall.ESRCH { + return err + } + e := errNoSuchProcess{pid, 9} + logrus.Debug(e) + return e + } + } + } + return nil +} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return true +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + var err error + + container.HostsPath, err = container.GetRootResourcePath("hosts") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) + + container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { + container.HostnamePath = nc.HostnamePath + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_windows.go b/vendor/github.com/docker/docker/daemon/container_operations_windows.go new file mode 100644 index 0000000..d05f251 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations_windows.go @@ -0,0 +1,59 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +// getSize returns real size & virtual size +func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + // TODO Windows + return 0, 0 +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work +// against containers which have volumes. You will still be able to cp +// to somewhere on the container drive, but not to any mounted volumes +// inside the container. Without this fix, docker cp is broken to any +// container which has a volume, regardless of where the file is inside the +// container. +func (daemon *Daemon) mountVolumes(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func isLinkable(child *container.Container) bool { + return false +} + +func enableIPOnPredefinedNetwork() bool { + return true +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +} diff --git a/vendor/github.com/docker/docker/daemon/create.go b/vendor/github.com/docker/docker/daemon/create.go new file mode 100644 index 0000000..c71d14e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create.go @@ -0,0 +1,290 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + volumestore "github.com/docker/docker/volume/store" + "github.com/opencontainers/runc/libcontainer/label" +) + +// CreateManagedContainer creates a container that is managed by a Service +func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, true) +} + +// ContainerCreate creates a regular container +func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, false) +} + +func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (containertypes.ContainerCreateCreatedBody, error) { + start := time.Now() + if params.Config == nil { + return containertypes.ContainerCreateCreatedBody{}, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + if params.HostConfig == nil { + params.HostConfig = &containertypes.HostConfig{} + } + err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + container, err := daemon.create(params, managed) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + } + containerActions.WithValues("create").UpdateSince(start) + + return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil +} + +// Create creates a new container from the given configuration with a given name. +func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) { + var ( + container *container.Container + img *image.Image + imgID image.ID + err error + ) + + if params.Config.Image != "" { + img, err = daemon.GetImage(params.Config.Image) + if err != nil { + return nil, err + } + + if runtime.GOOS == "solaris" && img.OS != "solaris " { + return nil, errors.New("Platform on which parent image was created is not Solaris") + } + imgID = img.ID() + } + + if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { + return nil, err + } + + if err := daemon.mergeAndVerifyLogConfig(¶ms.HostConfig.LogConfig); err != nil { + return nil, err + } + + if container, err = daemon.newContainer(params.Name, params.Config, params.HostConfig, imgID, managed); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.cleanupContainer(container, true, true); err != nil { + logrus.Errorf("failed to cleanup container on create error: %v", err) + } + } + }() + + if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { + return nil, err + } + + container.HostConfig.StorageOpt = params.HostConfig.StorageOpt + + // Set RWLayer for container after mount labels have been set + if err := daemon.setRWLayer(container); err != nil { + return nil, err + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.CheckpointDir(), 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { + return nil, err + } + + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { + return nil, err + } + + var endpointsConfigs map[string]*networktypes.EndpointSettings + if params.NetworkingConfig != nil { + endpointsConfigs = params.NetworkingConfig.EndpointsConfig + } + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. + container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + daemon.updateContainerNetworkSettings(container, endpointsConfigs) + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving new container to disk: %v", err) + return nil, err + } + if err := daemon.Register(container); err != nil { + return nil, err + } + daemon.LogContainerEvent(container, "create") + return container, nil +} + +func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode, privileged bool) ([]string, error) { + if ipcMode.IsHost() || pidMode.IsHost() || privileged { + return label.DisableSecOpt(), nil + } + + var ipcLabel []string + var pidLabel []string + ipcContainer := ipcMode.Container() + pidContainer := pidMode.Container() + if ipcContainer != "" { + c, err := daemon.GetContainer(ipcContainer) + if err != nil { + return nil, err + } + ipcLabel = label.DupSecOpt(c.ProcessLabel) + if pidContainer == "" { + return ipcLabel, err + } + } + if pidContainer != "" { + c, err := daemon.GetContainer(pidContainer) + if err != nil { + return nil, err + } + + pidLabel = label.DupSecOpt(c.ProcessLabel) + if ipcContainer == "" { + return pidLabel, err + } + } + + if pidLabel != nil && ipcLabel != nil { + for i := 0; i < len(pidLabel); i++ { + if pidLabel[i] != ipcLabel[i] { + return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") + } + } + return pidLabel, nil + } + return nil, nil +} + +func (daemon *Daemon) setRWLayer(container *container.Container) error { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = img.RootFS.ChainID() + } + + rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.getLayerInit(), container.HostConfig.StorageOpt) + + if err != nil { + return err + } + container.RWLayer = rwLayer + + return nil +} + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the Engine API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts, labels) + if err != nil { + if volumestore.IsNameConflict(err) { + return nil, fmt.Errorf("A volume named %s already exists. Choose a different volume name.", name) + } + return nil, err + } + + daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + return apiV, nil +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { + if img != nil && img.Config != nil { + if err := merge(config, img.Config); err != nil { + return err + } + } + // Reset the Entrypoint if it is [""] + if len(config.Entrypoint) == 1 && config.Entrypoint[0] == "" { + config.Entrypoint = nil + } + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return fmt.Errorf("No command specified") + } + return nil +} + +// Checks if the client set configurations for more than one network while creating a container +// Also checks if the IPAMConfig is valid +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 { + return nil + } + if len(nwConfig.EndpointsConfig) == 1 { + for _, v := range nwConfig.EndpointsConfig { + if v != nil && v.IPAMConfig != nil { + if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address)) + } + if v.IPAMConfig.IPv6Address != "" { + n := net.ParseIP(v.IPAMConfig.IPv6Address) + // if the address is an invalid network address (ParseIP == nil) or if it is + // an IPv4 address (To4() != nil), then it is an invalid IPv6 address + if n == nil || n.To4() != nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address)) + } + } + } + } + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) + return apierrors.NewBadRequestError(err) +} diff --git a/vendor/github.com/docker/docker/daemon/create_unix.go b/vendor/github.com/docker/docker/daemon/create_unix.go new file mode 100644 index 0000000..2fe5c98 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/runc/libcontainer/label" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + if err := daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + + for spec := range config.Volumes { + name := stringid.GenerateNonCryptoID() + destination := filepath.Clean(spec) + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(destination) { + continue + } + path, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil && !stat.IsDir() { + return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + } + + v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { + return err + } + + container.AddMountPointWithVolume(destination, v, true) + } + return daemon.populateVolumes(container) +} + +// populateVolumes copies data from the container's rootfs into the volume for non-binds. +// this is only called when the container is created. +func (daemon *Daemon) populateVolumes(c *container.Container) error { + for _, mnt := range c.MountPoints { + if mnt.Volume == nil { + continue + } + + if mnt.Type != mounttypes.TypeVolume || !mnt.CopyData { + continue + } + + logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) + if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/create_windows.go b/vendor/github.com/docker/docker/daemon/create_windows.go new file mode 100644 index 0000000..bbf0dbe --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create_windows.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "fmt" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + // Make sure the host config has the default daemon isolation if not specified by caller. + if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { + hostConfig.Isolation = daemon.defaultIsolation + } + + for spec := range config.Volumes { + + mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver) + if err != nil { + return fmt.Errorf("Unrecognised volume spec: %v", err) + } + + // If the mountpoint doesn't have a name, generate one. + if len(mp.Name) == 0 { + mp.Name = stringid.GenerateNonCryptoID() + } + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(mp.Destination) { + continue + } + + volumeDriver := hostConfig.VolumeDriver + + // Create the volume in the volume driver. If it doesn't exist, + // a new one will be created. + v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + // FIXME Windows: This code block is present in the Linux version and + // allows the contents to be copied to the container FS prior to it + // being started. However, the function utilizes the FollowSymLinkInScope + // path which does not cope with Windows volume-style file paths. There + // is a separate effort to resolve this (@swernli), so this processing + // is deferred for now. A case where this would be useful is when + // a dockerfile includes a VOLUME statement, but something is created + // in that directory during the dockerfile processing. What this means + // on Windows for TP5 is that in that scenario, the contents will not + // copied, but that's (somewhat) OK as HCS will bomb out soon after + // at it doesn't support mapped directories which have contents in the + // destination path anyway. + // + // Example for repro later: + // FROM windowsservercore + // RUN mkdir c:\myvol + // RUN copy c:\windows\system32\ntdll.dll c:\myvol + // VOLUME "c:\myvol" + // + // Then + // docker build -t vol . + // docker run -it --rm vol cmd <-- This is where HCS will error out. + // + // // never attempt to copy existing content in a container FS to a shared volume + // if v.DriverName() == volume.DefaultDriverName { + // if err := container.CopyImagePathContent(v, mp.Destination); err != nil { + // return err + // } + // } + + // Add it to container.MountPoints + container.AddMountPointWithVolume(mp.Destination, v, mp.RW) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon.go b/vendor/github.com/docker/docker/daemon/daemon.go new file mode 100644 index 0000000..55a66ae --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon.go @@ -0,0 +1,1321 @@ +// Package daemon exposes the functions that occur on the host server +// that the Docker daemon is running. +// +// In implementing the various functions of the daemon, there is often +// a method-specific struct for configuring the runtime behavior. +package daemon + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/plugin" + "github.com/docker/libnetwork/cluster" + // register graph drivers + _ "github.com/docker/docker/daemon/graphdriver/register" + dmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/migrate/v1" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libtrust" + "github.com/pkg/errors" +) + +var ( + // DefaultRuntimeBinary is the default runtime to be used by + // containerd if none is specified + DefaultRuntimeBinary = "docker-runc" + + // DefaultInitBinary is the name of the default init binary + DefaultInitBinary = "docker-init" + + errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") +) + +// Daemon holds information about the Docker daemon. +type Daemon struct { + ID string + repository string + containers container.Store + execCommands *exec.Store + referenceStore reference.Store + downloadManager *xfer.LayerDownloadManager + uploadManager *xfer.LayerUploadManager + distributionMetadataStore dmetadata.Store + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *Config + statsCollector *statsCollector + defaultLogConfig containertypes.LogConfig + RegistryService registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + volumes *store.VolumeStore + discoveryWatcher discoveryReloader + root string + seccompEnabled bool + shutdown bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + layerStore layer.Store + imageStore image.Store + PluginStore *plugin.Store // todo: remove + pluginManager *plugin.Manager + nameIndex *registrar.Registrar + linkIndex *linkIndex + containerd libcontainerd.Client + containerdRemote libcontainerd.Remote + defaultIsolation containertypes.Isolation // Default isolation mode on Windows + clusterProvider cluster.Provider + cluster Cluster + + seccompProfile []byte + seccompProfilePath string +} + +// HasExperimental returns whether the experimental features of the daemon are enabled or not +func (daemon *Daemon) HasExperimental() bool { + if daemon.configStore != nil && daemon.configStore.Experimental { + return true + } + return false +} + +func (daemon *Daemon) restore() error { + var ( + currentDriver = daemon.GraphDriverName() + containers = make(map[string]*container.Container) + ) + + logrus.Info("Loading containers: start.") + + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if err != nil { + logrus.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) + if err != nil { + logrus.Errorf("Failed to load container mount %v: %v", id, err) + continue + } + container.RWLayer = rwlayer + logrus.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + removeContainers := make(map[string]*container.Container) + restartContainers := make(map[*container.Container]chan struct{}) + activeSandboxes := make(map[string]interface{}) + for id, c := range containers { + if err := daemon.registerName(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } + if err := daemon.Register(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } + + // verify that all volumes valid and have been migrated from the pre-1.7 layout + if err := daemon.verifyVolumesInfo(c); err != nil { + // don't skip the container due to error + logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err) + } + + // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. + // We should rewrite it to use the daemon defaults. + // Fixes https://github.com/docker/docker/issues/22536 + if c.HostConfig.LogConfig.Type == "" { + if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil { + logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err) + continue + } + } + } + + var migrateLegacyLinks bool // Not relevant on Windows + var wg sync.WaitGroup + var mapLock sync.Mutex + for _, c := range containers { + wg.Add(1) + go func(c *container.Container) { + defer wg.Done() + if err := backportMountSpec(c); err != nil { + logrus.Error("Failed to migrate old mounts to use new spec format") + } + + if c.IsRunning() || c.IsPaused() { + c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking + if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil { + logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err) + return + } + + // we call Mount and then Unmount to get BaseFs of the container + if err := daemon.Mount(c); err != nil { + // The mount is unlikely to fail. However, in case mount fails + // the container should be allowed to restore here. Some functionalities + // (like docker exec -u user) might be missing but container is able to be + // stopped/restarted/removed. + // See #29365 for related information. + // The error is only logged here. + logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) + } else { + // if mount success, then unmount it + if err := daemon.Unmount(c); err != nil { + logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) + } + } + + c.ResetRestartManager(false) + if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() { + options, err := daemon.buildSandboxOptions(c) + if err != nil { + logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err) + } + mapLock.Lock() + activeSandboxes[c.NetworkSettings.SandboxID] = options + mapLock.Unlock() + } + + } + // fixme: only if not running + // get list of containers we need to restart + if !c.IsRunning() && !c.IsPaused() { + // Do not autostart containers which + // has endpoints in a swarm scope + // network yet since the cluster is + // not initialized yet. We will start + // it after the cluster is + // initialized. + if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { + mapLock.Lock() + restartContainers[c] = make(chan struct{}) + mapLock.Unlock() + } else if c.HostConfig != nil && c.HostConfig.AutoRemove { + mapLock.Lock() + removeContainers[c.ID] = c + mapLock.Unlock() + } + } + + if c.RemovalInProgress { + // We probably crashed in the middle of a removal, reset + // the flag. + // + // We DO NOT remove the container here as we do not + // know if the user had requested for either the + // associated volumes, network links or both to also + // be removed. So we put the container in the "dead" + // state and leave further processing up to them. + logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) + c.ResetRemovalInProgress() + c.SetDead() + c.ToDisk() + } + + // if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated + if c.HostConfig != nil && c.HostConfig.Links == nil { + migrateLegacyLinks = true + } + }(c) + } + wg.Wait() + daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes) + if err != nil { + return fmt.Errorf("Error initializing network controller: %v", err) + } + + // Perform migration of legacy sqlite links (no-op on Windows) + if migrateLegacyLinks { + if err := daemon.sqliteMigration(containers); err != nil { + return err + } + } + + // Now that all the containers are registered, register the links + for _, c := range containers { + if err := daemon.registerLinks(c, c.HostConfig); err != nil { + logrus.Errorf("failed to register link for container %s: %v", c.ID, err) + } + } + + group := sync.WaitGroup{} + for c, notifier := range restartContainers { + group.Add(1) + + go func(c *container.Container, chNotify chan struct{}) { + defer group.Done() + + logrus.Debugf("Starting container %s", c.ID) + + // ignore errors here as this is a best effort to wait for children to be + // running before we try to start the container + children := daemon.children(c) + timeout := time.After(5 * time.Second) + for _, child := range children { + if notifier, exists := restartContainers[child]; exists { + select { + case <-notifier: + case <-timeout: + } + } + } + + // Make sure networks are available before starting + daemon.waitForNetworks(c) + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Errorf("Failed to start container %s: %s", c.ID, err) + } + close(chNotify) + }(c, notifier) + + } + group.Wait() + + removeGroup := sync.WaitGroup{} + for id := range removeContainers { + removeGroup.Add(1) + go func(cid string) { + if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("Failed to remove container %s: %s", cid, err) + } + removeGroup.Done() + }(id) + } + removeGroup.Wait() + + // any containers that were started above would already have had this done, + // however we need to now prepare the mountpoints for the rest of the containers as well. + // This shouldn't cause any issue running on the containers that already had this run. + // This must be run after any containers with a restart policy so that containerized plugins + // can have a chance to be running before we try to initialize them. + for _, c := range containers { + // if the container has restart policy, do not + // prepare the mountpoints since it has been done on restarting. + // This is to speed up the daemon start when a restart container + // has a volume and the volume dirver is not available. + if _, ok := restartContainers[c]; ok { + continue + } else if _, ok := removeContainers[c.ID]; ok { + // container is automatically removed, skip it. + continue + } + + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.prepareMountPoints(c); err != nil { + logrus.Error(err) + } + }(c) + } + + group.Wait() + + logrus.Info("Loading containers: done.") + + return nil +} + +// RestartSwarmContainers restarts any autostart container which has a +// swarm endpoint. +func (daemon *Daemon) RestartSwarmContainers() { + group := sync.WaitGroup{} + for _, c := range daemon.List() { + if !c.IsRunning() && !c.IsPaused() { + // Autostart all the containers which has a + // swarm endpoint now that the cluster is + // initialized. + if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint { + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Error(err) + } + }(c) + } + } + + } + group.Wait() +} + +// waitForNetworks is used during daemon initialization when starting up containers +// It ensures that all of a container's networks are available before the daemon tries to start the container. +// In practice it just makes sure the discovery service is available for containers which use a network that require discovery. +func (daemon *Daemon) waitForNetworks(c *container.Container) { + if daemon.discoveryWatcher == nil { + return + } + // Make sure if the container has a network that requires discovery that the discovery service is available before starting + for netName := range c.NetworkSettings.Networks { + // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready + // Most likely this is because the K/V store used for discovery is in a container and needs to be started + if _, err := daemon.netController.NetworkByName(netName); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + continue + } + // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host + // FIXME: why is this slow??? + logrus.Debugf("Container %s waiting for network to be ready", c.Name) + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(60 * time.Second): + } + return + } + } +} + +func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.children(c) +} + +// parents returns the names of the parent containers of the container +// with the given name. +func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.parents(c) +} + +func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { + if err == registrar.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } + return err + } + daemon.linkIndex.link(parent, child, fullName) + return nil +} + +// DaemonJoinsCluster informs the daemon has joined the cluster and provides +// the handler to query the cluster component +func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) { + daemon.setClusterProvider(clusterProvider) +} + +// DaemonLeavesCluster informs the daemon has left the cluster +func (daemon *Daemon) DaemonLeavesCluster() { + // Daemon is in charge of removing the attachable networks with + // connected containers when the node leaves the swarm + daemon.clearAttachableNetworks() + daemon.setClusterProvider(nil) +} + +// setClusterProvider sets a component for querying the current cluster state. +func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) { + daemon.clusterProvider = clusterProvider + // call this in a goroutine to allow netcontroller handle this event async + // and not block if it is in the middle of talking with cluster + go daemon.netController.SetClusterProvider(clusterProvider) +} + +// IsSwarmCompatible verifies if the current daemon +// configuration is compatible with the swarm mode +func (daemon *Daemon) IsSwarmCompatible() error { + if daemon.configStore == nil { + return nil + } + return daemon.configStore.isSwarmCompatible() +} + +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. +func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure that we have a correct root key limit for launching containers. + if err := ModifyRootKeyLimit(); err != nil { + logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) + } + + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if !platformSupported { + return nil, errSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := setupDaemonProcess(config); err != nil { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := tempDir(config.Root, rootUID, rootGID) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + d := &Daemon{configStore: config} + // Ensure the daemon is properly shutdown if there is a failure during + // initialization + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + if err := d.setupSeccompProfile(); err != nil { + return nil, err + } + + // Set the default isolation mode (only applicable on Windows) + if err := d.setDefaultIsolation(); err != nil { + return nil, fmt.Errorf("error setting default isolation mode: %v", err) + } + + logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) + + if err := configureMaxThreads(config); err != nil { + logrus.Warnf("Failed to configure golang's threads limit: %v", err) + } + + if err := ensureDefaultAppArmorProfile(); err != nil { + logrus.Errorf(err.Error()) + } + + daemonRepo := filepath.Join(config.Root, "containers") + if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if runtime.GOOS == "windows" { + if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) { + return nil, err + } + } + + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } + + d.RegistryService = registryService + d.PluginStore = plugin.NewStore(config.Root) // todo: remove + // Plugin system initialization should happen before restore. Do not change order. + d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ + Root: filepath.Join(config.Root, "plugins"), + ExecRoot: "/run/docker/plugins", // possibly needs fixing + Store: d.PluginStore, + Executor: containerdRemote, + RegistryService: registryService, + LiveRestoreEnabled: config.LiveRestoreEnabled, + LogPluginEvent: d.LogPluginEvent, // todo: make private + }) + if err != nil { + return nil, errors.Wrap(err, "couldn't create plugin manager") + } + + d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: driverName, + GraphDriverOptions: config.GraphOptions, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + PluginGetter: d.PluginStore, + ExperimentalEnabled: config.Experimental, + }) + if err != nil { + return nil, err + } + + graphDriver := d.layerStore.DriverName() + imageRoot := filepath.Join(config.Root, "image", graphDriver) + + // Configure and validate the kernels security support + if err := configureKernelSecuritySupport(config, graphDriver); err != nil { + return nil, err + } + + logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) + d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads) + logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) + d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) + + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + d.imageStore, err = image.NewImageStore(ifs, d.layerStore) + if err != nil { + return nil, err + } + + // Configure the volumes driver + volStore, err := d.configureVolumes(rootUID, rootGID) + if err != nil { + return nil, err + } + + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + + trustDir := filepath.Join(config.Root, "trust") + + if err := system.MkdirAll(trustDir, 0700); err != nil { + return nil, err + } + + distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) + if err != nil { + return nil, err + } + + eventsService := events.New() + + referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + + migrationStart := time.Now() + if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil { + logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) + } + logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) + + // Discovery is only enabled when the daemon is launched with an address to advertise. When + // initialized, the daemon is registered and we can store the discovery backend as its read-only + if err := d.initDiscovery(config); err != nil { + return nil, err + } + + sysInfo := sysinfo.New(false) + // Check if Devices cgroup is mounted, it is hard requirement for container security, + // on Linux. + if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { + return nil, fmt.Errorf("Devices cgroup isn't mounted") + } + + d.ID = trustKey.PublicKey().KeyID() + d.repository = daemonRepo + d.containers = container.NewMemoryStore() + d.execCommands = exec.NewStore() + d.referenceStore = referenceStore + d.distributionMetadataStore = distributionMetadataStore + d.trustKey = trustKey + d.idIndex = truncindex.NewTruncIndex([]string{}) + d.statsCollector = d.newStatsCollector(1 * time.Second) + d.defaultLogConfig = containertypes.LogConfig{ + Type: config.LogConfig.Type, + Config: config.LogConfig.Config, + } + d.EventsService = eventsService + d.volumes = volStore + d.root = config.Root + d.uidMaps = uidMaps + d.gidMaps = gidMaps + d.seccompEnabled = sysInfo.Seccomp + + d.nameIndex = registrar.NewRegistrar() + d.linkIndex = newLinkIndex() + d.containerdRemote = containerdRemote + + go d.execCommandGC() + + d.containerd, err = containerdRemote.Client(d) + if err != nil { + return nil, err + } + + if err := d.restore(); err != nil { + return nil, err + } + + // FIXME: this method never returns an error + info, _ := d.SystemInfo() + + engineVersion.WithValues( + dockerversion.Version, + dockerversion.GitCommit, + info.Architecture, + info.Driver, + info.KernelVersion, + info.OperatingSystem, + ).Set(1) + engineCpus.Set(float64(info.NCPU)) + engineMemory.Set(float64(info.MemTotal)) + + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + stackDumpDir := config.Root + if execRoot := config.GetExecRoot(); execRoot != "" { + stackDumpDir = execRoot + } + d.setupDumpStackTrap(stackDumpDir) + + return d, nil +} + +func (daemon *Daemon) shutdownContainer(c *container.Container) error { + stopTimeout := c.StopTimeout() + // TODO(windows): Handle docker restart with paused containers + if c.IsPaused() { + // To terminate a process in freezer cgroup, we should send + // SIGTERM to this process then unfreeze it, and the process will + // force to terminate immediately. + logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID) + sig, ok := signal.SignalMap["TERM"] + if !ok { + return fmt.Errorf("System does not support SIGTERM") + } + if err := daemon.kill(c, int(sig)); err != nil { + return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) + } + if err := daemon.containerUnpause(c); err != nil { + return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) + } + if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil { + logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout) + sig, ok := signal.SignalMap["KILL"] + if !ok { + return fmt.Errorf("System does not support SIGKILL") + } + if err := daemon.kill(c, int(sig)); err != nil { + logrus.Errorf("Failed to SIGKILL container %s", c.ID) + } + c.WaitStop(-1 * time.Second) + return err + } + } + // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force + if err := daemon.containerStop(c, stopTimeout); err != nil { + return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) + } + + c.WaitStop(-1 * time.Second) + return nil +} + +// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers, +// and is limited by daemon's ShutdownTimeout. +func (daemon *Daemon) ShutdownTimeout() int { + // By default we use daemon's ShutdownTimeout. + shutdownTimeout := daemon.configStore.ShutdownTimeout + + graceTimeout := 5 + if daemon.containers != nil { + for _, c := range daemon.containers.List() { + if shutdownTimeout >= 0 { + stopTimeout := c.StopTimeout() + if stopTimeout < 0 { + shutdownTimeout = -1 + } else { + if stopTimeout+graceTimeout > shutdownTimeout { + shutdownTimeout = stopTimeout + graceTimeout + } + } + } + } + } + return shutdownTimeout +} + +// Shutdown stops the daemon. +func (daemon *Daemon) Shutdown() error { + daemon.shutdown = true + // Keep mounts and networking running on daemon shutdown if + // we are to keep containers running and restore them. + + if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { + // check if there are any running containers, if none we should do some cleanup + if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + return nil + } + } + + if daemon.containers != nil { + logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout) + daemon.containers.ApplyAll(func(c *container.Container) { + if !c.IsRunning() { + return + } + logrus.Debugf("stopping %s", c.ID) + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + logrus.Debugf("container stopped %s", c.ID) + }) + } + + if daemon.volumes != nil { + if err := daemon.volumes.Shutdown(); err != nil { + logrus.Errorf("Error shutting down volume store: %v", err) + } + } + + if daemon.layerStore != nil { + if err := daemon.layerStore.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v", err) + } + } + + // Shutdown plugins after containers and layerstore. Don't change the order. + daemon.pluginShutdown() + + // trigger libnetwork Stop only if it's initialized + if daemon.netController != nil { + daemon.netController.Stop() + } + + if err := daemon.cleanupMounts(); err != nil { + return err + } + + return nil +} + +// Mount sets container.BaseFS +// (is it not set coming in? why is it unset?) +func (daemon *Daemon) Mount(container *container.Container) error { + dir, err := container.RWLayer.Mount(container.GetMountLabel()) + if err != nil { + return err + } + logrus.Debugf("container mounted via layerStore: %v", dir) + + if container.BaseFS != dir { + // The mount path reported by the graph driver should always be trusted on Windows, since the + // volume path for a given mounted layer may change over time. This should only be an error + // on non-Windows operating systems. + if container.BaseFS != "" && runtime.GOOS != "windows" { + daemon.Unmount(container) + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + daemon.GraphDriverName(), container.ID, container.BaseFS, dir) + } + } + container.BaseFS = dir // TODO: combine these fields + return nil +} + +// Unmount unsets the container base filesystem +func (daemon *Daemon) Unmount(container *container.Container) error { + if err := container.RWLayer.Unmount(); err != nil { + logrus.Errorf("Error unmounting container %s: %s", container.ID, err) + return err + } + + return nil +} + +// V4Subnets returns the IPv4 subnets of networks that are managed by Docker. +func (daemon *Daemon) V4Subnets() []net.IPNet { + var subnets []net.IPNet + + managedNetworks := daemon.netController.Networks() + + for _, managedNetwork := range managedNetworks { + v4Infos, _ := managedNetwork.Info().IpamInfo() + for _, v4Info := range v4Infos { + if v4Info.IPAMData.Pool != nil { + subnets = append(subnets, *v4Info.IPAMData.Pool) + } + } + } + + return subnets +} + +// V6Subnets returns the IPv6 subnets of networks that are managed by Docker. +func (daemon *Daemon) V6Subnets() []net.IPNet { + var subnets []net.IPNet + + managedNetworks := daemon.netController.Networks() + + for _, managedNetwork := range managedNetworks { + _, v6Infos := managedNetwork.Info().IpamInfo() + for _, v6Info := range v6Infos { + if v6Info.IPAMData.Pool != nil { + subnets = append(subnets, *v6Info.IPAMData.Pool) + } + } + } + + return subnets +} + +// GraphDriverName returns the name of the graph driver used by the layer.Store +func (daemon *Daemon) GraphDriverName() string { + return daemon.layerStore.DriverName() +} + +// GetUIDGIDMaps returns the current daemon's user namespace settings +// for the full uid and gid maps which will be applied to containers +// started in this instance. +func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { + return daemon.uidMaps, daemon.gidMaps +} + +// GetRemappedUIDGID returns the current daemon's uid and gid values +// if user namespaces are in use for this daemon instance. If not +// this function will return "real" root values of 0, 0. +func (daemon *Daemon) GetRemappedUIDGID() (int, int) { + uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + return uid, gid +} + +// tempDir returns the default directory to use for temporary files. +func tempDir(rootDir string, rootUID, rootGID int) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) +} + +func (daemon *Daemon) setupInitLayer(initPath string) error { + rootUID, rootGID := daemon.GetRemappedUIDGID() + return initlayer.Setup(initPath, rootUID, rootGID) +} + +func setDefaultMtu(config *Config) { + // do nothing if the config does not have the default 0 value. + if config.Mtu != 0 { + return + } + config.Mtu = defaultNetworkMtu +} + +func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) { + volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID) + if err != nil { + return nil, err + } + + volumedrivers.RegisterPluginGetter(daemon.PluginStore) + + if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { + return nil, fmt.Errorf("local volume driver could not be registered") + } + return store.New(daemon.configStore.Root) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// initDiscovery initializes the discovery watcher for this daemon. +func (daemon *Daemon) initDiscovery(config *Config) error { + advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise) + if err != nil { + if err == errDiscoveryDisabled { + return nil + } + return err + } + + config.ClusterAdvertise = advertise + discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts) + if err != nil { + return fmt.Errorf("discovery initialization failed (%v)", err) + } + + daemon.discoveryWatcher = discoveryWatcher + return nil +} + +// Reload reads configuration changes and modifies the +// daemon according to those changes. +// These are the settings that Reload changes: +// - Daemon labels. +// - Daemon debug log level. +// - Daemon insecure registries. +// - Daemon max concurrent downloads +// - Daemon max concurrent uploads +// - Cluster discovery (reconfigure and restart). +// - Daemon live restore +// - Daemon shutdown timeout (in seconds). +func (daemon *Daemon) Reload(config *Config) (err error) { + + daemon.configStore.reloadLock.Lock() + + attributes := daemon.platformReload(config) + + defer func() { + // we're unlocking here, because + // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() + // holds that lock too. + daemon.configStore.reloadLock.Unlock() + if err == nil { + daemon.LogDaemonEventWithAttributes("reload", attributes) + } + }() + + if err := daemon.reloadClusterDiscovery(config); err != nil { + return err + } + + if config.IsValueSet("labels") { + daemon.configStore.Labels = config.Labels + } + if config.IsValueSet("debug") { + daemon.configStore.Debug = config.Debug + } + if config.IsValueSet("insecure-registries") { + daemon.configStore.InsecureRegistries = config.InsecureRegistries + if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil { + return err + } + } + if config.IsValueSet("live-restore") { + daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled + if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil { + return err + } + } + + // If no value is set for max-concurrent-downloads we assume it is the default value + // We always "reset" as the cost is lightweight and easy to maintain. + if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil { + *daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads + } else { + maxConcurrentDownloads := defaultMaxConcurrentDownloads + daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads + } + logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) + if daemon.downloadManager != nil { + daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads) + } + + // If no value is set for max-concurrent-upload we assume it is the default value + // We always "reset" as the cost is lightweight and easy to maintain. + if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil { + *daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads + } else { + maxConcurrentUploads := defaultMaxConcurrentUploads + daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads + } + logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) + if daemon.uploadManager != nil { + daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) + } + + if config.IsValueSet("shutdown-timeout") { + daemon.configStore.ShutdownTimeout = config.ShutdownTimeout + logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) + } + + // We emit daemon reload event here with updatable configurations + attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) + attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) + + if daemon.configStore.InsecureRegistries != nil { + insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) + if err != nil { + return err + } + attributes["insecure-registries"] = string(insecureRegistries) + } else { + attributes["insecure-registries"] = "[]" + } + + attributes["cluster-store"] = daemon.configStore.ClusterStore + if daemon.configStore.ClusterOpts != nil { + opts, err := json.Marshal(daemon.configStore.ClusterOpts) + if err != nil { + return err + } + attributes["cluster-store-opts"] = string(opts) + } else { + attributes["cluster-store-opts"] = "{}" + } + attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise + + if daemon.configStore.Labels != nil { + labels, err := json.Marshal(daemon.configStore.Labels) + if err != nil { + return err + } + attributes["labels"] = string(labels) + } else { + attributes["labels"] = "[]" + } + + attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) + attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) + attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) + + return nil +} + +func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { + var err error + newAdvertise := daemon.configStore.ClusterAdvertise + newClusterStore := daemon.configStore.ClusterStore + if config.IsValueSet("cluster-advertise") { + if config.IsValueSet("cluster-store") { + newClusterStore = config.ClusterStore + } + newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise) + if err != nil && err != errDiscoveryDisabled { + return err + } + } + + if daemon.clusterProvider != nil { + if err := config.isSwarmCompatible(); err != nil { + return err + } + } + + // check discovery modifications + if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) { + return nil + } + + // enable discovery for the first time if it was not previously enabled + if daemon.discoveryWatcher == nil { + discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts) + if err != nil { + return fmt.Errorf("discovery initialization failed (%v)", err) + } + daemon.discoveryWatcher = discoveryWatcher + } else { + if err == errDiscoveryDisabled { + // disable discovery if it was previously enabled and it's disabled now + daemon.discoveryWatcher.Stop() + } else { + // reload discovery + if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil { + return err + } + } + } + + daemon.configStore.ClusterStore = newClusterStore + daemon.configStore.ClusterOpts = config.ClusterOpts + daemon.configStore.ClusterAdvertise = newAdvertise + + if daemon.netController == nil { + return nil + } + netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) + if err != nil { + logrus.WithError(err).Warnf("failed to get options with network controller") + return nil + } + err = daemon.netController.ReloadConfiguration(netOptions...) + if err != nil { + logrus.Warnf("Failed to reload configuration with network controller: %v", err) + } + + return nil +} + +func isBridgeNetworkDisabled(config *Config) bool { + return config.bridgeConfig.Iface == disableNetworkBridge +} + +func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { + options := []nwconfig.Option{} + if dconfig == nil { + return options, nil + } + + options = append(options, nwconfig.OptionExperimental(dconfig.Experimental)) + options = append(options, nwconfig.OptionDataDir(dconfig.Root)) + options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot())) + + dd := runconfig.DefaultDaemonNetworkMode() + dn := runconfig.DefaultDaemonNetworkMode().NetworkName() + options = append(options, nwconfig.OptionDefaultDriver(string(dd))) + options = append(options, nwconfig.OptionDefaultNetwork(dn)) + + if strings.TrimSpace(dconfig.ClusterStore) != "" { + kv := strings.Split(dconfig.ClusterStore, "://") + if len(kv) != 2 { + return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL") + } + options = append(options, nwconfig.OptionKVProvider(kv[0])) + options = append(options, nwconfig.OptionKVProviderURL(kv[1])) + } + if len(dconfig.ClusterOpts) > 0 { + options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) + } + + if daemon.discoveryWatcher != nil { + options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) + } + + if dconfig.ClusterAdvertise != "" { + options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) + } + + options = append(options, nwconfig.OptionLabels(dconfig.Labels)) + options = append(options, driverOptions(dconfig)...) + + if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 { + options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) + } + + if pg != nil { + options = append(options, nwconfig.OptionPluginGetter(pg)) + } + + return options, nil +} + +func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} + +// GetCluster returns the cluster +func (daemon *Daemon) GetCluster() Cluster { + return daemon.cluster +} + +// SetCluster sets the cluster +func (daemon *Daemon) SetCluster(cluster Cluster) { + daemon.cluster = cluster +} + +func (daemon *Daemon) pluginShutdown() { + manager := daemon.pluginManager + // Check for a valid manager object. In error conditions, daemon init can fail + // and shutdown called, before plugin manager is initialized. + if manager != nil { + manager.Shutdown() + } +} + +// PluginManager returns current pluginManager associated with the daemon +func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method + return daemon.pluginManager +} + +// PluginGetter returns current pluginStore associated with the daemon +func (daemon *Daemon) PluginGetter() *plugin.Store { + return daemon.PluginStore +} + +// CreateDaemonRoot creates the root for the daemon +func CreateDaemonRoot(config *Config) error { + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return err + } + + if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_experimental.go b/vendor/github.com/docker/docker/daemon/daemon_experimental.go new file mode 100644 index 0000000..fb0251d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_experimental.go @@ -0,0 +1,7 @@ +package daemon + +import "github.com/docker/docker/api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux.go b/vendor/github.com/docker/docker/daemon/daemon_linux.go new file mode 100644 index 0000000..9bdf6e2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_linux.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +func (daemon *Daemon) cleanupMountsByID(id string) error { + logrus.Debugf("Cleaning up old mountid %s: start.", id) + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer f.Close() + + return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount) +} + +func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error { + if daemon.root == "" { + return nil + } + var errors []string + + regexps := getCleanPatterns(id) + sc := bufio.NewScanner(reader) + for sc.Scan() { + if fields := strings.Fields(sc.Text()); len(fields) >= 4 { + if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) { + for _, p := range regexps { + if p.MatchString(mnt) { + if err := unmount(mnt); err != nil { + logrus.Error(err) + errors = append(errors, err.Error()) + } + } + } + } + } + } + + if err := sc.Err(); err != nil { + return err + } + + if len(errors) > 0 { + return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) + } + + logrus.Debugf("Cleaning up old mountid %v: done.", id) + return nil +} + +// cleanupMounts umounts shm/mqueue mounts for old containers +func (daemon *Daemon) cleanupMounts() error { + return daemon.cleanupMountsByID("") +} + +func getCleanPatterns(id string) (regexps []*regexp.Regexp) { + var patterns []string + if id == "" { + id = "[0-9a-f]{64}" + patterns = append(patterns, "containers/"+id+"/shm") + } + patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") + for _, p := range patterns { + r, err := regexp.Compile(p) + if err == nil { + regexps = append(regexps, r) + } + } + return +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux_test.go b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go new file mode 100644 index 0000000..c40b13b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go @@ -0,0 +1,104 @@ +// +build linux + +package daemon + +import ( + "strings" + "testing" +) + +const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio +143 142 0:60 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +144 142 0:67 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755 +145 144 0:78 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 +146 144 0:49 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +147 142 0:84 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +148 147 0:86 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 +149 148 0:22 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset +150 148 0:25 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu +151 148 0:27 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuacct +152 148 0:28 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory +153 148 0:29 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices +154 148 0:30 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer +155 148 0:31 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio +156 148 0:32 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event +157 148 0:33 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb +158 148 0:35 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +159 142 8:4 /home/mlaventure/gopath /home/mlaventure/gopath rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +160 142 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +164 142 8:4 /home/mlaventure/gopath/src/github.com/docker/docker /go/src/github.com/docker/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +165 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +166 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +167 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +168 144 0:39 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +169 144 0:12 /14 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +83 147 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +89 142 0:87 / /tmp rw,relatime - tmpfs none rw +97 142 0:60 / /run/docker/netns/default rw,nosuid,nodev,noexec,relatime - proc proc rw +100 160 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data/aufs /var/lib/docker/aufs rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +115 100 0:102 / /var/lib/docker/aufs/mnt/0ecda1c63e5b58b3d89ff380bf646c95cc980252cf0b52466d43619aec7c8432 rw,relatime - aufs none rw,si=573b861dbc01905b,dio +116 160 0:107 / /var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +118 142 0:102 / /run/docker/libcontainerd/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/rootfs rw,relatime - aufs none rw,si=573b861dbc01905b,dio +242 142 0:60 / /run/docker/netns/c3664df2a0f7 rw,nosuid,nodev,noexec,relatime - proc proc rw +120 100 0:122 / /var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d rw,relatime - aufs none rw,si=573b861eb147805b,dio +171 142 0:122 / /run/docker/libcontainerd/e406ff6f3e18516d50e03dbca4de54767a69a403a6f7ec1edc2762812824521e/rootfs rw,relatime - aufs none rw,si=573b861eb147805b,dio +310 142 0:60 / /run/docker/netns/71a18572176b rw,nosuid,nodev,noexec,relatime - proc proc rw +` + +func TestCleanupMounts(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount) + + if unmounted != 1 { + t.Fatalf("Expected to unmount the shm (and the shm only)") + } +} + +func TestCleanupMountsByID(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount) + + if unmounted != 1 { + t.Fatalf("Expected to unmount the auf root (and that only)") + } +} + +func TestNotCleanupMounts(t *testing.T) { + d := &Daemon{ + repository: "", + } + var unmounted bool + unmount := func(target string) error { + unmounted = true + return nil + } + mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` + d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount) + if unmounted { + t.Fatalf("Expected not to clean up /dev/shm") + } +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_solaris.go b/vendor/github.com/docker/docker/daemon/daemon_solaris.go new file mode 100644 index 0000000..2b4d8d0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_solaris.go @@ -0,0 +1,523 @@ +// +build solaris,cgo + +package daemon + +import ( + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/solaris/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + lntypes "github.com/docker/libnetwork/types" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +//#include +import "C" + +const ( + defaultVirtualSwitch = "Virtual Switch" + platformSupported = true + solarisMinCPUShares = 1 + solarisMaxCPUShares = 65535 +) + +func getMemoryResources(config containertypes.Resources) specs.CappedMemory { + memory := specs.CappedMemory{} + + if config.Memory > 0 { + memory.Physical = strconv.FormatInt(config.Memory, 10) + } + + if config.MemorySwap != 0 { + memory.Swap = strconv.FormatInt(config.MemorySwap, 10) + } + + return memory +} + +func getCPUResources(config containertypes.Resources) specs.CappedCPU { + cpu := specs.CappedCPU{} + + if config.CpusetCpus != "" { + cpu.Ncpus = config.CpusetCpus + } + + return cpu +} + +func (daemon *Daemon) cleanupMountsByID(id string) error { + return nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + //Since config.SecurityOpt is specifically defined as a "List of string values to + //customize labels for MLs systems, such as SELinux" + //until we figure out how to map to Trusted Extensions + //this is being disabled for now on Solaris + var ( + labelOpts []string + err error + ) + + if len(config.SecurityOpt) > 0 { + return errors.New("Security options are not supported on Solaris") + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + return nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + // solaris can rely upon checkSystem() below, we don't skew kernel versions + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if hostConfig.CPUShares < 0 { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, solarisMinCPUShares) + hostConfig.CPUShares = solarisMinCPUShares + } else if hostConfig.CPUShares > solarisMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, solarisMaxCPUShares) + hostConfig.CPUShares = solarisMaxCPUShares + } + + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + + if hostConfig.ShmSize != 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return false +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + // NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and + // therefore we will not do that for Docker container either. + if hostConfig.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.") + hostConfig.Memory = 0 + hostConfig.MemorySwap = -1 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") + logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") + hostConfig.MemorySwap = -1 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") + } + // Solaris NOTE: We allow and encourage setting the swap without setting the memory limit. + + if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + hostConfig.MemorySwappiness = nil + } + if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.") + hostConfig.MemoryReservation = 0 + } + if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") + } + if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + hostConfig.KernelMemory = 0 + } + if hostConfig.CPUShares != 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") + logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.") + hostConfig.CPUShares = 0 + } + if hostConfig.CPUShares < 0 { + warnings = append(warnings, "Invalid CPUShares value. Must be positive. Discarding.") + logrus.Warnf("Invalid CPUShares value. Must be positive. Discarding.") + hostConfig.CPUQuota = 0 + } + if hostConfig.CPUShares > 0 && !sysinfo.IsCPUSharesAvailable() { + warnings = append(warnings, "Global zone default scheduling class not FSS. Discarding shares.") + logrus.Warnf("Global zone default scheduling class not FSS. Discarding shares.") + hostConfig.CPUShares = 0 + } + + // Solaris NOTE: Linux does not do negative checking for CPUShares and Quota here. But it makes sense to. + if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") + logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") + if hostConfig.CPUQuota > 0 { + warnings = append(warnings, "Quota will be applied on default period, not period specified.") + logrus.Warnf("Quota will be applied on default period, not period specified.") + } + hostConfig.CPUPeriod = 0 + } + if hostConfig.CPUQuota != 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") + logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") + hostConfig.CPUQuota = 0 + } + if hostConfig.CPUQuota < 0 { + warnings = append(warnings, "Invalid CPUQuota value. Must be positive. Discarding.") + logrus.Warnf("Invalid CPUQuota value. Must be positive. Discarding.") + hostConfig.CPUQuota = 0 + } + if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") + logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") + hostConfig.CpusetCpus = "" + hostConfig.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", hostConfig.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", hostConfig.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", hostConfig.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", hostConfig.CpusetMems, sysInfo.Mems) + } + if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") + logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.") + hostConfig.BlkioWeight = 0 + } + if hostConfig.OomKillDisable != nil && !sysInfo.OomKillDisable { + *hostConfig.OomKillDisable = false + // Don't warn; this is the default setting but only applicable to Linux + } + + if sysInfo.IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") + } + + // Solaris NOTE: We do not allow setting Linux specific options, so check and warn for all of them. + + if hostConfig.CapAdd != nil || hostConfig.CapDrop != nil { + warnings = append(warnings, "Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") + logrus.Warnf("Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") + hostConfig.CapAdd = nil + hostConfig.CapDrop = nil + } + + if hostConfig.GroupAdd != nil { + warnings = append(warnings, "Additional groups unsupported on Solaris.Discarding groups lists.") + logrus.Warnf("Additional groups unsupported on Solaris.Discarding groups lists.") + hostConfig.GroupAdd = nil + } + + if hostConfig.IpcMode != "" { + warnings = append(warnings, "IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") + logrus.Warnf("IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") + hostConfig.IpcMode = "" + } + + if hostConfig.PidMode != "" { + warnings = append(warnings, "PID namespace setting unsupported on Solaris. Running container in host PID namespace.") + logrus.Warnf("PID namespace setting unsupported on Solaris. Running container in host PID namespace.") + hostConfig.PidMode = "" + } + + if hostConfig.Privileged { + warnings = append(warnings, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.") + logrus.Warnf("Privileged mode unsupported on Solaris. Discarding privileged mode setting.") + hostConfig.Privileged = false + } + + if hostConfig.UTSMode != "" { + warnings = append(warnings, "UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") + logrus.Warnf("UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") + hostConfig.UTSMode = "" + } + + if hostConfig.CgroupParent != "" { + warnings = append(warnings, "Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.") + logrus.Warnf("Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.") + hostConfig.CgroupParent = "" + } + + if hostConfig.Ulimits != nil { + warnings = append(warnings, "Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") + logrus.Warnf("Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") + hostConfig.Ulimits = nil + } + + return warnings, nil +} + +// platformReload update configuration with platform specific options +func (daemon *Daemon) platformReload(config *Config) map[string]string { + return map[string]string{} +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + + if config.DefaultRuntime == "" { + config.DefaultRuntime = stockRuntimeName + } + if config.Runtimes == nil { + config.Runtimes = make(map[string]types.Runtime) + } + stockRuntimeOpts := []string{} + config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts} + + // checkSystem validates platform-specific requirements + return nil +} + +func checkSystem() error { + // check OS version for compatibility, ensure running in global zone + var err error + var id C.zoneid_t + + if id, err = C.getzoneid(); err != nil { + return fmt.Errorf("Exiting. Error getting zone id: %+v", err) + } + if int(id) != 0 { + return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone") + } + + v, err := kernel.GetKernelVersion() + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 { + return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. Please upgrade to 5.12.0", v.String()) + } + return err +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + // Initialize default network on "null" + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)); err != nil { + return nil, fmt.Errorf("Error creating default 'null' network: %v", err) + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + bridgeName := bridge.DefaultBridgeName + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + } + + // --ip processing + if config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + } + + var ipamV4Conf *libnetwork.IpamConf + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, _, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(false)) + if err != nil { + return fmt.Errorf("Error creating default 'bridge' network: %v", err) + } + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { + // Solaris has no custom images to register + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + return nil, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + return types.RootFS{} +} + +func setupDaemonProcess(config *Config) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_test.go b/vendor/github.com/docker/docker/daemon/daemon_test.go new file mode 100644 index 0000000..00817bd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_test.go @@ -0,0 +1,627 @@ +// +build !solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/discovery" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/registry" + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/go-connections/nat" +) + +// +// https://github.com/docker/docker/issues/8069 +// + +func TestGetContainer(t *testing.T) { + c1 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", + }, + } + + c2 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", + }, + } + + c3 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", + }, + } + + c4 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + }, + } + + c5 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", + }, + } + + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + store.Add(c3.ID, c3) + store.Add(c4.ID, c4) + store.Add(c5.ID, c5) + + index := truncindex.NewTruncIndex([]string{}) + index.Add(c1.ID) + index.Add(c2.ID) + index.Add(c3.ID) + index.Add(c4.ID) + index.Add(c5.ID) + + daemon := &Daemon{ + containers: store, + idIndex: index, + nameIndex: registrar.NewRegistrar(), + } + + daemon.reserveName(c1.ID, c1.Name) + daemon.reserveName(c2.ID, c2.Name) + daemon.reserveName(c3.ID, c3.Name) + daemon.reserveName(c4.ID, c4.Name) + daemon.reserveName(c5.ID, c5.Name) + + if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { + t.Fatal("Should explicitly match full container IDs") + } + + if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 { + t.Fatal("Should match a partial ID") + } + + if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 { + t.Fatal("Should match a full name") + } + + // c3.Name is a partial match for both c3.ID and c2.ID + if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 { + t.Fatal("Should match a full name even though it collides with another container's ID") + } + + if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 { + t.Fatal("Should match a container where the provided prefix is an exact match to the its name, and is also a prefix for its ID") + } + + if _, err := daemon.GetContainer("3cdbd1"); err == nil { + t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's") + } + + if _, err := daemon.GetContainer("nothing"); err == nil { + t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID") + } +} + +func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { + var err error + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + daemon.volumes, err = store.New(tmp) + if err != nil { + return nil, err + } + + volumesDriver, err := local.New(tmp, 0, 0) + if err != nil { + return nil, err + } + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + + return daemon, nil +} + +func TestValidContainerNames(t *testing.T) { + invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} + validNames := []string{"word-word", "word_word", "1weoid"} + + for _, name := range invalidNames { + if validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is not a valid container name and was returned as valid.", name) + } + } + + for _, name := range validNames { + if !validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is a valid container name and was returned as invalid.", name) + } + } +} + +func TestContainerInitDNS(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-container-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) + if err := os.MkdirAll(containerPath, 0755); err != nil { + t.Fatal(err) + } + + config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, +"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, +"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", +"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", +"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, +"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, +"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", +"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, +"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", +"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", +"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` + + // Container struct only used to retrieve path to config file + container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}} + configPath, err := container.ConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + + hostConfigPath, err := container.HostConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonWithVolumeStore(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerID) + if err != nil { + t.Fatal(err) + } + + if c.HostConfig.DNS == nil { + t.Fatal("Expected container DNS to not be nil") + } + + if c.HostConfig.DNSSearch == nil { + t.Fatal("Expected container DNSSearch to not be nil") + } + + if c.HostConfig.DNSOptions == nil { + t.Fatal("Expected container DNSOptions to not be nil") + } +} + +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + portsImage := make(nat.PortSet) + portsImage[newPortNoError("tcp", "1111")] = struct{}{} + portsImage[newPortNoError("tcp", "2222")] = struct{}{} + configImage := &containertypes.Config{ + ExposedPorts: portsImage, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, + } + + portsUser := make(nat.PortSet) + portsUser[newPortNoError("tcp", "2222")] = struct{}{} + portsUser[newPortNoError("tcp", "3333")] = struct{}{} + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &containertypes.Config{ + ExposedPorts: portsUser, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v != "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &containertypes.Config{ + ExposedPorts: ports, + } + + if err := merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) + } + } +} + +func TestDaemonReloadLabels(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:bar"}, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:baz"}, + valuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } +} + +func TestDaemonReloadInsecureRegistries(t *testing.T) { + daemon := &Daemon{} + // initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000" + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + InsecureRegistries: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // this will be removed when reloading + "docker1.com", + "docker2.com", // this will be removed when reloading + }, + }) + + daemon.configStore = &Config{} + + insecureRegistries := []string{ + "127.0.0.0/8", // this will be kept + "10.10.1.11:5000", // this will be kept + "10.10.1.33:5000", // this will be newly added + "docker1.com", // this will be kept + "docker3.com", // this will be newly added + } + + valuesSets := make(map[string]interface{}) + valuesSets["insecure-registries"] = insecureRegistries + + newConfig := &Config{ + CommonConfig: CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + InsecureRegistries: insecureRegistries, + }, + valuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + // After Reload, daemon.RegistryService will be changed which is useful + // for registry communication in daemon. + registries := daemon.RegistryService.ServiceConfig() + + // After Reload(), newConfig has come to registries.InsecureRegistryCIDRs and registries.IndexConfigs in daemon. + // Then collect registries.InsecureRegistryCIDRs in dataMap. + // When collecting, we need to convert CIDRS into string as a key, + // while the times of key appears as value. + dataMap := map[string]int{} + for _, value := range registries.InsecureRegistryCIDRs { + if _, ok := dataMap[value.String()]; !ok { + dataMap[value.String()] = 1 + } else { + dataMap[value.String()]++ + } + } + + for _, value := range registries.IndexConfigs { + if _, ok := dataMap[value.Name]; !ok { + dataMap[value.Name] = 1 + } else { + dataMap[value.Name]++ + } + } + + // Finally compare dataMap with the original insecureRegistries. + // Each value in insecureRegistries should appear in daemon's insecure registries, + // and each can only appear exactly ONCE. + for _, r := range insecureRegistries { + if value, ok := dataMap[r]; !ok { + t.Fatalf("Expected daemon insecure registry %s, got none", r) + } else if value != 1 { + t.Fatalf("Expected only 1 daemon insecure registry %s, got %d", r, value) + } + } + + // assert if "10.10.1.22:5000" is removed when reloading + if value, ok := dataMap["10.10.1.22:5000"]; ok { + t.Fatalf("Expected no insecure registry of 10.10.1.22:5000, got %d", value) + } + + // assert if "docker2.com" is removed when reloading + if value, ok := dataMap["docker2.com"]; ok { + t.Fatalf("Expected no insecure registry of docker2.com, got %d", value) + } +} + +func TestDaemonReloadNotAffectOthers(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:bar"}, + Debug: true, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:baz"}, + valuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } + debug := daemon.configStore.Debug + if !debug { + t.Fatalf("Expected debug 'enabled', got 'disabled'") + } +} + +func TestDaemonDiscoveryReload(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1", + ClusterAdvertise: "127.0.0.1:3333", + }, + } + + if err := daemon.initDiscovery(daemon.configStore); err != nil { + t.Fatal(err) + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } + + valuesSets := make(map[string]interface{}) + valuesSets["cluster-store"] = "memory://127.0.0.1:2222" + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + valuesSet: valuesSets, + }, + } + + expected = discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + ch, errCh = daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{} + + valuesSet := make(map[string]interface{}) + valuesSet["cluster-store"] = "memory://127.0.0.1:2222" + valuesSet["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + valuesSet: valuesSet, + }, + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1", + }, + } + valuesSets := make(map[string]interface{}) + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &Config{ + CommonConfig: CommonConfig{ + ClusterAdvertise: "127.0.0.1:5555", + valuesSet: valuesSets, + }, + } + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(10 * time.Second): + t.Fatal("Timeout waiting for discovery") + } + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } + +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix.go b/vendor/github.com/docker/docker/daemon/daemon_unix.go new file mode 100644 index 0000000..5b3ffeb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unix.go @@ -0,0 +1,1237 @@ +// +build linux freebsd + +package daemon + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/blkiodev" + pblkiodev "github.com/docker/docker/api/types/blkiodev" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/runconfig" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/options" + lntypes "github.com/docker/libnetwork/types" + "github.com/golang/protobuf/ptypes" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/label" + rsystem "github.com/opencontainers/runc/libcontainer/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/vishvananda/netlink" +) + +const ( + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 + platformSupported = true + // It's not kernel limit, we want this 4M limit to supply a reasonable functional container + linuxMinMemory = 4194304 + // constants for remapped root settings + defaultIDSpecifier string = "default" + defaultRemappedID string = "dockremap" + + // constant for cgroup drivers + cgroupFsDriver = "cgroupfs" + cgroupSystemdDriver = "systemd" +) + +func getMemoryResources(config containertypes.Resources) *specs.Memory { + memory := specs.Memory{} + + if config.Memory > 0 { + limit := uint64(config.Memory) + memory.Limit = &limit + } + + if config.MemoryReservation > 0 { + reservation := uint64(config.MemoryReservation) + memory.Reservation = &reservation + } + + if config.MemorySwap != 0 { + swap := uint64(config.MemorySwap) + memory.Swap = &swap + } + + if config.MemorySwappiness != nil { + swappiness := uint64(*config.MemorySwappiness) + memory.Swappiness = &swappiness + } + + if config.KernelMemory != 0 { + kernelMemory := uint64(config.KernelMemory) + memory.Kernel = &kernelMemory + } + + return &memory +} + +func getCPUResources(config containertypes.Resources) *specs.CPU { + cpu := specs.CPU{} + + if config.CPUShares != 0 { + shares := uint64(config.CPUShares) + cpu.Shares = &shares + } + + if config.CpusetCpus != "" { + cpuset := config.CpusetCpus + cpu.Cpus = &cpuset + } + + if config.CpusetMems != "" { + cpuset := config.CpusetMems + cpu.Mems = &cpuset + } + + if config.NanoCPUs > 0 { + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + period := uint64(100 * time.Millisecond / time.Microsecond) + quota := uint64(config.NanoCPUs) * period / 1e9 + cpu.Period = &period + cpu.Quota = "a + } + + if config.CPUPeriod != 0 { + period := uint64(config.CPUPeriod) + cpu.Period = &period + } + + if config.CPUQuota != 0 { + quota := uint64(config.CPUQuota) + cpu.Quota = "a + } + + if config.CPURealtimePeriod != 0 { + period := uint64(config.CPURealtimePeriod) + cpu.RealtimePeriod = &period + } + + if config.CPURealtimeRuntime != 0 { + runtime := uint64(config.CPURealtimeRuntime) + cpu.RealtimeRuntime = &runtime + } + + return &cpu +} + +func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) { + var stat syscall.Stat_t + var blkioWeightDevices []specs.WeightDevice + + for _, weightDevice := range config.BlkioWeightDevice { + if err := syscall.Stat(weightDevice.Path, &stat); err != nil { + return nil, err + } + weight := weightDevice.Weight + d := specs.WeightDevice{Weight: &weight} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWeightDevices = append(blkioWeightDevices, d) + } + + return blkioWeightDevices, nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + if opt == "no-new-privileges" { + container.NoNewPrivileges = true + continue + } + + var con []string + if strings.Contains(opt, "=") { + con = strings.SplitN(opt, "=", 2) + } else if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 1.14, use `=` instead.") + } + + if len(con) != 2 { + return fmt.Errorf("invalid --security-opt 1: %q", opt) + } + + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + case "seccomp": + container.SeccompProfile = con[1] + default: + return fmt.Errorf("invalid --security-opt 2: %q", opt) + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.ThrottleDevice, error) { + var throttleDevices []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, d := range devs { + if err := syscall.Stat(d.Path, &stat); err != nil { + return nil, err + } + rate := d.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + throttleDevices = append(throttleDevices, d) + } + + return throttleDevices, nil +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + // Docker 1.11 and above doesn't actually run on kernels older than 3.4, + // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). + if !kernel.CheckKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) + } + } + return nil +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + if hostConfig.ShmSize == 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + var err error + opts, err := daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode, hostConfig.Privileged) + if err != nil { + return err + } + hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...) + if hostConfig.MemorySwappiness == nil { + defaultSwappiness := int64(-1) + hostConfig.MemorySwappiness = &defaultSwappiness + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { + warnings := []string{} + + // memory subsystem checks and adjustments + if resources.Memory != 0 && resources.Memory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.Memory = 0 + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") + logrus.Warn("Your kernel does not support swap limit capabilities,or the cgroup is not mounted. Memory limited without swap.") + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") + } + if resources.Memory == 0 && resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") + logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.") + resources.MemorySwappiness = nil + } + if resources.MemorySwappiness != nil { + swappiness := *resources.MemorySwappiness + if swappiness < -1 || swappiness > 100 { + return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) + } + } + if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.MemoryReservation = 0 + } + if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB") + } + if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") + } + if resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.KernelMemory = 0 + } + if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") + } + if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + } + if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + if *resources.OomKillDisable { + warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.") + logrus.Warn("Your kernel does not support OomKillDisable. OomKillDisable discarded.") + } + resources.OomKillDisable = nil + } + + if resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + logrus.Warn("Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + resources.PidsLimit = 0 + } + + // cpu subsystem checks and adjustments + if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") + } + if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) { + return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted") + } + // The highest precision we could get on Linux is 0.001, by setting + // cpu.cfs_period_us=1000ms + // cpu.cfs_quota=1ms + // See the following link for details: + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. + // The error message is 0.01 so that this is consistent with Windows + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + if resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + logrus.Warn("Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + resources.CPUShares = 0 + } + if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + logrus.Warn("Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + resources.CPUPeriod = 0 + } + if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { + return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") + } + if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + logrus.Warn("Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + resources.CPUQuota = 0 + } + if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { + return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") + } + if resources.CPUPercent > 0 { + warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) + logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS) + resources.CPUPercent = 0 + } + + // cpuset subsystem checks and adjustments + if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + logrus.Warn("Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + resources.CpusetCpus = "" + resources.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + logrus.Warn("Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + resources.BlkioWeight = 0 + } + if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") + } + if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { + return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) + } + if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + logrus.Warn("Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} + } + if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded") + resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O read limit in IO or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} + } + + return warnings, nil +} + +func (daemon *Daemon) getCgroupDriver() string { + cgroupDriver := cgroupFsDriver + + if UsingSystemd(daemon.configStore) { + cgroupDriver = cgroupSystemdDriver + } + return cgroupDriver +} + +// getCD gets the raw value of the native.cgroupdriver option, if set. +func getCD(config *Config) string { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + return val + } + return "" +} + +// VerifyCgroupDriver validates native.cgroupdriver +func VerifyCgroupDriver(config *Config) error { + cd := getCD(config) + if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { + return nil + } + return fmt.Errorf("native.cgroupdriver option %s not supported", cd) +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return getCD(config) == cgroupSystemdDriver +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err + } + + w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) + + // no matter err is nil or not, w could have data in itself. + warnings = append(warnings, w...) + + if err != nil { + return warnings, err + } + + if hostConfig.ShmSize < 0 { + return warnings, fmt.Errorf("SHM size can not be less than 0") + } + + if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { + return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) + } + + // ip-forwarding does not affect container with '--net=host' (or '--net=none') + if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warn("IPv4 forwarding is disabled. Networking will not work") + } + // check for various conflicting options with user namespaces + if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { + if hostConfig.Privileged { + return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + } + if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + } + if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + } + } + if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + if hostConfig.Runtime == "" { + hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() + } + + if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { + return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) + } + + return warnings, nil +} + +// platformReload update configuration with platform specific options +func (daemon *Daemon) platformReload(config *Config) map[string]string { + if config.IsValueSet("runtimes") { + daemon.configStore.Runtimes = config.Runtimes + // Always set the default one + daemon.configStore.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + } + + if config.DefaultRuntime != "" { + daemon.configStore.DefaultRuntime = config.DefaultRuntime + } + + // Update attributes + var runtimeList bytes.Buffer + for name, rt := range daemon.configStore.Runtimes { + if runtimeList.Len() > 0 { + runtimeList.WriteRune(' ') + } + runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) + } + + return map[string]string{ + "runtimes": runtimeList.String(), + "default-runtime": daemon.configStore.DefaultRuntime, + } +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + // Check for mutually incompatible config options + if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") + } + if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true") + } + if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { + config.bridgeConfig.EnableIPMasq = false + } + if err := VerifyCgroupDriver(config); err != nil { + return err + } + if config.CgroupParent != "" && UsingSystemd(config) { + if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + + if config.DefaultRuntime == "" { + config.DefaultRuntime = stockRuntimeName + } + if config.Runtimes == nil { + config.Runtimes = make(map[string]types.Runtime) + } + config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + return checkKernel() +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") + if err != nil { + return err + } + mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) + if err != nil { + return err + } + maxThreads := (mtint / 100) * 90 + debug.SetMaxThreads(maxThreads) + logrus.Debugf("Golang's threads limit set to %d", maxThreads) + return nil +} + +// configureKernelSecuritySupport configures and validates security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + if config.EnableSelinuxSupport { + if !selinuxEnabled() { + logrus.Warn("Docker could not enable SELinux on the host system") + } + } else { + selinuxSetDisabled() + } + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + if len(activeSandboxes) > 0 { + logrus.Info("There are old running containers, the network config will not take affect") + return controller, nil + } + + // Initialize default network on "null" + if n, _ := controller.NetworkByName("none"); n == nil { + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { + return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) + } + } + + // Initialize default network on "host" + if n, _ := controller.NetworkByName("host"); n == nil { + if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { + return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) + } + } + + // Clear stale bridge network + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return nil, fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } else { + removeDefaultBridgeInterface() + } + + return controller, nil +} + +func driverOptions(config *Config) []nwconfig.Option { + bridgeConfig := options.Generic{ + "EnableIPForwarding": config.bridgeConfig.EnableIPForward, + "EnableIPTables": config.bridgeConfig.EnableIPTables, + "EnableUserlandProxy": config.bridgeConfig.EnableUserlandProxy, + "UserlandProxyPath": config.bridgeConfig.UserlandProxyPath} + bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} + + dOptions := []nwconfig.Option{} + dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption)) + return dOptions +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + bridgeName := bridge.DefaultBridgeName + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableIPMasquerade: strconv.FormatBool(config.bridgeConfig.EnableIPMasq), + bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + } + + // --ip processing + if config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + } + + var ( + ipamV4Conf *libnetwork.IpamConf + ipamV6Conf *libnetwork.IpamConf + ) + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + } + + var deferIPv6Alloc bool + if config.bridgeConfig.FixedCIDRv6 != "" { + _, fCIDRv6, err := net.ParseCIDR(config.bridgeConfig.FixedCIDRv6) + if err != nil { + return err + } + + // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has + // at least 48 host bits, we need to guarantee the current behavior where the containers' + // IPv6 addresses will be constructed based on the containers' interface MAC address. + // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints + // on this network until after the driver has created the endpoint and returned the + // constructed address. Libnetwork will then reserve this address with the ipam driver. + ones, _ := fCIDRv6.Mask.Size() + deferIPv6Alloc = ones <= 80 + + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.PreferredPool = fCIDRv6.String() + + // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 + // address belongs to the same network, we need to inform libnetwork about it, so + // that it can be reserved with IPAM and it will not be given away to somebody else + for _, nw6 := range nw6List { + if fCIDRv6.Contains(nw6.IP) { + ipamV6Conf.Gateway = nw6.IP.String() + break + } + } + } + + if config.bridgeConfig.DefaultGatewayIPv6 != nil { + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.bridgeConfig.DefaultGatewayIPv6.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + if ipamV6Conf != nil { + v6Conf = append(v6Conf, ipamV6Conf) + } + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionEnableIPv6(config.bridgeConfig.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) + if err != nil { + return fmt.Errorf("Error creating default \"bridge\" network: %v", err) + } + return nil +} + +// Remove default bridge interface if present (--bridge=none use case) +func removeDefaultBridgeInterface() { + if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { + if err := netlink.LinkDel(lnk); err != nil { + logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) + } + } +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return daemon.setupInitLayer +} + +// Parse the remapped root (user namespace) option, which can be one of: +// username - valid username from /etc/passwd +// username:groupname - valid username; valid groupname from /etc/group +// uid - 32-bit unsigned int valid Linux UID value +// uid:gid - uid value; 32-bit unsigned int Linux GID value +// +// If no groupname is specified, and a username is specified, an attempt +// will be made to lookup a gid for that username as a groupname +// +// If names are used, they are verified to exist in passwd/group +func parseRemappedRoot(usergrp string) (string, string, error) { + + var ( + userID, groupID int + username, groupname string + ) + + idparts := strings.Split(usergrp, ":") + if len(idparts) > 2 { + return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) + } + + if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { + // must be a uid; take it as valid + userID = int(uid) + luser, err := idtools.LookupUID(userID) + if err != nil { + return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) + } + username = luser.Name + if len(idparts) == 1 { + // if the uid was numeric and no gid was specified, take the uid as the gid + groupID = userID + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } + } else { + lookupName := idparts[0] + // special case: if the user specified "default", they want Docker to create or + // use (after creation) the "dockremap" user/group for root remapping + if lookupName == defaultIDSpecifier { + lookupName = defaultRemappedID + } + luser, err := idtools.LookupUser(lookupName) + if err != nil && idparts[0] != defaultIDSpecifier { + // error if the name requested isn't the special "dockremap" ID + return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) + } else if err != nil { + // special case-- if the username == "default", then we have been asked + // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} + // ranges will be used for the user and group mappings in user namespaced containers + _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) + if err == nil { + return defaultRemappedID, defaultRemappedID, nil + } + return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) + } + username = luser.Name + if len(idparts) == 1 { + // we only have a string username, and no group specified; look up gid from username as group + group, err := idtools.LookupGroup(lookupName) + if err != nil { + return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) + } + groupID = group.Gid + groupname = group.Name + } + } + + if len(idparts) == 2 { + // groupname or gid is separately specified and must be resolved + // to an unsigned 32-bit gid + if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { + // must be a gid, take it as valid + groupID = int(gid) + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) + } + groupname = lgrp.Name + } else { + // not a number; attempt a lookup + if _, err := idtools.LookupGroup(idparts[1]); err != nil { + return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) + } + groupname = idparts[1] + } + } + return username, groupname, nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + if runtime.GOOS != "linux" && config.RemappedRoot != "" { + return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + } + + // if the daemon was started with remapped root option, parse + // the config option to the int uid,gid values + var ( + uidMaps, gidMaps []idtools.IDMap + ) + if config.RemappedRoot != "" { + username, groupname, err := parseRemappedRoot(config.RemappedRoot) + if err != nil { + return nil, nil, err + } + if username == "root" { + // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op + // effectively + logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") + return uidMaps, gidMaps, nil + } + logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) + // update remapped root setting now that we have resolved them to actual names + config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) + + uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + if err != nil { + return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + } + } + return uidMaps, gidMaps, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) + // so that syscalls executing as non-root, operating on subdirectories of the graph root + // (e.g. mounted layers of a container) can traverse this path. + // The user namespace support will create subdirectories for the remapped root host uid:gid + // pair owned by that same uid:gid pair for proper write access to those needed metadata and + // layer content subtrees. + if _, err := os.Stat(rootDir); err == nil { + // root current exists; verify the access bits are correct by setting them + if err = os.Chmod(rootDir, 0711); err != nil { + return err + } + } else if os.IsNotExist(err) { + // no root exists yet, create it 0711 with root:root ownership + if err := os.MkdirAll(rootDir, 0711); err != nil { + return err + } + } + + // if user namespaces are enabled we will create a subtree underneath the specified root + // with any/all specified remapped root uid/gid options on the daemon creating + // a new subdirectory with ownership set to the remapped uid/gid (so as to allow + // `chdir()` to work for containers namespaced to that uid/gid) + if config.RemappedRoot != "" { + config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) + logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) + // Create the root directory if it doesn't exist + if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) + } + // we also need to verify that any pre-existing directories in the path to + // the graphroot won't block access to remapped root--if any pre-existing directory + // has strict permissions that don't allow "x", container start will fail, so + // better to warn and fail now + dirPath := config.Root + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if !idtools.CanAccess(dirPath, rootUID, rootGID) { + return fmt.Errorf("A subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories.", config.Root) + } + } + } + return nil +} + +// registerLinks writes the links to a file. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := runconfigopts.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.GetContainer(name) + if err != nil { + return fmt.Errorf("Could not get container for %s", name) + } + for child.HostConfig.NetworkMode.IsContainer() { + parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) + child, err = daemon.GetContainer(parts[1]) + if err != nil { + return fmt.Errorf("Could not get container for %s", parts[1]) + } + } + if child.HostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } + if err := daemon.registerLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + return container.WriteHostConfig() +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + s := &types.StatsJSON{} + cgs := stats.CgroupStats + if cgs != nil { + s.BlkioStats = types.BlkioStats{ + IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), + IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), + } + cpu := cgs.CpuStats + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: cpu.CpuUsage.TotalUsage, + PercpuUsage: cpu.CpuUsage.PercpuUsage, + UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, + UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + }, + ThrottlingData: types.ThrottlingData{ + Periods: cpu.ThrottlingData.Periods, + ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, + ThrottledTime: cpu.ThrottlingData.ThrottledTime, + }, + } + mem := cgs.MemoryStats.Usage + s.MemoryStats = types.MemoryStats{ + Usage: mem.Usage, + MaxUsage: mem.MaxUsage, + Stats: cgs.MemoryStats.Stats, + Failcnt: mem.Failcnt, + Limit: mem.Limit, + } + // if the container does not set memory limit, use the machineMemory + if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 { + s.MemoryStats.Limit = daemon.statsCollector.machineMemory + } + if cgs.PidsStats != nil { + s.PidsStats = types.PidsStats{ + Current: cgs.PidsStats.Current, + } + } + } + s.Read, err = ptypes.Timestamp(stats.Timestamp) + if err != nil { + return nil, err + } + return s, nil +} + +// setDefaultIsolation determines the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} + +// setupDaemonProcess sets various settings for the daemon's process +func setupDaemonProcess(config *Config) error { + // setup the daemons oom_score_adj + return setupOOMScoreAdj(config.OOMScoreAdjust) +} + +func setupOOMScoreAdj(score int) error { + f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) + if err != nil { + return err + } + + stringScore := strconv.Itoa(score) + _, err = f.WriteString(stringScore) + if os.IsPermission(err) { + // Setting oom_score_adj does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. + if !rsystem.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) + } + return nil + } + f.Close() + return err +} + +func (daemon *Daemon) initCgroupsPath(path string) error { + if path == "/" || path == "." { + return nil + } + + if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 { + return nil + } + + // Recursively create cgroup to ensure that the system and all parent cgroups have values set + // for the period and runtime as this limits what the children can be set to. + daemon.initCgroupsPath(filepath.Dir(path)) + + _, root, err := cgroups.FindCgroupMountpointAndRoot("cpu") + if err != nil { + return err + } + + path = filepath.Join(root, path) + sysinfo := sysinfo.New(true) + if sysinfo.CPURealtimePeriod && daemon.configStore.CPURealtimePeriod != 0 { + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_period_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimePeriod, 10)), 0700); err != nil { + return err + } + } + if sysinfo.CPURealtimeRuntime && daemon.configStore.CPURealtimeRuntime != 0 { + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_runtime_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimeRuntime, 10)), 0700); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + if daemon.configStore.SeccompProfile != "" { + daemon.seccompProfilePath = daemon.configStore.SeccompProfile + b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile) + if err != nil { + return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err) + } + daemon.seccompProfile = b + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix_test.go b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go new file mode 100644 index 0000000..6250d35 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go @@ -0,0 +1,283 @@ +// +build !windows,!solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" +) + +// Unix test as uses settings which are not available on Windows +func TestAdjustCPUShares(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMinCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMaxCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares) + } + + hostConfig.CPUShares = 0 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +// Unix test as uses settings which are not available on Windows +func TestAdjustCPUSharesNoAdjustment(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != linuxMinCPUShares-1 { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != linuxMaxCPUShares+1 { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1) + } + + hostConfig.CPUShares = 0 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +// Unix test as uses settings which are not available on Windows +func TestParseSecurityOptWithDeprecatedColon(t *testing.T) { + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor=test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test seccomp + sp := "/path/to/seccomp_test.json" + config.SecurityOpt = []string{"seccomp=" + sp} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.SeccompProfile != sp { + t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, container.SeccompProfile) + } + + // test valid label + config.SecurityOpt = []string{"label=user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} + +func TestParseSecurityOpt(t *testing.T) { + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor=test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test seccomp + sp := "/path/to/seccomp_test.json" + config.SecurityOpt = []string{"seccomp=" + sp} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.SeccompProfile != sp { + t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile) + } + + // test valid label + config.SecurityOpt = []string{"label=user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} + +func TestNetworkOptions(t *testing.T) { + daemon := &Daemon{} + dconfigCorrect := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "consul://localhost:8500", + ClusterAdvertise: "192.168.0.1:8000", + }, + } + + if _, err := daemon.networkOptions(dconfigCorrect, nil, nil); err != nil { + t.Fatalf("Expect networkOptions success, got error: %v", err) + } + + dconfigWrong := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "consul://localhost:8500://test://bbb", + }, + } + + if _, err := daemon.networkOptions(dconfigWrong, nil, nil); err == nil { + t.Fatalf("Expected networkOptions error, got nil") + } +} + +func TestMigratePre17Volumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "test-daemon-volumes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + volumeRoot := filepath.Join(rootDir, "volumes") + err = os.MkdirAll(volumeRoot, 0755) + if err != nil { + t.Fatal(err) + } + + containerRoot := filepath.Join(rootDir, "containers") + cid := "1234" + err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755) + + vid := "5678" + vfsPath := filepath.Join(rootDir, "vfs", "dir", vid) + err = os.MkdirAll(vfsPath, 0755) + if err != nil { + t.Fatal(err) + } + + config := []byte(` + { + "ID": "` + cid + `", + "Volumes": { + "/foo": "` + vfsPath + `", + "/bar": "/foo", + "/quux": "/quux" + }, + "VolumesRW": { + "/foo": true, + "/bar": true, + "/quux": false + } + } + `) + + volStore, err := store.New(volumeRoot) + if err != nil { + t.Fatal(err) + } + drv, err := local.New(volumeRoot, 0, 0) + if err != nil { + t.Fatal(err) + } + volumedrivers.Register(drv, volume.DefaultDriverName) + + daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore} + err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600) + if err != nil { + t.Fatal(err) + } + c, err := daemon.load(cid) + if err != nil { + t.Fatal(err) + } + if err := daemon.verifyVolumesInfo(c); err != nil { + t.Fatal(err) + } + + expected := map[string]volume.MountPoint{ + "/foo": {Destination: "/foo", RW: true, Name: vid}, + "/bar": {Source: "/foo", Destination: "/bar", RW: true}, + "/quux": {Source: "/quux", Destination: "/quux", RW: false}, + } + for id, mp := range c.MountPoints { + x, exists := expected[id] + if !exists { + t.Fatal("volume not migrated") + } + if mp.Source != x.Source || mp.Destination != x.Destination || mp.RW != x.RW || mp.Name != x.Name { + t.Fatalf("got unexpected mountpoint, expected: %+v, got: %+v", x, mp) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go new file mode 100644 index 0000000..cb1acf6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux,!freebsd,!windows,!solaris + +package daemon + +const platformSupported = false diff --git a/vendor/github.com/docker/docker/daemon/daemon_windows.go b/vendor/github.com/docker/docker/daemon/daemon_windows.go new file mode 100644 index 0000000..51ad68b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_windows.go @@ -0,0 +1,604 @@ +package daemon + +import ( + "fmt" + "os" + "strings" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/datastore" + winlibnetwork "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + blkiodev "github.com/opencontainers/runc/libcontainer/configs" + "golang.org/x/sys/windows" +) + +const ( + defaultNetworkSpace = "172.16.0.0/12" + platformSupported = true + windowsMinCPUShares = 1 + windowsMaxCPUShares = 10000 + windowsMinCPUPercent = 1 + windowsMaxCPUPercent = 100 + windowsMinCPUCount = 1 +) + +func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) { + return nil, nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + return nil +} + +func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if hostConfig == nil { + return nil + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) { + warnings := []string{} + + if !isHyperv { + // The processor resource controls are mutually exclusive on + // Windows Server Containers, the order of precedence is + // CPUCount first, then CPUShares, and CPUPercent last. + if resources.CPUCount > 0 { + if resources.CPUShares > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + resources.CPUShares = 0 + } + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + resources.CPUPercent = 0 + } + } else if resources.CPUShares > 0 { + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + resources.CPUPercent = 0 + } + } + } + + if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { + return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) + } + if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { + return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) + } + if resources.CPUCount < 0 { + return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") + } + + if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUShares > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") + } + // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. + // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + if len(resources.BlkioDeviceReadBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") + } + if len(resources.BlkioDeviceReadIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") + } + if len(resources.BlkioDeviceWriteBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") + } + if len(resources.BlkioDeviceWriteIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") + } + if resources.BlkioWeight > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") + } + if len(resources.BlkioWeightDevice) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") + } + if resources.CgroupParent != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") + } + if resources.CPUPeriod != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") + } + if resources.CpusetCpus != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") + } + if resources.CpusetMems != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") + } + if resources.KernelMemory != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") + } + if resources.MemoryReservation != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") + } + if resources.MemorySwap != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") + } + if resources.OomKillDisable != nil && *resources.OomKillDisable { + return warnings, fmt.Errorf("invalid option: Windows does not support OomKillDisable") + } + if resources.PidsLimit != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") + } + if len(resources.Ulimits) != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") + } + return warnings, nil +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + + hyperv := daemon.runAsHyperVContainer(hostConfig) + if !hyperv && system.IsWindowsClient() { + // @engine maintainers. This block should not be removed. It partially enforces licensing + // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. + return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + + w, err := verifyContainerResources(&hostConfig.Resources, hyperv) + warnings = append(warnings, w...) + if err != nil { + return warnings, err + } + return warnings, nil +} + +// platformReload update configuration with platform specific options +func (daemon *Daemon) platformReload(config *Config) map[string]string { + return map[string]string{} +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + // Validate the OS version. Note that docker.exe must be manifested for this + // call to return the correct version. + osv := system.GetOSVersion() + if osv.MajorVersion < 10 { + return fmt.Errorf("This version of Windows does not support the docker daemon") + } + if osv.Build < 14393 { + return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") + } + + vmcompute := windows.NewLazySystemDLL("vmcompute.dll") + if vmcompute.Load() != nil { + return fmt.Errorf("Failed to load vmcompute.dll. Ensure that the Containers role is installed.") + } + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + return nil +} + +// configureMaxThreads sets the Go runtime max threads threshold +func configureMaxThreads(config *Config) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, nil, nil) + if err != nil { + return nil, err + } + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") + if err != nil { + return nil, err + } + + // Remove networks not present in HNS + for _, v := range controller.Networks() { + options := v.Info().DriverOptions() + hnsid := options[winlibnetwork.HNSID] + found := false + + for _, v := range hnsresponse { + if v.Id == hnsid { + found = true + break + } + } + + if !found { + // global networks should not be deleted by local HNS + if v.Info().Scope() != datastore.GlobalScope { + err = v.Delete() + if err != nil { + logrus.Errorf("Error occurred when removing network %v", err) + } + } + } + } + + _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) + if err != nil { + return nil, err + } + + defaultNetworkExists := false + + if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + options := network.Info().DriverOptions() + for _, v := range hnsresponse { + if options[winlibnetwork.HNSID] == v.Id { + defaultNetworkExists = true + break + } + } + } + + // discover and add HNS networks to windows + // network that exist are removed and added again + for _, v := range hnsresponse { + var n libnetwork.Network + s := func(current libnetwork.Network) bool { + options := current.Info().DriverOptions() + if options[winlibnetwork.HNSID] == v.Id { + n = current + return true + } + return false + } + + controller.WalkNetworks(s) + if n != nil { + // global networks should not be deleted by local HNS + if n.Info().Scope() == datastore.GlobalScope { + continue + } + v.Name = n.Name() + // This will not cause network delete from HNS as the network + // is not yet populated in the libnetwork windows driver + n.Delete() + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: v.Name, + winlibnetwork.HNSID: v.Id, + } + + v4Conf := []*libnetwork.IpamConf{} + for _, subnet := range v.Subnets { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnet.AddressPrefix + ipamV4Conf.Gateway = subnet.GatewayAddress + v4Conf = append(v4Conf, &ipamV4Conf) + } + + name := v.Name + + // If there is no nat network create one from the first NAT network + // encountered + if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) { + name = runconfig.DefaultDaemonNetworkMode().NetworkName() + defaultNetworkExists = true + } + + v6Conf := []*libnetwork.IpamConf{} + _, err := controller.NewNetwork(strings.ToLower(v.Type), name, "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + ) + + if err != nil { + logrus.Errorf("Error occurred when creating network %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + return nil + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), + } + + var ipamOption libnetwork.NetworkOption + var subnetPrefix string + + if config.bridgeConfig.FixedCIDR != "" { + subnetPrefix = config.bridgeConfig.FixedCIDR + } else { + // TP5 doesn't support properly detecting subnet + osv := system.GetOSVersion() + if osv.Build < 14360 { + subnetPrefix = defaultNetworkSpace + } + } + + if subnetPrefix != "" { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnetPrefix + v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) + } + + _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + ipamOption, + ) + + if err != nil { + return fmt.Errorf("Error creating default network: %v", err) + } + + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. As of Windows TP4, links are not supported. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMountsByID(in string) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // Create the root directory if it doesn't exists + if err := system.MkdirAllWithACL(config.Root, 0); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// runasHyperVContainer returns true if we are going to run as a Hyper-V container +func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { + if hostConfig.Isolation.IsDefault() { + // Container is set to use the default, so take the default from the daemon configuration + return daemon.defaultIsolation.IsHyperV() + } + + // Container is requesting an isolation mode. Honour it. + return hostConfig.Isolation.IsHyperV() + +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + // We do not mount if a Hyper-V container + if !daemon.runAsHyperVContainer(container.HostConfig) { + return daemon.Mount(container) + } + return nil +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + // We do not unmount if a Hyper-V container + if !daemon.runAsHyperVContainer(container.HostConfig) { + return daemon.Unmount(container) + } + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + + // Obtain the stats from HCS via libcontainerd + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + + // Start with an empty structure + s := &types.StatsJSON{} + + // Populate the CPU/processor statistics + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: stats.Processor.TotalRuntime100ns, + UsageInKernelmode: stats.Processor.RuntimeKernel100ns, + UsageInUsermode: stats.Processor.RuntimeKernel100ns, + }, + } + + // Populate the memory statistics + s.MemoryStats = types.MemoryStats{ + Commit: stats.Memory.UsageCommitBytes, + CommitPeak: stats.Memory.UsageCommitPeakBytes, + PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes, + } + + // Populate the storage statistics + s.StorageStats = types.StorageStats{ + ReadCountNormalized: stats.Storage.ReadCountNormalized, + ReadSizeBytes: stats.Storage.ReadSizeBytes, + WriteCountNormalized: stats.Storage.WriteCountNormalized, + WriteSizeBytes: stats.Storage.WriteSizeBytes, + } + + // Populate the network statistics + s.Networks = make(map[string]types.NetworkStats) + + for _, nstats := range stats.Network { + s.Networks[nstats.EndpointId] = types.NetworkStats{ + RxBytes: nstats.BytesReceived, + RxPackets: nstats.PacketsReceived, + RxDropped: nstats.DroppedPacketsIncoming, + TxBytes: nstats.BytesSent, + TxPackets: nstats.PacketsSent, + TxDropped: nstats.DroppedPacketsOutgoing, + } + } + + // Set the timestamp + s.Stats.Read = stats.Timestamp + s.Stats.NumProcs = platform.NumProcs() + + return s, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + daemon.defaultIsolation = containertypes.Isolation("process") + // On client SKUs, default to Hyper-V + if system.IsWindowsClient() { + daemon.defaultIsolation = containertypes.Isolation("hyperv") + } + for _, option := range daemon.configStore.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return err + } + key = strings.ToLower(key) + switch key { + + case "isolation": + if !containertypes.Isolation(val).IsValid() { + return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) + } + if containertypes.Isolation(val).IsHyperV() { + daemon.defaultIsolation = containertypes.Isolation("hyperv") + } + if containertypes.Isolation(val).IsProcess() { + if system.IsWindowsClient() { + // @engine maintainers. This block should not be removed. It partially enforces licensing + // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. + return fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + daemon.defaultIsolation = containertypes.Isolation("process") + } + default: + return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) + } + } + + logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} + +func setupDaemonProcess(config *Config) error { + return nil +} + +// verifyVolumesInfo is a no-op on windows. +// This is called during daemon initialization to migrate volumes from pre-1.7. +// volumes were not supported on windows pre-1.7 +func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap.go b/vendor/github.com/docker/docker/daemon/debugtrap.go new file mode 100644 index 0000000..209048b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap.go @@ -0,0 +1,62 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pkg/errors" +) + +const dataStructuresLogNameTemplate = "daemon-data-%s.log" + +// dumpDaemon appends the daemon datastructures into file in dir and returns full path +// to that file. +func (d *Daemon) dumpDaemon(dir string) (string, error) { + // Ensure we recover from a panic as we are doing this without any locking + defer func() { + recover() + }() + + path := filepath.Join(dir, fmt.Sprintf(dataStructuresLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + return "", errors.Wrap(err, "failed to open file to write the daemon datastructure dump") + } + defer f.Close() + + dump := struct { + containers interface{} + names interface{} + links interface{} + execs interface{} + volumes interface{} + images interface{} + layers interface{} + imageReferences interface{} + downloads interface{} + uploads interface{} + registry interface{} + plugins interface{} + }{ + containers: d.containers, + execs: d.execCommands, + volumes: d.volumes, + images: d.imageStore, + layers: d.layerStore, + imageReferences: d.referenceStore, + downloads: d.downloadManager, + uploads: d.uploadManager, + registry: d.RegistryService, + plugins: d.PluginStore, + names: d.nameIndex, + links: d.linkIndex, + } + + spew.Fdump(f, dump) // Does not return an error + f.Sync() + return path, nil +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unix.go b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go new file mode 100644 index 0000000..d650eb7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go @@ -0,0 +1,33 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + "syscall" + + "github.com/Sirupsen/logrus" + stackdump "github.com/docker/docker/pkg/signal" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + path, err := stackdump.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + path, err = d.dumpDaemon(root) + if err != nil { + logrus.WithError(err).Error("failed to write daemon datastructure dump") + } else { + logrus.Infof("daemon datastructure dump written to %s", path) + } + } + }() +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go new file mode 100644 index 0000000..f5b9170 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package daemon + +func (d *Daemon) setupDumpStackTrap(_ string) { + return +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_windows.go b/vendor/github.com/docker/docker/daemon/debugtrap_windows.go new file mode 100644 index 0000000..fb20c9d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_windows.go @@ -0,0 +1,52 @@ +package daemon + +import ( + "fmt" + "os" + "syscall" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + // Windows does not support signals like *nix systems. So instead of + // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be + // signaled. ACL'd to builtin administrators and local system + ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) + sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") + if err != nil { + logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", ev, err.Error()) + return + } + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + h, err := system.CreateEvent(&sa, false, false, ev) + if h == 0 || err != nil { + logrus.Errorf("failed to create debug stackdump event %s: %s", ev, err.Error()) + return + } + go func() { + logrus.Debugf("Stackdump - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + path, err := signal.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + path, err = d.dumpDaemon(root) + if err != nil { + logrus.WithError(err).Error("failed to write daemon datastructure dump") + } else { + logrus.Infof("daemon datastructure dump written to %s", path) + } + } + }() +} diff --git a/vendor/github.com/docker/docker/daemon/delete.go b/vendor/github.com/docker/docker/daemon/delete.go new file mode 100644 index 0000000..6b622bd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/delete.go @@ -0,0 +1,168 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + volumestore "github.com/docker/docker/volume/store" +) + +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. +func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Container state RemovalInProgress should be used to avoid races. + if inProgress := container.SetRemovalInProgress(); inProgress { + err := fmt.Errorf("removal of container %s is already in progress", name) + return errors.NewBadRequestError(err) + } + defer container.ResetRemovalInProgress() + + // check if container wasn't deregistered by previous rm since Get + if c := daemon.containers.Get(container.ID); c == nil { + return nil + } + + if config.RemoveLink { + return daemon.rmLink(container, name) + } + + err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume) + containerActions.WithValues("delete").UpdateSince(start) + + return err +} + +func (daemon *Daemon) rmLink(container *container.Container, name string) error { + if name[0] != '/' { + name = "/" + name + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + + parent = strings.TrimSuffix(parent, "/") + pe, err := daemon.nameIndex.Get(parent) + if err != nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + + daemon.releaseName(name) + parentContainer, _ := daemon.GetContainer(pe) + if parentContainer != nil { + daemon.linkIndex.unlink(name, container, parentContainer) + if err := daemon.updateNetwork(parentContainer); err != nil { + logrus.Debugf("Could not update network to remove link %s: %v", n, err) + } + } + return nil +} + +// cleanupContainer unregisters a container from the daemon, stops stats +// collection and cleanly removes contents and metadata from the filesystem. +func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) { + if container.IsRunning() { + if !forceRemove { + err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID) + return errors.NewRequestConflictError(err) + } + if err := daemon.Kill(container); err != nil { + return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) + } + } + + // stop collection of stats for the container regardless + // if stats are currently getting collected. + daemon.statsCollector.stopCollection(container) + + if err = daemon.containerStop(container, 3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.SetDead() + + // Save container state to disk. So that if error happens before + // container meta file got removed from disk, then a restart of + // docker should not make a dead container alive. + if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { + logrus.Errorf("Error saving dying container to disk: %v", err) + } + + // If force removal is required, delete container from various + // indexes even if removal failed. + defer func() { + if err == nil || forceRemove { + daemon.nameIndex.Delete(container.ID) + daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + if e := daemon.removeMountPoints(container, removeVolume); e != nil { + logrus.Error(e) + } + daemon.LogContainerEvent(container, "destroy") + } + }() + + if err = os.RemoveAll(container.Root); err != nil { + return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) + } + + // When container creation fails and `RWLayer` has not been created yet, we + // do not call `ReleaseRWLayer` + if container.RWLayer != nil { + metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) + layer.LogReleaseMetadata(metadata) + if err != nil && err != layer.ErrMountDoesNotExist { + return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) + } + } + + return nil +} + +// VolumeRm removes the volume with the given name. +// If the volume is referenced by a container it is not removed +// This is called directly from the Engine API +func (daemon *Daemon) VolumeRm(name string, force bool) error { + err := daemon.volumeRm(name) + if err == nil || force { + daemon.volumes.Purge(name) + return nil + } + return err +} + +func (daemon *Daemon) volumeRm(name string) error { + v, err := daemon.volumes.Get(name) + if err != nil { + return err + } + + if err := daemon.volumes.Remove(v); err != nil { + if volumestore.IsInUse(err) { + err := fmt.Errorf("Unable to remove volume, volume still in use: %v", err) + return errors.NewRequestConflictError(err) + } + return fmt.Errorf("Error while removing volume %s: %v", name, err) + } + daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/delete_test.go b/vendor/github.com/docker/docker/daemon/delete_test.go new file mode 100644 index 0000000..1fd27e1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/delete_test.go @@ -0,0 +1,43 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" +) + +func TestContainerDoubleDelete(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + daemon.containers = container.NewMemoryStore() + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "test", + State: container.NewState(), + Config: &containertypes.Config{}, + }, + } + daemon.containers.Add(container.ID, container) + + // Mark the container as having a delete in progress + container.SetRemovalInProgress() + + // Try to remove the container when its state is removalInProgress. + // It should return an error indicating it is under removal progress. + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err == nil { + t.Fatalf("expected err: %v, got nil", fmt.Sprintf("removal of container %s is already in progress", container.ID)) + } +} diff --git a/vendor/github.com/docker/docker/daemon/discovery.go b/vendor/github.com/docker/docker/daemon/discovery.go new file mode 100644 index 0000000..ee4ea87 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/discovery.go @@ -0,0 +1,215 @@ +package daemon + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + + // Register the libkv backends for discovery. + _ "github.com/docker/docker/pkg/discovery/kv" +) + +const ( + // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. + defaultDiscoveryHeartbeat = 20 * time.Second + // defaultDiscoveryTTLFactor is the default TTL factor for discovery + defaultDiscoveryTTLFactor = 3 +) + +var errDiscoveryDisabled = errors.New("discovery is disabled") + +type discoveryReloader interface { + discovery.Watcher + Stop() + Reload(backend, address string, clusterOpts map[string]string) error + ReadyCh() <-chan struct{} +} + +type daemonDiscoveryReloader struct { + backend discovery.Backend + ticker *time.Ticker + term chan bool + readyCh chan struct{} +} + +func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + return d.backend.Watch(stopCh) +} + +func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} { + return d.readyCh +} + +func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { + var ( + heartbeat = defaultDiscoveryHeartbeat + ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat + ) + + if hb, ok := clusterOpts["discovery.heartbeat"]; ok { + h, err := strconv.Atoi(hb) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if h <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.heartbeat must be positive") + } + + heartbeat = time.Duration(h) * time.Second + ttl = defaultDiscoveryTTLFactor * heartbeat + } + + if tstr, ok := clusterOpts["discovery.ttl"]; ok { + t, err := strconv.Atoi(tstr) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if t <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl must be positive") + } + + ttl = time.Duration(t) * time.Second + + if _, ok := clusterOpts["discovery.heartbeat"]; !ok { + h := int(t / defaultDiscoveryTTLFactor) + heartbeat = time.Duration(h) * time.Second + } + + if ttl <= heartbeat { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") + } + } + + return heartbeat, ttl, nil +} + +// initDiscovery initializes the nodes discovery subsystem by connecting to the specified backend +// and starts a registration loop to advertise the current node under the specified address. +func initDiscovery(backendAddress, advertiseAddress string, clusterOpts map[string]string) (discoveryReloader, error) { + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return nil, err + } + + reloader := &daemonDiscoveryReloader{ + backend: backend, + ticker: time.NewTicker(heartbeat), + term: make(chan bool), + readyCh: make(chan struct{}), + } + // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, + // but we never actually Watch() for nodes appearing and disappearing for the moment. + go reloader.advertiseHeartbeat(advertiseAddress) + return reloader, nil +} + +// advertiseHeartbeat registers the current node against the discovery backend using the specified +// address. The function never returns, as registration against the backend comes with a TTL and +// requires regular heartbeats. +func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) { + var ready bool + if err := d.initHeartbeat(address); err == nil { + ready = true + close(d.readyCh) + } + + for { + select { + case <-d.ticker.C: + if err := d.backend.Register(address); err != nil { + logrus.Warnf("Registering as %q in discovery failed: %v", address, err) + } else { + if !ready { + close(d.readyCh) + ready = true + } + } + case <-d.term: + return + } + } +} + +// initHeartbeat is used to do the first heartbeat. It uses a tight loop until +// either the timeout period is reached or the heartbeat is successful and returns. +func (d *daemonDiscoveryReloader) initHeartbeat(address string) error { + // Setup a short ticker until the first heartbeat has succeeded + t := time.NewTicker(500 * time.Millisecond) + defer t.Stop() + // timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service + timeout := time.After(60 * time.Second) + + for { + select { + case <-timeout: + return errors.New("timeout waiting for initial discovery") + case <-d.term: + return errors.New("terminated") + case <-t.C: + if err := d.backend.Register(address); err == nil { + return nil + } + } + } +} + +// Reload makes the watcher to stop advertising and reconfigures it to advertise in a new address. +func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error { + d.Stop() + + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return err + } + + d.backend = backend + d.ticker = time.NewTicker(heartbeat) + d.readyCh = make(chan struct{}) + + go d.advertiseHeartbeat(advertiseAddress) + return nil +} + +// Stop terminates the discovery advertising. +func (d *daemonDiscoveryReloader) Stop() { + d.ticker.Stop() + d.term <- true +} + +func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) { + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err != nil { + return 0, nil, err + } + + backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts) + if err != nil { + return 0, nil, err + } + return heartbeat, backend, nil +} + +// modifiedDiscoverySettings returns whether the discovery configuration has been modified or not. +func modifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { + if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { + return true + } + + if (config.ClusterOpts == nil && clusterOpts == nil) || + (config.ClusterOpts == nil && len(clusterOpts) == 0) || + (len(config.ClusterOpts) == 0 && clusterOpts == nil) { + return false + } + + return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) +} diff --git a/vendor/github.com/docker/docker/daemon/discovery_test.go b/vendor/github.com/docker/docker/daemon/discovery_test.go new file mode 100644 index 0000000..336973c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/discovery_test.go @@ -0,0 +1,164 @@ +package daemon + +import ( + "testing" + "time" +) + +func TestDiscoveryOpts(t *testing.T) { + clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("discovery.ttl < discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("discovery.ttl == discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("negative discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("negative discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("invalid discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.ttl": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("invalid discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + if ttl != 20*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + expected := 10 * defaultDiscoveryTTLFactor * time.Second + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } + + clusterOpts = map[string]string{"discovery.ttl": "30"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if ttl != 30*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) + } + + expected = 30 * time.Second / defaultDiscoveryTTLFactor + if heartbeat != expected { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) + } + + clusterOpts = map[string]string{} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != defaultDiscoveryHeartbeat { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) + } + + expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } +} + +func TestModifiedDiscoverySettings(t *testing.T) { + cases := []struct { + current *Config + modified *Config + expected bool + }{ + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", nil), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("baz", "bar", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "baz", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: true, + }, + } + + for _, c := range cases { + got := modifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) + if c.expected != got { + t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) + } + } +} + +func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { + return &Config{ + CommonConfig: CommonConfig{ + ClusterStore: backendAddr, + ClusterAdvertise: advertiseAddr, + ClusterOpts: opts, + }, + } +} diff --git a/vendor/github.com/docker/docker/daemon/disk_usage.go b/vendor/github.com/docker/docker/daemon/disk_usage.go new file mode 100644 index 0000000..c3b9186 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/disk_usage.go @@ -0,0 +1,100 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/volume" +) + +func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int { + tmpImages := daemon.imageStore.Map() + layerRefs := map[layer.ChainID]int{} + for id, img := range tmpImages { + dgst := digest.Digest(id) + if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { + continue + } + + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + } + } + + return layerRefs +} + +// SystemDiskUsage returns information about the daemon data disk usage +func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) { + // Retrieve container list + allContainers, err := daemon.Containers(&types.ContainerListOptions{ + Size: true, + All: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to retrieve container list: %v", err) + } + + // Get all top images with extra attributes + allImages, err := daemon.Images(filters.NewArgs(), false, true) + if err != nil { + return nil, fmt.Errorf("failed to retrieve image list: %v", err) + } + + // Get all local volumes + allVolumes := []*types.Volume{} + getLocalVols := func(v volume.Volume) error { + name := v.Name() + refs := daemon.volumes.Refs(v) + + tv := volumeToAPIType(v) + sz, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("failed to determine size of volume %v", name) + sz = -1 + } + tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))} + allVolumes = append(allVolumes, tv) + + return nil + } + + err = daemon.traverseLocalVolumes(getLocalVols) + if err != nil { + return nil, err + } + + // Get total layers size on disk + layerRefs := daemon.getLayerRefs() + allLayers := daemon.layerStore.Map() + var allLayersSize int64 + for _, l := range allLayers { + size, err := l.DiffSize() + if err == nil { + if _, ok := layerRefs[l.ChainID()]; ok { + allLayersSize += size + } else { + logrus.Warnf("found leaked image layer %v", l.ChainID()) + } + } else { + logrus.Warnf("failed to get diff size for layer %v", l.ChainID()) + } + + } + + return &types.DiskUsage{ + LayersSize: allLayersSize, + Containers: allContainers, + Volumes: allVolumes, + Images: allImages, + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/errors.go b/vendor/github.com/docker/docker/daemon/errors.go new file mode 100644 index 0000000..566a32f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/errors.go @@ -0,0 +1,57 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/reference" +) + +func (d *Daemon) imageNotExistToErrcode(err error) error { + if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { + if strings.Contains(dne.RefOrID, "@") { + e := fmt.Errorf("No such image: %s", dne.RefOrID) + return errors.NewRequestNotFoundError(e) + } + tag := reference.DefaultTag + ref, err := reference.ParseNamed(dne.RefOrID) + if err != nil { + e := fmt.Errorf("No such image: %s:%s", dne.RefOrID, tag) + return errors.NewRequestNotFoundError(e) + } + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + e := fmt.Errorf("No such image: %s:%s", ref.Name(), tag) + return errors.NewRequestNotFoundError(e) + } + return err +} + +type errNotRunning struct { + containerID string +} + +func (e errNotRunning) Error() string { + return fmt.Sprintf("Container %s is not running", e.containerID) +} + +func (e errNotRunning) ContainerIsRunning() bool { + return false +} + +func errContainerIsRestarting(containerID string) error { + err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) + return errors.NewRequestConflictError(err) +} + +func errExecNotFound(id string) error { + err := fmt.Errorf("No such exec instance '%s' found in daemon", id) + return errors.NewRequestNotFoundError(err) +} + +func errExecPaused(id string) error { + err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) + return errors.NewRequestConflictError(err) +} diff --git a/vendor/github.com/docker/docker/daemon/events.go b/vendor/github.com/docker/docker/daemon/events.go new file mode 100644 index 0000000..8fe8e1b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events.go @@ -0,0 +1,132 @@ +package daemon + +import ( + "strings" + "time" + + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + daemonevents "github.com/docker/docker/daemon/events" + "github.com/docker/libnetwork" +) + +// LogContainerEvent generates an event related to a container with only the default attributes. +func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { + daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) +} + +// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. +func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { + copyAttributes(attributes, container.Config.Labels) + if container.Config.Image != "" { + attributes["image"] = container.Config.Image + } + attributes["name"] = strings.TrimLeft(container.Name, "/") + + actor := events.Actor{ + ID: container.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.ContainerEventType, actor) +} + +// LogImageEvent generates an event related to an image with only the default attributes. +func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { + daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to an image with specific given attributes. +func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := daemon.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + daemon.EventsService.Log(action, events.ImageEventType, actor) +} + +// LogPluginEvent generates an event related to a plugin with only the default attributes. +func (daemon *Daemon) LogPluginEvent(pluginID, refName, action string) { + daemon.LogPluginEventWithAttributes(pluginID, refName, action, map[string]string{}) +} + +// LogPluginEventWithAttributes generates an event related to a plugin with specific given attributes. +func (daemon *Daemon) LogPluginEventWithAttributes(pluginID, refName, action string, attributes map[string]string) { + attributes["name"] = refName + actor := events.Actor{ + ID: pluginID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.PluginEventType, actor) +} + +// LogVolumeEvent generates an event related to a volume. +func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { + actor := events.Actor{ + ID: volumeID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.VolumeEventType, actor) +} + +// LogNetworkEvent generates an event related to a network with only the default attributes. +func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { + daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) +} + +// LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. +func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) { + attributes["name"] = nw.Name() + attributes["type"] = nw.Type() + actor := events.Actor{ + ID: nw.ID(), + Attributes: attributes, + } + daemon.EventsService.Log(action, events.NetworkEventType, actor) +} + +// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. +func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { + if daemon.EventsService != nil { + if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { + attributes["name"] = info.Name + } + actor := events.Actor{ + ID: daemon.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.DaemonEventType, actor) + } +} + +// SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. +func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { + ef := daemonevents.NewFilter(filter) + return daemon.EventsService.SubscribeTopic(since, until, ef) +} + +// UnsubscribeFromEvents stops the event subscription for a client by closing the +// channel where the daemon sends events to. +func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { + daemon.EventsService.Evict(listener) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. +func copyAttributes(attributes, labels map[string]string) { + if labels == nil { + return + } + for k, v := range labels { + attributes[k] = v + } +} diff --git a/vendor/github.com/docker/docker/daemon/events/events.go b/vendor/github.com/docker/docker/daemon/events/events.go new file mode 100644 index 0000000..0bf105f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/events.go @@ -0,0 +1,158 @@ +package events + +import ( + "sync" + "time" + + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/pkg/pubsub" +) + +const ( + eventsLimit = 64 + bufferSize = 1024 +) + +// Events is pubsub channel for events generated by the engine. +type Events struct { + mu sync.Mutex + events []eventtypes.Message + pub *pubsub.Publisher +} + +// New returns new *Events instance +func New() *Events { + return &Events{ + events: make([]eventtypes.Message, 0, eventsLimit), + pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), + } +} + +// Subscribe adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion), and a function to call +// to stop the stream of events. +func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { + eventSubscribers.Inc() + e.mu.Lock() + current := make([]eventtypes.Message, len(e.events)) + copy(current, e.events) + l := e.pub.Subscribe() + e.mu.Unlock() + + cancel := func() { + e.Evict(l) + } + return current, l, cancel +} + +// SubscribeTopic adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion). +func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) { + eventSubscribers.Inc() + e.mu.Lock() + + var topic func(m interface{}) bool + if ef != nil && ef.filter.Len() > 0 { + topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } + } + + buffered := e.loadBufferedEvents(since, until, topic) + + var ch chan interface{} + if topic != nil { + ch = e.pub.SubscribeTopic(topic) + } else { + // Subscribe to all events if there are no filters + ch = e.pub.Subscribe() + } + + e.mu.Unlock() + return buffered, ch +} + +// Evict evicts listener from pubsub +func (e *Events) Evict(l chan interface{}) { + eventSubscribers.Dec() + e.pub.Evict(l) +} + +// Log broadcasts event to listeners. Each listener has 100 millisecond for +// receiving event or it will be skipped. +func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { + eventsCounter.Inc() + now := time.Now().UTC() + jm := eventtypes.Message{ + Action: action, + Type: eventType, + Actor: actor, + Time: now.Unix(), + TimeNano: now.UnixNano(), + } + + // fill deprecated fields for container and images + switch eventType { + case eventtypes.ContainerEventType: + jm.ID = actor.ID + jm.Status = action + jm.From = actor.Attributes["image"] + case eventtypes.ImageEventType: + jm.ID = actor.ID + jm.Status = action + } + + e.mu.Lock() + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) +} + +// SubscribersCount returns number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} + +// loadBufferedEvents iterates over the cached events in the buffer +// and returns those that were emitted between two specific dates. +// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments. +// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. +func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message { + var buffered []eventtypes.Message + if since.IsZero() && until.IsZero() { + return buffered + } + + var sinceNanoUnix int64 + if !since.IsZero() { + sinceNanoUnix = since.UnixNano() + } + + var untilNanoUnix int64 + if !until.IsZero() { + untilNanoUnix = until.UnixNano() + } + + for i := len(e.events) - 1; i >= 0; i-- { + ev := e.events[i] + + if ev.TimeNano < sinceNanoUnix { + break + } + + if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix { + continue + } + + if topic == nil || topic(ev) { + buffered = append([]eventtypes.Message{ev}, buffered...) + } + } + return buffered +} diff --git a/vendor/github.com/docker/docker/daemon/events/events_test.go b/vendor/github.com/docker/docker/daemon/events/events_test.go new file mode 100644 index 0000000..bbd160f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/events_test.go @@ -0,0 +1,275 @@ +package events + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" + eventstestutils "github.com/docker/docker/daemon/events/testutils" +) + +func TestEventsLog(t *testing.T) { + e := New() + _, l1, _ := e.Subscribe() + _, l2, _ := e.Subscribe() + defer e.Evict(l1) + defer e.Evict(l2) + count := e.SubscribersCount() + if count != 2 { + t.Fatalf("Must be 2 subscribers, got %d", count) + } + actor := events.Actor{ + ID: "cont", + Attributes: map[string]string{"image": "image"}, + } + e.Log("test", events.ContainerEventType, actor) + select { + case msg := <-l1: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } + select { + case msg := <-l2: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } +} + +func TestEventsLogTimeout(t *testing.T) { + e := New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + c := make(chan struct{}) + go func() { + actor := events.Actor{ + ID: "image", + } + e.Log("test", events.ImageEventType, actor) + close(c) + }() + + select { + case <-c: + case <-time.After(time.Second): + t.Fatal("Timeout publishing message") + } +} + +func TestLogEvents(t *testing.T) { + e := New() + + for i := 0; i < eventsLimit+16; i++ { + action := fmt.Sprintf("action_%d", i) + id := fmt.Sprintf("cont_%d", i) + from := fmt.Sprintf("image_%d", i) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + time.Sleep(50 * time.Millisecond) + current, l, _ := e.Subscribe() + for i := 0; i < 10; i++ { + num := i + eventsLimit + 16 + action := fmt.Sprintf("action_%d", num) + id := fmt.Sprintf("cont_%d", num) + from := fmt.Sprintf("image_%d", num) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + if len(e.events) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) + } + + var msgs []events.Message + for len(msgs) < 10 { + m := <-l + jm, ok := (m).(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", m) + } + msgs = append(msgs, jm) + } + if len(current) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) + } + first := current[0] + if first.Status != "action_16" { + t.Fatalf("First action is %s, must be action_16", first.Status) + } + last := current[len(current)-1] + if last.Status != "action_79" { + t.Fatalf("Last action is %s, must be action_79", last.Status) + } + + firstC := msgs[0] + if firstC.Status != "action_80" { + t.Fatalf("First action is %s, must be action_80", firstC.Status) + } + lastC := msgs[len(msgs)-1] + if lastC.Status != "action_89" { + t.Fatalf("Last action is %s, must be action_89", lastC.Status) + } +} + +// https://github.com/docker/docker/issues/20999 +// Fixtures: +// +//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge) +//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +func TestLoadBufferedEvents(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Unix(s, sNano) + until := time.Time{} + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 1 { + t.Fatalf("expected 1 message, got %d: %v", len(out), out) + } +} + +func TestLoadBufferedEventsOnlyFromPast(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.090000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + f, err = timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + u, uNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Unix(s, sNano) + until := time.Unix(u, uNano) + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 1 { + t.Fatalf("expected 1 message, got %d: %v", len(out), out) + } + + if out[0].Type != "network" { + t.Fatalf("expected network event, got %s", out[0].Type) + } +} + +// #13753 +func TestIngoreBufferedWhenNoTimes(t *testing.T) { + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Time{} + until := time.Time{} + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 0 { + t.Fatalf("expected 0 buffered events, got %q", out) + } +} diff --git a/vendor/github.com/docker/docker/daemon/events/filter.go b/vendor/github.com/docker/docker/daemon/events/filter.go new file mode 100644 index 0000000..5c9c527 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/filter.go @@ -0,0 +1,110 @@ +package events + +import ( + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/reference" +) + +// Filter can filter out docker events from a stream +type Filter struct { + filter filters.Args +} + +// NewFilter creates a new Filter +func NewFilter(filter filters.Args) *Filter { + return &Filter{filter: filter} +} + +// Include returns true when the event ev is included by the filters +func (ef *Filter) Include(ev events.Message) bool { + return ef.matchEvent(ev) && + ef.filter.ExactMatch("type", ev.Type) && + ef.matchDaemon(ev) && + ef.matchContainer(ev) && + ef.matchPlugin(ev) && + ef.matchVolume(ev) && + ef.matchNetwork(ev) && + ef.matchImage(ev) && + ef.matchLabels(ev.Actor.Attributes) +} + +func (ef *Filter) matchEvent(ev events.Message) bool { + // #25798 if an event filter contains either health_status, exec_create or exec_start without a colon + // Let's to a FuzzyMatch instead of an ExactMatch. + if ef.filterContains("event", map[string]struct{}{"health_status": {}, "exec_create": {}, "exec_start": {}}) { + return ef.filter.FuzzyMatch("event", ev.Action) + } + return ef.filter.ExactMatch("event", ev.Action) +} + +func (ef *Filter) filterContains(field string, values map[string]struct{}) bool { + for _, v := range ef.filter.Get(field) { + if _, ok := values[v]; ok { + return true + } + } + return false +} + +func (ef *Filter) matchLabels(attributes map[string]string) bool { + if !ef.filter.Include("label") { + return true + } + return ef.filter.MatchKVList("label", attributes) +} + +func (ef *Filter) matchDaemon(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.DaemonEventType) +} + +func (ef *Filter) matchContainer(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ContainerEventType) +} + +func (ef *Filter) matchPlugin(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.PluginEventType) +} + +func (ef *Filter) matchVolume(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.VolumeEventType) +} + +func (ef *Filter) matchNetwork(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NetworkEventType) +} + +func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { + return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || + ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) +} + +// matchImage matches against both event.Actor.ID (for image events) +// and event.Actor.Attributes["image"] (for container events), so that any container that was created +// from an image will be included in the image events. Also compare both +// against the stripped repo name without any tags. +func (ef *Filter) matchImage(ev events.Message) bool { + id := ev.Actor.ID + nameAttr := "image" + var imageName string + + if ev.Type == events.ImageEventType { + nameAttr = "name" + } + + if n, ok := ev.Actor.Attributes[nameAttr]; ok { + imageName = n + } + return ef.filter.ExactMatch("image", id) || + ef.filter.ExactMatch("image", imageName) || + ef.filter.ExactMatch("image", stripTag(id)) || + ef.filter.ExactMatch("image", stripTag(imageName)) +} + +func stripTag(image string) string { + ref, err := reference.ParseNamed(image) + if err != nil { + return image + } + return ref.Name() +} diff --git a/vendor/github.com/docker/docker/daemon/events/metrics.go b/vendor/github.com/docker/docker/daemon/events/metrics.go new file mode 100644 index 0000000..c9a89ec --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/metrics.go @@ -0,0 +1,15 @@ +package events + +import "github.com/docker/go-metrics" + +var ( + eventsCounter metrics.Counter + eventSubscribers metrics.Gauge +) + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + eventsCounter = ns.NewCounter("events", "The number of events logged") + eventSubscribers = ns.NewGauge("events_subscribers", "The number of current subscribers to events", metrics.Total) + metrics.Register(ns) +} diff --git a/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go b/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go new file mode 100644 index 0000000..3544446 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go @@ -0,0 +1,76 @@ +package testutils + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" +) + +var ( + reTimestamp = `(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z))` + reEventType = `(?P\w+)` + reAction = `(?P\w+)` + reID = `(?P[^\s]+)` + reAttributes = `(\s\((?P[^\)]+)\))?` + reString = fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes) + + // eventCliRegexp is a regular expression that matches all possible event outputs in the cli + eventCliRegexp = regexp.MustCompile(reString) +) + +// ScanMap turns an event string like the default ones formatted in the cli output +// and turns it into map. +func ScanMap(text string) map[string]string { + matches := eventCliRegexp.FindAllStringSubmatch(text, -1) + md := map[string]string{} + if len(matches) == 0 { + return md + } + + names := eventCliRegexp.SubexpNames() + for i, n := range matches[0] { + md[names[i]] = n + } + return md +} + +// Scan turns an event string like the default ones formatted in the cli output +// and turns it into an event message. +func Scan(text string) (*events.Message, error) { + md := ScanMap(text) + if len(md) == 0 { + return nil, fmt.Errorf("text is not an event: %s", text) + } + + f, err := timetypes.GetTimestamp(md["timestamp"], time.Now()) + if err != nil { + return nil, err + } + + t, tn, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + return nil, err + } + + attrs := make(map[string]string) + for _, a := range strings.SplitN(md["attributes"], ", ", -1) { + kv := strings.SplitN(a, "=", 2) + attrs[kv[0]] = kv[1] + } + + tu := time.Unix(t, tn) + return &events.Message{ + Time: t, + TimeNano: tu.UnixNano(), + Type: md["eventType"], + Action: md["action"], + Actor: events.Actor{ + ID: md["id"], + Attributes: attrs, + }, + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/events_test.go b/vendor/github.com/docker/docker/daemon/events_test.go new file mode 100644 index 0000000..2dbcc27 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events_test.go @@ -0,0 +1,94 @@ +package daemon + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func TestLogContainerEventCopyLabels(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + daemon.LogContainerEvent(container, "create") + + if _, mutated := container.Config.Labels["image"]; mutated { + t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels) + } + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "os": "alpine", + }) +} + +func TestLogContainerEventWithAttributes(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + attributes := map[string]string{ + "node": "2", + "foo": "bar", + } + daemon.LogContainerEventWithAttributes(container, "create", attributes) + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "foo": "bar", + }) +} + +func validateTestAttributes(t *testing.T, l chan interface{}, expectedAttributesToTest map[string]string) { + select { + case ev := <-l: + event, ok := ev.(eventtypes.Message) + if !ok { + t.Fatalf("Unexpected event message: %q", ev) + } + for key, expected := range expectedAttributesToTest { + actual, ok := event.Actor.Attributes[key] + if !ok || actual != expected { + t.Fatalf("Expected value for key %s to be %s, but was %s (event:%v)", key, expected, actual, event) + } + } + case <-time.After(10 * time.Second): + t.Fatalf("LogEvent test timed out") + } +} diff --git a/vendor/github.com/docker/docker/daemon/exec.go b/vendor/github.com/docker/docker/daemon/exec.go new file mode 100644 index 0000000..8197426 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec.go @@ -0,0 +1,280 @@ +package daemon + +import ( + "fmt" + "io" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" +) + +// Seconds to wait after sending TERM before trying KILL +const termProcessTimeout = 10 + +func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + container.ExecCommands.Add(config.ID, config) + // Storing execs in daemon for easy access via Engine API. + d.execCommands.Add(config.ID, config) +} + +// ExecExists looks up the exec instance and returns a bool if it exists or not. +// It will also return the error produced by `getConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. +func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { + ec := d.execCommands.Get(name) + + // If the exec is found but its container is not in the daemon's list of + // containers then it must have been deleted, in which case instead of + // saying the container isn't running, we should return a 404 so that + // the user sees the same error now that they will after the + // 5 minute clean-up loop is run which erases old/dead execs. + + if ec != nil { + if container := d.containers.Get(ec.ContainerID); container != nil { + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil + } + } + + return nil, errExecNotFound(name) +} + +func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { + container.ExecCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { + container, err := d.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + if container.IsPaused() { + return nil, errExecPaused(name) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return container, nil +} + +// ContainerExecCreate sets up an exec in a running container. +func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { + container, err := d.getActiveContainer(name) + if err != nil { + return "", err + } + + cmd := strslice.StrSlice(config.Cmd) + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) + + keys := []byte{} + if config.DetachKeys != "" { + keys, err = term.ToBytes(config.DetachKeys) + if err != nil { + err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) + return "", err + } + } + + execConfig := exec.NewConfig() + execConfig.OpenStdin = config.AttachStdin + execConfig.OpenStdout = config.AttachStdout + execConfig.OpenStderr = config.AttachStderr + execConfig.ContainerID = container.ID + execConfig.DetachKeys = keys + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = config.Tty + execConfig.Privileged = config.Privileged + execConfig.User = config.User + + linkedEnv, err := d.setupLinkedContainers(container) + if err != nil { + return "", err + } + execConfig.Env = utils.ReplaceOrAppendEnvValues(container.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) + if len(execConfig.User) == 0 { + execConfig.User = container.Config.User + } + + d.registerExecCommand(container, execConfig) + + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + return execConfig.ID, nil +} + +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. +// If ctx is cancelled, the process is terminated. +func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + ec, err := d.getExecConfig(name) + if err != nil { + return errExecNotFound(name) + } + + ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) + return errors.NewRequestConflictError(err) + } + + if ec.Running { + ec.Unlock() + return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + } + ec.Running = true + defer func() { + if err != nil { + ec.Running = false + exitCode := 126 + ec.ExitCode = &exitCode + } + }() + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) + + if ec.OpenStdin && stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if ec.OpenStdout { + cStdout = stdout + } + if ec.OpenStderr { + cStderr = stderr + } + + if ec.OpenStdin { + ec.StreamConfig.NewInputPipes() + } else { + ec.StreamConfig.NewNopInputPipe() + } + + p := libcontainerd.Process{ + Args: append([]string{ec.Entrypoint}, ec.Args...), + Env: ec.Env, + Terminal: ec.Tty, + } + + if err := execSetPlatformOpt(c, ec, &p); err != nil { + return err + } + + attachErr := container.AttachStreams(ctx, ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys) + + systemPid, err := d.containerd.AddProcess(ctx, c.ID, name, p, ec.InitializeStdio) + if err != nil { + return err + } + ec.Lock() + ec.Pid = systemPid + ec.Unlock() + + select { + case <-ctx.Done(): + logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID) + d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"])) + select { + case <-time.After(termProcessTimeout * time.Second): + logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout) + d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"])) + case <-attachErr: + // TERM signal worked + } + return fmt.Errorf("context cancelled") + case err := <-attachErr: + if err != nil { + if _, ok := err.(container.DetachError); !ok { + return fmt.Errorf("exec attach failed with error: %v", err) + } + d.LogContainerEvent(c, "exec_detach") + } + } + return nil +} + +// execCommandGC runs a ticker to clean up the daemon references +// of exec configs that are no longer part of the container. +func (d *Daemon) execCommandGC() { + for range time.Tick(5 * time.Minute) { + var ( + cleaned int + liveExecCommands = d.containerExecIds() + ) + for id, config := range d.execCommands.Commands() { + if config.CanRemove { + cleaned++ + d.execCommands.Delete(id) + } else { + if _, exists := liveExecCommands[id]; !exists { + config.CanRemove = true + } + } + } + if cleaned > 0 { + logrus.Debugf("clean %d unused exec commands", cleaned) + } + } +} + +// containerExecIds returns a list of all the current exec ids that are in use +// and running inside a container. +func (d *Daemon) containerExecIds() map[string]struct{} { + ids := map[string]struct{}{} + for _, c := range d.containers.List() { + for _, id := range c.ExecCommands.List() { + ids[id] = struct{}{} + } + } + return ids +} diff --git a/vendor/github.com/docker/docker/daemon/exec/exec.go b/vendor/github.com/docker/docker/daemon/exec/exec.go new file mode 100644 index 0000000..933136f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec/exec.go @@ -0,0 +1,118 @@ +package exec + +import ( + "runtime" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/stringid" +) + +// Config holds the configurations for execs. The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion. +type Config struct { + sync.Mutex + StreamConfig *stream.Config + ID string + Running bool + ExitCode *int + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Entrypoint string + Args []string + Tty bool + Privileged bool + User string + Env []string + Pid int +} + +// NewConfig initializes the a new exec configuration +func NewConfig() *Config { + return &Config{ + ID: stringid.GenerateNonCryptoID(), + StreamConfig: stream.NewConfig(), + } +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error { + c.StreamConfig.CopyToPipe(iop) + + if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Errorf("error closing exec stdin: %+v", err) + } + } + } + + return nil +} + +// CloseStreams closes the stdio streams for the exec +func (c *Config) CloseStreams() error { + return c.StreamConfig.CloseStreams() +} + +// Store keeps track of the exec configurations. +type Store struct { + commands map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{commands: make(map[string]*Config, 0)} +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, Config *Config) { + e.Lock() + e.commands[id] = Config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.commands[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string) { + e.Lock() + delete(e.commands, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.commands { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/vendor/github.com/docker/docker/daemon/exec_linux.go b/vendor/github.com/docker/docker/daemon/exec_linux.go new file mode 100644 index 0000000..5aeedc3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_linux.go @@ -0,0 +1,27 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + if len(ec.User) > 0 { + uid, gid, additionalGids, err := getUser(c, ec.User) + if err != nil { + return err + } + p.User = &specs.User{ + UID: uid, + GID: gid, + AdditionalGids: additionalGids, + } + } + if ec.Privileged { + p.Capabilities = caps.GetAllCapabilities() + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/exec_solaris.go b/vendor/github.com/docker/docker/daemon/exec_solaris.go new file mode 100644 index 0000000..7003355 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/exec_windows.go b/vendor/github.com/docker/docker/daemon/exec_windows.go new file mode 100644 index 0000000..1d6974c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_windows.go @@ -0,0 +1,14 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + // Process arguments need to be escaped before sending to OCI. + p.Args = escapeArgs(p.Args) + p.User.Username = ec.User + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/export.go b/vendor/github.com/docker/docker/daemon/export.go new file mode 100644 index 0000000..5ef6dbb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/export.go @@ -0,0 +1,60 @@ +package daemon + +import ( + "fmt" + "io" + "runtime" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found. +func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { + if runtime.GOOS == "windows" { + return fmt.Errorf("the daemon on this platform does not support export of a container") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + data, err := daemon.containerExport(container) + if err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(out, data); err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + return nil +} + +func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + }) + if err != nil { + daemon.Unmount(container) + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + daemon.Unmount(container) + return err + }) + daemon.LogContainerEvent(container, "export") + return arch, err +} diff --git a/vendor/github.com/docker/docker/daemon/getsize_unix.go b/vendor/github.com/docker/docker/daemon/getsize_unix.go new file mode 100644 index 0000000..707323a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/getsize_unix.go @@ -0,0 +1,41 @@ +// +build linux freebsd solaris + +package daemon + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" +) + +// getSize returns the real size & virtual size of the container. +func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + ) + + if err := daemon.Mount(container); err != nil { + logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer daemon.Unmount(container) + + sizeRw, err = container.RWLayer.Size() + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + daemon.GraphDriverName(), container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. + sizeRw = -1 + } + + if parent := container.RWLayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { + sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw + } + } + return sizeRw, sizeRootfs +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go new file mode 100644 index 0000000..ec55ea4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go @@ -0,0 +1,669 @@ +// +build linux + +/* + +aufs driver directory structure + + . + ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/idtools" + mountpk "github.com/docker/docker/pkg/mount" + + "github.com/opencontainers/runc/libcontainer/label" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + // ErrAufsNested means aufs cannot be used bc we are in a user namespace + ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +func init() { + graphdriver.Register("aufs", Init) +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + sync.Mutex + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + pathCacheLock sync.Mutex + pathCache map[string]string + naiveDiff graphdriver.DiffDriver +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. +func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + uidMaps: uidMaps, + gidMaps: gidMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if rsystem.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// GetMetadata not implemented +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return a.Create(id, parent, opts) +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for aufs") + } + + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIDs(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + + return nil +} + +// createDirsFor creates two directories for the given id. +// mnt and diff +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) + if err != nil { + return err + } + // Directory permission is 0755. + // The path of directories are /mnt/ + // and /diff/ + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { + return err + } + } + return nil +} + +// Helper function to debug EBUSY errors on remove. +func debugEBusy(mountPath string) (out []string, err error) { + // lsof is not part of GNU coreutils. This is a best effort + // attempt to detect offending processes. + c := exec.Command("lsof") + + r, err := c.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("Assigning pipes failed with %v", err) + } + + if err := c.Start(); err != nil { + return nil, fmt.Errorf("Starting %s failed with %v", c.Path, err) + } + + defer func() { + waiterr := c.Wait() + if waiterr != nil && err == nil { + err = fmt.Errorf("Waiting for %s failed with %v", c.Path, waiterr) + } + }() + + sc := bufio.NewScanner(r) + for sc.Scan() { + entry := sc.Text() + if strings.Contains(entry, mountPath) { + out = append(out, entry, "\n") + } + } + + return out, nil +} + +// Remove will unmount and remove the given id. +func (a *Driver) Remove(id string) error { + a.pathCacheLock.Lock() + mountpoint, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + if !exists { + mountpoint = a.getMountpoint(id) + } + + var retries int + for { + mounted, err := a.mounted(mountpoint) + if err != nil { + return err + } + if !mounted { + break + } + + if err := a.unmount(mountpoint); err != nil { + if err != syscall.EBUSY { + return fmt.Errorf("aufs: unmount error: %s: %v", mountpoint, err) + } + if retries >= 5 { + out, debugErr := debugEBusy(mountpoint) + if debugErr == nil { + logrus.Warnf("debugEBusy returned %v", out) + } + return fmt.Errorf("aufs: unmount error after retries: %s: %v", mountpoint, err) + } + // If unmount returns EBUSY, it could be a transient error. Sleep and retry. + retries++ + logrus.Warnf("unmount failed due to EBUSY: retry count: %d", retries) + time.Sleep(100 * time.Millisecond) + continue + } + break + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. + tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) { + if err == syscall.EBUSY { + logrus.Warn("os.Rename err due to EBUSY") + out, debugErr := debugEBusy(mountpoint) + if debugErr == nil { + logrus.Warnf("debugEBusy returned %v", out) + } + } + return err + } + defer os.RemoveAll(tmpMntPath) + + tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpDiffpath) + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err + } + + a.pathCacheLock.Lock() + delete(a.pathCache, id) + a.pathCacheLock.Unlock() + return nil +} + +// Get returns the rootfs path for the id. +// This will mount the dir at its given path +func (a *Driver) Get(id, mountLabel string) (string, error) { + parents, err := a.getParentLayerPaths(id) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + + if !exists { + m = a.getDiffPath(id) + if len(parents) > 0 { + m = a.getMountpoint(id) + } + } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + if len(parents) > 0 { + if err := a.mount(id, m, mountLabel, parents); err != nil { + return "", err + } + } + + a.pathCacheLock.Lock() + a.pathCache[id] = m + a.pathCacheLock.Unlock() + return m, nil +} + +// Put unmounts and updates list of active mounts. +func (a *Driver) Put(id string) error { + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } + + err := a.unmount(m) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + return err +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (a *Driver) isParent(id, parent string) bool { + parents, _ := getParentIDs(a.rootPath(), id) + if parent == "" && len(parents) > 0 { + return false + } + return !(len(parents) > 0 && parent != parents[0]) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Diff(id, parent) + } + + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := path.Join(a.rootPath(), "diff", id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +func (a *Driver) applyDiff(id string, diff io.Reader) error { + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id, parent string) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.DiffSize(id, parent) + } + // AUFS doesn't need the parent layer to calculate the diff size. + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.ApplyDiff(id, parent, diff) + } + + // AUFS doesn't need the parent id to apply the diff if it is the direct parent. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Changes(id, parent) + } + + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIDs(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", m, err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. + + offset := 54 + if useDirperm() { + offset += len("dirperm1") + } + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + index := 0 + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } + + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { + return + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. +func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "docker-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "docker-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go new file mode 100644 index 0000000..dc3c6a3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go @@ -0,0 +1,802 @@ +// +build linux + +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path" + "sync" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/stringid" +) + +var ( + tmpOuter = path.Join(os.TempDir(), "aufs-tests") + tmp = path.Join(tmpOuter, "aufs") +) + +func init() { + reexec.Init() +} + +func testInit(dir string, t testing.TB) graphdriver.Driver { + d, err := Init(dir, nil, nil, nil) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t testing.TB) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer os.RemoveAll(tmp) + if d == nil { + t.Fatalf("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func TestNewDriverFromExistingDir(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + testInit(tmp, t) + testInit(tmp, t) + os.RemoveAll(tmp) +} + +func TestCreateNewDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } +} + +func TestCreateNewDirStructure(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { + t.Fatal(err) + } + } +} + +func TestRemoveImage(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { + t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) + } + } +} + +func TestGetWithoutParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + expected := path.Join(tmp, "diff", "1") + if diffPath != expected { + t.Fatalf("Expected path %s got %s", expected, diffPath) + } +} + +func TestCleanupWithNoDirs(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestCleanupWithDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestMountedFalseResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + response, err := d.mounted(d.getDiffPath("1")) + if err != nil { + t.Fatal(err) + } + + if response != false { + t.Fatalf("Response if dir id 1 is mounted should be false") + } +} + +func TestMountedTrueReponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + _, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + response, err := d.mounted(d.pathCache["2"]) + if err != nil { + t.Fatal(err) + } + + if response != true { + t.Fatalf("Response if dir id 2 is mounted should be true") + } +} + +func TestMountWithParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + expected := path.Join(tmp, "mnt", "2") + if mntPath != expected { + t.Fatalf("Expected %s got %s", expected, mntPath) + } +} + +func TestRemoveMountedDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted(d.pathCache["2"]) + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatalf("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker", nil); err == nil { + t.Fatalf("Error should not be nil with parent does not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatalf("Archive should not be nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.CreateReadWrite("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.CreateReadWrite("3", "2", nil); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "2") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + diffSize, err = d.DiffSize("2", "1") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id none should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[2] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2", nil); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func testMountMoreThan42Layers(t *testing.T, mountPath string) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(mountPath) + d := testInit(mountPath, t).(*Driver) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.CreateReadWrite(current, parent, nil); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f.Close() + + if i%10 == 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Error(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Error(err) + } + if len(files) != expected { + t.Errorf("Expected %d got %d", expected, len(files)) + } +} + +func TestMountMoreThan42Layers(t *testing.T) { + os.RemoveAll(tmpOuter) + testMountMoreThan42Layers(t, tmp) +} + +func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { + defer os.RemoveAll(tmpOuter) + zeroes := "0" + for { + // This finds a mount path so that when combined into aufs mount options + // 4096 byte boundary would be in between the paths or in permission + // section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' + mountPath := path.Join(tmpOuter, zeroes, "aufs") + pathLength := 77 + len(mountPath) + + if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { + t.Logf("Using path: %s", mountPath) + testMountMoreThan42Layers(t, mountPath) + return + } + zeroes += "0" + } +} + +func BenchmarkConcurrentAccess(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + d := newDriver(b) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + numConcurent := 256 + // create a bunch of ids + var ids []string + for i := 0; i < numConcurent; i++ { + ids = append(ids, stringid.GenerateNonCryptoID()) + } + + if err := d.Create(ids[0], "", nil); err != nil { + b.Fatal(err) + } + + if err := d.Create(ids[1], ids[0], nil); err != nil { + b.Fatal(err) + } + + parent := ids[1] + ids = append(ids[2:]) + + chErr := make(chan error, numConcurent) + var outerGroup sync.WaitGroup + outerGroup.Add(len(ids)) + b.StartTimer() + + // here's the actual bench + for _, id := range ids { + go func(id string) { + defer outerGroup.Done() + if err := d.Create(id, parent, nil); err != nil { + b.Logf("Create %s failed", id) + chErr <- err + return + } + var innerGroup sync.WaitGroup + for i := 0; i < b.N; i++ { + innerGroup.Add(1) + go func() { + d.Get(id, "") + d.Put(id) + innerGroup.Done() + }() + } + innerGroup.Wait() + d.Remove(id) + }(id) + } + + outerGroup.Wait() + b.StopTimer() + close(chErr) + for err := range chErr { + if err != nil { + b.Log(err) + b.Fail() + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go new file mode 100644 index 0000000..d2325fc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIDs(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go new file mode 100644 index 0000000..da1e892 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Unmount the target specified. +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go new file mode 100644 index 0000000..8062bae --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "syscall" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go new file mode 100644 index 0000000..d030b06 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. +const MsRemount = 0 + +func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + return errors.New("mount is not implemented on this platform") +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go new file mode 100644 index 0000000..44420f1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go @@ -0,0 +1,530 @@ +// +build linux + +package btrfs + +/* +#include +#include +#include +#include + +static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) { + snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value); +} +*/ +import "C" + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + "syscall" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/go-units" + "github.com/opencontainers/runc/libcontainer/label" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +var ( + quotaEnabled = false + userDiskQuota = false +) + +type btrfsOptions struct { + minSpace uint64 + size uint64 +} + +// Init returns a new BTRFS driver. +// An error is returned if BTRFS is not supported. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + + if fsMagic != graphdriver.FsMagicBtrfs { + return nil, graphdriver.ErrPrerequisites + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + opt, err := parseOptions(options) + if err != nil { + return nil, err + } + + if userDiskQuota { + if err := subvolEnableQuota(home); err != nil { + return nil, err + } + quotaEnabled = true + } + + driver := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + options: opt, + } + + return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (btrfsOptions, error) { + var options btrfsOptions + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "btrfs.min_space": + minSpace, err := units.RAMInBytes(val) + if err != nil { + return options, err + } + userDiskQuota = true + options.minSpace = uint64(minSpace) + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + //root of the file system + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions +} + +// String prints the name of the driver (btrfs). +func (d *Driver) String() string { + return "btrfs" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Build Version" and "Library Version" of the btrfs libraries used. +// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// GetMetadata returns empty metadata for this driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + if quotaEnabled { + if err := subvolDisableQuota(d.home); err != nil { + return err + } + } + + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat syscall.Stat_t + if err := syscall.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func subvolEnableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolDisableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_DISABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolRescanQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if !quotaEnabled { + if err := subvolEnableQuota(d.home); err != nil { + return err + } + quotaEnabled = true + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + if err := subvolRescanQuota(d.home); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// Exists checks if the id exists in the filesystem. +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirID(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 0000000..0038dbc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,63 @@ +// +build linux + +package btrfs + +import ( + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsSubvolDelete(t *testing.T) { + d := graphtest.GetDriver(t, "btrfs") + if err := d.CreateReadWrite("test", "", nil); err != nil { + t.Fatal(err) + } + defer graphtest.PutDriver(t) + + dir, err := d.Get("test", "") + if err != nil { + t.Fatal(err) + } + defer d.Put("test") + + if err := subvolCreate(dir, "subvoltest"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); err != nil { + t.Fatal(err) + } + + if err := d.Remove("test"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); !os.IsNotExist(err) { + t.Fatalf("expected not exist error on nested subvol, got: %v", err) + } +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 0000000..f070888 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go new file mode 100644 index 0000000..73d90cd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go @@ -0,0 +1,26 @@ +// +build linux,!btrfs_noversion + +package btrfs + +/* +#include + +// around version 3.16, they did not define lib version yet +#ifndef BTRFS_LIB_VERSION +#define BTRFS_LIB_VERSION -1 +#endif + +// upstream had removed it, but now it will be coming back +#ifndef BTRFS_BUILD_VERSION +#define BTRFS_BUILD_VERSION "-" +#endif +*/ +import "C" + +func btrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} + +func btrfsLibVersion() int { + return int(C.BTRFS_LIB_VERSION) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go new file mode 100644 index 0000000..f802fbc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go @@ -0,0 +1,14 @@ +// +build linux,btrfs_noversion + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go new file mode 100644 index 0000000..15a6e75 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go @@ -0,0 +1,13 @@ +// +build linux,!btrfs_noversion + +package btrfs + +import ( + "testing" +) + +func TestLibVersion(t *testing.T) { + if btrfsLibVersion() <= 0 { + t.Errorf("expected output from btrfs lib version > 0") + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go new file mode 100644 index 0000000..5ea604f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go @@ -0,0 +1,67 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increaes the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + m.count++ + c.mu.Unlock() + return m.count +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + m.count-- + c.mu.Unlock() + return m.count +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md new file mode 100644 index 0000000..b23bbb1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,96 @@ +## devicemapper - a storage backend based on Device Mapper + +### Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. The preferred model is +to have a thin pool reserved outside of Docker and passed to the +daemon via the `--storage-opt dm.thinpooldev` option. + +As a fallback if no thin pool is provided, loopback files will be +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Docker daemon has a +`--storage-opt dm.thinpooldev` argument provided. + +In loopback, a thin pool is created at `/var/lib/docker/devicemapper` +(devicemapper graph location) based on two block devices, one for +data and one for metadata. By default these block devices are created +automatically by using loopback mounts of automatically created sparse +files. + +The default loopback files used are +`/var/lib/docker/devicemapper/devicemapper/data` and +`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata +required to map from docker entities to the corresponding devicemapper +volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json` +file (encoded as Json). + +In order to support multiple devicemapper graphs on a system, the thin +pool will be named something like: `docker-0:33-19478248-pool`, where +the `0:33` part is the minor/major device nr and `19478248` is the +inode number of the `/var/lib/docker/devicemapper` directory. + +On the thin pool, docker automatically creates a base thin device, +called something like `docker-0:33-19478248-base` of a fixed +size. This is automatically formatted with an empty filesystem on +creation. This device is the base of all docker images and +containers. All base images are snapshots of this device and those +images are then in turn used as snapshots for other images and +eventually containers. + +### Information on `docker info` + +As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver +will display something like: + + $ sudo docker info + [...] + Storage Driver: devicemapper + Pool Name: docker-253:1-17538953-pool + Pool Blocksize: 65.54 kB + Base Device Size: 107.4 GB + Data file: /dev/loop4 + Metadata file: /dev/loop4 + Data Space Used: 2.536 GB + Data Space Total: 107.4 GB + Data Space Available: 104.8 GB + Metadata Space Used: 7.93 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.14 GB + Udev Sync Supported: true + Data loop file: /home/docker/devicemapper/devicemapper/data + Metadata loop file: /home/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.82-git (2013-10-04) + [...] + +#### status items + +Each item in the indented section under `Storage Driver: devicemapper` are +status information about the driver. + * `Pool Name` name of the devicemapper pool for this driver. + * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation. + * `Base Device Size` tells the maximum size of a container and image + * `Data file` blockdevice file used for the devicemapper data + * `Metadata file` blockdevice file used for the devicemapper metadata + * `Data Space Used` tells how much of `Data file` is currently used + * `Data Space Total` tells max size the `Data file` + * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. + * `Metadata Space Used` tells how much of `Metadata file` is currently used + * `Metadata Space Total` tells max size the `Metadata file` + * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. + * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`. + * `Data loop file` file attached to `Data file`, if loopback device is used + * `Metadata loop file` file attached to `Metadata file`, if loopback device is used + * `Library Version` from the libdevmapper used + +### About the devicemapper options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the `--storage-opt` flags. +This uses the `dm` prefix and would be used something like `docker daemon --storage-opt dm.foo=bar`. + +These options are currently documented both in [the man +page](../../../man/docker.1.md) and in [the online +documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#/storage-driver-options). +If you add an options, update both the `man` page and the documentation. diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go new file mode 100644 index 0000000..b8e7625 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go @@ -0,0 +1,2727 @@ +// +build linux + +package devmapper + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/loopback" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/go-units" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + // We retry device removal so many a times that even error messages + // will fill up console during normal operation. So only log Fatal + // messages by default. + logLevel = devicemapper.LogLevelFatal + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false + defaultMinFreeSpacePercent uint32 = 10 +) + +const deviceSetMetaFile string = "deviceset-metadata" +const transactionMetaFile string = "transaction-metadata" + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be acquired *before* the device lock, and + // multiple device locks should be acquired parent before child. + lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 //min free space percentage in thinpool + xfsNospaceRetries string // max retries when xfs receives ENOSPC +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status returns the information about the device. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for meta data. + Metadata DiskUsage + // BaseDeviceSize is base size of container and image + BaseDeviceSize uint64 + // BaseDeviceFS is backing filesystem. + BaseDeviceFS string + // SectorSize size of the vector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true then the device is not unmounted. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// Structure used to export image/container metadata in docker inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus returns information about device mounted containing its id, size and sector information. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. + Size uint64 + // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. + TransactionID uint64 + // SizeInSectors indicates the size of the sectors allocated. + SizeInSectors uint64 + // MappedSectors indicates number of mapped sectors. + MappedSectors uint64 + // HighestMappedSector is the pointer to the highest mapped sector. + HighestMappedSector uint64 +} + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *devInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = "base" + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *devInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *devInfo) string { + file := info.Hash + if file == "" { + file = "base" + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) transactionMetaFile() string { + return path.Join(devices.metadataDir(), transactionMetaFile) +} + +func (devices *DeviceSet) deviceSetMetaFile() string { + return path.Join(devices.metadataDir(), deviceSetMetaFile) +} + +func (devices *DeviceSet) oldMetadataFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + if devices.thinPoolDevice == "" { + return devices.devicePrefix + "-pool" + } + return devices.thinPoolDevice +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := os.Stat(filename) + return err == nil +} + +// ensureImage creates a sparse file of bytes at the path +// /devicemapper/. +// If the file already exists and new size is larger than its current size, it grows to the new size. +// Either way it returns the full path. +func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + if (devices.deviceIDMap[deviceID/8] & mask) != 0 { + return false + } + return true +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return false + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return true + } + } + + if err := s.Err(); err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + } + return false +} + +func determineDefaultFS() string { + if xfsSupported() { + return "xfs" + } + + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") + return "ext4" +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + args := []string{} + args = append(args, devices.mkfsArgs...) + + args = append(args, devname) + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. + if !devices.deferredDelete { + return + } + + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. + logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if err != devicemapper.ErrEnxio { + return err + } + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. + logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debug("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + } + + // Device does not exist. + if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save UUID for first time (for +// upgrade case or verify UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is nil (upgrade case), save it and return success. + if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/docker/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + } + + defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) + + switch devices.BaseDeviceFilesystem { + case "ext4": + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case "xfs": + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. + if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +// DMLog implements logging using DevMapperLogger interface. +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { + // By default libdm sends us all the messages including debug ones. + // We need to filter out messages here and figure out which one + // should be printed. + if level > logLevel { + return + } + + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + if level <= devicemapper.LogLevelErr { + logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else if level <= devicemapper.LogLevelInfo { + logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else { + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: %s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. + if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. + if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + /* + * Driver version 4.27.0 and greater support deferred activation + * feature. + */ + + logrus.Debugf("devicemapper: driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return graphdriver.ErrNotSupported + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return graphdriver.ErrNotSupported + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + stat, err := file.Stat() + if err != nil { + return 0, 0, err + } + + dev := stat.Sys().(*syscall.Stat_t).Rdev + majorNum := major(dev) + minorNum := minor(dev) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. +func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. + if enableDeferredRemoval { + if !driverDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") + } + if !devicemapper.LibraryDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") + } + logrus.Debug("devmapper: Deferred removal support enabled.") + devices.deferredRemove = true + } + + if enableDeferredDeletion { + if !devices.deferredRemove { + return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") + } + logrus.Debug("devmapper: Deferred deletion support enabled.") + devices.deferredDelete = true + } + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) error { + // give ourselves to libdm as a log handler + devicemapper.LogInit(devices) + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return graphdriver.ErrNotSupported + } + + if err := devices.enableDeferredRemovalDeletion(); err != nil { + return err + } + + // https://github.com/docker/docker/issues/4036 + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + if dockerversion.IAmStatic == "true" { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") + } else { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") + } + + if !devices.overrideUdevSyncCheck { + return graphdriver.ErrNotSupported + } + } + + //create the root dir of the devmapper driver ownership to match this + //daemon's remapped root uid/gid so containers can start properly + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + return err + } + + // Set the device prefix from the device id and inode of the docker root dir + + st, err := os.Stat(devices.root) + if err != nil { + return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) + } + sysSt := st.Sys().(*syscall.Stat_t) + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. + // docker-maj,min[-inode] stands for: + // - Managed by docker + // - The target of this device is at major and minor + // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. + devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) + logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the thin-pool device + poolExists, err := devices.thinPoolExists(devices.getPoolName()) + if err != nil { + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, + // so we add this badhack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in /devicemapper/data and + // /devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if !poolExists && devices.thinPoolDevice == "" { + logrus.Debug("devmapper: Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in /devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = loopback.AttachLoopDevice(data) + if err != nil { + return err + } + devices.dataLoopFile = data + devices.dataDevice = dataFile.Name() + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in /devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = loopback.AttachLoopDevice(metadata) + if err != nil { + return err + } + devices.metadataLoopFile = metadata + devices.metadataDevice = metadataFile.Name() + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + } + + // Pool already exists and caller did not pass us a pool. That means + // we probably created pool earlier and could not remove it as some + // containers were still using it. Detect some of the properties of + // pool, like is it using loop devices. + if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. + if err := devices.saveMetadata(info); err != nil { + info.Deleted = false + return err + } + + devices.nrDeletedDevices++ + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { + if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) + return err + } + + defer devices.closeTransaction() + + err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) + if err != nil { + // If syncDelete is true, we want to return error. If deferred + // deletion is not enabled, we return an error. If error is + // something other then EBUSY, return an error. + if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy { + logrus.Debugf("devmapper: Error deleting device: %s", err) + return err + } + } + + if err == nil { + if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil { + return err + } + // If device was already in deferred delete state that means + // deletion was being tried again later. Reduce the deleted + // device count. + if info.Deleted { + devices.nrDeletedDevices-- + } + devices.markDeviceIDFree(info.DeviceID) + } else { + if err := devices.markForDeferredDeletion(info); err != nil { + return err + } + } + + return nil +} + +// Issue discard only if device open count is zero. +func (devices *DeviceSet) issueDiscard(info *devInfo) error { + logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) + defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) + // This is a workaround for the kernel not discarding block so + // on the thin pool when we remove a thinp device, so we do it + // manually. + // Even if device is deferred deleted, activate it and issue + // discards. + if err := devices.activateDeviceIfNeeded(info, true); err != nil { + return err + } + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.OpenCount != 0 { + logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) + return nil + } + + if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { + logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) + } + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { + if devices.doBlkDiscard { + devices.issueDiscard(info) + } + + // Try to deactivate device in case it is active. + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Error deactivating device: %s", err) + return err + } + + if err := devices.deleteTransaction(info, syncDelete); err != nil { + return err + } + + return nil +} + +// DeleteDevice will return success if device has been marked for deferred +// removal. If one wants to override that and want DeleteDevice() to fail if +// device was busy and could not be deleted, set syncDelete=true. +func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { + logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) + defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + return devices.deleteDevice(info, syncDelete) +} + +func (devices *DeviceSet) deactivatePool() error { + logrus.Debug("devmapper: deactivatePool() START") + defer logrus.Debug("devmapper: deactivatePool() END") + devname := devices.getPoolDevName() + + devinfo, err := devicemapper.GetInfo(devname) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + if err := devicemapper.RemoveDevice(devname); err != nil { + return err + } + + if d, err := devicemapper.GetDeps(devname); err == nil { + logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(info *devInfo) error { + logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) + defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + + if devices.deferredRemove { + if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { + return err + } + } else { + if err := devices.removeDevice(info.Name()); err != nil { + return err + } + } + return nil +} + +// Issues the underlying dm remove operation. +func (devices *DeviceSet) removeDevice(devname string) error { + var err error + + logrus.Debugf("devmapper: removeDevice START(%s)", devname) + defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) + + for i := 0; i < 200; i++ { + err = devicemapper.RemoveDevice(devname) + if err == nil { + break + } + if err != devicemapper.ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + } + + return err +} + +func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { + if !devices.deferredRemove { + return nil + } + + logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name()) + + devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) + if err != nil { + return err + } + + if devinfo != nil && devinfo.DeferredRemove == 0 { + return nil + } + + // Cancel deferred remove + if err := devices.cancelDeferredRemoval(info); err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if err != devicemapper.ErrEnxio { + return err + } + } + return nil +} + +func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { + logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) + + var err error + + // Cancel deferred remove + for i := 0; i < 100; i++ { + err = devicemapper.CancelDeferredRemove(info.Name()) + if err != nil { + if err == devicemapper.ErrBusy { + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + continue + } + } + break + } + return err +} + +// Shutdown shuts down the device by unmounting the root. +func (devices *DeviceSet) Shutdown(home string) error { + logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) + logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) + defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) + + // Stop deletion worker. This should start delivering new events to + // ticker channel. That means no new instance of cleanupDeletedDevice() + // will run after this call. If one instance is already running at + // the time of the call, it must be holding devices.Lock() and + // we will block on this lock till cleanup function exits. + devices.deletionWorkerTicker.Stop() + + devices.Lock() + // Save DeviceSet Metadata first. Docker kills all threads if they + // don't finish in certain time. It is possible that Shutdown() + // routine does not finish in time as we loop trying to deactivate + // some devices while these are busy. In that case shutdown() routine + // will be killed and we will not get a chance to save deviceset + // metadata. Hence save this early before trying to deactivate devices. + devices.saveDeviceSetMetaData() + + // ignore the error since it's just a best effort to not try to unmount something that's mounted + mounts, _ := mount.GetMounts() + mounted := make(map[string]bool, len(mounts)) + for _, mnt := range mounts { + mounted[mnt.Mountpoint] = true + } + + if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + + if mounted[p] { + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies. + if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil { + logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err) + } + } + + if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil { + logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err) + } else { + if err := devices.deactivateDevice(devInfo); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err) + } + } + + return nil + }); err != nil && !os.IsNotExist(err) { + devices.Unlock() + return err + } + + devices.Unlock() + + info, _ := devices.lookupDeviceWithLock("") + if info != nil { + info.lock.Lock() + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err) + } + devices.Unlock() + info.lock.Unlock() + } + + devices.Lock() + if devices.thinPoolDevice == "" { + if err := devices.deactivatePool(); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err) + } + } + devices.Unlock() + + return nil +} + +// Recent XFS changes allow changing behavior of filesystem in case of errors. +// When thin pool gets full and XFS gets ENOSPC error, currently it tries +// IO infinitely and sometimes it can block the container process +// and process can't be killWith 0 value, XFS will not retry upon error +// and instead will shutdown filesystem. + +func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { + dmDevicePath, err := os.Readlink(info.DevName()) + if err != nil { + return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err) + } + + dmDeviceName := path.Base(dmDevicePath) + filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" + maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) + if err != nil { + return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err) + } + defer maxRetriesFile.Close() + + // Set max retries to 0 + _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) + if err != nil { + return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err) + } + return nil +} + +// MountDevice mounts the device if not already mounted. +func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + if info.Deleted { + return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + if fstype == "xfs" && devices.xfsNospaceRetries != "" { + if err := devices.xfsSetNospaceRetries(info); err != nil { + syscall.Unmount(path, syscall.MNT_DETACH) + devices.deactivateDevice(info) + return err + } + } + + return nil +} + +// UnmountDevice unmounts the device and removes it from hash. +func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { + return err + } + logrus.Debug("devmapper: Unmount done") + + if err := devices.deactivateDevice(info); err != nil { + return err + } + + return nil +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. +func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + status.DeferredDeletedDeviceCount = devices.nrDeletedDevices + status.BaseDeviceSize = devices.getBaseDeviceSize() + status.BaseDeviceFS = devices.getBaseDeviceFS() + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + status.Data.Available = status.Data.Total - status.Data.Used + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + status.Metadata.Available = status.Metadata.Total - status.Metadata.Used + + status.SectorSize = blockSizeInSectors * 512 + + if check, _ := devices.isRealFile(devices.dataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) + if err == nil && actualSpace < status.Data.Available { + status.Data.Available = actualSpace + } + } + + if check, _ := devices.isRealFile(devices.metadataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) + if err == nil && actualSpace < status.Metadata.Available { + status.Metadata.Available = actualSpace + } + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 + } + + return status +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} + return metadata, nil +} + +// NewDeviceSet creates the device set based on the options provided. +func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + foundBlkDiscard := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + + devices.minFreeSpacePercent = uint32(minFreeSpacePercent) + case "dm.xfs_nospace_max_retries": + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + devices.xfsNospaceRetries = val + default: + return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) + } + } + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go new file mode 100644 index 0000000..9ab3e4f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. +// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go new file mode 100644 index 0000000..5c2abce --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go @@ -0,0 +1,110 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +func init() { + // Reduce the size the the base fs and loopback for the tests + defaultDataLoopbackSize = 300 * 1024 * 1024 + defaultMetaDataLoopbackSize = 200 * 1024 * 1024 + defaultBaseFsSize = 300 * 1024 * 1024 + defaultUdevSyncOverride = true + if err := graphtest.InitLoopbacks(); err != nil { + panic(err) + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown +func TestDevmapperSetup(t *testing.T) { + graphtest.GetDriver(t, "devicemapper") +} + +func TestDevmapperCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "devicemapper") +} + +func TestDevmapperCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "devicemapper") +} + +func TestDevmapperCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "devicemapper") +} + +func TestDevmapperTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +func TestDevmapperReduceLoopBackSize(t *testing.T) { + tenMB := int64(10 * 1024 * 1024) + testChangeLoopBackSize(t, -tenMB, defaultDataLoopbackSize, defaultMetaDataLoopbackSize) +} + +func TestDevmapperIncreaseLoopBackSize(t *testing.T) { + tenMB := int64(10 * 1024 * 1024) + testChangeLoopBackSize(t, tenMB, defaultDataLoopbackSize+tenMB, defaultMetaDataLoopbackSize+tenMB) +} + +func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataSize int64) { + driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + defer graphtest.PutDriver(t) + // make sure data or metadata loopback size are the default size + if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) { + t.Fatalf("data or metadata loop back size is incorrect") + } + if err := driver.Cleanup(); err != nil { + t.Fatal(err) + } + //Reload + d, err := Init(driver.home, []string{ + fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta), + fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta), + }, nil, nil) + if err != nil { + t.Fatalf("error creating devicemapper driver: %v", err) + } + driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) { + t.Fatalf("data or metadata loop back size is incorrect") + } + if err := driver.Cleanup(); err != nil { + t.Fatal(err) + } +} + +// Make sure devices.Lock() has been release upon return from cleanupDeletedDevices() function +func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) { + driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + defer graphtest.PutDriver(t) + + // Call cleanupDeletedDevices() and after the call take and release + // DeviceSet Lock. If lock has not been released, this will hang. + driver.DeviceSet.cleanupDeletedDevices() + + doneChan := make(chan bool) + + go func() { + driver.DeviceSet.Lock() + defer driver.DeviceSet.Unlock() + doneChan <- true + }() + + select { + case <-time.After(time.Second * 5): + // Timer expired. That means lock was not released upon + // function return and we are deadlocked. Release lock + // here so that cleanup could succeed and fail the test. + driver.DeviceSet.Unlock() + t.Fatalf("Could not acquire devices lock after call to cleanupDeletedDevices()") + case <-doneChan: + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go new file mode 100644 index 0000000..7cf422c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go @@ -0,0 +1,231 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/go-units" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// GetMetadata returns a map of information about the device. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem. +func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + } + return err +} + +// Exists checks to see if the device exists. +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go new file mode 100644 index 0000000..cca1fe1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go @@ -0,0 +1,89 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. +func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go new file mode 100644 index 0000000..f0bce56 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -0,0 +1,270 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites retuned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + pluginDriver, err := lookupPlugin(name, pg, config) + if err == nil { + return pluginDriver, nil + } + logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver + return GetDriver(name, pg, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". + logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver: %s", name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// isDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. +func isDriverNotSupported(err error) bool { + return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + driversMap[driver] = true + } + } + return driversMap +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go new file mode 100644 index 0000000..2891a84 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go @@ -0,0 +1,19 @@ +package graphdriver + +import "syscall" + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } +) + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go new file mode 100644 index 0000000..5c8d0e2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -0,0 +1,135 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/mount" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicEcryptfs filesystem id for eCryptfs + FsMagicEcryptfs = FsMagic(0xf15f) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "btrfs", + "zfs", + "overlay2", + "overlay", + "devicemapper", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicGPFS: "gpfs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicOverlay: "overlayfs", + FsMagicRAMFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicVxFS: "vxfs", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} + +// NewFsChecker returns a checker configured for the provied FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go new file mode 100644 index 0000000..7daf01c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go @@ -0,0 +1,97 @@ +// +build solaris,cgo + +package graphdriver + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return 0, nil +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewFsChecker returns a checker configured for the provied FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on Solaris. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +//Solaris supports only ZFS for now +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + + cs := C.CString(filepath.Dir(mountPath)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) + C.free(unsafe.Pointer(buf)) + return false, ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return true, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go new file mode 100644 index 0000000..4a87560 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!freebsd,!solaris + +package graphdriver + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "unsupported", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go new file mode 100644 index 0000000..ffd30c2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go new file mode 100644 index 0000000..20826cd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -0,0 +1,169 @@ +package graphdriver + +import ( + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go new file mode 100644 index 0000000..def822b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go @@ -0,0 +1,259 @@ +// +build linux freebsd + +package graphtest + +import ( + "bytes" + "io" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/stringid" +) + +// DriverBenchExists benchmarks calls to exist +func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if !driver.Exists(base) { + b.Fatal("Newly created image doesn't exist") + } + } +} + +// DriverBenchGetEmpty benchmarks calls to get on an empty layer +func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := driver.Get(base, "") + b.StopTimer() + if err != nil { + b.Fatalf("Error getting mount: %s", err) + } + if err := driver.Put(base); err != nil { + b.Fatalf("Error putting mount: %s", err) + } + b.StartTimer() + } +} + +// DriverBenchDiffBase benchmarks calls to diff on a root layer +func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 3); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(base, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffN benchmarks calls to diff on two layers with +// a provided number of files on the lower and upper layers. +func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, bottom, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, top, 6); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffApplyN benchmarks calls to diff and apply together +func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + b.Fatal(err) + } + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + b.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + b.Fatal(err) + } + + b.StartTimer() + + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, "", arch) + if err != nil { + b.Fatal(err) + } + + b.StopTimer() + arch.Close() + + if applyDiffSize != diffSize { + // TODO: enforce this + //b.Fatalf("Apply diff size different, got %d, expected %s", applyDiffSize, diffSize) + } + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + b.Fatal(err) + } + } +} + +// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers. +func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 50); err != nil { + b.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(topLayer, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers. +func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + content := []byte("test content") + if err := addFile(driver, base, "testfile.txt", content); err != nil { + b.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + b.Fatal(err) + } + + root, err := driver.Get(topLayer, "") + if err != nil { + b.Fatal(err) + } + defer driver.Put(topLayer) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + + // Read content + c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt")) + if err != nil { + b.Fatal(err) + } + + b.StopTimer() + if bytes.Compare(c, content) != 0 { + b.Fatalf("Wrong content in file %v, expected %v", c, content) + } + b.StartTimer() + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go new file mode 100644 index 0000000..6e952de --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go @@ -0,0 +1,358 @@ +// +build linux freebsd solaris + +package graphtest + +import ( + "bytes" + "io/ioutil" + "math/rand" + "os" + "path" + "reflect" + "syscall" + "testing" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +var ( + drv *Driver +) + +// Driver conforms to graphdriver.Driver interface and +// contains information such as root and reference count of the number of clients using it. +// This helps in testing drivers added into the framework. +type Driver struct { + graphdriver.Driver + root string + refCount int +} + +func newDriver(t testing.TB, name string, options []string) *Driver { + root, err := ioutil.TempDir("", "docker-graphtest-") + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } + + d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root}) + if err != nil { + t.Logf("graphdriver: %v\n", err) + if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS { + t.Skipf("Driver %s not supported", name) + } + t.Fatal(err) + } + return &Driver{d, root, 1} +} + +func cleanup(t testing.TB, d *Driver) { + if err := drv.Cleanup(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.root) +} + +// GetDriver create a new driver with given name or return an existing driver with the name updating the reference count. +func GetDriver(t testing.TB, name string, options ...string) graphdriver.Driver { + if drv == nil { + drv = newDriver(t, name, options) + } else { + drv.refCount++ + } + return drv +} + +// PutDriver removes the driver if it is no longer used and updates the reference count. +func PutDriver(t testing.TB) { + if drv == nil { + t.Skip("No driver to put!") + } + drv.refCount-- + if drv.refCount == 0 { + cleanup(t, drv) + drv = nil + } +} + +// DriverTestCreateEmpty creates a new image and verifies it is empty and the right metadata +func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + if err := driver.Create("empty", "", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + }() + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") +} + +// DriverTestCreateBase create a base driver and verify. +func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + createBase(t, driver, "Base") + defer func() { + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } + }() + verifyBase(t, driver, "Base") +} + +// DriverTestCreateSnap Create a driver and snap and verify. +func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + createBase(t, driver, "Base") + + defer func() { + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } + }() + + if err := driver.Create("Snap", "Base", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + }() + + verifyBase(t, driver, "Snap") +} + +// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers +func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + content := []byte("test content") + if err := addFile(driver, base, "testfile.txt", content); err != nil { + t.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + t.Fatal(err) + } + + err = checkManyLayers(driver, topLayer, layerCount) + if err != nil { + t.Fatal(err) + } + + if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil { + t.Fatal(err) + } +} + +// DriverTestDiffApply tests diffing and applying produces the same layer +func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + deleteFile := "file-remove.txt" + deleteFileContent := []byte("This file should get removed in upper!") + deleteDir := "var/lib" + + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := addFile(driver, base, deleteFile, deleteFileContent); err != nil { + t.Fatal(err) + } + + if err := addDirectory(driver, base, deleteDir); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := removeAll(driver, upper, deleteFile, deleteDir); err != nil { + t.Fatal(err) + } + + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + t.Fatal(err) + } + + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + t.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := checkFile(driver, diff, deleteFile, deleteFileContent); err != nil { + t.Fatal(err) + } + + arch, err := driver.Diff(upper, base) + if err != nil { + t.Fatal(err) + } + + buf := bytes.NewBuffer(nil) + if _, err := buf.ReadFrom(arch); err != nil { + t.Fatal(err) + } + if err := arch.Close(); err != nil { + t.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes())) + if err != nil { + t.Fatal(err) + } + + if applyDiffSize != diffSize { + t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize) + } + + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteFile); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteDir); err != nil { + t.Fatal(err) + } +} + +// DriverTestChanges tests computed changes on a layer matches changes made +func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, 20, 3); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + expectedChanges, err := changeManyFiles(driver, upper, 20, 6) + if err != nil { + t.Fatal(err) + } + + changes, err := driver.Changes(upper, base) + if err != nil { + t.Fatal(err) + } + + if err = checkChanges(expectedChanges, changes); err != nil { + t.Fatal(err) + } +} + +func writeRandomFile(path string, size uint64) error { + buf := make([]int64, size/8) + + r := rand.NewSource(0) + for i := range buf { + buf[i] = r.Int63() + } + + // Cast to []byte + header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf)) + header.Len *= 8 + header.Cap *= 8 + data := *(*[]byte)(unsafe.Pointer(&header)) + + return ioutil.WriteFile(path, data, 0700) +} + +// DriverTestSetQuota Create a driver and test setting quota. +func DriverTestSetQuota(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + createOpts := &graphdriver.CreateOpts{} + createOpts.StorageOpt = make(map[string]string, 1) + createOpts.StorageOpt["size"] = "50M" + if err := driver.Create("zfsTest", "Base", createOpts); err != nil { + t.Fatal(err) + } + + mountPath, err := driver.Get("zfsTest", "") + if err != nil { + t.Fatal(err) + } + + quota := uint64(50 * units.MiB) + err = writeRandomFile(path.Join(mountPath, "file"), quota*2) + if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT { + t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err) + } + +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go new file mode 100644 index 0000000..a50c521 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go @@ -0,0 +1 @@ +package graphtest diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go new file mode 100644 index 0000000..35bf6d1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go @@ -0,0 +1,342 @@ +package graphtest + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "sort" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +func randomContent(size int, seed int64) []byte { + s := rand.NewSource(seed) + content := make([]byte, size) + + for i := 0; i < len(content); i += 7 { + val := s.Int63() + for j := 0; i+j < len(content) && j < 7; j++ { + content[i+j] = byte(val) + val >>= 8 + } + } + + return content +} + +func addFiles(drv graphdriver.Driver, layer string, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil { + return err + } + if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { + return err + } + + return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755) +} + +func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + fileContent, err := ioutil.ReadFile(path.Join(root, filename)) + if err != nil { + return err + } + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + + return nil +} + +func addFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return ioutil.WriteFile(path.Join(root, filename), content, 0755) +} + +func addDirectory(drv graphdriver.Driver, layer, dir string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return os.MkdirAll(path.Join(root, dir), 0755) +} + +func removeAll(drv graphdriver.Driver, layer string, names ...string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for _, filename := range names { + if err := os.RemoveAll(path.Join(root, filename)); err != nil { + return err + } + } + return nil +} + +func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if _, err := os.Stat(path.Join(root, filename)); err == nil { + return fmt.Errorf("file still exists: %s", path.Join(root, filename)) + } else if !os.IsNotExist(err) { + return err + } + + return nil +} + +func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil { + return err + } + } + } + + return nil +} + +func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) ([]archive.Change, error) { + root, err := drv.Get(layer, "") + if err != nil { + return nil, err + } + defer drv.Put(layer) + + changes := []archive.Change{} + for i := 0; i < count; i += 100 { + archiveRoot := fmt.Sprintf("/directory-%d", i) + if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil { + return nil, err + } + for j := 0; i+j < count && j < 100; j++ { + if j == 0 { + changes = append(changes, archive.Change{ + Path: archiveRoot, + Kind: archive.ChangeModify, + }) + } + var change archive.Change + switch j % 3 { + // Update file + case 0: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeModify + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Add file + case 1: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) + change.Kind = archive.ChangeAdd + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Remove file + case 2: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeDelete + if err := os.Remove(path.Join(root, change.Path)); err != nil { + return nil, err + } + } + changes = append(changes, change) + } + } + + return changes, nil +} + +func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + fileContent, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + content := randomContent(64, seed+int64(i+j)) + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + } + } + + return nil +} + +type changeList []archive.Change + +func (c changeList) Less(i, j int) bool { + if c[i].Path == c[j].Path { + return c[i].Kind < c[j].Kind + } + return c[i].Path < c[j].Path +} +func (c changeList) Len() int { return len(c) } +func (c changeList) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +func checkChanges(expected, actual []archive.Change) error { + if len(expected) != len(actual) { + return fmt.Errorf("unexpected number of changes, expected %d, got %d", len(expected), len(actual)) + } + sort.Sort(changeList(expected)) + sort.Sort(changeList(actual)) + + for i := range expected { + if expected[i] != actual[i] { + return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i]) + } + } + + return nil +} + +func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil { + return err + } + layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + if err := os.MkdirAll(layerDir, 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil { + return err + } + + return nil +} + +func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) { + lastLayer := baseLayer + for i := 1; i <= count; i++ { + nextLayer := stringid.GenerateRandomID() + if err := drv.Create(nextLayer, lastLayer, nil); err != nil { + return "", err + } + if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil { + return "", err + } + + lastLayer = nextLayer + + } + return lastLayer, nil +} + +func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id")) + if err != nil { + return err + } + + if bytes.Compare(layerIDBytes, []byte(layer)) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer)) + } + + for i := count; i > 0; i-- { + layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + + thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id")) + if err != nil { + return err + } + if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes) + } + layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id")) + if err != nil { + return err + } + } + return nil +} + +// readDir reads a directory just like ioutil.ReadDir() +// then hides specific files (currently "lost+found") +// so the tests don't "see" it +func readDir(dir string) ([]os.FileInfo, error) { + a, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + b := a[:0] + for _, x := range a { + if x.Name() != "lost+found" { // ext4 always have this dir + b = append(b, x) + } + } + + return b, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go new file mode 100644 index 0000000..49b0c2c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go @@ -0,0 +1,143 @@ +// +build linux freebsd + +package graphtest + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" +) + +// InitLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. +func InitLoopbacks() error { + statT, err := getBaseLoopStats() + if err != nil { + return err + } + // create at least 8 loopback files, ya, that is a good number + for i := 0; i < 8; i++ { + loopPath := fmt.Sprintf("/dev/loop%d", i) + // only create new loopback files if they don't exist + if _, err := os.Stat(loopPath); err != nil { + if mkerr := syscall.Mknod(loopPath, + uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { + return mkerr + } + os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) + } + } + return nil +} + +// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the +// loop0 device on the system. If it does not exist we assume 0,0,0660 for the +// stat data +func getBaseLoopStats() (*syscall.Stat_t, error) { + loop0, err := os.Stat("/dev/loop0") + if err != nil { + if os.IsNotExist(err) { + return &syscall.Stat_t{ + Uid: 0, + Gid: 0, + Mode: 0660, + }, nil + } + return nil, err + } + return loop0.Sys().(*syscall.Stat_t), nil +} + +func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } + + if fi.Mode()&os.ModePerm != mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } + + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + if stat.Uid != uid { + t.Fatalf("%s no owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } + } +} + +func createBase(t testing.TB, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + if err := driver.CreateReadWrite(name, "", nil); err != nil { + t.Fatal(err) + } + + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + t.Fatal(err) + } + + file := path.Join(dir, "a file") + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } +} + +func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go new file mode 100644 index 0000000..666a5c0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go @@ -0,0 +1,174 @@ +// +build linux + +package overlay + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type copyFlags int + +const ( + copyHardlink copyFlags = 1 << iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags copyFlags) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags©Hardlink != 0 { + isHardlink = true + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 0000000..121b72e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,462 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "strconv" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/opencontainers/runc/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") + backingFs = "" +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is a "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When an overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with an empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + supportsDType bool +} + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + supportsDType, err := fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v1.16 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + } +} + +// GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + return rootDir, nil + } + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + var ( + lowerDir = path.Join(d.dir(string(lowerID)), "root") + upperDir = path.Join(dir, "upper") + workDir = path.Join(dir, "work") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + ) + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. +func (d *Driver) Put(id string) error { + // If id has a root, just return + if _, err := os.Stat(path.Join(d.dir(id), "root")); err == nil { + return nil + } + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if err := syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return nil +} + +// ApplyDiff applies the new layer on top of the root, if parent does not exist with will return an ErrApplyDiffFallback error. +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer. This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + return 0, err + } + + options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} + if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go new file mode 100644 index 0000000..34b6d80 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go @@ -0,0 +1,93 @@ +// +build linux + +package overlay + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/archive" +) + +func init() { + // Do not sure chroot to speed run time and allow archive + // errors or hangs to be debugged directly from the test process. + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, "overlay") +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlay") +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlay") +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlay") +} + +func TestOverlay50LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 50, "overlay") +} + +// Fails due to bug in calculating changes after apply +// likely related to https://github.com/docker/docker/issues/21555 +func TestOverlayDiffApply10Files(t *testing.T) { + t.Skipf("Fails to compute changes after apply intermittently") + graphtest.DriverTestDiffApply(t, 10, "overlay") +} + +func TestOverlayChanges(t *testing.T) { + t.Skipf("Fails to compute changes intermittently") + graphtest.DriverTestChanges(t, "overlay") +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, "overlay") +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, "overlay") +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, "overlay") +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, "overlay") +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, "overlay") +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, "overlay") +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, "overlay") +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, "overlay") +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, "overlay") +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 0000000..3dbb4de --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go new file mode 100644 index 0000000..53a7199 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go @@ -0,0 +1,79 @@ +// +build linux + +package overlay2 + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// hasOpaqueCopyUpBug checks whether the filesystem has a bug +// which copies up the opaque flag when copying up an opaque +// directory. When this bug exists naive diff should be used. +func hasOpaqueCopyUpBug(d string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + if err := syscall.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := syscall.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go new file mode 100644 index 0000000..60e248b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go @@ -0,0 +1,88 @@ +// +build linux + +package overlay2 + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-mountfrom", mountFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions struct { + Device string + Target string + Type string + Label string + Flag uint32 +} + +func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { + options := &mountOptions{ + Device: device, + Target: target, + Type: mType, + Flag: uint32(flags), + Label: label, + } + + cmd := reexec.Command("docker-mountfrom", dir) + w, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("mountfrom error on pipe creation: %v", err) + } + + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) + } + return nil +} + +// mountfromMain is the entry-point for docker-mountfrom on re-exec. +func mountFromMain() { + runtime.LockOSThread() + flag.Parse() + + var options *mountOptions + + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + if err := os.Chdir(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + + os.Exit(0) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go new file mode 100644 index 0000000..65ac6bf --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go @@ -0,0 +1,662 @@ +// +build linux + +package overlay2 + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/daemon/graphdriver/quota" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/go-units" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // untar defines the untar method + untar = chrootarchive.UntarUncompressed +) + +// This backend uses the overlay union filesystem for containers +// with diff directories for each layer. + +// This version of the overlay driver requires at least kernel +// 4.0.0 in order to support mounting multiple diff directories. + +// Each container/image has at least a "diff" directory and "link" file. +// If there is also a "lower" file when there are diff layers +// below as well as "merged" and "work" directories. The "diff" directory +// has the upper layer of the overlay and is used to capture any +// changes to the layer. The "lower" file contains all the lower layer +// mounts separated by ":" and ordered from uppermost to lowermost +// layers. The overlay itself is mounted in the "merged" directory, +// and the "work" dir is needed for overlay to work. + +// The "link" file for each layer contains a unique string for the layer. +// Under the "l" directory at the root there will be a symbolic link +// with that unique string pointing the "diff" directory for the layer. +// The symbolic links are used to reference lower layers in the "lower" +// file and on mount. The links are used to shorten the total length +// of a layer reference without requiring changes to the layer identifier +// or root directory. Mounts are always done relative to root and +// referencing the symbolic links in order to ensure the number of +// lower directories can fit in a single page for making the mount +// syscall. A hard upper limit of 128 lower layers is enforced to ensure +// that mounts do not fail due to length. + +const ( + driverName = "overlay2" + linkDir = "l" + lowerFile = "lower" + maxDepth = 128 + + // idLength represents the number of random characters + // which can be used to create the unique link identifer + // for every layer. If this value is too long then the + // page size limit for the mount command may be exceeded. + // The idLength should be selected such that following equation + // is true (512 is a buffer for label metadata). + // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) + idLength = 26 +) + +type overlayOptions struct { + overrideKernelCheck bool + quota quota.Quota +} + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + quotaCtl *quota.Control + options overlayOptions + naiveDiff graphdriver.DiffDriver + supportsDType bool +} + +var ( + backingFs = "" + projectQuotaSupported = false + + useNaiveDiffLock sync.Once + useNaiveDiffOnly bool +) + +func init() { + graphdriver.Register(driverName, Init) +} + +// Init returns the a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + opts, err := parseOptions(options) + if err != nil { + return nil, err + } + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + // require kernel 4.0.0 to ensure multiple lower dirs are supported + v, err := kernel.GetKernelVersion() + if err != nil { + return nil, err + } + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + return nil, graphdriver.ErrNotSupported + } + logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay2' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + supportsDType, err := fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v1.16 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) + + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) + + return d, nil +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "overlay2.override_kernel_check": + o.overrideKernelCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("overlay2: Unknown option %s\n", key) + } + } + return o, nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func useNaiveDiff(home string) bool { + useNaiveDiffLock.Do(func() { + if err := hasOpaqueCopyUpBug(home); err != nil { + logrus.Warnf("Not using native diff for overlay2: %v", err) + useNaiveDiffOnly = true + } + }) + return useNaiveDiffOnly +} + +func (d *Driver) String() string { + return driverName +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, + } +} + +// GetMetadata returns meta data about the overlay driver such as +// LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := map[string]string{ + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + if len(lowerDirs) > 0 { + metadata["LowerDir"] = strings.Join(lowerDirs, ":") + } + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + + if driver.options.quota.Size > 0 { + // Set container disk quota limit + if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { + return err + } + } + } + + if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + return err + } + + // if no parent directory, done + if parent == "" { + return nil + } + + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower != "" { + if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + return err + } + } + + return nil +} + +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link fileA + parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) + } + if len(lowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lp, err := os.Readlink(path.Join(d.home, s)) + if err != nil { + return nil, err + } + lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + dir := d.dir(id) + lid, err := ioutil.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + diffDir := path.Join(dir, "diff") + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, just return diff directory + if os.IsNotExist(err) { + return diffDir, nil + } + return "", err + } + + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + + workDir := path.Join(dir, "work") + splitLowers := strings.Split(string(lowers), ":") + absLowers := make([]string, len(splitLowers)) + for i, s := range splitLowers { + absLowers[i] = path.Join(d.home, s) + } + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work")) + mountData := label.FormatMountLabel(opts, mountLabel) + mount := syscall.Mount + mountTarget := mergedDir + + pageSize := syscall.Getpagesize() + + // Go can return a larger page size than supported by the system + // as of go 1.7. This will be fixed in 1.8 and this block can be + // removed when building with 1.8. + // See https://github.com/golang/go/commit/1b9499b06989d2831e5b156161d6c07642926ee1 + // See https://github.com/docker/docker/issues/27384 + if pageSize > 4096 { + pageSize = 4096 + } + + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. + if len(mountData) > pageSize { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) + mountData = label.FormatMountLabel(opts, mountLabel) + if len(mountData) > pageSize { + return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) + } + + mount = func(source string, target string, mType string, flags uintptr, label string) error { + return mountFrom(d.home, source, target, mType, flags, label) + } + mountTarget = path.Join(id, "merged") + } + + if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. +func (d *Driver) Put(id string) error { + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if err := syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } + return nil +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (d *Driver) isParent(id, parent string) bool { + lowers, err := d.getLowerDirs(id) + if err != nil { + return false + } + if parent == "" && len(lowers) > 0 { + return false + } + + parentDir := d.dir(parent) + var ld string + if len(lowers) > 0 { + ld = filepath.Dir(lowers[0]) + } + if ld == "" && parent == "" { + return true + } + return ld == parentDir +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + if !d.isParent(id, parent) { + return d.naiveDiff.ApplyDiff(id, parent, diff) + } + + applyDir := d.getDiffPath(id) + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(diff, applyDir, &archive.TarOptions{ + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }); err != nil { + return 0, err + } + + return directory.Size(applyDir) +} + +func (d *Driver) getDiffPath(id string) string { + dir := d.dir(id) + + return path.Join(dir, "diff") +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.DiffSize(id, parent) + } + return directory.Size(d.getDiffPath(id)) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Diff(id, parent) + } + + diffPath := d.getDiffPath(id) + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Changes(id, parent) + } + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. + diffPath := d.getDiffPath(id) + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go new file mode 100644 index 0000000..cf77ff2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go @@ -0,0 +1,121 @@ +// +build linux + +package overlay2 + +import ( + "io/ioutil" + "os" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func init() { + // Do not sure chroot to speed run time and allow archive + // errors or hangs to be debugged directly from the test process. + untar = archive.UntarUncompressed + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer + + reexec.Init() +} + +func cdMountFrom(dir, device, target, mType, label string) error { + wd, err := os.Getwd() + if err != nil { + return err + } + os.Chdir(dir) + defer os.Chdir(wd) + + return syscall.Mount(device, target, mType, 0, label) +} + +func skipIfNaive(t *testing.T) { + td, err := ioutil.TempDir("", "naive-check-") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(td) + + if useNaiveDiff(td) { + t.Skipf("Cannot run test with naive diff") + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, driverName) +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, driverName) +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, driverName) +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, driverName) +} + +func TestOverlay128LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 128, driverName) +} + +func TestOverlayDiffApply10Files(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestDiffApply(t, 10, driverName) +} + +func TestOverlayChanges(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestChanges(t, driverName) +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, driverName) +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, driverName) +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, driverName) +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, driverName) +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, driverName) +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, driverName) +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, driverName) +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, driverName) +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, driverName) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go new file mode 100644 index 0000000..e5ac4ca --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay2 diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go new file mode 100644 index 0000000..af5cb65 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go @@ -0,0 +1,80 @@ +// +build linux + +package overlay2 + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/Sirupsen/logrus" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go new file mode 100644 index 0000000..67c6640 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go @@ -0,0 +1,18 @@ +// +build linux + +package overlayutils + +import ( + "errors" + "fmt" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. +func ErrDTypeNotSupported(driver, backingFs string) error { + msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support." + } + msg += " Running without d_type support will no longer be supported in Docker 1.16." + return errors.New(msg) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go new file mode 100644 index 0000000..7294bcc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go @@ -0,0 +1,43 @@ +package graphdriver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/plugin/v2" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. + Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.ACQUIRE) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + proxy := &graphDriverProxy{name, pl} + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go new file mode 100644 index 0000000..bfe74cc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -0,0 +1,252 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Create", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return filepath.Join(d.p.BasePath(), ret.Dir), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.p.Client().Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go new file mode 100644 index 0000000..e408d5f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go @@ -0,0 +1,339 @@ +// +build linux + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. +// +// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR +// for both xfs/ext4 for kernel version >= v4.5 +// + +package quota + +/* +#include +#include +#include +#include +#include + +#ifndef FS_XFLAG_PROJINHERIT +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + unsigned char fsx_pad[12]; +}; +#define FS_XFLAG_PROJINHERIT 0x00000200 +#endif +#ifndef FS_IOC_FSGETXATTR +#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) +#endif +#ifndef FS_IOC_FSSETXATTR +#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) +#endif + +#ifndef PRJQUOTA +#define PRJQUOTA 2 +#endif +#ifndef XFS_PROJ_QUOTA +#define XFS_PROJ_QUOTA 2 +#endif +#ifndef Q_XSETPQLIM +#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) +#endif +#ifndef Q_XGETPQUOTA +#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) +#endif +*/ +import "C" +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// Quota limit params - currently we only control blocks hard limit +type Quota struct { + Size uint64 +} + +// Control - Context to be used by storage driver (e.g. overlay) +// who wants to apply project quotas to container dirs +type Control struct { + backingFsBlockDev string + nextProjectID uint32 + quotas map[string]uint32 +} + +// NewControl - initialize project quota support. +// Test to make sure that quota can be set on a test dir and find +// the first project id to be used for the next container create. +// +// Returns nil (and error) if project quota is not supported. +// +// First get the project id of the home directory. +// This test will fail if the backing fs is not xfs. +// +// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: +// echo 999:/var/lib/docker/overlay2 >> /etc/projects +// echo docker:999 >> /etc/projid +// xfs_quota -x -c 'project -s docker' / +// +// In that case, the home directory project id will be used as a "start offset" +// and all containers will be assigned larger project ids (e.g. >= 1000). +// This is a way to prevent xfs_quota management from conflicting with docker. +// +// Then try to create a test directory with the next project id and set a quota +// on it. If that works, continue to scan existing containers to map allocated +// project ids. +// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + minProjectID++ + + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.XFS_PROJ_QUOTA + + d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + return fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + var d C.fs_disk_quota_t + + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + + return nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID(home string) error { + files, err := ioutil.ReadDir(home) + if err != nil { + return fmt.Errorf("read directory failed : %s", home) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(home, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas[path] = projid + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + fileinfo, err := os.Stat(home) + if err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, "backingFsBlockDev") + // Re-create just in case comeone copied the home directory over to a new device + syscall.Unlink(backingFsBlockDev) + stat := fileinfo.Sys().(*syscall.Stat_t) + if err := syscall.Mknod(backingFsBlockDev, syscall.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go new file mode 100644 index 0000000..262954d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/aufs" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go new file mode 100644 index 0000000..f456cc5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go new file mode 100644 index 0000000..bb2e9ef --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 0000000..9ba849c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,9 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" + _ "github.com/docker/docker/daemon/graphdriver/overlay2" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 0000000..98fad23 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go new file mode 100644 index 0000000..efaa500 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/docker/docker/daemon/graphdriver/windows" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go new file mode 100644 index 0000000..8f34e35 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris + +package register + +import ( + // register the zfs driver + _ "github.com/docker/docker/daemon/graphdriver/zfs" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go new file mode 100644 index 0000000..8832d11 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,145 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + dir := d.dir(id) + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 0000000..9ecf21d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,37 @@ +// +build linux + +package vfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go new file mode 100644 index 0000000..beac93a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go @@ -0,0 +1,886 @@ +//+build windows + +package windows + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/longpath" + "github.com/docker/docker/pkg/reexec" + units "github.com/docker/go-units" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph drivers to the register. +func init() { + graphdriver.Register("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. +func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = syscall.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = syscall.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + for { + // Get and terminate any template VMs that are currently using the layer + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if err == hcsshim.ErrVmcomputeOperationInvalidState { + if retryCount >= 5 { + // If we are unable to get the list of containers + // go ahead and attempt to delete the layer anyway + // as it will most likely work. + break + } + retryCount++ + time.Sleep(2 * time.Second) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. +func (d *Driver) Get(id, mountLabel string) (string, error) { + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) + var dir string + + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } + + // Getting the layer paths must be done outside of the lock. + layerChain, err := d.getLayerChain(rID) + if err != nil { + d.ctr.Decrement(rID) + return "", err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + d.ctr.Decrement(rID) + return "", err + } + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) + if err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + d.cacheMu.Lock() + d.cache[rID] = mountPath + d.cacheMu.Unlock() + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + return dir, nil +} + +// Put adds a new layer to the driver. +func (d *Driver) Put(id string) error { + logrus.Debugf("WindowsGraphDriver Put() id %s", id) + + rID, err := d.resolveID(id) + if err != nil { + return err + } + if count := d.ctr.Decrement(rID); count > 0 { + return nil + } + d.cacheMu.Lock() + delete(d.cache, rID) + d.cacheMu.Unlock() + + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return err + } + return hcsshim.DeactivateLayer(d.info, rID) +} + +// Cleanup ensures the information the driver stores is properly removed. +func (d *Driver) Cleanup() error { + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +// The layer should be mounted when calling this function +func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) { + rID, err := d.resolveID(id) + if err != nil { + return + } + + layerChain, err := d.getLayerChain(rID) + if err != nil { + return + } + + // this is assuming that the layer is unmounted + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return nil, err + } + prepare := func() { + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + logrus.Warnf("Failed to Deactivate %s: %s", rID, err) + } + } + + arch, err := d.exportLayer(rID, layerChain) + if err != nil { + prepare() + return + } + return ioutils.NewReadCloserWrapper(arch, func() error { + err := arch.Close() + prepare() + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +// The layer should not be mounted when calling this function. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + rID, err := d.resolveID(id) + if err != nil { + return nil, err + } + parentChain, err := d.getLayerChain(rID) + if err != nil { + return nil, err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + return nil, err + } + defer func() { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) + } + }() + + var changes []archive.Change + err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentChain) + if err != nil { + return err + } + defer r.Close() + + for { + name, _, fileInfo, err := r.Next() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + name = filepath.ToSlash(name) + if fileInfo == nil { + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) + } else { + // Currently there is no way to tell between an add and a modify. + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) + } + } + }) + if err != nil { + return nil, err + } + + return changes, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +// The layer should not be mounted when calling this function +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var layerChain []string + if parent != "" { + rPId, err := d.resolveID(parent) + if err != nil { + return 0, err + } + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return 0, err + } + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return 0, err + } + layerChain = append(layerChain, parentPath) + layerChain = append(layerChain, parentChain...) + } + + size, err := d.importLayer(id, diff, layerChain) + if err != nil { + return 0, err + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return 0, err + } + + return size, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, rPId) + if err != nil { + return + } + + layerFs, err := d.Get(id, "") + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. + hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. +func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) + output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return + } + + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + } + + return strconv.ParseInt(output.String(), 10, 64) + } + return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) +} + +// writeLayerReexec is the re-exec entry point for writing a layer from a tar file +func writeLayerReexec() { + size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + fmt.Fprint(os.Stdout, size) +} + +// writeLayer writes a layer from a tar file. +func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { + err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return 0, err + } + if noreexec { + defer func() { + if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { + // This should never happen, but just in case when in debugging mode. + // See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale. + panic("Failed to disabled process privileges while in non re-exec mode") + } + }() + } + + info := hcsshim.DriverInfo{ + Flavour: filterDriver, + HomeDir: home, + } + + w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) + if err != nil { + return 0, err + } + + size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id)) + if err != nil { + return 0, err + } + + err = w.Close() + if err != nil { + return 0, err + } + + return size, nil +} + +// resolveID computes the layerID information based on the given id. +func (d *Driver) resolveID(id string) (string, error) { + content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) + if os.IsNotExist(err) { + return id, nil + } else if err != nil { + return "", err + } + return string(content), nil +} + +// setID stores the layerId in disk. +func (d *Driver) setID(id, altID string) error { + err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + if err != nil { + return err + } + return nil +} + +// getLayerChain returns the layer chain information. +func (d *Driver) getLayerChain(id string) ([]string, error) { + jPath := filepath.Join(d.dir(id), "layerchain.json") + content, err := ioutil.ReadFile(jPath) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("Unable to read layerchain file - %s", err) + } + + var layerChain []string + err = json.Unmarshal(content, &layerChain) + if err != nil { + return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) + } + + return layerChain, nil +} + +// setLayerChain stores the layer chain information in disk. +func (d *Driver) setLayerChain(id string, chain []string) error { + content, err := json.Marshal(&chain) + if err != nil { + return fmt.Errorf("Failed to marshall layerchain json - %s", err) + } + + jPath := filepath.Join(d.dir(id), "layerchain.json") + err = ioutil.WriteFile(jPath, content, 0600) + if err != nil { + return fmt.Errorf("Unable to write layerchain file - %s", err) + } + + return nil +} + +type fileGetCloserWithBackupPrivileges struct { + path string +} + +func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { + if backupPath, ok := mutatedFiles[filename]; ok { + return os.Open(filepath.Join(fg.path, backupPath)) + } + + var f *os.File + // Open the file while holding the Windows backup privilege. This ensures that the + // file can be opened even if the caller does not actually have access to it according + // to the security descriptor. + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + path := longpath.AddPrefix(filepath.Join(fg.path, filename)) + p, err := syscall.UTF16FromString(path) + if err != nil { + return err + } + h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return &os.PathError{Op: "open", Path: path, Err: err} + } + f = os.NewFile(uintptr(h), path) + return nil + }) + return f, err +} + +func (fg *fileGetCloserWithBackupPrivileges) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + id, err := d.resolveID(id) + if err != nil { + return nil, err + } + + return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil +} + +type storageOptions struct { + size uint64 +} + +func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { + options := storageOptions{} + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + options.size = uint64(size) + default: + return nil, fmt.Errorf("Unknown storage option: %s", key) + } + } + return &options, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS new file mode 100644 index 0000000..9c270c5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS @@ -0,0 +1,2 @@ +Jörg Thalheim (@Mic92) +Arthur Gautier (@baloose) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go new file mode 100644 index 0000000..8e283cc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go @@ -0,0 +1,417 @@ +// +build linux freebsd solaris + +package zfs + +import ( + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + zfs "github.com/mistifyio/go-zfs" + "github.com/opencontainers/runc/libcontainer/label" +) + +type zfsOptions struct { + fsName string + mountPath string +} + +func init() { + graphdriver.Register("zfs", Init) +} + +// Logger returns a zfs logger implementation. +type Logger struct{} + +// Log wraps log message from ZFS driver with a prefix '[zfs]'. +func (*Logger) Log(cmd []string) { + logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) +} + +// Init returns a new ZFS driver. +// It takes base mount path and an array of options which are represented as key value pairs. +// Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options. +func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + var err error + + if _, err := exec.LookPath("zfs"); err != nil { + logrus.Debugf("[zfs] zfs command is not available: %v", err) + return nil, graphdriver.ErrPrerequisites + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) + if err != nil { + logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) + return nil, graphdriver.ErrPrerequisites + } + defer file.Close() + + options, err := parseOptions(opt) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, fmt.Errorf("Failed to get root uid/guid: %v", err) + } + if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + return nil, fmt.Errorf("Failed to create '%s': %v", base, err) + } + + if err := mount.MakePrivate(base); err != nil { + return nil, err + } + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := syscall.Stat(m.Mountpoint, &stat); err != nil { + logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.Fstype == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. +type Driver struct { + dataset *zfs.Dataset + options zfsOptions + sync.Mutex // protects filesystem cache against concurrent access + filesystemsCache map[string]bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +func (d *Driver) String() string { + return "zfs" +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Status returns information about the ZFS filesystem. It returns a two dimensional array of information +// such as pool name, dataset name, disk usage, parent quota and compression used. +// Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', +// 'Space Available', 'Parent Quota' and 'Compression'. +func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health + } else { + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } + + return [][2]string{ + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +// GetMetadata returns image/container metadata related to graph driver +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +func (d *Driver) cloneFilesystem(name, parentName string) error { + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} + snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) + if err != nil { + return err + } + + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) + if err == nil { + d.Lock() + d.filesystemsCache[name] = true + d.Unlock() + } + + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + return snapshot.Destroy(zfs.DestroyDeferDeletion) +} + +func (d *Driver) zfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) mountPath(id string) string { + return path.Join(d.options.mountPath, "graph", getMountpoint(id)) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + err := d.create(id, parent, storageOpt) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, storageOpt) +} + +func (d *Driver) create(id, parent string, storageOpt map[string]string) error { + name := d.zfsPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. +func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + + filesystem := d.zfsPath(id) + options := label.FormatMountLabel("", mountLabel) + logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + // this could be our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + mount.Unmount(mountpoint) + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) + if err != nil || !mounted { + return err + } + + logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) + + if err := mount.Unmount(mountpoint); err != nil { + return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) + } + return nil +} + +// Exists checks to see if the cache entry exists for the given id. +func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] == true +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go new file mode 100644 index 0000000..1c05fa7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go @@ -0,0 +1,38 @@ +package zfs + +import ( + "fmt" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return id[:maxlen] + "-" + suffix[1] + } + + return id[:maxlen] +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go new file mode 100644 index 0000000..52ed516 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go @@ -0,0 +1,27 @@ +package zfs + +import ( + "fmt" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go new file mode 100644 index 0000000..bb4a85b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go @@ -0,0 +1,59 @@ +// +build solaris,cgo + +package zfs + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "strings" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + + cs := C.CString(filepath.Dir(rootdir)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + C.free(unsafe.Pointer(buf)) + return graphdriver.ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return nil +} + +/* rootfs is introduced to comply with the OCI spec +which states that root filesystem must be mounted at /rootfs/ instead of / +*/ +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root") + } + + return filepath.Join(id[:maxlen], "rootfs", "root") +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go new file mode 100644 index 0000000..3e22928 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go @@ -0,0 +1,35 @@ +// +build linux + +package zfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestZfsSetup and TestZfsTeardown +func TestZfsSetup(t *testing.T) { + graphtest.GetDriver(t, "zfs") +} + +func TestZfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "zfs") +} + +func TestZfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "zfs") +} + +func TestZfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "zfs") +} + +func TestZfsSetQuota(t *testing.T) { + graphtest.DriverTestSetQuota(t, "zfs") +} + +func TestZfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go new file mode 100644 index 0000000..ce8daad --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/docker/docker/daemon/health.go b/vendor/github.com/docker/docker/daemon/health.go new file mode 100644 index 0000000..5b01dc0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/health.go @@ -0,0 +1,341 @@ +package daemon + +import ( + "bytes" + "fmt" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +const ( + // Longest healthcheck probe output message to store. Longer messages will be truncated. + maxOutputLen = 4096 + + // Default interval between probe runs (from the end of the first to the start of the second). + // Also the time before the first probe. + defaultProbeInterval = 30 * time.Second + + // The maximum length of time a single probe run should take. If the probe takes longer + // than this, the check is considered to have failed. + defaultProbeTimeout = 30 * time.Second + + // Default number of consecutive failures of the health check + // for the container to be considered unhealthy. + defaultProbeRetries = 3 + + // Maximum number of entries to record + maxLogEntries = 5 +) + +const ( + // Exit status codes that can be returned by the probe command. + + exitStatusHealthy = 0 // Container is healthy + exitStatusUnhealthy = 1 // Container is unhealthy +) + +// probe implementations know how to run a particular type of probe. +type probe interface { + // Perform one run of the check. Returns the exit code and an optional + // short diagnostic string. + run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error) +} + +// cmdProbe implements the "CMD" probe type. +type cmdProbe struct { + // Run the command with the system's default shell instead of execing it directly. + shell bool +} + +// exec the healthcheck command in the container. +// Returns the exit code and probe output (if any) +func (p *cmdProbe) run(ctx context.Context, d *Daemon, container *container.Container) (*types.HealthcheckResult, error) { + + cmdSlice := strslice.StrSlice(container.Config.Healthcheck.Test)[1:] + if p.shell { + cmdSlice = append(getShell(container.Config), cmdSlice...) + } + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) + execConfig := exec.NewConfig() + execConfig.OpenStdin = false + execConfig.OpenStdout = true + execConfig.OpenStderr = true + execConfig.ContainerID = container.ID + execConfig.DetachKeys = []byte{} + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = false + execConfig.Privileged = false + execConfig.User = container.Config.User + + d.registerExecCommand(container, execConfig) + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + output := &limitedBuffer{} + err := d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) + if err != nil { + return nil, err + } + info, err := d.getExecConfig(execConfig.ID) + if err != nil { + return nil, err + } + if info.ExitCode == nil { + return nil, fmt.Errorf("Healthcheck for container %s has no exit code!", container.ID) + } + // Note: Go's json package will handle invalid UTF-8 for us + out := output.String() + return &types.HealthcheckResult{ + End: time.Now(), + ExitCode: *info.ExitCode, + Output: out, + }, nil +} + +// Update the container's Status.Health struct based on the latest probe's result. +func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult, done chan struct{}) { + c.Lock() + defer c.Unlock() + + // probe may have been cancelled while waiting on lock. Ignore result then + select { + case <-done: + return + default: + } + + retries := c.Config.Healthcheck.Retries + if retries <= 0 { + retries = defaultProbeRetries + } + + h := c.State.Health + oldStatus := h.Status + + if len(h.Log) >= maxLogEntries { + h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) + } else { + h.Log = append(h.Log, result) + } + + if result.ExitCode == exitStatusHealthy { + h.FailingStreak = 0 + h.Status = types.Healthy + } else { + // Failure (including invalid exit code) + h.FailingStreak++ + if h.FailingStreak >= retries { + h.Status = types.Unhealthy + } + // Else we're starting or healthy. Stay in that state. + } + + if oldStatus != h.Status { + d.LogContainerEvent(c, "health_status: "+h.Status) + } +} + +// Run the container's monitoring thread until notified via "stop". +// There is never more than one monitor thread running per container at a time. +func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) { + probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout) + probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval) + for { + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID) + return + case <-time.After(probeInterval): + logrus.Debugf("Running health check for container %s ...", c.ID) + startTime := time.Now() + ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) + results := make(chan *types.HealthcheckResult) + go func() { + healthChecksCounter.Inc() + result, err := probe.run(ctx, d, c) + if err != nil { + healthChecksFailedCounter.Inc() + logrus.Warnf("Health check for container %s error: %v", c.ID, err) + results <- &types.HealthcheckResult{ + ExitCode: -1, + Output: err.Error(), + Start: startTime, + End: time.Now(), + } + } else { + result.Start = startTime + logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode) + results <- result + } + close(results) + }() + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) + // Stop timeout and kill probe, but don't wait for probe to exit. + cancelProbe() + return + case result := <-results: + handleProbeResult(d, c, result, stop) + // Stop timeout + cancelProbe() + case <-ctx.Done(): + logrus.Debugf("Health check for container %s taking too long", c.ID) + handleProbeResult(d, c, &types.HealthcheckResult{ + ExitCode: -1, + Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout), + Start: startTime, + End: time.Now(), + }, stop) + cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). + <-results + } + } + } +} + +// Get a suitable probe implementation for the container's healthcheck configuration. +// Nil will be returned if no healthcheck was configured or NONE was set. +func getProbe(c *container.Container) probe { + config := c.Config.Healthcheck + if config == nil || len(config.Test) == 0 { + return nil + } + switch config.Test[0] { + case "CMD": + return &cmdProbe{shell: false} + case "CMD-SHELL": + return &cmdProbe{shell: true} + default: + logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) + return nil + } +} + +// Ensure the health-check monitor is running or not, depending on the current +// state of the container. +// Called from monitor.go, with c locked. +func (d *Daemon) updateHealthMonitor(c *container.Container) { + h := c.State.Health + if h == nil { + return // No healthcheck configured + } + + probe := getProbe(c) + wantRunning := c.Running && !c.Paused && probe != nil + if wantRunning { + if stop := h.OpenMonitorChannel(); stop != nil { + go monitor(d, c, stop, probe) + } + } else { + h.CloseMonitorChannel() + } +} + +// Reset the health state for a newly-started, restarted or restored container. +// initHealthMonitor is called from monitor.go and we should never be running +// two instances at once. +// Called with c locked. +func (d *Daemon) initHealthMonitor(c *container.Container) { + // If no healthcheck is setup then don't init the monitor + if getProbe(c) == nil { + return + } + + // This is needed in case we're auto-restarting + d.stopHealthchecks(c) + + if h := c.State.Health; h != nil { + h.Status = types.Starting + h.FailingStreak = 0 + } else { + h := &container.Health{} + h.Status = types.Starting + c.State.Health = h + } + + d.updateHealthMonitor(c) +} + +// Called when the container is being stopped (whether because the health check is +// failing or for any other reason). +func (d *Daemon) stopHealthchecks(c *container.Container) { + h := c.State.Health + if h != nil { + h.CloseMonitorChannel() + } +} + +// Buffer up to maxOutputLen bytes. Further data is discarded. +type limitedBuffer struct { + buf bytes.Buffer + mu sync.Mutex + truncated bool // indicates that data has been lost +} + +// Append to limitedBuffer while there is room. +func (b *limitedBuffer) Write(data []byte) (int, error) { + b.mu.Lock() + defer b.mu.Unlock() + + bufLen := b.buf.Len() + dataLen := len(data) + keep := min(maxOutputLen-bufLen, dataLen) + if keep > 0 { + b.buf.Write(data[:keep]) + } + if keep < dataLen { + b.truncated = true + } + return dataLen, nil +} + +// The contents of the buffer, with "..." appended if it overflowed. +func (b *limitedBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + + out := b.buf.String() + if b.truncated { + out = out + "..." + } + return out +} + +// If configuredValue is zero, use defaultValue instead. +func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration { + if configuredValue == 0 { + return defaultValue + } + return configuredValue +} + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func getShell(config *containertypes.Config) []string { + if len(config.Shell) != 0 { + return config.Shell + } + if runtime.GOOS != "windows" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/vendor/github.com/docker/docker/daemon/health_test.go b/vendor/github.com/docker/docker/daemon/health_test.go new file mode 100644 index 0000000..7e82115 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/health_test.go @@ -0,0 +1,118 @@ +package daemon + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func reset(c *container.Container) { + c.State = &container.State{} + c.State.Health = &container.Health{} + c.State.Health.Status = types.Starting +} + +func TestNoneHealthcheck(t *testing.T) { + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Healthcheck: &containertypes.HealthConfig{ + Test: []string{"NONE"}, + }, + }, + State: &container.State{}, + }, + } + daemon := &Daemon{} + + daemon.initHealthMonitor(c) + if c.State.Health != nil { + t.Errorf("Expecting Health to be nil, but was not") + } +} + +func TestHealthStates(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + expect := func(expected string) { + select { + case event := <-l: + ev := event.(eventtypes.Message) + if ev.Status != expected { + t.Errorf("Expecting event %#v, but got %#v\n", expected, ev.Status) + } + case <-time.After(1 * time.Second): + t.Errorf("Expecting event %#v, but got nothing\n", expected) + } + } + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + + c.Config.Healthcheck = &containertypes.HealthConfig{ + Retries: 1, + } + + reset(c) + + handleResult := func(startTime time.Time, exitCode int) { + handleProbeResult(daemon, c, &types.HealthcheckResult{ + Start: startTime, + End: startTime, + ExitCode: exitCode, + }, nil) + } + + // starting -> failed -> success -> failed + + handleResult(c.State.StartedAt.Add(1*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(2*time.Second), 0) + expect("health_status: healthy") + + handleResult(c.State.StartedAt.Add(3*time.Second), 1) + expect("health_status: unhealthy") + + // Test retries + + reset(c) + c.Config.Healthcheck.Retries = 3 + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + handleResult(c.State.StartedAt.Add(40*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 2 { + t.Errorf("Expecting FailingStreak=2, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(60*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } +} diff --git a/vendor/github.com/docker/docker/daemon/image.go b/vendor/github.com/docker/docker/daemon/image.go new file mode 100644 index 0000000..32a8d77 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image.go @@ -0,0 +1,76 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + RefOrID string +} + +func (e ErrImageDoesNotExist) Error() string { + return fmt.Sprintf("no such id: %s", e.RefOrID) +} + +// GetImageID returns an image ID corresponding to the image referred to by +// refOrID. +func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { + id, ref, err := reference.ParseIDOrReference(refOrID) + if err != nil { + return "", err + } + if id != "" { + if _, err := daemon.imageStore.Get(image.IDFromDigest(id)); err != nil { + return "", ErrImageDoesNotExist{refOrID} + } + return image.IDFromDigest(id), nil + } + + if id, err := daemon.referenceStore.Get(ref); err == nil { + return image.IDFromDigest(id), nil + } + + // deprecated: repo:shortid https://github.com/docker/docker/pull/799 + if tagged, ok := ref.(reference.NamedTagged); ok { + if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) { + if id, err := daemon.imageStore.Search(tag); err == nil { + for _, namedRef := range daemon.referenceStore.References(id.Digest()) { + if namedRef.Name() == ref.Name() { + return id, nil + } + } + } + } + } + + // Search based on ID + if id, err := daemon.imageStore.Search(refOrID); err == nil { + return id, nil + } + + return "", ErrImageDoesNotExist{refOrID} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. +func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, err := daemon.GetImageID(refOrID) + if err != nil { + return nil, err + } + return daemon.imageStore.Get(imgID) +} + +// GetImageOnBuild looks up a Docker image referenced by `name`. +func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + return img, nil +} diff --git a/vendor/github.com/docker/docker/daemon/image_delete.go b/vendor/github.com/docker/docker/daemon/image_delete.go new file mode 100644 index 0000000..3e3c142 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_delete.go @@ -0,0 +1,412 @@ +package daemon + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" +) + +type conflictType int + +const ( + conflictDependentChild conflictType = (1 << iota) + conflictRunningContainer + conflictActiveReference + conflictStoppedContainer + conflictHard = conflictDependentChild | conflictRunningContainer + conflictSoft = conflictActiveReference | conflictStoppedContainer +) + +// ImageDelete deletes the image referenced by the given imageRef from this +// daemon. The given imageRef can be an image ID, ID prefix, or a repository +// reference (with an optional tag or digest, defaulting to the tag name +// "latest"). There is differing behavior depending on whether the given +// imageRef is a repository reference or not. +// +// If the given imageRef is a repository reference then that repository +// reference will be removed. However, if there exists any containers which +// were created using the same image reference then the repository reference +// cannot be removed unless either there are other repository references to the +// same image or force is true. Following removal of the repository reference, +// the referenced image itself will attempt to be deleted as described below +// but quietly, meaning any image delete conflicts will cause the image to not +// be deleted and the conflict will not be reported. +// +// There may be conflicts preventing deletion of an image and these conflicts +// are divided into two categories grouped by their severity: +// +// Hard Conflict: +// - a pull or build using the image. +// - any descendant image. +// - any running container using the image. +// +// Soft Conflict: +// - any stopped container using the image. +// - any repository tag or digest references to the image. +// +// The image cannot be removed if there are any hard conflicts and can be +// removed if there are soft conflicts only if force is true. +// +// If prune is true, ancestor images will each attempt to be deleted quietly, +// meaning any delete conflicts will cause the image to not be deleted and the +// conflict will not be reported. +// +// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph +// package. This would require that we no longer need the daemon to determine +// whether images are being used by a stopped or running container. +func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { + start := time.Now() + records := []types.ImageDelete{} + + imgID, err := daemon.GetImageID(imageRef) + if err != nil { + return nil, daemon.imageNotExistToErrcode(err) + } + + repoRefs := daemon.referenceStore.References(imgID.Digest()) + + var removedRepositoryRef bool + if !isImageIDPrefix(imgID.String(), imageRef) { + // A repository reference was given and should be removed + // first. We can only remove this reference if either force is + // true, there are multiple repository references to this + // image, or there are no containers using the given reference. + if !force && isSingleReference(repoRefs) { + if container := daemon.getContainerUsingImage(imgID); container != nil { + // If we removed the repository reference then + // this image would remain "dangling" and since + // we really want to avoid that the client must + // explicitly force its removal. + err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + return nil, errors.NewRequestConflictError(err) + } + } + + parsedRef, err := reference.ParseNamed(imageRef) + if err != nil { + return nil, err + } + + parsedRef, err = daemon.removeImageRef(parsedRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + + repoRefs = daemon.referenceStore.References(imgID.Digest()) + + // If a tag reference was removed and the only remaining + // references to the same repository are digest references, + // then clean up those digest references. + if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { + foundRepoTagRef := false + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + foundRepoTagRef = true + break + } + } + if !foundRepoTagRef { + // Remove canonical references from same repository + remainingRefs := []reference.Named{} + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + if _, err := daemon.removeImageRef(repoRef); err != nil { + return records, err + } + + untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} + records = append(records, untaggedRecord) + } else { + remainingRefs = append(remainingRefs, repoRef) + + } + } + repoRefs = remainingRefs + } + } + + // If it has remaining references then the untag finished the remove + if len(repoRefs) > 0 { + return records, nil + } + + removedRepositoryRef = true + } else { + // If an ID reference was given AND there is at most one tag + // reference to the image AND all references are within one + // repository, then remove all references. + if isSingleReference(repoRefs) { + c := conflictHard + if !force { + c |= conflictSoft &^ conflictActiveReference + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + return nil, conflict + } + + for _, repoRef := range repoRefs { + parsedRef, err := daemon.removeImageRef(repoRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + } + } + } + + if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { + return nil, err + } + + imageActions.WithValues("delete").UpdateSince(start) + + return records, nil +} + +// isSingleReference returns true when all references are from one repository +// and there is at most one tag. Returns false for empty input. +func isSingleReference(repoRefs []reference.Named) bool { + if len(repoRefs) <= 1 { + return len(repoRefs) == 1 + } + var singleRef reference.Named + canonicalRefs := map[string]struct{}{} + for _, repoRef := range repoRefs { + if _, isCanonical := repoRef.(reference.Canonical); isCanonical { + canonicalRefs[repoRef.Name()] = struct{}{} + } else if singleRef == nil { + singleRef = repoRef + } else { + return false + } + } + if singleRef == nil { + // Just use first canonical ref + singleRef = repoRefs[0] + } + _, ok := canonicalRefs[singleRef.Name()] + return len(canonicalRefs) == 1 && ok +} + +// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the +// given imageID. +func isImageIDPrefix(imageID, possiblePrefix string) bool { + if strings.HasPrefix(imageID, possiblePrefix) { + return true + } + + if i := strings.IndexRune(imageID, ':'); i >= 0 { + return strings.HasPrefix(imageID[i+1:], possiblePrefix) + } + + return false +} + +// getContainerUsingImage returns a container that was created using the given +// imageID. Returns nil if there is no such container. +func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { + return daemon.containers.First(func(c *container.Container) bool { + return c.ImageID == imageID + }) +} + +// removeImageRef attempts to parse and remove the given image reference from +// this daemon's store of repository tag/digest references. The given +// repositoryRef must not be an image ID but a repository name followed by an +// optional tag or digest reference. If tag or digest is omitted, the default +// tag is used. Returns the resolved image reference and an error. +func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { + ref = reference.WithDefaultTag(ref) + // Ignore the boolean value returned, as far as we're concerned, this + // is an idempotent operation and it's okay if the reference didn't + // exist in the first place. + _, err := daemon.referenceStore.Delete(ref) + + return ref, err +} + +// removeAllReferencesToImageID attempts to remove every reference to the given +// imgID from this daemon's store of repository tag/digest references. Returns +// on the first encountered error. Removed references are logged to this +// daemon's event service. An "Untagged" types.ImageDelete is added to the +// given list of records. +func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { + imageRefs := daemon.referenceStore.References(imgID.Digest()) + + for _, imageRef := range imageRefs { + parsedRef, err := daemon.removeImageRef(imageRef) + if err != nil { + return err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + *records = append(*records, untaggedRecord) + } + + return nil +} + +// ImageDeleteConflict holds a soft or hard conflict and an associated error. +// Implements the error interface. +type imageDeleteConflict struct { + hard bool + used bool + imgID image.ID + message string +} + +func (idc *imageDeleteConflict) Error() string { + var forceMsg string + if idc.hard { + forceMsg = "cannot be forced" + } else { + forceMsg = "must be forced" + } + + return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) +} + +// imageDeleteHelper attempts to delete the given image from this daemon. If +// the image has any hard delete conflicts (child images or running containers +// using the image) then it cannot be deleted. If the image has any soft delete +// conflicts (any tags/digests referencing the image or any stopped container +// using the image) then it can only be deleted if force is true. If the delete +// succeeds and prune is true, the parent images are also deleted if they do +// not have any soft or hard delete conflicts themselves. Any deleted images +// and untagged references are appended to the given records. If any error or +// conflict is encountered, it will be returned immediately without deleting +// the image. If quiet is true, any encountered conflicts will be ignored and +// the function will return nil immediately without deleting the image. +func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { + // First, determine if this image has any conflicts. Ignore soft conflicts + // if force is true. + c := conflictHard + if !force { + c |= conflictSoft + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { + // Ignore conflicts UNLESS the image is "dangling" or not being used in + // which case we want the user to know. + return nil + } + + // There was a conflict and it's either a hard conflict OR we are not + // forcing deletion on soft conflicts. + return conflict + } + + parent, err := daemon.imageStore.GetParent(imgID) + if err != nil { + // There may be no parent + parent = "" + } + + // Delete all repository tag/digest references to this image. + if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { + return err + } + + removedLayers, err := daemon.imageStore.Delete(imgID) + if err != nil { + return err + } + + daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") + *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) + for _, removedLayer := range removedLayers { + *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) + } + + if !prune || parent == "" { + return nil + } + + // We need to prune the parent image. This means delete it if there are + // no tags/digests referencing it and there are no containers using it ( + // either running or stopped). + // Do not force prunings, but do so quietly (stopping on any encountered + // conflicts). + return daemon.imageDeleteHelper(parent, records, false, true, true) +} + +// checkImageDeleteConflict determines whether there are any conflicts +// preventing deletion of the given image from this daemon. A hard conflict is +// any image which has the given image as a parent or any running container +// using the image. A soft conflict is any tags/digest referencing the given +// image or any stopped container using the image. If ignoreSoftConflicts is +// true, this function will not check for soft conflict conditions. +func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { + // Check if the image has any descendant images. + if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { + return &imageDeleteConflict{ + hard: true, + imgID: imgID, + message: "image has dependent child images", + } + } + + if mask&conflictRunningContainer != 0 { + // Check if any running container is using the image. + running := func(c *container.Container) bool { + return c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(running); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + hard: true, + used: true, + message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), + } + } + } + + // Check if any repository tags/digest reference this image. + if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID.Digest())) > 0 { + return &imageDeleteConflict{ + imgID: imgID, + message: "image is referenced in multiple repositories", + } + } + + if mask&conflictStoppedContainer != 0 { + // Check if any stopped containers reference this image. + stopped := func(c *container.Container) bool { + return !c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(stopped); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + used: true, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), + } + } + } + + return nil +} + +// imageIsDangling returns whether the given image is "dangling" which means +// that there are no repository references to the given image and it has no +// child images. +func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { + return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0) +} diff --git a/vendor/github.com/docker/docker/daemon/image_exporter.go b/vendor/github.com/docker/docker/daemon/image_exporter.go new file mode 100644 index 0000000..95d1d3d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_exporter.go @@ -0,0 +1,25 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/image/tarexport" +) + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Save(names, outStream) +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Load(inTar, outStream, quiet) +} diff --git a/vendor/github.com/docker/docker/daemon/image_history.go b/vendor/github.com/docker/docker/daemon/image_history.go new file mode 100644 index 0000000..839dd12 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_history.go @@ -0,0 +1,84 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. +func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { + start := time.Now() + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + history := []*types.ImageHistory{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, fmt.Errorf("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*types.ImageHistory{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) + } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.referenceStore.References(id.Digest()) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, r.String()) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + imageActions.WithValues("history").UpdateSince(start) + return history, nil +} diff --git a/vendor/github.com/docker/docker/daemon/image_inspect.go b/vendor/github.com/docker/docker/daemon/image_inspect.go new file mode 100644 index 0000000..ebf9124 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_inspect.go @@ -0,0 +1,82 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +// LookupImage looks up an image by name and returns it as an ImageInspect +// structure. +func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + refs := daemon.referenceStore.References(img.ID().Digest()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, ref.String()) + case reference.Canonical: + repoDigests = append(repoDigests, ref.String()) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + OsVersion: img.OSVersion, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName() + + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} diff --git a/vendor/github.com/docker/docker/daemon/image_pull.go b/vendor/github.com/docker/docker/daemon/image_pull.go new file mode 100644 index 0000000..2157d15 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_pull.go @@ -0,0 +1,149 @@ +package daemon + +import ( + "io" + "strings" + + dist "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + if tag != "" { + // The "tag" could actually be a digest. + var dgst digest.Digest + dgst, err = digest.ParseDigest(tag) + if err == nil { + ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + if err != nil { + return err + } + } + + return daemon.pullImageWithReference(ctx, ref, metaHeaders, authConfig, outStream) +} + +// PullOnBuild tells Docker to pull image referenced by `name`. +func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { + ref, err := reference.ParseNamed(name) + if err != nil { + return nil, err + } + ref = reference.WithDefaultTag(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config file, we prefer to use that + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig( + authConfigs, + repoInfo.Index, + ) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.pullImageWithReference(ctx, ref, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), + ReferenceStore: daemon.referenceStore, + }, + DownloadManager: daemon.downloadManager, + Schema2Types: distribution.ImageTypes, + } + + err := distribution.Pull(ctx, ref, imagePullConfig) + close(progressChan) + <-writesDone + return err +} + +// GetRepository returns a repository from the registry. +func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.NamedTagged, authConfig *types.AuthConfig) (dist.Repository, bool, error) { + // get repository info + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, false, err + } + // makes sure name is not empty or `scratch` + if err := distribution.ValidateRepoName(repoInfo.Name()); err != nil { + return nil, false, err + } + + // get endpoints + endpoints, err := daemon.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + if err != nil { + return nil, false, err + } + + // retrieve repository + var ( + confirmedV2 bool + repository dist.Repository + lastError error + ) + + for _, endpoint := range endpoints { + if endpoint.Version == registry.APIVersion1 { + continue + } + + repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull") + if lastError == nil && confirmedV2 { + break + } + } + return repository, confirmedV2, lastError +} diff --git a/vendor/github.com/docker/docker/daemon/image_push.go b/vendor/github.com/docker/docker/daemon/image_push.go new file mode 100644 index 0000000..e6382c7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_push.go @@ -0,0 +1,63 @@ +package daemon + +import ( + "io" + + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +// PushImage initiates a push operation on the repository named localName. +func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + if tag != "" { + // Push by digest is not supported, so only tags are supported. + ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), + ReferenceStore: daemon.referenceStore, + }, + ConfigMediaType: schema2.MediaTypeImageConfig, + LayerStore: distribution.NewLayerProviderFromStore(daemon.layerStore), + TrustKey: daemon.trustKey, + UploadManager: daemon.uploadManager, + } + + err = distribution.Push(ctx, ref, imagePushConfig) + close(progressChan) + <-writesDone + return err +} diff --git a/vendor/github.com/docker/docker/daemon/image_tag.go b/vendor/github.com/docker/docker/daemon/image_tag.go new file mode 100644 index 0000000..36fa3b4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_tag.go @@ -0,0 +1,37 @@ +package daemon + +import ( + "github.com/docker/docker/image" + "github.com/docker/docker/reference" +) + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). +func (daemon *Daemon) TagImage(imageName, repository, tag string) error { + imageID, err := daemon.GetImageID(imageName) + if err != nil { + return err + } + + newTag, err := reference.WithName(repository) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(newTag, tag); err != nil { + return err + } + } + + return daemon.TagImageWithReference(imageID, newTag) +} + +// TagImageWithReference adds the given reference to the image ID provided. +func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error { + if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { + return err + } + + daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/images.go b/vendor/github.com/docker/docker/daemon/images.go new file mode 100644 index 0000000..88fb8f8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images.go @@ -0,0 +1,331 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/pkg/errors" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" +) + +var acceptedImageFilterTags = map[string]bool{ + "dangling": true, + "label": true, + "before": true, + "since": true, + "reference": true, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. +type byCreated []*types.ImageSummary + +func (r byCreated) Len() int { return len(r) } +func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +// Map returns a map of all images in the ImageStore +func (daemon *Daemon) Map() map[image.ID]*image.Image { + return daemon.imageStore.Map() +} + +// Images returns a filtered list of images. filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by api/types/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. +func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { + var ( + allImages map[image.ID]*image.Image + err error + danglingOnly = false + ) + + if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { + return nil, err + } + + if imageFilters.Include("dangling") { + if imageFilters.ExactMatch("dangling", "true") { + danglingOnly = true + } else if !imageFilters.ExactMatch("dangling", "false") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) + } + } + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + + var beforeFilter, sinceFilter *image.Image + err = imageFilters.WalkValues("before", func(value string) error { + beforeFilter, err = daemon.GetImage(value) + return err + }) + if err != nil { + return nil, err + } + + err = imageFilters.WalkValues("since", func(value string) error { + sinceFilter, err = daemon.GetImage(value) + return err + }) + if err != nil { + return nil, err + } + + images := []*types.ImageSummary{} + var imagesMap map[*image.Image]*types.ImageSummary + var layerRefs map[layer.ChainID]int + var allLayers map[layer.ChainID]layer.Layer + var allContainers []*container.Container + + for id, img := range allImages { + if beforeFilter != nil { + if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { + continue + } + } + + if sinceFilter != nil { + if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { + continue + } + } + + if imageFilters.Include("label") { + // Very old image that do not have image.Config (or even labels) + if img.Config == nil { + continue + } + // We are now sure image.Config is not nil + if !imageFilters.MatchKVList("label", img.Config.Labels) { + continue + } + } + + layerID := img.RootFS.ChainID() + var size int64 + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + + size, err = l.Size() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + } + + newImage := newImage(img, size) + + for _, ref := range daemon.referenceStore.References(id.Digest()) { + if imageFilters.Include("reference") { + var found bool + var matchErr error + for _, pattern := range imageFilters.Get("reference") { + found, matchErr = reference.Match(pattern, ref) + if matchErr != nil { + return nil, matchErr + } + } + if !found { + continue + } + } + if _, ok := ref.(reference.Canonical); ok { + newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) + } + if _, ok := ref.(reference.NamedTagged); ok { + newImage.RepoTags = append(newImage.RepoTags, ref.String()) + } + } + if newImage.RepoDigests == nil && newImage.RepoTags == nil { + if all || len(daemon.imageStore.Children(id)) == 0 { + + if imageFilters.Include("dangling") && !danglingOnly { + //dangling=false case, so dangling image is not needed + continue + } + if imageFilters.Include("reference") { // skip images with no references if filtering by reference + continue + } + newImage.RepoDigests = []string{"@"} + newImage.RepoTags = []string{":"} + } else { + continue + } + } else if danglingOnly && len(newImage.RepoTags) > 0 { + continue + } + + if withExtraAttrs { + // lazyly init variables + if imagesMap == nil { + allContainers = daemon.List() + allLayers = daemon.layerStore.Map() + imagesMap = make(map[*image.Image]*types.ImageSummary) + layerRefs = make(map[layer.ChainID]int) + } + + // Get container count + newImage.Containers = 0 + for _, c := range allContainers { + if c.ImageID == id { + newImage.Containers++ + } + } + + // count layer references + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + if _, ok := allLayers[chid]; !ok { + return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) + } + } + imagesMap[img] = newImage + } + + images = append(images, newImage) + } + + if withExtraAttrs { + // Get Shared sizes + for img, newImage := range imagesMap { + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + newImage.SharedSize = 0 + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + + diffSize, err := allLayers[chid].DiffSize() + if err != nil { + return nil, err + } + + if layerRefs[chid] > 1 { + newImage.SharedSize += diffSize + } + } + } + } + + sort.Sort(sort.Reverse(byCreated(images))) + + return images, nil +} + +// SquashImage creates a new image with the diff of the specified image and the specified parent. +// This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between. +// The existing image(s) is not destroyed. +// If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. +func (daemon *Daemon) SquashImage(id, parent string) (string, error) { + img, err := daemon.imageStore.Get(image.ID(id)) + if err != nil { + return "", err + } + + var parentImg *image.Image + var parentChainID layer.ChainID + if len(parent) != 0 { + parentImg, err = daemon.imageStore.Get(image.ID(parent)) + if err != nil { + return "", errors.Wrap(err, "error getting specified parent layer") + } + parentChainID = parentImg.RootFS.ChainID() + } else { + rootFS := image.NewRootFS() + parentImg = &image.Image{RootFS: rootFS} + } + + l, err := daemon.layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return "", errors.Wrap(err, "error getting image layer") + } + defer daemon.layerStore.Release(l) + + ts, err := l.TarStreamFrom(parentChainID) + if err != nil { + return "", errors.Wrapf(err, "error getting tar stream to parent") + } + defer ts.Close() + + newL, err := daemon.layerStore.Register(ts, parentChainID) + if err != nil { + return "", errors.Wrap(err, "error registering layer") + } + defer daemon.layerStore.Release(newL) + + var newImage image.Image + newImage = *img + newImage.RootFS = nil + + var rootFS image.RootFS + rootFS = *parentImg.RootFS + rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) + newImage.RootFS = &rootFS + + for i, hi := range newImage.History { + if i >= len(parentImg.History) { + hi.EmptyLayer = true + } + newImage.History[i] = hi + } + + now := time.Now() + var historyComment string + if len(parent) > 0 { + historyComment = fmt.Sprintf("merge %s to %s", id, parent) + } else { + historyComment = fmt.Sprintf("create new from %s", id) + } + + newImage.History = append(newImage.History, image.History{ + Created: now, + Comment: historyComment, + }) + newImage.Created = now + + b, err := json.Marshal(&newImage) + if err != nil { + return "", errors.Wrap(err, "error marshalling image config") + } + + newImgID, err := daemon.imageStore.Create(b) + if err != nil { + return "", errors.Wrap(err, "error creating new image after squash") + } + return string(newImgID), nil +} + +func newImage(image *image.Image, virtualSize int64) *types.ImageSummary { + newImage := new(types.ImageSummary) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = virtualSize + newImage.VirtualSize = virtualSize + newImage.SharedSize = -1 + newImage.Containers = -1 + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/vendor/github.com/docker/docker/daemon/import.go b/vendor/github.com/docker/docker/daemon/import.go new file mode 100644 index 0000000..c93322b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/import.go @@ -0,0 +1,135 @@ +package daemon + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + "runtime" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" +) + +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. +func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + rc io.ReadCloser + resp *http.Response + newRef reference.Named + ) + + if repository != "" { + var err error + newRef, err = reference.ParseNamed(repository) + if err != nil { + return err + } + + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) + if err != nil { + return err + } + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + u, err := url.Parse(src) + if err != nil { + return err + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = httputils.Download(u.String()) + if err != nil { + return err + } + progressOutput := sf.NewProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") + } + + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + inflatedLayerData, err := archive.DecompressStream(rc) + if err != nil { + return err + } + // TODO: support windows baselayer? + l, err := daemon.layerStore.Register(inflatedLayerData, "") + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call refstore directly + if newRef != nil { + if err := daemon.TagImageWithReference(id, newRef); err != nil { + return err + } + } + + daemon.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(sf.FormatStatus("", id.String())) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/info.go b/vendor/github.com/docker/docker/daemon/info.go new file mode 100644 index 0000000..1ab9f29 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info.go @@ -0,0 +1,180 @@ +package daemon + +import ( + "fmt" + "os" + "runtime" + "sync/atomic" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume/drivers" + "github.com/docker/go-connections/sockets" +) + +// SystemInfo returns information about the host server the daemon is running on. +func (daemon *Daemon) SystemInfo() (*types.Info, error) { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err != nil { + logrus.Warnf("Could not get operating system name: %v", err) + } else { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + meminfo = &system.MemInfo{} + } + + sysInfo := sysinfo.New(true) + + var cRunning, cPaused, cStopped int32 + daemon.containers.ApplyAll(func(c *container.Container) { + switch c.StateString() { + case "paused": + atomic.AddInt32(&cPaused, 1) + case "running": + atomic.AddInt32(&cRunning, 1) + default: + atomic.AddInt32(&cStopped, 1) + } + }) + + securityOptions := []string{} + if sysInfo.AppArmor { + securityOptions = append(securityOptions, "name=apparmor") + } + if sysInfo.Seccomp && supportsSeccomp { + profile := daemon.seccompProfilePath + if profile == "" { + profile = "default" + } + securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile)) + } + if selinuxEnabled() { + securityOptions = append(securityOptions, "name=selinux") + } + uid, gid := daemon.GetRemappedUIDGID() + if uid != 0 || gid != 0 { + securityOptions = append(securityOptions, "name=userns") + } + + v := &types.Info{ + ID: daemon.ID, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: int(cPaused), + ContainersStopped: int(cStopped), + Images: len(daemon.imageStore.Map()), + Driver: daemon.GraphDriverName(), + DriverStatus: daemon.layerStore.DriverStatus(), + Plugins: daemon.showPluginsInfo(), + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, + Debug: utils.IsDebugEnabled(), + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + LoggingDriver: daemon.defaultLogConfig.Type, + CgroupDriver: daemon.getCgroupDriver(), + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + OSType: platform.OSType, + Architecture: platform.Architecture, + RegistryConfig: daemon.RegistryService.ServiceConfig(), + NCPU: sysinfo.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.configStore.Root, + Labels: daemon.configStore.Labels, + ExperimentalBuild: daemon.configStore.Experimental, + ServerVersion: dockerversion.Version, + ClusterStore: daemon.configStore.ClusterStore, + ClusterAdvertise: daemon.configStore.ClusterAdvertise, + HTTPProxy: sockets.GetProxyEnv("http_proxy"), + HTTPSProxy: sockets.GetProxyEnv("https_proxy"), + NoProxy: sockets.GetProxyEnv("no_proxy"), + LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled, + SecurityOptions: securityOptions, + Isolation: daemon.defaultIsolation, + } + + // Retrieve platform specific info + daemon.FillPlatformInfo(v, sysInfo) + + hostname := "" + if hn, err := os.Hostname(); err != nil { + logrus.Warnf("Could not get hostname: %v", err) + } else { + hostname = hn + } + v.Name = hostname + + return v, nil +} + +// SystemVersion returns version information about the daemon. +func (daemon *Daemon) SystemVersion() types.Version { + v := types.Version{ + Version: dockerversion.Version, + GitCommit: dockerversion.GitCommit, + MinAPIVersion: api.MinVersion, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + Experimental: daemon.configStore.Experimental, + } + + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v.KernelVersion = kernelVersion + + return v +} + +func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { + var pluginsInfo types.PluginsInfo + + pluginsInfo.Volume = volumedrivers.GetDriverList() + pluginsInfo.Network = daemon.GetNetworkDriverList() + pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + + return pluginsInfo +} diff --git a/vendor/github.com/docker/docker/daemon/info_unix.go b/vendor/github.com/docker/docker/daemon/info_unix.go new file mode 100644 index 0000000..9c41c0e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info_unix.go @@ -0,0 +1,82 @@ +// +build !windows + +package daemon + +import ( + "context" + "os/exec" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/sysinfo" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.KernelMemory = sysInfo.KernelMemory + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota + v.CPUShares = sysInfo.CPUShares + v.CPUSet = sysInfo.Cpuset + v.Runtimes = daemon.configStore.GetAllRuntimes() + v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() + v.InitBinary = daemon.configStore.GetInitPath() + + v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID + if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil { + v.ContainerdCommit.ID = sv.Revision + } else { + logrus.Warnf("failed to retrieve containerd version: %v", err) + v.ContainerdCommit.ID = "N/A" + } + + v.RuncCommit.Expected = dockerversion.RuncCommitID + if rv, err := exec.Command(DefaultRuntimeBinary, "--version").Output(); err == nil { + parts := strings.Split(strings.TrimSpace(string(rv)), "\n") + if len(parts) == 3 { + parts = strings.Split(parts[1], ": ") + if len(parts) == 2 { + v.RuncCommit.ID = strings.TrimSpace(parts[1]) + } + } + + if v.RuncCommit.ID == "" { + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultRuntimeBinary, string(rv)) + v.RuncCommit.ID = "N/A" + } + } else { + logrus.Warnf("failed to retrieve %s version: %v", DefaultRuntimeBinary, err) + v.RuncCommit.ID = "N/A" + } + + v.InitCommit.Expected = dockerversion.InitCommitID + if rv, err := exec.Command(DefaultInitBinary, "--version").Output(); err == nil { + parts := strings.Split(strings.TrimSpace(string(rv)), " - ") + if len(parts) == 2 { + if dockerversion.InitCommitID[0] == 'v' { + vs := strings.TrimPrefix(parts[0], "tini version ") + v.InitCommit.ID = "v" + vs + } else { + // Get the sha1 + gitParts := strings.Split(parts[1], ".") + if len(gitParts) == 2 && gitParts[0] == "git" { + v.InitCommit.ID = gitParts[1] + v.InitCommit.Expected = dockerversion.InitCommitID[0:len(gitParts[1])] + } + } + } + + if v.InitCommit.ID == "" { + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultInitBinary, string(rv)) + v.InitCommit.ID = "N/A" + } + } else { + logrus.Warnf("failed to retrieve %s version", DefaultInitBinary) + v.InitCommit.ID = "N/A" + } +} diff --git a/vendor/github.com/docker/docker/daemon/info_windows.go b/vendor/github.com/docker/docker/daemon/info_windows.go new file mode 100644 index 0000000..c700911 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info_windows.go @@ -0,0 +1,10 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/sysinfo" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { +} diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go new file mode 100644 index 0000000..66d53f0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go @@ -0,0 +1,13 @@ +// +build solaris,cgo + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go new file mode 100644 index 0000000..e83c275 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go @@ -0,0 +1,69 @@ +// +build linux freebsd + +package initlayer + +import ( + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/idtools" +) + +// Setup populates a directory with mountpoints suitable +// for bind-mounting things into the container. +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + syscall.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { + return err + } + switch typ { + case "dir": + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Chown(rootUID, rootGID) + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go new file mode 100644 index 0000000..48a9d71 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/inspect.go b/vendor/github.com/docker/docker/daemon/inspect.go new file mode 100644 index 0000000..557f639 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect.go @@ -0,0 +1,264 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" +) + +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. +func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) { + switch { + case versions.LessThan(version, "1.20"): + return daemon.containerInspectPre120(name) + case versions.Equal(version, "1.20"): + return daemon.containerInspect120(name) + } + return daemon.ContainerInspectCurrent(name, size) +} + +// ContainerInspectCurrent returns low-level information about a +// container in a most recent api version. +func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, size) + if err != nil { + return nil, err + } + + apiNetworks := make(map[string]*networktypes.EndpointSettings) + for name, epConf := range container.NetworkSettings.Networks { + if epConf.EndpointSettings != nil { + apiNetworks[name] = epConf.EndpointSettings + } + } + + mountPoints := addMountPoints(container) + networkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: container.NetworkSettings.Bridge, + SandboxID: container.NetworkSettings.SandboxID, + HairpinMode: container.NetworkSettings.HairpinMode, + LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, + Ports: container.NetworkSettings.Ports, + SandboxKey: container.NetworkSettings.SandboxKey, + SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, + SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), + Networks: apiNetworks, + } + + return &types.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: container.Config, + NetworkSettings: networkSettings, + }, nil +} + +// containerInspect120 serializes the master version of a container into a json type. +func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.HostConfig + + children := daemon.children(container) + hostConfig.Links = nil // do not expose the internal structure + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + + // We merge the Ulimits from hostConfig with daemon default + daemon.mergeUlimits(&hostConfig) + + var containerHealth *types.Health + if container.State.Health != nil { + containerHealth = &types.Health{ + Status: container.State.Health.Status, + FailingStreak: container.State.Health.FailingStreak, + Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), + } + } + + containerState := &types.ContainerState{ + Status: container.State.StateString(), + Running: container.State.Running, + Paused: container.State.Paused, + Restarting: container.State.Restarting, + OOMKilled: container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode(), + Error: container.State.Error(), + StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), + FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), + Health: containerHealth, + } + + contJSONBase := &types.ContainerJSONBase{ + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID.String(), + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, + } + + var ( + sizeRw int64 + sizeRootFs int64 + ) + if size { + sizeRw, sizeRootFs = daemon.getSize(container) + contJSONBase.SizeRw = &sizeRw + contJSONBase.SizeRootFs = &sizeRootFs + } + + // Now set any platform-specific fields + contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) + + contJSONBase.GraphDriver.Name = container.Driver + + graphDriverData, err := container.RWLayer.Metadata() + // If container is marked as Dead, the container's graphdriver metadata + // could have been removed, it will cause error if we try to get the metadata, + // we can ignore the error if the container is dead. + if err != nil && !container.Dead { + return nil, err + } + contJSONBase.GraphDriver.Data = graphDriverData + + return contJSONBase, nil +} + +// ContainerExecInspect returns low-level information about the exec +// command. An error is returned if the exec cannot be found. +func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { + e, err := daemon.getExecConfig(id) + if err != nil { + return nil, err + } + + pc := inspectExecProcessConfig(e) + + return &backend.ExecInspect{ + ID: e.ID, + Running: e.Running, + ExitCode: e.ExitCode, + ProcessConfig: pc, + OpenStdin: e.OpenStdin, + OpenStdout: e.OpenStdout, + OpenStderr: e.OpenStderr, + CanRemove: e.CanRemove, + ContainerID: e.ContainerID, + DetachKeys: e.DetachKeys, + Pid: e.Pid, + }, nil +} + +// VolumeInspect looks up a volume by name. An error is returned if +// the volume cannot be found. +func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { + v, err := daemon.volumes.Get(name) + if err != nil { + return nil, err + } + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + apiV.Status = v.Status() + return apiV, nil +} + +func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { + result := &v1p20.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: settings.Bridge, + SandboxID: settings.SandboxID, + HairpinMode: settings.HairpinMode, + LinkLocalIPv6Address: settings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, + Ports: settings.Ports, + SandboxKey: settings.SandboxKey, + SecondaryIPAddresses: settings.SecondaryIPAddresses, + SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), + } + + return result +} + +// getDefaultNetworkSettings creates the deprecated structure that holds the information +// about the bridge network for a container. +func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*network.EndpointSettings) types.DefaultNetworkSettings { + var settings types.DefaultNetworkSettings + + if defaultNetwork, ok := networks["bridge"]; ok && defaultNetwork.EndpointSettings != nil { + settings.EndpointID = defaultNetwork.EndpointID + settings.Gateway = defaultNetwork.Gateway + settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address + settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen + settings.IPAddress = defaultNetwork.IPAddress + settings.IPPrefixLen = defaultNetwork.IPPrefixLen + settings.IPv6Gateway = defaultNetwork.IPv6Gateway + settings.MacAddress = defaultNetwork.MacAddress + } + return settings +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_solaris.go b/vendor/github.com/docker/docker/daemon/inspect_solaris.go new file mode 100644 index 0000000..0e3dcc1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_solaris.go @@ -0,0 +1,41 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +// containerInspectPre120 get containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + return &v1p19.ContainerJSON{}, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_unix.go b/vendor/github.com/docker/docker/daemon/inspect_unix.go new file mode 100644 index 0000000..08a8223 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_unix.go @@ -0,0 +1,92 @@ +// +build !windows,!solaris + +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + contJSONBase.AppArmorProfile = container.AppArmorProfile + contJSONBase.ResolvConfPath = container.ResolvConfPath + contJSONBase.HostnamePath = container.HostnamePath + contJSONBase.HostsPath = container.HostsPath + + return contJSONBase +} + +// containerInspectPre120 gets containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + volumes := make(map[string]string) + volumesRW := make(map[string]bool) + for _, m := range container.MountPoints { + volumes[m.Destination] = m.Path() + volumesRW[m.Destination] = m.RW + } + + config := &v1p19.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + Memory: container.HostConfig.Memory, + MemorySwap: container.HostConfig.MemorySwap, + CPUShares: container.HostConfig.CPUShares, + CPUSet: container.HostConfig.CpusetCpus, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p19.ContainerJSON{ + ContainerJSONBase: base, + Volumes: volumes, + VolumesRW: volumesRW, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + Privileged: &e.Privileged, + User: e.User, + } +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_windows.go b/vendor/github.com/docker/docker/daemon/inspect_windows.go new file mode 100644 index 0000000..b331c83 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_windows.go @@ -0,0 +1,41 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +// containerInspectPre120 get containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { + return daemon.ContainerInspectCurrent(name, false) +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/vendor/github.com/docker/docker/daemon/keys.go b/vendor/github.com/docker/docker/daemon/keys.go new file mode 100644 index 0000000..055d488 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/keys.go @@ -0,0 +1,59 @@ +// +build linux + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +const ( + rootKeyFile = "/proc/sys/kernel/keys/root_maxkeys" + rootBytesFile = "/proc/sys/kernel/keys/root_maxbytes" + rootKeyLimit = 1000000 + // it is standard configuration to allocate 25 bytes per key + rootKeyByteMultiplier = 25 +) + +// ModifyRootKeyLimit checks to see if the root key limit is set to +// at least 1000000 and changes it to that limit along with the maxbytes +// allocated to the keys at a 25 to 1 multiplier. +func ModifyRootKeyLimit() error { + value, err := readRootKeyLimit(rootKeyFile) + if err != nil { + return err + } + if value < rootKeyLimit { + return setRootKeyLimit(rootKeyLimit) + } + return nil +} + +func setRootKeyLimit(limit int) error { + keys, err := os.OpenFile(rootKeyFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer keys.Close() + if _, err := fmt.Fprintf(keys, "%d", limit); err != nil { + return err + } + bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer bytes.Close() + _, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier) + return err +} + +func readRootKeyLimit(path string) (int, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return -1, err + } + return strconv.Atoi(strings.Trim(string(data), "\n")) +} diff --git a/vendor/github.com/docker/docker/daemon/keys_unsupported.go b/vendor/github.com/docker/docker/daemon/keys_unsupported.go new file mode 100644 index 0000000..b172559 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/keys_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux + +package daemon + +// ModifyRootKeyLimit is an noop on unsupported platforms. +func ModifyRootKeyLimit() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/kill.go b/vendor/github.com/docker/docker/daemon/kill.go new file mode 100644 index 0000000..18d5bbb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/kill.go @@ -0,0 +1,164 @@ +package daemon + +import ( + "fmt" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/signal" +) + +type errNoSuchProcess struct { + pid int + signal int +} + +func (e errNoSuchProcess) Error() string { + return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) +} + +// isErrNoSuchProcess returns true if the error +// is an instance of errNoSuchProcess. +func isErrNoSuchProcess(err error) bool { + _, ok := err.(errNoSuchProcess) + return ok +} + +// ContainerKill sends signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(name string, sig uint64) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { + return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) + } + + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + return daemon.Kill(container) + } + return daemon.killWithSignal(container, int(sig)) +} + +// killWithSignal sends the container the given signal. This wrapper for the +// host specific kill command prepares the container before attempting +// to send the signal. An error is returned if the container is paused +// or not running, or if there is a problem returned from the +// underlying kill command. +func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error { + logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) + } + + if !container.Running { + return errNotRunning{container.ID} + } + + if container.Config.StopSignal != "" { + containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal) + if err != nil { + return err + } + if containerStopSignal == syscall.Signal(sig) { + container.ExitOnNext() + } + } else { + container.ExitOnNext() + } + + if !daemon.IsShuttingDown() { + container.HasBeenManuallyStopped = true + } + + // if the container is currently restarting we do not need to send the signal + // to the process. Telling the monitor that it should exit on its next event + // loop is enough + if container.Restarting { + return nil + } + + if err := daemon.kill(container, sig); err != nil { + err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err) + // if container or process not exists, ignore the error + if strings.Contains(err.Error(), "container not found") || + strings.Contains(err.Error(), "no such process") { + logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error()) + } else { + return err + } + } + + attributes := map[string]string{ + "signal": fmt.Sprintf("%d", sig), + } + daemon.LogContainerEventWithAttributes(container, "kill", attributes) + return nil +} + +// Kill forcefully terminates a container. +func (daemon *Daemon) Kill(container *container.Container) error { + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + // 1. Send SIGKILL + if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // its probably because its already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we return it to the caller. + if isErrNoSuchProcess(err) { + return nil + } + + if _, err2 := container.WaitStop(2 * time.Second); err2 != nil { + return err + } + } + + // 2. Wait for the process to die, in last resort, try to kill the process directly + if err := killProcessDirectly(container); err != nil { + if isErrNoSuchProcess(err) { + return nil + } + return err + } + + container.WaitStop(-1 * time.Second) + return nil +} + +// killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error. +func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error { + err := daemon.killWithSignal(container, sig) + if err == syscall.ESRCH { + e := errNoSuchProcess{container.GetPID(), sig} + logrus.Debug(e) + return e + } + return err +} + +func (daemon *Daemon) kill(c *container.Container, sig int) error { + return daemon.containerd.Signal(c.ID, sig) +} diff --git a/vendor/github.com/docker/docker/daemon/links.go b/vendor/github.com/docker/docker/daemon/links.go new file mode 100644 index 0000000..7f691d4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links.go @@ -0,0 +1,87 @@ +package daemon + +import ( + "sync" + + "github.com/docker/docker/container" +) + +// linkIndex stores link relationships between containers, including their specified alias +// The alias is the name the parent uses to reference the child +type linkIndex struct { + // idx maps a parent->alias->child relationship + idx map[*container.Container]map[string]*container.Container + // childIdx maps child->parent->aliases + childIdx map[*container.Container]map[*container.Container]map[string]struct{} + mu sync.Mutex +} + +func newLinkIndex() *linkIndex { + return &linkIndex{ + idx: make(map[*container.Container]map[string]*container.Container), + childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}), + } +} + +// link adds indexes for the passed in parent/child/alias relationships +func (l *linkIndex) link(parent, child *container.Container, alias string) { + l.mu.Lock() + + if l.idx[parent] == nil { + l.idx[parent] = make(map[string]*container.Container) + } + l.idx[parent][alias] = child + if l.childIdx[child] == nil { + l.childIdx[child] = make(map[*container.Container]map[string]struct{}) + } + if l.childIdx[child][parent] == nil { + l.childIdx[child][parent] = make(map[string]struct{}) + } + l.childIdx[child][parent][alias] = struct{}{} + + l.mu.Unlock() +} + +// unlink removes the requested alias for the given parent/child +func (l *linkIndex) unlink(alias string, child, parent *container.Container) { + l.mu.Lock() + delete(l.idx[parent], alias) + delete(l.childIdx[child], parent) + l.mu.Unlock() +} + +// children maps all the aliases-> children for the passed in parent +// aliases here are the aliases the parent uses to refer to the child +func (l *linkIndex) children(parent *container.Container) map[string]*container.Container { + l.mu.Lock() + children := l.idx[parent] + l.mu.Unlock() + return children +} + +// parents maps all the aliases->parent for the passed in child +// aliases here are the aliases the parents use to refer to the child +func (l *linkIndex) parents(child *container.Container) map[string]*container.Container { + l.mu.Lock() + + parents := make(map[string]*container.Container) + for parent, aliases := range l.childIdx[child] { + for alias := range aliases { + parents[alias] = parent + } + } + + l.mu.Unlock() + return parents +} + +// delete deletes all link relationships referencing this container +func (l *linkIndex) delete(container *container.Container) { + l.mu.Lock() + for _, child := range l.idx[container] { + delete(l.childIdx[child], container) + } + delete(l.idx, container) + delete(l.childIdx, container) + l.mu.Unlock() +} diff --git a/vendor/github.com/docker/docker/daemon/links/links.go b/vendor/github.com/docker/docker/daemon/links/links.go new file mode 100644 index 0000000..af15de0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links/links.go @@ -0,0 +1,141 @@ +package links + +import ( + "fmt" + "path" + "strings" + + "github.com/docker/go-connections/nat" +) + +// Link struct holds informations about parent/child linked container +type Link struct { + // Parent container IP address + ParentIP string + // Child container IP address + ChildIP string + // Link name + Name string + // Child environments variables + ChildEnvironment []string + // Child exposed ports + Ports []nat.Port +} + +// NewLink initializes a new Link struct with the provided options. +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link { + var ( + i int + ports = make([]nat.Port, len(exposedPorts)) + ) + + for p := range exposedPorts { + ports[i] = p + i++ + } + + return &Link{ + Name: name, + ChildIP: childIP, + ParentIP: parentIP, + ChildEnvironment: env, + Ports: ports, + } +} + +// ToEnv creates a string's slice containing child container informations in +// the form of environment variables which will be later exported on container +// startup. +func (l *Link) ToEnv() []string { + env := []string{} + + _, n := path.Split(l.Name) + alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) + + if p := l.getDefaultPort(); p != nil { + env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) + } + + //sort the ports so that we can bulk the continuous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } else { + i++ + } + } + for _, p := range l.Ports { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + } + + // Load the linked container's name into the environment + env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) + + if l.ChildEnvironment != nil { + for _, v := range l.ChildEnvironment { + parts := strings.SplitN(v, "=", 2) + if len(parts) < 2 { + continue + } + // Ignore a few variables that are added during docker build (and not really relevant to linked containers) + if parts[0] == "HOME" || parts[0] == "PATH" { + continue + } + env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) + } + } + return env +} + +func nextContiguous(ports []nat.Port, value int, index int) int { + if index+1 == len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + +// Default port rules +func (l *Link) getDefaultPort() *nat.Port { + var p nat.Port + i := len(l.Ports) + + if i == 0 { + return nil + } else if i > 1 { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + } + p = l.Ports[0] + return &p +} diff --git a/vendor/github.com/docker/docker/daemon/links/links_test.go b/vendor/github.com/docker/docker/daemon/links/links_test.go new file mode 100644 index 0000000..0273f13 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links/links_test.go @@ -0,0 +1,213 @@ +package links + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/go-connections/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestLinkNaming(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + value, ok := env["DOCKER_1_PORT"] + + if !ok { + t.Fatalf("DOCKER_1_PORT not found in env") + } + + if value != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) + } +} + +func TestLinkNew(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) + + if link.Name != "/db/docker" { + t.Fail() + } + if link.ParentIP != "172.0.17.3" { + t.Fail() + } + if link.ChildIP != "172.0.17.2" { + t.Fail() + } + for _, p := range link.Ports { + if p != newPortNoError("tcp", "6379") { + t.Fail() + } + } +} + +func TestLinkEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkMultipleEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkPortRangeEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } + for i := range []int{6379, 6380, 6381} { + tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) + tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP+PORT", i) + tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP+PROTO", i) + tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) + if env[tcpaddr] == "172.0.17.2" { + t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) + } + if env[tcpport] == fmt.Sprintf("%d", i) { + t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) + } + if env[tcpproto] == "tcp" { + t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) + } + if env[tcp] == fmt.Sprintf("tcp://172.0.17.2:%d", i) { + t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/links_linux.go b/vendor/github.com/docker/docker/daemon/links_linux.go new file mode 100644 index 0000000..2ea40d9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links_linux.go @@ -0,0 +1,72 @@ +package daemon + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/graphdb" +) + +// migrateLegacySqliteLinks migrates sqlite links to use links from HostConfig +// when sqlite links were used, hostConfig.Links was set to nil +func (daemon *Daemon) migrateLegacySqliteLinks(db *graphdb.Database, container *container.Container) error { + // if links is populated (or an empty slice), then this isn't using sqlite links and can be skipped + if container.HostConfig == nil || container.HostConfig.Links != nil { + return nil + } + + logrus.Debugf("migrating legacy sqlite link info for container: %s", container.ID) + + fullName := container.Name + if fullName[0] != '/' { + fullName = "/" + fullName + } + + // don't use a nil slice, this ensures that the check above will skip once the migration has completed + links := []string{} + children, err := db.Children(fullName, 0) + if err != nil { + if !strings.Contains(err.Error(), "Cannot find child for") { + return err + } + // else continue... it's ok if we didn't find any children, it'll just be nil and we can continue the migration + } + + for _, child := range children { + c, err := daemon.GetContainer(child.Entity.ID()) + if err != nil { + return err + } + + links = append(links, c.Name+":"+child.Edge.Name) + } + + container.HostConfig.Links = links + return container.WriteHostConfig() +} + +// sqliteMigration performs the link graph DB migration. +func (daemon *Daemon) sqliteMigration(containers map[string]*container.Container) error { + // migrate any legacy links from sqlite + linkdbFile := filepath.Join(daemon.root, "linkgraph.db") + var ( + legacyLinkDB *graphdb.Database + err error + ) + + legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile) + if err != nil { + return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err) + } + defer legacyLinkDB.Close() + + for _, c := range containers { + if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/links_linux_test.go b/vendor/github.com/docker/docker/daemon/links_linux_test.go new file mode 100644 index 0000000..e2dbff2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links_linux_test.go @@ -0,0 +1,98 @@ +package daemon + +import ( + "encoding/json" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/stringid" +) + +func TestMigrateLegacySqliteLinks(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "legacy-qlite-links-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + name1 := "test1" + c1 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: stringid.GenerateNonCryptoID(), + Name: name1, + HostConfig: &containertypes.HostConfig{}, + }, + } + c1.Root = tmpDir + + name2 := "test2" + c2 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: stringid.GenerateNonCryptoID(), + Name: name2, + }, + } + + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + + d := &Daemon{root: tmpDir, containers: store} + db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db")) + if err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/"+name1, c1.ID); err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/"+name2, c2.ID); err != nil { + t.Fatal(err) + } + + alias := "hello" + if _, err := db.Set(path.Join(c1.Name, alias), c2.ID); err != nil { + t.Fatal(err) + } + + if err := d.migrateLegacySqliteLinks(db, c1); err != nil { + t.Fatal(err) + } + + if len(c1.HostConfig.Links) != 1 { + t.Fatal("expected links to be populated but is empty") + } + + expected := name2 + ":" + alias + actual := c1.HostConfig.Links[0] + if actual != expected { + t.Fatalf("got wrong link value, expected: %q, got: %q", expected, actual) + } + + // ensure this is persisted + b, err := ioutil.ReadFile(filepath.Join(c1.Root, "hostconfig.json")) + if err != nil { + t.Fatal(err) + } + type hc struct { + Links []string + } + var cfg hc + if err := json.Unmarshal(b, &cfg); err != nil { + t.Fatal(err) + } + + if len(cfg.Links) != 1 { + t.Fatalf("expected one entry in links, got: %d", len(cfg.Links)) + } + if cfg.Links[0] != expected { // same expected as above + t.Fatalf("got wrong link value, expected: %q, got: %q", expected, cfg.Links[0]) + } +} diff --git a/vendor/github.com/docker/docker/daemon/links_notlinux.go b/vendor/github.com/docker/docker/daemon/links_notlinux.go new file mode 100644 index 0000000..12c226c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links_notlinux.go @@ -0,0 +1,10 @@ +// +build !linux + +package daemon + +import "github.com/docker/docker/container" + +// sqliteMigration performs the link graph DB migration. No-op on platforms other than Linux +func (daemon *Daemon) sqliteMigration(_ map[string]*container.Container) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/list.go b/vendor/github.com/docker/docker/daemon/list.go new file mode 100644 index 0000000..02805ea --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list.go @@ -0,0 +1,660 @@ +package daemon + +import ( + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" +) + +var acceptedVolumeFilterTags = map[string]bool{ + "dangling": true, + "name": true, + "driver": true, + "label": true, +} + +var acceptedPsFilterTags = map[string]bool{ + "ancestor": true, + "before": true, + "exited": true, + "id": true, + "isolation": true, + "label": true, + "name": true, + "status": true, + "health": true, + "since": true, + "volume": true, + "network": true, + "is-task": true, +} + +// iterationAction represents possible outcomes happening during the container iteration. +type iterationAction int + +// containerReducer represents a reducer for a container. +// Returns the object to serialize by the api. +type containerReducer func(*container.Container, *listContext) (*types.Container, error) + +const ( + // includeContainer is the action to include a container in the reducer. + includeContainer iterationAction = iota + // excludeContainer is the action to exclude a container in the reducer. + excludeContainer + // stopIteration is the action to stop iterating over the list of containers. + stopIteration +) + +// errStopIteration makes the iterator to stop without returning an error. +var errStopIteration = errors.New("container list iteration stopped") + +// List returns an array of all containers registered in the daemon. +func (daemon *Daemon) List() []*container.Container { + return daemon.containers.List() +} + +// listContext is the daemon generated filtering to iterate over containers. +// This is created based on the user specification from types.ContainerListOptions. +type listContext struct { + // idx is the container iteration index for this context + idx int + // ancestorFilter tells whether it should check ancestors or not + ancestorFilter bool + // names is a list of container names to filter with + names map[string][]string + // images is a list of images to filter with + images map[image.ID]bool + // filters is a collection of arguments to filter with, specified by the user + filters filters.Args + // exitAllowed is a list of exit codes allowed to filter with + exitAllowed []int + + // beforeFilter is a filter to ignore containers that appear before the one given + beforeFilter *container.Container + // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container + sinceFilter *container.Container + + // taskFilter tells if we should filter based on wether a container is part of a task + taskFilter bool + // isTask tells us if the we should filter container that are a task (true) or not (false) + isTask bool + // ContainerListOptions is the filters set by the user + *types.ContainerListOptions +} + +// byContainerCreated is a temporary type used to sort a list of containers by creation time. +type byContainerCreated []*container.Container + +func (r byContainerCreated) Len() int { return len(r) } +func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byContainerCreated) Less(i, j int) bool { + return r[i].Created.UnixNano() < r[j].Created.UnixNano() +} + +// Containers returns the list of containers to show given the user's filtering. +func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { + return daemon.reduceContainers(config, daemon.transformContainer) +} + +func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container { + idSearch := false + names := ctx.filters.Get("name") + ids := ctx.filters.Get("id") + if len(names)+len(ids) == 0 { + // if name or ID filters are not in use, return to + // standard behavior of walking the entire container + // list from the daemon's in-memory store + return daemon.List() + } + + // idSearch will determine if we limit name matching to the IDs + // matched from any IDs which were specified as filters + if len(ids) > 0 { + idSearch = true + } + + matches := make(map[string]bool) + // find ID matches; errors represent "not found" and can be ignored + for _, id := range ids { + if fullID, err := daemon.idIndex.Get(id); err == nil { + matches[fullID] = true + } + } + + // look for name matches; if ID filtering was used, then limit the + // search space to the matches map only; errors represent "not found" + // and can be ignored + if len(names) > 0 { + for id, idNames := range ctx.names { + // if ID filters were used and no matches on that ID were + // found, continue to next ID in the list + if idSearch && !matches[id] { + continue + } + for _, eachName := range idNames { + if ctx.filters.Match("name", eachName) { + matches[id] = true + } + } + } + } + + cntrs := make([]*container.Container, 0, len(matches)) + for id := range matches { + if c := daemon.containers.Get(id); c != nil { + cntrs = append(cntrs, c) + } + } + + // Restore sort-order after filtering + // Created gives us nanosec resolution for sorting + sort.Sort(sort.Reverse(byContainerCreated(cntrs))) + + return cntrs +} + +// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. +func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { + var ( + containers = []*types.Container{} + ) + + ctx, err := daemon.foldFilter(config) + if err != nil { + return nil, err + } + + // fastpath to only look at a subset of containers if specific name + // or ID matches were provided by the user--otherwise we potentially + // end up locking and querying many more containers than intended + containerList := daemon.filterByNameIDMatches(ctx) + + for _, container := range containerList { + t, err := daemon.reducePsContainer(container, ctx, reducer) + if err != nil { + if err != errStopIteration { + return nil, err + } + break + } + if t != nil { + containers = append(containers, t) + ctx.idx++ + } + } + + return containers, nil +} + +// reducePsContainer is the basic representation for a container as expected by the ps command. +func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { + container.Lock() + defer container.Unlock() + + // filter containers to return + action := includeContainerInList(container, ctx) + switch action { + case excludeContainer: + return nil, nil + case stopIteration: + return nil, errStopIteration + } + + // transform internal container struct into api structs + return reducer(container, ctx) +} + +// foldFilter generates the container filter based on the user's filtering options. +func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { + psFilters := config.Filters + + if err := psFilters.Validate(acceptedPsFilterTags); err != nil { + return nil, err + } + + var filtExited []int + + err := psFilters.WalkValues("exited", func(value string) error { + code, err := strconv.Atoi(value) + if err != nil { + return err + } + filtExited = append(filtExited, code) + return nil + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("status", func(value string) error { + if !container.IsValidStateString(value) { + return fmt.Errorf("Unrecognised filter value for status: %s", value) + } + + config.All = true + return nil + }) + if err != nil { + return nil, err + } + + var taskFilter, isTask bool + if psFilters.Include("is-task") { + if psFilters.ExactMatch("is-task", "true") { + taskFilter = true + isTask = true + } else if psFilters.ExactMatch("is-task", "false") { + taskFilter = true + isTask = false + } else { + return nil, fmt.Errorf("Invalid filter 'is-task=%s'", psFilters.Get("is-task")) + } + } + + err = psFilters.WalkValues("health", func(value string) error { + if !container.IsValidHealthString(value) { + return fmt.Errorf("Unrecognised filter value for health: %s", value) + } + + return nil + }) + if err != nil { + return nil, err + } + + var beforeContFilter, sinceContFilter *container.Container + + err = psFilters.WalkValues("before", func(value string) error { + beforeContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("since", func(value string) error { + sinceContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + imagesFilter := map[image.ID]bool{} + var ancestorFilter bool + if psFilters.Include("ancestor") { + ancestorFilter = true + psFilters.WalkValues("ancestor", func(ancestor string) error { + id, err := daemon.GetImageID(ancestor) + if err != nil { + logrus.Warnf("Error while looking up for image %v", ancestor) + return nil + } + if imagesFilter[id] { + // Already seen this ancestor, skip it + return nil + } + // Then walk down the graph and put the imageIds in imagesFilter + populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) + return nil + }) + } + + return &listContext{ + filters: psFilters, + ancestorFilter: ancestorFilter, + images: imagesFilter, + exitAllowed: filtExited, + beforeFilter: beforeContFilter, + sinceFilter: sinceContFilter, + taskFilter: taskFilter, + isTask: isTask, + ContainerListOptions: config, + names: daemon.nameIndex.GetAll(), + }, nil +} + +// includeContainerInList decides whether a container should be included in the output or not based in the filter. +// It also decides if the iteration should be stopped or not. +func includeContainerInList(container *container.Container, ctx *listContext) iterationAction { + // Do not include container if it's in the list before the filter container. + // Set the filter container to nil to include the rest of containers after this one. + if ctx.beforeFilter != nil { + if container.ID == ctx.beforeFilter.ID { + ctx.beforeFilter = nil + } + return excludeContainer + } + + // Stop iteration when the container arrives to the filter container + if ctx.sinceFilter != nil { + if container.ID == ctx.sinceFilter.ID { + return stopIteration + } + } + + // Do not include container if it's stopped and we're not filters + if !container.Running && !ctx.All && ctx.Limit <= 0 { + return excludeContainer + } + + // Do not include container if the name doesn't match + if !ctx.filters.Match("name", container.Name) { + return excludeContainer + } + + // Do not include container if the id doesn't match + if !ctx.filters.Match("id", container.ID) { + return excludeContainer + } + + if ctx.taskFilter { + if ctx.isTask != container.Managed { + return excludeContainer + } + } + + // Do not include container if any of the labels don't match + if !ctx.filters.MatchKVList("label", container.Config.Labels) { + return excludeContainer + } + + // Do not include container if isolation doesn't match + if excludeContainer == excludeByIsolation(container, ctx) { + return excludeContainer + } + + // Stop iteration when the index is over the limit + if ctx.Limit > 0 && ctx.idx == ctx.Limit { + return stopIteration + } + + // Do not include container if its exit code is not in the filter + if len(ctx.exitAllowed) > 0 { + shouldSkip := true + for _, code := range ctx.exitAllowed { + if code == container.ExitCode() && !container.Running && !container.StartedAt.IsZero() { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + // Do not include container if its status doesn't match the filter + if !ctx.filters.Match("status", container.State.StateString()) { + return excludeContainer + } + + // Do not include container if its health doesn't match the filter + if !ctx.filters.ExactMatch("health", container.State.HealthString()) { + return excludeContainer + } + + if ctx.filters.Include("volume") { + volumesByName := make(map[string]*volume.MountPoint) + for _, m := range container.MountPoints { + if m.Name != "" { + volumesByName[m.Name] = m + } else { + volumesByName[m.Source] = m + } + } + + volumeExist := fmt.Errorf("volume mounted in container") + err := ctx.filters.WalkValues("volume", func(value string) error { + if _, exist := container.MountPoints[value]; exist { + return volumeExist + } + if _, exist := volumesByName[value]; exist { + return volumeExist + } + return nil + }) + if err != volumeExist { + return excludeContainer + } + } + + if ctx.ancestorFilter { + if len(ctx.images) == 0 { + return excludeContainer + } + if !ctx.images[container.ImageID] { + return excludeContainer + } + } + + networkExist := fmt.Errorf("container part of network") + if ctx.filters.Include("network") { + err := ctx.filters.WalkValues("network", func(value string) error { + if _, ok := container.NetworkSettings.Networks[value]; ok { + return networkExist + } + for _, nw := range container.NetworkSettings.Networks { + if nw.EndpointSettings == nil { + continue + } + if nw.NetworkID == value { + return networkExist + } + } + return nil + }) + if err != networkExist { + return excludeContainer + } + } + + return includeContainer +} + +// transformContainer generates the container type expected by the docker ps command. +func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { + newC := &types.Container{ + ID: container.ID, + Names: ctx.names[container.ID], + ImageID: container.ImageID.String(), + } + if newC.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + newC.Names = []string{} + } + + image := container.Config.Image // if possible keep the original ref + if image != container.ImageID.String() { + id, err := daemon.GetImageID(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id != container.ImageID { + image = container.ImageID.String() + } + } + newC.Image = image + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + newC.Command = container.Path + } + newC.Created = container.Created.Unix() + newC.State = container.State.StateString() + newC.Status = container.State.String() + newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + // copy networks to avoid races + networks := make(map[string]*networktypes.EndpointSettings) + for name, network := range container.NetworkSettings.Networks { + if network == nil || network.EndpointSettings == nil { + continue + } + networks[name] = &networktypes.EndpointSettings{ + EndpointID: network.EndpointID, + Gateway: network.Gateway, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + IPv6Gateway: network.IPv6Gateway, + GlobalIPv6Address: network.GlobalIPv6Address, + GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, + MacAddress: network.MacAddress, + NetworkID: network.NetworkID, + } + if network.IPAMConfig != nil { + networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: network.IPAMConfig.IPv4Address, + IPv6Address: network.IPAMConfig.IPv6Address, + } + } + } + newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + newC.Ports = []types.Port{} + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + return nil, err + } + if len(bindings) == 0 { + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: uint16(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + return nil, err + } + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: uint16(p), + PublicPort: uint16(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(container) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + newC.Labels = container.Config.Labels + newC.Mounts = addMountPoints(container) + + return newC, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. +func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { + var ( + volumesOut []*types.Volume + ) + volFilters, err := filters.FromParam(filter) + if err != nil { + return nil, nil, err + } + + if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { + return nil, nil, err + } + + volumes, warnings, err := daemon.volumes.List() + if err != nil { + return nil, nil, err + } + + filterVolumes, err := daemon.filterVolumes(volumes, volFilters) + if err != nil { + return nil, nil, err + } + for _, v := range filterVolumes { + apiV := volumeToAPIType(v) + if vv, ok := v.(interface { + CachedPath() string + }); ok { + apiV.Mountpoint = vv.CachedPath() + } else { + apiV.Mountpoint = v.Path() + } + volumesOut = append(volumesOut, apiV) + } + return volumesOut, warnings, nil +} + +// filterVolumes filters volume list according to user specified filter +// and returns user chosen volumes +func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) { + // if filter is empty, return original volume list + if filter.Len() == 0 { + return vols, nil + } + + var retVols []volume.Volume + for _, vol := range vols { + if filter.Include("name") { + if !filter.Match("name", vol.Name()) { + continue + } + } + if filter.Include("driver") { + if !filter.Match("driver", vol.DriverName()) { + continue + } + } + if filter.Include("label") { + v, ok := vol.(volume.DetailedVolume) + if !ok { + continue + } + if !filter.MatchKVList("label", v.Labels()) { + continue + } + } + retVols = append(retVols, vol) + } + danglingOnly := false + if filter.Include("dangling") { + if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { + danglingOnly = true + } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling")) + } + retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly) + } + return retVols, nil +} + +func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { + if !ancestorMap[imageID] { + for _, id := range getChildren(imageID) { + populateImageFilterByParents(ancestorMap, id, getChildren) + } + ancestorMap[imageID] = true + } +} diff --git a/vendor/github.com/docker/docker/daemon/list_unix.go b/vendor/github.com/docker/docker/daemon/list_unix.go new file mode 100644 index 0000000..91c9cac --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd solaris + +package daemon + +import "github.com/docker/docker/container" + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + return includeContainer +} diff --git a/vendor/github.com/docker/docker/daemon/list_windows.go b/vendor/github.com/docker/docker/daemon/list_windows.go new file mode 100644 index 0000000..7fbcd3a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_windows.go @@ -0,0 +1,20 @@ +package daemon + +import ( + "strings" + + "github.com/docker/docker/container" +) + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + i := strings.ToLower(string(container.HostConfig.Isolation)) + if i == "" { + i = "default" + } + if !ctx.filters.Match("isolation", i) { + return excludeContainer + } + return includeContainer +} diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go new file mode 100644 index 0000000..ad343c1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go @@ -0,0 +1,15 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/gcplogs" + _ "github.com/docker/docker/daemon/logger/gelf" + _ "github.com/docker/docker/daemon/logger/journald" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_windows.go b/vendor/github.com/docker/docker/daemon/logdrivers_windows.go new file mode 100644 index 0000000..f3002b9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logdrivers_windows.go @@ -0,0 +1,13 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/etwlogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go new file mode 100644 index 0000000..fee518d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go @@ -0,0 +1,404 @@ +// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs +package awslogs + +import ( + "errors" + "fmt" + "os" + "runtime" + "sort" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" +) + +const ( + name = "awslogs" + regionKey = "awslogs-region" + regionEnvKey = "AWS_REGION" + logGroupKey = "awslogs-group" + logStreamKey = "awslogs-stream" + tagKey = "tag" + batchPublishFrequency = 5 * time.Second + + // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + perEventBytes = 26 + maximumBytesPerPut = 1048576 + maximumLogEventsPerPut = 10000 + + // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html + maximumBytesPerEvent = 262144 - perEventBytes + + resourceAlreadyExistsCode = "ResourceAlreadyExistsException" + dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" + invalidSequenceTokenCode = "InvalidSequenceTokenException" + + userAgentHeader = "User-Agent" +) + +type logStream struct { + logStreamName string + logGroupName string + client api + messages chan *logger.Message + lock sync.RWMutex + closed bool + sequenceToken *string +} + +type api interface { + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) +} + +type regionFinder interface { + Region() (string, error) +} + +type wrappedEvent struct { + inputLogEvent *cloudwatchlogs.InputLogEvent + insertOrder int +} +type byTimestamp []wrappedEvent + +// init registers the awslogs driver +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates an awslogs logger using the configuration passed in on the +// context. Supported context configuration variables are awslogs-region, +// awslogs-group, and awslogs-stream. When available, configuration is +// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, +// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and +// the EC2 Instance Metadata Service. +func New(ctx logger.Context) (logger.Logger, error) { + logGroupName := ctx.Config[logGroupKey] + logStreamName, err := loggerutils.ParseLogTag(ctx, "{{.FullID}}") + if err != nil { + return nil, err + } + + if ctx.Config[logStreamKey] != "" { + logStreamName = ctx.Config[logStreamKey] + } + client, err := newAWSLogsClient(ctx) + if err != nil { + return nil, err + } + containerStream := &logStream{ + logStreamName: logStreamName, + logGroupName: logGroupName, + client: client, + messages: make(chan *logger.Message, 4096), + } + err = containerStream.create() + if err != nil { + return nil, err + } + go containerStream.collectBatch() + + return containerStream, nil +} + +// newRegionFinder is a variable such that the implementation +// can be swapped out for unit tests. +var newRegionFinder = func() regionFinder { + return ec2metadata.New(session.New()) +} + +// newAWSLogsClient creates the service client for Amazon CloudWatch Logs. +// Customizations to the default client from the SDK include a Docker-specific +// User-Agent string and automatic region detection using the EC2 Instance +// Metadata Service when region is otherwise unspecified. +func newAWSLogsClient(ctx logger.Context) (api, error) { + var region *string + if os.Getenv(regionEnvKey) != "" { + region = aws.String(os.Getenv(regionEnvKey)) + } + if ctx.Config[regionKey] != "" { + region = aws.String(ctx.Config[regionKey]) + } + if region == nil || *region == "" { + logrus.Info("Trying to get region from EC2 Metadata") + ec2MetadataClient := newRegionFinder() + r, err := ec2MetadataClient.Region() + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + }).Error("Could not get region from EC2 metadata, environment, or log option") + return nil, errors.New("Cannot determine region for awslogs driver") + } + region = &r + } + logrus.WithFields(logrus.Fields{ + "region": *region, + }).Debug("Created awslogs client") + + client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region)) + + client.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "DockerUserAgentHandler", + Fn: func(r *request.Request) { + currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) + r.HTTPRequest.Header.Set(userAgentHeader, + fmt.Sprintf("Docker %s (%s) %s", + dockerversion.Version, runtime.GOOS, currentAgent)) + }, + }) + return client, nil +} + +// Name returns the name of the awslogs logging driver +func (l *logStream) Name() string { + return name +} + +// Log submits messages for logging by an instance of the awslogs logging driver +func (l *logStream) Log(msg *logger.Message) error { + l.lock.RLock() + defer l.lock.RUnlock() + if !l.closed { + // buffer up the data, making sure to copy the Line data + l.messages <- logger.CopyMessage(msg) + } + return nil +} + +// Close closes the instance of the awslogs logging driver +func (l *logStream) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if !l.closed { + close(l.messages) + } + l.closed = true + return nil +} + +// create creates a log stream for the instance of the awslogs logging driver +func (l *logStream) create() error { + input := &cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + + _, err := l.client.CreateLogStream(input) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + fields := logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + } + if awsErr.Code() == resourceAlreadyExistsCode { + // Allow creation to succeed + logrus.WithFields(fields).Info("Log stream already exists") + return nil + } + logrus.WithFields(fields).Error("Failed to create log stream") + } + } + return err +} + +// newTicker is used for time-based batching. newTicker is a variable such +// that the implementation can be swapped out for unit tests. +var newTicker = func(freq time.Duration) *time.Ticker { + return time.NewTicker(freq) +} + +// collectBatch executes as a goroutine to perform batching of log events for +// submission to the log stream. Batching is performed on time- and size- +// bases. Time-based batching occurs at a 5 second interval (defined in the +// batchPublishFrequency const). Size-based batching is performed on the +// maximum number of events per batch (defined in maximumLogEventsPerPut) and +// the maximum number of total bytes in a batch (defined in +// maximumBytesPerPut). Log messages are split by the maximum bytes per event +// (defined in maximumBytesPerEvent). There is a fixed per-event byte overhead +// (defined in perEventBytes) which is accounted for in split- and batch- +// calculations. +func (l *logStream) collectBatch() { + timer := newTicker(batchPublishFrequency) + var events []wrappedEvent + bytes := 0 + for { + select { + case <-timer.C: + l.publishBatch(events) + events = events[:0] + bytes = 0 + case msg, more := <-l.messages: + if !more { + l.publishBatch(events) + return + } + unprocessedLine := msg.Line + for len(unprocessedLine) > 0 { + // Split line length so it does not exceed the maximum + lineBytes := len(unprocessedLine) + if lineBytes > maximumBytesPerEvent { + lineBytes = maximumBytesPerEvent + } + line := unprocessedLine[:lineBytes] + unprocessedLine = unprocessedLine[lineBytes:] + if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { + // Publish an existing batch if it's already over the maximum number of events or if adding this + // event would push it over the maximum number of total bytes. + l.publishBatch(events) + events = events[:0] + bytes = 0 + } + events = append(events, wrappedEvent{ + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(string(line)), + Timestamp: aws.Int64(msg.Timestamp.UnixNano() / int64(time.Millisecond)), + }, + insertOrder: len(events), + }) + bytes += (lineBytes + perEventBytes) + } + } + } +} + +// publishBatch calls PutLogEvents for a given set of InputLogEvents, +// accounting for sequencing requirements (each request must reference the +// sequence token returned by the previous request). +func (l *logStream) publishBatch(events []wrappedEvent) { + if len(events) == 0 { + return + } + + // events in a batch must be sorted by timestamp + // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + sort.Sort(byTimestamp(events)) + cwEvents := unwrapEvents(events) + + nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dataAlreadyAcceptedCode { + // already submitted, just grab the correct sequence token + parts := strings.Split(awsErr.Message(), " ") + nextSequenceToken = &parts[len(parts)-1] + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Info("Data already accepted, ignoring error") + err = nil + } else if awsErr.Code() == invalidSequenceTokenCode { + // sequence code is bad, grab the correct one and retry + parts := strings.Split(awsErr.Message(), " ") + token := parts[len(parts)-1] + nextSequenceToken, err = l.putLogEvents(cwEvents, &token) + } + } + } + if err != nil { + logrus.Error(err) + } else { + l.sequenceToken = nextSequenceToken + } +} + +// putLogEvents wraps the PutLogEvents API +func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { + input := &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: sequenceToken, + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + resp, err := l.client.PutLogEvents(input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Error("Failed to put log events") + } + return nil, err + } + return resp.NextSequenceToken, nil +} + +// ValidateLogOpt looks for awslogs-specific log options awslogs-region, +// awslogs-group, and awslogs-stream +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case logGroupKey: + case logStreamKey: + case regionKey: + case tagKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) + } + } + if cfg[logGroupKey] == "" { + return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) + } + return nil +} + +// Len returns the length of a byTimestamp slice. Len is required by the +// sort.Interface interface. +func (slice byTimestamp) Len() int { + return len(slice) +} + +// Less compares two values in a byTimestamp slice by Timestamp. Less is +// required by the sort.Interface interface. +func (slice byTimestamp) Less(i, j int) bool { + iTimestamp, jTimestamp := int64(0), int64(0) + if slice != nil && slice[i].inputLogEvent.Timestamp != nil { + iTimestamp = *slice[i].inputLogEvent.Timestamp + } + if slice != nil && slice[j].inputLogEvent.Timestamp != nil { + jTimestamp = *slice[j].inputLogEvent.Timestamp + } + if iTimestamp == jTimestamp { + return slice[i].insertOrder < slice[j].insertOrder + } + return iTimestamp < jTimestamp +} + +// Swap swaps two values in a byTimestamp slice with each other. Swap is +// required by the sort.Interface interface. +func (slice byTimestamp) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { + cwEvents := []*cloudwatchlogs.InputLogEvent{} + for _, input := range events { + cwEvents = append(cwEvents, input.inputLogEvent) + } + return cwEvents +} diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go new file mode 100644 index 0000000..d5b1aae --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -0,0 +1,724 @@ +package awslogs + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "runtime" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" +) + +const ( + groupName = "groupName" + streamName = "streamName" + sequenceToken = "sequenceToken" + nextSequenceToken = "nextSequenceToken" + logline = "this is a log line" +) + +func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{ + regionKey: "us-east-1", + }, + } + + client, err := newAWSLogsClient(ctx) + if err != nil { + t.Fatal(err) + } + realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs) + if !ok { + t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs") + } + buildHandlerList := realClient.Handlers.Build + request := &request.Request{ + HTTPRequest: &http.Request{ + Header: http.Header{}, + }, + } + buildHandlerList.Run(request) + expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s (%s; %s; %s)", + dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) + userAgent := request.HTTPRequest.Header.Get("User-Agent") + if userAgent != expectedUserAgentString { + t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"", + expectedUserAgentString, userAgent) + } +} + +func TestNewAWSLogsClientRegionDetect(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{}, + } + + mockMetadata := newMockMetadataClient() + newRegionFinder = func() regionFinder { + return mockMetadata + } + mockMetadata.regionResult <- ®ionResult{ + successResult: "us-east-1", + } + + _, err := newAWSLogsClient(ctx) + if err != nil { + t.Fatal(err) + } +} + +func TestCreateSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: errors.New("Error!"), + } + + err := stream.create() + + if err == nil { + t.Fatal("Expected non-nil err") + } +} + +func TestCreateAlreadyExists(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: awserr.New(resourceAlreadyExistsCode, "", nil), + } + + err := stream.create() + + if err != nil { + t.Fatal("Expected nil err") + } +} + +func TestPublishBatchSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: errors.New("Error!"), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != sequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", sequenceToken, *stream.sequenceToken) + } +} + +func TestPublishBatchInvalidSeqSuccess(t *testing.T) { + mockClient := newMockClientBuffered(2) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(invalidSequenceTokenCode, "use token token", nil), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != "token" { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", "token", *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchAlreadyAccepted(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(dataAlreadyAcceptedCode, "use token token", nil), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != "token" { + t.Errorf("Expected sequenceToken to be %s, but was %s", "token", *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestCollectBatchSimple(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchTicker(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline + " 1"), + Timestamp: time.Time{}, + }) + stream.Log(&logger.Message{ + Line: []byte(logline + " 2"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + + // Verify first batch + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline+" 1" { + t.Errorf("Expected message to be %s but was %s", logline+" 1", *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != logline+" 2" { + t.Errorf("Expected message to be %s but was %s", logline+" 2", *argument.LogEvents[0].Message) + } + + stream.Log(&logger.Message{ + Line: []byte(logline + " 3"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline+" 3" { + t.Errorf("Expected message to be %s but was %s", logline+" 3", *argument.LogEvents[0].Message) + } + + stream.Close() + +} + +func TestCollectBatchClose(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchLineSplit(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerEvent) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != longline { + t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != "B" { + t.Errorf("Expected message to be %s but was %s", "B", *argument.LogEvents[1].Message) + } +} + +func TestCollectBatchMaxEvents(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + line := "A" + for i := 0; i <= maximumLogEventsPerPut; i++ { + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: time.Time{}, + }) + } + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != maximumLogEventsPerPut { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", maximumLogEventsPerPut, len(argument.LogEvents)) + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", 1, len(argument.LogEvents)) + } +} + +func TestCollectBatchMaxTotalBytes(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerPut) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + bytes := 0 + for _, event := range argument.LogEvents { + bytes += len(*event.Message) + } + if bytes > maximumBytesPerPut { + t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes) + } + + argument = <-mockClient.putLogEventsArgument + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) + } + message := *argument.LogEvents[0].Message + if message[len(message)-1:] != "B" { + t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:]) + } +} + +func TestCollectBatchWithDuplicateTimestamps(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + times := maximumLogEventsPerPut + expectedEvents := []*cloudwatchlogs.InputLogEvent{} + timestamp := time.Now() + for i := 0; i < times; i++ { + line := fmt.Sprintf("%d", i) + if i%2 == 0 { + timestamp.Add(1 * time.Nanosecond) + } + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: timestamp, + }) + expectedEvents = append(expectedEvents, &cloudwatchlogs.InputLogEvent{ + Message: aws.String(line), + Timestamp: aws.Int64(timestamp.UnixNano() / int64(time.Millisecond)), + }) + } + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != times { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", times, len(argument.LogEvents)) + } + for i := 0; i < times; i++ { + if !reflect.DeepEqual(*argument.LogEvents[i], *expectedEvents[i]) { + t.Errorf("Expected event to be %v but was %v", *expectedEvents[i], *argument.LogEvents[i]) + } + } +} + +func TestCreateTagSuccess(t *testing.T) { + mockClient := newMockClient() + ctx := logger.Context{ + ContainerName: "/test-container", + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + Config: map[string]string{"tag": "{{.Name}}/{{.FullID}}"}, + } + logStreamName, e := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if e != nil { + t.Errorf("Error generating tag: %q", e) + } + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: logStreamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + + if *argument.LogStreamName != "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890" { + t.Errorf("Expected LogStreamName to be %s", "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890") + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go new file mode 100644 index 0000000..b768a3d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go @@ -0,0 +1,77 @@ +package awslogs + +import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + +type mockcwlogsclient struct { + createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput + createLogStreamResult chan *createLogStreamResult + putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput + putLogEventsResult chan *putLogEventsResult +} + +type createLogStreamResult struct { + successResult *cloudwatchlogs.CreateLogStreamOutput + errorResult error +} + +type putLogEventsResult struct { + successResult *cloudwatchlogs.PutLogEventsOutput + errorResult error +} + +func newMockClient() *mockcwlogsclient { + return &mockcwlogsclient{ + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), + createLogStreamResult: make(chan *createLogStreamResult, 1), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, 1), + putLogEventsResult: make(chan *putLogEventsResult, 1), + } +} + +func newMockClientBuffered(buflen int) *mockcwlogsclient { + return &mockcwlogsclient{ + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, buflen), + createLogStreamResult: make(chan *createLogStreamResult, buflen), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, buflen), + putLogEventsResult: make(chan *putLogEventsResult, buflen), + } +} + +func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { + m.createLogStreamArgument <- input + output := <-m.createLogStreamResult + return output.successResult, output.errorResult +} + +func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + events := make([]*cloudwatchlogs.InputLogEvent, len(input.LogEvents)) + copy(events, input.LogEvents) + m.putLogEventsArgument <- &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: input.SequenceToken, + LogGroupName: input.LogGroupName, + LogStreamName: input.LogStreamName, + } + output := <-m.putLogEventsResult + return output.successResult, output.errorResult +} + +type mockmetadataclient struct { + regionResult chan *regionResult +} + +type regionResult struct { + successResult string + errorResult error +} + +func newMockMetadataClient() *mockmetadataclient { + return &mockmetadataclient{ + regionResult: make(chan *regionResult, 1), + } +} + +func (m *mockmetadataclient) Region() (string, error) { + output := <-m.regionResult + return output.successResult, output.errorResult +} diff --git a/vendor/github.com/docker/docker/daemon/logger/context.go b/vendor/github.com/docker/docker/daemon/logger/context.go new file mode 100644 index 0000000..085ab01 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/context.go @@ -0,0 +1,111 @@ +package logger + +import ( + "fmt" + "os" + "strings" + "time" +) + +// Context provides enough information for a logging driver to do its function. +type Context struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string + DaemonName string +} + +// ExtraAttributes returns the user-defined extra attributes (labels, +// environment variables) in key-value format. This can be used by log drivers +// that support metadata to add more context to a log. +func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string { + extra := make(map[string]string) + labels, ok := ctx.Config["labels"] + if ok && len(labels) > 0 { + for _, l := range strings.Split(labels, ",") { + if v, ok := ctx.ContainerLabels[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + env, ok := ctx.Config["env"] + if ok && len(env) > 0 { + envMapping := make(map[string]string) + for _, e := range ctx.ContainerEnv { + if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { + envMapping[kv[0]] = kv[1] + } + } + for _, l := range strings.Split(env, ",") { + if v, ok := envMapping[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + return extra +} + +// Hostname returns the hostname from the underlying OS. +func (ctx *Context) Hostname() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("logger: can not resolve hostname: %v", err) + } + return hostname, nil +} + +// Command returns the command that the container being logged was +// started with. The Entrypoint is prepended to the container +// arguments. +func (ctx *Context) Command() string { + terms := []string{ctx.ContainerEntrypoint} + terms = append(terms, ctx.ContainerArgs...) + command := strings.Join(terms, " ") + return command +} + +// ID Returns the Container ID shortened to 12 characters. +func (ctx *Context) ID() string { + return ctx.ContainerID[:12] +} + +// FullID is an alias of ContainerID. +func (ctx *Context) FullID() string { + return ctx.ContainerID +} + +// Name returns the ContainerName without a preceding '/'. +func (ctx *Context) Name() string { + return ctx.ContainerName[1:] +} + +// ImageID returns the ContainerImageID shortened to 12 characters. +func (ctx *Context) ImageID() string { + return ctx.ContainerImageID[:12] +} + +// ImageFullID is an alias of ContainerImageID. +func (ctx *Context) ImageFullID() string { + return ctx.ContainerImageID +} + +// ImageName is an alias of ContainerImageName +func (ctx *Context) ImageName() string { + return ctx.ContainerImageName +} diff --git a/vendor/github.com/docker/docker/daemon/logger/copier.go b/vendor/github.com/docker/docker/daemon/logger/copier.go new file mode 100644 index 0000000..10ab46e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/copier.go @@ -0,0 +1,131 @@ +package logger + +import ( + "bytes" + "io" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + bufSize = 16 * 1024 + readSize = 2 * 1024 +) + +// Copier can copy logs from specified sources to Logger and attach Timestamp. +// Writes are concurrent, so you need implement some sync in your logger. +type Copier struct { + // srcs is map of name -> reader pairs, for example "stdout", "stderr" + srcs map[string]io.Reader + dst Logger + copyJobs sync.WaitGroup + closeOnce sync.Once + closed chan struct{} +} + +// NewCopier creates a new Copier +func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { + return &Copier{ + srcs: srcs, + dst: dst, + closed: make(chan struct{}), + } +} + +// Run starts logs copying +func (c *Copier) Run() { + for src, w := range c.srcs { + c.copyJobs.Add(1) + go c.copySrc(src, w) + } +} + +func (c *Copier) copySrc(name string, src io.Reader) { + defer c.copyJobs.Done() + buf := make([]byte, bufSize) + n := 0 + eof := false + msg := &Message{Source: name} + + for { + select { + case <-c.closed: + return + default: + // Work out how much more data we are okay with reading this time. + upto := n + readSize + if upto > cap(buf) { + upto = cap(buf) + } + // Try to read that data. + if upto > n { + read, err := src.Read(buf[n:upto]) + if err != nil { + if err != io.EOF { + logrus.Errorf("Error scanning log stream: %s", err) + return + } + eof = true + } + n += read + } + // If we have no data to log, and there's no more coming, we're done. + if n == 0 && eof { + return + } + // Break up the data that we've buffered up into lines, and log each in turn. + p := 0 + for q := bytes.Index(buf[p:n], []byte{'\n'}); q >= 0; q = bytes.Index(buf[p:n], []byte{'\n'}) { + msg.Line = buf[p : p+q] + msg.Timestamp = time.Now().UTC() + msg.Partial = false + select { + case <-c.closed: + return + default: + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + } + p += q + 1 + } + // If there's no more coming, or the buffer is full but + // has no newlines, log whatever we haven't logged yet, + // noting that it's a partial log line. + if eof || (p == 0 && n == len(buf)) { + if p < n { + msg.Line = buf[p:n] + msg.Timestamp = time.Now().UTC() + msg.Partial = true + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + p = 0 + n = 0 + } + if eof { + return + } + } + // Move any unlogged data to the front of the buffer in preparation for another read. + if p > 0 { + copy(buf[0:], buf[p:n]) + n -= p + } + } + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} + +// Close closes the copier +func (c *Copier) Close() { + c.closeOnce.Do(func() { + close(c.closed) + }) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/copier_test.go b/vendor/github.com/docker/docker/daemon/logger/copier_test.go new file mode 100644 index 0000000..cfd816a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/copier_test.go @@ -0,0 +1,296 @@ +package logger + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" +) + +type TestLoggerJSON struct { + *json.Encoder + mu sync.Mutex + delay time.Duration +} + +func (l *TestLoggerJSON) Log(m *Message) error { + if l.delay > 0 { + time.Sleep(l.delay) + } + l.mu.Lock() + defer l.mu.Unlock() + return l.Encode(m) +} + +func (l *TestLoggerJSON) Close() error { return nil } + +func (l *TestLoggerJSON) Name() string { return "json" } + +func TestCopier(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + stderrLine := "Line that thinks that it is log line from docker stderr" + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { + t.Fatal(err) + } + } + + // Test remaining lines without line-endings + if _, err := stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stdoutLine, stdoutTrailingLine) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stderrLine, stderrTrailingLine) + } + } + } +} + +// TestCopierLongLines tests long lines without line breaks +func TestCopierLongLines(t *testing.T) { + // Long lines (should be split at "bufSize") + const bufSize = 16 * 1024 + stdoutLongLine := strings.Repeat("a", bufSize) + stderrLongLine := strings.Repeat("b", bufSize) + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + + for i := 0; i < 3; i++ { + if _, err := stdout.WriteString(stdoutLongLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLongLine); err != nil { + t.Fatal(err) + } + } + + if _, err := stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLongLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stdoutLongLine' or 'stdoutTrailingLine'", msg.Line) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLongLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stderrLongLine' or 'stderrTrailingLine'", msg.Line) + } + } + } +} + +func TestCopierSlow(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + var stdout bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + } + + var jsonBuf bytes.Buffer + //encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)} + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} + + c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + <-time.After(150 * time.Millisecond) + c.Close() + select { + case <-time.After(200 * time.Millisecond): + t.Fatalf("failed to exit in time after the copier is closed") + case <-wait: + } +} + +type BenchmarkLoggerDummy struct { +} + +func (l *BenchmarkLoggerDummy) Log(m *Message) error { return nil } + +func (l *BenchmarkLoggerDummy) Close() error { return nil } + +func (l *BenchmarkLoggerDummy) Name() string { return "dummy" } + +func BenchmarkCopier64(b *testing.B) { + benchmarkCopier(b, 1<<6) +} +func BenchmarkCopier128(b *testing.B) { + benchmarkCopier(b, 1<<7) +} +func BenchmarkCopier256(b *testing.B) { + benchmarkCopier(b, 1<<8) +} +func BenchmarkCopier512(b *testing.B) { + benchmarkCopier(b, 1<<9) +} +func BenchmarkCopier1K(b *testing.B) { + benchmarkCopier(b, 1<<10) +} +func BenchmarkCopier2K(b *testing.B) { + benchmarkCopier(b, 1<<11) +} +func BenchmarkCopier4K(b *testing.B) { + benchmarkCopier(b, 1<<12) +} +func BenchmarkCopier8K(b *testing.B) { + benchmarkCopier(b, 1<<13) +} +func BenchmarkCopier16K(b *testing.B) { + benchmarkCopier(b, 1<<14) +} +func BenchmarkCopier32K(b *testing.B) { + benchmarkCopier(b, 1<<15) +} +func BenchmarkCopier64K(b *testing.B) { + benchmarkCopier(b, 1<<16) +} +func BenchmarkCopier128K(b *testing.B) { + benchmarkCopier(b, 1<<17) +} +func BenchmarkCopier256K(b *testing.B) { + benchmarkCopier(b, 1<<18) +} + +func piped(b *testing.B, iterations int, delay time.Duration, buf []byte) io.Reader { + r, w, err := os.Pipe() + if err != nil { + b.Fatal(err) + return nil + } + go func() { + for i := 0; i < iterations; i++ { + time.Sleep(delay) + if n, err := w.Write(buf); err != nil || n != len(buf) { + if err != nil { + b.Fatal(err) + } + b.Fatal(fmt.Errorf("short write")) + } + } + w.Close() + }() + return r +} + +func benchmarkCopier(b *testing.B, length int) { + b.StopTimer() + buf := []byte{'A'} + for len(buf) < length { + buf = append(buf, buf...) + } + buf = append(buf[:length-1], []byte{'\n'}...) + b.StartTimer() + for i := 0; i < b.N; i++ { + c := NewCopier( + map[string]io.Reader{ + "buffer": piped(b, 10, time.Nanosecond, buf), + }, + &BenchmarkLoggerDummy{}) + c.Run() + c.Wait() + c.Close() + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go b/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go new file mode 100644 index 0000000..f296d7f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go @@ -0,0 +1,170 @@ +// Package etwlogs provides a log driver for forwarding container logs +// as ETW events.(ETW stands for Event Tracing for Windows) +// A client can then create an ETW listener to listen for events that are sent +// by the ETW provider that we register, using the provider's GUID "a3693192-9ed6-46d2-a981-f8226c8363bd". +// Here is an example of how to do this using the logman utility: +// 1. logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl +// 2. Run container(s) and generate log messages +// 3. logman stop -ets DockerContainerLogs +// 4. You can then convert the etl log file to XML using: tracerpt -y trace.etl +// +// Each container log message generates an ETW event that also contains: +// the container name and ID, the timestamp, and the stream type. +package etwlogs + +import ( + "errors" + "fmt" + "sync" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "golang.org/x/sys/windows" +) + +type etwLogs struct { + containerName string + imageName string + containerID string + imageID string +} + +const ( + name = "etwlogs" + win32CallSuccess = 0 +) + +var ( + modAdvapi32 = windows.NewLazySystemDLL("Advapi32.dll") + procEventRegister = modAdvapi32.NewProc("EventRegister") + procEventWriteString = modAdvapi32.NewProc("EventWriteString") + procEventUnregister = modAdvapi32.NewProc("EventUnregister") +) +var providerHandle syscall.Handle +var refCount int +var mu sync.Mutex + +func init() { + providerHandle = syscall.InvalidHandle + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } +} + +// New creates a new etwLogs logger for the given container and registers the EWT provider. +func New(ctx logger.Context) (logger.Logger, error) { + if err := registerETWProvider(); err != nil { + return nil, err + } + logrus.Debugf("logging driver etwLogs configured for container: %s.", ctx.ContainerID) + + return &etwLogs{ + containerName: fixContainerName(ctx.ContainerName), + imageName: ctx.ContainerImageName, + containerID: ctx.ContainerID, + imageID: ctx.ContainerImageID, + }, nil +} + +// Log logs the message to the ETW stream. +func (etwLogger *etwLogs) Log(msg *logger.Message) error { + if providerHandle == syscall.InvalidHandle { + // This should never be hit, if it is, it indicates a programming error. + errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered." + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return callEventWriteString(createLogMessage(etwLogger, msg)) +} + +// Close closes the logger by unregistering the ETW provider. +func (etwLogger *etwLogs) Close() error { + unregisterETWProvider() + return nil +} + +func (etwLogger *etwLogs) Name() string { + return name +} + +func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { + return fmt.Sprintf("container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: %s, log: %s", + etwLogger.containerName, + etwLogger.imageName, + etwLogger.containerID, + etwLogger.imageID, + msg.Source, + msg.Line) +} + +// fixContainerName removes the initial '/' from the container name. +func fixContainerName(cntName string) string { + if len(cntName) > 0 && cntName[0] == '/' { + cntName = cntName[1:] + } + return cntName +} + +func registerETWProvider() error { + mu.Lock() + defer mu.Unlock() + if refCount == 0 { + var err error + if err = callEventRegister(); err != nil { + return err + } + } + + refCount++ + return nil +} + +func unregisterETWProvider() { + mu.Lock() + defer mu.Unlock() + if refCount == 1 { + if callEventUnregister() { + refCount-- + providerHandle = syscall.InvalidHandle + } + // Not returning an error if EventUnregister fails, because etwLogs will continue to work + } else { + refCount-- + } +} + +func callEventRegister() error { + // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} + guid := syscall.GUID{ + 0xa3693192, 0x9ed6, 0x46d2, + [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, + } + + ret, _, _ := procEventRegister.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("Failed to register ETW provider. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventWriteString(message string) error { + ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(message)))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventUnregister() bool { + ret, _, _ := procEventUnregister.Call(uintptr(providerHandle)) + if ret != win32CallSuccess { + return false + } + return true +} diff --git a/vendor/github.com/docker/docker/daemon/logger/factory.go b/vendor/github.com/docker/docker/daemon/logger/factory.go new file mode 100644 index 0000000..9cf716b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/factory.go @@ -0,0 +1,104 @@ +package logger + +import ( + "fmt" + "sync" +) + +// Creator builds a logging driver instance with given context. +type Creator func(Context) (Logger, error) + +// LogOptValidator checks the options specific to the underlying +// logging implementation. +type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + if lf.driverRegistered(name) { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + + lf.m.Lock() + lf.registry[name] = c + lf.m.Unlock() + return nil +} + +func (lf *logdriverFactory) driverRegistered(name string) bool { + lf.m.Lock() + _, ok := lf.registry[name] + lf.m.Unlock() + return ok +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if !ok { + return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + return c, nil +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c, _ := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. +func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation. +func ValidateLogOpts(name string, cfg map[string]string) error { + if name == "none" { + return nil + } + + if !factory.driverRegistered(name) { + return fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + + validator := factory.getLogOptValidator(name) + if validator != nil { + return validator(cfg) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go b/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go new file mode 100644 index 0000000..a8303cf --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go @@ -0,0 +1,246 @@ +// Package fluentd provides the log driver for forwarding server logs +// to fluentd endpoints. +package fluentd + +import ( + "fmt" + "math" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-units" + "github.com/fluent/fluent-logger-golang/fluent" + "github.com/pkg/errors" +) + +type fluentd struct { + tag string + containerID string + containerName string + writer *fluent.Fluent + extra map[string]string +} + +type location struct { + protocol string + host string + port int + path string +} + +const ( + name = "fluentd" + + defaultProtocol = "tcp" + defaultHost = "127.0.0.1" + defaultPort = 24224 + defaultBufferLimit = 1024 * 1024 + + // logger tries to reconnect 2**32 - 1 times + // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] + defaultRetryWait = 1000 + defaultMaxRetries = math.MaxInt32 + + addressKey = "fluentd-address" + bufferLimitKey = "fluentd-buffer-limit" + retryWaitKey = "fluentd-retry-wait" + maxRetriesKey = "fluentd-max-retries" + asyncConnectKey = "fluentd-async-connect" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a fluentd logger using the configuration passed in on +// the context. The supported context configuration variable is +// fluentd-address. +func New(ctx logger.Context) (logger.Logger, error) { + loc, err := parseAddress(ctx.Config[addressKey]) + if err != nil { + return nil, err + } + + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra := ctx.ExtraAttributes(nil) + + bufferLimit := defaultBufferLimit + if ctx.Config[bufferLimitKey] != "" { + bl64, err := units.RAMInBytes(ctx.Config[bufferLimitKey]) + if err != nil { + return nil, err + } + bufferLimit = int(bl64) + } + + retryWait := defaultRetryWait + if ctx.Config[retryWaitKey] != "" { + rwd, err := time.ParseDuration(ctx.Config[retryWaitKey]) + if err != nil { + return nil, err + } + retryWait = int(rwd.Seconds() * 1000) + } + + maxRetries := defaultMaxRetries + if ctx.Config[maxRetriesKey] != "" { + mr64, err := strconv.ParseUint(ctx.Config[maxRetriesKey], 10, strconv.IntSize) + if err != nil { + return nil, err + } + maxRetries = int(mr64) + } + + asyncConnect := false + if ctx.Config[asyncConnectKey] != "" { + if asyncConnect, err = strconv.ParseBool(ctx.Config[asyncConnectKey]); err != nil { + return nil, err + } + } + + fluentConfig := fluent.Config{ + FluentPort: loc.port, + FluentHost: loc.host, + FluentNetwork: loc.protocol, + FluentSocketPath: loc.path, + BufferLimit: bufferLimit, + RetryWait: retryWait, + MaxRetry: maxRetries, + AsyncConnect: asyncConnect, + } + + logrus.WithField("container", ctx.ContainerID).WithField("config", fluentConfig). + Debug("logging driver fluentd configured") + + log, err := fluent.New(fluentConfig) + if err != nil { + return nil, err + } + return &fluentd{ + tag: tag, + containerID: ctx.ContainerID, + containerName: ctx.ContainerName, + writer: log, + extra: extra, + }, nil +} + +func (f *fluentd) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + // fluent-logger-golang buffers logs from failures and disconnections, + // and these are transferred again automatically. + return f.writer.PostWithTime(f.tag, msg.Timestamp, data) +} + +func (f *fluentd) Close() error { + return f.writer.Close() +} + +func (f *fluentd) Name() string { + return name +} + +// ValidateLogOpt looks for fluentd specific log option fluentd-address. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "labels": + case "tag": + case addressKey: + case bufferLimitKey: + case retryWaitKey: + case maxRetriesKey: + case asyncConnectKey: + // Accepted + default: + return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) + } + } + + if _, err := parseAddress(cfg["fluentd-address"]); err != nil { + return err + } + + return nil +} + +func parseAddress(address string) (*location, error) { + if address == "" { + return &location{ + protocol: defaultProtocol, + host: defaultHost, + port: defaultPort, + path: "", + }, nil + } + + protocol := defaultProtocol + givenAddress := address + if urlutil.IsTransportURL(address) { + url, err := url.Parse(address) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + // unix and unixgram socket + if url.Scheme == "unix" || url.Scheme == "unixgram" { + return &location{ + protocol: url.Scheme, + host: "", + port: 0, + path: url.Path, + }, nil + } + // tcp|udp + protocol = url.Scheme + address = url.Host + } + + host, port, err := net.SplitHostPort(address) + if err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: defaultPort, + path: "", + }, nil + } + + portnum, err := strconv.Atoi(port) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: portnum, + path: "", + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go new file mode 100644 index 0000000..9a8c1c9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go @@ -0,0 +1,200 @@ +package gcplogs + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/docker/docker/daemon/logger" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/cloud/compute/metadata" + "google.golang.org/cloud/logging" +) + +const ( + name = "gcplogs" + + projectOptKey = "gcp-project" + logLabelsKey = "labels" + logEnvKey = "env" + logCmdKey = "gcp-log-cmd" + logZoneKey = "gcp-meta-zone" + logNameKey = "gcp-meta-name" + logIDKey = "gcp-meta-id" +) + +var ( + // The number of logs the gcplogs driver has dropped. + droppedLogs uint64 + + onGCE bool + + // instance metadata populated from the metadata server if available + projectID string + zone string + instanceName string + instanceID string +) + +func init() { + + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + + if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil { + logrus.Fatal(err) + } +} + +type gcplogs struct { + client *logging.Client + instance *instanceInfo + container *containerInfo +} + +type dockerLogEntry struct { + Instance *instanceInfo `json:"instance,omitempty"` + Container *containerInfo `json:"container,omitempty"` + Data string `json:"data,omitempty"` +} + +type instanceInfo struct { + Zone string `json:"zone,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` +} + +type containerInfo struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + ImageName string `json:"imageName,omitempty"` + ImageID string `json:"imageId,omitempty"` + Created time.Time `json:"created,omitempty"` + Command string `json:"command,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +var initGCPOnce sync.Once + +func initGCP() { + initGCPOnce.Do(func() { + onGCE = metadata.OnGCE() + if onGCE { + // These will fail on instances if the metadata service is + // down or the client is compiled with an API version that + // has been removed. Since these are not vital, let's ignore + // them and make their fields in the dockeLogEntry ,omitempty + projectID, _ = metadata.ProjectID() + zone, _ = metadata.Zone() + instanceName, _ = metadata.InstanceName() + instanceID, _ = metadata.InstanceID() + } + }) +} + +// New creates a new logger that logs to Google Cloud Logging using the application +// default credentials. +// +// See https://developers.google.com/identity/protocols/application-default-credentials +func New(ctx logger.Context) (logger.Logger, error) { + initGCP() + + var project string + if projectID != "" { + project = projectID + } + if projectID, found := ctx.Config[projectOptKey]; found { + project = projectID + } + if project == "" { + return nil, fmt.Errorf("No project was specified and couldn't read project from the meatadata server. Please specify a project") + } + + c, err := logging.NewClient(context.Background(), project, "gcplogs-docker-driver") + if err != nil { + return nil, err + } + + if err := c.Ping(); err != nil { + return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) + } + + l := &gcplogs{ + client: c, + container: &containerInfo{ + Name: ctx.ContainerName, + ID: ctx.ContainerID, + ImageName: ctx.ContainerImageName, + ImageID: ctx.ContainerImageID, + Created: ctx.ContainerCreated, + Metadata: ctx.ExtraAttributes(nil), + }, + } + + if ctx.Config[logCmdKey] == "true" { + l.container.Command = ctx.Command() + } + + if onGCE { + l.instance = &instanceInfo{ + Zone: zone, + Name: instanceName, + ID: instanceID, + } + } else if ctx.Config[logZoneKey] != "" || ctx.Config[logNameKey] != "" || ctx.Config[logIDKey] != "" { + l.instance = &instanceInfo{ + Zone: ctx.Config[logZoneKey], + Name: ctx.Config[logNameKey], + ID: ctx.Config[logIDKey], + } + } + + // The logger "overflows" at a rate of 10,000 logs per second and this + // overflow func is called. We want to surface the error to the user + // without overly spamming /var/log/docker.log so we log the first time + // we overflow and every 1000th time after. + c.Overflow = func(_ *logging.Client, _ logging.Entry) error { + if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { + logrus.Errorf("gcplogs driver has dropped %v logs", i) + } + return nil + } + + return l, nil +} + +// ValidateLogOpts validates the opts passed to the gcplogs driver. Currently, the gcplogs +// driver doesn't take any arguments. +func ValidateLogOpts(cfg map[string]string) error { + for k := range cfg { + switch k { + case projectOptKey, logLabelsKey, logEnvKey, logCmdKey, logZoneKey, logNameKey, logIDKey: + default: + return fmt.Errorf("%q is not a valid option for the gcplogs driver", k) + } + } + return nil +} + +func (l *gcplogs) Log(m *logger.Message) error { + return l.client.Log(logging.Entry{ + Time: m.Timestamp, + Payload: &dockerLogEntry{ + Instance: l.instance, + Container: l.container, + Data: string(m.Line), + }, + }) +} + +func (l *gcplogs) Close() error { + return l.client.Flush() +} + +func (l *gcplogs) Name() string { + return name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go new file mode 100644 index 0000000..95860ac --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go @@ -0,0 +1,209 @@ +// +build linux + +// Package gelf provides the log driver for forwarding server logs to +// endpoints that support the Graylog Extended Log Format. +package gelf + +import ( + "bytes" + "compress/flate" + "encoding/json" + "fmt" + "net" + "net/url" + "strconv" + "time" + + "github.com/Graylog2/go-gelf/gelf" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const name = "gelf" + +type gelfLogger struct { + writer *gelf.Writer + ctx logger.Context + hostname string + rawExtra json.RawMessage +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a gelf logger using the configuration passed in on the +// context. The supported context configuration variable is gelf-address. +func New(ctx logger.Context) (logger.Logger, error) { + // parse gelf address + address, err := parseAddress(ctx.Config["gelf-address"]) + if err != nil { + return nil, err + } + + // collect extra data for GELF message + hostname, err := ctx.Hostname() + if err != nil { + return nil, fmt.Errorf("gelf: cannot access hostname to set source field") + } + + // remove trailing slash from container name + containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/") + + // parse log tag + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra := map[string]interface{}{ + "_container_id": ctx.ContainerID, + "_container_name": string(containerName), + "_image_id": ctx.ContainerImageID, + "_image_name": ctx.ContainerImageName, + "_command": ctx.Command(), + "_tag": tag, + "_created": ctx.ContainerCreated, + } + + extraAttrs := ctx.ExtraAttributes(func(key string) string { + if key[0] == '_' { + return key + } + return "_" + key + }) + for k, v := range extraAttrs { + extra[k] = v + } + + rawExtra, err := json.Marshal(extra) + if err != nil { + return nil, err + } + + // create new gelfWriter + gelfWriter, err := gelf.NewWriter(address) + if err != nil { + return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) + } + + if v, ok := ctx.Config["gelf-compression-type"]; ok { + switch v { + case "gzip": + gelfWriter.CompressionType = gelf.CompressGzip + case "zlib": + gelfWriter.CompressionType = gelf.CompressZlib + case "none": + gelfWriter.CompressionType = gelf.CompressNone + default: + return nil, fmt.Errorf("gelf: invalid compression type %q", v) + } + } + + if v, ok := ctx.Config["gelf-compression-level"]; ok { + val, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) + } + gelfWriter.CompressionLevel = val + } + + return &gelfLogger{ + writer: gelfWriter, + ctx: ctx, + hostname: hostname, + rawExtra: rawExtra, + }, nil +} + +func (s *gelfLogger) Log(msg *logger.Message) error { + level := gelf.LOG_INFO + if msg.Source == "stderr" { + level = gelf.LOG_ERR + } + + m := gelf.Message{ + Version: "1.1", + Host: s.hostname, + Short: string(msg.Line), + TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, + Level: level, + RawExtra: s.rawExtra, + } + + if err := s.writer.WriteMessage(&m); err != nil { + return fmt.Errorf("gelf: cannot send GELF message: %v", err) + } + return nil +} + +func (s *gelfLogger) Close() error { + return s.writer.Close() +} + +func (s *gelfLogger) Name() string { + return name +} + +// ValidateLogOpt looks for gelf specific log option gelf-address. +func ValidateLogOpt(cfg map[string]string) error { + for key, val := range cfg { + switch key { + case "gelf-address": + case "tag": + case "labels": + case "env": + case "gelf-compression-level": + i, err := strconv.Atoi(val) + if err != nil || i < flate.DefaultCompression || i > flate.BestCompression { + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + case "gelf-compression-type": + switch val { + case "gzip", "zlib", "none": + default: + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + default: + return fmt.Errorf("unknown log opt %q for gelf log driver", key) + } + } + + if _, err := parseAddress(cfg["gelf-address"]); err != nil { + return err + } + + return nil +} + +func parseAddress(address string) (string, error) { + if address == "" { + return "", nil + } + if !urlutil.IsTransportURL(address) { + return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", err + } + + // we support only udp + if url.Scheme != "udp" { + return "", fmt.Errorf("gelf: endpoint needs to be UDP") + } + + // get host and port + if _, _, err = net.SplitHostPort(url.Host); err != nil { + return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port") + } + + return url.Host, nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go new file mode 100644 index 0000000..266f73b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package gelf diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald.go new file mode 100644 index 0000000..9569859 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/journald.go @@ -0,0 +1,122 @@ +// +build linux + +// Package journald provides the log driver for forwarding server logs +// to endpoints that receive the systemd format. +package journald + +import ( + "fmt" + "sync" + "unicode" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" +) + +const name = "journald" + +type journald struct { + vars map[string]string // additional variables and values to send to the journal along with the log message + readers readerList +} + +type readerList struct { + mu sync.Mutex + readers map[*logger.LogWatcher]*logger.LogWatcher +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// sanitizeKeyMode returns the sanitized string so that it could be used in journald. +// In journald log, there are special requirements for fields. +// Fields must be composed of uppercase letters, numbers, and underscores, but must +// not start with an underscore. +func sanitizeKeyMod(s string) string { + n := "" + for _, v := range s { + if 'a' <= v && v <= 'z' { + v = unicode.ToUpper(v) + } else if ('Z' < v || v < 'A') && ('9' < v || v < '0') { + v = '_' + } + // If (n == "" && v == '_'), then we will skip as this is the beginning with '_' + if !(n == "" && v == '_') { + n += string(v) + } + } + return n +} + +// New creates a journald logger using the configuration passed in on +// the context. +func New(ctx logger.Context) (logger.Logger, error) { + if !journal.Enabled() { + return nil, fmt.Errorf("journald is not enabled on this host") + } + // Strip a leading slash so that people can search for + // CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo. + name := ctx.ContainerName + if name[0] == '/' { + name = name[1:] + } + + // parse log tag + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + vars := map[string]string{ + "CONTAINER_ID": ctx.ContainerID[:12], + "CONTAINER_ID_FULL": ctx.ContainerID, + "CONTAINER_NAME": name, + "CONTAINER_TAG": tag, + } + extraAttrs := ctx.ExtraAttributes(sanitizeKeyMod) + for k, v := range extraAttrs { + vars[k] = v + } + return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil +} + +// We don't actually accept any options, but we have to supply a callback for +// the factory to pass the (probably empty) configuration map to. +func validateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "labels": + case "env": + case "tag": + default: + return fmt.Errorf("unknown log opt '%s' for journald log driver", key) + } + } + return nil +} + +func (s *journald) Log(msg *logger.Message) error { + vars := map[string]string{} + for k, v := range s.vars { + vars[k] = v + } + if msg.Partial { + vars["CONTAINER_PARTIAL_MESSAGE"] = "true" + } + if msg.Source == "stderr" { + return journal.Send(string(msg.Line), journal.PriErr, vars) + } + return journal.Send(string(msg.Line), journal.PriInfo, vars) +} + +func (s *journald) Name() string { + return name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go new file mode 100644 index 0000000..224423f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go @@ -0,0 +1,23 @@ +// +build linux + +package journald + +import ( + "testing" +) + +func TestSanitizeKeyMod(t *testing.T) { + entries := map[string]string{ + "io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io?.kubernetes.pod.name": "IO__KUBERNETES_POD_NAME", + "?io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "_io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "__io123_kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + } + for k, v := range entries { + if sanitizeKeyMod(k) != v { + t.Fatalf("Failed to sanitize %s, got %s, expected %s", k, sanitizeKeyMod(k), v) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go new file mode 100644 index 0000000..d52ca92 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux + +package journald + +type journald struct { +} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read.go b/vendor/github.com/docker/docker/daemon/logger/journald/read.go new file mode 100644 index 0000000..d91eb80 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read.go @@ -0,0 +1,401 @@ +// +build linux,cgo,!static_build,journald + +package journald + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// +//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial) +//{ +// int rc; +// size_t plength; +// *msg = NULL; +// *length = 0; +// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true"); +// rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length); +// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0)); +// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length); +// if (rc == 0) { +// if (*length > 8) { +// (*msg) += 8; +// *length -= 8; +// } else { +// *msg = NULL; +// *length = 0; +// rc = -ENOENT; +// } +// } +// return rc; +//} +//static int get_priority(sd_journal *j, int *priority) +//{ +// const void *data; +// size_t i, length; +// int rc; +// *priority = -1; +// rc = sd_journal_get_data(j, "PRIORITY", &data, &length); +// if (rc == 0) { +// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) { +// *priority = 0; +// for (i = 9; i < length; i++) { +// *priority = *priority * 10 + ((const char *)data)[i] - '0'; +// } +// if (length > 9) { +// rc = 0; +// } +// } +// } +// return rc; +//} +//static int is_attribute_field(const char *msg, size_t length) +//{ +// static const struct known_field { +// const char *name; +// size_t length; +// } fields[] = { +// {"MESSAGE", sizeof("MESSAGE") - 1}, +// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1}, +// {"PRIORITY", sizeof("PRIORITY") - 1}, +// {"CODE_FILE", sizeof("CODE_FILE") - 1}, +// {"CODE_LINE", sizeof("CODE_LINE") - 1}, +// {"CODE_FUNC", sizeof("CODE_FUNC") - 1}, +// {"ERRNO", sizeof("ERRNO") - 1}, +// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1}, +// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1}, +// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1}, +// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1}, +// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1}, +// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1}, +// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1}, +// }; +// unsigned int i; +// void *p; +// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) { +// return -1; +// } +// length = ((const char *) p) - msg; +// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) { +// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) { +// return -1; +// } +// } +// return 0; +//} +//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length) +//{ +// int rc; +// *msg = NULL; +// *length = 0; +// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) { +// if (is_attribute_field(*msg, *length) == 0) { +// break; +// } +// rc = -ENOENT; +// } +// return rc; +//} +//static int wait_for_data_cancelable(sd_journal *j, int pipefd) +//{ +// struct pollfd fds[2]; +// uint64_t when = 0; +// int timeout, jevents, i; +// struct timespec ts; +// uint64_t now; +// +// memset(&fds, 0, sizeof(fds)); +// fds[0].fd = pipefd; +// fds[0].events = POLLHUP; +// fds[1].fd = sd_journal_get_fd(j); +// if (fds[1].fd < 0) { +// return fds[1].fd; +// } +// +// do { +// jevents = sd_journal_get_events(j); +// if (jevents < 0) { +// return jevents; +// } +// fds[1].events = jevents; +// sd_journal_get_timeout(j, &when); +// if (when == -1) { +// timeout = -1; +// } else { +// clock_gettime(CLOCK_MONOTONIC, &ts); +// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; +// timeout = when > now ? (int) ((when - now + 999) / 1000) : 0; +// } +// i = poll(fds, 2, timeout); +// if ((i == -1) && (errno != EINTR)) { +// /* An unexpected error. */ +// return (errno != 0) ? -errno : -EINTR; +// } +// if (fds[0].revents & POLLHUP) { +// /* The close notification pipe was closed. */ +// return 0; +// } +// if (sd_journal_process(j) == SD_JOURNAL_APPEND) { +// /* Data, which we might care about, was appended. */ +// return 1; +// } +// } while ((fds[0].revents & POLLHUP) == 0); +// return 0; +//} +import "C" + +import ( + "fmt" + "strings" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" +) + +func (s *journald) Close() error { + s.readers.mu.Lock() + for reader := range s.readers.readers { + reader.Close() + } + s.readers.mu.Unlock() + return nil +} + +func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor *C.char) *C.char { + var msg, data, cursor *C.char + var length C.size_t + var stamp C.uint64_t + var priority, partial C.int + + // Walk the journal from here forward until we run out of new entries. +drain: + for { + // Try not to send a given entry twice. + if oldCursor != nil { + for C.sd_journal_test_cursor(j, oldCursor) > 0 { + if C.sd_journal_next(j) <= 0 { + break drain + } + } + } + // Read and send the logged message, if there is one to read. + i := C.get_message(j, &msg, &length, &partial) + if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { + // Read the entry's timestamp. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } + // Set up the time and text of the entry. + timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) + line := C.GoBytes(unsafe.Pointer(msg), C.int(length)) + if partial == 0 { + line = append(line, "\n"...) + } + // Recover the stream name by mapping + // from the journal priority back to + // the stream that we would have + // assigned that value. + source := "" + if C.get_priority(j, &priority) != 0 { + source = "" + } else if priority == C.int(journal.PriErr) { + source = "stderr" + } else if priority == C.int(journal.PriInfo) { + source = "stdout" + } + // Retrieve the values of any variables we're adding to the journal. + attrs := make(map[string]string) + C.sd_journal_restart_data(j) + for C.get_attribute_field(j, &data, &length) > C.int(0) { + kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) + attrs[kv[0]] = kv[1] + } + if len(attrs) == 0 { + attrs = nil + } + // Send the log message. + logWatcher.Msg <- &logger.Message{ + Line: line, + Source: source, + Timestamp: timestamp.In(time.UTC), + Attrs: attrs, + } + } + // If we're at the end of the journal, we're done (for now). + if C.sd_journal_next(j) <= 0 { + break + } + } + + // free(NULL) is safe + C.free(unsafe.Pointer(oldCursor)) + C.sd_journal_get_cursor(j, &cursor) + return cursor +} + +func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor *C.char) *C.char { + s.readers.mu.Lock() + s.readers.readers[logWatcher] = logWatcher + s.readers.mu.Unlock() + go func() { + // Keep copying journal data out until we're notified to stop + // or we hit an error. + status := C.wait_for_data_cancelable(j, pfd[0]) + for status == 1 { + cursor = s.drainJournal(logWatcher, config, j, cursor) + status = C.wait_for_data_cancelable(j, pfd[0]) + } + if status < 0 { + cerrstr := C.strerror(C.int(-status)) + errstr := C.GoString(cerrstr) + fmtstr := "error %q while attempting to follow journal for container %q" + logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) + } + // Clean up. + C.close(pfd[0]) + s.readers.mu.Lock() + delete(s.readers.readers, logWatcher) + s.readers.mu.Unlock() + C.sd_journal_close(j) + close(logWatcher.Msg) + }() + // Wait until we're told to stop. + select { + case <-logWatcher.WatchClose(): + // Notify the other goroutine that its work is done. + C.close(pfd[1]) + } + + return cursor +} + +func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + var j *C.sd_journal + var cmatch, cursor *C.char + var stamp C.uint64_t + var sinceUnixMicro uint64 + var pipes [2]C.int + + // Get a handle to the journal. + rc := C.sd_journal_open(&j, C.int(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error opening journal") + close(logWatcher.Msg) + return + } + // If we end up following the log, we can set the journal context + // pointer and the channel pointer to nil so that we won't close them + // here, potentially while the goroutine that uses them is still + // running. Otherwise, close them when we return from this function. + following := false + defer func(pfollowing *bool) { + if !*pfollowing { + C.sd_journal_close(j) + close(logWatcher.Msg) + } + }(&following) + // Remove limits on the size of data items that we'll retrieve. + rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal data threshold") + return + } + // Add a match to have the library do the searching for us. + cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) + defer C.free(unsafe.Pointer(cmatch)) + rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal match") + return + } + // If we have a cutoff time, convert it to Unix time once. + if !config.Since.IsZero() { + nano := config.Since.UnixNano() + sinceUnixMicro = uint64(nano / 1000) + } + if config.Tail > 0 { + lines := config.Tail + // Start at the end of the journal. + if C.sd_journal_seek_tail(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to end of journal") + return + } + if C.sd_journal_previous(j) < 0 { + logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") + return + } + // Walk backward. + for lines > 0 { + // Stop if the entry time is before our cutoff. + // We'll need the entry time if it isn't, so go + // ahead and parse it now. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } else { + // Compare the timestamp on the entry + // to our threshold value. + if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { + break + } + } + lines-- + // If we're at the start of the journal, or + // don't need to back up past any more entries, + // stop. + if lines == 0 || C.sd_journal_previous(j) <= 0 { + break + } + } + } else { + // Start at the beginning of the journal. + if C.sd_journal_seek_head(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start of journal") + return + } + // If we have a cutoff date, fast-forward to it. + if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start time in journal") + return + } + if C.sd_journal_next(j) < 0 { + logWatcher.Err <- fmt.Errorf("error skipping to next journal entry") + return + } + } + cursor = s.drainJournal(logWatcher, config, j, nil) + if config.Follow { + // Allocate a descriptor for following the journal, if we'll + // need one. Do it here so that we can report if it fails. + if fd := C.sd_journal_get_fd(j); fd < C.int(0) { + logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd))) + } else { + // Create a pipe that we can poll at the same time as + // the journald descriptor. + if C.pipe(&pipes[0]) == C.int(-1) { + logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe") + } else { + cursor = s.followJournal(logWatcher, config, j, pipes, cursor) + // Let followJournal handle freeing the journal context + // object and closing the channel. + following = true + } + } + } + + C.free(unsafe.Pointer(cursor)) + return +} + +func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + go s.readLogs(logWatcher, config) + return logWatcher +} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go new file mode 100644 index 0000000..bba6de5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build,journald,!journald_compat + +package journald + +// #cgo pkg-config: libsystemd +import "C" diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go new file mode 100644 index 0000000..3f7a43c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build,journald,journald_compat + +package journald + +// #cgo pkg-config: libsystemd-journal +import "C" diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go new file mode 100644 index 0000000..b43abdc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux !cgo static_build !journald + +package journald + +func (s *journald) Close() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go new file mode 100644 index 0000000..a429a08 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -0,0 +1,151 @@ +// Package jsonfilelog provides the default Logger implementation for +// Docker logging. This logger logs to files on the host server in the +// JSON format. +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/go-units" +) + +// Name is the name of the file that the jsonlogger logs to. +const Name = "json-file" + +// JSONFileLogger is Logger implementation for default Docker logging. +type JSONFileLogger struct { + buf *bytes.Buffer + writer *loggerutils.RotateFileWriter + mu sync.Mutex + readers map[*logger.LogWatcher]struct{} // stores the active log followers + extra []byte // json-encoded extra attributes +} + +func init() { + if err := logger.RegisterLogDriver(Name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates new JSONFileLogger which writes to filename passed in +// on given context. +func New(ctx logger.Context) (logger.Logger, error) { + var capval int64 = -1 + if capacity, ok := ctx.Config["max-size"]; ok { + var err error + capval, err = units.FromHumanSize(capacity) + if err != nil { + return nil, err + } + } + var maxFiles = 1 + if maxFileString, ok := ctx.Config["max-file"]; ok { + var err error + maxFiles, err = strconv.Atoi(maxFileString) + if err != nil { + return nil, err + } + if maxFiles < 1 { + return nil, fmt.Errorf("max-file cannot be less than 1") + } + } + + writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles) + if err != nil { + return nil, err + } + + var extra []byte + if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 { + var err error + extra, err = json.Marshal(attrs) + if err != nil { + return nil, err + } + } + + return &JSONFileLogger{ + buf: bytes.NewBuffer(nil), + writer: writer, + readers: make(map[*logger.LogWatcher]struct{}), + extra: extra, + }, nil +} + +// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. +func (l *JSONFileLogger) Log(msg *logger.Message) error { + timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) + if err != nil { + return err + } + l.mu.Lock() + logline := msg.Line + if !msg.Partial { + logline = append(msg.Line, '\n') + } + err = (&jsonlog.JSONLogs{ + Log: logline, + Stream: msg.Source, + Created: timestamp, + RawAttrs: l.extra, + }).MarshalJSONBuf(l.buf) + if err != nil { + l.mu.Unlock() + return err + } + + l.buf.WriteByte('\n') + _, err = l.writer.Write(l.buf.Bytes()) + l.buf.Reset() + l.mu.Unlock() + + return err +} + +// ValidateLogOpt looks for json specific log options max-file & max-size. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "max-file": + case "max-size": + case "labels": + case "env": + default: + return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) + } + } + return nil +} + +// LogPath returns the location the given json logger logs to. +func (l *JSONFileLogger) LogPath() string { + return l.writer.LogPath() +} + +// Close closes underlying file and signals all readers to stop. +func (l *JSONFileLogger) Close() error { + l.mu.Lock() + err := l.writer.Close() + for r := range l.readers { + r.Close() + delete(l.readers, r) + } + l.mu.Unlock() + return err +} + +// Name returns name of this logger. +func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go new file mode 100644 index 0000000..b5b818a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -0,0 +1,248 @@ +package jsonfilelog + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strconv" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/jsonlog" +) + +func TestJSONFileLogger(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + if err := l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } +} + +func BenchmarkJSONFileLogger(b *testing.B) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + + testLine := "Line that thinks that it is log line from docker\n" + msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } + } +} + +func TestJSONFileLoggerWithOpts(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"max-file": "2", "max-size": "1k"} + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + Config: config, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + for i := 0; i < 20; i++ { + if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { + t.Fatal(err) + } + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + penUlt, err := ioutil.ReadFile(filename + ".1") + if err != nil { + t.Fatal(err) + } + + expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } + if string(penUlt) != expectedPenultimate { + t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) + } + +} + +func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl"} + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + Config: config, + ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, + ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true"}, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + if err := l.Log(&logger.Message{Line: []byte("line"), Source: "src1"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + + var jsonLog jsonlog.JSONLogs + if err := json.Unmarshal(res, &jsonLog); err != nil { + t.Fatal(err) + } + extra := make(map[string]string) + if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil { + t.Fatal(err) + } + expected := map[string]string{ + "rack": "101", + "dc": "lhr", + "environ": "production", + "debug": "false", + "ssl": "true", + } + if !reflect.DeepEqual(extra, expected) { + t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) + } +} + +func BenchmarkJSONFileLoggerWithReader(b *testing.B) { + b.StopTimer() + b.ResetTimer() + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + dir, err := ioutil.TempDir("", "json-logger-bench") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(dir) + + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filepath.Join(dir, "container.log"), + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + msg := &logger.Message{Line: []byte("line"), Source: "src1"} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + + b.StartTimer() + + go func() { + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + l.Log(msg) + } + } + l.Close() + }() + + lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true}) + watchClose := lw.WatchClose() + for { + select { + case <-lw.Msg: + case <-watchClose: + return + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go new file mode 100644 index 0000000..f2f9df1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,319 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/fsnotify/fsnotify" + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/tailfile" +) + +const maxJSONDecodeRetry = 20000 + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + Attrs: l.Attrs, + } + return msg, nil +} + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + // lock so the read stream doesn't get corrupted due to rotations or other log data written while we read + // This will block writes!!! + l.mu.Lock() + + pth := l.writer.LogPath() + var files []io.ReadSeeker + for i := l.writer.MaxFiles(); i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + break + } + continue + } + defer f.Close() + + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- err + l.mu.Unlock() + return + } + defer latestFile.Close() + + if config.Tail != 0 { + tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) + tailFile(tailer, logWatcher, config.Tail, config.Since) + } + + // close all the rotated files + for _, f := range files { + if err := f.(io.Closer).Close(); err != nil { + logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) + } + } + + if !config.Follow { + if err := latestFile.Close(); err != nil { + logrus.Errorf("Error closing file: %v", err) + } + l.mu.Unlock() + return + } + + if config.Tail >= 0 { + latestFile.Seek(0, os.SEEK_END) + } + + l.readers[logWatcher] = struct{}{} + l.mu.Unlock() + + notifyRotate := l.writer.NotifyRotate() + followLogs(latestFile, logWatcher, notifyRotate, config.Since) + + l.mu.Lock() + delete(l.readers, logWatcher) + l.mu.Unlock() + + l.writer.NotifyRotateEvict(notifyRotate) +} + +func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { + var rdr io.Reader + rdr = f + if tail > 0 { + ls, err := tailfile.TailFile(f, tail) + if err != nil { + logWatcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + dec := json.NewDecoder(rdr) + l := &jsonlog.JSONLog{} + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + logWatcher.Err <- err + } + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } +} + +func watchFile(name string) (filenotify.FileWatcher, error) { + fileWatcher, err := filenotify.New() + if err != nil { + return nil, err + } + + if err := fileWatcher.Add(name); err != nil { + logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + fileWatcher.Close() + logrus.Debugf("error watching log file for modifications: %v", err) + return nil, err + } + } + return fileWatcher, nil +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { + dec := json.NewDecoder(f) + l := &jsonlog.JSONLog{} + + name := f.Name() + fileWatcher, err := watchFile(name) + if err != nil { + logWatcher.Err <- err + return + } + defer func() { + f.Close() + fileWatcher.Remove(name) + fileWatcher.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-logWatcher.WatchClose(): + fileWatcher.Remove(name) + cancel() + case <-ctx.Done(): + return + } + }() + + var retries int + handleRotate := func() error { + f.Close() + fileWatcher.Remove(name) + + // retry when the file doesn't exist + for retries := 0; retries <= 5; retries++ { + f, err = os.Open(name) + if err == nil || !os.IsNotExist(err) { + break + } + } + if err != nil { + return err + } + if err := fileWatcher.Add(name); err != nil { + return err + } + dec = json.NewDecoder(f) + return nil + } + + errRetry := errors.New("retry") + errDone := errors.New("done") + waitRead := func() error { + select { + case e := <-fileWatcher.Events(): + switch e.Op { + case fsnotify.Write: + dec = json.NewDecoder(f) + return nil + case fsnotify.Rename, fsnotify.Remove: + select { + case <-notifyRotate: + case <-ctx.Done(): + return errDone + } + if err := handleRotate(); err != nil { + return err + } + return nil + } + return errRetry + case err := <-fileWatcher.Errors(): + logrus.Debug("logger got error watching file: %v", err) + // Something happened, let's try and stay alive and create a new watcher + if retries <= 5 { + fileWatcher.Close() + fileWatcher, err = watchFile(name) + if err != nil { + return err + } + retries++ + return errRetry + } + return err + case <-ctx.Done(): + return errDone + } + } + + handleDecodeErr := func(err error) error { + if err == io.EOF { + for err := waitRead(); err != nil; { + if err == errRetry { + // retry the waitRead + continue + } + return err + } + return nil + } + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries++ + return nil + } + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. + if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { + reader := io.MultiReader(dec.Buffered(), f) + dec = json.NewDecoder(reader) + retries++ + return nil + } + return err + } + + // main loop + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err := handleDecodeErr(err); err != nil { + if err == errDone { + return + } + // we got an unrecoverable error, so return + logWatcher.Err <- err + return + } + // ready to try again + continue + } + + retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case logWatcher.Msg <- msg: + case <-ctx.Done(): + logWatcher.Msg <- msg + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go b/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go new file mode 100644 index 0000000..e794b1e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go @@ -0,0 +1,94 @@ +// Package logentries provides the log driver for forwarding server logs +// to logentries endpoints. +package logentries + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/bsphere/le_go" + "github.com/docker/docker/daemon/logger" +) + +type logentries struct { + tag string + containerID string + containerName string + writer *le_go.Logger + extra map[string]string +} + +const ( + name = "logentries" + token = "logentries-token" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a logentries logger using the configuration passed in on +// the context. The supported context configuration variable is +// logentries-token. +func New(ctx logger.Context) (logger.Logger, error) { + logrus.WithField("container", ctx.ContainerID). + WithField("token", ctx.Config[token]). + Debug("logging driver logentries configured") + + log, err := le_go.Connect(ctx.Config[token]) + if err != nil { + return nil, err + } + return &logentries{ + containerID: ctx.ContainerID, + containerName: ctx.ContainerName, + writer: log, + }, nil +} + +func (f *logentries) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + f.writer.Println(f.tag, msg.Timestamp, data) + return nil +} + +func (f *logentries) Close() error { + return f.writer.Close() +} + +func (f *logentries) Name() string { + return name +} + +// ValidateLogOpt looks for logentries specific log option logentries-address. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "labels": + case "tag": + case key: + default: + return fmt.Errorf("unknown log opt '%s' for logentries log driver", key) + } + } + + if cfg[token] == "" { + return fmt.Errorf("Missing logentries token") + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/logger.go b/vendor/github.com/docker/docker/daemon/logger/logger.go new file mode 100644 index 0000000..d091997 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/logger.go @@ -0,0 +1,134 @@ +// Package logger defines interfaces that logger drivers implement to +// log messages. +// +// The other half of a logger driver is the implementation of the +// factory, which holds the contextual instance information that +// allows multiple loggers of the same type to perform different +// actions, such as logging to different locations. +package logger + +import ( + "errors" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/jsonlog" +) + +// ErrReadLogsNotSupported is returned when the logger does not support reading logs. +var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading") + +const ( + // TimeFormat is the time format used for timestamps sent to log readers. + TimeFormat = jsonlog.RFC3339NanoFixed + logWatcherBufferSize = 4096 +) + +// Message is datastructure that represents piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +type Message struct { + Line []byte + Source string + Timestamp time.Time + Attrs LogAttributes + Partial bool +} + +// CopyMessage creates a copy of the passed-in Message which will remain +// unchanged if the original is changed. Log drivers which buffer Messages +// rather than dispatching them during their Log() method should use this +// function to obtain a Message whose Line member's contents won't change. +func CopyMessage(msg *Message) *Message { + m := new(Message) + m.Line = make([]byte, len(msg.Line)) + copy(m.Line, msg.Line) + m.Source = msg.Source + m.Timestamp = msg.Timestamp + m.Partial = msg.Partial + m.Attrs = make(LogAttributes) + for k, v := range msg.Attrs { + m.Attrs[k] = v + } + return m +} + +// LogAttributes is used to hold the extra attributes available in the log message +// Primarily used for converting the map type to string and sorting. +type LogAttributes map[string]string +type byKey []string + +func (s byKey) Len() int { return len(s) } +func (s byKey) Less(i, j int) bool { + keyI := strings.Split(s[i], "=") + keyJ := strings.Split(s[j], "=") + return keyI[0] < keyJ[0] +} +func (s byKey) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (a LogAttributes) String() string { + var ss byKey + for k, v := range a { + ss = append(ss, k+"="+v) + } + sort.Sort(ss) + return strings.Join(ss, ",") +} + +// Logger is the interface for docker logging drivers. +type Logger interface { + Log(*Message) error + Name() string + Close() error +} + +// ReadConfig is the configuration passed into ReadLogs. +type ReadConfig struct { + Since time.Time + Tail int + Follow bool +} + +// LogReader is the interface for reading log messages for loggers that support reading. +type LogReader interface { + // Read logs from underlying logging backend + ReadLogs(ReadConfig) *LogWatcher +} + +// LogWatcher is used when consuming logs read from the LogReader interface. +type LogWatcher struct { + // For sending log messages to a reader. + Msg chan *Message + // For sending error messages that occur while while reading logs. + Err chan error + closeOnce sync.Once + closeNotifier chan struct{} +} + +// NewLogWatcher returns a new LogWatcher. +func NewLogWatcher() *LogWatcher { + return &LogWatcher{ + Msg: make(chan *Message, logWatcherBufferSize), + Err: make(chan error, 1), + closeNotifier: make(chan struct{}), + } +} + +// Close notifies the underlying log reader to stop. +func (w *LogWatcher) Close() { + // only close if not already closed + w.closeOnce.Do(func() { + close(w.closeNotifier) + }) +} + +// WatchClose returns a channel receiver that receives notification +// when the watcher has been closed. This should only be called from +// one goroutine. +func (w *LogWatcher) WatchClose() <-chan struct{} { + return w.closeNotifier +} diff --git a/vendor/github.com/docker/docker/daemon/logger/logger_test.go b/vendor/github.com/docker/docker/daemon/logger/logger_test.go new file mode 100644 index 0000000..16e1514 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/logger_test.go @@ -0,0 +1,26 @@ +package logger + +import ( + "reflect" + "testing" + "time" +) + +func TestCopyMessage(t *testing.T) { + msg := &Message{ + Line: []byte("test line."), + Source: "stdout", + Timestamp: time.Now(), + Attrs: LogAttributes{ + "key1": "val1", + "key2": "val2", + "key3": "val3", + }, + Partial: true, + } + + m := CopyMessage(msg) + if !reflect.DeepEqual(m, msg) { + t.Fatalf("CopyMessage failed to copy message") + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go new file mode 100644 index 0000000..4752679 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go @@ -0,0 +1,31 @@ +package loggerutils + +import ( + "bytes" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/utils/templates" +) + +// DefaultTemplate defines the defaults template logger should use. +const DefaultTemplate = "{{.ID}}" + +// ParseLogTag generates a context aware tag for consistency across different +// log drivers based on the context of the running container. +func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) { + tagTemplate := ctx.Config["tag"] + if tagTemplate == "" { + tagTemplate = defaultTemplate + } + + tmpl, err := templates.NewParse("log-tag", tagTemplate) + if err != nil { + return "", err + } + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, &ctx); err != nil { + return "", err + } + + return buf.String(), nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go new file mode 100644 index 0000000..e2aa435 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go @@ -0,0 +1,47 @@ +package loggerutils + +import ( + "testing" + + "github.com/docker/docker/daemon/logger" +) + +func TestParseLogTagDefaultTag(t *testing.T) { + ctx := buildContext(map[string]string{}) + tag, e := ParseLogTag(ctx, "{{.ID}}") + assertTag(t, e, tag, ctx.ID()) +} + +func TestParseLogTag(t *testing.T) { + ctx := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) + tag, e := ParseLogTag(ctx, "{{.ID}}") + assertTag(t, e, tag, "test-image/test-container/container-ab") +} + +func TestParseLogTagEmptyTag(t *testing.T) { + ctx := buildContext(map[string]string{}) + tag, e := ParseLogTag(ctx, "{{.DaemonName}}/{{.ID}}") + assertTag(t, e, tag, "test-dockerd/container-ab") +} + +// Helpers + +func buildContext(cfg map[string]string) logger.Context { + return logger.Context{ + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerName: "/test-container", + ContainerImageID: "image-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerImageName: "test-image", + Config: cfg, + DaemonName: "test-dockerd", + } +} + +func assertTag(t *testing.T, e error, tag string, expected string) { + if e != nil { + t.Fatalf("Error generating tag: %q", e) + } + if tag != expected { + t.Fatalf("Wrong tag: %q, should be %q", tag, expected) + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go new file mode 100644 index 0000000..99e0964 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go @@ -0,0 +1,124 @@ +package loggerutils + +import ( + "os" + "strconv" + "sync" + + "github.com/docker/docker/pkg/pubsub" +) + +// RotateFileWriter is Logger implementation for default Docker logging. +type RotateFileWriter struct { + f *os.File // store for closing + mu sync.Mutex + capacity int64 //maximum size of each file + currentSize int64 // current size of the latest file + maxFiles int //maximum number of files + notifyRotate *pubsub.Publisher +} + +//NewRotateFileWriter creates new RotateFileWriter +func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) { + log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) + if err != nil { + return nil, err + } + + size, err := log.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + + return &RotateFileWriter{ + f: log, + capacity: capacity, + currentSize: size, + maxFiles: maxFiles, + notifyRotate: pubsub.NewPublisher(0, 1), + }, nil +} + +//WriteLog write log message to File +func (w *RotateFileWriter) Write(message []byte) (int, error) { + w.mu.Lock() + if err := w.checkCapacityAndRotate(); err != nil { + w.mu.Unlock() + return -1, err + } + + n, err := w.f.Write(message) + if err == nil { + w.currentSize += int64(n) + } + w.mu.Unlock() + return n, err +} + +func (w *RotateFileWriter) checkCapacityAndRotate() error { + if w.capacity == -1 { + return nil + } + + if w.currentSize >= w.capacity { + name := w.f.Name() + if err := w.f.Close(); err != nil { + return err + } + if err := rotate(name, w.maxFiles); err != nil { + return err + } + file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 06400) + if err != nil { + return err + } + w.f = file + w.currentSize = 0 + w.notifyRotate.Publish(struct{}{}) + } + + return nil +} + +func rotate(name string, maxFiles int) error { + if maxFiles < 2 { + return nil + } + for i := maxFiles - 1; i > 1; i-- { + toPath := name + "." + strconv.Itoa(i) + fromPath := name + "." + strconv.Itoa(i-1) + if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { + return err + } + } + + if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// LogPath returns the location the given writer logs to. +func (w *RotateFileWriter) LogPath() string { + return w.f.Name() +} + +// MaxFiles return maximum number of files +func (w *RotateFileWriter) MaxFiles() int { + return w.maxFiles +} + +//NotifyRotate returns the new subscriber +func (w *RotateFileWriter) NotifyRotate() chan interface{} { + return w.notifyRotate.Subscribe() +} + +//NotifyRotateEvict removes the specified subscriber from receiving any more messages. +func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) { + w.notifyRotate.Evict(sub) +} + +// Close closes underlying file and signals all readers to stop. +func (w *RotateFileWriter) Close() error { + return w.f.Close() +} diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go new file mode 100644 index 0000000..f858326 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go @@ -0,0 +1,621 @@ +// Package splunk provides the log driver for forwarding server logs to +// Splunk HTTP Event Collector endpoint. +package splunk + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const ( + driverName = "splunk" + splunkURLKey = "splunk-url" + splunkTokenKey = "splunk-token" + splunkSourceKey = "splunk-source" + splunkSourceTypeKey = "splunk-sourcetype" + splunkIndexKey = "splunk-index" + splunkCAPathKey = "splunk-capath" + splunkCANameKey = "splunk-caname" + splunkInsecureSkipVerifyKey = "splunk-insecureskipverify" + splunkFormatKey = "splunk-format" + splunkVerifyConnectionKey = "splunk-verify-connection" + splunkGzipCompressionKey = "splunk-gzip" + splunkGzipCompressionLevelKey = "splunk-gzip-level" + envKey = "env" + labelsKey = "labels" + tagKey = "tag" +) + +const ( + // How often do we send messages (if we are not reaching batch size) + defaultPostMessagesFrequency = 5 * time.Second + // How big can be batch of messages + defaultPostMessagesBatchSize = 1000 + // Maximum number of messages we can store in buffer + defaultBufferMaximum = 10 * defaultPostMessagesBatchSize + // Number of messages allowed to be queued in the channel + defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize +) + +const ( + envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY" + envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE" + envVarBufferMaximum = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX" + envVarStreamChannelSize = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE" +) + +type splunkLoggerInterface interface { + logger.Logger + worker() +} + +type splunkLogger struct { + client *http.Client + transport *http.Transport + + url string + auth string + nullMessage *splunkMessage + + // http compression + gzipCompression bool + gzipCompressionLevel int + + // Advanced options + postMessagesFrequency time.Duration + postMessagesBatchSize int + bufferMaximum int + + // For synchronization between background worker and logger. + // We use channel to send messages to worker go routine. + // All other variables for blocking Close call before we flush all messages to HEC + stream chan *splunkMessage + lock sync.RWMutex + closed bool + closedCond *sync.Cond +} + +type splunkLoggerInline struct { + *splunkLogger + + nullEvent *splunkMessageEvent +} + +type splunkLoggerJSON struct { + *splunkLoggerInline +} + +type splunkLoggerRaw struct { + *splunkLogger + + prefix []byte +} + +type splunkMessage struct { + Event interface{} `json:"event"` + Time string `json:"time"` + Host string `json:"host"` + Source string `json:"source,omitempty"` + SourceType string `json:"sourcetype,omitempty"` + Index string `json:"index,omitempty"` +} + +type splunkMessageEvent struct { + Line interface{} `json:"line"` + Source string `json:"source"` + Tag string `json:"tag,omitempty"` + Attrs map[string]string `json:"attrs,omitempty"` +} + +const ( + splunkFormatRaw = "raw" + splunkFormatJSON = "json" + splunkFormatInline = "inline" +) + +func init() { + if err := logger.RegisterLogDriver(driverName, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates splunk logger driver using configuration passed in context +func New(ctx logger.Context) (logger.Logger, error) { + hostname, err := ctx.Hostname() + if err != nil { + return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) + } + + // Parse and validate Splunk URL + splunkURL, err := parseURL(ctx) + if err != nil { + return nil, err + } + + // Splunk Token is required parameter + splunkToken, ok := ctx.Config[splunkTokenKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) + } + + tlsConfig := &tls.Config{} + + // Splunk is using autogenerated certificates by default, + // allow users to trust them with skipping verification + if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok { + insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) + if err != nil { + return nil, err + } + tlsConfig.InsecureSkipVerify = insecureSkipVerify + } + + // If path to the root certificate is provided - load it + if caPath, ok := ctx.Config[splunkCAPathKey]; ok { + caCert, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, err + } + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caPool + } + + if caName, ok := ctx.Config[splunkCANameKey]; ok { + tlsConfig.ServerName = caName + } + + gzipCompression := false + if gzipCompressionStr, ok := ctx.Config[splunkGzipCompressionKey]; ok { + gzipCompression, err = strconv.ParseBool(gzipCompressionStr) + if err != nil { + return nil, err + } + } + + gzipCompressionLevel := gzip.DefaultCompression + if gzipCompressionLevelStr, ok := ctx.Config[splunkGzipCompressionLevelKey]; ok { + var err error + gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32) + if err != nil { + return nil, err + } + gzipCompressionLevel = int(gzipCompressionLevel64) + if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression { + err := fmt.Errorf("Not supported level '%s' for %s (supported values between %d and %d).", + gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression) + return nil, err + } + } + + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + client := &http.Client{ + Transport: transport, + } + + source := ctx.Config[splunkSourceKey] + sourceType := ctx.Config[splunkSourceTypeKey] + index := ctx.Config[splunkIndexKey] + + var nullMessage = &splunkMessage{ + Host: hostname, + Source: source, + SourceType: sourceType, + Index: index, + } + + // Allow user to remove tag from the messages by setting tag to empty string + tag := "" + if tagTemplate, ok := ctx.Config[tagKey]; !ok || tagTemplate != "" { + tag, err = loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + } + + attrs := ctx.ExtraAttributes(nil) + + var ( + postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency) + postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize) + bufferMaximum = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum) + streamChannelSize = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize) + ) + + logger := &splunkLogger{ + client: client, + transport: transport, + url: splunkURL.String(), + auth: "Splunk " + splunkToken, + nullMessage: nullMessage, + gzipCompression: gzipCompression, + gzipCompressionLevel: gzipCompressionLevel, + stream: make(chan *splunkMessage, streamChannelSize), + postMessagesFrequency: postMessagesFrequency, + postMessagesBatchSize: postMessagesBatchSize, + bufferMaximum: bufferMaximum, + } + + // By default we verify connection, but we allow use to skip that + verifyConnection := true + if verifyConnectionStr, ok := ctx.Config[splunkVerifyConnectionKey]; ok { + var err error + verifyConnection, err = strconv.ParseBool(verifyConnectionStr) + if err != nil { + return nil, err + } + } + if verifyConnection { + err = verifySplunkConnection(logger) + if err != nil { + return nil, err + } + } + + var splunkFormat string + if splunkFormatParsed, ok := ctx.Config[splunkFormatKey]; ok { + switch splunkFormatParsed { + case splunkFormatInline: + case splunkFormatJSON: + case splunkFormatRaw: + default: + return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormat) + } + splunkFormat = splunkFormatParsed + } else { + splunkFormat = splunkFormatInline + } + + var loggerWrapper splunkLoggerInterface + + switch splunkFormat { + case splunkFormatInline: + nullEvent := &splunkMessageEvent{ + Tag: tag, + Attrs: attrs, + } + + loggerWrapper = &splunkLoggerInline{logger, nullEvent} + case splunkFormatJSON: + nullEvent := &splunkMessageEvent{ + Tag: tag, + Attrs: attrs, + } + + loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}} + case splunkFormatRaw: + var prefix bytes.Buffer + if tag != "" { + prefix.WriteString(tag) + prefix.WriteString(" ") + } + for key, value := range attrs { + prefix.WriteString(key) + prefix.WriteString("=") + prefix.WriteString(value) + prefix.WriteString(" ") + } + + loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()} + default: + return nil, fmt.Errorf("Unexpected format %s", splunkFormat) + } + + go loggerWrapper.worker() + + return loggerWrapper, nil +} + +func (l *splunkLoggerInline) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + + event := *l.nullEvent + event.Line = string(msg.Line) + event.Source = msg.Source + + message.Event = &event + + return l.queueMessageAsync(message) +} + +func (l *splunkLoggerJSON) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + event := *l.nullEvent + + var rawJSONMessage json.RawMessage + if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil { + event.Line = &rawJSONMessage + } else { + event.Line = string(msg.Line) + } + + event.Source = msg.Source + + message.Event = &event + + return l.queueMessageAsync(message) +} + +func (l *splunkLoggerRaw) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + + message.Event = string(append(l.prefix, msg.Line...)) + + return l.queueMessageAsync(message) +} + +func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error { + l.lock.RLock() + defer l.lock.RUnlock() + if l.closedCond != nil { + return fmt.Errorf("%s: driver is closed", driverName) + } + l.stream <- message + return nil +} + +func (l *splunkLogger) worker() { + timer := time.NewTicker(l.postMessagesFrequency) + var messages []*splunkMessage + for { + select { + case message, open := <-l.stream: + if !open { + l.postMessages(messages, true) + l.lock.Lock() + defer l.lock.Unlock() + l.transport.CloseIdleConnections() + l.closed = true + l.closedCond.Signal() + return + } + messages = append(messages, message) + // Only sending when we get exactly to the batch size, + // This also helps not to fire postMessages on every new message, + // when previous try failed. + if len(messages)%l.postMessagesBatchSize == 0 { + messages = l.postMessages(messages, false) + } + case <-timer.C: + messages = l.postMessages(messages, false) + } + } +} + +func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage { + messagesLen := len(messages) + for i := 0; i < messagesLen; i += l.postMessagesBatchSize { + upperBound := i + l.postMessagesBatchSize + if upperBound > messagesLen { + upperBound = messagesLen + } + if err := l.tryPostMessages(messages[i:upperBound]); err != nil { + logrus.Error(err) + if messagesLen-i >= l.bufferMaximum || lastChance { + // If this is last chance - print them all to the daemon log + if lastChance { + upperBound = messagesLen + } + // Not all sent, but buffer has got to its maximum, let's log all messages + // we could not send and return buffer minus one batch size + for j := i; j < upperBound; j++ { + if jsonEvent, err := json.Marshal(messages[j]); err != nil { + logrus.Error(err) + } else { + logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent))) + } + } + return messages[upperBound:messagesLen] + } + // Not all sent, returning buffer from where we have not sent messages + return messages[i:messagesLen] + } + } + // All sent, return empty buffer + return messages[:0] +} + +func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error { + if len(messages) == 0 { + return nil + } + var buffer bytes.Buffer + var writer io.Writer + var gzipWriter *gzip.Writer + var err error + // If gzip compression is enabled - create gzip writer with specified compression + // level. If gzip compression is disabled, use standard buffer as a writer + if l.gzipCompression { + gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel) + if err != nil { + return err + } + writer = gzipWriter + } else { + writer = &buffer + } + for _, message := range messages { + jsonEvent, err := json.Marshal(message) + if err != nil { + return err + } + if _, err := writer.Write(jsonEvent); err != nil { + return err + } + } + // If gzip compression is enabled, tell it, that we are done + if l.gzipCompression { + err = gzipWriter.Close() + if err != nil { + return err + } + } + req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes())) + if err != nil { + return err + } + req.Header.Set("Authorization", l.auth) + // Tell if we are sending gzip compressed body + if l.gzipCompression { + req.Header.Set("Content-Encoding", "gzip") + } + res, err := l.client.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body) + } + io.Copy(ioutil.Discard, res.Body) + return nil +} + +func (l *splunkLogger) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if l.closedCond == nil { + l.closedCond = sync.NewCond(&l.lock) + close(l.stream) + for !l.closed { + l.closedCond.Wait() + } + } + return nil +} + +func (l *splunkLogger) Name() string { + return driverName +} + +func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage { + message := *l.nullMessage + message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second)) + return &message +} + +// ValidateLogOpt looks for all supported by splunk driver options +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case splunkURLKey: + case splunkTokenKey: + case splunkSourceKey: + case splunkSourceTypeKey: + case splunkIndexKey: + case splunkCAPathKey: + case splunkCANameKey: + case splunkInsecureSkipVerifyKey: + case splunkFormatKey: + case splunkVerifyConnectionKey: + case splunkGzipCompressionKey: + case splunkGzipCompressionLevelKey: + case envKey: + case labelsKey: + case tagKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName) + } + } + return nil +} + +func parseURL(ctx logger.Context) (*url.URL, error) { + splunkURLStr, ok := ctx.Config[splunkURLKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) + } + + splunkURL, err := url.Parse(splunkURLStr) + if err != nil { + return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey) + } + + if !urlutil.IsURL(splunkURLStr) || + !splunkURL.IsAbs() || + (splunkURL.Path != "" && splunkURL.Path != "/") || + splunkURL.RawQuery != "" || + splunkURL.Fragment != "" { + return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey) + } + + splunkURL.Path = "/services/collector/event/1.0" + + return splunkURL, nil +} + +func verifySplunkConnection(l *splunkLogger) error { + req, err := http.NewRequest(http.MethodOptions, l.url, nil) + if err != nil { + return err + } + res, err := l.client.Do(req) + if err != nil { + return err + } + if res.Body != nil { + defer res.Body.Close() + } + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body) + } + return nil +} + +func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration { + valueStr := os.Getenv(envName) + if valueStr == "" { + return defaultValue + } + parsedValue, err := time.ParseDuration(valueStr) + if err != nil { + logrus.Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err)) + return defaultValue + } + return parsedValue +} + +func getAdvancedOptionInt(envName string, defaultValue int) int { + valueStr := os.Getenv(envName) + if valueStr == "" { + return defaultValue + } + parsedValue, err := strconv.ParseInt(valueStr, 10, 32) + if err != nil { + logrus.Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err)) + return defaultValue + } + return int(parsedValue) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go new file mode 100644 index 0000000..df74cba --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go @@ -0,0 +1,1302 @@ +package splunk + +import ( + "compress/gzip" + "fmt" + "os" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" +) + +// Validate options +func TestValidateLogOpt(t *testing.T) { + err := ValidateLogOpt(map[string]string{ + splunkURLKey: "http://127.0.0.1", + splunkTokenKey: "2160C7EF-2CE9-4307-A180-F852B99CF417", + splunkSourceKey: "mysource", + splunkSourceTypeKey: "mysourcetype", + splunkIndexKey: "myindex", + splunkCAPathKey: "/usr/cert.pem", + splunkCANameKey: "ca_name", + splunkInsecureSkipVerifyKey: "true", + splunkFormatKey: "json", + splunkVerifyConnectionKey: "true", + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + envKey: "a", + labelsKey: "b", + tagKey: "c", + }) + if err != nil { + t.Fatal(err) + } + + err = ValidateLogOpt(map[string]string{ + "not-supported-option": "a", + }) + if err == nil { + t.Fatal("Expecting error on unsupported options") + } +} + +// Driver require user to specify required options +func TestNewMissedConfig(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{}, + } + _, err := New(ctx) + if err == nil { + t.Fatal("Logger driver should fail when no required parameters specified") + } +} + +// Driver require user to specify splunk-url +func TestNewMissedUrl(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{ + splunkTokenKey: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + }, + } + _, err := New(ctx) + if err.Error() != "splunk: splunk-url is expected" { + t.Fatal("Logger driver should fail when no required parameters specified") + } +} + +// Driver require user to specify splunk-token +func TestNewMissedToken(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: "http://127.0.0.1:8088", + }, + } + _, err := New(ctx) + if err.Error() != "splunk: splunk-token is expected" { + t.Fatal("Logger driver should fail when no required parameters specified") + } +} + +// Test default settings +func TestDefault(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if loggerDriver.Name() != driverName { + t.Fatal("Unexpected logger driver name") + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Found not default values setup in Splunk Logging Driver.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notajson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + if *hec.gzipEnabled { + t.Fatal("Gzip should not be used") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "{\"a\":\"b\"}" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message2) + } + + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notajson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify inline format with a not default settings for most of options +func TestInlineFormatWithNonDefaultOptions(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkSourceKey: "mysource", + splunkSourceTypeKey: "mysourcetype", + splunkIndexKey: "myindex", + splunkFormatKey: splunkFormatInline, + splunkGzipCompressionKey: "true", + tagKey: "{{.ImageName}}/{{.Name}}", + labelsKey: "a", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "mysource" || + splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" || + splunkLoggerDriver.nullMessage.Index != "myindex" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + messageTime := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("1"), "stdout", messageTime, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 1 { + t.Fatal("Expected one message") + } + + if !*hec.gzipEnabled { + t.Fatal("Gzip should be used") + } + + message := hec.messages[0] + if message.Time != fmt.Sprintf("%f", float64(messageTime.UnixNano())/float64(time.Second)) || + message.Host != hostname || + message.Source != "mysource" || + message.SourceType != "mysourcetype" || + message.Index != "myindex" { + t.Fatalf("Unexpected values of message %v", message) + } + + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "1" || + event["source"] != "stdout" || + event["tag"] != "container_image_name/container_name" || + event["attrs"].(map[string]interface{})["a"] != "b" || + len(event) != 4 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify JSON format +func TestJsonFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatJSON, + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerJSON) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"].(map[string]interface{})["a"] != "b" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + // If message cannot be parsed as JSON - it should be sent as a line + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notjson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format +func TestRawFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid notjson" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format with labels +func TestRawFormatWithLabels(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + labelsKey: "a", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid a=b " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b notjson" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that Splunk Logging Driver can accept tag="" which will allow to send raw messages +// in the same way we get them in stdout/stderr +func TestRawFormatWithoutTag(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + tagKey: "", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "" { + t.Log(string(splunkLoggerDriver.prefix) + "a") + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "{\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "notjson" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that we will send messages in batches with default batching parameters, +// but change frequency to be sure that numOfRequests will match expected 17 requests +func TestBatching(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < defaultStreamChannelSize*4; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != defaultStreamChannelSize*4 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 16 batches + if hec.numOfRequests != 17 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that test is using time to fire events not rare than specified frequency +func TestFrequency(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "5ms"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + time.Sleep(15 * time.Millisecond) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 to verify that we have sent messages with required frequency, + // but because frequency is too small (to keep test quick), instead of 11, use 9 if context switches will be slow + if hec.numOfRequests < 9 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Simulate behavior similar to first version of Splunk Logging Driver, when we were sending one message +// per request +func TestOneMessagePerRequest(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 messages + if hec.numOfRequests != 11 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Driver should not be created when HEC is unresponsive +func TestVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + _, err := New(ctx) + if err == nil { + t.Fatal("Expecting driver to fail, when server is unresponsive") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that user can specify to skip verification that Splunk HEC is working. +// Also in this test we verify retry logic. +func TestSkipVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < defaultStreamChannelSize*2; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be accepted at this point") + } + + hec.simulateServerError = false + + for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != defaultStreamChannelSize*4 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify logic for when we filled whole buffer +func TestBufferMaximum(t *testing.T) { + if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "10"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < 11; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be accepted at this point") + } + + hec.simulateServerError = false + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 9 { + t.Fatalf("Expected # of messages %d, got %d", 9, len(hec.messages)) + } + + // First 1000 messages are written to daemon log when buffer was full + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i+2) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that we are not blocking close when HEC is down for the whole time +func TestServerAlwaysDown(t *testing.T) { + if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "4"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < 5; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be sent") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Cannot send messages after we close driver +func TestCannotSendAfterClose(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{[]byte("message1"), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{[]byte("message2"), "stdout", time.Now(), nil, false}); err == nil { + t.Fatal("Driver should not allow to send messages after close") + } + + if len(hec.messages) != 1 { + t.Fatal("Only one message should be sent") + } + + message := hec.messages[0] + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "message1" { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go new file mode 100644 index 0000000..e508948 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go @@ -0,0 +1,157 @@ +package splunk + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "testing" +) + +func (message *splunkMessage) EventAsString() (string, error) { + if val, ok := message.Event.(string); ok { + return val, nil + } + return "", fmt.Errorf("Cannot cast Event %v to string", message.Event) +} + +func (message *splunkMessage) EventAsMap() (map[string]interface{}, error) { + if val, ok := message.Event.(map[string]interface{}); ok { + return val, nil + } + return nil, fmt.Errorf("Cannot cast Event %v to map", message.Event) +} + +type HTTPEventCollectorMock struct { + tcpAddr *net.TCPAddr + tcpListener *net.TCPListener + + token string + simulateServerError bool + + test *testing.T + + connectionVerified bool + gzipEnabled *bool + messages []*splunkMessage + numOfRequests int +} + +func NewHTTPEventCollectorMock(t *testing.T) *HTTPEventCollectorMock { + tcpAddr := &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0, Zone: ""} + tcpListener, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + t.Fatal(err) + } + return &HTTPEventCollectorMock{ + tcpAddr: tcpAddr, + tcpListener: tcpListener, + token: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + simulateServerError: false, + test: t, + connectionVerified: false} +} + +func (hec *HTTPEventCollectorMock) URL() string { + return "http://" + hec.tcpListener.Addr().String() +} + +func (hec *HTTPEventCollectorMock) Serve() error { + return http.Serve(hec.tcpListener, hec) +} + +func (hec *HTTPEventCollectorMock) Close() error { + return hec.tcpListener.Close() +} + +func (hec *HTTPEventCollectorMock) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + var err error + + hec.numOfRequests++ + + if hec.simulateServerError { + if request.Body != nil { + defer request.Body.Close() + } + writer.WriteHeader(http.StatusInternalServerError) + return + } + + switch request.Method { + case http.MethodOptions: + // Verify that options method is getting called only once + if hec.connectionVerified { + hec.test.Errorf("Connection should not be verified more than once. Got second request with %s method.", request.Method) + } + hec.connectionVerified = true + writer.WriteHeader(http.StatusOK) + case http.MethodPost: + // Always verify that Driver is using correct path to HEC + if request.URL.String() != "/services/collector/event/1.0" { + hec.test.Errorf("Unexpected path %v", request.URL) + } + defer request.Body.Close() + + if authorization, ok := request.Header["Authorization"]; !ok || authorization[0] != ("Splunk "+hec.token) { + hec.test.Error("Authorization header is invalid.") + } + + gzipEnabled := false + if contentEncoding, ok := request.Header["Content-Encoding"]; ok && contentEncoding[0] == "gzip" { + gzipEnabled = true + } + + if hec.gzipEnabled == nil { + hec.gzipEnabled = &gzipEnabled + } else if gzipEnabled != *hec.gzipEnabled { + // Nothing wrong with that, but we just know that Splunk Logging Driver does not do that + hec.test.Error("Driver should not change Content Encoding.") + } + + var gzipReader *gzip.Reader + var reader io.Reader + if gzipEnabled { + gzipReader, err = gzip.NewReader(request.Body) + if err != nil { + hec.test.Fatal(err) + } + reader = gzipReader + } else { + reader = request.Body + } + + // Read body + var body []byte + body, err = ioutil.ReadAll(reader) + if err != nil { + hec.test.Fatal(err) + } + + // Parse message + messageStart := 0 + for i := 0; i < len(body); i++ { + if i == len(body)-1 || (body[i] == '}' && body[i+1] == '{') { + var message splunkMessage + err = json.Unmarshal(body[messageStart:i+1], &message) + if err != nil { + hec.test.Log(string(body[messageStart : i+1])) + hec.test.Fatal(err) + } + hec.messages = append(hec.messages, &message) + messageStart = i + 1 + } + } + + if gzipEnabled { + gzipReader.Close() + } + + writer.WriteHeader(http.StatusOK) + default: + hec.test.Errorf("Unexpected HTTP method %s", http.MethodOptions) + writer.WriteHeader(http.StatusBadRequest) + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go new file mode 100644 index 0000000..fb9e867 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go @@ -0,0 +1,262 @@ +// Package syslog provides the logdriver for forwarding server logs to syslog endpoints. +package syslog + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "os" + "strconv" + "strings" + "time" + + syslog "github.com/RackSec/srslog" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + name = "syslog" + secureProto = "tcp+tls" +) + +var facilities = map[string]syslog.Priority{ + "kern": syslog.LOG_KERN, + "user": syslog.LOG_USER, + "mail": syslog.LOG_MAIL, + "daemon": syslog.LOG_DAEMON, + "auth": syslog.LOG_AUTH, + "syslog": syslog.LOG_SYSLOG, + "lpr": syslog.LOG_LPR, + "news": syslog.LOG_NEWS, + "uucp": syslog.LOG_UUCP, + "cron": syslog.LOG_CRON, + "authpriv": syslog.LOG_AUTHPRIV, + "ftp": syslog.LOG_FTP, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, +} + +type syslogger struct { + writer *syslog.Writer +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// rsyslog uses appname part of syslog message to fill in an %syslogtag% template +// attribute in rsyslog.conf. In order to be backward compatible to rfc3164 +// tag will be also used as an appname +func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances +// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximium +// resolution is limited to "TIME-SECFRAC" which is 6 (microsecond resolution) +func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00") + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// New creates a syslog logger using the configuration passed in on +// the context. Supported context configuration variables are +// syslog-address, syslog-facility, syslog-format. +func New(ctx logger.Context) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + proto, address, err := parseAddress(ctx.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(ctx.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"], proto) + if err != nil { + return nil, err + } + + var log *syslog.Writer + if proto == secureProto { + tlsConfig, tlsErr := parseTLSConfig(ctx.Config) + if tlsErr != nil { + return nil, tlsErr + } + log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig) + } else { + log, err = syslog.Dial(proto, address, facility, tag) + } + + if err != nil { + return nil, err + } + + log.SetFormatter(syslogFormatter) + log.SetFramer(syslogFramer) + + return &syslogger{ + writer: log, + }, nil +} + +func (s *syslogger) Log(msg *logger.Message) error { + if msg.Source == "stderr" { + return s.writer.Err(string(msg.Line)) + } + return s.writer.Info(string(msg.Line)) +} + +func (s *syslogger) Close() error { + return s.writer.Close() +} + +func (s *syslogger) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if address == "" { + return "", "", nil + } + if !urlutil.IsTransportURL(address) { + return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix and unixgram socket validation + if url.Scheme == "unix" || url.Scheme == "unixgram" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", err + } + host = host + ":514" + } + + return url.Scheme, host, nil +} + +// ValidateLogOpt looks for syslog specific log options +// syslog-address, syslog-facility. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "labels": + case "syslog-address": + case "syslog-facility": + case "syslog-tls-ca-cert": + case "syslog-tls-cert": + case "syslog-tls-key": + case "syslog-tls-skip-verify": + case "tag": + case "syslog-format": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { + return err + } + if _, err := parseFacility(cfg["syslog-facility"]); err != nil { + return err + } + if _, _, err := parseLogFormat(cfg["syslog-format"], ""); err != nil { + return err + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} + +func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { + _, skipVerify := cfg["syslog-tls-skip-verify"] + + opts := tlsconfig.Options{ + CAFile: cfg["syslog-tls-ca-cert"], + CertFile: cfg["syslog-tls-cert"], + KeyFile: cfg["syslog-tls-key"], + InsecureSkipVerify: skipVerify, + } + + return tlsconfig.Client(opts) +} + +func parseLogFormat(logFormat, proto string) (syslog.Formatter, syslog.Framer, error) { + switch logFormat { + case "": + return syslog.UnixFormatter, syslog.DefaultFramer, nil + case "rfc3164": + return syslog.RFC3164Formatter, syslog.DefaultFramer, nil + case "rfc5424": + if proto == secureProto { + return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424formatterWithAppNameAsTag, syslog.DefaultFramer, nil + case "rfc5424micro": + if proto == secureProto { + return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424microformatterWithAppNameAsTag, syslog.DefaultFramer, nil + default: + return nil, nil, errors.New("Invalid syslog format") + } + +} diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go new file mode 100644 index 0000000..5015610 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go @@ -0,0 +1,62 @@ +package syslog + +import ( + "reflect" + "testing" + + syslog "github.com/RackSec/srslog" +) + +func functionMatches(expectedFun interface{}, actualFun interface{}) bool { + return reflect.ValueOf(expectedFun).Pointer() == reflect.ValueOf(actualFun).Pointer() +} + +func TestParseLogFormat(t *testing.T) { + formatter, framer, err := parseLogFormat("rfc5424", "udp") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424", "tcp+tls") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "udp") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "tcp+tls") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc3164", "") + if err != nil || !functionMatches(syslog.RFC3164Formatter, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc3164 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("", "") + if err != nil || !functionMatches(syslog.UnixFormatter, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse empty format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("invalid", "") + if err == nil { + t.Fatal("Failed to parse invalid format", err, formatter, framer) + } +} + +func TestValidateLogOptEmpty(t *testing.T) { + emptyConfig := make(map[string]string) + if err := ValidateLogOpt(emptyConfig); err != nil { + t.Fatal("Failed to parse empty config", err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/logs.go b/vendor/github.com/docker/docker/daemon/logs.go new file mode 100644 index 0000000..cc34b82 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logs.go @@ -0,0 +1,142 @@ +package daemon + +import ( + "fmt" + "io" + "strconv" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stdcopy" +) + +// ContainerLogs hooks up a container's stdout and stderr streams +// configured with the given struct. +func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + if !(config.ShowStdout || config.ShowStderr) { + return fmt.Errorf("You must choose at least one stream") + } + + cLog, err := daemon.getLogger(container) + if err != nil { + return err + } + logReader, ok := cLog.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + + follow := config.Follow && container.IsRunning() + tailLines, err := strconv.Atoi(config.Tail) + if err != nil { + tailLines = -1 + } + + logrus.Debug("logs: begin stream") + + var since time.Time + if config.Since != "" { + s, n, err := timetypes.ParseTimestamps(config.Since, 0) + if err != nil { + return err + } + since = time.Unix(s, n) + } + readConfig := logger.ReadConfig{ + Since: since, + Tail: tailLines, + Follow: follow, + } + logs := logReader.ReadLogs(readConfig) + + wf := ioutils.NewWriteFlusher(config.OutStream) + defer wf.Close() + close(started) + wf.Flush() + + var outStream io.Writer + outStream = wf + errStream := outStream + if !container.Config.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + for { + select { + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + return nil + case <-ctx.Done(): + logs.Close() + return nil + case msg, ok := <-logs.Msg: + if !ok { + logrus.Debug("logs: end stream") + logs.Close() + if cLog != container.LogDriver { + // Since the logger isn't cached in the container, which occurs if it is running, it + // must get explicitly closed here to avoid leaking it and any file handles it has. + if err := cLog.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + } + return nil + } + logLine := msg.Line + if config.Details { + logLine = append([]byte(msg.Attrs.String()+" "), logLine...) + } + if config.Timestamps { + logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) + } + if msg.Source == "stdout" && config.ShowStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.ShowStderr { + errStream.Write(logLine) + } + } + } +} + +func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { + if container.LogDriver != nil && container.IsRunning() { + return container.LogDriver, nil + } + return container.StartLogger(container.HostConfig.LogConfig) +} + +// mergeLogConfig merges the daemon log config to the container's log config if the container's log driver is not specified. +func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error { + if cfg.Type == "" { + cfg.Type = daemon.defaultLogConfig.Type + } + + if cfg.Config == nil { + cfg.Config = make(map[string]string) + } + + if cfg.Type == daemon.defaultLogConfig.Type { + for k, v := range daemon.defaultLogConfig.Config { + if _, ok := cfg.Config[k]; !ok { + cfg.Config[k] = v + } + } + } + + return logger.ValidateLogOpts(cfg.Type, cfg.Config) +} diff --git a/vendor/github.com/docker/docker/daemon/logs_test.go b/vendor/github.com/docker/docker/daemon/logs_test.go new file mode 100644 index 0000000..0c36299 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logs_test.go @@ -0,0 +1,15 @@ +package daemon + +import ( + "testing" + + containertypes "github.com/docker/docker/api/types/container" +) + +func TestMergeAndVerifyLogConfigNilConfig(t *testing.T) { + d := &Daemon{defaultLogConfig: containertypes.LogConfig{Type: "json-file", Config: map[string]string{"max-file": "1"}}} + cfg := containertypes.LogConfig{Type: d.defaultLogConfig.Type} + if err := d.mergeAndVerifyLogConfig(&cfg); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/metrics.go b/vendor/github.com/docker/docker/daemon/metrics.go new file mode 100644 index 0000000..69dbfd9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/metrics.go @@ -0,0 +1,42 @@ +package daemon + +import "github.com/docker/go-metrics" + +var ( + containerActions metrics.LabeledTimer + imageActions metrics.LabeledTimer + networkActions metrics.LabeledTimer + engineVersion metrics.LabeledGauge + engineCpus metrics.Gauge + engineMemory metrics.Gauge + healthChecksCounter metrics.Counter + healthChecksFailedCounter metrics.Counter +) + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action") + for _, a := range []string{ + "start", + "changes", + "commit", + "create", + "delete", + } { + containerActions.WithValues(a).Update(0) + } + networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action") + engineVersion = ns.NewLabeledGauge("engine", "The version and commit information for the engine process", metrics.Unit("info"), + "version", + "commit", + "architecture", + "graph_driver", "kernel", + "os", + ) + engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus")) + engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes) + healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks") + healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks") + imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action") + metrics.Register(ns) +} diff --git a/vendor/github.com/docker/docker/daemon/monitor.go b/vendor/github.com/docker/docker/daemon/monitor.go new file mode 100644 index 0000000..ee0d1fc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor.go @@ -0,0 +1,132 @@ +package daemon + +import ( + "errors" + "fmt" + "runtime" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/restartmanager" +) + +// StateChanged updates daemon state changes from containerd +func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { + c := daemon.containers.Get(id) + if c == nil { + return fmt.Errorf("no such container: %s", id) + } + + switch e.State { + case libcontainerd.StateOOM: + // StateOOM is Linux specific and should never be hit on Windows + if runtime.GOOS == "windows" { + return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "oom") + case libcontainerd.StateExit: + // if container's AutoRemove flag is set, remove it after clean up + autoRemove := func() { + if c.HostConfig.AutoRemove { + if err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("can't remove container %s: %v", c.ID, err) + } + } + } + + c.Lock() + c.StreamConfig.Wait() + c.Reset(false) + + restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, false, time.Since(c.StartedAt)) + if err == nil && restart { + c.RestartCount++ + c.SetRestarting(platformConstructExitStatus(e)) + } else { + c.SetStopped(platformConstructExitStatus(e)) + defer autoRemove() + } + + daemon.updateHealthMonitor(c) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + daemon.Cleanup(c) + + if err == nil && restart { + go func() { + err := <-wait + if err == nil { + if err = daemon.containerStart(c, "", "", false); err != nil { + logrus.Debugf("failed to restart container: %+v", err) + } + } + if err != nil { + c.SetStopped(platformConstructExitStatus(e)) + defer autoRemove() + if err != restartmanager.ErrRestartCanceled { + logrus.Errorf("restartmanger wait error: %+v", err) + } + } + }() + } + + defer c.Unlock() + if err := c.ToDisk(); err != nil { + return err + } + return daemon.postRunProcessing(c, e) + case libcontainerd.StateExitProcess: + if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { + ec := int(e.ExitCode) + execConfig.Lock() + defer execConfig.Unlock() + execConfig.ExitCode = &ec + execConfig.Running = false + execConfig.StreamConfig.Wait() + if err := execConfig.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", c.ID, err) + } + + // remove the exec command from the container's store only and not the + // daemon's store so that the exec command can be inspected. + c.ExecCommands.Delete(execConfig.ID) + } else { + logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) + } + case libcontainerd.StateStart, libcontainerd.StateRestore: + // Container is already locked in this case + c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) + c.HasBeenManuallyStopped = false + c.HasBeenStartedBefore = true + if err := c.ToDisk(); err != nil { + c.Reset(false) + return err + } + daemon.initHealthMonitor(c) + daemon.LogContainerEvent(c, "start") + case libcontainerd.StatePause: + // Container is already locked in this case + c.Paused = true + if err := c.ToDisk(); err != nil { + return err + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "pause") + case libcontainerd.StateResume: + // Container is already locked in this case + c.Paused = false + if err := c.ToDisk(); err != nil { + return err + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "unpause") + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/monitor_linux.go b/vendor/github.com/docker/docker/daemon/monitor_linux.go new file mode 100644 index 0000000..09f5af5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor_linux.go @@ -0,0 +1,19 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + OOMKilled: e.OOMKilled, + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/monitor_solaris.go b/vendor/github.com/docker/docker/daemon/monitor_solaris.go new file mode 100644 index 0000000..5ccfada --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor_solaris.go @@ -0,0 +1,18 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/monitor_windows.go b/vendor/github.com/docker/docker/daemon/monitor_windows.go new file mode 100644 index 0000000..9648b1b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor_windows.go @@ -0,0 +1,46 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + if e.ExitCode == 0 && e.UpdatePending { + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + newOpts := []libcontainerd.CreateOption{&libcontainerd.ServicingOption{ + IsServicing: true, + }} + + copts, err := daemon.getLibcontainerdCreateOptions(container) + if err != nil { + return err + } + + if copts != nil { + newOpts = append(newOpts, copts...) + } + + // Create a new servicing container, which will start, complete the update, and merge back the + // results if it succeeded, all as part of the below function call. + if err := daemon.containerd.Create((container.ID + "_servicing"), "", "", *spec, container.InitializeStdio, newOpts...); err != nil { + container.SetExitCode(-1) + return fmt.Errorf("Post-run update servicing failed: %s", err) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/mounts.go b/vendor/github.com/docker/docker/daemon/mounts.go new file mode 100644 index 0000000..1c11f86 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/mounts.go @@ -0,0 +1,48 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/container" + volumestore "github.com/docker/docker/volume/store" +) + +func (daemon *Daemon) prepareMountPoints(container *container.Container) error { + for _, config := range container.MountPoints { + if err := daemon.lazyInitializeVolume(container.ID, config); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { + var rmErrors []string + for _, m := range container.MountPoints { + if m.Volume == nil { + continue + } + daemon.volumes.Dereference(m.Volume, container.ID) + if rm { + // Do not remove named mountpoints + // these are mountpoints specified like `docker run -v :/foo` + if m.Spec.Source != "" { + continue + } + err := daemon.volumes.Remove(m.Volume) + // Ignore volume in use errors because having this + // volume being referenced by other container is + // not an error, but an implementation detail. + // This prevents docker from logging "ERROR: Volume in use" + // where there is another container using the volume. + if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) + } + } + } + if len(rmErrors) > 0 { + return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/names.go b/vendor/github.com/docker/docker/daemon/names.go new file mode 100644 index 0000000..273d551 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/names.go @@ -0,0 +1,116 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/utils" +) + +var ( + validContainerNameChars = utils.RestrictedNameChars + validContainerNamePattern = utils.RestrictedNamePattern +) + +func (daemon *Daemon) registerName(container *container.Container) error { + if daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + + if err := container.ToDiskLocking(); err != nil { + logrus.Errorf("Error saving container name to disk: %v", err) + } + } + return daemon.nameIndex.Reserve(container.Name, container.ID) +} + +func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { + var ( + err error + id = stringid.GenerateNonCryptoID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(strings.TrimPrefix(name, "/")) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + id, err := daemon.nameIndex.Get(name) + if err != nil { + logrus.Errorf("got unexpected error while looking up reserved name: %v", err) + return "", err + } + return "", fmt.Errorf("Conflict. The container name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id) + } + return "", fmt.Errorf("error reserving name: %s, error: %v", name, err) + } + return name, nil +} + +func (daemon *Daemon) releaseName(name string) { + daemon.nameIndex.Release(name) +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + continue + } + return "", err + } + return name, nil + } + + name = "/" + stringid.TruncateID(id) + if err := daemon.nameIndex.Reserve(name, id); err != nil { + return "", err + } + return name, nil +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/network.go b/vendor/github.com/docker/docker/daemon/network.go new file mode 100644 index 0000000..ab8fd88 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network.go @@ -0,0 +1,498 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + networktypes "github.com/docker/libnetwork/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// NetworkControllerEnabled checks if the networking stack is enabled. +// This feature depends on OS primitives and it's disabled in systems like Windows. +func (daemon *Daemon) NetworkControllerEnabled() bool { + return daemon.netController != nil +} + +// FindNetwork function finds a network for a given string that can represent network name or id +func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { + // Find by Name + n, err := daemon.GetNetworkByName(idName) + if err != nil && !isNoSuchNetworkError(err) { + return nil, err + } + + if n != nil { + return n, nil + } + + // Find by id + return daemon.GetNetworkByID(idName) +} + +func isNoSuchNetworkError(err error) bool { + _, ok := err.(libnetwork.ErrNoSuchNetwork) + return ok +} + +// GetNetworkByID function returns a network whose ID begins with the given prefix. +// It fails with an error if no matching, or more than one matching, networks are found. +func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) { + list := daemon.GetNetworksByID(partialID) + + if len(list) == 0 { + return nil, libnetwork.ErrNoSuchNetwork(partialID) + } + if len(list) > 1 { + return nil, libnetwork.ErrInvalidID(partialID) + } + return list[0], nil +} + +// GetNetworkByName function returns a network for a given network name. +// If no network name is given, the default network is returned. +func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) { + c := daemon.netController + if c == nil { + return nil, libnetwork.ErrNoSuchNetwork(name) + } + if name == "" { + name = c.Config().Daemon.DefaultNetwork + } + return c.NetworkByName(name) +} + +// GetNetworksByID returns a list of networks whose ID partially matches zero or more networks +func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { + c := daemon.netController + if c == nil { + return nil + } + list := []libnetwork.Network{} + l := func(nw libnetwork.Network) bool { + if strings.HasPrefix(nw.ID(), partialID) { + list = append(list, nw) + } + return false + } + c.WalkNetworks(l) + + return list +} + +// getAllNetworks returns a list containing all networks +func (daemon *Daemon) getAllNetworks() []libnetwork.Network { + c := daemon.netController + list := []libnetwork.Network{} + l := func(nw libnetwork.Network) bool { + list = append(list, nw) + return false + } + c.WalkNetworks(l) + + return list +} + +func isIngressNetwork(name string) bool { + return name == "ingress" +} + +var ingressChan = make(chan struct{}, 1) + +func ingressWait() func() { + ingressChan <- struct{}{} + return func() { <-ingressChan } +} + +// SetupIngress setups ingress networking. +func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) error { + ip, _, err := net.ParseCIDR(nodeIP) + if err != nil { + return err + } + + go func() { + controller := daemon.netController + controller.AgentInitWait() + + if n, err := daemon.GetNetworkByName(create.Name); err == nil && n != nil && n.ID() != create.ID { + if err := controller.SandboxDestroy("ingress-sbox"); err != nil { + logrus.Errorf("Failed to delete stale ingress sandbox: %v", err) + return + } + + // Cleanup any stale endpoints that might be left over during previous iterations + epList := n.Endpoints() + for _, ep := range epList { + if err := ep.Delete(true); err != nil { + logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err) + } + } + + if err := n.Delete(); err != nil { + logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err) + return + } + } + + if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil { + // If it is any other error other than already + // exists error log error and return. + if _, ok := err.(libnetwork.NetworkNameError); !ok { + logrus.Errorf("Failed creating ingress network: %v", err) + return + } + + // Otherwise continue down the call to create or recreate sandbox. + } + + n, err := daemon.GetNetworkByID(create.ID) + if err != nil { + logrus.Errorf("Failed getting ingress network by id after creating: %v", err) + return + } + + sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) + if err != nil { + if _, ok := err.(networktypes.ForbiddenError); !ok { + logrus.Errorf("Failed creating ingress sandbox: %v", err) + } + return + } + + ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) + if err != nil { + logrus.Errorf("Failed creating ingress endpoint: %v", err) + return + } + + if err := ep.Join(sb, nil); err != nil { + logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) + } + + if err := sb.EnableService(); err != nil { + logrus.WithError(err).Error("Failed enabling service for ingress sandbox") + } + }() + + return nil +} + +// SetNetworkBootstrapKeys sets the bootstrap keys. +func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { + return daemon.netController.SetKeys(keys) +} + +// UpdateAttachment notifies the attacher about the attachment config. +func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil { + return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config) + } + + return nil +} + +// WaitForDetachment makes the cluster manager wait for detachment of +// the container from the network. +func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID) +} + +// CreateManagedNetwork creates an agent network. +func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error { + _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true) + return err +} + +// CreateNetwork creates a network with the given name, driver and other optional parameters +func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { + resp, err := daemon.createNetwork(create, "", false) + if err != nil { + return nil, err + } + return resp, err +} + +func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { + // If there is a pending ingress network creation wait here + // since ingress network creation can happen via node download + // from manager or task download. + if isIngressNetwork(create.Name) { + defer ingressWait()() + } + + if runconfig.IsPreDefinedNetwork(create.Name) && !agent { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) + return nil, apierrors.NewRequestForbiddenError(err) + } + + var warning string + nw, err := daemon.GetNetworkByName(create.Name) + if err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + return nil, err + } + } + if nw != nil { + if create.CheckDuplicate { + return nil, libnetwork.NetworkNameError(create.Name) + } + warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) + } + + c := daemon.netController + driver := create.Driver + if driver == "" { + driver = c.Config().Daemon.DefaultDriver + } + + nwOptions := []libnetwork.NetworkOption{ + libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(create.Options), + libnetwork.NetworkOptionLabels(create.Labels), + libnetwork.NetworkOptionAttachable(create.Attachable), + } + + if create.IPAM != nil { + ipam := create.IPAM + v4Conf, v6Conf, err := getIpamConfig(ipam.Config) + if err != nil { + return nil, err + } + nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options)) + } + + if create.Internal { + nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) + } + if agent { + nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic()) + nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) + } + + if isIngressNetwork(create.Name) { + nwOptions = append(nwOptions, libnetwork.NetworkOptionIngress()) + } + + n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) + if err != nil { + return nil, err + } + + daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.ACQUIRE) + if create.IPAM != nil { + daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.ACQUIRE) + } + daemon.LogNetworkEvent(n, "create") + + return &types.NetworkCreateResponse{ + ID: n.ID(), + Warning: warning, + }, nil +} + +func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) { + var builtinDrivers []string + + if capability == driverapi.NetworkPluginEndpointType { + builtinDrivers = daemon.netController.BuiltinDrivers() + } else if capability == ipamapi.PluginEndpointType { + builtinDrivers = daemon.netController.BuiltinIPAMDrivers() + } + + for _, d := range builtinDrivers { + if d == driver { + return + } + } + + if daemon.PluginStore != nil { + _, err := daemon.PluginStore.Get(driver, capability, mode) + if err != nil { + logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation") + } + } +} + +func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { + ipamV4Cfg := []*libnetwork.IpamConf{} + ipamV6Cfg := []*libnetwork.IpamConf{} + for _, d := range data { + iCfg := libnetwork.IpamConf{} + iCfg.PreferredPool = d.Subnet + iCfg.SubPool = d.IPRange + iCfg.Gateway = d.Gateway + iCfg.AuxAddresses = d.AuxAddress + ip, _, err := net.ParseCIDR(d.Subnet) + if err != nil { + return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) + } + if ip.To4() != nil { + ipamV4Cfg = append(ipamV4Cfg, &iCfg) + } else { + ipamV6Cfg = append(ipamV6Cfg, &iCfg) + } + } + return ipamV4Cfg, ipamV6Cfg, nil +} + +// UpdateContainerServiceConfig updates a service configuration. +func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + container.NetworkSettings.Service = serviceConfig + return nil +} + +// ConnectContainerToNetwork connects the given container to the given +// network. If either cannot be found, an err is returned. If the +// network cannot be set up, an err is returned. +func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network connect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + return daemon.ConnectToNetwork(container, networkName, endpointConfig) +} + +// DisconnectContainerFromNetwork disconnects the given container from +// the given network. If either cannot be found, an err is returned. +func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network disconnect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + if force { + return daemon.ForceEndpointDelete(containerName, networkName) + } + return err + } + return daemon.DisconnectFromNetwork(container, networkName, force) +} + +// GetNetworkDriverList returns the list of plugins drivers +// registered for network. +func (daemon *Daemon) GetNetworkDriverList() []string { + if !daemon.NetworkControllerEnabled() { + return nil + } + + pluginList := daemon.netController.BuiltinDrivers() + + managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType) + + for _, plugin := range managedPlugins { + pluginList = append(pluginList, plugin.Name()) + } + + pluginMap := make(map[string]bool) + for _, plugin := range pluginList { + pluginMap[plugin] = true + } + + networks := daemon.netController.Networks() + + for _, network := range networks { + if !pluginMap[network.Type()] { + pluginList = append(pluginList, network.Type()) + pluginMap[network.Type()] = true + } + } + + sort.Strings(pluginList) + + return pluginList +} + +// DeleteManagedNetwork deletes an agent network. +func (daemon *Daemon) DeleteManagedNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, true) +} + +// DeleteNetwork destroys a network unless it's one of docker's predefined networks. +func (daemon *Daemon) DeleteNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, false) +} + +func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { + nw, err := daemon.FindNetwork(networkID) + if err != nil { + return err + } + + if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic { + err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name()) + return apierrors.NewRequestForbiddenError(err) + } + + if err := nw.Delete(); err != nil { + return err + } + daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.RELEASE) + ipamType, _, _, _ := nw.Info().IpamConfig() + daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.RELEASE) + daemon.LogNetworkEvent(nw, "destroy") + return nil +} + +// GetNetworks returns a list of all networks +func (daemon *Daemon) GetNetworks() []libnetwork.Network { + return daemon.getAllNetworks() +} + +// clearAttachableNetworks removes the attachable networks +// after disconnecting any connected container +func (daemon *Daemon) clearAttachableNetworks() { + for _, n := range daemon.GetNetworks() { + if !n.Info().Attachable() { + continue + } + for _, ep := range n.Endpoints() { + epInfo := ep.Info() + if epInfo == nil { + continue + } + sb := epInfo.Sandbox() + if sb == nil { + continue + } + containerID := sb.ContainerID() + if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil { + logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v", + containerID, n.Name(), err) + } + } + if err := daemon.DeleteManagedNetwork(n.ID()); err != nil { + logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go new file mode 100644 index 0000000..8f6b7dd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network/settings.go @@ -0,0 +1,33 @@ +package network + +import ( + networktypes "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out., +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*EndpointSettings + Service *clustertypes.ServiceConfig + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool + HasSwarmEndpoint bool +} + +// EndpointSettings is a package local wrapper for +// networktypes.EndpointSettings which stores Endpoint state that +// needs to be persisted to disk but not exposed in the api. +type EndpointSettings struct { + *networktypes.EndpointSettings + IPAMOperational bool +} diff --git a/vendor/github.com/docker/docker/daemon/oci_linux.go b/vendor/github.com/docker/docker/daemon/oci_linux.go new file mode 100644 index 0000000..a72b0b8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_linux.go @@ -0,0 +1,790 @@ +package daemon + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/user" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + weightDevices, err := getBlkioWeightDevices(r) + if err != nil { + return err + } + readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) + if err != nil { + return err + } + writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) + if err != nil { + return err + } + readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) + if err != nil { + return err + } + writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) + if err != nil { + return err + } + + memoryRes := getMemoryResources(r) + cpuRes := getCPUResources(r) + blkioWeight := r.BlkioWeight + + specResources := &specs.Resources{ + Memory: memoryRes, + CPU: cpuRes, + BlockIO: &specs.BlockIO{ + Weight: &blkioWeight, + WeightDevice: weightDevices, + ThrottleReadBpsDevice: readBpsDevice, + ThrottleWriteBpsDevice: writeBpsDevice, + ThrottleReadIOPSDevice: readIOpsDevice, + ThrottleWriteIOPSDevice: writeIOpsDevice, + }, + DisableOOMKiller: r.OomKillDisable, + Pids: &specs.Pids{ + Limit: &r.PidsLimit, + }, + } + + if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { + specResources.Devices = s.Linux.Resources.Devices + } + + s.Linux.Resources = specResources + return nil +} + +func setDevices(s *specs.Spec, c *container.Container) error { + // Build lists of devices allowed and created within the container. + var devs []specs.Device + devPermissions := s.Linux.Resources.Devices + if c.HostConfig.Privileged { + hostDevices, err := devices.HostDevices() + if err != nil { + return err + } + for _, d := range hostDevices { + devs = append(devs, oci.Device(d)) + } + rwm := "rwm" + devPermissions = []specs.DeviceCgroup{ + { + Allow: true, + Access: &rwm, + }, + } + } else { + for _, deviceMapping := range c.HostConfig.Devices { + d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions) + if err != nil { + return err + } + devs = append(devs, d...) + devPermissions = append(devPermissions, dPermissions...) + } + } + + s.Linux.Devices = append(s.Linux.Devices, devs...) + s.Linux.Resources.Devices = devPermissions + return nil +} + +func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { + var rlimits []specs.Rlimit + + // We want to leave the original HostConfig alone so make a copy here + hostConfig := *c.HostConfig + // Merge with the daemon defaults + daemon.mergeUlimits(&hostConfig) + for _, ul := range hostConfig.Ulimits { + rlimits = append(rlimits, specs.Rlimit{ + Type: "RLIMIT_" + strings.ToUpper(ul.Name), + Soft: uint64(ul.Soft), + Hard: uint64(ul.Hard), + }) + } + + s.Process.Rlimits = rlimits + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + if err != nil { + return nil, err + } + return os.Open(fp) +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + passwdPath, err := user.GetPasswdPath() + if err != nil { + return 0, 0, nil, err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return 0, 0, nil, err + } + passwdFile, err := readUserFile(c, passwdPath) + if err == nil { + defer passwdFile.Close() + } + groupFile, err := readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + + execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) + if err != nil { + return 0, 0, nil, err + } + + // todo: fix this double read by a change to libcontainer/user pkg + groupFile, err = readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + var addGroups []int + if len(c.HostConfig.GroupAdd) > 0 { + addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) + if err != nil { + return 0, 0, nil, err + } + } + uid := uint32(execUser.Uid) + gid := uint32(execUser.Gid) + sgids := append(execUser.Sgids, addGroups...) + var additionalGids []uint32 + for _, g := range sgids { + additionalGids = append(additionalGids, uint32(g)) + } + return uid, gid, additionalGids, nil +} + +func setNamespace(s *specs.Spec, ns specs.Namespace) { + for i, n := range s.Linux.Namespaces { + if n.Type == ns.Type { + s.Linux.Namespaces[i] = ns + return + } + } + s.Linux.Namespaces = append(s.Linux.Namespaces, ns) +} + +func setCapabilities(s *specs.Spec, c *container.Container) error { + var caplist []string + var err error + if c.HostConfig.Privileged { + caplist = caps.GetAllCapabilities() + } else { + caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + if err != nil { + return err + } + } + s.Process.Capabilities = caplist + return nil +} + +func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { + userNS := false + // user + if c.HostConfig.UsernsMode.IsPrivate() { + uidMap, gidMap := daemon.GetUIDGIDMaps() + if uidMap != nil { + userNS = true + ns := specs.Namespace{Type: "user"} + setNamespace(s, ns) + s.Linux.UIDMappings = specMapping(uidMap) + s.Linux.GIDMappings = specMapping(gidMap) + } + } + // network + if !c.Config.NetworkDisabled { + ns := specs.Namespace{Type: "network"} + parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) + if parts[0] == "container" { + nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) + if userNS { + // to share a net namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.NetworkMode.IsHost() { + ns.Path = c.NetworkSettings.SandboxKey + } + setNamespace(s, ns) + } + // ipc + if c.HostConfig.IpcMode.IsContainer() { + ns := specs.Namespace{Type: "ipc"} + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share an IPC namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.IpcMode.IsHost() { + oci.RemoveNamespace(s, specs.NamespaceType("ipc")) + } else { + ns := specs.Namespace{Type: "ipc"} + setNamespace(s, ns) + } + // pid + if c.HostConfig.PidMode.IsContainer() { + ns := specs.Namespace{Type: "pid"} + pc, err := daemon.getPidContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share a PID namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.PidMode.IsHost() { + oci.RemoveNamespace(s, specs.NamespaceType("pid")) + } else { + ns := specs.Namespace{Type: "pid"} + setNamespace(s, ns) + } + // uts + if c.HostConfig.UTSMode.IsHost() { + oci.RemoveNamespace(s, specs.NamespaceType("uts")) + s.Hostname = "" + } + + return nil +} + +func specMapping(s []idtools.IDMap) []specs.IDMapping { + var ids []specs.IDMapping + for _, item := range s { + ids = append(ids, specs.IDMapping{ + HostID: uint32(item.HostID), + ContainerID: uint32(item.ContainerID), + Size: uint32(item.Size), + }) + } + return ids +} + +func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { + for _, m := range mountinfo { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +// Get the source mount point of directory passed in as argument. Also return +// optional fields. +func getSourceMount(source string) (string, string, error) { + // Ensure any symlinks are resolved. + sourcePath, err := filepath.EvalSymlinks(source) + if err != nil { + return "", "", err + } + + mountinfos, err := mount.GetMounts() + if err != nil { + return "", "", err + } + + mountinfo := getMountInfo(mountinfos, sourcePath) + if mountinfo != nil { + return sourcePath, mountinfo.Optional, nil + } + + path := sourcePath + for { + path = filepath.Dir(path) + + mountinfo = getMountInfo(mountinfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +// Ensure mount point on which path is mounted, is shared. +func ensureShared(path string) error { + sharedMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } + } + + if !sharedMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) + } + return nil +} + +// Ensure mount point on which path is mounted, is either shared or slave. +func ensureSharedOrSlave(path string) error { + sharedMount := false + slaveMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } else if strings.HasPrefix(opt, "master:") { + slaveMount = true + break + } + } + + if !sharedMount && !slaveMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) + } + return nil +} + +var ( + mountPropagationMap = map[string]int{ + "private": mount.PRIVATE, + "rprivate": mount.RPRIVATE, + "shared": mount.SHARED, + "rshared": mount.RSHARED, + "slave": mount.SLAVE, + "rslave": mount.RSLAVE, + } + + mountPropagationReverseMap = map[int]string{ + mount.PRIVATE: "private", + mount.RPRIVATE: "rprivate", + mount.SHARED: "shared", + mount.RSHARED: "rshared", + mount.SLAVE: "slave", + mount.RSLAVE: "rslave", + } +) + +func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { + userMounts := make(map[string]struct{}) + for _, m := range mounts { + userMounts[m.Destination] = struct{}{} + } + + // Filter out mounts that are overridden by user supplied mounts + var defaultMounts []specs.Mount + _, mountDev := userMounts["/dev"] + for _, m := range s.Mounts { + if _, ok := userMounts[m.Destination]; !ok { + if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + continue + } + defaultMounts = append(defaultMounts, m) + } + } + + s.Mounts = defaultMounts + for _, m := range mounts { + for _, cm := range s.Mounts { + if cm.Destination == m.Destination { + return fmt.Errorf("Duplicate mount point '%s'", m.Destination) + } + } + + if m.Source == "tmpfs" { + data := m.Data + options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)} + if data != "" { + options = append(options, strings.Split(data, ",")...) + } + + merged, err := mount.MergeTmpfsOptions(options) + if err != nil { + return err + } + + s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged}) + continue + } + + mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} + + // Determine property of RootPropagation based on volume + // properties. If a volume is shared, then keep root propagation + // shared. This should work for slave and private volumes too. + // + // For slave volumes, it can be either [r]shared/[r]slave. + // + // For private volumes any root propagation value should work. + pFlag := mountPropagationMap[m.Propagation] + if pFlag == mount.SHARED || pFlag == mount.RSHARED { + if err := ensureShared(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] + } + } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { + if err := ensureSharedOrSlave(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + } + } + + opts := []string{"rbind"} + if !m.Writable { + opts = append(opts, "ro") + } + if pFlag != 0 { + opts = append(opts, mountPropagationReverseMap[pFlag]) + } + + mt.Options = opts + s.Mounts = append(s.Mounts, mt) + } + + if s.Root.Readonly { + for i, m := range s.Mounts { + switch m.Destination { + case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc + continue + } + if _, ok := userMounts[m.Destination]; !ok { + if !stringutils.InSlice(m.Options, "ro") { + s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") + } + } + } + } + + if c.HostConfig.Privileged { + if !s.Root.Readonly { + // clear readonly for /sys + for i := range s.Mounts { + if s.Mounts[i].Destination == "/sys" { + clearReadOnly(&s.Mounts[i]) + } + } + } + s.Linux.ReadonlyPaths = nil + s.Linux.MaskedPaths = nil + } + + // TODO: until a kernel/mount solution exists for handling remount in a user namespace, + // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) + if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { + for i, m := range s.Mounts { + if m.Type == "cgroup" { + clearReadOnly(&s.Mounts[i]) + } + } + } + + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: c.BaseFS, + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Args = append([]string{c.Path}, c.Args...) + + // only add the custom init if it is specified and the container is running in its + // own private pid namespace. It does not make sense to add if it is running in the + // host namespace or another container's pid namespace where we already have an init + if c.HostConfig.PidMode.IsPrivate() { + if (c.HostConfig.Init != nil && *c.HostConfig.Init) || + (c.HostConfig.Init == nil && daemon.configStore.Init) { + s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...) + var path string + if daemon.configStore.InitPath == "" && c.HostConfig.InitPath == "" { + path, err = exec.LookPath(DefaultInitBinary) + if err != nil { + return err + } + } + if daemon.configStore.InitPath != "" { + path = daemon.configStore.InitPath + } + if c.HostConfig.InitPath != "" { + path = c.HostConfig.InitPath + } + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/dev/init", + Type: "bind", + Source: path, + Options: []string{"bind", "ro"}, + }) + } + } + s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + var cgroupsPath string + scopePrefix := "docker" + parent := "/docker" + useSystemd := UsingSystemd(daemon.configStore) + if useSystemd { + parent = "system.slice" + } + + if c.HostConfig.CgroupParent != "" { + parent = c.HostConfig.CgroupParent + } else if daemon.configStore.CgroupParent != "" { + parent = daemon.configStore.CgroupParent + } + + if useSystemd { + cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID + logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) + } else { + cgroupsPath = filepath.Join(parent, c.ID) + } + s.Linux.CgroupsPath = &cgroupsPath + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("linux runtime spec resources: %v", err) + } + s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj + s.Linux.Sysctl = c.HostConfig.Sysctls + + p := *s.Linux.CgroupsPath + if useSystemd { + initPath, err := cgroups.GetInitCgroupDir("cpu") + if err != nil { + return nil, err + } + p, _ = cgroups.GetThisCgroupDir("cpu") + if err != nil { + return nil, err + } + p = filepath.Join(initPath, p) + } + + // Clean path to guard against things like ../../../BAD + parentPath := filepath.Dir(p) + if !filepath.IsAbs(parentPath) { + parentPath = filepath.Clean("/" + parentPath) + } + + if err := daemon.initCgroupsPath(parentPath); err != nil { + return nil, fmt.Errorf("linux init cgroups path: %v", err) + } + if err := setDevices(&s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec devices: %v", err) + } + if err := setRlimits(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) + } + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("linux spec user: %v", err) + } + if err := setNamespaces(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux spec namespaces: %v", err) + } + if err := setCapabilities(&s, c); err != nil { + return nil, fmt.Errorf("linux spec capabilities: %v", err) + } + if err := setSeccomp(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux seccomp: %v", err) + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + if err := daemon.setupSecretDir(c); err != nil { + return nil, err + } + + ms, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + + ms = append(ms, c.IpcMounts()...) + + tmpfsMounts, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + ms = append(ms, tmpfsMounts...) + + if m := c.SecretMount(); m != nil { + ms = append(ms, *m) + } + + sort.Sort(mounts(ms)) + if err := setMounts(daemon, &s, c, ms); err != nil { + return nil, fmt.Errorf("linux mounts: %v", err) + } + + for _, ns := range s.Linux.Namespaces { + if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { + target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) + if err != nil { + return nil, err + } + + s.Hooks = specs.Hooks{ + Prestart: []specs.Hook{{ + Path: target, // FIXME: cross-platform + Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, + }}, + } + } + } + + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. + if err := ensureDefaultAppArmorProfile(); err != nil { + return nil, err + } + } + + s.Process.ApparmorProfile = appArmorProfile + } + s.Process.SelinuxLabel = c.GetProcessLabel() + s.Process.NoNewPrivileges = c.NoNewPrivileges + s.Linux.MountLabel = c.MountLabel + + return (*specs.Spec)(&s), nil +} + +func clearReadOnly(m *specs.Mount) { + var opt []string + for _, o := range m.Options { + if o != "ro" { + opt = append(opt, o) + } + } + m.Options = opt +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + ulimits := c.Ulimits + // Merge ulimits with daemon defaults + ulIdx := make(map[string]struct{}) + for _, ul := range ulimits { + ulIdx[ul.Name] = struct{}{} + } + for name, ul := range daemon.configStore.Ulimits { + if _, exists := ulIdx[name]; !exists { + ulimits = append(ulimits, ul) + } + } + c.Ulimits = ulimits +} diff --git a/vendor/github.com/docker/docker/daemon/oci_solaris.go b/vendor/github.com/docker/docker/daemon/oci_solaris.go new file mode 100644 index 0000000..0c757f9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_solaris.go @@ -0,0 +1,188 @@ +package daemon + +import ( + "fmt" + "path/filepath" + "sort" + "strconv" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/libnetwork" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + mem := getMemoryResources(r) + s.Solaris.CappedMemory = &mem + + capCPU := getCPUResources(r) + s.Solaris.CappedCPU = &capCPU + + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + return 0, 0, nil, nil +} + +func (daemon *Daemon) getRunzAnet(ep libnetwork.Endpoint) (specs.Anet, error) { + var ( + linkName string + lowerLink string + defRouter string + ) + + epInfo := ep.Info() + if epInfo == nil { + return specs.Anet{}, fmt.Errorf("invalid endpoint") + } + + nw, err := daemon.GetNetworkByName(ep.Network()) + if err != nil { + return specs.Anet{}, fmt.Errorf("Failed to get network %s: %v", ep.Network(), err) + } + + // Evaluate default router, linkname and lowerlink for interface endpoint + switch nw.Type() { + case "bridge": + defRouter = epInfo.Gateway().String() + linkName = "net0" // Should always be net0 for a container + + // TODO We construct lowerlink here exactly as done for solaris bridge + // initialization. Need modular code to reuse. + options := nw.Info().DriverOptions() + nwName := options["com.docker.network.bridge.name"] + lastChar := nwName[len(nwName)-1:] + if _, err = strconv.Atoi(lastChar); err != nil { + lowerLink = nwName + "_0" + } else { + lowerLink = nwName + } + + case "overlay": + defRouter = "" + linkName = "net1" + + // TODO Follows generateVxlanName() in solaris overlay. + id := nw.ID() + if len(nw.ID()) > 12 { + id = nw.ID()[:12] + } + lowerLink = "vx_" + id + "_0" + } + + runzanet := specs.Anet{ + Linkname: linkName, + Lowerlink: lowerLink, + Allowedaddr: epInfo.Iface().Address().String(), + Configallowedaddr: "true", + Defrouter: defRouter, + Linkprotection: "mac-nospoof, ip-nospoof", + Macaddress: epInfo.Iface().MacAddress().String(), + } + + return runzanet, nil +} + +func (daemon *Daemon) setNetworkInterface(s *specs.Spec, c *container.Container) error { + var anets []specs.Anet + + sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID) + if err != nil { + return fmt.Errorf("Could not obtain sandbox for container") + } + + // Populate interfaces required for each endpoint + for _, ep := range sb.Endpoints() { + runzanet, err := daemon.getRunzAnet(ep) + if err != nil { + return fmt.Errorf("Failed to get interface information for endpoint %d: %v", ep.ID(), err) + } + anets = append(anets, runzanet) + } + + s.Solaris.Anet = anets + if anets != nil { + s.Solaris.Milestone = "svc:/milestone/container:default" + } + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: filepath.Dir(c.BaseFS), + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + s.Process.Args = append([]string{c.Path}, c.Args...) + s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("runtime spec resources: %v", err) + } + + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("spec user: %v", err) + } + + if err := daemon.setNetworkInterface(&s, c); err != nil { + return nil, err + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + ms, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + ms = append(ms, c.IpcMounts()...) + tmpfsMounts, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + ms = append(ms, tmpfsMounts...) + sort.Sort(mounts(ms)) + + return (*specs.Spec)(&s), nil +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +// It will do nothing on non-Linux platform +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/vendor/github.com/docker/docker/daemon/oci_windows.go b/vendor/github.com/docker/docker/daemon/oci_windows.go new file mode 100644 index 0000000..6e26424 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_windows.go @@ -0,0 +1,122 @@ +package daemon + +import ( + "syscall" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/sysinfo" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return nil, err + } + + // Note, unlike Unix, we do NOT call into SetupWorkingDirectory as + // this is done in VMCompute. Further, we couldn't do it for Hyper-V + // containers anyway. + + // In base spec + s.Hostname = c.FullHostname() + + // In s.Mounts + mounts, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + for _, mount := range mounts { + m := specs.Mount{ + Source: mount.Source, + Destination: mount.Destination, + } + if !mount.Writable { + m.Options = append(m.Options, "ro") + } + s.Mounts = append(s.Mounts, m) + } + + // In s.Process + s.Process.Args = append([]string{c.Path}, c.Args...) + if !c.Config.ArgsEscaped { + s.Process.Args = escapeArgs(s.Process.Args) + } + s.Process.Cwd = c.Config.WorkingDir + if len(s.Process.Cwd) == 0 { + // We default to C:\ to workaround the oddity of the case that the + // default directory for cmd running as LocalSystem (or + // ContainerAdministrator) is c:\windows\system32. Hence docker run + // cmd will by default end in c:\windows\system32, rather + // than 'root' (/) on Linux. The oddity is that if you have a dockerfile + // which has no WORKDIR and has a COPY file ., . will be interpreted + // as c:\. Hence, setting it to default of c:\ makes for consistency. + s.Process.Cwd = `C:\` + } + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0] + s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1] + s.Process.Terminal = c.Config.Tty + s.Process.User.Username = c.Config.User + + // In spec.Root. This is not set for Hyper-V containers + isHyperV := false + if c.HostConfig.Isolation.IsDefault() { + // Container using default isolation, so take the default from the daemon configuration + isHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container may be requesting an explicit isolation mode. + isHyperV = c.HostConfig.Isolation.IsHyperV() + } + if !isHyperV { + s.Root.Path = c.BaseFS + } + s.Root.Readonly = false // Windows does not support a read-only root filesystem + + // In s.Windows.Resources + // @darrenstahlmsft implement these resources + cpuShares := uint16(c.HostConfig.CPUShares) + cpuPercent := uint8(c.HostConfig.CPUPercent) + if c.HostConfig.NanoCPUs > 0 { + cpuPercent = uint8(c.HostConfig.NanoCPUs * 100 / int64(sysinfo.NumCPU()) / 1e9) + } + cpuCount := uint64(c.HostConfig.CPUCount) + memoryLimit := uint64(c.HostConfig.Memory) + s.Windows.Resources = &specs.WindowsResources{ + CPU: &specs.WindowsCPUResources{ + Percent: &cpuPercent, + Shares: &cpuShares, + Count: &cpuCount, + }, + Memory: &specs.WindowsMemoryResources{ + Limit: &memoryLimit, + //TODO Reservation: ..., + }, + Network: &specs.WindowsNetworkResources{ + //TODO Bandwidth: ..., + }, + Storage: &specs.WindowsStorageResources{ + Bps: &c.HostConfig.IOMaximumBandwidth, + Iops: &c.HostConfig.IOMaximumIOps, + }, + } + return (*specs.Spec)(&s), nil +} + +func escapeArgs(args []string) []string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = syscall.EscapeArg(a) + } + return escapedArgs +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +// It will do nothing on non-Linux platform +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/vendor/github.com/docker/docker/daemon/pause.go b/vendor/github.com/docker/docker/daemon/pause.go new file mode 100644 index 0000000..dbfafbc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/pause.go @@ -0,0 +1,49 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerPause pauses a container +func (daemon *Daemon) ContainerPause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerPause(container); err != nil { + return err + } + + return nil +} + +// containerPause pauses the container execution without stopping the process. +// The execution can be resumed by calling containerUnpause. +func (daemon *Daemon) containerPause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot Pause the container which is not running + if !container.Running { + return errNotRunning{container.ID} + } + + // We cannot Pause the container which is already paused + if container.Paused { + return fmt.Errorf("Container %s is already paused", container.ID) + } + + // We cannot Pause the container which is restarting + if container.Restarting { + return errContainerIsRestarting(container.ID) + } + + if err := daemon.containerd.Pause(container.ID); err != nil { + return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/prune.go b/vendor/github.com/docker/docker/daemon/prune.go new file mode 100644 index 0000000..a693beb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/prune.go @@ -0,0 +1,236 @@ +package daemon + +import ( + "fmt" + "regexp" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/libnetwork" +) + +// ContainersPrune removes unused containers +func (daemon *Daemon) ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) { + rep := &types.ContainersPruneReport{} + + allContainers := daemon.List() + for _, c := range allContainers { + if !c.IsRunning() { + cSize, _ := daemon.getSize(c) + // TODO: sets RmLink to true? + err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) + if err != nil { + logrus.Warnf("failed to prune container %s: %v", c.ID, err) + continue + } + if cSize > 0 { + rep.SpaceReclaimed += uint64(cSize) + } + rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) + } + } + + return rep, nil +} + +// VolumesPrune removes unused local volumes +func (daemon *Daemon) VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) { + rep := &types.VolumesPruneReport{} + + pruneVols := func(v volume.Volume) error { + name := v.Name() + refs := daemon.volumes.Refs(v) + + if len(refs) == 0 { + vSize, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("could not determine size of volume %s: %v", name, err) + } + err = daemon.volumes.Remove(v) + if err != nil { + logrus.Warnf("could not remove volume %s: %v", name, err) + return nil + } + rep.SpaceReclaimed += uint64(vSize) + rep.VolumesDeleted = append(rep.VolumesDeleted, name) + } + + return nil + } + + err := daemon.traverseLocalVolumes(pruneVols) + + return rep, err +} + +// ImagesPrune removes unused images +func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) { + rep := &types.ImagesPruneReport{} + + danglingOnly := true + if pruneFilters.Include("dangling") { + if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { + danglingOnly = false + } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling")) + } + } + + var allImages map[image.ID]*image.Image + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + allContainers := daemon.List() + imageRefs := map[string]bool{} + for _, c := range allContainers { + imageRefs[c.ID] = true + } + + // Filter intermediary images and get their unique size + allLayers := daemon.layerStore.Map() + topImages := map[image.ID]*image.Image{} + for id, img := range allImages { + dgst := digest.Digest(id) + if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { + continue + } + topImages[id] = img + } + + for id := range topImages { + dgst := digest.Digest(id) + hex := dgst.Hex() + if _, ok := imageRefs[hex]; ok { + continue + } + + deletedImages := []types.ImageDelete{} + refs := daemon.referenceStore.References(dgst) + if len(refs) > 0 { + if danglingOnly { + // Not a dangling image + continue + } + + nrRefs := len(refs) + for _, ref := range refs { + // If nrRefs == 1, we have an image marked as myreponame: + // i.e. the tag content was changed + if _, ok := ref.(reference.Canonical); ok && nrRefs > 1 { + continue + } + imgDel, err := daemon.ImageDelete(ref.String(), false, true) + if err != nil { + logrus.Warnf("could not delete reference %s: %v", ref.String(), err) + continue + } + deletedImages = append(deletedImages, imgDel...) + } + } else { + imgDel, err := daemon.ImageDelete(hex, false, true) + if err != nil { + logrus.Warnf("could not delete image %s: %v", hex, err) + continue + } + deletedImages = append(deletedImages, imgDel...) + } + + rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...) + } + + // Compute how much space was freed + for _, d := range rep.ImagesDeleted { + if d.Deleted != "" { + chid := layer.ChainID(d.Deleted) + if l, ok := allLayers[chid]; ok { + diffSize, err := l.DiffSize() + if err != nil { + logrus.Warnf("failed to get layer %s size: %v", chid, err) + continue + } + rep.SpaceReclaimed += uint64(diffSize) + } + } + } + + return rep, nil +} + +// localNetworksPrune removes unused local networks +func (daemon *Daemon) localNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + var err error + // When the function returns true, the walk will stop. + l := func(nw libnetwork.Network) bool { + nwName := nw.Name() + predefined := runconfig.IsPreDefinedNetwork(nwName) + if !predefined && len(nw.Endpoints()) == 0 { + if err = daemon.DeleteNetwork(nw.ID()); err != nil { + logrus.Warnf("could not remove network %s: %v", nwName, err) + return false + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nwName) + } + return false + } + daemon.netController.WalkNetworks(l) + return rep, err +} + +// clusterNetworksPrune removes unused cluster networks +func (daemon *Daemon) clusterNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + cluster := daemon.GetCluster() + networks, err := cluster.GetNetworks() + if err != nil { + return rep, err + } + networkIsInUse := regexp.MustCompile(`network ([[:alnum:]]+) is in use`) + for _, nw := range networks { + if nw.Name == "ingress" { + continue + } + // https://github.com/docker/docker/issues/24186 + // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. + // So we try to remove it anyway and check the error + err = cluster.RemoveNetwork(nw.ID) + if err != nil { + // we can safely ignore the "network .. is in use" error + match := networkIsInUse.FindStringSubmatch(err.Error()) + if len(match) != 2 || match[1] != nw.ID { + logrus.Warnf("could not remove network %s: %v", nw.Name, err) + } + continue + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) + } + return rep, nil +} + +// NetworksPrune removes unused networks +func (daemon *Daemon) NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + clusterRep, err := daemon.clusterNetworksPrune(pruneFilters) + if err != nil { + logrus.Warnf("could not remove cluster networks: %v", err) + } else { + rep.NetworksDeleted = append(rep.NetworksDeleted, clusterRep.NetworksDeleted...) + } + localRep, err := daemon.localNetworksPrune(pruneFilters) + if err != nil { + logrus.Warnf("could not remove local networks: %v", err) + } else { + rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...) + } + return rep, err +} diff --git a/vendor/github.com/docker/docker/daemon/rename.go b/vendor/github.com/docker/docker/daemon/rename.go new file mode 100644 index 0000000..ffb7715 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/rename.go @@ -0,0 +1,122 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + dockercontainer "github.com/docker/docker/container" + "github.com/docker/libnetwork" +) + +// ContainerRename changes the name of a container, using the oldName +// to find the container. An error is returned if newName is already +// reserved. +func (daemon *Daemon) ContainerRename(oldName, newName string) error { + var ( + sid string + sb libnetwork.Sandbox + ) + + if oldName == "" || newName == "" { + return fmt.Errorf("Neither old nor new names may be empty") + } + + if newName[0] != '/' { + newName = "/" + newName + } + + container, err := daemon.GetContainer(oldName) + if err != nil { + return err + } + + oldName = container.Name + oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint + + if oldName == newName { + return fmt.Errorf("Renaming a container with the same name as its current name") + } + + container.Lock() + defer container.Unlock() + + links := map[string]*dockercontainer.Container{} + for k, v := range daemon.linkIndex.children(container) { + if !strings.HasPrefix(k, oldName) { + return fmt.Errorf("Linked container %s does not match parent %s", k, oldName) + } + links[strings.TrimPrefix(k, oldName)] = v + } + + if newName, err = daemon.reserveName(container.ID, newName); err != nil { + return fmt.Errorf("Error when allocating new name: %v", err) + } + + for k, v := range links { + daemon.nameIndex.Reserve(newName+k, v.ID) + daemon.linkIndex.link(container, v, newName+k) + } + + container.Name = newName + container.NetworkSettings.IsAnonymousEndpoint = false + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + daemon.reserveName(container.ID, oldName) + for k, v := range links { + daemon.nameIndex.Reserve(oldName+k, v.ID) + daemon.linkIndex.link(container, v, oldName+k) + daemon.linkIndex.unlink(newName+k, v, container) + daemon.nameIndex.Release(newName + k) + } + daemon.releaseName(newName) + } + }() + + for k, v := range links { + daemon.linkIndex.unlink(oldName+k, v, container) + daemon.nameIndex.Release(oldName + k) + } + daemon.releaseName(oldName) + if err = container.ToDisk(); err != nil { + return err + } + + attributes := map[string]string{ + "oldName": oldName, + } + + if !container.Running { + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil + } + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + if e := container.ToDisk(); e != nil { + logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + } + } + }() + + sid = container.NetworkSettings.SandboxID + if daemon.netController != nil { + sb, err = daemon.netController.SandboxByID(sid) + if err != nil { + return err + } + + err = sb.Rename(strings.TrimPrefix(container.Name, "/")) + if err != nil { + return err + } + } + + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/resize.go b/vendor/github.com/docker/docker/daemon/resize.go new file mode 100644 index 0000000..7473538 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/resize.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/libcontainerd" +) + +// ContainerResize changes the size of the TTY of the process running +// in the container with the given name to the given height and width. +func (daemon *Daemon) ContainerResize(name string, height, width int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { + attributes := map[string]string{ + "height": fmt.Sprintf("%d", height), + "width": fmt.Sprintf("%d", width), + } + daemon.LogContainerEventWithAttributes(container, "resize", attributes) + } + return err +} + +// ContainerExecResize changes the size of the TTY of the process +// running in the exec with the given name to the given height and +// width. +func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { + ec, err := daemon.getExecConfig(name) + if err != nil { + return err + } + return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) +} diff --git a/vendor/github.com/docker/docker/daemon/restart.go b/vendor/github.com/docker/docker/daemon/restart.go new file mode 100644 index 0000000..79292f3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/restart.go @@ -0,0 +1,70 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" +) + +// ContainerRestart stops and starts a container. It attempts to +// gracefully stop the container within the given timeout, forcefully +// stopping it if the timeout is exceeded. If given a negative +// timeout, ContainerRestart will wait forever until a graceful +// stop. Returns an error if the container cannot be found, or if +// there is an underlying error at any stage of the restart. +func (daemon *Daemon) ContainerRestart(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerRestart(container, *seconds); err != nil { + return fmt.Errorf("Cannot restart container %s: %v", name, err) + } + return nil + +} + +// containerRestart attempts to gracefully stop and then start the +// container. When stopping, wait for the given duration in seconds to +// gracefully stop, before forcefully terminating the container. If +// given a negative duration, wait forever for a graceful stop. +func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := daemon.Mount(container); err == nil { + defer daemon.Unmount(container) + } + + if container.IsRunning() { + // set AutoRemove flag to false before stop so the container won't be + // removed during restart process + autoRemove := container.HostConfig.AutoRemove + + container.HostConfig.AutoRemove = false + err := daemon.containerStop(container, seconds) + // restore AutoRemove irrespective of whether the stop worked or not + container.HostConfig.AutoRemove = autoRemove + // containerStop will write HostConfig to disk, we shall restore AutoRemove + // in disk too + if toDiskErr := container.ToDiskLocking(); toDiskErr != nil { + logrus.Errorf("Write container to disk error: %v", toDiskErr) + } + + if err != nil { + return err + } + } + + if err := daemon.containerStart(container, "", "", true); err != nil { + return err + } + + daemon.LogContainerEvent(container, "restart") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/search.go b/vendor/github.com/docker/docker/daemon/search.go new file mode 100644 index 0000000..5d2ac5d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/search.go @@ -0,0 +1,94 @@ +package daemon + +import ( + "fmt" + "strconv" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/dockerversion" +) + +var acceptedSearchFilterTags = map[string]bool{ + "is-automated": true, + "is-official": true, + "stars": true, +} + +// SearchRegistryForImages queries the registry for images matching +// term. authConfig is used to login. +func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, + authConfig *types.AuthConfig, + headers map[string][]string) (*registrytypes.SearchResults, error) { + + searchFilters, err := filters.FromParam(filtersArgs) + if err != nil { + return nil, err + } + if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { + return nil, err + } + + var isAutomated, isOfficial bool + var hasStarFilter = 0 + if searchFilters.Include("is-automated") { + if searchFilters.UniqueExactMatch("is-automated", "true") { + isAutomated = true + } else if !searchFilters.UniqueExactMatch("is-automated", "false") { + return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) + } + } + if searchFilters.Include("is-official") { + if searchFilters.UniqueExactMatch("is-official", "true") { + isOfficial = true + } else if !searchFilters.UniqueExactMatch("is-official", "false") { + return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) + } + } + if searchFilters.Include("stars") { + hasStars := searchFilters.Get("stars") + for _, hasStar := range hasStars { + iHasStar, err := strconv.Atoi(hasStar) + if err != nil { + return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) + } + if iHasStar > hasStarFilter { + hasStarFilter = iHasStar + } + } + } + + unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) + if err != nil { + return nil, err + } + + filteredResults := []registrytypes.SearchResult{} + for _, result := range unfilteredResult.Results { + if searchFilters.Include("is-automated") { + if isAutomated != result.IsAutomated { + continue + } + } + if searchFilters.Include("is-official") { + if isOfficial != result.IsOfficial { + continue + } + } + if searchFilters.Include("stars") { + if result.StarCount < hasStarFilter { + continue + } + } + filteredResults = append(filteredResults, result) + } + + return ®istrytypes.SearchResults{ + Query: unfilteredResult.Query, + NumResults: len(filteredResults), + Results: filteredResults, + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/search_test.go b/vendor/github.com/docker/docker/daemon/search_test.go new file mode 100644 index 0000000..f5aa85a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/search_test.go @@ -0,0 +1,358 @@ +package daemon + +import ( + "fmt" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/registry" +) + +type FakeService struct { + registry.DefaultService + + shouldReturnError bool + + term string + results []registrytypes.SearchResult +} + +func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { + if s.shouldReturnError { + return nil, fmt.Errorf("Search unknown error") + } + return ®istrytypes.SearchResults{ + Query: s.term, + NumResults: len(s.results), + Results: s.results, + }, nil +} + +func TestSearchRegistryForImagesErrors(t *testing.T) { + errorCases := []struct { + filtersArgs string + shouldReturnError bool + expectedError string + }{ + { + expectedError: "Search unknown error", + shouldReturnError: true, + }, + { + filtersArgs: "invalid json", + expectedError: "invalid character 'i' looking for beginning of value", + }, + { + filtersArgs: `{"type":{"custom":true}}`, + expectedError: "Invalid filter 'type'", + }, + { + filtersArgs: `{"is-automated":{"invalid":true}}`, + expectedError: "Invalid filter 'is-automated=[invalid]'", + }, + { + filtersArgs: `{"is-automated":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-automated", + }, + { + filtersArgs: `{"is-official":{"invalid":true}}`, + expectedError: "Invalid filter 'is-official=[invalid]'", + }, + { + filtersArgs: `{"is-official":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-official", + }, + { + filtersArgs: `{"stars":{"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + { + filtersArgs: `{"stars":{"1":true,"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + } + for index, e := range errorCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + shouldReturnError: e.shouldReturnError, + }, + } + _, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{}) + if err == nil { + t.Errorf("%d: expected an error, got nothing", index) + } + if !strings.Contains(err.Error(), e.expectedError) { + t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error()) + } + } +} + +func TestSearchRegistryForImages(t *testing.T) { + term := "term" + successCases := []struct { + filtersArgs string + registryResults []registrytypes.SearchResult + expectedResults []registrytypes.SearchResult + }{ + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{}, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + }, + { + filtersArgs: `{"stars":{"0":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + IsOfficial: true, + IsAutomated: true, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + IsOfficial: false, + IsAutomated: true, + }, + { + Name: "name2", + Description: "description2", + StarCount: 1, + IsOfficial: true, + IsAutomated: false, + }, + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + }, + } + for index, s := range successCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + term: term, + results: s.registryResults, + }, + } + results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{}) + if err != nil { + t.Errorf("%d: %v", index, err) + } + if results.Query != term { + t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query) + } + if results.NumResults != len(s.expectedResults) { + t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults) + } + for _, result := range results.Results { + found := false + for _, expectedResult := range s.expectedResults { + if expectedResult.Name == result.Name && + expectedResult.Description == result.Description && + expectedResult.IsAutomated == result.IsAutomated && + expectedResult.IsOfficial == result.IsOfficial && + expectedResult.StarCount == result.StarCount { + found = true + break + } + } + if !found { + t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results) + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_disabled.go b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go new file mode 100644 index 0000000..ff1127b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go @@ -0,0 +1,19 @@ +// +build linux,!seccomp + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = false + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile") + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_linux.go b/vendor/github.com/docker/docker/daemon/seccomp_linux.go new file mode 100644 index 0000000..7f16733 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_linux.go @@ -0,0 +1,55 @@ +// +build linux,seccomp + +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/profiles/seccomp" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = true + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + var profile *specs.Seccomp + var err error + + if c.HostConfig.Privileged { + return nil + } + + if !daemon.seccompEnabled { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") + } + logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") + c.SeccompProfile = "unconfined" + } + if c.SeccompProfile == "unconfined" { + return nil + } + if c.SeccompProfile != "" { + profile, err = seccomp.LoadProfile(c.SeccompProfile, rs) + if err != nil { + return err + } + } else { + if daemon.seccompProfile != nil { + profile, err = seccomp.LoadProfile(string(daemon.seccompProfile), rs) + if err != nil { + return err + } + } else { + profile, err = seccomp.GetDefaultProfile(rs) + if err != nil { + return err + } + } + } + + rs.Linux.Seccomp = profile + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go b/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go new file mode 100644 index 0000000..b3691e9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux + +package daemon + +var supportsSeccomp = false diff --git a/vendor/github.com/docker/docker/daemon/secrets.go b/vendor/github.com/docker/docker/daemon/secrets.go new file mode 100644 index 0000000..355cb1e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/secrets.go @@ -0,0 +1,36 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/swarmkit/agent/exec" +) + +// SetContainerSecretStore sets the secret store backend for the container +func (daemon *Daemon) SetContainerSecretStore(name string, store exec.SecretGetter) error { + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.SecretStore = store + + return nil +} + +// SetContainerSecretReferences sets the container secret references needed +func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error { + if !secretsSupported() && len(refs) > 0 { + logrus.Warn("secrets are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.SecretReferences = refs + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/secrets_linux.go b/vendor/github.com/docker/docker/daemon/secrets_linux.go new file mode 100644 index 0000000..fca4e12 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/secrets_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package daemon + +func secretsSupported() bool { + return true +} diff --git a/vendor/github.com/docker/docker/daemon/secrets_unsupported.go b/vendor/github.com/docker/docker/daemon/secrets_unsupported.go new file mode 100644 index 0000000..d6f36fd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/secrets_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package daemon + +func secretsSupported() bool { + return false +} diff --git a/vendor/github.com/docker/docker/daemon/selinux_linux.go b/vendor/github.com/docker/docker/daemon/selinux_linux.go new file mode 100644 index 0000000..83a3447 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/selinux_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/opencontainers/runc/libcontainer/selinux" + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} + +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() +} diff --git a/vendor/github.com/docker/docker/daemon/selinux_unsupported.go b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go new file mode 100644 index 0000000..25a56ad --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} + +func selinuxEnabled() bool { + return false +} diff --git a/vendor/github.com/docker/docker/daemon/start.go b/vendor/github.com/docker/docker/daemon/start.go new file mode 100644 index 0000000..6c94fd5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/start.go @@ -0,0 +1,230 @@ +package daemon + +import ( + "fmt" + "net/http" + "runtime" + "strings" + "syscall" + "time" + + "google.golang.org/grpc" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/runconfig" +) + +// ContainerStart starts a container. +func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { + if checkpoint != "" && !daemon.HasExperimental() { + return apierrors.NewBadRequestError(fmt.Errorf("checkpoint is only supported in experimental mode")) + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if container.IsPaused() { + return fmt.Errorf("Cannot start a paused container, try unpause instead.") + } + + if container.IsRunning() { + err := fmt.Errorf("Container already started") + return apierrors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + + // Windows does not have the backwards compatibility issue here. + if runtime.GOOS != "windows" { + // This is kept for backward compatibility - hostconfig should be passed when + // creating a container, not during start. + if hostConfig != nil { + logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") + oldNetworkMode := container.HostConfig.NetworkMode + if err := daemon.setSecurityOptions(container, hostConfig); err != nil { + return err + } + if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil { + return err + } + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return err + } + newNetworkMode := container.HostConfig.NetworkMode + if string(oldNetworkMode) != string(newNetworkMode) { + // if user has change the network mode on starting, clean up the + // old networks. It is a deprecated feature and has been removed in Docker 1.12 + container.NetworkSettings.Networks = nil + if err := container.ToDisk(); err != nil { + return err + } + } + container.InitDNSHostConfig() + } + } else { + if hostConfig != nil { + return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") + } + } + + // check if hostConfig is in line with the current system settings. + // It may happen cgroups are umounted or the like. + if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil { + return err + } + // Adapt for old containers in case we have updates in this function and + // old containers never have chance to call the new function in create stage. + if hostConfig != nil { + if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { + return err + } + } + + return daemon.containerStart(container, checkpoint, checkpointDir, true) +} + +// Start starts a container +func (daemon *Daemon) Start(container *container.Container) error { + return daemon.containerStart(container, "", "", true) +} + +// containerStart prepares the container to run by setting up everything the +// container needs, such as storage and networking, as well as links +// between containers. The container is left waiting for a signal to +// begin running. +func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) { + start := time.Now() + container.Lock() + defer container.Unlock() + + if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false + return nil + } + + if container.RemovalInProgress || container.Dead { + return fmt.Errorf("Container is marked for removal and cannot be started.") + } + + // if we encounter an error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.SetError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode() == 0 { + container.SetExitCode(128) + } + container.ToDisk() + + container.Reset(false) + + daemon.Cleanup(container) + // if containers AutoRemove flag is set, remove it after clean up + if container.HostConfig.AutoRemove { + container.Unlock() + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("can't remove container %s: %v", container.ID, err) + } + container.Lock() + } + } + }() + + if err := daemon.conditionalMountOnStart(container); err != nil { + return err + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. + container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + if err := daemon.initializeNetworking(container); err != nil { + return err + } + + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + createOptions, err := daemon.getLibcontainerdCreateOptions(container) + if err != nil { + return err + } + + if resetRestartManager { + container.ResetRestartManager(true) + } + + if checkpointDir == "" { + checkpointDir = container.CheckpointDir() + } + + if err := daemon.containerd.Create(container.ID, checkpoint, checkpointDir, *spec, container.InitializeStdio, createOptions...); err != nil { + errDesc := grpc.ErrorDesc(err) + contains := func(s1, s2 string) bool { + return strings.Contains(strings.ToLower(s1), s2) + } + logrus.Errorf("Create container failed with error: %s", errDesc) + // if we receive an internal error from the initial start of a container then lets + // return it instead of entering the restart loop + // set to 127 for container cmd not found/does not exist) + if contains(errDesc, container.Path) && + (contains(errDesc, "executable file not found") || + contains(errDesc, "no such file or directory") || + contains(errDesc, "system cannot find the file specified")) { + container.SetExitCode(127) + } + // set to 126 for container cmd can't be invoked errors + if contains(errDesc, syscall.EACCES.Error()) { + container.SetExitCode(126) + } + + // attempted to mount a file onto a directory, or a directory onto a file, maybe from user specified bind mounts + if contains(errDesc, syscall.ENOTDIR.Error()) { + errDesc += ": Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type" + container.SetExitCode(127) + } + + return fmt.Errorf("%s", errDesc) + } + + containerActions.WithValues("start").UpdateSince(start) + + return nil +} + +// Cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. +func (daemon *Daemon) Cleanup(container *container.Container) { + daemon.releaseNetwork(container) + + container.UnmountIpcMounts(detachMounted) + + if err := daemon.conditionalUnmountOnCleanup(container); err != nil { + // FIXME: remove once reference counting for graphdrivers has been refactored + // Ensure that all the mounts are gone + if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + } + + if err := container.UnmountSecrets(); err != nil { + logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err) + } + + for _, eConfig := range container.ExecCommands.Commands() { + daemon.unregisterExecCommand(container, eConfig) + } + + if container.BaseFS != "" { + if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil { + logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + } + } + container.CancelAttachContext() +} diff --git a/vendor/github.com/docker/docker/daemon/start_unix.go b/vendor/github.com/docker/docker/daemon/start_unix.go new file mode 100644 index 0000000..6bbe485 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/start_unix.go @@ -0,0 +1,31 @@ +// +build !windows + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + + // Ensure a runtime has been assigned to this container + if container.HostConfig.Runtime == "" { + container.HostConfig.Runtime = stockRuntimeName + container.ToDisk() + } + + rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime) + if rt == nil { + return nil, fmt.Errorf("no such runtime '%s'", container.HostConfig.Runtime) + } + if UsingSystemd(daemon.configStore) { + rt.Args = append(rt.Args, "--systemd-cgroup=true") + } + createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args)) + + return createOptions, nil +} diff --git a/vendor/github.com/docker/docker/daemon/start_windows.go b/vendor/github.com/docker/docker/daemon/start_windows.go new file mode 100644 index 0000000..faa7575 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/start_windows.go @@ -0,0 +1,205 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "golang.org/x/sys/windows/registry" +) + +const ( + credentialSpecRegistryLocation = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + credentialSpecFileLocation = "CredentialSpecs" +) + +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + + // Are we going to run as a Hyper-V container? + hvOpts := &libcontainerd.HyperVIsolationOption{} + if container.HostConfig.Isolation.IsDefault() { + // Container is set to use the default, so take the default from the daemon configuration + hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container is requesting an isolation mode. Honour it. + hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV() + } + + // Generate the layer folder of the layer options + layerOpts := &libcontainerd.LayerOption{} + m, err := container.RWLayer.Metadata() + if err != nil { + return nil, fmt.Errorf("failed to get layer metadata - %s", err) + } + if hvOpts.IsHyperV { + hvOpts.SandboxPath = filepath.Dir(m["dir"]) + } + + layerOpts.LayerFolderPath = m["dir"] + + // Generate the layer paths of the layer options + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err) + } + // Get the layer path for each layer. + max := len(img.RootFS.DiffIDs) + for i := 1; i <= max; i++ { + img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] + layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) + if err != nil { + return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err) + } + // Reverse order, expecting parent most first + layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...) + } + + // Get endpoints for the libnetwork allocated networks to the container + var epList []string + AllowUnqualifiedDNSQuery := false + gwHNSID := "" + if container.NetworkSettings != nil { + for n := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(n) + if err != nil { + continue + } + + ep, err := container.GetEndpointInNetwork(sn) + if err != nil { + continue + } + + data, err := ep.DriverInfo() + if err != nil { + continue + } + + if data["GW_INFO"] != nil { + gwInfo := data["GW_INFO"].(map[string]interface{}) + if gwInfo["hnsid"] != nil { + gwHNSID = gwInfo["hnsid"].(string) + } + } + + if data["hnsid"] != nil { + epList = append(epList, data["hnsid"].(string)) + } + + if data["AllowUnqualifiedDNSQuery"] != nil { + AllowUnqualifiedDNSQuery = true + } + } + } + + if gwHNSID != "" { + epList = append(epList, gwHNSID) + } + + // Read and add credentials from the security options if a credential spec has been provided. + if container.HostConfig.SecurityOpt != nil { + for _, sOpt := range container.HostConfig.SecurityOpt { + sOpt = strings.ToLower(sOpt) + if !strings.Contains(sOpt, "=") { + return nil, fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt) + } + var splitsOpt []string + splitsOpt = strings.SplitN(sOpt, "=", 2) + if len(splitsOpt) != 2 { + return nil, fmt.Errorf("invalid security option: %s", sOpt) + } + if splitsOpt[0] != "credentialspec" { + return nil, fmt.Errorf("security option not supported: %s", splitsOpt[0]) + } + + credentialsOpts := &libcontainerd.CredentialsOption{} + var ( + match bool + csValue string + err error + ) + if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for file:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecFile(container.ID, daemon.root, filepath.Clean(csValue)); err != nil { + return nil, err + } + } else if match, csValue = getCredentialSpec("registry://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for registry:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecRegistry(container.ID, csValue); err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("invalid credential spec security option - value must be prefixed file:// or registry:// followed by a value") + } + createOptions = append(createOptions, credentialsOpts) + } + } + + // Now add the remaining options. + createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore}) + createOptions = append(createOptions, hvOpts) + createOptions = append(createOptions, layerOpts) + if epList != nil { + createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{Endpoints: epList, AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery}) + } + + return createOptions, nil +} + +// getCredentialSpec is a helper function to get the value of a credential spec supplied +// on the CLI, stripping the prefix +func getCredentialSpec(prefix, value string) (bool, string) { + if strings.HasPrefix(value, prefix) { + return true, strings.TrimPrefix(value, prefix) + } + return false, "" +} + +// readCredentialSpecRegistry is a helper function to read a credential spec from +// the registry. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. +func readCredentialSpecRegistry(id, name string) (string, error) { + var ( + k registry.Key + err error + val string + ) + if k, err = registry.OpenKey(registry.LOCAL_MACHINE, credentialSpecRegistryLocation, registry.QUERY_VALUE); err != nil { + return "", fmt.Errorf("failed handling spec %q for container %s - %s could not be opened", name, id, credentialSpecRegistryLocation) + } + if val, _, err = k.GetStringValue(name); err != nil { + if err == registry.ErrNotExist { + return "", fmt.Errorf("credential spec %q for container %s as it was not found", name, id) + } + return "", fmt.Errorf("error %v reading credential spec %q from registry for container %s", err, name, id) + } + return val, nil +} + +// readCredentialSpecFile is a helper function to read a credential spec from +// a file. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. +func readCredentialSpecFile(id, root, location string) (string, error) { + if filepath.IsAbs(location) { + return "", fmt.Errorf("invalid credential spec - file:// path cannot be absolute") + } + base := filepath.Join(root, credentialSpecFileLocation) + full := filepath.Join(base, location) + if !strings.HasPrefix(full, base) { + return "", fmt.Errorf("invalid credential spec - file:// path must be under %s", base) + } + bcontents, err := ioutil.ReadFile(full) + if err != nil { + return "", fmt.Errorf("credential spec '%s' for container %s as the file could not be read: %q", full, id, err) + } + return string(bcontents[:]), nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats.go b/vendor/github.com/docker/docker/daemon/stats.go new file mode 100644 index 0000000..51f5962 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats.go @@ -0,0 +1,158 @@ +package daemon + +import ( + "encoding/json" + "errors" + "fmt" + "runtime" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerStats writes information about the container to the stream +// given in the config object. +func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error { + if runtime.GOOS == "solaris" { + return fmt.Errorf("%+v does not support stats", runtime.GOOS) + } + // Engine API version (used for backwards compatibility) + apiVersion := config.Version + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + + // If the container is either not running or restarting and requires no stream, return an empty stats. + if (!container.IsRunning() || container.IsRestarting()) && !config.Stream { + return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) + } + + outStream := config.OutStream + if config.Stream { + wf := ioutils.NewWriteFlusher(outStream) + defer wf.Close() + wf.Flush() + outStream = wf + } + + var preCPUStats types.CPUStats + var preRead time.Time + getStatJSON := func(v interface{}) *types.StatsJSON { + ss := v.(types.StatsJSON) + ss.Name = container.Name + ss.ID = container.ID + ss.PreCPUStats = preCPUStats + ss.PreRead = preRead + preCPUStats = ss.CPUStats + preRead = ss.Read + return &ss + } + + enc := json.NewEncoder(outStream) + + updates := daemon.subscribeToContainerStats(container) + defer daemon.unsubscribeToContainerStats(container, updates) + + noStreamFirstFrame := true + for { + select { + case v, ok := <-updates: + if !ok { + return nil + } + + var statsJSON interface{} + statsJSONPost120 := getStatJSON(v) + if versions.LessThan(apiVersion, "1.21") { + if runtime.GOOS == "windows" { + return errors.New("API versions pre v1.21 do not support stats on Windows") + } + var ( + rxBytes uint64 + rxPackets uint64 + rxErrors uint64 + rxDropped uint64 + txBytes uint64 + txPackets uint64 + txErrors uint64 + txDropped uint64 + ) + for _, v := range statsJSONPost120.Networks { + rxBytes += v.RxBytes + rxPackets += v.RxPackets + rxErrors += v.RxErrors + rxDropped += v.RxDropped + txBytes += v.TxBytes + txPackets += v.TxPackets + txErrors += v.TxErrors + txDropped += v.TxDropped + } + statsJSON = &v1p20.StatsJSON{ + Stats: statsJSONPost120.Stats, + Network: types.NetworkStats{ + RxBytes: rxBytes, + RxPackets: rxPackets, + RxErrors: rxErrors, + RxDropped: rxDropped, + TxBytes: txBytes, + TxPackets: txPackets, + TxErrors: txErrors, + TxDropped: txDropped, + }, + } + } else { + statsJSON = statsJSONPost120 + } + + if !config.Stream && noStreamFirstFrame { + // prime the cpu stats so they aren't 0 in the final output + noStreamFirstFrame = false + continue + } + + if err := enc.Encode(statsJSON); err != nil { + return err + } + + if !config.Stream { + return nil + } + case <-ctx.Done(): + return nil + } + } +} + +func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { + return daemon.statsCollector.collect(c) +} + +func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { + daemon.statsCollector.unsubscribe(c, ch) +} + +// GetContainerStats collects all the stats published by a container +func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { + stats, err := daemon.stats(container) + if err != nil { + return nil, err + } + + // We already have the network stats on Windows directly from HCS. + if !container.Config.NetworkDisabled && runtime.GOOS != "windows" { + if stats.Networks, err = daemon.getNetworkStats(container); err != nil { + return nil, err + } + } + + return stats, nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector.go b/vendor/github.com/docker/docker/daemon/stats_collector.go new file mode 100644 index 0000000..dc6825e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector.go @@ -0,0 +1,132 @@ +// +build !solaris + +package daemon + +import ( + "bufio" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/pubsub" +) + +type statsSupervisor interface { + // GetContainerStats collects all the stats related to a container + GetContainerStats(container *container.Container) (*types.StatsJSON, error) +} + +// newStatsCollector returns a new statsCollector that collections +// stats for a registered container at the specified interval. +// The collector allows non-running containers to be added +// and will start processing stats when they are started. +func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + s := &statsCollector{ + interval: interval, + supervisor: daemon, + publishers: make(map[*container.Container]*pubsub.Publisher), + bufReader: bufio.NewReaderSize(nil, 128), + } + platformNewStatsCollector(s) + go s.run() + return s +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { + m sync.Mutex + supervisor statsSupervisor + interval time.Duration + publishers map[*container.Container]*pubsub.Publisher + bufReader *bufio.Reader + + // The following fields are not set on Windows currently. + clockTicksPerSecond uint64 + machineMemory uint64 +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +func (s *statsCollector) run() { + type publishersPair struct { + container *container.Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. + // it will grow enough in first iteration + var pairs []publishersPair + + for range time.Tick(s.interval) { + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() + for container, publisher := range s.publishers { + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + if len(pairs) == 0 { + continue + } + + systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } + + for _, pair := range pairs { + stats, err := s.supervisor.GetContainerStats(pair.container) + if err != nil { + if _, ok := err.(errNotRunning); !ok { + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + } + continue + } + // FIXME: move to containerd on Linux (not Windows) + stats.CPUStats.SystemUsage = systemUsage + + pair.publisher.Publish(*stats) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_solaris.go b/vendor/github.com/docker/docker/daemon/stats_collector_solaris.go new file mode 100644 index 0000000..9cf9f0a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector_solaris.go @@ -0,0 +1,34 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "time" +) + +// newStatsCollector returns a new statsCollector for collection stats +// for a registered container at the specified interval. The collector allows +// non-running containers to be added and will start processing stats when +// they are started. +func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + return &statsCollector{} +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + return nil +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_unix.go b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go new file mode 100644 index 0000000..0fcc9c5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go @@ -0,0 +1,71 @@ +// +build !windows,!solaris + +package daemon + +import ( + "fmt" + "os" + "strconv" + "strings" + + sysinfo "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/system" +) + +// platformNewStatsCollector performs platform specific initialisation of the +// statsCollector structure. +func platformNewStatsCollector(s *statsCollector) { + s.clockTicksPerSecond = uint64(system.GetClockTicks()) + meminfo, err := sysinfo.ReadMemInfo() + if err == nil && meminfo.MemTotal > 0 { + s.machineMemory = uint64(meminfo.MemTotal) + } +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat defined by POSIX. Looks for the cpu +// statistics line and then sums up the first seven fields +// provided. See `man 5 proc` for details on specific field +// information. +func (s *statsCollector) getSystemCPUUsage() (uint64, error) { + var line string + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + defer func() { + s.bufReader.Reset(nil) + f.Close() + }() + s.bufReader.Reset(f) + err = nil + for err == nil { + line, err = s.bufReader.ReadString('\n') + if err != nil { + break + } + parts := strings.Fields(line) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("invalid number of cpu fields") + } + var totalClockTicks uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) + } + totalClockTicks += v + } + return (totalClockTicks * nanoSecondsPerSecond) / + s.clockTicksPerSecond, nil + } + } + return 0, fmt.Errorf("invalid stat format. Error trying to parse the '/proc/stat' file") +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_windows.go b/vendor/github.com/docker/docker/daemon/stats_collector_windows.go new file mode 100644 index 0000000..41731b9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector_windows.go @@ -0,0 +1,15 @@ +// +build windows + +package daemon + +// platformNewStatsCollector performs platform specific initialisation of the +// statsCollector structure. This is a no-op on Windows. +func platformNewStatsCollector(s *statsCollector) { +} + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. This is a no-op on Windows. +func (s *statsCollector) getSystemCPUUsage() (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats_unix.go b/vendor/github.com/docker/docker/daemon/stats_unix.go new file mode 100644 index 0000000..d875607 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" +) + +// Resolve Network SandboxID in case the container reuse another container's network stack +func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) { + curr := c + for curr.HostConfig.NetworkMode.IsContainer() { + containerID := curr.HostConfig.NetworkMode.ConnectedContainer() + connected, err := daemon.GetContainer(containerID) + if err != nil { + return "", fmt.Errorf("Could not get container for %s", containerID) + } + curr = connected + } + return curr.NetworkSettings.SandboxID, nil +} + +func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + sandboxID, err := daemon.getNetworkSandboxID(c) + if err != nil { + return nil, err + } + + sb, err := daemon.netController.SandboxByID(sandboxID) + if err != nil { + return nil, err + } + + lnstats, err := sb.Statistics() + if err != nil { + return nil, err + } + + stats := make(map[string]types.NetworkStats) + // Convert libnetwork nw stats into api stats + for ifName, ifStats := range lnstats { + stats[ifName] = types.NetworkStats{ + RxBytes: ifStats.RxBytes, + RxPackets: ifStats.RxPackets, + RxErrors: ifStats.RxErrors, + RxDropped: ifStats.RxDropped, + TxBytes: ifStats.TxBytes, + TxPackets: ifStats.TxPackets, + TxErrors: ifStats.TxErrors, + TxDropped: ifStats.TxDropped, + } + } + + return stats, nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats_windows.go b/vendor/github.com/docker/docker/daemon/stats_windows.go new file mode 100644 index 0000000..f8e6f6f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_windows.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" +) + +// Windows network stats are obtained directly through HCS, hence this is a no-op. +func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + return make(map[string]types.NetworkStats), nil +} diff --git a/vendor/github.com/docker/docker/daemon/stop.go b/vendor/github.com/docker/docker/daemon/stop.go new file mode 100644 index 0000000..aa7b382 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stop.go @@ -0,0 +1,83 @@ +package daemon + +import ( + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/container" +) + +// ContainerStop looks for the given container and terminates it, +// waiting the given number of seconds before forcefully killing the +// container. If a negative number of seconds is given, ContainerStop +// will wait for a graceful termination. An error is returned if the +// container is not found, is already stopped, or if there is a +// problem stopping the container. +func (daemon *Daemon) ContainerStop(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if !container.IsRunning() { + err := fmt.Errorf("Container %s is already stopped", name) + return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerStop(container, *seconds); err != nil { + return fmt.Errorf("Cannot stop container %s: %v", name, err) + } + return nil +} + +// containerStop halts a container by sending a stop signal, waiting for the given +// duration in seconds, and then calling SIGKILL and waiting for the +// process to exit. If a negative duration is given, Stop will wait +// for the initial signal forever. If the container is not running Stop returns +// immediately. +func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { + if !container.IsRunning() { + return nil + } + + daemon.stopHealthchecks(container) + + stopSignal := container.StopSignal() + // 1. Send a stop signal + if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // its probably because its already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we force kill it. + if _, err := container.WaitStop(2 * time.Second); err != nil { + logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal) + if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { + return err + } + } + } + + // 2. Wait for the process to exit on its own + if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) + // 3. If it doesn't, then send SIGKILL + if err := daemon.Kill(container); err != nil { + container.WaitStop(-1 * time.Second) + logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it + } + } + + daemon.LogContainerEvent(container, "stop") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/top_unix.go b/vendor/github.com/docker/docker/daemon/top_unix.go new file mode 100644 index 0000000..7fb81d0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/top_unix.go @@ -0,0 +1,126 @@ +//+build !windows + +package daemon + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types" +) + +func validatePSArgs(psArgs string) error { + // NOTE: \\s does not detect unicode whitespaces. + // So we use fieldsASCII instead of strings.Fields in parsePSOutput. + // See https://github.com/docker/docker/pull/24358 + re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") + for _, group := range re.FindAllStringSubmatch(psArgs, -1) { + if len(group) >= 3 { + k := group[1] + v := group[2] + if k != "pid" { + return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v) + } + } + } + return nil +} + +// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces +func fieldsASCII(s string) []string { + fn := func(r rune) bool { + switch r { + case '\t', '\n', '\f', '\r', ' ': + return true + } + return false + } + return strings.FieldsFunc(s, fn) +} + +func parsePSOutput(output []byte, pids []int) (*types.ContainerProcessList, error) { + procList := &types.ContainerProcessList{} + + lines := strings.Split(string(output), "\n") + procList.Titles = fieldsASCII(lines[0]) + + pidIndex := -1 + for i, name := range procList.Titles { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return nil, fmt.Errorf("Couldn't find PID field in ps output") + } + + // loop through the output and extract the PID from each line + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := fieldsASCII(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) + } + } + } + return procList, nil +} + +// ContainerTop lists the processes running inside of the given +// container by calling ps with the given args, or with the flags +// "-ef" if no args are given. An error is returned if the container +// is not found, or is not running, or if there are any problems +// running ps, or parsing the output. +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + if psArgs == "" { + psArgs = "-ef" + } + + if err := validatePSArgs(psArgs); err != nil { + return nil, err + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + + pids, err := daemon.containerd.GetPidsForContainer(container.ID) + if err != nil { + return nil, err + } + + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return nil, fmt.Errorf("Error running ps: %v", err) + } + procList, err := parsePSOutput(output, pids) + if err != nil { + return nil, err + } + daemon.LogContainerEvent(container, "top") + return procList, nil +} diff --git a/vendor/github.com/docker/docker/daemon/top_unix_test.go b/vendor/github.com/docker/docker/daemon/top_unix_test.go new file mode 100644 index 0000000..269ab6e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/top_unix_test.go @@ -0,0 +1,76 @@ +//+build !windows + +package daemon + +import ( + "testing" +) + +func TestContainerTopValidatePSArgs(t *testing.T) { + tests := map[string]bool{ + "ae -o uid=PID": true, + "ae -o \"uid= PID\"": true, // ascii space (0x20) + "ae -o \"uid= PID\"": false, // unicode space (U+2003, 0xe2 0x80 0x83) + "ae o uid=PID": true, + "aeo uid=PID": true, + "ae -O uid=PID": true, + "ae -o pid=PID2 -o uid=PID": true, + "ae -o pid=PID": false, + "ae -o pid=PID -o uid=PIDX": true, // FIXME: we do not need to prohibit this + "aeo pid=PID": false, + "ae": false, + "": false, + } + for psArgs, errExpected := range tests { + err := validatePSArgs(psArgs) + t.Logf("tested %q, got err=%v", psArgs, err) + if errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, psArgs) + } + if !errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, psArgs) + } + } +} + +func TestContainerTopParsePSOutput(t *testing.T) { + tests := []struct { + output []byte + pids []int + errExpected bool + }{ + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, false}, + {[]byte(` UID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + // unicode space (U+2003, 0xe2 0x80 0x83) + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + // the first space is U+2003, the second one is ascii. + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + } + + for _, f := range tests { + _, err := parsePSOutput(f.output, f.pids) + t.Logf("tested %q, got err=%v", string(f.output), err) + if f.errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, string(f.output)) + } + if !f.errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, string(f.output)) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/top_windows.go b/vendor/github.com/docker/docker/daemon/top_windows.go new file mode 100644 index 0000000..3dd8ead --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/top_windows.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "errors" + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/go-units" +) + +// ContainerTop handles `docker top` client requests. +// Future considerations: +// -- Windows users are far more familiar with CPU% total. +// Further, users on Windows rarely see user/kernel CPU stats split. +// The kernel returns everything in terms of 100ns. To obtain +// CPU%, we could do something like docker stats does which takes two +// samples, subtract the difference and do the maths. Unfortunately this +// would slow the stat call down and require two kernel calls. So instead, +// we do something similar to linux and display the CPU as combined HH:MM:SS.mmm. +// -- Perhaps we could add an argument to display "raw" stats +// -- "Memory" is an extremely overloaded term in Windows. Hence we do what +// task manager does and use the private working set as the memory counter. +// We could return more info for those who really understand how memory +// management works in Windows if we introduced a "raw" stats (above). +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + // It's not at all an equivalent to linux 'ps' on Windows + if psArgs != "" { + return nil, errors.New("Windows does not support arguments to top") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + s, err := daemon.containerd.Summary(container.ID) + if err != nil { + return nil, err + } + procList := &types.ContainerProcessList{} + procList.Titles = []string{"Name", "PID", "CPU", "Private Working Set"} + + for _, j := range s { + d := time.Duration((j.KernelTime100ns + j.UserTime100ns) * 100) // Combined time in nanoseconds + procList.Processes = append(procList.Processes, []string{ + j.ImageName, + fmt.Sprint(j.ProcessId), + fmt.Sprintf("%02d:%02d:%02d.%03d", int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60, int(d.Nanoseconds()/1000000)%1000), + units.HumanSize(float64(j.MemoryWorkingSetPrivateBytes))}) + } + return procList, nil +} diff --git a/vendor/github.com/docker/docker/daemon/unpause.go b/vendor/github.com/docker/docker/daemon/unpause.go new file mode 100644 index 0000000..e66b386 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/unpause.go @@ -0,0 +1,38 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerUnpause unpauses a container +func (daemon *Daemon) ContainerUnpause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerUnpause(container); err != nil { + return err + } + + return nil +} + +// containerUnpause resumes the container execution after the container is paused. +func (daemon *Daemon) containerUnpause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot unpause the container which is not paused + if !container.Paused { + return fmt.Errorf("Container %s is not paused", container.ID) + } + + if err := daemon.containerd.Resume(container.ID); err != nil { + return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/update.go b/vendor/github.com/docker/docker/daemon/update.go new file mode 100644 index 0000000..6e26eeb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update.go @@ -0,0 +1,92 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates configuration of the container +func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { + var warnings []string + + warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true) + if err != nil { + return container.ContainerUpdateOKBody{Warnings: warnings}, err + } + + if err := daemon.update(name, hostConfig); err != nil { + return container.ContainerUpdateOKBody{Warnings: warnings}, err + } + + return container.ContainerUpdateOKBody{Warnings: warnings}, nil +} + +// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID. +func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error { + if len(cmd) == 0 { + return nil + } + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + c.Path = cmd[0] + c.Args = cmd[1:] + return nil +} + +func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { + if hostConfig == nil { + return nil + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + restoreConfig := false + backupHostConfig := *container.HostConfig + defer func() { + if restoreConfig { + container.Lock() + container.HostConfig = &backupHostConfig + container.ToDisk() + container.Unlock() + } + }() + + if container.RemovalInProgress || container.Dead { + return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\".")) + } + + if err := container.UpdateContainer(hostConfig); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + + // if Restart Policy changed, we need to update container monitor + if hostConfig.RestartPolicy.Name != "" { + container.UpdateMonitor(hostConfig.RestartPolicy) + } + + // If container is not running, update hostConfig struct is enough, + // resources will be updated when the container is started again. + // If container is running (including paused), we need to update configs + // to the real world. + if container.IsRunning() && !container.IsRestarting() { + if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + } + + daemon.LogContainerEvent(container, "update") + + return nil +} + +func errCannotUpdate(containerID string, err error) error { + return fmt.Errorf("Cannot update container %s: %v", containerID, err) +} diff --git a/vendor/github.com/docker/docker/daemon/update_linux.go b/vendor/github.com/docker/docker/daemon/update_linux.go new file mode 100644 index 0000000..f422325 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update_linux.go @@ -0,0 +1,25 @@ +// +build linux + +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + r.BlkioWeight = uint64(resources.BlkioWeight) + r.CpuShares = uint64(resources.CPUShares) + r.CpuPeriod = uint64(resources.CPUPeriod) + r.CpuQuota = uint64(resources.CPUQuota) + r.CpusetCpus = resources.CpusetCpus + r.CpusetMems = resources.CpusetMems + r.MemoryLimit = uint64(resources.Memory) + if resources.MemorySwap > 0 { + r.MemorySwap = uint64(resources.MemorySwap) + } + r.MemoryReservation = uint64(resources.MemoryReservation) + r.KernelMemoryLimit = uint64(resources.KernelMemory) + return r +} diff --git a/vendor/github.com/docker/docker/daemon/update_solaris.go b/vendor/github.com/docker/docker/daemon/update_solaris.go new file mode 100644 index 0000000..f3b545c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/vendor/github.com/docker/docker/daemon/update_windows.go b/vendor/github.com/docker/docker/daemon/update_windows.go new file mode 100644 index 0000000..0146626 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/vendor/github.com/docker/docker/daemon/volumes.go b/vendor/github.com/docker/docker/daemon/volumes.go new file mode 100644 index 0000000..10cf787 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes.go @@ -0,0 +1,303 @@ +package daemon + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + dockererrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // ErrVolumeReadonly is used to signal an error when trying to copy data into + // a volume mount that is not writable. + ErrVolumeReadonly = errors.New("mounted volume is marked read-only") +) + +type mounts []container.Mount + +// volumeToAPIType converts a volume.Volume to the type used by the Engine API +func volumeToAPIType(v volume.Volume) *types.Volume { + tv := &types.Volume{ + Name: v.Name(), + Driver: v.DriverName(), + } + if v, ok := v.(volume.DetailedVolume); ok { + tv.Labels = v.Labels() + tv.Options = v.Options() + tv.Scope = v.Scope() + } + + return tv +} + +// Len returns the number of mounts. Used in sorting. +func (m mounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m mounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m mounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. +func (m mounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} + +// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. +// It follows the next sequence to decide what to mount in each final destination: +// +// 1. Select the previously configured mount points for the containers, if any. +// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. +// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. +// 4. Cleanup old volumes that are about to be reassigned. +func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) { + binds := map[string]bool{} + mountPoints := map[string]*volume.MountPoint{} + defer func() { + // clean up the container mountpoints once return with error + if retErr != nil { + for _, m := range mountPoints { + if m.Volume == nil { + continue + } + daemon.volumes.Dereference(m.Volume, container.ID) + } + } + }() + + // 1. Read already configured mount points. + for destination, point := range container.MountPoints { + mountPoints[destination] = point + } + + // 2. Read volumes from other containers. + for _, v := range hostConfig.VolumesFrom { + containerID, mode, err := volume.ParseVolumesFrom(v) + if err != nil { + return err + } + + c, err := daemon.GetContainer(containerID) + if err != nil { + return err + } + + for _, m := range c.MountPoints { + cp := &volume.MountPoint{ + Name: m.Name, + Source: m.Source, + RW: m.RW && volume.ReadWrite(mode), + Driver: m.Driver, + Destination: m.Destination, + Propagation: m.Propagation, + Spec: m.Spec, + CopyData: false, + } + + if len(cp.Source) == 0 { + v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID) + if err != nil { + return err + } + cp.Volume = v + } + + mountPoints[cp.Destination] = cp + } + } + + // 3. Read bind mounts + for _, b := range hostConfig.Binds { + bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver) + if err != nil { + return err + } + + // #10618 + _, tmpfsExists := hostConfig.Tmpfs[bind.Destination] + if binds[bind.Destination] || tmpfsExists { + return fmt.Errorf("Duplicate mount point '%s'", bind.Destination) + } + + if bind.Type == mounttypes.TypeVolume { + // create the volume + v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil) + if err != nil { + return err + } + bind.Volume = v + bind.Source = v.Path() + // bind.Name is an already existing volume, we need to use that here + bind.Driver = v.DriverName() + if bind.Driver == volume.DefaultDriverName { + setBindModeIfNull(bind) + } + } + + binds[bind.Destination] = true + mountPoints[bind.Destination] = bind + } + + for _, cfg := range hostConfig.Mounts { + mp, err := volume.ParseMountSpec(cfg) + if err != nil { + return dockererrors.NewBadRequestError(err) + } + + if binds[mp.Destination] { + return fmt.Errorf("Duplicate mount point '%s'", cfg.Target) + } + + if mp.Type == mounttypes.TypeVolume { + var v volume.Volume + if cfg.VolumeOptions != nil { + var driverOpts map[string]string + if cfg.VolumeOptions.DriverConfig != nil { + driverOpts = cfg.VolumeOptions.DriverConfig.Options + } + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels) + } else { + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil) + } + if err != nil { + return err + } + + if err := label.Relabel(mp.Source, container.MountLabel, false); err != nil { + return err + } + mp.Volume = v + mp.Name = v.Name() + mp.Driver = v.DriverName() + + // only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow + if cv, ok := v.(interface { + CachedPath() string + }); ok { + mp.Source = cv.CachedPath() + } + } + + binds[mp.Destination] = true + mountPoints[mp.Destination] = mp + } + + container.Lock() + + // 4. Cleanup old volumes that are about to be reassigned. + for _, m := range mountPoints { + if m.BackwardsCompatible() { + if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { + daemon.volumes.Dereference(mp.Volume, container.ID) + } + } + } + container.MountPoints = mountPoints + + container.Unlock() + + return nil +} + +// lazyInitializeVolume initializes a mountpoint's volume if needed. +// This happens after a daemon restart. +func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error { + if len(m.Driver) > 0 && m.Volume == nil { + v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID) + if err != nil { + return err + } + m.Volume = v + } + return nil +} + +func backportMountSpec(container *container.Container) error { + for target, m := range container.MountPoints { + if m.Spec.Type != "" { + // if type is set on even one mount, no need to migrate + return nil + } + if m.Name != "" { + m.Type = mounttypes.TypeVolume + m.Spec.Type = mounttypes.TypeVolume + + // make sure this is not an anyonmous volume before setting the spec source + if _, exists := container.Config.Volumes[target]; !exists { + m.Spec.Source = m.Name + } + if container.HostConfig.VolumeDriver != "" { + m.Spec.VolumeOptions = &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{Name: container.HostConfig.VolumeDriver}, + } + } + if strings.Contains(m.Mode, "nocopy") { + if m.Spec.VolumeOptions == nil { + m.Spec.VolumeOptions = &mounttypes.VolumeOptions{} + } + m.Spec.VolumeOptions.NoCopy = true + } + } else { + m.Type = mounttypes.TypeBind + m.Spec.Type = mounttypes.TypeBind + m.Spec.Source = m.Source + if m.Propagation != "" { + m.Spec.BindOptions = &mounttypes.BindOptions{ + Propagation: m.Propagation, + } + } + } + + m.Spec.Target = m.Destination + if !m.RW { + m.Spec.ReadOnly = true + } + } + return container.ToDiskLocking() +} + +func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error { + localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName) + if err != nil { + return fmt.Errorf("can't retrieve local volume driver: %v", err) + } + vols, err := localVolumeDriver.List() + if err != nil { + return fmt.Errorf("can't retrieve local volumes: %v", err) + } + + for _, v := range vols { + name := v.Name() + _, err := daemon.volumes.Get(name) + if err != nil { + logrus.Warnf("failed to retrieve volume %s from store: %v", name, err) + } + + err = fn(v) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/volumes_unit_test.go b/vendor/github.com/docker/docker/daemon/volumes_unit_test.go new file mode 100644 index 0000000..450d17f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes_unit_test.go @@ -0,0 +1,39 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/volume" +) + +func TestParseVolumesFrom(t *testing.T) { + cases := []struct { + spec string + expID string + expMode string + fail bool + }{ + {"", "", "", true}, + {"foobar", "foobar", "rw", false}, + {"foobar:rw", "foobar", "rw", false}, + {"foobar:ro", "foobar", "ro", false}, + {"foobar:baz", "", "", true}, + } + + for _, c := range cases { + id, mode, err := volume.ParseVolumesFrom(c.spec) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) + } + continue + } + + if id != c.expID { + t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec) + } + if mode != c.expMode { + t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/volumes_unix.go b/vendor/github.com/docker/docker/daemon/volumes_unix.go new file mode 100644 index 0000000..29dffa9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes_unix.go @@ -0,0 +1,219 @@ +// +build !windows + +// TODO(amitkris): We need to split this file for solaris. + +package daemon + +import ( + "encoding/json" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/pkg/errors" +) + +// setupMounts iterates through each of the mount points for a container and +// calls Setup() on each. It also looks to see if is a network mount such as +// /etc/resolv.conf, and if it is not, appends it to the array of mounts. +func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { + var mounts []container.Mount + // TODO: tmpfs mounts should be part of Mountpoints + tmpfsMounts := make(map[string]bool) + tmpfsMountInfo, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + for _, m := range tmpfsMountInfo { + tmpfsMounts[m.Destination] = true + } + for _, m := range c.MountPoints { + if tmpfsMounts[m.Destination] { + continue + } + if err := daemon.lazyInitializeVolume(c.ID, m); err != nil { + return nil, err + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + path, err := m.Setup(c.MountLabel, rootUID, rootGID) + if err != nil { + return nil, err + } + if !c.TrySetNetworkMount(m.Destination, path) { + mnt := container.Mount{ + Source: path, + Destination: m.Destination, + Writable: m.RW, + Propagation: string(m.Propagation), + } + if m.Volume != nil { + attributes := map[string]string{ + "driver": m.Volume.DriverName(), + "container": c.ID, + "destination": m.Destination, + "read/write": strconv.FormatBool(m.RW), + "propagation": string(m.Propagation), + } + daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes) + } + mounts = append(mounts, mnt) + } + } + + mounts = sortMounts(mounts) + netMounts := c.NetworkMounts() + // if we are going to mount any of the network files from container + // metadata, the ownership must be set properly for potential container + // remapped root (user namespaces) + rootUID, rootGID := daemon.GetRemappedUIDGID() + for _, mount := range netMounts { + if err := os.Chown(mount.Source, rootUID, rootGID); err != nil { + return nil, err + } + } + return append(mounts, netMounts...), nil +} + +// sortMounts sorts an array of mounts in lexicographic order. This ensure that +// when mounting, the mounts don't shadow other mounts. For example, if mounting +// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first. +func sortMounts(m []container.Mount) []container.Mount { + sort.Sort(mounts(m)) + return m +} + +// setBindModeIfNull is platform specific processing to ensure the +// shared mode is set to 'z' if it is null. This is called in the case +// of processing a named volume and not a typical bind. +func setBindModeIfNull(bind *volume.MountPoint) { + if bind.Mode == "" { + bind.Mode = "z" + } +} + +// migrateVolume links the contents of a volume created pre Docker 1.7 +// into the location expected by the local driver. +// It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data. +// It preserves the volume json configuration generated pre Docker 1.7 to be able to +// downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility. +func migrateVolume(id, vfs string) error { + l, err := volumedrivers.GetDriver(volume.DefaultDriverName) + if err != nil { + return err + } + + newDataPath := l.(*local.Root).DataPath(id) + fi, err := os.Stat(newDataPath) + if err != nil && !os.IsNotExist(err) { + return err + } + + if fi != nil && fi.IsDir() { + return nil + } + + return os.Symlink(vfs, newDataPath) +} + +// verifyVolumesInfo ports volumes configured for the containers pre docker 1.7. +// It reads the container configuration and creates valid mount points for the old volumes. +func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + // Inspect old structures only when we're upgrading from old versions + // to versions >= 1.7 and the MountPoints has not been populated with volumes data. + type volumes struct { + Volumes map[string]string + VolumesRW map[string]bool + } + cfgPath, err := container.ConfigPath() + if err != nil { + return err + } + f, err := os.Open(cfgPath) + if err != nil { + return errors.Wrap(err, "could not open container config") + } + defer f.Close() + var cv volumes + if err := json.NewDecoder(f).Decode(&cv); err != nil { + return errors.Wrap(err, "could not decode container config") + } + + if len(container.MountPoints) == 0 && len(cv.Volumes) > 0 { + for destination, hostPath := range cv.Volumes { + vfsPath := filepath.Join(daemon.root, "vfs", "dir") + rw := cv.VolumesRW != nil && cv.VolumesRW[destination] + + if strings.HasPrefix(hostPath, vfsPath) { + id := filepath.Base(hostPath) + v, err := daemon.volumes.CreateWithRef(id, volume.DefaultDriverName, container.ID, nil, nil) + if err != nil { + return err + } + if err := migrateVolume(id, hostPath); err != nil { + return err + } + container.AddMountPointWithVolume(destination, v, true) + } else { // Bind mount + m := volume.MountPoint{Source: hostPath, Destination: destination, RW: rw} + container.MountPoints[destination] = &m + } + } + return container.ToDisk() + } + return nil +} + +func (daemon *Daemon) mountVolumes(container *container.Container) error { + mounts, err := daemon.setupMounts(container) + if err != nil { + return err + } + + for _, m := range mounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + return err + } + + var stat os.FileInfo + stat, err = os.Stat(m.Source) + if err != nil { + return err + } + if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { + return err + } + + opts := "rbind,ro" + if m.Writable { + opts = "rbind,rw" + } + + if err := mount.Mount(m.Source, dest, bindMountType, opts); err != nil { + return err + } + + // mountVolumes() seems to be called for temporary mounts + // outside the container. Soon these will be unmounted with + // lazy unmount option and given we have mounted the rbind, + // all the submounts will propagate if these are shared. If + // daemon is running in host namespace and has / as shared + // then these unmounts will propagate and unmount original + // mount as well. So make all these mounts rprivate. + // Do not use propagation property of volume as that should + // apply only when mounting happen inside the container. + if err := mount.MakeRPrivate(dest); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/volumes_windows.go b/vendor/github.com/docker/docker/daemon/volumes_windows.go new file mode 100644 index 0000000..bf7fc47 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes_windows.go @@ -0,0 +1,47 @@ +// +build windows + +package daemon + +import ( + "sort" + + "github.com/docker/docker/container" + "github.com/docker/docker/volume" +) + +// setupMounts configures the mount points for a container by appending each +// of the configured mounts on the container to the OCI mount structure +// which will ultimately be passed into the oci runtime during container creation. +// It also ensures each of the mounts are lexographically sorted. + +// BUGBUG TODO Windows containerd. This would be much better if it returned +// an array of runtime spec mounts, not container mounts. Then no need to +// do multiple transitions. + +func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { + var mnts []container.Mount + for _, mount := range c.MountPoints { // type is volume.MountPoint + if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil { + return nil, err + } + s, err := mount.Setup(c.MountLabel, 0, 0) + if err != nil { + return nil, err + } + + mnts = append(mnts, container.Mount{ + Source: s, + Destination: mount.Destination, + Writable: mount.RW, + }) + } + + sort.Sort(mounts(mnts)) + return mnts, nil +} + +// setBindModeIfNull is platform specific processing which is a no-op on +// Windows. +func setBindModeIfNull(bind *volume.MountPoint) { + return +} diff --git a/vendor/github.com/docker/docker/daemon/wait.go b/vendor/github.com/docker/docker/daemon/wait.go new file mode 100644 index 0000000..2dab22e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/wait.go @@ -0,0 +1,32 @@ +package daemon + +import ( + "time" + + "golang.org/x/net/context" +) + +// ContainerWait stops processing until the given container is +// stopped. If the container is not found, an error is returned. On a +// successful stop, the exit code of the container is returned. On a +// timeout, an error is returned. If you want to wait forever, supply +// a negative duration for the timeout. +func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return -1, err + } + + return container.WaitStop(timeout) +} + +// ContainerWaitWithContext returns a channel where exit code is sent +// when container stops. Channel can be cancelled with a context. +func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + return container.WaitWithContext(ctx) +} diff --git a/vendor/github.com/docker/docker/daemon/workdir.go b/vendor/github.com/docker/docker/daemon/workdir.go new file mode 100644 index 0000000..5bd0d0c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/workdir.go @@ -0,0 +1,21 @@ +package daemon + +// ContainerCreateWorkdir creates the working directory. This is solves the +// issue arising from https://github.com/docker/docker/issues/27545, +// which was initially fixed by https://github.com/docker/docker/pull/27884. But that fix +// was too expensive in terms of performance on Windows. Instead, +// https://github.com/docker/docker/pull/28514 introduces this new functionality +// where the builder calls into the backend here to create the working directory. +func (daemon *Daemon) ContainerCreateWorkdir(cID string) error { + container, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(container) + if err != nil { + return err + } + defer daemon.Unmount(container) + rootUID, rootGID := daemon.GetRemappedUIDGID() + return container.SetupWorkingDirectory(rootUID, rootGID) +} diff --git a/vendor/github.com/docker/docker/distribution/config.go b/vendor/github.com/docker/docker/distribution/config.go new file mode 100644 index 0000000..bfea8b0 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/config.go @@ -0,0 +1,241 @@ +package distribution + +import ( + "encoding/json" + "fmt" + "io" + "runtime" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +// Config stores configuration for communicating +// with a registry. +type Config struct { + // MetaHeaders stores HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // ImageStore manages images. + ImageStore ImageConfigStore + // ReferenceStore manages tags. This value is optional, when excluded + // content will not be tagged. + ReferenceStore reference.Store + // RequireSchema2 ensures that only schema2 manifests are used. + RequireSchema2 bool +} + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + Config + + // DownloadManager manages concurrent pulls. + DownloadManager RootFSDownloadManager + // Schema2Types is the valid schema2 configuration types allowed + // by the pull operation. + Schema2Types []string +} + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + Config + + // ConfigMediaType is the configuration media type for + // schema2 manifests. + ConfigMediaType string + // LayerStore manages layers. + LayerStore PushLayerProvider + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. + TrustKey libtrust.PrivateKey + // UploadManager dispatches uploads. + UploadManager *xfer.LayerUploadManager +} + +// ImageConfigStore handles storing and getting image configurations +// by digest. Allows getting an image configurations rootfs from the +// configuration. +type ImageConfigStore interface { + Put([]byte) (digest.Digest, error) + Get(digest.Digest) ([]byte, error) + RootFSFromConfig([]byte) (*image.RootFS, error) +} + +// PushLayerProvider provides layers to be pushed by ChainID. +type PushLayerProvider interface { + Get(layer.ChainID) (PushLayer, error) +} + +// PushLayer is a pushable layer with metadata about the layer +// and access to the content of the layer. +type PushLayer interface { + ChainID() layer.ChainID + DiffID() layer.DiffID + Parent() PushLayer + Open() (io.ReadCloser, error) + Size() (int64, error) + MediaType() string + Release() +} + +// RootFSDownloadManager handles downloading of the rootfs +type RootFSDownloadManager interface { + // Download downloads the layers into the given initial rootfs and + // returns the final rootfs. + // Given progress output to track download progress + // Returns function to release download resources + Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) +} + +type imageConfigStore struct { + image.Store +} + +// NewImageConfigStoreFromStore returns an ImageConfigStore backed +// by an image.Store for container images. +func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore { + return &imageConfigStore{ + Store: is, + } +} + +func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) { + id, err := s.Store.Create(c) + return digest.Digest(id), err +} + +func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) { + img, err := s.Store.Get(image.IDFromDigest(d)) + if err != nil { + return nil, err + } + return img.RawJSON(), nil +} + +func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + var unmarshalledConfig image.Image + if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { + return nil, err + } + + // fail immediately on windows + if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" { + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } + + return unmarshalledConfig.RootFS, nil +} + +type storeLayerProvider struct { + ls layer.Store +} + +// NewLayerProviderFromStore returns a layer provider backed by +// an instance of LayerStore. Only getting layers as gzipped +// tars is supported. +func NewLayerProviderFromStore(ls layer.Store) PushLayerProvider { + return &storeLayerProvider{ + ls: ls, + } +} + +func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) { + if lid == "" { + return &storeLayer{ + Layer: layer.EmptyLayer, + }, nil + } + l, err := p.ls.Get(lid) + if err != nil { + return nil, err + } + + sl := storeLayer{ + Layer: l, + ls: p.ls, + } + if d, ok := l.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + }, nil + } + + return &sl, nil +} + +type storeLayer struct { + layer.Layer + ls layer.Store +} + +func (l *storeLayer) Parent() PushLayer { + p := l.Layer.Parent() + if p == nil { + return nil + } + sl := storeLayer{ + Layer: p, + ls: l.ls, + } + if d, ok := p.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + } + } + + return &sl +} + +func (l *storeLayer) Open() (io.ReadCloser, error) { + return l.Layer.TarStream() +} + +func (l *storeLayer) Size() (int64, error) { + return l.Layer.DiffSize() +} + +func (l *storeLayer) MediaType() string { + // layer store always returns uncompressed tars + return schema2.MediaTypeUncompressedLayer +} + +func (l *storeLayer) Release() { + if l.ls != nil { + layer.ReleaseAndLog(l.ls, l.Layer) + } +} + +type describableStoreLayer struct { + storeLayer + describable distribution.Describable +} + +func (l *describableStoreLayer) Descriptor() distribution.Descriptor { + return l.describable.Descriptor() +} diff --git a/vendor/github.com/docker/docker/distribution/errors.go b/vendor/github.com/docker/docker/distribution/errors.go new file mode 100644 index 0000000..b8cf9fb --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/errors.go @@ -0,0 +1,159 @@ +package distribution + +import ( + "net/url" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/reference" + "github.com/pkg/errors" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.Cause().Error() +} + +func (f fallbackError) Cause() error { + return f.err +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. +func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// TranslatePullError is used to convert an error from a registry pull +// operation to an error representing the entire pull operation. Any error +// information which is not used by the returned error gets output to +// log at info level. +func TranslatePullError(err error, ref reference.Named) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + for _, extra := range v[1:] { + logrus.Infof("Ignoring extra error returned from registry: %v", extra) + } + return TranslatePullError(v[0], ref) + } + case errcode.Error: + var newErr error + switch v.Code { + case errcode.ErrorCodeDenied: + // ErrorCodeDenied is used when access to the repository was denied + newErr = errors.Errorf("repository %s not found: does not exist or no pull access", ref.Name()) + case v2.ErrorCodeManifestUnknown: + newErr = errors.Errorf("manifest for %s not found", ref.String()) + case v2.ErrorCodeNameUnknown: + newErr = errors.Errorf("repository %s not found", ref.Name()) + } + if newErr != nil { + logrus.Infof("Translating %q to %q", err, newErr) + return newErr + } + case xfer.DoNotRetry: + return TranslatePullError(v.Err, ref) + } + + return err +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. +func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + return retryOnError(v[0]) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: + return xfer.DoNotRetry{Err: err} + } + case *url.Error: + switch v.Err { + case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: + return xfer.DoNotRetry{Err: v.Err} + } + return retryOnError(v.Err) + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + case error: + if err == distribution.ErrBlobUnknown { + return xfer.DoNotRetry{Err: err} + } + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return err +} diff --git a/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest new file mode 100644 index 0000000..a1f02a6 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 2, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest new file mode 100644 index 0000000..beec19a --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest @@ -0,0 +1,46 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "fsLayers": [ + { + "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest new file mode 100644 index 0000000..b107de3 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/vendor/github.com/docker/docker/distribution/metadata/metadata.go new file mode 100644 index 0000000..05ba4f8 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/metadata.go @@ -0,0 +1,75 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return ioutils.AtomicWriteFile(path, value, 0644) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. +func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go new file mode 100644 index 0000000..f262d4d --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go @@ -0,0 +1,51 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/pkg/errors" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if idserv.store == nil { + return "", errors.New("no v1IDService storage") + } + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. +func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if idserv.store == nil { + return nil + } + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go new file mode 100644 index 0000000..5568865 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go @@ -0,0 +1,83 @@ +package metadata + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/layer" +) + +func TestV1IDService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "v1-id-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + v1IDService := NewV1IDService(metadataStore) + + testVectors := []struct { + registry string + v1ID string + layerID layer.DiffID + }{ + { + registry: "registry1", + v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", + layerID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + }, + { + registry: "registry2", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + }, + { + registry: "registry1", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + }, + } + + // Set some associations + for _, vec := range testVectors { + err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + layerID, err := v1IDService.Get(vec.v1ID, vec.registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != vec.layerID { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test Get on a nonexistent entry + _, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != testVectors[1].layerID { + t.Fatal("Get returned incorrect layer ID") + } +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go new file mode 100644 index 0000000..02d1b4a --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,241 @@ +package metadata + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. +type V2MetadataService interface { + GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) + GetDiffID(dgst digest.Digest) (layer.DiffID, error) + Add(diffID layer.DiffID, metadata V2Metadata) error + TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error + Remove(metadata V2Metadata) error +} + +// v2MetadataService implements V2MetadataService +type v2MetadataService struct { + store Store +} + +var _ V2MetadataService = &v2MetadataService{} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string + // HMAC hashes above attributes with recent authconfig digest used as a key in order to determine matching + // metadata entries accompanied by the same credentials without actually exposing them. + HMAC string +} + +// CheckV2MetadataHMAC return true if the given "meta" is tagged with a hmac hashed by the given "key". +func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool { + if len(meta.HMAC) == 0 || len(key) == 0 { + return len(meta.HMAC) == 0 && len(key) == 0 + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + expectedMac := mac.Sum(nil) + + storedMac, err := hex.DecodeString(meta.HMAC) + if err != nil { + return false + } + + return hmac.Equal(storedMac, expectedMac) +} + +// ComputeV2MetadataHMAC returns a hmac for the given "meta" hash by the given key. +func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string { + if len(key) == 0 || meta == nil { + return "" + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + return hex.EncodeToString(mac.Sum(nil)) +} + +// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata +// entries. +func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) { + if authConfig == nil { + return nil, nil + } + key := authConfigKeyInput{ + Username: authConfig.Username, + Password: authConfig.Password, + Auth: authConfig.Auth, + IdentityToken: authConfig.IdentityToken, + RegistryToken: authConfig.RegistryToken, + } + buf, err := json.Marshal(&key) + if err != nil { + return nil, err + } + return []byte(digest.FromBytes([]byte(buf))), nil +} + +// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for +// hmac key creation. +type authConfigKeyInput struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + IdentityToken string `json:"identitytoken,omitempty"` + RegistryToken string `json:"registrytoken,omitempty"` +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. +func NewV2MetadataService(store Store) V2MetadataService { + return &v2MetadataService{ + store: store, + } +} + +func (serv *v2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *v2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *v2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. +func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + if serv.store == nil { + return nil, errors.New("no metadata storage") + } + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + if serv.store == nil { + return layer.DiffID(""), errors.New("no metadata storage") + } + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an add becomes a no-op. + // TODO: implement in memory storage + return nil + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// TagAndAdd amends the given "meta" for hmac hashed by the given "hmacKey" and associates it with a layer +// DiffID. If too many metadata entries are present, the oldest one is dropped. +func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error { + meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta) + return serv.Add(diffID, meta) +} + +// Remove unassociates a metadata entry from a layer DiffID. +func (serv *v2MetadataService) Remove(metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an remove becomes a no-op. + // TODO: implement in memory storage + return nil + } + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go new file mode 100644 index 0000000..7b0ecb1 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go @@ -0,0 +1,115 @@ +package metadata + +import ( + "encoding/hex" + "io/ioutil" + "math/rand" + "os" + "reflect" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +func TestV2MetadataService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + V2MetadataService := NewV2MetadataService(metadataStore) + + tooManyBlobSums := make([]V2Metadata, 100) + for i := range tooManyBlobSums { + randDigest := randomDigest() + tooManyBlobSums[i] = V2Metadata{Digest: randDigest} + } + + testVectors := []struct { + diffID layer.DiffID + metadata []V2Metadata + }{ + { + diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + }, + }, + { + diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + {Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")}, + }, + }, + { + diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + metadata: tooManyBlobSums, + }, + } + + // Set some associations + for _, vec := range testVectors { + for _, blobsum := range vec.metadata { + err := V2MetadataService.Add(vec.diffID, blobsum) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + metadata, err := V2MetadataService.GetMetadata(vec.diffID) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + expectedMetadataEntries := len(vec.metadata) + if expectedMetadataEntries > 50 { + expectedMetadataEntries = 50 + } + if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test GetMetadata on a nonexistent entry + _, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Test GetDiffID on a nonexistent entry + _, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0]) + if err != nil { + t.Fatalf("error calling Add: %v", err) + } + diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest) + if err != nil { + t.Fatalf("error calling GetDiffID: %v", err) + } + if diffID != testVectors[1].diffID { + t.Fatal("GetDiffID returned incorrect diffID") + } +} + +func randomDigest() digest.Digest { + b := [32]byte{} + for i := 0; i < len(b); i++ { + b[i] = byte(rand.Intn(256)) + } + d := hex.EncodeToString(b[:]) + return digest.Digest("sha256:" + d) +} diff --git a/vendor/github.com/docker/docker/distribution/pull.go b/vendor/github.com/docker/docker/distribution/pull.go new file mode 100644 index 0000000..a0acfe5 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull.go @@ -0,0 +1,200 @@ +package distribution + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. + // + Pull(ctx context.Context, ref reference.Named) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + // makes sure name is not empty or `scratch` + if err := ValidateRepoName(repoInfo.Name()); err != nil { + return err + } + + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport + // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. + // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the ones from v2 endpoints not v1. + discardNoSupportErrors bool + + // confirmedV2 is set to true if a pull attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + for _, endpoint := range endpoints { + if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // append subsequent errors + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + // append subsequent errors + lastErr = err + } + logrus.Errorf("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return TranslatePullError(err, ref) + } + + imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) + } + + return TranslatePullError(lastErr, ref) +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// ValidateRepoName validates the name of a repository. +func ValidateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} + +func addDigestReference(store reference.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { + dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + return err + } + + if oldTagID, err := store.Get(dgstRef); err == nil { + if oldTagID != id { + // Updating digests not supported by reference store + logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) + } + return nil + } else if err != reference.ErrDoesNotExist { + return err + } + + return store.AddDigest(dgstRef, id, true) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v1.go b/vendor/github.com/docker/docker/distribution/pull_v1.go new file mode 100644 index 0000000..f44ed4f --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v1.go @@ -0,0 +1,368 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) + + tagged, isTagged := ref.(reference.NamedTagged) + + repoData, err := p.session.GetRepositoryData(p.repoInfo) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + if isTagged { + return fmt.Errorf("Error: image %s:%s not found", p.repoInfo.RemoteName(), tagged.Tag()) + } + return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) + } + // Unexpected HTTP error + return err + } + + logrus.Debug("Retrieving the tag list") + var tagsList map[string]string + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. + lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. + for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return err + } + + if p.config.ReferenceStore != nil { + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go new file mode 100644 index 0000000..88807ed --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -0,0 +1,878 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +var ( + errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + errRootFSInvalid = errors.New("invalid rootfs in image configuration") +) + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. +func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + logrus.Errorf("Error trying v2 registry: %v", err) + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? + layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier + src distribution.Descriptor +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + + layerDownload, err := ld.open(ctx) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debug("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. + _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier, err = digest.NewDigestVerifier(ld.digest) + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. + if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } else if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + if m, ok := manifest.(*schema2.DeserializedManifest); ok { + var allowedMediatype bool + for _, t := range p.config.Schema2Types { + if m.Manifest.Config.MediaType == t { + allowedMediatype = true + break + } + } + if !allowedMediatype { + configClass := mediaTypeClasses[m.Manifest.Config.MediaType] + if configClass == "" { + configClass = "unknown" + } + return false, fmt.Errorf("target is %s", configClass) + } + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name()) + + var ( + id digest.Digest + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + if p.config.RequireSchema2 { + return false, fmt.Errorf("invalid manifest: not schema2") + } + id, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + id, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + id, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + if p.config.ReferenceStore != nil { + oldTagID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagID == id { + return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) + } + } else if err != reference.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { + return false, err + } + } else { + if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return false, err + } + if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { + return false, err + } + } + } + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + if _, err := p.config.ImageStore.Get(target.Digest); err == nil { + // If the image already exists locally, no need to pull + // anything. + return target.Digest, manifestDigest, nil + } + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.Layers { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + src: d, + } + + descriptors = append(descriptors, layerDescriptor) + } + + configChan := make(chan []byte, 1) + errChan := make(chan error, 1) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2Config(ctx, target.Digest) + if err != nil { + errChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var ( + configJSON []byte // raw serialized image config + downloadedRootFS *image.RootFS // rootFS from registered layers + configRootFS *image.RootFS // rootFS from configuration + ) + + // https://github.com/docker/docker/issues/24766 - Err on the side of caution, + // explicitly blocking images intended for linux from the Windows daemon. On + // Windows, we do this before the attempt to download, effectively serialising + // the download slightly slowing it down. We have to do it this way, as + // chances are the download of layers itself would fail due to file names + // which aren't suitable for NTFS. At some point in the future, if a similar + // check to block Windows images being pulled on Linux is implemented, it + // may be necessary to perform the same type of serialisation. + if runtime.GOOS == "windows" { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan) + if err != nil { + return "", "", err + } + + if configRootFS == nil { + return "", "", errRootFSInvalid + } + } + + if p.config.DownloadManager != nil { + downloadRootFS := *image.NewRootFS() + rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) + if err != nil { + if configJSON != nil { + // Already received the config + return "", "", err + } + select { + case err = <-errChan: + return "", "", err + default: + cancel() + select { + case <-configChan: + case <-errChan: + } + return "", "", err + } + } + if release != nil { + defer release() + } + + downloadedRootFS = &rootFS + } + + if configJSON == nil { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan) + if err != nil { + return "", "", err + } + + if configRootFS == nil { + return "", "", errRootFSInvalid + } + } + + if downloadedRootFS != nil { + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. + if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + for i := range downloadedRootFS.DiffIDs { + if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { + return "", "", errRootFSMismatch + } + } + } + + imageID, err := p.config.ImageStore.Put(configJSON) + if err != nil { + return "", "", err + } + + return imageID, manifestDigest, nil +} + +func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) { + select { + case configJSON := <-configChan: + rootfs, err := s.RootFSFromConfig(configJSON) + if err != nil { + return nil, nil, err + } + return configJSON, rootfs, nil + case err := <-errChan: + return nil, nil, err + // Don't need a case for ctx.Done in the select because cancellation + // will trigger an error in p.pullSchema2ImageConfig. + } +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specifc manifests. +func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) { + manifestListDigest, err = schema2ManifestDigest(ref, mfstList) + if err != nil { + return "", "", err + } + + var manifestDigest digest.Digest + for _, manifestDescriptor := range mfstList.Manifests { + // TODO(aaronl): The manifest list spec supports optional + // "features" and "variant" fields. These are not yet used. + // Once they are, their values should be interpreted here. + if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + return "", "", errors.New("no supported platform found in manifest list") + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + id, _, err = p.pullSchema1(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + id, _, err = p.pullSchema2(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return id, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return "", err + } + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return nil, err + } + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. + return errors.New("invalid parent ID in the base layer of the image") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func createDownloadFile() (*os.File, error) { + return ioutil.TempFile("", "GetImageBlob") +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_test.go b/vendor/github.com/docker/docker/distribution/pull_v2_test.go new file mode 100644 index 0000000..b745642 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_test.go @@ -0,0 +1,183 @@ +package distribution + +import ( + "encoding/json" + "io/ioutil" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/reference" +) + +// TestFixManifestLayers checks that fixManifestLayers removes a duplicate +// layer, and that it makes no changes to the manifest when called a second +// time, after the duplicate is removed. +func TestFixManifestLayers(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + duplicateLayerManifestExpectedOutput := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") + } + + // Run fixManifestLayers again and confirm that it doesn't change the + // manifest (which no longer has duplicate layers). + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") + } +} + +// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails +// if the base layer configuration specifies a parent. +func TestFixManifestLayersBaseLayerParent(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "invalid parent ID in the base layer of the image") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails +// if an image configuration specifies a parent that doesn't directly follow +// that (deduplicated) image in the image history. +func TestFixManifestLayersBadParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestValidateManifest verifies the validateManifest function +func TestValidateManifest(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + expectedDigest, err := reference.ParseNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") + if err != nil { + t.Fatal("could not parse reference") + } + expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + + // Good manifest + + goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var goodSignedManifest schema1.SignedManifest + err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in good manifest") + } + + // "Extra data" manifest + + extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var extraDataSignedManifest schema1.SignedManifest + err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in extra data manifest") + } + + // Bad manifest + + badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var badSignedManifest schema1.SignedManifest + err = json.Unmarshal(badManifestBytes, &badSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest) + if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { + t.Fatal("expected validateManifest to fail with digest error") + } +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go new file mode 100644 index 0000000..45a7a0c --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_windows.go b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go new file mode 100644 index 0000000..aefed86 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go @@ -0,0 +1,49 @@ +// +build windows + +package distribution + +import ( + "net/http" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/client/transport" +) + +var _ distribution.Describable = &v2LayerDescriptor{} + +func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { + if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { + return ld.src + } + return distribution.Descriptor{} +} + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + if len(ld.src.URLs) == 0 { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) + } + + var ( + err error + rsc distribution.ReadSeekCloser + ) + + // Find the first URL that results in a 200 result code. + for _, url := range ld.src.URLs { + logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) + rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) + _, err = rsc.Seek(0, os.SEEK_SET) + if err == nil { + break + } + logrus.Debugf("Download for %v failed: %v", ld.digest, err) + rsc.Close() + rsc = nil + } + return rsc, err +} diff --git a/vendor/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go new file mode 100644 index 0000000..d35bdb1 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push.go @@ -0,0 +1,186 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push(ctx context.Context) error +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. +func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on ref. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow to interrupt current push when new push of same image is done. + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) + if len(associations) == 0 { + return fmt.Errorf("An image does not exist locally with the tag: %s", repoInfo.Name()) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Errorf("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/vendor/github.com/docker/docker/distribution/push_v1.go b/vendor/github.com/docker/docker/distribution/push_v1.go new file mode 100644 index 0000000..257ac18 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v1.go @@ -0,0 +1,463 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return fallbackError{err: err} + } + if err := p.pushRepository(ctx); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + return nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. +type v1TopImage struct { + v1ImageCommon + imageID image.ID +} + +func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { + v1ID := imageID.Digest().Hex() + parentV1ID := "" + if parent != nil { + parentV1ID = parent.V1ID() + } + + config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) + if err != nil { + return nil, err + } + + return &v1TopImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: config, + layer: l, + }, + imageID: imageID, + }, nil +} + +// v1DependencyImage defines a dependency layer being pushed to a v1 registry. +type v1DependencyImage struct { + v1ImageCommon +} + +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { + v1ID := digest.Digest(l.ChainID()).Hex() + + config := "" + if parent != nil { + config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) + } else { + config = fmt.Sprintf(`{"id":"%s"}`, v1ID) + } + return &v1DependencyImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: []byte(config), + layer: l, + }, + }, nil +} + +// Retrieve the all the images to be uploaded in the correct order +func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) { + tagsByImage = make(map[image.ID][]string) + + // Ignore digest references + if _, isCanonical := p.ref.(reference.Canonical); isCanonical { + return + } + + tagged, isTagged := p.ref.(reference.NamedTagged) + if isTagged { + // Push a specific tag + var imgID image.ID + var dgst digest.Digest + dgst, err = p.config.ReferenceStore.Get(p.ref) + if err != nil { + return + } + imgID = image.IDFromDigest(dgst) + + imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) + if err != nil { + return + } + + tagsByImage[imgID] = []string{tagged.Tag()} + + return + } + + imagesSeen := make(map[digest.Digest]struct{}) + dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) + + associations := p.config.ReferenceStore.ReferencesByName(p.ref) + for _, association := range associations { + if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { + // Ignore digest references. + continue + } + + imgID := image.IDFromDigest(association.ID) + tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag()) + + if _, present := imagesSeen[association.ID]; present { + // Skip generating image list for already-seen image + continue + } + imagesSeen[association.ID] = struct{}{} + + imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers) + if err != nil { + return nil, nil, nil, err + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + logrus.Debugf("Image list: %v", imageList) + logrus.Debugf("Tags by image: %v", tagsByImage) + + return +} + +func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) { + ics, ok := p.config.ImageStore.(*imageConfigStore) + if !ok { + return nil, fmt.Errorf("only image store images supported for v1 push") + } + img, err := ics.Store.Get(imgID) + if err != nil { + return nil, err + } + + topLayerID := img.RootFS.ChainID() + + pl, err := p.config.LayerStore.Get(topLayerID) + *referencedLayers = append(*referencedLayers, pl) + if err != nil { + return nil, fmt.Errorf("failed to get top layer from image: %v", err) + } + + // V1 push is deprecated, only support existing layerstore layers + lsl, ok := pl.(*storeLayer) + if !ok { + return nil, fmt.Errorf("only layer store layers supported for v1 push") + } + l := lsl.Layer + + dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) + if err != nil { + return nil, err + } + + topImage, err := newV1TopImage(imgID, img, l, parent) + if err != nil { + return nil, err + } + + imageListForThisTag = append(dependencyImages, topImage) + + return +} + +func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { + if l == nil { + return nil, nil, nil + } + + imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) + + if dependenciesSeen != nil { + if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { + // This layer is already on the list, we can ignore it + // and all its parents. + return imageListForThisTag, dependencyImage, nil + } + } + + dependencyImage, err := newV1DependencyImage(l, parent) + if err != nil { + return nil, nil, err + } + imageListForThisTag = append(imageListForThisTag, dependencyImage) + + if dependenciesSeen != nil { + dependenciesSeen[l.ChainID()] = dependencyImage + } + + return imageListForThisTag, dependencyImage, nil +} + +// createImageIndex returns an index of an image's layer IDs and tags. +func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { + var imageIndex []*registry.ImgData + for _, img := range images { + v1ID := img.V1ID() + + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + if tags, hasTags := tags[topImage.imageID]; hasTags { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: tag, + }) + } + continue + } + } + + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: "", + }) + } + return imageIndex +} + +// lookupImageOnEndpoint checks the specified endpoint to see if an image exists +// and if it is absent then it sends the image id to the channel to be pushed. +func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { + defer wg.Done() + for image := range images { + v1ID := image.V1ID() + truncID := stringid.TruncateID(image.Layer().DiffID().String()) + if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { + logrus.Errorf("Error in LookupRemoteImage: %s", err) + imagesToPush <- v1ID + progress.Update(p.config.ProgressOutput, truncID, "Waiting") + } else { + progress.Update(p.config.ProgressOutput, truncID, "Already exists") + } + } +} + +func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { + workerCount := len(imageList) + // start a maximum of 5 workers to check if images exist on the specified endpoint. + if workerCount > 5 { + workerCount = 5 + } + var ( + wg = &sync.WaitGroup{} + imageData = make(chan v1Image, workerCount*2) + imagesToPush = make(chan string, workerCount*2) + pushes = make(chan map[string]struct{}, 1) + ) + for i := 0; i < workerCount; i++ { + wg.Add(1) + go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) + } + // start a go routine that consumes the images to push + go func() { + shouldPush := make(map[string]struct{}) + for id := range imagesToPush { + shouldPush[id] = struct{}{} + } + pushes <- shouldPush + }() + for _, v1Image := range imageList { + imageData <- v1Image + } + // close the channel to notify the workers that there will be no more images to check. + close(imageData) + wg.Wait() + close(imagesToPush) + // wait for all the images that require pushes to be collected into a consumable map. + shouldPush := <-pushes + // finish by pushing any images and tags to the endpoint. The order that the images are pushed + // is very important that is why we are still iterating over the ordered list of imageIDs. + for _, img := range imageList { + v1ID := img.V1ID() + if _, push := shouldPush[v1ID]; push { + if _, err := p.pushImage(ctx, img, endpoint); err != nil { + // FIXME: Continue on error? + return err + } + } + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + for _, tag := range tags[topImage.imageID] { + progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag) + if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil { + return err + } + } + } + } + return nil +} + +// pushRepository pushes layers that do not already exist on the registry. +func (p *v1Pusher) pushRepository(ctx context.Context) error { + imgList, tags, referencedLayers, err := p.getImageList() + defer func() { + for _, l := range referencedLayers { + l.Release() + } + }() + if err != nil { + return err + } + + imageIndex := createImageIndex(imgList, tags) + for _, data := range imageIndex { + logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil) + if err != nil { + return err + } + // push the repository to each of the endpoints only if it does not exist. + for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { + l := v1Image.Layer() + v1ID := v1Image.V1ID() + truncID := stringid.TruncateID(l.DiffID().String()) + + jsonRaw := v1Image.Config() + progress.Update(p.config.ProgressOutput, truncID, "Pushing") + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := ®istry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") + return "", nil + } + return "", err + } + + arch, err := l.TarStream() + if err != nil { + return "", err + } + defer arch.Close() + + // don't care if this fails; best effort + size, _ := l.DiffSize() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") + defer reader.Close() + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") + return imgData.Checksum, nil +} diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go new file mode 100644 index 0000000..1f8c822 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v2.go @@ -0,0 +1,697 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "runtime" + "sort" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" +) + +const ( + smallLayerMaximumSize = 100 * (1 << 10) // 100KB + middleLayerMaximumSize = 10 * (1 << 20) // 10MB +) + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest digest.Digest + Size int +} + +type v2Pusher struct { + v2MetadataService metadata.V2MetadataService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + repo distribution.Repository + + // pushState is state built by the Upload functions. + pushState pushState +} + +type pushState struct { + sync.Mutex + // remoteLayers is the set of layers known to exist on the remote side. + // This avoids redundant queries when pushing multiple tags that + // involve the same layers. It is also used to fill in digest and size + // information when building the manifest. + remoteLayers map[layer.DiffID]distribution.Descriptor + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Pusher) Push(ctx context.Context) (err error) { + p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) + + p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pushV2Repository(ctx); err != nil { + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.pushState.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { + if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { + imageID, err := p.config.ReferenceStore.Get(p.ref) + if err != nil { + return fmt.Errorf("tag does not exist: %s", p.ref.String()) + } + + return p.pushV2Tag(ctx, namedTagged, imageID) + } + + if !reference.IsNameOnly(p.ref) { + return errors.New("cannot push a digest reference") + } + + // Pull all tags + pushed := 0 + for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { + if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { + pushed++ + if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil { + return err + } + } + } + + if pushed == 0 { + return fmt.Errorf("no tags to push for %s", p.repoInfo.Name()) + } + + return nil +} + +func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error { + logrus.Debugf("Pushing repository: %s", ref.String()) + + imgConfig, err := p.config.ImageStore.Get(id) + if err != nil { + return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err) + } + + rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig) + if err != nil { + return fmt.Errorf("unable to get rootfs for image %s: %s", ref.String(), err) + } + + l, err := p.config.LayerStore.Get(rootfs.ChainID()) + if err != nil { + return fmt.Errorf("failed to get top layer from image: %v", err) + } + defer l.Release() + + hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig) + if err != nil { + return fmt.Errorf("failed to compute hmac key of auth config: %v", err) + } + + var descriptors []xfer.UploadDescriptor + + descriptorTemplate := v2PushDescriptor{ + v2MetadataService: p.v2MetadataService, + hmacKey: hmacKey, + repoInfo: p.repoInfo, + ref: p.ref, + repo: p.repo, + pushState: &p.pushState, + } + + // Loop bounds condition is to avoid pushing the base layer on Windows. + for i := 0; i < len(rootfs.DiffIDs); i++ { + descriptor := descriptorTemplate + descriptor.layer = l + descriptor.checkedDigests = make(map[digest.Digest]struct{}) + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { + logrus.Warnf("failed to upload schema2 manifest: %v", err) + return err + } + + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + + if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return err + } + + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. + for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer PushLayer + v2MetadataService metadata.V2MetadataService + hmacKey []byte + repoInfo reference.Named + ref reference.Named + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor + // a set of digests whose presence has been checked in a target repository + checkedDigests map[digest.Digest]struct{} +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.ref.FullName() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } + } + + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer) + + // Do we have any metadata associated with this layer's DiffID? + v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + // check for blob existence in the target repository if we have a mapping with it + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, false, 1, v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. + bs := pd.repo.Blobs(ctx) + + var layerUpload distribution.BlobWriter + + // Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload + candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata) + for _, mountCandidate := range candidates { + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) + createOpts := []distribution.BlobCreateOption{} + + if len(mountCandidate.SourceRepository) > 0 { + namedRef, err := reference.WithName(mountCandidate.SourceRepository) + if err != nil { + logrus.Errorf("failed to parse source repository reference %v: %v", namedRef.String(), err) + pd.v2MetadataService.Remove(mountCandidate) + continue + } + + // TODO (brianbland): We need to construct a reference where the Name is + // only the full remote name, so clean this up when distribution has a + // richer reference package + remoteRef, err := distreference.WithName(namedRef.RemoteName()) + if err != nil { + logrus.Errorf("failed to make remote reference out of %q: %v", namedRef.RemoteName(), namedRef.RemoteName()) + continue + } + + canonicalRef, err := distreference.WithDigest(distreference.TrimNamed(remoteRef), mountCandidate.Digest) + if err != nil { + logrus.Errorf("failed to make canonical reference: %v", err) + continue + } + + createOpts = append(createOpts, client.WithMountFrom(canonicalRef)) + } + + // send the layer + lu, err := bs.Create(ctx, createOpts...) + switch err := err.(type) { + case nil: + // noop + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + err.Descriptor.MediaType = schema2.MediaTypeLayer + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: err.Descriptor.Digest, + SourceRepository: pd.repoInfo.FullName(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + return err.Descriptor, nil + default: + logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) + } + + if len(mountCandidate.SourceRepository) > 0 && + (metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) || + len(mountCandidate.HMAC) == 0) { + cause := "blob mount failure" + if err != nil { + cause = fmt.Sprintf("an error: %v", err.Error()) + } + logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause) + pd.v2MetadataService.Remove(mountCandidate) + } + + if lu != nil { + // cancel previous upload + cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload) + layerUpload = lu + } + } + + if maxExistenceChecks-len(pd.checkedDigests) > 0 { + // do additional layer existence checks with other known digests if any + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + + // upload the blob + desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) + if err != nil { + return desc, err + } + + return desc, nil +} + +func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { + pd.remoteDescriptor = descriptor +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + return pd.remoteDescriptor +} + +func (pd *v2PushDescriptor) uploadUsingSession( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + layerUpload distribution.BlobWriter, +) (distribution.Descriptor, error) { + var reader io.ReadCloser + + contentReader, err := pd.layer.Open() + size, _ := pd.layer.Size() + + reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") + + switch m := pd.layer.MediaType(); m { + case schema2.MediaTypeUncompressedLayer: + compressedReader, compressionDone := compress(reader) + defer func(closer io.Closer) { + closer.Close() + <-compressionDone + }(reader) + reader = compressedReader + case schema2.MediaTypeLayer: + default: + reader.Close() + return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) + } + + digester := digest.Canonical.New() + tee := io.TeeReader(reader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + reader.Close() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: pushDigest, + SourceRepository: pd.repoInfo.FullName(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + desc := distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + + pd.pushState.Lock() + // If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol. + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + + return desc, nil +} + +// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata" +// slice. If it finds one that the registry knows about, it returns the known digest and "true". If +// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository +// (not just the target one). +func (pd *v2PushDescriptor) layerAlreadyExists( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + checkOtherRepositories bool, + maxExistenceCheckAttempts int, + v2Metadata []metadata.V2Metadata, +) (desc distribution.Descriptor, exists bool, err error) { + // filter the metadata + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.FullName() { + continue + } + candidates = append(candidates, meta) + } + // sort the candidates by similarity + sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates) + + digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata) + // an array of unique blob digests ordered from the best mount candidates to worst + layerDigests := []digest.Digest{} + for i := 0; i < len(candidates); i++ { + if len(layerDigests) >= maxExistenceCheckAttempts { + break + } + meta := &candidates[i] + if _, exists := digestToMetadata[meta.Digest]; exists { + // keep reference just to the first mapping (the best mount candidate) + continue + } + if _, exists := pd.checkedDigests[meta.Digest]; exists { + // existence of this digest has already been tested + continue + } + digestToMetadata[meta.Digest] = meta + layerDigests = append(layerDigests, meta.Digest) + } + +attempts: + for _, dgst := range layerDigests { + meta := digestToMetadata[dgst] + logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName()) + desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) + pd.checkedDigests[meta.Digest] = struct{}{} + switch err { + case nil: + if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.FullName() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { + // cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: desc.Digest, + SourceRepository: pd.repoInfo.FullName(), + }); err != nil { + return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err} + } + } + desc.MediaType = schema2.MediaTypeLayer + exists = true + break attempts + case distribution.ErrBlobUnknown: + if meta.SourceRepository == pd.repoInfo.FullName() { + // remove the mapping to the target repository + pd.v2MetadataService.Remove(*meta) + } + default: + logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName()) + } + } + + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + } + + return desc, exists, nil +} + +// getMaxMountAndExistenceCheckAttempts returns a maximum number of cross repository mount attempts from +// source repositories of target registry, maximum number of layer existence checks performed on the target +// repository and whether the check shall be done also with digests mapped to different repositories. The +// decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost +// of upload does not outweigh a latency. +func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) { + size, err := layer.Size() + switch { + // big blob + case size > middleLayerMaximumSize: + // 1st attempt to mount the blob few times + // 2nd few existence checks with digests associated to any repository + // then fallback to upload + return 4, 3, true + + // middle sized blobs; if we could not get the size, assume we deal with middle sized blob + case size > smallLayerMaximumSize, err != nil: + // 1st attempt to mount blobs of average size few times + // 2nd try at most 1 existence check if there's an existing mapping to the target repository + // then fallback to upload + return 3, 1, false + + // small blobs, do a minimum number of checks + default: + return 1, 1, false + } +} + +// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The +// array is sorted from youngest to oldest. If requireReigstryMatch is true, the resulting array will contain +// only metadata entries having registry part of SourceRepository matching the part of repoInfo. +func getRepositoryMountCandidates( + repoInfo reference.Named, + hmacKey []byte, + max int, + v2Metadata []metadata.V2Metadata, +) []metadata.V2Metadata { + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + sourceRepo, err := reference.ParseNamed(meta.SourceRepository) + if err != nil || repoInfo.Hostname() != sourceRepo.Hostname() { + continue + } + // target repository is not a viable candidate + if meta.SourceRepository == repoInfo.FullName() { + continue + } + candidates = append(candidates, meta) + } + + sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates) + if max >= 0 && len(candidates) > max { + // select the youngest metadata + candidates = candidates[:max] + } + + return candidates +} + +// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The +// candidate "a" is preferred over "b": +// +// 1. if it was hashed using the same AuthConfig as the one used to authenticate to target repository and the +// "b" was not +// 2. if a number of its repository path components exactly matching path components of target repository is higher +type byLikeness struct { + arr []metadata.V2Metadata + hmacKey []byte + pathComponents []string +} + +func (bla byLikeness) Less(i, j int) bool { + aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey) + bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey) + if aMacMatch != bMacMatch { + return aMacMatch + } + aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents) + bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents) + return aMatch > bMatch +} +func (bla byLikeness) Swap(i, j int) { + bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i] +} +func (bla byLikeness) Len() int { return len(bla.arr) } + +func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) { + // reverse the metadata array to shift the newest entries to the beginning + for i := 0; i < len(marr)/2; i++ { + marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i] + } + // keep equal entries ordered from the youngest to the oldest + sort.Stable(byLikeness{ + arr: marr, + hmacKey: hmacKey, + pathComponents: getPathComponents(repoInfo.FullName()), + }) +} + +// numOfMatchingPathComponents returns a number of path components in "pth" that exactly match "matchComponents". +func numOfMatchingPathComponents(pth string, matchComponents []string) int { + pthComponents := getPathComponents(pth) + i := 0 + for ; i < len(pthComponents) && i < len(matchComponents); i++ { + if matchComponents[i] != pthComponents[i] { + return i + } + } + return i +} + +func getPathComponents(path string) []string { + // make sure to add docker.io/ prefix to the path + named, err := reference.ParseNamed(path) + if err == nil { + path = named.FullName() + } + return strings.Split(path, "/") +} + +func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) { + if layerUpload != nil { + logrus.Debugf("cancelling upload of blob %s", dgst) + err := layerUpload.Cancel(ctx) + if err != nil { + logrus.Warnf("failed to cancel upload: %v", err) + } + } +} diff --git a/vendor/github.com/docker/docker/distribution/push_v2_test.go b/vendor/github.com/docker/docker/distribution/push_v2_test.go new file mode 100644 index 0000000..6a5216b --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v2_test.go @@ -0,0 +1,579 @@ +package distribution + +import ( + "net/http" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" +) + +func TestGetRepositoryMountCandidates(t *testing.T) { + for _, tc := range []struct { + name string + hmacKey string + targetRepo string + maxCandidates int + metadata []metadata.V2Metadata + candidates []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item not matching", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("key", "dgst", "127.0.0.1/repo")}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item matching", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("hash", "1", "hello-world")}, + candidates: []metadata.V2Metadata{taggedMetadata("hash", "1", "hello-world")}, + }, + { + name: "allow missing SourceRepository", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + candidates: []metadata.V2Metadata{}, + }, + { + name: "handle docker.io", + targetRepo: "user/app", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("3"), SourceRepository: "user/bar"}, + {Digest: digest.Digest("2"), SourceRepository: "app"}, + }, + candidates: []metadata.V2Metadata{ + {Digest: digest.Digest("3"), SourceRepository: "user/bar"}, + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("2"), SourceRepository: "app"}, + }, + }, + { + name: "sort more items", + hmacKey: "abcd", + targetRepo: "127.0.0.1/foo/bar", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + taggedMetadata("hash", "1", "hello-world"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + taggedMetadata("abcd", "3", "busybox"), + taggedMetadata("hash", "4", "busybox"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "7", "127.0.0.1/foo/bar"), + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + // then by longest matching prefix + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + // sort the rest of the matching items in reversed order + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + }, + }, + { + name: "limit max candidates", + hmacKey: "abcd", + targetRepo: "user/app", + maxCandidates: 3, + metadata: []metadata.V2Metadata{ + taggedMetadata("abcd", "1", "user/app1"), + taggedMetadata("abcd", "2", "user/app/base"), + taggedMetadata("hash", "3", "user/app"), + taggedMetadata("abcd", "4", "127.0.0.1/user/app"), + taggedMetadata("hash", "5", "user/foo"), + taggedMetadata("hash", "6", "app/bar"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "2", "user/app/base"), + taggedMetadata("abcd", "1", "user/app1"), + // then by longest matching prefix + taggedMetadata("hash", "3", "user/app"), + }, + }, + } { + repoInfo, err := reference.ParseNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + candidates := getRepositoryMountCandidates(repoInfo, []byte(tc.hmacKey), tc.maxCandidates, tc.metadata) + if len(candidates) != len(tc.candidates) { + t.Errorf("[%s] got unexpected number of candidates: %d != %d", tc.name, len(candidates), len(tc.candidates)) + } + for i := 0; i < len(candidates) && i < len(tc.candidates); i++ { + if !reflect.DeepEqual(candidates[i], tc.candidates[i]) { + t.Errorf("[%s] candidate %d does not match expected: %#+v != %#+v", tc.name, i, candidates[i], tc.candidates[i]) + } + } + for i := len(candidates); i < len(tc.candidates); i++ { + t.Errorf("[%s] missing expected candidate at position %d (%#+v)", tc.name, i, tc.candidates[i]) + } + for i := len(tc.candidates); i < len(candidates); i++ { + t.Errorf("[%s] got unexpected candidate at position %d (%#+v)", tc.name, i, candidates[i]) + } + } +} + +func TestLayerAlreadyExists(t *testing.T) { + for _, tc := range []struct { + name string + metadata []metadata.V2Metadata + targetRepo string + hmacKey string + maxExistenceChecks int + checkOtherRepositories bool + remoteBlobs map[digest.Digest]distribution.Descriptor + remoteErrors map[digest.Digest]error + expectedDescriptor distribution.Descriptor + expectedExists bool + expectedError error + expectedRequests []string + expectedAdditions []metadata.V2Metadata + expectedRemovals []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxExistenceChecks: 3, + checkOtherRepositories: true, + }, + { + name: "single not existent metadata", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + expectedRequests: []string{"pear"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "access denied", + targetRepo: "busybox", + maxExistenceChecks: 1, + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + remoteErrors: map[digest.Digest]error{digest.Digest("apple"): distribution.ErrAccessDenied}, + expectedError: nil, + expectedRequests: []string{"apple"}, + }, + { + name: "not matching reposies", + targetRepo: "busybox", + maxExistenceChecks: 3, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + }, + { + name: "check other repositories", + targetRepo: "busybox", + maxExistenceChecks: 10, + checkOtherRepositories: true, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + expectedRequests: []string{"plum", "pear", "apple", "orange", "banana"}, + }, + { + name: "find existing blob", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + }, + { + name: "find existing blob with different hmac", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{SourceRepository: "docker.io/library/busybox", Digest: digest.Digest("apple"), HMAC: "dummyhmac"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "overwrite media types", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + hmacKey: "key", + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple"), MediaType: "custom-media-type"}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "apple", "docker.io/library/busybox")}, + }, + { + name: "find existing blob among many", + targetRepo: "127.0.0.1/myapp", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("someotherkey", "pear", "127.0.0.1/myapp"), + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + taggedMetadata("", "plum", "127.0.0.1/myapp"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "plum", "pear"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "pear", "127.0.0.1/myapp")}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + {Digest: digest.Digest("plum"), SourceRepository: "127.0.0.1/myapp"}, + }, + }, + { + name: "reach maximum existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedExists: false, + expectedRequests: []string{"banana", "plum", "apple"}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + }, + }, + { + name: "zero allowed existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 0, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + }, + { + name: "stat single digest just once", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + taggedMetadata("key1", "pear", "docker.io/library/busybox"), + taggedMetadata("key2", "apple", "docker.io/library/busybox"), + taggedMetadata("key3", "apple", "docker.io/library/busybox"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "pear"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{taggedMetadata("key3", "apple", "docker.io/library/busybox")}, + }, + { + name: "don't stop on first error", + targetRepo: "user/app", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("key", "banana", "docker.io/user/app"), + taggedMetadata("key", "orange", "docker.io/user/app"), + taggedMetadata("key", "plum", "docker.io/user/app"), + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrAccessDenied}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {}}, + expectedError: nil, + expectedRequests: []string{"plum", "orange", "banana"}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "plum", "docker.io/user/app"), + taggedMetadata("key", "banana", "docker.io/user/app"), + }, + }, + { + name: "remove outdated metadata", + targetRepo: "docker.io/user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrBlobUnknown}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("plum"): {}}, + expectedExists: false, + expectedRequests: []string{"orange"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}}, + }, + { + name: "missing SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + maxExistenceChecks: 3, + expectedExists: false, + expectedRequests: []string{"2", "3", "1"}, + }, + + { + name: "with and without SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("3")}, + }, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("1"): {Digest: digest.Digest("1")}}, + maxExistenceChecks: 3, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("1"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"2", "3", "1"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("1"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + }, + }, + } { + repoInfo, err := reference.ParseNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + repo := &mockRepo{ + t: t, + errors: tc.remoteErrors, + blobs: tc.remoteBlobs, + requests: []string{}, + } + ctx := context.Background() + ms := &mockV2MetadataService{} + pd := &v2PushDescriptor{ + hmacKey: []byte(tc.hmacKey), + repoInfo: repoInfo, + layer: &storeLayer{ + Layer: layer.EmptyLayer, + }, + repo: repo, + v2MetadataService: ms, + pushState: &pushState{remoteLayers: make(map[layer.DiffID]distribution.Descriptor)}, + checkedDigests: make(map[digest.Digest]struct{}), + } + + desc, exists, err := pd.layerAlreadyExists(ctx, &progressSink{t}, layer.EmptyLayer.DiffID(), tc.checkOtherRepositories, tc.maxExistenceChecks, tc.metadata) + + if !reflect.DeepEqual(desc, tc.expectedDescriptor) { + t.Errorf("[%s] got unexpected descriptor: %#+v != %#+v", tc.name, desc, tc.expectedDescriptor) + } + if exists != tc.expectedExists { + t.Errorf("[%s] got unexpected exists: %t != %t", tc.name, exists, tc.expectedExists) + } + if !reflect.DeepEqual(err, tc.expectedError) { + t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError) + } + + if len(repo.requests) != len(tc.expectedRequests) { + t.Errorf("[%s] got unexpected number of requests: %d != %d", tc.name, len(repo.requests), len(tc.expectedRequests)) + } + for i := 0; i < len(repo.requests) && i < len(tc.expectedRequests); i++ { + if repo.requests[i] != tc.expectedRequests[i] { + t.Errorf("[%s] request %d does not match expected: %q != %q", tc.name, i, repo.requests[i], tc.expectedRequests[i]) + } + } + for i := len(repo.requests); i < len(tc.expectedRequests); i++ { + t.Errorf("[%s] missing expected request at position %d (%q)", tc.name, i, tc.expectedRequests[i]) + } + for i := len(tc.expectedRequests); i < len(repo.requests); i++ { + t.Errorf("[%s] got unexpected request at position %d (%q)", tc.name, i, repo.requests[i]) + } + + if len(ms.added) != len(tc.expectedAdditions) { + t.Errorf("[%s] got unexpected number of additions: %d != %d", tc.name, len(ms.added), len(tc.expectedAdditions)) + } + for i := 0; i < len(ms.added) && i < len(tc.expectedAdditions); i++ { + if ms.added[i] != tc.expectedAdditions[i] { + t.Errorf("[%s] added metadata at %d does not match expected: %q != %q", tc.name, i, ms.added[i], tc.expectedAdditions[i]) + } + } + for i := len(ms.added); i < len(tc.expectedAdditions); i++ { + t.Errorf("[%s] missing expected addition at position %d (%q)", tc.name, i, tc.expectedAdditions[i]) + } + for i := len(tc.expectedAdditions); i < len(ms.added); i++ { + t.Errorf("[%s] unexpected metadata addition at position %d (%q)", tc.name, i, ms.added[i]) + } + + if len(ms.removed) != len(tc.expectedRemovals) { + t.Errorf("[%s] got unexpected number of removals: %d != %d", tc.name, len(ms.removed), len(tc.expectedRemovals)) + } + for i := 0; i < len(ms.removed) && i < len(tc.expectedRemovals); i++ { + if ms.removed[i] != tc.expectedRemovals[i] { + t.Errorf("[%s] removed metadata at %d does not match expected: %q != %q", tc.name, i, ms.removed[i], tc.expectedRemovals[i]) + } + } + for i := len(ms.removed); i < len(tc.expectedRemovals); i++ { + t.Errorf("[%s] missing expected removal at position %d (%q)", tc.name, i, tc.expectedRemovals[i]) + } + for i := len(tc.expectedRemovals); i < len(ms.removed); i++ { + t.Errorf("[%s] removed unexpected metadata at position %d (%q)", tc.name, i, ms.removed[i]) + } + } +} + +func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata { + meta := metadata.V2Metadata{ + Digest: digest.Digest(dgst), + SourceRepository: sourceRepo, + } + + meta.HMAC = metadata.ComputeV2MetadataHMAC([]byte(key), &meta) + return meta +} + +type mockRepo struct { + t *testing.T + errors map[digest.Digest]error + blobs map[digest.Digest]distribution.Descriptor + requests []string +} + +var _ distribution.Repository = &mockRepo{} + +func (m *mockRepo) Named() distreference.Named { + m.t.Fatalf("Named() not implemented") + return nil +} +func (m *mockRepo) Manifests(ctc context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + m.t.Fatalf("Manifests() not implemented") + return nil, nil +} +func (m *mockRepo) Tags(ctc context.Context) distribution.TagService { + m.t.Fatalf("Tags() not implemented") + return nil +} +func (m *mockRepo) Blobs(ctx context.Context) distribution.BlobStore { + return &mockBlobStore{ + repo: m, + } +} + +type mockBlobStore struct { + repo *mockRepo +} + +var _ distribution.BlobStore = &mockBlobStore{} + +func (m *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + m.repo.requests = append(m.repo.requests, dgst.String()) + if err, exists := m.repo.errors[dgst]; exists { + return distribution.Descriptor{}, err + } + if desc, exists := m.repo.blobs[dgst]; exists { + return desc, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} +func (m *mockBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + m.repo.t.Fatal("Get() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + m.repo.t.Fatal("Open() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + m.repo.t.Fatal("Put() not implemented") + return distribution.Descriptor{}, nil +} + +func (m *mockBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Create() not implemented") + return nil, nil +} +func (m *mockBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Resume() not implemented") + return nil, nil +} +func (m *mockBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + m.repo.t.Fatal("Delete() not implemented") + return nil +} +func (m *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + m.repo.t.Fatalf("ServeBlob() not implemented") + return nil +} + +type mockV2MetadataService struct { + added []metadata.V2Metadata + removed []metadata.V2Metadata +} + +var _ metadata.V2MetadataService = &mockV2MetadataService{} + +func (*mockV2MetadataService) GetMetadata(diffID layer.DiffID) ([]metadata.V2Metadata, error) { + return nil, nil +} +func (*mockV2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + return "", nil +} +func (m *mockV2MetadataService) Add(diffID layer.DiffID, metadata metadata.V2Metadata) error { + m.added = append(m.added, metadata) + return nil +} +func (m *mockV2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta metadata.V2Metadata) error { + meta.HMAC = metadata.ComputeV2MetadataHMAC(hmacKey, &meta) + m.Add(diffID, meta) + return nil +} +func (m *mockV2MetadataService) Remove(metadata metadata.V2Metadata) error { + m.removed = append(m.removed, metadata) + return nil +} + +type progressSink struct { + t *testing.T +} + +func (s *progressSink) WriteProgress(p progress.Progress) error { + s.t.Logf("progress update: %#+v", p) + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/registry.go b/vendor/github.com/docker/docker/distribution/registry.go new file mode 100644 index 0000000..95e181d --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/registry.go @@ -0,0 +1,156 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// ImageTypes represents the schema2 config types for images +var ImageTypes = []string{ + schema2.MediaTypeImageConfig, + // Handle unexpected values from https://github.com/docker/distribution/issues/1621 + // (see also https://github.com/docker/docker/issues/22378, + // https://github.com/docker/docker/issues/30083) + "application/octet-stream", + "application/json", + "text/html", + // Treat defaulted values as images, newer types cannot be implied + "", +} + +// PluginTypes represents the schema2 config types for plugins +var PluginTypes = []string{ + schema2.MediaTypePluginConfig, +} + +var mediaTypeClasses map[string]string + +func init() { + // initialize media type classes with all know types for + // plugin + mediaTypeClasses = map[string]string{} + for _, t := range ImageTypes { + mediaTypeClasses[t] = "image" + } + for _, t := range PluginTypes { + mediaTypeClasses[t] = "plugin" + } +} + +// NewV2Repository returns a repository (v2 only). It creates an HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.FullName() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = repoInfo.RemoteName() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + + modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) + + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + scope := auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + Class: repoInfo.Class, + } + + creds := registry.NewStaticCredentialStore(authConfig) + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) + + repoNameRef, err := distreference.ParseNamed(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/registry_unit_test.go b/vendor/github.com/docker/docker/distribution/registry_unit_test.go new file mode 100644 index 0000000..406de34 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/registry_unit_test.go @@ -0,0 +1,136 @@ +package distribution + +import ( + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "golang.org/x/net/context" +) + +const secretRegistryToken = "mysecrettoken" + +type tokenPassThruHandler struct { + reached bool + gotToken bool + shouldSend401 func(url string) bool +} + +func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.reached = true + if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) { + logrus.Debug("Detected registry token in auth header") + h.gotToken = true + } + if h.shouldSend401 == nil || h.shouldSend401(r.RequestURI) { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + } +} + +func testTokenPassThru(t *testing.T, ts *httptest.Server) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + uri, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("could not parse url from test server: %v", err) + } + + endpoint := registry.APIEndpoint{ + Mirror: false, + URL: uri, + Version: 2, + Official: false, + TrimHostname: false, + TLSConfig: nil, + //VersionHeader: "verheader", + } + n, _ := reference.ParseNamed("testremotename") + repoInfo := ®istry.RepositoryInfo{ + Named: n, + Index: ®istrytypes.IndexInfo{ + Name: "testrepo", + Mirrors: nil, + Secure: false, + Official: false, + }, + Official: false, + } + imagePullConfig := &ImagePullConfig{ + Config: Config{ + MetaHeaders: http.Header{}, + AuthConfig: &types.AuthConfig{ + RegistryToken: secretRegistryToken, + }, + }, + Schema2Types: ImageTypes, + } + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + t.Fatal(err) + } + p := puller.(*v2Puller) + ctx := context.Background() + p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + t.Fatal(err) + } + + logrus.Debug("About to pull") + // We expect it to fail, since we haven't mock'd the full registry exchange in our handler above + tag, _ := reference.WithTag(n, "tag_goes_here") + _ = p.pullV2Repository(ctx, tag) +} + +func TestTokenPassThru(t *testing.T) { + handler := &tokenPassThruHandler{shouldSend401: func(url string) bool { return url == "/v2/" }} + ts := httptest.NewServer(handler) + defer ts.Close() + + testTokenPassThru(t, ts) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if !handler.gotToken { + t.Fatal("Failed to receive registry token") + } +} + +func TestTokenPassThruDifferentHost(t *testing.T) { + handler := new(tokenPassThruHandler) + ts := httptest.NewServer(handler) + defer ts.Close() + + tsredirect := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/v2/" { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + return + } + http.Redirect(w, r, ts.URL+r.URL.Path, http.StatusMovedPermanently) + })) + defer tsredirect.Close() + + testTokenPassThru(t, tsredirect) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if handler.gotToken { + t.Fatal("Redirect should not forward Authorization header to another host") + } +} diff --git a/vendor/github.com/docker/docker/distribution/utils/progress.go b/vendor/github.com/docker/docker/distribution/utils/progress.go new file mode 100644 index 0000000..ef8ecc8 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/utils/progress.go @@ -0,0 +1,44 @@ +package utils + +import ( + "io" + "net" + "os" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// WriteDistributionProgress is a helper for writing progress from chan to JSON +// stream with an optional cancel function. +func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { + progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) + operationCancelled := false + + for prog := range progressChan { + if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { + // don't log broken pipe errors as this is the normal case when a client aborts + if isBrokenPipe(err) { + logrus.Info("Pull session cancelled") + } else { + logrus.Errorf("error writing progress to client: %v", err) + } + cancelFunc() + operationCancelled = true + // Don't return, because we need to continue draining + // progressChan until it's closed to avoid a deadlock. + } + } +} + +func isBrokenPipe(e error) bool { + if netErr, ok := e.(*net.OpError); ok { + e = netErr.Err + if sysErr, ok := netErr.Err.(*os.SyscallError); ok { + e = sysErr.Err + } + } + return e == syscall.EPIPE +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/download.go b/vendor/github.com/docker/docker/distribution/xfer/download.go new file mode 100644 index 0000000..7545342 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/download.go @@ -0,0 +1,452 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStore layer.Store + tm TransferManager +} + +// SetConcurrency set the max concurrent downloads for each pull +func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { + ldm.tm.SetConcurrency(concurrency) +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager { + return &LayerDownloadManager{ + layerStore: layerStore, + tm: NewTransferManager(concurrencyLimit), + } +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. +func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer, or an error + // if it is unknown (for example, if it has not been downloaded + // before). + DiffID() (layer.DiffID, error) + // Download is called to perform the download. + Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) + // Close is called when the download manager is finished with this + // descriptor and will not call Download again or read from the reader + // that Download returned. + Close() +} + +// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an +// additional Registered method which gets called after a downloaded layer is +// registered. This allows the user of the download manager to know the DiffID +// of each registered layer. This method is called if a cast to +// DownloadDescriptorWithRegistered is successful. +type DownloadDescriptorWithRegistered interface { + DownloadDescriptor + Registered(diffID layer.DiffID) +} + +// Download is a blocking function which ensures the requested layers are +// present in the layer store. It uses the string returned by the Key method to +// deduplicate downloads. If a given layer is not already known to present in +// the layer store, and the key is not used by an in-progress download, the +// Download method is called to get the layer tar data. Layers are then +// registered in the appropriate order. The caller must call the returned +// release function once it is is done with the returned RootFS object. +func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + var ( + topLayer layer.Layer + topDownload *downloadTransfer + watcher *Watcher + missingLayer bool + transferKey = "" + downloadsByKey = make(map[string]*downloadTransfer) + ) + + rootFS := initialRootFS + for _, descriptor := range layers { + key := descriptor.Key() + transferKey += key + + if !missingLayer { + missingLayer = true + diffID, err := descriptor.DiffID() + if err == nil { + getRootFS := rootFS + getRootFS.Append(diffID) + l, err := ldm.layerStore.Get(getRootFS.ChainID()) + if err == nil { + // Layer already exists. + logrus.Debugf("Layer already exists: %s", descriptor.ID()) + progress.Update(progressOutput, descriptor.ID(), "Already exists") + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + topLayer = l + missingLayer = false + rootFS.Append(diffID) + continue + } + } + } + + // Does this layer have the same data as a previous layer in + // the stack? If so, avoid downloading it more than once. + var topDownloadUncasted Transfer + if existingDownload, ok := downloadsByKey[key]; ok { + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload) + defer topDownload.Transfer.Release(watcher) + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + continue + } + + // Layer is not known to exist - download and register it. + progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? + select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + var src distribution.Descriptor + if fs, ok := descriptor.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + } + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-sourceDownload.Done(): + } + + l, err = sourceDownload.result() + if err != nil { + d.err = err + return + } + + layerReader, err := l.TarStream() + if err != nil { + d.err = err + return + } + defer layerReader.Close() + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(layerReader, parentLayer) + } + if err != nil { + d.err = fmt.Errorf("failed to register layer: %v", err) + return + } + + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/download_test.go b/vendor/github.com/docker/docker/distribution/xfer/download_test.go new file mode 100644 index 0000000..bc20e1e --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/download_test.go @@ -0,0 +1,356 @@ +package xfer + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadConcurrency = 3 + +type mockLayer struct { + layerData bytes.Buffer + diffID layer.DiffID + chainID layer.ChainID + parent layer.Layer +} + +func (ml *mockLayer) TarStream() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil +} + +func (ml *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, fmt.Errorf("not implemented") +} + +func (ml *mockLayer) ChainID() layer.ChainID { + return ml.chainID +} + +func (ml *mockLayer) DiffID() layer.DiffID { + return ml.diffID +} + +func (ml *mockLayer) Parent() layer.Layer { + return ml.parent +} + +func (ml *mockLayer) Size() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +type mockLayerStore struct { + layers map[layer.ChainID]*mockLayer +} + +func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...) +} + +func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer { + layers := map[layer.ChainID]layer.Layer{} + + for k, v := range ls.layers { + layers[k] = v + } + + return layers +} + +func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) { + return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) +} + +func (ls *mockLayerStore) RegisterWithDescriptor(reader io.Reader, parentID layer.ChainID, _ distribution.Descriptor) (layer.Layer, error) { + var ( + parent layer.Layer + err error + ) + + if parentID != "" { + parent, err = ls.Get(parentID) + if err != nil { + return nil, err + } + } + + l := &mockLayer{parent: parent} + _, err = l.layerData.ReadFrom(reader) + if err != nil { + return nil, err + } + l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes())) + l.chainID = createChainIDFromParent(parentID, l.diffID) + + ls.layers[l.chainID] = l + return l, nil +} + +func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { + l, ok := ls.layers[chainID] + if !ok { + return nil, layer.ErrLayerDoesNotExist + } + return l, nil +} + +func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { + return []layer.Metadata{}, nil +} +func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, string, layer.MountInit, map[string]string) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) { + return nil, errors.New("not implemented") +} +func (ls *mockLayerStore) GetMountID(string) (string, error) { + return "", errors.New("not implemented") +} + +func (ls *mockLayerStore) Cleanup() error { + return nil +} + +func (ls *mockLayerStore) DriverStatus() [][2]string { + return [][2]string{} +} + +func (ls *mockLayerStore) DriverName() string { + return "mock" +} + +type mockDownloadDescriptor struct { + currentDownloads *int32 + id string + diffID layer.DiffID + registeredDiffID layer.DiffID + expectedDiffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. +func (d *mockDownloadDescriptor) Key() string { + return d.id +} + +// ID returns the ID for display purposes. +func (d *mockDownloadDescriptor) ID() string { + return d.id +} + +// DiffID should return the DiffID for this layer, or an error +// if it is unknown (for example, if it has not been downloaded +// before). +func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) { + if d.diffID != "" { + return d.diffID, nil + } + return "", errors.New("no diffID available") +} + +func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) { + d.registeredDiffID = diffID +} + +func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser { + // The mock implementation returns the ID repeated 5 times as a tar + // stream instead of actual tar data. The data is ignored except for + // computing IDs. + return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id))) +} + +// Download is called to perform the download. +func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + if d.currentDownloads != nil { + defer atomic.AddInt32(d.currentDownloads, -1) + + if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency { + return nil, 0, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming download. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return nil, 0, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10}) + } + } + + if d.simulateRetries != 0 { + d.simulateRetries-- + return nil, 0, errors.New("simulating retry") + } + + return d.mockTarStream(), 0, nil +} + +func (d *mockDownloadDescriptor) Close() { +} + +func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor { + return []DownloadDescriptor{ + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id1", + expectedDiffID: layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id3", + expectedDiffID: layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id4", + expectedDiffID: layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"), + simulateRetries: 1, + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id5", + expectedDiffID: layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"), + }, + } +} + +func TestSuccessfulDownload(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} + ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]progress.Progress) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p + } + close(progressDone) + }() + + var currentDownloads int32 + descriptors := downloadDescriptors(¤tDownloads) + + firstDescriptor := descriptors[0].(*mockDownloadDescriptor) + + // Pre-register the first layer to simulate an already-existing layer + l, err := layerStore.Register(firstDescriptor.mockTarStream(), "") + if err != nil { + t.Fatal(err) + } + firstDescriptor.diffID = l.DiffID() + + rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("download error: %v", err) + } + + releaseFunc() + + close(progressChan) + <-progressDone + + if len(rootFS.DiffIDs) != len(descriptors) { + t.Fatal("got wrong number of diffIDs in rootfs") + } + + for i, d := range descriptors { + descriptor := d.(*mockDownloadDescriptor) + + if descriptor.diffID != "" { + if receivedProgress[d.ID()].Action != "Already exists" { + t.Fatalf("did not get 'Already exists' message for %v", d.ID()) + } + } else if receivedProgress[d.ID()].Action != "Pull complete" { + t.Fatalf("did not get 'Pull complete' message for %v", d.ID()) + } + + if rootFS.DiffIDs[i] != descriptor.expectedDiffID { + t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i]) + } + + if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] { + t.Fatal("diffID mismatch between rootFS and Registered callback") + } + } +} + +func TestCancelledDownload(t *testing.T) { + ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := downloadDescriptors(nil) + _, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected download to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go new file mode 100644 index 0000000..14f1566 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/transfer.go @@ -0,0 +1,401 @@ +package xfer + +import ( + "runtime" + "sync" + + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +// DoNotRetry is an error wrapper indicating that the error cannot be resolved +// with a retry. +type DoNotRetry struct { + Err error +} + +// Error returns the stringified representation of the encapsulated error. +func (e DoNotRetry) Error() string { + return e.Err.Error() +} + +// Watcher is returned by Watch and can be passed to Release to stop watching. +type Watcher struct { + // signalChan is used to signal to the watcher goroutine that + // new progress information is available, or that the transfer + // has finished. + signalChan chan struct{} + // releaseChan signals to the watcher goroutine that the watcher + // should be detached. + releaseChan chan struct{} + // running remains open as long as the watcher is watching the + // transfer. It gets closed if the transfer finishes or the + // watcher is detached. + running chan struct{} +} + +// Transfer represents an in-progress transfer. +type Transfer interface { + Watch(progressOutput progress.Output) *Watcher + Release(*Watcher) + Context() context.Context + Close() + Done() <-chan struct{} + Released() <-chan struct{} + Broadcast(masterProgressChan <-chan progress.Progress) +} + +type transfer struct { + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // watchers keeps track of the goroutines monitoring progress output, + // indexed by the channels that release them. + watchers map[chan struct{}]*Watcher + + // lastProgress is the most recently received progress event. + lastProgress progress.Progress + // hasLastProgress is true when lastProgress has been set. + hasLastProgress bool + + // running remains open as long as the transfer is in progress. + running chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} + + // broadcastDone is true if the master progress channel has closed. + broadcastDone bool + // closed is true if Close has been called + closed bool + // broadcastSyncChan allows watchers to "ping" the broadcasting + // goroutine to wait for it for deplete its input channel. This ensures + // a detaching watcher won't miss an event that was sent before it + // started detaching. + broadcastSyncChan chan struct{} +} + +// NewTransfer creates a new transfer. +func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + released: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. +func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + t.watchers[w.releaseChan] = w + + if t.broadcastDone { + close(w.running) + return w + } + + go func() { + defer func() { + close(w.running) + }() + var ( + done bool + lastWritten progress.Progress + hasLastWritten bool + ) + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // Make sure we don't write the last progress item + // twice. + if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { + progressOutput.WriteProgress(lastProgress) + lastWritten = lastProgress + hasLastWritten = true + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything. + select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch; indicating that the watcher no longer wants +// to be notified about the progress of the transfer. All calls to Watch must +// be paired with later calls to Release so that the lifecycle of the transfer +// is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. + select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. + Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) + // SetConcurrency set the concurrencyLimit so that it is adjustable daemon reload + SetConcurrency(concurrency int) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager. +func NewTransferManager(concurrencyLimit int) TransferManager { + return &transferManager{ + concurrencyLimit: concurrencyLimit, + transfers: make(map[string]Transfer), + } +} + +// SetConcurrency set the concurrencyLimit +func (tm *transferManager) SetConcurrency(concurrency int) { + tm.mu.Lock() + tm.concurrencyLimit = concurrency + tm.mu.Unlock() +} + +// Transfer checks if a transfer matching the given key is in progress. If not, +// it starts one by calling xferFunc. The caller supplies a channel which +// receives progress output from the transfer. +func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { + tm.mu.Lock() + defer tm.mu.Unlock() + + for { + xfer, present := tm.transfers[key] + if !present { + break + } + // Transfer is already in progress. + watcher := xfer.Watch(progressOutput) + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. + xfer.Release(watcher) + tm.mu.Unlock() + // The goroutine that removes this transfer from the + // map is also waiting for xfer.Done(), so yield to it. + // This could be avoided by adding a Closed method + // to Transfer to allow explicitly waiting for it to be + // removed the map, but forcing a scheduling round in + // this very rare case seems better than bloating the + // interface definition. + runtime.Gosched() + <-xfer.Done() + tm.mu.Lock() + default: + return xfer, watcher + } + } + + start := make(chan struct{}) + inactive := make(chan struct{}) + + if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit { + close(start) + tm.activeTransfers++ + } else { + tm.waitingTransfers = append(tm.waitingTransfers, start) + } + + masterProgressChan := make(chan progress.Progress) + xfer := xferFunc(masterProgressChan, start, inactive) + watcher := xfer.Watch(progressOutput) + go xfer.Broadcast(masterProgressChan) + tm.transfers[key] = xfer + + // When the transfer is finished, remove from the map. + go func() { + for { + select { + case <-inactive: + tm.mu.Lock() + tm.inactivate(start) + tm.mu.Unlock() + inactive = nil + case <-xfer.Done(): + tm.mu.Lock() + if inactive != nil { + tm.inactivate(start) + } + delete(tm.transfers, key) + tm.mu.Unlock() + xfer.Close() + return + } + } + }() + + return xfer, watcher +} + +func (tm *transferManager) inactivate(start chan struct{}) { + // If the transfer was started, remove it from the activeTransfers + // count. + select { + case <-start: + // Start next transfer if any are waiting + if len(tm.waitingTransfers) != 0 { + close(tm.waitingTransfers[0]) + tm.waitingTransfers = tm.waitingTransfers[1:] + } else { + tm.activeTransfers-- + } + default: + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go b/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go new file mode 100644 index 0000000..6c50ce3 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go @@ -0,0 +1,410 @@ +package xfer + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/pkg/progress" +) + +func TestTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + select { + case <-start: + default: + t.Fatalf("transfer function not started even though concurrency limit not reached") + } + + xfer := NewTransfer() + go func() { + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + val, present := receivedProgress[p.ID] + if present && p.Current <= val { + t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1) + } + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start a few transfers + ids := []string{"id1", "id2", "id3"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestConcurrencyLimit(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestInactiveJobs(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + testDone := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(inactive) + <-testDone + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + close(testDone) + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestWatchRelease(t *testing.T) { + ready := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type watcherInfo struct { + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(w watcherInfo) { + first := true + for range w.progressChan { + if first { + close(w.receivedFirstProgress) + } + first = false + } + close(w.progressDone) + } + + // Start a transfer + watchers := make([]watcherInfo, 5) + var xfer Transfer + watchers[0].progressChan = make(chan progress.Progress) + watchers[0].progressDone = make(chan struct{}) + watchers[0].receivedFirstProgress = make(chan struct{}) + xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan)) + go progressConsumer(watchers[0]) + + // Give it multiple watchers + for i := 1; i != len(watchers); i++ { + watchers[i].progressChan = make(chan progress.Progress) + watchers[i].progressDone = make(chan struct{}) + watchers[i].receivedFirstProgress = make(chan struct{}) + watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan)) + go progressConsumer(watchers[i]) + } + + // Now that the watchers are set up, allow the transfer goroutine to + // proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, w := range watchers { + <-w.receivedFirstProgress + } + + // Release one watcher every 5ms + for _, w := range watchers { + xfer.Release(w.watcher) + <-time.After(5 * time.Millisecond) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() + + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-xfer.Done() + + for _, w := range watchers { + close(w.progressChan) + <-w.progressDone + } +} + +func TestWatchFinishedTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + // Finish immediately + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + + // Start a transfer + watchers := make([]*Watcher, 3) + var xfer Transfer + xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) + + // Give it a watcher immediately + watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Wait for the transfer to complete + <-xfer.Done() + + // Set up another watcher + watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Release the watchers + for _, w := range watchers { + xfer.Release(w) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() +} + +func TestDuplicateTransfer(t *testing.T) { + ready := make(chan struct{}) + + var xferFuncCalls int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + atomic.AddInt32(&xferFuncCalls, 1) + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type transferInfo struct { + xfer Transfer + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(t transferInfo) { + first := true + for range t.progressChan { + if first { + close(t.receivedFirstProgress) + } + first = false + } + close(t.progressDone) + } + + // Try to start multiple transfers with the same ID + transfers := make([]transferInfo, 5) + for i := range transfers { + t := &transfers[i] + t.progressChan = make(chan progress.Progress) + t.progressDone = make(chan struct{}) + t.receivedFirstProgress = make(chan struct{}) + t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan)) + go progressConsumer(*t) + } + + // Allow the transfer goroutine to proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, t := range transfers { + <-t.receivedFirstProgress + } + + // Confirm that the transfer function was called exactly once. + if xferFuncCalls != 1 { + t.Fatal("transfer function wasn't called exactly once") + } + + // Release one watcher every 5ms + for _, t := range transfers { + t.xfer.Release(t.watcher) + <-time.After(5 * time.Millisecond) + } + + for _, t := range transfers { + // Now that all watchers have been released, Released() should + // return a closed channel. + <-t.xfer.Released() + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-t.xfer.Done() + } + + for _, t := range transfers { + close(t.progressChan) + <-t.progressDone + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload.go b/vendor/github.com/docker/docker/distribution/xfer/upload.go new file mode 100644 index 0000000..ad33983 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/upload.go @@ -0,0 +1,168 @@ +package xfer + +import ( + "errors" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadAttempts = 5 + +// LayerUploadManager provides task management and progress reporting for +// uploads. +type LayerUploadManager struct { + tm TransferManager +} + +// SetConcurrency set the max concurrent uploads for each push +func (lum *LayerUploadManager) SetConcurrency(concurrency int) { + lum.tm.SetConcurrency(concurrency) +} + +// NewLayerUploadManager returns a new LayerUploadManager. +func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager { + return &LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + } +} + +type uploadTransfer struct { + Transfer + + remoteDescriptor distribution.Descriptor + err error +} + +// An UploadDescriptor references a layer that may need to be uploaded. +type UploadDescriptor interface { + // Key returns the key used to deduplicate uploads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer. + DiffID() layer.DiffID + // Upload is called to perform the Upload. + Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) + // SetRemoteDescriptor provides the distribution.Descriptor that was + // returned by Upload. This descriptor is not to be confused with + // the UploadDescriptor interface, which is used for internally + // identifying layers that are being uploaded. + SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. +func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload_test.go b/vendor/github.com/docker/docker/distribution/xfer/upload_test.go new file mode 100644 index 0000000..16bd187 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/upload_test.go @@ -0,0 +1,134 @@ +package xfer + +import ( + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadConcurrency = 3 + +type mockUploadDescriptor struct { + currentUploads *int32 + diffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. +func (u *mockUploadDescriptor) Key() string { + return u.diffID.String() +} + +// ID returns the ID for display purposes. +func (u *mockUploadDescriptor) ID() string { + return u.diffID.String() +} + +// DiffID should return the DiffID for this layer. +func (u *mockUploadDescriptor) DiffID() layer.DiffID { + return u.diffID +} + +// SetRemoteDescriptor is not used in the mock. +func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) { +} + +// Upload is called to perform the upload. +func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + if u.currentUploads != nil { + defer atomic.AddInt32(u.currentUploads, -1) + + if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency { + return distribution.Descriptor{}, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming upload. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return distribution.Descriptor{}, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10}) + } + } + + if u.simulateRetries != 0 { + u.simulateRetries-- + return distribution.Descriptor{}, errors.New("simulating retry") + } + + return distribution.Descriptor{}, nil +} + +func uploadDescriptors(currentUploads *int32) []UploadDescriptor { + return []UploadDescriptor{ + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0}, + } +} + +func TestSuccessfulUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + var currentUploads int32 + descriptors := uploadDescriptors(¤tUploads) + + err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("upload error: %v", err) + } + + close(progressChan) + <-progressDone +} + +func TestCancelledUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := uploadDescriptors(nil) + err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected upload to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go new file mode 100644 index 0000000..d2a891c --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -0,0 +1,74 @@ +package dockerversion + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" + "golang.org/x/net/context" +) + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. +// In accordance with RFC 7231 (5.5.3) is of the form: +// [docker client's UA] UpstreamClient([upstream client's UA]) +func DockerUserAgent(ctx context.Context) string { + httpVersion := make([]useragent.VersionInfo, 0, 6) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) + } + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) + + dockerUA := useragent.AppendVersions("", httpVersion...) + upstreamUA := getUserAgentFromContext(ctx) + if len(upstreamUA) > 0 { + ret := insertUpstreamUserAgent(upstreamUA, dockerUA) + return ret + } + return dockerUA +} + +// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists +func getUserAgentFromContext(ctx context.Context) string { + var upstreamUA string + if ctx != nil { + var ki interface{} = ctx.Value(httputils.UAStringKey) + if ki != nil { + upstreamUA = ctx.Value(httputils.UAStringKey).(string) + } + } + return upstreamUA +} + +// escapeStr returns s with every rune in charsToEscape escaped by a backslash +func escapeStr(s string, charsToEscape string) string { + var ret string + for _, currRune := range s { + appended := false + for _, escapeableRune := range charsToEscape { + if currRune == escapeableRune { + ret += `\` + string(currRune) + appended = true + break + } + } + if !appended { + ret += string(currRune) + } + } + return ret +} + +// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent +// string of the form: +// $dockerUA UpstreamClient($upstreamUA) +func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { + charsToEscape := `();\` + upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) + return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) +} diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go new file mode 100644 index 0000000..33f77d3 --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go @@ -0,0 +1,16 @@ +// +build !autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. +const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" + ContainerdCommitID string = "library-import" + RuncCommitID string = "library-import" + InitCommitID string = "library-import" +) diff --git a/vendor/github.com/docker/docker/docs/README.md b/vendor/github.com/docker/docker/docs/README.md new file mode 100644 index 0000000..da93093 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/README.md @@ -0,0 +1,30 @@ +# The non-reference docs have been moved! + + + +The documentation for Docker Engine has been merged into +[the general documentation repo](https://github.com/docker/docker.github.io). + +See the [README](https://github.com/docker/docker.github.io/blob/master/README.md) +for instructions on contributing to and building the documentation. + +If you'd like to edit the current published version of the Engine docs, +do it in the master branch here: +https://github.com/docker/docker.github.io/tree/master/engine + +If you need to document the functionality of an upcoming Engine release, +use the `vnext-engine` branch: +https://github.com/docker/docker.github.io/tree/vnext-engine/engine + +The reference docs have been left in docker/docker (this repo), which remains +the place to edit them. + +The docs in the general repo are open-source and we appreciate +your feedback and pull requests! diff --git a/vendor/github.com/docker/docker/docs/api/v1.18.md b/vendor/github.com/docker/docker/docs/api/v1.18.md new file mode 100644 index 0000000..0db0c0f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.18.md @@ -0,0 +1,2156 @@ +--- +title: "Engine API v1.18" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.18/ +- /reference/api/docker_remote_api_v1.18/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.18/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.18/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **HostConfig** + - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, `none`, and `container:` + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpuShares": 0, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.18/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000, + "throttling_data" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.18/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.18/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.18/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.18/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.18/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.18/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.18/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +**Example request, with digest information**: + + GET /v1.18/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728 + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.18/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.18/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.18/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.18/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.18/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.18/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.18/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_automated": false, + "description": "" + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.18/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.18/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "Debug": 0, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": 1, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": 1, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": 0, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.18/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.18" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.18/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.18/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.18/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.18/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. + + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.18/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs": null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + +This might change in the future. + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.19.md b/vendor/github.com/docker/docker/docs/api/v1.19.md new file mode 100644 index 0000000..a1a7280 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.19.md @@ -0,0 +1,2238 @@ +--- +title: "Engine API v1.19" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.19/ +- /reference/api/docker_remote_api_v1.19/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.19/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.19/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **HostConfig** + - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, `none`, and `container:` + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `none`. + `syslog` available options are: `address`. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.19/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.19/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.19/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.19/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.19/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.19/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.19/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.19/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.19/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.19/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the + URI specifies a filename, the file's contents are placed into a file + called `Dockerfile`. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.19/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.19/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.19/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.19/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.19/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). This API +returns both `is_trusted` and `is_automated` images. Currently, they +are considered identical. In the future, the `is_trusted` property will +be deprecated and replaced by the `is_automated` property. + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.19/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.19/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.19/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.19/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.19" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.19/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.19/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.19/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.19/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.19/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.19/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.19/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs": null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.20.md b/vendor/github.com/docker/docker/docs/api/v1.20.md new file mode 100644 index 0000000..2532c49 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.20.md @@ -0,0 +1,2391 @@ +--- +title: "Engine API v1.20" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.20/ +- /reference/api/docker_remote_api_v1.20/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.20/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.20/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "MemorySwappiness": 60, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **HostConfig** + - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, `none`, and `container:` + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.20/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.20/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.20/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.20/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.20/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.20/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.20/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.20/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.20/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.20/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.20/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.20/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.20/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.20/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.20/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.20/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.20/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.20/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.20/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.20/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.20/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.20/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.20/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.20/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.20/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.20/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.20/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.20/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.20/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.20/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.21.md b/vendor/github.com/docker/docker/docs/api/v1.21.md new file mode 100644 index 0000000..b4f54b7 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.21.md @@ -0,0 +1,2969 @@ +--- +title: "Engine API v1.21" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.21/ +- /reference/api/docker_remote_api_v1.21/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.21/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.21/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "MemorySwappiness": 60, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "" + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Example request, with size information**: + + GET /v1.21/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.21/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.21/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.21/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.21/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.21/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.21/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.21/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.21/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.21/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.21/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.21/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.21/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.21/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.21/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.21/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.21/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.21/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.21/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.21/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.21/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.21/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.21/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.21/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.21/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.21/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.21/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"pull","id":"busybox:latest","time":1442421700,"timeNano":1442421700598988358} + {"status":"create","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716853979870} + {"status":"attach","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716894759198} + {"status":"start","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716983607193} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.21/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.21/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.21/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.21/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Status" : "running", + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "" + } + } + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.21/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.21/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.21/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.21/networks HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: `name=[network-names]` , `id=[network-ids]` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.21/networks/f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.21/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + } + ] + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` +- **Options** - Network specific options to be used by the drivers + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4" +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4" +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.22.md b/vendor/github.com/docker/docker/docs/api/v1.22.md new file mode 100644 index 0000000..e940813 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.22.md @@ -0,0 +1,3307 @@ +--- +title: "Engine API v1.22" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.22/ +- /reference/api/docker_remote_api_v1.22/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.22/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + } + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + } + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + } + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + } + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.22/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033" + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding + mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` + - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **BlkioDeviceWiiteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `splunk`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.22/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.22/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.22/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update resource configs of one or more containers. + +**Example request**: + + POST /v1.22/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.22/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.22/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.22/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.22/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.22/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.22/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.22/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.22/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.22/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.22/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.22/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.22/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.22/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.22/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. + +**Example request**: + + POST /v1.22/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com", + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.22/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.22/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.22/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.22/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.22/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.22/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.10.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.22", + "BuildTime": "2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.22/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.22/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.22/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.10.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.22/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.22/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.22/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.22/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.22/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.22/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `id=` Matches all or part of a network id. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.22/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.22/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **Options** - Network specific options to be used by the drivers + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.23.md b/vendor/github.com/docker/docker/docs/api/v1.23.md new file mode 100644 index 0000000..e23811b --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.23.md @@ -0,0 +1,3424 @@ +--- +title: "Engine API v1.23" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.23/ +- /reference/api/docker_remote_api_v1.23/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.23/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "Exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `ancestor`=(`[:]`, `` or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.23/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PidsLimit": -1, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033" + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding + mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` + - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **BlkioDeviceWiiteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **UsernsMode** - Sets the usernamespace mode for the container when usernamespace remapping option is enabled. + supported values are: `host`. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.23/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.23/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.23/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. + +**Example request**: + + POST /v1.23/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + "RestartPolicy": { + "MaximumRetryCount": 4, + "Name": "on-failure" + }, + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.23/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.23/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.23/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.23/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.23/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.23/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.23/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.23/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.23/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.23/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.23/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.23/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.23/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.23/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. + +**Example request**: + + POST /v1.23/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com", + } + ``` + + - Identity token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.23/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.23/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.23/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Validate credentials for a registry and get identity token, +if available, for accessing the registry without password. + +**Example request**: + + POST /v1.23/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + + { + "Status": "Login Succeeded", + "IdentityToken": "9cbaf023786cd7..." + } + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.23/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "CgroupDriver": "cgroupfs", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelMemory": true, + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.23/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.11.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.23", + "BuildTime": "2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.23/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.23/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.23/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.11.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.23/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"} + ... + {"stream":"Loaded image: busybox:latest\n"} + +**Example response**: + +If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress +details are suppressed, and only a confirmation message is returned once the +action completes. + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"stream":"Loaded image: busybox:latest\n"} + +**Query parameters**: + +- **quiet** – Boolean value, suppress progress details during load. Defaults + to `0` / `false` if omitted. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.23/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.23/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.23/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. +- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}` + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /v1.23/volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis/_data", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.23/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 3.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.23/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `id=` Matches all or part of a network id. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.23/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal": false, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.23/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "EnableIPv6": true, + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **Internal** - Restrict external access to the network +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **EnableIPv6** - Enable IPv6 on the network +- **Options** - Network specific options to be used by the drivers +- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}` + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.24.md b/vendor/github.com/docker/docker/docs/api/v1.24.md new file mode 100644 index 0000000..0cf4e2a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.24.md @@ -0,0 +1,5316 @@ +--- +title: "Engine API v1.24" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.24/ +- /reference/api/docker_remote_api_v1.24/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Errors + +The Engine API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + { + "message": "page not found" + } + +The status codes that are returned for each endpoint are specified in the endpoint documentation below. + +## 3. Endpoints + +### 3.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.24/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "Exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `ancestor`=(`[:]`, `` or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + - `network`=(`` or ``) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.24/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuPercent": 80, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PidsLimit": -1, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Sysctls": { "net.ipv4.ip_forward": "1" }, + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "StorageOpt": {}, + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033", + "LinkLocalIPs":["169.254.34.68", "fe80::3468"] + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. This must be a valid RFC 1123 hostname. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding + mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuPercent** - An integer value containing the usable percentage of the available CPUs. (Windows daemon only) + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **IOMaximumBandwidth** - Maximum IO absolute rate in terms of IOps. + - **IOMaximumIOps** - Maximum IO absolute rate in terms of bytes per second. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` + - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **BlkioDeviceWiiteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **UsernsMode** - Sets the usernamespace mode for the container when usernamespace remapping option is enabled. + supported values are: `host`. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **Sysctls** - A list of kernel parameters (sysctls) to set in the container, specified as + `{ : }`, for example: + `{ "net.ipv4.ip_forward": "1" }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **StorageOpt**: Storage driver options per container. Options can be passed in the form + `{"size":"120G"}` + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuPercent": 80, + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "Sysctls": { + "net.ipv4.ip_forward": "1" + }, + "StorageOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.24/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **details** - 1/True/true or 0/False/flase, Show extra details provided to logs. Default `false`. +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.24/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.24/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. + +**Example request**: + + POST /v1.24/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + "RestartPolicy": { + "MaximumRetryCount": 4, + "Name": "on-failure" + }, + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.24/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.24/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.24/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.24/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.24/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.24/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.24/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 3.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.24/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.24/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `before`=(`[:]`, `` or ``) + - `since`=(`[:]`, `` or ``) +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.24/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.24/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.24/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.24/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.24/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. + +**Example request**: + + POST /v1.24/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com", + } + ``` + + - Identity token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.24/images/test/tag?repo=myrepo&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.24/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.24/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search +- **limit** – maximum returned search results +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `stars=` + - `is-automated=(true|false)` + - `is-official=(true|false)` + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 3.3 Misc + +#### Check auth configuration + +`POST /auth` + +Validate credentials for a registry and get identity token, +if available, for accessing the registry without password. + +**Example request**: + + POST /v1.24/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + + { + "Status": "Login Succeeded", + "IdentityToken": "9cbaf023786cd7..." + } + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.24/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "CgroupDriver": "cgroupfs", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelMemory": true, + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SecurityOptions": [ + "apparmor", + "seccomp", + "selinux" + ], + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.24/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.12.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.6.3", + "GitCommit": "deadbee", + "Arch": "amd64", + "ApiVersion": "1.24", + "BuildTime": "2016-06-14T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.24/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.24/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, load, pull, push, save, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +Docker daemon report the following event: + + reload + +**Example request**: + + GET /v1.24/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.12.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` or `daemon` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + - `daemon=`; -- daemon name or id to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.24/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.24/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.24/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"} + ... + {"stream":"Loaded image: busybox:latest\n"} + +**Example response**: + +If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress +details are suppressed, and only a confirmation message is returned once the +action completes. + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"stream":"Loaded image: busybox:latest\n"} + +**Query parameters**: + +- **quiet** – Boolean value, suppress progress details during load. Defaults + to `0` / `false` if omitted. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.24/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.24/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.24/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 3.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.24/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Labels": null, + "Scope": "local" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: + - `name=` Matches all or part of a volume name. + - `dangling=` When set to `true` (or `1`), returns all volumes that are "dangling" (not in use by a container). When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. + - `driver=` Matches all or part of a volume driver name. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.24/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + }, + "Driver": "custom" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "custom", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Status": { + "hello": "world" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + }, + "Scope": "local" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. +- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}` + +**JSON fields in response**: + +Refer to the [inspect a volume](#inspect-a-volume) section or details about the +JSON fields returned in the response. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /v1.24/volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "custom", + "Mountpoint": "/var/lib/docker/volumes/tardis/_data", + "Status": { + "hello": "world" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + }, + "Scope": "local" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +**JSON fields in response**: + +The following fields can be returned in the API response. Empty fields, or +fields that are not supported by the volume's driver may be omitted in the +response. + +- **Name** - Name of the volume. +- **Driver** - Name of the volume driver used by the volume. +- **Mountpoint** - Mount path of the volume on the host. +- **Status** - Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. + The `Status` field is optional, and is omitted if the volume driver does not + support this feature. +- **Labels** - Labels set on the volume, specified as a map: `{"key":"value","key2":"value2"}`. +- **Scope** - Scope describes the level at which the volume exists, can be one of + `global` for cluster-wide or `local` for machine level. The default is `local`. + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.24/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 3.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.24/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network id. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.24/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal": false, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.24/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "EnableIPv6": true, + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **403** - operation not supported for pre-defined networks +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **Internal** - Restrict external access to the network +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **EnableIPv6** - Enable IPv6 on the network +- **Options** - Network specific options to be used by the drivers +- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}` + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for swarm scoped networks +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for swarm scoped networks +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such network +- **500** - server error + +### 3.6 Plugins (experimental) + +#### List plugins + +`GET /plugins` + +Returns information about installed plugins. + +**Example request**: + + GET /v1.24/plugins HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } + } +] +``` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Install a plugin + +`POST /plugins/pull?name=` + +Pulls and installs a plugin. After the plugin is installed, it can be enabled +using the [`POST /plugins/(plugin name)/enable` endpoint](#enable-a-plugin). + +**Example request**: + +``` +POST /v1.24/plugins/pull?name=tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. When using +this endpoint to pull a plugin from the registry, the `X-Registry-Auth` header +can be used to include a base64-encoded AuthConfig object. Refer to the [create +an image](#create-an-image) section for more details. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 175 + +[ + { + "Name": "network", + "Description": "", + "Value": [ + "host" + ] + }, + { + "Name": "mount", + "Description": "", + "Value": [ + "/data" + ] + }, + { + "Name": "device", + "Description": "", + "Value": [ + "/dev/cpu_dma_latency" + ] + } +] +``` + +**Query parameters**: + +- **name** - Name of the plugin to pull. The name may include a tag or digest. + This parameter is required. + +**Status codes**: + +- **200** - no error +- **500** - error parsing reference / not a valid repository/tag: repository + name must have at least one component +- **500** - plugin already exists + +#### Inspect a plugin + +`GET /plugins/(plugin name)` + +Returns detailed information about an installed plugin. + +**Example request**: + +``` +GET /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": false, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed + +#### Enable a plugin + +`POST /plugins/(plugin name)/enable` + +Enables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/enable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **500** - plugin is already enabled + +#### Disable a plugin + +`POST /plugins/(plugin name)/disable` + +Disables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/disable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **500** - plugin is already disabled + +#### Remove a plugin + +`DELETE /plugins/(plugin name)` + +Removes a plugin + +**Example request**: + +``` +DELETE /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is active + + + +### 3.7 Nodes + +**Note**: Node operations require the engine to be part of a swarm. + +#### List nodes + + +`GET /nodes` + +List nodes + +**Example request**: + + GET /v1.24/nodes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "ID": "24ifsmvkjbyhk", + "Version": { + "Index": 8 + }, + "CreatedAt": "2016-06-07T20:31:11.853781916Z", + "UpdatedAt": "2016-06-07T20:31:11.999868824Z", + "Spec": { + "Name": "my-node", + "Role": "manager", + "Availability": "active" + "Labels": { + "foo": "bar" + } + }, + "Description": { + "Hostname": "bf3067039e47", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 4000000000, + "MemoryBytes": 8272408576 + }, + "Engine": { + "EngineVersion": "1.12.0", + "Labels": { + "foo": "bar", + } + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "bridge" + } + { + "Type": "Network", + "Name": "null" + } + { + "Type": "Network", + "Name": "overlay" + } + ] + } + }, + "Status": { + "State": "ready" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "172.17.0.2:2377"" + } + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + nodes list. Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `role=`(`manager`|`worker`)` + +**Status codes**: + +- **200** – no error +- **406** - node is not part of a swarm +- **500** – server error + +#### Inspect a node + + +`GET /nodes/(id or name)` + +Return low-level information on the node `id` + +**Example request**: + + GET /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ID": "24ifsmvkjbyhk", + "Version": { + "Index": 8 + }, + "CreatedAt": "2016-06-07T20:31:11.853781916Z", + "UpdatedAt": "2016-06-07T20:31:11.999868824Z", + "Spec": { + "Name": "my-node", + "Role": "manager", + "Availability": "active" + "Labels": { + "foo": "bar" + } + }, + "Description": { + "Hostname": "bf3067039e47", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 4000000000, + "MemoryBytes": 8272408576 + }, + "Engine": { + "EngineVersion": "1.12.0", + "Labels": { + "foo": "bar", + } + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "bridge" + } + { + "Type": "Network", + "Name": "null" + } + { + "Type": "Network", + "Name": "overlay" + } + ] + } + }, + "Status": { + "State": "ready" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "172.17.0.2:2377"" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such node +- **406** – node is not part of a swarm +- **500** – server error + +#### Remove a node + + +`DELETE /nodes/(id or name)` + +Remove a node from the swarm. + +**Example request**: + + DELETE /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **force** - 1/True/true or 0/False/false, Force remove a node from the swarm. + Default `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such node +- **406** – node is not part of a swarm +- **500** – server error + +#### Update a node + + +`POST /nodes/(id)/update` + +Update a node. + +The payload of the `POST` request is the new `NodeSpec` and +overrides the current `NodeSpec` for the specified node. + +If `Availability` or `Role` are omitted, this returns an +error. Any other field omitted resets the current value to either +an empty value or the default cluster-wide value. + +**Example Request** + + POST /v1.24/nodes/24ifsmvkjbyhk/update?version=8 HTTP/1.1 + Content-Type: application/json + + { + "Availability": "active", + "Name": "node-name", + "Role": "manager", + "Labels": { + "foo": "bar" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **version** – The version number of the node object being updated. This is + required to avoid conflicting writes. + +JSON Parameters: + +- **Annotations** – Optional medata to associate with the node. + - **Name** – User-defined name for the node. + - **Labels** – A map of labels to associate with the node (e.g., + `{"key":"value", "key2":"value2"}`). +- **Role** - Role of the node (worker/manager). +- **Availability** - Availability of the node (active/pause/drain). + + +**Status codes**: + +- **200** – no error +- **404** – no such node +- **406** – node is not part of a swarm +- **500** – server error + +### 3.8 Swarm + +#### Inspect swarm + + +`GET /swarm` + +Inspect swarm + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CreatedAt" : "2016-08-15T16:00:20.349727406Z", + "Spec" : { + "Dispatcher" : { + "HeartbeatPeriod" : 5000000000 + }, + "Orchestration" : { + "TaskHistoryRetentionLimit" : 10 + }, + "CAConfig" : { + "NodeCertExpiry" : 7776000000000000 + }, + "Raft" : { + "LogEntriesForSlowFollowers" : 500, + "HeartbeatTick" : 1, + "SnapshotInterval" : 10000, + "ElectionTick" : 3 + }, + "TaskDefaults" : {}, + "Name" : "default" + }, + "JoinTokens" : { + "Worker" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a", + "Manager" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + }, + "ID" : "70ilmkj2f6sp2137c753w2nmt", + "UpdatedAt" : "2016-08-15T16:32:09.623207604Z", + "Version" : { + "Index" : 51 + } + } + +**Status codes**: + +- **200** - no error +- **406** – node is not part of a swarm +- **500** - sever error + +#### Initialize a new swarm + + +`POST /swarm/init` + +Initialize a new swarm. The body of the HTTP response includes the node ID. + +**Example request**: + + POST /v1.24/swarm/init HTTP/1.1 + Content-Type: application/json + + { + "ListenAddr": "0.0.0.0:2377", + "AdvertiseAddr": "192.168.1.1:2377", + "ForceNewCluster": false, + "Spec": { + "Orchestration": {}, + "Raft": {}, + "Dispatcher": {}, + "CAConfig": {} + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 28 + Content-Type: application/json + Date: Thu, 01 Sep 2016 21:49:13 GMT + Server: Docker/1.12.0 (linux) + + "7v2t30z9blmxuhnyo6s4cpenp" + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **406** – node is already part of a swarm +- **500** - server error + +JSON Parameters: + +- **ListenAddr** – Listen address used for inter-manager communication, as well as determining + the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an + address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port + number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is + used. +- **AdvertiseAddr** – Externally reachable address advertised to other nodes. This can either be + an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port + number, like `eth0:4567`. If the port number is omitted, the port number from the listen + address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when + possible. +- **ForceNewCluster** – Force creation of a new swarm. +- **Spec** – Configuration settings for the new swarm. + - **Orchestration** – Configuration settings for the orchestration aspects of the swarm. + - **TaskHistoryRetentionLimit** – Maximum number of tasks history stored. + - **Raft** – Raft related configuration. + - **SnapshotInterval** – Number of logs entries between snapshot. + - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot. + - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow + followers after a snapshot is created. + - **HeartbeatTick** – Amount of ticks (in seconds) between each heartbeat. + - **ElectionTick** – Amount of ticks (in seconds) needed without a leader to trigger a new + election. + - **Dispatcher** – Configuration settings for the task dispatcher. + - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher. + - **CAConfig** – Certificate authority configuration. + - **NodeCertExpiry** – Automatic expiry for nodes certificates. + - **ExternalCA** - Configuration for forwarding signing requests to an external + certificate authority. + - **Protocol** - Protocol for communication with the external CA + (currently only "cfssl" is supported). + - **URL** - URL where certificate signing requests should be sent. + - **Options** - An object with key/value pairs that are interpreted + as protocol-specific options for the external CA driver. + +#### Join an existing swarm + +`POST /swarm/join` + +Join an existing swarm + +**Example request**: + + POST /v1.24/swarm/join HTTP/1.1 + Content-Type: application/json + + { + "ListenAddr": "0.0.0.0:2377", + "AdvertiseAddr": "192.168.1.1:2377", + "RemoteAddrs": ["node1:2377"], + "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **406** – node is already part of a swarm +- **500** - server error + +JSON Parameters: + +- **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to + manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). +- **AdvertiseAddr** – Externally reachable address advertised to other nodes. This can either be + an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port + number, like `eth0:4567`. If the port number is omitted, the port number from the listen + address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when + possible. +- **RemoteAddr** – Address of any manager node already participating in the swarm. +- **JoinToken** – Secret token for joining this swarm. + +#### Leave a swarm + + +`POST /swarm/leave` + +Leave a swarm + +**Example request**: + + POST /v1.24/swarm/leave HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **force** - Boolean (0/1, false/true). Force leave swarm, even if this is the last manager or that it will break the cluster. + +**Status codes**: + +- **200** – no error +- **406** – node is not part of a swarm +- **500** - server error + +#### Update a swarm + + +`POST /swarm/update` + +Update a swarm + +**Example request**: + + POST /v1.24/swarm/update HTTP/1.1 + + { + "Name": "default", + "Orchestration": { + "TaskHistoryRetentionLimit": 10 + }, + "Raft": { + "SnapshotInterval": 10000, + "LogEntriesForSlowFollowers": 500, + "HeartbeatTick": 1, + "ElectionTick": 3 + }, + "Dispatcher": { + "HeartbeatPeriod": 5000000000 + }, + "CAConfig": { + "NodeCertExpiry": 7776000000000000 + }, + "JoinTokens": { + "Worker": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx", + "Manager": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + } + } + + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **version** – The version number of the swarm object being updated. This is + required to avoid conflicting writes. +- **rotateWorkerToken** - Set to `true` (or `1`) to rotate the worker join token. +- **rotateManagerToken** - Set to `true` (or `1`) to rotate the manager join token. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **406** – node is not part of a swarm +- **500** - server error + +JSON Parameters: + +- **Orchestration** – Configuration settings for the orchestration aspects of the swarm. + - **TaskHistoryRetentionLimit** – Maximum number of tasks history stored. +- **Raft** – Raft related configuration. + - **SnapshotInterval** – Number of logs entries between snapshot. + - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot. + - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow + followers after a snapshot is created. + - **HeartbeatTick** – Amount of ticks (in seconds) between each heartbeat. + - **ElectionTick** – Amount of ticks (in seconds) needed without a leader to trigger a new + election. +- **Dispatcher** – Configuration settings for the task dispatcher. + - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher. +- **CAConfig** – CA configuration. + - **NodeCertExpiry** – Automatic expiry for nodes certificates. + - **ExternalCA** - Configuration for forwarding signing requests to an external + certificate authority. + - **Protocol** - Protocol for communication with the external CA + (currently only "cfssl" is supported). + - **URL** - URL where certificate signing requests should be sent. + - **Options** - An object with key/value pairs that are interpreted + as protocol-specific options for the external CA driver. +- **JoinTokens** - Tokens that can be used by other nodes to join the swarm. + - **Worker** - Token to use for joining as a worker. + - **Manager** - Token to use for joining as a manager. + +### 3.9 Services + +**Note**: Service operations require to first be part of a swarm. + +#### List services + + +`GET /services` + +List services + +**Example request**: + + GET /v1.24/services HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "ID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Version": { + "Index": 19 + }, + "CreatedAt": "2016-06-07T21:05:51.880065305Z", + "UpdatedAt": "2016-06-07T21:07:29.962229872Z", + "Spec": { + "Name": "hopeful_cori", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + } + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + } + }, + "Endpoint": { + "Spec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + }, + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ], + "VirtualIPs": [ + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.2/16" + }, + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.3/16" + } + ] + } + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + services list. Available filters: + - `id=` + - `label=` + - `name=` + +**Status codes**: + +- **200** – no error +- **406** – node is not part of a swarm +- **500** – server error + +#### Create a service + +`POST /services/create` + +Create a service. When using this endpoint to create a service using a private +repository from the registry, the `X-Registry-Auth` header must be used to +include a base64-encoded AuthConfig object. Refer to the [create an +image](#create-an-image) section for more details. + +**Example request**: + + POST /v1.24/services/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "web", + "TaskTemplate": { + "ContainerSpec": { + "Image": "nginx:alpine", + "Mounts": [ + { + "ReadOnly": true, + "Source": "web-data", + "Target": "/usr/share/nginx/html", + "Type": "volume", + "VolumeOptions": { + "DriverConfig": { + }, + "Labels": { + "com.example.something": "something-value" + } + } + } + ], + "User": "33" + }, + "Networks": [ + { + "Target": "overlay1" + } + ], + "LogDriver": { + "Name": "json-file", + "Options": { + "max-file": "3", + "max-size": "10M" + } + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + }, + "Resources": { + "Limits": { + "MemoryBytes": 104857600 + }, + "Reservations": { + } + }, + "RestartPolicy": { + "Condition": "on-failure", + "Delay": 10000000000, + "MaxAttempts": 10 + } + }, + "Mode": { + "Replicated": { + "Replicas": 4 + } + }, + "UpdateConfig": { + "Delay": 30000000000, + "Parallelism": 2, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Ports": [ + { + "Protocol": "tcp", + "PublishedPort": 8080, + "TargetPort": 80 + } + ] + }, + "Labels": { + "foo": "bar" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "ID":"ak7w3gjqoa3kuz8xcpnyy0pvl" + } + +**Status codes**: + +- **201** – no error +- **403** - network is not eligible for services +- **406** – node is not part of a swarm +- **409** – name conflicts with an existing object +- **500** - server error + +**JSON Parameters**: + +- **Name** – User-defined name for the service. +- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`). +- **TaskTemplate** – Specification of the tasks to start as part of the new service. + - **ContainerSpec** - Container settings for containers started as part of this task. + - **Image** – A string specifying the image name to use for the container. + - **Command** – The command to be run in the image. + - **Args** – Arguments to the command. + - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`. + - **Dir** – A string specifying the working directory for commands to run in. + - **User** – A string value specifying the user inside the container. + - **Labels** – A map of labels to associate with the service (e.g., + `{"key":"value", "key2":"value2"}`). + - **Mounts** – Specification for mounts to be added to containers + created as part of the service. + - **Target** – Container path. + - **Source** – Mount source (e.g. a volume name, a host path). + - **Type** – The mount type (`bind`, or `volume`). + - **ReadOnly** – A boolean indicating whether the mount should be read-only. + - **BindOptions** - Optional configuration for the `bind` type. + - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + - **VolumeOptions** – Optional configuration for the `volume` type. + - **NoCopy** – A boolean indicating if volume should be + populated with the data from the target. (Default false) + - **Labels** – User-defined name and labels for the volume. + - **DriverConfig** – Map of driver-specific options. + - **Name** - Name of the driver to use to create the volume. + - **Options** - key/value map of driver specific options. + - **StopGracePeriod** – Amount of time to wait for the container to terminate before + forcefully killing it. + - **LogDriver** - Log configuration for containers created as part of the + service. + - **Name** - Name of the logging driver to use (`json-file`, `syslog`, + `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`). + - **Options** - Driver-specific options. + - **Resources** – Resource requirements which apply to each individual container created as part + of the service. + - **Limits** – Define resources limits. + - **NanoCPUs** – CPU limit in units of 10-9 CPU shares. + - **MemoryBytes** – Memory limit in Bytes. + - **Reservation** – Define resources reservation. + - **NanoCPUs** – CPU reservation in units of 10-9 CPU shares. + - **MemoryBytes** – Memory reservation in Bytes. + - **RestartPolicy** – Specification for the restart policy which applies to containers created + as part of this service. + - **Condition** – Condition for restart (`none`, `on-failure`, or `any`). + - **Delay** – Delay between restart attempts. + - **Attempts** – Maximum attempts to restart a given container before giving up (default value + is 0, which is ignored). + - **Window** – Windows is the time window used to evaluate the restart policy (default value is + 0, which is unbounded). + - **Placement** – Restrictions on where a service can run. + - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`. +- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`). +- **UpdateConfig** – Specification for the update strategy of the service. + - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited + parallelism). + - **Delay** – Amount of time between updates. + - **FailureAction** - Action to take if an updated task fails to run, or stops running during the + update. Values are `continue` and `pause`. +- **Networks** – Array of network names or IDs to attach the service to. +- **EndpointSpec** – Properties that can be configured to access and load balance a service. + - **Mode** – The mode of resolution to use for internal load balancing + between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided. + - **Ports** – List of exposed ports that this service is accessible on from + the outside, in the form of: + `{"Protocol": <"tcp"|"udp">, "PublishedPort": , "TargetPort": }`. + Ports can only be provided if `vip` resolution mode is used. + +**Request Headers**: + +- **Content-type** – Set to `"application/json"`. +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either + login information, or a token. Refer to the [create an image](#create-an-image) + section for more details. + + +#### Remove a service + + +`DELETE /services/(id or name)` + +Stop and remove the service `id` + +**Example request**: + + DELETE /v1.24/services/16253994b7c4 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Status codes**: + +- **200** – no error +- **404** – no such service +- **406** - node is not part of a swarm +- **500** – server error + +#### Inspect one or more services + + +`GET /services/(id or name)` + +Return information on the service `id`. + +**Example request**: + + GET /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha HTTP/1.1 + +**Example response**: + + { + "ID": "ak7w3gjqoa3kuz8xcpnyy0pvl", + "Version": { + "Index": 95 + }, + "CreatedAt": "2016-06-07T21:10:20.269723157Z", + "UpdatedAt": "2016-06-07T21:10:20.276301259Z", + "Spec": { + "Name": "redis", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + } + }, + "Endpoint": { + "Spec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + }, + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ], + "VirtualIPs": [ + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.4/16" + } + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such service +- **406** - node is not part of a swarm +- **500** – server error + +#### Update a service + +`POST /services/(id or name)/update` + +Update a service. When using this endpoint to create a service using a +private repository from the registry, the `X-Registry-Auth` header can be used +to update the authentication information for that is stored for the service. +The header contains a base64-encoded AuthConfig object. Refer to the [create an +image](#create-an-image) section for more details. + +**Example request**: + + POST /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha/update?version=23 HTTP/1.1 + Content-Type: application/json + + { + "Name": "top", + "TaskTemplate": { + "ContainerSpec": { + "Image": "busybox", + "Args": [ + "top" + ] + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1 + }, + "EndpointSpec": { + "Mode": "vip" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**JSON Parameters**: + +- **Name** – User-defined name for the service. Note that renaming services is not supported. +- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`). +- **TaskTemplate** – Specification of the tasks to start as part of the new service. + - **ContainerSpec** - Container settings for containers started as part of this task. + - **Image** – A string specifying the image name to use for the container. + - **Command** – The command to be run in the image. + - **Args** – Arguments to the command. + - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`. + - **Dir** – A string specifying the working directory for commands to run in. + - **User** – A string value specifying the user inside the container. + - **Labels** – A map of labels to associate with the service (e.g., + `{"key":"value", "key2":"value2"}`). + - **Mounts** – Specification for mounts to be added to containers created as part of the new + service. + - **Target** – Container path. + - **Source** – Mount source (e.g. a volume name, a host path). + - **Type** – The mount type (`bind`, or `volume`). + - **ReadOnly** – A boolean indicating whether the mount should be read-only. + - **BindOptions** - Optional configuration for the `bind` type + - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + - **VolumeOptions** – Optional configuration for the `volume` type. + - **NoCopy** – A boolean indicating if volume should be + populated with the data from the target. (Default false) + - **Labels** – User-defined name and labels for the volume. + - **DriverConfig** – Map of driver-specific options. + - **Name** - Name of the driver to use to create the volume + - **Options** - key/value map of driver specific options + - **StopGracePeriod** – Amount of time to wait for the container to terminate before + forcefully killing it. + - **Resources** – Resource requirements which apply to each individual container created as part + of the service. + - **Limits** – Define resources limits. + - **CPU** – CPU limit + - **Memory** – Memory limit + - **Reservation** – Define resources reservation. + - **CPU** – CPU reservation + - **Memory** – Memory reservation + - **RestartPolicy** – Specification for the restart policy which applies to containers created + as part of this service. + - **Condition** – Condition for restart (`none`, `on-failure`, or `any`). + - **Delay** – Delay between restart attempts. + - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value + is 0, which is ignored). + - **Window** – Windows is the time window used to evaluate the restart policy (default value is + 0, which is unbounded). + - **Placement** – Restrictions on where a service can run. + - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`. +- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`). +- **UpdateConfig** – Specification for the update strategy of the service. + - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited + parallelism). + - **Delay** – Amount of time between updates. +- **Networks** – Array of network names or IDs to attach the service to. +- **EndpointSpec** – Properties that can be configured to access and load balance a service. + - **Mode** – The mode of resolution to use for internal load balancing + between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided. + - **Ports** – List of exposed ports that this service is accessible on from + the outside, in the form of: + `{"Protocol": <"tcp"|"udp">, "PublishedPort": , "TargetPort": }`. + Ports can only be provided if `vip` resolution mode is used. + +**Query parameters**: + +- **version** – The version number of the service object being updated. This is + required to avoid conflicting writes. + +**Request Headers**: + +- **Content-type** – Set to `"application/json"`. +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either + login information, or a token. Refer to the [create an image](#create-an-image) + section for more details. + +**Status codes**: + +- **200** – no error +- **404** – no such service +- **406** - node is not part of a swarm +- **500** – server error + +### 3.10 Tasks + +**Note**: Task operations require the engine to be part of a swarm. + +#### List tasks + + +`GET /tasks` + +List tasks + +**Example request**: + + GET /v1.24/tasks HTTP/1.1 + +**Example response**: + + [ + { + "ID": "0kzzo1i0y4jz6027t0k7aezc7", + "Version": { + "Index": 71 + }, + "CreatedAt": "2016-06-07T21:07:31.171892745Z", + "UpdatedAt": "2016-06-07T21:07:31.376370513Z", + "Spec": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Slot": 1, + "NodeID": "60gvrl6tm78dmak4yl7srz94v", + "Status": { + "Timestamp": "2016-06-07T21:07:31.290032978Z", + "State": "running", + "Message": "started", + "ContainerStatus": { + "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035", + "PID": 677 + } + }, + "DesiredState": "running", + "NetworksAttachments": [ + { + "Network": { + "ID": "4qvuz4ko70xaltuqbt8956gd1", + "Version": { + "Index": 18 + }, + "CreatedAt": "2016-06-07T20:31:11.912919752Z", + "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + "Spec": { + "Name": "ingress", + "Labels": { + "com.docker.swarm.internal": "true" + }, + "DriverConfiguration": {}, + "IPAMOptions": { + "Driver": {}, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + } + }, + "IPAMOptions": { + "Driver": { + "Name": "default" + }, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "Addresses": [ + "10.255.0.10/16" + ] + } + ], + }, + { + "ID": "1yljwbmlr8er2waf8orvqpwms", + "Version": { + "Index": 30 + }, + "CreatedAt": "2016-06-07T21:07:30.019104782Z", + "UpdatedAt": "2016-06-07T21:07:30.231958098Z", + "Name": "hopeful_cori", + "Spec": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Slot": 1, + "NodeID": "60gvrl6tm78dmak4yl7srz94v", + "Status": { + "Timestamp": "2016-06-07T21:07:30.202183143Z", + "State": "shutdown", + "Message": "shutdown", + "ContainerStatus": { + "ContainerID": "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + } + }, + "DesiredState": "shutdown", + "NetworksAttachments": [ + { + "Network": { + "ID": "4qvuz4ko70xaltuqbt8956gd1", + "Version": { + "Index": 18 + }, + "CreatedAt": "2016-06-07T20:31:11.912919752Z", + "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + "Spec": { + "Name": "ingress", + "Labels": { + "com.docker.swarm.internal": "true" + }, + "DriverConfiguration": {}, + "IPAMOptions": { + "Driver": {}, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + } + }, + "IPAMOptions": { + "Driver": { + "Name": "default" + }, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "Addresses": [ + "10.255.0.5/16" + ] + } + ] + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + services list. Available filters: + - `id=` + - `name=` + - `service=` + - `node=` + - `label=key` or `label="key=value"` + - `desired-state=(running | shutdown | accepted)` + +**Status codes**: + +- **200** – no error +- **406** - node is not part of a swarm +- **500** – server error + +#### Inspect a task + + +`GET /tasks/(id)` + +Get details on the task `id` + +**Example request**: + + GET /v1.24/tasks/0kzzo1i0y4jz6027t0k7aezc7 HTTP/1.1 + +**Example response**: + + { + "ID": "0kzzo1i0y4jz6027t0k7aezc7", + "Version": { + "Index": 71 + }, + "CreatedAt": "2016-06-07T21:07:31.171892745Z", + "UpdatedAt": "2016-06-07T21:07:31.376370513Z", + "Spec": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Slot": 1, + "NodeID": "60gvrl6tm78dmak4yl7srz94v", + "Status": { + "Timestamp": "2016-06-07T21:07:31.290032978Z", + "State": "running", + "Message": "started", + "ContainerStatus": { + "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035", + "PID": 677 + } + }, + "DesiredState": "running", + "NetworksAttachments": [ + { + "Network": { + "ID": "4qvuz4ko70xaltuqbt8956gd1", + "Version": { + "Index": 18 + }, + "CreatedAt": "2016-06-07T20:31:11.912919752Z", + "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + "Spec": { + "Name": "ingress", + "Labels": { + "com.docker.swarm.internal": "true" + }, + "DriverConfiguration": {}, + "IPAMOptions": { + "Driver": {}, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + } + }, + "IPAMOptions": { + "Driver": { + "Name": "default" + }, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "Addresses": [ + "10.255.0.10/16" + ] + } + ] + } + +**Status codes**: + +- **200** – no error +- **404** – unknown task +- **406** - node is not part of a swarm +- **500** – server error + +## 4. Going further + +### 4.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 4.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 4.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/version-history.md b/vendor/github.com/docker/docker/docs/api/version-history.md new file mode 100644 index 0000000..4363cfb --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/version-history.md @@ -0,0 +1,249 @@ +--- +title: "Engine API version history" +description: "Documentation of changes that have been made to Engine API." +keywords: "API, Docker, rcli, REST, documentation" +--- + + + +## v1.26 API changes + +[Docker Engine API v1.26](https://docs.docker.com/engine/api/v1.26/) documentation + +* `POST /plugins/(plugin name)/upgrade` upgrade a plugin. + +## v1.25 API changes + +[Docker Engine API v1.25](https://docs.docker.com/engine/api/v1.25/) documentation + +* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`. +* `GET /version` now returns `MinAPIVersion`. +* `POST /build` accepts `networkmode` parameter to specify network used during build. +* `GET /images/(name)/json` now returns `OsVersion` if populated +* `GET /info` now returns `Isolation`. +* `POST /containers/create` now takes `AutoRemove` in HostConfig, to enable auto-removal of the container on daemon side when the container's process exits. +* `GET /containers/json` and `GET /containers/(id or name)/json` now return `"removing"` as a value for the `State.Status` field if the container is being removed. Previously, "exited" was returned as status. +* `GET /containers/json` now accepts `removing` as a valid value for the `status` filter. +* `GET /containers/json` now supports filtering containers by `health` status. +* `DELETE /volumes/(name)` now accepts a `force` query parameter to force removal of volumes that were already removed out of band by the volume driver plugin. +* `POST /containers/create/` and `POST /containers/(name)/update` now validates restart policies. +* `POST /containers/create` now validates IPAMConfig in NetworkingConfig, and returns error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`). +* `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds`, `Volumes`, and `Tmpfs`. *note*: `Binds`, `Volumes`, and `Tmpfs` are still available and can be combined with `Mounts`. +* `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applied to all API versions. +* `POST /build` accepts `cachefrom` parameter to specify images used for build cache. +* `GET /networks/` endpoint now correctly returns a list of *all* networks, + instead of the default network if a trailing slash is provided, but no `name` + or `id`. +* `DELETE /containers/(name)` endpoint now returns an error of `removal of container name is already in progress` with status code of 400, when container name is in a state of removal in progress. +* `GET /containers/json` now supports a `is-task` filter to filter + containers that are tasks (part of a service in swarm mode). +* `POST /containers/create` now takes `StopTimeout` field. +* `POST /services/create` and `POST /services/(id or name)/update` now accept `Monitor` and `MaxFailureRatio` parameters, which control the response to failures during service updates. +* `POST /services/(id or name)/update` now accepts a `ForceUpdate` parameter inside the `TaskTemplate`, which causes the service to be updated even if there are no changes which would ordinarily trigger an update. +* `POST /services/create` and `POST /services/(id or name)/update` now return a `Warnings` array. +* `GET /networks/(name)` now returns field `Created` in response to show network created time. +* `POST /containers/(id or name)/exec` now accepts an `Env` field, which holds a list of environment variables to be set in the context of the command execution. +* `GET /volumes`, `GET /volumes/(name)`, and `POST /volumes/create` now return the `Options` field which holds the driver specific options to use for when creating the volume. +* `GET /exec/(id)/json` now returns `Pid`, which is the system pid for the exec'd process. +* `POST /containers/prune` prunes stopped containers. +* `POST /images/prune` prunes unused images. +* `POST /volumes/prune` prunes unused volumes. +* `POST /networks/prune` prunes unused networks. +* Every API response now includes a `Docker-Experimental` header specifying if experimental features are enabled (value can be `true` or `false`). +* Every API response now includes a `API-Version` header specifying the default API version of the server. +* The `hostConfig` option now accepts the fields `CpuRealtimePeriod` and `CpuRtRuntime` to allocate cpu runtime to rt tasks when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel. +* The `SecurityOptions` field within the `GET /info` response now includes `userns` if user namespaces are enabled in the daemon. +* `GET /nodes` and `GET /node/(id or name)` now return `Addr` as part of a node's `Status`, which is the address that that node connects to the manager from. +* The `HostConfig` field now includes `NanoCPUs` that represents CPU quota in units of 10-9 CPUs. +* `GET /info` now returns more structured information about security options. +* The `HostConfig` field now includes `CpuCount` that represents the number of CPUs available for execution by the container. Windows daemon only. +* `POST /services/create` and `POST /services/(id or name)/update` now accept the `TTY` parameter, which allocate a pseudo-TTY in container. +* `POST /services/create` and `POST /services/(id or name)/update` now accept the `DNSConfig` parameter, which specifies DNS related configurations in resolver configuration file (resolv.conf) through `Nameservers`, `Search`, and `Options`. +* `GET /networks/(id or name)` now includes IP and name of all peers nodes for swarm mode overlay networks. +* `GET /plugins` list plugins. +* `POST /plugins/pull?name=` pulls a plugin. +* `GET /plugins/(plugin name)` inspect a plugin. +* `POST /plugins/(plugin name)/set` configure a plugin. +* `POST /plugins/(plugin name)/enable` enable a plugin. +* `POST /plugins/(plugin name)/disable` disable a plugin. +* `POST /plugins/(plugin name)/push` push a plugin. +* `POST /plugins/create?name=(plugin name)` create a plugin. +* `DELETE /plugins/(plugin name)` delete a plugin. +* `POST /node/(id or name)/update` now accepts both `id` or `name` to identify the node to update. +* `GET /images/json` now support a `reference` filter. +* `GET /secrets` returns information on the secrets. +* `POST /secrets/create` creates a secret. +* `DELETE /secrets/{id}` removes the secret `id`. +* `GET /secrets/{id}` returns information on the secret `id`. +* `POST /secrets/{id}/update` updates the secret `id`. + +## v1.24 API changes + +[Docker Engine API v1.24](v1.24.md) documentation + +* `POST /containers/create` now takes `StorageOpt` field. +* `GET /info` now returns `SecurityOptions` field, showing if `apparmor`, `seccomp`, or `selinux` is supported. +* `GET /info` no longer returns the `ExecutionDriver` property. This property was no longer used after integration + with ContainerD in Docker 1.11. +* `GET /networks` now supports filtering by `label` and `driver`. +* `GET /containers/json` now supports filtering containers by `network` name or id. +* `POST /containers/create` now takes `IOMaximumBandwidth` and `IOMaximumIOps` fields. Windows daemon only. +* `POST /containers/create` now returns an HTTP 400 "bad parameter" message + if no command is specified (instead of an HTTP 500 "server error") +* `GET /images/search` now takes a `filters` query parameter. +* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded. +* `GET /events` now supports filtering by daemon name or ID. +* `GET /events` now supports a `detach` event that is emitted on detaching from container process. +* `GET /events` now supports an `exec_detach ` event that is emitted on detaching from exec process. +* `GET /images/json` now supports filters `since` and `before`. +* `POST /containers/(id or name)/start` no longer accepts a `HostConfig`. +* `POST /images/(name)/tag` no longer has a `force` query parameter. +* `GET /images/search` now supports maximum returned search results `limit`. +* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version. +* API errors are now returned as JSON instead of plain text. +* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container. +* `POST /containers//exec` and `POST /exec//start` + no longer expects a "Container" field to be present. This property was not used + and is no longer sent by the docker client. +* `POST /containers/create/` now validates the hostname (should be a valid RFC 1123 hostname). +* `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:`, + to have the container join the PID namespace of an existing container. + +## v1.23 API changes + +[Docker Engine API v1.23](v1.23.md) documentation + +* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`. +* `GET /containers/json` returns the mount points for the container. +* `GET /networks/(name)` now returns an `Internal` field showing whether the network is internal or not. +* `GET /networks/(name)` now returns an `EnableIPv6` field showing whether the network has ipv6 enabled or not. +* `POST /containers/(name)/update` now supports updating container's restart policy. +* `POST /networks/create` now supports enabling ipv6 on the network by setting the `EnableIPv6` field (doing this with a label will no longer work). +* `GET /info` now returns `CgroupDriver` field showing what cgroup driver the daemon is using; `cgroupfs` or `systemd`. +* `GET /info` now returns `KernelMemory` field, showing if "kernel memory limit" is supported. +* `POST /containers/create` now takes `PidsLimit` field, if the kernel is >= 4.3 and the pids cgroup is supported. +* `GET /containers/(id or name)/stats` now returns `pids_stats`, if the kernel is >= 4.3 and the pids cgroup is supported. +* `POST /containers/create` now allows you to override usernamespaces remapping and use privileged options for the container. +* `POST /containers/create` now allows specifying `nocopy` for named volumes, which disables automatic copying from the container path to the volume. +* `POST /auth` now returns an `IdentityToken` when supported by a registry. +* `POST /containers/create` with both `Hostname` and `Domainname` fields specified will result in the container's hostname being set to `Hostname`, rather than `Hostname.Domainname`. +* `GET /volumes` now supports more filters, new added filters are `name` and `driver`. +* `GET /containers/(id or name)/logs` now accepts a `details` query parameter to stream the extra attributes that were provided to the containers `LogOpts`, such as environment variables and labels, with the logs. +* `POST /images/load` now returns progress information as a JSON stream, and has a `quiet` query parameter to suppress progress details. + +## v1.22 API changes + +[Docker Engine API v1.22](v1.22.md) documentation + +* `POST /container/(name)/update` updates the resources of a container. +* `GET /containers/json` supports filter `isolation` on Windows. +* `GET /containers/json` now returns the list of networks of containers. +* `GET /info` Now returns `Architecture` and `OSType` fields, providing information + about the host architecture and operating system type that the daemon runs on. +* `GET /networks/(name)` now returns a `Name` field for each container attached to the network. +* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it + consistent with other date/time values returned by the API. +* `AuthConfig` now supports a `registrytoken` for token based authentication +* `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory` +* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create` + will be cancelled if the HTTP connection making the API request is closed before + the push or pull completes. +* `POST /containers/create` now allows you to set a read/write rate limit for a + device (in bytes per second or IO per second). +* `GET /networks` now supports filtering by `name`, `id` and `type`. +* `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container. +* `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container. +* `GET /info` now includes the number of containers running, stopped, and paused. +* `POST /networks/create` now supports restricting external access to the network by setting the `Internal` field. +* `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from network +* `GET /containers/(id)/json` now returns the `NetworkID` of containers. +* `POST /networks/create` Now supports an options field in the IPAM config that provides options + for custom IPAM plugins. +* `GET /networks/{network-id}` Now returns IPAM config options for custom IPAM plugins if any + are available. +* `GET /networks/` now returns subnets info for user-defined networks. +* `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications + that are built on top of engine. + +## v1.21 API changes + +[Docker Engine API v1.21](v1.21.md) documentation + +* `GET /volumes` lists volumes from all volume drivers. +* `POST /volumes/create` to create a volume. +* `GET /volumes/(name)` get low-level information about a volume. +* `DELETE /volumes/(name)` remove a volume with the specified name. +* `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable. +* `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`. +* The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container. +* `GET /containers/(id)/stats` will return networking information respectively for each interface. +* The `HostConfig` option now includes the `DnsOptions` field to configure the container's DNS options. +* `POST /build` now optionally takes a serialized map of build-time variables. +* `GET /events` now includes a `timenano` field, in addition to the existing `time` field. +* `GET /events` now supports filtering by image and container labels. +* `GET /info` now lists engine version information and return the information of `CPUShares` and `Cpuset`. +* `GET /containers/json` will return `ImageID` of the image used by container. +* `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused. +* `POST /containers/create` now takes `KernelMemory` in HostConfig to specify kernel memory limit. +* `GET /containers/(name)/json` now accepts a `size` parameter. Setting this parameter to '1' returns container size information in the `SizeRw` and `SizeRootFs` fields. +* `GET /containers/(name)/json` now returns a `NetworkSettings.Networks` field, + detailing network settings per network. This field deprecates the + `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, + `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which + are still returned for backward-compatibility, but will be removed in a future version. +* `GET /exec/(id)/json` now returns a `NetworkSettings.Networks` field, + detailing networksettings per network. This field deprecates the + `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, + `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which + are still returned for backward-compatibility, but will be removed in a future version. +* The `HostConfig` option now includes the `OomScoreAdj` field for adjusting the + badness heuristic. This heuristic selects which processes the OOM killer kills + under out-of-memory conditions. + +## v1.20 API changes + +[Docker Engine API v1.20](v1.20.md) documentation + +* `GET /containers/(id)/archive` get an archive of filesystem content from a container. +* `PUT /containers/(id)/archive` upload an archive of content to be extracted to +an existing directory inside a container's filesystem. +* `POST /containers/(id)/copy` is deprecated in favor of the above `archive` +endpoint which can be used to download files and directories from a container. +* The `hostConfig` option now accepts the field `GroupAdd`, which specifies a +list of additional groups that the container process will run as. + +## v1.19 API changes + +[Docker Engine API v1.19](v1.19.md) documentation + +* When the daemon detects a version mismatch with the client, usually when +the client is newer than the daemon, an HTTP 400 is now returned instead +of a 404. +* `GET /containers/(id)/stats` now accepts `stream` bool to get only one set of stats and disconnect. +* `GET /containers/(id)/logs` now accepts a `since` timestamp parameter. +* `GET /info` The fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and +`SwapLimit` are now returned as boolean instead of as an int. In addition, the +end point now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and +`OomKillDisable`. +* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota` +* `POST /build` accepts `cpuperiod` and `cpuquota` options + +## v1.18 API changes + +[Docker Engine API v1.18](v1.18.md) documentation + +* `GET /version` now returns `Os`, `Arch` and `KernelVersion`. +* `POST /containers/create` and `POST /containers/(id)/start`allow you to set ulimit settings for use in the container. +* `GET /info` now returns `SystemTime`, `HttpProxy`,`HttpsProxy` and `NoProxy`. +* `GET /images/json` added a `RepoDigests` field to include image digest information. +* `POST /build` can now set resource constraints for all containers created for the build. +* `CgroupParent` can be passed in the host config to setup container cgroups under a specific cgroup. +* `POST /build` closing the HTTP request cancels the build +* `POST /containers/(id)/exec` includes `Warnings` field to response. diff --git a/vendor/github.com/docker/docker/docs/deprecated.md b/vendor/github.com/docker/docker/docs/deprecated.md new file mode 100644 index 0000000..1298370 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/deprecated.md @@ -0,0 +1,286 @@ +--- +aliases: ["/engine/misc/deprecated/"] +title: "Deprecated Engine Features" +description: "Deprecated Features." +keywords: "docker, documentation, about, technology, deprecate" +--- + + + +# Deprecated Engine Features + +The following list of features are deprecated in Engine. +To learn more about Docker Engine's deprecation policy, +see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy). + + +### Top-level network properties in NetworkSettings + +**Deprecated In Release: v1.13.0** + +**Target For Removal In Release: v1.16** + +When inspecting a container, `NetworkSettings` contains top-level information +about the default ("bridge") network; + +`EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, +`IPPrefixLen`, `IPv6Gateway`, and `MacAddress`. + +These properties are deprecated in favor of per-network properties in +`NetworkSettings.Networks`. These properties were already "deprecated" in +docker 1.9, but kept around for backward compatibility. + +Refer to [#17538](https://github.com/docker/docker/pull/17538) for further +information. + +## `filter` param for `/images/json` endpoint +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +The `filter` param to filter the list of image by reference (name or name:tag) is now implemented as a regular filter, named `reference`. + +### `repository:shortid` image references +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +`repository:shortid` syntax for referencing images is very little used, collides with tag references can be confused with digest references. + +### `docker daemon` subcommand +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +The daemon is moved to a separate binary (`dockerd`), and should be used instead. + +### Duplicate keys with conflicting values in engine labels +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +Duplicate keys with conflicting values have been deprecated. A warning is displayed +in the output, and an error will be returned in the future. + +### `MAINTAINER` in Dockerfile +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +`MAINTAINER` was an early very limited form of `LABEL` which should be used instead. + +### API calls without a version +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +API versions should be supplied to all API calls to ensure compatibility with +future Engine versions. Instead of just requesting, for example, the URL +`/containers/json`, you must now request `/v1.25/containers/json`. + +### Backing filesystem without `d_type` support for overlay/overlay2 +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +The overlay and overlay2 storage driver does not work as expected if the backing +filesystem does not support `d_type`. For example, XFS does not support `d_type` +if it is formatted with the `ftype=0` option. + +Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for +further information. + +### Three arguments form in `docker import` +**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` is deprecated since November 2013. It's no more supported. + +### `-h` shorthand for `--help` + +**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +**Target For Removal In Release: v1.15** + +The shorthand (`-h`) is less common than `--help` on Linux and cannot be used +on all subcommands (due to it conflicting with, e.g. `-h` / `--hostname` on +`docker create`). For this reason, the `-h` shorthand was not printed in the +"usage" output of subcommands, nor documented, and is now marked "deprecated". + +### `-e` and `--email` flags on `docker login` +**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** + +**Target For Removal In Release: v1.14** + +The docker login command is removing the ability to automatically register for an account with the target registry if the given username doesn't exist. Due to this change, the email flag is no longer required, and will be deprecated. + +### Separator (`:`) of `--security-opt` flag on `docker run` +**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** + +**Target For Removal In Release: v1.14** + +The flag `--security-opt` doesn't use the colon separator(`:`) anymore to divide keys and values, it uses the equal symbol(`=`) for consistency with other similar flags, like `--storage-opt`. + +### `/containers/(id or name)/copy` endpoint + +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`. + +### Ambiguous event fields in API +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a more rich structure. +See the events API documentation for the new format. + +### `-f` flag on `docker tag` +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is not longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use. + +### HostConfig at API container start +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of +defining it at container creation (`POST /containers/create`). + +### `--before` and `--since` flags on `docker ps` + +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The `docker ps --before` and `docker ps --since` options are deprecated. +Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead. + +### `--automated` and `--stars` flags on `docker search` + +**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +**Target For Removal In Release: v1.15** + +The `docker search --automated` and `docker search --stars` options are deprecated. +Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead. + +### Driver Specific Log Tags +**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Log tags are now generated in a standard way across different logging drivers. +Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and +`fluentd-tag` have been deprecated in favor of the generic `tag` option. + + docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" + +### LXC built-in exec driver +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The built-in LXC execution driver, the lxc-conf flag, and API fields have been removed. + +### Old Command Line Options +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand: + + docker daemon -H ... + +The following single-dash (`-opt`) variant of certain command line options +are deprecated and replaced with double-dash options (`--opt`): + + docker attach -nostdin + docker attach -sig-proxy + docker build -no-cache + docker build -rm + docker commit -author + docker commit -run + docker events -since + docker history -notrunc + docker images -notrunc + docker inspect -format + docker ps -beforeId + docker ps -notrunc + docker ps -sinceId + docker rm -link + docker run -cidfile + docker run -dns + docker run -entrypoint + docker run -expose + docker run -link + docker run -lxc-conf + docker run -n + docker run -privileged + docker run -volumes-from + docker search -notrunc + docker search -stars + docker search -t + docker search -trusted + docker tag -force + +The following double-dash options are deprecated and have no replacement: + + docker run --cpuset + docker run --networking + docker ps --since-id + docker ps --before-id + docker search --trusted + +**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The single-dash (`-help`) was removed, in favor of the double-dash `--help` + + docker -help + docker [COMMAND] -help + +### `--run` flag on docker commit + +**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)** + +**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +The flag `--run` of the docker commit (and its short version `-run`) were deprecated in favor +of the `--changes` flag that allows to pass `Dockerfile` commands. + + +### Interacting with V1 registries + +**Disabled By Default In Release: v1.14** + +**Target For Removal In Release: v1.17** + +Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the +docker daemon from `pull`, `push`, and `login` operations against v1 +registries. Though enabled by default, this signals the intent to deprecate +the v1 protocol. + +Support for the v1 protocol to the public registry was removed in 1.13. Any +mirror configurations using v1 should be updated to use a +[v2 registry mirror](https://docs.docker.com/registry/recipes/mirror/). + +### Docker Content Trust ENV passphrase variables name change +**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, we're also changing the corresponding environment variables + +- DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE is now named DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE +- DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE is now named DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE diff --git a/vendor/github.com/docker/docker/docs/extend/EBS_volume.md b/vendor/github.com/docker/docker/docs/extend/EBS_volume.md new file mode 100644 index 0000000..8c64efa --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/EBS_volume.md @@ -0,0 +1,164 @@ +--- +description: Volume plugin for Amazon EBS +keywords: "API, Usage, plugins, documentation, developer, amazon, ebs, rexray, volume" +title: Volume plugin for Amazon EBS +--- + + + +# A proof-of-concept Rexray plugin + +In this example, a simple Rexray plugin will be created for the purposes of using +it on an Amazon EC2 instance with EBS. It is not meant to be a complete Rexray plugin. + +The example source is available at [https://github.com/tiborvass/rexray-plugin](https://github.com/tiborvass/rexray-plugin). + +To learn more about Rexray: [https://github.com/codedellemc/rexray](https://github.com/codedellemc/rexray) + +## 1. Make a Docker image + +The following is the Dockerfile used to containerize rexray. + +```Dockerfile +FROM debian:jessie +RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates +RUN wget https://dl.bintray.com/emccode/rexray/stable/0.6.4/rexray-Linux-x86_64-0.6.4.tar.gz -O rexray.tar.gz && tar -xvzf rexray.tar.gz -C /usr/bin && rm rexray.tar.gz +RUN mkdir -p /run/docker/plugins /var/lib/libstorage/volumes +ENTRYPOINT ["rexray"] +CMD ["--help"] +``` + +To build it you can run `image=$(cat Dockerfile | docker build -q -)` and `$image` +will reference the containerized rexray image. + +## 2. Extract rootfs + +```sh +$ TMPDIR=/tmp/rexray # for the purpose of this example +$ # create container without running it, to extract the rootfs from image +$ docker create --name rexray "$image" +$ # save the rootfs to a tar archive +$ docker export -o $TMPDIR/rexray.tar rexray +$ # extract rootfs from tar archive to a rootfs folder +$ ( mkdir -p $TMPDIR/rootfs; cd $TMPDIR/rootfs; tar xf ../rexray.tar ) +``` + +## 3. Add plugin configuration + +We have to put the following JSON to `$TMPDIR/config.json`: + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A proof-of-concept EBS plugin (using rexray) for Docker", + "Documentation": "https://github.com/tiborvass/rexray-plugin", + "Entrypoint": [ + "/usr/bin/rexray", "service", "start", "-f" + ], + "Env": [ + { + "Description": "", + "Name": "REXRAY_SERVICE", + "Settable": [ + "value" + ], + "Value": "ebs" + }, + { + "Description": "", + "Name": "EBS_ACCESSKEY", + "Settable": [ + "value" + ], + "Value": "" + }, + { + "Description": "", + "Name": "EBS_SECRETKEY", + "Settable": [ + "value" + ], + "Value": "" + } + ], + "Interface": { + "Socket": "rexray.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "AllowAllDevices": true, + "Capabilities": ["CAP_SYS_ADMIN"], + "Devices": null + }, + "Mounts": [ + { + "Source": "/dev", + "Destination": "/dev", + "Type": "bind", + "Options": ["rbind"] + } + ], + "Network": { + "Type": "host" + }, + "PropagatedMount": "/var/lib/libstorage/volumes", + "User": {}, + "WorkDir": "" +} +``` + +Please note a couple of points: +- `PropagatedMount` is needed so that the docker daemon can see mounts done by the +rexray plugin from within the container, otherwise the docker daemon is not able +to mount a docker volume. +- The rexray plugin needs dynamic access to host devices. For that reason, we +have to give it access to all devices under `/dev` and set `AllowAllDevices` to +true for proper access. +- The user of this simple plugin can change only 3 settings: `REXRAY_SERVICE`, +`EBS_ACCESSKEY` and `EBS_SECRETKEY`. This is because of the reduced scope of this +plugin. Ideally other rexray parameters could also be set. + +## 4. Create plugin + +`docker plugin create tiborvass/rexray-plugin "$TMPDIR"` will create the plugin. + +```sh +$ docker plugin ls +ID NAME DESCRIPTION ENABLED +2475a4bd0ca5 tiborvass/rexray-plugin:latest A rexray volume plugin for Docker false +``` + +## 5. Test plugin + +```sh +$ docker plugin set tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY` +$ docker plugin enable tiborvass/rexray-plugin +$ docker volume create -d tiborvass/rexray-plugin my-ebs-volume +$ docker volume ls +DRIVER VOLUME NAME +tiborvass/rexray-plugin:latest my-ebs-volume +$ docker run --rm -v my-ebs-volume:/volume busybox sh -c 'echo bye > /volume/hi' +$ docker run --rm -v my-ebs-volume:/volume busybox cat /volume/hi +bye +``` + +## 6. Push plugin + +First, ensure you are logged in with `docker login`. Then you can run: +`docker plugin push tiborvass/rexray-plugin` to push it like a regular docker +image to a registry, to make it available for others to install via +`docker plugin install tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY`. diff --git a/vendor/github.com/docker/docker/docs/extend/config.md b/vendor/github.com/docker/docker/docs/extend/config.md new file mode 100644 index 0000000..096d2d0 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/config.md @@ -0,0 +1,225 @@ +--- +title: "Plugin config" +description: "How develop and use a plugin with the managed plugin system" +keywords: "API, Usage, plugins, documentation, developer" +--- + + + + +# Plugin Config Version 1 of Plugin V2 + +This document outlines the format of the V0 plugin configuration. The plugin +config described herein was introduced in the Docker daemon in the [v1.12.0 +release](https://github.com/docker/docker/commit/f37117045c5398fd3dca8016ea8ca0cb47e7312b). + +Plugin configs describe the various constituents of a docker plugin. Plugin +configs can be serialized to JSON format with the following media types: + +Config Type | Media Type +------------- | ------------- +config | "application/vnd.docker.plugin.v1+json" + + +## *Config* Field Descriptions + +Config provides the base accessible fields for working with V0 plugin format + in the registry. + +- **`description`** *string* + + description of the plugin + +- **`documentation`** *string* + + link to the documentation about the plugin + +- **`interface`** *PluginInterface* + + interface implemented by the plugins, struct consisting of the following fields + + - **`types`** *string array* + + types indicate what interface(s) the plugin currently implements. + + currently supported: + + - **docker.volumedriver/1.0** + + - **docker.authz/1.0** + + - **`socket`** *string* + + socket is the name of the socket the engine should use to communicate with the plugins. + the socket will be created in `/run/docker/plugins`. + + +- **`entrypoint`** *string array* + + entrypoint of the plugin, see [`ENTRYPOINT`](../reference/builder.md#entrypoint) + +- **`workdir`** *string* + + workdir of the plugin, see [`WORKDIR`](../reference/builder.md#workdir) + +- **`network`** *PluginNetwork* + + network of the plugin, struct consisting of the following fields + + - **`type`** *string* + + network type. + + currently supported: + + - **bridge** + - **host** + - **none** + +- **`mounts`** *PluginMount array* + + mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts) + + - **`name`** *string* + + name of the mount. + + - **`description`** *string* + + description of the mount. + + - **`source`** *string* + + source of the mount. + + - **`destination`** *string* + + destination of the mount. + + - **`type`** *string* + + mount type. + + - **`options`** *string array* + + options of the mount. + +- **`propagatedMount`** *string* + + path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins. + This path will be bind-mounted outisde of the plugin rootfs so it's contents + are preserved on upgrade. + +- **`env`** *PluginEnv array* + + env of the plugin, struct consisting of the following fields + + - **`name`** *string* + + name of the env. + + - **`description`** *string* + + description of the env. + + - **`value`** *string* + + value of the env. + +- **`args`** *PluginArgs* + + args of the plugin, struct consisting of the following fields + + - **`name`** *string* + + name of the args. + + - **`description`** *string* + + description of the args. + + - **`value`** *string array* + + values of the args. + +- **`linux`** *PluginLinux* + + - **`capabilities`** *string array* + + capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security) + + - **`allowAllDevices`** *boolean* + + If `/dev` is bind mounted from the host, and allowAllDevices is set to true, the plugin will have `rwm` access to all devices on the host. + + - **`devices`** *PluginDevice array* + + device of the plugin, (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices) + + - **`name`** *string* + + name of the device. + + - **`description`** *string* + + description of the device. + + - **`path`** *string* + + path of the device. + +## Example Config + +*Example showing the 'tiborvass/sample-volume-plugin' plugin config.* + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A sample volume plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Entrypoint": [ + "/usr/bin/sample-volume-plugin", + "/data" + ], + "Env": [ + { + "Description": "", + "Name": "DEBUG", + "Settable": [ + "value" + ], + "Value": "0" + } + ], + "Interface": { + "Socket": "plugin.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "Capabilities": null, + "AllowAllDevices": false, + "Devices": null + }, + "Mounts": null, + "Network": { + "Type": "" + }, + "PropagatedMount": "/data", + "User": {}, + "Workdir": "" +} +``` diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png b/vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png new file mode 100644 index 0000000000000000000000000000000000000000..1a6a6d01d2048fcb975b7d2b025cdfabd4c4f5f3 GIT binary patch literal 45916 zcmd43bx@UE)He)x5RM?JfJle5lt_rQlp>N6($cM@(x9NyQUU^sbT>#NA)(SC4bm-w zNPg=w?&o>uo%!bb^P4&MJ;U}~wXeO`TEAH5*`3>RXU|Zc!N9;cs~|6?8mEj#}{hMznYINBu?&oe6G$A5M8Mr ztp2%?yOZk~NktGSO(E}t`um}Kj_Q>uvLRE#Vv_j#d5(dNzJ`Sk$N2q`=ttiPm#IZt z{P`34KQ{W{zd!!}_0JMA*zHq|{>*G_?Q1M*>9Dh#VM&LRcFg&rSx zqV8j@3jHsWhM6dpm8o)fUw9v_7<@n2sL#1iD7KdpOnpr=U%!@JyZC-ld^Fc=n`(Vf zM)_vzcntLu*&9zJ`cVqiE^F_shw^Vnd;OY=O}q7q&(Hq!q}M^-d*1oauSeN#GtfA# zk7;ij$t%$jUG;sZpjTY`IUxJC_u*EcN>L!KmzLE~zAaVT!R<_nr_a?GDsE&_@Fi0) zP6ZiKT(Jn3mgzU2sByQuS+|h%I9TZ5=Z4@6OjFa=YRMOo=jYgFK8Dl#&*oRJKRBG@ z_I8}AvEDoSIXO@!_2i5&pJ~T8x6MWzY6I`1vQ&e5FZPXG?Gj3b_pTduQ9oglMuoy& zCkj8zwnkoMpS!nhmFGdZ$xQRJ(B*-gYj@1vJ1tQaX2DAceSOW9(*3ISU{|5!cH?dv zt+%?PzS}hR?0d05y!T=k_|2RWuepBSI^6zp?fH3*`3A?ij%ezK^Ig0h-Pc%@lbn96 zj`T%{dwa>naB5Q#iXRr5w1rX=e8P0zWwDv48NsZ6LB_Q#pDb3(sGKAk`rdg(gp5-w zoc&h>qa4-vd*_GWI%B^?2tPE_xU{U2DtlIg`{84{qUgQ#VN{3O#X6@YRZ|m~C5;oz z&gl<<*Df2><)&uZ(j6Yn&FAaieo{^4_T0Wj_Aoi_AbG-jDJ?cpaXVJIGlomUYtLZ3 z(8xd4X{mSFTTob7doH8%1DPk$TFmvwE<2JshbOe6Zrwd;H_7GJ$Ev7d(LNOY{*f`% z`*=I~_U=>zZNAGodH(rc8>fqld3JaPHqKth<O{E%xZw@1v&Si_61$gmMUD=>Y9uD`AKSVZ);YO=~aOVtu2P{+n;3-nzPj= zq^!!r-LRq5JxbRW?`9|`52jQ4bsHROR#xT2suU(rD{t+Dw9#JN`J^&**M3L3o1ONk zTgD);yZhJ9VvqEIL@bVD@X41emc5x_p?8b#E6Dj`VXKRb5&m!w+Q%cmOmXgT&T7cZ zNWHl&&e>YFZ^qct{jSbU-k)lV* zTNPn%C^&YXL{>-d4t5*X^Y{~RrhF+$*>*De@*=JBezPuz6q>?|e%P3k1dY-sr|Py^FNW@{3|+gd_x@e} z!J}b1>N=H074qPY#Tp0x?nch=&9Fk3H65ID0(Di(d&jsWN6G;>XZP$z%RlkjjR`Rl z2ESk2{L!a;alv{ZdrqswEQGy8JxjIK{o!M+kd08K6j68qcEbTZ5%ouF_MvQQ=|Wb6 zxt59cJ#U}n$J5In&e=ansD5+-yLx2noWZ2qVUgodzFz3hN$<(b6*cASb9>)AxzE0_ ziw^U$cx$tX>V&N)a=aDhok?jyQ#tWYfs{G!OQP^><-vOO*^6h$FRu>QKCYN;4Y^2g zNK1cN3^p(y>!OFpj$HLFUMfR6wSn#Qi@bsIUVMLBJhk(|dIn>kJkO<;zhqI?Wc*4& zJhWzeT)+L3#)e|btZms-c6)1}?M~GB1E;#i2Nho@%;t|puZ_{+cV=axNydA|Y*z+y z78*j()BRa$trh76)FMKX-Gq*~q(>oWhX|JDhKbt8A$#>ly9x1pjRM=Jx;t>6o!2Fx z5;8pGf7565aesejr6Xyh{$v8P(U@SiElhC6`+Gaf{7nLHnyW<4B4bq#km9;|J0cNy}-r55^xow;zzj zB^tk6P=EYhjaTzm9BuqY?aw4NeIcI7;7>IWvyRQ6T#jTPRI$fAio%&1dy5V6QJO8T~s7rs(4oNw&7KK${cDNFc4aJ^?&w&$A} zyRzC(u>4)C=$OU%YA)UgN&aZl6f&wI>PDEp@kOIUS;FE6x^? z-S~TE@uMFv@6~x|6VQleex4pjn|8)%9PFvRce0GGcVzIP<|v9pD1^HXtjq1S1ybu8n#%bC+FPfSngyMl&03W^N?PvBi+_( z5*&97vHIivoQ^cO&)c5N(ogZfckzFrU^Z09wcc!@-GaCu@0uBURMexU-S{0-&HjyQ znqsTz_G8Pgs|z0^J_My62;2^MXq&M>p(Y1UV|1#mYa@@C+>{(TpmwMCq z*VVL^o*2&2thSTo{AB(%Yfp>US|?G3GddBmn40vpX~&~qbEGRlP`R-8G&d&9L4wXY zo=TDXwwWbtI-?AIzaUaK;g=cYSEh}ucNJZKPN24>hglm+{pZ@|Q)qMd6VK~9Y&Txh zxFdM9@ z&)XYnrlw-9@T%2^Jh$YqysOeWL}geGVU|=Hzw^FO8RDbcbMkHx822u;FTZf2CbH-i zGVAc*Z25%Qfl*F=#)y|*hC~HSRFUv-Ic_O|2z6cQEC2bl`Y4?$oP=%O4V(u^+{Lys z;2HC`w0C$WLo2ZTvz&H-n55=)nWX<|D|IH5*5HoYH9rlz;&`8a8YwXs+}%pQd&pRQ zAbPwv8_92ed#6>LN$BBxaK!BmNLw-+l)BqW1cYkTc9V5^6ZI)!MPccZ+oEC{LKW79oZM>U8E(-R)DA}A5 zIV7#!Y{DysP}?Z|9s(#k-jRgl<3)$#L-$&18|7p%+f|b`E02t3Nt!rp&N|7e(+?*d zS@J!$Dw;l(t=^$NDDKe}Zv5V!C|uIET04?<5Sh_DJx^&>h1L zIK8w?y%dESx5y$JrRU*tPJv>**U>kBLa{q)bVNdrhM~;VGxiEHj57X`bX_E?XaEb7 zrHgQm*88w0j=eV`Z`iN+((Oc6H?Hq3FUYaWy|!A&t-ikVI@)KyDf({b!7`rYaalX3Equj=e#2 zyz3Re>5s*p@5culoxvCQQO2n0c9vvy-EUjNCSmPwZ@jp_|6^%6dUe^alQ!6R(TBXG zanhva`S}hxspKJD2x4WU6xZWK-HX4AOVmNFs_xuP=!CCq6=6sw3z6|nFs-=$?#5)n z3xaksMm}7_zDh?ENnuEQC0A_kC$+UDFztMr8+V;Ne|^S?j3%yFssUFqS6^ zTLv&bd8wSDQPeSDcf`>B=s?lDqWnxib+rg=vg(@!U$m>fXf2t%DOMg`+8DH3TPqmP zntAd`Z&0bVZbBMM*{tT{t;fA4ujrZ}y=3cuOZ$>gq1EE6!69<5_R+waj!O*==1ts) z!J3Zw;UW_P&N;q?XP%rlTkv`c?Y(&8&+DEhq)B8qcqpuem3wO0uEjwPC_vi)r*fh42NZj=S&#j7%PinPJBl)_@ zTY1AlqP2Oeg&l1*Bq(kUr?ZcqVCsLqPoOoP`YPkyJBrxCbNb0`FP4dNAC&3V^RO?N zwdv~a8C#d-N%fG~Wg8}bZyg_#+o^u#B<1>(m*c15>#&u<4$oFA+7e@f%t~Ebd;W(8 zKQG^PnHDYogo}WRypKNR|HTv+B{qp?mqo3<}UUF>%+W3}v!+a0E z$^ZKHQn}wkCW7Hcl=MX^MWZhZ$=>-k{ktgy_O&}Pl)S$7vQN&OZ_e#E=-heL(}b=HqOsc@BV32x_V&wbFhQ`mPk z(E}?lLnGg1h1_>=^sY#CnFih1^?@EJAk=68m1rJlv9504ugKVTv9?!s*%wJhmCbUf z*4%5`c+lXD^&xOxgJnxq++b%9iqdGhhC<`^NapCmKFdNBxfhdwRXe?IUm8Lm{%u_bi0U2D8)xEB_HOGbkY`631^n91a87 z8ok*X92KA66`tuMj-!0&X{7yw)2#guL-S0kK~s$B8P;{3oo2j+!2$zrH*-7>olOS* zy&S{+(_(^?-Y3^vC=cmOpwG3 zQ3{p&Z33Z!>!3O7P!{k_2L4E6kJ%SS$X$XgRZlr15C_8>7F(Q@9Wu zL6VWDt@ih#gq0_iVr? zg7^pCZpg5WWZ(yS^2_&v(6b{$#&2KQSl{Q|aUPYzFEaW;kLJB&Cijm}GkgtG5kR^Sh!qLnC{QPZ2t{N-GUd=Eb)r^9}DN>495(<;>G?qAQ1$#TmS`Y z3FF^aMpAg&$FEt0KmGR$M5Yjz7xPRc(J51x|Cqf&I7Z97_1u30^p_+gNsLpvLjTc$ z4I-FlNLISy{QeElHN#tx!jBsH`=T+>S}gG1USClAyUsDt0deqVgM9g}{e5qAm@6n7 zHu<`fZ*Et8!H3A_dzTLp0 zXX!4rR2x{JdAwQ_KDV}>)B~GGkCkLYsn~!tMUxzU zID~`?)S3%Iz0%L7>-U<3~m&gnk-Nb>Afk*2&C{j z+PTZDD!#wSs9kCifmD7m-|yci*_6RBuz+1$P)Fo>{A=OCw~xfBC+>=~D;})t7l5`* z9(p`EWZ=yuXg3xPsb1uj$?-|!lhF$MPADYjXixS6_g6xXYYNGFy{GlT+s(Xa^FYHlhMcRn}pjL;ztUac{38b`5xJzDXF`2!QRwD2UHjU!02xKa=l!|beOYEu z;l(P8AE_%%-bxl5pA}aIj`ZD~H&T8^o=^8K`KS~(`r~xMZvEB~&9R(LIVoVa#&E+B zYX8V-AWmG{w5TevdW&vSNzX3oh;G`6^R08&FJM4T2==C%0EKXbOXxBE?NQonauV^O$xeayQs;++Y`Fi{4g(R_qA@UzcjR*bqcc_)IUZh+>ROBY*q zy=N~QF`*xWk5`HH&}Ga1%!sKIT3d&y2FwaV8|7m%irNpJn3lwcRpy4MG}n!Z?2QJw zq|i&y7iLz4uNk zKYMLPiqX9%K#+Fie=W0Kv6y*OU}hlL&9oY7i+biY^@00?t`8|=^9`sIYHsl7Pq$)7 zV$%C39OH4He-b=(V27l|5_2yK4>Q%hlsT9CZ8Wu(6LCXhyTEffbo{e+HbiZ;c6Kn!ckPb`v6 z?b0hD$G1>JOY{5)5mT-A@)UO4Oo@LaWAI^9$U}GYta_kyAb|qRxas$A?W)=8Y)hjR z1wejDrYj{}0tTsm>Zu*U@ehTE9_u)A7^#f7)Yn|pbwyW+d0L1pHW!jGRjMD%Df;qr zc`S6_-Qqdf9ksts_QI0+)?MTG*nXPcD&ON8K0O()cFifd@q*0x;lNk6A9{!O9*0|P zR#6vsTGiPT9rZTK)*qM|xXLm6eip{UXP|h8tM@TkT>Pmh6@-JrsHM~}vFu*G+D95` z)}!TlB4S>Ll_Klo)fFOM5CC3(*DEr9A~Kn!#xm;x#P2{=M$)4Wp}^P37J9t_kK^bC z1oHA2;OLNtZ@!`hM(>QwDT^`6g*xHWhz=)bdbI${9XuoIb?8>G_4vo)o0+_ORjMu4 zW0ggWKp1WT0uW9EO19{^x!vrY9}x(%KQ~%vB;5wwc`#qH_p!UP7jOivRr8+`avmal zk0Ze~&}CE>Xdf-o&(=hXSu&k5X6aue4`35}`Sin}ZiSujne*)5J||*1!Y8am(GCtP6p(s z>}^8PO&_r}huXW=1oJ0KGA^zs0(@v=7OEo_+=p@f(+$#3IQw$2?cv!axE;8SY$?PH876@*NJN5K7F;DadLuIT-8jTRVqJ85cKL~!M6tEJy z5Zpt8y7h_<^;M3C-IxFoBSf0r@ds8MfI-UC3y!BV)bfDUw>-+2Zqqdl!CQmz`b9xI zbv4wy_3BGA%IQ&{zchyp&z)s7#NE_DD$8Y#M?aXT$PAvtrG!11!xxR^i>5q1;?2*`qS_B0Z52 ziQ?2&-N??XQ<$1=frpO0toOhw2XbWg?8Al6?Xc|LH8V}z^-G(@rC655)u8n)z{E-{ z_=L%D5tGB0Xet5Ygyq&+C7#1i_ zE%Z190HY^tiuWess_j>GgE*q4&FK$M;~HWw4;@cO^?29cdR2dC`h`KA$1l_g3sGy_ zXZ)*%a~F>by1W=flsQ$qPw@5=GxDtUhKH_4Jg?s_CuVZU^`nEF8fKVRo8e>6O`+77 zSY3cIiGkYlsR1UNCRKVQ!9T;BN;$N2zH}^%0JHYn-N~-0jIG|A+*z)Ji*qBJ^u2^0 z<1xOw#L90+hH#T<8n;T%NFB&F(za3!vwXVJR6?jy?eXhDwD6(O7}x68u?(Y1*U6=Mo+~~*b7w_W|O1lEpwC6ek*li+0V@pI!Vptc6@7?XQcw8#f8bmLBA8oi# z#+bc#l`z9($cOu;gKDcf9hD&Qj~|INj(wK*u+zk7pL0!wUYJi+SkIA+K7TWikoNLM&35Q_vNnge)(uKj8>bq?aR)yioT0#K z5!A$`zV)fHZv1?X$z~qz@oBBMmduOZsvMRSf}$uM@sOn}xjYgVO*&2)qr9Xj%mi_Q zW9S#{Ur(c#$ z&R4JnkB^TIIzeze^m-OvC3^kJ#?q*d@a3N$q%jy!f(yJRt*pss4p%Y!Tx4Yo_;5Jc zJX#Y(F8=cZ%thbrXRE~*O!RENS3VKg-D;oO5Q_?ZI?wZX*Jg*({)hKngL4l>*8`I< z5i2V7=&Nk7qR9^hDO_2fwRYbK?a6X8AH?0mWa@6GZNBN>Whzo%vz;8*=nP&8}XQ$w6!Siw;ZP<`+ z*gE&&ty{?yC}tZu`mN3$qm7%FYI+21hEwdWQk1ue<41`n_GA5;$Eqfp;;c6NM7bk& z#w$mlFwg^s(EfUTSOj*k&9p9gVhX*)rcZ+vJL<x!Un#tu4m5t{jkG=h0UOt6(V=zo0CZPzLcJa8`e2Qjb(rs8m#xUU2q|WAJ z60PO=_BY*n%y?v+nO@{LsoX>iZiSMK6&5{GXV9UNQ?DBP`ifhyr5f+I)3We%3n)92{igaPb0467W&o`dy%5KLz}{`G!>m+ zpm!o}8jvf!p07LhuKr;CS|gu9|0z!r-CP%+J8XCJ6Rk;T0$*XE(0=--SF(hTT=Z*l z@jY?Xyol)Bo?v)=$GZ8RS0m^kCTY3b9sBW@ZZCGCp?&YhKC%Lt}pFRpOwu z-51QdCIu`gGhWSVqpvrWm6z@n`jMo{QW%|SAw0kA;XhJ*f|BMIwX_wGB1vNByZaj# z`pOeEuoCIFW7HEr^hqX{`tbDZr;I9PtlKD?Iq8}E7YP4=U*qQ(6UEPm_zZh6C&K(9 zNcN%((-@nBex1$d)!?PPNDMHTxUowL`7+%x+K^J_feg?m>RIsRexA|=4%<)dMPFaOdV_kiwr_QLEw^IynLR}Rp++2x?;(Z6%z!8sj! zf^$)S5qtAhK;y%9;yTKIc>qUAIOpZ~i0Hq7-FFxP;KM)?VI9t&wg{N(Gk-(8kY<6a3h)*2o6iSf3DLCE9s=5EDa3?tW2WD7!bMGnA_!VHxc@M(F zqCcaqZ4rhl*PPyEJ}9-R`B`SDP*QOEX801Ll8+cu#HG*Q|06n7iGenH62S7`00T19 zz0KM*{}CPRFp*i|oB!r9T;d&KNQxKg{EzvNzX8XbCrSE``3SlKb1hG5?*1Rqkp)xv zPO6FYzkvv3vMpyg!~P>W&ciY4@^1gbeDuO>Gbkm_{l|Q~V~~)HJcCvq4rxfc8T$>; zl?(ri-9J1?KsYQJT%-LC07UnS0RyV{Y;rV%xOZnkx6F!JK|#Sz94f9<}b z_L_fA)Q)`l(iCdEtd7tpq;2DMI~Wpi#0gGdy-iH=_mZ z_1QFY^KqR+;*b+bq zK*rMPw_j(^0KF;yVBs^@TxV=7zygY`p*&p{jcoM@QTOlizy8q{<8Gs!Euh*q z48W${3r`u-E-^#PYKvmuA~t$=AHYr|z4Yk|eD{OiDJSb^Yv!?o6D!)XFTEY8pm*R< z@;fBv5m5zNN?Dt70sI?tyvoC_-@hpcoB<$DgWxTRcQTAltHUa+if<*8OyNZ`iTFA_ zBcSWu;Z}VY1aw+Y)-cduBGiF1hTvPWT0W)5`f)OWsAujxxBk5~8d0}9q14wdIW!GS z@2-tnEuZdV0fae_F;I@!UE1bP-D;zA>nzc)eZ=Rweox@}_Wnu%hwkK~6)q5fUkK*} zt#TXG-P0+xpx2SO@$L?p)bD^rM@9dv?>2^aaItCUnPxw6V-y9QJ`tS|yGAw>u(5(- zaUG59)VV)?{j5J(lR4WbglpM*m~jpKJAIY;)T*^}Xcqn9_{hk&>AoPgaDu^!KW5*m zI{4OyniA7aGG;{~RT8uAuHPZ}7VNxRJe3EuSQPXPR*7iROSsM0C>#&P)-c+FL%Gk1 z!l|>`nVaOBx&{PNnqi}#D)_Fl4C?C; ztun94*Sif?adZjWp(@x+vQ(vHU_x*3=mltqSr6u_j*uktt zOe&1#`aAEFV?MC6I8!|nKgS%A(#3~FyD4u}B-NwH6Yqr?JNy1jl?T8dT@Ze7e{A3r z!TF|aD$a!gZr2ijCEu@8BG&^{D23Lnl1X;cB-;7(-k}Q%WFY|J#bC(E`!Rp~;i%-p z$m}>?lS_pT_X&YeH=N09@%%bE?gm=(BJbY-l_b%`d2qB|y@ARRq>xfhfhq_~ZpdvO zPo>y#R(8iL$}Qh~@iETB->d!vmnoP|A@=g?M`z!AYf-#FsggSEa4S&zoBxT_Sj^!( zeyGWrt;cNmOj98~89jt+W6xtN_3AuEbjfP|2n$3QnET-Jbt+w+yV8XEzTADo=e?th9G2+s|^GX1gdW8$?xColpkpTwm(f*a%J-OeaB#)8`2 zcuw8&TM$0XD2hPz7GQ8(`zkPd4`jdlz`6CND_s=!+%I)n?pHAToM?G3`KoPv6uX8Z z#Dq(bI${yx)VkVrBL-L(1>jxNJbr!WGik+lreWl$dN3E+lPY%;?jy~j=dG*=I4_vK zR@ieRu297n#IM01+YYo()MY&#voA@u!$n|mj5Ywbs`@d|iNyUM?j{*{?nL+J>E3-X z-^B)Q%a_kcS)PBGCbD;-eDWah&-!72iDmE;E`D+g*hx^q8C}PnV-f!%B9E&sUS@7S;{MS#8`;%pi)&EidcuO z50||#Qs-H{I##7tw>N_)Nu_K`_o?dP!aE4?y>Fy(%IXapv6hAkk`FgO5GsmF_(*)C zn}FE_nd1hSeO3j&K$2v;mh3izHYJ>4B;=|T9m!hx%=KqY+=o>z^Yme}Vx#{fNJUmr zr=HCC9s~2584(~6LcwbsV5t5UOg&cbrV%&Hgqg_wk3~KYtAhko@H*SBf@~z;KR3^A_D=xy^`i zf5PwOWXOWGw0a-k7M+A4D>oIO1J-Ls{#!2$OQ)gozWVuIa#vXTu1F(W7Iq9;E6Tj{T*mWVjC-@Ufm{ET$D(Xvh}Pe;HR_u@3Ue?$nlCO_8x zoXA;T>f8lSUG~zYhe>f>O&^~TXs)e-)D%o5tjbbxM7D-CT@Ju=>1$ce73;xUz;C6< zB(H^J_`dNj? zagBGIsSGoO^M2`wu1n+laRu}P`Ng!!xI9_!Evgyq%z#M}WCZmc@0eSD`s|w!%V2~c z#UY}R+B0OwCVUIEkpjT>=!eU|9Q0rLeE4fSIxO%r9wS|xj&J1_86@X7bW;v)YhoH0FX%P(u4lmK+rdp-Qj<-a&HTXY-CI1*IiJb(GiT(Q@0Dz$3bS zDgkCATZ}^3@Aam+`@(D2cfh@htp&H?I*>i3{RAZ?7dJXgjwY8Ba*8LCQ!=>x%2ABC zo1{RcFgI9H1OYk5j1nm;V=3MZSBS9+a{J(SQn61aQ zg0iXMJ`SOx(5I$ycnPW@&k;vl(xYWIjnkSFh;C)okE*+*wd6m=rW_nVr;N*e>l31y zeII-HB`a(sUFK_>6=QI`l%s#R1Jv_h2?PJn<1eZ`QPk3?lda#$BGmgujqKkuxW$ye(fBSx*=xX6z56JGRdS-_>u%$UcO`uDDKaGXbD6|u`+zE?@- zW=ZDd5b^t7SEXRx|30r8%2*>9338xE(IMRiPzki0dq@dsnP?Ioa;Dl-yPAin!Fs4D z{!$E;mJz?#N+z(a(2*d#bIRXH;sYm=`{)d@5{uCk)TEc#F}>48KtH$f$H}n-e?9{e zIqM@66BBJNj#}U%lWGAE6_2W5nqu*V?cMo(uCq2YoT_4^mPzh{M^o!bnXb zd?Zt3o{=EI7|azf97F%+szyMH=ulcU7FNt#-glyyEM`l-!h9LgLkc6y?YcsY3xiS| zC801Ffz%dky__tU(RZSnO-qVZVNit*2C)}8`Rd@C&37LcUxmRQc;CyzebLWQJY$D|gyL%KwmnJlavmQJWt)S{{JSfBxf3%W?2W2;#*qM1bDh5|OQP(<7)s4LijB-R8w**>n9vlgxW zXt@#qRVt(=c=zE0mESwS$w;!IW;QD~|B@7PLD!NMFcO?XW&<*tA#R{n7D)=dIR zr9dRuqi_EDK4;n*d_nfmd1X)mJkU(gs?nRP&A-(B#vkdE2`7|wy%S16anvY)idws@ z45pC(FJM<$*Z!&-M_YpD`kotm@`3_g4{#bxj|dHqOrA;rZXJj^ldkc@93*6;GFmJn zplGs1anY`rsYD=9#2y{|#FLQ$H{QS^zK0;srz^nt3(P%)CJCh137=X-9rhRtQjY=S zP}$kLGzP5q^?9_fzbKv7?L;B^ftH0TOXtV#=; zi4hsTIfGa*G&H*cQ8ow!Ci^78DBmE68B8`cTK~&`De6ujz)L(?txaD-bjND}>lnkR z7PW?R#IlOzcH`^-fx+APR31vOG^>H^-U@qzS5Pe?t_gPaOuCP>n1l-=&dY8cy+ZX? z1MFe$O?600SBV2V>&G}=6VwcSe$4$bKv{KV!8L*QJZeZ z%-b&}q$E);+Q5o!2pR;aKW??0yuZj2~{ z=k@2e%={}J)Qq_P`fjRGGe29vb=)fbnoaRAt;NlSrGf2Ybp_EHqb~(R_uCtIWt8Sk z0cb2o%tp-Q$gSBnj*|%R8f5kaLt<}-h-(UkY_N0c;z=JWwU2bAehQBihcqKlxN{ngWfFr)Q-rM^E)Dt4D z*L0`8Enjvk`MoM0-n}jKStTmKpM5*{iZ4@O5>tp%J2B_~M$)L8rYc+VKAvo)dCxV3 zwLa$t18N5>-8t~sDthv^ey&V7-Sjbhm(9o^r08^7RTD9Z3l<5TZvZ`p zcuc-tb*rtXuqsJ~QUcNWR62zqGc#rg#}XLa?jO`uMRB+kH}xrCAuuBgpC_&t0FqE-LNHq7`@2h~{*UApL6>4cO8cx>>ohtb0jiuz zG`}~J=5SB>Mn#>Qwuj)arQbMl6HI>&;ioHZjVQFregR^c~IHvSq#vE zggAfb<7|u>xa8!)LubMC`q=?HZE4UqkZs7zOvz=p73Hb8% zAM_^mg`NRL#M&??L&SWJDd`c`mwya>6j~yRQ>Rp3n|R}GfFD!G>yVbe|KuZxwMnah zQjwCJ$s^yS{F^w3eIyuyNY0R4bh;A%FD=&LHdnJw7aJH{_Zj+oO}aBE!~la-(GXRx zMeCQ_XlsMb0_h~7 z{tUWf0C)oh=u|$W!ItC-lGwnbfY%&K@UZ|oa;(~||0dsM60c`&-6@i)l@3PjV7-EL zcgJOQczQej^IiM#k>#vg@3g^127L`Tz^cOt{K89UmHG$)U*k2e4(hZBQkv*}D9RA6 z9Z3COtaDeQu$uYBpSouvEA5Rn!;zyEUS&tpz57eZI zPT!2-R3Y34kE1-|Dgw7$wvGMJ7%|W28DdP`@o4w|mA?(Gp*D#=yZ-%3GF~YSlBJDT z{$}a;lHe6jxMe$i&EKv~shcS<(~(@dS9c%}%c9Lb#WRDqn(xeVUq%=u+eoEjD&oLI zhk(nc^Dm^ovn778%HL?w`GUKSwes}VA5x$g(r}}7Zy^4KxA_`>83lMcH7^b~e>T}$ zAS^IKBa2EZIV;t!+j}#Lh2m7(8zV^_&AKWakkK=*VaduZ_h-#Usx5zzScwqHXcm%| zjHFoW()mrn$>SP!V5>1*b6r>WVx&lG==T#o2YfZP6csQ?9UcnQv?5OO;RI^p^LGI9 zw?}itA`AhLC+WM6!Cydv1SNt|wLt&+t81?Lxq8)_A)$uZ3*dlK9dWBRyWCb&V7sGv zDP8$F8JE@VOQs!Bjv`lS4Mk2j>L&RPTU}6Wi(o*ac#6BnxCsfSwSHJ`p8)w zd-HS!1;!s7?K_($R9j=8Imbh8x@xUUFC-d`P%9bnZyT@NhD^w;6o2ATQ18B-9{*md z^(*}VaK!k#>V(7DYkc@hKGcANBp6GcBb$%tEEa(B=-S6W*%vfL3wg6$E_43+=LPuw z`Pv393gvMXOqkqID#burXT!O>=Xbw7@&-Z($QmF<9{IpP@xyCClyd_ar+GW=Be~~OhouiP^F^on0jxExMvFhDxz-q{zM2Su~` z>ZVwv3>s|Ty}h|CzXNj|hO87%nP1?QfDf=3FN0Ajao>>g^bnZYGd&cNJiaUZn(`;? zzk;t>oNG|856$Up*mz`7V0Xgxvas9RobIi9Xg;U@+yoah(Vvucw^nak!`a@{xP^$y z%!osCx4}FeRxkU_pf}LTnK>Z6`3$J15Z)(qT*?OJGi}@ZKR39c>fJVdGQAhdOS`3rtJ>BA(}xI zyEC{7rH@ED|9Ra>cmVuRuvSX{!(~KVSocgxxxRD8A`1n`XHbuy(3c|ie4V7Oe>AT= z9R>ttObHW`{}5g`f&;mf=l|L2nScO46N!<+i~gHq!X+#q23aOLSRTAO7Vdn4zY~Wg*0a5mpSi-3{XSCPu_;?2F znVA(4zV;}#mumY!W8Hl4pg32*Rzzc@$l)#M-AF1gNC(UI5VB?q92uOqAo@H%0eQ)j zOc=x*$$G@12pTybfaRr`HSda3Ndljx`}7lX(bj2fk|2aX+UkwWmAggS@B!eK z@$ySZ4vqfuw0|YvXu&oJV2MZzAyl_Wt{zkg&hw(!T_4wlT|^JFsj&r{mI;XM7{Cok zhgt9*D4Q=q>yrmQ{vB*fgzdg?%_a9yrty88xkaBI+I!LxBkSLtgsAu|y6M=!_Vw*c zvH_^Ftk55R#}bHLM&OTQz%$Ioo4zezk92TAH5&tbb~|9vbE$$8Zqgv0b0Q%F8UWNL zJb&tC>6B%|4`4{>dmlM7{<>@6$qYC=8fN{BkW!+MieZ?>CE2G8SeOHKo{xV)Op3Zr zfqCKjqf)Sot3e)M2MkOelqr6+r2M_W`{*VT6@h@)&~_`vA*W{@g5DXVrM60F{iVbh zu{^n}fbkP~x(*cRRPSVky37o9-hg#VB%pWgc{VmqOE253&@h$XIfV>JYW!RIeTitr#kVOPt7@NDSJBy(4G!U+5E`I@^ zKo&7JE}3Wz4gpoB7?BvHdQx28AcCL^u0}9r>;|4^`zWD%RRzle{z=h6!e$K++x4!E zx&u4+{iTe7(la)XK)W!7xXA9cJ^u+1KWFy%p(&QiJlDNS2T7$io5?!8XVH&0YVRtI z8^vcCw>($ml(M0MWXt=&{0sjVQAQ9iqQRxf?ieJ5&qP#!1iAvQ&PO+%U^L^_EmfcX zAHYDW+=Z?cqb+Ysz9@|0?724r9Iq)y=$@FF@7s!$!YJ}Tn!5^@n z(v17PaZ&!Yh_k4vWuifuRdk!E(Ik-61O6_cbATOz!k1!;#qiwBsC|7j4vkv_n+VMm zt`6%oMXF#Sbt*j$A&vT53_lD8^iB;tdH)Zz(9#qkDfQ4OnQ4m0<`X6OT@$7A)WF zHWj~43U~sIbE3InP;SU_IK}^wByJ@eer=@ch+;DlbNbr$1LyzX2;y%0=5692lDV4x zI)NdJ&8fQ&8`Mdq;;)?i%74;BOYK0w$O>qCo)?G8l-bG`@67kYiQE+*v1hIdI{=y))L;HhedUl>9VKZ zC7TjT#VA&KXn=Q6e9FdA;?`&SxMY&epN`Jzp+G3E!xR7IB7;geql~uGNaCAtYFQ3PrL)jUeV`^xfSLZOE zviBWe#3dp|T;?1ag&jBqMOxIe9Kb9X&!PV>UEa^trSfzXnwCl^NarjCTD#gM??K!X z+(+QTpQ~C(QllNJso`}Dxwu#)zMGOn6bSp{3!>W>3x-+?TlKU0wjDrTn5~S-)nUSo zJBSqIjl?+7^h>MN!b}Ca0ew8rH)VsDq3tmvJXYx_z5agXvOpR3!d-9ws)GH5r(5!L z@yK>l+ULAxhIS)m>>?JM#)>g{&S@@JxlRb_ox0 zgB5;m`m^#=)^XY7{3FKgmnRGlyib05^O}@sNN+8>?X5MmJ@G}&yvDdY()<{&#EYJ# z5Twau_hk)AF~(LmRBj#8tdgq#0<0l7#-BP+`e?{$o|^C)EiqTlX{Sg=y+y1;R*TvW zxg9)vR~dTM_e1_~-+*j{>&t_ig{fqUqeub)IYJ);da zgx`7uz!V!zP4+R=rghCZ=IQjVL%^#f&!9|TD7(5ssfu-unZBjH3u+LOL>TmQ#n2QN zg2Bcu6Dy>sKJRXhg-YZ)uMYF7YM2WDsnJf5rp(m*LW>>-sKJA4 zcE4H|+l=rDyR60;jYHAVFZc^G;)R7D^3uQ#e*;X}3+*MZgde6mdP2TJ`ecyq3a5<; zJ+R+c-g^vPmUDoH5YGDIg2x)C-g|ZM&agrAR0RBV1_;S|s9i&u(`^W#9zuQ)NqqvO z$26$jA_c591^8wyzsbhZq6wgUiiOm5AsHx5dE^%Y)+#1rAOu;fRsJdDgNS^ch}i>M z_9P7fghCdgty;;i{uUue69L^i24HZT=4Woc2|odF9}ED!nJSFTMB$6jD-;DkkfM)p zI!I~p1dqJqrLtJocYle-%ri1AkkGSaPuHAB_4ro)Kh(W>IMjdpH_SAav5rwuqQTgg z$WmFyzGlr96{2s6P?jOvNTTdZDN9*eM4}``HKG*SktoKRJ(N_BV1fEka{0G!w;_A6|>^ePesiA=o}cnETh1WUiM zx!6w!fTSf7W}~v;&9meRze8{p*%`4pEM@bS{qHJ4AEwqqZC|MAy>r+JNzCX}f@w@n z!$DRZJS&QUu~*+5F+)XafL?HR&j;QgJ!-Sc^j`)q*3sDkBMBXjwbDco3vTm#d|+We zB0iukk$?i~Fsr1En%#B0u?GQ1G2Zw--}D&mAa|1~9JF45ELV)wYCo;E0$InWy|)L= zk~|xvaKD*ju}d)hyPMb_0-~6VK~7e)!Tr0Kc$*0l=7Q@zu=^WgS>_laK5d#jry3)2 zd~~6|eR>bN%9b0hca!!{(hD^}h!*zkjS0&;-CoRz}@JR($N#)Kly=wmjvo%PU`xNflI%Y#~nejihwyS zFzMIaN_`6VB1xTjWtx`wLVEH}vt0*x=1g^d4)HgzQ&yHE(%( ziKV>;A5+8W^+`|NhZ^l;A_Z+f)Q87n2)9pwM=#E5H5*!SOAk$Dok? z%IfQZX75_5XjZ=rt)D+?J!<8_SK*q-kyJj}`=H035G?Yt27KZwC`Q}sCx=Nh_TtHltiqKwBYOO>XOi(dbehjQ< zmxD1JkLVd&f6PX_KQ`iLN0V!$%VtzV*jZpOx3TdOAhzqu3d_2Irk? z*gBnOH|XW|zmEcVeUF48V3m1zUFn?B$n1G3s%0j#F@!myhbU7Kae?sh&V zN@)hrz%-PRw$B&Bi!7X6s|U*lzVK;f7Q zykO|LXR1bQ6>q2UKeS3_oY03D~KH z$S01x^$@&r?XB9u*JQXK5N?$XO8L@KQJA=c$dwR(sej$+rs5hlo?YI6&DX4u{5szaIM9C9O0fJ7o2I{SN)pOqP?SqwKSJe9VOy$Qw}Ko)UDXGRpk zH^7=(fZx_@?FVR>)a;NFxYW6-h_Dg$ThN+VBT2_VG<0#XZ_Wzri?9vg9ze-wJ|$k@ zwnI{PHCgIm_-C`BIc$MD8tV-JkzH~Qe8eW`h{21o_f~9pdOn00L+3wXn-5x}=MY<$ zElsT*e^Y#_#NwP4BKT00i;Tm;5R~V)Fp%^6=;VFg^@1BFV7i7%CZUSY;Y9CaHEN=J zhoyDL)SwcY!HTG$;UPd5)UqnZ_uJt zB7ghNDF(mMqtmn&o4JG7XUL29J*dK7irR*sobzA$bl+4}F2miWj_&N+tW{@rKwIGO zju>#{$=yRq*XzH&)HElfK_W33K$li%y0APIGb~76MN3~>jDoLh#!^JIuQLSph&MP3 zIDBaYpFr`Zoa+q_?;c*>N`2J&N_Ryb5GJG^eAc(J96G$O=-vK}9K2q_5yBIc+3J(2 z_$8{6`N@#|jH9v+^iEY47F}&TNpbW-HL#&dY!ZoH3JjaR_qmv3G!_o&yZT9Gb?rsn-b4g3f zwIC76q*z5TXUf$y2!|BPbqx;`T2JhvNW>8-4 zDfzRT8=nUQD+FdCQt(1B4%OvZ-9*2`-o_Nn@ssa=w*s}@k>C9_buYCof zLGaKsXCwgotj1P@RX{m9zRUY7!!Ni1LePOT_?wyJO@tf(DK_auI70%> znE1j;&&Ybe9`)JaJ>uuJkJqFh0W`)uts0P*l+C&o#Q_YZOcYXCaPo_%lOy>4m!iUr z|8GTwvJV^k(;y%cDZM44mzUnc!1)0np8FvX#R5%JSv>@^A;KGg$m3q4uM1;U+5gi3 zQF#d5U*j9!dVD(wvTM8B2X@NruD-Sky_8lQm*ovB(wUyz#WII{E2iuvoMwt`z%$Yu zu`qATLyHi63&-0FV})iu=P+~JGX}e11uDmn2O_^ux*#*gWZ$EGFbv2?|9W?^1k$vD zf!YYT;l81&W)XWa68JJuhbMt0im@UB13ClypL_2eHNstdk-M!GhCe6-={FH(*U8UJ;;`_7D*ij*2H#r?Bz=+MfYy} zL$W#o(ag!dMn&@A*h(jY&BxT{zy=cD0|T+z4&oew4zD*agvM!9`}gftdK}9eVQMoC zE;($md;tbH3Ol56_z#X^%YXDNyY`|8L~+zgrh_3rVZFFU9}0e&(x-nL@34SW?n_NZ zke-VaLP4Umfz8#c>R^o9gBt+^kPhCv^oFLbH{ej^LsMd_inkyC#xd-lpmh$zlT z%We6=5F##DuT%Z21=w@{+O>ddPp-xD;02%`#wX+ejW+hq=gc4gh#NlRolc?Q3Is>( z?WtGi>OYizP3&ARt{NZ~&x>D!YeHICXMeqJYm6<4X>%&{gS&4r4X`llY=SSJu)T3S z0F0SbY3d1uxt(Tae3q}s@{C3#pQw1{6lzaF?Ok5A(;uf5;SDs^FR}HV;5alXRh2Hiawp^cAWt zwZbbfq1Hh!X1z?#{LR?uRw z(K;l&Wy{;occlBj?JRkIyWWOidUW*8(ZaTl17U!cnbY^^LJDH8i}5kfWTr*CZ?0Ue zp`wPB_gP<7CvN=Ke(v6~px)b}xX2F6_{AzR6r-xw=6`&@OT*;zEVcsWS?R550==R~n zP;d;H;aVI+X2=aP!(aIdzc$@P6bsbqMF>Qc&})!G z0%(E*!@|NyXByp0k`I+`54t=?yuva7W#Hct1=`yOQIk%{9^6)RFt_6 z!HR!hVt@e=)Uve~o^lHooOVG{h&FX$ZjdgRdSUwU;=Oxo&OR&WzcG!v2w)4Xeq%yO z6EESsczeJF&7khbFbm9-D8jjh1U~U0xJ2Go?KimxlaJwXITka+z}tUQ4=cC))fZ+{ z)7OK(@cWcN_1`>0F)OAbFNhbu{?YU_fg>Yfn;Pd|#r<)9kTn`K8R1wGA1q>TtZF@| z*YF9|(uKrCbCugRe+n+4QkM$9t+;q6I{MN{^xEJLo(}`Z5!zFMR$1h?$iWPORMXRs zR-!)KIehNj9UEc5gin*cGI|~8qXeaFpiORAJP6=S{&h+1@Na2Am8*69w8;mF#yYc+ zztR?Et4nZ9#im}xTj}XYIlT=~;dJtoF@0=w_9sIqwuw+1K7So$OoxA;ltE0lsZY+oU5pOHp-`N} z_q{*-lx3Pi!k4v4n`)`cY=Q9*{?rm^{&_HR8SgzlrRGxOdwvtjJZo9y6wKX%Mqlee zLGyL{5yXut0rYhaC;-#{!e!*#6fH*CJmZ}=il=&VS}#L<=I-{|$B#609@ zLNN4a9Tk;*ZjR;7qU&UL*;v8|f4LTP6B9f=cE_l(B(+1g714E2xt-aK0yR_yaO|@t zB4G^P>L6b{2T>YDaovmfd5ey@d+43yoxUC&6J{~s)FFaK zL@@r;iJdu0dfrb?ZpI9PiZ#|l(z>P)brCr1#I2l7 zAc+leJ$*;4J2JU4OqKH!SI58Cyt*S?PB$d}jN5$#C~AZfoqTvhX_b{;#>Olc z#@uhn#QZTIT_2x}ig(mrgEq3a{}5U*kS$OMa1KFOSOFGb$DW5JMGa4Y@q2r{ z7fLnE{|+VaMiJg_!dHpo{T}ZW<8Lx1&a2eU0usnJV2*Z0_I^Lq)7Cd#dx_`U zkkh%@=m?8Z!@0{UD#p(Q(d>(KDW7oz+tcuOsr`J%Gy(_WUPX>9Ime%B-1GR!yE_qF z=Z0S7zTXqltlq_K%@esd31Q~82y28B(SCLFOfq)mIFVrpl5AZrlxD#g(%3WeYqRUe9Acs4r^IvB>3n! zFm&?V8Ur=w_}aPZR~_os6A4cmY}VJCxI?%)L`e%-|2**^KbF)L=c0qoT)6Tnf=Y&i z7w8*kTzqae{lT<+ZOk5JPS(9lZb|4ZDDW$Dul#%!jq*_gx=(IZicpUH6%-r@FZK>_ z$R?3_Tkn@^&9~dzf%I2x2uWXt&y%&-VvRC0@Lp#M3z0q6WefXq(9T}pMjNilWk`g| zme%*>rg#dnHtM0pPH^g&1NNAePigoB8KBsz?Kv`W^Puf3^UL`V=k34f=J&NabKmDzs*#<+-litkb{DXlBTSc~dnKFTKB!ome_B`dmkr(PxpWU9! zoZwc(nN8bp@#-be?AP$&58>=<_!eX7?)fm*l-Ii;Eo|@5H{rHpaUuk%cDL2i%zPY= zRug5LVBe+kE!Bac9VN%kQBKB^gQR7>^H)ng8B-_`!}h?#xf}|T&j?8r>95 zRsT~L(_&sA1b7MS{`IeCU=yO24Ys^qH7pV;cOXFfB9kxUONC#HLq5TPQI_sMjR^pZ zU^?;q${+J$RVPXfAkx^1PyT%lc76=S3CE9%V<4cbubbTflfJy`t&*WtrX_Z}*Z$ZB zqt)hzNPUegk=&u3ep#L7=AXf&-GqzhrWrLi4Oq;b}cGGQ=tM3p+I9W4=By7t2Bh3E&(ZWtM;_Ur<2F|!4p@5wDP@_ zL2eXbnWq)D)yx={NVyCQ4oZM7!44J)AqGP*UKA<`q?=y3wzaYV#%L5;THFRcyJv8l z2~W78GOY+-^Tc;y1zvj$%b;HKN<4P37)S{8_f)hf)E0Ava z31V$wiy9eVh;oi82pK|CAYNra0YC$5^K*&{&mdT0cPRuD&c)JH-ok<+mj~f@=0Ie1 z#~c=UDMH7U4JGwC46Y^68lbkx|H-k<@X;Ewqo+5uk3>9C#8? zllw56l=p;IYbCKCR0i4qywWY%dCLZy7AQwUty>=L8eqm?E+Tx2g7-g5rU7}?B!gCS zNdVa4<7fr?O>a;c`gB@>w6_GvZSy?Xo0AvYl(-T2=$KXkYaaNK|*^BU(S9bU|n@jA6^s(L_hAfocV=Hx((Q!D8VSx}&on zr>Dh!pw;~v8wMhbbjD9uZWno%*uOy!xZ1O)W%P6;;qrVYiWlCo)5F4)7>u(?T1x!^ zm+s7?b>}3wZj8p2)|sNr;UaziQ_s46 zHxHFd^L#n#Ax5U-8y*)e?dSf9MCXNHk3Oip1(#q7>LM>768377G#q1Tro>%X zlnVuFD;|e==xLTDX`(`p(r20K_-So?T3K?El250lyQ0wf#4ZSDUreEwUWgb20w!n% zZ|{L~3LgX?p`J+_hgdeS%O&6CdmxGi&jAk>UV5n^9uE&T1xW~j0{>6U`!tiIejyAoTVde9!uQY}G^z7|spJQnzq$mx!X9-EOH|D}x;j=VpGMu1GdOyaN|=$@)W` z`#C|Q1~=FpBi9e6j2B0O7e{{UA6-FKKDgB$qM$gnZvgs09(x?AYQD*lAEeM8V8 zAsM_2fbry#rsn2W$gOCsYd)BNg6S8mS9y4J^tC9uMfYn8{E^m`o6u@vNzkv`fs+); z$sr@j06gFdU~bAl$>ML$ATkR4lJ5n4W~Xx??XyUzrFa&8oii|9F^6vxTl8`<6Nqp(BF} zS{69y{Pi)2ZP3Zn;%WoVH;LMBQU1ZC4#()8;l%u#A<)`c27w^z%p=k8Fd?Ig zGEPa`(9IU2u)F%T(Y@cxkXz+Ni(x+UB?TPSKhrp z>S+(+K5uvmwD}X11*ZwDGvFN#X3RjK)y{+{{XN3muT0=bMVhu2{@CP|g=XD$9m>9M z8lH|K5i@$!ZCC??+RFYS4v!PNE}O3lzMG;+q3lbm{n4ry7d_Gfd^H&|~*52+U(d_|{lyG$}YiY4>#j!w+au)Voib0J2 z?5^_tnq44<=&L%CeG(=bWXZe?itV`$r+sE{8Qt*Im;%hKl9{>ZXs( z?%D>CQ5$rl>V3N^^>A2<(cH+3GM*EQb!g})zjq`ke^Ua?104XXlnAqO5$ zOOT)z4oEJ^u-WhT*y1)@%Nc+CAzDznUcx4LVSnnjklq&R4ZIhOf4in=uQ?5~A3b-< z1(V;6_)dIK65XY#CjuNFvM(`>Zs`TDqpCN276!>BwCSA8JsgB49(PCX1101nas++g zR2N?fiur|-edW72`?^JZF_rAlcE3S*9&{re^4vowgd(7>Ma|1;i6*$x%6Ygv==yI! z12R8*mDu)FZwF+z-y_&0JctRs3i{BN9zue~QdP5%rvGpZTfL>ABRP+|UJLKKEV<8( z@8Yq+r`*FE^dNY0%^Tcuh*<*51@LI@xzHK{fSz*Hx_fhmE=jcwB1+qM(47e`J4kIkFIBGZ3h*xHLs7Y&Z;@*feUdhxwhsRZ~c zy?ihXk3O-k^U!yMOh?veyJM>b&0eYw$82a|n5$`zp+3uh3{%zCV$XS!;2@$NALID- z-HFB%q#LL=QGw<{^M_7PfhWdst@uIJUbVO%tc({!u0_Mlxq?f=XT{welrgUx;M_z)k#NU)Hc#MYJ1)H|83c!DI=;sw;c{W3*T52s@SQ}VRH87o#Cv%s_IT- zvaZv=^&7-D$jpnN#PAcr&`0ZgvdJ;P2WxnHTIu~Egc*Gz(5ZXBpZ0oH@M&VXy%)zk z*M0b=8FZ-jbv~U8O}!aRV;LmNHbcyc_zVD9tzZ|c=lSsMOW#T~TL1&2H>F82KTTUO zY=RU(OCo?h>j7&5A+`6Wc$JM6=u0Bsg11Hz66UQ|p#t1xo7MFb>yEtf7?@Z|bW@j0 zd!Q5Qx_tEmjJecy$elrmnn@Gkl5}EMg?A%#19qTtG?QKPPjI>kDE{YuWhnr`U=e|(O_v6reEiqb>Y;ER>@|6Y0iPi-MXtx?S{@Szkr7RU z4(&Vyjzk)v&JCPdWi^Azk71ck;Gzr!iY|1g>O;dv4~hW2)@A4ofYHrZ93Nq26jzj5 z?9PASC;9-8=@_B_%B;a)a2bf}lzEU6oxb_IyX2M5Vq4%xs4&^YLlBLM3hqDs+t8<5 z!U0dPtD()@lvs&Hv3+3g+jS8+E`f#@o&dYTx_ygx)J7seVwD9=K`V5S-thbyX9gDm z`a{yZG0=%26*tPz%)SXN9RntJxSnNsLDg({8;PgcaD?1_XU#LX_YyME+zS2Z%d3y$`5X*l6bf#6%g^7(#bRqM~}e$G>PJqqN3D zXwwP&{KeQOkkLB~FgjXQL{$Jpz^>61qK#&p`#@T?>-_YjFYM~<36b;`?;a>k?j1*c zeD>6Z!M^3Q^^1?8>Y)%gil`xu96=RWC>k7>`|mNLdT;gIorM=i4%;E$5XyVr3`bTW{Ha6vPck z(REO{s5Nphd0V@fVjEmO*Il={WTlWJ1f9JA4TE2Nti9{IzZ24qdh&`Pf&IwI3gvHNC4i# za~SbA4U1jsj1Y~lC4Z`=2`KDtPv_-y)`iw}ztG(H+I8zBAhF@=?u*+rgLf=POcY9R zZ42|+Z#Uk)>33;QUMNhVXr^d`;W6SxeY^0*+jO&?1;rsCPeE^**n;Pk@>9iLxVWx*nF z*$)s&S?B4X#T9>h8QMizJ~hCBz4^CZEwQ?rEwNv`sbIatriW&%WD$ZRl&Z(qHNi&Z ze2OXF4W$WyRwN6hO~W%WoPQwh)w5ANKyw!s2=dJZU~G4%Ler!WS2Gd6egLaiKNIJk z!zK`wkOSbl1q6zqjjoSK+JT-wBqAy-&l5|a*#YeWOJOO?x#_>jT$>T*`7=9Mq{3GN}>P3G6$ z!Bkg#V5f#mW zQ!g(Jy|=VBoBJ^PYzCd{BEQLUaHi`UGVz{hOtUAEZcFP0DD&f-6Pf<`;9Pq5;9+O{ z!s;~S;c>&@?TJWUMO7g`-@R)i1*y0Lp3%(GGPy%YfPW!qL zzlN_CIps!lI8Mi3bi*Cgx%K0d-5dk~q8sBy%i-Cp1@&_8!cYnWOO^iDL&b?Sp!d+o zN)Ql`f*?bssj5yfmc%>1&U7-^X&W3}+rY9@%v~@ZL9!@A5u`eI%B4NH67Rg!+|3Y< z!8iPaFD4ZBvKC=Q_~j(wbNar~?{B9dfDx>yx&=~zz%iQ&^rQ%QalkpdWC3_NUtaua zwbIk$V;qo(Aot}EW8y;}$5v)IMBwQz23F@Y!W(Gcu~9#V1vY2|T?75r)3JA;BHIzY zTu<-eNQ`abcwi{B+Og2qP$4dtv)_=j_2BtcOcLU0tsf~ z=}^w&t~OM+*rD&YV~^@t^h5ds zHZcP@L%LZ%cN*!%fti%@fB5rCadJU->LAJ%gl2;QKtWpD5G2WFA=mhR?K^zmkKeMt z`{H`v8%MRgkUw}_+*zhLMy#E|4j|Ff0L^1q2N@Lg9N6B?z+Nx}*|wP|3yt#-fW;C3 z#K1~Jwb*!5ghZ2aA?Dc$~6n2 z?gLLB9`FHlXzT6*FZ~?$rIWw_BGMBGk?E zFr;d%+YZH|7z{X10mDcF=iv6KML_71Ae;G9j!xu#J^_Kx)o9U$F;EuC06qyRD=R;R z1~D1hr;QLX6+HB{=2iOGO%^BDOxMq73vhP?09PjKHb;I-w*?VpqwC^);6i69=xUy7 zeyJb4liWMmK3p5MIPUa*sc`q_wB+DwaRqcLP}BHnX`Jz<&kXvPt z?(6e=6*0LQ$*BGG?OeZe>;Y!Wem}U{O5EXsPJ*JUp`|Fe9%Wu3Z5gZqxE>#Vpe?8H zG=mrpbLqM8QNwn|Dn#K#PR{J?>DXe!;7jy@U9UA-KzA zbJov8CYK|3HaU+ZDS6!^39vu=4=#JRKkIS~AQct{~?k9`l zSdbhWxhF7gux4-p*{Wo|v;w=ZEA2C!Kv{Ib9Y;%uR%@yV(gI=q$+U0#<&XD#KI43J zqJ3D5eI8sh=7ypbSAgUhxbgef_RzlG?*126q24ZsgtSSH;|kpjx^TT1lQnz1vsy^RHBqNoxJ4wCs<($4uC=d>*r3I zS|7$rwaR?0Ptezu3Ff+KDt}KnI07yq(p`2D*m=eDN}_<>Q55cobz%*y=i*{QL{}RI zL)-0=W#8mZ`97YVEZn(z;Jd59k*Yosp#CR$2u{^>rv0XmtDFupfF9J11DO-o=*B!Q z6K;yZW<2>b!oq(^7op}t6Y(#wpUMP$01j`u{;&9plokQc{`{Y2$ZKR&4j91=;fyB} zT^LSv^ixoBe3J4rDmdfDX+dnGUmAy?eo8WhMFL*ECDzNL%8Jg z1elU7A|mP`)ZYQc?1EfoeWOyzDx2WI+W8eOe$WyGXSu+pk@IkX?$76hkGd1t`#(J_ z9`-P&Lr?7C<1+IC1PpoKCO_D72q?;@YRx4;tZnB%mzz_BGvng0Xgf=uZuDMAF%n1B z4!~p!5Mp`3k#{`_(sf!POVAt60-xXT8qr>tLoivs(m?Up#3i_v;-WXd(dUE!?6Iz@ z;*;_`d8J6h3obTO7;zj5(d-LJrJSC|^=S4r1B9OCCrprX$aS|DPchzo_lN>A5%*@m zC}su&2Ez7_j=hyY(msG!U7;hGzCoFWPP397KH4)uYhQ3hgE6MIcpvaa3qnFz&VJ#0 zAyXGXwqyp1uM()dErX~I-Mo_zVSbPjG@ivaF_VYV$BT-Ju%2uMgqa(TH!h$EvsENJ zP$Na|fB5ta$!F4`633@Il>WmJZwDv4C|YMr;Kkz?(CV=#Cn&eEm=TzltX#kxDG%?> z;r7wpHNG-H$2i^&K?DitgzW~r047*VvTCo@$8>_E)^lpPc;16dlQ*u|_eg|w{pE)^ zym(br6=v23-c9pWNO+ioSb-L3QMER90J@BpmeLxJf`14}Ls6s?0014zf;A0E`H1ZT ze=eeE<{*OK9J-Ve({wSB)?psF#}32sP~{l?rdvsztUR}(iLDY7EDGE?6NQXoF!T-I zZ>{37W*^W~a%DBp#uFLmtJmY%94oF@>~_Fb?6SnoA>`ALhdTP~5QnQirqm+Fb11q# zdZGW+%8goikAp^1mYN3)eJ^zuI2`gLDgMkP2^`Lv!$?qClDrs|Y#k1eiK8 zn%O%!bYSTRUF85beA?fU7Ubsgfx~OabSPs!HEh$PVOK?%F3B@iG#SPjsB3YX(yU=E zJMX#C(SuO#Qkun;>*5lz`esb84(kz>U}{3arw-w#L*myl*K56W9ca8?|EB+8JCW%P zz?zO?G?h;C2S%=qu{2=t%dcDs7rHeD44QM7x%i}oJ9sXRLh|k+#=TX+Bu1HZc4F(s zO+FaP(a|EKGbMoZz=zH6bIe&69c_kKIIt2%jn&2O;K0_HQbf8_cpEg;)C8oJe)FmI zDN81>!%AbfP$qaC`uJT_Z$Ds`Alnjb^5j8RO#8D+|*Ihce7U!q>-L zwxCz&jFz^0*QLatl7pP`)sf-==z;{F;5a>eV_w zU4;v6;IJ+Sa$LNd1+gI#E+9e0bXl$tv@_@r(G49CmH$Xu7sx{(h0{gIpf-mHhfNcJ zKp|9c>|*5i^Mkc;%*g2>zwVCU@vN7R{r?t-!%#6qCkr4A47mk6q<{&{0Hs`WI4re& z@;=J%{*HY>OnXDqh~&9wv**i!1)cw9dKiWsXo-M>?}2t#0#=>a{p4;;131nC;!8=8 zI>9jJ0L9gf0u+$C;;%6WC7vQ{LG1pt|Cm6iJ5iX)V!)<)lmBi&_+2n=aQEil@9ZbL_p(j}$j0{a!fiT|#KTtQBNTgRAt7&Z_$_`QL} z^J$<~eFoV7Fnq9R*$3Lyh0;}!`53B$lNd=uV>eh_%i@>}4Iwb4{U2bvuYUx8rCk4Q zk4Dyeps_})RB%ZkjWq-IxE@G#WbX+^yMod1fjj*lEHK6u!rw~b??OOaf7L^f{#@Jo z>B9#Q3*Ma?GXC?8OaOLdXh<{$Kd<1--s3C3e`K35?`8S@{{$DBCp-W#aRyTSfb|$T zZqUMx7L8*gNpm?!CItD>io26WkM@E5=?**fE9eQ(7WwR2X&^WVJEU*J_a=LURecQW zLG9YmWdlo_(7E{j7myJ{Z>;u6KKj$~lv~igix3jhp&|VgTF7m9ZsZR}D8+RQtKqR7 z+bB~@D~|BjH+;VbfrAtXWZe-qSF+Lg&Ejqp&8v$1i4DKK}k$9e>VKq`-%MWnAE2I|F8No!428W8F6 zR%rr_1|ZkU?&@+5mL9Mt1`P}hc+HP@+}}Q*+%LLM|2e|Ag03f^ss(?}lH7Ylg>Hj< zwG+fXqHRw%z=8}8d2LX_5;lP#VQo`9QD;q~Ou|Zx(;5SP=T4m7HHciciDj*lgS+n> zOhTpm@k(cM&I7#n99$DwId7WpEq-?dxbsu?6Td(I3Sc4@ytpUNR;=u}?v=x$XtdgB z!}fqnr{9saUVtQXm*sPCi6i7L_R?F_(*Q*6P|gu+m#(zP+NjJPb8`C|y?VI%GEsbl zp8?qyVRmc=TM7uVG1?uc_A|QeBY(t)l{{3{_=2m4gmZt_fT+JP_Pj7GiMC}fukCrN zlw9^t6RY&nfzM-1mVGDj4U-$O@46+w#dMsA&LV=ce|3|&*zgeG?WSY7%i4xB`unFdx!K!_1T!Qq#&7mKynUuvTvgP@2 z!P0)>m%FBBm6t2J)@Y39N$#}Y^Ma|Zl@bK#8wGrv(D0S$vT1&x?frQYGDm^e_RG zFcBlOOV*-i8FU>yE+gd*J1gDRO33IQa>g*835c%?F;iF^lU)|}m#XLAm56CuF*R#R zWMkWmTl}X?Ry9LpBCg9Zjj6~b3rmn?xOf=d_)Kcd4HlfuA=|e#ey%Ielo`YhdX;1v zJ85w^9AzDV#et$vk^xx#hK?Z;g|#}^lU^W2Bzuw64p=h@E7780`N*+3EkN`LBz^7l`R5N#PdX!R@X zii66%Q{%#m)wzrsfO?9T4-9%%Ybi|O2 z)Ai9Wz1mU*Xn_p0jL)FvbbJTUsB9kXX&>f%+5+S&O3!JKzTmxVL20pJ%MKG zMaw7>JOs_t=$l6%5bPX^&H$)^ku3sBCySt?cS@BvpRSH^6lx~m4&D1==HPL>S3@8VwVIX1+RjKH` zK+bC2`jLO({iEf}^|mvnexRj!xUziw9x!`^o6*ZZucAx^9uck4W%nZz%(^)8&_L0r zoBMRim8_SW%E6*gEwtRr3UX+f8lC|fHH-1n!FkyFw1~D)a;4LywbAPt*&RW-D zZ%_O6zP@nc+kH@EhTr^zbiE-en~13$9}b6`>)N0C9OZSJNSm>-Nb^URt-)@MJ&nsP z`;Mg6AY>cAC<~3<5x&o`-Bd}ELoHQQIQ03a+95=og1z}HkbW{wl5Q(#Km~Yz$2w}2 zTf%vm@r+P}h1$~#u*9qsmi5s-UAKpg9Ycvy1vX^~!3{Z+Xv=-*SIul3%FavK?3rhv z0rRm1piV_@u-m@rrHA$$Lz5y<`!FyCZ5u&BCh;PH6%E=cUF$zm$3YdO zTnahD5uiUtVmIV}(psRO*y`~Z8#Y>a3adLhl9hGrZQjLJ8F8jB>~;WsbZk@nZYt<$ zdc?fNpVe>*SbLU+>V*8pd#BI*gw=Z!RlB+5JZcGz3i#?@#?J?tkA$GS8$Sh5TjxGl z_IrHq%t|tpE150Zx|0icVF&Locixvf|M|YAta|qWl1d`YjIS0vK1XV`&~lca4v;>h zb3LUla4=p28odyo-*CD6Jb<`0pGeTES`A0R;sl@L0GSB)^i&@zuF(doOp2egGu%eq zy}++Ge}@d;Qa_N61^IHe>tFi7+EP)Hr2^{#_k(EC?D>TfEtry>@&LylQ;UYL(%~W4 zWv)lSyJI2uhn~v{r((kCb#PQ2Jk#Lqz zNa!$3#xJ)+SDU$~;*oCnXYk=w{386mnt8R4>r->^c)K!4Kd;{wLW>5xwZL_q;#a|m z)>5|EgEMFlD4>cu?tLq|P-C1{q#!py-CZT)aIV|?V2SDOE@)4@d0RSgey3AYoq9lH zRk}^6;!($i6F;N*&wu*8tifT{ps)1u;@X@>1MWt4Azs|HM!#coP&HY?Fn7Q7wSfM| zn7a>m^jEA4eU1G0z4)&>6puDsg*}ODSm~q`vvwIBpyU=Z*Yp0A8lY0FSQ}kq<3#yO zh6==IPUqNz=D1z9rqSUhZ2B2)7ps>v4LGv}T!o*sJ)b?zTLuS|to1AKjke>dVVhr> zsU*`C8jrQsuTsvlT+&b2S`M^C|&sF~Vo2C^aDi1dA^|r*2%Uhu~ zPUpH5f9*fsmZRJ~SNMsST^I?~56@s8l_u*dL}Tctn5GiJw}kTL!lmWKNG>j>uiVNsv`p%FSK5 zDgGZ(QHrWyDabpmp6DMmG&1@W+8AdEm_TR(?qkrsg3VSezk^l09~M4N-Uk>Sc0~&R zRPd$cp~r38miC<=Gz>bQQ*voV;G4R2Uxi=)z3IeVLw>wMda*QF_6#VZ>?7UB&1;WS zx3TP>en{|KlZIb|mMKu$c)b0s3YG)4Sl(Vp9X@)HeN^VruV${mjawdk%LqS^Tu_iK zC>Gi9KJxs>V@;U{zYq7n6WL^an;7J~cEF=8@pH*}msXDA}UD($H z>!kXRM4Q(fvYpr+uj4cy(j+eWkfm3h5*7L6Q-3YUlENe1Vih$d`hGPWnI0IUs|DTt zv?o&Q{U_V1<{CIq*?Zvw&J3SU*rpd7T@FLJb#K(79~wF9zh^jh*4807_kI1xLwhg_ z9`Pp3xA!O+#@4CkSZ#c-a>sAwZk0LVD{Q~GIw6_btMdGshZQzKAUULO*n+3bwQyU^?Tl%+|yzjMR``uVB_lhL=YYDM1-vofuQSS#;`km_r zGr!{XA8I{<&Jmw}9d7-6MBr|Ej2Iq9rNPT+-5FR=t{n`l<=?{})ikTcv~w4yzvpSMik@;EUO76(hx`uk(x^^oJudl%E-PU#*t+I-M zs$|YIQ5WXstL)$;6?#)8VCr!G`-}csmp9Iez))LhmHb&j;!bjHx}gwM>+pS<#F6T4 za}V=Bk}=VI?75d5xjxWw<$_X708YdU-|tdI&%pxdMvVA?aSB^THd+V22m!y*v+4zY?kG4)Q2#cv5613ZI!Z= zi5SjyYM0dJ-j?$@;ogx4T!qP@#l*8%YW&DIXsk<{DT7O)$wtbb2anQm#8JUVIKjDh zdXp7*=;xK$Psjf-IR0Vgp+X6yZ+%P`{(QbpbRcW_0R2#hwf210>BJM;n%{lBuqhsU zP_)#qDUB=sKHW$^?S@rCN8^kPKSXZe)kO$`r={+Q8Dm3b1I`aH>n`xo;FhOw+?6hn z-wq4g_e~w0Z2rXaF*m_@AE<|ZRZ)9HR+Ma(+3hS|=5tQ!>sL-9XZu%aiqiy5WQN79 zettnmi*v@ezk*ZmHJ(^Yc2{3Vcq#PphG@(>gKaquF|&RjKRmvXbn5K+&6suN$C~)D z6hC4ubk1S(Y3gmS8-6B-UZI<4OT4O^*(G)NxKs?yCeLZ6J$z9+hDd8{d2K>T*l@t1 ze+jwQx_Q!Lq73EJ%G^!I{8{76`V*I8M!CYJU_%o<6{0Tru6ao zfLZARwb|ea-H(=f(Q4sMAm}j=rZ}8*y~*D4GiV{WSF( zG{RCjBWFVBc}?36Xp*mG$-BIr$5*hOXTst#3RovfHGT)})hg%ujE`R;l4sbN>*p}` z_my`g^eJe!7q*&@`NyZ&Q)@d{I13)m>hfJ1vK9)Q_S{b3nu$K$=Ud;!+iJSJ6vEaL zb#}in=Fp7l*`b^>oPk?;cUlG4XgqE41vcvX0log@7nYsk?w6NuXWZ!2pXK=JT!BkJCkk$+sx8VZ! znu2Un)qi9wZaW`)nIJ)`6^0iy#JbC1C@r!gtubb76devNi3FJjOOph?ul#Nb9ZDN> zWno&iV^NVqHd*R3sTdFZ?;=x6D;oc4{g z&H6)sUHz-X=SjJ|7`swDN$h@l@?XyIFjA0&ti1WVJ0yV}25-JM$-ay%CaM|@eV^6yb}|Sc!~cVqyO6Q{^nwcAuFq4kT{?%e`fQ4u{3{2bv{Ja6$yP{|2sy5 zVwQ0D$L1S1sYYzMG~`JEodj=)nv5a<8mIZXA2OBY0ibcsi<3~t4iLnG@>%^G9My3rQ}2vgXj6;DwJ?hbi#iP!Kry(pDXOf4+|yv^0{9 zIQ>`kL}1qeNMe=TA>qGc(H9MZgI9>fNc|AjL3cfQ~JDl zEP(ab`+4e5nerZA`rhO3(}lLUr_8lAigk_Nh|vD13M00RV|mV{6d+lHlN4vbqA~~L z5=xI2gE{IPOc}0U=60)Z1$(R$04@&q7Ic9A7<7IwnhRMKjwlU6u(_#8S`+4-@n?^~ zomc8~vp|;fGv{`+L48UWA}j_#*SpyiELSDa&HB{c-5T5j`(g@zu?C00=m$zDap<^h zLoVr5@00TXeeLV0zyC+Tdn93WJ_q(yY2G&g+HFC*Do;r^E7oI^S2h3E82D6FP#>5A zmZbvdX?VmnG`efSZ`J+%|LW=5t#}G@+>|$LSDvMH*M402Y);0B6m&r6( zOGtyQXz7r0-PW|pamiM0p;;wLE-4c!!Z2j`*RGLDa?_=BZufNRuK9~3Rz2bW8 zC8z^O)}pW?*Kxh3KzMO?Z^%GZJ|F5WHdIPXu*Vsbg-86=etI#yr9ruu0U0`f-=7w2 zP`^wlo6<2Nmn}j7y9qe>UMOffC5TtdKql9{JERjBmbxpW-w)!G)fm8cI$#tUhPDkC zzRkPPanrn>Gvcwf|F#>Zx(&z`3|0{ zZqR#Qvb_>@RhpF|@jf$%XgG~-G=f3I6hKs!69b|aJ|fz74 z`NA{EWP%InF45)F=1ab5LSj zx{P#jvh=3LSNFFviTg1jV(}?stXeK&81e)*EHZ<20Jp}CMN~Dl^R@4OR9Ao|bIy5Y zNkFxr<%Z@#Qan#A>y7W8&?b7C4!x}+adaz$ERA9q8j#WQW%uDKeSKjjT7V5>4+8D~ z(thm1Yv7u*)74ERfLIu_Ej@`xdH_~TkvPE>+gKseg`^n?Uk*zyewgPdDeSTGw_clq zidYf_TYC%a0M8X`pK@Vz#bN||CvsOzG3ws#prLQD>r&CJwl<47{Oa-0RjukHVFG?t zEtq~k-W20mbutk=L-fEw2{37;x=lQpq>7s-GwFmh3Ax%SFVKIx>-V4c_Z~%@T12H(Ar-^1rWx0h24RG zuk%>N1VUWeAqNqf8i5RR(cVLQWJ-#FiO7(IDPec&W32xLYdE#~k0|LzoE%0w<4eBH z>*r@qwN)+Wh#yxR4!>USLZ`GO%Q9bxo2>#k70Y}-6j{bD=j&0|QkmwOL=b|oAh(8W zO-sKmA+&oFxpHaMrsz7TetoTE?%=e5B;mCRuWsI#saWwddqf<35JW`wB)bF*lR(kHd zr{0cl*g^LN2;55T)RwVGffVR7uf#H>u}~i$70ATCZR|+#k$UG|=!z4jFU`rAHG!KU zGraC@f4k2+i^7tKICDe;8DR}X?zI_XByEr9`N6c$nTIulE;th&8>Xr(YkE}AG6)Z? zoH%*qn#B||OTCwK2;Ogv(vVHN`nbX+LU8TjWTmFxt3%_TeM1=ve6c~kzqFLW-B*8# zcaAFwfkFgwXZGW1RGpju9QRbniV`ViW z0?9ptTbPE)Ya@x>JMn=10T_{?+Z$YzOu)Ap#6SCd6sG^1fo!<_ zB2Y%N?(aan&;GDAkVB>QAu3kpzc`no)wZ4u-yp~a*kLP|vkwTi+^?>=rngg%ACD3x zic~IdFfnn@*SwRp2m{T|7;#ZQB#Gi6jmC}x19J$y&;TPCm6ON`vz_hF5=B^DZHT%K z9O@66i*ixgCY?-$mL`V}L&ZTPrcaA(z=ROO7Q-|c+sM)0r8bgQ39r~*htA345(yYnR5`czI#;WUf zbz5I5a~FFHQKnr#|ErraZ()Ct Ol5(_nwX52}O8Ou06wbc@ literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_allow.png b/vendor/github.com/docker/docker/docs/extend/images/authz_allow.png new file mode 100644 index 0000000000000000000000000000000000000000..f42108040bbbf9facb9fff0c320428323f061e10 GIT binary patch literal 33505 zcmdSB1yEIg6hBBjN;;%dQfW}S1ZgBh8UaD1rBg{Mr4a;_loDxCN>UIMkWxZGLZm?& zY4%+8xBK7!?9S}Y&d%)4ym98ryZ65PjdRW?&V8hn=q!g+ci8JxciMc^7!KkZ>@vJuSY|M72eJ0*RBf^rxl2wv9?-oiI(lUN6 zFTW}e7?N}zG8p>)$&@ceomwRb_4h-s%3R0l@IfsB4KM!BhYN$k&kUDM4g>j2Tfh;H zC||MCpF=t1ZlOD;t;3&{XH}UwaPy)c z8F$#SHy>}Q*9Dx^o2<#69lQG;Z+3qFK~_~|J8;3M>4C4MCgG9zOY@PkTYBQ(s%(fI zd{4!$4aQdJC#H>uqm_$~U)H-unBT8V;e7}Hxr~4PB?0TMq(oj5kvgBv7d;u021+rs z>~>!YVoC%_c}jH)HAA%CJk(o_6v27QB#zNAf1z_slYPL`xp4U0`{6HzTHRxB9foKK z=|qwivI5PV%p3jmP7nG5JGzTZYVLJ3>{VKIPQ==Lex}Dze6uL=aYXm&+YvM2L8+tF zs^!|6gN^&Ye|^fn`Y=x=;le9-ojNz`nk5t~IjJ?3o zd3weAucUs$)B9aCYpBgreWGMPTv9aX`@GP!&MjX@w()N3Hl3Zv{`_<2uV1gVhG4OK zE`LpARVFIZEsX6+6ISNb={#}WxEtZ>MzTBPz}PQ-v^!!xH)LR*EFX66Vy4Aqs}2J9 z>VO)|qLe8DGO-6as|I>ZH39bS|!itj*|`F#IXAkVzVyWC6cZ?&Caw%Y;ATj zsSmx5OzTOK-oHM|o^BGi9-cRKuXCHbu|}X3VtVv)>6}}T5L?W@{#If z(FotI^;#RVUKO?NXEw`t=e1@}fA+iiY|Tqb{VC6~;fKfP^&WgFxE}MSp5Vdyw<=|Z z^`YVv*M=MUY8}<$Yp<=lC%SwN=2999+QjF&Q@n3A_zb8_y|z|Q7IB~NDbq+3qFL}< z8NQzHyE|kMBj)tw@p!!pg7rBp&2*6+%F(#dTz!igB{D^8XQvk30Ps<&+?rWfmv zT>t*A=CRgOp0pL18l{FB!i?dGGC#VJ?%z{^&gou9drk&1v{#~qJS9#DB|XLb58tdL zC^K$8tJcnuFN3AIlvidk&Yg;kS2-d68-EYK0Lh3TvBFCwkJ5rDLp_!T>&ncV%ZQTs zZjJ4?N_nlOiq~;d=S;oxE-$v}%j}4oJX&vFH?F!Hv(gbmcS-9+A%dxAXX(qkjyR_E z!@Y6mTY(4N=F#rYM4j&?$=VIFvC1gN(w7Au&oY&1$)0@GD>c5tI+<>EHp&v1r)=!u zDB?7hx3$Bew9uR3&TClFvSxLf5V$j-OsILqVff~pJoVkr&*sLO{NFjWN0Nr~*5@XX z;5d7=v{Q$?+sk~j_3KguQ-Hv#5(7!V!5kGU z(c0n87cIJ>i+Zn;B*Ir(&1?O3UvVowrnux3LBwFXSn=YanD5pdDha%gR1D}0lfDZK zxjhn|%e74qIff?d+k#_o+p@{>*Uwj!S#SKE>CpX@w+xo@yLjw_YNv618wXtJ zADDLK7Gvy!BQgZ|1^&^GUD^auTsyILU1xfCI%63+hz()=MPo8O*a#+#W8!wlRa$hN ze5sQg-LQdaM|Af#?Pc3|*Tu=k`+Oc!8<@DahpwK*&no+!ot{XH?ZU{@u^OhkPq(0Z z_8eRNhAl+5v>VPZIM}%vqa(i6&gn2*QkE5X`c3S3HcqzwZeQlw){ev?>8)?Ctz+Jt zitWE<9X)*J8*qBCp!AKHN#?T83#$}u+yb*E$KU6f95Z0$7L2YCO(b2^joy=cey1mO z!1L_XpGqRHKBFi1fwSk8!Z4pQlN!xi3M%jJ|Gq~hfiWM?Z!VK8Dy0w6Q<#X?=ym&0 z#chQVhw{b(DKsiRQx|rvOw)X)siwvRBD(rE{MHd))VNve8+FU_6N~ZA(!_~MY4qL! z$vfvdObHutiCyOv?J^|2MG~(+7a`1$vXV8Wd+RVF^19=NT;n;$np2i^ad#zn0Gqcl zZ&KZd(cNc#h=PbxJ9h}?kEP=d#-=K*B0Sb7Z!u_QO4apbN}H%X<*{Sq*DZ+lSRK`Q zb+hQg_CjAulmAf{)9D6|c))S3nmarqt5XG?YU#RFob+9aN4^f9$=yHWg}DrI3wpkn zAP$Y86FoQV>NxgBb?t03E|B8Qr_awt=iaS1J%Qj#Gh{#6P*=I;gz0`7Cv*5x;*IBu zzFmKIu!TQfQ{~gPp2gK~RUyO}^V`BUy^eRjy->tAg6$JcFMi|m_co%Q6V~aynr5HIj|<1)Ci`vSV_RZO*tFRQxMrAXbU@*JNX{{_qMePtrDJ z#Way@z&_~?K_){`9xh#g?Pqk$xa>eNe)A?r`i={neHO9r+~yuH+c60>z@{sABJ5+0sLQS*uS(xNu;6 z=9PivGHQTP(yL^m)>T2;MHOFq3_EJPqJo^^ozr;r#ch@!=ehO2>TMpz#Pqdf$j+jyrz9lD+@x}N`vdhSbjvXV*gS($mmt7n z?oKl_GOc>czettzzQ?LQadDbPGeg4q+HJ)*HZ5vPa+vK5vRi&^usS7uw?B)1*Hl)< zdrzDYp^VNwQAx|B5k<-SYLMe5XAOb0SE8}GyQJ@(HE+@MT{Vr2<>6B9G$HF7B3A5; zdr!X>8)z+$l$ZYybsS~(YftaDH8Wt$Oml8N(cfDeFZ?wsr!1lnPOvO+?JPgVjLh^4 z)II}+A7Ln>ogA4Nls3m!^48ibYiq^k@Dw_`wlTGWY*b!0CJz3a<32x^sY#UBiyH5v z4^@+^)n_Z_XRq#2ZE0M-^O5Arix0flsJ~4sYn@SF;8kkOS4+N8xc058liL6JwqR5S zW5)A1ocat-Eyj!bC8_k%em7&L1?mTWX?lniXlB^o!E)UoxGO0Y=lg4Rn5%Y;bi;yF zm^;G@M#XLGr|R3IF7EP=6}Q`VSUer*yn19O=eICt;=dl!cA)A=9UDVF$q0Iec>jb{ zq+fHh_t_Qo^AY3IYAJlYB=@COW>(6ZMara|FD%>5<|srY6*K8x?UVkU-mp7tviy*U zu>duFrDZLDQTx3@yr{F;?oDM=a-WXSUn+b?udiMIp6%pXoLT4ieY4*>m*A&Sjy%?G ze1S!lRzi}Op!xkVM&~Km0L}*jy-g5E_nhAOJT~%8cVa zEyPu_oM>uf2&i@Xf3JU-C+TKOCor-}tutG%!ZlIv zsqSkLPt+HmDrEgQiAzVGh)KrLV^SbFZ=5>!h*$#nHh9644EIOCy zD+!cKh)mT)TlvjNG=SE6z~8 zd6tLytLtMjcgjdZr9IO~Fj4CpgNn7pR0k$6_5yzo|9z)GjPzV@Ih*SSt*eg#BQ(aw z{*KXkY{nL$*Y1+ih8cvVp2~DROQzAnj5wE){&)2v4)>rS?fXSs_irwB&y+bg&+U?< z8;I#QS%pa|X&TaFQE_GJhYJaq$C3uHVJIcKeE(r|U=c#K*OCb}0NuR@CAcG63>pLx zwqHd@{jj{?rjOFinuWz6-GN+ubRqO6?xMbOYl zI&#%gzII%3JSlZ=U}0L_}3y`&iFiyC%708ZdKcj z24t-P_X)YrYwYqJESf0blPRFBhjku}E0Fp~hlW@>E|7Mu2FH{*s+^lyQ88K>qC94PBvBWm+m+x+UExs zNYH^wOnQ4Z02fzoagDYsnY*wI-;8e0@oGvpJ493bw9N)a`EVf);d8sY)K5qhL)~{O z$F@pD|6q7I4H&h~Yr<8Df2XZdEl8fJn77aG$sd5gP^=eSbGMmty`@$hu1fFg-g!s~hjUoFIQbmU^(?UwWHX^d8TrMD`~D?OyIfx%}$H zeWAB5AHTB~0?wV;j+mK`&wNi)S~h0d^zOfi+2!=z`kBb0fPGQtMd&f0fh8=yuPD7?G6Eh8&-H6VG!IryE(U%CR4zmYV3DA;Lq+x7=Vo zB$nBiJB51mV{V+xtI(59robeK)5aTd!BGv6>0Ud26I%LG7}r;SJyXi4H&dFs&V6Ao zje`QGzv}l6U+4w%`I(HCsnb2tIq6^=>+Si^Ti?B>J`EKc=oM}NoJ@g<=>mMynAKyb zOeTjzY&lwPF1y<-27s1rM3~}igema2(zW!5-<80zH+H$#nr;@|7|c~B7QO#@M6CA! z2!*5?``zWCLvk&rv!q8L+O}18J3ki-MG@%PTGiNZ(=mwZo@9pFMVcmpW@O@ zm8QdG?zs+xDConN0XUNgB}4H2l-6&k>7hqaQBO`!j=D=tYQ7TZR5|pFrQ!9Y3Mv3Z zF9>D)avzXS-4^QE7ehNbhOfU@!vh&d&ir)}MRxEBBm zOfM5QP$DoParT$c%k)jjH@I>d3bO)?`ycHR0JCT-hw=MxdHVf+?c_1Pszd!Y%4mbGN+^T)5@xJ#^lOgAgs=L>X zV6TR>_hrfQBc5a1romcZ zlJe31w)^vm*7BVn2lu;H2IQ=Y3A=F{DR1=LKf*VBfHlyYCEMJ5r$g{qSmEWs)t0!W z;_{|fdxABm?c`dxr|6&2-J4b7)}d}ZS3oX6HBn+D{0*oEHDJgLfH9033y*Kniy*>K z@W7ppIOlXQ?%Yh{6pE#nx*SZkSX7vKV~t3sV8bNZjb|;)7Tp!uT{gP z;nL?VjgKAj?tQO!JKlG_f)(aK$Y)r>&|<3*fv|?jm-Ox&?{D6CW2>|Dt*S7I@0Kx# zW;$2kej5`?_WeA`$18T4O!(u}pIJQ?`>*;dTqB*f*L1}Fc}B}n4`uy1N_5K;k}}T7 zpUtPc{8=9?{m^yQUEbvi7vkW_Xl=Mnw*+Hes+J4JFpI^e3R}S+SqJEvv_P@j@rH_$ z)bEthuRCodSM(BPoIATvq=hy^eLM^}!N4sbLkTZ^K_-2~e54Vw8*V&tlfhQ~GF=Bl&HIsho`9;QWU#;|J zNM=Ud!n-vKEC1$*8?4A%6*R3?nwdxSB+j0WL$8uw;~XppNeq?UGWj42b*jYJ+`;`< z;;CnW@uTC;c?qgyBJW*K*jnkwGwstzrIl1&%6~jf)W#}t{E*bb8YgwZW&IhZk`eVL z5L0q7lb<`MuL$7OE|5$Q+Ej3$H-;P>?yhvnX?(WuFnDXPC;CvZUO8xmo~3dGS7~!z zyV6qeZIn;#LrcK+yU}6s zL0U?9;}x?-qZ05$H}5YGoSyaKey7xLYi+L?Y#k88!KywCgGF%2QsH6i{-T z*!U_Q0816}1gAF~6>6eSCF2E%IRq9z0lPhItW-#%tEEaki0Bn2_LpArDf!fLi0cKGa@n&@aZYG!W=499G?qbYi&siSU9mD?Dz#Vx`HOS_4 z%C;#p`LiKBW5dy}cLAT4%{crHVWICle~PoFjwV|`jc&CoX*x$;Iqmn}_M|-Uq}Xlg zDZjbdcg5fjN`|3%3VG$Eqmk2f^=9L_k{KH=g6*6^MIt^+0*CD(N_|1W7?g6BW7zn; zBs(`B@BLVGmy*<#kfE5Wtzp971sXD&awq(PR-iA+SowQvyTvCHIpxddO%EoC1baJB z@LN+?XvJKz5f-adBdohic)FAHch`>>9sc=vShe2--pk`)@cH!>F5P}BrG%T>_n}m~ z6?GDKzDHbxju9i(r2Wk~ec{}gnZxjpRt(stb@OQJL?Mj9VcWEVDaAG1H(zF#nMaG= zez@QndcTx(qn|5$AN6LOv6(!zZI58%=d@<{!S+ITs-V@)1+6$47Ry?8O&sSq%{@or z43Si4sXFrQrBrj}LLsug9Y|c%*K%TEea9KFdnCSz;GVpyH2N&ansT<)Gmdlew&wZY z54;GtC%Ls^<1lKl@unK@6B*v z>wG)yB*9;FHsTYq05SdLFjni)^hz3YI19@dJetdAt1e74VjiNe=qt$rLm#F!FrLy3 zVDe{1M~M6EjLkf4Q()hSi?T7aYNa=I_EG!Fne~9o#UXovn2J`v2_bV&g6Aq{DA#hGQFQ3cqgGs0lqk#UzE0=&^SHJ51l(HR zOV(~whS0b_p`L5?d=&95l}U=M3^ok=jJ2HdUb2Ci4)J>HV}H{rA2Z2-$R}?6wdFz? zBym_yw)cI`j!*jJZ8u5iT-w{5TsTohCj~S;EEPXgdxwbXz%f5SNT#hN%dBeQCsmb+w_$8FC zvZ;Bh1P10Y|BUx}{;#bf@&e7}Ij^%0LQ~{~q|liK73HSMeC;hCM-{oR_IQ1aXIU=8 zZ3vEU4M53%56J253%FKlBlt_ZbtfXTpr7(b8K;lu<-LYtU5WLGoXk1iB}+w&N9bRDTo z5ZMMk-g|`}Hfgq|=%5PnH?u0L0((&_@bJkY+eOw)M>?Q)YOd=JX#e9Ha5K@uWPI*Q z{YPy$Dg?tPdFb9M+=)2xvr>yTpYQ+5B#Xv!DZYef+X)j1R+qUV!t2?0<=n;AuSV#fLT>!vFCk zB5-cC-6dMJf4Y1td3HZeU$o6%4L?4}372gtU&PhbwYrFlFoqtzfuVcOO|eYwDTNS* zLIhE=_L=#%AFk1MyQ$JlEu$6Di%2$i_AM$JLW>k2kCXtx#a_NQG0MlG4orlTRq?m} zwegzng6q$FbYjP5_)&SWFXA{%^PmV7RTH zg@peb61hbfLWRC0@qfb~WR0u6$oqxja_B!U3Vwk+lV3qk!oOo4WWLi1vRVF{Z$FsN zeqrm_|K^JZ=BxWM7U8DiG-#A=Gw-p?BWSe#{JiN1SPR7I+Du)~t`zf_)(V(a`xKi^ zHTmm3DAq4!1J2$|7D|wW&{)-mO|WjEAMb;aX?{tsMuz8HaQEAVPq&x#OawU<25q*scI(^@%wuvnwHPk zyMYMp(Eqzc^jTrF45NO2Oi(yS$@5YM)asEcxysRCE?9Y!m1?sPxBu9kd>{RyX*$RJ=;V=w~n zgQXz_7Kqx>-kSCP#!SeA=@>b2i58P!D_pK|lT05yPP6;zpyGW3j`eG~x%u9&@5Q}# z-|OF+Hu&gBc&)ya5SK6y_{&kDfahbPE{xA#{#x|Jt+cJNOcp>op3&)~P{>9sov3Cq zPVgS3fPHV$#V3di!Q2b5`^F@yC&sFNrt$ZrZrK@PfT-$|;l{)!0KikG0!YIe$dk#} z9#QYWqR}Tc;3~(sh?_3yt%=BWCx^Sb3;f1!R34oHe) zDf#A0?W=x!s}}(cCIQEz0*DjqfYsu%d9gmPxe@Iq2)x^AoU4-HJJ_P7m+(*l%)~)* zW*bVxAc6ZqhW4is2VT5fEp8rxK}XMsWbpC9wmJaz38K3n<6E6%DI~2rWA*r4Y6laV zWrIiN&I7;U#3&nJ+M6!UYAE%x4H)7ie5QLl7+B{PZFj*~;9BFqO*FBEd%9JEm@k0a z@MW0v|9#7P1=MUVL>IL=pRfjiNgdc4MQ@D@H@NUwAQA-&kWEL*M7!jsU73HUp2WpM zG9!15vHFE>5E=`=Np*>7quSDFdf?f3slCerE1TA7R`(K!%^j|d@xA7m zh7WK}K;6nN!2I#!2iQpXI(R+D_*lOPI6HoOt@ub16`3MeYsD&(ta=sBoWUHG3>2()v#Un<9W=R&y`o6|)3Y>ZMdL&KSpw0!@o(_7d~_ zeUN;M75yWKnVy1wpm+(hcDphr2sh4j$Su zWUlpCx`E{Jo2ABjh(-r{;%4!dV4#fmFVnXwMBAreE783YDi(OSR2X81S#oGzX&GkB z8~5;Zs~tm;XIX(}kt>^?=Y5-qJVeQ#;H`N=%BuA8MCCYVR5?Vb8`4@ND);1U^CtTt{3+)2o)1YD_hsmm*F_>#8%qpf)a9ROVz>Exf(6F#I-its%2y zghTU9B^`#XE>h#|t2-t-$z%|Jr4koo!86J@cL`naWvG|idB}pUG##g|f!fyEa zGl?GtkP>7({q92Mk+ASrhE7g2%WM|QiIC_@;)Ef{x&y9}n{nQ``LDlee(ZW;P! zT~Iv7{Y>RKlDk?sAu4NKmXgDG!8m3G8j8lB|Gfvn-pfI7j7|_^K0(TCtL=~d%!>_z zmR0eNd}PQ-;PFD`s;G}hvOkU&mfBu~t??dC#}l@EOwHONen9QuqMw4I z*94_t1N-i72y6cmBY=KS>;)gGr-=Y2+*5xpWV2OCkvA#a{F9}RXL13mzqmF5Ioeejwbzv!a zORP*Y^HI2jBvl-V+}9$ZijdD{y#+AoV6(&7k||=Pp4=hP;y^AC1$ux<9*evk_B~Q=FlRMv_e#D7 zQ<}K@e00JI1o^MHhd^>DnF2kIgzi5YI(W_yD?@=snk9XfnClA)UgMiWcYC;Qz4J;2 zXIPOH4iSAk1S+*^$8UMS$O?h4sLO#U$N*73S6I!w54+S1Ola{jkoG(;mF0c!jaOf{A1MQy$DITaNxFbn z{4nSCNLDgNzV0fLU`3!9sPV=06ApYIiidE)zCRO`5#ji$mdruB$!j0iMG9b8)xi*u z+kh_7nkjw%4v;}x0AJMsyz+r2kPIx7dahDTo>`-xy(ZIznXc_6^LxU*i!o1`v7U9dTl1FppX3(w9qN#^$q>dp1vFv?i9Dq_F;vd+ zYE|FYk0Y#hE$<#;6Ou*0fRs?I)B%3NsLh?)nyCO2u!*rlV2_6F{DBz{}?6T?*{=(?Y6CrE0i-r~1*|~45yD4)g zCy@O|h;h*>_gST`dh~pyPnpZid!_st#?Hlkq>v`HnGB*EHxJrhjt6m}F7T|09@5Zt zycWgsze&h^U18c2{Dfg3)@ZN$oj@#2Cu7@gg<_)U)@Ytjf=qhA*~JdjdA^&Il}DVGscHJNl<<5KRg`pJtkbrrQ`?!w|gZurWiIb%~-%S-a&jnC+e7}4@N{5~8a&XXnA{6N%0lmgbGa;brtR?`OY z7R~Ltc`MBcB~Efe4&fk8kBNhr8H_0*850G26+& zRQq)k6C#EZVn;N-De8adG1r?V?q2XrVZ^f|l7Y|5gZ4OV{`2!AzCwDnI<^1l-h}OoKVSC~A=lXSff{!~TLXcYh`#bNG`ELc+ z>GYU)gHD9d07WLHb0N%`k*OJgd% zHDn;x(M;jXcmUbWK9o2`YNF22x?BM=*;iX@xBuWaEIJpN;5%iZ9O4Wu7JxVH&(~(m>oEj< zPtbGedK-G>TSS$-3@W82u zHc?Ir!!SEyXrBNANiPHv?*O{0a8iyS#W>`D*euRN$S;0_T6006M?Qe`$e=@|9|Eb1 z2C_`At@7}D3>5R04>cj?pUV6ZTvo00s)?hzT+}g0Mvgi-J$+W0nZa6$y|(jg*_GKu%Ccw94X#Z9(Bi0B#JU;TKsF%Db0np~c00 zSoV|TZFuy0ZtwQkK7uTQ`W8dHzZbE@yku8dfe^2s7T(Gy|w;FUialCeF^!i^` zC9Ou^^T=Bq0#hKyf+nctcee+|Aqe$e1J1tN(leQcn#L|1m8^0y;wE#Ug<9Z?ankN1 zYV^nc?b=n$P+_t`uHr;z)M7_}EM1NyuVO&9CXHdXS#x;WtGrX!V0&9WgPdCMJS&;_ zJe>GaMTiMb)Q0axJmcDqGQu!kwW37h)`Y24g>hCF!H;>(jUIUd$!gVJW(`uJ3p3mq zlP+2BF&}!Q^xt_5gHgN0dsPytM-cQ*0CRUcsEg-6JbDR^_3iM+{^TE6C%2&zAjZ6T z_pgdBhoO()Q8o2`BKXb72Nx5n3F>Si^?%0|=ir`t#vxuXjmSri8t`WuS~4ZS)}kG!3I1kAZPs)4rW+fW;+P$TxR5b zglx*#rnTX@BSd(Lt(`Ey1elCa_`LFDSUq-YNE-13)%zi|qta}TE zVCc60`jQ0-ia3L6h6|Ywwp7102R1WkKJ z9gKzhEg^?!*pqjAS!8%R|_l@Cr7_i9vp0`Ld!rNuuZQab$e>A442M< z);b6U+R&$ggTuo;n}^MA$KMuW;>x!G*Z()cv8k;6;0>8Gj<|m-5UL$=eIk-c7~7&P z=Ke_xrO$|z3i{=__BycSbUhEMbJY-S_&s^QYd7$4$IchzR_m4^bcns@m&V=}^MDuG z}*0iT@*tzpJo`VH01tbStO5$b2YdQBmg02w+O$lRvDt@yG zr>O=^Z}E_3sgCze= zM_!>MU?U-fVX@V=Nn<0qijlg`r}N^PHv9hySI5=7ai!V=AaQM)?z$|ba2seQa-PLi zZ2(rae}?9}6y^a~&Qf8t9=ro}o-0quPQU@eWe^BPprMAq?DLy{SBUra{B^yCdGbi! zmpC6}NN#A5^I>>=kBH*gW=F>%u?5?%afU3EC&9J~9|dh>?Z0wekqv|z^U7fplt?^g z4Teyy723!2VilY#(8?;K{P{UuTo#vXLL0|Zy202eh61XP1BI4)IIbm;hGt^~io zu$J6Nx(n}@aEpLfq=7Dp|5NnRBqh}yHBC=|UCntT8MMM#Tc z?gbM;k;NEV@u@+HVUB+HY<9C$r5tJ0p*FU}IWC;2jf$B5QWH_%wYMX8pcJrY>Z5P@ zA?4hfMR?k^X_Hmsh7prHilg>;N&Y%>n!1*8n1l`foGAE95o0a^qtvLp{s4s}+4S3= z%|jLcQ0~4~#!l|kgYB5ri8@`gz?0oP8_IC{hPwTkjDGjMhRpY98ActfEu{WZY|#*1 zEAY`v$Pw$qnq}GRC#?5PuB5O==l4s#pfSQDi@c8Evrk+=u}b-7bk+z9mccz(1}@}x zPh(jHpjV(~p|ef8yDyO1BJ`w#&N;p_upBRtO17PGgbWie^f{VyIlk!a`E@Z2{^nz$ zk^5q0dTqKa&h9()Y&}B}%Lix@S^-ajL^0?G{&TwFvZ6OKmxF2JHcyz*la@e;iV%fN75i$znEaVs$zJcDnZb%vPyiRU616bBVK)*X0A z3Pg*HKr#<=5sGU|mHp%P6Nvs<6n;`ENH!p2mP^n=BO#8+fJC?&X@sN`9`gRUcQMx3 zCHFl5VB~gZ<-?7{Qqjz6x`%d&?pZv)8AIGtsj4ub0SW1d6gT4I(S6*w6I}7O9}uCt z%Wl1618d0B5)$Q|r;e1gib#%W!kf;|JvMxZK3TH?T>&(Dr4nnhv9$xWd>9EPk5$e)E*p)0&2r@57@=53QDeeO?!B0Z@7fV|NVx(r( zyc*SATnqHC{O2&FWVX<&EbKR#{*jG`j0qHI%od833u~Uewbh3Be^O~M7YWj#<2coH z3;3f6HGXhz@qowobR$CcBNza_$13_o$TQjf;EDT_IU(#L^o>vTRY0mghLGl`tzi@A z4^eR*s4$tqb|c2W`PT~fjs1|?#^(7F=AHE1-$N47f1iLE_As~*vq_Dx{-yE_MJVc{ z2xk8)4Tyv3MDKlsBzv4!(ZO=A@yb}R{-4=b!OerBoR_E#L-4_WmYoK^6qYwANUon= zmcJy z4nh|?cPcyrTZcA77<3k?R$6{QW#BTzn}fnQRBHV%r4}prRG9()&kllYBA7;s%$u85 zzrPc0wURJDS^KPSZv7)2Y+U$eG|#2)dm(N$1RRGGGnFk9tYcFz_Sd>vc@i8oZS5Gu zA(kj06!YQU2#cLj%HDkOe_LodENHGUhUqQ&#H80+R4Cs2?=0#BA0ZiZYr?(n6Zl~6 zoE{$tc`RzPn<1V6j_c33THC>CSCtQG|Bs;zaL|x1*uZqRzv5HzB|*m3x=t$RLd9d4 zjoJa=3kdrB(nxvMrSU02=PM%P=Ul@}C%dGebqVQs6S8PQ3oZq6Lr=t~`wM-_s2>lI z!Jd}DAh7Z`?j(wN{j&22L@K;_Vsdd`59LSMs34L&- zBNiczbTOS${1)A{sH(I%2Dvo;?D+Rtn>fU;8IND(r{eDPwJKRP?;V z(5txw-Q(}=!>j?C!!bGiCN)mBG{}--dyHnf?vlI?M8@ZCtDA&WRpN;IZB5<-8WQYf z^7gdS#aP6MbGNcJwg_oGt-H4f=*~$XP zVOPhLpu70IlT&fC(2Np45IqN%k-oq2CSRO;zNkQ7jt+fy8vSqcPc}>bt7+x0p+OtX zaD8)-xI^gsF#PU%&y|V9_xW)Hvey{jI}uS=${sG}s@(+cW2JhT-tq`)fw#CMbIzj` zR{>b&Uuy=a*|i23yP10&1g#9onmR5klksybzxC$9?D(T@d8U`&pe!9BoCVjwfcKRG zTWA=6>3HrDR!q|}y2;yH;W?Dwrz0m{5Gqvtsq)EC;Cf;Oa#%lVi#c=y0|Cj*J*ni) z(6<%MBy09-s=1tU{&tzP|KZMHgRlD>FE!B6(^8R_=KGYP9rL-UvwqY}JpZ8OCX?yj z=$H_GItZX~k z(I=wt>X~c)Hfphw{{IQF#>x9F>4wc1hJg^?$4M)?S9F|k zhI6oW7a7JXN+qiS#;-xC}ooJ8~Jcp8R_oHWF(nE4W0_qW&d{U>9I&BcAYW z|6yHWGPwYgD%YVKO8v<&Xn3JdAYZ3@r2X}2LmiL?$6bUiKmKJB=5%|2(%Gh7LSwoEv6^&%exs3-r3b*piU*zhlV0kaOvRKKzx{ z;^oAkHfU2#@%`^O38@t((0DRYt8&@UJQ`w<`)jna?ehfKfM4zd0*mw==Oy)qX?cOT z$n}nS(orQ^)Bh6@UD?$723WV27{JhkILNo9@DNY(V=8{W^@inN4e7IR#I4 z9?-#v_x+;L>%3?NN&OAJT`+=lL8uuLAMFFl1)}B1$6Vm;5WBrq79EVYDhQZbw6dAk zF*b1>Cu#-F(lb~A1~Nm!7?uZsbYc?Y3j7Wdi628m2h zzs&R^xOd}0?mFns?1bS8LC6FQ$|s3IvKS=3g)!FzpEdh#7nGd1Y5wO0px`q#ber$F z2uWU_bq7e?BLx;uy-&!Pk)57=u=e1^s$U(S^9f+kA{=etkg$eo(krgDRdG z5$zv6AvhH~wC!@BFbYCNsfjlYw#$`^qmA{ z?q6Wcvax#qp$zk0f&v$mlw=MJ#G8|^t)nJAj}RjC7;N)Orq3?hwX1$%RQgbR6@!ww zg|rciHke~HF6fKmWB{*x6ztmv&;fIFq0NVE<0=|=XJvgRt2R>*HEdXmivS8uUWwaA zGvKm$IM6HyExZ?wmy<^U6_C%!o31Uv$Qhi6JYN`NUyLdSShf?PVme|NSn*v+MYXHp zXdvvT8!V=vFTyN%Y~g6Pem~0Ta0fa}KV!BPA>88Zh)N*X1W5y~SR2rMGiZqdCQrGt z#_)MRJ;a1{h$oYE0#8w7Y;u)aB&5h%Hw=PZixDG7zN#K%={!}PJI6-CjRe%e!4jKM zT40H-Q7KateBM z^P;SMk$5=`v2T)-mXSHQoiu1v7|#EHMtLFE1gci>;tr7idgY`aim&PBsjvV;{{&g6 z(C%iD?3KfC#*S2>G%^or4H#LKqT@@9s?SnL-vLexzSs$-GuRpgE zSac4MAd+C&cR@{o`e6>1b?D#qDKW#ZE*2fD0Q1;?1@_wNpGRgR!NU;b-Vt~E86Sd$ z--Q@@5f>Q}0MDJj#Kz>mlhEpqL3~xP|B{xHsLp4C3`gjJt607 zko^V&G6#x7J?*_I*e5AS9|7{d39wJNNQN_*dzKkizR<{!(96fMc|$CLFY&d+NcV1U zdXe|RpJY_c89%sp1y=`a`n@xFI>;W}!Mhq#0hm}G2^4RMSu$h;n-G8nKZOmtWNvf@ z9&al9&ZpV10lL^4wTc5@h`~?j0}1JEfi~f!c(7jFf;__suhOWJg2HgCQ1(Q%?pXq{ z1Qi5igC)VRpx^1aEF>3kZJ`DJyM4?SY}OYjXP^RKLlZgDD!puI zk+)JFE$5^d99$3QnqmrsGAIuW{W%R5V`@tn^~ zJg#kK%236I&^3BNE=TmOH*k(gsVtZNDRW5Mk!f?lEj&J@S$OY3GQ3_y6^ObyxVgTK zRo*06?9)JerLAM#f)AJ8S;&?2-sF{;4ob|+`%Dc#5LWQu&r#V!!;BDe~7>jS%CV8^sHR$ylQpr)kEK3G-f#I3pAit z7Z$MeYywYu<6UqPX=JIyy4>KQRpIugKEKBcfecHc%m+Q451fvQg$pcOTtQZ{zjL-+ zTz*LlUkc&4Q(%!Db`3}3j00ytcjWc07PvS-<(%MYy9L7YBPnq5dkJQEL5<25EZ5UJ za9`+Q5Fe0rbq%hLP(*Bfv&s4dJ0^L|Gu#{Xh1J}D zH<@%9uY-p!gINtIB1`>6!39Fs z_C*@}8qlvfscBZij7VI1oGdG8l{QoPJ9tfDH=Xa+h5eL&m2=@J%#1AhC?~MUFUQ~CgolF`neL8y3jZ^(1R~Q&H zJ>uHw0V6ySwwnu3<-EK|$@8@Rg=~q<{_%F-kd#P)WICgE;8&Uv2GCZ5w(;6a`mOF! z7Oz&yGh86niFj#ok8uWk3`r1RU(FgyMOdX`c4A1Dyzj(y0dJK3kjP*wgM`a;OLiGG zBy;{~y-64ASsbVrCYQ-IlngWC9w2x~f+KZ2T=WHK$6b;nkA*@ik?}kT!fCkR!f(ZZ z*+90~j-n&`@pzZG#(7Gg)%R!2`Qyc&;J^}8$y>t;i>oC!Q+C;cQ#s;$!py}YBEUBB zGI6y<*#AMh?a?sKB>GO?RAHjM^0`kq0(>H9^X)b z9QKOJv@EFB3(0aAEX>)8GJ-Xj;n_ZS|L9FJaDD$5GoUI(2O0fb+ajUmX&06)6|8tB zbRI^S0sg!;YX9b&5NefcD=5O7bh;7ae8vLpTsOu10kruHli zsaisyYN4P=Q|=*E$&k46L1x}K<2(oYeZ*5!_1*R@78E$V{kWGwrewYfzX?Suh>o|< zAy@hPbGR~0O377F-D145&x^qS)ws?Mc#uV+!}q9d@8}C2qk1fvP6rJ!5PXM1Fsc}L z0ULTV>dk14{ZN;)1S1=Q=~a+tNEGRIllser(wl4PBXCIus;oUbZex3L43hiEz?1#I zDiZ#ynEr~}a5Ll+EC2;(1p1{al+M>Y> z9?jRFLAoc6K?_eM!g)E(#(0CEw0(%A9X1d8G$2HW*N)-+KdRu^yaST-X4hR5(ATSQ z-%d@Fssc(5clT?rSuupG%d-uj-f`zRk5`wbOW)V$oB=Q`7Y#*sr{Tk@ z`eC+sc%>9#S~-N*%IFN{samZre12{KXFFJH8oa)vN&`K+zy>N4(Tin2p`L=b1|@*u zQw`if?C_o?q1*3K$z`|O!V}@WTgY2{kg_b_yUuj5IWQ1$Yap&7cs~a}APYSxGbA}c z*NPXNdY}tgn>9{)FO`5=K+c8e{r>=SgRdimYo`li0|R2c2L44g7y~O0df(jEC#5|n z417P5?{3~Lywu;v^pz%)Twnk_GZ_jY&Uy`Jde6yY4s>o36+0|eTTDE$47ozhjGqS! zS>DAQs8V>d);-spiQrU_0X6G(AgirkS%H7mp~C$MRu(r9ri#oO>vxui)b&dYS!7T5 zbEUR>gw-ZM!o7%3$)k;d@A?Zk(Zdoj@y^{}8Rlir1WM~ER5dE4#&45cMC}HzAr%`^ z#Fm&hn`KNv>!d2Y?^jg=RAXz>ku#tIg z>L~|WKLxh(TbVeA)@k$R7ci%UmGcsX%6tiZ(sEYz(dFs;236(`JPVMMDh?hVY->jV zM}Q}Vv2PK}I0tW9(VITMab7?#DB%Q%6tBl{S_$!6gEG=i6zz2!p}Swd^=;Gz@x&qUQc2R&y! zg&7ik?GGNN{4dpgbx>9NyEot_50XlVgtQ0>qJVTtsDN}y3W79%g0}m%;Xd40h(E4rL?|o01jPQJ z9HBRTe7Bo#q1o+6|LJeBPyb-4NOlaVU~$0E({7obO4A)<`oc-~UKfm_Ec60}z7q|E zEF@Nc;NeA9R@r z??quvcY{KgTmzft8jEpcC14E!a$dqnYt10)mPWOCY7~;;2!__k8W5*RE!=CGGzOq7 zukc>$mC2(p5{&-V-F5bnCuL=*f&>HTn~OSwFta^A9BIOkgTC8^qYY-!WB zhIssd>D;?leSu-gXfX7|y&7Dhh(mq~=sH>65;BTHaqMu>B4J+`&~Hl(!JPUq8pKRW zyc>~Q_7xxbY9g--84Q0OzOFJ{ zQ)PD-^bnjeIu4704pR68%C{rhNFPi#-A8`Enc0nW;V%3C%MHj=?xo^gr?< z(24M>vo?-k0+Y!(qT91Ma+wY9Oc6DRvS@7pZ!6>!RLm+5Jm|3+UB~2;7={-MJ%k~5 ze@EnT?Sq_8>`P9A^GyAb01=8(x~HGi`;$4@i9vzQo|O2I6_ATUSWDc>&+BFXIynf7 zwrLv)k+)5mp!F4;NDjYXjLDC>=XKn_bY?#Qa>!n*xiQY(zDqA}3{rRsYU-H^54LRS zgt%a^T3p65jlw4w*Tqym!ZB;o2nAJ?il|5`Z6;A-M}RBE<~i(=53A;amE6ogt1+JL5%45iJoOCL8haJFyvZ2o}3h3rr2M96K!yBw0LD?W9 zC4WU7B%V>BOM|{J;9(;?weVqXEMC9@WCLAPY+M%*5+*h?yWsMwTko*mCR5Z}GBA@C zH4{jv`Tza;2#=U1thHsd0y4CD*IXV8)ou`|vy!3zB46yH3%K-e-B3vGrEP_X`O|Ba zQ>cYRe@TR4zE}+JN_W&{{rxUU8}dyE&A16GP%O9Z`EnHywS9vx7qH}j3m4+Ud85N~ zfotKf7{^X%SG2lL$iewPMUoHC<2@1|D8Fq_MkmF5l?iE#UcBvt-~zYFVPG>^YzQBe z{98?wWC#JWEJC1Ko;Xa_+JC()HX%yJ0(W6}E?2ki(L!fR;3o-M=PdX&f2jZaiq7;O zl=H#Yw#ofq%`U##!mJCj@Lb5qbnXVGD4u zA5RTQ-xr(*L6Hgq=oZN#o6tU>`A+rr*W_2Br&mG80!nKPDfB}d7-@q~bZ>oO~?0666%~7@3SLX=CmL6V5*#s<%58O*QkA0vcLNJWe z){42m16Uq3;Ir8PZHNcSq{2XfQ32*T)QebgZx3Xh0Sk|8=mg-_NZs6 z2W5$cytEGhMsaqDu7*==K;YhU>T=0>rg&s`d5XAOaksS>3 zfRBTNmF^`7A%`U<`ySo8j9BGI#*_po`g!wxLLrNwyAwU~gf`|IP&o zpkIrm0`cBJ2Ff__d2wfet;>gbt^yX>S&!v0|Cr8EK9(L|K~dXlHi-xO8#S6KD7)_1 zPl)Au2c6sGingyeL$wQ9BR>0P*Ka!N;ClM+3&?IP?q5<=Q z`BgK%4JpaE=zN>oza^BHy$a=_bg#?wzg|I-P%XK)`*HM%mi;q@7-O zN0Y4&VpUH`hw`Y;JZN-AqkIbGOZ{1h-Pn9lBi{A3yvAd*q)*>)JaRHbeCS@H1X}ZS z60j~WIuzCXY@)lJEo!g~Sg00bc$VW!;J8+N4>o*bA;j(vm8ywiMtAp?d>^Bv8(_P6 zpy7)K<)IREXj9;biiQq>h$`p!IYR?LJ6Mr-4xY*_(MM$L6(f}D>?0(>ej-@BJpPRV zWP2!Uw{hpWgu!yPwDkJ2`LlxE_q0!mDYwVDYl+;=MhMPu0L*2B>uebq0gnZ!GIhJx zfJP-7rP&-Lv9cZ)xI`eT^)<7^q>-%iZuZkL}jWucKdh0|hlxWp>dhQah} z->8*-?XW(aVu2)un1s|t=n-x{ra*R}-$UITj7Rzf*DT5Z6pecz5f;D6nfUj$P86VY z9=6gC{(LUeO>`WYmhivF@qpI@)|7g(e|X1Jz{Zi%Dp~w}J-|}CGyCcjd+Bd+#4ZJz z{ym@IE7pSU*FLAt7M(_+5Vk1e(>iy|`@wurKi@d_5)SVdNI3S&8WdW@kj{xO^S}yW z@#r41fFlZ6^Cn&SbHqBB5C6aT|4f$&5#9>?COaVS4lJ;GkstS+>7?hgf5$dK0>i2X z;#|X?2F3ZU6Ao2->!W4O{d@Lf7+CoR1QVzLaIAm}_HKL(-tEVMOiGH-L3~Sb9f<)= z07!doiN?7NgVpn2bq#bq@!%v1Yx@@a-(pgSsyqe%HbgH&}2pkwa}SrgwwGaV}p=VKaJcnV2?oP#?-c#Ie{uWE&vm9}2bB~=IqZABq5m<^D1yW#9_#1XuT@z62uT%bME zN2|be3dhX{i;DI9|5ZDPJi;Ow=O%F=krPXnKH5*?YJH5 z7XeFx?U37r;S(6nUjvr-dVuoA36|SFtTz@_Q--PmZ^mOh^R>7Ree}l|7D6~R>3kwMwI`tZQ-~s zkgS?OAkm!L0Q375WNjDKx;4pTIU@dF7~7F_7yb#3PrJJDO4{#;}kdGab4IY8~l)&Z$ zL}GmuoNIG+i~<21=dXG3He{J^?MqI9xa@U3mCJ{^ ztq=#U>ApkiJzU};R9B8U>V;VRO z@I!t9BacZ51PRU$%k_vo@b}X=D!h<^0)mL_+eEe6zcIVz-!rfRlem8Zl?TrXL_LP8 zG+pLPkOAqktSVI|*ypu9fA{5^zx#5@`65-aEjy4sj^zXsxPI@Id($05)rSEV6+zc8 z)cFpgNMr$YFdu&FA}q2VVHI#sm}$Rt_0}_esUc+42bzh6tR&yFBZV)|BqXl z@&~@2w{Vus9ya})u^82xzbH+R-bmFb7cLc}Z+ZUW{kvTJ>T&UjRJSdUab+{}a6%na z(`C-W9h5HCxDa2K|IKN7)w9XHYGd7K$@}2bf$!+(>ejOFW9LrK&W$G<-kt89&Ykq> zB#sih%Yj@g@B{oaJ;HZzw8R6Nmyhj{dDtiQ{Z|bvEOPE18>8z6W*a5%HTh>UdQl)x zE?o6K<$wt4AA-HB%-{Gm^eTV5$yt|uar{sAsJAfM@AFn6*cn)FWCHBG z@GVel-iE-{>{{A@j9*_KRsq-leewjkyL3^m=UOb^~EIZcg zM&+?ennm{z%Ck?$dnaAcuf~KObHVJB%r~RpTookNv0haH5H(a664D+Ii1aIFj2{?q*Il8i^H|V`2T@<^=FiF1g+<4r(rk^Fg089hWw+VD z>FR3K`qRtDj)ZK?hf~6wb=cII>C7Vs~?tr(I|v4 zQ*2(^PSX-?JJf5E5#wmA)#cuLbZF^gO2g#N9?|>BEpUj^@e-)^;Af`lDjoj5eiUgL zq?U~(pHY7S{Th>`9AmhL#p%Nd2}j8;cQ+vln`rR}P>Eb?x=dky5RfwIWL1^8tjBfl>ctfW^KkXw4r%E}7jDzU3Z zAazp=ItI_z4xvIwY5|+u2$C!EcbH*Nl=v%^{-QnC*3-`v0PD$uGUe2FO|S(jS<9I; zjPRY=w1o+%2pbWSxf{YPEHY0^0EEfyvI3h`pO$U%(e<4VEK!zv>0gB2Qv(ar8h@Gv znzh(ZhMqiMovTKxYy`bUTiTZLq6RJ07;tdKW;oW5Dea;Nbg(Arg!4V0?fKveK^afI ze)uY~3cx-i#-JzM@ zHFq6D6^~Nx9vk_b1{v&t@;EGe*+6g>LkrOz<9^WyrSi5G!g+3=1$BDu}9o zmhw96*N^>AAequ!K^UdJebq&MpJCgRj^bNWlx14p%XRo_6%Ho;xLBjFE8{D-Q>V=n z12$h~U0+lEfjr<`vyefd0I?G#ADcl=!vg@2Hvc|=LG=Qgz@^*(T9K1?No_?CTgPR4 zW9b~2Itq{wbNf{LD4WIiA&jB-y5^l8(*f_-#wnBD8LM^SWz0~PIsh%oA z8LhlegHm~}?xeH!Kk0?gg{f4}#Tb|e^WG4%k3l@_4QLBvcnI6;)8m$5rfI$S$slvp z4AFTI&Y@x4TjI30{;BQh!R{@wpmW&MeUKwzZNDlQMF{+G?Uq}{QC&1u?tWMCRdYUj zSymMfNT=@ajC4)=O?{WUeN;!aZ@4_n(S5FQ+6r1JWwzS>Fahib6sa4*61!3m%i9W< z-l&csAFyd*J((n$@x9H-sKJ1yFYG09M74xJ?xMbMXY{~uIWhngaLLkK?;TbL3kk-D z&o(=-fphk@&`%XZZnkZv>W1H65~2wK9jx`rJwbI&{7S^ZMIoE|R}_?GAu$GmW} zy$5t}$q`Kt)wdDLMUdc#=b&`e^^?wX(14Ox`q|YDoqIpIQPj*pd6mQXYz6S-NWgJ) z`*F>#oX6U1db27k@5l5&$xD|o%jwcL_Q61-OkbByq%Ky7Q)V};H%G68-6yHqo$$4c z!k$j*8J2C9WlzQmM8hh=YK%LrSsDW}uU2UPT!@HlkFx7*k{DmGfccFFf#pbjIQ7K2 zGY0&gS742pf*9p!SZ@f$-`j;|Uwd1RKTebB{({K4FrDCV zNN$p*i%BU0dwOBsjIYWsk1b)iC$S{(ULm>kkPhs5Ww7S=k>U|y4wc@ewIDS z7)tmRK$*VbNyBCSx>;2tb~mXd9a4E0o`T?$MCOh&M5k0x*uo`mYHM%rcLQB?&nlR= zzHjlA4`s~2GvEFQHPq`K0QjfOL1i1T1~sKq{kj*OcX;*kT z`IXUD7+VTF(#as3X{NZ4!wV_|&CdE@(kPm0&>WsXU*|+V9E6>Rr^5?okiiMKJc{*i z<~~OW^g@EfwT$8Wddl6lHmKh*(Y1tdO@J_mq%b`vx>eIQhs&B zc_&)cy#2$6?p>xEXE@b8Jl-1GLsm_2^6_%*A0*dR^z|$*EdcLtj$tURK^(ybO#M# zk`#*J{REqs*w8l3GT`)GVtra2pTu2%F=@*9U`PK7(mIssHzNNFwWtId*l! z?#;sMul?r{+wg++i+7amiqKIVvoS}~HsY6DWSiD^U|+AfzQ|uu&C5#!hD(J^y8d#c z6J_r|?-XjhucN8PfZJZYu5aXi{StWoJ}O~d!QMBy7eOwXbBqZ@&1CI~GnWek2uV5$ z{p#-64? zJF8o6YCYF_b(?hkQ}t(6qWCFRByzTK`S`~9me>Ihhf~0`3@D!%_-Yo)j+xd7F|x(1 zHjoKLyv0q?CeXr@#8r-<33g9gifBs&76t(91$78C4|TJKz+la6MRCsJFa<_(K)FH z<6J=2UIbG}y-8r^9SK8_0$4lz7G3+{*`p;cjR<8>$naf-f z`vfTR`A>8(oAZ#2i{`&tBHTSX^j=`g_rz>>6{ID31h}uL29OLxCi?T$?)1f(>7<2j z_rAh~7Q`jj8%d;Y2r?@6Oqy?ksF#IQY%N-Jb1aa&J2F%_Bke$5`SJzFWUTpW_s+ z)1Atzw*>nBq&r;_2VOViuy3&loziWY69^EMt{hnxka*ok_z(3rS)7F1fe%SCRfH~h z;2yyhLeaYHt)$T%D<3Q+kT&%N|=nd$J4bhu>3H1 zS1s4Ap<3r>gUROxvf3f$9L=$gaN+yk>693JXn4Q9#T}pzq4#Gij;MHQ9d)i^49B*H z#*vwH{Hi1*(sVeK8*q1?b_mV54doi(1w4^zn^KQYH zG+(;xgYdW5zj1|v+vxolS8m&S0-mJWN+Nvvmj&yQ`#+`jpi-VKs9aHMr_-)@dh1&0 zxJT1sp{O!3Hm9qXD%umrdEm1+ttowJ_oJZzb1Areo*EXn4O0wW9_o7CJX>pO1gX3A zTdh1`885bT3H!B_GUpon&kw$nCw`8YdNc`t@X$3j!VxksEw$qj+)Sp8hJi-7u7^tc z+RV3KRZS$RNE2Vfq{H}rfpFqx!?Qr0HiACU98u$SKj*aLn`ca(KZuneWbFuHa4X_x zm>%n~4%Q_}RvK$;WEgNr2>gEZovawxSouETdF=D1*R8I@W(~chnD%nZsaf25M-GlE zx2b<{IBI~rn)SFH`Q9}DQN`;IX>a^4F6&=c5fIG~eH z(Misw@eu_qZgd2fVG%J99jO`waxO1aFn;TthVY2>vR>nT&dsq}jgxln5rPa|&X=)9 zKKzREG}Z`C`M!L;$%3DTelpM;myydR{zx0C=;C;ne@&uahthI$%vf~r+^nQD#mCDw zK9x0_g=j~+fNys=@xQS8j6>LN?VFj@xrq19p+ehJ&Fg}}b9=meqmi+{j*m5U z>4nkOE#XkM===0oPAdeQ6(=uuv`pe0=Xf|sLVI3TTW;(enVMRZ-{|^`t4E&vg)OJ> z<6H^*g?+~af_2t0XYje1eRlnslI`q#27R1lmfVK2PV_K#QAHij_7qZBTK{mXG*wpW zkImt{nJ;b|dhGs$lz{c)NpJD77G?kyq9xTRYxD2wWDo~iyj*Jjz~KB#Ck4Z{G{Sc- z@1OVWjaOmrw9p{_9mATZf;H{1BOgDu8GeJV=m#N5+j3>sgKvI$Dtu*1l5$V=)*FWq z=pmU;P>Vl%pU$q^_l;6WjQV}7nOXIm=Ly#jF&B!Er!c+lw$uCt2Gs7u>sF29{D0$Z ze^RDpL_MW7L+6J+uaztE%CkA(IbG)<5~p{$}ke(8BA}>YDAGe;CgO6*z4l zi}1hcJ*Q>hwfXZf+P?w2n}n)8&0XPqe;7|DNQdotc2$!v^KklRC6j=OdNYylZ`yAb zvA@CIb<|QlyDF8~Q_1326KNja@(zDRv6>tu@i#1OBrVjH{~$Dw4z-N(59DOFVh=XD zse#4jh*^Zjpn)y5HXhh1ZpinzGDewjCxh^U?H3K?)oU~R%-~f=Gb_7m6D5V4XeP&~ zVa)y4Gnz#KOzAT8@RSLp7TsU{DZ$tumW!+MsrbXDM`^QC7TiB@X0*Sq3Ak!u819 zWIQa2Bqy!5U_B5N8lzVC(N0yV}`& z2v`K{v_KuEHu9D3xaTR{5pM3t777B%TJUNFHs!8{T+ zI{pOzq)5m`Z_EKSQr}=ne?-BiqyS)WHjJi_b601hSPjBZj{-=!LZN_zVDfZ6fM+z$ zx$}~I+R*Y4NM|l>WMo%abetG2u7JRf+rOGdsS2l;&>jOFqp$&vQ?GM!vm>9{_T4Wg z^Ph{4U~!oQq?n)5GiCW4&dATJ=lP^tkkkcx=-2@I~YXqb9;bZoeAbhb(2<>*t>(m zjSQpWb9i>Zk5Ov3+zy*tY9XsAaE5(eWA2pzbw41vs&88Hi>;d&vLi5zJF zt3)Kld5uR!siHfW$%Mzes)KPR#eOk+<(W9u^W!?d(|nD>P;EA3B;~;1n#7(kF?2pk(8!JTcvZ4^ z6s%0j!q{G)JB22Rok0Vj$)Fkr7|8`V?cPew0>fdH3H4lKmw1N9?d6u=gr6GXMxU^z z&Hr

n2L|QNjuH0gY?``98dz!+rP4sz-~>n0h2UJ|yt@5G3@|T*xn29kD-R9|CCI9twHC4;H|uCd8~Rv$ z1i0gpqA{5ZL|$$I!iAh?9>Ym}j=j0pSuQTv4^>>h*3HW|X$1PVY;dwU0R*Q6?qQ6D zdxA}U_R*+2SIvxDYn4!7UZqVJ%iiTHaQnecp?sd#B-4k9_pvqzT46?A8mgI2vR>8V00Sy@-%Q0`L7Ra6 z((r}bZ1IWwl|DS#t(R%aLo=YMzMEson@yh2VC%I*7y1?M$%`p{$|4Mf<<@lJMiO$xC-czcN^$OG@a?3J(6b$buiqzR`N zdk7_ik1UJ>v>A_pU-wg@ZXTCDi@xnES-|_0k!>|O__lavyp=AG0Z>V+%sG2jyXC%R z@%l%&ts_b=x1`;KCv|V{LgiIBfde;k9AJRC8NqQ>`w;jl3jtVX5vsrP-BpNV+zh`c z3&mvnKg4F71ySRygix%BRf7K3OL#IB_t9Y5Npx}p03HqiG_->usp~%Bdy{*SJ(i`V zd|`5KqFt>J5;vc&)A;6A7sJen%RyQh+4l88JB=Y;09dSm#au*cY#HxanWfdI7uqVT z)v!czU%d}wdKlabCsrkIgUd)hA7Gv-GR zoj8S{4z4PdYf3tc4&3Ut`Xi;L!mjMtZojd6Md>lvr{j7vw~)9lQfNXsRd43;>{xPe zn0UWi{7|Pi_*@-;PnNxXeI0PR?EuAE!5%p{aWOIJEuSxa1fuzQ;4pX|uK`xtA#t+e z$tf%>95+Q-DUE|Im8u$`d{bubv(QiX^NkWeyU@NWe}%3y3@Ud2EtAND4#Cm(>G0qk z2b_6GoZA+uRV3+*xXU1_w=J;b5K?j;0?E|D^e85`olqdJ`*Rnr2Tgw;Y65~fr-k={zlyfOERWx(D8w-jvNrt5Zvf@F-*aRyT&H&wM4sOv8lGzq$i7_aN|&5s zMhmxh^_PglegVeNKCSZ$kn_3blSB-R>#&UVv`A-_l3PNn^<$l6MqKDYUGgZ`m7xQ` zOkqiN>E9w9`Pn7DWAl0^rnlUk^x15Txac;BmD0j0prYX^J83azNASWTaKV`5Srt5` zb74o4UCT3q9}BnQ&Rh{ap^7rAz9z+=u$5>3_f$}T34aRks0DIq;19X9bq`AmyVmsx-HtJ33JiH40FuiB z1<_X4y*Fp@surD_qlyljW##kOZj)w-lW9_cS-^rh0SNkI@t)w2L%m|Bu9IkL&dyXW t{`B9d77A&U7VZb{!qJ#((6j~HnNeK-%)Zb~CNyMLr_wfZ6Ib^eZ5npA9(WRnkv0@}@nx)!0 zf;6vg_@mLt5TeTB#2`4yzqU$=OGYp4&il*s(vl-A8W)XAhMxBQR#^XS*Y~7vrzzd4 z(rM_O@zCBorxj;c-pDVr1Wbgqs26`<2xQjoykxnG9fk7u&zDY|u_rRh~JfX_t#|MQt`Kc<G$S()49>g@OyT0UJI2b91^yU{%mO~_LA@D+atkfKeEk+0*|Ngl3r)sM6y?)go{-yGX8YgKBFP2-_ z2zgq|Qs%AtB|4S&di={1H6IH!_J04!E_5skC-eLKima-1 zjFnr*=Weu9K9y%478= z{WzJCo5a_gWBKico}t?o2MZ}rb96qK$ua2^3I!E#iMefviyqDqy)&pOqm7``EYOJI zG8VP|QI`9Ayh@pjtK#v9mRQ5=oAZ;rm>T6bq8^E6WS762tUR_@`AGPrjxp!Ft!n*z z)%aZP+)B`CLF1ggT6_AuSvaZx*Oxqe!Og~u)tRx}U{+c`@6#i9Ny*d3lP&qC#ax-_ zdsKa~%#jYBFs}5$cj}ZQE5bXD3*E`>H4PrSYKyrF$ph*I8mtv-&;P(q2;1&U6`W;N z$q+B9=dp~S3NdyMi4-F&WV2K4K&CERhyIG?1J ztOi@cei30-PQTR~{#2im?6nq!fZgvZj-O<&`5(WHuZ*2a#uT;mic) z$D3bhC>IYuMyyXYl`M>!_?^KUyxu2nvnMZ!m?>5}j_1C{+E8vw7iyOzNWCIuWgqVC z_+>Ire(O=x$Y$Q(nwI=f_jw_toDj@C*~x5^`K}IxGEcihZ^>{?ulFW9Cz??RaH7YiU^o z)!ny81Jf-5maD_>ZM;d8u~Y9j+%x&QWKryNZ~W*JzLWDmqt6k2D z`e%P$ZyhMSz9sttkCKV2>7;Corl8StcSW=R^e8vcNUfgJpqlpkTSfIJw2^lAJ97Gh zRHE>PMRe#s2fwQomqZ)Vrr9@S$_X?1JG}=AX+oMWG1spbqrZXuR^WBKzn-XVN~N>>#O9-C*BSn z&bFJ|KU{o6c}yehHZ7aTnMT3$srA9pcApqt7yd9W$L%x|mq*FJOLGFEF)bH6qVLD! zb{!7n%IjV4YxFu6E~SfB=1y>K^gIl6DluxXnfv(U2d{Df2A=TKhTQ~H-Q;9EDxtwU z`;!gNF+TEIc3wLsvXRB?YJMWyw9$(D;I6omlaS9@J+u4P+=?5_X-}}Q$n!moW7CH0 zGi>X}C6^T`&D+CxEcD7OUM`1x$-XH+xErf1x+(M`TxEAYTRM`0w?%ET+97)}QtV7+ zetoQ>%W1xogiE*abWeZwL?Qr#{L8OT#AKf}jp@z5%XxNj@Ap-o<*6!o9(jL_VI;wH z+t?6$OEhY&e0^-I+3yne%6$jjftEAnQ5WNHyH49Ne&!Na`Zf-57UaB$9+XkzpFKU} zif7Z5+qmm>RL-GOD*kM3f;C0psTL`5Z|kE$V~?GF>F}SWg9I+&hYa1?H%zHWvDA8A z^P_L83Q1JLeMxD#box@aoe_VA}MGLJBnvje{i<_=DefQb{x@S5^~?|Wb?~VwpLO}Uku|b zQu=9LQH7rE-+n8rB^hmT1ls2>m8ZU(QwmG#Jv)h94y-0?3tDpbaHH7pAr3v3GH@wd zg*tW_+NZ9KE@?k3ab3*-@%4}L62 zyh&ix@FEZR2D@7>Pbn>otjNS9Xqx||`3l!noBpgI($DK~wO)K1+Qw5(rD;jLzgEOH zmybQfFDG+{Y$`5zC2|?>;n02ji2qrWxo!=&ao1_J(#Lj2i{kbmrBlD#G8*x%2UQ`% z?ZP+DtdS8`4l5_eVYW=}Vb`zDc=iuUpHtJ*SUF)oe>EpwTxTS|rivNcy}OmB{Uw|- zL4mgG&Q*D*ngtJ&s2m~;ZNIId#iWv<f~O%-RCQ5eeQM~V&2@q8bUU1+j+v=u<~EE-7+j3klMt zanB4J&yH4Cm`=O-`tqd}c0z=Um8fMHgwwOmRX07PCnQ~jJP%3Go z`thjIVLF<=ewi&P&?9-92mf}$P5p<$S>{)$ahQy~A{sZ%$A#@j8=hCqcn>c!J;PqL zZ}vr9QDvIrH*3Q(mqRJ}Flas79`2!elVJLLp*Gjy?~i?hJ1^2~ZK*pDTioJU+wP-R zER99oHZpJ`B*jVnt5weNC~vc9?XC=|D`e<&d&<IV7TaE1K9|P_pjO0PE>Y?Uw)x zs^Xn=@1Fl%N~0@j+y|*V<}W6RxywJ|laph9FdOl@5t3!#fD>Qd)AJ-<`Llk^j-tav zwL?~Xq;5@nm0LAtv`O;*Qvdy*gBcl{d)$Uvp4$rDho#S=RPrE%cP_s1;S8~e>{49T zf7^>mO1$tOGs-l`EF=57Q=dql_H0Gq=eOU*&NlJ+F>&UfM$`^}A*%CS$_ySX$4H-- z^1n-@MMp?Cv@5)WFLsjgR&ts#Nz|)WZ6Tj8%XKqMIK~hzKPSTrN@9BHi0i7BI;Yr{ z#_fe7GnV%(L>JU}xWX5sTBPtDdEzrMpZd*(aeWs*)cJ$&LJpUGt>Z=u?oJz#GS=I1 z?$S@^-H>jwB1~0cj`2lKTmEF&+Uk`h6S0PH@F6QU^-Z!ukaF&!N>ZK>* zx|+f~*S_69y1p)G{Tu&*IsUr92AZb96Q{f^4YJwHRvVC#Ke?fpeM?arq${t-esrr%Wu!y^47DBD{RMa(^f&T<-W0omiB8r*#G6)mA6XIev&zhOw{Kv z+HN1Ep@Nncy7t= zJ}5leU1etIk~9pcZNagbYHYCRN>HrX-PB^Qo^`&e!@aKEenN}(!o)9u(@?Kr7pK^3 z&>D($qo+NgCC!g8=fdl0aktRLBy}(;g^8zld2;=%;-XCyo6}w)3aRa$xk+q16l}Xm zJB_I&@|L@sVCGo`|5hMJshsL0eWdU&5{VMT_BQjN7kQsKJq0!OH8X-4wuyt2!|m{9p8Ds>FA*}$A_zC*(vtVq zvg2O{;sxL9e7LvK?=LR#*uJ5q0k`lq@(`-Ohoq@rZpRHwmZ9}Sa5uf|P!r~KIz4jP zJ-E6+$}<(}sxuozY#+cEje=brjIxfqSHXYN`dEjT`5(|+8jq4)XZ!hUw~Q|W`PVFp zrZKSh!9wXDkZi(^%4V(az{~gV@sTL;WP%%`fNubfapN@D!Qe1urQY)4Oc#HIb6 zKH36o6_~CCQJ-bPV!n$5qv5f7iw@pGlmN zYN)5H^I~o#dEsaoJCRPMze{m-2xdCF)?HWd;%FJ<(O=(7|B(H=@L8`>;+-`FxY(sH zj{XWq)7R{>+`YIA4di94ELhH8Y!kX(fOWEOL>piNrB(*t(SOfV zPQ=8gcH6WY%#S|eK25vpI(SBA?5S$|M&$YMYQ4%6&-2s$GWYET6^kC>)2r9`ZQ^Xk z;oIb0fLY{UTqe(P+x)dv)pV+etj7*&V(RFUn?Lr67LFa)ET(^aPjPnU3dLk`XN}vY zS?WBn1mjBq3ujApqRvYy@rg~1ubiOpV-$3pi$ZRTh2FGP&EoZjp4E(S%9(nrF_y~5 zWpc|oukWEYou3`^J{je132x5J>38=zKXv_4q+L5Zai9Hw+7Ck+}vL3lzY^u z$-9A~sKl^NODSF0y4>p?>gZ;Z{s%LuuIJy7n96m1GX9qJfZI#!wYobSkTg4?D4Ux) zKdU;T67|%y#W(@< zMhQ%L@|hGWrrcKSO%TLHpS_R5hHc#%Wgc7fat_dcVZ1{ z9HsEq&JJert;Z|hUl(#_SqpP}F^g^T4P9!u>Z$SW`E39fl3h=}oUbvz@ig0!*tx%5 zV%e1-Cy~gZtLCw@G{aa@v;ONf0BtRIpW*lV%dfX)hmvxhk%IqV%2zBOHFZvh3u1x5P&lD2D*T3e!(7 z2C}4ZYh2gUx2_{xq7|7p)2)d*u3FAsQ{0Xqj^inVBtLCsZTKjW`WniT8re-1OA>#{m*Q_Nym>#X+ip`za_ zg*>S3Wsr!SX`z>=3f5^`?1(#2Rwl#e{Hz_1@KiRh;rYH?-LpS&g~X(|b_ei%&F{=d z*Bio=eI$(UjU!s?1^=I^DLj{!ho%G?<4CLlR<$rF&`rnQ{XRTbk#eE zNJoJ!Yf!?g^zkqxZlQei(d&3m7SJT(JUIUxNWD|G58~TZ8>Q3dL?d+Q+fBIgbwEuN z{JgrZQmA$FN2%%kREEB^yUey@<*NH1zQ!`k|0uJZt#9{4TT_-<5;#*Ubr~F?7Ic&x zt9n|pW=+obkl5fHur7Oz&0v0}GSSrYx8q02sI8gou&R34X6YkrH|}8J>}Lj3mkIUU zdEukRqRZksae^dsD#B~#R)z|3&pvcxZ>QX$=^~(ifMu|ewC;I!xMY=1+(hBGy3+;iDDwEfE*VWP0wTs7oP_LsEJ)F0xFoQY2qc)gk?bI) zx;U2GcEjRf*AK6j7D{Bs%iZzP}j`B6;Yoyd96>1D6dV_ zh(KW2GpM$2VZS?d>??Y3yxznfVUnkwacR?Nq%Ld+ws2k)-xWz$OePdI`-w?Y$-aea*(?Dk$ zgP&RFR%tOQr$K(v!)+d&)dhqr8R5jVLN9X$y$yu$w}GDrF^Wn6!9K^vFQmX;dzcfr zN2b%appkhGz2fDmeZ|TSgy#ZHoe>%P$HjUTa=JqHt|{Lkn!C;uAi3JOS0kR?c&9{@ zc#_XPeMjX&C;W9q59+@$S^XamIiJ0!uce?SPn*7Ze{0ufX?6pRZkoht@fGRKP?G-l zeV-g#1F<`Wf`cMxIP`UT?LX0nQ{C0#mQGCy%rm`ef*VOV@lz;MX@dnLW#U_(zh!jX zBI6$?g}1L~btS4pukHj`W&e>$I3c$utBP-5A{24!7A+*gZ5ZFwNKp)6=iF`7=Q zs6|*w$l+IDritm*gUWF22R?W^Jpo?V7=d{v-ch|g$c+~}8kk4*Ew5+0S4bH#x8j|@ zkFIw#%t&#pn^Ak$y2(85mOoQQ4nDOoiyH=yO6wAZmcYWFUvf#tlC7y*m^7YOH=M(S zdstPNuMihKY)n{{RTdZX&V9<8wC}Z;<~q2&d-OrDjrkE{{y2Z;DdV>cUe1!J%AHG6 zD67@D-6bjN<@?H)%K(oQ6ok2(`d|kX{TZ(+bl3LzLiAA?zsz&{+o|Nb^b^#Dt{DQq zJkDY_3IlaHYyICvH7XZ2;u+h<HcHo!>o%*%p zDAG>OPn*t}12FLufQHlM9ikTtYqaW1w{_>le}IWP!ENeJ*GaKvsYAS^Um9cVwW~SL zc~iSMHFQE61rP19Gmfi|Uzkjz>VzJ(h+AOdS4C`fl?~sPmHR4bPg}*QCESu)#qEh> z(zU)B5qO2Kqe!n;nZLBEBGA|yR9Ak7qJDR)ypw)ZXhni1j2y1-6F|$uyKP=q7^bf= zhP%JHasNffQh&ClGlPM6BenpaF!RgaP%E5SIeYz<*M3{U`s4dIEkion^QoK?+$5Pq zWzRpgEmEXYdY~maeJV#}J?@{hC%Z2}kAZ5Z=HwrYE~_t-`I~FOan}QsDNW-SQds1k z$HeJ1QmtKH6LIB*l>2DiKaQB(9$dw4GHF307qCmEh_Wuk$H9tJT%KF!|InDaTeGSA z>wR#NSi2`LhR2K>*ZoqLz~}&>wb2jsTJhY?{nXhym^GiG5!Z?CV7LlQGQ4oVFWgl7 zeBX}C#7!7AvSRwjL*X;TW0qos00Wxh#*^HVp>WD+{ih9#d{(`ekAf~OY?ZU~eS0+I z$vJXg86}ilpa+n4fuLT!5veRDRi<$=5jBtd{!iB-&Qlo;mg847PZ}cpSSF>zYpfB( z$={*W%`_rX7N2`c1ByV3~cFgDbmB`5m!wn>I?3xoz5HESR+n>rQ(N#_f8C z%TB%5WIy!;C8AE#5U#IE*^?RQy7Be3y(9{F8`givKrN&Z*qZF4FTp89X=L>3hzte` z3%r`Bt5KC6yEeOFMdKXl8I&yxM48`qQW-f^-0qv)pk2Bu2XijDL!XJ?QYIdCZDKMc z|8pu;5qCBEW){JA7yZ2rD`z8@~u72eSVh?JQ^S-FNW6v9RkE|H$mzm1iE~6;%L-$T!@!atMJo z9-QAc4}M1~Q}LL5_QL0~^mhn2$CT_|ZHy4V^R|O_`kFjN^>lh5I%;n4Bk2j}OdV5f zbB*x*yYYD~i*=<}iy=^@pS#$5S=KIO~Y#n}dteXV79fSQl z3=iQ?`$Bfi=_py|yO`J}E;HfA)tkShXiMcB)CaHy=9of64Q9^G?KWKnJ1kIA2W5%XXmk^#M-$omte z5x4)|Ck^yCUh2ZO$3dHqDBs?(KmTTDmZl9?zFRMV@w}D>LoZ8gEdH0e^bVy$wlC_L zwEcS97m?Bk;y2Ip4Y&B-1QN)+V6Gx^WQ|it<5HAyD6VDF8@)m!Y9RG%w(y}sv^4GL z9`0U!2mRF=!dYxg1@cEDjm6mM8Gf#)=~#C~wX(JqyJJqjyiU*NuzsxS9(Ms6uc2*_ zPV;DX-%lYjyB)PMQu1`N#|4wfelWE>Kv4A-5*&tHLvz!=My)C3doz9Ng;cnpjqoxW zx~(^nZktADuD_aWUHOczEg`Mn5+e#(*_jG1n!{$oU15m;6g^3|;MwR8OSzPjuT^D( z6Xo^pm;dVp5RgY=UcV?Jaf2@QlKL#~8(d-NfpI8{Irl>olP?f%lz1y(TIN)$5@1V^ zs=i+o@CR2|jUHZrSaCFLW^u+e`)Bnr|L_g>fPWCmaZ48bhmT-L1svP@V^i+$*HH{-NIYg(~pIzBbm3@ zf3;GCUE{gue@bI@x2`l`RAc^Nhc6{GEdNQ2JE0pU`lyU7QV^jLdpek}niIkSB*Uv~ zx|gtP&A(hZ+;2LkTC#6jk8JZMF<#=g8I1DSUC}in{qyIE?jh}!+RQfyzm}~ylcQuy z3{_9=mIP-8R)Vnm#YPPjB%xd}M|5YK8?T1jymy;!QvUgqI(pXS9i!vF)|l=zux95o zoc?$6np@z~sV43ahhMz>2`_*CJS!5pU}gefCGJ)`uuxpQtb>?E_qKX`9RS6+XC1VGsk?&pCwQGy5^1kLWep_wKryJOVkhx`x4Qg^gm@l*H zqjph*vT+Wiw1G-neSUEs-US@wcqMt@eqgYfc=l(E)%BLaz)=+(A0>AwSJ1Q-p-CNA z*o1h@4^_M4TnB-lneAw4X4x+uvdd(rAm_%{oq)2x)r!m20gz`NfYCsWla+3@eP$C! z!=7Q?vzsRADn}icJVZ1x0TPpqO%aqKl0ox$-++gSMhE??A3 z3}9?fYxHzSIwb%#OCiacNa3vzS|0|Te(*S4iOc=q1GWLw?Lg1P-Zb^$qm}?n%ejtd zw9jc8%b+F`vaRG~^*O?R6sKk6VZieXruL8lx_e&J4fv<}%0Hvm#JuYrDLw{(M1eZu$ zh1jESw2Z4zTC+geC}2fWxr-FsVwn^r@i|(xxN+%_5v~dATTH}OvG3u}U!Py$5m>4XjB){C zyfcWJh=?B04=;7yfFpVG3sa>2E~olE>&m4kq!MThS6@MF1_RKKI@c1Rhw+Or*BU5e znfHSkypDD{=dY068IHax9Bl4pBxJA*?9V+ZD$X&EV4#u{;YQ{nu%kp=tq3=%1Sf}l zy0CkW<9w%DiIE7nkU~Pw+C4V`H*1QMEz}fhy)P&ZGE2K<9n(A%v9vW*s9l)<hCI^vOPT}HI76rue!V84U;u|7N8M*W!eAm{F1+B-1D_c3+L5&S-Sf2%?! z{l=(yB)0$8tTaC2UEtrc(`O>~YF`vraa+OinqXBwkcCaE)-c9B0I$*XLo_`J z`qsNrp~Q9%T52I&2KvE@UZedOvP5g5_026$@M3&OE;k>W?>wJY9G!}!r$a7Eyo~?| z+-j!m<{2h<&+ZpTBtk|Ug7z>&H3O;`B0u9)e1;Q-TMEZrp|l|-eu?NVK@FEYWaYz- z61PCozrXjgcWy9zt9N&r75RCC=_Bn+%VQOVVUz-#MllAp z&a6g@%ySk_s@f` z&hIRLe{n4spGGTT6)5j8uW$Sifg~6f23nK74y@~=bgiTfVR$XQp0ixt9*x`mMN>AW()?{AG zm@KJRod7#bJ3c(>r;9fRf<*HSLJrv=eX(%W(OT0NIF!Xs%N0`Z@UReMd;_ltBa+e5s&0G)v{c#S z5za#(!Ym-I^7@>4)_`;8){{|{bhlsF{K0tWLT(!-F4sp48$2{Qjp|t-Q^f)M*$J93 zViCc1X~dw>Q#ajfPuKcqZH3;OyROAMD?{?$N6XB)Dw%Pa2M&|zBF`-#o|ORr>=KTo z5i6d1^8Iai4CW#|z6cgJLtgRS#J?+Phg?Y}>h`US`YY7J`9-G0xQzQhtlqG`;>$F< zOeLgY(twl_b#t1|jtV!micv3N$P{S2%YjUZWC8t#-V=4U*Si5xo(llPap~I)ua5OC zz%``RO+0TRgn9bvK?b!{72ElCe+&4}F7VX6!@#y?g= zW}ia`@|0>&txZ?uJB7CCB#~5j;npm=byt)IdH_pVQ zkc8lB9Gw)7`^L22rp*dl?TS^3{b5cAXb34BdR6${uJ`o>2)pw%NPt z&lBWd)_ESXcbi#fq4Z3J%MAgveHQWWP!e{Ar<3*V@1SfkL3ip~KnV7XypEKb6~d4) zdDZe6=st7@Y_LUX-O`G8q4R?f#t0$oS{hEei)+uxsEsCVCf_%)I1 z4Gv0*pwnANKj1?3d&pNOOjtkN#0z>F|N547RuP|$g~|cY%W6(dkmf$rDp28SV4#g= zY=HKmZuy=*r0D5XqrbAWnGez@diVM;-$IY;@6j8e%T#oK1ev2ibc0)5sXH=~T0~FC zWra8|wcia5ZHt?$3F=Tcv2?Odye$K5_1RDT`Cqa;VHt!9^eYsD zah7-jw+%8v7KYzJR`2XQI+%^z|#6<0$XgI&=tks5CQEjG=UBSl`V57BztY+mu)Yjik{WuKM}|o9KPrYLokK!H3ixlkg&=#Qm|w zC1#~=C?N0UsB3U9zs&sdShg7-^?kO;Ro2Gq-C&dX}@@?+>zr7t7eEpEy*`yrOro|$B;D=RgK_pwNbSmVN? z-$vw{h1?3Xe*al`KZL>mU;ut?{~}J& zO>!Pg!VqrwZGvGFVu3Pji+?dEAu_FtdL8ATLJm#@_2lqW3hMMus7B@hupJSbyB}yw7OR=pgt z%*tAe{Exngmsxf(f$ulY`ukh*MdUpMv55J{N2^2t0`h<&Ea58rJ*Uyd1%+CWpp#|X zIGs9D8tLc(A{VJ4%L0M%SI>J5(7XdmryOtt+xTW}_KoflJ1@lbAcw_;{oky_-mm#$Eg+I1aSfGz%C& zA=Mn=GOE{MT?5~NELZ^>w!+q5XbNg=WnSl_xh&^AR`34NXg&t<1PCVEyJ`<*(1sBv znyqrT&Zk2w?^f6h$(g*I4HmSHyKCNl*^i%^rKRD@D-Xb=F+wgj5v#s}pa|_=ZzwyMNpRJ5kJk>#A6@B-%5ihbNY4 zn+_F|p!viO@cb+z0kIHaoIO&^p)1Ht&CG`xrR-|JBXa{OnbCdg+@%-H>dH04(TNuj z)!{EhbzB|3nOl0-WhG}~7W6!=?THqxHaQIB)z>-9zF;%j-i$}*RaZS$o2jINSXdcA zw@vHELjNsCmWTvh^x1D6zt$ka_UpSzJm!O*JJ41VKlJ?k%uBb{`Q4L=YE^>fW|XyE z&`%4dKfvzo0z>r>PoZ!Xlpb*t|ADL{xBi8!6xN?!;2~?fLYFBqt&7WlN`6*Zk@a?U zoQ7&m{=dS})z1j-#nAs}F!qu72PH7FPuoa4AA%66sd4*?-uz1wAw)*zWfu3i0AHRW zdY9z^sY&SQID}GQ=yp1*G;MN|1!DkmM;@|&nG%II2aRej9z=}$j)kI`qr*YPr(pa< zq&K0|^wV`t*3YjP(0*&5kLZP9e96{43gS3-Ww`Zt%KP~J?}-}qN%zIHZ^Tz44NhyC zXHdU64gO}{^!=)CPLRP!g^TTqEI`vx7Czr~UYge7u~xlXX-%iZ&9)r!m={L@jlq}f zV>90RTn-6;`dur!{}$8fctK zuh&|GWr;vH?Df!|E&4n21L;QW5l1>8Y zqV$!oH+sI|ArB4VH;AbG*qq25LyL;oa94cbz>IR83p*6W_#q)NQ~F;w+ZlG?Jqi5= z+RshI6O=Y+04-x_d`?u=@|37nvg)_;82k%XiL2p`abErl%#yG7vu$TB4x zK)44MOtB3J@SYHqb^I}5$c?%3g0K(PnHDve-~brfIb;$I{^Ii=mi7}v_!9?c%ZbY& zfpe>!aHRfo?te*IcCd#}%PT@4js`}y+vyt;0YtX zsi(n>@(ueCnLS0kHU0tZLV;Y)(9@y}+$b}ca$_Jtb^)CICH!`KNZzLmgNZY0g(c|h;JGatPYm!~PBSN`Y**b<^ZD(-|z!KKpFP)#1IDPT>$q*78{9q?#B04KY$yLA0=7fX*$@Shf*y^ zhu`IVdhr?^x!Zlc5CE|mfzH(cUiZ`MR6xmdJt-qBInt5TompqdHa5s_a))mri?V4I zu>iNIy6AJdJAzht(jzcMd_CYE(tuKy4=MD98~Or}z=I!PmgNETSpuPALw)_}FMms~ zjcEzsZgR>TRqxDUJ9l60wUX)zExQ!|vg*0WRtLR*EqzNU_!cXbl|Bd%=|zqT2KnH} zJ0B!}{L3%N+(NA`oH3dNs`GX53x3?HB0&-k9TnSqrlhQDZJsi=un`RGsU{W{c7XGf z9WK(H|A!NL>0Vhhc5Gwlg0wvQO+C+OW#mTg8+);9=!VT~`KWYYNW6*~ggD zWDfcEHkoD7W8KWaOYy+jjOnb|RyTv;i2ZBqc}b5u@X7Og((OV^U2MvTDf9wNiy=pH z{}=8-3C>A=AMYTe3YLH$1cOdUq#7vXao390!EA;b$F|^GVo*c(#{1|UlBz(DHnkIk zfVIw#e)bcByet?6bLq z_$Y5W|K`oo(vS659tJEKNJ4Qwwto zsw$Ij$1kW&SIQ0dMz03r>@Gtzc7 zU5#=glcvx;(#kw<>}6*A54F?7PjF4#&^AD_orz%!Dr;C;Q!3E1toau`VFH@EgK~kq zA+9r!pgwSACUH-Y{~ME|9-+A*I9?OoTz+I{ID8=k(q;jml6Kx!bwjQ=t=}Ugfa`hR zRr^oUx&>zpu@l9)i0W5+khrqOMOWbB=n&-5{XNn%7l|n=8JX0pI9DJ1{k^OGa5R1G zA&c*CvfA&gOjZ)C!B>{XfW#5>3FvlIfSfAtV?;1J&N^nRUn_=V2U;x zLbeZ$`7HKQ@svDIYr}&Dkc2Byx^?fNI9k8#Gyushpn&8cbX$VEqhD8cI23VR3Fcx3 zPjXX+_8k_Ig-4o`-QjeWP^yDqwVWaOqtUAlv@_{4i?4J@SP1X|w=vQHY->oqvh&h7 zl79kJd(C~gKvNzZv9?c~f5ZNBIUZupj3k8_XYsGguFN2dQ3{G z*R1@$MA9U}7L@~Q?;O+>NamKD$1D)3s{#o}2ev+K_aA)2#GBmIAKo02L3}WXjfsOkq}UyqJ;#D&>G* zkAYN$-K!kW7OzpHqpI6gngM5KqZF{y(AtG@L4!8uUjjFtT^kvdk*?jZ2{%(T_kGVU#UDUlG99bkY49KyQHyv1N#zx4UvS3@Q?`^rw@f#e> za?sg^xP+oJSX#ucUCe=Ol|@R;$d37v)oLhC#a#uCK_XTXXI}!J{Eu|kJG_CE9&lDz z20~!FH8x+pe5tzKrNt}$jz+{|a5WB8WElYPnzs%BlW@UVHTr)+g%A(MYcJ9wHW-y`mlhOVrj*1Q#-_GyeO>m@du7W7~d#cG;<%{?`;4hK9wjZN6 zgm-&(mUMf?`a zBkq<7zKQWhY%NE?jTS%?HwK5<>U1X=KZv_MF{jcA9KV)WYT)YTBQlOhIbUde40=>& zg@Jwa8%3zKPkt7GTWgc?>P;MqU4Uh-%`2a7z9*v2zQ<-%U#(W|Ax69oNo)7*2rt;G z9k&-0&5dE}FsS9BhsoIo!4?EQ5^IL{vFp_kCiFiMq~yi=L zV?czlD!-H)0OfQx2KJpmnDP4*{$`T+sB9}s*7C!Fh!~`pr&fj16)A_Nrz&kZ zufqyc`mankcNeEO-DqDfR2G}RPsaFvg;A@dt#Vn_vL48J z2_pJ^Xi;W>PU31X?+;Y}0)OBNUm&teb&v7cJhX7I^})tvxXo@2{U7jr2e$r0Nk@zW zw0^MrmK1OUh(|u`LEGio`s7Vyh8+6>VGd|y9LOvvH$T|V+_MJT0g^U@(w7MyY!L-f zPafo93+Ts3E&gSfN1*fZVYu-Agp(Xck@=9h6BN*S7 zM?YkLP%f$8Y^UV)*m>0OF2O!|ZLC5bq5$UZ*H);8o>T2t`9(HoUyiGAT{nXEfdCG@ z@{T(sodrO&Ii4Ii=+-@Zul81Duyk$n_aFGe&>@snT9&PflhA_UzTU7uh`BKHHW-su z1-$1mjx#jSTA-Wgw-sa-ppDwO`QOYU(SP8%B5{m3mLV-jY>EgV?Fbi!hni%sV6R`o zV1&*h?GcQECQWy(T>-?=V{`@Bbu7TmsRfX*EC6L$=yLhGk^4N!XR1pX;$J72(b6v9 z=2baJ?{xktJRFi~$j1mk&qE?Sx@6TbL0)G9NpnK4K6}?D_}nXmjcI-l6(*Kdnht3d zYAJ#&mV?hKPaR4*%(L(*S_{wznAj8k4^m<-@l0(H<2aObvY_=*m}o$YXh+|a9B4*y zQh1isEK^7!2rBTn&))wc6q%mGWkKT(q!yP9>4ieP`I4W|yIBK_;*#NFI&fMHEC`A17+sf zg6pn}RZp2py@57+9AcIXE^Ybh>Gx`I`wf{Edd{?`^FL0Y;AjrzgbtbRWWFyQ@aU3q z;Cfv%luD~JN4>hf++N9zZ93AAw_?oeRp+|#SV1AobL%T}==Zzp_1PGFCKy3fPwq`+ zWL-%h?Sm4nG{v_tzFg~@{g2n`@Z5mGlU306y6c9HsvTzIG_e@%;!R%Cn|kXpS0@E1 zmWCl8yy90;NZ-KYiuagm{0P%^7t--y{p|&ho@FR=7J776;kgCs$3IVAa)CBjksph$k0<6bXi>yuM*qSiTw2VTToZ#vm} zWfp|m3X>sR{um&f1GLFT%bK1%g(u_0wYD_k ziynxxD!&iB#7|u0fMD-Q6;$u}(Th|k{V;^4T*W5(u#5uAp2O`qWD2^eS$npm%9%=Q z(teP^P5nw@mKNAzZ1U9G<1Kn&T8C!E6w@vjkB;0V% zsy3T7JNkRSSqFB+-I;Hv&y}|p`Ll;^|AlOTz-vgtKGDN#94h@|_B4#Cx{z0rsUNO75_z!(AG#is< zAG8T|wq#tgyb1jK`_(5`ck$3eq|-7=fql%FYw;AqV77k@s*;As{@Q!*BOxF8i{ay5 z!jN>Mzp;e3y*uVK{oIawTw}0B<(DAT5zmNZ7B4(H z%3_*J+^uh5F(M?&kbEx>$=HAXMJkxIPpJOjg^VE+9f#3>7hHNvx9K}_F zXn85B&Nbpw_s8hrjctGV$V{ebHb%T8jls+{wf+<_66c-Dn~xu$DKSn9F^0d$$a-eu z>RfD~@vYaEQtkm-ze01o90i`76jjg9vB_FhiSe%38psL=W!T3QA^G_M>nwI6u{dMz zZ}xe_`to5_>&a$gmuEeD;G}|+NARr##pSg@9MaZBg)k_|>F-tt9dns!4H|%EIT!jVV4kjnLC5QLDX4mYZv0WS9jd-#fywVd`tvnG zM$%y(dnDy{3wAMNZYJoNA^m-hq$a;Bi@Cf$_&aa&069;ceWB;8v#_L*4f&KP2rcsO z)N*s|XIi($9+zNqd<3Q6&5+-+6PV$g5R2|4Riv;5rkhw`|Jh$1K(AbT%~)9wI)q>? zB*gkpU6RT{2YoOqmja;Gxf77W;El_q;3%R6K_(Uoq)zCS3zN@y(g}nQc<-X1*p{co zr74^5PFAmVUFQT^BmvT5p+rcNr>WuI|Cz(oNY2OX^nkX9T)UsOThQjz37R6_D`zlX zAt`W(x3A(@RPX(->b^Uk%f9cMpFNVj_g>keNcP@QRz}F?M~F~<_Ff@Mg~$$tl#!KP zl$}k=3`N7J=X2;hulu~N`?~Juy6@NPc|Ff_{&$|8bo`FraeTk;&wG74li4bh%tvNR z<*8jwZ{S44m=qf-_29=dgrBH+1^1=^^fw8p=0Z5c0ss(cf~JBXmnDDURr0Mm`2TNxCYbt*J{h9)m0ld}8J0*s`VKxFZpJ=hB0qy7L+ z!ltDC$@*)%rGhw)8`BLJPP&h?!+0p6ANe?p5@|Fmha9fJIeu4uX89LE(D3E=6MU_v z+31@pZyLYN1cf#J1*hyH5y<}nr{LmH@T-to1L)$VuTk`TKLNa}sX+jn7(Zh3dVbRI zu%h=#=?jiQ8zI~0-izwsYt3XVM?jhJGatuVAQ`=W9DEr_)DbIg!zu!Lv{RuxYd>NP z+Y@2n6|>-#`Q;}W6u;&=*JsmjccYvD-8*67J{ z3;q~*tX5Qd)1numK6e98{qDu3m&96kPuG&tiz%9i0uy`-_-MhxdYC{Rd+OZA^o<|t z3ef*|5DO9AXTVM)SSVJ)VTw^#u-T3TZvlEv&^biD_Od;u?AzyUdw`;_+)xU}Sj*ha z$ET;)Y&nOO`5{6lwv)*};uH&Dsn!N!-bt-7#%Y7r1A^~M4 zRkligw_{W9*i+c~@LkX0TYflNbQfh<=nf^=MBCUYJ*k`Rt7lBzUTgJYEPbX7% z<4Nx~P&AAi1H7fp6jNxZZIFr{Y*MS~53Qsh8uAV(#5aVFJAnPbOZZg3TzT)4w4oe= z(Hr2P<)?8gl_r`FzfP`- zeaN;GUo_(6zrk=8S1=dX<-}xn*v6CS zcbDAEACj&UPrdHzf_#C5QQx;p6KaQ-XK{5}DNr|>K1M*;ti1S*L^gcVlw;@U-#sR1 zN2n$|)FlfnTIuT;d?S*~2h*6Nb(c7K=-bF3hF1HmZ%8~}*rwSErm_d9ytzj#U`u{%XlG=Q zBWt8~jw^uV&HGmFvS9J9#2CNya3z9Xi^hOzi?KkiRVHHcb9O}BM;Kb9X@>0m@ReXK_3PqWa!6vd*w1QN??kBB+|;>5-Z86GYt7*kk-Isx$IynPt&?BVL%n97bLC0o9gKU&@JdM_M_us+YS_ich!udyRdQH? z)FoAn?!1^>PkQKdVv#dc0n^-ibiG;-OxP3s+{nOZO6TkOY@O;ijPm3FwlFjE@|oYr zg)<4*nGR_Nyx$rtngnQvr!`V*TEF>jZ-noD)Wd(|d`me(L6$W;u=N&d-Q-1TDtAwH zB1{>S0J>Ot?<^bb78U?&Nxx6o;Pd&tR>ns_IFxrXq+EwJK^e?g4(2=+j#%6Oj!ChA zE}?7)ls!xV|FhMZ4{=D-Zo`hAaHZCfXcLC%3uKUt7!SJ50QVIB=vSYBWa*)c*aM9Z5# zu!3Af>Px`qno7x|-3`3~;2nV(Hd?u2)hm=d6)IEtCxGb5$G3F81Gh&D5x!^Qo;VHk zBCQP~n0$R0{*kqK?C6CkIbvH-M{LWbf62B09Lxmvy9a5xQiw$K@XE!XCxLz}?Nu~K zT1Ub8)D8abgmZc25TFr6sCGbif{w&$$JST}Ht#_-#9gTUv^C|WHh>-OebcuYs`tla z&d6h7fYrE_UiJ-?r{3qJ>7vw3jl_mYhVC$7r$G*iabo7VpN<$gMxtX5&(nd_f?eh_ zVze&hYdlV$j&S!EnTnB3ab%W!xspa~8q%c0pbm(c{eZ^DLa;)*vfvPq`lPVo1Awq? z0U*Ip>E0jGwuU8hG$!f7)31grV{aC7`eHp=DUHuS1SpWv2H{&(^>Eij;Ofq2E>^zy z=^EIqFb*vnjvno$73V8%LkWo{n{lQ*wM_XU7+p&ak!F*B68$lb13t_s(;X?+Fpa@h zhr+YOgM$jh!)`uo_yO}-52_k-!2E2!4uv%7km41LAI`ad?@MIP^mD(Pa51IWxbo!1 zTn^B$laju z1I&!(Ri%oNw;w-;dt-lcHoZ&Hrt;P@lgaRHe8g2h&kM^XEX`JRA55SfL;BZg!0KN- zYZW}FWF2XgV=Z~64pPO=R!~{>xAK~GvW4uVw!9ljoyls50tx&uyn&Go(<0u#dwU2t zjQ%^-qc?eU9)2}n3R>tO5&)*3QP>uZdO22dj?W$o=fczf;9*{-5j=(t2boOf4=l+3 z>GB$94XLL|TfAj+-KbnlG4v&=C;CSFV;R03?E01vs;@^jv6Fz5>j~@6YY`X^VkNWZ zL&-&PD1ZG`iesdBF(GWhTNvXbIKywYh%N%<uqiP-4X~aO2EoK)6ET39WxEpyozPY$Y!n0uhfHDWudl@;+ftQ*Y z&XUrzGd&cmaZFaC$H#HD?2c*<#geaIJ>)s^di?>+N(w5dYocbrk+)k+b1yz>B+|*1 zLNOAHpE+JgDHWt`Ej6ijmS%`>LtT%D)vZ95E=4SDx-W%!(LmZ=q=-|0HS5^{Pm{hj#Tu6z46MR6W`o zu$B@uvT|DRAhO|XM!(h@??4f z?!Upoc)GZZ{*m2UK$JJDn?|-rd=B0(qfK8WMgjQAiJ(RbznBY5#TlwH%q502TFQ){WX5 ze+hsM7m0ryxWDN)HQNakuqgw-8@OH`+qL7HgPqJJd)&B;-WPtZZjWzH+g%;QZyo+I}{tPwfUE^ObRsB z8Kp+g{};?c<{LM5{YWXI0nsslm8WoD^13FwxM$Oy!hYR8ML33t%ovN%!QUQ2R8 zvfOkw;?4nFU6Vgom2MFjmAV1Og92~dfZ5gOce)E9#k}6($tqPlkJ~mj*Vf0y!Piye zlhJ-LiEV1LDjzD`x$uao2XB?}SZ$pz0a|pm%Cs46KkyfL0$|DBf#iY&K0h=Q{ZnxW zT49kZ-VN<)oNY{M{tteL5?z}%O8oBIhmA=r$a@=osr;Fbfqvkka%Fz+GF%;IqAx?I zTVFD}hdCa+(O@GgzsRzZ_{`00ZG7}>ec84VA#=M`e@mY~3H^tE>UoE@dlXW$UB;>|f6a z=6M+!SyC*B1BqowiLOW^JFkSJF!d5iQDYmSH<3Pc?Rnc-s2|ErFFN0bVI&!qMfH#u z^6~ zIN~x;rEu)P@L$xRfMG&J3{sW~6zc)(s4aO^_?{I*>%wI&Vt< zqMW$vRU)$P<6mmB23R21c-tm|XL;b|28x=)<5>MzUJj(}J{8ueSU3|(j(xgug<^|- z*-Q7%e#Q^xM>j*WvG!xL@y8Z*T1+#2m4%nTqn3WW(F>*Z^L;Anrh(Gu0^%Z(e?gJ9 zW{5HJv+SMF^TUL}zhiEOl$s2^{dd!4?u{f0{kY=Q>31x-iTsgyZ?F3x8-}VLis^nsWY(Y^RKGGZd) zz*y>1V%_w7Z_mtG+xAf&ckYWB>I)3+AL|LX>uE5*W@r2qHB5cy=LwW`3ZglibT+Y7 z_+!O=l`{xA)hJM%VVsN^BK)RG*Nl`m^E5Y`v^QC}+3#WzX>}ibY={p0=s2DEN@U&h z(;2?NO}%GcAMONRar)FG>E>}(va9{>qS^Gu>xw=K^-v153$B@FgYOsbc?RTP zZE>d_88F4!NEZX>^^F6U=3vp6F{=9{l4nzYU&wKK&~}|PM7AJ6fh1H?>OY0xy&RD zxCj%NPo=%1-Kh8pCb?g+kVTIHjwL2pVNWv$7HCaux>^`u6DMH^jNP7G5gK*g(c05fQ+!)}VPGd|0WHPWnc`>WG7lpLugtI>;pJ$A_mAWA^{#E7vC8G8!@oS#RL5m*WImI4FSwlk?FM)dr$$b z22^neDB3GHS9qDHn}i`dk0&n}x<(LB3XyMs?+jdEJg|XraAeGs!%@Mj zs+t;+mzUQ~mvsuvMBRMp3~O+9v?DI4=z5H#&f-Xb0otO3SrvjcZ#Do>i%&@*_Xf?Y zFF`hPh$~c!4%ZwukCmW1;`ZuseeTI#SHV$g!KQk4_*bpx^le;UP$oOTc$ikuxYryn z4}>23#sCbMkIH8Q`0=w*3ue4uWKkR_M3@!lY>fskxOPcmaeZf-C3F;Pi zf!$XSA9!6Ra%wxXq^|#1x>fVp=CMO;z+$3IxuC19ZLaf3X+}(PGT$qVY4&=0OqZ@4 z_6qw+eXbr(PEJ*2Wt86*s8mc9o{%5Jb(hQXP-gK=NSL_5dgdB8bojd&fk)Q(tkO}t zt2eN7+`_gu!$Mzxi0eG|h-D%U@wm-g-}k_#$2!L`6j@sHs&~uNI5H zIh#Aux<3>0?INb9nBx$*T$E5bI8olX63bVk8X(l3RxtW?fIHjjJGO^EpssCDp$LPa z+ZDe51Y+@XUoD-@wn8{XyKj5Z32lI{>|8_ZZ0p$A1&F}C;Y2rVFkB5!2d&If}! z?U!XL+hA1QEIpUoTJdM==e}CL%Y&FsDFXuo%mIvJ1wLR~)#0IvvcK%Q=EO*@e{E4p z*@8%EJn$7oPB=RnFuRvXHf?k=wK8oROZY4}Z`B8?c=o@4Ck!^b78XLa;(Z9kpm-)* zRae0ZWWs#PCn60A7UqMj<(rIQD-|WY`fw~63!kfI4NP3MU&C&a>+%eCeBa+c$JWwv zT#`6K$o*bi+N2Hp;2Rr>%l-V<_cvtTK)Vq3$nRR$4uBp#5#iSfc6r^G%uWaZ6?(fE zYB9baew$T$ldq-*Z<-o_pqG%nR0SRo58vX9`Ow+jm5V=E%bW*;Hk&jq5Gs-nKdDQT zo)zIW2S$+`Z5D1ox<`aN1(T$1Gp%4?mU(VzC$5g(g5G5@N^fASh(o@-l1c2~6(u~x zKlA}uSdgLGWQPiCzcZiZlyJp$)v0rJ)5PwrIwdOJeBTHvPOGIdy}iAAE-}KF{R7rV zejAuwR!#{D#vx^|`U5ZqYPZQqNTju?C@a@&!31V#27OXBVpHW7wQ+6m=$AxlsHz%{ z!nE1 z#Nq`@oNk&I5b^2yvol|pMX|B5JD$SSCm}9w6y#)PMjr|1zvz&-y{i$3gyw2}lJ-}i zHVRD5&f1U|=>Hm)D4gqj_w~uii#~B_DJkqgatk#zHWuvFEiNt=K7IOhXKO137F?~i z*Jx(LRInVdGh^;@o|}EtHU^KNx`syCdoTrVcW_BbnUF{}5*S8YHSVT%@f%o0g>&&osLo57T|8k?6LbF6o^1K( zF0LOPp(~pAqlRMOu)_6&EMk%&{PQ-*gKiv9UaDj$6YjntYIbG~RypmR7<91(4ZL1S zp4HFsl`5VAy=JkZI9*W$%SDPh;(jk!80jjE`bo5bFW->25}Jv^eGv}+RAadk(>hkV zpy~DoMn(p;!AuNkEu$c-g`#X@n}0L(nEwj!E%U8UVmo{9{719A#p6Pjx--` z`aS2|x|7`N6;aca{*1oPYT#Todg^P+CXDY5AC>*|>C;*bwa#zC?g3LR3V*Bq%@gZI zotX;cCK>EOyL96QN@)LKD&9QyfZT9fuShSv;e}2@Cmco7C_ zYedQf53zHyu?S9R3_>|(M#PB-glPUHnXQozc2zg%K}NOLX|*jcsoI>K=*mYl7E}l| zm2{S!vG({zs6xe4IiY*#Zqq}<$n58gqc2(ZvrC@KovR-rbdl9l)I_4$w#V}Y+SVD% zJjXYLDc0lJ$J4>?>nw5V^1W*kBe8m?%2vL7K3vmK^mt($PP*OH3c-rLLui^TY5K9U;WNJ3;NZ;?FJmEiQ{-=A_cEGymq_|W!2x)BTBR~ zIgXDQn8*f`*osE0j+;oaPgDutSZ^}N2@^f_$##?4+=&|NGYU|-rQA^pH?Y5S@P7zCu zref8lM<<~Ko`k)PE0&LnwVueTZND6Q zLZXLbx0Rdkc#)qxs_I#*F!rWJjm+>%^1&^o(Ay>kgH@r!#678pYQL=gY@n`wwa<(q zpEN3jILK2;c8X6r)!5HoIP}$MN-hJ2?*JsmmmuV5Q%TgWhGVtW38-}@#Y%UW7wWyH zDPnEAP8uY^d#!T83g2xlMvZK>9<;+>CPY-sh2nb#+yt^bDE&oQd_Ng|%J6CO{R>FU zk~`Ec2vt{W>m^xt)=t_XHa9Bb0nyjpRC7*9#0?dl6lAEMaHxL5V^{iN56LiSpoUgXnw`X$HmzkU$q!APcdZ4YYm zm$Nj3NuugJLQY?8h(5>Ctd}L2-u}>zrTRXz{tYu8Aw9y^KL}pkKWn3tbn@io585kF zYV#S2q; z2Na=mvb(G0w%IhHzdj-6p?jtoMwuy$29-m`kuHvIf4~-@5RrkG(~Et@4`@he?u#q%)v~hl=5INUNCu{ zCj2AtuXuG)VFt+8V%|A7wtZ$DqtuP{*BJT7G)|7(qhrp>1iXVP~B-vrs~X zhnTnRfuZZAmqLZJj~)lvKQ7wKD-qgz(8PbrPxS?c0grx;sdYS-?OV$k{!{kFO}zub z?keoZfV)qlY3u1MuWJ-BUGk=L2Yp-r-uLgOlDuOoEyzZCca3B2IZr1UK^AGctIY_m zoOABQP~JAyv*+`_KYZVN*d>2Wzc%fpjY*Q@?-{cSAKRIvN{`0h`An-5&Ty_0PKem7 zoH$C~@uvT!oOXP^EVu68zho{SxrP7pUvZKX4Ivkd${olx*m6Y=-}HclPCj(RnasnJNa+ct zIIS~Svc(VK_TdUYIeNs{YdKS3=%Zj|6#t4e&8i2)wLFQ1O)M*dAJUMF9;5&&7}USU(__WlvsO^ zdI0K8K3HWrL7&fullJn#T(JPj5G^wi%esUG`7>wKfZgnX`k9*V;Dm_j4Z5G{JsI;k9hIotR0*q zcp2k^1Z;I~K(5^g&KO#V{E1=)vO{GXj^^TI2HOn}BvE*km6iFYm<>$7L6paEa_%nD zt_N}od2@5K30dRu-3tznSWFrHq@yRnoS_QJsVdB!nm5U5X?ZW%+Y6MPSfZ3|xD$Ky zI0vy0LacrQ!ZDXJvaqmd2;S<&)D6+4Zph5^A{G+FwF0xW@jVRTL1Kcv$l=4F&J6~^t#O1&#W9+U<5-L%olx;i` z62{FCo%#${PMs_dxVT7Gr!n<{=wNFuL_TllKH)Anv3&t$hrDJJ8t^bdj!k=@JNX(Y z{Fd=HRkoNM8822Ml`Hk&LOeL@7(Hl8McR}CBw8l8V2Tssy4Eb^Pn`Ofb50u3T25$ zUgk?1Q0r9T2UOu@QiY5NuE~VqmD>x)2Aqn2=3Tz7Xw$ASlRf8w@xPf{L%d%3ujR)@KbCwuKV$pg>qTSl@tSNE`xph;OdV5zu%( zJdNq7kU~UNbv$PRiT-L3BX>2&Ad>7h0QV`P_&#S%?9Ykzb`?oC#Y4|h7Srp(DTla%x@h_oP1tQCucwC+7T{Bf{y z!~^w6K|#R*UU^aI>f{pM#wUx7jg32T5y+9sGxlq@I50(=ItQi`yYi_Q4o{1VNqi+M zsP3@LP`ZJ4)*jbV8dsK=WcfIdkaY0sa5ko}1~4w<_7;CE+B?=JylwKs#InTc+^+Ok z5_AR{Ppr6-$$aObZ&n$>3$sYAT~fa0&wQ?U8~N_n!xRk-Q^p@Vk`TGbQqRLW9dZd~ z_tms~>rbO~NFbdqu z#C>g!u6o?j0A!WPS8%ww1@1bIp(4rAw78jr<-rg-$OlI?w?oEOTO6YW2Ik-sOcC{` z9(N2`S=)Pmc=txE>C{veqsGM@k0>oGm{Lo;7(PE#sNmZ~c2!Ts{^x zB6mdPq|#qQ))dh7em}CiM5sOtIIIW<}7H~s&cTNzPdDF7ZhEo1b34M z4bDD=iA$yvJ119Q(@7Z`1*1`qof{OrNdOCNamuz#{R~uTsUG62P}qP(DOlvgECyyJNo~(Ay0)hN+>j&hZ6W3U#qb7q^u$ zCh3x=A@U3*Uom7yh7?KX6R-mzTw_7ezpb7RWmRccrJ3K%jpJ5&^DL9U0BX~`&fVO{ zB%JOmB-he9?kraJ9^1Kl9t#VbPfJzF#8X={{H%Nz69@syHHsJ%CE3L|_)WI~VQATs z5j+8deTq|qV1u=0I}_t>*aLRvCD@qXLuPXMZtoh~%u+WAskk>LoQ_wA-Cp?}rGdSl z3}3-E>y-`XarpH9^KE!-)b+5g=mgZZuQ563JbTOjz*NUmzvB-uW{6vL{2H_-IDGV9 zj?alXwcdyBBqk&r)E76T%SZYS5qap-Q^7RoWnwZ}Sf&%R zWWDp9;V3n#^iR8Vn5G7s8%$`TBUq(bOW3l;WI2z2oK;v^MMVvqsmsGpp^oOEh3*cG zliyR;W6gg9+c|{T=da2c*`tmAfMoR8XHdTgjE9x7LPN4EvEUyqHGS1b%C=$u3#2zz AiU0rr literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png b/vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png new file mode 100644 index 0000000000000000000000000000000000000000..f13a2987b28d70f81bda7096a54ec479f20b2690 GIT binary patch literal 38780 zcmdRWcQlrN{5QHQWZa>Wd6P|cLS$x-WJmTWTSjDrtVC8;86`53k(E6|W|FMz>~$kq z;dx*6UFY}5^T%_Z^PKaX<9p8c+}-zeeXh@Vzu&L%z8+mylP4ymBE-SLAy!n7y@7*+ zrw0GA;-7#|9MZp9Nu8EL$=dljUBrC8qOcZqwAp>oGp5>Y6`LFzYZtTam6~jq zl(O->Yv}U0x4S0Q8_JNEaN@t7u9aS2%^r1;MNd*kcE>vaFz z5`!y2a9wzpAnfBm*HrLpe6;=dC}A)=ihhkB)vv3_{WAtU3bpE!|9-z-9X{|4FysIC zNnSWjiBAaQ9qex>ynipe^-RF}iRHnK42757b01?{H@w+Mb#iW8>t~h;CXy?v@!A#| zE;Mf?No$QjeRCr6Kf|fhR%So)ZE^ao@hDl4(_ChP5&vrOXw|)at|5biD%SkSd0{MB3haB+ zu?K4qm?#;BPH3G{z`Q2VdZVK%dT%w4(xM~&;%j~3(skto?zsLpx&}HtpZ9wV4(NS% zOkdSl_2f85QNz5RfcZRj{IUXOfuO{3+z6X~jm2MKO~P^O!D_`Eiie6I%FhvxCU>(d zl$|lO9+v)9us$IL8z$#QX3X+XvGUuSr51<3b{*b+Dq^_vIU~C6=+MB(bLI@YUf~Oq zI`7?$jrCUgs?65z-dFN*bIWjPD!8EerNH~Oyl8BNWO6$H|*flr<}TKpI@tT-lk7%*fg_D^WJ#G!72ZQ zhC|$g_&mRCzG1~wyx1q9*nccoaHfEc7Ig9x8!xA zC@7Ur?a=r2^(cw(Q4WjYpvF~pgM|&Q@z0qiFnHWl1j6Bxb-o9NjOj+j z);(o;w>G|i?BD%X;X*xk+2Px~QXEH|^V@Ejx}Im3Iuu5Vt<~eWuZ2%B@`*f#Vnd303ey;0#-cUPNOQTi5y4&vQlKu2>sck;hC7b3B`MJy9J5J@w@I%jnI~@r;Yob1TCDB)r zc{-f$eU;xBbjsjsmPtcE{ss9bly2+gDG^kc-gT;PzbrqLky8BFayzs&*edw2^5}5? z;!^G18Kr3s$E~$*6~)dg#t#lGGf2M+%+VJLfZ~CI1p<%M^Shc4d z?RzOawF{Dwu@7ar-jJo$_o++Rch>d_tYCd%l|3GfdrT8ocy7Ml`wIKk@X`gFFWDM$ zqMSURS4PTb;3@FbyCSGBTm0Htm)j$~x>G+WHwF`;u^&pVUu+e@rTPhIXBvy>}WvM6J z>3f|sjr~43tQgCp*q^K2Uu5jNdo9)Hr+%sZ;k4uPAD0eahlDInG(229q!hdzWj9u> zFZYCoJ<-{72W#qo@7nXa74PlQdvPa~jkkY}ICfOJEoI0)BHN3a@yZ_5J9~I^@@Qw8 zG)dSw-Fttfe53Ic52a$3T8fR`$;#7h*6qeEscwE*d6pgV8w17)%-X%>A{|!P9bi&cq}6dT_zs3pLogx-*SUu0NOc zUmaGtuiTxaz5)kz`srmyqYEoRr_P0Y$MYDk=E5<31LuUh_Utd7+fq!0%3apN-`wuEdD{_mRDM(phX&uM^$Bh4wZ~^a{#+TcoNkF| z=k(XYTIdbcY>(N}1YbT?Jc{lK^dP1z<1u{yqRjS7Hbdx2Z*b)qZo@$~8G7~3Dwq%- z>z-8YYxVnY2CEHwK zULTSo3w*BCk@5o3z(h(HYNt=tjvw^qGRJ)~j~24hzORl}afs}lU$+{bun%}yH+lRR z>4TA+P5kfTU+k*;i4W3=jV}ApH@$jxeREsjOk2%&{c|27rErK99XC=?G|G^sif%Q9 z`Afdhd7EM0`efatK4neBFl6Eek%mc|oI(3j_TxN6Cz+J3#Q9f54yZ_J6VF=BzOY8A zc~J#Y1cwJ$)-Jm2ZdkrzBTDk5TFeev7wbvdE~RYdE8gMH>K(b6ZA$Mym}+&^ef672 zPv6uV)zou4N|AoP_V`vmiEe+LyY47(!Fc7%!0`<_8=r^ak@z~pRWug^nyCDPXQ zBv-@w4eHN!CPnnk^`u=c+iF?p%i6NKKIYTU3R_lAMCX)X-8PS;u*fSBnlC z`0H`oW9vPiJvnN0=5$_waqjLarSX}|O_Svbl1AHZ1q^USxl<*bC_D}0%$;31{24zs zur7I3^O0$I4=akCX#4A6k-3j{#GE1~4{DIcu(Y6)k8$wQx6oZ3Jau`8yH^i)rrY~o z){xiIwTvysz%e+3da}7gKq7X%1maK+KGi^L%pNT%`+=swxX*UV{lR&Dz94E&<9+kn z2_p2xIEJ;}6;!nk=T1s(plvBP0!-ORQt|Dvvt7v|nrlic!=(``8N=__YWKtQ1`3QF z?aYtic8Poae9Er*I(zb~R|gx;4eQED*#+NlS{*B4x`V!0UP zPB0RT?=V~~6<=>Aw;?w}1v7jWbz8&_R%TFh8ZD?h%gn*}o;-G-a=dZS zFa@fEoXH>e#ysU|TMFsF9)A_>XHFn2em`aFu{hqWzT08`bYPMrsTkIe?@E?JG-FF8 z?|mPZPRS~t{W3h8)QG?&8LPfxEH-~->ceC1PBrQI*xjJ>R`=z!3$6NhHR(Gu`j*BL zmmY3C#x@q^=@&09dim7d-^!)F>|!)XCw{-g`Jxp~ne9OL+k@F3aU9xrO?}wy{rO1* z59lBKnru%J$g=d|eT8kMfAB@y`S@owLu-d~WqcjzS$mzEH;VbFrix6hURz5nGU;p= zmG2pJ<(~Q>a0cIeGfz}p`3w=KR`!d*X@RN-N4}#JwXuG$@IOs|*Hi2WJ1&jW#7Sus za1y`iMCgGOhPRsJf?W7@eTM8k*o;I>1{W@x*$CMWGmTcdX%;UQ-rcznDVDBtIp3f> zKbl*M`u0ftMS~2P5R#ZnwmO%X9%<+Eg}*-bLPOp-81t>5c25awUk8c7X3M^L`GqP3 zkGlrfsAW&*x0#sxILYo3y?`z5d?S#8iBL!TZ_#pL(<~;cpl5f+t8GvYwI2 z7bJ6H^cQXuX4MIIW5gxHs^O%cXvCM%*?B}PHc@KoiE0)xedI5Bw&}~jrua{ML7OiM zSNE3%`klNNc-19C#i{K02nNX~?xG;q#iM8xO@7Sxwm5p0I*i_Y`!Z1P&GA!bDH|qd zjjQijT+quYB(LXKhYBmcN8-q^|G4tK!~{otujBf-WKI~|`^hwe?0IAaJB)=;uiW{X zmo!lCN|aM2=Iv5Cpzsj+@~?`*zB zZKIc^(4*AoL>s)qTeSeYAmjGk`>dibd=nWS318}b(G;%I$(RB4_aOO;){m*AJu&~F z3_mG9y5D8>-@rzpJ{2b@R-8VS`Cnx75I*q_K#K6c{!G*%G=7w9ApRGL6tYTA zTJ(fiEB5p*2*%^|Zmy~LKUYS-aN@u2^zdJ#wG1$6Xme8k-z)RtXYiIu zzt?yrN1UVmMh9Ox{B3$LG4)vkVs{VQb6k2Dm)XwfyEE+~)vxnXFXOo_eu+LoOyzNf z_LK7%Dek11*(tn+YWj=s<{0Qvjf}~)2R{w&4433Y=B~R+cQUJCdN=TbL;kZUvG6n2)2siv|2aUFM)g?Uf7i)_41UJc=<}cZOT!L0MM=l< z?;sH{nkxHV{onf=;a?Z_#BFN&&ma*<8;igFpZlMJr{yv6Nc?w@Vg#}&PJ4jHW_IV& z5}M!Th(41mp;EDz?7$K8q%p=t0KES4-Uy^C9*{hFP zQ#~3DV*DZv4(e>+AacxgCJ9{K^xByWiSW`MFVr>F1T!v-q5W1mQcO{dp`JmPI78C`ifRUv&~k7mC=;-Hzdt)7JD~JRE&`Am`y#!hUPBQ!l=*tKhco;27k;@Ct49EcIw0J? ztYzv9u&QT_j@{TtdxGm-OL}Wqa&PsP#1NqPq#Ab<{KLIp{7x_ zFFo(q1w`{~hF-+usreDJakJ5-;!YpSE>dg$+TC32Xaby+0oncCy|uBy0J7<~K_2t`ajnHaT@T~x02n@SBfK>B$%OO=R6 zRG$lVEuFlm9M2WQs!rEXcrpUT7>!e%gU}|v2YVlZh3J3COg7RW${Jn|@stYyoOUCD zDIstYFE{F0xym!@2m_Ajk9I;fqI@-Tj~TTez^RvMlcb?V-x+@Whdgvb~u9P0`Y+bKGqbN4x6mI(dnA zFPPuXfJ*=F-j6%wT)bnXy<2l>50st@*?$rC2H=z_7eR%mTVD2r_DVaT)VJ=Z9|t+_ zkNfVcrp;a@H?#+n<{mT;xV|K-X3i*>ik# z8aD46F(`-FA|(#rQ;9ZzB3yOzxT`T{9x2YU^KSYM;6E}CO2RW78Px=B-B&Pg zZ5PkFYY;8JaJ;uN%)GVOPyTBSH|y9*5oOA zgQixjt(@?G%uR-&a_4Kk;%jJgrghq!nHYW$~Rzs2a4C4 zvyUfbiI~k3iKp})B>lMW|KSmEb8lV&cT_G+J;?CQ`27O_D+O{~pB#u>E@Y2x9#w01 zSJS`EmR;x5rBp$wrA_DqWj85(ceL75;Drjr6KA(kh&*51E*g|+?YBnK>+U1f-2Phi z<`aWjh&j5gBc-<1G>(uVMSpe*^iZ}I7TNT3xK+zX(zUDuK7V80_gYop$#L5YgLV(K z3-r3J=Rdv7KRnn!N!$>6`ofn3*j|()Vn80Ie}1c1B%S#NjtM6>clTBksl@wR<+pno z@K@OaoyDrD`<3W7LPEA*Rr*iv+Q>YIl#%m90$YD;8Q@849Osa>lkKD8#~VGoh96 z;8flCu;5h)G>y#>WGbMA>e%=qw^9qWP7*b-7MhIihZg&r-VGkYGM@}idcfN}gIPt( z+}8!^-ca=l`{Ui>=fbZ}kg2?oDO^*r+s%yLHIJIjtr%TrQM#XL7W zj5(s`v6D@~^9|=Td#<{6%pcV`J*#t_>TrHSBZAuImlXu za@$18%m_(5hTa}yMCK}IP7xBNm z@Zp@XmW=RBM}ls##OYsQFZAqR)^2OaC(bR4+@e32L8V&W^v1(K?{|-8>@;f7i>mfI zBZj`I1m7=s;icw^<(C1EFijsZ$~8R$LjUQ=t9#kcgvrCjRyMHZ5jP8`l}qEK2Isn- z4=hqdeqck=Ii)7Om`7nao%qeJPkx*OeOUGLw$CP-0=Fc*&wuOLc}*l0%plU!-@->t zGX4X7Ie`7Ef8wF!$-BgTF_IV*`nf*?XQxhM&EuEcp)AdFrt>X!*xbgF5Dfd8k4we< zD(N7?MHhld{Kbjg-TIpnyj%{-kJTkf`zDX*&HTiOQ{_6f3`=fDBh1C4J1eyQZ1rS# z!+E~Bt;G_q!;)j`kBLL&rM<~y@3Vbv-z<;Vc^4t%Fj98WC>A4HdQxaikuFppPvbPr zlb51-_)i9j^AtBXdnBsd*j~RiyD9k9sPP8Q&g+#9-etdfrM!nz3n`5f8&`ey+JFrB zWmMQ@m`YcP{Yl)K96!Q@#>Wk17JDHVPWjTz7b_skOUoPH6FS5jv9~o~tQ+!U14q$w z?Rm%7k)8C1DgT#*TxxfW6nJx1eRxzgjCj2#J$Gjk z`puFPqeFTw`Z^MmmNn;H`~J4`w(t`i+aKc9tL{gSF@}X~%)(|xN}aB<^xG>Vy0jsQ zxVFu)o`lbv59_2uQDO$@wwoDV=EiStUOJ;Tx#Ep!J$C0pt=+@}@v}TF33lENyqX#h zSkTLA7jRWE9Aav>iGnX+S16=W0^C^O4*gs4$D6UwRTEDvUG&pF##aX-g2BNmZM8mH z#pC*v*VRSO@TSN%W z*~q}{6QnryJFSsHJd@5J!bO{RC(CJyb}+suYTPN|Cp=Vz8I{{^K4RnRFZL!w=usV{ zFKBo2CTRt-Fxg7+TnnBA+tv_2k ztrr=%Q*_Oaj2DDC@ZErg@@n;=<~o%T@xG2%i|E7-*QBREJ!Oj!SJa$b;bM<>vNDF` zjc!ooeKe6No6wgoB4tkt>a)vbK_t{dDqXWQqORHYqN%Pd{wCYTg_|xcrER%qtX`PC z*CbV595yc#s&>K-9-lrL>sI^7bM7FP@_s{wkd2sHLqmh%AEDDM0;k$W@AV01RV$f1 zOdiyevrXTgIOA!MPg6`Ew9Wnju1rxlItMK`E+q;FZe>n*N_Uc5Y0C&E=~AehoUY~y z10Z@$!}=ZjA0!HR37c@JaYu!>G52TU+lWU}iTdZlX2|ghE*0RP!zIjeingx!^VKI9 zzrjz|X+AnNG#U&#jlBq3pINx30W3dRrPF!xH|(uf1hAg`g*xxl->}>76)+znm0o%x|G;k6(=VLv-f%4B z{s&P%VF4cC|IUC+_oK(5YP&*3#3>K?$= zjQ8}f(3&wq?A>RAk7nn06p8@QwZoy#gai_O!*T(b@Rj|lDVEfu^f%Zc7O zPf#KNLJc>Rn-k;<#TE{~g{n`3^jfPw|p^VrtPp zOhWRjohI=LdlXFZs&hEe+kSqsM!7TH?}L){crtvp%vsd50$U|11xU z3M@}UN>lDX4glfDusp?k4@(*TrY@X#VR=s8BYE?W&_2Nh%hSC``Zf5UkoS4Z@SO4vv@EKi?58;M{{`bbdg}RZ_UrreEm^y+nSjZ09~83Hzn{AhLNL?#|6X zQCq5d?jq|!#Ys@MxnOIZ3+)Do(gqAbCNRpME8T4MAs}jj&4S}Uy{{93AAjBVz_&-=eC8@ zf9J=3tSW!At`msq%s0a z)@b{1<0KI!MkjCL8c-RzQ~p2DY|k1%K+ZI*xSPx6RO!9@PJ_o$Eb0Xe_jBEY2N&LY zHhnAC*{%lz!38yQ^z*#Xuf0vmj(y+frNf1sjDQ{kq0+;2%%` zMb&b$F-T6$N(U5iP3b#3*hw4VQvr@3PPe~#=#BYxxK;4rBk06WaUO1;v}!#zCm%t@ zoFeMR-%ey6Y8$IAgBdb4U_#d0lTi)5R-@-_r+l4L4x?8+2zg z-d=wh0Q!K;qcC8S43!eN-v)a-kr+Ki(5yjx%Q}z^_q)jPX1;%nU2Gs)>juosq|bj; za%K4E!S;9ua2O^}&$P8XcdX5ckr7O%2g-&O!BBLi09&(N)Wx@+ssNvL(V&Zvt`d|w zg-2w}PbmXF!LSmj$PPfwj$>7BHIiymNfy?aF>gI)R1N#3D(+kxiJ0p|$o(Dj!*84u zSMGdfDqpTY9zeED7=c7RvlKA&kd5N*Zp>)Pu+A>?Sy0$fOZZk(Z8Uy5zfn%EQn?&( z{8Vn&!;_@#Mdq!jCbi+!ZxyeCm$BEo(>3Y`##4A2(~-BO`!MfJ&~gw0<2L7d zsCkGcO?Bu{&A^M!$l*2J<6U8vE-VwG9kfqirEAOP41Ul1D`b$4NOOT1-?hdyb@tvZ zmgZQY&@(+AH29AY60T%I>L!u$0)JuT4s&wr(x;a)qF1(7*{NJpX1h~3HD9ZoCfc5) z3dQfDwwL3I8xrKaLO1(^U`WoJ!t4WY^}-z}3}YcCaF}NH?79^ArCGf#XsSxBux8fk zT6nLDq=8;Fzee_9uff+oSjKt-e)nZPfBeQKIzDytmT--ARmvyQonL^`?Jqe(D(!Dr z9J#^`fD%vHq(1!w)~jEnN{m62SFH5jnvr4xcM27EGa2DvFoPuBTw!W#Rs(s!Nl`KN zVP3&tN(PT2%nLNp0Wd$=49d-miNsP1l+T(vWl9K|Tf~ zpAKTHJTI+9Fg(QhHr=oR6T@SUb-dT3%uF52t*h$Vne3lf}QwUp>ZF#*b} zUQ>@)zjR(j1zdoo|H(&wdc_Y1ru4NX7b)uej)~5=`1ZF~va)MqHF5L)a|A|}U;&ck zx9-j}YesvJB9D##M81OH5|K+fv0ga=9g7N~B+k_*y8@_1eeNA;F@y6TV$YnRgQ79? z5}q?RZ8Dkd#qHvn#IF%aH#~$}E2W4`l5lH6t+PE%SZIRVup0vt{{AKb=af$g`dqBm z#n%a$`1EuBRU`)~+3VfbXANG#Geo>k!!n6W7ABQyNJ%GwZL@0@TNgOBRnP1NF8JmFX}y85bhh5YZ+AP+@e_EIR;e5eEwmFYNqobjSv zOc6s!a9!}4Y1TVMxDYPXfrOOjc`*Ki7vRfYKHXu$)au?iXG}<{4tC@OhNUXzweBP+*y*fD6E0uQ`J4kM~@%Jf}X?F7{aWmBw%v{bxbWZ^8Q+e;l0(nfFv+9lScPzSpYssM+P<`+XuNQr_ zQ_0~03t{!{?DKc73pYgEmlOR>#%q1rn_QrLo7RIe*y4Lz^k=Y$h=6Ud#CAZ4vQm)*K2U0l4rRv^V{Qa#(#jkn#ncxuN_t|rGqe;yLk6s%m8*O67hcgp39hjVnL~AE{H4rE zsVm+y72w{>fw*ANovaV2n|-=9@*K#&?puO{Gry(pW`G+I#eV78>_N)U^x9s%@!rYQ zbGfMXJHnE@*+uJ%xXeC<$>em5tDT2RA|C27joPC_Fa4VPrCfT2l2AvnL10e+yGwQ= zTj(=DT{)ZYSHT|%#pZcLU0nWFp;`NDLs^Q-(Q@LM%Uri2R79Ded)at_)$uc!IT1DP zX`BD`07{ukC{5cyLAU5lzgFf+C+e!uUksr=_o_mqSPAbB9raOnB>YB6a={i3n;DCY zE|a50xbTYo1nQVJ1*Yi)&m0ycG?fXs3#!Sf}Els7@DChpHnMYMC~~P$Ek$ zs}GLZ?>p<$@tzA0(YBWDPc>+wtRVwr0`TE?nN>-VO*xg33Oa=5O5>$ytWq>%5Cz+H zNsQQK$1%H-=hVpQo_H)tXiVUdiegN`saD0G<<`oM>ZKqGY4F+vU6ZP{lbW*Zoj~Y$ zd+-RxkCZ!hG(0@nkH!RlFBt`;lz3>t2cO0MV=QX|Y@Wqi9oyurYS|~NcfUiCR^YB7 z-)!HY`4kl93OFTNSA6;pU~z2{dy`p%X(u0jK6_gnv9+EDI;8g7`r7o#bwWrv|HPRFt}De250CQ@3?J_qdae5W={%y? z!k}6e7B8yfD>U;b5pM|^+g?2vF@k<;a;jk)D0cj*Z^#*pP_=vxC13!6XZAM9q?BTO zNkfyJ#gD9MTBJ&1T7vtreqo=^LWaeapJUq*)Uy=Lb~tQXpa}Z@mvzBApR-?zc_g7j zN`W;%jJEcvh$yw<&q}f-Tip*1;7kApSC`I55TIQng$sa3I8m*2O>n~m8SGy^ks*9m zoJgW6JuzaVbDh2q+Z3M*y;XT8sOb5E7Zox|-I?C^HBA%F*} zEhaGa-2lBO=T=o25uMl|f6eZTa(>?BkdvYTJsRa%X}&i-lQ^_)F!#R!#tRTAh zOpr7eh1Ji7>KTF_vW-B=<|xBh7S-SWZOeR3E_-u#hUzGEERwOv3NN7NHqM|;@3|2r zgC?E$jQEV=IyK*?i+1S`@nt)#QG7;v>9|EUCD_xU@%!xFDE+%Pz^}ueLm=>g4C5b) z@uR#%RPY%=xAjke!=R;2MT&*zz;>5n@WXij{!sQ9#GBjm z*UJ1+G%7ghh$2gp&+v!rID`Yek#b88$OxF>c-^B)9*kg~LQ0r^$#y{bx8h#uP9cz)oEArGIyvX@U}uLjB1C zp-v!t{0Qcq`Lv#uGG39A-xRx!SLzCj{~-9LD5wuQbz&qH2(F6-QCvO+H^*my;h`I? zG?>uJD7I9yobn+ZvQS!NO8N%u*a-rY{m!a0@FX5b%wCdYurrhx3p?T76Lly|s*v5F zBn=W3(Py9*NKdC8!Y6YQ!VLw_84UYBTL#%~NFl-mUy}|<3i^xv`v2}blAqCkmpXU& zt@Gj0*I}+d$TS{<|DD7{F)N?de`ldsolY%y8{@P8Q@i!~C0iC}XXp3=#%p1r1)FV5 zV7GA?$Wnc-1IB*_sDhs8x--#a8`V@U50`S1<+h8)NNRz6~C|b z?6M;_xJsTPF@4LnGM)jDHDG7qt0_CxQk!5x^7ogj(=6SNW>^9-;)@VhN)>4TN}Dfg|L zQ4&OqQ=qu~jL&rTo@tLW+LS#|OBKthz6nsODx8u}8gnpzn=bUNi9^vP!UqcBk+EVO+L=P>my@ z6-_Kw!j|Pfyd~nk)py+X>izLmTA4)r7Wr^0us}rsZmZXDOdjOSBZr7@ zP9W&;O|Pq#`igs=Bua{sVfGH3B#znz@J+Q}aRm#NewkfTo^hQ~>2Wh+nTJ4o-@u(h zJfBeB=eoFiIt-U6J_?8CHYKF4aAFAVFC>=n6YDk=XB|v1_GS0_xvVjg`e1p{1lb;9 z9oT!uk6B4Zc9p{yk`Y@YyLiKWBnYB#o3Ve02872}f9aC15Z~@J@_LBD#1FFvTmYrpb(3srZTVQ6lYrSZNV_!d9U?unK+7Ql~!; zmB(=FGhO_%<@{BmgK_4%5^wJQ5|>;5&cpuNh0I?#fo{V6##3FSBox0>jHf$_aU-aLRh}cQ!D@)9WOc35~Y8 zDQ5bX3tGgIpwfu-3URyOkI%qsKP*^2{-DW;$oR+-hx8L-SKHV*@@ia-5B6xRfx>_l zF-5Db(td^A6j%>CrAN3Pf8mXjZXZXB;s8OBTEopWrMZvy$nb@bVW~dd{iDmER3uM^ zp5xTvny~f61lzXt&IDW$3PPguWW$k{E_)MG~eCG-p)g=*2~zTQ4H z!mRlPG%)<#N^u>$)+rC>LQq2v4c zbJC3sA?5PL4>WPJx>7x#ym1yiO|Sh%m*#lJkV7)E?1^;E-WLiV|fR1s5; z8rc~gWOQQw5~dhKaGLu7_h|>cQWet>zSiy2_Utcrw`4O~(pyt7jFm7ewBoqF#_!35 zS>wX%WlIJ8zeHzY|Mxx8@nxTTOx;DDcH1v{_vd+-ChYJKs~!MBMd*qc<+zzHOMpZ? zc%W+_U(C33$GF2~a?15||GKo3us`I4Q(wQfL*G&LGxvqh*Ty`iBRpC!EWm;?ZJk|G z`aSECcqFU{(b3TH{92`xS$J%D>WuSW*MP~vLcQ}HH?ZojaUjqQB2(;TelfMbe$IqO z`@?Da2uKaDlI1g*rtr0!kwvwCet>&gA(kbAP)Dww-w5nOaZu8=E%v`T)*kZ6haA%N zec7JG%~9#ogsczP_=tbYd6Kf^J3yU%2kgHXl!Ha9LG+9P;4bf4m8`Q1)_u{pN>T-+ zL7JbU`DOkLH0yL5NMZ=ctU*vdID?~?1`0ZWg|-AzBIbfT$->vS&yluE7WGsvARXdh zPL!aX==cJd?0CQod}*m_tyZ&<6DZpod3t>QY< zz|IWqs5ipEIaJ>#j}k#0}m;Q`DY3!YE_JN5_(lG}x?_4h9W z*L$|h;C6i0ttt^yhZIl8;n^%UDj-`v+7)hnIeT1Bd!%QDB)l zSS6JxKuj>MbYHRQ=Fcr-PfpLhGDP`j1vv5E8kEOFRg`_sEP28WkzanJ+p!w2do!MU zV5>rssS-L9;U9GJ*pQ-b0ihcFw{yXF9N>c1Hz>2yNX-HPhS1l@qTUa8(eOXn{futV znbJxBJ;3*t&7r5;sRJ7|=pE<6+db{(E7R>9EzROqL7qJ|O!BaN0m=St?1v80Bc~;@ z=EbBZDN+vFb^gq4BECsBv5YxnYb|G0?4F6jM>{atXjP&45YaE~#(oG1Etz2mgGBKt zSQBHQ5*6{lzM#3{u3~RaUw|Zm=nL{0I&W_x9m*YW!s5yaWW1p$(jFR25lAo}`|4K8 zN#0}yjL~IdmcOci`Iib+V&pe;UC#FwcEvTldU#GiRHzzemIbk&+@RkXC#L-_?T$7?WVLUHI&+>S9qRGom&8izm-eTV(2Bnxt@B60EPduN&c zu0=y&W;(B(AF0eo%Z;@o{W;UbU8Xzm>D?Hvp$7*n;8VLH6W^wH(tTv+5ssJRisUV~ zn(#2W$rIvg8iP4@WPeUzBS8rW1wU7-Nrd11?FATp6VS}SHIWI*T#4j7=Jh<3cKps$ zvK8R-#9XH-Qi2T<3r2rVN2F`+_<&Rf=I}Pu3hzKKJd+&Z9euW8o-)0uY*fVfZ6PQ5 zx@u^zwAi`DO6XOSK}#!`&EJJw%^~Rxj@aB3wjOSEV>eZ>$D4(0C8zfTDk`-e;{H8n z{;1P_)&w#(XmYeLpnzxyB01-R$DJII8sO^!&@=JB+v6^G8p26W$pe?`-MQ|Iu(8hm znplB~EfY2$hcNX%noBs?#KvwUWwRfril0vg?LU{zX|=)d`VGKZ4&~4?#M?R)5IS@d zI)?dN?<8auR8Glz@Bdi10CoC?_ExAk{oLn=&rXj7X1o<{$%0N%3xxSw|AoO4PxO85B1>yKrzVQuF;PP5d7t0fPOU5gs_dL(lZj z&H$Fd>GVWi<-hh9CbSt|Xiqxx6C821N7Jou{>m3nt%gEGCvp>d{&z%YiV_Cs`Ty=a zhByMwzdU|}>?pru_!HYbAR!RK5MI9MPr-`;iv1d8{>>;R z*Pze~BaHlWLXjH554ik4ZB_q#O9!SWFB?nwdxyeTfWrrT_wN6r3;B`aK^+_qZ8E#7 zRcqgFKo77wE%f|XCYFzqIfnqQH$#XByMdx9cXJHNGel*g!{TV(%Qb=~S5?1iiozJpg3yce@)sc1vSEzBIvEu@`?h}BI zM4UEEU}AJ1+63}SG8BM(5We7J27f&!oL&n1R5Fj}+{U$BNZ5rg9R&#dPfvnL!n5p_ z_r`Zc_M0Wy22{&Ub^n2zZV2B|}%6By) zj(SV*OxVO5Ay5%%a$sD9f<4RW?yDABGJGCnRW_^_n9y;kbPd6muQ1O5?=hoAfQ#TY z0xmxadHcX82gk4PER~0IL~DigQm6s!;Xy3E-q1>g z2rKM2GD6GkJdlt3{j0D+NP>-@)=Vk1s}8g=mHU9 zx%4q_#%p3_!g{5rY%iN5uafYUcy3zNEd5LsvcC!K0b$NukkH6fo08?4-~|w;)%@tP z@Yf`D(oo6LnBi-&n!?3&$k|{963*z5D+QGvfMXSG`jbYSNuxK#4h4~e7-+zu`bmT} zJi_0378YrUt#SZU>@^IOZHrlU-kH;$Wd9lq(brJJ<&He?+LPif;f42UY&JsAV^Ao_ zuPnfWDg-1aF%zId01*c?MOQ>gs*u;`@pFAi1h+@xY<&v|F|rb{IP+`fWl<<;=@-yH zO`*=f&?OBI-U$75?8!g$g$QKNlFSeEgW$59V9gJsHlKmie`h0NIv2dhwa9MP+NgUX0(bJX@cA96fgU& zOMeT6|G9YLA7&BV$(3{t373HgqcHfX*aV?k(CWX*;m{Khq20}|kblN#xntS}&CA+h zDj-b3FsNl|^)cpJFeyDZ5F&9fk;*{X&3t`pfY)&BwC7Cw8MmZc&!LqhJ-;XZ;&Y4y z{`IE=|IunOR^po_=96!YDv6OMXCyI5_)|<4NPizNLEe?oH2po^ZoKvu68V2SwrpzQ zEi>!D?Mb3l5_zSFC*ydG$Gny7)2`f8gLY0fggAtk0NnKbwJz@ojYV-gP)3Jr+&w`0 z9FXR7f&$hw?;DUTSdk7kL(n2`xzRL*qV*8&6sV+jP+b|ezqv$;?JpP0i5$L;&{gd7 z2MwO>tO=f}3nVszKy>?8s{xi!X8cT0^L~q!&6HwyvBMW{Rcc|hpnvq$aL7whdx-Pz zEaBPErjsQK@9UaAhFHQuYd#Ac2-(oMM>bHam!m6~4rZ_cOv>>pp3cA6iBr#cD8esizq>b{ESll$AN7h$ypgYhOk12F#g-awz=qTyd!C``BV|Ps3oH&rO(q&enJ(`yE}W4500e*pjU3Qc9K@JjV(7nBDc}|pjk6tt-QU)!-R;8$5Mg!XpndCrtrSN}ca^ zNKnE3g=*=8C;Q&8c2BnRf}fr008J!R9I>+$_LSZR&f5mh1%Jg7-ck~sEaK7*8*=(g zcp{@mKquaD={NA+n;XzuYtS3mi8ipLi)kzZE`cz34C!%1b|)yc*(>KF7}R!vlx~ zNkK$(k#v-Mc3YZUGedy^kbX40I_DO&ZZJGJ*h(N16b_zPMtW?CXP9w9Fjy+h9ad-+ zEO#6~i_)$`oRLp*z+7NMX$P-n;=rLUQUkb&$ zi6v*0g)n|}W2~VXsDTSO^uhJsL2k12rj^*h4i{xo9>V;Pl2 z3109!za-Sq6e#JaQrF6ttekBsGzdV`67Y?Z3I>OkE(z?U?CM0_Qq?Auxd2i=BWYOa zwr6d=jLKIq8h=?952Z#qpQJLeA9!t@>M6&eo1bSz%IHCxda3!Ispm;Oh}vLmy~=O% zg{?f-?7hGju5TRZK-#b@jHJa`NU*0>G2Jugf1U2_BJiEz)21&@fu`x)V0KD;4;D z^p>2<5`dfh)5kCIlRfp9Uo(@2yk9!|V&zj};2RZiApN5xo0uuo2~e#aj4=p7>Q2oG zPQoY@lwV}H+K;njEgF;+foq&k(|r`${w@X?M^qr{*WTMo@VlxYZZLKb$jEd)BLU@c$D>noQ&uk7Vw4s90q%+(^ zqp{nqvy{Gbwn?nw#S3GKYn_(zZIk%c6OF$m0?MA=?c|?}`Ezl|Rg_~(=~n=Ihhx5& zoZuP<9+Y%!AciF|24QsjA1fWI<)yO+x-7IlO8jAC+(|IWV-QN*9RXj<{KgDl;;fIKt>NOqS zGjt`~DxPDF|7kf0TMwsPK>)p=fvD7{+;?kmOWmok+25);&tEr7Aa0N}8?3`b5+`H) z9MwiAM6`n*H3hFHYuj;xjb9vxQ^!#z|_4I0|0bD}h+FH7Kk=ge6 zW?|vWL%Ls}C>X(;G2e+59eg%3xp`(d!p8vb3Gslus!ID@#%Rx=6Ik^XRydps8n^1^ zeSWi9?x`3y*F{tH37w+JK3TUKU|CHMhxWa^X87y-2jVQ?G!O@4zSA#{0bQLFBp`|K zM8#zi$qpw45Az3yw(<5~hR@8wwg*`UE)e-L$%o=)kvdI%+dXUUY}Hu+`M%b2*b4TD zYwi-Ud3yDsSBDnt7NXvxfwe0%SWJzjdg)tmqwde5UG`W$)%vf-1ust*3SQDpmH?fFY!pO|ABVyN%j%oJ}zeScC65P8OOFVZKp&JO%9On?F4Tm@X zm6qY3O$*~0TWL)=mN8f>Ugy0I>JX{@EZemkJ8Z|%rh9RPzA#))8iIIhxo*a?Rte>w zsMkYteiUfFZ-2B8oSYH7O6KMXLhk|w46cmM(-YhOu(EVGT_^Ue@Sp8ElYHXw$-$2U zPR-fzvPj*s0o6Z7+0!Wi8rC=P!4EgLLr)-6C+|@C2;~)`m+Tc;WW(8$W8a>fndn&} zI)Gh~-tbkm!y8p?<%H|uUeyVUU|{WOCly^rZ8_l#=!Rp#QeJyZIYsqSy|87wWOhEveI8W;W>VvZDC~zN-_K*Y-CdH37JJj&sa}s~ zG#h=~0z^^Aw#(prJOO7etyVzUH8BIQu%2K^$(xzUZ*o>X(PQAwXXGP0`c4RC&S=m- zC>)sliY|dnJGlJs=|5J_o|C-ciiGj`U8dEhN*&v?cdA$o;s_`p+r&*SaTYySp211< zqtm8Px_={LnBmedtUc(Uto^vQS2S*cv+P|w!8PE_7f@U#ur#|6Tr4~~XBV*iS~#*C zqF1Z8YKzM#L$eEJ-vH#2p_{zgGwA*0ia@D`a-kRDb~S1oNvq{BgT^Isk2KNZbLsG-11dZj^xd_db)r$_|3(ZlHzTE!HsUzwcZw!26ExZh9qT?jCxsSRijP!h=sjf5>0FyzR_}O_=?u-1=^R%z-|zB7<2Ij2{`t$hO9m^F$ED=-%jE-$+62ISly*D^&*!*i%(C? z)-FcL?s#=?Px16zmu9_}Ymkp-gMF9b0Y1`WN?q$#O{vq zT~GL#G>IH39B55E%Uz_X+1n+!@%iLY$c0ps^USgj{2#`V5 zksRMr68o`>nX)Hvo;Hnr);KwVeXB~n31HSIz&{Pv8qm3dP$9xQ`BVEmfdK#sot=hVV9w71n>8xz`Cr7gg|HbiO^ zEyxMW0>1@|(ru!F+S#5H$~bE}Frp>FVT}$;G-^Z3zPDMj<4?8AZA5GrPKPPW$ZCg0 zTo}xAPO`m5_ciN9_NvK;A@zq{PTFgy5Dy@Sa{sTV1wH(7)?rOVd}gNdLE%L!EsH5G zh~uupnx%q?;l<7_o=fv=JC(})M2Z;i?kQQA;JnJUY}dNWbD7~ek&bqY)(9>fSO=3q zn+VJcXLVP!8$bE|Aj1l!>o>Nz0gL>x=kRHcVis*RVU%oHMNOiKe15MP*xr^B7Z$p@ za-1toO)XV}LmjQada5C+iB4G{*?E*Sha1a&1Hj;zihF@>LeuCE{x$pDqgOwe5S9;w zX&`-V1{lXodQ}@}^7;ovm~uE93Q--Wn|y5T-E7x-|2EQ(O=25|60bQ8cpXPR1Z4TwGAP$W{J4ecD(duP> z91%gqXFx1my{Ssm z<88_61n_hVMo^tnNTof!5EQ=9S6PI+OOH#U(jMc85YIHw(g4K$9=Ak zt}cA|5^~kOcS;7#I2aTw89y>6e2&m84lu9F_md)2-r6NvnX&vqY9Bqntx8(CDvpCE zLr9Y_@7cwZy@^f9J=NZa_&QfdCkc({`BPoS%XGIIYxLfCZK#?sq057xwDjWBAD$^$ zC{I+KOY_$kHtXcMcT=@|H}qo0gwq5j%tgFGn%yo6t%KA8zYo?Lts%nZE5m{lhltWD z>r=p(L++)!O5OC@EQ^Bl%}kO77SohR3fKqE2{t%>1udBqi>HlaZb1|e>Nt=sM%#W8%qGW5*EDL~rg^TyYhz*I&g3QLVTZ*&?nrUC zrmXfQp0n-`D+Le%H-mvPdg&{#ZL@j6>X$h_y@66>e|w+L8Hbk&qRZz+93k#>w2YD& zZm}zF_(m**0%#vgg?*cMV&}oXg-VDD9Sd8;-&{&8}~13Mg&D+lamIKjO!yn3sg(Db~S`2Hzjb zhh{b7|3uDoJF5X<$EO0wRHQo_VJoKsi))k8Q)HQkt^-MEPmLuc{pkOZm|2kh`Gol9 z%h9O4s?l3_L;<6EZn)@3yE*|NPkei8jGmGhN%g;?oh}LTd<>q&8u$}gc|s^MJvkPI zu)hgF=WT@AeAu(QeFrLg2|1F-^jPz4gw$j37_Y_e=$nfl9R45!>@gat2y>qznr?1v z%_U2Tk#-vN!QEg6!mN5FX~KVb8WXJcdU=LtI-Sc*;>)3azjI3NGjPYd+M$r8+*gPr z<06q2Hk^(elk{T{uP^dGy*4_iQ*jy!m|d41EbOu?4P;C&L|wb%vKtJUi4vy2{n>6| z;lAdaoWfnO(lReQ4;_xVw)bwY;j=4k3vC!LlUfJ$U-$iH+chJFFg4{8MZUr1UQP78 zfbnCHk)^4D87qri{vz@N>6;XGfqvATeV+b7p8vQ2<0vd0fhZG^VfwLw+Bp)arvUG! zVm

#p8!y=e%VSEz5A1!{}AOuQ0US)*&9cEQaDFTK4dXv*Ss2JHEZmqF*js$nh!g z7(PsTXuHQ67S_@q_Z*)uCN6R;d{j;IJ4VvHGM16B>XM=jEF?5eHM4mdr{asBpRXTD z-^GxJxM7R1ouXq#q|^xLGIbCUq(55SXt?xM33H$-@wfl^{X<6I_pCysXP#LpuhxhR zo^%;aF?uNv;)&ky(2Ff<{K}6w{cymn$bDnuNKiS@#<&?6elVBqC{bbw(29dI_9F@R zU|=cxcNhVVYnu2gxNRKb52_ym(j7}$7px`TQ^QsHCkM9oxDa?B7J<#xOKcVI+W9hU ze8-_dm?jN-7W^L4IjFt;pNggR^8T%+@utFqo(7y{VaOFsuU<>q{|-IOTka!)rPCoI zkt^(-YD4E3coMQ|@J;GGd{0n*! zxwy=q?<3)U7VsR^;NT)nPdpFb`Wx2=(GRHE83by29O-%2^d?LAeEi;;ncvg=M=d!{ zQ(5koaBcsL5*Z9)d1= z(}}O&GCvN~-p!C4*%;RdGE-*0RmlKfW!zfl%Uq|4>6hEauJY)E+hV^aE;e2EV2xJi zJ{{5w3ps2rj>EQ8i$>dp!De@!{aNJxI{Z@OrL8uoO&``;SD2tcH+@vtZA#ZMWV6vq=&r;om~9#2cM?*N|YfX zDc%r#TYK`?;9&FJ8_QG7Mwy<^0bNwCaWH)7Kdw#@*kEch!2kAnD2~KGvQ=R}sTIDu z8uE?3d*5_1`i;P37kXVu$zN>W^bmIKfTkXP#u(fgYjfsn&%)d@7AueEIz9QYigh^f z^f{PvOBDAo#Jyjixs*|dfp+EhYQh}Nq;BQ5ue~nvHVvo5M-!0CmHn{=F7z(6_9qk% z`|AW#-FWN&_$(au@|r+XR|oMgh!>omy%MMBsa_9KU_{730g!3FWtoprh>XqV0Wjb z+_-do-4VSP;rg9C^1CYL9#N9YjB*%&H$b48wNjVLxzAdRz7IIW_%CbWp$$m^p*USt zKh*t}YiR0@u$aF#zkao%LWmIO@vfFZQPknl?14^%J_3Ei;*MWhrhCgbq&qH&PNs83EiZkbbD&k2@2Mq`!nf$? zJPl=p{Y%9enL`+)m<)Jy{JAJ%Wn8~~iVEINZ74IU>pwhr*tBkI-QD#cc3GvEJhj^3 zUC?J~oZaU15h7N|?|NOD<6ZrNqPVyFss2mC0y%rZQ}2*7b4~Bm5lYcqRr&4j{gbpo z+czkRo#V!zZ(?o%gy|IH=q`4;{Y||{W+0+jRcd& z)BjldMpEhEUV`3sXzDW<*wqrB2z7N^NHRoz{~EkPat0(sB&Sz^m%}@3y2hN{yNMXY zKx0oSMV>{Qw*bUAM|UDuC1xhn+7rHoNT1Tgf0CjP4X|{nZb5~;Av`jc28i;K*lCx+ zv0NDb=Ga(@#x8D)&p6bO+N>nDA%?_v2E_xBUU>BJrOs4eS+n1SU*%Xb=_$`qcfj}n zE8S6|Nk#T08FFB?AcHKOUH{sqX(QRAgwT(Xl8A23VyaoWt zqeTKIZ!VwdV^11FfP)K<^gBTLUDn{{T-ER?!dbuGZUfkV zkvQW>=vf;+k*wiW71WZJIkq1%bP?a2Av|b{1;KU89V7mnM+s*pYm|?F4wH9&yE4fp z&))VZKoctVwq53~CvU3(PIE3@>I@0+n@3PlO2%8oNxp7hR8%1G;03X#XqrH-d1J*# z?_ax;pd1dens!t7>Z?z)ko@#=UHiSvq?jAqFAVO_JvnBa&~kiKC7QSI?hOmKSN9L5 z{h4OuYfPaw1*PSI8ZQ0p4tUa=kzW=jF9GHo|0zWieCV&d!9Rd%F^}x2ty@Nfl54%) zK;KgF98FcNOy!!fcW`K8SbV1p3T+X@<0vG;8PY{)CdkTLAN_pnMb-A^^2?0d_>PuI zU1W>R)|t-iJ7SU%@g~_&l%%p2XZ-pO+)la7y*eQ-o%zb;TQ~kZP3I(N+E*Ce-bSDK zNLvb75G-2!`v!dd=cM<2ZC|`7`59bVaID0^cu7a${C%Z9>1N*6}@Z^bc^$>cIFmHJ}_FfpH0dc+h*```U|`HCJ$eu;mC(9Wn~(-F@dP9la#bewn5O|XFu6qM>@Ky>DS7_F5)u;qCsInqBDpB;5y_OEYH3mVX-0Sr zdZ~-w-fFjlme0t>c+sbq=R|I|@_MtiDd_0y2ajfcv5ej+IMa?1-HMFXj@15r%r#&_bpC;s2G- zhB&ELl*ligFQ2$DLxXY zcuzaMCQUV1k<#?%*2&$g<5ON8fjQFr-E-idE^FYhoHx@io}e61R(TelD)R4^e6?}g z#|*U1{xk0X!kyUJJ*9LjX#R59NN^Kj)mr;E*85W8Oc4`*w1o)F2Sw#T8dk~w`xnF} zHiTVkU}+hBsP$gW<|k;MW_%kS9=2Eixv88RFm$dsmjBT83!(Umk{A2QZq2SnWqcdN z1HxlUzAUg3y>ek*-1R2<-jz_%2p2NWdHiEfdmtMojE7^W3L<6hfkH~?rhx3Vp?;G^ zw~wQYnha=^xJ8IB_(C2V^tVbx@1jgsz)@VM!j2U(kHD5_bf~*zC~VIFguK$qEgsS^ zG7XI|PKbE25Jn|h-%i0CjTmI9jZ}-0=yDar&9G%E=IG~0z`N%PibffJbV9C>cm#w) zbru{@GAykDf{a8*DlO8z5qw$EQ-sf|qe#nVjDUTTH2@*I7BKv6FmlsPh-?uLH?!BZ z%u;Y*Zex^*|0vxU-~iFX7sh9?IzN;v~?`7_Jc z5T*B7vTh>vD}#t#dYHt+@Mybxq>oGVD>c{Gc)c&ULhVXy)F)tnGoX}5CM>%Bn*xXZ%S=;q5C z8+H`I=iRZpxV~ifi4*rmw&tfxv1FkR_7qovUmDL!8?&rZlz8z3amZ?LMyrrCNcp$G zzB;9|TQ^)%EV2Mw|MqI1%O-T$VjX%(jI8uY(t!HuM9I9C8)!^T*5F)UnR+U*9<9iC zAh+3-_n|GCkLDLYl!&8!O`9D{P6%4}lDS6&hXTSK15%#>Np*0lFo<1l#@S&Wi?5$& z;j1o#;sMHgC&il|I-UonskHmPPDZLl@JsI`wJ&_z{GV}ISM3R94{De@kGVuN9;g%> zaBoC55>v>FhHhx(r=yK?PlQSO zHLk$3cE9@WPA*qYKky@-1G8DBA9t@k;(Yqcbg>0~EiE=Gn3SeqfWjg-m?ovKztcck zjN>&WH=Ska+I>K{ZmHys;GFp`i4$hputQshr^{JbwtS=27HnO0LT{EOw+E zP-SQ>=lc~s{^?lJn?H+VO;;R?dMc4X~uHZS6alTo6a#>JCv{y4{Mh)u(!I3GgOTEkS?>DsTIB;n} z)_Vo#4^eX*nJGLaF9>h<&(C{J9IH#W%QL!FLvQ^eL1R{lktosTy6A%_QFL^5C#Gf5 zUZR{2js&UIv#0KFc`^Oqoy*xDbbrrv_g&(ULW+prZat9kwM6H5;oprV-ix5bCxlwg~>ze4X) zs-(+p)r+s>SVZO-IX>=ANC_rg6YmMX9G-`hKL=z9@s`M0P7q!$XcUGZlRPw6{8-xt^0G?LsobyzoIsguZ^Jj75n?Ocx%Vi^b z6c6KQCL=+l(eYQC>`fvzj=VytYSlX4Q(x1Vx;5q&-!E4pQYrKPsgMz3E}TSV+ag)w zMAQYIKl{JNurJ&QaI!G_od;dO!LIUiPE0wVxu4NZfW952g~TGZWs&f)BXhC-=ei<* zzVa1m%eCW*qu=AukzL>dPQWoRjL7^*g+O8!w!_r+Pa}Xq2s`sqi#7Ecwe+Sw^^(+TtNdrv zKii1uQBFRgpX3~0ZvwTP45kxoxdpUKj6^9gjG6Va=;)1D?Yj>GV4TbB(0aFQfS-v0 z&OS18WCXy@bExzh87v9G4M+L0D#4EdX{6a-yGWbG?K)4TC1u?(Sj-zjk3X0lT>qD{#j<5%lo3DcpsIK7gfHg_xF9If?6(ggMnofwW@HW}_-mn#lqW-i7N@Aoui|@=cvq%wlWC5>`yZQM zj?0w7FKbHknWS?R!|z1zzaXugSwtKZkJZoM;fuxwey{8|$fY+!4PQ7b7RI&b$%19y zX!d_c(jryHTx3O*J183RI+Mij(@(i*oQL$)ErY!dd-*C19<^snB5Vs{xjL~1vQ`k03UusA{>r^ z9Zi`jn>QK=sIO(;@&*TfNnd(wEMK0&wG|hQLl`X9@;~79EeUKoK~3iCm$WO=@4sSl z?xLRWv~wK4jX{M^cnTk>?m5w22>oPqg z+zhOA`#XSsZNH%Wx=NZ7Y1Fleu2r%kCdT15dBb87puBv?*H@g;0K_r8{o{8z!5q>b zocnon9j7%%+e19UiZ?cbut_#Hfrm(f&Q+hUc!LX9_P2N_K1()y?v{gU;v9TYf2#%G z^CWxHacFSgL{Z%LPwTJ*aUr@|FL^lH!1cwyrsEzXTrZb^=~7cej*`s3pQK5*M~D1+ zDRZR-%xk!AQ`i4X<29A>E_=0QOWf`MD+n}D5PU!4@D|;wKLMs#0+)J-&QN~-4gZU~ z|8BDd9}#6NEd8JH@iYl;&%(F;)ji`(y^j0HXB4aWvoQGoqXdMTUk~M-=W7o;ZhSaI z0)M5t_(#9E4X#JZ=;`ZAianNXIQ;(678KYr7i~^$RTP%J*pJFjY<*XV3^vBCYi*n{ zx(V9Fch#dg9QlGcS9S`XdM_d(#w-yuJb&q89R_rSiUvD0;nc$-;mYCsYxF>VTZL`S zCR6Xi2x+G%DuV{J@E%%)kBQLkU>I7SM=yGI%Gh`M+%A#Zj2-;XKzs)mz|ufBc(3vX z(ELRhk2|agy9f5a2^c(Gm3?_8Vv&343iU6AMba5jS`v(??!5+dC)iHyy=yk-5;V8o zl3WPx<<>V%<}dGp7y{_UmmBA{*4*;P8DM3(cbrw7-%`pb9wqgam}CAcVOL<;Ob+q( z&3P4)H^W_`hab~l+-X%U(_-q}>o(DFe@TQ(-`1x49v|D6(t`NRhE=y0oa zY)OB28F`ejeWR`}jF>N_4T}A7tIWz_^LHvty9T$qPe5(HOcthP#;vaGpw8c^FXaXH z-qnLc|J!V^m1OVvmP!5h@l@n|AW{6kyP!v}TwC)U&#PeyrFoH;2jPa2nh`xwIWYdB zklJ|MJkXRLgXGQ7%d4>9BOqS4GYDHbCu)uosWLJv<6|LrjIMCIgVbiFq1)c%bL#!k zV31sN5tv11r@m+qJ<{rw;y(Hn_3)`_l0!3vTguQs=lAa-;;|z01sJnd4|pkcZ??K6 zjQvuvFJ};czFSCt5(-5~2|b8WHH1FPa*laFt9!b_Ma{70OJKN@MRYi&p;b!OO1DJH z6ZNmSgoGYTnOP9`;cUoGs?p(eL_<0sGkOT(1I<)HlP%m=hz1kB2AM@P=Df?8*>}*9 zU;<=XYWnpoTpFdGO^uxQG>nCkloZzuH?@OvvxIm}m1zq{vvxicT5beS>9K#%}ZBI^SO|~JwB#zOx9tg5E z#>HzzO%Tj5(dT!&`OS^=j#2}d^-Ct1Fda5{zmv4#h|=eV$LFiQMrcnHscIC$_aY+) zeagprFa2lzk(>E1>UKcEZJl(P@9|Ekx2HhJCGgzXi0fFN6>)LsZ+j@0pQz*RvWHss zh!2F!$qQG}jlXoRW!QTRiI3-`%w)zy4x}Nq2hjeDBelNmv}$7@ugy641jCZ49pRS& ztIrK7Zw>IHlwPFKvq(xxnzJp($lNhI#wt&MC2WVO?pf^k@OS%EoH&7Q3M+wOv7NXHWVD zDql?Sa!-SoW&7cLft@MJ*Z1}&y5l1eK2zH9rAwH9{KQ4PevOQ52Lp?sYU2-T>C>BK zNN$oLz;c>gc%sx1o-fW#Zm_Xkmb? zo_2>#{=R?Fg^N4qv}7gEFB};MS(09q1uJfm#29&wGX#nmPjS{U+aI+&w2}wffUInW zV-Tf%L&u4s4y|WFy23ufVK!as89g{RP85}fSfYty^}Dn#8>r3*D2a>2&%mnO0}9#3 zQ@@~eA$C(*KVrE%*7MU&Ki|!8|AOiu*oedvs_(QKY7GA=;`HE2yc|@oj;l+y2yHm9 z;xoSLC?%tgb$fX`6q!YP0Y=^1^!Z%b6$Ta|zMbP-v(3+DZolfh4%Wb`lJFyJL-Jal zUZ7Q*Q@t4y$e8UK1C*uHW$l0y&_*?c9QXvr53Da_?&m6T+_n8PWR`I!PbO%1% zBpJp~0P zP?5Ko9x7dZ)92>w8K@qdD(S{571zbJdO!OzZMe^8RKKF4In0t_1>b0%9O^`NZG%cY z&|G3*Yh0FT2Rde1M6%-_uro^g0ptGkAINq+YIb2NK*2`{>gvYOVo!G_0`e~cY{Z2m z``5STy5rvHA@#K)C+>Z`ui5Xzp@9b3OFe(vH4jAi;5E^wfg<^CG*rz;V!q>kEWg5u z>@d!6e$t@bTJ-q%m-G{#i=6e_$yBsUZJ3Yn3180*1#sMwvwd)g-*863XwghsEOl#_ z5&e3fCyXK0nz@hVcqoW>7JvVFcgOPdlhDj1!PQF-oL`n{?~^ECMFC3bA$JW&W5Sbj zX!`q`i=e7(DJ-!G)@A#72_ML(tk%4dpx zZ`gRrHFDXsBx9e>UhA!4%AGDt-muxEJ8dmyVmG428t$YH>|t}?QF;A0A=vj`QejKl z{@HhrGsUP=K2mcNSG{aBT4Fh&6+CP7gdnf&yuDB_frDxl6xmV~u|?u|Do9DH6>dhe zy=I^qyM=6#q8QyLZZS{4I;6i^cEhtzdhM|X5bv!W*n8pA)1R}7)a@a>ZhR``sX}y9 zA5hw+w^~H#P5ZF@zL1;FDjKJ1Cni{OD%z~dA&7?AqvJe=2u!tEH+(%Y9*05p0u`kO zi)W~Gft(UXA}hj#G;BMLM@Rd{G}|Z#a&9+j`s%B?=YdFL@eH3#+K}m$f^Q=(UBTWP zsNKx_ZTdg#)%Rtu#71j04wqin?)x;MW4*tM>w8yKGC@=*-E{h_e=z3)#$+*~3Qg@g zA%yxYX%43CCbZH@wV$U;tpyok)wOan2FvOe(w<-Qf>ut2!Db?u=Gn4wLHkX0yNlHi z#qJAakC>QI5E4zu-Y@pixs_TvdPSv}qj*&EXI=pJ$zy>q3ToTDlI0dq&G$de&*?yr zGt9TaU4G0(TTaUd^U8Ls4j+qyPoLKApo`&YpOY76>gl$x`z?R)s?CAq-x)=Vd*3`< zeeu1}0{LUl-L~?XxN-${-h=sj$r%wpx{uXVRr>aGcC_lt8HToaB4jO)sA>yUp%m26 zsU%JI@>;b@ za?Wi3Tzq=KN6}6g_9SNU#dX!YefIL4HFz6H(t_ZoLD!1*{eh?n#y2{;!k4O309oLtUKF zl;C>itVJf9^oL(JMg0f+RI(z@sFYMIlnqci6ntMF4ZBcW@^M|qSuVDT*gIzzDEi+} zZAuDLjmSBgp3^pb?&<9g<&XCcTHJT7WV>puurVu1sb$!FK(07f4uIA(DckdAblKE6 zYKl5Icmz4#e$(&f&URlp$Nq!<*BO8NP?-77R9;tLQ822R_^x!J$<_T$Ks&=_8%eR= zrK%zxv6m@a8)=I8WAjU$#7B1-xs@F7_gQ}D@C(|cbOz5rVU^!Sk9T_S#P~?J-S#>Z z)$xx)E#DT=UT;%)Z=z}Xrt>$ux@HlrXt+(9GO$+YC$5l2q< z-ea67ms2VhVw^FgHdxr_6ql7U`gi_ zzYPb81~6w~MZVR_^OYYrw=mHhX)U3j}izPk*X~ww&N_5|rl+mNp5iE!nnf(RiM{({(?J z7vm%Qa>kg_SDiz9O52tMW#nBID54JSFm~;zj8r^%-FtTWmp4B}t!nAApv!r5djWa% zoo9R_@aXZ7(t_aU<%Qu|aunrwL|)5*(`668`o1gbG z-7u3$w|LrjowKsPLoJ(ZbVr-t76v0LyW^XF==s+){aRqD8}8qEc7J`MmeU%O%<3}l z^X)sz0#-?%=A@T-!`j?w7yh6s^-0qyPL?HyHZfjk$>fq1d|fG5IOJcz=CClc)AX}* zi`dw2rqx`0@BIBT!$&py2Z6@*m~cv|i!^ha>lYsCc(VNZl277Gf3aH2^qr=A^_@j5 z@z3p-oS=L(*%M+{LBD)C^(56kaGm_rKJLpU3y;|~X}(ARd}#LUbnxqyd}L->Rx;^P z_m#EhK2q^XbjbWztmiv^tBhw;qT>_4ksqpzzBJZLzY49dNL_I9d!;tpb$$C$%q$Ze zs)z`elkW=UbsPFoc0|Sght?E>(z5cSf^q^3Sb_WMMG5X^(?ezK{sW85*xx&km)A=3 zSt8Dly(~S>dEb#qv$to$FR@qfL!PvFEB~83YIJTE)0sO=ZCZ8$OWLn`hHieosqa_J z&m{kw*}t6=O-FY;*|@PUWaey|J+=60;1~Kcc6?O~j2?$uZfo=?U(w__nX~EOm>o5F zW1jS`mzL5Ihb-uyINr?5%X3=0-^eJ;QB_Yh%J;*-{)cM&&6(x0a_M;A#CvWxcaCUk zXcJD@Nqb)$Mk>`PsOx}?f) zg z00;hWT~M;|D#sz`Miw!QH~`0l-iM{rP*_#9)w<%H(4h+cqow`dBRCgP&+%SRU@0@W z#j@|eA?q;ME>0^5Ik0cuThXAH4XeK)tGtmer^C(e7%(Uoc%vpJx%aOJ9#)?zSuP1KySf2M+x#s|7q3>>oQ;{ z!=_xn5yUNv?+ltIh2yFwsqV(~m3-$3qQKHxUHvRohG)FkA!OBKq zWeMfWb+5BJA>Ee0YhoBWpF92iD9vNzHyCJh~NyFilk zk-H6vVj7%{zj&b^PPDao>*G^=pRsR@iReb56GJ32qz8&JD+-*j=Kvm_>YRZlMP@ZB zAYu;i7$&`vXVCLChpVz*p6@Y;62G5gCYJvn8gln&lkr;DK9>$M^?l85_nF;gd`eKw zy$B8H~V|x4-*e4kxoS4J|xx(%OS#a ztEu<`dX1WgcS;|bm4FV&IeR!i?DJa=sqIK|JLoHlC@gC>`p}XtM#eaZOR{XllL4>V z1X@v}o9xws?{Isxb_q}r^62IyO)Ww}`|3Ah<|CNdx+Udxg7te?I6AuU~cGUdl@ z{&cTym~~u!jjl5)8No`3Cc{pDe_X|#+Kn~i0YVlml!FHTnk^x(2J=; zRpSi%lxp6Ef6~e{B#qRSR?8U3L(wabw(0!R-T# zHTr56Xd5o2J{pHOK>#@LmSO9GUe2Lke!LG+s|t`R5QL6zH46ptKqh62TgoxqjEGwP z<&z{i%pXN+g6`Oj<;(Z}eG>Ckjx<`nLc5Y6e@>xnZ%k2FUILS{_1dc?j;WqRIb)oJ zio8ygPHqCm3ReiJkZ6pvzkOX3M<&)e$qa(k;8e?Kv1uHhOV{o}JBXTA6q5!yjLPXDP6yn3>>L9CbEg=cnZeS zCh^GL`=S4wVpGL+8MJ*C1>>h(+Z^{tWf6qNkm2zAOWwiAMMc@fZ%eN1GdChL{3sA z!eHRV2lM*Y;Dotx{7^v0Qelf=yOI0f_g$@KF5{pLn~L#!WS1X9li(BiMXvZ4qaH7d z@+b~CRXq4Rc;u27neJxWu76kU!x7h2xLebQG3xA=yE9I%1TOh1TRrPhN!`nbEW4KQ znW_e+FKKwAveV-4E{N6y;YWP+Y7;<+0+sEcA?<~w73rUtW(}Bp;hg%VB{ecw@NIR8 z6yI`pGK0m(^-!g`=Z~@}xM~lF%wmVe3@_3j0ZLPgVlF!17jfnMO7#zXeaS9gzSe(K z$}||{q&{uOmX2;5pEsQO?$?XNdd07nHC#>rDFGJp zQ^!v*=&F0=rCVn?A+EHyz$R-L4O;;af8>d@rTjobG=h?B@F3(%{Me3)wjMpayy&BQ z_DE8y4v&kK>|SKjFcifkvGmoCwN8y@jwSj|FsZP8)fp0O;fV(}`Wau`gU&-a(;@yf z%>`vzJA&ccQJiX%cT4rG*ZBII*-{#qTfYo(|O!GSu@%Cl^ zC&kl05B0=8h3jEY^ae1}M8P0lzM;!BJA;Hys9(sRKlz+ z?&1Irb}PZ5p{lj*cKZvkGt!%mepc9c(FX`>5=w&F1E82iZ11@fe|ju{E#qBBCQBLd z^R>P3is;K@^e_UD4;rG)t>qWYXeWxz z9Cz7??Y}gGxVs%HY(dG6*#r9|&IKlIsx~!x*Wm2MXU8TvLKtkkXSbia+ZkweAa%*& z1;zYDczPbR z!LxJZOWhH>wIbGSB4Tvj{eaTnGzPN) z-PS&R0`H5u`vw~lBP^01sC|Q<&RqCE!(bO954%^?mbAvk)=5*+m~(^y9Z}eSVb&{~79<=wxrS^Z#E_0V5ay literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_deny.png b/vendor/github.com/docker/docker/docs/extend/images/authz_deny.png new file mode 100644 index 0000000000000000000000000000000000000000..fa4a48584abb3db280b8226d18888cb0539de89d GIT binary patch literal 27099 zcmdSBbySsY*Dnf)5~8$(2q@hlA|NS9OM`Tygn}SQH&QM-Rir~Y1VN-Fln`m8B_*Xh z_Po{i+k1awpYz`y`w4>|{)(SmnBm_`vP$p%`-!N8Fx9lYaf;>N>#OjT;s1Lx zpD;Aij7t}k<^H~tPiU0qzn?H4W4u7EAvq-JEXGqk>Gi z80oWdry&CguGZU=9&|Q?1@>}#Eo4UJn`31bFJBtT>(^}eDrlWvy=|))!rnq0)XyxV z#^yfhxo7oI=l#R`) z?Q5OqQKswG{N}J(#MdY~ndle|sWyh7U|Z2E$(U+M4Nq8->25k+U*8!r>iF^gk--?F zax(is{sU)wh4z*t!Dr@;e&^*vC^rS(87+K!>m>T|*}`QjuL*%nbHTg1a*WjRn8Wo= zWjY>LLPL9)(!XCwHuYiDHb(!WgPf^pdgtp~3ew?JEU$PiaG%WQCb|V(zJ}?Y z%kfh!ynnvoSW?pGgpE&M@-1tQn&kaqg+nZS>R1}DeTg8;v8qzxgV~tF{&#|c2Qh#Feh32>)UpQAunIX$j7lRR*`Y)mte)jbLcG{N6trxoib-BB*ddA zZpyEYR5%GJ+i<eGv#=TI>R^!uR?-PY7+XV6U;q3cY^EtGN4WB4W4;CBQ9h_{XoH=eh ztFLn1%Jx`q#B%suyN5d{-It@x^#YfaD6`&m>*3y%!@;J7>)yEYd7IT=M#et;E$=1` z?yB0w1c`ZGG>Pf$zE3SMExNbhRo3>?6_-FIR)da!hNf6b>g@hoALX|;p#k}hj zsJlh>lG>gItLLgQ#3d$LzVKctZF3x6`SCvg@sBc{jcUim^v~N%104=GEp)4%rp-O% z3Ob=Oq3ww0P^aKEe~Kq&(N1#h8yS}J+;_q_HQ)ZB^_tZWqZ$XBb50+}Ix3ADF=EPR zHr9WB&VyS}+0UjB&(3N$Q8RLFLHaVa@H<;rmtK>6K5Eh@Ap6-XU;uLvjtx8giyf;%WV|V4p z3=s!GOO^eM^gyk<^YZnZmfvZ2PWC4ApDlb_SjK^*s)5XVXdyn+3}WE z(iIL}?Slv!u?dH6*imk9_XgT%ybJO)^B;a3{rm>&La&TVK+DSpWwDdYzU|D&a4Ny{ zy5miS1hFIk&2M*pmkSIO8R#v%3|fAa;+Z{Ippy}ZPh%U`Qlq^;J(WwvY@E7Nu@#U$ zQf?JfwD7AZqmWlZ?kL*(bSp(`e`z3}tp>X@Swt_6U8mQflC4#%P}j^VQdW>8+n}Lchp?sL2MlR508JOpRC}c z@J065#>wDF24P@VWQ#Mw>x2Ki54K z#z)_hdc1t&dqhX{>b+`}&aK+C_k41;OAsFCS~q>vboYm`W5_AS0hC-)k{knEQr67R z{=O_Xj~BY$eFza^WL*+!%xo(9al#*-E;20(R$dKc-^8SzQQko%gAt~(vC|rUn`EQZvXD#_9ETz zLZ$O>!)x=0JIh+b;>yu_lcA(+x;my|{p-z#uWmPvOuxGM$T3p4!g|q3pP1UlwsO+z z)5rcc*gV*S>$Jnu{uQiS36YVh- zG3~JM*4a8Cj_rXwlp#!;p8}@iKfcYtiT4x39;AE!sM|Jc`&ULpJG=@PoiysZO+;Fe zv)WO7{fY3$lMal}F_DC+n$};w3o&`@ENMP*wZ7ZH-5S%-Z~>n{Saz(%z=TnTaLUAj zj9Dhw^;fchebZ*3Z_uaVak#&WYpa_+^(}q==cuWS9p>6eS1*1}4WjWHW}x477lo6$ zxz#PEx5mBFK!XZNRg7W%T3CE^!Q*pZZP!@Gqua)QJt3bOroJ+J?KggD!#)iCBoV*y zeyFYLKwY}VQoQuZ=!g4PLz>&#^rX2-Sv3gAtz3y~NIKu%d5~({g~hmD_ChqQ9#DXK zwsHz*p`cn|j@o30Y$|~+ zDzAGaKRsWatr#+V77|W&ky)Pk9!lE;9oI&an?t88^YU`O5+_cz0exlN?sEr)k!1_0 zJnY`ujGr!k*c@ii`|xW*L2Z>!Im=#Q zs-gwDx1Wq;7A9;&Com{nxrXPt`|43L;kJ?cIF!A$XuKwqSj$5f=H<6Zb#r!N%C=_? zzMo%45=;g2&}-K+S*J*^_GAR{%Q;8zn13MUH6%^q5x;>ITlXnB_h5JPChuKwg< z*xJhCaGXEI$&Zs05iXTfpIBB+`RBhUI1*V~u2#GX_4iBWvx<(>I62%YeD=(gE6Dzt zkm)FTw(9j^swa;urjMchm3g0ec|uuNrVw&`w^1gox{FmW;_WQ}*FehzL%5Ouf-qmi zwL94|+Agw^8DvL=g=-mJ-tgj zLc@hV&u|f!qHo?(=J81h+oAE$o$qxP#qVoVevFTItLJ-vOzgbNJT?0UOifAH}tjQ$>^Zkpl^1>I*sRclu~zd%V`C)uJ8Iw zbh}p5=+?7ZQUe9o=-DVacZbk(g-!|Knk9k;8j$W~kJIhKGI? z^_O}R+k_5Zd|CSv%eNj6t1S2?@L&&$l|JZA(64b( zjhH9AY>^~jpB~GiF3o+&$Hkx6dV4jbXXeiDnv5gi!R7T{|L4th(sU8Ro52BP6T21n zy$|DA9;CMQFB$p9-+cHvXreAia2N+g&eK%Zzx?BU&UnY1$G0RwErG;UNhpt*dtMX@ zV$T9t#CEx+M^?Ytj45L-*UGl2?I#bdwB;~7H8h`K__XPH_b>}Pcw zMKJZ->L<;WH!kPet_o()u2s!kCbJQ1>lAS@KE#VpD?w{vK7WBIG1Kbhs>wLSjM{y93*9M|b*!-@mvc3Z3Q{hvqwMo5h68iSuK_GVR& zj~|#`!l#o$;cjGIE59l2n&=kLcyTFN-}ibf53QelOr_UyG8x|Ub&)B?`>$nxHob5X z+P~W)i7%3wqr)T5=XXnJ`*-Dpn}YF}KNmw$(P|~9kpr$~?YULG>=j>XI27k~ zziD4exo~(}i>=!&1`TJQ5$)cS!SFY?Y_j8Ge{17OU_>eS*r1|DpR)_%1fb9cZ<$a% zTN@LsEo?I1-tJR=qfRYHYmM`?nY`kDM`)CIFHXbv*1D9-ECcUUB>^7I^=Pv%3i2@G z1kj)t@dO&ZDUqU&T*55j3=>JyUhwy89tv9@P1g3$h!ig;h8i}am$*ashfiV$qZyrH zG_^GRWrICRKB52nqoiiyRpg0(m@DlFjtY;o`^EQOf1ivFL=+F3>>PX`AfQrr&{OtyoG#~qw_H8&yz8w;C*QI@f82?QG!({7#X!Mbj1EX z*$12cVCdF5+r<70CwWV3JPO`so2J>{q{>n*ht_6BamF<3_kvIBu<0xD59P(wPGXJ= zHIBbCrUhpIINDies`FxuIhOn^nx(dr6XqHB!n1+w`k^}=0ZQ~2$BFE&LcHnr&U&~x zcdpKJ;dPJF9VSKVL0?yQCIZy%MQO?j1-VH}#)i|Gv1)xY$A^+Mf7e7#4>KOf zPb)y2YWn}*2MOXsH-?P7;}(+LmD&@yxBx4~QwciAj#oQbOgD!(Y|Yxdl*X?2ft^Gy$L00sz)O8&4ZX)%fY{2+zuvHDkBtJ* zul3={X#Dh?X<)9*)XyNALC>F2GSoP_*M-M#a_D{NKcEux8ha$_wA5dEn6sFZ9PW>W zKWOE;G=!ZZ>gk5iSYasqyk@O<2_EAqES$*_HL_nHX%f7=Ml3geMX4g7_o&tBpbob!CPBl?IRJIVMv7!yhU{3EsY(v- zslB8@xz8uNuWh*fE(W?(WLV2ear2=gOLnc%=b6^9s}9r6I0uV4Dch4p2A^iTZcjRf ze11vJm>SJ|74D=Ke4qsmlV1enzQ*N7+`Ug(cS=k?J9?7~JWUn7Ov(3!<=y0((q5-ogh=+5 ztfJ!f9S&u5nF#8_K9&bXNkY%&QJ1F+jp!Pp85A@E@hCI^ZV#kM202pgJqpdK@%tIC z*3Ftz7Fc#BFh{|%W^yyG+VkkSE1+Zq)|ELfs^w}GzV29-2hKY?YF9tdmp9WEk?=VP z|I_}Pmq2agLAp8L=My_c(D17U`0wUYesO!tOR^S4_47HS-P)s!zkQiRJBURqo1xgg{nM_R4901+BFT2b5E$c zuT0k0?@USPQTzZ>>-g)tgmQ(&mq-aQ+2w)!oSfSG~dF3tTp@^oAbC+Lbt#!nhU4+_Z;DndM7@6X8@(w?mX}BjxNx7k2`HQUagss z|FG*dV`7z9{xB8?U#=Gse`pUgb-}1BF}bh!S5i*>VLk2+{M*bVS4mi@LqS>U9n!v9 zpqddlm0fmmu%CdsZEwO|dBSa(1f3!D!S~G}uS>E3YjFHMVSu0qircnH zW6qPFeprbH$5!s{`<5bYmV}!T-p7(OX^ynSjEdjeGLN?wk{cDe5%H7z(Dw=YAwg_A zkj!iEn{`c&c2{@shzwmiFXo$J;!^|N=qb7vO0qF^HIev`yrP^l;O1?dj zNAFwJ+gJRP-(~vgsV#W;i-^7DG7_y7eY&$W;1fDqNBtcHwYQfCDXtrQdOk3OhzSbS zTk~CgYpbJ`m1AAHZGFmOPd@(qbBIhnIsdl(c&9Ky8$H^78eh-52~!Qz$cBfp5JV!& zE0Z2uVb865GeZpBk3nixB=9_*e#zx1aHqq#{2uUD$LdRzd@8r?CdxNK`^tH4^wA+} zZm`Dn+3Id17LB&8z`_1ZM1?yPbB;ank%`b)RLtMnQQWk%JfsOHRui^ttWISc<+Ib{ zgB{(ZBcarzlSjRu4=LKQilmd)C_dN0wp$we_>8fN^5#R7QU>g~_%}y#pr|+{y8o_y zU{L4rV_ffGX$SN=~)Yvh^P2Eo<^ebEgHo zR4aRU818PT+9C8a^du)t6fHRU?@Vr*Hvd9XH{i~=9N!>xNxd9l_r{dpImr!-Gq>OZ zEcK*~<7cRSGM2~kV;R&YGh21uUC{}{Q12y|8-vAgT5`Ju7_$;b_kU<$?%~wT)96Wf z+D7sr3RHw8b<~aPiC6SWjD5Xq+BEVElD$t|MK(`1+Guu2(#E+4N2%#Nv@=c1Bcg;b zZ8|}f?R#P=9rOHQqfn40^Fs!dG5>{W(X!j8i55*L^ufE2x|2O$GUVX|%_Q=eZ8MY5 zsUDkP1`*!KcDW9BhN$3xLB?1_%=^@1yplW48q=u>6<1HR?D^eDDGe{7+MW9Gt;8@Qsq<<~&o!ReCg2UPVQO{sLClcnNEH=>o7EqvUA zeJiU^#AIr;ZQL{VxkaqVgn#icgw-~4JK(&~c&F0Hpu=3_xY#?sIUOo~wD-F*meSB` ze6))p?X0$c#OUz`Mlz_`5@#(hiL=jQ*sk(F{(drE7jD@bz>~I^(b~26%P>KE*3G;-&a;p@Zsv-B=+T7D7P+^fU1(Ir^m%&t)^rzDa#C`NL9!fd`O=#Yun8;l{5 zR9#rO#tjRciR0_DYt)yAO2@E7He@eVmZ+k%&5_gN0iviODV@+hv=1W+XsvkU+zo6u zx3XVUjzckazoei%iMdYOgne-^EwQ@Ul3+Ek$2fvoWZO|MOFpjr78l+nV;^JstWgY$ zfGWL|*PDq6*E=;-P9xCbNx47!^;|;5kDcc#;)W0Cv58`@tu)5`wf&!w zbzW|$-h0Va7YUossGjy!&n4=^VN;XlzJ+t+qYVLkA+GzB*U{>2GHveMz*nNX)Ta5E(u{SO zeNVY58>rd$dKjotT=mCgim!NEes?wO9(QUa_l>5BJkCs!q?k70bNr=JJ0IA+D}l1V zFw)7%E$F>kF;pJ&LzJ-nL&?IEZyTmB>^w{h2?NQV9{}OndDQT|du)hx^{43~m9#ooS9CdYF|yLV5$e*$uN`3H)IUxQZl?=>T7 z-|p+lD8`xMQf63_pQG^QI2V7FIZBfDu{26NkZaQT5}{pDVtb>s!5C^^i4M=4+rXbo z(__0w#*4Uhlq=i7xHca2Njo>it+bix`vk3+a;`D=KGJe8>3~r&@rdkF20k<4`)>hJ zSET!0Mqj9skX)w@F4=sM>bV||CwWydw`l}_nVnqKSQqEb@6|Wik^@(&xv5W0NM4c^ zfK{~=aj7Al^aOkIv{{l}@Mu%moUBKQ+gQTgY`%5$1g$0D{+@AA*M7-m=})_heKMaU zb$q!7pN^A0aQIEZo?&brnn1lZJ%f647@%U3aiJ@KVrGFf4}bzg#-FEfU|S;Zgm zjTNOr&@kl2g^BV+xQE|-c>42h?OCoyt{SUZD~c5WT%KXA8}u!?N{@2`o)J+;6SV#K z?!|^~iwv5e=+~0v%SPSE1EJCy8YAbx?W5qaJ!=yhvL#TkJrma9&D8`QW>tpI-Gcf_ zRzIu1|9LQl+c&mxW$$kFZhcjifM%VC>kE;jC8WBWuHd6L5fKr|E^T`8Y}cCy zzpu4Yi*aK;_~cUkC~26^5V0mk1bz<|zEPNzqDVSj*Y`>ET3x)_AmkI|z1B^FiIW+Q zs9rm*@6ji(c+E!1@I8pFu%Btw3SaAHWxA}?O~aN(pPVX8(v8{MAjoof)h!J@d*yG`u7_JWsR2g#+igB!%cNnB_pkl<*)ep&j+@MNcWK2E5GLZDv+}jb<#%R zJzT&eL>48AFnaLr1mL1-u(q1NH5(T31`jG5bs5zqP@!g^j^^89O~n?|CMn&I&a<^NR^rWCT-#XsPZr?+ z^@;!2AEeb0yE9O1B(@N{NbGHJsmNJJEm|4%%)XHF>Kyp}|D=Q+EM6*n)UHf3HwXO*Vu2b#|U7EB!jiC1oh za*F6kEjUJg_*dKy*}!Gr-;}ul{=P#94H+3zh4n!6`HNS^qMhjl`U`Z_`RyjCaPC=9 zAYqAy7Z9Ad7@ldMm?WUO`+KtZ3_vI&zVNz_Q|-ep%8fZYhZE=ug)SWfgWHmmXK z3VKFHd4#{xoryd`V$6KX zeyTcv`px&&#xX1gGv$aybX!KKv%NtId6#2~Pc6(`s9O<@7(^gHv%J0I91k71S3R23 z79g*aB5~*yD{*^jK?kWktwL7Mquqgxh0a$QadWUA=fG{Tmm?+v8M?kD?DC(g!-;8o zF~Kk3nIN~D8&E@o(EYJz-5>R_X3~KlMROVmz3qQ-kt|c(7lYTTm%i~`pB%T-%cvPB zwG5Jhc$=+P?S7?#O&7m5{;31}ZiHVuv|V$??hP_7-v^c>z+m;UJ4I}gr)#3ty`v{X zX6Icw$UwXR4dmVo*6$TWh#tzSh$;%I|G7Y8e=g7sc+2?BxsFc(ozV<0JDj*Vr9mml zq35Xx*{)@giZKF}V|x)^a1Ka1hS_yGP;9=d=OqrZt7Qh;54J(}<&*v>7x#VKjrfPq zuFRMEbKNxV7&o993_4G^$_PDQu~crKIT$R|%X(03h{z|bG7$m4WHIO?Ut=D-w^L7qh8nqr|NaGDv(c$dh2SJV?8HUeNQfd?$!SOS__pclUSw+4Jimi<-d{b8xx3 zHg5;soahZGC2Yr?3tar+b#f>Nno2LpQ3{6P3C$qmte8K{_}8TktQyKn4%i0 zTjmvCzIJxzEG-;0+R5vZfnPu2yeG3`m4_7>s_?^$c5u<*+Rz4Yu=LWzgHVQLp^*F*HI& z(R$uM1-J)lem1l1!+9ki`JT7f#ZU;<-oT$)SBm2;C})v>4(rZ~5hn+s8*ipHWH9}e z(P4=<2B+?FyBh!iDQb}Wzu3GF=CX$f(t#d|a5A(2e$C~Z_@j*+^e)$1%V3dC`Ct5# zi^G<|pyCNM%a?L}^^PQ?##cv#DEKK{IKl}ZcF{jcHu0yho(l;~>VJ)iKab{q_W|!V z#;h&;1>b_oEMX|0B(84=(yYXZ;CM2kzYf{#Rp7$$8hHpby!C6bS=Z}Z76286Rsi8T zQiA!j$5vGDyW&bE@Kk)k{&3sMJHs81ER;{=p_I&@ZGR6IAs67i*vs&W|H*6Nt$v_xy$24n zUyx`;92CKeUWV;@f`y8%cM&V*r<8b$4T?(Y1aoRs&5 z`ynsY50NzX%Iy1Z^5BCmEn}R=eFyc&Fa=!KNuiY(Wzk%vB!M)r0J7oQWq_QU2L@*b z?iKcT6JAG8oFE5LVb#Z|nJG)S11Fu8xENaZ63F(fJUpr(ENXWp^322wBaL`Vz-cM= z8{LBaY%6Tg3K^dS@b|Eq98gl zDICe^H8L08eSzyX3waptug3>l|LHQaAc$u{8lxBTRE@uSu{T?V9!=D^W&sf@1LbdK zZexGkc@igEt5h_Uuhh26VNPDefnC3<CpGp*zNbB(4BL+8pFasQY-s$t~g` z zo)>tnlX4l3g@!G*1K#)|c6OlL(dFf5bhDS z>y&Z{oNqx+?$dV@M*1|MmZfQD5H^-=VhNYC6Q{wDCaCV56~|1a(q`qOm$r* z>pbJtQ|F|lj1F`|g1+Kp=b31{IJE~0p#6c7XI4(u=S&y&3&0UlpvIqITKJiCG=V3? z2sf>Q`~^lm5;iN&m*BaZD_)|M$fC~ZX?t70k@*WvWA%0E$W}j6`w~%{IZLDZ?bvvA zYL(CuwTg?7-?$ zCws)-)IBG>J8Z@a$-G6_-{o3SJ2!maF#!5D?lYA-uW;YF@g2@!hKi}Zz5OPrkjsSh zv7I277w*V5FZX7>tbv#augCt=J#+n{()oS(Fdc||eIA!26470khKji$jPgN2K$%R* z)mZm=Px?Jxn?d$Mv=p}`77_UZ-HJS6=ukrVs4F~v#|H~3JG%7jX4jRhOwEJ&HZXix z`oI&@INd4sUKCTdcu86bRjM5h_x3MAt)))4XmLIQ*>j^AmhXZqg&N4G|I&@$9ONfW z!D!h}GSX~Bk5=BhS(6?au&^l5>K>}JtC`u-HW1Psq2mNZiUWc&2a))Cr>`7Tc1)xN zm4xt$K+d^A_cs8#$DBwC8rvBP_0NgB>?txB+}M6;#4iw=+3!B|wPC4OhF@^RS>`(u zlp!WewONyh4jp0z*bJ(cKjIgqB#Y_Qi*=o5XO`kp4?HdNW-Hz zm?<3>of{8t3{tQ_8H|#ImvtCn%~r5}5DM-eiu_86>er}*Uc_=$W(zFaxrqEp+ zNh%5Qr7?Qsd?w|?zC91y*A+Zz))aUER_tRnuoSHZ^5V9C^+=vI$qOXh9_eVc%L21L zsK^T%Z#aj#5~D8`jmP)p;8O71j=4Q}DQ*hkL^c#+T6#QVBd!j>@jcjDSf-J0wY#VG z2LNZm2hAq2nkVK)e>|1rfJc6n4$uXC4BQ`HbY3@092LR9crJCgnpJUVq%(zzzla`U zzN7kFAfZh~6zl*OSaqVIM@@va&C*J4&>yoY1^#lsl2Bn^4CDD6Hj@XM&!3*t$Ot?A zak?T`CML?E`8N->n3r&-D~~-9t?w{>gD0x=#~Pu54RhN$tjk{@fqJ8wnq3%fuL}_b z8H7h)WNk{IwQu-hQLht*PW(Yn0)R{!UhzNyLVyAZ02tJCtHl8Fp?okgkRXD<&DOtJ z{a{mMnPXK2lz*Q*1qJKeqgF8ck#498N&-*l)dbCdS&1b008^dh{(l1wp~y|MpDDBc z`($4PC?!#tA#VyehzAzIY^$#o_V>vM*73b*hWD3RGb10lC9ESaN<08l7e*BX}Y1ax5;18R%C?Phym9rczE4Amp*IkdUL*fU6>TFMcsl#8=pLQx@vS zjuC`<8KAQaI)OLTl2D(UDxwb+)M9p&t_1O+WXiZ!Ai8(P|u?x2vLDQu_Pvvf!uxt5rh>XJX!`5DsaU+aUSnOxy_Ig_jEJOAbHep7|OuRP-1HA z=a3F(dr(Aj{A=^7UCniH84^H-SAf=M0WcCtIDVW7zk|6bnMw~Vb0#)(P83wpHa^ul z@Zy$K7a^Ey{_A@h2E&!d8^|SkoM0t_$c3cLNkfQ>p?`ZTJw3!(3Dk5fp zEU{ch6Im`gW#;xDFSoWpQhO(sKAS(P?)D4IO;AY_MPX2&m`cb|)|lOV7UB9cBozLXq8}dbUvHw`b2L5q^uPo&mUopg-4#+aR?c4S=jTdatsG!v5m;3dBn@_ z(OGds$VCc!BMJiO!K&qJCNB3e&Va98-El6Wr>Fm=n1zPM_TyHIpAy!zb-69f_?l2HZwaYT0=v_TR3J^%F~5-6Pp`aqRI#R4jB&H z5Vi6plFjH$7fp$!T`Lg#Dn zJARpP!X8CJRaweODXE*Kk@S&5nCqo}vP7R8P?-+z3x3}7QP2H3K+WEDhP_`VPtQ!W z9)y-NbCTA~y@n|ex1WFm=%f9o{LH%D};%Mi^b3%nbpNiuFS_w-LRWA9DgTMW;DZb$x zzA!CIA^h(1hdU+4f4duPWN&*^Xw*sg-*&cwlbPv|X^MBW)_?)m0veu=xI9bSXfOy1 zf7)zQ92)zJ@hk}EY<{7O_%d8-mIdMZwTZ8F*`|v8cNzcmS9$_m3}otTwmoh+KJ(`X z_dQ#kIuAk884kx^|DI5R*&0FfTl;oaEVRSc9x*{`NRM6?Y5hAF7c7Ld@1e{Bg1_w> z!Dfs>)H8pVg!#4s0zca_)BedNDL88XxqhZ^#6h>x7v-e>pQ>;(e_6BBVh=a z%kyyC0Vcv^A!O*d|GVx;iTluFF)IPnOC=o|Yd>Av9|&yXH4}Eb+z1l$S?+slytZQk z63Gx|8KC(j&7|9Q$z%3y*A8=12(D$c2stfP zWjQWes4d&u$g}*u+mg81`3lrI@c^O*4d!f4WcXl+l*jsWY>ELyPO`bu8sy{It@e6{ zK&9`?5K;U(G6$KacmxCf6PFLEhW4+5?Sm*;FeAkT$0-gd>0-}~Y-6#bm78x-x5eB= zcb}Kt$kb$$@Ncq|A56D1NvYK>L>(f@f z*WAVqd06e%?G~hbFO&sL6-}l(p|;TSW0za?Nk3m3%ME*)D!|=N#bX+9`hjV1TPa19 z1LiN&^y|G0u1*^lazh*KO}}?hgc51VwA8T#B$J3zT`6I5?B_^$rByWXZi`^u)uq{m zmsOR*rFFr#)CzLa3qZfFAQ3er;jR$=uO4lnJiEYg-^c=Fr;H-H-J98=@5m$f_g%oI zt0#YFwXLCw+3HS0BrD`AeiT`}Ak~VZ7yv=&+Qo9vdpfV@!T6_+2>|dc>!G5;vPa!T zz!{4`tVxI1TkeVX!5o<;=oPC-Nw@{Xn{5K zavq3TtPpR@gGa5wNfx5+b=rJSbUGQ5QzkSQ!i-4mJD^|ZSofsA0ROz25(Yz*oaNKTc6O4#b=Mjhak7qjREIPRf;7w$1Nzr5=vpmp4F>=5A7l6fbUHK4zx>A%;Xhr4 z9o7HR;0Vt$Hb?!d`CWi^GCn%#-2VGyAB4c5P(ZDb-je?+eAXySm~}uF4vF9WWB;;wiu?D=;C}$F!dj zi6-;khoKw)I3iH`Q%%ij{t`u6^s8X+UByfO+giYJraSu6_n`F+L)RHGUXT6P`e42r z>+f1+|1ZsV)BWjS1t}LH$@~_Yrvl6ma{_aFxe{8s4Yz;=PzujI`kE~Vl+0Lvciu8m(^DtHCLyLc=CT(Rgt^INA8;>p6UnJ~uc03inj2r14XacGzp z{)$ALF7^OJ%7pNNo78_)>D|%yk0JmIsv}b~Na&Y@MNLY`d5s-HUf8_vA*1$6(hpFg-&`7 z-1B1CEM(_yg(b6wPW|}J4je@lPQ4ROB#)W&pX|wPeLPA&*>T80kDNfO$}?{CTLNuc z3uvAuoLDs=MXHltM}>zl*-N^)+Lsei4P(>tii#TGFls^HQ~^Gvg2cDMv=~|hm~r3Y zYjjI>9fSHC|4LzSesi}x=HebWdI7U>`gjVj?N|o{yk~B_#KfPo4|qtN`xr3%VFyhu z&k2vM`>WwXsa{C5bms_Kpa#^30Wd{kD|tB~5)ZSrYS+ZP^bMe#rWM?O&DUe)wso!> zMlIffyLau%5b$g@5EoP-;r9S^M{5xBm%weX2HwvKPtWr{J2mW>R+FG1t2>O0__ZBw zf-#XIs)`X9%Ckw?J1Tieo(GBdK$qeqeWr#&#{ZemqLCDhA2jKxC(1oiTAJqf$7`VH76Rls=!hfvcAX6VaTD5#&5~ z#~~^lZ^~zHfRTGvOpavfYa1@<4;AV?I^Pw?mT(9${2B*eF+*IW6o0TC&$At?GHKbH z4?0$%!6x|)cT_WuRnt)jR9p?vT`PMV<&aFB>2_-egI~l&JI^mN8g6X@zN2?VZde@B z6DVT9%_LR)^j5@OOIh$RX1CO=ZM*z>lWE}wfC9I}Z_Ty#Gl9nP1K8R8VGY3xAV42( z=H?_v!(ClssYj%F@TC>Gi)Fkwn;nY{I(NW-L0qNhT%NUdlcF760TonZO2z@0a&LCe zw3Ny$Iv&38xw^Dah=S7?ldGQ7jwDF?Z9sCFd+R(wsQA)tOCd$HHrX2HM$^B20_(@& z^w?G8lE+j7I*VaVmFDH7SZ39`M(%7c6)+7W#e<v=IB?C5RBx!ld$tX+}my+}I4AqcY(!B|tRvFmAC4TM4(_wAb}9Rn128(luO@ia(P z3BsaTl0laTvGLHo_J3mifN!B79rTJ$5(YJ%?vC>s3He|Hwj~U;@tQVc`5;Wt)6saSA&dhRyKVnUbK6@hg!7&=!>L^&4l2BoMb~Rh6OKwS z1_#}W)O8B{j_m!a)nsW|FnejEGX_ya5&vxUZ9eT-4VI;R-$e<>`*w6K@E2JwZ@j=( z+JrfKy-`v{Nn_ooFlyw+XEo;H$mru_SdcRDtXo8jR`I@Bz*vS1eg}sjQ#L*W)1}t1 z`xr^`VO`H7f_2^1Ls<;aYqU)aHo;#TaKAjh)z*fY?6E25{K)VQ0b&$Ry!mMI`NEUq z_n>y@)Vftn7%dUJ_(mUT3LAP2|shA~$n37lRY(jmrU-hqZyZHW;h0y!l$A#oi z46h)WGdV4d^$3HMO)K{2=O6{>sXLTUK{+}fU-x|3cwu<^y6<-)kc2D`cz5WDumiI! zh~7UeXTCvjj_samzwS?UQl+dwWxW!5TTE(?b%`ejzvdqsn1&JJi~i9gjGai5BgK6H z-<-p2onnzO1`E*bkZ2U+kL@3+1`nu*@@?THqVcb*3tm8lA)qH(4!Zd(GzyV&P}e0p z5^}*M8G{bLp~dKka}z@&++>iJ{W6Y}Afhd)60k*fJsv&qyjqy?uFW?|%+y0<1#0N}u5qNxNs)A!?a)q^VG`{yqm zyIuH?(L&d1Ol;xCa`&&=E*n*oh=cqAB3KvhK78``OJ_U)PHdwG{}JNFF-q^j0zQnq zsrU;U*Cjwb!J9WO2ae3^B%ver7#kQcR=;1JVg6 zil1YWLEmTH?+^Y%p`~mZQu6)d*$MSvge{Z*H+J2dqfC>q4CQ&r^Et5GIH(h{h&qkX z?8~RJn*X|kzu{nAMa(}3O7!gZ7GBg9HD&JtisxStv`Dd8!dQl9`rsN5&O3m_6@dIS=V-ZCqsHRzL zI4+OH1as28p*?y!`ZxCNe@;+)NtwW!ufv-C{K?P>f)8%Bf&(M>{$ZE!4gvIm)YL*SfxF@zLjk!YCmzwWO1hW|dJQqsGvBDJj0Br$J8<*sS<$3VYt z>nxhV+{gBx_5M8_O^M=)Y|?mD>4L3n{;%m4CVEI;rbR;;U8WIb2MpGGKm`svn<(lz9HAna&&)F5qF30fz$O#j zn|Fp7&sP`)<45yD?*^<5o$1!$qvT{zrjXuFtvdZErS5Qv4bjydLu%s~d{}vyk&2Va z>5G~uQq_i&Z7WPp}t)KRT@}nQ!04AMn-mWA4^tH}2zM-c`SSHWl(}(tC zkxzzL&AK$GN5U^E7M`H&K$gmRYclJ1TxmCHd%gpvRe3K+a&IGv%cg2Gs3jUOmq$8? z6E(D4TwiqQkxB~qH2HAHmicN9;CqP-L738=140r9p^qJ~YKH6Oi#99P_?wzy8m62@xQkEkty-vLwEWF)5mR(XhK(am6cY_tXT)% zs3~4<)(&-F>St-3=!D>mFzN)I6^zknTi*`f}h;%@*Y|}qi75q#Xme?XJx7reH zVUm*c$$%-6-!@GY?__QTYwimm(AseFkJ+T6&KO2&O*tIeL`1>2FnHd#27AJ6Ap3Vc zQq#gH9iu<00QwnDGs+Y*8jIS+r@iN_t}*hD^bmt-D$kStBA3dnEaiPK@~f%azKQB6 zTFJRz&S%ErfBK6c22U<|PrF7q&(M&L3G%D;m*SnQ(%;h@w(}#n|7gk0B)*ASC`+ph zDzm%+9i6}LDTBdNOfGSuolvYx>Tj>2^`-ZgO1fGiIqK@+`Pk=JvO|nPZEv8puDwBY z>LtO6C!wq@&32qu<{{H^$L)asQQddHQ~AgLm&|0oDcdm$;n<<z ztYh!&y;s>QTU!d*LK(^4gwONteO=$b;QPbp^MjwxIoEYN_v?PWo{xDLURdqz`QJXt zp2?&xJ{*U)b^d)@Ix&7J#r1sFx2rJh{1~rZ?@2SsV=W94gNDuZ_TzeHtdh>%myNm6 z!xbjH(9K19>kCiDJfxaM#j8YpLB9kz`%)PWksp8_6TqOI@6niS{)@_Eej{ zZcQnmwP{G(ZPBNw_4Y7(7V}TQ{|lc7uwsHN6#JxM^n_kJ4Ie;MDL@1xRP~c)ez)*8 zn*ny);j$D^wz!&b9ab^-mlDt>_YrsatJ|K)UVidtOY>N36E6ELh>mDL#5Rv;qYerv zVbJw~Gw-m+6(~Swz;mT$oVCOq4c4oT3oMm;C-7+gguhteACfG_j>Mz@+_G_Lq(vj8 zEvJAfR1pp!Nunjh9Perj_sc-PGv>hqqef>2#i3|wgf z*b)*XU95KC-3p~+;89oZbMh_lc*jfkglDJB`KlfFQIvK@ z%q+__?P5sCBq7J;=0d*!f;531Xl$vV;nh=XgYGry^6Nf-JS4sLtHNJ5BBUt|j5~s4 z$|U4xai5T~h|0FZ5~SGWgQ%{fmi5&cWejNNvAp`|rSba8wgvDLm03G=R+n*q1xN_| zi|GKz1bpTW(7dbq_fo+G2F*x`h}ul`(kv!`CZxXv<$pn5*gRJnzPBq#&r13YkZM>; zU)=cjNym$`EFY2|?QmwyuK-ueaeA`%IvoxAQ&Fp(qkjqu=_gPMUy=++dtUUg`u;I*$=w}!?d!YT5m+bu5?qx_K>ubudLPXe z=o5_$7Mh-^g7b9BMp^0YwJ=Uy{|NM&3Y@z;gXn^eb`}9GgFqj!+Y<=ruR8x85qb)` zPCjt=0WaA;A{-tVJdFbm$9K^&Ioo(EjZo=l3k8T-r-2`VOp>}vUbxc$sCfAZ&x!41 zLd3xG_gHP|6YVm4-~f#fw4E7`3`Lq!8_#@va6t2|HZ7wwoqnH%uv3=iQ)=I3o7vT= zRwEdfi!v_*;->AMhBF+k4`;f%pp&m&HN|}!tz*u|G`aVr4C=S82;%w2@`5v=9+SxW z(T`3nJ-B7>R0cWCTm zz@|tqjje&?>#al>>21Pco_UXGGYA-c+DUxgF4JR6{3Y->#bBuOf}IC3_ChqE8PSCh z6{!&vCwvOQ=KZg`yXynk$N}~rtf%|IG73v_h_)A{afE%tF!Q7k8tV;Rr9r5J& z+GW+~-mx{%KN$!L@`Pd=;9^WZW44Jqbgpm~+xy?k0nW2HM_?wB=P?KD%!7q=w-f~V z>%hr%JRDi9(<}uoZ`dCQf%}U`q1mjgURr6tO1Z~|mBWpAIC&fhn(@!LD=QxY+{jDs zez-%|@%Tcw_XCGNLhql^pDPRclp%d|QB~(3@;)KU-o<;|N;&&o#N-@`Y8whj_m;ty zYGgas2Uv(E2lJ~##J`@=^ZH&MKFWHXra+H&+mjl3^j!LuV3EHQ(Jq?sX4O2b{>$E7 zm^*_eReMx+50g*(XtHCdFW*Vl_7)ZAWm&C8!}@bEM_OH%mcc9a4xCmMW-L|HR7Q5F zrly3%{ zgkR!PJLS>`6*}vv*_(MFkHk{p>+OituLG^<>S3bvhP*t_5pE?uznem<;Qy}Df2*>7 zEqvheaF`d#a1ClCSRA~#ecwI4z6#eSau?pO?u*!UDNB<3@G;!21)llk*8KT7QiJzm zVSR$CkBlT*IS*bK3Ub`%T2_3e6tSb#-0eI*mv6l~(a1nFd>}<)ibd3oZv#>XTzwx~ zwi^n~52Ky>t_ui0}-pv>G!{?qOzlmV;%-cz6rV7hO3hZnQ z!xQsdy6QK`({ljQaLZLC*LCFh5O(}7hn(#CR`cql4$ZI`?=d81w2^$5e@1n&?52=g z=m2VrEVqM28oRhu2NbcbohcndJ8+nH>IvQvJJrjG{S*?onOvL&ED;|s<~czwBOQMu zsp)<5AJ2+i=wLo~txKU7beb@eze8BQKoGD79VP(-~6OPt;WJ zPr~?y%B-~$iy9`{m`$KQN;od(uxh&72XyX)*5wwRWWzWksU=Ex{LbEeLha(@U+wp4 zciVv=aX?pLw8%ufe@l1kk>J$4ND5{!qJGi_tGTLDbdV2u2=iyybgKW#K}xYyVf{_d z5oOt6;qERQ@Z4cPl4Y(0(#>6{Nio4VpN!d6+H$?}W{p4HY-+iCsg23-h z&@6uS`(La0KkVaH#NRUt%&xnvu{4RU2+z5`&{{obsF|bb= z3nc_bG&Rs~q$|8m~-etE+C*k176>Zd!=5li@NKu;qB$Yq<&QJgRTC!G9E$gtMU`~4t^|k47 zarNL5ephp;KV94x1q`G;cL!M+{ciCdQnek21{*l<{(Sldos*7f57_Pf5K(`0HjE^7 zwSzo$!yFu-h*IVr{1VTsY^I5~;>VPTfl|jC8Ug9~p7$g`7Y*&ihR6A$R{aO51fo5r zzMo_Bi0Ny^Q#Es8ZBGuNK8qjQ^psP_6H_H_58Z9pQz!z?*8%-r7w9@&$m7Ae75C(T zu;hq^W3N@dh;`Sr6$&6q6ho=80(vp*w{(C49s*2aQsS^n)wnj)DT5wFIB@g~GIT(W zCcBF05$vC6pSO+c{|(21qiaE~E50C_m3SR&p7)fH_U70Dkb!7n2us&g4vb3{Aq7hr z+PEQW&vxdh1gX&oi=C(wJQ~(3GceB&jRQ~3vh$qHC9l&>Fy!moxOn}u1xrvL`cHFC zGAg8vv+NzBQscM&wzLR7?AyB)107xCxVXMK;2GR9ZyUIrf*XM5v5$v@@JAj$K0(b^WDx*A6(#xvTZ!}PbI zsvzr@!Nb4-pAKb?&Z;19C-d5fB^wd@$LBAEd*X70W;3Ap(n({8<0wkxq{`6J70v!P z&5aT!#!zt7#imRLi{-?8OZlt*w}U(3fAx$04~|(l+CFKhNaOv;#QyHOY`yF6 zXiJjF@d7&ctP68U@Rt9Y@_{;eex^{1jD+PsDneOK5qu3~|Nngr&uIN23q>kpTm=}a z>Z-oPMJq@)YeAK~Nev>mJ%Iw_+Y%SWa^5NS(B1pk(}@@`{H*a{vyFxEP8E_@(nMLM z)A+GFDiW_<%>Pn6DF0|`KMUXx9hJ9261pWVa0!W4(z%K4Fst(ezxeXkH&C3BIX(4r zN#Pl=;Ud2g;3jSTiHXOkt_=T1T0+OE#vgUo4hrZiMgShdF8~nFcbJ9)_#JH$C%`ki zl_W7cJp@qzlAqdSg&Tg>M6(UM{s17n?fL`2 zQ`z?KuL+LZiESx_$iskC!!8WK7|qb(0NSp)7^PRth=7Z>isHy2!r7XCVJD{Ao zHA%3@c#VWD%Oo`U9Aj-qsU*p1IK{gx9p6P9h?rScNaDPFu+(3Stb?O0WX97 z0NXtQ(|aQf(vdIXokMeA@hgh@1I=C1$Kt8>Z3Z^UtMIm8(6O(LSpS~%%bvDi0Zr;w zbv54H68G*dg>JA%Ukf6}ARGV;U_#T;YO^jdrEamIp`mvab%4tr1kHO8$WmRny*glQ zuSHT^y727}=G+P&got|i;oe#$yr}^3Yah4fd&5AA7z&0U_gf3tOU!;uH2vbN9OBp9 zQfg9qPR?BN-maoG-{GAptf};2*jU{r5rH!oVlXDiPaTv1e*~AMj))pR!I=WlON}E> z)$#b_w_KSzfQR&9f$BpWeAPRI88LC+2T4X_FA(OyQUs)z8@^Hk9YIAJ$xvm!PZB7F z);klo-y`>ch*DS2kTUG)=_y7!*^gLdybKD1uvuilV=arQYwrPuxGIPmDe*&O-3oOWl!pL4ZQqngo|(9JU_>ruvq9PBv5}L2A8(1Y`oSQzeUmoK z3C_aAQ?Oi=3tN3CT9?ZN3JW2<4*_-5$j%~E%+bLeMyHiKG$cqsHqm(_pUykqfu$2n z^!4v#AbO@-`$*S2V!vns91Z-ZaOaIbrb%k^q8yCDcjKu#n!SkAf+lxxS`;B%jY%tZ z=$$n@4<&GfA09%kRf*F+3l) z*5)R(pK7!?9YMpH4j{Q92tt(GaNle?{Wi7JqOcdV!SVWCuZ3_Nd)Q~3jYaI;N3muR ze7Kj@nA<03d3rh{Bh(%lQ1V>M;}*v8$c7Q}Dg{0^#AIJst?&POG*MwYabr!KaV@Q| zVIhOH-}}?T#GaH(#)8Pyj|U}3M$K7@^K=Jk=t^pTvoJY(`*IIY&wmyc7A$;yGZpNi zWrLlP_%1Q+Ovm9NHYPT9Gxs=WChS{eW25AGmxm7@7GGUwlco+0Nlj%m7fvb47)@*_ zE-TBsYbU`}5RFps*{>d7>HO&&IyGe)laYa@ymE_p<;EoDF?$(~60NGMOB-~wZ@Ts< zFVnvN{d?^pR9?uAKgug5E{@}a)vK|o(NUF$&uDiZ_l?+$Gm{f7=_DNU81P*jn4T7B zF|Cxiaf8D+GAb(Qqg5`8hklt}sh>UH(9n>Don5%Y(*3)4v!)oAencj-k`60+W+o;U zJpVv7cKn3-sg&7{BO^UMn{-M_%7h69y)RCAK1k8WM`p-waR2dnZD3+TyS24t;q0u%#Kc6*G1)$|wG}t?MNCXgw>4_#)JTCV zJjSQar1T%lnSHi)?qQM7R(Yv3ipK$M1_2WsX;+v;r-JM1t~oh7D|lqfuH$viMrOop?yq{o*Jp5Kep4{aAv%Brru zx9=t6j6*&AzOcBcTvk~bUQlqw%}RDsQ}<6HWz@;+tcAZe-?f^~PKCp>PWWbGpPvlt z>+6>shKG|nucChaURi;CjfDL(E-H$2yNiUfkeOpr;&QfZMj!PB!bastQgKa9%}2}{ z%zR3Rjnn_?r|qwIiT8{b6H46q{d<;; ztu0$$FZnsH+i5;#W@ba?j%iqnJZ*=^W!GBWaI2NUj2h|5;qD*TJ9s1|Q>MyoOq?c5 zc0fm# zGZtGCpOC<_va*u*+(m(mj7+iUsfWj1Ch}lmQBlPee>uNRSJWpISBMwAGWQ#8h9yGD z;+&j-j3B0;o12@LHp#~150@4fv%L6~g-{|+Ed9!|$BNjL^mJWeVPTyc(VpE> z&CShA$8x?kN%8T~>1u5xK?+i2J<8T)jo$_bxh*X%OUUEz5e*pKuCbfum!4pcAs!`D zljrPAi?JqBA=#{VH^*^KQ@U;@DSqGM@QWZ%y+V5J-@#k&HFsUEr+1@=WH?xk>+9>o z-v4ATd?1_>a{1j468QZMLIQgHNLvi%J88@VEcV4USqa6*0U{zIB_!CUfu+^eNP}$D zmLC&d9q)}xHEX|rbL*8Ryph2~X{f7v+kX4nU+23(m(7a%Ha~CWY%D1$d0ttF4?fAo zM9$mJ#L3(|sev=DGg+qVnC)(ntHT>DDlJttG&0Jy7B-?UGr$o%->*6odqE~k&O%a& zBfWaQVu5ah32wSi{kCIsTiX>i|M1brdhNXgMzov0zwdOmH$*1Bj8l|jM^Wt)QMK>U z=EV-7fbai#n_)Jes=qyv$qFivHO`FbB)MrEGI@!-8@;UeK z-^Abla@yPbeBDE;bH$yD@5)%Hs$R)OGQ&hzP@H%qerHBX_QyUfE*32=FaI+-I=cNW z7>mVHV|IJB + +# Docker Engine managed plugin system + +* [Installing and using a plugin](index.md#installing-and-using-a-plugin) +* [Developing a plugin](index.md#developing-a-plugin) + +Docker Engine's plugins system allows you to install, start, stop, and remove +plugins using Docker Engine. This mechanism is currently only available for +volume drivers, but more plugin driver types will be available in future releases. + +For information about the legacy plugin system available in Docker Engine 1.12 +and earlier, see [Understand legacy Docker Engine plugins](legacy_plugins.md). + +> **Note**: Docker Engine managed plugins are currently not supported +on Windows daemons. + +## Installing and using a plugin + +Plugins are distributed as Docker images and can be hosted on Docker Hub or on +a private registry. + +To install a plugin, use the `docker plugin install` command, which pulls the +plugin from Docker hub or your private registry, prompts you to grant +permissions or capabilities if necessary, and enables the plugin. + +To check the status of installed plugins, use the `docker plugin ls` command. +Plugins that start successfully are listed as enabled in the output. + +After a plugin is installed, you can use it as an option for another Docker +operation, such as creating a volume. + +In the following example, you install the `sshfs` plugin, verify that it is +enabled, and use it to create a volume. + +1. Install the `sshfs` plugin. + + ```bash + $ docker plugin install vieux/sshfs + + Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - capabilities: [CAP_SYS_ADMIN] + Do you grant the above permissions? [y/N] y + + vieux/sshfs + ``` + + The plugin requests 2 privileges: + - It needs access to the `host` network. + - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run + the `mount` command. + +2. Check that the plugin is enabled in the output of `docker plugin ls`. + + ```bash + $ docker plugin ls + + ID NAME TAG DESCRIPTION ENABLED + 69553ca1d789 vieux/sshfs latest the `sshfs` plugin true + ``` + +3. Create a volume using the plugin. + This example mounts the `/remote` directory on host `1.2.3.4` into a + volume named `sshvolume`. This volume can now be mounted into containers. + + ```bash + $ docker volume create \ + -d vieux/sshfs \ + --name sshvolume \ + -o sshcmd=user@1.2.3.4:/remote + + sshvolume + ``` +4. Verify that the volume was created successfully. + + ```bash + $ docker volume ls + + DRIVER NAME + vieux/sshfs sshvolume + ``` + +5. Start a container that uses the volume `sshvolume`. + + ```bash + $ docker run -v sshvolume:/data busybox ls /data + + + ``` + +To disable a plugin, use the `docker plugin disable` command. To completely +remove it, use the `docker plugin remove` command. For other available +commands and options, see the +[command line reference](../reference/commandline/index.md). + +## Service creation using plugins + +In swarm mode, it is possible to create a service that allows for attaching +to networks or mounting volumes. Swarm schedules services based on plugin availability +on a node. In this example, a volume plugin is installed on a swarm worker and a volume +is created using the plugin. In the manager, a service is created with the relevant +mount options. It can be observed that the service is scheduled to run on the worker +node with the said volume plugin and volume. + +In the following example, node1 is the manager and node2 is the worker. + +1. Prepare manager. In node 1: + + ```bash + $ docker swarm init + Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager. + ``` + +2. Join swarm, install plugin and create volume on worker. In node 2: + + ```bash + $ docker swarm join \ + --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \ + 192.168.99.100:2377 + ``` + + ```bash + $ docker plugin install tiborvass/sample-volume-plugin + latest: Pulling from tiborvass/sample-volume-plugin + eb9c16fbdc53: Download complete + Digest: sha256:00b42de88f3a3e0342e7b35fa62394b0a9ceb54d37f4c50be5d3167899994639 + Status: Downloaded newer image for tiborvass/sample-volume-plugin:latest + Installed plugin tiborvass/sample-volume-plugin + ``` + + ```bash + $ docker volume create -d tiborvass/sample-volume-plugin --name pluginVol + ``` + +3. Create a service using the plugin and volume. In node1: + + ```bash + $ docker service create --name my-service --mount type=volume,volume-driver=tiborvass/sample-volume-plugin,source=pluginVol,destination=/tmp busybox top + + $ docker service ls + z1sj8bb8jnfn my-service replicated 1/1 busybox:latest + ``` + docker service ls shows service 1 instance of service running. + +4. Observe the task getting scheduled in node 2: + + ```bash + $ docker ps --format '{{.ID}}\t {{.Status}} {{.Names}} {{.Command}}' + 83fc1e842599 Up 2 days my-service.1.9jn59qzn7nbc3m0zt1hij12xs "top" + ``` + +## Developing a plugin + +#### The rootfs directory +The `rootfs` directory represents the root filesystem of the plugin. In this +example, it was created from a Dockerfile: + +>**Note:** The `/run/docker/plugins` directory is mandatory inside of the +plugin's filesystem for docker to communicate with the plugin. + +```bash +$ git clone https://github.com/vieux/docker-volume-sshfs +$ cd docker-volume-sshfs +$ docker build -t rootfsimage . +$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created +$ sudo mkdir -p myplugin/rootfs +$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs +$ docker rm -vf "$id" +$ docker rmi rootfsimage +``` + +#### The config.json file + +The `config.json` file describes the plugin. See the [plugins config reference](config.md). + +Consider the following `config.json` file. + +```json +{ + "description": "sshFS plugin for Docker", + "documentation": "https://docs.docker.com/engine/extend/plugins/", + "entrypoint": ["/go/bin/docker-volume-sshfs"], + "network": { + "type": "host" + }, + "interface" : { + "types": ["docker.volumedriver/1.0"], + "socket": "sshfs.sock" + }, + "capabilities": ["CAP_SYS_ADMIN"] +} +``` + +This plugin is a volume driver. It requires a `host` network and the +`CAP_SYS_ADMIN` capability. It depends upon the `/go/bin/docker-volume-sshfs` +entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate +with Docker Engine. This plugin has no runtime parameters. + +#### Creating the plugin + +A new plugin can be created by running +`docker plugin create ./path/to/plugin/data` where the plugin +data contains a plugin configuration file `config.json` and a root filesystem +in subdirectory `rootfs`. + +After that the plugin `` will show up in `docker plugin ls`. +Plugins can be pushed to remote registries with +`docker plugin push `. diff --git a/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md b/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md new file mode 100644 index 0000000..6ac914e --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md @@ -0,0 +1,98 @@ +--- +redirect_from: +- "/engine/extend/plugins/" +title: "Use Docker Engine plugins" +description: "How to add additional functionality to Docker with plugins extensions" +keywords: "Examples, Usage, plugins, docker, documentation, user guide" +--- + + + +# Use Docker Engine plugins + +This document describes the Docker Engine plugins generally available in Docker +Engine. To view information on plugins managed by Docker, +refer to [Docker Engine plugin system](index.md). + +You can extend the capabilities of the Docker Engine by loading third-party +plugins. This page explains the types of plugins and provides links to several +volume and network plugins for Docker. + +## Types of plugins + +Plugins extend Docker's functionality. They come in specific types. For +example, a [volume plugin](plugins_volume.md) might enable Docker +volumes to persist across multiple Docker hosts and a +[network plugin](plugins_network.md) might provide network plumbing. + +Currently Docker supports authorization, volume and network driver plugins. In the future it +will support additional plugin types. + +## Installing a plugin + +Follow the instructions in the plugin's documentation. + +## Finding a plugin + +The sections below provide an inexhaustive overview of available plugins. + + + +### Network plugins + +Plugin | Description +----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +[Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards. +[Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin is developed as part of the OpenStack Kuryr project and implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well. +[Weave Network Plugin](https://www.weave.works/docs/net/latest/introducing-weave/) | A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity. + +### Volume plugins + +Plugin | Description +----------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +[Azure File Storage plugin](https://github.com/Azure/azurefile-dockervolumedriver) | Lets you mount Microsoft [Azure File Storage](https://azure.microsoft.com/blog/azure-file-storage-now-generally-available/) shares to Docker containers as volumes using the SMB 3.0 protocol. [Learn more](https://azure.microsoft.com/blog/persistent-docker-volumes-with-azure-file-storage/). +[Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) | A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS. +[Contiv Volume Plugin](https://github.com/contiv/volplugin) | An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption. It has support for Ceph and NFS. +[Convoy plugin](https://github.com/rancher/convoy) | A volume plugin for a variety of storage back-ends including device mapper and NFS. It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore. +[DRBD plugin](https://www.drbd.org/en/supported-projects/docker) | A volume plugin that provides highly available storage replicated by [DRBD](https://www.drbd.org). Data written to the docker volume is replicated in a cluster of DRBD nodes. +[Flocker plugin](https://clusterhq.com/docker-plugin/) | A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines. +[gce-docker plugin](https://github.com/mcuadros/gce-docker) | A volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks). +[GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) | A volume plugin that provides multi-host volumes management for Docker using GlusterFS. +[Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3. +[HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays. +[IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. +[Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository. +[Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`. +[NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future. +[Netshare plugin](https://github.com/ContainX/docker-volume-netshare) | A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems. +[OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. +[Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler. +[Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. +[REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. +[Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices. +[VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments. + +### Authorization plugins + + Plugin | Description +------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + [Twistlock AuthZ Broker](https://github.com/twistlock/authz) | A basic extendable authorization plugin that runs directly on the host or inside a container. This plugin allows you to define user policies that it evaluates during authorization. Basic authorization is provided if Docker daemon is started with the --tlsverify flag (username is extracted from the certificate common name). + +## Troubleshooting a plugin + +If you are having problems with Docker after loading a plugin, ask the authors +of the plugin for help. The Docker team may not be able to assist you. + +## Writing a plugin + +If you are interested in writing a plugin for Docker, or seeing how they work +under the hood, see the [docker plugins reference](plugin_api.md). diff --git a/vendor/github.com/docker/docker/docs/extend/plugin_api.md b/vendor/github.com/docker/docker/docs/extend/plugin_api.md new file mode 100644 index 0000000..693b77a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/plugin_api.md @@ -0,0 +1,196 @@ +--- +title: "Plugins API" +description: "How to write Docker plugins extensions " +keywords: "API, Usage, plugins, documentation, developer" +--- + + + +# Docker Plugin API + +Docker plugins are out-of-process extensions which add capabilities to the +Docker Engine. + +This document describes the Docker Engine plugin API. To view information on +plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md). + +This page is intended for people who want to develop their own Docker plugin. +If you just want to learn about or use Docker plugins, look +[here](legacy_plugins.md). + +## What plugins are + +A plugin is a process running on the same or a different host as the docker daemon, +which registers itself by placing a file on the same docker host in one of the plugin +directories described in [Plugin discovery](#plugin-discovery). + +Plugins have human-readable names, which are short, lowercase strings. For +example, `flocker` or `weave`. + +Plugins can run inside or outside containers. Currently running them outside +containers is recommended. + +## Plugin discovery + +Docker discovers plugins by looking for them in the plugin directory whenever a +user or container tries to use one by name. + +There are three types of files which can be put in the plugin directory. + +* `.sock` files are UNIX domain sockets. +* `.spec` files are text files containing a URL, such as `unix:///other.sock` or `tcp://localhost:8080`. +* `.json` files are text files containing a full json specification for the plugin. + +Plugins with UNIX domain socket files must run on the same docker host, whereas +plugins with spec or json files can run on a different host if a remote URL is specified. + +UNIX domain socket files must be located under `/run/docker/plugins`, whereas +spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`. + +The name of the file (excluding the extension) determines the plugin name. + +For example, the `flocker` plugin might create a UNIX socket at +`/run/docker/plugins/flocker.sock`. + +You can define each plugin into a separated subdirectory if you want to isolate definitions from each other. +For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only +mount `/run/docker/plugins/flocker` inside the `flocker` container. + +Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under +`/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as +soon as it finds the first plugin definition with the given name. + +### JSON specification + +This is the JSON format for a plugin: + +```json +{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "InsecureSkipVerify": false, + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +} +``` + +The `TLSConfig` field is optional and TLS will only be verified if this configuration is present. + +## Plugin lifecycle + +Plugins should be started before Docker, and stopped after Docker. For +example, when packaging a plugin for a platform which supports `systemd`, you +might use [`systemd` dependencies]( +http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to +manage startup and shutdown order. + +When upgrading a plugin, you should first stop the Docker daemon, upgrade the +plugin, then start Docker again. + +## Plugin activation + +When a plugin is first referred to -- either by a user referring to it by name +(e.g. `docker run --volume-driver=foo`) or a container already configured to +use a plugin being started -- Docker looks for the named plugin in the plugin +directory and activates it with a handshake. See Handshake API below. + +Plugins are *not* activated automatically at Docker daemon startup. Rather, +they are activated only lazily, or on-demand, when they are needed. + +## Systemd socket activation + +Plugins may also be socket activated by `systemd`. The official [Plugins helpers](https://github.com/docker/go-plugins-helpers) +natively supports socket activation. In order for a plugin to be socket activated it needs +a `service` file and a `socket` file. + +The `service` file (for example `/lib/systemd/system/your-plugin.service`): + +``` +[Unit] +Description=Your plugin +Before=docker.service +After=network.target your-plugin.socket +Requires=your-plugin.socket docker.service + +[Service] +ExecStart=/usr/lib/docker/your-plugin + +[Install] +WantedBy=multi-user.target +``` +The `socket` file (for example `/lib/systemd/system/your-plugin.socket`): + +``` +[Unit] +Description=Your plugin + +[Socket] +ListenStream=/run/docker/plugins/your-plugin.sock + +[Install] +WantedBy=sockets.target +``` + +This will allow plugins to be actually started when the Docker daemon connects to +the sockets they're listening on (for instance the first time the daemon uses them +or if one of the plugin goes down accidentally). + +## API design + +The Plugin API is RPC-style JSON over HTTP, much like webhooks. + +Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to +implement an HTTP server and bind this to the UNIX socket mentioned in the +"plugin discovery" section. + +All requests are HTTP `POST` requests. + +The API is versioned via an Accept header, which currently is always set to +`application/vnd.docker.plugins.v1+json`. + +## Handshake API + +Plugins are activated via the following "handshake" API call. + +### /Plugin.Activate + +**Request:** empty body + +**Response:** +``` +{ + "Implements": ["VolumeDriver"] +} +``` + +Responds with a list of Docker subsystems which this plugin implements. +After activation, the plugin will then be sent events from this subsystem. + +Possible values are: + +* [`authz`](plugins_authorization.md) +* [`NetworkDriver`](plugins_network.md) +* [`VolumeDriver`](plugins_volume.md) + + +## Plugin retries + +Attempts to call a method on a plugin are retried with an exponential backoff +for up to 30 seconds. This may help when packaging plugins as containers, since +it gives plugin containers a chance to start up before failing any user +containers which depend on them. + +## Plugins helpers + +To ease plugins development, we're providing an `sdk` for each kind of plugins +currently supported by Docker at [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers). diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md b/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md new file mode 100644 index 0000000..ac1837f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md @@ -0,0 +1,260 @@ +--- +title: "Access authorization plugin" +description: "How to create authorization plugins to manage access control to your Docker daemon." +keywords: "security, authorization, authentication, docker, documentation, plugin, extend" +redirect_from: +- "/engine/extend/authorization/" +--- + + + +# Create an authorization plugin + +This document describes the Docker Engine plugins generally available in Docker +Engine. To view information on plugins managed by Docker Engine, +refer to [Docker Engine plugin system](index.md). + +Docker's out-of-the-box authorization model is all or nothing. Any user with +permission to access the Docker daemon can run any Docker client command. The +same is true for callers using Docker's Engine API to contact the daemon. If you +require greater access control, you can create authorization plugins and add +them to your Docker daemon configuration. Using an authorization plugin, a +Docker administrator can configure granular access policies for managing access +to Docker daemon. + +Anyone with the appropriate skills can develop an authorization plugin. These +skills, at their most basic, are knowledge of Docker, understanding of REST, and +sound programming knowledge. This document describes the architecture, state, +and methods information available to an authorization plugin developer. + +## Basic principles + +Docker's [plugin infrastructure](plugin_api.md) enables +extending Docker by loading, removing and communicating with +third-party components using a generic API. The access authorization subsystem +was built using this mechanism. + +Using this subsystem, you don't need to rebuild the Docker daemon to add an +authorization plugin. You can add a plugin to an installed Docker daemon. You do +need to restart the Docker daemon to add a new plugin. + +An authorization plugin approves or denies requests to the Docker daemon based +on both the current authentication context and the command context. The +authentication context contains all user details and the authentication method. +The command context contains all the relevant request data. + +Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md). +Each plugin must reside within directories described under the +[Plugin discovery](plugin_api.md#plugin-discovery) section. + +**Note**: the abbreviations `AuthZ` and `AuthN` mean authorization and authentication +respectively. + +## Default user authorization mechanism + +If TLS is enabled in the [Docker daemon](https://docs.docker.com/engine/security/https/), the default user authorization flow extracts the user details from the certificate subject name. +That is, the `User` field is set to the client certificate subject common name, and the `AuthenticationMethod` field is set to `TLS`. + +## Basic architecture + +You are responsible for registering your plugin as part of the Docker daemon +startup. You can install multiple plugins and chain them together. This chain +can be ordered. Each request to the daemon passes in order through the chain. +Only when all the plugins grant access to the resource, is the access granted. + +When an HTTP request is made to the Docker daemon through the CLI or via the +Engine API, the authentication subsystem passes the request to the installed +authentication plugin(s). The request contains the user (caller) and command +context. The plugin is responsible for deciding whether to allow or deny the +request. + +The sequence diagrams below depict an allow and deny authorization flow: + +![Authorization Allow flow](images/authz_allow.png) + +![Authorization Deny flow](images/authz_deny.png) + +Each request sent to the plugin includes the authenticated user, the HTTP +headers, and the request/response body. Only the user name and the +authentication method used are passed to the plugin. Most importantly, no user +credentials or tokens are passed. Finally, not all request/response bodies +are sent to the authorization plugin. Only those request/response bodies where +the `Content-Type` is either `text/*` or `application/json` are sent. + +For commands that can potentially hijack the HTTP connection (`HTTP +Upgrade`), such as `exec`, the authorization plugin is only called for the +initial HTTP requests. Once the plugin approves the command, authorization is +not applied to the rest of the flow. Specifically, the streaming data is not +passed to the authorization plugins. For commands that return chunked HTTP +response, such as `logs` and `events`, only the HTTP request is sent to the +authorization plugins. + +During request/response processing, some authorization flows might +need to do additional queries to the Docker daemon. To complete such flows, +plugins can call the daemon API similar to a regular user. To enable these +additional queries, the plugin must provide the means for an administrator to +configure proper authentication and security policies. + +## Docker client flows + +To enable and configure the authorization plugin, the plugin developer must +support the Docker client interactions detailed in this section. + +### Setting up Docker daemon + +Enable the authorization plugin with a dedicated command line flag in the +`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID` +value. This value can be the plugin’s socket or a path to a specification file. +Authorization plugins can be loaded without restarting the daemon. Refer +to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information. + +```bash +$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +Docker's authorization subsystem supports multiple `--authorization-plugin` parameters. + +### Calling authorized command (allow) + +```bash +$ docker pull centos +... +f1b10cd84249: Pull complete +... +``` + +### Calling unauthorized command (deny) + +```bash +$ docker pull centos +... +docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed. +``` + +### Error from plugins + +```bash +$ docker pull centos +... +docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?. +``` + +## API schema and implementation + +In addition to Docker's standard plugin registration method, each plugin +should implement the following two methods: + +* `/AuthZPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request. + +* `/AuthZPlugin.AuthZRes` This authorize response method is called before the response is returned from Docker daemon to the client. + +#### /AuthZPlugin.AuthZReq + +**Request**: + +```json +{ + "User": "The user identification", + "UserAuthNMethod": "The authentication method used", + "RequestMethod": "The HTTP method", + "RequestURI": "The HTTP request URI", + "RequestBody": "Byte array containing the raw HTTP request body", + "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string " +} +``` + +**Response**: + +```json +{ + "Allow": "Determined whether the user is allowed or not", + "Msg": "The authorization message", + "Err": "The error message if things go wrong" +} +``` +#### /AuthZPlugin.AuthZRes + +**Request**: + +```json +{ + "User": "The user identification", + "UserAuthNMethod": "The authentication method used", + "RequestMethod": "The HTTP method", + "RequestURI": "The HTTP request URI", + "RequestBody": "Byte array containing the raw HTTP request body", + "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string", + "ResponseBody": "Byte array containing the raw HTTP response body", + "ResponseHeader": "Byte array containing the raw HTTP response header as a map[string][]string", + "ResponseStatusCode":"Response status code" +} +``` + +**Response**: + +```json +{ + "Allow": "Determined whether the user is allowed or not", + "Msg": "The authorization message", + "Err": "The error message if things go wrong" +} +``` + +### Request authorization + +Each plugin must support two request authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. + +#### Daemon -> Plugin + +Name | Type | Description +-----------------------|-------------------|------------------------------------------------------- +User | string | The user identification +Authentication method | string | The authentication method used +Request method | enum | The HTTP method (GET/DELETE/POST) +Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) +Request headers | map[string]string | Request headers as key value pairs (without the authorization header) +Request body | []byte | Raw request body + + +#### Plugin -> Daemon + +Name | Type | Description +--------|--------|---------------------------------------------------------------------------------- +Allow | bool | Boolean value indicating whether the request is allowed or denied +Msg | string | Authorization message (will be returned to the client in case the access is denied) +Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) + +### Response authorization + +The plugin must support two authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. + +#### Daemon -> Plugin + + +Name | Type | Description +----------------------- |------------------ |---------------------------------------------------- +User | string | The user identification +Authentication method | string | The authentication method used +Request method | string | The HTTP method (GET/DELETE/POST) +Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) +Request headers | map[string]string | Request headers as key value pairs (without the authorization header) +Request body | []byte | Raw request body +Response status code | int | Status code from the docker daemon +Response headers | map[string]string | Response headers as key value pairs +Response body | []byte | Raw docker daemon response body + + +#### Plugin -> Daemon + +Name | Type | Description +--------|--------|---------------------------------------------------------------------------------- +Allow | bool | Boolean value indicating whether the response is allowed or denied +Msg | string | Authorization message (will be returned to the client in case the access is denied) +Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md b/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md new file mode 100644 index 0000000..d91c383 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md @@ -0,0 +1,376 @@ +--- +title: "Graphdriver plugins" +description: "How to manage image and container filesystems with external plugins" +keywords: "Examples, Usage, storage, image, docker, data, graph, plugin, api" +advisory: experimental +--- + + + + +## Changelog + +### 1.13.0 + +- Support v2 plugins + +# Docker graph driver plugins + +Docker graph driver plugins enable admins to use an external/out-of-process +graph driver for use with Docker engine. This is an alternative to using the +built-in storage drivers, such as aufs/overlay/devicemapper/btrfs. + +You need to install and enable the plugin and then restart the Docker daemon +before using the plugin. See the following example for the correct ordering +of steps. + +``` +$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver + +$ pkill dockerd +$ dockerd --experimental -s cpuguy83/docker-overlay2-graphdriver-plugin +``` + +# Write a graph driver plugin + +See the [plugin documentation](/docs/extend/index.md) for detailed information +on the underlying plugin protocol. + + +## Graph Driver plugin protocol + +If a plugin registers itself as a `GraphDriver` when activated, then it is +expected to provide the rootfs for containers as well as image layer storage. + +### /GraphDriver.Init + +**Request**: +```json +{ + "Home": "/graph/home/path", + "Opts": [], + "UIDMaps": [], + "GIDMaps": [] +} +``` + +Initialize the graph driver plugin with a home directory and array of options. +These are passed through from the user, but the plugin is not required to parse +or honor them. + +The request also includes a list of UID and GID mappings, structed as follows: +```json +{ + "ContainerID": 0, + "HostID": 0, + "Size": 0 +} +``` + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + + +### /GraphDriver.Create + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142", + "MountLabel": "", + "StorageOpt": {} +} +``` + +Create a new, empty, read-only filesystem layer with the specified +`ID`, `Parent` and `MountLabel`. If `Parent` is an empty string, there is no +parent layer. `StorageOpt` is map of strings which indicate storage options. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.CreateReadWrite + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142", + "MountLabel": "", + "StorageOpt": {} +} +``` + +Similar to `/GraphDriver.Create` but creates a read-write filesystem layer. + +### /GraphDriver.Remove + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Remove the filesystem layer with this given `ID`. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Get + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "MountLabel": "" +} +``` + +Get the mountpoint for the layered filesystem referred to by the given `ID`. + +**Response**: +```json +{ + "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Err": "" +} +``` + +Respond with the absolute path to the mounted layered filesystem. +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Put + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Release the system resources for the specified `ID`, such as unmounting the +filesystem layer. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Exists + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Determine if a filesystem layer with the specified `ID` exists. + +**Response**: +```json +{ + "Exists": true +} +``` + +Respond with a boolean for whether or not the filesystem layer with the specified +`ID` exists. + +### /GraphDriver.Status + +**Request**: +```json +{} +``` + +Get low-level diagnostic information about the graph driver. + +**Response**: +```json +{ + "Status": [[]] +} +``` + +Respond with a 2-D array with key/value pairs for the underlying status +information. + + +### /GraphDriver.GetMetadata + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Get low-level diagnostic information about the layered filesystem with the +with the specified `ID` + +**Response**: +```json +{ + "Metadata": {}, + "Err": "" +} +``` + +Respond with a set of key/value pairs containing the low-level diagnostic +information about the layered filesystem. +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Cleanup + +**Request**: +```json +{} +``` + +Perform necessary tasks to release resources help by the plugin, such as +unmounting all the layered file systems. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + + +### /GraphDriver.Diff + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Get an archive of the changes between the filesystem layers specified by the `ID` +and `Parent`. `Parent` may be an empty string, in which case there is no parent. + +**Response**: +``` +{{ TAR STREAM }} +``` + +### /GraphDriver.Changes + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Get a list of changes between the filesystem layers specified by the `ID` and +`Parent`. If `Parent` is an empty string, there is no parent. + +**Response**: +```json +{ + "Changes": [{}], + "Err": "" +} +``` + +Respond with a list of changes. The structure of a change is: +```json + "Path": "/some/path", + "Kind": 0, +``` + +Where the `Path` is the filesystem path within the layered filesystem that is +changed and `Kind` is an integer specifying the type of change that occurred: + +- 0 - Modified +- 1 - Added +- 2 - Deleted + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.ApplyDiff + +**Request**: +``` +{{ TAR STREAM }} +``` + +Extract the changeset from the given diff into the layer with the specified `ID` +and `Parent` + +**Query Parameters**: + +- id (required)- the `ID` of the new filesystem layer to extract the diff to +- parent (required)- the `Parent` of the given `ID` + +**Response**: +```json +{ + "Size": 512366, + "Err": "" +} +``` + +Respond with the size of the new layer in bytes. +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.DiffSize + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Calculate the changes between the specified `ID` + +**Response**: +```json +{ + "Size": 512366, + "Err": "" +} +``` + +Respond with the size changes between the specified `ID` and `Parent` +Respond with a non-empty string error if an error occurred. diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_network.md b/vendor/github.com/docker/docker/docs/extend/plugins_network.md new file mode 100644 index 0000000..a974862 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/plugins_network.md @@ -0,0 +1,77 @@ +--- +title: "Docker network driver plugins" +description: "Network driver plugins." +keywords: "Examples, Usage, plugins, docker, documentation, user guide" +--- + + + +# Engine network driver plugins + +This document describes Docker Engine network driver plugins generally +available in Docker Engine. To view information on plugins +managed by Docker Engine, refer to [Docker Engine plugin system](index.md). + +Docker Engine network plugins enable Engine deployments to be extended to +support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN +or something completely different. Network driver plugins are supported via the +LibNetwork project. Each plugin is implemented as a "remote driver" for +LibNetwork, which shares plugin infrastructure with Engine. Effectively, network +driver plugins are activated in the same way as other plugins, and use the same +kind of protocol. + +## Network driver plugins and swarm mode + +Docker 1.12 adds support for cluster management and orchestration called +[swarm mode](https://docs.docker.com/engine/swarm/). Docker Engine running in swarm mode currently +only supports the built-in overlay driver for networking. Therefore existing +networking plugins will not work in swarm mode. + +When you run Docker Engine outside of swarm mode, all networking plugins that +worked in Docker 1.11 will continue to function normally. They do not require +any modification. + +## Using network driver plugins + +The means of installing and running a network driver plugin depend on the +particular plugin. So, be sure to install your plugin according to the +instructions obtained from the plugin developer. + +Once running however, network driver plugins are used just like the built-in +network drivers: by being mentioned as a driver in network-oriented Docker +commands. For example, + + $ docker network create --driver weave mynet + +Some network driver plugins are listed in [plugins](legacy_plugins.md) + +The `mynet` network is now owned by `weave`, so subsequent commands +referring to that network will be sent to the plugin, + + $ docker run --network=mynet busybox top + + +## Write a network plugin + +Network plugins implement the [Docker plugin +API](plugin_api.md) and the network plugin protocol + +## Network plugin protocol + +The network driver protocol, in addition to the plugin activation call, is +documented as part of libnetwork: +[https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md). + +# Related Information + +To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`. + +- [Docker networks feature overview](https://docs.docker.com/engine/userguide/networking/) +- The [LibNetwork](https://github.com/docker/libnetwork) project diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_volume.md b/vendor/github.com/docker/docker/docs/extend/plugins_volume.md new file mode 100644 index 0000000..c060bf3 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/plugins_volume.md @@ -0,0 +1,276 @@ +--- +title: "Volume plugins" +description: "How to manage data with external volume plugins" +keywords: "Examples, Usage, volume, docker, data, volumes, plugin, api" +--- + + + +# Write a volume plugin + +Docker Engine volume plugins enable Engine deployments to be integrated with +external storage systems, such as Amazon EBS, and enable data volumes to persist +beyond the lifetime of a single Engine host. See the +[plugin documentation](legacy_plugins.md) for more information. + +## Changelog + +### 1.13.0 + +- If used as part of the v2 plugin architecture, mountpoints that are part of paths returned by plugin have to be mounted under the directory specified by PropagatedMount in the plugin configuration [#26398](https://github.com/docker/docker/pull/26398) + +### 1.12.0 + +- Add `Status` field to `VolumeDriver.Get` response ([#21006](https://github.com/docker/docker/pull/21006#)) +- Add `VolumeDriver.Capabilities` to get capabilities of the volume driver([#22077](https://github.com/docker/docker/pull/22077)) + +### 1.10.0 + +- Add `VolumeDriver.Get` which gets the details about the volume ([#16534](https://github.com/docker/docker/pull/16534)) +- Add `VolumeDriver.List` which lists all volumes owned by the driver ([#16534](https://github.com/docker/docker/pull/16534)) + +### 1.8.0 + +- Initial support for volume driver plugins ([#14659](https://github.com/docker/docker/pull/14659)) + +## Command-line changes + +A volume plugin makes use of the `-v`and `--volume-driver` flag on the `docker run` command. The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example: + + $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh + +This command passes the `volumename` through to the volume plugin as a +user-given name for the volume. The `volumename` must not begin with a `/`. + +By having the user specify a `volumename`, a plugin can associate the volume +with an external volume beyond the lifetime of a single container or container +host. This can be used, for example, to move a stateful container from one +server to another. + +By specifying a `volumedriver` in conjunction with a `volumename`, users can use plugins such as [Flocker](https://clusterhq.com/docker-plugin/) to manage volumes external to a single host, such as those on EBS. + + +## Create a VolumeDriver + +The container creation endpoint (`/containers/create`) accepts a `VolumeDriver` +field of type `string` allowing to specify the name of the driver. It's default +value of `"local"` (the default driver for local volumes). + +## Volume plugin protocol + +If a plugin registers itself as a `VolumeDriver` when activated, then it is +expected to provide writeable paths on the host filesystem for the Docker +daemon to provide to containers to consume. + +The Docker daemon handles bind-mounting the provided paths into user +containers. + +> **Note**: Volume plugins should *not* write data to the `/var/lib/docker/` +> directory, including `/var/lib/docker/volumes`. The `/var/lib/docker/` +> directory is reserved for Docker. + +### /VolumeDriver.Create + +**Request**: +```json +{ + "Name": "volume_name", + "Opts": {} +} +``` + +Instruct the plugin that the user wants to create a volume, given a user +specified volume name. The plugin does not need to actually manifest the +volume on the filesystem yet (until Mount is called). +Opts is a map of driver specific options passed through from the user request. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + +### /VolumeDriver.Remove + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Delete the specified volume from disk. This request is issued when a user invokes `docker rm -v` to remove volumes associated with a container. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + +### /VolumeDriver.Mount + +**Request**: +```json +{ + "Name": "volume_name", + "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" +} +``` + +Docker requires the plugin to provide a volume, given a user specified volume +name. This is called once per container start. If the same volume_name is requested +more than once, the plugin may need to keep track of each new mount request and provision +at the first mount request and deprovision at the last corresponding unmount request. + +`ID` is a unique ID for the caller that is requesting the mount. + +**Response**: +```json +{ + "Mountpoint": "/path/to/directory/on/host", + "Err": "" +} +``` + +Respond with the path on the host filesystem where the volume has been made +available, and/or a string error if an error occurred. + +### /VolumeDriver.Path + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Docker needs reminding of the path to the volume on the host. + +**Response**: +```json +{ + "Mountpoint": "/path/to/directory/on/host", + "Err": "" +} +``` + +Respond with the path on the host filesystem where the volume has been made +available, and/or a string error if an error occurred. `Mountpoint` is optional, +however the plugin may be queried again later if one is not provided. + +### /VolumeDriver.Unmount + +**Request**: +```json +{ + "Name": "volume_name", + "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" +} +``` + +Indication that Docker no longer is using the named volume. This is called once +per container stop. Plugin may deduce that it is safe to deprovision it at +this point. + +`ID` is a unique ID for the caller that is requesting the mount. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + + +### /VolumeDriver.Get + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Get the volume info. + + +**Response**: +```json +{ + "Volume": { + "Name": "volume_name", + "Mountpoint": "/path/to/directory/on/host", + "Status": {} + }, + "Err": "" +} +``` + +Respond with a string error if an error occurred. `Mountpoint` and `Status` are +optional. + + +### /VolumeDriver.List + +**Request**: +```json +{} +``` + +Get the list of volumes registered with the plugin. + +**Response**: +```json +{ + "Volumes": [ + { + "Name": "volume_name", + "Mountpoint": "/path/to/directory/on/host" + } + ], + "Err": "" +} +``` + +Respond with a string error if an error occurred. `Mountpoint` is optional. + +### /VolumeDriver.Capabilities + +**Request**: +```json +{} +``` + +Get the list of capabilities the driver supports. +The driver is not required to implement this endpoint, however in such cases +the default values will be taken. + +**Response**: +```json +{ + "Capabilities": { + "Scope": "global" + } +} +``` + +Supported scopes are `global` and `local`. Any other value in `Scope` will be +ignored and assumed to be `local`. Scope allows cluster managers to handle the +volume differently, for instance with a scope of `global`, the cluster manager +knows it only needs to create the volume once instead of on every engine. More +capabilities may be added in the future. diff --git a/vendor/github.com/docker/docker/docs/reference/builder.md b/vendor/github.com/docker/docker/docs/reference/builder.md new file mode 100644 index 0000000..6fa5a24 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/builder.md @@ -0,0 +1,1746 @@ +--- +title: "Dockerfile reference" +description: "Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image." +keywords: "builder, docker, Dockerfile, automation, image creation" +--- + + + +# Dockerfile reference + +Docker can build images automatically by reading the instructions from a +`Dockerfile`. A `Dockerfile` is a text document that contains all the commands a +user could call on the command line to assemble an image. Using `docker build` +users can create an automated build that executes several command-line +instructions in succession. + +This page describes the commands you can use in a `Dockerfile`. When you are +done reading this page, refer to the [`Dockerfile` Best +Practices](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/) for a tip-oriented guide. + +## Usage + +The [`docker build`](commandline/build.md) command builds an image from +a `Dockerfile` and a *context*. The build's context is the files at a specified +location `PATH` or `URL`. The `PATH` is a directory on your local filesystem. +The `URL` is a Git repository location. + +A context is processed recursively. So, a `PATH` includes any subdirectories and +the `URL` includes the repository and its submodules. A simple build command +that uses the current directory as context: + + $ docker build . + Sending build context to Docker daemon 6.51 MB + ... + +The build is run by the Docker daemon, not by the CLI. The first thing a build +process does is send the entire context (recursively) to the daemon. In most +cases, it's best to start with an empty directory as context and keep your +Dockerfile in that directory. Add only the files needed for building the +Dockerfile. + +>**Warning**: Do not use your root directory, `/`, as the `PATH` as it causes +>the build to transfer the entire contents of your hard drive to the Docker +>daemon. + +To use a file in the build context, the `Dockerfile` refers to the file specified +in an instruction, for example, a `COPY` instruction. To increase the build's +performance, exclude files and directories by adding a `.dockerignore` file to +the context directory. For information about how to [create a `.dockerignore` +file](#dockerignore-file) see the documentation on this page. + +Traditionally, the `Dockerfile` is called `Dockerfile` and located in the root +of the context. You use the `-f` flag with `docker build` to point to a Dockerfile +anywhere in your file system. + + $ docker build -f /path/to/a/Dockerfile . + +You can specify a repository and tag at which to save the new image if +the build succeeds: + + $ docker build -t shykes/myapp . + +To tag the image into multiple repositories after the build, +add multiple `-t` parameters when you run the `build` command: + + $ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest . + +Before the Docker daemon runs the instructions in the `Dockerfile`, it performs +a preliminary validation of the `Dockerfile` and returns an error if the syntax is incorrect: + + $ docker build -t test/myapp . + Sending build context to Docker daemon 2.048 kB + Error response from daemon: Unknown instruction: RUNCMD + +The Docker daemon runs the instructions in the `Dockerfile` one-by-one, +committing the result of each instruction +to a new image if necessary, before finally outputting the ID of your +new image. The Docker daemon will automatically clean up the context you +sent. + +Note that each instruction is run independently, and causes a new image +to be created - so `RUN cd /tmp` will not have any effect on the next +instructions. + +Whenever possible, Docker will re-use the intermediate images (cache), +to accelerate the `docker build` process significantly. This is indicated by +the `Using cache` message in the console output. +(For more information, see the [Build cache section](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache)) in the +`Dockerfile` best practices guide: + + $ docker build -t svendowideit/ambassador . + Sending build context to Docker daemon 15.36 kB + Step 1/4 : FROM alpine:3.2 + ---> 31f630c65071 + Step 2/4 : MAINTAINER SvenDowideit@home.org.au + ---> Using cache + ---> 2a1c91448f5f + Step 3/4 : RUN apk update && apk add socat && rm -r /var/cache/ + ---> Using cache + ---> 21ed6e7fbb73 + Step 4/4 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh + ---> Using cache + ---> 7ea8aef582cc + Successfully built 7ea8aef582cc + +Build cache is only used from images that have a local parent chain. This means +that these images were created by previous builds or the whole chain of images +was loaded with `docker load`. If you wish to use build cache of a specific +image you can specify it with `--cache-from` option. Images specified with +`--cache-from` do not need to have a parent chain and may be pulled from other +registries. + +When you're done with your build, you're ready to look into [*Pushing a +repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). + +## Format + +Here is the format of the `Dockerfile`: + +```Dockerfile +# Comment +INSTRUCTION arguments +``` + +The instruction is not case-sensitive. However, convention is for them to +be UPPERCASE to distinguish them from arguments more easily. + + +Docker runs instructions in a `Dockerfile` in order. **The first +instruction must be \`FROM\`** in order to specify the [*Base +Image*](glossary.md#base-image) from which you are building. + +Docker treats lines that *begin* with `#` as a comment, unless the line is +a valid [parser directive](#parser-directives). A `#` marker anywhere +else in a line is treated as an argument. This allows statements like: + +```Dockerfile +# Comment +RUN echo 'we are running some # of cool things' +``` + +Line continuation characters are not supported in comments. + +## Parser directives + +Parser directives are optional, and affect the way in which subsequent lines +in a `Dockerfile` are handled. Parser directives do not add layers to the build, +and will not be shown as a build step. Parser directives are written as a +special type of comment in the form `# directive=value`. A single directive +may only be used once. + +Once a comment, empty line or builder instruction has been processed, Docker +no longer looks for parser directives. Instead it treats anything formatted +as a parser directive as a comment and does not attempt to validate if it might +be a parser directive. Therefore, all parser directives must be at the very +top of a `Dockerfile`. + +Parser directives are not case-sensitive. However, convention is for them to +be lowercase. Convention is also to include a blank line following any +parser directives. Line continuation characters are not supported in parser +directives. + +Due to these rules, the following examples are all invalid: + +Invalid due to line continuation: + +```Dockerfile +# direc \ +tive=value +``` + +Invalid due to appearing twice: + +```Dockerfile +# directive=value1 +# directive=value2 + +FROM ImageName +``` + +Treated as a comment due to appearing after a builder instruction: + +```Dockerfile +FROM ImageName +# directive=value +``` + +Treated as a comment due to appearing after a comment which is not a parser +directive: + +```Dockerfile +# About my dockerfile +FROM ImageName +# directive=value +``` + +The unknown directive is treated as a comment due to not being recognized. In +addition, the known directive is treated as a comment due to appearing after +a comment which is not a parser directive. + +```Dockerfile +# unknowndirective=value +# knowndirective=value +``` + +Non line-breaking whitespace is permitted in a parser directive. Hence, the +following lines are all treated identically: + +```Dockerfile +#directive=value +# directive =value +# directive= value +# directive = value +# dIrEcTiVe=value +``` + +The following parser directive is supported: + +* `escape` + +## escape + + # escape=\ (backslash) + +Or + + # escape=` (backtick) + +The `escape` directive sets the character used to escape characters in a +`Dockerfile`. If not specified, the default escape character is `\`. + +The escape character is used both to escape characters in a line, and to +escape a newline. This allows a `Dockerfile` instruction to +span multiple lines. Note that regardless of whether the `escape` parser +directive is included in a `Dockerfile`, *escaping is not performed in +a `RUN` command, except at the end of a line.* + +Setting the escape character to `` ` `` is especially useful on +`Windows`, where `\` is the directory path separator. `` ` `` is consistent +with [Windows PowerShell](https://technet.microsoft.com/en-us/library/hh847755.aspx). + +Consider the following example which would fail in a non-obvious way on +`Windows`. The second `\` at the end of the second line would be interpreted as an +escape for the newline, instead of a target of the escape from the first `\`. +Similarly, the `\` at the end of the third line would, assuming it was actually +handled as an instruction, cause it be treated as a line continuation. The result +of this dockerfile is that second and third lines are considered a single +instruction: + +```Dockerfile +FROM microsoft/nanoserver +COPY testfile.txt c:\\ +RUN dir c:\ +``` + +Results in: + + PS C:\John> docker build -t cmd . + Sending build context to Docker daemon 3.072 kB + Step 1/2 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/2 : COPY testfile.txt c:\RUN dir c: + GetFileAttributesEx c:RUN: The system cannot find the file specified. + PS C:\John> + +One solution to the above would be to use `/` as the target of both the `COPY` +instruction, and `dir`. However, this syntax is, at best, confusing as it is not +natural for paths on `Windows`, and at worst, error prone as not all commands on +`Windows` support `/` as the path separator. + +By adding the `escape` parser directive, the following `Dockerfile` succeeds as +expected with the use of natural platform semantics for file paths on `Windows`: + + # escape=` + + FROM microsoft/nanoserver + COPY testfile.txt c:\ + RUN dir c:\ + +Results in: + + PS C:\John> docker build -t succeeds --no-cache=true . + Sending build context to Docker daemon 3.072 kB + Step 1/3 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/3 : COPY testfile.txt c:\ + ---> 96655de338de + Removing intermediate container 4db9acbb1682 + Step 3/3 : RUN dir c:\ + ---> Running in a2c157f842f5 + Volume in drive C has no label. + Volume Serial Number is 7E6D-E0F7 + + Directory of c:\ + + 10/05/2016 05:04 PM 1,894 License.txt + 10/05/2016 02:22 PM

Program Files + 10/05/2016 02:14 PM Program Files (x86) + 10/28/2016 11:18 AM 62 testfile.txt + 10/28/2016 11:20 AM Users + 10/28/2016 11:20 AM Windows + 2 File(s) 1,956 bytes + 4 Dir(s) 21,259,096,064 bytes free + ---> 01c7f3bef04f + Removing intermediate container a2c157f842f5 + Successfully built 01c7f3bef04f + PS C:\John> + +## Environment replacement + +Environment variables (declared with [the `ENV` statement](#env)) can also be +used in certain instructions as variables to be interpreted by the +`Dockerfile`. Escapes are also handled for including variable-like syntax +into a statement literally. + +Environment variables are notated in the `Dockerfile` either with +`$variable_name` or `${variable_name}`. They are treated equivalently and the +brace syntax is typically used to address issues with variable names with no +whitespace, like `${foo}_bar`. + +The `${variable_name}` syntax also supports a few of the standard `bash` +modifiers as specified below: + +* `${variable:-word}` indicates that if `variable` is set then the result + will be that value. If `variable` is not set then `word` will be the result. +* `${variable:+word}` indicates that if `variable` is set then `word` will be + the result, otherwise the result is the empty string. + +In all cases, `word` can be any string, including additional environment +variables. + +Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, +for example, will translate to `$foo` and `${foo}` literals respectively. + +Example (parsed representation is displayed after the `#`): + + FROM busybox + ENV foo /bar + WORKDIR ${foo} # WORKDIR /bar + ADD . $foo # ADD . /bar + COPY \$foo /quux # COPY $foo /quux + +Environment variables are supported by the following list of instructions in +the `Dockerfile`: + +* `ADD` +* `COPY` +* `ENV` +* `EXPOSE` +* `LABEL` +* `USER` +* `WORKDIR` +* `VOLUME` +* `STOPSIGNAL` + +as well as: + +* `ONBUILD` (when combined with one of the supported instructions above) + +> **Note**: +> prior to 1.4, `ONBUILD` instructions did **NOT** support environment +> variable, even when combined with any of the instructions listed above. + +Environment variable substitution will use the same value for each variable +throughout the entire command. In other words, in this example: + + ENV abc=hello + ENV abc=bye def=$abc + ENV ghi=$abc + +will result in `def` having a value of `hello`, not `bye`. However, +`ghi` will have a value of `bye` because it is not part of the same command +that set `abc` to `bye`. + +## .dockerignore file + +Before the docker CLI sends the context to the docker daemon, it looks +for a file named `.dockerignore` in the root directory of the context. +If this file exists, the CLI modifies the context to exclude files and +directories that match patterns in it. This helps to avoid +unnecessarily sending large or sensitive files and directories to the +daemon and potentially adding them to images using `ADD` or `COPY`. + +The CLI interprets the `.dockerignore` file as a newline-separated +list of patterns similar to the file globs of Unix shells. For the +purposes of matching, the root of the context is considered to be both +the working and the root directory. For example, the patterns +`/foo/bar` and `foo/bar` both exclude a file or directory named `bar` +in the `foo` subdirectory of `PATH` or in the root of the git +repository located at `URL`. Neither excludes anything else. + +If a line in `.dockerignore` file starts with `#` in column 1, then this line is +considered as a comment and is ignored before interpreted by the CLI. + +Here is an example `.dockerignore` file: + +``` +# comment + */temp* + */*/temp* + temp? +``` + +This file causes the following build behavior: + +| Rule | Behavior | +|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `# comment` | Ignored. | +| `*/temp*` | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. | +| `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. | +| `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. + + +Matching is done using Go's +[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. A +preprocessing step removes leading and trailing whitespace and +eliminates `.` and `..` elements using Go's +[filepath.Clean](http://golang.org/pkg/path/filepath/#Clean). Lines +that are blank after preprocessing are ignored. + +Beyond Go's filepath.Match rules, Docker also supports a special +wildcard string `**` that matches any number of directories (including +zero). For example, `**/*.go` will exclude all files that end with `.go` +that are found in all directories, including the root of the build context. + +Lines starting with `!` (exclamation mark) can be used to make exceptions +to exclusions. The following is an example `.dockerignore` file that +uses this mechanism: + +``` + *.md + !README.md +``` + +All markdown files *except* `README.md` are excluded from the context. + +The placement of `!` exception rules influences the behavior: the last +line of the `.dockerignore` that matches a particular file determines +whether it is included or excluded. Consider the following example: + +``` + *.md + !README*.md + README-secret.md +``` + +No markdown files are included in the context except README files other than +`README-secret.md`. + +Now consider this example: + +``` + *.md + README-secret.md + !README*.md +``` + +All of the README files are included. The middle line has no effect because +`!README*.md` matches `README-secret.md` and comes last. + +You can even use the `.dockerignore` file to exclude the `Dockerfile` +and `.dockerignore` files. These files are still sent to the daemon +because it needs them to do its job. But the `ADD` and `COPY` commands +do not copy them to the image. + +Finally, you may want to specify which files to include in the +context, rather than which to exclude. To achieve this, specify `*` as +the first pattern, followed by one or more `!` exception patterns. + +**Note**: For historical reasons, the pattern `.` is ignored. + +## FROM + + FROM + +Or + + FROM : + +Or + + FROM @ + +The `FROM` instruction sets the [*Base Image*](glossary.md#base-image) +for subsequent instructions. As such, a valid `Dockerfile` must have `FROM` as +its first instruction. The image can be any valid image – it is especially easy +to start by **pulling an image** from the [*Public Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/). + +- `FROM` must be the first non-comment instruction in the `Dockerfile`. + +- `FROM` can appear multiple times within a single `Dockerfile` in order to create +multiple images. Simply make a note of the last image ID output by the commit +before each new `FROM` command. + +- The `tag` or `digest` values are optional. If you omit either of them, the builder +assumes a `latest` by default. The builder returns an error if it cannot match +the `tag` value. + +## RUN + +RUN has 2 forms: + +- `RUN ` (*shell* form, the command is run in a shell, which by +default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows) +- `RUN ["executable", "param1", "param2"]` (*exec* form) + +The `RUN` instruction will execute any commands in a new layer on top of the +current image and commit the results. The resulting committed image will be +used for the next step in the `Dockerfile`. + +Layering `RUN` instructions and generating commits conforms to the core +concepts of Docker where commits are cheap and containers can be created from +any point in an image's history, much like source control. + +The *exec* form makes it possible to avoid shell string munging, and to `RUN` +commands using a base image that does not contain the specified shell executable. + +The default shell for the *shell* form can be changed using the `SHELL` +command. + +In the *shell* form you can use a `\` (backslash) to continue a single +RUN instruction onto the next line. For example, consider these two lines: + +``` +RUN /bin/bash -c 'source $HOME/.bashrc; \ +echo $HOME' +``` +Together they are equivalent to this single line: + +``` +RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME' +``` + +> **Note**: +> To use a different shell, other than '/bin/sh', use the *exec* form +> passing in the desired shell. For example, +> `RUN ["/bin/bash", "-c", "echo hello"]` + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. +> +> **Note**: +> In the *JSON* form, it is necessary to escape backslashes. This is +> particularly relevant on Windows where the backslash is the path separator. +> The following line would otherwise be treated as *shell* form due to not +> being valid JSON, and fail in an unexpected way: +> `RUN ["c:\windows\system32\tasklist.exe"]` +> The correct syntax for this example is: +> `RUN ["c:\\windows\\system32\\tasklist.exe"]` + +The cache for `RUN` instructions isn't invalidated automatically during +the next build. The cache for an instruction like +`RUN apt-get dist-upgrade -y` will be reused during the next build. The +cache for `RUN` instructions can be invalidated by using the `--no-cache` +flag, for example `docker build --no-cache`. + +See the [`Dockerfile` Best Practices +guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. + +The cache for `RUN` instructions can be invalidated by `ADD` instructions. See +[below](#add) for details. + +### Known issues (RUN) + +- [Issue 783](https://github.com/docker/docker/issues/783) is about file + permissions problems that can occur when using the AUFS file system. You + might notice it during an attempt to `rm` a file, for example. + + For systems that have recent aufs version (i.e., `dirperm1` mount option can + be set), docker will attempt to fix the issue automatically by mounting + the layers with `dirperm1` option. More details on `dirperm1` option can be + found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs) + + If your system doesn't have support for `dirperm1`, the issue describes a workaround. + +## CMD + +The `CMD` instruction has three forms: + +- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form) +- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) +- `CMD command param1 param2` (*shell* form) + +There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD` +then only the last `CMD` will take effect. + +**The main purpose of a `CMD` is to provide defaults for an executing +container.** These defaults can include an executable, or they can omit +the executable, in which case you must specify an `ENTRYPOINT` +instruction as well. + +> **Note**: +> If `CMD` is used to provide default arguments for the `ENTRYPOINT` +> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified +> with the JSON array format. + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. + +When used in the shell or exec formats, the `CMD` instruction sets the command +to be executed when running the image. + +If you use the *shell* form of the `CMD`, then the `` will execute in +`/bin/sh -c`: + + FROM ubuntu + CMD echo "This is a test." | wc - + +If you want to **run your** `` **without a shell** then you must +express the command as a JSON array and give the full path to the executable. +**This array form is the preferred format of `CMD`.** Any additional parameters +must be individually expressed as strings in the array: + + FROM ubuntu + CMD ["/usr/bin/wc","--help"] + +If you would like your container to run the same executable every time, then +you should consider using `ENTRYPOINT` in combination with `CMD`. See +[*ENTRYPOINT*](#entrypoint). + +If the user specifies arguments to `docker run` then they will override the +default specified in `CMD`. + +> **Note**: +> Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits +> the result; `CMD` does not execute anything at build time, but specifies +> the intended command for the image. + +## LABEL + + LABEL = = = ... + +The `LABEL` instruction adds metadata to an image. A `LABEL` is a +key-value pair. To include spaces within a `LABEL` value, use quotes and +backslashes as you would in command-line parsing. A few usage examples: + + LABEL "com.example.vendor"="ACME Incorporated" + LABEL com.example.label-with-value="foo" + LABEL version="1.0" + LABEL description="This text illustrates \ + that label-values can span multiple lines." + +An image can have more than one label. To specify multiple labels, +Docker recommends combining labels into a single `LABEL` instruction where +possible. Each `LABEL` instruction produces a new layer which can result in an +inefficient image if you use many labels. This example results in a single image +layer. + + LABEL multi.label1="value1" multi.label2="value2" other="value3" + +The above can also be written as: + + LABEL multi.label1="value1" \ + multi.label2="value2" \ + other="value3" + +Labels are additive including `LABEL`s in `FROM` images. If Docker +encounters a label/key that already exists, the new value overrides any previous +labels with identical keys. + +To view an image's labels, use the `docker inspect` command. + + "Labels": { + "com.example.vendor": "ACME Incorporated" + "com.example.label-with-value": "foo", + "version": "1.0", + "description": "This text illustrates that label-values can span multiple lines.", + "multi.label1": "value1", + "multi.label2": "value2", + "other": "value3" + }, + +## MAINTAINER (deprecated) + + MAINTAINER + +The `MAINTAINER` instruction sets the *Author* field of the generated images. +The `LABEL` instruction is a much more flexible version of this and you should use +it instead, as it enables setting any metadata you require, and can be viewed +easily, for example with `docker inspect`. To set a label corresponding to the +`MAINTAINER` field you could use: + + LABEL maintainer "SvenDowideit@home.org.au" + +This will then be visible from `docker inspect` with the other labels. + +## EXPOSE + + EXPOSE [...] + +The `EXPOSE` instruction informs Docker that the container listens on the +specified network ports at runtime. `EXPOSE` does not make the ports of the +container accessible to the host. To do that, you must use either the `-p` flag +to publish a range of ports or the `-P` flag to publish all of the exposed +ports. You can expose one port number and publish it externally under another +number. + +To set up port redirection on the host system, see [using the -P +flag](run.md#expose-incoming-ports). The Docker network feature supports +creating networks without the need to expose ports within the network, for +detailed information see the [overview of this +feature](https://docs.docker.com/engine/userguide/networking/)). + +## ENV + + ENV + ENV = ... + +The `ENV` instruction sets the environment variable `` to the value +``. This value will be in the environment of all "descendant" +`Dockerfile` commands and can be [replaced inline](#environment-replacement) in +many as well. + +The `ENV` instruction has two forms. The first form, `ENV `, +will set a single variable to a value. The entire string after the first +space will be treated as the `` - including characters such as +spaces and quotes. + +The second form, `ENV = ...`, allows for multiple variables to +be set at one time. Notice that the second form uses the equals sign (=) +in the syntax, while the first form does not. Like command line parsing, +quotes and backslashes can be used to include spaces within values. + +For example: + + ENV myName="John Doe" myDog=Rex\ The\ Dog \ + myCat=fluffy + +and + + ENV myName John Doe + ENV myDog Rex The Dog + ENV myCat fluffy + +will yield the same net results in the final image, but the first form +is preferred because it produces a single cache layer. + +The environment variables set using `ENV` will persist when a container is run +from the resulting image. You can view the values using `docker inspect`, and +change them using `docker run --env =`. + +> **Note**: +> Environment persistence can cause unexpected side effects. For example, +> setting `ENV DEBIAN_FRONTEND noninteractive` may confuse apt-get +> users on a Debian-based image. To set a value for a single command, use +> `RUN = `. + +## ADD + +ADD has two forms: + +- `ADD ... ` +- `ADD ["",... ""]` (this form is required for paths containing +whitespace) + +The `ADD` instruction copies new files, directories or remote file URLs from `` +and adds them to the filesystem of the image at the path ``. + +Multiple `` resource may be specified but if they are files or +directories then they must be relative to the source directory that is +being built (the context of the build). + +Each `` may contain wildcards and matching will be done using Go's +[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example: + + ADD hom* /mydir/ # adds all files starting with "hom" + ADD hom?.txt /mydir/ # ? is replaced with any single character, e.g., "home.txt" + +The `` is an absolute path, or a path relative to `WORKDIR`, into which +the source will be copied inside the destination container. + + ADD test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ + ADD test /absoluteDir/ # adds "test" to /absoluteDir/ + +All new files and directories are created with a UID and GID of 0. + +In the case where `` is a remote file URL, the destination will +have permissions of 600. If the remote file being retrieved has an HTTP +`Last-Modified` header, the timestamp from that header will be used +to set the `mtime` on the destination file. However, like any other file +processed during an `ADD`, `mtime` will not be included in the determination +of whether or not the file has changed and the cache should be updated. + +> **Note**: +> If you build by passing a `Dockerfile` through STDIN (`docker +> build - < somefile`), there is no build context, so the `Dockerfile` +> can only contain a URL based `ADD` instruction. You can also pass a +> compressed archive through STDIN: (`docker build - < archive.tar.gz`), +> the `Dockerfile` at the root of the archive and the rest of the +> archive will be used as the context of the build. + +> **Note**: +> If your URL files are protected using authentication, you +> will need to use `RUN wget`, `RUN curl` or use another tool from +> within the container as the `ADD` instruction does not support +> authentication. + +> **Note**: +> The first encountered `ADD` instruction will invalidate the cache for all +> following instructions from the Dockerfile if the contents of `` have +> changed. This includes invalidating the cache for `RUN` instructions. +> See the [`Dockerfile` Best Practices +guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. + + +`ADD` obeys the following rules: + +- The `` path must be inside the *context* of the build; + you cannot `ADD ../something /something`, because the first step of a + `docker build` is to send the context directory (and subdirectories) to the + docker daemon. + +- If `` is a URL and `` does not end with a trailing slash, then a + file is downloaded from the URL and copied to ``. + +- If `` is a URL and `` does end with a trailing slash, then the + filename is inferred from the URL and the file is downloaded to + `/`. For instance, `ADD http://example.com/foobar /` would + create the file `/foobar`. The URL must have a nontrivial path so that an + appropriate filename can be discovered in this case (`http://example.com` + will not work). + +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. + +> **Note**: +> The directory itself is not copied, just its contents. + +- If `` is a *local* tar archive in a recognized compression format + (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources + from *remote* URLs are **not** decompressed. When a directory is copied or + unpacked, it has the same behavior as `tar -x`, the result is the union of: + + 1. Whatever existed at the destination path and + 2. The contents of the source tree, with conflicts resolved in favor + of "2." on a file-by-file basis. + + > **Note**: + > Whether a file is identified as a recognized compression format or not + > is done solely based on the contents of the file, not the name of the file. + > For example, if an empty file happens to end with `.tar.gz` this will not + > be recognized as a compressed file and **will not** generate any kind of + > decompression error message, rather the file will simply be copied to the + > destination. + +- If `` is any other kind of file, it is copied individually along with + its metadata. In this case, if `` ends with a trailing slash `/`, it + will be considered a directory and the contents of `` will be written + at `/base()`. + +- If multiple `` resources are specified, either directly or due to the + use of a wildcard, then `` must be a directory, and it must end with + a slash `/`. + +- If `` does not end with a trailing slash, it will be considered a + regular file and the contents of `` will be written at ``. + +- If `` doesn't exist, it is created along with all missing directories + in its path. + +## COPY + +COPY has two forms: + +- `COPY ... ` +- `COPY ["",... ""]` (this form is required for paths containing +whitespace) + +The `COPY` instruction copies new files or directories from `` +and adds them to the filesystem of the container at the path ``. + +Multiple `` resource may be specified but they must be relative +to the source directory that is being built (the context of the build). + +Each `` may contain wildcards and matching will be done using Go's +[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example: + + COPY hom* /mydir/ # adds all files starting with "hom" + COPY hom?.txt /mydir/ # ? is replaced with any single character, e.g., "home.txt" + +The `` is an absolute path, or a path relative to `WORKDIR`, into which +the source will be copied inside the destination container. + + COPY test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ + COPY test /absoluteDir/ # adds "test" to /absoluteDir/ + +All new files and directories are created with a UID and GID of 0. + +> **Note**: +> If you build using STDIN (`docker build - < somefile`), there is no +> build context, so `COPY` can't be used. + +`COPY` obeys the following rules: + +- The `` path must be inside the *context* of the build; + you cannot `COPY ../something /something`, because the first step of a + `docker build` is to send the context directory (and subdirectories) to the + docker daemon. + +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. + +> **Note**: +> The directory itself is not copied, just its contents. + +- If `` is any other kind of file, it is copied individually along with + its metadata. In this case, if `` ends with a trailing slash `/`, it + will be considered a directory and the contents of `` will be written + at `/base()`. + +- If multiple `` resources are specified, either directly or due to the + use of a wildcard, then `` must be a directory, and it must end with + a slash `/`. + +- If `` does not end with a trailing slash, it will be considered a + regular file and the contents of `` will be written at ``. + +- If `` doesn't exist, it is created along with all missing directories + in its path. + +## ENTRYPOINT + +ENTRYPOINT has two forms: + +- `ENTRYPOINT ["executable", "param1", "param2"]` + (*exec* form, preferred) +- `ENTRYPOINT command param1 param2` + (*shell* form) + +An `ENTRYPOINT` allows you to configure a container that will run as an executable. + +For example, the following will start nginx with its default content, listening +on port 80: + + docker run -i -t --rm -p 80:80 nginx + +Command line arguments to `docker run ` will be appended after all +elements in an *exec* form `ENTRYPOINT`, and will override all elements specified +using `CMD`. +This allows arguments to be passed to the entry point, i.e., `docker run -d` +will pass the `-d` argument to the entry point. +You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint` +flag. + +The *shell* form prevents any `CMD` or `run` command line arguments from being +used, but has the disadvantage that your `ENTRYPOINT` will be started as a +subcommand of `/bin/sh -c`, which does not pass signals. +This means that the executable will not be the container's `PID 1` - and +will _not_ receive Unix signals - so your executable will not receive a +`SIGTERM` from `docker stop `. + +Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect. + +### Exec form ENTRYPOINT example + +You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands +and arguments and then use either form of `CMD` to set additional defaults that +are more likely to be changed. + + FROM ubuntu + ENTRYPOINT ["top", "-b"] + CMD ["-c"] + +When you run the container, you can see that `top` is the only process: + + $ docker run -it --rm --name test top -H + top - 08:25:00 up 7:27, 0 users, load average: 0.00, 0.01, 0.05 + Threads: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + %Cpu(s): 0.1 us, 0.1 sy, 0.0 ni, 99.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st + KiB Mem: 2056668 total, 1616832 used, 439836 free, 99352 buffers + KiB Swap: 1441840 total, 0 used, 1441840 free. 1324440 cached Mem + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 19744 2336 2080 R 0.0 0.1 0:00.04 top + +To examine the result further, you can use `docker exec`: + + $ docker exec -it test ps aux + USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND + root 1 2.6 0.1 19752 2352 ? Ss+ 08:24 0:00 top -b -H + root 7 0.0 0.1 15572 2164 ? R+ 08:25 0:00 ps aux + +And you can gracefully request `top` to shut down using `docker stop test`. + +The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the +foreground (i.e., as `PID 1`): + +``` +FROM debian:stable +RUN apt-get update && apt-get install -y --force-yes apache2 +EXPOSE 80 443 +VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"] +ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] +``` + +If you need to write a starter script for a single executable, you can ensure that +the final executable receives the Unix signals by using `exec` and `gosu` +commands: + +```bash +#!/bin/bash +set -e + +if [ "$1" = 'postgres' ]; then + chown -R postgres "$PGDATA" + + if [ -z "$(ls -A "$PGDATA")" ]; then + gosu postgres initdb + fi + + exec gosu postgres "$@" +fi + +exec "$@" +``` + +Lastly, if you need to do some extra cleanup (or communicate with other containers) +on shutdown, or are co-ordinating more than one executable, you may need to ensure +that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then +does some more work: + +``` +#!/bin/sh +# Note: I've written this using sh so it works in the busybox container too + +# USE the trap if you need to also do manual cleanup after the service is stopped, +# or need to start multiple services in the one container +trap "echo TRAPed signal" HUP INT QUIT TERM + +# start service in background here +/usr/sbin/apachectl start + +echo "[hit enter key to exit] or run 'docker stop '" +read + +# stop service and clean up here +echo "stopping apache" +/usr/sbin/apachectl stop + +echo "exited $0" +``` + +If you run this image with `docker run -it --rm -p 80:80 --name test apache`, +you can then examine the container's processes with `docker exec`, or `docker top`, +and then ask the script to stop Apache: + +```bash +$ docker exec -it test ps aux +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.1 0.0 4448 692 ? Ss+ 00:42 0:00 /bin/sh /run.sh 123 cmd cmd2 +root 19 0.0 0.2 71304 4440 ? Ss 00:42 0:00 /usr/sbin/apache2 -k start +www-data 20 0.2 0.2 360468 6004 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +www-data 21 0.2 0.2 360468 6000 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +root 81 0.0 0.1 15572 2140 ? R+ 00:44 0:00 ps aux +$ docker top test +PID USER COMMAND +10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 +10054 root /usr/sbin/apache2 -k start +10055 33 /usr/sbin/apache2 -k start +10056 33 /usr/sbin/apache2 -k start +$ /usr/bin/time docker stop test +test +real 0m 0.27s +user 0m 0.03s +sys 0m 0.03s +``` + +> **Note:** you can override the `ENTRYPOINT` setting using `--entrypoint`, +> but this can only set the binary to *exec* (no `sh -c` will be used). + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. + +### Shell form ENTRYPOINT example + +You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`. +This form will use shell processing to substitute shell environment variables, +and will ignore any `CMD` or `docker run` command line arguments. +To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable +correctly, you need to remember to start it with `exec`: + + FROM ubuntu + ENTRYPOINT exec top -b + +When you run this image, you'll see the single `PID 1` process: + + $ docker run -it --rm --name test top + Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached + CPU: 5% usr 0% sys 0% nic 94% idle 0% io 0% irq 0% sirq + Load average: 0.08 0.03 0.05 2/98 6 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root R 3164 0% 0% top -b + +Which will exit cleanly on `docker stop`: + + $ /usr/bin/time docker stop test + test + real 0m 0.20s + user 0m 0.02s + sys 0m 0.04s + +If you forget to add `exec` to the beginning of your `ENTRYPOINT`: + + FROM ubuntu + ENTRYPOINT top -b + CMD --ignored-param1 + +You can then run it (giving it a name for the next step): + + $ docker run -it --name test top --ignored-param2 + Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached + CPU: 9% usr 2% sys 0% nic 88% idle 0% io 0% irq 0% sirq + Load average: 0.01 0.02 0.05 2/101 7 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root S 3168 0% 0% /bin/sh -c top -b cmd cmd2 + 7 1 root R 3164 0% 0% top -b + +You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`. + +If you then run `docker stop test`, the container will not exit cleanly - the +`stop` command will be forced to send a `SIGKILL` after the timeout: + + $ docker exec -it test ps aux + PID USER COMMAND + 1 root /bin/sh -c top -b cmd cmd2 + 7 root top -b + 8 root ps aux + $ /usr/bin/time docker stop test + test + real 0m 10.19s + user 0m 0.04s + sys 0m 0.03s + +### Understand how CMD and ENTRYPOINT interact + +Both `CMD` and `ENTRYPOINT` instructions define what command gets executed when running a container. +There are few rules that describe their co-operation. + +1. Dockerfile should specify at least one of `CMD` or `ENTRYPOINT` commands. + +2. `ENTRYPOINT` should be defined when using the container as an executable. + +3. `CMD` should be used as a way of defining default arguments for an `ENTRYPOINT` command +or for executing an ad-hoc command in a container. + +4. `CMD` will be overridden when running the container with alternative arguments. + +The table below shows what command is executed for different `ENTRYPOINT` / `CMD` combinations: + +| | No ENTRYPOINT | ENTRYPOINT exec_entry p1_entry | ENTRYPOINT ["exec_entry", "p1_entry"] | +|--------------------------------|----------------------------|--------------------------------|------------------------------------------------| +| **No CMD** | *error, not allowed* | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry | +| **CMD ["exec_cmd", "p1_cmd"]** | exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry exec_cmd p1_cmd | +| **CMD ["p1_cmd", "p2_cmd"]** | p1_cmd p2_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd | +| **CMD exec_cmd p1_cmd** | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd | + +## VOLUME + + VOLUME ["/data"] + +The `VOLUME` instruction creates a mount point with the specified name +and marks it as holding externally mounted volumes from native host or other +containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain +string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log +/var/db`. For more information/examples and mounting instructions via the +Docker client, refer to +[*Share Directories via Volumes*](https://docs.docker.com/engine/tutorials/dockervolumes/#/mount-a-host-directory-as-a-data-volume) +documentation. + +The `docker run` command initializes the newly created volume with any data +that exists at the specified location within the base image. For example, +consider the following Dockerfile snippet: + + FROM ubuntu + RUN mkdir /myvol + RUN echo "hello world" > /myvol/greeting + VOLUME /myvol + +This Dockerfile results in an image that causes `docker run`, to +create a new mount point at `/myvol` and copy the `greeting` file +into the newly created volume. + +> **Note**: +> If any build steps change the data within the volume after it has been +> declared, those changes will be discarded. + +> **Note**: +> The list is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +## USER + + USER daemon + +The `USER` instruction sets the user name or UID to use when running the image +and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the +`Dockerfile`. + +## WORKDIR + + WORKDIR /path/to/workdir + +The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`, +`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`. +If the `WORKDIR` doesn't exist, it will be created even if it's not used in any +subsequent `Dockerfile` instruction. + +It can be used multiple times in the one `Dockerfile`. If a relative path +is provided, it will be relative to the path of the previous `WORKDIR` +instruction. For example: + + WORKDIR /a + WORKDIR b + WORKDIR c + RUN pwd + +The output of the final `pwd` command in this `Dockerfile` would be +`/a/b/c`. + +The `WORKDIR` instruction can resolve environment variables previously set using +`ENV`. You can only use environment variables explicitly set in the `Dockerfile`. +For example: + + ENV DIRPATH /path + WORKDIR $DIRPATH/$DIRNAME + RUN pwd + +The output of the final `pwd` command in this `Dockerfile` would be +`/path/$DIRNAME` + +## ARG + + ARG [=] + +The `ARG` instruction defines a variable that users can pass at build-time to +the builder with the `docker build` command using the `--build-arg =` +flag. If a user specifies a build argument that was not +defined in the Dockerfile, the build outputs a warning. + +``` +[Warning] One or more build-args [foo] were not consumed. +``` + +The Dockerfile author can define a single variable by specifying `ARG` once or many +variables by specifying `ARG` more than once. For example, a valid Dockerfile: + +``` +FROM busybox +ARG user1 +ARG buildno +... +``` + +A Dockerfile author may optionally specify a default value for an `ARG` instruction: + +``` +FROM busybox +ARG user1=someuser +ARG buildno=1 +... +``` + +If an `ARG` value has a default and if there is no value passed at build-time, the +builder uses the default. + +An `ARG` variable definition comes into effect from the line on which it is +defined in the `Dockerfile` not from the argument's use on the command-line or +elsewhere. For example, consider this Dockerfile: + +``` +1 FROM busybox +2 USER ${user:-some_user} +3 ARG user +4 USER $user +... +``` +A user builds this file by calling: + +``` +$ docker build --build-arg user=what_user Dockerfile +``` + +The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the +subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is +defined and the `what_user` value was passed on the command line. Prior to its definition by an +`ARG` instruction, any use of a variable results in an empty string. + +> **Warning:** It is not recommended to use build-time variables for +> passing secrets like github keys, user credentials etc. Build-time variable +> values are visible to any user of the image with the `docker history` command. + +You can use an `ARG` or an `ENV` instruction to specify variables that are +available to the `RUN` instruction. Environment variables defined using the +`ENV` instruction always override an `ARG` instruction of the same name. Consider +this Dockerfile with an `ENV` and `ARG` instruction. + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER v1.0.0 +4 RUN echo $CONT_IMG_VER +``` +Then, assume this image is built with this command: + +``` +$ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile +``` + +In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting +passed by the user:`v2.0.1` This behavior is similar to a shell +script where a locally scoped variable overrides the variables passed as +arguments or inherited from environment, from its point of definition. + +Using the example above but a different `ENV` specification you can create more +useful interactions between `ARG` and `ENV` instructions: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} +4 RUN echo $CONT_IMG_VER +``` + +Unlike an `ARG` instruction, `ENV` values are always persisted in the built +image. Consider a docker build without the `--build-arg` flag: + +``` +$ docker build Dockerfile +``` + +Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but +its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. + +The variable expansion technique in this example allows you to pass arguments +from the command line and persist them in the final image by leveraging the +`ENV` instruction. Variable expansion is only supported for [a limited set of +Dockerfile instructions.](#environment-replacement) + +Docker has a set of predefined `ARG` variables that you can use without a +corresponding `ARG` instruction in the Dockerfile. + +* `HTTP_PROXY` +* `http_proxy` +* `HTTPS_PROXY` +* `https_proxy` +* `FTP_PROXY` +* `ftp_proxy` +* `NO_PROXY` +* `no_proxy` + +To use these, simply pass them on the command line using the flag: + +``` +--build-arg = +``` + +### Impact on build caching + +`ARG` variables are not persisted into the built image as `ENV` variables are. +However, `ARG` variables do impact the build cache in similar ways. If a +Dockerfile defines an `ARG` variable whose value is different from a previous +build, then a "cache miss" occurs upon its first usage, not its definition. In +particular, all `RUN` instructions following an `ARG` instruction use the `ARG` +variable implicitly (as an environment variable), thus can cause a cache miss. + +For example, consider these two Dockerfile: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 RUN echo $CONT_IMG_VER +``` + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 RUN echo hello +``` + +If you specify `--build-arg CONT_IMG_VER=` on the command line, in both +cases, the specification on line 2 does not cause a cache miss; line 3 does +cause a cache miss.`ARG CONT_IMG_VER` causes the RUN line to be identified +as the same as running `CONT_IMG_VER=` echo hello, so if the `` +changes, we get a cache miss. + +Consider another example under the same command line: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER $CONT_IMG_VER +4 RUN echo $CONT_IMG_VER +``` +In this example, the cache miss occurs on line 3. The miss happens because +the variable's value in the `ENV` references the `ARG` variable and that +variable is changed through the command line. In this example, the `ENV` +command causes the image to include the value. + +If an `ENV` instruction overrides an `ARG` instruction of the same name, like +this Dockerfile: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER hello +4 RUN echo $CONT_IMG_VER +``` + +Line 3 does not cause a cache miss because the value of `CONT_IMG_VER` is a +constant (`hello`). As a result, the environment variables and values used on +the `RUN` (line 4) doesn't change between builds. + +## ONBUILD + + ONBUILD [INSTRUCTION] + +The `ONBUILD` instruction adds to the image a *trigger* instruction to +be executed at a later time, when the image is used as the base for +another build. The trigger will be executed in the context of the +downstream build, as if it had been inserted immediately after the +`FROM` instruction in the downstream `Dockerfile`. + +Any build instruction can be registered as a trigger. + +This is useful if you are building an image which will be used as a base +to build other images, for example an application build environment or a +daemon which may be customized with user-specific configuration. + +For example, if your image is a reusable Python application builder, it +will require application source code to be added in a particular +directory, and it might require a build script to be called *after* +that. You can't just call `ADD` and `RUN` now, because you don't yet +have access to the application source code, and it will be different for +each application build. You could simply provide application developers +with a boilerplate `Dockerfile` to copy-paste into their application, but +that is inefficient, error-prone and difficult to update because it +mixes with application-specific code. + +The solution is to use `ONBUILD` to register advance instructions to +run later, during the next build stage. + +Here's how it works: + +1. When it encounters an `ONBUILD` instruction, the builder adds a + trigger to the metadata of the image being built. The instruction + does not otherwise affect the current build. +2. At the end of the build, a list of all triggers is stored in the + image manifest, under the key `OnBuild`. They can be inspected with + the `docker inspect` command. +3. Later the image may be used as a base for a new build, using the + `FROM` instruction. As part of processing the `FROM` instruction, + the downstream builder looks for `ONBUILD` triggers, and executes + them in the same order they were registered. If any of the triggers + fail, the `FROM` instruction is aborted which in turn causes the + build to fail. If all triggers succeed, the `FROM` instruction + completes and the build continues as usual. +4. Triggers are cleared from the final image after being executed. In + other words they are not inherited by "grand-children" builds. + +For example you might add something like this: + + [...] + ONBUILD ADD . /app/src + ONBUILD RUN /usr/local/bin/python-build --dir /app/src + [...] + +> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed. + +> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. + +## STOPSIGNAL + + STOPSIGNAL signal + +The `STOPSIGNAL` instruction sets the system call signal that will be sent to the container to exit. +This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, +or a signal name in the format SIGNAME, for instance SIGKILL. + +## HEALTHCHECK + +The `HEALTHCHECK` instruction has two forms: + +* `HEALTHCHECK [OPTIONS] CMD command` (check container health by running a command inside the container) +* `HEALTHCHECK NONE` (disable any healthcheck inherited from the base image) + +The `HEALTHCHECK` instruction tells Docker how to test a container to check that +it is still working. This can detect cases such as a web server that is stuck in +an infinite loop and unable to handle new connections, even though the server +process is still running. + +When a container has a healthcheck specified, it has a _health status_ in +addition to its normal status. This status is initially `starting`. Whenever a +health check passes, it becomes `healthy` (whatever state it was previously in). +After a certain number of consecutive failures, it becomes `unhealthy`. + +The options that can appear before `CMD` are: + +* `--interval=DURATION` (default: `30s`) +* `--timeout=DURATION` (default: `30s`) +* `--retries=N` (default: `3`) + +The health check will first run **interval** seconds after the container is +started, and then again **interval** seconds after each previous check completes. + +If a single run of the check takes longer than **timeout** seconds then the check +is considered to have failed. + +It takes **retries** consecutive failures of the health check for the container +to be considered `unhealthy`. + +There can only be one `HEALTHCHECK` instruction in a Dockerfile. If you list +more than one then only the last `HEALTHCHECK` will take effect. + +The command after the `CMD` keyword can be either a shell command (e.g. `HEALTHCHECK +CMD /bin/check-running`) or an _exec_ array (as with other Dockerfile commands; +see e.g. `ENTRYPOINT` for details). + +The command's exit status indicates the health status of the container. +The possible values are: + +- 0: success - the container is healthy and ready for use +- 1: unhealthy - the container is not working correctly +- 2: reserved - do not use this exit code + +For example, to check every five minutes or so that a web-server is able to +serve the site's main page within three seconds: + + HEALTHCHECK --interval=5m --timeout=3s \ + CMD curl -f http://localhost/ || exit 1 + +To help debug failing probes, any output text (UTF-8 encoded) that the command writes +on stdout or stderr will be stored in the health status and can be queried with +`docker inspect`. Such output should be kept short (only the first 4096 bytes +are stored currently). + +When the health status of a container changes, a `health_status` event is +generated with the new status. + +The `HEALTHCHECK` feature was added in Docker 1.12. + + +## SHELL + + SHELL ["executable", "parameters"] + +The `SHELL` instruction allows the default shell used for the *shell* form of +commands to be overridden. The default shell on Linux is `["/bin/sh", "-c"]`, and on +Windows is `["cmd", "/S", "/C"]`. The `SHELL` instruction *must* be written in JSON +form in a Dockerfile. + +The `SHELL` instruction is particularly useful on Windows where there are +two commonly used and quite different native shells: `cmd` and `powershell`, as +well as alternate shells available including `sh`. + +The `SHELL` instruction can appear multiple times. Each `SHELL` instruction overrides +all previous `SHELL` instructions, and affects all subsequent instructions. For example: + + FROM microsoft/windowsservercore + + # Executed as cmd /S /C echo default + RUN echo default + + # Executed as cmd /S /C powershell -command Write-Host default + RUN powershell -command Write-Host default + + # Executed as powershell -command Write-Host hello + SHELL ["powershell", "-command"] + RUN Write-Host hello + + # Executed as cmd /S /C echo hello + SHELL ["cmd", "/S"", "/C"] + RUN echo hello + +The following instructions can be affected by the `SHELL` instruction when the +*shell* form of them is used in a Dockerfile: `RUN`, `CMD` and `ENTRYPOINT`. + +The following example is a common pattern found on Windows which can be +streamlined by using the `SHELL` instruction: + + ... + RUN powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" + ... + +The command invoked by docker will be: + + cmd /S /C powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" + +This is inefficient for two reasons. First, there is an un-necessary cmd.exe command +processor (aka shell) being invoked. Second, each `RUN` instruction in the *shell* +form requires an extra `powershell -command` prefixing the command. + +To make this more efficient, one of two mechanisms can be employed. One is to +use the JSON form of the RUN command such as: + + ... + RUN ["powershell", "-command", "Execute-MyCmdlet", "-param1 \"c:\\foo.txt\""] + ... + +While the JSON form is unambiguous and does not use the un-necessary cmd.exe, +it does require more verbosity through double-quoting and escaping. The alternate +mechanism is to use the `SHELL` instruction and the *shell* form, +making a more natural syntax for Windows users, especially when combined with +the `escape` parser directive: + + # escape=` + + FROM microsoft/nanoserver + SHELL ["powershell","-command"] + RUN New-Item -ItemType Directory C:\Example + ADD Execute-MyCmdlet.ps1 c:\example\ + RUN c:\example\Execute-MyCmdlet -sample 'hello world' + +Resulting in: + + PS E:\docker\build\shell> docker build -t shell . + Sending build context to Docker daemon 4.096 kB + Step 1/5 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/5 : SHELL powershell -command + ---> Running in 6fcdb6855ae2 + ---> 6331462d4300 + Removing intermediate container 6fcdb6855ae2 + Step 3/5 : RUN New-Item -ItemType Directory C:\Example + ---> Running in d0eef8386e97 + + + Directory: C:\ + + + Mode LastWriteTime Length Name + ---- ------------- ------ ---- + d----- 10/28/2016 11:26 AM Example + + + ---> 3f2fbf1395d9 + Removing intermediate container d0eef8386e97 + Step 4/5 : ADD Execute-MyCmdlet.ps1 c:\example\ + ---> a955b2621c31 + Removing intermediate container b825593d39fc + Step 5/5 : RUN c:\example\Execute-MyCmdlet 'hello world' + ---> Running in be6d8e63fe75 + hello world + ---> 8e559e9bf424 + Removing intermediate container be6d8e63fe75 + Successfully built 8e559e9bf424 + PS E:\docker\build\shell> + +The `SHELL` instruction could also be used to modify the way in which +a shell operates. For example, using `SHELL cmd /S /C /V:ON|OFF` on Windows, delayed +environment variable expansion semantics could be modified. + +The `SHELL` instruction can also be used on Linux should an alternate shell be +required such as `zsh`, `csh`, `tcsh` and others. + +The `SHELL` feature was added in Docker 1.12. + +## Dockerfile examples + +Below you can see some examples of Dockerfile syntax. If you're interested in +something more realistic, take a look at the list of [Dockerization examples](https://docs.docker.com/engine/examples/). + +``` +# Nginx +# +# VERSION 0.0.1 + +FROM ubuntu +LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0" +RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server +``` + +``` +# Firefox over VNC +# +# VERSION 0.3 + +FROM ubuntu + +# Install vnc, xvfb in order to create a 'fake' display and firefox +RUN apt-get update && apt-get install -y x11vnc xvfb firefox +RUN mkdir ~/.vnc +# Setup a password +RUN x11vnc -storepasswd 1234 ~/.vnc/passwd +# Autostart firefox (might not be the best way, but it does the trick) +RUN bash -c 'echo "firefox" >> /.bashrc' + +EXPOSE 5900 +CMD ["x11vnc", "-forever", "-usepw", "-create"] +``` + +``` +# Multiple images example +# +# VERSION 0.1 + +FROM ubuntu +RUN echo foo > bar +# Will output something like ===> 907ad6c2736f + +FROM ubuntu +RUN echo moo > oink +# Will output something like ===> 695d7793cbe4 + +# You᾿ll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with +# /oink. +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/attach.md b/vendor/github.com/docker/docker/docs/reference/commandline/attach.md new file mode 100644 index 0000000..307068a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/attach.md @@ -0,0 +1,131 @@ +--- +title: "attach" +description: "The attach command description and usage" +keywords: "attach, running, container" +--- + + + +# attach + +```markdown +Usage: docker attach [OPTIONS] CONTAINER + +Attach to a running container + +Options: + --detach-keys string Override the key sequence for detaching a container + --help Print usage + --no-stdin Do not attach STDIN + --sig-proxy Proxy all received signals to the process (default true) +``` + +Use `docker attach` to attach to a running container using the container's ID +or name, either to view its ongoing output or to control it interactively. +You can attach to the same contained process multiple times simultaneously, +screen sharing style, or quickly view the progress of your detached process. + +To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the +container. If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` to +the container. You can detach from a container and leave it running using the + `CTRL-p CTRL-q` key sequence. + +> **Note:** +> A process running as PID 1 inside a container is treated specially by +> Linux: it ignores any signal with the default action. So, the process +> will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do +> so. + +It is forbidden to redirect the standard input of a `docker attach` command +while attaching to a tty-enabled container (i.e.: launched with `-t`). + +While a client is connected to container's stdio using `docker attach`, Docker +uses a ~1MB memory buffer to maximize the throughput of the application. If +this buffer is filled, the speed of the API connection will start to have an +effect on the process output writing speed. This is similar to other +applications like SSH. Because of this, it is not recommended to run +performance critical applications that generate a lot of output in the +foreground over a slow client connection. Instead, users should use the +`docker logs` command to get access to the logs. + + +## Override the detach sequence + +If you want, you can configure an override the Docker key sequence for detach. +This is useful if the Docker default sequence conflicts with key sequence you +use for other applications. There are two ways to define your own detach key +sequence, as a per-container override or as a configuration property on your +entire configuration. + +To override the sequence for an individual container, use the +`--detach-keys=""` flag with the `docker attach` command. The format of +the `` is either a letter [a-Z], or the `ctrl-` combined with any of +the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key +sequences. To configure a different configuration default key sequence for all +containers, see [**Configuration file** section](cli.md#configuration-files). + +#### Examples + + $ docker run -d --name topdemo ubuntu /usr/bin/top -b + $ docker attach topdemo + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + + + top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355780k used, 17792k free, 27880k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + ^C$ + $ echo $? + 0 + $ docker ps -a | grep topdemo + 7998ac8581f9 ubuntu:14.04 "/usr/bin/top -b" 38 seconds ago Exited (0) 21 seconds ago topdemo + +And in this second example, you can see the exit code returned by the `bash` +process is returned by the `docker attach` command to its caller too: + + $ docker run --name test -d -it debian + 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab + $ docker attach test + root@f38c87f2a42d:/# exit 13 + exit + $ echo $? + 13 + $ docker ps -a | grep test + 275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/build.md b/vendor/github.com/docker/docker/docs/reference/commandline/build.md new file mode 100644 index 0000000..42c3ecf --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/build.md @@ -0,0 +1,451 @@ +--- +title: "build" +description: "The build command description and usage" +keywords: "build, docker, image" +--- + + + +# build + +```markdown +Usage: docker build [OPTIONS] PATH | URL | - + +Build an image from a Dockerfile + +Options: + --build-arg value Set build-time variables (default []) + --cache-from value Images to consider as cache sources (default []) + --cgroup-parent string Optional parent cgroup for the container + --compress Compress the build context using gzip + --cpu-period int Limit the CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit the CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --disable-content-trust Skip image verification (default true) + -f, --file string Name of the Dockerfile (Default is 'PATH/Dockerfile') + --force-rm Always remove intermediate containers + --help Print usage + --isolation string Container isolation technology + --label value Set metadata for an image (default []) + -m, --memory string Memory limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --network string Set the networking mode for the RUN instructions during build + 'bridge': use default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-cache Do not use cache when building the image + --pull Always attempt to pull a newer version of the image + -q, --quiet Suppress the build output and print image ID on success + --rm Remove intermediate containers after a successful build (default true) + --security-opt value Security Options (default []) + --shm-size string Size of /dev/shm, default value is 64MB. + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --squash Squash newly built layers into a single new layer (**Experimental Only**) + -t, --tag value Name and optionally a tag in the 'name:tag' format (default []) + --ulimit value Ulimit options (default []) +``` + +Builds Docker images from a Dockerfile and a "context". A build's context is +the files located in the specified `PATH` or `URL`. The build process can refer +to any of the files in the context. For example, your build can use an +[*ADD*](../builder.md#add) instruction to reference a file in the +context. + +The `URL` parameter can refer to three kinds of resources: Git repositories, +pre-packaged tarball contexts and plain text files. + +### Git repositories + +When the `URL` parameter points to the location of a Git repository, the +repository acts as the build context. The system recursively clones the +repository and its submodules using a `git clone --depth 1 --recursive` +command. This command runs in a temporary directory on your local host. After +the command succeeds, the directory is sent to the Docker daemon as the +context. Local clones give you the ability to access private repositories using +local user credentials, VPN's, and so forth. + +Git URLs accept context configuration in their fragment section, separated by a +colon `:`. The first part represents the reference that Git will check out, +this can be either a branch, a tag, or a commit SHA. The second part represents +a subdirectory inside the repository that will be used as a build context. + +For example, run this command to use a directory called `docker` in the branch +`container`: + +```bash +$ docker build https://github.com/docker/rootfs.git#container:docker +``` + +The following table represents all the valid suffixes with their build +contexts: + +Build Syntax Suffix | Commit Used | Build Context Used +--------------------------------|-----------------------|------------------- +`myrepo.git` | `refs/heads/master` | `/` +`myrepo.git#mytag` | `refs/tags/mytag` | `/` +`myrepo.git#mybranch` | `refs/heads/mybranch` | `/` +`myrepo.git#abcdef` | `sha1 = abcdef` | `/` +`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder` +`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder` +`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder` +`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder` +`myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder` + + +### Tarball contexts + +If you pass an URL to a remote tarball, the URL itself is sent to the daemon: + +Instead of specifying a context, you can pass a single Dockerfile in the `URL` +or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`: + +```bash +$ docker build http://server/context.tar.gz +``` + +The download operation will be performed on the host the Docker daemon is +running on, which is not necessarily the same host from which the build command +is being issued. The Docker daemon will fetch `context.tar.gz` and use it as the +build context. Tarball contexts must be tar archives conforming to the standard +`tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2', +'gzip' or 'identity' (no compression) formats. + +### Text files + +Instead of specifying a context, you can pass a single `Dockerfile` in the +`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`: + +```bash +$ docker build - < Dockerfile +``` + +With Powershell on Windows, you can run: + +```powershell +Get-Content Dockerfile | docker build - +``` + +If you use `STDIN` or specify a `URL` pointing to a plain text file, the system +places the contents into a file called `Dockerfile`, and any `-f`, `--file` +option is ignored. In this scenario, there is no context. + +By default the `docker build` command will look for a `Dockerfile` at the root +of the build context. The `-f`, `--file`, option lets you specify the path to +an alternative file to use instead. This is useful in cases where the same set +of files are used for multiple builds. The path must be to a file within the +build context. If a relative path is specified then it is interpreted as +relative to the root of the context. + +In most cases, it's best to put each Dockerfile in an empty directory. Then, +add to that directory only the files needed for building the Dockerfile. To +increase the build's performance, you can exclude files and directories by +adding a `.dockerignore` file to that directory as well. For information on +creating one, see the [.dockerignore file](../builder.md#dockerignore-file). + +If the Docker client loses connection to the daemon, the build is canceled. +This happens if you interrupt the Docker client with `CTRL-c` or if the Docker +client is killed for any reason. If the build initiated a pull which is still +running at the time the build is cancelled, the pull is cancelled as well. + +## Return code + +On a successful build, a return code of success `0` will be returned. When the +build fails, a non-zero failure code will be returned. + +There should be informational output of the reason for failure output to +`STDERR`: + +```bash +$ docker build -t fail . + +Sending build context to Docker daemon 2.048 kB +Sending build context to Docker daemon +Step 1/3 : FROM busybox + ---> 4986bf8c1536 +Step 2/3 : RUN exit 13 + ---> Running in e26670ec7a0a +INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13 +$ echo $? +1 +``` + +See also: + +[*Dockerfile Reference*](../builder.md). + +## Examples + +### Build with PATH + +```bash +$ docker build . + +Uploading context 10240 bytes +Step 1/3 : FROM busybox +Pulling repository busybox + ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ +Step 2/3 : RUN ls -lh / + ---> Running in 9c9e81692ae9 +total 24 +drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin +drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev +drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib +lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib +dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc +lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin +dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys +drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr + ---> b35f4035db3f +Step 3/3 : CMD echo Hello world + ---> Running in 02071fceb21b + ---> f52f38b7823e +Successfully built f52f38b7823e +Removing intermediate container 9c9e81692ae9 +Removing intermediate container 02071fceb21b +``` + +This example specifies that the `PATH` is `.`, and so all the files in the +local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies +where to find the files for the "context" of the build on the Docker daemon. +Remember that the daemon could be running on a remote machine and that no +parsing of the Dockerfile happens at the client side (where you're running +`docker build`). That means that *all* the files at `PATH` get sent, not just +the ones listed to [*ADD*](../builder.md#add) in the Dockerfile. + +The transfer of context from the local machine to the Docker daemon is what the +`docker` client means when you see the "Sending build context" message. + +If you wish to keep the intermediate containers after the build is complete, +you must use `--rm=false`. This does not affect the build cache. + +### Build with URL + +```bash +$ docker build github.com/creack/docker-firefox +``` + +This will clone the GitHub repository and use the cloned repository as context. +The Dockerfile at the root of the repository is used as Dockerfile. You can +specify an arbitrary Git repository by using the `git://` or `git@` scheme. + +```bash +$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz + +Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B +Step 1/3 : FROM busybox + ---> 8c2e06607696 +Step 2/3 : ADD ctx/container.cfg / + ---> e7829950cee3 +Removing intermediate container b35224abf821 +Step 3/3 : CMD /bin/ls + ---> Running in fbc63d321d73 + ---> 3286931702ad +Removing intermediate container fbc63d321d73 +Successfully built 377c409b35e4 +``` + +This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which +downloads and extracts the referenced tarball. The `-f ctx/Dockerfile` +parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that is used +to build the image. Any `ADD` commands in that `Dockerfile` that refer to local +paths must be relative to the root of the contents inside `ctx.tar.gz`. In the +example above, the tarball contains a directory `ctx/`, so the `ADD +ctx/container.cfg /` operation works as expected. + +### Build with - + +```bash +$ docker build - < Dockerfile +``` + +This will read a Dockerfile from `STDIN` without context. Due to the lack of a +context, no contents of any local directory will be sent to the Docker daemon. +Since there is no context, a Dockerfile `ADD` only works if it refers to a +remote URL. + +```bash +$ docker build - < context.tar.gz +``` + +This will build an image for a compressed context read from `STDIN`. Supported +formats are: bzip2, gzip and xz. + +### Usage of .dockerignore + +```bash +$ docker build . + +Uploading context 18.829 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +$ echo ".git" > .dockerignore +$ docker build . +Uploading context 6.76 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +``` + +This example shows the use of the `.dockerignore` file to exclude the `.git` +directory from the context. Its effect can be seen in the changed size of the +uploaded context. The builder reference contains detailed information on +[creating a .dockerignore file](../builder.md#dockerignore-file) + +### Tag image (-t) + +```bash +$ docker build -t vieux/apache:2.0 . +``` + +This will build like the previous example, but it will then tag the resulting +image. The repository name will be `vieux/apache` and the tag will be `2.0`. +[Read more about valid tags](tag.md). + +You can apply multiple tags to an image. For example, you can apply the `latest` +tag to a newly built image and add another tag that references a specific +version. +For example, to tag an image both as `whenry/fedora-jboss:latest` and +`whenry/fedora-jboss:v2.1`, use the following: + +```bash +$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . +``` +### Specify Dockerfile (-f) + +```bash +$ docker build -f Dockerfile.debug . +``` + +This will use a file called `Dockerfile.debug` for the build instructions +instead of `Dockerfile`. + +```bash +$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . +$ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . +``` + +The above commands will build the current build context (as specified by the +`.`) twice, once using a debug version of a `Dockerfile` and once using a +production version. + +```bash +$ cd /home/me/myapp/some/dir/really/deep +$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp +$ docker build -f ../../../../dockerfiles/debug /home/me/myapp +``` + +These two `docker build` commands do the exact same thing. They both use the +contents of the `debug` file instead of looking for a `Dockerfile` and will use +`/home/me/myapp` as the root of the build context. Note that `debug` is in the +directory structure of the build context, regardless of how you refer to it on +the command line. + +> **Note:** +> `docker build` will return a `no such file or directory` error if the +> file or directory does not exist in the uploaded context. This may +> happen if there is no context, or if you specify a file that is +> elsewhere on the Host system. The context is limited to the current +> directory (and its children) for security reasons, and to ensure +> repeatable builds on remote Docker hosts. This is also the reason why +> `ADD ../file` will not work. + +### Optional parent cgroup (--cgroup-parent) + +When `docker build` is run with the `--cgroup-parent` option the containers +used in the build will be run with the [corresponding `docker run` +flag](../run.md#specifying-custom-cgroups). + +### Set ulimits in container (--ulimit) + +Using the `--ulimit` option with `docker build` will cause each build step's +container to be started using those [`--ulimit` +flag values](./run.md#set-ulimits-in-container-ulimit). + +### Set build-time variables (--build-arg) + +You can use `ENV` instructions in a Dockerfile to define variable +values. These values persist in the built image. However, often +persistence is not what you want. Users want to specify variables differently +depending on which host they build an image on. + +A good example is `http_proxy` or source versions for pulling intermediate +files. The `ARG` instruction lets Dockerfile authors define values that users +can set at build-time using the `--build-arg` flag: + +```bash +$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 . +``` + +This flag allows you to pass the build-time variables that are +accessed like regular environment variables in the `RUN` instruction of the +Dockerfile. Also, these values don't persist in the intermediate or final images +like `ENV` values do. + +Using this flag will not alter the output you see when the `ARG` lines from the +Dockerfile are echoed during the build process. + +For detailed information on using `ARG` and `ENV` instructions, see the +[Dockerfile reference](../builder.md). + +### Optional security options (--security-opt) + +This flag is only supported on a daemon running on Windows, and only supports +the `credentialspec` option. The `credentialspec` must be in the format +`file://spec.txt` or `registry://keyname`. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. | + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + + +### Squash an image's layers (--squash) **Experimental Only** + +Once the image is built, squash the new layers into a new image with a single +new layer. Squashing does not destroy any existing image, rather it creates a new +image with the content of the squshed layers. This effectively makes it look +like all `Dockerfile` commands were created with a single layer. The build +cache is preserved with this method. + +**Note**: using this option means the new image will not be able to take +advantage of layer sharing with other images and may use significantly more +space. + +**Note**: using this option you may see significantly more space used due to +storing two copies of the image, one for the build cache with all the cache +layers in tact, and one for the squashed version. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/cli.md b/vendor/github.com/docker/docker/docs/reference/commandline/cli.md new file mode 100644 index 0000000..e56fb9f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/cli.md @@ -0,0 +1,249 @@ +--- +title: "Use the Docker command line" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +--- + + + +# Use the Docker command line + +To list available commands, either run `docker` with no parameters +or execute `docker help`: + +```bash +$ docker +Usage: docker [OPTIONS] COMMAND [ARG...] + docker [ --help | -v | --version ] + +A self-sufficient runtime for containers. + +Options: + --config string Location of client config files (default "/root/.docker") + -D, --debug Enable debug mode + --help Print usage + -H, --host value Daemon socket(s) to connect to (default []) + -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") + --tls Use TLS; implied by --tlsverify + --tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem") + --tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem") + --tlskey string Path to TLS key file (default "/root/.docker/key.pem") + --tlsverify Use TLS and verify the remote + -v, --version Print version information and quit + +Commands: + attach Attach to a running container + # […] +``` + +Depending on your Docker system configuration, you may be required to preface +each `docker` command with `sudo`. To avoid having to use `sudo` with the +`docker` command, your system administrator can create a Unix group called +`docker` and add users to it. + +For more information about installing Docker or `sudo` configuration, refer to +the [installation](https://docs.docker.com/engine/installation/) instructions for your operating system. + +## Environment variables + +For easy reference, the following list of environment variables are supported +by the `docker` command line: + +* `DOCKER_API_VERSION` The API version to use (e.g. `1.19`) +* `DOCKER_CONFIG` The location of your client configuration files. +* `DOCKER_CERT_PATH` The location of your authentication keys. +* `DOCKER_DRIVER` The graph driver to use. +* `DOCKER_HOST` Daemon socket to connect to. +* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is + unsuitable for Docker. +* `DOCKER_RAMDISK` If set this will disable 'pivot_root'. +* `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote. +* `DOCKER_CONTENT_TRUST` When set Docker uses notary to sign and verify images. + Equates to `--disable-content-trust=false` for build, create, pull, push, run. +* `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults + to the same URL as the registry. +* `DOCKER_HIDE_LEGACY_COMMANDS` When set, Docker hides "legacy" top-level commands (such as `docker rm`, and + `docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are + printed. This may become the default in a future release, at which point this environment-variable is removed. +* `DOCKER_TMPDIR` Location for temporary Docker files. + +Because Docker is developed using Go, you can also use any environment +variables used by the Go runtime. In particular, you may find these useful: + +* `HTTP_PROXY` +* `HTTPS_PROXY` +* `NO_PROXY` + +These Go environment variables are case-insensitive. See the +[Go specification](http://golang.org/pkg/net/http/) for details on these +variables. + +## Configuration files + +By default, the Docker command line stores its configuration files in a +directory called `.docker` within your `$HOME` directory. However, you can +specify a different location via the `DOCKER_CONFIG` environment variable +or the `--config` command line option. If both are specified, then the +`--config` option overrides the `DOCKER_CONFIG` environment variable. +For example: + + docker --config ~/testconfigs/ ps + +Instructs Docker to use the configuration files in your `~/testconfigs/` +directory when running the `ps` command. + +Docker manages most of the files in the configuration directory +and you should not modify them. However, you *can modify* the +`config.json` file to control certain aspects of how the `docker` +command behaves. + +Currently, you can modify the `docker` command behavior using environment +variables or command-line options. You can also use options within +`config.json` to modify some of the same behavior. When using these +mechanisms, you must keep in mind the order of precedence among them. Command +line options override environment variables and environment variables override +properties you specify in a `config.json` file. + +The `config.json` file stores a JSON encoding of several properties: + +The property `HttpHeaders` specifies a set of headers to include in all messages +sent from the Docker client to the daemon. Docker does not try to interpret or +understand these header; it simply puts them into the messages. Docker does +not allow these headers to change any headers it sets for itself. + +The property `psFormat` specifies the default format for `docker ps` output. +When the `--format` flag is not provided with the `docker ps` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see the +[**Formatting** section in the `docker ps` documentation](ps.md) + +The property `imagesFormat` specifies the default format for `docker images` output. +When the `--format` flag is not provided with the `docker images` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see the [**Formatting** section in the `docker images` documentation](images.md) + +The property `serviceInspectFormat` specifies the default format for `docker +service inspect` output. When the `--format` flag is not provided with the +`docker service inspect` command, Docker's client uses this property. If this +property is not set, the client falls back to the default json format. For a +list of supported formatting directives, see the +[**Formatting** section in the `docker service inspect` documentation](service_inspect.md) + +The property `statsFormat` specifies the default format for `docker +stats` output. When the `--format` flag is not provided with the +`docker stats` command, Docker's client uses this property. If this +property is not set, the client falls back to the default table +format. For a list of supported formatting directives, see +[**Formatting** section in the `docker stats` documentation](stats.md) + +Once attached to a container, users detach from it and leave it running using +the using `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable +using the `detachKeys` property. Specify a `` value for the +property. The format of the `` is a comma-separated list of either +a letter [a-Z], or the `ctrl-` combined with any of the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +Your customization applies to all containers started in with your Docker client. +Users can override your custom or the default key sequence on a per-container +basis. To do this, the user specifies the `--detach-keys` flag with the `docker +attach`, `docker exec`, `docker run` or `docker start` command. + +Following is a sample `config.json` file: + + {% raw %} + { + "HttpHeaders": { + "MyHeader": "MyValue" + }, + "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", + "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", + "statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", + "serviceInspectFormat": "pretty", + "detachKeys": "ctrl-e,e" + } + {% endraw %} + +### Notary + +If using your own notary server and a self-signed certificate or an internal +Certificate Authority, you need to place the certificate at +`tls//ca.crt` in your docker config directory. + +Alternatively you can trust the certificate globally by adding it to your system's +list of root Certificate Authorities. + +## Help + +To list the help on any command just execute the command, followed by the +`--help` option. + + $ docker run --help + + Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] + + Run a command in a new container + + Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + ... + +## Option types + +Single character command line options can be combined, so rather than +typing `docker run -i -t --name test busybox sh`, +you can write `docker run -it --name test busybox sh`. + +### Boolean + +Boolean options take the form `-d=false`. The value you see in the help text is +the default value which is set if you do **not** specify that flag. If you +specify a Boolean flag without a value, this will set the flag to `true`, +irrespective of the default value. + +For example, running `docker run -d` will set the value to `true`, so your +container **will** run in "detached" mode, in the background. + +Options which default to `true` (e.g., `docker build --rm=true`) can only be +set to the non-default value by explicitly setting them to `false`: + + $ docker build --rm=false . + +### Multi + +You can specify options like `-a=[]` multiple times in a single command line, +for example in these commands: + + $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash + $ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls + +Sometimes, multiple options can call for a more complex value string as for +`-v`: + + $ docker run -v /host:/container example/mysql + +> **Note:** +> Do not use the `-t` and `-a stderr` options together due to +> limitations in the `pty` implementation. All `stderr` in `pty` mode +> simply goes to `stdout`. + +### Strings and Integers + +Options like `--name=""` expect a string, and they +can only be specified once. Options like `-c=0` +expect an integer, and they can only be specified once. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/commit.md b/vendor/github.com/docker/docker/docs/reference/commandline/commit.md new file mode 100644 index 0000000..8f971a5 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/commit.md @@ -0,0 +1,93 @@ +--- +title: "commit" +description: "The commit command description and usage" +keywords: "commit, file, changes" +--- + + + +# commit + +```markdown +Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] + +Create a new image from a container's changes + +Options: + -a, --author string Author (e.g., "John Hannibal Smith ") + -c, --change value Apply Dockerfile instruction to the created image (default []) + --help Print usage + -m, --message string Commit message + -p, --pause Pause container during commit (default true) +``` + +It can be useful to commit a container's file changes or settings into a new +image. This allows you debug a container by running an interactive shell, or to +export a working dataset to another server. Generally, it is better to use +Dockerfiles to manage your images in a documented and maintainable way. +[Read more about valid image names and tags](tag.md). + +The commit operation will not include any data contained in +volumes mounted inside the container. + +By default, the container being committed and its processes will be paused +while the image is committed. This reduces the likelihood of encountering data +corruption during the process of creating the commit. If this behavior is +undesired, set the `--pause` option to false. + +The `--change` option will apply `Dockerfile` instructions to the image that is +created. Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Commit a container + + $ docker ps + ID IMAGE COMMAND CREATED STATUS PORTS + c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + $ docker commit c3f279d17e0a svendowideit/testimage:version3 + f5283438590d + $ docker images + REPOSITORY TAG ID CREATED SIZE + svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB + +## Commit a container with new configurations + + {% raw %} + $ docker ps + ID IMAGE COMMAND CREATED STATUS PORTS + c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + $ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a + [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] + $ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3 + f5283438590d + $ docker inspect -f "{{ .Config.Env }}" f5283438590d + [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] + {% endraw %} + +## Commit a container with new `CMD` and `EXPOSE` instructions + + $ docker ps + ID IMAGE COMMAND CREATED STATUS PORTS + c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + + $ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 + f5283438590d + + $ docker run -d svendowideit/testimage:version4 + 89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 + + $ docker ps + ID IMAGE COMMAND CREATED STATUS PORTS + 89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp + c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md new file mode 100644 index 0000000..4315640 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md @@ -0,0 +1,47 @@ +--- +title: "container prune" +description: "Remove all stopped containers" +keywords: container, prune, delete, remove +--- + + + +# container prune + +```markdown +Usage: docker container prune [OPTIONS] + +Remove all stopped containers + +Options: + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Examples + +```bash +$ docker container prune +WARNING! This will remove all stopped containers. +Are you sure you want to continue? [y/N] y +Deleted Containers: +4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063 +f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360 + +Total reclaimed space: 212 B +``` + +## Related information + +* [system df](system_df.md) +* [volume prune](volume_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/cp.md b/vendor/github.com/docker/docker/docs/reference/commandline/cp.md new file mode 100644 index 0000000..fcfd35f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/cp.md @@ -0,0 +1,112 @@ +--- +title: "cp" +description: "The cp command description and usage" +keywords: "copy, container, files, folders" +--- + + + +# cp + +```markdown +Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH + +Copy files/folders between a container and the local filesystem + +Use '-' as the source to read a tar archive from stdin +and extract it to a directory destination in a container. +Use '-' as the destination to stream a tar archive of a +container source to stdout. + +Options: + -L, --follow-link Always follow symbol link in SRC_PATH + --help Print usage +``` + +The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. +You can copy from the container's file system to the local machine or the +reverse, from the local filesystem to the container. If `-` is specified for +either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from +`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. +The `SRC_PATH` or `DEST_PATH` can be a file or directory. + +The `docker cp` command assumes container paths are relative to the container's +`/` (root) directory. This means supplying the initial forward slash is optional; +The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and +`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can +be an absolute or relative value. The command interprets a local machine's +relative paths as relative to the current working directory where `docker cp` is +run. + +The `cp` command behaves like the Unix `cp -a` command in that directories are +copied recursively with permissions preserved if possible. Ownership is set to +the user and primary group at the destination. For example, files copied to a +container are created with `UID:GID` of the root user. Files copied to the local +machine are created with the `UID:GID` of the user which invoked the `docker cp` +command. If you specify the `-L` option, `docker cp` follows any symbolic link +in the `SRC_PATH`. `docker cp` does *not* create parent directories for +`DEST_PATH` if they do not exist. + +Assuming a path separator of `/`, a first argument of `SRC_PATH` and second +argument of `DEST_PATH`, the behavior is as follows: + +- `SRC_PATH` specifies a file + - `DEST_PATH` does not exist + - the file is saved to a file created at `DEST_PATH` + - `DEST_PATH` does not exist and ends with `/` + - Error condition: the destination directory must exist. + - `DEST_PATH` exists and is a file + - the destination is overwritten with the source file's contents + - `DEST_PATH` exists and is a directory + - the file is copied into this directory using the basename from + `SRC_PATH` +- `SRC_PATH` specifies a directory + - `DEST_PATH` does not exist + - `DEST_PATH` is created as a directory and the *contents* of the source + directory are copied into this directory + - `DEST_PATH` exists and is a file + - Error condition: cannot copy a directory to a file + - `DEST_PATH` exists and is a directory + - `SRC_PATH` does not end with `/.` + - the source directory is copied into this directory + - `SRC_PATH` does end with `/.` + - the *content* of the source directory is copied into this + directory + +The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above +rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not +the target, is copied by default. To copy the link target and not the link, specify +the `-L` option. + +A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can +also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local +machine, for example `file:name.txt`. If you use a `:` in a local machine path, +you must be explicit with a relative or absolute path, for example: + + `/path/to/file:name.txt` or `./file:name.txt` + +It is not possible to copy certain system files such as resources under +`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by +the user in the container. However, you can still copy such files by manually +running `tar` in `docker exec`. For example (consider `SRC_PATH` and `DEST_PATH` +are directories): + + $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - + +or + + $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - + + +Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. +The command extracts the content of the tar to the `DEST_PATH` in container's +filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as +the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/create.md b/vendor/github.com/docker/docker/docs/reference/commandline/create.md new file mode 100644 index 0000000..e6582e4 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/create.md @@ -0,0 +1,211 @@ +--- +title: "create" +description: "The create command description and usage" +keywords: "docker, create, container" +--- + + + +# create + +Creates a new container. + +```markdown +Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] + +Create a new container + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --blkio-weight-device value Block IO weight (relative device weight) (default []) + --cap-add value Add Linux capabilities (default []) + --cap-drop value Drop Linux capabilities (default []) + --cgroup-parent string Optional parent cgroup for the container + --cidfile string Write the container ID to the file + --cpu-count int The number of CPUs available for execution by the container. + Windows daemon only. On Windows Server containers, this is + approximated as a percentage of total CPU usage. + --cpu-percent int CPU percent (Windows only) + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpus NanoCPUs Number of CPUs (default 0.000) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --device value Add a host device to the container (default []) + --device-read-bps value Limit read rate (bytes per second) from a device (default []) + --device-read-iops value Limit read rate (IO per second) from a device (default []) + --device-write-bps value Limit write rate (bytes per second) to a device (default []) + --device-write-iops value Limit write rate (IO per second) to a device (default []) + --disable-content-trust Skip image verification (default true) + --dns value Set custom DNS servers (default []) + --dns-option value Set DNS options (default []) + --dns-search value Set custom DNS search domains (default []) + --entrypoint string Overwrite the default ENTRYPOINT of the image + -e, --env value Set environment variables (default []) + --env-file value Read in a file of environment variables (default []) + --expose value Expose a port or a range of ports (default []) + --group-add value Add additional groups to join (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) + --help Print usage + -h, --hostname string Container host name + --init Run an init inside the container that forwards signals and reaps processes + --init-path string Path to the docker-init binary + -i, --interactive Keep STDIN open even if not attached + --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) + --io-maxiops uint Maximum IOps limit for the system drive (Windows only) + --ip string Container IPv4 address (e.g. 172.30.100.104) + --ip6 string Container IPv6 address (e.g. 2001:db8::33) + --ipc string IPC namespace to use + --isolation string Container isolation technology + --kernel-memory string Kernel memory limit + -l, --label value Set meta data on a container (default []) + --label-file value Read in a line delimited file of labels (default []) + --link value Add link to another container (default []) + --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) + --log-driver string Logging driver for the container + --log-opt value Log driver options (default []) + --mac-address string Container MAC address (e.g. 92:d0:c6:0a:29:33) + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) + --name string Assign a name to the container + --network-alias value Add network-scoped alias for the container (default []) + --network string Connect a container to a network (default "default") + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-healthcheck Disable any container-specified HEALTHCHECK + --oom-kill-disable Disable OOM Killer + --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) + --pid string PID namespace to use + --pids-limit int Tune container pids limit (set -1 for unlimited), kernel >= 4.3 + --privileged Give extended privileges to this container + -p, --publish value Publish a container's port(s) to the host (default []) + -P, --publish-all Publish all exposed ports to random ports + --read-only Mount the container's root filesystem as read only + --restart string Restart policy to apply when a container exits (default "no") + Possible values are: no, on-failure[:max-retry], always, unless-stopped + --rm Automatically remove the container when it exits + --runtime string Runtime to use for this container + --security-opt value Security Options (default []) + --shm-size string Size of /dev/shm, default value is 64MB. + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --stop-signal string Signal to stop a container, SIGTERM by default (default "SIGTERM") + --stop-timeout=10 Timeout (in seconds) to stop a container + --storage-opt value Storage driver options for the container (default []) + --sysctl value Sysctl options (default map[]) + --tmpfs value Mount a tmpfs directory (default []) + -t, --tty Allocate a pseudo-TTY + --ulimit value Ulimit options (default []) + -u, --user string Username or UID (format: [:]) + --userns string User namespace to use + 'host': Use the Docker host user namespace + '': Use the Docker daemon user namespace specified by `--userns-remap` option. + --uts string UTS namespace to use + -v, --volume value Bind mount a volume (default []). The format + is `[host-src:]container-dest[:]`. + The comma-delimited `options` are [rw|ro], + [z|Z], [[r]shared|[r]slave|[r]private], and + [nocopy]. The 'host-src' is an absolute path + or a name value. + --volume-driver string Optional volume driver for the container + --volumes-from value Mount volumes from the specified container(s) (default []) + -w, --workdir string Working directory inside the container +``` + +The `docker create` command creates a writeable container layer over the +specified image and prepares it for running the specified command. The +container ID is then printed to `STDOUT`. This is similar to `docker run -d` +except the container is never started. You can then use the +`docker start ` command to start the container at any point. + +This is useful when you want to set up a container configuration ahead of time +so that it is ready to start when you need it. The initial status of the +new container is `created`. + +Please see the [run command](run.md) section and the [Docker run reference](../run.md) for more details. + +## Examples + + $ docker create -t -i fedora bash + 6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 + $ docker start -a -i 6d8af538ec5 + bash-4.2# + +As of v1.4.0 container volumes are initialized during the `docker create` phase +(i.e., `docker run` too). For example, this allows you to `create` the `data` +volume container, and then use it from another container: + + $ docker create -v /data --name data ubuntu + 240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 + $ docker run --rm --volumes-from data ubuntu ls -la /data + total 8 + drwxr-xr-x 2 root root 4096 Dec 5 04:10 . + drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. + +Similarly, `create` a host directory bind mounted volume container, which can +then be used from the subsequent container: + + $ docker create -v /home/docker:/docker --name docker ubuntu + 9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 + $ docker run --rm --volumes-from docker ubuntu ls -la /docker + total 20 + drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . + drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. + -rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history + -rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc + -rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig + drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local + -rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile + drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh + drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker + +Set storage driver options per container. + + $ docker create -it --storage-opt size=120G fedora /bin/bash + +This (size) will allow to set the container rootfs size to 120G at creation time. +This option is only available for the `devicemapper`, `btrfs`, `overlay2`, +`windowsfilter` and `zfs` graph drivers. +For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, +user cannot pass a size less than the Default BaseFS Size. +For the `overlay2` storage driver, the size option is only available if the +backing fs is `xfs` and mounted with the `pquota` mount option. +Under these conditions, user can pass any size less then the backing fs size. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the +daemon is running on Windows server, or `hyperv` if running on Windows client. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. | + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md b/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md new file mode 100644 index 0000000..53074b2 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md @@ -0,0 +1,101 @@ +--- +title: "deploy" +description: "The deploy command description and usage" +keywords: "stack, deploy" +advisory: "experimental" +--- + + + +# deploy (alias for stack deploy) (experimental) + +```markdown +Usage: docker deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + --compose-file string Path to a Compose file + --help Print usage + --with-registry-auth Send registry authentication details to Swarm agents +``` + +Create and update a stack from a `compose` or a `dab` file on the swarm. This command +has to be run targeting a manager node. + +## Compose file + +The `deploy` command supports compose file version `3.0` and above. + +```bash +$ docker stack deploy --compose-file docker-compose.yml vossibility +Ignoring unsupported options: links + +Creating network vossibility_vossibility +Creating network vossibility_default +Creating service vossibility_nsqd +Creating service vossibility_logstash +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_ghollector +Creating service vossibility_lookupd +``` + +You can verify that the services were correctly created + +``` +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## DAB file + +```bash +$ docker stack deploy --bundle-file vossibility-stack.dab vossibility +Loading bundle from vossibility-stack.dab +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_logstash +Creating service vossibility_lookupd +Creating service vossibility_nsqd +Creating service vossibility_vossibility-collector +``` + +You can verify that the services were correctly created: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Related information + +* [stack config](stack_config.md) +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/diff.md b/vendor/github.com/docker/docker/docs/reference/commandline/diff.md new file mode 100644 index 0000000..be27678 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/diff.md @@ -0,0 +1,48 @@ +--- +title: "diff" +description: "The diff command description and usage" +keywords: "list, changed, files, container" +--- + + + +# diff + +```markdown +Usage: docker diff CONTAINER + +Inspect changes on a container's filesystem + +Options: + --help Print usage +``` + +List the changed files and directories in a container᾿s filesystem. + There are 3 events that are listed in the `diff`: + +1. `A` - Add +2. `D` - Delete +3. `C` - Change + +For example: + + $ docker diff 7bb0e258aefe + + C /dev + A /dev/kmsg + C /etc + A /etc/mtab + A /go + A /go/src + A /go/src/github.com + A /go/src/github.com/docker + A /go/src/github.com/docker/docker + A /go/src/github.com/docker/docker/.git + .... diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/docker_images.gif b/vendor/github.com/docker/docker/docs/reference/commandline/docker_images.gif new file mode 100644 index 0000000000000000000000000000000000000000..5894ca270e002758b8f332141e00356e42868880 GIT binary patch literal 35785 zcmd3tS6CBW*sj+UAPFD|HS{DP9Sl`K#LzngL_|~wh=>J0Kv6{0(5o7H5isItu^ai_j`|%i=(+kus7_0yaNA47>mV; zii%50N=ZvgDgGCEGFeScO+{N+4I~zJ0sDe?UNh-Oink491>4dt5?8J@)TEc<>-SB4TS))R7}c z0#5#yKqfOd`M>PV%*@Kl3O{@HU~X<+Ufw^&#nBi3%dyJJnwpvuwY8_3nwp!NnU^mo zUHLC*U0uDsy_wgqpX=`*930FU9?l;hpO}~^zIX4!ga2}IX6DJ0Csj|M*3QpAfByX4 zyLXpAd{|yy{`>du|2*{nFBkF;6bpbi(EIO({u>h@vdW|Ct&fR;w$@>qcrXN0&a7xY`c7GB^N@v$7r*be2ujX3j+*UQ5MKnGzH`-P`a-L+DspQgL zGnS{dwQ1K_d+kIKblEGe4Lvr5kv=tL^y+OUu+{sPO3R{P3jf$h&{<^j`b)jGH8?>Uq8E^YhU>-JrYIyO&?{s@*Pn_VvWv z_lWH>Csg%*|HL26QuVrV{m1f~v5S%Y)nhTK7CHdXxe$t5S$_BI-p!k_Hdo!P=s$)1P1V~&Y2{vtmfPxsbkVUvk#Wq3)V zblS@z#%UJ_atYsYPfC=^u`fQ`zwwmz6AA*M20sQ|*mJPFTPjiA=v9PmEZ&PE&>0@3;FcQ8sU0l3ENxQ{Cq1 zaEfgMXRxW=ufe|AwB>E3q;9&P>GJiT0rTrL*vz-B!w1XF7kw0(XM$6gn=kP)Y5&OIDE7?Z1{U%Lq8-P{eJ$w=le*56bfS^m>{D7G4J^t zpKF~q-0t7;hmi^bB z(Fds=Qkkvv%5_?0W%J_abw0*CPgTTnId)vCt+;eRAo-Q!Z&5YKkh}TZoH8W}05v+W zOtGW#lB6~O?oqyi9?b^8dOLr8=r%47pYa~_oEI?$IsL$Tr}-cxP&w1GBzJ?vY}MU$ z$R5%+58k|*;xp1>j>^k164M%*4c5I=9zVDtUoGpsJ=A-|HH!b;h_U6bCvO(kNk%~O zKSJ+BNkAp#ngj|!|5iTG_T>t8W8ibuJjk9_DH`j-L`~RI)&Qhf^rJIR=)dMf)~&d_ zL)7&^MU9yrH%cc!4Pi=#`;)ni0H|{NaYT0euiZ4%*@|{w=PG>C`%RjSQV2o+C?f7Z zijk5WfRBkdkR?C+iKhFf1U{;iY3C^#OyR{e8q7DC<^-9cU|l&C24Mc|=DFQd?hN>K zgcmmY3tf1qYs5N?aZ*-cK!}=*5Po}{A2m{^kf}g4hZ?6~^jy9>=a!faX*D5eD~#)t z9kDQr!dBl_OU(peM4!x1iL+V1VQ5^;#~G_Yl}&MvG}HulZ98i47{G1qky&R~I#*+6 znL&mkS&6325U6-N@mna@R=TU$r18f@;d-{M5RaG=8hQKlhkLeSD|PF=Ww~TdZp8Eu z-BNcdY4}vO-I%Y2S@DTViQ5rHi&jiQc{>n*Tm$rg(I~7HD^OVEh>e+ z%|dv0hK;nn7Z%TZDXDe9Ta(VL)(?kp1{x?xJ}QC|xsvgw_cH@>6NE@RQtsoJ65R`1D9r z!z9=<>Q{YQGS_qWC6^*;o!9ysc2+9RxZ4#^O~0`IMY&yf_Nu(?CERkUq6R)e#4H^W zb>>52_5czlfq>%^PyYQ`cnu~2SQ?)op3Fz}*f#6pBhl|GzU|eShl^vIN;Q=dL`G_%I0; zp3#SOXz(|a7NYoRPWE2u;JZrw!rEMJ(yKj8TryY4f|Ph9t3R2(heLdQR6XD0x2@XjFqUQ1P$@RZSmoeiMSdfK)E#B?4YQY+mAaXq55GJcvRMx zHX%-e#M8m62$Qu@6L8NF_z6Ui{bqtO+i9^rRZWnkn>dXepsBe8{+k7Q(nhbltK3q)wW^O=PidsGa<}juJEZ>PLjLukVzih0RC&hw<~g0FI1&Z)|Rb-^Mc{+%fX)QLmJsaQ>(zHl`DBa_-MGWpV zz)>qybLqyz!x(p#EdYDF?Wu`5NewSfkQ3u|BC9g2`h|q?Tx`g{kpV~E2UrX6}j6qE<=?HzvptZQjSK{nGcQe<;f`OP1QGgx5a`HeWu4b*OLa zL4k|82leWhV&+Rjx?tSLK!=A6Fzp#rzO0fknCsYQwa+0unRsPA9 zD93a(*@^F$xKbt=*QXPjxKqg5(YZBC6y-4;qU;Z21gEBh^Pwme{m5U>r}{F3DxX)= zMMxJ&cvpY&tEBgV{|>Y~-MoL|?m)XO`y#$w>*&cdqN&&&D%TxL`4A`r(uBE$3BZw9na5X5Zh0Q>~Yw;d}>D8(3J&k+H-Vk-RYAA6vacHvXWLbAVZ5O z#-F*h@x&v0r(;)-DLRvXZnXXFbV_>Zj1q+@Q{_A>dGyT}4^<{?N==d`0aNxr@1L(S zPxK=&Lq|Oa#jyb~9IG%BuN3YII2u~TMuH`1{hKr7F0xLzdG}Q^L}Mz1I!NO^=j|Cn zT$(tBXitjKnncBUKui<6SO@)aJfmFf@oQf)#o2vgIeGI`Du^X-U4h<*q7CcexG%1I z9mwmsOp6sc59WbN0UFvFnIu5X@EN%?M>FaBRJaIBD+I4hZ)q7{3_2F9>fQwvzAFg4sW4CzaOvS{f0 zQ5evWcUl-tKI9&WDsrGS4m4$pvEl*1Ot1%_rDIR;w*3Zl$OVt$Nb%};_+c%uEXJ;F zdbbL(i^7^cTM>C0q%q{{DYQ+dDo2EyqtcCFmLNwef)>)g!ceSju0|vEe$6pw4s0?A zwMP5(?%hIh(#6wZT`FAq^=#M1oTHN{U?7!D$n`!-2B5ATvIaW{-26-?ROKJL%>uI> zD}=laeH5LONJB8Z^NGB`4^A;&yoeJo5p_X`H4`Lq5kTK>LIa}0hIjWC4SdHE=}<>_ z%*}RkX@Lbb+P%Ou*RFjPE$AqA$UPZhV^Yb?OyZ(|4HB8Lt`^Bj3{ZtBWD{9*V0bIF zBp)N>LW7}w7;Rc<3Jp%o-F^lu)S`_c@W6Qt$leYe)yBAvovY*)l~Rv%zGCE#$@OgG#87~u85m)Xmh0Oj zIzE{mc`iA!uzg0G=u$j_JuucS`y>X(K7o-Dm?cJ{o>-x}LezzxL+1LYQrSs-czrIc z!im=AXuWX3ymG;~^C3FBVo?qi=9B;yoJ>-s3rMW>sxSPpi{0e~jIv)AI)55v2f|iy z6kJ%j1qSC-rCC)`X;G@HS^3!+6XSS3+ybL$iIMqP`EEZ(od(B@T}b9dc+l2nN5<3j zxJQYx_6-+}ar;b5Wsg}TRN zeBI(caLX^pCbPy_&WFCOyMYtByY@I1n#Dg(6B%GmAtvWm1D&&4W6irXml>Amiji=^ z!2HT2Civ-(Av1u!E4FO5F37c|Gpr@Na!dU(u!mn3Fmf{{yX_O%XP{#v&JBV~2dQ#MqFUD)~t0 zDAp*G+(tQ?7J}+WdX8q~A4_O=8Nlwr?6RxNzc`9s?_}6;D8k@y61y{nkKhD*O5-pm zXHf(8P8Iwmmz|ie;TR`8^ezgPnu+$nt>TutuAC20SABgvbwPjLWRFIcYy|ddJ!5w| zEhwAYoyOf>%|h*nN1X8JvL*KY_nLB62hkSF)m1<6ogzo_R5a?PCiXgMcE*w`WmqkW zImm1O3+H#La}?KWX{#;OlgD(`?wrg|i9(lbU|msy4gIhttLK1u3vr=Cmusrafd$dm zM~yZ`UW?i7MB{Blv>3*l)~X-@yfN}d^mn7cE7!}HEnXlumoDs}GGX3!va{f(F`x2D z@#Y=b)YKdVN$rv&pihsnjYj&2xmp^&8(Z5`Rz6#zv2hoiQuL}I3KgJKGK~)qws*0} ziAE=KkN>HKsm%~-?HRc`pq)+MB!Fn#+hLu%T#D+ zQ#|#)k4BA|{4QhVs}>}!G}m2A?`aymjsN=t4g4Wf}S5ch9$0ceAg=rAk>J5P=k=?%}NDy;#U+ zoDGo=|0rAcI0^>)N3?k(P1GYoAnz50I@0Tgc4ZIDWVa4M9thB(nVK4-RdU&bTJHX_ z*}3n1+`v|&4s@F_YR5(~Rj@4;k>_Zv7wG6yZ9K%M*t_IgM$h9zyV^_F+0zg*^-zfm zYys}nQIAl;O$BDI+S)YvwzjHVjUMN8`J1AVh$D?6%GCBz1M_*0R9wjQ*H&SPP?+BC z7xB_(6KXTBhPEOA%cE;V0(~k%;n9;KY{ur~T^J9;BN8+j2={_`i0l<@eULg4)HUqgCas0)Kx}yavlp;lEOaHMVs>~pF*S@|4x6wUW7KC~K4^S9B~Nh-(MS#Xeb!Hy z)h@$?ZVi{d+#7hQt0kux$#F(>{&Q<=h4N$lhOp`@bg~cVU=MVYJ zS4N$;VeU3ifmHybJ7Z_J0}$?*$4l=*xt9i~fvU>FXiT6L4Xx=F-~9181AfsNb!nOR zGSG89BTx}$|w5O)yi`R4nI9~L-Vk;V!p+zXzSM}d|tN`5Vg$L#_b(PtWllE zug2q*^aK}{7-cz|UT5SUw;g!hmIHV9yg9M%bep1jPqTKnhg$ABMNLxuXHvDXhx)yV zR~P>(kilgEe$ST+H#CM#US8O-OXSAFg&q5fj{VBw{(JD9zy}F@-fN$B)F}kOavDN^qfb5pHr+nR5y&aStYL1A5oBa>q&K&rF>jZb9PJ~ zb2T}`=uK=+8@S^Kobv5Ygh$BkP$uxZ1nhiIue-~~HwbOc#YcbP{MJ!>ldSUWi#+_5 z?`yQRRae=cEL`l|EV4GuAv)}gy}e2n8hSW92?{JFa5N9Ra!N!H`CqZ`PprOQ8Tz8p zbw@7oJ528!SHS?xE^R{keex7g#IfBAb_4@Zcivpx5ZcI>0;r(|N=9 zqt1yc_kKm4`JSw9I@$i6AKLpOANW~)ym}>0!TtS;!?LMopT>p{X)nE|WQ^P=)z-eIrtg!z?{#_4t9b|k#2jE$jXXVV zfmX6uvL~|9Qy++JqG{-76(v)FJN^H~Y+F#njx}amO%^IHauWk0rt;EMPxDn#yaq|5 zhzi3Bn}pQt9s*TIV;Jx@0l3if{?x-6PwdTP5}>SCkal||1QfPpX@Ebxl4t03(w8-m zgPND&c~raMEtT_zpluAq&*{9R zB>^+z%iZLPg@NU}MIm8(W}2r*VwduN%r?gZteC&=8~~?Is_iK^1Qol+iqD8O@W+IJ zq!;l&X8T1;1E3c)e$^y06^i$G?OLs>>3lzGiuz(cj0r<%*7h}MyX$}?aLROA-=_cR z!)soz!4JU>={)g^Cv>0P?K^2e0lBf;OOv=jTDQ76=%Noc6GEf&mm)|jE5BBj({SxB zTgEiBF?gErhTjbP2#iJpKT*u=4L=F*s5PJD_*-Br3}INdQuQVYyD_srG#AnbegKfB z9`^+mC&oXx_rK8AX6x&R z-HQ_Z8($XgvX)PdZ8e#PfC{_eMM=2MUc21=nO*(D!=D*UR>Z^o^^PW($bF~((b3c> z3Hzx1hTSqm1FMz)e(f$V`%GSZQ(e`?1U2K@Jx`^`fMdhXKNhzShl{l=tDXD701!Q+ z=|HKb38!g)P4sv42$A@8@se6DSN?L<@NBsRhCTj{vy#Gu+R}c^K5YS0neVR*Yl~jm zasQYc3X?sW_4gBhcSHnC8=QoJ%*@XZ-TSxE;GTUc?{?#H7^Z0Nm)vPC_G|9TkFnY; z(XbmpwU&m1HgyMD?tuImlh3E@O-M{pZ+dlf89O4pWfAqA| z+G%ce$YUhC^uV^)OIJ@DV0eJAqbOc!Ciqc%$7vU5kI2UaRf5Wst7RX8#9%F&*&JBj zXYlXk(GNkAGJs?D>`UpW_I&e-KIQ2#wQ~l!TP!Ow7br%A6>N^1^`Ui#4pf+zRlLdo z4=W@t?0b8$c^nmfTzR*-^0L>uo~9d++^=Q>V&pIFe7WwpcfH5QZ5#UY?)T<=dA!}K z>XN~h@z0KN7?6Hf%GDN~PJ{b$ZPu0iDe+)M+TslC*-UY(7240GC%QlOl|0kM2A=)* z%9c&-(f8%gGF60XB;E!82R zz0X%EM8ZD_-=7pVCU@BUW@tL@fyjNu9SMB}bc=(T1aN#`|0XCadBdD%2qQFSWm@2Q zJKAqufm(;$rPf4p&7%&w+0cA!ooe3fGYO%u$x#K(83PucTg>YQQv0(q{ME(bpOyU! z9y#=FU$zss^{>Ih`LmJP)k?3OM=WQ(&+QrDDt{jxv6h_27}!6OClpXmT*{I5NVmy4 zIyCBNxs=1oCY%qU-`XdB-&>}4hFn5R+#kvo+Jv_yIJ%7clrH5rxYz1p-T58G+}63Dug>hG+{%1+5KW~w>eiR=N=XcPdG9&c>A?prH~Ny#g&z<)oQD< zEqi4}pG*0FXNAS(mHg&%sSeXO<&~R4EfjZ1m9Uxcd;SH%p*mA(mfJQ%%>+YZh5Mu# z6jkDCeimzp%Z>!7Z~NA%{2Wum!Yhp_1OC$|nGn$7Wm=D5gkwRw7)kBv*Or4$%gJb? z>~$#&10(C9eKeH>;>DYT3>9|WxvycaWmQ?`{zUmqZsq-oaYrf9ulY|xuHLIKJQuTp zLffqT2;f$}G`3YTn^R41$vL#`ccC-0IdYrScMCT$E{WnF8nqPmrPsKegq`-PZ@m`` z;NY_dcatLZ0{6G6magr6`+T>I=|>0~?idj7=}uR@&F7@+4^Vod{E6lvs|0ge|0csW zL^_<`CS{`+V%Urli`l}Kj}x$TAkVnX5Gwns7rvE(%J`?OHv-(X&B_?KckI__^7eL3 ztNBuc3M1xR`*o!1`~SXl(29X2aR6DL&uvVJ(ZK=1PI((S43;N?lLT}EhOd&w9TJ!1 zZnb4>@0}C6g|`RHrOvdL)+ZqkSpn5gT+ay&{J{z>oy01o1Gag%lbaDE8CDA>;JdgK z96iy24hXaoXW?TsDU9GKB<`7vV!V-ja~qKYm&;O)cu=Va4`wmAsR_ZD?{zOg_^Ii2 z5(nBfUNF&Z!}YIh!S+8(#|fLc$r!8mD4}ZxyRgb_!NMR9rWLQR-DUv%DC@YAiG4pw z(K!&~r6OREiW-(EoR&a?AR&0O)mFtME}}R@64E5C5o~4x&V&YGilrb7_3XNHdlXJq zNWs!PCXT^CRPaH#5ataMXlIz+YRgtx$E2bj?BISCkV z38#5}S=q%;47|@2HskcK-bDU>3hZGDMHU*Dt3~cByWtFk!-23Z=Z(^o!ftnt4d$U` zhxE{e=Mo#4u>aXN+v}M^e_p-hMzE5U@MXfnPF?k{cR|Z{daEVtW5r@4KU=>1sUxig zguZ1-F#Puroc$AoKN3JQo!OIx7r|wd*5QAXx>buIyb%>hsS$6FXtbMObx)VWuvJQ# z-20+}^BgB-pkmf9Qpi9t#XmIPz4Bxn`VZ2RMgZrykX0lsH#$4nX`NYn9*=tT@==ur zCaiRhiHT(jt-<}Xgs&*&1yhLJ0Qf>oX20BxNtc=52Bq8@vn|$3Gl|}B?YMB(vz|>` z9&G@*t0xnhFgsAGc@fC39W+RxDxUtfvDmi6GeijR94x`QImxwtc{|V=Sx=%JUKHf3 zYMuD|6-$tHOBT|YzqP5?Ee}=svOCo868K6^f)tK+HeOKU^&6Ziv-AgZM>HskR!ob zu^=-NyMebSnGHW-BDz#DC1=b=Hl!IA?@8OWftX~B0iRGv_YGjjxfx z8Ngtw;}L7XoXbJ2sZ5C(i9jh>vVr-uAGKh?M2;Vfhsb=`italXwTfEtVKN>v2mwSE zY{Btu_D;}?N-(WF6l@0c643b=xj#Z{)j(o08wJcAQvIj$_re;Sn0?2wcI<=606N`; znIcP9eNFH5m4F3M7CrfGp`^1p(S?S1G^7w{un8Uwe535@j68Zy5y!(Ow3LSnup-9~ z!};%!cWmbH$7|5Gy+Dx<+c+r17E7*iT+Jvf1!U??Vmzt4DaQ_J?M)xM7B7{ME**n) zU`7OvsR(I!HjZ zL0Uo@q{l~EwJ2VEM2`vm*_x`&%1&)t&6b+X_9P`*(g>}4k0Kn&d)oumsHbP&ph6^u zCkX*xVLObN7z-2&pc{KpJ09ZLb<_Zxm&^hWoG?|fGPPPV@~@#k%jZW&b#3@CUOrZZ zf%LzTb!R(2V*r)Ntmf!U6|Q&Qm;CVH94;s57c(8N=3c! zq_v8CvmG*%3%@QQ?EYEnpitM#awL$djHFPXBn#yn2Yr)!A$wF@%%I)V+zopRbpj zzam9Wv~`nn<>l<~W*nd2sqBa$6_#;ny$)HcjQmkXAfcm?ZuhUwxOZdpViyroc~Xjd-Yd=P z?K44y=AU3cy~Dm0#c9;0Vjg3g^TIxD>Z8;9bcA~;o9HB z<@XdrP|RbQ-~62cw*p(;Fp9sAx!wxhO}^d$HYxO5^d~4B`h4lOVWWzmuOd>3K0iRS zTYD)wbgq(*>tHz(QvHGz=We6(lFPY~>hI{rde)0sgli;Gi7O38WnIgghAJ3CZe^Et zWp!18nhM5nIdj;|eZM+abvbvnwE|D7B*#^-h}9MmMlw zYwwZcbMOgwj3X<;mI=Syi&qf9j$m|5bJXvZ3W6i_HAf_A^%mg=w)-8*iaezHySx|Z zD1Jr*2g&;t6%q#8B5rdm@HfkO?ZR%xjb&Tc3L8zliEriL!FDR=5ap?&4*G+pkH~Bk zlF2ZtzgX%?9^d2dXFCl(;D zODk4uz^>=4BDx8@)$56+to~9lv9N3I`R{jn$%W0O%V@;ox(5jr2|yccspNNF&q2jW zMBk`wD*N{PR1dDWf$RMx9oe9Zqj7Is)xh+DJN?0yY(NAsU<+N$}1O?>_xTGS27ZY61(M~jc zQci#JLU(7^=9@8_9q;VE>uWb%VkG-#!N)Dh@Af5|>k&7V1|F5OjV}U~cLPfQJ^**h zL~}Owrz=b4cwn+m#TT23=|=Gj`x4(eV-zAYO8PMCD)`mA$BH78w&YBb{Qb$|`2?M{ zmb%|{mRb7GIm+!#V5*%fVN> zC;oBoQ|FCucVKBpqRsLOmpq4`p4{n%Xqr&B`9|rDkhi|tI%L3$w%}2&6cSw6p6zTu z+>ofc;2NE^xS;6J8#uA7_`aO_e)RJD+c$_Z{H~H*r9*!|4E(Fdj$AZWg2r!Xkm+fA zmarO->*)0{ea*o26gzc*O%Nq_>r7DhmuHst!TqI0c&ac}dD0YB0JATKko6r9E z=9SP}$1$FU0y=eos@Q10%u6Q7xPiSI7Gvpg_aqA(0GOo zNTGOdX(O|LxI`-LNx1Xlk4>f5@|O6W{5B#={h&cnF)tj-{JHv~hvV^2pePjmw3Rav zp@fuw`oca#xhx@ciSRKfj56o9qcRlguM{7-|)PG<(w6pYvHm%wD-Wm zt^;qSDzQ`!N~Hr;3xQQFibTr4Y5n{DrexY>e@_||+8A+8Uv60fLlr_?J-3K$^32R6 z0mm#)X$}2+EqvB`Ro2<_S!&oUku9v6+DthW`u~cyEiS?Y4^HQ`W*6A7$qTkB%(VU>mx-FziG(1By+0K)qvN$nX~33CMWa2%}zJeJY9_P4}nh4`=8^Otbb!vT(B= zuE`zc(!;wB#w3EvNu8zkR2Eukw@v-@=J?fcz|2y!Ff04NUTkxUm+F>8;q525oN=c3 zwr9>QIy*kpTn`>T*A@pei<+b~FOo?2buchfO8FI@WZd{5NyVVJbTJgWrV}3sWdpm( zG(^ok9JG(>_+8p{Tp9@aN5vrEu9XPDfZCNE*N$C+*v%AmU17jy+T{p?bg3eYDgPgS ze~O`Zc<{i6ZCe{EEJN>enG$!d#cN^as}j8=GNdlj3u+*M>Z%QyzqVyEg$*#QwX)rm zIZd~{UYCRH%F{5RkalnTb?oC)db~*kmtvo}&C6CRodDa}6%#zW%==$#v*A^Wn`oU^ zDKzDO*r9C0^oLfxwJrsXREXDF9DZuQ_uB5`&)T9Z5)_9xka!cirqLRdtC5cX+jxKH z0m0Y%c?rZJ)__|xo{%W=Zs_$m7H}9L@!q@5wvo5S_~5b)Y$??g6WezCvjzq^&-XeD z(s$_S-dI|bu9N8@IquTwbScT6feO3VS96o{dE-x)Su-MqL=Ehhy*sd?A=G@YG*J5iMaNw}8;L+mc>w!P* z#a?vrbXFzC%J5;WqZ;q%Qkobxs2uN!*19z1ckn}1XW^_p4#urG&17mGGiG~{~ zbVXqId5e#|y&41pEH-d(zWLFs=1({3@fnL!2EJFJtKVd^btEJd_yZTO&$5B}guLG8 z5({6zgh>ik`Ztx1iyiE|MyL8Co6O4Ij|m6@vxLZR~T)~dHe1utjv7j<-Se7-hX}cC-bB|PC}H! ze&8o}JDC<~%7RUd&;Ox9Dl?pz*#)!UB$17}f@9OWlk~erXu1>jVnl8V4P(YWdwjLk zeFhJof{(sg8FI-_^+z-XT*33}*8PLn%L&gDdyPc?eJ&m?{_!Wp@hW?)`tR&JI~;)& z#uYYL!T!9w(8aNF0QRiJ$(ti%2v#D;Vq`4U=;s&tW1ac0)(xJO$)%CPu@CF5zxNz^b4Cjg>HwFXzxlZ4gNX}`)+5~PNhB2J+Cc10wrGQ?lA zP8UWgv_z>z@3<$u{kWd?WZ4#maHjLZI(ItaFmzbRHgaH{A1Be17%b$8hjG8#ktf&r z!bP`VXl!asSD>})8B=XE$&JoythXSO3*qRbYOmDj{7kJ{;MtAYy%bHLn*@Ex9YIxp z8?tPbb|6$dEX!kx*sM@~=HgKm9@~-_PWyGZfoXY*D^hfD&&kD+0db5^>6DVNL^{jU z*DwKd+TG^(PziBg1UFgBw_abP7)Th{0I~E?NQ=H|1Hx$p<2P9pI$#dW(WtsBKxl2U zYJz?UnSpjt7UeP>!|+)|m{KMvyk%a=&NL6VZT4Z>v$;ufLNv#Xvys8l?_r z-qwnaEHOF+6QZdq686m9H{SZ9R6ZnG!xb_lt&@nSC9A1ddW;yRl?}y}nXRA4*b5+u zXj&4nizK9;E?yh222gu0vM>e_C|Y#7en+EB>}zpxHWPD*glaNu{31Bhq*$32y3vT| zZSRK~UtKPlh3krX;uFQ~Y0zEgvsZMO+vVOAw4U1LfYFL93DpUKjGw;2dwNgOU8k@c z8!kz?1E75U7*wBLg+F}Z$>{2D5+Rep4WYdCd1MK7;)Q4|CSZEM@4R+X{3C^8`E*fd zpnGd24dZiA)bQ8?VUVQaT@;XhyX3C?~J$I@mR{bGpOcV3alHW@6pf4!tWLlaV9 zGI`_QxgxSm2v02+_n#ITn%r>4<_OI98SdttO-?CiA|RdHd0P9Uy>c+I=vX9SP`vFh zd2)W#+y=RGHYC-qFWd(aimZxjDND@TEau)i{9_|$sr10BmQW%T+Td2n)JvHfa=fh z-|WyGc*K=8t8HvYzr&mrt1SMk#3J=*yk(bS|h5$1~JRk_Pw zjW;KN34NhJ-hCM*JpgL}n|IO>@+6$5uSQG^m|2kR8m)Z{ldeh48ZfWPkv?uXyRh!C z8IY!PYxg|Ll8WOLiV}k%nAtz~WT~(#d@1!s*zYpcEw^P*l7Wqm19p5erry8j%k(SD z#qH7v2wigU?anzT*^#V#=Z0f>7t<}uMCOTTR7o}WyCzn?v|B7OhZ*b~ALQ~cMP}P~ui^ww>6;t#7BOd?9+Lnq$!0A7 zMe$j$M%?@K(_StGe64fau~Hva3Ocz3Pm2peTn;^HIS#ZDHvAsI2O#^RIR zxj}F9i(HEDn&;X4K!fW1+;eseK_wO4*;X$3T9lv$?GpR_`8L=ypJTF4mXvmNt=~3K zV&h`b z#}+)54+_}9vLOXMPRr|luK4;y!JXwm#3}FN^s3jLALmU3fv(*-2KXS!nLHv0dSsJt zm7`FsRTPU2^2uNc|IC+bC!Z{-faCJ#ONgAY+?ORp5!c+WjTK&RFQiu%^>tgY8u#Cb zx(I74Wqhu%FDd_3QqmZF!G4EQ22?heL42N(cOPH9UAszcPl62>u9yZig&JGS8vB46 z$G95j(i++UMc3IH&!08)0H;li>9XIK;9pG@vdlt}eW^ z?m$nS1_@T>)1;mQ!4Ut(c%&Y@^VXU;FIN@UGBvQ%V{-r85Ml-xDpZD$ z?8WOn&A3~Pny-u>+p1^lecfZ=NgaQjpMP3`f7Y~rd*DjZ@e=6S}ABI@a_P&i%$}!!6Kj>Fof|R_3 zFH9@Hm)VA+1uW$|-5&AOu->>Znrk)z_AXQs*`>f+X(oRQwJ&z?jc|Qp{^~F8W z9wdi4Z+k8=UvwpOLMy(l!ubel2ApqX03r?c7H(%7bJo1QWfoDGRQt#!&?MB(Tzc=^ z&rO@Z*zGg1zj4G!2H^Jgc4hm(FsHpQ8Z`m-noW)toNK322!2x)|22h}-S;1$K%It$ z(q;;2qEj>p`zs|B@rT1*oGdL7e$}^JYf)4V7IZ{mBkX|_f!T`=Z8_3wb*CEKlj^!1 zuz5IZ^Ny1m7;bBuVqei@aId1l=F$pbK4eJm^K(KC`OuIQq{ph4%(IY1Kv~}AkD8V> zNko~*Rm*W7B?ryEFK#koJm|!gjT`rXY@~Zy)?$Tf6f@XUO4$fo)vj;Y7=Og@%%Coj z8}PFo(c-|NhG%q1aHp)h8rRh}1g(>G)e-c2B*O}(_{EwuaKu=VRJ?ECT=D_^ct}n8+W?CPQ*FK1tf$tatMnp1WeaWh z?iS3v@GTY%B59sf;M)`{Z5>{u;F%kQSOX8+LBIi>U2(=|T1I9^M*g@M{cVBx_JCyZ5PLADFQEd2OZa>Le2Xid(I)3|dx^@(?JsH-KQ_ajPj%dpN%M$3K#g|HnMNC z0HmtNLwDn*#13z)`;SVObC9pcp`oMU8)z=Ghj6&vch)3Fne1pIN$)CmeG(O_wMGAA zH1>`n-AT~rLzJO`G{=_Y+Y5T@M&rH}ZV&$91UZeYsPaHq8O>Qwe#$pL`HJM?{@K84CI8bBMo zp_JR1HVi`*?%m6H9hZSeX$?9LA|JGHwOz#Sj=l5|;SI~-N8AL^#rkO$8`<)KUdf%o zcuc3_&s^BNHTu|@@ea%%tH%YGqD=7)@+`V2_aXkzSfu`=3k!Og*;!TIF}k@L2Ym_? z+aJ6Mp81i8s$dXPX7q(aXbK4x<=?Y*94@>%n-e@&nLk&PM0v$!$i!-+37xvS*P0Usoh=b&#OER z1MXbx!%@QUo#}yj2T3$6Y2xAvs>l{A?bpBZwD7ghGu;~-;y=4O0I-4mypBC&ED5g2 zyxQ|OWRvH=4?Um@N-yF+kJ+CN5C7|sl7d~5X{w|NmtUV5X=eG;|Q z(xRd|{CQC#_!%J{beZhJbI_!Xz&a6btw*~n+zJ!mxU_dYp5Xf7lg`}7+djy-Qh|)s zu8Z*iW?>u|2B8Va;N|1-lef$T(BL<0rJzS~-J3UL>8mF-tVt?;N*1N(q;oyrUgo!y z+@O*dU_r0cT(BM;m^wU9-J3ts%o;rYL41eMOeOCMMrv}=q~t?l=~;f+-;68{fZXC4k!{P+EH&K!(kn6Ym$gD|r1Tg=$WG9e|Z zu@p(NZ*61jOBy0OV@;G$WH;87(vU()Ly}5GdsLqJ-S_i6*Yn)htv8-o^d-=g;;jlv`l07B=BsIRsd;~a;C7&G;Q5AFw~fVV5D-yp^}xrG?RJFA(% zz5G_vts{zWEaz^qp_={Jwj4e_l%eEhi9K8Bj3rHWNkHRWaMK@HgQUf7myt{loa1954$ z2t1?cpa8iii3Pp9NHWaN9@rUYA!T8)M6jcT6;{!i(1Q58FRbAWV<~P*(%89 zVkA)3u;GeD+czCnN6|zTs3PgAT23R|^vf9Q2xL$J_7{GMoEKmz+v>1B-!ASsM`2PU zpJoLii67c;aO?5MJD081 zXj7s8q2T0daZuYe0dI|OClut(N4SZ*3;QCIZ8Z1t+T@=BxvAhjOMI=ooeH&1A)$`@ za85@ueKrFC=}xA_kFM9$=E{ndgOK%!8c@UO^!H zA48}O8{w|__2W#CU8g}n?e`#QX#BAUStm~Ye2kYh_FS6dG3U6Jb-eg6bO#3+Wf7nM z{Bz!NCIikHXRC_4$J%_hb~w22X5%wzk`Jlp>uTApS>3}EGd?Zg?%lpjB^mdqPF;BB z>d(cs{?ZI{oUiOXvFF7hXK#>GkCD4F>Rr*|QIPU{b zSi)v4iTwfAO3pZ9M-Gm9ghf4#=VXj1Z~5hr%ya`2kds5FMILYkwUHpVOa#8~SDUyQ zOA5r@78H`jL&{-;yk^l*%r24(DsD>qN|%#d@afFl&f^-fU6;`#Y=S`oG}oLy+R9Jj zU}d0zSPL}-2}1t-6N2XS$6q+0pluLoyuNl=h7l9E?pRI;ba#5V7vMW~x*?`I@2 z4JDUuo_H$lNcJ}#LJ6Gj*tB%YdhF_TA9mGWLK+^i7GkIGt^26XE z&aCIW|D^5=N&RTah_ehbk(9fB;W!c~N;DLsB!69J0H(sw!PctzTQLaepgvE~M7PC8 zx-z6@b0hd*)|`K}lDYAk0McvUi|08fe7Lm;-ZG_Vg&Ry>OJZUm=et*g0r2FGreYP( zUczeu(nfQj&`~l?F5orK9;0T1n2k}5PecHU8$`G4UwEyW*gI{L7+p?*B^X4Wd3Gq@ zNahh?HU;_N>3B&-&r|wXpk=+=sP@+H92giXQ zKk-nt5MVl6>Ojm;!I?6D1(9EkKq?4qWIx_5LgC@8RZkQ$-U_X=drc2TmcDX2zQ>}F z1!#TC-|cf)Vv`6$4fPp@&VEWVF5phWm1|r?{{D!q8Pi9u_1X_bH_r`o9=cn?bMfev z9gQWagcHreILyi^Eei4x7m>R;F&U(YaYLQ$!nmfyQz5^l3}QGEk+YX}v#&r9GRCxk z0bA@Tt$21mhH)z#0jO;Aoc(vEGLR7hJO+S&FB_x#4u6EsNdrsV-#W{QpU<3Qq;gqp zmi@_iPaC*zi7mO=Q9_rymi(NJk#pAC8G+ny*X?@HtO{`7NCbTl&|`E*vnK15f+Dd? zzh4_jZK2YS)c~t8w5#{BE}e`tc$1jZB@9rYkX|Yvk2K*Dz#)nSsjK}l#sR<0e4;NN zK+YZ_MQs25%E+Ru*;K|KH0(j6=q(gu3K`)?*-S5eDFh7dcQ{pwSdw7xz}2Qb3)0wR zVg7W3^f!kDdlPAwj=e_6#?kp!jPp zuRCe3JL1$Yu}6Cgso{Z#I4oPvrlSElX8P_)X9|LX03@__?8;)`$)ApT^z(A|ehv)l z`0(@B+Yc+UPA&Y5O)La9lrGE?BW?vItv`*!uw2B+@p~oWyi$DLdt7{!0$E{vU)?>Q zAa>Xhuyr`(S~wf}j@!rQUW(T!7_dzJlj`c8jy|v!*Y4>m+KE=8o3@Wi<}4|w5yR9E z`B^g|eoLAjJF#OF)+WneTn6_XN_%LBfGlyhM}kwLmHi6P9M;4p3_TZv{%jCptPHuY zCTZbpRc1rhMS%@2uk2MArXSFVjWW9(VHI|Yiv?NWU~D}MXl1$pk(_vkE*-oZlli(1 z;!cGfH_{J2MQF#M57wU<`Q;6B14ayZEqMo8;0BUsk9VYJb+5s_zY?a$&%7i8uUdV_ z?^&j=K~eVU%(oCV2*4B1$&Kq{H&i6TL`DO1yf$HTCM3byubu~q2YE}dBR07rO+Q1u*wFI(n6>*;^@l2!BPKyMLNzULaDO0`pPxdc@)jcEg&* zfcEcPd*Bl*0oRPu(=C(327!|woyR2 z?;G9@*^l$|VY|+BW+aXE|i>SOs z3kIndiqVqy(52UXo>Sn0E;O|(4x&hL#lop7$}nJMOuri0C19No{Z^5nt(Dy3#rS7{ z2umCm18Fc39r2}h3{Ct-PJ%CRgHR~-jF)^+Oay>3=Hrg%g6i>T>lj(@g5u2;pdlc} znT-e`C<6>YmI)ct@pm&k?hit;6qV3-kU6#B?mX<&D&O;4SrkV$z`zb#mv{ewIX(&l zx~gbcB{v!5@ZTg{rIHxPLyxEIhmKqY#IK|22cWh60A>U6($h!uu-N#4^388MAZLIS z_!H)w?QMz`FiVXOm2SMknRN~Ht=t5(3(KtA2cfQk5X zpAj+zgQ|89M9@WI`;qj@)ps?Nle_q|PQE4j%V17dUgxY(qkQqO91zL10X{k@Ys&QJ zZ#la8EnSf3ssktOi-APxe##jDwgt3rtaThcd`5q_0mDfe112Mc%8_PS8B6O`Edg6Gt(Iq% zY>BQ|CT%aDf@$d;rwOrrFYb`6;GtIFz5X4~Q?efqB)h}yY2WYoY_~sVw^L3j3>MbF zVL(u6M{rL^$cv89?T#?%yAh^$qrC6NltvXA*Lru;aNYG2cN$8& zZ@lPk-0o^Fz1<|;bH}@fV%*cQUD8?F)BBHd`uy|2Cdo1**IdiwjD`q#Y= zecJB-@}YavbilxE;792IBWqyGRO-)*0pL&jjxOaZe(^dDv( zW>CmyaGI}Q?A(Av*`SPCkMzRezwMntoY_$Ko*_jarqa0~)jx?MW<%<|Lt1711exK< zjUl~r!(9HuGB<{e7KSgH4x9HP&16QbvxjU#M(oN)$_z$CAC5Tw8Sz4lIIfS7eMZxW zqXOBZp8TVSvzbSFM;-o*!fIm_r;z}DW}wg5-mtINbT=)9FKZ9e(8qzWv5{yo%?4H_e*+_ zg$v`w{G-m=_mOw**Onox7RGBrM)BJBzZ`zhXm&$>cODwv(C=3q9p@jLydln7AD&?EJ1_l+e(q61?jz6M zW@7E5X8y?%ncaqplj9+i55A5S1At7{WXZzhXPFUK7R+(vSQ9zH6WE-MFk>PfXglmI zJl-yQZ14hp$p2W-=kd=QkGB^d@5r!>wOHmYk4Z$Br=0cD`<5})z9pdnMdke0Oeh|i zR(1e1dcexVb^;SksPX~8u|b1=YkNyuxu;pF{7FZ!{8K4#UiG%agG#B>-Fm?_-L-)6 z`+X|sjiqt77fq%_&%=?UurrmFHKmcY|LN%h`1tF`KQlXjcY~n{(;bcgiruU!Rb%yc zI%NWe-b0FE4H@tE?ftv+gn+TX_5dNc=jrJKPqV5VyiPx!OL~0tG=ky`>HqaGeRR$F zBed;xpORILFj4*4)$oW`tN-GGDOX#{bEovoC;A5}45)P><=qYp%{`X2rO#*7fLn$W zCkGHW)iAR~j&;bbQu`NotSN}LsLBKHOW$6fS9%8D55E@gIBRP1ylz)#>&ZgLkJSTt zFCvw98HCk1>@^~=O1gL9J{ecM`Q1SFzP3L9(n1)K*}L$~9`2d}K4bz~de4XcCXO6= z*;n1bS)Z(yS8((cpQ{=~k0X>!I_#J)juQyVEs|@_m$&c>6@&H^_NSYkF$O(?!iJ2!^$EcozOSdYzZ`RjV}aYL<0|)qt%GPsGrQ&ZY9N`LJkalQQ|wK4Cj>z zp3d3fl`nrF&R5 zwK!N7mZgMswILMKt=WNSG1yC{d;JOy2-lxMZI#bQRPR}J?#<>2d zqyIW)Wt4hZxv-LdaDAUkl z1)%837I|ZZmB5Pw9&CcpC_x? zh|}MnliAzM&(bf>U?$JVyi)k*7SV0a#$v;MgbbSHQPsI_zp&P>NDAATua?}z@ZAwa z0>s0H8m^MnwIU5ikD8wO?Cq8l8piLLclDWW3)}R1FY*fpzY58Rod1{OLR1$ zu_<^onUtldTv}dPWo?jBgd&?WQV39HSp~VMY7Xs0e^@*=J3lx)O1?VBCSOl+2AsPa zCGu8u&S4GhGLTYh7Ja_40w-F6JYs555P;tA8d5u66;zJP;O4Wz zJo_w{uM9~sBdZt+=KCRy{{OACm6Qz26-My^6BU?4^;Z-f3&@95rw^vf@d7Wi(I(w( zyQ@SvhzZ@R4fC|tGb=_^w8rvrEw|&S{n-vWD3%qWlKy_m3c(K{TfSmx_5z?{EpH?e zOFhz?z$*z5t7%l^dJcuPl1NmvOWXO{*0VC`v(T056Xd*tpp;J=}QAxzz)fqm~!=LY)ToA4PsQ(vpQMDGCUa?H|{)owzu2} zm62K->z|z5so>{KFtU1Jbeg1;8N`gMMT@Y|0YHW#g-S&`vk)m4Tr$522y!3EYUW_#9_btO9Az%ajVzg?o-q&7Usjx1Tu@cRibjrqB5%$^VNkS zd?FlhQmgW=5|rAE3$`Y2YX>E_l+IJw6X%elRkVi z?hTvwMzTlLNci>hZ6M!It)UNB--|>zre${?MnXw?;4Zq}0m^j6+=(W2LQb5cFxq(< zV73pY{|veFM58SjFRQneox-xjtE*Pi$stXb#!NNoq@ou$dXhabAucR(Pr8EjlC{-P zb@I=}E7kf@XYdjdlvXMrL3!4S^%tVyf#i)VcV!Ud3O|JO@R_$=?+#uhwDq zM4B0o*yiCaES$nM{RI}RA9$CXRpYMl3_B?#7Cv;0p47Xy5$}w|Os5m?(W6UZXRVWg z=w~;6EsGoY0$sY;4S79oNp?Lu%7z>_H|CqS!Rzd&;kNYhqf-n!$;!b*(co{`-NON@vYphk<{DQbk^A8rHTPNkJrLvRw zb|f@4zmPX5R(5JIU+>0rOnx%3%14!Uw#KxWy#6*@`2-7x?R7`?v~EOUgiU$2k%>YJ zD?2%{efu8qW-H3Y+_-f<$sXdBe^UFED_@n5m^$FtFkz0m5x_}>le6}bk%Q>SPAka+I4(6N(W8i*xW=+cH=4oY z*YQB^CH2&eD&P2X4C-Othe`$*=AU)DF`Z?^MXb>kjp-WdqAwGtTqGynC!r?RfdMqv zMLAUX$_42q0uN`^Z*ismXA;{d}GRnmyHwySgbHUeq}9U%!{OY4{qqz4^b4k{)Z)nBo( zBF;RD=+>2B8Ix3AIXw0f0!WepL|WtQQR)<09SAOW3MenhMQW0<*8x>4A>U& zwO7Ji7|mbUYs9bFYY!`c=J1y;HMqKBbkH4;mU`!ec~}ADVSh!T!7f5>FCjIZ2tgj3 z#X65pKKfH7Dn|6UCgBM21to!=@jBdUU(haz-6aaORDp1<4f?IvCEYZS5p`maj_cS$SC$F9+RS}pX2iJ(S`AbAN<`|hhCT* zv~t}TJd@}d5!Unm;u4waz)OO&GSH#>uMMZZqx*lKpT#?R( z1hvwEkbyi2VkjwB?6=cA&lNXsSbtc;ioD_NCi#^yc(R@s} z<1L6f#f4C2kT5qU9^+VS*wbx;dVYf)vGsF=Fqx^hV;Dx0*kBZ%2pPi0VUmPHfI@X^ zApK;^D}L#&t9Gn$ZjlyQGgSbg7?y~(b=s!{4{~cX2wk}?Pv!cF?bCS~&VOfN<%R@X z1J9^xPN&2Fp0Dsa;Anyo%zGFL16+CY<%$EvUrX-y&FhlDTeHWwe+9|{`WUi}Y)PLFHumC@s~Z%ITNeaq3Y1vFQe5;1H#mj-(mTI@uU z_dJvP_G&rbAv#T%V^ZK;S><&;TEd)9Y()y`+GX|$qUnor2c)Hd+Q&o`Tk`1%Pa@(* zJ1B@z;L=Es=0D+l%KNp+tTrePYhEI-v6kRWkG|_YhtT{rD4es=i(xE1 z&jFeSK0;D@TT_oFOroQW3k(>q!JzHMrqe|_Nc*0TxP_l&f@4}p(S^Wuv??mX^O z3EPQX{P6E5dE@V5ir~5D!aJYtX(Sa3{rUe@d63lKP07C-R(6k?7H)v!c_M+CNjS z9ZdBKPh}N~{z*%%daDX-q*m}KO#x{$foZm)33*pxyZ`6fdvVD;#V9?|9=4;y^whLV zvuPJ4(vuFQrxhVG)(}~9>6hnXI5Vt22Enpnfv{bOWL)O8xQND7aM2)KG%$(&E3i@` zbKN$Ro08d3loeZ>89SA!O$@gNvK|+})h7aHJF>>cGL?Z-d^LMW%o9!&$A)k48`7nQ z8F)$}kXD4~>^Kz;8zL#0fvKSUlataCS;02hNZ;&DTj2hj+6$C*{7!j7(qa zbmb|g^Tpetz3c_&Pn^}s6@3Sa6V8Y-a%CJ`=MEh69+NE@hTN61=lzZwFd{OhXn3?ONV{;+K-;FwU-x5X`g|WPDrmGKPqmJzoJ(l z>Rx~sgAWl3ye0GA98e1Mvf#)Zx(l?2hx|O+eUY|D+m0hNd9O-+xf(uyly~at=YYc3 zqTYB$;rt97=5Q3-qDtd+yZ2q@L`>1;`-P2FMSre{Uiw{dS+v+F2*`J^wPQj?4Z0go z5BDD^8T8*Np3^C*yYFLrwWOr0ME|u%s3;_p7|!uF7GL+ck%8|h*6x(dEo?9CD~2#} z#cHjGuDuIMAIpn~6}w^;B*hNs2PvMJ;8O$^)&xjWmr$Fzfw9Ln#o%h$C@OJ44g zhbjzm5o!_A-h~L~8DKV`q8+FNaFvJbDmTI_jeHSzu+V+pD2+N%|Mbe6I0oLGVG+wX z=qIa|=UVYg1kJ8M1)K1+c z!@A$bc)GF%5L{74O}=oi?skj$yKh30;irNgs7fwDp>2C5GUc>etX!!+wQO%r2g^K9O$Yrpy9Z`Yc~%5O0m+QI_C%+s^-;;q`!37LLP8@jzSw(cuArt-i_3XLjq z8l`2x8Qq%EvD_@@m3KyctKb#SC~G>OMQd$cn7F-(Oly*Gj+R!3i2~sXtPDtGF@=gx zddvr-Vy#W?I|6#kDYy5ZX1F0=WH#~nUiBT7nnCe#szIXP9+BTPhBkJbRNsne4kODL zf)q<3PkEFvMZlbLFc(l-$H?v{<*c_E=3o@>VMU{AFhkM2h9FuXGxfJ4egvz=wGYX| zhrLEob)ZVC+2?u6X+Px4{1k^y? zc9L5=Zn!OvVR4!dKt%2fR_yo;GS;ZPLkjI4+k)rL;!m2!MH%rO59d|Bhjb;0mu;Av zlg+oNHK~{!&ky|l-_7emB{eafR4r=qhA>H6M45yb(cU@Lm@jmo3Q58lNwXsq^E;hY z&P93R2mQJpN9bphPzt`tyIFUqG2T9d`>%U@VM&;R6~R#OZZalk>=d@ZhA5R=dYg|= zy-R9Q+h*JN=ywyDPhV8N`656r^E*pI&b3HSjxyl|E(;_3&qnFxl)Rju^k*xWb6ndz zlOD(=u~_2xZ4|1wYQ9w1i7D5)-D@eV#pi_Vn?UmX(cw8KG_(&nnV58)Hqg*=tSDT( zM0?lcX{NJ??{DvbJd)(qTw%l?R83s*K56U2nWCdi`AZyOA5*P}BIdfO*cs3+wKi<~ z-G-89G*TrfCnTRr#x6A~9FeZmAIy||K4d(&Bg6YpH?~v0_mryGLtKj0sAvdE&rR8} zR}N)K6r7edT@YE;9!sO(4u%Mm|Dd#q=+k&r=OqojFEGnjM*oYEM|JQQ%P`KI|A+jp z8^MEuK1j1LLl)i0*#k+HSBV-NOKT%UyWw4W_i5)=MXKFwWY)tTgSUNwB%kpt`gj(T zbEZ5HajH}HCA7LpxK=9_2`SQ?#v$CUkaNnX6NMTd;}sO^8oc}!}|JHVW%*? z6uMr~4b-0}*eCp45L?oj4ZgwK7VZ-?LEO@OIOcXmV3j;&Gzi1w`k7)D-(Hkqg7%dI zDp_Gu*KUkvQKfXxYp?r0$-*EChsJh4ao7|2gqwv8G1ECcIIYo#a%BJ(hlF30$%{c; zvUITuE3C6+n1(N62HPd_TKT2lm=o4Xk=-jwlg<;?yUHA^wlbMHJQnjr;7B81jCzd&5s+Ztufthp;YM#* ziSStyi6}0bFlgR#kRZGRiym*EEVUA`vg#RzP7F?W<{s*2Srjg-ZZSy8gCrej?$-wV zqP@ZT(0rkKZt^bv>(1>5gw0>;c6%Hi6|yG1|2eey1m;*|-?=y3zw{#Z#@>amCkBu3 z*-?O#0mAtsNOLhnYw5_@8M{=fs8_45^~Nb?uBj-XV){g*;e>!4tLUn&c*u%v?#8v4 zryXJs?GuwLxLFIj;SQXRgI1%(H|K@V?R_=ti!|FPf5OM*5nv;+e^AZZzAQmVo6J@O zZLF<{xwZBH=~a2~av{rlTk;7eBnT6VybI6tczEB^Q|w(48pzw9$IW+Vc%OKo)9LV< zCmuJm=&!dDY)QnOvcPxg&8&=%ECY&v> zzG*9BL3AiMwAh)wMgZQ~jlmq$wP^~rGfhi3iIS|)RwKWg;eDUg3f2fj)%_D|_=NiR z+tUOUbi7^S&&jn8tq;oN4;N!TY)j7HzeA%l)h2v7^T`KF|3b*Rnl&8T@8g;mmXY3Yryg#Ma+nl@cK0YqsjITP!52 zP1$o?PnhpE3zp8odo48C?^yXM#y zHf?LUW2=4a(%TNxTa{4jWz{=!TX!#qxUC|3&P3%uM(lcfifOQ&j*HkGxXl+EVf27c zly$n9J5)pbcdzEL`GLO!9pIvC*j=NDB z>iQ!4KQ&~@%WvNozWczbA!B~r{8pF$eTUOS*8FvQ^Y1?hB@=X+LZosFtNw{Su7j3R4(nTairqR=l3arvA1e_FCYp(^c1$RhuisI9mww7+%p!5=OC|+5#AH| zx(B&#{Kl?MGPKVdn3~)<`MAtgS@mYOzWlRezCE6Q71KV-VC5GkHv>A_X8M!TzJLr) z4ViiJp1hmAx2do1ap@gHpX>g<56_Sm+zH=>!a{705(ZNNM;;=>n_CYH%DZ>wgl&p9iqEXi}r1QO4S1|1wsmXXgJ z3Mb42CdC*;1WMZw)y9Uj^$k0+Hmq9zH!+fH(V%}PYYy2r34mVywDx3oz{(Kb) z*eeGdcAg7^xY5@CrL;Zyrr?mH@zJY3G4GD&OO#Qpk`Er|0b=)MiY8y;RNxFEl|~3} z%c7p0d|MtjMxY_%f4qBJsg%tG5u>Lu@PQ9qg-V_d93~CcRa+XV&G!ucj~$`$U42>1 zd&0GS4*?L6Q1pEw?5q_I?_f!H-X{r~ZP16~j7PSDn@E^&SlGF@v%y6V*}@d46g#!v zIUF7A%!HJEjogP&Y|J@)prxk@5MV-fY=BG{7!$7BcH3T5=%|%~2nyv6u*~ z6SH(e*aR$rCf5oj4IhyKv2k=7+B~R_ zgN@J8X*)dFg@ZDXbS-^2)4S9P0qCTi{dL^Jp7pVn`hJoJW z;JOh;+eX)7Onwc9FsZ)*>s^SbRg_#Z!xFi%_~(S1v=ICVz>`IGqNd~};vOK6_RI){ zl4*{TzTCKE(vng;$7)JRE`+L3fz%-fUJ}>J?HO)U`|Co@0P^P+i1MT)@3;&C0AH;Q z#*)L$bg~nijcI&8tf-iVGqmhxyzqYp_B96Unf)tW%-V9KK`RCFc@rmADcOJspvuBQr|M2M~bM6z|#u zrS$?pbr%DH?vWXYJ0MFTnQEp?toF(YgEo_i#ko#g+CPj&Kb0iAMotlFHA2wHtH%G6FAvm=Yp#oJrik0S)t}|2hUX5s4P6oi_ za4r=lzE1DK)BzKYV4+AJMZZVDT2sUvTK02QNEG1$f)F|#s-)_K(-YF5YX2Gv<{;nO-4Sic?+zN-B@dt{@$a>jMG( z?~=J{pYryGEE(BkO!Tm<+?iBpEbG=T)*MI){=+H6} zg`-;N`y(I~prqjC$nm`eWaVu2)WnbLwW^1c95Q~C8kruiKMH`3B<9^1vun{GjjyMf zfV@07fIpZu_Cytd#aA>UkwF%!rzajCbqnhd23zKxBoB&m z9c~G>jl%6ZMY~tpxD%=_XtG2Xz}>NF-kn~+I^x&|^tsq`c-~VkQO%MM3LB3*d=K$d z{v`=SehRf2odANTjYOCCd{rsoIQx#=m^QPw7Lf9=5K|oD(ZIbI4k5mG3p|X;@A!r< zVV|_rs+tkd9l`n$5ejuBhADB;*Let;5b4>&Tw-MX7_XnQ3=tU7p~ZzY(9AmSPP$Ov zki-E1vqs?(Z^?)4PXz(M^tgP{a_g18b-FAt-mV7lhi!n8PMOc2js2*7_-=hS5_o=8 zKuh&k3`$-Fhja}~gtGHtc>FOp_pk4?2Tq*0IOQplK@zUeOUIIwEB|qKGKC50=#?~# zEE$X6DZxPeoocT^IiqnKm&`k$wD3^Bh(NCqAW=4rOjf0Gs{n~+(>}05k#?{q`H+PD z1Ei3O06g-+1rSMaQ$-R>J+%NV8}l~hW&C61?@5BVXKq?WFHf2lf3wRF=i&XuM0HGF z*R1rjmf=Z((s@(3Ly#y5qOiz@XS%YH<8-NMau>HR;1|l|>CdEEyfsJmh;zv5U!Az3 zmc)!rj=cd#6cnwb^M&$4w_OflDY-nY$I93tD^hPgZ0H|5_0G z{hz&8i39S*qgx=7TKJO-yr5F_N>l>Jo^Y3AZasPUu~v(D$xgfTO`ar54FGn3fgji) zjWGyyC{Az4JV{SeXC(R(~Vhs_%tDqDtW0!St&KNyqGKd|AOADz}%Er??K3UYs3idT{1GdmRe9!(vY6cTrF%NB(q zdes1OfAO6Acw&Q?9+qe(i@{r}AaknqbA}MxBqZOmrUK6n5MY^>6qd##1mE;bN#4M7 zWAITDc+S@Yq^14&NqU@720+eephHitr8c~!oi55u_?BA3se`gdnJ$BSZ_B+OhVb-V z+!RHPu;7d7vA67H`VoKw64$f5;;S5C_Z^oHPf6Ug_EYA=jbq_jf^I*z}>#m3PO;vmK<3JsonZ{%tI_Tr}R$ow`!LSYc7VyHl~7NpV0)I1&}aZ0w% z5c%_K+^ry_Z#?obG6g0oiIOu_ypp01W~N}^Xa|&~al!%K!h^*Hsm26v0?LjEe0hs> zWrNZbV;()E8nciVc2!V0!vXKD5a)gue0`wPp#Q7=>8hA&z9p%b%m{4e4Q@C7H z3DqjRV_e_BKP#5J1OW@bt1~e*Wz&HrZ9wl`g8MVV=^u{#2?W!Yn##7iQJdQ9L>SA` z)NmAc($d6IDxDILxD12|B}io>O5Vn8fF~f_hF}jMRJ0H$3a*_E_B_yNMO<}J*7wy1 zb^>%A3043?fgEV6B`aY=Frn0Byt#HR_z3;ErIjO}F-1#}Tor}CpzL13*&zsG$}n$i zq!^&Q7QkrN40+@EUjG2da#3YKn06qq8eFS|yQL28{tm)KJL{j$;kyrbJayrsduv#8 za>Nc?bUPxahpGjk^PM9?fhDzoTW_0sGF_e=zNu4irh|geiHJ*BLKsv7^#>a>$>3-z z;j#|FNI`e)wqwijqaR0r6Hg)dFub;M>k8CzO$07-b`OPUQ)@vc!5_bRVQV`Iapr?> zJANH-jlUBRwT}=p;rQBwU_?1s`@P$SyEEUi$JMj5_)Citk@1PET|AZ0w(WeS=h!iMD$Mby%7)2t6CnZK?p)5H8&5dXb3DFwC-7YA?WQJp&l^V!(`I>(-5u{c z7gKwR@)`!B4);ws@H!H7DDaz!&ePrmC0zVHhPC9h{nMIC>D;||h5>_@IkU}a1O!Ut z56dv40cw*j`?N%=!`(aG*@p?cF;?nC2g%+ekRy0vq!VuzTJ90*0zf&d9FiZ{#l4Rc zr8XsU9tU_I%hpN@R(huNEEtNP8FXnGY>ytkd5{3N)Q)e!=5%Pl3b-*KxpvLj6^E%;xUG=2X)_@@ew!RB2iA1{}jMi!I7Qr0|PO;OaM7ykxg_$7`0HAvo8DLn4aqQ1?__Hx zylC6PsT*_h4`N3^_hH!;zu3(*4P}N#y~8OzBBIvzvH|Of9tMH;S68O0OH)CeBRbE0 zftY(N)H@BlI={j+lYmY-I@63A)U(r4*Q3nX-&FU_05o}JUx#Zs=*Y4+^NLcv)^Na64ihOvvsJXPbQO>u#>($)eSKpjp{y6`t zr~K8H`uv}luVVhb0%WHlbM`YIHh0A+hoGJ5Ve>b42>fGzl3Eh#uNOJGq?xcJQBf+@ zzjR=JN%qqvoa}P2(X!%&1f>hh%&W_)&HieO%TwQ%wVFM2%wJD=yf)As1fPF0YW6v=01Z%R#$F%HI4emuKNw zNBbAY&G!0FvQ7PDs^V9-oE1mG>5vPH{?F&aE7)9LGNJ|8g4OBqvJRZ|QQX(vH1nOc z9T*-_=)I7mb~Qw>!cD(VPkvgL!~uThAEmVm=Ffec6aba(f7Ic`*10}S=D^O@pS1Dj z|FS=I9^Gd%`AHTFz7pV=Dzd+62=P^){WbaC|H<0+CyXMcbDV}yVShgN6w+j#)x||M zH(4%tS}I#xDln~ju35@^5RkXdnj~))=^*w{Esb!Ssgb}-qYddqV77Ee&7Kj;v8Zgq z`{jtB`-6Rk!7gLDLw*4P0r@dMtICcB$ec%g)j3As4Dc`JALAv~m>iQK{LxpU7pvaF z3RagrfI#gDx6(@#nMLN6~XAFw?bM2Eg)LI>B#CvQ5F@c zXXP~!1@+>^`jq1OlY&z^#Z$uEwzlu1liC8lr!1ny(xO@w7wKl?)vut|m${^PH7WS_0u z3J5|@gM@{KhlKz

hX^kC2g)la!T~mzbHFo1C4VpP-?lqok##r91$LtE>yEgg*cx zMgR*+PD2C&MuQqiPD=nShd>7aD1*Vsg*wIn8iytT1h=k*u_A;k001zAAKin}BZkGt zgDWP3v=prFhl;22^Yr!h_xSnx`~01$?*N@Zcr_>%iW>mb4VsbA$%-4vgsm!&AmTDZ zMqJ@I)B->afdrix95jT%3jz(DSR-T%5ye3TE{K?UfI&cj@%ri9|H-qb&!0epq6&HQ zkcN;rJnoRV4nT&IHKNJ@@Sz4TLm~oLTqPuBAtdLH7E1cjNr8_@7y$Soa6ln&g0N~; z0svysn>mH*-OIPH-@kwx{kw|@1%M%h7!INJA<>5&1tLcVAfUiTLoF6;ojB~!0EmVW zFFfpI>5`@;oPYpu)rsNOgfas=M;Pa#z_@ek-p#xBqrtTc(egHEgk~oc4iFj=&Jfc> z5ELp~r6hppLOTZB8cxXgppLB>K;Qxb8S*FJZTaP>V#bSO{&-ylr=AOHXm zlyK1x7XVNI45z&yfB+w$P+%_r3?RY?0TL4AkVWEXx6hEPhglJThMEbX zo0e+osi?*`pQx<1>MEe5rYh^Kw4S=^tGMQ>>u#~u>g%t-x^wHU#1?C8PrU}K?6S2E zd+f8&Mr%s4%vNhHpUz6F?Y7)jLhZHSh8s|}-Ii;v|F++XtM0mlkbCaC@amc_yYzzV zZoK&B`{}&y2%*Ad4%OHputTWuB`ZtZkeYx71Ej+U09$*nz7$uiBfqOG*qlnoIS9e8 zMu@QSDlm@bbcJ*wZq^2wZB3VLc3{-vpo0@Lpg`#kD4>v( z)J8~M(lslfz+ypp1AM~@-Yn6=3S9q?H;cn50fXCxDDi>`HDvws434YNS`AWry&Mc9 zaG1d}FI=Sq&WSpK#Nsm`w!@Y$*tN>bXU8u4|2@4bag+uch%f*(;(V}#5d)?CLI4Ol zp@Is8n4pf(g30t8rq zTM-i1rV~aKFw7D#08qjXw|p=I5E&SNAe69x(`djvKybhSWF#!5&F+B^yoV0}P&!0F zX%Gm3NK#JYg#1Yj0>xv1Xll?NCuG1TFc1I-nF9?ImT&g9Xz9gQ+!8Pj-bU#S~3(sV}%WCI1DZ=CzD4+(2&}NCnh~{5FA*G3kl&y z`INGTd4#1ySaTlqxo!ji*juJHh652q4w2!Yi|Y!3O950UlI~#0C6~#}OhkZ<7W5%4 zh5>*Nm2nVf90UaR2E;??q7b23AOg({s6Zj%0{Fd7ni5F>01S1_P&z>g#dCr}uVbDnmFWWT(||!+Inl(( zQkFH4P%msr2nw81p5Z700O+J4|6mfwWbQFSg&@$Ley(BwE*Pjz1=@s_t|Ay5_!;sT zl7T@OMg)n2{Kp^_*qCv4B zjuHmIA`rCD3FOIRS0J#29e{5MOQ1_*fB+*I(Z~rUxQuL^AhID0A)bXWUlVr9SZ0!d ziiB8#$Ov!)BUEVtY*9e~fVcuNu_&7qAYUO;NEw!vrT`?jS!d`LYW%JimFlPG>BaXVe$)KLwM12 z`NiuhN+19xnD@Y0qyYd9#wQAZpk5Fr%u;B8;0i}@0|T%WVM@@T1aK(AAZEo72CxAO zn|O9lXg~opTomSV;FTecF-Z#0fG3>z#%F6n0|>xi8T*(|c5T37aZKc`LU9N zV;BtBxX4gunhON*+9g{#AT=OsPNU4_r6pkl=QDw5uS{mRn!o@6*gz7x%;u#X;RI-& zh5-&h=Q`W@&UntVp7+e>KKuF4fDW{s4S*5=7~lkyY_p=hoP!v+fCZ3_w4^6Z=}KGr z(wNS)rZ>&$PD|PaCNaptnioy#Qk(kJs7|%2SIz2HyZY6zj + +# daemon + +```markdown +Usage: dockerd [OPTIONS] + +A self-sufficient runtime for containers. + +Options: + --add-runtime value Register an additional OCI compatible runtime (default []) + --api-cors-header string Set CORS headers in the Engine API + --authorization-plugin value Authorization plugins to load (default []) + --bip string Specify network bridge IP + -b, --bridge string Attach containers to a network bridge + --cgroup-parent string Set parent cgroup for all containers + --cluster-advertise string Address or interface name to advertise + --cluster-store string URL of the distributed storage backend + --cluster-store-opt value Set cluster store options (default map[]) + --config-file string Daemon configuration file (default "/etc/docker/daemon.json") + --containerd string Path to containerd socket + -D, --debug Enable debug mode + --default-gateway value Container default gateway IPv4 address + --default-gateway-v6 value Container default gateway IPv6 address + --default-runtime string Default OCI runtime for containers (default "runc") + --default-ulimit value Default ulimits for containers (default []) + --disable-legacy-registry Disable contacting legacy registries + --dns value DNS server to use (default []) + --dns-opt value DNS options to use (default []) + --dns-search value DNS search domains to use (default []) + --exec-opt value Runtime execution options (default []) + --exec-root string Root directory for execution state files (default "/var/run/docker") + --experimental Enable experimental features + --fixed-cidr string IPv4 subnet for fixed IPs + --fixed-cidr-v6 string IPv6 subnet for fixed IPs + -g, --graph string Root of the Docker runtime (default "/var/lib/docker") + -G, --group string Group for the unix socket (default "docker") + --help Print usage + -H, --host value Daemon socket(s) to connect to (default []) + --icc Enable inter-container communication (default true) + --init Run an init in the container to forward signals and reap processes + --init-path string Path to the docker-init binary + --insecure-registry value Enable insecure registry communication (default []) + --ip value Default IP when binding container ports (default 0.0.0.0) + --ip-forward Enable net.ipv4.ip_forward (default true) + --ip-masq Enable IP masquerading (default true) + --iptables Enable addition of iptables rules (default true) + --ipv6 Enable IPv6 networking + --label value Set key=value labels to the daemon (default []) + --live-restore Enable live restore of docker when containers are still running (Linux only) + --log-driver string Default driver for container logs (default "json-file") + -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") + --log-opt value Default log driver options for containers (default map[]) + --max-concurrent-downloads int Set the max concurrent downloads for each pull (default 3) + --max-concurrent-uploads int Set the max concurrent uploads for each push (default 5) + --metrics-addr string Set address and port to serve the metrics api (default "") + --mtu int Set the containers network MTU + --oom-score-adjust int Set the oom_score_adj for the daemon (default -500) + -p, --pidfile string Path to use for daemon PID file (default "/var/run/docker.pid") + --raw-logs Full timestamps without ANSI coloring + --registry-mirror value Preferred Docker registry mirror (default []) + --seccomp-profile value Path to seccomp profile + --selinux-enabled Enable selinux support + --shutdown-timeout=15 Set the shutdown timeout value in seconds + -s, --storage-driver string Storage driver to use + --storage-opt value Storage driver options (default []) + --swarm-default-advertise-addr string Set default address or interface for swarm advertised address + --tls Use TLS; implied by --tlsverify + --tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem") + --tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem") + --tlskey string Path to TLS key file (default "/root/.docker/key.pem") + --tlsverify Use TLS and verify the remote + --userland-proxy Use userland proxy for loopback traffic (default true) + --userland-proxy-path string Path to the userland proxy binary + --userns-remap string User/Group setting for user namespaces + -v, --version Print version information and quit +``` + +Options with [] may be specified multiple times. + +dockerd is the persistent process that manages containers. Docker +uses different binaries for the daemon and client. To run the daemon you +type `dockerd`. + +To run the daemon with debug output, use `dockerd -D`. + +## Daemon socket option + +The Docker daemon can listen for [Docker Engine API](../api/) +requests via three different types of Socket: `unix`, `tcp`, and `fd`. + +By default, a `unix` domain socket (or IPC socket) is created at +`/var/run/docker.sock`, requiring either `root` permission, or `docker` group +membership. + +If you need to access the Docker daemon remotely, you need to enable the `tcp` +Socket. Beware that the default setup provides un-encrypted and +un-authenticated direct access to the Docker daemon - and should be secured +either using the [built in HTTPS encrypted socket](https://docs.docker.com/engine/security/https/), or by +putting a secure web proxy in front of it. You can listen on port `2375` on all +network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network +interface using its IP address: `-H tcp://192.168.59.103:2375`. It is +conventional to use port `2375` for un-encrypted, and port `2376` for encrypted +communication with the daemon. + +> **Note:** +> If you're using an HTTPS encrypted socket, keep in mind that only +> TLS1.0 and greater are supported. Protocols SSLv3 and under are not +> supported anymore for security reasons. + +On Systemd based systems, you can communicate with the daemon via +[Systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), +use `dockerd -H fd://`. Using `fd://` will work perfectly for most setups but +you can also specify individual sockets: `dockerd -H fd://3`. If the +specified socket activated files aren't found, then Docker will exit. You can +find examples of using Systemd socket activation with Docker and Systemd in the +[Docker source tree](https://github.com/docker/docker/tree/master/contrib/init/systemd/). + +You can configure the Docker daemon to listen to multiple sockets at the same +time using multiple `-H` options: + +```bash +# listen using the default unix socket, and on 2 specific IP addresses on this host. +$ sudo dockerd -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2 +``` + +The Docker client will honor the `DOCKER_HOST` environment variable to set the +`-H` flag for the client. + +```bash +$ docker -H tcp://0.0.0.0:2375 ps +# or +$ export DOCKER_HOST="tcp://0.0.0.0:2375" +$ docker ps +# both are equal +``` + +Setting the `DOCKER_TLS_VERIFY` environment variable to any value other than +the empty string is equivalent to setting the `--tlsverify` flag. The following +are equivalent: + +```bash +$ docker --tlsverify ps +# or +$ export DOCKER_TLS_VERIFY=1 +$ docker ps +``` + +The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` +environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes +precedence over `HTTP_PROXY`. + +### Bind Docker to another host/port or a Unix socket + +> **Warning**: +> Changing the default `docker` daemon binding to a +> TCP port or Unix *docker* user group will increase your security risks +> by allowing non-root users to gain *root* access on the host. Make sure +> you control access to `docker`. If you are binding +> to a TCP port, anyone with access to that port has full Docker access; +> so it is not advisable on an open network. + +With `-H` it is possible to make the Docker daemon to listen on a +specific IP and port. By default, it will listen on +`unix:///var/run/docker.sock` to allow only local connections by the +*root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP +to give access to everybody, but that is **not recommended** because +then it is trivial for someone to gain root access to the host where the +daemon is running. + +Similarly, the Docker client can use `-H` to connect to a custom port. +The Docker client will default to connecting to `unix:///var/run/docker.sock` +on Linux, and `tcp://127.0.0.1:2376` on Windows. + +`-H` accepts host and port assignment in the following format: + + tcp://[host]:[port][path] or unix://path + +For example: + +- `tcp://` -> TCP connection to `127.0.0.1` on either port `2376` when TLS encryption + is on, or port `2375` when communication is in plain text. +- `tcp://host:2375` -> TCP connection on + host:2375 +- `tcp://host:2375/path` -> TCP connection on + host:2375 and prepend path to all requests +- `unix://path/to/socket` -> Unix socket located + at `path/to/socket` + +`-H`, when empty, will default to the same value as +when no `-H` was passed in. + +`-H` also accepts short form for TCP bindings: `host:` or `host:port` or `:port` + +Run Docker in daemon mode: + +```bash +$ sudo /dockerd -H 0.0.0.0:5555 & +``` + +Download an `ubuntu` image: + +```bash +$ docker -H :5555 pull ubuntu +``` + +You can use multiple `-H`, for example, if you want to listen on both +TCP and a Unix socket + +```bash +# Run docker in daemon mode +$ sudo /dockerd -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock & +# Download an ubuntu image, use default Unix socket +$ docker pull ubuntu +# OR use the TCP port +$ docker -H tcp://127.0.0.1:2375 pull ubuntu +``` + +### Daemon storage-driver option + +The Docker daemon has support for several different image layer storage +drivers: `aufs`, `devicemapper`, `btrfs`, `zfs`, `overlay` and `overlay2`. + +The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that +is unlikely to be merged into the main kernel. These are also known to cause +some serious kernel crashes. However, `aufs` allows containers to share +executable and shared library memory, so is a useful choice when running +thousands of containers with the same program or libraries. + +The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) +snapshots. For each devicemapper graph location – typically +`/var/lib/docker/devicemapper` – a thin pool is created based on two block +devices, one for data and one for metadata. By default, these block devices +are created automatically by using loopback mounts of automatically created +sparse files. Refer to [Storage driver options](#storage-driver-options) below +for a way how to customize this setup. +[~jpetazzo/Resizing Docker containers with the Device Mapper plugin](http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) +article explains how to tune your existing setup without the use of options. + +The `btrfs` driver is very fast for `docker build` - but like `devicemapper` +does not share executable memory between devices. Use +`dockerd -s btrfs -g /mnt/btrfs_partition`. + +The `zfs` driver is probably not as fast as `btrfs` but has a longer track record +on stability. Thanks to `Single Copy ARC` shared blocks between clones will be +cached only once. Use `dockerd -s zfs`. To select a different zfs filesystem +set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options). + +The `overlay` is a very fast union filesystem. It is now merged in the main +Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). `overlay` +also supports page cache sharing, this means multiple containers accessing +the same file can share a single page cache entry (or entries), it makes +`overlay` as efficient with memory as `aufs` driver. Call +`dockerd -s overlay` to use it. + +> **Note:** +> As promising as `overlay` is, the feature is still quite young and should not +> be used in production. Most notably, using `overlay` can cause excessive +> inode consumption (especially as the number of images grows), as well as +> being incompatible with the use of RPMs. + +The `overlay2` uses the same fast union filesystem but takes advantage of +[additional features](https://lkml.org/lkml/2015/2/11/106) added in Linux +kernel 4.0 to avoid excessive inode consumption. Call `dockerd -s overlay2` +to use it. + +> **Note:** +> Both `overlay` and `overlay2` are currently unsupported on `btrfs` or any +> Copy on Write filesystem and should only be used over `ext4` partitions. + +### Storage driver options + +Particular storage-driver can be configured with options specified with +`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm`, +options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. + +#### Devicemapper options + +* `dm.thinpooldev` + + Specifies a custom block storage device to use for the thin pool. + + If using a block device for device mapper storage, it is best to use `lvm` + to create and manage the thin-pool volume. This volume is then handed to Docker + to exclusively create snapshot volumes needed for images and containers. + + Managing the thin-pool outside of Engine makes for the most feature-rich + method of having Docker utilize device mapper thin provisioning as the + backing storage for Docker containers. The highlights of the lvm-based + thin-pool management feature include: automatic or interactive thin-pool + resize support, dynamically changing thin-pool features, automatic thinp + metadata checking when lvm activates the thin-pool, etc. + + As a fallback if no thin pool is provided, loopback files are + created. Loopback is very slow, but can be used without any + pre-configuration of storage. It is strongly recommended that you do + not use loopback in production. Ensure your Engine daemon has a + `--storage-opt dm.thinpooldev` argument provided. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool + ``` + +* `dm.basesize` + + Specifies the size to use when creating the base device, which limits the + size of images and containers. The default value is 10G. Note, thin devices + are inherently "sparse", so a 10G device which is mostly empty doesn't use + 10 GB of space on the pool. However, the filesystem will use more space for + the empty case the larger the device is. + + The base device size can be increased at daemon restart which will allow + all future images and containers (based on those new images) to be of the + new base device size. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.basesize=50G + ``` + + This will increase the base device size to 50G. The Docker daemon will throw an + error if existing base device size is larger than 50G. A user can use + this option to expand the base device size however shrinking is not permitted. + + This value affects the system-wide "base" empty filesystem + that may already be initialized and inherited by pulled images. Typically, + a change to this value requires additional steps to take effect: + + ```bash + $ sudo service docker stop + $ sudo rm -rf /var/lib/docker + $ sudo service docker start + ``` + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.basesize=20G + ``` + +* `dm.loopdatasize` + + > **Note**: + > This option configures devicemapper loopback, which should not + > be used in production. + + Specifies the size to use when creating the loopback file for the + "data" device which is used for the thin pool. The default size is + 100G. The file is sparse, so it will not initially take up this + much space. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.loopdatasize=200G + ``` + +* `dm.loopmetadatasize` + + > **Note**: + > This option configures devicemapper loopback, which should not + > be used in production. + + Specifies the size to use when creating the loopback file for the + "metadata" device which is used for the thin pool. The default size + is 2G. The file is sparse, so it will not initially take up + this much space. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.loopmetadatasize=4G + ``` + +* `dm.fs` + + Specifies the filesystem type to use for the base device. The supported + options are "ext4" and "xfs". The default is "xfs" + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.fs=ext4 + ``` + +* `dm.mkfsarg` + + Specifies extra mkfs arguments to be used when creating the base device. + + Example use: + + ```bash + $ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal" + ``` + +* `dm.mountopt` + + Specifies extra mount options used when mounting the thin devices. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.mountopt=nodiscard + ``` + +* `dm.datadev` + + (Deprecated, use `dm.thinpooldev`) + + Specifies a custom blockdevice to use for data for the thin pool. + + If using a block device for device mapper storage, ideally both datadev and + metadatadev should be specified to completely avoid using the loopback + device. + + Example use: + + ```bash + $ sudo dockerd \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 + ``` + +* `dm.metadatadev` + + (Deprecated, use `dm.thinpooldev`) + + Specifies a custom blockdevice to use for metadata for the thin pool. + + For best performance the metadata should be on a different spindle than the + data, or even better on an SSD. + + If setting up a new metadata pool it is required to be valid. This can be + achieved by zeroing the first 4k to indicate empty metadata, like this: + + ```bash + $ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 + ``` + + Example use: + + ```bash + $ sudo dockerd \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 + ``` + +* `dm.blocksize` + + Specifies a custom blocksize to use for the thin pool. The default + blocksize is 64K. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.blocksize=512K + ``` + +* `dm.blkdiscard` + + Enables or disables the use of blkdiscard when removing devicemapper + devices. This is enabled by default (only) if using loopback devices and is + required to resparsify the loopback file on image/container removal. + + Disabling this on loopback can lead to *much* faster container removal + times, but will make the space used in `/var/lib/docker` directory not be + returned to the system for other use when containers are removed. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.blkdiscard=false + ``` + +* `dm.override_udev_sync_check` + + Overrides the `udev` synchronization checks between `devicemapper` and `udev`. + `udev` is the device manager for the Linux kernel. + + To view the `udev` sync support of a Docker daemon that is using the + `devicemapper` driver, run: + + ```bash + $ docker info + [...] + Udev Sync Supported: true + [...] + ``` + + When `udev` sync support is `true`, then `devicemapper` and udev can + coordinate the activation and deactivation of devices for containers. + + When `udev` sync support is `false`, a race condition occurs between + the`devicemapper` and `udev` during create and cleanup. The race condition + results in errors and failures. (For information on these failures, see + [docker#4036](https://github.com/docker/docker/issues/4036)) + + To allow the `docker` daemon to start, regardless of `udev` sync not being + supported, set `dm.override_udev_sync_check` to true: + + ```bash + $ sudo dockerd --storage-opt dm.override_udev_sync_check=true + ``` + + When this value is `true`, the `devicemapper` continues and simply warns + you the errors are happening. + + > **Note:** + > The ideal is to pursue a `docker` daemon and environment that does + > support synchronizing with `udev`. For further discussion on this + > topic, see [docker#4036](https://github.com/docker/docker/issues/4036). + > Otherwise, set this flag for migrating existing Docker daemons to + > a daemon with a supported environment. + +* `dm.use_deferred_removal` + + Enables use of deferred device removal if `libdm` and the kernel driver + support the mechanism. + + Deferred device removal means that if device is busy when devices are + being removed/deactivated, then a deferred removal is scheduled on + device. And devices automatically go away when last user of the device + exits. + + For example, when a container exits, its associated thin device is removed. + If that device has leaked into some other mount namespace and can't be + removed, the container exit still succeeds and this option causes the + system to schedule the device for deferred removal. It does not wait in a + loop trying to remove a busy device. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.use_deferred_removal=true + ``` + +* `dm.use_deferred_deletion` + + Enables use of deferred device deletion for thin pool devices. By default, + thin pool device deletion is synchronous. Before a container is deleted, + the Docker daemon removes any associated devices. If the storage driver + can not remove a device, the container deletion fails and daemon returns. + + Error deleting container: Error response from daemon: Cannot destroy container + + To avoid this failure, enable both deferred device deletion and deferred + device removal on the daemon. + + ```bash + $ sudo dockerd \ + --storage-opt dm.use_deferred_deletion=true \ + --storage-opt dm.use_deferred_removal=true + ``` + + With these two options enabled, if a device is busy when the driver is + deleting a container, the driver marks the device as deleted. Later, when + the device isn't in use, the driver deletes it. + + In general it should be safe to enable this option by default. It will help + when unintentional leaking of mount point happens across multiple mount + namespaces. + +* `dm.min_free_space` + + Specifies the min free space percent in a thin pool require for new device + creation to succeed. This check applies to both free data space as well + as free metadata space. Valid values are from 0% - 99%. Value 0% disables + free space checking logic. If user does not specify a value for this option, + the Engine uses a default value of 10%. + + Whenever a new a thin pool device is created (during `docker pull` or during + container creation), the Engine checks if the minimum free space is + available. If sufficient space is unavailable, then device creation fails + and any relevant `docker` operation fails. + + To recover from this error, you must create more free space in the thin pool + to recover from the error. You can create free space by deleting some images + and containers from the thin pool. You can also add more storage to the thin + pool. + + To add more space to a LVM (logical volume management) thin pool, just add + more storage to the volume group container thin pool; this should automatically + resolve any errors. If your configuration uses loop devices, then stop the + Engine daemon, grow the size of loop files and restart the daemon to resolve + the issue. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.min_free_space=10% + ``` + +* `dm.xfs_nospace_max_retries` + + Specifies the maximum number of retries XFS should attempt to complete + IO when ENOSPC (no space) error is returned by underlying storage device. + + By default XFS retries infinitely for IO to finish and this can result + in unkillable process. To change this behavior one can set + xfs_nospace_max_retries to say 0 and XFS will not retry IO after getting + ENOSPC and will shutdown filesystem. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0 + ``` + +#### ZFS options + +* `zfs.fsname` + + Set zfs filesystem under which docker will create its own datasets. + By default docker will pick up the zfs filesystem where docker graph + (`/var/lib/docker`) is located. + + Example use: + + ```bash + $ sudo dockerd -s zfs --storage-opt zfs.fsname=zroot/docker + ``` + +#### Btrfs options + +* `btrfs.min_space` + + Specifies the minimum size to use when creating the subvolume which is used + for containers. If user uses disk quota for btrfs when creating or running + a container with **--storage-opt size** option, docker should ensure the + **size** cannot be smaller than **btrfs.min_space**. + + Example use: + + ```bash + $ sudo dockerd -s btrfs --storage-opt btrfs.min_space=10G + ``` + +#### Overlay2 options + +* `overlay2.override_kernel_check` + + Overrides the Linux kernel version check allowing overlay2. Support for + specifying multiple lower directories needed by overlay2 was added to the + Linux kernel in 4.0.0. However some older kernel versions may be patched + to add multiple lower directory support for OverlayFS. This option should + only be used after verifying this support exists in the kernel. Applying + this option on a kernel without this support will cause failures on mount. + +## Docker runtime execution options + +The Docker daemon relies on a +[OCI](https://github.com/opencontainers/runtime-spec) compliant runtime +(invoked via the `containerd` daemon) as its interface to the Linux +kernel `namespaces`, `cgroups`, and `SELinux`. + +By default, the Docker daemon automatically starts `containerd`. If you want to +control `containerd` startup, manually start `containerd` and pass the path to +the `containerd` socket using the `--containerd` flag. For example: + +```bash +$ sudo dockerd --containerd /var/run/dev/docker-containerd.sock +``` + +Runtimes can be registered with the daemon either via the +configuration file or using the `--add-runtime` command line argument. + +The following is an example adding 2 runtimes via the configuration: + +```json +{ + "default-runtime": "runc", + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + +This is the same example via the command line: + +```bash +$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement +``` + +> **Note**: defining runtime arguments via the command line is not supported. + +## Options for the runtime + +You can configure the runtime using options specified +with the `--exec-opt` flag. All the flag's options have the `native` prefix. A +single `native.cgroupdriver` option is available. + +The `native.cgroupdriver` option specifies the management of the container's +cgroups. You can specify only specify `cgroupfs` or `systemd`. If you specify +`systemd` and it is not available, the system errors out. If you omit the +`native.cgroupdriver` option,` cgroupfs` is used. + +This example sets the `cgroupdriver` to `systemd`: + +```bash +$ sudo dockerd --exec-opt native.cgroupdriver=systemd +``` + +Setting this option applies to all containers the daemon launches. + +Also Windows Container makes use of `--exec-opt` for special purpose. Docker user +can specify default container isolation technology with this, for example: + +```bash +$ sudo dockerd --exec-opt isolation=hyperv +``` + +Will make `hyperv` the default isolation technology on Windows. If no isolation +value is specified on daemon start, on Windows client, the default is +`hyperv`, and on Windows server, the default is `process`. + +## Daemon DNS options + +To set the DNS server for all Docker containers, use: + +```bash +$ sudo dockerd --dns 8.8.8.8 +``` + + +To set the DNS search domain for all Docker containers, use: + +```bash +$ sudo dockerd --dns-search example.com +``` + + +## Insecure registries + +Docker considers a private registry either secure or insecure. In the rest of +this section, *registry* is used for *private registry*, and `myregistry:5000` +is a placeholder example for a private registry. + +A secure registry uses TLS and a copy of its CA certificate is placed on the +Docker host at `/etc/docker/certs.d/myregistry:5000/ca.crt`. An insecure +registry is either not using TLS (i.e., listening on plain text HTTP), or is +using TLS with a CA certificate not known by the Docker daemon. The latter can +happen when the certificate was not found under +`/etc/docker/certs.d/myregistry:5000/`, or if the certificate verification +failed (i.e., wrong CA). + +By default, Docker assumes all, but local (see local registries below), +registries are secure. Communicating with an insecure registry is not possible +if Docker assumes that registry is secure. In order to communicate with an +insecure registry, the Docker daemon requires `--insecure-registry` in one of +the following two forms: + +* `--insecure-registry myregistry:5000` tells the Docker daemon that + myregistry:5000 should be considered insecure. +* `--insecure-registry 10.1.0.0/16` tells the Docker daemon that all registries + whose domain resolve to an IP address is part of the subnet described by the + CIDR syntax, should be considered insecure. + +The flag can be used multiple times to allow multiple registries to be marked +as insecure. + +If an insecure registry is not marked as insecure, `docker pull`, +`docker push`, and `docker search` will result in an error message prompting +the user to either secure or pass the `--insecure-registry` flag to the Docker +daemon as described above. + +Local registries, whose IP address falls in the 127.0.0.0/8 range, are +automatically marked as insecure as of Docker 1.3.2. It is not recommended to +rely on this, as it may change in the future. + +Enabling `--insecure-registry`, i.e., allowing un-encrypted and/or untrusted +communication, can be useful when running a local registry. However, +because its use creates security vulnerabilities it should ONLY be enabled for +testing purposes. For increased security, users should add their CA to their +system's list of trusted CAs instead of enabling `--insecure-registry`. + +## Legacy Registries + +Enabling `--disable-legacy-registry` forces a docker daemon to only interact with registries which support the V2 protocol. Specifically, the daemon will not attempt `push`, `pull` and `login` to v1 registries. The exception to this is `search` which can still be performed on v1 registries. + +## Running a Docker daemon behind an HTTPS_PROXY + +When running inside a LAN that uses an `HTTPS` proxy, the Docker Hub +certificates will be replaced by the proxy's certificates. These certificates +need to be added to your Docker host's configuration: + +1. Install the `ca-certificates` package for your distribution +2. Ask your network admin for the proxy's CA certificate and append them to + `/etc/pki/tls/certs/ca-bundle.crt` +3. Then start your Docker daemon with `HTTPS_PROXY=http://username:password@proxy:port/ dockerd`. + The `username:` and `password@` are optional - and are only needed if your + proxy is set up to require authentication. + +This will only add the proxy and authentication to the Docker daemon's requests - +your `docker build`s and running containers will need extra configuration to +use the proxy + +## Default Ulimits + +`--default-ulimit` allows you to set the default `ulimit` options to use for +all containers. It takes the same options as `--ulimit` for `docker run`. If +these defaults are not set, `ulimit` settings will be inherited, if not set on +`docker run`, from the Docker daemon. Any `--ulimit` options passed to +`docker run` will overwrite these defaults. + +Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to +set the maximum number of processes available to a user, not to a container. For details +please check the [run](run.md) reference. + +## Nodes discovery + +The `--cluster-advertise` option specifies the `host:port` or `interface:port` +combination that this particular daemon instance should use when advertising +itself to the cluster. The daemon is reached by remote hosts through this value. +If you specify an interface, make sure it includes the IP address of the actual +Docker host. For Engine installation created through `docker-machine`, the +interface is typically `eth1`. + +The daemon uses [libkv](https://github.com/docker/libkv/) to advertise +the node within the cluster. Some key-value backends support mutual +TLS. To configure the client TLS settings used by the daemon can be configured +using the `--cluster-store-opt` flag, specifying the paths to PEM encoded +files. For example: + +```bash +$ sudo dockerd \ + --cluster-advertise 192.168.1.2:2376 \ + --cluster-store etcd://192.168.1.2:2379 \ + --cluster-store-opt kv.cacertfile=/path/to/ca.pem \ + --cluster-store-opt kv.certfile=/path/to/cert.pem \ + --cluster-store-opt kv.keyfile=/path/to/key.pem +``` + +The currently supported cluster store options are: + +* `discovery.heartbeat` + + Specifies the heartbeat timer in seconds which is used by the daemon as a + keepalive mechanism to make sure discovery module treats the node as alive + in the cluster. If not configured, the default value is 20 seconds. + +* `discovery.ttl` + + Specifies the ttl (time-to-live) in seconds which is used by the discovery + module to timeout a node if a valid heartbeat is not received within the + configured ttl value. If not configured, the default value is 60 seconds. + +* `kv.cacertfile` + + Specifies the path to a local file with PEM encoded CA certificates to trust + +* `kv.certfile` + + Specifies the path to a local file with a PEM encoded certificate. This + certificate is used as the client cert for communication with the + Key/Value store. + +* `kv.keyfile` + + Specifies the path to a local file with a PEM encoded private key. This + private key is used as the client key for communication with the + Key/Value store. + +* `kv.path` + + Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. + +## Access authorization + +Docker's access authorization can be extended by authorization plugins that your +organization can purchase or build themselves. You can install one or more +authorization plugins when you start the Docker `daemon` using the +`--authorization-plugin=PLUGIN_ID` option. + +```bash +$ sudo dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +The `PLUGIN_ID` value is either the plugin's name or a path to its specification +file. The plugin's implementation determines whether you can specify a name or +path. Consult with your Docker administrator to get information about the +plugins available to you. + +Once a plugin is installed, requests made to the `daemon` through the command +line or Docker's Engine API are allowed or denied by the plugin. If you have +multiple plugins installed, at least one must allow the request for it to +complete. + +For information about how to create an authorization plugin, see [authorization +plugin](../../extend/plugins_authorization.md) section in the Docker extend section of this documentation. + + +## Daemon user namespace options + +The Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling +a process, and therefore a container, to have a unique range of user and +group IDs which are outside the traditional user and group range utilized by +the host system. Potentially the most important security improvement is that, +by default, container processes running as the `root` user will have expected +administrative privilege (with some restrictions) inside the container but will +effectively be mapped to an unprivileged `uid` on the host. + +When user namespace support is enabled, Docker creates a single daemon-wide mapping +for all containers running on the same engine instance. The mappings will +utilize the existing subordinate user and group ID feature available on all modern +Linux distributions. +The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and +[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be +read for the user, and optional group, specified to the `--userns-remap` +parameter. If you do not wish to specify your own user and/or group, you can +provide `default` as the value to this flag, and a user will be created on your behalf +and provided subordinate uid and gid ranges. This default user will be named +`dockremap`, and entries will be created for it in `/etc/passwd` and +`/etc/group` using your distro's standard user and group creation tools. + +> **Note**: The single mapping per-daemon restriction is in place for now +> because Docker shares image layers from its local cache across all +> containers running on the engine instance. Since file ownership must be +> the same for all containers sharing the same layer content, the decision +> was made to map the file ownership on `docker pull` to the daemon's user and +> group mappings so that there is no delay for running containers once the +> content is downloaded. This design preserves the same performance for `docker +> pull`, `docker push`, and container startup as users expect with +> user namespaces disabled. + +### Starting the daemon with user namespaces enabled + +To enable user namespace support, start the daemon with the +`--userns-remap` flag, which accepts values in the following formats: + + - uid + - uid:gid + - username + - username:groupname + +If numeric IDs are provided, translation back to valid user or group names +will occur so that the subordinate uid and gid information can be read, given +these resources are name-based, not id-based. If the numeric ID information +provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon +startup will fail with an error message. + +**Example: starting with default Docker user management:** + +```bash +$ sudo dockerd --userns-remap=default +``` + +When `default` is provided, Docker will create - or find the existing - user and group +named `dockremap`. If the user is created, and the Linux distribution has +appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated +with a contiguous 65536 length range of subordinate user and group IDs, starting +at an offset based on prior entries in those files. For example, Ubuntu will +create the following range, based on an existing user named `user1` already owning +the first 65536 range: + +```bash +$ cat /etc/subuid +user1:100000:65536 +dockremap:165536:65536 +``` + +If you have a preferred/self-managed user with subordinate ID mappings already +configured, you can provide that username or uid to the `--userns-remap` flag. +If you have a group that doesn't match the username, you may provide the `gid` +or group name as well; otherwise the username will be used as the group name +when querying the system for the subordinate group ID range. + +The output of `docker info` can be used to determine if the daemon is running +with user namespaces enabled or not. If the daemon is configured with user +namespaces, the Security Options entry in the response will list "userns" as +one of the enabled security features. + +### Detailed information on `subuid`/`subgid` ranges + +Given potential advanced use of the subordinate ID ranges by power users, the +following paragraphs define how the Docker daemon currently uses the range entries +found within the subordinate range files. + +The simplest case is that only one contiguous range is defined for the +provided user or group. In this case, Docker will use that entire contiguous +range for the mapping of host uids and gids to the container process. This +means that the first ID in the range will be the remapped root user, and the +IDs above that initial ID will map host ID 1 through the end of the range. + +From the example `/etc/subuid` content shown above, the remapped root +user would be uid 165536. + +If the system administrator has set up multiple ranges for a single user or +group, the Docker daemon will read all the available ranges and use the +following algorithm to create the mapping ranges: + +1. The range segments found for the particular user will be sorted by *start ID* ascending. +2. Map segments will be created from each range in increasing value with a length matching the length of each segment. Therefore the range segment with the lowest numeric starting value will be equal to the remapped root, and continue up through host uid/gid equal to the range segment length. As an example, if the lowest segment starts at ID 1000 and has a length of 100, then a map of 1000 -> 0 (the remapped root) up through 1100 -> 100 will be created from this segment. If the next segment starts at ID 10000, then the next map will start with mapping 10000 -> 101 up to the length of this second segment. This will continue until no more segments are found in the subordinate files for this user. +3. If more than five range segments exist for a single user, only the first five will be utilized, matching the kernel's limitation of only five entries in `/proc/self/uid_map` and `proc/self/gid_map`. + +### Disable user namespace for a container + +If you enable user namespaces on the daemon, all containers are started +with user namespaces enabled. In some situations you might want to disable +this feature for a container, for example, to start a privileged container (see +[user namespace known restrictions](#user-namespace-known-restrictions)). +To enable those advanced features for a specific container use `--userns=host` +in the `run/exec/create` command. +This option will completely disable user namespace mapping for the container's user. + +### User namespace known restrictions + +The following standard Docker features are currently incompatible when +running a Docker daemon with user namespaces enabled: + + - sharing PID or NET namespaces with the host (`--pid=host` or `--net=host`) + - Using `--privileged` mode flag on `docker run` (unless also specifying `--userns=host`) + +In general, user namespaces are an advanced feature and will require +coordination with other capabilities. For example, if volumes are mounted from +the host, file ownership will have to be pre-arranged if the user or +administrator wishes the containers to have expected access to the volume +contents. Note that when using external volume or graph driver plugins, those +external software programs must be made aware of user and group mapping ranges +if they are to work seamlessly with user namespace support. + +Finally, while the `root` user inside a user namespaced container process has +many of the expected admin privileges that go along with being the superuser, the +Linux kernel has restrictions based on internal knowledge that this is a user namespaced +process. The most notable restriction that we are aware of at this time is the +inability to use `mknod`. Permission will be denied for device creation even as +container `root` inside a user namespace. + +## Miscellaneous options + +IP masquerading uses address translation to allow containers without a public +IP to talk to other machines on the Internet. This may interfere with some +network topologies and can be disabled with `--ip-masq=false`. + +Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and +for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be +set like this: + + DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 + # or + export DOCKER_TMPDIR=/mnt/disk2/tmp + /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 + +## Default cgroup parent + +The `--cgroup-parent` option allows you to set the default cgroup parent +to use for containers. If this option is not set, it defaults to `/docker` for +fs cgroup driver and `system.slice` for systemd cgroup driver. + +If the cgroup has a leading forward slash (`/`), the cgroup is created +under the root cgroup, otherwise the cgroup is created under the daemon +cgroup. + +Assuming the daemon is running in cgroup `daemoncgroup`, +`--cgroup-parent=/foobar` creates a cgroup in +`/sys/fs/cgroup/memory/foobar`, whereas using `--cgroup-parent=foobar` +creates the cgroup in `/sys/fs/cgroup/memory/daemoncgroup/foobar` + +The systemd cgroup driver has different rules for `--cgroup-parent`. Systemd +represents hierarchy by slice and the name of the slice encodes the location in +the tree. So `--cgroup-parent` for systemd cgroups should be a slice name. A +name can consist of a dash-separated series of names, which describes the path +to the slice from the root slice. For example, `--cgroup-parent=user-a-b.slice` +means the memory cgroup for the container is created in +`/sys/fs/cgroup/memory/user.slice/user-a.slice/user-a-b.slice/docker-.scope`. + +This setting can also be set per container, using the `--cgroup-parent` +option on `docker create` and `docker run`, and takes precedence over +the `--cgroup-parent` option on the daemon. + +## Daemon Metrics + +The `--metrics-addr` option takes a tcp address to serve the metrics API. +This feature is still experimental, therefore, the daemon must be running in experimental +mode for this feature to work. + +To serve the metrics API on localhost:1337 you would specify `--metrics-addr 127.0.0.1:1337` +allowing you to make requests on the API at `127.0.0.1:1337/metrics` to receive metrics in the +[prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) format. + +If you are running a prometheus server you can add this address to your scrape configs +to have prometheus collect metrics on Docker. For more information +on prometheus you can view the website [here](https://prometheus.io/). + +```yml +scrape_configs: + - job_name: 'docker' + static_configs: + - targets: ['127.0.0.1:1337'] +``` + +Please note that this feature is still marked as experimental as metrics and metric +names could change while this feature is still in experimental. Please provide +feedback on what you would like to see collected in the API. + +## Daemon configuration file + +The `--config-file` option allows you to set any configuration option +for the daemon in a JSON format. This file uses the same flag names as keys, +except for flags that allow several entries, where it uses the plural +of the flag name, e.g., `labels` for the `label` flag. + +The options set in the configuration file must not conflict with options set +via flags. The docker daemon fails to start if an option is duplicated between +the file and the flags, regardless their value. We do this to avoid +silently ignore changes introduced in configuration reloads. +For example, the daemon fails to start if you set daemon labels +in the configuration file and also set daemon labels via the `--label` flag. +Options that are not present in the file are ignored when the daemon starts. + +### Linux configuration file + +The default location of the configuration file on Linux is +`/etc/docker/daemon.json`. The `--config-file` flag can be used to specify a + non-default location. + +This is a full example of the allowed configuration options on Linux: + +```json +{ + "authorization-plugins": [], + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "exec-root": "", + "experimental": false, + "storage-driver": "", + "storage-opts": [], + "labels": [], + "live-restore": true, + "log-driver": "", + "log-opts": {}, + "mtu": 0, + "pidfile": "", + "graph": "", + "cluster-store": "", + "cluster-store-opts": {}, + "cluster-advertise": "", + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "shutdown-timeout": 15, + "debug": true, + "hosts": [], + "log-level": "", + "tls": true, + "tlsverify": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "swarm-default-advertise-addr": "", + "api-cors-header": "", + "selinux-enabled": false, + "userns-remap": "", + "group": "", + "cgroup-parent": "", + "default-ulimits": {}, + "init": false, + "init-path": "/usr/libexec/docker-init", + "ipv6": false, + "iptables": false, + "ip-forward": false, + "ip-masq": false, + "userland-proxy": false, + "userland-proxy-path": "/usr/libexec/docker-proxy", + "ip": "0.0.0.0", + "bridge": "", + "bip": "", + "fixed-cidr": "", + "fixed-cidr-v6": "", + "default-gateway": "", + "default-gateway-v6": "", + "icc": false, + "raw-logs": false, + "registry-mirrors": [], + "seccomp-profile": "", + "insecure-registries": [], + "disable-legacy-registry": false, + "default-runtime": "runc", + "oom-score-adjust": -500, + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + +### Windows configuration file + +The default location of the configuration file on Windows is + `%programdata%\docker\config\daemon.json`. The `--config-file` flag can be + used to specify a non-default location. + +This is a full example of the allowed configuration options on Windows: + +```json +{ + "authorization-plugins": [], + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "experimental": false, + "storage-driver": "", + "storage-opts": [], + "labels": [], + "log-driver": "", + "mtu": 0, + "pidfile": "", + "graph": "", + "cluster-store": "", + "cluster-advertise": "", + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "shutdown-timeout": 15, + "debug": true, + "hosts": [], + "log-level": "", + "tlsverify": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "swarm-default-advertise-addr": "", + "group": "", + "default-ulimits": {}, + "bridge": "", + "fixed-cidr": "", + "raw-logs": false, + "registry-mirrors": [], + "insecure-registries": [], + "disable-legacy-registry": false +} +``` + +### Configuration reloading + +Some options can be reconfigured when the daemon is running without requiring +to restart the process. We use the `SIGHUP` signal in Linux to reload, and a global event +in Windows with the key `Global\docker-daemon-config-$PID`. The options can +be modified in the configuration file but still will check for conflicts with +the provided flags. The daemon fails to reconfigure itself +if there are conflicts, but it won't stop execution. + +The list of currently supported options that can be reconfigured is this: + +- `debug`: it changes the daemon to debug mode when set to true. +- `cluster-store`: it reloads the discovery store with the new address. +- `cluster-store-opts`: it uses the new options to reload the discovery store. +- `cluster-advertise`: it modifies the address advertised after reloading. +- `labels`: it replaces the daemon labels with a new set of labels. +- `live-restore`: Enables [keeping containers alive during daemon downtime](https://docs.docker.com/engine/admin/live-restore/). +- `max-concurrent-downloads`: it updates the max concurrent downloads for each pull. +- `max-concurrent-uploads`: it updates the max concurrent uploads for each push. +- `default-runtime`: it updates the runtime to be used if not is + specified at container creation. It defaults to "default" which is + the runtime shipped with the official docker packages. +- `runtimes`: it updates the list of available OCI runtimes that can + be used to run containers +- `authorization-plugin`: specifies the authorization plugins to use. +- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in newly reloaded insecure resgitries, these existing ones will be removed from daemon's config. + +Updating and reloading the cluster configurations such as `--cluster-store`, +`--cluster-advertise` and `--cluster-store-opts` will take effect only if +these configurations were not previously configured. If `--cluster-store` +has been provided in flags and `cluster-advertise` not, `cluster-advertise` +can be added in the configuration file without accompanied by `--cluster-store`. +Configuration reload will log a warning message if it detects a change in +previously configured cluster configurations. + + +## Running multiple daemons + +> **Note:** Running multiple daemons on a single host is considered as "experimental". The user should be aware of +> unsolved problems. This solution may not work properly in some cases. Solutions are currently under development +> and will be delivered in the near future. + +This section describes how to run multiple Docker daemons on a single host. To +run multiple daemons, you must configure each daemon so that it does not +conflict with other daemons on the same host. You can set these options either +by providing them as flags, or by using a [daemon configuration file](#daemon-configuration-file). + +The following daemon options must be configured for each daemon: + +```bash +-b, --bridge= Attach containers to a network bridge +--exec-root=/var/run/docker Root of the Docker execdriver +-g, --graph=/var/lib/docker Root of the Docker runtime +-p, --pidfile=/var/run/docker.pid Path to use for daemon PID file +-H, --host=[] Daemon socket(s) to connect to +--iptables=true Enable addition of iptables rules +--config-file=/etc/docker/daemon.json Daemon configuration file +--tlscacert="~/.docker/ca.pem" Trust certs signed only by this CA +--tlscert="~/.docker/cert.pem" Path to TLS certificate file +--tlskey="~/.docker/key.pem" Path to TLS key file +``` + +When your daemons use different values for these flags, you can run them on the same host without any problems. +It is very important to properly understand the meaning of those options and to use them correctly. + +- The `-b, --bridge=` flag is set to `docker0` as default bridge network. It is created automatically when you install Docker. +If you are not using the default, you must create and configure the bridge manually or just set it to 'none': `--bridge=none` +- `--exec-root` is the path where the container state is stored. The default value is `/var/run/docker`. Specify the path for +your running daemon here. +- `--graph` is the path where images are stored. The default value is `/var/lib/docker`. To avoid any conflict with other daemons +set this parameter separately for each daemon. +- `-p, --pidfile=/var/run/docker.pid` is the path where the process ID of the daemon is stored. Specify the path for your +pid file here. +- `--host=[]` specifies where the Docker daemon will listen for client connections. If unspecified, it defaults to `/var/run/docker.sock`. +- `--iptables=false` prevents the Docker daemon from adding iptables rules. If +multiple daemons manage iptables rules, they may overwrite rules set by another +daemon. Be aware that disabling this option requires you to manually add +iptables rules to expose container ports. If you prevent Docker from adding +iptables rules, Docker will also not add IP masquerading rules, even if you set +`--ip-masq` to `true`. Without IP masquerading rules, Docker containers will not be +able to connect to external hosts or the internet when using network other than +default bridge. +- `--config-file=/etc/docker/daemon.json` is the path where configuration file is stored. You can use it instead of +daemon flags. Specify the path for each daemon. +- `--tls*` Docker daemon supports `--tlsverify` mode that enforces encrypted and authenticated remote connections. +The `--tls*` options enable use of specific certificates for individual daemons. + +Example script for a separate “bootstrap” instance of the Docker daemon without network: + +```bash +$ sudo dockerd \ + -H unix:///var/run/docker-bootstrap.sock \ + -p /var/run/docker-bootstrap.pid \ + --iptables=false \ + --ip-masq=false \ + --bridge=none \ + --graph=/var/lib/docker-bootstrap \ + --exec-root=/var/run/docker-bootstrap +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/events.md b/vendor/github.com/docker/docker/docs/reference/commandline/events.md new file mode 100644 index 0000000..baa966d --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/events.md @@ -0,0 +1,217 @@ +--- +title: "events" +description: "The events command description and usage" +keywords: "events, container, report" +--- + + + +# events + +```markdown +Usage: docker events [OPTIONS] + +Get real time events from the server + +Options: + -f, --filter value Filter output based on conditions provided (default []) + --format string Format the output using the given Go template + --help Print usage + --since string Show all events created since timestamp + --until string Stream events until this timestamp +``` + +Docker containers report the following events: + + attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, load, pull, push, save, tag, untag + +Docker plugins report the following events: + + install, enable, disable, remove + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +Docker daemon report the following events: + + reload + +The `--since` and `--until` parameters can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the client machine’s time. If you do not provide the `--since` option, +the command returns only new and/or live events. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If you would +like to use multiple filters, pass multiple flags (e.g., +`--filter "foo=bar" --filter "bif=baz"`) + +Using the same filter multiple times will be handled as a *OR*; for example +`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display +events for container 588a23dac085 *OR* container a8f7720b8c22 + +Using multiple filters will be handled as a *AND*; for example +`--filter container=588a23dac085 --filter event=start` will display events for +container container 588a23dac085 *AND* the event type is *start* + +The currently supported filters are: + +* container (`container=`) +* event (`event=`) +* image (`image=`) +* plugin (experimental) (`plugin=`) +* label (`label=` or `label==`) +* type (`type=`) +* volume (`volume=`) +* network (`network=`) +* daemon (`daemon=`) + +## Format + +If a format (`--format`) is specified, the given template will be executed +instead of the default +format. Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +If a format is set to `{{json .}}`, the events are streamed as valid JSON +Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . + +## Examples + +You'll need two shells for this example. + +**Shell 1: Listening for events:** + + $ docker events + +**Shell 2: Start and Stop containers:** + + $ docker start 4386fb97867d + $ docker stop 4386fb97867d + $ docker stop 7805c1d35632 + +**Shell 1: (Again .. now showing events):** + + 2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + +**Show events in the past from a specified time:** + + $ docker events --since 1378216169 + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --since '2013-09-03' + 2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --since '2013-09-03T15:49:29' + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + +This example outputs all events that were generated in the last 3 minutes, +relative to the current time on the client machine: + + $ docker events --since '3m' + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + +**Filter events:** + + $ docker events --filter 'event=stop' + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'image=ubuntu-1:14.04' + 2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + + $ docker events --filter 'container=7805c1d35632' + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' + 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'event=stop' + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'container=container_1' --filter 'container=container_2' + 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'type=volume' + 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) + 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) + 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) + 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + + $ docker events --filter 'type=network' + 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) + 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) + + $ docker events --filter 'type=plugin' (experimental) + 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + +**Format:** + + $ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' + Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + +**Format (as JSON Lines):** + + $ docker events --format '{{json .}}' + {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. + {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. + {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/exec.md b/vendor/github.com/docker/docker/docs/reference/commandline/exec.md new file mode 100644 index 0000000..38891c9 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/exec.md @@ -0,0 +1,65 @@ +--- +title: "exec" +description: "The exec command description and usage" +keywords: "command, container, run, execute" +--- + + + +# exec + +```markdown +Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] + +Run a command in a running container + +Options: + -d, --detach Detached mode: run command in the background + --detach-keys Override the key sequence for detaching a container + -e, --env=[] Set environment variables + --help Print usage + -i, --interactive Keep STDIN open even if not attached + --privileged Give extended privileges to the command + -t, --tty Allocate a pseudo-TTY + -u, --user Username or UID (format: [:]) +``` + +The `docker exec` command runs a new command in a running container. + +The command started using `docker exec` only runs while the container's primary +process (`PID 1`) is running, and it is not restarted if the container is +restarted. + +If the container is paused, then the `docker exec` command will fail with an error: + + $ docker pause test + test + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 1ae3b36715d2 ubuntu:latest "bash" 17 seconds ago Up 16 seconds (Paused) test + $ docker exec test ls + FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec + $ echo $? + 1 + +## Examples + + $ docker run --name ubuntu_bash --rm -i -t ubuntu bash + +This will create a container named `ubuntu_bash` and start a Bash session. + + $ docker exec -d ubuntu_bash touch /tmp/execWorks + +This will create a new file `/tmp/execWorks` inside the running container +`ubuntu_bash`, in the background. + + $ docker exec -it ubuntu_bash bash + +This will create a new Bash session in the container `ubuntu_bash`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/export.md b/vendor/github.com/docker/docker/docs/reference/commandline/export.md new file mode 100644 index 0000000..1004fc3 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/export.md @@ -0,0 +1,43 @@ +--- +title: "export" +description: "The export command description and usage" +keywords: "export, file, system, container" +--- + + + +# export + +```markdown +Usage: docker export [OPTIONS] CONTAINER + +Export a container's filesystem as a tar archive + +Options: + --help Print usage + -o, --output string Write to a file, instead of STDOUT +``` + +The `docker export` command does not export the contents of volumes associated +with the container. If a volume is mounted on top of an existing directory in +the container, `docker export` will export the contents of the *underlying* +directory, not the contents of the volume. + +Refer to [Backup, restore, or migrate data +volumes](https://docs.docker.com/engine/tutorials/dockervolumes/#backup-restore-or-migrate-data-volumes) in +the user guide for examples on exporting data in a volume. + +## Examples + + $ docker export red_panda > latest.tar + +Or + + $ docker export --output="latest.tar" red_panda diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/history.md b/vendor/github.com/docker/docker/docs/reference/commandline/history.md new file mode 100644 index 0000000..00f88db --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/history.md @@ -0,0 +1,48 @@ +--- +title: "history" +description: "The history command description and usage" +keywords: "docker, image, history" +--- + + + +# history + +```markdown +Usage: docker history [OPTIONS] IMAGE + +Show the history of an image + +Options: + --help Print usage + -H, --human Print sizes and dates in human readable format (default true) + --no-trunc Don't truncate output + -q, --quiet Only show numeric IDs +``` + +To see how the `docker:latest` image was built: + + $ docker history docker + IMAGE CREATED CREATED BY SIZE COMMENT + 3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B + 8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB + be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB + 4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB + 750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi + +# image prune + +```markdown +Usage: docker image prune [OPTIONS] + +Remove unused images + +Options: + -a, --all Remove all unused images, not just dangling ones + -f, --force Do not prompt for confirmation + --help Print usage +``` + +Remove all dangling images. If `-a` is specified, will also remove all images not referenced by any container. + +Example output: + +```bash +$ docker image prune -a +WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue? [y/N] y +Deleted Images: +untagged: alpine:latest +untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a +deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba +deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f +untagged: alpine:3.3 +untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423 +untagged: my-jq:latest +deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff +deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65 +deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7 +untagged: my-curl:latest +deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e +deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9 +deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e +deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec +deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06 +deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c +deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35 +deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809 +deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0 +deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac +deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b +deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1 + +Total reclaimed space: 16.43 MB +``` + +## Related information + +* [system df](system_df.md) +* [container prune](container_prune.md) +* [volume prune](volume_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/images.md b/vendor/github.com/docker/docker/docs/reference/commandline/images.md new file mode 100644 index 0000000..3b9ea1f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/images.md @@ -0,0 +1,304 @@ +--- +title: "images" +description: "The images command description and usage" +keywords: "list, docker, images" +--- + + + +# images + +```markdown +Usage: docker images [OPTIONS] [REPOSITORY[:TAG]] + +List images + +Options: + -a, --all Show all images (default hides intermediate images) + --digests Show digests + -f, --filter value Filter output based on conditions provided (default []) + - dangling=(true|false) + - label= or label== + - before=([:tag]||) + - since=([:tag]||) + - reference=(pattern of an image reference) + --format string Pretty-print images using a Go template + --help Print usage + --no-trunc Don't truncate output + -q, --quiet Only show numeric IDs +``` + +The default `docker images` will show all top level +images, their repository and tags, and their size. + +Docker images have intermediate layers that increase reusability, +decrease disk usage, and speed up `docker build` by +allowing each step to be cached. These intermediate layers are not shown +by default. + +The `SIZE` is the cumulative space taken up by the image and all +its parent images. This is also the disk space used by the contents of the +Tar file created when you `docker save` an image. + +An image will be listed more than once if it has multiple repository names +or tags. This single image (identifiable by its matching `IMAGE ID`) +uses up the `SIZE` listed only once. + +### Listing the most recently created images + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + 77af4d6b9913 19 hours ago 1.089 GB + committ latest b6fa739cedf5 19 hours ago 1.089 GB + 78a85c484f71 19 hours ago 1.089 GB + docker latest 30557a29d5ab 20 hours ago 1.089 GB + 5ed6274db6ce 24 hours ago 1.089 GB + postgres 9 746b819f315e 4 days ago 213.4 MB + postgres 9.3 746b819f315e 4 days ago 213.4 MB + postgres 9.3.5 746b819f315e 4 days ago 213.4 MB + postgres latest 746b819f315e 4 days ago 213.4 MB + +### Listing images by name and tag + +The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument +that restricts the list to images that match the argument. If you specify +`REPOSITORY`but no `TAG`, the `docker images` command lists all images in the +given repository. + +For example, to list all images in the "java" repository, run this command : + + $ docker images java + REPOSITORY TAG IMAGE ID CREATED SIZE + java 8 308e519aac60 6 days ago 824.5 MB + java 7 493d82594c15 3 months ago 656.3 MB + java latest 2711b1d6f3aa 5 months ago 603.9 MB + +The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, +`docker images jav` does not match the image `java`. + +If both `REPOSITORY` and `TAG` are provided, only images matching that +repository and tag are listed. To find all local images in the "java" +repository with tag "8" you can use: + + $ docker images java:8 + REPOSITORY TAG IMAGE ID CREATED SIZE + java 8 308e519aac60 6 days ago 824.5 MB + +If nothing matches `REPOSITORY[:TAG]`, the list is empty. + + $ docker images java:0 + REPOSITORY TAG IMAGE ID CREATED SIZE + +## Listing the full length image IDs + + $ docker images --no-trunc + REPOSITORY TAG IMAGE ID CREATED SIZE + sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB + committest latest sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB + sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB + docker latest sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB + sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB + sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB + sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB + tryout latest sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB + sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB + +## Listing image digests + +Images that use the v2 or later format have a content-addressable identifier +called a `digest`. As long as the input used to generate the image is +unchanged, the digest value is predictable. To list image digest values, use +the `--digests` flag: + + $ docker images --digests + REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE + localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB + +When pushing or pulling to a 2.0 registry, the `push` or `pull` command +output includes the image digest. You can `pull` using a digest value. You can +also reference by digest in `create`, `run`, and `rmi` commands, as well as the +`FROM` image reference in a Dockerfile. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* dangling (boolean - true or false) +* label (`label=` or `label==`) +* before (`[:]`, `` or ``) - filter images created before given id or references +* since (`[:]`, `` or ``) - filter images created since given id or references + +##### Untagged images (dangling) + + $ docker images --filter "dangling=true" + + REPOSITORY TAG IMAGE ID CREATED SIZE + 8abc22fbb042 4 weeks ago 0 B + 48e5f45168b9 4 weeks ago 2.489 MB + bf747efa0e2f 4 weeks ago 0 B + 980fe10e5736 12 weeks ago 101.4 MB + dea752e4e117 12 weeks ago 101.4 MB + 511136ea3c5a 8 months ago 0 B + +This will display untagged images, that are the leaves of the images tree (not +intermediary layers). These images occur when a new build of an image takes the +`repo:tag` away from the image ID, leaving it as `:` or untagged. +A warning will be issued if trying to remove an image when a container is presently +using it. By having this flag it allows for batch cleanup. + +Ready for use by `docker rmi ...`, like: + + $ docker rmi $(docker images -f "dangling=true" -q) + + 8abc22fbb042 + 48e5f45168b9 + bf747efa0e2f + 980fe10e5736 + dea752e4e117 + 511136ea3c5a + +NOTE: Docker will warn you if any containers exist that are using these untagged images. + + +##### Labeled images + +The `label` filter matches images based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches images with the `com.example.version` label regardless of its value. + + $ docker images --filter "label=com.example.version" + + REPOSITORY TAG IMAGE ID CREATED SIZE + match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB + match-me-2 latest dea752e4e117 About a minute ago 188.3 MB + +The following filter matches images with the `com.example.version` label with the `1.0` value. + + $ docker images --filter "label=com.example.version=1.0" + REPOSITORY TAG IMAGE ID CREATED SIZE + match-me latest 511136ea3c5a About a minute ago 188.3 MB + +In this example, with the `0.1` value, it returns an empty set because no matches were found. + + $ docker images --filter "label=com.example.version=0.1" + REPOSITORY TAG IMAGE ID CREATED SIZE + +#### Before + +The `before` filter shows only images created before the image with +given id or reference. For example, having these images: + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + image1 latest eeae25ada2aa 4 minutes ago 188.3 MB + image2 latest dea752e4e117 9 minutes ago 188.3 MB + image3 latest 511136ea3c5a 25 minutes ago 188.3 MB + +Filtering with `before` would give: + + $ docker images --filter "before=image1" + REPOSITORY TAG IMAGE ID CREATED SIZE + image2 latest dea752e4e117 9 minutes ago 188.3 MB + image3 latest 511136ea3c5a 25 minutes ago 188.3 MB + +#### Since + +The `since` filter shows only images created after the image with +given id or reference. For example, having these images: + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + image1 latest eeae25ada2aa 4 minutes ago 188.3 MB + image2 latest dea752e4e117 9 minutes ago 188.3 MB + image3 latest 511136ea3c5a 25 minutes ago 188.3 MB + +Filtering with `since` would give: + + $ docker images --filter "since=image3" + REPOSITORY TAG IMAGE ID CREATED SIZE + image1 latest eeae25ada2aa 4 minutes ago 188.3 MB + image2 latest dea752e4e117 9 minutes ago 188.3 MB + +#### Reference + +The `reference` filter shows only images whose reference matches +the specified pattern. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest e02e811dd08f 5 weeks ago 1.09 MB + busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB + busybox musl 733eb3059dce 5 weeks ago 1.21 MB + busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB + +Filtering with `reference` would give: + + $ docker images --filter=reference='busy*:*libc' + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB + busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB + +## Formatting + +The formatting option (`--format`) will pretty print container output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +---- | ---- +`.ID` | Image ID +`.Repository` | Image repository +`.Tag` | Image tag +`.Digest` | Image digest +`.CreatedSince` | Elapsed time since the image was created +`.CreatedAt` | Time when the image was created +`.Size` | Image disk size + +When using the `--format` option, the `image` command will either +output the data exactly as the template declares or, when using the +`table` directive, will include column headers as well. + +The following example uses a template without headers and outputs the +`ID` and `Repository` entries separated by a colon for all images: + + {% raw %} + $ docker images --format "{{.ID}}: {{.Repository}}" + 77af4d6b9913: + b6fa739cedf5: committ + 78a85c484f71: + 30557a29d5ab: docker + 5ed6274db6ce: + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + {% endraw %} + +To list all images with their repository and tag in a table format you +can use: + + {% raw %} + $ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" + IMAGE ID REPOSITORY TAG + 77af4d6b9913 + b6fa739cedf5 committ latest + 78a85c484f71 + 30557a29d5ab docker latest + 5ed6274db6ce + 746b819f315e postgres 9 + 746b819f315e postgres 9.3 + 746b819f315e postgres 9.3.5 + 746b819f315e postgres latest + {% endraw %} diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/import.md b/vendor/github.com/docker/docker/docs/reference/commandline/import.md new file mode 100644 index 0000000..20e90a6 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/import.md @@ -0,0 +1,75 @@ +--- +title: "import" +description: "The import command description and usage" +keywords: "import, file, system, container" +--- + + + +# import + +```markdown +Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] + +Import the contents from a tarball to create a filesystem image + +Options: + -c, --change value Apply Dockerfile instruction to the created image (default []) + --help Print usage + -m, --message string Set commit message for imported image +``` + +You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The +`URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) +containing a filesystem or to an individual file on the Docker host. If you +specify an archive, Docker untars it in the container relative to the `/` +(root). If you specify an individual file, you must specify the full path within +the host. To import from a remote location, specify a `URI` that begins with the +`http://` or `https://` protocol. + +The `--change` option will apply `Dockerfile` instructions to the image +that is created. +Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Examples + +**Import from a remote location:** + +This will create a new untagged image. + + $ docker import http://example.com/exampleimage.tgz + +**Import from a local file:** + +Import to docker via pipe and `STDIN`. + + $ cat exampleimage.tgz | docker import - exampleimagelocal:new + +Import with a commit message. + + $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new + +Import to docker from a local archive. + + $ docker import /path/to/exampleimage.tgz + +**Import from a local directory:** + + $ sudo tar -c . | docker import - exampleimagedir + +**Import from a local directory with new configurations:** + + $ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir + +Note the `sudo` in this example – you must preserve +the ownership of the files (especially root ownership) during the +archiving with tar. If you are not root (or the sudo command) when you +tar, then the ownerships might not get preserved. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/index.md b/vendor/github.com/docker/docker/docs/reference/commandline/index.md new file mode 100644 index 0000000..952fa09 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/index.md @@ -0,0 +1,178 @@ +--- +title: "Docker commands" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +identifier: "smn_cli_guide" +--- + + + +# The Docker commands + +This section contains reference information on using Docker's command line +client. Each command has a reference page along with samples. If you are +unfamiliar with the command line, you should start by reading about how to [Use +the Docker command line](cli.md). + +You start the Docker daemon with the command line. How you start the daemon +affects your Docker containers. For that reason you should also make sure to +read the [`dockerd`](dockerd.md) reference page. + +### Docker management commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [dockerd](dockerd.md) | Launch the Docker daemon | +| [info](info.md) | Display system-wide information | +| [inspect](inspect.md)| Return low-level information on a container or image | +| [version](version.md) | Show the Docker version information | + + +### Image commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [build](build.md) | Build an image from a Dockerfile | +| [commit](commit.md) | Create a new image from a container's changes | +| [history](history.md) | Show the history of an image | +| [images](images.md) | List images | +| [import](import.md) | Import the contents from a tarball to create a filesystem image | +| [load](load.md) | Load an image from a tar archive or STDIN | +| [rmi](rmi.md) | Remove one or more images | +| [save](save.md) | Save images to a tar archive | +| [tag](tag.md) | Tag an image into a repository | + +### Container commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [attach](attach.md) | Attach to a running container | +| [cp](cp.md) | Copy files/folders from a container to a HOSTDIR or to STDOUT | +| [create](create.md) | Create a new container | +| [diff](diff.md) | Inspect changes on a container's filesystem | +| [events](events.md) | Get real time events from the server | +| [exec](exec.md) | Run a command in a running container | +| [export](export.md) | Export a container's filesystem as a tar archive | +| [kill](kill.md) | Kill a running container | +| [logs](logs.md) | Fetch the logs of a container | +| [pause](pause.md) | Pause all processes within a container | +| [port](port.md) | List port mappings or a specific mapping for the container | +| [ps](ps.md) | List containers | +| [rename](rename.md) | Rename a container | +| [restart](restart.md) | Restart a running container | +| [rm](rm.md) | Remove one or more containers | +| [run](run.md) | Run a command in a new container | +| [start](start.md) | Start one or more stopped containers | +| [stats](stats.md) | Display a live stream of container(s) resource usage statistics | +| [stop](stop.md) | Stop a running container | +| [top](top.md) | Display the running processes of a container | +| [unpause](unpause.md) | Unpause all processes within a container | +| [update](update.md) | Update configuration of one or more containers | +| [wait](wait.md) | Block until a container stops, then print its exit code | + +### Hub and registry commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [login](login.md) | Register or log in to a Docker registry | +| [logout](logout.md) | Log out from a Docker registry | +| [pull](pull.md) | Pull an image or a repository from a Docker registry | +| [push](push.md) | Push an image or a repository to a Docker registry | +| [search](search.md) | Search the Docker Hub for images | + +### Network and connectivity commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [network connect](network_connect.md) | Connect a container to a network | +| [network create](network_create.md) | Create a new network | +| [network disconnect](network_disconnect.md) | Disconnect a container from a network | +| [network inspect](network_inspect.md) | Display information about a network | +| [network ls](network_ls.md) | Lists all the networks the Engine `daemon` knows about | +| [network rm](network_rm.md) | Removes one or more networks | + + +### Shared data volume commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [volume create](volume_create.md) | Creates a new volume where containers can consume and store data | +| [volume inspect](volume_inspect.md) | Display information about a volume | +| [volume ls](volume_ls.md) | Lists all the volumes Docker knows about | +| [volume prune](volume_prune.md) | Remove all unused volumes | +| [volume rm](volume_rm.md) | Remove one or more volumes | + + +### Swarm node commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [node promote](node_promote.md) | Promote a node that is pending a promotion to manager | +| [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager | +| [node inspect](node_inspect.md) | Inspect a node in the swarm | +| [node update](node_update.md) | Update attributes for a node | +| [node ps](node_ps.md) | List tasks running on one or more nodes | +| [node ls](node_ls.md) | List nodes in the swarm | +| [node rm](node_rm.md) | Remove one or more nodes from the swarm | + +### Swarm swarm commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [swarm init](swarm_init.md) | Initialize a swarm | +| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node | +| [swarm leave](swarm_leave.md) | Remove the current node from the swarm | +| [swarm update](swarm_update.md) | Update attributes of a swarm | +| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens | + +### Swarm service commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [service create](service_create.md) | Create a new service | +| [service inspect](service_inspect.md) | Inspect a service | +| [service ls](service_ls.md) | List services in the swarm | +| [service rm](service_rm.md) | Remove a service from the swarm | +| [service scale](service_scale.md) | Set the number of replicas for the desired state of the service | +| [service ps](service_ps.md) | List the tasks of a service | +| [service update](service_update.md) | Update the attributes of a service | + +### Swarm secret commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [secret create](secret_create.md) | Create a secret from a file or STDIN as content | +| [secret inspect](service_inspect.md) | Inspect the specified secret | +| [secret ls](secret_ls.md) | List secrets in the swarm | +| [secret rm](secret_rm.md) | Remove the specified secrets from the swarm | + +### Swarm stack commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [stack deploy](stack_deploy.md) | Deploy a new stack or update an existing stack | +| [stack ls](stack_ls.md) | List stacks in the swarm | +| [stack ps](stack_ps.md) | List the tasks in the stack | +| [stack rm](stack_rm.md) | Remove the stack from the swarm | +| [stack services](stack_services.md) | List the services in the stack | + +### Plugin commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [plugin create](plugin_create.md) | Create a plugin from a rootfs and configuration | +| [plugin disable](plugin_disable.md) | Disable a plugin | +| [plugin enbale](plugin_enable.md) | Enable a plugin | +| [plugin inspect](plugin_inspect.md) | Display detailed information on a plugin | +| [plugin install](plugin_install.md) | Install a plugin | +| [plugin ls](plugin_ls.md) | List plugins | +| [plugin push](plugin_push.md) | Push a plugin to a registry | +| [plugin rm](plugin_rm.md) | Remove a plugin | +| [plugin set](plugin_set.md) | Change settings for a plugin | diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/info.md b/vendor/github.com/docker/docker/docs/reference/commandline/info.md new file mode 100644 index 0000000..50a084f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/info.md @@ -0,0 +1,224 @@ +--- +title: "info" +description: "The info command description and usage" +keywords: "display, docker, information" +--- + + + +# info + +```markdown +Usage: docker info [OPTIONS] + +Display system-wide information + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +This command displays system wide information regarding the Docker installation. +Information displayed includes the kernel version, number of containers and images. +The number of images shown is the number of unique images. The same image tagged +under different names is counted only once. + +If a format is specified, the given template will be executed instead of the +default format. Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +Depending on the storage driver in use, additional information can be shown, such +as pool name, data file, metadata file, data space used, total data space, metadata +space used, and total metadata space. + +The data file is where the images are stored and the metadata file is where the +meta data regarding those images are stored. When run for the first time Docker +allocates a certain amount of data space and meta data space from the space +available on the volume where `/var/lib/docker` is mounted. + +# Examples + +## Display Docker system information + +Here is a sample output for a daemon running on Ubuntu, using the overlay2 +storage driver and a node that is part of a 2-node swarm: + + $ docker -D info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Images: 52 + Server Version: 1.13.0 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Native Overlay Diff: false + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host macvlan null overlay + Swarm: active + NodeID: rdjq45w1op418waxlairloqbm + Is Manager: true + ClusterID: te8kdyw33n36fqiz74bfjeixd + Managers: 1 + Nodes: 2 + Orchestration: + Task History Retention Limit: 5 + Raft: + Snapshot Interval: 10000 + Number of Old Snapshots to Retain: 0 + Heartbeat Tick: 1 + Election Tick: 3 + Dispatcher: + Heartbeat Period: 5 seconds + CA Configuration: + Expiry Duration: 3 months + Node Address: 172.16.66.128 172.16.66.129 + Manager Addresses: + 172.16.66.128:2477 + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 + runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 + init version: N/A (expected: v0.13.0) + Security Options: + apparmor + seccomp + Profile: default + Kernel Version: 4.4.0-31-generic + Operating System: Ubuntu 16.04.1 LTS + OSType: linux + Architecture: x86_64 + CPUs: 2 + Total Memory: 1.937 GiB + Name: ubuntu + ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 + Docker Root Dir: /var/lib/docker + Debug Mode (client): true + Debug Mode (server): true + File Descriptors: 30 + Goroutines: 123 + System Time: 2016-11-12T17:24:37.955404361-08:00 + EventsListeners: 0 + Http Proxy: http://test:test@proxy.example.com:8080 + Https Proxy: https://test:test@proxy.example.com:8080 + No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com + Registry: https://index.docker.io/v1/ + WARNING: No swap limit support + Labels: + storage=ssd + staging=true + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ + Live Restore Enabled: false + +The global `-D` option tells all `docker` commands to output debug information. + +The example below shows the output for a daemon running on Red Hat Enterprise Linux, +using the devicemapper storage driver. As can be seen in the output, additional +information about the devicemapper storage driver is shown: + + $ docker info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Images: 52 + Server Version: 1.10.3 + Storage Driver: devicemapper + Pool Name: docker-202:2-25583803-pool + Pool Blocksize: 65.54 kB + Base Device Size: 10.74 GB + Backing Filesystem: xfs + Data file: /dev/loop0 + Metadata file: /dev/loop1 + Data Space Used: 1.68 GB + Data Space Total: 107.4 GB + Data Space Available: 7.548 GB + Metadata Space Used: 2.322 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.145 GB + Udev Sync Supported: true + Deferred Removal Enabled: false + Deferred Deletion Enabled: false + Deferred Deleted Device Count: 0 + Data loop file: /var/lib/docker/devicemapper/devicemapper/data + Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.107-RHEL7 (2015-12-01) + Execution Driver: native-0.2 + Logging Driver: json-file + Plugins: + Volume: local + Network: null host bridge + Kernel Version: 3.10.0-327.el7.x86_64 + Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) + OSType: linux + Architecture: x86_64 + CPUs: 1 + Total Memory: 991.7 MiB + Name: ip-172-30-0-91.ec2.internal + ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S + Docker Root Dir: /var/lib/docker + Debug mode (client): false + Debug mode (server): false + Username: gordontheturtle + Registry: https://index.docker.io/v1/ + Insecure registries: + myinsecurehost:5000 + 127.0.0.0/8 + +You can also specify the output format: + + $ docker info --format '{{json .}}' + {"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} + +Here is a sample output for a daemon running on Windows Server 2016: + + E:\docker>docker info + Containers: 1 + Running: 0 + Paused: 0 + Stopped: 1 + Images: 17 + Server Version: 1.13.0 + Storage Driver: windowsfilter + Windows: + Logging Driver: json-file + Plugins: + Volume: local + Network: nat null overlay + Swarm: inactive + Default Isolation: process + Kernel Version: 10.0 14393 (14393.206.amd64fre.rs1_release.160912-1937) + Operating System: Windows Server 2016 Datacenter + OSType: windows + Architecture: x86_64 + CPUs: 8 + Total Memory: 3.999 GiB + Name: WIN-V0V70C0LU5P + ID: NYMS:B5VK:UMSL:FVDZ:EWB5:FKVK:LPFL:FJMQ:H6FT:BZJ6:L2TD:XH62 + Docker Root Dir: C:\control + Debug Mode (client): false + Debug Mode (server): false + Registry: https://index.docker.io/v1/ + Insecure Registries: + 127.0.0.0/8 + Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ + Live Restore Enabled: false diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md new file mode 100644 index 0000000..7a0c3a0 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md @@ -0,0 +1,102 @@ +--- +title: "inspect" +description: "The inspect command description and usage" +keywords: "inspect, container, json" +--- + + + +# inspect + +```markdown +Usage: docker inspect [OPTIONS] NAME|ID [NAME|ID...] + +Return low-level information on Docker object(s) (e.g. container, image, volume, +network, node, service, or task) identified by name or ID + +Options: + -f, --format Format the output using the given Go template + --help Print usage + -s, --size Display total file sizes if the type is container + --type Return JSON for specified type +``` + +By default, this will render all results in a JSON array. If the container and +image have the same name, this will return container JSON for unspecified type. +If a format is specified, the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +**Get an instance's IP address:** + +For the most part, you can pick out any field from the JSON in a fairly +straightforward manner. + + {% raw %} + $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID + {% endraw %} + +**Get an instance's MAC address:** + +For the most part, you can pick out any field from the JSON in a fairly +straightforward manner. + + {% raw %} + $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID + {% endraw %} + +**Get an instance's log path:** + + {% raw %} + $ docker inspect --format='{{.LogPath}}' $INSTANCE_ID + {% endraw %} + +**Get a Task's image name:** + + {% raw %} + $ docker inspect --format='{{.Container.Spec.Image}}' $INSTANCE_ID + {% endraw %} + +**List all port bindings:** + +One can loop over arrays and maps in the results to produce simple text +output: + + {% raw %} + $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID + {% endraw %} + +**Find a specific port mapping:** + +The `.Field` syntax doesn't work when the field name begins with a +number, but the template language's `index` function does. The +`.NetworkSettings.Ports` section contains a map of the internal port +mappings to a list of external address/port objects. To grab just the +numeric public port, you use `index` to find the specific port map, and +then `index` 0 contains the first object inside of that. Then we ask for +the `HostPort` field to get the public address. + + {% raw %} + $ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID + {% endraw %} + +**Get a subsection in JSON format:** + +If you request a field which is itself a structure containing other +fields, by default you get a Go-style dump of the inner values. +Docker adds a template function, `json`, which can be applied to get +results in JSON format. + + {% raw %} + $ docker inspect --format='{{json .Config}}' $INSTANCE_ID + {% endraw %} diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/kill.md b/vendor/github.com/docker/docker/docs/reference/commandline/kill.md new file mode 100644 index 0000000..32fde3d --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/kill.md @@ -0,0 +1,34 @@ +--- +title: "kill" +description: "The kill command description and usage" +keywords: "container, kill, signal" +--- + + + +# kill + +```markdown +Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] + +Kill one or more running containers + +Options: + --help Print usage + -s, --signal string Signal to send to the container (default "KILL") +``` + +The main process inside the container will be sent `SIGKILL`, or any +signal specified with option `--signal`. + +> **Note:** +> `ENTRYPOINT` and `CMD` in the *shell* form run as a subcommand of `/bin/sh -c`, +> which does not pass signals. This means that the executable is not the container’s PID 1 +> and does not receive Unix signals. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/load.md b/vendor/github.com/docker/docker/docs/reference/commandline/load.md new file mode 100644 index 0000000..04a5bc7 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/load.md @@ -0,0 +1,53 @@ +--- +title: "load" +description: "The load command description and usage" +keywords: "stdin, tarred, repository" +--- + + + +# load + +```markdown +Usage: docker load [OPTIONS] + +Load an image from a tar archive or STDIN + +Options: + --help Print usage + -i, --input string Read from tar archive file, instead of STDIN. + The tarball may be compressed with gzip, bzip, or xz + -q, --quiet Suppress the load output but still outputs the imported images +``` + +Loads a tarred repository from a file or the standard input stream. +Restores both images and tags. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + $ docker load < busybox.tar.gz + # […] + Loaded image: busybox:latest + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + $ docker load --input fedora.tar + # […] + Loaded image: fedora:rawhide + # […] + Loaded image: fedora:20 + # […] + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + fedora rawhide 0d20aec6529d 7 weeks ago 387 MB + fedora 20 58394af37342 7 weeks ago 385.5 MB + fedora heisenbug 58394af37342 7 weeks ago 385.5 MB + fedora latest 58394af37342 7 weeks ago 385.5 MB diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/login.md b/vendor/github.com/docker/docker/docs/reference/commandline/login.md new file mode 100644 index 0000000..a0f35fd --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/login.md @@ -0,0 +1,122 @@ +--- +title: "login" +description: "The login command description and usage" +keywords: "registry, login, image" +--- + + + +# login + +```markdown +Usage: docker login [OPTIONS] [SERVER] + +Log in to a Docker registry. +If no server is specified, the default is defined by the daemon. + +Options: + --help Print usage + -p, --password string Password + -u, --username string Username +``` + +If you want to login to a self-hosted registry you can specify this by +adding the server name. + + example: + $ docker login localhost:8080 + + +`docker login` requires user to use `sudo` or be `root`, except when: + +1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. +2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details. + +You can log into any public or private repository for which you have +credentials. When you log in, the command stores encoded credentials in +`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. + +## Credentials store + +The Docker Engine can keep user credentials in an external credentials store, +such as the native keychain of the operating system. Using an external store +is more secure than storing credentials in the Docker configuration file. + +To use a credentials store, you need an external helper program to interact +with a specific keychain or external store. Docker requires the helper +program to be in the client's host `$PATH`. + +This is the list of currently available credentials helpers and where +you can download them from: + +- D-Bus Secret Service: https://github.com/docker/docker-credential-helpers/releases +- Apple macOS keychain: https://github.com/docker/docker-credential-helpers/releases +- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases + +### Usage + +You need to specify the credentials store in `$HOME/.docker/config.json` +to tell the docker engine to use it: + +```json +{ + "credsStore": "osxkeychain" +} +``` + +If you are currently logged in, run `docker logout` to remove +the credentials from the file and run `docker login` again. + +### Protocol + +Credential helpers can be any program or script that follows a very simple protocol. +This protocol is heavily inspired by Git, but it differs in the information shared. + +The helpers always use the first argument in the command to identify the action. +There are only three possible values for that argument: `store`, `get`, and `erase`. + +The `store` command takes a JSON payload from the standard input. That payload carries +the server address, to identify the credential, the user name, and either a password +or an identity token. + +```json +{ + "ServerURL": "https://index.docker.io/v1", + "Username": "david", + "Secret": "passw0rd1" +} +``` + +If the secret being stored is an identity token, the Username should be set to +``. + +The `store` command can write error messages to `STDOUT` that the docker engine +will show if there was an issue. + +The `get` command takes a string payload from the standard input. That payload carries +the server address that the docker engine needs credentials for. This is +an example of that payload: `https://index.docker.io/v1`. + +The `get` command writes a JSON payload to `STDOUT`. Docker reads the user name +and password from this payload: + +```json +{ + "Username": "david", + "Secret": "passw0rd1" +} +``` + +The `erase` command takes a string payload from `STDIN`. That payload carries +the server address that the docker engine wants to remove credentials for. This is +an example of that payload: `https://index.docker.io/v1`. + +The `erase` command can write error messages to `STDOUT` that the docker engine +will show if there was an issue. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/logout.md b/vendor/github.com/docker/docker/docs/reference/commandline/logout.md new file mode 100644 index 0000000..1635e22 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/logout.md @@ -0,0 +1,30 @@ +--- +title: "logout" +description: "The logout command description and usage" +keywords: "logout, docker, registry" +--- + + + +# logout + +```markdown +Usage: docker logout [SERVER] + +Log out from a Docker registry. +If no server is specified, the default is defined by the daemon. + +Options: + --help Print usage +``` + +For example: + + $ docker logout localhost:8080 diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/logs.md b/vendor/github.com/docker/docker/docs/reference/commandline/logs.md new file mode 100644 index 0000000..891e10b --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/logs.md @@ -0,0 +1,66 @@ +--- +title: "logs" +description: "The logs command description and usage" +keywords: "logs, retrieve, docker" +--- + + + +# logs + +```markdown +Usage: docker logs [OPTIONS] CONTAINER + +Fetch the logs of a container + +Options: + --details Show extra details provided to logs + -f, --follow Follow log output + --help Print usage + --since string Show logs since timestamp + --tail string Number of lines to show from the end of the logs (default "all") + -t, --timestamps Show timestamps +``` + +The `docker logs` command batch-retrieves logs present at the time of execution. + +> **Note**: this command is only functional for containers that are started with +> the `json-file` or `journald` logging driver. + +For more information about selecting and configuring logging drivers, refer to +[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). + +The `docker logs --follow` command will continue streaming the new output from +the container's `STDOUT` and `STDERR`. + +Passing a negative number or a non-integer to `--tail` is invalid and the +value is set to `all` in that case. + +The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) +, for example `2014-09-16T06:17:46.000000000Z`, to each +log entry. To ensure that the timestamps are aligned the +nano-second part of the timestamp will be padded with zero when necessary. + +The `docker logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +container. + +The `--since` option shows only the container logs generated after +a given date. You can specify the date as an RFC 3339 date, a UNIX +timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date +format you may also use RFC3339Nano, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. You can combine the +`--since` option with either or both of the `--follow` or `--tail` options. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/menu.md b/vendor/github.com/docker/docker/docs/reference/commandline/menu.md new file mode 100644 index 0000000..d58afac --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/menu.md @@ -0,0 +1,28 @@ +--- +title: "Command line reference" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +identifier: "smn_cli" +--- + + + +# The Docker commands + +This section contains reference information on using Docker's command line +client. Each command has a reference page along with samples. If you are +unfamiliar with the command line, you should start by reading about how to +[Use the Docker command line](cli.md). + +You start the Docker daemon with the command line. How you start the daemon +affects your Docker containers. For that reason you should also make sure to +read the [`dockerd`](dockerd.md) reference page. + +For a list of Docker commands see [Command line reference guide](index.md). diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md new file mode 100644 index 0000000..52459a5 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md @@ -0,0 +1,100 @@ +--- +title: "network connect" +description: "The network connect command description and usage" +keywords: "network, connect, user-defined" +--- + + + +# network connect + +```markdown +Usage: docker network connect [OPTIONS] NETWORK CONTAINER + +Connect a container to a network + +Options: + --alias value Add network-scoped alias for the container (default []) + --help Print usage + --ip string IP Address + --ip6 string IPv6 Address + --link value Add link to another container (default []) + --link-local-ip value Add a link-local address for the container (default []) +``` + +Connects a container to a network. You can connect a container by name +or by ID. Once connected, the container can communicate with other containers in +the same network. + +```bash +$ docker network connect multi-host-network container1 +``` + +You can also use the `docker run --network=` option to start a container and immediately connect it to a network. + +```bash +$ docker run -itd --network=multi-host-network busybox +``` + +You can specify the IP address you want to be assigned to the container's interface. + +```bash +$ docker network connect --ip 10.10.36.122 multi-host-network container2 +``` + +You can use `--link` option to link another container with a preferred alias + +```bash +$ docker network connect --link container1:c1 multi-host-network container2 +``` + +`--alias` option can be used to resolve the container by another name in the network +being connected to. + +```bash +$ docker network connect --alias db --alias mysql multi-host-network container2 +``` +You can pause, restart, and stop containers that are connected to a network. +A container connects to its configured networks when it runs. + +If specified, the container's IP address(es) is reapplied when a stopped +container is restarted. If the IP address is no longer available, the container +fails to start. One way to guarantee that the IP address is available is +to specify an `--ip-range` when creating the network, and choose the static IP +address(es) from outside that range. This ensures that the IP address is not +given to another container while this container is not on the network. + +```bash +$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network +``` + +```bash +$ docker network connect --ip 172.20.128.2 multi-host-network container2 +``` + +To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. + +Once connected in network, containers can communicate using only another +container's IP address or name. For `overlay` networks or custom plugins that +support multi-host connectivity, containers connected to the same multi-host +network but launched from different Engines can also communicate in this way. + +You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. + +## Related information + +* [network inspect](network_inspect.md) +* [network create](network_create.md) +* [network disconnect](network_disconnect.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) +* [Work with networks](https://docs.docker.com/engine/userguide/networking/work-with-networks/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md new file mode 100644 index 0000000..e238217 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md @@ -0,0 +1,202 @@ +--- +title: "network create" +description: "The network create command description and usage" +keywords: "network, create" +--- + + + +# network create + +```markdown +Usage: docker network create [OPTIONS] NETWORK + +Create a network + +Options: + --attachable Enable manual container attachment + --aux-address value Auxiliary IPv4 or IPv6 addresses used by Network + driver (default map[]) + -d, --driver string Driver to manage the Network (default "bridge") + --gateway value IPv4 or IPv6 Gateway for the master subnet (default []) + --help Print usage + --internal Restrict external access to the network + --ip-range value Allocate container ip from a sub-range (default []) + --ipam-driver string IP Address Management Driver (default "default") + --ipam-opt value Set IPAM driver specific options (default map[]) + --ipv6 Enable IPv6 networking + --label value Set metadata on a network (default []) + -o, --opt value Set driver specific options (default map[]) + --subnet value Subnet in CIDR format that represents a + network segment (default []) +``` + +Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the +built-in network drivers. If you have installed a third party or your own custom +network driver you can specify that `DRIVER` here also. If you don't specify the +`--driver` option, the command automatically creates a `bridge` network for you. +When you install Docker Engine it creates a `bridge` network automatically. This +network corresponds to the `docker0` bridge that Engine has traditionally relied +on. When you launch a new container with `docker run` it automatically connects to +this bridge network. You cannot remove this default bridge network, but you can +create new ones using the `network create` command. + +```bash +$ docker network create -d bridge my-bridge-network +``` + +Bridge networks are isolated networks on a single Engine installation. If you +want to create a network that spans multiple Docker hosts each running an +Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay +networks require some pre-existing conditions before you can create one. These +conditions are: + +* Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores. +* A cluster of hosts with connectivity to the key-value store. +* A properly configured Engine `daemon` on each host in the cluster. + +The `dockerd` options that support the `overlay` network are: + +* `--cluster-store` +* `--cluster-store-opt` +* `--cluster-advertise` + +To read more about these options and how to configure them, see ["*Get started +with multi-host network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay). + +While not required, it is a good idea to install Docker Swarm to +manage the cluster that makes up your network. Swarm provides sophisticated +discovery and server management tools that can assist your implementation. + +Once you have prepared the `overlay` network prerequisites you simply choose a +Docker host in the cluster and issue the following to create the network: + +```bash +$ docker network create -d overlay my-multihost-network +``` + +Network names must be unique. The Docker daemon attempts to identify naming +conflicts but this is not guaranteed. It is the user's responsibility to avoid +name conflicts. + +## Connect containers + +When you start a container, use the `--network` flag to connect it to a network. +This example adds the `busybox` container to the `mynet` network: + +```bash +$ docker run -itd --network=mynet busybox +``` + +If you want to add a container to a network after the container is already +running, use the `docker network connect` subcommand. + +You can connect multiple containers to the same network. Once connected, the +containers can communicate using only another container's IP address or name. +For `overlay` networks or custom plugins that support multi-host connectivity, +containers connected to the same multi-host network but launched from different +Engines can also communicate in this way. + +You can disconnect a container from a network using the `docker network +disconnect` command. + +## Specifying advanced options + +When you create a network, Engine creates a non-overlapping subnetwork for the +network by default. This subnetwork is not a subdivision of an existing +network. It is purely for ip-addressing purposes. You can override this default +and specify subnetwork values directly using the `--subnet` option. On a +`bridge` network you can only create a single subnet: + +```bash +$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0 +``` + +Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` +options. + +```bash +$ docker network create \ + --driver=bridge \ + --subnet=172.28.0.0/16 \ + --ip-range=172.28.5.0/24 \ + --gateway=172.28.5.254 \ + br0 +``` + +If you omit the `--gateway` flag the Engine selects one for you from inside a +preferred pool. For `overlay` networks and for network driver plugins that +support it you can create multiple subnetworks. + +```bash +$ docker network create -d overlay \ + --subnet=192.168.0.0/16 \ + --subnet=192.170.0.0/16 \ + --gateway=192.168.0.100 \ + --gateway=192.170.0.100 \ + --ip-range=192.168.1.0/24 \ + --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ + --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ + my-multihost-network +``` + +Be sure that your subnetworks do not overlap. If they do, the network create +fails and Engine returns an error. + +# Bridge driver options + +When creating a custom network, the default network driver (i.e. `bridge`) has +additional options that can be passed. The following are those options and the +equivalent docker daemon flags used for docker0 bridge: + +| Option | Equivalent | Description | +|--------------------------------------------------|-------------|-------------------------------------------------------| +| `com.docker.network.bridge.name` | - | bridge name to be used when creating the Linux bridge | +| `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | +| `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity | +| `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | +| `com.docker.network.driver.mtu` | `--mtu` | Set the containers network MTU | + +The following arguments can be passed to `docker network create` for any +network driver, again with their approximate equivalents to `docker daemon`. + +| Argument | Equivalent | Description | +|--------------|----------------|--------------------------------------------| +| `--gateway` | - | IPv4 or IPv6 Gateway for the master subnet | +| `--ip-range` | `--fixed-cidr` | Allocate IPs from a range | +| `--internal` | - | Restrict external access to the network | +| `--ipv6` | `--ipv6` | Enable IPv6 networking | +| `--subnet` | `--bip` | Subnet for network | + +For example, let's use `-o` or `--opt` options to specify an IP address binding +when publishing ports: + +```bash +$ docker network create \ + -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \ + simple-network +``` + +### Network internal mode + +By default, when you connect a container to an `overlay` network, Docker also +connects a bridge network to it to provide external connectivity. If you want +to create an externally isolated `overlay` network, you can specify the +`--internal` option. + +## Related information + +* [network inspect](network_inspect.md) +* [network connect](network_connect.md) +* [network disconnect](network_disconnect.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md new file mode 100644 index 0000000..42e976a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md @@ -0,0 +1,43 @@ +--- +title: "network disconnect" +description: "The network disconnect command description and usage" +keywords: "network, disconnect, user-defined" +--- + + + +# network disconnect + +```markdown +Usage: docker network disconnect [OPTIONS] NETWORK CONTAINER + +Disconnect a container from a network + +Options: + -f, --force Force the container to disconnect from a network + --help Print usage +``` + +Disconnects a container from a network. The container must be running to disconnect it from the network. + +```bash + $ docker network disconnect multi-host-network container1 +``` + + +## Related information + +* [network inspect](network_inspect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md new file mode 100644 index 0000000..bc0005e --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md @@ -0,0 +1,192 @@ +--- +title: "network inspect" +description: "The network inspect command description and usage" +keywords: "network, inspect, user-defined" +--- + + + +# network inspect + +```markdown +Usage: docker network inspect [OPTIONS] NETWORK [NETWORK...] + +Display detailed information on one or more networks + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: + +```bash +$ sudo docker run -itd --name=container1 busybox +f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 + +$ sudo docker run -itd --name=container2 busybox +bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 +``` + +The `network inspect` command shows the containers, by id, in its +results. For networks backed by multi-host network driver, such as Overlay, +this command also shows the container endpoints in other hosts in the +cluster. These endpoints are represented as "ep-{endpoint-id}" in the output. +However, for swarm-scoped networks, only the endpoints that are local to the +node are shown. + +You can specify an alternate format to execute a given +template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +```bash +$ sudo docker network inspect bridge +[ + { + "Name": "bridge", + "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", + "Created": "2016-10-19T04:33:30.360899459Z", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.42.1/16", + "Gateway": "172.17.42.1" + } + ] + }, + "Internal": false, + "Containers": { + "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { + "Name": "container2", + "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + }, + "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { + "Name": "container1", + "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", + "MacAddress": "02:42:ac:11:00:01", + "IPv4Address": "172.17.0.1/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": {} + } +] +``` + +Returns the information about the user-defined network: + +```bash +$ docker network create simple-network +69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a +$ docker network inspect simple-network +[ + { + "Name": "simple-network", + "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", + "Created": "2016-10-19T04:33:30.360899459Z", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.22.0.0/16", + "Gateway": "172.22.0.1" + } + ] + }, + "Containers": {}, + "Options": {}, + "Labels": {} + } +] +``` + +For swarm mode overlay networks `network inspect` also shows the IP address and node name +of the peers. Peers are the nodes in the swarm cluster which have at least one task attached +to the network. Node name is of the format `-`. + +```bash +$ docker network inspect ingress +[ + { + "Name": "ingress", + "Id": "j0izitrut30h975vk4m1u5kk3", + "Created": "2016-11-08T06:49:59.803387552Z", + "Scope": "swarm", + "Driver": "overlay", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + }, + "Internal": false, + "Attachable": false, + "Containers": { + "ingress-sbox": { + "Name": "ingress-endpoint", + "EndpointID": "40e002d27b7e5d75f60bc72199d8cae3344e1896abec5eddae9743755fe09115", + "MacAddress": "02:42:0a:ff:00:03", + "IPv4Address": "10.255.0.3/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + }, + "Labels": {}, + "Peers": [ + { + "Name": "net-1-1d22adfe4d5c", + "IP": "192.168.33.11" + }, + { + "Name": "net-2-d55d838b34af", + "IP": "192.168.33.12" + }, + { + "Name": "net-3-8473f8140bd9", + "IP": "192.168.33.13" + } + ] + } +] +``` + +## Related information + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md new file mode 100644 index 0000000..a4f671d --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md @@ -0,0 +1,218 @@ +--- +title: "network ls" +description: "The network ls command description and usage" +keywords: "network, list, user-defined" +--- + + + +# docker network ls + +```markdown +Usage: docker network ls [OPTIONS] + +List networks + +Aliases: + ls, list + +Options: + -f, --filter filter Provide filter values (e.g. 'driver=bridge') + --format string Pretty-print networks using a Go template + --help Print usage + --no-trunc Do not truncate the output + -q, --quiet Only display network IDs +``` + +Lists all the networks the Engine `daemon` knows about. This includes the +networks that span across multiple hosts in a cluster, for example: + +```bash +$ sudo docker network ls +NETWORK ID NAME DRIVER SCOPE +7fca4eb8c647 bridge bridge local +9f904ee27bf5 none null local +cf03ee007fb4 host host local +78b03ee04fc4 multi-host overlay swarm +``` + +Use the `--no-trunc` option to display the full network id: + +```bash +$ docker network ls --no-trunc +NETWORK ID NAME DRIVER SCOPE +18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null local +c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host local +7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge local +95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge local +63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge local +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. + +The currently supported filters are: + +* driver +* id (network's id) +* label (`label=` or `label==`) +* name (network's name) +* type (custom|builtin) + +#### Driver + +The `driver` filter matches networks based on their driver. + +The following example matches networks with the `bridge` driver: + +```bash +$ docker network ls --filter driver=bridge +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +#### ID + +The `id` filter matches on all or part of a network's ID. + +The following filter matches all networks with an ID containing the +`63d1ff1f77b0...` string. + +```bash +$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 +NETWORK ID NAME DRIVER SCOPE +63d1ff1f77b0 dev bridge local +``` + +You can also filter for a substring in an ID as this shows: + +```bash +$ docker network ls --filter id=95e74588f40d +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local + +$ docker network ls --filter id=95e +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +``` + +#### Label + +The `label` filter matches networks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches networks with the `usage` label regardless of its value. + +```bash +$ docker network ls -f "label=usage" +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +The following filter matches networks with the `usage` label with the `prod` value. + +```bash +$ docker network ls -f "label=usage=prod" +NETWORK ID NAME DRIVER SCOPE +f6e212da9dfd test2 bridge local +``` + +#### Name + +The `name` filter matches on all or part of a network's name. + +The following filter matches all networks with a name containing the `foobar` string. + +```bash +$ docker network ls --filter name=foobar +NETWORK ID NAME DRIVER SCOPE +06e7eef0a170 foobar bridge local +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker network ls --filter name=foo +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +06e7eef0a170 foobar bridge local +``` + +#### Type + +The `type` filter supports two values; `builtin` displays predefined networks +(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. + +The following filter matches all user defined networks: + +```bash +$ docker network ls --filter type=custom +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +63d1ff1f77b0 dev bridge local +``` + +By having this flag it allows for batch cleanup. For example, use this filter +to delete all user defined networks: + +```bash +$ docker network rm `docker network ls --filter type=custom -q` +``` + +A warning will be issued when trying to remove a network that has containers +attached. + +## Formatting + +The formatting options (`--format`) pretty-prints networks output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------|------------------------------------------------------------------------------------------ +`.ID` | Network ID +`.Name` | Network name +`.Driver` | Network driver +`.Scope` | Network scope (local, global) +`.IPv6` | Whether IPv6 is enabled on the network or not. +`.Internal` | Whether the network is internal or not. +`.Labels` | All labels assigned to the network. +`.Label` | Value of a specific label for this network. For example `{{.Label "project.version"}}` + +When using the `--format` option, the `network ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID` and `Driver` entries separated by a colon for all networks: + +```bash +$ docker network ls --format "{{.ID}}: {{.Driver}}" +afaaab448eb2: bridge +d1584f8dc718: host +391df270dc66: null +``` + +## Related information + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network inspect](network_inspect.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md new file mode 100644 index 0000000..5b65465 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md @@ -0,0 +1,45 @@ +--- +title: "network prune" +description: "Remove unused networks" +keywords: "network, prune, delete" +--- + +# network prune + +```markdown +Usage: docker network prune [OPTIONS] + +Remove all unused networks + +Options: + -f, --force Do not prompt for confirmation + --help Print usage +``` + +Remove all unused networks. Unused networks are those which are not referenced by any containers. + +Example output: + +```bash +$ docker network prune +WARNING! This will remove all networks not used by at least one container. +Are you sure you want to continue? [y/N] y +Deleted Networks: +n1 +n2 +``` + +## Related information + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network rm](network_rm.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [volume prune](volume_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md new file mode 100644 index 0000000..f06b4c0 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md @@ -0,0 +1,59 @@ +--- +title: "network rm" +description: "the network rm command description and usage" +keywords: "network, rm, user-defined" +--- + + + +# network rm + +```markdown +Usage: docker network rm NETWORK [NETWORK...] + +Remove one or more networks + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +Removes one or more networks by name or identifier. To remove a network, +you must first disconnect any containers connected to it. +To remove the network named 'my-network': + +```bash + $ docker network rm my-network +``` + +To delete multiple networks in a single `docker network rm` command, provide +multiple network names or ids. The following example deletes a network with id +`3695c422697f` and a network named `my-network`: + +```bash + $ docker network rm 3695c422697f my-network +``` + +When you specify multiple networks, the command attempts to delete each in turn. +If the deletion of one network fails, the command continues to the next on the +list and tries to delete that. The command reports success or failure for each +deletion. + +## Related information + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md new file mode 100644 index 0000000..9a81bb9 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md @@ -0,0 +1,42 @@ +--- +title: "node demote" +description: "The node demote command description and usage" +keywords: "node, demote" +--- + + + +# node demote + +```markdown +Usage: docker node demote NODE [NODE...] + +Demote one or more nodes from manager in the swarm + +Options: + --help Print usage + +``` + +Demotes an existing manager so that it is no longer a manager. This command targets a docker engine that is a manager in the swarm. + + +```bash +$ docker node demote +``` + +## Related information + +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md new file mode 100644 index 0000000..fac688f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md @@ -0,0 +1,137 @@ +--- +title: "node inspect" +description: "The node inspect command description and usage" +keywords: "node, inspect" +--- + + + +# node inspect + +```markdown +Usage: docker node inspect [OPTIONS] self|NODE [NODE...] + +Display detailed information on one or more nodes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format. +``` + +Returns information about a node. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +Example output: + + $ docker node inspect swarm-manager + [ + { + "ID": "e216jshn25ckzbvmwlnh5jr3g", + "Version": { + "Index": 10 + }, + "CreatedAt": "2016-06-16T22:52:44.9910662Z", + "UpdatedAt": "2016-06-16T22:52:45.230878043Z", + "Spec": { + "Role": "manager", + "Availability": "active" + }, + "Description": { + "Hostname": "swarm-manager", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 1000000000, + "MemoryBytes": 1039843328 + }, + "Engine": { + "EngineVersion": "1.12.0", + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "overlay" + }, + { + "Type": "Network", + "Name": "null" + }, + { + "Type": "Network", + "Name": "host" + }, + { + "Type": "Network", + "Name": "bridge" + }, + { + "Type": "Network", + "Name": "overlay" + } + ] + } + }, + "Status": { + "State": "ready", + "Addr": "168.0.32.137" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "168.0.32.137:2377" + } + } + ] + + {% raw %} + $ docker node inspect --format '{{ .ManagerStatus.Leader }}' self + false + {% endraw %} + + $ docker node inspect --pretty self + ID: e216jshn25ckzbvmwlnh5jr3g + Hostname: swarm-manager + Joined at: 2016-06-16 22:52:44.9910662 +0000 utc + Status: + State: Ready + Availability: Active + Address: 172.17.0.2 + Manager Status: + Address: 172.17.0.2:2377 + Raft Status: Reachable + Leader: Yes + Platform: + Operating System: linux + Architecture: x86_64 + Resources: + CPUs: 4 + Memory: 7.704 GiB + Plugins: + Network: overlay, bridge, null, host, overlay + Volume: local + Engine Version: 1.12.0 + +## Related information + +* [node demote](node_demote.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md new file mode 100644 index 0000000..5f61713 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md @@ -0,0 +1,130 @@ +--- +title: "node ls" +description: "The node ls command description and usage" +keywords: "node, list" +--- + + + +# node ls + +```markdown +Usage: docker node ls [OPTIONS] + +List nodes in the swarm + +Aliases: + ls, list + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + -q, --quiet Only display IDs +``` + +Lists all the nodes that the Docker Swarm manager knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. + +Example output: + +```bash +$ docker node ls + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [id](node_ls.md#id) +* [label](node_ls.md#label) +* [membership](node_ls.md#membership) +* [name](node_ls.md#name) +* [role](node_ls.md#role) + +#### ID + +The `id` filter matches all or part of a node's id. + +```bash +$ docker node ls -f id=1 + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +``` + +#### Label + +The `label` filter matches nodes based on engine labels and on the presence of a `label` alone or a `label` and a value. Node labels are currently not used for filtering. + +The following filter matches nodes with the `foo` label regardless of its value. + +```bash +$ docker node ls -f "label=foo" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +``` + +#### Membership + +The `membership` filter matches nodes based on the presence of a `membership` and a value +`accepted` or `pending`. + +The following filter matches nodes with the `membership` of `accepted`. + +```bash +$ docker node ls -f "membership=accepted" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active +``` + +#### Name + +The `name` filter matches on all or part of a node hostname. + +The following filter matches the nodes with a name equal to `swarm-master` string. + +```bash +$ docker node ls -f name=swarm-manager1 + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +#### Role + +The `role` filter matches nodes based on the presence of a `role` and a value `worker` or `manager`. + +The following filter matches nodes with the `manager` role. + +```bash +$ docker node ls -f "role=manager" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md new file mode 100644 index 0000000..92092a8 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md @@ -0,0 +1,41 @@ +--- +title: "node promote" +description: "The node promote command description and usage" +keywords: "node, promote" +--- + + + +# node promote + +```markdown +Usage: docker node promote NODE [NODE...] + +Promote one or more nodes to manager in the swarm + +Options: + --help Print usage +``` + +Promotes a node to manager. This command targets a docker engine that is a manager in the swarm. + + +```bash +$ docker node promote +``` + +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md new file mode 100644 index 0000000..7f07c5e --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md @@ -0,0 +1,107 @@ +--- +title: "node ps" +description: "The node ps command description and usage" +keywords: node, tasks, ps +aliases: ["/engine/reference/commandline/node_tasks/"] +--- + + + +# node ps + +```markdown +Usage: docker node ps [OPTIONS] [NODE...] + +List tasks running on one or more nodes, defaults to current node. + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output +``` + +Lists all the tasks on a Node that Docker knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. + +Example output: + + $ docker node ps swarm-manager1 + NAME IMAGE NODE DESIRED STATE CURRENT STATE + redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours + redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds + redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds + redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds + redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds + + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [name](#name) +* [id](#id) +* [label](#label) +* [desired-state](#desired-state) + +#### name + +The `name` filter matches on all or part of a task's name. + +The following filter matches all tasks with a name containing the `redis` string. + + $ docker node ps -f name=redis swarm-manager1 + NAME IMAGE NODE DESIRED STATE CURRENT STATE + redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours + redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds + redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds + redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds + redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds + + +#### id + +The `id` filter matches a task's id. + + $ docker node ps -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1 + NAME IMAGE NODE DESIRED STATE CURRENT STATE + redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds + + +#### label + +The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches tasks with the `usage` label regardless of its value. + +```bash +$ docker node ps -f "label=usage" +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 10 minutes +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 9 minutes +``` + + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, and `accepted`. + + +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md new file mode 100644 index 0000000..b245d63 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md @@ -0,0 +1,73 @@ +--- +title: "node rm" +description: "The node rm command description and usage" +keywords: "node, remove" +--- + + + +# node rm + +```markdown +Usage: docker node rm [OPTIONS] NODE [NODE...] + +Remove one or more nodes from the swarm + +Aliases: + rm, remove + +Options: + -f, --force Force remove a node from the swarm + --help Print usage +``` + +When run from a manager node, removes the specified nodes from a swarm. + + +Example output: + +```nohighlight +$ docker node rm swarm-node-02 + +Node swarm-node-02 removed from swarm +``` + +Removes the specified nodes from the swarm, but only if the nodes are in the +down state. If you attempt to remove an active node you will receive an error: + +```nohighlight +$ docker node rm swarm-node-03 + +Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not +down and can't be removed +``` + +If you lose access to a worker node or need to shut it down because it has been +compromised or is not behaving as expected, you can use the `--force` option. +This may cause transient errors or interruptions, depending on the type of task +being run on the node. + +```nohighlight +$ docker node rm --force swarm-node-03 + +Node swarm-node-03 removed from swarm +``` + +A manager node must be demoted to a worker node (using `docker node demote`) +before you can remove it from the swarm. + +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md new file mode 100644 index 0000000..aa65d03 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md @@ -0,0 +1,71 @@ +--- +title: "node update" +description: "The node update command description and usage" +keywords: "resources, update, dynamically" +--- + + + +## update + +```markdown +Usage: docker node update [OPTIONS] NODE + +Update a node + +Options: + --availability string Availability of the node (active/pause/drain) + --help Print usage + --label-add value Add or update a node label (key=value) (default []) + --label-rm value Remove a node label if exists (default []) + --role string Role of the node (worker/manager) +``` + +### Add label metadata to a node + +Add metadata to a swarm node using node labels. You can specify a node label as +a key with an empty value: + +``` bash +$ docker node update --label-add foo worker1 +``` + +To add multiple labels to a node, pass the `--label-add` flag for each label: + +``` bash +$ docker node update --label-add foo --label-add bar worker1 +``` + +When you [create a service](service_create.md), +you can use node labels as a constraint. A constraint limits the nodes where the +scheduler deploys tasks for a service. + +For example, to add a `type` label to identify nodes where the scheduler should +deploy message queue service tasks: + +``` bash +$ docker node update --label-add type=queue worker1 +``` + +The labels you set for nodes using `docker node update` apply only to the node +entity within the swarm. Do not confuse them with the docker daemon labels for +[dockerd](https://docs.docker.com/engine/userguide/labels-custom-metadata/#daemon-labels). + +For more information about labels, refer to [apply custom +metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). + +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/pause.md b/vendor/github.com/docker/docker/docs/reference/commandline/pause.md new file mode 100644 index 0000000..e2dd800 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/pause.md @@ -0,0 +1,40 @@ +--- +title: "pause" +description: "The pause command description and usage" +keywords: "cgroups, container, suspend, SIGSTOP" +--- + + + +# pause + +```markdown +Usage: docker pause CONTAINER [CONTAINER...] + +Pause all processes within one or more containers + +Options: + --help Print usage +``` + +The `docker pause` command suspends all processes in the specified containers. +On Linux, this uses the cgroups freezer. Traditionally, when suspending a process +the `SIGSTOP` signal is used, which is observable by the process being suspended. +With the cgroups freezer the process is unaware, and unable to capture, +that it is being suspended, and subsequently resumed. On Windows, only Hyper-V +containers can be paused. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) +for further details. + +## Related information + +* [unpause](unpause.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md new file mode 100644 index 0000000..9d4e99e --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md @@ -0,0 +1,60 @@ +--- +title: "plugin create" +description: "the plugin create command description and usage" +keywords: "plugin, create" +--- + + + +# plugin create + +```markdown +Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR + +Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. + +Options: + --compress Compress the context using gzip + --help Print usage +``` + +Creates a plugin. Before creating the plugin, prepare the plugin's root filesystem as well as +[the config.json](../../extend/config.md) + + +The following example shows how to create a sample `plugin`. + +```bash + +$ ls -ls /home/pluginDir + +4 -rw-r--r-- 1 root root 431 Nov 7 01:40 config.json +0 drwxr-xr-x 19 root root 420 Nov 7 01:40 rootfs + +$ docker plugin create plugin /home/pluginDir +plugin + +NAME TAG DESCRIPTION ENABLED +plugin latest A sample plugin for Docker true +``` + +The plugin can subsequently be enabled for local use or pushed to the public registry. + +## Related information + +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md new file mode 100644 index 0000000..451f1ac --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md @@ -0,0 +1,66 @@ +--- +title: "plugin disable" +description: "the plugin disable command description and usage" +keywords: "plugin, disable" +--- + + + +# plugin disable + +```markdown +Usage: docker plugin disable [OPTIONS] PLUGIN + +Disable a plugin + +Options: + -f, --force Force the disable of an active plugin + --help Print usage +``` + +Disables a plugin. The plugin must be installed before it can be disabled, +see [`docker plugin install`](plugin_install.md). Without the `-f` option, +a plugin that has references (eg, volumes, networks) cannot be disabled. + + +The following example shows that the `sample-volume-plugin` plugin is installed +and enabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +To disable the plugin, use the following command: + +```bash +$ docker plugin disable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md new file mode 100644 index 0000000..df8bee3 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md @@ -0,0 +1,65 @@ +--- +title: "plugin enable" +description: "the plugin enable command description and usage" +keywords: "plugin, enable" +--- + + + +# plugin enable + +```markdown +Usage: docker plugin enable [OPTIONS] PLUGIN + +Enable a plugin + +Options: + --help Print usage + --timeout int HTTP client timeout (in seconds) +``` + +Enables a plugin. The plugin must be installed before it can be enabled, +see [`docker plugin install`](plugin_install.md). + + +The following example shows that the `sample-volume-plugin` plugin is installed, +but disabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +To enable the plugin, use the following command: + +```bash +$ docker plugin enable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md new file mode 100644 index 0000000..fdcc030 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md @@ -0,0 +1,164 @@ +--- +title: "plugin inspect" +description: "The plugin inspect command description and usage" +keywords: "plugin, inspect" +--- + + + +# plugin inspect + +```markdown +Usage: docker plugin inspect [OPTIONS] PLUGIN [PLUGIN...] + +Display detailed information on one or more plugins + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +Returns information about a plugin. By default, this command renders all results +in a JSON array. + +Example output: + +```bash +$ docker plugin inspect tiborvass/sample-volume-plugin:latest +``` +```JSON +{ + "Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21", + "Name": "tiborvass/sample-volume-plugin:latest", + "PluginReference": "tiborvas/sample-volume-plugin:latest", + "Enabled": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-sample-volume-plugin", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` +(output formatted for readability) + + +```bash +$ docker plugin inspect -f '{{.Id}}' tiborvass/sample-volume-plugin:latest +``` +``` +8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21 +``` + + +## Related information + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin disable](plugin_disable.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md new file mode 100644 index 0000000..0601193 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md @@ -0,0 +1,71 @@ +--- +title: "plugin install" +description: "the plugin install command description and usage" +keywords: "plugin, install" +--- + + + +# plugin install + +```markdown +Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...] + +Install a plugin + +Options: + --alias string Local name for plugin + --disable Do not enable the plugin on install + --grant-all-permissions Grant all permissions necessary to run the plugin + --help Print usage +``` + +Installs and enables a plugin. Docker looks first for the plugin on your Docker +host. If the plugin does not exist locally, then the plugin is pulled from +the registry. Note that the minimum required registry version to distribute +plugins is 2.3.0 + + +The following example installs `vieus/sshfs` plugin and [set](plugin_set.md) it's env variable +`DEBUG` to 1. Install consists of pulling the plugin from Docker Hub, prompting +the user to accept the list of privileges that the plugin needs, settings parameters + and enabling the plugin. + +```bash +$ docker plugin install vieux/sshfs DEBUG=1 + +Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +vieux/sshfs +``` + +After the plugin is installed, it appears in the list of plugins: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 vieux/sshfs latest sshFS plugin for Docker true +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md new file mode 100644 index 0000000..7a3426d --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md @@ -0,0 +1,53 @@ +--- +title: "plugin ls" +description: "The plugin ls command description and usage" +keywords: "plugin, list" +--- + + + +# plugin ls + +```markdown +Usage: docker plugin ls [OPTIONS] + +List plugins + +Aliases: + ls, list + +Options: + --help Print usage + --no-trunc Don't truncate output +``` + +Lists all the plugins that are currently installed. You can install plugins +using the [`docker plugin install`](plugin_install.md) command. + +Example output: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md new file mode 100644 index 0000000..e61d109 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md @@ -0,0 +1,50 @@ +--- +title: "plugin push" +description: "the plugin push command description and usage" +keywords: "plugin, push" +--- + + + +```markdown +Usage: docker plugin push [OPTIONS] PLUGIN[:TAG] + +Push a plugin to a registry + +Options: + --help Print usage +``` + +Use `docker plugin create` to create the plugin. Once the plugin is ready for distribution, +use `docker plugin push` to share your images to the Docker Hub registry or to a self-hosted one. + +Registry credentials are managed by [docker login](login.md). + +The following example shows how to push a sample `user/plugin`. + +```bash + +$ docker plugin ls +ID NAME TAG DESCRIPTION ENABLED +69553ca1d456 user/plugin latest A sample plugin for Docker false +$ docker plugin push user/plugin +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md new file mode 100644 index 0000000..323ce83 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md @@ -0,0 +1,56 @@ +--- +title: "plugin rm" +description: "the plugin rm command description and usage" +keywords: "plugin, rm" +--- + + + +# plugin rm + +```markdown +Usage: docker plugin rm [OPTIONS] PLUGIN [PLUGIN...] + +Remove one or more plugins + +Aliases: + rm, remove + +Options: + -f, --force Force the removal of an active plugin + --help Print usage +``` + +Removes a plugin. You cannot remove a plugin if it is enabled, you must disable +a plugin using the [`docker plugin disable`](plugin_disable.md) before removing +it (or use --force, use of force is not recommended, since it can affect +functioning of running containers using the plugin). + +The following example disables and removes the `sample-volume-plugin:latest` plugin; + +```bash +$ docker plugin disable tiborvass/sample-volume-plugin +tiborvass/sample-volume-plugin + +$ docker plugin rm tiborvass/sample-volume-plugin:latest +tiborvass/sample-volume-plugin +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md new file mode 100644 index 0000000..c206a8a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md @@ -0,0 +1,99 @@ +--- +title: "plugin set" +description: "the plugin set command description and usage" +keywords: "plugin, set" +--- + + + +# plugin set + +```markdown +Usage: docker plugin set PLUGIN KEY=VALUE [KEY=VALUE...] + +Change settings for a plugin + +Options: + --help Print usage +``` + +Change settings for a plugin. The plugin must be disabled. + +The settings currently supported are: + * env variables + * source of mounts + * path of devices + * args + +The following example change the env variable `DEBUG` on the +`sample-volume-plugin` plugin. + +```bash +$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin +[DEBUG=0] + +$ docker plugin set tiborvass/sample-volume-plugin DEBUG=1 + +$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin +[DEBUG=1] +``` + +The following example change the source of the `mymount` mount on +the `myplugin` plugin. + +```bash +$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin +/foo + +$ docker plugins set myplugin mymount.source=/bar + +$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin +/bar +``` + +Note: since only `source` is settable in `mymount`, `docker plugins set mymount=/bar myplugin` would work too. + +The following example change the path of the `mydevice` device on +the `myplugin` plugin. + +```bash +$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin +/dev/foo + +$ docker plugins set myplugin mydevice.path=/dev/bar + +$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin +/dev/bar +``` + +Note: since only `path` is settable in `mydevice`, `docker plugins set mydevice=/dev/bar myplugin` would work too. + +The following example change the source of the args on the `myplugin` plugin. + +```bash +$ docker plugin inspect -f '{{.Settings.Args}}' myplugin +["foo", "bar"] + +$ docker plugins set myplugin args="foo bar baz" + +$ docker plugin inspect -f '{{.Settings.Args}}' myplugin +["foo", "bar", "baz"] +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md new file mode 100644 index 0000000..20efc57 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md @@ -0,0 +1,84 @@ +--- +title: "plugin upgrade" +description: "the plugin upgrade command description and usage" +keywords: "plugin, upgrade" +--- + + + +# plugin upgrade + +```markdown +Usage: docker plugin upgrade [OPTIONS] PLUGIN [REMOTE] + +Upgrade a plugin + +Options: + --disable-content-trust Skip image verification (default true) + --grant-all-permissions Grant all permissions necessary to run the plugin + --help Print usage + --skip-remote-check Do not check if specified remote plugin matches existing plugin image +``` + +Upgrades an existing plugin to the specified remote plugin image. If no remote +is specified, Docker will re-pull the current image and use the updated version. +All existing references to the plugin will continue to work. +The plugin must be disabled before running the upgrade. + +The following example installs `vieus/sshfs` plugin, uses it to create and use +a volume, then upgrades the plugin. + +```bash +$ docker plugin install vieux/sshfs DEBUG=1 + +Plugin "vieux/sshfs:next" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +vieux/sshfs:next + +$ docker volume create -d vieux/sshfs:next -o sshcmd=root@1.2.3.4:/tmp/shared -o password=XXX sshvolume +sshvolume +$ docker run -it -v sshvolume:/data alpine sh -c "touch /data/hello" +$ docker plugin disable -f vieux/sshfs:next +viex/sshfs:next + +# Here docker volume ls doesn't show 'sshfsvolume', since the plugin is disabled +$ docker volume ls +DRIVER VOLUME NAME + +$ docker plugin upgrade vieux/sshfs:next vieux/sshfs:next +Plugin "vieux/sshfs:next" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +Upgrade plugin vieux/sshfs:next to vieux/sshfs:next +$ docker plugin enable vieux/sshfs:next +viex/sshfs:next +$ docker volume ls +DRIVER VOLUME NAME +viuex/sshfs:next sshvolume +$ docker run -it -v sshvolume:/data alpine sh -c "ls /data" +hello +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/port.md b/vendor/github.com/docker/docker/docs/reference/commandline/port.md new file mode 100644 index 0000000..bc90b6e --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/port.md @@ -0,0 +1,41 @@ +--- +title: "port" +description: "The port command description and usage" +keywords: "port, mapping, container" +--- + + + +# port + +```markdown +Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] + +List port mappings or a specific mapping for the container + +Options: + --help Print usage +``` + +You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or +just a specific mapping: + + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test + $ docker port test + 7890/tcp -> 0.0.0.0:4321 + 9876/tcp -> 0.0.0.0:1234 + $ docker port test 7890/tcp + 0.0.0.0:4321 + $ docker port test 7890/udp + 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test + $ docker port test 7890 + 0.0.0.0:4321 diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/ps.md new file mode 100644 index 0000000..1d5f31d --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/ps.md @@ -0,0 +1,384 @@ +--- +title: "ps" +description: "The ps command description and usage" +keywords: "container, running, list" +--- + + + +# ps + +```markdown +Usage: docker ps [OPTIONS] + +List containers + +Options: + -a, --all Show all containers (default shows just running) + -f, --filter value Filter output based on conditions provided (default []) + - exited= an exit code of + - label= or label== + - status=(created|restarting|removing|running|paused|exited) + - name= a container's name + - id= a container's ID + - before=(|) + - since=(|) + - ancestor=([:tag]||) + containers created from an image or a descendant. + - is-task=(true|false) + - health=(starting|healthy|unhealthy|none) + --format string Pretty-print containers using a Go template + --help Print usage + -n, --last int Show n last created containers (includes all states) (default -1) + -l, --latest Show the latest created container (includes all states) + --no-trunc Don't truncate output + -q, --quiet Only display numeric IDs + -s, --size Display total file sizes +``` + +Running `docker ps --no-trunc` showing 2 linked containers. + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp +d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db +``` + +The `docker ps` command only shows running containers by default. To see all +containers, use the `-a` (or `--all`) flag: + +```bash +$ docker ps -a +``` + +`docker ps` groups exposed ports into a single range if possible. E.g., a +container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in +the `PORTS` column. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* id (container's id) +* label (`label=` or `label==`) +* name (container's name) +* exited (int - the code of exited containers. Only useful with `--all`) +* status (created|restarting|running|removing|paused|exited|dead) +* ancestor (`[:]`, `` or ``) - filters containers that were created from the given image or a descendant. +* before (container's id or name) - filters containers created before given id or name +* since (container's id or name) - filters containers created since given id or name +* isolation (default|process|hyperv) (Windows daemon only) +* volume (volume name or mount point) - filters containers that mount volumes. +* network (network id or name) - filters containers connected to the provided network +* health (starting|healthy|unhealthy|none) - filters containers based on healthcheck status + +#### Label + +The `label` filter matches containers based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches containers with the `color` label regardless of its value. + +```bash +$ docker ps --filter "label=color" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +673394ef1d4c busybox "top" 47 seconds ago Up 45 seconds nostalgic_shockley +d85756f57265 busybox "top" 52 seconds ago Up 51 seconds high_albattani +``` + +The following filter matches containers with the `color` label with the `blue` value. + +```bash +$ docker ps --filter "label=color=blue" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +d85756f57265 busybox "top" About a minute ago Up About a minute high_albattani +``` + +#### Name + +The `name` filter matches on all or part of a container's name. + +The following filter matches all containers with a name containing the `nostalgic_stallman` string. + +```bash +$ docker ps --filter "name=nostalgic_stallman" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9b6247364a03 busybox "top" 2 minutes ago Up 2 minutes nostalgic_stallman +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker ps --filter "name=nostalgic" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +715ebfcee040 busybox "top" 3 seconds ago Up 1 second i_am_nostalgic +9b6247364a03 busybox "top" 7 minutes ago Up 7 minutes nostalgic_stallman +673394ef1d4c busybox "top" 38 minutes ago Up 38 minutes nostalgic_shockley +``` + +#### Exited + +The `exited` filter matches containers by exist status code. For example, to +filter for containers that have exited successfully: + +```bash +$ docker ps -a --filter 'exited=0' + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey +106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani +48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds +``` + +#### Killed containers + +You can use a filter to locate containers that exited with status of `137` +meaning a `SIGKILL(9)` killed them. + +```bash +$ docker ps -a --filter 'exited=137' +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +b3e1c0ed5bfe ubuntu:latest "sleep 1000" 12 seconds ago Exited (137) 5 seconds ago grave_kowalevski +a2eb5558d669 redis:latest "/entrypoint.sh redi 2 hours ago Exited (137) 2 hours ago sharp_lalande +``` + +Any of these events result in a `137` status: + +* the `init` process of the container is killed manually +* `docker kill` kills the container +* Docker daemon restarts which kills all running containers + +#### Status + +The `status` filter matches containers by status. You can filter using +`created`, `restarting`, `running`, `removing`, `paused`, `exited` and `dead`. For example, +to filter for `running` containers: + +```bash +$ docker ps --filter status=running + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic +d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top +9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman +``` + +To filter for `paused` containers: + +```bash +$ docker ps --filter status=paused + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley +``` + +#### Ancestor + +The `ancestor` filter matches containers based on its image or a descendant of +it. The filter supports the following image representation: + +- image +- image:tag +- image:tag@digest +- short-id +- full-id + +If you don't specify a `tag`, the `latest` tag is used. For example, to filter +for containers that use the latest `ubuntu` image: + +```bash +$ docker ps --filter ancestor=ubuntu + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace +5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet +82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose +bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath +``` + +Match containers based on the `ubuntu-c1` image which, in this case, is a child +of `ubuntu`: + +```bash +$ docker ps --filter ancestor=ubuntu-c1 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace +``` + +Match containers based on the `ubuntu` version `12.04.5` image: + +```bash +$ docker ps --filter ancestor=ubuntu:12.04.5 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose +``` + +The following matches containers based on the layer `d0e008c6cf02` or an image +that have this layer in its layer stack. + +```bash +$ docker ps --filter ancestor=d0e008c6cf02 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose +``` + +#### Before + +The `before` filter shows only containers created before the container with +given id or name. For example, having these containers created: + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9c3527ed70ce busybox "top" 14 seconds ago Up 15 seconds desperate_dubinsky +4aace5031105 busybox "top" 48 seconds ago Up 49 seconds focused_hamilton +6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat +``` + +Filtering with `before` would give: + +```bash +$ docker ps -f before=9c3527ed70ce + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4aace5031105 busybox "top" About a minute ago Up About a minute focused_hamilton +6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat +``` + +#### Since + +The `since` filter shows only containers created since the container with given +id or name. For example, with the same containers as in `before` filter: + +```bash +$ docker ps -f since=6e63f6ff38b0 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9c3527ed70ce busybox "top" 10 minutes ago Up 10 minutes desperate_dubinsky +4aace5031105 busybox "top" 10 minutes ago Up 10 minutes focused_hamilton +``` + +#### Volume + +The `volume` filter shows only containers that mount a specific volume or have +a volume mounted in a specific path: + +```bash{% raw %} +$ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" +CONTAINER ID MOUNTS +9c3527ed70ce remote-volume + +$ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" +CONTAINER ID MOUNTS +9c3527ed70ce remote-volume +{% endraw %}``` + +#### Network + +The `network` filter shows only containers that are connected to a network with +a given name or id. + +The following filter matches all containers that are connected to a network +with a name containing `net1`. + +```bash +$ docker run -d --net=net1 --name=test1 ubuntu top +$ docker run -d --net=net2 --name=test2 ubuntu top + +$ docker ps --filter network=net1 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 +``` + +The network filter matches on both the network's name and id. The following +example shows all containers that are attached to the `net1` network, using +the network id as a filter; + +```bash +{% raw %} +$ docker network inspect --format "{{.ID}}" net1 +{% endraw %} + +8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + +$ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 +``` + +## Formatting + +The formatting option (`--format`) pretty-prints container output using a Go +template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +--------------|---------------------------------------------------------------------------------------------------- +`.ID` | Container ID +`.Image` | Image ID +`.Command` | Quoted command +`.CreatedAt` | Time when the container was created. +`.RunningFor` | Elapsed time since the container was started. +`.Ports` | Exposed ports. +`.Status` | Container status. +`.Size` | Container disk size. +`.Names` | Container names. +`.Labels` | All labels assigned to the container. +`.Label` | Value of a specific label for this container. For example `'{% raw %}{{.Label "com.docker.swarm.cpu"}}{% endraw %}'` +`.Mounts` | Names of the volumes mounted in this container. +`.Networks` | Names of the networks attached to this container. + +When using the `--format` option, the `ps` command will either output the data +exactly as the template declares or, when using the `table` directive, includes +column headers as well. + +The following example uses a template without headers and outputs the `ID` and +`Command` entries separated by a colon for all running containers: + +```bash +{% raw %} +$ docker ps --format "{{.ID}}: {{.Command}}" +{% endraw %} + +a87ecb4f327c: /bin/sh -c #(nop) MA +01946d9d34d8: /bin/sh -c #(nop) MA +c1d3b0166030: /bin/sh -c yum -y up +41d50ecd2f57: /bin/sh -c #(nop) MA +``` + +To list all running containers with their labels in a table format you can use: + +```bash +{% raw %} +$ docker ps --format "table {{.ID}}\t{{.Labels}}" +{% endraw %} + +CONTAINER ID LABELS +a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd +01946d9d34d8 +c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 +41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/pull.md b/vendor/github.com/docker/docker/docs/reference/commandline/pull.md new file mode 100644 index 0000000..0c960b4 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/pull.md @@ -0,0 +1,252 @@ +--- +title: "pull" +description: "The pull command description and usage" +keywords: "pull, image, hub, docker" +--- + + + +# pull + +```markdown +Usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST] + +Pull an image or a repository from a registry + +Options: + -a, --all-tags Download all tagged images in the repository + --disable-content-trust Skip image verification (default true) + --help Print usage +``` + +Most of your images will be created on top of a base image from the +[Docker Hub](https://hub.docker.com) registry. + +[Docker Hub](https://hub.docker.com) contains many pre-built images that you +can `pull` and try without needing to define and configure your own. + +To download a particular image, or set of images (i.e., a repository), +use `docker pull`. + +## Proxy configuration + +If you are behind an HTTP proxy server, for example in corporate settings, +before open a connect to registry, you may need to configure the Docker +daemon's proxy settings, using the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` +environment variables. To set these environment variables on a host using +`systemd`, refer to the [control and configure Docker with systemd](https://docs.docker.com/engine/admin/systemd/#http-proxy) +for variables configuration. + +## Concurrent downloads + +By default the Docker daemon will pull three layers of an image at a time. +If you are on a low bandwidth connection this may cause timeout issues and you may want to lower +this via the `--max-concurrent-downloads` daemon option. See the +[daemon documentation](dockerd.md) for more details. + +## Examples + +### Pull an image from Docker Hub + +To download a particular image, or set of images (i.e., a repository), use +`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a +default. This command pulls the `debian:latest` image: + +```bash +$ docker pull debian + +Using default tag: latest +latest: Pulling from library/debian +fdd5d7827f33: Pull complete +a3ed95caeb02: Pull complete +Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa +Status: Downloaded newer image for debian:latest +``` + +Docker images can consist of multiple layers. In the example above, the image +consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`. + +Layers can be reused by images. For example, the `debian:jessie` image shares +both layers with `debian:latest`. Pulling the `debian:jessie` image therefore +only pulls its metadata, but not its layers, because all layers are already +present locally: + +```bash +$ docker pull debian:jessie + +jessie: Pulling from library/debian +fdd5d7827f33: Already exists +a3ed95caeb02: Already exists +Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e +Status: Downloaded newer image for debian:jessie +``` + +To see which images are present locally, use the [`docker images`](images.md) +command: + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +debian jessie f50f9524513f 5 days ago 125.1 MB +debian latest f50f9524513f 5 days ago 125.1 MB +``` + +Docker uses a content-addressable image store, and the image ID is a SHA256 +digest covering the image's configuration and layers. In the example above, +`debian:jessie` and `debian:latest` have the same image ID because they are +actually the *same* image tagged with different names. Because they are the +same image, their layers are stored only once and do not consume extra disk +space. + +For more information about images, layers, and the content-addressable store, +refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/). + + +## Pull an image by digest (immutable identifier) + +So far, you've pulled images by their name (and "tag"). Using names and tags is +a convenient way to work with images. When using tags, you can `docker pull` an +image again to make sure you have the most up-to-date version of that image. +For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu +14.04 image. + +In some cases you don't want images to be updated to newer versions, but prefer +to use a fixed version of an image. Docker enables you to pull an image by its +*digest*. When pulling an image by digest, you specify *exactly* which version +of an image to pull. Doing so, allows you to "pin" an image to that version, +and guarantee that the image you're using is always the same. + +To know the digest of an image, pull the image first. Let's pull the latest +`ubuntu:14.04` image from Docker Hub: + +```bash +$ docker pull ubuntu:14.04 + +14.04: Pulling from library/ubuntu +5a132a7e7af1: Pull complete +fd2731e4c50c: Pull complete +28a2f68d1120: Pull complete +a3ed95caeb02: Pull complete +Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +Status: Downloaded newer image for ubuntu:14.04 +``` + +Docker prints the digest of the image after the pull has finished. In the example +above, the digest of the image is: + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Docker also prints the digest of an image when *pushing* to a registry. This +may be useful if you want to pin to a version of the image you just pushed. + +A digest takes the place of the tag when pulling an image, for example, to +pull the above image by digest, run the following command: + +```bash +$ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu +5a132a7e7af1: Already exists +fd2731e4c50c: Already exists +28a2f68d1120: Already exists +a3ed95caeb02: Already exists +Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +``` + +Digest can also be used in the `FROM` of a Dockerfile, for example: + +```Dockerfile +FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +MAINTAINER some maintainer +``` + +> **Note**: Using this feature "pins" an image to a specific version in time. +> Docker will therefore not pull updated versions of an image, which may include +> security updates. If you want to pull an updated image, you need to change the +> digest accordingly. + + +## Pulling from a different registry + +By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to +manually specify the path of a registry to pull from. For example, if you have +set up a local registry, you can specify its path to pull from it. A registry +path is similar to a URL, but does not contain a protocol specifier (`https://`). + +The following command pulls the `testing/test-image` image from a local registry +listening on port 5000 (`myregistry.local:5000`): + +```bash +$ docker pull myregistry.local:5000/testing/test-image +``` + +Registry credentials are managed by [docker login](login.md). + +Docker uses the `https://` protocol to communicate with a registry, unless the +registry is allowed to be accessed over an insecure connection. Refer to the +[insecure registries](dockerd.md#insecure-registries) section for more information. + + +## Pull a repository with multiple images + +By default, `docker pull` pulls a *single* image from the registry. A repository +can contain multiple images. To pull all images from a repository, provide the +`-a` (or `--all-tags`) option when using `docker pull`. + +This command pulls all images from the `fedora` repository: + +```bash +$ docker pull --all-tags fedora + +Pulling repository fedora +ad57ef8d78d7: Download complete +105182bb5e8b: Download complete +511136ea3c5a: Download complete +73bd853d2ea5: Download complete +.... + +Status: Downloaded newer image for fedora +``` + +After the pull has completed use the `docker images` command to see the +images that were pulled. The example below shows all the `fedora` images +that are present locally: + +```bash +$ docker images fedora + +REPOSITORY TAG IMAGE ID CREATED SIZE +fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB +fedora 20 105182bb5e8b 5 days ago 372.7 MB +fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB +fedora latest 105182bb5e8b 5 days ago 372.7 MB +``` + +## Canceling a pull + +Killing the `docker pull` process, for example by pressing `CTRL-c` while it is +running in a terminal, will terminate the pull operation. + +```bash +$ docker pull fedora + +Using default tag: latest +latest: Pulling from library/fedora +a3ed95caeb02: Pulling fs layer +236608c7b546: Pulling fs layer +^C +``` + +> **Note**: Technically, the Engine terminates a pull operation when the +> connection between the Docker Engine daemon and the Docker Engine client +> initiating the pull is lost. If the connection with the Engine daemon is +> lost for other reasons than a manual interaction, the pull is also aborted. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/push.md b/vendor/github.com/docker/docker/docs/reference/commandline/push.md new file mode 100644 index 0000000..e36fd02 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/push.md @@ -0,0 +1,75 @@ +--- +title: "push" +description: "The push command description and usage" +keywords: "share, push, image" +--- + + + +# push + +```markdown +Usage: docker push [OPTIONS] NAME[:TAG] + +Push an image or a repository to a registry + +Options: + --disable-content-trust Skip image verification (default true) + --help Print usage +``` + +Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) +registry or to a self-hosted one. + +Refer to the [`docker tag`](tag.md) reference for more information about valid +image and tag names. + +Killing the `docker push` process, for example by pressing `CTRL-c` while it is +running in a terminal, terminates the push operation. + +Registry credentials are managed by [docker login](login.md). + +## Concurrent uploads + +By default the Docker daemon will push five layers of an image at a time. +If you are on a low bandwidth connection this may cause timeout issues and you may want to lower +this via the `--max-concurrent-uploads` daemon option. See the +[daemon documentation](dockerd.md) for more details. + +## Examples + +### Pushing a new image to a registry + +First save the new image by finding the container ID (using [`docker ps`](ps.md)) +and then committing it to a new image name. Note that only `a-z0-9-_.` are +allowed when naming images: + +```bash +$ docker commit c16378f943fe rhel-httpd +``` + +Now, push the image to the registry using the image ID. In this example the +registry is on host named `registry-host` and listening on port `5000`. To do +this, tag the image with the host name or IP address, and the port of the +registry: + +```bash +$ docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd +$ docker push registry-host:5000/myadmin/rhel-httpd +``` + +Check that this worked by running: + +```bash +$ docker images +``` + +You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` +listed. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/rename.md b/vendor/github.com/docker/docker/docs/reference/commandline/rename.md new file mode 100644 index 0000000..be035f1 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/rename.md @@ -0,0 +1,27 @@ +--- +title: "rename" +description: "The rename command description and usage" +keywords: "rename, docker, container" +--- + + + +# rename + +```markdown +Usage: docker rename CONTAINER NEW_NAME + +Rename a container + +Options: + --help Print usage +``` + +The `docker rename` command allows the container to be renamed to a different name. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/restart.md b/vendor/github.com/docker/docker/docs/reference/commandline/restart.md new file mode 100644 index 0000000..9f7ed00 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/restart.md @@ -0,0 +1,26 @@ +--- +title: "restart" +description: "The restart command description and usage" +keywords: "restart, container, Docker" +--- + + + +# restart + +```markdown +Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] + +Restart one or more containers + +Options: + --help Print usage + -t, --time int Seconds to wait for stop before killing the container (default 10) +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/rm.md new file mode 100644 index 0000000..1c3e795 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/rm.md @@ -0,0 +1,69 @@ +--- +title: "rm" +description: "The rm command description and usage" +keywords: "remove, Docker, container" +--- + + + +# rm + +```markdown +Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] + +Remove one or more containers + +Options: + -f, --force Force the removal of a running container (uses SIGKILL) + --help Print usage + -l, --link Remove the specified link + -v, --volumes Remove the volumes associated with the container +``` + +## Examples + + $ docker rm /redis + /redis + +This will remove the container referenced under the link +`/redis`. + + $ docker rm --link /webapp/redis + /webapp/redis + +This will remove the underlying link between `/webapp` and the `/redis` +containers removing all network communication. + + $ docker rm --force redis + redis + +The main process inside the container referenced under the link `/redis` will receive +`SIGKILL`, then the container will be removed. + + $ docker rm $(docker ps -a -q) + +This command will delete all stopped containers. The command +`docker ps -a -q` will return all existing container IDs and pass them to +the `rm` command which will delete them. Any running containers will not be +deleted. + + $ docker rm -v redis + redis + +This command will remove the container and any volumes associated with it. +Note that if a volume was specified with a name, it will not be removed. + + $ docker create -v awesome:/foo -v /bar --name hello redis + hello + $ docker rm -v hello + +In this example, the volume for `/foo` will remain intact, but the volume for +`/bar` will be removed. The same behavior holds for volumes inherited with +`--volumes-from`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md b/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md new file mode 100644 index 0000000..149b763 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md @@ -0,0 +1,83 @@ +--- +title: "rmi" +description: "The rmi command description and usage" +keywords: "remove, image, Docker" +--- + + + +# rmi + +```markdown +Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] + +Remove one or more images + +Options: + -f, --force Force removal of the image + --help Print usage + --no-prune Do not delete untagged parents +``` + +You can remove an image using its short or long ID, its tag, or its digest. If +an image has one or more tag referencing it, you must remove all of them before +the image is removed. Digest references are removed automatically when an image +is removed by tag. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + + $ docker rmi fd484f19954f + Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force + 2013/12/11 05:47:16 Error: failed to remove one or more images + + $ docker rmi test1 + Untagged: test1:latest + $ docker rmi test2 + Untagged: test2:latest + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + $ docker rmi test + Untagged: test:latest + Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 + +If you use the `-f` flag and specify the image's short or long ID, then this +command untags and removes all images that match the specified ID. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + + $ docker rmi -f fd484f19954f + Untagged: test1:latest + Untagged: test:latest + Untagged: test2:latest + Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 + +An image pulled by digest has no tag associated with it: + + $ docker images --digests + REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE + localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB + +To remove an image using its digest: + + $ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 + Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 + Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/run.md b/vendor/github.com/docker/docker/docs/reference/commandline/run.md new file mode 100644 index 0000000..e57ba4b --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/run.md @@ -0,0 +1,732 @@ +--- +title: "run" +description: "The run command description and usage" +keywords: "run, command, container" +--- + + + +# run + +```markdown +Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] + +Run a command in a new container + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --blkio-weight-device value Block IO weight (relative device weight) (default []) + --cap-add value Add Linux capabilities (default []) + --cap-drop value Drop Linux capabilities (default []) + --cgroup-parent string Optional parent cgroup for the container + --cidfile string Write the container ID to the file + --cpu-count int The number of CPUs available for execution by the container. + Windows daemon only. On Windows Server containers, this is + approximated as a percentage of total CPU usage. + --cpu-percent int Limit percentage of CPU available for execution + by the container. Windows daemon only. + The processor resource controls are mutually + exclusive, the order of precedence is CPUCount + first, then CPUShares, and CPUPercent last. + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpus NanoCPUs Number of CPUs (default 0.000) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + -d, --detach Run container in background and print container ID + --detach-keys string Override the key sequence for detaching a container + --device value Add a host device to the container (default []) + --device-read-bps value Limit read rate (bytes per second) from a device (default []) + --device-read-iops value Limit read rate (IO per second) from a device (default []) + --device-write-bps value Limit write rate (bytes per second) to a device (default []) + --device-write-iops value Limit write rate (IO per second) to a device (default []) + --disable-content-trust Skip image verification (default true) + --dns value Set custom DNS servers (default []) + --dns-option value Set DNS options (default []) + --dns-search value Set custom DNS search domains (default []) + --entrypoint string Overwrite the default ENTRYPOINT of the image + -e, --env value Set environment variables (default []) + --env-file value Read in a file of environment variables (default []) + --expose value Expose a port or a range of ports (default []) + --group-add value Add additional groups to join (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) + --help Print usage + -h, --hostname string Container host name + --init Run an init inside the container that forwards signals and reaps processes + --init-path string Path to the docker-init binary + -i, --interactive Keep STDIN open even if not attached + --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) + (Windows only). The format is ``. + Unit is optional and can be `b` (bytes per second), + `k` (kilobytes per second), `m` (megabytes per second), + or `g` (gigabytes per second). If you omit the unit, + the system uses bytes per second. + --io-maxbandwidth and --io-maxiops are mutually exclusive options. + --io-maxiops uint Maximum IOps limit for the system drive (Windows only) + --ip string Container IPv4 address (e.g. 172.30.100.104) + --ip6 string Container IPv6 address (e.g. 2001:db8::33) + --ipc string IPC namespace to use + --isolation string Container isolation technology + --kernel-memory string Kernel memory limit + -l, --label value Set meta data on a container (default []) + --label-file value Read in a line delimited file of labels (default []) + --link value Add link to another container (default []) + --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) + --log-driver string Logging driver for the container + --log-opt value Log driver options (default []) + --mac-address string Container MAC address (e.g. 92:d0:c6:0a:29:33) + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) + --name string Assign a name to the container + --network-alias value Add network-scoped alias for the container (default []) + --network string Connect a container to a network + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-healthcheck Disable any container-specified HEALTHCHECK + --oom-kill-disable Disable OOM Killer + --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) + --pid string PID namespace to use + --pids-limit int Tune container pids limit (set -1 for unlimited) + --privileged Give extended privileges to this container + -p, --publish value Publish a container's port(s) to the host (default []) + -P, --publish-all Publish all exposed ports to random ports + --read-only Mount the container's root filesystem as read only + --restart string Restart policy to apply when a container exits (default "no") + Possible values are : no, on-failure[:max-retry], always, unless-stopped + --rm Automatically remove the container when it exits + --runtime string Runtime to use for this container + --security-opt value Security Options (default []) + --shm-size string Size of /dev/shm, default value is 64MB. + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --sig-proxy Proxy received signals to the process (default true) + --stop-signal string Signal to stop a container, SIGTERM by default (default "SIGTERM") + --stop-timeout=10 Timeout (in seconds) to stop a container + --storage-opt value Storage driver options for the container (default []) + --sysctl value Sysctl options (default map[]) + --tmpfs value Mount a tmpfs directory (default []) + -t, --tty Allocate a pseudo-TTY + --ulimit value Ulimit options (default []) + -u, --user string Username or UID (format: [:]) + --userns string User namespace to use + 'host': Use the Docker host user namespace + '': Use the Docker daemon user namespace specified by `--userns-remap` option. + --uts string UTS namespace to use + -v, --volume value Bind mount a volume (default []). The format + is `[host-src:]container-dest[:]`. + The comma-delimited `options` are [rw|ro], + [z|Z], [[r]shared|[r]slave|[r]private], and + [nocopy]. The 'host-src' is an absolute path + or a name value. + --volume-driver string Optional volume driver for the container + --volumes-from value Mount volumes from the specified container(s) (default []) + -w, --workdir string Working directory inside the container +``` + +The `docker run` command first `creates` a writeable container layer over the +specified image, and then `starts` it using the specified command. That is, +`docker run` is equivalent to the API `/containers/create` then +`/containers/(id)/start`. A stopped container can be restarted with all its +previous changes intact using `docker start`. See `docker ps -a` to view a list +of all containers. + +The `docker run` command can be used in combination with `docker commit` to +[*change the command that a container runs*](commit.md). There is additional detailed information about `docker run` in the [Docker run reference](../run.md). + +For information on connecting a container to a network, see the ["*Docker network overview*"](https://docs.docker.com/engine/userguide/networking/). + +## Examples + +### Assign name and allocate pseudo-TTY (--name, -it) + + $ docker run --name test -it debian + root@d6c0fe130dba:/# exit 13 + $ echo $? + 13 + $ docker ps -a | grep test + d6c0fe130dba debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test + +This example runs a container named `test` using the `debian:latest` +image. The `-it` instructs Docker to allocate a pseudo-TTY connected to +the container's stdin; creating an interactive `bash` shell in the container. +In the example, the `bash` shell is quit by entering +`exit 13`. This exit code is passed on to the caller of +`docker run`, and is recorded in the `test` container's metadata. + +### Capture container ID (--cidfile) + + $ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" + +This will create a container and print `test` to the console. The `cidfile` +flag makes Docker attempt to create a new file and write the container ID to it. +If the file exists already, Docker will return an error. Docker will close this +file when `docker run` exits. + +### Full container capabilities (--privileged) + + $ docker run -t -i --rm ubuntu bash + root@bc338942ef20:/# mount -t tmpfs none /mnt + mount: permission denied + +This will *not* work, because by default, most potentially dangerous kernel +capabilities are dropped; including `cap_sys_admin` (which is required to mount +filesystems). However, the `--privileged` flag will allow it to run: + + $ docker run -t -i --privileged ubuntu bash + root@50e3f57e16e6:/# mount -t tmpfs none /mnt + root@50e3f57e16e6:/# df -h + Filesystem Size Used Avail Use% Mounted on + none 1.9G 0 1.9G 0% /mnt + +The `--privileged` flag gives *all* capabilities to the container, and it also +lifts all the limitations enforced by the `device` cgroup controller. In other +words, the container can then do almost everything that the host can do. This +flag exists to allow special use-cases, like running Docker within Docker. + +### Set working directory (-w) + + $ docker run -w /path/to/dir/ -i -t ubuntu pwd + +The `-w` lets the command being executed inside directory given, here +`/path/to/dir/`. If the path does not exist it is created inside the container. + +### Set storage driver options per container + + $ docker run -it --storage-opt size=120G fedora /bin/bash + +This (size) will allow to set the container rootfs size to 120G at creation time. +This option is only available for the `devicemapper`, `btrfs`, `overlay2`, +`windowsfilter` and `zfs` graph drivers. +For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, +user cannot pass a size less than the Default BaseFS Size. +For the `overlay2` storage driver, the size option is only available if the +backing fs is `xfs` and mounted with the `pquota` mount option. +Under these conditions, user can pass any size less then the backing fs size. + +### Mount tmpfs (--tmpfs) + + $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image + +The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`, +`noexec`, `nosuid`, `size=65536k` options. + +### Mount volume (-v, --read-only) + + $ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd + +The `-v` flag mounts the current working directory into the container. The `-w` +lets the command being executed inside the current working directory, by +changing into the directory to the value returned by `pwd`. So this +combination executes the command using the container, but inside the +current working directory. + + $ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash + +When the host directory of a bind-mounted volume doesn't exist, Docker +will automatically create this directory on the host for you. In the +example above, Docker will create the `/doesnt/exist` +folder before starting your container. + + $ docker run --read-only -v /icanwrite busybox touch /icanwrite/here + +Volumes can be used in combination with `--read-only` to control where +a container writes files. The `--read-only` flag mounts the container's root +filesystem as read only prohibiting writes to locations other than the +specified volumes for the container. + + $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh + +By bind-mounting the docker unix socket and statically linked docker +binary (refer to [get the linux binary]( +https://docs.docker.com/engine/installation/binaries/#/get-the-linux-binary)), +you give the container the full access to create and manipulate the host's +Docker daemon. + +On Windows, the paths must be specified using Windows-style semantics. + + PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt + Contents of file + + PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt + Contents of file + +The following examples will fail when using Windows-based containers, as the +destination of a volume or bind-mount inside the container must be one of: +a non-existing or empty directory; or a drive other than C:. Further, the source +of a bind mount must be a local directory, not a file. + + net use z: \\remotemachine\share + docker run -v z:\foo:c:\dest ... + docker run -v \\uncpath\to\directory:c:\dest ... + docker run -v c:\foo\somefile.txt:c:\dest ... + docker run -v c:\foo:c: ... + docker run -v c:\foo:c:\existing-directory-with-contents ... + +For in-depth information about volumes, refer to [manage data in containers](https://docs.docker.com/engine/tutorials/dockervolumes/) + +### Publish or expose port (-p, --expose) + + $ docker run -p 127.0.0.1:80:8080 ubuntu bash + +This binds port `8080` of the container to port `80` on `127.0.0.1` of the host +machine. The [Docker User +Guide](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/) +explains in detail how to manipulate ports in Docker. + + $ docker run --expose 80 ubuntu bash + +This exposes port `80` of the container without publishing the port to the host +system's interfaces. + +### Set environment variables (-e, --env, --env-file) + + $ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash + +This sets simple (non-array) environmental variables in the container. For +illustration all three +flags are shown here. Where `-e`, `--env` take an environment variable and +value, or if no `=` is provided, then that variable's current value, set via +`export`, is passed through (i.e. `$MYVAR1` from the host is set to `$MYVAR1` +in the container). When no `=` is provided and that variable is not defined +in the client's environment then that variable will be removed from the +container's list of environment variables. All three flags, `-e`, `--env` and +`--env-file` can be repeated. + +Regardless of the order of these three flags, the `--env-file` are processed +first, and then `-e`, `--env` flags. This way, the `-e` or `--env` will +override variables as needed. + + $ cat ./env.list + TEST_FOO=BAR + $ docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO + TEST_FOO=This is a test + +The `--env-file` flag takes a filename as an argument and expects each line +to be in the `VAR=VAL` format, mimicking the argument passed to `--env`. Comment +lines need only be prefixed with `#` + +An example of a file passed with `--env-file` + + $ cat ./env.list + TEST_FOO=BAR + + # this is a comment + TEST_APP_DEST_HOST=10.10.0.127 + TEST_APP_DEST_PORT=8888 + _TEST_BAR=FOO + TEST_APP_42=magic + helloWorld=true + 123qwe=bar + org.spring.config=something + + # pass through this variable from the caller + TEST_PASSTHROUGH + $ TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + HOSTNAME=5198e0745561 + TEST_FOO=BAR + TEST_APP_DEST_HOST=10.10.0.127 + TEST_APP_DEST_PORT=8888 + _TEST_BAR=FOO + TEST_APP_42=magic + helloWorld=true + TEST_PASSTHROUGH=howdy + HOME=/root + 123qwe=bar + org.spring.config=something + + $ docker run --env-file ./env.list busybox env + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + HOSTNAME=5198e0745561 + TEST_FOO=BAR + TEST_APP_DEST_HOST=10.10.0.127 + TEST_APP_DEST_PORT=8888 + _TEST_BAR=FOO + TEST_APP_42=magic + helloWorld=true + TEST_PASSTHROUGH= + HOME=/root + 123qwe=bar + org.spring.config=something + +### Set metadata on container (-l, --label, --label-file) + +A label is a `key=value` pair that applies metadata to a container. To label a container with two labels: + + $ docker run -l my-label --label com.example.foo=bar ubuntu bash + +The `my-label` key doesn't specify a value so the label defaults to an empty +string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`). + +The `key=value` must be unique to avoid overwriting the label value. If you +specify labels with identical keys but different values, each subsequent value +overwrites the previous. Docker uses the last `key=value` you supply. + +Use the `--label-file` flag to load multiple labels from a file. Delimit each +label in the file with an EOL mark. The example below loads labels from a +labels file in the current directory: + + $ docker run --label-file ./labels ubuntu bash + +The label-file format is similar to the format for loading environment +variables. (Unlike environment variables, labels are not visible to processes +running inside a container.) The following example illustrates a label-file +format: + + com.example.label1="a label" + + # this is a comment + com.example.label2=another\ label + com.example.label3 + +You can load multiple label-files by supplying multiple `--label-file` flags. + +For additional information on working with labels, see [*Labels - custom +metadata in Docker*](https://docs.docker.com/engine/userguide/labels-custom-metadata/) in the Docker User +Guide. + +### Connect a container to a network (--network) + +When you start a container use the `--network` flag to connect it to a network. +This adds the `busybox` container to the `my-net` network. + +```bash +$ docker run -itd --network=my-net busybox +``` + +You can also choose the IP addresses for the container with `--ip` and `--ip6` +flags when you start the container on a user-defined network. + +```bash +$ docker run -itd --network=my-net --ip=10.10.9.75 busybox +``` + +If you want to add a running container to a network use the `docker network connect` subcommand. + +You can connect multiple containers to the same network. Once connected, the +containers can communicate easily need only another container's IP address +or name. For `overlay` networks or custom plugins that support multi-host +connectivity, containers connected to the same multi-host network but launched +from different Engines can also communicate in this way. + +**Note**: Service discovery is unavailable on the default bridge network. +Containers can communicate via their IP addresses by default. To communicate +by name, they must be linked. + +You can disconnect a container from a network using the `docker network +disconnect` command. + +### Mount volumes from container (--volumes-from) + + $ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd + +The `--volumes-from` flag mounts all the defined volumes from the referenced +containers. Containers can be specified by repetitions of the `--volumes-from` +argument. The container ID may be optionally suffixed with `:ro` or `:rw` to +mount the volumes in read-only or read-write mode, respectively. By default, +the volumes are mounted in the same mode (read write or read only) as +the reference container. + +Labeling systems like SELinux require that proper labels are placed on volume +content mounted into a container. Without a label, the security system might +prevent the processes running inside the container from using the content. By +default, Docker does not change the labels set by the OS. + +To change the label in the container context, you can add either of two suffixes +`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file +objects on the shared volumes. The `z` option tells Docker that two containers +share the volume content. As a result, Docker labels the content with a shared +content label. Shared volume labels allow all containers to read/write content. +The `Z` option tells Docker to label the content with a private unshared label. +Only the current container can use a private volume. + +### Attach to STDIN/STDOUT/STDERR (-a) + +The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` +or `STDERR`. This makes it possible to manipulate the output and input as +needed. + + $ echo "test" | docker run -i -a stdin ubuntu cat - + +This pipes data into a container and prints the container's ID by attaching +only to the container's `STDIN`. + + $ docker run -a stderr ubuntu echo test + +This isn't going to print anything unless there's an error because we've +only attached to the `STDERR` of the container. The container's logs +still store what's been written to `STDERR` and `STDOUT`. + + $ cat somefile | docker run -i -a stdin mybuilder dobuild + +This is how piping a file into a container could be done for a build. +The container's ID will be printed after the build is done and the build +logs could be retrieved using `docker logs`. This is +useful if you need to pipe a file or something else into a container and +retrieve the container's ID once the container has finished running. + +### Add host device to container (--device) + + $ docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo} + brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc + brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd + crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo + +It is often necessary to directly expose devices to a container. The `--device` +option enables that. For example, a specific block storage device or loop +device or audio device can be added to an otherwise unprivileged container +(without the `--privileged` flag) and have the application directly access it. + +By default, the container will be able to `read`, `write` and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` +flag: + + + $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + + $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted + +> **Note:** +> `--device` cannot be safely used with ephemeral devices. Block devices +> that may be removed should not be added to untrusted containers with +> `--device`. + +### Restart policies (--restart) + +Use Docker's `--restart` to specify a container's *restart policy*. A restart +policy controls whether the Docker daemon restarts a container after exit. +Docker supports the following restart policies: + + + + + + + + + + + + + + + + + + + + + + + + + + +
PolicyResult
no + Do not automatically restart the container when it exits. This is the + default. +
+ + on-failure[:max-retries] + + + Restart only if the container exits with a non-zero exit status. + Optionally, limit the number of restart retries the Docker + daemon attempts. +
always + Always restart the container regardless of the exit status. + When you specify always, the Docker daemon will try to restart + the container indefinitely. The container will also always start + on daemon startup, regardless of the current state of the container. +
unless-stopped + Always restart the container regardless of the exit status, but + do not start it on daemon startup if the container has been put + to a stopped state before. +
+ + $ docker run --restart=always redis + +This will run the `redis` container with a restart policy of **always** +so that if the container exits, Docker will restart it. + +More detailed information on restart policies can be found in the +[Restart Policies (--restart)](../run.md#restart-policies-restart) +section of the Docker run reference page. + +### Add entries to container hosts file (--add-host) + +You can add other hosts into a container's `/etc/hosts` file by using one or +more `--add-host` flags. This example adds a static address for a host named +`docker`: + + $ docker run --add-host=docker:10.180.0.1 --rm -it debian + root@f38c87f2a42d:/# ping docker + PING docker (10.180.0.1): 48 data bytes + 56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms + 56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms + ^C--- docker ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms + +Sometimes you need to connect to the Docker host from within your +container. To enable this, pass the Docker host's IP address to +the container using the `--add-host` flag. To find the host's address, +use the `ip addr show` command. + +The flags you pass to `ip addr show` depend on whether you are +using IPv4 or IPv6 networking in your containers. Use the following +flags for IPv4 address retrieval for a network device named `eth0`: + + $ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1` + $ docker run --add-host=docker:${HOSTIP} --rm -it debian + +For IPv6 use the `-6` flag instead of the `-4` flag. For other network +devices, replace `eth0` with the correct device name (for example `docker0` +for the bridge device). + +### Set ulimits in container (--ulimit) + +Since setting `ulimit` settings in a container requires extra privileges not +available in the default container, you can set these using the `--ulimit` flag. +`--ulimit` is specified with a soft and hard limit as such: +`=[:]`, for example: + + $ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n" + 1024 + +> **Note:** +> If you do not provide a `hard limit`, the `soft limit` will be used +> for both values. If no `ulimits` are set, they will be inherited from +> the default `ulimits` set on the daemon. `as` option is disabled now. +> In other words, the following script is not supported: +> `$ docker run -it --ulimit as=1024 fedora /bin/bash` + +The values are sent to the appropriate `syscall` as they are set. +Docker doesn't perform any byte conversion. Take this into account when setting the values. + +#### For `nproc` usage + +Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to set the +maximum number of processes available to a user, not to a container. For example, start four +containers with `daemon` user: + + docker run -d -u daemon --ulimit nproc=3 busybox top + docker run -d -u daemon --ulimit nproc=3 busybox top + docker run -d -u daemon --ulimit nproc=3 busybox top + docker run -d -u daemon --ulimit nproc=3 busybox top + +The 4th container fails and reports "[8] System error: resource temporarily unavailable" error. +This fails because the caller set `nproc=3` resulting in the first three containers using up +the three processes quota set for the `daemon` user. + +### Stop container with signal (--stop-signal) + +The `--stop-signal` flag sets the system call signal that will be sent to the container to exit. +This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, +or a signal name in the format SIGNAME, for instance SIGKILL. + +### Optional security options (--security-opt) + +On Windows, this flag can be used to specify the `credentialspec` option. +The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`. + +### Stop container with timeout (--stop-timeout) + +The `--stop-timeout` flag sets the timeout (in seconds) that a pre-defined (see `--stop-signal`) system call +signal that will be sent to the container to exit. After timeout elapses the container will be killed with SIGKILL. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Microsoft Windows. The `--isolation ` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. These two commands are equivalent on Linux: + +``` +$ docker run -d busybox top +$ docker run -d --isolation default busybox top +``` + +On Microsoft Windows, can take any of these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. | + +On Windows, the default isolation for client is `hyperv`, and for server is +`process`. Therefore when running on Windows server without a `daemon` option +set, these two commands are equivalent: +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation process busybox top +``` + +If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, +if running on Windows server, any of these commands also result in `hyperv` isolation: + +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation hyperv busybox top +``` + +### Configure namespaced kernel parameters (sysctls) at runtime + +The `--sysctl` sets namespaced kernel parameters (sysctls) in the +container. For example, to turn on IP forwarding in the containers +network namespace, run this command: + + $ docker run --sysctl net.ipv4.ip_forward=1 someimage + + +> **Note**: Not all sysctls are namespaced. Docker does not support changing sysctls +> inside of a container that also modify the host system. As the kernel +> evolves we expect to see more sysctls become namespaced. + +#### Currently supported sysctls + + `IPC Namespace`: + + kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced + Sysctls beginning with fs.mqueue.* + + If you use the `--ipc=host` option these sysctls will not be allowed. + + `Network Namespace`: + Sysctls beginning with net.* + + If you use the `--network=host` option using these sysctls will not be allowed. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/save.md b/vendor/github.com/docker/docker/docs/reference/commandline/save.md new file mode 100644 index 0000000..88a5fed --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/save.md @@ -0,0 +1,45 @@ +--- +title: "save" +description: "The save command description and usage" +keywords: "tarred, repository, backup" +--- + + + +# save + +```markdown +Usage: docker save [OPTIONS] IMAGE [IMAGE...] + +Save one or more images to a tar archive (streamed to STDOUT by default) + +Options: + --help Print usage + -o, --output string Write to a file, instead of STDOUT +``` + +Produces a tarred repository to the standard output stream. +Contains all parent layers, and all tags + versions, or specified `repo:tag`, for +each argument provided. + +It is used to create a backup that can then be used with `docker load` + + $ docker save busybox > busybox.tar + $ ls -sh busybox.tar + 2.7M busybox.tar + $ docker save --output busybox.tar busybox + $ ls -sh busybox.tar + 2.7M busybox.tar + $ docker save -o fedora-all.tar fedora + $ docker save -o fedora-latest.tar fedora:latest + +It is even useful to cherry-pick particular tags of an image repository + + $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/search.md b/vendor/github.com/docker/docker/docs/reference/commandline/search.md new file mode 100644 index 0000000..31faf37 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/search.md @@ -0,0 +1,134 @@ +--- +title: "search" +description: "The search command description and usage" +keywords: "search, hub, images" +--- + + + +# search + +```markdown +Usage: docker search [OPTIONS] TERM + +Search the Docker Hub for images + +Options: + -f, --filter value Filter output based on conditions provided (default []) + - is-automated=(true|false) + - is-official=(true|false) + - stars= - image has at least 'number' stars + --help Print usage + --limit int Max number of search results (default 25) + --no-trunc Don't truncate output +``` + +Search [Docker Hub](https://hub.docker.com) for images + +See [*Find Public Images on Docker Hub*](https://docs.docker.com/engine/tutorials/dockerrepos/#searching-for-images) for +more details on finding shared images from the command line. + +> **Note:** +> Search queries will only return up to 25 results + +## Examples + +### Search images by name + +This example displays images with a name containing 'busybox': + + $ docker search busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + busybox Busybox base image. 316 [OK] + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] + odise/busybox-python 2 [OK] + azukiapp/busybox This image is meant to be used as the base... 2 [OK] + ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 [OK] + shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 1 [OK] + odise/busybox-curl 1 [OK] + ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 [OK] + peelsky/zulu-openjdk-busybox 1 [OK] + skomma/busybox-data Docker image suitable for data volume cont... 1 [OK] + elektritter/busybox-teamspeak Lightweight teamspeak3 container based on... 1 [OK] + socketplane/busybox 1 [OK] + oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 [OK] + ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 [OK] + nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 [OK] + openshift/busybox-http-app 0 [OK] + jllopis/busybox 0 [OK] + swyckoff/busybox 0 [OK] + powellquiring/busybox 0 [OK] + williamyeh/busybox-sh Docker image for BusyBox's sh 0 [OK] + simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 0 [OK] + fhisamoto/busybox-java Busybox java 0 [OK] + scottabernethy/busybox 0 [OK] + marclop/busybox-solr + +### Display non-truncated description (--no-trunc) + +This example displays images with a name containing 'busybox', +at least 3 stars and the description isn't truncated in the output: + + $ docker search --stars=3 --no-trunc busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + busybox Busybox base image. 325 [OK] + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors. 8 [OK] + +## Limit search results (--limit) + +The flag `--limit` is the maximum number of results returned by a search. This value could +be in the range between 1 and 100. The default value of `--limit` is 25. + + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* stars (int - number of stars the image has) +* is-automated (true|false) - is the image automated or not +* is-official (true|false) - is the image official or not + + +### stars + +This example displays images with a name containing 'busybox' and at +least 3 stars: + + $ docker search --filter stars=3 busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + busybox Busybox base image. 325 [OK] + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] + + +### is-automated + +This example displays images with a name containing 'busybox' +and are automated builds: + + $ docker search --filter is-automated busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] + +### is-official + +This example displays images with a name containing 'busybox', at least +3 stars and are official builds: + + $ docker search --filter "is-official=true" --filter "stars=3" busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md new file mode 100644 index 0000000..aebcebb --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md @@ -0,0 +1,90 @@ +--- +title: "secret create" +description: "The secret create command description and usage" +keywords: ["secret, create"] +--- + + + +# secret create + +```Markdown +Usage: docker secret create [OPTIONS] SECRET file|- + +Create a secret from a file or STDIN as content + +Options: + --help Print usage + -l, --label list Secret labels (default []) +``` + +Creates a secret using standard input or from a file for the secret content. You must run this +command on a manager node. + +## Examples + +### Create a secret + +```bash +$ echo | docker secret create my_secret - +mhv17xfe3gh6xc4rij5orpfds + +$ docker secret ls +ID NAME CREATED UPDATED SIZE +mhv17xfe3gh6xc4rij5orpfds my_secret 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC 1679 +``` + +### Create a secret with a file + +```bash +$ docker secret create my_secret ./secret.json +mhv17xfe3gh6xc4rij5orpfds + +$ docker secret ls +ID NAME CREATED UPDATED SIZE +mhv17xfe3gh6xc4rij5orpfds my_secret 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC 1679 +``` + +### Create a secret with labels + +```bash +$ docker secret create --label env=dev --label rev=20161102 my_secret ./secret.json +jtn7g6aukl5ky7nr9gvwafoxh + +$ docker secret inspect my_secret +[ + { + "ID": "jtn7g6aukl5ky7nr9gvwafoxh", + "Version": { + "Index": 541 + }, + "CreatedAt": "2016-11-03T20:54:12.924766548Z", + "UpdatedAt": "2016-11-03T20:54:12.924766548Z", + "Spec": { + "Name": "my_secret", + "Labels": { + "env": "dev", + "rev": "20161102" + }, + "Data": null + }, + "Digest": "sha256:4212a44b14e94154359569333d3fc6a80f6b9959dfdaff26412f4b2796b1f387", + "SecretSize": 1679 + } +] + +``` + + +## Related information + +* [secret inspect](secret_inspect.md) +* [secret ls](secret_ls.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md new file mode 100644 index 0000000..de878f7 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md @@ -0,0 +1,85 @@ +--- +title: "secret inspect" +description: "The secret inspect command description and usage" +keywords: ["secret, inspect"] +--- + + + +# secret inspect + +```Markdown +Usage: docker secret inspect [OPTIONS] SECRET [SECRET...] + +Display detailed information on one or more secrets + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + + +Inspects the specified secret. This command has to be run targeting a manager +node. + +By default, this renders all results in a JSON array. If a format is specified, +the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +### Inspecting a secret by name or ID + +You can inspect a secret, either by its *name*, or *ID* + +For example, given the following secret: + +```bash +$ docker secret ls +ID NAME CREATED UPDATED +mhv17xfe3gh6xc4rij5orpfds secret.json 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC +``` + +```bash +$ docker secret inspect secret.json +[ + { + "ID": "mhv17xfe3gh6xc4rij5orpfds", + "Version": { + "Index": 1198 + }, + "CreatedAt": "2016-10-27T23:25:43.909181089Z", + "UpdatedAt": "2016-10-27T23:25:43.909181089Z", + "Spec": { + "Name": "secret.json" + } + } +] +``` + +### Formatting secret output + +You can use the --format option to obtain specific information about a +secret. The following example command outputs the creation time of the +secret. + +```bash{% raw %} +$ docker secret inspect --format='{{.CreatedAt}}' mhv17xfe3gh6xc4rij5orpfds +2016-10-27 23:25:43.909181089 +0000 UTC +{% endraw %}``` + + +## Related information + +* [secret create](secret_create.md) +* [secret ls](secret_ls.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md new file mode 100644 index 0000000..6b34fc2 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md @@ -0,0 +1,43 @@ +--- +title: "secret ls" +description: "The secret ls command description and usage" +keywords: ["secret, ls"] +--- + + + +# secret ls + +```Markdown +Usage: docker secret ls [OPTIONS] + +List secrets + +Aliases: + ls, list + +Options: + -q, --quiet Only display IDs +``` + +Run this command on a manager node to list the secrets in the Swarm. + +## Examples + +```bash +$ docker secret ls +ID NAME CREATED UPDATED +mhv17xfe3gh6xc4rij5orpfds secret.json 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC +``` +## Related information + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md new file mode 100644 index 0000000..f504b1b --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md @@ -0,0 +1,48 @@ +--- +title: "secret rm" +description: "The secret rm command description and usage" +keywords: ["secret, rm"] +--- + + + +# secret rm + +```Markdown +Usage: docker secret rm SECRET [SECRET...] + +Remove one or more secrets + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +Removes the specified secrets from the swarm. This command has to be run +targeting a manager node. + +This example removes a secret: + +```bash +$ docker secret rm secret.json +sapth4csdo5b6wz2p5uimh5xg +``` + +> **Warning**: Unlike `docker rm`, this command does not ask for confirmation +> before removing a secret. + + +## Related information + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret ls](secret_ls.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md new file mode 100644 index 0000000..c9e2980 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md @@ -0,0 +1,556 @@ +--- +title: "service create" +description: "The service create command description and usage" +keywords: "service, create" +--- + + + +# service create + +```Markdown +Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...] + +Create a new service + +Options: + --constraint list Placement constraints (default []) + --container-label list Container labels (default []) + --dns list Set custom DNS servers (default []) + --dns-option list Set DNS options (default []) + --dns-search list Set custom DNS search domains (default []) + --endpoint-mode string Endpoint mode (vip or dnsrr) + -e, --env list Set environment variables (default []) + --env-file list Read in a file of environment variables (default []) + --group list Set one or more supplementary user groups for the container (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) + --help Print usage + --host list Set one or more custom host-to-IP mappings (host:ip) (default []) + --hostname string Container hostname + -l, --label list Service labels (default []) + --limit-cpu decimal Limit CPUs (default 0.000) + --limit-memory bytes Limit Memory (default 0 B) + --log-driver string Logging driver for service + --log-opt list Logging driver options (default []) + --mode string Service mode (replicated or global) (default "replicated") + --mount mount Attach a filesystem mount to the service + --name string Service name + --network list Network attachments (default []) + --no-healthcheck Disable any container-specified HEALTHCHECK + -p, --publish port Publish a port as a node port + --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs (default 0.000) + --reserve-memory bytes Reserve Memory (default 0 B) + --restart-condition string Restart when condition is met (none, on-failure, or any) + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --secret secret Specify secrets to expose to the service + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) + --update-failure-action string Action on update failure (pause|continue) (default "pause") + --update-max-failure-ratio float Failure rate to tolerate during an update + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s) + --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) + -u, --user string Username or UID (format: [:]) + --with-registry-auth Send registry authentication details to swarm agents + -w, --workdir string Working directory inside the container +``` + +Creates a service as described by the specified parameters. You must run this +command on a manager node. + +## Examples + +### Create a service + +```bash +$ docker service create --name redis redis:3.0.6 +dmu1ept4cxcfe8k8lhtux3ro3 + +$ docker service create --mode global --name redis2 redis:3.0.6 +a8q9dasaafudfs8q8w32udass + +$ docker service ls +ID NAME MODE REPLICAS IMAGE +dmu1ept4cxcf redis replicated 1/1 redis:3.0.6 +a8q9dasaafud redis2 global 1/1 redis:3.0.6 +``` + +### Create a service with 5 replica tasks (--replicas) + +Use the `--replicas` flag to set the number of replica tasks for a replicated +service. The following command creates a `redis` service with `5` replica tasks: + +```bash +$ docker service create --name redis --replicas=5 redis:3.0.6 +4cdgfyky7ozwh3htjfw0d12qv +``` + +The above command sets the *desired* number of tasks for the service. Even +though the command returns immediately, actual scaling of the service may take +some time. The `REPLICAS` column shows both the *actual* and *desired* number +of replica tasks for the service. + +In the following example the desired state is `5` replicas, but the current +number of `RUNNING` tasks is `3`: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +4cdgfyky7ozw redis replicated 3/5 redis:3.0.7 +``` + +Once all the tasks are created and `RUNNING`, the actual number of tasks is +equal to the desired number: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +4cdgfyky7ozw redis replicated 5/5 redis:3.0.7 +``` + +### Create a service with secrets +Use the `--secret` flag to give a container access to a +[secret](secret_create.md). + +Create a service specifying a secret: + +```bash +$ docker service create --name redis --secret secret.json redis:3.0.6 +4cdgfyky7ozwh3htjfw0d12qv +``` + +Create a service specifying the secret, target, user/group ID and mode: + +```bash +$ docker service create --name redis \ + --secret source=ssh-key,target=ssh \ + --secret source=app-key,target=app,uid=1000,gid=1001,mode=0400 \ + redis:3.0.6 +4cdgfyky7ozwh3htjfw0d12qv +``` + +Secrets are located in `/run/secrets` in the container. If no target is +specified, the name of the secret will be used as the in memory file in the +container. If a target is specified, that will be the filename. In the +example above, two files will be created: `/run/secrets/ssh` and +`/run/secrets/app` for each of the secret targets specified. + +### Create a service with a rolling update policy + +```bash +$ docker service create \ + --replicas 10 \ + --name redis \ + --update-delay 10s \ + --update-parallelism 2 \ + redis:3.0.6 +``` + +When you run a [service update](service_update.md), the scheduler updates a +maximum of 2 tasks at a time, with `10s` between updates. For more information, +refer to the [rolling updates +tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/). + +### Set environment variables (-e, --env) + +This sets environmental variables for all tasks in a service. For example: + +```bash +$ docker service create --name redis_2 --replicas 5 --env MYVAR=foo redis:3.0.6 +``` + +### Create a docker service with specific hostname (--hostname) + +This option sets the docker service containers hostname to a specific string. For example: +```bash +$ docker service create --name redis --hostname myredis redis:3.0.6 +``` +### Set metadata on a service (-l, --label) + +A label is a `key=value` pair that applies metadata to a service. To label a +service with two labels: + +```bash +$ docker service create \ + --name redis_2 \ + --label com.example.foo="bar" + --label bar=baz \ + redis:3.0.6 +``` + +For more information about labels, refer to [apply custom +metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). + +### Add bind-mounts or volumes + +Docker supports two different kinds of mounts, which allow containers to read to +or write from files or directories on other containers or the host operating +system. These types are _data volumes_ (often referred to simply as volumes) and +_bind-mounts_. + +Additionally, Docker also supports tmpfs mounts. + +A **bind-mount** makes a file or directory on the host available to the +container it is mounted within. A bind-mount may be either read-only or +read-write. For example, a container might share its host's DNS information by +means of a bind-mount of the host's `/etc/resolv.conf` or a container might +write logs to its host's `/var/log/myContainerLogs` directory. If you use +bind-mounts and your host and containers have different notions of permissions, +access controls, or other such details, you will run into portability issues. + +A **named volume** is a mechanism for decoupling persistent data needed by your +container from the image used to create the container and from the host machine. +Named volumes are created and managed by Docker, and a named volume persists +even when no container is currently using it. Data in named volumes can be +shared between a container and the host machine, as well as between multiple +containers. Docker uses a _volume driver_ to create, manage, and mount volumes. +You can back up or restore volumes using Docker commands. + +A **tmpfs** mounts a tmpfs inside a container for volatile data. + +Consider a situation where your image starts a lightweight web server. You could +use that image as a base image, copy in your website's HTML files, and package +that into another image. Each time your website changed, you'd need to update +the new image and redeploy all of the containers serving your website. A better +solution is to store the website in a named volume which is attached to each of +your web server containers when they start. To update the website, you just +update the named volume. + +For more information about named volumes, see +[Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/). + +The following table describes options which apply to both bind-mounts and named +volumes in a service: + +| Option | Required | Description +|:-----------------------------------------|:--------------------------|:----------------------------------------------------------------------------------------- +| **type** | | The type of mount, can be either `volume`, `bind`, or `tmpfs`. Defaults to `volume` if no type is specified.

  • `volume`: mounts a [managed volume](volume_create.md) into the container.
  • `bind`: bind-mounts a directory or file from the host into the container.
  • `tmpfs`: mount a tmpfs in the container
+| **src** or **source** | for `type=bind` only |
  • `type=volume`: `src` is an optional way to specify the name of the volume (for example, `src=my-volume`). If the named volume does not exist, it is automatically created. If no `src` is specified, the volume is assigned a random name which is guaranteed to be unique on the host, but may not be unique cluster-wide. A randomly-named volume has the same lifecycle as its container and is destroyed when the *container* is destroyed (which is upon `service update`, or when scaling or re-balancing the service).
  • `type=bind`: `src` is required, and specifies an absolute path to the file or directory to bind-mount (for example, `src=/path/on/host/`). An error is produced if the file or directory does not exist.
  • `type=tmpfs`: `src` is not supported.
+| **dst** or **destination** or **target** | yes | Mount path inside the container, for example `/some/path/in/container/`. If the path does not exist in the container's filesystem, the Engine creates a directory at the specified location before mounting the volume or bind-mount. +| **readonly** or **ro** | | The Engine mounts binds and volumes `read-write` unless `readonly` option is given when mounting the bind or volume.

  • `true` or `1` or no value: Mounts the bind or volume read-only.
  • `false` or `0`: Mounts the bind or volume read-write.
+ +#### Bind Propagation + +Bind propagation refers to whether or not mounts created within a given +bind-mount or named volume can be propagated to replicas of that mount. Consider +a mount point `/mnt`, which is also mounted on `/tmp`. The propation settings +control whether a mount on `/tmp/a` would also be available on `/mnt/a`. Each +propagation setting has a recursive counterpoint. In the case of recursion, +consider that `/tmp/a` is also mounted as `/foo`. The propagation settings +control whether `/mnt/a` and/or `/tmp/a` would exist. + +The `bind-propagation` option defaults to `rprivate` for both bind-mounts and +volume mounts, and is only configurable for bind-mounts. In other words, named +volumes do not support bind propagation. + +- **`shared`**: Sub-mounts of the original mount are exposed to replica mounts, + and sub-mounts of replica mounts are also propagated to the + original mount. +- **`slave`**: similar to a shared mount, but only in one direction. If the + original mount exposes a sub-mount, the replica mount can see it. + However, if the replica mount exposes a sub-mount, the original + mount cannot see it. +- **`private`**: The mount is private. Sub-mounts within it are not exposed to + replica mounts, and sub-mounts of replica mounts are not + exposed to the original mount. +- **`rshared`**: The same as shared, but the propagation also extends to and from + mount points nested within any of the original or replica mount + points. +- **`rslave`**: The same as `slave`, but the propagation also extends to and from + mount points nested within any of the original or replica mount + points. +- **`rprivate`**: The default. The same as `private`, meaning that no mount points + anywhere within the original or replica mount points propagate + in either direction. + +For more information about bind propagation, see the +[Linux kernel documentation for shared subtree](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + +#### Options for Named Volumes +The following options can only be used for named volumes (`type=volume`); + +| Option | Description +|:----------------------|:-------------------------------------------------------------------------------------------------------------------- +| **volume-driver** | Name of the volume-driver plugin to use for the volume. Defaults to ``"local"``, to use the local volume driver to create the volume if the volume does not exist. +| **volume-label** | One or more custom metadata ("labels") to apply to the volume upon creation. For example, `volume-label=mylabel=hello-world,my-other-label=hello-mars`. For more information about labels, refer to [apply custom metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). +| **volume-nocopy** | By default, if you attach an empty volume to a container, and files or directories already existed at the mount-path in the container (`dst`), the Engine copies those files and directories into the volume, allowing the host to access them. Set `volume-nocopy` to disables copying files from the container's filesystem to the volume and mount the empty volume.

A value is optional:
  • `true` or `1`: Default if you do not provide a value. Disables copying.
  • `false` or `0`: Enables copying.
+| **volume-opt** | Options specific to a given volume driver, which will be passed to the driver when creating the volume. Options are provided as a comma-separated list of key/value pairs, for example, `volume-opt=some-option=some-value,some-other-option=some-other-value`. For available options for a given driver, refer to that driver's documentation. + +#### Options for tmpfs +The following options can only be used for tmpfs mounts (`type=tmpfs`); + +| Option | Description +|:----------------------|:-------------------------------------------------------------------------------------------------------------------- +| **tmpfs-size** | Size of the tmpfs mount in bytes. Unlimited by default in Linux. +| **tmpfs-mode** | File mode of the tmpfs in octal. (e.g. `"700"` or `"0700"`.) Defaults to ``"1777"`` in Linux. + +#### Differences between "--mount" and "--volume" + +The `--mount` flag supports most options that are supported by the `-v` +or `--volume` flag for `docker run`, with some important exceptions: + +- The `--mount` flag allows you to specify a volume driver and volume driver + options *per volume*, without creating the volumes in advance. In contrast, + `docker run` allows you to specify a single volume driver which is shared + by all volumes, using the `--volume-driver` flag. + +- The `--mount` flag allows you to specify custom metadata ("labels") for a volume, + before the volume is created. + +- When you use `--mount` with `type=bind`, the host-path must refer to an *existing* + path on the host. The path will not be created for you and the service will fail + with an error if the path does not exist. + +- The `--mount` flag does not allow you to relabel a volume with `Z` or `z` flags, + which are used for `selinux` labeling. + +#### Create a service using a named volume + +The following example creates a service that uses a named volume: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round" \ + nginx:alpine +``` + +For each replica of the service, the engine requests a volume named "my-volume" +from the default ("local") volume driver where the task is deployed. If the +volume does not exist, the engine creates a new volume and applies the "color" +and "shape" labels. + +When the task is started, the volume is mounted on `/path/in/container/` inside +the container. + +Be aware that the default ("local") volume is a locally scoped volume driver. +This means that depending on where a task is deployed, either that task gets a +*new* volume named "my-volume", or shares the same "my-volume" with other tasks +of the same service. Multiple containers writing to a single shared volume can +cause data corruption if the software running inside the container is not +designed to handle concurrent processes writing to the same location. Also take +into account that containers can be re-scheduled by the Swarm orchestrator and +be deployed on a different node. + +#### Create a service that uses an anonymous volume + +The following command creates a service with three replicas with an anonymous +volume on `/path/in/container`: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,destination=/path/in/container \ + nginx:alpine +``` + +In this example, no name (`source`) is specified for the volume, so a new volume +is created for each task. This guarantees that each task gets its own volume, +and volumes are not shared between tasks. Anonymous volumes are removed after +the task using them is complete. + +#### Create a service that uses a bind-mounted host directory + +The following example bind-mounts a host directory at `/path/in/container` in +the containers backing the service: + +```bash +$ docker service create \ + --name my-service \ + --mount type=bind,source=/path/on/host,destination=/path/in/container \ + nginx:alpine +``` + +### Set service mode (--mode) + +The service mode determines whether this is a _replicated_ service or a _global_ +service. A replicated service runs as many tasks as specified, while a global +service runs on each active node in the swarm. + +The following command creates a global service: + +```bash +$ docker service create \ + --name redis_2 \ + --mode global \ + redis:3.0.6 +``` + +### Specify service constraints (--constraint) + +You can limit the set of nodes where a task can be scheduled by defining +constraint expressions. Multiple constraints find nodes that satisfy every +expression (AND match). Constraints can match node or Docker Engine labels as +follows: + +| node attribute | matches | example | +|:----------------|:--------------------------|:------------------------------------------------| +| node.id | node ID | `node.id == 2ivku8v2gvtg4` | +| node.hostname | node hostname | `node.hostname != node-2` | +| node.role | node role: manager | `node.role == manager` | +| node.labels | user defined node labels | `node.labels.security == high` | +| engine.labels | Docker Engine's labels | `engine.labels.operatingsystem == ubuntu 14.04` | + +`engine.labels` apply to Docker Engine labels like operating system, +drivers, etc. Swarm administrators add `node.labels` for operational purposes by +using the [`docker node update`](node_update.md) command. + +For example, the following limits tasks for the redis service to nodes where the +node type label equals queue: + +```bash +$ docker service create \ + --name redis_2 \ + --constraint 'node.labels.type == queue' \ + redis:3.0.6 +``` + +### Attach a service to an existing network (--network) + +You can use overlay networks to connect one or more services within the swarm. + +First, create an overlay network on a manager node the docker network create +command: + +```bash +$ docker network create --driver overlay my-network + +etjpu59cykrptrgw0z0hk5snf +``` + +After you create an overlay network in swarm mode, all manager nodes have +access to the network. + +When you create a service and pass the --network flag to attach the service to +the overlay network: + +```bash +$ docker service create \ + --replicas 3 \ + --network my-network \ + --name my-web \ + nginx + +716thylsndqma81j6kkkb5aus +``` + +The swarm extends my-network to each node running the service. + +Containers on the same network can access each other using +[service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery). + +### Publish service ports externally to the swarm (-p, --publish) + +You can publish service ports to make them available externally to the swarm +using the `--publish` flag: + +```bash +$ docker service create --publish : nginx +``` + +For example: + +```bash +$ docker service create --name my_web --replicas 3 --publish 8080:80 nginx +``` + +When you publish a service port, the swarm routing mesh makes the service +accessible at the target port on every node regardless if there is a task for +the service running on the node. For more information refer to +[Use swarm mode routing mesh](https://docs.docker.com/engine/swarm/ingress/). + +### Publish a port for TCP only or UDP only + +By default, when you publish a port, it is a TCP port. You can +specifically publish a UDP port instead of or in addition to a TCP port. When +you publish both TCP and UDP ports, Docker 1.12.2 and earlier require you to +add the suffix `/tcp` for TCP ports. Otherwise it is optional. + +#### TCP only + +The following two commands are equivalent. + +```bash +$ docker service create --name dns-cache -p 53:53 dns-cache + +$ docker service create --name dns-cache -p 53:53/tcp dns-cache +``` + +#### TCP and UDP + +```bash +$ docker service create --name dns-cache -p 53:53/tcp -p 53:53/udp dns-cache +``` + +#### UDP only + +```bash +$ docker service create --name dns-cache -p 53:53/udp dns-cache +``` + +### Create services using templates + +You can use templates for some flags of `service create`, using the syntax +provided by the Go's [text/template](http://golange.org/pkg/text/template/) package. + +The supported flags are the following : + +- `--hostname` +- `--mount` +- `--env` + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +----------------- | -------------------------------------------- +`.Service.ID` | Service ID +`.Service.Name` | Service name +`.Service.Labels` | Service labels +`.Node.ID` | Node ID +`.Task.ID` | Task ID +`.Task.Name` | Task name +`.Task.Slot` | Task slot + +#### Template example + +In this example, we are going to set the template of the created containers based on the +service's name and the node's ID where it sits. + +```bash +$ docker service create --name hosttempl --hostname={% raw %}"{{.Node.ID}}-{{.Service.Name}}"{% endraw %} busybox top +va8ew30grofhjoychbr6iot8c + +$ docker service ps va8ew30grofhjoychbr6iot8c +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +wo41w8hg8qan hosttempl.1 busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 2e7a8a9c4da2 Running Running about a minute ago + +$ docker inspect --format={% raw %}"{{.Config.Hostname}}"{% endraw %} hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj +x3ti0erg11rjpg64m75kej2mz-hosttempl +``` + +## Related information + +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) + + diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md new file mode 100644 index 0000000..8b4ab62 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md @@ -0,0 +1,162 @@ +--- +title: "service inspect" +description: "The service inspect command description and usage" +keywords: "service, inspect" +--- + + + +# service inspect + +```Markdown +Usage: docker service inspect [OPTIONS] SERVICE [SERVICE...] + +Display detailed information on one or more services + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format. +``` + + +Inspects the specified service. This command has to be run targeting a manager +node. + +By default, this renders all results in a JSON array. If a format is specified, +the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +### Inspecting a service by name or ID + +You can inspect a service, either by its *name*, or *ID* + +For example, given the following service; + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 +``` + +Both `docker service inspect redis`, and `docker service inspect dmu1ept4cxcf` +produce the same result: + +```bash +$ docker service inspect redis +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + "CreatedAt": "2016-06-17T18:44:02.558012087Z", + "UpdatedAt": "2016-06-17T18:44:02.558012087Z", + "Spec": { + "Name": "redis", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis:3.0.6" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": {}, + "EndpointSpec": { + "Mode": "vip" + } + }, + "Endpoint": { + "Spec": {} + } + } +] +``` + +```bash +$ docker service inspect dmu1ept4cxcf +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + ... + } +] +``` + +### Inspect a service using pretty-print + +You can print the inspect output in a human-readable format instead of the default +JSON output, by using the `--pretty` option: + +```bash +$ docker service inspect --pretty frontend +ID: c8wgl7q4ndfd52ni6qftkvnnp +Name: frontend +Labels: + - org.example.projectname=demo-app +Service Mode: REPLICATED + Replicas: 5 +Placement: +UpdateConfig: + Parallelism: 0 +ContainerSpec: + Image: nginx:alpine +Resources: +Endpoint Mode: vip +Ports: + Name = + Protocol = tcp + TargetPort = 443 + PublishedPort = 4443 +``` + +You can also use `--format pretty` for the same effect. + + +### Finding the number of tasks running as part of a service + +The `--format` option can be used to obtain specific information about a +service. For example, the following command outputs the number of replicas +of the "redis" service. + +```bash{% raw %} +$ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis +10 +{% endraw %}``` + + +## Related information + +* [service create](service_create.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md new file mode 100644 index 0000000..fdf6a3a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md @@ -0,0 +1,77 @@ +--- +title: "service logs (experimental)" +description: "The service logs command description and usage" +keywords: "service, logs" +advisory: "experimental" +--- + + + +# service logs + +```Markdown +Usage: docker service logs [OPTIONS] SERVICE + +Fetch the logs of a service + +Options: + --details Show extra details provided to logs + -f, --follow Follow log output + --help Print usage + --since string Show logs since timestamp + --tail string Number of lines to show from the end of the logs (default "all") + -t, --timestamps Show timestamps +``` + +The `docker service logs` command batch-retrieves logs present at the time of execution. + +> **Note**: this command is only functional for services that are started with +> the `json-file` or `journald` logging driver. + +For more information about selecting and configuring logging drivers, refer to +[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). + +The `docker service logs --follow` command will continue streaming the new output from +the service's `STDOUT` and `STDERR`. + +Passing a negative number or a non-integer to `--tail` is invalid and the +value is set to `all` in that case. + +The `docker service logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) +, for example `2014-09-16T06:17:46.000000000Z`, to each +log entry. To ensure that the timestamps are aligned the +nano-second part of the timestamp will be padded with zero when necessary. + +The `docker service logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +service. + +The `--since` option shows only the service logs generated after +a given date. You can specify the date as an RFC 3339 date, a UNIX +timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date +format you may also use RFC3339Nano, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. You can combine the +`--since` option with either or both of the `--follow` or `--tail` options. + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md new file mode 100644 index 0000000..ccd68af --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md @@ -0,0 +1,114 @@ +--- +title: "service ls" +description: "The service ls command description and usage" +keywords: "service, ls" +--- + + + +# service ls + +```Markdown +Usage: docker service ls [OPTIONS] + +List services + +Aliases: + ls, list + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + -q, --quiet Only display IDs +``` + +This command when run targeting a manager, lists services are running in the +swarm. + +On a manager node: +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +c8wgl7q4ndfd frontend replicated 5/5 nginx:alpine +dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 +iwe3278osahj mongo global 7/7 mongo:3.3 +``` + +The `REPLICAS` column shows both the *actual* and *desired* number of tasks for +the service. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [id](service_ls.md#id) +* [label](service_ls.md#label) +* [name](service_ls.md#name) + +#### ID + +The `id` filter matches all or part of a service's id. + +```bash +$ docker service ls -f "id=0bcjw" +ID NAME MODE REPLICAS IMAGE +0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 +``` + +#### Label + +The `label` filter matches services based on the presence of a `label` alone or +a `label` and a value. + +The following filter matches all services with a `project` label regardless of +its value: + +```bash +$ docker service ls --filter label=project +ID NAME MODE REPLICAS IMAGE +01sl1rp6nj5u frontend2 replicated 1/1 nginx:alpine +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +The following filter matches only services with the `project` label with the +`project-a` value. + +```bash +$ docker service ls --filter label=project=project-a +ID NAME MODE REPLICAS IMAGE +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +#### Name + +The `name` filter matches on all or part of a service's name. + +The following filter matches services with a name containing `redis`. + +```bash +$ docker service ls --filter name=redis +ID NAME MODE REPLICAS IMAGE +0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 +``` + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md new file mode 100644 index 0000000..61abb15 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md @@ -0,0 +1,161 @@ +--- +title: "service ps" +description: "The service ps command description and usage" +keywords: "service, tasks, ps" +aliases: ["/engine/reference/commandline/service_tasks/"] +--- + + + +# service ps + +```Markdown +Usage: docker service ps [OPTIONS] SERVICE + +List the tasks of a service + +Options: + -f, --filter filter Filter output based on conditions provided + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output + -q, --quiet Only display task IDs +``` + +Lists the tasks that are running as part of the specified service. This command +has to be run targeting a manager node. + +## Examples + +### Listing the tasks that are part of a service + +The following command shows all the tasks that are part of the `redis` service: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.5 manager1 Running Running 8 seconds +bk658fpbex0d redis.2 redis:3.0.5 worker2 Running Running 9 seconds +5ls5s5fldaqg redis.3 redis:3.0.5 worker1 Running Running 9 seconds +8ryt076polmc redis.4 redis:3.0.5 worker1 Running Running 9 seconds +1x0v8yomsncd redis.5 redis:3.0.5 manager1 Running Running 8 seconds +71v7je3el7rr redis.6 redis:3.0.5 worker2 Running Running 9 seconds +4l3zm9b7tfr7 redis.7 redis:3.0.5 worker2 Running Running 9 seconds +9tfpyixiy2i7 redis.8 redis:3.0.5 worker1 Running Running 9 seconds +3w1wu13yupln redis.9 redis:3.0.5 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.5 manager1 Running Running 8 seconds +``` + +In addition to _running_ tasks, the output also shows the task history. For +example, after updating the service to use the `redis:3.0.6` image, the output +may look like this: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxk redis.1 redis:3.0.6 manager1 Running Running 6 seconds ago +ky2re9oz86r9 \_ redis.1 redis:3.0.5 manager1 Shutdown Shutdown 8 seconds ago +3s46te2nzl4i redis.2 redis:3.0.6 worker2 Running Running less than a second ago +nvjljf7rmor4 \_ redis.2 redis:3.0.6 worker2 Shutdown Rejected 23 seconds ago "No such image: redis@sha256:6…" +vtiuz2fpc0yb \_ redis.2 redis:3.0.5 worker2 Shutdown Shutdown 1 second ago +jnarweeha8x4 redis.3 redis:3.0.6 worker1 Running Running 3 seconds ago +vs448yca2nz4 \_ redis.3 redis:3.0.5 worker1 Shutdown Shutdown 4 seconds ago +jf1i992619ir redis.4 redis:3.0.6 worker1 Running Running 10 seconds ago +blkttv7zs8ee \_ redis.4 redis:3.0.5 worker1 Shutdown Shutdown 11 seconds ago +``` + +The number of items in the task history is determined by the +`--task-history-limit` option that was set when initializing the swarm. You can +change the task history retention limit using the +[`docker swarm update`](swarm_update.md) command. + +When deploying a service, docker resolves the digest for the service's +image, and pins the service to that digest. The digest is not shown by +default, but is printed if `--no-trunc` is used. The `--no-trunc` option +also shows the non-truncated task ID, and error-messages, as can be seen below; + +```bash +$ docker service ps --no-trunc redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxksi9w2a704wkp7 redis.1 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 manager1 Running Running 5 minutes ago +ky2re9oz86r9556i2szb8a8af \_ redis.1 redis:3.0.5@sha256:f8829e00d95672c48c60f468329d6693c4bdd28d1f057e755f8ba8b40008682e worker2 Shutdown Shutdown 5 minutes ago +bk658fpbex0d57cqcwoe3jthu redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Running Running 5 seconds +nvjljf7rmor4htv7l8rwcx7i7 \_ redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Shutdown Rejected 5 minutes ago "No such image: redis@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842" +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. + +The currently supported filters are: + +* [id](#id) +* [name](#name) +* [node](#node) +* [desired-state](#desired-state) + + +#### ID + +The `id` filter matches on all or a prefix of a task's ID. + +```bash +$ docker service ps -f "id=8" redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +8ryt076polmc redis.4 redis:3.0.6 worker1 Running Running 9 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + +#### Name + +The `name` filter matches on task names. + +```bash +$ docker service ps -f "name=redis.1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +qihejybwf1x5 redis.1 redis:3.0.6 manager1 Running Running 8 seconds +``` + + +#### Node + +The `node` filter matches on a node name or a node ID. + +```bash +$ docker service ps -f "node=manager1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.6 manager1 Running Running 8 seconds +1x0v8yomsncd redis.5 redis:3.0.6 manager1 Running Running 8 seconds +3w1wu13yupln redis.9 redis:3.0.6 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, and `accepted`. + + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md new file mode 100644 index 0000000..d0ba90b --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md @@ -0,0 +1,55 @@ +--- +title: "service rm" +description: "The service rm command description and usage" +keywords: "service, rm" +--- + + + +# service rm + +```Markdown +Usage: docker service rm SERVICE [SERVICE...] + +Remove one or more services + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +Removes the specified services from the swarm. This command has to be run +targeting a manager node. + +For example, to remove the redis service: + +```bash +$ docker service rm redis +redis +$ docker service ls +ID NAME MODE REPLICAS IMAGE +``` + +> **Warning**: Unlike `docker rm`, this command does not ask for confirmation +> before removing a running service. + + + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md new file mode 100644 index 0000000..64075ed --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md @@ -0,0 +1,96 @@ +--- +title: "service scale" +description: "The service scale command description and usage" +keywords: "service, scale" +--- + + + +# service scale + +```markdown +Usage: docker service scale SERVICE=REPLICAS [SERVICE=REPLICAS...] + +Scale one or multiple replicated services + +Options: + --help Print usage +``` + +## Examples + +### Scale a service + +The scale command enables you to scale one or more replicated services either up +or down to the desired number of replicas. This command cannot be applied on +services which are global mode. The command will return immediately, but the +actual scaling of the service may take some time. To stop all replicas of a +service while keeping the service active in the swarm you can set the scale to 0. + +For example, the following command scales the "frontend" service to 50 tasks. + +```bash +$ docker service scale frontend=50 +frontend scaled to 50 +``` + +The following command tries to scale a global service to 10 tasks and returns an error. + +``` +$ docker service create --mode global --name backend backend:latest +b4g08uwuairexjub6ome6usqh +$ docker service scale backend=10 +backend: scale can only be used with replicated mode +``` + +Directly afterwards, run `docker service ls`, to see the actual number of +replicas. + +```bash +$ docker service ls --filter name=frontend + +ID NAME MODE REPLICAS IMAGE +3pr5mlvu3fh9 frontend replicated 15/50 nginx:alpine +``` + +You can also scale a service using the [`docker service update`](service_update.md) +command. The following commands are equivalent: + +```bash +$ docker service scale frontend=50 +$ docker service update --replicas=50 frontend +``` + +### Scale multiple services + +The `docker service scale` command allows you to set the desired number of +tasks for multiple services at once. The following example scales both the +backend and frontend services: + +```bash +$ docker service scale backend=3 frontend=5 +backend scaled to 3 +frontend scaled to 5 + +$ docker service ls +ID NAME MODE REPLICAS IMAGE +3pr5mlvu3fh9 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md new file mode 100644 index 0000000..301a0ea --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md @@ -0,0 +1,181 @@ +--- +title: "service update" +description: "The service update command description and usage" +keywords: "service, update" +--- + + + +# service update + +```Markdown +Usage: docker service update [OPTIONS] SERVICE + +Update a service + +Options: + --args string Service command args + --constraint-add list Add or update a placement constraint (default []) + --constraint-rm list Remove a constraint (default []) + --container-label-add list Add or update a container label (default []) + --container-label-rm list Remove a container label by its key (default []) + --dns-add list Add or update a custom DNS server (default []) + --dns-option-add list Add or update a DNS option (default []) + --dns-option-rm list Remove a DNS option (default []) + --dns-rm list Remove a custom DNS server (default []) + --dns-search-add list Add or update a custom DNS search domain (default []) + --dns-search-rm list Remove a DNS search domain (default []) + --endpoint-mode string Endpoint mode (vip or dnsrr) + --env-add list Add or update an environment variable (default []) + --env-rm list Remove an environment variable (default []) + --force Force update even if no changes require it + --group-add list Add an additional supplementary user group to the container (default []) + --group-rm list Remove a previously added supplementary user group from the container (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) + --help Print usage + --host-add list Add or update a custom host-to-IP mapping (host:ip) (default []) + --host-rm list Remove a custom host-to-IP mapping (host:ip) (default []) + --hostname string Container hostname + --image string Service image tag + --label-add list Add or update a service label (default []) + --label-rm list Remove a label by its key (default []) + --limit-cpu decimal Limit CPUs (default 0.000) + --limit-memory bytes Limit Memory (default 0 B) + --log-driver string Logging driver for service + --log-opt list Logging driver options (default []) + --mount-add mount Add or update a mount on a service + --mount-rm list Remove a mount by its target path (default []) + --no-healthcheck Disable any container-specified HEALTHCHECK + --publish-add port Add or update a published port + --publish-rm port Remove a published port by its target port + --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs (default 0.000) + --reserve-memory bytes Reserve Memory (default 0 B) + --restart-condition string Restart when condition is met (none, on-failure, or any) + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --rollback Rollback to previous specification + --secret-add secret Add or update a secret on a service + --secret-rm list Remove a secret (default []) + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) + --update-failure-action string Action on update failure (pause|continue) (default "pause") + --update-max-failure-ratio float Failure rate to tolerate during an update + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s) + --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) + -u, --user string Username or UID (format: [:]) + --with-registry-auth Send registry authentication details to swarm agents + -w, --workdir string Working directory inside the container +``` + +Updates a service as described by the specified parameters. This command has to be run targeting a manager node. +The parameters are the same as [`docker service create`](service_create.md). Please look at the description there +for further information. + +Normally, updating a service will only cause the service's tasks to be replaced with new ones if a change to the +service requires recreating the tasks for it to take effect. For example, only changing the +`--update-parallelism` setting will not recreate the tasks, because the individual tasks are not affected by this +setting. However, the `--force` flag will cause the tasks to be recreated anyway. This can be used to perform a +rolling restart without any changes to the service parameters. + +## Examples + +### Update a service + +```bash +$ docker service update --limit-cpu 2 redis +``` + +### Perform a rolling restart with no parameter changes + +```bash +$ docker service update --force --update-parallelism 1 --update-delay 30s redis +``` + +In this example, the `--force` flag causes the service's tasks to be shut down +and replaced with new ones even though none of the other parameters would +normally cause that to happen. The `--update-parallelism 1` setting ensures +that only one task is replaced at a time (this is the default behavior). The +`--update-delay 30s` setting introduces a 30 second delay between tasks, so +that the rolling restart happens gradually. + +### Adding and removing mounts + +Use the `--mount-add` or `--mount-rm` options add or remove a service's bind-mounts +or volumes. + +The following example creates a service which mounts the `test-data` volume to +`/somewhere`. The next step updates the service to also mount the `other-volume` +volume to `/somewhere-else`volume, The last step unmounts the `/somewhere` mount +point, effectively removing the `test-data` volume. Each command returns the +service name. + +- The `--mount-add` flag takes the same parameters as the `--mount` flag on + `service create`. Refer to the [volumes and + bind-mounts](service_create.md#volumes-and-bind-mounts-mount) section in the + `service create` reference for details. + +- The `--mount-rm` flag takes the `target` path of the mount. + +```bash +$ docker service create \ + --name=myservice \ + --mount \ + type=volume,source=test-data,target=/somewhere \ + nginx:alpine \ + myservice + +myservice + +$ docker service update \ + --mount-add \ + type=volume,source=other-volume,target=/somewhere-else \ + myservice + +myservice + +$ docker service update --mount-rm /somewhere myservice + +myservice +``` + +### Adding and removing secrets + +Use the `--secret-add` or `--secret-rm` options add or remove a service's +secrets. + +The following example adds a secret named `ssh-2` and removes `ssh-1`: + +```bash +$ docker service update \ + --secret-add source=ssh-2,target=ssh-2 \ + --secret-rm ssh-1 \ + myservice +``` + +### Update services using templates + +Some flags of `service update` support the use of templating. +See [`service create`](./service_create.md#templating) for the reference. + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service ps](service_ps.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md new file mode 100644 index 0000000..037feae --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md @@ -0,0 +1,98 @@ +--- +title: "stack deploy" +description: "The stack deploy command description and usage" +keywords: "stack, deploy, up" +--- + + + +# stack deploy + +```markdown +Usage: docker stack deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + -c, --compose-file string Path to a Compose file + --help Print usage + --with-registry-auth Send registry authentication details to Swarm agents +``` + +Create and update a stack from a `compose` or a `dab` file on the swarm. This command +has to be run targeting a manager node. + +## Compose file + +The `deploy` command supports compose file version `3.0` and above." + +```bash +$ docker stack deploy --compose-file docker-compose.yml vossibility +Ignoring unsupported options: links + +Creating network vossibility_vossibility +Creating network vossibility_default +Creating service vossibility_nsqd +Creating service vossibility_logstash +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_ghollector +Creating service vossibility_lookupd +``` + +You can verify that the services were correctly created + +``` +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## DAB file + +```bash +$ docker stack deploy --bundle-file vossibility-stack.dab vossibility +Loading bundle from vossibility-stack.dab +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_logstash +Creating service vossibility_lookupd +Creating service vossibility_nsqd +Creating service vossibility_vossibility-collector +``` + +You can verify that the services were correctly created: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Related information + +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md new file mode 100644 index 0000000..05c7215 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md @@ -0,0 +1,47 @@ +--- +title: "stack ls" +description: "The stack ls command description and usage" +keywords: "stack, ls" +--- + + + +# stack ls + +```markdown +Usage: docker stack ls + +List stacks + +Aliases: + ls, list + +Options: + --help Print usage +``` + +Lists the stacks. + +For example, the following command shows all stacks and some additional information: + +```bash +$ docker stack ls + +ID SERVICES +vossibility-stack 6 +myapp 2 +``` + +## Related information + +* [stack deploy](stack_deploy.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md new file mode 100644 index 0000000..101e9fe --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md @@ -0,0 +1,51 @@ +--- +title: "stack ps" +description: "The stack ps command description and usage" +keywords: "stack, ps" +--- + + + +# stack ps + +```markdown +Usage: docker stack ps [OPTIONS] STACK + +List the tasks in the stack + +Options: + -f, --filter filter Filter output based on conditions provided + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output +``` + +Lists the tasks that are running as part of the specified stack. This +command has to be run targeting a manager node. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. + +The currently supported filters are: + +* id +* name +* desired-state + +## Related information + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md new file mode 100644 index 0000000..fd63997 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md @@ -0,0 +1,38 @@ +--- +title: "stack rm" +description: "The stack rm command description and usage" +keywords: "stack, rm, remove, down" +--- + + + +# stack rm + +```markdown +Usage: docker stack rm STACK + +Remove the stack + +Aliases: + rm, remove, down + +Options: + --help Print usage +``` + +Remove the stack from the swarm. This command has to be run targeting +a manager node. + +## Related information + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md new file mode 100644 index 0000000..62779b4 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md @@ -0,0 +1,70 @@ +--- +title: "stack services" +description: "The stack services command description and usage" +keywords: "stack, services" +advisory: "experimental" +--- + + + +# stack services (experimental) + +```markdown +Usage: docker stack services [OPTIONS] STACK + +List the services in the stack + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + -q, --quiet Only display IDs +``` + +Lists the services that are running as part of the specified stack. This +command has to be run targeting a manager node. + +For example, the following command shows all services in the `myapp` stack: + +```bash +$ docker stack services myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. + +The following command shows both the `web` and `db` services: + +```bash +$ docker stack services --filter name=myapp_web --filter name=myapp_db myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +The currently supported filters are: + +* id / ID (`--filter id=7be5ei6sqeye`, or `--filter ID=7be5ei6sqeye`) +* name (`--filter name=myapp_web`) +* label (`--filter label=key=value`) + +## Related information + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/start.md b/vendor/github.com/docker/docker/docs/reference/commandline/start.md new file mode 100644 index 0000000..980bce9 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/start.md @@ -0,0 +1,28 @@ +--- +title: "start" +description: "The start command description and usage" +keywords: "Start, container, stopped" +--- + + + +# start + +```markdown +Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] + +Start one or more stopped containers + +Options: + -a, --attach Attach STDOUT/STDERR and forward signals + --detach-keys string Override the key sequence for detaching a container + --help Print usage + -i, --interactive Attach container's STDIN +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stats.md b/vendor/github.com/docker/docker/docs/reference/commandline/stats.md new file mode 100644 index 0000000..f5d0d54 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stats.md @@ -0,0 +1,117 @@ +--- +title: "stats" +description: "The stats command description and usage" +keywords: "container, resource, statistics" +--- + + + +# stats + +```markdown +Usage: docker stats [OPTIONS] [CONTAINER...] + +Display a live stream of container(s) resource usage statistics + +Options: + -a, --all Show all containers (default shows just running) + --format string Pretty-print images using a Go template + --help Print usage + --no-stream Disable streaming stats and only pull the first result +``` + +The `docker stats` command returns a live data stream for running containers. To limit data to one or more specific containers, specify a list of container names or ids separated by a space. You can specify a stopped container but stopped containers do not return any data. + +If you want more detailed information about a container's resource usage, use the `/containers/(id)/stats` API endpoint. + +## Examples + +Running `docker stats` on all running containers against a Linux daemon. + + $ docker stats + CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O + 1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB + 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B + d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B + +Running `docker stats` on multiple containers by name and id against a Linux daemon. + + $ docker stats fervent_panini 5acfcb1b4fd1 + CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O + 5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B + fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B + +Running `docker stats` on all running containers against a Windows daemon. + + PS E:\> docker stats + CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O + 09d3bb5b1604 6.61% 38.21 MiB 17.1 kB / 7.73 kB 10.7 MB / 3.57 MB + 9db7aa4d986d 9.19% 38.26 MiB 15.2 kB / 7.65 kB 10.6 MB / 3.3 MB + 3f214c61ad1d 0.00% 28.64 MiB 64 kB / 6.84 kB 4.42 MB / 6.93 MB + +Running `docker stats` on multiple containers by name and id against a Windows daemon. + + PS E:\> docker ps -a + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 3f214c61ad1d nanoserver "cmd" 2 minutes ago Up 2 minutes big_minsky + 9db7aa4d986d windowsservercore "cmd" 2 minutes ago Up 2 minutes mad_wilson + 09d3bb5b1604 windowsservercore "cmd" 2 minutes ago Up 2 minutes affectionate_easley + + PS E:\> docker stats 3f214c61ad1d mad_wilson + CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O + 3f214c61ad1d 0.00% 46.25 MiB 76.3 kB / 7.92 kB 10.3 MB / 14.7 MB + mad_wilson 9.59% 40.09 MiB 27.6 kB / 8.81 kB 17 MB / 20.1 MB + +## Formatting + +The formatting option (`--format`) pretty prints container output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------ | -------------------------------------------- +`.Container` | Container name or ID (user input) +`.Name` | Container name +`.ID` | Container ID +`.CPUPerc` | CPU percentage +`.MemUsage` | Memory usage +`.NetIO` | Network IO +`.BlockIO` | Block IO +`.MemPerc` | Memory percentage (Not available on Windows) +`.PIDs` | Number of PIDs (Not available on Windows) + + +When using the `--format` option, the `stats` command either +outputs the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`Container` and `CPUPerc` entries separated by a colon for all images: + +```bash +$ docker stats --format "{{.Container}}: {{.CPUPerc}}" + +09d3bb5b1604: 6.61% +9db7aa4d986d: 9.19% +3f214c61ad1d: 0.00% +``` + +To list all containers statistics with their name, CPU percentage and memory +usage in a table format you can use: + +```bash +$ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" + +CONTAINER CPU % PRIV WORKING SET +1285939c1fd3 0.07% 796 KiB / 64 MiB +9c76f7834ae2 0.07% 2.746 MiB / 64 MiB +d1ea048f04e4 0.03% 4.583 MiB / 64 MiB +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stop.md b/vendor/github.com/docker/docker/docs/reference/commandline/stop.md new file mode 100644 index 0000000..3090db9 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stop.md @@ -0,0 +1,29 @@ +--- +title: "stop" +description: "The stop command description and usage" +keywords: "stop, SIGKILL, SIGTERM" +--- + + + +# stop + +```markdown +Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] + +Stop one or more running containers + +Options: + --help Print usage + -t, --time int Seconds to wait for stop before killing it (default 10) +``` + +The main process inside the container will receive `SIGTERM`, and after a grace +period, `SIGKILL`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md new file mode 100644 index 0000000..44afc27 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md @@ -0,0 +1,142 @@ +--- +title: "swarm init" +description: "The swarm init command description and usage" +keywords: "swarm, init" +--- + + + +# swarm init + +```markdown +Usage: docker swarm init [OPTIONS] + +Initialize a swarm + +Options: + --advertise-addr string Advertised address (format: [:port]) + --autolock Enable manager autolocking (requiring an unlock key to start a stopped manager) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + --force-new-cluster Force create a new cluster from current state + --help Print usage + --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) +``` + +Initialize a swarm. The docker engine targeted by this command becomes a manager +in the newly created single-node swarm. + + +```bash +$ docker swarm init --advertise-addr 192.168.99.121 +Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager. + +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ + 172.17.0.2:2377 + +To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. +``` + +`docker swarm init` generates two random tokens, a worker token and a manager token. When you join +a new node to the swarm, the node joins as a worker or manager node based upon the token you pass +to [swarm join](swarm_join.md). + +After you create the swarm, you can display or rotate the token using +[swarm join-token](swarm_join_token.md). + +### `--autolock` + +This flag enables automatic locking of managers with an encryption key. The +private keys and data stored by all managers will be protected by the +encryption key printed in the output, and will not be accessible without it. +Thus, it is very important to store this key in order to activate a manager +after it restarts. The key can be passed to `docker swarm unlock` to reactivate +the manager. Autolock can be disabled by running +`docker swarm update --autolock=false`. After disabling it, the encryption key +is no longer required to start the manager, and it will start up on its own +without user intervention. + +### `--cert-expiry` + +This flag sets the validity period for node certificates. + +### `--dispatcher-heartbeat` + +This flag sets the frequency with which nodes are told to use as a +period to report their health. + +### `--external-ca` + +This flag sets up the swarm to use an external CA to issue node certificates. The value takes +the form `protocol=X,url=Y`. The value for `protocol` specifies what protocol should be used +to send signing requests to the external CA. Currently, the only supported value is `cfssl`. +The URL specifies the endpoint where signing requests should be submitted. + +### `--force-new-cluster` + +This flag forces an existing node that was part of a quorum that was lost to restart as a single node Manager without losing its data. + +### `--listen-addr` + +The node listens for inbound swarm manager traffic on this address. The default is to listen on +0.0.0.0:2377. It is also possible to specify a network interface to listen on that interface's +address; for example `--listen-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address or interface +name, the default port 2377 will be used. + +### `--advertise-addr` + +This flag specifies the address that will be advertised to other members of the +swarm for API access and overlay networking. If unspecified, Docker will check +if the system has a single IP address, and use that IP address with the +listening port (see `--listen-addr`). If the system has multiple IP addresses, +`--advertise-addr` must be specified so that the correct address is chosen for +inter-manager communication and overlay networking. + +It is also possible to specify a network interface to advertise that interface's address; +for example `--advertise-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address or interface +name, the default port 2377 will be used. + +### `--task-history-limit` + +This flag sets up task history retention limit. + +### `--max-snapshots` + +This flag sets the number of old Raft snapshots to retain in addition to the +current Raft snapshots. By default, no old snapshots are retained. This option +may be used for debugging, or to store old snapshots of the swarm state for +disaster recovery purposes. + +### `--snapshot-interval` + +This flag specifies how many log entries to allow in between Raft snapshots. +Setting this to a higher number will trigger snapshots less frequently. +Snapshots compact the Raft log and allow for more efficient transfer of the +state to new managers. However, there is a performance cost to taking snapshots +frequently. + +## Related information + +* [swarm join](swarm_join.md) +* [swarm leave](swarm_leave.md) +* [swarm update](swarm_update.md) +* [swarm join-token](swarm_join_token.md) +* [node rm](node_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md new file mode 100644 index 0000000..0cde0d7 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md @@ -0,0 +1,102 @@ +--- +title: "swarm join" +description: "The swarm join command description and usage" +keywords: "swarm, join" +--- + + + +# swarm join + +```markdown +Usage: docker swarm join [OPTIONS] HOST:PORT + +Join a swarm as a node and/or manager + +Options: + --advertise-addr string Advertised address (format: [:port]) + --help Print usage + --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) + --token string Token for entry into the swarm +``` + +Join a node to a swarm. The node joins as a manager node or worker node based upon the token you +pass with the `--token` flag. If you pass a manager token, the node joins as a manager. If you +pass a worker token, the node joins as a worker. + +### Join a node to swarm as a manager + +The example below demonstrates joining a manager node using a manager token. + +```bash +$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 192.168.99.121:2377 +This node joined a swarm as a manager. +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +dkp8vy1dq1kxleu9g4u78tlag * manager2 Ready Active Reachable +dvfxp4zseq4s0rih1selh0d20 manager1 Ready Active Leader +``` + +A cluster should only have 3-7 managers at most, because a majority of managers must be available +for the cluster to function. Nodes that aren't meant to participate in this management quorum +should join as workers instead. Managers should be stable hosts that have static IP addresses. + +### Join a node to swarm as a worker + +The example below demonstrates joining a worker node using a worker token. + +```bash +$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 192.168.99.121:2377 +This node joined a swarm as a worker. +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active +dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable +dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader +``` + +### `--listen-addr value` + +If the node is a manager, it will listen for inbound swarm manager traffic on this +address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a +network interface to listen on that interface's address; for example `--listen-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address, or interface +name, the default port 2377 will be used. + +This flag is generally not necessary when joining an existing swarm. + +### `--advertise-addr value` + +This flag specifies the address that will be advertised to other members of the +swarm for API access. If unspecified, Docker will check if the system has a +single IP address, and use that IP address with the listening port (see +`--listen-addr`). If the system has multiple IP addresses, `--advertise-addr` +must be specified so that the correct address is chosen for inter-manager +communication and overlay networking. + +It is also possible to specify a network interface to advertise that interface's address; +for example `--advertise-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address, or interface +name, the default port 2377 will be used. + +This flag is generally not necessary when joining an existing swarm. + +### `--token string` + +Secret value required for nodes to join the swarm + + +## Related information + +* [swarm init](swarm_init.md) +* [swarm leave](swarm_leave.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md new file mode 100644 index 0000000..d731f02 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md @@ -0,0 +1,105 @@ +--- +title: "swarm join-token" +description: "The swarm join-token command description and usage" +keywords: "swarm, join-token" +--- + + + +# swarm join-token + +```markdown +Usage: docker swarm join-token [OPTIONS] (worker|manager) + +Manage join tokens + +Options: + --help Print usage + -q, --quiet Only display token + --rotate Rotate join token +``` + +Join tokens are secrets that allow a node to join the swarm. There are two +different join tokens available, one for the worker role and one for the manager +role. You pass the token using the `--token` flag when you run +[swarm join](swarm_join.md). Nodes use the join token only when they join the +swarm. + +You can view or rotate the join tokens using `swarm join-token`. + +As a convenience, you can pass `worker` or `manager` as an argument to +`join-token` to print the full `docker swarm join` command to join a new node to +the swarm: + +```bash +$ docker swarm join-token worker +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ + 172.17.0.2:2377 + +$ docker swarm join-token manager +To add a manager to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \ + 172.17.0.2:2377 +``` + +Use the `--rotate` flag to generate a new join token for the specified role: + +```bash +$ docker swarm join-token --rotate worker +Successfully rotated worker join token. + +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \ + 172.17.0.2:2377 +``` + +After using `--rotate`, only the new token will be valid for joining with the specified role. + +The `-q` (or `--quiet`) flag only prints the token: + +```bash +$ docker swarm join-token -q worker + +SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t +``` + +### `--rotate` + +Because tokens allow new nodes to join the swarm, you should keep them secret. +Be particularly careful with manager tokens since they allow new manager nodes +to join the swarm. A rogue manager has the potential to disrupt the operation of +your swarm. + +Rotate your swarm's join token if a token gets checked-in to version control, +stolen, or a node is compromised. You may also want to periodically rotate the +token to ensure any unknown token leaks do not allow a rogue node to join +the swarm. + +To rotate the join token and print the newly generated token, run +`docker swarm join-token --rotate` and pass the role: `manager` or `worker`. + +Rotating a join-token means that no new nodes will be able to join the swarm +using the old token. Rotation does not affect existing nodes in the swarm +because the join token is only used for authorizing new nodes joining the swarm. + +### `--quiet` + +Only print the token. Do not print a complete command for joining. + +## Related information + +* [swarm join](swarm_join.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md new file mode 100644 index 0000000..c0d9437 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md @@ -0,0 +1,58 @@ +--- +title: "swarm leave" +description: "The swarm leave command description and usage" +keywords: "swarm, leave" +--- + + + +# swarm leave + +```markdown +Usage: docker swarm leave [OPTIONS] + +Leave the swarm + +Options: + -f, --force Force this node to leave the swarm, ignoring warnings + --help Print usage +``` + +When you run this command on a worker, that worker leaves the swarm. + +You can use the `--force` option to on a manager to remove it from the swarm. +However, this does not reconfigure the swarm to ensure that there are enough +managers to maintain a quorum in the swarm. The safe way to remove a manager +from a swarm is to demote it to a worker and then direct it to leave the quorum +without using `--force`. Only use `--force` in situations where the swarm will +no longer be used after the manager leaves, such as in a single-node swarm. + +Consider the following swarm, as seen from the manager: +```bash +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active +dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active +dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader +``` + +To remove `worker2`, issue the following command from `worker2` itself: +```bash +$ docker swarm leave +Node left the default swarm. +``` +To remove an inactive node, use the [`node rm`](node_rm.md) command instead. + +## Related information + +* [node rm](node_rm.md) +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md new file mode 100644 index 0000000..164b7d3 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md @@ -0,0 +1,41 @@ +--- +title: "swarm unlock" +description: "The swarm unlock command description and usage" +keywords: "swarm, unlock" +--- + + + +# swarm unlock + +```markdown +Usage: docker swarm unlock + +Unlock swarm + +Options: + --help Print usage +``` + +Unlocks a locked manager using a user-supplied unlock key. This command must be +used to reactivate a manager after its Docker daemon restarts if the autolock +setting is turned on. The unlock key is printed at the time when autolock is +enabled, and is also available from the `docker swarm unlock-key` command. + + +```bash +$ docker swarm unlock +Please enter unlock key: +``` + +## Related information + +* [swarm init](swarm_init.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md new file mode 100644 index 0000000..a2597fe --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md @@ -0,0 +1,84 @@ +--- +title: "swarm unlock-key" +description: "The swarm unlock-keycommand description and usage" +keywords: "swarm, unlock-key" +--- + + + +# swarm unlock-key + +```markdown +Usage: docker swarm unlock-key [OPTIONS] + +Manage the unlock key + +Options: + --help Print usage + -q, --quiet Only display token + --rotate Rotate unlock key +``` + +An unlock key is a secret key needed to unlock a manager after its Docker daemon +restarts. These keys are only used when the autolock feature is enabled for the +swarm. + +You can view or rotate the unlock key using `swarm unlock-key`. To view the key, +run the `docker swarm unlock-key` command without any arguments: + + +```bash +$ docker swarm unlock-key +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-fySn8TY4w5lKcWcJPIpKufejh9hxx5KYwx6XZigx3Q4 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. +``` + +Use the `--rotate` flag to rotate the unlock key to a new, randomly-generated +key: + +```bash +$ docker swarm unlock-key --rotate +Successfully rotated manager unlock key. + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. +``` + +The `-q` (or `--quiet`) flag only prints the key: + +```bash +$ docker swarm unlock-key -q +SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 +``` + +### `--rotate` + +This flag rotates the unlock key, replacing it with a new randomly-generated +key. The old unlock key will no longer be accepted. + +### `--quiet` + +Only print the unlock key, without instructions. + +## Related information + +* [swarm unlock](swarm_unlock.md) +* [swarm init](swarm_init.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md new file mode 100644 index 0000000..0af63fe --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md @@ -0,0 +1,45 @@ +--- +title: "swarm update" +description: "The swarm update command description and usage" +keywords: "swarm, update" +--- + + + +# swarm update + +```markdown +Usage: docker swarm update [OPTIONS] + +Update the swarm + +Options: + --autolock Change manager autolocking setting (true|false) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + --help Print usage + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) +``` + +Updates a swarm with new parameter values. This command must target a manager node. + + +```bash +$ docker swarm update --cert-expiry 720h +``` + +## Related information + +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm leave](swarm_leave.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md b/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md new file mode 100644 index 0000000..c6e8bbd --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md @@ -0,0 +1,76 @@ +--- +title: "system df" +description: "The system df command description and usage" +keywords: "system, data, usage, disk" +--- + + + +# system df + +```markdown +Usage: docker system df [OPTIONS] + +Show docker filesystem usage + +Options: + --help Print usage + -v, --verbose Show detailed information on space usage +``` + +The `docker system df` command displays information regarding the +amount of disk space used by the docker daemon. + +By default the command will just show a summary of the data used: +```bash +$ docker system df +TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 5 2 16.43 MB 11.63 MB (70%) +Containers 2 0 212 B 212 B (100%) +Local Volumes 2 1 36 B 0 B (0%) +``` + +A more detailed view can be requested using the `-v, --verbose` flag: +```bash +$ docker system df -v +Images space usage: + +REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS +my-curl latest b2789dd875bf 6 minutes ago 11 MB 11 MB 5 B 0 +my-jq latest ae67841be6d0 6 minutes ago 9.623 MB 8.991 MB 632.1 kB 0 + a0971c4015c1 6 minutes ago 11 MB 11 MB 0 B 0 +alpine latest 4e38e38c8ce0 9 weeks ago 4.799 MB 0 B 4.799 MB 1 +alpine 3.3 47cf20d8c26c 9 weeks ago 4.797 MB 4.797 MB 0 B 1 + +Containers space usage: + +CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES +4a7f7eebae0f alpine:latest "sh" 1 0 B 16 minutes ago Exited (0) 5 minutes ago hopeful_yalow +f98f9c2aa1ea alpine:3.3 "sh" 1 212 B 16 minutes ago Exited (0) 48 seconds ago anon-vol + +Local Volumes space usage: + +NAME LINKS SIZE +07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e 2 36 B +my-named-vol 0 0 B +``` + +* `SHARED SIZE` is the amount of space that an image shares with another one (i.e. their common data) +* `UNIQUE SIZE` is the amount of space that is only used by a given image +* `SIZE` is the virtual size of the image, it is the sum of `SHARED SIZE` and `UNIQUE SIZE` + +Note that network information is not shown because it doesn't consume the disk space. + +## Related Information +* [system prune](system_prune.md) +* [container prune](container_prune.md) +* [volume prune](volume_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md new file mode 100644 index 0000000..46f8c43 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md @@ -0,0 +1,79 @@ +--- +title: "system prune" +description: "Remove unused data" +keywords: "system, prune, delete, remove" +--- + + + +# system prune + +```markdown +Usage: docker system prune [OPTIONS] + +Delete unused data + +Options: + -a, --all Remove all unused data not just dangling ones + -f, --force Do not prompt for confirmation + --help Print usage +``` + +Remove all unused containers, volumes, networks and images (both dangling and unreferenced). + +Example output: + +```bash +$ docker system prune -a +WARNING! This will remove: + - all stopped containers + - all volumes not used by at least one container + - all networks not used by at least one container + - all images without at least one container associated to them +Are you sure you want to continue? [y/N] y +Deleted Containers: +0998aa37185a1a7036b0e12cf1ac1b6442dcfa30a5c9650a42ed5010046f195b +73958bfb884fa81fa4cc6baf61055667e940ea2357b4036acbbe25a60f442a4d + +Deleted Volumes: +named-vol + +Deleted Images: +untagged: my-curl:latest +deleted: sha256:7d88582121f2a29031d92017754d62a0d1a215c97e8f0106c586546e7404447d +deleted: sha256:dd14a93d83593d4024152f85d7c63f76aaa4e73e228377ba1d130ef5149f4d8b +untagged: alpine:3.3 +deleted: sha256:695f3d04125db3266d4ab7bbb3c6b23aa4293923e762aa2562c54f49a28f009f +untagged: alpine:latest +deleted: sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96 +deleted: sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f +deleted: sha256:71fa90c8f04769c9721459d5aa0936db640b92c8c91c9b589b54abd412d120ab +deleted: sha256:bb1c3357b3c30ece26e6604aea7d2ec0ace4166ff34c3616701279c22444c0f3 +untagged: my-jq:latest +deleted: sha256:6e66d724542af9bc4c4abf4a909791d7260b6d0110d8e220708b09e4ee1322e1 +deleted: sha256:07b3fa89d4b17009eb3988dfc592c7d30ab3ba52d2007832dffcf6d40e3eda7f +deleted: sha256:3a88a5c81eb5c283e72db2dbc6d65cbfd8e80b6c89bb6e714cfaaa0eed99c548 + +Total reclaimed space: 13.5 MB +``` + +## Related information + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/tag.md b/vendor/github.com/docker/docker/docs/reference/commandline/tag.md new file mode 100644 index 0000000..983bfe2 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/tag.md @@ -0,0 +1,74 @@ +--- +title: "tag" +description: "The tag command description and usage" +keywords: "tag, name, image" +--- + + + +# tag + +```markdown +Usage: docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] + +Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE + +Options: + --help Print usage +``` + +An image name is made up of slash-separated name components, optionally prefixed +by a registry hostname. The hostname must comply with standard DNS rules, but +may not contain underscores. If a hostname is present, it may optionally be +followed by a port number in the format `:8080`. If not present, the command +uses Docker's public registry located at `registry-1.docker.io` by default. Name +components may contain lowercase characters, digits and separators. A separator +is defined as a period, one or two underscores, or one or more dashes. A name +component may not start or end with a separator. + +A tag name may contain lowercase and uppercase characters, digits, underscores, +periods and dashes. A tag name may not start with a period or a dash and may +contain a maximum of 128 characters. + +You can group your images together using names and tags, and then upload them +to [*Share Images via Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). + +# Examples + +## Tagging an image referenced by ID + +To tag a local image with ID "0e5574283393" into the "fedora" repository with +"version1.0": + + docker tag 0e5574283393 fedora/httpd:version1.0 + +## Tagging an image referenced by Name + +To tag a local image with name "httpd" into the "fedora" repository with +"version1.0": + + docker tag httpd fedora/httpd:version1.0 + +Note that since the tag name is not specified, the alias is created for an +existing local version `httpd:latest`. + +## Tagging an image referenced by Name and Tag + +To tag a local image with name "httpd" and tag "test" into the "fedora" +repository with "version1.0.test": + + docker tag httpd:test fedora/httpd:version1.0.test + +## Tagging an image for a private repository + +To push an image to a private registry and not the central Docker +registry you must tag it with the registry hostname and port (if needed). + + docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/top.md b/vendor/github.com/docker/docker/docs/reference/commandline/top.md new file mode 100644 index 0000000..0a04828 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/top.md @@ -0,0 +1,25 @@ +--- +title: "top" +description: "The top command description and usage" +keywords: "container, running, processes" +--- + + + +# top + +```markdown +Usage: docker top CONTAINER [ps OPTIONS] + +Display the running processes of a container + +Options: + --help Print usage +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md b/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md new file mode 100644 index 0000000..aa2326f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md @@ -0,0 +1,36 @@ +--- +title: "unpause" +description: "The unpause command description and usage" +keywords: "cgroups, suspend, container" +--- + + + +# unpause + +```markdown +Usage: docker unpause CONTAINER [CONTAINER...] + +Unpause all processes within one or more containers + +Options: + --help Print usage +``` + +The `docker unpause` command un-suspends all processes in the specified containers. +On Linux, it does this using the cgroups freezer. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) +for further details. + +## Related information + +* [pause](pause.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/update.md b/vendor/github.com/docker/docker/docs/reference/commandline/update.md new file mode 100644 index 0000000..a139004 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/update.md @@ -0,0 +1,120 @@ +--- +title: "update" +description: "The update command description and usage" +keywords: "resources, update, dynamically" +--- + + + +## update + +```markdown +Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] + +Update configuration of one or more containers + +Options: + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --help Print usage + --kernel-memory string Kernel memory limit + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --restart string Restart policy to apply when a container exits +``` + +The `docker update` command dynamically updates container configuration. +You can use this command to prevent containers from consuming too many +resources from their Docker host. With a single command, you can place +limits on a single container or on many. To specify more than one container, +provide space-separated list of container names or IDs. + +With the exception of the `--kernel-memory` option, you can specify these +options on a running or a stopped container. On kernel version older than +4.6, you can only update `--kernel-memory` on a stopped container or on +a running container with kernel memory initialized. + +## Examples + +The following sections illustrate ways to use this command. + +### Update a container's cpu-shares + +To limit a container's cpu-shares to 512, first identify the container +name or ID. You can use `docker ps` to find these values. You can also +use the ID returned from the `docker run` command. Then, do the following: + +```bash +$ docker update --cpu-shares 512 abebf7571666 +``` + +### Update a container with cpu-shares and memory + +To update multiple resource configurations for multiple containers: + +```bash +$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse +``` + +### Update a container's kernel memory constraints + +You can update a container's kernel memory limit using the `--kernel-memory` +option. On kernel version older than 4.6, this option can be updated on a +running container only if the container was started with `--kernel-memory`. +If the container was started *without* `--kernel-memory` you need to stop +the container before updating kernel memory. + +For example, if you started a container with this command: + +```bash +$ docker run -dit --name test --kernel-memory 50M ubuntu bash +``` + +You can update kernel memory while the container is running: + +```bash +$ docker update --kernel-memory 80M test +``` + +If you started a container *without* kernel memory initialized: + +```bash +$ docker run -dit --name test2 --memory 300M ubuntu bash +``` + +Update kernel memory of running container `test2` will fail. You need to stop +the container before updating the `--kernel-memory` setting. The next time you +start it, the container uses the new value. + +Kernel version newer than (include) 4.6 does not have this limitation, you +can use `--kernel-memory` the same way as other options. + +### Update a container's restart policy + +You can change a container's restart policy on a running container. The new +restart policy takes effect instantly after you run `docker update` on a +container. + +To update restart policy for one or more containers: + +```bash +$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse +``` + +Note that if the container is started with "--rm" flag, you cannot update the restart +policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the +container. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/version.md b/vendor/github.com/docker/docker/docs/reference/commandline/version.md new file mode 100644 index 0000000..cb1bcee --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/version.md @@ -0,0 +1,67 @@ +--- +title: "version" +description: "The version command description and usage" +keywords: "version, architecture, api" +--- + + + +# version + +```markdown +Usage: docker version [OPTIONS] + +Show the Docker version information + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +By default, this will render all version information in an easy to read +layout. If a format is specified, the given template will be executed instead. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +**Default output:** + + $ docker version + Client: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + + Server: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + +**Get server version:** + + {% raw %} + $ docker version --format '{{.Server.Version}}' + 1.8.0 + {% endraw %} + +**Dump raw data:** + + {% raw %} + $ docker version --format '{{json .}}' + {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} + {% endraw %} diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md new file mode 100644 index 0000000..9b188a9 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md @@ -0,0 +1,91 @@ +--- +title: "volume create" +description: "The volume create command description and usage" +keywords: "volume, create" +--- + + + +# volume create + +```markdown +Usage: docker volume create [OPTIONS] [VOLUME] + +Create a volume + +Options: + -d, --driver string Specify volume driver name (default "local") + --help Print usage + --label value Set metadata for a volume (default []) + -o, --opt value Set driver specific options (default map[]) +``` + +Creates a new volume that containers can consume and store data in. If a name is not specified, Docker generates a random name. You create a volume and then configure the container to use it, for example: + +```bash +$ docker volume create hello +hello + +$ docker run -d -v hello:/world busybox ls /world +``` + +The mount is created inside the container's `/world` directory. Docker does not support relative paths for mount points inside the container. + +Multiple containers can use the same volume in the same time period. This is useful if two containers need access to shared data. For example, if one container writes and the other reads the data. + +Volume names must be unique among drivers. This means you cannot use the same volume name with two different drivers. If you attempt this `docker` returns an error: + +``` +A volume named "hello" already exists with the "some-other" driver. Choose a different volume name. +``` + +If you specify a volume name already in use on the current driver, Docker assumes you want to re-use the existing volume and does not return an error. + +## Driver specific options + +Some volume drivers may take options to customize the volume creation. Use the `-o` or `--opt` flags to pass driver options: + +```bash +$ docker volume create --driver fake --opt tardis=blue --opt timey=wimey +``` + +These options are passed directly to the volume driver. Options for +different volume drivers may do different things (or nothing at all). + +The built-in `local` driver on Windows does not support any options. + +The built-in `local` driver on Linux accepts options similar to the linux `mount` command. You can provide multiple options by passing the `--opt` flag multiple times. Some `mount` options (such as the `o` option) can take a comma-separated list of options. Complete list of available mount options can be found [here](http://man7.org/linux/man-pages/man8/mount.8.html). + +For example, the following creates a `tmpfs` volume called `foo` with a size of 100 megabyte and `uid` of 1000. + +```bash +$ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 foo +``` + +Another example that uses `btrfs`: + +```bash +$ docker volume create --driver local --opt type=btrfs --opt device=/dev/sda2 foo +``` + +Another example that uses `nfs` to mount the `/path/to/dir` in `rw` mode from `192.168.1.1`: + +```bash +$ docker volume create --driver local --opt type=nfs --opt o=addr=192.168.1.1,rw --opt device=:/path/to/dir foo +``` + + +## Related information + +* [volume inspect](volume_inspect.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md new file mode 100644 index 0000000..98e0ee5 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md @@ -0,0 +1,59 @@ +--- +title: "volume inspect" +description: "The volume inspect command description and usage" +keywords: "volume, inspect" +--- + + + +# volume inspect + +```markdown +Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] + +Display detailed information on one or more volumes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +Returns information about a volume. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +Example output: + + $ docker volume create + 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d + $ docker volume inspect 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d + [ + { + "Name": "85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data", + "Status": null + } + ] + + {% raw %} + $ docker volume inspect --format '{{ .Mountpoint }}' 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d + /var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data + {% endraw %} + +## Related information + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md new file mode 100644 index 0000000..90ecef2 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md @@ -0,0 +1,183 @@ +--- +title: "volume ls" +description: "The volume ls command description and usage" +keywords: "volume, list" +--- + + + +# volume ls + +```markdown +Usage: docker volume ls [OPTIONS] + +List volumes + +Aliases: + ls, list + +Options: + -f, --filter value Provide filter values (e.g. 'dangling=true') (default []) + - dangling= a volume if referenced or not + - driver= a volume's driver name + - label= or label== + - name= a volume's name + --format string Pretty-print volumes using a Go template + --help Print usage + -q, --quiet Only display volume names +``` + +List all the volumes Docker knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. + +Example output: + +```bash +$ docker volume create rosemary +rosemary +$docker volume create tyler +tyler +$ docker volume ls +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* dangling (boolean - true or false, 0 or 1) +* driver (a volume driver's name) +* label (`label=` or `label==`) +* name (a volume's name) + +### dangling + +The `dangling` filter matches on all volumes not referenced by any containers + +```bash +$ docker run -d -v tyler:/tmpwork busybox + +f86a7dd02898067079c99ceacd810149060a70528eff3754d0b0f1a93bd0af18 +$ docker volume ls -f dangling=true +DRIVER VOLUME NAME +local rosemary +``` + +### driver + +The `driver` filter matches on all or part of a volume's driver name. + +The following filter matches all volumes with a driver name containing the `local` string. + +```bash +$ docker volume ls -f driver=local + +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +#### Label + +The `label` filter matches volumes based on the presence of a `label` alone or +a `label` and a value. + +First, let's create some volumes to illustrate this; + +```bash +$ docker volume create the-doctor --label is-timelord=yes +the-doctor +$ docker volume create daleks --label is-timelord=no +daleks +``` + +The following example filter matches volumes with the `is-timelord` label +regardless of its value. + +```bash +$ docker volume ls --filter label=is-timelord + +DRIVER VOLUME NAME +local daleks +local the-doctor +``` + +As can be seen in the above example, both volumes with `is-timelord=yes`, and +`is-timelord=no` are returned. + +Filtering on both `key` *and* `value` of the label, produces the expected result: + +```bash +$ docker volume ls --filter label=is-timelord=yes + +DRIVER VOLUME NAME +local the-doctor +``` + +Specifying multiple label filter produces an "and" search; all conditions +should be met; + +```bash +$ docker volume ls --filter label=is-timelord=yes --filter label=is-timelord=no + +DRIVER VOLUME NAME +``` + +### name + +The `name` filter matches on all or part of a volume's name. + +The following filter matches all volumes with a name containing the `rose` string. + + $ docker volume ls -f name=rose + DRIVER VOLUME NAME + local rosemary + +## Formatting + +The formatting options (`--format`) pretty-prints volumes output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +--------------|------------------------------------------------------------------------------------------ +`.Name` | Network name +`.Driver` | Network driver +`.Scope` | Network scope (local, global) +`.Mountpoint` | Whether the network is internal or not. +`.Labels` | All labels assigned to the volume. +`.Label` | Value of a specific label for this volume. For example `{{.Label "project.version"}}` + +When using the `--format` option, the `volume ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`Name` and `Driver` entries separated by a colon for all volumes: + +```bash +$ docker volume ls --format "{{.Name}}: {{.Driver}}" +vol1: local +vol2: local +vol3: local +``` + +## Related information + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md new file mode 100644 index 0000000..d910a49 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md @@ -0,0 +1,54 @@ +--- +title: "volume prune" +description: "Remove unused volumes" +keywords: "volume, prune, delete" +--- + + + +# volume prune + +```markdown +Usage: docker volume prune [OPTIONS] + +Remove all unused volumes + +Options: + -f, --force Do not prompt for confirmation + --help Print usage +``` + +Remove all unused volumes. Unused volumes are those which are not referenced by any containers + +Example output: + +```bash +$ docker volume prune +WARNING! This will remove all volumes not used by at least one container. +Are you sure you want to continue? [y/N] y +Deleted Volumes: +07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e +my-named-vol + +Total reclaimed space: 36 B +``` + +## Related information + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md new file mode 100644 index 0000000..1bf9dba --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md @@ -0,0 +1,42 @@ +--- +title: "volume rm" +description: "the volume rm command description and usage" +keywords: "volume, rm" +--- + + + +# volume rm + +```markdown +Usage: docker volume rm [OPTIONS] VOLUME [VOLUME...] + +Remove one or more volumes + +Aliases: + rm, remove + +Options: + -f, --force Force the removal of one or more volumes + --help Print usage +``` + +Remove one or more volumes. You cannot remove a volume that is in use by a container. + + $ docker volume rm hello + hello + +## Related information + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume ls](volume_ls.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/wait.md b/vendor/github.com/docker/docker/docs/reference/commandline/wait.md new file mode 100644 index 0000000..a07b82b --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/wait.md @@ -0,0 +1,25 @@ +--- +title: "wait" +description: "The wait command description and usage" +keywords: "container, stop, wait" +--- + + + +# wait + +```markdown +Usage: docker wait CONTAINER [CONTAINER...] + +Block until one or more containers stop, then print their exit codes + +Options: + --help Print usage +``` diff --git a/vendor/github.com/docker/docker/docs/reference/glossary.md b/vendor/github.com/docker/docker/docs/reference/glossary.md new file mode 100644 index 0000000..0bc39a2 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/glossary.md @@ -0,0 +1,286 @@ +--- +title: "Docker Glossary" +description: "Glossary of terms used around Docker" +keywords: "glossary, docker, terms, definitions" +--- + + + +# Glossary + +A list of terms used around the Docker project. + +## aufs + +aufs (advanced multi layered unification filesystem) is a Linux [filesystem](#filesystem) that +Docker supports as a storage backend. It implements the +[union mount](http://en.wikipedia.org/wiki/Union_mount) for Linux file systems. + +## base image + +An image that has no parent is a **base image**. + +## boot2docker + +[boot2docker](http://boot2docker.io/) is a lightweight Linux distribution made +specifically to run Docker containers. The boot2docker management tool for Mac and Windows was deprecated and replaced by [`docker-machine`](#machine) which you can install with the Docker Toolbox. + +## btrfs + +btrfs (B-tree file system) is a Linux [filesystem](#filesystem) that Docker +supports as a storage backend. It is a [copy-on-write](http://en.wikipedia.org/wiki/Copy-on-write) +filesystem. + +## build + +build is the process of building Docker images using a [Dockerfile](#dockerfile). +The build uses a Dockerfile and a "context". The context is the set of files in the +directory in which the image is built. + +## cgroups + +cgroups is a Linux kernel feature that limits, accounts for, and isolates +the resource usage (CPU, memory, disk I/O, network, etc.) of a collection +of processes. Docker relies on cgroups to control and isolate resource limits. + +*Also known as : control groups* + +## Compose + +[Compose](https://github.com/docker/compose) is a tool for defining and +running complex applications with Docker. With compose, you define a +multi-container application in a single file, then spin your +application up in a single command which does everything that needs to +be done to get it running. + +*Also known as : docker-compose, fig* + +## container + +A container is a runtime instance of a [docker image](#image). + +A Docker container consists of + +- A Docker image +- Execution environment +- A standard set of instructions + +The concept is borrowed from Shipping Containers, which define a standard to ship +goods globally. Docker defines a standard to ship software. + +## data volume + +A data volume is a specially-designated directory within one or more containers +that bypasses the Union File System. Data volumes are designed to persist data, +independent of the container's life cycle. Docker therefore never automatically +delete volumes when you remove a container, nor will it "garbage collect" +volumes that are no longer referenced by a container. + + +## Docker + +The term Docker can refer to + +- The Docker project as a whole, which is a platform for developers and sysadmins to +develop, ship, and run applications +- The docker daemon process running on the host which manages images and containers + + +## Docker Hub + +The [Docker Hub](https://hub.docker.com/) is a centralized resource for working with +Docker and its components. It provides the following services: + +- Docker image hosting +- User authentication +- Automated image builds and work-flow tools such as build triggers and web hooks +- Integration with GitHub and Bitbucket + + +## Dockerfile + +A Dockerfile is a text document that contains all the commands you would +normally execute manually in order to build a Docker image. Docker can +build images automatically by reading the instructions from a Dockerfile. + +## filesystem + +A file system is the method an operating system uses to name files +and assign them locations for efficient storage and retrieval. + +Examples : + +- Linux : ext4, aufs, btrfs, zfs +- Windows : NTFS +- macOS : HFS+ + +## image + +Docker images are the basis of [containers](#container). An Image is an +ordered collection of root filesystem changes and the corresponding +execution parameters for use within a container runtime. An image typically +contains a union of layered filesystems stacked on top of each other. An image +does not have state and it never changes. + +## libcontainer + +libcontainer provides a native Go implementation for creating containers with +namespaces, cgroups, capabilities, and filesystem access controls. It allows +you to manage the lifecycle of the container performing additional operations +after the container is created. + +## libnetwork + +libnetwork provides a native Go implementation for creating and managing container +network namespaces and other network resources. It manage the networking lifecycle +of the container performing additional operations after the container is created. + +## link + +links provide a legacy interface to connect Docker containers running on the +same host to each other without exposing the hosts' network ports. Use the +Docker networks feature instead. + +## Machine + +[Machine](https://github.com/docker/machine) is a Docker tool which +makes it really easy to create Docker hosts on your computer, on +cloud providers and inside your own data center. It creates servers, +installs Docker on them, then configures the Docker client to talk to them. + +*Also known as : docker-machine* + +## node + +A [node](https://docs.docker.com/engine/swarm/how-swarm-mode-works/nodes/) is a physical or virtual +machine running an instance of the Docker Engine in swarm mode. + +**Manager nodes** perform swarm management and orchestration duties. By default +manager nodes are also worker nodes. + +**Worker nodes** execute tasks. + +## overlay network driver + +Overlay network driver provides out of the box multi-host network connectivity +for docker containers in a cluster. + +## overlay storage driver + +OverlayFS is a [filesystem](#filesystem) service for Linux which implements a +[union mount](http://en.wikipedia.org/wiki/Union_mount) for other file systems. +It is supported by the Docker daemon as a storage driver. + +## registry + +A Registry is a hosted service containing [repositories](#repository) of [images](#image) +which responds to the Registry API. + +The default registry can be accessed using a browser at [Docker Hub](#docker-hub) +or using the `docker search` command. + +## repository + +A repository is a set of Docker images. A repository can be shared by pushing it +to a [registry](#registry) server. The different images in the repository can be +labeled using [tags](#tag). + +Here is an example of the shared [nginx repository](https://hub.docker.com/_/nginx/) +and its [tags](https://hub.docker.com/r/library/nginx/tags/) + + +## service + +A [service](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/) is the definition of how +you want to run your application containers in a swarm. At the most basic level +a service defines which container image to run in the swarm and which commands +to run in the container. For orchestration purposes, the service defines the +"desired state", meaning how many containers to run as tasks and constraints for +deploying the containers. + +Frequently a service is a microservice within the context of some larger +application. Examples of services might include an HTTP server, a database, or +any other type of executable program that you wish to run in a distributed +environment. + +## service discovery + +Swarm mode [service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery) is a DNS component +internal to the swarm that automatically assigns each service on an overlay +network in the swarm a VIP and DNS entry. Containers on the network share DNS +mappings for the service via gossip so any container on the network can access +the service via its service name. + +You don’t need to expose service-specific ports to make the service available to +other services on the same overlay network. The swarm’s internal load balancer +automatically distributes requests to the service VIP among the active tasks. + +## swarm + +A [swarm](https://docs.docker.com/engine/swarm/) is a cluster of one or more Docker Engines running in [swarm mode](#swarm-mode). + +## Docker Swarm + +Do not confuse [Docker Swarm](https://github.com/docker/swarm) with the [swarm mode](#swarm-mode) features in Docker Engine. + +Docker Swarm is the name of a standalone native clustering tool for Docker. +Docker Swarm pools together several Docker hosts and exposes them as a single +virtual Docker host. It serves the standard Docker API, so any tool that already +works with Docker can now transparently scale up to multiple hosts. + +*Also known as : docker-swarm* + +## swarm mode + +[Swarm mode](https://docs.docker.com/engine/swarm/) refers to cluster management and orchestration +features embedded in Docker Engine. When you initialize a new swarm (cluster) or +join nodes to a swarm, the Docker Engine runs in swarm mode. + +## tag + +A tag is a label applied to a Docker image in a [repository](#repository). +tags are how various images in a repository are distinguished from each other. + +*Note : This label is not related to the key=value labels set for docker daemon* + +## task + +A [task](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/#/tasks-and-scheduling) is the +atomic unit of scheduling within a swarm. A task carries a Docker container and +the commands to run inside the container. Manager nodes assign tasks to worker +nodes according to the number of replicas set in the service scale. + +The diagram below illustrates the relationship of services to tasks and +containers. + +![services diagram](https://docs.docker.com/engine/swarm/images/services-diagram.png) + +## Toolbox + +Docker Toolbox is the installer for Mac and Windows users. + + +## Union file system + +Union file systems, or UnionFS, are file systems that operate by creating layers, making them +very lightweight and fast. Docker uses union file systems to provide the building +blocks for containers. + + +## virtual machine + +A virtual machine is a program that emulates a complete computer and imitates dedicated hardware. +It shares physical hardware resources with other users but isolates the operating system. The +end user has the same experience on a Virtual Machine as they would have on dedicated hardware. + +Compared to containers, a virtual machine is heavier to run, provides more isolation, +gets its own set of resources and does minimal sharing. + +*Also known as : VM* diff --git a/vendor/github.com/docker/docker/docs/reference/index.md b/vendor/github.com/docker/docker/docs/reference/index.md new file mode 100644 index 0000000..f24c342 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/index.md @@ -0,0 +1,21 @@ +--- +title: "Engine reference" +description: "Docker Engine reference" +keywords: "Engine" +--- + + + +# Engine reference + +* [Dockerfile reference](builder.md) +* [Docker run reference](run.md) +* [Command line reference](commandline/index.md) +* [API Reference](https://docs.docker.com/engine/api/) diff --git a/vendor/github.com/docker/docker/docs/reference/run.md b/vendor/github.com/docker/docker/docs/reference/run.md new file mode 100644 index 0000000..73769ed --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/run.md @@ -0,0 +1,1555 @@ +--- +title: "Docker run reference" +description: "Configure containers at runtime" +keywords: "docker, run, configure, runtime" +--- + + + +# Docker run reference + +Docker runs processes in isolated containers. A container is a process +which runs on a host. The host may be local or remote. When an operator +executes `docker run`, the container process that runs is isolated in +that it has its own file system, its own networking, and its own +isolated process tree separate from the host. + +This page details how to use the `docker run` command to define the +container's resources at runtime. + +## General form + +The basic `docker run` command takes this form: + + $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] + +The `docker run` command must specify an [*IMAGE*](glossary.md#image) +to derive the container from. An image developer can define image +defaults related to: + + * detached or foreground running + * container identification + * network settings + * runtime constraints on CPU and memory + +With the `docker run [OPTIONS]` an operator can add to or override the +image defaults set by a developer. And, additionally, operators can +override nearly all the defaults set by the Docker runtime itself. The +operator's ability to override image and Docker runtime defaults is why +[*run*](commandline/run.md) has more options than any +other `docker` command. + +To learn how to interpret the types of `[OPTIONS]`, see [*Option +types*](commandline/cli.md#option-types). + +> **Note**: Depending on your Docker system configuration, you may be +> required to preface the `docker run` command with `sudo`. To avoid +> having to use `sudo` with the `docker` command, your system +> administrator can create a Unix group called `docker` and add users to +> it. For more information about this configuration, refer to the Docker +> installation documentation for your operating system. + + +## Operator exclusive options + +Only the operator (the person executing `docker run`) can set the +following options. + + - [Detached vs foreground](#detached-vs-foreground) + - [Detached (-d)](#detached--d) + - [Foreground](#foreground) + - [Container identification](#container-identification) + - [Name (--name)](#name---name) + - [PID equivalent](#pid-equivalent) + - [IPC settings (--ipc)](#ipc-settings---ipc) + - [Network settings](#network-settings) + - [Restart policies (--restart)](#restart-policies---restart) + - [Clean up (--rm)](#clean-up---rm) + - [Runtime constraints on resources](#runtime-constraints-on-resources) + - [Runtime privilege and Linux capabilities](#runtime-privilege-and-linux-capabilities) + +## Detached vs foreground + +When starting a Docker container, you must first decide if you want to +run the container in the background in a "detached" mode or in the +default foreground mode: + + -d=false: Detached mode: Run container in the background, print new container id + +### Detached (-d) + +To start a container in detached mode, you use `-d=true` or just `-d` option. By +design, containers started in detached mode exit when the root process used to +run the container exits. A container in detached mode cannot be automatically +removed when it stops, this means you cannot use the `--rm` option with `-d` option. + +Do not pass a `service x start` command to a detached container. For example, this +command attempts to start the `nginx` service. + + $ docker run -d -p 80:80 my_image service nginx start + +This succeeds in starting the `nginx` service inside the container. However, it +fails the detached container paradigm in that, the root process (`service nginx +start`) returns and the detached container stops as designed. As a result, the +`nginx` service is started but could not be used. Instead, to start a process +such as the `nginx` web server do the following: + + $ docker run -d -p 80:80 my_image nginx -g 'daemon off;' + +To do input/output with a detached container use network connections or shared +volumes. These are required because the container is no longer listening to the +command line where `docker run` was run. + +To reattach to a detached container, use `docker` +[*attach*](commandline/attach.md) command. + +### Foreground + +In foreground mode (the default when `-d` is not specified), `docker +run` can start the process in the container and attach the console to +the process's standard input, output, and standard error. It can even +pretend to be a TTY (this is what most command line executables expect) +and pass along signals. All of that is configurable: + + -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR` + -t : Allocate a pseudo-tty + --sig-proxy=true: Proxy all received signals to the process (non-TTY mode only) + -i : Keep STDIN open even if not attached + +If you do not specify `-a` then Docker will [attach to both stdout and stderr +]( https://github.com/docker/docker/blob/4118e0c9eebda2412a09ae66e90c34b85fae3275/runconfig/opts/parse.go#L267). +You can specify to which of the three standard streams (`STDIN`, `STDOUT`, +`STDERR`) you'd like to connect instead, as in: + + $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash + +For interactive processes (like a shell), you must use `-i -t` together in +order to allocate a tty for the container process. `-i -t` is often written `-it` +as you'll see in later examples. Specifying `-t` is forbidden when the client +standard output is redirected or piped, such as in: + + $ echo test | docker run -i busybox cat + +>**Note**: A process running as PID 1 inside a container is treated +>specially by Linux: it ignores any signal with the default action. +>So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is +>coded to do so. + +## Container identification + +### Name (--name) + +The operator can identify a container in three ways: + +| Identifier type | Example value | +| --------------------- | ------------------------------------------------------------------ | +| UUID long identifier | "f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778" | +| UUID short identifier | "f78375b1c487" | +| Name | "evil_ptolemy" | + +The UUID identifiers come from the Docker daemon. If you do not assign a +container name with the `--name` option, then the daemon generates a random +string name for you. Defining a `name` can be a handy way to add meaning to a +container. If you specify a `name`, you can use it when referencing the +container within a Docker network. This works for both background and foreground +Docker containers. + +> **Note**: Containers on the default bridge network must be linked to +> communicate by name. + +### PID equivalent + +Finally, to help with automation, you can have Docker write the +container ID out to a file of your choosing. This is similar to how some +programs might write out their process ID to a file (you've seen them as +PID files): + + --cidfile="": Write the container ID to the file + +### Image[:tag] + +While not strictly a means of identifying a container, you can specify a version of an +image you'd like to run the container with by adding `image[:tag]` to the command. For +example, `docker run ubuntu:14.04`. + +### Image[@digest] + +Images using the v2 or later image format have a content-addressable identifier +called a digest. As long as the input used to generate the image is unchanged, +the digest value is predictable and referenceable. + +The following example runs a container from the `alpine` image with the +`sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0` digest: + + $ docker run alpine@sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0 date + +## PID settings (--pid) + + --pid="" : Set the PID (Process) Namespace mode for the container, + 'container:': joins another container's PID namespace + 'host': use the host's PID namespace inside the container + +By default, all containers have the PID namespace enabled. + +PID namespace provides separation of processes. The PID Namespace removes the +view of the system processes, and allows process ids to be reused including +pid 1. + +In certain cases you want your container to share the host's process namespace, +basically allowing processes within the container to see all of the processes +on the system. For example, you could build a container with debugging tools +like `strace` or `gdb`, but want to use these tools when debugging processes +within the container. + +### Example: run htop inside a container + +Create this Dockerfile: + +``` +FROM alpine:latest +RUN apk add --update htop && rm -rf /var/cache/apk/* +CMD ["htop"] +``` + +Build the Dockerfile and tag the image as `myhtop`: + +```bash +$ docker build -t myhtop . +``` + +Use the following command to run `htop` inside a container: + +``` +$ docker run -it --rm --pid=host myhtop +``` + +Joining another container's pid namespace can be used for debugging that container. + +### Example + +Start a container running a redis server: + +```bash +$ docker run --name my-redis -d redis +``` + +Debug the redis container by running another container that has strace in it: + +```bash +$ docker run -it --pid=container:my-redis my_strace_docker_image bash +$ strace -p 1 +``` + +## UTS settings (--uts) + + --uts="" : Set the UTS namespace mode for the container, + 'host': use the host's UTS namespace inside the container + +The UTS namespace is for setting the hostname and the domain that is visible +to running processes in that namespace. By default, all containers, including +those with `--network=host`, have their own UTS namespace. The `host` setting will +result in the container using the same UTS namespace as the host. Note that +`--hostname` is invalid in `host` UTS mode. + +You may wish to share the UTS namespace with the host if you would like the +hostname of the container to change as the hostname of the host changes. A +more advanced use case would be changing the host's hostname from a container. + +## IPC settings (--ipc) + + --ipc="" : Set the IPC mode for the container, + 'container:': reuses another container's IPC namespace + 'host': use the host's IPC namespace inside the container + +By default, all containers have the IPC namespace enabled. + +IPC (POSIX/SysV IPC) namespace provides separation of named shared memory +segments, semaphores and message queues. + +Shared memory segments are used to accelerate inter-process communication at +memory speed, rather than through pipes or through the network stack. Shared +memory is commonly used by databases and custom-built (typically C/OpenMPI, +C++/using boost libraries) high performance applications for scientific +computing and financial services industries. If these types of applications +are broken into multiple containers, you might need to share the IPC mechanisms +of the containers. + +## Network settings + + --dns=[] : Set custom dns servers for the container + --network="bridge" : Connect a container to a network + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --network-alias=[] : Add network-scoped alias for the container + --add-host="" : Add a line to /etc/hosts (host:IP) + --mac-address="" : Sets the container's Ethernet device's MAC address + --ip="" : Sets the container's Ethernet device's IPv4 address + --ip6="" : Sets the container's Ethernet device's IPv6 address + --link-local-ip=[] : Sets one or more container's Ethernet device's link local IPv4/IPv6 addresses + +By default, all containers have networking enabled and they can make any +outgoing connections. The operator can completely disable networking +with `docker run --network none` which disables all incoming and outgoing +networking. In cases like this, you would perform I/O through files or +`STDIN` and `STDOUT` only. + +Publishing ports and linking to other containers only works with the default (bridge). The linking feature is a legacy feature. You should always prefer using Docker network drivers over linking. + +Your container will use the same DNS servers as the host by default, but +you can override this with `--dns`. + +By default, the MAC address is generated using the IP address allocated to the +container. You can set the container's MAC address explicitly by providing a +MAC address via the `--mac-address` parameter (format:`12:34:56:78:9a:bc`).Be +aware that Docker does not check if manually specified MAC addresses are unique. + +Supported networks : + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NetworkDescription
none + No networking in the container. +
bridge (default) + Connect the container to the bridge via veth interfaces. +
host + Use the host's network stack inside the container. +
container:<name|id> + Use the network stack of another container, specified via + its name or id. +
NETWORK + Connects the container to a user created network (using docker network create command) +
+ +#### Network: none + +With the network is `none` a container will not have +access to any external routes. The container will still have a +`loopback` interface enabled in the container but it does not have any +routes to external traffic. + +#### Network: bridge + +With the network set to `bridge` a container will use docker's +default networking setup. A bridge is setup on the host, commonly named +`docker0`, and a pair of `veth` interfaces will be created for the +container. One side of the `veth` pair will remain on the host attached +to the bridge while the other side of the pair will be placed inside the +container's namespaces in addition to the `loopback` interface. An IP +address will be allocated for containers on the bridge's network and +traffic will be routed though this bridge to the container. + +Containers can communicate via their IP addresses by default. To communicate by +name, they must be linked. + +#### Network: host + +With the network set to `host` a container will share the host's +network stack and all interfaces from the host will be available to the +container. The container's hostname will match the hostname on the host +system. Note that `--mac-address` is invalid in `host` netmode. Even in `host` +network mode a container has its own UTS namespace by default. As such +`--hostname` is allowed in `host` network mode and will only change the +hostname inside the container. +Similar to `--hostname`, the `--add-host`, `--dns`, `--dns-search`, and +`--dns-option` options can be used in `host` network mode. These options update +`/etc/hosts` or `/etc/resolv.conf` inside the container. No change are made to +`/etc/hosts` and `/etc/resolv.conf` on the host. + +Compared to the default `bridge` mode, the `host` mode gives *significantly* +better networking performance since it uses the host's native networking stack +whereas the bridge has to go through one level of virtualization through the +docker daemon. It is recommended to run containers in this mode when their +networking performance is critical, for example, a production Load Balancer +or a High Performance Web Server. + +> **Note**: `--network="host"` gives the container full access to local system +> services such as D-bus and is therefore considered insecure. + +#### Network: container + +With the network set to `container` a container will share the +network stack of another container. The other container's name must be +provided in the format of `--network container:`. Note that `--add-host` +`--hostname` `--dns` `--dns-search` `--dns-option` and `--mac-address` are +invalid in `container` netmode, and `--publish` `--publish-all` `--expose` are +also invalid in `container` netmode. + +Example running a Redis container with Redis binding to `localhost` then +running the `redis-cli` command and connecting to the Redis server over the +`localhost` interface. + + $ docker run -d --name redis example/redis --bind 127.0.0.1 + $ # use the redis container's network stack to access localhost + $ docker run --rm -it --network container:redis example/redis-cli -h 127.0.0.1 + +#### User-defined network + +You can create a network using a Docker network driver or an external network +driver plugin. You can connect multiple containers to the same network. Once +connected to a user-defined network, the containers can communicate easily using +only another container's IP address or name. + +For `overlay` networks or custom plugins that support multi-host connectivity, +containers connected to the same multi-host network but launched from different +Engines can also communicate in this way. + +The following example creates a network using the built-in `bridge` network +driver and running a container in the created network + +``` +$ docker network create -d bridge my-net +$ docker run --network=my-net -itd --name=container3 busybox +``` + +### Managing /etc/hosts + +Your container will have lines in `/etc/hosts` which define the hostname of the +container itself as well as `localhost` and a few other common things. The +`--add-host` flag can be used to add additional lines to `/etc/hosts`. + + $ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts + 172.17.0.22 09d03f76bf2c + fe00::0 ip6-localnet + ff00::0 ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + 86.75.30.9 db-static + +If a container is connected to the default bridge network and `linked` +with other containers, then the container's `/etc/hosts` file is updated +with the linked container's name. + +If the container is connected to user-defined network, the container's +`/etc/hosts` file is updated with names of all other containers in that +user-defined network. + +> **Note** Since Docker may live update the container’s `/etc/hosts` file, there +may be situations when processes inside the container can end up reading an +empty or incomplete `/etc/hosts` file. In most cases, retrying the read again +should fix the problem. + +## Restart policies (--restart) + +Using the `--restart` flag on Docker run you can specify a restart policy for +how a container should or should not be restarted on exit. + +When a restart policy is active on a container, it will be shown as either `Up` +or `Restarting` in [`docker ps`](commandline/ps.md). It can also be +useful to use [`docker events`](commandline/events.md) to see the +restart policy in effect. + +Docker supports the following restart policies: + + + + + + + + + + + + + + + + + + + + + + + + + + +
PolicyResult
no + Do not automatically restart the container when it exits. This is the + default. +
+ + on-failure[:max-retries] + + + Restart only if the container exits with a non-zero exit status. + Optionally, limit the number of restart retries the Docker + daemon attempts. +
always + Always restart the container regardless of the exit status. + When you specify always, the Docker daemon will try to restart + the container indefinitely. The container will also always start + on daemon startup, regardless of the current state of the container. +
unless-stopped + Always restart the container regardless of the exit status, but + do not start it on daemon startup if the container has been put + to a stopped state before. +
+ +An ever increasing delay (double the previous delay, starting at 100 +milliseconds) is added before each restart to prevent flooding the server. +This means the daemon will wait for 100 ms, then 200 ms, 400, 800, 1600, +and so on until either the `on-failure` limit is hit, or when you `docker stop` +or `docker rm -f` the container. + +If a container is successfully restarted (the container is started and runs +for at least 10 seconds), the delay is reset to its default value of 100 ms. + +You can specify the maximum amount of times Docker will try to restart the +container when using the **on-failure** policy. The default is that Docker +will try forever to restart the container. The number of (attempted) restarts +for a container can be obtained via [`docker inspect`](commandline/inspect.md). For example, to get the number of restarts +for container "my-container"; + + {% raw %} + $ docker inspect -f "{{ .RestartCount }}" my-container + # 2 + {% endraw %} + +Or, to get the last time the container was (re)started; + + {% raw %} + $ docker inspect -f "{{ .State.StartedAt }}" my-container + # 2015-03-04T23:47:07.691840179Z + {% endraw %} + + +Combining `--restart` (restart policy) with the `--rm` (clean up) flag results +in an error. On container restart, attached clients are disconnected. See the +examples on using the [`--rm` (clean up)](#clean-up-rm) flag later in this page. + +### Examples + + $ docker run --restart=always redis + +This will run the `redis` container with a restart policy of **always** +so that if the container exits, Docker will restart it. + + $ docker run --restart=on-failure:10 redis + +This will run the `redis` container with a restart policy of **on-failure** +and a maximum restart count of 10. If the `redis` container exits with a +non-zero exit status more than 10 times in a row Docker will abort trying to +restart the container. Providing a maximum restart limit is only valid for the +**on-failure** policy. + +## Exit Status + +The exit code from `docker run` gives information about why the container +failed to run or why it exited. When `docker run` exits with a non-zero code, +the exit codes follow the `chroot` standard, see below: + +**_125_** if the error is with Docker daemon **_itself_** + + $ docker run --foo busybox; echo $? + # flag provided but not defined: --foo + See 'docker run --help'. + 125 + +**_126_** if the **_contained command_** cannot be invoked + + $ docker run busybox /etc; echo $? + # docker: Error response from daemon: Container command '/etc' could not be invoked. + 126 + +**_127_** if the **_contained command_** cannot be found + + $ docker run busybox foo; echo $? + # docker: Error response from daemon: Container command 'foo' not found or does not exist. + 127 + +**_Exit code_** of **_contained command_** otherwise + + $ docker run busybox /bin/sh -c 'exit 3'; echo $? + # 3 + +## Clean up (--rm) + +By default a container's file system persists even after the container +exits. This makes debugging a lot easier (since you can inspect the +final state) and you retain all your data by default. But if you are +running short-term **foreground** processes, these container file +systems can really pile up. If instead you'd like Docker to +**automatically clean up the container and remove the file system when +the container exits**, you can add the `--rm` flag: + + --rm=false: Automatically remove the container when it exits (incompatible with -d) + +> **Note**: When you set the `--rm` flag, Docker also removes the volumes +associated with the container when the container is removed. This is similar +to running `docker rm -v my-container`. Only volumes that are specified without a +name are removed. For example, with +`docker run --rm -v /foo -v awesome:/bar busybox top`, the volume for `/foo` will be removed, +but the volume for `/bar` will not. Volumes inherited via `--volumes-from` will be removed +with the same logic -- if the original volume was specified with a name it will **not** be removed. + +## Security configuration + --security-opt="label=user:USER" : Set the label user for the container + --security-opt="label=role:ROLE" : Set the label role for the container + --security-opt="label=type:TYPE" : Set the label type for the container + --security-opt="label=level:LEVEL" : Set the label level for the container + --security-opt="label=disable" : Turn off label confinement for the container + --security-opt="apparmor=PROFILE" : Set the apparmor profile to be applied to the container + --security-opt="no-new-privileges" : Disable container processes from gaining new privileges + --security-opt="seccomp=unconfined" : Turn off seccomp confinement for the container + --security-opt="seccomp=profile.json": White listed syscalls seccomp Json file to be used as a seccomp filter + + +You can override the default labeling scheme for each container by specifying +the `--security-opt` flag. Specifying the level in the following command +allows you to share the same content between containers. + + $ docker run --security-opt label=level:s0:c100,c200 -it fedora bash + +> **Note**: Automatic translation of MLS labels is not currently supported. + +To disable the security labeling for this container versus running with the +`--privileged` flag, use the following command: + + $ docker run --security-opt label=disable -it fedora bash + +If you want a tighter security policy on the processes within a container, +you can specify an alternate type for the container. You could run a container +that is only allowed to listen on Apache ports by executing the following +command: + + $ docker run --security-opt label=type:svirt_apache_t -it centos bash + +> **Note**: You would have to write policy defining a `svirt_apache_t` type. + +If you want to prevent your container processes from gaining additional +privileges, you can execute the following command: + + $ docker run --security-opt no-new-privileges -it centos bash + +This means that commands that raise privileges such as `su` or `sudo` will no longer work. +It also causes any seccomp filters to be applied later, after privileges have been dropped +which may mean you can have a more restrictive set of filters. +For more details, see the [kernel documentation](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt). + +## Specifying custom cgroups + +Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a +container in. This allows you to create and manage cgroups on their own. You can +define custom resources for those cgroups and put containers under a common +parent group. + +## Runtime constraints on resources + +The operator can also adjust the performance parameters of the +container: + +| Option | Description | +| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| `-m`, `--memory=""` | Memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | +| `--memory-swap=""` | Total memory limit (memory + swap, format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | +| `--memory-reservation=""` | Memory soft limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | +| `--kernel-memory=""` | Kernel memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | +| `-c`, `--cpu-shares=0` | CPU shares (relative weight) | +| `--cpus=0.000` | Number of CPUs. Number is a fractional number. 0.000 means no limit. | +| `--cpu-period=0` | Limit the CPU CFS (Completely Fair Scheduler) period | +| `--cpuset-cpus=""` | CPUs in which to allow execution (0-3, 0,1) | +| `--cpuset-mems=""` | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. | +| `--cpu-quota=0` | Limit the CPU CFS (Completely Fair Scheduler) quota | +| `--cpu-rt-period=0` | Limit the CPU real-time period. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | +| `--cpu-rt-runtime=0` | Limit the CPU real-time runtime. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | +| `--blkio-weight=0` | Block IO weight (relative weight) accepts a weight value between 10 and 1000. | +| `--blkio-weight-device=""` | Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`) | +| `--device-read-bps=""` | Limit read rate from a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | +| `--device-write-bps=""` | Limit write rate to a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | +| `--device-read-iops="" ` | Limit read rate (IO per second) from a device (format: `:`). Number is a positive integer. | +| `--device-write-iops="" ` | Limit write rate (IO per second) to a device (format: `:`). Number is a positive integer. | +| `--oom-kill-disable=false` | Whether to disable OOM Killer for the container or not. | +| `--oom-score-adj=0` | Tune container's OOM preferences (-1000 to 1000) | +| `--memory-swappiness=""` | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. | +| `--shm-size=""` | Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. | + +### User memory constraints + +We have four ways to set user memory usage: + + + + + + + + + + + + + + + + + + + + + + + + + + +
OptionResult
+ memory=inf, memory-swap=inf (default) + + There is no memory limit for the container. The container can use + as much memory as needed. +
memory=L<inf, memory-swap=inf + (specify memory and set memory-swap as -1) The container is + not allowed to use more than L bytes of memory, but can use as much swap + as is needed (if the host supports swap memory). +
memory=L<inf, memory-swap=2*L + (specify memory without memory-swap) The container is not allowed to + use more than L bytes of memory, swap plus memory usage is double + of that. +
+ memory=L<inf, memory-swap=S<inf, L<=S + + (specify both memory and memory-swap) The container is not allowed to + use more than L bytes of memory, swap plus memory usage is limited + by S. +
+ +Examples: + + $ docker run -it ubuntu:14.04 /bin/bash + +We set nothing about memory, this means the processes in the container can use +as much memory and swap memory as they need. + + $ docker run -it -m 300M --memory-swap -1 ubuntu:14.04 /bin/bash + +We set memory limit and disabled swap memory limit, this means the processes in +the container can use 300M memory and as much swap memory as they need (if the +host supports swap memory). + + $ docker run -it -m 300M ubuntu:14.04 /bin/bash + +We set memory limit only, this means the processes in the container can use +300M memory and 300M swap memory, by default, the total virtual memory size +(--memory-swap) will be set as double of memory, in this case, memory + swap +would be 2*300M, so processes can use 300M swap memory as well. + + $ docker run -it -m 300M --memory-swap 1G ubuntu:14.04 /bin/bash + +We set both memory and swap memory, so the processes in the container can use +300M memory and 700M swap memory. + +Memory reservation is a kind of memory soft limit that allows for greater +sharing of memory. Under normal circumstances, containers can use as much of +the memory as needed and are constrained only by the hard limits set with the +`-m`/`--memory` option. When memory reservation is set, Docker detects memory +contention or low memory and forces containers to restrict their consumption to +a reservation limit. + +Always set the memory reservation value below the hard limit, otherwise the hard +limit takes precedence. A reservation of 0 is the same as setting no +reservation. By default (without reservation set), memory reservation is the +same as the hard memory limit. + +Memory reservation is a soft-limit feature and does not guarantee the limit +won't be exceeded. Instead, the feature attempts to ensure that, when memory is +heavily contended for, memory is allocated based on the reservation hints/setup. + +The following example limits the memory (`-m`) to 500M and sets the memory +reservation to 200M. + +```bash +$ docker run -it -m 500M --memory-reservation 200M ubuntu:14.04 /bin/bash +``` + +Under this configuration, when the container consumes memory more than 200M and +less than 500M, the next system memory reclaim attempts to shrink container +memory below 200M. + +The following example set memory reservation to 1G without a hard memory limit. + +```bash +$ docker run -it --memory-reservation 1G ubuntu:14.04 /bin/bash +``` + +The container can use as much memory as it needs. The memory reservation setting +ensures the container doesn't consume too much memory for long time, because +every memory reclaim shrinks the container's consumption to the reservation. + +By default, kernel kills processes in a container if an out-of-memory (OOM) +error occurs. To change this behaviour, use the `--oom-kill-disable` option. +Only disable the OOM killer on containers where you have also set the +`-m/--memory` option. If the `-m` flag is not set, this can result in the host +running out of memory and require killing the host's system processes to free +memory. + +The following example limits the memory to 100M and disables the OOM killer for +this container: + + $ docker run -it -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash + +The following example, illustrates a dangerous way to use the flag: + + $ docker run -it --oom-kill-disable ubuntu:14.04 /bin/bash + +The container has unlimited memory which can cause the host to run out memory +and require killing system processes to free memory. The `--oom-score-adj` +parameter can be changed to select the priority of which containers will +be killed when the system is out of memory, with negative scores making them +less likely to be killed an positive more likely. + +### Kernel memory constraints + +Kernel memory is fundamentally different than user memory as kernel memory can't +be swapped out. The inability to swap makes it possible for the container to +block system services by consuming too much kernel memory. Kernel memory includes: + + - stack pages + - slab pages + - sockets memory pressure + - tcp memory pressure + +You can setup kernel memory limit to constrain these kinds of memory. For example, +every process consumes some stack pages. By limiting kernel memory, you can +prevent new processes from being created when the kernel memory usage is too high. + +Kernel memory is never completely independent of user memory. Instead, you limit +kernel memory in the context of the user memory limit. Assume "U" is the user memory +limit and "K" the kernel limit. There are three possible ways to set limits: + + + + + + + + + + + + + + + + + + + + + + +
OptionResult
U != 0, K = inf (default) + This is the standard memory limitation mechanism already present before using + kernel memory. Kernel memory is completely ignored. +
U != 0, K < U + Kernel memory is a subset of the user memory. This setup is useful in + deployments where the total amount of memory per-cgroup is overcommitted. + Overcommitting kernel memory limits is definitely not recommended, since the + box can still run out of non-reclaimable memory. + In this case, you can configure K so that the sum of all groups is + never greater than the total memory. Then, freely set U at the expense of + the system's service quality. +
U != 0, K > U + Since kernel memory charges are also fed to the user counter and reclamation + is triggered for the container for both kinds of memory. This configuration + gives the admin a unified view of memory. It is also useful for people + who just want to track kernel memory usage. +
+ +Examples: + + $ docker run -it -m 500M --kernel-memory 50M ubuntu:14.04 /bin/bash + +We set memory and kernel memory, so the processes in the container can use +500M memory in total, in this 500M memory, it can be 50M kernel memory tops. + + $ docker run -it --kernel-memory 50M ubuntu:14.04 /bin/bash + +We set kernel memory without **-m**, so the processes in the container can +use as much memory as they want, but they can only use 50M kernel memory. + +### Swappiness constraint + +By default, a container's kernel can swap out a percentage of anonymous pages. +To set this percentage for a container, specify a `--memory-swappiness` value +between 0 and 100. A value of 0 turns off anonymous page swapping. A value of +100 sets all anonymous pages as swappable. By default, if you are not using +`--memory-swappiness`, memory swappiness value will be inherited from the parent. + +For example, you can set: + + $ docker run -it --memory-swappiness=0 ubuntu:14.04 /bin/bash + +Setting the `--memory-swappiness` option is helpful when you want to retain the +container's working set and to avoid swapping performance penalties. + +### CPU share constraint + +By default, all containers get the same proportion of CPU cycles. This proportion +can be modified by changing the container's CPU share weighting relative +to the weighting of all other running containers. + +To modify the proportion from the default of 1024, use the `-c` or `--cpu-shares` +flag to set the weighting to 2 or higher. If 0 is set, the system will ignore the +value and use the default of 1024. + +The proportion will only apply when CPU-intensive processes are running. +When tasks in one container are idle, other containers can use the +left-over CPU time. The actual amount of CPU time will vary depending on +the number of containers running on the system. + +For example, consider three containers, one has a cpu-share of 1024 and +two others have a cpu-share setting of 512. When processes in all three +containers attempt to use 100% of CPU, the first container would receive +50% of the total CPU time. If you add a fourth container with a cpu-share +of 1024, the first container only gets 33% of the CPU. The remaining containers +receive 16.5%, 16.5% and 33% of the CPU. + +On a multi-core system, the shares of CPU time are distributed over all CPU +cores. Even if a container is limited to less than 100% of CPU time, it can +use 100% of each individual CPU core. + +For example, consider a system with more than three cores. If you start one +container `{C0}` with `-c=512` running one process, and another container +`{C1}` with `-c=1024` running two processes, this can result in the following +division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +### CPU period constraint + +The default CPU CFS (Completely Fair Scheduler) period is 100ms. We can use +`--cpu-period` to set the period of CPUs to limit the container's CPU usage. +And usually `--cpu-period` should work with `--cpu-quota`. + +Examples: + + $ docker run -it --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash + +If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms. + +In addition to use `--cpu-period` and `--cpu-quota` for setting CPU period constraints, +it is possible to specify `--cpus` with a float number to achieve the same purpose. +For example, if there is 1 CPU, then `--cpus=0.5` will achieve the same result as +setting `--cpu-period=50000` and `--cpu-quota=25000` (50% CPU). + +The default value for `--cpus` is `0.000`, which means there is no limit. + +For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). + +### Cpuset constraint + +We can set cpus in which to allow execution for containers. + +Examples: + + $ docker run -it --cpuset-cpus="1,3" ubuntu:14.04 /bin/bash + +This means processes in container can be executed on cpu 1 and cpu 3. + + $ docker run -it --cpuset-cpus="0-2" ubuntu:14.04 /bin/bash + +This means processes in container can be executed on cpu 0, cpu 1 and cpu 2. + +We can set mems in which to allow execution for containers. Only effective +on NUMA systems. + +Examples: + + $ docker run -it --cpuset-mems="1,3" ubuntu:14.04 /bin/bash + +This example restricts the processes in the container to only use memory from +memory nodes 1 and 3. + + $ docker run -it --cpuset-mems="0-2" ubuntu:14.04 /bin/bash + +This example restricts the processes in the container to only use memory from +memory nodes 0, 1 and 2. + +### CPU quota constraint + +The `--cpu-quota` flag limits the container's CPU usage. The default 0 value +allows the container to take 100% of a CPU resource (1 CPU). The CFS (Completely Fair +Scheduler) handles resource allocation for executing processes and is default +Linux Scheduler used by the kernel. Set this value to 50000 to limit the container +to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary. +For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). + +### Block IO bandwidth (Blkio) constraint + +By default, all containers get the same proportion of block IO bandwidth +(blkio). This proportion is 500. To modify this proportion, change the +container's blkio weight relative to the weighting of all other running +containers using the `--blkio-weight` flag. + +> **Note:** The blkio weight setting is only available for direct IO. Buffered IO +> is not currently supported. + +The `--blkio-weight` flag can set the weighting to a value between 10 to 1000. +For example, the commands below create two containers with different blkio +weight: + + $ docker run -it --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash + $ docker run -it --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash + +If you do block IO in the two containers at the same time, by, for example: + + $ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct + +You'll find that the proportion of time is the same as the proportion of blkio +weights of the two containers. + +The `--blkio-weight-device="DEVICE_NAME:WEIGHT"` flag sets a specific device weight. +The `DEVICE_NAME:WEIGHT` is a string containing a colon-separated device name and weight. +For example, to set `/dev/sda` device weight to `200`: + + $ docker run -it \ + --blkio-weight-device "/dev/sda:200" \ + ubuntu + +If you specify both the `--blkio-weight` and `--blkio-weight-device`, Docker +uses the `--blkio-weight` as the default weight and uses `--blkio-weight-device` +to override this default with a new value on a specific device. +The following example uses a default weight of `300` and overrides this default +on `/dev/sda` setting that weight to `200`: + + $ docker run -it \ + --blkio-weight 300 \ + --blkio-weight-device "/dev/sda:200" \ + ubuntu + +The `--device-read-bps` flag limits the read rate (bytes per second) from a device. +For example, this command creates a container and limits the read rate to `1mb` +per second from `/dev/sda`: + + $ docker run -it --device-read-bps /dev/sda:1mb ubuntu + +The `--device-write-bps` flag limits the write rate (bytes per second)to a device. +For example, this command creates a container and limits the write rate to `1mb` +per second for `/dev/sda`: + + $ docker run -it --device-write-bps /dev/sda:1mb ubuntu + +Both flags take limits in the `:[unit]` format. Both read +and write rates must be a positive integer. You can specify the rate in `kb` +(kilobytes), `mb` (megabytes), or `gb` (gigabytes). + +The `--device-read-iops` flag limits read rate (IO per second) from a device. +For example, this command creates a container and limits the read rate to +`1000` IO per second from `/dev/sda`: + + $ docker run -ti --device-read-iops /dev/sda:1000 ubuntu + +The `--device-write-iops` flag limits write rate (IO per second) to a device. +For example, this command creates a container and limits the write rate to +`1000` IO per second to `/dev/sda`: + + $ docker run -ti --device-write-iops /dev/sda:1000 ubuntu + +Both flags take limits in the `:` format. Both read and +write rates must be a positive integer. + +## Additional groups + --group-add: Add additional groups to run as + +By default, the docker container process runs with the supplementary groups looked +up for the specified user. If one wants to add more to that list of groups, then +one can use this flag: + + $ docker run --rm --group-add audio --group-add nogroup --group-add 777 busybox id + uid=0(root) gid=0(root) groups=10(wheel),29(audio),99(nogroup),777 + +## Runtime privilege and Linux capabilities + + --cap-add: Add Linux capabilities + --cap-drop: Drop Linux capabilities + --privileged=false: Give extended privileges to this container + --device=[]: Allows you to run devices inside the container without the --privileged flag. + +By default, Docker containers are "unprivileged" and cannot, for +example, run a Docker daemon inside a Docker container. This is because +by default a container is not allowed to access any devices, but a +"privileged" container is given access to all devices (see +the documentation on [cgroups devices](https://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt)). + +When the operator executes `docker run --privileged`, Docker will enable +to access to all devices on the host as well as set some configuration +in AppArmor or SELinux to allow the container nearly all the same access to the +host as processes running outside containers on the host. Additional +information about running with `--privileged` is available on the +[Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/). + +If you want to limit access to a specific device or devices you can use +the `--device` flag. It allows you to specify one or more devices that +will be accessible within the container. + + $ docker run --device=/dev/snd:/dev/snd ... + +By default, the container will be able to `read`, `write`, and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` flag: + + $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk /dev/xvdc + crash.... + + $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted + +In addition to `--privileged`, the operator can have fine grain control over the +capabilities using `--cap-add` and `--cap-drop`. By default, Docker has a default +list of capabilities that are kept. The following table lists the Linux capability +options which are allowed by default and can be dropped. + +| Capability Key | Capability Description | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| SETPCAP | Modify process capabilities. | +| MKNOD | Create special files using mknod(2). | +| AUDIT_WRITE | Write records to kernel auditing log. | +| CHOWN | Make arbitrary changes to file UIDs and GIDs (see chown(2)). | +| NET_RAW | Use RAW and PACKET sockets. | +| DAC_OVERRIDE | Bypass file read, write, and execute permission checks. | +| FOWNER | Bypass permission checks on operations that normally require the file system UID of the process to match the UID of the file. | +| FSETID | Don't clear set-user-ID and set-group-ID permission bits when a file is modified. | +| KILL | Bypass permission checks for sending signals. | +| SETGID | Make arbitrary manipulations of process GIDs and supplementary GID list. | +| SETUID | Make arbitrary manipulations of process UIDs. | +| NET_BIND_SERVICE | Bind a socket to internet domain privileged ports (port numbers less than 1024). | +| SYS_CHROOT | Use chroot(2), change root directory. | +| SETFCAP | Set file capabilities. | + +The next table shows the capabilities which are not granted by default and may be added. + +| Capability Key | Capability Description | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| SYS_MODULE | Load and unload kernel modules. | +| SYS_RAWIO | Perform I/O port operations (iopl(2) and ioperm(2)). | +| SYS_PACCT | Use acct(2), switch process accounting on or off. | +| SYS_ADMIN | Perform a range of system administration operations. | +| SYS_NICE | Raise process nice value (nice(2), setpriority(2)) and change the nice value for arbitrary processes. | +| SYS_RESOURCE | Override resource Limits. | +| SYS_TIME | Set system clock (settimeofday(2), stime(2), adjtimex(2)); set real-time (hardware) clock. | +| SYS_TTY_CONFIG | Use vhangup(2); employ various privileged ioctl(2) operations on virtual terminals. | +| AUDIT_CONTROL | Enable and disable kernel auditing; change auditing filter rules; retrieve auditing status and filtering rules. | +| MAC_OVERRIDE | Allow MAC configuration or state changes. Implemented for the Smack LSM. | +| MAC_ADMIN | Override Mandatory Access Control (MAC). Implemented for the Smack Linux Security Module (LSM). | +| NET_ADMIN | Perform various network-related operations. | +| SYSLOG | Perform privileged syslog(2) operations. | +| DAC_READ_SEARCH | Bypass file read permission checks and directory read and execute permission checks. | +| LINUX_IMMUTABLE | Set the FS_APPEND_FL and FS_IMMUTABLE_FL i-node flags. | +| NET_BROADCAST | Make socket broadcasts, and listen to multicasts. | +| IPC_LOCK | Lock memory (mlock(2), mlockall(2), mmap(2), shmctl(2)). | +| IPC_OWNER | Bypass permission checks for operations on System V IPC objects. | +| SYS_PTRACE | Trace arbitrary processes using ptrace(2). | +| SYS_BOOT | Use reboot(2) and kexec_load(2), reboot and load a new kernel for later execution. | +| LEASE | Establish leases on arbitrary files (see fcntl(2)). | +| WAKE_ALARM | Trigger something that will wake up the system. | +| BLOCK_SUSPEND | Employ features that can block system suspend. | + +Further reference information is available on the [capabilities(7) - Linux man page](http://man7.org/linux/man-pages/man7/capabilities.7.html) + +Both flags support the value `ALL`, so if the +operator wants to have all capabilities but `MKNOD` they could use: + + $ docker run --cap-add=ALL --cap-drop=MKNOD ... + +For interacting with the network stack, instead of using `--privileged` they +should use `--cap-add=NET_ADMIN` to modify the network interfaces. + + $ docker run -it --rm ubuntu:14.04 ip link add dummy0 type dummy + RTNETLINK answers: Operation not permitted + $ docker run -it --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy + +To mount a FUSE based filesystem, you need to combine both `--cap-add` and +`--device`: + + $ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fuse: failed to open /dev/fuse: Operation not permitted + $ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fusermount: mount failed: Operation not permitted + $ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs + # sshfs sven@10.10.10.20:/home/sven /mnt + The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established. + ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6. + Are you sure you want to continue connecting (yes/no)? yes + sven@10.10.10.20's password: + root@30aa0cfaf1b5:/# ls -la /mnt/src/docker + total 1516 + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:08 . + drwxrwxr-x 1 1000 1000 4096 Dec 4 11:46 .. + -rw-rw-r-- 1 1000 1000 16 Oct 8 00:09 .dockerignore + -rwxrwxr-x 1 1000 1000 464 Oct 8 00:09 .drone.yml + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:11 .git + -rw-rw-r-- 1 1000 1000 461 Dec 4 06:08 .gitignore + .... + +The default seccomp profile will adjust to the selected capabilities, in order to allow +use of facilities allowed by the capabilities, so you should not have to adjust this, +since Docker 1.12. In Docker 1.10 and 1.11 this did not happen and it may be necessary +to use a custom seccomp profile or use `--security-opt seccomp=unconfined` when adding +capabilities. + +## Logging drivers (--log-driver) + +The container can have a different logging driver than the Docker daemon. Use +the `--log-driver=VALUE` with the `docker run` command to configure the +container's logging driver. The following options are supported: + +| Driver | Description | +| ----------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `none` | Disables any logging for the container. `docker logs` won't be available with this driver. | +| `json-file` | Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver. | +| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. | +| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. | +| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint likeGraylog or Logstash. | +| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). | +| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs | +| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using Event Http Collector. | + +The `docker logs` command is available only for the `json-file` and `journald` +logging drivers. For detailed information on working with logging drivers, see +[Configure a logging driver](https://docs.docker.com/engine/admin/logging/overview/). + + +## Overriding Dockerfile image defaults + +When a developer builds an image from a [*Dockerfile*](builder.md) +or when she commits it, the developer can set a number of default parameters +that take effect when the image starts up as a container. + +Four of the Dockerfile commands cannot be overridden at runtime: `FROM`, +`MAINTAINER`, `RUN`, and `ADD`. Everything else has a corresponding override +in `docker run`. We'll go through what the developer might have set in each +Dockerfile instruction and how the operator can override that setting. + + - [CMD (Default Command or Options)](#cmd-default-command-or-options) + - [ENTRYPOINT (Default Command to Execute at Runtime)]( + #entrypoint-default-command-to-execute-at-runtime) + - [EXPOSE (Incoming Ports)](#expose-incoming-ports) + - [ENV (Environment Variables)](#env-environment-variables) + - [HEALTHCHECK](#healthcheck) + - [VOLUME (Shared Filesystems)](#volume-shared-filesystems) + - [USER](#user) + - [WORKDIR](#workdir) + +### CMD (default command or options) + +Recall the optional `COMMAND` in the Docker +commandline: + + $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] + +This command is optional because the person who created the `IMAGE` may +have already provided a default `COMMAND` using the Dockerfile `CMD` +instruction. As the operator (the person running a container from the +image), you can override that `CMD` instruction just by specifying a new +`COMMAND`. + +If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` +get appended as arguments to the `ENTRYPOINT`. + +### ENTRYPOINT (default command to execute at runtime) + + --entrypoint="": Overwrite the default entrypoint set by the image + +The `ENTRYPOINT` of an image is similar to a `COMMAND` because it +specifies what executable to run when the container starts, but it is +(purposely) more difficult to override. The `ENTRYPOINT` gives a +container its default nature or behavior, so that when you set an +`ENTRYPOINT` you can run the container *as if it were that binary*, +complete with default options, and you can pass in more options via the +`COMMAND`. But, sometimes an operator may want to run something else +inside the container, so you can override the default `ENTRYPOINT` at +runtime by using a string to specify the new `ENTRYPOINT`. Here is an +example of how to run a shell in a container that has been set up to +automatically run something else (like `/usr/bin/redis-server`): + + $ docker run -it --entrypoint /bin/bash example/redis + +or two examples of how to pass more parameters to that ENTRYPOINT: + + $ docker run -it --entrypoint /bin/bash example/redis -c ls -l + $ docker run -it --entrypoint /usr/bin/redis-cli example/redis --help + +You can reset a containers entrypoint by passing an empty string, for example: + + $ docker run -it --entrypoint="" mysql bash + +> **Note**: Passing `--entrypoint` will clear out any default command set on the +> image (i.e. any `CMD` instruction in the Dockerfile used to build it). + +### EXPOSE (incoming ports) + +The following `run` command options work with container networking: + + --expose=[]: Expose a port or a range of ports inside the container. + These are additional to those exposed by the `EXPOSE` instruction + -P : Publish all exposed ports to the host interfaces + -p=[] : Publish a container᾿s port or a range of ports to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + Both hostPort and containerPort can be specified as a + range of ports. When specifying ranges for both, the + number of container ports in the range must match the + number of host ports in the range, for example: + -p 1234-1236:1234-1236/tcp + + When specifying a range for hostPort only, the + containerPort must not be a range. In this case the + container port is published somewhere within the + specified hostPort range. (e.g., `-p 1234-1236:1234/tcp`) + + (use 'docker port' to see the actual mapping) + + --link="" : Add link to another container (:alias or ) + +With the exception of the `EXPOSE` directive, an image developer hasn't +got much control over networking. The `EXPOSE` instruction defines the +initial incoming ports that provide services. These ports are available +to processes inside the container. An operator can use the `--expose` +option to add to the exposed ports. + +To expose a container's internal port, an operator can start the +container with the `-P` or `-p` flag. The exposed port is accessible on +the host and the ports are available to any client that can reach the +host. + +The `-P` option publishes all the ports to the host interfaces. Docker +binds each exposed port to a random port on the host. The range of +ports are within an *ephemeral port range* defined by +`/proc/sys/net/ipv4/ip_local_port_range`. Use the `-p` flag to +explicitly map a single port or range of ports. + +The port number inside the container (where the service listens) does +not need to match the port number exposed on the outside of the +container (where clients connect). For example, inside the container an +HTTP service is listening on port 80 (and so the image developer +specifies `EXPOSE 80` in the Dockerfile). At runtime, the port might be +bound to 42800 on the host. To find the mapping between the host ports +and the exposed ports, use `docker port`. + +If the operator uses `--link` when starting a new client container in the +default bridge network, then the client container can access the exposed +port via a private networking interface. +If `--link` is used when starting a container in a user-defined network as +described in [*Docker network overview*](https://docs.docker.com/engine/userguide/networking/), +it will provide a named alias for the container being linked to. + +### ENV (environment variables) + +When a new container is created, Docker will set the following environment +variables automatically: + +| Variable | Value | +| -------- | ----- | +| `HOME` | Set based on the value of `USER` | +| `HOSTNAME` | The hostname associated with the container | +| `PATH` | Includes popular directories, such as `:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin` | +| `TERM` | `xterm` if the container is allocated a pseudo-TTY | + +Additionally, the operator can **set any environment variable** in the +container by using one or more `-e` flags, even overriding those mentioned +above, or already defined by the developer with a Dockerfile `ENV`: + + $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export + declare -x HOME="/" + declare -x HOSTNAME="85bc26a0e200" + declare -x OLDPWD + declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + declare -x PWD="/" + declare -x SHLVL="1" + declare -x deep="purple" + +Similarly the operator can set the **hostname** with `-h`. + +### HEALTHCHECK + +``` + --health-cmd Command to run to check health + --health-interval Time between running the check + --health-retries Consecutive failures needed to report unhealthy + --health-timeout Maximum time to allow one check to run + --no-healthcheck Disable any container-specified HEALTHCHECK +``` + +Example: + + {% raw %} + $ docker run --name=test -d \ + --health-cmd='stat /etc/passwd || exit 1' \ + --health-interval=2s \ + busybox sleep 1d + $ sleep 2; docker inspect --format='{{.State.Health.Status}}' test + healthy + $ docker exec test rm /etc/passwd + $ sleep 2; docker inspect --format='{{json .State.Health}}' test + { + "Status": "unhealthy", + "FailingStreak": 3, + "Log": [ + { + "Start": "2016-05-25T17:22:04.635478668Z", + "End": "2016-05-25T17:22:04.7272552Z", + "ExitCode": 0, + "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." + }, + { + "Start": "2016-05-25T17:22:06.732900633Z", + "End": "2016-05-25T17:22:06.822168935Z", + "ExitCode": 0, + "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." + }, + { + "Start": "2016-05-25T17:22:08.823956535Z", + "End": "2016-05-25T17:22:08.897359124Z", + "ExitCode": 1, + "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" + }, + { + "Start": "2016-05-25T17:22:10.898802931Z", + "End": "2016-05-25T17:22:10.969631866Z", + "ExitCode": 1, + "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" + }, + { + "Start": "2016-05-25T17:22:12.971033523Z", + "End": "2016-05-25T17:22:13.082015516Z", + "ExitCode": 1, + "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" + } + ] + } + {% endraw %} + +The health status is also displayed in the `docker ps` output. + +### TMPFS (mount tmpfs filesystems) + +```bash +--tmpfs=[]: Create a tmpfs mount with: container-dir[:], + where the options are identical to the Linux + 'mount -t tmpfs -o' command. +``` + +The example below mounts an empty tmpfs into the container with the `rw`, +`noexec`, `nosuid`, and `size=65536k` options. + + $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image + +### VOLUME (shared filesystems) + + -v, --volume=[host-src:]container-dest[:]: Bind mount a volume. + The comma-delimited `options` are [rw|ro], [z|Z], + [[r]shared|[r]slave|[r]private], and [nocopy]. + The 'host-src' is an absolute path or a name value. + + If neither 'rw' or 'ro' is specified then the volume is mounted in + read-write mode. + + The `nocopy` modes is used to disable automatic copying requested volume + path in the container to the volume storage location. + For named volumes, `copy` is the default mode. Copy modes are not supported + for bind-mounted volumes. + + --volumes-from="": Mount all volumes from the given container(s) + +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. The value of this setting may cause Docker to not +> see mount propagation changes made on the mount point. For example, if this value +> is `slave`, you may not be able to use the `shared` or `rshared` propagation on +> a volume. + +The volumes commands are complex enough to have their own documentation +in section [*Manage data in +containers*](https://docs.docker.com/engine/tutorials/dockervolumes/). A developer can define +one or more `VOLUME`'s associated with an image, but only the operator +can give access from one container to another (or from a container to a +volume mounted on the host). + +The `container-dest` must always be an absolute path such as `/src/docs`. +The `host-src` can either be an absolute path or a `name` value. If you +supply an absolute path for the `host-dir`, Docker bind-mounts to the path +you specify. If you supply a `name`, Docker creates a named volume by that `name`. + +A `name` value must start with an alphanumeric character, +followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen). +An absolute path starts with a `/` (forward slash). + +For example, you can specify either `/foo` or `foo` for a `host-src` value. +If you supply the `/foo` value, Docker creates a bind-mount. If you supply +the `foo` specification, Docker creates a named volume. + +### USER + +`root` (id = 0) is the default user within a container. The image developer can +create additional users. Those users are accessible by name. When passing a numeric +ID, the user does not have to exist in the container. + +The developer can set a default user to run the first process with the +Dockerfile `USER` instruction. When starting a container, the operator can override +the `USER` instruction by passing the `-u` option. + + -u="", --user="": Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user=[ user | user:group | uid | uid:gid | user:gid | uid:group ] + +> **Note:** if you pass a numeric uid, it must be in the range of 0-2147483647. + +### WORKDIR + +The default working directory for running binaries within a container is the +root directory (`/`), but the developer can set a different default with the +Dockerfile `WORKDIR` command. The operator can override this with: + + -w="": Working directory inside the container diff --git a/vendor/github.com/docker/docker/docs/static_files/contributors.png b/vendor/github.com/docker/docker/docs/static_files/contributors.png new file mode 100644 index 0000000000000000000000000000000000000000..63c0a0c09b58bce2e1ade867760a937612934202 GIT binary patch literal 23100 zcmb@OV{j(E_x888ZEbB^Tbp;=t!>=3ZQHint!;a2+qU)J@BCgqZ=W|wCYelTGH1?Q z`J8JKt|%{o0E-Lzo_Y4dhY>#tUQd@oOt3`hoOL|BKcJqN3Vxy$AhV?ax|X zwP7_xdmcs=u4=&I4-7>PVT51Et0V?6adF`Fly>RguBKa_?u?$lQ0phh>{!;bJF?oY zC)U*RkjI zsZ{9ymFQPP=Ffni?-C9P+OC9;+m(1`#sifR2o?cG#SMq<7^FFga@MBPTZc}}8{OtP z!CXnufoQzjhXU6pDu?FO?q?N~x)gG+s5_)nwFPTwBYb7~CibK72sE%1)SrJd^${!^ zEHw&Dtkf^i4vaU^uU^}gy=)4@H#0a?oWg*dF~t!rx~8uPt`)MO;*-1xScpDwk8Yd6 zV|oWAE&v<#MZbeL_k~-u+`8|Wmvs+s;S27t7f}( zmXlHcCaBX3IF<@+Er8_&TT#hEUm$9AWJhM+a2DG>$2af5AN|C9xfYFRFuLL?G1+^h z)!T!@8{JnMuGS?na}ca1a>7RB3N7|6Ju30;Ef@hw{DK01K4-F7z?2a1d z6xYB&;G|Ie<*=I5*<4$#T&O(X^Lx-a9hgg%5-@!cNcW>GOGntHUgUcesfkwoDU2}( zp^XvJc&>8m=-3u88Wgay@j*@{{4f45)4=9Jiy38dDa1naT<3nu?&eE&0d1Rn`W{JW z+U8LtqZPyYp%!)zg2B{3Lbkcu{ZEP<^T>cg)(%UgkghgLTC7~CpI^-|9klpVyAWH7 zHV%sv1WpJwWSw3XfkAq0r09b)a@xX9bKHO9xrn_r-#E&xkbhxq3~^b?MMfTMrD`?h zS1|01CL;=KT9+@O+wU!YZtx6RE#mF=ft=3IwPZU@AV(b-C>&!>dN-kzGwftdTF&rd zV2HwHnX$g0Btk=EJ0UuWB9?h;sVo?F6sphY7i^nLaqx;0#|-1_NIN znQ=;!ID#{lTECe0DwM(&vRwJD*)Gz$_3@es`+CpqM7Q%IYfT1VZ34@S>|}^$zHlkX zqF8IhE*z}@E5)`|)vYmMV8Fuuwl#g)M;dnrznCsx^)FDd&2BGw3%jIv9)+lZGfX0) zYXR{+(nD1^wvq>61Pi@=cYUqu1fPu({)DXZT}E*gsEJb65~g}4&2#wpn~EreB`Aiq zg7Pbi@B7eVz2$nQVzSKlXewM|_q0II6N#6^m%zv+)CQ#e{aek6h^j*oSezfg@@ES8MT#Kt;stNsM)7<|y0gZ}3|BfisAPKfpssv+PnM%1VQY)$rhkVcvBn)CNqo=Dw6JxNoo5 z+?S)IhN;I!PR9W}TV0%Cg%P|=Rwc*vuJcWXX}&i%Q7k8eY_YQNJh(%Oe)&`@<6og9 zh)NBcA`1moGJ-+ovIx@i7-yFFK0~yQnAM8Sr9^6XuoX!AbC?P$q2~6iiAfl=nAEpt ztl<$HhP=f;xkH2R(?;rl($kw#&`v9rb%-xrT#kQOtTDMAWxbBS4HQeav)Z{pn`{*f zNzch%el?n$o;f}o{1b;5TUzc)DubRG z_P6s%V$>GskUzg(#JC12ZBkjI>s2sl+{Py{-jh`1@wQ-7m7=K;DQA=*epw=LFrIgt zbYj&O@gA1R^a2EOt0z3ZbP);;o)nTj2r^hI#QW~6(^`bRt$iy zl()J$C_Q(fa_hBYvjNUJAg)_uN(5Yc^KwcJf^P4KI#lO(yt;Dt%5_C^SN+!I$*8R( z9M?!Cjh>#_`&aK}=msRvaedcGO2XFZhr>Gl8X2nHL0P#*@kV6UApxH+v_8;{N_M0Pl$r%yaDSqn6uqqIJBFa=5ojh-~A-^ zPmeoEHRJI_JUY^dNhY?vAL@v^rHqsPz1_@V2OKVYs(b?9_I#N5q3U0Chzi|5KU0n> zRmfWd!I0N3rM@IH1c^x0U4q4o!Kk0L38x(yTR!)X$*8xmmY-}#7eIVJZkfXzV88`x z!$Al&+Ttx~wK}v8dyHMT_ubr=!e}@<74}62y?&~OkaF>|qt=A81dXQ63s+)pM>@<@++StGdv_h{M{^*6(4wSA?L&`h(vdvy=jjQhB$s3~ob z3Y`hzC%|P0cDQR_1}PGC$1c0J>5p^z_2l~T?oQ@{K_oKU9b&I1Oyd<(gP?o$^i(oi z9e5d7eY~#v%EB)Gf-*+X+6%$qZ+Vc+9vrd(yTQ-(`=Y7~>G&eQq3jTPK z?+SNz?L*%+JQaLZ}9TlRVYrGKEHC-veo<>48F+QB1OX_b-y1 ziFZCun;tAP&7Y0IJMx=&mUN}<`z*`t?N2aY?vk(MCdW=@(=uNkl~T80k&p~)NiMJI z_V}mOu+X9?basLL{_S~lM#kyDMHEU41?1ZZYu1zXWaw)kUC>@9r_yDtAL81{(T__n zc9AR&U8ZH_zL$h_w_(L9u3!zbOuU;*atF*!Dy$yc+6jHptGZq~HnX z=%6U+11T1-&TdgZrFtxrFlCioFmWS8XF@Re;1RY?ajff%ePKRjy6$9|i_b(x*Us2^ z%v;$!7$sR!p8U~Ig?=`jzhrjarKGU>D6stp{3uUyIHUa{YK?X8$H#H%cXf^v!fhH` zd1;q$Ji0XkZSz3MoDGoRY4HrVPP`SD6+hx8B*&} z^G~lwg9($F&bIPYuHK!?;+*1Yn|Z8zXgMP~u^^s+)+BZTyV`i5c+jAnK$g?Dnzz)l z!Lp;k+7bS(+$`DHlDO2k6A{&JPK_#sC_IJ<_YZn9bCeXD@>Dj*oU~^Oipd|a)1j@@ zd9c!{Z-pq$UXCd0r8J=tRefM@qn=`aPczYp}J#$p75w76m2L2&@gGH$xJ5 zEeP7RUtVAh)Jbl6Yn%30+Y`!kdZ6Ddopj|r8m7J_48skVI%iLci*%x^Cd%eekU|aJ za?ga`snZG8h>&sMx#noLwhE~?*DOd#a%inn;Nbr3K6u?*D6_Fo4*LmTtgal6xQ1MT z1fvmX5TVixN1@_b0Lk-qCbFQ0`YXFK?N{fmK3&ch^rVcuNO^sxWLHOilBqsU-jY06u2G0xa-3$+X*yVJf zI~UKD!|JODG?=z?b#@%R)2H;3->;EB5UGT%k572IJ9H)t2rW?e7n=(GlVKcew~ zam>#q{bbEX(IZg%qh}Cl4};R|cEX^#^}ujEn3WWj-=VjnVqeB)6cZ?-M$xdb)@29m z_ufV#^-1gmtJ^Qw-f{Jt@Nxv>aC+H%~G^2VgIo?1)MnmtZ} zMI}6!Tkm&Pk-rA1eY&y+84IJ4bhZc&$fWF6YB3!}hD{c%cKVSh;!zxj<{8y5_Os6v6lFp|HsipZZq#_}(K94RQ%wcq;|?*27TE1D)R4-6exbOcEBS z%I}n{D!QE7d_+oy%BWK3SY(;`3NWvBI}f5(w<3~H%jY>PW<++|`1Y_EEYv@E>|FBc zEpBxzGto3zWkt7uWI}nDJG+c}F8AQ~ug--Ojkl2wf@lG&9pol}ZWNM?@aV(v^eFokThdqjox%^trPxl$6CuYbl8lGynbl3QZ z3|b^N@NV8<$P)TW_=*|bTF4X13gK%)i)@oNh%DHS4h4+3@JM!lRU%NkrgpAinJg%p z+qa_h@kz1s%DxKPs!HgY*4N}qhE0iueK!OWx3xwKQP`Z4B4pdmju_3Md8|mk`Cc%- zYa;n2#lX}yK|c7mT#8@1TY&NeK9!hbYV>XS@%g{uU4GEo&kVQS{-E-?Vm3JG^W~1w zC!8Ud-`K}PlWHPN#=i|nsxXIDk8;#r*=#3GY<<$9w%{2$LIH#tu`su(9oJAnqrfw4 z2B31CsqvQR1`u!wcRiVFFJY2QTH7mv&p0FCxRvc&4P`=p7FV06-O#rf*;`riR2Hen z1j%}T=s42*hZN=hN?|AJ&QSw(pTM;uW#)&wuva7y%?`2+T|(M%dx79#Xr6Tf5EQZneX5 z2P@~l%{-;BRK%8)Gnu9s`b?a}EZWeWB*{-=kM>CA7vs}9+IoD&2L|&IVri>A zKNu&oqw$OineM{b%a(ZL!=TMLe44hW;ClSgZtfDu-Ckdh>m0UO)UTQ@Xl*Q$Va9=y zYRJ(y&SXiWT>*wDSF&~!19j1SQE(K74up4it=ZE9J4HVybMXTlaII>#b~^2i^B{4k}n@Vpf23Wh$D#Ya#JXJ1Q_4S^<1( zeBvx#7Lt$S{5mY>4LYSNp-JyPj)f8Nl!!>RiM{r~r><3RNmkoOaBJhV5071mB<0&6 zMBRb=79(1t*sG{m`7#2S)9&iuYS}3(U$hq&`iCd6TSvwqW)3;&U7gCY ztZY44JH#hvo7gqS=3590X!A|b)PoxmJw%j>qRjFM0gpSCK0d`;XEr16jPQ51fx8Pg zHE3**?VgN+8{s;Q_Xwq`q**R6L}YvGV~&d%YEcmsHyvkY zEt;G0xi+_(@Rs;=y-^647=v$D^kx7XO*I#*?Q74^vu1pOSVT&eYe;;Oxm&_%3dS__ zs%gcJ3XFwo2WK^pr~YD5*6G0vbu;}Ccv>sPJ%&hyooMt)Hz$~RQ-kBPU~E=(0nT5- z*v}0Xo%~=(jxL#S-!KUQrk*}dD_O^@{9r2LkwH7E;&WcsY42>R@ErHV8^YC=r?d?y z>*o$s3jBT#2Z3Rk{rANUiw|L$fsL+2%4DGOYUPpKZ!Xq)d}v0d+p#`1{nQz1MVmSn zXbTQX-;S(NfK1E?Wl2z>fVl6y?utRE&}W| zt>AS>Z!PKGCJRwLEs}cqL*YqtkljsW9JNFS#gM8=6{@E6@B^8@6B!9?`;jdeM`S%w z7^M2f#)~)POPrKF@TOaEl^(IqRJ8k?(rQ(%;~Q(pZdR4HzkM*}5A8#O!n%~$3ozAv zSp+E~WuP?AMQIp{h5@4#!J%U9=W6_vmk|rQ*8a)Yg=UXWtl=)X*R))nDbE0{>1sC} zi3b}oQ_gy-p*=7y;~8;*Vsg&o!(K(Tg(5S?F_9x;(dWg+nze~%qYVG1ZQCQ>BVrWm zCgbrp@g}&O_i-eAkQAAU>aoZZ^LGi8+fz|Crw@SDpfkc0VWK?f3xG zVYb*wPj2A$79(=&ksP?KY>3_)MNsFbgoZp(U=hmM+Z-J>}%+69htY7X}bKOmOd1r0d^CYh6 zkBQB&5v1?nR$AD6KH2T(`_;hnHFoIV`AQ~JMXr4&^(BNg4G;8KIVsUIrMGCEUo`x6 zDBTiM=|!;Z|H;4kAs?mR1Cv}yX6P#{-a6G)-w7F}HTsHYE0zx6AZ z!wcPqXUs@YxXSGgBgcMApzWYs{(4;phk|>E#G{#kPjnc&jp8P5dRd#Io%cSDlzlng z{AhE_^Fcf0^47L~W|bhx1W`NADQjZ9_^#>JJqLWyI}W-hkq0E6s__<1CfV(B6ozE( zJLr?-k4))TjV@qH#IO}MH-XI7y14#Ng$jaCb!7|lCfvK)J|dCKLT$lWu_UI}E7S<=H?DX4tYo#>yf;}>4-2VQy4ce1x&>fn_cLn3eO&nO+_?;#i zw3i(aEh-z&FG}jNXpcJU;%eNMLpxwuqSNXSh&yA&BCXUfAl5O1-lUi4G)SO^1AvJ8{+@GSRJ8mE(5q3!94h^DUc zsr=a9_gw?$ad2Y1Y@1POPgkp0oBiG1WTnHx5?6|LX2{D)66f>f*Q3Vti0qrNAay}C zh%T%?SuzH>n4f<2JNj@Wjs3myLa?dyJSMgBp~-2zxiruG5HA*A9SyZ3o746ncVyUB zr9Z2*CPH_g{xc5N=Ya$YwsAI4P-77_@D(ND{awAZNzpRC! z(eUTAy7E#f%{`!3lImU=oT5I-2KlcfRe)kvr*)18JO{+_enyGJRD_@R}L2D0L**u?*Y(60`!x z*<2sS(;}Aop9kT~WKM610cm6BTlwk|U+)g{D~&%Fsa_7j)V9 zo?I!D6`cW;n~ebDKfE{sc?|SEs}5vlp#k}p-i);>Y43kCC8f86fgaowdl#(exa?0o zh;mM=3V@aE?LPTY5VR5d?CZ)Y&I{J5cCqeX&g?_gF^wg&Z)$WD;jO{K9ui@~Hdj^rxlpf z9Eu+P3O8*wP{yh~-`VZhB!;)-!?ezM{Pkr_BHBZ{w@-!UOb)n_)TB0L@!R$q4^>Yg zt;>bDoi{a{o7x`XuVpZjHDA2aLprmWWd^cfhex7inNuk%{;BKUkSPNRtEc zza-iT|4qj1;R2f=m{ty%pat4qAxH1<*`I~r`uXQf{F}ZnzBbL?E6KIDEzbw}X@C#N zQdK%vM`uA8FrT_Nivxp=Zrj&C^?O=byJN0h$xdf%M@MQoomvB}W=kX66ddBUe9fwD zG=Zxh2c-bo4V|&ZKIM9n_)`^Ty5q0Y{YN~TufSD@@_r;b!8MEl1C}aHRpBKE{wkSQ zIh=c10;>R|62RCSfNm0Bdl_HBR>{wi|HTtoJr1wc^QO{l4I2ukOO*7H?i;kX4S{t< zenI9Nbr@Ll1HLM7Ht4ReF#qL6cnM%m2(C;Tv5ykO#EznYgno$G%mvhivr%s5|O;3m_Q=d@j9rT zYC=+-J@Z!C9qD2FHh!8p1Mia)oXnOOdd*t6GCNqwGR4HrC_&H^oZRR-sZgWwTbj=O z!=0~J4*m{FBDybv(RiiULJ8M5fNf!2g8v?bZ9ywSvj^E(?KyeU8p$Pv#OBihHeBq) zWMM6C$^-$WkbF$@6WfF0YWOJteJ?Pwzx{^{VYtxXU5*=%0mBuQ8=O4~|H7anp)cZ$ z0uTYL;eR|;CVIz(@DOTo^{H01+nVI=hAs;DwN^=jbnIcgOo&L};!2Nk`s%D$iVPGF z-3Nh!pIPV&RqHb0$qkHq*0WJgKe|9$%vAx#?8&{GBxOOgn+v9$!DyXFq{9_l3}j&$ z&d&dVqCWKwqHlHG7)>D3`5}eCDySn zn1R>NqJ45w@Wb40!u`1rM)>4RKIuutkTs+!SVgUV+BCcRHQkN{FkVj|Q}5MC3Mh6w zHq@8pA&j~X?ks$7ZG;V$W1NkFy?v_E--yMa(qO^BJ-f7K&pn(Rd3$uAJfnvwb0_wR zLyuKILd9@=@GNM{qZd7Sk-u-9 z8hnG?8;2d;wu~z9aAgbkitRNa%9#yBeR4pCVMhb62fI@{!@pA3E5W?00%W?<-hTeb zXnt3)qF=5{c=xt{7abojDU8~F!9~(O9G!-mPYuQUT>M+?0c<7J0kkeW8M@=vQEew( z3Jk^Ooge`<)yUQmpGz!H$q;p`ONP!rxNL_Lq)f4PIn;yqOnsv6LK4(;LvT2{K9frj zqBc@E=fP>%qJ!Ts7{?Yn?EUC@MxFf87{&VU`;e1_+JXkqJ#_^!$Jz`(KuMcL7A4(R7Tr8loR1EJ1ZJcFSzl1rW%)Y)3T?Hh{Y+D`UZ25Hre|mZ)23 z=Wl5F(bAXYjh_;At^ar)a3P$^*^YQ8^b!ts(*uO9x=(M)8B;j6A{hr~G$o&V`S#Kobn}9sHW#ioMclp98a; z?N6i`F2enDfUyLe6XgzLH1mmH#vs;j-`sffR8soP>rSG!VK=v_O&gI zgI&!CbG+2p&(uogt>UHN#12YvdgE`X)nNc)o6V6+TIQMP3UKQ_JVt1>(J>iPqSw5o z?+EY5cELRrOysYfHWjLEr&{U_Et9 zBS;@k^qwEsaQFI1*C6-l@EV>-A%_bjq|Z;LYt~#;s@{mcg|Mx#-^9i-?KpktZel32 z(5-QS4&VA|)h3GOf{tlZf08is>8S8^?q&-79$^JC!wYKoe{aLtf*jup$hXJNT-thq z)(Dq%Ph&1*{?k|6?SDPU3g3AawK-cX`OAEK_h_JWx)3@`8E8TcN3;gtaqd>)I9R%=94AHMg1-R3lq_ItzhtX zs@2XwqrcU50Cld9()E{=98uN>?4*<3$=&Jt*=I#M{O7hq(}lm5;4h6OgNF|nI1l_6 zSOZEV!`>JL)jmp#pdugA2g`hftMd`HHl+U?F9Zo-Bgz|7Zx>ARx1!(~lYC(WLQmOR zz40Ifkwe8u8-W_PQj1`Y8+I%pQA%r z*ERO#JtRSRZ22YC1jO9e%2sC4vfi;DCsdJw%r=DyR^Cn<`&m`Fj3*l7`Td4y?-+Zs z6UgzNDDO6sj5vhRNUj5?(K9LWv)2Dk({U7ftADVd{InqJrfH}Qm@Mu(yiXBL8bbyw z?__zMDR%Js2D(_v{yv!2Se(Trd>3>SAXH9Jird_GG0ln#^TSz6Tr+5>(N( zG*L>oEKNiITf^bYBsXMoHrI>vMap2IWREM7f_v5aP7n-1SP}|c-T68l-{5b6oZQUb zf#rC0JH@iUu0yFsgt_gUYcWz{k;3Q_5t z>1{sQXG^SX|G3ENOGAg!obXnMX3JO`P61J)J7U5ziV0`Dt9m&jYaHDMJ)zX3JP%2_W^a*-9oD`JjAwe1U9A|N? zw4O7fOfeGURurS}SI&HQ6@p0Qie-4a*@)Vu-C`r>u5{_b4h<+M4v+{Q?{-OAgmi}~ zLkWK=o68b1DM-A};8tm0s9JlmoRkb`vccCm@+a`?|p2m6%3tek1)wAv zSa1w+Vpxe}Ss~B-lFPh#DiUB|{D@SNZW_^wTL~l*mbv>~>f>l!pxaULpKEw=)qg$Nk9XOZYa0G2t^x+V<4bVRsZwaH)l>g+ zt9F(fdeUuY&9`#q67O4N#lp&p4o0I`h;h~8nD%Y!VOXH11nFhZNF(N2!gf2Ki@*^O zc!w+}zr?24dgV{{jW4j06!46QsxD8>kiKy|T2coVzKR{z8^&;gdKBv?RI~VNRYNda zd)o=b)fVYba=_)__Zg#v!mI%ov5u1dFhn?K3pV+x7}eJWa{bmy%>Hz*yG125JBl{T z0y4rsW1Dfw12$(}+o64>v&Oimt))6>O{WYr!WT){=t?B(hyAnkc2uIh(QKd<&(RY> zNdREH$S)4bcXCL_llWl)#TbELpD;y}%(Hwz)JO`&y7M}*jp$IMR>^)-x~3y?#kQP^ z$c3J+)0Pssgf7!`$Mk$@d8#qM*4rFvy%N+&Q#yiMv#F4d3BQkf4N_@4!((sVyXLA8@w)$&@DoMIpDvLKu>k@E ze2;ItOR)VYr@!35`h2SzmE0*5wMysn z(fd+?#wH#rw-_Quw!}dztCp^KPa$TXl$x4!!8kdyIb`7Z2);G;*s!$k2hB1wRa_VD zLZO*yZb{h|?l-ZkHYx|6%csKFL<>S$eM`b+@*Jo~=DBkS9<~utwZeQN3_>#6ibDBh z(U32@O#U79W_03WUP`mW$8Esvj7=er+iRY;>&r?58Ko#pD>E$Xr?&r1Z|5L|iPvYrRTT=WVsg(!euCvC6uvh>(|SH7tMKxFw@ro?p4Rw4=%LsS5< zKo~laFV)s)e+gu{CqsplvDW8X*XChM5EU@t+mm^5Mbh>6UhEIpQ@A*wiD+`Q2-z|S zONfxkB1I4RrBaaeZI2lJ_M0eo*GU!qV(*VJ--mKto&X2g;VD%+EyP{L0Ndhm$N2)@ zI2@>&i&&htvvfk*6q63hz`!xspc-rkJaSlUroULnDQqaKjpBd8hZ zR&>)cMb4Y$Q`5jmi$wFrQsskEI_XfX4Xx1V<-l=$%PRg$K4YQfca@`wZ)KY>t#7zR z+qI!!jbr65+(blavJ|Iuo~_|TQP%kzq7aBXNAJ5cK&)j8siLY`$SR6)f!VbX*~3f7 zgeWP*E|Kk$?fP}XXAkqwHuDeL?c^1`pF;^{`8XPaB8pTIkc0U}??2l2Bms?ki);ul zl+iT($hNQ_uns6rS6qT_rQA|Kd&plh=u(JbTSX8?r4{BJ@R4^ZGQEt&TElM&IPxvc zkK3^PDYTe5Q}q)cqDlYxq$6|)$m+}`AT1@t19>p$o)BBro?_d&5We2LJ-m z3@9|Sp!rdJ9C~I0qG!sA;JC0Td3JRPAT4QnsQAxrb}m34BH@SEleq0|Q$%CIP!sU3 zP#yg1_u(w9Ltmqy4wK$@Uqw>)w<#I+xkOlR!}9dv#%P{}Jajf$ zkjF|z9$Y*37$OMF1;1AeSQL|b z{1V`=?F*gk=fXn$>uMDvErrzMnKEm?^LAahMRhGzZCQ8hwx4=sR`(T+vBZ?-iA{bD zooqvt>;Tiq+6$u+d4@ZInI{80A}?rz9l?SIH)2ny_-7Sb2N~U`OGzqP5EL2a$287qs^)Tk6Pign6p&L3r?iPUgx7>h)9v&@~SOPb{-r0MD2 zyi1~1dG5c=A9oFa%E9hJ70O*dh8>=<$eQZ(@rIlIk5gqD!2G7gPh(pDfc$s%E14!j zV=q|=9U8IYXHRl&Ag0+-iN5C8$E@NSq{tLxNJ_<*klrU z&%PYkh3%PX1`%*lkJcfa5`eA}6IbuGE-`j(HS+d7-4CME>zu-=Bf{J9nw-Xky*a6+ z-frjGlhPmUkSBxmT#Sok?0-t%QxZDoQM=9^OyybOEH4oiFT4OH}G0 zW|fi!1Wk&~2xn~VaLdqPMJ?(fEWE$L?kDvIsTWv#@5l^M_l13M1coUO8J@WrFF*~} ziWP<YSL&H4` zY;+h5Gz`J+{xMb=EQhZz_Nld(z-{WyIlD$S0l&)lKc*NqmOL7K1?5WYv8|>t9-$2= zJSyqL(K!0PS#loR(<@e?JoqmVW@TEi@ijcYp51_dv68EwV)x>l$dUP*?=p_#C(-dW zW+6n1D`{nI?efj${n?}ZpfoCE?WM0VN2rHoo zV)$feiB2O2&55bEm%i#rGIgC#SvLxp-(x z(3rn7h*CL3`eXe-UqPYb^2!CO5rTs1ngpv)ZC{2OtC{TZK2))j1}aZYcxM9MHEA%K znw;~j%t#WRT1`xK#63Kg5+qu&^^o(Lq=QnMqC@D3FwkIbT}m7BtA|k{l~^238-{nC z=D&w9>hz&h!ZS{Z$_X^B$r_F)%_V&?ieS&O5WDPzusU4FPymx4-j~B8MHCIcTDlmz zoE=L#@gS+g|I2{Yb0 z!!*+l7rs=i@5=Pb6HNiYR!zvcW`HybsPvPvBw~s*iF=);Qj+Z(r!?TV8&2LNKQj*v}$m(Blk*sx4?U4jJAG*7c;0tZh4+Nkv>KUs_#>M zynwKqsv`Tq+2zi*hGv)l(_+g0Kc<=GpV)*!aet}vAnFHugLstX!B10$$U^O)DOW1ntiSk|P0Jp4= zbq5lPxLxa^BWU*>Dc+GIETYL?}JzTo0TzpJGW^&JuRyKiPM=MQjNOf8|6KC3uevYbXv{B+CJ&+%ab}zG91B`AQ z@m-9XDp>rrXCw4OAZR))ggBjWiSLSdj`2ib=-tl@f8R})F>Dxr2;#wkaEDXEx%lmOH z5&H8~CAtDG>y?w&BRTH%0~Nc1^>O2MmDhKjhXPO07YMAE@;o~DQ2hk2FIIiE!00KF zR(yU@MZ(QtWlU)QWkn$Y0ZfN<$h1>>8fSnQRp?Bh`yKYCS<)gt@K3XM6Nom=&cS`@lrzBt3}9YIYsUE(7NkU)jgc#&y+gs)5*$D(>e#e;A$DbNj?Mc68a=3y{JMEPkYd(+8Zt;6>Uq zd_3>Bf9v^Qtd^V}{Y-MP`=!yB7RX-R-R`Cmx0Qg!dex|^!u;T%p=xr|Gp8}5{Ta+*?937 z;X{&jYf)|;yR*&}=#6`$?Fw3VuSCir4w_}pM9c1<68VFYJJml5rZAE zx}^{)x&s6Z0UhN@-Rk8i(9Te86PiarGUl>G?}sO@j4~}qx-`G=-tB$#mYbGV-jkue z{0|4qOsIj+#}y`c2odKwRfZ2oC$aVBPS)feYRr%j@1LPP<-V~m*ROxnXqtqF!Y&iO zlCKrt0TR|5+qfMR&&LQJt?6h8zf&EHX?9*P&Imdf-s>Z$w#%@GJr7V!8a0;Md8zsw z4|TIErPwz{FO;Hu%t;@f_qk_widh?WO1VD!yngw2yx8|RC_W!w3N5DNhn2@4h9>1o^SqS8rFL|ybgPDlmv$-+SFM& zFXM%s^oT-tGNSa0vC#wK_GL1`E0YdrhI)DPm%nlIZa-Ni-hKw*~C4QMvrB z4Xxs*%gvPiM=(<)qX^f=1Gd%^7aZTu%>*_zhHTIcS^&fNWqoXEtuO)i42qf61dG%FEM z5(B^EX(RN)*ElYUhn+=tlsP4JwX}U&$I}^FoDy%j&Cxe;%v>I=I)D7pls9=Bo1gH7 z1p%ppm7S^wKOy5Y?EM3kV6f=uYD<^9q`mE!lsK!@-?&b+ttmB?4nJ&zYLVLHqYCyI(&J5ZU6o;|9a7$ z&@|e$4lCH-fzy%~OFxh<=&P&FU)UEh5wZJyIO4E4wKl#xjb1tddULA0$%DeZjDSqnXc64h?IR+l<+AFuT@!0~16jU$1qnVNJ2l{~?cc=rf@m$ZR)Q z&W?54av*}TKOti+XOH)^SQ9h6A^CizHG#%VulD zqL|#9^9|Ih;!__MpH}{6d3T?j)*Jjf^m0xi-dXaUHxx`6_`5w{dTCdcI`s%GZEf1= zVU3MRKoFuFDti~Ezuywgx>m6c6`YoSMQCV;_KbukvKWPmsTLYNmCcQ0{lOG|bfnPP zqU*`wFg6J8?94pFmNR1T8wyax(N^;EVv?I%cN1mEGU8rR{=Bj~iX^S$HJ6Gs7ZbV=hF7N#Z_rv`I z=j`+BbM}7rTI(z_G^G}9L9;*Jzt1rru~U!RxDK_@Xi8E(;R$+#Omq&uWbT0`x)B)SDNFZobE(jGY{I3MbVf z3G29FiUEJuwGf84-wtMc>-;KJ(h;<3R2gJVl2Xt{Sp8A(9mr*R@0dtZFUZ2m*UIjj z?}Q)FAA3#md$;dtp~u8_P(7TJJEx|Q>mv!U9<;8hRBVyOK-ruqTM{|U`4dkG?j6U$ zk>#qNoM(d%I46G8F1RYSYNMu#bs!4!uR&m9PsPb8`hHM@{P|NcjJToy-v57 z;+sDM*dk_9-1d!HkDnZFoEMUPVM!oOsMlwiwh7wDT`iV zijIsZE9C85w0gZ2;uMCJqo_k9dc_vDf=ysEc0QA}ac5K4{b3`JKf(+DQuOa=LrS+e z%#i;!R8`z$JN^er)%Rs(6o%+Va{Z}HbKIskJ}u^RK|KSq208c(K(G(X&?Ya~{Pn=E z)qp=%k%(6W!Y_Bc;_lmPYg`9MY7>O(a^6J0|8o6Tb>ENSRe0Zr{>{@4ZMN^dRP6AP z)r0yvtM7tw3wI|>+*NnwdF`L%)m64L5SSR?1{VCW8@z1yWT{NVIp8)GmJVsVYFkS% z|HH2xi%-T?O=@rC0^&Y@gwlVUz3A$ zX~`Yu%gPuE`EoMfD02@jLmoFu(}l037@tv}YoZU$o^?Eur*@^$1_`8YJE^g|(H$5< zV%)+26)W1>0OpsXIWmwZRh00zB!NL$_DKzjxN+WO4R>xLIfzr7WA3lBo{S_|{Nh8> zo?oVuRnTWrV@Uw~c*zY!wyx7RL-SjO`?B2ju&L(LBVj_%^NbSSTHsvutI^&P0MQG2 zQljb0aC>4`d_gHB9S)1s7$eSVYXq@cXl&Ir>`Z9xo*;Csw+Tu5cf6$4rR*vvy|Ih? zdq;7+#{{RV2{A?&?-t`eHg6h9$=F(AKJO)!lcy zfJH3h5^qeo&=UPCFO3DIZo-;Z8%227>ML$sO$gmM*u^~Q!oJVi_3wrAReC}Bnl|)$ zX1umAhXIWp6O5v?l@xE0l91=1iYe7cQE!v8ecoQiEY3LQ>$W$O5(4Q>i@Ky5Ic`PI znx#QANYX#|++L$uv=q*GLTNiOI#Kj{!e^(1VbM~}HC;37r{39z-;d`k(a0M*GmU_? z49%Q4son3B$QS~n1(mX-@LgAJqRf>1Fq+TA$sf&5gi*=-U?j{;#02>f?-f>^Su1y> z>je&+`|_`*N$c3A<9@$mY*w=?->DJf83vBd`q7n3MjUsIj?q1M2aJ3Rj;nf>y^)qP zfcqC3Hy7qB_ANG)78E46T4-KYU|DrIn`qxEU2>*9d7;nJeDMeWV~nq65Bp$`$>5## ziJVgM1xtQxnxPb9f?AJu(_1PkYIpMJKV=fN@xqumf=|cqpb@8fL1G%^S=q^Eph#0f z_E(8mc2(p94+THv9GYtK->qC7;gM*|g#vf$(s?~Q>1VXjF=BzwToizePd@VJ2BXnd z9ilh0j-boqjOCw}nQ+Fxi)s>l#n5cIPK*If3LSR0IQ@~BDa#f7<2}VO?lPat3HeoQ zsZ7;Lc!gj172^aDhc!^Xq$E=wwdU0pKSb1MrZy+1;UHS){nx7LTmLd76UFAf0wi$1-K1I3gsV;TzFQe2CaZBl}}= zhK@#=6eAId{*&4KGmia@S0h~W&}Ygt2QfbKgR~kp?M6SqF|JokTcOHa7+U5i2Aiir`qDu869rQnf61M?>DJ^O_2p&hX^B%KX zVk>65tGT;wUg>IadPCWw~=kd0n2Pyw|DVoJ$5byVld5W5^R;Z^0wIDK+e*pe79w z5{Hq-^5>eO<1Wq?G%b3ppF|ZChA*g9@I(JFcZEi`6D&wi2ZS1^*AwE z45YB^7D*Mk=4w-0E8uG=3J-zm5!XUfCo=v{m21{Bl$qt|CWrEgyAuROdvMoVh07dL5ACs{Q02qj_n`z60qB|aqz1~#3v|tRFPJBqD7(l z3k&JyxAoe($X%8_3#-HpYmzrYY$or}pR?|O7yxsI?OBzUnTlSC%J6J^fijDc?PEcG z&d*#GHFY?TNaT-Mo^3FTYJt+UkpZJ1md}fTo>yso3m%)obrXnrE#)@Y7x z&l_KAwbdJX60TsN>R08OOCdU5%04+eQ3I(xEiOc^@O#*VUl>s`*12b#X>egL-t9m> zB-S({5^R5;h#PAzdm;LpPs)sI_Lv%k<`}pb=0C9{#kFF}f++3@P`nA~2z`Nit^y^> zwCZW{bhKfAb{WKGQ?y*|FgI9&MLzed-csc=+;7j$oH@8b#(~c+2gHDkztPsVJ8vJ} z9SC@SV1gM6N08W<{vn^Y)Vo=XFp8cG-MO(fJa|GS;i0>)F}edknqTxCIi~le&z` zQd8cF*5b=}v6?JYAT6MmJSMF7rhC$MF1b()YU5eg`3O#1Y@iyW-+n{ZT0=m^iQqij zIyw1jMCXdpmF*C|fZrv{EpGp`3{i8QF&&6oXx*AV?2p1En;hwdmxfu|7S<3Mu4yC7 z=IYc`4RazPpoKay#hHmUBWWatU*rAEqI|V>=T8!B^z7|RkdeKOu7ksNMN2a}Kc|C6 z)PQ8vez!Q-kcRD6;rGE=hus}7kwN5he1QOfkx6IvGW-^NM&EsE_R9O75A)RiNgJv$ z9XYYIp$kjdn@HQ0z^hF=Kl}0+KfXe{(AFVuW-j!3m6td*?B7N}F;2$z*`Xmkwr>Kf z4$=u64v0-I#=daLpagTMe)HM7B!>>WL zRZHz>*mNF=mF|%&YUR#EbSt8b!#mgDKoo%-w5mSJbQ&+#g!J_EnMudomZ%g|MrN7dfqjJdaV@VbqH=KJ1 z7b+O(6B4*H#^u{C1A<$IGR}ea5Bt9~O+f$s!dB}FDxz}Y&Tn)txOw%qGVaFv6(WC? zz9bYYa>~YTl53*t1N>AsDVm3z&Cf%uL*)UJ3fV;CQe~*jfNTBn4Phx3xCO_gz?^Of zmIuLuqoS>lU{!8d>kcOk5V~sDurFtf&hBbcM0Z8apPYGtyPP_dRc3h;B%YynF@RAz zTB>}f-`xhR#eNT7SP{HW^g;gQ2Y0^Y(0e8U-_cK4Rb`y3R|OSZ@p986A1O|g`UM0o z?jfe#c_A>u4b3()*LWHWW-*Ud^{n?#9~&^g*tm2Ek^n)vJDp5N@nF< z>2L?b4R@?vtbe@@Wrh)NyR!%3qGx#}0UBxx;>_qy{_~t)_Rpg0K9Jy?K~# zT3?=yiHVujuA}-*D5+k&7l@7o^S7QptbGk$X@`-S5i(cXEo;s|(GSbwM@knTDZWkr zZh$*Ui|pd#=O zE|@5YMVWRJYsFUH6vpSd%GmKf3c1v%-R&P__1&R2s@^F2CRpqRU@~G~v0H+MK3y6hTq3hWN>s2!-|q%| zU$!1=64}({#O2x&XY1~ti>rowsSFZd?$AkH^rt4|xBMNvg!`dT@*%RV*d6_biqQls zxvO~VZym74+@nY*Ng6(hCWjjzO)u~`5K2%lN>GoD!U}GA`{nY3H_X4U`+kPcYn>s& z?nmApS0bPl3XU*Xp?WOXYn74gACB$QMV8ZM3rCCS86I`EXdC zU9QycN3MwLTbXKF1GlRDAA^U-O+_^Fl`m9HEr>aVh^#TWYxan~t_?NtcN(zC9fQtz zFS{H}hlMDU9~#Sz?hH8$6nQlEq!UHPi_n*=*>rtn+0U_0=7iPkD%<^ti5*+xhS%Au zy&dZ%4Q;ML^p<(Lm7L@!a(OJnA_Ileo`F25&A~}wbrrET>#0__=XDjV89f8hKFV(d zVYbEsy#GwXmr|1khD!t<#_gVVhiqT}n{=#MtJ?|zYUrCze|>Jm4g_y@Z`atlT22}d zOrj9ONF{oY3etI2BmshHgN1CcpoWb15_YJx?-~H%PVV3Gjtt&`QGK;=5)i$d)yomt z8amgd{-W@)ZCiHK(2yjR7;Zn;kJY>`2!?off#A!TS<7Pnuc3Sz@M70oB5G?_RE?;a*~61G(Z;T@m*MdZH$snsAp4L{~4&3w<;PFiA7crzYB19cr z@;Y8_r+lcnl_lw}`D77jNudxik=@Nif=+DGslF_}ZE8F*J1zXj{b50k@ZkoQqxKM) zYFyfFuQ{J{i7aZIle?xoptdFl*cw<;{`HM6YTZ3;^GUa7;X}Vk3_hMf<2?xi@G=B< zIE}uu_#>bb%{N(GS^Ybl)@ek#Qg69!fFW58OfqCx)YI$2q=fPln?uhm5>V~El3be3 zF^{@nNS-hra8`^pQb487yra7;r5JG`%)xV@BnDVy1x$Z;G_JhI_gEI_G=GCkm1WUC zw4wXmS5X%bmU&OeSb<>^;7@^cQY#W`nF#I$e=AqN#y*yVH$E4n(v)X~^HM;^T?i11 zqdaje=HB2BP&+?O-fh>c< zc*UoO^hPodZ@zNStNQ*66Y?Y^HX`~`qHa5h7_^LhGSQP#@!F zO|Nh2GxrP5M@|}KsDn|rww{nCVaXpJ{?Mcq8Kbc1`jSnl<;6R37R8f%Dh%VMk(K%< zF9kh^yb+)9L@cVsFIU&4N7NQ1Az2)#LgD+_ z;F6z0MW8{6xN4(r513cm`pFEXYUT6~io>~nTTfx6B}6h%T-k~JdarMdSmb4OVHIx z?_;)Mc)dNOujjiEO12~_ARFtj)4IDEo%3}psBt97ooyYA_v`JKA;0(4FE87_9vMl0 z51PI#TJL*HH1oR#Y?INdDP<6h0m+f2H<4bXyh{e<`t%87Ng1mfZyj@Y6h59TRM1wu zbFjOaJt}a`WfmQ;eAdVxz*;+ce z7p)tTw3YS&(`0OJe~^A=p+~XuO{P8V{wYn4q>R*Q4D~)Q>e$U?-!gRVHAC-X)>N3T z*XZywk~H@k&&>YF^6|mq!`zF1knO@A^S*NA$x_{X<`Td13QKR1txaA_Kau}E-N;UF z}_Nd@KmPd)J@GAAJyW1HH|?$Muvj@6BexRYo5)cd~F`_-spS9-Q)8GVMN1)L*T=voe>uY0Uw~F(xk?{pI&ZfN1;Q6M*|@;*?k38mE43V=eHCWpfwx_C#r znAllw?Q*~E6z81p{_Bque~HKLaDi1Q*3|XZtP#Ea-Ut0w_7PG`VGJklvgd1dIu%@Ub#a?BJX7Q2*Nx2x$mJ1*^ja}|1R)=J4Vtiv8$kotL2sflbO>$xT?hsO zuY+Alx)(6`Z%|>0ptQfJN+lq2Q~l089zy4B)UX4LKEKP>ou21e(zDC+-0;~l@Bh{E j`2XOe{3aXi30VC`rN0&MDDi9u{X#`SL%vehEckx_MeJN* literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/static_files/docker-logo-compressed.png b/vendor/github.com/docker/docker/docs/static_files/docker-logo-compressed.png new file mode 100644 index 0000000000000000000000000000000000000000..717d09d773cc46ff8297d06f66d9aa855453f35a GIT binary patch literal 4972 zcmai&cQD-D+s0R0Yj^ebT|uHmbh}CrD|!v0*I@N7h>`>myRteVy6C-&h-eWTEJBos z9-<^fCwij%@;vj-``7QC_sp5O=f2Ko?rY{;=dTm3uctu`WrKo1AZjg5RYMSn3;=;h zs3By3m9`raW`B#bzK*fl-zuwX{C{n-Y$9*-1}L@}R6Pc&oVoU4=l$%0kfT3mND56# zql9}1Uva-kRFRCXF3fhIFQ&XZ}oXTOFRof05QPgBSL6y@1 zl15u3N2zTTe@FcP3$ULSt)XykoVa5Zj-VooAmOmv9K8eRY%r?F2HGJ->^d)z>(fIs z36C3F!*19iQFc#}ioi8E9r%m^O#b1iB0vWrBehe!28TqDgd?xf{jbPSRx|Sc$2mT7 z&m4NttP7}oTSZqXK&JR-6bJ@`zvwcz;r&N4lZ@wj+)(lm!0-sOe} z(k90<@DUDRX8FZ{eyz|1*xIhv%ID3D&1ChI_x`>`Gg;e4bH{_FXe&)x)O!9J{%Jv8 z10pkYH+EE#h@}O2-tj)+FGjX?9&g?%(*vn1tBCYe=Zl)^89~AWtfi;p zbU;b51Uq2kF^Z?IQ(~B|4Iu3_9+o_vFy7H(O8)Wz`Gex&iU2ojQM<@HGj-r2_lrl0U+sN@`J7yX!f1reW_&hJt$I=ng)fsW zyM^%)1x-`I;ysWvqdO~<8gfwWjnv|tC z>F&Q@wW<xfkQ`kwLi(OUouG{MHZ^K0!I&e+06+Oh!1%0UD4#6)B&= z``nN0G{ydfi6juFE*nQ=srFgEaf7z#zakhZrMDm(byQq_VH7tWltTYlN=oY4G9P+p+<^<4=s4n*Xxg_SIz(Q}^0KKI9uw{k^j_ zMOFA(?j7s1Mr0qqa&JjW$S_+=iVjMEHu(J#Edz5Awt)IziHVMWC}rYDyUnB}>NI<# zKLF`P%aUNM?4SlC+A^yMmOR|c&=!nOH55Aa=uc5HY+X!?0tTkvG_gKm15Ox~Fdtf! z`fTZO5?g@IeWGkTG9~8yHTCN&Lk+vj7dl+F{@7>+SWE(&arXi;Zjn*I0Idx5(r zbR1%nDN3=hhhP{pPVCh2HZ${01N6PkQx$>Q(DEo>A%8Z~_LGqHX1M7Gjec|ij00W? z9s6Kw@f=_mO4WdgEnN*>o&7EeiP-CGIFqfDejyp1b3)v?6sQ@Gj-R`2r}f2WexHb_ z6ZU%agw8y_oqjXM+pQcNXZ89xhKiVi+NZ%!q>v(W#4~)Vp_WWSzPEvw4tg4jmXe!1 zEGE`fmefFeS(Q2mtHEesd0sEZ6^kHPJVgno!@w>h{rP(O<}eGR=yj5=&q36W?!CM* z`T?8NFbF4%Q1FzoiPn2^0xuW6^vgKCZXS>j=P?+byDBuSwi30O@hM`Io%)pfri+5Y zk%Z?h`B58R6PkE_gO=;U7vY~AF~T-80ixA6;^E|7e4im?11VycOmIh|iAjMM&)Qxl z$`YSoFv8BFNMJ67@T|c8x*Ad*k-(%4z-99%C#jV4MOPh6)@gBs{l#(bL#1_bLeuOJ zMadQ+Zrrzkl8n4jImhy}u&$}}svi0_5VZC!&wT8_=OsUE|1OgTR~ncer@z#3)YaYQ*?#WrxabQv(>BcRlJOplPc+`&byEqu zSe;z0F6W5sjW{-LbqNo*Y8cT}Bb@z~yI9DLEWN1oWe59z_@ipp zLstkhb7SjtG)K5p8T!^o*bGI%V$0FtNoMIlpau7F(lUkM%3oy@Jgwn}2mJpbVsv zYUL1a#7{fVYxYs?5QBa>1dF#(YQhTY<{rU_!+3F{U(;V?U7m}mCSv`f%>`KB;P!kz zg}Xt&!&;r5fbo;^e-?ODNKx7pBCMv5#Q=9?Z@%340qiWT_HhME+>aGnjB0hEx9mrZ zIf7LeNftg$1IABOR1cnJ&qr69m%R&KzW)Zx^)>eOoLqsYeKH1V{eGaYc$UzK%N(u$ z8MW|qaibkx?1xOd_cB&)9AYUp%jDLJF*q<0_OQFoT5UI4v*lLd)Ct+4ihEmPIr4mv zT3^spo(F~bh_%?WfMyxf;(r^`B|7|(5^YW+;rXpVt6mmzj@;~|;6wte-q-K0Yp_`( zJ;nRYI;I)n>4&$m?y~DBjcs??K>Dz|ef!&(xG(F?8Kqn13naL`$21$cW{lQy$eZwAn~26|FVhQ)&w*@?T;1__?~Q@@RdnRBxDK)W08rl#y?Q zHt6|Y@KA+1?$y3I0R|XEtQ~@(1K^*1>=HW?FL;Gfrn>2X2{)Pq4;z=^FGBy{4 zfjcO)iMEO5!G5H(2PbPs`79(Gg*|FPTSn##qwNj0e}D*6oWkc+NHkG`3@+qjcLh$g z6T8BcIDh5pr@KhK)%}u<*)k0r-o>NFVRBhT%@AQ;O!g*VVZ;OS{t5M+4pX8jTz!fP zUT2PV&V>2OR%#=s5Hrr=k$-fVU2zn>Jc>pmcCD1_#PBHiv8-}q9RPJ1Z_sK3Y# zA27HxB}mSrs&X~!2ZtFIUal{SOK+u$jHlZ&q4q)-v2%g|cEWybWn=rLZAxos;_5{; z%tB8~2Eq>yW6O^wzAD|gfDOFqkjc0CGjmeiY_{XOG^?&-Q6$HP^*dnBu3^{n^e50%;zmk zwc;)Dh{=LBWv_i3!meuG>@sfew~lS&oWM4(iks3~-(?eX81YZI(rwzn7q8e4y$L78 z`4+D#Dl)qaN59RF&-9O!cN;@5)2r>hJYeq;n$uAs@2Z01pYA*HDdD&aosaAzKvx8E z;z|5zUuN5_;k4*HQvgTq^b((I=_2NFM6PVep|fAkTQCl z&iA*FJ-;u>J+8DJ8E=4sJW@%+GeXvl%);76DFJM__y<;;3%*z(2_yT_d4Q#h(SXz0 z6TG{`sFOjtl5o`&3Ged>L{cVrz63hs^eEUZ6D*s7n5fFr7<#%-3rYTGcaKbhthnSZ zvMn0>usnD8z?m8QpU?lM#i}y)Z0=qUB#J$xgB`xiv%~V#F$kHg5_n%lgUE{WATED9 z5Z37T+_QJI?|5hYlFr(_elO;+!WHpPS#rxhmNpG5SS98N{B=1haE1SAlEl)d)k7SZm;xWjT-*RY{tG zN(H#)Vd*7mhLj(sb8{+v^nR8Rgdr@)pL~OMvWo+jlthKWH_7rkcSK^o7TpqSt26HQ zv9Lq=2#t$3r9$CiI%#aWbC8{!1MS>1 z5UZ`lXyO^rgtvE2oaic!Qw{AkN z6!GotTQ_1whY#DPT1GVMS4Yz^iWhhffQ4ra^;Tj+mN*O>C&wdZ3%T64jdoSXNnenO zea@%o;r3TW=&=scbNiVI5=Rkvf{pY+tp_o;Bx zO6NISp=xBP=3DE}=6hZD-a13GU(KY^;wTpDEg?TV(7+$NWG6Sfq@BBZplR|%&spuHx|0@}%*`RMavjdMgtNFi z?6c5OqGt41{y;bM{lvvxTU*vUA|^2TBJ}1zFE$41^u5>}a$N#EgK7jS@T2(%2G>^u zMG{EO*o?=r8ydTN9h#9jGMgK))f7{VAG*3k1F=q=N+F!wD9-r_E9JenHtxz0L8~e# z@$=-I0ZOD$dj8A3`-#!zl1viIELeCBWT;@>EhpCqK4;Vcq!0WX#j%{b>K6JJiZpFg zLP9l+3-z;xQ$p+im5E|cX$26GUzO;&G zuvt5i4v?-+;G|D!5DyOUcJ{ngAt%H4hV-q0pq0g`pm8KZle(5EB=8fd^L-()yOD^# zcq4HkpO)Razp>a{d64bm$vt2#!sCF0X;V_57i1&Q^k5mQoB|V z<(W__{wk;H!y4gdG!mfR-L1s1{wXPs7V}I*f~fHizL@>v=egE;{|Fp0oP2B3DGsrcsO zmPqMH9F@IC++&PuoY2+6lP~d~ZW&j^aaF|~zOE1X+;!6}6HgrY=xS-#(z$}T{tbV@ z>=beW^pmP-9yg&@(NeWi!rjRfO+zyM)>5|4tnk1d8+ez;yd#xM znmNhT$l%P&D%+dLk>Wa$Q!3^Mn5S5aa?m{%g`Ky@Cq~JA5=UvjEDE+oma&EF1bEdv z&`!H>_o}HyFr4)2gsQ>*``6L#I43*9KSz+?morzu{~h`t6(dL&;hE-9;`#3^Ej2yW IT4fCOKM4~1IsgCw literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/experimental/README.md b/vendor/github.com/docker/docker/experimental/README.md new file mode 100644 index 0000000..b57a5d1 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/README.md @@ -0,0 +1,44 @@ +# Docker Experimental Features + +This page contains a list of features in the Docker engine which are +experimental. Experimental features are **not** ready for production. They are +provided for test and evaluation in your sandbox environments. + +The information below describes each feature and the GitHub pull requests and +issues associated with it. If necessary, links are provided to additional +documentation on an issue. As an active Docker user and community member, +please feel free to provide any feedback on these features you wish. + +## Use Docker experimental + +Experimental features are now included in the standard Docker binaries as of +version 1.13.0. +For enabling experimental features, you need to start the Docker daemon with +`--experimental` flag. +You can also enable the daemon flag via `/etc/docker/daemon.json`. e.g. + +```json +{ + "experimental": true +} +``` + +Then make sure the experimental flag is enabled: + +```bash +$ docker version -f '{{.Server.Experimental}}' +true +``` + +## Current experimental features + + * [External graphdriver plugins](../docs/extend/plugins_graphdriver.md) + * [Ipvlan Network Drivers](vlan-networks.md) + * [Docker Stacks and Distributed Application Bundles](docker-stacks-and-bundles.md) + * [Checkpoint & Restore](checkpoint-restore.md) + +## How to comment on an experimental feature + +Each feature's documentation includes a list of proposal pull requests or PRs associated with the feature. If you want to comment on or suggest a change to a feature, please add it to the existing feature PR. + +Issues or problems with a feature? Inquire for help on the `#docker` IRC channel or on the [Docker Google group](https://groups.google.com/forum/#!forum/docker-user). diff --git a/vendor/github.com/docker/docker/experimental/checkpoint-restore.md b/vendor/github.com/docker/docker/experimental/checkpoint-restore.md new file mode 100644 index 0000000..7e609b6 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/checkpoint-restore.md @@ -0,0 +1,88 @@ +# Docker Checkpoint & Restore + +Checkpoint & Restore is a new feature that allows you to freeze a running +container by checkpointing it, which turns its state into a collection of files +on disk. Later, the container can be restored from the point it was frozen. + +This is accomplished using a tool called [CRIU](http://criu.org), which is an +external dependency of this feature. A good overview of the history of +checkpoint and restore in Docker is available in this +[Kubernetes blog post](http://blog.kubernetes.io/2015/07/how-did-quake-demo-from-dockercon-work.html). + +## Installing CRIU + +If you use a Debian system, you can add the CRIU PPA and install with apt-get +[from the criu launchpad](https://launchpad.net/~criu/+archive/ubuntu/ppa). + +Alternatively, you can [build CRIU from source](http://criu.org/Installation). + +You need at least version 2.0 of CRIU to run checkpoint/restore in Docker. + +## Use cases for checkpoint & restore + +This feature is currently focused on single-host use cases for checkpoint and +restore. Here are a few: + +- Restarting the host machine without stopping/starting containers +- Speeding up the start time of slow start applications +- "Rewinding" processes to an earlier point in time +- "Forensic debugging" of running processes + +Another primary use case of checkpoint & restore outside of Docker is the live +migration of a server from one machine to another. This is possible with the +current implementation, but not currently a priority (and so the workflow is +not optimized for the task). + +## Using checkpoint & restore + +A new top level command `docker checkpoint` is introduced, with three subcommands: +- `create` (creates a new checkpoint) +- `ls` (lists existing checkpoints) +- `rm` (deletes an existing checkpoint) + +Additionally, a `--checkpoint` flag is added to the container start command. + +The options for checkpoint create: + + Usage: docker checkpoint create [OPTIONS] CONTAINER CHECKPOINT + + Create a checkpoint from a running container + + --leave-running=false Leave the container running after checkpoint + --checkpoint-dir Use a custom checkpoint storage directory + +And to restore a container: + + Usage: docker start --checkpoint CHECKPOINT_ID [OTHER OPTIONS] CONTAINER + + +A simple example of using checkpoint & restore on a container: + + $ docker run --security-opt=seccomp:unconfined --name cr -d busybox /bin/sh -c 'i=0; while true; do echo $i; i=$(expr $i + 1); sleep 1; done' + > abc0123 + + $ docker checkpoint create cr checkpoint1 + + # + $ docker start --checkpoint checkpoint1 cr + > abc0123 + +This process just logs an incrementing counter to stdout. If you `docker logs` +in between running/checkpoint/restoring you should see that the counter +increases while the process is running, stops while it's checkpointed, and +resumes from the point it left off once you restore. + +## Current limitation + +seccomp is only supported by CRIU in very up to date kernels. + +External terminal (i.e. `docker run -t ..`) is not supported at the moment. +If you try to create a checkpoint for a container with an external terminal, +it would fail: + + $ docker checkpoint create cr checkpoint1 + Error response from daemon: Cannot checkpoint container c1: rpc error: code = 2 desc = exit status 1: "criu failed: type NOTIFY errno 0\nlog file: /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log\n" + + $ cat /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log + Error (mount.c:740): mnt: 126:./dev/console doesn't have a proper root mount + diff --git a/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md b/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md new file mode 100644 index 0000000..b777c39 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md @@ -0,0 +1,202 @@ +# Docker Stacks and Distributed Application Bundles + +## Overview + +Docker Stacks and Distributed Application Bundles are experimental features +introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of +swarm mode, and Nodes and Services in the Engine API. + +A Dockerfile can be built into an image, and containers can be created from +that image. Similarly, a docker-compose.yml can be built into a **distributed +application bundle**, and **stacks** can be created from that bundle. In that +sense, the bundle is a multi-services distributable image format. + +As of Docker 1.12 and Compose 1.8, the features are experimental. Neither +Docker Engine nor the Docker Registry support distribution of bundles. + +## Producing a bundle + +The easiest way to produce a bundle is to generate it using `docker-compose` +from an existing `docker-compose.yml`. Of course, that's just *one* possible way +to proceed, in the same way that `docker build` isn't the only way to produce a +Docker image. + +From `docker-compose`: + +```bash +$ docker-compose bundle +WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring +WARNING: Unsupported key 'links' in services.nsqd - ignoring +WARNING: Unsupported key 'volumes' in services.nsqd - ignoring +[...] +Wrote bundle to vossibility-stack.dab +``` + +## Creating a stack from a bundle + +A stack is created using the `docker deploy` command: + +```bash +# docker deploy --help + +Usage: docker deploy [OPTIONS] STACK + +Create and update a stack from a Distributed Application Bundle (DAB) + +Options: + --file string Path to a Distributed Application Bundle file (Default: STACK.dab) + --help Print usage + --with-registry-auth Send registry authentication details to Swarm agents +``` + +Let's deploy the stack created before: + +```bash +# docker deploy vossibility-stack +Loading bundle from vossibility-stack.dab +Creating service vossibility-stack_elasticsearch +Creating service vossibility-stack_kibana +Creating service vossibility-stack_logstash +Creating service vossibility-stack_lookupd +Creating service vossibility-stack_nsqd +Creating service vossibility-stack_vossibility-collector +``` + +We can verify that services were correctly created: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility-stack_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility-stack_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility-stack_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility-stack_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility-stack_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility-stack_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Managing stacks + +Stacks are managed using the `docker stack` command: + +```bash +# docker stack --help + +Usage: docker stack COMMAND + +Manage Docker stacks + +Options: + --help Print usage + +Commands: + config Print the stack configuration + deploy Create and update a stack + ls List stacks + rm Remove the stack + services List the services in the stack + tasks List the tasks in the stack + +Run 'docker stack COMMAND --help' for more information on a command. +``` + +## Bundle file format + +Distributed application bundles are described in a JSON format. When bundles +are persisted as files, the file extension is `.dab` (Docker 1.12RC2 tools use +`.dsb` for the file extension—this will be updated in the next release client). + +A bundle has two top-level fields: `version` and `services`. The version used +by Docker 1.12 tools is `0.1`. + +`services` in the bundle are the services that comprise the app. They +correspond to the new `Service` object introduced in the 1.12 Docker Engine API. + +A service has the following fields: + +
+
+ Image (required) string +
+
+ The image that the service will run. Docker images should be referenced + with full content hash to fully specify the deployment artifact for the + service. Example: + postgres@sha256:f76245b04ddbcebab5bb6c28e76947f49222c99fec4aadb0bb + 1c24821a 9e83ef +
+
+ Command []string +
+
+ Command to run in service containers. +
+
+ Args []string +
+
+ Arguments passed to the service containers. +
+
+ Env []string +
+
+ Environment variables. +
+
+ Labels map[string]string +
+
+ Labels used for setting meta data on services. +
+
+ Ports []Port +
+
+ Service ports (composed of Port (int) and + Protocol (string). A service description can + only specify the container port to be exposed. These ports can be + mapped on runtime hosts at the operator's discretion. +
+ +
+ WorkingDir string +
+
+ Working directory inside the service containers. +
+ +
+ User string +
+
+ Username or UID (format: <name|uid>[:<group|gid>]). +
+ +
+ Networks []string +
+
+ Networks that the service containers should be connected to. An entity + deploying a bundle should create networks as needed. +
+
+ +The following is an example of bundlefile with two services: + +```json +{ + "Version": "0.1", + "Services": { + "redis": { + "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", + "Networks": ["default"] + }, + "web": { + "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", + "Networks": ["default"], + "User": "web" + } + } +} +``` diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy b/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy new file mode 100644 index 0000000..bf0512a --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":447,"height":422,"nodeIndex":326,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":9,"y":10.461511948529278},"max":{"x":447,"y":421.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":12.0,"y":200.0,"rotation":0.0,"id":276,"width":434.00000000000006,"height":197.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":275.0,"y":8.93295288085936,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":14,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[82.0,295.5670471191406],[-4.628896294384617,211.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":285.0,"y":18.93295288085936,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":15,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":316,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-204.0,285.5670471191406],[-100.37110370561533,201.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":203.5,"rotation":0.0,"id":267,"width":116.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":28.93295288085936,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":290,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[217.5,167.06704711914062],[219.11774189711457,53.02855906766992]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":57.51435447730654,"y":10.461511948529278,"rotation":0.0,"id":246,"width":343.20677483961606,"height":143.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":55.19999694824217,"rotation":0.0,"id":262,"width":262.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Unless notified about the container networks, the physical network does not have a route to their subnets

Who has 10.16.20.0/24?

Who has 10.1.20.0/24?

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.0,"y":403.5,"rotation":0.0,"id":282,"width":442.0,"height":18.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers can be on different subnets and reach each other

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":252.5,"rotation":0.0,"id":288,"width":238.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Ipvlan L3 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":172.0,"rotation":0.0,"id":290,"width":207.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":3.568965517241383,"y":0.0,"rotation":0.0,"id":291,"width":199.86206896551747,"height":42.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Eth0

192.168.50.10/24

Parent interface acts as a Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":29.0,"y":358.1999969482422,"rotation":0.0,"id":304,"width":390.99999999999994,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

All containers can ping each other without a router if

they share the same parent interface (example eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":24.0,"y":276.0,"rotation":0.0,"id":320,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":48,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":316,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":317,"width":109.44000000000001,"height":43.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 

172.16.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":318,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":319,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":300.0,"y":276.0,"rotation":0.0,"id":321,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":49,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":272,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":273,"width":109.44000000000001,"height":44.0,"uid":null,"order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":310,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":312,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":85.93295288085938,"rotation":0.0,"id":322,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#434343","fillColor":"none","dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-191.0,222.06704711914062],[-80.9272967534639,222.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.0,"y":25.499999999999986,"rotation":0.0,"id":323,"width":135.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Physical Network

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":53}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#434343","strokeWidth":2,"dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"orthoMode":2}},"textStyles":{"global":{"face":"Arial","size":"13px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117032939,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png b/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png new file mode 100644 index 0000000000000000000000000000000000000000..3227a83ca1541ec68e06b0aa105e22fdf5ae9e6f GIT binary patch literal 18260 zcmaI6Wl$YmumyT>5AG1$-Q67ydXV7m8r+1pok>`#RfTA-*)bij0K-0C)TQ9yW|^N>XfP5?S3O{d6!$!B8y2koK&0EoKg z14FAa8wP53UX5(L=%bDVL&q0#^aP6T-8%nWjl|N@0|HYEm_x>-zYG8X+2Wz0n@ zzI=J7-uZdIPqutrz|x1Ai?3;VhTlje(*-0XDKBVe#9*5bMa}EV0b?Ns0MG@0Ui2`_ zyA_7pwKNt+WmVFd);j7*KV7ZMKpvgIJVpRB zk%B`-b-EM-07kw-z$Bs2hOsF>sATd?h@Xx{3=r`1M{l@jXOaH_z4MhL$3kWlz_!oll8RWh!Q%ROqdsGnb z(TIUiKA{RJOZ(q_NwQH7TH06B6ls}!%=IAY1^)X7pr@=O1Q$j*e|F|%#TA_w-Y;kC z)_`zJ#`9NCCW{OdMugwWB?QLTkvyi0WxiQ_#T0~ZQXY15j6v3s1PYv2ei$9RP7duU zqHT!9#~0bi?!lGe=?X6%oIH~_-Z`^`G@SyH#Q}0r7>^JBu!Keq-(wCBenOz|2O&fP z%g(=xOm2N&(JGPhAZ&+bB(oa!l?)rZAHYn!%X6=Q?12*Z=-5I<(n;+L6OzT~%aEvO zgv8_ogMJaz{p`T;w{DzN-7~1pVqIlvWlRHg>_wsK z+@e+a#w-GRB!9D%P4X!uI$n1QEJo%7!P9sat=Yi+8?`ar93aXXi7MWN=hA*yn@Ikk z4&`q8r987}hlINhTR;^mdxMx-+>@>TP$>)3LgwW~Dq;mQ#%#p8{3(LFu9AB9%YwlX zX^Jt4C~j&qDj?O8cx?i0hwDmDi>9@%F0a|XWrlKxLu-fY0YY#dF;{(Xz#B^()XVp~ zA+#qOWF3Pblb9U2Xb05_fp~})4vZ# zE$5YiKsXBY*+b>07f3O)+SciM@ArFu`gqLyfO499UTM3S_#0RA)ci8kaC()t{CdH{ zUsheO{k`w`&G72YZ?(N1(zlZBufdwTu4o5M#we0nv6WmAo=8Wu(MTcf4Y?&QG)0oU znn9uhM0uu(8;#a5mXez3LPqk*Z#>J&vJ<;CvtdznTbY7$yAnJkFbEtY;(+bo<^Jk( zA>&D(D;*-QXyd%8x~=Fqj7_gcZx1elO`pe&sgB2$F=5#aF7~q%`{`%x&~7&!Hn|YE;!;Q?f*HvOpP-Tm=35u|Ah?>^rxk9yuVL1k zkP^ZRJesL>xg0b!OlMvo;5~)DutD19qHL%dXWPo&|3$Fk!83qR$Kc!-p}nmF?BkD*xxZZfVH1jrnX&-rzd4 zB9>wgQ1hSXV;Ch*6{l5Ws3&5^Z!QG)EfD;%2R=$vYzyZBIc4%=<%&9Uj*)QAJ~z^( zFcF@R^!oic!@aLO^ zn?UzwRe>!I+wm%NXK&$s=!@y=yA~n?K4GTE9nLwYxBrB5XVLJwnD6h{Z+ixERd5+Q0=_af_Z{pPmwPMNNVo= z^rA8x<0ts<5;8ZH)PoX%ne?R%?CbFx1>|Ft)?W5U-c9Y~dz5En8}m-QISYB@`4mY4 zW9t(O60{?YD}w}=snv7$F%1kM6P(L@8S?sxGEPWj|6uQpuv}y$PXwxizk|Yb$fH2j)Be<57<|=?) z7=toefVrv94SujOJ#Sg}yqaIWaY&Y??*6k$VyYbK8$AKc3jb%HTNw$Jb39d2mjxM@ z=d=|(v4Gd_E2cLLyx|}PT6r4s!8}pN-omkWg$xi>=)Dcr#<%ZPVn6f(SlFy~AHLY}=vShNZvs(>Pq`tkJX`n-95d-;rIln`$j| z`F``ctP$Lp%wj`vVKkAJffxJ%mR%E**F(o4QG1_%i2C!cB_M0?~vd%-A{K#TYrfuY6jxF_V7z1G^W^5 zTkPJ+SUdJ+k0#}#=jb4>-k?DVlkLplLG^rRCC{vgKNjI2DZP{(l3auvN}-9e*HX3E zY|ZkKA5d6X?|Deh9z~GQ@vQf=zMECF6tFx~Cf96uRL2Dyr0O3vRLGz;L)IUN?B zsq%We9EPv;iZn;9Zw!hiW zLd0HaeQ06T&$dp7q5FmKY2{BO06lC!TZ_Ri!ge{a&lmz7vGuv+$C zbi2UsYwN#=!F=TImu$daixwP`59?9biqDER2Aw-gPW8$CFsdmt!!B>PT5w#|C)r}_ z2;_M;_b&-|Y=^zpH(i*;wxxzVY?Or(o&kELz~rcAsz}r4i~dI~HKcqu_#{jl=wi@s6Il?Ufv3kf$F~w?T zXS0y$z?7NWvchoB$7{tI#Ro0uTOhEjvHzIfn}xQ_e1hz7neMVY0$xKoF~@7A9Z6Uv zjoS!uqN>pUm?DoL+82J7uGh=@Y{gT@)8l)~nTHM}BDEce8f#*F&?`G>1XN}0*gkzG zYab%K;%OL+gr5|jq|5GB`|;AA>;DplY!-@Wne1udl)5|VjAOAF3mu&HBp4$r3eWWu zGbk%;Co*CYpfqG1SCJ$%SuWA1I_p-mXg+p95oO_zl%Vn>hFagYp4ls4X0;sVKXL|A z{9GC$LuGOv_`^xGS2dPKM`&)S#B4DZIYEPdjfYgGbCPpwzemhxK>YV6e{jy>o1q zL2V0HFsggj|2)H%xFm=oCa1Iz;*$uC=POKh$r;s;^A6XJ@FF^4mVHxM=l$BmB7biWn!)HR#8Sen(!Q7)C~Ladc&k82OJh>_J?`mN<))eYFWR$j6;7-lu|#i}e*ftl`m>F$2P~R$ zHbl2-&`;>f%_-Duu(o5oF#7o8cgir@y3)Tbnrqf)bYz%&RCY36ECGEAl%NaqW=Z(< znTQ{!!A7)0jX3!%h`PeC$J1#=EG>{YX zf7soO!SffH(yI~zflPj>oQCo@t-lf&W_Z92tQ&ZvF0L$8@B-K(#Ts_P^WjPyv0c~- z^`vN@8u~lK9P`oGLASHezt;*GG*p)HXp+nMY@%4K|HK%qF7io#l|7}S_i&*S7nhUsDyo3PDMlyrL`O?WgU7i!5kUL%_&l&#LbkI zec7`)NhjISeE>!LAyZl@GiVcMQl9)C{q3fOo4?KQB5WK?^6s`oooI@N)o8jP^LKL3hoE+ zf2>c?7s=!J#GA#Bh7D}hhj=I)B!AA^aDSP$wKz9iHAqj~_0J=vviS8#3ET+ThUMYT zu+U#iGfvpVV2rZ+^kxq3EvtV|=*j&3jf|iG5|z7@YE1rCg~eLD;>eYV0^CU4A3-FcXaU4u!CqR>5Hl zd6qua$)>X~Mo4r8?m$#l&>lrw+J|9rK_+EaV|k5i+CJw&@{6QIb9UcoXT-@`LqPX= z@2LgNU;N_jmXq4$R9!QMHq>S?1x%Y5foxeE*GN+gB0nym~A+YzVc9 zU;Eua>iH+B215L3o(BG*_|J$>^?I<$Ih^ZSZyAk`Z(hG^$j>pG>YAN9X-AWrU2TtR zy?oms@-**ya<3-)vVM0v^AHg&4w5HV!?=1k$Qyxe-u6<-NFj9hq`v|6kt#j{|E{9_ z5mekudG@#+N6?+Y0vt8yEfdyhoD&9CN=ba8bfU>rH|CGFbFw$K|t&85e9*)7!M`NgD<&I4y=aaT$oI`wC|^b5mEAEkDqraetCI@ z-+g*2Xo<5lR;PBwMnSn1%pzOnWL-(ZL&Iw(wL0CAAk_YhgLL`<(V!oE+=6=Cq$Q8} zEmZ3n?!2)Zfrt;EIlv{tLhb<7ooMUl(oJEccYX#@?z4vb9Fl(L`x=o-4NV0TQlv)f z_fz9`q{Lo`NhyWhg{gMIl)3%f&fl6e49qG)NS)IJFh~T3otpcK(}T#uIuQPS51t-! z3wF8b-KGC1rEQC79hVisWmDd*0kueu3I~dvL7>}j>@O;qcE^#3$%N4j3VJzuZ$n;s z%ZjphD|29+=ZGDyJL3a?L$ek3D}8+cDS|6VooLE%{DIgSYfX3A?K=ZFltvHJTz~oD zG`L-6cY|6tTYckE3K)(x;F%tbyne&%i3pus>yR1YWX^;>1bc-AB_ESPd00(M2c#!h zQ?E1L?8xmUSjDM5IPe2HM<726F*gMwG8Cpx(*808RW#Hdz0fRYWP<)+7;AL_d=b@d zR>CkiFt3y5YSHER+rM5oUi+zEBt$$EFf_3^V&p5e{{1kZ^1)paM(iYxRPCknIPGuq zMVh#Ym|YQu<8SVpX-77=!cuiq7Q&L(v~EhF{tGM%Au_474&ZX`qAZqOO%j#sI#6crI0s6gy-PogU(m^H|vAak_JK;TY^q>7aM_&(BRmQCRV)If8U`d z(#A2~)7(WLxgH8sWhf&*$6UF%8e1J?M4emU)3>jx+4)9mDI(yRdj8!bzoeR^@}&&W z8{XG7N90j6?hg{g$l=u2#ZJW6C8)4)hT4P<7vM62l55*lO4Xwf+gJql&_Zow2(yrW zWXZ@x%QKs>ISd}n{-$$%X!Jktd{Zh)2hlLlbZgOH?E zBTgBxF*YAQ6uK;4Z)c8hb-)H%|2@s$Bde$U`^#=GHBGz zCI0QztR>93kKu{T-Xq#iY*_%N3aL7i5p0W`x3?zBe7W@e>AZ0m@{ftO<%^T3<3sxZ zH}`TDzmjQywsB6-R0+MQf~477*(IqvA@DkQ4)|zsR|RG4lCU62or*MyO^jX->0X$1T2hUkIC#y*XSfYWY%=xE zjuW;-ifb}QEQVGzFmGhmF`TVaiKE%G)T&KHVj%ohxT-9Bt0&u^O3Ej*luP(^yms7S zOlW>GUo&d)6`7&g(NG7lSN;LQ>-1?J9heLiODulF<0$vH2Ko!`w9$nZ4=$#O|eAS^G53K{9f1SzpXVlAG{i-8;oMhzcstd1R;i$4q{(g5JYjl;B-E>j5?8o_3$B@{1 z9Ghdc!YH9HjFl@-p35w5K-!5-2onQ;j8y_zS@m$uJA?m??<0ZPQkp~gBl^A5TDG?j z>`m11Dl_rq8sJN^*ik~>_F590nK<58GJl8t6r#GxLD3Kdbgo!5<6A>rU08s!-5mi+rT-1Lfs+3qYIZz-) zcm|_FsVog%T{Z9rP?%EyZ;qYAx9mh~XwX8t`)^^#NczBLsoWkv+A@FW)r~QmOz4k0 zqM-`k>IFKf*~FtfJ;Ql0!<3hRd7HzRMp?qd)03tDRo<+@63DwPP$yWrD}@S$~xxJ1zN+vzE@*%OxZUWp`mZKMoZiq^yniF^$sROaAF ztYBOQ5|TB~pOHCO$@^%13XwMcMp>b&PRk%T_JK;tLaJX7o0i>|ywKDkMc*;ZM<*H1 zmVBrLEaRm?0N;gc37I}}%kW6f;#;<{_DAj0I_bHHJ0#=&$2mVEPn#^?q!Q9ckfgX9 zP%3_Ur~@ifD|Mi#To~j{kJSLz*YfnV$c}jA#Ds2TB?CfxduQiO?diuVsJbd6F_5O{ zDVbIEmL2cxqCxdAp_T89V^8_gWXgj6X;(j%%sQVdXDd?&rdbwHK!s?wP{P+3vB=1w zo)2c)9&56^${ZZAUh?YU;Mm5&lAI)zv+MfCz%>VL5WGl><^OgrzsQX45!U>7m4B^U zr;mCfa+_~EI%T@<@@IN{`6B!zT%%-mJ{<6qlSt|Q=U-kmpvrVjv-N5ZKy8YhoSH4$ zZZo+v&13fqqkV06z#%gZXRMnIWcgy^!92-1k!~z2Zm6HOT1^AhO~CP;WQ~^xiB94Ur#J9jM?w@ilch`WKCRsASC>?DCfOaa`ep&u zN&XkzuJDr#I~N-Qe77>Ob?0Qx9avkA#8@Jsw4X~&kr3Jd2+civvp;#?43FNl zS9uG$1$mH#!-K`EWU{f&`wc*dDzcrjB=ktkE^|dimZ$K`6UB50%H80{eo0tsqLmEN zVAhq+mblRz+x520I3^1vI(e)c40TY9{Y!ODmq}-~m+~^{C@~j3`yP`XT}Lv*LrGMR z=FOzdYrIH;P^(>kh7>Ks>3+XGUreeR?9=*R1cn=Kg5t(3cqwX4F0mG998NYSJYNK~ zn!zPCoRaecBNzCtRzy8K-^sZR}uw}W|XmlCUlpf=V}11|M3J|&SfE` z*&G!j*JH{+vfw2?oAdT9(|YaOUYERd`gXohy>oi}$?I4Twz9=K@=OENOvUu+jtfs7rmT}Pfc`gbFw0>?M_G$M0*YhDffOcykB zSUeyro(Z$65FVG3E@<;H@a!vt7qH2B6hVoJ__2H<=3lf?<;0P2Q*ExWQ=C%dZ$FmO z@~j^uP4GIH>?F~Uz0p1|7v5S)rSqIg4&-itkqf`#ZdGNsZ~rk*p$MfSgoo{Mejn_19oQuxL+X}`WXEcb*=12X`_`VjL9XMyel0D{7 zU2&clVO>RzUkHoSrL;#GAnAdNu~H*ZU%~H^n&XFZpJLmpos&nv5$Ir?`ak;-z@v=W z11=VBE>@YanO6!+`;2-t;7omez=@^Rx&iLv8u(W4zd;>=rqZ)O;q}7tY#RowJqfzsqj+eb#%PvYZE* z@vCHaR4TT%LW)K${^f^GA{f6|vdnLIFD6Q@{;SIb{2JW{?nTNM_9~O^jW8ezEo~v- zq$ORU$H?>Gswm;)uhT*wJFL0U5brTnl4;poes2RxUQ*};>2o+swK-xXFGWH7|Yvj5j+X2zTGhw!g@4>Jt#ouOz@UE zZA;qQ2x6`(ZJ{R_=RhB9!5SE2|TG=Nssc!4(zW++6latl&==sNAlf znZp)cO|xFX&E++jiwoWu$E-B!@v_Pc^!dLSr_a3mvSzs0>t+_cDuXP^349L&*oG+_ zJx1Onro!7kZz7MdE4pcFqtjs+?VtDvB}*y<;Wxstw>x%}K7Y(yR5rRB&@;SN1eOy` z+o8s4q6>d&4ocSLT$QtVXjXrSBYQyT*C}5g-k&x{R}g)m0u)?-)POVAN?Pj*F-w;N zAGttpdZni+MLW(hq>_C4CvWRXTQ{B4IQZ};t zfCj7(Xt~%y8XVYU)JZDS?#t7JK=@^t_oIjpOq7f@w z;`p`C26J;3jE#E&7UUdR$$7tB83jcabI@LoQb~`h2GVOxeloh*Uh70KJZOXIt#Lsd zI}m>ETK!y7A{nYF)bd7VjkTqz=nlPo${!xY;d%~37Vem@V5w|a(c-lkrfA7IudAY( zo*8q*XzqbdOG67Z-QdmrO+FgYE+6(~5IRj9&IpNF;%Vo8f@JzD^SxT`%g-Fs`gkTh zutYedybNXt1dEgw01vV(#leHm=fo;8&#vHoz$YtPHo;Uf%p_*V5Z9dyg$JREiCNB5 zA zEO^oEVHdHWj%rt9em85-_mr>9sYRDkP5+LAM=L>>TlOCy!8dviR3$H@IkA%SqrL#4 z8vDx+B?7`b^L4S?8>SCWA6Gx3&(Fw~*VjrS@w?Z_*P354tSs&07D(zM z8aU4Z5@MaWMHDC_iNzTO4t#|f zl=2Iiop^~f*_>Buw0WVxpstXzI*OxmW(1Z436J9twion_%fJg(1_d5q*Yc#3d{nld z-I9UUN0XOkID$A?sM%`HC3X@!Sw`y8w&nA;ak)zInSarJ&Vje8IBPZYs>AHd2f`B= zzgX`{Yd@gzyU-?Nau}u8%*BLSlm}EDH)0J!7l1p4dcINO*vb@s`eqsBqg?7DNEt7> z-|kGbxW~A6t1fU%xyD^5y;|cHmcJ%ip?&$)kYP}u0JYJ{KV(A`DJ{pE3yt4 zM4ilIClq;rW6|wQE#coE#<94s;WMDS2+A^Ie~|u6gfpy#8Q{Vui5|Gf;a5yW;(sb@ zwJ`7nYOpMf6&8Hp5E6euxzALT1kdd?LTJ7IWO2o1ehJ{q>;*00~! z3yo>y5#01Qe-M!v&xyCJZ9(CfyG=xyPtJ`|YShOw){!epHxtNc$4`?y@Urm_H>1`z z^z2KF;odTo(a=dPUlNK}8CKQ*#viT#BTUVyDD4ETA%}MNa(NY9qcCRg2CW;%FzU9b z`Qf}OTbjCfLZN+%u^tu{=G3;{sQ86gE@*LW9yz1n91stI+B7??%F7(KXjah_0%M70w0uw~z4S2K#R-)Ts+r|rV(f=UjY4RAu;`m}7^e3b&LuKoUR{77?`+abD z(a%KcmnY=K_NEL{o^^kw&M~4H--%=mLBL8$yGMPB^4VqzN2Z|$y*ll<(b=jN;Ru9M zmT1ZMGzg!^QcR_#J85JW}m%S{w4tpau3Bh+2cu1 zyLR~C>qvZd-@Jw`!eB+Z!73nlPyb?=J~3Y&1TDbY=@ndKmcoIgmLr?LjVeuJGP>qp zs`eGyyeE0mw<*W*jMK4zfQ(S3AU1)>G>D&MB!ZN&-1BGhLmA~6uRkr)AdrRPXLs1- zb8xym-7@HLbi!xlaHq6#B~DfhD$Cax~r!r@4H&H%%O0YP-No3YXxj;+naW{ zJmtSD9W(Q7GuA6dspe?Pn*J#0wR0Jh7Fj=v_sW1%MOypm+r?<;jdLDqFs({EGc!~! z9N5)K>F)8ez&r)?JrSA#@&a2l^f(pyK$rGuny#z%6zK9)ewBy_?DBrd(?)hZNt%J= zz^^4WD)MkHtAoc~V;BT2i7sg9zOCD6Ez+gvUZTPklb56&U6qU*L(2-}ulpIZ!XS^rYQ83bmQzLzP!)w)4f*^(&}n7_KG z03c8?QdQc;z!FqMf2)MxO_M4in9ZT-L(0j;QeeL})>bzB38xLHt%at5)dlaG7e+s( z>AXiz&-FY7f0c19!qsLZwaC(>iL)VU#OtIAJNmRAO#SSwpM>!E5l>=t6>ezI*ML}s zZGOF;(bK&!1R%=ZOpd@P7(jOLI{NDtx`ENh<`35g$@+)bZq_-uOAoyhN`5}WikFcT zhnc0;p#I2~1wrr913&d&GE}WVc8(Sw>wqUSu4gG0|C(Yn;@*TI;9F*{pW!DvA%#|E z8Cz9z+zhlfA_}_0@7SnCg6xF!+qB3n$ypPypaL;e|{zyvFx*a$Kb>1g5n-WDa`yB;gL<7 z6StQ}cPUw*DSsrE=m*qW(7p)iGRJU+(GMRLN!Nwj_g)Zh?JV$VJ}p03ACgN*oj7J5 zBYojC@=|y}3_cvQoErfwactWc6*g^*giL3!&b)#Q!XhyCVA4^qeSq$t4I)^sKJ1;1 zC4N@1Xd?e*ME_1d`*#l+b`wtCo&gJ(q8M^^E|P=MH4n84Fb*u5ed}b6=0U(;EDbp) z^P`+cR+Zf{k*0uiHnPgHni7>G$q=^^rXT+~H#JT{Q~qC{TlwCzWuQgW-8S4M*NTTEr zaT`Y=a!jhHTVn1OGkuXyR_IUWRF(nnuiNskz$zj!Bs7!M(?6Q0`a}>Y!4N}@9hg@Q zWPxXd#T}1_x1s4b?k*Y`mTD=H)T3V;MdCg1a;MACU;odjaHt{IfJK<$H}$H|D8k}3 zA-`O%5$BH+y@R*kl1ZLwvdS17mdTd)IfZ0;dIg{m7p*hu4bPiL?@a;_>aSnxDn?pr z5|IVfBMZz(Pn_o!Z10Wgm_8SLU6S~ww0m>ET0TG}Di|*Es|LI)lR^JPh%+88%5lX? zCL3k%mf}<#N1R!bUGCpgo}M;Vi@yjb6qH%dLW712_cHCbSK19Cu>eI+erErHh-O-H zG^XT_)Pj|m=zE=gEsZy>Kp6#==aSJ3aFyH%jm-~nl@tkt*#ljYXjmw=k@SR04BgTU zy&ZggKYC^cIr-hxZ$_A|^OyBi>GAd2ThZXS$6yAwshKJEcH~NWWYl$}*RZEF%p!4O z6k|tsWrOX#mFZn4DbHooDZkrb00sQ%{f~wox^)j(PRYv0yB&kX3Fk0)l2z=?#x*m~FXEGfqXoN?@|qa^2Ng$%=Rhy1@K2>B#_6Sf~uG zv?Wpzy38r~U4*S;-jD#v$nbg~)Ds z-V1jS38MSa$3U!PWOv>AG!KB-3!SVD2~@J&Hm$GJ606ezHEt5R!^dP*Sue2R5LlI2 z1-LyCs3tiKBP~1~b7XB;$r2_FqN=cl0N%;V9|aaY?B|!eYOM0-ZbpIf8n<-@TIxp9 z^7c-33NeU<43x4ZFdk1!;pIA|A}N-~J>BmGIIg$RP_gtk#`@VJN5h?>C0I{^b3N~1 zL}T7Y_*AY`dvMB{*^gQSaJ{(MhhL$y<$292A*UFozBZM3~(;sZPVTPX` z;ztmP1_t8Lp6k=iJq^y+;yra(H3?$I`wt!P&D$Rqaw9m~zrI7yN>tnfofCm)94zL< z{8-Whkl46_vzx--yXbF#KUFaL+AsF%slVT0W>T-qm6{TrH%TU1R#Dz#&sIqI^kc9q zw;ri=0GpQBmuLj!F)rno96#NBMH7K1mIV&TBdM=`PwA8(&&;y*v!;cQ2^g}>|7IK& z57obBP%K%COCx4G=d{oLt^#C||MTFtmskAJG=zLftXu-gq5R2!)-dyNzsq;AhpJ*K ztAu{Ph1kAg!;d3$9)mK%N>LoaJ6GW~tMF$LmgWBjvKBQoRUp>PGL|Yg#ahkBU^EbI zCN=B-|3LY1ADW^>m0j6H>nhiv@Dh%uM4VmO^uIyUY7_l5?Qaz;Qx~5a&@|NyX?l; zPWS{aMqby)>DCcpg25VM8%gPd{ z`i70!YNM^ddIOqTtT0OkHEjp@<;h0ZO4uM;keT|rp-APLsivz)9fl6|zTY{|<3`r2 zp2t(8iifGA!(nH(*QDE|&z9Gt;4Rep%U|4mZ@RU#qedH3g)#NR|G8wRR*hT!yYb=O zXDa#S{z1Os6_P4%Hl<$e#142Zt zoR|#2%+LVsMg}7bni>hQ{pP8=FrVA45RMiq8$yvnSL*tL1$I(QfS%M(yfP@iS1AWP z2lIKKPNAM=3+>*#(-?K6gtfuQnxk1THy7?{9n%Sd!kIXmuYZMOhTWt37B?yU#B7r? z=G^ACYZRx!PSFSS6NzhE# z@p9cd3|MUYhbKhOR>}1xDRPICg~677VMF>KM{yOwwG8jd7oE(Rq;LFL?168HD!j6u z)d2J5o;$D6xnA0_@ZFBP_J%DqW3f8XN|uy@{jn%Mm#`VpCJ@E4D1=!QIe1d?Yy(Xc z^!Zww#2ur07i<<=5|fSP_Bw?qoU{lGBg;fMyhdql2A=R4Z6;84N?>%8R+{`C(sL!izbn-3vm$vn0B+f&fsN`3e9f{dyAlRtYQ)7R&;f zo*r*1v&TM6$SXV;|8_Its9kt?6T)TK)NxUS7*1tdO>{+`#NYijV*`DvbsCLSv@;ES zH;(!Dz+Rc`LkCBnZ4O6IIJ}Ij(-iK0oG7sxy3yt99q~GzzL(svlWZs2^>>VrPdQPH zpw8bvR!qC&;aIhf*u`*>;K?rINRW6-(G<<#iDN{W6j1z+D--bGVOxfI!Jd?*GG@HGRC=q%kZm^;YZa#SvWfyxZylM?9sygkg0xe-)JN$i~3j4=>q5^3lW_ zeKCy?#k?>FCGd`ycvAk$(@B1A?dg~tl1$5R1!P?();c;9SYAsr{G;+cQTl-icPs#uf4>R)!KaO&u-5@<9+Qyv2 zgSmcVgp&RhF-Lx9Fr;BoSG%pcxS#E*M)5{$Z{6oiIA)0^`2@ahw9T$?)zep6<8@sN znFvBk8>g@0XERfv;dVO*95Bl7SXK=9|5Ku%R?vlcUbP+!ABR6X`x9tPsM28o9NPb&|y<*Vh|BxTP&W`MwosXwnC$;YCxBRwUUIQMF@s0~s zhMr9cE`1N-j~Lsn*x)W&h%1}OK~Y+-(!IdT4LCYMTgSTKNuLwG#SnV9B^0$o^I-&d zEB_4%Tdej+$XUsazv3nMegSFWg}`zr!-Z`38hLMM_dx=fr$EHXl+Cl~p&_DwluAB{H~tZkx<-hN;&4CCl5*aJQc2qi7WNiE_EbCV{|Mw%gm zU7(B4SKd`!FkvTDi?}!5)_zC(qda#I8p9I+@`^e~n(dK-F%=;)_Ij+JIAnVYBo#AE zNAEvV=szD{AkJY6Lx)#1qa`-2sL7jZM=%W?9RDQ>&TH~rjkqF_X4+h>?2S;6q0mfD z^sL&_!jNx%GMnaAQ=w%3-sXl8aH5+3HF9b5pfq z`C~s?RAg5YSprv)0CvY*>~1o@y6xaU!E9r=hAU!;bt7L&#uJloz7o;bTLQ@hOT*7iQmj=<}S* z8JH^4bM=LI55#zrwlHaIVwK?_g})Rp;Gqjc}(SeJz3?Xs4&({VfvzRl&J?ISrJ82m~9yD z>~4NbSo$qEyF5}p|!AiOZ%uKV{J9q|X2bSTwfkBzp zq@9pkm!7bCcfIRk{sSE0^%U1zxsv*Ssptc7=_tObJsm;QyfBLU=(xs`m9BukN|8_5 zDia)iupL;TlV(JjUiPiS7yqG^P9VU>Z)u3~nGf zJfmNSh;oc(4-Dc>)wXKuuyG=r%n$~OT>B|)nB1$>&&LFm%^LaVf6XBk55vOp;n-8V z>~ICg?cJg8EbAoN-fZZs=`1NYH3}ehhPQ!Ln*2A@%HG&rjQc`)(xNoq7iQoiaZ+Si zGzvHwmCjc<&A1_3Yk-f;*XT~KcR)C3pG75LYWD6m0WWRX51~AhcsY-c zMZP`DQqzpvRmFOpWF~^ljzWxa*@(`&NY$)-{hv)RI(nzPk-Mo451`P@ZA#P|fP&@t z)DqqQR_11mtPm=ESM~X`jTKE3+#*^Gp#Y^+09xTH7FK_C;2m19OdMvPe^Yl;wp5*t!ch-YkD&-ho1 zoI$$7#aIZ&^zJ6!%H*Mli>CcaOPqULQHg^k@gW?ykRae@*v7Pgex5B&-!puyro;E5 zY&Jon-MYCOT?u`(BMS?~j|v=NWq~uFdkSaO%D=@dMP3g^v}IxWy7g)Ic^Y6xu13EJ zQe!HY>%j;7gWQ(FocLV?%oPxptEmq0?EvU$c^&wU+C?xIm$A~h7kXzIycIepNR_u_ zgWyon(Q5ODq@muYEd-&I^w)hdKjn+uAzpj8DyF@x(J~4M9iIpkp0n6rl&Y8Yx*8SSX#4b&t6MtY-+vi9=BG05!BwEc7Z~- zwyE}!zIs(w>9rhNTYsThyfMG|@#yNaw|o)$E^=ru~97`Wc|aHmuv!iaz7SWsgge3Egn&R07)Tuf1jRb-a#<{^6)!Iko7oP}F* z5qbk6nqWmGVs}aGMvZ0X*-MRl*X$|8@IltFJ|Pi=u_WzBTutdTTz@j6Px<> zRukMQ8@8y;1k?ixEt2%P0fzVT^6{HB+G;v)8DLlftHq^IZ5iWC8_#_|>+)Xd>fMlc z;(Cqf=lgW3+Cz`tctK_Z>NE{H>Jp|pMqk|xi@gdByYsc&=n}0Z+(%R2(W`~Lt3#2* zdO)S$L#m*I&Vt&*3g+r9qUoeTgFTe{V@Mw!^`IMhn)SsvQR~5k?4`*B+$kH@C`@oT z^?*W)WPNUc;oVkIw{?*Th9yufNTJ#;w8;{Et#hP)hb|j48~$`KLQ{uLKA1PNdX#k< z(+KCy2%UF{`eT9G$Q8o_{c?l4O<%FsCHO0FZD;3$pxX@V^{hd(@nW=ppxbSrX-xTP zf;x2L;6X(&575D<-fqX{Yio&&E(^7;I6r*q*2vQ=0a1JgYZq|GDH~b@6C7|2D6~lU z+yKKnr(q)l%wK&q;8Lj836t@i+I{)(hYKStSUg|)ZJ|`qi&y<|X*}8{jd!DS15KBh zB^$@01$&gwg+zmiwP3$wtvXkvvkQ}nV$easpaxc=p}#dNmRc{}{P3w;qjt?w3{is9 z47hpG_4^#1EeuBTuswLsYekDWh|x0=UyQqzEP?oci}16k0UxbHl3h z?pe+V!|JPnl*A{(m@!7^EVqp0py&{)@-kH#;Zb?Z(@DN2vu}y6{$!TAk(gyT^Og#B z`wr8ag5jhwiW#8?s6&y>o^3Kmu@@a=7StDkwX+j?01LIg5PA627^7Ep5=@%tiBpZNqnCfvqrQ)xzWwjG|=Qqn)&cF$z9^S{JzWk zygtOc>)O{ulgSEIT!PtXW3x-|6&0188+vC6-}sqN!(1=wP-M3dZT;KB^3g#8)WF)= zE*kxj=p)0_dYh7mPmPhMIW}#g^0gBeSEUQMQ#P~+CM2BtIR}Ln4WAoUop;YhMi^FK z4WuLrW2V5#Q9LPo<;s`*X(l$q#a?lxckao*-sZ`F`1gxXUL*vn|I-_`hu2&{L?`X- z;)?wF-w_kJIuuFtP2CKb;{Y8rKn<*o&lvkFyJPAOG06{~8Y5q`n1QJ19_!f*I$PXn z8(P$60_wLN6k0TVZdi5RErEhF!?60Q#idZKjM*dF-HnZnJ^Kk!-{DockerHostUnlessnotifiedaboutthecontainernetworks,thephysicalnetworkdoesnothavearoutetotheirsubnetsWhohas10.16.20.0/24?Whohas10.1.20.0/24?ContainerscanbeondifferentsubnetsandreacheachotherIpvlanL3ModeEth0192.168.50.10/24ParentinterfaceactsasaRouterAllcontainerscanpingeachotherarouterifwithouttheysharetheparentinterface (sameexampleeth0)Container(s)Eth010.1.20.x/24Container(s)Eth0172.16.20.x/24PhysicalNetwork \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy b/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy new file mode 100644 index 0000000..41b0475 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":323,"height":292,"nodeIndex":211,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":16,"y":21.51999694824218},"max":{"x":323,"y":291.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":241.0,"y":36.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":41,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":85.0,"y":50.0,"rotation":0.0,"id":150,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.1159999999999997,6.359996948242184],[85.55799999999999,6.359996948242184],[85.55799999999999,62.0],[84.0,62.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":22.803646598905374,"y":21.51999694824218,"rotation":0.0,"id":134,"width":64.31235340109463,"height":90.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":43,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":87.0,"y":24.199996948242188,"rotation":0.0,"id":187,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 192.168.1.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":147.0,"y":50.0,"rotation":0.0,"id":196,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":199,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-82.00001598011289,6.075000000000003],[94.0,6.075000000000003]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":220.0,"y":79.19999694824219,"rotation":0.0,"id":207,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router

192.168.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":27.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":129,"width":262.0,"height":124.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":33.0,"y":157.96785409109907,"rotation":0.0,"id":114,"width":150.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":16,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.9951060358893704,"rotation":0.0,"id":95,"width":62.0,"height":36.17618270799329,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":4,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.2300163132136848,"rotation":0.0,"id":96,"width":3.719999999999998,"height":29.7161500815659,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":13,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":99,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":99,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8599999999999994,-1.2920065252854727],[1.8599999999999994,31.0081566068514]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":3.2300163132136848,"rotation":0.0,"id":97,"width":1.2156862745098034,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.292006525285804],[-1.4193795664340882,31.008156606851536]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.5073409461663854,"rotation":0.0,"id":98,"width":1.239999999999999,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.4306688417619762],[2.0393795664339223,32.73083197389853]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9380097879282103,"rotation":0.0,"id":99,"width":62.0,"height":32.300163132136866,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":38.326264274062034,"rotation":0.0,"id":112,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":157.96785409109907,"rotation":0.0,"id":115,"width":150.0,"height":58.99999999999999,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":33,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.94518760195788,"rotation":0.0,"id":116,"width":62.0,"height":35.573246329526725,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.1761827079934557,"rotation":0.0,"id":117,"width":3.719999999999998,"height":29.220880913539798,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":30,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.2704730831974018],[1.8600000000000136,30.49135399673719]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":3.1761827079934557,"rotation":0.0,"id":118,"width":1.2156862745098034,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.2704730831977067],[-1.4193795664340882,30.491353996737335]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.482218597063612,"rotation":0.0,"id":119,"width":1.239999999999999,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.42349102773260977],[2.0393795664339223,32.185318107666895]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9057096247960732,"rotation":0.0,"id":120,"width":62.0,"height":31.76182707993458,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":36.36247960848299,"rotation":0.0,"id":121,"width":150.0,"height":30.183360522022674,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2

192.168.1.3/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":102.0,"y":130.1999969482422,"rotation":0.0,"id":130,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

pub_net (eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":93.0,"y":92.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":14.0,"y":114.19999694824219,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":235.5,"rotation":0.0,"id":184,"width":196.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker network create -d ipvlan \\

    --subnet=192.168.1.0/24 \\

    --gateway=192.168.1.1 \\

    -o parent=eth0 pub_net

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":45}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":6,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457584497063,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png b/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png new file mode 100644 index 0000000000000000000000000000000000000000..e489a446ddd255ce9360445f0f895acad31ae214 GIT binary patch literal 20145 zcmcF}Wm_BX)-MEt1b26e7k77e*Wy;ZSn&jRC|X>LyHhNz@S ze8i0cV6p4QB~YT4VUhO25)Fd;Y4*1#ZpvVXQxpL@g=es)ECJoB_ zyXF_<{ZZ+Qd~qMX23VTO0}Ci$EJ^VH|K+bA>SiF`)Q`u6!a+qr=091`>~XaCToMYq z%#PZ)GO53SJ}8O9N}r9aMcoYQQM|NynYTCxUG1+$oqx-fFj)QU^!q?7qhN(_vcsMC~IKT z)i*HpeMViZn9ole+tgKap0V#v_U+Fc?`5d5fT!ue0mC!l_V&Quf=75Qy*au+MC7l- zeCh8?un+IInI*_z|7wZv5M{;bNI>`**5&%Eq!%q41!ycdM7^OH0GJf z#?QYSMbkM*`&<<-gdC^9X2vA8C(zKOe-NE(1VgZ#JZJHk3m!Y(6RbSS8hww*G5t`t zSz6qUA{66ryN~oW1Yk&=qIz5u1wRcy@S@AmY|BSPjXc*BJL9{2X~VRI07TaD;6-ut zO0EEWC8%$xX;5lmLA}upP)&ZthyU*FAaF+2&rpG)o9xT_1osxTGeRG+9>^thtqc?{ zB|`lf3d;R$sPp%6>)a0*^ngtwg*NIi3KA{J4B||mL4-1puMng1JxKL(fVHc34DX*n z$GN^|;Y&9sgHA+;==1OdA9bj879%e!v#`T}*Y}NN%|r=!#L|g;Fs26;L82C|Cajit zRyHwNo;V|V;drzYKZ6;1f-QgFA_$;vTvnDT1l_U}#f@x47rwW^oEo)$P5ogQUi1B(|g0`n?TBSX{|gNu|)%Tz)QCSI{btITmE5X zNJYEcoVGIdrxZV+3Q!)X3`=`{dEO=?rBf>?%E4V>a6Ko+8Q(Ehs-3|UsW2|LwY z+ud(#h@OLgnDwq)Eh9VIH_-+@=cg?)vhTqZfZ&O$WOrJU|rz+4cMZ+(EHf1j|RUrYkwMbbK@;q_D?O0qC2uIm({e-I z8Prm-->~Qq|HP9=?zdH`wQB(L%j2k&?KJRsCxEXwunnL_6PvKP~^+s z<&JOFVhBJhSqz^b7&WD#NSrR&MVv3O8~oHrJbjrXzJwAg*4_ET`TVUoJ^ikQjL{Xe z-=sBrf4LS2v3wi0yuHo6hj_#qMRXfTAOI ze{9uYtqWbo{thk+^c)|}@Po(6y8YSRB+y{)=V7TaS&R&4*F8cy_%1@N^=(e*U(-$? z3?!%Y1~uN?g)IV5b{jGs9sc|2dHu1YQm9%BCMlWo<$WExAKkf?I+RS&iUIm{3O#EK zEu`=PP8HK%HGMLXD;bI+(KWnv)qXt%-ta;fv$DFD+$YNxdAN<|(kIJaV_WS+13kS|OHuZHDuqsV&rZti zUC4?bAHu=keS>z6VY;2p2m&XfvjA*E`;9KVeQxFmagN6l(U{T^Yo%hId!GuUa<5a( zKPX0X@MV%47lw9uad*W|r@m-`{|G#P9W26`izdRAr`AHA*v3j1r5OCgxr`LJ4MT|k zj4m`@Dz8*9m+owX19@VE>8dLS5daXSA%mqb#i2xcg3%&X(;4rn(=4juVbn(M`Ac!4 zW)IBabKS1NDvIV>*k8^Mm%!{8AWDL|=?UO6&BN0l?&sKhk3f!D>DvWlFBf*=xOF)* z2@cX7J^$vdnUS?XYe$2n7AXREUHE*A?#iK>f((Gr7@K)8Sa#|VW-K}qpd=1~C)knR z`5gjXOlRNJ|z{xF1@TC!z z3@>;Tnqyqf8cqhVntIkzbKfy$56 zU2=7{wMW01Y&g5H*3ZL$kUfu!2AeS&)CguhTh@1&?Bh!oabyfQL*4o2(_MPuQ{LV= zJPhY!>4H5oQ8E(!#5FgBH^4ubK>RqsF8^#&HvQwhRnft(#0Mv?9n|xKCgI1SFH602 z5uNdVl_7?Ly6rHeSt40E+YcE$OtSthv8%?maN&iQ9@Jo=T&gN!Tuh)STa6r4xSm(v zZwEiOaD42XeYh*Evi6&DpJfo4A7BL@0T{k;5#&_y&{jrgt$F@PXUQait4|7=NGqWr zZZb|OWde0>UmILF8dqDNsuvVWNPRXVCc0TaU;NbAh~}GnIeY5Pm@JOyWbvbu6|C7u z?lbg9IfZbu@3RA+h8ZhJ9mrw-S1aGe3|9e!it+SxhjfY>U-eqB(J*|H>+GfIU@R@oDe;$GFfr7B9f*qPUV;w0wJ!K2l4 zL+-mP_AThKY}3b--xlc+*k)zhw$WQgEU5uUKjyQ1Kd}8;K^)zRv3)kIxS;OP3b?tx zmQV6<2m^^#S5{VbbdZQOdVj0z=?Rka5sm=h@i_5k-)(<~3m55nG1GA}LcpxobsQ|> za=8E0Q%HQQ|2TNJTadKBAU#bP&g8J1SeO;zrDx*YJ)4dJgkt$qL?98@-V`fywjh{w zBwX&kEHmfuE3U7qa~8&n!Qh%Dw9+K)S+A)6o+yBMp@6rW_$A!C8>W#jL;TU+zPzjq zDNOQFuPizy=Hf_ENojn1{6dMDnK_}xcwu3|>HUU{)_VC3;;j8w%gGO1z?Y{$0l~&_ z#K1HFOsEf1jJpA7`&imyiA@azrS|;gHabV5SV#S}5guvgs~@}DeY>p~I%QtZvP@~z zi)m?8s8Ptx&5e*evkY{V;IT4Y0BDvKB3O|dP>Na#r>>iw`EKA9f-_l0%=^;e(eRZ) zyg?Ci*ApjlmrH&U^!c&w?r6>iQBg?awNMRPA-k6>6!2H^gFr}v6~yYXSp(yqS&SKLpp4OR~pXGlE_ zU`GQP#!S18EqK(-@A5PADxELAUL}%+Ly0!))v7)6QHks2X5SMl8<)Bszh0%V2Z z;BC0huqcb&C-Py48><`jO2tEQn5Ua}^p!TfrkKnOs_;ngt!<5jejKx-+QQapMFJ&1 zbNSZ2gRsD?gvy>Ql>J8@#)FoJed*f4y>@Q!s?b`DvIz*}Dkqsuv^8*I@9ZB2T= zAb$NWBEB-#D-oLX-%JMi{+Ddbu9&1GbyJlxx1 z$YD4LfWX=M*$k>D`@s>q};wwm3lwVnpv|ublV+h!=f&V0w?V~cLakqc_$8RwGp;IL=-RF=K z!NAqiqcbuLEf^&a{i{2vkeOq*Tsd*rP{OB)7@odm@@}%w+9)9I;;hmAplO7ENj{QW z;6DvW0!WT7QknJoaG~6=x;TPafUk#sm3y(?mwQpmWd+_C{^5nDJy*yzUjU%>2h)(~ z*Kh1S4EZoM90XwM3vL0|ou>rmGG?ET zgX|!bKwsE4S&$YCY^UATu&W*qP#s6+)a0KT9yMSfnmV}r-lwEFoDCP!Z$M-IYo~=U z8fGZ+!k>F{4*iN;KK`^LfH#Yw>cd~Zy$f7X`qwhvij1sXj7}3@wyyA|S`|&7K`b(& zxAVXZb;x}yc&3k7FG;=SFKn+b;GX#iHlR!Mb<(d(w-ltOf#99%8z26EXvGBb8t~te zygyMBv7J)+z54NBrDqA_kBxI6*606e^AGNCRoDNjuX_hk9FXNMe?+Fo!ue-Ph)X2E zh!^W$FZWnU@53-4&O08qkbfO{`_TCI@d20869L$?f+|MATFsrI{`cXjhzaymVS8D> zr9WQze(CSI$CrOrIYph}=KjS0&F}hc)Wd&F|4b;%_`M>l6DPhEn6E|+L zN@t|Jz;X@%ErOVseoV8I`e*Dn*5Q!p8 zJ}36X%CP>2Xpcsc(?w4UtS4ze{6Lp&_jA$X=p*BYb6AMOQU|(lEMwJj5W}%TiQWHT z)w6&M6iN6=UEIVDJhoVNQr4`|*Ono`Q|R9bo+#k=cDuuv<;9zyrvI#+tR3E0BVWvH z%dT_lGJg}S_qDA=@k76|Vxh1AlPrJ6k}s)%M!ohCM)Xxh$@ekabu{OR)!KRk!?|jt z8aRxF$41X5TPih=n1_GaY=6yYtgV{Uf&V+{4P*ZI+TU$9oO4#;nUeP3?Sxtmf3{{@ z&>^7e$7gI)CsI1ZXAGjWKuKk!@!D4h^kH{|9$q&HBt8hm4n3u<4s?ohe_=H6bc_aB zevAb4dM@=pylfrh(sNV)BeXtThE|Z)=32VDF7_BMbkHy=`fqU{W6ZM8`*BNW4!i_u zEk(LhK5d=t_F`;eH!$EVUjqLG4Cvm>)cKRw8CBuM!RG6)k>ks|p=<(zC;xdvu^_DW zFL{cSf!ffrn0taGV9##G73eaHR%qiSS!=UeO#fv0am}Et7aB{;bJ6UrA(8LLd(&{9 zK)aP7WLT`L#vLA7F>>d8D^XAW4`{)^YEOA+A;(%7$$dS zZQJwMI!iaVB6Xy>zC)JB<6`?HsA2Y~38QJ)3&WmTE#+anjCS2E;_+ImC^n_=1)h4ZpPV`CxkusY!ON})B=qUH=k+r!BXxp}C> zrkyKz1oukDrpyGd6%NBCxAI18pR-9}K5?$_P!6q`wGhf%0;7$cS2eK&6d-4e!Eu-A zk>n(XBmw7KLK$m~@|0Z5EVN^Hzv0Jhdcl2WQZ#HAuhuF)t(RQgpL}N_&+fb^^ceT??2-7<1H*5jSQ~#&KD4C5{$G= zOvfmmYmL*=Y>G$@dW&XRe%X(Tr?V(4`Qg12wS$S+vr-bXx7c*%JW?U`Jy=W)9*;w{7$2 zT;rd3&aqgp?X-P$=Hu2h=prcY9F27Z$kbDFP6{8=3Xgw6ZPwDYv?Wdp|3$FmOU|hc z7KdHdy7#w0^v-MFpaCDpf-4#S>Aq*ki#VP0-GQ74fxD-O6(_kQ?&m;UXrn(o zCUE&Xn;CJ*ik(a@iUuW8UHwf83FWmFLGDS{;@0r8HB$82C8)HuIFb3Hp*5;5Ik96T10b` zb2&BzJo;l%NOnCeh|qw5RW-jZLfMOLE5n~P)zv2@(LMaF94+WNVeJ-69Cu$Ev{;fh zsYjfCnowV@Ob`*1GM33*IJSTbjW6Pf04`SaNSqrmuIG4Qj%znm3R{))PhSWRQ_ZF4 zvll{b)YoSQg+jscrQn^()>581-UU_{sp&XlK+s=l*}3^(=7nqC6B}u)tmng`;*<+n z_-N~_&oYlR!J0pt>G7gloP0Z;;p4($=a=hAQkK7dh_zJ;EYX;jGTn#dOV`^;*Ad|o zlG4L_umIG2wCu;izsB%VF~;yLXM=qEBV%Pz`S z=g59bi!L;7w<5z}mclTkS%e4q@14v}eCr4WJK+i0akU^DT{@0)XP9(IxN<3T>7UNm zCav!keM_a&3b_KN>5;cu*@}Q*VCCpemqNjby1Ke0B_(~h`v(UCK`)(Bg#qv+rBIh! zmFEqphtk2#bDo~Y`r}zVRnNlpSdCIME|f!hm<~-+Jk@a}H#-|u%k1uSjWAY_wop9c zgbn}*FWp6kZ8kJC{O)m}`4!N>k`Nas@b)IX#`?{(7QI+rs~!@pry&1wNq15zuVllrgDT(P*7rGViL$jB*pruuz<_K z0PmisOiXnin)?MkQ=)JVKE8+zao#MM@pRgQ=es|zrKP7O&F*`utB)7M6bZ4hvGMU1 z(FOV}oI|8PyxwjBwPL@nqLr-N_2{~YF1F?To;5-*FMPZ^Q&Uz(MMceT_7<}(aCf^u zH+Plz<$29W`s20$=60K<*0H*2@)2gg)wILwWP3E(@Amj0(5)|a>tbtoRtqMI^oMRK zOgT;H#Zwpx0Xn(xV%JTR_u218+v(rk0V{8l%V*Vec%M+FG{cQB(|!JOOuvYTUU9eM%@NtStWi59a9L*_U#y(wTk= zVq(S8xvs7*LTCA;>xpSHQqoYj0k$`+Vaj#P4^BtE&{R(yp8e^eK*LA|nhn3*wK}hT zx6Y)q!Q@>;rx5k$=i-8s1*5O`aVaxwelRq(vjxou)@8&f3Us}LQ%+q?Hs^iGs4$aa zVB>L$+tF$m%J#zgFhvn?t>HZfU}U$iJrYbq&T)~}j~&>${OcbSB<4#r`52X|B>j%R znD-k@g32sVE93hq_#;OIMGZd{5$Q@5O>&HQ zXQFmLg(XiaKLu8j&pb1pW^Y^>oPnX>7>XNYI3T02dH0|NM?Sd-7WgsOxfo|BUP*a^ zBjOuV5ALp`!Ny5+`21R^*U7Ty@gfJe@JdUoAr#w};~Whx`IIiy8a>vR^vCOx(Bq$y z8m{=gS_9L$yY!RRCs9T*CoOt|PAw_iIks}Q+})fbPp%f`ZgY>^pZH7ng!fK- zx=Ycjz*gKXS8iBjCOwU*c#>3bDk1qFgH@_;KP9EZ35O;9(Pj%JXVL?GA(r9tVM0;R za5aZTmlt$vV!%hIB#j$3``A&QGfgwAPj;bS90*qxaa$h6{_rDg(4R>)v+QheLY_E+Ka_ z-6snacjq5DjabrSK6mQE+iU+BGsdcuSEa>H!iGIf&| zVpmg-+ohv(!SUjtG4T-JPa>kb;aXDTE?=FK*M56aZV48ALB@%Uex+TWH0*Niu~4oW zE9M!ll(ArCUrzjT4aU+Ke@$hKOVS9xSyollOVI&!)3Kp}@#rpo+={8+wHGCPvT7{f ziWU+YzG;?Z(hFv^GF$VL`>1wgMP}+kl^z$5IwQ9gd6lq4ElVh!3!=YW?uqWIR1P-e( zb)gatU4<;f>yivtJ8oxX$nXPGIl-Fnt)@fk_Qh*&>x?BCZmc6*e&L zvn(^Zw1BqSet$}g zt*LS4?z@3gcXqX;1IOalflV2z{VA@zV0q$huVbnVb3w-5V-+>SX7 zoSflMeMmetKwEl!)5L+0Uw9fFH*gq}`JeJlSUgB{;#lZV8^|0&-ME)EW5O55YC`yU zrT54aHU)3xU|Sx-BO>tpAk~+R|ZI!X-W= z^ms^J!H)EUq!h%=GYApE@S_IU>r7JN_&V#k?>iffLNkL3Qld5wx)zHgw}(@Hbm6M* zDt_}n**q&ANsPL^mDbEmOCf^9>OfZTN#QJkhD2pcVHQT{6y&#iQJ)~87{ozoMaFAV zDE$e*T#Vpt^WCi6TpJLG0->Nb(My1MopnZt3l@|4`X&sK`dK8OEm+Cp#2Uah`AMW- z{ltLZAOZo=dE5A!7b)fBj3N>(JhYbHH-_xEj zN6TjP7-^jmqS7?CGg%HVtNL^4P#VCnlWzn-^Lk=l$A6z=4j^45p4T~3+I`9+`Lv6&q~9~b_$*mQ(C}9|#gX4wiJwgPo*+dTayt5UI(TBJ$+Cs~|9X15|%tnViju(nQj?B<662bBTB=}y08IzTJz>OZ{ zC2({rJG8g2!VH6@1lRRL{kJaJyEa|%z5}q}ps~2gz`_rCF&+8P`=vpnj02dRf{-f* zAWVPv@u#MWFrGI$auVGqhsaj*Afsk&;mFCk6Nfp7bhmxgU7k?f&5-h(shpAJRBUPF z!n{VBk#T)rNc%mQ(xBrD7wNRNJNos{Prguh@{Q4h_1+$J<#t_hlkH{WBK+BnqYm&F ztEmeGwHO?Y~_7q$H%TR(T>v#Z!!=cDEr~*dAhej`#KPXbG+U7ce)1F0Wpk% z#MJL%Q--q<1%?f5iW6es)zSf}Wc1!wrul=8Lt7kgc+y$wF+9F*%lM?_pYL=&$l<@(Jjj(eKT3F9X?J*6ewX-py#=Jj=R zRoB3|4!1=*$7g3!bfmToCHH8EU|zR)vuYp-O^6A?sSp9)7cDfN(vaXFh(TAr=K>pG z{un#VO8zO%8-Y%j)7#Pg!%UYhIL=%3F)QhJ(Cw1#wz1)+86ED&F7?L0*Fa)iBbY#Q z0=mBEg7f!CVF*dp#GQ&%1IA*lR-(}sfmkjlC<2sCoiK8hmNQ|x;8Ee;{vbmNR!LF()|Q{2H#_Z2_Yr%IC!-mQeT5^&)h1kVxk$D14MjjG%5 zD|JkP)&V1EJ|Mu^csd)~N*cUel8v1quPe6$HF_Lg*%Sbm4(!10x@%zbM>?LKuxOPG za)`l>#Paa$?h;7oZOWg&(fO>i2tUhm)o;R&{!sb07Hc?m$@ikH|kfCWHU*tE$) z(Z1gw@{OZ>Ru@9Zu?3nHT*uP7qkbm|#ULDl)YJh&Q3;b>){z&-;?C6d^kMJm(ON?x zFB=^7+wl;15wcx2{hf36edl#1i#3G!WKuX}-{n&T*uGCz9@o%;WD+yrgmFGxHO$@O z+}}Z++mB!OP2kERCjPqZ>BABZxB;jrr<26pWzK7ZUR$fJ52_FOUhN&+QqGGrz)U|m z?AVyaV=+Ts$=HLWM0Zxh(JX<@Rf3-nGFnT6UZy4vrF@be8V=Q8GQ4dPbwp7Lkvth> zJm)5+G>giI&LJ^k^;6HsYFbHo^lYEcf2dV6(jGTbMyp3?YPafBW#b#Q!nW9*7Msi1 zd&Mt9L74E0uLO`yL8xZkwvhWSsulJVhA#Jm(^(Kzvgz@t=*8vzTrFvk^B+=$03WZj zRV^d>MBBTm2c}5s`2yU9Xh(=4T7frJ6VfLy$Ci+BtQdTU3 zd5l&taN}ZRG2w%1x92AITl1^?!BSp|fRA%u_!5E+EB|Lp%ni+E!0%0xi;}t+RGiD7 zzM`~SCP}>2?eKYF%NCiZ#2`R}{&q)wSIp{Asy>C$kZO}yMLs^^;#GzKbS7(k36!uf06Z#|e zx*tAU%1811d|yg?vlD08PR~m&Vb{?O`WG9`)JFNb>-`xHoJ$#1dh#AmT5pVvS0`e8 z>ITWWXd_kNEvsRCPku)X1vCz>><^yk41wTBFa|~A9uTMi zGU*kw3ArXIqqp_#5x){d+Yk1qFh0@VSj$}sC+k}c&tCnz(%Zyl+>H!4t&;l-v^QbC zuFL~^61eSn)YEj;n4T$AMMx<)t19B@yDtn=DE z%d$cD$+cFO%Ok5&N3t2i|DZg6WO(;4Z1R8zUarsw+Gl|DCZ^F!6Ku917=nRUrDaQC;((|5bk3#EuDg(C8dM8zi#N?!^DD*J zl8^t#L-=KQk)_=!qeF097**xENI4nB-Hrk&-+2EcV;|mD$>718qhD&d!sVi^%Z;`e zG->sKZb)9jCZ&4z41z82_sbDph9hBScaTc3ZkzSzw6`LH{Bs@7xCLDQO%m)8o1RsJ zb5OG+`3JFuX&5Ys8_5YA#4=lqhulntMDjTMpJWudYSN1Q&zLa5VGdCzO(VnNW zSaxrcLSi)I@azIDV_5_V5eU<Vxka##r?>5(^|}#>ROKt{QnCU z#DwD6gpTc4zz+kj!7D{9vADZx**ku@&1Dh^vau4`aBsh2fM1-u+iJnet#27HXt0=yjrZf_2bdD{3u0wb1 ztk`VS+>C=nKm@mGQN$~|q%BT9IpW2_ma-laOSD1cW5eAtMxF~NX`&b=y?39O5!V)yk*YUvpXP;-ot zp3XYt^|@o@^!JC(;w8$X7uU!_Z=qk~1gPZ_p6SY63< z-Z5cBjpCHm^%bGrd+%^>m%+Ey)TNbp+o(t3zs8x?f#xPncu37YNd~SIgFY{F$8W22 zn^@u9pNUw1c~;Dzdg*pQQ$76{{X5R@-U$9#HuGkriW#`A~*vshU#7Z zbX?V($VC4>nR04uLJ&LfcFwx^-=S1yC`I^&Pp_{2#NRj@w>_V9;u9fSNUga6heP+mrf=TB-R7JO=w;_ zzhdVV@Tl{l`|E@v3Cb6ExgFu)!Jyi|OIFapuhNF#6S5o(-b!p*-wyr3Tyio({~$l} zCz;NR(U(s$EppiT?MsvYrl-}?CGz-ZDaMC3b5zLNM5r=nx8v2iA;wK^*>I+KRKe() zWhSc+azN zyPPrUXx~AJsk34(T885e&2hgYseccc`*+-?2Fs#S0gLlUqdpWCf|boccW!7GQ1_U z1hr32NThsRGG|^}I9g3ZV`!FqLnZ&=MGDWXc*bqPGU>~RG@&tG{`9m5j5X}-$;xnR zmnNj+^|Q9sFA?5$3zKMy`n*BVzZ5X97##Ra_#bP$Y3}qPuP6F=Sq?)Vt-(CIC#KN5 z(x3Za=uloqyq%ZFXC%?{ssH{aK~3Y@`6R#R zc2?ovyuPvGP5Yu|rRO4gj#eo#R1@Hs^j3a%b`{^LD8YSpL5a1YQWfnB1zrI5^1aRgrX1`-0#0 z@?^eE6Xy1xNP27H6A|AoYiVf-R{@9*dAE)G7C(U(6IfqN9@gP6?&3Tx)8$M{_t#kc z_+1c1>W}{0E_+58F~{^6s>XFT*GEU1VgdYWvmGYU{#7ak&gr7u8~kCQpQaqsE#~pGnDU-zx%p2coe&^WxpC^{J8yg>iUcB z%7KQjsT?Bqdlr`&9xqW?QAb=F-~Fo>p?tUcAn#FZ%zHfHa75l%zM}?u=B`eQLBl``W8T#~K}l2*i);+X6tjk1od`E<Y=KV3#P-k7eqGT4vqwhBlo65 ztTaME%Y#Jh8FkdQJSCx#Hr{%JIXeBPR7JPYBK2Jl#-%6PY5G~ph}q1Wt?czkwS`X{ zyc6%}J&qC-5yAmc{s#k1o`37J4Xe|z&0j{^Wbsn}T(zBQ-+Ay@YOPaH42kKAZQ&~o zB;#$xe&4N1i)V@I25-gNut0?(nZyyoHsN*cK3%6vsLeQdDv^v`n3=ihh{qwe2X-y+ z|FzN;nV|12mLS?6gb7Nm;H%};AHZnUJYw6~Ak#~YsV!^wU)p9i(c;2-41MTa>HF@U zb|^cRS!3_pl~O{)sndA!Gs>b5d+xKweeuF;Mp*FzY9WX{^~bLnFzN&Eyo??MJ1CIW z)+_Lb4_dgOBTwXzsqGPs-Rn1hlBQSq#x(L?H}XiEh<^&NuH^7bVU_2$c%Lf=rdmp3Ika3ZNCpvIcWJ>I;p2pp4KJ*Y$bb5iMrlIp-YpUQ-XPY< zVsD;u#DWo`$DmhnSi+;|o5sRgY-d$gVu&8I6~imjCdd2~-T-1k59YaGA&iaHIi)8SWJp6&U?3zxQ1yobk<|N-nQ1@dcLG>1g#7Lk$$dV7a4riS zs>4Lzu_K-6tmmAPPXBEKlk0x^yd4W-isCyB$!GCO@vaYg+0aDT@fI)NvHW!XA7(0O zL!G1qYjm@LGk1*sHw$`UO3h00EEth@TUG(p5`PLNevikg7>4$TVq+^&&`#BYx^9O}}Dq{44aJB916HfW>L&MNk!s)l$m9J4V|-Yjd2Usct1c*rDSyF z8J#b$#-xF7lKcFs$@;E6d{7&ZWkLp|c*1^iFZew;q;1E8r39VGtvpq1!O;q59g-EM_5M5wJ&jy95n3e11Bjc+k%xM~pQm4C;D9XpYL z^zkQhvUO1=+S^a~@*MMXU(6^8QQ_}bWszEHHs2mlU?DB$hnTbZNvl_Y^{5tmf_kW7 z7>PD>P|{RpgiOT=f`4xi$L>O{N~$sp?L&)Phg>H%Xj+-b>4(QIgl*oh9FvxZmlx=I z?Uki9PNg&Gs%X*JS7fAzIUEVc6U@EnG7n{XI+SSSojGM>U>dA9yO$6a4*Qq)&8XB> z$9LT%ZlUkovhbG!Uf2Z$$|El>$JtN>f#bCMWUTop5^VCCKN_#;98u-Qzf(q>r4UR1 z)mHJyRTTa0HPtjfHZt6Qf|2~O>#xC)uqyq*lkW7O&$F*$B8HKnQsSnoiO(MJPDNmW zXXV$3TkYN3qkZ^lu&f#%#gN|Is@cV|tbrAx*TBWamFItnJT*?UKUf#?q0kh{_H=*W zMvb5p%Y}uYRt_-5e|rc5DAlW+XY=FgYW5nV z>+tU`KO&)l9xAHYuw(I(rp4zVZ}y6}XE1c$hJEF$M<2&Q_O+Oso6}t`2_5js+?WD; z2r)vtP-X%_iA*!^>!RCJD-O_vypBKbr0Nk$?P00TaXSz~|=(@*#;%gqky z)Kv8hX8FEZk=v#vYx1k_F;t1kd@(keVNT3UEMf2P#U zF6jI`E?gig`BmUU)wh~r5DN}_3Up*M3WuI>+1#B_7woHJL1}Jnk*es@^USNzTNT6h z=N*c+xTK_H1voS`R2vo<8Mz#Qjg74dq@_*6QQmr8>aZ{|@;n*4lisAb2Xuq!(0CZ_ z8L6>kG~}feh>PX5aj|@XUnFyL^RNvxjD?G7i1(r%X=}EP+FUn+7js!$&YIhfY64CL z+x_xRsL06Jpg37^hzPr8pZ)Bn0j;c*=7V>`l&+-kJR5&>hr>I(3VUj_csveGTCSez z+>Hf?&@i54Kzz=u6QP4Ri(O_@9zL!|>6zhQ!8OaJDk%W&G^rh0qe!RwFTGZb6|Z){|6HO0lno z_-RWgnH#>kQU5)Ji7Uw7c6qY3>+@5cQRFwJ2K0Dw!cyeL>YX(Nr5|aggseJoPnQuW zDDtcLf^<)D=Pg+$MNK0Xi^&eIAH{5QF@yBZM-9N{0YuD#P3x*TOs;~3nh6m6$*cK& z&9K`5=9>)tP3wb6L&t9E<%yNzpm3W|7i+tg0*{Z^^801*SFx1uj}8YFwj1h(xR|yq zON-+tRS6c7#x|2wb)VLnsd>Q@*h%W$Bn^|rEM_dJI@MY?Rurq&YY3?@QR>cztxq`qR!X0xEi4w z1ZH~?=0G?)ieKqxuZV*f+I}y}$6xV0Je30Pr)gnefX97=q&%OuV%=mp1MKOt=g&RO z2u>v>)?;mT%BC)(P;@fXhB)GgTsF(7&G@ieJ^uJETKAkNks!L=(bSMeNNPk>kb7-q zeae=F{aU2IYSGMu2pM6I_Z&kq&C!k?3|$J<*npK@v>=4TXEw4 zKNc2K^j-Zy!zaxrc~Aw{e9_h3<#nD-;w&7&qRXGLZTf3mxkdLSoNw1N`tL1@3c8#oY zg3t3_PY>%}g;8+#FC)t02H^-fe#REeOti3|FnBq;+rDZ{R?qhT1-F_jwfOlv2xTZ+-Q5bkc}5Rj=YJ(kfT^WMiLmwaxlXw1X~SQaIquj6l%# z$*_!JtuSpLwk-+y{UxD^G+S|wX#FH@m=|OA?iNC_?F#$Skc56=*8ShUUFGT6h_%h3 zDuW+X(^+hcD`-^2oBB zED1w>$or-H8^9bPN(hqe@J4%M4Nws{27yY|*jxlrE)++tg3=2+JbzZ2Ht-15TzK2( zEq@uGva_@>V@*r=f#R=^#tFA#S;+~JCLezy#kdhPvTm=@ZAard>^n4z6qqcAIr~g_ z#zlSVwb9|mvC)lKK8gH<45w_q2$fRdOU>ZG%?fwY1k)=DIMr@iid2LJV%i*cvHJxC zeK-iiNwsjoCc9#5IWhHip{oDWoEzKd$WoG{m^-&B@+J*vHuEXhEED z?|T86vw&w*or*ZAt83}swy@B_|5L@4heN@2@iB+lyphifqZgWQiDiWG7T8 z`#xoC*(Qx8WNT!X$Yh%g4GmdJV;}n(2H8S=qxX;R`|h9TxxahQIrp63xzBT+=WO=Q zvv&xkTVy$y2nDO7>hr(8kj?NYZD5}*E(}&iqqr>BZQ{l-<4a|ZLzRzN`e|QDzCgw6 zfaMiSn!lqWoAY!Rog+XkrjUS?%ah~rWqZN{^AZWtEs-h$=E$*ep^NvYb}u!eHgLjl z{IRL(gGx$XK1gG!i_vv3rw?jw(0%yz+=9LHr{&*^N*uX82v}xIFilM3EqE#Ex_W4P z42NiqPMkynkB`8mg*LM~Cd9L8X8-v#;clUNVMJsUFLrmG$>g@t-Q(RLtt>j0 zz!E`aE|&d;%2fEwG`Y7S27mk{%Ej!}8I$ADwDz)&CvepPARixJ0j*klw1HqLiaLz* zu$|mTqo7W7AJ})o+B`9L53V0O$*IzmZ68%Dbp~8kk>pQmK-R>Fo7=$|wW=)mcnNEX z>TF#uy8Y$~Vt(ohZd9CJ)b6o**~|?SuF4B4ul}C80!~_3Ujr2*?{(!fjSjobZm&5~ zJC1vPed|ZaULhXuN^`*yK0CYn=B#1w6>oKqS+kdSOOWb9dlLjuw&-)R@h#9y)D{gU zW0NLSpF6K|)Xljy0ay}!EaWCFnNqC{*3O+N1Jeq-gekxBj6F!KmdH&0^>9H?hFgTG z!;ODkzV#q=_^Q11$Kr{;nJ>lfyp7DnmQWJ=N#oqo2t;QslVI5Ji2?o}na>QCp@T#V zE69-}5-}tT9hPod_#wgw& zDk*mX;;kBl4Sl)0bO;`w!1Fxbnt752v~PYN%)Z7$>I2P~==T<39(;w5N9(v3WIkmW zd59mlO}#Lz&mZF5o_1Saa?i;A;~6E|(~rvlZV)Bw-t^v_H)E4eu@!t>dDZS+`FE+t z_OhJ-jR9Z>_-<A32giG6D81%G&( zRCAgWi$9i+g-%x_`00^VV7b7zsW-YV`Mf#*1REgaT+!4Ryu^By*bsYkwdcq|pf!iV z7am3P(0ku(1#X-A4OHIQ%8M^b*rV=_67D2WU1vD4y8B3Z;o;8#LIj7kG+5-x&K+O3 zH8!NdfWP}KlT;&Xo@HjyAn=hiLrmu_sm$A_6N5@p#^a@7n11aWHs$hT3#-@?FCB(x zj8Fn3swk#Zk7YotnXl<$P~Is{!cRoT1jX-4l}(eC$BWIS?URCyWfF|LZ0+a#fAJ;i6MTs z2Q&}r{8_BXkVcz<19BRgENIV zybHBb4)gL)Qb>;N-i`i#D{k_u!9|eNiNu5H{mf|@1K-jqn0m z_1xEkjfz@j{&L|h$U?(v-m&(p9$rsHu892_Yc2@g($tEoe`6EoQ80uiFcnrRsHFuy z`1ekO+)nk8FdibDWB0x45%^w@{$!WtkT?5q2BTnh$-mH!+%}X~bkijH$PS0M#oC{z z==v~Ms~TwhA(`apZZqTa^2DIJDLIAx$V$KB_v~)yQX5);1Eyok`VkvmFiBqhFRpy% zQ+4Z)?GGtpYFvk_8>Ug;x0HbJex+umjl!_4SRf0HzQDKBpa9(RJ*FuoYoE3(inCc# z+%Knz9vYVG|FBj$8gf7a-ZB?oA0gaoJD9=T1sXeV`z^%Z8i9Q9pKNqgU9p`Ja$QY+ z2kj`#6k|MRtzno5!Xe0|Y8Wd}EsBX|!aA1; zo+O^W9QW8a#8Il?W+-QS8wfADdFkCioB-boKcPGHGDjrlkY(!|=G#L47oLntB&ad8 zdJfh%-H!`F)_%Rya`|T9Acr2w}sk15+(1H z=E^P#LrFjL-*YYN|3Fhy>)7EA|UOxZ=LwLZUJ;8NyHp6N@5M+40)g1xpq)1b#0CaR41Yb znbX*_ismgYzu4FFOfY%r63M%p>N!iTh`6uuiW@4^gZJk#F1v$8X!JDx;5x(B1aw^B ziv!@%#~}@6r?BJH8Am7P9M|}3rsl54YzxNO@cn5>3Ln`d!;SwRHE21Tk^1x~Pgs*EnrW>_iT99xEcEPiiYM8oqJ9GBrt| zbUs@Lx-}L7tKmRYcontainer1192.168.1.2/24container2192.168.1.3/24pub_net (eth0)DockerHostdockernetworkcreate -dipvlan \--subnet=192.168.1.0/24 \--gateway=192.168.1.1 \-oparent=eth0pub_neteth0192.168.1.0/24NetworkRouter192.168.1.1/24 \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy b/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy new file mode 100644 index 0000000..eceec77 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":541,"height":352,"nodeIndex":290,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":2,"y":6.5},"max":{"x":541,"y":334.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":2.0,"y":6.5,"rotation":0.0,"id":288,"width":541.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Macvlan Bridge Mode & Ipvlan L2 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":177.0,"rotation":0.0,"id":234,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":16.0,"y":240.0,"rotation":0.0,"id":225,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":235,"width":106.56,"height":45.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #1

eth0

172.16.1.10/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":138.0,"y":240.0,"rotation":0.0,"id":237,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":238,"width":106.56,"height":44.0,"uid":null,"order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #2

eth0 172.16.1.11/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":40.0,"y":-26.067047119140625,"rotation":0.0,"id":258,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":237,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":50.0,"y":-16.067047119140625,"rotation":0.0,"id":259,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":225,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":60.0,"y":-6.067047119140625,"rotation":0.0,"id":260,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":241,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[75.0,180.06704711914062],[215.32345076546227,90.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":184.5,"rotation":0.0,"id":261,"width":79.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":283.0,"y":177.0,"rotation":0.0,"id":276,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":291.0,"y":240.0,"rotation":0.0,"id":274,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":275,"width":106.56,"height":45.0,"uid":null,"order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #3

eth0

172.16.1.12/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":413.0,"y":240.0,"rotation":0.0,"id":272,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":273,"width":106.56,"height":44.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #4

eth0 172.16.1.13/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":315.0,"y":-26.067047119140625,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":18,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":325.0,"y":-16.067047119140625,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":19,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":274,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":278.0,"y":184.5,"rotation":0.0,"id":267,"width":79.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.0,"y":3.932952880859375,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":270,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[340.0,170.06704711914062],[205.32345076546227,80.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.32131882292583,"y":39.0019243141968,"rotation":0.0,"id":246,"width":216.0042638850729,"height":90.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":356.0,"y":150.0,"rotation":0.0,"id":270,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":1.8620689655172418,"y":0.0,"rotation":0.0,"id":271,"width":104.27586206896557,"height":42.0,"uid":null,"order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.253/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":81.0,"y":150.0,"rotation":0.0,"id":241,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":1.8620689655172415,"y":0.0,"rotation":0.0,"id":242,"width":104.27586206896555,"height":42.0,"uid":null,"order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.254/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":224.0,"y":64.19999694824219,"rotation":0.0,"id":262,"width":120.00000000000001,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Gateway

172.16.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":307.5,"rotation":0.0,"id":282,"width":541.0,"height":36.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers Attached Directly to Parent Interface. No Bridge Used (Docker0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":32}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#000000","strokeWidth":1,"orthoMode":2}},"textStyles":{"global":{"italic":true,"face":"Arial","size":"20px","color":"#000000","bold":false}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458124258706,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png b/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png new file mode 100644 index 0000000000000000000000000000000000000000..13aa4f212d9db346f307dfbe111fd657406bb943 GIT binary patch literal 14527 zcmZ8|1yEek(&pf9gS!px9wfL7?(XjHfe_r?o!}4%?(Xgc5AMNT154iS`)haSR-Kye z`MUe-K7G$Qb?Z)ql7bWpA^{=*06>uji>m+tkZb?|gb6&FFcuiA2}u9| zCuqei$M4-5nwr`%i3Np4vgR^5xmi?p#UH=*mV3XyA4*G0x3#x92gWff1cLwoYwwVe zXz=#-mVViE$?s==*J21!?}^c7DO)2a0KlX5gh}1OzpT$YG814}Y}0%tmN+Aryb)z3 zsT@@gAoG`$lpO2IFpREFRt13lD?iI9=-GG+2moqFC%hy9I;nj{27uocPT1D10075u zO@yRFl7errMnFEDh#5#PPuh?-x?z&dII=d^&poRym>Ga5A8o_}h!g_|nSE(VHt%mQ zPqw6czuO|90az5wa0^O8kUNShYVM8{SpWbzzUn>s&g!NPQdUl24Wmw9x|Dc7tCD5i z`~h`8htZ7%KLEg;77+f;xPQ6_tOjVPO#d7)KeD-x!mCj40sbB># zBGK_J+`o&7iG6fe=KSsDW0j8+4tQ zhH*-^spHNug`oyE=L)?pv{pax=*n0-F0Ov#MtGq;M)d`C_0|)mHCU--2WMp&h)U&P zcQv?6T)kMcBJ6c$fMY%x)hr|An5WbgL#Op*Pvno^B*6%cGQulpIp?=Ppr)&ez$$6D z*}MDmX)#sm4Bby&%JvaxP;wzHM8DFF^FHjHwaXuDtd)8S?sxf2d3U>RT&%lJ$$CKm zXh*C8HSbv=U%Mg`XBgyBD%Rl}U!)etqPnI`2p{fA6T+dIgA+kT$S=YKz6gfU{%qvb z7e`jpjWyGn%7K-x9`5=yt$bko+n7@#7IU)OAp}+uF5#D0@BlvI_$(!08p3V=l>$Z3 zlwFz4CPzXyYS1KhRVa3_pd_SxNF|)OT`e>lX_!!Pr15$;eD}>yYE?otTcpveS87|& ztkT$vjWL34w}3#Tgw7O^gpwUU2|(|iPy<+_ncLzhsmGAV&thM_)Q|PylCK^T0QjcK zo)J4y05vSxJZF_EhzlM^A^oT4R?8Dmr zwcQDogw_q@WZCF0mOTbPNpjBRXO@O@nrbl2Vm8jTFDVV77`yjlsDjtincO zM(x$`)?EVqN$)d!m{}ZTC)2AeEw+vI>c6fF??;Ax1Z*Js`7TrxV1fJvHlbX1Hp{=wWxaUkp%r;hrW+LO`2E=(bg%#N+RzoYBK2xv zWVjjX{L3{G28PiMMhX<0`Xnn1r`@t})K`Pv)@qv4v=R5^9oiqikp-o-UU|L*Ja?lK z^~t|nwjE>6J~DcyXdjyY;MzpznqdRM$Q^AYD;i&eSwO;!ow^Zq%U0@9iokAYuN1=0 zgjyk@V*PUkjr8X{l@-*_=Ov5&jCwY{A}H~ahq|QDLtcg)p;~3h3no~JY%&`*vC&7^ z>eEyEgAHMBKM!|f zS^386lj~YFezTObA2o}(5mchyM-Ly|toT6%F z7)_9n2@5gyTM>li#2#N+?(o9x8uvAM9D;G%>ym)K>fPrXn@mRJ#**k+lXkp*hum|c zq-1Zrm(C;xkw(JwzZP&ka;|2lw+qcoTme=rp;XgC*aTN#3s+cEC11-J3o1v9za4Xx~HF`t_^?MsU8C-VhCW<=qT-s%dTMDFKtR?Shvc)nrj6n%d3#Y_z&7J zrn*Ns22|<_(Wc?fCoX+yTbklb3JVSk{~bMgl-KmFC;*#;@%qQDYeOJCLe=MOv&Hw+ zO<(DH8U2Dk^4Basq;cIAD+E;=;f42~rYU3FLb!|ErqsXR$WU_m&VGuc)+^xQrq)=I)A(> z3eq>1S5EZYnb3qc&FEHe!-=MDnrX#aNvg?bf)@Gl85B?bP<6YHD_N=&cfdll0*f672)(R19dd$J@@N{w_z&pMbh5kL5@`Ex& zq&SW}KA<5D|7<)1kw7ZCF3_b&b?Vo#7D!g(=aw2`run#=*WBbN>sO)(Bc-=a0u}hBb$_t3wwW`DS%BKn1z*%{>8}Z zL^-J*dwD-znMzHcwswYp;Xpdcs)spLD?%v%r%!Wm!3~J%y9`ry3lbe=yhWi&noL(O{~pRZ zhF-BaAUy|z^c;yWqTyz(=2#Mc0q>nyb_n^oYH%fyF}#B$-F>b^teXvT)0d6kkIl7z zQ=IXU)dXF)oEt1lrWK3f;E~veRry0233TD7pUIQMkKk268myK!minxp;_1w?wPwNBgq62c=52H1bC>GzD#Q>{GypZ?aN*$R=IJf2|D>8XPCX4HG|$i585Vy$kj zUPg%fSbO5#DUq8|zNayBaW7VF15SxZv&K*w+A#k{w8(fy)U>_#_1o@Auj^W&Une}; z!^!7CPCY#K0Ror^@yBz@1Feq3mD}7}P^iKOFdu5vpq6F2Z#Bit=d8$Z`euV38G8rO zB&WfNKOFBuhr`6eZ{NF{ZqA;<;Qf)La@{W*9qQfT7^7`OhJW}>P`~cZ z@Aq?>q)ssj-FYN)y!}~!-kvXw;uyXs3MKs5u$hG1m-zLw?*(o`Qm(S*-J+QMkN4Jg z6;Hd1JBg|2SF7D)zDmEt{alPVl1Q!U=7!5sAuK9pu3&{O8%QegU0`;e=}Zw*tcuQK zl*FhPZvLrri8@W>1H{>pXNY6T@o_2e=8Q~!I2tu175a>nKu5raW~Tk-FR|1mS`}&asn<@r2JsIP(WHGM-CCu+3#EB7cL+5tY`ZAC|;K@PjgQ^CrJS zoSZig&6L68RPw{CZ-?UHBgAtxF`Dskz;p21CwWf^(x7qD@U#L(@t4Nxgj>hvbDw9k zt3Q+;dfN^3`G1g+vFLwE2QT@C-=ed<i+abt)eg^w7Ps@X*{}DCI9dEmxnBdP?5w=jgEL$wSKvEO?o+=B zAu-?zTrBEeJhd2}5X_u+Z^)DiK6uoSR3ioj=sln8YafL*F2>HC{uYyJ79u`EIP<&e z6`zxVV?9zZ_D562P*iO}qr8B)=!DgeJn`m3Mvj%H!uzVap&K>BWMMSDnb5@_=F9ig zXe8{!h78pn3x>z|{vI#MP{epUyBej~V@|07?X~qOiZGKRjrdTPIE=(W@XENha4pF4 z;5*zkK$WHn@-RQ|+-rFI2QL4H9+Yc!J%Z6{l-`Lz!__pVRA15BgkdZe+(Vbr^E)of zX0dK&SY)66l>%;Q$HT#tZ<{Bhb-`WMe7z+ziQeg_WX|EWqwZuk-F6Ip^JEv~#OK$n z&Of6syLejdEuP3#uXY$)Q@OQMCH&*2te)CT)M0!+LukO&^`p@=1stEB_#-plM$TQg zcY1eJTr<{l?u{B)rf8(+UpF2ee5yT3R+n#*g6dD!6;9um24)Q?LJ8Fn(6FBcZ}jow zhEs)_aHuQS@2l*sGA}zWHt!U0lEesoHXiTw+Sf8X6>tuHBJr}iexg64;e+~ag-l91 zY|rjCS*AZ@vBB+dsp^VPWSn$cc}9_%Jz%l<`5EOwNkTB`eE^SyJj{+e9{&6uDnZ~A z%D9hp#9&k1*!F1tdf`3>as2E$d3<3Yn<$uFObQLoyccw((!rC9 zYeaSDtk!>Zm_wn!lsf#f$`*oV?BsMVi}*D-9)_AODWr@>A&1j1%`d`A( zs0rEAdW(%IiXg0?EGB5N*?tcR*z;&JvOLELo7%Tf}W@Ybq7Kz%Rq z6nkUtzzR)UX1h`NaO7jeGrov$#v-IAnB0bRQIUX2`0{Gwog!&=c7_y;QuU!-7!Ma+P9i0)hk)ZK+qvNG=QPW?#5)*a1&e&L2}m$S6=*ltishAM!VI?EpdD zywb>>t45Z2STvPvvBq^@y>_qPLM>lG$1Q)z=~BH$?j={S3{dmK_k9>;)L%{{TUDPa zE45wz^Ur#>q$P>ggIiADPbU12Kd8c;L%!=RG6bg{4s+Kn^8Q?HEtswg1) zcd|>y>)-E_XP#=c8J%U6Mz3hgppg$K7FNBVL*7C3ZLPGG(L78oB3e=SoW~I)9C!J} z=->~^wYhKN1m7HTUJPfDs5-T|sAGBiV0lU==$dyfsppC(Yt4*mlB{^~$I2bqo?c2h z7Uy^>Ozjw)MUGvGG3>2KX)$l~kH`2CgG>>@pDPPgK_P+&UkAC=s8#6Mer(h0wH!Cr zXIs=*Xl|3i;LCGp)sL?*BhihRWcFsRo2k?jVq|mAS;N|TNUNI_&B=Ymnn&JH;jx-z z4rVsW6wwp8XtM~jh7F0H(XJ<4;N+m^U@ZC-cy04xxXx)x7%#ZTVPZMykUT$O^K1=U z`!h2KO}~dT=oL{PR?OA|zsK65?L!@!%(97#Pg)mX9?Ut9yhG&INaWTnLpqPl9JHc6 zi8_z$=O(}O=?J0(|Mf@*mvz!;a%!6_D>=UuUk}!`TVO^1AI3zVf64vdmz|x=oRnXu z9;%rlRn+Q1u=vrEN-3PU+yUK*(JI03^_FnCF=oz0J%ih_OeWAje;9v(zR1jM``$Ba z^JzWMh09%K_V*oG%mQuOdeLX#u-sW9^`r#mcpU;!xQ^1&e4cJZeLghq{aecTN%ZM2 zul|C=+zHK{r@seq7ySZB(zH!28AGO2ovnCCOz;LYBwS~;V8c;x%i0CI* zAIx_N?q-;{IEV-{?MRVi?C?G}Uk?*mh=?@tE?;65a`46D4_8Doa~rRC+Q_+WYG^dy2hDRGv7A*swCIzA1X;dlV0jQk+;#o~)8RkzCo)X2~5 zT9#uaH)daTwGqG_phR;`H%P-NoLRzGtf)bX(n^vzjgcOfm*bN1?o58bbj^z3>%oj4>6w27DAOwo z02PHPSmM5cwh+L3+s1800uZBzH=G>8aRz8_t)s>*Ql841o0)|z?FY^lIyX1f54?!) zSEJ0x?T$+%xe*ljKw>O&8jKsxwqyu`Q8O0@4_=9d+u5X@mOQk)pPc$!aUA`8mYX!) zfOz=0Pe7i``NPIA%L4GKntifOVp6#Ga{b?`&a?aP(PQ{VC=Jft50aYd65m)4CqNYH zFQO5WL}#Y!$2Y6}9Lk`W&49O5W}b+2IEJH(ORn}*XNA?3otDk+EOL&+T*7HBP8kTw z-pKN+-If1XXoE%!lgRl$@Ka4JIr5DokH0Qv$K4*JXSsNCYj>0v*|}_$nkG!{?Wc~{ zK3FA!{bjjS>H%h}{LbUu45)Q~FcN0!6nHgm4Iw#L(FB;$W#-{%?d5cv=Rz6?8jU=fJ2Qjy8M>%cca~=!gFPIR3H1L z_^kb54*k6U;_t?~&xMb3BF{Vy^(UC_Hu+A07ek~*CfaMEeSsqsf%?c{zi*}SM#iYX zuZ>ZStoiJKIJUXr!fGdZyCwhHAaTQ>)Xb!gZ(iZ`7WiuO%OV98e>)4(I^w*-HBT4t znmyx>Mp>FLg03DLCGD4RQf*>-#a6}N@5wOq93=F7qkT2#B7kI>yt>8# zB!@iOF+roqrTO2_M4HX;ay4qd>c+!+N;+#$9|8QI2dF6)F2~T*Q>qQ+6G+Wi5e$0g~XyMHq)YIIKnBwI!+uG~KC!al$ zks=8q28*I7c?28|K?bGtEl0&8EPigeE~7#&S`nNQ0NI>1hYu(TEN7K*g-vpg-lS>I z8Fzw^YL5vOjr0mMOtY8Oeh<%kvWP778^aj(!bsne5r&3^r~X~@7Z?1NVTvI}_q`rP zJs9~!`s?6@pNL_A6Wl%O@06BA6o=rbO~b%EjxmP}hCoqqR-|_ zr>cCM`X2a5baf8vb8~Gz+QCN>E2);k_?SB;s~qb@$rpC%z$rxBnRK(1_y9n#A zCz*VzEuc_Pbo|-eFjz5ZOh{_9V2=KMCPCnapii}m8g-Y^jJ7x}GxVx=?3b1o4E3I< znrH>vhCjpkE(~-a-Iu}K>D8Je3s{jxJwyDN{0Y%Sbtz;XB+MXesZtt|rJAd~C*6Ho z-NxKAMv0X0hy%-#Q}2DgA#u>N7wQZJ(s6j9-wDL+6ONCSH(B4sSRaB$I~?x1wts~) zkGDi;{s#ee3-nW3LK_f-LvX8}d*pL?6#8Gvu5-Z>gI*$sWRITZq+c%QL?tw5aP0X0 z!&BqKR}Q|Obd1fdhZ;duMAoFsV#^N!ekgs-vcabBzi##5iK zL}9GCcTG!`Slc4pn{RVmw`r_p@kTEcU}j&x@??KYs$Je!sot|G#jD!Qfbg0S?KA#k z$9Go1HR*HKnb_xU6BQ|!jRH2zv!;kcbtwYw$ ze$+TSB-*0A3vR_+zu4=qkTM6`5Cgc?Vu4jsKI-$#d0fxCO^MXy@cG1fxjuF4>m z6!S<({3=7jvd@pfmrfbgf@r*_Vwqke#ardA%Vy0VjD~J6DDnd{S1ULdnZ$*wLO+JZ>1-Ix#p1Jr2he#-T!q!N3^$ zUJe2fRJsZJR%AeN9$%uSZZbx`7^72XtZY(SWFi%XQyIEIKN%Qsx)xwbs7wUsL^Q1v zpi=nbP&y8NOudX>ROIJ!0ah(!D0s!ZSeMMS$qlLOCx=LL06gZbb0uRF1WW25G(}^> zp5Y2t`;BSP(%bntPe-X95^n^4`m!1}hsigEO&hgmK6 z&`Ro8_ubx4-3;UYhrLOaB`QTAP{ay-2@C1p*-g?;RoR#ByCwE8gs|GS7V_$ODB`DL z+|;!3FLDB2iIJC!Qjoy5fu2D>A&ko&wOPyem{;z}XHV_JB=i>-zEv)FJRZ-R-56uW z545gHmb)K_2oktDAcsALslbZZr-(B!J9t@+PBoSO@>7S13s?L&iWaLF*x!h7q_;4f zJIbR{d$}b|Lc$xpp5+b?w!*wwz0_dZhAt!v+sW`?JeV4MkczII8dJQwj`eE3>2p4O zpn{rLFnDQ6x3#Qj)$+Oo&EqZwOw?2A(0@ zcAhkIXw+!OGU$IZk1gW1{6=SmEzf|TPGB;b|D zF41;e6fMG{rgD#DzG@F!8KK=N(bAOvGdX;FN>2>FH9!az41IQb-um`tM077kN8F(Z zL^MAVhzx$C2D*U`d}xuVE@^?!1A9MKLNRuHM?q`s2*5%xx&SAilY5#F$Nv_@`IKmp zG(aC_f^4l8ULVp&BB3LNU|LKlzHLn*2a-J+0vd2Mg~+ASk#PR$K4I&OHx`1oN#faz z`*$VoB;y;}k0$bu9nuB~mcwGn1P*+M6cfLGr=ea+f=B+E4^1K=nuXV%Mr;cl_Wbiv z&d6YDw(po{zw6X55sMY#@iKd6lEf!Wn`;7kab*PJD0*o^4d8t3s+9=Hum8DTX3AhA z_06{v%|{N9N{FW#${m&MQoIVOvkV(51eXC#0~LM=5@7u^C6|fz2Jj`kAb(dX_#i27 zGEv_#;O3JaQdc>gh$j(vn7?HF`{f&^J)W!y7D6suwU{C8hfj3S(C<4pMw~-}gYQZ^ zURcu65fH=!M|V~;sq2(F2!jgGl?^q5$N z7RC!Nm?U>bV1RRqKu&_$8-rrXrZm|_MKv;~G$XEkc4xNA^ttN|8m?xZu!mjyVxc!} zeEZo2C;h>CP4_3hH>32dNgKl`5N)ha8C6OTv|X=mWKjUgajgaXYZ3~Q6Y2@f^Wgd? zo|&AZAJdrhG?%A-m_@j-8L$d?81^xCLAXzhm`b}v9Ruf)+QT|IyzF_=5?y z=N&=@r0yCij?ilEV0y>W(28Vxp_K_!2fGHC84)1GYU7z*pkc+#Wbs({D6E*B^RMaB zexKss0$w^QwC2&O)%_t`WD3vA^Z(eIPFu(1Q6;bEh-g1RPhxnF(2%5k#Q-qljI=o>a_qYk z<+tmM$tR|f`10+gfIs{}jaq$k*|Il21CKzP=t4;Ug1OtfC`aWe2brsB_3TbweLorr ztC)1^ju>mN!AVKCR33|V`0*$Cf#=e&n1;ik&?eY{Iib4IR*48kTaZU(rD;c4C5ZZZ zTZacIC&w&1!I3h+DER3s?c8J>|1zm)LEsn<9N=p+l9EWL{x$GL2vjN&Jtmfpg^_WC zznrtLo9+vK9RiSWAjF}fAOzhsdyo&*1-zlqvjD$l2mL_54}2p43AD%9T0<@W) z#UGCezvUT+2p;$x+Au|{RMutiUZ^LR*TQ|M2YOo;G1)^t(#LWOG0^E#41e8Q+dG>KHf{j z(&ex6wRRV1VDqAchNU>gyAzRq;Ye{F)Gx+?^uT9PZ~EyKpFABJVY?1~m7Y`FONk%Z zl~znQs~}Jk5XpLW=Abgc3Ayk`w#meW`fdtUx|CiqyMjK|F{!8{MlrSG0paNqRKi`_CEB$gv3l})i{pa#+EtuMxyrJoV?qH2gx z#Fd_6mkmxV8s*S_MZZD@s%PIo3qaXatLN-fb_CzRW_PQ}Bfs}J!A$m2zZhxTn)->L zsHp1|;{Iv~hBL$T_+mfdw%Pmi$=!fhu_{5$@dp_LS=MV9vSib zdk0r2#aFq9@9e|%yU2B;v;OK{vtfc6D(Q!X*&}JkT=|)Jx5g^YUpA<_0!==F_YvSC zhZ%NyH3ZW}@8L-_3ecQ+0ChqO>wO{tMrX}@Oc_WrLO{O~2hb@^f>c~K-oqS}7Uk$U z#EWGQ0p)gVzS0KVCqjoLM3#cwVLwe_&*K)I;XSmQjIjz3wE*OOr~?$4bIkI>JZk$b z6je`e(bCX7P|~cg2yDo@F8}K%dYBogh?({;m>I$d&Q?;i>DP!Ubtu}faaLPUh{@lo zO~FNJiQi&XH7p@CJ1F{6jB|fsew*aSCe+XbNO1tqd6+?Hd3f?cS<%`nO>aNJawh-x z6`ORk;Roh>l!i&FrC&=GDPUpi7&OdcwROM+!wFRzu`8Ibv4d}^hZxA;2rt>O640n% zlAQm8q+_d$nbvu=Hj4SWs)51*^Itv9hhE6`%CcgQ>a3(q|G$oD*g9eqNpmV|yLAv2 z*)E1Ec4w>YbYUZM2QIkwIRDLS$}*9b$_IQoNjK6{Bo&DWQ>0Ab)aUQY7r{{mM1LH$Oj%c$f3R z|39dgc~fFAC-6?OORoi4qN*Vxgc4=h zH5DTuM7ktaxzc)R>$M8qXJV?G1a+ zB0sjDRYae&;f{b-tR)B$3{U+^S`a#x424s2LzRp>Y+x*nj3w*qM=mb>`u}X+kJLyx z)QW~mNo)*JUdX@{$MIRqy0^FYUwdh6^eq1jx)NmbM&~ayj>5J79RX;+p5!qn4bQ6y z#r#Ju`&w_MKdygy`?s2L;XOu?mmp50&ezvidk6PGkk_7=KhAoF*CD6tJtsIb_IFm=?WH7%Ma=0B*ym!dx2?0>78N{O28Bpwzl(6Glg@L?$@6e1h{vf2(&> zfLS^tEIwlVOG7tY<8a%n{Z3KZF>@TXv7NlIMbljec{@?&Bo-$^DNWqOxjhpUS^H@@ z4~%>te!18=rad+@AYT96it|I^Ex+efi{^ZPFS7B~RG6aW)Ji7rr?f&oG=fO{n3qPZ zJv3x%eRMgpR*zLYI&>SK<=mr69(iSvYK8s$FUc& z6hi1M83a5IC^#T^TAP^6h=5HP4u0wx%(?|A6cNP(kA&fQ=a>_bZ&ju#hYMJ2MZ+Ta z=kQrrME4>eT(IOvyw(;>pJN)zo|}mYD5da8dZsG9 zhsxe`z)PX3Dk)wERe&Df1#Dvq>_(Qe9on_(!M%gOwL&UXyt$D<_+0*s zR0pSzf!inHI`x6m`Um5rWqYpFJ`m-s2()EC7#vw-NwO%%S zqfilAIDIR9t^nG%P|jIDc5=#|-76N`S`|er9~;XV5WDkWLq#b@8BEg1x9W!Sqs_5K zX=`iM*e0y_ic(O_&Eee(Dm;qKW)Q?Qixu)48y9AIwbq4^a70RI@f5HpOXRsEZzH~q zP4Qmj`1q(;u?U!6@IUY$zMOj>=02=i{E(8H&GJKWGG?H5adxAHJ(QUrgcta-;lTtM zRR~k6@>)#hIl~zkk}LO8-6Qg_uG^zszJi&r%8?N$HjaCN5lo3!SHyfQ70=+b# zOxL=>nj&y8X|N(~L?;`##Rn^iN-ptZJQS?c!>8lTGs_+X=dT^5P?n}n+2L#a zUs*jURphV4i|TREJ~Ms1k++gtIk9ajG5$^+t|HIsv>u^5gLUT|p9R>y*ryP_WYu`q zsp}F9+Tpr3HKN1g!RJMZ@nudS7n?38t-ZXAV)`Ri>Pt(nY$Y(+1mgc;$+fvjetR+W zMP_IqI=4Lavlg&LAiN2pvudu*{US2{=Bi6HmI46##PG(ApBA{FhNvPuzzr+Ye(5Bp z_L0@k`;Yg{el>`=KIhtMLE;{o1~K56jI1OovZXti9jb(_;x^TuT@~5;G*9l$MryM11#KG35vk zS8d*DH9pjk%@Q=rEmkhnIX!58nvVm>iBqK#W-5xaN}dn!AJJiCf~9axN;nYqhG=gP zc%}Fgm!~%_zRmj|_PVORyi;Ff7R6!haSbw9Q^TIq73@~(Y=O+?2yufyU?8Il$*X^P z%{^)ywABSH4}GUzm5wTcegzBYo^7LUWglP(O6Mz=DG+FkrqjN6t>xZ*H8HJ;Mbd>l z_-q&4!O&wW=v&8d0^`{Q{SDM<+^xkd{x~@yDC{3F@6tvTq#VWJ2$%LmG6ylERpe^D zxisET>(V3OLq)Qq!iv{wX`|bOLs~iEWOo8xGIWz#$AwT~IUmWw z)peI#IXY*+DdppYxvBrhG!re;d5F?{J^%nW60d_0Bh^mdN)&F7JSW31pqcCsH&8!& zhOZ=6#S6o7;VsAF(zRMX(268A_qxfBd|kM;ZXd1^_X{!Y%^Ms>Ih(>S=7in-eAb@# z^ELrCZ{4$hnY!yL34+;iqoXxkT4DUy1vzvxg?Phb`KE$j_QKC`c#~; zKCpMB@o`1e%Vvw^sZTg56izHBnX~4j{|6UAmAOm|U7#y=nIW@Legy1i7_SJatl08a zk$?1K_l5n*Hy%$Gl81IhM}+Drny|$ilDH)itON+p4vlUR_poRp$>~_kyk@FjDPF@j zICF)Ej7P?xD~sn>@;4fB6bq(qKpxFmLu)QHDAIrE2xXqYr8C=Y(n6K|jhZdlEK$8D zOuxtb+)##;ij4SwWJvpQF5c#X6X^EvXwn1#}Ki0-R-0D zC;$_4aqjQqjG$>p&mOzWK`xa1m|+sJSf4K|<(f3PWta)P%=9SbQwF;u2rq7 zAbx+Q{KKP}X*IO99wIEIE0cnMwgpk`cCS)A#t&-5n9^mOSK{^5kEPU+pwUorG+n8i zEDr*KIbiMJ*C4d5hy53%nNhC^W#}H)y#ZVR)M9ai2J8JDlfO1)6Pwa%Na=l>jPR~D zWN{*vLsbbD4dTsfEGmV3M#B#Le1IOYXL(x2!24ah0wi4l$dMH)e}51&`xQ%(_tfh& z=hfPOrYt*i3XBZ6$8E7dd2HSIomjl@)@s~qgc~P_p1m6jVG|?bj=5*%X#IL67V*O9 zVZsCxlu+AjnRg8nO%no#Y7K9lPK%Xmaq_I2LwBS-nZ{XNYIb$~7-}Zi$F3v0Xk&Za z8?p}5XBw;Fe@wwB7%CY=6C~Kzv%|p9#5S{9OQ9irqHWy`h{?pnMC{x_6Dn8lKCU)o z2^89UU)i2LnMV3tE_qNa)4O&Gf1c}K`=M)9HK?SCq!?=iR%cUcsuf2ijz`^(uDkDNi?s-lBIv@iG01iz zm7=VyiKc`~djzZ<)D!kSKu&JVd23;0IaEf88fYH$&<6rML5|+!-z4p>1w&UPVQ;|k zs@J0zQ=a9#Mg7q;>{{txEtygSrzContainer #1eth0172.16.1.10/24Container #2eth0172.16.1.11/24DockerHost #1Container #3eth0172.16.1.12/24Container #4eth0172.16.1.13/24DockerHost #2(Host)eth0172.16.1.253/24(IPOptional)(Host)eth0172.16.1.254/24(IPOptional)NetworkGateway172.16.1.1/24ContainersAttachedDirectlytoParentInterface.NoBridgeUsed (Docker0)MacvlanBridgeMode &IpvlanL2Mode \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy b/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy new file mode 100644 index 0000000..40eed17 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":389,"height":213,"nodeIndex":276,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":5,"y":6.6999969482421875},"max":{"x":389,"y":212.14285409109937}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":64.0,"y":36.0,"rotation":0.0,"id":216,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-12.0,33.0],[84.0,33.0],[84.0,86.0],[120.0,86.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":190.0,"y":32.0,"rotation":0.0,"id":254,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#f1c232","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-142.0,16.0],[54.0,16.0],[54.0,115.0],[87.0,115.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":133.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":226,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":15.147567221510933,"y":139.96785409109907,"rotation":0.0,"id":115,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":29,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":116,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":17,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":117,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":26,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887324033,-1.055138662316466],[1.3318647887324033,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":118,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":119,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":120,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":121,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1 - vlan10

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.0,"y":82.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":4.1999969482421875,"rotation":0.0,"id":187,"width":108.99999999999999,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 - 802.1q trunk

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":282.0,"y":8.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":32,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":55.0,"rotation":0.0,"id":210,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-8.0,11.0],[-8.0,34.0],[26.0,34.0],[26.0,57.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":12.805718530101615,"y":11.940280333547719,"rotation":0.0,"id":134,"width":59.31028146989837,"height":83.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":35,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":64.0,"y":73.19999694824219,"rotation":0.0,"id":211,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":52.19999694824219,"rotation":0.0,"id":212,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.386363636363733,"y":108.14285409109937,"rotation":0.0,"id":219,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":139.1475672215109,"y":139.96785409109907,"rotation":0.0,"id":227,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":55,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":228,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":43,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":229,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":232,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":232,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":230,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":231,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":232,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":233,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":54,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2 - vlan20

172.16.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":259.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":248,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":265.14756722151094,"y":139.96785409109907,"rotation":0.0,"id":241,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":73,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":242,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":243,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":70,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":246,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":244,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":245,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":246,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":247,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3 - vlan30

10.1.1.2/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":31.199996948242188,"rotation":0.0,"id":253,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.49612211422149,"y":17.874999999999943,"rotation":0.0,"id":266,"width":275.00609168449375,"height":15.70000000000006,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":75,"lockAspectRatio":false,"lockShape":false,"children":[{"x":68.50387788577851,"y":43.12500000000006,"rotation":0.0,"id":258,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-31.924999999999997],[197.00221379871527,-31.925000000000153]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.50387788577851,"y":38.55333333333314,"rotation":0.0,"id":262,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":40.7533333333331,"rotation":0.0,"id":261,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":42.88666666666643,"rotation":0.0,"id":260,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":73.50387788577851,"y":43.95333333333309,"rotation":0.0,"id":259,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ffe599","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":248.0,"y":51.19999694824219,"rotation":0.0,"id":207,"width":143.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router (gateway)

vlan10 - 192.168.1.1/24

vlan20 - 172.16.1.1/24

vlan30 - 10.1.1.1/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":88.19999694824219,"rotation":0.0,"id":272,"width":77.99999999999999,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":80}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#e06666","strokeWidth":2,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457586821719,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png b/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png new file mode 100644 index 0000000000000000000000000000000000000000..a38633cdbc23014364bfc611d650b2a17dc72ae0 GIT binary patch literal 17879 zcmYhg1yEbx7cE?9f#O>wA2N=k^5TU; zo{GGTp7-KW?!7UgA^9LY@*H}Cd+%K1(B{(ASh93{t=X3Dy466wQ~@&Pj=v%}BW_Tv zbkUAB&>_O7chr-9U4T!T_5s~WI_wo`Hxeo-?Y#yLmK@UoPhtsg``T`mL=N9B!-bsW zgx|Q&gpY0y13~m9EY?WzZE_R{@hs0lpRCMaMHVgp79Xn2Ku{Ma|No98>dQ?kYv>Nv zsWm8Z-;KC6HhR6Lr!VeNd|tr%VOY^m zkwVRr&<8Req~^;ok(4~qc8%?}ztG$_^;BLz`tzB2Oc({;nbWga)0iI=_!KOa4M-C_ ztTazF`Hr*|1=0#(;FjKu?@{<00=iad-;Dj0b?Kb28_=j{!D5U#_8Zd^0xL57Wr;kV zW7Yogi>A0O_xXi?B{b%JsyPNb<9T4@0^Ua9m{`f@Ovrp&86MOYtf-^It+zxU^_2T1@9mNroUAc=~sP<&MwscFUeQ~U<|7u8-^w-37_ z?-JEMYVoHK&{lO$cTS!yzsMU2rqz<^>|956zA~~i8KsrX0scsyr~V=kQBHaJzO|cU zTzmPi`1-h;toXlFvEO)Dhttdt&?@q>Kd)2Vquk7t1{Yd_oSwgUmj1whTCDfD33ck9 z8RJU_(H9rX_0l_CkUw=EALt!#nWmXQAB_t;pO30$=Q^sZZaP`|Lj;nj;8ja2!7oV8 z60r=%w|IV$d3b~n<=QD^cSzQ7Vpm-Q+IaizNe1w?1stu&`ijkMd>wcd20ZSXmE!1I zI8Z;Z4{&eE;orfX6?=uD-Zc^jXFe|_NhE{gSt~z1m@cv{#2Ot7Dt%%N{W=MM*IAB6 zO*h~mm)cFI9OyBP>x`Y6Vxo^NBYlu5a$oT%!`QXsCSLEni?ZE5>9IAgThC|y@b=K) zup@H_BIEF_%5Ob&C2gXbHMgR~wd38|>Y>X)pg^&S(#@Qp{`5}_L)TAjPNKc3{R_Im zSTx*!+wnGYZWnBY3_>`pJ=Dj!#FUC~K^{-G#oe3eQeR|JSmIRTf|KTmOp)u~kKwNy ztr$AvQVd!r-YN-dJzN1!UqQ7M4I_mGlQj}eH&TSJxxDcg!oN~+fq?KYq*czb)8De< z@bw6WQp{wZ*NveGzgX|D9yD6qy=^=-BQVoy8O4?}f=3qpAOvaoMw*+Mnf-ufYlTmy zHG6OWpo!J2Z1iIa~O*@BFdJ2=tR+=&$w0&?W~!n&Tou+fz?XbS5x z?)E3!mMk5G5K4sx%>E`#WAZ0sl3d^Q$J@mmV$t>0g4BG4_>bM64d8oB1YJ65SI|}XnodMBgWbaKzxJQI_?a(SC6hSaSC6PjS3Sys<~Kr(di<~axaTe) za%Hw;r*#TBwCP%3Uo~;Q*)Af+HL~`&f5kcw9!*fzA8$9hki&vnbf2gx1HBGmr|LM$ z`@(Q`VlT9|BD@v0Osq%F*-}u*{5BzxKyc2M9V4oaBegp$`47kKyy)DwQ~J0LHDgA< zWb{EidaHNwHT|vMa~t-2Gd1GWO{fiz5JM=PAwnfkh0g9Ip9Y|WhQKM@UhRN&zPk@N8r}r7J%{fLmcR1XAlPz_PNfc zN|&SLZ2SxtKn_ed>cZM?j3(z@Qm?d4)VR4vJw<`8@OM1x#Ov9;B4#0E z4CVXs=_V=k1>gaGm2iPJZer)vq&w7Rk~oroQIV1gDlmHf{dclgvQ{qtxXoV)uLOSo zX z`CSXah0|4bZk|}UxVTK^cZ*IjP{%t|mFVyBDa8>G@=YnA?} zgD-#oF`fg9bI3JQN;mGX!GD)EonafCDf15VB z)D+V|i~tOxT6z3HYWbn_=c%>V)#cXDDJOGk&}$O{_XS^I7lH zZEVp1(PsD0r`Rw_kove#YX1(JL{n_7*`@jA$u>0>>W$>ND>KqKq}Ht`T13M%;lh2>dyc6VNPYTEIH1cr}Y|Ey8a zfu>QWO3t&dD+t;#Tx*{x3V3SpLH znu1t~d;FK-b@fQC*lQb(x=k21XO9pwH19Lwo9W8IV#p? z)@G&1;m!p8%7LJ(odTd%R+6>fYmz}chCuq(_Lj@nv-j4-x4~*4?9!r|4BSn}f|#r} z757FHa_Ywi5}df-E^S<|xZt8dV3W^5h}cR~{!AaF(d@Kv{RC)ZGv(ZLcp z>sU1pq%7n3Sj+l@fW>QXStG|drHMNll$Zv(q8sCI+I7Mt_aIJ2_zUE0RA#$Sksm8* zLS{KD)hPk)JS<899bTtfq+w`e_-!V{Oi_1@LNp>r$i|;qZ}Bt(@>wcUfsK!QZ^9x9 z_e;2Q>_Fo&8N^H-8 zA7?b&6>{I6&U#vRSdcO|)ETV+wGWAZjIbLuHj?RRL+P;Y)3ybw&ihi0u9rr6AKMkd zba9PkWo4O&L+UclrXF?+$x&-~LB=;Ky?qNcSMUjAX|ZzJY{J+s)~5OZUeu}bcM`9>7?7d0=Zy6~Y5~94 zHqIZ;oY`_Syah{NYlV+)xxLbXQR{atN zIkMpwHgL#z4=J50!Dc?&wXv>)7$HMBt7CZu2jv^_Kczn7(1g|>M9{YLw$Z^$q18?$l*f4Wg2Z`&Fwa|Jt$vzDtSl(R}P5tzI;DyAEYm=!kN>vXVM zxLuUebn3b{#i>s;%+;=b+%mbjhkDqg&Ov{*dEWQmfOm%?U3FWGbGg9R6i{sE`9>gklS7l4J#-RNg@sMFv zFyxqk_j_}|yMn6^V6`9&O%fuBY+4vDglCVl03$43_eIrxG5UqsL_NYGr@W}du2sAA zJIPSItk%GGA%bI~tWpaAC_?AG>x01TfDfNEe%T!jTL)_W>F}(oo@^*Zr>gIt#{R1VDuD*$^ zS|^}(ASNZ!>3VV@mM#QKDN4F}YVQpcARb++%x$%xZut*r`?qmc*7{NQu*dPp?ZHdC zGK#U56sz=K8-&)azkSK-oU`JM1VP{p1EX(}lUClsuKt+=C$BH(#vn=zgRmJ9#-%GE7dn}3|Q3kb|bt{!lG9d=icF zdhScP!jO%;3j8_&aZ$v*7%YL#;q=;{czRtFkMoHc2TCv-1?mGp})8NX4wIBz#d+Vuzl-*M#%(@81e)fgR>XUs^{(gVPM6UW*HV=EHlZO`8neF-+F z(hW;?u>OfdAFfaBd=)8hlv6)C$zs8c*(;jvPyJ+{nks%9>~hZ{=w=++HL-^rG8)8P>9tK$gml8CJrcltW;q!$snXiI{NYh0I8LFyWkilaW; z@n(xXY**z11v*RJ#^7q5{uOjWhH>_QU#F&-eDbtO;{!f6^+>2y56M>*gcftib0;aqY@-M!r#9+Q~ykQjU9PcrZfBmixm0dA| zhBYEv-$N4$8B4%5A5C%1tN>`RDQ5z++>P}43Hn@7$|t>C^=!&3$le`FvShp=u8k#7 zN{MeDNG_)7ywzuBI{@ej5#4bW-sm=1J7#Xb2>~6(o2GPgJ9sxse2D6Aj52kP@q9j* z@+i(ceOU6>SH_J~3H_eldKBhBK9+Tm{KgRYTmH-MquX>Ta8?Xf#F{aE;)lhj z5!JBC9m4*o$!R`{J7JY@D{}dsTJ`%eJTtIh&n1BGbQ*d@tGTHdjH%$z>l+@Sj*-d` zymFLDSD)iZNZ@Bf+=#r`t+hilrAJDneHKuurwiYSXq=pOt$0OS#H6qBhAhQK{u_xbl*gJZZGdYFlkn>Ys&Qn( zYVG5UZw}feXdn&wr!XSdi8}R6Tw|RQ&9Ym6-uHE*0$l}4C*OkIBu??oL7hwdL)IoSBB*R8Npahnzhc4b!tgvLS2o2 zTBl>?8SUah1Zwob3#$ik~fqlk!&e$)ydh zHnA*+)`U5eU8uTZPe!Brw&=-=HbvV(@BX#&|0+KYgLt&BlwC_xy0skCzX znZLO9skp03F}sKHN+&)nN#%<2>#Mvd_*H#E+^iZv0DQ~xx=X$91zn~-pRjz7ygbO< z@;e~+QgB-%6w|9zCs>}dzzLO@ROrm{h(z|W}+I28RQh#bHKL>C$FP( zysKRD(zYLRc|*-ixJb>w_BK*rh{@oK(-X?@sUctdRn{kr-XU2h2^J)?hId$t!WyO> zkMc9?-*Rb*8+%?u0eP$@UB)8jSZ2YU-3L|$(|n7;y!wvKFW0i--cL^O>lgj7YbrWb z42H0h71M5^Gv$0#*&=2<>CG;7au&oY_jAvE{5hMchsalPrjTvNNmS+#DMY^ataq+u z3rh$K|HZ)8_jZGcj5@$k@_U8PMk%!8B&=#ZhPQySrAbD#o`r@v!d!yVh6f+hE*Q^d zI*5Hfp8Q*dpT~zCS+a+aeg}pQuvOqft9ANcf;zSFSI{u;Kd4vJZkfm_%vo&75ekrq z8C{%Xw`uX^w@f-Jh6Ney?6W-R=wQxrwrmlkqBft+F-p4~MF(Jw0uUxr|3$#GLfkXd z^SCZBT{q~*+D1`bAPO`LvG0K0Lh;%i+&qAj9I~?fSOA5Dwo+$2xa9|i+5vB8uRlCP zI-J}J#@zBkU2d*+PdctPS7)>Do0@R0rM`#1f@+2vJud)l$+T@PD*v9}e{SPgYKcW- zdM;)b+%>wc?Cyk|eY!m0Gu}DPsXxA(d$QOq;oitTot7=Gi^IO=6@UhT@?39)uPx-1J`Os4e9@z2rmQrg zpVTmE^QR4wVm04w=`l`?oI})yow?Z+nMs(KTZDBIVxrWM(TB-70AfEliWk0edWuNq z_uDOmr36-nH!}F9_7QLpa)<@`J!fhipPE#LgRRNjnI@;9douW>DUQ!fr5uM@a(H4G zK2~B?@qIt5Skh2=qiPEWWOmA@M9yMzDaXgasOsE%6N50@Iz>@nbiA%E{RMOnWo)$e z5~j|PX&ZuHp`C1p5(1^&=o=e`OTUHQB1m@wMWfeb9}O`1bzVGtEy32|Cgd=#b&f{Oy+ z6*O<2DB!v2`F=S2`Res_Pl3Zi|3a=|UUfKXeVj(wImSB)Iui?yL*B?2;5va-VS|iq z939WEAZONQ1po}Iz<7-Jdp0f$4-8xRD7?_AwqIY5<>L@CGj4tkx~4$NWc)%sPt&N~qvk=b27kYF#3R=f3R6 zjhe(b)T6Z)GfZq-;9;tOnuk{Lp&c%8)(+7g^HwInMm-Q+&W52CNV53x6^5E`lg_B7PDr0APkyIFh-025+IhOK zYF&SH_mRT$qg zLSFMAkW+789n3H3{Ef@PhS>F}E5GQ@`t6lu(_iaVfcU`p(Nentf zA)7yK1krNUjpj0rN!h4!U z-re*S;|zDfG~im$Vcird>%{i*a2RwKRao_B z>B|cbi7kId^Ck5$qXpDyd~)snpTS>Wb||4{PrjTCM%T%6?i}}#Zk;XD*SlI~FyJ-w zGo-~ChiAPO89IyC9CnpbE%gC?RBQ`~8rguj5h$5edOsCl;4b(M@?wze*wp(B21On@kQuRuP$T zcaphyj@XJnKaL+G6ptvpT1FCA@qE~5g&_1)^5Knek^z zK+{*rErU0tCTxq|A?-;pFu;j5Ftg^x<7WW`G0L{>LLYFBcqMn;;eh z1^`+c@er_h3gHM<(|+o-T9#*bNSnd+L zQcN&90+Bz!+c@fD0ARjHi=Jb#m)9RKg4+w&iQ!!)kgIhDg1em?gld-MPfY@D{wURG zGO4*N*h%?q^@QU#HX`bu*1$VSGAiRJ-T;6!$o>U^F!hhUR9QqYnry0u2Gv-`SHK|j5pYZb;ubgZ3QGOtNlN&%6+S$qx&Zl25mP znzw_lw_}Ifntm^_qAaj1&60F^f2p$oa{2>i*Hi0=dGq5wn(+25p4ZKS`M@&0gkY9q zeZL!S^N0lhz41uR$Ka(gx1jru8vpG@>qMC;NNm0F+I=2kgMUp$ew;w<(L#Dn$?4kT z-zn~Ex0OZs2JQv7|HhF0i#Rt7Fgh4L*BmS%d^;MRFLUu>`l*)nwkyn|PxnDuY zGpO}ul99p=s+#_DQ*_U0xa_;JpA|~y!=yR5YJeW#L7)!K#S$Ke*a{m*tlh$Kx*4J} zOOjz$Gn?T@4R~cVH&YKYTU$!5FjVGmcU(u@x#WPr`pK=`D(^U?Oi#*VInO7J6-;$| z=M8R^$6tnW=Ke5VdzPGloQtjD|J#ZA)~0~NvxAfmcih(CQCX5K_F|t2_{ibWB^~-G zI4%f_nIrD>g?+q$~yTO{oSoY&%qqirY5sjACfNXvo8zz

b*{Hq>az{aEwl^vuL-ETW!AsvC0U0rmZ$_<6OOxU4rH z6u*cCCrj%CbZzu?LCU+fGt1_K0=ic0aiH8Tv(d}mzpAB)%@%6-6`Lyn zd;=aEyurlO2oEI|CPfQU*p&b-f0^7f+aySv?;0IYmhE8@&azM@s(W+tZ9c5e4!{sX zfiaPRB-x>31b(-@*OX3SfPO={;5>XR!!5^M3})_?`eW3RYLFL4dn&gCRlddq!UZp3 zAO=>+?ALa2%k{;xrD-8Kl++6_#b$F=O!pn7J1$t#B@!$C5)BfdlU5HN(yJrQ%j!dq}rfM(a$o8fv zTp~yzd4j8p6OX6R;`GAwKclI1Fc=V2d+jG|XGUoRj0-i(!+O4aQ6eJQWJVRWoER@>i z1n>F0IMxw9Tw>{kK1i)xicm&1#D5gu{o>fIFIqAt;KD8L+_HrIBbg;wPdL84HblQ)C+~sF>=()-`U*es#QL*>k3l!ZIIv@K;?LQ#Ty`{4Un&{&BHwE!4CHY8s zb9=V6y1JT|m)G4blMWT&;|nz-WC)pfM6;rDh2LXBA-h@)ttSbpP#BK2EA@3v!))k_I4*8zu-(cSeUL5Bkr5o$S)*>_9hh!9tbP63Ei-@-I*%W`U5q{cz|E*9v&Qg5%&dt z8!AqBkj06l=i^H)WY~-BjUhDa&H65JxDC=MS2hEuZ%EK2A?7il=<>wG#%6A#1e`7} zo5c3NGd9MIpFdt|q97+9fkL^>J9E;~(sFV}If_$KQp(F&<9&f+WOeBp)6DcfK(mM& ze>?Fg>6w+b&jHPHL|?21Nq6=$)R_j15nrFPD&Eh{H+?w}3v;%(RKEj$S!Mmq@NT8ky{N_Eq*cDRbAP`cP^8&pSqLb}@=d)^ojf1TGW= zuDwy*%gkJ?v!NjzhLx@$bKNNM;+5*d$YJ{zRg=C4TI5VM%m4OPr0(G0(9qICd@hXL z6;g5pN&P^^55gwB`9sbgRWKHhJBqdJyXfNrVPqfmezre`PZKHj!qWT@RALZRSPem- z5^p>^3k%wi;p4v4Z=Si@6@b5-guorPcC*_oI4o%WW^tQ)ge-JewU zoM=ex0>Ge^j5r+xnPav+yv)pKieCIzX%!U}w>)7m)}-_ukaFHzgi{wn7|G+5d1`x{ zr0MDDe;+en1TZl%d6)d(P0|sYq|2Usd$A8zE|HZaJf;mTq1MY{WzY(4=jWC?_Z)ui z@>{sQ6=vuSQ-CWdD5!~KaGABg4jtowUSD0oe{hF#+VlZ6_-BZIr3cSO`wR8M!;{5P z&L`WkhATD9p3M{ij5184NkCuY_wR=axPKtim(fa;f|ateva@{@{tu8YWZ2ay=;maF z@I8Y{2+_`OGnrC+00CB2FlwY`K?fuLs|<{990C|AWwK^5>2yB&!-s3o^|_v60eW*D zIjtPRgt>3DR2cA-q_psKZU(LJb|M7&gXFocbeWR(9K=jXv^Lt>+AmBjhJYXLEZjwN z+nG?T)i-~9YWJt}VS|-;lcg9Ywm)@XEfNnH|K<+Qy|wV4V^-iM73T;2p2BISVy&?R z+1aH=A{%|~ffA*ZDM)@p;7A)n;Gi}8<&9~TNoV)C((aRbW?*bgQDa%hL7|zz%%BzP zuZ%##mOR&z{*e+*!tvud{eI$Xe0+SIGM?-7tqIz>evMhj2x3da=m*aUR&H)TZ=~T$ zd9w*K#Vqo7GbmxG5D`KfMypV%*cRFuFp1Th#7&c_utafC6s0V@hnSEh6l=z zyt|W)LV62R*F7WzlgwF660sK-1E$zj*x_n>RjMHUIn zT8DC98iDbqIji4HQ0M-xXPE&3r(Lq+XgHPm=VWNyYhSJuNN)-bXyYYA4KX}?A-WZN zb>rEFp@zUGz?aO1V9cg zUQ<9y0QWk|6g3&X#u-3#Q6+3YbxzWQ4Ypf`Hc&YieLT8y*|r}!5T0qtpq1Oc+OD4C ziyrZ5{q-B}`y>Zb62pg))goLh&hEpb72S9zFOnb6KRi8D{r<8C1+?27}e0s3aI$1Q*m6Z`| z$9mVvQ^)P17IbJq=TXIentF%@*UeafK^jUZM zY%xzfxFw$W;#g%ScC@RLJViv{{DLp_=ziV&AZ?_bDw%!_85R^+K&$c+cC`b7;gf^! zdkf+K++yzA!7U(VlTN^zS(Dh!w$apvnxHq?H_9PLvkrmBv6taxYGR@0E&bY$Jxn`^ z=%9sJCCM_P>1ycq+bfLYH{4p6!&4Tkb*@^gP_cSk(^iEz7E+LhakO54^8zbDujJO_*>BO_v$z zj4LH+Ds`DR|4;;TMfm!oSDjZ-&`LOgx+g|WEv+_0b z`B9yb=ef5Ik~F%XVxI9y;y5N!Nll=ggAuL{8xrbj76_0td5R|R>l;2Cfzn=3!d;Tg zH0#m>fYa6$rzGjzU3HJ%hU1Bn!%yO6*k-7#&+0A@tKgsc{ck9_A!uFh7yJE-kH^XH zthZn0>y9|4+em&Hl=vTnhm_+q(-x^$JEaDWqgv>riK>>G`411%0U!7QNTNZop)^vXVh`0e*yCO;tD%bx59ow#0oplpi`6f zY~gW~ldgiAr6oZJSpww3S40G^TrRSUENHG~msvj@=;JJ-b1j`ZSji?Ee zLTl2nAyhWMHZ3iUzksZ{!}?{2zVfqu5>Y~5@~9WF(^_a;b~E&z=QvCjw|5WNejE?; z{%reMvCHi`dto8d?R}82^=I3&WL#s(LB292YEg%0BERP|DVnt?s>gftiWB$s=Nbvd z=SUbyI<#ttj~x{S{u2T!Q2RFLG*^wk1i9**7zqh7h@5Gl>9xo#|6N~n$3bb;6VgFN zUzJ8^$nWgB*Ikyg?flq8)>h{1cvk?-rpolPY!0(GM!SOzXP)Y2)R$YiF3cd026Zgt zC$-2(vYvx-rJwev4#xy9N*bz{jirO3CfZV~CgeytlXU)Ef;V1Vt6J?s=q7 z=djU<1Y;vHrhH(A>JE~{)8;^F*eLe7O7b%gL#p|Ep;tpwO&~;P>{6;M#4_XfS--bT zEYZIdr}5mCRMMM&O}DlQ6Ni^>a7I$R%&KRyH7)Pr_G{OY?5)>8+e3}RjDg&&Hhxi12ACsFKf`f z7oYv-$yT{+T$4%p1=MQOd3No39a3SnDEYVOLS7{rc{wd@iSWE>-p@m$%r#o02ziO% zxSH&+4z!y~N?Q zoNX(k9_6Cxns2M!L>{r^m_I8Tcf zb}|n5Q3iXZAGJYRPf|ojKYn^}?zDp~9{^7qdg++dm&hou72(25PwkVg{v+vvH1|(V zZTY~N<{za>gyBEcYJXDDYhUqd5=9N`TIFpYkmjC}Wv0L1QF^n3MhFlZVwMZM8>jvG zc8j=_#^DgaDeevn1;A16ClmhZUAZw+BJO|7GvJ0_=j9@hPU0u26PbDX zQwosiGyh(|WsVhbh7B7VSM#^2+bq^7=g9m=hABYDPgecGAu$(OENNcD;k)pY6W93} z{o=)u5!DB6WD$7xgnuu7CS28A_MgzhT{@DDctf8{@&8fwUBv4B?w2@UQwv)(U58s+ z>L-e-FMaZbnb5scP*iU^!5_+F|`*or!y+O+GPRrl6W9d9hq!*i@F(^6WPi+`|1oe#XHM#V8H?3VEHJp-9MA2BWM&*oupsHc!G$ zIl7T+C&KU#W><18`npZ12wP^0?we&E%f*<#SDY9E*M>ei&)MOq5yfCnX@Y`f~9Nw$HcbW(91%)zevdoylqjmA@%P2At{~42)X&EiW?+*+-4v4Ycp!@dWvw zDgBqLm`+2=~Fdb&CFawyt!AknMLEA8XK%Aupv%7KBaJv3dUU~F zDnhkLRK0fs$mi3UOTj*Aqy7ON6&X^UKXq(73j#;3Um*(6a7w=Eb&`O@KbAb|FG5l5 z@1^u!`!0@ondd;Z;zY&k>0abQ{OZ~A%;vBnGk7d#l4-^k?+a$ESC=ES;2A|LIw+sG zov|H??43x6y?*(tHMoJ0&o49co4MN#Rz_Y6vX6e z-(5!8HguEgP@N24RWa6d`e#;0fe(FSLq%Bx^b6%W3e2L;B^-)861&F4|)pMSa)fA5d%70C71M6))P z@6i^+GmhDfM`)-_;NQtQtm#tp>BTTUxJM5ND88t#K~=eB4*1M?e{-tjjU*7!-i@fI z1OK@F9K%`!K$E_XgkzH6ncXhYx+T$6AO;}gM<%e&Bg)$WXxf1q8@o*ynQp zYOfb;D0%56T7))UhDk!_V{bZDXuC|Hatfm5`Sk|heaUkQ4+GhwK!Ady$P(q>(9pkb zXk=u>^>o|eHA?VUb+Q?^S-X9b&=9V~`K1fl4;eWigu`zNPn6y5&tIe;*}Ov(ut@TM zQG6T}*(ppat0YdXMG1(TCjEyY5rKi}HS#1o;&8mf4~Y}wxFj4D1pp`?RzP^Q*`u1^ z+7CH=y)dE^(W%v>rJTa3etdjF^$1sc`vS!2@rb?P}6)hsEi*@mn$*rsZ z*JfPQrK6anuKW4(vrA?lGMd2=a|$k6h-LJAAtTMlC75cSiZj&6P7!sI%c0aCFCn&_ zaw0M}v%sR}WI`hFLD>DRdboJN4l-IZz@+>2#LXSmU)?JFXaW!aQqmm<)w~WAnW$&8 zO06QprL)YG)PHgvYRjT^Cx2nrbs!rfMl1Wtp(E@8wGf-nc%bO#NzDYpej0D?x4Vl5 z>m)%{(S?aP5eP~xF_48ah}6gc{lcH2I-?jlkMGVt+md@3Yx}V)kSbG6R&v++l4&6SN}?GZ zRdZTx5P5hf?(j7jkpe_LZf8E*#8~F#!JL+s`~$ggf;}>kXtlZ;G5`Fg%3Njq=b<&l zU>uvr=7(hE?<{hSeL-8xBbhI9>Yytcel^R1t$?rf&cX~r`L^i=o=O^IOom_Dd{qUJ zail2X-MQHDJkJBL`vp2MOTG)`p?pgp>l(&y%MI_{_LSQNCk`w3YSyDK0DjbJ@_kjH z;~sf4NKJJ26B_xmzpX--4KG(+W{l;!bSMkmne469Ugk|SEIl8^SpJSU@$a*kq(bN30OYWo%wjL35S$m2|v-vCEV z9;fQiEDv%U61L=z%FBOS5sN7^W_wf@xm}`e6~Qx(V3xYgj5KlI_KNU67`+1Y1GMbU zJCM?69K(%tleG#o%!G9113xSRR8o@6+FDyr)PDtYxdf^ND7r|>3+m2t;eOlHAbwrs zbw8)tp{`?#N>Hy>Vw#I7(z02B0x^N^}%SwYLlWp@+i|) zatj!bQnjjCdTV?pg>rnxS44WT0OEQfm>^wgO2=?0i6_}Y_~e$TTEI%|SPD1@R>bq< zg2`!ar%7_C%Kl<)qLZQSw{jsf^Jc+~V#S-%V{$ng&!1vV3q-tD?feCew$g(`diU5! zW=HzyUb}&!#!sNpq;x+tBf*c`0eX-_G$KL-Wq=;r!YaE6?I+d zJP4v zYwZ20rc)i-xvV)D@&Z76bEK3vf9%rU&<65}(H%_@)zH<-)#%-2Dn595eqv$TAnBkM z^?WTC_!ofY$wQ{V_+6uP$VBR2BBM`Sg;4ED@K2Etf1mSu{D-HS(;C?LO{??a92!_V z^fEQ&{q6@vX}el^h4NG4kf=6kDcv)sPjqb5d5K=xaT0#lv=c#($wC>ad^E@~Mm@Gg z*#Dobs?9oLE`6UK;Mo!1-gn+^VhULP-@Na-+)QVz4a;9j(o@sY!kuLImJ|A44rgn` zIu*tSH};M!Wk)1otS&LGhX;^ipQFMHRsVIpn(Nv(l(f-B@=!QB7Ef`Ftz2%qq+RF( z?YerZIHz%hxh0M{&AUy=+ar(f#0%E`)axho@(%&|?>IU;FY$xcE`Odt=|Q8V#;qCF^rc~|!; z$03QC1fa!sN2;rnk3x_e@|#I(4czUYba3DZ(ZWR-w~yzV%!jnS zI=|~Z2r@9^K6cOV9SQpw{wF37N%lAT=kQO~$#=au6eB#p?KtpWTcCmd!iM34ElUM` zT%NWTfA6=o639Q!#xt%X%4G|l@4IemhMEa^QItX%L_qY$Ma<64;qt#U2v=WU9$EjA z9y*uPxeXDO#I3#AAZpSPWa_)nnPJ12t%$=BrW6EHBz%c#iB`PC#H;UpUmx|IqEa}R z7rrqO^$_=QQo2H5f_61(&4qg)TW+IrnFGPhZ{~x4OK4gEcN(vBVXo1T=ck8fCUp6g%z?@m#P*^>KMXZd7=E>JyreSU^X zm&dee)0)ofFnxNm&F9I#?R$;Wa-}&AZ>SGmSY7Xaa?bw=^DkUntS0V%@Z1ESqs_;8 zB%Kt!IXVxTp73@|usNa7{M#zezDcJxbX|GuADdvInV)}7{OoD<@}Bi=_79OW>$m?g z`@eA2+2Y#lhUZV79p>-|Dl~EXVb}it@|S0quN(C$Xw0z|zW3|pzC7=e*S+%Ht15O} z@PGbzli_EFo6LXr8F5J6UioSE_0JD~Z_+CHU^$2X`F*{(@yrtv}nS5&!vBR_42NAzV-A%y8Q(dUenH`-^_q-7HNxz5MeY)8FOa zr*FOXrR@Ixx_i}SR>BV~r~Y)K1J1J{=R9QKWpYm{_9*M9=UPf z#iNJjFFzo6UDuJ@{{7O=r)w`>J9m0ZzP{XlyB||7|J(VuaDhQ-V3hUo-W%6agW2V_ zWJdk46ezQe-oO8ud;g7vb(?Rxn7;qL{e#Wx+}mI`=qZ5}=ncW#H{L8@?Uj_5D>q|nKA5%jk{U=wX7;6)2FF{BBqTqj@vwYd~H*~w%fp*k;|sH>+@1|w*I$?a-V1P zd+48%X}y>+<)EMxODP*S$b{vqQy*sV>MiBYSk->?!91O=^h+_kcTZn8ykpo_c_!IT zX|G7RN!Ox^y1HxWv*+ip+-container1 -vlan10192.168.1.2/24eth0 -802.1qtrunkNetworkRouter (gateway)vlan10 -192.168.1.1/24vlan20172.16.1.1/24vlan3010.1.1.1/16eth0.10eth0.20container2 -vlan20172.16.1.2/24container3 -vlan3010.1.1.2/16eth0.30DockerHost \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy new file mode 100644 index 0000000..4d9f276 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":566,"height":581,"nodeIndex":500,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":-3,"y":-1.0100878848684474},"max":{"x":566,"y":581}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":-5.0,"y":-1.0100878848684474,"rotation":0.0,"id":499,"width":569.0,"height":582.0100878848684,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":103,"lockAspectRatio":false,"lockShape":false,"children":[{"x":374.0,"y":44.510087884868476,"rotation":0.0,"id":497,"width":145.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":101,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network & other

Docker Hosts

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":157.40277777777783,"y":108.18042331083174,"rotation":0.0,"id":492,"width":121.19444444444446,"height":256.03113588084784,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":99,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-126.13675213675185,"y":31.971494223140525,"rotation":180.0,"id":453,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.4915197649562,-156.36606993796556],[-121.49151976495622,-99.52846483047983],[-229.68596420939843,-99.52846483047591],[-229.68596420939843,-34.22088765589871]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.82598824786317,"y":137.23816896148608,"rotation":180.0,"id":454,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":55,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.05455395299924,191.93174068122784],[291.05455395299924,106.06051735724502],[186.27677617521402,106.06051735724502],[186.27677617521402,69.78655839914467]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":332.0100878848684,"rotation":0.0,"id":490,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":97,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":9.5,"rotation":0.0,"id":365,"width":141.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":98,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Parent: eth0.30

VLAN: 30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":342,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":96,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":332.0100878848684,"rotation":0.0,"id":489,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":92,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":10.5,"rotation":0.0,"id":367,"width":138.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":93,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.10

VLAN ID: 10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":340,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":91,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.40277777777794,"y":126.43727235088903,"rotation":0.0,"id":486,"width":121.19444444444446,"height":250.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":88,"lockAspectRatio":false,"lockShape":false,"children":[{"x":236.18596420940128,"y":158.89044937932732,"rotation":0.0,"id":449,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":53,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.49151976495682,-152.05853787273531],[-121.49151976495682,-81.64750068755309],[-229.68596420940125,-81.64750068755139],[-229.68596420940125,-33.27817949077674]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-179.77677617521388,"y":56.523633779319084,"rotation":0.0,"id":450,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":51,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.0545539529992,186.6444547140887],[291.0545539529992,117.79470574474337],[186.276776175214,117.79470574474337],[186.276776175214,67.8640963321146]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":447.0,"y":150.01008788486848,"rotation":0.0,"id":472,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":87,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":473,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":86,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":474,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":84,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":475,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":101.71008483311067,"rotation":0.0,"id":477,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.30.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":350.51767083236393,"y":87.47159983339776,"rotation":0.0,"id":478,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":79,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#cc0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":94.0,"y":155.01008788486848,"rotation":0.0,"id":463,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":78,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":464,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":77,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":465,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":75,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":466,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":73,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":80.0,"y":109.71008483311067,"rotation":0.0,"id":468,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.10.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.51767083236396,"y":95.47159983339776,"rotation":0.0,"id":469,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":70,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#38761d","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":341.0,"y":40.010087884868476,"rotation":0.0,"id":460,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":417,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":418,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":419,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":198.51767083236396,"y":41.471599833397754,"rotation":0.0,"id":459,"width":175.20345848455912,"height":79.73848499971291,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":17.482329167636067,"y":14.23848499971291,"rotation":0.0,"id":458,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":61,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.20.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":330,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ff9900","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":279.0,"y":129.01008788486848,"rotation":0.0,"id":440,"width":5.0,"height":227.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#ff9900","fillColor":"#ff9900","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[4.000000000000057,-25.08952732449731],[4.000000000000114,176.01117206537933]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":56.0,"y":503.0913886978766,"rotation":0.0,"id":386,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Frontend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":420.0100878848684,"rotation":0.0,"id":381,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":41,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":382,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":383,"width":98.00597014925374,"height":44.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.10.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":384,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":385,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":382.0,"y":420.0100878848684,"rotation":0.0,"id":376,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":31,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":377,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":378,"width":98.00597014925374,"height":44.0,"uid":null,"order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.30.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":379,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":380,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":214.0,"y":503.0100878848685,"rotation":0.0,"id":374,"width":135.0,"height":20.162601626016258,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Backend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":376.0,"y":502.0100878848684,"rotation":0.0,"id":373,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Credit Cards

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":627.0,"y":99.94304076572786,"rotation":0.0,"id":364,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":25,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":363,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":342,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-183.0,310.0670471191406],[-183.0,292.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":410.0100878848684,"rotation":0.0,"id":363,"width":144.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":218.0,"y":341.5100878848684,"rotation":0.0,"id":366,"width":132.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.20

VLAN ID: 20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":297.0,"y":89.94304076572786,"rotation":0.0,"id":356,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":343,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.0,320.0670471191406],[-13.0,302.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":222.0,"y":420.0100878848684,"rotation":0.0,"id":348,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":349,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":350,"width":98.00597014925374,"height":44.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":351,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":352,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":410.0100878848684,"rotation":0.0,"id":353,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":332.0100878848684,"rotation":0.0,"id":343,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":203.0,"y":307.5100878848684,"rotation":0.0,"id":333,"width":160.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 Interface

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":303.0,"y":240.51008788486845,"rotation":0.0,"id":323,"width":261.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

802.1Q Trunk - can be a single Ethernet link or Multiple Bonded Ethernet links

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.0,"y":291.0100878848684,"rotation":0.0,"id":290,"width":497.0,"height":80.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":543.5100878848684,"rotation":0.0,"id":282,"width":569.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host: Frontend, Backend & Credit Card App Tiers are Isolated but can still communicate inside parent interface or any other Docker hosts using the VLAN ID

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-33.0,"y":79.94304076572786,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":345,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":340,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[157.0,330.0670471191406],[157.0,312.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":410.0100878848684,"rotation":0.0,"id":345,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":323.0100878848684,"rotation":0.0,"id":276,"width":531.0,"height":259.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":19.609892022503004,"y":20.27621073737908,"rotation":355.62347411485274,"id":246,"width":540.0106597126834,"height":225.00000000000003,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":2,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":1.0,"y":99.94304076572786,"rotation":0.0,"id":394,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.5670471191406],[261.0,108.05111187584177]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.0,"y":90.94304076572786,"rotation":0.0,"id":481,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.56704711914062],[261.0,108.05111187584174]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":104}],"shapeStyles":{},"lineStyles":{"global":{"fill":"#999999","stroke":"#38761d","strokeWidth":3,"dashStyle":"1.0,1.0","orthoMode":2}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"14px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117295143,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png new file mode 100644 index 0000000000000000000000000000000000000000..32d95f600e1d0f028e5a354584d7b3eac1639e35 GIT binary patch literal 38837 zcmV(?K-a&CP)`~Uy{|NsBV<^9ae%>V!XU&~tip+HNj^66}A$mRQ)nVITrYtHNc(eD3Esqa*> z`kbAd%gf76to6~-(lK|cWX@pB=>GHa@o3Xzg_^4Ws!jc*N5S0jld8K^uJzT`)$ek1 zjEs!W&(7fC;Fgw^`k+76^8b5_oPvUbLqS5dw6u?pkKFqIl9H1A{QJh>@4UUdh>3~o zZf_bI8wCUeRju;(_xAs-RR8+*@o{lbulell>t$t)3OtT%C;^muqGbEMhb*uuoa zthc;2H8U@1m?bADOrYYNL_*Bw_Ge{V*N8X=7YhCG-9?x^NsPS9$HfQ|DjO?231S5M zlL5N}0aL3>H+?Qtsp(QsPhQMaGY|=~Zwi2)wv~;4X@!Z;naVZ8!1Fgf;UAc;zXuVure|>tOWE~Z8pARn$o3h1ObAbNj!%u^* z_5J^CTT#Z>-^Nl<{-!W<+HSAO-u~F8IZIryPE6BPOvND_pf4@y_4@7P*??SStj416 zXk>tnp?7p^5>${_Fg7T04AS7|-0SlB;o#|%K{jY`-gbA9fN4Oe^_8!W{kmp&I5V+7}GvKNhH!eJfrZ;)8=# zpwqakn3Tfhgs;~cM<7#nn}SJ8E;MGj>e#%;Zg8JfSz4 zvp_CT_#;99000DZQchF9>dx@GGiCe$0F;4AL_t(|USeQi8bn|eFfcF!*t2>a$5|-4 zqi+-{DqATiEeu5j0zpGVL+6shegeP$D|}pn2ahA~{O#-}nH(wBHO4^h(YZg#^A+KN z|9(9^qXjyfdTZ6k`BKNgy1XfuI#}PWcKuvVT6W&stlrk;s=3{#zPlN!uhga2huH31 zr`8S1DKDn%eDlluV%M`h{dz*^`D*Y05VF|C*xY@XF0?m}R`!QiLc3?IW1l)P+Mkbp zJBq5g|83EGj)%%QDk1j+RInXOmzz<@tvi4Bp-w>P4brX&x2%xY?$pi^;{7l%Hg?BD zJ55Fi5fg+k(=;FYt{WK}yeAHAS9uWfbzKxoYd3T=I6sC4b7@Z&KojH&aBl%8i9-}- z?#F>Rt(WBXR7!D<-vZ}KCSI)MO^Bvvo}kYOP2n|+wT0-Tfup(p2GARf>`j9s%Aww&IJlPEu;7F=CZlaivP(gV@h;xvwx6ru^ zBi^&_U_fL%zq?{Zo@g+g&zw9$)^C)i6B$@iaQqn!=86J%h^FnBcY0rpJJ~^##7rU? z7Lv=t9CnGVe@zLhxET65jbUYDjyRLi6sJMZk+@VyPOp6Q@%}}Vp)&zXOA9BNYAhHk zPJdsifqsfY+%N|TiD4C?WD`w0`Wlr`!dH43t^GJfZmHyz6Tx(urgNIZIt%KOUYC`PTk=3+Npo2+fSsJMp3%#W~E`ov*Wx}10c*&ublvv zD(EoR5fj}jA^9-=5MTTQ=@$bRPJd^w>3lD^m1R$=xB^95O1JfLE$*m=hd7#awnFfI zD#V+HQuQK@#*E(m9aYYO(j{^!UfN2$gQhf?l2RVc(U;ow-jcc=gjz}}MQNDWSs4iZ z4QaYAlx(tTHvtlGHaP&VI~l3qsOa7lf(Ee)RZ5I*lP$iT6L-Wka{_sTCNl%-ybxkS z{t@#Gc&aLK(%N8{yDNnZLN0Vz8e5P4MVCgHiRyGxA)0AEbR!!|!|8N_D?jwJu5Tw3 zv9+~D;>yZq8tz6Ry2T}RC@Mi5{|t`x9rOAf6MZ*DzETR8mK$;(g!pI|I=C40hySKR ze=5<`MRq%#)fm!_O{IO!P!_lD_6gA)x8?gNGkhtFBlKPW4et}=M9$5q~;`woA^t;vy%i+#cf9f8d^Z zV~m=iGPKnzl@IPxpFdq)BZwWkg2s*;a^6+|hOh!Q3PzYLXneSv>ksi2fn>g0Y58;cav6&vtV5QuTJq=C zm1`|!Ie<(`5OwqQ=`#o6@>%<_hB!pK!*EQz0`XHN1fd_*ht{k7BmmmSpz(ib^o7go zvDk1~Mz#4W4|l7#>`DgLB;>vop=0fFoTu5Qd+vVz2o=M3%K1b(Ft6j^Pl02_e zl{{XD%7=@n;Xj!N;e0IiK{D(!jjy)Rf4VBDuw4Ei7W+k#G9GTKLCEPlP#(xND$UOIFwfcCXm6$eA6lTk6? zgGW%3QCeQ%P;l6+V8SQoKg7*AoY$P$a9HtgvB@6pmUVF)iW8rrDph5@wFx{db^Oj>2+;FE)9P6>}*CMg!%4=uCb!zbdotnVpiV-AAKL4T%m(quY4lYl}fqj z2MGl5-=OqBRY*~y>1&w@%HY6K^{g9{oQ;E)2eSb|VAO+d9(K znF`IM3iN7;?&#}(NjHAfHg&sFm`Ne$)6im&zB&c~;R)vU7ZJP{zPm)O(CJ(%G|@`_ zYbg-lD8(BKw=a@_!JTqz%I8m>5Tr`O>har@O6%@vI;zdK2%d;FL}KuoSH)^6Q{Cwz zL-j6;_Y`3)_Q3L3mo>wUjwvgG(9OdT(niiOZK0W{lV05nlJh~)DN5H%*s^Si;B15< zb9*}hfEJ^Zx!~<505ZJ{Z|IME+o*|tSZNK!C6T^fkj{e&tM03~0G+r(C@$o(xb?gN z9;Wc97`{T7&#BxS%6_vYVn(y4fATDF0B?+~=n+AAl-Crqe~{ z%h1errt?WDcztsr+A$OTr_5A5ajcMwusFR&?*O2!=%Ym@`UoH#U-HaG6TPjIaZHGa z6;kMYSdgCRjpDMY0pcm{q!q$N1cx$CSu|xRGV#0YBu`L>?AU16ZaLOnhAI#o7)=aj z*ny!Q*IH?{Ckx#8?cCfif43i(22Da1Q|Z9$bb3Bcw|wt+R8#?q{fS=7nJA%xwpdbZ}wzdF4J{#GU!eEC=lwfh8mJTOLdHt~K- zHWP0m{A=mbCxP~KAzShbll6&=hXSgXxtf`{H@Xc->AY(IzQ1oT#&b)((IJ3&iN;%? zd_HdHHy8Hd(uFeyA;%AoI%c{3ETEH72v_g-x*g{95SNuZ(jpk72qal`BHVKY+3UFC zr=8n{(Y2Av$e)UC@rkYcOL?J z*((j{Z@c{L^uaI)LAwnP0#sFp($DS>gN=~hjOmRhtX|cKIJgraL`@Jl%=Ew8A0gSJ zJI``ViTrjG(m&)|9KBg^^eu5Nd%Q8%*3er_`9Dg}J4VL69b2yeW96zs$Wj6Te3%db zR1_cn;f{QF5&$w9AqcqfTZ0GT&S1q<2m*p6Nw%u3d0sbLhVFSDfFmjMRE=Ip+6>1R zz3XMs2vEqFFoR8q#X}P6=_4#hK$;2JLx`_uhawioSFZnOgZI~uPEM^6v(71TRqp91 z-rE$frUB#MUv4hET3Mnv76RfL#iJ>@LjC^NCHhK$gH1yrl+|t@|I4+5aA_AI^O7Wq zkH7@#9Y{bGIx+WKVj4^YyiX4JTHfI>2zlqr5eSz-hubDJ7%MA|1fNoMidEZm1SRJ{ zPF$Pa`J=?;#``XQbuY2iPdKk6#Wn2vi(hCB^qlRbrC)8vCn4bMcr%@yprvU+qASE~ z8*GYZTlwzIJv$I{XH7!rN1g>7OV|ZZ8c;#-P`QDGryUAHM;aA8E==z?&jD6~X(}R2 zCLrJvCfS$FI5#Eio!x*5IU$8mR*P3xhg;C+P90&vj#+gY9?r{q?#g&vS;z?W)iMR!cGlMMK%z#b1VvDG{q_PqdDP0Ka#v|GIS{o}hO^0wJtA*LL?QxW_ssJ;I+wf z`Air1{-P!F#py`Xm=^(ZM(w!#Z^5U_#LgA==@nyFINs3sqCuwj7k+88Xw)l+k89MS zyhb6ahh8S9z_G}kt6x3f=>ve+ck>4!AX!ox2KU7d9;mP=vT~9uJS1sI@{4NgG6;Uz z>S{vAoM+B#B0voGfpBC14_dhrCSH6Od)fk0SHimvtF0&bZKx`|K`eq%SA1l* zm(nS;S@qBn;CQ4T;X%K{3h}DF(zGD+4gyA)A5}-JEG?d~>li9VJ`{5Nt!d;KdirW4FckZTRs>=Hm0@mtB#lrUBEul8{DW9ntGP zDs{q|a@M0l4vtBy0HC@D{ zf$-UEBUHqIbs(hnf{5Civf7${gufuC%paVxAtKV+2qejwkeYh=$D~fo2nZ}%tRBZD z559WEmwai?b<<&b;c)bE5xcb-fS|+S9(!R;s6)+xq}$L!yO49rMH~)!+U_{LHEMUK zFZLuIo=r{Tmcu@9Dd-4Q#I1Ix-ti;enAp{=X}9`$KI8J>s|OI22!-ir{3swvqW@C_ zI!My^H9-hN2=R|#@k<8Z69gl?k_6_+#dxrUv|f?+k;Ky3**J^e6tYYoCtP=)=hi$x&zZ z?mcCw=lh#6tNmuuI-)rX(4F3s8vU#~wVJ=kKl$oYc<9>hqYo3g#R{#3B!yA%_yK{H zqWP#%$W-9C1me}azw|ig^tcu2Pj)wSn_%|>06TpkQFW>>q2tm$lTl~%MCA7XCjJ4) zL=^DnQYP38;7vh)>tZB<~al}qRiJy>Jv z>X@LX&d+81F;87JQ4v4~Gj(-r$`cJIHDvshvoYzBd1O6XI$GoBJ$16wG>zT_O@=9t z#}W_oHD0)PyK}!mE9J)%ov0vEnS$1+B#spjwkC^*js(Xl@YjNqRa)Fel0sWaAXd7P z40=W3BM=ZwZV6$$_&gp)0*V3zA#|n0V<od@6h(0fX;{e?9Ujm;lYfGUv#SqC z01wv$6H?sx+}zqClDWC@agKtAz9kq{rA3Hv=OEd)PT|O076j&84h02{U-(-^%n1ZR zm2#xatavm4#trFC3{TMK2fW=6;`QE`Cuh00E)<5-q|ZZe+~E&#S8>6$rMq|kbzxy} zup=jfgM$kT|9W>~ZJZSEry=et6L*1f;lN)z>f|rA&c8`PJ21clz&0 zi~7!h(V&_3(s3h>l?P3&1f?;K1vMtQuGLFxHW&cqFjn zRAro|D=9|4aalIn;mCn^MG=fpf*JRCg+C^E~a7 z`LTNlOK*_Hg^rGerSVd{^7;R@2@J~JHS%!XmsQ3rO|Cdaejp$!MU5D)f3L(^2uV_? z@`mqpQt)lNH+i=@SqHvv^+c7%vw)Vzr@nqs8}}p{`Ay2wGP%)a3Re{%MMuJhQGBZ$%XcR;LO^~XMUw}0$QDtU2-j%mR-pT=zEm)+{O+UA`wuVhM zO@q0Gj=`ll@w^1WKAoB2_~KoJ+}hlnFqfjqcXiby_A2)*9|(v>FB6Dlc$8dUl2DmZ zN1fsO^E3ba@83N8=Gq_U=kE{e+A7Tq-0#k$k!h!;(5+_^{8Qg+x6E+~X-JnZ>e`Gn zgj)Jnsl-Uat+*M$mIEcFZCj(!4eMFJjmm1noG+UBxqr$y+UL7D>gKI=qkX%}aLDPq z;FykI`K{z=OLQh2pIiLfpdi>Th;qK;?p*mYYw$q zml=a*B8H;hgf6@X{ioSj2syquaXxz~?@p{Qu1_q8(43z;!a^{XY`gy1|D7M6RhpSxR|-OH(F~>)#l>-M0$sj^yG!FVO_SmhM3U$^wYUThFQO{tCXcV*9hA&ZiBKk~ zOyPY2fuK!*Rnc}pANArFvop5thcmP9V1(J(`}Swu4G-s4d2_`|gsV^Zy?2sU4Jii= z7Vib|8S$~;^5hQX{*z{6+%b#AJsRv-Sf8UmtT|o5rd1Sf4FNs4c))6tWjqvS0N+ct>0r z*A_a?uaAQqZb*)&M~7(ZA9Zv*BKKcCv{Fgsdjdik1wfD?g2knklzDd8D66jveKr%S z)5?sd+2QNm*JsNF$#NH3b^lrKGmmHHvl&ml%s4!YqXs;N~z zM-}1GA&_bpvbdf^8b|`Lj_=^I?T_IHXy@W!$NIh}7%@_!(2?&Ah^z<@{K(wDZtL!T z=oucKHJe$6q8QeU)Mfbo%rH2L5DbAEf2#0Y&%^Fs>mO$U?7#ErRk2aVEhMkR=lw_j z@to3RysV8{?C4m61?M7e_w8(f#X;F#b*k5r zKk|(3&__>C9qAUC0D_u;}crW0v1pNWW21LB?uw@ zLOah~h}}@za6L}Yhf5uJ^}ndK=>}^TF2J?fiU5J!V8_zFcTlC6K&NGnno26UYGQ(s zTrx~Ez1{O>2+G!FmZ#r~=TfsX-QD-eoUg#~JV!$)+G4~%!hQT9c+iI-Hs-3JVPd`e zE7So+9KMJ*8ZeKn-@11R5OOx6zR`-3usmvqdFoQ4iy1oMQxEvGWMmnDCW0$1wDWA5l*{xgACOWJO=7HbH((6Jx|5lVA@u}yxzR8lfqd9t_Y4L|Jt=h*`z z|L{O({h*<+z}S=XKOsa)=HtsNn)@Ok@j1y+-h-CpQSSED-(fk4`qZEvLEN78UhlZe z6&S?rh&LFDR%*m>O}GQ-WNJS^?gIFZ6-No+k(Vr(uVdLo56+`onEC)$r>x9AlMpEkES*F*`bpeM zpv>xptk5mNUhLZkz7&wHflFN6XWZF{w3sVW?o-Cb*yyHH-Ln^ z(9B3PivD2sO-Mjw2T>xJ1kOLpJ}riKxV?K8>?f$<#;3SphG1Aw3ly=@11AYD^fY^L z5)C;i+KbZg?fBZ=Vs3<7S)JY<(2tJ!mSa=KfiY(b^&iXI4WFc&Rin?hF5bq|QQ_y- z4Qqoxb1ndOFUr#f-fDds3fXL3LG_OyQ3wbG;|s{B)1XwKNDazxF#M<*0tuDHSf7nQ z%EYu`^RogxAYfLkcI#~b(a98gkUoXni>MLK?@sT2`9&ub3}O@?On&)iZn9xD0I)&q zD~^ZFch3}qBvG^dPY^2DFzm_ItVjn?8#8)8b)@Ik7XSeyeDJuszi0m+d72-Y&0=OFGggn7F=%-7upZOcV~tO>N%;`n}m zCN7WEYr8DaFQe)wox)VUSRt3ZuOg9HW>O#<|Z>64|W9s#{lRZS{S6jD}`npzG zfU5`qm^iZqbgpLTJ`T#*`t}!R5bbJ59#X6MtanDd-q?wfQ+RF#{Tld z8(SCvP7{jan7QTYkN^8?=4mH@!ML>qkXZqca+x4Jf}>yLXhB}6M76ii763v= z`asGNjnza3#!~)KgLBmHtZ-;OOi~fr)K%>W7yI=p2ILEY0FOig5_ti0^c{BvMvYz z-ni@LM!yw6$b2{U_zy3>3ny7Xx}vlt47e|!zxmd5qc3Oyysa1z^EIS~Sjq$PYaSd{ z#s>xG>R_%aovRORQ@x{SOl(>hoWc*~g}RGBv_RMjX&-VZKq#fuTvJh_QL<$2g!w*V zH;}z8T(ewm=VQalF#|HOAD6);>Z;kAx~c$|LAos@dHcDi(V>vO2gE!h z0bZ*J13KergegxoP%7$rBCCOzUSHWZmaIo)O3ifGOLNTt%I=QEr2X%+$8Cu77!9tsdbSD&jt*Xmd@ zf47F{5{v3cOchLwc)@!KsnT8VF1Kz$8VNisAdBabf|N*5W?XeDKw6jR1Q9>rCQpNb z#*}}^-yb<)+ZejpW&lX3-vY+j2{NB2T0vSfO~z#r0|KOnPn&OyIH%CAa;kYF3}9jt zZ1sNa-7Ns*Z$+#xqZL!CP8^DUR~hv=I5bTDnc4R4d8Rn^62gL<&!QL)6gMUL@w>tY z@o(=)p`(f&?Diuu$K?UxN;M?W#0)uW!HzDmOu77ko^0f-t(iu{*p}H@#CCLK=e7yxhF5qCH}(2M{H`)Y?aFd!iA&^t0d#Q>YefHY69)JCjz z7?8-=PSoQJUD|R~7=8v|%U3&zEXejUcD1CHvS7(#D+K$jw;Wif4kS5|D*=%e*a--T zYx`}TX_W3ksN?}an-jI@G^bR7U3W%nW-&qm$e2}lkP5)q7$8}>D(Jfnn6_}Rm1Kn+ zky8w8@ilflaA!c z{^0|d&;A#-xEo)?#M$3yX+FY3FwvODDytz(O}&UmWl4?;HG_78C4#HW>e82jMx`6< zR#4#02uRkNTarfacJ+$E81e5QHQrhUWZdB8&S{+9U85Tr7$em z=LNxOFHp*XYHwp0WAle+$!sy~(sBOoA}KjKsw^Ig((o;wAZbW`!ccv&G93s=e?w1V z3e|{v%uhRgH@#rj+WJ_JmL%k}e7@6c8Ddv((BNsOJ>+^G+H70#Zfheg+usF8U4P!H z3uKnJ2ikT*BUE}Lg;+=WadUbq)W=)L3RcSN@6wQd^wS|vIB;m8UUm#3E{n_S1{vB& zwBV9BmKEt#3Xm!m;HI1`fu7=}d&l4f>ClTtJbdR2N?vWa4PCV$^OjI~Ndi*d^ag`N z39J_oDw2+Lf~;T$enPrKD7h)yg8W@=`b9vb5~&)A@+w-gO0Nr@(i%;sFs}2CvkeudabsMCH#XP%yghe~sF-GH!RmL}1)Vf#G6SPpV9x|ghDo??fLMwC{ zbLyv)e$Eug%WIiA*r;Qnd7kGWIJ7`&QOv>Jv$CAk?X;=?51COnRRh3B9OuT;YPAvB zJ-gK%NUJf25M2E&AG-bjdN3vGaaMukja6@?icFh4Q_D#imHBzs=0Ibu>e`svQOo?t zfH#jDcu518`!Ug6M=QONIYiN?`QXJ3;#>9FexQ=D@JU`i7TkMI7Vc0WnKwUgsB~g) zqTotEaEyZUa8be(l)97Y(wn?G=Bvusevj;3%PCf)jCdkHZm$OGTW?H9ngakj8snpZ zKVd*Jj@W7-ZrXX9HWJZG+nmpM=0&gcBl6qPDjoTY3{xL2ep8O4HwpkUa(!HVMa1u%gcl%hzO;S%?Ao*L>%-K$eM%gPI%*9O zSj$T}0j{XCnzQ;{ElGO-Y#EqU!(UE^)SgRk{=n5%kNPv1`r)2hFVTPB)J>BnLJuhzaL$2kZdo0~7VZYSzk`sCe$Z}k*@f352DqIK% zu~WICG$&i)F`mt`WPYtHGacK6(!xXHlcewx!tB$S<22wR5pFqx`Hz90x1 zwSEL^Yfx5iG}WD^jrI6n=;{PX6hXvaF@ED_fOcIOo16T7f7C3~vTf*}z}xkT-}1(q zk-(OABe30YTH2(xKQkXQ{Vq8_dG0TS`SE{JvWL=9QD9+2Qo02t>nA>uMlE#+?t@en zVm@5269`CC83n;I``N>vdV7bZ&Qb%KhMP|}l2((o2f!E2&W)rY<48AoE*c!4<7&;{ z0+2Cy)5~`dAb{REJJd)e9LurEPhM+XBcW+$53lCbH+}!;w?)%=q_B{UUE(Rp{<;@#kGqUiI!4(Mk(`bjDILi<5 zbzwdf?*j1^hs+mcy9$C-^JV)k-+G@V3qT<^eee8sA$7YR4p4Os6_LL zRMQFB>HdrAV3bWNeSb*z^t*Y3Kj}!0(D4k=NA1<7W-A5uevd~8`=*)}Hdes@kqu$2yNf}N3=qr!0(N7@u!>8}&k zP$vQ;xw-4s4}@__pt=X_S`DEMM-Ko#?fugr;ZAi1AJ?w*C7M^%oX_BUz7?f1T34;T zg;O`?*^qWQ_=p09dbaaRi*xjzaUkLNOH9#!_^;d0YpBY7iI&15S(_j`q)``H6MY*5 zoCOR6f*QQ?(``D6Y{OJG$x;*xVw$iR`J(`3K5@dV5?Up)6rj-%Kwwv; z?Gy`DZ6~}O3keES-B3QZx2>G-zJ`h3k0~gF5pjPVFCR@zEUmAtElo^Jpw|=d9Un`X zW5InT#z+-ksFd^(E;G#Um3l$|qz3naFAqqrL`o|M@V}Y69?wRyI1b@~MW@@A0eX^z zlE_G%%#De)Bgx)e2wgX+UO9J{BhF^AdLfn^t~R%VJDlFpVJW2=iUu-V=(!vrAt_qe zN}zRE!2JXJUS=kn?R#&2bjI3GTZHK}olm~s&wJnZ{b8q8a;r8}IbRDiNq5rsJpjK7 z;Gq+GaM2$8cyP2g*T-bBV$W4TwZ59KsUS|@bjfVNTrPm0$|^K zW>%5oAWi}JW)0P>d1;;m%%Azo-XE%?l~82BUetd>qV5eqIMiGHpvr%NAH#ahiCJlm z6Oub7LhSC3U=x(S-UX#U>IkT=F3OpGQP=~c1=OHi4 zi+LVGZAl-E-|;+OeHhA#OAsRZMwq=?_&1$S{{e9|nsf-Q@If?Y*0BLQLf%z%E;}Lw z@M@)AKkKO}z^*+b9!XEE$q8uFn8#vLqnpT<**u42UzeNZRvHbP^{{9=( zriMr~FjE_%4hR7OAjK9jd$IzjL@KZx@JVm0OGbN5a40jkSB^k|A%O!`e_ZL%y!yK`Y3vk=>T2=$?flmB-c~?nPq5%tTM%yjfVwV5>g_FYY6>-s^Dm+jyJxNA5#cf z`0sJ`x#S=$A@^$a;c$3StJP4s`_rbvdwtU03Bl`1QL#K)jJ-LbT8JnXi(>mHT1MJN z%-HHVA|z}a7lvKF|1gb^a$|ux*#zW}!(8jm-bKBFh8V}1O;uB5mUG1h8UP{yUkN$t z_xl4tZZn5z2u4UIcS?Etd?b$TL#Tv+0B>Z|MFYSuLdwj8ott|Fjw4wK>S)Gl%_rYL zyHu}`6X-j)kToF;g^<~l$++xfrw+`*=SVz79eL!Qkn?J{{YC55 z?s~JSs!$`8DMA|R(Ddr_%31Bx;de+c=I3`3HNtZm5FK`HV)11!LmuHePYB+m1Fd?^ zSQZG_k35?BtYCt0{hbLZX39@d7n2%>Ivl!j)>6Zsl-VTYLu$WsZw`0i9ZzZFqwB`#1K`F&wqRV1As{&;1RxOC%s#-lxBvSP z3vf5Dp=OYfrCNw$oXCFSVt$AmF*2NfsY>eKEw*#qZjZW%x1I2 z>@8f<2w|n5;eK2YVmhsOJnW|MObJWXa{{)PL;@kQs;V0A-vbml!DkD(LN3etp7OGC zZ{<8oK$${F!aPxTH9v$)=*4WYh#YW(#7TxBKqOg+Rw|YLl6`xA-=bzGg8!m!d)Y7T zrDG!6q-Cn9&k4@m$Rl1dvWFre-bFO&=EZ|;63WE}PRPMQ2}=_G=$5_qMz`^Mb%ltB zI3cqHheg)y$fML^a5hmD38u5eq{7QbJH)}*m%4?#W$*e~BS*4$B3zM@tEK~mLQ(~d zgoKpT8I%;efjBTIkn$TEe4Mkva~o~6-{6>;J8*Cad<@KGPR<7t3|wFsv*Qo2c(M)e z7?=-m@2RR(wB_ryT4n6r^)GO>)uo2+pI*H`Rqwr<64E^YA-0k2-*IEk+-IlTu} z2{NKHqNl_e+uwcto3BTJYyZAMNON&^cHvnj#Cqt&s%PWX%6wgr>4YF7Otn;C^x^NiTkA^)(eL$ddv)Gr--;B{frJf;(Z9*jO1+?W3m zg0f(>`mWtdNVy1a$0EA_m4n=aA7YaaS6RF}_A$(S5hSiD%9Nyxxr{}R+n=z%A7Y6Q z)t@53Dgjd^#H7LrnR{iB((UrL87|Ou(8+j`2SMMEn;T|?P7LgYPKc9fLbms2F;S7N zC~L)sC`b2CX$rl-QnS}H7iRM4zPU4;qvm-l6*uo(0t787VR~Q zymqlKq9i1@M2LMq1W%njPVl`XQ8}KTjrQH7um7#i2-_p%k@_L_QPn|ea&G4nqAo5A zt4mgRbaeK%v`NUHEE9s;^qDgoWt^zcV3B*yTtdcM!Is(=5-!g%5GyeZ+*ALpz>;}T9YJ@u?y3tb^GV6Ep^Cd7%-RC}6sK%hqQ z#}miN-X0wvVe0z{jF@q9etdj{ajxj>`1p^N<9HWG$Ilu$@mNhfKIxsA=HAXRp28L` z7GKygaVl%-B+zsuZRt4H2_dbDjm6v=AKf7ma&dTwqw^L<%sn`P@fi>jq9qoAUOa{i zKmN;jvP&Z$uZdF~;eZ!$gl|}v5Jbfi zV^7ZqiB8U5M7I!f-f2R*?g$BacGz{HB&0(^`iTU)Wpqf$rXNDyy?>3>0HCc6Xl3~S zJ&f;xkWZkcKL7#OFk+4AcV7Xn`zi~4t+z~1yn^d6k{yCBS?Skq5DabXX=`-3eL|c@ zn{dKX?AGf`bTlXD{Yg0-8v+Etk;X7X6(RTC`8m8%!nX}!tkew+jCuDh=#))D$feT8 z2kIOTc_V*c=Mu(OJs|)fK3onVZ2vW^P zkk*Yi>&xAR5GPcX(9%Lg#EN-o2m8MVsD5}?C>dCz;mws&4Odb%z<8;22t1^i)AsoRH# zjeL7sjB^e=G+~>^f`VM&W|2#^;z(#7ndZLMC1f1gPn7|Y=B4w%$WER;J31d--B?o0 zh!~H~?c)dyV|5(od0f|mGRD$02?AWCOrIOvTotiz(%I}w$QXQ`228D6Zd=+Z9Z>Nd z34vl--=6eKmh7cNGICG)Y6)r~NkC^KQUC+E8VuM$y|IoA8}~0ECZ3#VN*I>upAA1) z>_~`mO|()+J%HKa*1;D+>fQvU6sZ6%ii8OugJ%{M7PNSNsu%!hCCFwWQn@(Nugyr zxIk%Auy|Q ziB{l(2E)S2ih_yUFFj2Pxs!j(%Vua(vflkWnF3~)m^*9F=q0iX7gN4hm60t4Ip$${ zbK>z;-o#7>Zj?MHDXm+-%(j+Y8{RAZyk@2!Uy$>;$ABEGNuNYEMN%++-7SM^0#s}JwMYxa3B|<=g+|1!Ux7gX-y1ovmbpM53PTM8BQc-uhg`gc2)uom zMPPPl!oGxTy-Il0*26DrO!6S1b-ebQCW@-6EIrRF%c_c^rdhVJ0Kp1|TUeUPu@Fr* zNkWVtasVLKBm_r%*qAQT9*l!`_9X-{M?wsEwX9u~4dxDzW+B=meOQ4C(F+jTD4X6b zkV$Ev<1u-;iyvaTBjEA2nSO{;Zeg7cEd!HShlw7)gcwr7TqH%$3UxK`6$7NkwZ&^@ zfh&@_jWWJY^mb<|qjF@DglzgD#>yhQC64l@6{%8=6ok31wD3HB3E7ZwuL@&|@f?tJ z=57T`gCbxQjphDU-z|oCjHG0O4zc2gXf`}QmvMBeqywvh+k<__aQlOh@v>RxwQvKZ zB~g;0kphw=!S`sL51xw*S^|rTsx*$WDOKQ9531RINaj_3jK(4xmeaJx!0peD5W|W7 zW@Y5><#A0}Dv@Y%sEG4+ZyA-wqZ*|@up+3#lIvf5!eMKIe#q7S9f>3Z6P%Y38XF0S zEi+a;U01p)(ZlEv%bOc=`>KM$ga5U2I|lbfp`CxwMIv8D-c)|TC}UaMtALM9^|ES~ zYmi_;85$yqOh=+xG%$`{ah&-JB4oTikULPwLlKx&gQWoyB;xh0j;67)2nxoweMQc_ z5?NTZ$bvM=b?S?DYt_|M#LH4Ex1S#&)`f~MYwgk)hbF*KS>1*h)r)#Rlk*I~&km0c zPh_{~Kkvf@4=N#_%2hIoqQ>`^;U1Du3peefhMJ--*N8Lppe+IMnn|}#Gzr23^)fc9Gp9Gi;3Uik`2F^>0RguzK z#(PFwJP{%}Y1-Hg05?1uhLE;1APHV{U3ZoS|M|m@KYsHZ{{IV@y!DWpQ1ja zNNAle{HWwxmb@oIluP61XgHfRN9t~ZvYrs?eun*!zkd(!zeGY9-PmLW3~msTQ1s>$ z6#|%OP&W-!kS9WZ+NG(QMIH+bl_8`*V;=pp;}g0eI;plsbNiYn>7!4Sba_ zD(X7U+jZzim6uH@xAjEGW;i3`=xMffEdcn`vmQaQ(twJAs}<_(=g*%f3@3;lbrW|%b z_T^Yav{?cuN07Zj;f!9HpQ}%%oNu zp*r?Iuy?X$$1?3Ux``z7p&j3V=Gu|BCkU~>XSi)JrNr-GK zTsg{wE-L^;T#_kfw?nZmX5m{?Ve6Ypp7tvTd2K7VpX2h3D0!wz+0^_uXx2vQmoKPy z=G!joqqpAr4RkKXj{!Ns)T!EgDcikP*?tLtT_0yWr%pL3lnvt2!0_C7~IVu^iTmYbJjK9*;jK#N91VXKn_87t<<)>29b2Tb0 zCUh~Dk@KTEHBHR{Kt1U4^|{OTaEWn5t6V%5cPI@hIY`BpZj4Zl%#?opP7Y94xGZ#h z*Ura}AAioZ04ZDx2A!qgTwQX4u*x_A0YTJ~oOA^MtztkBaMF)KknZ%b(grPVl&-mo z!TWWFk+J|FSbB1RnjW=eDxKi-+X2QEsLTNbq9!7KS4tRaLCDXf7#9Gynk0q`@x)q~ z@Qg;b*{H&eEG`LU_$qt`qe;ghjG^LNCRwhdp5od0Nn{l z`y&Soh_LWAOKq&TEJRq9Tzjon4GV!@Z7q2ShzTF7Kvhd2bvF2*&e%g331~#IP+0vN z(EK_TlQwrMuyf}sfL2jqAt=eR7oVi&JrskFY$_6R7GzRXXYVYpK7z)?giR7DISc^J zZI$~G2wQ2X-d<6FP+}}M=yLL;G>{Y;tliq=VvLwL~!>IRg-BFuI`t2)(^I49KRC zE-yX0opCnbg`xw?8!M_V%DYfLi%)75D6imS=^}hQdHQDKiqeRYtESYw{Iupd6Nn`M zIJrvjJdqBk#Lt9GHa`Ky#X#u$^imi#^O4WE`NVos=^`HT2@dMVzdA8o)=tzOahZwVFpbbC^j>GaYhps6 z=?F43;VV&EGXBWp7w8&5j$iO?W5ZhdIY1ETGP#&*Wyen*b1#`HfDk#W?3F*!s^A}O z70AwZ`;SFge6wR5RDAa!8wq3jh-11Ws`?@sjPSb6$`-^|kZm|? zbd)i%b>c4;c=Wl;_WEO5GAY8_hHodn9@v8nOYEc15 zMNLqC?ek0m0R5!@;`y)SH_p6Qg<7?c;YFo*uHaE;KZq>`m`kKs1=wL0$A_vyyb5En zg|jezt2n9rxuUI|BmEl@FEl340KG)@Psb9oOQ&?{(d=E1hb&hDq6_jjmEv||C{dW4 zo>vgm7ZMp?d>2jsXh)>Z!n07qn*DZ{1uq{U9SaA*doHV;p0|)+-YPkEVv1hLMji;V3qV+i$ySTBvw496q093R9c)8 zBK}Mj@*>Ux?1)KOoXj;m%W)^?A6=K@F?Re3lwzuuysA@aYI!xL0+&Dc<>hq;l)OSb z`$u_B_NF(~ZUcli_QQ7{S-`B5hY&1FumrG#K`jVF%K1p3dDpMF#XiV5RaIn0p22wBvtw5@=Zp(Zx%5sRIL5rA@7=XMper@B(;`mRU0<^S{ z;lY7{F!U*nt4(5Lb})biD1n@XjO2q|FAFD3S*3tkf)xXp17jn==7@EGAaECK5(9@3 zM`CNqfdLBw2!XK)2HKy3N*lKbrNK1+K;Nu3bridm?6BC7uYvE(?3>lFAC2D33aydx zP{aab>{#dQPXidlVgRb-D3Qan8>9v@k{{B|x$j2~gd~Q-xKbE;FCe1efI3wOLLmT9 z72wbT|JWHoM+Y36d|1)c*na)x%YXgiFb=JI1f+?+{74MoAgP-lHiV%7xF{qB-UG-$ z;+kOL2%WJ+xvU}bG0Mzi)SxNnh_N!sK{S<{6p$c~Au<)HF>*f;%t>1Gep5!qUedDn zOAi@h^Tv&|2Tn7cZhDBmx5%HR)9DuVY@}bL8yvCq;zhdYAbLTdcNo}gwpe)Lpt-Su z(*qg0q=f>6GYZo{1M=y12lP8QfED^* zJkznafH3g3iylDh2C0SMDQ_S++vH6Gk|r-Y2u<>P9zsKT3)(ZQ0o0v*8&Qog93d-Q zGd@1`k3NS!jX?-QLm(jL*YsSBG9s&x*BOCOPXTZc66BL60hy$NMnI;?%K)rGu7K33 zLxocZ0;mBaAV0N%kUX+OAon_fpaPQa075zh5(FZu5r!lfyrd1EOMnAoiju^p=>!A7LZRhaUvk3kY@3teIu)8Xr$b%e`E;bxQ70{&KzPAI5exPy>u(f@!1z`cXpz)LDkZ^%OjNa<%N+V#fXhKzJ z*E_tU0Xq&b60$##LweWwTh2QI-gSJ|{@~&v0U2}77Z6c?NJ=1kZyh_dqpRAWaI}8B zqf=C=-Gk78nCA)zqDmHDJIERDys(`7*akn62Q|Oc5pWbwu;)B}Q}}Kn=o~odO=@8u z1Q1d?2jIylG7u1yg>hKKw_;d_Kv2|a#KB7!)%N5=0fI^=>mf`I+Le937eM0Lr48Y? z$F9WScN}I%AEi%bc;9ck?Y!;Tk3>@6Xo3Jej*Ia~H30ASDeyXeo`%3UAH%f(ey-MgKO zKah{q6cM}fwLxtl5S*=p2>9WMWRY6tTdEtXDL4%S6$w2K5RiQ^IYcWb%CVk!VBWIl z$6;N{+)92l5%1+FIDCL<7+a$ zx+$N`0{4N`q;zfhvoVv&_2*?WYwOo*8L8iTAal!m(#~4)ATlG9>zOC)e|pKyls3A( zFb{6MgaU-0$7Qd7PpStX;N8|o@veMLpHj;rFLO6*JMv~;#v>g-ejcfAS*z8ZgMZK6 zS{kXzzj^vUP}Fi$Bb)B*aCN6t)k{l}@1qy=JCTuBnTvvT1#<3|y&nhVp$lOE>2Zc| zu_vV!)#akIm79&^Woq^DT{*h;4iJ68sIDg?ltZd|&ay0-tm#w2*vmaPa4&1wnS-j` zXCR2pjBQ!Ae17IYv{UWW2h@XIhQk2D>eJJcW%8~-Hl=jkYzOAw~H+d>*nH^IYdvV zJrR(h*kIhXEGue52uYorj@ipup_bA2am8IAAbsKukWHD~pOGm*)&a!ZI(R%+S{kn2 z?GJ$b`1l8XW5gpMdTKTDO3&+eMus<|3(&(p#PdAP!VniL6S^L?-1wk5&@+(Xl?kh~ z*iUj90XeIkA>vbMyHbvNTT=Gpp)^)!SsE+St$7|UI3VS4A*InGmyHR)9sAVq# z)Us2CmyJf_EAb6nAs;t9GFn8R^*&_z;_TI{mp}jP`mN|tZyUoEefIKafBEQ>Pd@s4 ze;7baP1DTYMlU!Afhafw68b2GO6U0I$|0aKMvfG!!c|q(sk_630BUDkfPXe2df?%- z8jY~Nq56$hGgd(u2Ey=S&ZSU?<3PuQ3?fA^&|Pp)+#IYvfC$dco%LxP+kbn3&NFX#hFE6nW!+5$#H}w;u2V{R6iwZvw>;Qm5UwaXJ*{71_t4d>T zGzTpJcw1F#7q*8_&|uyn*bU)8qDYLd`%#lX4#&kH5+(kGAWCWvv;HDo5Id>_1Q%__ zOvD;uixESR*OIBWgv`L6N!u!rt#j7>0~0t%hD`1Lg35@%h2@)>dYnogU71P#5=cBxRg(!xp0t%cYui8=$2Ujfh!%@O zAasK+hYv!G!M()@GHCKvgh5?Q10hokp_G_XC>Sb@#Z#f1HbV!00%xeb^%HpM6#7AW zzB|3)sG*JSC%-)YqX)$E^CJl%$G0pP_p(?ObkOfG6h z2#k{b^jpF7D~JDXgXunrAZZqI((q7!UI>iGv)hH?yckzfO`OHzVIEbCz&J~;!= zU2Pu2iV;H2*UK$nK>_9jt-!!`x$Z;K{)P~e-Sa6Ldf=9B=XNxtNYWp|x)3Tb zo1sr6MF*`_)qrv#{fk`4Yb=KYgQuL~dmnu*~2Hg&+31JvdCOD8YR*0gQJ}Z2vC18wq1tMd-oyizukzo!{ zLI%AXV(Sp%3jU(TagGxw?{Gw~+z?U8pcK-p2|?)Uz^YjZq7>D(hsU^#{lg3Hsv`hEs+^Z_M zzdHBabE@cs;w2VMQy3M`G)Ici7R|P)B+%ew6Cem#7lCN8WU*LG4pa>jJ{!7)4r16e zMc>Drs@Bq4$pMVu(n=OvI1y<@Xk!Dw5ReZaViaJv?f3n6Q0i<$R_3z65Z$GL#VLc>9_UBNyeRtQh(Rvj;^EA z3mEGF0LXM0XC-~H1B0nt2L!XMzT~(X=S3A;)n!dM3~qbc5JQDU?FFZ}_XQ127-F@Q zQRYX6==2$@x0bP~Dnlz2jh=%Bh;9YsU*8r;$lNfra(P*D3nLvjyQe2S&m8Hv%>l=Q_ata|h4b@&t6RTTq&{B^&KttO8_~^T5gW5$(YwQ z+jVo+MkQytmX*y~?7+5c@))ShN@p2kc7|CNbMsl2se~2*`McYNQpvK7N*A8;10K?D zZ#o^cXQp#JVuiNJJ7;Zn=%Fcb7!URR^#q2RY^fdaHVC1Q^-IhFkh6s2o+*oW6kXqB z1sy;XpTlsYhZKu7R_{5OQC@t>>cz5i1a*k{J#Fw-)fuYQl!l>YULO`%vB8{S*=gO5_ zSP9oEb7@2jDW9`W(pD25kj2W>gqNJy0J3dmY5wzINkE#F49ori5L(qcrfzN?mjHyf z5g_)CkDY?&w0Aw{Y|%N^6BI<0byhg;v%L}T?0WEm5nqacSTz3$eVy^m36QCW(8ZpL zpgxWvoChs6Ssga`=}TTT&RJua;zw0}YE(IH_)|lrjny)+4Ffi$_yI5YzURPwra+df z0m4b}RzM&>5r`YQ-4j{LHoI0%?lvoPuG{UpZXPfyYhlG*L$JU|1PKd6LaA%zD>V5T zb`gkt6OdnBi6JJcT79Qb-~z}#D?koS=S;`p2_Sa_5R2@H9{}P2*3w8US%^P~_WOM4 zcvlCE5kkpzF(f{YA%lTx^cr-v(=w+2TwHBfUD}C&6qmsxMnINe9|I9k20oT7UQi7~ z>kSN}z>)y+3oThPq-!-dLIEW2h8r6jSqVVoSs?DlMl*W>h;3v3U8ph~NFo~zk(IO@J(thXB-WyN?yEa0Vy>NQbpId0*czDQ(UU+nb|4J8pAF zPu%DsLj(0$_5a!F_dNnKFusFoG>TOx0s=&_R^%Z^MzwKTMo=$DKw3U8QYb%9#eF^; zIP4T2lwu7v{&VfJ-n$-1ZVuBYed&g@WDyW7TN0M6xm-5_5=wcoWPO>l?6ljIb52yj33LMzv2D}sS(sQtRPCyXUJ6?j=G0u70 zb6A14z33S2!@!P}fSG+poyR6;Fz9z?fXoDSQq=EwP;+QKDjMH9yy0^IX#q&0%jD{8w6zMoGwv}JI+oG8fzHm_~gYO+x9}b=%%tlu=;4LJZG$o2Z++ z%(XJI1rLFjbXhnK>bAuY<*`OFd)qoeD|9P^AqO&mV3d?00|sBeWF;0$ZIK`<`$14D zZF+qV$z3T3*7fIoPw{Y^`|bTcxd^@mL0ebWgT7Zo%2Df&bo^=)mcF;%C-Hd9Jz)+{v{1hB_zeySH#jb1Iaqn(o_ zR%`62&{8`pHA7WQEsHjesGtBH*?Tu@O8izuyVHhGUjH}R94Qiae8q^czA4Mr0!^sc5CKbz&C=SR`@0(Pl~~*8_Q%G6gbQTpSmwv`Kk4(&Lv3y=2nJ zFc}%?kqS*48=(J;K;HX>ii6IyO=!O?+~O$@wk= zdH557pdNlL5a~@bF-p?ZE=?>F$<^v3+20-r6=|bJC_>ehv3=T$ghVHq)*{DiR6-bk z6bL@1-VmczhnBCXdKE2G%hX`0!Y4kWv6@)lq6W4pkOi?!LPJ@LC|! zc($h#D?uiog2=^p8fJJ%<8|%rf$WzMw^2Am+8)iTVjg-5?D&LvpWt|TOgE&=y(o}%1CGI4L%hK6{;O7i9%ne+KF?BG``U9`^d>7 zr%~29ci7(UK4*-#m76@|S|H1)BpPw!J3egGMWE1@eWV$2RCUpzQW}qljd5j%JLu?f z#`zIU)APhG1Hnu_{N$U3`P=WZ9^8BVm|m8pSC=jVk>=bSO}VvgTVo*G%)&&c8M86n zmH^`BWZ4aE8+9*Cz8EH@yG-OCZjObr^tK+d=Yw|-sKjDYVn(86TA;0 zm1X9>y2QIahoFTW~dAmZiY|K6Hkoxkmp1-bkF%18f!ckdoPeLAybA(z1P zgT~*0AtKatDJQWo`4w9L0So|XR;U=6$>s+-87CkV8v31)vxfqEp<|ut7lFL-q`t^sJa3nZC!E7~IwL|k1@ije?mrY4`}fCJ zJOqMloaP;Acmza4+j8m#JfwjJnS5$#K=xQ`km7ZaD;oaN>2mD)CF}mDzj=zcBkylL zT75GA0`q&jS{8`>?$yfIAARt_C!aKDUlBoqC%bNC>>;x4<~K0YSr^IqED+Li;f5Ol z3A3&MVpG}oGLSExZGHNIkUx1dE52g#XmOS(*jX{`lF}2cOWjKKxU1nY=>CR1uYwb6zAMumQx;_b5*pJs&wds_n&CG79P# z#2NrHTm}?6)9k4PkdL2z@%Yn^zFt{*wYvJ`$@>p(?I9rY+ZT@>eFbSsGqcxqrN3F( z36L<9;0J`lW_Fr@%#DGNhDxwBi6LVk-K#+UvbBXvk$gd~Wlbfa6gqm)(E&uc10egT z?K!A#^$RFA9s(9y4ItufqLy5ZiTuU)YUPN z(Tu@3R;0KLC$gZF{G3_wIwccQ3stC)>FH|V zm{^%Seh455eV>hENC`l=!&%|%j^9y4^&X>{_mSl2v?gYl|oB*=6h6DY%X1=nGj`7cORAkJ# zYgnl z7zo2trAW0DkBO3|8Bqo{Z;`q%gr!qf!~>Ltr7K-Jbm)SF#D>HR@Bl2(g-75WcnZ$G z94F$afs$kBFU7XbiK^yHe2(>d%wom}BpktyAqprC0>V;p8wh`tTR}}9;Z2QQaF8vl zxf-uYf)yNHK-?1$5)f8Kt0(N8gDRsQtTO7PkQF(Hx`5dKi9pCv%?3udK4SfDHK>Fv z;wHnTX&T0q_&S){^%+=WF=V#>Rw9sT>!u!-)O{en{BR50o~RGRK0^q^^Q^lPMG=fO zCr~$s=ww^;0%7-idwq}*F)gmR$wprg#d?5rs(;8*aV%`M_JPQVa|i-a2&7wVw&sY} zAb=F}#6Z+QZ^+RAVf|#t`jz_~i~k(5HgCfFLxN>WK=i5f{*cM8_yUq%*c`BTqeojv z0fkMLf2R3(8VU$Y=Z~^LlErMEB9M_jTf-bljt9($FRKcHz4jrv3cF5^FIRd zc&KoCzDOcL9>KE62}m42JUh3Jgxu_F2hS|#5H-@peQjS63Ax3tyIEchnA9;9DSh(- zeuP9uQM9_+rdM3UL}GU2;g|Nm4Vm>1~y>j+F5KREP)hbv4j0_ z=wV@Dt9Bxs2uWdc+<_q6D-6DYwJ%_6ZR-n|!($h(n!xU_U;f4BgCq?R*uPy?Y4!S& zj)6H&4&?Tx=SWD$W~e1gb+1+N{q}ksmv^U3gy6cjN_X`3^l>qY`>*E)31MNjqfM9V z&*|!H43DetO^m>AV*~t<&b~qBMu?YnBgR~zC4^9z{<_A-7+aY*pUq<{6uE*;%&Iw%mqS_!0M{XjQO3Iab6mO`UW9Gin(82%viNy zAO^#e=<{w4u28|_BPyRw#_D5x81+L#^ME{l*fhU=XhS_k=*uUSA<6ncp{cP=xwOUS{XID0*xVWbv{e#=4MrLehV~Ljs9U zUV*4wf$+Yc7KrfhM}e08m_eR>WXr<%Kp;sHX0$-?r#d6?MO+0!17^#5yE%~Y{AVC? zUm*MpY3(-5kKr>!0)&m9AwS9kurg(5h{(kZ0XsvkBX+-O%MuV;LdWgsvm0s?`lsVPh?G3rC6t(_!5^`R3Ufk_8|vwE}x64Ip}nv(EgWzYsG z+5tJMLKA*Tcy5gE4>9G&v6=)%AukWYu>=JNFb(Oq=i#S%r7oaVO6xgw&kooOg@9iz zd}W&kMF7_#2aS!uDLiOW>xeZqg`E;d>3l7=J%*;-*R?>F;VQByUzbJEOs)jZ1$AX5 zEPdwNMUb8Dd?QsWSJ$ zJJZMocMfvHHhsc3w!_zDdDH_fqs_`ii3R{IXUy6*5XJGasWRQcA%U^zqo2X>C) zsEb_+#fx~*42BHm3LRV!QK59umeFIuWUw|vhd72V!6anPs2+baB~CKo_nKhyj1KOR}u+uP`0v`Hba8K7vA+E+5KFrEc)eQlhrTxZ6 zR#N}zyd3~yZ85|^gr8pce*;qS_GZf0Pf!JwW>~S3uNszq{ib2je#35_J@by z9GDG9m<-M;fjk$%Cl@>3CP12p>Ao!x1_3XZ3rz-GElQq?0Qr90d=yA=Wa%sKj}F7q zJihYFqAF&;mc-Ec^5*&K9;xYmf3hJEZzq+I@h}a@?Y6UGBXe{T#6W;h69ym*1Kohy z3;-s#kWbPX(^E^N3Z!&o`Fexc)vKuMGStCLR%G&o03Jqk1Jx!#ylF!T(c3#qqynUT zWbNJexSfTO>k^uhBN(ylVOFhVYX9SGpOn7wK_q??`#P?4w&03XisE*H*qa{a4pcJtn z8Uy1#0VKhTx z2w7K=>YXv&Q^zw466DoTzpB@Iez(0t8rSiDQPn03bg8z1_l*E3q$^f zuJtl72p9oTAs_@)nUER=2`CC2kkILpnpKWvGIBL+&kH8_BzL{)-vm#Mn8SuH)BgOsx9J8FSKnpb*HVkvZ>aS$m`Zx zUs}Bqy36cst+nCNiulf@`-iT-_zrIPFe3ymVf*MdJREx^=0jTDSlh&{wrpJl8kiZt zS`lkwDqXD17+YbA`t~26gOQmFcJ2wGam#oWh}^dQ(CThsQ5QzL z-zHBQ{{R!e|I0US^t-Pi7hkY5FNC;#x^=?!ihGFHRz%ZU$C}l_b#mPeMw{3+FO2G> z?yoH`txUL@h_`DXM7ELs^s}GLq%4S#gAkNj9#Vd7S#4CyY;tX6Aln9PGu^DUEXFpM z8rOwPkhHfiX(-lF`wX(y84-dK0}@T0Kkt=D9WqGvXb#v3Mbfejz@lacXV|`Z$z!IqJ zCkt_ZV+Ftx%PR{o)epg-P1h)$!GpEUbW)GpvMRS$RMTA{0F%Ff>#QHIhQI-PBm^SF z(c>bE+bOk7n`jm4u{LckJEnDCP04%twSgTITV4mMxK$IqD_%z5J3;_fC4Yt~`o{tno_9=|xPZQH?g-IACfLXBf|KJKa^}0eKR8(kv@M=U zZHyo0@IxhGAYz@FRm6g93^Orkor%n=>_(YY&vlQygAfh-D0G)MoRqTmsmJIIuZ>}G z&$DLGxyhqOWeLj0!I-?tSZKhB=}9^vd{;;&c=QA&h_ey0r%@Ie-EisZL`G#_MLG1W zk$F#&K8%bJ!3bR%qE(KR!T6pK#?x=ylzrD8cVg&->;nWEB0eIR_>@qL2{h!!?+DgW zSi;lrWIplG=dl&!6(J~qvDWEA9E>82e2fV3C?OR2jzAoYhOi+NBV}MMs_cloSA<9y zYyE7ls2^YyReKo4*ufluAO|DFcU&Z`#{p(~fZZ3uKK_rp?c#BSJX#IeZ@})@z`Ngf z4VL?d+l5FxirqZKdsT@1?vu~nIVEM?haG6gFgFjum?aqB7s5FE_6dBw!n}|J48IPx z-;glbfFI}|enklJKmO`{xG3x7QP%l)ayS-p+z8?bxuXg14lzXT@5W;xFP=Y#%jD*Z z`s-i>VB~+n2rTXkLE@L+K7-r*&DRj`xM75LFuH$)@6agrj)dHN{q>h{f4~b;*5yKo zD$BCn2&NJQkK)#DkayG$Mcs#=nOQ>}d4Ld-v_MO$M)lNX{ML@-bucGHAfW;FTgY0o z5pvY%;9z~&G_G3EN|!wtKWYadsH*ck`Uqx-kC?LNh1539b6>6Mj1yFq(T%})Mu<|P z{`EUYHRLE|9W}JFd7d*l)hk43sBL6@G9yF%37Gi3@<4s%0On`_s~iji3={7}A&|38q}?Vd-wb!0}!%sgZV z+r0|jGtnNlTVP6n=AI!lLS||Rz6T?BVPp?;Yj#V7PZjd0H3W0nV^Rf~o04&s`54v_ zHButd0Amluc|zcMrgBDb7>wkS2;p(ykxFF(UIkT|fcPVF2MAyTh{yv>!laRTLotyF zeuI$NQ5F(`AVlU29MCXe7+~Apgxd|+F^Ov8ydfCL9^)AyXO6O#c(svBx7`GX>7u6|jY|hei>38Q=$I%hfeGuXtIo&g*N_WO z13DH$kELQxwNY?h2*F{rKTopQqi*YCyhCe()F#LUqs~3}6 zjwl3}6QXVPo*4?W2H)4q^UGeRE!tP=+(ymUfF5W)#pAOs_bBTtn&tSIemaA;j$wgb|m^7D4p=tz$bO{@O+4zae4Wbc{Z{sAHbK z6oPmKLR`<}2M~S1LCD&0SsGDx>#m`1H9}rn)IrE_W$JEZ8=is?Hj=6^aEt~wRBDP? zvp?=qmSAWAAr(`aJ@1D1v?@j>9aG%o!&S+KvRp2vHwZbJ7|suS9~dDXDlO*qqF7Z$ z&$H!*uR0Heph_=_BrY14>H`~>izRf=9c}pBl=ablAzT#;TeXI(#mH6XYN|nQ1BR}T zfH`#RgKeP?BTMlmLQ+@Mp+@?xMQqvgxJ%)P>fHvIs$8YZA|LFi0k$H%?)+0z*5ghL zNz5<_rSw86h-f^*8XPtPlS(zTl9;ImF;+E=l(&>HLC&oqKe{6Xvj*cZmOfCaXnX)d zlMFzOA2!vN*@Q8`1i%=%WMYT`fJv+bj~n7Q0{(|WH3WLAU;_j&-5Im2X&g+91kVV0 z+*3&p6GOV2BjMdNcKeu;m+Poo5WJs0kq6AwH;0KK-odD&Y=Ou%x`!Q@TNpimV)(~{ zkS)NYcVN5TY+Y>+S(YS@f2L{P0tT+*V)Ga9HQ_LfqN!?qusrX(X*r5dtS$ zD|9goskbZyzBVK$mu@zSBX!Cs3ob`^io;7`45Be(^FrWq1VJu^RZ;qug}|km2_nS9 z4~-5=(GcY-n~xBev;H0s5>fw;k1R0td%A}*5!9TEjHxNzhG#gHk4F7+NdB$EZ13&ja^DP#;8x|Kqq z(Em`s%Tbk(_}V+{XCnkgJX=rC_wIC`E@px&4?zNMP@#nCjVNTpJkiL!<~T*A+aXZo zTDQ4383P$7Aae->nKA}R zGRJ5y&`bMQJD<)1$w-qVYjp@bvU@vJ#1q2>NE;;^vC>XK;F0s~vmi3)0rejf62LN+#(>R&cVij^XA*hZv8HL|%HM%_YMOKwh~8fk!^_ybUBvsMb!0K$ZU1B!sM$ zQDVZc>I7MB6)s?kqZ^bEQ$Hk$xxK~7KL~;j`)NL4-eeSHG02vtEL-rDrg!Cl29Y6Y z3$>=f#cmZtp3^u4?1T%1U#B4Gj=cX$2O)H{HjTTaOB!|iO0V`yHi$z|m>8K`K~`Jz z*?DAlUa7k9MXfX!Cj8Qef^2rNw^QU=2a#3xWDRo(g3I~)9akp-WWwk{8s-)x{GNJ; z7jO4I+;V?ZNQgt3P`5*{EaAboo5KLI`=rerg4hRPq|?tKWLhXihAoSz-!ve1?0w83 zSfgn4S`c3-gc9?uizOYyPLW?R%vnhGE%bam8jqg29fD^y={AZpZ?Ys83q78iY;EfS?M|BRfGh@G_UC1g zb??mX*$>Uef=cyUd9hOwi>+zg%0lFk>Mu#GP<7-%*>V85yq+h!)3?l=)M zUnEu4drySu%X^c*jvXBTIP!X7I)kV8>bcx*y|Ul$=j9or{J6B^l3}BoBh?N1 zt9^qm(qrJcEB#KFEHc=9ulg(xWDiAsoK|E$eo}pH_IQTB>@C?yk ze9&ac!W?gALf5vD^?Dgt@O%Ew=Kj>_ zO>74y+j#Hz!c+H`KF#cV5Td?V%ZixE7NgmS^KdhwG~BWUQijH6nvbY8W>vycdX1ph zqq9^x{>eEVpNXKHjnHqIUz#Dnqi0Ber3HcRSysQ>agi-SfShq(23J;p)s+TM7~J6K zr7IfkU=0Bwm%GV(+OyX*40&evc*y6wFxzpybGR_oI80V%uicPZg@hE()JY=_&xzFR z)QY1K-IDa=JU)qrxbDeVW(b19u_=Q_JUUOU^9DZ|BB?e*nlETh(lNBZ;3b&lm#nr6 zyj%rVNB7+BAn0EN?u95#fxFV&7lQ!|RtE;J?TWMs^uq~x5>iSn>tk*0Pi=lVv3gS{ z$-;Fq-__>trqg)7LFgNA?*mgcNlk%Wj~ zHj9R%go2QqE$Ei9Fkd7maWsSX7bB$G?_$EO1(r8JJsJ$IK7fe25Bp;fvYgt}HQj|LYoFf|;(X-udAOS-bH4{g;eWM_`x8=CbO^mhZDNg> zUW1TE68^w8`37Bn=U-00Pr8H#cB%uRhc6>QOVRGm)%qH}mElQSux#K+$qDs~Q8X{_z zHI7qgcNm)NhL3a3&s#K3>6ma3LboDC2!`?4ibph$sdX&~X`~3Updn!%^6SPIY6wsc zA)05k2ikX1wN*j{uG^NPz7i!1e*J^nx2)CSgcx;BIj3P}gcQ;}-`!Aj4BngeEZ-3> ze`kRZKDXBvME$^n2j`JaDZQ1L%ML?ZdZNY)G5TW^X zqjd(6Z;j(TV&aSl8dB0s9Z*Xi6+i8j%~3nhG_BojN_gRdrG7`F!ljv6*KP=*?s&k!OrWbXkZ`r@c2-+M7| z0HTow*;ft~rx-T3Z%cWWr5}|EP5YD~PygoF1y@o6Ewua29N5a8**Mogqg8ITpQ_4ax5aWzDqeeq4nORplYL5Ny2IGsriREA`R@A?7bn)*U!R^HfBQT2&7NOQ zga(uVsXNZA+Cu z*T<*LW3?V8zFG6qcP6EzNZoOkul{h_#5Gd?j)c|QzkT~vIrtxKcWA>f5JLeJnVsZP zOP20v3$s9FDd3lQh3*iAhX&sQ9U;SYfmTtXD1mMK;E_+hb?zm0wD|oPu>RF>-_(tOV={cR5ofFR&WGK(>9ql*aQ&^`|7krmK=~9 zzE}Jwn4&B+(lrB(`22bx$yqI=jt&87kcSmd$Y@N~mjiHg$=)uz!j18PPf7EDL4FL7 z?;xH-^6|!i#LM30#2`6+Bk}KmKteEw_@^w|CFKYJI6)D9UYZlwJYy%K&XXIijOzhz zuDcdc;6^~GvPZ7S(JpCprWz~1X=$!4u+*?BR&Lk@+3pI8i2Ceg9#Je(dpyIo6x7dyNFsWU2^M(!qFa4!F$RI8R4F81K5n#3W(33H$Gc0l@`T)6@VFgfUA5jFD05 zBa8^65s)0M^&&c_MTlb*!Hl=+Qw|Y8T~bxO8{)A%C}L+;h>ek?#p3(tA)v#H>5k<^ zn2fC8W>uk>AfPxDx0rtMb%c1-PFr0B6UPghW%U`8nPU9AUq?SFsel4+o$*Kvki>As zT$dwkJ|A#wJ^>F}cPvX)hjulh7{{KNF?Nk~F{VroHOt>IB3=Z77u64xO^Yf6jFPnw zYhH*8)ldWiKH_YEApi+9XvoN6?AYjr#X;^_ZYv}=3kY3&x<4FH4kOmKv@W#; zgVBQP%0xUg!xx*`2pg^~QvwYxJnizt6$rk2Isn>`T^ZSmfMr<`=u#)bjL68Y&11us zPr8WbIeOo7+vP4bfJea({^G6Qbg`_+*kF1X+3uVbB98u4orgP*i~5$E)BTKVa~Z|O zxb4npVcjlwF~mymyED2Ej%08I5Nx>wH>{&csFatY;%teBubvcMj z42_vIO7LE$RBu-RtNZ?Y+}r$-g7d_0!-y6dW@Mqf&xM{>GqUQ)t|^Qw1138Cr7nBt<`c&eqU zs^vqzn3rGdZ)JBw)Y2YP;JGzR>xCcJ^dWOIpd6j+-Rog`l+{k%lBBzwsaHRa(;nOpJM5>qO^kx&{z@%+d3;;QbF;HfWwmQjb5f!b4r^WwdO^Z|G9D zotCuhcN7=0roL;Gy5c=+&5AH`(%=}N>p@cod7hx=Ys)$~$__6obGP5`-#j3aPn**1 zZL@yg402|$>2>b;8c?4ZQZZ+ga?{ZYxvn8B04_rMdS%zYZ_75CN}Fc_cQM zgSO=VuE{f2^ilUIYUzWb#!NrphOrgy||^s6Hazo4yjHXFQO zQ`J*{Q>B!H3<=~$Hu`7=KQ4I}nq;M({4fv^B^)j7Lz2_EB_YH#Ow+1sJtocQJbOplT{AlSKTSMw!3Ob=ev4#e|xi0NF` zvmoeJ^y8hkJ^Z5l7YGPGJB?_wb5VYCygYmxiOrgIKnQX`*8*LDa9r(ULQ6Y*PSYP7 zr4{k}A0F!u3tC$0aKH7$bk@_Al;Pi-5Lrtiy%JUL*}y4 z*;?N@^zPrgkXG71bd8KmNYfjLn0}=P;$fG5Om_z2$yUJ%f8TZPblYXWwFe8>YV7j@ z;qkf^wF-N|-@49W^}jx}(zE%&F9+vUx40~vd=S(9brrAcAtH`CcyVF|6v1A`8|7CI zcJN(2XfN0D{DPiISC(#5LKv@^e%0tXOTDHSyeM_WhDz}lTHjswf-^n3tG#*4gG!*TFn)sLmbB==mQtw^)rZX{Ni%R{ij)U-IOdc~AEnor^t> zO0Q7*w0?XnJ$AIy5TQlWf1(AAZfkkyK^XCG8+g5^?K-8$PgbJu%MnKHmu*P4I77aB zf8#v_FY1ycJw^+fBn`th5IrN^Z%g}SNm>bODu+06S&GDl6tzl{WCh!LN2)a6Nmq?7 zMFrKTJX+Kun$z~Rt+X$e2awYR?O3Z6)$pzwZd&{1Rp}Y#(TwDIQWbsbE$Q7EwcLA@ z4nL}LIbY5<(|#*mY3oC+#}N+Cck;T<9=>&L1)h_mJZ+Y|@?|@n*HkC1YtBrXONm4EyG^)X(dz2moFPz*g471s}aJt z)R($f=ta$PK7Gj%yzoP1QPTAupOYlB`$?DYz{2FIW`0`@6$d;j<#QVlhH1J-nu`$zj4#AERtcrQE(WyQ*|C=Cshxz z;$qu%QYitu3SssX_7~!el5(k0Tva%bmhFg;jLqJ(9h)P>o>hn~7>Zz|d}Lqw*s)PY z;XvGW(g6mkdcCgjG_$&@$i_uQ$k@bCcxN`n!wvLs`;p?Zw|3*=%56gryv(i&)Rqy- ztX2dH{K&BR_G$<|6CT*HUBTrMLlqIjju1y&q4c;|&y@QY_4qhLzygt7>^!RQvW#%% zQANfa8S7#Ofh|NGT(C)FdsQKhLNX}@R-(NcQ5Zl?Kpznfrd+QG*eF(P*-6QWL+#l* zm|V5pjLQQ&!bU*CuFJCQI_}L+>X=dtxZ%cL*;tF=gX12;$$K>cF#>mrp~J*6MIaTA zvKAT`W>j(@VFba9&CELigpFxa3Wg|+1yI6AFvj#^n>cpIw3+b{CNzcx4URR)gqMgS zjwcEH*Z^+nfk@kCB;r^yu@LK{v9XC89Mj1x(8MSuD1nY)hz(;H7^~rtV+2U}M4dqc z5jG8;Ppp9L__}#S6BFg*zaF}|+DW|f2~IK$>KHr#8H2~d8j$>@>2sinm8hz60QGx_7jT8T@< zWKd#xN+ul+k+>uUIycOa>13x@GBQFt;>bK1OT`b|2$Ud@u>#4+EEvLcehKG~hU2sX zaUiGGSQppbM0wi$p+ChofsPOnfW8(rcG8p)y{2q1pi`15OlhV;t_3)8FakJ<0K(VZ zu;~&c$RxwZ=>-l5EC(VP0Z2oD4=fG^-pvC-SPmv%9V~}&l9d=}Rv?IT)tL41ND5)Q zf?^ep#}@%*=?vo-+kFm;h2s$6Nor5NY?$F#4hC`aWrTUu?(YKV)BM>2|AXElX-UBI%fzz(P&?7DWH+{zAk0^+=^q=csanP&hp zwqk25VJ4N55wcg~mj{7`#R1cXttgNQFl1u}PO!nIOE?60!dB_6{Pnj*7h!oUN*`FV z(=dGshA_^g>h!@bwqugQrtFf|a<^y`%qC5pi#oq3^%;{8&aegK3gDDDDdMon8O6FS z6JnGsp*&!27?YsF4?VHHcG;3%5eUy+DSL@qrRLgmYFZQ^spgy z%)yt|8@*D57t@ZE$9F&`LU>Vyk$gB`L4zdY_(1x^z&^N@a6l^#GBk7KrKj@`^SndMF2GD|415J;%g(X9f+ca~j=ESFPuWv`SV8N9C5Z0b!(@ z9Wc#~x&f5q2tNBE#SV~*PoL2n2LvzZQFB#E^y5zb-3LU7aj#xX56)m6^7EH19fG=Uh0S!ZG^EQ9U%U=#p3<<|dO$cjrxMiK z>b@R`KW*H%`1WvJg|*X$c6qmGMF=vaFE*b4zCzAA6%F2zoqX`gN<9M36Fm^iCF|>F;I~fLlr}g*nkU_M_PN-w%3f(45Iuqn z+MqZ6cj`ROSyX&c>x(bub_U|T1M%qD*`LYGkj`A5?L=Sg_&Dd)f0-4}m+gedN-{rcU8mvc7XRC+xSh?HccMd8{Jpo% zYFiMHLN{D|jZZ$DOJ247^>-Wuqgd8n-yQI1Vv5lLqH(g-uMVT(ft>%S96*g6yKnqzZG-kLnxIz2b*^wAi%5C z%)D2P5c+aYl@4%I`i@d@ea$Lod$iOmtYs$|5{U)#8NKNq!s^LzSUr3b7S#O%dJjqv z&(2n-=$*MYm05X~>_b;-^mT>3}DA@$=$t9FT9%J->Os^yg4a4`$40 z5i+DtYbD)pl`5FN-o?2cMe{L_&(;Rb@}Te;fBbATo(^zajcIDZ(ux7KG02cSS}3)3 z5T$dLklD1Ny@z56G9+BA&JJ#MWUWsh+=Hw_Tx%HuvhjP5oOdce2_H?RI>-%L+ACHh z`_R&sw~V8=7kTdI@5IgJ^24SUXGn*Z15vBgRJ`bgh|osXWOdoMwqlpeAO*H&-h2ZUgz$7-dKKcP z4z6_RQWOh&TMZR`DX9>Ug|$@iDGU#8^b_tn1SEciwr!sMVni|m64=KOpv$O?Eepx0 zz++hvsI15WZ@2;@P<#%F6|e)^29tLMf?URfO+AnWjAU1kc|j%^cpR2NMkcb$6Ov(W z44GtH0RRBOtYPLLwvLV1499FDLynL7g6)pLH5)^YjWFmS3GfIw%P20#MA;!h23H6% zn8;ujfGacW&|`B>U}wfZM`1w7n4oH~IRGQZ25_ARDQ6D?0yMZmBR8rBoO!;>>&k9p~*R9^rlY`hu8=1$p;28iH8sYDPUmM!e{(rC5pL;cFYDn z*1#TNMU)XE6R}7l8vEnO*iB-MFcHGV;$&imv;dDI<-lNT@UeyG1X$%)cCYFo!pe9``VxW|5_GAWXZHCAqhIxjC$9KOUX4J zOgdv?8k7+^k{byTOh$uap^4ei-*{;1o12J_`BOc(rXmPW&?L&&VNVzAI?Sm-)=p?{ z!u&tAvx!PvmAGytZl;!!YvOa-Pe_8OBIW*9EbMrTEGUr5^Hzux(A1|L7zLUFaV};$ zmMr*l2n?v@nKjw3*P-+n5Cc@D^DJU}z~t zJ1`0?4FT-iAr1f_2td)o#{F+>V$PyTF}xS4e~d(w-H&0(j3&$tSqIkk0iiCo-SqvCT7M#14aQe6999SUnert*{%Qp N002ovPDHLkV1f$s#LfT! literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg new file mode 100644 index 0000000..96cd21d --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg @@ -0,0 +1 @@ +DockerHost:Frontend,Backend &CreditCardAppTiersareIsolatedbutcanstillcommunicateinsideinterfaceoranyotherDockerhostsusingtheparentVLANID802.1QTrunk -canbeasingleEthernetlinkorMultipleBondedEthernetlinksInterfaceeth0Container(s)Eth010.1.20.0/24Parent:eth0.20VLANID:20CreditCardsBackendContainer(s)Eth010.1.30.0/24Container(s)Eth010.1.10.0/24FrontendGateway10.1.20.1andothercontainersonthesameVLAN/subnetGateway10.1.10.1andothercontainersonthesameVLAN/subnetGateway10.1.30.1andothercontainersonthesameVLAN/subnet:Parenteth0.10VLANID:10Parent:eth0.30VLAN:30NetworkotherDockerHosts \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/vlan-networks.md b/vendor/github.com/docker/docker/experimental/vlan-networks.md new file mode 100644 index 0000000..caec6d6 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/vlan-networks.md @@ -0,0 +1,471 @@ +# Ipvlan Network Driver + +### Getting Started + +The Ipvlan driver is currently in experimental mode in order to incubate Docker users use cases and vet the implementation to ensure a hardened, production ready driver in a future release. Libnetwork now gives users total control over both IPv4 and IPv6 addressing. The VLAN driver builds on top of that in giving operators complete control of layer 2 VLAN tagging and even Ipvlan L3 routing for users interested in underlay network integration. For overlay deployments that abstract away physical constraints see the [multi-host overlay ](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) driver. + +Ipvlan is a new twist on the tried and true network virtualization technique. The Linux implementations are extremely lightweight because rather than using the traditional Linux bridge for isolation, they are simply associated to a Linux Ethernet interface or sub-interface to enforce separation between networks and connectivity to the physical network. + +Ipvlan offers a number of unique features and plenty of room for further innovations with the various modes. Two high level advantages of these approaches are, the positive performance implications of bypassing the Linux bridge and the simplicity of having less moving parts. Removing the bridge that traditionally resides in between the Docker host NIC and container interface leaves a very simple setup consisting of container interfaces, attached directly to the Docker host interface. This result is easy access for external facing services as there is no port mappings in these scenarios. + +### Pre-Requisites + +- The examples on this page are all single host and setup using Docker experimental builds that can be installed with the following instructions: [Install Docker experimental](https://github.com/docker/docker/tree/master/experimental) + +- All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly. `-o parent` interfaces can also be left out of the `docker network create` all together and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples. + +- Kernel requirements: + + - To check your current kernel version, use `uname -r` to display your kernel version + - Ipvlan Linux kernel v4.2+ (support for earlier kernels exists but is buggy) + +### Ipvlan L2 Mode Example Usage + +The ipvlan `L2` mode example is like the following image. The driver is specified with `-d driver_name` option. In this case `-d ipvlan`. + +![Simple Ipvlan L2 Mode Example](images/ipvlan_l2_simple.png) + +The parent interface in the next example `-o parent=eth0` is configured as followed: + +``` +ip addr show eth0 +3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 +``` + +Use the network from the host's interface as the `--subnet` in the `docker network create`. The container will be attached to the same network as the host interface as set via the `-o parent=` option. + +Create the ipvlan network and run a container attaching to it: + +``` +# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) +docker network create -d ipvlan \ + --subnet=192.168.1.0/24 \ + --gateway=192.168.1.1 \ + -o ipvlan_mode=l2 \ + -o parent=eth0 db_net + +# Start a container on the db_net network +docker run --net=db_net -it --rm alpine /bin/sh + +# NOTE: the containers can NOT ping the underlying host interfaces as +# they are intentionally filtered by Linux for additional isolation. +``` + +The default mode for Ipvlan is `l2`. If `-o ipvlan_mode=` are left unspecified, the default mode will be used. Similarly, if the `--gateway` is left empty, the first usable address on the network will be set as the gateway. For example, if the subnet provided in the network create is `--subnet=192.168.1.0/24` then the gateway the container receives is `192.168.1.1`. + +To help understand how this mode interacts with other hosts, the following figure shows the same layer 2 segment between two Docker hosts that applies to and Ipvlan L2 mode. + +![Multiple Ipvlan Hosts](images/macvlan-bridge-ipvlan-l2.png) + +The following will create the exact same network as the network `db_net` created prior, with the driver defaults for `--gateway=192.168.1.1` and `-o ipvlan_mode=l2`. + +``` +# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) +docker network create -d ipvlan \ + --subnet=192.168.1.0/24 \ + -o parent=eth0 db_net_ipv + +# Start a container with an explicit name in daemon mode +docker run --net=db_net_ipv --name=ipv1 -itd alpine /bin/sh + +# Start a second container and ping using the container name +# to see the docker included name resolution functionality +docker run --net=db_net_ipv --name=ipv2 -it --rm alpine /bin/sh +ping -c 4 ipv1 + +# NOTE: the containers can NOT ping the underlying host interfaces as +# they are intentionally filtered by Linux for additional isolation. +``` + +The drivers also support the `--internal` flag that will completely isolate containers on a network from any communications external to that network. Since network isolation is tightly coupled to the network's parent interface the result of leaving the `-o parent=` option off of a network create is the exact same as the `--internal` option. If the parent interface is not specified or the `--internal` flag is used, a netlink type `dummy` parent interface is created for the user and used as the parent interface effectively isolating the network completely. + +The following two `docker network create` examples result in identical networks that you can attach container to: + +``` +# Empty '-o parent=' creates an isolated network +docker network create -d ipvlan \ + --subnet=192.168.10.0/24 isolated1 + +# Explicit '--internal' flag is the same: +docker network create -d ipvlan \ + --subnet=192.168.11.0/24 --internal isolated2 + +# Even the '--subnet=' can be left empty and the default +# IPAM subnet of 172.18.0.0/16 will be assigned +docker network create -d ipvlan isolated3 + +docker run --net=isolated1 --name=cid1 -it --rm alpine /bin/sh +docker run --net=isolated2 --name=cid2 -it --rm alpine /bin/sh +docker run --net=isolated3 --name=cid3 -it --rm alpine /bin/sh + +# To attach to any use `docker exec` and start a shell +docker exec -it cid1 /bin/sh +docker exec -it cid2 /bin/sh +docker exec -it cid3 /bin/sh +``` + +### Ipvlan 802.1q Trunk L2 Mode Example Usage + +Architecturally, Ipvlan L2 mode trunking is the same as Macvlan with regard to gateways and L2 path isolation. There are nuances that can be advantageous for CAM table pressure in ToR switches, one MAC per port and MAC exhaustion on a host's parent NIC to name a few. The 802.1q trunk scenario looks the same. Both modes adhere to tagging standards and have seamless integration with the physical network for underlay integration and hardware vendor plugin integrations. + +Hosts on the same VLAN are typically on the same subnet and almost always are grouped together based on their security policy. In most scenarios, a multi-tier application is tiered into different subnets because the security profile of each process requires some form of isolation. For example, hosting your credit card processing on the same virtual network as the frontend webserver would be a regulatory compliance issue, along with circumventing the long standing best practice of layered defense in depth architectures. VLANs or the equivocal VNI (Virtual Network Identifier) when using the Overlay driver, are the first step in isolating tenant traffic. + +![Docker VLANs in Depth](images/vlans-deeper-look.png) + +The Linux sub-interface tagged with a vlan can either already exist or will be created when you call a `docker network create`. `docker network rm` will delete the sub-interface. Parent interfaces such as `eth0` are not deleted, only sub-interfaces with a netlink parent index > 0. + +For the driver to add/delete the vlan sub-interfaces the format needs to be `interface_name.vlan_tag`. Other sub-interface naming can be used as the specified parent, but the link will not be deleted automatically when `docker network rm` is invoked. + +The option to use either existing parent vlan sub-interfaces or let Docker manage them enables the user to either completely manage the Linux interfaces and networking or let Docker create and delete the Vlan parent sub-interfaces (netlink `ip link`) with no effort from the user. + +For example: `eth0.10` to denote a sub-interface of `eth0` tagged with vlan id `10`. The equivalent `ip link` command would be `ip link add link eth0 name eth0.10 type vlan id 10`. + +The example creates the vlan tagged networks and then start two containers to test connectivity between containers. Different Vlans cannot ping one another without a router routing between the two networks. The default namespace is not reachable per ipvlan design in order to isolate container namespaces from the underlying host. + +**Vlan ID 20** + +In the first network tagged and isolated by the Docker host, `eth0.20` is the parent interface tagged with vlan id `20` specified with `-o parent=eth0.20`. Other naming formats can be used, but the links need to be added and deleted manually using `ip link` or Linux configuration files. As long as the `-o parent` exists anything can be used if compliant with Linux netlink. + +``` +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged +docker network create -d ipvlan \ + --subnet=192.168.20.0/24 \ + --gateway=192.168.20.1 \ + -o parent=eth0.20 ipvlan20 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +docker run --net=ipvlan20 -it --name ivlan_test1 --rm alpine /bin/sh +docker run --net=ipvlan20 -it --name ivlan_test2 --rm alpine /bin/sh +``` + +**Vlan ID 30** + +In the second network, tagged and isolated by the Docker host, `eth0.30` is the parent interface tagged with vlan id `30` specified with `-o parent=eth0.30`. The `ipvlan_mode=` defaults to l2 mode `ipvlan_mode=l2`. It can also be explicitly set with the same result as shown in the next example. + +``` +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged. +docker network create -d ipvlan \ + --subnet=192.168.30.0/24 \ + --gateway=192.168.30.1 \ + -o parent=eth0.30 \ + -o ipvlan_mode=l2 ipvlan30 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +docker run --net=ipvlan30 -it --name ivlan_test3 --rm alpine /bin/sh +docker run --net=ipvlan30 -it --name ivlan_test4 --rm alpine /bin/sh +``` + +The gateway is set inside of the container as the default gateway. That gateway would typically be an external router on the network. + +``` +$ ip route + default via 192.168.30.1 dev eth0 + 192.168.30.0/24 dev eth0 src 192.168.30.2 +``` + +Example: Multi-Subnet Ipvlan L2 Mode starting two containers on the same subnet and pinging one another. In order for the `192.168.114.0/24` to reach `192.168.116.0/24` it requires an external router in L2 mode. L3 mode can route between subnets that share a common `-o parent=`. + +Secondary addresses on network routers are common as an address space becomes exhausted to add another secondary to a L3 vlan interface or commonly referred to as a "switched virtual interface" (SVI). + +``` +docker network create -d ipvlan \ + --subnet=192.168.114.0/24 --subnet=192.168.116.0/24 \ + --gateway=192.168.114.254 --gateway=192.168.116.254 \ + -o parent=eth0.114 \ + -o ipvlan_mode=l2 ipvlan114 + +docker run --net=ipvlan114 --ip=192.168.114.10 -it --rm alpine /bin/sh +docker run --net=ipvlan114 --ip=192.168.114.11 -it --rm alpine /bin/sh +``` + +A key takeaway is, operators have the ability to map their physical network into their virtual network for integrating containers into their environment with no operational overhauls required. NetOps simply drops an 802.1q trunk into the Docker host. That virtual link would be the `-o parent=` passed in the network creation. For untagged (non-VLAN) links, it is as simple as `-o parent=eth0` or for 802.1q trunks with VLAN IDs each network gets mapped to the corresponding VLAN/Subnet from the network. + +An example being, NetOps provides VLAN ID and the associated subnets for VLANs being passed on the Ethernet link to the Docker host server. Those values are simply plugged into the `docker network create` commands when provisioning the Docker networks. These are persistent configurations that are applied every time the Docker engine starts which alleviates having to manage often complex configuration files. The network interfaces can also be managed manually by being pre-created and docker networking will never modify them, simply use them as parent interfaces. Example mappings from NetOps to Docker network commands are as follows: + +- VLAN: 10, Subnet: 172.16.80.0/24, Gateway: 172.16.80.1 + + - `--subnet=172.16.80.0/24 --gateway=172.16.80.1 -o parent=eth0.10` + +- VLAN: 20, IP subnet: 172.16.50.0/22, Gateway: 172.16.50.1 + + - `--subnet=172.16.50.0/22 --gateway=172.16.50.1 -o parent=eth0.20 ` + +- VLAN: 30, Subnet: 10.1.100.0/16, Gateway: 10.1.100.1 + + - `--subnet=10.1.100.0/16 --gateway=10.1.100.1 -o parent=eth0.30` + +### IPVlan L3 Mode Example + +IPVlan will require routes to be distributed to each endpoint. The driver only builds the Ipvlan L3 mode port and attaches the container to the interface. Route distribution throughout a cluster is beyond the initial implementation of this single host scoped driver. In L3 mode, the Docker host is very similar to a router starting new networks in the container. They are on networks that the upstream network will not know about without route distribution. For those curious how Ipvlan L3 will fit into container networking see the following examples. + +![Docker Ipvlan L2 Mode](images/ipvlan-l3.png) + +Ipvlan L3 mode drops all broadcast and multicast traffic. This reason alone makes Ipvlan L3 mode a prime candidate for those looking for massive scale and predictable network integrations. It is predictable and in turn will lead to greater uptimes because there is no bridging involved. Bridging loops have been responsible for high profile outages that can be hard to pinpoint depending on the size of the failure domain. This is due to the cascading nature of BPDUs (Bridge Port Data Units) that are flooded throughout a broadcast domain (VLAN) to find and block topology loops. Eliminating bridging domains, or at the least, keeping them isolated to a pair of ToRs (top of rack switches) will reduce hard to troubleshoot bridging instabilities. Ipvlan L2 modes is well suited for isolated VLANs only trunked into a pair of ToRs that can provide a loop-free non-blocking fabric. The next step further is to route at the edge via Ipvlan L3 mode that reduces a failure domain to a local host only. + +- L3 mode needs to be on a separate subnet as the default namespace since it requires a netlink route in the default namespace pointing to the Ipvlan parent interface. + +- The parent interface used in this example is `eth0` and it is on the subnet `192.168.1.0/24`. Notice the `docker network` is **not** on the same subnet as `eth0`. + +- Unlike ipvlan l2 modes, different subnets/networks can ping one another as long as they share the same parent interface `-o parent=`. + +``` +ip a show eth0 +3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 00:50:56:39:45:2e brd ff:ff:ff:ff:ff:ff + inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 +``` + +-A traditional gateway doesn't mean much to an L3 mode Ipvlan interface since there is no broadcast traffic allowed. Because of that, the container default gateway simply point the the containers `eth0` device. See below for CLI output of `ip route` or `ip -6 route` from inside an L3 container for details. + +The mode ` -o ipvlan_mode=l3` must be explicitly specified since the default ipvlan mode is `l2`. + +The following example does not specify a parent interface. The network drivers will create a dummy type link for the user rather then rejecting the network creation and isolating containers from only communicating with one another. + +``` +# Create the Ipvlan L3 network +docker network create -d ipvlan \ + --subnet=192.168.214.0/24 \ + --subnet=10.1.214.0/24 \ + -o ipvlan_mode=l3 ipnet210 + +# Test 192.168.214.0/24 connectivity +docker run --net=ipnet210 --ip=192.168.214.10 -itd alpine /bin/sh +docker run --net=ipnet210 --ip=10.1.214.10 -itd alpine /bin/sh + +# Test L3 connectivity from 10.1.214.0/24 to 192.168.212.0/24 +docker run --net=ipnet210 --ip=192.168.214.9 -it --rm alpine ping -c 2 10.1.214.10 + +# Test L3 connectivity from 192.168.212.0/24 to 10.1.214.0/24 +docker run --net=ipnet210 --ip=10.1.214.9 -it --rm alpine ping -c 2 192.168.214.10 + +``` + +Notice there is no `--gateway=` option in the network create. The field is ignored if one is specified `l3` mode. Take a look at the container routing table from inside of the container: + +``` +# Inside an L3 mode container +$ ip route + default dev eth0 + 192.168.120.0/24 dev eth0 src 192.168.120.2 +``` + +In order to ping the containers from a remote Docker host or the container be able to ping a remote host, the remote host or the physical network in between need to have a route pointing to the host IP address of the container's Docker host eth interface. More on this as we evolve the Ipvlan `L3` story. + +### Dual Stack IPv4 IPv6 Ipvlan L2 Mode + +- Not only does Libnetwork give you complete control over IPv4 addressing, but it also gives you total control over IPv6 addressing as well as feature parity between the two address families. + +- The next example will start with IPv6 only. Start two containers on the same VLAN `139` and ping one another. Since the IPv4 subnet is not specified, the default IPAM will provision a default IPv4 subnet. That subnet is isolated unless the upstream network is explicitly routing it on VLAN `139`. + +``` +# Create a v6 network +docker network create -d ipvlan \ + --subnet=2001:db8:abc2::/64 --gateway=2001:db8:abc2::22 \ + -o parent=eth0.139 v6ipvlan139 + +# Start a container on the network +docker run --net=v6ipvlan139 -it --rm alpine /bin/sh + +``` + +View the container eth0 interface and v6 routing table: + +``` + eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 172.18.0.2/16 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link + valid_lft forever preferred_lft forever + inet6 2001:db8:abc2::1/64 scope link nodad + valid_lft forever preferred_lft forever + +root@5c1dc74b1daa:/# ip -6 route +2001:db8:abc4::/64 dev eth0 proto kernel metric 256 +2001:db8:abc2::/64 dev eth0 proto kernel metric 256 +default via 2001:db8:abc2::22 dev eth0 metric 1024 +``` + +Start a second container and ping the first container's v6 address. + +``` +$ docker run --net=v6ipvlan139 -it --rm alpine /bin/sh + +root@b817e42fcc54:/# ip a show eth0 +75: eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 172.18.0.3/16 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link tentative dadfailed + valid_lft forever preferred_lft forever + inet6 2001:db8:abc2::2/64 scope link nodad + valid_lft forever preferred_lft forever + +root@b817e42fcc54:/# ping6 2001:db8:abc2::1 +PING 2001:db8:abc2::1 (2001:db8:abc2::1): 56 data bytes +64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=0 ttl=64 time=0.044 ms +64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=1 ttl=64 time=0.058 ms + +2 packets transmitted, 2 packets received, 0% packet loss +round-trip min/avg/max/stddev = 0.044/0.051/0.058/0.000 ms +``` + +The next example with setup a dual stack IPv4/IPv6 network with an example VLAN ID of `140`. + +Next create a network with two IPv4 subnets and one IPv6 subnets, all of which have explicit gateways: + +``` +docker network create -d ipvlan \ + --subnet=192.168.140.0/24 --subnet=192.168.142.0/24 \ + --gateway=192.168.140.1 --gateway=192.168.142.1 \ + --subnet=2001:db8:abc9::/64 --gateway=2001:db8:abc9::22 \ + -o parent=eth0.140 \ + -o ipvlan_mode=l2 ipvlan140 +``` + +Start a container and view eth0 and both v4 & v6 routing tables: + +``` +docker run --net=v6ipvlan139 --ip6=2001:db8:abc2::51 -it --rm alpine /bin/sh + +root@3cce0d3575f3:/# ip a show eth0 +78: eth0@if77: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 192.168.140.2/24 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link + valid_lft forever preferred_lft forever + inet6 2001:db8:abc9::1/64 scope link nodad + valid_lft forever preferred_lft forever + +root@3cce0d3575f3:/# ip route +default via 192.168.140.1 dev eth0 +192.168.140.0/24 dev eth0 proto kernel scope link src 192.168.140.2 + +root@3cce0d3575f3:/# ip -6 route +2001:db8:abc4::/64 dev eth0 proto kernel metric 256 +2001:db8:abc9::/64 dev eth0 proto kernel metric 256 +default via 2001:db8:abc9::22 dev eth0 metric 1024 +``` + +Start a second container with a specific `--ip4` address and ping the first host using IPv4 packets: + +``` +docker run --net=ipvlan140 --ip=192.168.140.10 -it --rm alpine /bin/sh +``` + +**Note**: Different subnets on the same parent interface in Ipvlan `L2` mode cannot ping one another. That requires a router to proxy-arp the requests with a secondary subnet. However, Ipvlan `L3` will route the unicast traffic between disparate subnets as long as they share the same `-o parent` parent link. + +### Dual Stack IPv4 IPv6 Ipvlan L3 Mode + +**Example:** IpVlan L3 Mode Dual Stack IPv4/IPv6, Multi-Subnet w/ 802.1q Vlan Tag:118 + +As in all of the examples, a tagged VLAN interface does not have to be used. The sub-interfaces can be swapped with `eth0`, `eth1`, `bond0` or any other valid interface on the host other then the `lo` loopback. + +The primary difference you will see is that L3 mode does not create a default route with a next-hop but rather sets a default route pointing to `dev eth` only since ARP/Broadcasts/Multicast are all filtered by Linux as per the design. Since the parent interface is essentially acting as a router, the parent interface IP and subnet needs to be different from the container networks. That is the opposite of bridge and L2 modes, which need to be on the same subnet (broadcast domain) in order to forward broadcast and multicast packets. + +``` +# Create an IPv6+IPv4 Dual Stack Ipvlan L3 network +# Gateways for both v4 and v6 are set to a dev e.g. 'default dev eth0' +docker network create -d ipvlan \ + --subnet=192.168.110.0/24 \ + --subnet=192.168.112.0/24 \ + --subnet=2001:db8:abc6::/64 \ + -o parent=eth0 \ + -o ipvlan_mode=l3 ipnet110 + + +# Start a few of containers on the network (ipnet110) +# in separate terminals and check connectivity +docker run --net=ipnet110 -it --rm alpine /bin/sh +# Start a second container specifying the v6 address +docker run --net=ipnet110 --ip6=2001:db8:abc6::10 -it --rm alpine /bin/sh +# Start a third specifying the IPv4 address +docker run --net=ipnet110 --ip=192.168.112.50 -it --rm alpine /bin/sh +# Start a 4th specifying both the IPv4 and IPv6 addresses +docker run --net=ipnet110 --ip6=2001:db8:abc6::50 --ip=192.168.112.50 -it --rm alpine /bin/sh +``` + +Interface and routing table outputs are as follows: + +``` +root@3a368b2a982e:/# ip a show eth0 +63: eth0@if59: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 192.168.112.2/24 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link + valid_lft forever preferred_lft forever + inet6 2001:db8:abc6::10/64 scope link nodad + valid_lft forever preferred_lft forever + +# Note the default route is simply the eth device because ARPs are filtered. +root@3a368b2a982e:/# ip route + default dev eth0 scope link + 192.168.112.0/24 dev eth0 proto kernel scope link src 192.168.112.2 + +root@3a368b2a982e:/# ip -6 route +2001:db8:abc4::/64 dev eth0 proto kernel metric 256 +2001:db8:abc6::/64 dev eth0 proto kernel metric 256 +default dev eth0 metric 1024 +``` + +*Note:* There may be a bug when specifying `--ip6=` addresses when you delete a container with a specified v6 address and then start a new container with the same v6 address it throws the following like the address isn't properly being released to the v6 pool. It will fail to unmount the container and be left dead. + +``` +docker: Error response from daemon: Address already in use. +``` + +### Manually Creating 802.1q Links + +**Vlan ID 40** + +If a user does not want the driver to create the vlan sub-interface it simply needs to exist prior to the `docker network create`. If you have sub-interface naming that is not `interface.vlan_id` it is honored in the `-o parent=` option again as long as the interface exists and us up. + +Links if manually created can be named anything you want. As long as the exist when the network is created that is all that matters. Manually created links do not get deleted regardless of the name when the network is deleted with `docker network rm`. + +``` +# create a new sub-interface tied to dot1q vlan 40 +ip link add link eth0 name eth0.40 type vlan id 40 + +# enable the new sub-interface +ip link set eth0.40 up + +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged +docker network create -d ipvlan \ + --subnet=192.168.40.0/24 \ + --gateway=192.168.40.1 \ + -o parent=eth0.40 ipvlan40 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh +docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh +``` + +**Example:** Vlan sub-interface manually created with any name: + +``` +# create a new sub interface tied to dot1q vlan 40 +ip link add link eth0 name foo type vlan id 40 + +# enable the new sub-interface +ip link set foo up + +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged +docker network create -d ipvlan \ + --subnet=192.168.40.0/24 --gateway=192.168.40.1 \ + -o parent=foo ipvlan40 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh +docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh +``` + +Manually created links can be cleaned up with: + +``` +ip link del foo +``` + +As with all of the Libnetwork drivers, they can be mixed and matched, even as far as running 3rd party ecosystem drivers in parallel for maximum flexibility to the Docker user. diff --git a/vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh b/vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh new file mode 100644 index 0000000..662e2dc --- /dev/null +++ b/vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh @@ -0,0 +1,35 @@ +set +x +set +e + +echo "" +echo "" +echo "---" +echo "Now starting POST-BUILD steps" +echo "---" +echo "" + +echo INFO: Pointing to $DOCKER_HOST + +if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then + echo INFO: Removing containers... + ! docker rm -vf $(docker ps -aq) +fi + +# Remove all images which don't have docker or debian in the name +if [ ! $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }' | wc -l) -eq 0 ]; then + echo INFO: Removing images... + ! docker rmi -f $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }') +fi + +# Kill off any instances of git, go and docker, just in case +! taskkill -F -IM git.exe -T >& /dev/null +! taskkill -F -IM go.exe -T >& /dev/null +! taskkill -F -IM docker.exe -T >& /dev/null + +# Remove everything +! cd /c/jenkins/gopath/src/github.com/docker/docker +! rm -rfd * >& /dev/null +! rm -rfd .* >& /dev/null + +echo INFO: Cleanup complete +exit 0 \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh b/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh new file mode 100644 index 0000000..30e5884 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh @@ -0,0 +1,309 @@ +# Jenkins CI script for Windows to Linux CI. +# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable. +set +xe +SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016" + +# TODO to make (even) more resilient: +# - Wait for daemon to be running before executing docker commands +# - Check if jq is installed +# - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version +# - Make sure we are not running as local system. Can't do until all Azure nodes are updated. +# - Error if docker versions are not equal. Can't do until all Azure nodes are updated +# - Error if go versions are not equal. Can't do until all Azure nodes are updated. +# - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64" +# - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind +# - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP +# - Consider cross builing the Windows binary and copy across. That's a bit of a heavy lift. Only reason +# for doing that is that it mirrors the actual release process for docker.exe which is cross-built. +# However, should absolutely not be a problem if built natively, so nit-picking. +# - Tidy up of images and containers. Either here, or in the teardown script. + +ec=0 +uniques=1 +echo INFO: Started at `date`. Script version $SCRIPT_VER + + +# !README! +# There are two daemons running on the remote Linux host: +# - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon +# from the sources matching the PR. +# - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted +# (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376). +# The windows integration tests are run against this inner daemon. + +# get the ip, inner and outer ports. +ip="${DOCKER_HOST#*://}" +port_outer="${ip#*:}" +# inner port is like outer port with last two digits inverted. +port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/') +ip="${ip%%:*}" + +echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner" + +# If TLS is enabled +if [ -n "$DOCKER_TLS_VERIFY" ]; then + protocol=https + if [ -z "$DOCKER_MACHINE_NAME" ]; then + ec=1 + echo "ERROR: DOCKER_MACHINE_NAME is undefined" + fi + certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME) + curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem" + run_extra_args="-v tlscerts:/etc/docker" + daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem" +else + protocol=http +fi + +# Save for use by make.sh and scripts it invokes +export MAIN_DOCKER_HOST="tcp://$ip:$port_inner" + +# Verify we can get the remote node to respond to _ping +if [ $ec -eq 0 ]; then + reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping` + if [ "$reply" != "OK" ]; then + ec=1 + echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node" + echo " at $ip:$port_outer when called with an http request for '_ping'. This implies that" + echo " either the daemon has crashed/is not running, or the Linux node is unavailable." + echo + echo " A regular ping to the remote Linux node is below. It should reply. If not, the" + echo " machine cannot be reached at all and may have crashed. If it does reply, it is" + echo " likely a case of the Linux daemon not running or having crashed, which requires" + echo " further investigation." + echo + echo " Try re-running this CI job, or ask on #docker-dev or #docker-maintainers" + echo " for someone to perform further diagnostics, or take this node out of rotation." + echo + ping $ip + else + echo "INFO: The Linux nodes outer daemon replied to a ping. Good!" + fi +fi + +# Get the version from the remote node. Note this may fail if jq is not installed. +# That's probably worth checking to make sure, just in case. +if [ $ec -eq 0 ]; then + remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'` + echo "INFO: Remote daemon is running docker version $remoteVersion" +fi + +# Compare versions. We should really fail if result is no 1. Output at end of script. +if [ $ec -eq 0 ]; then + uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l` +fi + +# Make sure we are in repo +if [ $ec -eq 0 ]; then + if [ ! -d hack ]; then + echo "ERROR: Are you sure this is being launched from a the root of docker repository?" + echo " If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker." + echo " Current directory is `pwd`" + ec=1 + fi +fi + +# Are we in split binary mode? +if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then + splitBinary=0 + echo "INFO: Running in single binary mode" +else + splitBinary=1 + echo "INFO: Running in split binary mode" +fi + + +# Get the commit has and verify we have something +if [ $ec -eq 0 ]; then + export COMMITHASH=$(git rev-parse --short HEAD) + echo INFO: Commmit hash is $COMMITHASH + if [ -z $COMMITHASH ]; then + echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?" + ec=1 + fi +fi + +# Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not +# in the right directory where the repo is cloned. We also redirect TEMP to not use the environment +# TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which +# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system... +if [ $ec -eq 0 ]; then + export TEMP=/c/CI/CI-$COMMITHASH + export TMP=$TEMP + /usr/bin/mkdir -p $TEMP # Make sure Linux mkdir for -p +fi + +# Tidy up time +if [ $ec -eq 0 ]; then + echo INFO: Deleting pre-existing containers and images... + + # Force remove all containers based on a previously built image with this commit + ! docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null + + # Force remove any container with this commithash as a name + ! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null + + # This SHOULD never happen, but just in case, also blow away any containers + # that might be around. + ! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then + echo WARN: There were some leftover containers. Cleaning them up. + ! docker rm -f $(docker ps -aq) + fi + + # Force remove the image if it exists + ! docker rmi -f "docker-$COMMITHASH" &>/dev/null +fi + +# Provide the docker version for debugging purposes. If these fail, game over. +# as the Linux box isn't responding for some reason. +if [ $ec -eq 0 ]; then + echo INFO: Docker version and info of the outer daemon on the Linux node + echo + docker version + ec=$? + if [ 0 -ne $ec ]; then + echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" + fi + echo +fi + +# Same as above, but docker info +if [ $ec -eq 0 ]; then + echo + docker info + ec=$? + if [ 0 -ne $ec ]; then + echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" + fi + echo +fi + +# build the daemon image +if [ $ec -eq 0 ]; then + echo "INFO: Running docker build on Linux host at $DOCKER_HOST" + if [ $splitBinary -eq 0 ]; then + set -x + docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" . + cat < +# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/ +# +# This script should be executed inside a docker container in privileged mode +# ('docker run --privileged', introduced in docker 0.6). + +# Usage: dind CMD [ARG...] + +# apparmor sucks and Docker needs to know that it's in a container (c) @tianon +export container=docker + +if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then + mount -t securityfs none /sys/kernel/security || { + echo >&2 'Could not mount /sys/kernel/security.' + echo >&2 'AppArmor detection and --privileged mode might break.' + } +fi + +# Mount /tmp (conditionally) +if ! mountpoint -q /tmp; then + mount -t tmpfs none /tmp +fi + +if [ $# -gt 0 ]; then + exec "$@" +fi + +echo >&2 'ERROR: No command specified.' +echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' diff --git a/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits b/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits new file mode 100755 index 0000000..8dfcca3 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits @@ -0,0 +1,11 @@ +#!/bin/sh + +TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a + +# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly +RUNC_COMMIT=9df8b306d01f59d3a8029be411de015b7304dd8f +CONTAINERD_COMMIT=aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1 +TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574 +LIBNETWORK_COMMIT=0f534354b813003a754606689722fe253101bc4e +VNDR_COMMIT=f56bd4504b4fad07a357913687fb652ee54bb3b0 +BINDATA_COMMIT=a0ff2567cfb70903282db057e799fd826784d41d diff --git a/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh b/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh new file mode 100755 index 0000000..64f2b57 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh @@ -0,0 +1,123 @@ +#!/bin/sh +set -e +set -x + +. $(dirname "$0")/binaries-commits + +RM_GOPATH=0 + +TMP_GOPATH=${TMP_GOPATH:-""} + +if [ -z "$TMP_GOPATH" ]; then + export GOPATH="$(mktemp -d)" + RM_GOPATH=1 +else + export GOPATH="$TMP_GOPATH" +fi + +# Do not build with ambient capabilities support +RUNC_BUILDTAGS="${RUNC_BUILDTAGS:-"seccomp apparmor selinux"}" + +install_runc() { + echo "Install runc version $RUNC_COMMIT" + git clone https://github.com/docker/runc.git "$GOPATH/src/github.com/opencontainers/runc" + cd "$GOPATH/src/github.com/opencontainers/runc" + git checkout -q "$RUNC_COMMIT" + make BUILDTAGS="$RUNC_BUILDTAGS" $1 + cp runc /usr/local/bin/docker-runc +} + +install_containerd() { + echo "Install containerd version $CONTAINERD_COMMIT" + git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" + cd "$GOPATH/src/github.com/docker/containerd" + git checkout -q "$CONTAINERD_COMMIT" + make $1 + cp bin/containerd /usr/local/bin/docker-containerd + cp bin/containerd-shim /usr/local/bin/docker-containerd-shim + cp bin/ctr /usr/local/bin/docker-containerd-ctr +} + +install_proxy() { + echo "Install docker-proxy version $LIBNETWORK_COMMIT" + git clone https://github.com/docker/libnetwork.git "$GOPATH/src/github.com/docker/libnetwork" + cd "$GOPATH/src/github.com/docker/libnetwork" + git checkout -q "$LIBNETWORK_COMMIT" + go build -ldflags="$PROXY_LDFLAGS" -o /usr/local/bin/docker-proxy github.com/docker/libnetwork/cmd/proxy +} + +install_bindata() { + echo "Install go-bindata version $BINDATA_COMMIT" + git clone https://github.com/jteeuwen/go-bindata "$GOPATH/src/github.com/jteeuwen/go-bindata" + cd $GOPATH/src/github.com/jteeuwen/go-bindata + git checkout -q "$BINDATA_COMMIT" + go build -o /usr/local/bin/go-bindata github.com/jteeuwen/go-bindata/go-bindata +} + +for prog in "$@" +do + case $prog in + tomlv) + echo "Install tomlv version $TOMLV_COMMIT" + git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" + cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT" + go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv + ;; + + runc) + install_runc static + ;; + + runc-dynamic) + install_runc + ;; + + containerd) + install_containerd static + ;; + + containerd-dynamic) + install_containerd + ;; + + tini) + echo "Install tini version $TINI_COMMIT" + git clone https://github.com/krallin/tini.git "$GOPATH/tini" + cd "$GOPATH/tini" + git checkout -q "$TINI_COMMIT" + cmake . + make tini-static + cp tini-static /usr/local/bin/docker-init + ;; + + proxy) + export CGO_ENABLED=0 + install_proxy + ;; + + proxy-dynamic) + PROXY_LDFLAGS="-linkmode=external" install_proxy + ;; + + vndr) + echo "Install vndr version $VNDR_COMMIT" + git clone https://github.com/LK4D4/vndr.git "$GOPATH/src/github.com/LK4D4/vndr" + cd "$GOPATH/src/github.com/LK4D4/vndr" + git checkout -q "$VNDR_COMMIT" + go build -v -o /usr/local/bin/vndr . + ;; + + bindata) + install_bindata + ;; + + *) + echo echo "Usage: $0 [tomlv|runc|containerd|tini|proxy]" + exit 1 + + esac +done + +if [ $RM_GOPATH -eq 1 ]; then + rm -rf "$GOPATH" +fi diff --git a/vendor/github.com/docker/docker/hack/generate-authors.sh b/vendor/github.com/docker/docker/hack/generate-authors.sh new file mode 100755 index 0000000..e78a97f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/generate-authors.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." + +# see also ".mailmap" for how email addresses and names are deduplicated + +{ + cat <<-'EOH' + # This file lists all individuals having contributed content to the repository. + # For how it is generated, see `hack/generate-authors.sh`. + EOH + echo + git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf +} > AUTHORS diff --git a/vendor/github.com/docker/docker/hack/generate-swagger-api.sh b/vendor/github.com/docker/docker/hack/generate-swagger-api.sh new file mode 100755 index 0000000..a8e9f81 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/generate-swagger-api.sh @@ -0,0 +1,22 @@ +#!/bin/sh +set -eu + +swagger generate model -f api/swagger.yaml \ + -t api -m types --skip-validator -C api/swagger-gen.yaml \ + -n Volume \ + -n Port \ + -n ImageSummary \ + -n Plugin -n PluginDevice -n PluginMount -n PluginEnv -n PluginInterfaceType \ + -n ErrorResponse \ + -n IdResponse \ + -n ServiceUpdateResponse + +swagger generate operation -f api/swagger.yaml \ + -t api -a types -m types -C api/swagger-gen.yaml \ + -T api/templates --skip-responses --skip-parameters --skip-validator \ + -n VolumesList \ + -n VolumesCreate \ + -n ContainerCreate \ + -n ContainerUpdate \ + -n Authenticate \ + -n ContainerWait diff --git a/vendor/github.com/docker/docker/hack/install.sh b/vendor/github.com/docker/docker/hack/install.sh new file mode 100644 index 0000000..cc20d69 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/install.sh @@ -0,0 +1,484 @@ +#!/bin/sh +set -e +# +# This script is meant for quick & easy install via: +# 'curl -sSL https://get.docker.com/ | sh' +# or: +# 'wget -qO- https://get.docker.com/ | sh' +# +# For test builds (ie. release candidates): +# 'curl -fsSL https://test.docker.com/ | sh' +# or: +# 'wget -qO- https://test.docker.com/ | sh' +# +# For experimental builds: +# 'curl -fsSL https://experimental.docker.com/ | sh' +# or: +# 'wget -qO- https://experimental.docker.com/ | sh' +# +# Docker Maintainers: +# To update this script on https://get.docker.com, +# use hack/release.sh during a normal release, +# or the following one-liner for script hotfixes: +# aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index +# + +url="https://get.docker.com/" +apt_url="https://apt.dockerproject.org" +yum_url="https://yum.dockerproject.org" +gpg_fingerprint="58118E89F3A912897C070ADBF76221572C52609D" + +key_servers=" +ha.pool.sks-keyservers.net +pgp.mit.edu +keyserver.ubuntu.com +" + +mirror='' +while [ $# -gt 0 ]; do + case "$1" in + --mirror) + mirror="$2" + shift + ;; + *) + echo "Illegal option $1" + ;; + esac + shift $(( $# > 0 ? 1 : 0 )) +done + +case "$mirror" in + AzureChinaCloud) + apt_url="https://mirror.azure.cn/docker-engine/apt" + yum_url="https://mirror.azure.cn/docker-engine/yum" + ;; +esac + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +echo_docker_as_nonroot() { + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' + ) || true + fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output + cat <<-EOF + + If you would like to use Docker as a non-root user, you should now consider + adding your user to the "docker" group with something like: + + sudo usermod -aG docker $your_user + + Remember that you will have to log out and back in for this to take effect! + + EOF +} + +# Check if this is a forked Linux distro +check_forked() { + + # Check for lsb_release command existence, it usually exists in forked distros + if command_exists lsb_release; then + # Check if the `-u` option is supported + set +e + lsb_release -a -u > /dev/null 2>&1 + lsb_release_exit_code=$? + set -e + + # Check if the command has exited successfully, it means we're in a forked distro + if [ "$lsb_release_exit_code" = "0" ]; then + # Print info about current distro + cat <<-EOF + You're using '$lsb_dist' version '$dist_version'. + EOF + + # Get the upstream release info + lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]') + dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]') + + # Print info about upstream distro + cat <<-EOF + Upstream release is '$lsb_dist' version '$dist_version'. + EOF + else + if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then + # We're Debian and don't even know it! + lsb_dist=debian + dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" + case "$dist_version" in + 9) + dist_version="stretch" + ;; + 8|'Kali Linux 2') + dist_version="jessie" + ;; + 7) + dist_version="wheezy" + ;; + esac + fi + fi + fi +} + +rpm_import_repository_key() { + local key=$1; shift + local tmpdir=$(mktemp -d) + chmod 600 "$tmpdir" + for key_server in $key_servers ; do + gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break + done + gpg --homedir "$tmpdir" -k "$key" >/dev/null + gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key + rpm --import "$tmpdir"/repo.key + rm -rf "$tmpdir" +} + +semverParse() { + major="${1%%.*}" + minor="${1#$major.}" + minor="${minor%%.*}" + patch="${1#$major.$minor.}" + patch="${patch%%[-.]*}" +} + +do_install() { + architecture=$(uname -m) + case $architecture in + # officially supported + amd64|x86_64) + ;; + # unofficially supported with available repositories + armv6l|armv7l) + ;; + # unofficially supported without available repositories + aarch64|arm64|ppc64le|s390x) + cat 1>&2 <<-EOF + Error: Docker doesn't officially support $architecture and no Docker $architecture repository exists. + EOF + exit 1 + ;; + # not supported + *) + cat >&2 <<-EOF + Error: $architecture is not a recognized platform. + EOF + exit 1 + ;; + esac + + if command_exists docker; then + version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)" + MAJOR_W=1 + MINOR_W=10 + + semverParse $version + + shouldWarn=0 + if [ $major -lt $MAJOR_W ]; then + shouldWarn=1 + fi + + if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then + shouldWarn=1 + fi + + cat >&2 <<-'EOF' + Warning: the "docker" command appears to already exist on this system. + + If you already have Docker installed, this script can cause trouble, which is + why we're displaying this warning and provide the opportunity to cancel the + installation. + + If you installed the current Docker package using this script and are using it + EOF + + if [ $shouldWarn -eq 1 ]; then + cat >&2 <<-'EOF' + again to update Docker, we urge you to migrate your image store before upgrading + to v1.10+. + + You can find instructions for this here: + https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration + EOF + else + cat >&2 <<-'EOF' + again to update Docker, you can safely ignore this message. + EOF + fi + + cat >&2 <<-'EOF' + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. + EOF + exit 1 + fi + fi + + curl='' + if command_exists curl; then + curl='curl -sSL' + elif command_exists wget; then + curl='wget -qO-' + elif command_exists busybox && busybox --list-modules | grep -q wget; then + curl='busybox wget -qO-' + fi + + # check to see which repo they are trying to install from + if [ -z "$repo" ]; then + repo='main' + if [ "https://test.docker.com/" = "$url" ]; then + repo='testing' + elif [ "https://experimental.docker.com/" = "$url" ]; then + repo='experimental' + fi + fi + + # perform some very rudimentary platform detection + lsb_dist='' + dist_version='' + if command_exists lsb_release; then + lsb_dist="$(lsb_release -si)" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then + lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then + lsb_dist='debian' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then + lsb_dist='fedora' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then + lsb_dist='oracleserver' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then + lsb_dist='centos' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then + lsb_dist='redhat' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/photon-release ]; then + lsb_dist='photon' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" + fi + + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + + # Special case redhatenterpriseserver + if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then + # Set it to redhat, it will be changed to centos below anyways + lsb_dist='redhat' + fi + + case "$lsb_dist" in + + ubuntu) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then + dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" + fi + ;; + + debian|raspbian) + dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" + case "$dist_version" in + 8) + dist_version="jessie" + ;; + 7) + dist_version="wheezy" + ;; + esac + ;; + + oracleserver) + # need to switch lsb_dist to match yum repo URL + lsb_dist="oraclelinux" + dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" + ;; + + fedora|centos|redhat) + dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)" + ;; + + "vmware photon") + lsb_dist="photon" + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + ;; + + *) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + + esac + + # Check if this is a forked Linux distro + check_forked + + # Run setup for each distro accordingly + case "$lsb_dist" in + ubuntu|debian|raspbian) + export DEBIAN_FRONTEND=noninteractive + + did_apt_get_update= + apt_get_update() { + if [ -z "$did_apt_get_update" ]; then + ( set -x; $sh_c 'sleep 3; apt-get update' ) + did_apt_get_update=1 + fi + } + + if [ "$lsb_dist" != "raspbian" ]; then + # aufs is preferred over devicemapper; try to ensure the driver is available. + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then + kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" + + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true + + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' + echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' + ( set -x; sleep 10 ) + fi + else + echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' + echo >&2 ' package. We have no AUFS support. Consider installing the packages' + echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' + ( set -x; sleep 10 ) + fi + fi + fi + + # install apparmor utils if they're missing and apparmor is enabled in the kernel + # otherwise Docker will fail to start + if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + if command -v apparmor_parser >/dev/null 2>&1; then + echo 'apparmor is enabled in the kernel and apparmor utils were already installed' + else + echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..' + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) + fi + fi + + if [ ! -e /usr/lib/apt/methods/https ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) + fi + if [ -z "$curl" ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) + curl='curl -sSL' + fi + if ! command -v gpg > /dev/null; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' ) + fi + + # dirmngr is a separate package in ubuntu yakkety; see https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1634464 + if ! command -v dirmngr > /dev/null; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q dirmngr' ) + fi + + ( + set -x + for key_server in $key_servers ; do + $sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break + done + $sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null" + $sh_c "mkdir -p /etc/apt/sources.list.d" + $sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list" + $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine' + ) + echo_docker_as_nonroot + exit 0 + ;; + + fedora|centos|redhat|oraclelinux|photon) + if [ "${lsb_dist}" = "redhat" ]; then + # we use the centos repository for both redhat and centos releases + lsb_dist='centos' + fi + $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF + [docker-${repo}-repo] + name=Docker ${repo} Repository + baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version} + enabled=1 + gpgcheck=1 + gpgkey=${yum_url}/gpg + EOF + if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then + ( + set -x + $sh_c 'sleep 3; dnf -y -q install docker-engine' + ) + elif [ "$lsb_dist" = "photon" ]; then + ( + set -x + $sh_c 'sleep 3; tdnf -y install docker-engine' + ) + else + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-engine' + ) + fi + echo_docker_as_nonroot + exit 0 + ;; + esac + + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output + cat >&2 <<-'EOF' + + Either your platform is not easily detectable, is not supported by this + installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have + a package for Docker. Please visit the following URL for more detailed + installation instructions: + + https://docs.docker.com/engine/installation/ + + EOF + exit 1 +} + +# wrapped up in a function so that we have some protection against only getting +# half the file during "curl | sh" +do_install diff --git a/vendor/github.com/docker/docker/hack/make.ps1 b/vendor/github.com/docker/docker/hack/make.ps1 new file mode 100644 index 0000000..14b9603 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make.ps1 @@ -0,0 +1,408 @@ +<# +.NOTES + Author: @jhowardmsft + + Summary: Windows native build script. This is similar to functionality provided + by hack\make.sh, but uses native Windows PowerShell semantics. It does + not support the full set of options provided by the Linux counterpart. + For example: + + - You can't cross-build Linux docker binaries on Windows + - Hashes aren't generated on binaries + - 'Releasing' isn't supported. + - Integration tests. This is because they currently cannot run inside a container, + and require significant external setup. + + It does however provided the minimum necessary to support parts of local Windows + development and Windows to Windows CI. + + Usage Examples (run from repo root): + "hack\make.ps1 -Binary" to build the binaries + "hack\make.ps1 -Client" to build just the client 64-bit binary + "hack\make.ps1 -TestUnit" to run unit tests + "hack\make.ps1 -Binary -TestUnit" to build the binaries and run unit tests + "hack\make.ps1 -All" to run everything this script knows about + +.PARAMETER Client + Builds the client binaries. + +.PARAMETER Daemon + Builds the daemon binary. + +.PARAMETER Binary + Builds the client binaries and the daemon binary. A convenient shortcut to `make.ps1 -Client -Daemon`. + +.PARAMETER Race + Use -race in go build and go test. + +.PARAMETER Noisy + Use -v in go build. + +.PARAMETER ForceBuildAll + Use -a in go build. + +.PARAMETER NoOpt + Use -gcflags -N -l in go build to disable optimisation (can aide debugging). + +.PARAMETER CommitSuffix + Adds a custom string to be appended to the commit ID (spaces are stripped). + +.PARAMETER DCO + Runs the DCO (Developer Certificate Of Origin) test. + +.PARAMETER PkgImports + Runs the pkg\ directory imports test. + +.PARAMETER GoFormat + Runs the Go formatting test. + +.PARAMETER TestUnit + Runs unit tests. + +.PARAMETER All + Runs everything this script knows about. + + +TODO +- Unify the head commit +- Sort out the GITCOMMIT environment variable in the absense of a .git (longer term) +- Add golint and other checks (swagger maybe?) + +#> + + +param( + [Parameter(Mandatory=$False)][switch]$Client, + [Parameter(Mandatory=$False)][switch]$Daemon, + [Parameter(Mandatory=$False)][switch]$Binary, + [Parameter(Mandatory=$False)][switch]$Race, + [Parameter(Mandatory=$False)][switch]$Noisy, + [Parameter(Mandatory=$False)][switch]$ForceBuildAll, + [Parameter(Mandatory=$False)][switch]$NoOpt, + [Parameter(Mandatory=$False)][string]$CommitSuffix="", + [Parameter(Mandatory=$False)][switch]$DCO, + [Parameter(Mandatory=$False)][switch]$PkgImports, + [Parameter(Mandatory=$False)][switch]$GoFormat, + [Parameter(Mandatory=$False)][switch]$TestUnit, + [Parameter(Mandatory=$False)][switch]$All +) + +$ErrorActionPreference = "Stop" +$pushed=$False # To restore the directory if we have temporarily pushed to one. + +# Utility function to get the commit ID of the repository +Function Get-GitCommit() { + if (-not (Test-Path ".\.git")) { + # If we don't have a .git directory, but we do have the environment + # variable DOCKER_GITCOMMIT set, that can override it. + if ($env:DOCKER_GITCOMMIT.Length -eq 0) { + Throw ".git directory missing and DOCKER_GITCOMMIT environment variable not specified." + } + Write-Host "INFO: Git commit assumed from DOCKER_GITCOMMIT environment variable" + return $env:DOCKER_GITCOMMIT + } + $gitCommit=$(git rev-parse --short HEAD) + if ($(git status --porcelain --untracked-files=no).Length -ne 0) { + $gitCommit="$gitCommit-unsupported" + Write-Host "" + Write-Warning "This version is unsupported because there are uncommitted file(s)." + Write-Warning "Either commit these changes, or add them to .gitignore." + git status --porcelain --untracked-files=no | Write-Warning + Write-Host "" + } + return $gitCommit +} + +# Utility function to get get the current build version of docker +Function Get-DockerVersion() { + if (-not (Test-Path ".\VERSION")) { Throw "VERSION file not found. Is this running from the root of a docker repository?" } + return $(Get-Content ".\VERSION" -raw).ToString().Replace("`n","").Trim() +} + +# Utility function to determine if we are running in a container or not. +# In Windows, we get this through an environment variable set in `Dockerfile.Windows` +Function Check-InContainer() { + if ($env:FROM_DOCKERFILE.Length -eq 0) { + Write-Host "" + Write-Warning "Not running in a container. The result might be an incorrect build." + Write-Host "" + } +} + +# Utility function to get the commit for HEAD +Function Get-HeadCommit() { + $head = Invoke-Expression "git rev-parse --verify HEAD" + if ($LASTEXITCODE -ne 0) { Throw "Failed getting HEAD commit" } + + return $head +} + +# Utility function to get the commit for upstream +Function Get-UpstreamCommit() { + Invoke-Expression "git fetch -q https://github.com/docker/docker.git refs/heads/master" + if ($LASTEXITCODE -ne 0) { Throw "Failed fetching" } + + $upstream = Invoke-Expression "git rev-parse --verify FETCH_HEAD" + if ($LASTEXITCODE -ne 0) { Throw "Failed getting upstream commit" } + + return $upstream +} + +# Build a binary (client or daemon) +Function Execute-Build($type, $additionalBuildTags, $directory) { + # Generate the build flags + $buildTags = "autogen" + if ($Noisy) { $verboseParm=" -v" } + if ($Race) { Write-Warning "Using race detector"; $raceParm=" -race"} + if ($ForceBuildAll) { $allParm=" -a" } + if ($NoOpt) { $optParm=" -gcflags "+""""+"-N -l"+"""" } + if ($addtionalBuildTags -ne "") { $buildTags += $(" " + $additionalBuildTags) } + + # Do the go build in the appropriate directory + # Note -linkmode=internal is required to be able to debug on Windows. + # https://github.com/golang/go/issues/14319#issuecomment-189576638 + Write-Host "INFO: Building $type..." + Push-Location $root\cmd\$directory; $global:pushed=$True + $buildCommand = "go build" + ` + $raceParm + ` + $verboseParm + ` + $allParm + ` + $optParm + ` + " -tags """ + $buildTags + """" + ` + " -ldflags """ + "-linkmode=internal" + """" + ` + " -o $root\bundles\"+$directory+".exe" + Invoke-Expression $buildCommand + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile $type" } + Pop-Location; $global:pushed=$False +} + + +# Validates the DCO marker is present on each commit +Function Validate-DCO($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating Developer Certificate of Origin..." + # Username may only contain alphanumeric characters or dashes and cannot begin with a dash + $usernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + + $dcoPrefix="Signed-off-by:" + $dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($usernameRegex)\\))?$" + + $counts = Invoke-Expression "git diff --numstat $upstreamCommit...$headCommit" + if ($LASTEXITCODE -ne 0) { Throw "Failed git diff --numstat" } + + # Counts of adds and deletes after removing multiple white spaces. AWK anyone? :( + $adds=0; $dels=0; $($counts -replace '\s+', ' ') | %{ + $a=$_.Split(" "); + if ($a[0] -ne "-") { $adds+=[int]$a[0] } + if ($a[1] -ne "-") { $dels+=[int]$a[1] } + } + if (($adds -eq 0) -and ($dels -eq 0)) { + Write-Warning "DCO validation - nothing to validate!" + return + } + + $commits = Invoke-Expression "git log $upstreamCommit..$headCommit --format=format:%H%n" + if ($LASTEXITCODE -ne 0) { Throw "Failed git log --format" } + $commits = $($commits -split '\s+' -match '\S') + $badCommits=@() + $commits | %{ + # Skip commits with no content such as merge commits etc + if ($(git log -1 --format=format: --name-status $_).Length -gt 0) { + # Ignore exit code on next call - always process regardless + $commitMessage = Invoke-Expression "git log -1 --format=format:%B --name-status $_" + if (($commitMessage -match $dcoRegex).Length -eq 0) { $badCommits+=$_ } + } + } + if ($badCommits.Length -eq 0) { + Write-Host "Congratulations! All commits are properly signed with the DCO!" + } else { + $e = "`nThese commits do not have a proper '$dcoPrefix' marker:`n" + $badCommits | %{ $e+=" - $_`n"} + $e += "`nPlease amend each commit to include a properly formatted DCO marker.`n`n" + $e += "Visit the following URL for information about the Docker DCO:`n" + $e += "https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work`n" + Throw $e + } +} + +# Validates that .\pkg\... is safely isolated from internal code +Function Validate-PkgImports($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating pkg import isolation..." + + # Get a list of go source-code files which have changed under pkg\. Ignore exit code on next call - always process regardless + $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'pkg\*.go`'" + $badFiles=@(); $files | %{ + $file=$_ + # For the current changed file, get its list of dependencies, sorted and uniqued. + $imports = Invoke-Expression "go list -e -f `'{{ .Deps }}`' $file" + if ($LASTEXITCODE -ne 0) { Throw "Failed go list for dependencies on $file" } + $imports = $imports -Replace "\[" -Replace "\]", "" -Split(" ") | Sort-Object | Get-Unique + # Filter out what we are looking for + $imports = $imports -NotMatch "^github.com/docker/docker/pkg/" ` + -NotMatch "^github.com/docker/docker/vendor" ` + -Match "^github.com/docker/docker" ` + -Replace "`n", "" + $imports | % { $badFiles+="$file imports $_`n" } + } + if ($badFiles.Length -eq 0) { + Write-Host 'Congratulations! ".\pkg\*.go" is safely isolated from internal code.' + } else { + $e = "`nThese files import internal code: (either directly or indirectly)`n" + $badFiles | %{ $e+=" - $_"} + Throw $e + } +} + +# Validates that changed files are correctly go-formatted +Function Validate-GoFormat($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating go formatting on changed files..." + + # Verify gofmt is installed + if ($(Get-Command gofmt -ErrorAction SilentlyContinue) -eq $nil) { Throw "gofmt does not appear to be installed" } + + # Get a list of all go source-code files which have changed. Ignore exit code on next call - always process regardless + $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'" + $files = $files | Select-String -NotMatch "^vendor/" | Select-String -NotMatch "^cli/compose/schema/bindata.go" + $badFiles=@(); $files | %{ + # Deliberately ignore error on next line - treat as failed + $content=Invoke-Expression "git show $headCommit`:$_" + + # Next set of hoops are to ensure we have LF not CRLF semantics as otherwise gofmt on Windows will not succeed. + # Also note that gofmt on Windows does not appear to support stdin piping correctly. Hence go through a temporary file. + $content=$content -join "`n" + $content+="`n" + $outputFile=[System.IO.Path]::GetTempFileName() + if (Test-Path $outputFile) { Remove-Item $outputFile } + [System.IO.File]::WriteAllText($outputFile, $content, (New-Object System.Text.UTF8Encoding($False))) + $valid=Invoke-Expression "gofmt -s -l $outputFile" + Write-Host "Checking $outputFile" + if ($valid.Length -ne 0) { $badFiles+=$_ } + if (Test-Path $outputFile) { Remove-Item $outputFile } + } + if ($badFiles.Length -eq 0) { + Write-Host 'Congratulations! All Go source files are properly formatted.' + } else { + $e = "`nThese files are not properly gofmt`'d:`n" + $badFiles | %{ $e+=" - $_`n"} + $e+= "`nPlease reformat the above files using `"gofmt -s -w`" and commit the result." + Throw $e + } +} + +# Run the unit tests +Function Run-UnitTests() { + Write-Host "INFO: Running unit tests..." + $testPath="./..." + $goListCommand = "go list -e -f '{{if ne .Name """ + '\"github.com/docker/docker\"' + """}}{{.ImportPath}}{{end}}' $testPath" + $pkgList = $(Invoke-Expression $goListCommand) + if ($LASTEXITCODE -ne 0) { Throw "go list for unit tests failed" } + $pkgList = $pkgList | Select-String -Pattern "github.com/docker/docker" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/vendor" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/man" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/integration-cli" + $pkgList = $pkgList -replace "`r`n", " " + $goTestCommand = "go test" + $raceParm + " -cover -ldflags -w -tags """ + "autogen daemon" + """ -a """ + "-test.timeout=10m" + """ $pkgList" + Invoke-Expression $goTestCommand + if ($LASTEXITCODE -ne 0) { Throw "Unit tests failed" } +} + +# Start of main code. +Try { + Write-Host -ForegroundColor Cyan "INFO: make.ps1 starting at $(Get-Date)" + $root=$(pwd) + + # Handle the "-All" shortcut to turn on all things we can handle. + if ($All) { $Client=$True; $Daemon=$True; $DCO=$True; $PkgImports=$True; $GoFormat=$True; $TestUnit=$True } + + # Handle the "-Binary" shortcut to build both client and daemon. + if ($Binary) { $Client = $True; $Daemon = $True } + + # Make sure we have something to do + if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { Throw 'Nothing to do. Try adding "-All" for everything I can do' } + + # Verify git is installed + if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $nil) { Throw "Git does not appear to be installed" } + + # Verify go is installed + if ($(Get-Command go -ErrorAction SilentlyContinue) -eq $nil) { Throw "GoLang does not appear to be installed" } + + # Get the git commit. This will also verify if we are in a repo or not. Then add a custom string if supplied. + $gitCommit=Get-GitCommit + if ($CommitSuffix -ne "") { $gitCommit += "-"+$CommitSuffix -Replace ' ', '' } + + # Get the version of docker (eg 1.14.0-dev) + $dockerVersion=Get-DockerVersion + + # Give a warning if we are not running in a container and are building binaries or running unit tests. + # Not relevant for validation tests as these are fine to run outside of a container. + if ($Client -or $Daemon -or $TestUnit) { Check-InContainer } + + # Verify GOPATH is set + if ($env:GOPATH.Length -eq 0) { Throw "Missing GOPATH environment variable. See https://golang.org/doc/code.html#GOPATH" } + + # Run autogen if building binaries or running unit tests. + if ($Client -or $Daemon -or $TestUnit) { + Write-Host "INFO: Invoking autogen..." + Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion } + Catch [Exception] { Throw $_ } + } + + # DCO, Package import and Go formatting tests. + if ($DCO -or $PkgImports -or $GoFormat) { + # We need the head and upstream commits for these + $headCommit=Get-HeadCommit + $upstreamCommit=Get-UpstreamCommit + + # Run DCO validation + if ($DCO) { Validate-DCO $headCommit $upstreamCommit } + + # Run `gofmt` validation + if ($GoFormat) { Validate-GoFormat $headCommit $upstreamCommit } + + # Run pkg isolation validation + if ($PkgImports) { Validate-PkgImports $headCommit $upstreamCommit } + } + + # Build the binaries + if ($Client -or $Daemon) { + # Create the bundles directory if it doesn't exist + if (-not (Test-Path ".\bundles")) { New-Item ".\bundles" -ItemType Directory | Out-Null } + + # Perform the actual build + if ($Daemon) { Execute-Build "daemon" "daemon" "dockerd" } + if ($Client) { Execute-Build "client" "" "docker" } + } + + # Run unit tests + if ($TestUnit) { Run-UnitTests } + + # Gratuitous ASCII art. + if ($Daemon -or $Client) { + Write-Host + Write-Host -ForegroundColor Green " ________ ____ __." + Write-Host -ForegroundColor Green " \_____ \ `| `|/ _`|" + Write-Host -ForegroundColor Green " / `| \`| `<" + Write-Host -ForegroundColor Green " / `| \ `| \" + Write-Host -ForegroundColor Green " \_______ /____`|__ \" + Write-Host -ForegroundColor Green " \/ \/" + Write-Host + } +} +Catch [Exception] { + Write-Host -ForegroundColor Red ("`nERROR: make.ps1 failed:`n$_") + + # More gratuitous ASCII art. + Write-Host + Write-Host -ForegroundColor Red "___________ .__.__ .___" + Write-Host -ForegroundColor Red "\_ _____/____ `|__`| `| ____ __`| _/" + Write-Host -ForegroundColor Red " `| __) \__ \ `| `| `| _/ __ \ / __ `| " + Write-Host -ForegroundColor Red " `| \ / __ \`| `| `|_\ ___// /_/ `| " + Write-Host -ForegroundColor Red " \___ / (____ /__`|____/\___ `>____ `| " + Write-Host -ForegroundColor Red " \/ \/ \/ \/ " + Write-Host + + Throw $_ +} +Finally { + if ($global:pushed) { Pop-Location } + Write-Host -ForegroundColor Cyan "INFO: make.ps1 ended at $(Get-Date)" +} diff --git a/vendor/github.com/docker/docker/hack/make.sh b/vendor/github.com/docker/docker/hack/make.sh new file mode 100755 index 0000000..f0e482f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make.sh @@ -0,0 +1,304 @@ +#!/usr/bin/env bash +set -e + +# This script builds various binary artifacts from a checkout of the docker +# source code. +# +# Requirements: +# - The current directory should be a checkout of the docker source code +# (https://github.com/docker/docker). Whatever version is checked out +# will be built. +# - The VERSION file, at the root of the repository, should exist, and +# will be used as Docker binary version and package version. +# - The hash of the git commit will also be included in the Docker binary, +# with the suffix -unsupported if the repository isn't clean. +# - The script is intended to be run inside the docker container specified +# in the Dockerfile at the root of the source. In other words: +# DO NOT CALL THIS SCRIPT DIRECTLY. +# - The right way to call this script is to invoke "make" from +# your checkout of the Docker repository. +# the Makefile will do a "docker build -t docker ." and then +# "docker run hack/make.sh" in the resulting image. +# + +set -o pipefail + +export DOCKER_PKG='github.com/docker/docker' +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export MAKEDIR="$SCRIPTDIR/make" +export PKG_CONFIG=${PKG_CONFIG:-pkg-config} + +# We're a nice, sexy, little shell script, and people might try to run us; +# but really, they shouldn't. We want to be in a container! +inContainer="AssumeSoInitially" +if [ "$(go env GOHOSTOS)" = 'windows' ]; then + if [ -z "$FROM_DOCKERFILE" ]; then + unset inContainer + fi +else + if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then + unset inContainer + fi +fi + +if [ -z "$inContainer" ]; then + { + echo "# WARNING! I don't seem to be running in a Docker container." + echo "# The result of this command might be an incorrect build, and will not be" + echo "# officially supported." + echo "#" + echo "# Try this instead: make all" + echo "#" + } >&2 +fi + +echo + +# List of bundles to create when no argument is passed +DEFAULT_BUNDLES=( + binary-client + binary-daemon + dynbinary + + test-unit + test-integration-cli + test-docker-py + + cross + tgz +) + +VERSION=$(< ./VERSION) +! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') +if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then + GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + GITCOMMIT="$GITCOMMIT-unsupported" + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + echo "# GITCOMMIT = $GITCOMMIT" + echo "# The version you are building is listed as unsupported because" + echo "# there are some files in the git repository that are in an uncommitted state." + echo "# Commit these changes, or add to .gitignore to remove the -unsupported from the version." + echo "# Here is the current list:" + git status --porcelain --untracked-files=no + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + fi +elif [ "$DOCKER_GITCOMMIT" ]; then + GITCOMMIT="$DOCKER_GITCOMMIT" +else + echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' + echo >&2 ' Please either build with the .git directory accessible, or specify the' + echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' + echo >&2 ' future accountability in diagnosing build issues. Thanks!' + exit 1 +fi + +if [ "$AUTO_GOPATH" ]; then + rm -rf .gopath + mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" + ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" + export GOPATH="${PWD}/.gopath" + + if [ "$(go env GOOS)" = 'solaris' ]; then + # sys/unix is installed outside the standard library on solaris + # TODO need to allow for version change, need to get version from go + export GO_VERSION=${GO_VERSION:-"1.7.1"} + export GOPATH="${GOPATH}:/usr/lib/gocode/${GO_VERSION}" + fi +fi + +if [ ! "$GOPATH" ]; then + echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH' + echo >&2 ' alternatively, set AUTO_GOPATH=1' + exit 1 +fi + +DOCKER_BUILDTAGS+=" daemon" +if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald" +elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald journald_compat" +fi + +# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately +if \ + command -v gcc &> /dev/null \ + && ! gcc -E - -o /dev/null &> /dev/null <<<'#include ' \ +; then + DOCKER_BUILDTAGS+=' btrfs_noversion' +fi + +# test whether "libdevmapper.h" is new enough to support deferred remove +# functionality. +if \ + command -v gcc &> /dev/null \ + && ! ( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null -ldevmapper &> /dev/null ) \ +; then + DOCKER_BUILDTAGS+=' libdm_no_deferred_remove' +fi + +# Use these flags when compiling the tests and final binary + +IAMSTATIC='true' +source "$SCRIPTDIR/make/.go-autogen" +if [ -z "$DOCKER_DEBUG" ]; then + LDFLAGS='-w' +fi + +LDFLAGS_STATIC='' +EXTLDFLAGS_STATIC='-static' +# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build +# with options like -race. +ORIG_BUILDFLAGS=( -tags "autogen netgo static_build sqlite_omit_load_extension $DOCKER_BUILDTAGS" -installsuffix netgo ) +# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here + +# When $DOCKER_INCREMENTAL_BINARY is set in the environment, enable incremental +# builds by installing dependent packages to the GOPATH. +REBUILD_FLAG="-a" +if [ "$DOCKER_INCREMENTAL_BINARY" ]; then + REBUILD_FLAG="-i" +fi +ORIG_BUILDFLAGS+=( $REBUILD_FLAG ) + +BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) +# Test timeout. + +if [ "${DOCKER_ENGINE_GOARCH}" == "arm" ]; then + : ${TIMEOUT:=10m} +elif [ "${DOCKER_ENGINE_GOARCH}" == "windows" ]; then + : ${TIMEOUT:=8m} +else + : ${TIMEOUT:=5m} +fi + +LDFLAGS_STATIC_DOCKER=" + $LDFLAGS_STATIC + -extldflags \"$EXTLDFLAGS_STATIC\" +" + +if [ "$(uname -s)" = 'FreeBSD' ]; then + # Tell cgo the compiler is Clang, not GCC + # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 + export CC=clang + + # "-extld clang" is a workaround for + # https://code.google.com/p/go/issues/detail?id=6845 + LDFLAGS="$LDFLAGS -extld clang" +fi + +# If sqlite3.h doesn't exist under /usr/include, +# check /usr/local/include also just in case +# (e.g. FreeBSD Ports installs it under the directory) +if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then + export CGO_CFLAGS='-I/usr/local/include' + export CGO_LDFLAGS='-L/usr/local/lib' +fi + +HAVE_GO_TEST_COVER= +if \ + go help testflag | grep -- -cover > /dev/null \ + && go tool -n cover > /dev/null 2>&1 \ +; then + HAVE_GO_TEST_COVER=1 +fi + +# a helper to provide ".exe" when it's appropriate +binary_extension() { + if [ "$(go env GOOS)" = 'windows' ]; then + echo -n '.exe' + fi +} + +hash_files() { + while [ $# -gt 0 ]; do + f="$1" + shift + dir="$(dirname "$f")" + base="$(basename "$f")" + for hashAlgo in md5 sha256; do + if command -v "${hashAlgo}sum" &> /dev/null; then + ( + # subshell and cd so that we get output files like: + # $HASH docker-$VERSION + # instead of: + # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION + cd "$dir" + "${hashAlgo}sum" "$base" > "$base.$hashAlgo" + ) + fi + done + done +} + +bundle() { + local bundle="$1"; shift + echo "---> Making bundle: $(basename "$bundle") (in $DEST)" + source "$SCRIPTDIR/make/$bundle" "$@" +} + +copy_binaries() { + dir="$1" + # Add nested executables to bundle dir so we have complete set of + # them available, but only if the native OS/ARCH is the same as the + # OS/ARCH of the build target + if [ "$(go env GOOS)/$(go env GOARCH)" == "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + if [ -x /usr/local/bin/docker-runc ]; then + echo "Copying nested executables into $dir" + for file in containerd containerd-shim containerd-ctr runc init proxy; do + cp `which "docker-$file"` "$dir/" + if [ "$2" == "hash" ]; then + hash_files "$dir/docker-$file" + fi + done + fi + fi +} + +install_binary() { + file="$1" + target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/" + if [ "$(go env GOOS)" == "linux" ]; then + echo "Installing $(basename $file) to ${target}" + cp -L "$file" "$target" + else + echo "Install is only supported on linux" + return 1 + fi +} + +main() { + # We want this to fail if the bundles already exist and cannot be removed. + # This is to avoid mixing bundles from different versions of the code. + mkdir -p bundles + if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then + echo "bundles/$VERSION already exists. Removing." + rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 + echo + fi + + if [ "$(go env GOHOSTOS)" != 'windows' ]; then + # Windows and symlinks don't get along well + + rm -f bundles/latest + ln -s "$VERSION" bundles/latest + fi + + if [ $# -lt 1 ]; then + bundles=(${DEFAULT_BUNDLES[@]}) + else + bundles=($@) + fi + for bundle in ${bundles[@]}; do + export DEST="bundles/$VERSION/$(basename "$bundle")" + # Cygdrive paths don't play well with go build -o. + if [[ "$(uname -s)" == CYGWIN* ]]; then + export DEST="$(cygpath -mw "$DEST")" + fi + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + bundle "$bundle" + echo + done +} + +main "$@" diff --git a/vendor/github.com/docker/docker/hack/make/.binary b/vendor/github.com/docker/docker/hack/make/.binary new file mode 100644 index 0000000..f5c35c3 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.binary @@ -0,0 +1,48 @@ +#!/bin/bash +set -e + +BINARY_NAME="$BINARY_SHORT_NAME-$VERSION" +BINARY_EXTENSION="$(binary_extension)" +BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + +source "${MAKEDIR}/.go-autogen" + +( +export GOGC=${DOCKER_BUILD_GOGC:-1000} + +if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + # must be cross-compiling! + case "$(go env GOOS)/$(go env GOARCH)" in + windows/amd64) + export CC=x86_64-w64-mingw32-gcc + export CGO_ENABLED=1 + ;; + esac +fi + +if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then + if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then + export CGO_ENABLED=1 + export CC=o64-clang + export LDFLAGS='-linkmode external -s' + export LDFLAGS_STATIC_DOCKER='-extld='${CC} + else + export BUILDFLAGS=( "${BUILDFLAGS[@]/pkcs11 /}" ) # we cannot dlopen in pkcs11 in a static binary + fi +fi + +echo "Building: $DEST/$BINARY_FULLNAME" +go build \ + -o "$DEST/$BINARY_FULLNAME" \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC_DOCKER + " \ + $GO_PACKAGE +) + +echo "Created binary: $DEST/$BINARY_FULLNAME" +ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION" + +hash_files "$DEST/$BINARY_FULLNAME" diff --git a/vendor/github.com/docker/docker/hack/make/.binary-setup b/vendor/github.com/docker/docker/hack/make/.binary-setup new file mode 100644 index 0000000..b9f8ce2 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.binary-setup @@ -0,0 +1,10 @@ +#!/bin/bash + +DOCKER_CLIENT_BINARY_NAME='docker' +DOCKER_DAEMON_BINARY_NAME='dockerd' +DOCKER_RUNC_BINARY_NAME='docker-runc' +DOCKER_CONTAINERD_BINARY_NAME='docker-containerd' +DOCKER_CONTAINERD_CTR_BINARY_NAME='docker-containerd-ctr' +DOCKER_CONTAINERD_SHIM_BINARY_NAME='docker-containerd-shim' +DOCKER_PROXY_BINARY_NAME='docker-proxy' +DOCKER_INIT_BINARY_NAME='docker-init' diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/compat b/vendor/github.com/docker/docker/hack/make/.build-deb/compat new file mode 100644 index 0000000..ec63514 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/compat @@ -0,0 +1 @@ +9 diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/control b/vendor/github.com/docker/docker/hack/make/.build-deb/control new file mode 100644 index 0000000..0f54399 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/control @@ -0,0 +1,29 @@ +Source: docker-engine +Section: admin +Priority: optional +Maintainer: Docker +Standards-Version: 3.9.6 +Homepage: https://dockerproject.org +Vcs-Browser: https://github.com/docker/docker +Vcs-Git: git://github.com/docker/docker.git + +Package: docker-engine +Architecture: linux-any +Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends} +Recommends: aufs-tools, + ca-certificates, + cgroupfs-mount | cgroup-lite, + git, + xz-utils, + ${apparmor:Recommends} +Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs +Description: Docker: the open-source application container engine + Docker is an open source project to build, ship and run any application as a + lightweight container + . + Docker containers are both hardware-agnostic and platform-agnostic. This means + they can run anywhere, from your laptop to the largest EC2 compute instance and + everything in between - and they don't require you to use a particular + language, framework or packaging system. That makes them great building blocks + for deploying and scaling web apps, databases, and backend services without + depending on a particular stack or provider. diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion new file mode 100644 index 0000000..6ea1119 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion @@ -0,0 +1 @@ +contrib/completion/bash/docker diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default new file mode 120000 index 0000000..4278533 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init new file mode 120000 index 0000000..8cb89d3 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart new file mode 120000 index 0000000..7e1b64a --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart @@ -0,0 +1 @@ +../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install new file mode 100644 index 0000000..dc6b25f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install @@ -0,0 +1,12 @@ +#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/ +#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ +#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ +contrib/*-integration usr/share/docker-engine/contrib/ +contrib/check-config.sh usr/share/docker-engine/contrib/ +contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/ +contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ +contrib/init/systemd/docker.service lib/systemd/system/ +contrib/init/systemd/docker.socket lib/systemd/system/ +contrib/mk* usr/share/docker-engine/contrib/ +contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ +contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages new file mode 100644 index 0000000..1aa6218 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages @@ -0,0 +1 @@ +man/man*/* diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst new file mode 100644 index 0000000..eeef6ca --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst @@ -0,0 +1,20 @@ +#!/bin/sh +set -e + +case "$1" in + configure) + if [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi + fi + ;; + abort-*) + # How'd we get here?? + exit 1 + ;; + *) + ;; +esac + +#DEBHELPER# diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev new file mode 120000 index 0000000..914a361 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev @@ -0,0 +1 @@ +../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docs b/vendor/github.com/docker/docker/hack/make/.build-deb/docs new file mode 100644 index 0000000..b43bf86 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docs @@ -0,0 +1 @@ +README.md diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/rules b/vendor/github.com/docker/docker/hack/make/.build-deb/rules new file mode 100755 index 0000000..6522103 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/rules @@ -0,0 +1,55 @@ +#!/usr/bin/make -f + +VERSION = $(shell cat VERSION) +SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1) +SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true ) + +override_dh_gencontrol: + # if we're on Ubuntu, we need to Recommends: apparmor + echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars + dh_gencontrol + +override_dh_auto_build: + ./hack/make.sh dynbinary + # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +override_dh_auto_test: + ./bundles/$(VERSION)/dynbinary-daemon/dockerd -v + ./bundles/$(VERSION)/dynbinary-client/docker -v + +override_dh_strip: + # Go has lots of problems with stripping, so just don't + +override_dh_auto_install: + mkdir -p debian/docker-engine/usr/bin + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-client/docker)" debian/docker-engine/usr/bin/docker + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd + cp -aT /usr/local/bin/docker-proxy debian/docker-engine/usr/bin/docker-proxy + cp -aT /usr/local/bin/docker-containerd debian/docker-engine/usr/bin/docker-containerd + cp -aT /usr/local/bin/docker-containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim + cp -aT /usr/local/bin/docker-containerd-ctr debian/docker-engine/usr/bin/docker-containerd-ctr + cp -aT /usr/local/bin/docker-runc debian/docker-engine/usr/bin/docker-runc + cp -aT /usr/local/bin/docker-init debian/docker-engine/usr/bin/docker-init + mkdir -p debian/docker-engine/usr/lib/docker + +override_dh_installinit: + # use "docker" as our service name, not "docker-engine" + dh_installinit --name=docker +ifeq (true, $(SYSTEMD_GT_227)) + $(warning "Setting TasksMax=infinity") + sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service +endif + +override_dh_installudev: + # match our existing priority + dh_installudev --priority=z80 + +override_dh_install: + dh_install + dh_apparmor --profile-name=docker-engine -pdocker-engine + +override_dh_shlibdeps: + dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info + +%: + dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd) diff --git a/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec new file mode 100644 index 0000000..ae597bd --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec @@ -0,0 +1,96 @@ +# Some bits borrowed from the openstack-selinux package +Name: docker-engine-selinux +Version: %{_version} +Release: %{_release}%{?dist} +Summary: SELinux Policies for the open-source application container engine +BuildArch: noarch +Group: Tools/Docker + +License: GPLv2 +Source: %{name}.tar.gz + +URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +%global selinux_policyver 3.13.1-102 +%global selinuxtype targeted +%global moduletype services +%global modulenames docker + +Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils +BuildRequires: selinux-policy selinux-policy-devel + +# conflicting packages +Conflicts: docker-selinux + +# Usage: _format var format +# Expand 'modulenames' into various formats as needed +# Format must contain '$x' somewhere to do anything useful +%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done; + +# Relabel files +%global relabel_files() \ + /sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \ + +%description +SELinux policy modules for use with Docker + +%prep +%if 0%{?centos} <= 6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +make SHARE="%{_datadir}" TARGETS="%{modulenames}" + +%install + +# Install SELinux interfaces +%_format INTERFACES $x.if +install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} +install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} + +# Install policy modules +%_format MODULES $x.pp.bz2 +install -d %{buildroot}%{_datadir}/selinux/packages +install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages + +%post +# +# Install all modules in a single transaction +# +if [ $1 -eq 1 ]; then + %{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1 +fi +%_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2 +%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES +if %{_sbindir}/selinuxenabled ; then + %{_sbindir}/load_policy + %relabel_files + if [ $1 -eq 1 ]; then + restorecon -R %{_sharedstatedir}/docker + fi +fi + +%postun +if [ $1 -eq 0 ]; then + %{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || : + if %{_sbindir}/selinuxenabled ; then + %{_sbindir}/load_policy + %relabel_files + fi +fi + +%files +%doc LICENSE +%defattr(-,root,root,0755) +%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2 +%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if + +%changelog +* Tue Dec 1 2015 Jessica Frazelle 1.9.1-1 +- add licence to rpm +- add selinux-policy and docker-engine-selinux rpm diff --git a/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec new file mode 100644 index 0000000..d53e55b --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec @@ -0,0 +1,254 @@ +Name: docker-engine +Version: %{_version} +Release: %{_release}%{?dist} +Summary: The open-source application container engine +Group: Tools/Docker + +License: ASL 2.0 +Source: %{name}.tar.gz + +URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +# is_systemd conditional +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 +%global is_systemd 1 +%endif + +# required packages for build +# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh) +# only require systemd on those systems +%if 0%{?is_systemd} +%if 0%{?suse_version} >= 1210 +BuildRequires: systemd-rpm-macros +%{?systemd_requires} +%else +%if 0%{?fedora} >= 25 +# Systemd 230 and up no longer have libsystemd-journal (see https://bugzilla.redhat.com/show_bug.cgi?id=1350301) +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +%else +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +BuildRequires: pkgconfig(libsystemd-journal) +%endif +%endif +%else +Requires(post): chkconfig +Requires(preun): chkconfig +# This is for /sbin/service +Requires(preun): initscripts +%endif + +# required packages on install +Requires: /bin/sh +Requires: iptables +%if !0%{?suse_version} +Requires: libcgroup +%else +Requires: libcgroup1 +%endif +Requires: tar +Requires: xz +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +# Resolves: rhbz#1165615 +Requires: device-mapper-libs >= 1.02.90-1 +%endif +%if 0%{?oraclelinux} >= 6 +# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper +Requires: kernel-uek >= 4.1 +Requires: device-mapper >= 1.02.90-2 +%endif + +# docker-selinux conditional +%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global with_selinux 1 +%endif + +# DWZ problem with multiple golang binary, see bug +# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12 +%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global _dwz_low_mem_die_limit 0 +%endif + +# start if with_selinux +%if 0%{?with_selinux} +# Version of SELinux we were using +%if 0%{?fedora} == 20 +%global selinux_policyver 3.12.1-197 +%endif # fedora 20 +%if 0%{?fedora} == 21 +%global selinux_policyver 3.13.1-105 +%endif # fedora 21 +%if 0%{?fedora} >= 22 +%global selinux_policyver 3.13.1-128 +%endif # fedora 22 +%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global selinux_policyver 3.13.1-23 +%endif # centos,oraclelinux 7 +%endif # with_selinux + +# RE: rhbz#1195804 - ensure min NVR for selinux-policy +%if 0%{?with_selinux} +Requires: selinux-policy >= %{selinux_policyver} +Requires(pre): %{name}-selinux >= %{version}-%{release} +%endif # with_selinux + +# conflicting packages +Conflicts: docker +Conflicts: docker-io +Conflicts: docker-engine-cs + +%description +Docker is an open source project to build, ship and run any application as a +lightweight container. + +Docker containers are both hardware-agnostic and platform-agnostic. This means +they can run anywhere, from your laptop to the largest EC2 compute instance and +everything in between - and they don't require you to use a particular +language, framework or packaging system. That makes them great building blocks +for deploying and scaling web apps, databases, and backend services without +depending on a particular stack or provider. + +%prep +%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +export DOCKER_GITCOMMIT=%{_gitcommit} +./hack/make.sh dynbinary +# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +%check +./bundles/%{_origversion}/dynbinary-client/docker -v +./bundles/%{_origversion}/dynbinary-daemon/dockerd -v + +%install +# install binary +install -d $RPM_BUILD_ROOT/%{_bindir} +install -p -m 755 bundles/%{_origversion}/dynbinary-client/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker +install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd + +# install proxy +install -p -m 755 /usr/local/bin/docker-proxy $RPM_BUILD_ROOT/%{_bindir}/docker-proxy + +# install containerd +install -p -m 755 /usr/local/bin/docker-containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd +install -p -m 755 /usr/local/bin/docker-containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim +install -p -m 755 /usr/local/bin/docker-containerd-ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr + +# install runc +install -p -m 755 /usr/local/bin/docker-runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc + +# install tini +install -p -m 755 /usr/local/bin/docker-init $RPM_BUILD_ROOT/%{_bindir}/docker-init + +# install udev rules +install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d +install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules + +# add init scripts +install -d $RPM_BUILD_ROOT/etc/sysconfig +install -d $RPM_BUILD_ROOT/%{_initddir} + + +%if 0%{?is_systemd} +install -d $RPM_BUILD_ROOT/%{_unitdir} +install -p -m 644 contrib/init/systemd/docker.service.rpm $RPM_BUILD_ROOT/%{_unitdir}/docker.service +%else +install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker +install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker +%endif +# add bash, zsh, and fish completions +install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions +install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions +install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d +install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker +install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker +install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish + +# install manpages +install -d %{buildroot}%{_mandir}/man1 +install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1 +install -d %{buildroot}%{_mandir}/man5 +install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5 +install -d %{buildroot}%{_mandir}/man8 +install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8 + +# add vimfiles +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax +install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt +install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim + +# add nano +install -d $RPM_BUILD_ROOT/usr/share/nano +install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc + +# list files owned by the package here +%files +%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md +/%{_bindir}/docker +/%{_bindir}/dockerd +/%{_bindir}/docker-containerd +/%{_bindir}/docker-containerd-shim +/%{_bindir}/docker-containerd-ctr +/%{_bindir}/docker-proxy +/%{_bindir}/docker-runc +/%{_bindir}/docker-init +/%{_sysconfdir}/udev/rules.d/80-docker.rules +%if 0%{?is_systemd} +/%{_unitdir}/docker.service +%else +%config(noreplace,missingok) /etc/sysconfig/docker +/%{_initddir}/docker +%endif +/usr/share/bash-completion/completions/docker +/usr/share/zsh/vendor-completions/_docker +/usr/share/fish/vendor_completions.d/docker.fish +%doc +/%{_mandir}/man1/* +/%{_mandir}/man5/* +/%{_mandir}/man8/* +/usr/share/vim/vimfiles/doc/dockerfile.txt +/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +/usr/share/vim/vimfiles/syntax/dockerfile.vim +/usr/share/nano/Dockerfile.nanorc + +%post +%if 0%{?is_systemd} +%systemd_post docker +%else +# This adds the proper /etc/rc*.d links for the script +/sbin/chkconfig --add docker +%endif +if ! getent group docker > /dev/null; then + groupadd --system docker +fi + +%preun +%if 0%{?is_systemd} +%systemd_preun docker +%else +if [ $1 -eq 0 ] ; then + /sbin/service docker stop >/dev/null 2>&1 + /sbin/chkconfig --del docker +fi +%endif + +%postun +%if 0%{?is_systemd} +%systemd_postun_with_restart docker +%else +if [ "$1" -ge "1" ] ; then + /sbin/service docker condrestart >/dev/null 2>&1 || : +fi +%endif + +%changelog diff --git a/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch b/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch new file mode 100644 index 0000000..7395539 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch @@ -0,0 +1,69 @@ +#!/bin/bash +set -e + +docker-version-osarch() { + local target="$1" # "Client" or "Server" + local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}" + if docker version -f "$fmtStr" 2>/dev/null; then + # if "docker version -f" works, let's just use that! + return + fi + docker version | awk ' + $1 ~ /^(Client|Server):$/ { section = 0 } + $1 == "'"$target"':" { section = 1; next } + section && $1 == "OS/Arch:" { print $2 } + + # old versions of Docker + $1 == "OS/Arch" && $2 == "('"${target,,}"'):" { print $3 } + ' +} + +# Retrieve OS/ARCH of docker daemon, e.g. linux/amd64 +export DOCKER_ENGINE_OSARCH="$(docker-version-osarch 'Server')" +export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}" +export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}" +DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:=amd64} + +# and the client, just in case +export DOCKER_CLIENT_OSARCH="$(docker-version-osarch 'Client')" +export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}" +export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}" +DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64} + +# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/ +PACKAGE_ARCH='amd64' +case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in + arm) + PACKAGE_ARCH='armhf' + ;; + arm64) + PACKAGE_ARCH='aarch64' + ;; + amd64|ppc64le|s390x) + PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" + ;; + *) + echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'" + ;; +esac +export PACKAGE_ARCH + +DOCKERFILE='Dockerfile' +TEST_IMAGE_NAMESPACE= +case "$PACKAGE_ARCH" in + amd64) + case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in + windows) + DOCKERFILE='Dockerfile.windows' + ;; + solaris) + DOCKERFILE='Dockerfile.solaris' + ;; + esac + ;; + *) + DOCKERFILE="Dockerfile.$PACKAGE_ARCH" + TEST_IMAGE_NAMESPACE="$PACKAGE_ARCH" + ;; +esac +export DOCKERFILE TEST_IMAGE_NAMESPACE diff --git a/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs b/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs new file mode 100644 index 0000000..e71a30a --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +if ! docker inspect emptyfs &> /dev/null; then + # let's build a "docker save" tarball for "emptyfs" + # see https://github.com/docker/docker/pull/5262 + # and also https://github.com/docker/docker/issues/4242 + dir="$DEST/emptyfs" + mkdir -p "$dir" + ( + cd "$dir" + echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories + mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + ( + cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ) + ( set -x; tar -cC "$dir" . | docker load ) + rm -rf "$dir" +fi diff --git a/vendor/github.com/docker/docker/hack/make/.go-autogen b/vendor/github.com/docker/docker/hack/make/.go-autogen new file mode 100644 index 0000000..4d26052 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.go-autogen @@ -0,0 +1,86 @@ +#!/bin/bash + +rm -rf autogen + +source hack/dockerfile/binaries-commits + +cat > dockerversion/version_autogen.go < dockerversion/version_autogen_unix.go < + +param( + [Parameter(Mandatory=$true)][string]$CommitString, + [Parameter(Mandatory=$true)][string]$DockerVersion +) + +$ErrorActionPreference = "Stop" + +# Utility function to get the build date/time in UTC +Function Get-BuildDateTime() { + return $(Get-Date).ToUniversalTime() +} + +try { + $buildDateTime=Get-BuildDateTime + + if (Test-Path ".\autogen") { + Remove-Item ".\autogen" -Recurse -Force | Out-Null + } + + $fileContents = ' +// +build autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. +const ( + GitCommit string = "'+$CommitString+'" + Version string = "'+$DockerVersion+'" + BuildTime string = "'+$buildDateTime+'" +) + +// AUTOGENERATED FILE; see hack\make\.go-autogen.ps1 +' + + # Write the file without BOM + $outputFile="$(pwd)\dockerversion\version_autogen.go" + if (Test-Path $outputFile) { Remove-Item $outputFile } + [System.IO.File]::WriteAllText($outputFile, $fileContents, (New-Object System.Text.UTF8Encoding($False))) + + New-Item -ItemType Directory -Path "autogen\winresources\tmp" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\docker" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\dockerd" | Out-Null + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\docker" + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\dockerd" + + # Generate a version in the form major,minor,patch,build + $versionQuad=$DockerVersion -replace "[^0-9.]*" -replace "\.", "," + + # Compile the messages + windmc hack\make\.resources-windows\event_messages.mc -h autogen\winresources\tmp -r autogen\winresources\tmp + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile event message resources" } + + # If you really want to understand this madness below, search the Internet for powershell variables after verbatim arguments... Needed to get double-quotes passed through to the compiler options. + # Generate the .syso files containing all the resources and manifest needed to compile the final docker binaries. Both 32 and 64-bit clients. + $env:_ag_dockerVersion=$DockerVersion + $env:_ag_gitCommit=$CommitString + + windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 64-bit resources" } + + windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_386.syso -F pe-i386 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 32-bit resources" } + + windres -i hack/make/.resources-windows/dockerd.rc -o autogen/winresources/dockerd/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile daemon resources" } +} +Catch [Exception] { + # Throw the error onto the caller to display errors. We don't expect this script to be called directly + Throw ".go-autogen.ps1 failed with error $_" +} +Finally { + Remove-Item .\autogen\winresources\tmp -Recurse -Force -ErrorAction SilentlyContinue | Out-Null + $env:_ag_dockerVersion="" + $env:_ag_gitCommit="" +} diff --git a/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup b/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup new file mode 100644 index 0000000..0efde71 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +bundle .detect-daemon-osarch +if [ $DOCKER_ENGINE_GOOS != "windows" ]; then + bundle .ensure-emptyfs +fi diff --git a/vendor/github.com/docker/docker/hack/make/.integration-daemon-start b/vendor/github.com/docker/docker/hack/make/.integration-daemon-start new file mode 100644 index 0000000..b96979b --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.integration-daemon-start @@ -0,0 +1,116 @@ +#!/bin/bash + +# see test-integration-cli for example usage of this script + +base="$ABS_DEST/.." +export PATH="$base/binary-client:$base/binary-daemon:$base/dynbinary-client:$base/dynbinary-daemon:$PATH" + +if ! command -v docker &> /dev/null; then + echo >&2 'error: binary-client or dynbinary-client must be run before .integration-daemon-start' + false +fi + +# This is a temporary hack for split-binary mode. It can be removed once +# https://github.com/docker/docker/pull/22134 is merged into docker master +if [ "$(go env GOOS)" = 'windows' ]; then + return +fi + +if [ -z "$DOCKER_TEST_HOST" ]; then + if docker version &> /dev/null; then + echo >&2 'skipping daemon start, since daemon appears to be already started' + return + fi +fi + +if ! command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before .integration-daemon-start' + false +fi + +# intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers +exec 41>&1 42>&2 + +export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +if [ "$DOCKER_EXPERIMENTAL" ]; then + echo >&2 '# DOCKER_EXPERIMENTAL is set: starting daemon with experimental features enabled! ' + extra_params="$extra_params --experimental" +fi + +if [ -z "$DOCKER_TEST_HOST" ]; then + # Start apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + # reset container variable so apparmor profile is applied to process + # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16 + export container="" + ( + set -x + /etc/init.d/apparmor start + ) + fi + + export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one + ( set -x; exec \ + dockerd --debug \ + --host "$DOCKER_HOST" \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --pidfile "$DEST/docker.pid" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params \ + &> "$DEST/docker.log" + ) & + # make sure that if the script exits unexpectedly, we stop this daemon we just started + trap 'bundle .integration-daemon-stop' EXIT +else + export DOCKER_HOST="$DOCKER_TEST_HOST" +fi + +# give it a little time to come up so it's "ready" +tries=60 +echo "INFO: Waiting for daemon to start..." +while ! docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + printf "\n" + if [ -z "$DOCKER_HOST" ]; then + echo >&2 "error: daemon failed to start" + echo >&2 " check $DEST/docker.log for details" + else + echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" + docker version >&2 || true + # Additional Windows CI debugging as this is a common error as of + # January 2016 + if [ "$(go env GOOS)" = 'windows' ]; then + echo >&2 "Container log below:" + echo >&2 "---" + # Important - use the docker on the CI host, not the one built locally + # which is currently in our path. + ! /c/bin/docker -H=$MAIN_DOCKER_HOST logs docker-$COMMITHASH + echo >&2 "---" + fi + fi + false + fi + printf "." + sleep 2 +done +printf "\n" diff --git a/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop b/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop new file mode 100644 index 0000000..03c1b14 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop @@ -0,0 +1,27 @@ +#!/bin/bash + +if [ ! "$(go env GOOS)" = 'windows' ]; then + trap - EXIT # reset EXIT trap applied in .integration-daemon-start + + for pidFile in $(find "$DEST" -name docker.pid); do + pid=$(set -x; cat "$pidFile") + ( set -x; kill "$pid" ) + if ! wait "$pid"; then + echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" + fi + done + + if [ -z "$DOCKER_TEST_HOST" ]; then + # Stop apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + ( + set -x + /etc/init.d/apparmor stop + ) + fi + fi +else + # Note this script is not actionable on Windows to Linux CI. Instead the + # DIND daemon under test is torn down by the Jenkins tear-down script + echo "INFO: Not stopping daemon on Windows CI" +fi diff --git a/vendor/github.com/docker/docker/hack/make/.integration-test-helpers b/vendor/github.com/docker/docker/hack/make/.integration-test-helpers new file mode 100644 index 0000000..7b73b2f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.integration-test-helpers @@ -0,0 +1,79 @@ +#!/bin/bash + +: ${TEST_REPEAT:=0} + +bundle_test_integration_cli() { + TESTFLAGS="$TESTFLAGS -check.v -check.timeout=${TIMEOUT} -test.timeout=360m" + go_test_dir integration-cli $DOCKER_INTEGRATION_TESTS_VERIFIED +} + +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want +# to run certain tests on your local host, you should run with command: +# +# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli +# +go_test_dir() { + dir=$1 + precompiled=$2 + testbinary="$DEST/test.main" + testcover=() + testcoverprofile=() + ( + mkdir -p "$DEST/coverprofiles" + export DEST="$ABS_DEST" # in a subshell this is safe -- our integration-cli tests need DEST, and "cd" screws it up + if [ -z $precompiled ]; then + ensure_test_dir $1 $testbinary + fi + cd "$dir" + i=0 + while ((++i)); do + test_env "$testbinary" $TESTFLAGS + if [ $i -gt "$TEST_REPEAT" ]; then + break + fi + echo "Repeating test ($i)" + done + ) +} + +ensure_test_dir() { + ( + # make sure a test dir will compile + dir="$1" + out="$2" + echo Building test dir: "$dir" + set -xe + cd "$dir" + go test -c -o "$out" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" + ) +} + +test_env() { + ( + set -xe + # use "env -i" to tightly control the environment variables that bleed into the tests + env -i \ + DEST="$DEST" \ + DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \ + DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \ + DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \ + DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ + DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \ + DOCKER_HOST="$DOCKER_HOST" \ + DOCKER_REMAP_ROOT="$DOCKER_REMAP_ROOT" \ + DOCKER_REMOTE_DAEMON="$DOCKER_REMOTE_DAEMON" \ + DOCKERFILE="$DOCKERFILE" \ + GOPATH="$GOPATH" \ + GOTRACEBACK=all \ + HOME="$ABS_DEST/fake-HOME" \ + PATH="$PATH" \ + TEMP="$TEMP" \ + TEST_IMAGE_NAMESPACE="$TEST_IMAGE_NAMESPACE" \ + "$@" + ) +} diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc b/vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc new file mode 100644 index 0000000..000fb35 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc @@ -0,0 +1,38 @@ +// Application icon +1 ICON "docker.ico" + +// Windows executable manifest +1 24 /* RT_MANIFEST */ "docker.exe.manifest" + +// Version information +1 VERSIONINFO + +#ifdef DOCKER_VERSION_QUAD +FILEVERSION DOCKER_VERSION_QUAD +PRODUCTVERSION DOCKER_VERSION_QUAD +#endif + +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "000004B0" + BEGIN + VALUE "ProductName", DOCKER_NAME + +#ifdef DOCKER_VERSION + VALUE "FileVersion", DOCKER_VERSION + VALUE "ProductVersion", DOCKER_VERSION +#endif + +#ifdef DOCKER_COMMIT + VALUE "OriginalFileName", DOCKER_COMMIT +#endif + + END + END + + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0000, 0x04B0 + END +END diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.exe.manifest b/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.exe.manifest new file mode 100644 index 0000000..674bc94 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.exe.manifest @@ -0,0 +1,18 @@ + + + Docker + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.ico b/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.ico new file mode 100644 index 0000000000000000000000000000000000000000..c6506ec8dbd8e295d98a084412e9d2c358a6ba39 GIT binary patch literal 370070 zcmeEP2Y405+TL?c0%F4sD!rrFr6_v!{$4AJ1?*S7-fQo@E1)91i}a!b(rX$m0Ro|S zl8_#Huc9K5%>TY~c9PA>Nlzfm^YzX_0Ys1`^UF*YMa1~pn zQjstq44g3rHUvNJ**NcYPxRuB0h?D14oKMWTR>{++JN8Fl)E5}8uGT~8vB{$p7Fiq z3GHSD%>rR6Jw0Ie?|buOFq?qadi(lEl=jLTyr^ZHo z7akkcdFke*UDm`d?HV1stb2UylFl*Mw>EBh&(-lO`z&8Q{gHVQeLchH_qMvv>*e}n zb`R@6u;-^>-%l(WU_HF#C+qHIL#^iPMp?BEgn-W;06W+I1Ne$J)^pInx>VeC~g3qScfHtL7TiJK;c zro@FsC2sibAoeS4E1a*Ay8wPnjH6}NVbS~}?= zHMiddDy*BO!n(Tvrj~*aP;+{E0bihiKd`-Zc6VzT(lGdjX$$&WgBJa0y}Pi#)e_s) zR*keSKCsZL)V-|m<$Jfyd z-o1A7ioUNV$IkjTe$BAx#ElbAr6kOUZO^qaPo*S8*tWem9d>>)HF4pIl*EV=DGA}g zd>)^cf%v&;h$Yguq%KQO*f=45`IJY~X7>zC3+`;Cg?6>lVDqPJoJ4xU#!1WtoC4B; z4BvpVYxOjiWzj9ySk~ zj(}}PoWwS5JlzJ$;2-ocJkEi*xoS)D61{KjocGk+{+Fnb&USy_!wp-`?DzfIHuk04 zFn)qQ!uShQrsyM#pD^(Q(w*=PD-kP9!Cc~th!YyYCtT(f|D>-i*VK-P5qe^taiEMB zoZLvh{4lV2c^`MurtuklJ?{+T@Ou+CP3WDpX;QL|-4nuL&#+y@>4@QJJ&(`1Pa2$OBeT0r57&rJH2VbBt z&#)aa!U*_-k1#*b+*iUO-7F8EIr#3EZDF7+G7!IZxM%mlR9EbhuDY*}jqF%Ee(i|& zF=mfTiJN^qIev~xO~lw-_wC`Zb@uDFAIQnJ^V*MbLTVD{03y|zu(#CgUgxNgA7Jma z`yP4r_xZK=ypA*ez{Cpl4SFmQYR@qsKKKLUhB(9rLos&v=c?h}@q&?aw-+oIVupaF zc=l-aNh%DK1qL>+=z}@@p`PtqmU-=dafj;@H;x~H`T0Z1@p}B8o{|`W{+?rY_VJFG zy(oFu{+8q@l^8ukEgAa{ZTF$wX!nfui@nb;Or7)t977Q1{B({P;0tyDzu?)yODjiM z=NdV~U%0p)0dtKqe`rXNv{W(>z2IXv*5T;*b?R%Dd-Jj$EfUs^o|YUNl*V`*^K`U% z_VLAM^TuD$_EVRu&8vs0@Ike`Iz?gm|(@QtaFN`QW>OC6^eoQHN)Mh3E?_c-8BELrU~mtO-zmr{uBK? z;`;@(dyMIev9C9F&lE8QZGY3^E^1Ei^HgXT+L7mU+jCK`XJP|0caY9G1NeeC_=3Jz zU({47uiaz3;6kk6VN6h=luT~6)G-jf@MAZg*Xg;v^+D|}k6$yiJK}uUI_y4i0sH!* zkLjJZ?whCW`t*Vqksn<1UWzdAZ-(S?-u6ou0xNe=DEUd*l2PwtLg>rz}_N=6wvi zcYB|~m7cMFPPSdp{bml4_h({)&F}+XE*s{3o;c=RtWCxmBg6?351c0bugI6#IPW#r z#<{O*+h055!J6@_hfT!#J=#6S^&H>x*?ck0>E%=p$NRAT_2FN5pTBp{*Yo^>+Fofp zUdIGx?hs>u!+1u}W9bm95%rPpAn=(1)*p! z>{uVRUqa)1UmsxGKF9lE-79(5@SeVBd!@lA=$L@<0DS@04;{fcU@-hZUA4nIM}T=m zJ}(eIP=dBl(W;JH(M|XHk^QYISku!xDK-et@vuHGF`Vml3cgOKFz0Z*k7s%DjmRM$?IRZa=vLwV-D+MR_y#<8=USW$e_%g@D?`3O`vJ6H`vHsr=HY$A>!=Ci z5DUNuNGwozv#1bf;$3?eY+c(v*73cIHGC>1X%YH+zF%8-+c$lDYGOESev#U>ZJEQ? zH>x8CqJd2sw0kc$==}$FuTU$-HAH`J!QKNiY}nXxp;L#SU>%F$6CwwDblfl>K4ETe zuRoyuXZQr<%YZ+~cy@^O1Ha?>;1BD6vCg6bW1H_BN-R(zthwMfnH)D;_YrZchjiex zJ*?%y8s2%F<43=*L0{ktQ)Z``eu8$-^R^~0LjS%@?ccpd+jgd{Z?bLO*nDQnzW?aq z6t#Kbt7_3t7W(x{#b^7DSRjl4@Z%FWj$n+yu|pY)5BS_rzjJ_RhU@W7!@H;voD;xz zjwBW+xOtT8Gg6b6>V5&=*y)rUhj;Xn7N5dAKfblW@9Ow|TPN#xc37Umclx|*{nKIh z@DUMd7{8~%=1(2kzu^@6_*3ZDZ7{y~#`u~2eWvXn-nU*IJ(Qq!Cr?*VBhFKchq_?< z0VQw$e*A(NPtZ?r-huvra|v9hYhr^^921zi0k$_Cw*N!iB=0&xt|i8^Lx}~-wN>Zu z<`UNas`Vf#W@cx`{P-3}T4MARmALUYm9SyVse}zi_;}2sz?;X?A<`L%~+?;;oV9ZJ6PaNL2@g&OA zv-$zV{)gh##?ZE>ukZI``&nWGlp9~bbjeVR6)?Xzzn|Od3+(3&rPLSjU86L{FPIlt ziM2w_s2Ruy;+tmT1M+7k<@%_^b-%b$;^#C-ik(?!^NN0TVxqpUyK&w-bvDd?vG#`9 z|Es-j)?>BS&TLz2-KfG&${phb&ej2 zYqEbwd@Y`-m(9|&F~A+ z^bPa_CyyRZJh?MryjncWea8BIU)%S$ZT!Jv_yRt22=9lu0N)QRrE!6oALO&b!+1vU zsyc4DKD^KSjG$aAx7=Kg4B+{YCmi3s59@3NOvRe=91kpFK|pVJrU zxBz1W_P1Pf!1=>c6c_0ELADKZgCot{ApC*M2^3Ms%i8If6UuR|s5*R$XHR(bb7Ci) zbLpmWRW9E&?ySq#{ZgsU+Mg@cBQ}hwTyxE+v#wY>x@xnHV=inIJE8jJ2f}X2_C4IZ zI2nAa6%aMN=_T6}C!L=d`9{0#vA;~&kuZMt^3fOkv3LmP{D%dkmo{6^t37&r!1)3^ zQ{ego&K;Jrn1Hc>nHyZOe3*4L<%IEoe)f}>%%r^RGk~=r+V(O2dmhh*r$Y7<5Eo8` zUQ9>dFdOqli-FbH7YiiV*ou_jv)hic6g)pqTsl1P;L_m%yMaVJqhE?~@hrr{(-#j3 zn6_kSz|>{KD*d)%MCHM&Mpk}r#fVDv7Y#YL>Z)IBT$&Qr<~*g+{l*)S&sd)2L#nh~ zHazgzs9^!Iw!QiB1Dq2)GxLIZ`2bUHd;!M?^D#bPzgvo)Cu$!6dG5t?!TYGyd_Ex0 z2+O{G=XM9?8xrXM?}Y62yb$}RoZ!?~lMf%@!n}Ouni`|;xu!!sr*O{SQNsfNTt2)~ z!m{B3-QXu)TsgAJvm1Xo{|dHw{mE$Xdet>g>{QgiH#)7X<20=f-|N4BxocrZh_8Gwc zwbS;s{%7k#JOhfRJ;Uz*2G|!kVb+nDHRj$qWPJO6;UIS!^qJ#wU!20elYYSc{>Sk{ zJ?VOF-!M-y?0n&hk(K)|9bT!?+EM3JC)-g&1Dj{qzis0S2U=<|p2aO0Q3XCI0Q2zJ z9*XVaTpv_|{zAtD@LOD;%ymZeyS5y1^uau)1i`fex*x!tz_;Wb-zj(JHa_Gr@+*4` z1b6k$^FYqMZGF-)U|zaK{hZU&rPs*$uBU}<7qstXUrx|hoB-$_OdaX8cf|gwONV

-m7;XQ|EM&#Ha#!_>j8Q`Nqt(JCSGRkd`~Meqs5yhbpyO{Q(dnwqfz z=L_fdIpcmn&k1mRg7M(6#7W*YgYsR0vS;HtwHV)-_r}yeS*jGCU4^y+I4OdZ-Gu6QZN$Sv! zFtuuY<1*(1cz=e|g+F=py9kE!{K%%5kI7gs1RpRhak6zDb&3BAIVYWx{bhpz^S`Xs zZp(wXf01qbjPY~#0lMEWaohLjL+B3>8+hmcBQe(B5i?lr*%PNaeeqxQ;J>a_58i!) zdf={W)rW6Cu96d1sr_4KV7=fu-g$viJ};>KnooRT`~c?)`A*Uqe0Hc~Ld*ruTRYn8 z15DphHnd$@>B|7(I{kbX&v~C_j0d~VSKQBIIcHT2F~2_>&wC%n^jHU=)=X`o{`wWN2gQ;$E~TBrB@-?i$?kDpgZ@V-FI-1ce_<^xX4`vN}p>(8bOvoFIB zV6CzC1N70Se+_}|8{h*LZW?13eqHpJ&`pJ&VBm@n_WbX$P?XMd<&xto&TX$1YaSwEu^qz0>|5d$1MqE!3+o zKBOLbpt)MUG+6D99e{ZX+JDA;Vwuky8Sr@l=U`8>574>-9}tpYzdM9ygEAjbM%;i| z%dzoS{k(6Ztvk8){XAwq#-F9ahV1@7cHWcf@WDj&+J7HVZSQNYo_O>&_3-`8)!+eN zt35kbsrg|e)%I;0Rm$>@^*cjlrt+^!<#j#D4M`;>bA zsk_wA!@pO@4#(l0p%j%E^`Up{kK<2EyGqLE<+on=frW!S`klkm6bqbUAA+?bUsCr& z?6o9i!44{78xi~KIsd4itb6&q-?o1~=OMbE&d@hb?e@PxPF2>gc{D>d#}T zYR@iwS785Ub#V7G6}RwZy%u;0*8w`tDerkBs|c*eK22kSQ_v-aHRR9GrjW`zL#5QY zOz-HmfAsx*ZJW>anIH9s-?%wr&-(JT+W;NgFCBG(T0ip+tOI;nZ3?+pE&sKecioSE zPoQ+a2T)S}$czc*nK3~QK0YVkUx59(AHn+qe_&nEom743m%KY%%6tIE`+9x`?{-$l zdY}Dve3)zB&+k{5{-mVzw5at)aXtWRf%uG%&jq+Pfb#>~U)tXVEb6`I=X}bK&kNZ{ zm*AK{`vB-wD*i8d9jd;0ucVZjL4|D&p7ZN@Kg`oUWY7EMX#4d2y6?xFQhs#o%pZe3 zp7w4~2G{zPB!~0?oWJF_$@DtUl9?aWu_XHbxjSZBfiyJptf|5pT`9V8_+}lGVXkJH z9oyyj%%A;4S?K#q(q_tI-6j^8U2?I&N%kAiv0hYtj0t2MPzrp&GW)x(DO0U;Ap02G z{>}Vfwz)sba8aoq_m`Epr1Z5jGZyf^Pf&nyel|Vvb8b5Njx>%nA_rRSsQOqZB=do1 z*azU7K3WENf4ePwGRJ|8{r$%O?EB}I;yM5F*e{=ndor=W9QgWTewTngfZr6|xpKI7 zE&y{+vOe$(`T)p7+dswt?}Sm_-K`UTe7E-RlwGNN-k;}lfYR5`o(Y>S=rwgLfH@vM ztxmJyk^?_%?1(I-fP5zT;7YTsP7O4Ef?x&%j}i zzx~{QkX0M=GTJ9|e$Ou;uZ06$YUv2B_Z3t$4k-5}_#Z3y{|{=m{S4Tzp5&xI+4pQ+Hr z-01+*YzRhq_`pE5U~tJ|A?nWyCEbRi)Spw^i&AGvpPN&go%`7a?{+0Mr*B|3{qUzV z`Pg?dxSLyr^sJ=E0k1rD?fLE7mYlv9(2i((v_aY;^OpcMfJ=cY#T5gfu6n==z#t$J zI0*2273sj=!0*6e;8-z%^&Pcw?g&bYpVE6u5&b{jc3+fR<4ZHKy$YQ7!%VPsR_^x>A&bzl89dU^z9 z1Z}_x?lXGKV|~vr?6#@*IOq$D4ZVEliuUoTJ;HSvT9ucHe_*-;Xx1J-o*!(0;b_X-B)cuRY4LoP+kZ zz2D2DX)o_T%QXwZGtYN&&AQ1qxnX0R$1zD)_Vr>f8d(ozz0Ap;L_si^PTmeUe0-V z2(zF2m`8dc@H`L#Fcu&`rvUmT`pn#L(mu|Y)4#Ak1z6SopRRPf2V=! z@Mfd*FJEh-K7RRX^~Q78sTUq^t{#2R>mR5;1a$!CLSI^GnK7RmcpS^A>pY)!|Ma7+ z)cgN!t~$SWqZ;_tb!u#vW-6%P)hc30Q?-0lW3_Iq7n{a6QZW;}SU;|@TK#Kde7n1e zS}?S!3hIBgn$Z0kHKNnCs?Vp_s}677q~3k8xqANbmik(}4)GAK`$!wF-^g#9o%ch% zr2dl!-UI8T-MsX4OZD{|H>rM~U$1`Ye65<%=W4ZZSW~s~m&R)Cn8qp^ZI4FVc`w`- zIUH@qJ&x^ijT-pn^{Ug`H>%hF-9pz(KgM=G%68&f+Q;R=tG_n#Jl0{_X*hWHu~zD> z7n-ZD(B7UO-=KzexK{nv^%^y^@6{@Na8nfpPFJCgY%AML&e?t=hZUn6>w60saFrV0 zt(hA9&2_5l2REpXU%FX6*S=MTT-)A{{(}C*mbtTyzIiXN&<(WtVe~Z*-G5s;V04Lk zeE^6776PHbRA3x1(awh|Usu3_(MjdU(x+R3(>do?Njg1Hy&V}1t^1bhPA1vCPxBMvMxv68SDsZpyI&y z68N@Z9j{jJ{^wfw%9e;N@ZB}|0e}5D_wn8sliYvL?OI1(c>Lyc^g*io2lckYcRY>z zydJ0nTmn?~XJ6P)TjX3>HK0Cl8$i3A03-n?0Hcqz3&LzWQ?q|9${zwUu7`XUd*rsi zwv^O9$MyAC!`}W7DeukrG;=Ssb>kB_ZWsYP3S7wcA87T8q}}lYEt$+>^FOTa)Vlg z<7q3@A!FkQ=G52wc8Ao5UfRGhJT5Qn!7&;*PoIx%+CS^0k8ob+5cbjEu~6cg2T>pG z$Z*1Tjzhov9^x1iXZUL$?T0>7l^f{)z zhBt2K#W%}M`zb4)%YCfR_y%5QPsrseVsKT}=aX9M-|buIab&xPyzw8~OSJX&_3EXk zZd5%#s;j2=x=5}0B`_W9&s8e0eTMZI^%ZGGJZ$tA@}kYtCbQ@=%bh=zbH46wpbxfU$etgSn4g$bSdu*u}I9 z+q|bN7~l0$U03vYS55T!sqFWGG^X$aO8W!&gw5kEl{mQyNM*=G`GI)x7e1{=?zubk5{WNUuT=W zHe_tf=xCdJZ&fco*;4nPTM<*R-=?0M`b}EYwwroPN}s}5W@m5%6^!xCo6o_9&=$xp z!%rFCM8EsN|C*~Qy_zY;BD8Pz&1@&zW$JWtvA2ZyHrLu0#&*vZTB`Wxt5THFK|QrD^tzk2ll68Zk!1|G~cYkLO= z+8&wu!00vYL+6d&GsV7(0LBIJfbj!Po9SjB$8%Xnsj++Keb{Z}z3Z4ADdz`yKlEu# zhXb`7$MJr0^kcAd9aA7}2<)NV1KR%WJhmwh!~oOIfv<1q)hp8}sK|FfSt4EtIz z^eXkvf12w#y}@5!r~6{so9;KzUli3=igG^nlWk^7J7Hf%pTM}}qnDcNc|?x8$s_yJ z9vCAh!KP^Uj48~yMQKBR&Nc01zpDM~tOkgyuho8v;}*tKoGWl9>;s4 zZELyU9`kdpk~(H@XhXyY_0+Z*Ra7cs$6vm$uI>ImPuvWIko`Ze#1DycQ>uXsf-2`Ox`}wK+Odo^xniznw zJ?~>LpkpSa#)swkY{GDh^jhEuK>pM1l=%$%-!lDt*ncDFmA7xn&>h;94fb0c&ra*z zOzYnP#I`KUIJTJkp2F6vZOm>nZD&tt12y1_>-Bs%eE{bTj?9CffPc_&?ipyev1R%J z`XJ7WzVJj#{j7uYhN&|eX3Tk*{=Kk#l+1Yz;AJ7(`6T4liB8Q&C*dDg7`*^3# z+q#ye|4D}aT})d;TBQEi*Cw#t65zZdZAX8b5NVLKF+XRKc`x>bBWh5aSl%(%Pjhc{pjznPwo zEG;%gKft*H&ebg)*;IY~=1poFVgknayMv35hmw&~1Ni=in3KIo$N8L3Vt;SzS#GI` z0Y0TJ7L>pxa!^+L#kh z95-&AacO>S->HL)0rcE8(jvtGPMd(8Xn%sVF7Pkgeu}Mr=e0^QkG7k6fIMS;f9(LL zrvJ~?{^=KX%&L}e`_D-a%EJEQo*ygDdoImw%j-Vr1E@osPoh5M17;rTW9qKaReyS1 zn0>Gv^E|Co&-dkaZ@34}aTMBb8BUSr8vj#PJ7!g{5cZ#!9!q)QVqH8RlYF0p^{eZ}Pa_hU|Q zcW@0n^Y`Qx2N+#k2i?@)3_hLqQQoiRu>V@fZ^?)Kr=t%S2nW=GQ#MW_CAggi4m^XfV?y2qTkJeT^P4< z-r4*7pU(gV6`6y*S>$hQp5wy?^a1n-Oif?m*C#Lz*b_n@5SUvWV4ejA+xG8*ZWil2 zKz?kcJRbu--N5z0KY&|-*1p&_JI5p>y$JARF@Z*JfxnS8Q3t{J0?R~7w8Uj27V|czQ=jJ0R3IqVp7Vt9LNBb^FJ8?Yc4d^GtClrLZ zd>#v3(lHs*e*y|fw^QZ|ij(#^TGpwdRja@jHUbJrvs2~^f>ZYKVF|O&(Yk|cyl-FU zR3X~vyvKCd|81lS_Fo6r`MZE^^SzIg_W3;x{b=9MMQm%Def}1tm$P1^B_2m_n|X-~ z&yEB4hc(C&!|a%K>Ft<17_ld`?s3}>&G5>mvx(Iup}1GJ4NfyaR(z)ipx0PU;_Fdc{hE(g{D z!+}cyp7%8Hke&Yzn-xE^N+sAsG_VRd57-Cv0BQq=fR})~0NT;@z^A|gpgJ%f;Ca=6 zL|_5yFP0nrp#jDaBQbx7v4I{7 z&(IuqD z{uu|{0UQEo=dS>YydWJ9gxL8N$oI4J)Ki|<0$~0{?vI;!7VUpCu$0?KcLN;&4{#88 z6le<2)@bLi0sHJ@M<5?%=VOpx%shXn|2_LU)scP#q}uz3AivtqCn4VvI1ixh(5LZu zw(nx#ZRUOKpY729;}$9NZGh_l+J8;pzrbGBk8~Wc#?CKAzCXb64v+26{ruq?V}Rp! z{x{@9?Yu94|3cZ-0R1l0wt(S%6!QCkOYCz`tNrtu=C=cL8nAQBBUICNw`102H(~rg z+nyi59AU$>9YK6wz_0!D88zQmBbax>>^F0U&N__mh{rjDfLl#_kk@kvW?RyNF#w(N z`t(fuzZ?7h51<~_1l|PFSu0ZZL$mGtD&)1EBc)B<1Keuowf)CuxBtDc4cb5b!eO92 za4kSvyBv5OIKcXmjs}+5dHRq@=J~@ljQz8Hp90%}K!EXxvHxV`85>*(u>XGwxB_5X z>DS(2-jDrr{7(PGHa!U30I*N52fP9p|3v#=&-Nf)4h#f1-r=#@FWPOT{r_#}=|6Mg z?|zhB2eALY7T~;rvH#J?mkRsmeekDaf;mk*C)PZ!{Q%w%cpc9W_tOu+KcsJ;$@d1c z`2eR)=a#3v6J{+7eS#TB@I2Yq`*8yGz{Mr99?Eev{4CH42?EiDJ|NBsO4M6*6`WRsB zztr^qzRI;L_zKVO;Wug>+BWmj`j{h(;dp>!0jxdA&BhB;M!RRsVB!JhX8}#wE~JbF zv_JA~r)>LY06Hf24{Gf3KW$#@-@mHlxEFJRdX6x4+PRlvZg81BAAos5d_x4^5%Tj) zpfI-YJjcWaW~}f#?EY;wkoHeMkYi(|APWOx|5@h$#Qw8rN8TG)TdMopq$y`#0^47o zY5SS$1yiP;Qz^5Z4LX>jpUv;P(XU;+yo9OUeh(C(!;m7GOREFy9`MI3T;&vg{H27yHkm z5Cz!4|4`6zKh_R?Y|sCnaN2%mz5%|+T3f$2Y}=@BKVDqhP9IQ-w}R`*d_XQT75f+a z&qY0QEW&)hwySM3E^f3pw2oN=knwInrtN306NaCFo#*>IVukey#s>^x<6-|21LP=0 ziU0jP|C9N@T=c~h!M^ocOneVylx^!-;(lNI=Qf@R;Cr-1dM=nBpJ3($c?!1n^&z|R2J z)p9Lf9N=&LZyj9kGl1)V&jLmOdQCo3uK(=}Tmo?I>@xt@3$v|UH>lST`-%Vi;aXgu z%r>wU`fJOO;iuhHYID=;7UIqWhDF!OfKzMr##51UExV4V zkVXQ1?EU6EzAwnUwtxHmpNw~RU>nH^BAv^#Q&&dl^9c+QB-J4hFQ%BV7mh zd;f>;#=Q?D*zM?t{3<&ikNoEV{Q~_1*VA&{KihX6@CNff_MeXHbhoe1Hu3!*u223Q z;QLAc22A_7{@=XU6p4H<`@CbwJJs=YcgMg8O;rqB@Oah(00kawnhSJ-*pPHb1R>(nx1{&fHNjSyZF^L`_3n=a_s-?smn^FsSuo^8{s zdbZBIC``2M|F(GycnjzN^ag&k!EXh82k?6Wj{`jREGHM(=KDZ0@9!kVPaZmD z?4SJ)ZHD@H5;zE?1IK~=0Ckae#$%X20{mg;SP%(HE2 z0R00~-q$YJ|IKWxz5ag-uERF)`Ue2EkKY7fo<4y0VcK^Td5&RtEZfQZ;<4QBz&iQE zHTnbOl-JCQzf(Adyz^L|LtZHZ=2roi*>#=~`{%cUc>OiMSZD1CtsUiWy`Q6>_t*Dx zAL4)X|G_nN9Lf5UCI{+wcww(adY9LnZ>L#&3a=x-oEd;_oef+KF!m&_0vb8;4UunZ z!{nP_n|X6AkLSB<7w}5S!RNUSEUeW4Xa+R0)2om+w)0JqHnH>E$L*^D9?RoQ{u<=# znAXE)F9xm!8Usy%tL^+%jy(6VjK?(d$!DCmX{uYV7vCFkWuSdcXZzSTXZx6Eo7h%o z`?!zWZ1;6Qb<1lxR`vKej z3o`dd``7UTzA4z1?f*Uerfj|p_N(9FLF)TWK;5TfKU01ykdS`=(AIzXe)Q`^OsLdVD89j}ejT_xCF#p{4Am(Z6tBzc`Q)d!c{59H_Nvs^>~? z>aLNQ_b?Bb@+|q9C?j}YZ9sqfij?hNQ1d*X&VGEe3#fs7EpNVN&8+zv*yb_19LHtk zxlf;mxW6*%jGZ3n!8M%MWSgCBMLV4B(KSrDB0?YvZ1o%v! z&la`<{Jsz88t1E++jLw3`+d*0Y++v%fRl9_T;E5`5;RzvGFN_R;~M zUpN3f0&otDHd76F4bb1$L^>QW=S3sG*xV27*4RJS415HnvJ5HL;OOW7NRxmL0N3R3 z`P}0`b%6GN0q_>{uz&siU)cYZxCYyypX1y4Hb}1p=&R@-{tN8kwUGV>thV#?8T|m> z7oRKW=l|Shd3pI^&cCSkkMTl1g_!>Za)Nk5`u_5w9DeQt`fu!Cum8jLUV!#r31F<< z9=Hym-EnQs>%ak4g>)2P&f~g+NOK9;t+D@F$bSlK0|J5JfVN4b$pD|tUkLCy{Zqgd z0PX*B;9chZ*neMKi+0R5JqX+YFfOPEyaMR&{~;X*thd{`9QlDj6@bTFIv!xwP;k9ood_+LS_cW;?~fqssOwL}^7L`JrfmLCRS2gd&OIyr39{=We@FF^Zm z2k?0zZLK=M{@fcF0cVEX@R$a9R~&;I-1TAZ_E zn;rn@pJ>x{fR_P(_RrY=N1!rLZtWlTs_lRK%*zTh|F56>Vea4uYWRWKjdWe*N89C2 zd>Mc(i2eJLQiiuNXRqV+t9^_P@rq|2)6@;hiU}r|>@mKIQrNKLOJBlZbNu6Zp9 zA+Q3ZV*lmS{{LQlhqhxp%Nt_bzwaC%w)M0AWyzDz((i)enW5Nz1+aPNEsFh@Py5IE zA05ZzJHY?6-veU*Z(;!S`>_4c+KF3dUR;OzVBQ;a>W1XYKLcX_<x*f6V&Y z5!h$Wi$UJs`#)UI`!TSE4MsW`Fzf&M{!d5XJb>RCb-w>|5w>$$|J#do*=^$cKV1LI zcY109{{zhXKV1L6#y)l_@_dhx>zd2$`#-!_=kMtJ8?+C=cL7H5y&k;Jvp99yd3AUU zo)gGizmu#An1?NJ{qF`~H9-5`1M~)(0DS-N4S+Uw3b+N}`?LE1zH2iP;5YB;0tvu; zL!q%j1v$BJNcfE|yd$V%0#hbw#bp5Y#Qy(HpgYhN z=nQlLy4iW|GyA!ZWju!YA8fF^7x2RFaPQhN*zK(Vzah@PpXdy9^U3oYA1vd3mh+q9 z&OEojh5gs0gwX$2#Wgwryq>dtCePHgmG{H8vW?6$jygciw7mQ@CfjUwf@gv|f@ z*^fNu6Zt=W_-#?<4+H%FWz4huIKcVG0|4g|dG0=d)m0x(pZyoD1;SzV?2xe?zpS!aylx z0JiG1f7-nMb~jSy+W^-A{J(#-ftLaOePX1(_WvXH(Qoir{ryj*V*jO3=F%i#z%T$C z^|gQY|D0bm_J2L{jP2_Hw12%m5h?9ofBzRL`+wU1Spbg}`!@tjDh!lj24JiH?B9j` zwEqVH_W${{|H}5UV*jOB=F&7_z%T%N75g`2NGc3eFb2f_E11CLc7*{417iOUG9)Vu zR4@j_{wtWk<#vSu2LodN4l*Pw3{)@%#QrOoz~y#@0S5zO{|+)FD-2XH2E_g=n84+B zg#iZxV*d^@Br6P5Fb4eCe?RQwI)1MIYYXuGAFlu7`#-M%`$-Yf-+&EvemU|#0et^Q z*8f&89g^EEy9~fyo$LSj{$F?7rcWZz^}p8uzXR2P=YZ{YIoJQ|?-L_k1eo`Rjv+7W zf6J~)NXsh_1F%*7jSi$&0(_r81>iS8*#9pDBJBKZr2MupkC_Z?1^7(>e*1&p2;h0s z0G_A6|A|!I|EoZ{B)3}j7=VqM{{J%MR{*r*T|hKI8{cl{cOu;a!~%POL}0U>k3-7; zH?R%haUYTir1JfrvZoT#;tI$BZ581E1>twV&Ic|As@gF53$V>&SkB{2-g(|7*nf5f zq)={I7!U@80bxKGD3c7J-{yP&j01`aJ*Fs=BuJx$0bxKG5C((+VL%uV2801&Ko}4P zgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801& zKo}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV z2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+ zVL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG z5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@8 z0bxKG5C((+VL%uV2801&Ko}4PgaKio%rT(kCkzM!!hkR!3*9?;BCD=sDr-) zQZvr?e}M&=<$gKJEVlgJL1y!C;2qf;;Ao!ZkD?5AcpX0tNacR>b7il8fhp5zhinIQ z%u-%ETX~k^>*gW1co-k^_o zP(@Y=W-rew!7SRT8<1CdRvSt6Hrn~OgYyRcl;4p@d2MI;0%y(Md=`QVQJ#fBKj&v5 zu%q{y&cAt;XCctf`B@0et=wN(Mnk|-cG+f>dhIlGGh0rRxWMX=tpGb}t5mO}JM2;Z z%TIZ`qvKWmmZR>DT878($f+-Z5%TMZZ{PEX5r+^Am?i8TE^Ybcq z3fSNId6suDcjqUi&~qzSu;@GdoR_)Fr##iF+|jztvUJKbZQ+{}u-~$3u-xPA!H-)P zI8E@tn9znnBN~M42pZ5JWE9S#OT5FJtrW_y2^ujbvz+~f!v?dLr)Kfe*~{s(I%HhH ze|csDvX`TNM?aXod;!Wc8|c40Q^&HGcf|RbI+DE{#d(%z>WKgIGYQC6?(LE?2tqmJ z|%Ltzr4#c$@N#yDW5Km zPQa_5-lMaZHwYNsz}g-#Ag6NZr@ntvo+*6q9;3g-Rqiw&*6CY+ZSRFL{uZ?Sj*K(C znXKohJFQb6;9K4ykMn(secG?Gi9c)oDw}wH%Dd!Ie;?m+`kx^9AA5g_Z+TYz@GV7e zMgMo$Ve{D!Df%ARNv@T^zvfz=d4K*7lX4^fvdK?g^3O|u{NJ_g6$XR>VL%uta||@W zFrlL!D`2=_)nN`lmp75W!hkR!3AWd(%ywLJ5BTdv3-<)dghad)QoC#)UrN?8A^Cpm6Tz?Lm*a+ERK zIk>jvne~!&R>&vrv#0*os|V_hy2wc`wGTD;>HP{EvcDyz9ID0!1#5eS|n|n{8e)7%sI)MgZC!K&q+&;pRH07 z!hH~-wr*Xc5;sj%ODDEbvwK!j@cAmNdv?t3VJQM-$LI94wgK~D`~Bzlww_zi&$@d3 z&-||inZIe@TJAYLtN@gWE@aYDY&rON_qNpK8QjLN8QM50dRk}L_2!iLxsdaGAVR?> zFii(eLjGRF&pV0zr?#XlO~Qb;U-%zwENl9= zmJ9m>7Rfjw-{ObA58%OzhFukyuwiVc^(Jku}X@Wp~45% zRUxqZ**yb_L+<|SpzNs!S#$xmzXAQj%gY8^XY*cW{eT#ur&lMqEkFMZB*jkCvX5Ev zLzATFsjE`r(YGf?(6@fuy)ODq`qW+U~>P?-}bk5tc*jM>{S8s0;K9 z^bgu6K;|jXgV&Z1v(DljQwI=BX2df7S*VzMVdq-*acf3AoD>^$%p1>Xd(Yah+j@`; z-7uhTq$2*?k`$qKZeOcbO?yzyMGPM?AW(((cWKP;=Th_fy42j>E;XlDMvR|TFN!9A zr|y{8(&)iv=)t3u?4+-BY#=eBL(RACjMp{+-~TqoaIpD>wD;pcW-O09om->S-kmGefjw*0!M$tL(ZeZf+oqq?;=z`R8WE_XhI_JM$uN&177z8P$iW`9 z@FzF)1iH}&v0+cI{Tp9V1Yclu!Ndw$4-g-OVw~8Jx6Zf#I#J<$!IoXII=;u4ZUE-; z=;PD$xDLLYv3mw!JkpE|OC<4L4xhxV>XUoo~G)4CzcC3QDJ?-`nhlE zdryp>-Xtk@*6_qlQ@SN?7}ptdb>AcQ`!-?2nC}xePV9_%n;u(}7xdk~Giv7k9SdV2 z=fs1%m;Zin_X>4r-&%EO?^^Zy(XA?B$%pBS23l!R!z<-2d)p8A>V@$cOQ0Kzhj`S2 zfo|;^=nJR|MT{LX^Z;_7jd8&h)C0r?y6hIqX`6P5=(B7?cV^@r+d1~v2) z1;h{30j&##ix*6ss37-U7%%=G?+aW6U=G1AAI|$O5BY`meMSznE`c3{&F<-n#uzPj zPEXIaIlVmlfP=^%UN|t|VAP1p$ADc+hF6YRGOW_ZrNb+&iW(j;3Vr<7kprxDv);FA zO?l9*?Puk%i!Xo=4@J3(9OOE+V1T6-{Aj7g+SZ{HIoN!D_fr>mEc=N0h$T(`P{=re zz91d_K^o$KHz?Pc-&+BT2bO;w2=3zT^C0`7Hg{(HY~m*Ecj<$3mW|WKEANJ#Pg^#k z(gRV$1H&Q*S!&&+`lnK({-+YdA4V*A7JRwO>lccs18k4+36by#9A7|&h4lw|oWS!j zhuD=m5Cm@U{Hfgf0oJkz5dFx>YjfzR;{kDp?)6O zMj;#d8so_+E@_2AvtsxF;gRbe6H)aJPlsf7bEFHnTBL0&q*a-#>FZ=yU4 z=nuR)(90S`9hmW5`5GGzdBfYUK@S!=ZJ!)5uFem?xgA5ZT#;E^1*H$GZ zty6y;T%wkYI9DwhYL5*HVc)r3*Iya+V9{Xjc%gtgz&T|4f$ymUIJaEP4`9wi-v{Qu zZnbS+&xzT3pkwMn_`in0yq{}Jj9Lp zfYU2`r%v(S7$b7bm`@#`ZE$RO0&()~nm5~im@jKT6US_sX$2zgi?`*^cwhICk?@nn zD}VOGoCD-D0HkM7_P%<+=MDMv1)3l5l@c|`x{NYE)8oQ+58dJ_3`Tx+f%g6L`?zl6 zy;HX-d)h*A^!rYZOKYBUQ1k(u)8M=TpEvRTLv0%{w&0VVWI3K!PNw|3zImzV?dO}h z9&g*KP;$sgcC5Q3f9U(Q@5g%Ccd=Gd0VkPa{Vn{oOD!AWR!fKH6Fe5s=h%5}=llG; zuH&q;gz9D6myL8|t^&`ZF=s%VuzB&9YOWUcwO*r)aO@e02cbJ2;BufIPy@IKI2Wi2 zT;K=Xe_s2xx19Ux)7MmG+h=wS96hUxM+J59oDAxU6qrA-64vPeKV?S*_Aej?dsA-D zA6Qw<^TB+atB>VzHn{DtPTzgb^CD1>vwr4zyt56y=lk1s*2(smw)nO?d=TzsKqVF2 z-OB~}mx-M!X!gWe&$YYhJa9|ynHq50?8{9iJmx}xGNdf8 z1Oh3a+sz?V9WK8RX6OQ;)Pzk3QH^J<_J7dJHMIpKsq>z4%0P_1`DGcmermaqi=7 zTV@=~$=XIWB{g0!5)J?pAJg>=XvrZoS4C>^$ zti#mDlx@)0LHXmj_M@B=s=c=BQIvCllpGPDuE zx#>K=-GjFrd$>*OzhF}toRU`?X5T4nlY7d9vT(}f2)20~%R+#Ufm?toxyjzQ49Bzo zsSk7lSU>AzJ4_px{spW6hC%M1KmKs5SH^U%wrhOXOV!9uSETp(q_+C%jb`fQXIiPY z58SHm`B!Vz_WoPJ)vbE_-n(1lzFVnp-n>bT`t~|Cd*Ib-+32Qf!*7jM?8HVYesUuf zH>r_|p3qpW`L&5!JOb-H`!`dgzq?NT@cxbJt>-C+TNLYg;GWjt_*RW}58Vpc+^Y9K zg7m%rwopHPeLc=a{iB+yji`S!>Sf&<$2V5Xam~;luU11kT&F&IskyG>UbL0xvMyaO z(l+h$)F(4m{vefQiB4qAhYTNLDt?SH5T@4xNTOHbdZ{`b_4>)YPnx@Y^gtth+y0?z?2 z0iObcfF-~mfKwJmHpj5P2hbEK@Y)le1F#>^@dxz+`C7mRz+5i|`8NQ)9ykm3j#J3f)!@ z-ghf><7PFfM>Dl)d?U3tw7xoqHn1%`mu)-3x*hFiU8a8Cn`w8_)GO84&eh<1Zq#z8 zU+ML69X0pI^G?P5=2ly$TXV?!!*^$YMuhv)TqC^;XI57S<}}puj zZ>R=*aXsw0vHE*aU6j?=Ta#7> z+A)pQpZ5N}IL@5ouYPB}ye?(-=Yo1FWuGzBpWIOE#BRu@F!l2sp38QoKwnq>(pde4y3Mr; zQ>XL(c>dndM(|%X)sC3~3jEPtG{9M!&EHz2dK`c>L*7OVeQDk>$LBXaQ+4ij+?&6)vOM#cEV@ab0)1#ey-+Dr&-SMS_fl5ivqWvX zIP?K+bOG!G8UYQ7?XxbsVQ0`4H}4F({3aVW?+vMa)#0#4^$vwLtbaHRuv2amM^Lse zq;^vrfAh9!)f#P`a#_7?Q>)eAHlOUCT zpuw)-D{k5`vwAb0XX<2qzIAS&UcCvgzdxjIW8GFq-K@`Chh=+$Yc}Jt_WharbfzuQ zzJKivt@H5iklLRj9kVyIZVYsVzQFcn4d@?ELI={leSypL0eW5-Tm}H-mv93ffcuDI z@9&F4AAIZBIK}HHR!y|(ubpJI0q(YOk0bvt|J&#(AvASr_X!`ct=G*_qjrVwT=#mEiUNP2czG1Q}3Fj@h!Fo-dyw0+XlU<4PhgsDR zN7nro=fwj{f#m?}WnE0UZygZaxxMAWdGmqQJRe})tkdL|A|DHMqI}|}RjC9!$Ngpl zWL?7pyAObz1JLJZlM~8Y?g_0k1wH_M0%F1`IwsU(C~&z3hyYdrD}hL0GSCHR59oIc zjE-QtRQdz(r0*MPU*Kcl9pEeAL*P9-{S4^`>nB+QRs5Z2Lk?qr4}cGW_w4jjd*25K z7S-zZdsKtxmyfk>+c?Gb6V83##-~U>a%_Ky^3e-NTD1=^uJ;wrCq4l_0^YarHPUx& z42_*0coxq81$fJ@hwc0vcpG4QS$_wD+#@Z}8V-z&+=y8M4$ZsHjWM$J9lL_AxE-;= zCin!EIJpx00_uR#1IC0-nEk(F|4iU6TL*L>!F*}@fgdCP{>Xm=P)6U`>6b{q0H_aN zLH>hO;=igu7UO|00LuJxfO6yZmrNo5{*eC*kbi5)cO=fGd`w*?rLKL6^05m>TD2g5 z%DJ;$2kT^A)P?VW&j9Wl89mig8Rv}yC}&gG*T}Qoybjy?EzprXBQ22powC^-Qp*F| z*XO59t6CjCVE3NT+L#aWPzN}cW<21FllDI9gwX}+f&OL*(rkXldCk+E2R}~vlRL_# zlbw=J+CFvQ3oZYI|D*l?X3OMbq(%pfj6Q?>``YrS{SU{vMmDU=)JePj4CP}Wf7(Cg zPgy(lihiIIKs_Lg{Kw+l5A3>FH}!z+W}a>BNRUgUg_b|hKN8Vg`vCL_%$ajEjs8urHvD z_53PQr|eDsZ+k!IkoEWNkm_srZ^7x=>yUpxmzJ|HxnMfV-XZ**$)5OoFKQ(>oXXyV2tN4Gi|7ZNi z{+RM-n$hd;WX#tG? z#{i7=DSOsSz18(0WqiT-r4#u?TB!d2;M^u&-yd3MTn_Tr`hdCAH)4LX{)Kp8BIKD2 z7&|vIXLYTTG59CzqQjAO=r?wt4&ORsu%G)9 zQ8v?Vb12#q1F&7rb~4X>oOAftKDNN}-w{+z`{jw9Ef;+MrmVi-%nj=21`4s@e|epQ zbFbpuUnPKZtp5NW1RetJ1UUDa@qC`gm$o1Hx)5jx)boS7*w+BKnC(v6{>a%X<-V%G z`Hqx%jr7#}t18ve-Jpu)n%K*A{**qJFREgn#`vDDb6KZ4@p=Hxzd)tlZ|Y(FRqgGn zD*4_E)Sf5w_ngp zUSsR@3;cdB!Fhe?0CnVy`+R4c3YySKriH9W_yt@ z(YOhHzLx*4keUrJ1~76rDRn^SF;8@C=d9{=*rr`US8{&U`5ZvY+LpO4<8h_+2Rm{B z?p$E{3HpfzzyyH(@I+t|Fxk#egr5&zGr@Z9ujNf!!EZ;R-<}9e044*I?Cr5g$Dw?M z+R)<4brbcR(`?i+-qd5~SvU8we*N49X>RQ_Wrfxe%<*XX?+LBd0I|N2yGbd3-S@*c zL{Of`>^XwcD%;{{7dY2^BE1wC4RB8P1HjA?GhXEO2ata^*#Gm8f9nGaYYl{)&0H>H zMa~(U?GI2sO0B!GHso*S8BJ`+`U%E%?*g2cW-M7~x#pJ#J{#e&=m+XS#wPC1CV%+; z|M57C6$%_<@Vv70GrS|c1mO6K@}jI5Z*%-*(k~$Y{*eC*w){sx-W`t)%7*D%((C(`1n0O{lA_Af?jM$o>E!I zBRlQm%aS|`+E&9mQrbWLKF3~Wj7ZsM%jX&r!^m)i0-egJ7buA#!GvS9lmwaauo$9u@n58P`$ zkX8u!@0;CF`*isBUbg(XF60FBka-VshGzj4LiW5@ANkY&(>`gFw0qjDNk51E_w(`p zoFAg^H~oN_+xy(d{=fIBi~i-b`+wdq>|4tg^T4;$-XZtDwd_OdO#Sss%k|fy+e*A2 zVmnI94{~tY|IgT_Z|0mEK{-(un0^fT_jJU6gCTd$;}Pr!7y~lT^#dP4{-f>qk1+(F z+tc^6Zi4Y7pGh-iEXcX!)5iOW$bafo4g4!+}s>whuzEy%gjB ze;!|PRXxaf5fBE<_5t_J!JO`FtSLDcb2=~JJgx}~23T*HBjrBMIrJqDNKdQn;aQ)) z9{PJX`i0Ya?K3TRxxQs#4CvzeK+Fe-?7jBKIX2E05*hPt&TZJg>(6CPT=4I1*tvnr zk>@^c>*pd!-vKrN3xUNBaE&SVtp@b-C!`{Wa$=Z}r5AVv!FmE* z7r=eLmXq6-+prmcOpW}1K)wUOu_EUN&0I0}ajuBqT%y>1Ino};)5xFp{w=_H0*(`z zQV%)-)CFP`Ao8ym@;7rtBL8wEZ{J&iJ?CWqB7a}9%Hg(;{2AwQ9Vlh*`~C;x0`dRl zOa~xSWB-irDR=hyjP>|l8P^F=A2>E7h62+6mm?j3JdON0&YKTR1*Y3zeg-fFm<3Gb zts@m3C?`4qS?W1kToVm?S#|i0r+v;ErV0HM_HrvO&+Zh$4dbd5M+}^g#wo(i95gqIu9bMiU z?y$ElOMkU1w$k?`T(QY2j^490OZ8sO?9rOX2+jsbE*Y>-u+0$(z z>6x|bT}oNk>kR+64x@>Yna6t+&N$@UPC*Ts+uj?fS*o z$6eNc&T>KEsooP#rgeDtP#VgPdu4d?>kjW8IMCq^+>4fB#`bY9#g1*=!i;USgcq>4 zJMd2QHtN^)rB>I+^ZMty(74pvEYAF8Yrm9c%;-bA^s?}*v9uXi$Y`&_Rq%#Lj7DWCG=KHDeRUdQimw%741=(cZtzSm33 zXuIPgk}Y`#n!wKQ1UdpPs}BC+cc=asR>15Y zR=}K|mS#__I{_;~?awyeo^zf+>V+f&OWeb<$1@CCR7=GUC#G8 z<2%buKIZi*J^$M`FP>83=Xkbl^WJyNq7I&<=*ib7#m;;ud2{gSq}W-DljFjowOeuy*7{xZjV!#U|)MI`*rSg!w8paiQ9hyhN=EdQr{p5vW4DS}Lr&e}Ip} zKsfICW7tANbFU#?wN8|(Zx$s*Pjl}&{Fl~sjJ0mtwHQ zihv;9LY1P%20P#X%)JW;SfkIp_p!g<|DM^I*_m_Bl-)ZsXGYe|v!KawHeBwsBhGEI zQUimioFQ{{R(jOU%yW^qKp}f%b$;G4>4^6yEYx0Lk?XZs_|GB>Tc?&0vM?ESUgr1F1?fZ9LqL?H zU_^R=4xUNB@Voa{T>kVE+XtRI<}xw)pw-Bfgr#FMPwyO6oFDz|)v|MQuT|!5xLTQ; z@P|k5>v66EbRcrMNq&y4N_I$tL{QR4%mxM5MF`59bV21_2XdVtzcSQ?k)#8}Q=!jn z@V=q2r*Q?LAEI>8Ke>e13;BU|=*L$J+3~JIPQoT3HEyHu$H5IkV63ySF=3PH3;3@& zx~nxy_P4A~+NKK~h-&#|J=CG357JHCCIyRHUt1^Y=zwG=J@9@4JS-tz@QzT%I6ZwG zAV1#bp!@gLJc&}c1J!3Ol%*ExH&N{x#G{@c+o@gcw__rkG5s4B?>UksZ4p?)7R0S1 zcYU{9FXA`fQx7CR^_dJjIm$X9`ZTYd^VB*~fs`aC3AMu-&EW%N&-zKO(xUJEFJ;{xC)xbpI8?K-UAKvAlHK_q=RjX3{)S#|AzSQPhM$Mj=Hi@{D-|FG<|K1qhJ?- z_ZPlR@BF?+!G7PWMv}XN9p2VBe&2#Nk8(fs5b-G;IZaM+&(j7yvK{GzqJLqL!~+0c zr0}(Z1bXYBlQ59viE?}DMn?W%qze4`EvLKwyO*E~b_ zgBjRe-&c`K?qP|M0!n|1tWLLB^v$d8G?@ zHvu{UZAcD)e+qS?5|&B<3>d3w}cD9B-jG>wv)W{f(;s*ws^7@#BlqzSVuCf?#v0BGOE{8f_|F(}3LMSBS7P z`?^VP)_tY1BL_*lSH2+S`dLWjb>dT8y3g~s;OUJUHAr$<+)p~aw>wX}Zd^*AryXU6 zvx8099(xmJHO)eba&5s<_9{wqeT0t0Zzj&u0{;XvU^{Re(5Eoa2C~ck2;==Is3B1E zjdkWXEyHvBg=^^^A}b2%3?9tb)#xtlYE&0i8g9x8LvWsLJrv=-B+QIeM47T{h@0bQ z&gm7zr?9&5>HbQ%8M_{%jz?)%;yHT0eq8yv(r~ltt5GI*E(hvWp=|DCcnU1tLs&<$ z_&c`N-n+6rmlJJRN%Cl9B=>#)Lm*u4Kb1DqyKx<4c#3V1c0TA$oZEY&FvR?JVX)bs zMIq)_ibKt>lxTqSJ+Bvsn3V%pz&D{3{9Z*pxLOu&URo!9InIOqlj3ROmQnnIV6*Gc zLj}bnsM9s8KwO*|Y>KmEImj)MUJ!>Bg<7y|@0L|*`$>MW1bqI3WWDGwh_>HbDBYJC zYHwZMNil0e@B7M4&&U_)Kz0+&@KlOOw?l@9*b(Q~nG^*Z8|C`9eIeafe`JQQ{xD#8 zx=-_wxdCnal|&i8TO8GSWN~EYVW6W+B8^|q^fefi=G}a>Chmv~AN>ysLOb>?iRwHY zaR|fU9#s@(^kSN~{yT`r(@pnnJ}T3<<&dJ_&YcT_I&}d|h~iAmo<$)h?*i7)#ZjE8 zDCPPZ+(8{B9n2!0!LJTc`NF*dxJxcM>`~|`FZH(u=n!BxumNz!*^@g`eYM9{B$)n; z^C&lA58{o$j`D;q^G5PztXuc59)Wc%nd=%5TbI0soXfy00sFqqN+eQX`jvnYQ>hHrCz)*jC1 zeH&->tpMn}-Z`f{g=OdbbiRcAi*Ozel(LH=|Z(uT0R$g!G*vM%JQ&oipxQr6m*^{&QVq1Om!vNo2od> zoVU@!kS?#_zB=oG9_c^=ztkTPWcPl!n``v&*!2K>@jeptp8(0v&lraMOVj*xsQl?X zYC3;{U=10kBWw!dk^CeZ$-e~hk3krnOGNU|gZ#FTe;mT*o;e`W`NgDH(gUU6D8B{4 zow>$4*aTEGVNVHxfho%3%t80UMg)WoB>>wq_iKhTGI zSJldI>Y$PT22s2h7En5<3tT_srVBcF?EX)kLhUb=2cXn|=L2^Kfa)x@-zBKW%kf_L z0p&amZGz68Sc1OcC+K16`GDr*(Jt%}2lYZAP@VsTasbr^&u!0@6o!~^I<@s9UC&-Zd*$2KVU%A!!y zZS;I;c=v~DKNR;#T;Nxhza`w805`xD*oFS=2lVq){<{HJzzu!ChIIdC@1y;%M_XS7 z*aI$z`!?|d`cU#yT$DG@8@hNA-v<2sPX5p8g3P)=bD$ZZk2CC5*OR&_(f*r5_GW-S z3lN! zUZC$F-v5C{~zdY0H50d{a-GBxjnRooI?Q8 z17RreyxdkEy=FB%@!zBYlr{B-crV_CXWoDRH|em6*Cq`#X`o31O&Vy@K$8ZVH1IeY zc=|gL{M8()G~0>fXf|H4Rc`@9+pE|hZQAI>nDW3bAGGmFIsRVjsBd}19MTHzWg1KO zHk!R&2Ftsg7~kUl#)H*+z#t)hPYKcmDz%DhWfN<|q-=@r2m+eTVhuF@nuzg;dpjkY z*@OO>uBJD#ve^l`*U<7fW8BrbBxCiBlwFS+tM8xcFYOc2510Vd>=R**o2IKt15Fxe z(m<02nl#X)fhG+!Y2b-yAmii?G5z>v@p#}k5%v>*`|(cL@Ns+2-0Z`uOGSAN`1Q0F zM6$!c+IViu`FF6r(thC^1u%p}q8^dF{IZ0WmM>ZD5Y&N+Zx?0G9UBDl(%kT%* zVm^(BkJ`;;r$_L)>+{K;WFzJQS#FVSU{-o0%gsK*PDU+cp=-5SFzn)B z4_MdENp_R%-(lFd%z-R?Eo(5^3CU4cuBYLSbMMXbIDgj1ChJ@{c#dFM=Llg{`FY20 zo;kR<3if=};p-Gq_|Ig|NOp}JRVWLx#~>Rel7rip#pPvLRfV}Buup2i;>aEYwn~Y0ZSkB$*e8N-wOu0d zF58@OIly-*%40m~oNR5LvnZ{1&F8eXa61uj=j*j@Ja^ZGkkYihdtDyDU0zrII{S@?~ z=G{T)jeKs9jc@Em5jIf@*ayMp3wA(puHucPUxbw4Z9{HtSUs5?-DAZ46JZZjWAg+% zBH8{c0XBhTA6Z8Z;-A|*L52v}*zt8Pu*G@gx>RRFy-k!{`l) zn}^(0u;Y@URh_U!2R1kY^K~1_+&3>{$2~20J!)XjMD|dCY_~+VjgheDqjU)*1KBu2 zu8`U=$>p#$Tp`l z(BjKcmYt0HjOpM!r`?3T);eU{Q%!b1kuEwc0Jc8dK8S3KG;V(f9TvP^1-m017UiPN zg6hPPZHXc@c0^=PM0QE!FWV=1*lDvp)>_PaxmK0Gy^6Wad2VcL)34fnG3ur6;D>BQ zh%d4g>Y%xASyvAD=>e|+UI2rw(UJH5^``amndbh#SQ)~ur+3waPX|a}jvgR6E_gxm zTH8|!cJ3|(0sGhVlr}7SL7FwmQi2^2>|q8-tLF5TJP{{kLwCv7p$F_<`be-3l0JEV zpfm+^&Dt z0Zs#>fVTDc=j{dlB;WMqKXlcn@H}5(Ilw9+JKs6GuRA-wyBlm(Okv|fAX^tR^*!uv zVDG_v9D1`v_a59&9gpIe!hXe+rF(T{dsg*fSw0rrhK2mEY0@C~pRuOwoTo`u#Kx94 zv%Ca$(O+$^aM)dsw?o)3jR9}35nrIZZy~pQk)P~;a<_^q`ziosK1F~VV+!ov|(NO8)-O|RTMXj)tz zVP5z78)WYS+&~$M{Ol7CZi{f4==R?M5+OVz7?~J6sO#WKj5czCBoOi286<`5WW&=H~i~POX-ZGe6##osjxPY>^2D8HV5l9|3&NdF9!TWUZYQPQ~^XoWquz<2IRx#1Z@Ls z1f0O98`k8{yn4uD3D%&lz*_uuC9y`|VNIshNe^))*5<8E_Et^Rgs-ej;NerjKi1-k z&PZbm@D1?+TBHBc@NV3VHMc>u?wDlA^=-Kodh`LWXMrQYCZGj93;Mt^pm=mqK0ptk z5AZ^=x56khQ2#q%{`o-t#h^nkM6{ihAO75e3*l`SgT8~c#1rAa09bN9u=x)Z;%*&A;h!=w1spAN(28H=ueH zz1Ni!x}q}R-_xMa0am~)$g()qSBu)i9Nr${O@2aqo&x{5Xb)uDM7AqqC@oMGo}qLa zlznMfSDrR>9YFk}PkNi4fejD$C-+~yC^N3@3} z)E-bi>!JTIC~vqM)jx&Fe4-tX1i1Y<_XByt zw}=9Ie`0l}K9BOfrVOYqE6#c=Md)8a>-H!dl-6-B0cHc%0Jm2HeIWmG`p<%bKdygJ z1HcJz0JZ>=G;;l|`ImJDeyJ>a1O0)Q0B-9H+MwT&pJ}2`Rs+yAm4S>Vy+2v?Hl^02 zfhG++V-2VmA*l~i!eLUHHCg`&N{h9S&rMq01zJw4xk9aa{ni#gG;W)w55D4?h zVpe)M_GeCP@W0V}sUXt)9_DFiPv$57{|ED^%AD+^nsg_lE4zNRpm|xrQG?JGf_8m* zpXA=Rzwc9zMW-EJrKjy+_L4IzFO*}y# z%DLZdcC_f?wKi@HWl;VSmBwBp3A_YjIBZE(^%t7V}ZYpAH*n^3S)28tt~y zJLbI#b9YX}a1qV}OsTlYZcChy1r-KfGTjkL@?c-=THIPP^{RZe~B4 zi^G1D7A$GI1q*lS&eqNA%NBgwp9MOp{S)20GS?-2*qm=(Wcyb4WN}-%vZKFRutVFr zvW<&+-dZ-pggGrVc7Nl|w_hDMYQSvF-6kTx;Uhn)-7k)Km^8+c&ruf!3a#^f6?vFj zyICG#QF1-TtmtZ#SxIH2X))%@ipwI*N(zE4ivPr%RAr=D3Fgy^%Hel8i2D^`?yVei zTGudl_Gd+uS=GgWmMqUlI3*o@o|o;wmxt^2=|B9gb^(6G&qoYYy#B^J;_OL1R7u

{U$3Iv~D8^4{;l+zqdvpkHAO zb~)yJ=3?A$KF0X0b3$7EfcYM4jJqxq82vEP&pFsC5; z`N{BGfidlG!6)rmWsPU}yg*)Hd)_}|ta<>=fnw}B1^G`w-mbuWfbR=-FCNIBk01I7 zBGSe_A>smz@h`~^HuxH2o>tJuY|JH5o;3Hu*P6hs1#p=$&dmEAP|aRXrduh^M;ZT= z(*nhqxM)Lj63BlxbiNe%FG}&zo&&uv1Akvr+MqS{3i)z3s0GcNV9zO2ij(c#OdGQE zx=8mAb0L2n;6s%8aOD3g$FmmmIAbZD%pi4n*G&`iA4=?V(YmhR zUv*<1hU*x5p#972F}^#I(yT}{yx0=nn=!yYM< z0PLIB`8#?JWBiTgfjZcz|Gn~hkiR@u3fdm%2Jktv2R-+{7EXEqXdLs2>i2(5Pk(25 zXmsJBE|{`0jFdg}x9CI%Eq%Yd_y zi^eHvSi%IT8K-!p-VqNzqYLMhh-c!KiCE+LNcBH6HAu`kAA6US^xp;Fth6TjzG>kb z8)rJSMH^sktgfsAd^g{7?W>dXTK?SSvP%8@OPTh$=cCRi?C4XBZ{`aj ztHng{uqbSuq8+8>I=`)Q=+Ge|zDYSh@y;Tr_9{Vncz192)R?a>oeCa#G0v?S*1KWN zqKhceH#7d17RNXU?@}5;D{J3}^Cop|rZk$|ddK3HU;C|AvhejnRfv;;z}pxc5L4+pN|+M{xqdmi{6&+4G48^mS=E_+ zJqlAjOv{6v&C8SbnwKBmVO|#I()~)Nw^{Mgon}(lhBjvhy!@(pmmZ^g5ijz^*jr*j zoX*=hK{~5%AL%;(a-`nE%MmRX-a2Z!FfUZK>QY3Dmlzw_tTeLSx;$U4GZ+1Y@8xG_ zPwvUTDGu!x+Me)K{xdVE<-*J$gGE`v20vv4=+8K^M|ky&moDEou{gBrt_uOr?>PCZ z@Mb}Nrs+W4r)0qRRW|&7GUmVlv%S#dMr=2;Vn88wB6Dz`d%HIt`@$JCN0`#sr zLEj~S;!XMPEdI^h2te4?+VHw@{||kfaxDM= literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.png b/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.png new file mode 100644 index 0000000000000000000000000000000000000000..88df0b66dfcf0b298de8cd296b793b9220c4a914 GIT binary patch literal 658195 zcmeI52VfLM-^S;9dgxUo6akTtgq{#WZz3H8tb`;KDWNHduM$KQ)K9@e6$LBOqzFor zPy}pLL3-~cKzhILKbJeW9LeSS?%qA4!|l#a{mo{dow~Dn_2}&HQ^kid=HInThdzwS zW^?PMCYOZm&Hknz@3CD5O<>HoEVnYY@QX@}`ScvozJ0G=!{aB$PZ%CQHmGa+_CaIE z#}6GbDvq(qCl~h}*01l43av8Ernc)gGvTvt@qNmB1@&q7K|;AXb?VpfF7@=x8cR=< z@4cgR$BxRX`?(x#+r|;dp=fdTNZKnCu9?MF3t&goSA+MH| z`BcqB&2LOL`G~IHd8)o_dRRVNtUp9 zex1H-iINTeD`vSW5tgtxbHN)j_WWlx%FC9_VL@f%W_Do1Te6i0o_)U~ z8yL#U4eho&oE;BlpqF@?FQg{8(u3nKPbb$~V~HeYI*$X74OuQdM06R8F3A~)3z?>Oywui$Z-0RtL7UT=Tbicghm7A=on*rwsm*B4Z7G_6VC z`4p;$C3DJCX;bnh?+;kLe}A)&HU_*Ld~>^6TXV>eOZit0tQgydu?rL5I+WiaWPO!{ zmgMrFMfnVXSz{s#;jVywfGY4z8%t(v-{QiDYGV>?u*clhQGZuU;Q+PinQE7i^?HsP`HgP&aacDs7- z{fq zYklNZalwqt8auiu%Y0Bf=#QD@de`hWr)ih4KJWJn>ffbWa;x$3@b_Nmw0Ta;eXP;j zx$kc69QN6?b-i2t>$jo(re)2mywmAx--EFe!xnyAVacL{ceJBuN_=5Jn-bl zQSJAJCSI?kX}o(>Xv00b+O2KT!28pfzt+}V>9=A}l{L?~XxyvY^LmHz|G6yScaGo!&d^*8Fr?x1Z~5E}gZz(vU`B?+xtI zpw_W6AD0a&^~TKhyPMZ8Gpl{0x|?RpyRQAV-{CTcI~^X=r`C-IKQ3z0rAn<&|2P>t za-Ampvj#y=?VE9V@#S5Y8(*$^xkmEyK8uI68TV~w-;4G7{Q1h$m9Lb#(&UOdZhoV- z{l8h*=ghLD{Yp3e__=ngmTv6xQ}6edm2L9s!tj5lSLoX9yH#I*ex%Cqio-KT{B(Wd z=F(?Af4s-Mg98r#b))n3dDj)Gll=VOEkAwl%whE^FRG-eG_3NZNG$LE@0Xul?%O}5|6lz+{_&;0 zZ?5jtzj5D7KYIT=cSZl@i+kPbTea`7A6qVaZ~3rg@%>tV`_`bmS?WQ%p4rv2{kOfB ztbO^?);&SKF_qU(Zct;S|B82Coz`@Bowfg*_}^>wx6IG0w&jCX%YR%N-oEkEjR&mn zlk(Zn)|yN6$7Oyov4{7ydOyrddF%bNAEk}?L6$`{i?;+`kv@}<_Di2Ds*oZk+!ir0rvVNSgoUZ!aZ$`Aqx|XJ6lTE&H)IL-P9Njn4idt+U@i^?JYB>QjFED@Fd^ zbXzEnOg%A2}VE^Y{T;FNc;#E%^+p=oQ52b(D)Z^10E51B7 zY}cCm8~@n&<1dxhTsf{jwR+>|jc-N|8&YxDFTbzemGtShPv6}9=Ktlj()ho3B49Im;pg$?BA$&nyKw&(Nv+@fJ$3GlkYNK4lsa&{@uiJr|9W!wh{b&u9qBx*;;>B< zrl-vOU{<%c|7tids^Oxld(Q0JcKPXvY2#kJFy(OC;zjEh?Yy(>)K5vxl13!GclxWp zn|$@;mxCHlIJj$H`}4i89BS?R)#srj&#io`XZxPhhHbc3DQ~p>m*@qt{#4j~&!RUyc4?*IRydXS_FKz`LzK8as1fl|y0knoc@&y8VQ69kM&j?QnL{ zu&|!tKLtPCw0_j`hOb1PY`(e4=GJ=}4juIO(SeHxb{*C2vG9iVS2r6o>bG;bcT(QF zxcH*)r8g?h583zP*w>ft-pQFM99A>rP_sibkGFrJ{^LE8yVUPK<&)cUwl;me_V10qYj9=LJ9U4Yz4aOYXU;5& z__D&n*MdejY8A09Z2Bj2W{+#Pyhis=Hx9cw?CQ|h{#qF~V_p95V?OvL7r3 zJ3aYI)%W86+`i!SXFvbAV)v%l149oFy>azy@R!dA%w0b3>b!t4{wGfU?0@drpYNol zcRUp`@1J_7woKpHe$zMGBY)i*_ro9i|7qE}-?#lz`(^iQmUQ^chUot?#~ezk`o`Au zCby5CdVXt0Ueq7Io$4>^-~H&&qj5Q1bJl0HUOj91jm)_@qhDOrWAwN?dliQjd74@Q z8~@w%@6l?9TlMSn@v*0Fbq?5D{kQzZcP9n=H}miRe_!mmSZi#D*Dv45@IIIExPQdS z*FJn>aSHBRxRX}b_R>Q2*gHL_E@r`RS`_H`+7=L$jsfia3chr7xt>xC1 z`(H@@bz<_mQ&-9k_;tX-pCVo!eRJZ?$ox-_^xV7Ui!BRRZ+c<;0}ANvpQ@-1>5An>%e! zv_JFS(bmuBpFV!~^wd`pCoM@ibh>kfddBnv`E$}f4s5Ubc)`biz3cz(tuI?dg@<2k zJwES7?how`#2%>gWySoj^Z#01u|mjOnMZH^xBt-mLrdo+&---y=>L6}cQf%;<)2RN z*}3QO-~O1~IP9g=XLIiCm~?tlwY%S(3+#FMe?e30X@_U6P3wE`hn!C&auIV@AeJ3>p$QY{ZyW zHSccOUNdM!Y^$0BnsyKEKDK?_@DW|6jF0O(rANP*DI;UTV{5jK@`;=rK@p6On;0E5 zdGx3;6Cx(Js;P@Nf{wX!NX;Ofmx&`=)ojZX3>w(IS5W)-@o_;-8#WD&32hV>)I7Xl zSktD>n>T6@)F?EpNl0i|NLb_Gu<(c`O(H@=gY>3mln)(6j*lH0(WgTveR6cxs^;*C z6URn`giM+=so|u?4dcfT3keGk4-W}#6w;_sF!>0c@cNjE(UXJ6OsG{5i6Nd2aT8+3 zj~F{~MEsZ_9&hxJ_*W;ks#%jK^uXxLHG1rWgvLzJvm=TjlcUFmgf$EeDJD?&?&gDy z9$jR}2@^ZMMmf=!tjGi>^m~15Tu7g|3GuIvkBRH}THKh4wTj9_Y|Mjr#=bg!l#Wkq zOi0|QxX~130%bm|$SjOCv_KvNFO-c3my0d^#1TUuR!*U?{7SLG;wFzMCK$g`5KMO~ z6x2{8-G347$Hzraj33`GK7LeG!Cm&S{Y1?tepdz6>mD65VhnFoJsBBd8t7q-Ka4M~ zL-fSBC<@y&IJ8-CXybliP3d-wXiCS;>A)zG0-q*vbf>#NHhN;TC9xFpYZ^~{?1-VS zTNKMfpM~S--aVr0mVQ}ie!KfElllvvZD zEt-T6iES1f8yhn;xJhX2kl^r`A+f>H;mu>CW1EFGj*X5sl%4`$ z(cPwd_VkS#SJ>5XVbloSGh}S^_z7|RPqS4`!{6qEht7j%3J;_E6OV|F;ZL8a7=8oC z#YTn{a($Ss2O;&rMm$=bmLVvJ-za$TX-pa(H^$uUUl5hfSMiZdh#xv}QuO$^w!`QS zvFvZNAl||OO%sR=;m_+)Bb-nb6QW>Vq0zHVrz+d{E=dra>Qu>_2A2#3*|9>oRKM zY!r&?afA+PUwUD@Kt!OQC-kC6w@TSp!|ERrK*HHiy}m;f<<3S8#-m|O)e6CmbK zfy+D}ldHgG0>u0&aGB?0auv8tfS5l8F7td$t^$_{5c8+NWuA}8Rp2rKV*V7k%=0n1 z3S1^Y%%1|6c|Imrfy)Gl`BUIB&&T8{aG3xxe+pdY`IuY3XxXkl0xe8n+K+K;4mw7%W zSAoj}i1}0CGSA24DsY(qF@FkN=J}Xh1uhdH=1+mkJRg&*z-0o&{3&pm=VNjexJ-bU zKQ)WX=izeDxG}T_bP_H0%;}f-4=of8is{m)J7ZIxpj}L+GnS>L-y4j*8p_zs!L-J7 zK4TT*KY980PR#rF?5-W!_M0qYTJj{*@_%7K00ck)1VF$_0&*+;!#fCo00@8p200hJ)YzPDbAOHd&00F^000JNY0w7=$0SJgq*boQ= zKmY_l00M%000ck)1VF$h0uT_Juptl#fB*=900ad000@8p2!Mc11Rx+bVM8Dg009sH z0SE~00T2KI5C8$22tYt=!iGQ~00JNY0uT_~10VnbAOHe35rBZ$gbjf}00ck)1Rx-| z2S5M>KmY`6A^-ug2^#`|00@8p2tYt^4}bs&fB*>CL;wO}6E*|_0T2KI5P*Q-9smIl z009uNi2ww|CTs`<0w4eaAOHcuJpckA00JOj69EW_P1q0!1V8`;KmY=QdjJGL00i8N zfGi<@s_RMw6>%MFqyz#W00M4B00QDx)rrD@00@A9>j*$VT*n$IfdB}AfLjrOfVfq4 zqA(x;0wCZz0uT__u|`TD00JQ3Rs_1jKc$krD`i00_7h0SJg& zRVNAq0w4eat|I^eaUE-<1Ogxc0&YbB0^(NHiNb&Y2!Md=2tYtw#~LYt00@A9TM>YO zxK(wcFdzT|AmBOz5D?d~MoJ(60wCa41Rx-8Rh=jd2!H?xxQ+k>#C5EZ5(t0*2)Gpi z2#8x%Ckg`sAOHfcBLD$$9c!cn0w4eaZbbkB;#Sp(!hiq>fPm`=KtNo_8YzJQ2!McF z5rBZWRdu2;AOHd&;5q^j5ZAFrO2rUJ&`vGp3YKtP-XARywb7vu^AKmY_pPXGcU`YJ*OK>!3mK%4|1 zAmXeSKtP-Xl-6WRCX-p?9ezPT^aQH!Od3L&3~+Hq zzq=4s=9Y`oL~H4(!v2uOs$Ta?f0F3xKrr3V3VF?1y7cmn9c zay(PS2m&Ag0vP%AOHd&U=sldh)viK2n0X?1V8`+f_nf2KmY_lz$O9^5Sy?e5D0((2!H?t1or?4 zfB*=9fK3D-AU0t`AP@in5C8!P2<`z8009sH0hKp+4DAOHdo5ZnVG00JNY z0yYtVfY^i$fj|HRKmY_FAh-uW00ck)1Z*Mz0kH`i0)YSsfB*v)C^@f`AkVRNt9&p7L3pY#Q0yWR+yAlI4FG7s+x{iL8d~Ewbrk{m7Ojn?d#_ zS#Ee7+*~1>OSTMIf3o+<-XrTpwjx<>!Y)LWIaqLre?dSv0@ghLi^?VcV}Sty5Ma!g zAaH9i{B6k~3I@0Gh6DYtcM*QL_!yf&I^h-u1l*1Q1jOyC)L{im&`xz&T%wO?CFQW3 zi*uRe;`ERl2sniR`mvmX6_J7f2!Me23E10@g^_ge*D-Pk0w4eaq9gzT5oNU?Qy>5W zARvAM5D@X#5poCuAOHfQB!H20QC5qSGqv(?BBjvU$*Gt|-<$PoxW?c}b7*_f@lS?x z7ya&_IkasGrM2?#c{(?pa#hHS6Hda21?KWP0W6|)EdjJZT+17&fdB}AfZGs23&d@z z(x!r3pioQ5@|A9r$Z|8S;D_-RLI~MSWcQH`BKtDg8)WB`!W+`uK>-rU8p87xd9%rKGn_16mA91ad9p3Yb|z~`lMdNb{tua1 zWHZSQCRo0$1?2cpW2tYur;|1R!00I&sK!fITAEolmqOvR_#{iXP zB#Fq}qOQZOWC9S7k{Q7V2!Mb@2~fAycomI%n9Dwi$OwptIYPx>gMbvz23J4;1SC#C zqfkt=qzF0@S$BDcC4S-OApr=;Ll!s!0T6H_0yOg~ipD++U-BxkWd3UP6dErXUZO{M zct8LG@&E-6KmY{XiGWfrpJGjU_`o_%^_@#od|O)+&XfH@Ks>qjQFah;V*+Z0!s-Qs ze5$XnQoV@=-wVy!a$`((6%7Jn7d`|A0T2))0cu6Gphbhm-zl~@n<}|nL+eKVvN(VT zdxU^^aOI=uAmFY96f$|7J!SJ&D63Xje`UEnad<*l2#6=tGRg=79!7vxn_JU;m7q0j zg=xykPo?fp&6!@N?l^x)04nm31&%-f1l)uGEdU9!wBu8>P>HQPJeZ<;*yJxb=PA_Y|Bz{>_fxOnl$Ft+^HV>)TMu#z zrDJZnsjc_(c)@9j|MsZ9Gl_>aq!~)*gX!E5-cNtdBPzrHA=8E2g6OvyKOs}w$WMU_ zW6zOWI-OK9IGFi0I)t2Qd9A$&V>jV%Sa~>+O5y4cAM|GZ8m=+WqX&C0I{wLU?xNov z7oy6vDU{aA!{_O|dLcLGpM(*MgU(OJGgGMk%Z(hROzyOoWB$avmzDW=QqO9aq)ynH z=YLUUMHg04RNw{zARt)+^iG`T%*^xGXxg!Hj5E`aO59FUX{iA4(0o2a<#u)5r*)qvSe||dp!^`< zW(2BkxORhPOI2`Yv9j~>QZ99?6yVG>ToE_Auv|eIX@CF-NQOYZR_pW@9u1`XO9qG& zGwSN5yAz9os6hY(K!7n4kSosSz$X;ZdJw}*J7*^++W1}F4=!2|F3H%+!+bjEaF?X@ zFp+yS<7f86Q<`#AFhw@V*v$pU%V-AF@dxRwJj|!H4t#L#nuGJiVu^85#9}O+moj#9 z!SQ@zas5F$D-XAz<1P=*U2|~IcvhTil5|Zf`AmD1Qi^hRYNR2}DUD;!PDjk~LqJ?x zJ6u4fx;9C6fzm4^Aap*MtgEN-9;7rM3J7CdKwQmY2c@}$tg+z&Qr+0W1;=YBjfVol z7#9#%vsg-L@_e|Rp%%yMLy;}{vpK1DY(;Mo(QJ6z|KW_U&tNgCzRobF7 zmY?)y%TGp<9YOYUvRlcXB%4X$lgVBn%j@NUk>&kk7s;lPzq@3Qko}2l0@=P~d6$OZ z2F(z`s{k1Q0T6IBft>vOQ!2TFFZ6Mm(K1H=7h#s4iHQ6w-_B?BO6NTFb zWP6bstiCSAl00w5q50U8JSpSGBBZhJFz zH!G8+=em5%+S+WNrT`194P=df2#7Vb;1>iyKr#fV8|y+wZZ4mxc~9M`Vw{{Bh8UfIxDWFBp#cc6}cf^2#A3M zen9{P+>XGV9@W-UE99ah3P$^+uxsgQe8)ydMCZ&n&Qs)Lotc0kZU~5h1b#sP1l*E9 zR(|dfM?CU*%Y(N-L~i)N%m8YA+;c@fAt0`xj5I(11Oy{Mt&nXrZt;gb8Mu>~A%#J6 zUc{j!9)H?XR%^mSK&+t!zaRhtk|aQ%5$r-!ZfzP+PR`C^_pX$MuXm(jDW>TCNF}U-cfZLOJEEF;`5D+2hBW(}>0YL~{f2K+}b!YujRN^->(phF+Zc&%S z;sRH_SJ3S|}J1o>3m`!r=&geHaZ#6MU3l$gYWb3zM* zQ-A>h5D+1O1g*nIhpFiyr>4jEq#y%iOjfASDb)1fD+Wbo_(#*{k^PX=qNauN+LAd)Z;5J^-MG6@18 zU=0Bhj*2R|d?^V>C%H^kly)%M?_IaxLo3V{yH}_@yUA`Pn@FO;zW^mUBeMoxG4&w1 zLJOARm4`zq?F0JsdDE0n81y2mA}T_%T4bKQDxQ?9MZol5BH*ff6|{D!W+`$>G5<8 z;rR<9?+@mNv*C;k{5U(Ft|3i2e87T-;bD0mcw9S(MH~8kV2H(eI;l?95WXn>hVZ;h z+@G4PH(6ePhAswE{`@-kXDEw-A33owVsVfk=*-H)W9j!}{h1-pw$!S#N?Hob(^|CP<01j#ag`&>8Cz=j-x6Q6ab9oF z(C;C#2gNd`Va@ZO^8JY6H#Lkf1o#8O*zku9x5mcsP%^mjhZ462!yk&KehlJc@MG?` zsPLxo@jn>jKN|jrWa`Ht5{9(6aGClsTr29=G&~mtZViTulxcjX{EAA`Ac}@~%>5P> z-ZV`vaK?uJ{h0bO|VlgZxW3)CHRr-WprUlw(5{^5C8#72$WrWDNG@k zk5bCyok%nuvqm%w@fUoPHq~13xJKcBHpGG7M+u+{>rp0f0Ra#YkN~wZno}#|1zPmi zhFTg=(4e^Sc$n2WGl)l)^_yHRrq&0aC;?Lp0SHJjVBib_AmCgABoq~CyqnLKicl~a zZ)MaW!6-xGVLC8vb4Co}VZ#oIeDIu)kHMHB;JE^_{A6X~(%cN2M=k}bH8p9bNBKuC zzySn6;86lHR+_d9QPZ5La%9WUGJrr5i?U<`6f(KDBZcD8#A&{{{=*(NhzE~uGqpa> z6%{Ys9un|e0b#5yVfgMLijt2s3MKPbna;E=IUIa|00_92fSuyOGyAb?b1co&9M)8n zVDZaJFwe*`2!KEd1RNk9$0&)-B_xB(M8J~;qyp{$5C8%9BH#e=;6P?m_Z1%lfhmE2 zCwF0)t{5%>6)uAS2#A@0jq5y2i*%bDzBP5kdC>$sSwJjWxmq+fxPkx(h>3u1dlFh< z&NoQ3)6ApR2j6oJ#sUJKEFi{vN?Cx4dqJz#LQwDq0xl!KH%8=pTk&5z4c~syP^PYl7`i9;m#;`c=x_l$eMm@!5K-}6{p#oH91VF%Y0;xIK zy5@)3KT6A^<34#4GUrW@qCOZS7(;p%@-M#qNS6PK)`&tt z_O4%bl?ZhwyNbZMsyV_x9cGoW@Ee#`#R<=a324q9WTn=9!erT=H=RFV}aHD5lOG=l2c5_1wkIklzdbyMAP{oUB4wm1m~1 z(rf;YrH6H9X-&E^t;*0yFXYXwPxt~4E*^#@=7HBvv08gpk$BKOlXRJ#iRjPLO7l-M z*lhUH^RqUGc2Gci&hb{p+hoU+HGQdnj@*7HD@r5yhtUaM4I*=tY;jFB-Um2|?Dr%j zyfy0>v;5>Nis&_KBdg{Y!05vu76;?qFus5P!o2@Yq^}ehJs{68%cS0~bf$Ri(aktiDhOxnCo^q+xua3K?Q^W@&WTivP~_Zz(s~n zwOc_j^DOXh-plfnQ^>;%^PV2PTFKj&`C9N{5apdK%=_?m=5_K9#vhi&8Quy>i5kfA z0?QZY#jV^G$+vYaFz=(gtRLf`2L=~h{zjhf6DUb8^7Q-6_n%GlLa~EhD6G4U^Syjo z=2NX$#*;1G`hH2HzZcz1D{Ap;Qt#@7qx6*%#nyK|eXQqpjZk)-1lEcc$(tk|f=u0| z=N@lm^d-xmL7~MK?BNC8trh8vo*2mr2tni@y za{3?Y=eNIKUrP^+%zEK0qhVW?Tc(O6=qQt}G4CUZI`NcI7mU3or*bWpRjV<}su{}i z{L9!IooK?}rGfG!nr+2}qso7~Sw;Q=W`)ouR|wLHNfN!7<#_oOSFY=Hfww-K(KTLN znv9-#{APWL>~ONxi}hIah27-bkDhrqioR@{t0V=4KyuOenyj@DjVbZ&tVFV0gnlNl z=jqpSv;1T?^8B;q0SbKb&k|OkR*23}pS|+dC8oY|lBqBLtNXQY4!?|LS8u>F>a}3m zK@BC_UBUbFxlpJtpCB$r8E=g_%j8t4%`)q@V41ZVF|Ec`Gr%n?4WEkab$UNdn%GAJ z;#Ry+SQh;QU%6$gvg~U0$<||BShT95UvM4-5a9v;W4@hwkEzZdVdc;4XQeM6Cl~H& zqoFUZ->+DM-K$!UCDU&jy=d?*FTEK`;<3nZ;>=%q*70W?f7bLN+m`HuOwh5l<}w9% zk)Cyl)&?xjzoZ0&U_MR-nMQVSaRsu&{S1XoB)f;~-(>$I(8a&%EkD_W{QO1MLc^QW z_gJa>H(15gJ2Xq^0zD*8(Ru~mMd^6M2Til9Jz4Nzp#J~7QWYGZhy!Be-E+!2mvutQ zCkUv>DsR($ZZaV5fTYYvL9Mv#s&!Zv2`}fCSGtmU|4zEfXH==_;ioxwm_*|wlR0L# zkmb3h2WM`%$5>AJ>MXZ>HL{Ph+_H~}_GN@m-Bw=yk7>^yW&y-YPHSPDVe;tjDY-%o zmKjj)TBYNAM#;4KhbW!1^o+1Wr3&Yio^MKWsZX{s*$A?&$krw6Xv1H=-=w@>v^Qc& z2nc~~Km{F5Hkz!fMgemvE#6(pciZXi8S2VY-nmLI8N`gHY74y9 zCzW!4mh2_H=`7t-VS!Zb45j4hUb8ySwRCw3a6J%?Xb=C8YzZ@?i z`xyzzVfwYnh*m&IG+T(z$;4k&I1}#1?-|{FLt{=_dePGI$s=<9lC3wy#d!KL=OO33_%FYI_=g9!B}!p6ltcQzd#MQDkmVDMteRCODghzUXh?;4T5ow1&hq)L@Ko9NKlTO zX33=j649d{UslfthFa@?c^@DQ2!H?xfPnZ3+@dV*Buk5sbfR)-&$^X%O(GC0`cr8pj^Ji*80}w@JU`UAOHep3D9ct{L~C##Dizd+h;Gt!)y)Nb=ww-=9x^T;&Z*NHrkv#n}o;egb)A(BuGG;MO{`Yv{Q)j2BNMmhmX%lHX`D#tCRJl z^gLTYcugynd?Ard2HjXTVM8FP6G+fbHD3fnzg4;bgC52?^Y#6k4G;gq*a^puCjb>u zD7V{+Rhx~$5L?+Hocj^r{Z@I&X-x39E0rpXX}mhRA7Sy82m+!|EU~p(bblx|45HYY z9>RfuSPAfHRt5c5)(TQ%2xIC#JUiw%;>`6p%?IVCO5CgwC+i$Y0EStYht_Y-WXv%2-4+aF>kpORXq|;c37-t^( z`0in&TH(79Hr#g@1F->GkwH}IUy5|M&@mrlAr_K?H>nZevki6AbHykg8ch;YtJ_MA zsk`JCU08;iRHb zy24DBqWHML4FY0-9M|k5y~$YhNiRcC-7o)RqisiFK)@XcXtQaC6(8#mW2=J$=IcFt z{bFgi#FOs8TdHM%fEd@DT5Z?9&wN^=DBskK%ZOtTFh_u|>)>1H@|_UHGQ+B<{QCMW zrL9Vq7Ig`3t|Nd!bYs?E{8f(KyL*#%fwOruT&7krxv!UTtR8pF;XPT}o7MK$Fn_gz zC1>Zc6ZbRONwP5FC7}Co2X94-SvWX8AKK??RI8@qB`KYp**l010Wq!XPj?<=8R_*+ zovk^S(-)^?9{9y6Idv=as$Pl>t5=>?^nP@8{)N_%*Q8)4GX_1h3rZpj}Q=N)Y9zj$t*j|-=3&FX^$)gwLBKJ3Sy1Q7KrA) zJ$u>4pEt1U*RJUhDwRq$>5VC@dQkNTNVi>0W)uIogqFwyBCb8)qM`fFtk}D&3|JJ3 zJQ_3)r*V(J3{Lp9pMbprfOiCUvM6V3+;o4OVx0PDO(87w`+V&(55(h}B}>_t zi@tau9z2RR?b;O*5AN2!N&xFxEs!6=xR8MEQ-SogC%!09^kW?cE^@h+`T4&9@h|`p z&o2a|sJc)nCe!yY?c3mrcMs4UdUrQ!T74f=r(gm9j_rwT*N&Z!6uE4%L&wm0<*Ij0bcldEF0SHJ@y!rQTy?uTv>XJ2X zy1_{|UmMc2ye|u+nO3|hykY&%MdIOITDkNgd4oB6l`-5>y$FI}P; z(07ZB>B9N*?7itT*oJlMb=_N^d_0@+0d{VlEahEf7&zKbz_9V5q{cdoi|yzC6@A!z zvau76#YI3VuIz{?H$$5zvAMf9u+&t$KTgBD4io`2z|5QDZf4483T~m~`^jcYcIo0p z#;w8JCjkkd*;%Ei^#bEq0=kwjpIs&87Z+dOpV=!dhd7oDVskVBw{L+YXs6oAOsN`d z_kak`f_{mwow*TZ@O!jUkQ-mWhjVx>$*8X{ZDC9%CC9$;CQH)t7(X;OTDO#3YJ(l7 z73Ax;m5pp=_uHFkv^aM>0k>BXyO8mwh_}yTyW-FX=C6dh=`W6mOhj>#647QmD*U!IjITSgb|RB}Oh5fw>#SN#DwJ$VMD|0zMzrbSJa-W~y#gphBTw9lLb4sAyfEd0OZ1PF61a_inmHVc@9< z0(^Q^dX|3YLkW(On-$Zw) zSxkxHEK&m45X;hvnUi>(Wo1^h)O$((_^yk5_eFOwK1w~{4Yv~xUUWWR7gMe*xCxx&1p?w6u0_ZC z`R=tWEwx9{7j1KOClTQ-k=VNB+4Hr^GL7QVdCO_k-SySU+wAL8w^=q?9wpx}d~lq% zIOq#cZntw7pV#i|_aTYLt0jYg4`&g8fH;e6QSr{-t6>=#Nh~MZvgt-e#ZiQt+*`wB zUg{#Qh`@=uN)$3hhFq!MDpx39q88X=IoTP1(xa`|Wq`gUBrPlYv)Ta~7C?(VcuRwS z5MTB}(`j9d^BPB;mn_xH3T3auHjrbmASirqGjs63Om|=ru&$P zAHCSSa@jg{CCO#-TxwlhlgpLcWD1>N+$*UxszrMpLy$K3RnL;tmQEjVw1WM!miNO>HVMURWfod?E? zs!WaMI#X-HNIb3=bs28bB!F31w$|H%ePJ}M_h))Ady%F2t6CM)<|}9(tDMg5ezKN0 zE~1;oN&ggdIdOHML}j@$nLJY_m*1n~n@lc0M8|6~UJfgIuUs3-?!EM2hup064f!&y zNC$0fD3l9>=r*h3Gpe!+^$NN$2LdGJ>l-MU1eHX zGPNR(()BYmE^);uj8hN2mmY}C&&f{6%ge2zmGdDI7^?_S>yLL+vRq|-0QpA8LE{$FSruPU_yz2qMrxz1K!DAA(Y&}-SPo$3T@2y-vOEb+A)e9tr9MMcLRX*8N!wEa&6&9uTzBk~)-UI8im zq!4*wkv-c9Y(IbXi5)jHMyX`7j+K0r)hl_cyvup3WZoneCG|;F^3|}&ioR_3jWpXc z;Lag+T~ss$m(Qk>@`pGaQAfVMTbN4Sg$Bz#R6QL5Q95y83`9D0^+-s?x{RQjtGGL2F}6Q)X%iASO@Uwvd}g zpK#HjZ<**s!=2Y{SX#;a6P~_)6R68-wxvPfPj~|Mwm={t4ywc7PMr(P%+vlxkLfVJ zO-DwaR!gg{^K)q%ql{c_9=Y-1(_Ff)n?FU{deH1yS-y-$(&^!@n@LNLBsrgL%d^C% zRe961pC3`HK~9UimEN?vPbFt^H3bONC}rjSePtT8>n1~u+k2jEzwW`U5S?i7mIhh= zp?6(PI_L(Hax@yR8_dhA4Ru)^mvS*YG4Bu%5!E`0h|i6Ty!{ou6?I*eE&d%2UoS7_ z3L&{k-=65S$-}y}4c$|Gl{i`&g>P%#s`d5zmiDuHvG4_)NriyD0)p>>IIyPMJy9c_ z#G}!rwA@n;jMi*ad3kv(IW3Ldp%0CxXJ%4+#(A&B6?`;oKn>Gb$7Zu9$%|TI%2$Tx z)2Ctc?xpI!Bnj~_sY;bPmH7vBf_Rv4@vu{LVL73~sU@O5m7H_ri2zj{Cq!>HO8%`4 zwMxajsB4S=I@+YuTX@vQlkW8$4@1`!AJgEi3@3`PqwB*sz69-5<5MRd^QMQ7?^gO$ zXLlMccgW{DotRrhT>=4n-#VcpB>=YdGAW7TU{coKp=C6ow#H#^IQ|h14Jirl<5DRd zH|uWG#UwWApKJDJPq^@0HE7C|ZrT)A4r+G5SU^CnPGR0Y18KlxqXoX5Sx)H+e=#Y9D;@q9$y z8gqkI7LJ* z98?`jWe)T4na+ksOm|R>h(?42ARr>Fl;W~9WYghZ9V>gUuIQ~4QCpyrvpHgg& z2LUBP00JV3YI>N-*iDD(gqGGEu3uWix8s$(IdLzOjoEdX(W*(^_Y7$fWWiUl%=Xuj zXYQYigT8x7cO&b?Lh?|gS^#gKGfb`K!f{ZlML@o-BLD%hj+ZEWPfa|m&SLTtovQi< zi6TRGW@7Exdu+}jfx5En%sYY;0gWb!c0e3S!m$iIJc|GXM8fqQv-$9%-qn0##X2rx z4chnrxz2vQh<)F!VJ5g=l`5Tt<8AtA$1K6~f(!^k00JTicE=?ev-wEJ@N(*nHT~6& zTd3i<>{%RBE{$1C{QVL;d^^M9Fz_f40hKC?gkuiPl6W0k=L%E@B1skkB9e-*CmSE_ zJ?eEmOM9eq72n!shYsvX#06oKv-8-f9T(Z<)NB`|h9s;epd{h&_FBjk%JEpwVKqIs z3;+RfqiTJ5)8WaTD|x?5AMKFLEG<*~H`8<2=$#kYtqiO;Gi4*#xk{bJyfmNDEVeh; z$W~Z(FIYv0FJTCX`0B*29L?HutV~uu+aFocySiNw2^jc7N;Vs}>ms|Cg=dw3)xZbG z@6sZ`59mW3?+Ka@BraM45D?K;jeQv#y7kDIh_afG9`{pOw?y2&WE~dzTyhqhu=^53 z#9>4&jEMJvdU>6uPXoS5!hu;;7Gm>MZx9eSuKBO_pHTmolDoNWMeo)ch4gy0AR;cj zIr(P+y}WkN0>Du;9ru_EQ$|t}CIA7EaOGK=^_lyQH_9z<8>A6%M6wHUS8Tvk8kR{xQG(`&_*M)xvs#YF`m$ ztHi9_&B|qy_grD8Xw^E50|@ZehQ@0jZL9nS8y24EfEW>lhZ2B*cwoi9z30DiK|acN zAM;guNu~H1x%q6y-`CilYj{bvsm}P=hQ{kMZK1q`wttyHp96#%+XNDUj0r$MjA?k# zF@3N~w*5xtq-x&sw`vEdy`@lezWvo#$8NLlPTbLLg(ZbN3!X=f<_-;rucj`lX*9NR zMezI}18zkC0^(NHX^&Y`PA&gG`i^dC+hTBSs9jf6*!%mhv$P!AZ5O660p8M3 ztM8I%Y@%(CXHXZ_zlA;E3*VFU92ri|n?=8~rju;^GLG|f> zU1i4|y%oG60V)0JL_>3px~G0-O64pX3_l~K>?5zzB_LG+NzhJ}E_6xcocg!|cHheU zxQ4ItxvDoK$&ocb0O=|UVntQu~_QC&z`lzrSrDxp%qA0CY zRYVlIbOk`+_1eS$SIa<*{3A^_hDt zPyeMU5oOQ%tjOj0G?;ON`lbFPyPWI_7SjYPVyq{L5Kbfj0dXQ+ca55h$*Gjwc9+s~ z{#VUg(X6(=+MD;qi@}_|pT%a=7pnffBeA{VRjN$tjyg|Mrhcc7|NcmOiEI%APvijv z%n^Wqn4^Ph77-vpDZBetR=iTC9az;{5%id^N+qz6v`~7RNSIg#c3v2kXXoeFc}HP(B|nb%?lC(0VffF zfH(;(qAg6||NcBt>&U(AI6sA~bvcc^R%LHRDVpJ>a>X}!lc*K)_3=Awl1ThP?tik_rdbY43lV{U2nj$yL|7@Fl%;Wr$Eu!7&FSZ( zkTvsD%IXBFWR**)rtnuOWHeyUln!XX+@vp6@ulp)6elFS)xZa@WwKoQ z%_1R4qu={%?W61<00JOj2LT9(9qI5JlQm;Ye9|S-E1Oy@g z0TBp1k_7<}00F5JfPhH729bXd009sXhyVmcAn-^Q1V8`;q)q?=BJ~!3mK51f)&?0wVPqME*el1VBI_ z0uT^^z#~}@009t?IspiX)N2s=2LTWO0f7iWKm-DhWI+G~KtSpQARtn&LF69DM7j00JNY0)h~LfCz#ei3(33K|59WOdtm!AbkQ*5$V?dd5|BF?0XxIq8}K){U& zKtSBMa#1u8009tiCIJYDGwC935C8!XaAN`x5I3$|6b%GG00f*#00QDnx`-PDKmY{X zm;eOCjVl*L0|5{K0cR3`fH;#b;syZ_00B2900D91%0!3m zz>Nt&K-{=;Q8W+$0T6H|0SJgQ=^}0r009tiV*(HmH?CY14Fo^{1e{3#0^&@%h#Lez z00i8a00hL1D;Grr0T2KIXA*#bIFl~o1_2NN0XHT90deEXMbSV21VF%<1Rx;Jq>H#g z00cn5jR`0T6Iw0uT^4u3Qui1V8`;oJjxz;!L`T z+j;^C+NstDLI@B50l^4BMFc~Sgh2oVKtS3AARyANKja<+KmY^;BLD#r3_TJC0T2KI zX%m2eNW1=!dk_Et5D<(21Vk|ONEie_00g8>00JWI`a|wP00cllFai(|!O$aN5C8!X zkTwAbh_veuxd#Cd00F@WKtKdTkAy)01VBLA1Rx;Nu0P}+1V8`;1S0?e5ez*N1_2NN z0cjI}fJnRkkb4jS0T2+300cxZ^hg*4KmY`!O#lKS?fOIRK>!3mKrjLj5W&zRVGsZT z5Rf(j2#B=n54i^c5C8$e2tYstLyv?(00cll+5{jV(yl+`9t1!D1Oy`h0TB#65(WVf z00C(efPhH5{*ZeR00D^-nB6gcvqZCAINMN>!aQ&W0w4eaAYdT@2#AHC;0**o00cmw zFaZciVIDXG0T2KI5U`K{1jIs6@CE`P00JOTm;eN%Fb|x800@8p2v|q}0%9R3cmn|t z009svOaKB>m 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + fi + + debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)" + debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" + debDate="$(date --rfc-2822)" + + # if go-md2man is available, pre-generate the man pages + make manpages + + builderDir="contrib/builder/deb/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-deb:$version" + if ! docker inspect "$image" &> /dev/null; then + ( + # Add the APT_MIRROR args only if the consuming Dockerfile uses it + # Otherwise this will cause the build to fail + if [ "$(grep 'ARG APT_MIRROR=' $dir/Dockerfile)" ] && [ "$BUILD_APT_MIRROR" ]; then + DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS $BUILD_APT_MIRROR" + fi + set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" + ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + WORKDIR /usr/src/docker + COPY . /usr/src/docker + ENV DOCKER_GITCOMMIT $GITCOMMIT + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \ + && ln -snf /usr/src/docker /go/src/github.com/docker/docker + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN cp -aL hack/make/.build-deb debian + RUN { echo '$debSource (${debVersion}-0~${version}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog + RUN dpkg-buildpackage -uc -us -I.git + EOF + tempImage="docker-temp/build-deb:$version" + ( set -x && docker build -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . ) + docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/build-integration-test-binary b/vendor/github.com/docker/docker/hack/make/build-integration-test-binary new file mode 100644 index 0000000..2039be4 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/build-integration-test-binary @@ -0,0 +1,11 @@ +#!/bin/bash +set -e + +rm -rf "$DEST" +DEST="$DEST/../test-integration-cli" + +if [ -z $DOCKER_INTEGRATION_TESTS_VERIFIED ]; then + source ${MAKEDIR}/.integration-test-helpers + ensure_test_dir integration-cli "$DEST/test.main" + export DOCKER_INTEGRATION_TESTS_VERIFIED=1 +fi diff --git a/vendor/github.com/docker/docker/hack/make/build-rpm b/vendor/github.com/docker/docker/hack/make/build-rpm new file mode 100644 index 0000000..7fec059 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/build-rpm @@ -0,0 +1,148 @@ +#!/bin/bash +set -e + +# subshell so that we can export PATH and TZ without breaking other things +( + export TZ=UTC # make sure our "date" variables are UTC-based + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" + source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch" + + # TODO consider using frozen images for the dockercore/builder-rpm tags + + rpmName=docker-engine + rpmVersion="$VERSION" + rpmRelease=1 + + # rpmRelease versioning is as follows + # Docker 1.7.0: version=1.7.0, release=1 + # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1 + # Docker 1.7.0-cs1: version=1.7.0.cs1, release=1 + # Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1 + # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH + + # if we have a "-rc*" suffix, set appropriate release + if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then + rcVersion=${rpmVersion#*-rc} + rpmVersion=${rpmVersion%-rc*} + rpmRelease="0.${rcVersion}.rc${rcVersion}" + fi + + DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported" + fi + + # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better + if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + gitUnix="$(git log -1 --pretty='%at')" + gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" + gitCommit="$(git log -1 --pretty='%h')" + gitVersion="${gitDate}.git${gitCommit}" + # gitVersion is now something like '20150128.112847.17e840a' + rpmVersion="${rpmVersion%-dev}" + rpmRelease="0.0.$gitVersion" + fi + + # Replace any other dashes with periods + rpmVersion="${rpmVersion/-/.}" + + rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)" + rpmDate="$(date +'%a %b %d %Y')" + + # if go-md2man is available, pre-generate the man pages + make manpages + + # Convert the CHANGELOG.md file into RPM changelog format + VERSION_REGEX="^\W\W (.*) \((.*)\)$" + ENTRY_REGEX="^[-+*] (.*)$" + while read -r line || [[ -n "$line" ]]; do + if [ -z "$line" ]; then continue; fi + if [[ "$line" =~ $VERSION_REGEX ]]; then + echo >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + if [[ "$line" =~ $ENTRY_REGEX ]]; then + echo "- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + done < CHANGELOG.md + + builderDir="contrib/builder/rpm/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-rpm:$version" + if ! docker inspect "$image" &> /dev/null; then + ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + COPY . /usr/src/${rpmName} + WORKDIR /usr/src/${rpmName} + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN TMP_GOPATH="/go" ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN mkdir -p /root/rpmbuild/SOURCES \ + && echo '%_topdir /root/rpmbuild' > /root/.rpmmacros + WORKDIR /root/rpmbuild + RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS + WORKDIR /root/rpmbuild/SPECS + RUN tar --exclude .git -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName} + RUN tar --exclude .git -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd + RUN tar --exclude .git -r -C /go/src/github.com/docker/libnetwork/cmd -f /root/rpmbuild/SOURCES/${rpmName}.tar proxy + RUN tar --exclude .git -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc + RUN tar --exclude .git -r -C /go/ -f /root/rpmbuild/SOURCES/${rpmName}.tar tini + RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar + RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + --define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \ + ${rpmName}.spec + EOF + # selinux policy referencing systemd things won't work on non-systemd versions + # of centos or rhel, which we don't support anyways + if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then + selinuxDir="selinux" + if [ -d "./contrib/selinux-$version" ]; then + selinuxDir="selinux-${version}" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + ${rpmName}-selinux.spec + EOF + fi + tempImage="docker-temp/build-rpm:$version" + ( set -x && docker build -t "$tempImage" -f $DEST/$version/Dockerfile.build . ) + docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" +) 2>&1 | tee -a $DEST/test.log diff --git a/vendor/github.com/docker/docker/hack/make/clean-apt-repo b/vendor/github.com/docker/docker/hack/make/clean-apt-repo new file mode 100755 index 0000000..1c37d98 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/clean-apt-repo @@ -0,0 +1,43 @@ +#!/bin/bash +set -e + +# This script cleans the experimental pool for the apt repo. +# This is useful when there are a lot of old experimental debs and you only want to keep the most recent. +# + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo/pool/experimental +: ${DOCKER_ARCHIVE_DIR:=$DEST/archive} +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +latest_versions=$(dpkg-scanpackages "$APTDIR" /dev/null 2>/dev/null | awk -F ': ' '$1 == "Filename" { print $2 }') + +# get the latest version +latest_docker_engine_file=$(echo "$latest_versions" | grep docker-engine) +latest_docker_engine_version=$(basename ${latest_docker_engine_file%~*}) + +echo "latest docker-engine version: $latest_docker_engine_version" + +# remove all the files that are not that version in experimental +pool_dir=$(dirname "$latest_docker_engine_file") +old_pkgs=( $(ls "$pool_dir" | grep -v "^${latest_docker_engine_version}" | grep "${latest_docker_engine_version%%~git*}") ) + +echo "${old_pkgs[@]}" + +mkdir -p "$DOCKER_ARCHIVE_DIR" +for old_pkg in "${old_pkgs[@]}"; do + echo "moving ${pool_dir}/${old_pkg} to $DOCKER_ARCHIVE_DIR" + mv "${pool_dir}/${old_pkg}" "$DOCKER_ARCHIVE_DIR" +done + +echo +echo "$pool_dir now has contents:" +ls "$pool_dir" + +# now regenerate release files for experimental +export COMPONENT=experimental +source "${DIR}/update-apt-repo" + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/docker/docker/hack/make/clean-yum-repo b/vendor/github.com/docker/docker/hack/make/clean-yum-repo new file mode 100755 index 0000000..1cafbbd --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/clean-yum-repo @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +# This script cleans the experimental pool for the yum repo. +# This is useful when there are a lot of old experimental rpms and you only want to keep the most recent. +# + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo/experimental + +suites=( $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d) ) + +for suite in "${suites[@]}"; do + echo "cleanup in: $suite" + ( set -x; repomanage -k2 --old "$suite" | xargs rm -f ) +done + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/docker/docker/hack/make/cover b/vendor/github.com/docker/docker/hack/make/cover new file mode 100644 index 0000000..08e28e3 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/cover @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +bundle_cover() { + coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) + for p in "${coverprofiles[@]}"; do + echo + ( + set -x + go tool cover -func="$p" + ) + done +} + +bundle_cover 2>&1 | tee "$DEST/report.log" diff --git a/vendor/github.com/docker/docker/hack/make/cross b/vendor/github.com/docker/docker/hack/make/cross new file mode 100644 index 0000000..6d672b1 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/cross @@ -0,0 +1,46 @@ +#!/bin/bash +set -e + +# explicit list of os/arch combos that support being a daemon +declare -A daemonSupporting +daemonSupporting=( + [linux/amd64]=1 + [windows/amd64]=1 +) + +# if we have our linux/amd64 version compiled, let's symlink it in +if [ -x "$DEST/../binary-daemon/dockerd-$VERSION" ]; then + arch=$(go env GOHOSTARCH) + mkdir -p "$DEST/linux/${arch}" + ( + cd "$DEST/linux/${arch}" + ln -s ../../../binary-daemon/* ./ + ln -s ../../../binary-client/* ./ + ) + echo "Created symlinks:" "$DEST/linux/${arch}/"* +fi + +for platform in $DOCKER_CROSSPLATFORMS; do + ( + export KEEPDEST=1 + export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + export GOOS=${platform%/*} + export GOARCH=${platform##*/} + + if [ "$GOOS" != "solaris" ]; then + # TODO. Solaris cannot be cross build because of CGO calls. + if [ -z "${daemonSupporting[$platform]}" ]; then + # we just need a simple client for these platforms + export LDFLAGS_STATIC_DOCKER="" + # remove the "daemon" build tag from platforms that aren't supported + export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) + source "${MAKEDIR}/binary-client" + else + source "${MAKEDIR}/binary-client" + source "${MAKEDIR}/binary-daemon" + fi + fi + ) +done diff --git a/vendor/github.com/docker/docker/hack/make/dynbinary b/vendor/github.com/docker/docker/hack/make/dynbinary new file mode 100644 index 0000000..1a435dc --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/dynbinary @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +# This script exists as backwards compatibility for CI +( + DEST="${DEST}-client" + ABS_DEST="${ABS_DEST}-client" + . hack/make/dynbinary-client +) +( + + DEST="${DEST}-daemon" + ABS_DEST="${ABS_DEST}-daemon" + . hack/make/dynbinary-daemon +) diff --git a/vendor/github.com/docker/docker/hack/make/dynbinary-client b/vendor/github.com/docker/docker/hack/make/dynbinary-client new file mode 100644 index 0000000..e4b7741 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/dynbinary-client @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +( + export BINARY_SHORT_NAME='docker' + export GO_PACKAGE='github.com/docker/docker/cmd/docker' + export IAMSTATIC='false' + export LDFLAGS_STATIC_DOCKER='' + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here + source "${MAKEDIR}/.binary" +) diff --git a/vendor/github.com/docker/docker/hack/make/dynbinary-daemon b/vendor/github.com/docker/docker/hack/make/dynbinary-daemon new file mode 100644 index 0000000..090a916 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/dynbinary-daemon @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +( + export BINARY_SHORT_NAME='dockerd' + export GO_PACKAGE='github.com/docker/docker/cmd/dockerd' + export IAMSTATIC='false' + export LDFLAGS_STATIC_DOCKER='' + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here + source "${MAKEDIR}/.binary" +) diff --git a/vendor/github.com/docker/docker/hack/make/generate-index-listing b/vendor/github.com/docker/docker/hack/make/generate-index-listing new file mode 100755 index 0000000..ec44171 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/generate-index-listing @@ -0,0 +1,74 @@ +#!/bin/bash +set -e + +# This script generates index files for the directory structure +# of the apt and yum repos + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt +YUMDIR=$DOCKER_RELEASE_DIR/yum + +if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then + echo >&2 'release-rpm or release-deb must be run before generate-index-listing' + exit 1 +fi + +create_index() { + local directory=$1 + local original=$2 + local cleaned=${directory#$original} + + # the index file to create + local index_file="${directory}/index" + + # cd into dir & touch the index file + cd $directory + touch $index_file + + # print the html header + cat <<-EOF > "$index_file" + + + Index of ${cleaned}/ + +

Index of ${cleaned}/


+
../
+	EOF
+
+	# start of content output
+	(
+	# change IFS locally within subshell so the for loop saves line correctly to L var
+	IFS=$'\n';
+
+	# pretty sweet, will mimick the normal apache output. skipping "index" and hidden files
+	for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' ! -name '.*' -prune -printf "%f|@_@%Td-%Tb-%TY %Tk:%TM  @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,\1,g');
+	do
+		# file
+		F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");
+
+		# file with file size
+		F=$(du -bh $F | cut -f1);
+
+		# output with correct format
+		sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
+	done;
+	) >> $index_file;
+
+	# now output a list of all directories in this dir (maxdepth 1) other than '.' outputting in a sorted manner exactly like apache
+	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "%-43f@_@%Td-%Tb-%TY %Tk:%TM  -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/\1,g' >> $index_file
+
+	# print the footer html
+	echo "

" >> $index_file + +} + +get_dirs() { + local directory=$1 + + for d in `find ${directory} -type d`; do + create_index $d $directory + done +} + +get_dirs $APTDIR +get_dirs $YUMDIR diff --git a/vendor/github.com/docker/docker/hack/make/install-binary b/vendor/github.com/docker/docker/hack/make/install-binary new file mode 100644 index 0000000..82cbc79 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/install-binary @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +( + source "${MAKEDIR}/install-binary-client" +) + +( + source "${MAKEDIR}/install-binary-daemon" +) diff --git a/vendor/github.com/docker/docker/hack/make/install-binary-client b/vendor/github.com/docker/docker/hack/make/install-binary-client new file mode 100644 index 0000000..6c80452 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/install-binary-client @@ -0,0 +1,10 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +( + DEST="$(dirname $DEST)/binary-client" + source "${MAKEDIR}/.binary-setup" + install_binary "${DEST}/${DOCKER_CLIENT_BINARY_NAME}" +) diff --git a/vendor/github.com/docker/docker/hack/make/install-binary-daemon b/vendor/github.com/docker/docker/hack/make/install-binary-daemon new file mode 100644 index 0000000..08a2d69 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/install-binary-daemon @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +( + DEST="$(dirname $DEST)/binary-daemon" + source "${MAKEDIR}/.binary-setup" + install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}" +) diff --git a/vendor/github.com/docker/docker/hack/make/install-script b/vendor/github.com/docker/docker/hack/make/install-script new file mode 100644 index 0000000..feadac2 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/install-script @@ -0,0 +1,63 @@ +#!/bin/bash +set -e + +# This script modifies the install.sh script for domains and keys other than +# those used by the primary opensource releases. +# +# You can provide `url`, `yum_url`, `apt_url` and optionally `gpg_fingerprint` +# or `GPG_KEYID` as environment variables, or the defaults for open source are used. +# +# The lower-case variables are substituted into install.sh. +# +# gpg_fingerprint and GPG_KEYID are optional, defaulting to the opensource release +# key ("releasedocker"). Other GPG_KEYIDs will require you to mount a volume with +# the correct contents to /root/.gnupg. +# +# It outputs the modified `install.sh` file to $DOCKER_RELEASE_DIR (default: $DEST) +# +# Example usage: +# +# docker run \ +# --rm \ +# --privileged \ +# -e "GPG_KEYID=deadbeef" \ +# -e "GNUPGHOME=/root/.gnupg" \ +# -v $HOME/.gnupg:/root/.gnupg \ +# -v $(pwd):/go/src/github.com/docker/docker/bundles \ +# "$IMAGE_DOCKER" \ +# hack/make.sh install-script + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} + +DEFAULT_URL="https://get.docker.com/" +DEFAULT_APT_URL="https://apt.dockerproject.org" +DEFAULT_YUM_URL="https://yum.dockerproject.org" +DEFAULT_GPG_FINGERPRINT="58118E89F3A912897C070ADBF76221572C52609D" + +: ${url:=$DEFAULT_URL} +: ${apt_url:=$DEFAULT_APT_URL} +: ${yum_url:=$DEFAULT_YUM_URL} +if [[ "$GPG_KEYID" == "releasedocker" ]] ; then + : ${gpg_fingerprint:=$DEFAULT_GPG_FINGERPRINT} +fi + +DEST_FILE="$DOCKER_RELEASE_DIR/install.sh" + +bundle_install_script() { + mkdir -p "$DOCKER_RELEASE_DIR" + + if [[ -z "$gpg_fingerprint" ]] ; then + # NOTE: if no key matching key is in /root/.gnupg, this will fail + gpg_fingerprint=$(gpg --with-fingerprint -k "$GPG_KEYID" | grep "Key fingerprint" | awk -F "=" '{print $2};' | tr -d ' ') + fi + + cp hack/install.sh "$DEST_FILE" + sed -i.bak 's#^url=".*"$#url="'"$url"'"#' "$DEST_FILE" + sed -i.bak 's#^apt_url=".*"$#apt_url="'"$apt_url"'"#' "$DEST_FILE" + sed -i.bak 's#^yum_url=".*"$#yum_url="'"$yum_url"'"#' "$DEST_FILE" + sed -i.bak 's#^gpg_fingerprint=".*"$#gpg_fingerprint="'"$gpg_fingerprint"'"#' "$DEST_FILE" + rm "${DEST_FILE}.bak" +} + +bundle_install_script diff --git a/vendor/github.com/docker/docker/hack/make/release-deb b/vendor/github.com/docker/docker/hack/make/release-deb new file mode 100755 index 0000000..ed65fe2 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/release-deb @@ -0,0 +1,163 @@ +#!/bin/bash +set -e + +# This script creates the apt repos for the .deb files generated by hack/make/build-deb +# +# The following can then be used as apt sources: +# deb http://apt.dockerproject.org/repo $distro-$release $version +# +# For example: +# deb http://apt.dockerproject.org/repo ubuntu-trusty main +# deb http://apt.dockerproject.org/repo ubuntu-trusty testing +# deb http://apt.dockerproject.org/repo debian-wheezy experimental +# deb http://apt.dockerproject.org/repo debian-jessie main +# +# ... and so on and so forth for the builds created by hack/make/build-deb + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# setup the apt repo (if it does not exist) +mkdir -p "$APTDIR/conf" "$APTDIR/db" "$APTDIR/dists" + +# supported arches/sections +arches=( amd64 i386 armhf ) + +# Preserve existing components but don't add any non-existing ones +for component in main testing experimental ; do + exists=$(find "$APTDIR/dists" -mindepth 2 -maxdepth 2 -type d -name "$component" -print -quit) + if [ -n "$exists" ] ; then + components+=( $component ) + fi +done + +# set the component for the version being released +component="main" + +if [[ "$VERSION" == *-rc* ]]; then + component="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + component="experimental" +fi + +# Make sure our component is in the list of components +if [[ ! "${components[*]}" =~ $component ]] ; then + components+=( $component ) +fi + +# create apt-ftparchive file on every run. This is essential to avoid +# using stale versions of the config file that could cause unnecessary +# refreshing of bits for EOL-ed releases. +cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf" +Dir { + ArchiveDir "${APTDIR}"; + CacheDir "${APTDIR}/db"; +}; + +Default { + Packages::Compress ". gzip bzip2"; + Sources::Compress ". gzip bzip2"; + Contents::Compress ". gzip bzip2"; +}; + +TreeDefault { + BinCacheDB "packages-\$(SECTION)-\$(ARCH).db"; + Directory "pool/\$(SECTION)"; + Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages"; + SrcDirectory "pool/\$(SECTION)"; + Sources "\$(DIST)/\$(SECTION)/source/Sources"; + Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)"; + FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist"; +}; +EOF + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + suite="${version//debootstrap-}" + + cat <<-EOF + Tree "dists/${suite}" { + Sections "${components[*]}"; + Architectures "${arches[*]}"; + } + + EOF +done >> "$APTDIR/conf/apt-ftparchive.conf" + +cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf" +APT::FTPArchive::Release::Origin "Docker"; +APT::FTPArchive::Release::Components "${components[*]}"; +APT::FTPArchive::Release::Label "Docker APT Repository"; +APT::FTPArchive::Release::Architectures "${arches[*]}"; +EOF + +# release the debs +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + tempdir="$(mktemp -d /tmp/tmp-docker-release-deb.XXXXXXXX)" + DEBFILE=( "$dir/docker-engine"*.deb ) + + # add the deb for each component for the distro version into the + # pool (if it is not there already) + mkdir -p "$APTDIR/pool/$component/d/docker-engine/" + for deb in ${DEBFILE[@]}; do + d=$(basename "$deb") + # We do not want to generate a new deb if it has already been + # copied into the APTDIR + if [ ! -f "$APTDIR/pool/$component/d/docker-engine/$d" ]; then + cp "$deb" "$tempdir/" + # if we have a $GPG_PASSPHRASE we may as well + # dpkg-sign before copying the deb into the pool + if [ ! -z "$GPG_PASSPHRASE" ]; then + dpkg-sig -g "--no-tty --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE'" \ + -k "$GPG_KEYID" --sign builder "$tempdir/$d" + fi + mv "$tempdir/$d" "$APTDIR/pool/$component/d/docker-engine/" + fi + done + + rm -rf "$tempdir" + + # build the right directory structure, needed for apt-ftparchive + for arch in "${arches[@]}"; do + for c in "${components[@]}"; do + mkdir -p "$APTDIR/dists/$codename/$c/binary-$arch" + done + done + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename}*.deb -o \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" +done + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Components=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done +done diff --git a/vendor/github.com/docker/docker/hack/make/release-rpm b/vendor/github.com/docker/docker/hack/make/release-rpm new file mode 100755 index 0000000..d7e3ec4 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/release-rpm @@ -0,0 +1,71 @@ +#!/bin/bash +set -e + +# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm +# +# The following can then be used as a yum repo: +# http://yum.dockerproject.org/repo/$release/$distro/$distro-version +# +# For example: +# http://yum.dockerproject.org/repo/main/fedora/23 +# http://yum.dockerproject.org/repo/testing/centos/7 +# http://yum.dockerproject.org/repo/experimental/fedora/23 +# http://yum.dockerproject.org/repo/main/centos/7 +# +# ... and so on and so forth for the builds created by hack/make/build-rpm + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo +: ${GPG_KEYID:=releasedocker} + +# get the release +release="main" + +if [[ "$VERSION" == *-rc* ]]; then + release="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + release="experimental" +fi + +# Setup the yum repo +for dir in bundles/$VERSION/build-rpm/*/; do + version="$(basename "$dir")" + suite="${version##*-}" + distro="${version%-*}" + + REPO=$YUMDIR/$release/$distro + + # if the directory does not exist, initialize the yum repo + if [[ ! -d $REPO/$suite/Packages ]]; then + mkdir -p "$REPO/$suite/Packages" + + createrepo --pretty "$REPO/$suite" + fi + + # path to rpms + RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm ) + + # if we have a $GPG_PASSPHRASE we may as well + # sign the rpms before adding to repo + if [ ! -z $GPG_PASSPHRASE ]; then + # export our key to rpm import + gpg --armor --export "$GPG_KEYID" > /tmp/gpg + rpm --import /tmp/gpg + + # sign the rpms + echo "yes" | setsid rpm \ + --define "_gpg_name $GPG_KEYID" \ + --define "_signature gpg" \ + --define "__gpg_check_password_cmd /bin/true" \ + --define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \ + --resign "${RPMFILE[@]}" + fi + + # copy the rpms to the packages folder + cp "${RPMFILE[@]}" "$REPO/$suite/Packages" + + # update the repo + createrepo --pretty --update "$REPO/$suite" +done diff --git a/vendor/github.com/docker/docker/hack/make/run b/vendor/github.com/docker/docker/hack/make/run new file mode 100644 index 0000000..37cfd53 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/run @@ -0,0 +1,44 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +if ! command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before run' + false +fi + +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + + +listen_port=2375 +if [ -n "$DOCKER_PORT" ]; then + IFS=':' read -r -a ports <<< "$DOCKER_PORT" + listen_port="${ports[-1]}" +fi + +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +args="--debug \ + --host tcp://0.0.0.0:${listen_port} --host unix:///var/run/docker.sock \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params" + +echo dockerd $args +exec dockerd $args diff --git a/vendor/github.com/docker/docker/hack/make/sign-repos b/vendor/github.com/docker/docker/hack/make/sign-repos new file mode 100755 index 0000000..6ed1606 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/sign-repos @@ -0,0 +1,65 @@ +#!/bin/bash + +# This script signs the deliverables from release-deb and release-rpm +# with a designated GPG key. + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo + +if [ -z "$GPG_PASSPHRASE" ]; then + echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts' + exit 1 +fi + +if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then + echo >&2 'release-rpm or release-deb must be run before sign-repos' + exit 1 +fi + +sign_packages(){ + # sign apt repo metadata + if [ -d $APTDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg" + + # sign the repo metadata + for F in $(find $APTDIR -name Release); do + if test "$F" -nt "$F.gpg" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.gpg" "$F" + fi + inRelease="$(dirname "$F")/InRelease" + if test "$F" -nt "$inRelease" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --clearsign \ + --batch --yes \ + --output "$inRelease" "$F" + fi + done + fi + + # sign yum repo metadata + if [ -d $YUMDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg" + + # sign the repo metadata + for F in $(find $YUMDIR -name repomd.xml); do + if test "$F" -nt "$F.asc" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.asc" "$F" + fi + done + fi +} + +sign_packages diff --git a/vendor/github.com/docker/docker/hack/make/test-deb-install b/vendor/github.com/docker/docker/hack/make/test-deb-install new file mode 100755 index 0000000..aec5847 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-deb-install @@ -0,0 +1,71 @@ +#!/bin/bash +# This script is used for testing install.sh and that it works for +# each of component of our apt and yum repos +set -e + +: ${DEB_DIR:="$(pwd)/bundles/$(cat VERSION)/build-deb"} + +if [[ ! -d "${DEB_DIR}" ]]; then + echo "you must first run `make deb` or hack/make/build-deb" + exit 1 +fi + +test_deb_install(){ + # test for each Dockerfile in contrib/builder + + builderDir="contrib/builder/deb/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" + local dir=$(basename "$dir") + + if [[ ! -d "${DEB_DIR}/${dir}" ]]; then + echo "No deb found for ${dir}" + exit 1 + fi + + local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) + cat <<-EOF > "${script}" + #!/bin/bash + set -e + set -x + + apt-get update && apt-get install -y apparmor + + dpkg -i /root/debs/*.deb || true + + apt-get install -yf + + /etc/init.d/apparmor start + + # this will do everything _except_ load the profile into the kernel + ( + cd /etc/apparmor.d + /sbin/apparmor_parser --skip-kernel-load docker-engine + ) + EOF + + chmod +x "${script}" + + echo "testing deb install for ${from}" + docker run --rm -i --privileged \ + -v ${DEB_DIR}/${dir}:/root/debs \ + -v ${script}:/install.sh \ + ${from} /install.sh + + rm -f ${script} + done +} + +( + bundle .integration-daemon-start + test_deb_install + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/test-docker-py b/vendor/github.com/docker/docker/hack/make/test-docker-py new file mode 100644 index 0000000..fcacc16 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-docker-py @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + dockerPy='/docker-py' + [ -d "$dockerPy" ] || { + dockerPy="$DEST/docker-py" + git clone https://github.com/docker/docker-py.git "$dockerPy" + } + + # exporting PYTHONPATH to import "docker" from our local docker-py + test_env PYTHONPATH="$dockerPy" py.test --junitxml="$DEST/results.xml" "$dockerPy/tests/integration" + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/test-install-script b/vendor/github.com/docker/docker/hack/make/test-install-script new file mode 100755 index 0000000..4782cbe --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-install-script @@ -0,0 +1,31 @@ +#!/bin/bash +# This script is used for testing install.sh and that it works for +# each of component of our apt and yum repos +set -e + +test_install_script(){ + # these are equivalent to main, testing, experimental components + # in the repos, but its the url that will do the conversion + components=( experimental test get ) + + for component in "${components[@]}"; do + # change url to specific component for testing + local test_url=https://${component}.docker.com + local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) + sed "s,url='https://get.docker.com/',url='${test_url}/'," hack/install.sh > "${script}" + + chmod +x "${script}" + + # test for each Dockerfile in contrib/builder + for dir in contrib/builder/*/*/; do + local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" + + echo "running install.sh for ${component} with ${from}" + docker run --rm -i -v ${script}:/install.sh ${from} /install.sh + done + + rm -f ${script} + done +} + +test_install_script diff --git a/vendor/github.com/docker/docker/hack/make/test-integration-cli b/vendor/github.com/docker/docker/hack/make/test-integration-cli new file mode 100755 index 0000000..689a528 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-integration-cli @@ -0,0 +1,28 @@ +#!/bin/bash +set -e + +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + bundle .integration-daemon-setup + + bundle_test_integration_cli + + bundle .integration-daemon-stop + + if [ "$(go env GOOS)" != 'windows' ] + then + leftovers=$(ps -ax -o pid,cmd | awk '$2 == "docker-containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration-cli/ { print $1 }') + if [ -n "$leftovers" ] + then + ps aux + kill -9 $leftovers 2> /dev/null + echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!" + exit 1 + fi + fi + +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/test-integration-shell b/vendor/github.com/docker/docker/hack/make/test-integration-shell new file mode 100644 index 0000000..86df965 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-integration-shell @@ -0,0 +1,7 @@ +#!/bin/bash + +bundle .integration-daemon-start +bundle .integration-daemon-setup + +export ABS_DEST +bash +e diff --git a/vendor/github.com/docker/docker/hack/make/test-old-apt-repo b/vendor/github.com/docker/docker/hack/make/test-old-apt-repo new file mode 100755 index 0000000..bb20128 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-old-apt-repo @@ -0,0 +1,29 @@ +#!/bin/bash +set -e + +versions=( 1.3.3 1.4.1 1.5.0 1.6.2 ) + +install() { + local version=$1 + local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX) + local dockerfile="${tmpdir}/Dockerfile" + cat <<-EOF > "$dockerfile" + FROM debian:jessie + ENV VERSION ${version} + RUN apt-get update && apt-get install -y \ + apt-transport-https \ + ca-certificates \ + --no-install-recommends + RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list + RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \ + --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + RUN apt-get update && apt-get install -y \ + lxc-docker-\${VERSION} + EOF + + docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile $tmpdir +} + +for v in "${versions[@]}"; do + install "$v" +done diff --git a/vendor/github.com/docker/docker/hack/make/test-unit b/vendor/github.com/docker/docker/hack/make/test-unit new file mode 100644 index 0000000..f263345 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-unit @@ -0,0 +1,55 @@ +#!/bin/bash +set -e + +# Run Docker's test suite, including sub-packages, and store their output as a bundle +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +bundle_test_unit() { + TESTFLAGS+=" -test.timeout=${TIMEOUT}" + INCBUILD="-i" + count=0 + for flag in "${BUILDFLAGS[@]}"; do + if [ "${flag}" == ${INCBUILD} ]; then + unset BUILDFLAGS[${count}] + break + fi + count=$[ ${count} + 1 ] + done + + date + if [ -z "$TESTDIRS" ]; then + TEST_PATH=./... + else + TEST_PATH=./${TESTDIRS} + fi + + if [ "$(go env GOHOSTOS)" = 'solaris' ]; then + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/daemon/graphdriver \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli) + else + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli) + fi + + go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list +} + +bundle_test_unit 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/tgz b/vendor/github.com/docker/docker/hack/make/tgz new file mode 100644 index 0000000..3ccd93f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/tgz @@ -0,0 +1,92 @@ +#!/bin/bash + +CROSS="$DEST/../cross" + +set -e + +arch=$(go env GOHOSTARCH) +if [ ! -d "$CROSS/linux/${arch}" ]; then + echo >&2 'error: binary and cross must be run before tgz' + false +fi + +( +for d in "$CROSS/"*/*; do + export GOARCH="$(basename "$d")" + export GOOS="$(basename "$(dirname "$d")")" + + source "${MAKEDIR}/.binary-setup" + + BINARY_NAME="${DOCKER_CLIENT_BINARY_NAME}-$VERSION" + DAEMON_BINARY_NAME="${DOCKER_DAEMON_BINARY_NAME}-$VERSION" + PROXY_BINARY_NAME="${DOCKER_PROXY_BINARY_NAME}-$VERSION" + BINARY_EXTENSION="$(export GOOS && binary_extension)" + if [ "$GOOS" = 'windows' ]; then + # if windows use a zip, not tgz + BUNDLE_EXTENSION=".zip" + IS_TAR="false" + elif [ "$GOOS" == "solaris" ]; then + # Solaris bypasses cross due to CGO issues. + continue + else + BUNDLE_EXTENSION=".tgz" + IS_TAR="true" + fi + BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + DAEMON_BINARY_FULLNAME="$DAEMON_BINARY_NAME$BINARY_EXTENSION" + PROXY_BINARY_FULLNAME="$PROXY_BINARY_NAME$BINARY_EXTENSION" + mkdir -p "$DEST/$GOOS/$GOARCH" + TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME$BUNDLE_EXTENSION" + + # The staging directory for the files in the tgz + BUILD_PATH="$DEST/build" + + # The directory that is at the root of the tar file + TAR_BASE_DIRECTORY="docker" + + # $DEST/build/docker + TAR_PATH="$BUILD_PATH/$TAR_BASE_DIRECTORY" + + # Copy the correct docker binary + mkdir -p $TAR_PATH + cp -L "$d/$BINARY_FULLNAME" "$TAR_PATH/${DOCKER_CLIENT_BINARY_NAME}${BINARY_EXTENSION}" + if [ -f "$d/$DAEMON_BINARY_FULLNAME" ]; then + cp -L "$d/$DAEMON_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_DAEMON_BINARY_NAME}${BINARY_EXTENSION}" + fi + if [ -f "$d/$PROXY_BINARY_FULLNAME" ]; then + cp -L "$d/$PROXY_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_PROXY_BINARY_NAME}${BINARY_EXTENSION}" + fi + + # copy over all the extra binaries + copy_binaries $TAR_PATH + + # add completions + for s in bash fish zsh; do + mkdir -p $TAR_PATH/completion/$s + cp -L contrib/completion/$s/*docker* $TAR_PATH/completion/$s/ + done + + if [ "$IS_TAR" == "true" ]; then + echo "Creating tgz from $BUILD_PATH and naming it $TGZ" + tar --numeric-owner --owner 0 -C "$BUILD_PATH" -czf "$TGZ" $TAR_BASE_DIRECTORY + else + # ZIP needs to full absolute dir path, not the absolute path + ZIP=`pwd`"/$TGZ" + # keep track of where we are, for later. + pushd . + # go into the BUILD_PATH since zip does not have a -C equivalent. + cd $BUILD_PATH + echo "Creating zip from $BUILD_PATH and naming it $ZIP" + zip -q -r $ZIP $TAR_BASE_DIRECTORY + # go back to where we started + popd + fi + + hash_files "$TGZ" + + # cleanup after ourselves + rm -rf "$BUILD_PATH" + + echo "Created tgz: $TGZ" +done +) diff --git a/vendor/github.com/docker/docker/hack/make/ubuntu b/vendor/github.com/docker/docker/hack/make/ubuntu new file mode 100644 index 0000000..8de5d9c --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/ubuntu @@ -0,0 +1,190 @@ +#!/bin/bash + +PKGVERSION="${VERSION//-/'~'}" +# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + GIT_UNIX="$(git log -1 --pretty='%at')" + GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" + GIT_COMMIT="$(git log -1 --pretty='%h')" + GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" + # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' + PKGVERSION="$PKGVERSION~$GIT_VERSION" +fi + +# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false +# true + +# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + +PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" +PACKAGE_URL="https://www.docker.com/" +PACKAGE_MAINTAINER="support@docker.com" +PACKAGE_DESCRIPTION="Linux container runtime +Docker complements LXC with a high-level API which operates at the process +level. It runs unix processes with strong guarantees of isolation and +repeatability across servers. +Docker is a great building block for automating distributed systems: +large-scale web deployments, database clusters, continuous deployment systems, +private PaaS, service-oriented architectures, etc." +PACKAGE_LICENSE="Apache-2.0" + +# Build docker as an ubuntu package using FPM and REPREPRO (sue me). +# bundle_binary must be called first. +bundle_ubuntu() { + DIR="$ABS_DEST/build" + + # Include our udev rules + mkdir -p "$DIR/etc/udev/rules.d" + cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" + + # Include our init scripts + mkdir -p "$DIR/etc/init" + cp contrib/init/upstart/docker.conf "$DIR/etc/init/" + mkdir -p "$DIR/etc/init.d" + cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" + mkdir -p "$DIR/etc/default" + cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" + mkdir -p "$DIR/lib/systemd/system" + cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" + + # Include contributed completions + mkdir -p "$DIR/etc/bash_completion.d" + cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" + mkdir -p "$DIR/usr/share/zsh/vendor-completions" + cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" + mkdir -p "$DIR/etc/fish/completions" + cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" + + # Include man pages + make manpages + manRoot="$DIR/usr/share/man" + mkdir -p "$manRoot" + for manDir in man/man?; do + manBase="$(basename "$manDir")" # "man1" + for manFile in "$manDir"/*; do + manName="$(basename "$manFile")" # "docker-build.1" + mkdir -p "$manRoot/$manBase" + gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" + done + done + + # Copy the binary + # This will fail if the binary bundle hasn't been built + mkdir -p "$DIR/usr/bin" + cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker" + + # Generate postinst/prerm/postrm scripts + cat > "$DEST/postinst" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = 'configure' ] && [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi +fi + +if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then + # we only need to do this if upstart isn't in charge + update-rc.d docker defaults > /dev/null || true +fi +if [ -n "$2" ]; then + _dh_action=restart +else + _dh_action=start +fi +service docker $_dh_action 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/prerm" <<'EOF' +#!/bin/sh +set -e +set -u + +service docker stop 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/postrm" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = "purge" ] ; then + update-rc.d docker remove > /dev/null || true +fi + +# In case this system is running systemd, we make systemd reload the unit files +# to pick up changes. +if [ -d /run/systemd/system ] ; then + systemctl --system daemon-reload > /dev/null || true +fi + +#DEBHELPER# +EOF + # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way + chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + + ( + # switch directories so we create *.deb in the right folder + cd "$DEST" + + # create lxc-docker-VERSION package + fpm -s dir -C "$DIR" \ + --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ + --after-install "$ABS_DEST/postinst" \ + --before-remove "$ABS_DEST/prerm" \ + --after-remove "$ABS_DEST/postrm" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --prefix / \ + --depends iptables \ + --deb-recommends aufs-tools \ + --deb-recommends ca-certificates \ + --deb-recommends git \ + --deb-recommends xz-utils \ + --deb-recommends 'cgroupfs-mount | cgroup-lite' \ + --deb-suggests apparmor \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ + --conflicts lxc-docker-virtual-package \ + --provides lxc-docker \ + --provides lxc-docker-virtual-package \ + --replaces lxc-docker \ + --replaces lxc-docker-virtual-package \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ + --config-files /etc/init/docker.conf \ + --config-files /etc/init.d/docker \ + --config-files /etc/default/docker \ + --deb-compression gz \ + -t deb . + # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available + + # create empty lxc-docker wrapper package + fpm -s empty \ + --name lxc-docker --version "$PKGVERSION" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --depends lxc-docker-$VERSION \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --deb-compression gz \ + -t deb + ) + + # clean up after ourselves so we have a clean output directory + rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + rm -r "$DIR" +} + +bundle_ubuntu diff --git a/vendor/github.com/docker/docker/hack/make/update-apt-repo b/vendor/github.com/docker/docker/hack/make/update-apt-repo new file mode 100755 index 0000000..7354a2e --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/update-apt-repo @@ -0,0 +1,70 @@ +#!/bin/bash +set -e + +# This script updates the apt repo in $DOCKER_RELEASE_DIR/apt/repo. +# This script is a "fix all" for any sort of problems that might have occurred with +# the Release or Package files in the repo. +# It should only be used in the rare case of extreme emergencies to regenerate +# Release and Package files for the apt repo. +# +# NOTE: Always be sure to re-sign the repo with hack/make/sign-repos after running +# this script. + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# supported arches/sections +arches=( amd64 i386 ) + +# Preserve existing components but don't add any non-existing ones +for component in main testing experimental ; do + if ls "$APTDIR/dists/*/$component" >/dev/null 2>&1 ; then + components+=( $component ) + fi +done + +dists=( $(find "${APTDIR}/dists" -maxdepth 1 -mindepth 1 -type d) ) + +# override component if it is set +if [ "$COMPONENT" ]; then + components=( $COMPONENT ) +fi + +# release the debs +for version in "${dists[@]}"; do + for component in "${components[@]}"; do + codename="${version//debootstrap-}" + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" + done +done + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dist in "${dists[@]}"; do + version=$(basename "$dist") + for component in "${components[@]}"; do + codename="${version//debootstrap-}" + + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Component=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done + done +done diff --git a/vendor/github.com/docker/docker/hack/make/win b/vendor/github.com/docker/docker/hack/make/win new file mode 100644 index 0000000..f9f4111 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/win @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +# explicit list of os/arch combos that support being a daemon +declare -A daemonSupporting +daemonSupporting=( + [linux/amd64]=1 + [windows/amd64]=1 +) +platform="windows/amd64" +export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION +mkdir -p "$DEST" +ABS_DEST="$(cd "$DEST" && pwd -P)" +export GOOS=${platform%/*} +export GOARCH=${platform##*/} +if [ -z "${daemonSupporting[$platform]}" ]; then + export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms + export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported +fi +source "${MAKEDIR}/binary" diff --git a/vendor/github.com/docker/docker/hack/release.sh b/vendor/github.com/docker/docker/hack/release.sh new file mode 100755 index 0000000..4b02053 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/release.sh @@ -0,0 +1,325 @@ +#!/usr/bin/env bash +set -e + +# This script looks for bundles built by make.sh, and releases them on a +# public S3 bucket. +# +# Bundles should be available for the VERSION string passed as argument. +# +# The correct way to call this script is inside a container built by the +# official Dockerfile at the root of the Docker source code. The Dockerfile, +# make.sh and release.sh should all be from the same source code revision. + +set -o pipefail + +# Print a usage message and exit. +usage() { + cat >&2 <<'EOF' +To run, I need: +- to be in a container generated by the Dockerfile at the top of the Docker + repository; +- to be provided with the location of an S3 bucket and path, in + environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: ''); +- to be provided with AWS credentials for this S3 bucket, in environment + variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY; +- a generous amount of good will and nice manners. +The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" + +docker run -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e AWS_DEFAULT_REGION \ + -it --privileged \ + docker ./hack/release.sh +EOF + exit 1 +} + +[ "$AWS_S3_BUCKET" ] || usage +[ "$AWS_ACCESS_KEY_ID" ] || usage +[ "$AWS_SECRET_ACCESS_KEY" ] || usage +[ -d /go/src/github.com/docker/docker ] || usage +cd /go/src/github.com/docker/docker +[ -x hack/make.sh ] || usage + +export AWS_DEFAULT_REGION +: ${AWS_DEFAULT_REGION:=us-west-1} + +AWS_CLI=${AWS_CLI:-'aws'} + +RELEASE_BUNDLES=( + binary + cross + tgz +) + +if [ "$1" != '--release-regardless-of-test-failure' ]; then + RELEASE_BUNDLES=( + test-unit + "${RELEASE_BUNDLES[@]}" + test-integration-cli + ) +fi + +VERSION=$(< VERSION) +BUCKET=$AWS_S3_BUCKET +BUCKET_PATH=$BUCKET +[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH + +if command -v git &> /dev/null && git rev-parse &> /dev/null; then + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + echo "You cannot run the release script on a repo with uncommitted changes" + usage + fi +fi + +# These are the 2 keys we've used to sign the deb's +# release (get.docker.com) +# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" +# test (test.docker.com) +# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" + +setup_s3() { + echo "Setting up S3" + # Try creating the bucket. Ignore errors (it might already exist). + $AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true + # Check access to the bucket. + $AWS_CLI s3 ls "s3://$BUCKET" >/dev/null + # Make the bucket accessible through website endpoints. + $AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET" +} + +# write_to_s3 uploads the contents of standard input to the specified S3 url. +write_to_s3() { + DEST=$1 + F=`mktemp` + cat > "$F" + $AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST" + rm -f "$F" +} + +s3_url() { + case "$BUCKET" in + get.docker.com|test.docker.com|experimental.docker.com) + echo "https://$BUCKET_PATH" + ;; + *) + BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com" + if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then + echo "$BASE_URL/$AWS_S3_BUCKET_PATH" + else + echo "$BASE_URL" + fi + ;; + esac +} + +build_all() { + echo "Building release" + if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then + echo >&2 + echo >&2 'The build or tests appear to have failed.' + echo >&2 + echo >&2 'You, as the release maintainer, now have a couple options:' + echo >&2 '- delay release and fix issues' + echo >&2 '- delay release and fix issues' + echo >&2 '- did we mention how important this is? issues need fixing :)' + echo >&2 + echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' + echo >&2 ' really knows all the hairy problems at hand with the current release' + echo >&2 ' issues) may bypass this checking by running this script again with the' + echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' + echo >&2 ' running the test suite, and will only build the binaries and packages. Please' + echo >&2 ' avoid using this if at all possible.' + echo >&2 + echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' + echo >&2 ' should be used. If there are release issues, we should always err on the' + echo >&2 ' side of caution.' + echo >&2 + exit 1 + fi +} + +upload_release_build() { + src="$1" + dst="$2" + latest="$3" + + echo + echo "Uploading $src" + echo " to $dst" + echo + $AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst" + if [ "$latest" ]; then + echo + echo "Copying to $latest" + echo + $AWS_CLI s3 cp --acl public-read "$dst" "$latest" + fi + + # get hash files too (see hash_files() in hack/make.sh) + for hashAlgo in md5 sha256; do + if [ -e "$src.$hashAlgo" ]; then + echo + echo "Uploading $src.$hashAlgo" + echo " to $dst.$hashAlgo" + echo + $AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo" + if [ "$latest" ]; then + echo + echo "Copying to $latest.$hashAlgo" + echo + $AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo" + fi + fi + done +} + +release_build() { + echo "Releasing binaries" + GOOS=$1 + GOARCH=$2 + + binDir=bundles/$VERSION/cross/$GOOS/$GOARCH + tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH + binary=docker-$VERSION + zipExt=".tgz" + binaryExt="" + tgz=$binary$zipExt + + latestBase= + if [ -z "$NOLATEST" ]; then + latestBase=docker-latest + fi + + # we need to map our GOOS and GOARCH to uname values + # see https://en.wikipedia.org/wiki/Uname + # ie, GOOS=linux -> "uname -s"=Linux + + s3Os=$GOOS + case "$s3Os" in + darwin) + s3Os=Darwin + ;; + freebsd) + s3Os=FreeBSD + ;; + linux) + s3Os=Linux + ;; + solaris) + echo skipping solaris release + return 0 + ;; + windows) + # this is windows use the .zip and .exe extensions for the files. + s3Os=Windows + zipExt=".zip" + binaryExt=".exe" + tgz=$binary$zipExt + binary+=$binaryExt + ;; + *) + echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" + exit 1 + ;; + esac + + s3Arch=$GOARCH + case "$s3Arch" in + amd64) + s3Arch=x86_64 + ;; + 386) + s3Arch=i386 + ;; + arm) + s3Arch=armel + # someday, we might potentially support multiple GOARM values, in which case we might get armhf here too + ;; + *) + echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" + exit 1 + ;; + esac + + s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch" + # latest= + latestTgz= + if [ "$latestBase" ]; then + # commented out since we aren't uploading binaries right now. + # latest="$s3Dir/$latestBase$binaryExt" + # we don't include the $binaryExt because we don't want docker.exe.zip + latestTgz="$s3Dir/$latestBase$zipExt" + fi + + if [ ! -f "$tgzDir/$tgz" ]; then + echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" + exit 1 + fi + # disable binary uploads for now. Only providing tgz downloads + # upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" + upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" +} + +# Upload binaries and tgz files to S3 +release_binaries() { + [ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || { + echo >&2 './hack/make.sh must be run before release_binaries' + exit 1 + } + + for d in bundles/$VERSION/cross/*/*; do + GOARCH="$(basename "$d")" + GOOS="$(basename "$(dirname "$d")")" + release_build "$GOOS" "$GOARCH" + done + + # TODO create redirect from builds/*/i686 to builds/*/i386 + + cat < /dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- cli/compose/schema 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of `go generate github.com/docker/docker/cli/compose/schema` differs' + echo + echo "$diffs" + echo + echo 'Please run `go generate github.com/docker/docker/cli/compose/schema`' + } >&2 + false + else + echo 'Congratulations! cli/compose/schema/bindata.go is up-to-date.' + fi +else + echo 'No cli/compose/schema/data changes in diff.' +fi diff --git a/vendor/github.com/docker/docker/hack/validate/dco b/vendor/github.com/docker/docker/hack/validate/dco new file mode 100755 index 0000000..754ce8f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/dco @@ -0,0 +1,55 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') +dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') +#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')" + +: ${adds:=0} +: ${dels:=0} + +# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" +githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + +# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work +dcoPrefix='Signed-off-by:' +dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" + +check_dco() { + grep -qE "$dcoRegex" +} + +if [ $adds -eq 0 -a $dels -eq 0 ]; then + echo '0 adds, 0 deletions; nothing to validate! :)' +else + commits=( $(validate_log --format='format:%H%n') ) + badCommits=() + for commit in "${commits[@]}"; do + if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then + # no content (ie, Merge commit, etc) + continue + fi + if ! git log -1 --format='format:%B' "$commit" | check_dco; then + badCommits+=( "$commit" ) + fi + done + if [ ${#badCommits[@]} -eq 0 ]; then + echo "Congratulations! All commits are properly signed with the DCO!" + else + { + echo "These commits do not have a proper '$dcoPrefix' marker:" + for commit in "${badCommits[@]}"; do + echo " - $commit" + done + echo + echo 'Please amend each commit to include a properly formatted DCO marker.' + echo + echo 'Visit the following URL for information about the Docker DCO:' + echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' + echo + } >&2 + false + fi +fi diff --git a/vendor/github.com/docker/docker/hack/validate/default b/vendor/github.com/docker/docker/hack/validate/default new file mode 100755 index 0000000..29b96ca --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/default @@ -0,0 +1,16 @@ +#!/bin/bash +# +# Run default validation, exclude vendor because it's slow + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +. $SCRIPTDIR/dco +. $SCRIPTDIR/default-seccomp +. $SCRIPTDIR/gofmt +. $SCRIPTDIR/lint +. $SCRIPTDIR/pkg-imports +. $SCRIPTDIR/swagger +. $SCRIPTDIR/swagger-gen +. $SCRIPTDIR/test-imports +. $SCRIPTDIR/toml +. $SCRIPTDIR/vet diff --git a/vendor/github.com/docker/docker/hack/validate/default-seccomp b/vendor/github.com/docker/docker/hack/validate/default-seccomp new file mode 100755 index 0000000..8fe8435 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/default-seccomp @@ -0,0 +1,28 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'profiles/seccomp' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + # We run 'go generate' and see if we have a diff afterwards + go generate ./profiles/seccomp/ >/dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- profiles/seccomp 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of go generate ./profiles/seccomp/ differs' + echo + echo "$diffs" + echo + echo 'Please re-run go generate ./profiles/seccomp/' + echo + } >&2 + false + else + echo 'Congratulations! Seccomp profile generation is done correctly.' + fi +fi diff --git a/vendor/github.com/docker/docker/hack/validate/gofmt b/vendor/github.com/docker/docker/hack/validate/gofmt new file mode 100755 index 0000000..2040afa --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/gofmt @@ -0,0 +1,33 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | + grep -v '^vendor/' | + grep -v '^cli/compose/schema/bindata.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed is formatted + if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files are properly formatted.' +else + { + echo "These files are not properly gofmt'd:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/lint b/vendor/github.com/docker/docker/hack/validate/lint new file mode 100755 index 0000000..4ac0a33 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/lint @@ -0,0 +1,31 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '^cli/compose/schema/bindata.go' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + failedLint=$(golint "$f") + if [ "$failedLint" ]; then + errors+=( "$failedLint" ) + fi +done + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been linted.' +else + { + echo "Errors from golint:" + for err in "${errors[@]}"; do + echo "$err" + done + echo + echo 'Please fix the above errors. You can test via "golint" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/pkg-imports b/vendor/github.com/docker/docker/hack/validate/pkg-imports new file mode 100755 index 0000000..9e4ea74 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/pkg-imports @@ -0,0 +1,33 @@ +#!/bin/bash +set -e + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + IFS=$'\n' + badImports=( $(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -vE '^github.com/docker/docker/vendor' | grep -E '^github.com/docker/docker' || true) ) + unset IFS + + for import in "${badImports[@]}"; do + badFiles+=( "$f imports $import" ) + done +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! "./pkg/..." is safely isolated from internal code.' +else + { + echo 'These files import internal code: (either directly or indirectly)' + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/swagger b/vendor/github.com/docker/docker/hack/validate/swagger new file mode 100755 index 0000000..e754fb8 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/swagger @@ -0,0 +1,13 @@ +#!/bin/bash +set -e +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/swagger.yaml' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + yamllint -c ${SCRIPTDIR}/.swagger-yamllint api/swagger.yaml + swagger validate api/swagger.yaml +fi diff --git a/vendor/github.com/docker/docker/hack/validate/swagger-gen b/vendor/github.com/docker/docker/hack/validate/swagger-gen new file mode 100755 index 0000000..008abc7 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/swagger-gen @@ -0,0 +1,29 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swagger.yaml' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + ${SCRIPTDIR}/../generate-swagger-api.sh 2> /dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- api/types/ 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of hack/generate-swagger-api.sh differs' + echo + echo "$diffs" + echo + echo 'Please update api/swagger.yaml with any api changes, then ' + echo 'run `hack/generate-swagger-api.sh`.' + } >&2 + false + else + echo 'Congratulations! All api changes are done the right way.' + fi +else + echo 'No api/types/ or api/swagger.yaml changes in diff.' +fi diff --git a/vendor/github.com/docker/docker/hack/validate/test-imports b/vendor/github.com/docker/docker/hack/validate/test-imports new file mode 100755 index 0000000..373caa2 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/test-imports @@ -0,0 +1,38 @@ +#!/bin/bash +# Make sure we're not using gos' Testing package any more in integration-cli + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # skip check_test.go since it *does* use the testing package + if [ "$f" = "integration-cli/check_test.go" ]; then + continue + fi + + # we use "git show" here to validate that what's committed doesn't contain golang built-in testing + if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then + if [ "$(echo $f | grep '_test')" ]; then + # allow testing.T for non- _test files + badFiles+=( "$f" ) + fi + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! No testing.T found.' +else + { + echo "These files use the wrong testing infrastructure:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/toml b/vendor/github.com/docker/docker/hack/validate/toml new file mode 100755 index 0000000..a0cb158 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/toml @@ -0,0 +1,31 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed has valid toml syntax + if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All toml source files changed here have valid syntax.' +else + { + echo "These files are not valid toml:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files as valid toml' + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/vendor b/vendor/github.com/docker/docker/hack/validate/vendor new file mode 100755 index 0000000..0cb5aab --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/vendor @@ -0,0 +1,30 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'vendor.conf' 'vendor/' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + # We run vndr to and see if we have a diff afterwards + vndr + # Let see if the working directory is clean + diffs="$(git status --porcelain -- vendor 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of vndr differs' + echo + echo "$diffs" + echo + echo 'Please vendor your package with github.com/LK4D4/vndr.' + echo + } >&2 + false + else + echo 'Congratulations! All vendoring changes are done the right way.' + fi +else + echo 'No vendor changes in diff.' +fi diff --git a/vendor/github.com/docker/docker/hack/validate/vet b/vendor/github.com/docker/docker/hack/validate/vet new file mode 100755 index 0000000..6476048 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/vet @@ -0,0 +1,32 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + failedVet=$(go vet "$f") + if [ "$failedVet" ]; then + errors+=( "$failedVet" ) + fi +done + + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been vetted.' +else + { + echo "Errors from go vet:" + for err in "${errors[@]}"; do + echo " - $err" + done + echo + echo 'Please fix the above errors. You can test via "go vet" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/vendor.sh b/vendor/github.com/docker/docker/hack/vendor.sh new file mode 100755 index 0000000..9a4d038 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/vendor.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# This file is just wrapper around vndr (github.com/LK4D4/vndr) tool. +# For updating dependencies you should change `vendor.conf` file in root of the +# project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for +# vndr usage. + +set -e + +if ! hash vndr; then + echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH" + exit 1 +fi + +vndr "$@" diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go new file mode 100644 index 0000000..39cfbf5 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fs.go @@ -0,0 +1,173 @@ +package image + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +// DigestWalkFunc is function called by StoreBackend.Walk +type DigestWalkFunc func(id digest.Digest) error + +// StoreBackend provides interface for image.Store persistence +type StoreBackend interface { + Walk(f DigestWalkFunc) error + Get(id digest.Digest) ([]byte, error) + Set(data []byte) (digest.Digest, error) + Delete(id digest.Digest) error + SetMetadata(id digest.Digest, key string, data []byte) error + GetMetadata(id digest.Digest, key string) ([]byte, error) + DeleteMetadata(id digest.Digest, key string) error +} + +// fs implements StoreBackend using the filesystem. +type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + return s, nil +} + +func (s *fs) contentFile(dgst digest.Digest) string { + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(dgst digest.Digest) string { + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f DigestWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(dgst); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given digest. +func (s *fs) Get(dgst digest.Digest) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(dgst) +} + +func (s *fs) get(dgst digest.Digest) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(dgst)) + if err != nil { + return nil, err + } + + // todo: maybe optional + if digest.FromBytes(content) != dgst { + return nil, fmt.Errorf("failed to verify: %v", dgst) + } + + return content, nil +} + +// Set stores content by checksum. +func (s *fs) Set(data []byte) (digest.Digest, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("Invalid empty data") + } + + dgst := digest.FromBytes(data) + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + return "", err + } + + return dgst, nil +} + +// Delete removes content and metadata files associated with the digest. +func (s *fs) Delete(dgst digest.Digest) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { + return err + } + if err := os.Remove(s.contentFile(dgst)); err != nil { + return err + } + return nil +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(dgst); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(dgst)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) +} + +// GetMetadata returns metadata for a given digest. +func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(dgst); err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) +} + +// DeleteMetadata removes the metadata associated with a digest. +func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) +} diff --git a/vendor/github.com/docker/docker/image/fs_test.go b/vendor/github.com/docker/docker/image/fs_test.go new file mode 100644 index 0000000..8d602d9 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fs_test.go @@ -0,0 +1,384 @@ +package image + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestFSGetSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testGetSet(t, fs) +} + +func TestFSGetInvalidData(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id, err := fs.Set([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + + dgst := digest.Digest(id) + + if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600); err != nil { + t.Fatal(err) + } + + _, err = fs.Get(id) + if err == nil { + t.Fatal("Expected get to fail after data modification.") + } +} + +func TestFSInvalidSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id := digest.FromBytes([]byte("foobar")) + err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700) + if err != nil { + t.Fatal(err) + } + + _, err = fs.Set([]byte("foobar")) + if err == nil { + t.Fatal("Expecting error from invalid filesystem data.") + } +} + +func TestFSInvalidRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + tcases := []struct { + root, invalidFile string + }{ + {"root", "root"}, + {"root", "root/content"}, + {"root", "root/metadata"}, + } + + for _, tc := range tcases { + root := filepath.Join(tmpdir, tc.root) + filePath := filepath.Join(tmpdir, tc.invalidFile) + err := os.MkdirAll(filepath.Dir(filePath), 0700) + if err != nil { + t.Fatal(err) + } + f, err := os.Create(filePath) + if err != nil { + t.Fatal(err) + } + f.Close() + + _, err = NewFSStoreBackend(root) + if err == nil { + t.Fatalf("Expected error from root %q and invlid file %q", tc.root, tc.invalidFile) + } + + os.RemoveAll(root) + } + +} + +func testMetadataGetSet(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + tcases := []struct { + id digest.Digest + key string + value []byte + }{ + {id, "tkey", []byte("tval1")}, + {id, "tkey2", []byte("tval2")}, + {id2, "tkey", []byte("tval3")}, + } + + for _, tc := range tcases { + err = store.SetMetadata(tc.id, tc.key, tc.value) + if err != nil { + t.Fatal(err) + } + + actual, err := store.GetMetadata(tc.id, tc.key) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(actual, tc.value) != 0 { + t.Fatalf("Metadata expected %q, got %q", tc.value, actual) + } + } + + _, err = store.GetMetadata(id2, "tkey2") + if err == nil { + t.Fatal("Expected error for getting metadata for unknown key") + } + + id3 := digest.FromBytes([]byte("baz")) + err = store.SetMetadata(id3, "tkey", []byte("tval")) + if err == nil { + t.Fatal("Expected error for setting metadata for unknown ID.") + } + + _, err = store.GetMetadata(id3, "tkey") + if err == nil { + t.Fatal("Expected error for getting metadata for unknown ID.") + } +} + +func TestFSMetadataGetSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testMetadataGetSet(t, fs) +} + +func TestFSDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testDelete(t, fs) +} + +func TestFSWalker(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testWalker(t, fs) +} + +func TestFSInvalidWalker(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + fooID, err := fs.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, "sha256/foobar"), []byte("foobar"), 0600); err != nil { + t.Fatal(err) + } + + n := 0 + err = fs.Walk(func(id digest.Digest) error { + if id != fooID { + t.Fatalf("Invalid walker ID %q, expected %q", id, fooID) + } + n++ + return nil + }) + if err != nil { + t.Fatalf("Invalid data should not have caused walker error, got %v", err) + } + if n != 1 { + t.Fatalf("Expected 1 walk initialization, got %d", n) + } +} + +func testGetSet(t *testing.T, store StoreBackend) { + type tcase struct { + input []byte + expected digest.Digest + } + tcases := []tcase{ + {[]byte("foobar"), digest.Digest("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")}, + } + + randomInput := make([]byte, 8*1024) + _, err := rand.Read(randomInput) + if err != nil { + t.Fatal(err) + } + // skipping use of digest pkg because its used by the implementation + h := sha256.New() + _, err = h.Write(randomInput) + if err != nil { + t.Fatal(err) + } + tcases = append(tcases, tcase{ + input: randomInput, + expected: digest.Digest("sha256:" + hex.EncodeToString(h.Sum(nil))), + }) + + for _, tc := range tcases { + id, err := store.Set([]byte(tc.input)) + if err != nil { + t.Fatal(err) + } + if id != tc.expected { + t.Fatalf("Expected ID %q, got %q", tc.expected, id) + } + } + + for _, emptyData := range [][]byte{nil, {}} { + _, err := store.Set(emptyData) + if err == nil { + t.Fatal("Expected error for nil input.") + } + } + + for _, tc := range tcases { + data, err := store.Get(tc.expected) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(data, tc.input) != 0 { + t.Fatalf("Expected data %q, got %q", tc.input, data) + } + } + + for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { + _, err := store.Get(key) + if err == nil { + t.Fatalf("Expected error for ID %q.", key) + } + } + +} + +func testDelete(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + err = store.Delete(id) + if err != nil { + t.Fatal(err) + } + + _, err = store.Get(id) + if err == nil { + t.Fatalf("Expected getting deleted item %q to fail", id) + } + _, err = store.Get(id2) + if err != nil { + t.Fatal(err) + } + + err = store.Delete(id2) + if err != nil { + t.Fatal(err) + } + _, err = store.Get(id2) + if err == nil { + t.Fatalf("Expected getting deleted item %q to fail", id2) + } +} + +func testWalker(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + tcases := make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + tcases[id2] = struct{}{} + n := 0 + err = store.Walk(func(id digest.Digest) error { + delete(tcases, id) + n++ + return nil + }) + if err != nil { + t.Fatal(err) + } + + if n != 2 { + t.Fatalf("Expected 2 walk initializations, got %d", n) + } + if len(tcases) != 0 { + t.Fatalf("Expected empty unwalked set, got %+v", tcases) + } + + // stop on error + tcases = make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + err = store.Walk(func(id digest.Digest) error { + return errors.New("") + }) + if err == nil { + t.Fatalf("Exected error from walker.") + } +} diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go new file mode 100644 index 0000000..29a990a --- /dev/null +++ b/vendor/github.com/docker/docker/image/image.go @@ -0,0 +1,150 @@ +package image + +import ( + "encoding/json" + "errors" + "io" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types/container" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return id.Digest().String() +} + +// Digest converts ID into a digest +func (id ID) Digest() digest.Digest { + return digest.Digest(id) +} + +// IDFromDigest creates an ID from a digest +func IDFromDigest(digest digest.Digest) ID { + return ID(digest) +} + +// V1Image stores the V1 image configuration. +type V1Image struct { + // ID a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent id of the image + Parent string `json:"parent,omitempty"` + // Comment user added comment + Comment string `json:"comment,omitempty"` + // Created timestamp when image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` + // DockerVersion specifies version on which image is built + DockerVersion string `json:"docker_version,omitempty"` + // Author of the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *container.Config `json:"config,omitempty"` + // Architecture is the hardware that the image is build and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID +} + +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON +} + +// ID returns the image's content-addressable ID. +func (img *Image) ID() ID { + return img.computedID +} + +// ImageID stringifies ID. +func (img *Image) ImageID() string { + return img.ID().String() +} + +// RunConfig returns the image's container config. +func (img *Image) RunConfig() *container.Config { + return img.Config +} + +// MarshalJSON serializes the image to JSON. It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. +func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) + if err != nil { + return nil, err + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(pass1, &c); err != nil { + return nil, err + } + return json.Marshal(c) +} + +// History stores build commands that were used to create an image +type History struct { + // Created timestamp for build point + Created time.Time `json:"created"` + // Author of the build point + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building image. + CreatedBy string `json:"created_by,omitempty"` + // Comment is custom message set by the user when creating the image. + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Exporter provides interface for exporting and importing images +type Exporter interface { + Load(io.ReadCloser, io.Writer, bool) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. +func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("Invalid image JSON, no RootFS key.") + } + + img.rawJSON = src + + return img, nil +} diff --git a/vendor/github.com/docker/docker/image/image_test.go b/vendor/github.com/docker/docker/image/image_test.go new file mode 100644 index 0000000..525023b --- /dev/null +++ b/vendor/github.com/docker/docker/image/image_test.go @@ -0,0 +1,59 @@ +package image + +import ( + "encoding/json" + "sort" + "strings" + "testing" +) + +const sampleImageJSON = `{ + "architecture": "amd64", + "os": "linux", + "config": {}, + "rootfs": { + "type": "layers", + "diff_ids": [] + } +}` + +func TestJSON(t *testing.T) { + img, err := NewFromJSON([]byte(sampleImageJSON)) + if err != nil { + t.Fatal(err) + } + rawJSON := img.RawJSON() + if string(rawJSON) != sampleImageJSON { + t.Fatalf("Raw JSON of config didn't match: expected %+v, got %v", sampleImageJSON, rawJSON) + } +} + +func TestInvalidJSON(t *testing.T) { + _, err := NewFromJSON([]byte("{}")) + if err == nil { + t.Fatal("Expected JSON parse error") + } +} + +func TestMarshalKeyOrder(t *testing.T) { + b, err := json.Marshal(&Image{ + V1Image: V1Image{ + Comment: "a", + Author: "b", + Architecture: "c", + }, + }) + if err != nil { + t.Fatal(err) + } + + expectedOrder := []string{"architecture", "author", "comment"} + var indexes []int + for _, k := range expectedOrder { + indexes = append(indexes, strings.Index(string(b), k)) + } + + if !sort.IntsAreSorted(indexes) { + t.Fatal("invalid key order in JSON: ", string(b)) + } +} diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go new file mode 100644 index 0000000..7b24e3e --- /dev/null +++ b/vendor/github.com/docker/docker/image/rootfs.go @@ -0,0 +1,44 @@ +package image + +import ( + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/layer" +) + +// TypeLayers is used for RootFS.Type for filesystems organized into layers. +const TypeLayers = "layers" + +// typeLayersWithBase is an older format used by Windows up to v1.12. We +// explicitly handle this as an error case to ensure that a daemon which still +// has an older image like this on disk can still start, even though the +// image itself is not usable. See https://github.com/docker/docker/pull/25806. +const typeLayersWithBase = "layers+base" + +// RootFS describes images root filesystem +// This is currently a placeholder that only supports layers. In the future +// this can be made into an interface that supports different implementations. +type RootFS struct { + Type string `json:"type"` + DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` +} + +// NewRootFS returns empty RootFS struct +func NewRootFS() *RootFS { + return &RootFS{Type: TypeLayers} +} + +// Append appends a new diffID to rootfs +func (r *RootFS) Append(id layer.DiffID) { + r.DiffIDs = append(r.DiffIDs, id) +} + +// ChainID returns the ChainID for the top layer in RootFS. +func (r *RootFS) ChainID() layer.ChainID { + if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { + logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs) + return "" + } + return layer.CreateChainID(r.DiffIDs) +} diff --git a/vendor/github.com/docker/docker/image/spec/v1.1.md b/vendor/github.com/docker/docker/image/spec/v1.1.md new file mode 100644 index 0000000..83f1380 --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/v1.1.md @@ -0,0 +1,637 @@ +# Docker Image Specification v1.1.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +This version of the image specification was adopted starting in Docker 1.10. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Each layer is a set of filesystem + changes. Layers do not have configuration metadata such as environment + variables or default arguments - these are properties of the image as a + whole rather than any particular layer. +
+
+ Image JSON +
+
+ Each image has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. The JSON structure also references a cryptographic hash of + each layer used by the image, and provides history information for + those layers. This JSON is considered to be immutable, because changing + it would change the computed ImageID. Changing it means creating a new + derived image, instead of changing the existing image. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Layer DiffID +
+
+ Layers are referenced by cryptographic hashes of their serialized + representation. This is a SHA256 digest over the tar archive used to + transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Layers must be packed and unpacked reproducibly to avoid changing the + layer ID, for example by using tar-split to save the tar headers. Note + that the digest used as the layer ID is taken over an uncompressed + version of the tar. +
+
+ Layer ChainID +
+
+ For convenience, it is sometimes useful to refer to a stack of layers + with a single identifier. This is called a ChainID. For a + single layer (or the layer at the bottom of a stack), the + ChainID is equal to the layer's DiffID. + Otherwise the ChainID is given by the formula: + ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)). +
+
+ ImageID +
+
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is + represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Since the configuration JSON that gets hashed references hashes of each + layer in the image, this formulation of the ImageID makes images + content-addressable. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. Tag values are limited to the set of characters + [a-zA-Z0-9_.-], except they may not start with a . + or - character. Tags are limited to 127 characters. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. A repository name is made up of slash-separated + name components, optionally prefixed by a DNS hostname. The hostname + must follow comply with standard DNS rules, but may not contain + _ characters. If a hostname is present, it may optionally + be followed by a port number in the format :8080. + Name components may contain lowercase characters, digits, and + separators. A separator is defined as a period, one or two underscores, + or one or more dashes. A name component may not start or end with + a separator. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • +
  • amd64
  • +
  • arm
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • +
  • freebsd
  • +
  • linux
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • +
  • uid
  • +
  • user:group
  • +
  • uid:gid
  • +
  • uid:group
  • +
  • user:gid
  • +
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
    +
  • + "port/tcp" +
  • +
  • + "port/udp" +
  • +
  • + "port" +
  • +
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {},
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ rootfs struct +
+
+ The rootfs key references the layer content addresses used by the + image. This makes the image config hash depend on the filesystem hash. + rootfs has two subkeys: + +
    +
  • + type is usually set to layers. +
  • +
  • + diff_ids is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. +
  • +
+ + + Here is an example rootfs section: + +
"rootfs": {
+  "diff_ids": [
+    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+  ],
+  "type": "layers"
+}
+
+
+ history struct +
+
+ history is an array of objects describing the history of + each layer. The array is ordered from bottom-most layer to top-most + layer. The object has the following fields. + +
    +
  • + created: Creation time, expressed as a ISO-8601 formatted + combined date and time +
  • +
  • + author: The author of the build point +
  • +
  • + created_by: The command which created the layer +
  • +
  • + comment: A custom message set when creating the layer +
  • +
  • + empty_layer: This field is used to mark if the history + item created a filesystem diff. It is set to true if this history + item doesn't correspond to an actual layer in the rootfs section + (for example, a command like ENV which results in no change to the + filesystem). +
  • +
+ +Here is an example history section: + +
"history": [
+  {
+    "created": "2015-10-31T22:22:54.690851953Z",
+    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+  },
+  {
+    "created": "2015-10-31T22:22:55.613815829Z",
+    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+    "empty_layer": true
+  }
+]
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory. Here is the +initial empty directory structure for the a changeset using the +randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are +generated based on the content](#id_desc)). + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +To make changes to the filesystem of this container image, create a new +directory, such as `f60c56784b83`, and initialize it with a snapshot of the +parent image's root filesystem, so that the directory is identical to that +of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very +efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - image configuration JSON file + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. + +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/vendor/github.com/docker/docker/image/spec/v1.2.md b/vendor/github.com/docker/docker/image/spec/v1.2.md new file mode 100644 index 0000000..6c641ca --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/v1.2.md @@ -0,0 +1,696 @@ +# Docker Image Specification v1.2.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +This version of the image specification was adopted starting in Docker 1.12. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Each layer is a set of filesystem + changes. Layers do not have configuration metadata such as environment + variables or default arguments - these are properties of the image as a + whole rather than any particular layer. +
+
+ Image JSON +
+
+ Each image has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. The JSON structure also references a cryptographic hash of + each layer used by the image, and provides history information for + those layers. This JSON is considered to be immutable, because changing + it would change the computed ImageID. Changing it means creating a new + derived image, instead of changing the existing image. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Layer DiffID +
+
+ Layers are referenced by cryptographic hashes of their serialized + representation. This is a SHA256 digest over the tar archive used to + transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Layers must be packed and unpacked reproducibly to avoid changing the + layer ID, for example by using tar-split to save the tar headers. Note + that the digest used as the layer ID is taken over an uncompressed + version of the tar. +
+
+ Layer ChainID +
+
+ For convenience, it is sometimes useful to refer to a stack of layers + with a single identifier. This is called a ChainID. For a + single layer (or the layer at the bottom of a stack), the + ChainID is equal to the layer's DiffID. + Otherwise the ChainID is given by the formula: + ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)). +
+
+ ImageID +
+
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is + represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Since the configuration JSON that gets hashed references hashes of each + layer in the image, this formulation of the ImageID makes images + content-addressable. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. Tag values are limited to the set of characters + [a-zA-Z0-9_.-], except they may not start with a . + or - character. Tags are limited to 127 characters. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. A repository name is made up of slash-separated + name components, optionally prefixed by a DNS hostname. The hostname + must follow comply with standard DNS rules, but may not contain + _ characters. If a hostname is present, it may optionally + be followed by a port number in the format :8080. + Name components may contain lowercase characters, digits, and + separators. A separator is defined as a period, one or two underscores, + or one or more dashes. A name component may not start or end with + a separator. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • +
  • amd64
  • +
  • arm
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • +
  • freebsd
  • +
  • linux
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • +
  • uid
  • +
  • user:group
  • +
  • uid:gid
  • +
  • uid:group
  • +
  • user:gid
  • +
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
    +
  • + "port/tcp" +
  • +
  • + "port/udp" +
  • +
  • + "port" +
  • +
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Healthcheck struct +
+
+ A test to perform to determine whether the container is healthy. + Here is an example: +
{
+  "Test": [
+      "CMD-SHELL",
+      "/usr/bin/check-health localhost"
+  ],
+  "Interval": 30000000000,
+  "Timeout": 10000000000,
+  "Retries": 3
+}
+ The object has the following fields. +
+
+ Test array of strings +
+
+ The test to perform to check that the container is healthy. + The options are: +
    +
  • [] : inherit healthcheck from base image
  • +
  • ["NONE"] : disable healthcheck
  • +
  • ["CMD", arg1, arg2, ...] : exec arguments directly
  • +
  • ["CMD-SHELL", command] : run command with system's default shell
  • +
+ + The test command should exit with a status of 0 if the container is healthy, + or with 1 if it is unhealthy. +
+
+ Interval integer +
+
+ Number of nanoseconds to wait between probe attempts. +
+
+ Timeout integer +
+
+ Number of nanoseconds to wait before considering the check to have hung. +
+
+ Retries integer +
+
+ The number of consecutive failures needed to consider a container as unhealthy. +
+
+ + In each case, the field can be omitted to indicate that the + value should be inherited from the base layer. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {},
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ rootfs struct +
+
+ The rootfs key references the layer content addresses used by the + image. This makes the image config hash depend on the filesystem hash. + rootfs has two subkeys: + +
    +
  • + type is usually set to layers. +
  • +
  • + diff_ids is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. +
  • +
+ + + Here is an example rootfs section: + +
"rootfs": {
+  "diff_ids": [
+    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+  ],
+  "type": "layers"
+}
+
+
+ history struct +
+
+ history is an array of objects describing the history of + each layer. The array is ordered from bottom-most layer to top-most + layer. The object has the following fields. + +
    +
  • + created: Creation time, expressed as a ISO-8601 formatted + combined date and time +
  • +
  • + author: The author of the build point +
  • +
  • + created_by: The command which created the layer +
  • +
  • + comment: A custom message set when creating the layer +
  • +
  • + empty_layer: This field is used to mark if the history + item created a filesystem diff. It is set to true if this history + item doesn't correspond to an actual layer in the rootfs section + (for example, a command like ENV which results in no change to the + filesystem). +
  • +
+ +Here is an example history section: + +
"history": [
+  {
+    "created": "2015-10-31T22:22:54.690851953Z",
+    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+  },
+  {
+    "created": "2015-10-31T22:22:55.613815829Z",
+    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+    "empty_layer": true
+  }
+]
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory. Here is the +initial empty directory structure for the a changeset using the +randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are +generated based on the content](#id_desc)). + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +To make changes to the filesystem of this container image, create a new +directory, such as `f60c56784b83`, and initialize it with a snapshot of the +parent image's root filesystem, so that the directory is identical to that +of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very +efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - image configuration JSON file + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. + +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/vendor/github.com/docker/docker/image/spec/v1.md b/vendor/github.com/docker/docker/image/spec/v1.md new file mode 100644 index 0000000..57a599b --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/v1.md @@ -0,0 +1,573 @@ +# Docker Image Specification v1.0.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Image layer is a general + term which may be used to refer to one or both of the following: + +
    +
  1. The metadata for the layer, described in the JSON format.
  2. +
  3. The filesystem changes described by a layer.
  4. +
+ + To refer to the former you may use the term Layer JSON or + Layer Metadata. To refer to the latter you may use the term + Image Filesystem Changeset or Image Diff. +
+
+ Image JSON +
+
+ Each layer has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Image ID +
+
+ Each layer is given an ID upon its creation. It is + represented as a hexadecimal encoding of 256 bits, e.g., + a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Image IDs should be sufficiently random so as to be globally unique. + 32 bytes read from /dev/urandom is sufficient for all + practical purposes. Alternatively, an image ID may be derived as a + cryptographic hash of image contents as the result is considered + indistinguishable from random. The choice is left up to implementors. +
+
+ Image Parent +
+
+ Most layer metadata structs contain a parent field which + refers to the Image from which another directly descends. An image + contains a separate JSON metadata file and set of changes relative to + the filesystem of its parent image. Image Ancestor and + Image Descendant are also common terms. +
+
+ Image Checksum +
+
+ Layer metadata structs contain a cryptographic hash of the contents of + the layer's filesystem changeset. Though the set of changes exists as a + simple Tar archive, two archives with identical filenames and content + will have different SHA digests if the last-access or last-modified + times of any entries differ. For this reason, image checksums are + generated using the TarSum algorithm which produces a cryptographic + hash of file contents and selected headers only. Details of this + algorithm are described in the separate TarSum specification. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. An image name suffix (the name component after :) is + often referred to as a tag as well, though it strictly refers to the + full name of an image. Acceptable values for a tag suffix are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-z0-9], punctuation + characters [._-], and MUST NOT contain a : + character. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. Acceptable values for repository name are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-z0-9], and punctuation + characters [._-], however it MAY contain additional + / and : characters for organizational + purposes, with the last : character being interpreted + dividing the repository component of the name from the tag suffix + component. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9", + "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024", + "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b", + "created": "2014-10-13T21:19:18.674353812Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "Size": 271828, + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + } +} +``` + +### Image JSON Field Descriptions + +
+
+ id string +
+
+ Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies + the image. +
+
+ parent string +
+
+ ID of the parent image. If there is no parent image then this field + should be omitted. A collection of images may share many of the same + ancestor layers. This organizational structure is strictly a tree with + any one layer having either no parent or a single parent and zero or + more descendant layers. Cycles are not allowed and implementations + should be careful to avoid creating them or iterating through a cycle + indefinitely. +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • +
  • amd64
  • +
  • arm
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • +
  • freebsd
  • +
  • linux
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ checksum string +
+
+ Image Checksum of the filesystem changeset associated with the image + layer. +
+
+ Size integer +
+
+ The size in bytes of the filesystem changeset associated with the image + layer. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • +
  • uid
  • +
  • user:group
  • +
  • uid:gid
  • +
  • uid:group
  • +
  • user:gid
  • +
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
    +
  • + "port/tcp" +
  • +
  • + "port/udp" +
  • +
  • + "port" +
  • +
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {},
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory named with the +ID of the image being created. Here is the initial empty directory structure +for the changeset for an image with ID `c3167915dc9d` ([real IDs are much +longer](#id_desc), but this example use a truncated one here for brevity. +Implementations need not name the rootfs directory in this way but it may be +convenient for keeping record of a large number of image layers.): + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +The TarSum checksum for the archive file is then computed and placed in the +JSON metadata along with the execution parameters. + +To make changes to the filesystem of this container image, create a new +directory named with a new ID, such as `f60c56784b83`, and initialize it with +a snapshot of the parent image's root filesystem, so that the directory is +identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem +can make this very efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - all image layer JSON files + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c +│   ├── VERSION +│   ├── json +│   └── layer.tar +└── repositories +``` + +There are one or more directories named with the ID for each layer in a full +image. Each of these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The JSON metadata for an image layer + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +And the `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. + +## Loading an Image Filesystem Changeset + +Unpacking a bundle of image layer JSON files and their corresponding filesystem +changesets can be done using a series of steps: + +1. Follow the parent IDs of image layers to find the root ancestor (an image +with no parent ID specified). +2. For every image layer, in order from root ancestor and descending down, +extract the contents of that layer's filesystem changeset archive into a +directory which will be used as the root of a container filesystem. + + - Extract all contents of each archive. + - Walk the directory tree once more, removing any files with the prefix + `.wh.` and the corresponding file or directory named without this prefix. + + +## Implementations + +This specification is an admittedly imperfect description of an +imperfectly-understood problem. The Docker project is, in turn, an attempt to +implement this specification. Our goal and our execution toward it will evolve +over time, but our primary concern in this specification and in our +implementation is compatibility and interoperability. diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go new file mode 100644 index 0000000..b61c456 --- /dev/null +++ b/vendor/github.com/docker/docker/image/store.go @@ -0,0 +1,295 @@ +package image + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// Store is an interface for creating and accessing images +type Store interface { + Create(config []byte) (ID, error) + Get(id ID) (*Image, error) + Delete(id ID) ([]layer.Metadata, error) + Search(partialID string) (ID, error) + SetParent(id ID, parent ID) error + GetParent(id ID) (ID, error) + Children(id ID) []ID + Map() map[ID]*Image + Heads() map[ID]*Image +} + +// LayerGetReleaser is a minimal interface for getting and releasing images. +type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.Mutex + ls LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digest.Set +} + +// NewImageStore returns new store object for given layer store +func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { + is := &store{ + ls: ls, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digest.NewSet(), + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(dgst digest.Digest) error { + img, err := is.Get(IDFromDigest(dgst)) + if err != nil { + logrus.Errorf("invalid image %v, %v", dgst, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + l, err = is.ls.Get(chainID) + if err != nil { + return err + } + } + if err := is.digestSet.Add(dgst); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[IDFromDigest(dgst)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. + rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := IDFromDigest(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + l, err = is.ls.Get(layerID) + if err != nil { + return "", err + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(imageID.Digest()); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +func (is *store) Search(term string) (ID, error) { + is.Lock() + defer is.Unlock() + + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digest.ErrDigestNotFound { + err = fmt.Errorf("No such image: %s", term) + } + return "", err + } + return IDFromDigest(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id.Digest()) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id.Digest(), "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(id.Digest()); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id.Digest()) + + if imageMeta.layer != nil { + return is.ls.Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := is.fs.GetMetadata(id.Digest(), "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? +} + +func (is *store) Children(id ID) []ID { + is.Lock() + defer is.Unlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.Lock() + defer is.Unlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} diff --git a/vendor/github.com/docker/docker/image/store_test.go b/vendor/github.com/docker/docker/image/store_test.go new file mode 100644 index 0000000..50f8aa8 --- /dev/null +++ b/vendor/github.com/docker/docker/image/store_test.go @@ -0,0 +1,300 @@ +package image + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +func TestRestore(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + _, err = fs.Set([]byte(`invalid`)) + if err != nil { + t.Fatal(err) + } + id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + err = fs.SetMetadata(id2, "parent", []byte(id1)) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + imgs := is.Map() + if actual, expected := len(imgs), 2; actual != expected { + t.Fatalf("invalid images length, expected 2, got %q", len(imgs)) + } + + img1, err := is.Get(ID(id1)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img1.computedID, ID(id1); actual != expected { + t.Fatalf("invalid image ID: expected %q, got %q", expected, actual) + } + + if actual, expected := img1.computedID.String(), string(id1); actual != expected { + t.Fatalf("invalid image ID string: expected %q, got %q", expected, actual) + } + + img2, err := is.Get(ID(id2)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img1.Comment, "abc"; actual != expected { + t.Fatalf("invalid comment for image1: expected %q, got %q", expected, actual) + } + + if actual, expected := img2.Comment, "def"; actual != expected { + t.Fatalf("invalid comment for image2: expected %q, got %q", expected, actual) + } + + p, err := is.GetParent(ID(id1)) + if err == nil { + t.Fatal("expected error for getting parent") + } + + p, err = is.GetParent(ID(id2)) + if err != nil { + t.Fatal(err) + } + if actual, expected := p, ID(id1); actual != expected { + t.Fatalf("invalid parent: expected %q, got %q", expected, actual) + } + + children := is.Children(ID(id1)) + if len(children) != 1 { + t.Fatalf("invalid children length: %q", len(children)) + } + if actual, expected := children[0], ID(id2); actual != expected { + t.Fatalf("invalid child for id1: expected %q, got %q", expected, actual) + } + + heads := is.Heads() + if actual, expected := len(heads), 1; actual != expected { + t.Fatalf("invalid images length: expected %q, got %q", expected, actual) + } + + sid1, err := is.Search(string(id1)[:10]) + if err != nil { + t.Fatal(err) + } + if actual, expected := sid1, ID(id1); actual != expected { + t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) + } + + sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) + if err != nil { + t.Fatal(err) + } + if actual, expected := sid1, ID(id1); actual != expected { + t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) + } + + invalidPattern := digest.Digest(id1).Hex()[1:6] + _, err = is.Search(invalidPattern) + if err == nil { + t.Fatalf("expected search for %q to fail", invalidPattern) + } + +} + +func TestAddDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"); actual != expected { + t.Fatalf("create ID mismatch: expected %q, got %q", expected, actual) + } + + img, err := is.Get(id1) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img.Comment, "abc"; actual != expected { + t.Fatalf("invalid comment in image: expected %q, got %q", expected, actual) + } + + id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + + err = is.SetParent(id2, id1) + if err != nil { + t.Fatal(err) + } + + pid1, err := is.GetParent(id2) + if err != nil { + t.Fatal(err) + } + if actual, expected := pid1, id1; actual != expected { + t.Fatalf("invalid parent for image: expected %q, got %q", expected, actual) + } + + _, err = is.Delete(id1) + if err != nil { + t.Fatal(err) + } + _, err = is.Get(id1) + if err == nil { + t.Fatalf("expected get for deleted image %q to fail", id1) + } + _, err = is.Get(id2) + if err != nil { + t.Fatal(err) + } + pid1, err = is.GetParent(id2) + if err == nil { + t.Fatalf("expected parent check for image %q to fail, got %q", id2, pid1) + } + +} + +func TestSearchAfterDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + id1, err := is.Search(string(id)[:15]) + if err != nil { + t.Fatal(err) + } + + if actual, expected := id1, id; expected != actual { + t.Fatalf("wrong id returned from search: expected %q, got %q", expected, actual) + } + + if _, err := is.Delete(id); err != nil { + t.Fatal(err) + } + + if _, err := is.Search(string(id)[:15]); err == nil { + t.Fatal("expected search after deletion to fail") + } +} + +func TestParentReset(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + if err := is.SetParent(id, id2); err != nil { + t.Fatal(err) + } + + ids := is.Children(id2) + if actual, expected := len(ids), 1; expected != actual { + t.Fatalf("wrong number of children: %d, got %d", expected, actual) + } + + if err := is.SetParent(id, id3); err != nil { + t.Fatal(err) + } + + ids = is.Children(id2) + if actual, expected := len(ids), 0; expected != actual { + t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) + } + + ids = is.Children(id3) + if actual, expected := len(ids), 1; expected != actual { + t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) + } + +} + +type mockLayerGetReleaser struct{} + +func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/image/tarexport/load.go b/vendor/github.com/docker/docker/image/tarexport/load.go new file mode 100644 index 0000000..01edd91 --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/load.go @@ -0,0 +1,390 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/reference" +) + +func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + progressOutput progress.Output + ) + if !quiet { + progressOutput = sf.NewProgressOutput(outStream, false) + } + outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()} + + tmpDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { + return err + } + // read manifest, if no file then load in legacy mode + manifestPath, err := safePath(tmpDir, manifestFileName) + if err != nil { + return err + } + manifestFile, err := os.Open(manifestPath) + if err != nil { + if os.IsNotExist(err) { + return l.legacyLoad(tmpDir, outStream, progressOutput) + } + return err + } + defer manifestFile.Close() + + var manifest []manifestItem + if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { + return err + } + + var parentLinks []parentLink + var imageIDsStr string + var imageRefCount int + + for _, m := range manifest { + configPath, err := safePath(tmpDir, m.Config) + if err != nil { + return err + } + config, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + img, err := image.NewFromJSON(config) + if err != nil { + return err + } + var rootFS image.RootFS + rootFS = *img.RootFS + rootFS.DiffIDs = nil + + if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { + return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) + } + + for i, diffID := range img.RootFS.DiffIDs { + layerPath, err := safePath(tmpDir, m.Layers[i]) + if err != nil { + return err + } + r := rootFS + r.Append(diffID) + newLayer, err := l.ls.Get(r.ChainID()) + if err != nil { + newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), m.LayerSources[diffID], progressOutput) + if err != nil { + return err + } + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID) + + imageRefCount = 0 + for _, repoTag := range m.RepoTags { + named, err := reference.ParseNamed(repoTag) + if err != nil { + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", ref))) + imageRefCount++ + } + + parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) + l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load") + } + + for _, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + if err := l.setParentID(p.id, p.parentID); err != nil { + return err + } + } + } + + if imageRefCount == 0 { + outStream.Write([]byte(imageIDsStr)) + } + + return nil +} + +func (l *tarexporter) setParentID(id, parentID image.ID) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + parent, err := l.is.Get(parentID) + if err != nil { + return err + } + if !checkValidParent(img, parent) { + return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID()) + } + return l.is.SetParent(id, parentID) +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list. On Linux, this equates to a regular os.Open. + rawTar, err := system.OpenSequential(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + defer rawTar.Close() + + var r io.Reader + if progressOutput != nil { + fileInfo, err := rawTar.Stat() + if err != nil { + logrus.Debugf("Error statting file: %v", err) + return nil, err + } + + r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") + } else { + r = rawTar + } + + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return nil, err + } + defer inflatedLayerData.Close() + + if ds, ok := l.ls.(layer.DescribableStore); ok { + return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) + } + return l.ls.Register(inflatedLayerData, rootFS.ChainID()) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error { + if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.rs.AddTag(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + return err + } + defer repositoriesFile.Close() + + repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.WithName(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + } + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct{ Parent string } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, distribution.Descriptor{}, progressOutput) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} + +type parentLink struct { + id, parentID image.ID +} + +func validatedParentLinks(pl []parentLink) (ret []parentLink) { +mainloop: + for i, p := range pl { + ret = append(ret, p) + for _, p2 := range pl { + if p2.id == p.parentID && p2.id != p.id { + continue mainloop + } + } + ret[i].parentID = "" + } + return +} + +func checkValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 && len(parent.History) == 0 { + return true // having history is not mandatory + } + if len(img.History)-len(parent.History) != 1 { + return false + } + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/image/tarexport/save.go b/vendor/github.com/docker/docker/image/tarexport/save.go new file mode 100644 index 0000000..6e3a5bc --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/save.go @@ -0,0 +1,355 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/reference" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} + diffIDPaths map[layer.DiffID]string // cache every diffID blob to avoid duplicates +} + +func (l *tarexporter) Save(names []string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err + } + + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { + imgDescr := make(map[image.ID]*imageDescriptor) + + addAssoc := func(id image.ID, ref reference.Named) { + if _, ok := imgDescr[id]; !ok { + imgDescr[id] = &imageDescriptor{} + } + + if ref != nil { + var tagged reference.NamedTagged + if _, ok := ref.(reference.Canonical); ok { + return + } + var ok bool + if tagged, ok = ref.(reference.NamedTagged); !ok { + var err error + if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { + return + } + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + } + + for _, name := range names { + id, ref, err := reference.ParseIDOrReference(name) + if err != nil { + return nil, err + } + if id != "" { + _, err := l.is.Get(image.IDFromDigest(id)) + if err != nil { + return nil, err + } + addAssoc(image.IDFromDigest(id), nil) + continue + } + if ref.Name() == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + continue + } + if reference.IsNameOnly(ref) { + assocs := l.rs.ReferencesByName(ref) + for _, assoc := range assocs { + addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref) + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + } + continue + } + id, err = l.rs.Get(ref) + if err != nil { + return nil, err + } + addAssoc(image.IDFromDigest(id), ref) + + } + return imgDescr, nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + s.diffIDPaths = make(map[layer.DiffID]string) + + // get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + var parentLinks []parentLink + + for id, imageDescr := range s.images { + foreignSrcs, err := s.saveImage(id) + if err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + if _, ok := reposLegacy[ref.Name()]; !ok { + reposLegacy[ref.Name()] = make(map[string]string) + } + reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, ref.String()) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: id.Digest().Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + LayerSources: foreignSrcs, + }) + + parentID, _ := s.is.GetParent(id) + parentLinks = append(parentLinks, parentLink{id, parentID}) + s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save") + } + + for i, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + manifest[i].Parent = p.parentID + } + } + + if len(reposLegacy) > 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil { + rf.Close() + return err + } + + rf.Close() + + if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(f).Encode(manifest); err != nil { + f.Close() + return err + } + + f.Close() + + if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + if _, err := io.Copy(outStream, fs); err != nil { + return err + } + return nil +} + +func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { + img, err := s.is.Get(id) + if err != nil { + return nil, err + } + + if len(img.RootFS.DiffIDs) == 0 { + return nil, fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + var foreignSrcs map[layer.DiffID]distribution.Descriptor + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{ + Created: img.Created, + } + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return nil, err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created) + if err != nil { + return nil, err + } + layers = append(layers, v1Img.ID) + parent = v1ID + if src.Digest != "" { + if foreignSrcs == nil { + foreignSrcs = make(map[layer.DiffID]distribution.Descriptor) + } + foreignSrcs[img.RootFS.DiffIDs[i]] = src + } + } + + configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return nil, err + } + if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { + return nil, err + } + + s.images[id].layers = layers + return foreignSrcs, nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return distribution.Descriptor{}, nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != nil { + return distribution.Descriptor{}, err + } + + // todo: why is this version file here? + if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { + return distribution.Descriptor{}, err + } + + imageConfig, err := json.Marshal(legacyImg) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { + return distribution.Descriptor{}, err + } + + // serialize filesystem + layerPath := filepath.Join(outDir, legacyLayerFileName) + l, err := s.ls.Get(id) + if err != nil { + return distribution.Descriptor{}, err + } + defer layer.ReleaseAndLog(s.ls, l) + + if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists { + relPath, err := filepath.Rel(outDir, oldPath) + if err != nil { + return distribution.Descriptor{}, err + } + os.Symlink(relPath, layerPath) + } else { + // Use system.CreateSequential rather than os.Create. This ensures sequential + // file access on Windows to avoid eating into MM standby list. + // On Linux, this equates to a regular os.Create. + tarFile, err := system.CreateSequential(layerPath) + if err != nil { + return distribution.Descriptor{}, err + } + defer tarFile.Close() + + arch, err := l.TarStream() + if err != nil { + return distribution.Descriptor{}, err + } + defer arch.Close() + + if _, err := io.Copy(tarFile, arch); err != nil { + return distribution.Descriptor{}, err + } + + for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { + // todo: maybe save layer created timestamp? + if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { + return distribution.Descriptor{}, err + } + } + + s.diffIDPaths[l.DiffID()] = layerPath + } + s.savedLayers[legacyImg.ID] = struct{}{} + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + return src, nil +} diff --git a/vendor/github.com/docker/docker/image/tarexport/tarexport.go b/vendor/github.com/docker/docker/image/tarexport/tarexport.go new file mode 100644 index 0000000..c0be954 --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/tarexport.go @@ -0,0 +1,47 @@ +package tarexport + +import ( + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string + Parent image.ID `json:",omitempty"` + LayerSources map[layer.DiffID]distribution.Descriptor `json:",omitempty"` +} + +type tarexporter struct { + is image.Store + ls layer.Store + rs reference.Store + loggerImgEvent LogImageEvent +} + +// LogImageEvent defines interface for event generation related to image tar(load and save) operations +type LogImageEvent interface { + //LogImageEvent generates an event related to an image operation + LogImageEvent(imageID, refName, action string) +} + +// NewTarExporter returns new ImageExporter for tar packages +func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store, loggerImgEvent LogImageEvent) image.Exporter { + return &tarexporter{ + is: is, + ls: ls, + rs: rs, + loggerImgEvent: loggerImgEvent, + } +} diff --git a/vendor/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go new file mode 100644 index 0000000..d498ddb --- /dev/null +++ b/vendor/github.com/docker/docker/image/v1/imagev1.go @@ -0,0 +1,156 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" +) + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = "1.8.3" + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. +// Used for backwards compatibility with old clients. +func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON), nil +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. +func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + imageType := reflect.TypeOf(img).Elem() + for i := 0; i < imageType.NumField(); i++ { + f := imageType.Field(i) + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + // Parent is handled specially below. + if jsonName != "" && jsonName != "parent" { + delete(configAsMap, jsonName) + } + } + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} diff --git a/vendor/github.com/docker/docker/image/v1/imagev1_test.go b/vendor/github.com/docker/docker/image/v1/imagev1_test.go new file mode 100644 index 0000000..936c55e --- /dev/null +++ b/vendor/github.com/docker/docker/image/v1/imagev1_test.go @@ -0,0 +1,55 @@ +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/docker/docker/image" +) + +func TestMakeV1ConfigFromConfig(t *testing.T) { + img := &image.Image{ + V1Image: image.V1Image{ + ID: "v2id", + Parent: "v2parent", + OS: "os", + }, + OSVersion: "osversion", + RootFS: &image.RootFS{ + Type: "layers", + }, + } + v2js, err := json.Marshal(img) + if err != nil { + t.Fatal(err) + } + + // Convert the image back in order to get RawJSON() support. + img, err = image.NewFromJSON(v2js) + if err != nil { + t.Fatal(err) + } + + js, err := MakeV1ConfigFromConfig(img, "v1id", "v1parent", false) + if err != nil { + t.Fatal(err) + } + + newimg := &image.Image{} + err = json.Unmarshal(js, newimg) + if err != nil { + t.Fatal(err) + } + + if newimg.V1Image.ID != "v1id" || newimg.Parent != "v1parent" { + t.Error("ids should have changed", newimg.V1Image.ID, newimg.V1Image.Parent) + } + + if newimg.RootFS != nil { + t.Error("rootfs should have been removed") + } + + if newimg.V1Image.OS != "os" { + t.Error("os should have been preserved") + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/benchmark_test.go b/vendor/github.com/docker/docker/integration-cli/benchmark_test.go new file mode 100644 index 0000000..b87e131 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/benchmark_test.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "sync" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) { + maxConcurrency := runtime.GOMAXPROCS(0) + numIterations := c.N + outerGroup := &sync.WaitGroup{} + outerGroup.Add(maxConcurrency) + chErr := make(chan error, numIterations*2*maxConcurrency) + + for i := 0; i < maxConcurrency; i++ { + go func() { + defer outerGroup.Done() + innerGroup := &sync.WaitGroup{} + innerGroup.Add(2) + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + args := []string{"run", "-d", defaultSleepImage} + args = append(args, sleepCommandForDaemonPlatform()...) + out, _, err := dockerCmdWithError(args...) + if err != nil { + chErr <- fmt.Errorf(out) + return + } + + id := strings.TrimSpace(out) + tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id) + if err != nil { + chErr <- err + return + } + defer os.RemoveAll(tmpDir) + out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir) + if err != nil { + chErr <- fmt.Errorf(out) + return + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("start", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + // don't do an rm -f here since it can potentially ignore errors from the graphdriver + out, _, err = dockerCmdWithError("rm", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + out, _, err := dockerCmdWithError("ps") + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + innerGroup.Wait() + }() + } + + outerGroup.Wait() + close(chErr) + + for err := range chErr { + c.Assert(err, checker.IsNil) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/check_test.go b/vendor/github.com/docker/docker/integration-cli/check_test.go new file mode 100644 index 0000000..7084d6f --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/check_test.go @@ -0,0 +1,383 @@ +package main + +import ( + "fmt" + "net/http/httptest" + "os" + "path/filepath" + "sync" + "syscall" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/reexec" + "github.com/go-check/check" +) + +func Test(t *testing.T) { + reexec.Init() // This is required for external graphdriver tests + + if !isLocalDaemon { + fmt.Println("INFO: Testing against a remote daemon") + } else { + fmt.Println("INFO: Testing against a local daemon") + } + + if daemonPlatform == "linux" { + ensureFrozenImagesLinux(t) + } + check.TestingT(t) +} + +func init() { + check.Suite(&DockerSuite{}) +} + +type DockerSuite struct { +} + +func (s *DockerSuite) OnTimeout(c *check.C) { + if daemonPid > 0 && isLocalDaemon { + signalDaemonDump(daemonPid) + } +} + +func (s *DockerSuite) TearDownTest(c *check.C) { + unpauseAllContainers() + deleteAllContainers() + deleteAllImages() + deleteAllVolumes() + deleteAllNetworks() + deleteAllPlugins() +} + +func init() { + check.Suite(&DockerRegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistrySuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + s.reg = setupRegistry(c, false, "", "") + s.d = NewDaemon(c) +} + +func (s *DockerRegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerSchema1RegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerSchema1RegistrySuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64) + s.reg = setupRegistry(c, true, "", "") + s.d = NewDaemon(c) +} + +func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthHtpasswdSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthHtpasswdSuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + s.reg = setupRegistry(c, false, "htpasswd", "") + s.d = NewDaemon(c) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthTokenSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthTokenSuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + s.d = NewDaemon(c) +} + +func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, tokenURL string) { + if s == nil { + c.Fatal("registry suite isn't initialized") + } + s.reg = setupRegistry(c, false, "token", tokenURL) +} + +func init() { + check.Suite(&DockerDaemonSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerDaemonSuite struct { + ds *DockerSuite + d *Daemon +} + +func (s *DockerDaemonSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerDaemonSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d = NewDaemon(c) +} + +func (s *DockerDaemonSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux) + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { + filepath.Walk(daemonSockRoot, func(path string, fi os.FileInfo, err error) error { + if err != nil { + // ignore errors here + // not cleaning up sockets is not really an error + return nil + } + if fi.Mode() == os.ModeSocket { + syscall.Unlink(path) + } + return nil + }) + os.RemoveAll(daemonSockRoot) +} + +const defaultSwarmPort = 2477 + +func init() { + check.Suite(&DockerSwarmSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerSwarmSuite struct { + server *httptest.Server + ds *DockerSuite + daemons []*SwarmDaemon + daemonsLock sync.Mutex // protect access to daemons + portIndex int +} + +func (s *DockerSwarmSuite) OnTimeout(c *check.C) { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + d.DumpStackAndQuit() + } +} + +func (s *DockerSwarmSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux) +} + +func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *SwarmDaemon { + d := &SwarmDaemon{ + Daemon: NewDaemon(c), + port: defaultSwarmPort + s.portIndex, + } + d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port) + args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts + if experimentalDaemon { + args = append(args, "--experimental") + } + err := d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + + if joinSwarm == true { + if len(s.daemons) > 0 { + tokens := s.daemons[0].joinTokens(c) + token := tokens.Worker + if manager { + token = tokens.Manager + } + c.Assert(d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{s.daemons[0].listenAddr}, + JoinToken: token, + }), check.IsNil) + } else { + c.Assert(d.Init(swarm.InitRequest{}), check.IsNil) + } + } + + s.portIndex++ + s.daemonsLock.Lock() + s.daemons = append(s.daemons, d) + s.daemonsLock.Unlock() + + return d +} + +func (s *DockerSwarmSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux) + s.daemonsLock.Lock() + for _, d := range s.daemons { + d.Stop() + // raft state file is quite big (64MB) so remove it after every test + walDir := filepath.Join(d.root, "swarm/raft/wal") + if err := os.RemoveAll(walDir); err != nil { + c.Logf("error removing %v: %v", walDir, err) + } + + cleanupExecRoot(c, d.execRoot) + } + s.daemons = nil + s.daemonsLock.Unlock() + + s.portIndex = 0 + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerTrustSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerTrustSuite struct { + ds *DockerSuite + reg *testRegistryV2 + not *testNotary +} + +func (s *DockerTrustSuite) SetUpTest(c *check.C) { + testRequires(c, RegistryHosting, NotaryServerHosting) + s.reg = setupRegistry(c, false, "", "") + s.not = setupNotary(c) +} + +func (s *DockerTrustSuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.not != nil { + s.not.Close() + } + + // Remove trusted keys and metadata after test + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + s.ds.TearDownTest(c) +} + +func init() { + ds := &DockerSuite{} + check.Suite(&DockerTrustedSwarmSuite{ + trustSuite: DockerTrustSuite{ + ds: ds, + }, + swarmSuite: DockerSwarmSuite{ + ds: ds, + }, + }) +} + +type DockerTrustedSwarmSuite struct { + swarmSuite DockerSwarmSuite + trustSuite DockerTrustSuite + reg *testRegistryV2 + not *testNotary +} + +func (s *DockerTrustedSwarmSuite) SetUpTest(c *check.C) { + s.swarmSuite.SetUpTest(c) + s.trustSuite.SetUpTest(c) +} + +func (s *DockerTrustedSwarmSuite) TearDownTest(c *check.C) { + s.trustSuite.TearDownTest(c) + s.swarmSuite.TearDownTest(c) +} + +func (s *DockerTrustedSwarmSuite) OnTimeout(c *check.C) { + s.swarmSuite.OnTimeout(c) +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon.go b/vendor/github.com/docker/docker/integration-cli/daemon.go new file mode 100644 index 0000000..9fd3f1e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon.go @@ -0,0 +1,608 @@ +package main + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" +) + +var daemonSockRoot = filepath.Join(os.TempDir(), "docker-integration") + +// Daemon represents a Docker daemon for the testing framework. +type Daemon struct { + GlobalFlags []string + + id string + c *check.C + logFile *os.File + folder string + root string + stdin io.WriteCloser + stdout, stderr io.ReadCloser + cmd *exec.Cmd + storageDriver string + wait chan error + userlandProxy bool + useDefaultHost bool + useDefaultTLSHost bool + execRoot string +} + +type clientConfig struct { + transport *http.Transport + scheme string + addr string +} + +// NewDaemon returns a Daemon instance to be used for testing. +// This will create a directory such as d123456789 in the folder specified by $DEST. +// The daemon will not automatically start. +func NewDaemon(c *check.C) *Daemon { + dest := os.Getenv("DEST") + c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable")) + + err := os.MkdirAll(daemonSockRoot, 0700) + c.Assert(err, checker.IsNil, check.Commentf("could not create daemon socket root")) + + id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) + dir := filepath.Join(dest, id) + daemonFolder, err := filepath.Abs(dir) + c.Assert(err, check.IsNil, check.Commentf("Could not make %q an absolute path", dir)) + daemonRoot := filepath.Join(daemonFolder, "root") + + c.Assert(os.MkdirAll(daemonRoot, 0755), check.IsNil, check.Commentf("Could not create daemon root %q", dir)) + + userlandProxy := true + if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { + if val, err := strconv.ParseBool(env); err != nil { + userlandProxy = val + } + } + + return &Daemon{ + id: id, + c: c, + folder: daemonFolder, + root: daemonRoot, + storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), + userlandProxy: userlandProxy, + execRoot: filepath.Join(os.TempDir(), "docker-execroot", id), + } +} + +// RootDir returns the root directory of the daemon. +func (d *Daemon) RootDir() string { + return d.root +} + +func (d *Daemon) getClientConfig() (*clientConfig, error) { + var ( + transport *http.Transport + scheme string + addr string + proto string + ) + if d.useDefaultTLSHost { + option := &tlsconfig.Options{ + CAFile: "fixtures/https/ca.pem", + CertFile: "fixtures/https/client-cert.pem", + KeyFile: "fixtures/https/client-key.pem", + } + tlsConfig, err := tlsconfig.Client(*option) + if err != nil { + return nil, err + } + transport = &http.Transport{ + TLSClientConfig: tlsConfig, + } + addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort) + scheme = "https" + proto = "tcp" + } else if d.useDefaultHost { + addr = opts.DefaultUnixSocket + proto = "unix" + scheme = "http" + transport = &http.Transport{} + } else { + addr = d.sockPath() + proto = "unix" + scheme = "http" + transport = &http.Transport{} + } + + d.c.Assert(sockets.ConfigureTransport(transport, proto, addr), check.IsNil) + + return &clientConfig{ + transport: transport, + scheme: scheme, + addr: addr, + }, nil +} + +// Start will start the daemon and return once it is ready to receive requests. +// You can specify additional daemon flags. +func (d *Daemon) Start(args ...string) error { + logFile, err := os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.folder)) + + return d.StartWithLogFile(logFile, args...) +} + +// StartWithLogFile will start the daemon and attach its streams to a given file. +func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { + dockerdBinary, err := exec.LookPath(dockerdBinary) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id)) + + args := append(d.GlobalFlags, + "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", + "--graph", d.root, + "--exec-root", d.execRoot, + "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), + fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), + ) + if experimentalDaemon { + args = append(args, "--experimental", "--init") + } + if !(d.useDefaultHost || d.useDefaultTLSHost) { + args = append(args, []string{"--host", d.sock()}...) + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + args = append(args, []string{"--userns-remap", root}...) + } + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + foundLog := false + foundSd := false + for _, a := range providedArgs { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { + foundLog = true + } + if strings.Contains(a, "--storage-driver") { + foundSd = true + } + } + if !foundLog { + args = append(args, "--debug") + } + if d.storageDriver != "" && !foundSd { + args = append(args, "--storage-driver", d.storageDriver) + } + + args = append(args, providedArgs...) + d.cmd = exec.Command(dockerdBinary, args...) + d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") + d.cmd.Stdout = out + d.cmd.Stderr = out + d.logFile = out + + if err := d.cmd.Start(); err != nil { + return fmt.Errorf("[%s] could not start daemon container: %v", d.id, err) + } + + wait := make(chan error) + + go func() { + wait <- d.cmd.Wait() + d.c.Logf("[%s] exiting daemon", d.id) + close(wait) + }() + + d.wait = wait + + tick := time.Tick(500 * time.Millisecond) + // make sure daemon is ready to receive requests + startTime := time.Now().Unix() + for { + d.c.Logf("[%s] waiting for daemon to start", d.id) + if time.Now().Unix()-startTime > 5 { + // After 5 seconds, give up + return fmt.Errorf("[%s] Daemon exited and never started", d.id) + } + select { + case <-time.After(2 * time.Second): + return fmt.Errorf("[%s] timeout: daemon does not respond", d.id) + case <-tick: + clientConfig, err := d.getClientConfig() + if err != nil { + return err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/_ping", nil) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not create new request", d.id)) + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + resp, err := client.Do(req) + if err != nil { + continue + } + if resp.StatusCode != http.StatusOK { + d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status) + } + d.c.Logf("[%s] daemon started", d.id) + d.root, err = d.queryRootDir() + if err != nil { + return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) + } + return nil + case <-d.wait: + return fmt.Errorf("[%s] Daemon exited during startup", d.id) + } + } +} + +// StartWithBusybox will first start the daemon with Daemon.Start() +// then save the busybox image from the main daemon and load it into this Daemon instance. +func (d *Daemon) StartWithBusybox(arg ...string) error { + if err := d.Start(arg...); err != nil { + return err + } + return d.LoadBusybox() +} + +// Kill will send a SIGKILL to the daemon +func (d *Daemon) Kill() error { + if d.cmd == nil || d.wait == nil { + return errors.New("daemon not started") + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + if err := d.cmd.Process.Kill(); err != nil { + d.c.Logf("Could not kill daemon: %v", err) + return err + } + + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { + return err + } + + return nil +} + +// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its +// stack to its log file and exit +// This is used primarily for gathering debug information on test timeout +func (d *Daemon) DumpStackAndQuit() { + if d.cmd == nil || d.cmd.Process == nil { + return + } + signalDaemonDump(d.cmd.Process.Pid) +} + +// Stop will send a SIGINT every second and wait for the daemon to stop. +// If it timeouts, a SIGKILL is sent. +// Stop will not delete the daemon directory. If a purged daemon is needed, +// instantiate a new one with NewDaemon. +func (d *Daemon) Stop() error { + if d.cmd == nil || d.wait == nil { + return errors.New("daemon not started") + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + i := 1 + tick := time.Tick(time.Second) + + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } +out1: + for { + select { + case err := <-d.wait: + return err + case <-time.After(20 * time.Second): + // time for stopping jobs and run onShutdown hooks + d.c.Logf("timeout: %v", d.id) + break out1 + } + } + +out2: + for { + select { + case err := <-d.wait: + return err + case <-tick: + i++ + if i > 5 { + d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i) + break out2 + } + d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } + } + } + + if err := d.cmd.Process.Kill(); err != nil { + d.c.Logf("Could not kill daemon: %v", err) + return err + } + + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { + return err + } + + return nil +} + +// Restart will restart the daemon by first stopping it and then starting it. +func (d *Daemon) Restart(arg ...string) error { + d.Stop() + // in the case of tests running a user namespace-enabled daemon, we have resolved + // d.root to be the actual final path of the graph dir after the "uid.gid" of + // remapped root is added--we need to subtract it from the path before calling + // start or else we will continue making subdirectories rather than truly restarting + // with the same location/root: + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + d.root = filepath.Dir(d.root) + } + return d.Start(arg...) +} + +// LoadBusybox will load the stored busybox into a newly started daemon +func (d *Daemon) LoadBusybox() error { + bb := filepath.Join(d.folder, "busybox.tar") + if _, err := os.Stat(bb); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) + } + // saving busybox image from main daemon + if out, err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil { + imagesOut, _ := exec.Command(dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput() + return fmt.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut))) + } + } + // loading busybox image to this daemon + if out, err := d.Cmd("load", "--input", bb); err != nil { + return fmt.Errorf("could not load busybox image: %s", out) + } + if err := os.Remove(bb); err != nil { + d.c.Logf("could not remove %s: %v", bb, err) + } + return nil +} + +func (d *Daemon) queryRootDir() (string, error) { + // update daemon root by asking /info endpoint (to support user + // namespaced daemon with root remapped uid.gid directory) + clientConfig, err := d.getClientConfig() + if err != nil { + return "", err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/info", nil) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/json") + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + + resp, err := client.Do(req) + if err != nil { + return "", err + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + return resp.Body.Close() + }) + + type Info struct { + DockerRootDir string + } + var b []byte + var i Info + b, err = readBody(body) + if err == nil && resp.StatusCode == http.StatusOK { + // read the docker root dir + if err = json.Unmarshal(b, &i); err == nil { + return i.DockerRootDir, nil + } + } + return "", err +} + +func (d *Daemon) sock() string { + return fmt.Sprintf("unix://" + d.sockPath()) +} + +func (d *Daemon) sockPath() string { + return filepath.Join(daemonSockRoot, d.id+".sock") +} + +func (d *Daemon) waitRun(contID string) error { + args := []string{"--host", d.sock()} + return waitInspectWithArgs(contID, "{{.State.Running}}", "true", 10*time.Second, args...) +} + +func (d *Daemon) getBaseDeviceSize(c *check.C) int64 { + infoCmdOutput, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "-H", d.sock(), "info"), + exec.Command("grep", "Base Device Size"), + ) + c.Assert(err, checker.IsNil) + basesizeSlice := strings.Split(infoCmdOutput, ":") + basesize := strings.Trim(basesizeSlice[1], " ") + basesize = strings.Trim(basesize, "\n")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + c.Assert(err, checker.IsNil) + basesizeBytes := int64(basesizeFloat) * (1024 * 1024 * 1024) + return basesizeBytes +} + +// Cmd will execute a docker CLI command against this Daemon. +// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version +func (d *Daemon) Cmd(args ...string) (string, error) { + b, err := d.command(args...).CombinedOutput() + return string(b), err +} + +func (d *Daemon) command(args ...string) *exec.Cmd { + return exec.Command(dockerBinary, d.prependHostArg(args)...) +} + +func (d *Daemon) prependHostArg(args []string) []string { + for _, arg := range args { + if arg == "--host" || arg == "-H" { + return args + } + } + return append([]string{"--host", d.sock()}, args...) +} + +// SockRequest executes a socket request on a daemon and returns statuscode and output. +func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return -1, nil, err + } + + res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + return -1, nil, err + } + b, err := readBody(body) + return res.StatusCode, b, err +} + +// SockRequestRaw executes a socket request on a daemon and returns an http +// response and a reader for the output data. +func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { + return sockRequestRawToDaemon(method, endpoint, data, ct, d.sock()) +} + +// LogFileName returns the path the the daemon's log file +func (d *Daemon) LogFileName() string { + return d.logFile.Name() +} + +func (d *Daemon) getIDByName(name string) (string, error) { + return d.inspectFieldWithError(name, "Id") +} + +func (d *Daemon) activeContainers() (ids []string) { + out, _ := d.Cmd("ps", "-q") + for _, id := range strings.Split(out, "\n") { + if id = strings.TrimSpace(id); id != "" { + ids = append(ids, id) + } + } + return +} + +func (d *Daemon) inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + out, err := d.Cmd("inspect", "-f", format, name) + if err != nil { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func (d *Daemon) inspectFieldWithError(name, field string) (string, error) { + return d.inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +func (d *Daemon) findContainerIP(id string) string { + out, err := d.Cmd("inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'"), id) + if err != nil { + d.c.Log(err) + } + return strings.Trim(out, " \r\n'") +} + +func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) { + buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...) + return runCommandWithOutput(buildCmd) +} + +func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + if len(strings.TrimSpace(out)) == 0 { + return 0, nil + } + return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out)) +} + +func (d *Daemon) reloadConfig() error { + if d.cmd == nil || d.cmd.Process == nil { + return fmt.Errorf("daemon is not running") + } + + errCh := make(chan error) + started := make(chan struct{}) + go func() { + _, body, err := sockRequestRawToDaemon("GET", "/events", nil, "", d.sock()) + close(started) + if err != nil { + errCh <- err + } + defer body.Close() + dec := json.NewDecoder(body) + for { + var e events.Message + if err := dec.Decode(&e); err != nil { + errCh <- err + return + } + if e.Type != events.DaemonEventType { + continue + } + if e.Action != "reload" { + continue + } + close(errCh) // notify that we are done + return + } + }() + + <-started + if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { + return fmt.Errorf("error signaling daemon reload: %v", err) + } + select { + case err := <-errCh: + if err != nil { + return fmt.Errorf("error waiting for daemon reload event: %v", err) + } + case <-time.After(30 * time.Second): + return fmt.Errorf("timeout waiting for daemon reload event") + } + return nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_swarm.go b/vendor/github.com/docker/docker/integration-cli/daemon_swarm.go new file mode 100644 index 0000000..199bce0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon_swarm.go @@ -0,0 +1,419 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// SwarmDaemon is a test daemon with helpers for participating in a swarm. +type SwarmDaemon struct { + *Daemon + swarm.Info + port int + listenAddr string +} + +// Init initializes a new swarm cluster. +func (d *SwarmDaemon) Init(req swarm.InitRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.listenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/init", req) + if status != http.StatusOK { + return fmt.Errorf("initializing swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("initializing swarm: %v", err) + } + info, err := d.info() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Join joins a daemon to an existing cluster. +func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.listenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/join", req) + if status != http.StatusOK { + return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("joining swarm: %v", err) + } + info, err := d.info() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Leave forces daemon to leave current cluster. +func (d *SwarmDaemon) Leave(force bool) error { + url := "/swarm/leave" + if force { + url += "?force=1" + } + status, out, err := d.SockRequest("POST", url, nil) + if status != http.StatusOK { + return fmt.Errorf("leaving swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + err = fmt.Errorf("leaving swarm: %v", err) + } + return err +} + +func (d *SwarmDaemon) info() (swarm.Info, error) { + var info struct { + Swarm swarm.Info + } + status, dt, err := d.SockRequest("GET", "/info", nil) + if status != http.StatusOK { + return info.Swarm, fmt.Errorf("get swarm info: invalid statuscode %v", status) + } + if err != nil { + return info.Swarm, fmt.Errorf("get swarm info: %v", err) + } + if err := json.Unmarshal(dt, &info); err != nil { + return info.Swarm, err + } + return info.Swarm, nil +} + +type serviceConstructor func(*swarm.Service) +type nodeConstructor func(*swarm.Node) +type specConstructor func(*swarm.Spec) + +func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string { + var service swarm.Service + for _, fn := range f { + fn(&service) + } + status, out, err := d.SockRequest("POST", "/services/create", service.Spec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.ServiceCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service { + var service swarm.Service + status, out, err := d.SockRequest("GET", "/services/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &service), checker.IsNil) + return &service +} + +func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filterArgs.Add("service", service) + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + return tasks +} + +func (d *SwarmDaemon) checkServiceTasksInState(service string, state swarm.TaskState, message string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.getServiceTasks(c, service) + var count int + for _, task := range tasks { + if task.Status.State == state { + if message == "" || strings.Contains(task.Status.Message, message) { + count++ + } + } + } + return count, nil + } +} + +func (d *SwarmDaemon) checkServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return d.checkServiceTasksInState(service, swarm.TaskStateRunning, "") +} + +func (d *SwarmDaemon) checkServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + service := d.getService(c, service) + return service.UpdateStatus.State, nil + } +} + +func (d *SwarmDaemon) checkServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.getServiceTasks(c, service) + return len(tasks), nil + } +} + +func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + + result := make(map[string]int) + for _, task := range tasks { + if task.Status.State == swarm.TaskStateRunning { + result[task.Spec.ContainerSpec.Image]++ + } + } + return result, nil +} + +func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { + nodes := d.listNodes(c) + var readyCount int + for _, node := range nodes { + if node.Status.State == swarm.NodeStateReady { + readyCount++ + } + } + return readyCount, nil +} + +func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task { + var task swarm.Task + + status, out, err := d.SockRequest("GET", "/tasks/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &task), checker.IsNil) + return task +} + +func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...serviceConstructor) { + for _, fn := range f { + fn(service) + } + url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) removeService(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/services/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node { + var node swarm.Node + status, out, err := d.SockRequest("GET", "/nodes/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &node), checker.IsNil) + c.Assert(node.ID, checker.Equals, id) + return &node +} + +func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) { + url := "/nodes/" + id + if force { + url += "?force=1" + } + + status, out, err := d.SockRequest("DELETE", url, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) { + for i := 0; ; i++ { + node := d.getNode(c, id) + for _, fn := range f { + fn(node) + } + url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) + status, out, err := d.SockRequest("POST", url, node.Spec) + if i < 10 && strings.Contains(string(out), "update out of sequence") { + time.Sleep(100 * time.Millisecond) + continue + } + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + return + } +} + +func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node { + status, out, err := d.SockRequest("GET", "/nodes", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + nodes := []swarm.Node{} + c.Assert(json.Unmarshal(out, &nodes), checker.IsNil) + return nodes +} + +func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service { + status, out, err := d.SockRequest("GET", "/services", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + services := []swarm.Service{} + c.Assert(json.Unmarshal(out, &services), checker.IsNil) + return services +} + +func (d *SwarmDaemon) createSecret(c *check.C, secretSpec swarm.SecretSpec) string { + status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.SecretCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +func (d *SwarmDaemon) listSecrets(c *check.C) []swarm.Secret { + status, out, err := d.SockRequest("GET", "/secrets", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + secrets := []swarm.Secret{} + c.Assert(json.Unmarshal(out, &secrets), checker.IsNil) + return secrets +} + +func (d *SwarmDaemon) getSecret(c *check.C, id string) *swarm.Secret { + var secret swarm.Secret + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &secret), checker.IsNil) + return &secret +} + +func (d *SwarmDaemon) deleteSecret(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + return sw +} + +func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) { + sw := d.getSwarm(c) + for _, fn := range f { + fn(&sw.Spec) + } + url := fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index) + status, out, err := d.SockRequest("POST", url, sw.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) rotateTokens(c *check.C) { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + + url := fmt.Sprintf("/swarm/update?version=%d&rotateWorkerToken=true&rotateManagerToken=true", sw.Version.Index) + status, out, err = d.SockRequest("POST", url, sw.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + return sw.JoinTokens +} + +func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.info() + c.Assert(err, checker.IsNil) + return info.LocalNodeState, nil +} + +func (d *SwarmDaemon) checkControlAvailable(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + return info.ControlAvailable, nil +} + +func (d *SwarmDaemon) checkLeader(c *check.C) (interface{}, check.CommentInterface) { + errList := check.Commentf("could not get node list") + status, out, err := d.SockRequest("GET", "/nodes", nil) + if err != nil { + return err, errList + } + if status != http.StatusOK { + return fmt.Errorf("expected http status OK, got: %d", status), errList + } + + var ls []swarm.Node + if err := json.Unmarshal(out, &ls); err != nil { + return err, errList + } + + for _, node := range ls { + if node.ManagerStatus != nil && node.ManagerStatus.Leader { + return nil, nil + } + } + return fmt.Errorf("no leader"), check.Commentf("could not find leader") +} + +func (d *SwarmDaemon) cmdRetryOutOfSequence(args ...string) (string, error) { + for i := 0; ; i++ { + out, err := d.Cmd(args...) + if err != nil { + if strings.Contains(out, "update out of sequence") { + if i < 10 { + continue + } + } + } + return out, err + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go b/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go new file mode 100644 index 0000000..0cea901 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go @@ -0,0 +1,20 @@ +package main + +import "github.com/go-check/check" + +func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *SwarmDaemon { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + if d.NodeID == nodeID { + return d + } + } + c.Fatalf("could not find node with id: %s", nodeID) + return nil +} + +// nodeCmd executes a command on a given node via the normal docker socket +func (s *DockerSwarmSuite) nodeCmd(c *check.C, id string, args ...string) (string, error) { + return s.getDaemon(c, id).Cmd(args...) +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_unix.go b/vendor/github.com/docker/docker/integration-cli/daemon_unix.go new file mode 100644 index 0000000..6ca7daf --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package main + +import ( + "os" + "path/filepath" + "syscall" + + "github.com/go-check/check" +) + +func cleanupExecRoot(c *check.C, execRoot string) { + // Cleanup network namespaces in the exec root of this + // daemon because this exec root is specific to this + // daemon instance and has no chance of getting + // cleaned up when a new daemon is instantiated with a + // new exec root. + netnsPath := filepath.Join(execRoot, "netns") + filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error { + if err := syscall.Unmount(path, syscall.MNT_FORCE); err != nil { + c.Logf("unmount of %s failed: %v", path, err) + } + os.Remove(path) + return nil + }) +} + +func signalDaemonDump(pid int) { + syscall.Kill(pid, syscall.SIGQUIT) +} + +func signalDaemonReload(pid int) error { + return syscall.Kill(pid, syscall.SIGHUP) +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_windows.go b/vendor/github.com/docker/docker/integration-cli/daemon_windows.go new file mode 100644 index 0000000..885b703 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon_windows.go @@ -0,0 +1,53 @@ +package main + +import ( + "fmt" + "strconv" + "syscall" + "unsafe" + + "github.com/go-check/check" + "golang.org/x/sys/windows" +) + +func openEvent(desiredAccess uint32, inheritHandle bool, name string, proc *windows.LazyProc) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p2 uint32 + if inheritHandle { + _p2 = 1 + } + r0, _, e1 := proc.Call(uintptr(desiredAccess), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +func pulseEvent(handle syscall.Handle, proc *windows.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +func signalDaemonDump(pid int) { + modkernel32 := windows.NewLazySystemDLL("kernel32.dll") + procOpenEvent := modkernel32.NewProc("OpenEventW") + procPulseEvent := modkernel32.NewProc("PulseEvent") + + ev := "Global\\docker-daemon-" + strconv.Itoa(pid) + h2, _ := openEvent(0x0002, false, ev, procOpenEvent) + if h2 == 0 { + return + } + pulseEvent(h2, procPulseEvent) +} + +func signalDaemonReload(pid int) error { + return fmt.Errorf("daemon reload not supported") +} + +func cleanupExecRoot(c *check.C, execRoot string) { +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go new file mode 100644 index 0000000..d43bf3a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go @@ -0,0 +1,210 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "io" + "net" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stdcopy" + "github.com/go-check/check" + "golang.org/x/net/websocket" +) + +func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") + + rwc, err := sockConn(time.Duration(10*time.Second), "") + c.Assert(err, checker.IsNil) + + cleanedContainerID := strings.TrimSpace(out) + config, err := websocket.NewConfig( + "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", + "http://localhost", + ) + c.Assert(err, checker.IsNil) + + ws, err := websocket.NewClient(config, rwc) + c.Assert(err, checker.IsNil) + defer ws.Close() + + expected := []byte("hello") + actual := make([]byte, len(expected)) + + outChan := make(chan error) + go func() { + _, err := io.ReadFull(ws, actual) + outChan <- err + close(outChan) + }() + + inChan := make(chan error) + go func() { + _, err := ws.Write(expected) + inChan <- err + close(inChan) + }() + + select { + case err := <-inChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout writing to ws") + } + + select { + case err := <-outChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout reading from ws") + } + + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("Websocket didn't return the expected data")) +} + +// regression gh14320 +func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { + req, client, err := newRequestClient("POST", "/containers/doesnotexist/attach", nil, "", "") + c.Assert(err, checker.IsNil) + + resp, err := client.Do(req) + // connection will shutdown, err should be "persistent connection closed" + c.Assert(err, checker.NotNil) // Server shutdown connection + + body, err := readBody(resp.Body) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) + expected := "No such container: doesnotexist\r\n" + c.Assert(string(body), checker.Equals, expected) +} + +func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) { + status, body, err := sockRequest("GET", "/containers/doesnotexist/attach/ws", nil) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(err, checker.IsNil) + expected := "No such container: doesnotexist" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +func (s *DockerSuite) TestPostContainersAttach(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectSuccess := func(conn net.Conn, br *bufio.Reader, stream string, tty bool) { + defer conn.Close() + expected := []byte("success") + _, err := conn.Write(expected) + c.Assert(err, checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + lenHeader := 0 + if !tty { + lenHeader = 8 + } + actual := make([]byte, len(expected)+lenHeader) + _, err = io.ReadFull(br, actual) + c.Assert(err, checker.IsNil) + if !tty { + fdMap := map[string]byte{ + "stdin": 0, + "stdout": 1, + "stderr": 2, + } + c.Assert(actual[0], checker.Equals, fdMap[stream]) + } + c.Assert(actual[lenHeader:], checker.DeepEquals, expected, check.Commentf("Attach didn't return the expected data from %s", stream)) + } + + expectTimeout := func(conn net.Conn, br *bufio.Reader, stream string) { + defer conn.Close() + _, err := conn.Write([]byte{'t'}) + c.Assert(err, checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + actual := make([]byte, 1) + _, err = io.ReadFull(br, actual) + opErr, ok := err.(*net.OpError) + c.Assert(ok, checker.Equals, true, check.Commentf("Error is expected to be *net.OpError, got %v", err)) + c.Assert(opErr.Timeout(), checker.Equals, true, check.Commentf("Read from %s is expected to timeout", stream)) + } + + // Create a container that only emits stdout. + cid, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + cid = strings.TrimSpace(cid) + // Attach to the container's stdout stream. + conn, br, err := sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + // Check if the data from stdout can be received. + expectSuccess(conn, br, "stdout", false) + // Attach to the container's stderr stream. + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + // Since the container only emits stdout, attaching to stderr should return nothing. + expectTimeout(conn, br, "stdout") + + // Test the similar functions of the stderr stream. + cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stderr", false) + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + expectTimeout(conn, br, "stderr") + + // Test with tty. + cid, _ = dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + // Attach to stdout only. + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stdout", true) + + // Attach without stdout stream. + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + // Nothing should be received because both the stdout and stderr of the container will be + // sent to the client as stdout when tty is enabled. + expectTimeout(conn, br, "stdout") + + // Test the client API + // Make sure we don't see "hello" if Logs is false + client, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + + cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "echo hello; cat") + cid = strings.TrimSpace(cid) + + attachOpts := types.ContainerAttachOptions{ + Stream: true, + Stdin: true, + Stdout: true, + } + + resp, err := client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + expectSuccess(resp.Conn, resp.Reader, "stdout", false) + + // Make sure we do see "hello" if Logs is true + attachOpts.Logs = true + resp, err = client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + + defer resp.Conn.Close() + resp.Conn.SetReadDeadline(time.Now().Add(time.Second)) + + _, err = resp.Conn.Write([]byte("success")) + c.Assert(err, checker.IsNil) + + actualStdout := new(bytes.Buffer) + actualStderr := new(bytes.Buffer) + stdcopy.StdCopy(actualStdout, actualStderr, resp.Reader) + c.Assert(actualStdout.Bytes(), checker.DeepEquals, []byte("hello\nsuccess"), check.Commentf("Attach didn't return the expected data from stdout")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go new file mode 100644 index 0000000..bfcae31 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go @@ -0,0 +1,25 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Test case for #22244 +func (s *DockerSuite) TestAuthAPI(c *check.C) { + testRequires(c, Network) + config := types.AuthConfig{ + Username: "no-user", + Password: "no-password", + } + + expected := "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password" + status, body, err := sockRequest("POST", "/auth", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusUnauthorized) + msg := getErrorMessage(c, body) + c.Assert(msg, checker.Contains, expected, check.Commentf("Expected: %v, got: %v", expected, msg)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go new file mode 100644 index 0000000..9b069a4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go @@ -0,0 +1,254 @@ +package main + +import ( + "archive/tar" + "bytes" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) { + testRequires(c, NotUserNamespace) + var testD string + if daemonPlatform == "windows" { + testD = `FROM busybox +COPY * /tmp/ +RUN find / -name ba* +RUN find /tmp/` + } else { + // -xdev is required because sysfs can cause EPERM + testD = `FROM busybox +COPY * /tmp/ +RUN find / -xdev -name ba* +RUN find /tmp/` + } + server, err := fakeStorage(map[string]string{"testD": testD}) + c.Assert(err, checker.IsNil) + defer server.Close() + + res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + // Make sure Dockerfile exists. + // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL + out := string(buf) + c.Assert(out, checker.Contains, "/tmp/Dockerfile") + c.Assert(out, checker.Not(checker.Contains), "baz") +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte("FROM busybox") + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, checker.IsNil) + + defer server.Close() + + res, b, err := sockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + b.Close() +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContextWithCustomDockerfile(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox +RUN echo 'wrong'`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + custom := []byte(`FROM busybox +RUN echo 'right' +`) + err = tw.WriteHeader(&tar.Header{ + Name: "custom", + Size: int64(len(custom)), + }) + + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(custom) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, checker.IsNil) + + defer server.Close() + url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" + res, body, err := sockRequestRaw("POST", url, nil, "application/tar") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + defer body.Close() + content, err := readBody(body) + c.Assert(err, checker.IsNil) + + // Build used the wrong dockerfile. + c.Assert(string(content), checker.Not(checker.Contains), "wrong") +} + +func (s *DockerSuite) TestBuildAPILowerDockerfile(c *check.C) { + git, err := newFakeGit("repo", map[string]string{ + "dockerfile": `FROM busybox +RUN echo from dockerfile`, + }, false) + c.Assert(err, checker.IsNil) + defer git.Close() + + res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from dockerfile") +} + +func (s *DockerSuite) TestBuildAPIBuildGitWithF(c *check.C) { + git, err := newFakeGit("repo", map[string]string{ + "baz": `FROM busybox +RUN echo from baz`, + "Dockerfile": `FROM busybox +RUN echo from Dockerfile`, + }, false) + c.Assert(err, checker.IsNil) + defer git.Close() + + // Make sure it tries to 'dockerfile' query param value + res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from baz") +} + +func (s *DockerSuite) TestBuildAPIDoubleDockerfile(c *check.C) { + testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows + git, err := newFakeGit("repo", map[string]string{ + "Dockerfile": `FROM busybox +RUN echo from Dockerfile`, + "dockerfile": `FROM busybox +RUN echo from dockerfile`, + }, false) + c.Assert(err, checker.IsNil) + defer git.Close() + + // Make sure it tries to 'dockerfile' query param value + res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from Dockerfile") +} + +func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) { + // Make sure that build context tars with entries of the form + // x/./y don't cause caching false positives. + + buildFromTarContext := func(fileContents []byte) string { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + COPY dir /dir/`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write Dockerfile in tar file content + c.Assert(err, checker.IsNil) + + err = tw.WriteHeader(&tar.Header{ + Name: "dir/./file", + Size: int64(len(fileContents)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(fileContents) + // failed to write file contents in tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := readBody(body) + c.Assert(err, checker.IsNil) + lines := strings.Split(string(out), "\n") + c.Assert(len(lines), checker.GreaterThan, 1) + c.Assert(lines[len(lines)-2], checker.Matches, ".*Successfully built [0-9a-f]{12}.*") + + re := regexp.MustCompile("Successfully built ([0-9a-f]{12})") + matches := re.FindStringSubmatch(lines[len(lines)-2]) + return matches[1] + } + + imageA := buildFromTarContext([]byte("abc")) + imageB := buildFromTarContext([]byte("def")) + + c.Assert(imageA, checker.Not(checker.Equals), imageB) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go new file mode 100644 index 0000000..d046ec0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go @@ -0,0 +1,1961 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/integration" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestContainerAPIGetAll(c *check.C) { + startCount, err := getContainerCount() + c.Assert(err, checker.IsNil, check.Commentf("Cannot query container count")) + + name := "getall" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var inspectJSON []struct { + Names []string + } + err = json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal response body")) + + c.Assert(inspectJSON, checker.HasLen, startCount+1) + + actual := inspectJSON[0].Names[0] + c.Assert(actual, checker.Equals, "/"+name) +} + +// regression test for empty json field being omitted #13691 +func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) { + dockerCmd(c, "run", "busybox", "true") + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + // empty Labels field triggered this bug, make sense to check for everything + // cause even Ports for instance can trigger this bug + // better safe than sorry.. + fields := []string{ + "Id", + "Names", + "Image", + "Command", + "Created", + "Ports", + "Labels", + "Status", + "NetworkSettings", + } + + // decoding into types.Container do not work since it eventually unmarshal + // and empty field to an empty go map, so we just check for a string + for _, f := range fields { + if !strings.Contains(string(body), f) { + c.Fatalf("Field %s is missing and it shouldn't", f) + } + } +} + +type containerPs struct { + Names []string + Ports []map[string]interface{} +} + +// regression test for non-empty fields from #13901 +func (s *DockerSuite) TestContainerAPIPsOmitFields(c *check.C) { + // Problematic for Windows porting due to networking not yet being passed back + testRequires(c, DaemonIsLinux) + name := "pstest" + port := 80 + runSleepingContainer(c, "--name", name, "--expose", strconv.Itoa(port)) + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var resp []containerPs + err = json.Unmarshal(body, &resp) + c.Assert(err, checker.IsNil) + + var foundContainer *containerPs + for _, container := range resp { + for _, testName := range container.Names { + if "/"+name == testName { + foundContainer = &container + break + } + } + } + + c.Assert(foundContainer.Ports, checker.HasLen, 1) + c.Assert(foundContainer.Ports[0]["PrivatePort"], checker.Equals, float64(port)) + _, ok := foundContainer.Ports[0]["PublicPort"] + c.Assert(ok, checker.Not(checker.Equals), true) + _, ok = foundContainer.Ports[0]["IP"] + c.Assert(ok, checker.Not(checker.Equals), true) +} + +func (s *DockerSuite) TestContainerAPIGetExport(c *check.C) { + // Not supported on Windows as Windows does not support docker export + testRequires(c, DaemonIsLinux) + name := "exportcontainer" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test") + + status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil && err == io.EOF { + break + } + if h.Name == "test" { + found = true + break + } + } + c.Assert(found, checker.True, check.Commentf("The created test file has not been found in the exported image")) +} + +func (s *DockerSuite) TestContainerAPIGetChanges(c *check.C) { + // Not supported on Windows as Windows does not support docker diff (/containers/name/changes) + testRequires(c, DaemonIsLinux) + name := "changescontainer" + dockerCmd(c, "run", "--name", name, "busybox", "rm", "/etc/passwd") + + status, body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + changes := []struct { + Kind int + Path string + }{} + c.Assert(json.Unmarshal(body, &changes), checker.IsNil, check.Commentf("unable to unmarshal response body")) + + // Check the changelog for removal of /etc/passwd + success := false + for _, elem := range changes { + if elem.Path == "/etc/passwd" && elem.Kind == 2 { + success = true + } + } + c.Assert(success, checker.True, check.Commentf("/etc/passwd has been removed but is not present in the diff")) +} + +func (s *DockerSuite) TestGetContainerStats(c *check.C) { + var ( + name = "statscontainer" + ) + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + var s *types.Stats + // decode only one object from the stream + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { + out, _ := runSleepingContainer(c) + id := strings.TrimSpace(out) + + buf := &integration.ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + _, body, err := sockRequestRaw("GET", "/containers/"+id+"/stats?stream=1", nil, "application/json") + c.Assert(err, checker.IsNil) + defer body.Close() + + chErr := make(chan error, 1) + go func() { + _, err = io.Copy(buf, body) + chErr <- err + }() + + b := make([]byte, 32) + // make sure we've got some stats + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + // Now remove without `-f` and make sure we are still pulling stats + _, _, err = dockerCmdWithError("rm", id) + c.Assert(err, checker.Not(checker.IsNil), check.Commentf("rm should have failed but didn't")) + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", id) + c.Assert(<-chErr, checker.IsNil) +} + +// regression test for gh13421 +// previous test was just checking one stat entry so it didn't fail (stats with +// stream false always return one stat) +func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + s := string(sr.body) + // count occurrences of "read" of types.Stats + if l := strings.Count(s, "read"); l < 2 { + c.Fatalf("Expected more than one stat streamed, got %d", l) + } + } +} + +func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats?stream=0", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + s := string(sr.body) + // count occurrences of `"read"` of types.Stats + c.Assert(strings.Count(s, `"read"`), checker.Equals, 1, check.Commentf("Expected only one stat streamed, got %d", strings.Count(s, `"read"`))) + } +} + +func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { + name := "statscontainer" + dockerCmd(c, "create", "--name", name, "busybox", "ps") + + type stats struct { + status int + err error + } + chResp := make(chan stats) + + // We expect an immediate response, but if it's not immediate, the test would hang, so put it in a goroutine + // below we'll check this on a timeout. + go func() { + resp, body, err := sockRequestRaw("GET", "/containers/"+name+"/stats", nil, "") + body.Close() + chResp <- stats{resp.StatusCode, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.status, checker.Equals, http.StatusOK) + case <-time.After(10 * time.Second): + c.Fatal("timeout waiting for stats response for stopped container") + } +} + +func (s *DockerSuite) TestContainerAPIPause(c *check.C) { + // Problematic on Windows as Windows does not support pause + testRequires(c, DaemonIsLinux) + defer unpauseAllContainers() + out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "30") + ContainerID := strings.TrimSpace(out) + + status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pausedContainers, err := getSliceOfPausedContainers() + c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) + + if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { + c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) + } + + status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pausedContainers, err = getSliceOfPausedContainers() + c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) + c.Assert(pausedContainers, checker.IsNil, check.Commentf("There should be no paused container.")) +} + +func (s *DockerSuite) TestContainerAPITop(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := sockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 11, check.Commentf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { + c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles) + } + c.Assert(top.Processes, checker.HasLen, 2, check.Commentf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes)) + c.Assert(top.Processes[0][10], checker.Equals, "/bin/sh -c top") + c.Assert(top.Processes[1][10], checker.Equals, "top") +} + +func (s *DockerSuite) TestContainerAPITopWindows(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := runSleepingContainer(c, "-d") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := sockRequest("GET", "/containers/"+id+"/top", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 4, check.Commentf("expected 4 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "Name" || top.Titles[3] != "Private Working Set" { + c.Fatalf("expected `Name` at `Titles[0]` and `Private Working Set` at Titles[3]: %v", top.Titles) + } + c.Assert(len(top.Processes), checker.GreaterOrEqualThan, 2, check.Commentf("expected at least 2 processes, found %d: %v", len(top.Processes), top.Processes)) + + foundProcess := false + expectedProcess := "busybox.exe" + for _, process := range top.Processes { + if process[0] == expectedProcess { + foundProcess = true + break + } + } + + c.Assert(foundProcess, checker.Equals, true, check.Commentf("expected to find %s: %v", expectedProcess, top.Processes)) +} + +func (s *DockerSuite) TestContainerAPICommit(c *check.C) { + cName := "testapicommit" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + name := "testcontainerapicommit" + status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *check.C) { + cName := "testapicommitwithconfig" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + config := map[string]interface{}{ + "Labels": map[string]string{"key1": "value1", "key2": "value2"}, + } + + name := "testcontainerapicommitwithconfig" + status, b, err := sockRequest("POST", "/commit?repo="+name+"&container="+cName, config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1") + c.Assert(label1, checker.Equals, "value1") + + label2 := inspectFieldMap(c, img.ID, "Config.Labels", "key2") + c.Assert(label2, checker.Equals, "value2") + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPIBadPort(c *check.C) { + // TODO Windows to Windows CI - Port this test + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "echo test"}, + "PortBindings": map[string]interface{}{ + "8080/tcp": []map[string]interface{}{ + { + "HostIP": "", + "HostPort": "aa80", + }, + }, + }, + } + + jsonData := bytes.NewBuffer(nil) + json.NewEncoder(jsonData).Encode(config) + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Equals, `invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", body)) +} + +func (s *DockerSuite) TestContainerAPICreate(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "touch /test && ls /test"}, + } + + status, b, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type createResp struct { + ID string + } + var container createResp + c.Assert(json.Unmarshal(b, &container), checker.IsNil) + + out, _ := dockerCmd(c, "start", "-a", container.ID) + c.Assert(strings.TrimSpace(out), checker.Equals, "/test") +} + +func (s *DockerSuite) TestContainerAPICreateEmptyConfig(c *check.C) { + config := map[string]interface{}{} + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + expected := "Config cannot be empty in order to create a container" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) +} + +func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) { + // Container creation must fail if client specified configurations for more than one network + config := map[string]interface{}{ + "Image": "busybox", + "NetworkingConfig": networktypes.NetworkingConfig{ + EndpointsConfig: map[string]*networktypes.EndpointSettings{ + "net1": {}, + "net2": {}, + "net3": {}, + }, + }, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + msg := getErrorMessage(c, body) + // network name order in error message is not deterministic + c.Assert(msg, checker.Contains, "Container cannot be connected to network endpoints") + c.Assert(msg, checker.Contains, "net1") + c.Assert(msg, checker.Contains, "net2") + c.Assert(msg, checker.Contains, "net3") +} + +func (s *DockerSuite) TestContainerAPICreateWithHostName(c *check.C) { + hostName := "test-host" + config := map[string]interface{}{ + "Image": "busybox", + "Hostname": hostName, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname")) +} + +func (s *DockerSuite) TestContainerAPICreateWithDomainName(c *check.C) { + domainName := "test-domain" + config := map[string]interface{}{ + "Image": "busybox", + "Domainname": domainName, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname")) +} + +func (s *DockerSuite) TestContainerAPICreateBridgeNetworkMode(c *check.C) { + // Windows does not support bridge + testRequires(c, DaemonIsLinux) + UtilCreateNetworkMode(c, "bridge") +} + +func (s *DockerSuite) TestContainerAPICreateOtherNetworkModes(c *check.C) { + // Windows does not support these network modes + testRequires(c, DaemonIsLinux, NotUserNamespace) + UtilCreateNetworkMode(c, "host") + UtilCreateNetworkMode(c, "container:web1") +} + +func UtilCreateNetworkMode(c *check.C, networkMode string) { + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"NetworkMode": networkMode}, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.HostConfig.NetworkMode, checker.Equals, containertypes.NetworkMode(networkMode), check.Commentf("Mismatched NetworkMode")) +} + +func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *check.C) { + // TODO Windows to Windows CI. The CpuShares part could be ported. + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "CpuShares": 512, + "CpusetCpus": "0", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + + out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares") + c.Assert(out, checker.Equals, "512") + + outCpuset := inspectField(c, containerJSON.ID, "HostConfig.CpusetCpus") + c.Assert(outCpuset, checker.Equals, "0") +} + +func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + } + + create := func(ct string) (*http.Response, io.ReadCloser, error) { + jsonData := bytes.NewBuffer(nil) + c.Assert(json.NewEncoder(jsonData).Encode(config), checker.IsNil) + return sockRequestRaw("POST", "/containers/create", jsonData, ct) + } + + // Try with no content-type + res, body, err := create("") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // Try with wrong content-type + res, body, err = create("application/xml") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // now application/json + res, body, err = create("application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + body.Close() +} + +//Issue 14230. daemon should return 500 for invalid port syntax +func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "NetworkMode": "default", + "PortBindings": { + "19039;1230": [ + {} + ] + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid port") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "something", + "MaximumRetryCount": 0 + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "always", + "MaximumRetryCount": 2 + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be used with restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": -2 + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be negative") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": 0 + } + } + }` + + res, _, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) +} + +// Issue 7941 - test to make sure a "null" in JSON is just ignored. +// W/o this fix a null in JSON would be parsed into a string var as "null" +func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) { + config := `{ + "Hostname":"", + "Domainname":"", + "Memory":0, + "MemorySwap":0, + "CpuShares":0, + "Cpuset":null, + "AttachStdin":true, + "AttachStdout":true, + "AttachStderr":true, + "ExposedPorts":{}, + "Tty":true, + "OpenStdin":true, + "StdinOnce":true, + "Env":[], + "Cmd":"ls", + "Image":"busybox", + "Volumes":{}, + "WorkingDir":"", + "Entrypoint":null, + "NetworkDisabled":false, + "OnBuild":null}` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + type createResp struct { + ID string + } + var container createResp + c.Assert(json.Unmarshal(b, &container), checker.IsNil) + out := inspectField(c, container.ID, "HostConfig.CpusetCpus") + c.Assert(out, checker.Equals, "") + + outMemory := inspectField(c, container.ID, "HostConfig.Memory") + c.Assert(outMemory, checker.Equals, "0") + outMemorySwap := inspectField(c, container.ID, "HostConfig.MemorySwap") + c.Assert(outMemorySwap, checker.Equals, "0") +} + +func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + config := `{ + "Image": "busybox", + "Cmd": "ls", + "OpenStdin": true, + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + b, err2 := readBody(body) + c.Assert(err2, checker.IsNil) + + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +func (s *DockerSuite) TestContainerAPIRename(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "TestContainerAPIRename", "-d", "busybox", "sh") + + containerID := strings.TrimSpace(out) + newName := "TestContainerAPIRenameNew" + statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + + name := inspectField(c, containerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) +} + +func (s *DockerSuite) TestContainerAPIKill(c *check.C) { + name := "test-api-kill" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := sockRequest("POST", "/containers/"+name+"/kill", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + state := inspectField(c, name, "State.Running") + c.Assert(state, checker.Equals, "false", check.Commentf("got wrong State from container %s: %q", name, state)) +} + +func (s *DockerSuite) TestContainerAPIRestart(c *check.C) { + name := "test-api-restart" + runSleepingContainer(c, "-di", "--name", name) + + status, _, err := sockRequest("POST", "/containers/"+name+"/restart?t=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c *check.C) { + name := "test-api-restart-no-timeout-param" + out, _ := runSleepingContainer(c, "-di", "--name", name) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := sockRequest("POST", "/containers/"+name+"/restart", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s *DockerSuite) TestContainerAPIStart(c *check.C) { + name := "testing-start" + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + // second call to start should give 304 + status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + c.Assert(err, checker.IsNil) + + // TODO(tibor): figure out why this doesn't work on windows + if isLocalDaemon { + c.Assert(status, checker.Equals, http.StatusNotModified) + } +} + +func (s *DockerSuite) TestContainerAPIStop(c *check.C) { + name := "test-api-stop" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + // second call to start should give 304 + status, _, err = sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotModified) +} + +func (s *DockerSuite) TestContainerAPIWait(c *check.C) { + name := "test-api-wait" + + sleepCmd := "/bin/sleep" + if daemonPlatform == "windows" { + sleepCmd = "sleep" + } + dockerCmd(c, "run", "--name", name, "busybox", sleepCmd, "2") + + status, body, err := sockRequest("POST", "/containers/"+name+"/wait", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + var waitres containertypes.ContainerWaitOKBody + c.Assert(json.Unmarshal(body, &waitres), checker.IsNil) + c.Assert(waitres.StatusCode, checker.Equals, int64(0)) +} + +func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *check.C) { + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + + status, _, err := sockRequest("POST", "/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPICopyPre124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + + status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if h.Name == "test.txt" { + found = true + break + } + } + c.Assert(found, checker.True) +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-empty" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "", + } + + status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Path cannot be empty\n") +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-not-found" + dockerCmd(c, "run", "--name", name, "busybox") + + postData := types.CopyConfig{ + Resource: "/notexist", + } + + status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Could not find the file /notexist in container "+name+"\n") +} + +func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + postData := types.CopyConfig{ + Resource: "/something", + } + + status, _, err := sockRequest("POST", "/v1.23/containers/notexists/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPIDelete(c *check.C) { + out, _ := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + dockerCmd(c, "stop", id) + + status, _, err := sockRequest("DELETE", "/containers/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *check.C) { + status, body, err := sockRequest("DELETE", "/containers/doesnotexist", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(getErrorMessage(c, body), checker.Matches, "No such container: doesnotexist") +} + +func (s *DockerSuite) TestContainerAPIDeleteForce(c *check.C) { + out, _ := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id+"?force=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "tlink1", "busybox", "top") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + out, _ = dockerCmd(c, "run", "--link", "tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top") + + id2 := strings.TrimSpace(out) + c.Assert(waitRun(id2), checker.IsNil) + + links := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers")) + + status, b, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent, check.Commentf(string(b))) + + linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(linksPostRm, checker.Equals, "null", check.Commentf("call to api deleteContainer links should have removed the specified links")) +} + +func (s *DockerSuite) TestContainerAPIDeleteConflict(c *check.C) { + out, _ := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) +} + +func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) { + testRequires(c, SameHostDaemon) + + vol := "/testvolume" + if daemonPlatform == "windows" { + vol = `c:\testvolume` + } + + out, _ := runSleepingContainer(c, "-v", vol) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + source, err := inspectMountSourceField(id, vol) + _, err = os.Stat(source) + c.Assert(err, checker.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + _, err = os.Stat(source) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("expected to get ErrNotExist error, got %v", err)) +} + +// Regression test for https://github.com/docker/docker/issues/6231 +func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *check.C) { + conn, err := sockConn(time.Duration(10*time.Second), "") + c.Assert(err, checker.IsNil) + client := httputil.NewClientConn(conn, nil) + defer client.Close() + + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + b, err := json.Marshal(config) + c.Assert(err, checker.IsNil) + + req, err := http.NewRequest("POST", "/containers/create", bytes.NewBuffer(b)) + c.Assert(err, checker.IsNil) + req.Header.Set("Content-Type", "application/json") + // This is a cheat to make the http request do chunked encoding + // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite + // https://golang.org/src/pkg/net/http/request.go?s=11980:12172 + req.ContentLength = -1 + + resp, err := client.Do(req) + c.Assert(err, checker.IsNil, check.Commentf("error creating container with chunked encoding")) + resp.Body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated) +} + +func (s *DockerSuite) TestContainerAPIPostContainerStop(c *check.C) { + out, _ := runSleepingContainer(c) + + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/stop", nil) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) +} + +// #14170 +func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd []string + }{"busybox", "echo", []string{"hello", "world"}} + _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Entrypoint []string + Cmd []string + }{"busybox", []string{"echo"}, []string{"hello", "world"}} + _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// #14170 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd string + }{"busybox", "echo", "hello world"} + _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Cmd []string + }{"busybox", []string{"echo", "hello", "world"}} + _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// regression #14318 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *check.C) { + // Windows doesn't support CapAdd/CapDrop + testRequires(c, DaemonIsLinux) + config := struct { + Image string + CapAdd string + CapDrop string + }{"busybox", "NET_ADMIN", "SYS_ADMIN"} + status, _, err := sockRequest("POST", "/containers/create?name=capaddtest0", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config2 := struct { + Image string + CapAdd []string + CapDrop []string + }{"busybox", []string{"NET_ADMIN", "SYS_ADMIN"}, []string{"SETGID"}} + status, _, err = sockRequest("POST", "/containers/create?name=capaddtest1", config2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// #14915 +func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only support 1.25 or later + config := struct { + Image string + }{"busybox"} + status, _, err := sockRequest("POST", "/v1.18/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// Ensure an error occurs when you have a container read-only rootfs but you +// extract an archive to a symlink in a writable volume which points to a +// directory outside of the volume. +func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) { + // Windows does not support read-only rootfs + // Requires local volume mount bind. + // --read-only + userns has remount issues + testRequires(c, SameHostDaemon, NotUserNamespace, DaemonIsLinux) + + testVol := getTestDir(c, "test-put-container-archive-err-symlink-in-volume-to-read-only-rootfs-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + cID := makeTestContainer(c, testContainerOptions{ + readOnly: true, + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + defer deleteContainer(cID) + + // Attempt to extract to a symlink in the volume which points to a + // directory outside the volume. This should cause an error because the + // rootfs is read-only. + query := make(url.Values, 1) + query.Set("path", "/vol2/symlinkToAbsDir") + urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode()) + + statusCode, body, err := sockRequest("PUT", urlPath, nil) + c.Assert(err, checker.IsNil) + + if !isCpCannotCopyReadOnly(fmt.Errorf(string(body))) { + c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body)) + } +} + +func (s *DockerSuite) TestContainerAPIGetContainersJSONEmpty(c *check.C) { + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(string(body), checker.Equals, "[]\n") +} + +func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) { + // Not supported on Windows + testRequires(c, DaemonIsLinux) + + c1 := struct { + Image string + CpusetCpus string + }{"busybox", "1-42,,"} + name := "wrong-cpuset-cpus" + status, body, err := sockRequest("POST", "/containers/create?name="+name, c1) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + expected := "Invalid value 1-42,, for cpuset cpus" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + + c2 := struct { + Image string + CpusetMems string + }{"busybox", "42-3,1--"} + name = "wrong-cpuset-mems" + status, body, err = sockRequest("POST", "/containers/create?name="+name, c2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + expected = "Invalid value 42-3,1-- for cpuset mems" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"ShmSize": -1}, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Contains, "SHM size can not be less than 0") +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + var defaultSHMSize int64 = 67108864 + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": "mount", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, defaultSHMSize) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegexp.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{}, + "Cmd": "mount", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(67108864)) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegexp.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": "mount", + "HostConfig": map[string]interface{}{"ShmSize": 1073741824}, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(1073741824)) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 1GB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *check.C) { + // Swappiness is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1)) +} + +// check validation is done daemon side and not only in cli +func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *check.C) { + // OomScoreAdj is not supported on Windows + testRequires(c, DaemonIsLinux) + + config := struct { + Image string + OomScoreAdj int + }{"busybox", 1001} + name := "oomscoreadj-over" + status, b, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + + expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]" + msg := getErrorMessage(c, b) + if !strings.Contains(msg, expected) { + c.Fatalf("Expected output to contain %q, got %q", expected, msg) + } + + config = struct { + Image string + OomScoreAdj int + }{"busybox", -1001} + name = "oomscoreadj-low" + status, b, err = sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]" + msg = getErrorMessage(c, b) + if !strings.Contains(msg, expected) { + c.Fatalf("Expected output to contain %q, got %q", expected, msg) + } +} + +// test case for #22210 where an empty container name caused panic. +func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *check.C) { + status, out, err := sockRequest("DELETE", "/containers/", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(string(out), checker.Contains, "No container name or ID supplied") +} + +func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) { + // Problematic on Windows as Windows does not support stats + testRequires(c, DaemonIsLinux) + + name := "testing-network-disabled" + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"top"}, + "NetworkDisabled": true, + } + + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + c.Assert(waitRun(name), check.IsNil) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + // decode only one object from the stream + var s *types.Stats + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) { + type m mounttypes.Mount + type hc struct{ Mounts []m } + type cfg struct { + Image string + HostConfig hc + } + type testCase struct { + config cfg + status int + msg string + } + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + notExistPath := prefix + slash + "notexist" + + cases := []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "notreal", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "mount type unknown", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "Source must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: notExistPath, + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "bind source path does not exist", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello", + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello2", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local"}}}}}}, + status: http.StatusCreated, + msg: "", + }, + } + + if SameHostDaemon.Condition() { + tmpDir, err := ioutils.TempDir("", "test-mounts-api") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + cases = append(cases, []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{}}}}}, + status: http.StatusBadRequest, + msg: "VolumeOptions must not be specified", + }, + }...) + } + + if DaemonIsLinux.Condition() { + cases = append(cases, []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello3", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local", + Options: map[string]string{"o": "size=1"}}}}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Target: destPath, + TmpfsOptions: &mounttypes.TmpfsOptions{ + SizeBytes: 4096 * 1024, + Mode: 0700, + }}}}}, + status: http.StatusCreated, + msg: "", + }, + + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Source: "/shouldnotbespecified", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "Source must not be specified", + }, + }...) + + } + + for i, x := range cases { + c.Logf("case %d", i) + status, b, err := sockRequest("POST", "/containers/create", x.config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, x.status, check.Commentf("%s\n%v", string(b), cases[i].config)) + if len(x.msg) > 0 { + c.Assert(string(b), checker.Contains, x.msg, check.Commentf("%v", cases[i].config)) + } + } +} + +func (s *DockerSuite) TestContainerAPICreateMountsBindRead(c *check.C) { + testRequires(c, NotUserNamespace, SameHostDaemon) + // also with data in the host side + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + tmpDir, err := ioutil.TempDir("", "test-mounts-api-bind") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("hello"), 666) + c.Assert(err, checker.IsNil) + + data := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "cat /foo/bar"}, + "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{{"Type": "bind", "Source": tmpDir, "Target": destPath}}}, + } + status, resp, err := sockRequest("POST", "/containers/create?name=test", data) + c.Assert(err, checker.IsNil, check.Commentf(string(resp))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + + out, _ := dockerCmd(c, "start", "-a", "test") + c.Assert(out, checker.Equals, "hello") +} + +// Test Mounts comes out as expected for the MountPoint +func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + + var ( + err error + testImg string + ) + if daemonPlatform != "windows" { + testImg, err = buildImage("test-mount-config", ` + FROM busybox + RUN mkdir `+destPath+` && touch `+destPath+slash+`bar + CMD cat `+destPath+slash+`bar + `, true) + } else { + testImg = "busybox" + } + c.Assert(err, checker.IsNil) + + type testCase struct { + cfg mounttypes.Mount + expected types.MountPoint + } + + cases := []testCase{ + // use literal strings here for `Type` instead of the defined constants in the volume package to keep this honest + // Validation of the actual `Mount` struct is done in another test is not needed here + {mounttypes.Mount{Type: "volume", Target: destPath}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath + slash}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath}}, + } + + if SameHostDaemon.Condition() { + // setup temp dir for testing binds + tmpDir1, err := ioutil.TempDir("", "test-mounts-api-1") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir1) + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir1}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}}, + }...) + + // for modes only supported on Linux + if DaemonIsLinux.Condition() { + tmpDir3, err := ioutils.TempDir("", "test-mounts-api-3") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir3) + + c.Assert(mount.Mount(tmpDir3, tmpDir3, "none", "bind,rw"), checker.IsNil) + c.Assert(mount.ForceMount("", tmpDir3, "none", "shared"), checker.IsNil) + + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"}}, + }...) + } + } + + if daemonPlatform != "windows" { // Windows does not support volume populate + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath}}, + }...) + } + + type wrapper struct { + containertypes.Config + HostConfig containertypes.HostConfig + } + type createResp struct { + ID string `json:"Id"` + } + for i, x := range cases { + c.Logf("case %d - config: %v", i, x.cfg) + status, data, err := sockRequest("POST", "/containers/create", wrapper{containertypes.Config{Image: testImg}, containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}}) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(data))) + + var resp createResp + err = json.Unmarshal(data, &resp) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + id := resp.ID + + var mps []types.MountPoint + err = json.NewDecoder(strings.NewReader(inspectFieldJSON(c, id, "Mounts"))).Decode(&mps) + c.Assert(err, checker.IsNil) + c.Assert(mps, checker.HasLen, 1) + c.Assert(mps[0].Destination, checker.Equals, x.expected.Destination) + + if len(x.expected.Source) > 0 { + c.Assert(mps[0].Source, checker.Equals, x.expected.Source) + } + if len(x.expected.Name) > 0 { + c.Assert(mps[0].Name, checker.Equals, x.expected.Name) + } + if len(x.expected.Driver) > 0 { + c.Assert(mps[0].Driver, checker.Equals, x.expected.Driver) + } + c.Assert(mps[0].RW, checker.Equals, x.expected.RW) + c.Assert(mps[0].Type, checker.Equals, x.expected.Type) + c.Assert(mps[0].Mode, checker.Equals, x.expected.Mode) + if len(x.expected.Propagation) > 0 { + c.Assert(mps[0].Propagation, checker.Equals, x.expected.Propagation) + } + + out, _, err := dockerCmdWithError("start", "-a", id) + if (x.cfg.Type != "volume" || (x.cfg.VolumeOptions != nil && x.cfg.VolumeOptions.NoCopy)) && daemonPlatform != "windows" { + c.Assert(err, checker.NotNil, check.Commentf("%s\n%v", out, mps[0])) + } else { + c.Assert(err, checker.IsNil, check.Commentf("%s\n%v", out, mps[0])) + } + + dockerCmd(c, "rm", "-fv", id) + if x.cfg.Type == "volume" && len(x.cfg.Source) > 0 { + // This should still exist even though we removed the container + dockerCmd(c, "volume", "inspect", mps[0].Name) + } else { + // This should be removed automatically when we removed the container + out, _, err := dockerCmdWithError("volume", "inspect", mps[0].Name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + } + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) { + testRequires(c, DaemonIsLinux) + type testCase struct { + cfg map[string]interface{} + expectedOptions []string + } + target := "/foo" + cases := []testCase{ + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, + }, + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target, + "TmpfsOptions": map[string]interface{}{ + "SizeBytes": 4096 * 1024, "Mode": 0700}}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k", "mode=700"}, + }, + } + + for i, x := range cases { + cName := fmt.Sprintf("test-tmpfs-%d", i) + data := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", + fmt.Sprintf("mount | grep 'tmpfs on %s'", target)}, + "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{x.cfg}}, + } + status, resp, err := sockRequest("POST", "/containers/create?name="+cName, data) + c.Assert(err, checker.IsNil, check.Commentf(string(resp))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + out, _ := dockerCmd(c, "start", "-a", cName) + for _, option := range x.expectedOptions { + c.Assert(out, checker.Contains, option) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go new file mode 100644 index 0000000..41011c3 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go @@ -0,0 +1,84 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPICreateWithNotExistImage(c *check.C) { + name := "test" + config := map[string]interface{}{ + "Image": "test456:v1", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected := "No such image: test456:v1" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + config2 := map[string]interface{}{ + "Image": "test456", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err = sockRequest("POST", "/containers/create?name="+name, config2) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: test456:latest" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + + config3 := map[string]interface{}{ + "Image": "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", + } + + status, body, err = sockRequest("POST", "/containers/create?name="+name, config3) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + +} + +// Test for #25099 +func (s *DockerSuite) TestAPICreateEmptyEnv(c *check.C) { + name := "test1" + config := map[string]interface{}{ + "Image": "busybox", + "Env": []string{"", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + + status, body, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected := "invalid environment variable:" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = "test2" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=foo", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =foo" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go new file mode 100644 index 0000000..3891c87 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go @@ -0,0 +1,73 @@ +package main + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestEventsAPIEmptyOutput(c *check.C) { + type apiResp struct { + resp *http.Response + err error + } + chResp := make(chan *apiResp) + go func() { + resp, body, err := sockRequestRaw("GET", "/events", nil, "") + body.Close() + chResp <- &apiResp{resp, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.resp.StatusCode, checker.Equals, http.StatusOK) + case <-time.After(3 * time.Second): + c.Fatal("timeout waiting for events api to respond, should have responded immediately") + } +} + +func (s *DockerSuite) TestEventsAPIBackwardsCompatible(c *check.C) { + since := daemonTime(c).Unix() + ts := strconv.FormatInt(since, 10) + + out, _ := runSleepingContainer(c, "--name=foo", "-d") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + q := url.Values{} + q.Set("since", ts) + + _, body, err := sockRequestRaw("GET", "/events?"+q.Encode(), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + + dec := json.NewDecoder(body) + var containerCreateEvent *jsonmessage.JSONMessage + for { + var event jsonmessage.JSONMessage + if err := dec.Decode(&event); err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if event.Status == "create" && event.ID == containerID { + containerCreateEvent = &event + break + } + } + + c.Assert(containerCreateEvent, checker.Not(checker.IsNil)) + c.Assert(containerCreateEvent.Status, checker.Equals, "create") + c.Assert(containerCreateEvent.ID, checker.Equals, containerID) + c.Assert(containerCreateEvent.From, checker.Equals, "busybox") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go new file mode 100644 index 0000000..cf4dded --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go @@ -0,0 +1,103 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "sync" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExecResizeAPIHeightWidthNoInt(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) +} + +// Part of #14845 +func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) { + name := "exec_resize_test" + dockerCmd(c, "run", "-d", "-i", "-t", "--name", name, "--restart", "always", "busybox", "/bin/sh") + + testExecResize := func() error { + data := map[string]interface{}{ + "AttachStdin": true, + "Cmd": []string{"/bin/sh"}, + } + uri := fmt.Sprintf("/containers/%s/exec", name) + status, body, err := sockRequest("POST", uri, data) + if err != nil { + return err + } + if status != http.StatusCreated { + return fmt.Errorf("POST %s is expected to return %d, got %d", uri, http.StatusCreated, status) + } + + out := map[string]string{} + err = json.Unmarshal(body, &out) + if err != nil { + return fmt.Errorf("ExecCreate returned invalid json. Error: %q", err.Error()) + } + + execID := out["Id"] + if len(execID) < 1 { + return fmt.Errorf("ExecCreate got invalid execID") + } + + payload := bytes.NewBufferString(`{"Tty":true}`) + conn, _, err := sockRequestHijack("POST", fmt.Sprintf("/exec/%s/start", execID), payload, "application/json") + if err != nil { + return fmt.Errorf("Failed to start the exec: %q", err.Error()) + } + defer conn.Close() + + _, rc, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), nil, "text/plain") + // It's probably a panic of the daemon if io.ErrUnexpectedEOF is returned. + if err == io.ErrUnexpectedEOF { + return fmt.Errorf("The daemon might have crashed.") + } + + if err == nil { + rc.Close() + } + + // We only interested in the io.ErrUnexpectedEOF error, so we return nil otherwise. + return nil + } + + // The panic happens when daemon.ContainerExecStart is called but the + // container.Exec is not called. + // Because the panic is not 100% reproducible, we send the requests concurrently + // to increase the probability that the problem is triggered. + var ( + n = 10 + ch = make(chan error, n) + wg sync.WaitGroup + ) + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + defer wg.Done() + if err := testExecResize(); err != nil { + ch <- err + } + }() + } + + wg.Wait() + select { + case err := <-ch: + c.Fatal(err.Error()) + default: + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go new file mode 100644 index 0000000..716e9ac --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go @@ -0,0 +1,198 @@ +// +build !test_no_exec + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Regression test for #9414 +func (s *DockerSuite) TestExecAPICreateNoCmd(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + comment := check.Commentf("Expected message when creating exec command with no Cmd specified") + c.Assert(getErrorMessage(c, body), checker.Contains, "No exec command specified", comment) +} + +func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(map[string]interface{}{"Cmd": nil}); err != nil { + c.Fatalf("Can not encode data to json %s", err) + } + + res, body, err := sockRequestRaw("POST", fmt.Sprintf("/containers/%s/exec", name), jsonData, "text/plain") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + + comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified") + c.Assert(getErrorMessage(c, b), checker.Contains, "Content-Type specified", comment) +} + +func (s *DockerSuite) TestExecAPICreateContainerPaused(c *check.C) { + // Not relevant on Windows as Windows containers cannot be paused + testRequires(c, DaemonIsLinux) + name := "exec_create_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + dockerCmd(c, "pause", name) + status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) + + comment := check.Commentf("Expected message when creating exec command with Container %s is paused", name) + c.Assert(getErrorMessage(c, body), checker.Contains, "Container "+name+" is paused, unpause the container before exec", comment) +} + +func (s *DockerSuite) TestExecAPIStart(c *check.C) { + testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvagable to Windows to Windows CI + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + + id := createExec(c, "test") + startExec(c, id, http.StatusOK) + + var execJSON struct{ PID int } + inspectExec(c, id, &execJSON) + c.Assert(execJSON.PID, checker.GreaterThan, 1) + + id = createExec(c, "test") + dockerCmd(c, "stop", "test") + + startExec(c, id, http.StatusNotFound) + + dockerCmd(c, "start", "test") + startExec(c, id, http.StatusNotFound) + + // make sure exec is created before pausing + id = createExec(c, "test") + dockerCmd(c, "pause", "test") + startExec(c, id, http.StatusConflict) + dockerCmd(c, "unpause", "test") + startExec(c, id, http.StatusOK) +} + +func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + + id := createExec(c, "test") + resp, _, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Server"), checker.Not(checker.Equals), "") +} + +func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + runSleepingContainer(c, "-d", "--name", "test") + id := createExec(c, "test") + + resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/v1.20/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "text/plain") + c.Assert(err, checker.IsNil) + + b, err := readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) +} + +// #19362 +func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) { + runSleepingContainer(c, "-d", "--name", "test") + execID := createExec(c, "test") + startExec(c, execID, http.StatusOK) + + timeout := time.After(60 * time.Second) + var execJSON struct{ Running bool } + for { + select { + case <-timeout: + c.Fatal("timeout waiting for exec to start") + default: + } + + inspectExec(c, execID, &execJSON) + if !execJSON.Running { + break + } + } + + startExec(c, execID, http.StatusConflict) +} + +// #20638 +func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) { + name := "foo" + runSleepingContainer(c, "-d", "-t", "--name", name) + data := map[string]interface{}{ + "cmd": []string{"true"}, + "AttachStdin": true, + } + _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), data) + c.Assert(err, checker.IsNil, check.Commentf(string(b))) + + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + + _, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + + b, err = readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + + resp, _, err := sockRequestRaw("GET", "/_ping", nil, "") + c.Assert(err, checker.IsNil) + if resp.StatusCode != http.StatusOK { + c.Fatal("daemon is down, it should alive") + } +} + +func createExec(c *check.C, name string) string { + _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) + c.Assert(err, checker.IsNil, check.Commentf(string(b))) + + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + return createResp.ID +} + +func startExec(c *check.C, id string, code int) { + resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + + b, err := readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, code, comment) +} + +func inspectExec(c *check.C, id string, out interface{}) { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/exec/%s/json", id), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + err = json.NewDecoder(body).Decode(out) + c.Assert(err, checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go new file mode 100644 index 0000000..b7617ea --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go @@ -0,0 +1,165 @@ +package main + +import ( + "encoding/json" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIImagesFilter(c *check.C) { + name := "utest:tag1" + name2 := "utest/docker:tag2" + name3 := "utest:5000/docker:tag3" + for _, n := range []string{name, name2, name3} { + dockerCmd(c, "tag", "busybox", n) + } + type image types.ImageSummary + getImages := func(filter string) []image { + v := url.Values{} + v.Set("filter", filter) + status, b, err := sockRequest("GET", "/images/json?"+v.Encode(), nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var images []image + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + + return images + } + + //incorrect number of matches returned + images := getImages("utest*/*") + c.Assert(images[0].RepoTags, checker.HasLen, 2) + + images = getImages("utest") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("utest*") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("*5000*/*") + c.Assert(images[0].RepoTags, checker.HasLen, 1) +} + +func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *check.C) { + // TODO Windows to Windows CI: Investigate further why this test fails. + testRequires(c, Network) + testRequires(c, DaemonIsLinux) + out, err := buildImage("saveandload", "FROM busybox\nENV FOO bar", false) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + dockerCmd(c, "rmi", id) + + res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar") + c.Assert(err, checker.IsNil) + defer loadBody.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + inspectOut := inspectField(c, id, "Id") + c.Assert(strings.TrimSpace(string(inspectOut)), checker.Equals, id, check.Commentf("load did not work properly")) +} + +func (s *DockerSuite) TestAPIImagesDelete(c *check.C) { + if daemonPlatform != "windows" { + testRequires(c, Network) + } + name := "test-api-images-delete" + out, err := buildImage(name, "FROM busybox\nENV FOO bar", false) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + dockerCmd(c, "tag", name, "test:tag1") + + status, _, err := sockRequest("DELETE", "/images/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) + + status, _, err = sockRequest("DELETE", "/images/test:noexist", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) //Status Codes:404 – no such image + + status, _, err = sockRequest("DELETE", "/images/test:tag1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIImagesHistory(c *check.C) { + if daemonPlatform != "windows" { + testRequires(c, Network) + } + name := "test-api-images-history" + out, err := buildImage(name, "FROM busybox\nENV FOO bar", false) + c.Assert(err, checker.IsNil) + + id := strings.TrimSpace(out) + + status, body, err := sockRequest("GET", "/images/"+id+"/history", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var historydata []types.ImageHistory + err = json.Unmarshal(body, &historydata) + c.Assert(err, checker.IsNil, check.Commentf("Error on unmarshal")) + + c.Assert(historydata, checker.Not(checker.HasLen), 0) + c.Assert(historydata[0].Tags[0], checker.Equals, "test-api-images-history:latest") +} + +// #14846 +func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *check.C) { + testRequires(c, Network) + + res, b, err := sockRequestRaw("GET", "/images/search?term=test", nil, "application/json") + c.Assert(err, check.IsNil) + b.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") +} + +// Test case for 30027: image size reported as -1 in v1.12 client against v1.13 daemon. +// This test checks to make sure both v1.12 and v1.13 client against v1.13 daemon get correct `Size` after the fix. +func (s *DockerSuite) TestAPIImagesSizeCompatibility(c *check.C) { + status, b, err := sockRequest("GET", "/images/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var images []types.ImageSummary + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + c.Assert(len(images), checker.Not(checker.Equals), 0) + for _, image := range images { + c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) + } + + type v124Image struct { + ID string `json:"Id"` + ParentID string `json:"ParentId"` + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + VirtualSize int64 + Labels map[string]string + } + status, b, err = sockRequest("GET", "/v1.24/images/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var v124Images []v124Image + err = json.Unmarshal(b, &v124Images) + c.Assert(err, checker.IsNil) + c.Assert(len(v124Images), checker.Not(checker.Equals), 0) + for _, image := range v124Images { + c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go new file mode 100644 index 0000000..1556099 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go @@ -0,0 +1,53 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoAPI(c *check.C) { + endpoint := "/info" + + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + // always shown fields + stringsToCheck := []string{ + "ID", + "Containers", + "ContainersRunning", + "ContainersPaused", + "ContainersStopped", + "Images", + "LoggingDriver", + "OperatingSystem", + "NCPU", + "OSType", + "Architecture", + "MemTotal", + "KernelVersion", + "Driver", + "ServerVersion", + "SecurityOptions"} + + out := string(body) + for _, linePrefix := range stringsToCheck { + c.Assert(out, checker.Contains, linePrefix) + } +} + +func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + endpoint := "/v1.20/info" + + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + out := string(body) + c.Assert(out, checker.Contains, "ExecutionDriver") + c.Assert(out, checker.Contains, "not supported") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go new file mode 100644 index 0000000..546b224 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go @@ -0,0 +1,183 @@ +package main + +import ( + "encoding/json" + "net/http" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + keysBase := []string{"Id", "State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", + "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "MountLabel", "ProcessLabel", "GraphDriver"} + + type acase struct { + version string + keys []string + } + + var cases []acase + + if daemonPlatform == "windows" { + cases = []acase{ + {"v1.25", append(keysBase, "Mounts")}, + } + + } else { + cases = []acase{ + {"v1.20", append(keysBase, "Mounts")}, + {"v1.19", append(keysBase, "Volumes", "VolumesRW")}, + } + } + + for _, cs := range cases { + body := getInspectBody(c, cs.version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", cs.version)) + + for _, key := range cs.keys { + _, ok := inspectJSON[key] + c.Check(ok, checker.True, check.Commentf("%s does not exist in response for version %s", key, cs.version)) + } + + //Issue #6830: type not properly converted to JSON/back + _, ok := inspectJSON["Path"].(bool) + c.Assert(ok, checker.False, check.Commentf("Path of `true` should not be converted to boolean `true` via JSON marshalling")) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriverLegacy(c *check.C) { + // No legacy implications for Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, check.Commentf("API version %s expected to include VolumeDriver in 'Config'", version)) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriver(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--volume-driver", "local", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + body := getInspectBody(c, "v1.25", cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version 1.25")) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.False, check.Commentf("API version 1.25 expected to not include VolumeDriver in 'Config'")) + + config, ok = inspectJSON["HostConfig"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'HostConfig'")) + cfg = config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, check.Commentf("API version 1.25 expected to include VolumeDriver in 'HostConfig'")) +} + +func (s *DockerSuite) TestInspectAPIImageResponse(c *check.C) { + dockerCmd(c, "tag", "busybox:latest", "busybox:mytag") + + endpoint := "/images/busybox/json" + status, body, err := sockRequest("GET", endpoint, nil) + + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var imageJSON types.ImageInspect + err = json.Unmarshal(body, &imageJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for latest version")) + c.Assert(imageJSON.RepoTags, checker.HasLen, 2) + + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:latest"), checker.Equals, true) + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:mytag"), checker.Equals, true) +} + +// #17131, #17139, #17173 +func (s *DockerSuite) TestInspectAPIEmptyFieldsInConfigPre121(c *check.C) { + // Not relevant on Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + for _, f := range []string{"MacAddress", "NetworkDisabled", "ExposedPorts"} { + _, ok := cfg[f] + c.Check(ok, checker.True, check.Commentf("API version %s expected to include %s in 'Config'", version, f)) + } + } +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings120(c *check.C) { + // Not relevant on Windows, and besides it doesn't have any bridge network settings + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + body := getInspectBody(c, "v1.20", containerID) + + var inspectJSON v1p20.ContainerJSON + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil) + + settings := inspectJSON.NetworkSettings + c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings121(c *check.C) { + // Windows doesn't have any bridge network settings + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + body := getInspectBody(c, "v1.21", containerID) + + var inspectJSON types.ContainerJSON + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil) + + settings := inspectJSON.NetworkSettings + c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) + c.Assert(settings.Networks["bridge"], checker.Not(checker.IsNil)) + c.Assert(settings.IPAddress, checker.Equals, settings.Networks["bridge"].IPAddress) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go new file mode 100644 index 0000000..f49a139 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go @@ -0,0 +1,35 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// #16665 +func (s *DockerSuite) TestInspectAPICpusetInConfigPre120(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, cgroupCpuset) + + name := "cpusetinconfig-pre120" + dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true") + + status, body, err := sockRequest("GET", fmt.Sprintf("/v1.19/containers/%s/json", name), nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var inspectJSON map[string]interface{} + err = json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal body for version 1.19")) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["Cpuset"] + c.Assert(ok, checker.True, check.Commentf("API version 1.19 expected to include Cpuset in 'Config'")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go new file mode 100644 index 0000000..2e8ffa9 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go @@ -0,0 +1,87 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + type logOut struct { + out string + res *http.Response + err error + } + chLog := make(chan logOut) + + go func() { + res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id), nil, "") + if err != nil { + chLog <- logOut{"", nil, err} + return + } + defer body.Close() + out, err := bufio.NewReader(body).ReadString('\n') + if err != nil { + chLog <- logOut{"", nil, err} + return + } + chLog <- logOut{strings.TrimSpace(out), res, err} + }() + + select { + case l := <-chLog: + c.Assert(l.err, checker.IsNil) + c.Assert(l.res.StatusCode, checker.Equals, http.StatusOK) + if !strings.HasSuffix(l.out, "hello") { + c.Fatalf("expected log output to container 'hello', but it does not") + } + case <-time.After(20 * time.Second): + c.Fatal("timeout waiting for logs to exit") + } +} + +func (s *DockerSuite) TestLogsAPINoStdoutNorStderr(c *check.C) { + name := "logs_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(err, checker.IsNil) + + expected := "Bad parameters: you must choose at least one stream" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +// Regression test for #12704 +func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *check.C) { + name := "logs_test" + t0 := time.Now() + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10") + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") + t1 := time.Now() + c.Assert(err, checker.IsNil) + body.Close() + elapsed := t1.Sub(t0).Seconds() + if elapsed > 20.0 { + c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed) + } +} + +func (s *DockerSuite) TestLogsAPIContainerNotFound(c *check.C) { + name := "nonExistentContainer" + resp, _, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go new file mode 100644 index 0000000..1cc66f0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go @@ -0,0 +1,353 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPINetworkGetDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + // By default docker daemon creates 3 networks. check if they are present + defaults := []string{"bridge", "host", "none"} + for _, nn := range defaults { + c.Assert(isNetworkAvailable(c, nn), checker.Equals, true) + } +} + +func (s *DockerSuite) TestAPINetworkCreateDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create a network + name := "testnetwork" + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + id := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // delete the network and make sure it is deleted + deleteNetwork(c, id, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkCreateCheckDuplicate(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testcheckduplicate" + configOnCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + configNotCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + // Creating a new network first + createNetwork(c, configOnCheck, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // Creating another network with same name and CheckDuplicate must fail + createNetwork(c, configOnCheck, false) + + // Creating another network with same name and not CheckDuplicate must succeed + createNetwork(c, configNotCheck, true) +} + +func (s *DockerSuite) TestAPINetworkFilter(c *check.C) { + testRequires(c, DaemonIsLinux) + nr := getNetworkResource(c, getNetworkIDByName(c, "bridge")) + c.Assert(nr.Name, checker.Equals, "bridge") +} + +func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Inspect default bridge network + nr := getNetworkResource(c, "bridge") + c.Assert(nr.Name, checker.Equals, "bridge") + + // run a container and attach it to the default bridge network + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + containerIP := findContainerIP(c, "test", "bridge") + + // inspect default bridge network again and make sure the container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + c.Assert(ip.String(), checker.Equals, containerIP) + + // IPAM configuration inspect + ipam := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "172.28.0.0/16", IPRange: "172.28.5.0/24", Gateway: "172.28.5.254"}}, + } + config := types.NetworkCreateRequest{ + Name: "br0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam, + Options: map[string]string{"foo": "bar", "opts": "dopts"}, + }, + } + id0 := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true) + + nr = getNetworkResource(c, id0) + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") + c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") + c.Assert(nr.Options["foo"], checker.Equals, "bar") + c.Assert(nr.Options["opts"], checker.Equals, "dopts") + + // delete the network and make sure it is deleted + deleteNetwork(c, id0, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkConnectDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create test network + name := "testnetwork" + config := types.NetworkCreateRequest{ + Name: name, + } + id := createNetwork(c, config, true) + nr := getNetworkResource(c, id) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(nr.ID, checker.Equals, id) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + + // connect the container to the test network + connectNetwork(c, nr.ID, containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + containerIP := findContainerIP(c, "test", "testnetwork") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + disconnectNetwork(c, nr.ID, containerID) + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // delete the network + deleteNetwork(c, nr.ID, true) +} + +func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + // test0 bridge network + ipam0 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.0.0/16", IPRange: "192.178.128.0/17", Gateway: "192.178.138.100"}}, + } + config0 := types.NetworkCreateRequest{ + Name: "test0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam0, + }, + } + id0 := createNetwork(c, config0, true) + c.Assert(isNetworkAvailable(c, "test0"), checker.Equals, true) + + ipam1 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.128.0/17", Gateway: "192.178.128.1"}}, + } + // test1 bridge network overlaps with test0 + config1 := types.NetworkCreateRequest{ + Name: "test1", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam1, + }, + } + createNetwork(c, config1, false) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, false) + + ipam2 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.169.0.0/16", Gateway: "192.169.100.100"}}, + } + // test2 bridge network does not overlap + config2 := types.NetworkCreateRequest{ + Name: "test2", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam2, + }, + } + createNetwork(c, config2, true) + c.Assert(isNetworkAvailable(c, "test2"), checker.Equals, true) + + // remove test0 and retry to create test1 + deleteNetwork(c, id0, true) + createNetwork(c, config1, true) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, true) + + // for networks w/o ipam specified, docker will choose proper non-overlapping subnets + createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, true) + c.Assert(isNetworkAvailable(c, "test3"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, true) + c.Assert(isNetworkAvailable(c, "test4"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test5"}, true) + c.Assert(isNetworkAvailable(c, "test5"), checker.Equals, true) + + for i := 1; i < 6; i++ { + deleteNetwork(c, fmt.Sprintf("test%d", i), true) + } +} + +func (s *DockerSuite) TestAPICreateDeletePredefinedNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + createDeletePredefinedNetwork(c, "bridge") + createDeletePredefinedNetwork(c, "none") + createDeletePredefinedNetwork(c, "host") +} + +func createDeletePredefinedNetwork(c *check.C, name string) { + // Create pre-defined network + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + shouldSucceed := false + createNetwork(c, config, shouldSucceed) + deleteNetwork(c, name, shouldSucceed) +} + +func isNetworkAvailable(c *check.C, name string) bool { + status, body, err := sockRequest("GET", "/networks", nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + nJSON := []types.NetworkResource{} + err = json.Unmarshal(body, &nJSON) + c.Assert(err, checker.IsNil) + + for _, n := range nJSON { + if n.Name == name { + return true + } + } + return false +} + +func getNetworkIDByName(c *check.C, name string) string { + var ( + v = url.Values{} + filterArgs = filters.NewArgs() + ) + filterArgs.Add("name", name) + filterJSON, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + v.Set("filters", filterJSON) + + status, body, err := sockRequest("GET", "/networks?"+v.Encode(), nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + nJSON := []types.NetworkResource{} + err = json.Unmarshal(body, &nJSON) + c.Assert(err, checker.IsNil) + c.Assert(len(nJSON), checker.Equals, 1) + + return nJSON[0].ID +} + +func getNetworkResource(c *check.C, id string) *types.NetworkResource { + _, obj, err := sockRequest("GET", "/networks/"+id, nil) + c.Assert(err, checker.IsNil) + + nr := types.NetworkResource{} + err = json.Unmarshal(obj, &nr) + c.Assert(err, checker.IsNil) + + return &nr +} + +func createNetwork(c *check.C, config types.NetworkCreateRequest, shouldSucceed bool) string { + status, resp, err := sockRequest("POST", "/networks/create", config) + if !shouldSucceed { + c.Assert(status, checker.Not(checker.Equals), http.StatusCreated) + return "" + } + + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var nr types.NetworkCreateResponse + err = json.Unmarshal(resp, &nr) + c.Assert(err, checker.IsNil) + + return nr.ID +} + +func connectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + status, _, err := sockRequest("POST", "/networks/"+nid+"/connect", config) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) +} + +func disconnectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + status, _, err := sockRequest("POST", "/networks/"+nid+"/disconnect", config) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) +} + +func deleteNetwork(c *check.C, id string, shouldSucceed bool) { + status, _, err := sockRequest("DELETE", "/networks/"+id, nil) + if !shouldSucceed { + c.Assert(status, checker.Not(checker.Equals), http.StatusOK) + return + } + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(err, checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go new file mode 100644 index 0000000..daf1b05 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestResizeAPIResponse(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIHeightWidthNoInt(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIResponseWhenContainerNotStarted(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + // make sure the exited container is not running + dockerCmd(c, "wait", cleanedContainerID) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, body, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) + + c.Assert(getErrorMessage(c, body), checker.Contains, "is not running", check.Commentf("resize should fail with message 'Container is not running'")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go new file mode 100644 index 0000000..15a21e5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go @@ -0,0 +1,39 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func setPortConfig(portConfig []swarm.PortConfig) serviceConstructor { + return func(s *swarm.Service) { + if s.Spec.EndpointSpec == nil { + s.Spec.EndpointSpec = &swarm.EndpointSpec{} + } + s.Spec.EndpointSpec.Ports = portConfig + } +} + +func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service with a port mapping of 8080:8081. + portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} + serviceID := d.createService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. + updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} + remoteService := d.getService(c, serviceID) + d.updateService(c, remoteService, setPortConfig(updatedPortConfig)) + + // Inspect the service and verify port mapping. + updatedService := d.getService(c, serviceID) + c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) + c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].PublishedPort, check.Equals, uint32(8082)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go new file mode 100644 index 0000000..23fbdbb --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go @@ -0,0 +1,310 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "os/exec" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ") + +func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;do echo 'Hello'; usleep 100000; done") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + + var cpuPercent = 0.0 + + if daemonPlatform != "windows" { + cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage) + systemDelta := float64(v.CPUStats.SystemUsage - v.PreCPUStats.SystemUsage) + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } else { + // Max number of 100ns intervals between the previous time read and now + possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals + possIntervals /= 100 // Convert to number of 100ns intervals + possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors + + // Intervals used + intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage + + // Percentage avoiding divide-by-zero + if possIntervals > 0 { + cpuPercent = float64(intervalsUsed) / float64(possIntervals) * 100.0 + } + } + + c.Assert(cpuPercent, check.Not(checker.Equals), 0.0, check.Commentf("docker stats with no-stream get cpu usage failed: was %v", cpuPercent)) +} + +func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo 1") + id := strings.TrimSpace(out) + + getGoRoutines := func() int { + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/info"), nil, "") + c.Assert(err, checker.IsNil) + info := types.Info{} + err = json.NewDecoder(body).Decode(&info) + c.Assert(err, checker.IsNil) + body.Close() + return info.NGoroutines + } + + // When the HTTP connection is closed, the number of goroutines should not increase. + routines := getGoRoutines() + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats", id), nil, "") + c.Assert(err, checker.IsNil) + body.Close() + + t := time.After(30 * time.Second) + for { + select { + case <-t: + c.Assert(getGoRoutines(), checker.LessOrEqualThan, routines) + return + default: + if n := getGoRoutines(); n <= routines { + return + } + time.Sleep(200 * time.Millisecond) + } + } +} + +func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := runSleepingContainer(c) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + // Retrieve the container address + net := "bridge" + if daemonPlatform == "windows" { + net = "nat" + } + contIP := findContainerIP(c, id, net) + numPings := 1 + + var preRxPackets uint64 + var preTxPackets uint64 + var postRxPackets uint64 + var postTxPackets uint64 + + // Get the container networking stats before and after pinging the container + nwStatsPre := getNetworkStats(c, id) + for _, v := range nwStatsPre { + preRxPackets += v.RxPackets + preTxPackets += v.TxPackets + } + + countParam := "-c" + if runtime.GOOS == "windows" { + countParam = "-n" // Ping count parameter is -n on Windows + } + pingout, err := exec.Command("ping", contIP, countParam, strconv.Itoa(numPings)).CombinedOutput() + if err != nil && runtime.GOOS == "linux" { + // If it fails then try a work-around, but just for linux. + // If this fails too then go back to the old error for reporting. + // + // The ping will sometimes fail due to an apparmor issue where it + // denies access to the libc.so.6 shared library - running it + // via /lib64/ld-linux-x86-64.so.2 seems to work around it. + pingout2, err2 := exec.Command("/lib64/ld-linux-x86-64.so.2", "/bin/ping", contIP, "-c", strconv.Itoa(numPings)).CombinedOutput() + if err2 == nil { + pingout = pingout2 + err = err2 + } + } + c.Assert(err, checker.IsNil) + pingouts := string(pingout[:]) + nwStatsPost := getNetworkStats(c, id) + for _, v := range nwStatsPost { + postRxPackets += v.RxPackets + postTxPackets += v.TxPackets + } + + // Verify the stats contain at least the expected number of packets + // On Linux, account for ARP. + expRxPkts := preRxPackets + uint64(numPings) + expTxPkts := preTxPackets + uint64(numPings) + if daemonPlatform != "windows" { + expRxPkts++ + expTxPkts++ + } + c.Assert(postTxPackets, checker.GreaterOrEqualThan, expTxPkts, + check.Commentf("Reported less TxPackets than expected. Expected >= %d. Found %d. %s", expTxPkts, postTxPackets, pingouts)) + c.Assert(postRxPackets, checker.GreaterOrEqualThan, expRxPkts, + check.Commentf("Reported less Txbytes than expected. Expected >= %d. Found %d. %s", expRxPkts, postRxPackets, pingouts)) +} + +func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) { + // Windows doesn't support API versions less than 1.25, so no point testing 1.17 .. 1.21 + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := runSleepingContainer(c) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + wg := sync.WaitGroup{} + + for i := 17; i <= 21; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + apiVersion := fmt.Sprintf("v1.%d", i) + statsJSONBlob := getVersionedStats(c, id, apiVersion) + if versions.LessThan(apiVersion, "v1.21") { + c.Assert(jsonBlobHasLTv121NetworkStats(statsJSONBlob), checker.Equals, true, + check.Commentf("Stats JSON blob from API %s %#v does not look like a =v1.21 API stats structure", apiVersion, statsJSONBlob)) + } + }(i) + } + wg.Wait() +} + +func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats { + var st *types.StatsJSON + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, checker.IsNil) + + err = json.NewDecoder(body).Decode(&st) + c.Assert(err, checker.IsNil) + body.Close() + + return st.Networks +} + +// getVersionedStats returns stats result for the +// container with id using an API call with version apiVersion. Since the +// stats result type differs between API versions, we simply return +// map[string]interface{}. +func getVersionedStats(c *check.C, id string, apiVersion string) map[string]interface{} { + stats := make(map[string]interface{}) + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + + err = json.NewDecoder(body).Decode(&stats) + c.Assert(err, checker.IsNil, check.Commentf("failed to decode stat: %s", err)) + + return stats +} + +func jsonBlobHasLTv121NetworkStats(blob map[string]interface{}) bool { + networkStatsIntfc, ok := blob["network"] + if !ok { + return false + } + networkStats, ok := networkStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, expectedKey := range expectedNetworkInterfaceStats { + if _, ok := networkStats[expectedKey]; !ok { + return false + } + } + return true +} + +func jsonBlobHasGTE121NetworkStats(blob map[string]interface{}) bool { + networksStatsIntfc, ok := blob["networks"] + if !ok { + return false + } + networksStats, ok := networksStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, networkInterfaceStatsIntfc := range networksStats { + networkInterfaceStats, ok := networkInterfaceStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, expectedKey := range expectedNetworkInterfaceStats { + if _, ok := networkInterfaceStats[expectedKey]; !ok { + return false + } + } + } + return true +} + +func (s *DockerSuite) TestAPIStatsContainerNotFound(c *check.C) { + testRequires(c, DaemonIsLinux) + + status, _, err := sockRequest("GET", "/containers/nonexistent/stats", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) + + status, _, err = sockRequest("GET", "/containers/nonexistent/stats?stream=0", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestAPIStatsNoStreamConnectedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + out1, _ := runSleepingContainer(c) + id1 := strings.TrimSpace(out1) + c.Assert(waitRun(id1), checker.IsNil) + + out2, _ := runSleepingContainer(c, "--net", "container:"+id1) + id2 := strings.TrimSpace(out2) + c.Assert(waitRun(id2), checker.IsNil) + + ch := make(chan error) + go func() { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id2), nil, "") + defer body.Close() + if err != nil { + ch <- err + } + if resp.StatusCode != http.StatusOK { + ch <- fmt.Errorf("Invalid StatusCode %v", resp.StatusCode) + } + if resp.Header.Get("Content-Type") != "application/json" { + ch <- fmt.Errorf("Invalid 'Content-Type' %v", resp.Header.Get("Content-Type")) + } + var v *types.Stats + if err := json.NewDecoder(body).Decode(&v); err != nil { + ch <- err + } + ch <- nil + }() + + select { + case err := <-ch: + c.Assert(err, checker.IsNil, check.Commentf("Error in stats Engine API: %v", err)) + case <-time.After(15 * time.Second): + c.Fatalf("Stats did not return after timeout") + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go new file mode 100644 index 0000000..0995ce3 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go @@ -0,0 +1,41 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIStatsContainerGetMemoryLimit(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport) + + resp, body, err := sockRequestRaw("GET", "/info", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + var info types.Info + err = json.NewDecoder(body).Decode(&info) + c.Assert(err, checker.IsNil) + body.Close() + + // don't set a memory limit, the memory limit should be system memory + conName := "foo" + dockerCmd(c, "run", "-d", "--name", conName, "busybox", "top") + c.Assert(waitRun(conName), checker.IsNil) + + resp, body, err = sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", conName), nil, "") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + c.Assert(fmt.Sprintf("%d", v.MemoryStats.Limit), checker.Equals, fmt.Sprintf("%d", info.MemTotal)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go new file mode 100644 index 0000000..1f8eaec --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go @@ -0,0 +1,1367 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +var defaultReconciliationTimeout = 30 * time.Second + +func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { + // todo: should find a better way to verify that components are running than /info + d1 := s.AddDaemon(c, true, true) + info, err := d1.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + d2 := s.AddDaemon(c, true, false) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Leaving cluster + c.Assert(d2.Leave(false), checker.IsNil) + + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Current state restoring after restarts + err = d1.Stop() + c.Assert(err, checker.IsNil) + err = d2.Stop() + c.Assert(err, checker.IsNil) + + err = d1.Start() + c.Assert(err, checker.IsNil) + err = d2.Start() + c.Assert(err, checker.IsNil) + + info, err = d1.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + + d2 := s.AddDaemon(c, false, false) + err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err := d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + workerToken := d1.joinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change tokens + d1.rotateTokens(c) + + err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + workerToken = d1.joinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change spec, don't change tokens + d1.updateSwarm(c, func(s *swarm.Spec) {}) + + err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, false, false) + splitToken := strings.Split(d1.joinTokens(c).Worker, "-") + splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" + replacementToken := strings.Join(splitToken, "-") + err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") +} + +func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + d2 := s.AddDaemon(c, true, false) + + info, err := d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleManager + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) + + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleWorker + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.False) + + // Demoting last node should fail + node := d1.getNode(c, d1.NodeID) + node.Spec.Role = swarm.NodeRoleWorker + url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) + status, out, err := d1.SockRequest("POST", url, node.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out))) + c.Assert(string(out), checker.Contains, "last manager of the swarm") + info, err = d1.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(info.ControlAvailable, checker.True) + + // Promote already demoted node + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleManager + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + services := d.listServices(c) + c.Assert(services, checker.NotNil) + c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.createService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + service := d.getService(c, id) + instances = 5 + d.updateService(c, service, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + d.removeService(c, service.ID) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + instances := 9 + id := d1.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.GreaterThan, 0) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + // reconciliation on d2 node down + c.Assert(d2.Stop(), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + // test downscaling + instances = 5 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + d1.createService(c, simpleTestService, setGlobalMode) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.Equals, 1) + + d4 := s.AddDaemon(c, true, false) + d5 := s.AddDaemon(c, true, false) + + waitAndAssert(c, defaultReconciliationTimeout, d4.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d5.checkActiveContainerCount, checker.Equals, 1) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:test" + + // create a different tag + for _, d := range daemons { + out, err := d.Cmd("tag", image1, image2) + c.Assert(err, checker.IsNil, check.Commentf(out)) + } + + // create service + instances := 5 + parallelism := 2 + id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].getService(c, id) + daemons[0].updateService(c, service, setImage(image2)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - parallelism, image2: parallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // 3nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances}) + + // Roll back to the previous version. This uses the CLI because + // rollback is a client-side operation. + out, err := daemons[0].Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - parallelism, image1: parallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - 2*parallelism, image1: 2 * parallelism}) + + // 3nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:badtag" + + // create service + instances := 5 + id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].getService(c, id) + daemons[0].updateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) + + // should update 2 tasks and then pause + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) + v, _ := daemons[0].checkServiceRunningTasks(id)(c) + c.Assert(v, checker.Equals, instances-2) + + // Roll back to the previous version. This uses the CLI because + // rollback is a client-side operation. + out, err := daemons[0].Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + + // create service + constraints := []string{"node.role==worker"} + instances := 3 + id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + // validate tasks are running on worker nodes + tasks := daemons[0].getServiceTasks(c, id) + for _, task := range tasks { + node := daemons[0].getNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker) + } + //remove service + daemons[0].removeService(c, id) + + // create service + constraints = []string{"node.role!=worker"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].getServiceTasks(c, id) + // validate tasks are running on manager nodes + for _, task := range tasks { + node := daemons[0].getNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager) + } + //remove service + daemons[0].removeService(c, id) + + // create service + constraints = []string{"node.role==nosuchrole"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + // validate tasks are not assigned to any node + tasks = daemons[0].getServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].listNodes(c) + c.Assert(len(nodes), checker.Equals, nodeCount) + + // add labels to nodes + daemons[0].updateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + for i := 1; i < nodeCount; i++ { + daemons[0].updateNode(c, nodes[i].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "low", + } + }) + } + + // create service + instances := 3 + constraints := []string{"node.labels.security==high"} + id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks := daemons[0].getServiceTasks(c, id) + // validate all tasks are running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[0].ID) + } + //remove service + daemons[0].removeService(c, id) + + // create service + constraints = []string{"node.labels.security!=high"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].getServiceTasks(c, id) + // validate all tasks are NOT running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID) + } + //remove service + daemons[0].removeService(c, id) + + constraints = []string{"node.labels.security==medium"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].getServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + //remove service + daemons[0].removeService(c, id) + + // multiple constraints + constraints = []string{ + "node.labels.security==high", + fmt.Sprintf("node.id==%s", nodes[1].ID), + } + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].getServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + // make nodes[1] fulfills the constraints + daemons[0].updateNode(c, nodes[1].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].getServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[1].ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, DaemonIsLinux) + + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept + + instances := 9 + d1.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + getContainers := func() map[string]*SwarmDaemon { + m := make(map[string]*SwarmDaemon) + for _, d := range []*SwarmDaemon{d1, d2, d3} { + for _, id := range d.activeContainers() { + m[id] = d + } + } + return m + } + + containers := getContainers() + c.Assert(containers, checker.HasLen, instances) + var toRemove string + for i := range containers { + toRemove = i + } + + _, err := containers[toRemove].Cmd("stop", toRemove) + c.Assert(err, checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + containers2 := getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } + + containers = containers2 + for i := range containers { + toRemove = i + } + + // try with killing process outside of docker + pidStr, err := containers[toRemove].Cmd("inspect", "-f", "{{.State.Pid}}", toRemove) + c.Assert(err, checker.IsNil) + pid, err := strconv.Atoi(strings.TrimSpace(pidStr)) + c.Assert(err, checker.IsNil) + c.Assert(syscall.Kill(pid, syscall.SIGKILL), checker.IsNil) + + time.Sleep(time.Second) // give some time to handle the signal + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + containers2 = getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) { + // add three managers, one of these is leader + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // start a service by hitting each of the 3 managers + d1.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test1" + }) + d2.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test2" + }) + d3.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test3" + }) + + // 3 services should be started now, because the requests were proxied to leader + // query each node and make sure it returns 3 services + for _, d := range []*SwarmDaemon{d1, d2, d3} { + services := d.listServices(c) + c.Assert(services, checker.HasLen, 3) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { + // Create 3 nodes + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // assert that the first node we made is the leader, and the other two are followers + c.Assert(d1.getNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) + c.Assert(d1.getNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) + c.Assert(d1.getNode(c, d3.NodeID).ManagerStatus.Leader, checker.False) + + d1.Stop() // stop the leader + + var ( + leader *SwarmDaemon // keep track of leader + followers []*SwarmDaemon // keep track of followers + ) + checkLeader := func(nodes ...*SwarmDaemon) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + // clear these out before each run + leader = nil + followers = nil + for _, d := range nodes { + if d.getNode(c, d.NodeID).ManagerStatus.Leader { + leader = d + } else { + followers = append(followers, d) + } + } + + if leader == nil { + return false, check.Commentf("no leader elected") + } + + return true, check.Commentf("elected %v", leader.id) + } + } + + // wait for an election to occur + waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True) + + // assert that we have a new leader + c.Assert(leader, checker.NotNil) + + // Keep track of the current leader, since we want that to be chosen. + stableleader := leader + + // add the d1, the initial leader, back + d1.Start() + + // TODO(stevvooe): may need to wait for rejoin here + + // wait for possible election + waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True) + // pick out the leader and the followers again + + // verify that we still only have 1 leader and 2 followers + c.Assert(leader, checker.NotNil) + c.Assert(followers, checker.HasLen, 2) + // and that after we added d1 back, the leader hasn't changed + c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID) +} + +func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + d1.createService(c, simpleTestService) + + c.Assert(d2.Stop(), checker.IsNil) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + + d1.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "top1" + }) + + c.Assert(d3.Stop(), checker.IsNil) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + + var service swarm.Service + simpleTestService(&service) + service.Spec.Name = "top2" + status, out, err := d1.SockRequest("POST", "/services/create", service.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded", string(out))) + + c.Assert(d2.Start(), checker.IsNil) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + + d1.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "top3" + }) +} + +func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + nodes := d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + +loop0: + for _, n := range nodes { + for _, d := range []*SwarmDaemon{d1, d2, d3} { + if n.ID == d.NodeID { + continue loop0 + } + } + c.Errorf("unknown nodeID %v", n.ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + nodes := d.listNodes(c) + + d.updateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + n := d.getNode(c, nodes[0].ID) + c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { + testRequires(c, Network) + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + _ = s.AddDaemon(c, true, false) + + nodes := d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + + // Getting the info so we can take the NodeID + d2Info, err := d2.info() + c.Assert(err, checker.IsNil) + + // forceful removal of d2 should work + d1.removeNode(c, d2Info.NodeID, true) + + nodes = d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) + + // Restart the node that was removed + err = d2.Restart() + c.Assert(err, checker.IsNil) + + // Give some time for the node to rejoin + time.Sleep(1 * time.Second) + + // Make sure the node didn't rejoin + nodes = d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + // start a service, expect balanced distribution + instances := 8 + id := d1.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) + + // set d2 back to active + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityActive + }) + + instances = 1 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + instances = 8 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + + // drained node first so we don't get any old containers + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + d2ContainerCount := len(d2.activeContainers()) + + // set d2 to paused, scale service up, only d1 gets new tasks + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + instances = 14 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances-d2ContainerCount) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, d2ContainerCount) + +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + d.createService(c, simpleTestService, setInstances(instances)) + + id, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(id) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances+1) + + c.Assert(d.Leave(false), checker.NotNil) + c.Assert(d.Leave(true), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + id2, err := d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) +} + +// #23629 +func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) { + testRequires(c, Network) + s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, false, false) + + id, err := d2.Cmd("run", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(id) + + err = d2.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "Timeout was reached") + + info, err := d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + c.Assert(d2.Leave(true), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) + + id2, err := d2.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) +} + +// #23705 +func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) { + testRequires(c, Network) + d := s.AddDaemon(c, false, false) + err := d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "Timeout was reached") + + waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + c.Assert(d.Stop(), checker.IsNil) + c.Assert(d.Start(), checker.IsNil) + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) { + d1 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.createService(c, simpleTestService, setInstances(instances)) + + d1.getService(c, id) + d1.Stop() + d1.Start() + d1.getService(c, id) + + d2 := s.AddDaemon(c, true, true) + d2.getService(c, id) + d2.Stop() + d2.Start() + d2.getService(c, id) + + d3 := s.AddDaemon(c, true, true) + d3.getService(c, id) + d3.Stop() + d3.Start() + d3.getService(c, id) + + d3.Kill() + time.Sleep(1 * time.Second) // time to handle signal + d3.Start() + d3.getService(c, id) +} + +func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + containers := d.activeContainers() + instances = 4 + d.updateService(c, d.getService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + containers2 := d.activeContainers() + +loop0: + for _, c1 := range containers { + for _, c2 := range containers2 { + if c1 == c2 { + continue loop0 + } + } + c.Errorf("container %v not found in new set %#v", c1, containers2) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) { + d := s.AddDaemon(c, false, false) + req := swarm.InitRequest{ + ListenAddr: "", + } + status, _, err := d.SockRequest("POST", "/swarm/init", req) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + req2 := swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + RemoteAddrs: []string{""}, + } + status, _, err = d.SockRequest("POST", "/swarm/join", req2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) +} + +func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.createService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) + + c.Assert(d2.Stop(), checker.IsNil) + + c.Assert(d1.Init(swarm.InitRequest{ + ForceNewCluster: true, + Spec: swarm.Spec{}, + }), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + + d3 := s.AddDaemon(c, true, true) + info, err := d3.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + instances = 4 + d3.updateService(c, d3.getService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) +} + +func simpleTestService(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + } + s.Spec.Name = "top" +} + +func serviceForUpdate(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: 2, + Delay: 4 * time.Second, + FailureAction: swarm.UpdateFailureActionContinue, + }, + } + s.Spec.Name = "updatetest" +} + +func setInstances(replicas int) serviceConstructor { + ureplicas := uint64(replicas) + return func(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + } + } +} + +func setImage(image string) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.TaskTemplate.ContainerSpec.Image = image + } +} + +func setFailureAction(failureAction string) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.FailureAction = failureAction + } +} + +func setMaxFailureRatio(maxFailureRatio float32) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio + } +} + +func setParallelism(parallelism uint64) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.Parallelism = parallelism + } +} + +func setConstraints(constraints []string) serviceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.Placement == nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{} + } + s.Spec.TaskTemplate.Placement.Constraints = constraints + } +} + +func setGlobalMode(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Global: &swarm.GlobalService{}, + } +} + +func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) { + var totalMCount, totalWCount int + + for _, d := range cl { + var ( + info swarm.Info + err error + ) + + // check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error + checkInfo := func(c *check.C) (interface{}, check.CommentInterface) { + info, err = d.info() + return err, check.Commentf("cluster not ready in time") + } + waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil) + if !info.ControlAvailable { + totalWCount++ + continue + } + + var leaderFound bool + totalMCount++ + var mCount, wCount int + + for _, n := range d.listNodes(c) { + waitReady := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Status.State == swarm.NodeStateReady { + return true, nil + } + nn := d.getNode(c, n.ID) + n = *nn + return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True) + + waitActive := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Spec.Availability == swarm.NodeAvailabilityActive { + return true, nil + } + nn := d.getNode(c, n.ID) + n = *nn + return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True) + + if n.Spec.Role == swarm.NodeRoleManager { + c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID)) + if n.ManagerStatus.Leader { + leaderFound = true + } + mCount++ + } else { + c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID)) + wCount++ + } + } + c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID)) + c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID)) + c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID)) + } + c.Assert(totalMCount, checker.Equals, managerCount) + c.Assert(totalWCount, checker.Equals, workerCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { + mCount, wCount := 5, 1 + + var nodes []*SwarmDaemon + for i := 0; i < mCount; i++ { + manager := s.AddDaemon(c, true, true) + info, err := manager.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + nodes = append(nodes, manager) + } + + for i := 0; i < wCount; i++ { + worker := s.AddDaemon(c, true, false) + info, err := worker.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + nodes = append(nodes, worker) + } + + // stop whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *SwarmDaemon) { + defer wg.Done() + if err := daemon.Stop(); err != nil { + errs <- err + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + daemon.root = filepath.Dir(daemon.root) + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + c.Assert(err, check.IsNil) + } + } + + // start whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *SwarmDaemon) { + defer wg.Done() + if err := daemon.Start("--iptables=false"); err != nil { + errs <- err + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + c.Assert(err, check.IsNil) + } + } + + checkClusterHealth(c, nodes, mCount, wCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.createService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + service := d.getService(c, id) + instances = 5 + + setInstances(instances)(service) + url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + secrets := d.listSecrets(c) + c.Assert(secrets, checker.NotNil) + c.Assert(len(secrets), checker.Equals, 0, check.Commentf("secrets: %#v", secrets)) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secrets := d.listSecrets(c) + c.Assert(len(secrets), checker.Equals, 1, check.Commentf("secrets: %#v", secrets)) + name := secrets[0].Spec.Annotations.Name + c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name)) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) + + d.deleteSecret(c, secret.ID) + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) +} + +// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`, +// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`. +// This test makes sure the fixes correctly output scopes instead. +func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + var n1 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "bridge" + + status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n1), checker.IsNil) + + var n2 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, out, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n2), checker.IsNil) + + var r1 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n1.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r1), checker.IsNil) + + c.Assert(r1.Scope, checker.Equals, "local") + + var r2 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n2.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r2), checker.IsNil) + + c.Assert(r2.Scope, checker.Equals, "swarm") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_test.go new file mode 100644 index 0000000..3b38ba9 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_test.go @@ -0,0 +1,118 @@ +package main + +import ( + "fmt" + "net/http" + "net/http/httptest" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIOptionsRoute(c *check.C) { + status, _, err := sockRequest("OPTIONS", "/", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIGetEnabledCORS(c *check.C) { + res, body, err := sockRequestRaw("GET", "/version", nil, "") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + body.Close() + // TODO: @runcom incomplete tests, why old integration tests had this headers + // and here none of the headers below are in the response? + //c.Log(res.Header) + //c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*") + //c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") +} + +func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *check.C) { + if daemonPlatform != runtime.GOOS { + c.Skip("Daemon platform doesn't match test platform") + } + if api.MinVersion == api.DefaultVersion { + c.Skip("API MinVersion==DefaultVersion") + } + v := strings.Split(api.MinVersion, ".") + vMinInt, err := strconv.Atoi(v[1]) + c.Assert(err, checker.IsNil) + vMinInt-- + v[1] = strconv.Itoa(vMinInt) + version := strings.Join(v, ".") + + status, body, err := sockRequest("GET", "/v"+version+"/version", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + expected := fmt.Sprintf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", version, api.MinVersion) + c.Assert(strings.TrimSpace(string(body)), checker.Contains, expected) +} + +func (s *DockerSuite) TestAPIDockerAPIVersion(c *check.C) { + var svrVersion string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + url := r.URL.Path + svrVersion = url + })) + defer server.Close() + + // Test using the env var first + result := icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs("-H="+server.URL[7:], "version"), + Env: appendBaseEnv(false, "DOCKER_API_VERSION=xxx"), + }) + c.Assert(result, icmd.Matches, icmd.Expected{Out: "API version: xxx", ExitCode: 1}) + c.Assert(svrVersion, check.Equals, "/vxxx/version", check.Commentf("%s", result.Compare(icmd.Success))) +} + +func (s *DockerSuite) TestAPIErrorJSON(c *check.C) { + httpResp, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(`{}`), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) { + // Windows requires API 1.25 or later. This test is validating a behaviour which was present + // in v1.23, but changed in 1.24, hence not applicable on Windows. See apiVersionSupportsJSONErrors + testRequires(c, DaemonIsLinux) + httpResp, body, err := sockRequestRaw("POST", "/v1.23/containers/create", strings.NewReader(`{}`), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) { + // 404 is a different code path to normal errors, so test separately + httpResp, body, err := sockRequestRaw("GET", "/notfound", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "page not found") +} + +func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) { + httpResp, body, err := sockRequestRaw("GET", "/v1.23/notfound", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go new file mode 100644 index 0000000..dfe14ec --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go @@ -0,0 +1,35 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIUpdateContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "apiUpdateContainer" + hostConfig := map[string]interface{}{ + "Memory": 314572800, + "MemorySwap": 524288000, + } + dockerCmd(c, "run", "-d", "--name", name, "-m", "200M", "busybox", "top") + _, _, err := sockRequest("POST", "/containers/"+name+"/update", hostConfig) + c.Assert(err, check.IsNil) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + file = "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go new file mode 100644 index 0000000..eb2de59 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go @@ -0,0 +1,23 @@ +package main + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestGetVersion(c *check.C) { + status, body, err := sockRequest("GET", "/version", nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + var v types.Version + + c.Assert(json.Unmarshal(body, &v), checker.IsNil) + + c.Assert(v.Version, checker.Equals, dockerversion.Version, check.Commentf("Version mismatch")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go new file mode 100644 index 0000000..d1d4400 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go @@ -0,0 +1,89 @@ +package main + +import ( + "encoding/json" + "net/http" + "path/filepath" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumesAPIList(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "busybox") + + status, b, err := sockRequest("GET", "/volumes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) +} + +func (s *DockerSuite) TestVolumesAPICreate(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + status, b, err := sockRequest("POST", "/volumes/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + var vol types.Volume + err = json.Unmarshal(b, &vol) + c.Assert(err, checker.IsNil) + + c.Assert(filepath.Base(filepath.Dir(vol.Mountpoint)), checker.Equals, config.Name) +} + +func (s *DockerSuite) TestVolumesAPIRemove(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "--name=test", "busybox") + + status, b, err := sockRequest("GET", "/volumes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + v := volumes.Volumes[0] + status, _, err = sockRequest("DELETE", "/volumes/"+v.Name, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("Should not be able to remove a volume that is in use")) + + dockerCmd(c, "rm", "-f", "test") + status, data, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf(string(data))) + +} + +func (s *DockerSuite) TestVolumesAPIInspect(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + status, b, err := sockRequest("POST", "/volumes/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + status, b, err = sockRequest("GET", "/volumes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + var vol types.Volume + status, b, err = sockRequest("GET", "/volumes/"+config.Name, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + c.Assert(json.Unmarshal(b, &vol), checker.IsNil) + c.Assert(vol.Name, checker.Equals, config.Name) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go new file mode 100644 index 0000000..2df4fdc --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go @@ -0,0 +1,168 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +const attachWait = 5 * time.Second + +func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { + endGroup := &sync.WaitGroup{} + startGroup := &sync.WaitGroup{} + endGroup.Add(3) + startGroup.Add(3) + + err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") + c.Assert(err, check.IsNil) + + startDone := make(chan struct{}) + endDone := make(chan struct{}) + + go func() { + endGroup.Wait() + close(endDone) + }() + + go func() { + startGroup.Wait() + close(startDone) + }() + + for i := 0; i < 3; i++ { + go func() { + cmd := exec.Command(dockerBinary, "attach", "attacher") + + defer func() { + cmd.Wait() + endGroup.Done() + }() + + out, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer out.Close() + + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + buf := make([]byte, 1024) + + if _, err := out.Read(buf); err != nil && err != io.EOF { + c.Fatal(err) + } + + startGroup.Done() + + if !strings.Contains(string(buf), "hello") { + c.Fatalf("unexpected output %s expected hello\n", string(buf)) + } + }() + } + + select { + case <-startDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not initialize properly") + } + + dockerCmd(c, "kill", "attacher") + + select { + case <-endDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not finish properly") + } +} + +func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + done := make(chan error) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "attach", id) + if _, err := cmd.StdinPipe(); err != nil { + done <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + done <- fmt.Errorf("attach should have failed") + return + } else if !strings.Contains(out, expected) { + done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-done: + c.Assert(err, check.IsNil) + case <-time.After(attachWait): + c.Fatal("attach is running but should have failed") + } +} + +func (s *DockerSuite) TestAttachDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") + id := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "attach", id) + stdin, err := cmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Process.Kill() + + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + + c.Assert(stdin.Close(), check.IsNil) + + // Expect container to still be running after stdin is closed + running := inspectField(c, id, "State.Running") + c.Assert(running, check.Equals, "true") +} + +func (s *DockerSuite) TestAttachPausedContainer(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + runSleepingContainer(c, "-d", "--name=test") + dockerCmd(c, "pause", "test") + + result := dockerCmdWithResult("attach", "test") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 1", + ExitCode: 1, + Err: "You cannot attach to a paused container, unpause it first", + }) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go new file mode 100644 index 0000000..fb794cc --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go @@ -0,0 +1,237 @@ +// +build !windows + +package main + +import ( + "bufio" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #9860 Make sure attach ends when container ends (with no errors) +func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-dti", "busybox", "/bin/sh", "-c", `trap 'exit 0' SIGTERM; while true; do sleep 1; done`) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + + attachCmd := exec.Command(dockerBinary, "attach", id) + attachCmd.Stdin = tty + attachCmd.Stdout = tty + attachCmd.Stderr = tty + err = attachCmd.Start() + c.Assert(err, check.IsNil) + + errChan := make(chan error) + go func() { + time.Sleep(300 * time.Millisecond) + defer close(errChan) + // Container is waiting for us to signal it to stop + dockerCmd(c, "stop", id) + // And wait for the attach command to end + errChan <- attachCmd.Wait() + }() + + // Wait for the docker to end (should be done by the + // stop command in the go routine) + dockerCmd(c, "wait", id) + + select { + case err := <-errChan: + tty.Close() + out, _ := ioutil.ReadAll(pty) + c.Assert(err, check.IsNil, check.Commentf("out: %v", string(out))) + case <-time.After(attachWait): + c.Fatal("timed out without attach returning") + } + +} + +func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { + name := "detachtest" + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + errChan := make(chan error) + go func() { + errChan <- cmd.Run() + close(errChan) + }() + + c.Assert(waitRun(name), check.IsNil) + + cpty.Write([]byte{16}) + time.Sleep(100 * time.Millisecond) + cpty.Write([]byte{17}) + + select { + case err := <-errChan: + if err != nil { + buff := make([]byte, 200) + tty.Read(buff) + c.Fatalf("%s: %s", err, buff) + } + case <-time.After(5 * time.Second): + c.Fatal("timeout while detaching") + } + + cpty, tty, err = pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + + cmd = exec.Command(dockerBinary, "attach", name) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + err = cmd.Start() + c.Assert(err, checker.IsNil) + + bytes := make([]byte, 10) + var nBytes int + readErr := make(chan error, 1) + + go func() { + time.Sleep(500 * time.Millisecond) + cpty.Write([]byte("\n")) + time.Sleep(500 * time.Millisecond) + + nBytes, err = cpty.Read(bytes) + cpty.Close() + readErr <- err + }() + + select { + case err := <-readErr: + c.Assert(err, check.IsNil) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for attach read") + } + + err = cmd.Wait() + c.Assert(err, checker.IsNil) + + c.Assert(string(bytes[:nBytes]), checker.Contains, "/ #") + +} + +// TestAttachDetach checks that attach in tty mode can be detached using the long container ID +func (s *DockerSuite) TestAttachDetach(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + err = cmd.Start() + c.Assert(err, check.IsNil) + c.Assert(waitRun(id), check.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} + +// TestAttachDetachTruncatedID checks that attach in tty mode can be detached +func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := stringid.TruncateID(strings.TrimSpace(out)) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer stdout.Close() + err = cmd.Start() + c.Assert(err, checker.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go new file mode 100644 index 0000000..8a669fb --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go @@ -0,0 +1,133 @@ +// +build !windows + +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +var ( + authzPluginName = "riyaz/authz-no-volume-plugin" + authzPluginTag = "latest" + authzPluginNameWithTag = authzPluginName + ":" + authzPluginTag + authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest" + nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin" +) + +func init() { + check.Suite(&DockerAuthzV2Suite{ + ds: &DockerSuite{}, + }) +} + +type DockerAuthzV2Suite struct { + ds *DockerSuite + d *Daemon +} + +func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + s.d = NewDaemon(c) + c.Assert(s.d.Start(), check.IsNil) +} + +func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + // start the daemon with the plugin and load busybox, --net=none build fails otherwise + // because it needs to pull busybox + c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil) + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // defer disabling the plugin + defer func() { + c.Assert(s.d.Restart(), check.IsNil) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + // Ensure docker run command and accompanying docker ps are successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, true) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + + // restart the daemon with the plugin + c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil) + + // defer disabling the plugin + defer func() { + c.Assert(s.d.Restart(), check.IsNil) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + out, err := s.d.Cmd("volume", "create") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + // The plugin will block the command before it can determine the volume does not exist + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "prune", "-f") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginBadManifestFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin with bad manifest + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginBadManifestName) + c.Assert(err, checker.IsNil) + + // start the daemon with the plugin, it will error + c.Assert(s.d.Restart("--authorization-plugin="+authzPluginBadManifestName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + c.Assert(s.d.Restart(), check.IsNil) +} + +func (s *DockerAuthzV2Suite) TestNonexistentAuthZPluginFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + // start the daemon with a non-existent authz plugin, it will error + c.Assert(s.d.Restart("--authorization-plugin="+nonexistentAuthzPluginName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + c.Assert(s.d.Restart(), check.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go new file mode 100644 index 0000000..a826249 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go @@ -0,0 +1,477 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + + "bufio" + "bytes" + "os/exec" + "strconv" + "time" + + "net" + "net/http/httputil" + "net/url" + + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/plugins" + "github.com/go-check/check" +) + +const ( + testAuthZPlugin = "authzplugin" + unauthorizedMessage = "User unauthorized authz plugin" + errorMessage = "something went wrong..." + containerListAPI = "/containers/json" +) + +var ( + alwaysAllowed = []string{"/_ping", "/info"} +) + +func init() { + check.Suite(&DockerAuthzSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerAuthzSuite struct { + server *httptest.Server + ds *DockerSuite + d *Daemon + ctrl *authorizationController +} + +type authorizationController struct { + reqRes authorization.Response // reqRes holds the plugin response to the initial client request + resRes authorization.Response // resRes holds the plugin response to the daemon response + psRequestCnt int // psRequestCnt counts the number of calls to list container request api + psResponseCnt int // psResponseCnt counts the number of calls to list containers response API + requestsURIs []string // requestsURIs stores all request URIs that are sent to the authorization controller + reqUser string + resUser string +} + +func (s *DockerAuthzSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) + s.ctrl = &authorizationController{} +} + +func (s *DockerAuthzSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) + s.ctrl = nil +} + +func (s *DockerAuthzSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}}) + c.Assert(err, check.IsNil) + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody) + assertAuthHeaders(c, authReq.RequestHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psRequestCnt++ + } + + s.ctrl.requestsURIs = append(s.ctrl.requestsURIs, authReq.RequestURI) + + reqRes := s.ctrl.reqRes + if isAllowed(authReq.RequestURI) { + reqRes = authorization.Response{Allow: true} + } + if reqRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(reqRes) + c.Assert(err, check.IsNil) + s.ctrl.reqUser = authReq.User + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody) + assertAuthHeaders(c, authReq.ResponseHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psResponseCnt++ + } + resRes := s.ctrl.resRes + if isAllowed(authReq.RequestURI) { + resRes = authorization.Response{Allow: true} + } + if resRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(resRes) + c.Assert(err, check.IsNil) + s.ctrl.resUser = authReq.User + w.Write(b) + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) + err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644) + c.Assert(err, checker.IsNil) +} + +// check for always allowed endpoints to not inhibit test framework functions +func isAllowed(reqURI string) bool { + for _, endpoint := range alwaysAllowed { + if strings.HasSuffix(reqURI, endpoint) { + return true + } + } + return false +} + +// assertAuthHeaders validates authentication headers are removed +func assertAuthHeaders(c *check.C, headers map[string]string) error { + for k := range headers { + if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") { + c.Errorf("Found authentication headers in request '%v'", headers) + } + } + return nil +} + +// assertBody asserts that body is removed for non text/json requests +func assertBody(c *check.C, requestURI string, headers map[string]string, body []byte) { + if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 { + //return fmt.Errorf("Body included for authentication endpoint %s", string(body)) + c.Errorf("Body included for authentication endpoint %s", string(body)) + } + + for k, v := range headers { + if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" { + return + } + } + if len(body) > 0 { + c.Errorf("Body included while it should not (Headers: '%v')", headers) + } +} + +func (s *DockerAuthzSuite) TearDownSuite(c *check.C) { + if s.server == nil { + return + } + + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) { + // start the daemon and load busybox, --net=none build fails otherwise + // cause it needs to pull busybox + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // Ensure command successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") + assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id)) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, true) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) { + + const testDaemonHTTPSAddr = "tcp://localhost:4271" + // start the daemon and load busybox, --net=none build fails otherwise + // cause it needs to pull busybox + if err := s.d.Start( + "--authorization-plugin="+testAuthZPlugin, + "--tlsverify", + "--tlscacert", + "fixtures/https/ca.pem", + "--tlscert", + "fixtures/https/server-cert.pem", + "--tlskey", + "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "-H", + testDaemonHTTPSAddr, + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } + + c.Assert(s.ctrl.reqUser, check.Equals, "client") + c.Assert(s.ctrl.resUser, check.Equals, "client") +} + +func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = false + s.ctrl.reqRes.Msg = unauthorizedMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 0) + + // Ensure unauthorized message appears in response + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) +} + +// TestAuthZPluginAPIDenyResponse validates that when authorization plugin deny the request, the status code is forbidden +func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = false + s.ctrl.resRes.Msg = unauthorizedMessage + + daemonURL, err := url.Parse(s.d.sock()) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + c.Assert(err, check.IsNil) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + c.Assert(err, check.IsNil) + resp, err := client.Do(req) + + c.Assert(err, check.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusForbidden) + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = false + s.ctrl.resRes.Msg = unauthorizedMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) + + // Ensure unauthorized message appears in response + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) +} + +// TestAuthZPluginAllowEventStream verifies event stream propagates correctly after request pass through by the authorization plugin +func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) { + testRequires(c, DaemonIsLinux) + + // start the daemon and load busybox to avoid pulling busybox from Docker Hub + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + startTime := strconv.FormatInt(daemonTime(c).Unix(), 10) + // Add another command to to enable event pipelining + eventsCmd := exec.Command(dockerBinary, "--host", s.d.sock(), "events", "--since", startTime) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + c.Assert(err, check.IsNil) + } + + observer := eventObserver{ + buffer: new(bytes.Buffer), + command: eventsCmd, + scanner: bufio.NewScanner(stdout), + startTime: startTime, + } + + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + // Create a container and wait for the creation events + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + containerID := strings.TrimSpace(out) + c.Assert(s.d.waitRun(containerID), checker.IsNil) + + events := map[string]chan bool{ + "create": make(chan bool, 1), + "start": make(chan bool, 1), + } + + matcher := matchEventLine(containerID, "container", events) + processor := processEventMatch(events) + go observer.Match(matcher, processor) + + // Ensure all events are received + for event, eventChannel := range events { + + select { + case <-time.After(30 * time.Second): + // Fail the test + observer.CheckEventError(c, containerID, event, matcher) + c.FailNow() + case <-eventChannel: + // Ignore, event received + } + } + + // Ensure both events and container endpoints are passed to the authorization plugin + assertURIRecorded(c, s.ctrl.requestsURIs, "/events") + assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") + assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", containerID)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Err = errorMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Err = errorMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) { + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) + + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + + out, err := s.d.Cmd("ps") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // assert plugin is only called once.. + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) { + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + tmp, err := ioutil.TempDir("", "test-authz-load-import") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + savedImagePath := filepath.Join(tmp, "save.tar") + + out, err := s.d.Cmd("save", "-o", savedImagePath, "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("load", "--input", savedImagePath) + c.Assert(err, check.IsNil, check.Commentf(out)) + + exportedImagePath := filepath.Join(tmp, "export.tar") + + out, err = s.d.Cmd("run", "-d", "--name", "testexport", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("export", "-o", exportedImagePath, "testexport") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("import", exportedImagePath) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) { + c.Assert(s.d.Start("--debug", "--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + daemonURL, err := url.Parse(s.d.sock()) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + c.Assert(err, check.IsNil) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + c.Assert(err, check.IsNil) + resp, err := client.Do(req) + + c.Assert(err, check.IsNil) + c.Assert(resp.Header["Content-Type"][0], checker.Equals, "application/json") +} + +// assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin +func assertURIRecorded(c *check.C, uris []string, uri string) { + var found bool + for _, u := range uris { + if strings.Contains(u, uri) { + found = true + break + } + } + if !found { + c.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ",")) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go new file mode 100644 index 0000000..49c1062 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go @@ -0,0 +1,7392 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "text/template" + "time" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { + name := "testbuildjsonemptyrun" + + _, err := buildImage( + name, + ` + FROM busybox + RUN [] + `, + true) + + if err != nil { + c.Fatal("error when dealing with a RUN statement with empty JSON array") + } + +} + +func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { + name := "testbuildshcmdjsonentrypoint" + + _, err := buildImage( + name, + ` + FROM busybox + ENTRYPOINT ["echo"] + CMD echo test + `, + true) + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", name) + + if daemonPlatform == "windows" { + if !strings.Contains(out, "cmd /S /C echo test") { + c.Fatalf("CMD did not contain cmd /S /C echo test : %q", out) + } + } else { + if strings.TrimSpace(out) != "/bin/sh -c echo test" { + c.Fatalf("CMD did not contain /bin/sh -c : %q", out) + } + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { + // Windows does not support FROM scratch or the USER command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM scratch + ENV user foo + USER ${user} + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.User") + + if res != `"foo"` { + c.Fatal("User foo from environment not in Config.User on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { + name := "testbuildenvironmentreplacement" + + var volumePath string + + if daemonPlatform == "windows" { + volumePath = "c:/quux" + } else { + volumePath = "/quux" + } + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + ENV volume `+volumePath+` + VOLUME ${volume} + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Volumes") + + var volumes map[string]interface{} + + if err := json.Unmarshal([]byte(res), &volumes); err != nil { + c.Fatal(err) + } + + if _, ok := volumes[volumePath]; !ok { + c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { + // Windows does not support FROM scratch or the EXPOSE command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM scratch + ENV port 80 + EXPOSE ${port} + ENV ports " 99 100 " + EXPOSE ${ports} + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.ExposedPorts") + + var exposedPorts map[string]interface{} + + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + exp := []int{80, 99, 100} + + for _, p := range exp { + tmp := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[tmp]; !ok { + c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p) + } + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM busybox + ENV MYWORKDIR /work + RUN mkdir ${MYWORKDIR} + WORKDIR ${MYWORKDIR} + `, true) + + if err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { + name := "testbuildenvironmentreplacement" + + ctx, err := fakeContext(` + FROM `+minimalBaseImage()+` + ENV baz foo + ENV quux bar + ENV dot . + ENV fee fff + ENV gee ggg + + ADD ${baz} ${dot} + COPY ${quux} ${dot} + ADD ${zzz:-${fee}} ${dot} + COPY ${zzz:-${gee}} ${dot} + `, + map[string]string{ + "foo": "test1", + "bar": "test2", + "fff": "test3", + "ggg": "test4", + }) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, + ` + FROM busybox + ENV foo zzz + ENV bar ${foo} + ENV abc1='$foo' + ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}" + RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo) + ENV abc2="\$foo" + RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo) + ENV abc3 '$foo' + RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) + ENV abc4 "\$foo" + RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) + `, true) + + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Env") + + envResult := []string{} + + if err = json.Unmarshal([]byte(res), &envResult); err != nil { + c.Fatal(err) + } + + found := false + envCount := 0 + + for _, env := range envResult { + parts := strings.SplitN(env, "=", 2) + if parts[0] == "bar" { + found = true + if parts[1] != "zzz" { + c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if parts[1] != "zzz" { + c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if parts[1] != "foo" { + c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + } + } + } + + if !found { + c.Fatal("Never found the `bar` env variable") + } + + if envCount != 4 { + c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) + } + +} + +func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) { + // The volume paths used in this test are invalid on Windows + testRequires(c, DaemonIsLinux) + name := "testbuildhandleescapes" + + _, err := buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME ${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + var result map[string]map[string]struct{} + + res := inspectFieldJSON(c, name, "Config.Volumes") + + if err = json.Unmarshal([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result["bar"]; !ok { + c.Fatalf("Could not find volume bar set from env foo in volumes table, got %q", result) + } + + deleteImages(name) + + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + res = inspectFieldJSON(c, name, "Config.Volumes") + + if err = json.Unmarshal([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result["${FOO}"]; !ok { + c.Fatalf("Could not find volume ${FOO} set from env foo in volumes table, got %q", result) + } + + deleteImages(name) + + // this test in particular provides *7* backslashes and expects 6 to come back. + // Like above, the first escape is swallowed and the rest are treated as + // literals, this one is just less obvious because of all the character noise. + + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \\\\\\\${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + res = inspectFieldJSON(c, name, "Config.Volumes") + + if err = json.Unmarshal([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result[`\\\${FOO}`]; !ok { + c.Fatalf(`Could not find volume \\\${FOO} set from env foo in volumes table, got %q`, result) + } + +} + +func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { + name := "testbuildonbuildlowercase" + name2 := "testbuildonbuildlowercase2" + + _, err := buildImage(name, + ` + FROM busybox + onbuild run echo quux + `, true) + + if err != nil { + c.Fatal(err) + } + + _, out, err := buildImageWithOut(name2, fmt.Sprintf(` + FROM %s + `, name), true) + + if err != nil { + c.Fatal(err) + } + + if !strings.Contains(out, "quux") { + c.Fatalf("Did not receive the expected echo text, got %s", out) + } + + if strings.Contains(out, "ONBUILD ONBUILD") { + c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) + } + +} + +func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvescapes" + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo \$ + `, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-t", name) + + if strings.TrimSpace(out) != "$" { + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvoverwrite" + + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo ${TEST} + `, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name) + + if strings.TrimSpace(out) != "bar" { + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD CMD ["hello world"] +ONBUILD ENTRYPOINT ["echo"] +ONBUILD RUN ["true"]`, + false) + + if err != nil { + c.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", name2) + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatalf("did not get echo output from onbuild. Got: %q", out) + } + +} + +func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD ENTRYPOINT ["echo"]`, + false) + + if err != nil { + c.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", name2) + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatal("got malformed output from onbuild", out) + } + +} + +func (s *DockerSuite) TestBuildCacheAdd(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet + name := "testbuildtwoimageswithadd" + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + if _, err := buildImage(name, + fmt.Sprintf(`FROM scratch + ADD %s/robots.txt /`, server.URL()), + true); err != nil { + c.Fatal(err) + } + if err != nil { + c.Fatal(err) + } + deleteImages(name) + _, out, err := buildImageWithOut(name, + fmt.Sprintf(`FROM scratch + ADD %s/index.html /`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + if strings.Contains(out, "Using cache") { + c.Fatal("2nd build used cache on ADD, it shouldn't") + } + +} + +func (s *DockerSuite) TestBuildLastModified(c *check.C) { + name := "testbuildlastmodified" + + server, err := fakeStorage(map[string]string{ + "file": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + var out, out2 string + + dFmt := `FROM busybox +ADD %s/file /` + + dockerfile := fmt.Sprintf(dFmt, server.URL()) + + if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + + // Build it again and make sure the mtime of the file didn't change. + // Wait a few seconds to make sure the time changed enough to notice + time.Sleep(2 * time.Second) + + if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + + if out != out2 { + c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2) + } + + // Now 'touch' the file and make sure the timestamp DID change this time + // Create a new fakeStorage instead of just using Add() to help windows + server, err = fakeStorage(map[string]string{ + "file": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + dockerfile = fmt.Sprintf(dFmt, server.URL()) + + if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + + if out == out2 { + c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2) + } + +} + +// Regression for https://github.com/docker/docker/pull/27805 +// Makes sure that we don't use the cache if the contents of +// a file in a subfolder of the context is modified and we re-build. +func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) { + name := "testbuildmodifyfileinfolder" + + ctx, err := fakeContext(`FROM busybox +RUN ["mkdir", "/test"] +ADD folder/file /test/changetarget`, + map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if err := ctx.Add("folder/file", "first"); err != nil { + c.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if err := ctx.Add("folder/file", "second"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("cache was used even though file contents in folder was changed") + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddimg" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Issue #3960: "ADD src ." hangs +func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { + name := "testaddsinglefiletoworkdir" + ctx, err := fakeContext(`FROM busybox +ADD test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + errChan := make(chan error) + go func() { + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddsinglefiletoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + name := "testcopymultiplefilestofile" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 %s/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +`, server.URL()), + map[string]string{ + "test_file1": "test1", + "test_file2": "test2", + "test_file3": "test3", + "test_file4": "test4", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// This test is mainly for user namespaces to verify that new directories +// are created as the remapped root uid/gid pair +func (s *DockerSuite) TestBuildAddToNewDestination(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddtonewdest" + ctx, err := fakeContext(`FROM busybox +ADD . /new_dir +RUN ls -l / +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test file", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// This test is mainly for user namespaces to verify that new directories +// are created as the remapped root uid/gid pair +func (s *DockerSuite) TestBuildCopyToNewParentDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopytonewdir" + ctx, err := fakeContext(`FROM busybox +COPY test_dir /new_dir +RUN ls -l /new_dir +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test file", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// This test is mainly for user namespaces to verify that new directories +// are created as the remapped root uid/gid pair +func (s *DockerSuite) TestBuildWorkdirIsContainerRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testworkdirownership" + if _, err := buildImage(name, `FROM busybox +WORKDIR /new_dir +RUN ls -l / +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { + testRequires(c, DaemonIsLinux) // Not currently passing on Windows + name := "testaddfilewithwhitespace" + ctx, err := fakeContext(`FROM busybox +RUN mkdir "/test dir" +RUN mkdir "/test_dir" +ADD [ "test file1", "/test_file1" ] +ADD [ "test_file2", "/test file2" ] +ADD [ "test file3", "/test file3" ] +ADD [ "test dir/test_file4", "/test_dir/test_file4" ] +ADD [ "test_dir/test_file5", "/test dir/test_file5" ] +ADD [ "test dir/test_file6", "/test dir/test_file6" ] +RUN [ $(cat "/test_file1") = 'test1' ] +RUN [ $(cat "/test file2") = 'test2' ] +RUN [ $(cat "/test file3") = 'test3' ] +RUN [ $(cat "/test_dir/test_file4") = 'test4' ] +RUN [ $(cat "/test dir/test_file5") = 'test5' ] +RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, + map[string]string{ + "test file1": "test1", + "test_file2": "test2", + "test file3": "test3", + "test dir/test_file4": "test4", + "test_dir/test_file5": "test5", + "test dir/test_file6": "test6", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) { + dockerfile := `FROM busybox +RUN mkdir "/test dir" +RUN mkdir "/test_dir" +COPY [ "test file1", "/test_file1" ] +COPY [ "test_file2", "/test file2" ] +COPY [ "test file3", "/test file3" ] +COPY [ "test dir/test_file4", "/test_dir/test_file4" ] +COPY [ "test_dir/test_file5", "/test dir/test_file5" ] +COPY [ "test dir/test_file6", "/test dir/test_file6" ] +RUN [ $(cat "/test_file1") = 'test1' ] +RUN [ $(cat "/test file2") = 'test2' ] +RUN [ $(cat "/test file3") = 'test3' ] +RUN [ $(cat "/test_dir/test_file4") = 'test4' ] +RUN [ $(cat "/test dir/test_file5") = 'test5' ] +RUN [ $(cat "/test dir/test_file6") = 'test6' ]` + + if daemonPlatform == "windows" { + dockerfile = `FROM ` + WindowsBaseImage + ` +RUN mkdir "C:/test dir" +RUN mkdir "C:/test_dir" +COPY [ "test file1", "/test_file1" ] +COPY [ "test_file2", "/test file2" ] +COPY [ "test file3", "/test file3" ] +COPY [ "test dir/test_file4", "/test_dir/test_file4" ] +COPY [ "test_dir/test_file5", "/test dir/test_file5" ] +COPY [ "test dir/test_file6", "/test dir/test_file6" ] +RUN find "test1" "C:/test_file1" +RUN find "test2" "C:/test file2" +RUN find "test3" "C:/test file3" +RUN find "test4" "C:/test_dir/test_file4" +RUN find "test5" "C:/test dir/test_file5" +RUN find "test6" "C:/test dir/test_file6"` + } + + name := "testcopyfilewithwhitespace" + ctx, err := fakeContext(dockerfile, + map[string]string{ + "test file1": "test1", + "test_file2": "test2", + "test file3": "test3", + "test dir/test_file4": "test4", + "test_dir/test_file5": "test5", + "test dir/test_file6": "test6", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { + name := "testcopywildcard" + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + COPY file*.txt /tmp/ + RUN ls /tmp/file1.txt /tmp/file2.txt + RUN [ "mkdir", "/tmp1" ] + COPY dir* /tmp1/ + RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file + RUN [ "mkdir", "/tmp2" ] + ADD dir/*dir %s/robots.txt /tmp2/ + RUN ls /tmp2/nest_nest_file /tmp2/robots.txt + `, server.URL()), + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test2", + "dir/nested_file": "nested file", + "dir/nested_dir/nest_nest_file": "2 times nested", + "dirt": "dirty", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + // Now make sure we use a cache the 2nd time + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { + name := "testcopywildcardinname" + ctx, err := fakeContext(`FROM busybox + COPY *.txt /tmp/ + RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] + `, map[string]string{"*.txt": "hi there"}) + + if err != nil { + // Normally we would do c.Fatal(err) here but given that + // the odds of this failing are so rare, it must be because + // the OS we're running the client on doesn't support * in + // filenames (like windows). So, instead of failing the test + // just let it pass. Then we don't need to explicitly + // say which OSs this works on or not. + return + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("should have built: %q", err) + } +} + +func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { + name := "testcopywildcardcache" + ctx, err := fakeContext(`FROM busybox + COPY file1.txt /tmp/`, + map[string]string{ + "file1.txt": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + // Now make sure we use a cache the 2nd time even with wild cards. + // Use the same context so the file is the same and the checksum will match + ctx.Add("Dockerfile", `FROM busybox + COPY file*.txt /tmp/`) + + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddsinglefiletononexistingdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testadddircontenttoroot" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testadddircontenttoexistingdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddwholedirtoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Testing #5941 +func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) { + name := "testaddetctoroot" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` +ADD . /`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Testing #9401 +func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddpreservesfilesspecialbits" + ctx, err := fakeContext(`FROM busybox +ADD suidbin /usr/bin/suidbin +RUN chmod 4755 /usr/bin/suidbin +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] +ADD ./data/ / +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, + map[string]string{ + "suidbin": "suidbin", + "/data/usr/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopysinglefiletoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Issue #3960: "ADD src ." hangs - adapted for COPY +func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { + name := "testcopysinglefiletoworkdir" + ctx, err := fakeContext(`FROM busybox +COPY test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + errChan := make(chan error) + go func() { + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopysinglefiletoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopysinglefiletononexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopydircontenttoroot" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopydircontenttoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopywholedirtoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) { + name := "testcopyetctoroot" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` +COPY . /`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { + testRequires(c, DaemonIsLinux) // Not currently working on Windows + + dockerfile := ` + FROM scratch + ADD links.tar / + ADD foo.txt /symlink/ + ` + targetFile := "foo.txt" + var ( + name = "test-link-absolute" + ) + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + var symlinkTarget string + if runtime.GOOS == "windows" { + var driveLetter string + if abs, err := filepath.Abs(tempDir); err != nil { + c.Fatal(err) + } else { + driveLetter = abs[:1] + } + tempDirWithoutDrive := tempDir[2:] + symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) + } else { + symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) + } + + tarPath := filepath.Join(ctx.Dir, "links.tar") + nonExistingFile := filepath.Join(tempDir, targetFile) + fooPath := filepath.Join(ctx.Dir, targetFile) + + tarOut, err := os.Create(tarPath) + if err != nil { + c.Fatal(err) + } + + tarWriter := tar.NewWriter(tarOut) + + header := &tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: symlinkTarget, + Mode: 0755, + Uid: 0, + Gid: 0, + } + + err = tarWriter.WriteHeader(header) + if err != nil { + c.Fatal(err) + } + + tarWriter.Close() + tarOut.Close() + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { + testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox + const ( + dockerfileTemplate = ` + FROM busybox + RUN ln -s /../../../../../../../../%s /x + VOLUME /x + ADD foo.txt /x/` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute-volume" + dockerfile = "" + ) + + tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) + nonExistingFile := filepath.Join(tempDir, targetFile) + + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + fooPath := filepath.Join(ctx.Dir, targetFile) + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +// Issue #5270 - ensure we throw a better error than "unexpected EOF" +// when we can't access files in the context. +func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { + testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows + + { + name := "testbuildinaccessiblefiles" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible files early during build in the cli client + pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") + + if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown file to root: %s", err) + } + if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "no permission to read from ") { + c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) + } + + if !strings.Contains(out, "Error checking context") { + c.Fatalf("output should've contained the string: Error checking context") + } + } + { + name := "testbuildinaccessibledirectory" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible directories early during build in the cli client + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "can't stat") { + c.Fatalf("output should've contained the string: can't access %s", out) + } + + if !strings.Contains(out, "Error checking context") { + c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out) + } + + } + { + name := "testlinksok" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + target := "../../../../../../../../../../../../../../../../../../../azA" + if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { + c.Fatal(err) + } + defer os.Remove(target) + // This is used to ensure we don't follow links when checking if everything in the context is accessible + // This test doesn't require that we run commands as an unprivileged user + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + } + { + name := "testbuildignoredinaccessible" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", + map[string]string{ + "directoryWeCantStat/bar": "foo", + ".dockerignore": "directoryWeCantStat", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + result := icmd.RunCmd(icmd.Cmd{ + Dir: ctx.Dir, + Command: []string{"su", "unprivilegeduser", "-c", + fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + }) + result.Assert(c, icmd.Expected{}) + } +} + +func (s *DockerSuite) TestBuildForceRm(c *check.C) { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + name := "testbuildforcerm" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + RUN true + RUN thiswillfail`, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".") + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("--force-rm shouldn't have left containers behind") + } + +} + +func (s *DockerSuite) TestBuildRm(c *check.C) { + name := "testbuildrm" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + ADD foo / + ADD foo /`, map[string]string{"foo": "bar"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("-rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("--rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore == containerCountAfter { + c.Fatalf("--rm=false should have left containers behind") + } + deleteImages(name) + + } + +} + +func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { + testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows + var ( + result map[string]map[string]struct{} + name = "testbuildvolumes" + emptyMap = make(map[string]struct{}) + expected = map[string]map[string]struct{}{ + "/test1": emptyMap, + "/test2": emptyMap, + "/test3": emptyMap, + "/test4": emptyMap, + "/test5": emptyMap, + "/test6": emptyMap, + "[/test7": emptyMap, + "/test8]": emptyMap, + } + ) + _, err := buildImage(name, + `FROM scratch + VOLUME /test1 + VOLUME /test2 + VOLUME /test3 /test4 + VOLUME ["/test5", "/test6"] + VOLUME [/test7 /test8] + `, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Volumes") + + err = json.Unmarshal([]byte(res), &result) + if err != nil { + c.Fatal(err) + } + + equal := reflect.DeepEqual(&result, &expected) + + if !equal { + c.Fatalf("Volumes %s, expected %s", result, expected) + } + +} + +func (s *DockerSuite) TestBuildMaintainer(c *check.C) { + name := "testbuildmaintainer" + + expected := "dockerio" + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Author") + if res != expected { + c.Fatalf("Maintainer %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildUser(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuilduser" + expected := "dockerio" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio + RUN [ $(whoami) = 'dockerio' ]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.User") + if res != expected { + c.Fatalf("User %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { + name := "testbuildrelativeworkdir" + + var ( + expected1 string + expected2 string + expected3 string + expected4 string + expectedFinal string + ) + + if daemonPlatform == "windows" { + expected1 = `C:/` + expected2 = `C:/test1` + expected3 = `C:/test2` + expected4 = `C:/test2/test3` + expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox + } else { + expected1 = `/` + expected2 = `/test1` + expected3 = `/test2` + expected4 = `/test2/test3` + expectedFinal = `/test2/test3` + } + + _, err := buildImage(name, + `FROM busybox + RUN sh -c "[ "$PWD" = "`+expected1+`" ]" + WORKDIR test1 + RUN sh -c "[ "$PWD" = "`+expected2+`" ]" + WORKDIR /test2 + RUN sh -c "[ "$PWD" = "`+expected3+`" ]" + WORKDIR test3 + RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.WorkingDir") + if res != expectedFinal { + c.Fatalf("Workdir %s, expected %s", res, expectedFinal) + } +} + +// #22181 Regression test. Single end-to-end test of using +// Windows semantics. Most path handling verifications are in unit tests +func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsworkdirprocessing" + _, err := buildImage(name, + `FROM busybox + WORKDIR C:\\foo + WORKDIR bar + RUN sh -c "[ "$PWD" = "C:/foo/bar" ]" + `, + true) + if err != nil { + c.Fatal(err) + } +} + +// #22181 Regression test. Most paths handling verifications are in unit test. +// One functional test for end-to-end +func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsaddcopypathprocessing" + // TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to + // support backslash such as .\\ being equivalent to ./ and c:\\ being + // equivalent to c:/. This is not currently (nor ever has been) supported + // by docker on the Windows platform. + dockerfile := ` + FROM busybox + # No trailing slash on COPY/ADD + # Results in dir being changed to a file + WORKDIR /wc1 + COPY wc1 c:/wc1 + WORKDIR /wc2 + ADD wc2 c:/wc2 + WORKDIR c:/ + RUN sh -c "[ $(cat c:/wc1/wc1) = 'hellowc1' ]" + RUN sh -c "[ $(cat c:/wc2/wc2) = 'worldwc2' ]" + + # Trailing slash on COPY/ADD, Windows-style path. + WORKDIR /wd1 + COPY wd1 c:/wd1/ + WORKDIR /wd2 + ADD wd2 c:/wd2/ + RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]" + RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]" + ` + ctx, err := fakeContext(dockerfile, map[string]string{ + "wc1": "hellowc1", + "wc2": "worldwc2", + "wd1": "hellowd1", + "wd2": "worldwd2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { + name := "testbuildworkdirwithenvvariables" + + var expected string + if daemonPlatform == "windows" { + expected = `C:\test1\test2` + } else { + expected = `/test1/test2` + } + + _, err := buildImage(name, + `FROM busybox + ENV DIRPATH /test1 + ENV SUBDIRNAME test2 + WORKDIR $DIRPATH + WORKDIR $SUBDIRNAME/$MISSING_VAR`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.WorkingDir") + if res != expected { + c.Fatalf("Workdir %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { + // cat /test1/test2/foo gets permission denied for the user + testRequires(c, NotUserNamespace) + + var expected string + if daemonPlatform == "windows" { + expected = `C:/test1/test2` + } else { + expected = `/test1/test2` + } + + name := "testbuildrelativecopy" + dockerfile := ` + FROM busybox + WORKDIR /test1 + WORKDIR test2 + RUN sh -c "[ "$PWD" = '` + expected + `' ]" + COPY foo ./ + RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]" + ADD foo ./bar/baz + RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]" + COPY foo ./bar/baz2 + RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]" + WORKDIR .. + COPY foo ./ + RUN sh -c "[ $(cat /test1/foo) = 'hello' ]" + COPY foo /test3/ + RUN sh -c "[ $(cat /test3/foo) = 'hello' ]" + WORKDIR /test4 + COPY . . + RUN sh -c "[ $(cat /test4/foo) = 'hello' ]" + WORKDIR /test5/test6 + COPY foo ../ + RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" + ` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildBlankName(c *check.C) { + name := "testbuildblankname" + _, _, stderr, err := buildImageWithStdoutStderr(name, + `FROM busybox + ENV =`, + true) + if err == nil { + c.Fatal("Build was supposed to fail but didn't") + } + if !strings.Contains(stderr, "ENV names can not be blank") { + c.Fatalf("Missing error message, got: %s", stderr) + } + + _, _, stderr, err = buildImageWithStdoutStderr(name, + `FROM busybox + LABEL =`, + true) + if err == nil { + c.Fatal("Build was supposed to fail but didn't") + } + if !strings.Contains(stderr, "LABEL names can not be blank") { + c.Fatalf("Missing error message, got: %s", stderr) + } + + _, _, stderr, err = buildImageWithStdoutStderr(name, + `FROM busybox + ARG =foo`, + true) + if err == nil { + c.Fatal("Build was supposed to fail but didn't") + } + if !strings.Contains(stderr, "ARG names can not be blank") { + c.Fatalf("Missing error message, got: %s", stderr) + } +} + +func (s *DockerSuite) TestBuildEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + name := "testbuildenv" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + _, err := buildImage(name, + `FROM busybox + ENV PATH /test:$PATH + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Env") + if res != expected { + c.Fatalf("Env %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildPATH(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + + defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + + fn := func(dockerfile string, exp string) { + _, err := buildImage("testbldpath", dockerfile, true) + c.Assert(err, check.IsNil) + + res := inspectField(c, "testbldpath", "Config.Env") + + if res != exp { + c.Fatalf("Env %q, expected %q for dockerfile:%q", res, exp, dockerfile) + } + } + + tests := []struct{ dockerfile, exp string }{ + {"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM scratch\nENV PATH=/test", "[PATH=/test]"}, + {"FROM busybox\nENV PATH=/test", "[PATH=/test]"}, + {"FROM scratch\nENV PATH=''", "[PATH=]"}, + {"FROM busybox\nENV PATH=''", "[PATH=]"}, + } + + for _, test := range tests { + fn(test.dockerfile, test.exp) + } +} + +func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + RUN /non/existing/command`, + true) + if err == nil { + c.Fatalf("expected build to fail, but it didn't") + } + entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildCmd(c *check.C) { + name := "testbuildcmd" + + expected := "[/bin/echo Hello World]" + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + CMD ["/bin/echo", "Hello World"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Cmd") + if res != expected { + c.Fatalf("Cmd %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExpose(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexpose" + expected := "map[2375/tcp:{}]" + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + // start building docker file with a large number of ports + portList := make([]string, 50) + line := make([]string, 100) + expectedPorts := make([]int, len(portList)*len(line)) + for i := 0; i < len(portList); i++ { + for j := 0; j < len(line); j++ { + p := i*len(line) + j + 1 + line[j] = strconv.Itoa(p) + expectedPorts[p-1] = p + } + if i == len(portList)-1 { + portList[i] = strings.Join(line, " ") + } else { + portList[i] = strings.Join(line, " ") + ` \` + } + } + + dockerfile := `FROM scratch + EXPOSE {{range .}} {{.}} + {{end}}` + tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) + buf := bytes.NewBuffer(nil) + tmpl.Execute(buf, portList) + + name := "testbuildexpose" + _, err := buildImage(name, buf.String(), true) + if err != nil { + c.Fatal(err) + } + + // check if all the ports are saved inside Config.ExposedPorts + res := inspectFieldJSON(c, name, "Config.ExposedPorts") + var exposedPorts map[string]interface{} + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + for _, p := range expectedPorts { + ep := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[ep]; !ok { + c.Errorf("Port(%s) is not exposed", ep) + } else { + delete(exposedPorts, ep) + } + } + if len(exposedPorts) != 0 { + c.Errorf("Unexpected extra exposed ports %v", exposedPorts) + } +} + +func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + buildID := func(name, exposed string) string { + _, err := buildImage(name, fmt.Sprintf(`FROM scratch + EXPOSE %s`, exposed), true) + if err != nil { + c.Fatal(err) + } + id := inspectField(c, name, "Id") + return id + } + + id1 := buildID("testbuildexpose1", "80 2375") + id2 := buildID("testbuildexpose2", "2375 80") + if id1 != id2 { + c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") + } +} + +func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexposeuppercaseproto" + expected := "map[5678/udp:{}]" + _, err := buildImage(name, + `FROM scratch + EXPOSE 5678/UDP`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { + name := "testbuildentrypointinheritance" + name2 := "testbuildentrypointinheritance2" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Entrypoint") + + expected := "[/bin/echo]" + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + _, err = buildImage(name2, + fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name), + true) + if err != nil { + c.Fatal(err) + } + res = inspectField(c, name2, "Config.Entrypoint") + + expected = "[]" + + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { + name := "testbuildentrypoint" + expected := "[]" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT []`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { + name := "testbuildentrypoint" + + expected := "[/bin/echo]" + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +// #6445 ensure ONBUILD triggers aren't committed to grandchildren +func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { + var ( + out2, out3 string + ) + { + name1 := "testonbuildtrigger1" + dockerfile1 := ` + FROM busybox + RUN echo "GRANDPARENT" + ONBUILD RUN echo "ONBUILD PARENT" + ` + ctx, err := fakeContext(dockerfile1, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out1, err) + } + } + { + name2 := "testonbuildtrigger2" + dockerfile2 := ` + FROM testonbuildtrigger1 + ` + ctx, err := fakeContext(dockerfile2, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out2, err) + } + } + { + name3 := "testonbuildtrigger3" + dockerfile3 := ` + FROM testonbuildtrigger2 + ` + ctx, err := fakeContext(dockerfile3, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out3, err) + } + + } + + // ONBUILD should be run in second build. + if !strings.Contains(out2, "ONBUILD PARENT") { + c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") + } + + // ONBUILD should *not* be run in third build. + if strings.Contains(out3, "ONBUILD PARENT") { + c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") + } + +} + +func (s *DockerSuite) TestBuildWithCache(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildwithcache" + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildWithoutCache(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildwithoutcache" + name2 := "testbuildwithoutcache2" + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + + id2, err := buildImage(name2, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { + name := "testbuildconditionalcache" + + dockerfile := ` + FROM busybox + ADD foo /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("Error building #1: %s", err) + } + + if err := ctx.Add("foo", "bye"); err != nil { + c.Fatalf("Error modifying foo: %s", err) + } + + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatalf("Error building #2: %s", err) + } + if id2 == id1 { + c.Fatal("Should not have used the cache") + } + + id3, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("Error building #3: %s", err) + } + if id3 != id2 { + c.Fatal("Should have used the cache") + } +} + +func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) { + // local files are not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddlocalfilewithcache" + name2 := "testbuildaddlocalfilewithcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) { + name := "testbuildaddmultiplelocalfilewithcache" + name2 := "testbuildaddmultiplelocalfilewithcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo Dockerfile /usr/lib/bla/ + RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) { + // local files are not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddlocalfilewithoutcache" + name2 := "testbuildaddlocalfilewithoutcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { + name := "testbuildcopydirbutnotfile" + name2 := "testbuildcopydirbutnotfile2" + + dockerfile := ` + FROM ` + minimalBaseImage() + ` + COPY dir /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "dir/foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + // Check that adding file with similar name doesn't mess with cache + if err := ctx.Add("dir_file", "hello2"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't") + } +} + +func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { + name := "testbuildaddcurrentdirwithcache" + name2 := name + "2" + name3 := name + "3" + name4 := name + "4" + dockerfile := ` + FROM ` + minimalBaseImage() + ` + MAINTAINER dockerio + ADD . /usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + // Check that adding file invalidate cache of "ADD ." + if err := ctx.Add("bar", "hello2"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file invalidate cache of "ADD ." + if err := ctx.Add("foo", "hello1"); err != nil { + c.Fatal(err) + } + id3, err := buildImageFromContext(name3, ctx, true) + if err != nil { + c.Fatal(err) + } + if id2 == id3 { + c.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file to same content with different mtime does not + // invalidate cache of "ADD ." + time.Sleep(1 * time.Second) // wait second because of mtime precision + if err := ctx.Add("foo", "hello1"); err != nil { + c.Fatal(err) + } + id4, err := buildImageFromContext(name4, ctx, true) + if err != nil { + c.Fatal(err) + } + if id3 != id4 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) { + name := "testbuildaddcurrentdirwithoutcache" + name2 := "testbuildaddcurrentdirwithoutcache2" + dockerfile := ` + FROM ` + minimalBaseImage() + ` + MAINTAINER dockerio + ADD . /usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) { + name := "testbuildaddremotefilewithcache" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + id1, err := buildImage(name, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) { + name := "testbuildaddremotefilewithoutcache" + name2 := "testbuildaddremotefilewithoutcache2" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + id1, err := buildImage(name, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name2, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { + name := "testbuildaddremotefilemtime" + name2 := name + "2" + name3 := name + "3" + + files := map[string]string{"baz": "hello"} + server, err := fakeStorage(files) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't - #1") + } + + // Now create a different server with same contents (causes different mtime) + // The cache should still be used + + // allow some time for clock to pass as mtime precision is only 1s + time.Sleep(2 * time.Second) + + server2, err := fakeStorage(files) + if err != nil { + c.Fatal(err) + } + defer server2.Close() + + ctx2, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) + if err != nil { + c.Fatal(err) + } + defer ctx2.Close() + id3, err := buildImageFromContext(name3, ctx2, true) + if err != nil { + c.Fatal(err) + } + if id1 != id3 { + c.Fatal("The cache should have been used but wasn't") + } +} + +func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) { + name := "testbuildaddlocalandremotefilewithcache" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func testContextTar(c *check.C, compression archive.Compression) { + ctx, err := fakeContext( + `FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`, + map[string]string{ + "foo": "bar", + }, + ) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + context, err := archive.Tar(ctx.Dir, compression) + if err != nil { + c.Fatalf("failed to build context tar: %v", err) + } + name := "contexttar" + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = context + + if out, _, err := runCommandWithOutput(buildCmd); err != nil { + c.Fatalf("build failed to complete: %v %v", out, err) + } +} + +func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { + testContextTar(c, archive.Gzip) +} + +func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { + testContextTar(c, archive.Uncompressed) +} + +func (s *DockerSuite) TestBuildNoContext(c *check.C) { + buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") + buildCmd.Stdin = strings.NewReader( + `FROM busybox + CMD ["echo", "ok"]`) + + if out, _, err := runCommandWithOutput(buildCmd); err != nil { + c.Fatalf("build failed to complete: %v %v", out, err) + } + + if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { + c.Fatalf("run produced invalid output: %q, expected %q", out, "ok") + } +} + +// TODO: TestCaching +func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) { + name := "testbuildaddlocalandremotefilewithoutcache" + name2 := "testbuildaddlocalandremotefilewithoutcache2" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalidated but hasn't.") + } +} + +func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildimg" + + _, err := buildImage(name, + `FROM busybox:latest + RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test + VOLUME /test`, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test") + + if expected := "drw-------"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + + if expected := "daemon daemon"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + +} + +// testing #1405 - config.Cmd does not get cleaned up if +// utilizing cache +func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { + name := "testbuildcmdcleanup" + if _, err := buildImage(name, + `FROM busybox + RUN echo "hello"`, + true); err != nil { + c.Fatal(err) + } + + ctx, err := fakeContext(`FROM busybox + RUN echo "hello" + ADD foo /foo + ENTRYPOINT ["/bin/echo"]`, + map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Cmd") + // Cmd must be cleaned up + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } +} + +func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { + name := "testbuildaddnotfound" + expected := "foo: no such file or directory" + + if daemonPlatform == "windows" { + expected = "foo: The system cannot find the file specified" + } + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + ADD foo /usr/local/bar`, + map[string]string{"bar": "hello"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + if !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error %v, must be about missing foo file or directory", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildInheritance(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildinheritance" + + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + c.Fatal(err) + } + ports1 := inspectField(c, name, "Config.ExposedPorts") + + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["/bin/echo"]`, name), + true) + if err != nil { + c.Fatal(err) + } + + res := inspectField(c, name, "Config.Entrypoint") + if expected := "[/bin/echo]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + ports2 := inspectField(c, name, "Config.ExposedPorts") + if ports1 != ports2 { + c.Fatalf("Ports must be same: %s != %s", ports1, ports2) + } +} + +func (s *DockerSuite) TestBuildFails(c *check.C) { + name := "testbuildfails" + _, err := buildImage(name, + `FROM busybox + RUN sh -c "exit 23"`, + true) + if err != nil { + if !strings.Contains(err.Error(), "returned a non-zero code: 23") { + c.Fatalf("Wrong error %v, must be about non-zero code 23", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildOnBuild(c *check.C) { + name := "testbuildonbuild" + _, err := buildImage(name, + `FROM busybox + ONBUILD RUN touch foobar`, + true) + if err != nil { + c.Fatal(err) + } + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + RUN [ -f foobar ]`, name), + true) + if err != nil { + c.Fatal(err) + } +} + +// gh #2446 +func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { + makeLink := `ln -s /foo /bar` + if daemonPlatform == "windows" { + makeLink = `mklink /D C:\bar C:\foo` + } + name := "testbuildaddtosymlinkdest" + ctx, err := fakeContext(`FROM busybox + RUN sh -c "mkdir /foo" + RUN `+makeLink+` + ADD foo /bar/ + RUN sh -c "[ -f /bar/foo ]" + RUN sh -c "[ -f /foo/foo ]"`, + map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { + name := "testbuildescapewhitespace" + + _, err := buildImage(name, ` + # ESCAPE=\ + FROM busybox + MAINTAINER "Docker \ +IO " + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectField(c, name, "Author") + + if res != "\"Docker IO \"" { + c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) + } + +} + +func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { + // Verify that strings that look like ints are still passed as strings + name := "testbuildstringing" + + _, err := buildImage(name, ` + FROM busybox + MAINTAINER 123 + `, true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "inspect", name) + + if !strings.Contains(out, "\"123\"") { + c.Fatalf("Output does not contain the int as a string:\n%s", out) + } + +} + +func (s *DockerSuite) TestBuildDockerignore(c *check.C) { + name := "testbuilddockerignore" + dockerfile := ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ ! -e /bla/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! -e /bla/.git ]]" + RUN sh -c "[[ ! -e v.cc ]]" + RUN sh -c "[[ ! -e src/v.cc ]]" + RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + "src/_vendor/v.cc": "package main", + "src/v.cc": "package main", + "v.cc": "package main", + "dir/foo": "", + ".gitignore": "", + "README.md": "readme", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +**/*.cc +dir`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { + name := "testbuilddockerignorecleanpaths" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "foo", + "foo2": "foo2", + "dir1/foo": "foo in dir1", + ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { + name := "testbuilddockerignoreexceptions" + dockerfile := ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ -e /bla/dir/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/dir/foo1 ]]" + RUN sh -c "[[ -f /bla/dir/e ]]" + RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! -e /bla/.git ]]" + RUN sh -c "[[ -e /bla/dir/a.cc ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + "dir/foo": "", + "dir/foo1": "", + "dir/dir/f1": "", + "dir/dir/foo": "", + "dir/e": "", + "dir/e-dir/foo": "", + ".gitignore": "", + "README.md": "readme", + "dir/a.cc": "hello", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +dir +!dir/e* +!dir/dir/foo +**/*.cc +!**/*.cc`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": "Dockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore Dockerfile correctly:%s", err) + } + + // now try it with ./Dockerfile + ctx.Add(".dockerignore", "./Dockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) + } + +} + +func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls /tmp/Dockerfile + RUN sh -c "! ls /tmp/MyDockerfile" + RUN ls /tmp/.dockerignore` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "Should not use me", + "MyDockerfile": dockerfile, + ".dockerignore": "MyDockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) + } + + // now try it with ./MyDockerfile + ctx.Add(".dockerignore", "./MyDockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) + } + +} + +func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { + name := "testbuilddockerignoredockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/.dockerignore" + RUN ls /tmp/Dockerfile` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": ".dockerignore\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore .dockerignore correctly:%s", err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { + var id1 string + var id2 string + + name := "testbuilddockerignoretouchdockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": "Dockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if id1, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 1") + } + + // Now make sure touching Dockerfile doesn't invalidate the cache + if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 2") + } + + // One more time but just 'touch' it instead of changing the content + if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 3") + } + +} + +func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { + name := "testbuilddockerignorewholedir" + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".gitignore": "", + ".dockerignore": ".*\n", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "."), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { + name := "testbuilddockerignorebadexclusion" + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".gitignore": "", + ".dockerignore": "!\n", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err == nil { + c.Fatalf("Build was supposed to fail but didn't") + } + + if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" { + c.Fatalf("Incorrect output, got:%q", err.Error()) + } +} + +func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.dockerignore ]]" + RUN sh -c "[[ ! -e /Dockerfile ]]" + RUN sh -c "[[ ! -e /file1 ]]" + RUN sh -c "[[ ! -e /dir ]]"` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "file1": "", + "dir/dfile1": "", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + // All of these should result in ignoring all files + for _, variant := range []string{"**", "**/", "**/**", "*"} { + ctx.Add(".dockerignore", variant) + _, err = buildImageFromContext("noname", ctx, true) + c.Assert(err, check.IsNil, check.Commentf("variant: %s", variant)) + } +} + +func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + #RUN sh -c "[[ -e /.dockerignore ]]" + RUN sh -c "[[ -e /Dockerfile ]] && \ + [[ ! -e /file0 ]] && \ + [[ ! -e /dir1/file0 ]] && \ + [[ ! -e /dir2/file0 ]] && \ + [[ ! -e /file1 ]] && \ + [[ ! -e /dir1/file1 ]] && \ + [[ ! -e /dir1/dir2/file1 ]] && \ + [[ ! -e /dir1/file2 ]] && \ + [[ -e /dir1/dir2/file2 ]] && \ + [[ ! -e /dir1/dir2/file4 ]] && \ + [[ ! -e /dir1/dir2/file5 ]] && \ + [[ ! -e /dir1/dir2/file6 ]] && \ + [[ ! -e /dir1/dir3/file7 ]] && \ + [[ ! -e /dir1/dir3/file8 ]] && \ + [[ -e /dir1/dir3 ]] && \ + [[ -e /dir1/dir4 ]] && \ + [[ ! -e 'dir1/dir5/fileAA' ]] && \ + [[ -e 'dir1/dir5/fileAB' ]] && \ + [[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing + + RUN echo all done!` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "file0": "", + "dir1/file0": "", + "dir1/dir2/file0": "", + + "file1": "", + "dir1/file1": "", + "dir1/dir2/file1": "", + + "dir1/file2": "", + "dir1/dir2/file2": "", // remains + + "dir1/dir2/file4": "", + "dir1/dir2/file5": "", + "dir1/dir2/file6": "", + "dir1/dir3/file7": "", + "dir1/dir3/file8": "", + "dir1/dir4/file9": "", + + "dir1/dir5/fileAA": "", + "dir1/dir5/fileAB": "", + "dir1/dir5/fileB": "", + + ".dockerignore": ` +**/file0 +**/*file1 +**/dir1/file2 +dir1/**/file4 +**/dir2/file5 +**/dir1/dir2/file6 +dir1/dir3/** +**/dir4/** +**/file?A +**/file\?B +**/dir5/file. +`, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext("noname", ctx, true) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestBuildLineBreak(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildlinebreak" + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildeolinline" + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass > /tmp/passwd' +RUN echo "foo \n bar"; echo "baz" +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcomments" + _, err := buildImage(name, + `FROM busybox +# This is an ordinary comment. +RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh +RUN [ ! -x /hello.sh ] +# comment with line break \ +RUN chmod +x /hello.sh +RUN [ -x /hello.sh ] +RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] +RUN [ "$(/hello.sh)" = "hello world" ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildusers" + _, err := buildImage(name, + `FROM busybox + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \ + echo 'dockerio:x:1001:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \ + # Add a "supplementary" group for our dockerio user + echo 'supplementary:x:1002:dockerio' >> /etc/group + +# ... and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] +USER 1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER dockerio:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildenvusage" + dockerfile := `FROM busybox +ENV HOME /root +ENV PATH $HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] +ENV FOO /foo/baz +ENV BAR /bar +ENV BAZ $BAR +ENV FOOPATH $PATH:$FOO +RUN [ "$BAR" = "$BAZ" ] +RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] +ENV FROM hello/docker/world +ENV TO /docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +ENV abc=def +ENV ghi=$abc +RUN [ "$ghi" = "def" ] +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildenvusage2" + dockerfile := `FROM busybox +ENV abc=def def="hello world" +RUN [ "$abc,$def" = "def,hello world" ] +ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too" +RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ] +ENV abc=zzz FROM=hello/docker/world +ENV abc=zzz TO=/docker/world/hello +ADD $FROM $TO +RUN [ "$abc,$(cat $TO)" = "zzz,hello" ] +ENV abc 'yyy' +RUN [ $abc = 'yyy' ] +ENV abc= +RUN [ "$abc" = "" ] + +# use grep to make sure if the builder substitutes \$foo by mistake +# we don't get a false positive +ENV abc=\$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) +ENV abc \$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) + +ENV abc=\'foo\' abc2=\"foo\" +RUN [ "$abc,$abc2" = "'foo',\"foo\"" ] +ENV abc "foo" +RUN [ "$abc" = "foo" ] +ENV abc 'foo' +RUN [ "$abc" = 'foo' ] +ENV abc \'foo\' +RUN [ "$abc" = "'foo'" ] +ENV abc \"foo\" +RUN [ "$abc" = '"foo"' ] + +ENV abc=ABC +RUN [ "$abc" = "ABC" ] +ENV def1=${abc:-DEF} def2=${ccc:-DEF} +ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:} +RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ] +ENV mypath=${mypath:+$mypath:}/home +ENV mypath=${mypath:+$mypath:}/away +RUN [ "$mypath" = '/home:/away' ] + +ENV e1=bar +ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11 +RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] + +ENV ee1 bar +ENV ee2 $ee1 +ENV ee3 $ee11 +ENV ee4 \$ee1 +ENV ee5 \$ee11 +RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] + +ENV eee1="foo" eee2='foo' +ENV eee3 "foo" +ENV eee4 'foo' +RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] + +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddScript(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddscript" + dockerfile := ` +FROM busybox +ADD test /test +RUN ["chmod","+x","/test"] +RUN ["/test"] +RUN [ "$(cat /testfile)" = 'test!' ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "test": "#!/bin/sh\necho 'test!' > /testfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddTar(c *check.C) { + // /test/foo is not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddtar" + + ctx := func() *FakeContext { + dockerfile := ` +FROM busybox +ADD test.tar / +RUN cat /test/foo | grep Hi +ADD test.tar /test.tar +RUN cat /test.tar/test/foo | grep Hi +ADD test.tar /unlikely-to-exist +RUN cat /unlikely-to-exist/test/foo | grep Hi +ADD test.tar /unlikely-to-exist-trailing-slash/ +RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi +RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir +ADD test.tar /existing-directory +RUN cat /existing-directory/test/foo | grep Hi +ADD test.tar /existing-directory-trailing-slash/ +RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTar: %v", err) + } + +} + +func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { + name := "testbuildaddbrokentar" + + ctx := func() *FakeContext { + dockerfile := ` +FROM busybox +ADD test.tar /` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + // Corrupt the tar by removing one byte off the end + stat, err := testTar.Stat() + if err != nil { + c.Fatalf("failed to stat tar archive: %v", err) + } + if err := testTar.Truncate(stat.Size() - 1); err != nil { + c.Fatalf("failed to truncate tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err == nil { + c.Fatalf("build should have failed for TestBuildAddBrokenTar") + } +} + +func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { + name := "testbuildaddnontar" + + // Should not try to extract test.tar + ctx, err := fakeContext(` + FROM busybox + ADD test.tar / + RUN test -f /test.tar`, + map[string]string{"test.tar": "not_a_tar_file"}) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed for TestBuildAddNonTar") + } +} + +func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { + // /test/foo is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxz" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz / + RUN cat /test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "-k", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + +} + +func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxzgz" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz.gz / + RUN ls /test.tar.xz.gz` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "-k", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + gzipCompressCmd := exec.Command("gzip", "test.tar.xz") + gzipCompressCmd.Dir = tmpDir + out, _, err = runCommandWithOutput(gzipCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + +} + +func (s *DockerSuite) TestBuildFromGit(c *check.C) { + name := "testbuildfromgit" + git, err := newFakeGit("repo", map[string]string{ + "Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "first": "test git data", + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + _, err = buildImageFromPath(name, git.RepoURL, true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) { + name := "testbuildfromgit" + git, err := newFakeGit("repo", map[string]string{ + "docker/Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "docker/first": "test git data", + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + u := fmt.Sprintf("%s#master:docker", git.RepoURL) + _, err = buildImageFromPath(name, u, true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitwithF(c *check.C) { + name := "testbuildfromgitwithf" + git, err := newFakeGit("repo", map[string]string{ + "myApp/myDockerfile": `FROM busybox + RUN echo hi from Dockerfile`, + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "myApp/myDockerfile", git.RepoURL) + if err != nil { + c.Fatalf("Error on build. Out: %s\nErr: %v", out, err) + } + + if !strings.Contains(out, "hi from Dockerfile") { + c.Fatalf("Missing expected output, got:\n%s", out) + } +} + +func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { + name := "testbuildfromremotetarball" + + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + MAINTAINER docker`) + if err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write(dockerfile); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, check.IsNil) + + defer server.Close() + + _, err = buildImageFromPath(name, server.URL()+"/testT.tar", true) + c.Assert(err, check.IsNil) + + res := inspectField(c, name, "Author") + + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { + name := "testbuildcmdcleanuponentrypoint" + if _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + CMD ["test"] + ENTRYPOINT ["echo"]`, + true); err != nil { + c.Fatal(err) + } + if _, err := buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["cat"]`, name), + true); err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } + + res = inspectField(c, name, "Config.Entrypoint") + if expected := "[cat]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildClearCmd(c *check.C) { + name := "testbuildclearcmd" + _, err := buildImage(name, + `From `+minimalBaseImage()+` + ENTRYPOINT ["/bin/bash"] + CMD []`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected %s", res, "[]") + } +} + +func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. + testRequires(c, DaemonIsLinux) + + name := "testbuildemptycmd" + if _, err := buildImage(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true); err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "null" { + c.Fatalf("Cmd %s, expected %s", res, "null") + } +} + +func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { + name := "testbuildonbuildparent" + if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { + c.Fatal(err) + } + + _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) + if err != nil { + c.Fatal(err) + } + + if !strings.Contains(out, "# Executing 1 build trigger") { + c.Fatal("failed to find the build trigger output", out) + } +} + +func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { + name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) + _, out, err := buildImageWithOut(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true) + // if the error doesn't check for illegal tag name, or the image is built + // then this should fail + if !strings.Contains(out, "Error parsing reference") || strings.Contains(out, "Sending build context to Docker daemon") { + c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) + } +} + +func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { + name := "testbuildcmdshc" + if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Cmd") + + expected := `["/bin/sh","-c","echo cmd"]` + if daemonPlatform == "windows" { + expected = `["cmd","/S","/C","echo cmd"]` + } + + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { + // Test to make sure that when we strcat arrays we take into account + // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't + // look the same + name := "testbuildcmdspaces" + var id1 string + var id2 string + var err error + + if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id1 == id2 { + c.Fatal("Should not have resulted in the same CMD") + } + + // Now do the same with ENTRYPOINT + if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id1 == id2 { + c.Fatal("Should not have resulted in the same ENTRYPOINT") + } + +} + +func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { + name := "testbuildcmdjson" + if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Cmd") + + expected := `["echo","cmd"]` + + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { + + if _, err := buildImage("parent", ` + FROM busybox + ENTRYPOINT exit 130 + `, true); err != nil { + c.Fatal(err) + } + + if _, status, _ := dockerCmdWithError("run", "parent"); status != 130 { + c.Fatalf("expected exit code 130 but received %d", status) + } + + if _, err := buildImage("child", ` + FROM parent + ENTRYPOINT exit 5 + `, true); err != nil { + c.Fatal(err) + } + + if _, status, _ := dockerCmdWithError("run", "child"); status != 5 { + c.Fatalf("expected exit code 5 but received %d", status) + } + +} + +func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) { + var ( + name = "testbuildepinherit" + name2 = "testbuildepinherit2" + expected = `["/bin/sh","-c","echo quux"]` + ) + + if daemonPlatform == "windows" { + expected = `["cmd","/S","/C","echo quux"]` + } + + if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { + c.Fatal(err) + } + + if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name2, "Config.Entrypoint") + + if res != expected { + c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) + } + + out, _ := dockerCmd(c, "run", name2) + + expected = "quux" + + if strings.TrimSpace(out) != expected { + c.Fatalf("Expected output is %s, got %s", expected, out) + } + +} + +func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { + name := "testbuildentrypoint" + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT echo`, + true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--rm", name) +} + +func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildexoticshellinterpolation" + + _, err := buildImage(name, ` + FROM busybox + + ENV SOME_VAR a.b.c + + RUN [ "$SOME_VAR" = 'a.b.c' ] + RUN [ "${SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR%.*}" = 'a.b' ] + RUN [ "${SOME_VAR%%.*}" = 'a' ] + RUN [ "${SOME_VAR#*.}" = 'b.c' ] + RUN [ "${SOME_VAR##*.}" = 'c' ] + RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] + RUN [ "${#SOME_VAR}" = '5' ] + + RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] + RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] + RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] + `, false) + if err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { + // This testcase is supposed to generate an error because the + // JSON array we're passing in on the CMD uses single quotes instead + // of double quotes (per the JSON spec). This means we interpret it + // as a "string" instead of "JSON array" and pass it on to "sh -c" and + // it should barf on it. + name := "testbuildsinglequotefails" + + if _, err := buildImage(name, + `FROM busybox + CMD [ '/bin/sh', '-c', 'echo hi' ]`, + true); err != nil { + c.Fatal(err) + } + + if _, _, err := dockerCmdWithError("run", "--rm", name); err == nil { + c.Fatal("The image was not supposed to be able to run") + } + +} + +func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { + name := "testbuildverboseout" + expected := "\n123\n" + + if daemonPlatform == "windows" { + expected = "\n123\r\n" + } + + _, out, err := buildImageWithOut(name, + `FROM busybox +RUN echo 123`, + false) + + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, expected) { + c.Fatalf("Output should contain %q: %q", "123", out) + } + +} + +func (s *DockerSuite) TestBuildWithTabs(c *check.C) { + name := "testbuildwithtabs" + _, err := buildImage(name, + "FROM busybox\nRUN echo\tone\t\ttwo", true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` + expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + if daemonPlatform == "windows" { + expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]` + expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + } + if res != expected1 && res != expected2 { + c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) + } +} + +func (s *DockerSuite) TestBuildLabels(c *check.C) { + name := "testbuildlabel" + expected := `{"License":"GPL","Vendor":"Acme"}` + _, err := buildImage(name, + `FROM busybox + LABEL Vendor=Acme + LABEL License GPL`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { + name := "testbuildlabelcache" + + id1, err := buildImage(name, + `FROM busybox + LABEL Vendor=Acme`, false) + if err != nil { + c.Fatalf("Build 1 should have worked: %v", err) + } + + id2, err := buildImage(name, + `FROM busybox + LABEL Vendor=Acme`, true) + if err != nil || id1 != id2 { + c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL Vendor=Acme1`, true) + if err != nil || id1 == id2 { + c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL Vendor Acme`, true) // Note: " " and "=" should be same + if err != nil || id1 != id2 { + c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) + } + + // Now make sure the cache isn't used by mistake + id1, err = buildImage(name, + `FROM busybox + LABEL f1=b1 f2=b2`, false) + if err != nil { + c.Fatalf("Build 5 should have worked: %q", err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL f1="b1 f2=b2"`, true) + if err != nil || id1 == id2 { + c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) + } + +} + +func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { + // This test makes sure that -q works correctly when build is successful: + // stdout has only the image ID (long image ID) and stderr is empty. + var stdout, stderr string + var err error + outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$") + + tt := []struct { + Name string + BuildFunc func(string) + }{ + { + Name: "quiet_build_stdin_success", + BuildFunc: func(name string) { + _, stdout, stderr, err = buildImageWithStdoutStderr(name, "FROM busybox", true, "-q", "--force-rm", "--rm") + }, + }, + { + Name: "quiet_build_ctx_success", + BuildFunc: func(name string) { + ctx, err := fakeContext("FROM busybox", map[string]string{ + "quiet_build_success_fctx": "test", + }) + if err != nil { + c.Fatalf("Failed to create context: %s", err.Error()) + } + defer ctx.Close() + _, stdout, stderr, err = buildImageFromContextWithStdoutStderr(name, ctx, true, "-q", "--force-rm", "--rm") + }, + }, + { + Name: "quiet_build_git_success", + BuildFunc: func(name string) { + git, err := newFakeGit("repo", map[string]string{ + "Dockerfile": "FROM busybox", + }, true) + if err != nil { + c.Fatalf("Failed to create the git repo: %s", err.Error()) + } + defer git.Close() + _, stdout, stderr, err = buildImageFromGitWithStdoutStderr(name, git, true, "-q", "--force-rm", "--rm") + + }, + }, + } + + for _, te := range tt { + te.BuildFunc(te.Name) + if err != nil { + c.Fatalf("Test %s shouldn't fail, but got the following error: %s", te.Name, err.Error()) + } + if outRegexp.Find([]byte(stdout)) == nil { + c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, stdout) + } + + if stderr != "" { + c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, stderr) + } + } + +} + +func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + testRequires(c, Network) + testName := "quiet_build_not_exists_image" + buildCmd := "FROM busybox11" + _, _, qstderr, qerr := buildImageWithStdoutStderr(testName, buildCmd, false, "-q", "--force-rm", "--rm") + _, vstdout, vstderr, verr := buildImageWithStdoutStderr(testName, buildCmd, false, "--force-rm", "--rm") + if verr == nil || qerr == nil { + c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", testName)) + } + if qstderr != vstdout+vstderr { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, qstderr, vstdout+vstderr)) + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + tt := []struct { + TestName string + BuildCmds string + }{ + {"quiet_build_no_from_at_the_beginning", "RUN whoami"}, + {"quiet_build_unknown_instr", "FROMD busybox"}, + } + + for _, te := range tt { + _, _, qstderr, qerr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "-q", "--force-rm", "--rm") + _, vstdout, vstderr, verr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "--force-rm", "--rm") + if verr == nil || qerr == nil { + c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName)) + } + if qstderr != vstdout+vstderr { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr)) + } + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { + // This test ensures that when given a wrong URL, stderr in quiet mode and + // stderr in verbose mode are identical. + // TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout + URL := "http://something.invalid" + Name := "quiet_build_wrong_remote" + _, _, qstderr, qerr := buildImageWithStdoutStderr(Name, "", false, "-q", "--force-rm", "--rm", URL) + _, _, vstderr, verr := buildImageWithStdoutStderr(Name, "", false, "--force-rm", "--rm", URL) + if qerr == nil || verr == nil { + c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", Name)) + } + if qstderr != vstderr { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", Name, qstderr, vstderr)) + } +} + +func (s *DockerSuite) TestBuildStderr(c *check.C) { + // This test just makes sure that no non-error output goes + // to stderr + name := "testbuildstderr" + _, _, stderr, err := buildImageWithStdoutStderr(name, + "FROM busybox\nRUN echo one", true) + if err != nil { + c.Fatal(err) + } + + if runtime.GOOS == "windows" && + daemonPlatform != "windows" { + // Windows to non-Windows should have a security warning + if !strings.Contains(stderr, "SECURITY WARNING:") { + c.Fatalf("Stderr contains unexpected output: %q", stderr) + } + } else { + // Other platform combinations should have no stderr written too + if stderr != "" { + c.Fatalf("Stderr should have been empty, instead it's: %q", stderr) + } + } +} + +func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { + testRequires(c, UnixCli) // test uses chown: not available on windows + testRequires(c, DaemonIsLinux) + + name := "testbuildchownsinglefile" + + ctx, err := fakeContext(` +FROM busybox +COPY test / +RUN ls -l /test +RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] +`, map[string]string{ + "test": "test", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { + name := "testbuildsymlinkbreakout" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` + from busybox + add symlink.tar / + add inject /symlink/ + `), 0644); err != nil { + c.Fatal(err) + } + inject := filepath.Join(ctx, "inject") + if err := ioutil.WriteFile(inject, nil, 0644); err != nil { + c.Fatal(err) + } + f, err := os.Create(filepath.Join(ctx, "symlink.tar")) + if err != nil { + c.Fatal(err) + } + w := tar.NewWriter(f) + w.WriteHeader(&tar.Header{ + Name: "symlink2", + Typeflag: tar.TypeSymlink, + Linkname: "/../../../../../../../../../../../../../../", + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.WriteHeader(&tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: filepath.Join("symlink2", tmpdir), + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.Close() + f.Close() + if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil { + c.Fatal(err) + } + if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { + c.Fatal("symlink breakout - inject") + } else if !os.IsNotExist(err) { + c.Fatalf("unexpected error: %v", err) + } +} + +func (s *DockerSuite) TestBuildXZHost(c *check.C) { + // /usr/local/sbin/xz gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildxzhost" + + ctx, err := fakeContext(` +FROM busybox +ADD xz /usr/local/sbin/ +RUN chmod 755 /usr/local/sbin/xz +ADD test.xz / +RUN [ ! -e /injected ]`, + map[string]string{ + "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + + "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + + "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", + "xz": "#!/bin/sh\ntouch /injected", + }) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { + // /foo/file gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127 + var ( + name = "testbuildvolumescontent" + expected = "some text" + volName = "/foo" + ) + + if daemonPlatform == "windows" { + volName = "C:/foo" + } + + ctx, err := fakeContext(` +FROM busybox +COPY content /foo/file +VOLUME `+volName+` +CMD cat /foo/file`, + map[string]string{ + "content": expected, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, false); err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", name) + if out != expected { + c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) + } + +} + +func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { + + ctx, err := fakeContext(`FROM busybox + RUN echo from Dockerfile`, + map[string]string{ + "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", + "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", + "files/dFile": "FROM busybox\nRUN echo from files/dFile", + "dFile": "FROM busybox\nRUN echo from dFile", + "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test1 should have used Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".") + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "from files/Dockerfile") { + c.Fatalf("test2 should have used files/Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "from files/dFile") { + c.Fatalf("test3 should have used files/dFile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "from dFile") { + c.Fatalf("test4 should have used dFile, output:%s", out) + } + + dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") + c.Assert(err, check.IsNil) + nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") + if _, err = os.Create(nonDockerfileFile); err != nil { + c.Fatal(err) + } + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") + + if err == nil { + c.Fatalf("test5 was supposed to fail to find passwd") + } + + if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) { + c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") + if err != nil { + c.Fatalf("test6 failed: %s", err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test6 should have used root Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") + if err != nil { + c.Fatalf("test7 failed: %s", err) + } + if !strings.Contains(out, "from files/Dockerfile") { + c.Fatalf("test7 should have used files Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") + if err == nil || !strings.Contains(out, "must be within the build context") { + c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) + } + + tmpDir := os.TempDir() + out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir) + if err != nil { + c.Fatalf("test9 - failed: %s", err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test9 should have used root Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") + if err != nil { + c.Fatalf("test10 should have worked: %s", err) + } + if !strings.Contains(out, "from files/dFile2") { + c.Fatalf("test10 should have used files/dFile2, output:%s", out) + } + +} + +func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + testRequires(c, DaemonIsLinux) + + ctx, err := fakeContext(`FROM busybox + RUN echo from dockerfile`, + map[string]string{ + "dockerfile": "FROM busybox\nRUN echo from dockerfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + testRequires(c, DaemonIsLinux) + + ctx, err := fakeContext(`FROM busybox +RUN echo from Dockerfile`, + map[string]string{ + "dockerfile": "FROM busybox\nRUN echo from dockerfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { + server, err := fakeStorage(map[string]string{"baz": `FROM busybox +RUN echo from baz +COPY * /tmp/ +RUN find /tmp/`}) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(`FROM busybox +RUN echo from Dockerfile`, + map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from baz") || + strings.Contains(out, "/tmp/baz") || + !strings.Contains(out, "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { + testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why + ctx, err := fakeContext(`FROM busybox +RUN echo "from Dockerfile"`, + map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-") + dockerCommand.Dir = ctx.Dir + dockerCommand.Stdin = strings.NewReader(`FROM busybox +RUN echo "from baz" +COPY * /tmp/ +RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`) + out, status, err := runCommandWithOutput(dockerCommand) + if err != nil || status != 0 { + c.Fatalf("Error building: %s", err) + } + + if !strings.Contains(out, "from baz") || + strings.Contains(out, "/tmp/baz") || + !strings.Contains(out, "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { + name := "testbuildfromofficial" + fromNames := []string{ + "busybox", + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + for idx, fromName := range fromNames { + imgName := fmt.Sprintf("%s%d", name, idx) + _, err := buildImage(imgName, "FROM "+fromName, true) + if err != nil { + c.Errorf("Build failed using FROM %s: %s", fromName, err) + } + deleteImages(imgName) + } +} + +func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { + testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) + testRequires(c, DaemonIsLinux) + + name := "testbuilddockerfileoutsidecontext" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { + c.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(wd) + if err := os.Chdir(ctx); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { + c.Fatal(err) + } + + for _, dockerfilePath := range []string{ + filepath.Join("..", "outsideDockerfile"), + filepath.Join(ctx, "dockerfile1"), + filepath.Join(ctx, "dockerfile2"), + } { + result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") + c.Assert(result, icmd.Matches, icmd.Expected{ + Err: "must be within the build context", + ExitCode: 1, + }) + deleteImages(name) + } + + os.Chdir(tmpdir) + + // Path to Dockerfile should be resolved relative to working directory, not relative to context. + // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail + out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) + if err == nil { + c.Fatalf("Expected error. Out: %s", out) + } +} + +func (s *DockerSuite) TestBuildSpaces(c *check.C) { + // Test to make sure that leading/trailing spaces on a command + // doesn't change the error msg we get + var ( + err1 error + err2 error + ) + + name := "testspaces" + ctx, err := fakeContext("FROM busybox\nCOPY\n", + map[string]string{ + "Dockerfile": "FROM busybox\nCOPY\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { + c.Fatal("Build 1 was supposed to fail, but didn't") + } + + ctx.Add("Dockerfile", "FROM busybox\nCOPY ") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 2 was supposed to fail, but didn't") + } + + removeLogTimestamps := func(s string) string { + return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) + } + + // Skip over the times + e1 := removeLogTimestamps(err1.Error()) + e2 := removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) + } + + ctx.Add("Dockerfile", "FROM busybox\n COPY") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 3 was supposed to fail, but didn't") + } + + // Skip over the times + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) + } + + ctx.Add("Dockerfile", "FROM busybox\n COPY ") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 4 was supposed to fail, but didn't") + } + + // Skip over the times + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) + } + +} + +func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) { + // Test to make sure that spaces in quotes aren't lost + name := "testspacesquotes" + + dockerfile := `FROM busybox +RUN echo " \ + foo "` + + _, out, err := buildImageWithOut(name, dockerfile, false) + if err != nil { + c.Fatal("Build failed:", err) + } + + expecting := "\n foo \n" + // Windows uses the builtin echo, which preserves quotes + if daemonPlatform == "windows" { + expecting = "\" foo \"" + } + if !strings.Contains(out, expecting) { + c.Fatalf("Bad output: %q expecting to contain %q", out, expecting) + } + +} + +// #4393 +func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) { + testRequires(c, DaemonIsLinux) // TODO Windows: This should error out + buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-") + buildCmd.Stdin = strings.NewReader(` + FROM busybox + RUN touch /foo + VOLUME /foo + `) + + out, _, err := runCommandWithOutput(buildCmd) + if err == nil || !strings.Contains(out, "file exists") { + c.Fatalf("expected build to fail when file exists in container at requested volume path") + } + +} + +func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { + // Test to make sure that all Dockerfile commands (except the ones listed + // in skipCmds) will generate an error if no args are provided. + // Note: INSERT is deprecated so we exclude it because of that. + skipCmds := map[string]struct{}{ + "CMD": {}, + "RUN": {}, + "ENTRYPOINT": {}, + "INSERT": {}, + } + + if daemonPlatform == "windows" { + skipCmds = map[string]struct{}{ + "CMD": {}, + "RUN": {}, + "ENTRYPOINT": {}, + "INSERT": {}, + "STOPSIGNAL": {}, + "ARG": {}, + "USER": {}, + "EXPOSE": {}, + } + } + + for cmd := range command.Commands { + cmd = strings.ToUpper(cmd) + if _, ok := skipCmds[cmd]; ok { + continue + } + + var dockerfile string + if cmd == "FROM" { + dockerfile = cmd + } else { + // Add FROM to make sure we don't complain about it missing + dockerfile = "FROM busybox\n" + cmd + } + + ctx, err := fakeContext(dockerfile, map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + var out string + if out, err = buildImageFromContext("args", ctx, true); err == nil { + c.Fatalf("%s was supposed to fail. Out:%s", cmd, out) + } + if !strings.Contains(err.Error(), cmd+" requires") { + c.Fatalf("%s returned the wrong type of error:%s", cmd, err) + } + } + +} + +func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) { + testRequires(c, DaemonIsLinux) + _, out, err := buildImageWithOut("sc", "FROM scratch", true) + if err == nil { + c.Fatalf("Build was supposed to fail") + } + if !strings.Contains(out, "No image was generated") { + c.Fatalf("Wrong error message: %v", out) + } +} + +func (s *DockerSuite) TestBuildDotDotFile(c *check.C) { + ctx, err := fakeContext("FROM busybox\n", + map[string]string{ + "..gitme": "", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext("sc", ctx, false); err != nil { + c.Fatalf("Build was supposed to work: %s", err) + } +} + +func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) { + testRequires(c, DaemonIsLinux) // No hello-world Windows image + name := "testbuildrunonejson" + + ctx, err := fakeContext(`FROM hello-world:frozen +RUN [ "/hello" ]`, map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".") + if err != nil { + c.Fatalf("failed to build the image: %s, %v", out, err) + } + + if !strings.Contains(out, "Hello from Docker") { + c.Fatalf("bad output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) { + name := "testbuildemptystringvolume" + + _, err := buildImage(name, ` + FROM busybox + ENV foo="" + VOLUME $foo + `, false) + if err == nil { + c.Fatal("Should have failed to build") + } + +} + +func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, DaemonIsLinux) + + cgroupParent := "test" + data, err := ioutil.ReadFile("/proc/self/cgroup") + if err != nil { + c.Fatalf("failed to read '/proc/self/cgroup - %v", err) + } + selfCgroupPaths := parseCgroupPaths(string(data)) + _, found := selfCgroupPaths["memory"] + if !found { + c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths) + } + cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-") + cmd.Stdin = strings.NewReader(` +FROM busybox +RUN cat /proc/self/cgroup +`) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), out) + c.Assert(err, check.IsNil) + if !m { + c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, out) + } +} + +func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) { + // Check to make sure our build output prints the Dockerfile cmd + // property - there was a bug that caused it to be duplicated on the + // Step X line + name := "testbuildnodupoutput" + + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN env`, false) + if err != nil { + c.Fatalf("Build should have worked: %q", err) + } + + exp := "\nStep 2/2 : RUN env\n" + if !strings.Contains(out, exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + } +} + +// GH15826 +func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) { + // Explicit check to ensure that build starts from step 1 rather than 0 + name := "testbuildstartsfromone" + + _, out, err := buildImageWithOut(name, ` + FROM busybox`, false) + if err != nil { + c.Fatalf("Build should have worked: %q", err) + } + + exp := "\nStep 1/1 : FROM busybox\n" + if !strings.Contains(out, exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + } +} + +func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) { + // Test to make sure the bad command is quoted with just "s and + // not as a Go []string + name := "testbuildbadrunerrmsg" + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN badEXE a1 \& a2 a3`, false) // tab between a2 and a3 + if err == nil { + c.Fatal("Should have failed to build") + } + shell := "/bin/sh -c" + exitCode := "127" + if daemonPlatform == "windows" { + shell = "cmd /S /C" + // architectural - Windows has to start the container to determine the exe is bad, Linux does not + exitCode = "1" + } + exp := `The command '` + shell + ` badEXE a1 \& a2 a3' returned a non-zero code: ` + exitCode + if !strings.Contains(out, exp) { + c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp) + } +} + +func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-build") + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, repoName) + + name := "testtrustedbuild" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err := runCommandWithOutput(buildCmd) + if err != nil { + c.Fatalf("Error running trusted build: %s\n%s", err, out) + } + + if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) { + c.Fatalf("Unexpected output on trusted build:\n%s", out) + } + + // We should also have a tag reference for the image. + if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 { + c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) + } + + // We should now be able to remove the tag reference. + if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 { + c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) + } +} + +func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL) + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, repoName) + + name := "testtrustedbuilduntrustedtag" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out) + } + + if !strings.Contains(out, "does not have trust data for") { + c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tempDir) + + // Make a real context directory in this temp directory with a simple + // Dockerfile. + realContextDirname := filepath.Join(tempDir, "context") + if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil { + c.Fatal(err) + } + + if err = ioutil.WriteFile( + filepath.Join(realContextDirname, "Dockerfile"), + []byte(` + FROM busybox + RUN echo hello world + `), + os.FileMode(0644), + ); err != nil { + c.Fatal(err) + } + + // Make a symlink to the real context directory. + contextSymlinkName := filepath.Join(tempDir, "context_link") + if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil { + c.Fatal(err) + } + + // Executing the build with the symlink as the specified context should + // *not* fail. + if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 { + c.Fatalf("build failed with exit status %d: %s", exitStatus, out) + } +} + +func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create the releases role + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the releases role + otherTag := fmt.Sprintf("%s:other", repoName) + dockerCmd(c, "tag", "busybox", otherTag) + + pushCmd := exec.Command(dockerBinary, "push", otherTag) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) + s.assertTargetInRoles(c, repoName, "other", "targets/releases") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + out, status := dockerCmd(c, "rmi", otherTag) + c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + + name := "testtrustedbuildreleasesrole" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err = runCommandWithOutput(buildCmd) + c.Assert(err, check.IsNil, check.Commentf("Trusted build failed: %s", out)) + c.Assert(out, checker.Contains, fmt.Sprintf("FROM %s@sha", repoName)) +} + +func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create a non-releases delegation role + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the other role + otherTag := fmt.Sprintf("%s:other", repoName) + dockerCmd(c, "tag", "busybox", otherTag) + + pushCmd := exec.Command(dockerBinary, "push", otherTag) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) + s.assertTargetInRoles(c, repoName, "other", "targets/other") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + out, status := dockerCmd(c, "rmi", otherTag) + c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + + name := "testtrustedbuildotherrole" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err = runCommandWithOutput(buildCmd) + c.Assert(err, check.NotNil, check.Commentf("Trusted build expected to fail: %s", out)) +} + +// Issue #15634: COPY fails when path starts with "null" +func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { + name := "testbuildnullstringinaddcopyvolume" + + volName := "nullvolume" + + if daemonPlatform == "windows" { + volName = `C:\\nullvolume` + } + + ctx, err := fakeContext(` + FROM busybox + + ADD null / + COPY nullfile / + VOLUME `+volName+` + `, + map[string]string{ + "null": "test1", + "nullfile": "test2", + }, + ) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestBuildStopSignal(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet + imgName := "test_build_stop_signal" + _, err := buildImage(imgName, + `FROM busybox + STOPSIGNAL SIGKILL`, + true) + c.Assert(err, check.IsNil) + res := inspectFieldJSON(c, imgName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, expected SIGKILL", res) + } + + containerName := "test-container-stop-signal" + dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top") + + res = inspectFieldJSON(c, containerName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, expected SIGKILL", res) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)} + var dockerfile string + if daemonPlatform == "windows" { + // Bugs in Windows busybox port - use the default base image and native cmd stuff + dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+` + ARG %s + RUN echo %%%s%% + CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey) + } else { + dockerfile = fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s + CMD echo $%s`, envKey, envKey, envKey) + + } + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) + } + + containerName := "bldargCont" + out, _ := dockerCmd(c, "run", "--name", containerName, imgName) + out = strings.Trim(out, " \r\n'") + if out != "" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envDef := "bar1" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s`, envKey, envDef) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) + } + + out, _ := dockerCmd(c, "history", "--no-trunc", imgName) + outputTabs := strings.Split(out, "\n")[1] + if !strings.Contains(outputTabs, envDef) { + c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s`, envKey, envKey) + + origImgID := "" + var err error + if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { + c.Fatal(err) + } + + imgNameCache := "bldargtestcachehit" + if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID != origImgID { + if err != nil { + c.Fatal(err) + } + c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + extraEnvKey := "foo1" + extraEnvVal := "bar1" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ARG %s + RUN echo $%s`, envKey, extraEnvKey, envKey) + + origImgID := "" + var err error + if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { + c.Fatal(err) + } + + imgNameCache := "bldargtestcachemiss" + args = append(args, "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal)) + if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { + if err != nil { + c.Fatal(err) + } + c.Fatalf("build used cache, expected a miss!") + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + newEnvVal := "bar1" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s`, envKey, envKey) + + origImgID := "" + var err error + if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { + c.Fatal(err) + } + + imgNameCache := "bldargtestcachemiss" + args = []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal), + } + if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { + if err != nil { + c.Fatal(err) + } + c.Fatalf("build used cache, expected a miss!") + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ENV %s %s + RUN echo $%s + CMD echo $%s + `, envKey, envKey, envValOveride, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ENV %s %s + ARG %s + RUN echo $%s + CMD echo $%s + `, envKey, envValOveride, envKey, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + + wdVar := "WDIR" + wdVal := "/tmp/" + addVar := "AFILE" + addVal := "addFile" + copyVar := "CFILE" + copyVal := "copyFile" + envVar := "foo" + envVal := "bar" + exposeVar := "EPORT" + exposeVal := "9999" + userVar := "USER" + userVal := "testUser" + volVar := "VOL" + volVal := "/testVol/" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), + "--build-arg", fmt.Sprintf("%s=%s", addVar, addVal), + "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), + "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), + "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), + "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), + "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), + } + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + ARG %s + WORKDIR ${%s} + ARG %s + ADD ${%s} testDir/ + ARG %s + COPY $%s testDir/ + ARG %s + ENV %s=${%s} + ARG %s + EXPOSE $%s + ARG %s + USER $%s + ARG %s + VOLUME ${%s}`, + wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, + envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar), + map[string]string{ + addVal: "some stuff", + copyVal: "some stuff", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(imgName, ctx, true, args...); err != nil { + c.Fatal(err) + } + + var resMap map[string]interface{} + var resArr []string + res := "" + res = inspectField(c, imgName, "Config.WorkingDir") + if res != filepath.ToSlash(filepath.Clean(wdVal)) { + c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res) + } + + inspectFieldAndMarshall(c, imgName, "Config.Env", &resArr) + + found := false + for _, v := range resArr { + if fmt.Sprintf("%s=%s", envVar, envVal) == v { + found = true + break + } + } + if !found { + c.Fatalf("Config.Env value mismatch. Expected to exist: %s=%s, got: %v", + envVar, envVal, resArr) + } + + inspectFieldAndMarshall(c, imgName, "Config.ExposedPorts", &resMap) + if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok { + c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap) + } + + res = inspectField(c, imgName, "Config.User") + if res != userVal { + c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res) + } + + inspectFieldAndMarshall(c, imgName, "Config.Volumes", &resMap) + if _, ok := resMap[volVal]; !ok { + c.Fatalf("Config.Volumes value mismatch. Expected volume: %s, got: %v", volVal, resMap) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + envKey := "foo" + envVal := "bar" + envKey1 := "foo1" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ENV %s %s + ENV %s ${%s} + RUN echo $%s + CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + ARG %s + CMD echo $%s`, envKey, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("able to access environment variable in output: %q expected to be missing", out) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support --build-arg + imgName := "bldargtest" + envKey := "HTTP_PROXY" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s + ENV %s $%s + RUN echo $%s + CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 1 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + + warnStr := "[Warning] One or more build-args" + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); !strings.Contains(out, warnStr) { + c.Fatalf("build completed without warning: %q %q", out, err) + } else if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + +} + +func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + args := []string{ + "build", + "--build-arg", fmt.Sprintf("FOO1=fromcmd"), + "--build-arg", fmt.Sprintf("FOO2="), + "--build-arg", fmt.Sprintf("FOO3"), // set in env + "--build-arg", fmt.Sprintf("FOO4"), // not set in env + "--build-arg", fmt.Sprintf("FOO5=fromcmd"), + // FOO6 is not set at all + "--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning + "--build-arg", fmt.Sprintf("FOO8="), // should produce a warning + "--build-arg", fmt.Sprintf("FOO9"), // should produce a warning + ".", + } + + dockerfile := fmt.Sprintf(`FROM busybox + ARG FOO1=fromfile + ARG FOO2=fromfile + ARG FOO3=fromfile + ARG FOO4=fromfile + ARG FOO5 + ARG FOO6 + RUN env + RUN [ "$FOO1" == "fromcmd" ] + RUN [ "$FOO2" == "" ] + RUN [ "$FOO3" == "fromenv" ] + RUN [ "$FOO4" == "fromfile" ] + RUN [ "$FOO5" == "fromcmd" ] + # The following should not exist at all in the env + RUN [ "$(env | grep FOO6)" == "" ] + RUN [ "$(env | grep FOO7)" == "" ] + RUN [ "$(env | grep FOO8)" == "" ] + RUN [ "$(env | grep FOO9)" == "" ] + `) + + ctx, err := fakeContext(dockerfile, nil) + c.Assert(err, check.IsNil) + defer ctx.Close() + + cmd := exec.Command(dockerBinary, args...) + cmd.Dir = ctx.Dir + cmd.Env = append(os.Environ(), + "FOO1=fromenv", + "FOO2=fromenv", + "FOO3=fromenv") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + // Now check to make sure we got a warning msg about unused build-args + i := strings.Index(out, "[Warning]") + if i < 0 { + c.Fatalf("Missing the build-arg warning in %q", out) + } + + out = out[i:] // "out" should contain just the warning message now + + // These were specified on a --build-arg but no ARG was in the Dockerfile + c.Assert(out, checker.Contains, "FOO7") + c.Assert(out, checker.Contains, "FOO8") + c.Assert(out, checker.Contains, "FOO9") +} + +func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + envKey3 := "foo3" + args := []string{} + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s="" + ARG %s='' + ARG %s="''" + ARG %s='""' + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3, + envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3, + envKey2, envKey3) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + args := []string{} + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s= + ARG %s="" + ARG %s='' + RUN [ "$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + args := []string{} + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN env`, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envKey) != 1 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", out) + } +} + +func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { + volName := "testname:/foo" + + if daemonPlatform == "windows" { + volName = "testname:C:\\foo" + } + dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops") + + dockerFile := `FROM busybox + VOLUME ` + volName + ` + RUN ls /foo/oops + ` + _, err := buildImage("test", dockerFile, false) + c.Assert(err, check.NotNil, check.Commentf("image build should have failed")) +} + +func (s *DockerSuite) TestBuildTagEvent(c *check.C) { + since := daemonUnixTime(c) + + dockerFile := `FROM busybox + RUN echo events + ` + _, err := buildImage("test", dockerFile, false) + c.Assert(err, check.IsNil) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image") + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, "test:latest", "image") + var foundTag bool + for _, a := range actions { + if a == "tag" { + foundTag = true + break + } + } + + c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out)) +} + +// #15780 +func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER test-15780 + ` + cmd := exec.Command(dockerBinary, "build", "-t", "tag1", "-t", "tag2:v2", + "-t", "tag1:latest", "-t", "tag1", "--no-cache", "-") + cmd.Stdin = strings.NewReader(dockerfile) + _, err := runCommand(cmd) + c.Assert(err, check.IsNil) + + id1, err := getIDByName("tag1") + c.Assert(err, check.IsNil) + id2, err := getIDByName("tag2:v2") + c.Assert(err, check.IsNil) + c.Assert(id1, check.Equals, id2) +} + +// #17290 +func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY . ./`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + // warm up cache + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + // add new file to context, should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) + c.Assert(err, checker.IsNil) + + _, out, err := buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Not(checker.Contains), "Using cache") + +} + +func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY asymlink target`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + id, err := buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target") + c.Assert(out, checker.Matches, "bar") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + id, out, err = buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Using cache") + + out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target") + c.Assert(out, checker.Matches, "baz") +} + +func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY asymlink /`, + map[string]string{ + "foo/abc": "bar", + "foo/def": "baz", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + id, err := buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") + c.Assert(out, checker.Matches, "barbaz") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) + c.Assert(err, checker.IsNil) + + id, out, err = buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Using cache") + + out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") + c.Assert(out, checker.Matches, "barbax") + +} + +// TestBuildSymlinkBasename tests that target file gets basename from symlink, +// not from the target file. +func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY asymlink /`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + id, err := buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink") + c.Assert(out, checker.Matches, "bar") + +} + +// #17827 +func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { + name := "testbuildrootsource" + ctx, err := fakeContext(` + FROM busybox + COPY / /data`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + // warm up cache + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + // change file, should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + _, out, err := buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Not(checker.Contains), "Using cache") +} + +// #19375 +func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { + cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git") + cmd.Env = append(cmd.Env, "PATH=") + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") + + cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git") + cmd.Env = append(cmd.Env, "PATH=") + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") +} + +// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir +func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildworkdirwindowspath" + + _, err := buildImage(name, ` + FROM `+WindowsBaseImage+` + RUN mkdir C:\\work + WORKDIR C:\\work + RUN if "%CD%" NEQ "C:\work" exit -1 + `, true) + + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildLabel(c *check.C) { + name := "testbuildlabel" + testLabel := "foo" + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo +`, false, "--label", testLabel) + + c.Assert(err, checker.IsNil) + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + } +} + +func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) { + name := "testbuildlabel" + + _, err := buildImage(name, "FROM busybox", false, "--label", "foo=bar") + + c.Assert(err, checker.IsNil) + + res, err := inspectImage(name, "json .Config.Labels") + c.Assert(err, checker.IsNil) + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + v, ok := labels["foo"] + if !ok { + c.Fatal("label `foo` not found in image") + } + c.Assert(v, checker.Equals, "bar") +} + +func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) { + name := "testbuildlabelcachecommit" + testLabel := "foo" + + if _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo + `, false); err != nil { + c.Fatal(err) + } + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo +`, true, "--label", testLabel) + + c.Assert(err, checker.IsNil) + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + } +} + +func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) { + name := "testbuildlabelmultiple" + testLabels := map[string]string{ + "foo": "bar", + "123": "456", + } + + labelArgs := []string{} + + for k, v := range testLabels { + labelArgs = append(labelArgs, "--label", k+"="+v) + } + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo +`, false, labelArgs...) + + if err != nil { + c.Fatal("error building image with labels", err) + } + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + for k, v := range testLabels { + if x, ok := labels[k]; !ok || x != v { + c.Fatalf("label %s=%s not found in image", k, v) + } + } +} + +func (s *DockerSuite) TestBuildLabelOverwrite(c *check.C) { + name := "testbuildlabeloverwrite" + testLabel := "foo" + testValue := "bar" + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL `+testLabel+`+ foo +`, false, []string{"--label", testLabel + "=" + testValue}...) + + if err != nil { + c.Fatal("error building image with labels", err) + } + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + v, ok := labels[testLabel] + if !ok { + c.Fatal("label not found in image") + } + + if v != testValue { + c.Fatal("label not overwritten") + } +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) { + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + baseImage := privateRegistryURL + "/baseimage" + + _, err := buildImage(baseImage, ` + FROM busybox + ENV env1 val1 + `, true) + + c.Assert(err, checker.IsNil) + + dockerCmd(c, "push", baseImage) + dockerCmd(c, "rmi", baseImage) + + _, err = buildImage(baseImage, fmt.Sprintf(` + FROM %s + ENV env2 val2 + `, baseImage), true) + + c.Assert(err, checker.IsNil) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + // make sure the image is pulled when building + dockerCmd(c, "rmi", repoName) + + buildCmd := exec.Command(dockerBinary, "--config", tmp, "build", "-") + buildCmd.Stdin = strings.NewReader(fmt.Sprintf("FROM %s", repoName)) + + out, _, err := runCommandWithOutput(buildCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +// Test cases in #22036 +func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { + // Command line option labels will always override + name := "scratchy" + expected := `{"bar":"from-flag","foo":"from-flag"}` + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`, + true, "--label", "foo=from-flag", "--label", "bar=from-flag") + c.Assert(err, check.IsNil) + + res := inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + name = "from" + expected = `{"foo":"from-dockerfile"}` + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + LABEL foo from-dockerfile`, + true) + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option label will override even via `FROM` + name = "new" + expected = `{"bar":"from-dockerfile2","foo":"new"}` + _, err = buildImage(name, + `FROM from + LABEL bar from-dockerfile2`, + true, "--label", "foo=new") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + name = "scratchy2" + expected = `{"bar":"","foo":""}` + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`, + true, "--label", "foo", "--label", "bar=") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + // This time is for inherited images + name = "new2" + expected = `{"bar":"","foo":""}` + _, err = buildImage(name, + `FROM from + LABEL bar from-dockerfile2`, + true, "--label", "foo=", "--label", "bar") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with only `FROM` + name = "scratchy" + expected = `{"bar":"from-flag","foo":"from-flag"}` + _, err = buildImage(name, + `FROM `+minimalBaseImage(), + true, "--label", "foo=from-flag", "--label", "bar=from-flag") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with env var + name = "scratchz" + expected = `{"bar":"$PATH"}` + _, err = buildImage(name, + `FROM `+minimalBaseImage(), + true, "--label", "bar=$PATH") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + +} + +// Test case for #22855 +func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) { + name := "test-delete-committed-file" + + _, err := buildImage(name, + `FROM busybox + RUN echo test > file + RUN test -e file + RUN rm file + RUN sh -c "! test -e file"`, false) + if err != nil { + c.Fatal(err) + } +} + +// #20083 +func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) { + // TODO Windows: Figure out why this test is flakey on TP5. If you add + // something like RUN sleep 5, or even RUN ls /tmp after the ADD line, + // it is more reliable, but that's not a good fix. + testRequires(c, DaemonIsLinux) + + name := "testbuilddockerignorecleanpaths" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(ls -la /tmp/#1)" + RUN sh -c "(! ls -la /tmp/#2)" + RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (ls /tmp/dir1/foo)"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "foo", + "foo2": "foo2", + "dir1/foo": "foo in dir1", + "#1": "# file 1", + "#2": "# file 2", + ".dockerignore": `# Visual C++ cache files +# because we have git ;-) +# The above comment is from #20083 +foo +#dir1/foo +foo2 +# The following is considered as comment as # is at the beginning +#1 +# The following is not considered as comment as # is not at the beginning + #2 +`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Test case for #23221 +func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) { + name := "test-with-utf8-bom" + dockerfile := []byte(`FROM busybox`) + bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...) + ctx, err := fakeContextFromNewTempDir() + c.Assert(err, check.IsNil) + defer ctx.Close() + err = ctx.addFile("Dockerfile", bomDockerfile) + c.Assert(err, check.IsNil) + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) +} + +// Test case for UTF-8 BOM in .dockerignore, related to #23221 +func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) { + name := "test-with-utf8-bom-dockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls -la /tmp + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + dockerignore := []byte("./Dockerfile\n") + bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...) + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + err = ctx.addFile(".dockerignore", bomDockerignore) + c.Assert(err, check.IsNil) + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +// #22489 Shell test to confirm config gets updated correctly +func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) { + name := "testbuildshellupdatesconfig" + + expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]` + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + SHELL ["foo", "-bar"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + if res != expected { + c.Fatalf("%s, expected %s", res, expected) + } + res = inspectFieldJSON(c, name, "ContainerConfig.Shell") + if res != `["foo","-bar"]` { + c.Fatalf(`%s, expected ["foo","-bar"]`, res) + } +} + +// #22489 Changing the shell multiple times and CMD after. +func (s *DockerSuite) TestBuildShellMultiple(c *check.C) { + name := "testbuildshellmultiple" + + _, out, _, err := buildImageWithStdoutStderr(name, + `FROM busybox + RUN echo defaultshell + SHELL ["echo"] + RUN echoshell + SHELL ["ls"] + RUN -l + CMD -l`, + true) + if err != nil { + c.Fatal(err) + } + + // Must contain 'defaultshell' twice + if len(strings.Split(out, "defaultshell")) != 3 { + c.Fatalf("defaultshell should have appeared twice in %s", out) + } + + // Must contain 'echoshell' twice + if len(strings.Split(out, "echoshell")) != 3 { + c.Fatalf("echoshell should have appeared twice in %s", out) + } + + // Must contain "total " (part of ls -l) + if !strings.Contains(out, "total ") { + c.Fatalf("%s should have contained 'total '", out) + } + + // A container started from the image uses the shell-form CMD. + // Last shell is ls. CMD is -l. So should contain 'total '. + outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489. Changed SHELL with ENTRYPOINT +func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) { + name := "testbuildshellentrypoint" + + _, err := buildImage(name, + `FROM busybox + SHELL ["ls"] + ENTRYPOINT -l`, + true) + if err != nil { + c.Fatal(err) + } + + // A container started from the image uses the shell-form ENTRYPOINT. + // Shell is ls. ENTRYPOINT is -l. So should contain 'total '. + outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489 Shell test to confirm shell is inherited in a subsequent build +func (s *DockerSuite) TestBuildShellInherited(c *check.C) { + name1 := "testbuildshellinherited1" + _, err := buildImage(name1, + `FROM busybox + SHELL ["ls"]`, + true) + if err != nil { + c.Fatal(err) + } + + name2 := "testbuildshellinherited2" + _, out, _, err := buildImageWithStdoutStderr(name2, + `FROM `+name1+` + RUN -l`, + true) + if err != nil { + c.Fatal(err) + } + + // ls -l has "total " followed by some number in it, ls without -l does not. + if !strings.Contains(out, "total ") { + c.Fatalf("Should have seen total in 'ls -l'.\n%s", out) + } +} + +// #22489 Shell test to confirm non-JSON doesn't work +func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) { + name := "testbuildshellnotjson" + + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + sHeLl exec -form`, // Casing explicit to ensure error is upper-cased. + true) + if err == nil { + c.Fatal("Image build should have failed") + } + if !strings.Contains(err.Error(), "SHELL requires the arguments to be in JSON form") { + c.Fatal("Error didn't indicate that arguments must be in JSON form") + } +} + +// #22489 Windows shell test to confirm native is powershell if executing a PS command +// This would error if the default shell were still cmd. +func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildshellpowershell" + _, out, err := buildImageWithOut(name, + `FROM `+minimalBaseImage()+` + SHELL ["powershell", "-command"] + RUN Write-Host John`, + true) + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "\nJohn\n") { + c.Fatalf("Line with 'John' not found in output %q", out) + } +} + +// Verify that escape is being correctly applied to words when escape directive is not \. +// Tests WORKDIR, ADD +func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildescapenotbackslashwordtesta" + _, out, err := buildImageWithOut(name, + `# escape= `+"`"+` + FROM `+minimalBaseImage()+` + WORKDIR c:\windows + RUN dir /w`, + true) + if err != nil { + c.Fatal(err) + } + if !strings.Contains(strings.ToLower(out), "[system32]") { + c.Fatalf("Line with '[windows]' not found in output %q", out) + } + + name = "testbuildescapenotbackslashwordtestb" + _, out, err = buildImageWithOut(name, + `# escape= `+"`"+` + FROM `+minimalBaseImage()+` + SHELL ["powershell.exe"] + WORKDIR c:\foo + ADD Dockerfile c:\foo\ + RUN dir Dockerfile`, + true) + if err != nil { + c.Fatal(err) + } + if !strings.Contains(strings.ToLower(out), "-a----") { + c.Fatalf("Line with '-a----' not found in output %q", out) + } + +} + +// #22868. Make sure shell-form CMD is marked as escaped in the config of the image +func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildcmdshellescaped" + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + CMD "ipconfig" + `, true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.ArgsEscaped") + if res != "true" { + c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res) + } + dockerCmd(c, "run", "--name", "inspectme", name) + dockerCmd(c, "wait", "inspectme") + res = inspectFieldJSON(c, name, "Config.Cmd") + + if res != `["cmd","/S","/C","\"ipconfig\""]` { + c.Fatalf("CMD was not escaped Config.Cmd: got %v", res) + } +} + +// Test case for #24912. +func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) { + name := "testbuildstepswithprogress" + + totalRun := 5 + _, out, err := buildImageWithOut(name, "FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun), true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun)) + for i := 2; i <= 1+totalRun; i++ { + c.Assert(out, checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun)) + } +} + +func (s *DockerSuite) TestBuildWithFailure(c *check.C) { + name := "testbuildwithfailure" + + // First test case can only detect `nobody` in runtime so all steps will show up + buildCmd := "FROM busybox\nRUN nobody" + _, stdout, _, err := buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm") + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(stdout, checker.Contains, "Step 2/2 : RUN nobody") + + // Second test case `FFOM` should have been detected before build runs so no steps + buildCmd = "FFOM nobody\nRUN nobody" + _, stdout, _, err = buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm") + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Not(checker.Contains), "Step 1/2 : FROM busybox") + c.Assert(stdout, checker.Not(checker.Contains), "Step 2/2 : RUN nobody") +} + +func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { + testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows + dockerfile := ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch bax` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + id1, err := buildImageFromContext("build1", ctx, true) + c.Assert(err, checker.IsNil) + + // rebuild with cache-from + id2, out, err := buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) + dockerCmd(c, "rmi", "build2") + + // no cache match with unknown source + id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=nosuchtag") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 0) + dockerCmd(c, "rmi", "build2") + + // clear parent images + tempDir, err := ioutil.TempDir("", "test-build-cache-from-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, "img.tar") + dockerCmd(c, "save", "-o", tempFile, "build1") + dockerCmd(c, "rmi", "build1") + dockerCmd(c, "load", "-i", tempFile) + parentID, _ := dockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1") + c.Assert(strings.TrimSpace(parentID), checker.Equals, "") + + // cache still applies without parents + id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) + history1, _ := dockerCmd(c, "history", "-q", "build2") + + // Retry, no new intermediate images + id3, out, err := buildImageFromContextWithOut("build3", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id3) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) + history2, _ := dockerCmd(c, "history", "-q", "build3") + + c.Assert(history1, checker.Equals, history2) + dockerCmd(c, "rmi", "build2") + dockerCmd(c, "rmi", "build3") + dockerCmd(c, "rmi", "build1") + dockerCmd(c, "load", "-i", tempFile) + + // Modify file, everything up to last command and layers are reused + dockerfile = ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch newfile` + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644) + c.Assert(err, checker.IsNil) + + id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 2) + + layers1Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1") + layers2Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2") + + var layers1 []string + var layers2 []string + c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil) + c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil) + + c.Assert(len(layers1), checker.Equals, len(layers2)) + for i := 0; i < len(layers1)-1; i++ { + c.Assert(layers1[i], checker.Equals, layers2[i]) + } + c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1]) +} + +func (s *DockerSuite) TestBuildNetNone(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "testbuildnetnone" + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN ping -c 1 8.8.8.8 + `, true, "--network=none") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unreachable") +} + +func (s *DockerSuite) TestBuildNetContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", "1234", "-e", "hostname") + + name := "testbuildnetcontainer" + out, err := buildImage(name, ` + FROM busybox + RUN nc localhost 1234 > /otherhost + `, true, "--network=container:"+strings.TrimSpace(id)) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost") + c.Assert(strings.TrimSpace(host), check.Equals, "foobar") +} + +func (s *DockerSuite) TestBuildSquashParent(c *check.C) { + testRequires(c, ExperimentalDaemon) + dockerFile := ` + FROM busybox + RUN echo hello > /hello + RUN echo world >> /hello + RUN echo hello > /remove_me + ENV HELLO world + RUN rm /remove_me + ` + // build and get the ID that we can use later for history comparison + origID, err := buildImage("test", dockerFile, false) + c.Assert(err, checker.IsNil) + + // build with squash + id, err := buildImage("test", dockerFile, true, "--squash") + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld") + + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! -f /remove_me ]") + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`) + + // make sure the ID produced is the ID of the tag we specified + inspectID, err := inspectImage("test", ".ID") + c.Assert(err, checker.IsNil) + c.Assert(inspectID, checker.Equals, id) + + origHistory, _ := dockerCmd(c, "history", origID) + testHistory, _ := dockerCmd(c, "history", "test") + + splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n") + splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n") + c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1) + + out, err = inspectImage(id, "len .RootFS.Layers") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "3") +} + +func (s *DockerSuite) TestBuildContChar(c *check.C) { + name := "testbuildcontchar" + + _, out, err := buildImageWithOut(name, + `FROM busybox\`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/1 : FROM busybox") + + _, out, err = buildImageWithOut(name, + `FROM busybox + RUN echo hi \`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi\n") + + _, out, err = buildImageWithOut(name, + `FROM busybox + RUN echo hi \\`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi \\\n") + + _, out, err = buildImageWithOut(name, + `FROM busybox + RUN echo hi \\\`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi \\\\\n") +} + +// TestBuildOpaqueDirectory tests that a build succeeds which +// creates opaque directories. +// See https://github.com/docker/docker/issues/25244 +func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerFile := ` + FROM busybox + RUN mkdir /dir1 && touch /dir1/f1 + RUN rm -rf /dir1 && mkdir /dir1 && touch /dir1/f2 + RUN touch /dir1/f3 + RUN [ -f /dir1/f2 ] + ` + + // Test that build succeeds, last command fails if opaque directory + // was not handled correctly + _, err := buildImage("testopaquedirectory", dockerFile, false) + c.Assert(err, checker.IsNil) +} + +// Windows test for USER in dockerfile +func (s *DockerSuite) TestBuildWindowsUser(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsuser" + _, out, err := buildImageWithOut(name, + `FROM `+WindowsBaseImage+` + RUN net user user /add + USER user + RUN set username + `, + true) + if err != nil { + c.Fatal(err) + } + c.Assert(strings.ToLower(out), checker.Contains, "username=user") +} + +// Verifies if COPY file . when WORKDIR is set to a non-existing directory, +// the directory is created and the file is copied into the directory, +// as opposed to the file being copied as a file with the name of the +// directory. Fix for 27545 (found on Windows, but regression good for Linux too). +// Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514. +func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) { + name := "testbuildcopyfiledotwithworkdir" + ctx, err := fakeContext(`FROM busybox +WORKDIR /foo +COPY file . +RUN ["cat", "/foo/file"] +`, + map[string]string{}) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if err := ctx.Add("file", "content"); err != nil { + c.Fatal(err) + } + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Case-insensitive environment variables on Windows +func (s *DockerSuite) TestBuildWindowsEnvCaseInsensitive(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsenvcaseinsensitive" + if _, err := buildImage(name, ` + FROM `+WindowsBaseImage+` + ENV FOO=bar foo=bar + `, true); err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Env") + if res != `["foo=bar"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped. + c.Fatalf("Case insensitive environment variables on Windows failed. Got %s", res) + } +} + +// Test case for 29667 +func (s *DockerSuite) TestBuildWorkdirImageCmd(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "testworkdirimagecmd" + dockerfile := ` +FROM busybox +WORKDIR /foo/bar +` + out, err := buildImage(image, dockerfile, true) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + + out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + c.Assert(strings.TrimSpace(out), checker.Equals, `["sh"]`) + + image = "testworkdirlabelimagecmd" + dockerfile = ` +FROM busybox +WORKDIR /foo/bar +LABEL a=b +` + out, err = buildImage(image, dockerfile, true) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + + out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + c.Assert(strings.TrimSpace(out), checker.Equals, `["sh"]`) +} + +// Test case for 28902/28090 +func (s *DockerSuite) TestBuildWorkdirCmd(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerFile := ` + FROM golang:1.7-alpine + WORKDIR / + ` + _, err := buildImage("testbuildworkdircmd", dockerFile, true) + c.Assert(err, checker.IsNil) + + _, out, err := buildImageWithOut("testbuildworkdircmd", dockerFile, true) + c.Assert(err, checker.IsNil) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 1) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go new file mode 100644 index 0000000..0205a92 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go @@ -0,0 +1,207 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/integration" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/go-units" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { + testRequires(c, cpuCfsQuota) + name := "testbuildresourceconstraints" + + ctx, err := fakeContext(` + FROM hello-world:frozen + RUN ["/hello"] + `, map[string]string{}) + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, ".") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "ps", "-lq") + cID := strings.TrimSpace(out) + + type hostConfig struct { + Memory int64 + MemorySwap int64 + CpusetCpus string + CpusetMems string + CPUShares int64 + CPUQuota int64 + Ulimits []*units.Ulimit + } + + cfg := inspectFieldJSON(c, cID, "HostConfig") + + var c1 hostConfig + err = json.Unmarshal([]byte(cfg), &c1) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c1.Memory, checker.Equals, int64(64*1024*1024), check.Commentf("resource constraints not set properly for Memory")) + c.Assert(c1.MemorySwap, checker.Equals, int64(-1), check.Commentf("resource constraints not set properly for MemorySwap")) + c.Assert(c1.CpusetCpus, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetCpus")) + c.Assert(c1.CpusetMems, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetMems")) + c.Assert(c1.CPUShares, checker.Equals, int64(100), check.Commentf("resource constraints not set properly for CPUShares")) + c.Assert(c1.CPUQuota, checker.Equals, int64(8000), check.Commentf("resource constraints not set properly for CPUQuota")) + c.Assert(c1.Ulimits[0].Name, checker.Equals, "nofile", check.Commentf("resource constraints not set properly for Ulimits")) + c.Assert(c1.Ulimits[0].Hard, checker.Equals, int64(42), check.Commentf("resource constraints not set properly for Ulimits")) + + // Make sure constraints aren't saved to image + dockerCmd(c, "run", "--name=test", name) + + cfg = inspectFieldJSON(c, "test", "HostConfig") + + var c2 hostConfig + err = json.Unmarshal([]byte(cfg), &c2) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c2.Memory, check.Not(checker.Equals), int64(64*1024*1024), check.Commentf("resource leaked from build for Memory")) + c.Assert(c2.MemorySwap, check.Not(checker.Equals), int64(-1), check.Commentf("resource leaked from build for MemorySwap")) + c.Assert(c2.CpusetCpus, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetCpus")) + c.Assert(c2.CpusetMems, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetMems")) + c.Assert(c2.CPUShares, check.Not(checker.Equals), int64(100), check.Commentf("resource leaked from build for CPUShares")) + c.Assert(c2.CPUQuota, check.Not(checker.Equals), int64(8000), check.Commentf("resource leaked from build for CPUQuota")) + c.Assert(c2.Ulimits, checker.IsNil, check.Commentf("resource leaked from build for Ulimits")) +} + +func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddown" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD foo /bar/ + RUN [ $(stat -c %U:%G "/bar") = 'root:root' ] + RUN [ $(stat -c %U:%G "/bar/foo") = 'root:root' ] + ` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testFile, err := os.Create(filepath.Join(tmpDir, "foo")) + if err != nil { + c.Fatalf("failed to create foo file: %v", err) + } + defer testFile.Close() + + chownCmd := exec.Command("chown", "daemon:daemon", "foo") + chownCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(chownCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddChangeOwnership: %v", err) + } +} + +// Test that an infinite sleep during a build is killed if the client disconnects. +// This test is fairly hairy because there are lots of ways to race. +// Strategy: +// * Monitor the output of docker events starting from before +// * Run a 1-year-long sleep from a docker build. +// * When docker events sees container start, close the "docker build" command +// * Wait for docker events to emit a dying event. +func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcancellation" + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + // (Note: one year, will never finish) + ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") + buildCmd.Dir = ctx.Dir + + stdoutBuild, err := buildCmd.StdoutPipe() + if err := buildCmd.Start(); err != nil { + c.Fatalf("failed to run build: %s", err) + } + + matchCID := regexp.MustCompile("Running in (.+)") + scanner := bufio.NewScanner(stdoutBuild) + + outputBuffer := new(bytes.Buffer) + var buildID string + for scanner.Scan() { + line := scanner.Text() + outputBuffer.WriteString(line) + outputBuffer.WriteString("\n") + if matches := matchCID.FindStringSubmatch(line); len(matches) > 0 { + buildID = matches[1] + break + } + } + + if buildID == "" { + c.Fatalf("Unable to find build container id in build output:\n%s", outputBuffer.String()) + } + + testActions := map[string]chan bool{ + "start": make(chan bool, 1), + "die": make(chan bool, 1), + } + + matcher := matchEventLine(buildID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + // Send a kill to the `docker build` command. + // Causes the underlying build to be cancelled due to socket close. + if err := buildCmd.Process.Kill(); err != nil { + c.Fatalf("error killing build command: %s", err) + } + + // Get the exit status of `docker build`, check it exited because killed. + if err := buildCmd.Wait(); err != nil && !integration.IsKilled(err) { + c.Fatalf("wait failed during build run: %T %s", err, err) + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go new file mode 100644 index 0000000..c2d8546 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go @@ -0,0 +1,693 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +var ( + remoteRepoName = "dockercli/busybox-by-dgst" + repoName = fmt.Sprintf("%s/%s", privateRegistryURL, remoteRepoName) + pushDigestRegex = regexp.MustCompile("[\\S]+: digest: ([\\S]+) size: [0-9]+") + digestRegex = regexp.MustCompile("Digest: ([\\S]+)") +) + +func setupImage(c *check.C) (digest.Digest, error) { + return setupImageWithTag(c, "latest") +} + +func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { + containerName := "busyboxbydigest" + + dockerCmd(c, "run", "-e", "digest=1", "--name", containerName, "busybox") + + // tag the image to upload it to the private registry + repoAndTag := repoName + ":" + tag + out, _, err := dockerCmdWithError("commit", containerName, repoAndTag) + c.Assert(err, checker.IsNil, check.Commentf("image tagging failed: %s", out)) + + // delete the container as we don't need it any more + err = deleteContainer(containerName) + c.Assert(err, checker.IsNil) + + // push the image + out, _, err = dockerCmdWithError("push", repoAndTag) + c.Assert(err, checker.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) + + // delete our local repo that we previously tagged + rmiout, _, err := dockerCmdWithError("rmi", repoAndTag) + c.Assert(err, checker.IsNil, check.Commentf("error deleting images prior to real test: %s", rmiout)) + + matches := pushDigestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out)) + pushDigest := matches[1] + + return digest.Digest(pushDigest), nil +} + +func testPullByTagDisplaysDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the tag + out, _ := dockerCmd(c, "pull", repoName) + + // the pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pull digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func testPullByDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + out, _ := dockerCmd(c, "pull", imageReference) + + // the pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pull digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func testPullByDigestNoFallback(c *check.C) { + testRequires(c, DaemonIsLinux) + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName) + out, _, err := dockerCmdWithError("pull", imageReference) + c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) + c.Assert(out, checker.Contains, fmt.Sprintf("manifest for %s not found", imageReference), check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) +} + +func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "createByDigest" + dockerCmd(c, "create", "--name", containerName, imageReference) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "runByDigest" + out, _ := dockerCmd(c, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest") + + foundRegex := regexp.MustCompile("found=([^\n]+)") + matches := foundRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + c.Assert(matches[1], checker.Equals, "1", check.Commentf("Expected %q, got %q", "1", matches[1])) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // make sure inspect runs ok + inspectField(c, imageReference, "Id") + + // do the delete + err = deleteImages(imageReference) + c.Assert(err, checker.IsNil, check.Commentf("unexpected error deleting image")) + + // try to inspect again - it should error this time + _, err = inspectFieldWithError(imageReference, "Id") + //unexpected nil err trying to inspect what should be a non-existent image + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "No such object") +} + +func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // get the image id + imageID := inspectField(c, imageReference, "Id") + + // do the build + name := "buildbydigest" + _, err = buildImage(name, fmt.Sprintf( + `FROM %s + CMD ["/bin/echo", "Hello World"]`, imageReference), + true) + c.Assert(err, checker.IsNil) + + // get the build's image id + res := inspectField(c, name, "Config.Image") + // make sure they match + c.Assert(res, checker.Equals, imageID) +} + +func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // tag it + tag := "tagbydigest" + dockerCmd(c, "tag", imageReference, tag) + + expectedID := inspectField(c, imageReference, "Id") + + tagID := inspectField(c, tag, "Id") + c.Assert(tagID, checker.Equals, expectedID) +} + +func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "images") + c.Assert(out, checker.Not(checker.Contains), "DIGEST", check.Commentf("list output should not have contained DIGEST header")) +} + +func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) { + + // setup image1 + digest1, err := setupImageWithTag(c, "tag1") + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "tag2") + //error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag1 + dockerCmd(c, "pull", repoName+":tag1") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, AND repo, , digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, , digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag 2 + dockerCmd(c, "pull", repoName+":tag2") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + // make sure busybox has tag, but not digest + busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*\s`) + c.Assert(busyboxRe.MatchString(out), checker.True, check.Commentf("expected %q: %s", busyboxRe.String(), out)) +} + +func (s *DockerRegistrySuite) TestListDanglingImagesWithDigests(c *check.C) { + // setup image1 + digest1, err := setupImageWithTag(c, "dangle1") + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "dangle2") + //error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure repo shown, tag=, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle1 tag + dockerCmd(c, "pull", repoName+":dangle1") + + // list images + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, tag, AND repo, , digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*dangle1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, , digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle2 tag + dockerCmd(c, "pull", repoName+":dangle2") + + // list images, show tagged images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*dangle2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images, no longer dangling, should not match + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + c.Assert(reWithDigest2.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest2.String(), out)) +} + +func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, check.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "inspect", imageReference) + + var imageJSON []types.ImageInspect + err = json.Unmarshal([]byte(out), &imageJSON) + c.Assert(err, checker.IsNil) + c.Assert(imageJSON, checker.HasLen, 1) + c.Assert(imageJSON[0].RepoDigests, checker.HasLen, 1) + c.Assert(stringutils.InSlice(imageJSON[0].RepoDigests, imageReference), checker.Equals, true) +} + +func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // build an image from it + imageName1 := "images_ps_filter_test" + _, err = buildImage(imageName1, fmt.Sprintf( + `FROM %s + LABEL match me 1`, imageReference), true) + c.Assert(err, checker.IsNil) + + // run a container based on that + dockerCmd(c, "run", "--name=test1", imageReference, "echo", "hello") + expectedID, err := getIDByName("test1") + c.Assert(err, check.IsNil) + + // run a container based on the a descendant of that too + dockerCmd(c, "run", "--name=test2", imageName1, "echo", "hello") + expectedID1, err := getIDByName("test2") + c.Assert(err, check.IsNil) + + expectedIDs := []string{expectedID, expectedID1} + + // Invalid imageReference + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", fmt.Sprintf("--filter=ancestor=busybox@%s", digest)) + // Filter container for ancestor filter should be empty + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Valid imageReference + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference) + checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs) +} + +func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + // just in case... + + dockerCmd(c, "tag", imageReference, repoName+":sometag") + + imageID := inspectField(c, imageReference, "Id") + + dockerCmd(c, "rmi", imageID) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repoName + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted only repoTag2, because there's another tag + inspectField(c, repoTag, "Id") + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndMultiRepoTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + repo2 := fmt.Sprintf("%s/%s", repoName, "repo2") + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repo2 + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted repoTag and image reference, but left repoTag2 + inspectField(c, repoTag2, "Id") + _, err = inspectFieldWithError(imageReference, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image digest reference should have been removed")) + + _, err = inspectFieldWithError(repoTag, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image tag reference should have been removed")) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.Layers[0].Digest = digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.FSLayers[0] = schema1.FSLayer{ + BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), + } + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.Layers[0].Digest + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.FSLayers[0].BlobSum + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go new file mode 100644 index 0000000..8008ae1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go @@ -0,0 +1,157 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) { + out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + + cleanedImageID := strings.TrimSpace(out) + + dockerCmd(c, "inspect", cleanedImageID) +} + +func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", "-p=false", cleanedContainerID) + + cleanedImageID := strings.TrimSpace(out) + + dockerCmd(c, "inspect", cleanedImageID) +} + +//test commit a paused container should not unpause it after commit +func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + defer unpauseAllContainers() + out, _ := dockerCmd(c, "run", "-i", "-d", "busybox") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + + out = inspectField(c, cleanedContainerID, "State.Paused") + // commit should not unpause a paused container + c.Assert(out, checker.Contains, "true") +} + +func (s *DockerSuite) TestCommitNewFile(c *check.C) { + dockerCmd(c, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") + + imageID, _ := dockerCmd(c, "commit", "commiter") + imageID = strings.TrimSpace(imageID) + + out, _ := dockerCmd(c, "run", imageID, "cat", "/foo") + actual := strings.TrimSpace(out) + c.Assert(actual, checker.Equals, "koye") +} + +func (s *DockerSuite) TestCommitHardlink(c *check.C) { + testRequires(c, DaemonIsLinux) + firstOutput, _ := dockerCmd(c, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") + + chunks := strings.Split(strings.TrimSpace(firstOutput), " ") + inode := chunks[0] + chunks = strings.SplitAfterN(strings.TrimSpace(firstOutput), " ", 2) + c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])) + + imageID, _ := dockerCmd(c, "commit", "hardlinks", "hardlinks") + imageID = strings.TrimSpace(imageID) + + secondOutput, _ := dockerCmd(c, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") + + chunks = strings.Split(strings.TrimSpace(secondOutput), " ") + inode = chunks[0] + chunks = strings.SplitAfterN(strings.TrimSpace(secondOutput), " ", 2) + c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])) +} + +func (s *DockerSuite) TestCommitTTY(c *check.C) { + dockerCmd(c, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + imageID, _ := dockerCmd(c, "commit", "tty", "ttytest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", "ttytest", "/bin/ls") +} + +func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", "bind-commit", "bindtest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", "bindtest", "true") +} + +func (s *DockerSuite) TestCommitChange(c *check.C) { + dockerCmd(c, "run", "--name", "test", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "EXPOSE 8080", + "--change", "ENV DEBUG true", + "--change", "ENV test 1", + "--change", "ENV PATH /foo", + "--change", "LABEL foo bar", + "--change", "CMD [\"/bin/sh\"]", + "--change", "WORKDIR /opt", + "--change", "ENTRYPOINT [\"/bin/sh\"]", + "--change", "USER testuser", + "--change", "VOLUME /var/lib/docker", + "--change", "ONBUILD /usr/local/bin/python-build --dir /app/src", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalised on Windows + expected := map[string]string{ + "Config.ExposedPorts": "map[8080/tcp:{}]", + "Config.Env": "[DEBUG=true test=1 PATH=/foo]", + "Config.Labels": "map[foo:bar]", + "Config.Cmd": "[/bin/sh]", + "Config.WorkingDir": prefix + slash + "opt", + "Config.Entrypoint": "[/bin/sh]", + "Config.User": "testuser", + "Config.Volumes": "map[/var/lib/docker:{}]", + "Config.OnBuild": "[/usr/local/bin/python-build --dir /app/src]", + } + + for conf, value := range expected { + res := inspectField(c, imageID, conf) + if res != value { + c.Errorf("%s('%s'), expected %s", conf, res, value) + } + } +} + +func (s *DockerSuite) TestCommitChangeLabels(c *check.C) { + dockerCmd(c, "run", "--name", "test", "--label", "some=label", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "LABEL some=label2", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + c.Assert(inspectField(c, imageID, "Config.Labels"), checker.Equals, "map[some:label2]") + // check that container labels didn't change + c.Assert(inspectField(c, "test", "Config.Labels"), checker.Equals, "map[some:label]") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go new file mode 100644 index 0000000..1d5e5ad --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go @@ -0,0 +1,140 @@ +package main + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestConfigHTTPHeader(c *check.C) { + testRequires(c, UnixCli) // Can't set/unset HOME on windows right now + // We either need a level of Go that supports Unsetenv (for cases + // when HOME/USERPROFILE isn't set), or we need to be able to use + // os/user but user.Current() only works if we aren't statically compiling + + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + headers = r.Header + })) + defer server.Close() + + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") + out, _, _ := runCommandWithOutput(cmd) + + c.Assert(headers["User-Agent"], checker.NotNil, check.Commentf("Missing User-Agent")) + + c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", out)) + + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("Missing/bad header,out:%v", out)) + +} + +func (s *DockerSuite) TestConfigDir(c *check.C) { + cDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(cDir) + + // First make sure pointing to empty dir doesn't generate an error + dockerCmd(c, "--config", cDir, "ps") + + // Test with env var too + cmd := exec.Command(dockerBinary, "ps") + cmd.Env = appendBaseEnv(true, "DOCKER_CONFIG="+cDir) + out, _, err := runCommandWithOutput(cmd) + + c.Assert(err, checker.IsNil, check.Commentf("ps2 didn't work,out:%v", out)) + + // Start a server so we can check to see if the config file was + // loaded properly + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + })) + defer server.Close() + + // Create a dummy config file in our new config dir + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + tmpCfg := filepath.Join(cDir, "config.json") + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil, check.Commentf("Err creating file")) + + env := appendBaseEnv(false) + + cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") + cmd.Env = env + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps3 - Missing header,out:%v", out)) + + // Reset headers and try again using env var this time + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") + cmd.Env = append(env, "DOCKER_CONFIG="+cDir) + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps4 - Missing header,out:%v", out)) + + // Reset headers and make sure flag overrides the env var + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") + cmd.Env = append(env, "DOCKER_CONFIG=MissingDir") + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps5 - Missing header,out:%v", out)) + + // Reset headers and make sure flag overrides the env var. + // Almost same as previous but make sure the "MissingDir" isn't + // ignore - we don't want to default back to the env var. + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "--config", "MissingDir", "-H="+server.URL[7:], "ps") + cmd.Env = append(env, "DOCKER_CONFIG="+cDir) + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) + c.Assert(headers["Myheader"], checker.IsNil, check.Commentf("ps6 - Headers shouldn't be the expected value,out:%v", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go new file mode 100644 index 0000000..9ed7e8c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go @@ -0,0 +1,488 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// docker cp CONTAINER:PATH LOCALPATH + +// Try all of the test cases from the archive package which implements the +// internals of `docker cp` and ensure that the behavior matches when actually +// copying to and from containers. + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists") + defer os.RemoveAll(tmpDir) + + err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir") + defer os.RemoveAll(tmpDir) + + err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Test for error when SRC is a valid file or directory, +// bu the DST parent directory does not exist. +func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := containerCpPath(containerID, "/file1") + dstPath := cpPath(tmpDir, "notExists", "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = containerCpPath(containerID, "/dir1") + + err = runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := containerCpPath(containerID, "/file1") + dstPath := cpPathTrailingSep(tmpDir, "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = containerCpPath(containerID, "/dir1") + + err = runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Check that copying from a container to a local symlink copies to the symlink +// target and does not overwrite the local symlink itself. +func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // First, copy a file from the container to a symlink to a file. This + // should overwrite the symlink target contents with the source contents. + srcPath := containerCpPath(containerID, "/file2") + dstPath := cpPath(tmpDir, "symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a directory. This + // should copy the file into the symlink target directory. + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a file that does + // not exist (a broken symlink). This should create the target file with + // the contents of the source file. + dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = containerCpPath(containerID, "/dir2") + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(tmpDir, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory that does not exist (a broken symlink). This should create + // the target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(tmpDir, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpFromCaseA(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-a") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpFromCaseB(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-b") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPathTrailingSep(tmpDir, "testDir") + + err := runDockerCp(c, srcPath, dstDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpFromCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "file2") + + // Ensure the local file starts with different content. + c.Assert(fileContentEquals(c, dstPath, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPath(tmpDir, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + // Ensure that dstPath doesn't exist. + _, err := os.Stat(dstPath) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("did not expect dstPath %q to exist", dstPath)) + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir1") + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func (s *DockerSuite) TestCpFromCaseE(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-e") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPath(containerID, "dir1") + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpFromCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstDir := cpPath(tmpDir, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + dstPath := filepath.Join(resultDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseH(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-h") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "dir1") + "." + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove resultDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpFromCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstDir := cpPath(tmpDir, "dir2") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go new file mode 100644 index 0000000..4e5c39e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go @@ -0,0 +1,660 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Ensure that an all-local path case returns an error. +func (s *DockerSuite) TestCpLocalOnly(c *check.C) { + err := runDockerCp(c, "foo", "bar") + c.Assert(err, checker.NotNil) + + c.Assert(err.Error(), checker.Contains, "must specify at least one container source") +} + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func (s *DockerSuite) TestCpGarbagePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("../../../../../../../../../../../../", cpFullPath) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- garbage path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for garbage path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that relative paths are relative to the container's rootfs +func (s *DockerSuite) TestCpRelativePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + var relPath string + if path.IsAbs(cpFullPath) { + // normally this is `filepath.Rel("/", cpFullPath)` but we cannot + // get this unix-path manipulation on windows with filepath. + relPath = cpFullPath[1:] + } + c.Assert(path.IsAbs(cpFullPath), checker.True, check.Commentf("path %s was assumed to be an absolute path", cpFullPath)) + + dockerCmd(c, "cp", containerID+":"+relPath, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- relative path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for relative path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that absolute paths are relative to the container's rootfs +func (s *DockerSuite) TestCpAbsolutePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- absolute path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for absolute path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Test for #5619 +// Check that absolute symlinks are still relative to the container's rootfs +func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, "container_path") + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path") + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + // We should have copied a symlink *NOT* the file itself! + linkTarget, err := os.Readlink(tmpname) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpFullPath)) +} + +// Check that symlinks to a directory behave as expected when copying one from +// a container. +func (s *DockerSuite) TestCpFromSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPathParent+" /dir_link") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + testDir, err := ioutil.TempDir("", "test-cp-from-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This copy command should copy the symlink, not the target, into the + // temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + linkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpTestPathParent)) + + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link/", testDir) + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testDir, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". + stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) +} + +// Check that symlinks to a directory behave as expected when copying one to a +// container. +func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testVol) + + // Create a test container with a local volume. We will test by copying + // to the volume path in the container which we can then verify locally. + out, _ := dockerCmd(c, "create", "-v", testVol+":/testVol", "busybox") + + containerID := strings.TrimSpace(out) + + // Create a temp directory to hold a test file nested in a directory. + testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This file will be at "/testDir/some/path/test" and will be copied into + // the test volume later. + hostTestFilename := filepath.Join(testDir, cpFullPath) + c.Assert(os.MkdirAll(filepath.Dir(hostTestFilename), os.FileMode(0700)), checker.IsNil) + c.Assert(ioutil.WriteFile(hostTestFilename, []byte(cpHostContents), os.FileMode(0600)), checker.IsNil) + + // Now create another temp directory to hold a symlink to the + // "/testDir/some" directory. + linkDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(linkDir) + + // Then symlink "/linkDir/dir_link" to "/testdir/some". + linkTarget := filepath.Join(testDir, cpTestPathParent) + localLink := filepath.Join(linkDir, "dir_link") + c.Assert(os.Symlink(linkTarget, localLink), checker.IsNil) + + // Now copy that symlink into the test volume in the container. + dockerCmd(c, "cp", localLink, containerID+":/testVol") + + // This copy command should have copied the symlink *not* the target. + expectedPath := filepath.Join(testVol, "dir_link") + actualLinkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to read symlink at %q", expectedPath)) + + c.Assert(actualLinkTarget, checker.Equals, linkTarget) + + // Good, now remove that copied link for the next test. + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the test volume directory in the + // container. + dockerCmd(c, "cp", localLink+"/", containerID+":/testVol") + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testVol, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". + stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) + + // And this directory should contain the file copied from the host at the + // expected location: "/testVol/dir_link/path/test" + expectedFilepath := filepath.Join(testVol, "dir_link/path/test") + fileContents, err := ioutil.ReadFile(expectedFilepath) + c.Assert(err, checker.IsNil) + + c.Assert(string(fileContents), checker.Equals, cpHostContents) +} + +// Test for #5619 +// Check that symlinks which are part of the resource path are still relative to the container's rootfs +func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path", cpTestName) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- symlink path component can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for symlink path component + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that cp with unprivileged user doesn't return any error +func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, UnixCli) // uses chmod/su: not available on windows + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpdir) + + c.Assert(os.Chmod(tmpdir, 0777), checker.IsNil) + + result := icmd.RunCommand("su", "unprivilegeduser", "-c", + fmt.Sprintf("%s cp %s:%s %s", dockerBinary, containerID, cpTestName, tmpdir)) + result.Assert(c, icmd.Expected{}) +} + +func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) + + outDir, err := ioutil.TempDir("", "cp-test-special-files") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(outDir) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + // Copy actual /etc/resolv.conf + dockerCmd(c, "cp", containerID+":/etc/resolv.conf", outDir) + + expected, err := readContainerFile(containerID, "resolv.conf") + actual, err := ioutil.ReadFile(outDir + "/resolv.conf") + + // Expected copied file to be duplicate of the container resolvconf + c.Assert(bytes.Equal(actual, expected), checker.True) + + // Copy actual /etc/hosts + dockerCmd(c, "cp", containerID+":/etc/hosts", outDir) + + expected, err = readContainerFile(containerID, "hosts") + actual, err = ioutil.ReadFile(outDir + "/hosts") + + // Expected copied file to be duplicate of the container hosts + c.Assert(bytes.Equal(actual, expected), checker.True) + + // Copy actual /etc/resolv.conf + dockerCmd(c, "cp", containerID+":/etc/hostname", outDir) + + expected, err = readContainerFile(containerID, "hostname") + actual, err = ioutil.ReadFile(outDir + "/hostname") + + // Expected copied file to be duplicate of the container resolvconf + c.Assert(bytes.Equal(actual, expected), checker.True) +} + +func (s *DockerSuite) TestCpVolumePath(c *check.C) { + // stat /tmp/cp-test-volumepath851508420/test gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) + + tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(outDir) + _, err = os.Create(tmpDir + "/test") + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + // Copy actual volume path + dockerCmd(c, "cp", containerID+":/foo", outDir) + + stat, err := os.Stat(outDir + "/foo") + c.Assert(err, checker.IsNil) + // expected copied content to be dir + c.Assert(stat.IsDir(), checker.True) + stat, err = os.Stat(outDir + "/foo/bar") + c.Assert(err, checker.IsNil) + // Expected file `bar` to be a file + c.Assert(stat.IsDir(), checker.False) + + // Copy file nested in volume + dockerCmd(c, "cp", containerID+":/foo/bar", outDir) + + stat, err = os.Stat(outDir + "/bar") + c.Assert(err, checker.IsNil) + // Expected file `bar` to be a file + c.Assert(stat.IsDir(), checker.False) + + // Copy Bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz", outDir) + stat, err = os.Stat(outDir + "/baz") + c.Assert(err, checker.IsNil) + // Expected `baz` to be a dir + c.Assert(stat.IsDir(), checker.True) + + // Copy file nested in bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz/test", outDir) + fb, err := ioutil.ReadFile(outDir + "/baz/test") + c.Assert(err, checker.IsNil) + fb2, err := ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) + + // Copy bind-mounted file + dockerCmd(c, "cp", containerID+":/test", outDir) + fb, err = ioutil.ReadFile(outDir + "/test") + c.Assert(err, checker.IsNil) + fb2, err = ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) +} + +func (s *DockerSuite) TestCpToDot(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + cwd, err := os.Getwd() + c.Assert(err, checker.IsNil) + defer os.Chdir(cwd) + c.Assert(os.Chdir(tmpdir), checker.IsNil) + dockerCmd(c, "cp", containerID+":/test", ".") + content, err := ioutil.ReadFile("./test") + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCpToStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "cp", containerID+":/test", "-"), + exec.Command("tar", "-vtf", "-")) + + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, "test") + c.Assert(out, checker.Contains, "-rw") +} + +func (s *DockerSuite) TestCpNameHasColon(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + dockerCmd(c, "cp", containerID+":/te:s:t", tmpdir) + content, err := ioutil.ReadFile(tmpdir + "/te:s:t") + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCopyAndRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + expectedMsg := "hello" + out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", expectedMsg) + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpDir, err := ioutil.TempDir("", "test-docker-restart-after-copy-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dockerCmd(c, "cp", fmt.Sprintf("%s:/etc/group", containerID), tmpDir) + + out, _ = dockerCmd(c, "start", "-a", containerID) + + c.Assert(strings.TrimSpace(out), checker.Equals, expectedMsg) +} + +func (s *DockerSuite) TestCopyCreatedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "--name", "test_cp", "-v", "/test", "busybox") + + tmpDir, err := ioutil.TempDir("", "test") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + dockerCmd(c, "cp", "test_cp:/bin/sh", tmpDir) +} + +// test copy with option `-L`: following symbol link +// Check that symlinks to a file behave as expected when copying one from +// a container to host following symbol link +func (s *DockerSuite) TestCpSymlinkFromConToHostFollowSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" /dir_link") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + testDir, err := ioutil.TempDir("", "test-cp-symlink-container-to-host-follow-symlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(testDir) + + // This copy command should copy the symlink, not the target, into the + // temporary directory. + dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + + expected := []byte(cpContainerContents) + actual, err := ioutil.ReadFile(expectedPath) + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container symbol link target") + } + os.Remove(expectedPath) + + // now test copy symbol link to a non-existing file in host + expectedPath = filepath.Join(testDir, "somefile_host") + // expectedPath shouldn't exist, if exists, remove it + if _, err := os.Lstat(expectedPath); err == nil { + os.Remove(expectedPath) + } + + dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", expectedPath) + + actual, err = ioutil.ReadFile(expectedPath) + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container symbol link target") + } + defer os.Remove(expectedPath) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go new file mode 100644 index 0000000..f981cb8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go @@ -0,0 +1,599 @@ +package main + +import ( + "os" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// docker cp LOCALPATH CONTAINER:PATH + +// Try all of the test cases from the archive package which implements the +// internals of `docker cp` and ensure that the behavior matches when actually +// copying to and from containers. + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists") + defer os.RemoveAll(tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPathTrailingSep(tmpDir, "file1") + dstPath := containerCpPath(containerID, "testDir") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Test for error when SRC is a valid file or directory, +// bu the DST parent directory does not exist. +func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/notExists", "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = cpPath(tmpDir, "dir1") + + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when DST ends in a trailing path separator but exists as a +// file. Also test that we cannot overwrite an existing directory with a +// non-directory and cannot overwrite an existing +func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := cpPath(tmpDir, "dir1/file1-1") + dstPath := containerCpPathTrailingSep(containerID, "file1") + + // The client should encounter an error trying to stat the destination + // and then be unable to copy since the destination is asserted to be a + // directory but does not exist. + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = cpPath(tmpDir, "dir1") + + // The client should encounter an error trying to stat the destination and + // then decide to extract to the parent directory instead with a rebased + // name in the source archive, but this directory would overwrite the + // existing file with the same name. + err = runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) +} + +// Check that copying from a local path to a symlink in a container copies to +// the symlink target and does not overwrite the container symlink itself. +func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { + // stat /tmp/test-cp-to-symlink-destination-262430901/vol3 gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol := getTestDir(c, "test-cp-to-symlink-destination-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + + // First, copy a local file to a symlink to a file in the container. This + // should overwrite the symlink target contents with the source contents. + srcPath := cpPath(testVol, "file2") + dstPath := containerCpPath(containerID, "/vol2/symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a local file to a symlink to a directory in the container. + // This should copy the file into the symlink target directory. + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file to a symlink to a file that does not exist (a broken + // symlink) in the container. This should create the target file with the + // contents of the source file. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a directory in the + // container. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = cpPath(testVol, "/dir2") + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(testVol, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a local directory that does + // not exist (a broken symlink) in the container. This should create the + // target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(testVol, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpToCaseA(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + workDir: "/root", command: makeCatFileCommand("itWorks.txt"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-a") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpToCaseB(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("testDir/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-b") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPathTrailingSep(containerID, "testDir") + + err := runDockerCp(c, srcPath, dstDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpToCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("file2"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/file2") + + // Ensure the container's file starts with the original content. + c.Assert(containerStartOutputEquals(c, containerID, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPath(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func (s *DockerSuite) TestCpToCaseE(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-e") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpToCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("dir2/dir1/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "/root/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir2/dir1/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseH(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-h") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpToCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpToCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("/dir2/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/dir2/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// The `docker cp` command should also ensure that you cannot +// write to a container rootfs that is marked as read-only. +func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-rootfs") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + readOnly: true, workDir: "/root", + command: makeCatFileCommand("shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} + +// The `docker cp` command should also ensure that you +// cannot write to a volume that is mounted as read-only. +func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-volume") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(tmpDir), workDir: "/root", + command: makeCatFileCommand("/vol_ro/shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/vol_ro/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrVolumeReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go new file mode 100644 index 0000000..45d85ba --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go @@ -0,0 +1,39 @@ +// +build !windows + +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +// Check ownership is root, both in non-userns and userns enabled modes +func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + tmpVolDir := getTestDir(c, "test-cp-tmpvol") + containerID := makeTestContainer(c, + testContainerOptions{volumes: []string{fmt.Sprintf("%s:/tmpvol", tmpVolDir)}}) + + tmpDir := getTestDir(c, "test-cp-to-check-ownership") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/tmpvol", "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.IsNil) + + stat, err := system.Stat(filepath.Join(tmpVolDir, "file1")) + c.Assert(err, checker.IsNil) + uid, gid, err := getRootUIDGID() + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Copied file not owned by container root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Copied file not owned by container root GID")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go new file mode 100644 index 0000000..0501c5d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go @@ -0,0 +1,303 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +type fileType uint32 + +const ( + ftRegular fileType = iota + ftDir + ftSymlink +) + +type fileData struct { + filetype fileType + path string + contents string +} + +func (fd fileData) creationCommand() string { + var command string + + switch fd.filetype { + case ftRegular: + // Don't overwrite the file if it already exists! + command = fmt.Sprintf("if [ ! -f %s ]; then echo %q > %s; fi", fd.path, fd.contents, fd.path) + case ftDir: + command = fmt.Sprintf("mkdir -p %s", fd.path) + case ftSymlink: + command = fmt.Sprintf("ln -fs %s %s", fd.contents, fd.path) + } + + return command +} + +func mkFilesCommand(fds []fileData) string { + commands := make([]string, len(fds)) + + for i, fd := range fds { + commands[i] = fd.creationCommand() + } + + return strings.Join(commands, " && ") +} + +var defaultFileData = []fileData{ + {ftRegular, "file1", "file1"}, + {ftRegular, "file2", "file2"}, + {ftRegular, "file3", "file3"}, + {ftRegular, "file4", "file4"}, + {ftRegular, "file5", "file5"}, + {ftRegular, "file6", "file6"}, + {ftRegular, "file7", "file7"}, + {ftDir, "dir1", ""}, + {ftRegular, "dir1/file1-1", "file1-1"}, + {ftRegular, "dir1/file1-2", "file1-2"}, + {ftDir, "dir2", ""}, + {ftRegular, "dir2/file2-1", "file2-1"}, + {ftRegular, "dir2/file2-2", "file2-2"}, + {ftDir, "dir3", ""}, + {ftRegular, "dir3/file3-1", "file3-1"}, + {ftRegular, "dir3/file3-2", "file3-2"}, + {ftDir, "dir4", ""}, + {ftRegular, "dir4/file3-1", "file4-1"}, + {ftRegular, "dir4/file3-2", "file4-2"}, + {ftDir, "dir5", ""}, + {ftSymlink, "symlinkToFile1", "file1"}, + {ftSymlink, "symlinkToDir1", "dir1"}, + {ftSymlink, "brokenSymlinkToFileX", "fileX"}, + {ftSymlink, "brokenSymlinkToDirX", "dirX"}, + {ftSymlink, "symlinkToAbsDir", "/root"}, +} + +func defaultMkContentCommand() string { + return mkFilesCommand(defaultFileData) +} + +func makeTestContentInDir(c *check.C, dir string) { + for _, fd := range defaultFileData { + path := filepath.Join(dir, filepath.FromSlash(fd.path)) + switch fd.filetype { + case ftRegular: + c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(0666)), checker.IsNil) + case ftDir: + c.Assert(os.Mkdir(path, os.FileMode(0777)), checker.IsNil) + case ftSymlink: + c.Assert(os.Symlink(fd.contents, path), checker.IsNil) + } + } +} + +type testContainerOptions struct { + addContent bool + readOnly bool + volumes []string + workDir string + command string +} + +func makeTestContainer(c *check.C, options testContainerOptions) (containerID string) { + if options.addContent { + mkContentCmd := defaultMkContentCommand() + if options.command == "" { + options.command = mkContentCmd + } else { + options.command = fmt.Sprintf("%s && %s", defaultMkContentCommand(), options.command) + } + } + + if options.command == "" { + options.command = "#(nop)" + } + + args := []string{"run", "-d"} + + for _, volume := range options.volumes { + args = append(args, "-v", volume) + } + + if options.workDir != "" { + args = append(args, "-w", options.workDir) + } + + if options.readOnly { + args = append(args, "--read-only") + } + + args = append(args, "busybox", "/bin/sh", "-c", options.command) + + out, _ := dockerCmd(c, args...) + + containerID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + + exitCode := strings.TrimSpace(out) + if exitCode != "0" { + out, _ = dockerCmd(c, "logs", containerID) + } + c.Assert(exitCode, checker.Equals, "0", check.Commentf("failed to make test container: %s", out)) + + return +} + +func makeCatFileCommand(path string) string { + return fmt.Sprintf("if [ -f %s ]; then cat %s; fi", path, path) +} + +func cpPath(pathElements ...string) string { + localizedPathElements := make([]string, len(pathElements)) + for i, path := range pathElements { + localizedPathElements[i] = filepath.FromSlash(path) + } + return strings.Join(localizedPathElements, string(filepath.Separator)) +} + +func cpPathTrailingSep(pathElements ...string) string { + return fmt.Sprintf("%s%c", cpPath(pathElements...), filepath.Separator) +} + +func containerCpPath(containerID string, pathElements ...string) string { + joined := strings.Join(pathElements, "/") + return fmt.Sprintf("%s:%s", containerID, joined) +} + +func containerCpPathTrailingSep(containerID string, pathElements ...string) string { + return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...)) +} + +func runDockerCp(c *check.C, src, dst string) (err error) { + c.Logf("running `docker cp %s %s`", src, dst) + + args := []string{"cp", src, dst} + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker cp` command: %s: %s", err, out) + } + + return +} + +func startContainerGetOutput(c *check.C, containerID string) (out string, err error) { + c.Logf("running `docker start -a %s`", containerID) + + args := []string{"start", "-a", containerID} + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker start` command: %s: %s", err, out) + } + + return +} + +func getTestDir(c *check.C, label string) (tmpDir string) { + var err error + + tmpDir, err = ioutil.TempDir("", label) + // unable to make temporary directory + c.Assert(err, checker.IsNil) + + return +} + +func isCpNotExist(err error) bool { + return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified") +} + +func isCpDirNotExist(err error) bool { + return strings.Contains(err.Error(), archive.ErrDirNotExists.Error()) +} + +func isCpNotDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect") +} + +func isCpCannotCopyDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error()) +} + +func isCpCannotCopyReadOnly(err error) bool { + return strings.Contains(err.Error(), "marked read-only") +} + +func isCannotOverwriteNonDirWithDir(err error) bool { + return strings.Contains(err.Error(), "cannot overwrite non-directory") +} + +func fileContentEquals(c *check.C, filename, contents string) (err error) { + c.Logf("checking that file %q contains %q\n", filename, contents) + + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return + } + + expectedBytes, err := ioutil.ReadAll(strings.NewReader(contents)) + if err != nil { + return + } + + if !bytes.Equal(fileBytes, expectedBytes) { + err = fmt.Errorf("file content not equal - expected %q, got %q", string(expectedBytes), string(fileBytes)) + } + + return +} + +func symlinkTargetEquals(c *check.C, symlink, expectedTarget string) (err error) { + c.Logf("checking that the symlink %q points to %q\n", symlink, expectedTarget) + + actualTarget, err := os.Readlink(symlink) + if err != nil { + return + } + + if actualTarget != expectedTarget { + err = fmt.Errorf("symlink target points to %q not %q", actualTarget, expectedTarget) + } + + return +} + +func containerStartOutputEquals(c *check.C, containerID, contents string) (err error) { + c.Logf("checking that container %q start output contains %q\n", containerID, contents) + + out, err := startContainerGetOutput(c, containerID) + if err != nil { + return + } + + if out != contents { + err = fmt.Errorf("output contents not equal - expected %q, got %q", contents, out) + } + + return +} + +func defaultVolumes(tmpDir string) []string { + if SameHostDaemon.Condition() { + return []string{ + "/vol1", + fmt.Sprintf("%s:/vol2", tmpDir), + fmt.Sprintf("%s:/vol3", filepath.Join(tmpDir, "vol3")), + fmt.Sprintf("%s:/vol_ro:ro", filepath.Join(tmpDir, "vol_ro")), + } + } + + // Can't bind-mount volumes with separate host daemon. + return []string{"/vol1", "/vol2", "/vol3", "/vol_ro:/vol_ro:ro"} +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go new file mode 100644 index 0000000..515a340 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go @@ -0,0 +1,513 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strings" + "time" + + "os/exec" + + "io/ioutil" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-connections/nat" + "github.com/go-check/check" +) + +// Make sure we can create a simple container with some args +func (s *DockerSuite) TestCreateArgs(c *check.C) { + // Intentionally clear entrypoint, as the Windows busybox image needs an entrypoint, which breaks this test + out, _ := dockerCmd(c, "create", "--entrypoint=", "busybox", "command", "arg1", "arg2", "arg with space", "-c", "flags") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + ID string + Created time.Time + Path string + Args []string + Image string + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(string(cont.Path), checker.Equals, "command", check.Commentf("Unexpected container path. Expected command, received: %s", cont.Path)) + + b := false + expected := []string{"arg1", "arg2", "arg with space", "-c", "flags"} + for i, arg := range expected { + if arg != cont.Args[i] { + b = true + break + } + } + if len(cont.Args) != len(expected) || b { + c.Fatalf("Unexpected args. Expected %v, received: %v", expected, cont.Args) + } + +} + +// Make sure we can grow the container's rootfs at creation time. +func (s *DockerSuite) TestCreateGrowRootfs(c *check.C) { + // Windows and Devicemapper support growing the rootfs + if daemonPlatform != "windows" { + testRequires(c, Devicemapper) + } + out, _ := dockerCmd(c, "create", "--storage-opt", "size=120G", "busybox") + + cleanedContainerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, cleanedContainerID, "HostConfig.StorageOpt") + c.Assert(inspectOut, checker.Equals, "map[size:120G]") +} + +// Make sure we cannot shrink the container's rootfs at creation time. +func (s *DockerSuite) TestCreateShrinkRootfs(c *check.C) { + testRequires(c, Devicemapper) + + // Ensure this fails because of the defaultBaseFsSize is 10G + out, _, err := dockerCmdWithError("create", "--storage-opt", "size=5G", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Container size cannot be smaller than") +} + +// Make sure we can set hostconfig options too +func (s *DockerSuite) TestCreateHostConfig(c *check.C) { + out, _ := dockerCmd(c, "create", "-P", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PublishAllPorts bool + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PublishAllPorts, check.NotNil, check.Commentf("Expected PublishAllPorts, got false")) +} + +func (s *DockerSuite) TestCreateWithPortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 4, check.Commentf("Expected 4 ports bindings, got %d", len(cont.HostConfig.PortBindings))) + + for k, v := range cont.HostConfig.PortBindings { + c.Assert(v, checker.HasLen, 1, check.Commentf("Expected 1 ports binding, for the port %s but found %s", k, v)) + c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) + + } + +} + +func (s *DockerSuite) TestCreateWithLargePortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 65535) + + for k, v := range cont.HostConfig.PortBindings { + c.Assert(v, checker.HasLen, 1) + c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) + } + +} + +// "test123" should be printed by docker create + start +func (s *DockerSuite) TestCreateEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "create", "busybox", "echo", "test123") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "start", "-ai", cleanedContainerID) + c.Assert(out, checker.Equals, "test123\n", check.Commentf("container should've printed 'test123', got %q", out)) + +} + +func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + name := "test_create_volume" + dockerCmd(c, "create", "--name", name, "-v", prefix+slash+"foo", "busybox") + + dir, err := inspectMountSourceField(name, prefix+slash+"foo") + c.Assert(err, check.IsNil, check.Commentf("Error getting volume host path: %q", err)) + + if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { + c.Fatalf("Volume was not created") + } + if err != nil { + c.Fatalf("Error statting volume host path: %q", err) + } + +} + +func (s *DockerSuite) TestCreateLabels(c *check.C) { + name := "test_create_labels" + expected := map[string]string{"k1": "v1", "k2": "v2"} + dockerCmd(c, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox") + + actual := make(map[string]string) + inspectFieldAndMarshall(c, name, "Config.Labels", &actual) + + if !reflect.DeepEqual(expected, actual) { + c.Fatalf("Expected %s got %s", expected, actual) + } +} + +func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) { + imageName := "testcreatebuildlabel" + _, err := buildImage(imageName, + `FROM busybox + LABEL k1=v1 k2=v2`, + true) + + c.Assert(err, check.IsNil) + + name := "test_create_labels_from_image" + expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"} + dockerCmd(c, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName) + + actual := make(map[string]string) + inspectFieldAndMarshall(c, name, "Config.Labels", &actual) + + if !reflect.DeepEqual(expected, actual) { + c.Fatalf("Expected %s got %s", expected, actual) + } +} + +func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) { + image := "busybox" + // Busybox on Windows does not implement hostname command + if daemonPlatform == "windows" { + image = WindowsBaseImage + } + out, _ := dockerCmd(c, "run", "-h", "web.0", image, "hostname") + c.Assert(strings.TrimSpace(out), checker.Equals, "web.0", check.Commentf("hostname not set, expected `web.0`, got: %s", out)) + +} + +func (s *DockerSuite) TestCreateRM(c *check.C) { + // Test to make sure we can 'rm' a new container that is in + // "Created" state, and has ever been run. Test "rm -f" too. + + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + dockerCmd(c, "rm", cID) + + // Now do it again so we can "rm -f" this time + out, _ = dockerCmd(c, "create", "busybox") + + cID = strings.TrimSpace(out) + dockerCmd(c, "rm", "-f", cID) +} + +func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { + // Uses Linux specific functionality (--ipc) + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "create", "busybox") + id := strings.TrimSpace(out) + + dockerCmd(c, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox") +} + +func (s *DockerSuite) TestCreateByImageID(c *check.C) { + imageName := "testcreatebyimageid" + imageID, err := buildImage(imageName, + `FROM busybox + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + truncatedImageID := stringid.TruncateID(imageID) + + dockerCmd(c, "create", imageID) + dockerCmd(c, "create", truncatedImageID) + dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID)) + + // Ensure this fails + out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "Error parsing reference"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } + + out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "Unable to find image"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } +} + +func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-create") + + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) + + // Try untrusted create to ensure we pushed the tag to the registry + createCmd = exec.Command(dockerBinary, "create", "--disable-content-trust=true", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create with --disable-content-trust:\n%s", out)) + +} + +func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) + withTagName := fmt.Sprintf("%s:latest", repoName) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", withTagName) + dockerCmd(c, "push", withTagName) + dockerCmd(c, "rmi", withTagName) + + // Try trusted create on untrusted tag + createCmd := exec.Command(dockerBinary, "create", withTagName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(string(out), checker.Contains, fmt.Sprintf("does not have trust data for %s", repoName), check.Commentf("Missing expected output on trusted create:\n%s", out)) + +} + +func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-create") + + // Try create + createCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated-create", "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestCreateWhenCertExpired(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-create-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try create + createCmd := exec.Command(dockerBinary, "create", "--disable-content-trust", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) + + }) +} + +func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilcreate-local-config-dir") + c.Assert(err, check.IsNil) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) + + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + c.Assert(err, check.IsNil) + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + // Now, try creating with the original client from this new trust server. This should fail because the new root is invalid. + createCmd = exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + if err == nil { + c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) + } + if !strings.Contains(out, "could not rotate trust to a new trusted root") { + c.Fatalf("Missing expected output on trusted create:\n%s", out) + } + +} + +func (s *DockerSuite) TestCreateStopSignal(c *check.C) { + name := "test_create_stop_signal" + dockerCmd(c, "create", "--name", name, "--stop-signal", "9", "busybox") + + res := inspectFieldJSON(c, name, "Config.StopSignal") + c.Assert(res, checker.Contains, "9") + +} + +func (s *DockerSuite) TestCreateWithWorkdir(c *check.C) { + name := "foo" + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + dir := prefix + slash + "home" + slash + "foo" + slash + "bar" + + dockerCmd(c, "create", "--name", name, "-w", dir, "busybox") + // Windows does not create the workdir until the container is started + if daemonPlatform == "windows" { + dockerCmd(c, "start", name) + } + dockerCmd(c, "cp", fmt.Sprintf("%s:%s", name, dir), prefix+slash+"tmp") +} + +func (s *DockerSuite) TestCreateWithInvalidLogOpts(c *check.C) { + name := "test-invalidate-log-opts" + out, _, err := dockerCmdWithError("create", "--name", name, "--log-opt", "invalid=true", "busybox") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown log opt") + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Not(checker.Contains), name) +} + +// #20972 +func (s *DockerSuite) TestCreate64ByteHexID(c *check.C) { + out := inspectField(c, "busybox", "Id") + imageID := strings.TrimPrefix(strings.TrimSpace(string(out)), "sha256:") + + dockerCmd(c, "create", imageID) +} + +// Test case for #23498 +func (s *DockerSuite) TestCreateUnsetEntrypoint(c *check.C) { + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an entrypoint" +exec "$@"`, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "create", "--entrypoint=", name, "echo", "foo") + id := strings.TrimSpace(out) + c.Assert(id, check.Not(check.Equals), "") + out, _ = dockerCmd(c, "start", "-a", id) + c.Assert(strings.TrimSpace(out), check.Equals, "foo") +} + +// #22471 +func (s *DockerSuite) TestCreateStopTimeout(c *check.C) { + name1 := "test_create_stop_timeout_1" + dockerCmd(c, "create", "--name", name1, "--stop-timeout", "15", "busybox") + + res := inspectFieldJSON(c, name1, "Config.StopTimeout") + c.Assert(res, checker.Contains, "15") + + name2 := "test_create_stop_timeout_2" + dockerCmd(c, "create", "--name", name2, "busybox") + + res = inspectFieldJSON(c, name2, "Config.StopTimeout") + c.Assert(res, checker.Contains, "null") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go new file mode 100644 index 0000000..f91edc6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go @@ -0,0 +1,317 @@ +// +build linux + +package main + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/mount" + "github.com/go-check/check" +) + +// TestDaemonRestartWithPluginEnabled tests state restore for an enabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "true") +} + +// TestDaemonRestartWithPluginDisabled tests state restore for a disabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName, "--disable"); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "false") +} + +// TestDaemonKillLiveRestoreWithPlugins SIGKILLs daemon started with --live-restore. +// Plugins should continue to run. +func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start("--live-restore"); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + if err := s.d.Restart("--live-restore"); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Kill(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + cmd := exec.Command("pgrep", "-f", pluginProcessName) + if out, ec, err := runCommandWithOutput(cmd); ec != 0 { + c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) + } +} + +// TestDaemonShutdownLiveRestoreWithPlugins SIGTERMs daemon started with --live-restore. +// Plugins should continue to run. +func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start("--live-restore"); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + if err := s.d.Restart("--live-restore"); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + cmd := exec.Command("pgrep", "-f", pluginProcessName) + if out, ec, err := runCommandWithOutput(cmd); ec != 0 { + c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) + } +} + +// TestDaemonShutdownWithPlugins shuts down running plugins. +func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network, SameHostDaemon) + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + for { + if err := syscall.Kill(s.d.cmd.Process.Pid, 0); err == syscall.ESRCH { + break + } + } + + cmd := exec.Command("pgrep", "-f", pluginProcessName) + if out, ec, err := runCommandWithOutput(cmd); ec != 1 { + c.Fatalf("Expected exit code '1', got %d err: %v output: %s ", ec, err, out) + } + + s.d.Start("--live-restore") + cmd = exec.Command("pgrep", "-f", pluginProcessName) + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +// TestVolumePlugin tests volume creation using a plugin. +func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) { + testRequires(c, IsAmd64, Network) + + volName := "plugin-volume" + destDir := "/tmp/data/" + destFile := "foo" + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + out, err := s.d.Cmd("plugin", "install", pName, "--grant-all-permissions") + if err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + pluginID, err := s.d.Cmd("plugin", "inspect", "-f", "{{.Id}}", pName) + pluginID = strings.TrimSpace(pluginID) + if err != nil { + c.Fatalf("Could not retrieve plugin ID: %v %s", err, pluginID) + } + mountpointPrefix := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs") + defer func() { + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + + exists, err := existsMountpointWithPrefix(mountpointPrefix) + c.Assert(err, checker.IsNil) + c.Assert(exists, checker.Equals, false) + + }() + + out, err = s.d.Cmd("volume", "create", "-d", pName, volName) + if err != nil { + c.Fatalf("Could not create volume: %v %s", err, out) + } + defer func() { + if out, err := s.d.Cmd("volume", "remove", volName); err != nil { + c.Fatalf("Could not remove volume: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("volume", "ls") + if err != nil { + c.Fatalf("Could not list volume: %v %s", err, out) + } + c.Assert(out, checker.Contains, volName) + c.Assert(out, checker.Contains, pName) + + mountPoint, err := s.d.Cmd("volume", "inspect", volName, "--format", "{{.Mountpoint}}") + if err != nil { + c.Fatalf("Could not inspect volume: %v %s", err, mountPoint) + } + mountPoint = strings.TrimSpace(mountPoint) + + out, err = s.d.Cmd("run", "--rm", "-v", volName+":"+destDir, "busybox", "touch", destDir+destFile) + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs", mountPoint, destFile) + _, err = os.Lstat(path) + c.Assert(err, checker.IsNil) + + exists, err := existsMountpointWithPrefix(mountpointPrefix) + c.Assert(err, checker.IsNil) + c.Assert(exists, checker.Equals, true) +} + +func (s *DockerDaemonSuite) TestGraphdriverPlugin(c *check.C) { + testRequires(c, Network, IsAmd64, DaemonIsLinux, overlay2Supported, ExperimentalDaemon) + + s.d.Start() + + // install the plugin + plugin := "cpuguy83/docker-overlay2-graphdriver-plugin" + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", plugin) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // restart the daemon with the plugin set as the storage driver + s.d.Restart("-s", plugin, "--storage-opt", "overlay2.override_kernel_check=1") + + // run a container + out, err = s.d.Cmd("run", "--rm", "busybox", "true") // this will pull busybox using the plugin + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) { + testRequires(c, DaemonIsLinux, Network, IsAmd64) + + s.d.Start("--live-restore=true") + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, err = s.d.Cmd("volume", "create", "--driver", pName, "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + s.d.Restart("--live-restore=true") + + out, err = s.d.Cmd("plugin", "disable", pName) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "in use") + + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("plugin", "disable", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("plugin", "rm", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) { + mounts, err := mount.GetMounts() + if err != nil { + return false, err + } + for _, mnt := range mounts { + if strings.HasPrefix(mnt.Mountpoint, mountpointPrefix) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go new file mode 100644 index 0000000..3a74fe2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go @@ -0,0 +1,2988 @@ +// +build linux + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" + "github.com/docker/libnetwork/iptables" + "github.com/docker/libtrust" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// TestLegacyDaemonCommand test starting docker daemon using "deprecated" docker daemon +// command. Remove this test when we remove this. +func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *check.C) { + cmd := exec.Command(dockerBinary, "daemon", "--storage-driver=vfs", "--debug") + err := cmd.Start() + c.Assert(err, checker.IsNil, check.Commentf("could not start daemon using 'docker daemon'")) + + c.Assert(cmd.Process.Kill(), checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top1: err=%v\n%s", err, out) + } + // --restart=no by default + if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top2: err=%v\n%s", err, out) + } + + testRun := func(m map[string]bool, prefix string) { + var format string + for cont, shouldRun := range m { + out, err := s.d.Cmd("ps") + if err != nil { + c.Fatalf("Could not run ps: err=%v\n%q", err, out) + } + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + if shouldRun != strings.Contains(out, cont) { + c.Fatalf(format, prefix, cont) + } + } + } + + testRun(map[string]bool{"top1": true, "top2": true}, "") + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("run", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { + c.Fatal(err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { + c.Fatal(err, out) + } + + out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1") + c.Assert(err, check.IsNil) + + if _, err := inspectMountPointJSON(out, "/foo"); err != nil { + c.Fatalf("Expected volume to exist: /foo, error: %v\n", err) + } +} + +// #11008 +func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top") + c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) + + out, err = s.d.Cmd("run", "-d", "--name", "top2", "--restart", "unless-stopped", "busybox:latest", "top") + c.Assert(err, check.IsNil, check.Commentf("run top2: %v", out)) + + testRun := func(m map[string]bool, prefix string) { + var format string + for name, shouldRun := range m { + out, err := s.d.Cmd("ps") + c.Assert(err, check.IsNil, check.Commentf("run ps: %v", out)) + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + c.Assert(strings.Contains(out, name), check.Equals, shouldRun, check.Commentf(format, prefix, name)) + } + } + + // both running + testRun(map[string]bool{"top1": true, "top2": true}, "") + + out, err = s.d.Cmd("stop", "top1") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("stop", "top2") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // both stopped + testRun(map[string]bool{"top1": false, "top2": false}, "") + + err = s.d.Restart() + c.Assert(err, check.IsNil) + + // restart=always running + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") + + out, err = s.d.Cmd("start", "top2") + c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out)) + + err = s.d.Restart() + c.Assert(err, check.IsNil) + + // both running + testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ") + +} + +func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "test1", "--restart", "on-failure:3", "busybox:latest", "false") + c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) + + // wait test1 to stop + hostArgs := []string{"--host", s.d.sock()} + err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...) + c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) + + // record last start time + out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + lastStartTime := out + + err = s.d.Restart() + c.Assert(err, check.IsNil) + + // test1 shouldn't restart at all + err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 0, hostArgs...) + c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) + + // make sure test1 isn't restarted when daemon restart + // if "StartAt" time updates, means test1 was once restarted. + out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Equals, lastStartTime, check.Commentf("test1 shouldn't start after daemon restarts")) +} + +func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { + if err := s.d.Start("--iptables=false"); err != nil { + c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) + } +} + +// Make sure we cannot shrink base device at daemon restart. +func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) { + testRequires(c, Devicemapper) + c.Assert(s.d.Start(), check.IsNil) + + oldBasesizeBytes := s.d.getBaseDeviceSize(c) + var newBasesizeBytes int64 = 1073741824 //1GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.IsNil, check.Commentf("daemon should not have started as new base device size is less than existing base device size: %v", err)) + } + c.Assert(s.d.Stop(), check.IsNil) +} + +// Make sure we can grow base device at daemon restart. +func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { + testRequires(c, Devicemapper) + c.Assert(s.d.Start(), check.IsNil) + + oldBasesizeBytes := s.d.getBaseDeviceSize(c) + + var newBasesizeBytes int64 = 53687091200 //50GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + c.Skip(fmt.Sprintf("New base device size (%v) must be greater than (%s)", units.HumanSize(float64(newBasesizeBytes)), units.HumanSize(float64(oldBasesizeBytes)))) + } + + err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err)) + + basesizeAfterRestart := s.d.getBaseDeviceSize(c) + newBasesize, err := convertBasesize(newBasesizeBytes) + c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err)) + c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("Basesize passed is not equal to Basesize set")) + c.Assert(s.d.Stop(), check.IsNil) +} + +// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and +// no longer has an IP associated, we should gracefully handle that case and associate +// an IP with it rather than fail daemon start +func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { + // rather than depending on brctl commands to verify docker0 is created and up + // let's start the daemon and stop it, and then make a modification to run the + // actual test + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // now we will remove the ip from docker0 and then try starting the daemon + ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") + stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) + if err != nil { + c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + } + + if err := s.d.Start(); err != nil { + warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" + c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + // make sure the container is not running + runningOut, err := s.d.Cmd("inspect", "--format={{.State.Running}}", "top") + if err != nil { + c.Fatalf("Could not inspect on container: %s, %v", out, err) + } + if strings.TrimSpace(runningOut) != "true" { + c.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) + } +} + +// TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true that the docker0 bridge +// has the fe80::1 address and that a container is assigned a link-local address +func (s *DockerDaemonSuite) TestDaemonIPv6Enabled(c *check.C) { + testRequires(c, IPv6) + + setupV6(c) + defer teardownV6(c) + + if err := s.d.StartWithBusybox("--ipv6"); err != nil { + c.Fatal(err) + } + + iface, err := net.InterfaceByName("docker0") + if err != nil { + c.Fatalf("Error getting docker0 interface: %v", err) + } + + addrs, err := iface.Addrs() + if err != nil { + c.Fatalf("Error getting addresses for docker0 interface: %v", err) + } + + var found bool + expected := "fe80::1/64" + + for i := range addrs { + if addrs[i].String() == expected { + found = true + break + } + } + + if !found { + c.Fatalf("Bridge does not have an IPv6 Address") + } + + if out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { + c.Fatalf("Could not run container: %s, %v", out, err) + } + + out, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.LinkLocalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip == nil { + c.Fatalf("Container should have a link-local IPv6 address") + } + + out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip != nil { + c.Fatalf("Container should not have a global IPv6 address: %v", out) + } +} + +// TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR +// that running containers are given a link-local and global IPv6 address +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *check.C) { + // IPv6 setup is messing with local bridge address. + testRequires(c, SameHostDaemon) + setupV6(c) + defer teardownV6(c) + + err := s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100") + c.Assert(err, checker.IsNil, check.Commentf("Could not start daemon with busybox: %v", err)) + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + out = strings.Trim(out, " \r\n'") + + c.Assert(err, checker.IsNil, check.Commentf(out)) + + ip := net.ParseIP(out) + c.Assert(ip, checker.NotNil, check.Commentf("Container should have a global IPv6 address")) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.IPv6Gateway}}", "ipv6test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:2::100", check.Commentf("Container should have a global IPv6 gateway")) +} + +// TestDaemonIPv6FixedCIDRAndMac checks that when the daemon is started with ipv6 fixed CIDR +// the running containers are given an IPv6 address derived from the MAC address and the ipv6 fixed CIDR +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *check.C) { + // IPv6 setup is messing with local bridge address. + testRequires(c, SameHostDaemon) + setupV6(c) + defer teardownV6(c) + + err := s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6=2001:db8:1::/64") + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox") + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + c.Assert(err, checker.IsNil) + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:1::aabb:ccdd:eeff") +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { + c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { + if err := s.d.Start("--log-level=debug"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { + // we creating new daemons to create new logFile + if err := s.d.Start("--log-level=fatal"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { + if err := s.d.Start("-D"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { + if err := s.d.Start("--debug"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { + if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { + listeningPorts := [][]string{ + {"0.0.0.0", "0.0.0.0", "5678"}, + {"127.0.0.1", "127.0.0.1", "1234"}, + {"localhost", "127.0.0.1", "1235"}, + } + + cmdArgs := make([]string, 0, len(listeningPorts)*2) + for _, hostDirective := range listeningPorts { + cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) + } + + if err := s.d.StartWithBusybox(cmdArgs...); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + for _, hostDirective := range listeningPorts { + output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") + if err == nil { + c.Fatalf("Container should not start, expected port already allocated error: %q", output) + } else if !strings.Contains(output, "port is already allocated") { + c.Fatalf("Expected port is already allocated error: %q", output) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { + // TODO: skip or update for Windows daemon + os.Remove("/etc/docker/key.json") + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + s.d.Stop() + + k, err := libtrust.LoadKeyFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error opening key file") + } + kid := k.KeyID() + // Test Key ID is a valid fingerprint (e.g. QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) + if len(kid) != 59 { + c.Fatalf("Bad key ID: %s", kid) + } +} + +func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) { + // TODO: skip or update for Windows daemon + os.Remove("/etc/docker/key.json") + k1, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + c.Fatalf("Error generating private key: %s", err) + } + if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { + c.Fatalf("Error creating .docker directory: %s", err) + } + if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { + c.Fatalf("Error saving private key: %s", err) + } + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + s.d.Stop() + + k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error opening key file") + } + if k1.KeyID() != k2.KeyID() { + c.Fatalf("Key not migrated") + } +} + +// GH#11320 - verify that the daemon exits on failure properly +// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means +// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required +func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { + //attempt to start daemon with incorrect flags (we know -b and --bip conflict) + if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { + //verify we got the right error + if !strings.Contains(err.Error(), "Daemon exited") { + c.Fatalf("Expected daemon not to start, got %v", err) + } + // look in the log and make sure we got the message that daemon is shutting down + runCmd := exec.Command("grep", "Error starting daemon", s.d.LogFileName()) + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err) + } + } else { + //if we didn't get an error and the daemon is running, this is a failure + c.Fatal("Conflicting options should cause the daemon to error out with a failure") + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { + d := s.d + err := d.Start("--bridge", "nosuchbridge") + c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail")) + defer d.Restart() + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = d.StartWithBusybox("--bridge", bridgeName) + c.Assert(err, check.IsNil) + + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", + ipTablesSearchString, out)) + + _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP := d.findContainerIP("ExtContainer") + ip := net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeNone(c *check.C) { + // start with bridge none + d := s.d + err := d.StartWithBusybox("--bridge", "none") + c.Assert(err, check.IsNil) + defer d.Restart() + + // verify docker0 iface is not there + out, _, err := runCommandWithOutput(exec.Command("ifconfig", "docker0")) + c.Assert(err, check.NotNil, check.Commentf("docker0 should not be present if daemon started with --bridge=none")) + c.Assert(strings.Contains(out, "Device not found"), check.Equals, true) + + // verify default "bridge" network is not there + out, err = d.Cmd("network", "inspect", "bridge") + c.Assert(err, check.NotNil, check.Commentf("\"bridge\" network should not be present if daemon started with --bridge=none")) + c.Assert(strings.Contains(out, "No such network"), check.Equals, true) +} + +func createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) { + args := []string{"link", "add", "name", ifName, "type", ifType} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + + ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up") + out, _, err = runCommandWithOutput(ifCfgCmd) + return out, err +} + +func deleteInterface(c *check.C, ifName string) { + ifCmd := exec.Command("ip", "link", "delete", ifName) + out, _, err := runCommandWithOutput(ifCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + flushCmd := exec.Command("iptables", "-t", "nat", "--flush") + out, _, err = runCommandWithOutput(flushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + flushCmd = exec.Command("iptables", "--flush") + out, _, err = runCommandWithOutput(flushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { + // TestDaemonBridgeIP Steps + // 1. Delete the existing docker0 Bridge + // 2. Set --bip daemon configuration and start the new Docker Daemon + // 3. Check if the bip config has taken effect using ifconfig and iptables commands + // 4. Launch a Container and make sure the IP-Address is in the expected subnet + // 5. Delete the docker0 Bridge + // 6. Restart the Docker Daemon (via deferred action) + // This Restart takes care of bringing docker0 interface back to auto-assigned IP + + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1/24" + ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + err := d.StartWithBusybox("--bip", bridgeIP) + c.Assert(err, check.IsNil) + defer d.Restart() + + ifconfigSearchString := ip.String() + ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge) + out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd) + c.Assert(err, check.IsNil) + + c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true, + check.Commentf("ifconfig output should have contained %q, but was %q", + ifconfigSearchString, out)) + + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", + ipTablesSearchString, out)) + + out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP := d.findContainerIP("test") + ip = net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) { + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + defer s.d.Restart() + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // now we will change the docker0's IP and then try starting the daemon + bridgeIP := "192.169.100.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + ipCmd := exec.Command("ifconfig", "docker0", bridgeIP) + stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) + if err != nil { + c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + } + + if err := s.d.Start("--bip", bridgeIP); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + //check if the iptables contains new bridgeIP MASQUERADE rule + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} + err = d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + for i := 0; i < 4; i++ { + cName := "Container" + strconv.Itoa(i) + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + if err != nil { + c.Assert(strings.Contains(out, "no available IPv4 addresses"), check.Equals, true, + check.Commentf("Could not run a Container : %s %s", err.Error(), out)) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "10.2.2.1/16" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = d.StartWithBusybox("--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24") + c.Assert(err, check.IsNil) + defer s.d.Restart() + + out, err = d.Cmd("run", "-d", "--name", "bb", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + defer d.Cmd("stop", "bb") + + out, err = d.Cmd("exec", "bb", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(out, checker.Equals, "10.2.2.0\n") + + out, err = d.Cmd("run", "--rm", "busybox", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Equals, "10.2.2.2\n") +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "172.27.42.1/16" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = d.StartWithBusybox("--bridge", bridgeName, "--fixed-cidr", bridgeIP) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + out, err = d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + cid1 := strings.TrimSpace(out) + defer d.Cmd("stop", cid1) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + + err := d.StartWithBusybox("--bip", bridgeIPNet) + c.Assert(err, check.IsNil) + defer d.Restart() + + expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", + bridgeIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + gatewayIP := "192.169.1.254" + + err := d.StartWithBusybox("--bip", bridgeIPNet, "--default-gateway", gatewayIP) + c.Assert(err, check.IsNil) + defer d.Restart() + + expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Explicit default gateway should be %s, but default route was '%s'", + gatewayIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + // Program a custom default gateway outside of the container subnet, daemon should accept it and start + err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") + c.Assert(err, check.IsNil) + + deleteInterface(c, defaultNetworkBridge) + s.d.Restart() +} + +func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + // Start daemon without docker0 bridge + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + discoveryBackend := "consul://consuladdr:consulport/some/path" + err := s.d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) + c.Assert(err, checker.IsNil) + + // Start daemon with docker0 bridge + result := icmd.RunCommand("ifconfig", defaultNetworkBridge) + c.Assert(result, icmd.Matches, icmd.Success) + + err = s.d.Restart(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) + c.Assert(err, checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { + d := s.d + + ipStr := "192.170.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + args := []string{"--ip", ip.String()} + err := d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.NotNil, + check.Commentf("Running a container must fail with an invalid --ip option")) + c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true) + + ifName := "dummy" + out, err = createInterface(c, "dummy", ifName, ipStr) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, ifName) + + _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.IsNil) + + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String()) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) +} + +func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { + testRequires(c, bridgeNfIptables) + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--icc=false"} + err = d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + + // Pinging another container must fail with --icc=false + pingContainers(c, d, true) + + ipStr := "192.171.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + ifName := "icc-dummy" + + createInterface(c, "dummy", ifName, ipStr) + + // But, Pinging external or a Host interface must succeed + pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String()) + runArgs := []string{"run", "--rm", "busybox", "sh", "-c", pingCmd} + _, err = d.Cmd(runArgs...) + c.Assert(err, check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--icc=false"} + err = d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + + out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) { + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = s.d.StartWithBusybox("--bridge", bridgeName, "--icc=false") + c.Assert(err, check.IsNil) + defer s.d.Restart() + + _, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") + c.Assert(err, check.IsNil) + + childIP := s.d.findContainerIP("child") + parentIP := s.d.findContainerIP("parent") + + sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} + destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} + if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) { + c.Fatal("Iptables rules not found") + } + + s.d.Cmd("rm", "--link", "parent/http") + if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) { + c.Fatal("Iptables rules should be removed when unlink") + } + + s.d.Cmd("kill", "child") + s.d.Cmd("kill", "parent") +} + +func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + + if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") + if err != nil { + c.Fatal(out, err) + } + + outArr := strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile := strings.TrimSpace(outArr[0]) + nproc := strings.TrimSpace(outArr[1]) + + if nofile != "42" { + c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + } + + // Now restart daemon with a new default + if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil { + c.Fatal(err) + } + + out, err = s.d.Cmd("start", "-a", "test") + if err != nil { + c.Fatal(err) + } + + outArr = strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile = strings.TrimSpace(outArr[0]) + nproc = strings.TrimSpace(outArr[1]) + + if nofile != "43" { + c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + } +} + +// #11315 +func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil { + c.Fatal(err, out) + } + + if out, err := s.d.Cmd("rename", "test", "test2"); err != nil { + c.Fatal(err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("start", "test2"); err != nil { + c.Fatal(err, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, check.IsNil, check.Commentf(out)) + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=none", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { + if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { + if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=json-file", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) { + c.Assert(s.d.StartWithBusybox("--log-driver=none"), checker.IsNil) + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("logs", "test") + c.Assert(err, check.NotNil, check.Commentf("Logs should fail with 'none' driver")) + expected := `"logs" command is supported only for "json-file" and "journald" logging drivers (got: none)` + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { + dir, err := ioutil.TempDir("", "socket-cleanup-test") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + sockPath := filepath.Join(dir, "docker.sock") + if err := s.d.Start("--host", "unix://"+sockPath); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(sockPath); err != nil { + c.Fatal("socket does not exist") + } + + if err := s.d.Stop(); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { + c.Fatal("unix socket is not cleaned up") + } +} + +func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { + type Config struct { + Crv string `json:"crv"` + D string `json:"d"` + Kid string `json:"kid"` + Kty string `json:"kty"` + X string `json:"x"` + Y string `json:"y"` + } + + os.Remove("/etc/docker/key.json") + if err := s.d.Start(); err != nil { + c.Fatalf("Failed to start daemon: %v", err) + } + + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + config := &Config{} + bytes, err := ioutil.ReadFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error reading key.json file: %s", err) + } + + // byte[] to Data-Struct + if err := json.Unmarshal(bytes, &config); err != nil { + c.Fatalf("Error Unmarshal: %s", err) + } + + //replace config.Kid with the fake value + config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4" + + // NEW Data-Struct to byte[] + newBytes, err := json.Marshal(&config) + if err != nil { + c.Fatalf("Error Marshal: %s", err) + } + + // write back + if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil { + c.Fatalf("Error ioutil.WriteFile: %s", err) + } + + defer os.Remove("/etc/docker/key.json") + + if err := s.d.Start(); err == nil { + c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) + } + + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + + if !strings.Contains(string(content), "Public Key ID does not match") { + c.Fatalf("Missing KeyID message from daemon logs: %s", string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat") + if err != nil { + c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out) + } + containerID := strings.TrimSpace(out) + + if out, err := s.d.Cmd("kill", containerID); err != nil { + c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + errchan := make(chan error) + go func() { + if out, err := s.d.Cmd("wait", containerID); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on a stopped (killed) container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +// TestHTTPSInfo connects via two-way authenticated HTTPS to the info endpoint +func (s *DockerDaemonSuite) TestHTTPSInfo(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "info", + } + out, err := s.d.Cmd(args...) + if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } +} + +// TestHTTPSRun connects via two-way authenticated HTTPS to the create, attach, start, and wait endpoints. +// https://github.com/docker/docker/issues/19280 +func (s *DockerDaemonSuite) TestHTTPSRun(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.StartWithBusybox("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "run", "busybox", "echo", "TLS response", + } + out, err := s.d.Cmd(args...) + if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } + + if !strings.Contains(out, "TLS response") { + c.Fatalf("expected output to include `TLS response`, got %v", out) + } +} + +// TestTLSVerify verifies that --tlsverify=false turns on tls +func (s *DockerDaemonSuite) TestTLSVerify(c *check.C) { + out, err := exec.Command(dockerdBinary, "--tlsverify=false").CombinedOutput() + if err == nil || !strings.Contains(string(out), "Could not load X509 key pair") { + c.Fatalf("Daemon should not have started due to missing certs: %v\n%s", err, string(out)) + } +} + +// TestHTTPSInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. +func (s *DockerDaemonSuite) TestHTTPSInfoRogueCert(c *check.C) { + const ( + errBadCertificate = "bad certificate" + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...) + if err == nil || !strings.Contains(out, errBadCertificate) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out) + } +} + +// TestHTTPSInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint +// which provides a rogue server certificate and checks that it fails with the expected error +func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) { + const ( + errCaUnknown = "x509: certificate signed by unknown authority" + testDaemonRogueHTTPSAddr = "tcp://localhost:4272" + ) + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem", + "--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonRogueHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...) + if err == nil || !strings.Contains(out, errCaUnknown) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out) + } +} + +func pingContainers(c *check.C, d *Daemon, expectFailure bool) { + var dargs []string + if d != nil { + dargs = []string{"--host", d.sock()} + } + + args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, args...) + + args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c") + pingCmd := "ping -c 1 %s -W 1" + args = append(args, fmt.Sprintf(pingCmd, "alias1")) + _, _, err := dockerCmdWithError(args...) + + if expectFailure { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + args = append(dargs, "rm", "-f", "container1") + dockerCmd(c, args...) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + socket := filepath.Join(s.d.folder, "docker.sock") + + out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(s.d.Restart(), check.IsNil) +} + +// os.Kill should kill daemon ungracefully, leaving behind container mounts. +// A subsequent daemon restart shoud clean up said mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + // container mounts should exist even after daemon has crashed. + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + + // kill the container + runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", id) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Failed to run ctr, ExitCode: %d, err: %v output: %s id: %s\n", ec, err, out, id) + } + + // restart daemon. + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // Now, container mounts should be gone. + mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +// os.Interrupt should perform a graceful daemon shutdown and hence cleanup mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + + // Send SIGINT and daemon should clean up + c.Assert(s.d.cmd.Process.Signal(os.Interrupt), check.IsNil) + // Wait for the daemon to stop. + c.Assert(<-s.d.wait, checker.IsNil) + + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil) + + out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled: %s", out)) + + out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in bridge mode when bridge network is disabled: %s", out)) + // the extra grep and awk clean up the output of `ip` to only list the number and name of + // interfaces, allowing for different versions of ip (e.g. inside and outside the container) to + // be used while still verifying that the interface list is the exact same + cmd := exec.Command("sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err != nil { + c.Fatal("Failed to get host network interface") + } + out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, check.Equals, fmt.Sprintf("%s", stdout), + check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + t.Fatal(err) + } + if out, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top"); err != nil { + t.Fatal(out, err) + } + + if err := s.d.Restart(); err != nil { + t.Fatal(err) + } + // Container 'test' should be removed without error + if out, err := s.d.Cmd("rm", "test"); err != nil { + t.Fatal(out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top") + if err != nil { + c.Fatal(out, err) + } + + // Get sandbox key via inspect + out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "netns") + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + fileName := strings.Trim(out, " \r\n'") + + if out, err := s.d.Cmd("stop", "netns"); err != nil { + c.Fatal(out, err) + } + + // Test if the file still exists + out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) + out = strings.TrimSpace(out) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, check.Equals, fileName, check.Commentf("Output: %s", out)) + + // Remove the container and restart the daemon + if out, err := s.d.Cmd("rm", "netns"); err != nil { + c.Fatal(out, err) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // Test again and see now the netns file does not exist + out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) + out = strings.TrimSpace(out) + c.Assert(err, check.Not(check.IsNil), check.Commentf("Output: %s", out)) +} + +// tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is ignored +func (s *DockerDaemonSuite) TestDaemonTLSVerifyIssue13964(c *check.C) { + host := "tcp://localhost:4271" + c.Assert(s.d.Start("-H", host), check.IsNil) + cmd := exec.Command(dockerBinary, "-H", host, "info") + cmd.Env = []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"} + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, check.Not(check.IsNil), check.Commentf("%s", out)) + c.Assert(strings.Contains(out, "error during connect"), check.Equals, true) + +} + +func setupV6(c *check.C) { + // Hack to get the right IPv6 address on docker0, which has already been created + result := icmd.RunCommand("ip", "addr", "add", "fe80::1/64", "dev", "docker0") + result.Assert(c, icmd.Expected{}) +} + +func teardownV6(c *check.C) { + result := icmd.RunCommand("ip", "addr", "del", "fe80::1/64", "dev", "docker0") + result.Assert(c, icmd.Expected{}) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") + c.Assert(err, check.IsNil) + id := strings.TrimSpace(out) + + _, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("wait", id) + c.Assert(err, check.IsNil) + + out, err = s.d.Cmd("ps", "-q") + c.Assert(err, check.IsNil) + c.Assert(out, check.Equals, "") + + c.Assert(s.d.Restart(), check.IsNil) + + out, err = s.d.Cmd("ps", "-q") + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, id[:12]) +} + +func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) { + if err := s.d.StartWithBusybox("--log-opt=max-size=1k"); err != nil { + c.Fatal(err) + } + name := "logtest" + out, err := s.d.Cmd("run", "-d", "--log-opt=max-file=5", "--name", name, "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err)) + + out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Config }}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, checker.Contains, "max-size:1k") + c.Assert(out, checker.Contains, "max-file:5") + + out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Type }}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "json-file") +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil { + c.Fatal(err, out) + } + if out, err := s.d.Cmd("pause", "test"); err != nil { + c.Fatal(err, out) + } + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + errchan := make(chan error) + go func() { + out, err := s.d.Cmd("start", "test") + if err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + name := strings.TrimSpace(out) + if name != "test" { + errchan <- fmt.Errorf("Paused container start error on docker daemon restart, expected 'test' but got '%s'", name) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on start a container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + + c.Assert(s.d.Restart(), check.IsNil) + + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, check.NotNil, check.Commentf("should not be able to remove in use volume after daemon restart")) + c.Assert(out, checker.Contains, "in use") +} + +func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) { + c.Assert(s.d.Start(), check.IsNil) + + _, err := s.d.Cmd("volume", "create", "test") + c.Assert(err, check.IsNil) + c.Assert(s.d.Restart(), check.IsNil) + + _, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonCorruptedLogDriverAddress(c *check.C) { + c.Assert(s.d.Start("--log-driver=syslog", "--log-opt", "syslog-address=corrupted:42"), check.NotNil) + expected := "Failed to set log opts: syslog-address should be in form proto://address" + runCmd := exec.Command("grep", expected, s.d.LogFileName()) + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { + c.Assert(s.d.Start("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil) + expected := "Failed to set log opts: invalid fluentd-address corrupted:c: " + runCmd := exec.Command("grep", expected, s.d.LogFileName()) + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { + s.d.useDefaultHost = true + defer func() { + s.d.useDefaultHost = false + }() + c.Assert(s.d.Start(), check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) { + s.d.useDefaultTLSHost = true + defer func() { + s.d.useDefaultTLSHost = false + }() + if err := s.d.Start( + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem"); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + // The client with --tlsverify should also use default host localhost:2376 + tmpHost := os.Getenv("DOCKER_HOST") + defer func() { + os.Setenv("DOCKER_HOST", tmpHost) + }() + + os.Setenv("DOCKER_HOST", "") + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } +} + +func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + bridgeIP := "192.169.1.1" + bridgeRange := bridgeIP + "/30" + + err := s.d.StartWithBusybox("--bip", bridgeRange) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + var cont int + for { + contName := fmt.Sprintf("container%d", cont) + _, err = s.d.Cmd("run", "--name", contName, "-d", "busybox", "/bin/sleep", "2") + if err != nil { + // pool exhausted + break + } + ip, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.IPAddress}}'", contName) + c.Assert(err, check.IsNil) + + c.Assert(ip, check.Not(check.Equals), bridgeIP) + cont++ + } +} + +// Test daemon for no space left on device error +func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, Network) + + testDir, err := ioutil.TempDir("", "no-space-left-on-device-test") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + c.Assert(mount.MakeRShared(testDir), checker.IsNil) + defer mount.Unmount(testDir) + + // create a 2MiB image and mount it as graph root + // Why in a container? Because `mount` sometimes behaves weirdly and often fails outright on this test in debian:jessie (which is what the test suite runs under if run from the Makefile) + dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=2 count=0") + out, _, err := runCommandWithOutput(exec.Command("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img"))) // `mkfs.ext4` is not in busybox + c.Assert(err, checker.IsNil, check.Commentf(out)) + + cmd := exec.Command("losetup", "-f", "--show", filepath.Join(testDir, "testfs.img")) + loout, err := cmd.CombinedOutput() + c.Assert(err, checker.IsNil) + loopname := strings.TrimSpace(string(loout)) + defer exec.Command("losetup", "-d", loopname).Run() + + dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", fmt.Sprintf("mkdir -p /test/test-mount && mount -t ext4 -no loop,rw %v /test/test-mount", loopname)) + defer mount.Unmount(filepath.Join(testDir, "test-mount")) + + err = s.d.Start("--graph", filepath.Join(testDir, "test-mount")) + defer s.d.Stop() + c.Assert(err, check.IsNil) + + // pull a repository large enough to fill the mount point + pullOut, err := s.d.Cmd("pull", "registry:2") + c.Assert(err, checker.NotNil, check.Commentf(pullOut)) + c.Assert(pullOut, checker.Contains, "no space left on device") +} + +// Test daemon restart with container links + auto restart +func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + parent1Args := []string{} + parent2Args := []string{} + wg := sync.WaitGroup{} + maxChildren := 10 + chErr := make(chan error, maxChildren) + + for i := 0; i < maxChildren; i++ { + wg.Add(1) + name := fmt.Sprintf("test%d", i) + + if i < maxChildren/2 { + parent1Args = append(parent1Args, []string{"--link", name}...) + } else { + parent2Args = append(parent2Args, []string{"--link", name}...) + } + + go func() { + _, err = s.d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top") + chErr <- err + wg.Done() + }() + } + + wg.Wait() + close(chErr) + for err := range chErr { + c.Assert(err, check.IsNil) + } + + parent1Args = append([]string{"run", "-d"}, parent1Args...) + parent1Args = append(parent1Args, []string{"--name=parent1", "--restart=always", "busybox", "top"}...) + parent2Args = append([]string{"run", "-d"}, parent2Args...) + parent2Args = append(parent2Args, []string{"--name=parent2", "--restart=always", "busybox", "top"}...) + + _, err = s.d.Cmd(parent1Args...) + c.Assert(err, check.IsNil) + _, err = s.d.Cmd(parent2Args...) + c.Assert(err, check.IsNil) + + err = s.d.Stop() + c.Assert(err, check.IsNil) + // clear the log file -- we don't need any of it but may for the next part + // can ignore the error here, this is just a cleanup + os.Truncate(s.d.LogFileName(), 0) + err = s.d.Start() + c.Assert(err, check.IsNil) + + for _, num := range []string{"1", "2"} { + out, err := s.d.Cmd("inspect", "-f", "{{ .State.Running }}", "parent"+num) + c.Assert(err, check.IsNil) + if strings.TrimSpace(out) != "true" { + log, _ := ioutil.ReadFile(s.d.LogFileName()) + c.Fatalf("parent container is not running\n%s", string(log)) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *check.C) { + testRequires(c, DaemonIsLinux) + + cgroupParent := "test" + name := "cgroup-test" + + err := s.d.StartWithBusybox("--cgroup-parent", cgroupParent) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup") + c.Assert(err, checker.IsNil) + cgroupPaths := parseCgroupPaths(string(out)) + c.Assert(len(cgroupPaths), checker.Not(checker.Equals), 0, check.Commentf("unexpected output - %q", string(out))) + out, err = s.d.Cmd("inspect", "-f", "{{.Id}}", name) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(string(out)) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + c.Assert(found, checker.True, check.Commentf("Cgroup path for container (%s) doesn't found in cgroups file: %s", expectedCgroup, cgroupPaths)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support links + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--name=test2", "--link", "test:abc", "busybox", "sh", "-c", "ping -c 1 -w 1 abc") + c.Assert(err, check.IsNil, check.Commentf(out)) + + c.Assert(s.d.Restart(), check.IsNil) + + // should fail since test is not running yet + out, err = s.d.Cmd("start", "test2") + c.Assert(err, check.NotNil, check.Commentf(out)) + + out, err = s.d.Cmd("start", "test") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(strings.Contains(out, "1 packets transmitted, 1 packets received"), check.Equals, true, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support links + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("create", "--name=test", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "--name=test2", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + test2ID := strings.TrimSpace(out) + + out, err = s.d.Cmd("run", "-d", "--name=test3", "--link", "test2:abc", "busybox", "top") + test3ID := strings.TrimSpace(out) + + c.Assert(s.d.Restart(), check.IsNil) + + out, err = s.d.Cmd("create", "--name=test", "busybox") + c.Assert(err, check.NotNil, check.Commentf("expected error trying to create container with duplicate name")) + // this one is no longer needed, removing simplifies the remainder of the test + out, err = s.d.Cmd("rm", "-f", "test") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("ps", "-a", "--no-trunc") + c.Assert(err, check.IsNil, check.Commentf(out)) + + lines := strings.Split(strings.TrimSpace(out), "\n")[1:] + + test2validated := false + test3validated := false + for _, line := range lines { + fields := strings.Fields(line) + names := fields[len(fields)-1] + switch fields[0] { + case test2ID: + c.Assert(names, check.Equals, "test2,test3/abc") + test2validated = true + case test3ID: + c.Assert(names, check.Equals, "test3") + test3validated = true + } + } + + c.Assert(test2validated, check.Equals, true) + c.Assert(test3validated, check.Equals, true) +} + +// TestDaemonRestartWithKilledRunningContainer requires live restore of running containers +func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check.C) { + // TODO(mlaventure): Not sure what would the exit code be on windows + testRequires(t, DaemonIsLinux) + if err := s.d.StartWithBusybox(); err != nil { + t.Fatal(err) + } + + cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") + defer s.d.Stop() + if err != nil { + t.Fatal(cid, err) + } + cid = strings.TrimSpace(cid) + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid) + t.Assert(err, check.IsNil) + pid = strings.TrimSpace(pid) + + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // kill the container + runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", cid) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + t.Fatalf("Failed to run ctr, ExitCode: %d, err: '%v' output: '%s' cid: '%s'\n", ec, err, out, cid) + } + + // Give time to containerd to process the command if we don't + // the exit event might be received after we do the inspect + pidCmd := exec.Command("kill", "-0", pid) + _, ec, _ := runCommandWithOutput(pidCmd) + for ec == 0 { + time.Sleep(1 * time.Second) + _, ec, _ = runCommandWithOutput(pidCmd) + } + + // restart the daemon + if err := s.d.Start(); err != nil { + t.Fatal(err) + } + + // Check that we've got the correct exit code + out, err := s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", cid) + t.Assert(err, check.IsNil) + + out = strings.TrimSpace(out) + if out != "143" { + t.Fatalf("Expected exit code '%s' got '%s' for container '%s'\n", "143", out, cid) + } + +} + +// os.Kill should kill daemon ungracefully, leaving behind live containers. +// The live containers should be known to the restarted daemon. Stopping +// them now, should remove the mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { + testRequires(c, DaemonIsLinux) + c.Assert(s.d.StartWithBusybox("--live-restore"), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + + c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + // container mounts should exist even after daemon has crashed. + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + + // restart daemon. + if err := s.d.Restart("--live-restore"); err != nil { + c.Fatal(err) + } + + // container should be running. + out, err = s.d.Cmd("inspect", "--format={{.State.Running}}", id) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + out = strings.TrimSpace(out) + if out != "true" { + c.Fatalf("Container %s expected to stay alive after daemon restart", id) + } + + // 'docker stop' should work. + out, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + // Now, container mounts should be gone. + mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +// TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers. +func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *check.C) { + // TODO(mlaventure): Not sure what would the exit code be on windows + testRequires(t, DaemonIsLinux) + if err := s.d.StartWithBusybox("--live-restore"); err != nil { + t.Fatal(err) + } + + cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") + defer s.d.Stop() + if err != nil { + t.Fatal(cid, err) + } + cid = strings.TrimSpace(cid) + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid) + t.Assert(err, check.IsNil) + + // pause the container + if _, err := s.d.Cmd("pause", cid); err != nil { + t.Fatal(cid, err) + } + + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // resume the container + result := icmd.RunCommand( + ctrBinary, + "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", + "containers", "resume", cid) + t.Assert(result, icmd.Matches, icmd.Success) + + // Give time to containerd to process the command if we don't + // the resume event might be received after we do the inspect + waitAndAssert(t, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) { + result := icmd.RunCommand("kill", "-0", strings.TrimSpace(pid)) + return result.ExitCode, nil + }, checker.Equals, 0) + + // restart the daemon + if err := s.d.Start("--live-restore"); err != nil { + t.Fatal(err) + } + + // Check that we've got the correct status + out, err := s.d.Cmd("inspect", "-f", "{{.State.Status}}", cid) + t.Assert(err, check.IsNil) + + out = strings.TrimSpace(out) + if out != "running" { + t.Fatalf("Expected exit code '%s' got '%s' for container '%s'\n", "running", out, cid) + } + if _, err := s.d.Cmd("kill", cid); err != nil { + t.Fatal(err) + } +} + +// TestRunLinksChanged checks that creating a new container with the same name does not update links +// this ensures that the old, pre gh#16032 functionality continues on +func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support links + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--name=test2", "--link=test:abc", "busybox", "sh", "-c", "ping -c 1 abc") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "1 packets transmitted, 1 packets received") + + out, err = s.d.Cmd("rm", "-f", "test") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "--name=test", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") + + err = s.d.Restart() + c.Assert(err, check.IsNil) + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") +} + +func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) { + testRequires(c, DaemonIsLinux, NotPpc64le) + + infoLog := "\x1b[34mINFO\x1b" + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + // Enable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=false") + s.d.Stop() + c.Assert(b.String(), checker.Contains, infoLog) + + b.Reset() + + // Disable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=true") + s.d.Stop() + c.Assert(b.String(), check.Not(checker.Contains), infoLog) +} + +func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) { + testRequires(c, DaemonIsLinux, NotPpc64le) + + debugLog := "\x1b[37mDEBU\x1b" + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + s.d.StartWithLogFile(tty, "--debug") + s.d.Stop() + c.Assert(b.String(), checker.Contains, debugLog) +} + +func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + daemonConfig := `{ "debug" : false }` + configFile, err := ioutil.TempFile("", "test-daemon-discovery-backend-config-reload-config") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file for config reload")) + configFilePath := configFile.Name() + defer func() { + configFile.Close() + os.RemoveAll(configFile.Name()) + }() + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + // --log-level needs to be set so that d.Start() doesn't add --debug causing + // a conflict with the config + err = s.d.Start("--config-file", configFilePath, "--log-level=info") + c.Assert(err, checker.IsNil) + + // daemon config file + daemonConfig = `{ + "cluster-store": "consul://consuladdr:consulport/some/path", + "cluster-advertise": "192.168.56.100:0", + "debug" : false + }` + + err = configFile.Truncate(0) + c.Assert(err, checker.IsNil) + _, err = configFile.Seek(0, os.SEEK_SET) + c.Assert(err, checker.IsNil) + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + err = s.d.reloadConfig() + c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: consul://consuladdr:consulport/some/path")) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: 192.168.56.100:0")) +} + +// Test for #21956 +func (s *DockerDaemonSuite) TestDaemonLogOptions(c *check.C) { + err := s.d.StartWithBusybox("--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514") + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("inspect", "--format='{{.HostConfig.LogConfig}}'", id) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "{json-file map[]}") +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrency(c *check.C) { + c.Assert(s.d.Start("--max-concurrent-uploads=6", "--max-concurrent-downloads=8"), check.IsNil) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-downloads" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 7, "max-concurrent-downloads" : 9 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` + content, _ = ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-uploads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 1, "max-concurrent-downloads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, _ = ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "labels":["foo=bar"] }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, _ = ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { + err := s.d.StartWithBusybox("-b=none", "--iptables=false") + c.Assert(err, check.IsNil) + s.d.c.Logf("dockerBinary %s", dockerBinary) + out, code, err := s.d.buildImageWithOut("busyboxs", + `FROM busybox + RUN cat /etc/hosts`, false) + comment := check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err) + c.Assert(err, check.IsNil, comment) + c.Assert(code, check.Equals, 0, comment) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.StartWithBusybox("--dns", "1.2.3.4") + c.Assert(err, checker.IsNil) + + expectedOutput := "nameserver 1.2.3.4" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSSearchInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.StartWithBusybox("--dns-search", "example.com") + c.Assert(err, checker.IsNil) + + expectedOutput := "search example.com" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSOptionsInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.StartWithBusybox("--dns-opt", "timeout:3") + c.Assert(err, checker.IsNil) + + expectedOutput := "options timeout:3" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { + conf, err := ioutil.TempFile("", "config-file-") + c.Assert(err, check.IsNil) + configName := conf.Name() + conf.Close() + defer os.Remove(configName) + + config := ` +{ + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + err = s.d.StartWithBusybox("--config-file", configName) + c.Assert(err, check.IsNil) + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Reset config to only have the default + config = ` +{ + "runtimes": { + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + // Run with default runtime + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + config = ` +{ + "runtimes": { + "runc": { + "path": "my-runc" + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`) + + // Check that we can select a default runtime + config = ` +{ + "default-runtime": "vm", + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { + err := s.d.StartWithBusybox("--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + c.Assert(err, check.IsNil) + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Start a daemon without any extra runtimes + s.d.Stop() + err = s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + // Run with default runtime + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + s.d.Stop() + err = s.d.Start("--add-runtime", "runc=my-runc") + c.Assert(err, check.NotNil) + + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`) + + // Check that we can select a default runtime + s.d.Stop() + err = s.d.StartWithBusybox("--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + c.Assert(err, check.IsNil) + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + // top1 will exist after daemon restarts + out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("run top1: %v", out)) + // top2 will be removed after daemon restarts + out, err = s.d.Cmd("run", "-d", "--rm", "--name", "top2", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("run top2: %v", out)) + + out, err = s.d.Cmd("ps") + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should be running")) + c.Assert(out, checker.Contains, "top2", check.Commentf("top2 should be running")) + + // now restart daemon gracefully + err = s.d.Restart() + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("ps", "-a") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should exist after daemon restarts")) + c.Assert(out, checker.Not(checker.Contains), "top2", check.Commentf("top2 should be removed after daemon restarts")) +} + +func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + containerName := "error-values" + // Make a container with both a non 0 exit code and an error message + out, err := s.d.Cmd("run", "--name", containerName, "busybox", "toto") + c.Assert(err, checker.NotNil) + + // Check that those values were saved on disk + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + + // now restart daemon + err = s.d.Restart() + c.Assert(err, checker.IsNil) + + // Check that those values are still around + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { + testRequires(c, SameHostDaemon) + d := s.d + err := d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + // hack to be able to side-load a container config + out, err := d.Cmd("create", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "--type=image", "--format={{.ID}}", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(d.Stop(), checker.IsNil) + <-d.wait + + imageID := strings.TrimSpace(out) + volumeID := stringid.GenerateNonCryptoID() + vfsPath := filepath.Join(d.root, "vfs", "dir", volumeID) + c.Assert(os.MkdirAll(vfsPath, 0755), checker.IsNil) + + config := []byte(` + { + "ID": "` + id + `", + "Name": "hello", + "Driver": "` + d.storageDriver + `", + "Image": "` + imageID + `", + "Config": {"Image": "busybox:latest"}, + "NetworkSettings": {}, + "Volumes": { + "/bar":"/foo", + "/foo": "` + vfsPath + `", + "/quux":"/quux" + }, + "VolumesRW": { + "/bar": true, + "/foo": true, + "/quux": false + } + } + `) + + configPath := filepath.Join(d.root, "containers", id, "config.v2.json") + err = ioutil.WriteFile(configPath, config, 600) + err = d.Start() + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("inspect", "--type=container", "--format={{ json .Mounts }}", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + type mount struct { + Name string + Source string + Destination string + Driver string + RW bool + } + + ls := []mount{} + err = json.NewDecoder(strings.NewReader(out)).Decode(&ls) + c.Assert(err, checker.IsNil) + + expected := []mount{ + {Source: "/foo", Destination: "/bar", RW: true}, + {Name: volumeID, Destination: "/foo", RW: true}, + {Source: "/quux", Destination: "/quux", RW: false}, + } + c.Assert(ls, checker.HasLen, len(expected)) + + for _, m := range ls { + var matched bool + for _, x := range expected { + if m.Source == x.Source && m.Destination == x.Destination && m.RW == x.RW || m.Name != x.Name { + matched = true + break + } + } + c.Assert(matched, checker.True, check.Commentf("did find match for %+v", m)) + } +} + +func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + dockerProxyPath, err := exec.LookPath("docker-proxy") + c.Assert(err, checker.IsNil) + tmpDir, err := ioutil.TempDir("", "test-docker-proxy") + c.Assert(err, checker.IsNil) + + newProxyPath := filepath.Join(tmpDir, "docker-proxy") + cmd := exec.Command("cp", dockerProxyPath, newProxyPath) + c.Assert(cmd.Run(), checker.IsNil) + + // custom one + c.Assert(s.d.StartWithBusybox("--userland-proxy-path", newProxyPath), checker.IsNil) + out, err := s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // try with the original one + c.Assert(s.d.Restart("--userland-proxy-path", dockerProxyPath), checker.IsNil) + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // not exist + c.Assert(s.d.Restart("--userland-proxy-path", "/does/not/exist"), checker.IsNil) + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "driver failed programming external connectivity on endpoint") + c.Assert(out, checker.Contains, "/does/not/exist: no such file or directory") +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *check.C) { + testRequires(c, SameHostDaemon) + + c.Assert(s.d.StartWithBusybox("--shutdown-timeout=3"), check.IsNil) + + _, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGINT) + + select { + case <-s.d.wait: + case <-time.After(5 * time.Second): + } + + expectedMessage := `level=debug msg="start clean shutdown of all containers with a 3 seconds timeout..."` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMessage) +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "shutdown-timeout" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "shutdown-timeout" : 5 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + select { + case <-s.d.wait: + case <-time.After(3 * time.Second): + } + + expectedMessage := `level=debug msg="Reset Shutdown Timeout: 5"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMessage) +} + +// Test case for 29342 +func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d.StartWithBusybox("--live-restore") + + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.waitRun("top") + + out1, err := s.d.Cmd("exec", "-u", "test", "top", "id") + // uid=100(test) gid=101(test) groups=101(test) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out1)) + + // restart daemon. + s.d.Restart("--live-restore") + + out2, err := s.d.Cmd("exec", "-u", "test", "top", "id") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out2)) + c.Assert(out1, check.Equals, out2, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2)) + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) +} + +func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux, overlayFSSupported, SameHostDaemon) + s.d.StartWithBusybox("--live-restore", "--storage-driver", "overlay") + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.waitRun("top") + + // restart daemon. + s.d.Restart("--live-restore", "--storage-driver", "overlay") + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + // test if the rootfs mountpoint still exist + mountpoint, err := s.d.inspectFilter("top", ".GraphDriver.Data.MergedDir") + c.Assert(err, check.IsNil) + f, err := os.Open("/proc/self/mountinfo") + c.Assert(err, check.IsNil) + defer f.Close() + sc := bufio.NewScanner(f) + for sc.Scan() { + line := sc.Text() + if strings.Contains(line, mountpoint) { + c.Fatalf("mountinfo should not include the mountpoint of stop container") + } + } + + out, err = s.d.Cmd("rm", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go new file mode 100644 index 0000000..08cf6e1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go @@ -0,0 +1,98 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// ensure that an added file shows up in docker diff +func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { + containerCmd := `mkdir /foo; echo xyzzy > /foo/bar` + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) + + // Wait for it to exit as cannot diff a running container on Windows, and + // it will take a few seconds to exit. Also there's no way in Windows to + // differentiate between an Add or a Modify, and all files are under + // a "Files/" prefix. + containerID := strings.TrimSpace(out) + lookingFor := "A /foo/bar" + if daemonPlatform == "windows" { + err := waitExited(containerID, 60*time.Second) + c.Assert(err, check.IsNil) + lookingFor = "C Files/foo/bar" + } + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + found := false + for _, line := range strings.Split(out, "\n") { + if strings.Contains(line, lookingFor) { + found = true + break + } + } + c.Assert(found, checker.True) +} + +// test to ensure GH #3840 doesn't occur any more +func (s *DockerSuite) TestDiffEnsureInitLayerFilesAreIgnored(c *check.C) { + testRequires(c, DaemonIsLinux) + // this is a list of files which shouldn't show up in `docker diff` + initLayerFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerenv"} + containerCount := 5 + + // we might not run into this problem from the first run, so start a few containers + for i := 0; i < containerCount; i++ { + containerCmd := `echo foo > /root/bar` + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + for _, filename := range initLayerFiles { + c.Assert(out, checker.Not(checker.Contains), filename) + } + } +} + +func (s *DockerSuite) TestDiffEnsureDefaultDevs(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "0") + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + expected := map[string]bool{ + "C /dev": true, + "A /dev/full": true, // busybox + "C /dev/ptmx": true, // libcontainer + "A /dev/mqueue": true, + "A /dev/kmsg": true, + "A /dev/fd": true, + "A /dev/ptmx": true, + "A /dev/null": true, + "A /dev/random": true, + "A /dev/stdout": true, + "A /dev/stderr": true, + "A /dev/tty1": true, + "A /dev/stdin": true, + "A /dev/tty": true, + "A /dev/urandom": true, + "A /dev/zero": true, + } + + for _, line := range strings.Split(out, "\n") { + c.Assert(line == "" || expected[line], checker.True, check.Commentf(line)) + } +} + +// https://github.com/docker/docker/pull/14381#discussion_r33859347 +func (s *DockerSuite) TestDiffEmptyArgClientError(c *check.C) { + out, _, err := dockerCmdWithError("diff", "") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "Container name cannot be empty") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go new file mode 100644 index 0000000..1fbfc74 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go @@ -0,0 +1,794 @@ +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "strings" + "time" + + eventtypes "github.com/docker/docker/api/types/events" + eventstestutils "github.com/docker/docker/daemon/events/testutils" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) { + name := "events-time-format-test" + + // Start stopwatch, generate an event + start := daemonTime(c) + time.Sleep(1100 * time.Millisecond) // so that first event occur in different second from since (just for the case) + dockerCmd(c, "run", "--rm", "--name", name, "busybox", "true") + time.Sleep(1100 * time.Millisecond) // so that until > since + end := daemonTime(c) + + // List of available time formats to --since + unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) } + rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) } + duration := func(t time.Time) string { return time.Now().Sub(t).String() } + + // --since=$start must contain only the 'untag' event + for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} { + since, until := f(start), f(end) + out, _ := dockerCmd(c, "events", "--since="+since, "--until="+until) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, name, "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) + } +} + +func (s *DockerSuite) TestEventsUntag(c *check.C) { + image := "busybox" + dockerCmd(c, "tag", image, "utest:tag1") + dockerCmd(c, "tag", image, "utest:tag2") + dockerCmd(c, "rmi", "utest:tag1") + dockerCmd(c, "rmi", "utest:tag2") + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "events", "--since=1"}, + Timeout: time.Millisecond * 2500, + }) + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + + events := strings.Split(result.Stdout(), "\n") + nEvents := len(events) + // The last element after the split above will be an empty string, so we + // get the two elements before the last, which are the untags we're + // looking for. + for _, v := range events[nEvents-3 : nEvents-1] { + c.Assert(v, checker.Contains, "untag", check.Commentf("event should be untag")) + } +} + +func (s *DockerSuite) TestEventsLimit(c *check.C) { + // Limit to 8 goroutines creating containers in order to prevent timeouts + // creating so many containers simultaneously on Windows + sem := make(chan bool, 8) + numContainers := 17 + errChan := make(chan error, numContainers) + + args := []string{"run", "--rm", "busybox", "true"} + for i := 0; i < numContainers; i++ { + sem <- true + go func() { + defer func() { <-sem }() + out, err := exec.Command(dockerBinary, args...).CombinedOutput() + if err != nil { + err = fmt.Errorf("%v: %s", err, string(out)) + } + errChan <- err + }() + } + + // Wait for all goroutines to finish + for i := 0; i < cap(sem); i++ { + sem <- true + } + close(errChan) + + for err := range errChan { + c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " "))) + } + + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + nEvents := len(events) - 1 + c.Assert(nEvents, checker.Equals, 64, check.Commentf("events should be limited to 64, but received %d", nEvents)) +} + +func (s *DockerSuite) TestEventsContainerEvents(c *check.C) { + dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") + + out, _ := dockerCmd(c, "events", "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, "container-events-test", "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsContainerEventsAttrSort(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") + + out, _ := dockerCmd(c, "events", "--filter", "container=container-events-test", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 3) //Missing expected event + matchedEvents := 0 + for _, event := range events { + matches := eventstestutils.ScanMap(event) + if matches["eventType"] == "container" && matches["action"] == "create" { + matchedEvents++ + c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted")) + } else if matches["eventType"] == "container" && matches["action"] == "start" { + matchedEvents++ + c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted")) + } + } + c.Assert(matchedEvents, checker.Equals, 2, check.Commentf("missing events for container container-events-test:\n%s", out)) +} + +func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) { + dockerCmd(c, "run", "--rm", "--name", "since-epoch-test", "busybox", "true") + timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano) + timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1) + out, _ := dockerCmd(c, "events", "--since", timeBeginning, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, "since-epoch-test", "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsImageTag(c *check.C) { + time.Sleep(1 * time.Second) // because API has seconds granularity + since := daemonUnixTime(c) + image := "testimageevents:tag" + dockerCmd(c, "tag", "busybox", image) + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1, check.Commentf("was expecting 1 event. out=%s", out)) + event := strings.TrimSpace(events[0]) + + matches := eventstestutils.ScanMap(event) + c.Assert(matchEventID(matches, image), checker.True, check.Commentf("matches: %v\nout:\n%s", matches, out)) + c.Assert(matches["action"], checker.Equals, "tag") +} + +func (s *DockerSuite) TestEventsImagePull(c *check.C) { + // TODO Windows: Enable this test once pull and reliable image names are available + testRequires(c, DaemonIsLinux) + since := daemonUnixTime(c) + testRequires(c, Network) + + dockerCmd(c, "pull", "hello-world") + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + event := strings.TrimSpace(events[len(events)-1]) + matches := eventstestutils.ScanMap(event) + c.Assert(matches["id"], checker.Equals, "hello-world:latest") + c.Assert(matches["action"], checker.Equals, "pull") + +} + +func (s *DockerSuite) TestEventsImageImport(c *check.C) { + // TODO Windows CI. This should be portable once export/import are + // more reliable (@swernli) + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + since := daemonUnixTime(c) + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "export", cleanedContainerID), + exec.Command(dockerBinary, "import", "-"), + ) + c.Assert(err, checker.IsNil, check.Commentf("import failed with output: %q", out)) + imageRef := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=import") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageRef, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "import", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsImageLoad(c *check.C) { + testRequires(c, DaemonIsLinux) + myImageName := "footest:v1" + dockerCmd(c, "tag", "busybox", myImageName) + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + longImageID := strings.TrimSpace(out) + c.Assert(longImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty")) + + dockerCmd(c, "save", "-o", "saveimg.tar", myImageName) + dockerCmd(c, "rmi", myImageName) + out, _ = dockerCmd(c, "images", "-q", myImageName) + noImageID := strings.TrimSpace(out) + c.Assert(noImageID, checker.Equals, "", check.Commentf("Should not have any image")) + dockerCmd(c, "load", "-i", "saveimg.tar") + + result := icmd.RunCommand("rm", "-rf", "saveimg.tar") + c.Assert(result, icmd.Matches, icmd.Success) + + out, _ = dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + imageID := strings.TrimSpace(out) + c.Assert(imageID, checker.Equals, longImageID, check.Commentf("Should have same image id as before")) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=load") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "load", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=save") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches = eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "save", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsPluginOps(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + since := daemonUnixTime(c) + + dockerCmd(c, "plugin", "install", pNameWithTag, "--grant-all-permissions") + dockerCmd(c, "plugin", "disable", pNameWithTag) + dockerCmd(c, "plugin", "remove", pNameWithTag) + + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 4) + + pluginEvents := eventActionsByIDAndType(c, events, pNameWithTag, "plugin") + c.Assert(pluginEvents, checker.HasLen, 4, check.Commentf("events: %v", events)) + + c.Assert(pluginEvents[0], checker.Equals, "pull", check.Commentf(out)) + c.Assert(pluginEvents[1], checker.Equals, "enable", check.Commentf(out)) + c.Assert(pluginEvents[2], checker.Equals, "disable", check.Commentf(out)) + c.Assert(pluginEvents[3], checker.Equals, "remove", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsFilters(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "busybox", "true") + dockerCmd(c, "run", "--rm", "busybox", "true") + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die") + parseEvents(c, out, "die") + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die", "--filter", "event=start") + parseEvents(c, out, "die|start") + + // make sure we at least got 2 start events + count := strings.Count(out, "start") + c.Assert(strings.Count(out, "start"), checker.GreaterOrEqualThan, 2, check.Commentf("should have had 2 start events but had %d, out: %s", count, out)) + +} + +func (s *DockerSuite) TestEventsFilterImageName(c *check.C) { + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "run", "--name", "container_1", "-d", "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--name", "container_2", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + name := "busybox" + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("image=%s", name)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + c.Assert(events, checker.Not(checker.HasLen), 0) //Expected events but found none for the image busybox:latest + count1 := 0 + count2 := 0 + + for _, e := range events { + if strings.Contains(e, container1) { + count1++ + } else if strings.Contains(e, container2) { + count2++ + } + } + c.Assert(count1, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count1, container1)) + c.Assert(count2, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count2, container2)) + +} + +func (s *DockerSuite) TestEventsFilterLabels(c *check.C) { + since := daemonUnixTime(c) + label := "io.docker.testing=foo" + + out, _ := dockerCmd(c, "run", "-d", "-l", label, "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 3) + + for _, e := range events { + c.Assert(e, checker.Contains, container1) + c.Assert(e, checker.Not(checker.Contains), container2) + } +} + +func (s *DockerSuite) TestEventsFilterImageLabels(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. + _, err := buildImage(name, fmt.Sprintf(` + FROM busybox:latest + LABEL %s`, label), true) + c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + + dockerCmd(c, "tag", name, "labelfiltertest:tag1") + dockerCmd(c, "tag", name, "labelfiltertest:tag2") + dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=image") + + events := strings.Split(strings.TrimSpace(out), "\n") + + // 2 events from the "docker tag" command, another one is from "docker build" + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + for _, e := range events { + c.Assert(e, checker.Contains, "labelfiltertest") + } +} + +func (s *DockerSuite) TestEventsFilterContainer(c *check.C) { + since := daemonUnixTime(c) + nameID := make(map[string]string) + + for _, name := range []string{"container_1", "container_2"} { + dockerCmd(c, "run", "--name", name, "busybox", "true") + id := inspectField(c, name, "Id") + nameID[name] = id + } + + until := daemonUnixTime(c) + + checkEvents := func(id string, events []string) error { + if len(events) != 4 { // create, attach, start, die + return fmt.Errorf("expected 4 events, got %v", events) + } + for _, event := range events { + matches := eventstestutils.ScanMap(event) + if !matchEventID(matches, id) { + return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, matches["id"]) + } + } + return nil + } + + for name, ID := range nameID { + // filter by names + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+name) + events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") + c.Assert(checkEvents(ID, events), checker.IsNil) + + // filter by ID's + out, _ = dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+ID) + events = strings.Split(strings.TrimSuffix(out, "\n"), "\n") + c.Assert(checkEvents(ID, events), checker.IsNil) + } +} + +func (s *DockerSuite) TestEventsCommit(c *check.C) { + // Problematic on Windows as cannot commit a running container + testRequires(c, DaemonIsLinux) + + out, _ := runSleepingContainer(c) + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "commit", "-m", "test", cID) + dockerCmd(c, "stop", cID) + c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "commit", check.Commentf("Missing 'commit' log event")) +} + +func (s *DockerSuite) TestEventsCopy(c *check.C) { + // Build a test image. + id, err := buildImage("cpimg", ` + FROM busybox + RUN echo HI > /file`, true) + c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + + // Create an empty test file. + tempFile, err := ioutil.TempFile("", "test-events-copy-") + c.Assert(err, checker.IsNil) + defer os.Remove(tempFile.Name()) + + c.Assert(tempFile.Close(), checker.IsNil) + + dockerCmd(c, "create", "--name=cptest", id) + + dockerCmd(c, "cp", "cptest:/file", tempFile.Name()) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "archive-path", check.Commentf("Missing 'archive-path' log event\n")) + + dockerCmd(c, "cp", tempFile.Name(), "cptest:/filecopy") + + until = daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "extract-to-dir", check.Commentf("Missing 'extract-to-dir' log event")) +} + +func (s *DockerSuite) TestEventsResize(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + endpoint := "/containers/" + cID + "/resize?h=80&w=24" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "resize", check.Commentf("Missing 'resize' log event")) +} + +func (s *DockerSuite) TestEventsAttach(c *check.C) { + // TODO Windows CI: Figure out why this test fails intermittently (TP5). + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + cmd := exec.Command(dockerBinary, "attach", cID) + stdin, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + // Make sure we're done attaching by writing/reading some stuff + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello'")) + + c.Assert(stdin.Close(), checker.IsNil) + + dockerCmd(c, "kill", cID) + c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "attach", check.Commentf("Missing 'attach' log event")) +} + +func (s *DockerSuite) TestEventsRename(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "oldName", "busybox", "true") + cID := strings.TrimSpace(out) + dockerCmd(c, "rename", "oldName", "newName") + + until := daemonUnixTime(c) + // filter by the container id because the name in the event will be the new name. + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until", until) + c.Assert(out, checker.Contains, "rename", check.Commentf("Missing 'rename' log event\n")) +} + +func (s *DockerSuite) TestEventsTop(c *check.C) { + // Problematic on Windows as Windows does not support top + testRequires(c, DaemonIsLinux) + + out, _ := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "top", cID) + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, " top", check.Commentf("Missing 'top' log event")) +} + +// #14316 +func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) { + // Problematic to port for Windows CI during TP5 timeframe until + // supporting push + testRequires(c, DaemonIsLinux) + testRequires(c, Network) + repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "commit", cID, repoName) + dockerCmd(c, "stop", cID) + dockerCmd(c, "push", repoName) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "image="+repoName, "-f", "event=push", "--until", until) + c.Assert(out, checker.Contains, repoName, check.Commentf("Missing 'push' log event for %s", repoName)) +} + +func (s *DockerSuite) TestEventsFilterType(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. + _, err := buildImage(name, fmt.Sprintf(` + FROM busybox:latest + LABEL %s`, label), true) + c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + + dockerCmd(c, "tag", name, "labelfiltertest:tag1") + dockerCmd(c, "tag", name, "labelfiltertest:tag2") + dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=image") + + events := strings.Split(strings.TrimSpace(out), "\n") + + // 2 events from the "docker tag" command, another one is from "docker build" + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + for _, e := range events { + c.Assert(e, checker.Contains, "labelfiltertest") + } + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=container") + events = strings.Split(strings.TrimSpace(out), "\n") + + // Events generated by the container that builds the image + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", "type=network") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 1, check.Commentf("Events == %s", events)) +} + +// #25798 +func (s *DockerSuite) TestEventsSpecialFiltersWithExecCreate(c *check.C) { + since := daemonUnixTime(c) + runSleepingContainer(c, "--name", "test-container", "-d") + waitRun("test-container") + + dockerCmd(c, "exec", "test-container", "echo", "hello-world") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", + "event='exec_create: echo hello-world'", + ) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 1, check.Commentf(out)) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", + "event=exec_create", + ) + c.Assert(len(events), checker.Equals, 1, check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsFilterImageInContainerAction(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") + waitRun("test-container") + + out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 1, check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsContainerRestart(c *check.C) { + dockerCmd(c, "run", "-d", "--name=testEvent", "--restart=on-failure:3", "busybox", "false") + + // wait until test2 is auto removed. + waitTime := 10 * time.Second + if daemonPlatform == "windows" { + // Windows takes longer... + waitTime = 90 * time.Second + } + + err := waitInspect("testEvent", "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTime) + c.Assert(err, checker.IsNil) + + var ( + createCount int + startCount int + dieCount int + ) + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container=testEvent") + events := strings.Split(strings.TrimSpace(out), "\n") + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 1) //Missing expected event + actions := eventActionsByIDAndType(c, events, "testEvent", "container") + + for _, a := range actions { + switch a { + case "create": + createCount++ + case "start": + startCount++ + case "die": + dieCount++ + } + } + c.Assert(createCount, checker.Equals, 1, check.Commentf("testEvent should be created 1 times: %v", actions)) + c.Assert(startCount, checker.Equals, 4, check.Commentf("testEvent should start 4 times: %v", actions)) + c.Assert(dieCount, checker.Equals, 4, check.Commentf("testEvent should die 4 times: %v", actions)) +} + +func (s *DockerSuite) TestEventsSinceInTheFuture(c *check.C) { + dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") + waitRun("test-container") + + since := daemonTime(c) + until := since.Add(time.Duration(-24) * time.Hour) + out, _, err := dockerCmdWithError("events", "--filter", "image=busybox", "--since", parseEventTime(since), "--until", parseEventTime(until)) + + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "cannot be after `until`") +} + +func (s *DockerSuite) TestEventsUntilInThePast(c *check.C) { + since := daemonUnixTime(c) + + dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") + waitRun("test-container") + + until := daemonUnixTime(c) + + dockerCmd(c, "run", "--name", "test-container2", "-d", "busybox", "true") + waitRun("test-container2") + + out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", until) + + c.Assert(out, checker.Not(checker.Contains), "test-container2") + c.Assert(out, checker.Contains, "test-container") +} + +func (s *DockerSuite) TestEventsFormat(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "busybox", "true") + dockerCmd(c, "run", "--rm", "busybox", "true") + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--format", "{{json .}}") + dec := json.NewDecoder(strings.NewReader(out)) + // make sure we got 2 start events + startCount := 0 + for { + var err error + var ev eventtypes.Message + if err = dec.Decode(&ev); err == io.EOF { + break + } + c.Assert(err, checker.IsNil) + if ev.Status == "start" { + startCount++ + } + } + + c.Assert(startCount, checker.Equals, 2, check.Commentf("should have had 2 start events but had %d, out: %s", startCount, out)) +} + +func (s *DockerSuite) TestEventsFormatBadFunc(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{badFuncString .}}") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1: function \"badFuncString\" not defined", + }) +} + +func (s *DockerSuite) TestEventsFormatBadField(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{.badFieldString}}") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1:2: executing \"\" at <.badFieldString>: can't evaluate field badFieldString in type *events.Message", + }) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go new file mode 100644 index 0000000..dc91667 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go @@ -0,0 +1,486 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "syscall" + "time" + "unicode" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #5979 +func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "busybox", "true") + + file, err := ioutil.TempFile("", "") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file")) + defer os.Remove(file.Name()) + + command := fmt.Sprintf("%s events --since=%s --until=%s > %s", dockerBinary, since, daemonUnixTime(c), file.Name()) + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Run(), checker.IsNil, check.Commentf("run err for command %q", command)) + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + for _, ch := range scanner.Text() { + c.Assert(unicode.IsControl(ch), checker.False, check.Commentf("found control character %v", []byte(string(ch)))) + } + } + c.Assert(scanner.Err(), checker.IsNil, check.Commentf("Scan err for command %q", command)) + +} + +func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport) + + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "--name", "oomFalse", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } + + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=oomFalse", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") + nEvents := len(events) + + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + c.Assert(parseEventAction(c, events[nEvents-5]), checker.Equals, "create") + c.Assert(parseEventAction(c, events[nEvents-4]), checker.Equals, "attach") + c.Assert(parseEventAction(c, events[nEvents-3]), checker.Equals, "start") + c.Assert(parseEventAction(c, events[nEvents-2]), checker.Equals, "oom") + c.Assert(parseEventAction(c, events[nEvents-1]), checker.Equals, "die") +} + +func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport) + + errChan := make(chan error) + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "--oom-kill-disable=true", "--name", "oomTrue", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + c.Assert(waitRun("oomTrue"), checker.IsNil) + defer dockerCmd(c, "kill", "oomTrue") + containerID := inspectField(c, "oomTrue", "Id") + + testActions := map[string]chan bool{ + "oom": make(chan bool), + } + + matcher := matchEventLine(containerID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(20 * time.Second): + observer.CheckEventError(c, containerID, "oom", matcher) + case <-testActions["oom"]: + // ignore, done + case errRun := <-errChan: + if errRun != nil { + c.Fatalf("%v", errRun) + } else { + c.Fatalf("container should be still running but it's not") + } + } + + status := inspectField(c, "oomTrue", "State.Status") + c.Assert(strings.TrimSpace(status), checker.Equals, "running", check.Commentf("container should be still running")) +} + +// #18453 +func (s *DockerSuite) TestEventsContainerFilterByName(c *check.C) { + testRequires(c, DaemonIsLinux) + cOut, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + c1 := strings.TrimSpace(cOut) + waitRun("foo") + cOut, _ = dockerCmd(c, "run", "--name=bar", "-d", "busybox", "top") + c2 := strings.TrimSpace(cOut) + waitRun("bar") + out, _ := dockerCmd(c, "events", "-f", "container=foo", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(out, checker.Contains, c1, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), c2, check.Commentf(out)) +} + +// #18453 +func (s *DockerSuite) TestEventsContainerFilterBeforeCreate(c *check.C) { + testRequires(c, DaemonIsLinux) + buf := &bytes.Buffer{} + cmd := exec.Command(dockerBinary, "events", "-f", "container=foo", "--since=0") + cmd.Stdout = buf + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Wait() + defer cmd.Process.Kill() + + // Sleep for a second to make sure we are testing the case where events are listened before container starts. + time.Sleep(time.Second) + id, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + cID := strings.TrimSpace(id) + for i := 0; ; i++ { + out := buf.String() + if strings.Contains(out, cID) { + break + } + if i > 30 { + c.Fatalf("Missing event of container (foo, %v), got %q", cID, out) + } + time.Sleep(500 * time.Millisecond) + } +} + +func (s *DockerSuite) TestVolumeEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/mount volume actions + dockerCmd(c, "volume", "create", "test-event-volume-local") + dockerCmd(c, "run", "--name", "test-volume-container", "--volume", "test-event-volume-local:/foo", "-d", "busybox", "true") + waitRun("test-volume-container") + + // Observe unmount/destroy volume actions + dockerCmd(c, "rm", "-f", "test-volume-container") + dockerCmd(c, "volume", "rm", "test-event-volume-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + volumeEvents := eventActionsByIDAndType(c, events, "test-event-volume-local", "volume") + c.Assert(volumeEvents, checker.HasLen, 4) + c.Assert(volumeEvents[0], checker.Equals, "create") + c.Assert(volumeEvents[1], checker.Equals, "mount") + c.Assert(volumeEvents[2], checker.Equals, "unmount") + c.Assert(volumeEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestNetworkEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local", "-d", "busybox", "true") + waitRun("test-network-container") + + // Observe disconnect/destroy network actions + dockerCmd(c, "rm", "-f", "test-network-container") + dockerCmd(c, "network", "rm", "test-event-network-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + netEvents := eventActionsByIDAndType(c, events, "test-event-network-local", "network") + c.Assert(netEvents, checker.HasLen, 4) + c.Assert(netEvents[0], checker.Equals, "create") + c.Assert(netEvents[1], checker.Equals, "connect") + c.Assert(netEvents[2], checker.Equals, "disconnect") + c.Assert(netEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestEventsContainerWithMultiNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local-1") + dockerCmd(c, "network", "create", "test-event-network-local-2") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local-1", "-td", "busybox", "sh") + waitRun("test-network-container") + dockerCmd(c, "network", "connect", "test-event-network-local-2", "test-network-container") + + since := daemonUnixTime(c) + + dockerCmd(c, "stop", "-t", "1", "test-network-container") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "-f", "type=network") + netEvents := strings.Split(strings.TrimSpace(out), "\n") + + // received two network disconnect events + c.Assert(len(netEvents), checker.Equals, 2) + c.Assert(netEvents[0], checker.Contains, "disconnect") + c.Assert(netEvents[1], checker.Contains, "disconnect") + + //both networks appeared in the network event output + c.Assert(out, checker.Contains, "test-event-network-local-1") + c.Assert(out, checker.Contains, "test-event-network-local-2") +} + +func (s *DockerSuite) TestEventsStreaming(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + out, _ := dockerCmd(c, "run", "-d", "busybox:latest", "true") + containerID := strings.TrimSpace(out) + + testActions := map[string]chan bool{ + "create": make(chan bool, 1), + "start": make(chan bool, 1), + "die": make(chan bool, 1), + "destroy": make(chan bool, 1), + } + + matcher := matchEventLine(containerID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "create", matcher) + case <-testActions["create"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } + + dockerCmd(c, "rm", containerID) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "destroy", matcher) + case <-testActions["destroy"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + name := "testimageevents" + imageID, err := buildImage(name, + `FROM scratch + MAINTAINER "docker"`, + true) + c.Assert(err, checker.IsNil) + c.Assert(deleteImages(name), checker.IsNil) + + testActions := map[string]chan bool{ + "untag": make(chan bool, 1), + "delete": make(chan bool, 1), + } + + matcher := matchEventLine(imageID, "image", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "untag", matcher) + case <-testActions["untag"]: + // ignore, done + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "delete", matcher) + case <-testActions["delete"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsFilterVolumeAndNetworkType(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-type") + dockerCmd(c, "volume", "create", "test-event-volume-type") + + out, _ := dockerCmd(c, "events", "--filter", "type=volume", "--filter", "type=network", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 2, check.Commentf(out)) + + networkActions := eventActionsByIDAndType(c, events, "test-event-network-type", "network") + volumeActions := eventActionsByIDAndType(c, events, "test-event-volume-type", "volume") + + c.Assert(volumeActions[0], checker.Equals, "create") + c.Assert(networkActions[0], checker.Equals, "create") +} + +func (s *DockerSuite) TestEventsFilterVolumeID(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "volume", "create", "test-event-volume-id") + out, _ := dockerCmd(c, "events", "--filter", "volume=test-event-volume-id", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-volume-id") + c.Assert(events[0], checker.Contains, "driver=local") +} + +func (s *DockerSuite) TestEventsFilterNetworkID(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-local") + out, _ := dockerCmd(c, "events", "--filter", "network=test-event-network-local", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-network-local") + c.Assert(events[0], checker.Contains, "type=bridge") +} + +func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{"max-concurrent-downloads":1,"labels":["bar=foo"], "shutdown-timeout": 10}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, default-runtime=runc, insecure-registries=[], labels=[\"bar=foo\"], live-restore=false, max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s, runtimes=runc:{docker-runc []}, shutdown-timeout=10)", daemonID, daemonName)) +} + +func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonID)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonName)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "daemon=foo") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=daemon") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=container") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go new file mode 100644 index 0000000..cac76d9 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go @@ -0,0 +1,601 @@ +// +build !test_no_exec + +package main + +import ( + "bufio" + "fmt" + "net/http" + "os" + "os/exec" + "reflect" + "runtime" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExec(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "cat", "/tmp/file") + out = strings.Trim(out, "\r\n") + c.Assert(out, checker.Equals, "test") + +} + +func (s *DockerSuite) TestExecInteractive(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + + execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") + stdin, err := execCmd.StdinPipe() + c.Assert(err, checker.IsNil) + stdout, err := execCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + + err = execCmd.Start() + c.Assert(err, checker.IsNil) + _, err = stdin.Write([]byte("cat /tmp/file\n")) + c.Assert(err, checker.IsNil) + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + c.Assert(err, checker.IsNil) + line = strings.TrimSpace(line) + c.Assert(line, checker.Equals, "test") + err = stdin.Close() + c.Assert(err, checker.IsNil) + errChan := make(chan error) + go func() { + errChan <- execCmd.Wait() + close(errChan) + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(1 * time.Second): + c.Fatal("docker exec failed to exit on stdin close") + } + +} + +func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { + out, _ := runSleepingContainer(c) + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + dockerCmd(c, "restart", cleanedContainerID) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + out, _ = dockerCmd(c, "exec", cleanedContainerID, "echo", "hello") + outStr := strings.TrimSpace(out) + c.Assert(outStr, checker.Equals, "hello") +} + +func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) { + // TODO Windows CI: Requires a little work to get this ported. + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) + + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("Could not run top: %s", out)) + + err = s.d.Restart() + c.Assert(err, checker.IsNil, check.Commentf("Could not restart daemon")) + + out, err = s.d.Cmd("start", "top") + c.Assert(err, checker.IsNil, check.Commentf("Could not start top after daemon restart: %s", out)) + + out, err = s.d.Cmd("exec", "top", "echo", "hello") + c.Assert(err, checker.IsNil, check.Commentf("Could not exec on container top: %s", out)) + + outStr := strings.TrimSpace(string(out)) + c.Assert(outStr, checker.Equals, "hello") +} + +// Regression test for #9155, #9044 +func (s *DockerSuite) TestExecEnv(c *check.C) { + // TODO Windows CI: This one is interesting and may just end up being a feature + // difference between Windows and Linux. On Windows, the environment is passed + // into the process that is launched, not into the machine environment. Hence + // a subsequent exec will not have LALA set/ + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-e", "LALA=value1", "-e", "LALA=value2", "-d", "--name", "testing") + c.Assert(waitRun("testing"), check.IsNil) + + out, _ := dockerCmd(c, "exec", "testing", "env") + c.Assert(out, checker.Not(checker.Contains), "LALA=value1") + c.Assert(out, checker.Contains, "LALA=value2") + c.Assert(out, checker.Contains, "HOME=/root") +} + +func (s *DockerSuite) TestExecSetEnv(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-e", "HOME=/root", "-d", "--name", "testing") + c.Assert(waitRun("testing"), check.IsNil) + + out, _ := dockerCmd(c, "exec", "-e", "HOME=/another", "-e", "ABC=xyz", "testing", "env") + c.Assert(out, checker.Not(checker.Contains), "HOME=/root") + c.Assert(out, checker.Contains, "HOME=/another") + c.Assert(out, checker.Contains, "ABC=xyz") +} + +func (s *DockerSuite) TestExecExitStatus(c *check.C) { + runSleepingContainer(c, "-d", "--name", "top") + + result := icmd.RunCommand(dockerBinary, "exec", "top", "sh", "-c", "exit 23") + c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 23, Error: "exit status 23"}) +} + +func (s *DockerSuite) TestExecPausedContainer(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + + out, _ := runSleepingContainer(c, "-d", "--name", "testing") + ContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", "testing") + out, _, err := dockerCmdWithError("exec", "-i", "-t", ContainerID, "echo", "hello") + c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new conmmand if it is paused")) + + expected := ContainerID + " is paused, unpause the container before exec" + c.Assert(out, checker.Contains, expected, check.Commentf("container should not exec new command if it is paused")) +} + +// regression test for #9476 +func (s *DockerSuite) TestExecTTYCloseStdin(c *check.C) { + // TODO Windows CI: This requires some work to port to Windows. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") + + cmd := exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") + stdinRw, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + + stdinRw.Write([]byte("test")) + stdinRw.Close() + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, _ = dockerCmd(c, "top", "exec_tty_stdin") + outArr := strings.Split(out, "\n") + c.Assert(len(outArr), checker.LessOrEqualThan, 3, check.Commentf("exec process left running")) + c.Assert(out, checker.Not(checker.Contains), "nsenter-exec") +} + +func (s *DockerSuite) TestExecTTYWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("exec should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("exec is running but should have failed") + } +} + +func (s *DockerSuite) TestExecParseError(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") + + // Test normal (non-detached) case first + cmd := exec.Command(dockerBinary, "exec", "top") + _, stderr, _, err := runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(stderr, checker.Contains, "See 'docker exec --help'") +} + +func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + err := exec.Command(dockerBinary, "exec", "testing", "top").Start() + c.Assert(err, checker.IsNil) + + type dstop struct { + out []byte + err error + } + + ch := make(chan dstop) + go func() { + out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() + ch <- dstop{out, err} + close(ch) + }() + select { + case <-time.After(3 * time.Second): + c.Fatal("Container stop timed out") + case s := <-ch: + c.Assert(s.err, check.IsNil) + } +} + +func (s *DockerSuite) TestExecCgroup(c *check.C) { + // Not applicable on Windows - using Linux specific functionality + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "testing", "cat", "/proc/1/cgroup") + containerCgroups := sort.StringSlice(strings.Split(out, "\n")) + + var wg sync.WaitGroup + var mu sync.Mutex + execCgroups := []sort.StringSlice{} + errChan := make(chan error) + // exec a few times concurrently to get consistent failure + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + out, _, err := dockerCmdWithError("exec", "testing", "cat", "/proc/self/cgroup") + if err != nil { + errChan <- err + return + } + cg := sort.StringSlice(strings.Split(out, "\n")) + + mu.Lock() + execCgroups = append(execCgroups, cg) + mu.Unlock() + wg.Done() + }() + } + wg.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, checker.IsNil) + } + + for _, cg := range execCgroups { + if !reflect.DeepEqual(cg, containerCgroups) { + fmt.Println("exec cgroups:") + for _, name := range cg { + fmt.Printf(" %s\n", name) + } + + fmt.Println("container cgroups:") + for _, name := range containerCgroups { + fmt.Printf(" %s\n", name) + } + c.Fatal("cgroups mismatched") + } + } +} + +func (s *DockerSuite) TestExecInspectID(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + id := strings.TrimSuffix(out, "\n") + + out = inspectField(c, id, "ExecIDs") + c.Assert(out, checker.Equals, "[]", check.Commentf("ExecIDs should be empty, got: %s", out)) + + // Start an exec, have it block waiting so we can do some checking + cmd := exec.Command(dockerBinary, "exec", id, "sh", "-c", + "while ! test -e /execid1; do sleep 1; done") + + err := cmd.Start() + c.Assert(err, checker.IsNil, check.Commentf("failed to start the exec cmd")) + + // Give the exec 10 chances/seconds to start then give up and stop the test + tries := 10 + for i := 0; i < tries; i++ { + // Since its still running we should see exec as part of the container + out = strings.TrimSpace(inspectField(c, id, "ExecIDs")) + + if out != "[]" && out != "" { + break + } + c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still empty after 10 second")) + time.Sleep(1 * time.Second) + } + + // Save execID for later + execID, err := inspectFilter(id, "index .ExecIDs 0") + c.Assert(err, checker.IsNil, check.Commentf("failed to get the exec id")) + + // End the exec by creating the missing file + err = exec.Command(dockerBinary, "exec", id, + "sh", "-c", "touch /execid1").Run() + + c.Assert(err, checker.IsNil, check.Commentf("failed to run the 2nd exec cmd")) + + // Wait for 1st exec to complete + cmd.Wait() + + // Give the exec 10 chances/seconds to stop then give up and stop the test + for i := 0; i < tries; i++ { + // Since its still running we should see exec as part of the container + out = strings.TrimSpace(inspectField(c, id, "ExecIDs")) + + if out == "[]" { + break + } + c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still not empty after 10 second")) + time.Sleep(1 * time.Second) + } + + // But we should still be able to query the execID + sc, body, err := sockRequest("GET", "/exec/"+execID+"/json", nil) + c.Assert(sc, checker.Equals, http.StatusOK, check.Commentf("received status != 200 OK: %d\n%s", sc, body)) + + // Now delete the container and then an 'inspect' on the exec should + // result in a 404 (not 'container not running') + out, ec := dockerCmd(c, "rm", "-f", id) + c.Assert(ec, checker.Equals, 0, check.Commentf("error removing container: %s", out)) + sc, body, err = sockRequest("GET", "/exec/"+execID+"/json", nil) + c.Assert(sc, checker.Equals, http.StatusNotFound, check.Commentf("received status != 404: %d\n%s", sc, body)) +} + +func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) { + // Problematic on Windows as Windows does not support links + testRequires(c, DaemonIsLinux) + var out string + out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + c.Assert(idA, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) + out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + c.Assert(idB, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) + + dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") +} + +func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) { + // Not applicable on Windows to Windows CI. + testRequires(c, SameHostDaemon, DaemonIsLinux) + for _, fn := range []string{"resolv.conf", "hosts"} { + deleteAllContainers() + + content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) + c.Assert(err, checker.IsNil) + + c.Assert(strings.TrimSpace(string(content)), checker.Equals, "success", check.Commentf("Content was not what was modified in the container", string(content))) + + out, _ := dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") + contID := strings.TrimSpace(out) + netFilePath := containerStorageFile(contID, fn) + + f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) + c.Assert(err, checker.IsNil) + + if _, err := f.Seek(0, 0); err != nil { + f.Close() + c.Fatal(err) + } + + if err := f.Truncate(0); err != nil { + f.Close() + c.Fatal(err) + } + + if _, err := f.Write([]byte("success2\n")); err != nil { + f.Close() + c.Fatal(err) + } + f.Close() + + res, _ := dockerCmd(c, "exec", contID, "cat", "/etc/"+fn) + c.Assert(res, checker.Equals, "success2\n") + } +} + +func (s *DockerSuite) TestExecWithUser(c *check.C) { + // TODO Windows CI: This may be fixable in the future once Windows + // supports users + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "-u", "1", "parent", "id") + c.Assert(out, checker.Contains, "uid=1(daemon) gid=1(daemon)") + + out, _ = dockerCmd(c, "exec", "-u", "root", "parent", "id") + c.Assert(out, checker.Contains, "uid=0(root) gid=0(root)", check.Commentf("exec with user by id expected daemon user got %s", out)) +} + +func (s *DockerSuite) TestExecWithPrivileged(c *check.C) { + // Not applicable on Windows + testRequires(c, DaemonIsLinux, NotUserNamespace) + // Start main loop which attempts mknod repeatedly + dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", "-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`) + + // Check exec mknod doesn't work + cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16") + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil, check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) + c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) + + // Check exec mknod does work with --privileged + cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`) + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil) + + actual := strings.TrimSpace(out) + c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", out)) + + // Check subsequent unprivileged exec cannot mknod + cmd = exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32") + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil, check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) + c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) + + // Confirm at no point was mknod allowed + logCmd := exec.Command(dockerBinary, "logs", "parent") + out, _, err = runCommandWithOutput(logCmd) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Success") + +} + +func (s *DockerSuite) TestExecWithImageUser(c *check.C) { + // Not applicable on Windows + testRequires(c, DaemonIsLinux) + name := "testbuilduser" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio`, + true) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top") + + out, _ := dockerCmd(c, "exec", "dockerioexec", "whoami") + c.Assert(out, checker.Contains, "dockerio", check.Commentf("exec with user by id expected dockerio user got %s", out)) +} + +func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) { + // Windows does not support read-only + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top") + dockerCmd(c, "exec", "parent", "true") +} + +func (s *DockerSuite) TestExecUlimits(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testexeculimits" + runSleepingContainer(c, "-d", "--ulimit", "nproc=21", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "sh", "-c", "ulimit -p") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "21") +} + +// #15750 +func (s *DockerSuite) TestExecStartFails(c *check.C) { + // TODO Windows CI. This test should be portable. Figure out why it fails + // currently. + testRequires(c, DaemonIsLinux) + name := "exec-15750" + runSleepingContainer(c, "-d", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "no-such-cmd") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "executable file not found") +} + +// Fix regression in https://github.com/docker/docker/pull/26461#issuecomment-250287297 +func (s *DockerSuite) TestExecWindowsPathNotWiped(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", minimalBaseImage(), "powershell", "start-sleep", "60") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "powershell", "write-host", "$env:PATH") + out = strings.ToLower(strings.Trim(out, "\r\n")) + c.Assert(out, checker.Contains, `windowspowershell\v1.0`) +} + +func (s *DockerSuite) TestExecEnvLinksHost(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-d", "--name", "foo") + runSleepingContainer(c, "-d", "--link", "foo:db", "--hostname", "myhost", "--name", "bar") + out, _ := dockerCmd(c, "exec", "bar", "env") + c.Assert(out, checker.Contains, "HOSTNAME=myhost") + c.Assert(out, checker.Contains, "DB_NAME=/bar/db") +} + +func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { + testRequires(c, DaemonIsWindows) + runSleepingContainer(c, "-d", "--name", "test") + exec := make(chan bool) + go func() { + dockerCmd(c, "exec", "test", "cmd", "/c", "start sleep 10") + exec <- true + }() + + for { + top := make(chan string) + var out string + go func() { + out, _ := dockerCmd(c, "top", "test") + top <- out + }() + + select { + case <-time.After(time.Second * 5): + c.Error("timed out waiting for top while exec is exiting") + case out = <-top: + break + } + + if strings.Count(out, "busybox.exe") == 2 && !strings.Contains(out, "cmd.exe") { + // The initial exec process (cmd.exe) has exited, and both sleeps are currently running + break + } + time.Sleep(1 * time.Second) + } + + inspect := make(chan bool) + go func() { + dockerCmd(c, "inspect", "test") + inspect <- true + }() + + select { + case <-time.After(time.Second * 5): + c.Error("timed out waiting for inspect while exec is exiting") + case <-inspect: + break + } + + // Ensure the background sleep is still running + out, _ := dockerCmd(c, "top", "test") + c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 2) + + // The exec should exit when the background sleep exits + select { + case <-time.After(time.Second * 15): + c.Error("timed out waiting for async exec to exit") + case <-exec: + // Ensure the background sleep has actually exited + out, _ := dockerCmd(c, "top", "test") + c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 1) + break + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go new file mode 100644 index 0000000..5f69119 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go @@ -0,0 +1,93 @@ +// +build !windows,!test_no_exec + +package main + +import ( + "bytes" + "io" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// regression test for #12546 +func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "exec", "-i", contID, "echo", "-n", "hello") + p, err := pty.Start(cmd) + c.Assert(err, checker.IsNil) + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + ch := make(chan error) + go func() { ch <- cmd.Wait() }() + + select { + case err := <-ch: + c.Assert(err, checker.IsNil) + output := b.String() + c.Assert(strings.TrimSpace(output), checker.Equals, "hello") + case <-time.After(5 * time.Second): + c.Fatal("timed out running docker exec") + } +} + +func (s *DockerSuite) TestExecTTY(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + dockerCmd(c, "run", "-d", "--name=test", "busybox", "sh", "-c", "echo hello > /foo && top") + + cmd := exec.Command(dockerBinary, "exec", "-it", "test", "sh") + p, err := pty.Start(cmd) + c.Assert(err, checker.IsNil) + defer p.Close() + + _, err = p.Write([]byte("cat /foo && exit\n")) + c.Assert(err, checker.IsNil) + + chErr := make(chan error) + go func() { + chErr <- cmd.Wait() + }() + select { + case err := <-chErr: + c.Assert(err, checker.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("timeout waiting for exec to exit") + } + + buf := make([]byte, 256) + read, err := p.Read(buf) + c.Assert(err, checker.IsNil) + c.Assert(bytes.Contains(buf, []byte("hello")), checker.Equals, true, check.Commentf(string(buf[:read]))) +} + +// Test the the TERM env var is set when -t is provided on exec +func (s *DockerSuite) TestExecWithTERM(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + out, _ := dockerCmd(c, "run", "-id", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + cmd := exec.Command(dockerBinary, "exec", "-t", contID, "sh", "-c", "if [ -z $TERM ]; then exit 1; else exit 0; fi") + if err := cmd.Run(); err != nil { + c.Assert(err, checker.IsNil) + } +} + +// Test that the TERM env var is not set on exec when -t is not provided, even if it was set +// on run +func (s *DockerSuite) TestExecWithNoTERM(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + cmd := exec.Command(dockerBinary, "exec", contID, "sh", "-c", "if [ -z $TERM ]; then exit 0; else exit 1; fi") + if err := cmd.Run(); err != nil { + c.Assert(err, checker.IsNil) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go new file mode 100644 index 0000000..6a49cc8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go @@ -0,0 +1,36 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExperimentalVersionTrue(c *check.C) { + testRequires(c, ExperimentalDaemon) + + out, _ := dockerCmd(c, "version") + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { + c.Assert(line, checker.Matches, "*true") + return + } + } + + c.Fatal(`"Experimental" not found in version output`) +} + +func (s *DockerSuite) TestExperimentalVersionFalse(c *check.C) { + testRequires(c, NotExperimentalDaemon) + + out, _ := dockerCmd(c, "version") + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { + c.Assert(line, checker.Matches, "*false") + return + } + } + + c.Fatal(`"Experimental" not found in version output`) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go new file mode 100644 index 0000000..069dc08 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "os" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// export an image and try to import it into a new one +func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := "testexportcontainerandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + + out, _ := dockerCmd(c, "export", containerID) + + importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") + importCmd.Stdin = strings.NewReader(out) + out, _, err := runCommandWithOutput(importCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) + + cleanedImageID := strings.TrimSpace(out) + c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) +} + +// Used to test output flag in the export command +func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := "testexportcontainerwithoutputandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + dockerCmd(c, "export", "--output=testexp.tar", containerID) + defer os.Remove("testexp.tar") + + out, _, err := runCommandWithOutput(exec.Command("cat", "testexp.tar")) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") + importCmd.Stdin = strings.NewReader(out) + out, _, err = runCommandWithOutput(importCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) + + cleanedImageID := strings.TrimSpace(out) + c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go new file mode 100644 index 0000000..a794ca7 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go @@ -0,0 +1,405 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strings" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/plugins" + "github.com/go-check/check" +) + +func init() { + check.Suite(&DockerExternalGraphdriverSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerExternalGraphdriverSuite struct { + server *httptest.Server + jserver *httptest.Server + ds *DockerSuite + d *Daemon + ec map[string]*graphEventsCounter +} + +type graphEventsCounter struct { + activations int + creations int + removals int + gets int + puts int + stats int + cleanups int + exists int + init int + metadata int + diff int + applydiff int + changes int + diffsize int +} + +func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) +} + +func (s *DockerExternalGraphdriverSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerExternalGraphdriverSuite) SetUpSuite(c *check.C) { + s.ec = make(map[string]*graphEventsCounter) + s.setUpPluginViaSpecFile(c) + s.setUpPluginViaJSONFile(c) +} + +func (s *DockerExternalGraphdriverSuite) setUpPluginViaSpecFile(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + s.setUpPlugin(c, "test-external-graph-driver", "spec", mux, []byte(s.server.URL)) +} + +func (s *DockerExternalGraphdriverSuite) setUpPluginViaJSONFile(c *check.C) { + mux := http.NewServeMux() + s.jserver = httptest.NewServer(mux) + + p := plugins.NewLocalPlugin("json-external-graph-driver", s.jserver.URL) + b, err := json.Marshal(p) + c.Assert(err, check.IsNil) + + s.setUpPlugin(c, "json-external-graph-driver", "json", mux, b) +} + +func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ext string, mux *http.ServeMux, b []byte) { + type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + } + + type graphDriverResponse struct { + Err error `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + } + + respond := func(w http.ResponseWriter, data interface{}) { + w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json") + switch t := data.(type) { + case error: + fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, t.Error())) + case string: + fmt.Fprintln(w, t) + default: + json.NewEncoder(w).Encode(&data) + } + } + + decReq := func(b io.ReadCloser, out interface{}, w http.ResponseWriter) error { + defer b.Close() + if err := json.NewDecoder(b).Decode(&out); err != nil { + http.Error(w, fmt.Sprintf("error decoding json: %s", err.Error()), 500) + } + return nil + } + + base, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + vfsProto, err := vfs.Init(base, []string{}, nil, nil) + c.Assert(err, check.IsNil, check.Commentf("error initializing graph driver")) + driver := graphdriver.NewNaiveDiffDriver(vfsProto, nil, nil) + + s.ec[ext] = &graphEventsCounter{} + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].activations++ + respond(w, `{"Implements": ["GraphDriver"]}`) + }) + + mux.HandleFunc("/GraphDriver.Init", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].init++ + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.CreateReadWrite", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.CreateReadWrite(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.Create(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].removals++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Remove(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Get", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].gets++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + dir, err := driver.Get(req.ID, req.MountLabel) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Dir: dir}) + }) + + mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].puts++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Put(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Exists", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].exists++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + respond(w, &graphDriverResponse{Exists: driver.Exists(req.ID)}) + }) + + mux.HandleFunc("/GraphDriver.Status", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].stats++ + respond(w, &graphDriverResponse{Status: driver.Status()}) + }) + + mux.HandleFunc("/GraphDriver.Cleanup", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].cleanups++ + err := driver.Cleanup() + if err != nil { + respond(w, err) + return + } + respond(w, `{}`) + }) + + mux.HandleFunc("/GraphDriver.GetMetadata", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].metadata++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + data, err := driver.GetMetadata(req.ID) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Metadata: data}) + }) + + mux.HandleFunc("/GraphDriver.Diff", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].diff++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + diff, err := driver.Diff(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + io.Copy(w, diff) + }) + + mux.HandleFunc("/GraphDriver.Changes", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].changes++ + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + changes, err := driver.Changes(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Changes: changes}) + }) + + mux.HandleFunc("/GraphDriver.ApplyDiff", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].applydiff++ + diff := r.Body + defer r.Body.Close() + + id := r.URL.Query().Get("id") + parent := r.URL.Query().Get("parent") + + if id == "" { + http.Error(w, fmt.Sprintf("missing id"), 409) + } + + size, err := driver.ApplyDiff(id, parent, diff) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Size: size}) + }) + + mux.HandleFunc("/GraphDriver.DiffSize", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].diffsize++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + size, err := driver.DiffSize(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Size: size}) + }) + + err = os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, check.IsNil, check.Commentf("error creating /etc/docker/plugins")) + + specFile := "/etc/docker/plugins/" + name + "." + ext + err = ioutil.WriteFile(specFile, b, 0644) + c.Assert(err, check.IsNil, check.Commentf("error writing to %s", specFile)) +} + +func (s *DockerExternalGraphdriverSuite) TearDownSuite(c *check.C) { + s.server.Close() + s.jserver.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, check.IsNil, check.Commentf("error removing /etc/docker/plugins")) +} + +func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) { + testRequires(c, ExperimentalDaemon) + + s.testExternalGraphDriver("test-external-graph-driver", "spec", c) + s.testExternalGraphDriver("json-external-graph-driver", "json", c) +} + +func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ext string, c *check.C) { + if err := s.d.StartWithBusybox("-s", name); err != nil { + b, _ := ioutil.ReadFile(s.d.LogFileName()) + c.Assert(err, check.IsNil, check.Commentf("\n%s", string(b))) + } + + out, err := s.d.Cmd("run", "--name=graphtest", "busybox", "sh", "-c", "echo hello > /hello") + c.Assert(err, check.IsNil, check.Commentf(out)) + + err = s.d.Restart("-s", name) + + out, err = s.d.Cmd("inspect", "--format={{.GraphDriver.Name}}", "graphtest") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), check.Equals, name) + + out, err = s.d.Cmd("diff", "graphtest") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(strings.Contains(out, "A /hello"), check.Equals, true, check.Commentf("diff output: %s", out)) + + out, err = s.d.Cmd("rm", "-f", "graphtest") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("info") + c.Assert(err, check.IsNil, check.Commentf(out)) + + err = s.d.Stop() + c.Assert(err, check.IsNil) + + // Don't check s.ec.exists, because the daemon no longer calls the + // Exists function. + c.Assert(s.ec[ext].activations, check.Equals, 2) + c.Assert(s.ec[ext].init, check.Equals, 2) + c.Assert(s.ec[ext].creations >= 1, check.Equals, true) + c.Assert(s.ec[ext].removals >= 1, check.Equals, true) + c.Assert(s.ec[ext].gets >= 1, check.Equals, true) + c.Assert(s.ec[ext].puts >= 1, check.Equals, true) + c.Assert(s.ec[ext].stats, check.Equals, 5) + c.Assert(s.ec[ext].cleanups, check.Equals, 2) + c.Assert(s.ec[ext].applydiff >= 1, check.Equals, true) + c.Assert(s.ec[ext].changes, check.Equals, 1) + c.Assert(s.ec[ext].diffsize, check.Equals, 0) + c.Assert(s.ec[ext].diff, check.Equals, 0) + c.Assert(s.ec[ext].metadata, check.Equals, 1) +} + +func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriverPull(c *check.C) { + testRequires(c, Network, ExperimentalDaemon) + + c.Assert(s.d.Start(), check.IsNil) + + out, err := s.d.Cmd("pull", "busybox:latest") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go new file mode 100644 index 0000000..806d87e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go @@ -0,0 +1,627 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +const volumePluginName = "test-external-volume-driver" + +func init() { + check.Suite(&DockerExternalVolumeSuite{ + ds: &DockerSuite{}, + }) +} + +type eventCounter struct { + activations int + creations int + removals int + mounts int + unmounts int + paths int + lists int + gets int + caps int +} + +type DockerExternalVolumeSuite struct { + ds *DockerSuite + d *Daemon + *volumePlugin +} + +func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) + s.ec = &eventCounter{} +} + +func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) { + s.volumePlugin = newVolumePlugin(c, volumePluginName) +} + +type volumePlugin struct { + ec *eventCounter + *httptest.Server + vols map[string]vol +} + +type vol struct { + Name string + Mountpoint string + Ninja bool // hack used to trigger a null volume return on `Get` + Status map[string]interface{} + Options map[string]string +} + +func (p *volumePlugin) Close() { + p.Server.Close() +} + +func newVolumePlugin(c *check.C, name string) *volumePlugin { + mux := http.NewServeMux() + s := &volumePlugin{Server: httptest.NewServer(mux), ec: &eventCounter{}, vols: make(map[string]vol)} + + type pluginRequest struct { + Name string + Opts map[string]string + ID string + } + + type pluginResp struct { + Mountpoint string `json:",omitempty"` + Err string `json:",omitempty"` + } + + read := func(b io.ReadCloser) (pluginRequest, error) { + defer b.Close() + var pr pluginRequest + if err := json.NewDecoder(b).Decode(&pr); err != nil { + return pr, err + } + return pr, nil + } + + send := func(w http.ResponseWriter, data interface{}) { + switch t := data.(type) { + case error: + http.Error(w, t.Error(), 500) + case string: + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, t) + default: + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + json.NewEncoder(w).Encode(&data) + } + } + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + s.ec.activations++ + send(w, `{"Implements": ["VolumeDriver"]}`) + }) + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec.creations++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + _, isNinja := pr.Opts["ninja"] + status := map[string]interface{}{"Hello": "world"} + s.vols[pr.Name] = vol{Name: pr.Name, Ninja: isNinja, Status: status, Options: pr.Opts} + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { + s.ec.lists++ + vols := make([]vol, 0, len(s.vols)) + for _, v := range s.vols { + if v.Ninja { + continue + } + vols = append(vols, v) + } + send(w, map[string][]vol{"Volumes": vols}) + }) + + mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { + s.ec.gets++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + v, exists := s.vols[pr.Name] + if !exists { + send(w, `{"Err": "no such volume"}`) + } + + if v.Ninja { + send(w, map[string]vol{}) + return + } + + v.Mountpoint = hostVolumePath(pr.Name) + send(w, map[string]vol{"Volume": v}) + return + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec.removals++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + v, ok := s.vols[pr.Name] + if !ok { + send(w, nil) + return + } + + if err := os.RemoveAll(hostVolumePath(v.Name)); err != nil { + send(w, &pluginResp{Err: err.Error()}) + return + } + delete(s.vols, v.Name) + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + s.ec.paths++ + + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + p := hostVolumePath(pr.Name) + send(w, &pluginResp{Mountpoint: p}) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + s.ec.mounts++ + + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + if v, exists := s.vols[pr.Name]; exists { + // Use this to simulate a mount failure + if _, exists := v.Options["invalidOption"]; exists { + send(w, fmt.Errorf("invalid argument")) + return + } + } + + p := hostVolumePath(pr.Name) + if err := os.MkdirAll(p, 0755); err != nil { + send(w, &pluginResp{Err: err.Error()}) + return + } + + if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.Server.URL), 0644); err != nil { + send(w, err) + return + } + + if err := ioutil.WriteFile(filepath.Join(p, "mountID"), []byte(pr.ID), 0644); err != nil { + send(w, err) + return + } + + send(w, &pluginResp{Mountpoint: p}) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + s.ec.unmounts++ + + _, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) { + s.ec.caps++ + + _, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + send(w, `{"Capabilities": { "Scope": "global" }}`) + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + err = ioutil.WriteFile("/etc/docker/plugins/"+name+".spec", []byte(s.Server.URL), 0644) + c.Assert(err, checker.IsNil) + return s +} + +func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) { + s.volumePlugin.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerExternalVolumeSuite) TestVolumeCLICreateOptionConflict(c *check.C) { + dockerCmd(c, "volume", "create", "test") + + out, _, err := dockerCmdWithError("volume", "create", "test", "--driver", volumePluginName) + c.Assert(err, check.NotNil, check.Commentf("volume create exception name already in use with another driver")) + c.Assert(out, checker.Contains, "A volume named test already exists") + + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Driver }}", "test") + _, _, err = dockerCmdWithError("volume", "create", "test", "--driver", strings.TrimSpace(out)) + c.Assert(err, check.IsNil) + + // make sure hidden --name option conflicts with positional arg name + out, _, err = dockerCmdWithError("volume", "create", "--name", "test2", "test2") + c.Assert(err, check.NotNil, check.Commentf("Conflicting options: either specify --name or provide positional arg, not both")) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, s.Server.URL) + + _, err = s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + + p := hostVolumePath("external-volume-test") + _, err = os.Lstat(p) + c.Assert(err, checker.NotNil) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("Expected volume path in host to not exist: %s, %v\n", p, err)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, s.Server.URL) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("rm", "-fv", "vol-test1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 2) + c.Assert(s.ec.unmounts, checker.Equals, 2) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("rm", "-fv", "vol-test1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func hostVolumePath(name string) string { + return fmt.Sprintf("/var/lib/docker/volumes/%s", name) +} + +// Make sure a request to use a down driver doesn't block other requests +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *check.C) { + specPath := "/etc/docker/plugins/down-driver.spec" + err := ioutil.WriteFile(specPath, []byte("tcp://127.0.0.7:9999"), 0644) + c.Assert(err, check.IsNil) + defer os.RemoveAll(specPath) + + chCmd1 := make(chan struct{}) + chCmd2 := make(chan error) + cmd1 := exec.Command(dockerBinary, "volume", "create", "-d", "down-driver") + cmd2 := exec.Command(dockerBinary, "volume", "create") + + c.Assert(cmd1.Start(), checker.IsNil) + defer cmd1.Process.Kill() + time.Sleep(100 * time.Millisecond) // ensure API has been called + c.Assert(cmd2.Start(), checker.IsNil) + + go func() { + cmd1.Wait() + close(chCmd1) + }() + go func() { + chCmd2 <- cmd2.Wait() + }() + + select { + case <-chCmd1: + cmd2.Process.Kill() + c.Fatalf("volume create with down driver finished unexpectedly") + case err := <-chCmd2: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + cmd2.Process.Kill() + c.Fatal("volume creates are blocked by previous create requests when previous driver is down") + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + specPath := "/etc/docker/plugins/test-external-volume-driver-retry.spec" + os.RemoveAll(specPath) + defer os.RemoveAll(specPath) + + errchan := make(chan error) + go func() { + if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver-retry", "busybox:latest"); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + go func() { + // wait for a retry to occur, then create spec to allow plugin to register + time.Sleep(2000 * time.Millisecond) + // no need to check for an error here since it will get picked up by the timeout later + ioutil.WriteFile(specPath, []byte(s.Server.URL), 0644) + }() + + select { + case err := <-errchan: + c.Assert(err, checker.IsNil) + case <-time.After(8 * time.Second): + c.Fatal("volume creates fail when plugin not immediately available") + } + + _, err = s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "foo") + dockerCmd(c, "run", "-d", "--name", "testing", "-v", "foo:/bar", "busybox", "top") + + var mounts []struct { + Name string + Driver string + } + out := inspectFieldJSON(c, "testing", "Mounts") + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Driver, checker.Equals, volumePluginName) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverList(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc3") + out, _ := dockerCmd(c, "volume", "ls") + ls := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(ls), check.Equals, 2, check.Commentf("\n%s", out)) + + vol := strings.Fields(ls[len(ls)-1]) + c.Assert(len(vol), check.Equals, 2, check.Commentf("%v", vol)) + c.Assert(vol[0], check.Equals, volumePluginName) + c.Assert(vol[1], check.Equals, "abc3") + + c.Assert(s.ec.lists, check.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) { + out, _, err := dockerCmdWithError("volume", "inspect", "dummy") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No such volume") + c.Assert(s.ec.gets, check.Equals, 1) + + dockerCmd(c, "volume", "create", "test", "-d", volumePluginName) + out, _ = dockerCmd(c, "volume", "inspect", "test") + + type vol struct { + Status map[string]string + } + var st []vol + + c.Assert(json.Unmarshal([]byte(out), &st), checker.IsNil) + c.Assert(st, checker.HasLen, 1) + c.Assert(st[0].Status, checker.HasLen, 1, check.Commentf("%v", st[0])) + c.Assert(st[0].Status["Hello"], checker.Equals, "world", check.Commentf("%v", st[0].Status)) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc1") + err := s.d.Restart() + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--name=test", "-v", "abc1:/foo", "busybox", "true") + var mounts []types.MountPoint + inspectFieldAndMarshall(c, "test", "Mounts", &mounts) + c.Assert(mounts, checker.HasLen, 1) + c.Assert(mounts[0].Driver, checker.Equals, volumePluginName) +} + +// Ensures that the daemon handles when the plugin responds to a `Get` request with a null volume and a null error. +// Prior the daemon would panic in this scenario. +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) { + c.Assert(s.d.Start(), checker.IsNil) + + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, "abc2", "--opt", "ninja=1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "abc2") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No such volume") +} + +// Ensure only cached paths are used in volume list to prevent N+1 calls to `VolumeDriver.Path` +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *check.C) { + c.Assert(s.d.Start(), checker.IsNil) + c.Assert(s.ec.paths, checker.Equals, 0) + + out, err := s.d.Cmd("volume", "create", "test", "--driver=test-external-volume-driver") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "inspect", "--format='{{.Mountpoint}}'", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + c.Assert(s.ec.paths, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} + +// Check that VolumeDriver.Capabilities gets called, and only called once +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *check.C) { + c.Assert(s.d.Start(), checker.IsNil) + c.Assert(s.ec.caps, checker.Equals, 0) + + for i := 0; i < 3; i++ { + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.caps, checker.Equals, 1) + out, err = s.d.Cmd("volume", "inspect", "--format={{.Scope}}", fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, volume.GlobalScope) + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *check.C) { + driverName := stringid.GenerateNonCryptoID() + p := newVolumePlugin(c, driverName) + defer p.Close() + + c.Assert(s.d.StartWithBusybox(), checker.IsNil) + + out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "volume named test already exists") + + // simulate out of band volume deletion on plugin level + delete(p.vols, "test") + + // test re-create with same driver + out, err = s.d.Cmd("volume", "create", "-d", driverName, "--opt", "foo=bar", "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var vs []types.Volume + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Driver, checker.Equals, driverName) + c.Assert(vs[0].Options, checker.NotNil) + c.Assert(vs[0].Options["foo"], checker.Equals, "bar") + c.Assert(vs[0].Driver, checker.Equals, driverName) + + // simulate out of band volume deletion on plugin level + delete(p.vols, "test") + + // test create with different driver + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + vs = nil + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Options, checker.HasLen, 0) + c.Assert(vs[0].Driver, checker.Equals, "local") +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *check.C) { + c.Assert(s.d.StartWithBusybox(), checker.IsNil) + s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount") + + out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) + out, _ = s.d.Cmd("run", "-w", "/foo", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go new file mode 100644 index 0000000..6b7baeb --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go @@ -0,0 +1,169 @@ +package main + +import ( + "encoding/json" + + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func waitForStatus(c *check.C, name string, prev string, expected string) { + prev = prev + "\n" + expected = expected + "\n" + for { + out, _ := dockerCmd(c, "inspect", "--format={{.State.Status}}", name) + if out == expected { + return + } + c.Check(out, checker.Equals, prev) + if out != prev { + return + } + time.Sleep(100 * time.Millisecond) + } +} + +func waitForHealthStatus(c *check.C, name string, prev string, expected string) { + prev = prev + "\n" + expected = expected + "\n" + for { + out, _ := dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + if out == expected { + return + } + c.Check(out, checker.Equals, prev) + if out != prev { + return + } + time.Sleep(100 * time.Millisecond) + } +} + +func getHealth(c *check.C, name string) *types.Health { + out, _ := dockerCmd(c, "inspect", "--format={{json .State.Health}}", name) + var health types.Health + err := json.Unmarshal([]byte(out), &health) + c.Check(err, checker.Equals, nil) + return &health +} + +func (s *DockerSuite) TestHealth(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + imageName := "testhealth" + _, err := buildImage(imageName, + `FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD cat /status`, + true) + + c.Check(err, check.IsNil) + + // No health status before starting + name := "test_health" + dockerCmd(c, "create", "--name", name, imageName) + out, _ := dockerCmd(c, "ps", "-a", "--format={{.Status}}") + c.Check(out, checker.Equals, "Created\n") + + // Inspect the options + out, _ = dockerCmd(c, "inspect", + "--format=timeout={{.Config.Healthcheck.Timeout}} "+ + "interval={{.Config.Healthcheck.Interval}} "+ + "retries={{.Config.Healthcheck.Retries}} "+ + "test={{.Config.Healthcheck.Test}}", name) + c.Check(out, checker.Equals, "timeout=30s interval=1s retries=0 test=[CMD-SHELL cat /status]\n") + + // Start + dockerCmd(c, "start", name) + waitForHealthStatus(c, name, "starting", "healthy") + + // Make it fail + dockerCmd(c, "exec", name, "rm", "/status") + waitForHealthStatus(c, name, "healthy", "unhealthy") + + // Inspect the status + out, _ = dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + c.Check(out, checker.Equals, "unhealthy\n") + + // Make it healthy again + dockerCmd(c, "exec", name, "touch", "/status") + waitForHealthStatus(c, name, "unhealthy", "healthy") + + // Remove container + dockerCmd(c, "rm", "-f", name) + + // Disable the check from the CLI + out, _ = dockerCmd(c, "create", "--name=noh", "--no-healthcheck", imageName) + out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", "noh") + c.Check(out, checker.Equals, "[NONE]\n") + dockerCmd(c, "rm", "noh") + + // Disable the check with a new build + _, err = buildImage("no_healthcheck", + `FROM testhealth + HEALTHCHECK NONE`, true) + c.Check(err, check.IsNil) + + out, _ = dockerCmd(c, "inspect", "--format={{.ContainerConfig.Healthcheck.Test}}", "no_healthcheck") + c.Check(out, checker.Equals, "[NONE]\n") + + // Enable the checks from the CLI + _, _ = dockerCmd(c, "run", "-d", "--name=fatal_healthcheck", + "--health-interval=0.5s", + "--health-retries=3", + "--health-cmd=cat /status", + "no_healthcheck") + waitForHealthStatus(c, "fatal_healthcheck", "starting", "healthy") + health := getHealth(c, "fatal_healthcheck") + c.Check(health.Status, checker.Equals, "healthy") + c.Check(health.FailingStreak, checker.Equals, 0) + last := health.Log[len(health.Log)-1] + c.Check(last.ExitCode, checker.Equals, 0) + c.Check(last.Output, checker.Equals, "OK\n") + + // Fail the check + dockerCmd(c, "exec", "fatal_healthcheck", "rm", "/status") + waitForHealthStatus(c, "fatal_healthcheck", "healthy", "unhealthy") + + failsStr, _ := dockerCmd(c, "inspect", "--format={{.State.Health.FailingStreak}}", "fatal_healthcheck") + fails, err := strconv.Atoi(strings.TrimSpace(failsStr)) + c.Check(err, check.IsNil) + c.Check(fails >= 3, checker.Equals, true) + dockerCmd(c, "rm", "-f", "fatal_healthcheck") + + // Check timeout + // Note: if the interval is too small, it seems that Docker spends all its time running health + // checks and never gets around to killing it. + _, _ = dockerCmd(c, "run", "-d", "--name=test", + "--health-interval=1s", "--health-cmd=sleep 5m", "--health-timeout=1ms", imageName) + waitForHealthStatus(c, "test", "starting", "unhealthy") + health = getHealth(c, "test") + last = health.Log[len(health.Log)-1] + c.Check(health.Status, checker.Equals, "unhealthy") + c.Check(last.ExitCode, checker.Equals, -1) + c.Check(last.Output, checker.Equals, "Health check exceeded timeout (1ms)") + dockerCmd(c, "rm", "-f", "test") + + // Check JSON-format + _, err = buildImage(imageName, + `FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD ["cat", "/my status"]`, + true) + c.Check(err, check.IsNil) + out, _ = dockerCmd(c, "inspect", + "--format={{.Config.Healthcheck.Test}}", imageName) + c.Check(out, checker.Equals, "[CMD cat /my status]\n") + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go new file mode 100644 index 0000000..29b6553 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go @@ -0,0 +1,321 @@ +package main + +import ( + "fmt" + "os/exec" + "runtime" + "strings" + "unicode" + + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestHelpTextVerify(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Make sure main help text fits within 80 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Test for HOME set to its default value and set to "/" on linux + // Yes on windows setting up an array and looping (right now) isn't + // necessary because we just have one value, but we'll need the + // array/loop on linux so we might as well set it up so that we can + // test any number of home dirs later on and all we need to do is + // modify the array - the rest of the testing infrastructure should work + homes := []string{homedir.Get()} + + // Non-Windows machines need to test for this special case of $HOME + if runtime.GOOS != "windows" { + homes = append(homes, "/") + } + + homeKey := homedir.Key() + baseEnvs := appendBaseEnv(true) + + // Remove HOME env var from list so we can add a new value later. + for i, env := range baseEnvs { + if strings.HasPrefix(env, homeKey+"=") { + baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) + break + } + } + + for _, home := range homes { + + // Dup baseEnvs and add our new HOME value + newEnvs := make([]string, len(baseEnvs)+1) + copy(newEnvs, baseEnvs) + newEnvs[len(newEnvs)-1] = homeKey + "=" + home + + scanForHome := runtime.GOOS != "windows" && home != "/" + + // Check main help text to make sure its not over 80 chars + helpCmd := exec.Command(dockerBinary, "help") + helpCmd.Env = newEnvs + out, _, err := runCommandWithOutput(helpCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + lines := strings.Split(out, "\n") + for _, line := range lines { + // All lines should not end with a space + c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Line should not end with a space")) + + if scanForHome && strings.Contains(line, `=`+home) { + c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) + } + if runtime.GOOS != "windows" { + i := strings.Index(line, homedir.GetShortcutString()) + if i >= 0 && i != len(line)-1 && line[i+1] != '/' { + c.Fatalf("Main help should not have used home shortcut:\n%s", line) + } + } + } + + // Make sure each cmd's help text fits within 90 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Pull the list of commands from the "Commands:" section of docker help + helpCmd = exec.Command(dockerBinary, "help") + helpCmd.Env = newEnvs + out, _, err = runCommandWithOutput(helpCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + i := strings.Index(out, "Commands:") + c.Assert(i, checker.GreaterOrEqualThan, 0, check.Commentf("Missing 'Commands:' in:\n%s", out)) + + cmds := []string{} + // Grab all chars starting at "Commands:" + helpOut := strings.Split(out[i:], "\n") + // Skip first line, it is just "Commands:" + helpOut = helpOut[1:] + + // Create the list of commands we want to test + cmdsToTest := []string{} + for _, cmd := range helpOut { + // Stop on blank line or non-idented line + if cmd == "" || !unicode.IsSpace(rune(cmd[0])) { + break + } + + // Grab just the first word of each line + cmd = strings.Split(strings.TrimSpace(cmd), " ")[0] + cmds = append(cmds, cmd) // Saving count for later + + cmdsToTest = append(cmdsToTest, cmd) + } + + // Add some 'two word' commands - would be nice to automatically + // calculate this list - somehow + cmdsToTest = append(cmdsToTest, "volume create") + cmdsToTest = append(cmdsToTest, "volume inspect") + cmdsToTest = append(cmdsToTest, "volume ls") + cmdsToTest = append(cmdsToTest, "volume rm") + cmdsToTest = append(cmdsToTest, "network connect") + cmdsToTest = append(cmdsToTest, "network create") + cmdsToTest = append(cmdsToTest, "network disconnect") + cmdsToTest = append(cmdsToTest, "network inspect") + cmdsToTest = append(cmdsToTest, "network ls") + cmdsToTest = append(cmdsToTest, "network rm") + + if experimentalDaemon { + cmdsToTest = append(cmdsToTest, "checkpoint create") + cmdsToTest = append(cmdsToTest, "checkpoint ls") + cmdsToTest = append(cmdsToTest, "checkpoint rm") + } + + // Divide the list of commands into go routines and run the func testcommand on the commands in parallel + // to save runtime of test + + errChan := make(chan error) + + for index := 0; index < len(cmdsToTest); index++ { + go func(index int) { + errChan <- testCommand(cmdsToTest[index], newEnvs, scanForHome, home) + }(index) + } + + for index := 0; index < len(cmdsToTest); index++ { + err := <-errChan + if err != nil { + c.Fatal(err) + } + } + } +} + +func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) { + // Test to make sure the exit code and output (stdout vs stderr) of + // various good and bad cases are what we expect + + // docker : stdout=all, stderr=empty, rc=0 + out, _, err := dockerCmdWithError() + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker'\n")) + + // docker help: stdout=all, stderr=empty, rc=0 + out, _, err = dockerCmdWithError("help") + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker help'\n")) + + // docker --help: stdout=all, stderr=empty, rc=0 + out, _, err = dockerCmdWithError("--help") + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker --help'\n")) + + // docker inspect busybox: stdout=all, stderr=empty, rc=0 + // Just making sure stderr is empty on valid cmd + out, _, err = dockerCmdWithError("inspect", "busybox") + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker inspect busyBox'\n")) + + // docker rm: stdout=empty, stderr=all, rc!=0 + // testing the min arg error msg + cmd := exec.Command(dockerBinary, "rm") + stdout, stderr, _, err := runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Equals, "") + // Should not contain full help text but should contain info about + // # of args and Usage line + c.Assert(stderr, checker.Contains, "requires at least 1 argument", check.Commentf("Missing # of args text from 'docker rm'\n")) + + // docker rm NoSuchContainer: stdout=empty, stderr=all, rc=0 + // testing to make sure no blank line on error + cmd = exec.Command(dockerBinary, "rm", "NoSuchContainer") + stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(len(stderr), checker.Not(checker.Equals), 0) + c.Assert(stdout, checker.Equals, "") + // Be really picky + c.Assert(stderr, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker rm'\n")) + + // docker BadCmd: stdout=empty, stderr=all, rc=0 + cmd = exec.Command(dockerBinary, "BadCmd") + stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Equals, "") + c.Assert(stderr, checker.Equals, "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'\n", check.Commentf("Unexcepted output for 'docker badCmd'\n")) +} + +func testCommand(cmd string, newEnvs []string, scanForHome bool, home string) error { + + args := strings.Split(cmd+" --help", " ") + + // Check the full usage text + helpCmd := exec.Command(dockerBinary, args...) + helpCmd.Env = newEnvs + out, stderr, _, err := runCommandWithStdoutStderr(helpCmd) + if len(stderr) != 0 { + return fmt.Errorf("Error on %q help. non-empty stderr:%q\n", cmd, stderr) + } + if strings.HasSuffix(out, "\n\n") { + return fmt.Errorf("Should not have blank line on %q\n", cmd) + } + if !strings.Contains(out, "--help") { + return fmt.Errorf("All commands should mention '--help'. Command '%v' did not.\n", cmd) + } + + if err != nil { + return fmt.Errorf(out) + } + + // Check each line for lots of stuff + lines := strings.Split(out, "\n") + for _, line := range lines { + i := strings.Index(line, "~") + if i >= 0 && i != len(line)-1 && line[i+1] != '/' { + return fmt.Errorf("Help for %q should not have used ~:\n%s", cmd, line) + } + + // If a line starts with 4 spaces then assume someone + // added a multi-line description for an option and we need + // to flag it + if strings.HasPrefix(line, " ") && + !strings.HasPrefix(strings.TrimLeft(line, " "), "--") { + return fmt.Errorf("Help for %q should not have a multi-line option", cmd) + } + + // Options should NOT end with a period + if strings.HasPrefix(line, " -") && strings.HasSuffix(line, ".") { + return fmt.Errorf("Help for %q should not end with a period: %s", cmd, line) + } + + // Options should NOT end with a space + if strings.HasSuffix(line, " ") { + return fmt.Errorf("Help for %q should not end with a space: %s", cmd, line) + } + + } + + // For each command make sure we generate an error + // if we give a bad arg + args = strings.Split(cmd+" --badArg", " ") + + out, _, err = dockerCmdWithError(args...) + if err == nil { + return fmt.Errorf(out) + } + + // Be really picky + if strings.HasSuffix(stderr, "\n\n") { + return fmt.Errorf("Should not have a blank line at the end of 'docker rm'\n") + } + + // Now make sure that each command will print a short-usage + // (not a full usage - meaning no opts section) if we + // are missing a required arg or pass in a bad arg + + // These commands will never print a short-usage so don't test + noShortUsage := map[string]string{ + "images": "", + "login": "", + "logout": "", + "network": "", + "stats": "", + "volume create": "", + } + + if _, ok := noShortUsage[cmd]; !ok { + // skipNoArgs are ones that we don't want to try w/o + // any args. Either because it'll hang the test or + // lead to incorrect test result (like false negative). + // Whatever the reason, skip trying to run w/o args and + // jump to trying with a bogus arg. + skipNoArgs := map[string]struct{}{ + "daemon": {}, + "events": {}, + "load": {}, + } + + var result *icmd.Result + if _, ok := skipNoArgs[cmd]; !ok { + result = dockerCmdWithResult(strings.Split(cmd, " ")...) + } + + // If its ok w/o any args then try again with an arg + if result == nil || result.ExitCode == 0 { + result = dockerCmdWithResult(strings.Split(cmd+" badArg", " ")...) + } + + if err := result.Compare(icmd.Expected{ + Out: icmd.None, + Err: "\nUsage:", + ExitCode: 1, + }); err != nil { + return err + } + + stderr := result.Stderr() + // Shouldn't have full usage + if strings.Contains(stderr, "--help=false") { + return fmt.Errorf("Should not have full usage on %q:%v", result.Cmd.Args, stderr) + } + if strings.HasSuffix(stderr, "\n\n") { + return fmt.Errorf("Should not have a blank line on %q\n%v", result.Cmd.Args, stderr) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go new file mode 100644 index 0000000..9979080 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go @@ -0,0 +1,121 @@ +package main + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// This is a heisen-test. Because the created timestamp of images and the behavior of +// sort is not predictable it doesn't always fail. +func (s *DockerSuite) TestBuildHistory(c *check.C) { + name := "testbuildhistory" + _, err := buildImage(name, `FROM `+minimalBaseImage()+` +LABEL label.A="A" +LABEL label.B="B" +LABEL label.C="C" +LABEL label.D="D" +LABEL label.E="E" +LABEL label.F="F" +LABEL label.G="G" +LABEL label.H="H" +LABEL label.I="I" +LABEL label.J="J" +LABEL label.K="K" +LABEL label.L="L" +LABEL label.M="M" +LABEL label.N="N" +LABEL label.O="O" +LABEL label.P="P" +LABEL label.Q="Q" +LABEL label.R="R" +LABEL label.S="S" +LABEL label.T="T" +LABEL label.U="U" +LABEL label.V="V" +LABEL label.W="W" +LABEL label.X="X" +LABEL label.Y="Y" +LABEL label.Z="Z"`, + true) + + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "history", "testbuildhistory") + actualValues := strings.Split(out, "\n")[1:27] + expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"} + + for i := 0; i < 26; i++ { + echoValue := fmt.Sprintf("LABEL label.%s=%s", expectedValues[i], expectedValues[i]) + actualValue := actualValues[i] + c.Assert(actualValue, checker.Contains, echoValue) + } + +} + +func (s *DockerSuite) TestHistoryExistentImage(c *check.C) { + dockerCmd(c, "history", "busybox") +} + +func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) { + _, _, err := dockerCmdWithError("history", "testHistoryNonExistentImage") + c.Assert(err, checker.NotNil, check.Commentf("history on a non-existent image should fail.")) +} + +func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) { + name := "testhistoryimagewithcomment" + + // make an image through docker commit [ -m messages ] + + dockerCmd(c, "run", "--name", name, "busybox", "true") + dockerCmd(c, "wait", name) + + comment := "This_is_a_comment" + dockerCmd(c, "commit", "-m="+comment, name, name) + + // test docker history to check comment messages + + out, _ := dockerCmd(c, "history", name) + outputTabs := strings.Fields(strings.Split(out, "\n")[1]) + actualValue := outputTabs[len(outputTabs)-1] + c.Assert(actualValue, checker.Contains, comment) +} + +func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) { + out, _ := dockerCmd(c, "history", "--human=false", "busybox") + lines := strings.Split(out, "\n") + sizeColumnRegex, _ := regexp.Compile("SIZE +") + indices := sizeColumnRegex.FindStringIndex(lines[0]) + startIndex := indices[0] + endIndex := indices[1] + for i := 1; i < len(lines)-1; i++ { + if endIndex > len(lines[i]) { + endIndex = len(lines[i]) + } + sizeString := lines[i][startIndex:endIndex] + + _, err := strconv.Atoi(strings.TrimSpace(sizeString)) + c.Assert(err, checker.IsNil, check.Commentf("The size '%s' was not an Integer", sizeString)) + } +} + +func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) { + out, _ := dockerCmd(c, "history", "--human=true", "busybox") + lines := strings.Split(out, "\n") + sizeColumnRegex, _ := regexp.Compile("SIZE +") + humanSizeRegexRaw := "\\d+.*B" // Matches human sizes like 10 MB, 3.2 KB, etc + indices := sizeColumnRegex.FindStringIndex(lines[0]) + startIndex := indices[0] + endIndex := indices[1] + for i := 1; i < len(lines)-1; i++ { + if endIndex > len(lines[i]) { + endIndex = len(lines[i]) + } + sizeString := lines[i][startIndex:endIndex] + c.Assert(strings.TrimSpace(sizeString), checker.Matches, humanSizeRegexRaw, check.Commentf("The size '%s' was not in human format", sizeString)) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go new file mode 100644 index 0000000..3b678a2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go @@ -0,0 +1,364 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) { + imagesOut, _ := dockerCmd(c, "images") + c.Assert(imagesOut, checker.Contains, "busybox") +} + +func (s *DockerSuite) TestImagesEnsureImageWithTagIsListed(c *check.C) { + name := "imagewithtag" + dockerCmd(c, "tag", "busybox", name+":v1") + dockerCmd(c, "tag", "busybox", name+":v1v1") + dockerCmd(c, "tag", "busybox", name+":v2") + + imagesOut, _ := dockerCmd(c, "images", name+":v1") + c.Assert(imagesOut, checker.Contains, name) + c.Assert(imagesOut, checker.Contains, "v1") + c.Assert(imagesOut, checker.Not(checker.Contains), "v2") + c.Assert(imagesOut, checker.Not(checker.Contains), "v1v1") + + imagesOut, _ = dockerCmd(c, "images", name) + c.Assert(imagesOut, checker.Contains, name) + c.Assert(imagesOut, checker.Contains, "v1") + c.Assert(imagesOut, checker.Contains, "v1v1") + c.Assert(imagesOut, checker.Contains, "v2") +} + +func (s *DockerSuite) TestImagesEnsureImageWithBadTagIsNotListed(c *check.C) { + imagesOut, _ := dockerCmd(c, "images", "busybox:nonexistent") + c.Assert(imagesOut, checker.Not(checker.Contains), "busybox") +} + +func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) { + id1, err := buildImage("order:test_a", + `FROM busybox + MAINTAINER dockerio1`, true) + c.Assert(err, checker.IsNil) + time.Sleep(1 * time.Second) + id2, err := buildImage("order:test_c", + `FROM busybox + MAINTAINER dockerio2`, true) + c.Assert(err, checker.IsNil) + time.Sleep(1 * time.Second) + id3, err := buildImage("order:test_b", + `FROM busybox + MAINTAINER dockerio3`, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc") + imgs := strings.Split(out, "\n") + c.Assert(imgs[0], checker.Equals, id3, check.Commentf("First image must be %s, got %s", id3, imgs[0])) + c.Assert(imgs[1], checker.Equals, id2, check.Commentf("First image must be %s, got %s", id2, imgs[1])) + c.Assert(imgs[2], checker.Equals, id1, check.Commentf("First image must be %s, got %s", id1, imgs[2])) +} + +func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) { + out, _, err := dockerCmdWithError("images", "-f", "FOO=123") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestImagesFilterLabelMatch(c *check.C) { + imageName1 := "images_filter_test1" + imageName2 := "images_filter_test2" + imageName3 := "images_filter_test3" + image1ID, err := buildImage(imageName1, + `FROM busybox + LABEL match me`, true) + c.Assert(err, check.IsNil) + + image2ID, err := buildImage(imageName2, + `FROM busybox + LABEL match="me too"`, true) + c.Assert(err, check.IsNil) + + image3ID, err := buildImage(imageName3, + `FROM busybox + LABEL nomatch me`, true) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match") + out = strings.TrimSpace(out) + c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image1ID)) + c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image2ID)) + c.Assert(out, check.Not(check.Matches), fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image3ID)) + + out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match=me too") + out = strings.TrimSpace(out) + c.Assert(out, check.Equals, image2ID) +} + +// Regression : #15659 +func (s *DockerSuite) TestImagesFilterLabelWithCommit(c *check.C) { + // Create a container + dockerCmd(c, "run", "--name", "bar", "busybox", "/bin/sh") + // Commit with labels "using changes" + out, _ := dockerCmd(c, "commit", "-c", "LABEL foo.version=1.0.0-1", "-c", "LABEL foo.name=bar", "-c", "LABEL foo.author=starlord", "bar", "bar:1.0.0-1") + imageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=foo.version=1.0.0-1") + out = strings.TrimSpace(out) + c.Assert(out, check.Equals, imageID) +} + +func (s *DockerSuite) TestImagesFilterSinceAndBefore(c *check.C) { + imageID1, err := buildImage("image:1", `FROM `+minimalBaseImage()+` +LABEL number=1`, true) + c.Assert(err, checker.IsNil) + imageID2, err := buildImage("image:2", `FROM `+minimalBaseImage()+` +LABEL number=2`, true) + c.Assert(err, checker.IsNil) + imageID3, err := buildImage("image:3", `FROM `+minimalBaseImage()+` +LABEL number=3`, true) + c.Assert(err, checker.IsNil) + + expected := []string{imageID3, imageID2} + + out, _ := dockerCmd(c, "images", "-f", "since=image:1", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "since="+imageID1, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID3} + + out, _ = dockerCmd(c, "images", "-f", "since=image:2", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "since="+imageID2, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID2, imageID1} + + out, _ = dockerCmd(c, "images", "-f", "before=image:3", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "before="+imageID3, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID1} + + out, _ = dockerCmd(c, "images", "-f", "before=image:2", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "before="+imageID2, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) +} + +func assertImageList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + + if len(lines)-1 != len(expected) { + return false + } + + imageIDIndex := strings.Index(lines[0], "IMAGE ID") + for i := 0; i < len(expected); i++ { + imageID := lines[i+1][imageIDIndex : imageIDIndex+12] + found := false + for _, e := range expected { + if imageID == e[7:19] { + found = true + break + } + } + if !found { + return false + } + } + + return true +} + +func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) { + imageName := "images_filter_test" + buildImage(imageName, + `FROM busybox + RUN touch /test/foo + RUN touch /test/bar + RUN touch /test/baz`, true) + + filters := []string{ + "dangling=true", + "Dangling=true", + " dangling=true", + "dangling=true ", + "dangling = true", + } + + imageListings := make([][]string, 5, 5) + for idx, filter := range filters { + out, _ := dockerCmd(c, "images", "-q", "-f", filter) + listing := strings.Split(out, "\n") + sort.Strings(listing) + imageListings[idx] = listing + } + + for idx, listing := range imageListings { + if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) { + for idx, errListing := range imageListings { + fmt.Printf("out %d\n", idx) + for _, image := range errListing { + fmt.Print(image) + } + fmt.Print("") + } + c.Fatalf("All output must be the same") + } + } +} + +func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) { + testRequires(c, DaemonIsLinux) + // create container 1 + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + containerID1 := strings.TrimSpace(out) + + // tag as foobox + out, _ = dockerCmd(c, "commit", containerID1, "foobox") + imageID := stringid.TruncateID(strings.TrimSpace(out)) + + // overwrite the tag, making the previous image dangling + dockerCmd(c, "tag", "busybox", "foobox") + + out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true") + // Expect one dangling image + c.Assert(strings.Count(out, imageID), checker.Equals, 1) + + out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false") + //dangling=false would not include dangling images + c.Assert(out, checker.Not(checker.Contains), imageID) + + out, _ = dockerCmd(c, "images") + //docker images still include dangling images + c.Assert(out, checker.Contains, imageID) + +} + +func (s *DockerSuite) TestImagesWithIncorrectFilter(c *check.C) { + out, _, err := dockerCmdWithError("images", "-f", "dangling=invalid") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestImagesEnsureOnlyHeadsImagesShown(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER docker + ENV foo bar` + + head, out, err := buildImageWithOut("scratch-image", dockerfile, false) + c.Assert(err, check.IsNil) + + // this is just the output of docker build + // we're interested in getting the image id of the MAINTAINER instruction + // and that's located at output, line 5, from 7 to end + split := strings.Split(out, "\n") + intermediate := strings.TrimSpace(split[5][7:]) + + out, _ = dockerCmd(c, "images") + // images shouldn't show non-heads images + c.Assert(out, checker.Not(checker.Contains), intermediate) + // images should contain final built images + c.Assert(out, checker.Contains, stringid.TruncateID(head)) +} + +func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support FROM scratch + + dockerfile := ` + FROM scratch + MAINTAINER docker` + + id, _, err := buildImageWithOut("scratch-image", dockerfile, false) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "images") + // images should contain images built from scratch + c.Assert(out, checker.Contains, stringid.TruncateID(id)) +} + +// For W2W - equivalent to TestImagesEnsureImagesFromScratchShown but Windows +// doesn't support from scratch +func (s *DockerSuite) TestImagesEnsureImagesFromBusyboxShown(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER docker` + + id, _, err := buildImageWithOut("busybox-image", dockerfile, false) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "images") + // images should contain images built from busybox + c.Assert(out, checker.Contains, stringid.TruncateID(id)) +} + +// #18181 +func (s *DockerSuite) TestImagesFilterNameWithPort(c *check.C) { + tag := "a.b.c.d:5000/hello" + dockerCmd(c, "tag", "busybox", tag) + out, _ := dockerCmd(c, "images", tag) + c.Assert(out, checker.Contains, tag) + + out, _ = dockerCmd(c, "images", tag+":latest") + c.Assert(out, checker.Contains, tag) + + out, _ = dockerCmd(c, "images", tag+":no-such-tag") + c.Assert(out, checker.Not(checker.Contains), tag) +} + +func (s *DockerSuite) TestImagesFormat(c *check.C) { + // testRequires(c, DaemonIsLinux) + tag := "myimage" + dockerCmd(c, "tag", "busybox", tag+":v1") + dockerCmd(c, "tag", "busybox", tag+":v2") + + out, _ := dockerCmd(c, "images", "--format", "{{.Repository}}", tag) + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"myimage", "myimage"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +// ImagesDefaultFormatAndQuiet +func (s *DockerSuite) TestImagesFormatDefaultFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + + // create container 1 + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + containerID1 := strings.TrimSpace(out) + + // tag as foobox + out, _ = dockerCmd(c, "commit", containerID1, "myimage") + imageID := stringid.TruncateID(strings.TrimSpace(out)) + + config := `{ + "imagesFormat": "{{ .ID }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "--config", d, "images", "-q", "myimage") + c.Assert(out, checker.Equals, imageID+"\n", check.Commentf("Expected to print only the image id, got %v\n", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go new file mode 100644 index 0000000..57dc2a6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go @@ -0,0 +1,150 @@ +package main + +import ( + "bufio" + "compress/gzip" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestImportDisplay(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "export", cleanedContainerID), + exec.Command(dockerBinary, "import", "-"), + ) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + + image := strings.TrimSpace(out) + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) +} + +func (s *DockerSuite) TestImportBadURL(c *check.C) { + out, _, err := dockerCmdWithError("import", "http://nourl/bad") + c.Assert(err, checker.NotNil, check.Commentf("import was supposed to fail but didn't")) + // Depending on your system you can get either of these errors + if !strings.Contains(out, "dial tcp") && + !strings.Contains(out, "ApplyLayer exit status 1 stdout: stderr: archive/tar: invalid tar header") && + !strings.Contains(out, "Error processing tar file") { + c.Fatalf("expected an error msg but didn't get one.\nErr: %v\nOut: %v", err, out) + } +} + +func (s *DockerSuite) TestImportFile(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + runCmd := exec.Command(dockerBinary, "export", "test-import") + runCmd.Stdout = bufio.NewWriter(temporaryFile) + + _, err = runCommand(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + + out, _ := dockerCmd(c, "import", temporaryFile.Name()) + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + image := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) +} + +func (s *DockerSuite) TestImportGzipped(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + runCmd := exec.Command(dockerBinary, "export", "test-import") + w := gzip.NewWriter(temporaryFile) + runCmd.Stdout = w + + _, err = runCommand(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + err = w.Close() + c.Assert(err, checker.IsNil, check.Commentf("failed to close gzip writer")) + temporaryFile.Close() + out, _ := dockerCmd(c, "import", temporaryFile.Name()) + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + image := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) +} + +func (s *DockerSuite) TestImportFileWithMessage(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + runCmd := exec.Command(dockerBinary, "export", "test-import") + runCmd.Stdout = bufio.NewWriter(temporaryFile) + + _, err = runCommand(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + + message := "Testing commit message" + out, _ := dockerCmd(c, "import", "-m", message, temporaryFile.Name()) + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + image := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "history", image) + split := strings.Split(out, "\n") + + c.Assert(split, checker.HasLen, 3, check.Commentf("expected 3 lines from image history")) + r := regexp.MustCompile("[\\s]{2,}") + split = r.Split(split[1], -1) + + c.Assert(message, checker.Equals, split[3], check.Commentf("didn't get expected value in commit message")) + + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing")) +} + +func (s *DockerSuite) TestImportFileNonExistentFile(c *check.C) { + _, _, err := dockerCmdWithError("import", "example.com/myImage.tar") + c.Assert(err, checker.NotNil, check.Commentf("import non-existing file must failed")) +} + +func (s *DockerSuite) TestImportWithQuotedChanges(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + result := icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs("export", "test-import"), + Stdout: bufio.NewWriter(temporaryFile), + }) + c.Assert(result, icmd.Matches, icmd.Success) + + result = dockerCmdWithResult("import", "-c", `ENTRYPOINT ["/bin/sh", "-c"]`, temporaryFile.Name()) + c.Assert(result, icmd.Matches, icmd.Success) + image := strings.TrimSpace(result.Stdout()) + + result = dockerCmdWithResult("run", "--rm", image, "true") + c.Assert(result, icmd.Matches, icmd.Expected{Out: icmd.None}) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go new file mode 100644 index 0000000..62ce7e2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go @@ -0,0 +1,234 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// ensure docker info succeeds +func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "info") + + // always shown fields + stringsToCheck := []string{ + "ID:", + "Containers:", + " Running:", + " Paused:", + " Stopped:", + "Images:", + "OSType:", + "Architecture:", + "Logging Driver:", + "Operating System:", + "CPUs:", + "Total Memory:", + "Kernel Version:", + "Storage Driver:", + "Volume:", + "Network:", + "Live Restore Enabled:", + } + + if daemonPlatform == "linux" { + stringsToCheck = append(stringsToCheck, "Init Binary:", "Security Options:", "containerd version:", "runc version:", "init version:") + } + + if DaemonIsLinux.Condition() { + stringsToCheck = append(stringsToCheck, "Runtimes:", "Default Runtime: runc") + } + + if experimentalDaemon { + stringsToCheck = append(stringsToCheck, "Experimental: true") + } else { + stringsToCheck = append(stringsToCheck, "Experimental: false") + } + + for _, linePrefix := range stringsToCheck { + c.Assert(out, checker.Contains, linePrefix, check.Commentf("couldn't find string %v in output", linePrefix)) + } +} + +// TestInfoFormat tests `docker info --format` +func (s *DockerSuite) TestInfoFormat(c *check.C) { + out, status := dockerCmd(c, "info", "--format", "{{json .}}") + c.Assert(status, checker.Equals, 0) + var m map[string]interface{} + err := json.Unmarshal([]byte(out), &m) + c.Assert(err, checker.IsNil) + _, _, err = dockerCmdWithError("info", "--format", "{{.badString}}") + c.Assert(err, checker.NotNil) +} + +// TestInfoDiscoveryBackend verifies that a daemon run with `--cluster-advertise` and +// `--cluster-store` properly show the backend's endpoint in info output. +func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := NewDaemon(c) + discoveryBackend := "consul://consuladdr:consulport/some/path" + discoveryAdvertise := "1.1.1.1:2375" + err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise)) + c.Assert(err, checker.IsNil) + defer d.Stop() + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend)) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s\n", discoveryAdvertise)) +} + +// TestInfoDiscoveryInvalidAdvertise verifies that a daemon run with +// an invalid `--cluster-advertise` configuration +func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := NewDaemon(c) + discoveryBackend := "consul://consuladdr:consulport/some/path" + + // --cluster-advertise with an invalid string is an error + err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid") + c.Assert(err, checker.Not(checker.IsNil)) + + // --cluster-advertise without --cluster-store is also an error + err = d.Start("--cluster-advertise=1.1.1.1:2375") + c.Assert(err, checker.Not(checker.IsNil)) +} + +// TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise` +// configured with interface name properly show the advertise ip-address in info output. +func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { + testRequires(c, SameHostDaemon, Network, DaemonIsLinux) + + d := NewDaemon(c) + discoveryBackend := "consul://consuladdr:consulport/some/path" + discoveryAdvertise := "eth0" + + err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise)) + c.Assert(err, checker.IsNil) + defer d.Stop() + + iface, err := net.InterfaceByName(discoveryAdvertise) + c.Assert(err, checker.IsNil) + addrs, err := iface.Addrs() + c.Assert(err, checker.IsNil) + c.Assert(len(addrs), checker.GreaterThan, 0) + ip, _, err := net.ParseCIDR(addrs[0].String()) + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend)) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s:2375\n", ip.String())) +} + +func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "run", "-d", "busybox", "top") + out, _ := dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) +} + +func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) { + testRequires(c, IsPausable) + + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", cleanedContainerID) + + out, _ = dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) +} + +func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "stop", cleanedContainerID) + + out, _ = dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1)) +} + +func (s *DockerSuite) TestInfoDebug(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := NewDaemon(c) + err := d.Start("--debug") + c.Assert(err, checker.IsNil) + defer d.Stop() + + out, err := d.Cmd("--debug", "info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Debug Mode (client): true\n") + c.Assert(out, checker.Contains, "Debug Mode (server): true\n") + c.Assert(out, checker.Contains, "File Descriptors") + c.Assert(out, checker.Contains, "Goroutines") + c.Assert(out, checker.Contains, "System Time") + c.Assert(out, checker.Contains, "EventsListeners") + c.Assert(out, checker.Contains, "Docker Root Dir") +} + +func (s *DockerSuite) TestInsecureRegistries(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + registryCIDR := "192.168.1.0/24" + registryHost := "insecurehost.com:5000" + + d := NewDaemon(c) + err := d.Start("--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost) + c.Assert(err, checker.IsNil) + defer d.Stop() + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Insecure Registries:\n") + c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryHost)) + c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryCIDR)) +} + +func (s *DockerDaemonSuite) TestRegistryMirrors(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + registryMirror1 := "https://192.168.1.2" + registryMirror2 := "http://registry.mirror.com:5000" + + err := s.d.Start("--registry-mirror="+registryMirror1, "--registry-mirror="+registryMirror2) + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Registry Mirrors:\n") + c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror2)) +} + +// Test case for #24392 +func (s *DockerDaemonSuite) TestInfoLabels(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.Start("--label", `test.empty=`, "--label", `test.empty=`, "--label", `test.label="1"`, "--label", `test.label="2"`) + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "WARNING: labels with duplicate keys and conflicting values have been deprecated") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go new file mode 100644 index 0000000..b932306 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go @@ -0,0 +1,15 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoSecurityOptions(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, Apparmor, DaemonIsLinux) + + out, _ := dockerCmd(c, "info") + c.Assert(out, checker.Contains, "Security Options:\n apparmor\n seccomp\n Profile: default\n") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go new file mode 100644 index 0000000..32ed28a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go @@ -0,0 +1,466 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func checkValidGraphDriver(c *check.C, name string) { + if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" { + c.Fatalf("%v is not a valid graph driver name", name) + } +} + +func (s *DockerSuite) TestInspectImage(c *check.C) { + testRequires(c, DaemonIsLinux) + imageTest := "emptyfs" + // It is important that this ID remain stable. If a code change causes + // it to be different, this is equivalent to a cache bust when pulling + // a legacy-format manifest. If the check at the end of this function + // fails, fix the difference in the image serialization instead of + // updating this hash. + imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" + id := inspectField(c, imageTest, "Id") + + c.Assert(id, checker.Equals, imageTestID) +} + +func (s *DockerSuite) TestInspectInt64(c *check.C) { + dockerCmd(c, "run", "-d", "-m=300M", "--name", "inspectTest", "busybox", "true") + inspectOut := inspectField(c, "inspectTest", "HostConfig.Memory") + c.Assert(inspectOut, checker.Equals, "314572800") +} + +func (s *DockerSuite) TestInspectDefault(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch the container JSON. + //If the container JSON is not available, it will go for the image JSON. + + out, _ := dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + containerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, "busybox", "Id") + c.Assert(strings.TrimSpace(inspectOut), checker.Equals, containerID) +} + +func (s *DockerSuite) TestInspectStatus(c *check.C) { + if daemonPlatform != "windows" { + defer unpauseAllContainers() + } + out, _ := runSleepingContainer(c, "-d") + out = strings.TrimSpace(out) + + inspectOut := inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + + // Windows does not support pause/unpause on Windows Server Containers. + // (RS1 does for Hyper-V Containers, but production CI is not setup for that) + if daemonPlatform != "windows" { + dockerCmd(c, "pause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "paused") + + dockerCmd(c, "unpause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + } + + dockerCmd(c, "stop", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "exited") + +} + +func (s *DockerSuite) TestInspectTypeFlagContainer(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch container + //JSON State.Running field. If the field is true, it's a container. + runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format={{.State.Running}}" + out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") + c.Assert(out, checker.Equals, "true\n") // not a container JSON +} + +func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *check.C) { + //Run this test on an image named busybox. docker inspect will try to fetch container + //JSON. Since there is no container named busybox and --type=container, docker inspect will + //not try to get the image JSON. It will throw an error. + + dockerCmd(c, "run", "-d", "busybox", "true") + + _, _, err := dockerCmdWithError("inspect", "--type=container", "busybox") + // docker inspect should fail, as there is no container named busybox + c.Assert(err, checker.NotNil) +} + +func (s *DockerSuite) TestInspectTypeFlagWithImage(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch image + //JSON as --type=image. if there is no image with name busybox, docker inspect + //will throw an error. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, _ := dockerCmd(c, "inspect", "--type=image", "busybox") + c.Assert(out, checker.Not(checker.Contains), "State") // not an image JSON +} + +func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *check.C) { + //Both the container and image are named busybox. docker inspect will fail + //as --type=foobar is not a valid value for the flag. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox") + c.Assert(err, checker.NotNil, check.Commentf("%s", exitCode)) + c.Assert(exitCode, checker.Equals, 1, check.Commentf("%s", err)) + c.Assert(out, checker.Contains, "not a valid value for --type") +} + +func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) { + testRequires(c, DaemonIsLinux) + imageTest := "emptyfs" + out := inspectField(c, imageTest, "Size") + + size, err := strconv.Atoi(out) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect size of the image: %s, %v", out, err)) + + //now see if the size turns out to be the same + formatStr := fmt.Sprintf("--format={{eq .Size %d}}", size) + out, _ = dockerCmd(c, "inspect", formatStr, imageTest) + result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) + c.Assert(err, checker.IsNil) + c.Assert(result, checker.Equals, true) +} + +func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") + runCmd.Stdin = strings.NewReader("blahblah") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to run container: %v, output: %q", err, out)) + + id := strings.TrimSpace(out) + + out = inspectField(c, id, "State.ExitCode") + + exitCode, err := strconv.Atoi(out) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect exitcode of the container: %s, %v", out, err)) + + //now get the exit code to verify + formatStr := fmt.Sprintf("--format={{eq .State.ExitCode %d}}", exitCode) + out, _ = dockerCmd(c, "inspect", formatStr, id) + result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) + c.Assert(err, checker.IsNil) + c.Assert(result, checker.Equals, true) +} + +func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) { + testRequires(c, DaemonIsLinux, Devicemapper) + imageTest := "emptyfs" + name := inspectField(c, imageTest, "GraphDriver.Name") + + checkValidGraphDriver(c, name) + + deviceID := inspectField(c, imageTest, "GraphDriver.Data.DeviceId") + + _, err := strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, imageTest, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) { + testRequires(c, DaemonIsLinux, Devicemapper) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + out = strings.TrimSpace(out) + + name := inspectField(c, out, "GraphDriver.Name") + + checkValidGraphDriver(c, name) + + imageDeviceID := inspectField(c, "busybox", "GraphDriver.Data.DeviceId") + + deviceID := inspectField(c, out, "GraphDriver.Data.DeviceId") + + c.Assert(imageDeviceID, checker.Not(checker.Equals), deviceID) + + _, err := strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, out, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) { + modifier := ",z" + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if daemonPlatform == "windows" { + modifier = "" + // TODO Windows: Temporary check - remove once TP5 support is dropped + if windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + // Linux creates the host directory if it doesn't exist. Windows does not. + os.Mkdir(`c:\data`, os.ModeDir) + } + + dockerCmd(c, "run", "-d", "--name", "test", "-v", prefix+slash+"data:"+prefix+slash+"data:ro"+modifier, "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one mountpoint + c.Assert(mp, check.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "") + c.Assert(m.Driver, checker.Equals, "") + c.Assert(m.Source, checker.Equals, prefix+slash+"data") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + if daemonPlatform != "windows" { // Windows does not set mode + c.Assert(m.Mode, checker.Equals, "ro"+modifier) + } + c.Assert(m.RW, checker.Equals, false) +} + +func (s *DockerSuite) TestInspectNamedMountPoint(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "-d", "--name", "test", "-v", "data:"+prefix+slash+"data", "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one mountpoint + c.Assert(mp, checker.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "data") + c.Assert(m.Driver, checker.Equals, "local") + c.Assert(m.Source, checker.Not(checker.Equals), "") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + c.Assert(m.RW, checker.Equals, true) +} + +// #14947 +func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + id := strings.TrimSpace(out) + startedAt := inspectField(c, id, "State.StartedAt") + finishedAt := inspectField(c, id, "State.FinishedAt") + created := inspectField(c, id, "Created") + + _, err := time.Parse(time.RFC3339Nano, startedAt) + c.Assert(err, checker.IsNil) + _, err = time.Parse(time.RFC3339Nano, finishedAt) + c.Assert(err, checker.IsNil) + _, err = time.Parse(time.RFC3339Nano, created) + c.Assert(err, checker.IsNil) + + created = inspectField(c, "busybox", "Created") + + _, err = time.Parse(time.RFC3339Nano, created) + c.Assert(err, checker.IsNil) +} + +// #15633 +func (s *DockerSuite) TestInspectLogConfigNoType(c *check.C) { + dockerCmd(c, "create", "--name=test", "--log-opt", "max-file=42", "busybox") + var logConfig container.LogConfig + + out := inspectFieldJSON(c, "test", "HostConfig.LogConfig") + + err := json.NewDecoder(strings.NewReader(out)).Decode(&logConfig) + c.Assert(err, checker.IsNil, check.Commentf("%v", out)) + + c.Assert(logConfig.Type, checker.Equals, "json-file") + c.Assert(logConfig.Config["max-file"], checker.Equals, "42", check.Commentf("%v", logConfig)) +} + +func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *check.C) { + + //Both the container and image are named busybox. docker inspect will fetch container + //JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields. + + runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format={{.SizeRw}},{{.SizeRootFs}}" + out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") + c.Assert(strings.TrimSpace(out), check.Equals, ",", check.Commentf("Exepcted not to display size info: %s", out)) +} + +func (s *DockerSuite) TestInspectSizeFlagContainer(c *check.C) { + runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'" + out, _ := dockerCmd(c, "inspect", "-s", "--type=container", formatStr, "busybox") + sz := strings.Split(out, ",") + + c.Assert(strings.TrimSpace(sz[0]), check.Not(check.Equals), "") + c.Assert(strings.TrimSpace(sz[1]), check.Not(check.Equals), "") +} + +func (s *DockerSuite) TestInspectTemplateError(c *check.C) { + // Template parsing error for both the container and image. + + runSleepingContainer(c, "--name=container1", "-d") + + out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='Format container: {{.ThisDoesNotExist}}'", "container1") + c.Assert(err, check.Not(check.IsNil)) + c.Assert(out, checker.Contains, "Template parsing error") + + out, _, err = dockerCmdWithError("inspect", "--type=image", "--format='Format container: {{.ThisDoesNotExist}}'", "busybox") + c.Assert(err, check.Not(check.IsNil)) + c.Assert(out, checker.Contains, "Template parsing error") +} + +func (s *DockerSuite) TestInspectJSONFields(c *check.C) { + runSleepingContainer(c, "--name=busybox", "-d") + out, _, err := dockerCmdWithError("inspect", "--type=container", "--format={{.HostConfig.Dns}}", "busybox") + + c.Assert(err, check.IsNil) + c.Assert(out, checker.Equals, "[]\n") +} + +func (s *DockerSuite) TestInspectByPrefix(c *check.C) { + id := inspectField(c, "busybox", "Id") + c.Assert(id, checker.HasPrefix, "sha256:") + + id2 := inspectField(c, id[:12], "Id") + c.Assert(id, checker.Equals, id2) + + id3 := inspectField(c, strings.TrimPrefix(id, "sha256:")[:12], "Id") + c.Assert(id, checker.Equals, id3) +} + +func (s *DockerSuite) TestInspectStopWhenNotFound(c *check.C) { + runSleepingContainer(c, "--name=busybox", "-d") + runSleepingContainer(c, "--name=not-shown", "-d") + out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='{{.Name}}'", "busybox", "missing", "not-shown") + + c.Assert(err, checker.Not(check.IsNil)) + c.Assert(out, checker.Contains, "busybox") + c.Assert(out, checker.Not(checker.Contains), "not-shown") + c.Assert(out, checker.Contains, "Error: No such container: missing") +} + +func (s *DockerSuite) TestInspectHistory(c *check.C) { + dockerCmd(c, "run", "--name=testcont", "busybox", "echo", "hello") + dockerCmd(c, "commit", "-m", "test comment", "testcont", "testimg") + out, _, err := dockerCmdWithError("inspect", "--format='{{.Comment}}'", "testimg") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "test comment") +} + +func (s *DockerSuite) TestInspectContainerNetworkDefault(c *check.C) { + testRequires(c, DaemonIsLinux) + + contName := "test1" + dockerCmd(c, "run", "--name", contName, "-d", "busybox", "top") + netOut, _ := dockerCmd(c, "network", "inspect", "--format={{.ID}}", "bridge") + out := inspectField(c, contName, "NetworkSettings.Networks") + c.Assert(out, checker.Contains, "bridge") + out = inspectField(c, contName, "NetworkSettings.Networks.bridge.NetworkID") + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) +} + +func (s *DockerSuite) TestInspectContainerNetworkCustom(c *check.C) { + testRequires(c, DaemonIsLinux) + + netOut, _ := dockerCmd(c, "network", "create", "net1") + dockerCmd(c, "run", "--name=container1", "--net=net1", "-d", "busybox", "top") + out := inspectField(c, "container1", "NetworkSettings.Networks") + c.Assert(out, checker.Contains, "net1") + out = inspectField(c, "container1", "NetworkSettings.Networks.net1.NetworkID") + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) +} + +func (s *DockerSuite) TestInspectRootFS(c *check.C) { + out, _, err := dockerCmdWithError("inspect", "busybox") + c.Assert(err, check.IsNil) + + var imageJSON []types.ImageInspect + err = json.Unmarshal([]byte(out), &imageJSON) + c.Assert(err, checker.IsNil) + + c.Assert(len(imageJSON[0].RootFS.Layers), checker.GreaterOrEqualThan, 1) +} + +func (s *DockerSuite) TestInspectAmpersand(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "test" + out, _ := dockerCmd(c, "run", "--name", name, "--env", `TEST_ENV="soanni&rtr"`, "busybox", "env") + c.Assert(out, checker.Contains, `soanni&rtr`) + out, _ = dockerCmd(c, "inspect", name) + c.Assert(out, checker.Contains, `soanni&rtr`) +} + +func (s *DockerSuite) TestInspectPlugin(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + // Even without tag the inspect still work + out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +// Test case for 29185 +func (s *DockerSuite) TestInspectUnknownObject(c *check.C) { + // This test should work on both Windows and Linux + out, _, err := dockerCmdWithError("inspect", "foobar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Error: No such object: foobar") + c.Assert(err.Error(), checker.Contains, "Error: No such object: foobar") +} + +func (s *DockerSuite) TestInpectInvalidReference(c *check.C) { + // This test should work on both Windows and Linux + out, _, err := dockerCmdWithError("inspect", "FooBar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Error: No such object: FooBar") + c.Assert(err.Error(), checker.Contains, "Error: No such object: FooBar") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go new file mode 100644 index 0000000..4316480 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go @@ -0,0 +1,134 @@ +package main + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestKillContainer(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + dockerCmd(c, "kill", cleanedContainerID) + c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) + +} + +func (s *DockerSuite) TestKillOffStoppedContainer(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "stop", cleanedContainerID) + c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + + _, _, err := dockerCmdWithError("kill", "-s", "30", cleanedContainerID) + c.Assert(err, check.Not(check.IsNil), check.Commentf("Container %s is not running", cleanedContainerID)) +} + +func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { + // TODO Windows: Windows does not yet support -u (Feb 2016). + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + dockerCmd(c, "kill", cleanedContainerID) + c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) + +} + +// regression test about correct signal parsing see #13665 +func (s *DockerSuite) TestKillWithSignal(c *check.C) { + // Cannot port to Windows - does not support signals in the same way Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + dockerCmd(c, "kill", "-s", "SIGWINCH", cid) + time.Sleep(250 * time.Millisecond) + + running := inspectField(c, cid, "State.Running") + + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after SIGWINCH")) +} + +func (s *DockerSuite) TestKillWithStopSignalWithSameSignalShouldDisableRestartPolicy(c *check.C) { + // Cannot port to Windows - does not support signals int the same way as Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--stop-signal=TERM", "--restart=always", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + // Let docker send a TERM signal to the container + // It will kill the process and disable the restart policy + dockerCmd(c, "kill", "-s", "TERM", cid) + c.Assert(waitExited(cid, 10*time.Second), check.IsNil) + + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), cid, check.Commentf("killed container is still running")) +} + +func (s *DockerSuite) TestKillWithStopSignalWithDifferentSignalShouldKeepRestartPolicy(c *check.C) { + // Cannot port to Windows - does not support signals int the same way as Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--stop-signal=CONT", "--restart=always", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + // Let docker send a TERM signal to the container + // It will kill the process, but not disable the restart policy + dockerCmd(c, "kill", "-s", "TERM", cid) + c.Assert(waitRestart(cid, 10*time.Second), check.IsNil) + + // Restart policy should still be in place, so it should be still running + c.Assert(waitRun(cid), check.IsNil) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + out, _, err := dockerCmdWithError("kill", "-s", "0", cid) + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid signal: 0", check.Commentf("Kill with an invalid signal didn't error out correctly")) + + running := inspectField(c, cid, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) + + out, _ = runSleepingContainer(c, "-d") + cid = strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + out, _, err = dockerCmdWithError("kill", "-s", "SIG42", cid) + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid signal: SIG42", check.Commentf("Kill with an invalid signal error out correctly")) + + running = inspectField(c, cid, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) + +} + +func (s *DockerSuite) TestKillStoppedContainerAPIPre120(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + runSleepingContainer(c, "--name", "docker-kill-test-api", "-d") + dockerCmd(c, "stop", "docker-kill-test-api") + + status, _, err := sockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go new file mode 100644 index 0000000..a5872d9 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go @@ -0,0 +1,240 @@ +package main + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/runconfig" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + _, exitCode, err := dockerCmdWithError("run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + + // run ping failed with error + c.Assert(exitCode, checker.Equals, 1, check.Commentf("error: %v", err)) +} + +// Test for appropriate error when calling --link with an invalid target container +func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--link", "bogus:alias", "busybox", "true") + + // an invalid container target should produce an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // an invalid container target should produce an error + c.Assert(out, checker.Contains, "Could not get container") +} + +func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + // Test with the three different ways of specifying the default network on Linux + testLinkPingOnNetwork(c, "") + testLinkPingOnNetwork(c, "default") + testLinkPingOnNetwork(c, "bridge") +} + +func testLinkPingOnNetwork(c *check.C, network string) { + var postArgs []string + if network != "" { + postArgs = append(postArgs, []string{"--net", network}...) + } + postArgs = append(postArgs, []string{"busybox", "top"}...) + runArgs1 := append([]string{"run", "-d", "--name", "container1", "--hostname", "fred"}, postArgs...) + runArgs2 := append([]string{"run", "-d", "--name", "container2", "--hostname", "wilma"}, postArgs...) + + // Run the two named containers + dockerCmd(c, runArgs1...) + dockerCmd(c, runArgs2...) + + postArgs = []string{} + if network != "" { + postArgs = append(postArgs, []string{"--net", network}...) + } + postArgs = append(postArgs, []string{"busybox", "sh", "-c"}...) + + // Format a run for a container which links to the other two + runArgs := append([]string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2"}, postArgs...) + pingCmd := "ping -c 1 %s -W 1 && ping -c 1 %s -W 1" + + // test ping by alias, ping by name, and ping by hostname + // 1. Ping by alias + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) + // 2. Ping by container name + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) + // 3. Ping by hostname + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) + + // Clean for next round + dockerCmd(c, "rm", "-f", "container1") + dockerCmd(c, "rm", "-f", "container2") +} + +func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + dockerCmd(c, "kill", idA) + dockerCmd(c, "kill", idB) + +} + +func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := convertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := convertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "--name=first", "busybox", "top") + dockerCmd(c, "create", "--name=second", "--link=first:first", "busybox", "top") + dockerCmd(c, "start", "first") + +} + +func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + + out, _ := dockerCmd(c, "run", "-itd", "--name", "one", "busybox", "top") + idOne := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top") + idTwo := strings.TrimSpace(out) + + c.Assert(waitRun(idTwo), checker.IsNil) + + contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts") + c.Assert(err, checker.IsNil, check.Commentf("contentOne: %s", string(contentOne))) + + contentTwo, err := readContainerFileWithExec(idTwo, "/etc/hosts") + c.Assert(err, checker.IsNil, check.Commentf("contentTwo: %s", string(contentTwo))) + // Host is not present in updated hosts file + c.Assert(string(contentTwo), checker.Contains, "onetwo") +} + +func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + dockerCmd(c, "run", "-d", "--name", "one", "busybox", "top") + out, _ := dockerCmd(c, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top") + id := strings.TrimSpace(string(out)) + + realIP := inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + content, err := readContainerFileWithExec(id, "/etc/hosts") + c.Assert(err, checker.IsNil) + + getIP := func(hosts []byte, hostname string) string { + re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) + matches := re.FindSubmatch(hosts) + c.Assert(matches, checker.NotNil, check.Commentf("Hostname %s have no matches in hosts", hostname)) + return string(matches[1]) + } + ip := getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) + + dockerCmd(c, "restart", "one") + realIP = inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + + content, err = readContainerFileWithExec(id, "/etc/hosts") + c.Assert(err, checker.IsNil, check.Commentf("content: %s", string(content))) + ip = getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) +} + +func (s *DockerSuite) TestLinksEnvs(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top") + out, _ := dockerCmd(c, "run", "--name=second", "--link=first:first", "busybox", "env") + c.Assert(out, checker.Contains, "FIRST_ENV_e1=\n") + c.Assert(out, checker.Contains, "FIRST_ENV_e2=v2") + c.Assert(out, checker.Contains, "FIRST_ENV_e3=v3=v3") +} + +func (s *DockerSuite) TestLinkShortDefinition(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "shortlinkdef", "busybox", "top") + + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), checker.IsNil) + + out, _ = dockerCmd(c, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top") + + cid2 := strings.TrimSpace(out) + c.Assert(waitRun(cid2), checker.IsNil) + + links := inspectFieldJSON(c, cid2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]") +} + +func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top") + out, _, err := dockerCmdWithError("run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true") + + // Running container linking to a container with --net host should have failed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // Running container linking to a container with --net host should have failed + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") + // /etc/hosts should be a regular file + c.Assert(out, checker.Matches, "^-.+\n") +} + +func (s *DockerSuite) TestLinksMultipleWithSameName(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name=upstream-a", "busybox", "top") + dockerCmd(c, "run", "-d", "--name=upstream-b", "busybox", "top") + dockerCmd(c, "run", "--link", "upstream-a:upstream", "--link", "upstream-b:upstream", "busybox", "sh", "-c", "ping -c 1 upstream") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go new file mode 100644 index 0000000..1af9279 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go @@ -0,0 +1,26 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) { + // In a _unix file as using Unix specific files, and must be on the + // same host as the daemon. + testRequires(c, SameHostDaemon, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hosts") + hosts, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + c.Skip("/etc/hosts does not exist, skip this test") + } + + c.Assert(out, checker.Equals, string(hosts), check.Commentf("container: %s\n\nhost:%s", out, hosts)) + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go new file mode 100644 index 0000000..01de75d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "bytes" + "os/exec" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) { + cmd := exec.Command(dockerBinary, "login") + + // Send to stdin so the process does not get the TTY + cmd.Stdin = bytes.NewBufferString("buffer test string \n") + + // run the command and block until it's done + err := cmd.Run() + c.Assert(err, checker.NotNil) //"Expected non nil err when loginning in & TTY not available" +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistry(c *check.C) { + // wrong credentials + out, _, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", privateRegistryURL) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "401 Unauthorized") + + // now it's fine + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistryDeprecatedEmailFlag(c *check.C) { + // Test to make sure login still works with the deprecated -e and --email flags + // wrong credentials + out, _, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", "-e", s.reg.email, privateRegistryURL) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "401 Unauthorized") + + // now it's fine + // -e flag + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "-e", s.reg.email, privateRegistryURL) + // --email flag + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "--email", s.reg.email, privateRegistryURL) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go new file mode 100644 index 0000000..a5f4b10 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go @@ -0,0 +1,100 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + c.Assert(string(b), checker.Contains, privateRegistryURL) + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + + b, err = ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), privateRegistryURL) + + // check I cannot pull anymore + out, _, err := dockerCmdWithError("--config", tmp, "pull", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found") +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithWrongHostnamesStored(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + cmd := exec.Command("docker-credential-shell-test", "store") + stdin := bytes.NewReader([]byte(fmt.Sprintf(`{"ServerURL": "https://%s", "Username": "%s", "Secret": "%s"}`, privateRegistryURL, s.reg.username, s.reg.password))) + cmd.Stdin = stdin + c.Assert(cmd.Run(), checker.IsNil) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := fmt.Sprintf(`{ "auths": {"https://%s": {}}, "credsStore": "shell-test" }`, privateRegistryURL) + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Contains, fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), checker.Contains, fmt.Sprintf("\"%s\": {}", privateRegistryURL)) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + + b, err = ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"%s\": {}", privateRegistryURL)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_bench_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_bench_test.go new file mode 100644 index 0000000..eeb008d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_bench_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/go-check/check" +) + +func (s *DockerSuite) BenchmarkLogsCLIRotateFollow(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--log-opt", "max-size=1b", "--log-opt", "max-file=10", "busybox", "sh", "-c", "while true; do usleep 50000; echo hello; done") + id := strings.TrimSpace(out) + ch := make(chan error, 1) + go func() { + ch <- nil + out, _, _ := dockerCmdWithError("logs", "-f", id) + // if this returns at all, it's an error + ch <- fmt.Errorf(out) + }() + + <-ch + select { + case <-time.After(30 * time.Second): + // ran for 30 seconds with no problem + return + case err := <-ch: + if err != nil { + c.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go new file mode 100644 index 0000000..d2dcad1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go @@ -0,0 +1,328 @@ +package main + +import ( + "fmt" + "io" + "os/exec" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/jsonlog" + "github.com/go-check/check" +) + +// This used to work, it test a log of PageSize-1 (gh#4851) +func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) { + testLen := 32767 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", id) + + c.Assert(out, checker.HasLen, testLen+1) +} + +// Regression test: When going over the PageSize, it used to panic (gh#4851) +func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) { + testLen := 32768 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", id) + + c.Assert(out, checker.HasLen, testLen+1) +} + +// Regression test: When going much over the PageSize, it used to block (gh#4851) +func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) { + testLen := 33000 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", id) + + c.Assert(out, checker.HasLen, testLen+1) +} + +func (s *DockerSuite) TestLogsTimestamps(c *check.C) { + testLen := 100 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo = >> a.a; done; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", "-t", id) + + lines := strings.Split(out, "\n") + + c.Assert(lines, checker.HasLen, testLen+1) + + ts := regexp.MustCompile(`^.* `) + + for _, l := range lines { + if l != "" { + _, err := time.Parse(jsonlog.RFC3339NanoFixed+" ", ts.FindString(l)) + c.Assert(err, checker.IsNil, check.Commentf("Failed to parse timestamp from %v", l)) + // ensure we have padded 0's + c.Assert(l[29], checker.Equals, uint8('Z')) + } + } +} + +func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) { + msg := "stderr_log" + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) + + c.Assert(stdout, checker.Equals, "") + + stderr = strings.TrimSpace(stderr) + + c.Assert(stderr, checker.Equals, msg) +} + +func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { + // TODO Windows: Needs investigation why this fails. Obtained string includes + // a bunch of ANSI escape sequences before the "stderr_log" message. + testRequires(c, DaemonIsLinux) + msg := "stderr_log" + out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) + c.Assert(stderr, checker.Equals, "") + + stdout = strings.TrimSpace(stdout) + c.Assert(stdout, checker.Equals, msg) +} + +func (s *DockerSuite) TestLogsTail(c *check.C) { + testLen := 100 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", "--tail", "0", id) + lines := strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 1) + + out, _ = dockerCmd(c, "logs", "--tail", "5", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 6) + + out, _ = dockerCmd(c, "logs", "--tail", "99", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 100) + + out, _ = dockerCmd(c, "logs", "--tail", "all", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) + + out, _ = dockerCmd(c, "logs", "--tail", "-1", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) + + out, _, _ = dockerCmdWithStdoutStderr(c, "logs", "--tail", "random", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) +} + +func (s *DockerSuite) TestLogsFollowStopped(c *check.C) { + dockerCmd(c, "run", "--name=test", "busybox", "echo", "hello") + id, err := getIDByName("test") + c.Assert(err, check.IsNil) + + logsCmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(logsCmd.Start(), checker.IsNil) + + errChan := make(chan error) + go func() { + errChan <- logsCmd.Wait() + close(errChan) + }() + + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Following logs is hanged") + } +} + +func (s *DockerSuite) TestLogsSince(c *check.C) { + name := "testlogssince" + dockerCmd(c, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo log$i; done") + out, _ := dockerCmd(c, "logs", "-t", name) + + log2Line := strings.Split(strings.Split(out, "\n")[1], " ") + t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // the timestamp log2 is written + c.Assert(err, checker.IsNil) + since := t.Unix() + 1 // add 1s so log1 & log2 doesn't show up + out, _ = dockerCmd(c, "logs", "-t", fmt.Sprintf("--since=%v", since), name) + + // Skip 2 seconds + unexpected := []string{"log1", "log2"} + for _, v := range unexpected { + c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", since)) + } + + // Test to make sure a bad since format is caught by the client + out, _, _ = dockerCmdWithError("logs", "-t", "--since=2006-01-02T15:04:0Z", name) + c.Assert(out, checker.Contains, "cannot parse \"0Z\" as \"05\"", check.Commentf("bad since format passed to server")) + + // Test with default value specified and parameter omitted + expected := []string{"log1", "log2", "log3"} + for _, cmd := range []*exec.Cmd{ + exec.Command(dockerBinary, "logs", "-t", name), + exec.Command(dockerBinary, "logs", "-t", "--since=0", name), + } { + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to log container: %s", out)) + for _, v := range expected { + c.Assert(out, checker.Contains, v) + } + } +} + +func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) { + // TODO Windows TP5 - Figure out why this test is so flakey. Disabled for now. + testRequires(c, DaemonIsLinux) + name := "testlogssincefuturefollow" + out, _ := dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do echo log$i; sleep 1; done`) + + // Extract one timestamp from the log file to give us a starting point for + // our `--since` argument. Because the log producer runs in the background, + // we need to check repeatedly for some output to be produced. + var timestamp string + for i := 0; i != 100 && timestamp == ""; i++ { + if out, _ = dockerCmd(c, "logs", "-t", name); out == "" { + time.Sleep(time.Millisecond * 100) // Retry + } else { + timestamp = strings.Split(strings.Split(out, "\n")[0], " ")[0] + } + } + + c.Assert(timestamp, checker.Not(checker.Equals), "") + t, err := time.Parse(time.RFC3339Nano, timestamp) + c.Assert(err, check.IsNil) + + since := t.Unix() + 2 + out, _ = dockerCmd(c, "logs", "-t", "-f", fmt.Sprintf("--since=%v", since), name) + c.Assert(out, checker.Not(checker.HasLen), 0, check.Commentf("cannot read from empty log")) + lines := strings.Split(strings.TrimSpace(out), "\n") + for _, v := range lines { + ts, err := time.Parse(time.RFC3339Nano, strings.Split(v, " ")[0]) + c.Assert(err, checker.IsNil, check.Commentf("cannot parse timestamp output from log: '%v'", v)) + c.Assert(ts.Unix() >= since, checker.Equals, true, check.Commentf("earlier log found. since=%v logdate=%v", since, ts)) + } +} + +// Regression test for #8832 +func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { + // TODO Windows: Fix this test for TP5. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`) + + id := strings.TrimSpace(out) + + stopSlowRead := make(chan bool) + + go func() { + exec.Command(dockerBinary, "wait", id).Run() + stopSlowRead <- true + }() + + logCmd := exec.Command(dockerBinary, "logs", "-f", id) + stdout, err := logCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + c.Assert(logCmd.Start(), checker.IsNil) + + // First read slowly + bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) + c.Assert(err, checker.IsNil) + + // After the container has finished we can continue reading fast + bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) + c.Assert(err, checker.IsNil) + + actual := bytes1 + bytes2 + expected := 200000 + c.Assert(actual, checker.Equals, expected) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + r, w := io.Pipe() + cmd.Stdout = w + c.Assert(cmd.Start(), checker.IsNil) + + // Make sure pipe is written to + chErr := make(chan error) + go func() { + b := make([]byte, 1) + _, err := r.Read(b) + chErr <- err + }() + c.Assert(<-chErr, checker.IsNil) + c.Assert(cmd.Process.Kill(), checker.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(cmd.Start(), checker.IsNil) + time.Sleep(200 * time.Millisecond) + c.Assert(cmd.Process.Kill(), checker.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsCLIContainerNotFound(c *check.C) { + name := "testlogsnocontainer" + out, _, _ := dockerCmdWithError("logs", name) + message := fmt.Sprintf("Error: No such container: %s\n", name) + c.Assert(out, checker.Equals, message) +} + +func (s *DockerSuite) TestLogsWithDetails(c *check.C) { + dockerCmd(c, "run", "--name=test", "--label", "foo=bar", "-e", "baz=qux", "--log-opt", "labels=foo", "--log-opt", "env=baz", "busybox", "echo", "hello") + out, _ := dockerCmd(c, "logs", "--details", "--timestamps", "test") + + logFields := strings.Fields(strings.TrimSpace(out)) + c.Assert(len(logFields), checker.Equals, 3, check.Commentf(out)) + + details := strings.Split(logFields[1], ",") + c.Assert(details, checker.HasLen, 2) + c.Assert(details[0], checker.Equals, "baz=qux") + c.Assert(details[1], checker.Equals, "foo=bar") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go new file mode 100644 index 0000000..7f4cc2c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func startServerContainer(c *check.C, msg string, port int) string { + name := "server" + cmd := []string{ + "-d", + "-p", fmt.Sprintf("%d:%d", port, port), + "busybox", + "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port), + } + c.Assert(waitForContainer(name, cmd...), check.IsNil) + return name +} + +func getExternalAddress(c *check.C) net.IP { + iface, err := net.InterfaceByName("eth0") + if err != nil { + c.Skip(fmt.Sprintf("Test not running with `make test`. Interface eth0 not found: %v", err)) + } + + ifaceAddrs, err := iface.Addrs() + c.Assert(err, check.IsNil) + c.Assert(ifaceAddrs, checker.Not(checker.HasLen), 0) + + ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) + c.Assert(err, check.IsNil) + + return ifaceIP +} + +func getContainerLogs(c *check.C, containerID string) string { + out, _ := dockerCmd(c, "logs", containerID) + return strings.Trim(out, "\r\n") +} + +func getContainerStatus(c *check.C, containerID string) string { + out := inspectField(c, containerID, "State.Running") + return out +} + +func (s *DockerSuite) TestNetworkNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + msg := "it works" + startServerContainer(c, msg, 8080) + endpoint := getExternalAddress(c) + conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080)) + c.Assert(err, check.IsNil) + + data, err := ioutil.ReadAll(conn) + conn.Close() + c.Assert(err, check.IsNil) + + final := strings.TrimRight(string(data), "\n") + c.Assert(final, checker.Equals, msg) +} + +func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + var ( + msg = "hi yall" + ) + startServerContainer(c, msg, 8081) + conn, err := net.Dial("tcp", "localhost:8081") + c.Assert(err, check.IsNil) + + data, err := ioutil.ReadAll(conn) + conn.Close() + c.Assert(err, check.IsNil) + + final := strings.TrimRight(string(data), "\n") + c.Assert(final, checker.Equals, msg) +} + +func (s *DockerSuite) TestNetworkLoopbackNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + msg := "it works" + startServerContainer(c, msg, 8080) + endpoint := getExternalAddress(c) + out, _ := dockerCmd(c, "run", "-t", "--net=container:server", "busybox", + "sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())) + final := strings.TrimRight(string(out), "\n") + c.Assert(final, checker.Equals, msg) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go new file mode 100644 index 0000000..4dfad93 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go @@ -0,0 +1,94 @@ +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/runconfig" + "github.com/go-check/check" +) + +// GH14530. Validates combinations of --net= with other options + +// stringCheckPS is how the output of PS starts in order to validate that +// the command executed in a container did really run PS correctly. +const stringCheckPS = "PID USER" + +// DockerCmdWithFail executes a docker command that is supposed to fail and returns +// the output, the exit code. If the command returns a Nil error, it will fail and +// stop the tests. +func dockerCmdWithFail(c *check.C, args ...string) (string, int) { + out, status, err := dockerCmdWithError(args...) + c.Assert(err, check.NotNil, check.Commentf("%v", out)) + return out, status +} + +func (s *DockerSuite) TestNetHostnameWithNetHost(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) +} + +func (s *DockerSuite) TestNetHostname(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-h=name", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=bridge", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=none", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--net=container:other", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container", "busybox", "ps") + c.Assert(out, checker.Contains, "--net: invalid net mode: invalid container format container:") + + out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps") + c.Assert(out, checker.Contains, "network weird not found") +} + +func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictContainerNetworkHostAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmdWithFail(c, "run", "--net=host", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeNetHostAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmdWithFail(c, "run", "--net=host", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--dns=8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkAndDNS.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--add-host=name:8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHosts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-P", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-p", "8080", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--expose", "8000-9000", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkExposePorts.Error()) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go new file mode 100644 index 0000000..97f204a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go @@ -0,0 +1,1791 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork/driverapi" + remoteapi "github.com/docker/libnetwork/drivers/remote/api" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/docker/libnetwork/netlabel" + "github.com/go-check/check" + "github.com/vishvananda/netlink" +) + +const dummyNetworkDriver = "dummy-network-driver" +const dummyIPAMDriver = "dummy-ipam-driver" + +var remoteDriverNetworkRequest remoteapi.CreateNetworkRequest + +func init() { + check.Suite(&DockerNetworkSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerNetworkSuite struct { + server *httptest.Server + ds *DockerSuite + d *Daemon +} + +func (s *DockerNetworkSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) +} + +func (s *DockerNetworkSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteNetworkDrivers(c, mux, s.server.URL, dummyNetworkDriver, dummyIPAMDriver) +} + +func setupRemoteNetworkDrivers(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Scope":"local"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected address from the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressReleaseReq.Address != gw { + fmt.Fprintf(w, `{"Error":"unknown address"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) + + ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerNetworkSuite) TearDownSuite(c *check.C) { + if s.server == nil { + return + } + + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func assertNwIsAvailable(c *check.C, name string) { + if !isNwPresent(c, name) { + c.Fatalf("Network %s not found in network ls o/p", name) + } +} + +func assertNwNotAvailable(c *check.C, name string) { + if isNwPresent(c, name) { + c.Fatalf("Found network %s in network ls o/p", name) + } +} + +func isNwPresent(c *check.C, name string) bool { + out, _ := dockerCmd(c, "network", "ls") + lines := strings.Split(out, "\n") + for i := 1; i < len(lines)-1; i++ { + netFields := strings.Fields(lines[i]) + if netFields[1] == name { + return true + } + } + return false +} + +// assertNwList checks network list retrieved with ls command +// equals to expected network list +// note: out should be `network ls [option]` result +func assertNwList(c *check.C, out string, expectNws []string) { + lines := strings.Split(out, "\n") + var nwList []string + for _, line := range lines[1 : len(lines)-1] { + netFields := strings.Fields(line) + // wrap all network name in nwList + nwList = append(nwList, netFields[1]) + } + + // network ls should contains all expected networks + c.Assert(nwList, checker.DeepEquals, expectNws) +} + +func getNwResource(c *check.C, name string) *types.NetworkResource { + out, _ := dockerCmd(c, "network", "inspect", name) + nr := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &nr) + c.Assert(err, check.IsNil) + return &nr[0] +} + +func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) { + defaults := []string{"bridge", "host", "none"} + for _, nn := range defaults { + assertNwIsAvailable(c, nn) + } +} + +func (s *DockerSuite) TestNetworkLsFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "network", "ls", "--format", "{{.Name}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"bridge", "host", "none"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestNetworkLsFormatDefaultFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + + config := `{ + "networksFormat": "{{ .Name }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "--config", d, "network", "ls") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"bridge default", "host default", "none default"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be created again + out, _, err := dockerCmdWithError("network", "create", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateHostBind(c *check.C) { + dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") + assertNwIsAvailable(c, "testbind") + + out, _ := runSleepingContainer(c, "--net=testbind", "-p", "5000:5000") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + out, _ = dockerCmd(c, "ps") + c.Assert(out, checker.Contains, "192.168.10.1:5000->5000/tcp") +} + +func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be removed + out, _, err := dockerCmdWithError("network", "rm", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) { + testNet := "testnet1" + testLabel := "foo" + testValue := "bar" + out, _ := dockerCmd(c, "network", "create", "dev") + defer func() { + dockerCmd(c, "network", "rm", "dev") + dockerCmd(c, "network", "rm", testNet) + }() + networkID := strings.TrimSpace(out) + + // filter with partial ID + // only show 'dev' network + out, _ = dockerCmd(c, "network", "ls", "-f", "id="+networkID[0:5]) + assertNwList(c, out, []string{"dev"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "name=dge") + assertNwList(c, out, []string{"bridge"}) + + // only show built-in network (bridge, none, host) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "host", "none"}) + + // only show custom networks (dev) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom") + assertNwList(c, out, []string{"dev"}) + + // show all networks with filter + // it should be equivalent of ls without option + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "dev", "host", "none"}) + + out, _ = dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel+"="+testValue) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label=nonexistent") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=null") + assertNwList(c, out, []string{"none"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=host") + assertNwList(c, out, []string{"host"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=bridge") + assertNwList(c, out, []string{"bridge", "dev", testNet}) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateDelete(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateLabel(c *check.C) { + testNet := "testnetcreatelabel" + testLabel := "foo" + testValue := "bar" + + dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _, err := dockerCmdWithError("network", "inspect", "--format={{ .Labels."+testLabel+" }}", testNet) + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) + + dockerCmd(c, "network", "rm", testNet) + assertNwNotAvailable(c, testNet) +} + +func (s *DockerSuite) TestDockerNetworkDeleteNotExists(c *check.C) { + out, _, err := dockerCmdWithError("network", "rm", "test") + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) +} + +func (s *DockerSuite) TestDockerNetworkDeleteMultiple(c *check.C) { + dockerCmd(c, "network", "create", "testDelMulti0") + assertNwIsAvailable(c, "testDelMulti0") + dockerCmd(c, "network", "create", "testDelMulti1") + assertNwIsAvailable(c, "testDelMulti1") + dockerCmd(c, "network", "create", "testDelMulti2") + assertNwIsAvailable(c, "testDelMulti2") + out, _ := dockerCmd(c, "run", "-d", "--net", "testDelMulti2", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + // delete three networks at the same time, since testDelMulti2 + // contains active container, its deletion should fail. + out, _, err := dockerCmdWithError("network", "rm", "testDelMulti0", "testDelMulti1", "testDelMulti2") + // err should not be nil due to deleting testDelMulti2 failed. + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // testDelMulti2 should fail due to network has active endpoints + c.Assert(out, checker.Contains, "has active endpoints") + assertNwNotAvailable(c, "testDelMulti0") + assertNwNotAvailable(c, "testDelMulti1") + // testDelMulti2 can't be deleted, so it should exist + assertNwIsAvailable(c, "testDelMulti2") +} + +func (s *DockerSuite) TestDockerNetworkInspect(c *check.C) { + out, _ := dockerCmd(c, "network", "inspect", "host") + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Name }}", "host") + c.Assert(strings.TrimSpace(out), check.Equals, "host") +} + +func (s *DockerSuite) TestDockerNetworkInspectWithID(c *check.C) { + out, _ := dockerCmd(c, "network", "create", "test2") + networkID := strings.TrimSpace(out) + assertNwIsAvailable(c, "test2") + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Id }}", "test2") + c.Assert(strings.TrimSpace(out), check.Equals, networkID) + + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .ID }}", "test2") + c.Assert(strings.TrimSpace(out), check.Equals, networkID) +} + +func (s *DockerSuite) TestDockerInspectMultipleNetwork(c *check.C) { + result := dockerCmdWithResult("network", "inspect", "host", "none") + c.Assert(result, icmd.Matches, icmd.Success) + + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 2) + + // Should print an error, return an exitCode 1 *but* should print the host network + result = dockerCmdWithResult("network", "inspect", "host", "nonexistent") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "host", + }) + + networkResources = []types.NetworkResource{} + err = json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(networkResources, checker.HasLen, 1) + + // Should print an error and return an exitCode, nothing else + result = dockerCmdWithResult("network", "inspect", "nonexistent") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "[]", + }) +} + +func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) { + dockerCmd(c, "network", "create", "brNetForInspect") + assertNwIsAvailable(c, "brNetForInspect") + defer func() { + dockerCmd(c, "network", "rm", "brNetForInspect") + assertNwNotAvailable(c, "brNetForInspect") + }() + + out, _ := dockerCmd(c, "run", "-d", "--name", "testNetInspect1", "--net", "brNetForInspect", "busybox", "top") + c.Assert(waitRun("testNetInspect1"), check.IsNil) + containerID := strings.TrimSpace(out) + defer func() { + // we don't stop container by name, because we'll rename it later + dockerCmd(c, "stop", containerID) + }() + + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + container, ok := networkResources[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container.Name, checker.Equals, "testNetInspect1") + + // rename container and check docker inspect output update + newName := "HappyNewName" + dockerCmd(c, "rename", "testNetInspect1", newName) + + // check whether network inspect works properly + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + newNetRes := []types.NetworkResource{} + err = json.Unmarshal([]byte(out), &newNetRes) + c.Assert(err, check.IsNil) + c.Assert(newNetRes, checker.HasLen, 1) + container1, ok := newNetRes[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container1.Name, checker.Equals, newName) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnect(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + nr := getNwResource(c, "test") + + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + containerID := strings.TrimSpace(out) + + // connect the container to the test network + dockerCmd(c, "network", "connect", "test", containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], check.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, check.IsNil) + containerIP := findContainerIP(c, "test", "test") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + dockerCmd(c, "network", "disconnect", "test", containerID) + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run another container + out, _ = dockerCmd(c, "run", "-d", "--net", "test", "--name", "test2", "busybox", "top") + c.Assert(waitRun("test2"), check.IsNil) + containerID = strings.TrimSpace(out) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 1) + + // force disconnect the container to the test network + dockerCmd(c, "network", "disconnect", "-f", "test", containerID) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) { + // test0 bridge network + dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1") + assertNwIsAvailable(c, "test1") + + // test2 bridge network does not overlap + dockerCmd(c, "network", "create", "--subnet=192.169.0.0/16", "test2") + assertNwIsAvailable(c, "test2") + + // for networks w/o ipam specified, docker will choose proper non-overlapping subnets + dockerCmd(c, "network", "create", "test3") + assertNwIsAvailable(c, "test3") + dockerCmd(c, "network", "create", "test4") + assertNwIsAvailable(c, "test4") + dockerCmd(c, "network", "create", "test5") + assertNwIsAvailable(c, "test5") + + // test network with multiple subnets + // bridge network doesn't support multiple subnets. hence, use a dummy driver that supports + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6") + assertNwIsAvailable(c, "test6") + + // test network with multiple subnets with valid ipam combinations + // also check same subnet across networks when the driver supports it. + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, + "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", + "--gateway=192.168.0.100", "--gateway=192.170.0.100", + "--ip-range=192.168.1.0/24", + "--aux-address", "a=192.168.1.5", "--aux-address", "b=192.168.1.6", + "--aux-address", "c=192.170.1.5", "--aux-address", "d=192.170.1.6", + "test7") + assertNwIsAvailable(c, "test7") + + // cleanup + for i := 1; i < 8; i++ { + dockerCmd(c, "network", "rm", fmt.Sprintf("test%d", i)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) { + // Create a bridge network using custom ipam driver + dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "br0") + assertNwIsAvailable(c, "br0") + + // Verify expected network ipam fields are there + nr := getNetworkResource(c, "br0") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.IPAM.Driver, checker.Equals, dummyIPAMDriver) + + // remove network and exercise remote ipam driver + dockerCmd(c, "network", "rm", "br0") + assertNwNotAvailable(c, "br0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) { + // Create a bridge network using custom ipam driver and options + dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0") + assertNwIsAvailable(c, "br0") + + // Verify expected network ipam options + nr := getNetworkResource(c, "br0") + opts := nr.IPAM.Options + c.Assert(opts["opt1"], checker.Equals, "drv1") + c.Assert(opts["opt2"], checker.Equals, "drv2") +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) { + nr := getNetworkResource(c, "none") + c.Assert(nr.Driver, checker.Equals, "null") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 0) + + nr = getNetworkResource(c, "host") + c.Assert(nr.Driver, checker.Equals, "host") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 0) + + nr = getNetworkResource(c, "bridge") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) + c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomUnspecified(c *check.C) { + // if unspecified, network subnet will be selected from inside preferred pool + dockerCmd(c, "network", "create", "test01") + assertNwIsAvailable(c, "test01") + + nr := getNetworkResource(c, "test01") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) + c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) + + dockerCmd(c, "network", "rm", "test01") + assertNwNotAvailable(c, "test01") +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) { + dockerCmd(c, "network", "create", "--driver=bridge", "--ipv6", "--subnet=fd80:24e2:f998:72d6::/64", "--subnet=172.28.0.0/16", "--ip-range=172.28.5.0/24", "--gateway=172.28.5.254", "br0") + assertNwIsAvailable(c, "br0") + + nr := getNetworkResource(c, "br0") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, true) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 2) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") + c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") + c.Assert(nr.Internal, checker.False) + dockerCmd(c, "network", "rm", "br0") + assertNwNotAvailable(c, "test01") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMInvalidCombinations(c *check.C) { + // network with ip-range out of subnet range + _, _, err := dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--ip-range=192.170.0.0/16", "test") + c.Assert(err, check.NotNil) + + // network with multiple gateways for a single subnet + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test") + c.Assert(err, check.NotNil) + + // Multiple overlapping subnets in the same network must fail + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test") + c.Assert(err, check.NotNil) + + // overlapping subnets across networks must fail + // create a valid test0 network + dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test0") + assertNwIsAvailable(c, "test0") + // create an overlapping test1 network + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.128.0/17", "test1") + c.Assert(err, check.NotNil) + dockerCmd(c, "network", "rm", "test0") + assertNwNotAvailable(c, "test0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) { + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "-o", "opt1=drv1", "-o", "opt2=drv2", "testopt") + assertNwIsAvailable(c, "testopt") + gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData] + c.Assert(gopts, checker.NotNil) + opts, ok := gopts.(map[string]interface{}) + c.Assert(ok, checker.Equals, true) + c.Assert(opts["opt1"], checker.Equals, "drv1") + c.Assert(opts["opt2"], checker.Equals, "drv2") + dockerCmd(c, "network", "rm", "testopt") + assertNwNotAvailable(c, "testopt") + +} + +func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + var ( + npName = "tiborvass/test-docker-netplugin" + npTag = "latest" + npNameWithTag = npName + ":" + npTag + ) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npName) + c.Assert(out, checker.Contains, npTag) + c.Assert(out, checker.Contains, "true") + + dockerCmd(c, "network", "create", "-d", npNameWithTag, "v2net") + assertNwIsAvailable(c, "v2net") + dockerCmd(c, "network", "rm", "v2net") + assertNwNotAvailable(c, "v2net") + +} + +func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *check.C) { + testRequires(c, ExecSupport) + // On default bridge network built-in service discovery should not happen + hostsFile := "/etc/hosts" + bridgeName := "external-bridge" + bridgeIP := "192.169.255.254/24" + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = s.d.StartWithBusybox("--bridge", bridgeName) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + // run two containers and store first container's etc/hosts content + out, err = s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + cid1 := strings.TrimSpace(out) + defer s.d.Cmd("stop", cid1) + + hosts, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("run", "-d", "--name", "container2", "busybox", "top") + c.Assert(err, check.IsNil) + cid2 := strings.TrimSpace(out) + + // verify first container's etc/hosts file has not changed after spawning the second named container + hostsPost, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // stop container 2 and verify first container's etc/hosts has not changed + _, err = s.d.Cmd("stop", cid2) + c.Assert(err, check.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // but discovery is on when connecting to non default bridge network + network := "anotherbridge" + out, err = s.d.Cmd("network", "create", network) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer s.d.Cmd("network", "rm", network) + + out, err = s.d.Cmd("network", "connect", network, cid1) + c.Assert(err, check.IsNil, check.Commentf(out)) + + hosts, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second network connection", hostsFile)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { + testRequires(c, ExecSupport, NotArm) + hostsFile := "/etc/hosts" + cstmBridgeNw := "custom-bridge-nw" + cstmBridgeNw1 := "custom-bridge-nw1" + + dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw) + assertNwIsAvailable(c, cstmBridgeNw) + + // run two anonymous containers and store their etc/hosts content + out, _ := dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") + cid1 := strings.TrimSpace(out) + + hosts1, err := readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") + cid2 := strings.TrimSpace(out) + + hosts2, err := readContainerFileWithExec(cid2, hostsFile) + c.Assert(err, checker.IsNil) + + // verify first container etc/hosts file has not changed + hosts1post, err := readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on anonymous container creation", hostsFile)) + + // Connect the 2nd container to a new network and verify the + // first container /etc/hosts file still hasn't changed. + dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw1) + assertNwIsAvailable(c, cstmBridgeNw1) + + dockerCmd(c, "network", "connect", cstmBridgeNw1, cid2) + + hosts2, err = readContainerFileWithExec(cid2, hostsFile) + c.Assert(err, checker.IsNil) + + hosts1post, err = readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on container connect", hostsFile)) + + // start a named container + cName := "AnyName" + out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "--name", cName, "busybox", "top") + cid3 := strings.TrimSpace(out) + + // verify that container 1 and 2 can ping the named container + dockerCmd(c, "exec", cid1, "ping", "-c", "1", cName) + dockerCmd(c, "exec", cid2, "ping", "-c", "1", cName) + + // Stop named container and verify first two containers' etc/hosts file hasn't changed + dockerCmd(c, "stop", cid3) + hosts1post, err = readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on name container creation", hostsFile)) + + hosts2post, err := readContainerFileWithExec(cid2, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts2), checker.Equals, string(hosts2post), + check.Commentf("Unexpected %s change on name container creation", hostsFile)) + + // verify that container 1 and 2 can't ping the named container now + _, _, err = dockerCmdWithError("exec", cid1, "ping", "-c", "1", cName) + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", cid2, "ping", "-c", "1", cName) + c.Assert(err, check.NotNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkLinkOnDefaultNetworkOnly(c *check.C) { + // Legacy Link feature must work only on default network, and not across networks + cnt1 := "container1" + cnt2 := "container2" + network := "anotherbridge" + + // Run first container on default network + dockerCmd(c, "run", "-d", "--name", cnt1, "busybox", "top") + + // Create another network and run the second container on it + dockerCmd(c, "network", "create", network) + assertNwIsAvailable(c, network) + dockerCmd(c, "run", "-d", "--net", network, "--name", cnt2, "busybox", "top") + + // Try launching a container on default network, linking to the first container. Must succeed + dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt1, cnt1), "busybox", "top") + + // Try launching a container on default network, linking to the second container. Must fail + _, _, err := dockerCmdWithError("run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") + c.Assert(err, checker.NotNil) + + // Connect second container to default network. Now a container on default network can link to it + dockerCmd(c, "network", "connect", "bridge", cnt2) + dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") +} + +func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { + // Verify exposed ports are present in ps output when running a container on + // a network managed by a driver which does not provide the default gateway + // for the container + nwn := "ov" + ctn := "bb" + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, expose1, expose2, "busybox", "top") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dnd := "dnd" + did := "did" + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + s.d.StartWithBusybox() + _, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1") + c.Assert(err, checker.IsNil) + + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "foo", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) + + // Kill daemon and restart + if err = s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + + server.Close() + + startTime := time.Now().Unix() + if err = s.d.Restart(); err != nil { + c.Fatal(err) + } + lapse := time.Now().Unix() - startTime + if lapse > 60 { + // In normal scenarios, daemon restart takes ~1 second. + // Plugin retry mechanism can delay the daemon start. systemd may not like it. + // Avoid accessing plugins during daemon bootup + c.Logf("daemon restart took too long : %d seconds", lapse) + } + + // Restart the custom dummy plugin + mux = http.NewServeMux() + server = httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + // trying to reuse the same ip must succeed + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "bar", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { + // Verify endpoint MAC address is correctly populated in container's network settings + nwn := "ov" + ctn := "bb" + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, "busybox", "top") + + mac := inspectField(c, ctn, "NetworkSettings.Networks."+nwn+".MacAddress") + c.Assert(mac, checker.Equals, "a0:b1:c2:d3:e4:f5") +} + +func (s *DockerSuite) TestInspectAPIMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "mybridge1") + dockerCmd(c, "network", "create", "mybridge2") + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "network", "connect", "mybridge1", id) + dockerCmd(c, "network", "connect", "mybridge2", id) + + body := getInspectBody(c, "v1.20", id) + var inspect120 v1p20.ContainerJSON + err := json.Unmarshal(body, &inspect120) + c.Assert(err, checker.IsNil) + + versionedIP := inspect120.NetworkSettings.IPAddress + + body = getInspectBody(c, "v1.21", id) + var inspect121 types.ContainerJSON + err = json.Unmarshal(body, &inspect121) + c.Assert(err, checker.IsNil) + c.Assert(inspect121.NetworkSettings.Networks, checker.HasLen, 3) + + bridge := inspect121.NetworkSettings.Networks["bridge"] + c.Assert(bridge.IPAddress, checker.Equals, versionedIP) + c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) +} + +func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []string) { + // Run a container on the default network + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attach the container to other networks + for _, nw := range nws { + out, err = d.Cmd("network", "create", nw) + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("network", "connect", nw, cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + } +} + +func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) { + // Verify container is connected to all the networks + for _, nw := range nws { + out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Equals), "\n") + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) { + cName := "bb" + nwList := []string{"nw1", "nw2", "nw3"} + + s.d.StartWithBusybox() + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Reload daemon + s.d.Restart() + + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) { + cName := "cc" + nwList := []string{"nw1", "nw2", "nw3"} + + s.d.StartWithBusybox() + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Kill daemon and restart + if err := s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + s.d.Restart() + + // Restart container + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) { + out, _ := dockerCmd(c, "network", "create", "one") + containerOut, _, err := dockerCmdWithError("run", "-d", "--net", strings.TrimSpace(out), "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(containerOut)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + s.d.StartWithBusybox() + + // Run a few containers on host network + for i := 0; i < 10; i++ { + cName := fmt.Sprintf("hostc-%d", i) + out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // verfiy container has finished starting before killing daemon + err = s.d.waitRun(cName) + c.Assert(err, checker.IsNil) + } + + // Kill daemon ungracefully and restart + if err := s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // make sure all the containers are up and running + for i := 0; i < 10; i++ { + err := s.d.waitRun(fmt.Sprintf("hostc-%d", i)) + c.Assert(err, checker.IsNil) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectToHostFromOtherNetwork(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + dockerCmd(c, "network", "disconnect", "bridge", "container1") + out, _, err := dockerCmdWithError("network", "connect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromHost(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "container1", "--net=host", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + out, _, err := dockerCmdWithError("network", "disconnect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf("Should err out disconnect from host")) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithPortMapping(c *check.C) { + testRequires(c, NotArm) + dockerCmd(c, "network", "create", "test1") + dockerCmd(c, "run", "-d", "--name", "c1", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + dockerCmd(c, "network", "connect", "test1", "c1") +} + +func verifyPortMap(c *check.C, container, port, originalMapping string, mustBeEqual bool) { + chk := checker.Equals + if !mustBeEqual { + chk = checker.Not(checker.Equals) + } + currentMapping, _ := dockerCmd(c, "port", container, port) + c.Assert(currentMapping, chk, originalMapping) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectWithPortMapping(c *check.C) { + // Connect and disconnect a container with explicit and non-explicit + // host port mapping to/from networks which do cause and do not cause + // the container default gateway to change, and verify docker port cmd + // returns congruent information + testRequires(c, NotArm) + cnt := "c1" + dockerCmd(c, "network", "create", "aaa") + dockerCmd(c, "network", "create", "ccc") + + dockerCmd(c, "run", "-d", "--name", cnt, "-p", "9000:90", "-p", "70", "busybox", "top") + c.Assert(waitRun(cnt), check.IsNil) + curPortMap, _ := dockerCmd(c, "port", cnt, "70") + curExplPortMap, _ := dockerCmd(c, "port", cnt, "90") + + // Connect to a network which causes the container's default gw switch + dockerCmd(c, "network", "connect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Disconnect from a network which causes the container's default gw switch + dockerCmd(c, "network", "disconnect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Connect to a network which does not cause the container's default gw switch + dockerCmd(c, "network", "connect", "ccc", cnt) + verifyPortMap(c, cnt, "70", curPortMap, true) + verifyPortMap(c, cnt, "90", curExplPortMap, true) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithMac(c *check.C) { + macAddress := "02:42:ac:11:00:02" + dockerCmd(c, "network", "create", "mynetwork") + dockerCmd(c, "run", "--name=test", "-d", "--mac-address", macAddress, "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + mac1 := inspectField(c, "test", "NetworkSettings.Networks.bridge.MacAddress") + c.Assert(strings.TrimSpace(mac1), checker.Equals, macAddress) + dockerCmd(c, "network", "connect", "mynetwork", "test") + mac2 := inspectField(c, "test", "NetworkSettings.Networks.mynetwork.MacAddress") + c.Assert(strings.TrimSpace(mac2), checker.Not(checker.Equals), strings.TrimSpace(mac1)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCreatedContainer(c *check.C) { + dockerCmd(c, "create", "--name", "test", "busybox") + networks := inspectField(c, "test", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should return 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + dockerCmd(c, "network", "connect", "test", "foo") + dockerCmd(c, "restart", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should contain 'bridge' network")) + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "create", "--name=foo", "busybox", "top") + dockerCmd(c, "network", "connect", "test", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // start the container and test if we can ping it from another container in the same network + dockerCmd(c, "start", "foo") + c.Assert(waitRun("foo"), checker.IsNil) + ip := inspectField(c, "foo", "NetworkSettings.Networks.test.IPAddress") + ip = strings.TrimSpace(ip) + dockerCmd(c, "run", "--net=test", "busybox", "sh", "-c", fmt.Sprintf("ping -c 1 %s", ip)) + + dockerCmd(c, "stop", "foo") + + // Test disconnect + dockerCmd(c, "network", "disconnect", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectContainerNonexistingNetwork(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--net=test", "-d", "--name=foo", "busybox", "top") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // Stop container and remove network + dockerCmd(c, "stop", "foo") + dockerCmd(c, "network", "rm", "test") + + // Test disconnecting stopped container from nonexisting network + dockerCmd(c, "network", "disconnect", "-f", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) { + // create two networks + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "n0") + assertNwIsAvailable(c, "n0") + + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--ip-range=172.30.5.0/24", "--subnet=2001:db8:abcd::/64", "--ip-range=2001:db8:abcd::/80", "n1") + assertNwIsAvailable(c, "n1") + + // run a container on first network specifying the ip addresses + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + + // connect the container to the second network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n1", "c0") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Stop and restart the container + dockerCmd(c, "stop", "c0") + dockerCmd(c, "start", "c0") + + // verify requested addresses are applied and configs are still there + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Still it should fail to connect to the default network with a specified IP (whatever ip) + out, _, err := dockerCmdWithError("network", "connect", "--ip", "172.21.55.44", "bridge", "c0") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIPStoppedContainer(c *check.C) { + // create a container + dockerCmd(c, "create", "--name", "c0", "busybox", "top") + + // create a network + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--subnet=2001:db8:abcd::/64", "n0") + assertNwIsAvailable(c, "n0") + + // connect the container to the network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n0", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // start the container, verify config has not changed and ip addresses are assigned + dockerCmd(c, "start", "c0") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // stop the container and check ip config has not changed + dockerCmd(c, "stop", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") +} + +func (s *DockerNetworkSuite) TestDockerNetworkUnsupportedRequiredIP(c *check.C) { + // requested IP is not supported on predefined networks + for _, mode := range []string{"none", "host", "bridge", "default"} { + checkUnsupportedNetworkAndIP(c, mode) + } + + // requested IP is not supported on networks with no user defined subnets + dockerCmd(c, "network", "create", "n0") + assertNwIsAvailable(c, "n0") + + out, _, err := dockerCmdWithError("run", "-d", "--ip", "172.28.99.88", "--net", "n0", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) + + out, _, err = dockerCmdWithError("run", "-d", "--ip6", "2001:db8:1234::9988", "--net", "n0", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) + + dockerCmd(c, "network", "rm", "n0") + assertNwNotAvailable(c, "n0") +} + +func checkUnsupportedNetworkAndIP(c *check.C, nwMode string) { + out, _, err := dockerCmdWithError("run", "-d", "--net", nwMode, "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) +} + +func verifyIPAddressConfig(c *check.C, cName, nwname, ipv4, ipv6 string) { + if ipv4 != "" { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv4Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv4) + } + + if ipv6 != "" { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv6Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv6) + } +} + +func verifyIPAddresses(c *check.C, cName, nwname, ipv4, ipv6 string) { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAddress", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv4) + + out = inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.GlobalIPv6Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv6) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectLinkLocalIP(c *check.C) { + // create one test network + dockerCmd(c, "network", "create", "n0") + assertNwIsAvailable(c, "n0") + + // run a container with incorrect link-local address + _, _, err := dockerCmdWithError("run", "--link-local-ip", "169.253.5.5", "busybox", "top") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("run", "--link-local-ip", "2001:db8::89", "busybox", "top") + c.Assert(err, check.NotNil) + + // run two containers with link-local ip on the test network + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--link-local-ip", "169.254.7.7", "--link-local-ip", "fe80::254:77", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + dockerCmd(c, "run", "-d", "--name", "c1", "--net=n0", "--link-local-ip", "169.254.8.8", "--link-local-ip", "fe80::254:88", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + + // run a container on the default network and connect it to the test network specifying a link-local address + dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") + c.Assert(waitRun("c2"), check.IsNil) + dockerCmd(c, "network", "connect", "--link-local-ip", "169.254.9.9", "n0", "c2") + + // verify the three containers can ping each other via the link-local addresses + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) + + // Stop and restart the three containers + dockerCmd(c, "stop", "c0") + dockerCmd(c, "stop", "c1") + dockerCmd(c, "stop", "c2") + dockerCmd(c, "start", "c0") + dockerCmd(c, "start", "c1") + dockerCmd(c, "start", "c2") + + // verify the ping again + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectLink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "foo1") + dockerCmd(c, "network", "create", "-d", "bridge", "foo2") + + dockerCmd(c, "run", "-d", "--net=foo1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in a user-defined network with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=foo1", "--name=second", "--link=first:FirstInFoo1", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias FirstInFoo1 must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.IsNil) + + // connect first container to foo2 network + dockerCmd(c, "network", "connect", "foo2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "--link=first:FirstInFoo2", "foo2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) + + // disconnect first container from foo1 network + dockerCmd(c, "network", "disconnect", "foo1", "first") + + // link in foo1 network must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.NotNil) + + // link in foo2 network must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectDefault(c *check.C) { + netWorkName1 := "test1" + netWorkName2 := "test2" + containerName := "foo" + + dockerCmd(c, "network", "create", netWorkName1) + dockerCmd(c, "network", "create", netWorkName2) + dockerCmd(c, "create", "--name", containerName, "busybox", "top") + dockerCmd(c, "network", "connect", netWorkName1, containerName) + dockerCmd(c, "network", "connect", netWorkName2, containerName) + dockerCmd(c, "network", "disconnect", "bridge", containerName) + + dockerCmd(c, "start", containerName) + c.Assert(waitRun(containerName), checker.IsNil) + networks := inspectField(c, containerName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netWorkName1, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName1))) + c.Assert(networks, checker.Contains, netWorkName2, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName2))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") + containerID := strings.TrimSpace(out) + for _, net := range defaults { + res, _, err := dockerCmdWithError("network", "connect", "--alias", "alias"+net, net, containerID) + c.Assert(err, checker.NotNil) + c.Assert(res, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + dockerCmd(c, "network", "create", "-d", "bridge", "net2") + + cid, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping first container and its alias + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // connect first container to net2 network + dockerCmd(c, "network", "connect", "--alias=bar", "net2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "net2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + + // disconnect first container from net1 network + dockerCmd(c, "network", "disconnect", "net1", "first") + + // ping to net1 scoped alias "foo" must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.NotNil) + + // ping to net2 scoped alias "bar" must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + // ping to net2 scoped alias short-id must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // verify the alias option is rejected when running on predefined network + out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + + // verify the alias option is rejected when connecting to predefined network + out, _, err = dockerCmdWithError("network", "connect", "--alias=any", "bridge", "first") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") + c.Assert(waitRun("c1.net1"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") + c.Assert(waitRun("c2.net1"), check.IsNil) + + // ping first container by its unqualified name + _, _, err := dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1") + c.Assert(err, check.IsNil) + + // ping first container by its qualified name + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1") + c.Assert(err, check.IsNil) + + // ping with first qualified name masked by an additional domain. should fail + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1.google.com") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestEmbeddedDNSInvalidInput(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "nw1") + + // Sending garbage to embedded DNS shouldn't crash the daemon + dockerCmd(c, "run", "-i", "--net=nw1", "--name=c1", "debian:jessie", "bash", "-c", "echo InvalidQuery > /dev/udp/127.0.0.11/53") +} + +func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) { + dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top") + c.Assert(waitRun("bb"), check.IsNil) + + ns0 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") + + // A failing redundant network connect should not alter current container's endpoint settings + _, _, err := dockerCmdWithError("network", "connect", "bridge", "bb") + c.Assert(err, check.NotNil) + + ns1 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") + c.Assert(ns1, check.Equals, ns0) +} + +func (s *DockerSuite) TestDockerNetworkInternalMode(c *check.C) { + dockerCmd(c, "network", "create", "--driver=bridge", "--internal", "internal") + assertNwIsAvailable(c, "internal") + nr := getNetworkResource(c, "internal") + c.Assert(nr.Internal, checker.True) + + dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + out, _, err := dockerCmdWithError("exec", "first", "ping", "-W", "4", "-c", "1", "www.google.com") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "ping: bad address") + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +// Test for #21401 +func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *check.C) { + dockerCmd(c, "network", "create", "test@#$") + assertNwIsAvailable(c, "test@#$") + dockerCmd(c, "network", "rm", "test@#$") + assertNwNotAvailable(c, "test@#$") + + dockerCmd(c, "network", "create", "kiwl$%^") + assertNwIsAvailable(c, "kiwl$%^") + dockerCmd(c, "network", "rm", "kiwl$%^") + assertNwNotAvailable(c, "kiwl$%^") +} + +func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) { + testRequires(t, DaemonIsLinux) + if err := s.d.StartWithBusybox("--live-restore"); err != nil { + t.Fatal(err) + } + defer s.d.Stop() + oldCon := "old" + + _, err := s.d.Cmd("run", "-d", "--name", oldCon, "-p", "80:80", "busybox", "top") + if err != nil { + t.Fatal(err) + } + oldContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", oldCon) + if err != nil { + t.Fatal(err) + } + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // restart the daemon + if err := s.d.Start("--live-restore"); err != nil { + t.Fatal(err) + } + + // start a new container, the new container's ip should not be the same with + // old running container. + newCon := "new" + _, err = s.d.Cmd("run", "-d", "--name", newCon, "busybox", "top") + if err != nil { + t.Fatal(err) + } + newContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", newCon) + if err != nil { + t.Fatal(err) + } + if strings.Compare(strings.TrimSpace(oldContainerIP), strings.TrimSpace(newContainerIP)) == 0 { + t.Fatalf("new container ip should not equal to old running container ip") + } + + // start a new container, the new container should ping old running container + _, err = s.d.Cmd("run", "-t", "busybox", "ping", "-c", "1", oldContainerIP) + if err != nil { + t.Fatal(err) + } + + // start a new container, trying to publish port 80:80 should fail + out, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") + if err == nil || !strings.Contains(out, "Bind for 0.0.0.0:80 failed: port is already allocated") { + t.Fatalf("80 port is allocated to old running container, it should failed on allocating to new container") + } + + // kill old running container and try to allocate again + _, err = s.d.Cmd("kill", oldCon) + if err != nil { + t.Fatal(err) + } + id, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") + if err != nil { + t.Fatal(err) + } + + // Cleanup because these containers will not be shut down by daemon + out, err = s.d.Cmd("stop", newCon) + if err != nil { + t.Fatalf("err: %v %v", err, string(out)) + } + _, err = s.d.Cmd("stop", strings.TrimSpace(id)) + if err != nil { + t.Fatal(err) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkFlagAlias(c *check.C) { + dockerCmd(c, "network", "create", "user") + output, status := dockerCmd(c, "run", "--rm", "--network=user", "--network-alias=foo", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) + + output, status, _ = dockerCmdWithError("run", "--rm", "--net=user", "--network=user", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) + + output, status, _ = dockerCmdWithError("run", "--rm", "--network=user", "--net-alias=foo", "--network-alias=bar", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkValidateIP(c *check.C) { + _, _, err := dockerCmdWithError("network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "mynet") + c.Assert(err, check.IsNil) + assertNwIsAvailable(c, "mynet") + + _, _, err = dockerCmdWithError("run", "-d", "--name", "mynet0", "--net=mynet", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(err, check.IsNil) + c.Assert(waitRun("mynet0"), check.IsNil) + verifyIPAddressConfig(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988") + + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "mynet_ip", "--ip6", "2001:db8:1234::9999", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv4 address") + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "172.28.99.99", "--ip6", "mynet_ip6", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") + // This is a case of IPv4 address to `--ip6` + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "172.28.99.99", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") + // This is a special case of an IPv4-mapped IPv6 address + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "::ffff:172.28.99.99", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") +} + +// Test case for 26220 +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromBridge(c *check.C) { + out, _ := dockerCmd(c, "network", "inspect", "--format", "{{.Id}}", "bridge") + + network := strings.TrimSpace(out) + + name := "test" + dockerCmd(c, "create", "--name", name, "busybox", "top") + + _, _, err := dockerCmdWithError("network", "disconnect", network, name) + c.Assert(err, check.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go new file mode 100644 index 0000000..bcf59f8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go @@ -0,0 +1,30 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectOomKilledTrue(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) + + name := "testoomkilled" + _, exitCode, _ := dockerCmdWithError("run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + + c.Assert(exitCode, checker.Equals, 137, check.Commentf("OOM exit should be 137")) + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "true") +} + +func (s *DockerSuite) TestInspectOomKilledFalse(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) + + name := "testoomkilled" + dockerCmd(c, "run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "echo hello world") + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "false") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go new file mode 100644 index 0000000..9217a69 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go @@ -0,0 +1,66 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPause(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + + name := "testeventpause" + runSleepingContainer(c, "-d", "--name", name) + + dockerCmd(c, "pause", name) + pausedContainers, err := getSliceOfPausedContainers() + c.Assert(err, checker.IsNil) + c.Assert(len(pausedContainers), checker.Equals, 1) + + dockerCmd(c, "unpause", name) + + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") +} + +func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + + containers := []string{ + "testpausewithmorecontainers1", + "testpausewithmorecontainers2", + } + for _, name := range containers { + runSleepingContainer(c, "-d", "--name", name) + } + dockerCmd(c, append([]string{"pause"}, containers...)...) + pausedContainers, err := getSliceOfPausedContainers() + c.Assert(err, checker.IsNil) + c.Assert(len(pausedContainers), checker.Equals, len(containers)) + + dockerCmd(c, append([]string{"unpause"}, containers...)...) + + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + + for _, name := range containers { + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") + } +} + +func (s *DockerSuite) TestPauseFailsOnWindowsServerContainers(c *check.C) { + testRequires(c, DaemonIsWindows, NotPausable) + runSleepingContainer(c, "-d", "--name=test") + out, _, _ := dockerCmdWithError("pause", "test") + c.Assert(out, checker.Contains, "cannot pause Windows Server Containers") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go new file mode 100644 index 0000000..380357d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go @@ -0,0 +1,393 @@ +package main + +import ( + "fmt" + "os/exec" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +var ( + pluginProcessName = "sample-volume-plugin" + pName = "tiborvass/sample-volume-plugin" + npName = "tiborvass/test-docker-netplugin" + pTag = "latest" + pNameWithTag = pName + ":" + pTag + npNameWithTag = npName + ":" + pTag +) + +func (s *DockerSuite) TestPluginBasicOps(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "true") + + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + id = strings.TrimSpace(id) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + + _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id)) + if !os.IsNotExist(err) { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestPluginForceRemove(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + out, _, err = dockerCmdWithError("plugin", "remove", "--force", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActive(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdWithError("volume", "create", "-d", pNameWithTag, "--name", "testvol1") + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(out, checker.Contains, "in use") + + _, _, err = dockerCmdWithError("volume", "rm", "testvol1") + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActiveNetwork(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("network", "create", "-d", npNameWithTag, "test") + c.Assert(err, checker.IsNil) + + nID := strings.TrimSpace(out) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is in use") + + _, _, err = dockerCmdWithError("network", "rm", nID) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npNameWithTag) +} + +func (s *DockerSuite) TestPluginInstallDisable(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "false") + + out, _, err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) +} + +func (s *DockerSuite) TestPluginInstallDisableVolumeLs(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + dockerCmd(c, "volume", "ls") +} + +func (s *DockerSuite) TestPluginSet(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=0]") + + dockerCmd(c, "plugin", "set", pName, "DEBUG=1") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (s *DockerSuite) TestPluginInstallArgs(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName, "DEBUG=1") + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64) + + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) + + out, _, err := dockerCmdWithError("plugin", "install", repoName) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "target is image") +} + +func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already disabled") + + _, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSuite) TestPluginCreate(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + name := "foo/bar-driver" + temp, err := ioutil.TempDir("", "foo") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(temp) + + data := `{"description": "foo plugin"}` + err = ioutil.WriteFile(filepath.Join(temp, "config.json"), []byte(data), 0644) + c.Assert(err, checker.IsNil) + + err = os.MkdirAll(filepath.Join(temp, "rootfs"), 0700) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "already exist") + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + // The output will consists of one HEADER line and one line of foo/bar-driver + c.Assert(len(strings.Split(strings.TrimSpace(out), "\n")), checker.Equals, 2) +} + +func (s *DockerSuite) TestPluginInspect(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "true") + + // Find the ID first + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + c.Assert(id, checker.Not(checker.Equals), "") + + // Long form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Short form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name with tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name without tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + + // After remove nothing should be found + _, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.NotNil) +} + +// Test case for https://github.com/docker/docker/pull/29186#discussion_r91277345 +func (s *DockerSuite) TestPluginInspectOnWindows(c *check.C) { + // This test should work on Windows only + testRequires(c, DaemonIsWindows) + + out, _, err := dockerCmdWithError("plugin", "inspect", "foobar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "plugins are not supported on this platform") + c.Assert(err.Error(), checker.Contains, "plugins are not supported on this platform") +} + +func (s *DockerTrustSuite) TestPluginTrustedInstall(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + trustedName := s.setupTrustedplugin(c, pNameWithTag, "trusted-plugin-install") + + installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", trustedName) + s.trustedCmd(installCmd) + out, _, err := runCommandWithOutput(installCmd) + + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "true") + + out, _, err = dockerCmdWithError("plugin", "disable", trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out, _, err = dockerCmdWithError("plugin", "enable", trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out, _, err = dockerCmdWithError("plugin", "rm", "-f", trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + // Try untrusted pull to ensure we pushed the tag to the registry + installCmd = exec.Command(dockerBinary, "plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName) + s.trustedCmd(installCmd) + out, _, err = runCommandWithOutput(installCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "true") + +} + +func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + pluginName := fmt.Sprintf("%v/dockercliuntrusted/plugintest:latest", privateRegistryURL) + // install locally and push to private registry + dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", pluginName, pNameWithTag) + dockerCmd(c, "plugin", "push", pluginName) + dockerCmd(c, "plugin", "rm", "-f", pluginName) + + // Try trusted install on untrusted plugin + installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", pluginName) + s.trustedCmd(installCmd) + out, _, err := runCommandWithOutput(installCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) +} + +func (s *DockerSuite) TestPluginUpgrade(c *check.C) { + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + plugin := "cpuguy83/docker-volume-driver-plugin-local:latest" + pluginV2 := "cpuguy83/docker-volume-driver-plugin-local:v2" + + dockerCmd(c, "plugin", "install", "--grant-all-permissions", plugin) + dockerCmd(c, "volume", "create", "--driver", plugin, "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "touch /apple/core") + + out, _, err := dockerCmdWithError("plugin", "upgrade", "--grant-all-permissions", plugin, pluginV2) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "disabled before upgrading") + + out, _ = dockerCmd(c, "plugin", "inspect", "--format={{.ID}}", plugin) + id := strings.TrimSpace(out) + + // make sure "v2" does not exists + _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id, "rootfs", "v2")) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf(out)) + + dockerCmd(c, "plugin", "disable", "-f", plugin) + dockerCmd(c, "plugin", "upgrade", "--grant-all-permissions", "--skip-remote-check", plugin, pluginV2) + + // make sure "v2" file exists + _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id, "rootfs", "v2")) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "plugin", "enable", plugin) + dockerCmd(c, "volume", "inspect", "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "ls -lh /apple/core") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go new file mode 100644 index 0000000..80b00fe --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "fmt" + "net" + "regexp" + "sort" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPortList(c *check.C) { + testRequires(c, DaemonIsLinux) + // one port + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", firstID) + + err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", firstID) + + // three port + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", ID) + + // more and one port mapped to the same container port + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9999:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "80/tcp -> 0.0.0.0:9999", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + testRange := func() { + // host port ranges used + IDs := make([]string, 3) + for i := 0; i < 3; i++ { + out, _ = dockerCmd(c, "run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + IDs[i] = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", IDs[i]) + + err = assertPortList(c, out, []string{fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i)}) + // Port list is not correct + c.Assert(err, checker.IsNil) + } + + // test port range exhaustion + out, _, err = dockerCmdWithError("run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + // Exhausted port range did not return an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + for i := 0; i < 3; i++ { + dockerCmd(c, "rm", "-f", IDs[i]) + } + } + testRange() + // Verify we ran re-use port ranges after they are no longer in use. + testRange() + + // test invalid port ranges + for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} { + out, _, err = dockerCmdWithError("run", "-d", + "-p", invalidRange, + "busybox", "top") + // Port range should have returned an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + } + + // test host range:container range spec. + out, _ = dockerCmd(c, "run", "-d", + "-p", "9800-9803:80-83", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9800", + "81/tcp -> 0.0.0.0:9801", + "82/tcp -> 0.0.0.0:9802", + "83/tcp -> 0.0.0.0:9803"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + // test mixing protocols in same port range + out, _ = dockerCmd(c, "run", "-d", + "-p", "8000-8080:80", + "-p", "8000-8080:80/udp", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:8000", + "80/udp -> 0.0.0.0:8000"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) +} + +func assertPortList(c *check.C, out string, expected []string) error { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + if len(lines) != len(expected) { + return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) + } + sort.Strings(lines) + sort.Strings(expected) + + for i := 0; i < len(expected); i++ { + if lines[i] != expected[i] { + return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|") + } + } + + return nil +} + +func stopRemoveContainer(id string, c *check.C) { + dockerCmd(c, "rm", "-f", id) +} + +func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) { + testRequires(c, DaemonIsLinux) + // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) + + // Run the container forcing to publish the exposed ports + dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the exposed ports in the port bindings + expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1) + expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort1) in docker ps output + c.Assert(expBndRegx1.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort1: %s", out, unpPort1)) + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort2) in docker ps output + c.Assert(expBndRegx2.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort2: %s", out, unpPort2)) + + // Run the container specifying explicit port bindings for the exposed ports + offset := 10000 + pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1) + pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2) + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, expose1, expose2, "busybox", "sleep", "5") + id := strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1) + expBnd2 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with explicit port bindings and no exposed ports + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5") + id = strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with one unpublished exposed port and one explicit port binding + dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the specified unpublished port and port mapping + out, _ = dockerCmd(c, "ps", "-n=1") + // Missing unpublished exposed ports (unpPort1) in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) +} + +func (s *DockerSuite) TestPortHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", "9876") + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876") + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + _, exposedPort, err := net.SplitHostPort(out) + c.Assert(err, checker.IsNil, check.Commentf("out: %s", out)) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortBindingOnSandbox(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") + nr := getNetworkResource(c, "internal-net") + c.Assert(nr.Internal, checker.Equals, true) + + dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", + "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") + c.Assert(waitRun("c1"), check.IsNil) + + _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.NotNil, + check.Commentf("Port mapping on internal network is expected to fail")) + + // Connect container to another normal bridge network + dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") + dockerCmd(c, "network", "connect", "foo-net", "c1") + + _, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.IsNil, + check.Commentf("Port mapping on the new network is expected to succeed")) + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go new file mode 100644 index 0000000..1cf569b --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go @@ -0,0 +1,53 @@ +package main + +import ( + "net" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCLIProxyDisableProxyUnixSock(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. + + cmd := exec.Command(dockerBinary, "info") + cmd.Env = appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999") + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf("%v", out)) + +} + +// Can't use localhost here since go has a special case to not use proxy if connecting to localhost +// See https://golang.org/pkg/net/http/#ProxyFromEnvironment +func (s *DockerDaemonSuite) TestCLIProxyProxyTCPSock(c *check.C) { + testRequires(c, SameHostDaemon) + // get the IP to use to connect since we can't use localhost + addrs, err := net.InterfaceAddrs() + c.Assert(err, checker.IsNil) + var ip string + for _, addr := range addrs { + sAddr := addr.String() + if !strings.Contains(sAddr, "127.0.0.1") { + addrArr := strings.Split(sAddr, "/") + ip = addrArr[0] + break + } + } + + c.Assert(ip, checker.Not(checker.Equals), "") + + err = s.d.Start("-H", "tcp://"+ip+":2375") + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "info") + cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"} + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + // Test with no_proxy + cmd.Env = append(cmd.Env, "NO_PROXY="+ip) + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "info")) + c.Assert(err, checker.IsNil, check.Commentf("%v", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go new file mode 100644 index 0000000..dabbc72 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go @@ -0,0 +1,91 @@ +// +build !windows + +package main + +import ( + "strconv" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func pruneNetworkAndVerify(c *check.C, d *SwarmDaemon, kept, pruned []string) { + _, err := d.Cmd("network", "prune", "--force") + c.Assert(err, checker.IsNil) + out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + for _, s := range kept { + c.Assert(out, checker.Contains, s) + } + for _, s := range pruned { + c.Assert(out, checker.Not(checker.Contains), s) + } +} + +func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + _, err := d.Cmd("network", "create", "n1") // used by container (testprune) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n2") + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n3", "--driver", "overlay") // used by service (testprunesvc) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n4", "--driver", "overlay") + c.Assert(err, checker.IsNil) + + cName := "testprune" + _, err = d.Cmd("run", "-d", "--name", cName, "--net", "n1", "busybox", "top") + c.Assert(err, checker.IsNil) + + serviceName := "testprunesvc" + replicas := 1 + out, err := d.Cmd("service", "create", "--name", serviceName, + "--replicas", strconv.Itoa(replicas), + "--network", "n3", + "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, replicas+1) + + // prune and verify + pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"}) + + // remove containers, then prune and verify again + _, err = d.Cmd("rm", "-f", cName) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"}) +} + +func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { + c.Assert(s.d.StartWithBusybox(), checker.IsNil) + + out, _, err := s.d.buildImageWithOut("test", + `FROM busybox + LABEL foo=bar`, true, "-q") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force", "--all") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go new file mode 100644 index 0000000..19ede90 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go @@ -0,0 +1,952 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPsListContainersBase(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + firstID := strings.TrimSpace(out) + + out, _ = runSleepingContainer(c, "-d") + secondID := strings.TrimSpace(out) + + // not long running + out, _ = dockerCmd(c, "run", "-d", "busybox", "true") + thirdID := strings.TrimSpace(out) + + out, _ = runSleepingContainer(c, "-d") + fourthID := strings.TrimSpace(out) + + // make sure the second is running + c.Assert(waitRun(secondID), checker.IsNil) + + // make sure third one is not running + dockerCmd(c, "wait", thirdID) + + // make sure the forth is running + c.Assert(waitRun(fourthID), checker.IsNil) + + // all + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out)) + + // running + out, _ = dockerCmd(c, "ps") + c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out)) + + // limit + out, _ = dockerCmd(c, "ps", "-n=2", "-a") + expected := []string{fourthID, thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-n=2") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter since + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a") + expected = []string{fourthID, thirdID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID) + expected = []string{fourthID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+thirdID) + expected = []string{fourthID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + // filter before + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a") + expected = []string{thirdID, secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID) + expected = []string{secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+thirdID) + expected = []string{secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + // filter since & before + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a") + expected = []string{thirdID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID) + expected = []string{secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out)) + + // filter since & limit + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a") + expected = []string{fourthID, thirdID} + + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter before & limit + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a") + expected = []string{thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter since & filter before & limit + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a") + expected = []string{thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + +} + +func assertContainerList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + + if len(lines)-1 != len(expected) { + return false + } + + containerIDIndex := strings.Index(lines[0], "CONTAINER ID") + for i := 0; i < len(expected); i++ { + foundID := lines[i+1][containerIDIndex : containerIDIndex+12] + if foundID != expected[i][:12] { + return false + } + } + + return true +} + +// FIXME(vdemeester) Move this into a unit test in daemon package +func (s *DockerSuite) TestPsListContainersInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("ps", "-f", "invalidFilter=test") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestPsListContainersSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "busybox") + + baseOut, _ := dockerCmd(c, "ps", "-s", "-n=1") + baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n") + baseSizeIndex := strings.Index(baseLines[0], "SIZE") + baseFoundsize := baseLines[1][baseSizeIndex:] + baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0]) + c.Assert(err, checker.IsNil) + + name := "test_size" + dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") + id, err := getIDByName(name) + c.Assert(err, checker.IsNil) + + runCmd := exec.Command(dockerBinary, "ps", "-s", "-n=1") + var out string + + wait := make(chan struct{}) + go func() { + out, _, err = runCommandWithOutput(runCmd) + close(wait) + }() + select { + case <-wait: + case <-time.After(3 * time.Second): + c.Fatalf("Calling \"docker ps -s\" timed out!") + } + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.Trim(out, "\n "), "\n") + c.Assert(lines, checker.HasLen, 2, check.Commentf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))) + sizeIndex := strings.Index(lines[0], "SIZE") + idIndex := strings.Index(lines[0], "CONTAINER ID") + foundID := lines[1][idIndex : idIndex+12] + c.Assert(foundID, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s", id[:12], foundID)) + expectedSize := fmt.Sprintf("%d B", (2 + baseBytes)) + foundSize := lines[1][sizeIndex:] + c.Assert(foundSize, checker.Contains, expectedSize, check.Commentf("Expected size %q, got %q", expectedSize, foundSize)) +} + +func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { + // start exited container + out, _ := dockerCmd(c, "run", "-d", "busybox") + firstID := strings.TrimSpace(out) + + // make sure the exited container is not running + dockerCmd(c, "wait", firstID) + + // start running container + out, _ = dockerCmd(c, "run", "-itd", "busybox") + secondID := strings.TrimSpace(out) + + // filter containers by exited + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID) + + out, _ = dockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, secondID) + + result := dockerCmdWithTimeout(time.Second*60, "ps", "-a", "-q", "--filter=status=rubbish") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Unrecognised filter value for status", + }) + + // Windows doesn't support pausing of containers + if daemonPlatform != "windows" { + // pause running container + out, _ = dockerCmd(c, "run", "-itd", "busybox") + pausedID := strings.TrimSpace(out) + dockerCmd(c, "pause", pausedID) + // make sure the container is unpaused to let the daemon stop it properly + defer func() { dockerCmd(c, "unpause", pausedID) }() + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, pausedID) + } +} + +func (s *DockerSuite) TestPsListContainersFilterHealth(c *check.C) { + // Test legacy no health check + out, _ := runSleepingContainer(c, "--name=none_legacy") + containerID := strings.TrimSpace(out) + + waitForContainer(containerID) + + out, _ = dockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for legacy none filter, output: %q", containerID, containerOut, out)) + + // Test no health check specified explicitly + out, _ = runSleepingContainer(c, "--name=none", "--no-healthcheck") + containerID = strings.TrimSpace(out) + + waitForContainer(containerID) + + out, _ = dockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for none filter, output: %q", containerID, containerOut, out)) + + // Test failing health check + out, _ = runSleepingContainer(c, "--name=failing_container", "--health-cmd=exit 1", "--health-interval=1s") + containerID = strings.TrimSpace(out) + + waitForHealthStatus(c, "failing_container", "starting", "unhealthy") + + out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=unhealthy") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for unhealthy filter, output: %q", containerID, containerOut, out)) + + // Check passing healthcheck + out, _ = runSleepingContainer(c, "--name=passing_container", "--health-cmd=exit 0", "--health-interval=1s") + containerID = strings.TrimSpace(out) + + waitForHealthStatus(c, "passing_container", "starting", "healthy") + + out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for healthy filter, output: %q", containerID, containerOut, out)) +} + +func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) { + // start container + out, _ := dockerCmd(c, "run", "-d", "busybox") + firstID := strings.TrimSpace(out) + + // start another container + runSleepingContainer(c) + + // filter containers by id + out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=id="+firstID) + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID[:12], check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)) +} + +func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { + // start container + dockerCmd(c, "run", "--name=a_name_to_match", "busybox") + id, err := getIDByName("a_name_to_match") + c.Assert(err, check.IsNil) + + // start another container + runSleepingContainer(c, "--name=b_name_to_match") + + // filter containers by name + out, _ := dockerCmd(c, "ps", "-a", "-q", "--filter=name=a_name_to_match") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s for exited filter, output: %q", id[:12], containerOut, out)) +} + +// Test for the ancestor filter for ps. +// There is also the same test but with image:tag@digest in docker_cli_by_digest_test.go +// +// What the test setups : +// - Create 2 image based on busybox using the same repository but different tags +// - Create an image based on the previous image (images_ps_filter_test2) +// - Run containers for each of those image (busybox, images_ps_filter_test1, images_ps_filter_test2) +// - Filter them out :P +func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { + // Build images + imageName1 := "images_ps_filter_test1" + imageID1, err := buildImage(imageName1, + `FROM busybox + LABEL match me 1`, true) + c.Assert(err, checker.IsNil) + + imageName1Tagged := "images_ps_filter_test1:tag" + imageID1Tagged, err := buildImage(imageName1Tagged, + `FROM busybox + LABEL match me 1 tagged`, true) + c.Assert(err, checker.IsNil) + + imageName2 := "images_ps_filter_test2" + imageID2, err := buildImage(imageName2, + fmt.Sprintf(`FROM %s + LABEL match me 2`, imageName1), true) + c.Assert(err, checker.IsNil) + + // start containers + dockerCmd(c, "run", "--name=first", "busybox", "echo", "hello") + firstID, err := getIDByName("first") + c.Assert(err, check.IsNil) + + // start another container + dockerCmd(c, "run", "--name=second", "busybox", "echo", "hello") + secondID, err := getIDByName("second") + c.Assert(err, check.IsNil) + + // start third container + dockerCmd(c, "run", "--name=third", imageName1, "echo", "hello") + thirdID, err := getIDByName("third") + c.Assert(err, check.IsNil) + + // start fourth container + dockerCmd(c, "run", "--name=fourth", imageName1Tagged, "echo", "hello") + fourthID, err := getIDByName("fourth") + c.Assert(err, check.IsNil) + + // start fifth container + dockerCmd(c, "run", "--name=fifth", imageName2, "echo", "hello") + fifthID, err := getIDByName("fifth") + c.Assert(err, check.IsNil) + + var filterTestSuite = []struct { + filterName string + expectedIDs []string + }{ + // non existent stuff + {"nonexistent", []string{}}, + {"nonexistent:tag", []string{}}, + // image + {"busybox", []string{firstID, secondID, thirdID, fourthID, fifthID}}, + {imageName1, []string{thirdID, fifthID}}, + {imageName2, []string{fifthID}}, + // image:tag + {fmt.Sprintf("%s:latest", imageName1), []string{thirdID, fifthID}}, + {imageName1Tagged, []string{fourthID}}, + // short-id + {stringid.TruncateID(imageID1), []string{thirdID, fifthID}}, + {stringid.TruncateID(imageID2), []string{fifthID}}, + // full-id + {imageID1, []string{thirdID, fifthID}}, + {imageID1Tagged, []string{fourthID}}, + {imageID2, []string{fifthID}}, + } + + var out string + for _, filter := range filterTestSuite { + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName) + checkPsAncestorFilterOutput(c, out, filter.filterName, filter.expectedIDs) + } + + // Multiple ancestor filter + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged) + checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) +} + +func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) { + actualIDs := []string{} + if out != "" { + actualIDs = strings.Split(out[:len(out)-1], "\n") + } + sort.Strings(actualIDs) + sort.Strings(expectedIDs) + + c.Assert(actualIDs, checker.HasLen, len(expectedIDs), check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v:%v, got %v:%v", filterName, len(expectedIDs), expectedIDs, len(actualIDs), actualIDs)) + if len(expectedIDs) > 0 { + same := true + for i := range expectedIDs { + if actualIDs[i] != expectedIDs[i] { + c.Logf("%s, %s", actualIDs[i], expectedIDs[i]) + same = false + break + } + } + c.Assert(same, checker.Equals, true, check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v, got %v", filterName, expectedIDs, actualIDs)) + } +} + +func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) { + // start container + dockerCmd(c, "run", "--name=first", "-l", "match=me", "-l", "second=tag", "busybox") + firstID, err := getIDByName("first") + c.Assert(err, check.IsNil) + + // start another container + dockerCmd(c, "run", "--name=second", "-l", "match=me too", "busybox") + secondID, err := getIDByName("second") + c.Assert(err, check.IsNil) + + // start third container + dockerCmd(c, "run", "--name=third", "-l", "nomatch=me", "busybox") + thirdID, err := getIDByName("third") + c.Assert(err, check.IsNil) + + // filter containers by exact match + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels, but expect not found because of AND behavior + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, "", check.Commentf("Expected nothing, got %s for exited filter, output: %q", containerOut, out)) + + // filter containers by exact key + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Contains, firstID) + c.Assert(containerOut, checker.Contains, secondID) + c.Assert(containerOut, checker.Not(checker.Contains), thirdID) +} + +func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { + runSleepingContainer(c, "--name=sleep") + + dockerCmd(c, "run", "--name", "zero1", "busybox", "true") + firstZero, err := getIDByName("zero1") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--name", "zero2", "busybox", "true") + secondZero, err := getIDByName("zero2") + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("run", "--name", "nonzero1", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) + + firstNonZero, err := getIDByName("nonzero1") + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("run", "--name", "nonzero2", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) + secondNonZero, err := getIDByName("nonzero2") + c.Assert(err, checker.IsNil) + + // filter containers by exited=0 + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") + ids := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers got %d: %s", len(ids), out)) + c.Assert(ids[0], checker.Equals, secondZero, check.Commentf("First in list should be %q, got %q", secondZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstZero, check.Commentf("Second in list should be %q, got %q", firstZero, ids[1])) + + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") + ids = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers got %d", len(ids))) + c.Assert(ids[0], checker.Equals, secondNonZero, check.Commentf("First in list should be %q, got %q", secondNonZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstNonZero, check.Commentf("Second in list should be %q, got %q", firstNonZero, ids[1])) + +} + +func (s *DockerSuite) TestPsRightTagName(c *check.C) { + // TODO Investigate further why this fails on Windows to Windows CI + testRequires(c, DaemonIsLinux) + tag := "asybox:shmatest" + dockerCmd(c, "tag", "busybox", tag) + + var id1 string + out, _ := runSleepingContainer(c) + id1 = strings.TrimSpace(string(out)) + + var id2 string + out, _ = runSleepingContainerInImage(c, tag) + id2 = strings.TrimSpace(string(out)) + + var imageID string + out = inspectField(c, "busybox", "Id") + imageID = strings.TrimSpace(string(out)) + + var id3 string + out, _ = runSleepingContainerInImage(c, imageID) + id3 = strings.TrimSpace(string(out)) + + out, _ = dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // skip header + lines = lines[1:] + c.Assert(lines, checker.HasLen, 3, check.Commentf("There should be 3 running container, got %d", len(lines))) + for _, line := range lines { + f := strings.Fields(line) + switch f[0] { + case id1: + c.Assert(f[1], checker.Equals, "busybox", check.Commentf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])) + case id2: + c.Assert(f[1], checker.Equals, tag, check.Commentf("Expected %s tag for id %s, got %s", tag, id2, f[1])) + case id3: + c.Assert(f[1], checker.Equals, imageID, check.Commentf("Expected %s imageID for id %s, got %s", tag, id3, f[1])) + default: + c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3) + } + } +} + +func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { + // Problematic on Windows as it doesn't support links as of Jan 2016 + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "--name=first") + runSleepingContainer(c, "--name=second", "--link=first:first") + + out, _ := dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // strip header + lines = lines[1:] + expected := []string{"second", "first,second/first"} + var names []string + for _, l := range lines { + fields := strings.Fields(l) + names = append(names, fields[len(fields)-1]) + } + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsGroupPortRange(c *check.C) { + // Problematic on Windows as it doesn't support port ranges as of Jan 2016 + testRequires(c, DaemonIsLinux) + portRange := "3850-3900" + dockerCmd(c, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top") + + out, _ := dockerCmd(c, "ps") + + c.Assert(string(out), checker.Contains, portRange, check.Commentf("docker ps output should have had the port range %q: %s", portRange, string(out))) + +} + +func (s *DockerSuite) TestPsWithSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "sizetest", "busybox", "top") + + out, _ := dockerCmd(c, "ps", "--size") + c.Assert(out, checker.Contains, "virtual", check.Commentf("docker ps with --size should show virtual size of container")) +} + +func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + shortCID := cID[:12] + + // Make sure it DOESN'T show up w/o a '-a' for normal 'ps' + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), shortCID, check.Commentf("Should have not seen '%s' in ps output:\n%s", shortCID, out)) + + // Make sure it DOES show up as 'Created' for 'ps -a' + out, _ = dockerCmd(c, "ps", "-a") + + hits := 0 + for _, line := range strings.Split(out, "\n") { + if !strings.Contains(line, shortCID) { + continue + } + hits++ + c.Assert(line, checker.Contains, "Created", check.Commentf("Missing 'Created' on '%s'", line)) + } + + c.Assert(hits, checker.Equals, 1, check.Commentf("Should have seen '%s' in ps -a output once:%d\n%s", shortCID, hits, out)) + + // filter containers by 'create' - note, no -a needed + out, _ = dockerCmd(c, "ps", "-q", "-f", "status=created") + containerOut := strings.TrimSpace(out) + c.Assert(cID, checker.HasPrefix, containerOut) +} + +func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { + // Problematic on Windows as it doesn't support link as of Jan 2016 + testRequires(c, DaemonIsLinux) + //create 2 containers and link them + dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") + dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") + + //use the new format capabilities to only list the names and --no-trunc to get all names + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"parent", "child,parent/linkedone"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with non-truncated names: %v, got: %v", expected, names)) + + //now list without turning off truncation and make sure we only get the non-link names + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + expected = []string{"parent", "child"} + var truncNames []string + truncNames = append(truncNames, lines...) + c.Assert(expected, checker.DeepEquals, truncNames, check.Commentf("Expected array with truncated names: %v, got: %v", expected, truncNames)) +} + +// Test for GitHub issue #21772 +func (s *DockerSuite) TestPsNamesMultipleTime(c *check.C) { + runSleepingContainer(c, "--name=test1") + runSleepingContainer(c, "--name=test2") + + //use the new format capabilities to list the names twice + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Names}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"test2 test2", "test1 test1"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with names displayed twice: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { + // make sure no-container "docker ps" still prints the header row + out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") + c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out)) + + // verify that "docker ps" with a container still prints the header row also + runSleepingContainer(c, "--name=test") + out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") + c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out)) +} + +func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { + config := `{ + "psFormat": "default {{ .ID }}" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := runSleepingContainer(c, "--name=test") + id := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "--config", d, "ps", "-q") + c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out)) +} + +// Test for GitHub issue #12595 +func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { + // TODO: Investigate why this fails on Windows to Windows CI further. + testRequires(c, DaemonIsLinux) + originalImageName := "busybox:TestPsImageIDAfterUpdate-original" + updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated" + + runCmd := exec.Command(dockerBinary, "tag", "busybox:latest", originalImageName) + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + + originalImageID, err := getIDByName(originalImageName) + c.Assert(err, checker.IsNil) + + runCmd = exec.Command(dockerBinary, append([]string{"run", "-d", originalImageName}, sleepCommandForDaemonPlatform()...)...) + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + containerID := strings.TrimSpace(out) + + linesOut, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() + c.Assert(err, checker.IsNil) + + lines := strings.Split(strings.TrimSpace(string(linesOut)), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageName) + } + + runCmd = exec.Command(dockerBinary, "commit", containerID, updatedImageName) + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + + runCmd = exec.Command(dockerBinary, "tag", updatedImageName, originalImageName) + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + + linesOut, err = exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() + c.Assert(err, checker.IsNil) + + lines = strings.Split(strings.TrimSpace(string(linesOut)), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageID) + } + +} + +func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name=foo", "-d", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + out, _ := dockerCmd(c, "ps") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := "0.0.0.0:5000->5000/tcp" + fields := strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Equals, expected, check.Commentf("Expected: %v, got: %v", expected, fields[len(fields)-2])) + + dockerCmd(c, "kill", "foo") + dockerCmd(c, "wait", "foo") + out, _ = dockerCmd(c, "ps", "-l") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + fields = strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Not(checker.Equals), expected, check.Commentf("Should not got %v", expected)) +} + +func (s *DockerSuite) TestPsShowMounts(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + mp := prefix + slash + "test" + + dockerCmd(c, "volume", "create", "ps-volume-test") + // volume mount containers + runSleepingContainer(c, "--name=volume-test-1", "--volume", "ps-volume-test:"+mp) + c.Assert(waitRun("volume-test-1"), checker.IsNil) + runSleepingContainer(c, "--name=volume-test-2", "--volume", mp) + c.Assert(waitRun("volume-test-2"), checker.IsNil) + // bind mount container + var bindMountSource string + var bindMountDestination string + if DaemonIsWindows.Condition() { + bindMountSource = "c:\\" + bindMountDestination = "c:\\t" + } else { + bindMountSource = "/tmp" + bindMountDestination = "/t" + } + runSleepingContainer(c, "--name=bind-mount-test", "-v", bindMountSource+":"+bindMountDestination) + c.Assert(waitRun("bind-mount-test"), checker.IsNil) + + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}") + + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 3) + + fields := strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + fields = strings.Fields(lines[1]) + c.Assert(fields, checker.HasLen, 2) + + annonymounsVolumeID := fields[1] + + fields = strings.Fields(lines[2]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by volume name + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test") + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // empty results filtering by unknown volume + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=this-volume-should-not-exist") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) + + // filter by mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 2) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, annonymounsVolumeID) + fields = strings.Fields(lines[1]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by bind mount source + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // filter by bind mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // empty results filtering by unknown mount point + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+prefix+slash+"this-path-was-never-mounted") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) +} + +func (s *DockerSuite) TestPsFormatSize(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c) + + out, _ := dockerCmd(c, "ps", "--format", "table {{.Size}}") + lines := strings.Split(out, "\n") + c.Assert(lines[1], checker.Not(checker.Equals), "0 B", check.Commentf("Should not display a size of 0 B")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "table {{.Size}}") + lines = strings.Split(out, "\n") + c.Assert(lines[0], checker.Equals, "SIZE", check.Commentf("Should only have one size column")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "raw") + lines = strings.Split(out, "\n") + c.Assert(lines[8], checker.HasPrefix, "size:", check.Commentf("Size should be appended on a newline")) +} + +func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { + // TODO default network on Windows is not called "bridge", and creating a + // custom network fails on Windows fails with "Error response from daemon: plugin not found") + testRequires(c, DaemonIsLinux) + + // create some containers + runSleepingContainer(c, "--net=bridge", "--name=onbridgenetwork") + runSleepingContainer(c, "--net=none", "--name=onnonenetwork") + + // Filter docker ps on non existing network + out, _ := dockerCmd(c, "ps", "--filter", "network=doesnotexist") + containerOut := strings.TrimSpace(string(out)) + lines := strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have no containers + c.Assert(lines, checker.HasLen, 0) + + // Filter docker ps on network bridge + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge") + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have only one container + c.Assert(lines, checker.HasLen, 1) + + // Making sure onbridgenetwork is on the output + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) + + // Filter docker ps on networks bridge and none + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge", "--filter", "network=none") + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + //ps output should have both the containers + c.Assert(lines, checker.HasLen, 2) + + // Making sure onbridgenetwork and onnonenetwork is on the output + c.Assert(containerOut, checker.Contains, "onnonenetwork", check.Commentf("Missing the container on none network\n")) + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on bridge network\n")) + + nwID, _ := dockerCmd(c, "network", "inspect", "--format", "{{.ID}}", "bridge") + + // Filter by network ID + out, _ = dockerCmd(c, "ps", "--filter", "network="+nwID) + containerOut = strings.TrimSpace(string(out)) + + c.Assert(containerOut, checker.Contains, "onbridgenetwork") +} + +func (s *DockerSuite) TestPsByOrder(c *check.C) { + name1 := "xyz-abc" + out, err := runSleepingContainer(c, "--name", name1) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + container1 := strings.TrimSpace(out) + + name2 := "xyz-123" + out, err = runSleepingContainer(c, "--name", name2) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + container2 := strings.TrimSpace(out) + + name3 := "789-abc" + out, err = runSleepingContainer(c, "--name", name3) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + name4 := "789-123" + out, err = runSleepingContainer(c, "--name", name4) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // Run multiple time should have the same result + out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) + + // Run multiple time should have the same result + out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) +} + +func (s *DockerSuite) TestPsFilterMissingArgErrorCode(c *check.C) { + _, errCode, _ := dockerCmdWithError("ps", "--filter") + c.Assert(errCode, checker.Equals, 125) +} + +// Test case for 30291 +func (s *DockerSuite) TestPsFormatTemplateWithArg(c *check.C) { + runSleepingContainer(c, "-d", "--name", "top", "--label", "some.label=label.foo-bar") + out, _ := dockerCmd(c, "ps", "--format", `{{.Names}} {{.Label "some.label"}}`) + c.Assert(strings.TrimSpace(out), checker.Equals, "top label.foo-bar") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go new file mode 100644 index 0000000..cb14c2c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go @@ -0,0 +1,492 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other +// tags for the same image) are not also pulled down. +// +// Ref: docker/docker#8141 +func testPullImageWithAliases(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh"} { + repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) + } + + // Tag and push the same image multiple times. + for _, repo := range repos { + dockerCmd(c, "tag", "busybox", repo) + dockerCmd(c, "push", repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Pull a single tag and verify it doesn't bring down all aliases. + dockerCmd(c, "pull", repos[0]) + dockerCmd(c, "inspect", repos[0]) + for _, repo := range repos[1:] { + _, _, err := dockerCmdWithError("inspect", repo) + c.Assert(err, checker.NotNil, check.Commentf("Image %v shouldn't have been pulled down", repo)) + } +} + +func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +// testConcurrentPullWholeRepo pulls the same repo concurrently. +func testConcurrentPullWholeRepo(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + _, err := buildImage(repo, fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo), true) + c.Assert(err, checker.IsNil) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Run multiple re-pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", "-a", repoName)) + results <- err + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +// testConcurrentFailingPull tries a concurrent pull that doesn't succeed. +func testConcurrentFailingPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + // Run multiple pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repoName+":asdfasdf")) + results <- err + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.NotNil, check.Commentf("expected pull to fail")) + } +} + +func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +// testConcurrentPullMultipleTags pulls multiple tags from the same repo +// concurrently. +func testConcurrentPullMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + _, err := buildImage(repo, fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo), true) + c.Assert(err, checker.IsNil) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Re-pull individual tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repo)) + results <- err + }(repo) + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +// testPullIDStability verifies that pushing an image and pulling it back +// preserves the image ID. +func testPullIDStability(c *check.C) { + derivedImage := privateRegistryURL + "/dockercli/id-stability" + baseImage := "busybox" + + _, err := buildImage(derivedImage, fmt.Sprintf(` + FROM %s + ENV derived true + ENV asdf true + RUN dd if=/dev/zero of=/file bs=1024 count=1024 + CMD echo %s + `, baseImage, derivedImage), true) + if err != nil { + c.Fatal(err) + } + + originalID, err := getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + dockerCmd(c, "push", derivedImage) + + // Pull + out, _ := dockerCmd(c, "pull", derivedImage) + if strings.Contains(out, "Pull complete") { + c.Fatalf("repull redownloaded a layer: %s", out) + } + + derivedIDAfterPull, err := getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + // Make sure the image runs correctly + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } + + // Confirm that repushing and repulling does not change the computed ID + dockerCmd(c, "push", derivedImage) + dockerCmd(c, "rmi", derivedImage) + dockerCmd(c, "pull", derivedImage) + + derivedIDAfterPull, err = getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + // Make sure the image still runs + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } +} + +func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +// #21213 +func testPullNoLayers(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL) + + _, err := buildImage(repoName, ` + FROM scratch + ENV foo bar`, + true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + dockerCmd(c, "pull", repoName) +} + +func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { + testRequires(c, NotArm) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Inject a manifest list into the registry + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, + }, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "bogus_arch", + OS: "bogus_os", + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: pushDigest, + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + }, + }, + }, + } + + manifestListJSON, err := json.MarshalIndent(manifestList, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list")) + + manifestListDigest := digest.FromBytes(manifestListJSON) + hexDigest := manifestListDigest.Hex() + + registryV2Path := filepath.Join(s.reg.dir, "docker", "registry", "v2") + + // Write manifest list to blob store + blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest) + err = os.MkdirAll(blobDir, 0755) + c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir")) + blobPath := filepath.Join(blobDir, "data") + err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list")) + + // Add to revision store + revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest) + err = os.Mkdir(revisionDir, 0755) + c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir")) + revisionPath := filepath.Join(revisionDir, "link") + err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing revision link")) + + // Update tag + tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link") + err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing tag link")) + + // Verify that the image can be pulled through the manifest list. + out, _ := dockerCmd(c, "pull", repoName) + + // The pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // Make sure the pushed and pull digests match + c.Assert(manifestListDigest.String(), checker.Equals, pullDigest) + + // Was the image actually created? + dockerCmd(c, "inspect", repoName) + + dockerCmd(c, "rmi", repoName) +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithScheme(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, "https://"+privateRegistryURL) + dockerCmd(c, "--config", tmp, "pull", repoName) + + // likewise push should work + repoName2 := fmt.Sprintf("%v/dockercli/busybox:nocreds", privateRegistryURL) + dockerCmd(c, "tag", repoName, repoName2) + dockerCmd(c, "--config", tmp, "push", repoName2) + + // logout should work w scheme also because it will be stripped + dockerCmd(c, "--config", tmp, "logout", "https://"+privateRegistryURL) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "pull", repoName) +} + +// TestRunImplicitPullWithNoTag should pull implicitly only the default tag (latest) +func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repo := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v:latest", repo) + repoTag2 := fmt.Sprintf("%v:t1", repo) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + dockerCmd(c, "tag", "busybox", repoTag2) + dockerCmd(c, "push", repo) + dockerCmd(c, "rmi", repoTag1) + dockerCmd(c, "rmi", repoTag2) + + out, _, err := dockerCmdWithError("run", repo) + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Unable to find image '%s:latest' locally", repo)) + + // There should be only one line for repo, the one with repo:latest + outImageCmd, _, err := dockerCmdWithError("images", repo) + splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go new file mode 100644 index 0000000..a0118a8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go @@ -0,0 +1,274 @@ +package main + +import ( + "fmt" + "regexp" + "strings" + "sync" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// TestPullFromCentralRegistry pulls an image from the central registry and verifies that the client +// prints all expected output. +func (s *DockerHubPullSuite) TestPullFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + out := s.Cmd(c, "pull", "hello-world") + defer deleteImages("hello-world") + + c.Assert(out, checker.Contains, "Using default tag: latest", check.Commentf("expected the 'latest' tag to be automatically assumed")) + c.Assert(out, checker.Contains, "Pulling from library/hello-world", check.Commentf("expected the 'library/' prefix to be automatically assumed")) + c.Assert(out, checker.Contains, "Downloaded newer image for hello-world:latest") + + matches := regexp.MustCompile(`Digest: (.+)\n`).FindAllStringSubmatch(out, -1) + c.Assert(len(matches), checker.Equals, 1, check.Commentf("expected exactly one image digest in the output")) + c.Assert(len(matches[0]), checker.Equals, 2, check.Commentf("unexpected number of submatches for the digest")) + _, err := digest.ParseDigest(matches[0][1]) + c.Check(err, checker.IsNil, check.Commentf("invalid digest %q in output", matches[0][1])) + + // We should have a single entry in images. + img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullNonExistingImage pulls non-existing images from the central registry, with different +// combinations of implicit tag and library prefix. +func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { + testRequires(c, DaemonIsLinux) + + type entry struct { + repo string + alias string + tag string + } + + entries := []entry{ + {"asdfasdf", "asdfasdf", "foobar"}, + {"asdfasdf", "library/asdfasdf", "foobar"}, + {"asdfasdf", "asdfasdf", ""}, + {"asdfasdf", "asdfasdf", "latest"}, + {"asdfasdf", "library/asdfasdf", ""}, + {"asdfasdf", "library/asdfasdf", "latest"}, + } + + // The option field indicates "-a" or not. + type record struct { + e entry + option string + out string + err error + } + + // Execute 'docker pull' in parallel, pass results (out, err) and + // necessary information ("-a" or not, and the image name) to channel. + var group sync.WaitGroup + recordChan := make(chan record, len(entries)*2) + for _, e := range entries { + group.Add(1) + go func(e entry) { + defer group.Done() + repoName := e.alias + if e.tag != "" { + repoName += ":" + e.tag + } + out, err := s.CmdWithError("pull", repoName) + recordChan <- record{e, "", out, err} + }(e) + if e.tag == "" { + // pull -a on a nonexistent registry should fall back as well + group.Add(1) + go func(e entry) { + defer group.Done() + out, err := s.CmdWithError("pull", "-a", e.alias) + recordChan <- record{e, "-a", out, err} + }(e) + } + } + + // Wait for completion + group.Wait() + close(recordChan) + + // Process the results (out, err). + for record := range recordChan { + if len(record.option) == 0 { + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found: does not exist or no pull access", record.e.repo), check.Commentf("expected image not found error messages")) + } else { + // pull -a on a nonexistent registry should fall back as well + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found", record.e.repo), check.Commentf("expected image not found error messages")) + c.Assert(record.out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) + } + } + +} + +// TestPullFromCentralRegistryImplicitRefParts pulls an image from the central registry and verifies +// that pulling the same image with different combinations of implicit elements of the the image +// reference (tag, repository, central registry url, ...) doesn't trigger a new pull nor leads to +// multiple images. +func (s *DockerHubPullSuite) TestPullFromCentralRegistryImplicitRefParts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Pull hello-world from v2 + pullFromV2 := func(ref string) (int, string) { + out := s.Cmd(c, "pull", "hello-world") + v1Retries := 0 + for strings.Contains(out, "this image was pulled from a legacy registry") { + // Some network errors may cause fallbacks to the v1 + // protocol, which would violate the test's assumption + // that it will get the same images. To make the test + // more robust against these network glitches, allow a + // few retries if we end up with a v1 pull. + + if v1Retries > 2 { + c.Fatalf("too many v1 fallback incidents when pulling %s", ref) + } + + s.Cmd(c, "rmi", ref) + out = s.Cmd(c, "pull", ref) + + v1Retries++ + } + + return v1Retries, out + } + + pullFromV2("hello-world") + defer deleteImages("hello-world") + + s.Cmd(c, "tag", "hello-world", "hello-world-backup") + + for _, ref := range []string{ + "hello-world", + "hello-world:latest", + "library/hello-world", + "library/hello-world:latest", + "docker.io/library/hello-world", + "index.docker.io/library/hello-world", + } { + var out string + for { + var v1Retries int + v1Retries, out = pullFromV2(ref) + + // Keep repeating the test case until we don't hit a v1 + // fallback case. We won't get the right "Image is up + // to date" message if the local image was replaced + // with one pulled from v1. + if v1Retries == 0 { + break + } + s.Cmd(c, "rmi", ref) + s.Cmd(c, "tag", "hello-world-backup", "hello-world") + } + c.Assert(out, checker.Contains, "Image is up to date for hello-world:latest") + } + + s.Cmd(c, "rmi", "hello-world-backup") + + // We should have a single entry in images. + img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullScratchNotAllowed verifies that pulling 'scratch' is rejected. +func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) { + testRequires(c, DaemonIsLinux) + out, err := s.CmdWithError("pull", "scratch") + c.Assert(err, checker.NotNil, check.Commentf("expected pull of scratch to fail")) + c.Assert(out, checker.Contains, "'scratch' is a reserved name") + c.Assert(out, checker.Not(checker.Contains), "Pulling repository scratch") +} + +// TestPullAllTagsFromCentralRegistry pulls using `all-tags` for a given image and verifies that it +// results in more images than a naked pull. +func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + s.Cmd(c, "pull", "busybox") + outImageCmd := s.Cmd(c, "images", "busybox") + splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) + + s.Cmd(c, "pull", "--all-tags=true", "busybox") + outImageAllTagCmd := s.Cmd(c, "images", "busybox") + linesCount := strings.Count(outImageAllTagCmd, "\n") + c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd)) + + // Verify that the line for 'busybox:latest' is left unchanged. + var latestLine string + for _, line := range strings.Split(outImageAllTagCmd, "\n") { + if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") { + latestLine = line + break + } + } + c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags")) + splitLatest := strings.Fields(latestLine) + splitCurrent := strings.Fields(splitOutImageCmd[1]) + + // Clear relative creation times, since these can easily change between + // two invocations of "docker images". Without this, the test can fail + // like this: + // ... obtained []string = []string{"busybox", "latest", "d9551b4026f0", "27", "minutes", "ago", "1.113", "MB"} + // ... expected []string = []string{"busybox", "latest", "d9551b4026f0", "26", "minutes", "ago", "1.113", "MB"} + splitLatest[3] = "" + splitLatest[4] = "" + splitLatest[5] = "" + splitCurrent[3] = "" + splitCurrent[4] = "" + splitCurrent[5] = "" + + c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags")) +} + +// TestPullClientDisconnect kills the client during a pull operation and verifies that the operation +// gets cancelled. +// +// Ref: docker/docker#15589 +func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "hello-world:latest" + + pullCmd := s.MakeCmd("pull", repoName) + stdout, err := pullCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + err = pullCmd.Start() + c.Assert(err, checker.IsNil) + + // Cancel as soon as we get some output. + buf := make([]byte, 10) + _, err = stdout.Read(buf) + c.Assert(err, checker.IsNil) + + err = pullCmd.Process.Kill() + c.Assert(err, checker.IsNil) + + time.Sleep(2 * time.Second) + _, err = s.CmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected")) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) { + // we don't care about the actual image, we just want to see image not found + // because that means v2 call returned 401 and we fell back to v1 which usually + // gives a 404 (in this case the test registry doesn't handle v1 at all) + out, _, err := dockerCmdWithError("pull", privateRegistryURL+"/busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image busybox:latest not found") +} + +// Regression test for https://github.com/docker/docker/issues/26429 +func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) { + testRequires(c, DaemonIsWindows, Network) + _, _, err := dockerCmdWithError("pull", "ubuntu") + c.Assert(err.Error(), checker.Contains, "cannot be used on this platform") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go new file mode 100644 index 0000000..96a42d6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go @@ -0,0 +1,365 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-pull") + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) + + dockerCmd(c, "rmi", repoName) + // Try untrusted pull to ensure we pushed the tag to the registry + pullCmd = exec.Command(dockerBinary, "pull", "--disable-content-trust=true", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) + +} + +func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-pull") + + // Try pull (run from isolated directory without trust information) + pullCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(string(out))) + + dockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + // Try trusted pull on untrusted tag + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-cert-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf(out)) + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", "--disable-content-trust", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) + }) +} + +func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) + dockerCmd(c, "rmi", repoName) + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + + c.Assert(err, check.IsNil, check.Commentf("Restarting notary server failed.")) + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) + + // Now, try pulling with the original client from this new trust server. This should fail because the new root is invalid. + pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + if err == nil { + c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) + } + if !strings.Contains(out, "could not rotate trust to a new trusted root") { + c.Fatalf("Missing expected output on trusted pull:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppull/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) + + dockerCmd(c, "rmi", repoName) + + // Snapshots last for three years. This should be expired + fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) + + runAtDifferentDate(fourYearsLater, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf("Missing expected error running trusted pull with expired snapshots")) + c.Assert(string(out), checker.Contains, "repository out-of-date", check.Commentf(out)) + }) +} + +func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-offline-pull") + + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "error contacting notary server", check.Commentf(out)) + // Do valid trusted pull to warm cache + pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) + + dockerCmd(c, "rmi", repoName) + + // Try pull again with invalid notary server, should use cache + pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") + // tag the image and upload it to the private registry + _, err := buildImage(repoName, ` + FROM busybox + CMD echo trustedpulldelete + `, true) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "rmi", repoName); status != 0 { + c.Fatalf("Error removing image %q\n%s", repoName, out) + } + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + imageID := inspectField(c, repoName, "Id") + + imageByDigest := repoName + "@" + pullDigest + byDigestID := inspectField(c, imageByDigest, "Id") + + c.Assert(byDigestID, checker.Equals, imageID) + + // rmi of tag should also remove the digest reference + dockerCmd(c, "rmi", repoName) + + _, err = inspectFieldWithError(imageByDigest, "Id") + c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasesdelegationpulling/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // Push with targets first, initializing the repo + dockerCmd(c, "tag", "busybox", targetName) + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.assertTargetInRoles(c, repoName, "latest", "targets") + + // Try pull, check we retrieve from targets role + pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "retrieving target for targets role") + + // Now we'll create the releases role, and try pushing and pulling + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // try a pull, check that we can still pull because we can still read the + // old tag in the targets role + pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "retrieving target for targets role") + + // try a pull -a, check that it succeeds because we can still pull from the + // targets role + pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Push, should sign with targets/releases + dockerCmd(c, "tag", "busybox", targetName) + pushCmd = exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases") + + // Try pull, check we retrieve from targets/releases role + pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(out, checker.Contains, "retrieving target for targets/releases role") + + // Create another delegation that we'll sign with + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[1].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[1].Private) + s.notaryPublish(c, repoName) + + dockerCmd(c, "tag", "busybox", targetName) + pushCmd = exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases", "targets/other") + + // Try pull, check we retrieve from targets/releases role + pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(out, checker.Contains, "retrieving target for targets/releases role") +} + +func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclipullotherdelegation/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // We'll create a repo first with a non-release delegation role, so that when we + // push we'll sign it into the delegation role + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // Push should write to the delegation role, not targets + dockerCmd(c, "tag", "busybox", targetName) + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.assertTargetInRoles(c, repoName, "latest", "targets/other") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull - we should fail, since pull will only pull from the targets/releases + // role or the targets role + pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No trust data for") + + // try a pull -a: we should fail since pull will only pull from the targets/releases + // role or the targets role + pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No trusted tags for") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go new file mode 100644 index 0000000..f750c12 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go @@ -0,0 +1,715 @@ +package main + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Pushing an image to a private registry. +func testPushBusyboxImage(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) +} + +func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +// pushing an image without a prefix should throw an error +func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { + out, _, err := dockerCmdWithError("push", "busybox") + c.Assert(err, check.NotNil, check.Commentf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out)) +} + +func testPushUntagged(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + expected := "An image does not exist locally with the tag" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func testPushBadTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) + expected := "does not exist" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { + testPushBadTag(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) { + testPushBadTag(c) +} + +func testPushMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) + repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + + dockerCmd(c, "tag", "busybox", repoTag2) + + dockerCmd(c, "push", repoName) + + // Ensure layer list is equivalent for repoTag1 and repoTag2 + out1, _ := dockerCmd(c, "pull", repoTag1) + + imageAlreadyExists := ": Image already exists" + var out1Lines []string + for _, outputLine := range strings.Split(out1, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + + out2, _ := dockerCmd(c, "pull", repoTag2) + + var out2Lines []string + for _, outputLine := range strings.Split(out2, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + c.Assert(out2Lines, checker.HasLen, len(out1Lines)) + + for i := range out1Lines { + c.Assert(out1Lines[i], checker.Equals, out2Lines[i]) + } +} + +func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func testPushEmptyLayer(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) + emptyTarball, err := ioutil.TempFile("", "empty_tarball") + c.Assert(err, check.IsNil, check.Commentf("Unable to create test file")) + + tw := tar.NewWriter(emptyTarball) + err = tw.Close() + c.Assert(err, check.IsNil, check.Commentf("Error creating empty tarball")) + + freader, err := os.Open(emptyTarball.Name()) + c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball")) + defer freader.Close() + + importCmd := exec.Command(dockerBinary, "import", "-", repoName) + importCmd.Stdin = freader + out, _, err := runCommandWithOutput(importCmd) + c.Assert(err, check.IsNil, check.Commentf("import failed: %q", out)) + + // Now verify we can push it + out, _, err = dockerCmdWithError("push", repoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) +} + +func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +// testConcurrentPush pushes multiple tags to the same repo +// concurrently. +func testConcurrentPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"push1", "push2", "push3"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + _, err := buildImage(repo, fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s +`, repo), true) + c.Assert(err, checker.IsNil) + repos = append(repos, repo) + } + + // Push tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "push", repo)) + results <- err + }(repo) + } + + for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent push failed with error: %v", err)) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Re-pull and run individual tags, to make sure pushes succeeded + for _, repo := range repos { + dockerCmd(c, "pull", repo) + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // ensure that layers were mounted from the first repo during push + c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Equals, digest2) + + // ensure that pushing again produces the same digest + out3, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + + digest3 := reference.DigestRegexp.FindString(out3) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest3, check.Equals, digest2) + + // ensure that we can pull and run the cross-repo-pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out4, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out4, check.Equals, "hello world") +} + +func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen + c.Assert(strings.Contains(out2, "Mounted from"), check.Equals, false) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Not(check.Equals), digest2) + + // ensure that we can pull and run the second pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out3, check.Equals, "hello world") +} + +func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // Try pull after push + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) + + // Assert that we rotated the snapshot key to the server by checking our local keystore + contents, err := ioutil.ReadDir(filepath.Join(cliconfig.ConfigDir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) + c.Assert(err, check.IsNil, check.Commentf("Unable to read local tuf key files")) + // Check that we only have 1 key (targets key) + c.Assert(contents, checker.HasLen, 1) +} + +func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmdWithPassphrases(pushCmd, "12345678", "12345678") + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // Try pull after push + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + // Using a name that doesn't resolve to an address makes this test faster + s.trustedCmdWithServer(pushCmd, "https://server.invalid:81/") + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("Missing error while running trusted push w/ no server")) + c.Assert(out, checker.Contains, "error contacting notary server", check.Commentf("Missing expected output on trusted push")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", "--disable-content-trust", repoName) + // Using a name that doesn't resolve to an address makes this test faster + s.trustedCmdWithServer(pushCmd, "https://server.invalid") + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push with no server and --disable-content-trust failed: %s\n%s", err, out)) + c.Assert(out, check.Not(checker.Contains), "Error establishing connection to notary repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // Try pull after push + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Do a trusted push + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // Do another trusted push + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + dockerCmd(c, "rmi", repoName) + + // Try pull to ensure the double push did not break our ability to pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted pull: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted pull with --disable-content-trust")) + +} + +func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + // Push with wrong passphrases + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321") + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with short targets passphrase: \n%s", out)) + c.Assert(out, checker.Contains, "could not find necessary signing keys", check.Commentf("Missing expected output on trusted push with short targets/snapsnot passphrase")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExpiredSnapshot(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredsnapshot/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // Snapshots last for three years. This should be expired + fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) + + runAtDifferentDate(fourYearsLater, func() { + // Push with wrong passphrases + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with expired snapshot: \n%s", out)) + c.Assert(out, checker.Contains, "repository out-of-date", check.Commentf("Missing expected output on trusted push with expired snapshot")) + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExpiredTimestamp(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // The timestamps expire in two weeks. Lets check three + threeWeeksLater := time.Now().Add(time.Hour * 24 * 21) + + // Should succeed because the server transparently re-signs one + runAtDifferentDate(threeWeeksLater, func() { + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with expired timestamp")) + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationinitfirst/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + // check to make sure that the target has been added to targets/releases and not targets + s.assertTargetInRoles(c, repoName, "latest", "targets/releases") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + + pullCmd := exec.Command(dockerBinary, "pull", targetName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclimanyroles/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public) + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public) + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + + s.notaryCreateDelegation(c, repoName, "targets/role1/subrole", s.not.keys[3].Public) + s.notaryImportKey(c, repoName, "targets/role1/subrole", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // check to make sure that the target has been added to targets/role1 and targets/role2, and + // not targets (because there are delegations) or targets/role3 (due to missing key) or + // targets/role1/subrole (due to it being a second level delegation) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role2") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + + // pull should fail because none of these are the releases role + pullCmd := exec.Command(dockerBinary, "pull", targetName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclirolesbykeysandpaths/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public, "l", "z") + s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public, "x", "y") + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public, "latest") + s.notaryCreateDelegation(c, repoName, "targets/role4", s.not.keys[3].Public, "latest") + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + s.notaryImportKey(c, repoName, "targets/role4", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // check to make sure that the target has been added to targets/role1 and targets/role4, and + // not targets (because there are delegations) or targets/role2 (due to path restrictions) or + // targets/role3 (due to missing key) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role4") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + + // pull should fail because none of these are the releases role + pullCmd := exec.Command(dockerBinary, "pull", targetName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushDoesntSignTargetsIfDelegationsExist(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationnotsignable/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + // do not import any delegations key + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("trusted push succeeded but should have failed:\n%s", out)) + c.Assert(out, checker.Contains, "no valid signing keys", + check.Commentf("Missing expected output on trusted push without keys")) + + s.assertTargetNotInRoles(c, repoName, "latest", "targets", "targets/role1") +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) { + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "Retrying") + c.Assert(out, checker.Contains, "no basic auth credentials") +} + +// This may be flaky but it's needed not to regress on unauthorized push, see #21054 +func (s *DockerSuite) TestPushToCentralRegistryUnauthorized(c *check.C) { + testRequires(c, Network) + repoName := "test/busybox" + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "Retrying") +} + +func getTestTokenService(status int, body string, retries int) *httptest.Server { + var mu sync.Mutex + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + if retries > 0 { + w.WriteHeader(http.StatusServiceUnavailable) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"errors":[{"code":"UNAVAILABLE","message":"cannot create token at this time"}]}`)) + retries-- + } else { + w.WriteHeader(status) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(body)) + } + mu.Unlock() + })) +} + +func (s *DockerRegistryAuthTokenSuite) TestPushTokenServiceUnauthResponse(c *check.C) { + ts := getTestTokenService(http.StatusUnauthorized, `{"errors": [{"Code":"UNAUTHORIZED", "message": "a message", "detail": null}]}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + c.Assert(out, checker.Contains, "unauthorized: a message") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnauthorized(c *check.C) { + ts := getTestTokenService(http.StatusUnauthorized, `{"error": "unauthorized"}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "unauthorized: authentication required") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseError(c *check.C) { + ts := getTestTokenService(http.StatusTooManyRequests, `{"errors": [{"code":"TOOMANYREQUESTS","message":"out of tokens"}]}`, 4) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Retrying") + c.Assert(out, checker.Not(checker.Contains), "Retrying in 15") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "toomanyrequests: out of tokens") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnparsable(c *check.C) { + ts := getTestTokenService(http.StatusForbidden, `no way`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], checker.Contains, "error parsing HTTP 403 response body: ") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseNoToken(c *check.C) { + ts := getTestTokenService(http.StatusOK, `{"something": "wrong"}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "authorization server did not include a token in the response") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go new file mode 100644 index 0000000..fb9a66a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go @@ -0,0 +1,120 @@ +package main + +import ( + "fmt" + "net/http" + "regexp" + + "github.com/go-check/check" +) + +// unescapeBackslashSemicolonParens unescapes \;() +func unescapeBackslashSemicolonParens(s string) string { + re := regexp.MustCompile(`\\;`) + ret := re.ReplaceAll([]byte(s), []byte(";")) + + re = regexp.MustCompile(`\\\(`) + ret = re.ReplaceAll([]byte(ret), []byte("(")) + + re = regexp.MustCompile(`\\\)`) + ret = re.ReplaceAll([]byte(ret), []byte(")")) + + re = regexp.MustCompile(`\\\\`) + ret = re.ReplaceAll([]byte(ret), []byte(`\`)) + + return string(ret) +} + +func regexpCheckUA(c *check.C, ua string) { + re := regexp.MustCompile("(?P.+) UpstreamClient(?P.+)") + substrArr := re.FindStringSubmatch(ua) + + c.Assert(substrArr, check.HasLen, 3, check.Commentf("Expected 'UpstreamClient()' with upstream client UA")) + dockerUA := substrArr[1] + upstreamUAEscaped := substrArr[2] + + // check dockerUA looks correct + reDockerUA := regexp.MustCompile("^docker/[0-9A-Za-z+]") + bMatchDockerUA := reDockerUA.MatchString(dockerUA) + c.Assert(bMatchDockerUA, check.Equals, true, check.Commentf("Docker Engine User-Agent malformed")) + + // check upstreamUA looks correct + // Expecting something like: Docker-Client/1.11.0-dev (linux) + upstreamUA := unescapeBackslashSemicolonParens(upstreamUAEscaped) + reUpstreamUA := regexp.MustCompile("^\\(Docker-Client/[0-9A-Za-z+]") + bMatchUpstreamUA := reUpstreamUA.MatchString(upstreamUA) + c.Assert(bMatchUpstreamUA, check.Equals, true, check.Commentf("(Upstream) Docker Client User-Agent malformed")) +} + +func registerUserAgentHandler(reg *testRegistry, result *string) { + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + var ua string + for k, v := range r.Header { + if k == "User-Agent" { + ua = v[0] + } + } + *result = ua + }) +} + +// TestUserAgentPassThrough verifies that when an image is pulled from +// a registry, the registry should see a User-Agent string of the form +// [docker engine UA] UptreamClientSTREAM-CLIENT([client UA]) +func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *check.C) { + var ( + buildUA string + pullUA string + pushUA string + loginUA string + ) + + buildReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(buildReg, &buildUA) + buildRepoName := fmt.Sprintf("%s/busybox", buildReg.hostport) + + pullReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(pullReg, &pullUA) + pullRepoName := fmt.Sprintf("%s/busybox", pullReg.hostport) + + pushReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(pushReg, &pushUA) + pushRepoName := fmt.Sprintf("%s/busybox", pushReg.hostport) + + loginReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(loginReg, &loginUA) + + err = s.d.Start( + "--insecure-registry", buildReg.hostport, + "--insecure-registry", pullReg.hostport, + "--insecure-registry", pushReg.hostport, + "--insecure-registry", loginReg.hostport, + "--disable-legacy-registry=true") + c.Assert(err, check.IsNil) + + dockerfileName, cleanup1, err := makefile(fmt.Sprintf("FROM %s", buildRepoName)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup1() + s.d.Cmd("build", "--file", dockerfileName, ".") + regexpCheckUA(c, buildUA) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", loginReg.hostport) + regexpCheckUA(c, loginUA) + + s.d.Cmd("pull", pullRepoName) + regexpCheckUA(c, pullUA) + + dockerfileName, cleanup2, err := makefile(`FROM scratch + ENV foo bar`) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup2() + s.d.Cmd("build", "-t", pushRepoName, "--file", dockerfileName, ".") + + s.d.Cmd("push", pushRepoName) + regexpCheckUA(c, pushUA) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go new file mode 100644 index 0000000..373d614 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go @@ -0,0 +1,138 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + name := inspectField(c, cleanedContainerID, "Name") + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name = inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + +} + +func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateNonCryptoID() + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) +} + +func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { + out, _ := runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + + newName := "new_name" + ContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, ContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) + + out, _ = runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + newContainerID := strings.TrimSpace(out) + name = inspectField(c, newContainerID, "Name") + c.Assert(name, checker.Equals, "/first_name", check.Commentf("Failed to reuse container name")) +} + +func (s *DockerSuite) TestRenameCheckNames(c *check.C) { + dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, newName, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + + result := dockerCmdWithResult("inspect", "-f={{.Name}}", "--type=container", "first_name") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such container: first_name", + }) +} + +func (s *DockerSuite) TestRenameInvalidName(c *check.C) { + runSleepingContainer(c, "--name", "myname") + + out, _, err := dockerCmdWithError("rename", "myname", "new:invalid") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "Invalid container name", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "myname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "requires exactly 2 argument(s).", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "myname", "") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "", "newname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container with empty name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Contains, "myname", check.Commentf("Output of docker ps should have included 'myname': %s", out)) +} + +func (s *DockerSuite) TestRenameAnonymousContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "network", "create", "network1") + out, _ := dockerCmd(c, "create", "-it", "--net", "network1", "busybox", "top") + + anonymousContainerID := strings.TrimSpace(out) + + dockerCmd(c, "rename", anonymousContainerID, "container1") + dockerCmd(c, "start", "container1") + + count := "-c" + if daemonPlatform == "windows" { + count = "-n" + } + + _, _, err := dockerCmdWithError("run", "--net", "network1", "busybox", "ping", count, "1", "container1") + c.Assert(err, check.IsNil, check.Commentf("Embedded DNS lookup fails after renaming anonymous container: %v", err)) +} + +func (s *DockerSuite) TestRenameContainerWithSameName(c *check.C) { + out, _ := runSleepingContainer(c, "--name", "old") + ContainerID := strings.TrimSpace(out) + + out, _, err := dockerCmdWithError("rename", "old", "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", ContainerID, "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) +} + +// Test case for #23973 +func (s *DockerSuite) TestRenameContainerWithLinkedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + db1, _ := dockerCmd(c, "run", "--name", "db1", "-d", "busybox", "top") + dockerCmd(c, "run", "--name", "app1", "-d", "--link", "db1:/mysql", "busybox", "top") + dockerCmd(c, "rename", "app1", "app2") + out, _, err := dockerCmdWithError("inspect", "--format={{ .Id }}", "app2/mysql") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(db1)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go new file mode 100644 index 0000000..7d58528 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go @@ -0,0 +1,278 @@ +package main + +import ( + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { + dockerCmd(c, "run", "--name=test", "busybox", "echo", "foobar") + cleanedContainerID, err := getIDByName("test") + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", cleanedContainerID) + + // Wait until the container has stopped + err = waitInspect(cleanedContainerID, "{{.State.Running}}", "false", 20*time.Second) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\nfoobar\n") +} + +func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") + + cleanedContainerID := strings.TrimSpace(out) + + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", "-t", "1", cleanedContainerID) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + c.Assert(out, checker.Equals, "foobar\nfoobar\n") +} + +// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. +func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + out, _ := runSleepingContainer(c, "-d", "-v", prefix+slash+"test") + + cleanedContainerID := strings.TrimSpace(out) + out, err := inspectFilter(cleanedContainerID, "len .Mounts") + c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) + out = strings.Trim(out, " \n\r") + c.Assert(out, checker.Equals, "1") + + source, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "restart", cleanedContainerID) + + out, err = inspectFilter(cleanedContainerID, "len .Mounts") + c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) + out = strings.Trim(out, " \n\r") + c.Assert(out, checker.Equals, "1") + + sourceAfterRestart, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") + c.Assert(err, checker.IsNil) + c.Assert(source, checker.Equals, sourceAfterRestart) +} + +func (s *DockerSuite) TestRestartPolicyNO(c *check.C) { + out, _ := dockerCmd(c, "create", "--restart=no", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + c.Assert(name, checker.Equals, "no") +} + +func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) { + out, _ := dockerCmd(c, "create", "--restart=always", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + c.Assert(name, checker.Equals, "always") + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + // MaximumRetryCount=0 if the restart policy is always + c.Assert(MaximumRetryCount, checker.Equals, "0") +} + +func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) { + out, _, err := dockerCmdWithError("create", "--restart=on-failure:-1", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "maximum retry count cannot be negative") + + out, _ = dockerCmd(c, "create", "--restart=on-failure:1", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "1") + + out, _ = dockerCmd(c, "create", "--restart=on-failure:0", "busybox") + + id = strings.TrimSpace(string(out)) + name = inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "0") + + out, _ = dockerCmd(c, "create", "--restart=on-failure", "busybox") + + id = strings.TrimSpace(string(out)) + name = inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "0") +} + +// a good container with --restart=on-failure:3 +// MaximumRetryCount!=0; RestartCount=0 +func (s *DockerSuite) TestRestartContainerwithGoodContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "true") + + id := strings.TrimSpace(string(out)) + err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 30*time.Second) + c.Assert(err, checker.IsNil) + + count := inspectField(c, id, "RestartCount") + c.Assert(count, checker.Equals, "0") + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(MaximumRetryCount, checker.Equals, "3") + +} + +func (s *DockerSuite) TestRestartContainerSuccess(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := runSleepingContainer(c, "-d", "--restart=always") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + pidStr := inspectField(c, id, "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartWithPolicyUserDefinedNetwork(c *check.C) { + // TODO Windows. This may be portable following HNS integration post TP5. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udNet") + + dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--restart=always", "--net=udNet", "--name=second", + "--link=first:foo", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Now kill the second container and let the restart policy kick in + pidStr := inspectField(c, "second", "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect("second", "{{.RestartCount}}", "1", 5*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect("second", "{{.State.Status}}", "running", 5*time.Second) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartPolicyAfterRestart(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := runSleepingContainer(c, "-d", "--restart=always") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "restart", id) + + c.Assert(waitRun(id), check.IsNil) + + pidStr := inspectField(c, id, "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) { + out1, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + out2, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false") + + id1 := strings.TrimSpace(string(out1)) + id2 := strings.TrimSpace(string(out2)) + waitTimeout := 15 * time.Second + if daemonPlatform == "windows" { + waitTimeout = 150 * time.Second + } + err := waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "restart", id1) + dockerCmd(c, "restart", id2) + + dockerCmd(c, "stop", id1) + dockerCmd(c, "stop", id2) + dockerCmd(c, "start", id1) + dockerCmd(c, "start", id2) +} + +func (s *DockerSuite) TestRestartAutoRemoveContainer(c *check.C) { + out, _ := runSleepingContainer(c, "--rm") + + id := strings.TrimSpace(string(out)) + dockerCmd(c, "restart", id) + err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "ps") + c.Assert(out, checker.Contains, id[:12], check.Commentf("container should be restarted instead of removed: %v", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go new file mode 100644 index 0000000..0186c56 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go @@ -0,0 +1,86 @@ +package main + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { + testRequires(c, SameHostDaemon) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + tempDir, err := ioutil.TempDir("", "test-rm-container-with-removed-volume-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerCmd(c, "run", "--name", "losemyvolumes", "-v", tempDir+":"+prefix+slash+"test", "busybox", "true") + + err = os.RemoveAll(tempDir) + c.Assert(err, check.IsNil) + + dockerCmd(c, "rm", "-v", "losemyvolumes") +} + +func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "--name", "foo", "-v", prefix+slash+"srv", "busybox", "true") + + dockerCmd(c, "rm", "-v", "foo") +} + +func (s *DockerSuite) TestRmContainerRunning(c *check.C) { + createRunningContainer(c, "foo") + + _, _, err := dockerCmdWithError("rm", "foo") + c.Assert(err, checker.NotNil, check.Commentf("Expected error, can't rm a running container")) +} + +func (s *DockerSuite) TestRmContainerForceRemoveRunning(c *check.C) { + createRunningContainer(c, "foo") + + // Stop then remove with -s + dockerCmd(c, "rm", "-f", "foo") +} + +func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { + dockerfile1 := `FROM busybox:latest + ENTRYPOINT ["true"]` + img := "test-container-orphaning" + dockerfile2 := `FROM busybox:latest + ENTRYPOINT ["true"] + MAINTAINER Integration Tests` + + // build first dockerfile + img1, err := buildImage(img, dockerfile1, true) + c.Assert(err, check.IsNil, check.Commentf("Could not build image %s", img)) + // run container on first image + dockerCmd(c, "run", img) + // rebuild dockerfile with a small addition at the end + _, err = buildImage(img, dockerfile2, true) + c.Assert(err, check.IsNil, check.Commentf("Could not rebuild image %s", img)) + // try to remove the image, should not error out. + out, _, err := dockerCmdWithError("rmi", img) + c.Assert(err, check.IsNil, check.Commentf("Expected to removing the image, but failed: %s", out)) + + // check if we deleted the first image + out, _ = dockerCmd(c, "images", "-q", "--no-trunc") + c.Assert(out, checker.Contains, img1, check.Commentf("Orphaned container (could not find %q in docker images): %s", img1, out)) + +} + +func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { + out, _, err := dockerCmdWithError("rm", "unknown") + c.Assert(err, checker.NotNil, check.Commentf("Expected error on rm unknown container, got none")) + c.Assert(out, checker.Contains, "No such container") +} + +func createRunningContainer(c *check.C, name string) { + runSleepingContainer(c, "-dt", "--name", name) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go new file mode 100644 index 0000000..cb16d9d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go @@ -0,0 +1,352 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { + errSubstr := "is using it" + + // create a container + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + // try to delete the image + out, _, err := dockerCmdWithError("rmi", "busybox") + // Container is using image, should not be able to rmi + c.Assert(err, checker.NotNil) + // Container is using image, error message should contain errSubstr + c.Assert(out, checker.Contains, errSubstr, check.Commentf("Container: %q", cleanedContainerID)) + + // make sure it didn't delete the busybox name + images, _ := dockerCmd(c, "images") + // The name 'busybox' should not have been removed from images + c.Assert(images, checker.Contains, "busybox") +} + +func (s *DockerSuite) TestRmiTag(c *check.C) { + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox", "utest:tag1") + dockerCmd(c, "tag", "busybox", "utest/docker:tag2") + dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+3, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest/docker:tag2") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+1, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } + dockerCmd(c, "rmi", "utest:tag1") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n"), check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } +} + +func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'") + + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if daemonPlatform == "windows" { + err := waitExited(containerID, 60*time.Second) + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "commit", containerID, "busybox-one") + + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") + dockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") + + imagesAfter, _ := dockerCmd(c, "images", "-a") + // tag busybox to create 2 more images with same imageID + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("docker images shows: %q\n", imagesAfter)) + + imgID := inspectField(c, "busybox-one:tag1", "Id") + + // run a container with the image + out, _ = runSleepingContainerInImage(c, "busybox-one") + + containerID = strings.TrimSpace(out) + + // first checkout without force it fails + out, _, err := dockerCmdWithError("rmi", imgID) + expected := fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)) + // rmi tagged in multiple repos should have failed without force + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expected) + + dockerCmd(c, "stop", containerID) + dockerCmd(c, "rmi", "-f", imgID) + + imagesAfter, _ = dockerCmd(c, "images", "-a") + // rmi -f failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12], check.Commentf("ImageID:%q; ImagesAfter: %q", imgID, imagesAfter)) +} + +func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'") + + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if daemonPlatform == "windows" { + err := waitExited(containerID, 60*time.Second) + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "commit", containerID, "busybox-test") + + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox-test", "utest:tag1") + dockerCmd(c, "tag", "busybox-test", "utest:tag2") + dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") + dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+4, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + imgID := inspectField(c, "busybox-test", "Id") + + // first checkout without force it fails + out, _, err := dockerCmdWithError("rmi", imgID) + // rmi tagged in multiple repos should have failed without force + c.Assert(err, checker.NotNil) + // rmi tagged in multiple repos should have failed without force + c.Assert(out, checker.Contains, "(must be forced) - image is referenced in multiple repositories", check.Commentf("out: %s; err: %v;", out, err)) + + dockerCmd(c, "rmi", "-f", imgID) + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + // rmi failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12]) + } +} + +// See https://github.com/docker/docker/issues/14116 +func (s *DockerSuite) TestRmiImageIDForceWithRunningContainersAndMultipleTags(c *check.C) { + dockerfile := "FROM busybox\nRUN echo test 14116\n" + imgID, err := buildImage("test-14116", dockerfile, false) + c.Assert(err, checker.IsNil) + + newTag := "newtag" + dockerCmd(c, "tag", imgID, newTag) + runSleepingContainerInImage(c, imgID) + + out, _, err := dockerCmdWithError("rmi", "-f", imgID) + // rmi -f should not delete image with running containers + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "(cannot be forced) - image is being used by running container") +} + +func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { + container := "test-delete-tag" + newtag := "busybox:newtag" + bb := "busybox:latest" + dockerCmd(c, "tag", bb, newtag) + + dockerCmd(c, "run", "--name", container, bb, "/bin/true") + + out, _ := dockerCmd(c, "rmi", newtag) + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1) +} + +func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { + image := "busybox-clone" + + cmd := exec.Command(dockerBinary, "build", "--no-cache", "-t", image, "-") + cmd.Stdin = strings.NewReader(`FROM busybox +MAINTAINER foo`) + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf("Could not build %s: %s", image, out)) + + dockerCmd(c, "run", "--name", "test-force-rmi", image, "/bin/true") + + dockerCmd(c, "rmi", "-f", image) +} + +func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) { + newRepo := "127.0.0.1:5000/busybox" + oldRepo := "busybox" + newTag := "busybox:test" + dockerCmd(c, "tag", oldRepo, newRepo) + + dockerCmd(c, "run", "--name", "test", oldRepo, "touch", "/abcd") + + dockerCmd(c, "commit", "test", newTag) + + out, _ := dockerCmd(c, "rmi", newTag) + c.Assert(out, checker.Contains, "Untagged: "+newTag) +} + +func (s *DockerSuite) TestRmiForceWithMultipleRepositories(c *check.C) { + imageName := "rmiimage" + tag1 := imageName + ":tag1" + tag2 := imageName + ":tag2" + + _, err := buildImage(tag1, + `FROM busybox + MAINTAINER "docker"`, + true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "tag", tag1, tag2) + + out, _ := dockerCmd(c, "rmi", "-f", tag2) + c.Assert(out, checker.Contains, "Untagged: "+tag2) + c.Assert(out, checker.Not(checker.Contains), "Untagged: "+tag1) + + // Check built image still exists + images, _ := dockerCmd(c, "images", "-a") + c.Assert(images, checker.Contains, imageName, check.Commentf("Built image missing %q; Images: %q", imageName, images)) +} + +func (s *DockerSuite) TestRmiBlank(c *check.C) { + out, _, err := dockerCmdWithError("rmi", " ") + // Should have failed to delete ' ' image + c.Assert(err, checker.NotNil) + // Wrong error message generated + c.Assert(out, checker.Not(checker.Contains), "no such id", check.Commentf("out: %s", out)) + // Expected error message not generated + c.Assert(out, checker.Contains, "image name cannot be blank", check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestRmiContainerImageNotFound(c *check.C) { + // Build 2 images for testing. + imageNames := []string{"test1", "test2"} + imageIds := make([]string, 2) + for i, name := range imageNames { + dockerfile := fmt.Sprintf("FROM busybox\nMAINTAINER %s\nRUN echo %s\n", name, name) + id, err := buildImage(name, dockerfile, false) + c.Assert(err, checker.IsNil) + imageIds[i] = id + } + + // Create a long-running container. + runSleepingContainerInImage(c, imageNames[0]) + + // Create a stopped container, and then force remove its image. + dockerCmd(c, "run", imageNames[1], "true") + dockerCmd(c, "rmi", "-f", imageIds[1]) + + // Try to remove the image of the running container and see if it fails as expected. + out, _, err := dockerCmdWithError("rmi", "-f", imageIds[0]) + // The image of the running container should not be removed. + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "image is being used by running container", check.Commentf("out: %s", out)) +} + +// #13422 +func (s *DockerSuite) TestRmiUntagHistoryLayer(c *check.C) { + image := "tmp1" + // Build an image for testing. + dockerfile := `FROM busybox +MAINTAINER foo +RUN echo 0 #layer0 +RUN echo 1 #layer1 +RUN echo 2 #layer2 +` + _, err := buildImage(image, dockerfile, false) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "history", "-q", image) + ids := strings.Split(out, "\n") + idToTag := ids[2] + + // Tag layer0 to "tmp2". + newTag := "tmp2" + dockerCmd(c, "tag", idToTag, newTag) + // Create a container based on "tmp1". + dockerCmd(c, "run", "-d", image, "true") + + // See if the "tmp2" can be untagged. + out, _ = dockerCmd(c, "rmi", newTag) + // Expected 1 untagged entry + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1, check.Commentf("out: %s", out)) + + // Now let's add the tag again and create a container based on it. + dockerCmd(c, "tag", idToTag, newTag) + out, _ = dockerCmd(c, "run", "-d", newTag, "true") + cid := strings.TrimSpace(out) + + // At this point we have 2 containers, one based on layer2 and another based on layer0. + // Try to untag "tmp2" without the -f flag. + out, _, err = dockerCmdWithError("rmi", newTag) + // should not be untagged without the -f flag + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, cid[:12]) + c.Assert(out, checker.Contains, "(must force)") + + // Add the -f flag and test again. + out, _ = dockerCmd(c, "rmi", "-f", newTag) + // should be allowed to untag with the -f flag + c.Assert(out, checker.Contains, fmt.Sprintf("Untagged: %s:latest", newTag)) +} + +func (*DockerSuite) TestRmiParentImageFail(c *check.C) { + _, err := buildImage("test", ` + FROM busybox + RUN echo hello`, false) + c.Assert(err, checker.IsNil) + + id := inspectField(c, "busybox", "ID") + out, _, err := dockerCmdWithError("rmi", id) + c.Assert(err, check.NotNil) + if !strings.Contains(out, "image has dependent child images") { + c.Fatalf("rmi should have failed because it's a parent image, got %s", out) + } +} + +func (s *DockerSuite) TestRmiWithParentInUse(c *check.C) { + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "create", imageID) + cID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID = strings.TrimSpace(out) + + dockerCmd(c, "rmi", imageID) +} + +// #18873 +func (s *DockerSuite) TestRmiByIDHardConflict(c *check.C) { + dockerCmd(c, "create", "busybox") + + imgID := inspectField(c, "busybox:latest", "Id") + + _, _, err := dockerCmdWithError("rmi", imgID[:12]) + c.Assert(err, checker.NotNil) + + // check that tag was not removed + imgID2 := inspectField(c, "busybox:latest", "Id") + c.Assert(imgID, checker.Equals, imgID2) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go new file mode 100644 index 0000000..9462aef --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go @@ -0,0 +1,4689 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/types" + "github.com/go-check/check" + libcontainerUser "github.com/opencontainers/runc/libcontainer/user" +) + +// "test123" should be printed by docker run +func (s *DockerSuite) TestRunEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "echo", "test123") + if out != "test123\n" { + c.Fatalf("container should've printed 'test123', got '%s'", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") + if out != "test\n" { + c.Errorf("container should've printed 'test'") + } +} + +// docker run should not leak file descriptors. This test relies on Unix +// specific functionality and cannot run on Windows. +func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd") + + // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory + if out != "0 1 2 3\n" { + c.Errorf("container should've printed '0 1 2 3', not: %s", out) + } +} + +// it should be possible to lookup Google DNS +// this will fail when Internet access is unavailable +func (s *DockerSuite) TestRunLookupGoogleDNS(c *check.C) { + testRequires(c, Network, NotArm) + if daemonPlatform == "windows" { + // nslookup isn't present in Windows busybox. Is built-in. Further, + // nslookup isn't present in nanoserver. Hence just use PowerShell... + dockerCmd(c, "run", WindowsBaseImage, "powershell", "Resolve-DNSName", "google.com") + } else { + dockerCmd(c, "run", DefaultImage, "nslookup", "google.com") + } + +} + +// the exit code should be 0 +func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { + dockerCmd(c, "run", "busybox", "true") +} + +// the exit code should be 1 +func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { + _, exitCode, err := dockerCmdWithError("run", "busybox", "false") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 1) +} + +// it should be possible to pipe in data via stdin to a process running in a container +func (s *DockerSuite) TestRunStdinPipe(c *check.C) { + // TODO Windows: This needs some work to make compatible. + testRequires(c, DaemonIsLinux) + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") + runCmd.Stdin = strings.NewReader("blahblah") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + logsOut, _ := dockerCmd(c, "logs", out) + + containerLogs := strings.TrimSpace(logsOut) + if containerLogs != "blahblah" { + c.Errorf("logs didn't print the container's logs %s", containerLogs) + } + + dockerCmd(c, "rm", out) +} + +// the container's ID should be printed when starting a container in detached mode +func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + rmOut, _ := dockerCmd(c, "rm", out) + + rmOut = strings.TrimSpace(rmOut) + if rmOut != out { + c.Errorf("rm didn't print the container ID %s %s", out, rmOut) + } +} + +// the working directory should be set correctly +func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { + dir := "/root" + image := "busybox" + if daemonPlatform == "windows" { + dir = `C:/Windows` + } + + // First with -w + out, _ := dockerCmd(c, "run", "-w", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("-w failed to set working directory") + } + + // Then with --workdir + out, _ = dockerCmd(c, "run", "--workdir", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("--workdir failed to set working directory") + } +} + +// pinging Google's DNS resolver should fail when we disable the networking +func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { + count := "-c" + image := "busybox" + if daemonPlatform == "windows" { + count = "-n" + image = WindowsBaseImage + } + + // First using the long form --net + out, exitCode, err := dockerCmdWithError("run", "--net=none", image, "ping", count, "1", "8.8.8.8") + if err != nil && exitCode != 1 { + c.Fatal(out, err) + } + if exitCode != 1 { + c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } +} + +//test --link use container name to link target +func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox") + + ip := inspectField(c, "parent", "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("use a container name to link target failed") + } +} + +//test --link use container id to link target +func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. + testRequires(c, DaemonIsLinux) + cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox") + + cID = strings.TrimSpace(cID) + ip := inspectField(c, cID, "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("use a container id to link target failed") + } +} + +func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in user-defined network udlinkNet with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping to third and its alias must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.NotNil) + + // start third container now + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top") + c.Assert(waitRun("third"), check.IsNil) + + // ping to third and its alias must succeed now + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart second container + dockerCmd(c, "restart", "second") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRunWithNetAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + for _, net := range defaults { + out, _, err := dockerCmdWithError("run", "-d", "--net", net, "--net-alias", "alias_"+net, "busybox", "top") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + + cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Check if default short-id alias is added automatically + id := strings.TrimSpace(cid1) + aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Check if default short-id alias is added automatically + id = strings.TrimSpace(cid2) + aliases = inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + // ping to first and its network-scoped aliases + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its network-scoped aliases must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) +} + +// Issue 9677. +func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) { + out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown flag: --exec-opt") +} + +// Regression test for #4979 +func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { + + var ( + out string + exitCode int + ) + + // Create a file in a volume + if daemonPlatform == "windows" { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, WindowsBaseImage, "cmd", "/c", `echo hello > c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("1", out, exitCode) + } + + // Read the file from another container using --volumes-from to access the volume in the second container + if daemonPlatform == "windows" { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", WindowsBaseImage, "cmd", "/c", `type c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("2", out, exitCode) + } +} + +// Volume path is a symlink which also exists on the host, and the host side is a file not a dir +// But the volume call is just a normal volume, not a bind mount +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink" + + dir, err := ioutil.TempDir("", name) + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + // In the case of Windows to Windows CI, if the machine is setup so that + // the temp directory is not the C: drive, this test is invalid and will + // not work. + if daemonPlatform == "windows" && strings.ToLower(dir[:1]) != "c" { + c.Skip("Requires TEMP to point to C: drive") + } + + f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) + if err != nil { + c.Fatal(err) + } + f.Close() + + if daemonPlatform == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", WindowsBaseImage, dir, dir) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) + containerPath = "/test/test" + cmd = "true" + } + if _, err := buildImage(name, dockerFile, false); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +// Volume path is a symlink in the container +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink2" + + if daemonPlatform == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", WindowsBaseImage, name, name) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p /%s\nRUN ln -s /%s /test", name, name) + containerPath = "/test/test" + cmd = "true" + } + if _, err := buildImage(name, dockerFile, false); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform == "windows" && windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) { + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform == "windows" && windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + var ( + volumeDir string + fileInVol string + ) + if daemonPlatform == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + testRequires(c, DaemonIsLinux) + volumeDir = "/test" + fileInVol = `/test/file` + } + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + + if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +// Regression test for #1201 +func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { + var ( + volumeDir string + fileInVol string + ) + if daemonPlatform == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + volumeDir = "/test" + fileInVol = "/test/file" + } + + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol) + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", fileInVol); err == nil || !strings.Contains(out, `invalid mode: bar`) { + c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out) + } + + dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", fileInVol) +} + +func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + hostpath := randomTmpDirPath("test", daemonPlatform) + if err := os.MkdirAll(hostpath, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", hostpath, err) + } + defer os.RemoveAll(hostpath) + + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform == "windows" && windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + dockerCmd(c, "run", "--name", "parent", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this "rw" mode to be be ignored since the inherited volume is "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") + } + + dockerCmd(c, "run", "--name", "parent2", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this to be read-only since both are "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") + } +} + +// Test for GH#10618 +func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { + path1 := randomTmpDirPath("test1", daemonPlatform) + path2 := randomTmpDirPath("test2", daemonPlatform) + + someplace := ":/someplace" + if daemonPlatform == "windows" { + // Windows requires that the source directory exists before calling HCS + testRequires(c, SameHostDaemon) + someplace = `:c:\someplace` + if err := os.MkdirAll(path1, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", path1, err) + } + defer os.RemoveAll(path1) + if err := os.MkdirAll(path2, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", path1, err) + } + defer os.RemoveAll(path2) + } + mountstr1 := path1 + someplace + mountstr2 := path2 + someplace + + if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + + // Test for https://github.com/docker/docker/issues/22093 + volumename1 := "test1" + volumename2 := "test2" + volume1 := volumename1 + someplace + volume2 := volumename2 + someplace + if out, _, err := dockerCmdWithError("run", "-v", volume1, "-v", volume2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + // create failed should have create volume volumename1 or volumename2 + // we should remove volumename2 or volumename2 successfully + out, _ := dockerCmd(c, "volume", "ls") + if strings.Contains(out, volumename1) { + dockerCmd(c, "volume", "rm", volumename1) + } else { + dockerCmd(c, "volume", "rm", volumename2) + } +} + +// Test for #1351 +func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo") +} + +func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", "busybox", "touch", prefix+"/other/bar") + dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") +} + +// this tests verifies the ID format for the container +func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { + out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true") + if err != nil { + c.Fatal(err) + } + if exit != 0 { + c.Fatalf("expected exit code 0 received %d", exit) + } + + match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) + if err != nil { + c.Fatal(err) + } + if !match { + c.Fatalf("Invalid container ID: %s", out) + } +} + +// Test that creating a container with a volume doesn't crash. Regression test for #995. +func (s *DockerSuite) TestRunCreateVolume(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true") +} + +// Test that creating a volume with a symlink in its path works correctly. Test for #5152. +// Note that this bug happens only with symlinks with a target that starts with '/'. +func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { + // Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...) + testRequires(c, DaemonIsLinux) + image := "docker-test-createvolumewithsymlink" + + buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") + buildCmd.Stdin = strings.NewReader(`FROM busybox + RUN ln -s home /bar`) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + c.Fatalf("could not build '%s': %v", image, err) + } + + _, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } + + volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo") + c.Assert(err, checker.IsNil) + + _, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink") + if err != nil || exitCode != 0 { + c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) + } + + _, err = os.Stat(volPath) + if !os.IsNotExist(err) { + c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) + } +} + +// Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. +func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, DaemonIsLinux) + name := "docker-test-volumesfromsymlinkpath" + prefix := "" + dfContents := `FROM busybox + RUN ln -s home /foo + VOLUME ["/foo/bar"]` + + if daemonPlatform == "windows" { + prefix = `c:` + dfContents = `FROM ` + WindowsBaseImage + ` + RUN mkdir c:\home + RUN mklink /D c:\foo c:\home + VOLUME ["c:/foo/bar"] + ENTRYPOINT c:\windows\system32\cmd.exe` + } + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = strings.NewReader(dfContents) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) + } + + out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name) + if err != nil || exitCode != 0 { + c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out) + } + + _, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } +} + +func (s *DockerSuite) TestRunExitCode(c *check.C) { + var ( + exit int + err error + ) + + _, exit, err = dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72") + + if err == nil { + c.Fatal("should not have a non nil error") + } + if exit != 72 { + c.Fatalf("expected exit code 72 received %d", exit) + } +} + +func (s *DockerSuite) TestRunUserDefaults(c *check.C) { + expected := "uid=0(root) gid=0(root)" + if daemonPlatform == "windows" { + expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)" + } + out, _ := dockerCmd(c, "run", "busybox", "id") + if !strings.Contains(out, expected) { + c.Fatalf("expected '%s' got %s", expected, out) + } +} + +func (s *DockerSuite) TestRunUserByName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id") + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + c.Fatalf("expected root user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id") + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + c.Fatalf("expected daemon user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDBig(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux, NotArm) + out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id") + if err == nil { + c.Fatal("No error, but must be.", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id") + if err == nil { + c.Fatal("No error, but must be.", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDZero(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-u", "0", "busybox", "id") + if err != nil { + c.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { + c.Fatalf("expected daemon user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserNotFound(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + _, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id") + if err == nil { + c.Fatal("unknown user should cause container to fail") + } +} + +func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) { + sleepTime := "2" + group := sync.WaitGroup{} + group.Add(2) + + errChan := make(chan error, 2) + for i := 0; i < 2; i++ { + go func() { + defer group.Done() + _, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime) + errChan <- err + }() + } + + group.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestRunEnvironment(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test relies currently on unix functionality. + testRequires(c, DaemonIsLinux) + cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") + cmd.Env = append(os.Environ(), + "TRUE=false", + "TRICKY=tri\ncky\n", + ) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnv := strings.Split(strings.TrimSpace(out), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=testing", + "FALSE=true", + "TRUE=false", + "TRICKY=tri", + "cky", + "", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test relies currently on unix functionality. + testRequires(c, DaemonIsLinux) + + // Test to make sure that when we use -e on env vars that are + // not set in our local env that they're removed (if present) in + // the container + + cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env") + cmd.Env = appendBaseEnv(true) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnv := strings.Split(strings.TrimSpace(out), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test relies currently on unix functionality. + testRequires(c, DaemonIsLinux) + + // Test to make sure that when we use -e on env vars that are + // already in the env that we're overriding them + + cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env") + cmd.Env = appendBaseEnv(true, "HOSTNAME=bar") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnv := strings.Split(strings.TrimSpace(out), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root2", + "HOSTNAME=bar", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { + if daemonPlatform == "windows" { + // Windows busybox does not have ping. Use built in ping instead. + dockerCmd(c, "run", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) { + // TODO Windows: This is Linux specific as --link is not supported and + // this will be deprecated in favor of container networking model. + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "--name", "linked", "busybox", "true") + + _, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true") + if err == nil { + c.Fatal("Expected error") + } +} + +// #7851 hostname outside container shows FQDN, inside only shortname +// For testing purposes it is not required to set host's hostname directly +// and use "--net=host" (as the original issue submitter did), as the same +// codepath is executed with "docker run -h ". Both were manually +// tested, but this testcase takes the simpler path of using "run -h .." +func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) { + // TODO Windows: -h is not yet functional. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname") + if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { + c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunGroupAdd(c *check.C) { + // Not applicable for Windows as there is no concept of --group-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id") + + groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777" + if actual := strings.Trim(out, "\r\n"); actual != groupsList { + c.Fatalf("expected output %s received %s", groupsList, actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotArm) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 { + c.Fatal("sys should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 { + c.Fatalf("sys should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 { + c.Fatal("proc should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger"); code != 0 { + c.Fatalf("proc should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + // TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null") + deviceLineFields := strings.Fields(out) + deviceLineFields[6] = "" + deviceLineFields[7] = "" + deviceLineFields[8] = "" + expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} + + if !(reflect.DeepEqual(deviceLineFields, expected)) { + c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) + } +} + +func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") + if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { + c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) { + // Not applicable on Windows as it does not support chroot + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "busybox", "chroot", "/", "true") +} + +func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { + c.Fatalf("expected output /dev/nulo, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/zero" { + c.Fatalf("expected output /dev/zero, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + _, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero") + if err == nil { + c.Fatalf("run container with device mode ro should fail") + } +} + +func (s *DockerSuite) TestRunModeHostname(c *check.C) { + // Not applicable on Windows as Windows does not support -h + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") + + if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { + c.Fatalf("expected 'testhostname', but says: %q", actual) + } + + out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname") + + hostname, err := os.Hostname() + if err != nil { + c.Fatal(err) + } + if actual := strings.Trim(out, "\r\n"); actual != hostname { + c.Fatalf("expected %q, but says: %q", hostname, actual) + } +} + +func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { + out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd") + expected := "/\n" + if daemonPlatform == "windows" { + expected = "C:" + expected + } + if out != expected { + c.Fatalf("pwd returned %q (expected %s)", s, expected) + } +} + +func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { + if daemonPlatform == "windows" { + // Windows busybox will fail with Permission Denied on items such as pagefile.sys + dockerCmd(c, "run", "-v", `c:\:c:\host`, WindowsBaseImage, "cmd", "-c", "dir", `c:\host`) + } else { + dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") + } +} + +func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { + mount := "/:/" + targetDir := "/host" + if daemonPlatform == "windows" { + mount = `c:\:c\` + targetDir = "c:/host" // Forward slash as using busybox + } + out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir) + if err == nil { + c.Fatal(out, err) + } +} + +// Verify that a container gets default DNS when only localhost resolvers exist +func (s *DockerSuite) TestRunDNSDefaultOptions(c *check.C) { + // Not applicable on Windows as this is testing Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // preserve original resolv.conf for restoring after test + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + // defer restored original conf + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + c.Fatal(err) + } + }() + + // test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost + // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by + // GetNameservers(), leading to a replacement of nameservers with the default set + tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + + actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + // check that the actual defaults are appended to the commented out + // localhost resolver (which should be preserved) + // NOTE: if we ever change the defaults from google dns, this will break + expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" + if actual != expected { + c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual) + } +} + +func (s *DockerSuite) TestRunDNSOptions(c *check.C) { + // Not applicable on Windows as Windows does not support --dns*, or + // the Unix-specific functionality of resolv.conf. + testRequires(c, DaemonIsLinux) + out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf") + + // The client will get a warning on stderr when setting DNS to a localhost address; verify this: + if !strings.Contains(stderr, "Localhost DNS setting") { + c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) + } + + actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" { + c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual) + } + + out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf") + + actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) + if actual != "nameserver 127.0.0.1 options ndots:3" { + c.Fatalf("expected 'nameserver 127.0.0.1 options ndots:3', but says: %q", actual) + } +} + +func (s *DockerSuite) TestRunDNSRepeatOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf") + + actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + if actual != "search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" { + c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", actual) + } +} + +func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *check.C) { + // Not applicable on Windows as testing Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers := resolvconf.GetNameservers(origResolvConf, types.IP) + hostSearch := resolvconf.GetSearchDomains(origResolvConf) + + var out string + out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") + + if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" { + c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) + } + + actualSearch := resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) + } + } + + out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") + + actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP) + if len(actualNameservers) != len(hostNameservers) { + c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNameservers), len(actualNameservers)) + } + for i := range actualNameservers { + if actualNameservers[i] != hostNameservers[i] { + c.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNameservers[i]) + } + } + + if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { + c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0])) + } + + // test with file + tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + // put the old resolvconf back + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + c.Fatal(err) + } + }() + + resolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers = resolvconf.GetNameservers(resolvConf, types.IP) + hostSearch = resolvconf.GetSearchDomains(resolvConf) + + out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { + c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) + } + + actualSearch = resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) + } + } +} + +// Test to see if a non-root user can resolve a DNS name. Also +// check if the container resolv.conf file has at least 0644 perm. +func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) { + // Not applicable on Windows as Windows does not support --user + testRequires(c, SameHostDaemon, Network, DaemonIsLinux, NotArm) + + dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org") + + cID, err := getIDByName("testperm") + if err != nil { + c.Fatal(err) + } + + fmode := (os.FileMode)(0644) + finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf")) + if err != nil { + c.Fatal(err) + } + + if (finfo.Mode() & fmode) != fmode { + c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String()) + } +} + +// Test if container resolv.conf gets updated the next time it restarts +// if host /etc/resolv.conf has changed. This only applies if the container +// uses the host's /etc/resolv.conf and does not have any dns options provided. +func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { + // Not applicable on Windows as testing unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + c.Skip("Unstable test, to be re-activated once #19937 is resolved") + + tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n") + tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1") + + //take a copy of resolv.conf for restoring after test completes + resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + + // This test case is meant to test monitoring resolv.conf when it is + // a regular file not a bind mounc. So we unmount resolv.conf and replace + // it with a file containing the original settings. + mounted, err := mount.Mounted("/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + if mounted { + cmd := exec.Command("umount", "/etc/resolv.conf") + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + } + + //cleanup + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + }() + + //1. test that a restarting container gets an updated resolv.conf + dockerCmd(c, "run", "--name=first", "busybox", "true") + containerID1, err := getIDByName("first") + if err != nil { + c.Fatal(err) + } + + // replace resolv.conf with our temporary copy + bytesResolvConf := []byte(tmpResolvConf) + if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // start the container again to pickup changes + dockerCmd(c, "start", "first") + + // check for update in container + containerResolv, err := readContainerFile(containerID1, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) + } + + /* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } */ + //2. test that a restarting container does not receive resolv.conf updates + // if it modified the container copy of the starting point resolv.conf + dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf") + containerID2, err := getIDByName("second") + if err != nil { + c.Fatal(err) + } + + //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + + // start the container again + dockerCmd(c, "start", "second") + + // check for update in container + containerResolv, err = readContainerFile(containerID2, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + if bytes.Equal(containerResolv, resolvConfSystem) { + c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv)) + } + + //3. test that a running container's resolv.conf is not modified while running + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + runningContainerID := strings.TrimSpace(out) + + // replace resolv.conf + if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // check for update in container + containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + if bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv)) + } + + //4. test that a running container's resolv.conf is updated upon restart + // (the above container is still running..) + dockerCmd(c, "restart", runningContainerID) + + // check for update in container + containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv)) + } + + //5. test that additions of a localhost resolver are cleaned from + // host resolv.conf before updating container's resolv.conf copies + + // replace resolv.conf with a localhost-only nameserver copy + bytesResolvConf = []byte(tmpLocalhostResolvConf) + if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // start the container again to pickup changes + dockerCmd(c, "start", "first") + + // our first exited container ID should have been updated, but with default DNS + // after the cleanup of resolv.conf found only a localhost nameserver: + containerResolv, err = readContainerFile(containerID1, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" + if !bytes.Equal(containerResolv, []byte(expected)) { + c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) + } + + //6. Test that replacing (as opposed to modifying) resolv.conf triggers an update + // of containers' resolv.conf. + + // Restore the original resolv.conf + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + + // Run the container so it picks up the old settings + dockerCmd(c, "run", "--name=third", "busybox", "true") + containerID3, err := getIDByName("third") + if err != nil { + c.Fatal(err) + } + + // Create a modified resolv.conf.aside and override resolv.conf with it + bytesResolvConf = []byte(tmpResolvConf) + if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + + // start the container again to pickup changes + dockerCmd(c, "start", "third") + + // check for update in container + containerResolv, err = readContainerFile(containerID3, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv)) + } + + //cleanup, restore original resolv.conf happens in defer func() +} + +func (s *DockerSuite) TestRunAddHost(c *check.C) { + // Not applicable on Windows as it does not support --add-host + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts") + + actual := strings.Trim(out, "\r\n") + if actual != "86.75.30.9\textra" { + c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual) + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode +// but using --attach instead of -a to make sure we read the flag correctly +func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) { + cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true") + _, stderr, _, err := runCommandWithStdoutStderr(cmd) + if err == nil { + c.Fatal("Container should have exited with error code different than 0") + } else if !strings.Contains(stderr, "Conflicting options: -a and -d") { + c.Fatal("Should have been returned an error with conflicting options -a and -d") + } +} + +func (s *DockerSuite) TestRunState(c *check.C) { + // TODO Windows: This needs some rework as Windows busybox does not support top + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + if pid1 == "0" { + c.Fatal("Container state Pid 0") + } + + dockerCmd(c, "stop", id) + state = inspectField(c, id, "State.Running") + if state != "false" { + c.Fatal("Container state is 'running'") + } + pid2 := inspectField(c, id, "State.Pid") + if pid2 == pid1 { + c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) + } + + dockerCmd(c, "start", id) + state = inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid3 := inspectField(c, id, "State.Pid") + if pid3 == pid1 { + c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) + } +} + +// Test for #1737 +func (s *DockerSuite) TestRunCopyVolumeUIDGID(c *check.C) { + // Not applicable on Windows as it does not support uid or gid in this way + testRequires(c, DaemonIsLinux) + name := "testrunvolumesuidgid" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + RUN echo 'dockerio:x:1001:' >> /etc/group + RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that the uid and gid is copied from the image to the volume + out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") + out = strings.TrimSpace(out) + if out != "dockerio:dockerio" { + c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) + } +} + +// Test for #1582 +func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { + // TODO Windows, post TP5. Windows does not yet support volume functionality + // that copies from the image to the volume. + testRequires(c, DaemonIsLinux) + name := "testruncopyvolumecontent" + _, err := buildImage(name, + `FROM busybox + RUN mkdir -p /hello/local && echo hello > /hello/local/world`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that the content is copied from the image to the volume + out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello") + if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) { + c.Fatal("Container failed to transfer content to volume") + } +} + +func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { + name := "testrunmdcleanuponentrypoint" + if _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["echo"] + CMD ["testingpoint"]`, + true); err != nil { + c.Fatal(err) + } + + out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name) + if exit != 0 { + c.Fatalf("expected exit code 0 received %d, out: %q", exit, out) + } + out = strings.TrimSpace(out) + expected := "root" + if daemonPlatform == "windows" { + if strings.Contains(WindowsBaseImage, "windowsservercore") { + expected = `user manager\containeradministrator` + } else { + expected = `ContainerAdministrator` // nanoserver + } + } + if out != expected { + c.Fatalf("Expected output %s, got %q. %s", expected, out, WindowsBaseImage) + } +} + +// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected +func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { + existingFile := "/bin/cat" + expected := "not a directory" + if daemonPlatform == "windows" { + existingFile = `\windows\system32\ntdll.dll` + expected = `The directory name is invalid.` + } + + out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox") + if !(err != nil && exitCode == 125 && strings.Contains(out, expected)) { + c.Fatalf("Existing binary as a directory should error out with exitCode 125; we got: %s, exitCode: %d", out, exitCode) + } +} + +func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { + name := "testrunexitonstdinclose" + + meow := "/bin/cat" + delay := 60 + if daemonPlatform == "windows" { + meow = "cat" + } + runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow) + + stdin, err := runCmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + stdout, err := runCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := runCmd.Start(); err != nil { + c.Fatal(err) + } + if _, err := stdin.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + c.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "hello" { + c.Fatalf("Output should be 'hello', got '%q'", line) + } + if err := stdin.Close(); err != nil { + c.Fatal(err) + } + finish := make(chan error) + go func() { + finish <- runCmd.Wait() + close(finish) + }() + select { + case err := <-finish: + c.Assert(err, check.IsNil) + case <-time.After(time.Duration(delay) * time.Second): + c.Fatal("docker run failed to exit on stdin close") + } + state := inspectField(c, name, "State.Running") + + if state != "false" { + c.Fatal("Container must be stopped after stdin closing") + } +} + +// Test run -i --restart xxx doesn't hang +func (s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) { + name := "test-inter-restart" + + result := icmd.StartCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "--name", name, "--restart=always", "busybox", "sh"}, + Stdin: bytes.NewBufferString("exit 11"), + }) + c.Assert(result.Error, checker.IsNil) + defer func() { + dockerCmdWithResult("stop", name).Assert(c, icmd.Success) + }() + + result = icmd.WaitOnCmd(60*time.Second, result) + c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 11}) +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) { + // Cannot run on Windows as Windows does not support diff. + testRequires(c, DaemonIsLinux) + name := "writehosts" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/hosts should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func eqToBaseDiff(out string, c *check.C) bool { + name := "eqToBaseDiff" + stringutils.GenerateRandomAlphaOnlyString(32) + dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello") + cID, err := getIDByName(name) + c.Assert(err, check.IsNil) + + baseDiff, _ := dockerCmd(c, "diff", cID) + baseArr := strings.Split(baseDiff, "\n") + sort.Strings(baseArr) + outArr := strings.Split(out, "\n") + sort.Strings(outArr) + return sliceEq(baseArr, outArr) +} + +func sliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) { + // Cannot run on Windows as Windows does not support diff. + testRequires(c, DaemonIsLinux) + name := "writehostname" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/hostname should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) { + // Cannot run on Windows as Windows does not support diff. + testRequires(c, DaemonIsLinux) + name := "writeresolv" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/resolv.conf should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { + // Cannot run on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux) + name := "baddevice" + out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true") + + if err == nil { + c.Fatal("Run should fail with bad device") + } + expected := `"/etc": not a device node` + if !strings.Contains(out, expected) { + c.Fatalf("Output should contain %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunEntrypoint(c *check.C) { + name := "entrypoint" + + out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "echo", "busybox", "-n", "foobar") + expected := "foobar" + + if out != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunBindMounts(c *check.C) { + testRequires(c, SameHostDaemon) + if daemonPlatform == "linux" { + testRequires(c, DaemonIsLinux, NotUserNamespace) + } + + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir("", "docker-test-container") + if err != nil { + c.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + writeFile(path.Join(tmpDir, "touch-me"), "", c) + + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform != "windows" || windowsDaemonKV >= 14350 { + // Test reading from a read-only bind mount + out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp") + if !strings.Contains(out, "touch-me") { + c.Fatal("Container failed to read from bind mount") + } + } + + // test writing to bind mount + if daemonPlatform == "windows" { + dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla") + } else { + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") + } + + readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + + // test mounting to an illegal destination directory + _, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") + if err == nil { + c.Fatal("Container bind mounted illegal directory") + } + + // Windows does not (and likely never will) support mounting a single file + if daemonPlatform != "windows" { + // test mount a file + dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") + content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + expected := "yotta" + if content != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, content) + } + } +} + +// Ensure that CIDFile gets deleted if it's empty +// Perform this test by making `docker run` fail +func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. + testRequires(c, DaemonIsLinux) + + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpCidFile := path.Join(tmpDir, "cid") + + image := "emptyfs" + if daemonPlatform == "windows" { + // Windows can't support an emptyfs image. Just use the regular Windows image + image = WindowsBaseImage + } + out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image) + if err == nil { + c.Fatalf("Run without command must fail. out=%s", out) + } else if !strings.Contains(out, "No command specified") { + c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err) + } + + if _, err := os.Stat(tmpCidFile); err == nil { + c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) + } +} + +// #2098 - Docker cidFiles only contain short version of the containerId +//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test" +// TestRunCidFile tests that run --cidfile returns the longid +func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + c.Fatal(err) + } + tmpCidFile := path.Join(tmpDir, "cid") + defer os.RemoveAll(tmpDir) + + out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true") + + id := strings.TrimSpace(out) + buffer, err := ioutil.ReadFile(tmpCidFile) + if err != nil { + c.Fatal(err) + } + cid := string(buffer) + if len(cid) != 64 { + c.Fatalf("--cidfile should be a long id, not %q", id) + } + if cid != id { + c.Fatalf("cid must be equal to %s, got %s", id, cid) + } +} + +func (s *DockerSuite) TestRunSetMacAddress(c *check.C) { + mac := "12:34:56:78:9a:bc" + var out string + if daemonPlatform == "windows" { + out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'") + mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs + } else { + out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'") + } + + actualMac := strings.TrimSpace(out) + if actualMac != mac { + c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) + } +} + +func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) { + // TODO Windows. Network settings are not propagated back to inspect. + testRequires(c, DaemonIsLinux) + mac := "12:34:56:78:9a:bc" + out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top") + + id := strings.TrimSpace(out) + inspectedMac := inspectField(c, id, "NetworkSettings.Networks.bridge.MacAddress") + if inspectedMac != mac { + c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) + } +} + +// test docker run use an invalid mac address +func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) { + out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox") + //use an invalid mac address should with an error out + if err == nil || !strings.Contains(out, "is not a valid mac address") { + c.Fatalf("run with an invalid --mac-address should with error out") + } +} + +func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) { + // TODO Windows. Network settings are not propagated back to inspect. + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") + + id := strings.TrimSpace(out) + ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress") + iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), + "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT") + out, _, err := runCommandWithOutput(iptCmd) + if err != nil { + c.Fatal(err, out) + } + if err := deleteContainer(id); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") +} + +func (s *DockerSuite) TestRunPortInUse(c *check.C) { + // TODO Windows. The duplicate NAT message returned by Windows will be + // changing as is currently completely undecipherable. Does need modifying + // to run sh rather than top though as top isn't in Windows busybox. + testRequires(c, SameHostDaemon, DaemonIsLinux) + + port := "1234" + dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top") + + out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top") + if err == nil { + c.Fatalf("Binding on used port must fail") + } + if !strings.Contains(out, "port is already allocated") { + c.Fatalf("Out must be about \"port is already allocated\", got %s", out) + } +} + +// https://github.com/docker/docker/issues/12148 +func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) { + // TODO Windows. -P is not yet supported + testRequires(c, DaemonIsLinux) + // allocate a dynamic port to get the most recent + out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id, "80") + + strPort := strings.Split(strings.TrimSpace(out), ":")[1] + port, err := strconv.ParseInt(strPort, 10, 64) + if err != nil { + c.Fatalf("invalid port, got: %s, error: %s", strPort, err) + } + + // allocate a static port and a dynamic port together, with static port + // takes the next recent port in dynamic port range. + dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top") +} + +// Regression test for #7792 +func (s *DockerSuite) TestRunMountOrdering(c *check.C) { + // TODO Windows: Post TP5. Updated, but Windows does not support nested mounts currently. + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + // Create a temporary tmpfs mounc. + fooDir := filepath.Join(tmpDir, "foo") + if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { + c.Fatalf("failed to mkdir at %s - %s", fooDir, err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", + "-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir), + "busybox:latest", "sh", "-c", + "ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me") +} + +// Regression test for https://github.com/docker/docker/issues/8259 +func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) { + // Not applicable on Windows as Windows does not support volumes + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + linkPath := os.TempDir() + "/testlink2" + if err := os.Symlink(tmpDir, linkPath); err != nil { + c.Fatal(err) + } + defer os.RemoveAll(linkPath) + + // Create first container + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") + + // Create second container with same symlinked path + // This will fail if the referenced issue is hit with a "Volume exists" error + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") +} + +//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container +func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) { + // While Windows supports volumes, it does not support --add-host hence + // this test is not applicable on Windows. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf") + if !strings.Contains(out, "nameserver 127.0.0.1") { + c.Fatal("/etc volume mount hides /etc/resolv.conf") + } + + out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname") + if !strings.Contains(out, "test123") { + c.Fatal("/etc volume mount hides /etc/hostname") + } + + out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts") + out = strings.Replace(out, "\n", " ", -1) + if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") { + c.Fatal("/etc volume mount hides /etc/hosts") + } +} + +func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) { + // TODO Windows (Post RS1). Windows does not support volumes which + // are pre-populated such as is built in the dockerfile used in this test. + testRequires(c, DaemonIsLinux) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if _, err := buildImage("dataimage", + `FROM busybox + RUN ["mkdir", "-p", "/foo"] + RUN ["touch", "/foo/bar"]`, + true); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--name", "test", "-v", prefix+slash+"foo", "busybox") + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) + } + + tmpDir := randomTmpDirPath("docker_test_bind_mount_copy_data", daemonPlatform) + if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + } +} + +func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) { + // just run with unknown image + cmd := exec.Command(dockerBinary, "run", "asdfsg") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err == nil { + c.Fatal("Run with unknown image should fail") + } + if stdout.Len() != 0 { + c.Fatalf("Stdout contains output from pull: %s", stdout) + } +} + +func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if _, err := buildImage("run_volumes_clean_paths", + `FROM busybox + VOLUME `+prefix+`/foo/`, + true); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") + + out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/foo/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`) + c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { + c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/bar/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar") + c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { + c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out) + } +} + +// Regression test for #3631 +func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { + // TODO Windows: This should be able to run on Windows if can find an + // alternate to /dev/zero and /dev/stdout. + testRequires(c, DaemonIsLinux) + cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") + + stdout, err := cont.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := cont.Start(); err != nil { + c.Fatal(err) + } + n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) + if err != nil { + c.Fatal(err) + } + + expected := 2 * 1024 * 2000 + if n != expected { + c.Fatalf("Expected %d, got %d", expected, n) + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) { + // TODO Windows: -P is not currently supported. Also network + // settings are not propagated back. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + var ports nat.PortMap + if err := json.Unmarshal([]byte(portstr), &ports); err != nil { + c.Fatal(err) + } + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatalf("Port is not mapped for the port %s", port) + } + } +} + +func (s *DockerSuite) TestRunExposePort(c *check.C) { + out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox") + c.Assert(err, checker.NotNil, check.Commentf("--expose with an invalid port should error out")) + c.Assert(out, checker.Contains, "invalid range format for --expose") +} + +func (s *DockerSuite) TestRunModeIpcHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostIpc, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc != out { + c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc == out { + c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out) + } +} + +func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + + parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if parentContainerIpc != out { + c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out) + } + + catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test") + if catOutput != "test" { + c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput) + } + + // check that /dev/mqueue is actually of mqueue type + grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts") + if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") { + c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput) + } + + lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue") + lsOutput = strings.Trim(lsOutput, "\n") + if lsOutput != "toto" { + c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput) + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top") + if !strings.Contains(out, "abcd1234") || err == nil { + c.Fatalf("run IPC from a non exists container should with correct error out") + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "create", "busybox") + + id := strings.TrimSpace(out) + out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", id), "busybox") + if err == nil { + c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err) + } +} + +func (s *DockerSuite) TestRunModePIDContainer(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + + parentContainerPid, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--pid=container:%s", id), "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if parentContainerPid != out { + c.Fatalf("PID different with --pid=container:%s %s != %s\n", id, parentContainerPid, out) + } +} + +func (s *DockerSuite) TestRunModePIDContainerNotExists(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--pid", "container:abcd1234", "busybox", "top") + if !strings.Contains(out, "abcd1234") || err == nil { + c.Fatalf("run PID from a non exists container should with correct error out") + } +} + +func (s *DockerSuite) TestRunModePIDContainerNotRunning(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "create", "busybox") + + id := strings.TrimSpace(out) + out, _, err := dockerCmdWithError("run", fmt.Sprintf("--pid=container:%s", id), "busybox") + if err == nil { + c.Fatalf("Run container with pid mode container should fail with non running container: %s\n%s", out, err) + } +} + +func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") + defer os.Remove("/dev/mqueue/toto") + defer os.Remove("/dev/shm/test") + volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm") + c.Assert(err, checker.IsNil) + if volPath != "/dev/shm" { + c.Fatalf("volumePath should have been /dev/shm, was %s", volPath) + } + + out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test") + if out != "test" { + c.Fatalf("Output of /dev/shm/test expected test but found: %s", out) + } + + // Check that the mq was created + if _, err := os.Stat("/dev/mqueue/toto"); err != nil { + c.Fatalf("Failed to confirm '/dev/mqueue/toto' presence on host: %s", err.Error()) + } +} + +func (s *DockerSuite) TestContainerNetworkMode(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if parentContainerNet != out { + c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out) + } +} + +func (s *DockerSuite) TestRunModePIDHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostPid, err := os.Readlink("/proc/1/ns/pid") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid != out { + c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid == out { + c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out) + } +} + +func (s *DockerSuite) TestRunModeUTSHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + hostUTS, err := os.Readlink("/proc/1/ns/uts") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS != out { + c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS == out { + c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out) + } + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--uts=host", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictUTSHostname.Error()) +} + +func (s *DockerSuite) TestRunTLSVerify(c *check.C) { + // Remote daemons use TLS and this test is not applicable when TLS is required. + testRequires(c, SameHostDaemon) + if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 { + c.Fatalf("Should have worked: %v:\n%v", err, out) + } + + // Regardless of whether we specify true or false we need to + // test to make sure tls is turned on if --tlsverify is specified at all + result := dockerCmdWithResult("--tlsverify=false", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "error during connect"}) + + result = dockerCmdWithResult("--tlsverify=true", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "cert"}) +} + +func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) { + // TODO Windows. Once moved to libnetwork/CNM, this may be able to be + // re-instated. + testRequires(c, DaemonIsLinux) + // first find allocator current position + out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id) + + out = strings.TrimSpace(out) + if out == "" { + c.Fatal("docker port command output is empty") + } + out = strings.Split(out, ":")[1] + lastPort, err := strconv.Atoi(out) + if err != nil { + c.Fatal(err) + } + port := lastPort + 1 + l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + c.Fatal(err) + } + defer l.Close() + + out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id = strings.TrimSpace(out) + dockerCmd(c, "port", id) +} + +func (s *DockerSuite) TestRunTTYWithPipe(c *check.C) { + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("run should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("container is running but should have failed") + } +} + +func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) { + addr := "00:16:3E:08:00:50" + args := []string{"run", "--mac-address", addr} + expected := addr + + if daemonPlatform != "windows" { + args = append(args, "busybox", "ifconfig") + } else { + args = append(args, WindowsBaseImage, "ipconfig", "/all") + expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1) + } + + if out, _ := dockerCmd(c, args...); !strings.Contains(out, expected) { + c.Fatalf("Output should have contained %q: %s", expected, out) + } +} + +func (s *DockerSuite) TestRunNetHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet == out { + c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out) + } +} + +func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) { + // TODO Windows. As Windows networking evolves and converges towards + // CNM, this test may be possible to enable on Windows. + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") +} + +func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Container should have host network namespace") + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) { + // TODO Windows. This may be possible to enable in the future. However, + // Windows does not currently support --expose, or populate the network + // settings seen through inspect. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + + var ports nat.PortMap + err := json.Unmarshal([]byte(portstr), &ports) + c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal: %v", portstr)) + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatal("Port is not mapped for the port "+port, out) + } + } +} + +func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { + runSleepingContainer(c, "--name=testrunsetdefaultrestartpolicy") + out := inspectField(c, "testrunsetdefaultrestartpolicy", "HostConfig.RestartPolicy.Name") + if out != "no" { + c.Fatalf("Set default restart policy failed") + } +} + +func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + timeout := 10 * time.Second + if daemonPlatform == "windows" { + timeout = 120 * time.Second + } + + id := strings.TrimSpace(string(out)) + if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil { + c.Fatal(err) + } + + count := inspectField(c, id, "RestartCount") + if count != "3" { + c.Fatalf("Container was restarted %s times, expected %d", count, 3) + } + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + if MaximumRetryCount != "3" { + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") + } +} + +func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) { + dockerCmd(c, "run", "--rm", "busybox", "touch", "/file") +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + testPriv := true + // don't test privileged mode subtest if user namespaces enabled + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + testPriv = false + } + testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me") +} + +func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) { + // Not applicable on Windows due to use of Unix specific functionality, plus + // the use of --read-only which is not supported. + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + // Ensure we have not broken writing /dev/pts + out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount") + if status != 0 { + c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.") + } + expected := "type devpts (rw," + if !strings.Contains(string(out), expected) { + c.Fatalf("expected output to contain %s but contains %s", expected, out) + } +} + +func testReadOnlyFile(c *check.C, testPriv bool, filenames ...string) { + touch := "touch " + strings.Join(filenames, " ") + out, _, err := dockerCmdWithError("run", "--read-only", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } + + if !testPriv { + return + } + + out, _, err = dockerCmdWithError("run", "--read-only", "--privileged", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) { + // Not applicable on Windows which does not support --link + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts") + if !strings.Contains(string(out), "testlinked") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDNSFlag(c *check.C) { + // Not applicable on Windows which does not support either --read-only or --dns. + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf") + if !strings.Contains(string(out), "1.1.1.1") { + c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(string(out), "testreadonly") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used") + } +} + +func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + runSleepingContainer(c, "--name=voltest", "-v", prefix+"/foo") + runSleepingContainer(c, "--name=restarter", "--volumes-from", "voltest") + + // Remove the main volume container and restart the consuming container + dockerCmd(c, "rm", "-f", "voltest") + + // This should not fail since the volumes-from were already applied + dockerCmd(c, "restart", "restarter") +} + +// run container with --rm should remove container if exit code != 0 +func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { + name := "flowers" + out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists") + if err == nil { + c.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + c.Fatal(out, err) + } + + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { + name := "sparkles" + out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound") + if err == nil { + c.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + c.Fatal(out, err) + } + + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunPIDHostWithChildIsKillable(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, NotUserNamespace) + name := "ibuildthecloud" + dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi") + + c.Assert(waitRun(name), check.IsNil) + + errchan := make(chan error) + go func() { + if out, _, err := dockerCmdWithError("kill", name); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + select { + case err := <-errchan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Kill container timed out") + } +} + +func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) { + // TODO Windows. This may be possible to enable once Windows supports + // memory limits on containers + testRequires(c, DaemonIsLinux) + // this memory limit is 1 byte less than the min, which is 4MB + // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22 + out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox") + if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") { + c.Fatalf("expected run to fail when using too low a memory limit: %q", out) + } +} + +func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version") + if err == nil || code == 0 { + c.Fatal("standard container should not be able to write to /proc/asound") + } +} + +func (s *DockerSuite) TestRunReadProcTimer(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadProcLatency(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + // some kernels don't have this configured so skip the test if this file is not found + // on the host running the tests. + if _, err := os.Stat("/proc/latency_stats"); err != nil { + c.Skip("kernel doesn't have latency_stats configured") + return + } + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testReadPaths := []string{ + "/proc/latency_stats", + "/proc/timer_stats", + "/proc/kcore", + } + for i, filePath := range testReadPaths { + name := fmt.Sprintf("procsieve-%d", i) + shellCmd := fmt.Sprintf("exec 3<%s", filePath) + + out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if exitCode != 0 { + return + } + if err != nil { + c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestMountIntoProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true") + if err == nil || code == 0 { + c.Fatal("container should not be able to mount into /proc") + } +} + +func (s *DockerSuite) TestMountIntoSys(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + testRequires(c, NotUserNamespace) + dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true") +} + +func (s *DockerSuite) TestRunUnshareProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + // In this test goroutines are used to run test cases in parallel to prevent the test from taking a long time to run. + errChan := make(chan error) + + go func() { + name := "acidburn" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount") + if err == nil || + !(strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + go func() { + name := "cereal" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + /* Ensure still fails if running privileged with the default policy */ + go func() { + name := "crashoverride" + out, _, err := dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp=unconfined", "--security-opt", "apparmor=docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("privileged unshare with apparmor should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + for i := 0; i < 3; i++ { + err := <-errChan + if err != nil { + c.Fatal(err) + } + } +} + +func (s *DockerSuite) TestRunPublishPort(c *check.C) { + // TODO Windows: This may be possible once Windows moves to libnetwork and CNM + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top") + out, _ := dockerCmd(c, "port", "test") + out = strings.Trim(out, "\r\n") + if out != "" { + c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out) + } +} + +// Issue #10184. +func (s *DockerSuite) TestDevicePermissions(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + const permissions = "crw-rw-rw-" + out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse") + if status != 0 { + c.Fatalf("expected status 0, got %d", status) + } + if !strings.HasPrefix(out, permissions) { + c.Fatalf("output should begin with %q, got %q", permissions, out) + } +} + +func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +// https://github.com/docker/docker/pull/14498 +func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true") + + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform != "windows" || windowsDaemonKV >= 14350 { + dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true") + } + dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true") + + if daemonPlatform != "windows" { + mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if mRO.RW { + c.Fatalf("Expected RO volume was RW") + } + } + + mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if !mRW.RW { + c.Fatalf("Expected RW volume was RO") + } +} + +func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testWritePaths := []string{ + /* modprobe and core_pattern should both be denied by generic + * policy of denials for /proc/sys/kernel. These files have been + * picked to be checked as they are particularly sensitive to writes */ + "/proc/sys/kernel/modprobe", + "/proc/sys/kernel/core_pattern", + "/proc/sysrq-trigger", + "/proc/kcore", + } + for i, filePath := range testWritePaths { + name := fmt.Sprintf("writeprocsieve-%d", i) + + shellCmd := fmt.Sprintf("exec 3>%s", filePath) + out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if code != 0 { + return + } + if err != nil { + c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + expected := "test123" + + filename := createTmpFile(c, expected) + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i]) + if actual != expected { + c.Fatalf("expected %s be: %q, but was: %q", nwfiles[i], expected, actual) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + filename := createTmpFile(c, "test123") + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i]) + if err == nil || exitCode == 0 { + c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux, UserNamespaceROMount) + + filename := createTmpFile(c, "test123") + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + _, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i]) + if exitCode != 0 { + c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode) + } + } + + for i := range nwfiles { + _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i]) + if err == nil || exitCode == 0 { + c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode) + } + } +} + +func (s *DockerTrustSuite) TestTrustedRun(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := s.setupTrustedImage(c, "trusted-run") + + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s\n", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try untrusted run to ensure we pushed the tag to the registry + runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + // Try trusted run on untrusted tag + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + c.Fatalf("Error expected when running trusted run with:\n%s", out) + } + + if !strings.Contains(string(out), "does not have trust data for") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-run-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + c.Fatalf("Error running trusted run in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "could not validate the path to a trusted root") { + c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out) + } + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try run + runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on untrusted run in the distant future:\n%s", out) + } + }) +} + +func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + if err != nil { + c.Fatalf("Restarting notary server failed.") + } + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + // Now, try running with the original client from this new trust server. This should fail because the new root is invalid. + runCmd = exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + + if err == nil { + c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) + } + if !strings.Contains(out, "could not rotate trust to a new trusted root") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } +} + +func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + _, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux) + + // Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace + // itself, but pid>1 should not be able to trace pid1. + _, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net") + if exitCode == 0 { + c.Fatal("ptrace was not successfully restricted by AppArmor") + } +} + +func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor) + + _, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net") + if exitCode != 0 { + c.Fatal("ptrace of self failed.") + } +} + +func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace) + _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo") + if exitCode == 0 { + // If our test failed, attempt to repair the host system... + _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo") + if exitCode == 0 { + c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.") + } + } +} + +func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox", "sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$") +} + +// run create container failed should clean up the container +func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) { + // TODO Windows. This may be possible to enable once link is supported + testRequires(c, DaemonIsLinux) + name := "unique_name" + _, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox") + c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!")) + + containerID, err := inspectFieldWithError(name, "Id") + c.Assert(err, checker.NotNil, check.Commentf("Expected not to have this container: %s!", containerID)) + c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID)) +} + +func (s *DockerSuite) TestRunNamedVolume(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar") + + out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + + out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") +} + +func (s *DockerSuite) TestRunWithUlimits(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n") + ul := strings.TrimSpace(out) + if ul != "42" { + c.Fatalf("expected `ulimit -n` to be 42, got %s", ul) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "test" + name := "cgroup-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "/cgroup-parent/test" + name := "cgroup-test" + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. +func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST" + cleanCgroupParent := "SHOULD_NOT_EXIST" + name := "cgroup-invalid-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. + if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. +func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST" + cleanCgroupParent := "/SHOULD_NOT_EXIST" + name := "cgroup-absolute-invalid-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. + if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + + filename := "/sys/fs/cgroup/devices/test123" + out, _, err := dockerCmdWithError("run", "busybox", "touch", filename) + if err == nil { + c.Fatal("expected cgroup mount point to be read-only, touch file should fail") + } + expected := "Read-only file system" + if !strings.Contains(out, expected) { + c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) + } +} + +func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true") + if err == nil || !strings.Contains(out, "cannot join own network") { + c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out) + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithDNSMacHosts(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top") + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) { + c.Fatalf("run --net=container with --dns should error out") + } + + out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) { + c.Fatalf("run --net=container with --mac-address should error out") + } + + out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) { + c.Fatalf("run --net=container with --add-host should error out") + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -p should error out") + } + + out, _, err = dockerCmdWithError("run", "-P", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -P should error out") + } + + out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) { + c.Fatalf("run --net=container with --expose should error out") + } +} + +func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) { + // Not applicable on Windows which does not support --net=container or --link + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test", "-d", "busybox", "top") + dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top") + dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top") +} + +func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) { + // TODO Windows: This may be possible to convert. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") + + var ( + count = 0 + parts = strings.Split(out, "\n") + ) + + for _, l := range parts { + if l != "" { + count++ + } + } + + if count != 1 { + c.Fatalf("Wrong interface count in container %d", count) + } + + if !strings.HasPrefix(out, "1: lo") { + c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) + } +} + +// Issue #4681 +func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { + if daemonPlatform == "windows" { + dockerCmd(c, "run", "--net=none", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) { + // Windows does not support --net=container + testRequires(c, DaemonIsLinux, ExecSupport) + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top") + out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname") + out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") + + if out1 != out { + c.Fatal("containers with shared net namespace should have same hostname") + } +} + +func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) { + // TODO Windows: Network settings are not currently propagated. This may + // be resolved in the future with the move to libnetwork and CNM. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") + id := strings.TrimSpace(out) + res := inspectField(c, id, "NetworkSettings.Networks.none.IPAddress") + if res != "" { + c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) + } +} + +func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) { + // Not applicable as Windows does not support --net=host + testRequires(c, DaemonIsLinux, NotUserNamespace, NotUserNamespace) + dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top") + dockerCmd(c, "stop", "first") + dockerCmd(c, "stop", "second") +} + +func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork") + dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first") +} + +func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") + // Run and connect containers to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // Check connectivity between containers in testnetwork2 + dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") + // Connect containers to testnetwork2 + dockerCmd(c, "network", "connect", "testnetwork2", "first") + dockerCmd(c, "network", "connect", "testnetwork2", "second") + // Check connectivity between containers + dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") +} + +func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") + // Run 1 container in testnetwork1 and another in testnetwork2 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Check Isolation between containers : ping must fail + _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") + c.Assert(err, check.NotNil) + // Connect first container to testnetwork2 + dockerCmd(c, "network", "connect", "testnetwork2", "first") + // ping must succeed now + _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") + c.Assert(err, check.IsNil) + + // Disconnect first container from testnetwork2 + dockerCmd(c, "network", "disconnect", "testnetwork2", "first") + // ping must fail again + _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestNetworkRmWithActiveContainers(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + // Run and connect containers to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // Network delete with active containers must fail + _, _, err := dockerCmdWithError("network", "rm", "testnetwork1") + c.Assert(err, check.NotNil) + + dockerCmd(c, "stop", "first") + _, _, err = dockerCmdWithError("network", "rm", "testnetwork1") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") + + // Run and connect containers to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // Check connectivity between containers in testnetwork2 + dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") + // Connect containers to testnetwork2 + dockerCmd(c, "network", "connect", "testnetwork2", "first") + dockerCmd(c, "network", "connect", "testnetwork2", "second") + // Check connectivity between containers + dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") + + // Stop second container and test ping failures on both networks + dockerCmd(c, "stop", "second") + _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2") + c.Assert(err, check.NotNil) + + // Start second container and connectivity must be restored on both networks + dockerCmd(c, "start", "second") + dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") + dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") +} + +func (s *DockerSuite) TestContainerWithConflictingHostNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + // Run a container with --net=host + dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Create a network using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + + // Connecting to the user defined network must fail + _, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + // Run second container in first container's network namespace + dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Create a network using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + + // Connecting to the user defined network must fail + out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error()) +} + +func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Create a network using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + + // Connecting to the user defined network must fail + out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error()) + + // create a container connected to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Connect second container to none network. it must fail as well + _, _, err = dockerCmdWithError("network", "connect", "none", "second") + c.Assert(err, check.NotNil) +} + +// #11957 - stdin with no tty does not exit if stdin is not closed even though container exited +func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) { + cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true") + in, err := cmd.StdinPipe() + c.Assert(err, check.IsNil) + defer in.Close() + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + cmd.Stderr = stdout + c.Assert(cmd.Start(), check.IsNil) + + waitChan := make(chan error) + go func() { + waitChan <- cmd.Wait() + }() + + select { + case err := <-waitChan: + c.Assert(err, check.IsNil, check.Commentf(stdout.String())) + case <-time.After(30 * time.Second): + c.Fatal("timeout waiting for command to exit") + } +} + +func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) { + // TODO Windows: This needs validation (error out) in the daemon. + testRequires(c, DaemonIsLinux) + out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n" + if !(strings.Contains(out, expected) || exitCode == 125) { + c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) + } +} + +func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) { + // TODO Windows: This needs validation (error out) in the daemon. + testRequires(c, DaemonIsLinux) + out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n" + if !(strings.Contains(out, expected) || exitCode == 125) { + c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) + } +} + +// TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127' +func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) { + name := "testNonExecutableCmd" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "foo") + _, exit, _ := runCommandWithOutput(runCmd) + stateExitCode := findContainerExitCode(c, name) + if !(exit == 127 && strings.Contains(stateExitCode, "127")) { + c.Fatalf("Run non-executable command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) + } +} + +// TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127. +func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) { + name := "testNonExistingCmd" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/bin/foo") + _, exit, _ := runCommandWithOutput(runCmd) + stateExitCode := findContainerExitCode(c, name) + if !(exit == 127 && strings.Contains(stateExitCode, "127")) { + c.Fatalf("Run non-existing command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) + } +} + +// TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or +// 127 on Windows. The difference is that in Windows, the container must be started +// as that's when the check is made (and yes, by its design...) +func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) { + expected := 126 + if daemonPlatform == "windows" { + expected = 127 + } + name := "testCmdCannotBeInvoked" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/etc") + _, exit, _ := runCommandWithOutput(runCmd) + stateExitCode := findContainerExitCode(c, name) + if !(exit == expected && strings.Contains(stateExitCode, strconv.Itoa(expected))) { + c.Fatalf("Run cmd that cannot be invoked should have errored with code %d, but we got exit: %d, State.ExitCode: %s", expected, exit, stateExitCode) + } +} + +// TestRunNonExistingImage checks that 'docker run foo' exits with error msg 125 and contains 'Unable to find image' +func (s *DockerSuite) TestRunNonExistingImage(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "foo") + out, exit, err := runCommandWithOutput(runCmd) + if !(err != nil && exit == 125 && strings.Contains(out, "Unable to find image")) { + c.Fatalf("Run non-existing image should have errored with 'Unable to find image' code 125, but we got out: %s, exit: %d, err: %s", out, exit, err) + } +} + +// TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed +func (s *DockerSuite) TestDockerFails(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-foo", "busybox") + out, exit, err := runCommandWithOutput(runCmd) + if !(err != nil && exit == 125) { + c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exit, err) + } +} + +// TestRunInvalidReference invokes docker run with a bad reference. +func (s *DockerSuite) TestRunInvalidReference(c *check.C) { + out, exit, _ := dockerCmdWithError("run", "busybox@foo") + if exit == 0 { + c.Fatalf("expected non-zero exist code; received %d", exit) + } + + if !strings.Contains(out, "Error parsing reference") { + c.Fatalf(`Expected "Error parsing reference" in output; got: %s`, out) + } +} + +// Test fix for issue #17854 +func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) { + // Not applicable on Windows as it does not support Linux uid/gid ownership + testRequires(c, DaemonIsLinux) + name := "testetcfileownership" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + RUN echo 'dockerio:x:1001:' >> /etc/group + RUN chown dockerio:dockerio /etc`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that dockerio ownership of /etc is retained at runtime + out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc") + out = strings.TrimSpace(out) + if out != "dockerio:dockerio" { + c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out) + } +} + +func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) { + testRequires(c, DaemonIsLinux) + + expected := "642" + out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj") + oomScoreAdj := strings.TrimSpace(out) + if oomScoreAdj != "642" { + c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj) + } +} + +func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]." + if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } + out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true") + c.Assert(err, check.NotNil) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]." + if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } +} + +func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1") + + // Make sure a bind mount under a shared volume propagated to host. + if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted { + c.Fatalf("Bind mount under shared volume did not propagate to host") + } + + mount.Unmount(path.Join(tmpDir, "mnt1")) +} + +func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Prepare a source directory with file in it. We will bind mount this + // directory and see if file shows up. + tmpDir2, err := ioutil.TempDir("", "volume-source2") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top") + + // Bind mount tmpDir2/ onto tmpDir/mnt1. If mount propagates inside + // container then contents of tmpDir2/slave-testfile should become + // visible at "/volume-dest/mnt1/slave-testfile" + cmd = exec.Command("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile") + + mount.Unmount(path.Join(tmpDir, "mnt1")) + + if out != "Test" { + c.Fatalf("Bind mount under slave volume did not propagate to container") + } +} + +func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, exitCode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile") + c.Assert(exitCode, checker.Not(checker.Equals), 0) + c.Assert(out, checker.Contains, "invalid mount config") +} + +func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { + testRequires(c, DaemonIsLinux) + + testImg := "testvolumecopy" + _, err := buildImage(testImg, ` + FROM busybox + RUN mkdir -p /foo && echo hello > /foo/hello + `, true) + c.Assert(err, check.IsNil) + + dockerCmd(c, "run", "-v", "foo:/foo", testImg) + out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") +} + +func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") + + dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "rm", "-fv", "test") + dockerCmd(c, "volume", "inspect", "test") + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") + + // Remove the parent so there are not other references to the volumes + dockerCmd(c, "rm", "-f", "parent") + // now remove the child and ensure the named volume (and only the named volume) still exists + dockerCmd(c, "rm", "-fv", "child") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) { + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + runSleepingContainer(c, "--name=test", "-p", "8000:8000") + + // Wait until container is fully up and running + c.Assert(waitRun("test"), check.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true") + // We will need the following `inspect` to diagnose the issue if test fails (#21247) + out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test") + out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail") + c.Assert(err, checker.NotNil, check.Commentf("Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2)) + // check for windows error as well + // TODO Windows Post TP5. Fix the error message string + c.Assert(strings.Contains(string(out), "port is already allocated") || + strings.Contains(string(out), "were not connected because a duplicate name exists") || + strings.Contains(string(out), "HNS failed with error : Failed to create endpoint") || + strings.Contains(string(out), "HNS failed with error : The object already exists"), checker.Equals, true, check.Commentf("Output: %s", out)) + dockerCmd(c, "rm", "-f", "test") + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Test for one character directory name case (#20122) +func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-v", "/tmp/q:/foo", "busybox", "sh", "-c", "find /foo") + c.Assert(strings.TrimSpace(out), checker.Equals, "/foo") +} + +func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume + _, err := buildImage("volumecopy", + `FROM busybox + RUN mkdir /foo && echo hello > /foo/bar + CMD cat /foo/bar`, + true, + ) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "volume", "create", "test") + + // test with the nocopy flag + out, _, err := dockerCmdWithError("run", "-v", "test:/foo:nocopy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // test default behavior which is to copy for non-binds + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + // error out when the volume is already populated + out, _, err = dockerCmdWithError("run", "-v", "test:/foo:copy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // do not error out when copy isn't explicitly set even though it's already populated + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + // do not allow copy modes on volumes-from + dockerCmd(c, "run", "--name=test", "-v", "/foo", "busybox", "true") + out, _, err = dockerCmdWithError("run", "--volumes-from=test:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "--volumes-from=test:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // do not allow copy modes on binds + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunDNSInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "nameserver 127.0.0.1" + expectedWarning := "Localhost DNS setting" + out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + c.Assert(stderr, checker.Contains, expectedWarning, check.Commentf("Expected warning on stderr about localhost resolver, but got %q", stderr)) + + expectedOutput = "nameserver 1.2.3.4" + out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + expectedOutput = "search example.com" + out, _ = dockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + expectedOutput = "options timeout:3" + out, _ = dockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "1.2.3.4\textra" + out, _ := dockerCmd(c, "run", "--add-host=extra:1.2.3.4", "--net=host", "busybox", "cat", "/etc/hosts") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSuite) TestRunRmAndWait(c *check.C) { + dockerCmd(c, "run", "--name=test", "--rm", "-d", "busybox", "sh", "-c", "sleep 3;exit 2") + + out, code, err := dockerCmdWithError("wait", "test") + c.Assert(err, checker.IsNil, check.Commentf("out: %s; exit code: %d", out, code)) + c.Assert(out, checker.Equals, "2\n", check.Commentf("exit code: %d", code)) + c.Assert(code, checker.Equals, 0) +} + +// Test case for #23498 +func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an entrypoint" +exec "$@"`, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo") + c.Assert(strings.TrimSpace(out), check.Equals, "foo") + + // CMD will be reset as well (the same as setting a custom entrypoint) + _, _, err = dockerCmdWithError("run", "--entrypoint=", "-t", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "No command specified") +} + +func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { + c.Assert(s.d.StartWithBusybox("--debug", "--default-ulimit=nofile=65535"), checker.IsNil) + + name := "test-A" + _, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.waitRun(name), check.IsNil) + + out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=65535:65535]") + + name = "test-B" + _, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.waitRun(name), check.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=42:42]") +} + +func (s *DockerSuite) TestRunStoppedLoggingDriverNoLeak(c *check.C) { + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Failed to initialize logging driver", check.Commentf("error should be about logging driver, got output %s", out)) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Handles error conditions for --credentialspec. Validating E2E success cases +// requires additional infrastructure (AD for example) on CI servers. +func (s *DockerSuite) TestRunCredentialSpecFailures(c *check.C) { + testRequires(c, DaemonIsWindows) + attempts := []struct{ value, expectedError string }{ + {"rubbish", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"rubbish://", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"file://", "no value supplied for file:// credential spec security option"}, + {"registry://", "no value supplied for registry:// credential spec security option"}, + {`file://c:\blah.txt`, "path cannot be absolute"}, + {`file://doesnotexist.txt`, "The system cannot find the file specified"}, + } + for _, attempt := range attempts { + _, _, err := dockerCmdWithError("run", "--security-opt=credentialspec="+attempt.value, "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf("%s expected non-nil err", attempt.value)) + c.Assert(err.Error(), checker.Contains, attempt.expectedError, check.Commentf("%s expected %s got %s", attempt.value, attempt.expectedError, err)) + } +} + +// Windows specific test to validate credential specs with a well-formed spec. +// Note it won't actually do anything in CI configuration with the spec, but +// it should not fail to run a container. +func (s *DockerSuite) TestRunCredentialSpecWellFormed(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + validCS := readFile(`fixtures\credentialspecs\valid.json`, c) + writeFile(filepath.Join(dockerBasePath, `credentialspecs\valid.json`), validCS, c) + dockerCmd(c, "run", `--security-opt=credentialspec=file://valid.json`, "busybox", "true") +} + +// Windows specific test to ensure that a servicing app container is started +// if necessary once a container exits. It does this by forcing a no-op +// servicing event and verifying the event from Hyper-V-Compute +func (s *DockerSuite) TestRunServicingContainer(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", WindowsBaseImage, "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255") + containerID := strings.TrimSpace(out) + err := waitExited(containerID, 60*time.Second) + c.Assert(err, checker.IsNil) + + cmd := exec.Command("powershell", "echo", `(Get-WinEvent -ProviderName "Microsoft-Windows-Hyper-V-Compute" -FilterXPath 'Event[System[EventID=2010]]' -MaxEvents 1).Message`) + out2, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil) + c.Assert(out2, checker.Contains, `"Servicing":true`, check.Commentf("Servicing container does not appear to have been started: %s", out2)) + c.Assert(out2, checker.Contains, `Windows Container (Servicing)`, check.Commentf("Didn't find 'Windows Container (Servicing): %s", out2)) + c.Assert(out2, checker.Contains, containerID+"_servicing", check.Commentf("Didn't find '%s_servicing': %s", containerID+"_servicing", out2)) +} + +func (s *DockerSuite) TestRunDuplicateMount(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + tmpFile, err := ioutil.TempFile("", "touch-me") + c.Assert(err, checker.IsNil) + defer tmpFile.Close() + + data := "touch-me-foo-bar\n" + if _, err := tmpFile.Write([]byte(data)); err != nil { + c.Fatal(err) + } + + name := "test" + out, _ := dockerCmd(c, "run", "--name", name, "-v", "/tmp:/tmp", "-v", "/tmp:/tmp", "busybox", "sh", "-c", "cat "+tmpFile.Name()+" && ls /") + c.Assert(out, checker.Not(checker.Contains), "tmp:") + c.Assert(out, checker.Contains, data) + + out = inspectFieldJSON(c, name, "Config.Volumes") + c.Assert(out, checker.Contains, "null") +} + +func (s *DockerSuite) TestRunWindowsWithCPUCount(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") +} + +func (s *DockerSuite) TestRunWindowsWithCPUShares(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-shares=1000", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +func (s *DockerSuite) TestRunWindowsWithCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +func (s *DockerSuite) TestRunProcessIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsProcess) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunHypervIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsHyperv) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +// Test for #25099 +func (s *DockerSuite) TestRunEmptyEnv(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOutput := "invalid environment variable:" + + out, _, err := dockerCmdWithError("run", "-e", "", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=foo", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +// #28658 +func (s *DockerSuite) TestSlowStdinClosing(c *check.C) { + name := "testslowstdinclosing" + repeat := 3 // regression happened 50% of the time + for i := 0; i < repeat; i++ { + cmd := exec.Command(dockerBinary, "run", "--rm", "--name", name, "-i", "busybox", "cat") + cmd.Stdin = &delayedReader{} + done := make(chan error, 1) + go func() { + _, err := runCommand(cmd) + done <- err + }() + + select { + case <-time.After(15 * time.Second): + c.Fatal("running container timed out") // cleanup in teardown + case err := <-done: + c.Assert(err, checker.IsNil) + } + } +} + +type delayedReader struct{} + +func (s *delayedReader) Read([]byte) (int, error) { + time.Sleep(500 * time.Millisecond) + return 0, io.EOF +} + +// #28823 (originally #28639) +func (s *DockerSuite) TestRunMountReadOnlyDevShm(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + emptyDir, err := ioutil.TempDir("", "test-read-only-dev-shm") + c.Assert(err, check.IsNil) + defer os.RemoveAll(emptyDir) + out, _, err := dockerCmdWithError("run", "--rm", "--read-only", + "-v", fmt.Sprintf("%s:/dev/shm:ro", emptyDir), + "busybox", "touch", "/dev/shm/foo") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Read-only file system") +} + +// Test case for 29129 +func (s *DockerSuite) TestRunHostnameInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOutput := "foobar\nfoobar" + out, _ := dockerCmd(c, "run", "--net=host", "--hostname=foobar", "busybox", "sh", "-c", `echo $HOSTNAME && hostname`) + c.Assert(strings.TrimSpace(out), checker.Equals, expectedOutput) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go new file mode 100644 index 0000000..e346c19 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go @@ -0,0 +1,1592 @@ +// +build !windows + +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/sysinfo" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #6509 +func (s *DockerSuite) TestRunRedirectStdout(c *check.C) { + checkRedirect := func(command string) { + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), checker.IsNil) + ch := make(chan error) + go func() { + ch <- cmd.Wait() + close(ch) + }() + + select { + case <-time.After(10 * time.Second): + c.Fatal("command timeout") + case err := <-ch: + c.Assert(err, checker.IsNil, check.Commentf("wait err")) + } + } + + checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") + checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") +} + +// Test recursive bind mount works by default +func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { + // /tmp gets permission denied + testRequires(c, NotUserNamespace, SameHostDaemon) + tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a temporary tmpfs mount. + tmpfsDir := filepath.Join(tmpDir, "tmpfs") + c.Assert(os.MkdirAll(tmpfsDir, 0777), checker.IsNil, check.Commentf("failed to mkdir at %s", tmpfsDir)) + c.Assert(mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""), checker.IsNil, check.Commentf("failed to create a tmpfs mount at %s", tmpfsDir)) + + f, err := ioutil.TempFile(tmpfsDir, "touch-me") + c.Assert(err, checker.IsNil) + defer f.Close() + + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, filepath.Base(f.Name()), check.Commentf("Recursive bind mount test failed. Expected file not found")) +} + +func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + if _, err := os.Stat("/dev/snd"); err != nil { + c.Skip("Host does not have /dev/snd") + } + + out, _ := dockerCmd(c, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "timer", check.Commentf("expected output /dev/snd/timer")) + + out, _ = dockerCmd(c, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "seq", check.Commentf("expected output /dev/othersnd/seq")) +} + +// TestRunDetach checks attaching and detaching with the default escape sequence. +func (s *DockerSuite) TestRunAttachDetach(c *check.C) { + name := "attach-detach" + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", name) + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + cmd.Stdin = tty + c.Assert(cmd.Start(), checker.IsNil) + c.Assert(waitRun(name), check.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + + out, err := bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + out, _ = dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container="+name) + // attach and detach event should be monitored + c.Assert(out, checker.Contains, "attach") + c.Assert(out, checker.Contains, "detach") +} + +// TestRunDetach checks attaching and detaching with the escape sequence specified via flags. +func (s *DockerSuite) TestRunAttachDetachFromFlag(c *check.C) { + name := "attach-detach" + keyCtrlA := []byte{1} + keyA := []byte{97} + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +// TestRunDetach checks attaching and detaching with the escape sequence specified via flags. +func (s *DockerSuite) TestRunAttachDetachFromInvalidFlag(c *check.C) { + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "top") + c.Assert(waitRun(name), check.IsNil) + + // specify an invalid detach key, container will ignore it and use default + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-A,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + bufReader := bufio.NewReader(stdout) + out, err := bufReader.ReadString('\n') + if err != nil { + c.Fatal(err) + } + // it should print a warning to indicate the detach key flag is invalid + errStr := "Invalid escape keys (ctrl-A,a) provided" + c.Assert(strings.TrimSpace(out), checker.Equals, errStr) +} + +// TestRunDetach checks attaching and detaching with the escape sequence specified via config file. +func (s *DockerSuite) TestRunAttachDetachFromConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "detachKeys": "ctrl-a,a" + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + // Then do the work + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +// TestRunDetach checks attaching and detaching with the detach flags, making sure it overrides config file +func (s *DockerSuite) TestRunAttachDetachKeysOverrideConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "detachKeys": "ctrl-e,e" + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + // Then do the work + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +func (s *DockerSuite) TestRunAttachInvalidDetachKeySequencePreserved(c *check.C) { + name := "attach-detach" + keyA := []byte{97} + keyB := []byte{98} + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=a,b,c", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + // Invalid escape sequence aba, should print aba in output + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyB); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte("\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "aba" { + c.Fatalf("expected 'aba', got %q", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunWithCPUQuota(c *check.C) { + testRequires(c, cpuCfsQuota) + + file := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + out, _ := dockerCmd(c, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "8000") + + out = inspectField(c, "test", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "8000", check.Commentf("setting the CPU CFS quota failed")) +} + +func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + + file := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + out, _ := dockerCmd(c, "run", "--cpu-period", "50000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000") + + out, _ = dockerCmd(c, "run", "--cpu-period", "0", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "100000") + + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "50000", check.Commentf("setting the CPU CFS period failed")) +} + +func (s *DockerSuite) TestRunWithInvalidCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + out, _, err := dockerCmdWithError("run", "--cpu-period", "900", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "2000000", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "-3", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "--kernel-memory", "50M", "--name", "test1", "busybox", "cat", file) + c.Assert(strings.TrimSpace(stdout), checker.Equals, "52428800") + + out := inspectField(c, "test1", "HostConfig.KernelMemory") + c.Assert(out, check.Equals, "52428800") +} + +func (s *DockerSuite) TestRunWithInvalidKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + out, _, err := dockerCmdWithError("run", "--kernel-memory", "2M", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Minimum kernel memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--kernel-memory", "-16m", "--name", "test2", "busybox", "echo", "test") + c.Assert(err, check.NotNil) + expected = "invalid size" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithCPUShares(c *check.C) { + testRequires(c, cpuShare) + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "run", "--cpu-shares", "1000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoStdoutWithCPUSharesAndMemoryLimit(c *check.C) { + testRequires(c, cpuShare) + testRequires(c, memoryLimitSupport) + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--cpu-shares", "1000", "-m", "32m", "busybox", "echo", "test") + c.Assert(out, checker.Equals, "test\n", check.Commentf("container should've printed 'test'")) +} + +func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.cpus" + out, _ := dockerCmd(c, "run", "--cpuset-cpus", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetCpus") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.mems" + out, _ := dockerCmd(c, "run", "--cpuset-mems", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetMems") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithBlkioWeight(c *check.C) { + testRequires(c, blkioWeight) + + file := "/sys/fs/cgroup/blkio/blkio.weight" + out, _ := dockerCmd(c, "run", "--blkio-weight", "300", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "300") + + out = inspectField(c, "test", "HostConfig.BlkioWeight") + c.Assert(out, check.Equals, "300") +} + +func (s *DockerSuite) TestRunWithInvalidBlkioWeight(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight", "5", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "Range of blkio weight is from 10 to 1000" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioWeightDevice(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight-device", "/dev/sdX:100", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { + testRequires(c, memoryLimitSupport, swapMemorySupport) + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(600 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } +} + +func (s *DockerSuite) TestRunWithMemoryLimit(c *check.C) { + testRequires(c, memoryLimitSupport) + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(stdout), checker.Equals, "33554432") + + out := inspectField(c, "test", "HostConfig.Memory") + c.Assert(out, check.Equals, "33554432") +} + +// TestRunWithoutMemoryswapLimit sets memory limit and disables swap +// memory limit, this means the processes in the container can use +// 16M memory and as much swap memory as they need (if the host +// supports swap memory). +func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + dockerCmd(c, "run", "-m", "32m", "--memory-swap", "-1", "busybox", "true") +} + +func (s *DockerSuite) TestRunWithSwappiness(c *check.C) { + testRequires(c, memorySwappinessSupport) + file := "/sys/fs/cgroup/memory/memory.swappiness" + out, _ := dockerCmd(c, "run", "--memory-swappiness", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.MemorySwappiness") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithSwappinessInvalid(c *check.C) { + testRequires(c, memorySwappinessSupport) + out, _, err := dockerCmdWithError("run", "--memory-swappiness", "101", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Valid memory swappiness range is 0-100" + c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", out, expected)) + + out, _, err = dockerCmdWithError("run", "--memory-swappiness", "-10", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", out, expected)) +} + +func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) { + testRequires(c, memoryReservationSupport) + + file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes" + out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "209715200") + + out = inspectField(c, "test", "HostConfig.MemoryReservation") + c.Assert(out, check.Equals, "209715200") +} + +func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) { + testRequires(c, memoryLimitSupport) + testRequires(c, memoryReservationSupport) + out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", "800M", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Minimum memory limit can not be less than memory reservation limit" + c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation")) + + out, _, err = dockerCmdWithError("run", "--memory-reservation", "1k", "busybox", "true") + c.Assert(err, check.NotNil) + expected = "Minimum memory reservation allowed is 4MB" + c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation")) +} + +func (s *DockerSuite) TestStopContainerSignal(c *check.C) { + out, _ := dockerCmd(c, "run", "--stop-signal", "SIGUSR1", "-d", "busybox", "/bin/sh", "-c", `trap 'echo "exit trapped"; exit 0' USR1; while true; do sleep 1; done`) + containerID := strings.TrimSpace(out) + + c.Assert(waitRun(containerID), checker.IsNil) + + dockerCmd(c, "stop", containerID) + out, _ = dockerCmd(c, "logs", containerID) + + c.Assert(out, checker.Contains, "exit trapped", check.Commentf("Expected `exit trapped` in the log")) +} + +func (s *DockerSuite) TestRunSwapLessThanMemoryLimit(c *check.C) { + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + out, _, err := dockerCmdWithError("run", "-m", "16m", "--memory-swap", "15m", "busybox", "echo", "test") + expected := "Minimum memoryswap limit should be larger than memory limit" + c.Assert(err, check.NotNil) + + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCpusetCpusFlagValue(c *check.C) { + testRequires(c, cgroupCpuset, SameHostDaemon) + + sysInfo := sysinfo.New(true) + cpus, err := parsers.ParseUintList(sysInfo.Cpus) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(cpus)+1; i++ { + if !cpus[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-cpus", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested CPUs are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Cpus) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCpusetMemsFlagValue(c *check.C) { + testRequires(c, cgroupCpuset) + + sysInfo := sysinfo.New(true) + mems, err := parsers.ParseUintList(sysInfo.Mems) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(mems)+1; i++ { + if !mems[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-mems", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested memory nodes are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Mems) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCPUShares(c *check.C) { + testRequires(c, cpuShare, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cpu-shares", "1", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "The minimum allowed cpu-shares is 2" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "-1", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "shares: invalid argument" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "99999999", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "The maximum allowed cpu-shares is" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithDefaultShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm-default" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "67108864") +} + +func (s *DockerSuite) TestRunWithShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm" + out, _ := dockerCmd(c, "run", "--name", name, "--shm-size=1G", "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 1GB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "1073741824") +} + +func (s *DockerSuite) TestRunTmpfsMountsEnsureOrdered(c *check.C) { + tmpFile, err := ioutil.TempFile("", "test") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + out, _ := dockerCmd(c, "run", "--tmpfs", "/run", "-v", tmpFile.Name()+":/run/test", "busybox", "ls", "/run") + c.Assert(out, checker.Contains, "test") +} + +func (s *DockerSuite) TestRunTmpfsMounts(c *check.C) { + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support tmpfs mounts. + testRequires(c, DaemonIsLinux) + if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "busybox", "touch", "/run/somefile"); err != nil { + c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) + } + if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec", "busybox", "touch", "/run/somefile"); err != nil { + c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) + } + if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec,nosuid,rw,size=5k,mode=700", "busybox", "touch", "/run/somefile"); err != nil { + c.Fatalf("/run failed to mount on tmpfs with valid options %q %s", err, out) + } + if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run:foobar", "busybox", "touch", "/run/somefile"); err == nil { + c.Fatalf("/run mounted on tmpfs when it should have vailed within invalid mount option") + } + if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "-v", "/run:/run", "busybox", "touch", "/run/somefile"); err == nil { + c.Fatalf("Should have generated an error saying Duplicate mount points") + } +} + +func (s *DockerSuite) TestRunTmpfsMountsOverrideImageVolumes(c *check.C) { + name := "img-with-volumes" + _, err := buildImage( + name, + ` + FROM busybox + VOLUME /run + RUN touch /run/stuff + `, + true) + if err != nil { + c.Fatal(err) + } + out, _ := dockerCmd(c, "run", "--tmpfs", "/run", name, "ls", "/run") + c.Assert(out, checker.Not(checker.Contains), "stuff") +} + +// Test case for #22420 +func (s *DockerSuite) TestRunTmpfsMountsWithOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOptions := []string{"rw", "nosuid", "nodev", "noexec", "relatime"} + out, _ := dockerCmd(c, "run", "--tmpfs", "/tmp", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + c.Assert(out, checker.Not(checker.Contains), "size=") + + expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + c.Assert(out, checker.Not(checker.Contains), "size=") + + expectedOptions = []string{"rw", "nosuid", "nodev", "relatime", "size=8192k"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,exec,size=8192k", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + + expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,size=8192k,exec,size=4096k,noexec", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + + // We use debian:jessie as there is no findmnt in busybox. Also the output will be in the format of + // TARGET PROPAGATION + // /tmp shared + // so we only capture `shared` here. + expectedOptions = []string{"shared"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:shared", "debian:jessie", "findmnt", "-o", "TARGET,PROPAGATION", "/tmp") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } +} + +func (s *DockerSuite) TestRunSysctls(c *check.C) { + + testRequires(c, DaemonIsLinux) + var err error + + out, _ := dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=1", "--name", "test", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "1") + + out = inspectFieldJSON(c, "test", "HostConfig.Sysctls") + + sysctls := make(map[string]string) + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "1") + + out, _ = dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=0", "--name", "test1", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "0") + + out = inspectFieldJSON(c, "test1", "HostConfig.Sysctls") + + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "0") + + runCmd := exec.Command(dockerBinary, "run", "--sysctl", "kernel.foobar=1", "--name", "test2", "busybox", "cat", "/proc/sys/kernel/foobar") + out, _, _ = runCommandWithOutput(runCmd) + if !strings.Contains(out, "invalid argument") { + c.Fatalf("expected --sysctl to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyUnshare checks that 'docker run --security-opt seccomp=/tmp/profile.json debian:jessie unshare' exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyUnshare(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "unshare", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + if err != nil { + c.Fatal(err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + out, _, _ := runCommandWithOutput(runCmd) + if !strings.Contains(out, "Operation not permitted") { + c.Fatalf("expected unshare with seccomp profile denied to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyChmod checks that 'docker run --security-opt seccomp=/tmp/profile.json busybox chmod 400 /etc/hostname' exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyChmod(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name":"fchmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name": "fchmodat", + "action":"SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "400", "/etc/hostname") + out, _, _ := runCommandWithOutput(runCmd) + if !strings.Contains(out, "Operation not permitted") { + c.Fatalf("expected chmod with seccomp profile denied to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyUnshareUserns checks that 'docker run debian:jessie unshare --map-root-user --user sh -c whoami' with a specific profile to +// deny unhare of a userns exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) + // from sched.h + jsonData := fmt.Sprintf(`{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "unshare", + "action": "SCMP_ACT_ERRNO", + "args": [ + { + "index": 0, + "value": %d, + "op": "SCMP_CMP_EQ" + } + ] + } + ] +}`, uint64(0x10000000)) + tmpFile, err := ioutil.TempFile("", "profile.json") + if err != nil { + c.Fatal(err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") + out, _, _ := runCommandWithOutput(runCmd) + if !strings.Contains(out, "Operation not permitted") { + c.Fatalf("expected unshare userns with seccomp profile denied to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyCloneUserns checks that 'docker run syscall-test' +// with a the default seccomp profile exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyCloneUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + ensureSyscallTest(c) + + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "userns-test", "id") + out, _, err := runCommandWithOutput(runCmd) + if err == nil || !strings.Contains(out, "clone failed: Operation not permitted") { + c.Fatalf("expected clone userns with default seccomp profile denied to fail, got %s: %v", out, err) + } +} + +// TestRunSeccompUnconfinedCloneUserns checks that +// 'docker run --security-opt seccomp=unconfined syscall-test' allows creating a userns. +func (s *DockerSuite) TestRunSeccompUnconfinedCloneUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace, unprivilegedUsernsClone) + ensureSyscallTest(c) + + // make sure running w privileged is ok + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "syscall-test", "userns-test", "id") + if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") { + c.Fatalf("expected clone userns with --security-opt seccomp=unconfined to succeed, got %s: %v", out, err) + } +} + +// TestRunSeccompAllowPrivCloneUserns checks that 'docker run --privileged syscall-test' +// allows creating a userns. +func (s *DockerSuite) TestRunSeccompAllowPrivCloneUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace) + ensureSyscallTest(c) + + // make sure running w privileged is ok + runCmd := exec.Command(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id") + if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") { + c.Fatalf("expected clone userns with --privileged to succeed, got %s: %v", out, err) + } +} + +// TestRunSeccompProfileAllow32Bit checks that 32 bit code can run on x86_64 +// with the default seccomp profile. +func (s *DockerSuite) TestRunSeccompProfileAllow32Bit(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, IsAmd64) + ensureSyscallTest(c) + + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "exit32-test", "id") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("expected to be able to run 32 bit code, got %s: %v", out, err) + } +} + +// TestRunSeccompAllowSetrlimit checks that 'docker run debian:jessie ulimit -v 1048510' succeeds. +func (s *DockerSuite) TestRunSeccompAllowSetrlimit(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + // ulimit uses setrlimit, so we want to make sure we don't break it + runCmd := exec.Command(dockerBinary, "run", "debian:jessie", "bash", "-c", "ulimit -v 1048510") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("expected ulimit with seccomp to succeed, got %s: %v", out, err) + } +} + +func (s *DockerSuite) TestRunSeccompDefaultProfileAcct(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) + ensureSyscallTest(c) + + out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("test 0: expected Operation not permitted, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("test 1: expected Operation not permitted, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "sys_pacct", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 2: expected No such file or directory, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 3: expected No such file or directory, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-drop", "ALL", "--cap-add", "sys_pacct", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 4: expected No such file or directory, got: %s", out) + } +} + +func (s *DockerSuite) TestRunSeccompDefaultProfileNS(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) + ensureSyscallTest(c) + + out, _, err := dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0") + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("test 0: expected Operation not permitted, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello1") + if err != nil || !strings.Contains(out, "hello1") { + c.Fatalf("test 1: expected hello1, got: %s, %v", out, err) + } + + out, _, err = dockerCmdWithError("run", "--cap-drop", "all", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello2") + if err != nil || !strings.Contains(out, "hello2") { + c.Fatalf("test 2: expected hello2, got: %s, %v", out, err) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello3") + if err != nil || !strings.Contains(out, "hello3") { + c.Fatalf("test 3: expected hello3, got: %s, %v", out, err) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 4: expected No such file or directory, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello4") + if err != nil || !strings.Contains(out, "hello4") { + c.Fatalf("test 5: expected hello4, got: %s, %v", out, err) + } +} + +// TestRunNoNewPrivSetuid checks that --security-opt=no-new-privileges prevents +// effective uid transtions on executing setuid binaries. +func (s *DockerSuite) TestRunNoNewPrivSetuid(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) + ensureNNPTest(c) + + // test that running a setuid binary results in no effective uid transition + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000", "nnp-test", "/usr/bin/nnp-test") + if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "EUID=1000") { + c.Fatalf("expected output to contain EUID=1000, got %s: %v", out, err) + } +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChown(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_CHOWN + runCmd := exec.Command(dockerBinary, "run", "busybox", "chown", "100", "/tmp") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_CHOWN + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chown", "100", "/tmp") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_CHOWN + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "chown", "busybox", "chown", "100", "/tmp") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_DAC_OVERRIDE + runCmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "echo test > /etc/passwd") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_DAC_OVERRIDE + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "sh", "-c", "echo test > /etc/passwd") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Permission denied") + // TODO test that root user can drop default capability CAP_DAC_OVERRIDE +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesFowner(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_FOWNER + runCmd := exec.Command(dockerBinary, "run", "busybox", "chmod", "777", "/etc/passwd") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_FOWNER + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chmod", "777", "/etc/passwd") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // TODO test that root user can drop default capability CAP_FOWNER +} + +// TODO CAP_KILL + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetuid(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SETUID + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "setuid-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_SETUID + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setuid-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_SETUID + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "setuid", "syscall-test", "setuid-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetgid(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SETGID + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "setgid-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_SETGID + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setgid-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_SETGID + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "setgid", "syscall-test", "setgid-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +// TODO CAP_SETPCAP + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_NET_BIND_SERVICE + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "socket-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_NET_BIND_SERVICE + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "socket-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Permission denied") + // test that root user can drop default capability CAP_NET_BIND_SERVICE + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "net_bind_service", "syscall-test", "socket-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Permission denied") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_NET_RAW + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "raw-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_NET_RAW + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "raw-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_NET_RAW + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "net_raw", "syscall-test", "raw-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChroot(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SYS_CHROOT + runCmd := exec.Command(dockerBinary, "run", "busybox", "chroot", "/", "/bin/true") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_SYS_CHROOT + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chroot", "/", "/bin/true") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_SYS_CHROOT + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "sys_chroot", "busybox", "chroot", "/", "/bin/true") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesMknod(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_MKNOD + runCmd := exec.Command(dockerBinary, "run", "busybox", "mknod", "/tmp/node", "b", "1", "2") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_MKNOD + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "mknod", "/tmp/node", "b", "1", "2") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_MKNOD + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "mknod", "busybox", "mknod", "/tmp/node", "b", "1", "2") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +// TODO CAP_AUDIT_WRITE +// TODO CAP_SETFCAP + +func (s *DockerSuite) TestRunApparmorProcDirectory(c *check.C) { + testRequires(c, SameHostDaemon, Apparmor) + + // running w seccomp unconfined tests the apparmor profile + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/cgroup") + if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", out, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/attr/current") + if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", out, err) + } +} + +// make sure the default profile can be successfully parsed (using unshare as it is +// something which we know is blocked in the default profile) +func (s *DockerSuite) TestRunSeccompWithDefaultProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + out, _, err := dockerCmdWithError("run", "--security-opt", "seccomp=../profiles/seccomp/default.json", "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "unshare: unshare failed: Operation not permitted") +} + +// TestRunDeviceSymlink checks run with device that follows symlink (#13840 and #22271) +func (s *DockerSuite) TestRunDeviceSymlink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm, SameHostDaemon) + if _, err := os.Stat("/dev/zero"); err != nil { + c.Skip("Host does not have /dev/zero") + } + + // Create a temporary directory to create symlink + tmpDir, err := ioutil.TempDir("", "docker_device_follow_symlink_tests") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a symbolic link to /dev/zero + symZero := filepath.Join(tmpDir, "zero") + err = os.Symlink("/dev/zero", symZero) + c.Assert(err, checker.IsNil) + + // Create a temporary file "temp" inside tmpDir, write some data to "tmpDir/temp", + // then create a symlink "tmpDir/file" to the temporary file "tmpDir/temp". + tmpFile := filepath.Join(tmpDir, "temp") + err = ioutil.WriteFile(tmpFile, []byte("temp"), 0666) + c.Assert(err, checker.IsNil) + symFile := filepath.Join(tmpDir, "file") + err = os.Symlink(tmpFile, symFile) + c.Assert(err, checker.IsNil) + + // Create a symbolic link to /dev/zero, this time with a relative path (#22271) + err = os.Symlink("zero", "/dev/symzero") + if err != nil { + c.Fatal("/dev/symzero creation failed") + } + // We need to remove this symbolic link here as it is created in /dev/, not temporary directory as above + defer os.Remove("/dev/symzero") + + // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 + out, _ := dockerCmd(c, "run", "--device", symZero+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23")) + + // symlink "tmpDir/file" to a file "tmpDir/temp" will result in an error as it is not a device. + out, _, err = dockerCmdWithError("run", "--device", symFile+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(err, check.NotNil) + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "not a device node", check.Commentf("expected output 'not a device node'")) + + // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 (this time check with relative path backed, see #22271) + out, _ = dockerCmd(c, "run", "--device", "/dev/symzero:/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23")) +} + +// TestRunPIDsLimit makes sure the pids cgroup is set with --pids-limit +func (s *DockerSuite) TestRunPIDsLimit(c *check.C) { + testRequires(c, pidsLimit) + + file := "/sys/fs/cgroup/pids/pids.max" + out, _ := dockerCmd(c, "run", "--name", "skittles", "--pids-limit", "4", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "4") + + out = inspectField(c, "skittles", "HostConfig.PidsLimit") + c.Assert(out, checker.Equals, "4", check.Commentf("setting the pids limit failed")) +} + +func (s *DockerSuite) TestRunPrivilegedAllowedDevices(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + file := "/sys/fs/cgroup/devices/devices.list" + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "cat", file) + c.Logf("out: %q", out) + c.Assert(strings.TrimSpace(out), checker.Equals, "a *:* rwm") +} + +func (s *DockerSuite) TestRunUserDeviceAllowed(c *check.C) { + testRequires(c, DaemonIsLinux) + + fi, err := os.Stat("/dev/snd/timer") + if err != nil { + c.Skip("Host does not have /dev/snd/timer") + } + stat, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + c.Skip("Could not stat /dev/snd/timer") + } + + file := "/sys/fs/cgroup/devices/devices.list" + out, _ := dockerCmd(c, "run", "--device", "/dev/snd/timer:w", "busybox", "cat", file) + c.Assert(out, checker.Contains, fmt.Sprintf("c %d:%d w", stat.Rdev/256, stat.Rdev%256)) +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "names": ["chmod", "fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "names": ["fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + jsonData := `{ + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + } + ], + "architectures": [ + "SCMP_ARCH_X32" + ], + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "names": ["chmod", "fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") +} + +func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + // 1) verify I can run containers with the Docker default shipped profile which allows chmod + _, err = s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + // 2) restart the daemon and add a custom seccomp profile in which we deny chmod + err = s.d.Restart("--seccomp-profile=" + tmpFile.Name()) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestRunWithNanoCPUs(c *check.C) { + testRequires(c, cpuCfsQuota, cpuCfsPeriod) + + file1 := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + file2 := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + out, _ := dockerCmd(c, "run", "--cpus", "0.5", "--name", "test", "busybox", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") + + out = inspectField(c, "test", "HostConfig.NanoCpus") + c.Assert(out, checker.Equals, "5e+08", check.Commentf("setting the Nano CPUs failed")) + out = inspectField(c, "test", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _, err := dockerCmdWithError("run", "--cpus", "0.5", "--cpu-quota", "50000", "--cpu-period", "100000", "busybox", "sh") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Conflicting options: Nano CPUs and CPU Period cannot both be set") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go new file mode 100644 index 0000000..70139a5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go @@ -0,0 +1,383 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// save a repo using gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + out, _ := dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + repoTarball, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = strings.NewReader(repoTarball) + out, _, err = runCommandWithOutput(loadCmd) + c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +// save a repo using xz+gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-gz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = strings.NewReader(out) + out, _, err = runCommandWithOutput(loadCmd) + c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +func (s *DockerSuite) TestSaveSingleTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-single-tag-test" + dockerCmd(c, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedImageID := strings.TrimSpace(out) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)), + exec.Command("tar", "t"), + exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID))) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveCheckTimes(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "busybox:latest" + out, _ := dockerCmd(c, "inspect", repoName) + data := []struct { + ID string + Created time.Time + }{} + err := json.Unmarshal([]byte(out), &data) + c.Assert(err, checker.IsNil, check.Commentf("failed to marshal from %q: err %v", repoName, err)) + c.Assert(len(data), checker.Not(checker.Equals), 0, check.Commentf("failed to marshal the data from %q", repoName)) + tarTvTimeFormat := "2006-01-02 15:04" + out, _, err = runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("tar", "tv"), + exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex()))) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveImageId(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-image-id-test" + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedLongImageID := strings.TrimPrefix(strings.TrimSpace(out), "sha256:") + + out, _ = dockerCmd(c, "images", "-q", repoName) + cleanedShortImageID := strings.TrimSpace(out) + + // Make sure IDs are not empty + c.Assert(cleanedLongImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) + c.Assert(cleanedShortImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) + + saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID) + tarCmd := exec.Command("tar", "t") + + var err error + tarCmd.Stdin, err = saveCmd.StdoutPipe() + c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for tar: %v", err)) + grepCmd := exec.Command("grep", cleanedLongImageID) + grepCmd.Stdin, err = tarCmd.StdoutPipe() + c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for grep: %v", err)) + + c.Assert(tarCmd.Start(), checker.IsNil, check.Commentf("tar failed with error: %v", err)) + c.Assert(saveCmd.Start(), checker.IsNil, check.Commentf("docker save failed with error: %v", err)) + defer func() { + saveCmd.Wait() + tarCmd.Wait() + dockerCmd(c, "rmi", repoName) + }() + + out, _, err = runCommandWithOutput(grepCmd) + + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID: %s, %v", out, err)) +} + +// save a repo and try to load it using flags +func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-and-load-repo-flags" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + + deleteImages(repoName) + dockerCmd(c, "commit", name, repoName) + + before, _ := dockerCmd(c, "inspect", repoName) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + + after, _ := dockerCmd(c, "inspect", repoName) + c.Assert(before, checker.Equals, after, check.Commentf("inspect is not the same after a save / load")) +} + +func (s *DockerSuite) TestSaveWithNoExistImage(c *check.C) { + testRequires(c, DaemonIsLinux) + + imgName := "foobar-non-existing-image" + + out, _, err := dockerCmdWithError("save", "-o", "test-img.tar", imgName) + c.Assert(err, checker.NotNil, check.Commentf("save image should fail for non-existing image")) + c.Assert(out, checker.Contains, fmt.Sprintf("No such image: %s", imgName)) +} + +func (s *DockerSuite) TestSaveMultipleNames(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-multi-name-test" + + // Make one image + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName)) + + // Make two images + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), + exec.Command("tar", "xO", "repositories"), + exec.Command("grep", "-q", "-E", "(-one|-two)"), + ) + c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple repos: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { + testRequires(c, DaemonIsLinux) + makeImage := func(from string, tag string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "true") + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID, tag) + imageID := strings.TrimSpace(out) + return imageID + } + + repoName := "foobar-save-multi-images-test" + tagFoo := repoName + ":foo" + tagBar := repoName + ":bar" + + idFoo := makeImage("busybox:latest", tagFoo) + idBar := makeImage("busybox:latest", tagBar) + + deleteImages(repoName) + + // create the archive + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName, "busybox:latest"), + exec.Command("tar", "t")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err)) + + lines := strings.Split(strings.TrimSpace(out), "\n") + var actual []string + for _, l := range lines { + if regexp.MustCompile("^[a-f0-9]{64}\\.json$").Match([]byte(l)) { + actual = append(actual, strings.TrimSuffix(l, ".json")) + } + } + + // make the list of expected layers + out = inspectField(c, "busybox:latest", "Id") + expected := []string{strings.TrimSpace(out), idFoo, idBar} + + // prefixes are not in tar + for i := range expected { + expected[i] = digest.Digest(expected[i]).Hex() + } + + sort.Strings(actual) + sort.Strings(expected) + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contains the right layers: got %v, expected %v, output: %q", actual, expected, out)) +} + +// Issue #6722 #5892 ensure directories are included in changes +func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { + testRequires(c, DaemonIsLinux) + layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + + name := "save-directory-permissions" + tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary directory: %s", err)) + extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") + os.Mkdir(extractionDirectory, 0777) + + defer os.RemoveAll(tmpDir) + _, err = buildImage(name, + `FROM busybox + RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a + RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, + true) + c.Assert(err, checker.IsNil, check.Commentf("%v", err)) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command("tar", "-xf", "-", "-C", extractionDirectory), + ) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and extract image: %s", out)) + + dirs, err := ioutil.ReadDir(extractionDirectory) + c.Assert(err, checker.IsNil, check.Commentf("failed to get a listing of the layer directories: %s", err)) + + found := false + for _, entry := range dirs { + var entriesSansDev []string + if entry.IsDir() { + layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") + + f, err := os.Open(layerPath) + c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err)) + defer f.Close() + + entries, err := listTar(f) + for _, e := range entries { + if !strings.Contains(e, "dev/") { + entriesSansDev = append(entriesSansDev, e) + } + } + c.Assert(err, checker.IsNil, check.Commentf("encountered error while listing tar entries: %s", err)) + + if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { + found = true + break + } + } + } + + c.Assert(found, checker.Equals, true, check.Commentf("failed to find the layer with the right content listing")) + +} + +// Test loading a weird image where one of the layers is of zero size. +// The layer.tar file is actually zero bytes, no padding or anything else. +// See issue: 18170 +func (s *DockerSuite) TestLoadZeroSizeLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "load", "-i", "fixtures/load/emptyLayer.tar") +} + +func (s *DockerSuite) TestSaveLoadParents(c *check.C) { + testRequires(c, DaemonIsLinux) + + makeImage := func(from string, addfile string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "touch", addfile) + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + imageID := strings.TrimSpace(out) + + dockerCmd(c, "rm", "-f", cleanedContainerID) + return imageID + } + + idFoo := makeImage("busybox", "foo") + idBar := makeImage(idFoo, "bar") + + tmpDir, err := ioutil.TempDir("", "save-load-parents") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + c.Log("tmpdir", tmpDir) + + outfile := filepath.Join(tmpDir, "out.tar") + + dockerCmd(c, "save", "-o", outfile, idBar, idFoo) + dockerCmd(c, "rmi", idBar) + dockerCmd(c, "load", "-i", outfile) + + inspectOut := inspectField(c, idBar, "Parent") + c.Assert(inspectOut, checker.Equals, idFoo) + + inspectOut = inspectField(c, idFoo, "Parent") + c.Assert(inspectOut, checker.Equals, "") +} + +func (s *DockerSuite) TestSaveLoadNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "saveloadnotag" + + _, err := buildImage(name, "FROM busybox\nENV foo=bar", true) + c.Assert(err, checker.IsNil, check.Commentf("%v", err)) + + id := inspectField(c, name, "Id") + + // Test to make sure that save w/o name just shows imageID during load + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", id), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + + // Should not show 'name' but should show the image ID during the load + c.Assert(out, checker.Not(checker.Contains), "Loaded image: ") + c.Assert(out, checker.Contains, "Loaded image ID:") + c.Assert(out, checker.Contains, id) + + // Test to make sure that save by name shows that name during load + out, _, err = runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + c.Assert(out, checker.Contains, "Loaded image: "+name+":latest") + c.Assert(out, checker.Not(checker.Contains), "Loaded image ID:") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go new file mode 100644 index 0000000..22445e5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go @@ -0,0 +1,109 @@ +// +build !windows + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// save a repo and try to load it using stdout +func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { + name := "test-save-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + before, _ := dockerCmd(c, "commit", name, repoName) + before = strings.TrimRight(before, "\n") + + tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar") + c.Assert(err, check.IsNil) + defer os.Remove(tmpFile.Name()) + + saveCmd := exec.Command(dockerBinary, "save", repoName) + saveCmd.Stdout = tmpFile + + _, err = runCommand(saveCmd) + c.Assert(err, check.IsNil) + + tmpFile, err = os.Open(tmpFile.Name()) + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = tmpFile + + out, _, err := runCommandWithOutput(loadCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + after := inspectField(c, repoName, "Id") + after = strings.TrimRight(after, "\n") + + c.Assert(after, check.Equals, before) //inspect is not the same after a save / load + + deleteImages(repoName) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + cmd := exec.Command(dockerBinary, "save", repoName) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), check.IsNil) + c.Assert(cmd.Wait(), check.NotNil) //did not break writing to a TTY + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + c.Assert(err, check.IsNil) //could not read tty output + c.Assert(string(buf[:n]), checker.Contains, "Cowardly refusing", check.Commentf("help output is not being yielded", out)) +} + +func (s *DockerSuite) TestSaveAndLoadWithProgressBar(c *check.C) { + name := "test-load" + _, err := buildImage(name, ` + FROM busybox + RUN touch aa + `, true) + c.Assert(err, check.IsNil) + + tmptar := name + ".tar" + dockerCmd(c, "save", "-o", tmptar, name) + defer os.Remove(tmptar) + + dockerCmd(c, "rmi", name) + dockerCmd(c, "tag", "busybox", name) + out, _ := dockerCmd(c, "load", "-i", tmptar) + expected := fmt.Sprintf("The image %s:latest already exists, renaming the old one with ID", name) + c.Assert(out, checker.Contains, expected) +} + +// fail because load didn't receive data from stdin +func (s *DockerSuite) TestLoadNoStdinFail(c *check.C) { + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + cmd := exec.CommandContext(ctx, dockerBinary, "load") + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Run(), check.NotNil) // docker-load should fail + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + c.Assert(err, check.IsNil) //could not read tty output + c.Assert(string(buf[:n]), checker.Contains, "requested load from stdin, but stdin is empty") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go new file mode 100644 index 0000000..5a32f2a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go @@ -0,0 +1,131 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// search for repos named "registry" on the central registry +func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + out, _ := dockerCmd(c, "search", "busybox") + c.Assert(out, checker.Contains, "Busybox base image.", check.Commentf("couldn't find any repository named (or containing) 'Busybox base image.'")) +} + +func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) { + out, _, err := dockerCmdWithError("search", "--filter", "stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "is-automated=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "is-official=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + // -s --stars deprecated since Docker 1.13 + out, _, err = dockerCmdWithError("search", "--stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning")) + + // -s --stars deprecated since Docker 1.13 + out, _, err = dockerCmdWithError("search", "-s=-1", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning")) +} + +func (s *DockerSuite) TestSearchCmdOptions(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + out, _ := dockerCmd(c, "search", "--help") + c.Assert(out, checker.Contains, "Usage:\tdocker search [OPTIONS] TERM") + + outSearchCmd, _ := dockerCmd(c, "search", "busybox") + outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox") + + c.Assert(len(outSearchCmd) > len(outSearchCmdNotrunc), check.Equals, false, check.Commentf("The no-trunc option can't take effect.")) + + outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. + outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n") + for i := range outSearchCmdautomatedSlice { + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) + } + + outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image. + outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n") + for i := range outSearchCmdNotOfficialSlice { + c.Assert(strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial)) + } + + outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image. + outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n") + c.Assert(outSearchCmdOfficialSlice, checker.HasLen, 3) // 1 header, 1 line, 1 carriage return + c.Assert(strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), check.Equals, true, check.Commentf("The busybox is an OFFICIAL image: %s", outSearchCmdNotOfficial)) + + outSearchCmdStars, _ := dockerCmd(c, "search", "--filter", "stars=2", "busybox") + c.Assert(strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars)) + + dockerCmd(c, "search", "--filter", "is-automated=true", "--filter", "stars=2", "--no-trunc=true", "busybox") + + // --automated deprecated since Docker 1.13 + outSearchCmdautomated1, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. + outSearchCmdautomatedSlice1 := strings.Split(outSearchCmdautomated1, "\n") + for i := range outSearchCmdautomatedSlice1 { + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice1[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) + } + + // -s --stars deprecated since Docker 1.13 + outSearchCmdStars1, _ := dockerCmd(c, "search", "--stars=2", "busybox") + c.Assert(strings.Count(outSearchCmdStars1, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars1)) + + // -s --stars deprecated since Docker 1.13 + dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox") +} + +// search for repos which start with "ubuntu-" on the central registry +func (s *DockerSuite) TestSearchOnCentralRegistryWithDash(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + dockerCmd(c, "search", "ubuntu-") +} + +// test case for #23055 +func (s *DockerSuite) TestSearchWithLimit(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + limit := 10 + out, _, err := dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice := strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 50 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice = strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 100 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice = strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 0 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.Not(checker.IsNil)) + + limit = 200 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.Not(checker.IsNil)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go new file mode 100644 index 0000000..b79fdbe --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go @@ -0,0 +1,131 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) +} + +func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + c.Assert(len(secret.Spec.Labels), checker.Equals, 2) + c.Assert(secret.Spec.Labels["key1"], checker.Equals, "value1") + c.Assert(secret.Spec.Labels["key2"], checker.Equals, "value2") +} + +// Test case for 28884 +func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: name, + }, + []byte("foo"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + fake := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: id, + }, + []byte("fake foo"), + }) + c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", fake)) + + out, err := d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, fake) + + out, err = d.Cmd("secret", "rm", id) + c.Assert(out, checker.Contains, id) + + // Fake one will remain + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on name prefix of the fake one + // (which is the same as the ID of foo one) should not work + // as search is only done based on: + // - Full ID + // - Full Name + // - Partial ID (prefix) + out, err = d.Cmd("secret", "rm", id[:5]) + c.Assert(out, checker.Not(checker.Contains), id) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on ID prefix of the fake one should succeed + out, err = d.Cmd("secret", "rm", fake[:5]) + c.Assert(out, checker.Contains, fake) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Not(checker.Contains), id) + c.Assert(out, checker.Not(checker.Contains), fake) +} + +func (s *DockerSwarmSuite) TestSecretCreateWithFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testFile, err := ioutil.TempFile("", "secretCreateTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(testFile.Name()) + + testData := "TESTINGDATA" + _, err = testFile.Write([]byte(testData)) + c.Assert(err, checker.IsNil, check.Commentf("failed to write to temporary file")) + + testName := "test_secret" + out, err := d.Cmd("secret", "create", testName, testFile.Name()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "", check.Commentf(out)) + + id := strings.TrimSpace(out) + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go new file mode 100644 index 0000000..0985a2b --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go @@ -0,0 +1,68 @@ +// +build !windows + +package main + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + + out, err := d.Cmd("secret", "inspect", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 1) +} + +func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { + d := s.AddDaemon(c, true, true) + + testNames := []string{ + "test0", + "test1", + } + for _, n := range testNames { + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: n, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, n) + + } + + args := []string{ + "secret", + "inspect", + } + args = append(args, testNames...) + out, err := d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 2) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go new file mode 100644 index 0000000..9e8b1e9 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go @@ -0,0 +1,175 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.getTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "foo") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeVolume) + c.Assert(mountConfig[0].VolumeOptions, checker.NotNil) + c.Assert(mountConfig[0].VolumeOptions.NoCopy, checker.True) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeVolume) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-secret" + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", testName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testName) + c.Assert(refs[0].File.UID, checker.Equals, "0") + c.Assert(refs[0].File.GID, checker.Equals, "0") +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-secret" + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + testTarget := "testing" + + out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", fmt.Sprintf("source=%s,target=%s", testName, testTarget), "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) +} + +func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f /dev/null") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.getTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mountConfig[0].TmpfsOptions, checker.NotNil) + c.Assert(mountConfig[0].TmpfsOptions.SizeBytes, checker.Equals, int64(1048576)) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mounts[0].Name, checker.Equals, "") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) + + out, err = s.nodeCmd(c, task.NodeID, "logs", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.HasPrefix, "tmpfs on /foo type tmpfs") + c.Assert(strings.TrimSpace(out), checker.Contains, "size=1024k") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go new file mode 100644 index 0000000..30580f6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go @@ -0,0 +1,191 @@ +// +build !windows + +package main + +import ( + "strconv" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/executor/container" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// start a service, and then make its task unhealthy during running +// finally, unhealthy task should be detected and killed +func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // build image with health-check + // note: use `daemon.buildImageWithOut` to build, do not use `buildImage` to build + imageName := "testhealth" + _, _, err := d.buildImageWithOut(imageName, + `FROM busybox + RUN touch /status + HEALTHCHECK --interval=1s --timeout=1s --retries=1\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceRun" + out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) + containerID := task.Status.ContainerStatus.ContainerID + + // wait for container to be healthy + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) + return strings.TrimSpace(out), nil + }, checker.Equals, "healthy") + + // make it fail + d.Cmd("exec", containerID, "rm", "/status") + // wait for container to be unhealthy + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) + return strings.TrimSpace(out), nil + }, checker.Equals, "unhealthy") + + // Task should be terminated + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateFailed) + + if !strings.Contains(task.Status.Err, container.ErrContainerUnhealthy.Error()) { + c.Fatal("unhealthy task exits because of other error") + } +} + +// start a service whose task is unhealthy at beginning +// its tasks should be blocked in starting stage, until health check is passed +func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // service started from this image won't pass health check + imageName := "testhealth" + _, _, err := d.buildImageWithOut(imageName, + `FROM busybox + HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceStart" + out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateStarting) + + containerID := task.Status.ContainerStatus.ContainerID + + // wait for health check to work + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID) + failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) + return failingStreak, nil + }, checker.GreaterThan, 0) + + // task should be blocked at starting status + task = d.getTask(c, task.ID) + c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) + + // make it healthy + d.Cmd("exec", containerID, "touch", "/status") + + // Task should be at running status + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) +} + +// start a service whose task is unhealthy at beginning +// its tasks should be blocked in starting stage, until health check is passed +func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // service started from this image won't pass health check + imageName := "testhealth" + _, _, err := d.buildImageWithOut(imageName, + `FROM busybox + HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceStart" + out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateStarting) + + containerID := task.Status.ContainerStatus.ContainerID + + // wait for health check to work + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID) + failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) + return failingStreak, nil + }, checker.GreaterThan, 0) + + // task should be blocked at starting status + task = d.getTask(c, task.ID) + c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) + + // make it healthy + d.Cmd("exec", containerID, "touch", "/status") + // Task should be at running status + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go new file mode 100644 index 0000000..c221654 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go @@ -0,0 +1,96 @@ +// +build !windows + +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +type logMessage struct { + err error + data []byte +} + +func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) { + testRequires(c, ExperimentalDaemon) + + d := s.AddDaemon(c, true, true) + + // we have multiple services here for detecting the goroutine issue #28915 + services := map[string]string{ + "TestServiceLogs1": "hello1", + "TestServiceLogs2": "hello2", + } + + for name, message := range services { + out, err := d.Cmd("service", "create", "--name", name, "busybox", + "sh", "-c", fmt.Sprintf("echo %s; tail -f /dev/null", message)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + } + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, + d.checkActiveContainerCount, checker.Equals, len(services)) + + for name, message := range services { + out, err := d.Cmd("service", "logs", name) + c.Assert(err, checker.IsNil) + c.Logf("log for %q: %q", name, out) + c.Assert(out, checker.Contains, message) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) { + testRequires(c, ExperimentalDaemon) + + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsFollow" + + out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + args := []string{"service", "logs", "-f", name} + cmd := exec.Command(dockerBinary, d.prependHostArg(args)...) + r, w := io.Pipe() + cmd.Stdout = w + cmd.Stderr = w + c.Assert(cmd.Start(), checker.IsNil) + + // Make sure pipe is written to + ch := make(chan *logMessage) + done := make(chan struct{}) + go func() { + reader := bufio.NewReader(r) + for { + msg := &logMessage{} + msg.data, _, msg.err = reader.ReadLine() + select { + case ch <- msg: + case <-done: + return + } + } + }() + + for i := 0; i < 3; i++ { + msg := <-ch + c.Assert(msg.err, checker.IsNil) + c.Assert(string(msg.data), checker.Contains, "log test") + } + close(done) + + c.Assert(cmd.Process.Kill(), checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go new file mode 100644 index 0000000..29cca23 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go @@ -0,0 +1,57 @@ +// +build !windows + +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceScale(c *check.C) { + d := s.AddDaemon(c, true, true) + + service1Name := "TestService1" + service1Args := append([]string{"service", "create", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // global mode + service2Name := "TestService2" + service2Args := append([]string{"service", "create", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // Create services + out, err := d.Cmd(service1Args...) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd(service2Args...) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "scale", "TestService1=2") + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "scale", "TestService1=foobar") + c.Assert(err, checker.NotNil) + + str := fmt.Sprintf("%s: invalid replicas value %s", service1Name, "foobar") + if !strings.Contains(out, str) { + c.Errorf("got: %s, expected has sub string: %s", out, str) + } + + out, err = d.Cmd("service", "scale", "TestService1=-1") + c.Assert(err, checker.NotNil) + + str = fmt.Sprintf("%s: invalid replicas value %s", service1Name, "-1") + if !strings.Contains(out, str) { + c.Errorf("got: %s, expected has sub string: %s", out, str) + } + + // TestService2 is a global mode + out, err = d.Cmd("service", "scale", "TestService2=2") + c.Assert(err, checker.NotNil) + + str = fmt.Sprintf("%s: scale can only be used with replicated mode\n", service2Name) + if out != str { + c.Errorf("got: %s, expected: %s", out, str) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go new file mode 100644 index 0000000..837370c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go @@ -0,0 +1,130 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "TestServiceUpdatePort" + serviceArgs := append([]string{"service", "create", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // Create a service with a port mapping of 8080:8081. + out, err := d.Cmd(serviceArgs...) + c.Assert(err, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. + _, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName) + c.Assert(err, checker.IsNil) + + // Inspect the service and verify port mapping + expected := []swarm.PortConfig{ + { + Protocol: "tcp", + PublishedPort: 8082, + TargetPort: 8083, + PublishMode: "ingress", + }, + } + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.EndpointSpec.Ports }}", serviceName) + c.Assert(err, checker.IsNil) + + var portConfig []swarm.PortConfig + if err := json.Unmarshal([]byte(out), &portConfig); err != nil { + c.Fatalf("invalid JSON in inspect result: %v (%s)", err, out) + } + c.Assert(portConfig, checker.DeepEquals, expected) +} + +func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--name=test", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service := d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 0) + + // add label to empty set + out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") + + // add label to non-empty set + out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 2) + c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar") + + out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo2"], checker.Equals, "") + + out, err = d.Cmd("service", "update", "test", "--label-rm", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 0) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "") + + // now make sure we can add again + out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") +} + +func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { + d := s.AddDaemon(c, true, true) + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + testTarget := "testing" + serviceName := "test" + + out, err := d.Cmd("service", "create", "--name", serviceName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // add secret + out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) + + // remove + out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 0) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go new file mode 100644 index 0000000..fb896d5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/http/httptest" + "net/url" + "os/exec" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestClientSetsTLSServerName(c *check.C) { + c.Skip("Flakey test") + // there may be more than one hit to the server for each registry request + serverNameReceived := []string{} + var serverName string + + virtualHostServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverNameReceived = append(serverNameReceived, r.TLS.ServerName) + })) + defer virtualHostServer.Close() + // discard TLS handshake errors written by default to os.Stderr + virtualHostServer.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + + u, err := url.Parse(virtualHostServer.URL) + c.Assert(err, check.IsNil) + hostPort := u.Host + serverName = strings.Split(hostPort, ":")[0] + + repoName := fmt.Sprintf("%v/dockercli/image:latest", hostPort) + cmd := exec.Command(dockerBinary, "pull", repoName) + cmd.Run() + + // check that the fake server was hit at least once + c.Assert(len(serverNameReceived) > 0, check.Equals, true) + // check that for each hit the right server name was received + for _, item := range serverNameReceived { + c.Check(item, check.Equals, serverName) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go new file mode 100644 index 0000000..fd9b154 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go @@ -0,0 +1,186 @@ +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "sort" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestStackRemoveUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "remove", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackPSUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "ps", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackServicesUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "services", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackDeployComposeFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testStackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/default.yaml", + testStackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("stack", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n"+"testdeploy 2\n") + + out, err = d.Cmd("stack", "rm", testStackName) + c.Assert(err, checker.IsNil) + out, err = d.Cmd("stack", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n") +} + +func (s *DockerSwarmSuite) TestStackDeployWithSecretsTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("secret", "create", "outside", "fixtures/secrets/default") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + testStackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/secrets.yaml", + testStackName, + } + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", "testdeploy_web") + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 3) + + sort.Sort(sortSecrets(refs)) + c.Assert(refs[0].SecretName, checker.Equals, "outside") + c.Assert(refs[1].SecretName, checker.Equals, "testdeploy_special") + c.Assert(refs[1].File.Name, checker.Equals, "special") + c.Assert(refs[2].SecretName, checker.Equals, "testdeploy_super") + c.Assert(refs[2].File.Name, checker.Equals, "foo.txt") + c.Assert(refs[2].File.Mode, checker.Equals, os.FileMode(0400)) + + // Deploy again to ensure there are no errors when secret hasn't changed + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestStackRemove(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/remove.yaml", + stackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("stack", "ps", stackName) + c.Assert(err, checker.IsNil) + c.Assert(strings.Split(strings.TrimSpace(out), "\n"), checker.HasLen, 2) + + out, err = d.Cmd("stack", "rm", stackName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Removing service testdeploy_web") + c.Assert(out, checker.Contains, "Removing network testdeploy_default") + c.Assert(out, checker.Contains, "Removing secret testdeploy_special") +} + +type sortSecrets []swarm.SecretReference + +func (s sortSecrets) Len() int { return len(s) } +func (s sortSecrets) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s sortSecrets) Less(i, j int) bool { return s[i].SecretName < s[j].SecretName } + +// testDAB is the DAB JSON used for testing. +// TODO: Use template/text and substitute "Image" with the result of +// `docker inspect --format '{{index .RepoDigests 0}}' busybox:latest` +const testDAB = `{ + "Version": "0.1", + "Services": { + "srv1": { + "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", + "Command": ["top"] + }, + "srv2": { + "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", + "Command": ["tail"], + "Args": ["-f", "/dev/null"] + } + } +}` + +func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) { + testRequires(c, ExperimentalDaemon) + // setup + testStackName := "test" + testDABFileName := testStackName + ".dab" + defer os.RemoveAll(testDABFileName) + err := ioutil.WriteFile(testDABFileName, []byte(testDAB), 0444) + c.Assert(err, checker.IsNil) + d := s.AddDaemon(c, true, true) + // deploy + stackArgs := []string{ + "stack", "deploy", + "--bundle-file", testDABFileName, + testStackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Loading bundle from test.dab\n") + c.Assert(out, checker.Contains, "Creating service test_srv1\n") + c.Assert(out, checker.Contains, "Creating service test_srv2\n") + // ls + stackArgs = []string{"stack", "ls"} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n"+"test 2\n") + // rm + stackArgs = []string{"stack", "rm", testStackName} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Removing service test_srv1\n") + c.Assert(out, checker.Contains, "Removing service test_srv2\n") + // ls (empty) + stackArgs = []string{"stack", "ls"} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go new file mode 100644 index 0000000..b1cea35 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go @@ -0,0 +1,199 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Regression test for https://github.com/docker/docker/issues/7843 +func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) { + // Windows does not support link + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test", "busybox") + + // Expect this to fail because the above container is stopped, this is what we want + out, _, err := dockerCmdWithError("run", "--name", "test2", "--link", "test:test", "busybox") + // err shouldn't be nil because container test2 try to link to stopped container + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + ch := make(chan error) + go func() { + // Attempt to start attached to the container that won't start + // This should return an error immediately since the container can't be started + if out, _, err := dockerCmdWithError("start", "-a", "test2"); err == nil { + ch <- fmt.Errorf("Expected error but got none:\n%s", out) + } + close(ch) + }() + + select { + case err := <-ch: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatalf("Attach did not exit properly") + } +} + +// gh#8555: Exit code should be passed through when using start -a +func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") + out = strings.TrimSpace(out) + + // make sure the container has exited before trying the "start -a" + dockerCmd(c, "wait", out) + + startOut, exitCode, err := dockerCmdWithError("start", "-a", out) + // start command should fail + c.Assert(err, checker.NotNil, check.Commentf("startOut: %s", startOut)) + // start -a did not respond with proper exit code + c.Assert(exitCode, checker.Equals, 1, check.Commentf("startOut: %s", startOut)) + +} + +func (s *DockerSuite) TestStartAttachSilent(c *check.C) { + name := "teststartattachcorrectexitcode" + dockerCmd(c, "run", "--name", name, "busybox", "echo", "test") + + // make sure the container has exited before trying the "start -a" + dockerCmd(c, "wait", name) + + startOut, _ := dockerCmd(c, "start", "-a", name) + // start -a produced unexpected output + c.Assert(startOut, checker.Equals, "test\n") +} + +func (s *DockerSuite) TestStartRecordError(c *check.C) { + // TODO Windows CI: Requires further porting work. Should be possible. + testRequires(c, DaemonIsLinux) + // when container runs successfully, we should not have state.Error + dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") + stateErr := inspectField(c, "test", "State.Error") + // Expected to not have state error + c.Assert(stateErr, checker.Equals, "") + + // Expect this to fail and records error because of ports conflict + out, _, err := dockerCmdWithError("run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top") + // err shouldn't be nil because docker run will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + stateErr = inspectField(c, "test2", "State.Error") + c.Assert(stateErr, checker.Contains, "port is already allocated") + + // Expect the conflict to be resolved when we stop the initial container + dockerCmd(c, "stop", "test") + dockerCmd(c, "start", "test2") + stateErr = inspectField(c, "test2", "State.Error") + // Expected to not have state error but got one + c.Assert(stateErr, checker.Equals, "") +} + +func (s *DockerSuite) TestStartPausedContainer(c *check.C) { + // Windows does not support pausing containers + testRequires(c, IsPausable) + defer unpauseAllContainers() + + runSleepingContainer(c, "-d", "--name", "testing") + + dockerCmd(c, "pause", "testing") + + out, _, err := dockerCmdWithError("start", "testing") + // an error should have been shown that you cannot start paused container + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // an error should have been shown that you cannot start paused container + c.Assert(out, checker.Contains, "Cannot start a paused container, try unpause instead.") +} + +func (s *DockerSuite) TestStartMultipleContainers(c *check.C) { + // Windows does not support --link + testRequires(c, DaemonIsLinux) + // run a container named 'parent' and create two container link to `parent` + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + for _, container := range []string{"child_first", "child_second"} { + dockerCmd(c, "create", "--name", container, "--link", "parent:parent", "busybox", "top") + } + + // stop 'parent' container + dockerCmd(c, "stop", "parent") + + out := inspectField(c, "parent", "State.Running") + // Container should be stopped + c.Assert(out, checker.Equals, "false") + + // start all the three containers, container `child_first` start first which should be failed + // container 'parent' start second and then start container 'child_second' + expOut := "Cannot link to a non running container" + expErr := "failed to start containers: [child_first]" + out, _, err := dockerCmdWithError("start", "child_first", "parent", "child_second") + // err shouldn't be nil because start will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // output does not correspond to what was expected + if !(strings.Contains(out, expOut) || strings.Contains(err.Error(), expErr)) { + c.Fatalf("Expected out: %v with err: %v but got out: %v with err: %v", expOut, expErr, out, err) + } + + for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} { + out := inspectField(c, container, "State.Running") + // Container running state wrong + c.Assert(out, checker.Equals, expected) + } +} + +func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { + // run multiple containers to test + for _, container := range []string{"test1", "test2", "test3"} { + runSleepingContainer(c, "--name", container) + } + + // stop all the containers + for _, container := range []string{"test1", "test2", "test3"} { + dockerCmd(c, "stop", container) + } + + // test start and attach multiple containers at once, expected error + for _, option := range []string{"-a", "-i", "-ai"} { + out, _, err := dockerCmdWithError("start", option, "test1", "test2", "test3") + // err shouldn't be nil because start will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // output does not correspond to what was expected + c.Assert(out, checker.Contains, "You cannot start and attach multiple containers at once.") + } + + // confirm the state of all the containers be stopped + for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} { + out := inspectField(c, container, "State.Running") + // Container running state wrong + c.Assert(out, checker.Equals, expected) + } +} + +// Test case for #23716 +func (s *DockerSuite) TestStartAttachWithRename(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "-t", "--name", "before", "busybox") + go func() { + c.Assert(waitRun("before"), checker.IsNil) + dockerCmd(c, "rename", "before", "after") + dockerCmd(c, "stop", "--time=2", "after") + }() + _, stderr, _, _ := runCommandWithStdoutStderr(exec.Command(dockerBinary, "start", "-a", "before")) + c.Assert(stderr, checker.Not(checker.Contains), "No such container") +} + +func (s *DockerSuite) TestStartReturnCorrectExitCode(c *check.C) { + dockerCmd(c, "create", "--restart=on-failure:2", "--name", "withRestart", "busybox", "sh", "-c", "exit 11") + dockerCmd(c, "create", "--rm", "--name", "withRm", "busybox", "sh", "-c", "exit 12") + + _, exitCode, err := dockerCmdWithError("start", "-a", "withRestart") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 11) + _, exitCode, err = dockerCmdWithError("start", "-a", "withRm") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 12) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go new file mode 100644 index 0000000..5cb1a3e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go @@ -0,0 +1,159 @@ +package main + +import ( + "bufio" + "os/exec" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestStatsNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id) + type output struct { + out []byte + err error + } + + ch := make(chan output) + go func() { + out, err := statsCmd.Output() + ch <- output{out, err} + }() + + select { + case outerr := <-ch: + c.Assert(outerr.err, checker.IsNil, check.Commentf("Error running stats: %v", outerr.err)) + c.Assert(string(outerr.out), checker.Contains, id) //running container wasn't present in output + case <-time.After(3 * time.Second): + statsCmd.Process.Kill() + c.Fatalf("stats did not return immediately when not streaming") + } +} + +func (s *DockerSuite) TestStatsContainerNotFound(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _, err := dockerCmdWithError("stats", "notfound") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats, got %q instead", out)) + + out, _, err = dockerCmdWithError("stats", "--no-stream", "notfound") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats with --no-stream, got %q instead", out)) +} + +func (s *DockerSuite) TestStatsAllRunningNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id3 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id3), check.IsNil) + dockerCmd(c, "stop", id3) + + out, _ = dockerCmd(c, "stats", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + if strings.Contains(out, id3) { + c.Fatalf("Did not expect %s in stats, got %s", id3, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + // outLines[2] is id1's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) + // check stat result of id1 contains real data + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) +} + +func (s *DockerSuite) TestStatsAllNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + dockerCmd(c, "stop", id1) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + + out, _ = dockerCmd(c, "stats", "--all", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result of %s is empty: %s", id2, out)) + // check stat result of id1 contains all zero + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.IsNil, check.Commentf("stat result of %s should be empty : %s", id1, out)) +} + +func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + id := make(chan string) + addedChan := make(chan struct{}) + + runSleepingContainer(c, "-d") + statsCmd := exec.Command(dockerBinary, "stats") + stdout, err := statsCmd.StdoutPipe() + c.Assert(err, check.IsNil) + c.Assert(statsCmd.Start(), check.IsNil) + defer statsCmd.Process.Kill() + + go func() { + containerID := <-id + matchID := regexp.MustCompile(containerID) + + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + switch { + case matchID.MatchString(scanner.Text()): + close(addedChan) + return + } + } + }() + + out, _ := runSleepingContainer(c, "-d") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + id <- strings.TrimSpace(out)[:12] + + select { + case <-time.After(30 * time.Second): + c.Fatal("failed to observe new container created added to stats") + case <-addedChan: + // ignore, done + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go new file mode 100644 index 0000000..103d013 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go @@ -0,0 +1,17 @@ +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestStopContainerWithRestartPolicyAlways(c *check.C) { + dockerCmd(c, "run", "--name", "verifyRestart1", "-d", "--restart=always", "busybox", "false") + dockerCmd(c, "run", "--name", "verifyRestart2", "-d", "--restart=always", "busybox", "false") + + c.Assert(waitRun("verifyRestart1"), checker.IsNil) + c.Assert(waitRun("verifyRestart2"), checker.IsNil) + + dockerCmd(c, "stop", "verifyRestart1") + dockerCmd(c, "stop", "verifyRestart2") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go new file mode 100644 index 0000000..8eae162 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go @@ -0,0 +1,1254 @@ +// +build !windows + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/go-check/check" + "github.com/vishvananda/netlink" +) + +func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + getSpec := func() swarm.Spec { + sw := d.getSwarm(c) + return sw.Spec + } + + out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + + // setting anything under 30m for cert-expiry is not allowed + out, err = d.Cmd("swarm", "update", "--cert-expiry", "15m") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "minimum certificate expiry time") + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) +} + +func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { + d := s.AddDaemon(c, false, false) + + getSpec := func() swarm.Spec { + sw := d.getSwarm(c) + return sw.Spec + } + + out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + + c.Assert(d.Leave(true), checker.IsNil) + time.Sleep(500 * time.Millisecond) // https://github.com/docker/swarmkit/issues/1421 + out, err = d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 90*24*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 5*time.Second) +} + +func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *check.C) { + testRequires(c, IPv6) + d1 := s.AddDaemon(c, false, false) + out, err := d1.Cmd("swarm", "init", "--listen-addr", "::1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + d2 := s.AddDaemon(c, false, false) + out, err = d2.Cmd("swarm", "join", "::1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + out, err = d2.Cmd("info") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, "Swarm: active") +} + +func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *check.C) { + d := s.AddDaemon(c, false, false) + out, err := d.Cmd("swarm", "init", "--advertise-addr", "0.0.0.0") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "advertise address must be a non-zero IP address") +} + +func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { + // init swarm mode and stop a daemon + d := s.AddDaemon(c, true, true) + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d.Stop(), checker.IsNil) + + // start a daemon with --cluster-store and --cluster-advertise + err = d.Start("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375") + c.Assert(err, checker.NotNil) + content, _ := ioutil.ReadFile(d.logFile.Name()) + c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + + // start a daemon with --live-restore + err = d.Start("--live-restore") + c.Assert(err, checker.NotNil) + content, _ = ioutil.ReadFile(d.logFile.Name()) + c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode") + // restart for teardown + c.Assert(d.Start(), checker.IsNil) +} + +// Test case for #24090 +func (s *DockerSwarmSuite) TestSwarmNodeListHostname(c *check.C) { + d := s.AddDaemon(c, true, true) + + // The first line should contain "HOSTNAME" + out, err := d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(strings.Split(out, "\n")[0], checker.Contains, "HOSTNAME") +} + +func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + containers := d.activeContainers() + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid")) +} + +// Test case for #24270 +func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name1 := "redis-cluster-md5" + name2 := "redis-cluster" + name3 := "other-cluster" + out, err := d.Cmd("service", "create", "--name", name1, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--name", name2, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--name", name3, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + filter1 := "name=redis-cluster-md5" + filter2 := "name=redis-cluster" + + // We search checker.Contains with `name+" "` to prevent prefix only. + out, err = d.Cmd("service", "ls", "--filter", filter1) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Not(checker.Contains), name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls", "--filter", filter2) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Contains, name3+" ") +} + +func (s *DockerSwarmSuite) TestSwarmNodeListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("node", "inspect", "--format", "{{ .Description.Hostname }}", "self") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + name := strings.TrimSpace(out) + + filter := "name=" + name[:4] + + out, err = d.Cmd("node", "ls", "--filter", filter) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("node", "ls", "--filter", "name=none") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 3) + + filter := "name=redis-cluster" + + out, err = d.Cmd("node", "ps", "--filter", filter, "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("node", "ps", "--filter", "name=none", "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") +} + +// Test case for #25375 +func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { + d := s.AddDaemon(c, true, true) + + testCases := []struct { + name string + publishAdd []string + ports string + }{ + { + name: "simple-syntax", + publishAdd: []string{ + "80:80", + "80:80", + "80:80", + "80:20", + }, + ports: "[{ tcp 80 80 ingress}]", + }, + { + name: "complex-syntax", + publishAdd: []string{ + "target=90,published=90,protocol=tcp,mode=ingress", + "target=90,published=90,protocol=tcp,mode=ingress", + "target=90,published=90,protocol=tcp,mode=ingress", + "target=30,published=90,protocol=tcp,mode=ingress", + }, + ports: "[{ tcp 90 90 ingress}]", + }, + } + + for _, tc := range testCases { + out, err := d.Cmd("service", "create", "--name", tc.name, "--label", "x=y", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[0], tc.name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[1], tc.name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[2], "--publish-add", tc.publishAdd[3], tc.name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", tc.name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, tc.ports) + } +} + +func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + container := strings.TrimSpace(out) + + out, err = d.Cmd("exec", container, "id") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777") +} + +func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("run", "-id", "--restart=always", "--net=foo", "--name=test", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + d.Restart() + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} + +func (s *DockerSwarmSuite) TestSwarmContainerEndpointOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=first", "--net-alias=first-alias", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=second", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // ping first container and its alias + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil, check.Commentf(out)) + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first-alias") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "testnet") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + networkID := strings.TrimSpace(out) + + out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top") + c.Assert(err, checker.IsNil) + cID := strings.TrimSpace(out) + d.waitRun(cID) + + _, err = d.Cmd("rm", "-f", cID) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("network", "rm", "testnet") + c.Assert(err, checker.IsNil) + + checkNetwork := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("network", "ls") + c.Assert(err, checker.IsNil) + return out, nil + } + + waitAndAssert(c, 3*time.Second, checkNetwork, checker.Not(checker.Contains), "testnet") +} + +func (s *DockerSwarmSuite) TestOverlayAttachable(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // validate attachable + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") + + // validate containers can attache to this overlay network + out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // redo validation, there was a bug that the value of attachable changes after + // containers attach to the network + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create an attachable swarm network + nwName := "attovl" + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", nwName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Connect a container to the network + out, err = d.Cmd("run", "-d", "--network", nwName, "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Leave the swarm + err = d.Leave(true) + c.Assert(err, checker.IsNil) + + // Check the container is disconnected + out, err = d.Cmd("inspect", "c1", "--format", "{{.NetworkSettings.Networks."+nwName+"}}") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Check the network is gone + out, err = d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), nwName) +} + +func (s *DockerSwarmSuite) TestSwarmRemoveInternalNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "ingress" + out, err := d.Cmd("network", "rm", name) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, name) + c.Assert(strings.TrimSpace(out), checker.Contains, "is a pre-defined network and cannot be removed") +} + +// Test case for #24108, also the case from: +// https://github.com/docker/docker/pull/24620#issuecomment-233715656 +func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + filter := "name=redis-cluster" + + checkNumTasks := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + return len(strings.Split(out, "\n")) - 2, nil // includes header and nl in last line + } + + // wait until all tasks have been created + waitAndAssert(c, defaultReconciliationTimeout, checkNumTasks, checker.Equals, 3) + + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name="+name+".1", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + name = "redis-cluster-sha1" + out, err = d.Cmd("service", "create", "--name", name, "--mode=global", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + waitAndAssert(c, defaultReconciliationTimeout, checkNumTasks, checker.Equals, 1) + + filter = "name=redis-cluster" + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name="+name, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a bare container + out, err := d.Cmd("run", "-d", "--name=bare-container", "busybox", "top") + c.Assert(err, checker.IsNil) + bareID := strings.TrimSpace(out)[:12] + // Create a service + name := "busybox-top" + out, err = d.Cmd("service", "create", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceRunningTasks(name), checker.Equals, 1) + + // Filter non-tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false") + c.Assert(err, checker.IsNil) + psOut := strings.TrimSpace(out) + c.Assert(psOut, checker.Equals, bareID, check.Commentf("Expected id %s, got %s for is-task label, output %q", bareID, psOut, out)) + + // Filter tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=true") + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.Trim(out, "\n "), "\n") + c.Assert(lines, checker.HasLen, 1) + c.Assert(lines[0], checker.Not(checker.Equals), bareID, check.Commentf("Expected not %s, but got it for is-task label, output %q", bareID, out)) +} + +const globalNetworkPlugin = "global-network-plugin" +const globalIPAMPlugin = "global-ipam-plugin" + +func setupRemoteGlobalNetworkPlugin(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Scope":"global"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.AllocateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.FreeNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected address from the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressReleaseReq.Address != gw { + fmt.Fprintf(w, `{"Error":"unknown address"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) + + ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteGlobalNetworkPlugin(c, mux, s.server.URL, globalNetworkPlugin, globalIPAMPlugin) + defer func() { + s.server.Close() + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) + }() + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", globalNetworkPlugin, "foo") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "not supported in swarm mode") +} + +// Test case for #24712 +func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + path := filepath.Join(d.folder, "env.txt") + err := ioutil.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0644) + c.Assert(err, checker.IsNil) + + name := "worker" + out, err := d.Cmd("service", "create", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // The complete env is [VAR1=A VAR2=A VAR1=B VAR1=C VAR2= VAR2] and duplicates will be removed => [VAR1=C VAR2] + out, err = d.Cmd("inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.Env }}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[VAR1=C VAR2]") +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + + ttyCheck := "if [ -t 0 ]; then echo TTY > /status && top; else echo none > /status && top; fi" + + // Without --tty + expectedOutput := "none" + out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + // Remove service + out, err = d.Cmd("service", "rm", name) + c.Assert(err, checker.IsNil) + // Make sure container has been destroyed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + + // With --tty + expectedOutput = "TTY" + out, err = d.Cmd("service", "create", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "false") + + _, err = d.Cmd("service", "update", "--tty", name) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. + expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out, err = d.Cmd("exec", id, "cat", "/etc/resolv.conf") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + _, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name) + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.DNSConfig }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "{[1.2.3.4] [example.com] [timeout:3]}") +} + +func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { + d := s.AddDaemon(c, false, false) + + outs, err := d.Cmd("swarm", "init", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + c.Assert(d.Restart(), checker.IsNil) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + cmd := d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString("wrong-secret-key") + out, err := cmd.CombinedOutput() + c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) + c.Assert(string(out), checker.Contains, "invalid key") + + cmd = d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + out, err = cmd.CombinedOutput() + c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + + outs, err = d.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + // Wait for autolock to be turned off + time.Sleep(time.Second) + + c.Assert(d.Restart(), checker.IsNil) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") +} + +func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) { + d := s.AddDaemon(c, false, false) + + outs, err := d.Cmd("swarm", "init", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(d.Restart("--swarm-default-advertise-addr=lo"), checker.IsNil) + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + outs, err = d.Cmd("swarm", "leave", "--force") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + outs, err = d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { + d := s.AddDaemon(c, true, true) + + outs, err := d.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // Rotate multiple times + for i := 0; i != 3; i++ { + outs, err = d.Cmd("swarm", "unlock-key", "-q", "--rotate") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + // Strip \n + newUnlockKey := outs[:len(outs)-1] + c.Assert(newUnlockKey, checker.Not(checker.Equals), "") + c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) + + c.Assert(d.Restart(), checker.IsNil) + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd := d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + out, err := cmd.CombinedOutput() + + if err == nil { + // On occasion, the daemon may not have finished + // rotating the KEK before restarting. The test is + // intentionally written to explore this behavior. + // When this happens, unlocking with the old key will + // succeed. If we wait for the rotation to happen and + // restart again, the new key should be required this + // time. + + time.Sleep(3 * time.Second) + + c.Assert(d.Restart(), checker.IsNil) + + cmd = d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + out, err = cmd.CombinedOutput() + } + c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) + c.Assert(string(out), checker.Contains, "invalid key") + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd = d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(newUnlockKey) + out, err = cmd.CombinedOutput() + c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + + unlockKey = newUnlockKey + } +} + +func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. + expectedOutput := "1.2.3.4\texample.com" + out, err = d.Cmd("exec", id, "cat", "/etc/hosts") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + // Manager Addresses will always show Node 1's address + expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.port) + + out, err := d1.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d2.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d3.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +func (s *DockerSwarmSuite) TestSwarmServiceInspectPretty(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--name", name, "--limit-cpu=0.5", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + expectedOutput := ` +Resources: + Limits: + CPU: 0.5` + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--ipam-opt", "foo=bar", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") + + out, err = d.Cmd("service", "create", "--network=foo", "--name", "top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") +} + +// TODO: migrate to a unit test +// This test could be migrated to unit test and save costly integration test, +// once PR #29143 is merged. +func (s *DockerSwarmSuite) TestSwarmUpdateWithoutArgs(c *check.C) { + d := s.AddDaemon(c, true, true) + + expectedOutput := ` +Usage: docker swarm update [OPTIONS] + +Update the swarm + +Options:` + + out, err := d.Cmd("swarm", "update") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "trusted" + serviceCmd := d.command("-D", "service", "create", "--name", name, repoName, "top") + s.trustSuite.trustedCmd(serviceCmd) + out, _, err := runCommandWithOutput(serviceCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "resolved image tag to", check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) + + // Try trusted service create on an untrusted tag. + + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + name = "untrusted" + serviceCmd = d.command("service", "create", "--name", name, repoName, "top") + s.trustSuite.trustedCmd(serviceCmd) + out, _, err = runCommandWithOutput(serviceCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceUpdate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "myservice" + + // Create a service without content trust + _, err := d.Cmd("service", "create", "--name", name, repoName, "top") + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Daemon won't insert the digest because this is disabled by + // DOCKER_SERVICE_PREFER_OFFLINE_IMAGE. + c.Assert(out, check.Not(checker.Contains), repoName+"@", check.Commentf(out)) + + serviceCmd := d.command("-D", "service", "update", "--image", repoName, name) + s.trustSuite.trustedCmd(serviceCmd) + out, _, err = runCommandWithOutput(serviceCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "resolved image tag to", check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) + + // Try trusted service update on an untrusted tag. + + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + serviceCmd = d.command("service", "update", "--image", repoName, name) + s.trustSuite.trustedCmd(serviceCmd) + out, _, err = runCommandWithOutput(serviceCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) +} + +// Test case for issue #27866, which did not allow NW name that is the prefix of a swarm NW ID. +// e.g. if the ingress ID starts with "n1", it was impossible to create a NW named "n1". +func (s *DockerSwarmSuite) TestSwarmNetworkCreateIssue27866(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("network", "inspect", "-f", "{{.Id}}", "ingress") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + ingressID := strings.TrimSpace(out) + c.Assert(ingressID, checker.Not(checker.Equals), "") + + // create a network of which name is the prefix of the ID of an overlay network + // (ingressID in this case) + newNetName := ingressID[0:2] + out, err = d.Cmd("network", "create", "--driver", "overlay", newNetName) + // In #27866, it was failing because of "network with name %s already exists" + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "rm", newNetName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) +} + +// Test case for https://github.com/docker/docker/pull/27938#issuecomment-265768303 +// This test creates two networks with the same name sequentially, with various drivers. +// Since the operations in this test are done sequentially, the 2nd call should fail with +// "network with name FOO already exists". +// Note that it is to ok have multiple networks with the same name if the operations are done +// in parallel. (#18864) +func (s *DockerSwarmSuite) TestSwarmNetworkCreateDup(c *check.C) { + d := s.AddDaemon(c, true, true) + drivers := []string{"bridge", "overlay"} + for i, driver1 := range drivers { + nwName := fmt.Sprintf("network-test-%d", i) + for _, driver2 := range drivers { + c.Logf("Creating a network named %q with %q, then %q", + nwName, driver1, driver2) + out, err := d.Cmd("network", "create", "--driver", driver1, nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "create", "--driver", driver2, nwName) + c.Assert(out, checker.Contains, + fmt.Sprintf("network with name %s already exists", nwName)) + c.Assert(err, checker.NotNil) + c.Logf("As expected, the attempt to network %q with %q failed: %s", + nwName, driver2, out) + out, err = d.Cmd("network", "rm", nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go new file mode 100644 index 0000000..d9e56ce --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go @@ -0,0 +1,52 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Make sure task stays pending before plugin is available + waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceTasksInState("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals, 1) + + plugin := newVolumePlugin(c, "customvolumedriver") + defer plugin.Close() + + // create a dummy volume to trigger lazy loading of the plugin + out, err = d.Cmd("volume", "create", "-d", "customvolumedriver", "hello") + + // TODO(aaronl): It will take about 15 seconds for swarm to realize the + // plugin was loaded. Switching the test over to plugin v2 would avoid + // this long delay. + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + containerID := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "-f", "{{json .Mounts}}", containerID) + c.Assert(err, checker.IsNil) + + var mounts []struct { + Name string + Driver string + } + + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "my-volume") + c.Assert(mounts[0].Driver, checker.Equals, "customvolumedriver") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go new file mode 100644 index 0000000..b7d2b1d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go @@ -0,0 +1,225 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +// tagging a named image in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + dockerCmd(c, "tag", "busybox:latest", "testfoobarbaz") +} + +// tagging an image by ID in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { + imageID := inspectField(c, "busybox", "Id") + dockerCmd(c, "tag", imageID, "testfoobarbaz") +} + +// ensure we don't allow the use of invalid repository names; these tag operations should fail +func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { + invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"} + + for _, repo := range invalidRepos { + out, _, err := dockerCmdWithError("tag", "busybox", repo) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repo, out)) + } +} + +// ensure we don't allow the use of invalid tags; these tag operations should fail +func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { + longTag := stringutils.GenerateRandomAlphaOnlyString(121) + + invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} + + for _, repotag := range invalidTags { + out, _, err := dockerCmdWithError("tag", "busybox", repotag) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repotag, out)) + } +} + +// ensure we allow the use of valid tags +func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"} + + for _, repo := range validRepos { + _, _, err := dockerCmdWithError("tag", "busybox:latest", repo) + if err != nil { + c.Errorf("tag busybox %v should have worked: %s", repo, err) + continue + } + deleteImages(repo) + } +} + +// tag an image with an existed tag name without -f option should work +func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + dockerCmd(c, "tag", "busybox:latest", "busybox:test") +} + +func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + // test repository name begin with '-' + out, _, err := dockerCmdWithError("tag", "busybox:latest", "-busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) + + // test namespace name begin with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-test/busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) + + // test index name begin with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-index:5000/busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) +} + +// ensure tagging using official names works +// ensure all tags result in the same name +func (s *DockerSuite) TestTagOfficialNames(c *check.C) { + names := []string{ + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + + for _, name := range names { + out, exitCode, err := dockerCmdWithError("tag", "busybox:latest", name+":latest") + if err != nil || exitCode != 0 { + c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) + continue + } + + // ensure we don't have multiple tag names. + out, _, err = dockerCmdWithError("images") + if err != nil { + c.Errorf("listing images failed with errors: %v, %s", err, out) + } else if strings.Contains(out, name) { + c.Errorf("images should not have listed '%s'", name) + deleteImages(name + ":latest") + } + } + + for _, name := range names { + _, exitCode, err := dockerCmdWithError("tag", name+":latest", "fooo/bar:latest") + if err != nil || exitCode != 0 { + c.Errorf("tag %v fooo/bar should have worked: %s", name, err) + continue + } + deleteImages("fooo/bar:latest") + } +} + +// ensure tags can not match digests +func (s *DockerSuite) TestTagMatchesDigest(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + digest := "busybox@sha256:abcdef76720241213f5303bda7704ec4c2ef75613173910a56fb1b6e20251507" + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "busybox:latest", digest) + if err == nil { + c.Fatal("digest tag a name should have failed") + } + // check that no new image matches the digest + _, _, err = dockerCmdWithError("inspect", digest) + if err == nil { + c.Fatal("inspecting by digest should have failed") + } +} + +func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "busybox:latest", "sha256:sometag") + if err == nil { + c.Fatal("tagging with image named \"sha256\" should have failed") + } +} + +// ensure tags cannot create ambiguity with image ids +func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) { + //testRequires(c, DaemonIsLinux) + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + imageID, err := buildImage("notbusybox:latest", + `FROM busybox + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + truncatedImageID := stringid.TruncateID(imageID) + truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID) + + id := inspectField(c, truncatedTag, "Id") + + // Ensure inspect by image id returns image for image id + c.Assert(id, checker.Equals, imageID) + c.Logf("Built image: %s", imageID) + + // test setting tag fails + _, _, err = dockerCmdWithError("tag", "busybox:latest", truncatedTag) + if err != nil { + c.Fatalf("Error tagging with an image id: %s", err) + } + + id = inspectField(c, truncatedTag, "Id") + + // Ensure id is imageID and not busybox:latest + c.Assert(id, checker.Not(checker.Equals), imageID) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go new file mode 100644 index 0000000..caae290 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go @@ -0,0 +1,73 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + var expected icmd.Expected + switch daemonPlatform { + case "windows": + expected = icmd.Expected{ExitCode: 1, Err: "Windows does not support arguments to top"} + default: + expected = icmd.Expected{Out: "PID"} + } + result := dockerCmdWithResult("top", cleanedContainerID, "-o", "pid") + c.Assert(result, icmd.Matches, expected) +} + +func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + // Windows will list the name of the launched executable which in this case is busybox.exe, without the parameters. + // Linux will display the command executed in the container + var lookingFor string + if daemonPlatform == "windows" { + lookingFor = "busybox.exe" + } else { + lookingFor = "top" + } + + c.Assert(out1, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the first time", lookingFor)) + c.Assert(out2, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the second time", lookingFor)) +} + +// TestTopWindowsCoreProcesses validates that there are lines for the critical +// processes which are found in a Windows container. Note Windows is architecturally +// very different to Linux in this regard. +func (s *DockerSuite) TestTopWindowsCoreProcesses(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + out1, _ := dockerCmd(c, "top", cleanedContainerID) + lookingFor := []string{"smss.exe", "csrss.exe", "wininit.exe", "services.exe", "lsass.exe", "CExecSvc.exe"} + for i, s := range lookingFor { + c.Assert(out1, checker.Contains, s, check.Commentf("top should've listed `%s` in the process list, but failed. Test case %d", s, i)) + } +} + +func (s *DockerSuite) TestTopPrivileged(c *check.C) { + // Windows does not support --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "-i", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + c.Assert(out1, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the first time")) + c.Assert(out2, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the second time")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go new file mode 100644 index 0000000..0b31bb4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go @@ -0,0 +1,41 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false") + timeout := 60 * time.Second + if daemonPlatform == "windows" { + timeout = 180 * time.Second + } + + id := strings.TrimSpace(string(out)) + + // update restart policy to on-failure:5 + dockerCmd(c, "update", "--restart=on-failure:5", id) + + err := waitExited(id, timeout) + c.Assert(err, checker.IsNil) + + count := inspectField(c, id, "RestartCount") + c.Assert(count, checker.Equals, "5") + + maximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(maximumRetryCount, checker.Equals, "5") +} + +func (s *DockerSuite) TestUpdateRestartWithAutoRemoveFlag(c *check.C) { + out, _ := runSleepingContainer(c, "--rm") + id := strings.TrimSpace(out) + + // update restart policy for an AutoRemove container + out, _, err := dockerCmdWithError("update", "--restart=always", id) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Restart policy cannot be updated because AutoRemove is enabled for the container") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go new file mode 100644 index 0000000..580ff02 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go @@ -0,0 +1,283 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "github.com/kr/pty" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestUpdateRunningContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdateRunningContainerWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + dockerCmd(c, "restart", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdateStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + dockerCmd(c, "run", "--name", name, "-m", "300M", "busybox", "cat", file) + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + out, _ := dockerCmd(c, "start", "-a", name) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdatePausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, cpuShare) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--cpu-shares", "1000", "busybox", "top") + dockerCmd(c, "pause", name) + dockerCmd(c, "update", "--cpu-shares", "500", name) + + c.Assert(inspectField(c, name, "HostConfig.CPUShares"), checker.Equals, "500") + + dockerCmd(c, "unpause", name) + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "500") +} + +func (s *DockerSuite) TestUpdateWithUntouchedFields(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuShare) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "--cpu-shares", "800", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + // Update memory and not touch cpus, `cpuset.cpus` should still have the old value + out := inspectField(c, name, "HostConfig.CPUShares") + c.Assert(out, check.Equals, "800") + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "800") +} + +func (s *DockerSuite) TestUpdateContainerInvalidValue(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + out, _, err := dockerCmdWithError("update", "-m", "2M", name) + c.Assert(err, check.NotNil) + expected := "Minimum memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestUpdateContainerWithoutFlags(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + _, _, err := dockerCmdWithError("update", name) + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestUpdateKernelMemory(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--kernel-memory", "50M", "busybox", "top") + dockerCmd(c, "update", "--kernel-memory", "100M", name) + + c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "104857600") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "104857600") +} + +func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + isNewKernel := kernel.CheckKernelVersion(4, 6, 0) + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") + _, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name) + // Update kernel memory to a running container without kernel memory initialized + // is not allowed before kernel version 4.6. + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "pause", name) + _, _, err = dockerCmdWithError("update", "--kernel-memory", "200M", name) + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + dockerCmd(c, "unpause", name) + + dockerCmd(c, "stop", name) + dockerCmd(c, "update", "--kernel-memory", "300M", name) + dockerCmd(c, "start", name) + + c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "314572800") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") +} + +func (s *DockerSuite) TestUpdateSwapMemoryOnly(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateInvalidSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") + _, _, err := dockerCmdWithError("update", "--memory-swap", "200M", name) + // Update invalid swap memory should fail. + // This will pass docker config validation, but failed at kernel validation + c.Assert(err, check.NotNil) + + // Update invalid swap memory with failure should not change HostConfig + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateStats(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuCfsQuota) + name := "foo" + dockerCmd(c, "run", "-d", "-ti", "--name", name, "-m", "500m", "busybox") + + c.Assert(waitRun(name), checker.IsNil) + + getMemLimit := func(id string) uint64 { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + + return v.MemoryStats.Limit + } + preMemLimit := getMemLimit(name) + + dockerCmd(c, "update", "--cpu-quota", "2000", name) + + curMemLimit := getMemLimit(name) + + c.Assert(preMemLimit, checker.Equals, curMemLimit) + +} + +func (s *DockerSuite) TestUpdateMemoryWithSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "busybox", "top") + out, _, err := dockerCmdWithError("update", "--memory", "800M", name) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Memory limit should be smaller than already set memoryswap limit") + + dockerCmd(c, "update", "--memory", "800M", "--memory-swap", "1000M", name) +} + +func (s *DockerSuite) TestUpdateNotAffectMonitorRestartPolicy(c *check.C) { + testRequires(c, DaemonIsLinux, cpuShare) + + out, _ := dockerCmd(c, "run", "-tid", "--restart=always", "busybox", "sh") + id := strings.TrimSpace(string(out)) + dockerCmd(c, "update", "--cpu-shares", "512", id) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + _, err = cpty.Write([]byte("exit\n")) + c.Assert(err, checker.IsNil) + + c.Assert(cmd.Wait(), checker.IsNil) + + // container should restart again and keep running + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(id), checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go new file mode 100644 index 0000000..acf7423 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go @@ -0,0 +1,98 @@ +// +build !windows + +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +// user namespaces test: run daemon with remapped root setting +// 1. validate uid/gid maps are set properly +// 2. verify that files created are owned by remapped root +func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, UserNamespaceInKernel) + + c.Assert(s.d.StartWithBusybox("--userns-remap", "default"), checker.IsNil) + + tmpDir, err := ioutil.TempDir("", "userns") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Set a non-existent path + tmpDirNotExists := path.Join(os.TempDir(), "userns"+stringid.GenerateRandomID()) + defer os.RemoveAll(tmpDirNotExists) + + // we need to find the uid and gid of the remapped root from the daemon's root dir info + uidgid := strings.Split(filepath.Base(s.d.root), ".") + c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.root))) + uid, err := strconv.Atoi(uidgid[0]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) + gid, err := strconv.Atoi(uidgid[1]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse gid")) + + // writable by the remapped root UID/GID pair + c.Assert(os.Chown(tmpDir, uid, gid), checker.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "userns", "-v", tmpDir+":/goofy", "-v", tmpDirNotExists+":/donald", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user := s.findUser(c, "userns") + c.Assert(uidgid[0], checker.Equals, user) + + // check that the created directory is owned by remapped uid:gid + statNotExists, err := system.Stat(tmpDirNotExists) + c.Assert(err, checker.IsNil) + c.Assert(statNotExists.UID(), checker.Equals, uint32(uid), check.Commentf("Created directory not owned by remapped root UID")) + c.Assert(statNotExists.GID(), checker.Equals, uint32(gid), check.Commentf("Created directory not owned by remapped root GID")) + + pid, err := s.d.Cmd("inspect", "--format={{.State.Pid}}", "userns") + c.Assert(err, checker.IsNil, check.Commentf("Could not inspect running container: out: %q", pid)) + // check the uid and gid maps for the PID to ensure root is remapped + // (cmd = cat /proc//uid_map | grep -E '0\s+9999\s+1') + out, rc1, err := runCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/uid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", uid))) + c.Assert(rc1, checker.Equals, 0, check.Commentf("Didn't match uid_map: output: %s", out)) + + out, rc2, err := runCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/gid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", gid))) + c.Assert(rc2, checker.Equals, 0, check.Commentf("Didn't match gid_map: output: %s", out)) + + // check that the touched file is owned by remapped uid:gid + stat, err := system.Stat(filepath.Join(tmpDir, "testfile")) + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Touched file not owned by remapped root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Touched file not owned by remapped root GID")) + + // use host usernamespace + out, err = s.d.Cmd("run", "-d", "--name", "userns_skip", "--userns", "host", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user = s.findUser(c, "userns_skip") + // userns are skipped, user is root + c.Assert(user, checker.Equals, "root") +} + +// findUser finds the uid or name of the user of the first process that runs in a container +func (s *DockerDaemonSuite) findUser(c *check.C, container string) string { + out, err := s.d.Cmd("top", container) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + rows := strings.Split(out, "\n") + if len(rows) < 2 { + // No process rows founds + c.FailNow() + } + return strings.Fields(rows[1])[0] +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go new file mode 100644 index 0000000..889936a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go @@ -0,0 +1,125 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/go-check/check" +) + +func makefile(contents string) (string, func(), error) { + cleanup := func() { + + } + + f, err := ioutil.TempFile(".", "tmp") + if err != nil { + return "", cleanup, err + } + err = ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm) + if err != nil { + return "", cleanup, err + } + + cleanup = func() { + err := os.Remove(f.Name()) + if err != nil { + fmt.Println("Error removing tmpfile") + } + } + return f.Name(), cleanup, nil + +} + +// TestV2Only ensures that a daemon in v2-only mode does not +// attempt to contact any v1 registry endpoints. +func (s *DockerRegistrySuite) TestV2Only(c *check.C) { + reg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + }) + + reg.registerHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) { + c.Fatal("V1 registry contacted") + }) + + repoName := fmt.Sprintf("%s/busybox", reg.hostport) + + err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=true") + c.Assert(err, check.IsNil) + + dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup() + + s.d.Cmd("build", "--file", dockerfileName, ".") + + s.d.Cmd("run", repoName) + s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.hostport) + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + s.d.Cmd("pull", repoName) +} + +// TestV1 starts a daemon in 'normal' mode +// and ensure v1 endpoints are hit for the following operations: +// login, push, pull, build & run +func (s *DockerRegistrySuite) TestV1(c *check.C) { + reg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + + v2Pings := 0 + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + v2Pings++ + // V2 ping 404 causes fallback to v1 + w.WriteHeader(404) + }) + + v1Pings := 0 + reg.registerHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) { + v1Pings++ + }) + + v1Logins := 0 + reg.registerHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) { + v1Logins++ + }) + + v1Repo := 0 + reg.registerHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + reg.registerHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=false") + c.Assert(err, check.IsNil) + + dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup() + + s.d.Cmd("build", "--file", dockerfileName, ".") + c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build")) + + repoName := fmt.Sprintf("%s/busybox", reg.hostport) + s.d.Cmd("run", repoName) + c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run")) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.hostport) + c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt")) + + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + + c.Assert(v1Repo, check.Equals, 2) + + s.d.Cmd("pull", repoName) + c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go new file mode 100644 index 0000000..7672beb --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go @@ -0,0 +1,58 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// ensure docker version works +func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "version") + stringsToCheck := map[string]int{ + "Client:": 1, + "Server:": 1, + " Version:": 2, + " API version:": 2, + " Go version:": 2, + " Git commit:": 2, + " OS/Arch:": 2, + " Built:": 2, + } + + for k, v := range stringsToCheck { + c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match excepted", k, out)) + } +} + +// ensure the Windows daemon return the correct platform string +func (s *DockerSuite) TestVersionPlatform_w(c *check.C) { + testRequires(c, DaemonIsWindows) + testVersionPlatform(c, "windows/amd64") +} + +// ensure the Linux daemon return the correct platform string +func (s *DockerSuite) TestVersionPlatform_l(c *check.C) { + testRequires(c, DaemonIsLinux) + testVersionPlatform(c, "linux") +} + +func testVersionPlatform(c *check.C, platform string) { + out, _ := dockerCmd(c, "version") + expected := "OS/Arch: " + platform + + split := strings.Split(out, "\n") + c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split))) + + // Verify the second 'OS/Arch' matches the platform. Experimental has + // more lines of output than 'regular' + bFound := false + for i := 14; i < len(split); i++ { + if strings.Contains(split[i], expected) { + bFound = true + break + } + } + c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go new file mode 100644 index 0000000..61a9413 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go @@ -0,0 +1,427 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumeCLICreate(c *check.C) { + dockerCmd(c, "volume", "create") + + _, err := runCommand(exec.Command(dockerBinary, "volume", "create", "-d", "nosuchdriver")) + c.Assert(err, check.Not(check.IsNil)) + + // test using hidden --name option + out, _ := dockerCmd(c, "volume", "create", "--name=test") + name := strings.TrimSpace(out) + c.Assert(name, check.Equals, "test") + + out, _ = dockerCmd(c, "volume", "create", "test2") + name = strings.TrimSpace(out) + c.Assert(name, check.Equals, "test2") +} + +func (s *DockerSuite) TestVolumeCLIInspect(c *check.C) { + c.Assert( + exec.Command(dockerBinary, "volume", "inspect", "doesntexist").Run(), + check.Not(check.IsNil), + check.Commentf("volume inspect should error on non-existent volume"), + ) + + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", name) + c.Assert(strings.TrimSpace(out), check.Equals, name) + + dockerCmd(c, "volume", "create", "test") + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", "test") + c.Assert(strings.TrimSpace(out), check.Equals, "test") +} + +func (s *DockerSuite) TestVolumeCLIInspectMulti(c *check.C) { + dockerCmd(c, "volume", "create", "test1") + dockerCmd(c, "volume", "create", "test2") + dockerCmd(c, "volume", "create", "not-shown") + + result := dockerCmdWithResult("volume", "inspect", "--format={{ .Name }}", "test1", "test2", "doesntexist", "not-shown") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such volume: doesntexist", + }) + + out := result.Stdout() + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 2, check.Commentf("\n%s", out)) + + c.Assert(out, checker.Contains, "test1") + c.Assert(out, checker.Contains, "test2") + c.Assert(out, checker.Not(checker.Contains), "not-shown") +} + +func (s *DockerSuite) TestVolumeCLILs(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "volume", "create", "aaa") + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "volume", "create", "soo") + dockerCmd(c, "run", "-v", "soo:"+prefix+"/foo", "busybox", "ls", "/") + + out, _ := dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + + assertVolList(c, out, []string{"aaa", "soo", "test"}) +} + +func (s *DockerSuite) TestVolumeLsFormat(c *check.C) { + dockerCmd(c, "volume", "create", "aaa") + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", "soo") + + out, _ := dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"aaa", "soo", "test"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) { + dockerCmd(c, "volume", "create", "aaa") + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", "soo") + + config := `{ + "volumesFormat": "{{ .Name }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "--config", d, "volume", "ls") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"aaa default", "soo default", "test default"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +// assertVolList checks volume retrieved with ls command +// equals to expected volume list +// note: out should be `volume ls [option]` result +func assertVolList(c *check.C, out string, expectVols []string) { + lines := strings.Split(out, "\n") + var volList []string + for _, line := range lines[1 : len(lines)-1] { + volFields := strings.Fields(line) + // wrap all volume name in volList + volList = append(volList, volFields[1]) + } + + // volume ls should contains all expected volumes + c.Assert(volList, checker.DeepEquals, expectVols) +} + +func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "volume", "create", "testnotinuse1") + dockerCmd(c, "volume", "create", "testisinuse1") + dockerCmd(c, "volume", "create", "testisinuse2") + + // Make sure both "created" (but not started), and started + // containers are included in reference counting + dockerCmd(c, "run", "--name", "volume-test1", "-v", "testisinuse1:"+prefix+"/foo", "busybox", "true") + dockerCmd(c, "create", "--name", "volume-test2", "-v", "testisinuse2:"+prefix+"/foo", "busybox", "true") + + out, _ := dockerCmd(c, "volume", "ls") + + // No filter, all volumes should show + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false") + + // Explicitly disabling dangling + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true") + + // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) + c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=1") + // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output, dangling also accept 1 + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) + c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=0") + // dangling=0 is same as dangling=false case + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "name=testisin") + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("execpeted volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=invalidDriver") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=local") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=loc") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + +} + +func (s *DockerSuite) TestVolumeCLILsErrorWithInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", "FOO=123") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestVolumeCLILsWithIncorrectFilterValue(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", "dangling=invalid") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestVolumeCLIRm(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + out, _ := dockerCmd(c, "volume", "create") + id := strings.TrimSpace(out) + + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "rm", id) + dockerCmd(c, "volume", "rm", "test") + + out, _ = dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + volumeID := "testing" + dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar") + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "volume", "rm", "testing")) + c.Assert( + err, + check.Not(check.IsNil), + check.Commentf("Should not be able to remove volume that is in use by a container\n%s", out)) + + out, _ = dockerCmd(c, "run", "--volumes-from=test", "--name=test2", "busybox", "sh", "-c", "cat /foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + dockerCmd(c, "rm", "-fv", "test2") + dockerCmd(c, "volume", "inspect", volumeID) + dockerCmd(c, "rm", "-f", "test") + + out, _ = dockerCmd(c, "run", "--name=test2", "-v", volumeID+":"+prefix+"/foo", "busybox", "sh", "-c", "cat /foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello", check.Commentf("volume data was removed")) + dockerCmd(c, "rm", "test2") + + dockerCmd(c, "volume", "rm", volumeID) + c.Assert( + exec.Command("volume", "rm", "doesntexist").Run(), + check.Not(check.IsNil), + check.Commentf("volume rm should fail with non-existent volume"), + ) +} + +func (s *DockerSuite) TestVolumeCLINoArgs(c *check.C) { + out, _ := dockerCmd(c, "volume") + // no args should produce the cmd usage output + usage := "Usage: docker volume COMMAND" + c.Assert(out, checker.Contains, usage) + + // invalid arg should error and show the command usage on stderr + _, stderr, _, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "somearg")) + c.Assert(err, check.NotNil, check.Commentf(stderr)) + c.Assert(stderr, checker.Contains, usage) + + // invalid flag should error and show the flag error and cmd usage + _, stderr, _, err = runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "--no-such-flag")) + c.Assert(err, check.NotNil, check.Commentf(stderr)) + c.Assert(stderr, checker.Contains, usage) + c.Assert(stderr, checker.Contains, "unknown flag: --no-such-flag") +} + +func (s *DockerSuite) TestVolumeCLIInspectTmplError(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + + out, exitCode, err := dockerCmdWithError("volume", "inspect", "--format='{{ .FooBar }}'", name) + c.Assert(err, checker.NotNil, check.Commentf("Output: %s", out)) + c.Assert(exitCode, checker.Equals, 1, check.Commentf("Output: %s", out)) + c.Assert(out, checker.Contains, "Template parsing error") +} + +func (s *DockerSuite) TestVolumeCLICreateWithOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "volume", "create", "-d", "local", "test", "--opt=type=tmpfs", "--opt=device=tmpfs", "--opt=o=size=1m,uid=1000") + out, _ := dockerCmd(c, "run", "-v", "test:/foo", "busybox", "mount") + + mounts := strings.Split(out, "\n") + var found bool + for _, m := range mounts { + if strings.Contains(m, "/foo") { + found = true + info := strings.Fields(m) + // tmpfs on type tmpfs (rw,relatime,size=1024k,uid=1000) + c.Assert(info[0], checker.Equals, "tmpfs") + c.Assert(info[2], checker.Equals, "/foo") + c.Assert(info[4], checker.Equals, "tmpfs") + c.Assert(info[5], checker.Contains, "uid=1000") + c.Assert(info[5], checker.Contains, "size=1024k") + } + } + c.Assert(found, checker.Equals, true) +} + +func (s *DockerSuite) TestVolumeCLICreateLabel(c *check.C) { + testVol := "testvolcreatelabel" + testLabel := "foo" + testValue := "bar" + + out, _, err := dockerCmdWithError("volume", "create", "--label", testLabel+"="+testValue, testVol) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+testLabel+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) +} + +func (s *DockerSuite) TestVolumeCLICreateLabelMultiple(c *check.C) { + testVol := "testvolcreatelabel" + + testLabels := map[string]string{ + "foo": "bar", + "baz": "foo", + } + + args := []string{ + "volume", + "create", + testVol, + } + + for k, v := range testLabels { + args = append(args, "--label", k+"="+v) + } + + out, _, err := dockerCmdWithError(args...) + c.Assert(err, check.IsNil) + + for k, v := range testLabels { + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+k+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, v) + } +} + +func (s *DockerSuite) TestVolumeCLILsFilterLabels(c *check.C) { + testVol1 := "testvolcreatelabel-1" + out, _, err := dockerCmdWithError("volume", "create", "--label", "foo=bar1", testVol1) + c.Assert(err, check.IsNil) + + testVol2 := "testvolcreatelabel-2" + out, _, err = dockerCmdWithError("volume", "create", "--label", "foo=bar2", testVol2) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo") + + // filter with label=key + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, checker.Contains, "testvolcreatelabel-2\n", check.Commentf("expected volume 'testvolcreatelabel-2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=bar1") + + // filter with label=key=value + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, check.Not(checker.Contains), "testvolcreatelabel-2\n", check.Commentf("expected volume 'testvolcreatelabel-2 in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=non-exist") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=non-exist") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForceUsage(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + id := strings.TrimSpace(out) + + dockerCmd(c, "volume", "rm", "-f", id) + dockerCmd(c, "volume", "rm", "--force", "nonexist") + + out, _ = dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForce(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + name := "test" + out, _ := dockerCmd(c, "volume", "create", name) + id := strings.TrimSpace(out) + c.Assert(id, checker.Equals, name) + + out, _ = dockerCmd(c, "volume", "inspect", "--format", "{{.Mountpoint}}", name) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + // Mountpoint is in the form of "/var/lib/docker/volumes/.../_data", removing `/_data` + path := strings.TrimSuffix(strings.TrimSpace(out), "/_data") + out, _, err := runCommandWithOutput(exec.Command("rm", "-rf", path)) + c.Assert(err, check.IsNil) + + dockerCmd(c, "volume", "rm", "-f", "test") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Not(checker.Contains), name) + dockerCmd(c, "volume", "create", "test") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) +} + +func (s *DockerSuite) TestVolumeCliInspectWithVolumeOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Without options + name := "test1" + dockerCmd(c, "volume", "create", "-d", "local", name) + out, _ := dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, "map[]") + + // With options + name = "test2" + k1, v1 := "type", "tmpfs" + k2, v2 := "device", "tmpfs" + k3, v3 := "o", "size=1m,uid=1000" + dockerCmd(c, "volume", "create", "-d", "local", name, "--opt", fmt.Sprintf("%s=%s", k1, v1), "--opt", fmt.Sprintf("%s=%s", k2, v2), "--opt", fmt.Sprintf("%s=%s", k3, v3)) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k1, v1)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k2, v2)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k3, v3)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go new file mode 100644 index 0000000..961aef5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go @@ -0,0 +1,97 @@ +package main + +import ( + "bytes" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// non-blocking wait with 0 exit code +func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "true") + containerID := strings.TrimSpace(out) + + err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) + c.Assert(err, checker.IsNil) //Container should have stopped by now + + out, _ = dockerCmd(c, "wait", containerID) + c.Assert(strings.TrimSpace(out), checker.Equals, "0", check.Commentf("failed to set up container, %v", out)) + +} + +// blocking wait with 0 exit code +func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) { + // Windows busybox does not support trap in this way, not sleep with sub-second + // granularity. It will always exit 0x40010004. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do usleep 10; done") + containerID := strings.TrimSpace(out) + + c.Assert(waitRun(containerID), checker.IsNil) + + chWait := make(chan string) + go func() { + chWait <- "" + out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID)) + chWait <- out + }() + + <-chWait // make sure the goroutine is started + time.Sleep(100 * time.Millisecond) + dockerCmd(c, "stop", containerID) + + select { + case status := <-chWait: + c.Assert(strings.TrimSpace(status), checker.Equals, "0", check.Commentf("expected exit 0, got %s", status)) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for `docker wait` to exit") + } + +} + +// non-blocking wait with random exit code +func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "exit 99") + containerID := strings.TrimSpace(out) + + err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) + c.Assert(err, checker.IsNil) //Container should have stopped by now + out, _ = dockerCmd(c, "wait", containerID) + c.Assert(strings.TrimSpace(out), checker.Equals, "99", check.Commentf("failed to set up container, %v", out)) + +} + +// blocking wait with random exit code +func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) { + // Cannot run on Windows as trap in Windows busybox does not support trap in this way. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do usleep 10; done") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + chWait := make(chan error) + waitCmd := exec.Command(dockerBinary, "wait", containerID) + waitCmdOut := bytes.NewBuffer(nil) + waitCmd.Stdout = waitCmdOut + c.Assert(waitCmd.Start(), checker.IsNil) + go func() { + chWait <- waitCmd.Wait() + }() + + dockerCmd(c, "stop", containerID) + + select { + case err := <-chWait: + c.Assert(err, checker.IsNil, check.Commentf(waitCmdOut.String())) + status, err := waitCmdOut.ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(status), checker.Equals, "99", check.Commentf("expected exit 99, got %s", status)) + case <-time.After(2 * time.Second): + waitCmd.Process.Kill() + c.Fatal("timeout waiting for `docker wait` to exit") + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go new file mode 100644 index 0000000..7bc287e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go @@ -0,0 +1,227 @@ +// This file will be removed when we completely drop support for +// passing HostConfig to container start API. + +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func formatV123StartAPIURL(url string) string { + return "/v1.23" + url +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartHostConfig(c *check.C) { + name := "test-deprecated-api-124" + dockerCmd(c, "create", "--name", name, "busybox") + config := map[string]interface{}{ + "Binds": []string{"/aa:/bb"}, + } + status, body, err := sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(string(body), checker.Contains, "was deprecated since v1.10") +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumeBinds(c *check.C) { + // TODO Windows CI: Investigate further why this fails on Windows to Windows CI. + testRequires(c, DaemonIsLinux) + path := "/foo" + if daemonPlatform == "windows" { + path = `c:\foo` + } + name := "testing" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{path: {}}, + } + + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath := randomTmpDirPath("test", daemonPlatform) + config = map[string]interface{}{ + "Binds": []string{bindPath + ":" + path}, + } + status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, path) + c.Assert(err, checker.IsNil) + c.Assert(pth, checker.Equals, bindPath, check.Commentf("expected volume host path to be %s, got %s", bindPath, pth)) +} + +// Test for GH#10618 +func (s *DockerSuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + name := "testdups" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath1 := randomTmpDirPath("test1", daemonPlatform) + bindPath2 := randomTmpDirPath("test2", daemonPlatform) + + config = map[string]interface{}{ + "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, + } + status, body, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Contains, "Duplicate mount point", check.Commentf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)) +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumesFrom(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + volName := "voltst" + volPath := "/tmp" + + dockerCmd(c, "run", "--name", volName, "-v", volPath, "busybox") + + name := "TestContainerAPIStartVolumesFrom" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{volPath: {}}, + } + + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config = map[string]interface{}{ + "VolumesFrom": []string{volName}, + } + status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, volPath) + c.Assert(err, checker.IsNil) + pth2, err := inspectMountSourceField(volName, volPath) + c.Assert(err, checker.IsNil) + c.Assert(pth, checker.Equals, pth2, check.Commentf("expected volume host path to be %s, got %s", pth, pth2)) +} + +// #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume +func (s *DockerSuite) TestDeprecatedPostContainerBindNormalVolume(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "-v", "/foo", "--name=one", "busybox") + + fooDir, err := inspectMountSourceField("one", "/foo") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox") + + bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/two/start"), bindSpec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + fooDir2, err := inspectMountSourceField("two", "/foo") + c.Assert(err, checker.IsNil) + c.Assert(fooDir2, checker.Equals, fooDir, check.Commentf("expected volume path to be %s, got: %s", fooDir, fooDir2)) +} + +func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + + containerID := strings.TrimSpace(out) + + config := `{ + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + b, err2 := readBody(body) + c.Assert(err2, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithoutLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support supplying a hostconfig on start. + // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, append([]string{"create", "--name", name, "busybox"}, sleepCommandForDaemonPlatform()...)...) + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support supplying a hostconfig on start. + // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, "run", "--name", "foo", "-d", "busybox", "top") + dockerCmd(c, "create", "--name", name, "--link", "foo:bar", "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfigIdLinked(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top") + id := strings.TrimSpace(out) + dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +func (s *DockerSuite) TestDeprecatedStartWithNilDNS(c *check.C) { + // TODO Windows: Add once DNS is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + containerID := strings.TrimSpace(out) + + config := `{"HostConfig": {"Dns": null}}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() + + dns := inspectFieldJSON(c, containerID, "HostConfig.Dns") + c.Assert(dns, checker.Equals, "[]") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go new file mode 100644 index 0000000..94ef9b1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go @@ -0,0 +1,30 @@ +// +build !windows + +package main + +import ( + "fmt" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// #19100 This is a deprecated feature test, it should be removed in Docker 1.12 +func (s *DockerNetworkSuite) TestDeprecatedDockerNetworkStartAPIWithHostconfig(c *check.C) { + netName := "test" + conName := "foo" + dockerCmd(c, "network", "create", netName) + dockerCmd(c, "create", "--name", conName, "busybox", "top") + + config := map[string]interface{}{ + "HostConfig": map[string]interface{}{ + "NetworkMode": netName, + }, + } + _, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+conName+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(conName), checker.IsNil) + networks := inspectField(c, conName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netName, check.Commentf(fmt.Sprintf("Should contain '%s' network", netName))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go b/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go new file mode 100644 index 0000000..85dec31 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go @@ -0,0 +1,594 @@ +// +build !windows + +package main + +import ( + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/go-check/check" +) + +var ( + MacvlanKernelSupport = testRequirement{ + func() bool { + const macvlanKernelVer = 3 // minimum macvlan kernel support + const macvlanMajorVer = 9 // minimum macvlan major kernel support + kv, err := kernel.GetKernelVersion() + if err != nil { + return false + } + // ensure Kernel version is >= v3.9 for macvlan support + if kv.Kernel < macvlanKernelVer || (kv.Kernel == macvlanKernelVer && kv.Major < macvlanMajorVer) { + return false + } + return true + }, + "kernel version failed to meet the minimum macvlan kernel requirement of 3.9", + } + IpvlanKernelSupport = testRequirement{ + func() bool { + const ipvlanKernelVer = 4 // minimum ipvlan kernel support + const ipvlanMajorVer = 2 // minimum ipvlan major kernel support + kv, err := kernel.GetKernelVersion() + if err != nil { + return false + } + // ensure Kernel version is >= v4.2 for ipvlan support + if kv.Kernel < ipvlanKernelVer || (kv.Kernel == ipvlanKernelVer && kv.Major < ipvlanMajorVer) { + return false + } + return true + }, + "kernel version failed to meet the minimum ipvlan kernel requirement of 4.0.0", + } +) + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanPersistance(c *check.C) { + // verify the driver automatically provisions the 802.1q link (dm-dummy0.60) + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.60", "dm-persist") + assertNwIsAvailable(c, "dm-persist") + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + // verify network is recreated from persistence + assertNwIsAvailable(c, "dm-persist") + // cleanup the master interface that also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanPersistance(c *check.C) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.70) + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'di' notation represent 'docker ipvlan' + master := "di-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.70", "di-persist") + assertNwIsAvailable(c, "di-persist") + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + // verify network is recreated from persistence + assertNwIsAvailable(c, "di-persist") + // cleanup the master interface that also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanSubIntCreate(c *check.C) { + // verify the driver automatically provisions the 802.1q link (dm-dummy0.50) + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.50", "dm-subinterface") + assertNwIsAvailable(c, "dm-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanSubIntCreate(c *check.C) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.50) + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.60", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = createVlanInterface(c, master, "dm-dummy0.40", "40") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-subinterface") + assertNwIsAvailable(c, "dm-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err = dockerCmdWithError("network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, check.NotNil) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = createVlanInterface(c, master, "di-dummy0.30", "30") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err = dockerCmdWithError("network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, check.NotNil) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { + // create a dual stack multi-subnet Macvlan bridge mode network and validate connectivity between four containers, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.100.0/24", "--subnet=172.28.102.0/24", "--gateway=172.28.102.254", + "--subnet=2001:db8:abc2::/64", "--subnet=2001:db8:abc4::/64", "--gateway=2001:db8:abc4::254", "dualstackbridge") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackbridge") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "--ip", "172.28.100.20", "--ip6", "2001:db8:abc2::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=second", "--ip", "172.28.100.21", "--ip6", "2001:db8:abc2::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + c.Skip("Temporarily skipping while invesitigating sporadic v6 CI issues") + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=third", "--ip", "172.28.102.20", "--ip6", "2001:db8:abc4::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=fourth", "--ip", "172.28.102.21", "--ip6", "2001:db8:abc4::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.100.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc2::1") + + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.102.254") + // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc4::254") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) { + // create a dual stack multi-subnet Ipvlan L2 network and validate connectivity within the subnets, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.200.0/24", "--subnet=172.28.202.0/24", "--gateway=172.28.202.254", + "--subnet=2001:db8:abc8::/64", "--subnet=2001:db8:abc6::/64", "--gateway=2001:db8:abc6::254", "dualstackl2") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackl2") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.200.0/24 and 2001:db8:abc8::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=first", "--ip", "172.28.200.20", "--ip6", "2001:db8:abc8::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "--ip", "172.28.200.21", "--ip6", "2001:db8:abc8::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.202.0/24 and 2001:db8:abc6::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=third", "--ip", "172.28.202.20", "--ip6", "2001:db8:abc6::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=fourth", "--ip", "172.28.202.21", "--ip6", "2001:db8:abc6::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.200.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc8::1") + + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.202.254") + // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc6::254") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) { + // create a dual stack multi-subnet Ipvlan L3 network and validate connectivity between all four containers per L3 mode + testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, IPv6, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.10.0/24", "--subnet=172.28.12.0/24", "--gateway=172.28.12.254", + "--subnet=2001:db8:abc9::/64", "--subnet=2001:db8:abc7::/64", "--gateway=2001:db8:abc7::254", "-o", "ipvlan_mode=l3", "dualstackl3") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackl3") + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.10.0/24 and 2001:db8:abc9::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=first", "--ip", "172.28.10.20", "--ip6", "2001:db8:abc9::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=second", "--ip", "172.28.10.21", "--ip6", "2001:db8:abc9::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.12.0/24 and 2001:db8:abc7::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "--ip", "172.28.12.20", "--ip6", "2001:db8:abc7::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=fourth", "--ip", "172.28.12.21", "--ip6", "2001:db8:abc7::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // Verify connectivity across disparate subnets which is unique to L3 mode only + _, _, err = dockerCmdWithError("exec", "third", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "third", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure no next hop is assigned in L3 mode + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "") + // Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled + ip6gw := inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) { + // Ensure the default gateways, next-hops and default dev devices are properly set + testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.130.0/24", + "--subnet=2001:db8:abca::/64", "--gateway=2001:db8:abca::254", "-o", "macvlan_mode=bridge", "dualstackbridge") + assertNwIsAvailable(c, "dualstackbridge") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "busybox", "top") + // Validate macvlan bridge mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err := dockerCmdWithError("exec", "first", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.130.1 dev eth0") + // Validate macvlan bridge mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "first", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abca::254 dev eth0") + + // Verify ipvlan l2 mode sets the proper default gateway routes via netlink + // for either an explicitly set route by the user or inferred via default IPAM + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.140.0/24", "--gateway=172.28.140.254", + "--subnet=2001:db8:abcb::/64", "-o", "ipvlan_mode=l2", "dualstackl2") + assertNwIsAvailable(c, "dualstackl2") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "busybox", "top") + // Validate ipvlan l2 mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err = dockerCmdWithError("exec", "second", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.140.254 dev eth0") + // Validate ipvlan l2 mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "second", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abcb::1 dev eth0") + + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.160.0/24", "--gateway=172.28.160.254", + "--subnet=2001:db8:abcd::/64", "--gateway=2001:db8:abcd::254", "-o", "ipvlan_mode=l3", "dualstackl3") + assertNwIsAvailable(c, "dualstackl3") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "busybox", "top") + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") + // Validate ipvlan l3 mode sets the v6 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeNilParent(c *check.C) { + // macvlan bridge mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "dm-nil-parent") + assertNwIsAvailable(c, "dm-nil-parent") + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeInternalMode(c *check.C) { + // macvlan bridge mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--internal", "dm-internal") + assertNwIsAvailable(c, "dm-internal") + nr := getNetworkResource(c, "dm-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2NilParent(c *check.C) { + // ipvlan l2 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "di-nil-parent") + assertNwIsAvailable(c, "di-nil-parent") + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2InternalMode(c *check.C) { + // ipvlan l2 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--internal", "di-internal") + assertNwIsAvailable(c, "di-internal") + nr := getNetworkResource(c, "di-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=di-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=di-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3NilParent(c *check.C) { + // ipvlan l3 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "di-nil-parent-l3") + assertNwIsAvailable(c, "di-nil-parent-l3") + + // start two containers on separate subnets + dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-nil-parent-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-nil-parent-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3InternalMode(c *check.C) { + // ipvlan l3 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "--internal", "di-internal-l3") + assertNwIsAvailable(c, "di-internal-l3") + nr := getNetworkResource(c, "di-internal-l3") + c.Assert(nr.Internal, checker.True) + + // start two containers on separate subnets + dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-internal-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-internal-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanExistingParent(c *check.C) { + // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-parent-exists" + out, err := createMasterDummy(c, "dm-dummy0") + //out, err := createVlanInterface(c, "dm-parent", "dm-slave", "macvlan", "bridge") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0", netName) + assertNwIsAvailable(c, netName) + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined link + out, err = linkExists(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) + deleteInterface(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanSubinterface(c *check.C) { + // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-subinterface" + out, err := createMasterDummy(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = createVlanInterface(c, "dm-dummy0", "dm-dummy0.20", "20") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.20", netName) + assertNwIsAvailable(c, netName) + + // start containers on 802.1q tagged '-o parent' sub-interface + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // verify containers can communicate + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + + // remove the containers + dockerCmd(c, "rm", "-f", "first") + dockerCmd(c, "rm", "-f", "second") + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined sub-interface + out, err = linkExists(c, "dm-dummy0.20") + c.Assert(err, check.IsNil, check.Commentf(out)) + // delete the parent interface which also collects the slave + deleteInterface(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func createMasterDummy(c *check.C, master string) (string, error) { + // ip link add type dummy + args := []string{"link", "add", master, "type", "dummy"} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + // ip link set dummy_name up + args = []string{"link", "set", master, "up"} + ipLinkCmd = exec.Command("ip", args...) + out, _, err = runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + return out, err +} + +func createVlanInterface(c *check.C, master, slave, id string) (string, error) { + // ip link add link name . type vlan id + args := []string{"link", "add", "link", master, "name", slave, "type", "vlan", "id", id} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + // ip link set up + args = []string{"link", "set", slave, "up"} + ipLinkCmd = exec.Command("ip", args...) + out, _, err = runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + return out, err +} + +func linkExists(c *check.C, master string) (string, error) { + // verify the specified link exists, ip link show + args := []string{"link", "show", master} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + return out, err +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go b/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go new file mode 100644 index 0000000..df52cae --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go @@ -0,0 +1,90 @@ +package main + +import ( + "os/exec" + "runtime" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func init() { + // FIXME. Temporarily turning this off for Windows as GH16039 was breaking + // Windows to Linux CI @icecrime + if runtime.GOOS != "windows" { + check.Suite(newDockerHubPullSuite()) + } +} + +// DockerHubPullSuite provides an isolated daemon that doesn't have all the +// images that are baked into our 'global' test environment daemon (e.g., +// busybox, httpserver, ...). +// +// We use it for push/pull tests where we want to start fresh, and measure the +// relative impact of each individual operation. As part of this suite, all +// images are removed after each test. +type DockerHubPullSuite struct { + d *Daemon + ds *DockerSuite +} + +// newDockerHubPullSuite returns a new instance of a DockerHubPullSuite. +func newDockerHubPullSuite() *DockerHubPullSuite { + return &DockerHubPullSuite{ + ds: &DockerSuite{}, + } +} + +// SetUpSuite starts the suite daemon. +func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d = NewDaemon(c) + err := s.d.Start() + c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err)) +} + +// TearDownSuite stops the suite daemon. +func (s *DockerHubPullSuite) TearDownSuite(c *check.C) { + if s.d != nil { + err := s.d.Stop() + c.Assert(err, checker.IsNil, check.Commentf("stopping push/pull test daemon: %v", err)) + } +} + +// SetUpTest declares that all tests of this suite require network. +func (s *DockerHubPullSuite) SetUpTest(c *check.C) { + testRequires(c, Network) +} + +// TearDownTest removes all images from the suite daemon. +func (s *DockerHubPullSuite) TearDownTest(c *check.C) { + out := s.Cmd(c, "images", "-aq") + images := strings.Split(out, "\n") + images = append([]string{"rmi", "-f"}, images...) + s.d.Cmd(images...) + s.ds.TearDownTest(c) +} + +// Cmd executes a command against the suite daemon and returns the combined +// output. The function fails the test when the command returns an error. +func (s *DockerHubPullSuite) Cmd(c *check.C, name string, arg ...string) string { + out, err := s.CmdWithError(name, arg...) + c.Assert(err, checker.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(arg, " "), out, err)) + return out +} + +// CmdWithError executes a command against the suite daemon and returns the +// combined output as well as any error. +func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, error) { + c := s.MakeCmd(name, arg...) + b, err := c.CombinedOutput() + return string(b), err +} + +// MakeCmd returns an exec.Cmd command to run against the suite daemon. +func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { + args := []string{"--host", s.d.sock(), name} + args = append(args, arg...) + return exec.Command(dockerBinary, args...) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_test_vars.go b/vendor/github.com/docker/docker/integration-cli/docker_test_vars.go new file mode 100644 index 0000000..3559bfd --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_test_vars.go @@ -0,0 +1,165 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/reexec" +) + +var ( + // the docker client binary to use + dockerBinary = "docker" + // the docker daemon binary to use + dockerdBinary = "dockerd" + + // path to containerd's ctr binary + ctrBinary = "docker-containerd-ctr" + + // the private registry image to use for tests involving the registry + registryImageName = "registry" + + // the private registry to use for tests + privateRegistryURL = "127.0.0.1:5000" + + // TODO Windows CI. These are incorrect and need fixing into + // platform specific pieces. + runtimePath = "/var/run/docker" + + workingDirectory string + + // isLocalDaemon is true if the daemon under test is on the same + // host as the CLI. + isLocalDaemon bool + + // daemonPlatform is held globally so that tests can make intelligent + // decisions on how to configure themselves according to the platform + // of the daemon. This is initialized in docker_utils by sending + // a version call to the daemon and examining the response header. + daemonPlatform string + + // windowsDaemonKV is used on Windows to distinguish between different + // versions. This is necessary to enable certain tests based on whether + // the platform supports it. For example, Windows Server 2016 TP3 did + // not support volumes, but TP4 did. + windowsDaemonKV int + + // daemonDefaultImage is the name of the default image to use when running + // tests. This is platform dependent. + daemonDefaultImage string + + // For a local daemon on Linux, these values will be used for testing + // user namespace support as the standard graph path(s) will be + // appended with the root remapped uid.gid prefix + dockerBasePath string + volumesConfigPath string + containerStoragePath string + + // experimentalDaemon tell whether the main daemon has + // experimental features enabled or not + experimentalDaemon bool + + // daemonStorageDriver is held globally so that tests can know the storage + // driver of the daemon. This is initialized in docker_utils by sending + // a version call to the daemon and examining the response header. + daemonStorageDriver string + + // WindowsBaseImage is the name of the base image for Windows testing + // Environment variable WINDOWS_BASE_IMAGE can override this + WindowsBaseImage = "microsoft/windowsservercore" + + // isolation is the isolation mode of the daemon under test + isolation container.Isolation + + // daemonPid is the pid of the main test daemon + daemonPid int + + daemonKernelVersion string +) + +const ( + // DefaultImage is the name of the base image for the majority of tests that + // are run across suites + DefaultImage = "busybox" +) + +func init() { + reexec.Init() + if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { + dockerBinary = dockerBin + } + var err error + dockerBinary, err = exec.LookPath(dockerBinary) + if err != nil { + fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)\n", err) + os.Exit(1) + } + if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { + registryImageName = registryImage + } + if registry := os.Getenv("REGISTRY_URL"); registry != "" { + privateRegistryURL = registry + } + workingDirectory, _ = os.Getwd() + + // Deterministically working out the environment in which CI is running + // to evaluate whether the daemon is local or remote is not possible through + // a build tag. + // + // For example Windows to Linux CI under Jenkins tests the 64-bit + // Windows binary build with the daemon build tag, but calls a remote + // Linux daemon. + // + // We can't just say if Windows then assume the daemon is local as at + // some point, we will be testing the Windows CLI against a Windows daemon. + // + // Similarly, it will be perfectly valid to also run CLI tests from + // a Linux CLI (built with the daemon tag) against a Windows daemon. + if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 { + isLocalDaemon = false + } else { + isLocalDaemon = true + } + + // TODO Windows CI. This are incorrect and need fixing into + // platform specific pieces. + // This is only used for a tests with local daemon true (Linux-only today) + // default is "/var/lib/docker", but we'll try and ask the + // /info endpoint for the specific root dir + dockerBasePath = "/var/lib/docker" + type Info struct { + DockerRootDir string + ExperimentalBuild bool + KernelVersion string + } + var i Info + status, b, err := sockRequest("GET", "/info", nil) + if err == nil && status == 200 { + if err = json.Unmarshal(b, &i); err == nil { + dockerBasePath = i.DockerRootDir + experimentalDaemon = i.ExperimentalBuild + daemonKernelVersion = i.KernelVersion + } + } + volumesConfigPath = dockerBasePath + "/volumes" + containerStoragePath = dockerBasePath + "/containers" + + if len(os.Getenv("WINDOWS_BASE_IMAGE")) > 0 { + WindowsBaseImage = os.Getenv("WINDOWS_BASE_IMAGE") + fmt.Println("INFO: Windows Base image is ", WindowsBaseImage) + } + + dest := os.Getenv("DEST") + b, err = ioutil.ReadFile(filepath.Join(dest, "docker.pid")) + if err == nil { + if p, err := strconv.ParseInt(string(b), 10, 32); err == nil { + daemonPid = int(p) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_utils.go b/vendor/github.com/docker/docker/integration-cli/docker_utils.go new file mode 100644 index 0000000..749e4b3 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_utils.go @@ -0,0 +1,1607 @@ +package main + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/httputils" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/go-connections/tlsconfig" + units "github.com/docker/go-units" + "github.com/go-check/check" +) + +func init() { + cmd := exec.Command(dockerBinary, "images", "-f", "dangling=false", "--format", "{{.Repository}}:{{.Tag}}") + cmd.Env = appendBaseEnv(true) + out, err := cmd.CombinedOutput() + if err != nil { + panic(fmt.Errorf("err=%v\nout=%s\n", err, out)) + } + images := strings.Split(strings.TrimSpace(string(out)), "\n") + for _, img := range images { + protectedImages[img] = struct{}{} + } + + res, body, err := sockRequestRaw("GET", "/info", nil, "application/json") + if err != nil { + panic(fmt.Errorf("Init failed to get /info: %v", err)) + } + defer body.Close() + if res.StatusCode != http.StatusOK { + panic(fmt.Errorf("Init failed to get /info. Res=%v", res)) + } + + svrHeader, _ := httputils.ParseServerHeader(res.Header.Get("Server")) + daemonPlatform = svrHeader.OS + if daemonPlatform != "linux" && daemonPlatform != "windows" { + panic("Cannot run tests against platform: " + daemonPlatform) + } + + // Now we know the daemon platform, can set paths used by tests. + var info types.Info + err = json.NewDecoder(body).Decode(&info) + if err != nil { + panic(fmt.Errorf("Init failed to unmarshal docker info: %v", err)) + } + + daemonStorageDriver = info.Driver + dockerBasePath = info.DockerRootDir + volumesConfigPath = filepath.Join(dockerBasePath, "volumes") + containerStoragePath = filepath.Join(dockerBasePath, "containers") + // Make sure in context of daemon, not the local platform. Note we can't + // use filepath.FromSlash or ToSlash here as they are a no-op on Unix. + if daemonPlatform == "windows" { + volumesConfigPath = strings.Replace(volumesConfigPath, `/`, `\`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `/`, `\`, -1) + // On Windows, extract out the version as we need to make selective + // decisions during integration testing as and when features are implemented. + // eg in "10.0 10550 (10550.1000.amd64fre.branch.date-time)" we want 10550 + windowsDaemonKV, _ = strconv.Atoi(strings.Split(info.KernelVersion, " ")[1]) + } else { + volumesConfigPath = strings.Replace(volumesConfigPath, `\`, `/`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `\`, `/`, -1) + } + isolation = info.Isolation +} + +func convertBasesize(basesizeBytes int64) (int64, error) { + basesize := units.HumanSize(float64(basesizeBytes)) + basesize = strings.Trim(basesize, " ")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + if err != nil { + return 0, err + } + return int64(basesizeFloat) * 1024 * 1024 * 1024, nil +} + +func daemonHost() string { + daemonURLStr := "unix://" + opts.DefaultUnixSocket + if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { + daemonURLStr = daemonHostVar + } + return daemonURLStr +} + +func getTLSConfig() (*tls.Config, error) { + dockerCertPath := os.Getenv("DOCKER_CERT_PATH") + + if dockerCertPath == "" { + return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") + } + + option := &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + } + tlsConfig, err := tlsconfig.Client(*option) + if err != nil { + return nil, err + } + + return tlsConfig, nil +} + +func sockConn(timeout time.Duration, daemon string) (net.Conn, error) { + if daemon == "" { + daemon = daemonHost() + } + daemonURL, err := url.Parse(daemon) + if err != nil { + return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) + } + + var c net.Conn + switch daemonURL.Scheme { + case "npipe": + return npipeDial(daemonURL.Path, timeout) + case "unix": + return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) + case "tcp": + if os.Getenv("DOCKER_TLS_VERIFY") != "" { + // Setup the socket TLS configuration. + tlsConfig, err := getTLSConfig() + if err != nil { + return nil, err + } + dialer := &net.Dialer{Timeout: timeout} + return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) + } + return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) + default: + return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) + } +} + +func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return -1, nil, err + } + + res, body, err := sockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + return -1, nil, err + } + b, err := readBody(body) + return res.StatusCode, b, err +} + +func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { + return sockRequestRawToDaemon(method, endpoint, data, ct, "") +} + +func sockRequestRawToDaemon(method, endpoint string, data io.Reader, ct, daemon string) (*http.Response, io.ReadCloser, error) { + req, client, err := newRequestClient(method, endpoint, data, ct, daemon) + if err != nil { + return nil, nil, err + } + + resp, err := client.Do(req) + if err != nil { + client.Close() + return nil, nil, err + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + defer resp.Body.Close() + return client.Close() + }) + + return resp, body, nil +} + +func sockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.Conn, *bufio.Reader, error) { + req, client, err := newRequestClient(method, endpoint, data, ct, "") + if err != nil { + return nil, nil, err + } + + client.Do(req) + conn, br := client.Hijack() + return conn, br, nil +} + +func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string) (*http.Request, *httputil.ClientConn, error) { + c, err := sockConn(time.Duration(10*time.Second), daemon) + if err != nil { + return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) + } + + client := httputil.NewClientConn(c, nil) + + req, err := http.NewRequest(method, endpoint, data) + if err != nil { + client.Close() + return nil, nil, fmt.Errorf("could not create new request: %v", err) + } + + if ct != "" { + req.Header.Set("Content-Type", ct) + } + return req, client, nil +} + +func readBody(b io.ReadCloser) ([]byte, error) { + defer b.Close() + return ioutil.ReadAll(b) +} + +func deleteContainer(container ...string) error { + result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, container...)...) + return result.Compare(icmd.Success) +} + +func getAllContainers() (string, error) { + getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of containers: %v\n", out) + } + + return out, err +} + +func deleteAllContainers() error { + containers, err := getAllContainers() + if err != nil { + fmt.Println(containers) + return err + } + if containers == "" { + return nil + } + + err = deleteContainer(strings.Split(strings.TrimSpace(containers), "\n")...) + if err != nil { + fmt.Println(err.Error()) + } + return err +} + +func deleteAllNetworks() error { + networks, err := getAllNetworks() + if err != nil { + return err + } + var errors []string + for _, n := range networks { + if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { + continue + } + if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { + // nat is a pre-defined network on Windows and cannot be removed + continue + } + status, b, err := sockRequest("DELETE", "/networks/"+n.Name, nil) + if err != nil { + errors = append(errors, err.Error()) + continue + } + if status != http.StatusNoContent { + errors = append(errors, fmt.Sprintf("error deleting network %s: %s", n.Name, string(b))) + } + } + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +func getAllNetworks() ([]types.NetworkResource, error) { + var networks []types.NetworkResource + _, b, err := sockRequest("GET", "/networks", nil) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &networks); err != nil { + return nil, err + } + return networks, nil +} + +func deleteAllPlugins() error { + plugins, err := getAllPlugins() + if err != nil { + return err + } + var errors []string + for _, p := range plugins { + status, b, err := sockRequest("DELETE", "/plugins/"+p.Name+"?force=1", nil) + if err != nil { + errors = append(errors, err.Error()) + continue + } + if status != http.StatusNoContent { + errors = append(errors, fmt.Sprintf("error deleting plugin %s: %s", p.Name, string(b))) + } + } + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +func getAllPlugins() (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + _, b, err := sockRequest("GET", "/plugins", nil) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &plugins); err != nil { + return nil, err + } + return plugins, nil +} + +func deleteAllVolumes() error { + volumes, err := getAllVolumes() + if err != nil { + return err + } + var errors []string + for _, v := range volumes { + status, b, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) + if err != nil { + errors = append(errors, err.Error()) + continue + } + if status != http.StatusNoContent { + errors = append(errors, fmt.Sprintf("error deleting volume %s: %s", v.Name, string(b))) + } + } + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +func getAllVolumes() ([]*types.Volume, error) { + var volumes volumetypes.VolumesListOKBody + _, b, err := sockRequest("GET", "/volumes", nil) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &volumes); err != nil { + return nil, err + } + return volumes.Volumes, nil +} + +var protectedImages = map[string]struct{}{} + +func deleteAllImages() error { + cmd := exec.Command(dockerBinary, "images") + cmd.Env = appendBaseEnv(true) + out, err := cmd.CombinedOutput() + if err != nil { + return err + } + lines := strings.Split(string(out), "\n")[1:] + var imgs []string + for _, l := range lines { + if l == "" { + continue + } + fields := strings.Fields(l) + imgTag := fields[0] + ":" + fields[1] + if _, ok := protectedImages[imgTag]; !ok { + if fields[0] == "" { + imgs = append(imgs, fields[2]) + continue + } + imgs = append(imgs, imgTag) + } + } + if len(imgs) == 0 { + return nil + } + args := append([]string{"rmi", "-f"}, imgs...) + if err := exec.Command(dockerBinary, args...).Run(); err != nil { + return err + } + return nil +} + +func getPausedContainers() (string, error) { + getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of paused containers: %v\n", out) + } + + return out, err +} + +func getSliceOfPausedContainers() ([]string, error) { + out, err := getPausedContainers() + if err == nil { + if len(out) == 0 { + return nil, err + } + slice := strings.Split(strings.TrimSpace(out), "\n") + return slice, err + } + return []string{out}, err +} + +func unpauseContainer(container string) error { + return icmd.RunCommand(dockerBinary, "unpause", container).Error +} + +func unpauseAllContainers() error { + containers, err := getPausedContainers() + if err != nil { + fmt.Println(containers) + return err + } + + containers = strings.Replace(containers, "\n", " ", -1) + containers = strings.Trim(containers, " ") + containerList := strings.Split(containers, " ") + + for _, value := range containerList { + if err = unpauseContainer(value); err != nil { + return err + } + } + + return nil +} + +func deleteImages(images ...string) error { + args := []string{dockerBinary, "rmi", "-f"} + return icmd.RunCmd(icmd.Cmd{Command: append(args, images...)}).Error +} + +func imageExists(image string) error { + return icmd.RunCommand(dockerBinary, "inspect", image).Error +} + +func pullImageIfNotExist(image string) error { + if err := imageExists(image); err != nil { + pullCmd := exec.Command(dockerBinary, "pull", image) + _, exitCode, err := runCommandWithOutput(pullCmd) + + if err != nil || exitCode != 0 { + return fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) + } + } + return nil +} + +func dockerCmdWithError(args ...string) (string, int, error) { + if err := validateArgs(args...); err != nil { + return "", 0, err + } + result := icmd.RunCommand(dockerBinary, args...) + if result.Error != nil { + return result.Combined(), result.ExitCode, result.Compare(icmd.Success) + } + return result.Combined(), result.ExitCode, result.Error +} + +func dockerCmdWithStdoutStderr(c *check.C, args ...string) (string, string, int) { + if err := validateArgs(args...); err != nil { + c.Fatalf(err.Error()) + } + + result := icmd.RunCommand(dockerBinary, args...) + // TODO: why is c ever nil? + if c != nil { + c.Assert(result, icmd.Matches, icmd.Success) + } + return result.Stdout(), result.Stderr(), result.ExitCode +} + +func dockerCmd(c *check.C, args ...string) (string, int) { + if err := validateArgs(args...); err != nil { + c.Fatalf(err.Error()) + } + result := icmd.RunCommand(dockerBinary, args...) + c.Assert(result, icmd.Matches, icmd.Success) + return result.Combined(), result.ExitCode +} + +func dockerCmdWithResult(args ...string) *icmd.Result { + return icmd.RunCommand(dockerBinary, args...) +} + +func binaryWithArgs(args ...string) []string { + return append([]string{dockerBinary}, args...) +} + +// execute a docker command with a timeout +func dockerCmdWithTimeout(timeout time.Duration, args ...string) *icmd.Result { + if err := validateArgs(args...); err != nil { + return &icmd.Result{Error: err} + } + return icmd.RunCmd(icmd.Cmd{Command: binaryWithArgs(args...), Timeout: timeout}) +} + +// execute a docker command in a directory +func dockerCmdInDir(c *check.C, path string, args ...string) (string, int, error) { + if err := validateArgs(args...); err != nil { + c.Fatalf(err.Error()) + } + result := icmd.RunCmd(icmd.Cmd{Command: binaryWithArgs(args...), Dir: path}) + return result.Combined(), result.ExitCode, result.Error +} + +// execute a docker command in a directory with a timeout +func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) *icmd.Result { + if err := validateArgs(args...); err != nil { + return &icmd.Result{Error: err} + } + return icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs(args...), + Timeout: timeout, + Dir: path, + }) +} + +// validateArgs is a checker to ensure tests are not running commands which are +// not supported on platforms. Specifically on Windows this is 'busybox top'. +func validateArgs(args ...string) error { + if daemonPlatform != "windows" { + return nil + } + foundBusybox := -1 + for key, value := range args { + if strings.ToLower(value) == "busybox" { + foundBusybox = key + } + if (foundBusybox != -1) && (key == foundBusybox+1) && (strings.ToLower(value) == "top") { + return errors.New("cannot use 'busybox top' in tests on Windows. Use runSleepingContainer()") + } + } + return nil +} + +// find the State.ExitCode in container metadata +func findContainerExitCode(c *check.C, name string, vargs ...string) string { + args := append(vargs, "inspect", "--format='{{ .State.ExitCode }} {{ .State.Error }}'", name) + cmd := exec.Command(dockerBinary, args...) + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + return out +} + +func findContainerIP(c *check.C, id string, network string) string { + out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", network), id) + return strings.Trim(out, " \r\n'") +} + +func getContainerCount() (int, error) { + const containers = "Containers:" + + cmd := exec.Command(dockerBinary, "info") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return 0, err + } + + lines := strings.Split(out, "\n") + for _, line := range lines { + if strings.Contains(line, containers) { + output := strings.TrimSpace(line) + output = strings.TrimLeft(output, containers) + output = strings.Trim(output, " ") + containerCount, err := strconv.Atoi(output) + if err != nil { + return 0, err + } + return containerCount, nil + } + } + return 0, fmt.Errorf("couldn't find the Container count in the output") +} + +// FakeContext creates directories that can be used as a build context +type FakeContext struct { + Dir string +} + +// Add a file at a path, creating directories where necessary +func (f *FakeContext) Add(file, content string) error { + return f.addFile(file, []byte(content)) +} + +func (f *FakeContext) addFile(file string, content []byte) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + dirpath := filepath.Dir(fp) + if dirpath != "." { + if err := os.MkdirAll(dirpath, 0755); err != nil { + return err + } + } + return ioutil.WriteFile(fp, content, 0644) + +} + +// Delete a file at a path +func (f *FakeContext) Delete(file string) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + return os.RemoveAll(fp) +} + +// Close deletes the context +func (f *FakeContext) Close() error { + return os.RemoveAll(f.Dir) +} + +func fakeContextFromNewTempDir() (*FakeContext, error) { + tmp, err := ioutil.TempDir("", "fake-context") + if err != nil { + return nil, err + } + if err := os.Chmod(tmp, 0755); err != nil { + return nil, err + } + return fakeContextFromDir(tmp), nil +} + +func fakeContextFromDir(dir string) *FakeContext { + return &FakeContext{dir} +} + +func fakeContextWithFiles(files map[string]string) (*FakeContext, error) { + ctx, err := fakeContextFromNewTempDir() + if err != nil { + return nil, err + } + for file, content := range files { + if err := ctx.Add(file, content); err != nil { + ctx.Close() + return nil, err + } + } + return ctx, nil +} + +func fakeContextAddDockerfile(ctx *FakeContext, dockerfile string) error { + if err := ctx.Add("Dockerfile", dockerfile); err != nil { + ctx.Close() + return err + } + return nil +} + +func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil { + return nil, err + } + return ctx, nil +} + +// FakeStorage is a static file server. It might be running locally or remotely +// on test host. +type FakeStorage interface { + Close() error + URL() string + CtxDir() string +} + +func fakeBinaryStorage(archives map[string]*bytes.Buffer) (FakeStorage, error) { + ctx, err := fakeContextFromNewTempDir() + if err != nil { + return nil, err + } + for name, content := range archives { + if err := ctx.addFile(name, content.Bytes()); err != nil { + return nil, err + } + } + return fakeStorageWithContext(ctx) +} + +// fakeStorage returns either a local or remote (at daemon machine) file server +func fakeStorage(files map[string]string) (FakeStorage, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + return fakeStorageWithContext(ctx) +} + +// fakeStorageWithContext returns either a local or remote (at daemon machine) file server +func fakeStorageWithContext(ctx *FakeContext) (FakeStorage, error) { + if isLocalDaemon { + return newLocalFakeStorage(ctx) + } + return newRemoteFileServer(ctx) +} + +// localFileStorage is a file storage on the running machine +type localFileStorage struct { + *FakeContext + *httptest.Server +} + +func (s *localFileStorage) URL() string { + return s.Server.URL +} + +func (s *localFileStorage) CtxDir() string { + return s.FakeContext.Dir +} + +func (s *localFileStorage) Close() error { + defer s.Server.Close() + return s.FakeContext.Close() +} + +func newLocalFakeStorage(ctx *FakeContext) (*localFileStorage, error) { + handler := http.FileServer(http.Dir(ctx.Dir)) + server := httptest.NewServer(handler) + return &localFileStorage{ + FakeContext: ctx, + Server: server, + }, nil +} + +// remoteFileServer is a containerized static file server started on the remote +// testing machine to be used in URL-accepting docker build functionality. +type remoteFileServer struct { + host string // hostname/port web server is listening to on docker host e.g. 0.0.0.0:43712 + container string + image string + ctx *FakeContext +} + +func (f *remoteFileServer) URL() string { + u := url.URL{ + Scheme: "http", + Host: f.host} + return u.String() +} + +func (f *remoteFileServer) CtxDir() string { + return f.ctx.Dir +} + +func (f *remoteFileServer) Close() error { + defer func() { + if f.ctx != nil { + f.ctx.Close() + } + if f.image != "" { + deleteImages(f.image) + } + }() + if f.container == "" { + return nil + } + return deleteContainer(f.container) +} + +func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) { + var ( + image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + ) + + if err := ensureHTTPServerImage(); err != nil { + return nil, err + } + + // Build the image + if err := fakeContextAddDockerfile(ctx, `FROM httpserver +COPY . /static`); err != nil { + return nil, fmt.Errorf("Cannot add Dockerfile to context: %v", err) + } + if _, err := buildImageFromContext(image, ctx, false); err != nil { + return nil, fmt.Errorf("failed building file storage container image: %v", err) + } + + // Start the container + runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--name", container, image) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + return nil, fmt.Errorf("failed to start file storage container. ec=%v\nout=%s\nerr=%v", ec, out, err) + } + + // Find out the system assigned port + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "port", container, "80/tcp")) + if err != nil { + return nil, fmt.Errorf("failed to find container port: err=%v\nout=%s", err, out) + } + + fileserverHostPort := strings.Trim(out, "\n") + _, port, err := net.SplitHostPort(fileserverHostPort) + if err != nil { + return nil, fmt.Errorf("unable to parse file server host:port: %v", err) + } + + dockerHostURL, err := url.Parse(daemonHost()) + if err != nil { + return nil, fmt.Errorf("unable to parse daemon host URL: %v", err) + } + + host, _, err := net.SplitHostPort(dockerHostURL.Host) + if err != nil { + return nil, fmt.Errorf("unable to parse docker daemon host:port: %v", err) + } + + return &remoteFileServer{ + container: container, + image: image, + host: fmt.Sprintf("%s:%s", host, port), + ctx: ctx}, nil +} + +func inspectFieldAndMarshall(c *check.C, name, field string, output interface{}) { + str := inspectFieldJSON(c, name, field) + err := json.Unmarshal([]byte(str), output) + if c != nil { + c.Assert(err, check.IsNil, check.Commentf("failed to unmarshal: %v", err)) + } +} + +func inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) + out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func inspectFieldWithError(name, field string) (string, error) { + return inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +func inspectField(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf(".%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +func inspectFieldJSON(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("json .%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +func inspectFieldMap(c *check.C, name, path, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +func inspectMountSourceField(name, destination string) (string, error) { + m, err := inspectMountPoint(name, destination) + if err != nil { + return "", err + } + return m.Source, nil +} + +func inspectMountPoint(name, destination string) (types.MountPoint, error) { + out, err := inspectFilter(name, "json .Mounts") + if err != nil { + return types.MountPoint{}, err + } + + return inspectMountPointJSON(out, destination) +} + +var errMountNotFound = errors.New("mount point not found") + +func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { + var mp []types.MountPoint + if err := json.Unmarshal([]byte(j), &mp); err != nil { + return types.MountPoint{}, err + } + + var m *types.MountPoint + for _, c := range mp { + if c.Destination == destination { + m = &c + break + } + } + + if m == nil { + return types.MountPoint{}, errMountNotFound + } + + return *m, nil +} + +func inspectImage(name, filter string) (string, error) { + args := []string{"inspect", "--type", "image"} + if filter != "" { + format := fmt.Sprintf("{{%s}}", filter) + args = append(args, "-f", format) + } + args = append(args, name) + inspectCmd := exec.Command(dockerBinary, args...) + out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func getIDByName(name string) (string, error) { + return inspectFieldWithError(name, "Id") +} + +// getContainerState returns the exit code of the container +// and true if it's running +// the exit code should be ignored if it's running +func getContainerState(c *check.C, id string) (int, bool, error) { + var ( + exitStatus int + running bool + ) + out, exitCode := dockerCmd(c, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) + if exitCode != 0 { + return 0, false, fmt.Errorf("%q doesn't exist: %s", id, out) + } + + out = strings.Trim(out, "\n") + splitOutput := strings.Split(out, " ") + if len(splitOutput) != 2 { + return 0, false, fmt.Errorf("failed to get container state: output is broken") + } + if splitOutput[0] == "true" { + running = true + } + if n, err := strconv.Atoi(splitOutput[1]); err == nil { + exitStatus = n + } else { + return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer") + } + + return exitStatus, running, nil +} + +func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd { + return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...) +} + +func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { + args := []string{} + if host != "" { + args = append(args, "--host", host) + } + args = append(args, "build", "-t", name) + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd +} + +func buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, error) { + buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", out, fmt.Errorf("failed to build the image: %s", out) + } + id, err := getIDByName(name) + if err != nil { + return "", out, err + } + return id, out, nil +} + +func buildImageWithStdoutStderr(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, string, error) { + buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImage(name, dockerfile string, useCache bool, buildFlags ...string) (string, error) { + id, _, err := buildImageWithOut(name, dockerfile, useCache, buildFlags...) + return id, err +} + +func buildImageFromContext(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, error) { + id, _, err := buildImageFromContextWithOut(name, ctx, useCache, buildFlags...) + if err != nil { + return "", err + } + return id, nil +} + +func buildImageFromContextWithOut(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, ".") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Dir = ctx.Dir + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", "", fmt.Errorf("failed to build the image: %s", out) + } + id, err := getIDByName(name) + if err != nil { + return "", "", err + } + return id, out, nil +} + +func buildImageFromContextWithStdoutStderr(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, ".") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Dir = ctx.Dir + + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImageFromGitWithStdoutStderr(name string, ctx *fakeGit, useCache bool, buildFlags ...string) (string, string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, ctx.RepoURL) + buildCmd := exec.Command(dockerBinary, args...) + + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImageFromPath(name, path string, useCache bool, buildFlags ...string) (string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, path) + buildCmd := exec.Command(dockerBinary, args...) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to build the image: %s", out) + } + return getIDByName(name) +} + +type gitServer interface { + URL() string + Close() error +} + +type localGitServer struct { + *httptest.Server +} + +func (r *localGitServer) Close() error { + r.Server.Close() + return nil +} + +func (r *localGitServer) URL() string { + return r.Server.URL +} + +type fakeGit struct { + root string + server gitServer + RepoURL string +} + +func (g *fakeGit) Close() { + g.server.Close() + os.RemoveAll(g.root) +} + +func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (*fakeGit, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + defer ctx.Close() + curdir, err := os.Getwd() + if err != nil { + return nil, err + } + defer os.Chdir(curdir) + + if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output) + } + err = os.Chdir(ctx.Dir) + if err != nil { + return nil, err + } + if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to set 'user.name': %s (%s)", err, output) + } + if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to set 'user.email': %s (%s)", err, output) + } + if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output) + } + if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output) + } + + root, err := ioutil.TempDir("", "docker-test-git-repo") + if err != nil { + return nil, err + } + repoPath := filepath.Join(root, name+".git") + if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { + os.RemoveAll(root) + return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output) + } + err = os.Chdir(repoPath) + if err != nil { + os.RemoveAll(root) + return nil, err + } + if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { + os.RemoveAll(root) + return nil, fmt.Errorf("error trying to git update-server-info: %s (%s)", err, output) + } + err = os.Chdir(curdir) + if err != nil { + os.RemoveAll(root) + return nil, err + } + + var server gitServer + if !enforceLocalServer { + // use fakeStorage server, which might be local or remote (at test daemon) + server, err = fakeStorageWithContext(fakeContextFromDir(root)) + if err != nil { + return nil, fmt.Errorf("cannot start fake storage: %v", err) + } + } else { + // always start a local http server on CLI test machine + httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) + server = &localGitServer{httpServer} + } + return &fakeGit{ + root: root, + server: server, + RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), + }, nil +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Fail the test when error occurs. +func writeFile(dst, content string, c *check.C) { + // Create subdirectories if necessary + c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil) + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + c.Assert(err, check.IsNil) + defer f.Close() + // Write content (truncate if it exists) + _, err = io.Copy(f, strings.NewReader(content)) + c.Assert(err, check.IsNil) +} + +// Return the contents of file at path `src`. +// Fail the test when error occurs. +func readFile(src string, c *check.C) (content string) { + data, err := ioutil.ReadFile(src) + c.Assert(err, check.IsNil) + + return string(data) +} + +func containerStorageFile(containerID, basename string) string { + return filepath.Join(containerStoragePath, containerID, basename) +} + +// docker commands that use this function must be run with the '-d' switch. +func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return nil, fmt.Errorf("%v: %q", err, out) + } + + contID := strings.TrimSpace(out) + + if err := waitRun(contID); err != nil { + return nil, fmt.Errorf("%v: %q", contID, err) + } + + return readContainerFile(contID, filename) +} + +func readContainerFile(containerID, filename string) ([]byte, error) { + f, err := os.Open(containerStorageFile(containerID, filename)) + if err != nil { + return nil, err + } + defer f.Close() + + content, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return content, nil +} + +func readContainerFileWithExec(containerID, filename string) ([]byte, error) { + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "exec", containerID, "cat", filename)) + return []byte(out), err +} + +// daemonTime provides the current time on the daemon host +func daemonTime(c *check.C) time.Time { + if isLocalDaemon { + return time.Now() + } + + status, body, err := sockRequest("GET", "/info", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + type infoJSON struct { + SystemTime string + } + var info infoJSON + err = json.Unmarshal(body, &info) + c.Assert(err, check.IsNil, check.Commentf("unable to unmarshal GET /info response")) + + dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) + c.Assert(err, check.IsNil, check.Commentf("invalid time format in GET /info response")) + return dt +} + +// daemonUnixTime returns the current time on the daemon host with nanoseconds precision. +// It return the time formatted how the client sends timestamps to the server. +func daemonUnixTime(c *check.C) string { + return parseEventTime(daemonTime(c)) +} + +func parseEventTime(t time.Time) string { + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())) +} + +func setupRegistry(c *check.C, schema1 bool, auth, tokenURL string) *testRegistryV2 { + reg, err := newTestRegistryV2(c, schema1, auth, tokenURL) + c.Assert(err, check.IsNil) + + // Wait for registry to be ready to serve requests. + for i := 0; i != 50; i++ { + if err = reg.Ping(); err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + + c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for test registry to become available: %v", err)) + return reg +} + +func setupNotary(c *check.C) *testNotary { + ts, err := newTestNotary(c) + c.Assert(err, check.IsNil) + + return ts +} + +// appendBaseEnv appends the minimum set of environment variables to exec the +// docker cli binary for testing with correct configuration to the given env +// list. +func appendBaseEnv(isTLS bool, env ...string) []string { + preserveList := []string{ + // preserve remote test host + "DOCKER_HOST", + + // windows: requires preserving SystemRoot, otherwise dial tcp fails + // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." + "SystemRoot", + + // testing help text requires the $PATH to dockerd is set + "PATH", + } + if isTLS { + preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH") + } + + for _, key := range preserveList { + if val := os.Getenv(key); val != "" { + env = append(env, fmt.Sprintf("%s=%s", key, val)) + } + } + return env +} + +func createTmpFile(c *check.C, content string) string { + f, err := ioutil.TempFile("", "testfile") + c.Assert(err, check.IsNil) + + filename := f.Name() + + err = ioutil.WriteFile(filename, []byte(content), 0644) + c.Assert(err, check.IsNil) + + return filename +} + +func buildImageWithOutInDamon(socket string, name, dockerfile string, useCache bool) (string, error) { + args := []string{"--host", socket} + buildCmd := buildImageCmdArgs(args, name, dockerfile, useCache) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return out, fmt.Errorf("failed to build the image: %s, error: %v", out, err) + } + return out, nil +} + +func buildImageCmdArgs(args []string, name, dockerfile string, useCache bool) *exec.Cmd { + args = append(args, []string{"-D", "build", "-t", name}...) + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd + +} + +func waitForContainer(contID string, args ...string) error { + args = append([]string{dockerBinary, "run", "--name", contID}, args...) + result := icmd.RunCmd(icmd.Cmd{Command: args}) + if result.Error != nil { + return result.Error + } + return waitRun(contID) +} + +// waitRestart will wait for the specified container to restart once +func waitRestart(contID string, duration time.Duration) error { + return waitInspect(contID, "{{.RestartCount}}", "1", duration) +} + +// waitRun will wait for the specified container to be running, maximum 5 seconds. +func waitRun(contID string) error { + return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second) +} + +// waitExited will wait for the specified container to state exit, subject +// to a maximum time limit in seconds supplied by the caller +func waitExited(contID string, duration time.Duration) error { + return waitInspect(contID, "{{.State.Status}}", "exited", duration) +} + +// waitInspect will wait for the specified container to have the specified string +// in the inspect output. It will wait until the specified timeout (in seconds) +// is reached. +func waitInspect(name, expr, expected string, timeout time.Duration) error { + return waitInspectWithArgs(name, expr, expected, timeout) +} + +func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { + after := time.After(timeout) + + args := append(arg, "inspect", "-f", expr, name) + for { + result := icmd.RunCommand(dockerBinary, args...) + if result.Error != nil { + if !strings.Contains(result.Stderr(), "No such") { + return fmt.Errorf("error executing docker inspect: %v\n%s", + result.Stderr(), result.Stdout()) + } + select { + case <-after: + return result.Error + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + + out := strings.TrimSpace(result.Stdout()) + if out == expected { + break + } + + select { + case <-after: + return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) + default: + } + + time.Sleep(100 * time.Millisecond) + } + return nil +} + +func getInspectBody(c *check.C, version, id string) []byte { + endpoint := fmt.Sprintf("/%s/containers/%s/json", version, id) + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + return body +} + +// Run a long running idle task in a background container using the +// system-specific default image and command. +func runSleepingContainer(c *check.C, extraArgs ...string) (string, int) { + return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) +} + +// Run a long running idle task in a background container using the specified +// image and the system-specific command. +func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) (string, int) { + args := []string{"run", "-d"} + args = append(args, extraArgs...) + args = append(args, image) + args = append(args, sleepCommandForDaemonPlatform()...) + return dockerCmd(c, args...) +} + +func getRootUIDGID() (int, int, error) { + uidgid := strings.Split(filepath.Base(dockerBasePath), ".") + if len(uidgid) == 1 { + //user namespace remapping is not turned on; return 0 + return 0, 0, nil + } + uid, err := strconv.Atoi(uidgid[0]) + if err != nil { + return 0, 0, err + } + gid, err := strconv.Atoi(uidgid[1]) + if err != nil { + return 0, 0, err + } + return uid, gid, nil +} + +// minimalBaseImage returns the name of the minimal base image for the current +// daemon platform. +func minimalBaseImage() string { + if daemonPlatform == "windows" { + return WindowsBaseImage + } + return "scratch" +} + +func getGoroutineNumber() (int, error) { + i := struct { + NGoroutines int + }{} + status, b, err := sockRequest("GET", "/info", nil) + if err != nil { + return 0, err + } + if status != http.StatusOK { + return 0, fmt.Errorf("http status code: %d", status) + } + if err := json.Unmarshal(b, &i); err != nil { + return 0, err + } + return i.NGoroutines, nil +} + +func waitForGoroutines(expected int) error { + t := time.After(30 * time.Second) + for { + select { + case <-t: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n > expected { + return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n) + } + default: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n <= expected { + return nil + } + time.Sleep(200 * time.Millisecond) + } + } +} + +// getErrorMessage returns the error message from an error API response +func getErrorMessage(c *check.C, body []byte) string { + var resp types.ErrorResponse + c.Assert(json.Unmarshal(body, &resp), check.IsNil) + return strings.TrimSpace(resp.Message) +} + +func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) { + after := time.After(timeout) + for { + v, comment := f(c) + assert, _ := checker.Check(append([]interface{}{v}, args...), checker.Info().Params) + select { + case <-after: + assert = true + default: + } + if assert { + if comment != nil { + args = append(args, comment) + } + c.Assert(v, checker, args...) + return + } + time.Sleep(100 * time.Millisecond) + } +} + +type checkF func(*check.C) (interface{}, check.CommentInterface) +type reducer func(...interface{}) interface{} + +func reducedCheck(r reducer, funcs ...checkF) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + var values []interface{} + var comments []string + for _, f := range funcs { + v, comment := f(c) + values = append(values, v) + if comment != nil { + comments = append(comments, comment.CheckCommentString()) + } + } + return r(values...), check.Commentf("%v", strings.Join(comments, ", ")) + } +} + +func sumAsIntegers(vals ...interface{}) interface{} { + var s int + for _, v := range vals { + s += v.(int) + } + return s +} diff --git a/vendor/github.com/docker/docker/integration-cli/events_utils.go b/vendor/github.com/docker/docker/integration-cli/events_utils.go new file mode 100644 index 0000000..ba24179 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/events_utils.go @@ -0,0 +1,206 @@ +package main + +import ( + "bufio" + "bytes" + "io" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + eventstestutils "github.com/docker/docker/daemon/events/testutils" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// eventMatcher is a function that tries to match an event input. +// It returns true if the event matches and a map with +// a set of key/value to identify the match. +type eventMatcher func(text string) (map[string]string, bool) + +// eventMatchProcessor is a function to handle an event match. +// It receives a map of key/value with the information extracted in a match. +type eventMatchProcessor func(matches map[string]string) + +// eventObserver runs an events commands and observes its output. +type eventObserver struct { + buffer *bytes.Buffer + command *exec.Cmd + scanner *bufio.Scanner + startTime string + disconnectionError error +} + +// newEventObserver creates the observer and initializes the command +// without running it. Users must call `eventObserver.Start` to start the command. +func newEventObserver(c *check.C, args ...string) (*eventObserver, error) { + since := daemonTime(c).Unix() + return newEventObserverWithBacklog(c, since, args...) +} + +// newEventObserverWithBacklog creates a new observer changing the start time of the backlog to return. +func newEventObserverWithBacklog(c *check.C, since int64, args ...string) (*eventObserver, error) { + startTime := strconv.FormatInt(since, 10) + cmdArgs := []string{"events", "--since", startTime} + if len(args) > 0 { + cmdArgs = append(cmdArgs, args...) + } + eventsCmd := exec.Command(dockerBinary, cmdArgs...) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + return nil, err + } + + return &eventObserver{ + buffer: new(bytes.Buffer), + command: eventsCmd, + scanner: bufio.NewScanner(stdout), + startTime: startTime, + }, nil +} + +// Start starts the events command. +func (e *eventObserver) Start() error { + return e.command.Start() +} + +// Stop stops the events command. +func (e *eventObserver) Stop() { + e.command.Process.Kill() + e.command.Process.Release() +} + +// Match tries to match the events output with a given matcher. +func (e *eventObserver) Match(match eventMatcher, process eventMatchProcessor) { + for e.scanner.Scan() { + text := e.scanner.Text() + e.buffer.WriteString(text) + e.buffer.WriteString("\n") + + if matches, ok := match(text); ok { + process(matches) + } + } + + err := e.scanner.Err() + if err == nil { + err = io.EOF + } + + logrus.Debugf("EventObserver scanner loop finished: %v", err) + e.disconnectionError = err +} + +func (e *eventObserver) CheckEventError(c *check.C, id, event string, match eventMatcher) { + var foundEvent bool + scannerOut := e.buffer.String() + + if e.disconnectionError != nil { + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", e.startTime, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + for _, e := range events { + if _, ok := match(e); ok { + foundEvent = true + break + } + } + scannerOut = out + } + if !foundEvent { + c.Fatalf("failed to observe event `%s` for %s. Disconnection error: %v\nout:\n%v", event, id, e.disconnectionError, scannerOut) + } +} + +// matchEventLine matches a text with the event regular expression. +// It returns the matches and true if the regular expression matches with the given id and event type. +// It returns an empty map and false if there is no match. +func matchEventLine(id, eventType string, actions map[string]chan bool) eventMatcher { + return func(text string) (map[string]string, bool) { + matches := eventstestutils.ScanMap(text) + if len(matches) == 0 { + return matches, false + } + + if matchIDAndEventType(matches, id, eventType) { + if _, ok := actions[matches["action"]]; ok { + return matches, true + } + } + return matches, false + } +} + +// processEventMatch closes an action channel when an event line matches the expected action. +func processEventMatch(actions map[string]chan bool) eventMatchProcessor { + return func(matches map[string]string) { + if ch, ok := actions[matches["action"]]; ok { + ch <- true + } + } +} + +// parseEventAction parses an event text and returns the action. +// It fails if the text is not in the event format. +func parseEventAction(c *check.C, text string) string { + matches := eventstestutils.ScanMap(text) + return matches["action"] +} + +// eventActionsByIDAndType returns the actions for a given id and type. +// It fails if the text is not in the event format. +func eventActionsByIDAndType(c *check.C, events []string, id, eventType string) []string { + var filtered []string + for _, event := range events { + matches := eventstestutils.ScanMap(event) + c.Assert(matches, checker.Not(checker.IsNil)) + if matchIDAndEventType(matches, id, eventType) { + filtered = append(filtered, matches["action"]) + } + } + return filtered +} + +// matchIDAndEventType returns true if an event matches a given id and type. +// It also resolves names in the event attributes if the id doesn't match. +func matchIDAndEventType(matches map[string]string, id, eventType string) bool { + return matchEventID(matches, id) && matches["eventType"] == eventType +} + +func matchEventID(matches map[string]string, id string) bool { + matchID := matches["id"] == id || strings.HasPrefix(matches["id"], id) + if !matchID && matches["attributes"] != "" { + // try matching a name in the attributes + attributes := map[string]string{} + for _, a := range strings.Split(matches["attributes"], ", ") { + kv := strings.Split(a, "=") + attributes[kv[0]] = kv[1] + } + matchID = attributes["name"] == id + } + return matchID +} + +func parseEvents(c *check.C, out, match string) { + events := strings.Split(strings.TrimSpace(out), "\n") + for _, event := range events { + matches := eventstestutils.ScanMap(event) + matched, err := regexp.MatchString(match, matches["action"]) + c.Assert(err, checker.IsNil) + c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"])) + } +} + +func parseEventsWithID(c *check.C, out, match, id string) { + events := strings.Split(strings.TrimSpace(out), "\n") + for _, event := range events { + matches := eventstestutils.ScanMap(event) + c.Assert(matchEventID(matches, id), checker.True) + + matched, err := regexp.MatchString(match, matches["action"]) + c.Assert(err, checker.IsNil) + c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"])) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures.go b/vendor/github.com/docker/docker/integration-cli/fixtures.go new file mode 100644 index 0000000..e99b738 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures.go @@ -0,0 +1,69 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" +) + +var ensureHTTPServerOnce sync.Once + +func ensureHTTPServerImage() error { + var doIt bool + ensureHTTPServerOnce.Do(func() { + doIt = true + }) + + if !doIt { + return nil + } + + protectedImages["httpserver:latest"] = struct{}{} + + tmp, err := ioutil.TempDir("", "docker-http-server-test") + if err != nil { + return fmt.Errorf("could not build http server: %v", err) + } + defer os.RemoveAll(tmp) + + goos := daemonPlatform + if goos == "" { + goos = "linux" + } + goarch := os.Getenv("DOCKER_ENGINE_GOARCH") + if goarch == "" { + goarch = "amd64" + } + + goCmd, lookErr := exec.LookPath("go") + if lookErr != nil { + return fmt.Errorf("could not build http server: %v", lookErr) + } + + cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") + cmd.Env = append(os.Environ(), []string{ + "CGO_ENABLED=0", + "GOOS=" + goos, + "GOARCH=" + goarch, + }...) + var out []byte + if out, err = cmd.CombinedOutput(); err != nil { + return fmt.Errorf("could not build http server: %s", string(out)) + } + + cpCmd, lookErr := exec.LookPath("cp") + if lookErr != nil { + return fmt.Errorf("could not build http server: %v", lookErr) + } + if out, err = exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { + return fmt.Errorf("could not build http server: %v", string(out)) + } + + if out, err = exec.Command(dockerBinary, "build", "-q", "-t", "httpserver", tmp).CombinedOutput(); err != nil { + return fmt.Errorf("could not build http server: %v", string(out)) + } + return nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test b/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test new file mode 100755 index 0000000..a7be56b --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test @@ -0,0 +1,55 @@ +#!/bin/bash + +set -e + +listFile=shell_test_list.json + +case $1 in + "store") + in=$( $TEMP/$serverHash + # add the server to the list file + if [[ ! -f $TEMP/$listFile ]]; then + echo "{ \"${server}\": \"${username}\" }" > $TEMP/$listFile + else + list=$(<$TEMP/$listFile) + echo "$list" | jq ". + {\"${server}\": \"${username}\"}" > $TEMP/$listFile + fi + ;; + "get") + in=$( $TEMP/$listFile + ;; + "list") + if [[ ! -f $TEMP/$listFile ]]; then + echo "{}" + else + payload=$(<$TEMP/$listFile) + echo "$payload" + fi + ;; + *) + echo "unknown credential option" + exit 1 + ;; +esac diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/credentialspecs/valid.json b/vendor/github.com/docker/docker/integration-cli/fixtures/credentialspecs/valid.json new file mode 100644 index 0000000..28913e4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/credentialspecs/valid.json @@ -0,0 +1,25 @@ +{ + "CmsPlugins": [ + "ActiveDirectory" + ], + "DomainJoinConfig": { + "Sid": "S-1-5-21-4288985-3632099173-1864715694", + "MachineAccountName": "MusicStoreAcct", + "Guid": "3705d4c3-0b80-42a9-ad97-ebc1801c74b9", + "DnsTreeName": "hyperv.local", + "DnsName": "hyperv.local", + "NetBiosName": "hyperv" + }, + "ActiveDirectoryConfig": { + "GroupManagedServiceAccounts": [ + { + "Name": "MusicStoreAcct", + "Scope": "hyperv.local" + }, + { + "Name": "MusicStoreAcct", + "Scope": "hyperv" + } + ] + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml new file mode 100644 index 0000000..f30c04f --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml @@ -0,0 +1,9 @@ + +version: "3" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + db: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: "tail -f /dev/null" diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml new file mode 100644 index 0000000..4ec8cac --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml @@ -0,0 +1,11 @@ + +version: "3.1" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + secrets: + - special +secrets: + special: + file: fixtures/secrets/default diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml new file mode 100644 index 0000000..6ac92cd --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml @@ -0,0 +1,20 @@ + +version: "3.1" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + secrets: + - special + - source: super + target: foo.txt + mode: 0400 + - star +secrets: + special: + file: fixtures/secrets/default + super: + file: fixtures/secrets/default + star: + external: + name: outside diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem new file mode 100644 index 0000000..6825d6d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI +Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls +QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx +CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv +MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD +VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW +EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn +0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp +AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 +sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV +HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi +zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE +ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt +Zxtf5lL6KSO9Y+EFwM+rju6hm5hW +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem new file mode 100644 index 0000000..c05ed47 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 14:17:54 2013 GMT + Not After : Dec 2 14:17:54 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: + 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: + f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: + b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: + 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: + 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: + aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: + 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: + 7e:4e:78:7d:0a:9e:8f:42:43 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: + 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: + 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: + af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: + 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: + f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: + 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: + 4a:c4 +-----BEGIN CERTIFICATE----- +MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp +ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 +LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 +peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB +Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 +cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ +YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV +HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN +AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ +kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 +aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem new file mode 100644 index 0000000..b5c15f8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU +9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw +gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ +93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh +xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 +FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN +OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC +4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU +SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe +iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy +v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl +qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw +qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 +ksDFuNxAzbhl +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-cert.pem new file mode 100644 index 0000000..21ae4bd --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 24 17:54:59 2014 GMT + Not After : Feb 22 17:54:59 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7: + e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5: + 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d: + bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6: + b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f: + f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e: + e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87: + 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d: + 1d:7b:6c:7b:be:89:6b:88:8b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85 + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1: + 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c: + fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83: + be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92: + cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3: + 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd: + 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76: + b3:f9 +-----BEGIN CERTIFICATE----- +MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx +ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t +YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM +R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66 +aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL +lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB +hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW +BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x +I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw +EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL +EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l +MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD +VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB +AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9 +RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3 +C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5 +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-key.pem new file mode 100644 index 0000000..53c122a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce +aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W +tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf +bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ +nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW ++IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej +VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd +vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1 +6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F +MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa +8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg +OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ +SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5 +nrOdMf15T6QF7Q== +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem new file mode 100644 index 0000000..08abfd1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 4 (0x4) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 15:01:20 2013 GMT + Not After : Dec 2 15:01:20 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: + e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: + 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: + 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: + e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: + 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: + 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: + c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: + a8:05:32:1e:f9:95:09:14:75 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: + ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: + 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: + df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: + c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: + 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: + 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: + 15:42 +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER +MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h +aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b +LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 +cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch +M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG ++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl +cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw +gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ +BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw +EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL +zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn +mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX +dDBV9m4gmmweCbQMFUI= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem new file mode 100644 index 0000000..c269320 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx +0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y +4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ +lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ +wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ +wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS +IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 +4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP +WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq ++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv +HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj ++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc +BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW +5nCwDu5ZTP+khltg +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-cert.pem new file mode 100644 index 0000000..28feba6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 28 18:49:31 2014 GMT + Not After : Feb 26 18:49:31 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75: + 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9: + 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84: + 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a: + 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82: + aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb: + d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f: + 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a: + 9e:02:5c:be:65:98:a4:b4:b5 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30: + 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85: + 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65: + 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73: + 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3: + 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c: + ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06: + c7:9f +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv +c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu +ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I +dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc +qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW +VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg +hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO +lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe +MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj +bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw +Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO +AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3 +CQxdfIYk3ZLVsxQGx58= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-key.pem new file mode 100644 index 0000000..10f7c65 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG +j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq +FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C +ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR +8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8 +6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl +1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD +37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO +moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl +3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w +ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs +wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj +iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+ +Z1hrIq8xYl2LOQ== +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/load/emptyLayer.tar b/vendor/github.com/docker/docker/integration-cli/fixtures/load/emptyLayer.tar new file mode 100644 index 0000000000000000000000000000000000000000..beabb569ac1e7fea69848f79ffd77fb17b2487b0 GIT binary patch literal 30720 zcmeI4ZExeo5y$c* zovW*2`my-BS#-6VC#(TWylUoH;FtY>E>gSBdD(R?T)*vNRTtCjW{dQq*rhc~JiEJz zsl`J#iI?4n-S>-|RPWcb+PkO&p2Vj7>-wi+S}$j_7sXd|yt?(*H*av1 z+nj$j?T=M`^{U#O?d!U0Kio7`Pi=kdMg6yBHQQD7cjwb=bNHthMQp;4h~=l$E~*AE zuwWB8>5;iMTvuHR-Lmal{@LpHT4MUleZcZ73oou6S9)q>^d-k>oX_}Qcxyo^p;T#I z4yBjEs}MP78A@rGh|&6(B$v4q&IKEaPbcYiRLK(|Fm&Lbue#U-=g_}>K?m>u=o9SE z|2Wfh|Nm{EC(M}~AUk1=_?|0jAs(MS-F>HlGq1Fv!G%u}F zxq~RdI2NK#f|*#Nn~(V{vFLOO8UqEuV@wWH0lWbP;+XE!kN?B|zvfDCV+{KLI33XJ zG420+{r|!CKbik8jnddl(C`@lF&t?A{}{IbApid{FhAKg29N*Y{y*OI0q1{v<6mif z{ttd8A4C0_$Nx6||IluLzUGZ<*Y1BAOzlPXfBaM3-28d; z^3|(Ru4ZY9j6K(!m62PF7=SBvvY>6u9_MkwI>$54a7OCT$T3-m=lJE( zmUnm}hhi;&u@=Bq+_JlF`T)-5bsBwzUH;iw#FV;!sB`|AkTbLnLJ;j*XJ0q(@q+&r#AY7K_tidj`Je@64Y zt^SAcHvH+*phv{y_9nO&aXZ27@H1gNA7uuCn|I7u4AJ4h5%>!(#?o4@01sM9=?Y<}U-wbn}~IT=+q) zpnU-y`(0?Hj|bqk-Cup*+8>Y5g-CYfCvx)B0vO)01+SpM1Tko z0U|&IhyW2F0z`la5CI}U1c(3;cp3zTzMXo^%Y*&eXctdy*>?6A8Sq zp|r}Cz7&k%LWj}|qk_p@3pT0=6BEu*_dHZBTzvXOYO{wlC)G^^hyW2F0z`la5CI}U k1c(3;AOb{y2oM1xKm>>Y5g-CYfCvx)B0vO)z%wTBe{YUtx&QzG literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go b/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go new file mode 100644 index 0000000..13cd393 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go @@ -0,0 +1,182 @@ +package load + +import ( + "bufio" + "bytes" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var frozenImgDir = "/docker-frozen-images" + +// FrozenImagesLinux loads the frozen image set for the integration suite +// If the images are not available locally it will download them +// TODO: This loads whatever is in the frozen image dir, regardless of what +// images were passed in. If the images need to be downloaded, then it will respect +// the passed in images +func FrozenImagesLinux(dockerBinary string, images ...string) error { + imgNS := os.Getenv("TEST_IMAGE_NAMESPACE") + var loadImages []struct{ srcName, destName string } + for _, img := range images { + if err := exec.Command(dockerBinary, "inspect", "--type=image", img).Run(); err != nil { + srcName := img + // hello-world:latest gets re-tagged as hello-world:frozen + // there are some tests that use hello-world:latest specifically so it pulls + // the image and hello-world:frozen is used for when we just want a super + // small image + if img == "hello-world:frozen" { + srcName = "hello-world:latest" + } + if imgNS != "" { + srcName = imgNS + "/" + srcName + } + loadImages = append(loadImages, struct{ srcName, destName string }{ + srcName: srcName, + destName: img, + }) + } + } + if len(loadImages) == 0 { + // everything is loaded, we're done + return nil + } + + fi, err := os.Stat(frozenImgDir) + if err != nil || !fi.IsDir() { + srcImages := make([]string, 0, len(loadImages)) + for _, img := range loadImages { + srcImages = append(srcImages, img.srcName) + } + if err := pullImages(dockerBinary, srcImages); err != nil { + return errors.Wrap(err, "error pulling image list") + } + } else { + if err := loadFrozenImages(dockerBinary); err != nil { + return err + } + } + + for _, img := range loadImages { + if img.srcName != img.destName { + if out, err := exec.Command(dockerBinary, "tag", img.srcName, img.destName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + if out, err := exec.Command(dockerBinary, "rmi", img.srcName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + } + } + return nil +} + +func loadFrozenImages(dockerBinary string) error { + tar, err := exec.LookPath("tar") + if err != nil { + return errors.Wrap(err, "could not find tar binary") + } + tarCmd := exec.Command(tar, "-cC", frozenImgDir, ".") + out, err := tarCmd.StdoutPipe() + if err != nil { + return errors.Wrap(err, "error getting stdout pipe for tar command") + } + + errBuf := bytes.NewBuffer(nil) + tarCmd.Stderr = errBuf + tarCmd.Start() + defer tarCmd.Wait() + + cmd := exec.Command(dockerBinary, "load") + cmd.Stdin = out + if out, err := cmd.CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + return nil +} + +func pullImages(dockerBinary string, images []string) error { + cwd, err := os.Getwd() + if err != nil { + return errors.Wrap(err, "error getting path to dockerfile") + } + dockerfile := os.Getenv("DOCKERFILE") + if dockerfile == "" { + dockerfile = "Dockerfile" + } + dockerfilePath := filepath.Join(filepath.Dir(filepath.Clean(cwd)), dockerfile) + pullRefs, err := readFrozenImageList(dockerfilePath, images) + if err != nil { + return errors.Wrap(err, "error reading frozen image list") + } + + var wg sync.WaitGroup + chErr := make(chan error, len(images)) + for tag, ref := range pullRefs { + wg.Add(1) + go func(tag, ref string) { + defer wg.Done() + if out, err := exec.Command(dockerBinary, "pull", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "tag", ref, tag).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "rmi", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + }(tag, ref) + } + wg.Wait() + close(chErr) + return <-chErr +} + +func readFrozenImageList(dockerfilePath string, images []string) (map[string]string, error) { + f, err := os.Open(dockerfilePath) + if err != nil { + return nil, errors.Wrap(err, "error reading dockerfile") + } + defer f.Close() + ls := make(map[string]string) + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + if len(line) < 3 { + continue + } + if !(line[0] == "RUN" && line[1] == "./contrib/download-frozen-image-v2.sh") { + continue + } + + frozenImgDir = line[2] + if line[2] == frozenImgDir { + frozenImgDir = filepath.Join(os.Getenv("DEST"), "frozen-images") + } + + for scanner.Scan() { + img := strings.TrimSpace(scanner.Text()) + img = strings.TrimSuffix(img, "\\") + img = strings.TrimSpace(img) + split := strings.Split(img, "@") + if len(split) < 2 { + break + } + + for _, i := range images { + if split[0] == i { + ls[i] = img + break + } + } + } + } + return ls, nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt new file mode 100644 index 0000000..2218f23 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAP2EcMN2UXPcMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvgewhaYs +Ke5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqIOdxWjYITgJuHrTwB4ZhBqWS7 +tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbWK9PPhGGkeR01c/Q932m92Hsn +fCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4ylPRxs0RrE/rP+bEGssKQSbeCZ +wazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdvBqrRdWnkOZClhlLgEQ5nK2yV +B6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW8oKHlBBl6pRxHIKzNN4VFbeB +vvYvrogrDrC/owIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUFoHfukRa6qGk1ncON64Z +ASKlZdkwDQYJKoZIhvcNAQELBQADggEBAEq9Adpd03CPmpbRtTAJGAkjjLFr60sV +2r+/l/m9R31ZCN9ymM9nxToQ8zfMdeAh/nnPcErziil2gDVqXueCNDkRj09tmDIE +Q1Oc92uyNZNgcECow77cKZCTZSTku+qsJrYaykH5vSnia8ltcKj8inJedIcpBR+p +608HEQvF0Eg5eaLPJwH48BCb0Gqdri1dJgrNnqptz7MDr8M+u7tHVulbAd3YxLlq +JH1W2bkVUx6esbn/MUE5HL5iTuOYREEINvBSmLdmmFkampmCnCB/bDEyJeL9bAkt +ZPIi0UNSnqFKLSP1Vf8AGLXt6iO7+1OGvtsDXEEYdXVOMsSXZtUuT7A= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key new file mode 100644 index 0000000..cb37efc --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvgewhaYsKe5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqI +OdxWjYITgJuHrTwB4ZhBqWS7tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbW +K9PPhGGkeR01c/Q932m92HsnfCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4yl +PRxs0RrE/rP+bEGssKQSbeCZwazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdv +BqrRdWnkOZClhlLgEQ5nK2yVB6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW +8oKHlBBl6pRxHIKzNN4VFbeBvvYvrogrDrC/owIDAQABAoIBAB/o8KZwsgfUhqh7 +WoViSCwQb0e0z7hoFwhpUl4uXPTGf1v6HEgDDPG0PwwgkdbwNaypQZVtWevj4NTQ +R326jjdjH1xbfQa2PZpz722L3jDqJR6plEtFxRoIv3KrCffPsrgabIu2mnnJJpDB +ixtW5cq0sT4ov2i4H0i85CWWwbSY/G/MHsvCuK9PhoCj9uToVqrf1KrAESE5q4fh +mPSYUL99KVnj7SZkUz+79rc8sLLPVks3szZACMlm1n05ZTj/d6Nd2ZZUO45DllIj +1XJghfWmnChrB/P/KYXgQ3Y9BofIAw1ra2y3wOZeqRFNsbmojcGldfdtN/iQzhEj +uk4ThokCgYEA9FTmv36N8qSPWuqX/KzkixDQ8WrDGohcB54kK98Wx4ijXx3i38SY +tFjO8YUS9GVo1+UgmRjZbzVX7xeum6+TdBBwOjNOxEQ4tzwiQBWDdGpli8BccdJ2 +OOIVxSslWhiUWfpYloXVetrR88iHbT882g795pbonDaJdXSLnij4UW8CgYEAxxrr +QFpsmOEZvI/yPSOGdG7A1RIsCeH+cEOf4cKghs7+aCtAHlIweztNOrqirl3oKI1r +I0zQl46WsaW8S/y99v9lmmnZbWwqLa4vIu0NWs0zaZdzKZw3xljMhgp4Ge69hHa2 +utCtAxcX+7q/yLlHoTiYwKdxX54iLkheCB8csw0CgYEAleEG820kkjXUIodJ2JwO +Tihwo8dEC6CeI6YktizRgnEVFqH0rCOjMO5Rc+KX8AfNOrK5PnD54LguSuKSH7qi +j04OKgWTSd43lF90+y63RtCFnibQDpp2HwrBJAQFk7EEP/XMJfnPLN/SbuMSADgM +kg8kPTFRW5Iw3DYz9z9WpE0CgYAkn6/8Q2XMbUOFqti9JEa8Lg8sYk5VdwuNbPMA +3QMYKQUk9ieyLB4c3Nik3+XCuyVUKEc31A5egmz3umu7cn8i6vGuiJ/k/8t2YZ7s +Bry5Ihu95Yzab5DW3Eiqs0xKQN79ebS9AluAwQO5Wy2h52rknfuDHIm/M+BHsSoS +xl5KFQKBgQCokCsYuX1z2GojHw369/R2aX3ovCGuHqy4k7fWxUrpHTHvth2+qNPr +84qLJ9rLWoZE5sUiZ5YdwCgW877EdfkT+v4aaBX79ixso5VdqgJ/PdnoNntah/Vq +njQiW1skn6/P5V/eyimN2n0VsyBr/zMDEtYTRP/Tb1zi/njFLQkZEA== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt new file mode 100644 index 0000000..bec0847 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAIq8naKlYAQfMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyY2EWYTW +5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aHfoOe8wGKg3Ohz7UCBdD5Mob/ +L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3AaawEUOw2rwwMDEjLnDDTSZM +z8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY8ioRbROCL2PGgqywWq2fThav +c70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFHVARXiUv/ILHk7ImYnSGJUcuk +JTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJDSiRP72nkg/cE4BqMl9FrMwK +9iS8xa9yMDLUvwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUvQzzFmh3Sv3HcdExY3wx +/1u6JLAwDQYJKoZIhvcNAQELBQADggEBAJcmDme2Xj/HPUPwaN/EyCmjhY73EiHO +x6Pm16tscg5JGn5A+u3CZ1DmxUYl8Hp6MaW/sWzdtL0oKJg76pynadCWh5EacFR8 +u+2GV/IcN9mSX6JQzvrqbjSqo5/FehqBD+W5h3euwwApWA3STAadYeyEfmdOA3SQ +W1vzrA1y7i8qgTqeJ7UX1sEAXlIhBK2zPYaMB+en+ZOiPyNxJYj6IDdGdD2paC9L +6H9wKC+GAUTSdCWp89HP7ETSXEGr94AXkrwU+qNsiN+OyK8ke0EMngEPh5IQoplw +/7zEZCth3oKxvR1/4S5LmTVaHI2ZlbU4q9bnY72G4tw8YQr2gcBGo4w= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key new file mode 100644 index 0000000..5ccabe9 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAyY2EWYTW5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aH +foOe8wGKg3Ohz7UCBdD5Mob/L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3 +AaawEUOw2rwwMDEjLnDDTSZMz8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY +8ioRbROCL2PGgqywWq2fThavc70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFH +VARXiUv/ILHk7ImYnSGJUcukJTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJ +DSiRP72nkg/cE4BqMl9FrMwK9iS8xa9yMDLUvwIDAQABAoIBAHmffvzx7ydESWwa +zcfdu26BkptiTvjjfJrqEd4wSewxWGPKqJqMXE8xX99A2KTZClZuKuH1mmnecQQY +iRXGrK9ewFMuHYGeKEiLlPlqR8ohXhyGLVm+t0JDwaXMp5t9G0i73O5iLTm5fNGd +FGxa9YnVW20Q8MqNczbVGH1D1zInhxzzOyFzBd4bBBJ8PdrUdyLpd7+RxY2ghnbT +p9ZANR2vk5zmDLJgZx72n/u+miJWuhY6p0v3Vq4z/HHgdhf+K6vpDdzTcYlA0rO4 +c/c+RKED3ZadGUD5QoLsmEN0e3FVSMPN1kt4ZRTqWfH8f2X4mLz33aBryTjktP6+ +1rX6ThECgYEA74wc1Tq23B5R0/GaMm1AK3Ko2zzTD8wK7NSCElh2dls02B+GzrEB +aE3A2GMQSuzb+EA0zkipwANBaqs3ZemH5G1pu4hstQsXCMd4jAJn0TmTXlplXBCf +PSc8ZUU6XcJENRr9Q7O9/TGlgahX+z0ndxYx/CMCsSu7XsMg4IZsbAcCgYEA12Vb +wKOVG15GGp7pMshr+2rQfVimARUP4gf3JnQmenktI4PfdnMW3a4L3DEHfLhIerwT +6lRp/NpxSADmuT4h1UO1l2lc+gmTVPw0Vbl6VwHpgS5Kfu4ZyM6n3S66f/dE4nu7 +hQF9yZz7vn5Agghak4p6a1wC1gdMzR1tvxFzk4kCgYByBMTskWfcWeok8Yitm+bB +R3Ar+kWT7VD97SCETusD5uG+RTNLSmEbHnc+B9kHcLo67YS0800pAeOvPBPARGnU +RmffRU5I1iB+o0MzkSmNItSMQoagTaEd4IEUyuC/I+qHRHNsOC+kRm86ycAm67LP +MhdUpe1wGxqyPjp15EXTHQKBgDKzFu+3EWfJvvKRKQ7dAh3BvKVkcl6a2Iw5l8Ej +YdM+JpPPfI/i8yTmzL/dgoem0Nii4IUtrWzo9fUe0TAVId2S/HFRSaNJEbbVTnRH +HjbQqmfPv5U08jjD+9siHp/0UfCFc1QRT8xe+RqTmReCY9+KntoaZEiAm2FEZgqt +TukRAoGAf7QqbTP5/UH1KSkX89F5qy/6GS3pw6TLj9Ufm/l/NO8Um8gag6YhEKWR +7HpkpCqjfWj8Av8ESR9cqddPGrbdqXFm9z7dCjlAd5T3Q3h/h+v+JzLQWbsI6WOb +SsOSWNyE006ZZdIiFwO6GfxpLI24sVtYKgyob6Q71oxSqfnrnT0= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt new file mode 100644 index 0000000..f434b45 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAKHt/jxiWqMtMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqfbJk2Dk +C9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJzetsclsV/95nBhinIGcSmPQA +l318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCeS86SOyLNTpMD9gsF0S8nR1RN +h0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5PhyrMZgNip4IrG46umCkFlrw +zMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKorIJQbPtHVYdr4UxYnNmk6fbU +biEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj9fZ7Viw0t5IKXZPsxMhwknUT +9vmPzIJO6NiniwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUdTXRP1EzxQ+UDZSoheVo +Mobud1cwDQYJKoZIhvcNAQELBQADggEBADV9asTWWdbmpkeRuKyi0xGho39ONK88 +xxkFlco766BVgemo/rGQj3oPuw6M6SzHFoJ6JUPjmLiAQDIGEU/2/b6LcOuLjP+4 +YejCcDTY3lSW/HMNoAmzr2foo/LngNGfe/qhVFUqV7GjFT9+XzFFBfIZ1cQiL2ed +kc8rgQxFPwWXFCSwaENWeFnMDugkd+7xanoAHq8GsJpg5fTruDTmJkUqC2RNiMLn +WM7QaqW7+lmUnMnc1IBoz0hFhgoiadWM/1RQxx51zTVw6Au1koIm4ZXu5a+/WyC8 +K1+HyUbc0AVaDaRBpRSOR9aHRwLGh6WQ4aUZQNyJroc999qfYrDEEV8= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key new file mode 100644 index 0000000..a61d18c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAqfbJk2DkC9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJ +zetsclsV/95nBhinIGcSmPQAl318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCe +S86SOyLNTpMD9gsF0S8nR1RNh0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5 +PhyrMZgNip4IrG46umCkFlrwzMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKo +rIJQbPtHVYdr4UxYnNmk6fbUbiEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj +9fZ7Viw0t5IKXZPsxMhwknUT9vmPzIJO6NiniwIDAQABAoIBAQCAr/ed3A2umO7T +FDYZik3nXBiiiW4t7r+nGGgZ3/kNgY1lnuHlROxehXLZwbX1mrLnyML/BjhwezV9 +7ZNVPd6laVPpNj6DyxtWHRZ5yARlm1Al39E7CpQTrF0QsiWcpGnqIa62xjDRTpnq +askV/Q5qggyvqmE9FnFCQpEiAjlhvp7F0kVHVJm9s3MK3zSyR0UTZ3cpYus2Jr2z +OotHgAMHq5Hgb3dvxOeE2xRMeYAVDujbkNzXm2SddAtiRdLhWDh7JIr3zXhp0HyN +4rLOyhlgz00oIGeDt/C0q3fRmghr3iZOG+7m2sUx0FD1Ru1dI9v2A+jYmIVNW6+x +YJk5PzxJAoGBANDj7AGdcHSci/LDBPoTTUiz3uucAd27/IJma/iy8mdbVfOAb0Fy +PRSPvoozlpZyOxg2J4eH/o4QxQR4lVKtnLKZLNHK2tg3LarwyBX1LiI3vVlB+DT1 +AmV8i5bJAckDhqFeEH5qdWZFi03oZsSXWEqX5iMYCrdK5lTZggcrFZeHAoGBANBL +fkk3knAdcVfTYpmHx18GBi2AsCWTd20KD49YBdbVy0Y2Jaa1EJAmGWpTUKdYx40R +H5CuGgcAviXQz3bugdTU1I3tAclBtpJNU7JkhuE+Epz0CM/6WERJrE0YxcGQA5ui +6fOguFyiXD1/85jrDBOKy74aoS7lYz9r/a6eqmjdAoGBAJpm/nmrIAZx+Ff2ouUe +A1Ar9Ch/Zjm5zEmu3zwzOU4AiyWz14iuoktifNq2iyalRNz+mnVpplToPFizsNwu +C9dPtXtU0DJlhtIFrD/evLz6KnGhe4/ZUm4lgyBvb2xfuNHqL5Lhqelwmil6EQxb +Oh3Y7XkfOjyFln89TwlxZUJdAoGAJRMa4kta7EvBTeGZLjyltvsqhFTghX+vBSCC +ToBbYbbiHJgssXSPAylU4sD7nR3HPwuqM6VZip+OOMrm8oNXZpuPTce+xqTEq1vK +JvmPrG3RAFDLdMFZjqYSXhKnuGE60yv3Ol8EEbDwfB3XLQPBPYU56Jdy0xcPSE2f +dMJXEJ0CgYEAisZw0nXw6lFeYecu642EGuU0wv1O9i21p7eho9QwOcsoTl4Q9l+M +M8iBv+qTHO+D19l4JbkGvy2H2diKoYduUFACcuiFYs8fjrT+4Z6DyOQAQGAf6Ylw +BFbU15k6KbA9v4mZDfd1tY9x62L/XO55ZxYG+J+q0e26tEThgD8cEog= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt new file mode 100644 index 0000000..c8cbe46 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJANae++ZkUEWMMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqULAjgba +Y2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4ltkQj1iO4zBTs0Ft9EzXFc5ZBh +pTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3HZpVOlEMI3npRfBGNIBllUaRN +PWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ImhSo3aipJUHHcp9Z9NgvpNC +3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw+YTrWZq3qVnnqUouHO//c9PG +Ry3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih58i/OBKe81eD9NuZDP2KrjTxI +5xkXKhj6DV2NnQIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUDt95hiqbQvi0KcvZGAUu +VisnztQwDQYJKoZIhvcNAQELBQADggEBAGi7qHai7MWbfeu6SlXhzIP3AIMa8TMi +lp/+mvPUFPswIVqYJ71MAN8uA7CTH3z50a2vYupGeOEtZqVJeRf+xgOEpwycncxp +Qz6wc6TWPVIoT5q1Hqxw1RD2MyKL+Y+QBDYwFxFkthpDMlX48I9frcqoJUWFxBF2 +lnRr/cE7BbPE3sMbXV3wGPlH7+eUf+CgzXJo2HB6THzagyEgNrDiz/0rCQa1ipFd +mNU3D/U6BFGmJNxhvSOtXX9escg8yjr05YwwzokHS2K4jE0ZuJPBd50C/Rvo3Mf4 +0h7/2Q95e7d42zPe9WYPu2F8KTWsf4r+6ddhKrKhYzXIcTAfHIOiO+U= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key new file mode 100644 index 0000000..f473cc4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAqULAjgbaY2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4lt +kQj1iO4zBTs0Ft9EzXFc5ZBhpTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3H +ZpVOlEMI3npRfBGNIBllUaRNPWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ +ImhSo3aipJUHHcp9Z9NgvpNC3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw ++YTrWZq3qVnnqUouHO//c9PGRy3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih5 +8i/OBKe81eD9NuZDP2KrjTxI5xkXKhj6DV2NnQIDAQABAoIBAGK0ZKnuYSiXux60 +5MvK4pOCsa/nY3mOcgVHhW4IzpRgJdIrcFOlz9ncXrBsSAIWjX7o3u2Ydvjs4DOW +t8d6frB3QiDInYcRVDjLCD6otWV97Bk9Ua0G4N4hAWkMF7ysV4oihS1JDSoAdo39 +qOdki6s9yeyHZGKwk2oHLlowU5TxQMBA8DHmxqBII1HTm+8xRz45bcEqRXydYSUn +P1JuSU9jFqdylxU+Nrq6ehslMQ3y7qNWQyiLGxu6EmR+vgrzSU0s3iAOqCHthaOS +VBBXPL3DNEYUS+0QGnGrACuJhanOMBfdiO6Orelx6ZzWZm38PNGv0yBt0WCM+8/A +TtQNGkECgYEA1LqR6AH9XikUQ0+rM4526BgVuYqtjw21h4Lj9alaA+YTQntBBJOv +iAcUpnJiV4T8jzAMLeqpK8R/rbxRnK5S9jOV2gr+puk4L6tH46cgahBUESDigDp8 +6vK8ur6ubBcXNPh3AT6rsPj+Ph2EU3raqiYdouvCdga/OCYZb+jr6UkCgYEAy7Cr +l8WssI/8/ORcQ4MFJFNyfz/Y2beNXyLd1PX0H+wRSiGcKzeUuTHNtzFFpMbrK/nx +ZOPCT2ROdHsBHzp1L+WquCb0fyMVSiYiXBU+VCFDbUU5tBr3ycTc7VwuFPENOiha +IdlWgew/aW110FQHIaqe9g+htRe+mXe++faZtbUCgYB/MSJmNzJX53XvHSZ/CBJ+ +iVAMBSfq3caJRLCqRNzGcf1YBbwFUYxlZ95n+wJj0+byckcF+UW3HqE8rtmZNf3y +qTtTCLnj8JQgpGeybU4LPMIXD7N9+fqQvBwuCC7gABpnGJyHCQK9KNNTLnDdPRqb +G3ki3ZYC3dvdZaJV8E2FyQKBgQCMa5Mf4kqWvezueo+QizZ0QILibqWUEhIH0AWV +1qkhiKCytlDvCjYhJdBnxjP40Jk3i+t6XfmKud/MNTAk0ywOhQoYQeKz8v+uSnPN +f2ekn/nXzq1lGGJSWsDjcXTjQvqXaVIZm7cjgjaE+80IfaUc9H75qvUT3vaq3f5u +XC7DMQKBgQDMAzCCpWlEPbZoFMl6F49+7jG0/TiqM/WRUSQnNtufPMbrR9Je4QM1 +L1UCANCPaHFOncKYer15NfIV1ctt5MZKImevDsUaQO8CUlO+dzd5H8KvHw9E29gA +B22v8k3jIjsYeRL+UJ/sBnWHgxdAe/NEM+TdlP2oP9D1gTifutPqAg== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh new file mode 100755 index 0000000..8d6381c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh @@ -0,0 +1,18 @@ +for selfsigned in delgkey1 delgkey2 delgkey3 delgkey4; do + subj='/C=US/ST=CA/L=SanFrancisco/O=Docker/CN=delegation' + + openssl genrsa -out "${selfsigned}.key" 2048 + openssl req -new -key "${selfsigned}.key" -out "${selfsigned}.csr" -sha256 -subj "${subj}" + cat > "${selfsigned}.cnf" < 1 && buf[0] == 'Y' + }, + "Test requires apparmor is enabled.", + } + RegistryHosting = testRequirement{ + func() bool { + // for now registry binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // registry binary is in PATH. + _, err := exec.LookPath(v2binary) + return err == nil + }, + fmt.Sprintf("Test requires an environment that can host %s in the same host", v2binary), + } + NotaryHosting = testRequirement{ + func() bool { + // for now notary binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // notary-server binary is in PATH. + _, err := exec.LookPath(notaryServerBinary) + return err == nil + }, + fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary), + } + NotaryServerHosting = testRequirement{ + func() bool { + // for now notary-server binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // notary-server binary is in PATH. + _, err := exec.LookPath(notaryServerBinary) + return err == nil + }, + fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary), + } + NotOverlay = testRequirement{ + func() bool { + return !strings.HasPrefix(daemonStorageDriver, "overlay") + }, + "Test requires underlying root filesystem not be backed by overlay.", + } + + Devicemapper = testRequirement{ + func() bool { + return strings.HasPrefix(daemonStorageDriver, "devicemapper") + }, + "Test requires underlying root filesystem to be backed by devicemapper.", + } + + IPv6 = testRequirement{ + func() bool { + cmd := exec.Command("test", "-f", "/proc/net/if_inet6") + + if err := cmd.Run(); err != nil { + return true + } + return false + }, + "Test requires support for IPv6", + } + UserNamespaceROMount = testRequirement{ + func() bool { + // quick case--userns not enabled in this test run + if os.Getenv("DOCKER_REMAP_ROOT") == "" { + return true + } + if _, _, err := dockerCmdWithError("run", "--rm", "--read-only", "busybox", "date"); err != nil { + return false + } + return true + }, + "Test cannot be run if user namespaces enabled but readonly mounts fail on this kernel.", + } + UserNamespaceInKernel = testRequirement{ + func() bool { + if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) { + /* + * This kernel-provided file only exists if user namespaces are + * supported + */ + return false + } + + // We need extra check on redhat based distributions + if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil { + defer f.Close() + b := make([]byte, 1) + _, _ = f.Read(b) + if string(b) == "N" { + return false + } + return true + } + + return true + }, + "Kernel must have user namespaces configured and enabled.", + } + NotUserNamespace = testRequirement{ + func() bool { + root := os.Getenv("DOCKER_REMAP_ROOT") + if root != "" { + return false + } + return true + }, + "Test cannot be run when remapping root", + } + IsPausable = testRequirement{ + func() bool { + if daemonPlatform == "windows" { + return isolation == "hyperv" + } + return true + }, + "Test requires containers are pausable.", + } + NotPausable = testRequirement{ + func() bool { + if daemonPlatform == "windows" { + return isolation == "process" + } + return false + }, + "Test requires containers are not pausable.", + } + IsolationIsHyperv = testRequirement{ + func() bool { + return daemonPlatform == "windows" && isolation == "hyperv" + }, + "Test requires a Windows daemon running default isolation mode of hyperv.", + } + IsolationIsProcess = testRequirement{ + func() bool { + return daemonPlatform == "windows" && isolation == "process" + }, + "Test requires a Windows daemon running default isolation mode of process.", + } +) + +// testRequires checks if the environment satisfies the requirements +// for the test to run or skips the tests. +func testRequires(c *check.C, requirements ...testRequirement) { + for _, r := range requirements { + if !r.Condition() { + c.Skip(r.SkipMessage) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/requirements_unix.go b/vendor/github.com/docker/docker/integration-cli/requirements_unix.go new file mode 100644 index 0000000..ef017d8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/requirements_unix.go @@ -0,0 +1,159 @@ +// +build !windows + +package main + +import ( + "bytes" + "io/ioutil" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" +) + +var ( + // SysInfo stores information about which features a kernel supports. + SysInfo *sysinfo.SysInfo + cpuCfsPeriod = testRequirement{ + func() bool { + return SysInfo.CPUCfsPeriod + }, + "Test requires an environment that supports cgroup cfs period.", + } + cpuCfsQuota = testRequirement{ + func() bool { + return SysInfo.CPUCfsQuota + }, + "Test requires an environment that supports cgroup cfs quota.", + } + cpuShare = testRequirement{ + func() bool { + return SysInfo.CPUShares + }, + "Test requires an environment that supports cgroup cpu shares.", + } + oomControl = testRequirement{ + func() bool { + return SysInfo.OomKillDisable + }, + "Test requires Oom control enabled.", + } + pidsLimit = testRequirement{ + func() bool { + return SysInfo.PidsLimit + }, + "Test requires pids limit enabled.", + } + kernelMemorySupport = testRequirement{ + func() bool { + return SysInfo.KernelMemory + }, + "Test requires an environment that supports cgroup kernel memory.", + } + memoryLimitSupport = testRequirement{ + func() bool { + return SysInfo.MemoryLimit + }, + "Test requires an environment that supports cgroup memory limit.", + } + memoryReservationSupport = testRequirement{ + func() bool { + return SysInfo.MemoryReservation + }, + "Test requires an environment that supports cgroup memory reservation.", + } + swapMemorySupport = testRequirement{ + func() bool { + return SysInfo.SwapLimit + }, + "Test requires an environment that supports cgroup swap memory limit.", + } + memorySwappinessSupport = testRequirement{ + func() bool { + return SysInfo.MemorySwappiness + }, + "Test requires an environment that supports cgroup memory swappiness.", + } + blkioWeight = testRequirement{ + func() bool { + return SysInfo.BlkioWeight + }, + "Test requires an environment that supports blkio weight.", + } + cgroupCpuset = testRequirement{ + func() bool { + return SysInfo.Cpuset + }, + "Test requires an environment that supports cgroup cpuset.", + } + seccompEnabled = testRequirement{ + func() bool { + return supportsSeccomp && SysInfo.Seccomp + }, + "Test requires that seccomp support be enabled in the daemon.", + } + bridgeNfIptables = testRequirement{ + func() bool { + return !SysInfo.BridgeNFCallIPTablesDisabled + }, + "Test requires that bridge-nf-call-iptables support be enabled in the daemon.", + } + bridgeNfIP6tables = testRequirement{ + func() bool { + return !SysInfo.BridgeNFCallIP6TablesDisabled + }, + "Test requires that bridge-nf-call-ip6tables support be enabled in the daemon.", + } + unprivilegedUsernsClone = testRequirement{ + func() bool { + content, err := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone") + if err == nil && strings.Contains(string(content), "0") { + return false + } + return true + }, + "Test cannot be run with 'sysctl kernel.unprivileged_userns_clone' = 0", + } + ambientCapabilities = testRequirement{ + func() bool { + content, err := ioutil.ReadFile("/proc/self/status") + if err == nil && strings.Contains(string(content), "CapAmb:") { + return true + } + return false + }, + "Test cannot be run without a kernel (4.3+) supporting ambient capabilities", + } + overlayFSSupported = testRequirement{ + func() bool { + cmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "cat /proc/filesystems") + out, err := cmd.CombinedOutput() + if err != nil { + return false + } + return bytes.Contains(out, []byte("overlay\n")) + }, + "Test cannot be run without suppport for overlayfs", + } + overlay2Supported = testRequirement{ + func() bool { + if !overlayFSSupported.Condition() { + return false + } + + daemonV, err := kernel.ParseRelease(daemonKernelVersion) + if err != nil { + return false + } + requiredV := kernel.VersionInfo{Kernel: 4} + return kernel.CompareKernelVersion(*daemonV, requiredV) > -1 + + }, + "Test cannot be run without overlay2 support (kernel 4.0+)", + } +) + +func init() { + SysInfo = sysinfo.New(true) +} diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars.go b/vendor/github.com/docker/docker/integration-cli/test_vars.go new file mode 100644 index 0000000..97bcddd --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars.go @@ -0,0 +1,11 @@ +package main + +// sleepCommandForDaemonPlatform is a helper function that determines what +// the command is for a sleeping container based on the daemon platform. +// The Windows busybox image does not have a `top` command. +func sleepCommandForDaemonPlatform() []string { + if daemonPlatform == "windows" { + return []string{"sleep", "240"} + } + return []string{"top"} +} diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_exec.go b/vendor/github.com/docker/docker/integration-cli/test_vars_exec.go new file mode 100644 index 0000000..7633b34 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_exec.go @@ -0,0 +1,8 @@ +// +build !test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = true +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go b/vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go new file mode 100644 index 0000000..0845090 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go @@ -0,0 +1,8 @@ +// +build test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = false +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go b/vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go new file mode 100644 index 0000000..2f47ab0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go @@ -0,0 +1,8 @@ +// +build !seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = false +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go b/vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go new file mode 100644 index 0000000..00cf697 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go @@ -0,0 +1,8 @@ +// +build seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = true +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_unix.go b/vendor/github.com/docker/docker/integration-cli/test_vars_unix.go new file mode 100644 index 0000000..f9ecc01 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = true + + expectedFileChmod = "-rw-r--r--" + + // On Unix variants, the busybox image comes with the `top` command which + // runs indefinitely while still being interruptible by a signal. + defaultSleepImage = "busybox" +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_windows.go b/vendor/github.com/docker/docker/integration-cli/test_vars_windows.go new file mode 100644 index 0000000..bfc9a5a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_windows.go @@ -0,0 +1,15 @@ +// +build windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = false + + // this is the expected file permission set on windows: gh#11395 + expectedFileChmod = "-rwxr-xr-x" + + // On Windows, the busybox image doesn't have the `top` command, so we rely + // on `sleep` with a high duration. + defaultSleepImage = "busybox" +) diff --git a/vendor/github.com/docker/docker/integration-cli/trust_server.go b/vendor/github.com/docker/docker/integration-cli/trust_server.go new file mode 100644 index 0000000..1887631 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/trust_server.go @@ -0,0 +1,344 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" +) + +var notaryBinary = "notary" +var notaryServerBinary = "notary-server" + +type keyPair struct { + Public string + Private string +} + +type testNotary struct { + cmd *exec.Cmd + dir string + keys []keyPair +} + +const notaryHost = "localhost:4443" +const notaryURL = "https://" + notaryHost + +func newTestNotary(c *check.C) (*testNotary, error) { + // generate server config + template := `{ + "server": { + "http_addr": "%s", + "tls_key_file": "%s", + "tls_cert_file": "%s" + }, + "trust_service": { + "type": "local", + "hostname": "", + "port": "", + "key_algorithm": "ed25519" + }, + "logging": { + "level": "debug" + }, + "storage": { + "backend": "memory" + } +}` + tmp, err := ioutil.TempDir("", "notary-test-") + if err != nil { + return nil, err + } + confPath := filepath.Join(tmp, "config.json") + config, err := os.Create(confPath) + if err != nil { + return nil, err + } + defer config.Close() + + workingDir, err := os.Getwd() + if err != nil { + return nil, err + } + if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // generate client config + clientConfPath := filepath.Join(tmp, "client-config.json") + clientConfig, err := os.Create(clientConfPath) + if err != nil { + return nil, err + } + defer clientConfig.Close() + + template = `{ + "trust_dir" : "%s", + "remote_server": { + "url": "%s", + "skipTLSVerify": true + } +}` + if _, err = fmt.Fprintf(clientConfig, template, filepath.Join(cliconfig.ConfigDir(), "trust"), notaryURL); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // load key fixture filenames + var keys []keyPair + for i := 1; i < 5; i++ { + keys = append(keys, keyPair{ + Public: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.crt", i)), + Private: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.key", i)), + }) + } + + // run notary-server + cmd := exec.Command(notaryServerBinary, "-config", confPath) + if err := cmd.Start(); err != nil { + os.RemoveAll(tmp) + if os.IsNotExist(err) { + c.Skip(err.Error()) + } + return nil, err + } + + testNotary := &testNotary{ + cmd: cmd, + dir: tmp, + keys: keys, + } + + // Wait for notary to be ready to serve requests. + for i := 1; i <= 20; i++ { + if err = testNotary.Ping(); err == nil { + break + } + time.Sleep(10 * time.Millisecond * time.Duration(i*i)) + } + + if err != nil { + c.Fatalf("Timeout waiting for test notary to become available: %s", err) + } + + return testNotary, nil +} + +func (t *testNotary) Ping() error { + tlsConfig := tlsconfig.ClientDefault() + tlsConfig.InsecureSkipVerify = true + client := http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + }, + } + resp, err := client.Get(fmt.Sprintf("%s/v2/", notaryURL)) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (t *testNotary) Close() { + t.cmd.Process.Kill() + os.RemoveAll(t.dir) +} + +func (s *DockerTrustSuite) trustedCmd(cmd *exec.Cmd) { + pwd := "12345678" + trustCmdEnv(cmd, notaryURL, pwd, pwd) +} + +func (s *DockerTrustSuite) trustedCmdWithServer(cmd *exec.Cmd, server string) { + pwd := "12345678" + trustCmdEnv(cmd, server, pwd, pwd) +} + +func (s *DockerTrustSuite) trustedCmdWithPassphrases(cmd *exec.Cmd, rootPwd, repositoryPwd string) { + trustCmdEnv(cmd, notaryURL, rootPwd, repositoryPwd) +} + +func trustCmdEnv(cmd *exec.Cmd, server, rootPwd, repositoryPwd string) { + env := []string{ + "DOCKER_CONTENT_TRUST=1", + fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), + fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd), + fmt.Sprintf("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE=%s", repositoryPwd), + } + cmd.Env = append(os.Environ(), env...) +} + +func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "rmi", repoName); status != 0 { + c.Fatalf("Error removing image %q\n%s", repoName, out) + } + + return repoName +} + +func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // tag the image and upload it to the private registry + dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", repoName, source) + + pushCmd := exec.Command(dockerBinary, "plugin", "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + if err != nil { + c.Fatalf("Error running trusted plugin push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "plugin", "rm", "-f", repoName); status != 0 { + c.Fatalf("Error removing plugin %q\n%s", repoName, out) + } + + return repoName +} + +func notaryClientEnv(cmd *exec.Cmd) { + pwd := "12345678" + env := []string{ + fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_DELEGATION_PASSPHRASE=%s", pwd), + } + cmd.Env = append(os.Environ(), env...) +} + +func (s *DockerTrustSuite) notaryInitRepo(c *check.C, repoName string) { + initCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "init", repoName) + notaryClientEnv(initCmd) + out, _, err := runCommandWithOutput(initCmd) + if err != nil { + c.Fatalf("Error initializing notary repository: %s\n", out) + } +} + +func (s *DockerTrustSuite) notaryCreateDelegation(c *check.C, repoName, role string, pubKey string, paths ...string) { + pathsArg := "--all-paths" + if len(paths) > 0 { + pathsArg = "--paths=" + strings.Join(paths, ",") + } + + delgCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), + "delegation", "add", repoName, role, pubKey, pathsArg) + notaryClientEnv(delgCmd) + out, _, err := runCommandWithOutput(delgCmd) + if err != nil { + c.Fatalf("Error adding %s role to notary repository: %s\n", role, out) + } +} + +func (s *DockerTrustSuite) notaryPublish(c *check.C, repoName string) { + pubCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "publish", repoName) + notaryClientEnv(pubCmd) + out, _, err := runCommandWithOutput(pubCmd) + if err != nil { + c.Fatalf("Error publishing notary repository: %s\n", out) + } +} + +func (s *DockerTrustSuite) notaryImportKey(c *check.C, repoName, role string, privKey string) { + impCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "key", + "import", privKey, "-g", repoName, "-r", role) + notaryClientEnv(impCmd) + out, _, err := runCommandWithOutput(impCmd) + if err != nil { + c.Fatalf("Error importing key to notary repository: %s\n", out) + } +} + +func (s *DockerTrustSuite) notaryListTargetsInRole(c *check.C, repoName, role string) map[string]string { + listCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "list", + repoName, "-r", role) + notaryClientEnv(listCmd) + out, _, err := runCommandWithOutput(listCmd) + if err != nil { + c.Fatalf("Error listing targets in notary repository: %s\n", out) + } + + // should look something like: + // NAME DIGEST SIZE (BYTES) ROLE + // ------------------------------------------------------------------------------------------------------ + // latest 24a36bbc059b1345b7e8be0df20f1b23caa3602e85d42fff7ecd9d0bd255de56 1377 targets + + targets := make(map[string]string) + + // no target + lines := strings.Split(strings.TrimSpace(out), "\n") + if len(lines) == 1 && strings.Contains(out, "No targets present in this repository.") { + return targets + } + + // otherwise, there is at least one target + c.Assert(len(lines), checker.GreaterOrEqualThan, 3) + + for _, line := range lines[2:] { + tokens := strings.Fields(line) + c.Assert(tokens, checker.HasLen, 4) + targets[tokens[0]] = tokens[3] + } + + return targets +} + +func (s *DockerTrustSuite) assertTargetInRoles(c *check.C, repoName, target string, roles ...string) { + // check all the roles + for _, role := range roles { + targets := s.notaryListTargetsInRole(c, repoName, role) + roleName, ok := targets[target] + c.Assert(ok, checker.True) + c.Assert(roleName, checker.Equals, role) + } +} + +func (s *DockerTrustSuite) assertTargetNotInRoles(c *check.C, repoName, target string, roles ...string) { + targets := s.notaryListTargetsInRole(c, repoName, "targets") + + roleName, ok := targets[target] + if ok { + for _, role := range roles { + c.Assert(roleName, checker.Not(checker.Equals), role) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/utils.go b/vendor/github.com/docker/docker/integration-cli/utils.go new file mode 100644 index 0000000..87d48e4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/utils.go @@ -0,0 +1,79 @@ +package main + +import ( + "io" + "os" + "os/exec" + "time" + + "github.com/docker/docker/pkg/integration" + "github.com/docker/docker/pkg/integration/cmd" +) + +func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { + if daemonPlatform == "windows" { + return "c:", `\` + } + return "", "/" +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +func runCommandWithOutput(execCmd *exec.Cmd) (string, int, error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.Combined(), result.ExitCode, result.Error +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +func runCommandWithStdoutStderr(execCmd *exec.Cmd) (string, string, int, error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.Stdout(), result.Stderr(), result.ExitCode, result.Error +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +func runCommand(execCmd *exec.Cmd) (exitCode int, err error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.ExitCode, result.Error +} + +// Temporary shim for migrating commands to the new function +func transformCmd(execCmd *exec.Cmd) cmd.Cmd { + return cmd.Cmd{ + Command: execCmd.Args, + Env: execCmd.Env, + Dir: execCmd.Dir, + Stdin: execCmd.Stdin, + Stdout: execCmd.Stdout, + } +} + +func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { + return integration.RunCommandPipelineWithOutput(cmds...) +} + +func convertSliceOfStringsToMap(input []string) map[string]struct{} { + return integration.ConvertSliceOfStringsToMap(input) +} + +func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { + return integration.CompareDirectoryEntries(e1, e2) +} + +func listTar(f io.Reader) ([]string, error) { + return integration.ListTar(f) +} + +func randomTmpDirPath(s string, platform string) string { + return integration.RandomTmpDirPath(s, platform) +} + +func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { + return integration.ConsumeWithSpeed(reader, chunkSize, interval, stop) +} + +func parseCgroupPaths(procCgroupData string) map[string]string { + return integration.ParseCgroupPaths(procCgroupData) +} + +func runAtDifferentDate(date time.Time, block func()) { + integration.RunAtDifferentDate(date, block) +} diff --git a/vendor/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go new file mode 100644 index 0000000..3b6ffc8 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/empty.go @@ -0,0 +1,56 @@ +package layer + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. +var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { + if p == "" { + return el.TarStream() + } + return nil, fmt.Errorf("can't get parent tar stream of an empty layer") +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} diff --git a/vendor/github.com/docker/docker/layer/empty_test.go b/vendor/github.com/docker/docker/layer/empty_test.go new file mode 100644 index 0000000..c22da76 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/empty_test.go @@ -0,0 +1,46 @@ +package layer + +import ( + "io" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestEmptyLayer(t *testing.T) { + if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) { + t.Fatal("wrong ID for empty layer") + } + + if EmptyLayer.DiffID() != DigestSHA256EmptyTar { + t.Fatal("wrong DiffID for empty layer") + } + + if EmptyLayer.Parent() != nil { + t.Fatal("expected no parent for empty layer") + } + + if size, err := EmptyLayer.Size(); err != nil || size != 0 { + t.Fatal("expected zero size for empty layer") + } + + if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 { + t.Fatal("expected zero diffsize for empty layer") + } + + tarStream, err := EmptyLayer.TarStream() + if err != nil { + t.Fatalf("error streaming tar for empty layer: %v", err) + } + + digester := digest.Canonical.New() + _, err = io.Copy(digester.Hash(), tarStream) + + if err != nil { + t.Fatalf("error hashing empty tar layer: %v", err) + } + + if digester.Digest() != digest.Digest(DigestSHA256EmptyTar) { + t.Fatal("empty layer tar stream hashes to wrong value") + } +} diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go new file mode 100644 index 0000000..42b4555 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore.go @@ -0,0 +1,354 @@ +package layer + +import ( + "compress/gzip" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + ws *ioutils.AtomicWriteSet +} + +// NewFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. +func NewFSMetadataStore(root string) (MetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + ws, err := ioutils.NewAtomicWriteSet(tmpDir) + if err != nil { + return nil, err + } + + return &fileMetadataTransaction{ + store: fms, + ws: ws, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return fm.ws.WriteFile("size", []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { + jsonRef, err := json.Marshal(ref) + if err != nil { + return err + } + return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + + return fm.ws.Commit(finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return fm.ws.Cancel() +} + +func (fm *fileMetadataTransaction) String() string { + return fm.ws.String() +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) + if err != nil { + if os.IsNotExist(err) { + // only return empty descriptor to represent what is stored + return distribution.Descriptor{}, nil + } + return distribution.Descriptor{}, err + } + + var ref distribution.Descriptor + err = json.Unmarshal(content, &ref) + if err != nil { + return distribution.Descriptor{}, err + } + return ref, err +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids = append(ids, ChainID(dgst)) + } + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/vendor/github.com/docker/docker/layer/filestore_test.go b/vendor/github.com/docker/docker/layer/filestore_test.go new file mode 100644 index 0000000..55e3b28 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore_test.go @@ -0,0 +1,104 @@ +package layer + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "syscall" + "testing" + + "github.com/docker/distribution/digest" +) + +func randomLayerID(seed int64) ChainID { + r := rand.New(rand.NewSource(seed)) + + return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))) +} + +func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { + td, err := ioutil.TempDir("", "layers-") + if err != nil { + t.Fatal(err) + } + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + + return fms.(*fileMetadataStore), td, func() { + if err := os.RemoveAll(td); err != nil { + t.Logf("Failed to cleanup %q: %s", td, err) + } + } +} + +func assertNotDirectoryError(t *testing.T, err error) { + perr, ok := err.(*os.PathError) + if !ok { + t.Fatalf("Unexpected error %#v, expected path error", err) + } + + if perr.Err != syscall.ENOTDIR { + t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR) + } +} + +func TestCommitFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if err := tx.SetSize(0); err != nil { + t.Fatal(err) + } + + err = tx.Commit(randomLayerID(5)) + if err == nil { + t.Fatalf("Expected error committing with invalid layer parent directory") + } + assertNotDirectoryError(t, err) +} + +func TestStartTransactionFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + _, err := fms.StartTransaction() + if err == nil { + t.Fatalf("Expected error starting transaction with invalid layer parent directory") + } + assertNotDirectoryError(t, err) + + if err := os.Remove(filepath.Join(td, "tmp")); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if expected := filepath.Join(td, "tmp"); strings.HasPrefix(expected, tx.String()) { + t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected) + } + + if err := tx.Cancel(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go new file mode 100644 index 0000000..ec1d434 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer.go @@ -0,0 +1,275 @@ +// Package layer is package for managing read-only +// and read-write mounts on the union file system +// driver. Read-only mounts are referenced using a +// content hash and are protected from mutation in +// the exposed interface. The tar format is used +// to create read-only layers and export both +// read-only and writable layers. The exported +// tar data for a read-only layer should match +// the tar used to create the layer. +package layer + +import ( + "errors" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/archive" +) + +var ( + // ErrLayerDoesNotExist is used when an operation is + // attempted on a layer which does not exist. + ErrLayerDoesNotExist = errors.New("layer does not exist") + + // ErrLayerNotRetained is used when a release is + // attempted on a layer which is not retained. + ErrLayerNotRetained = errors.New("layer not retained") + + // ErrMountDoesNotExist is used when an operation is + // attempted on a mount layer which does not exist. + ErrMountDoesNotExist = errors.New("mount does not exist") + + // ErrMountNameConflict is used when a mount is attempted + // to be created but there is already a mount with the name + // used for creation. + ErrMountNameConflict = errors.New("mount already exists with name") + + // ErrActiveMount is used when an operation on a + // mount is attempted but the layer is still + // mounted and the operation cannot be performed. + ErrActiveMount = errors.New("mount still active") + + // ErrNotMounted is used when requesting an active + // mount but the layer is not mounted. + ErrNotMounted = errors.New("not mounted") + + // ErrMaxDepthExceeded is used when a layer is attempted + // to be created which would result in a layer depth + // greater than the 125 max. + ErrMaxDepthExceeded = errors.New("max depth exceeded") + + // ErrNotSupported is used when the action is not supported + // on the current platform + ErrNotSupported = errors.New("not support on this platform") +) + +// ChainID is the content-addressable ID of a layer. +type ChainID digest.Digest + +// String returns a string rendition of a layer ID +func (id ChainID) String() string { + return string(id) +} + +// DiffID is the hash of an individual layer tar. +type DiffID digest.Digest + +// String returns a string rendition of a layer DiffID +func (diffID DiffID) String() string { + return string(diffID) +} + +// TarStreamer represents an object which may +// have its contents exported as a tar stream. +type TarStreamer interface { + // TarStream returns a tar archive stream + // for the contents of a layer. + TarStream() (io.ReadCloser, error) +} + +// Layer represents a read-only layer +type Layer interface { + TarStreamer + + // TarStreamFrom returns a tar archive stream for all the layer chain with + // arbitrary depth. + TarStreamFrom(ChainID) (io.ReadCloser, error) + + // ChainID returns the content hash of the entire layer chain. The hash + // chain is made up of DiffID of top layer and all of its parents. + ChainID() ChainID + + // DiffID returns the content hash of the layer + // tar stream used to create this layer. + DiffID() DiffID + + // Parent returns the next layer in the layer chain. + Parent() Layer + + // Size returns the size of the entire layer chain. The size + // is calculated from the total size of all files in the layers. + Size() (int64, error) + + // DiffSize returns the size difference of the top layer + // from parent layer. + DiffSize() (int64, error) + + // Metadata returns the low level storage metadata associated + // with layer. + Metadata() (map[string]string, error) +} + +// RWLayer represents a layer which is +// read and writable +type RWLayer interface { + TarStreamer + + // Name of mounted layer + Name() string + + // Parent returns the layer which the writable + // layer was created from. + Parent() Layer + + // Mount mounts the RWLayer and returns the filesystem path + // the to the writable layer. + Mount(mountLabel string) (string, error) + + // Unmount unmounts the RWLayer. This should be called + // for every mount. If there are multiple mount calls + // this operation will only decrement the internal mount counter. + Unmount() error + + // Size represents the size of the writable layer + // as calculated by the total size of the files + // changed in the mutable layer. + Size() (int64, error) + + // Changes returns the set of changes for the mutable layer + // from the base layer. + Changes() ([]archive.Change, error) + + // Metadata returns the low level metadata for the mutable layer + Metadata() (map[string]string, error) +} + +// Metadata holds information about a +// read-only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root string) error + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID) (Layer, error) + Get(ChainID) (Layer, error) + Map() map[ChainID]Layer + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + GetMountID(id string) (string, error) + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// DescribableStore represents a layer store capable of storing +// descriptors for layers. +type DescribableStore interface { + RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) +} + +// MetadataTransaction represents functions for setting layer metadata +// with a single transaction. +type MetadataTransaction interface { + SetSize(int64) error + SetParent(parent ChainID) error + SetDiffID(DiffID) error + SetCacheID(string) error + SetDescriptor(distribution.Descriptor) error + TarSplitWriter(compressInput bool) (io.WriteCloser, error) + + Commit(ChainID) error + Cancel() error + String() string +} + +// MetadataStore represents a backend for persisting +// metadata about layers and providing the metadata +// for restoring a Store. +type MetadataStore interface { + // StartTransaction starts an update for new metadata + // which will be used to represent an ID on commit. + StartTransaction() (MetadataTransaction, error) + + GetSize(ChainID) (int64, error) + GetParent(ChainID) (ChainID, error) + GetDiffID(ChainID) (DiffID, error) + GetCacheID(ChainID) (string, error) + GetDescriptor(ChainID) (distribution.Descriptor, error) + TarSplitReader(ChainID) (io.ReadCloser, error) + + SetMountID(string, string) error + SetInitID(string, string) error + SetMountParent(string, ChainID) error + + GetMountID(string) (string, error) + GetInitID(string) (string, error) + GetMountParent(string) (ChainID, error) + + // List returns the full list of referenced + // read-only and read-write layers + List() ([]ChainID, []string, error) + + Remove(ChainID) error + RemoveMount(string) error +} + +// CreateChainID returns ID for a layerDigest slice +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) +} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, uses this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go new file mode 100644 index 0000000..1a1ff9f --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_store.go @@ -0,0 +1,684 @@ +package layer + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. +const maxLayerDepth = 125 + +type layerStore struct { + store MetadataStore + driver graphdriver.Driver + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + StorePath string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + PluginGetter plugingetter.PluginGetter + ExperimentalEnabled bool +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ + Root: options.StorePath, + DriverOptions: options.GraphDriverOptions, + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + ExperimentalEnabled: options.ExperimentalEnabled, + }) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Using graph driver %s", driver) + + fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) + if err != nil { + return nil, err + } + + return NewStoreFromGraphDriver(fms, driver) +} + +// NewStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. +func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { + ls := &layerStore{ + store: store, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + } + + ids, mounts, err := store.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + descriptor, err := ls.store.GetDescriptor(layer) + if err != nil { + return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + } + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.New() + tr := io.TeeReader(ts, digester.Hash()) + + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) + if err != nil { + return err + } + + // Discard trailing data but ensure metadata is picked up to reconstruct stream + io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) +} + +func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) Map() map[ChainID]Layer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layers := map[ChainID]Layer{} + + for k, v := range ls.layerMap { + layers[k] = v + } + + return layers +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + if err != nil { + return nil, err + } + m.initID = pid + } + + createOpts := &graphdriver.CreateOpts{ + StorageOpt: storageOpt, + } + + if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { + return nil, err + } + + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + delete(ls.mounts, m.Name()) + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. + initID := fmt.Sprintf("%s-init", graphID) + + createOpts := &graphdriver.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: storageOpt, + } + + if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(graphID) + if err != nil { + return err + } + defer fileGetCloser.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + logrus.Debugf("Assembling tar data for %s", graphID) + return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +type fileGetPutter struct { + storage.FileGetter + driver graphdriver.Driver + id string +} + +func (w *fileGetPutter) Close() error { + return w.driver.Put(w.id) +} + +func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return nil, err + } + return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil +} diff --git a/vendor/github.com/docker/docker/layer/layer_store_windows.go b/vendor/github.com/docker/docker/layer/layer_store_windows.go new file mode 100644 index 0000000..1276a91 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_store_windows.go @@ -0,0 +1,11 @@ +package layer + +import ( + "io" + + "github.com/docker/distribution" +) + +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, descriptor) +} diff --git a/vendor/github.com/docker/docker/layer/layer_test.go b/vendor/github.com/docker/docker/layer/layer_test.go new file mode 100644 index 0000000..10712df --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_test.go @@ -0,0 +1,771 @@ +package layer + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" +) + +func init() { + graphdriver.ApplyUncompressedLayer = archive.UnpackLayer + vfs.CopyWithTar = archive.CopyWithTar +} + +func newVFSGraphDriver(td string) (graphdriver.Driver, error) { + uidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getuid(), + Size: 1, + }, + } + gidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getgid(), + Size: 1, + }, + } + + options := graphdriver.Options{Root: td, UIDMaps: uidMap, GIDMaps: gidMap} + return graphdriver.GetDriver("vfs", nil, options) +} + +func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) { + td, err := ioutil.TempDir("", "graph-") + if err != nil { + t.Fatal(err) + } + + driver, err := newVFSGraphDriver(td) + if err != nil { + t.Fatal(err) + } + + return driver, func() { + os.RemoveAll(td) + } +} + +func newTestStore(t *testing.T) (Store, string, func()) { + td, err := ioutil.TempDir("", "layerstore-") + if err != nil { + t.Fatal(err) + } + + graph, graphcleanup := newTestGraphDriver(t) + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph) + if err != nil { + t.Fatal(err) + } + + return ls, td, func() { + graphcleanup() + os.RemoveAll(td) + } +} + +type layerInit func(root string) error + +func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { + containerID := stringid.GenerateRandomID() + mount, err := ls.CreateRWLayer(containerID, parent, "", nil, nil) + if err != nil { + return nil, err + } + + path, err := mount.Mount("") + if err != nil { + return nil, err + } + + if err := layerFunc(path); err != nil { + return nil, err + } + + ts, err := mount.TarStream() + if err != nil { + return nil, err + } + defer ts.Close() + + layer, err := ls.Register(ts, parent) + if err != nil { + return nil, err + } + + if err := mount.Unmount(); err != nil { + return nil, err + } + + if _, err := ls.ReleaseRWLayer(mount); err != nil { + return nil, err + } + + return layer, nil +} + +type FileApplier interface { + ApplyFile(root string) error +} + +type testFile struct { + name string + content []byte + permission os.FileMode +} + +func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { + return &testFile{ + name: name, + content: content, + permission: perm, + } +} + +func (tf *testFile) ApplyFile(root string) error { + fullPath := filepath.Join(root, tf.name) + if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { + return err + } + // Check if already exists + if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { + if err := os.Chmod(fullPath, tf.permission); err != nil { + return err + } + } + if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { + return err + } + return nil +} + +func initWithFiles(files ...FileApplier) layerInit { + return func(root string) error { + for _, f := range files { + if err := f.ApplyFile(root); err != nil { + return err + } + } + return nil + } +} + +func getCachedLayer(l Layer) *roLayer { + if rl, ok := l.(*referencedCacheLayer); ok { + return rl.roLayer + } + return l.(*roLayer) +} + +func getMountLayer(l RWLayer) *mountedLayer { + return l.(*referencedRWLayer).mountedLayer +} + +func createMetadata(layers ...Layer) []Metadata { + metadata := make([]Metadata, len(layers)) + for i := range layers { + size, err := layers[i].Size() + if err != nil { + panic(err) + } + + metadata[i].ChainID = layers[i].ChainID() + metadata[i].DiffID = layers[i].DiffID() + metadata[i].Size = size + metadata[i].DiffSize = getCachedLayer(layers[i]).size + } + + return metadata +} + +func assertMetadata(t *testing.T, metadata, expectedMetadata []Metadata) { + if len(metadata) != len(expectedMetadata) { + t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata)) + } + + for i := range metadata { + if metadata[i] != expectedMetadata[i] { + t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i]) + } + } + if t.Failed() { + t.FailNow() + } +} + +func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) { + layerCount := len(ls.(*layerStore).layerMap) + expectedMetadata := createMetadata(removed...) + metadata, err := ls.Release(layer) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, expectedMetadata) + + if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } +} + +func cacheID(l Layer) string { + return getCachedLayer(l).cacheID +} + +func assertLayerEqual(t *testing.T, l1, l2 Layer) { + if l1.ChainID() != l2.ChainID() { + t.Fatalf("Mismatched ID: %s vs %s", l1.ChainID(), l2.ChainID()) + } + if l1.DiffID() != l2.DiffID() { + t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) + } + + size1, err := l1.Size() + if err != nil { + t.Fatal(err) + } + + size2, err := l2.Size() + if err != nil { + t.Fatal(err) + } + + if size1 != size2 { + t.Fatalf("Mismatched size: %d vs %d", size1, size2) + } + + if cacheID(l1) != cacheID(l2) { + t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2)) + } + + p1 := l1.Parent() + p2 := l2.Parent() + if p1 != nil && p2 != nil { + assertLayerEqual(t, p1, p2) + } else if p1 != nil || p2 != nil { + t.Fatalf("Mismatched parents: %v vs %v", p1, p2) + } +} + +func TestMountAndRegister(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + size, _ := layer.Size() + t.Logf("Layer size: %d", size) + + mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), "", nil, nil) + if err != nil { + t.Fatal(err) + } + + path2, err := mount2.Mount("") + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + + if expected := "some test data"; string(b) != expected { + t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b)) + } + + if err := mount2.Unmount(); err != nil { + t.Fatal(err) + } + + if _, err := ls.ReleaseRWLayer(mount2); err != nil { + t.Fatal(err) + } +} + +func TestLayerRelease(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + t.Logf("Layer1: %s", layer1.ChainID()) + t.Logf("Layer2: %s", layer2.ChainID()) + t.Logf("Layer3a: %s", layer3a.ChainID()) + t.Logf("Layer3b: %s", layer3b.ChainID()) + + if expected := 4; len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } + + releaseAndCheckDeleted(t, ls, layer3b, layer3b) + releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1) +} + +func TestStoreRestore(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), "", nil, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { + t.Fatal(err) + } + + if err := m.Unmount(); err != nil { + t.Fatal(err) + } + + ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver) + if err != nil { + t.Fatal(err) + } + + layer3b, err := ls2.Get(layer3.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertLayerEqual(t, layer3b, layer3) + + // Create again with same name, should return error + if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), "", nil, nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + m2, err := ls2.GetRWLayer("some-mount_name") + if err != nil { + t.Fatal(err) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + if err := m2.Unmount(); err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + if expected := "nothing here"; string(b) != expected { + t.Fatalf("Unexpected content %q, expected %q", string(b), expected) + } + + if err := m2.Unmount(); err != nil { + t.Fatal(err) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1) +} + +func TestTarStreamStability(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), + } + addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) + files2 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664), + newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644), + } + + tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) + if err != nil { + t.Fatal(err) + } + + layer1, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + // hack layer to add file + p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") + if err != nil { + t.Fatal(err) + } + + if err := addedFile.ApplyFile(p); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil { + t.Fatal(err) + } + + layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + id1 := layer1.ChainID() + t.Logf("Layer 1: %s", layer1.ChainID()) + t.Logf("Layer 2: %s", layer2.ChainID()) + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar2, layer2) + + layer1b, err := ls.Get(id1) + if err != nil { + t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap) + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar1, layer1b) + + if _, err := ls.Release(layer1b); err != nil { + t.Fatal(err) + } +} + +func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { + expectedDigest := digest.FromBytes(expected) + + if digest.Digest(layer.DiffID()) != expectedDigest { + t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected) + } + + ts, err := layer.TarStream() + if err != nil { + t.Fatal(err) + } + defer ts.Close() + + actual, err := ioutil.ReadAll(ts) + if err != nil { + t.Fatal(err) + } + + if len(actual) != len(expected) { + logByteDiff(t, actual, expected) + t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) + } + + actualDigest := digest.FromBytes(actual) + + if actualDigest != expectedDigest { + logByteDiff(t, actual, expected) + t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) + } +} + +const maxByteLog = 4 * 1024 + +func logByteDiff(t *testing.T, actual, expected []byte) { + d1, d2 := byteDiff(actual, expected) + if len(d1) == 0 && len(d2) == 0 { + return + } + + prefix := len(actual) - len(d1) + if len(d1) > maxByteLog || len(d2) > maxByteLog { + t.Logf("Byte diff after %d matching bytes", prefix) + } else { + t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2) + } +} + +// byteDiff returns the differing bytes after the matching prefix +func byteDiff(b1, b2 []byte) ([]byte, []byte) { + i := 0 + for i < len(b1) && i < len(b2) { + if b1[i] != b2[i] { + break + } + i++ + } + + return b1[i:], b2[i:] +} + +func tarFromFiles(files ...FileApplier) ([]byte, error) { + td, err := ioutil.TempDir("", "tar-") + if err != nil { + return nil, err + } + defer os.RemoveAll(td) + + for _, f := range files { + if err := f.ApplyFile(td); err != nil { + return nil, err + } + } + + r, err := archive.Tar(td, archive.Uncompressed) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, r); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// assertReferences asserts that all the references are to the same +// image and represent the full set of references to that image. +func assertReferences(t *testing.T, references ...Layer) { + if len(references) == 0 { + return + } + base := references[0].(*referencedCacheLayer).roLayer + seenReferences := map[Layer]struct{}{ + references[0]: {}, + } + for i := 1; i < len(references); i++ { + other := references[i].(*referencedCacheLayer).roLayer + if base != other { + t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID()) + } + if _, ok := base.references[references[i]]; !ok { + t.Fatalf("Reference not part of reference list: %v", references[i]) + } + if _, ok := seenReferences[references[i]]; ok { + t.Fatalf("Duplicated reference %v", references[i]) + } + } + if rc := len(base.references); rc != len(references) { + t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references)) + } +} + +func TestRegisterExistingLayer(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layerFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), + } + + li := initWithFiles(baseFiles...) + layer1, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + tar1, err := tarFromFiles(layerFiles...) + if err != nil { + t.Fatal(err) + } + + layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer2a, layer2b) +} + +func TestTarStreamVerification(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, tmpdir, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0644), + } + files2 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0600), // different perm + } + + tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) + if err != nil { + t.Fatal(err) + } + + layer1, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + layer2, err := ls.Register(bytes.NewReader(tar2), "") + if err != nil { + t.Fatal(err) + } + id1 := digest.Digest(layer1.ChainID()) + id2 := digest.Digest(layer2.ChainID()) + + // Replace tar data files + src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz")) + if err != nil { + t.Fatal(err) + } + defer src.Close() + + dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz")) + if err != nil { + t.Fatal(err) + } + defer dst.Close() + + if _, err := io.Copy(dst, src); err != nil { + t.Fatal(err) + } + + src.Sync() + dst.Sync() + + ts, err := layer2.TarStream() + if err != nil { + t.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, ts) + if err == nil { + t.Fatal("expected data verification to fail") + } + if !strings.Contains(err.Error(), "could not verify layer data") { + t.Fatalf("wrong error returned from tarstream: %q", err) + } +} diff --git a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go new file mode 100644 index 0000000..776b78a --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin openbsd solaris + +package layer + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name string) string { + return stringid.GenerateRandomID() +} diff --git a/vendor/github.com/docker/docker/layer/layer_unix_test.go b/vendor/github.com/docker/docker/layer/layer_unix_test.go new file mode 100644 index 0000000..9aa1afd --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_unix_test.go @@ -0,0 +1,71 @@ +// +build !windows + +package layer + +import "testing" + +func graphDiffSize(ls Store, l Layer) (int64, error) { + cl := getCachedLayer(l) + var parent string + if cl.parent != nil { + parent = cl.parent.cacheID + } + return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) +} + +// Unix as Windows graph driver does not support Changes which is indirectly +// invoked by calling DiffSize on the driver +func TestLayerSize(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Added contents") + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644))) + if err != nil { + t.Fatal(err) + } + + layer1DiffSize, err := graphDiffSize(ls, layer1) + if err != nil { + t.Fatal(err) + } + + if int(layer1DiffSize) != len(content1) { + t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1)) + } + + layer1Size, err := layer1.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1); int(layer1Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer1Size, expected) + } + + layer2DiffSize, err := graphDiffSize(ls, layer2) + if err != nil { + t.Fatal(err) + } + + if int(layer2DiffSize) != len(content2) { + t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2)) + } + + layer2Size, err := layer2.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1) + len(content2); int(layer2Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected) + } + +} diff --git a/vendor/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go new file mode 100644 index 0000000..e20311a --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_windows.go @@ -0,0 +1,98 @@ +package layer + +import ( + "errors" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" +) + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path, nil +} + +func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { + var err error // this is used for cleanup in existingLayer case + diffID := digest.FromBytes([]byte(graphID)) + + // Create new roLayer + layer := &roLayer{ + cacheID: graphID, + diffID: DiffID(diffID), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + size: size, + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + defer func() { + if err != nil { + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + layer.chainID = createChainIDFromParent("", layer.diffID) + + if !ls.driver.Exists(layer.cacheID) { + return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) + } + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} + +func (ls *layerStore) GraphDriver() graphdriver.Driver { + return ls.driver +} diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go new file mode 100644 index 0000000..b45c310 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/migration.go @@ -0,0 +1,256 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. +func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + if m.parent.chainID != parent { + return errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return errors.New("mount already exists") + } + + return nil + } + + if !ls.driver.Exists(graphID) { + return fmt.Errorf("graph ID does not exist: %q", graphID) + } + + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = initID + } + + if err = ls.saveMount(m); err != nil { + return err + } + + return nil +} + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.New() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = DiffID(dgst.Digest()) + err = os.RemoveAll(newTarDataPath) + if err != nil { + return + } + err = os.Link(oldTarDataPath, newTarDataPath) + + return +} + +func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { + rawarchive, err := ls.driver.Diff(id, parent) + if err != nil { + return + } + defer rawarchive.Close() + + f, err := os.Create(newTarDataPath) + if err != nil { + return + } + defer f.Close() + mfz := gzip.NewWriter(f) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + packerCounter := &packSizeCounter{metaPacker, &size} + + archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) + if err != nil { + return + } + dgst, err := digest.FromReader(archive) + if err != nil { + return + } + diffID = DiffID(dgst) + return +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). + var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/vendor/github.com/docker/docker/layer/migration_test.go b/vendor/github.com/docker/docker/layer/migration_test.go new file mode 100644 index 0000000..07b4b68 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/migration_test.go @@ -0,0 +1,435 @@ +package layer + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +func writeTarSplitFile(name string, tarContent []byte) error { + f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + fz := gzip.NewWriter(f) + + metaPacker := storage.NewJSONPacker(fz) + defer fz.Close() + + rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) + if err != nil { + return err + } + + if _, err := io.Copy(ioutil.Discard, rdr); err != nil { + return err + } + + return nil +} + +func TestLayerMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + tar1, err := tarFromFiles(layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(layer2Files...) + if err != nil { + t.Fatal(err) + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + + graphID1 := stringid.GenerateRandomID() + if err := graph.Create(graphID1, "", nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID1, "", bytes.NewReader(tar1)); err != nil { + t.Fatal(err) + } + + tf1 := filepath.Join(td, "tar1.json.gz") + if err := writeTarSplitFile(tf1, tar1); err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", tf1, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + if err != nil { + t.Fatal(err) + } + + graphID2 := stringid.GenerateRandomID() + if err := graph.Create(graphID2, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID2, graphID1, bytes.NewReader(tar2)); err != nil { + t.Fatal(err) + } + + tf2 := filepath.Join(td, "tar2.json.gz") + if err := writeTarSplitFile(tf2, tar2); err != nil { + t.Fatal(err) + } + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, tf2, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, tf2, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, files ...FileApplier) ([]byte, error) { + t, err := tarFromFiles(files...) + if err != nil { + return nil, err + } + + if err := graph.Create(graphID, parentID, nil); err != nil { + return nil, err + } + if _, err := graph.ApplyDiff(graphID, parentID, bytes.NewReader(t)); err != nil { + return nil, err + } + + ar, err := graph.Diff(graphID, parentID) + if err != nil { + return nil, err + } + defer ar.Close() + + return ioutil.ReadAll(ar) +} + +func TestLayerMigrationNoTarsplit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + graphID1 := stringid.GenerateRandomID() + graphID2 := stringid.GenerateRandomID() + + tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...) + if err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + if err != nil { + t.Fatal(err) + } + + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func TestMountMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing (obvious - paths... needs porting) + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + initFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte{}, 0644), + newTestFile("/etc/resolv.conf", []byte{}, 0644), + } + mountFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644), + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644), + } + + initTar, err := tarFromFiles(initFiles...) + if err != nil { + t.Fatal(err) + } + + mountTar, err := tarFromFiles(mountFiles...) + if err != nil { + t.Fatal(err) + } + + graph := ls.(*layerStore).driver + + layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) + if err != nil { + t.Fatal(err) + } + + graphID1 := layer1.(*referencedCacheLayer).cacheID + + containerID := stringid.GenerateRandomID() + containerInit := fmt.Sprintf("%s-init", containerID) + + if err := graph.Create(containerInit, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerInit, graphID1, bytes.NewReader(initTar)); err != nil { + t.Fatal(err) + } + + if err := graph.Create(containerID, containerInit, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerID, containerInit, bytes.NewReader(mountTar)); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil { + t.Fatal(err) + } + + rwLayer1, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if _, err := rwLayer1.Mount(""); err != nil { + t.Fatal(err) + } + + changes, err := rwLayer1.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 5; len(changes) != expected { + t.Logf("Changes %#v", changes) + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/etc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/etc/hosts", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/root", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/root/.bashrc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[4], archive.Change{ + Path: "/root/testfile1.txt", + Kind: archive.ChangeAdd, + }) + + if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil, nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + rwLayer2, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) { + t.Fatal("Expected same layer from get with same name as from migrate") + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if metadata, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata) + } + + if err := rwLayer1.Unmount(); err != nil { + t.Fatal(err) + } + + if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil { + t.Fatal(err) + } + + if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + metadata, err := ls.ReleaseRWLayer(rwLayer2) + if err != nil { + t.Fatal(err) + } + if len(metadata) == 0 { + t.Fatal("Expected base layer to be deleted when deleting mount") + } + + assertMetadata(t, metadata, createMetadata(layer1)) +} diff --git a/vendor/github.com/docker/docker/layer/mount_test.go b/vendor/github.com/docker/docker/layer/mount_test.go new file mode 100644 index 0000000..7a8637e --- /dev/null +++ b/vendor/github.com/docker/docker/layer/mount_test.go @@ -0,0 +1,230 @@ +package layer + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +func TestMountInit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) + initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefile) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), "", mountInit, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + if expected := "init data!"; string(b) != expected { + t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) + } + + if fi.Mode().Perm() != 0777 { + t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) + } +} + +func TestMountSize(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Mutable contents") + contentInit := []byte("why am I excluded from the size ☹") + + li := initWithFiles(newTestFile("file1", content1, 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return newTestFile("file-init", contentInit, 0777).ApplyFile(root) + } + + m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), "", mountInit, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { + t.Fatal(err) + } + + mountSize, err := m.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content2); int(mountSize) != expected { + t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected) + } +} + +func TestMountChanges(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefiles := []FileApplier{ + newTestFile("testfile1.txt", []byte("base data!"), 0644), + newTestFile("testfile2.txt", []byte("base data!"), 0644), + newTestFile("testfile3.txt", []byte("base data!"), 0644), + } + initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefiles...) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), "", mountInit, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { + t.Fatal(err) + } + + if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { + t.Fatal(err) + } + + changes, err := m.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 4; len(changes) != expected { + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/testfile1.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/testfile2.txt", + Kind: archive.ChangeDelete, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/testfile3.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/testfile4.txt", + Kind: archive.ChangeAdd, + }) +} + +func assertChange(t *testing.T, actual, expected archive.Change) { + if actual.Path != expected.Path { + t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path) + } + if actual.Kind != expected.Kind { + t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind) + } +} + +func sortChanges(changes []archive.Change) { + cs := &changeSorter{ + changes: changes, + } + sort.Sort(cs) +} + +type changeSorter struct { + changes []archive.Change +} + +func (cs *changeSorter) Len() int { + return len(cs.changes) +} + +func (cs *changeSorter) Swap(i, j int) { + cs.changes[i], cs.changes[j] = cs.changes[j], cs.changes[i] +} + +func (cs *changeSorter) Less(i, j int) bool { + return cs.changes[i].Path < cs.changes[j].Path +} diff --git a/vendor/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go new file mode 100644 index 0000000..a5cfcfa --- /dev/null +++ b/vendor/github.com/docker/docker/layer/mounted_layer.go @@ -0,0 +1,99 @@ +package layer + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. + return nil +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.references[ref] = ref + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + return len(ml.references) > 0 +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + if _, ok := ml.references[ref]; !ok { + return ErrLayerNotRetained + } + delete(ml.references, ref) + return nil +} + +func (ml *mountedLayer) retakeReference(r RWLayer) { + if ref, ok := r.(*referencedRWLayer); ok { + ml.references[ref] = ref + } +} + +type referencedRWLayer struct { + *mountedLayer +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { + return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) +} + +// Unmount decrements the activity count and unmounts the underlying layer +// Callers should only call `Unmount` once per call to `Mount`, even on error. +func (rl *referencedRWLayer) Unmount() error { + return rl.layerStore.driver.Put(rl.mountedLayer.mountID) +} diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go new file mode 100644 index 0000000..7c8d233 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/ro_layer.go @@ -0,0 +1,192 @@ +package layer + +import ( + "fmt" + "io" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" +) + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + descriptor distribution.Descriptor + + referenceCount int + references map[Layer]struct{} +} + +// TarStream for roLayer guarentees that the data that is produced is the exact +// data that the layer was registered with. +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + r, err := rl.layerStore.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return rc, nil +} + +// TarStreamFrom does not make any guarentees to the correctness of the produced +// data. As such it should not be used when the layer content must be verified +// to be an exact match to the registered layer. +func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { + var parentCacheID string + for pl := rl.parent; pl != nil; pl = pl.parent { + if pl.chainID == parent { + parentCacheID = pl.cacheID + break + } + } + + if parent != ChainID("") && parentCacheID == "" { + return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) + } + return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx MetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + // Do not store empty descriptors + if layer.descriptor.Digest != "" { + if err := tx.SetDescriptor(layer.descriptor); err != nil { + return err + } + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + + return nil +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: verifier, + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) + } + } + return +} +func (vrc *verifiedReadCloser) Close() error { + return vrc.rc.Close() +} diff --git a/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/vendor/github.com/docker/docker/layer/ro_layer_windows.go new file mode 100644 index 0000000..32bd718 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/ro_layer_windows.go @@ -0,0 +1,9 @@ +package layer + +import "github.com/docker/distribution" + +var _ distribution.Describable = &roLayer{} + +func (rl *roLayer) Descriptor() distribution.Descriptor { + return rl.descriptor +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client.go b/vendor/github.com/docker/docker/libcontainerd/client.go new file mode 100644 index 0000000..c14c1c5 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client.go @@ -0,0 +1,46 @@ +package libcontainerd + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" +) + +// clientCommon contains the platform agnostic fields used in the client structure +type clientCommon struct { + backend Backend + containers map[string]*container + locker *locker.Locker + mapMutex sync.RWMutex // protects read/write oprations from containers map +} + +func (clnt *client) lock(containerID string) { + clnt.locker.Lock(containerID) +} + +func (clnt *client) unlock(containerID string) { + clnt.locker.Unlock(containerID) +} + +// must hold a lock for cont.containerID +func (clnt *client) appendContainer(cont *container) { + clnt.mapMutex.Lock() + clnt.containers[cont.containerID] = cont + clnt.mapMutex.Unlock() +} +func (clnt *client) deleteContainer(containerID string) { + clnt.mapMutex.Lock() + delete(clnt.containers, containerID) + clnt.mapMutex.Unlock() +} + +func (clnt *client) getContainer(containerID string) (*container, error) { + clnt.mapMutex.RLock() + container, ok := clnt.containers[containerID] + defer clnt.mapMutex.RUnlock() + if !ok { + return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error + } + return container, nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_linux.go new file mode 100644 index 0000000..190f981 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_linux.go @@ -0,0 +1,605 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +type client struct { + clientCommon + + // Platform specific properties below here. + remote *remote + q queue + exitNotifiers map[string]*exitNotifier + liveRestore bool +} + +// GetServerVersion returns the connected server version information +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) + if err != nil { + return nil, err + } + + sv := &ServerVersion{ + GetServerVersionResponse: *resp, + } + + return sv, nil +} + +// AddProcess is the handler for adding a process to an already running +// container. It's called through docker exec. It returns the system pid of the +// exec'd process. +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (pid int, err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return -1, err + } + + spec, err := container.spec() + if err != nil { + return -1, err + } + sp := spec.Process + sp.Args = specp.Args + sp.Terminal = specp.Terminal + if len(specp.Env) > 0 { + sp.Env = specp.Env + } + if specp.Cwd != nil { + sp.Cwd = *specp.Cwd + } + if specp.User != nil { + sp.User = specs.User{ + UID: specp.User.UID, + GID: specp.User.GID, + AdditionalGids: specp.User.AdditionalGids, + } + } + if specp.Capabilities != nil { + sp.Capabilities = specp.Capabilities + } + + p := container.newProcess(processFriendlyName) + + r := &containerd.AddProcessRequest{ + Args: sp.Args, + Cwd: sp.Cwd, + Terminal: sp.Terminal, + Id: containerID, + Env: sp.Env, + User: &containerd.User{ + Uid: sp.User.UID, + Gid: sp.User.GID, + AdditionalGids: sp.User.AdditionalGids, + }, + Pid: processFriendlyName, + Stdin: p.fifo(syscall.Stdin), + Stdout: p.fifo(syscall.Stdout), + Stderr: p.fifo(syscall.Stderr), + Capabilities: sp.Capabilities, + ApparmorProfile: sp.ApparmorProfile, + SelinuxLabel: sp.SelinuxLabel, + NoNewPrivileges: sp.NoNewPrivileges, + Rlimits: convertRlimits(sp.Rlimits), + } + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := p.openFifos(fifoCtx, sp.Terminal) + if err != nil { + return -1, err + } + + resp, err := clnt.remote.apiClient.AddProcess(ctx, r) + if err != nil { + p.closeFifos(iopipe) + return -1, err + } + + var stdinOnce sync.Once + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + if err2 := p.sendCloseStdin(); err == nil { + err = err2 + } + }) + return err + }) + + container.processes[processFriendlyName] = p + + if err := attachStdio(*iopipe); err != nil { + p.closeFifos(iopipe) + return -1, err + } + + return int(resp.SystemPid), nil +} + +func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: pid, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: containerID, + Pid: processFriendlyName, + Width: uint32(width), + Height: uint32(height), + }) + return err +} + +func (clnt *client) Pause(containerID string) error { + return clnt.setState(containerID, StatePause) +} + +func (clnt *client) setState(containerID, state string) error { + clnt.lock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + clnt.unlock(containerID) + return err + } + if container.systemPid == 0 { + clnt.unlock(containerID) + return fmt.Errorf("No active process for container %s", containerID) + } + st := "running" + if state == StatePause { + st = "paused" + } + chstate := make(chan struct{}) + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Status: st, + }) + if err != nil { + clnt.unlock(containerID) + return err + } + container.pauseMonitor.append(state, chstate) + clnt.unlock(containerID) + <-chstate + return nil +} + +func (clnt *client) Resume(containerID string) error { + return clnt.setState(containerID, StateResume) +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) + if err != nil { + return nil, err + } + return (*Stats)(resp), nil +} + +// Take care of the old 1.11.0 behavior in case the version upgrade +// happened without a clean daemon shutdown +func (clnt *client) cleanupOldRootfs(containerID string) { + // Unmount and delete the bundle folder + if mts, err := mount.GetMounts(); err == nil { + for _, mts := range mts { + if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { + if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil { + os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) + } + break + } + } + } +} + +func (clnt *client) setExited(containerID string, exitCode uint32) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + err := clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: exitCode, + }}) + + clnt.cleanupOldRootfs(containerID) + + return err +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + cont, err := clnt.getContainerdContainer(containerID) + if err != nil { + return nil, err + } + pids := make([]int, len(cont.Pids)) + for i, p := range cont.Pids { + pids[i] = int(p) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is a no-op on Linux. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { + resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) + if err != nil { + return nil, err + } + for _, cont := range resp.Containers { + if cont.Id == containerID { + return cont, nil + } + } + return nil, fmt.Errorf("invalid state response") +} + +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + if container.systemPid == 0 { + return fmt.Errorf("No active process for container %s", containerID) + } + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Resources: (*containerd.UpdateResource)(&resources), + }) + if err != nil { + return err + } + return nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + w, ok := clnt.exitNotifiers[containerID] + defer clnt.mapMutex.Unlock() + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +func (clnt *client) restore(cont *containerd.Container, lastEvent *containerd.Event, attachStdio StdioCallback, options ...CreateOption) (err error) { + clnt.lock(cont.Id) + defer clnt.unlock(cont.Id) + + logrus.Debugf("libcontainerd: restore container %s state %s", cont.Id, cont.Status) + + containerID := cont.Id + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("container %s is already active", containerID) + } + + defer func() { + if err != nil { + clnt.deleteContainer(cont.Id) + } + }() + + container := clnt.newContainer(cont.BundlePath, options...) + container.systemPid = systemPid(cont) + + var terminal bool + for _, p := range cont.Processes { + if p.Pid == InitFriendlyName { + terminal = p.Terminal + } + } + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := container.openFifos(fifoCtx, terminal) + if err != nil { + return err + } + var stdinOnce sync.Once + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + }) + return err + }) + + if err := attachStdio(*iopipe); err != nil { + container.closeFifos(iopipe) + return err + } + + clnt.appendContainer(container) + + err = clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateRestore, + Pid: container.systemPid, + }}) + + if err != nil { + container.closeFifos(iopipe) + return err + } + + if lastEvent != nil { + // This should only be a pause or resume event + if lastEvent.Type == StatePause || lastEvent.Type == StateResume { + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: lastEvent.Type, + Pid: container.systemPid, + }}) + } + + logrus.Warnf("libcontainerd: unexpected backlog event: %#v", lastEvent) + } + + return nil +} + +func (clnt *client) getContainerLastEventSinceTime(id string, tsp *timestamp.Timestamp) (*containerd.Event, error) { + er := &containerd.EventsRequest{ + Timestamp: tsp, + StoredOnly: true, + Id: id, + } + events, err := clnt.remote.apiClient.Events(context.Background(), er) + if err != nil { + logrus.Errorf("libcontainerd: failed to get container events stream for %s: %q", er.Id, err) + return nil, err + } + + var ev *containerd.Event + for { + e, err := events.Recv() + if err != nil { + if err.Error() == "EOF" { + break + } + logrus.Errorf("libcontainerd: failed to get container event for %s: %q", id, err) + return nil, err + } + ev = e + logrus.Debugf("libcontainerd: received past event %#v", ev) + } + + return ev, nil +} + +func (clnt *client) getContainerLastEvent(id string) (*containerd.Event, error) { + ev, err := clnt.getContainerLastEventSinceTime(id, clnt.remote.restoreFromTimestamp) + if err == nil && ev == nil { + // If ev is nil and the container is running in containerd, + // we already consumed all the event of the + // container, included the "exit" one. + // Thus, we request all events containerd has in memory for + // this container in order to get the last one (which should + // be an exit event) + logrus.Warnf("libcontainerd: client is out of sync, restore was called on a fully synced container (%s).", id) + // Request all events since beginning of time + t := time.Unix(0, 0) + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: getLastEventSinceTime() failed to convert timestamp: %q", err) + return nil, err + } + + return clnt.getContainerLastEventSinceTime(id, tsp) + } + + return ev, err +} + +func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error { + // Synchronize with live events + clnt.remote.Lock() + defer clnt.remote.Unlock() + // Check that containerd still knows this container. + // + // In the unlikely event that Restore for this container process + // the its past event before the main loop, the event will be + // processed twice. However, this is not an issue as all those + // events will do is change the state of the container to be + // exactly the same. + cont, err := clnt.getContainerdContainer(containerID) + // Get its last event + ev, eerr := clnt.getContainerLastEvent(containerID) + if err != nil || cont.Status == "Stopped" { + if err != nil { + logrus.Warnf("libcontainerd: failed to retrieve container %s state: %v", containerID, err) + } + if ev != nil && (ev.Pid != InitFriendlyName || ev.Type != StateExit) { + // Wait a while for the exit event + timeout := time.NewTimer(10 * time.Second) + tick := time.NewTicker(100 * time.Millisecond) + stop: + for { + select { + case <-timeout.C: + break stop + case <-tick.C: + ev, eerr = clnt.getContainerLastEvent(containerID) + if eerr != nil { + break stop + } + if ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { + break stop + } + } + } + timeout.Stop() + tick.Stop() + } + + // get the exit status for this container, if we don't have + // one, indicate an error + ec := uint32(255) + if eerr == nil && ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { + ec = ev.Status + } + clnt.setExited(containerID, ec) + + return nil + } + + // container is still alive + if clnt.liveRestore { + if err := clnt.restore(cont, ev, attachStdio, options...); err != nil { + logrus.Errorf("libcontainerd: error restoring %s: %v", containerID, err) + } + return nil + } + + // Kill the container if liveRestore == false + w := clnt.getOrCreateExitNotifier(containerID) + clnt.lock(cont.Id) + container := clnt.newContainer(cont.BundlePath) + container.systemPid = systemPid(cont) + clnt.appendContainer(container) + clnt.unlock(cont.Id) + + container.discardFifos() + + if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil { + logrus.Errorf("libcontainerd: error sending sigterm to %v: %v", containerID, err) + } + // Let the main loop handle the exit event + clnt.remote.Unlock() + select { + case <-time.After(10 * time.Second): + if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil { + logrus.Errorf("libcontainerd: error sending sigkill to %v: %v", containerID, err) + } + select { + case <-time.After(2 * time.Second): + case <-w.wait(): + // relock because of the defer + clnt.remote.Lock() + return nil + } + case <-w.wait(): + // relock because of the defer + clnt.remote.Lock() + return nil + } + // relock because of the defer + clnt.remote.Lock() + + clnt.deleteContainer(containerID) + + return clnt.setExited(containerID, uint32(255)) +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + + _, err := clnt.remote.apiClient.CreateCheckpoint(context.Background(), &containerd.CreateCheckpointRequest{ + Id: containerID, + Checkpoint: &containerd.Checkpoint{ + Name: checkpointID, + Exit: exit, + Tcp: true, + UnixSockets: true, + Shell: false, + EmptyNS: []string{"network"}, + }, + CheckpointDir: checkpointDir, + }) + return err +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + + _, err := clnt.remote.apiClient.DeleteCheckpoint(context.Background(), &containerd.DeleteCheckpointRequest{ + Id: containerID, + Name: checkpointID, + CheckpointDir: checkpointDir, + }) + return err +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return nil, err + } + + resp, err := clnt.remote.apiClient.ListCheckpoint(context.Background(), &containerd.ListCheckpointRequest{ + Id: containerID, + CheckpointDir: checkpointDir, + }) + if err != nil { + return nil, err + } + return (*Checkpoints)(resp), nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_solaris.go b/vendor/github.com/docker/docker/libcontainerd/client_solaris.go new file mode 100644 index 0000000..cb93997 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_solaris.go @@ -0,0 +1,101 @@ +package libcontainerd + +import "golang.org/x/net/context" + +type client struct { + clientCommon + + // Platform specific properties below here. + remote *remote + q queue + exitNotifiers map[string]*exitNotifier + liveRestore bool +} + +// GetServerVersion returns the connected server version information +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) + if err != nil { + return nil, err + } + + sv := &ServerVersion{ + GetServerVersionResponse: *resp, + } + + return sv, nil +} + +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (int, error) { + return -1, nil +} + +func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { + return nil +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + return nil +} + +func (clnt *client) Pause(containerID string) error { + return nil +} + +func (clnt *client) Resume(containerID string) error { + return nil +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + return nil, nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + defer clnt.mapMutex.Unlock() + w, ok := clnt.exitNotifiers[containerID] + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +// Restore is the handler for restoring a container +func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error { + return nil +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + return nil, nil +} + +// Summary returns a summary of the processes running in a container. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +// UpdateResources updates resources for a running container. +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + // Updating resource isn't supported on Solaris + // but we should return nil for enabling updating container + return nil +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + return nil +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + return nil +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_unix.go b/vendor/github.com/docker/docker/libcontainerd/client_unix.go new file mode 100644 index 0000000..21e8fea --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_unix.go @@ -0,0 +1,142 @@ +// +build linux solaris + +package libcontainerd + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/idtools" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { + root, err := filepath.Abs(clnt.remote.stateDir) + if err != nil { + return "", err + } + if uid == 0 && gid == 0 { + return root, nil + } + p := string(filepath.Separator) + for _, d := range strings.Split(root, string(filepath.Separator))[1:] { + p = filepath.Join(p, d) + fi, err := os.Stat(p) + if err != nil && !os.IsNotExist(err) { + return "", err + } + if os.IsNotExist(err) || fi.Mode()&1 == 0 { + p = fmt.Sprintf("%s.%d.%d", p, uid, gid) + if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + } + } + return p, nil +} + +func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) (err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("Container %s is already active", containerID) + } + + uid, gid, err := getRootIDs(specs.Spec(spec)) + if err != nil { + return err + } + dir, err := clnt.prepareBundleDir(uid, gid) + if err != nil { + return err + } + + container := clnt.newContainer(filepath.Join(dir, containerID), options...) + if err := container.clean(); err != nil { + return err + } + + defer func() { + if err != nil { + container.clean() + clnt.deleteContainer(containerID) + } + }() + + if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + + f, err := os.Create(filepath.Join(container.dir, configFilename)) + if err != nil { + return err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(spec); err != nil { + return err + } + + return container.start(checkpoint, checkpointDir, attachStdio) +} + +func (clnt *client) Signal(containerID string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: InitFriendlyName, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) newContainer(dir string, options ...CreateOption) *container { + container := &container{ + containerCommon: containerCommon{ + process: process{ + dir: dir, + processCommon: processCommon{ + containerID: filepath.Base(dir), + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + } + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: newContainer(): %v", err) + } + } + return container +} + +type exitNotifier struct { + id string + client *client + c chan struct{} + once sync.Once +} + +func (en *exitNotifier) close() { + en.once.Do(func() { + close(en.c) + en.client.mapMutex.Lock() + if en == en.client.exitNotifiers[en.id] { + delete(en.client.exitNotifiers, en.id) + } + en.client.mapMutex.Unlock() + }) +} +func (en *exitNotifier) wait() <-chan struct{} { + return en.c +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_windows.go b/vendor/github.com/docker/docker/libcontainerd/client_windows.go new file mode 100644 index 0000000..ddcf321 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_windows.go @@ -0,0 +1,631 @@ +package libcontainerd + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "golang.org/x/net/context" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/sysinfo" + "github.com/opencontainers/runtime-spec/specs-go" +) + +type client struct { + clientCommon + + // Platform specific properties below here (none presently on Windows) +} + +// Win32 error codes that are used for various workarounds +// These really should be ALL_CAPS to match golangs syscall library and standard +// Win32 error conventions, but golint insists on CamelCase. +const ( + CoEClassstring = syscall.Errno(0x800401F3) // Invalid class string + ErrorNoNetwork = syscall.Errno(1222) // The network is not present or not started + ErrorBadPathname = syscall.Errno(161) // The specified path is invalid + ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object +) + +// defaultOwner is a tag passed to HCS to allow it to differentiate between +// container creator management stacks. We hard code "docker" in the case +// of docker. +const defaultOwner = "docker" + +// Create is the entrypoint to create a container from a spec, and if successfully +// created, start it too. Table below shows the fields required for HCS JSON calling parameters, +// where if not populated, is omitted. +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | | Isolation=Process | Isolation=Hyper-V | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | VolumePath | \\?\\Volume{GUIDa} | | +// | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) | +// | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | +// | SandboxPath | | %root%\windowsfilter | +// | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// +// Isolation=Process example: +// +// { +// "SystemType": "Container", +// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Owner": "docker", +// "IsDummy": false, +// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", +// "IgnoreFlushesDuringBoot": true, +// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "5e0055c814a6", +// "MappedDirectories": [], +// "HvPartition": false, +// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], +// "Servicing": false +//} +// +// Isolation=Hyper-V example: +// +//{ +// "SystemType": "Container", +// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", +// "Owner": "docker", +// "IsDummy": false, +// "IgnoreFlushesDuringBoot": true, +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "475c2c58933b", +// "MappedDirectories": [], +// "SandboxPath": "C:\\\\control\\\\windowsfilter", +// "HvPartition": true, +// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], +// "HvRuntime": { +// "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" +// }, +// "Servicing": false +//} +func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + logrus.Debugln("libcontainerd: client.Create() with spec", spec) + + configuration := &hcsshim.ContainerConfig{ + SystemType: "Container", + Name: containerID, + Owner: defaultOwner, + IgnoreFlushesDuringBoot: false, + HostName: spec.Hostname, + HvPartition: false, + } + + if spec.Windows.Resources != nil { + if spec.Windows.Resources.CPU != nil { + if spec.Windows.Resources.CPU.Count != nil { + // This check is being done here rather than in adaptContainerSettings + // because we don't want to update the HostConfig in case this container + // is moved to a host with more CPUs than this one. + cpuCount := *spec.Windows.Resources.CPU.Count + hostCPUCount := uint64(sysinfo.NumCPU()) + if cpuCount > hostCPUCount { + logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount) + cpuCount = hostCPUCount + } + configuration.ProcessorCount = uint32(cpuCount) + } + if spec.Windows.Resources.CPU.Shares != nil { + configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) + } + if spec.Windows.Resources.CPU.Percent != nil { + configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000 + } + } + if spec.Windows.Resources.Memory != nil { + if spec.Windows.Resources.Memory.Limit != nil { + configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024 + } + } + if spec.Windows.Resources.Storage != nil { + if spec.Windows.Resources.Storage.Bps != nil { + configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps + } + if spec.Windows.Resources.Storage.Iops != nil { + configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops + } + } + } + + var layerOpt *LayerOption + for _, option := range options { + if s, ok := option.(*ServicingOption); ok { + configuration.Servicing = s.IsServicing + continue + } + if f, ok := option.(*FlushOption); ok { + configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot + continue + } + if h, ok := option.(*HyperVIsolationOption); ok { + configuration.HvPartition = h.IsHyperV + configuration.SandboxPath = h.SandboxPath + continue + } + if l, ok := option.(*LayerOption); ok { + layerOpt = l + } + if n, ok := option.(*NetworkEndpointsOption); ok { + configuration.EndpointList = n.Endpoints + configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery + continue + } + if c, ok := option.(*CredentialsOption); ok { + configuration.Credentials = c.Credentials + continue + } + } + + // We must have a layer option with at least one path + if layerOpt == nil || layerOpt.LayerPaths == nil { + return fmt.Errorf("no layer option or paths were supplied to the runtime") + } + + if configuration.HvPartition { + // Find the upper-most utility VM image, since the utility VM does not + // use layering in RS1. + // TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable. + var uvmImagePath string + for _, path := range layerOpt.LayerPaths { + fullPath := filepath.Join(path, "UtilityVM") + _, err := os.Stat(fullPath) + if err == nil { + uvmImagePath = fullPath + break + } + if !os.IsNotExist(err) { + return err + } + } + if uvmImagePath == "" { + return errors.New("utility VM image could not be found") + } + configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} + } else { + configuration.VolumePath = spec.Root.Path + } + + configuration.LayerFolderPath = layerOpt.LayerFolderPath + + for _, layerPath := range layerOpt.LayerPaths { + _, filename := filepath.Split(layerPath) + g, err := hcsshim.NameToGuid(filename) + if err != nil { + return err + } + configuration.Layers = append(configuration.Layers, hcsshim.Layer{ + ID: g.ToString(), + Path: layerPath, + }) + } + + // Add the mounts (volumes, bind mounts etc) to the structure + mds := make([]hcsshim.MappedDir, len(spec.Mounts)) + for i, mount := range spec.Mounts { + mds[i] = hcsshim.MappedDir{ + HostPath: mount.Source, + ContainerPath: mount.Destination, + ReadOnly: false, + } + for _, o := range mount.Options { + if strings.ToLower(o) == "ro" { + mds[i].ReadOnly = true + } + } + } + configuration.MappedDirectories = mds + + hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) + if err != nil { + return err + } + + // Construct a container object for calling start on it. + container := &container{ + containerCommon: containerCommon{ + process: process{ + processCommon: processCommon{ + containerID: containerID, + client: clnt, + friendlyName: InitFriendlyName, + }, + commandLine: strings.Join(spec.Process.Args, " "), + }, + processes: make(map[string]*process), + }, + ociSpec: spec, + hcsContainer: hcsContainer, + } + + container.options = options + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: %v", err) + } + } + + // Call start, and if it fails, delete the container from our + // internal structure, start will keep HCS in sync by deleting the + // container there. + logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID) + if err := container.start(attachStdio); err != nil { + clnt.deleteContainer(containerID) + return err + } + + logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID) + return nil + +} + +// AddProcess is the handler for adding a process to an already running +// container. It's called through docker exec. It returns the system pid of the +// exec'd process. +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return -1, err + } + // Note we always tell HCS to + // create stdout as it's required regardless of '-i' or '-t' options, so that + // docker can always grab the output through logs. We also tell HCS to always + // create stdin, even if it's not used - it will be closed shortly. Stderr + // is only created if it we're not -t. + createProcessParms := hcsshim.ProcessConfig{ + EmulateConsole: procToAdd.Terminal, + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: !procToAdd.Terminal, + } + createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width) + + // Take working directory from the process to add if it is defined, + // otherwise take from the first process. + if procToAdd.Cwd != "" { + createProcessParms.WorkingDirectory = procToAdd.Cwd + } else { + createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd + } + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) + createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") + createProcessParms.User = procToAdd.User.Username + + logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine) + + // Start the command running in the container. + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err) + return -1, err + } + + pid := newProcess.Pid() + + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err) + return -1, err + } + + iopipe := &IOPipe{Terminal: procToAdd.Terminal} + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + proc := &process{ + processCommon: processCommon{ + containerID: containerID, + friendlyName: processFriendlyName, + client: clnt, + systemPid: uint32(pid), + }, + commandLine: createProcessParms.CommandLine, + hcsProcess: newProcess, + } + + // Add the process to the container's list of processes + container.processes[processFriendlyName] = proc + + // Tell the engine to attach streams back to the client + if err := attachStdio(*iopipe); err != nil { + return -1, err + } + + // Spin up a go routine waiting for exit to handle cleanup + go container.waitExit(proc, false) + + return pid, nil +} + +// Signal handles `docker stop` on Windows. While Linux has support for +// the full range of signals, signals aren't really implemented on Windows. +// We fake supporting regular stop and -9 to force kill. +func (clnt *client) Signal(containerID string, sig int) error { + var ( + cont *container + err error + ) + + // Get the container as we need it to get the container handle. + clnt.lock(containerID) + defer clnt.unlock(containerID) + if cont, err = clnt.getContainer(containerID); err != nil { + return err + } + + cont.manualStopRequested = true + + logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid) + + if syscall.Signal(sig) == syscall.SIGKILL { + // Terminate the compute system + if err := cont.hcsContainer.Terminate(); err != nil { + if !hcsshim.IsPending(err) { + logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err) + } + } + } else { + // Terminate Process + if err := cont.hcsProcess.Kill(); err != nil && !hcsshim.IsAlreadyStopped(err) { + // ignore errors + logrus.Warnf("libcontainerd: failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err) + } + } + + return nil +} + +// While Linux has support for the full range of signals, signals aren't really implemented on Windows. +// We try to terminate the specified process whatever signal is requested. +func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + return p.hcsProcess.Kill() + } + } + + return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID) +} + +// Resize handles a CLI event to resize an interactive docker run or docker exec +// window. +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + h, w := uint16(height), uint16(width) + + if processFriendlyName == InitFriendlyName { + logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid) + return cont.process.hcsProcess.ResizeConsole(w, h) + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid) + return p.hcsProcess.ResizeConsole(w, h) + } + } + + return fmt.Errorf("Resize could not find containerID %s to resize", containerID) + +} + +// Pause handles pause requests for containers +func (clnt *client) Pause(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + clnt.unlock(containerID) + } + }() + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, option := range container.options { + if h, ok := option.(*HyperVIsolationOption); ok { + if !h.IsHyperV { + return errors.New("cannot pause Windows Server Containers") + } + break + } + } + + err = container.hcsContainer.Pause() + if err != nil { + return err + } + + // Unlock container before calling back into the daemon + unlockContainer = false + clnt.unlock(containerID) + + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StatePause, + }}) +} + +// Resume handles resume requests for containers +func (clnt *client) Resume(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + clnt.unlock(containerID) + } + }() + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + // This should never happen, since Windows Server Containers cannot be paused + for _, option := range container.options { + if h, ok := option.(*HyperVIsolationOption); ok { + if !h.IsHyperV { + return errors.New("cannot resume Windows Server Containers") + } + break + } + } + + err = container.hcsContainer.Resume() + if err != nil { + return err + } + + // Unlock container before calling back into the daemon + unlockContainer = false + clnt.unlock(containerID) + + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateResume, + }}) +} + +// Stats handles stats requests for containers +func (clnt *client) Stats(containerID string) (*Stats, error) { + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return nil, err + } + s, err := container.hcsContainer.Statistics() + if err != nil { + return nil, err + } + st := Stats(s) + return &st, nil +} + +// Restore is the handler for restoring a container +func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error { + // TODO Windows: Implement this. For now, just tell the backend the container exited. + logrus.Debugf("libcontainerd: Restore(%s)", containerID) + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: 1 << 31, + }}) +} + +// GetPidsForContainer returns a list of process IDs running in a container. +// Although implemented, this is not used in Windows. +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + var pids []int + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return nil, err + } + + // Add the first process + pids = append(pids, int(cont.containerCommon.systemPid)) + // And add all the exec'd processes + for _, p := range cont.processes { + pids = append(pids, int(p.processCommon.systemPid)) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is present in Windows to support docker top. In linux, the +// engine shells out to ps to get process information. On Windows, as +// the containers could be Hyper-V containers, they would not be +// visible on the container host. However, libcontainerd does have +// that information. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return nil, err + } + p, err := container.hcsContainer.ProcessList() + if err != nil { + return nil, err + } + pl := make([]Summary, len(p)) + for i := range p { + pl[i] = Summary(p[i]) + } + return pl, nil +} + +// UpdateResources updates resources for a running container. +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + // Updating resource isn't supported on Windows + // but we should return nil for enabling updating container + return nil +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + return errors.New("Windows: Containers do not support checkpoints") +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + return errors.New("Windows: Containers do not support checkpoints") +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + return nil, errors.New("Windows: Containers do not support checkpoints") +} + +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + return &ServerVersion{}, nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container.go b/vendor/github.com/docker/docker/libcontainerd/container.go new file mode 100644 index 0000000..b403213 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container.go @@ -0,0 +1,13 @@ +package libcontainerd + +const ( + // InitFriendlyName is the name given in the lookup map of processes + // for the first process started in a container. + InitFriendlyName = "init" + configFilename = "config.json" +) + +type containerCommon struct { + process + processes map[string]*process +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container_unix.go b/vendor/github.com/docker/docker/libcontainerd/container_unix.go new file mode 100644 index 0000000..61bab14 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container_unix.go @@ -0,0 +1,250 @@ +// +build linux solaris + +package libcontainerd + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/ioutils" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/tonistiigi/fifo" + "golang.org/x/net/context" +) + +type container struct { + containerCommon + + // Platform specific fields are below here. + pauseMonitor + oom bool + runtime string + runtimeArgs []string +} + +type runtime struct { + path string + args []string +} + +// WithRuntime sets the runtime to be used for the created container +func WithRuntime(path string, args []string) CreateOption { + return runtime{path, args} +} + +func (rt runtime) Apply(p interface{}) error { + if pr, ok := p.(*container); ok { + pr.runtime = rt.path + pr.runtimeArgs = rt.args + } + return nil +} + +func (ctr *container) clean() error { + if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" { + return nil + } + if _, err := os.Lstat(ctr.dir); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + if err := os.RemoveAll(ctr.dir); err != nil { + return err + } + return nil +} + +// cleanProcess removes the fifos used by an additional process. +// Caller needs to lock container ID before calling this method. +func (ctr *container) cleanProcess(id string) { + if p, ok := ctr.processes[id]; ok { + for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { + if err := os.Remove(p.fifo(i)); err != nil && !os.IsNotExist(err) { + logrus.Warnf("libcontainerd: failed to remove %v for process %v: %v", p.fifo(i), id, err) + } + } + } + delete(ctr.processes, id) +} + +func (ctr *container) spec() (*specs.Spec, error) { + var spec specs.Spec + dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +func (ctr *container) start(checkpoint string, checkpointDir string, attachStdio StdioCallback) (err error) { + spec, err := ctr.spec() + if err != nil { + return nil + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ready := make(chan struct{}) + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := ctr.openFifos(fifoCtx, spec.Process.Terminal) + if err != nil { + return err + } + + var stdinOnce sync.Once + + // we need to delay stdin closure after container start or else "stdin close" + // event will be rejected by containerd. + // stdin closure happens in attachStdio + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + go func() { + select { + case <-ready: + case <-ctx.Done(): + } + select { + case <-ready: + if err := ctr.sendCloseStdin(); err != nil { + logrus.Warnf("failed to close stdin: %+v", err) + } + default: + } + }() + }) + return err + }) + + r := &containerd.CreateContainerRequest{ + Id: ctr.containerID, + BundlePath: ctr.dir, + Stdin: ctr.fifo(syscall.Stdin), + Stdout: ctr.fifo(syscall.Stdout), + Stderr: ctr.fifo(syscall.Stderr), + Checkpoint: checkpoint, + CheckpointDir: checkpointDir, + // check to see if we are running in ramdisk to disable pivot root + NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", + Runtime: ctr.runtime, + RuntimeArgs: ctr.runtimeArgs, + } + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + ctr.closeFifos(iopipe) + return err + } + + resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) + if err != nil { + ctr.closeFifos(iopipe) + return err + } + ctr.systemPid = systemPid(resp.Container) + close(ready) + + return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateStart, + Pid: ctr.systemPid, + }}) +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + dir: ctr.dir, + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +func (ctr *container) handleEvent(e *containerd.Event) error { + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + switch e.Type { + case StateExit, StatePause, StateResume, StateOOM: + st := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: e.Type, + ExitCode: e.Status, + }, + OOMKilled: e.Type == StateExit && ctr.oom, + } + if e.Type == StateOOM { + ctr.oom = true + } + if e.Type == StateExit && e.Pid != InitFriendlyName { + st.ProcessID = e.Pid + st.State = StateExitProcess + } + + // Remove process from list if we have exited + switch st.State { + case StateExit: + ctr.clean() + ctr.client.deleteContainer(e.Id) + case StateExitProcess: + ctr.cleanProcess(st.ProcessID) + } + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Errorf("libcontainerd: backend.StateChanged(): %v", err) + } + if e.Type == StatePause || e.Type == StateResume { + ctr.pauseMonitor.handle(e.Type) + } + if e.Type == StateExit { + if en := ctr.client.getExitNotifier(e.Id); en != nil { + en.close() + } + } + }) + + default: + logrus.Debugf("libcontainerd: event unhandled: %+v", e) + } + return nil +} + +// discardFifos attempts to fully read the container fifos to unblock processes +// that may be blocked on the writer side. +func (ctr *container) discardFifos() { + ctx, _ := context.WithTimeout(context.Background(), 3*time.Second) + for _, i := range []int{syscall.Stdout, syscall.Stderr} { + f, err := fifo.OpenFifo(ctx, ctr.fifo(i), syscall.O_RDONLY|syscall.O_NONBLOCK, 0) + if err != nil { + logrus.Warnf("error opening fifo %v for discarding: %+v", f, err) + continue + } + go func() { + io.Copy(ioutil.Discard, f) + }() + } +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container_windows.go b/vendor/github.com/docker/docker/libcontainerd/container_windows.go new file mode 100644 index 0000000..9b19650 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container_windows.go @@ -0,0 +1,311 @@ +package libcontainerd + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "syscall" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runtime-spec/specs-go" +) + +type container struct { + containerCommon + + // Platform specific fields are below here. There are none presently on Windows. + options []CreateOption + + // The ociSpec is required, as client.Create() needs a spec, + // but can be called from the RestartManager context which does not + // otherwise have access to the Spec + ociSpec specs.Spec + + manualStopRequested bool + hcsContainer hcsshim.Container +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +// start starts a created container. +// Caller needs to lock container ID before calling this method. +func (ctr *container) start(attachStdio StdioCallback) error { + var err error + isServicing := false + + for _, option := range ctr.options { + if s, ok := option.(*ServicingOption); ok && s.IsServicing { + isServicing = true + } + } + + // Start the container. If this is a servicing container, this call will block + // until the container is done with the servicing execution. + logrus.Debugln("libcontainerd: starting container ", ctr.containerID) + if err = ctr.hcsContainer.Start(); err != nil { + logrus.Errorf("libcontainerd: failed to start container: %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed Start. %s", err) + } else { + logrus.Debugln("libcontainerd: cleaned up after failed Start by calling Terminate") + } + return err + } + + // Note we always tell HCS to + // create stdout as it's required regardless of '-i' or '-t' options, so that + // docker can always grab the output through logs. We also tell HCS to always + // create stdin, even if it's not used - it will be closed shortly. Stderr + // is only created if it we're not -t. + createProcessParms := &hcsshim.ProcessConfig{ + EmulateConsole: ctr.ociSpec.Process.Terminal, + WorkingDirectory: ctr.ociSpec.Process.Cwd, + CreateStdInPipe: !isServicing, + CreateStdOutPipe: !isServicing, + CreateStdErrPipe: !ctr.ociSpec.Process.Terminal && !isServicing, + } + createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width) + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) + createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") + createProcessParms.User = ctr.ociSpec.Process.User.Username + + // Start the command running in the container. + newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: CreateProcess() failed %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed CreateProcess. %s", err) + } else { + logrus.Debugln("libcontainerd: cleaned up after failed CreateProcess by calling Terminate") + } + return err + } + + pid := newProcess.Pid() + + // Save the hcs Process and PID + ctr.process.friendlyName = InitFriendlyName + ctr.process.hcsProcess = newProcess + + // If this is a servicing container, wait on the process synchronously here and + // if it succeeds, wait for it cleanly shutdown and merge into the parent container. + if isServicing { + exitCode := ctr.waitProcessExitCode(&ctr.process) + + if exitCode != 0 { + if err := ctr.terminate(); err != nil { + logrus.Warnf("libcontainerd: terminating servicing container %s failed: %s", ctr.containerID, err) + } + return fmt.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.containerID, exitCode) + } + + return ctr.hcsContainer.WaitTimeout(time.Minute * 5) + } + + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: failed to get stdio pipes: %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed Stdio. %s", err) + } + return err + } + + iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal} + + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + // Save the PID + logrus.Debugf("libcontainerd: process started - PID %d", pid) + ctr.systemPid = uint32(pid) + + // Spin up a go routine waiting for exit to handle cleanup + go ctr.waitExit(&ctr.process, true) + + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + // OK to return the error here, as waitExit will handle tear-down in HCS + return err + } + + // Tell the docker engine that the container has started. + si := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateStart, + Pid: ctr.systemPid, // Not sure this is needed? Double-check monitor.go in daemon BUGBUG @jhowardmsft + }} + logrus.Debugf("libcontainerd: start() completed OK, %+v", si) + return ctr.client.backend.StateChanged(ctr.containerID, si) + +} + +// waitProcessExitCode will wait for the given process to exit and return its error code. +func (ctr *container) waitProcessExitCode(process *process) int { + // Block indefinitely for the process to exit. + err := process.hcsProcess.Wait() + if err != nil { + if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { + logrus.Warnf("libcontainerd: Wait() failed (container may have been killed): %s", err) + } + // Fall through here, do not return. This ensures we attempt to continue the + // shutdown in HCS and tell the docker engine that the process/container + // has exited to avoid a container being dropped on the floor. + } + + exitCode, err := process.hcsProcess.ExitCode() + if err != nil { + if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { + logrus.Warnf("libcontainerd: unable to get exit code from container %s", ctr.containerID) + } + // Since we got an error retrieving the exit code, make sure that the code we return + // doesn't incorrectly indicate success. + exitCode = -1 + + // Fall through here, do not return. This ensures we attempt to continue the + // shutdown in HCS and tell the docker engine that the process/container + // has exited to avoid a container being dropped on the floor. + } + + return exitCode +} + +// waitExit runs as a goroutine waiting for the process to exit. It's +// equivalent to (in the linux containerd world) where events come in for +// state change notifications from containerd. +func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) error { + logrus.Debugln("libcontainerd: waitExit() on pid", process.systemPid) + + exitCode := ctr.waitProcessExitCode(process) + // Lock the container while shutting down + ctr.client.lock(ctr.containerID) + + // Assume the container has exited + si := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: uint32(exitCode), + Pid: process.systemPid, + ProcessID: process.friendlyName, + }, + UpdatePending: false, + } + + // But it could have been an exec'd process which exited + if !isFirstProcessToStart { + si.State = StateExitProcess + ctr.cleanProcess(process.friendlyName) + } else { + updatePending, err := ctr.hcsContainer.HasPendingUpdates() + if err != nil { + logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err) + } else { + si.UpdatePending = updatePending + } + + logrus.Debugf("libcontainerd: shutting down container %s", ctr.containerID) + if err := ctr.shutdown(); err != nil { + logrus.Debugf("libcontainerd: failed to shutdown container %s", ctr.containerID) + } else { + logrus.Debugf("libcontainerd: completed shutting down container %s", ctr.containerID) + } + if err := ctr.hcsContainer.Close(); err != nil { + logrus.Error(err) + } + + // Remove process from list if we have exited + if si.State == StateExit { + ctr.client.deleteContainer(ctr.containerID) + } + } + + if err := process.hcsProcess.Close(); err != nil { + logrus.Errorf("libcontainerd: hcsProcess.Close(): %v", err) + } + + // Unlock here before we call back into the daemon to update state + ctr.client.unlock(ctr.containerID) + + // Call into the backend to notify it of the state change. + logrus.Debugf("libcontainerd: waitExit() calling backend.StateChanged %+v", si) + if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { + logrus.Error(err) + } + + logrus.Debugf("libcontainerd: waitExit() completed OK, %+v", si) + + return nil +} + +// cleanProcess removes process from the map. +// Caller needs to lock container ID before calling this method. +func (ctr *container) cleanProcess(id string) { + delete(ctr.processes, id) +} + +// shutdown shuts down the container in HCS +// Caller needs to lock container ID before calling this method. +func (ctr *container) shutdown() error { + const shutdownTimeout = time.Minute * 5 + err := ctr.hcsContainer.Shutdown() + if hcsshim.IsPending(err) { + // Explicit timeout to avoid a (remote) possibility that shutdown hangs indefinitely. + err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error shutting down container %s %v calling terminate", ctr.containerID, err) + if err := ctr.terminate(); err != nil { + return err + } + return err + } + + return nil +} + +// terminate terminates the container in HCS +// Caller needs to lock container ID before calling this method. +func (ctr *container) terminate() error { + const terminateTimeout = time.Minute * 5 + err := ctr.hcsContainer.Terminate() + + if hcsshim.IsPending(err) { + err = ctr.hcsContainer.WaitTimeout(terminateTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error terminating container %s %v", ctr.containerID, err) + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/oom_linux.go b/vendor/github.com/docker/docker/libcontainerd/oom_linux.go new file mode 100644 index 0000000..e126b7a --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/oom_linux.go @@ -0,0 +1,31 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/system" +) + +func setOOMScore(pid, score int) error { + oomScoreAdjPath := fmt.Sprintf("/proc/%d/oom_score_adj", pid) + f, err := os.OpenFile(oomScoreAdjPath, os.O_WRONLY, 0) + if err != nil { + return err + } + stringScore := strconv.Itoa(score) + _, err = f.WriteString(stringScore) + f.Close() + if os.IsPermission(err) { + // Setting oom_score_adj does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. + if !system.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to %s", stringScore, oomScoreAdjPath) + } + return nil + } + return err +} diff --git a/vendor/github.com/docker/docker/libcontainerd/oom_solaris.go b/vendor/github.com/docker/docker/libcontainerd/oom_solaris.go new file mode 100644 index 0000000..2ebe5e8 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/oom_solaris.go @@ -0,0 +1,5 @@ +package libcontainerd + +func setOOMScore(pid, score int) error { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go new file mode 100644 index 0000000..4f3766d --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go @@ -0,0 +1,42 @@ +// +build !windows + +package libcontainerd + +import ( + "sync" +) + +// pauseMonitor is helper to get notifications from pause state changes. +type pauseMonitor struct { + sync.Mutex + waiters map[string][]chan struct{} +} + +func (m *pauseMonitor) handle(t string) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + return + } + q, ok := m.waiters[t] + if !ok { + return + } + if len(q) > 0 { + close(q[0]) + m.waiters[t] = q[1:] + } +} + +func (m *pauseMonitor) append(t string, waiter chan struct{}) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + m.waiters = make(map[string][]chan struct{}) + } + _, ok := m.waiters[t] + if !ok { + m.waiters[t] = make([]chan struct{}, 0) + } + m.waiters[t] = append(m.waiters[t], waiter) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process.go b/vendor/github.com/docker/docker/libcontainerd/process.go new file mode 100644 index 0000000..57562c8 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process.go @@ -0,0 +1,18 @@ +package libcontainerd + +// processCommon are the platform common fields as part of the process structure +// which keeps the state for the main container process, as well as any exec +// processes. +type processCommon struct { + client *client + + // containerID is the Container ID + containerID string + + // friendlyName is an identifier for the process (or `InitFriendlyName` + // for the first process) + friendlyName string + + // systemPid is the PID of the main container process + systemPid uint32 +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process_unix.go b/vendor/github.com/docker/docker/libcontainerd/process_unix.go new file mode 100644 index 0000000..506fca6 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process_unix.go @@ -0,0 +1,107 @@ +// +build linux solaris + +package libcontainerd + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + goruntime "runtime" + "strings" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/tonistiigi/fifo" + "golang.org/x/net/context" + "golang.org/x/sys/unix" +) + +var fdNames = map[int]string{ + unix.Stdin: "stdin", + unix.Stdout: "stdout", + unix.Stderr: "stderr", +} + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon + + // Platform specific fields are below here. + dir string +} + +func (p *process) openFifos(ctx context.Context, terminal bool) (pipe *IOPipe, err error) { + if err := os.MkdirAll(p.dir, 0700); err != nil { + return nil, err + } + + io := &IOPipe{} + + io.Stdin, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdin), unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + io.Stdin.Close() + } + }() + + io.Stdout, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdout), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + io.Stdout.Close() + } + }() + + if goruntime.GOOS == "solaris" || !terminal { + // For Solaris terminal handling is done exclusively by the runtime therefore we make no distinction + // in the processing for terminal and !terminal cases. + io.Stderr, err = fifo.OpenFifo(ctx, p.fifo(unix.Stderr), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + io.Stderr.Close() + } + }() + } else { + io.Stderr = ioutil.NopCloser(emptyReader{}) + } + + return io, nil +} + +func (p *process) sendCloseStdin() error { + _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: p.containerID, + Pid: p.friendlyName, + CloseStdin: true, + }) + if err != nil && (strings.Contains(err.Error(), "container not found") || strings.Contains(err.Error(), "process not found")) { + return nil + } + return err +} + +func (p *process) closeFifos(io *IOPipe) { + io.Stdin.Close() + io.Stdout.Close() + io.Stderr.Close() +} + +type emptyReader struct{} + +func (r emptyReader) Read(b []byte) (int, error) { + return 0, io.EOF +} + +func (p *process) fifo(index int) string { + return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process_windows.go b/vendor/github.com/docker/docker/libcontainerd/process_windows.go new file mode 100644 index 0000000..57ecc94 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process_windows.go @@ -0,0 +1,51 @@ +package libcontainerd + +import ( + "io" + "sync" + + "github.com/Microsoft/hcsshim" + "github.com/docker/docker/pkg/ioutils" +) + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon + + // Platform specific fields are below here. + + // commandLine is to support returning summary information for docker top + commandLine string + hcsProcess hcsshim.Process +} + +type autoClosingReader struct { + io.ReadCloser + sync.Once +} + +func (r *autoClosingReader) Read(b []byte) (n int, err error) { + n, err = r.ReadCloser.Read(b) + if err == io.EOF { + r.Once.Do(func() { r.ReadCloser.Close() }) + } + return +} + +func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(pipe, func() error { + if err := pipe.Close(); err != nil { + return err + } + + err := process.CloseStdin() + if err != nil && !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyClosed(err) { + // This error will occur if the compute system is currently shutting down + if perr, ok := err.(*hcsshim.ProcessError); ok && perr.Err != hcsshim.ErrVmcomputeOperationInvalidState { + return err + } + } + + return nil + }) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/queue_unix.go b/vendor/github.com/docker/docker/libcontainerd/queue_unix.go new file mode 100644 index 0000000..b848b98 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/queue_unix.go @@ -0,0 +1,31 @@ +// +build linux solaris + +package libcontainerd + +import "sync" + +type queue struct { + sync.Mutex + fns map[string]chan struct{} +} + +func (q *queue) append(id string, f func()) { + q.Lock() + defer q.Unlock() + + if q.fns == nil { + q.fns = make(map[string]chan struct{}) + } + + done := make(chan struct{}) + + fn, ok := q.fns[id] + q.fns[id] = done + go func() { + if ok { + <-fn + } + f() + close(done) + }() +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote.go b/vendor/github.com/docker/docker/libcontainerd/remote.go new file mode 100644 index 0000000..9031e3a --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote.go @@ -0,0 +1,20 @@ +package libcontainerd + +// Remote on Linux defines the accesspoint to the containerd grpc API. +// Remote on Windows is largely an unimplemented interface as there is +// no remote containerd. +type Remote interface { + // Client returns a new Client instance connected with given Backend. + Client(Backend) (Client, error) + // Cleanup stops containerd if it was started by libcontainerd. + // Note this is not used on Windows as there is no remote containerd. + Cleanup() + // UpdateOptions allows various remote options to be updated at runtime. + UpdateOptions(...RemoteOption) error +} + +// RemoteOption allows to configure parameters of remotes. +// This is unused on Windows. +type RemoteOption interface { + Apply(Remote) error +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_unix.go b/vendor/github.com/docker/docker/libcontainerd/remote_unix.go new file mode 100644 index 0000000..64a2864 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_unix.go @@ -0,0 +1,544 @@ +// +build linux solaris + +package libcontainerd + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "path/filepath" + goruntime "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/locker" + sysinfo "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/transport" +) + +const ( + maxConnectionRetryCount = 3 + containerdHealthCheckTimeout = 3 * time.Second + containerdShutdownTimeout = 15 * time.Second + containerdBinary = "docker-containerd" + containerdPidFilename = "docker-containerd.pid" + containerdSockFilename = "docker-containerd.sock" + containerdStateDir = "containerd" + eventTimestampFilename = "event.ts" +) + +type remote struct { + sync.RWMutex + apiClient containerd.APIClient + daemonPid int + stateDir string + rpcAddr string + startDaemon bool + closeManually bool + debugLog bool + rpcConn *grpc.ClientConn + clients []*client + eventTsPath string + runtime string + runtimeArgs []string + daemonWaitCh chan struct{} + liveRestore bool + oomScore int + restoreFromTimestamp *timestamp.Timestamp +} + +// New creates a fresh instance of libcontainerd remote. +func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err) + } + }() + r := &remote{ + stateDir: stateDir, + daemonPid: -1, + eventTsPath: filepath.Join(stateDir, eventTimestampFilename), + } + for _, option := range options { + if err := option.Apply(r); err != nil { + return nil, err + } + } + + if err := sysinfo.MkdirAll(stateDir, 0700); err != nil { + return nil, err + } + + if r.rpcAddr == "" { + r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) + } + + if r.startDaemon { + if err := r.runContainerdDaemon(); err != nil { + return nil, err + } + } + + // don't output the grpc reconnect logging + grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) + dialOpts := append([]grpc.DialOption{grpc.WithInsecure()}, + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + }), + ) + conn, err := grpc.Dial(r.rpcAddr, dialOpts...) + if err != nil { + return nil, fmt.Errorf("error connecting to containerd: %v", err) + } + + r.rpcConn = conn + r.apiClient = containerd.NewAPIClient(conn) + + // Get the timestamp to restore from + t := r.getLastEventTimestamp() + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) + } + r.restoreFromTimestamp = tsp + + go r.handleConnectionChange() + + if err := r.startEventsMonitor(); err != nil { + return nil, err + } + + return r, nil +} + +func (r *remote) UpdateOptions(options ...RemoteOption) error { + for _, option := range options { + if err := option.Apply(r); err != nil { + return err + } + } + return nil +} + +func (r *remote) handleConnectionChange() { + var transientFailureCount = 0 + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + healthClient := grpc_health_v1.NewHealthClient(r.rpcConn) + + for { + <-ticker.C + ctx, cancel := context.WithTimeout(context.Background(), containerdHealthCheckTimeout) + _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) + cancel() + if err == nil { + continue + } + + logrus.Debugf("libcontainerd: containerd health check returned error: %v", err) + + if r.daemonPid != -1 { + if strings.Contains(err.Error(), "is closing") { + // Well, we asked for it to stop, just return + return + } + // all other errors are transient + // Reset state to be notified of next failure + transientFailureCount++ + if transientFailureCount >= maxConnectionRetryCount { + transientFailureCount = 0 + if utils.IsProcessAlive(r.daemonPid) { + utils.KillProcess(r.daemonPid) + } + <-r.daemonWaitCh + if err := r.runContainerdDaemon(); err != nil { //FIXME: Handle error + logrus.Errorf("libcontainerd: error restarting containerd: %v", err) + } + continue + } + } + } +} + +func (r *remote) Cleanup() { + if r.daemonPid == -1 { + return + } + r.closeManually = true + r.rpcConn.Close() + // Ask the daemon to quit + syscall.Kill(r.daemonPid, syscall.SIGTERM) + + // Wait up to 15secs for it to stop + for i := time.Duration(0); i < containerdShutdownTimeout; i += time.Second { + if !utils.IsProcessAlive(r.daemonPid) { + break + } + time.Sleep(time.Second) + } + + if utils.IsProcessAlive(r.daemonPid) { + logrus.Warnf("libcontainerd: containerd (%d) didn't stop within 15 secs, killing it\n", r.daemonPid) + syscall.Kill(r.daemonPid, syscall.SIGKILL) + } + + // cleanup some files + os.Remove(filepath.Join(r.stateDir, containerdPidFilename)) + os.Remove(filepath.Join(r.stateDir, containerdSockFilename)) +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + remote: r, + exitNotifiers: make(map[string]*exitNotifier), + liveRestore: r.liveRestore, + } + + r.Lock() + r.clients = append(r.clients, c) + r.Unlock() + return c, nil +} + +func (r *remote) updateEventTimestamp(t time.Time) { + f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) + if err != nil { + logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) + return + } + defer f.Close() + + b, err := t.MarshalText() + if err != nil { + logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) + return + } + + n, err := f.Write(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) + f.Truncate(0) + return + } +} + +func (r *remote) getLastEventTimestamp() time.Time { + t := time.Now() + + fi, err := os.Stat(r.eventTsPath) + if os.IsNotExist(err) || fi.Size() == 0 { + return t + } + + f, err := os.Open(r.eventTsPath) + if err != nil { + logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err) + return t + } + defer f.Close() + + b := make([]byte, fi.Size()) + n, err := f.Read(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err) + return t + } + + t.UnmarshalText(b) + + return t +} + +func (r *remote) startEventsMonitor() error { + // First, get past events + t := r.getLastEventTimestamp() + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) + } + er := &containerd.EventsRequest{ + Timestamp: tsp, + } + events, err := r.apiClient.Events(context.Background(), er, grpc.FailFast(false)) + if err != nil { + return err + } + go r.handleEventStream(events) + return nil +} + +func (r *remote) handleEventStream(events containerd.API_EventsClient) { + for { + e, err := events.Recv() + if err != nil { + if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && + r.closeManually { + // ignore error if grpc remote connection is closed manually + return + } + logrus.Errorf("libcontainerd: failed to receive event from containerd: %v", err) + go r.startEventsMonitor() + return + } + + logrus.Debugf("libcontainerd: received containerd event: %#v", e) + + var container *container + var c *client + r.RLock() + for _, c = range r.clients { + container, err = c.getContainer(e.Id) + if err == nil { + break + } + } + r.RUnlock() + if container == nil { + logrus.Warnf("libcontainerd: unknown container %s", e.Id) + continue + } + + if err := container.handleEvent(e); err != nil { + logrus.Errorf("libcontainerd: error processing state change for %s: %v", e.Id, err) + } + + tsp, err := ptypes.Timestamp(e.Timestamp) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert event timestamp: %q", err) + continue + } + + r.updateEventTimestamp(tsp) + } +} + +func (r *remote) runContainerdDaemon() error { + pidFilename := filepath.Join(r.stateDir, containerdPidFilename) + f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return err + } + defer f.Close() + + // File exist, check if the daemon is alive + b := make([]byte, 8) + n, err := f.Read(b) + if err != nil && err != io.EOF { + return err + } + + if n > 0 { + pid, err := strconv.ParseUint(string(b[:n]), 10, 64) + if err != nil { + return err + } + if utils.IsProcessAlive(int(pid)) { + logrus.Infof("libcontainerd: previous instance of containerd still alive (%d)", pid) + r.daemonPid = int(pid) + return nil + } + } + + // rewind the file + _, err = f.Seek(0, os.SEEK_SET) + if err != nil { + return err + } + + // Truncate it + err = f.Truncate(0) + if err != nil { + return err + } + + // Start a new instance + args := []string{ + "-l", fmt.Sprintf("unix://%s", r.rpcAddr), + "--metrics-interval=0", + "--start-timeout", "2m", + "--state-dir", filepath.Join(r.stateDir, containerdStateDir), + } + if goruntime.GOOS == "solaris" { + args = append(args, "--shim", "containerd-shim", "--runtime", "runc") + } else { + args = append(args, "--shim", "docker-containerd-shim") + if r.runtime != "" { + args = append(args, "--runtime") + args = append(args, r.runtime) + } + } + if r.debugLog { + args = append(args, "--debug") + } + if len(r.runtimeArgs) > 0 { + for _, v := range r.runtimeArgs { + args = append(args, "--runtime-args") + args = append(args, v) + } + logrus.Debugf("libcontainerd: runContainerdDaemon: runtimeArgs: %s", args) + } + + cmd := exec.Command(containerdBinary, args...) + // redirect containerd logs to docker logs + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.SysProcAttr = setSysProcAttr(true) + cmd.Env = nil + // clear the NOTIFY_SOCKET from the env when starting containerd + for _, e := range os.Environ() { + if !strings.HasPrefix(e, "NOTIFY_SOCKET") { + cmd.Env = append(cmd.Env, e) + } + } + if err := cmd.Start(); err != nil { + return err + } + logrus.Infof("libcontainerd: new containerd process, pid: %d", cmd.Process.Pid) + if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil { + utils.KillProcess(cmd.Process.Pid) + return err + } + if _, err := f.WriteString(fmt.Sprintf("%d", cmd.Process.Pid)); err != nil { + utils.KillProcess(cmd.Process.Pid) + return err + } + + r.daemonWaitCh = make(chan struct{}) + go func() { + cmd.Wait() + close(r.daemonWaitCh) + }() // Reap our child when needed + r.daemonPid = cmd.Process.Pid + return nil +} + +// WithRemoteAddr sets the external containerd socket to connect to. +func WithRemoteAddr(addr string) RemoteOption { + return rpcAddr(addr) +} + +type rpcAddr string + +func (a rpcAddr) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.rpcAddr = string(a) + return nil + } + return fmt.Errorf("WithRemoteAddr option not supported for this remote") +} + +// WithRuntimePath sets the path of the runtime to be used as the +// default by containerd +func WithRuntimePath(rt string) RemoteOption { + return runtimePath(rt) +} + +type runtimePath string + +func (rt runtimePath) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtime = string(rt) + return nil + } + return fmt.Errorf("WithRuntime option not supported for this remote") +} + +// WithRuntimeArgs sets the list of runtime args passed to containerd +func WithRuntimeArgs(args []string) RemoteOption { + return runtimeArgs(args) +} + +type runtimeArgs []string + +func (rt runtimeArgs) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtimeArgs = rt + return nil + } + return fmt.Errorf("WithRuntimeArgs option not supported for this remote") +} + +// WithStartDaemon defines if libcontainerd should also run containerd daemon. +func WithStartDaemon(start bool) RemoteOption { + return startDaemon(start) +} + +type startDaemon bool + +func (s startDaemon) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.startDaemon = bool(s) + return nil + } + return fmt.Errorf("WithStartDaemon option not supported for this remote") +} + +// WithDebugLog defines if containerd debug logs will be enabled for daemon. +func WithDebugLog(debug bool) RemoteOption { + return debugLog(debug) +} + +type debugLog bool + +func (d debugLog) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.debugLog = bool(d) + return nil + } + return fmt.Errorf("WithDebugLog option not supported for this remote") +} + +// WithLiveRestore defines if containers are stopped on shutdown or restored. +func WithLiveRestore(v bool) RemoteOption { + return liveRestore(v) +} + +type liveRestore bool + +func (l liveRestore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.liveRestore = bool(l) + for _, c := range remote.clients { + c.liveRestore = bool(l) + } + return nil + } + return fmt.Errorf("WithLiveRestore option not supported for this remote") +} + +// WithOOMScore defines the oom_score_adj to set for the containerd process. +func WithOOMScore(score int) RemoteOption { + return oomScore(score) +} + +type oomScore int + +func (o oomScore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.oomScore = int(o) + return nil + } + return fmt.Errorf("WithOOMScore option not supported for this remote") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_windows.go b/vendor/github.com/docker/docker/libcontainerd/remote_windows.go new file mode 100644 index 0000000..74c1044 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_windows.go @@ -0,0 +1,36 @@ +package libcontainerd + +import "github.com/docker/docker/pkg/locker" + +type remote struct { +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + } + return c, nil +} + +// Cleanup is a no-op on Windows. It is here to implement the interface. +func (r *remote) Cleanup() { +} + +func (r *remote) UpdateOptions(opts ...RemoteOption) error { + return nil +} + +// New creates a fresh instance of libcontainerd remote. On Windows, +// this is not used as there is no remote containerd process. +func New(_ string, _ ...RemoteOption) (Remote, error) { + return &remote{}, nil +} + +// WithLiveRestore is a noop on windows. +func WithLiveRestore(v bool) RemoteOption { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types.go b/vendor/github.com/docker/docker/libcontainerd/types.go new file mode 100644 index 0000000..3d981e3 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types.go @@ -0,0 +1,75 @@ +package libcontainerd + +import ( + "io" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +// State constants used in state change reporting. +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateRestore = "restore" + StateExitProcess = "exit-process" + StateOOM = "oom" // fake state +) + +// CommonStateInfo contains the state info common to all platforms. +type CommonStateInfo struct { // FIXME: event? + State string + Pid uint32 + ExitCode uint32 + ProcessID string +} + +// Backend defines callbacks that the client of the library needs to implement. +type Backend interface { + StateChanged(containerID string, state StateInfo) error +} + +// Client provides access to containerd features. +type Client interface { + GetServerVersion(ctx context.Context) (*ServerVersion, error) + Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error + Signal(containerID string, sig int) error + SignalProcess(containerID string, processFriendlyName string, sig int) error + AddProcess(ctx context.Context, containerID, processFriendlyName string, process Process, attachStdio StdioCallback) (int, error) + Resize(containerID, processFriendlyName string, width, height int) error + Pause(containerID string) error + Resume(containerID string) error + Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error + Stats(containerID string) (*Stats, error) + GetPidsForContainer(containerID string) ([]int, error) + Summary(containerID string) ([]Summary, error) + UpdateResources(containerID string, resources Resources) error + CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error + DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error + ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) +} + +// CreateOption allows to configure parameters of container creation. +type CreateOption interface { + Apply(interface{}) error +} + +// StdioCallback is called to connect a container or process stdio. +type StdioCallback func(IOPipe) error + +// IOPipe contains the stdio streams. +type IOPipe struct { + Stdin io.WriteCloser + Stdout io.ReadCloser + Stderr io.ReadCloser + Terminal bool // Whether stderr is connected on Windows +} + +// ServerVersion contains version information as retrieved from the +// server +type ServerVersion struct { + containerd.GetServerVersionResponse +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types_linux.go b/vendor/github.com/docker/docker/libcontainerd/types_linux.go new file mode 100644 index 0000000..cc2a17a --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types_linux.go @@ -0,0 +1,49 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // User specifies user information for the process. + User *specs.User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd *string `json:"cwd"` + // Capabilities are linux capabilities that are kept for the container. + Capabilities []string `json:"capabilities,omitempty"` + // Rlimits specifies rlimit options to apply to the process. + Rlimits []specs.Rlimit `json:"rlimits,omitempty"` + // ApparmorProfile specifies the apparmor profile for the container. + ApparmorProfile *string `json:"apparmorProfile,omitempty"` + // SelinuxLabel specifies the selinux context that the container process is run as. + SelinuxLabel *string `json:"selinuxLabel,omitempty"` +} + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo + OOMKilled bool +} + +// Stats contains a stats properties from containerd. +type Stats containerd.StatsResponse + +// Summary contains a container summary from containerd +type Summary struct{} + +// Resources defines updatable container resource values. +type Resources containerd.UpdateResource + +// Checkpoints contains the details of a checkpoint +type Checkpoints containerd.ListCheckpointResponse diff --git a/vendor/github.com/docker/docker/libcontainerd/types_solaris.go b/vendor/github.com/docker/docker/libcontainerd/types_solaris.go new file mode 100644 index 0000000..dbafef6 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types_solaris.go @@ -0,0 +1,43 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // User specifies user information for the process. + User *specs.User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd *string `json:"cwd"` + // Capabilities are linux capabilities that are kept for the container. + Capabilities []string `json:"capabilities,omitempty"` +} + +// Stats contains a stats properties from containerd. +type Stats struct{} + +// Summary contains a container summary from containerd +type Summary struct{} + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo + OOMKilled bool +} + +// Resources defines updatable container resource values. +type Resources struct{} + +// Checkpoints contains the details of a checkpoint +type Checkpoints containerd.ListCheckpointResponse diff --git a/vendor/github.com/docker/docker/libcontainerd/types_windows.go b/vendor/github.com/docker/docker/libcontainerd/types_windows.go new file mode 100644 index 0000000..24a9a96 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types_windows.go @@ -0,0 +1,79 @@ +package libcontainerd + +import ( + "github.com/Microsoft/hcsshim" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Process contains information to start a specific application inside the container. +type Process specs.Process + +// Summary contains a ProcessList item from HCS to support `top` +type Summary hcsshim.ProcessListItem + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo + + UpdatePending bool // Indicates that there are some update operations pending that should be completed by a servicing container. +} + +// Stats contains statics from HCS +type Stats hcsshim.Statistics + +// Resources defines updatable container resource values. +type Resources struct{} + +// ServicingOption is a CreateOption with a no-op application that signifies +// the container needs to be used for a Windows servicing operation. +type ServicingOption struct { + IsServicing bool +} + +// FlushOption is a CreateOption that signifies if the container should be +// started with flushes ignored until boot has completed. This is an optimisation +// for first boot of a container. +type FlushOption struct { + IgnoreFlushesDuringBoot bool +} + +// HyperVIsolationOption is a CreateOption that indicates whether the runtime +// should start the container as a Hyper-V container, and if so, the sandbox path. +type HyperVIsolationOption struct { + IsHyperV bool + SandboxPath string `json:",omitempty"` +} + +// LayerOption is a CreateOption that indicates to the runtime the layer folder +// and layer paths for a container. +type LayerOption struct { + // LayerFolder is the path to the current layer folder. Empty for Hyper-V containers. + LayerFolderPath string `json:",omitempty"` + // Layer paths of the parent layers + LayerPaths []string +} + +// NetworkEndpointsOption is a CreateOption that provides the runtime list +// of network endpoints to which a container should be attached during its creation. +type NetworkEndpointsOption struct { + Endpoints []string + AllowUnqualifiedDNSQuery bool +} + +// CredentialsOption is a CreateOption that indicates the credentials from +// a credential spec to be used to the runtime +type CredentialsOption struct { + Credentials string +} + +// Checkpoint holds the details of a checkpoint (not supported in windows) +type Checkpoint struct { + Name string +} + +// Checkpoints contains the details of a checkpoint +type Checkpoints struct { + Checkpoints []*Checkpoint +} diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_linux.go b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go new file mode 100644 index 0000000..78828bc --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go @@ -0,0 +1,62 @@ +package libcontainerd + +import ( + "syscall" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func hostIDFromMap(id uint32, mp []specs.IDMapping) int { + for _, m := range mp { + if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { + return int(m.HostID + id - m.ContainerID) + } + } + return 0 +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { + for _, r := range sr { + cr = append(cr, &containerd.Rlimit{ + Type: r.Type, + Hard: r.Hard, + Soft: r.Soft, + }) + } + return +} + +// setPDeathSig sets the parent death signal to SIGKILL +func setSysProcAttr(sid bool) *syscall.SysProcAttr { + return &syscall.SysProcAttr{ + Setsid: sid, + Pdeathsig: syscall.SIGKILL, + } +} diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go b/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go new file mode 100644 index 0000000..49632b4 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go @@ -0,0 +1,27 @@ +package libcontainerd + +import ( + "syscall" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + return 0, 0, nil +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +// setPDeathSig sets the parent death signal to SIGKILL +func setSysProcAttr(sid bool) *syscall.SysProcAttr { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_windows.go b/vendor/github.com/docker/docker/libcontainerd/utils_windows.go new file mode 100644 index 0000000..41ac40d --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_windows.go @@ -0,0 +1,46 @@ +package libcontainerd + +import "strings" + +// setupEnvironmentVariables converts a string array of environment variables +// into a map as required by the HCS. Source array is in format [v1=k1] [v2=k2] etc. +func setupEnvironmentVariables(a []string) map[string]string { + r := make(map[string]string) + for _, s := range a { + arr := strings.SplitN(s, "=", 2) + if len(arr) == 2 { + r[arr[0]] = arr[1] + } + } + return r +} + +// Apply for a servicing option is a no-op. +func (s *ServicingOption) Apply(interface{}) error { + return nil +} + +// Apply for the flush option is a no-op. +func (f *FlushOption) Apply(interface{}) error { + return nil +} + +// Apply for the hypervisolation option is a no-op. +func (h *HyperVIsolationOption) Apply(interface{}) error { + return nil +} + +// Apply for the layer option is a no-op. +func (h *LayerOption) Apply(interface{}) error { + return nil +} + +// Apply for the network endpoints option is a no-op. +func (s *NetworkEndpointsOption) Apply(interface{}) error { + return nil +} + +// Apply for the credentials option is a no-op. +func (s *CredentialsOption) Apply(interface{}) error { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go b/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go new file mode 100644 index 0000000..f3679bf --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go @@ -0,0 +1,13 @@ +package libcontainerd + +import ( + "testing" +) + +func TestEnvironmentParsing(t *testing.T) { + env := []string{"foo=bar", "car=hat", "a=b=c"} + result := setupEnvironmentVariables(env) + if len(result) != 3 || result["foo"] != "bar" || result["car"] != "hat" || result["a"] != "b=c" { + t.Fatalf("Expected map[foo:bar car:hat a:b=c], got %v", result) + } +} diff --git a/vendor/github.com/docker/docker/man/Dockerfile b/vendor/github.com/docker/docker/man/Dockerfile new file mode 100644 index 0000000..80e97ff --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile @@ -0,0 +1,24 @@ +FROM golang:1.7.5-alpine + +RUN apk add -U git bash curl gcc musl-dev make + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.5.md b/vendor/github.com/docker/docker/man/Dockerfile.5.md new file mode 100644 index 0000000..5191b19 --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.5.md @@ -0,0 +1,474 @@ +% DOCKERFILE(5) Docker User Manuals +% Zac Dover +% May 2014 +# NAME + +Dockerfile - automate the steps of creating a Docker image + +# INTRODUCTION + +The **Dockerfile** is a configuration file that automates the steps of creating +a Docker image. It is similar to a Makefile. Docker reads instructions from the +**Dockerfile** to automate the steps otherwise performed manually to create an +image. To build an image, create a file called **Dockerfile**. + +The **Dockerfile** describes the steps taken to assemble the image. When the +**Dockerfile** has been created, call the `docker build` command, using the +path of directory that contains **Dockerfile** as the argument. + +# SYNOPSIS + +INSTRUCTION arguments + +For example: + + FROM image + +# DESCRIPTION + +A Dockerfile is a file that automates the steps of creating a Docker image. +A Dockerfile is similar to a Makefile. + +# USAGE + + docker build . + + -- Runs the steps and commits them, building a final image. + The path to the source repository defines where to find the context of the + build. The build is run by the Docker daemon, not the CLI. The whole + context must be transferred to the daemon. The Docker CLI reports + `"Sending build context to Docker daemon"` when the context is sent to the + daemon. + + ``` + docker build -t repository/tag . + ``` + + -- specifies a repository and tag at which to save the new image if the build + succeeds. The Docker daemon runs the steps one-by-one, committing the result + to a new image if necessary, before finally outputting the ID of the new + image. The Docker daemon automatically cleans up the context it is given. + + Docker re-uses intermediate images whenever possible. This significantly + accelerates the *docker build* process. + +# FORMAT + + `FROM image` + + `FROM image:tag` + + `FROM image@digest` + + -- The **FROM** instruction sets the base image for subsequent instructions. A + valid Dockerfile must have **FROM** as its first instruction. The image can be any + valid image. It is easy to start by pulling an image from the public + repositories. + + -- **FROM** must be the first non-comment instruction in Dockerfile. + + -- **FROM** may appear multiple times within a single Dockerfile in order to create + multiple images. Make a note of the last image ID output by the commit before + each new **FROM** command. + + -- If no tag is given to the **FROM** instruction, Docker applies the + `latest` tag. If the used tag does not exist, an error is returned. + + -- If no digest is given to the **FROM** instruction, Docker applies the + `latest` tag. If the used tag does not exist, an error is returned. + +**MAINTAINER** + -- **MAINTAINER** sets the Author field for the generated images. + Useful for providing users with an email or url for support. + +**RUN** + -- **RUN** has two forms: + + ``` + # the command is run in a shell - /bin/sh -c + RUN + + # Executable form + RUN ["executable", "param1", "param2"] + ``` + + + -- The **RUN** instruction executes any commands in a new layer on top of the current + image and commits the results. The committed image is used for the next step in + Dockerfile. + + -- Layering **RUN** instructions and generating commits conforms to the core + concepts of Docker where commits are cheap and containers can be created from + any point in the history of an image. This is similar to source control. The + exec form makes it possible to avoid shell string munging. The exec form makes + it possible to **RUN** commands using a base image that does not contain `/bin/sh`. + + Note that the exec form is parsed as a JSON array, which means that you must + use double-quotes (") around words not single-quotes ('). + +**CMD** + -- **CMD** has three forms: + + ``` + # Executable form + CMD ["executable", "param1", "param2"]` + + # Provide default arguments to ENTRYPOINT + CMD ["param1", "param2"]` + + # the command is run in a shell - /bin/sh -c + CMD command param1 param2 + ``` + + -- There should be only one **CMD** in a Dockerfile. If more than one **CMD** is listed, only + the last **CMD** takes effect. + The main purpose of a **CMD** is to provide defaults for an executing container. + These defaults may include an executable, or they can omit the executable. If + they omit the executable, an **ENTRYPOINT** must be specified. + When used in the shell or exec formats, the **CMD** instruction sets the command to + be executed when running the image. + If you use the shell form of the **CMD**, the `` executes in `/bin/sh -c`: + + Note that the exec form is parsed as a JSON array, which means that you must + use double-quotes (") around words not single-quotes ('). + + ``` + FROM ubuntu + CMD echo "This is a test." | wc - + ``` + + -- If you run **command** without a shell, then you must express the command as a + JSON array and give the full path to the executable. This array form is the + preferred form of **CMD**. All additional parameters must be individually expressed + as strings in the array: + + ``` + FROM ubuntu + CMD ["/usr/bin/wc","--help"] + ``` + + -- To make the container run the same executable every time, use **ENTRYPOINT** in + combination with **CMD**. + If the user specifies arguments to `docker run`, the specified commands + override the default in **CMD**. + Do not confuse **RUN** with **CMD**. **RUN** runs a command and commits the result. + **CMD** executes nothing at build time, but specifies the intended command for + the image. + +**LABEL** + -- `LABEL = [= ...]`or + ``` + LABEL [ ] + LABEL [ ] + ... + ``` + The **LABEL** instruction adds metadata to an image. A **LABEL** is a + key-value pair. To specify a **LABEL** without a value, simply use an empty + string. To include spaces within a **LABEL** value, use quotes and + backslashes as you would in command-line parsing. + + ``` + LABEL com.example.vendor="ACME Incorporated" + LABEL com.example.vendor "ACME Incorporated" + LABEL com.example.vendor.is-beta "" + LABEL com.example.vendor.is-beta= + LABEL com.example.vendor.is-beta="" + ``` + + An image can have more than one label. To specify multiple labels, separate + each key-value pair by a space. + + Labels are additive including `LABEL`s in `FROM` images. As the system + encounters and then applies a new label, new `key`s override any previous + labels with identical keys. + + To display an image's labels, use the `docker inspect` command. + +**EXPOSE** + -- `EXPOSE [...]` + The **EXPOSE** instruction informs Docker that the container listens on the + specified network ports at runtime. Docker uses this information to + interconnect containers using links and to set up port redirection on the host + system. + +**ENV** + -- `ENV ` + The **ENV** instruction sets the environment variable to + the value ``. This value is passed to all future + **RUN**, **ENTRYPOINT**, and **CMD** instructions. This is + functionally equivalent to prefixing the command with `=`. The + environment variables that are set with **ENV** persist when a container is run + from the resulting image. Use `docker inspect` to inspect these values, and + change them using `docker run --env =`. + + Note that setting "`ENV DEBIAN_FRONTEND noninteractive`" may cause + unintended consequences, because it will persist when the container is run + interactively, as with the following command: `docker run -t -i image bash` + +**ADD** + -- **ADD** has two forms: + + ``` + ADD + + # Required for paths with whitespace + ADD ["",... ""] + ``` + + The **ADD** instruction copies new files, directories + or remote file URLs to the filesystem of the container at path ``. + Multiple `` resources may be specified but if they are files or directories + then they must be relative to the source directory that is being built + (the context of the build). The `` is the absolute path, or path relative + to **WORKDIR**, into which the source is copied inside the target container. + If the `` argument is a local file in a recognized compression format + (tar, gzip, bzip2, etc) then it is unpacked at the specified `` in the + container's filesystem. Note that only local compressed files will be unpacked, + i.e., the URL download and archive unpacking features cannot be used together. + All new directories are created with mode 0755 and with the uid and gid of **0**. + +**COPY** + -- **COPY** has two forms: + + ``` + COPY + + # Required for paths with whitespace + COPY ["",... ""] + ``` + + The **COPY** instruction copies new files from `` and + adds them to the filesystem of the container at path . The `` must be + the path to a file or directory relative to the source directory that is + being built (the context of the build) or a remote file URL. The `` is an + absolute path, or a path relative to **WORKDIR**, into which the source will + be copied inside the target container. If you **COPY** an archive file it will + land in the container exactly as it appears in the build context without any + attempt to unpack it. All new files and directories are created with mode **0755** + and with the uid and gid of **0**. + +**ENTRYPOINT** + -- **ENTRYPOINT** has two forms: + + ``` + # executable form + ENTRYPOINT ["executable", "param1", "param2"]` + + # run command in a shell - /bin/sh -c + ENTRYPOINT command param1 param2 + ``` + + -- An **ENTRYPOINT** helps you configure a + container that can be run as an executable. When you specify an **ENTRYPOINT**, + the whole container runs as if it was only that executable. The **ENTRYPOINT** + instruction adds an entry command that is not overwritten when arguments are + passed to docker run. This is different from the behavior of **CMD**. This allows + arguments to be passed to the entrypoint, for instance `docker run -d` + passes the -d argument to the **ENTRYPOINT**. Specify parameters either in the + **ENTRYPOINT** JSON array (as in the preferred exec form above), or by using a **CMD** + statement. Parameters in the **ENTRYPOINT** are not overwritten by the docker run + arguments. Parameters specified via **CMD** are overwritten by docker run + arguments. Specify a plain string for the **ENTRYPOINT**, and it will execute in + `/bin/sh -c`, like a **CMD** instruction: + + ``` + FROM ubuntu + ENTRYPOINT wc -l - + ``` + + This means that the Dockerfile's image always takes stdin as input (that's + what "-" means), and prints the number of lines (that's what "-l" means). To + make this optional but default, use a **CMD**: + + ``` + FROM ubuntu + CMD ["-l", "-"] + ENTRYPOINT ["/usr/bin/wc"] + ``` + +**VOLUME** + -- `VOLUME ["/data"]` + The **VOLUME** instruction creates a mount point with the specified name and marks + it as holding externally-mounted volumes from the native host or from other + containers. + +**USER** + -- `USER daemon` + Sets the username or UID used for running subsequent commands. + + The **USER** instruction can optionally be used to set the group or GID. The + followings examples are all valid: + USER [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Until the **USER** instruction is set, instructions will be run as root. The USER + instruction can be used any number of times in a Dockerfile, and will only affect + subsequent commands. + +**WORKDIR** + -- `WORKDIR /path/to/workdir` + The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**, + **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can + be used multiple times in a single Dockerfile. Relative paths are defined + relative to the path of the previous **WORKDIR** instruction. For example: + + ``` + WORKDIR /a + WORKDIR b + WORKDIR c + RUN pwd + ``` + + In the above example, the output of the **pwd** command is **a/b/c**. + +**ARG** + -- ARG [=] + + The `ARG` instruction defines a variable that users can pass at build-time to + the builder with the `docker build` command using the `--build-arg + =` flag. If a user specifies a build argument that was not + defined in the Dockerfile, the build outputs a warning. + + ``` + [Warning] One or more build-args [foo] were not consumed + ``` + + The Dockerfile author can define a single variable by specifying `ARG` once or many + variables by specifying `ARG` more than once. For example, a valid Dockerfile: + + ``` + FROM busybox + ARG user1 + ARG buildno + ... + ``` + + A Dockerfile author may optionally specify a default value for an `ARG` instruction: + + ``` + FROM busybox + ARG user1=someuser + ARG buildno=1 + ... + ``` + + If an `ARG` value has a default and if there is no value passed at build-time, the + builder uses the default. + + An `ARG` variable definition comes into effect from the line on which it is + defined in the `Dockerfile` not from the argument's use on the command-line or + elsewhere. For example, consider this Dockerfile: + + ``` + 1 FROM busybox + 2 USER ${user:-some_user} + 3 ARG user + 4 USER $user + ... + ``` + A user builds this file by calling: + + ``` + $ docker build --build-arg user=what_user Dockerfile + ``` + + The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the + subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is + defined and the `what_user` value was passed on the command line. Prior to its definition by an + `ARG` instruction, any use of a variable results in an empty string. + + > **Warning:** It is not recommended to use build-time variables for + > passing secrets like github keys, user credentials etc. Build-time variable + > values are visible to any user of the image with the `docker history` command. + + You can use an `ARG` or an `ENV` instruction to specify variables that are + available to the `RUN` instruction. Environment variables defined using the + `ENV` instruction always override an `ARG` instruction of the same name. Consider + this Dockerfile with an `ENV` and `ARG` instruction. + + ``` + 1 FROM ubuntu + 2 ARG CONT_IMG_VER + 3 ENV CONT_IMG_VER v1.0.0 + 4 RUN echo $CONT_IMG_VER + ``` + Then, assume this image is built with this command: + + ``` + $ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile + ``` + + In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting + passed by the user:`v2.0.1` This behavior is similar to a shell + script where a locally scoped variable overrides the variables passed as + arguments or inherited from environment, from its point of definition. + + Using the example above but a different `ENV` specification you can create more + useful interactions between `ARG` and `ENV` instructions: + + ``` + 1 FROM ubuntu + 2 ARG CONT_IMG_VER + 3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} + 4 RUN echo $CONT_IMG_VER + ``` + + Unlike an `ARG` instruction, `ENV` values are always persisted in the built + image. Consider a docker build without the --build-arg flag: + + ``` + $ docker build Dockerfile + ``` + + Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but + its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. + + The variable expansion technique in this example allows you to pass arguments + from the command line and persist them in the final image by leveraging the + `ENV` instruction. Variable expansion is only supported for [a limited set of + Dockerfile instructions.](#environment-replacement) + + Docker has a set of predefined `ARG` variables that you can use without a + corresponding `ARG` instruction in the Dockerfile. + + * `HTTP_PROXY` + * `http_proxy` + * `HTTPS_PROXY` + * `https_proxy` + * `FTP_PROXY` + * `ftp_proxy` + * `NO_PROXY` + * `no_proxy` + + To use these, simply pass them on the command line using the `--build-arg + =` flag. + +**ONBUILD** + -- `ONBUILD [INSTRUCTION]` + The **ONBUILD** instruction adds a trigger instruction to an image. The + trigger is executed at a later time, when the image is used as the base for + another build. Docker executes the trigger in the context of the downstream + build, as if the trigger existed immediately after the **FROM** instruction in + the downstream Dockerfile. + + You can register any build instruction as a trigger. A trigger is useful if + you are defining an image to use as a base for building other images. For + example, if you are defining an application build environment or a daemon that + is customized with a user-specific configuration. + + Consider an image intended as a reusable python application builder. It must + add application source code to a particular directory, and might need a build + script called after that. You can't just call **ADD** and **RUN** now, because + you don't yet have access to the application source code, and it is different + for each application build. + + -- Providing application developers with a boilerplate Dockerfile to copy-paste + into their application is inefficient, error-prone, and + difficult to update because it mixes with application-specific code. + The solution is to use **ONBUILD** to register instructions in advance, to + run later, during the next build stage. + +# HISTORY +*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. +*Feb 2015, updated by Brian Goff (cpuguy83@gmail.com) for readability +*Sept 2015, updated by Sally O'Malley (somalley@redhat.com) +*Oct 2016, updated by Addam Hardy (addam.hardy@gmail.com) diff --git a/vendor/github.com/docker/docker/man/Dockerfile.aarch64 b/vendor/github.com/docker/docker/man/Dockerfile.aarch64 new file mode 100644 index 0000000..e788eb1 --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.aarch64 @@ -0,0 +1,25 @@ +FROM aarch64/ubuntu:xenial + +RUN apt-get update && apt-get install -y git golang-go + +RUN mkdir -p /go/src /go/bin /go/pkg +ENV GOPATH=/go +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.armhf b/vendor/github.com/docker/docker/man/Dockerfile.armhf new file mode 100644 index 0000000..e7ea495 --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.armhf @@ -0,0 +1,43 @@ +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y \ + git \ + bash \ + curl \ + gcc \ + make + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.ppc64le b/vendor/github.com/docker/docker/man/Dockerfile.ppc64le new file mode 100644 index 0000000..fc96ca7 --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.ppc64le @@ -0,0 +1,35 @@ +FROM ppc64le/ubuntu:xenial + +RUN apt-get update && apt-get install -y \ + curl \ + gcc \ + git \ + make \ + tar + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH=/go + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.s390x b/vendor/github.com/docker/docker/man/Dockerfile.s390x new file mode 100644 index 0000000..d4bcf1d --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.s390x @@ -0,0 +1,35 @@ +FROM s390x/ubuntu:xenial + +RUN apt-get update && apt-get install -y \ + curl \ + gcc \ + git \ + make \ + tar + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH=/go + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/README.md b/vendor/github.com/docker/docker/man/README.md new file mode 100644 index 0000000..82dac65 --- /dev/null +++ b/vendor/github.com/docker/docker/man/README.md @@ -0,0 +1,15 @@ +Docker Documentation +==================== + +This directory contains scripts for generating the man pages. Many of the man +pages are generated directly from the `spf13/cobra` `Command` definition. Some +legacy pages are still generated from the markdown files in this directory. +Do *not* edit the man pages in the man1 directory. Instead, update the +Cobra command or amend the Markdown files for legacy pages. + + +## Generate the man pages + +From within the project root directory run: + + make manpages diff --git a/vendor/github.com/docker/docker/man/docker-attach.1.md b/vendor/github.com/docker/docker/man/docker-attach.1.md new file mode 100644 index 0000000..c39d1c92 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-attach.1.md @@ -0,0 +1,99 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-attach - Attach to a running container + +# SYNOPSIS +**docker attach** +[**--detach-keys**[=*[]*]] +[**--help**] +[**--no-stdin**] +[**--sig-proxy**[=*true*]] +CONTAINER + +# DESCRIPTION +The **docker attach** command allows you to attach to a running container using +the container's ID or name, either to view its ongoing output or to control it +interactively. You can attach to the same contained process multiple times +simultaneously, screen sharing style, or quickly view the progress of your +detached process. + +To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the +container. You can detach from the container (and leave it running) using a +configurable key sequence. The default sequence is `CTRL-p CTRL-q`. You +configure the key sequence using the **--detach-keys** option or a configuration +file. See **config-json(5)** for documentation on using a configuration file. + +It is forbidden to redirect the standard input of a `docker attach` command while +attaching to a tty-enabled container (i.e.: launched with `-t`). + +# OPTIONS +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**--help** + Print usage statement + +**--no-stdin**=*true*|*false* + Do not attach STDIN. The default is *false*. + +**--sig-proxy**=*true*|*false* + Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. + +# Override the detach sequence + +If you want, you can configure an override the Docker key sequence for detach. +This is useful if the Docker default sequence conflicts with key sequence you +use for other applications. There are two ways to define your own detach key +sequence, as a per-container override or as a configuration property on your +entire configuration. + +To override the sequence for an individual container, use the +`--detach-keys=""` flag with the `docker attach` command. The format of +the `` is either a letter [a-Z], or the `ctrl-` combined with any of +the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key +sequences. To configure a different configuration default key sequence for all +containers, see **docker(1)**. + +# EXAMPLES + +## Attaching to a container + +In this example the top command is run inside a container, from an image called +fedora, in detached mode. The ID from the container is passed into the **docker +attach** command: + + # ID=$(sudo docker run -d fedora /usr/bin/top -b) + # sudo docker attach $ID + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-build.1.md b/vendor/github.com/docker/docker/man/docker-build.1.md new file mode 100644 index 0000000..4beee88 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-build.1.md @@ -0,0 +1,340 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-build - Build an image from a Dockerfile + +# SYNOPSIS +**docker build** +[**--build-arg**[=*[]*]] +[**--cpu-shares**[=*0*]] +[**--cgroup-parent**[=*CGROUP-PARENT*]] +[**--help**] +[**-f**|**--file**[=*PATH/Dockerfile*]] +[**-squash**] *Experimental* +[**--force-rm**] +[**--isolation**[=*default*]] +[**--label**[=*[]*]] +[**--no-cache**] +[**--pull**] +[**--compress**] +[**-q**|**--quiet**] +[**--rm**[=*true*]] +[**-t**|**--tag**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--memory-swap**[=*LIMIT*]] +[**--network**[=*"default"*]] +[**--shm-size**[=*SHM-SIZE*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--ulimit**[=*[]*]] +PATH | URL | - + +# DESCRIPTION +This will read the Dockerfile from the directory specified in **PATH**. +It also sends any other files and directories found in the current +directory to the Docker daemon. The contents of this directory would +be used by **ADD** commands found within the Dockerfile. + +Warning, this will send a lot of data to the Docker daemon depending +on the contents of the current directory. The build is run by the Docker +daemon, not by the CLI, so the whole context must be transferred to the daemon. +The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to +the daemon. + +When the URL to a tarball archive or to a single Dockerfile is given, no context is sent from +the client to the Docker daemon. In this case, the Dockerfile at the root of the archive and +the rest of the archive will get used as the context of the build. When a Git repository is +set as the **URL**, the repository is cloned locally and then sent as the context. + +# OPTIONS +**-f**, **--file**=*PATH/Dockerfile* + Path to the Dockerfile to use. If the path is a relative path and you are + building from a local directory, then the path must be relative to that + directory. If you are building from a remote URL pointing to either a + tarball or a Git repository, then the path must be relative to the root of + the remote context. In all cases, the file must be within the build context. + The default is *Dockerfile*. + +**--squash**=*true*|*false* + **Experimental Only** + Once the image is built, squash the new layers into a new image with a single + new layer. Squashing does not destroy any existing image, rather it creates a new + image with the content of the squshed layers. This effectively makes it look + like all `Dockerfile` commands were created with a single layer. The build + cache is preserved with this method. + + **Note**: using this option means the new image will not be able to take + advantage of layer sharing with other images and may use significantly more + space. + + **Note**: using this option you may see significantly more space used due to + storing two copies of the image, one for the build cache with all the cache + layers in tact, and one for the squashed version. + +**--build-arg**=*variable* + name and value of a **buildarg**. + + For example, if you want to pass a value for `http_proxy`, use + `--build-arg=http_proxy="http://some.proxy.url"` + + Users pass these values at build-time. Docker uses the `buildargs` as the + environment context for command(s) run via the Dockerfile's `RUN` instruction + or for variable expansion in other Dockerfile instructions. This is not meant + for passing secret values. [Read more about the buildargs instruction](https://docs.docker.com/engine/reference/builder/#arg) + +**--force-rm**=*true*|*false* + Always remove intermediate containers, even after unsuccessful builds. The default is *false*. + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. + +**--label**=*label* + Set metadata for an image + +**--no-cache**=*true*|*false* + Do not use cache when building the image. The default is *false*. + +**--help** + Print usage statement + +**--pull**=*true*|*false* + Always attempt to pull a newer version of the image. The default is *false*. + +**--compress**=*true*|*false* + Compress the build context using gzip. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Suppress the build output and print image ID on success. The default is *false*. + +**--rm**=*true*|*false* + Remove intermediate containers after a successful build. The default is *true*. + +**-t**, **--tag**="" + Repository names (and optionally with tags) to be applied to the resulting + image in case of success. Refer to **docker-tag(1)** for more information + about valid tag names. + +**-m**, **--memory**=*MEMORY* + Memory limit + +**--memory-swap**=*LIMIT* + A limit value equal to memory plus swap. Must be used with the **-m** +(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** +(**--memory**) value. + + The format of `LIMIT` is `[]`. Unit can be `b` (bytes), +`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a +unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. + +**--network**=*bridge* + Set the networking mode for the RUN instructions during build. Supported standard + values are: `bridge`, `host`, `none` and `container:`. Any other value + is taken as a custom network's name or ID which this container should connect to. + +**--shm-size**=*SHM-SIZE* + Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. + If you omit the size entirely, the system uses `64m`. + +**--cpu-shares**=*0* + CPU shares (relative weight). + + By default, all containers get the same proportion of CPU cycles. + CPU shares is a 'relative weight', relative to the default setting of 1024. + This default value is defined here: + ``` + cat /sys/fs/cgroup/cpu/cpu.shares + 1024 + ``` + You can change this proportion by adjusting the container's CPU share + weighting relative to the weighting of all other running containers. + + To modify the proportion from the default of 1024, use the **--cpu-shares** + flag to set the weighting to 2 or higher. + + Container CPU share Flag + {C0} 60% of CPU --cpu-shares=614 (614 is 60% of 1024) + {C1} 40% of CPU --cpu-shares=410 (410 is 40% of 1024) + + The proportion is only applied when CPU-intensive processes are running. + When tasks in one container are idle, the other containers can use the + left-over CPU time. The actual amount of CPU time used varies depending on + the number of containers running on the system. + + For example, consider three containers, where one has **--cpu-shares=1024** and + two others have **--cpu-shares=512**. When processes in all three + containers attempt to use 100% of CPU, the first container would receive + 50% of the total CPU time. If you add a fourth container with **--cpu-shares=1024**, + the first container only gets 33% of the CPU. The remaining containers + receive 16.5%, 16.5% and 33% of the CPU. + + + Container CPU share Flag CPU time + {C0} 100% --cpu-shares=1024 33% + {C1} 50% --cpu-shares=512 16.5% + {C2} 50% --cpu-shares=512 16.5% + {C4} 100% --cpu-shares=1024 33% + + + On a multi-core system, the shares of CPU time are distributed across the CPU + cores. Even if a container is limited to less than 100% of CPU time, it can + use 100% of each individual CPU core. + + For example, consider a system with more than three cores. If you start one + container **{C0}** with **--cpu-shares=512** running one process, and another container + **{C1}** with **--cpu-shares=1024** running two processes, this can result in the following + division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +**--cpu-period**=*0* + Limit the CPU CFS (Completely Fair Scheduler) period. + + Limit the container's CPU usage. This flag causes the kernel to restrict the + container's CPU usage to the period you specify. + +**--cpu-quota**=*0* + Limit the CPU CFS (Completely Fair Scheduler) quota. + + By default, containers run with the full CPU resource. This flag causes the +kernel to restrict the container's CPU usage to the quota you specify. + +**--cpuset-cpus**=*CPUSET-CPUS* + CPUs in which to allow execution (0-3, 0,1). + +**--cpuset-mems**=*CPUSET-MEMS* + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on + NUMA systems. + + For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +to ensure the processes in your Docker container only use memory from the first +two memory nodes. + +**--cgroup-parent**=*CGROUP-PARENT* + Path to `cgroups` under which the container's `cgroup` are created. + + If the path is not absolute, the path is considered relative to the `cgroups` path of the init process. +Cgroups are created if they do not already exist. + +**--ulimit**=[] + Ulimit options + + For more information about `ulimit` see [Setting ulimits in a +container](https://docs.docker.com/engine/reference/commandline/run/#set-ulimits-in-container---ulimit) + +# EXAMPLES + +## Building an image using a Dockerfile located inside the current directory + +Docker images can be built using the build command and a Dockerfile: + + docker build . + +During the build process Docker creates intermediate images. In order to +keep them, you must explicitly set `--rm=false`. + + docker build --rm=false . + +A good practice is to make a sub-directory with a related name and create +the Dockerfile in that directory. For example, a directory called mongo may +contain a Dockerfile to create a Docker MongoDB image. Likewise, another +directory called httpd may be used to store Dockerfiles for Apache web +server images. + +It is also a good practice to add the files required for the image to the +sub-directory. These files will then be specified with the `COPY` or `ADD` +instructions in the `Dockerfile`. + +Note: If you include a tar file (a good practice), then Docker will +automatically extract the contents of the tar file specified within the `ADD` +instruction into the specified target. + +## Building an image and naming that image + +A good practice is to give a name to the image you are building. Note that +only a-z0-9-_. should be used for consistency. There are no hard rules here but it is best to give the names consideration. + +The **-t**/**--tag** flag is used to rename an image. Here are some examples: + +Though it is not a good practice, image names can be arbitrary: + + docker build -t myimage . + +A better approach is to provide a fully qualified and meaningful repository, +name, and tag (where the tag in this context means the qualifier after +the ":"). In this example we build a JBoss image for the Fedora repository +and give it the version 1.0: + + docker build -t fedora/jboss:1.0 . + +The next example is for the "whenry" user repository and uses Fedora and +JBoss and gives it the version 2.1 : + + docker build -t whenry/fedora-jboss:v2.1 . + +If you do not provide a version tag then Docker will assign `latest`: + + docker build -t whenry/fedora-jboss . + +When you list the images, the image above will have the tag `latest`. + +You can apply multiple tags to an image. For example, you can apply the `latest` +tag to a newly built image and add another tag that references a specific +version. +For example, to tag an image both as `whenry/fedora-jboss:latest` and +`whenry/fedora-jboss:v2.1`, use the following: + + docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . + +So renaming an image is arbitrary but consideration should be given to +a useful convention that makes sense for consumers and should also take +into account Docker community conventions. + + +## Building an image using a URL + +This will clone the specified GitHub repository from the URL and use it +as context. The Dockerfile at the root of the repository is used as +Dockerfile. This only works if the GitHub repository is a dedicated +repository. + + docker build github.com/scollier/purpletest + +Note: You can set an arbitrary Git repository via the `git://` scheme. + +## Building an image using a URL to a tarball'ed context + +This will send the URL itself to the Docker daemon. The daemon will fetch the +tarball archive, decompress it and use its contents as the build context. The +Dockerfile at the root of the archive and the rest of the archive will get used +as the context of the build. If you pass an **-f PATH/Dockerfile** option as well, +the system will look for that file inside the contents of the tarball. + + docker build -f dev/Dockerfile https://10.10.10.1/docker/context.tar.gz + +Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression). + +## Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + +* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. +* `process`: Namespace isolation only. +* `hyperv`: Hyper-V hypervisor partition-based isolation. + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + +# HISTORY +March 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-commit.1.md b/vendor/github.com/docker/docker/man/docker-commit.1.md new file mode 100644 index 0000000..d8a4cf8 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-commit.1.md @@ -0,0 +1,71 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-commit - Create a new image from a container's changes + +# SYNOPSIS +**docker commit** +[**-a**|**--author**[=*AUTHOR*]] +[**-c**|**--change**[=\[*DOCKERFILE INSTRUCTIONS*\]]] +[**--help**] +[**-m**|**--message**[=*MESSAGE*]] +[**-p**|**--pause**[=*true*]] +CONTAINER [REPOSITORY[:TAG]] + +# DESCRIPTION +Create a new image from an existing container specified by name or +container ID. The new image will contain the contents of the +container filesystem, *excluding* any data volumes. Refer to **docker-tag(1)** +for more information about valid image and tag names. + +While the `docker commit` command is a convenient way of extending an +existing image, you should prefer the use of a Dockerfile and `docker +build` for generating images that you intend to share with other +people. + +# OPTIONS +**-a**, **--author**="" + Author (e.g., "John Hannibal Smith ") + +**-c** , **--change**=[] + Apply specified Dockerfile instructions while committing the image + Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +**--help** + Print usage statement + +**-m**, **--message**="" + Commit message + +**-p**, **--pause**=*true*|*false* + Pause container during commit. The default is *true*. + +# EXAMPLES + +## Creating a new image from an existing container +An existing Fedora based container has had Apache installed while running +in interactive mode with the bash shell. Apache is also running. To +create a new image run `docker ps` to find the container's ID and then run: + + # docker commit -m="Added Apache to Fedora base image" \ + -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 + +Note that only a-z0-9-_. are allowed when naming images from an +existing container. + +## Apply specified Dockerfile instructions while committing the image +If an existing container was created without the DEBUG environment +variable set to "true", you can create a new image based on that +container by first getting the container's ID with `docker ps` and +then running: + + # docker commit -c="ENV DEBUG true" 98bd7fc99854 debug-image + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and in +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +Oct 2014, updated by Daniel, Dao Quang Minh +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-config-json.5.md b/vendor/github.com/docker/docker/man/docker-config-json.5.md new file mode 100644 index 0000000..49987f0 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-config-json.5.md @@ -0,0 +1,72 @@ +% CONFIG.JSON(5) Docker User Manuals +% Docker Community +% JANUARY 2016 +# NAME +HOME/.docker/config.json - Default Docker configuration file + +# INTRODUCTION + +By default, the Docker command line stores its configuration files in a +directory called `.docker` within your `$HOME` directory. Docker manages most of +the files in the configuration directory and you should not modify them. +However, you *can modify* the `config.json` file to control certain aspects of +how the `docker` command behaves. + +Currently, you can modify the `docker` command behavior using environment +variables or command-line options. You can also use options within +`config.json` to modify some of the same behavior. When using these +mechanisms, you must keep in mind the order of precedence among them. Command +line options override environment variables and environment variables override +properties you specify in a `config.json` file. + +The `config.json` file stores a JSON encoding of several properties: + +* The `HttpHeaders` property specifies a set of headers to include in all messages +sent from the Docker client to the daemon. Docker does not try to interpret or +understand these header; it simply puts them into the messages. Docker does not +allow these headers to change any headers it sets for itself. + +* The `psFormat` property specifies the default format for `docker ps` output. +When the `--format` flag is not provided with the `docker ps` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see **docker-ps(1)**. + +* The `detachKeys` property specifies the default key sequence which +detaches the container. When the `--detach-keys` flag is not provide +with the `docker attach`, `docker exec`, `docker run` or `docker +start`, Docker's client uses this property. If this property is not +set, the client falls back to the default sequence `ctrl-p,ctrl-q`. + + +* The `imagesFormat` property specifies the default format for `docker images` +output. When the `--format` flag is not provided with the `docker images` +command, Docker's client uses this property. If this property is not set, the +client falls back to the default table format. For a list of supported +formatting directives, see **docker-images(1)**. + +You can specify a different location for the configuration files via the +`DOCKER_CONFIG` environment variable or the `--config` command line option. If +both are specified, then the `--config` option overrides the `DOCKER_CONFIG` +environment variable: + + docker --config ~/testconfigs/ ps + +This command instructs Docker to use the configuration files in the +`~/testconfigs/` directory when running the `ps` command. + +## Examples + +Following is a sample `config.json` file: + + { + "HttpHeaders": { + "MyHeader": "MyValue" + }, + "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", + "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", + "detachKeys": "ctrl-e,e" + } + +# HISTORY +January 2016, created by Moxiegirl diff --git a/vendor/github.com/docker/docker/man/docker-cp.1.md b/vendor/github.com/docker/docker/man/docker-cp.1.md new file mode 100644 index 0000000..949d60b --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-cp.1.md @@ -0,0 +1,175 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-cp - Copy files/folders between a container and the local filesystem. + +# SYNOPSIS +**docker cp** +[**--help**] +CONTAINER:SRC_PATH DEST_PATH|- + +**docker cp** +[**--help**] +SRC_PATH|- CONTAINER:DEST_PATH + +# DESCRIPTION + +The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. +You can copy from the container's file system to the local machine or the +reverse, from the local filesystem to the container. If `-` is specified for +either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from +`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. +The `SRC_PATH` or `DEST_PATH` can be a file or directory. + +The `docker cp` command assumes container paths are relative to the container's +`/` (root) directory. This means supplying the initial forward slash is optional; +The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and +`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can +be an absolute or relative value. The command interprets a local machine's +relative paths as relative to the current working directory where `docker cp` is +run. + +The `cp` command behaves like the Unix `cp -a` command in that directories are +copied recursively with permissions preserved if possible. Ownership is set to +the user and primary group at the destination. For example, files copied to a +container are created with `UID:GID` of the root user. Files copied to the local +machine are created with the `UID:GID` of the user which invoked the `docker cp` +command. If you specify the `-L` option, `docker cp` follows any symbolic link +in the `SRC_PATH`. `docker cp` does *not* create parent directories for +`DEST_PATH` if they do not exist. + +Assuming a path separator of `/`, a first argument of `SRC_PATH` and second +argument of `DEST_PATH`, the behavior is as follows: + +- `SRC_PATH` specifies a file + - `DEST_PATH` does not exist + - the file is saved to a file created at `DEST_PATH` + - `DEST_PATH` does not exist and ends with `/` + - Error condition: the destination directory must exist. + - `DEST_PATH` exists and is a file + - the destination is overwritten with the source file's contents + - `DEST_PATH` exists and is a directory + - the file is copied into this directory using the basename from + `SRC_PATH` +- `SRC_PATH` specifies a directory + - `DEST_PATH` does not exist + - `DEST_PATH` is created as a directory and the *contents* of the source + directory are copied into this directory + - `DEST_PATH` exists and is a file + - Error condition: cannot copy a directory to a file + - `DEST_PATH` exists and is a directory + - `SRC_PATH` does not end with `/.` + - the source directory is copied into this directory + - `SRC_PATH` does end with `/.` + - the *content* of the source directory is copied into this + directory + +The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above +rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not +the target, is copied by default. To copy the link target and not the link, +specify the `-L` option. + +A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can +also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local +machine, for example `file:name.txt`. If you use a `:` in a local machine path, +you must be explicit with a relative or absolute path, for example: + + `/path/to/file:name.txt` or `./file:name.txt` + +It is not possible to copy certain system files such as resources under +`/proc`, `/sys`, `/dev`, tmpfs, and mounts created by the user in the container. +However, you can still copy such files by manually running `tar` in `docker exec`. +For example (consider `SRC_PATH` and `DEST_PATH` are directories): + + $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - + +or + + $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - + + +Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. +The command extracts the content of the tar to the `DEST_PATH` in container's +filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as +the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. + +# OPTIONS +**-L**, **--follow-link**=*true*|*false* + Follow symbol link in SRC_PATH + +**--help** + Print usage statement + +# EXAMPLES + +Suppose a container has finished producing some output as a file it saves +to somewhere in its filesystem. This could be the output of a build job or +some other computation. You can copy these outputs from the container to a +location on your local host. + +If you want to copy the `/tmp/foo` directory from a container to the +existing `/tmp` directory on your host. If you run `docker cp` in your `~` +(home) directory on the local host: + + $ docker cp compassionate_darwin:tmp/foo /tmp + +Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit +the leading slash in the command. If you execute this command from your home +directory: + + $ docker cp compassionate_darwin:tmp/foo tmp + +If `~/tmp` does not exist, Docker will create it and copy the contents of +`/tmp/foo` from the container into this new directory. If `~/tmp` already +exists as a directory, then Docker will copy the contents of `/tmp/foo` from +the container into a directory at `~/tmp/foo`. + +When copying a single file to an existing `LOCALPATH`, the `docker cp` command +will either overwrite the contents of `LOCALPATH` if it is a file or place it +into `LOCALPATH` if it is a directory, overwriting an existing file of the same +name if one exists. For example, this command: + + $ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /test + +If `/test` does not exist on the local machine, it will be created as a file +with the contents of `/tmp/foo/myfile.txt` from the container. If `/test` +exists as a file, it will be overwritten. Lastly, if `/test` exists as a +directory, the file will be copied to `/test/myfile.txt`. + +Next, suppose you want to copy a file or folder into a container. For example, +this could be a configuration file or some other input to a long running +computation that you would like to place into a created container before it +starts. This is useful because it does not require the configuration file or +other input to exist in the container image. + +If you have a file, `config.yml`, in the current directory on your local host +and wish to copy it to an existing directory at `/etc/my-app.d` in a container, +this command can be used: + + $ docker cp config.yml myappcontainer:/etc/my-app.d + +If you have several files in a local directory `/config` which you need to copy +to a directory `/etc/my-app.d` in a container: + + $ docker cp /config/. myappcontainer:/etc/my-app.d + +The above command will copy the contents of the local `/config` directory into +the directory `/etc/my-app.d` in the container. + +Finally, if you want to copy a symbolic link into a container, you typically +want to copy the linked target and not the link itself. To copy the target, use +the `-L` option, for example: + + $ ln -s /tmp/somefile /tmp/somefile.ln + $ docker cp -L /tmp/somefile.ln myappcontainer:/tmp/ + +This command copies content of the local `/tmp/somefile` into the file +`/tmp/somefile.ln` in the container. Without `-L` option, the `/tmp/somefile.ln` +preserves its symbolic link but not its content. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +May 2015, updated by Josh Hawn diff --git a/vendor/github.com/docker/docker/man/docker-create.1.md b/vendor/github.com/docker/docker/man/docker-create.1.md new file mode 100644 index 0000000..3f8a076 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-create.1.md @@ -0,0 +1,553 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-create - Create a new container + +# SYNOPSIS +**docker create** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**--blkio-weight-device**[=*[]*]] +[**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cgroup-parent**[=*CGROUP-PATH*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpu-count**[=*0*]] +[**--cpu-percent**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpu-rt-period**[=*0*]] +[**--cpu-rt-runtime**[=*0*]] +[**--cpus**[=*0.0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--device**[=*[]*]] +[**--device-read-bps**[=*[]*]] +[**--device-read-iops**[=*[]*]] +[**--device-write-bps**[=*[]*]] +[**--device-write-iops**[=*[]*]] +[**--dns**[=*[]*]] +[**--dns-search**[=*[]*]] +[**--dns-option**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**--group-add**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**--help**] +[**-i**|**--interactive**] +[**--ip**[=*IPv4-ADDRESS*]] +[**--ip6**[=*IPv6-ADDRESS*]] +[**--ipc**[=*IPC*]] +[**--isolation**[=*default*]] +[**--kernel-memory**[=*KERNEL-MEMORY*]] +[**-l**|**--label**[=*[]*]] +[**--label-file**[=*[]*]] +[**--link**[=*[]*]] +[**--link-local-ip**[=*[]*]] +[**--log-driver**[=*[]*]] +[**--log-opt**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] +[**--memory-reservation**[=*MEMORY-RESERVATION*]] +[**--memory-swap**[=*LIMIT*]] +[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] +[**--name**[=*NAME*]] +[**--network-alias**[=*[]*]] +[**--network**[=*"bridge"*]] +[**--oom-kill-disable**] +[**--oom-score-adj**[=*0*]] +[**-P**|**--publish-all**] +[**-p**|**--publish**[=*[]*]] +[**--pid**[=*[PID]*]] +[**--userns**[=*[]*]] +[**--pids-limit**[=*PIDS_LIMIT*]] +[**--privileged**] +[**--read-only**] +[**--restart**[=*RESTART*]] +[**--rm**] +[**--security-opt**[=*[]*]] +[**--storage-opt**[=*[]*]] +[**--stop-signal**[=*SIGNAL*]] +[**--stop-timeout**[=*TIMEOUT*]] +[**--shm-size**[=*[]*]] +[**--sysctl**[=*[]*]] +[**-t**|**--tty**] +[**--tmpfs**[=*[CONTAINER-DIR[:]*]] +[**-u**|**--user**[=*USER*]] +[**--ulimit**[=*[]*]] +[**--uts**[=*[]*]] +[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] +[**--volume-driver**[=*DRIVER*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] +IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Creates a writeable container layer over the specified image and prepares it for +running the specified command. The container ID is then printed to STDOUT. This +is similar to **docker run -d** except the container is never started. You can +then use the **docker start ** command to start the container at +any point. + +The initial status of the container created with **docker create** is 'created'. + +# OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. + +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + +**--blkio-weight**=*0* + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**--blkio-weight-device**=[] + Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). + +**--cpu-shares**=*0* + CPU shares (relative weight) + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cgroup-parent**="" + Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + +**--cidfile**="" + Write the container ID to the file + +**--cpu-count**=*0* + Limit the number of CPUs available for execution by the container. + + On Windows Server containers, this is approximated as a percentage of total CPU usage. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-percent**=*0* + Limit the percentage of CPU available for execution by a container running on a Windows daemon. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-period**=*0* + Limit the CPU CFS (Completely Fair Scheduler) period + + Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. + +**--cpuset-cpus**="" + CPUs in which to allow execution (0-3, 0,1) + +**--cpuset-mems**="" + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + + If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +then processes in your Docker container will only use memory from the first +two memory nodes. + +**--cpu-quota**=*0* + Limit the CPU CFS (Completely Fair Scheduler) quota + +**--cpu-rt-period**=0 + Limit the CPU real-time period in microseconds + + Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. + +**--cpu-rt-runtime**=0 + Limit the CPU real-time runtime in microseconds + + Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: + Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. + + The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. + +**--cpus**=0.0 + Number of CPUs. The default is *0.0*. + +**--device**=[] + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) + +**--device-read-bps**=[] + Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb) + +**--device-read-iops**=[] + Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000) + +**--device-write-bps**=[] + Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb) + +**--device-write-iops**=[] + Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000) + +**--dns**=[] + Set custom DNS servers + +**--dns-option**=[] + Set custom DNS options + +**--dns-search**=[] + Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) + +**-e**, **--env**=[] + Set environment variables + +**--entrypoint**="" + Overwrite the default ENTRYPOINT of the image + +**--env-file**=[] + Read in a line-delimited file of environment variables + +**--expose**=[] + Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host + +**--group-add**=[] + Add additional groups to run as + +**-h**, **--hostname**="" + Container host name + +**--help** + Print usage statement + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + +**--ip**="" + Sets the container's interface IPv4 address (e.g. 172.23.0.9) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ip6**="" + Sets the container's interface IPv6 address (e.g. 2001:db8::1b99) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. Note +that the default on Windows server is `process`, and the default on Windows client +is `hyperv`. Linux only supports `default`. + +**--kernel-memory**="" + Kernel memory limit (format: `[]`, where unit = b, k, m or g) + + Constrains the kernel memory available to a container. If a limit of 0 +is specified (not using `--kernel-memory`), the container's kernel memory +is not limited. If you specify a limit, it may be rounded up to a multiple +of the operating system's page size and the value can be very large, +millions of trillions. + +**-l**, **--label**=[] + Adds metadata to a container (e.g., --label=com.example.key=value) + +**--label-file**=[] + Read labels from a file. Delimit each label with an EOL. + +**--link**=[] + Add link to another container in the form of :alias or just + in which case the alias will match the name. + +**--link-local-ip**=[] + Add one or more link-local IPv4/IPv6 addresses to the container's interface + +**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" + Logging driver for the container. Default is defined by daemon `--log-driver` flag. + **Warning**: the `docker logs` command works only for the `json-file` and + `journald` logging drivers. + +**--log-opt**=[] + Logging driver specific options. + +**-m**, **--memory**="" + Memory limit (format: [], where unit = b, k, m or g) + + Allows you to constrain the memory available to a container. If the host +supports swap memory, then the **-m** memory setting can be larger than physical +RAM. If a limit of 0 is specified (not using **-m**), the container's memory is +not limited. The actual limit may be rounded up to a multiple of the operating +system's page size (the value would be very large, that's millions of trillions). + +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + +**--memory-reservation**="" + Memory soft limit (format: [], where unit = b, k, m or g) + + After setting memory reservation, when the system detects memory contention +or low memory, containers are forced to restrict their consumption to their +reservation. So you should always set the value below **--memory**, otherwise the +hard limit will take precedence. By default, memory reservation will be the same +as memory limit. + +**--memory-swap**="LIMIT" + A limit value equal to memory plus swap. Must be used with the **-m** +(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** +(**--memory**) value. + + The format of `LIMIT` is `[]`. Unit can be `b` (bytes), +`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a +unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. + +**--memory-swappiness**="" + Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + +**--name**="" + Assign a name to the container + +**--network**="*bridge*" + Set the Network mode for the container + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. + '|': connect to a user-defined network + +**--network-alias**=[] + Add network-scoped alias for the container + +**--oom-kill-disable**=*true*|*false* + Whether to disable OOM Killer for the container or not. + +**--oom-score-adj**="" + Tune the host's OOM preferences for containers (accepts -1000 to 1000) + +**-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to random ports on the host interfaces. The default is *false*. + +**-p**, **--publish**=[] + Publish a container's port, or a range of ports, to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + Both hostPort and containerPort can be specified as a range of ports. + When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`) + (use 'docker port' to see the actual mapping) + +**--pid**="" + Set the PID mode for the container + Default is to create a private PID namespace for the container + 'container:': join another container's PID namespace + 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. + +**--userns**="" + Set the usernamespace mode for the container when `userns-remap` option is enabled. + **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`). + +**--pids-limit**="" + Tune the container's pids limit. Set `-1` to have unlimited pids for the container. + +**--privileged**=*true*|*false* + Give extended privileges to this container. The default is *false*. + +**--read-only**=*true*|*false* + Mount the container's root filesystem as read only. + +**--restart**="*no*" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). + +**--rm**=*true*|*false* + Automatically remove the container when it exits. The default is *false*. + +**--shm-size**="" + Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. + If you omit the size entirely, the system uses `64m`. + +**--security-opt**=[] + Security Options + + "label:user:USER" : Set the label user for the container + "label:role:ROLE" : Set the label role for the container + "label:type:TYPE" : Set the label type for the container + "label:level:LEVEL" : Set the label level for the container + "label:disable" : Turn off label confinement for the container + "no-new-privileges" : Disable container processes from gaining additional privileges + "seccomp:unconfined" : Turn off seccomp confinement for the container + "seccomp:profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter + +**--storage-opt**=[] + Storage driver options per container + + $ docker create -it --storage-opt size=120G fedora /bin/bash + + This (size) will allow to set the container rootfs size to 120G at creation time. + This option is only available for the `devicemapper`, `btrfs`, `overlay2` and `zfs` graph drivers. + For the `devicemapper`, `btrfs` and `zfs` storage drivers, user cannot pass a size less than the Default BaseFS Size. + For the `overlay2` storage driver, the size option is only available if the backing fs is `xfs` and mounted with the `pquota` mount option. + Under these conditions, user can pass any size less then the backing fs size. + +**--stop-signal**=*SIGTERM* + Signal to stop a container. Default is SIGTERM. + +**--stop-timeout**=*10* + Timeout (in seconds) to stop a container. Default is 10. + +**--sysctl**=SYSCTL + Configure namespaced kernel parameters at runtime + + IPC Namespace - current sysctls allowed: + + kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced + Sysctls beginning with fs.mqueue.* + + Note: if you use --ipc=host using these sysctls will not be allowed. + + Network Namespace - current sysctls allowed: + Sysctls beginning with net.* + + Note: if you use --network=host using these sysctls will not be allowed. + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + +**--tmpfs**=[] Create a tmpfs mount + + Mount a temporary filesystem (`tmpfs`) mount into a container, for example: + + $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image + + This command mounts a `tmpfs` at `/tmp` within the container. The supported mount +options are the same as the Linux default `mount` flags. If you do not specify +any options, the systems uses the following options: +`rw,noexec,nosuid,nodev,size=65536k`. + +**-u**, **--user**="" + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument root user will be used in the container by default. + +**--ulimit**=[] + Ulimit options + +**--uts**=*host* + Set the UTS mode for the container + **host**: use the host's UTS namespace inside the container. + Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. + +**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] + Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker + bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker + container. If 'HOST-DIR' is omitted, Docker automatically creates the new + volume on the host. The `OPTIONS` are a comma delimited list and can be: + + * [rw|ro] + * [z|Z] + * [`[r]shared`|`[r]slave`|`[r]private`] + +The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` +can be an absolute path or a `name` value. A `name` value must start with an +alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or +`-` (hyphen). An absolute path starts with a `/` (forward slash). + +If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the +path you specify. If you supply a `name`, Docker creates a named volume by that +`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` +value. If you supply the `/foo` value, Docker creates a bind-mount. If you +supply the `foo` specification, Docker creates a named volume. + +You can specify multiple **-v** options to mount one or more mounts to a +container. To use these same mounts in other containers, specify the +**--volumes-from** option also. + +You can add `:ro` or `:rw` suffix to a volume to mount it read-only or +read-write mode, respectively. By default, the volumes are mounted read-write. +See examples. + +Labeling systems like SELinux require that proper labels are placed on volume +content mounted into a container. Without a label, the security system might +prevent the processes running inside the container from using the content. By +default, Docker does not change the labels set by the OS. + +To change a label in the container context, you can add either of two suffixes +`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file +objects on the shared volumes. The `z` option tells Docker that two containers +share the volume content. As a result, Docker labels the content with a shared +content label. Shared volume labels allow all containers to read/write content. +The `Z` option tells Docker to label the content with a private unshared label. +Only the current container can use a private volume. + +By default bind mounted volumes are `private`. That means any mounts done +inside container will not be visible on host and vice-a-versa. One can change +this behavior by specifying a volume mount propagation property. Making a +volume `shared` mounts done under that volume inside container will be +visible on host and vice-a-versa. Making a volume `slave` enables only one +way mount propagation and that is mounts done on host under that volume +will be visible inside container but not the other way around. + +To control mount propagation property of volume one can use `:[r]shared`, +`:[r]slave` or `:[r]private` propagation flag. Propagation property can +be specified only for bind mounted volumes and not for internal volumes or +named volumes. For mount propagation to work source mount point (mount point +where source dir is mounted on) has to have right propagation properties. For +shared volumes, source mount point has to be shared. And for slave volumes, +source mount has to be either shared or slave. + +Use `df ` to figure out the source mount and then use +`findmnt -o TARGET,PROPAGATION ` to figure out propagation +properties of source mount. If `findmnt` utility is not available, then one +can look at mount entry for source mount point in `/proc/self/mountinfo`. Look +at `optional fields` and see if any propagaion properties are specified. +`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if +nothing is there that means mount is `private`. + +To change propagation properties of a mount point use `mount` command. For +example, if one wants to bind mount source directory `/foo` one can do +`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This +will convert /foo into a `shared` mount point. Alternatively one can directly +change propagation properties of source mount. Say `/` is source mount for +`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. + +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. The value of this setting may cause Docker to not +> see mount propagation changes made on the mount point. For example, if this value +> is `slave`, you may not be able to use the `shared` or `rshared` propagation on +> a volume. + + +To disable automatic copying of data from the container path to the volume, use +the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. + +**--volume-driver**="" + Container's volume driver. This driver creates volumes specified either from + a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. + See **docker-volume-create(1)** for full details. + +**--volumes-from**=[] + Mount volumes from the specified container(s) + +**-w**, **--workdir**="" + Working directory inside the container + +# EXAMPLES + +## Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + +* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. +* `process`: Namespace isolation only. +* `hyperv`: Hyper-V hypervisor partition-based isolation. + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + +# HISTORY +August 2014, updated by Sven Dowideit +September 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-diff.1.md b/vendor/github.com/docker/docker/man/docker-diff.1.md new file mode 100644 index 0000000..6c6c502 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-diff.1.md @@ -0,0 +1,49 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-diff - Inspect changes on a container's filesystem + +# SYNOPSIS +**docker diff** +[**--help**] +CONTAINER + +# DESCRIPTION +Inspect changes on a container's filesystem. You can use the full or +shortened container ID or the container name set using +**docker run --name** option. + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES +Inspect the changes to on a nginx container: + + # docker diff 1fdfd1f54c1b + C /dev + C /dev/console + C /dev/core + C /dev/stdout + C /dev/fd + C /dev/ptmx + C /dev/stderr + C /dev/stdin + C /run + A /run/nginx.pid + C /var/lib/nginx/tmp + A /var/lib/nginx/tmp/client_body + A /var/lib/nginx/tmp/fastcgi + A /var/lib/nginx/tmp/proxy + A /var/lib/nginx/tmp/scgi + A /var/lib/nginx/tmp/uwsgi + C /var/log/nginx + A /var/log/nginx/access.log + A /var/log/nginx/error.log + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-events.1.md b/vendor/github.com/docker/docker/man/docker-events.1.md new file mode 100644 index 0000000..51b0427 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-events.1.md @@ -0,0 +1,180 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-events - Get real time events from the server + +# SYNOPSIS +**docker events** +[**--help**] +[**-f**|**--filter**[=*[]*]] +[**--since**[=*SINCE*]] +[**--until**[=*UNTIL*]] +[**--format**[=*FORMAT*]] + + +# DESCRIPTION +Get event information from the Docker daemon. Information can include historical +information and real-time information. + +Docker containers will report the following events: + + attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, load, pull, push, save, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--filter**=[] + Filter output based on these conditions + - container (`container=`) + - event (`event=`) + - image (`image=`) + - plugin (experimental) (`plugin=`) + - label (`label=` or `label==`) + - type (`type=`) + - volume (`volume=`) + - network (`network=`) + - daemon (`daemon=`) + +**--since**="" + Show all events created since timestamp + +**--until**="" + Stream events until this timestamp + +**--format**="" + Format the output using the given Go template + +The `--since` and `--until` parameters can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the client machine's time. If you do not provide the `--since` option, +the command returns only new and/or live events. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +# EXAMPLES + +## Listening for Docker events + +After running docker events a container 786d698004576 is started and stopped +(The container name has been shortened in the output below): + + # docker events + 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) start + 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) die + 2015-01-28T20:21:32.000000000-08:00 59211849bc10: (from whenry/testimage:latest) stop + +## Listening for events since a given date +Again the output container IDs have been shortened for the purposes of this document: + + # docker events --since '2015-01-28' + 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create + 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start + 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create + 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start + 2015-01-28T20:25:40.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die + 2015-01-28T20:25:42.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop + 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start + 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die + 2015-01-28T20:25:46.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop + +The following example outputs all events that were generated in the last 3 minutes, +relative to the current time on the client machine: + + # docker events --since '3m' + 2015-05-12T11:51:30.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2015-05-12T15:52:12.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2015-05-12T15:53:45.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2015-05-12T15:54:03.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + +If you do not provide the --since option, the command returns only new and/or +live events. + +## Format + +If a format (`--format`) is specified, the given template will be executed +instead of the default format. Go's **text/template** package describes all the +details of the format. + + # docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' + Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + +If a format is set to `{{json .}}`, the events are streamed as valid JSON +Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . + + # docker events --format '{{json .}}' + {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. + {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. + {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + +## Filters + + $ docker events --filter 'event=stop' + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'image=ubuntu-1:14.04' + 2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + + $ docker events --filter 'container=7805c1d35632' + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' + 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'event=stop' + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'type=volume' + 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) + 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) + 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) + 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + + $ docker events --filter 'type=network' + 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) + 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) + + $ docker events --filter 'type=plugin' (experimental) + 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +June 2015, updated by Brian Goff +October 2015, updated by Mike Brown diff --git a/vendor/github.com/docker/docker/man/docker-exec.1.md b/vendor/github.com/docker/docker/man/docker-exec.1.md new file mode 100644 index 0000000..fe9c279 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-exec.1.md @@ -0,0 +1,71 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-exec - Run a command in a running container + +# SYNOPSIS +**docker exec** +[**-d**|**--detach**] +[**--detach-keys**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--help**] +[**-i**|**--interactive**] +[**--privileged**] +[**-t**|**--tty**] +[**-u**|**--user**[=*USER*]] +CONTAINER COMMAND [ARG...] + +# DESCRIPTION + +Run a process in a running container. + +The command started using `docker exec` will only run while the container's primary +process (`PID 1`) is running, and will not be restarted if the container is restarted. + +If the container is paused, then the `docker exec` command will wait until the +container is unpaused, and then run + +# OPTIONS +**-d**, **--detach**=*true*|*false* + Detached mode: run command in the background. The default is *false*. + +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**-e**, **--env**=[] + Set environment variables + + This option allows you to specify arbitrary environment variables that are +available for the command to be executed. + +**--help** + Print usage statement + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + +**--privileged**=*true*|*false* + Give the process extended [Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) +when running in a container. The default is *false*. + + Without this flag, the process run by `docker exec` in a running container has +the same capabilities as the container, which may be limited. Set +`--privileged` to give all capabilities to the process. + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + +**-u**, **--user**="" + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument the command will be run as root in the container. + +The **-t** option is incompatible with a redirection of the docker client +standard input. + +# HISTORY +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-export.1.md b/vendor/github.com/docker/docker/man/docker-export.1.md new file mode 100644 index 0000000..3d59e47 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-export.1.md @@ -0,0 +1,46 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-export - Export the contents of a container's filesystem as a tar archive + +# SYNOPSIS +**docker export** +[**--help**] +[**-o**|**--output**[=*""*]] +CONTAINER + +# DESCRIPTION +Export the contents of a container's filesystem using the full or shortened +container ID or container name. The output is exported to STDOUT and can be +redirected to a tar file. + +Stream to a file instead of STDOUT by using **-o**. + +# OPTIONS +**--help** + Print usage statement + +**-o**, **--output**="" + Write to a file, instead of STDOUT + +# EXAMPLES +Export the contents of the container called angry_bell to a tar file +called angry_bell.tar: + + # docker export angry_bell > angry_bell.tar + # docker export --output=angry_bell-latest.tar angry_bell + # ls -sh angry_bell.tar + 321M angry_bell.tar + # ls -sh angry_bell-latest.tar + 321M angry_bell-latest.tar + +# See also +**docker-import(1)** to create an empty filesystem image +and import the contents of the tarball into it, then optionally tag it. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +January 2015, updated by Joseph Kern (josephakern at gmail dot com) diff --git a/vendor/github.com/docker/docker/man/docker-history.1.md b/vendor/github.com/docker/docker/man/docker-history.1.md new file mode 100644 index 0000000..91edefe --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-history.1.md @@ -0,0 +1,52 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-history - Show the history of an image + +# SYNOPSIS +**docker history** +[**--help**] +[**-H**|**--human**[=*true*]] +[**--no-trunc**] +[**-q**|**--quiet**] +IMAGE + +# DESCRIPTION + +Show the history of when and how an image was created. + +# OPTIONS +**--help** + Print usage statement + +**-H**, **--human**=*true*|*false* + Print sizes and dates in human readable format. The default is *true*. + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only show numeric IDs. The default is *false*. + +# EXAMPLES + $ docker history fedora + IMAGE CREATED CREATED BY SIZE COMMENT + 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB + 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B + 511136ea3c5a 10 months ago 0 B Imported from - + +## Display comments in the image history +The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history. + + $ sudo docker history docker:scm + IMAGE CREATED CREATED BY SIZE COMMENT + 2ac9d1098bf1 3 months ago /bin/bash 241.4 MB Added Apache to Fedora base image + 88b42ffd1f7c 5 months ago /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7 373.7 MB + c69cab00d6ef 5 months ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B + 511136ea3c5a 19 months ago 0 B Imported from - + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-images.1.md b/vendor/github.com/docker/docker/man/docker-images.1.md new file mode 100644 index 0000000..d7958d0 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-images.1.md @@ -0,0 +1,153 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-images - List images + +# SYNOPSIS +**docker images** +[**--help**] +[**-a**|**--all**] +[**--digests**] +[**-f**|**--filter**[=*[]*]] +[**--format**=*"TEMPLATE"*] +[**--no-trunc**] +[**-q**|**--quiet**] +[REPOSITORY[:TAG]] + +# DESCRIPTION +This command lists the images stored in the local Docker repository. + +By default, intermediate images, used during builds, are not listed. Some of the +output, e.g., image ID, is truncated, for space reasons. However the truncated +image ID, and often the first few characters, are enough to be used in other +Docker commands that use the image ID. The output includes repository, tag, image +ID, date created and the virtual size. + +The title REPOSITORY for the first title may seem confusing. It is essentially +the image name. However, because you can tag a specific image, and multiple tags +(image instances) can be associated with a single name, the name is really a +repository for all tagged images of the same name. For example consider an image +called fedora. It may be tagged with 18, 19, or 20, etc. to manage different +versions. + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all images (by default filter out the intermediate image layers). The default is *false*. + +**--digests**=*true*|*false* + Show image digests. The default is *false*. + +**-f**, **--filter**=[] + Filters the output based on these conditions: + - dangling=(true|false) - find unused images + - label= or label== + - before=([:tag]||) + - since=([:tag]||) + +**--format**="*TEMPLATE*" + Pretty-print images using a Go template. + Valid placeholders: + .ID - Image ID + .Repository - Image repository + .Tag - Image tag + .Digest - Image digest + .CreatedSince - Elapsed time since the image was created + .CreatedAt - Time when the image was created + .Size - Image disk size + +**--help** + Print usage statement + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only show numeric IDs. The default is *false*. + +# EXAMPLES + +## Listing the images + +To list the images in a local repository (not the registry) run: + + docker images + +The list will contain the image repository name, a tag for the image, and an +image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, +IMAGE ID, CREATED, and SIZE. + +The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument +that restricts the list to images that match the argument. If you specify +`REPOSITORY`but no `TAG`, the `docker images` command lists all images in the +given repository. + + docker images java + +The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, +`docker images jav` does not match the image `java`. + +If both `REPOSITORY` and `TAG` are provided, only images matching that +repository and tag are listed. To find all local images in the "java" +repository with tag "8" you can use: + + docker images java:8 + +To get a verbose list of images which contains all the intermediate images +used in builds use **-a**: + + docker images -a + +Previously, the docker images command supported the --tree and --dot arguments, +which displayed different visualizations of the image data. Docker core removed +this functionality in the 1.7 version. If you liked this functionality, you can +still find it in the third-party dockviz tool: https://github.com/justone/dockviz. + +## Listing images in a desired format + +When using the --format option, the image command will either output the data +exactly as the template declares or, when using the `table` directive, will +include column headers as well. You can use special characters like `\t` for +inserting tab spacing between columns. + +The following example uses a template without headers and outputs the ID and +Repository entries separated by a colon for all images: + + docker images --format "{{.ID}}: {{.Repository}}" + 77af4d6b9913: + b6fa739cedf5: committ + 78a85c484bad: ipbabble + 30557a29d5ab: docker + 5ed6274db6ce: + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + +To list all images with their repository and tag in a table format you can use: + + docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" + IMAGE ID REPOSITORY TAG + 77af4d6b9913 + b6fa739cedf5 committ latest + 78a85c484bad ipbabble + 30557a29d5ab docker latest + 5ed6274db6ce + 746b819f315e postgres 9 + 746b819f315e postgres 9.3 + 746b819f315e postgres 9.3.5 + 746b819f315e postgres latest + +Valid template placeholders are listed above. + +## Listing only the shortened image IDs + +Listing just the shortened image IDs. This can be useful for some automated +tools. + + docker images -q + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-import.1.md b/vendor/github.com/docker/docker/man/docker-import.1.md new file mode 100644 index 0000000..43d65ef --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-import.1.md @@ -0,0 +1,72 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. + +# SYNOPSIS +**docker import** +[**-c**|**--change**[=*[]*]] +[**-m**|**--message**[=*MESSAGE*]] +[**--help**] +file|URL|**-**[REPOSITORY[:TAG]] + +# OPTIONS +**-c**, **--change**=[] + Apply specified Dockerfile instructions while importing the image + Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +**--help** + Print usage statement + +**-m**, **--message**="" + Set commit message for imported image + +# DESCRIPTION +Create a new filesystem image from the contents of a tarball (`.tar`, +`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it. + + +# EXAMPLES + +## Import from a remote location + + # docker import http://example.com/exampleimage.tgz example/imagerepo + +## Import from a local file + +Import to docker via pipe and stdin: + + # cat exampleimage.tgz | docker import - example/imagelocal + +Import with a commit message. + + # cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new + +Import to a Docker image from a local file. + + # docker import /path/to/exampleimage.tgz + + +## Import from a local file and tag + +Import to docker via pipe and stdin: + + # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0 + +## Import from a local directory + + # tar -c . | docker import - exampleimagedir + +## Apply specified Dockerfile instructions while importing the image +This example sets the docker image ENV variable DEBUG to true by default. + + # tar -c . | docker import -c="ENV DEBUG true" - exampleimagedir + +# See also +**docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-info.1.md b/vendor/github.com/docker/docker/man/docker-info.1.md new file mode 100644 index 0000000..bb7a8fb --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-info.1.md @@ -0,0 +1,187 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-info - Display system-wide information + +# SYNOPSIS +**docker info** +[**--help**] +[**-f**|**--format**[=*FORMAT*]] + +# DESCRIPTION +This command displays system wide information regarding the Docker installation. +Information displayed includes the kernel version, number of containers and images. +The number of images shown is the number of unique images. The same image tagged +under different names is counted only once. + +If a format is specified, the given template will be executed instead of the +default format. Go's **text/template** package +describes all the details of the format. + +Depending on the storage driver in use, additional information can be shown, such +as pool name, data file, metadata file, data space used, total data space, metadata +space used, and total metadata space. + +The data file is where the images are stored and the metadata file is where the +meta data regarding those images are stored. When run for the first time Docker +allocates a certain amount of data space and meta data space from the space +available on the volume where `/var/lib/docker` is mounted. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--format**="" + Format the output using the given Go template + +# EXAMPLES + +## Display Docker system information + +Here is a sample output for a daemon running on Ubuntu, using the overlay2 +storage driver: + + $ docker -D info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Images: 52 + Server Version: 1.13.0 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Native Overlay Diff: false + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host macvlan null overlay + Swarm: active + NodeID: rdjq45w1op418waxlairloqbm + Is Manager: true + ClusterID: te8kdyw33n36fqiz74bfjeixd + Managers: 1 + Nodes: 2 + Orchestration: + Task History Retention Limit: 5 + Raft: + Snapshot Interval: 10000 + Number of Old Snapshots to Retain: 0 + Heartbeat Tick: 1 + Election Tick: 3 + Dispatcher: + Heartbeat Period: 5 seconds + CA Configuration: + Expiry Duration: 3 months + Node Address: 172.16.66.128 172.16.66.129 + Manager Addresses: + 172.16.66.128:2477 + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 + runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 + init version: N/A (expected: v0.13.0) + Security Options: + apparmor + seccomp + Profile: default + Kernel Version: 4.4.0-31-generic + Operating System: Ubuntu 16.04.1 LTS + OSType: linux + Architecture: x86_64 + CPUs: 2 + Total Memory: 1.937 GiB + Name: ubuntu + ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 + Docker Root Dir: /var/lib/docker + Debug Mode (client): true + Debug Mode (server): true + File Descriptors: 30 + Goroutines: 123 + System Time: 2016-11-12T17:24:37.955404361-08:00 + EventsListeners: 0 + Http Proxy: http://test:test@proxy.example.com:8080 + Https Proxy: https://test:test@proxy.example.com:8080 + No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com + Registry: https://index.docker.io/v1/ + WARNING: No swap limit support + Labels: + storage=ssd + staging=true + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ + Live Restore Enabled: false + + + +The global `-D` option tells all `docker` commands to output debug information. + +The example below shows the output for a daemon running on Red Hat Enterprise Linux, +using the devicemapper storage driver. As can be seen in the output, additional +information about the devicemapper storage driver is shown: + + $ docker info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Untagged Images: 52 + Server Version: 1.10.3 + Storage Driver: devicemapper + Pool Name: docker-202:2-25583803-pool + Pool Blocksize: 65.54 kB + Base Device Size: 10.74 GB + Backing Filesystem: xfs + Data file: /dev/loop0 + Metadata file: /dev/loop1 + Data Space Used: 1.68 GB + Data Space Total: 107.4 GB + Data Space Available: 7.548 GB + Metadata Space Used: 2.322 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.145 GB + Udev Sync Supported: true + Deferred Removal Enabled: false + Deferred Deletion Enabled: false + Deferred Deleted Device Count: 0 + Data loop file: /var/lib/docker/devicemapper/devicemapper/data + Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.107-RHEL7 (2015-12-01) + Execution Driver: native-0.2 + Logging Driver: json-file + Plugins: + Volume: local + Network: null host bridge + Kernel Version: 3.10.0-327.el7.x86_64 + Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) + OSType: linux + Architecture: x86_64 + CPUs: 1 + Total Memory: 991.7 MiB + Name: ip-172-30-0-91.ec2.internal + ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S + Docker Root Dir: /var/lib/docker + Debug mode (client): false + Debug mode (server): false + Username: gordontheturtle + Registry: https://index.docker.io/v1/ + Insecure registries: + myinsecurehost:5000 + 127.0.0.0/8 + +You can also specify the output format: + + $ docker info --format '{{json .}}' + {"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-inspect.1.md b/vendor/github.com/docker/docker/man/docker-inspect.1.md new file mode 100644 index 0000000..21d7ba6 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-inspect.1.md @@ -0,0 +1,323 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-inspect - Return low-level information on docker objects + +# SYNOPSIS +**docker inspect** +[**--help**] +[**-f**|**--format**[=*FORMAT*]] +[**-s**|**--size**] +[**--type**=*container*|*image*|*network*|*node*|*service*|*task*|*volume*] +NAME|ID [NAME|ID...] + +# DESCRIPTION + +This displays the low-level information on Docker object(s) (e.g. container, +image, volume,network, node, service, or task) identified by name or ID. By default, +this will render all results in a JSON array. If the container and image have +the same name, this will return container JSON for unspecified type. If a format +is specified, the given template will be executed for each result. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--format**="" + Format the output using the given Go template + +**-s**, **--size** + Display total file sizes if the type is container + +**--type**=*container*|*image*|*network*|*node*|*service*|*task*|*volume* + Return JSON for specified type, permissible values are "image", "container", + "network", "node", "service", "task", and "volume" + +# EXAMPLES + +Get information about an image when image name conflicts with the container name, +e.g. both image and container are named rhel7: + + $ docker inspect --type=image rhel7 + [ + { + "Id": "fe01a428b9d9de35d29531e9994157978e8c48fa693e1bf1d221dffbbb67b170", + "Parent": "10acc31def5d6f249b548e01e8ffbaccfd61af0240c17315a7ad393d022c5ca2", + .... + } + ] + +## Getting information on a container + +To get information on a container use its ID or instance name: + + $ docker inspect d2cc496561d6 + [{ + "Id": "d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", + "Created": "2015-06-08T16:18:02.505155285Z", + "Path": "bash", + "Args": [], + "State": { + "Running": false, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 0, + "ExitCode": 0, + "Error": "", + "StartedAt": "2015-06-08T16:18:03.643865954Z", + "FinishedAt": "2015-06-08T16:57:06.448552862Z" + }, + "Image": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "6b4851d1903e16dd6a567bd526553a86664361f31036eaaa2f8454d6f4611f6f", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": {}, + "SandboxKey": "/var/run/docker/netns/6b4851d1903e", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "MacAddress": "02:42:ac:12:00:02", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + + }, + "ResolvConfPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hostname", + "HostsPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hosts", + "LogPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47-json.log", + "Name": "/adoring_wozniak", + "RestartCount": 0, + "Driver": "devicemapper", + "MountLabel": "", + "ProcessLabel": "", + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + "Propagation": "" + } + ], + "AppArmorProfile": "", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "CpuPeriod": 0, + "CpusetCpus": "", + "CpusetMems": "", + "CpuQuota": 0, + "BlkioWeight": 0, + "OomKillDisable": false, + "Privileged": false, + "PortBindings": {}, + "Links": null, + "PublishAllPorts": false, + "Dns": null, + "DnsSearch": null, + "DnsOptions": null, + "ExtraHosts": null, + "VolumesFrom": null, + "Devices": [], + "NetworkMode": "bridge", + "IpcMode": "", + "PidMode": "", + "UTSMode": "", + "CapAdd": null, + "CapDrop": null, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "SecurityOpt": null, + "ReadonlyRootfs": false, + "Ulimits": null, + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + "CgroupParent": "" + }, + "GraphDriver": { + "Name": "devicemapper", + "Data": { + "DeviceId": "5", + "DeviceName": "docker-253:1-2763198-d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", + "DeviceSize": "171798691840" + } + }, + "Config": { + "Hostname": "d2cc496561d6", + "Domainname": "", + "User": "", + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "ExposedPorts": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": true, + "Env": null, + "Cmd": [ + "bash" + ], + "Image": "fedora", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "Cpuset": "", + "StopSignal": "SIGTERM" + } + } + ] +## Getting the IP address of a container instance + +To get the IP address of a container use: + + $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' d2cc496561d6 + 172.17.0.2 + +## Listing all port bindings + +One can loop over arrays and maps in the results to produce simple text +output: + + $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ + {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' d2cc496561d6 + 80/tcp -> 80 + +You can get more information about how to write a Go template from: +https://golang.org/pkg/text/template/. + +## Getting size information on a container + + $ docker inspect -s d2cc496561d6 + [ + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + ] + +## Getting information on an image + +Use an image's ID or name (e.g., repository/name[:tag]) to get information +about the image: + + $ docker inspect ded7cd95e059 + [{ + "Id": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "Parent": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Comment": "", + "Created": "2015-05-27T16:58:22.937503085Z", + "Container": "76cf7f67d83a7a047454b33007d03e32a8f474ad332c3a03c94537edd22b312b", + "ContainerConfig": { + "Hostname": "76cf7f67d83a", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ADD file:4be46382bcf2b095fcb9fe8334206b584eff60bb3fad8178cbd97697fcb2ea83 in /" + ], + "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {} + }, + "DockerVersion": "1.6.0", + "Author": "Lokesh Mandvekar \u003clsm5@fedoraproject.org\u003e", + "Config": { + "Hostname": "76cf7f67d83a", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": null, + "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {} + }, + "Architecture": "amd64", + "Os": "linux", + "Size": 186507296, + "VirtualSize": 186507296, + "GraphDriver": { + "Name": "devicemapper", + "Data": { + "DeviceId": "3", + "DeviceName": "docker-253:1-2763198-ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "DeviceSize": "171798691840" + } + } + } + ] + +# HISTORY +April 2014, originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Qiang Huang +October 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-kill.1.md b/vendor/github.com/docker/docker/man/docker-kill.1.md new file mode 100644 index 0000000..36cbdb9 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-kill.1.md @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-kill - Kill a running container using SIGKILL or a specified signal + +# SYNOPSIS +**docker kill** +[**--help**] +[**-s**|**--signal**[=*"KILL"*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The main process inside each container specified will be sent SIGKILL, + or any signal specified with option --signal. + +# OPTIONS +**--help** + Print usage statement + +**-s**, **--signal**="*KILL*" + Signal to send to the container + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) + based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-load.1.md b/vendor/github.com/docker/docker/man/docker-load.1.md new file mode 100644 index 0000000..b165173 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-load.1.md @@ -0,0 +1,56 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-load - Load an image from a tar archive or STDIN + +# SYNOPSIS +**docker load** +[**--help**] +[**-i**|**--input**[=*INPUT*]] +[**-q**|**--quiet**] + +# DESCRIPTION + +Loads a tarred repository from a file or the standard input stream. +Restores both images and tags. Write image names or IDs imported it +standard output stream. + +# OPTIONS +**--help** + Print usage statement + +**-i**, **--input**="" + Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz. + +**-q**, **--quiet** + Suppress the load progress bar but still outputs the imported images. + +# EXAMPLES + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + $ docker load --input fedora.tar + # […] + Loaded image: fedora:rawhide + # […] + Loaded image: fedora:20 + # […] + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + fedora rawhide 0d20aec6529d 7 weeks ago 387 MB + fedora 20 58394af37342 7 weeks ago 385.5 MB + fedora heisenbug 58394af37342 7 weeks ago 385.5 MB + fedora latest 58394af37342 7 weeks ago 385.5 MB + +# See also +**docker-save(1)** to save one or more images to a tar archive (streamed to STDOUT by default). + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2015 update by Mary Anthony +June 2016 update by Vincent Demeester diff --git a/vendor/github.com/docker/docker/man/docker-login.1.md b/vendor/github.com/docker/docker/man/docker-login.1.md new file mode 100644 index 0000000..c0d4f79 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-login.1.md @@ -0,0 +1,53 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-login - Log in to a Docker registry. + +# SYNOPSIS +**docker login** +[**--help**] +[**-p**|**--password**[=*PASSWORD*]] +[**-u**|**--username**[=*USERNAME*]] +[SERVER] + +# DESCRIPTION +Log in to a Docker Registry located on the specified +`SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you +do not specify a `SERVER`, the command uses Docker's public registry located at +`https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub. + +`docker login` requires user to use `sudo` or be `root`, except when: + +1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. +2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/engine/articles/security/#docker-daemon-attack-surface) for details. + +You can log into any public or private repository for which you have +credentials. When you log in, the command stores encoded credentials in +`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. + +# OPTIONS +**--help** + Print usage statement + +**-p**, **--password**="" + Password + +**-u**, **--username**="" + Username + +# EXAMPLES + +## Login to a registry on your localhost + + # docker login localhost:8080 + +# See also +**docker-logout(1)** to log out from a Docker registry. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +November 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-logout.1.md b/vendor/github.com/docker/docker/man/docker-logout.1.md new file mode 100644 index 0000000..a8a4b7c --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-logout.1.md @@ -0,0 +1,32 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logout - Log out from a Docker registry. + +# SYNOPSIS +**docker logout** +[SERVER] + +# DESCRIPTION +Log out of a Docker Registry located on the specified `SERVER`. You can +specify a URL or a `hostname` for the `SERVER` value. If you do not specify a +`SERVER`, the command attempts to log you out of Docker's public registry +located at `https://registry-1.docker.io/` by default. + +# OPTIONS +There are no available options. + +# EXAMPLES + +## Log out from a registry on your localhost + + # docker logout localhost:8080 + +# See also +**docker-login(1)** to log in to a Docker registry server. + +# HISTORY +June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) +July 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 diff --git a/vendor/github.com/docker/docker/man/docker-logs.1.md b/vendor/github.com/docker/docker/man/docker-logs.1.md new file mode 100644 index 0000000..e70f796 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-logs.1.md @@ -0,0 +1,71 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logs - Fetch the logs of a container + +# SYNOPSIS +**docker logs** +[**-f**|**--follow**] +[**--help**] +[**--since**[=*SINCE*]] +[**-t**|**--timestamps**] +[**--tail**[=*"all"*]] +CONTAINER + +# DESCRIPTION +The **docker logs** command batch-retrieves whatever logs are present for +a container at the time of execution. This does not guarantee execution +order when combined with a docker run (i.e., your run may not have generated +any logs at the time you execute docker logs). + +The **docker logs --follow** command combines commands **docker logs** and +**docker attach**. It will first return all logs from the beginning and +then continue streaming new output from the container's stdout and stderr. + +**Warning**: This command works only for the **json-file** or **journald** +logging drivers. + +# OPTIONS +**--help** + Print usage statement + +**--details**=*true*|*false* + Show extra details provided to logs + +**-f**, **--follow**=*true*|*false* + Follow log output. The default is *false*. + +**--since**="" + Show logs since timestamp + +**-t**, **--timestamps**=*true*|*false* + Show timestamps. The default is *false*. + +**--tail**="*all*" + Output the specified number of lines at the end of logs (defaults to all logs) + +The `--since` option can be Unix timestamps, date formatted timestamps, or Go +duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine's +time. Supported formats for date formatted time stamps include RFC3339Nano, +RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, +`2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be +used if you do not provide either a `Z` or a `+-00:00` timezone offset at the +end of the timestamp. When providing Unix timestamps enter +seconds[.nanoseconds], where seconds is the number of seconds that have elapsed +since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix +epoch or Unix time), and the optional .nanoseconds field is a fraction of a +second no more than nine digits long. You can combine the `--since` option with +either or both of the `--follow` or `--tail` options. + +The `docker logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +container. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +April 2015, updated by Ahmet Alp Balkan +October 2015, updated by Mike Brown diff --git a/vendor/github.com/docker/docker/man/docker-network-connect.1.md b/vendor/github.com/docker/docker/man/docker-network-connect.1.md new file mode 100644 index 0000000..096ec77 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-connect.1.md @@ -0,0 +1,66 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-connect - connect a container to a network + +# SYNOPSIS +**docker network connect** +[**--help**] +NETWORK CONTAINER + +# DESCRIPTION + +Connects a container to a network. You can connect a container by name +or by ID. Once connected, the container can communicate with other containers in +the same network. + +```bash +$ docker network connect multi-host-network container1 +``` + +You can also use the `docker run --network=` option to start a container and immediately connect it to a network. + +```bash +$ docker run -itd --network=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::8822 busybox +``` +You can pause, restart, and stop containers that are connected to a network. +A container connects to its configured networks when it runs. + +If specified, the container's IP address(es) is reapplied when a stopped +container is restarted. If the IP address is no longer available, the container +fails to start. One way to guarantee that the IP address is available is +to specify an `--ip-range` when creating the network, and choose the static IP +address(es) from outside that range. This ensures that the IP address is not +given to another container while this container is not on the network. + +```bash +$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network +``` + +```bash +$ docker network connect --ip 172.20.128.2 multi-host-network container2 +``` + +To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. + +Once connected in network, containers can communicate using only another +container's IP address or name. For `overlay` networks or custom plugins that +support multi-host connectivity, containers connected to the same multi-host +network but launched from different Engines can also communicate in this way. + +You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. + + +# OPTIONS +**NETWORK** + Specify network name + +**CONTAINER** + Specify container name + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-create.1.md b/vendor/github.com/docker/docker/man/docker-network-create.1.md new file mode 100644 index 0000000..44ce8e1 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-create.1.md @@ -0,0 +1,187 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-create - create a new network + +# SYNOPSIS +**docker network create** +[**--attachable**] +[**--aux-address**=*map[]*] +[**-d**|**--driver**=*DRIVER*] +[**--gateway**=*[]*] +[**--help**] +[**--internal**] +[**--ip-range**=*[]*] +[**--ipam-driver**=*default*] +[**--ipam-opt**=*map[]*] +[**--ipv6**] +[**--label**[=*[]*]] +[**-o**|**--opt**=*map[]*] +[**--subnet**=*[]*] +NETWORK-NAME + +# DESCRIPTION + +Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the +built-in network drivers. If you have installed a third party or your own custom +network driver you can specify that `DRIVER` here also. If you don't specify the +`--driver` option, the command automatically creates a `bridge` network for you. +When you install Docker Engine it creates a `bridge` network automatically. This +network corresponds to the `docker0` bridge that Engine has traditionally relied +on. When launch a new container with `docker run` it automatically connects to +this bridge network. You cannot remove this default bridge network but you can +create new ones using the `network create` command. + +```bash +$ docker network create -d bridge my-bridge-network +``` + +Bridge networks are isolated networks on a single Engine installation. If you +want to create a network that spans multiple Docker hosts each running an +Engine, you must create an `overlay` network. Unlike `bridge` networks overlay +networks require some pre-existing conditions before you can create one. These +conditions are: + +* Access to a key-value store. Engine supports Consul, Etcd, and Zookeeper (Distributed store) key-value stores. +* A cluster of hosts with connectivity to the key-value store. +* A properly configured Engine `daemon` on each host in the cluster. + +The `dockerd` options that support the `overlay` network are: + +* `--cluster-store` +* `--cluster-store-opt` +* `--cluster-advertise` + +To read more about these options and how to configure them, see ["*Get started +with multi-host +network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay/). + +It is also a good idea, though not required, that you install Docker Swarm on to +manage the cluster that makes up your network. Swarm provides sophisticated +discovery and server management that can assist your implementation. + +Once you have prepared the `overlay` network prerequisites you simply choose a +Docker host in the cluster and issue the following to create the network: + +```bash +$ docker network create -d overlay my-multihost-network +``` + +Network names must be unique. The Docker daemon attempts to identify naming +conflicts but this is not guaranteed. It is the user's responsibility to avoid +name conflicts. + +## Connect containers + +When you start a container use the `--network` flag to connect it to a network. +This adds the `busybox` container to the `mynet` network. + +```bash +$ docker run -itd --network=mynet busybox +``` + +If you want to add a container to a network after the container is already +running use the `docker network connect` subcommand. + +You can connect multiple containers to the same network. Once connected, the +containers can communicate using only another container's IP address or name. +For `overlay` networks or custom plugins that support multi-host connectivity, +containers connected to the same multi-host network but launched from different +Engines can also communicate in this way. + +You can disconnect a container from a network using the `docker network +disconnect` command. + +## Specifying advanced options + +When you create a network, Engine creates a non-overlapping subnetwork for the +network by default. This subnetwork is not a subdivision of an existing network. +It is purely for ip-addressing purposes. You can override this default and +specify subnetwork values directly using the `--subnet` option. On a +`bridge` network you can only create a single subnet: + +```bash +$ docker network create -d bridge --subnet=192.168.0.0/16 br0 +``` + +Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` +options. + +```bash +$ docker network create \ + --driver=bridge \ + --subnet=172.28.0.0/16 \ + --ip-range=172.28.5.0/24 \ + --gateway=172.28.5.254 \ + br0 +``` + +If you omit the `--gateway` flag the Engine selects one for you from inside a +preferred pool. For `overlay` networks and for network driver plugins that +support it you can create multiple subnetworks. + +```bash +$ docker network create -d overlay \ + --subnet=192.168.0.0/16 \ + --subnet=192.170.0.0/16 \ + --gateway=192.168.0.100 \ + --gateway=192.170.0.100 \ + --ip-range=192.168.1.0/24 \ + --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ + --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ + my-multihost-network +``` + +Be sure that your subnetworks do not overlap. If they do, the network create +fails and Engine returns an error. + +### Network internal mode + +By default, when you connect a container to an `overlay` network, Docker also +connects a bridge network to it to provide external connectivity. If you want +to create an externally isolated `overlay` network, you can specify the +`--internal` option. + +# OPTIONS +**--attachable** + Enable manual container attachment + +**--aux-address**=map[] + Auxiliary IPv4 or IPv6 addresses used by network driver + +**-d**, **--driver**=*DRIVER* + Driver to manage the Network bridge or overlay. The default is bridge. + +**--gateway**=[] + IPv4 or IPv6 Gateway for the master subnet + +**--help** + Print usage + +**--internal** + Restrict external access to the network + +**--ip-range**=[] + Allocate container ip from a sub-range + +**--ipam-driver**=*default* + IP Address Management Driver + +**--ipam-opt**=map[] + Set custom IPAM driver options + +**--ipv6** + Enable IPv6 networking + +**--label**=*label* + Set metadata for a network + +**-o**, **--opt**=map[] + Set custom driver options + +**--subnet**=[] + Subnet in CIDR format that represents a network segment + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-disconnect.1.md b/vendor/github.com/docker/docker/man/docker-network-disconnect.1.md new file mode 100644 index 0000000..09bcac5 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-disconnect.1.md @@ -0,0 +1,36 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-disconnect - disconnect a container from a network + +# SYNOPSIS +**docker network disconnect** +[**--help**] +[**--force**] +NETWORK CONTAINER + +# DESCRIPTION + +Disconnects a container from a network. + +```bash + $ docker network disconnect multi-host-network container1 +``` + + +# OPTIONS +**NETWORK** + Specify network name + +**CONTAINER** + Specify container name + +**--force** + Force the container to disconnect from a network + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-inspect.1.md b/vendor/github.com/docker/docker/man/docker-network-inspect.1.md new file mode 100644 index 0000000..f27c98c --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-inspect.1.md @@ -0,0 +1,112 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-inspect - inspect a network + +# SYNOPSIS +**docker network inspect** +[**-f**|**--format**[=*FORMAT*]] +[**--help**] +NETWORK [NETWORK...] + +# DESCRIPTION + +Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: + +```bash +$ sudo docker run -itd --name=container1 busybox +f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 + +$ sudo docker run -itd --name=container2 busybox +bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 +``` + +The `network inspect` command shows the containers, by id, in its +results. You can specify an alternate format to execute a given +template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +```bash +$ sudo docker network inspect bridge +[ + { + "Name": "bridge", + "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.42.1/16", + "Gateway": "172.17.42.1" + } + ] + }, + "Internal": false, + "Containers": { + "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { + "Name": "container2", + "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + }, + "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { + "Name": "container1", + "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", + "MacAddress": "02:42:ac:11:00:01", + "IPv4Address": "172.17.0.1/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + } +] +``` + +Returns the information about the user-defined network: + +```bash +$ docker network create simple-network +69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a +$ docker network inspect simple-network +[ + { + "Name": "simple-network", + "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.22.0.0/16", + "Gateway": "172.22.0.1" + } + ] + }, + "Containers": {}, + "Options": {} + } +] +``` + +# OPTIONS +**-f**, **--format**="" + Format the output using the given Go template. + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-ls.1.md b/vendor/github.com/docker/docker/man/docker-network-ls.1.md new file mode 100644 index 0000000..f319e66 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-ls.1.md @@ -0,0 +1,188 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-ls - list networks + +# SYNOPSIS +**docker network ls** +[**-f**|**--filter**[=*[]*]] +[**--format**=*"TEMPLATE"*] +[**--no-trunc**[=*true*|*false*]] +[**-q**|**--quiet**[=*true*|*false*]] +[**--help**] + +# DESCRIPTION + +Lists all the networks the Engine `daemon` knows about. This includes the +networks that span across multiple hosts in a cluster, for example: + +```bash + $ docker network ls + NETWORK ID NAME DRIVER SCOPE + 7fca4eb8c647 bridge bridge local + 9f904ee27bf5 none null local + cf03ee007fb4 host host local + 78b03ee04fc4 multi-host overlay swarm +``` + +Use the `--no-trunc` option to display the full network id: + +```bash +$ docker network ls --no-trunc +NETWORK ID NAME DRIVER +18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null +c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host +7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge +95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge +63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. + +The currently supported filters are: + +* driver +* id (network's id) +* label (`label=` or `label==`) +* name (network's name) +* type (custom|builtin) + +#### Driver + +The `driver` filter matches networks based on their driver. + +The following example matches networks with the `bridge` driver: + +```bash +$ docker network ls --filter driver=bridge +NETWORK ID NAME DRIVER +db9db329f835 test1 bridge +f6e212da9dfd test2 bridge +``` + +#### ID + +The `id` filter matches on all or part of a network's ID. + +The following filter matches all networks with an ID containing the +`63d1ff1f77b0...` string. + +```bash +$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 +NETWORK ID NAME DRIVER +63d1ff1f77b0 dev bridge +``` + +You can also filter for a substring in an ID as this shows: + +```bash +$ docker network ls --filter id=95e74588f40d +NETWORK ID NAME DRIVER +95e74588f40d foo bridge + +$ docker network ls --filter id=95e +NETWORK ID NAME DRIVER +95e74588f40d foo bridge +``` + +#### Label + +The `label` filter matches networks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches networks with the `usage` label regardless of its value. + +```bash +$ docker network ls -f "label=usage" +NETWORK ID NAME DRIVER +db9db329f835 test1 bridge +f6e212da9dfd test2 bridge +``` + +The following filter matches networks with the `usage` label with the `prod` value. + +```bash +$ docker network ls -f "label=usage=prod" +NETWORK ID NAME DRIVER +f6e212da9dfd test2 bridge +``` + +#### Name + +The `name` filter matches on all or part of a network's name. + +The following filter matches all networks with a name containing the `foobar` string. + +```bash +$ docker network ls --filter name=foobar +NETWORK ID NAME DRIVER +06e7eef0a170 foobar bridge +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker network ls --filter name=foo +NETWORK ID NAME DRIVER +95e74588f40d foo bridge +06e7eef0a170 foobar bridge +``` + +#### Type + +The `type` filter supports two values; `builtin` displays predefined networks +(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. + +The following filter matches all user defined networks: + +```bash +$ docker network ls --filter type=custom +NETWORK ID NAME DRIVER +95e74588f40d foo bridge +63d1ff1f77b0 dev bridge +``` + +By having this flag it allows for batch cleanup. For example, use this filter +to delete all user defined networks: + +```bash +$ docker network rm `docker network ls --filter type=custom -q` +``` + +A warning will be issued when trying to remove a network that has containers +attached. + +# OPTIONS + +**-f**, **--filter**=*[]* + filter output based on conditions provided. + +**--format**="*TEMPLATE*" + Pretty-print networks using a Go template. + Valid placeholders: + .ID - Network ID + .Name - Network name + .Driver - Network driver + .Scope - Network scope (local, global) + .IPv6 - Whether IPv6 is enabled on the network or not + .Internal - Whether the network is internal or not + .Labels - All labels assigned to the network + .Label - Value of a specific label for this network. For example `{{.Label "project.version"}}` + +**--no-trunc**=*true*|*false* + Do not truncate the output + +**-q**, **--quiet**=*true*|*false* + Only display network IDs + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-rm.1.md b/vendor/github.com/docker/docker/man/docker-network-rm.1.md new file mode 100644 index 0000000..c094a15 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-rm.1.md @@ -0,0 +1,43 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-rm - remove one or more networks + +# SYNOPSIS +**docker network rm** +[**--help**] +NETWORK [NETWORK...] + +# DESCRIPTION + +Removes one or more networks by name or identifier. To remove a network, +you must first disconnect any containers connected to it. +To remove the network named 'my-network': + +```bash + $ docker network rm my-network +``` + +To delete multiple networks in a single `docker network rm` command, provide +multiple network names or ids. The following example deletes a network with id +`3695c422697f` and a network named `my-network`: + +```bash + $ docker network rm 3695c422697f my-network +``` + +When you specify multiple networks, the command attempts to delete each in turn. +If the deletion of one network fails, the command continues to the next on the +list and tries to delete that. The command reports success or failure for each +deletion. + +# OPTIONS +**NETWORK** + Specify network name or id + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-pause.1.md b/vendor/github.com/docker/docker/man/docker-pause.1.md new file mode 100644 index 0000000..11eef53 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-pause.1.md @@ -0,0 +1,32 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pause - Pause all processes within one or more containers + +# SYNOPSIS +**docker pause** +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The `docker pause` command suspends all processes in the specified containers. +On Linux, this uses the cgroups freezer. Traditionally, when suspending a process +the `SIGSTOP` signal is used, which is observable by the process being suspended. +With the cgroups freezer the process is unaware, and unable to capture, +that it is being suspended, and subsequently resumed. On Windows, only Hyper-V +containers can be paused. + +See the [cgroups freezer documentation] +(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for +further details. + +# OPTIONS +**--help** + Print usage statement + +# See also +**docker-unpause(1)** to unpause all processes within one or more containers. + +# HISTORY +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-port.1.md b/vendor/github.com/docker/docker/man/docker-port.1.md new file mode 100644 index 0000000..83e9cf9 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-port.1.md @@ -0,0 +1,47 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-port - List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT + +# SYNOPSIS +**docker port** +[**--help**] +CONTAINER [PRIVATE_PORT[/PROTO]] + +# DESCRIPTION +List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + + # docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test + +## Find out all the ports mapped + + # docker port test + 7890/tcp -> 0.0.0.0:4321 + 9876/tcp -> 0.0.0.0:1234 + +## Find out a specific mapping + + # docker port test 7890/tcp + 0.0.0.0:4321 + + # docker port test 7890 + 0.0.0.0:4321 + +## An example showing error for non-existent mapping + + # docker port test 7890/udp + 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-ps.1.md b/vendor/github.com/docker/docker/man/docker-ps.1.md new file mode 100644 index 0000000..d9aa39f --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-ps.1.md @@ -0,0 +1,145 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% FEBRUARY 2015 +# NAME +docker-ps - List containers + +# SYNOPSIS +**docker ps** +[**-a**|**--all**] +[**-f**|**--filter**[=*[]*]] +[**--format**=*"TEMPLATE"*] +[**--help**] +[**-l**|**--latest**] +[**-n**[=*-1*]] +[**--no-trunc**] +[**-q**|**--quiet**] +[**-s**|**--size**] + +# DESCRIPTION + +List the containers in the local repository. By default this shows only +the running containers. + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all containers. Only running containers are shown by default. The default is *false*. + +**-f**, **--filter**=[] + Filter output based on these conditions: + - exited= an exit code of + - label= or label== + - status=(created|restarting|running|paused|exited|dead) + - name= a container's name + - id= a container's ID + - is-task=(true|false) - containers that are a task (part of a service managed by swarm) + - before=(|) + - since=(|) + - ancestor=([:tag]||) - containers created from an image or a descendant. + - volume=(|) + - network=(|) - containers connected to the provided network + - health=(starting|healthy|unhealthy|none) - filters containers based on healthcheck status + +**--format**="*TEMPLATE*" + Pretty-print containers using a Go template. + Valid placeholders: + .ID - Container ID + .Image - Image ID + .Command - Quoted command + .CreatedAt - Time when the container was created. + .RunningFor - Elapsed time since the container was started. + .Ports - Exposed ports. + .Status - Container status. + .Size - Container disk size. + .Names - Container names. + .Labels - All labels assigned to the container. + .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}` + .Mounts - Names of the volumes mounted in this container. + +**--help** + Print usage statement + +**-l**, **--latest**=*true*|*false* + Show only the latest created container (includes all states). The default is *false*. + +**-n**=*-1* + Show n last created containers (includes all states). + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only display numeric IDs. The default is *false*. + +**-s**, **--size**=*true*|*false* + Display total file sizes. The default is *false*. + +# EXAMPLES +# Display all containers, including non-running + + # docker ps -a + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a87ecb4f327c fedora:20 /bin/sh -c #(nop) MA 20 minutes ago Exit 0 desperate_brattain + 01946d9d34d8 vpavlin/rhel7:latest /bin/sh -c #(nop) MA 33 minutes ago Exit 0 thirsty_bell + c1d3b0166030 acffc0358b9e /bin/sh -c yum -y up 2 weeks ago Exit 1 determined_torvalds + 41d50ecd2f57 fedora:20 /bin/sh -c #(nop) MA 2 weeks ago Exit 0 drunk_pike + +# Display only IDs of all containers, including non-running + + # docker ps -a -q + a87ecb4f327c + 01946d9d34d8 + c1d3b0166030 + 41d50ecd2f57 + +# Display only IDs of all containers that have the name `determined_torvalds` + + # docker ps -a -q --filter=name=determined_torvalds + c1d3b0166030 + +# Display containers with their commands + + # docker ps --format "{{.ID}}: {{.Command}}" + a87ecb4f327c: /bin/sh -c #(nop) MA + 01946d9d34d8: /bin/sh -c #(nop) MA + c1d3b0166030: /bin/sh -c yum -y up + 41d50ecd2f57: /bin/sh -c #(nop) MA + +# Display containers with their labels in a table + + # docker ps --format "table {{.ID}}\t{{.Labels}}" + CONTAINER ID LABELS + a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd + 01946d9d34d8 + c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 + 41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd + +# Display containers with their node label in a table + + # docker ps --format 'table {{.ID}}\t{{(.Label "com.docker.swarm.node")}}' + CONTAINER ID NODE + a87ecb4f327c ubuntu + 01946d9d34d8 + c1d3b0166030 debian + 41d50ecd2f57 fedora + +# Display containers with `remote-volume` mounted + + $ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" + CONTAINER ID MOUNTS + 9c3527ed70ce remote-volume + +# Display containers with a volume mounted in `/data` + + $ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" + CONTAINER ID MOUNTS + 9c3527ed70ce remote-volume + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit +February 2015, updated by André Martins +October 2016, updated by Josh Horwitz diff --git a/vendor/github.com/docker/docker/man/docker-pull.1.md b/vendor/github.com/docker/docker/man/docker-pull.1.md new file mode 100644 index 0000000..c61d005 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-pull.1.md @@ -0,0 +1,220 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pull - Pull an image or a repository from a registry + +# SYNOPSIS +**docker pull** +[**-a**|**--all-tags**] +[**--help**] +NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] + +# DESCRIPTION + +This command pulls down an image or a repository from a registry. If +there is more than one image for a repository (e.g., fedora) then all +images for that repository name can be pulled down including any tags +(see the option **-a** or **--all-tags**). + +If you do not specify a `REGISTRY_HOST`, the command uses Docker's public +registry located at `registry-1.docker.io` by default. + +# OPTIONS +**-a**, **--all-tags**=*true*|*false* + Download all tagged images in the repository. The default is *false*. + +**--help** + Print usage statement + +# EXAMPLES + +### Pull an image from Docker Hub + +To download a particular image, or set of images (i.e., a repository), use +`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a +default. This command pulls the `debian:latest` image: + + $ docker pull debian + + Using default tag: latest + latest: Pulling from library/debian + fdd5d7827f33: Pull complete + a3ed95caeb02: Pull complete + Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa + Status: Downloaded newer image for debian:latest + +Docker images can consist of multiple layers. In the example above, the image +consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`. + +Layers can be reused by images. For example, the `debian:jessie` image shares +both layers with `debian:latest`. Pulling the `debian:jessie` image therefore +only pulls its metadata, but not its layers, because all layers are already +present locally: + + $ docker pull debian:jessie + + jessie: Pulling from library/debian + fdd5d7827f33: Already exists + a3ed95caeb02: Already exists + Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e + Status: Downloaded newer image for debian:jessie + +To see which images are present locally, use the **docker-images(1)** +command: + + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + debian jessie f50f9524513f 5 days ago 125.1 MB + debian latest f50f9524513f 5 days ago 125.1 MB + +Docker uses a content-addressable image store, and the image ID is a SHA256 +digest covering the image's configuration and layers. In the example above, +`debian:jessie` and `debian:latest` have the same image ID because they are +actually the *same* image tagged with different names. Because they are the +same image, their layers are stored only once and do not consume extra disk +space. + +For more information about images, layers, and the content-addressable store, +refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/) +in the online documentation. + + +## Pull an image by digest (immutable identifier) + +So far, you've pulled images by their name (and "tag"). Using names and tags is +a convenient way to work with images. When using tags, you can `docker pull` an +image again to make sure you have the most up-to-date version of that image. +For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu +14.04 image. + +In some cases you don't want images to be updated to newer versions, but prefer +to use a fixed version of an image. Docker enables you to pull an image by its +*digest*. When pulling an image by digest, you specify *exactly* which version +of an image to pull. Doing so, allows you to "pin" an image to that version, +and guarantee that the image you're using is always the same. + +To know the digest of an image, pull the image first. Let's pull the latest +`ubuntu:14.04` image from Docker Hub: + + $ docker pull ubuntu:14.04 + + 14.04: Pulling from library/ubuntu + 5a132a7e7af1: Pull complete + fd2731e4c50c: Pull complete + 28a2f68d1120: Pull complete + a3ed95caeb02: Pull complete + Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + Status: Downloaded newer image for ubuntu:14.04 + +Docker prints the digest of the image after the pull has finished. In the example +above, the digest of the image is: + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Docker also prints the digest of an image when *pushing* to a registry. This +may be useful if you want to pin to a version of the image you just pushed. + +A digest takes the place of the tag when pulling an image, for example, to +pull the above image by digest, run the following command: + + $ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu + 5a132a7e7af1: Already exists + fd2731e4c50c: Already exists + 28a2f68d1120: Already exists + a3ed95caeb02: Already exists + Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Digest can also be used in the `FROM` of a Dockerfile, for example: + + FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + MAINTAINER some maintainer + +> **Note**: Using this feature "pins" an image to a specific version in time. +> Docker will therefore not pull updated versions of an image, which may include +> security updates. If you want to pull an updated image, you need to change the +> digest accordingly. + +## Pulling from a different registry + +By default, `docker pull` pulls images from Docker Hub. It is also possible to +manually specify the path of a registry to pull from. For example, if you have +set up a local registry, you can specify its path to pull from it. A registry +path is similar to a URL, but does not contain a protocol specifier (`https://`). + +The following command pulls the `testing/test-image` image from a local registry +listening on port 5000 (`myregistry.local:5000`): + + $ docker pull myregistry.local:5000/testing/test-image + +Registry credentials are managed by **docker-login(1)**. + +Docker uses the `https://` protocol to communicate with a registry, unless the +registry is allowed to be accessed over an insecure connection. Refer to the +[insecure registries](https://docs.docker.com/engine/reference/commandline/daemon/#insecure-registries) +section in the online documentation for more information. + + +## Pull a repository with multiple images + +By default, `docker pull` pulls a *single* image from the registry. A repository +can contain multiple images. To pull all images from a repository, provide the +`-a` (or `--all-tags`) option when using `docker pull`. + +This command pulls all images from the `fedora` repository: + + $ docker pull --all-tags fedora + + Pulling repository fedora + ad57ef8d78d7: Download complete + 105182bb5e8b: Download complete + 511136ea3c5a: Download complete + 73bd853d2ea5: Download complete + .... + + Status: Downloaded newer image for fedora + +After the pull has completed use the `docker images` command to see the +images that were pulled. The example below shows all the `fedora` images +that are present locally: + + $ docker images fedora + + REPOSITORY TAG IMAGE ID CREATED SIZE + fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB + fedora 20 105182bb5e8b 5 days ago 372.7 MB + fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB + fedora latest 105182bb5e8b 5 days ago 372.7 MB + + +## Canceling a pull + +Killing the `docker pull` process, for example by pressing `CTRL-c` while it is +running in a terminal, will terminate the pull operation. + + $ docker pull fedora + + Using default tag: latest + latest: Pulling from library/fedora + a3ed95caeb02: Pulling fs layer + 236608c7b546: Pulling fs layer + ^C + +> **Note**: Technically, the Engine terminates a pull operation when the +> connection between the Docker Engine daemon and the Docker Engine client +> initiating the pull is lost. If the connection with the Engine daemon is +> lost for other reasons than a manual interaction, the pull is also aborted. + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit +April 2015, updated by John Willis +April 2015, updated by Mary Anthony for v2 +September 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-push.1.md b/vendor/github.com/docker/docker/man/docker-push.1.md new file mode 100644 index 0000000..847e66d --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-push.1.md @@ -0,0 +1,63 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-push - Push an image or a repository to a registry + +# SYNOPSIS +**docker push** +[**--help**] +NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] + +# DESCRIPTION + +Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) +registry or to a self-hosted one. + +Refer to **docker-tag(1)** for more information about valid image and tag names. + +Killing the **docker push** process, for example by pressing **CTRL-c** while it +is running in a terminal, terminates the push operation. + +Registry credentials are managed by **docker-login(1)**. + + +# OPTIONS + +**--disable-content-trust** + Skip image verification (default true) + +**--help** + Print usage statement + +# EXAMPLES + +## Pushing a new image to a registry + +First save the new image by finding the container ID (using **docker ps**) +and then committing it to a new image name. Note that only a-z0-9-_. are +allowed when naming images: + + # docker commit c16378f943fe rhel-httpd + +Now, push the image to the registry using the image ID. In this example the +registry is on host named `registry-host` and listening on port `5000`. To do +this, tag the image with the host name or IP address, and the port of the +registry: + + # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd + # docker push registry-host:5000/myadmin/rhel-httpd + +Check that this worked by running: + + # docker images + +You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` +listed. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-rename.1.md b/vendor/github.com/docker/docker/man/docker-rename.1.md new file mode 100644 index 0000000..eaeea5c --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-rename.1.md @@ -0,0 +1,15 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCTOBER 2014 +# NAME +docker-rename - Rename a container + +# SYNOPSIS +**docker rename** +CONTAINER NEW_NAME + +# OPTIONS +There are no available options. + +# DESCRIPTION +Rename a container. Container may be running, paused or stopped. diff --git a/vendor/github.com/docker/docker/man/docker-restart.1.md b/vendor/github.com/docker/docker/man/docker-restart.1.md new file mode 100644 index 0000000..271c4ee --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-restart.1.md @@ -0,0 +1,26 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-restart - Restart one or more containers + +# SYNOPSIS +**docker restart** +[**--help**] +[**-t**|**--time**[=*10*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION +Restart each container listed. + +# OPTIONS +**--help** + Print usage statement + +**-t**, **--time**=*10* + Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-rm.1.md b/vendor/github.com/docker/docker/man/docker-rm.1.md new file mode 100644 index 0000000..2105288 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-rm.1.md @@ -0,0 +1,72 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-rm - Remove one or more containers + +# SYNOPSIS +**docker rm** +[**-f**|**--force**] +[**-l**|**--link**] +[**-v**|**--volumes**] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +**docker rm** will remove one or more containers from the host node. The +container name or ID can be used. This does not remove images. You cannot +remove a running container unless you use the **-f** option. To see all +containers on a host use the **docker ps -a** command. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--force**=*true*|*false* + Force the removal of a running container (uses SIGKILL). The default is *false*. + +**-l**, **--link**=*true*|*false* + Remove the specified link and not the underlying container. The default is *false*. + +**-v**, **--volumes**=*true*|*false* + Remove the volumes associated with the container. The default is *false*. + +# EXAMPLES + +## Removing a container using its ID + +To remove a container using its ID, find either from a **docker ps -a** +command, or use the ID returned from the **docker run** command, or retrieve +it from a file used to store it using the **docker run --cidfile**: + + docker rm abebf7571666 + +## Removing a container using the container name + +The name of the container can be found using the **docker ps -a** +command. The use that name as follows: + + docker rm hopeful_morse + +## Removing a container and all associated volumes + + $ docker rm -v redis + redis + +This command will remove the container and any volumes associated with it. +Note that if a volume was specified with a name, it will not be removed. + + $ docker create -v awesome:/foo -v /bar --name hello redis + hello + $ docker rm -v hello + +In this example, the volume for `/foo` will remain in tact, but the volume for +`/bar` will be removed. The same behavior holds for volumes inherited with +`--volumes-from`. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-rmi.1.md b/vendor/github.com/docker/docker/man/docker-rmi.1.md new file mode 100644 index 0000000..35bf8aa --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-rmi.1.md @@ -0,0 +1,42 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-rmi - Remove one or more images + +# SYNOPSIS +**docker rmi** +[**-f**|**--force**] +[**--help**] +[**--no-prune**] +IMAGE [IMAGE...] + +# DESCRIPTION + +Removes one or more images from the host node. This does not remove images from +a registry. You cannot remove an image of a running container unless you use the +**-f** option. To see all images on a host use the **docker images** command. + +# OPTIONS +**-f**, **--force**=*true*|*false* + Force removal of the image. The default is *false*. + +**--help** + Print usage statement + +**--no-prune**=*true*|*false* + Do not delete untagged parents. The default is *false*. + +# EXAMPLES + +## Removing an image + +Here is an example of removing an image: + + docker rmi fedora/httpd + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 diff --git a/vendor/github.com/docker/docker/man/docker-run.1.md b/vendor/github.com/docker/docker/man/docker-run.1.md new file mode 100644 index 0000000..8c1018a --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-run.1.md @@ -0,0 +1,1055 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-run - Run a command in a new container + +# SYNOPSIS +**docker run** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**--blkio-weight-device**[=*[]*]] +[**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cgroup-parent**[=*CGROUP-PATH*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpu-count**[=*0*]] +[**--cpu-percent**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpu-rt-period**[=*0*]] +[**--cpu-rt-runtime**[=*0*]] +[**--cpus**[=*0.0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**-d**|**--detach**] +[**--detach-keys**[=*[]*]] +[**--device**[=*[]*]] +[**--device-read-bps**[=*[]*]] +[**--device-read-iops**[=*[]*]] +[**--device-write-bps**[=*[]*]] +[**--device-write-iops**[=*[]*]] +[**--dns**[=*[]*]] +[**--dns-option**[=*[]*]] +[**--dns-search**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**--group-add**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**--help**] +[**--init**] +[**--init-path**[=*[]*]] +[**-i**|**--interactive**] +[**--ip**[=*IPv4-ADDRESS*]] +[**--ip6**[=*IPv6-ADDRESS*]] +[**--ipc**[=*IPC*]] +[**--isolation**[=*default*]] +[**--kernel-memory**[=*KERNEL-MEMORY*]] +[**-l**|**--label**[=*[]*]] +[**--label-file**[=*[]*]] +[**--link**[=*[]*]] +[**--link-local-ip**[=*[]*]] +[**--log-driver**[=*[]*]] +[**--log-opt**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] +[**--memory-reservation**[=*MEMORY-RESERVATION*]] +[**--memory-swap**[=*LIMIT*]] +[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] +[**--name**[=*NAME*]] +[**--network-alias**[=*[]*]] +[**--network**[=*"bridge"*]] +[**--oom-kill-disable**] +[**--oom-score-adj**[=*0*]] +[**-P**|**--publish-all**] +[**-p**|**--publish**[=*[]*]] +[**--pid**[=*[PID]*]] +[**--userns**[=*[]*]] +[**--pids-limit**[=*PIDS_LIMIT*]] +[**--privileged**] +[**--read-only**] +[**--restart**[=*RESTART*]] +[**--rm**] +[**--security-opt**[=*[]*]] +[**--storage-opt**[=*[]*]] +[**--stop-signal**[=*SIGNAL*]] +[**--stop-timeout**[=*TIMEOUT*]] +[**--shm-size**[=*[]*]] +[**--sig-proxy**[=*true*]] +[**--sysctl**[=*[]*]] +[**-t**|**--tty**] +[**--tmpfs**[=*[CONTAINER-DIR[:]*]] +[**-u**|**--user**[=*USER*]] +[**--ulimit**[=*[]*]] +[**--uts**[=*[]*]] +[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] +[**--volume-driver**[=*DRIVER*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] +IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Run a process in a new container. **docker run** starts a process with its own +file system, its own networking, and its own isolated process tree. The IMAGE +which starts the process may define defaults related to the process that will be +run in the container, the networking to expose, and more, but **docker run** +gives final control to the operator or administrator who starts the container +from the image. For that reason **docker run** has more options than any other +Docker command. + +If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and +all image dependencies, from the repository in the same way running **docker +pull** IMAGE, before it starts the container from that image. + +# OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. + + In foreground mode (the default when **-d** +is not specified), **docker run** can start the process in the container +and attach the console to the process's standard input, output, and standard +error. It can even pretend to be a TTY (this is what most commandline +executables expect) and pass along signals. The **-a** option can be set for +each of stdin, stdout, and stderr. + +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + + Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** +option can be set multiple times. + +**--blkio-weight**=*0* + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**--blkio-weight-device**=[] + Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). + +**--cpu-shares**=*0* + CPU shares (relative weight) + + By default, all containers get the same proportion of CPU cycles. This proportion +can be modified by changing the container's CPU share weighting relative +to the weighting of all other running containers. + +To modify the proportion from the default of 1024, use the **--cpu-shares** +flag to set the weighting to 2 or higher. + +The proportion will only apply when CPU-intensive processes are running. +When tasks in one container are idle, other containers can use the +left-over CPU time. The actual amount of CPU time will vary depending on +the number of containers running on the system. + +For example, consider three containers, one has a cpu-share of 1024 and +two others have a cpu-share setting of 512. When processes in all three +containers attempt to use 100% of CPU, the first container would receive +50% of the total CPU time. If you add a fourth container with a cpu-share +of 1024, the first container only gets 33% of the CPU. The remaining containers +receive 16.5%, 16.5% and 33% of the CPU. + +On a multi-core system, the shares of CPU time are distributed over all CPU +cores. Even if a container is limited to less than 100% of CPU time, it can +use 100% of each individual CPU core. + +For example, consider a system with more than three cores. If you start one +container **{C0}** with **-c=512** running one process, and another container +**{C1}** with **-c=1024** running two processes, this can result in the following +division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cgroup-parent**="" + Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + +**--cidfile**="" + Write the container ID to the file + +**--cpu-count**=*0* + Limit the number of CPUs available for execution by the container. + + On Windows Server containers, this is approximated as a percentage of total CPU usage. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-percent**=*0* + Limit the percentage of CPU available for execution by a container running on a Windows daemon. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-period**=*0* + Limit the CPU CFS (Completely Fair Scheduler) period + + Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. + +**--cpuset-cpus**="" + CPUs in which to allow execution (0-3, 0,1) + +**--cpuset-mems**="" + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + + If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +then processes in your Docker container will only use memory from the first +two memory nodes. + +**--cpu-quota**=*0* + Limit the CPU CFS (Completely Fair Scheduler) quota + + Limit the container's CPU usage. By default, containers run with the full +CPU resource. This flag tell the kernel to restrict the container's CPU usage +to the quota you specify. + +**--cpu-rt-period**=0 + Limit the CPU real-time period in microseconds + + Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. + +**--cpu-rt-runtime**=0 + Limit the CPU real-time runtime in microseconds + + Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: + Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. + + The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. + +**--cpus**=0.0 + Number of CPUs. The default is *0.0* which means no limit. + +**-d**, **--detach**=*true*|*false* + Detached mode: run the container in the background and print the new container ID. The default is *false*. + + At any time you can run **docker ps** in +the other shell to view a list of the running containers. You can reattach to a +detached container with **docker attach**. If you choose to run a container in +the detached mode, then you cannot use the **-rm** option. + + When attached in the tty mode, you can detach from the container (and leave it +running) using a configurable key sequence. The default sequence is `CTRL-p CTRL-q`. +You configure the key sequence using the **--detach-keys** option or a configuration file. +See **config-json(5)** for documentation on using a configuration file. + +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**--device**=[] + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) + +**--device-read-bps**=[] + Limit read rate from a device (e.g. --device-read-bps=/dev/sda:1mb) + +**--device-read-iops**=[] + Limit read rate from a device (e.g. --device-read-iops=/dev/sda:1000) + +**--device-write-bps**=[] + Limit write rate to a device (e.g. --device-write-bps=/dev/sda:1mb) + +**--device-write-iops**=[] + Limit write rate to a device (e.g. --device-write-iops=/dev/sda:1000) + +**--dns-search**=[] + Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) + +**--dns-option**=[] + Set custom DNS options + +**--dns**=[] + Set custom DNS servers + + This option can be used to override the DNS +configuration passed to the container. Typically this is necessary when the +host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this +is the case the **--dns** flags is necessary for every run. + +**-e**, **--env**=[] + Set environment variables + + This option allows you to specify arbitrary +environment variables that are available for the process that will be launched +inside of the container. + +**--entrypoint**="" + Overwrite the default ENTRYPOINT of the image + + This option allows you to overwrite the default entrypoint of the image that +is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND +because it specifies what executable to run when the container starts, but it is +(purposely) more difficult to override. The ENTRYPOINT gives a container its +default nature or behavior, so that when you set an ENTRYPOINT you can run the +container as if it were that binary, complete with default options, and you can +pass in more options via the COMMAND. But, sometimes an operator may want to run +something else inside the container, so you can override the default ENTRYPOINT +at runtime by using a **--entrypoint** and a string to specify the new +ENTRYPOINT. + +**--env-file**=[] + Read in a line delimited file of environment variables + +**--expose**=[] + Expose a port, or a range of ports (e.g. --expose=3300-3310) informs Docker +that the container listens on the specified network ports at runtime. Docker +uses this information to interconnect containers using links and to set up port +redirection on the host system. + +**--group-add**=[] + Add additional groups to run as + +**-h**, **--hostname**="" + Container host name + + Sets the container host name that is available inside the container. + +**--help** + Print usage statement + +**--init** + Run an init inside the container that forwards signals and reaps processes + +**--init-path**="" + Path to the docker-init binary + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + + When set to true, keep stdin open even if not attached. The default is false. + +**--ip**="" + Sets the container's interface IPv4 address (e.g. 172.23.0.9) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ip6**="" + Sets the container's interface IPv6 address (e.g. 2001:db8::1b99) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. Note +that the default on Windows server is `process`, and the default on Windows client +is `hyperv`. Linux only supports `default`. + +**-l**, **--label**=[] + Set metadata on the container (e.g., --label com.example.key=value) + +**--kernel-memory**="" + Kernel memory limit (format: `[]`, where unit = b, k, m or g) + + Constrains the kernel memory available to a container. If a limit of 0 +is specified (not using `--kernel-memory`), the container's kernel memory +is not limited. If you specify a limit, it may be rounded up to a multiple +of the operating system's page size and the value can be very large, +millions of trillions. + +**--label-file**=[] + Read in a line delimited file of labels + +**--link**=[] + Add link to another container in the form of :alias or just +in which case the alias will match the name + + If the operator +uses **--link** when starting the new client container, then the client +container can access the exposed port via a private networking interface. Docker +will set some environment variables in the client container to help indicate +which interface and port to use. + +**--link-local-ip**=[] + Add one or more link-local IPv4/IPv6 addresses to the container's interface + +**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" + Logging driver for the container. Default is defined by daemon `--log-driver` flag. + **Warning**: the `docker logs` command works only for the `json-file` and + `journald` logging drivers. + +**--log-opt**=[] + Logging driver specific options. + +**-m**, **--memory**="" + Memory limit (format: [], where unit = b, k, m or g) + + Allows you to constrain the memory available to a container. If the host +supports swap memory, then the **-m** memory setting can be larger than physical +RAM. If a limit of 0 is specified (not using **-m**), the container's memory is +not limited. The actual limit may be rounded up to a multiple of the operating +system's page size (the value would be very large, that's millions of trillions). + +**--memory-reservation**="" + Memory soft limit (format: [], where unit = b, k, m or g) + + After setting memory reservation, when the system detects memory contention +or low memory, containers are forced to restrict their consumption to their +reservation. So you should always set the value below **--memory**, otherwise the +hard limit will take precedence. By default, memory reservation will be the same +as memory limit. + +**--memory-swap**="LIMIT" + A limit value equal to memory plus swap. Must be used with the **-m** +(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** +(**--memory**) value. By default, the swap `LIMIT` will be set to double +the value of --memory. + + The format of `LIMIT` is `[]`. Unit can be `b` (bytes), +`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a +unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. + +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + + Remember that the MAC address in an Ethernet network must be unique. +The IPv6 link-local address will be based on the device's MAC address +according to RFC4862. + +**--name**="" + Assign a name to the container + + The operator can identify a container in three ways: + UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) + UUID short identifier (“f78375b1c487”) + Name (“jonah”) + + The UUID identifiers come from the Docker daemon, and if a name is not assigned +to the container with **--name** then the daemon will also generate a random +string name. The name is useful when defining links (see **--link**) (or any +other place you need to identify a container). This works for both background +and foreground Docker containers. + +**--network**="*bridge*" + Set the Network mode for the container + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. + '|': connect to a user-defined network + +**--network-alias**=[] + Add network-scoped alias for the container + +**--oom-kill-disable**=*true*|*false* + Whether to disable OOM Killer for the container or not. + +**--oom-score-adj**="" + Tune the host's OOM preferences for containers (accepts -1000 to 1000) + +**-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to random ports on the host interfaces. The default is *false*. + + When set to true publish all exposed ports to the host interfaces. The +default is false. If the operator uses -P (or -p) then Docker will make the +exposed port accessible on the host and the ports will be available to any +client that can reach the host. When using -P, Docker will bind any exposed +port to a random port on the host within an *ephemeral port range* defined by +`/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host +ports and the exposed ports, use `docker port`. + +**-p**, **--publish**=[] + Publish a container's port, or range of ports, to the host. + + Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort` +Both hostPort and containerPort can be specified as a range of ports. +When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. +(e.g., `docker run -p 1234-1236:1222-1224 --name thisWorks -t busybox` +but not `docker run -p 1230-1236:1230-1240 --name RangeContainerPortsBiggerThanRangeHostPorts -t busybox`) +With ip: `docker run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage` +Use `docker port` to see the actual mapping: `docker port CONTAINER $CONTAINERPORT` + +**--pid**="" + Set the PID mode for the container + Default is to create a private PID namespace for the container + 'container:': join another container's PID namespace + 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. + +**--userns**="" + Set the usernamespace mode for the container when `userns-remap` option is enabled. + **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`). + +**--pids-limit**="" + Tune the container's pids limit. Set `-1` to have unlimited pids for the container. + +**--uts**=*host* + Set the UTS mode for the container + **host**: use the host's UTS namespace inside the container. + Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. + +**--privileged**=*true*|*false* + Give extended privileges to this container. The default is *false*. + + By default, Docker containers are +“unprivileged” (=false) and cannot, for example, run a Docker daemon inside the +Docker container. This is because by default a container is not allowed to +access any devices. A “privileged” container is given access to all devices. + + When the operator executes **docker run --privileged**, Docker will enable access +to all devices on the host as well as set some configuration in AppArmor to +allow the container nearly all the same access to the host as processes running +outside of a container on the host. + +**--read-only**=*true*|*false* + Mount the container's root filesystem as read only. + + By default a container will have its root filesystem writable allowing processes +to write files anywhere. By specifying the `--read-only` flag the container will have +its root filesystem mounted as read only prohibiting any writes. + +**--restart**="*no*" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). + +**--rm**=*true*|*false* + Automatically remove the container when it exits. The default is *false*. + `--rm` flag can work together with `-d`, and auto-removal will be done on daemon side. Note that it's +incompatible with any restart policy other than `none`. + +**--security-opt**=[] + Security Options + + "label=user:USER" : Set the label user for the container + "label=role:ROLE" : Set the label role for the container + "label=type:TYPE" : Set the label type for the container + "label=level:LEVEL" : Set the label level for the container + "label=disable" : Turn off label confinement for the container + "no-new-privileges" : Disable container processes from gaining additional privileges + + "seccomp=unconfined" : Turn off seccomp confinement for the container + "seccomp=profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter + + "apparmor=unconfined" : Turn off apparmor confinement for the container + "apparmor=your-profile" : Set the apparmor confinement profile for the container + +**--storage-opt**=[] + Storage driver options per container + + $ docker run -it --storage-opt size=120G fedora /bin/bash + + This (size) will allow to set the container rootfs size to 120G at creation time. + This option is only available for the `devicemapper`, `btrfs`, `overlay2` and `zfs` graph drivers. + For the `devicemapper`, `btrfs` and `zfs` storage drivers, user cannot pass a size less than the Default BaseFS Size. + For the `overlay2` storage driver, the size option is only available if the backing fs is `xfs` and mounted with the `pquota` mount option. + Under these conditions, user can pass any size less then the backing fs size. + +**--stop-signal**=*SIGTERM* + Signal to stop a container. Default is SIGTERM. + +**--stop-timeout**=*10* + Timeout (in seconds) to stop a container. Default is 10. + +**--shm-size**="" + Size of `/dev/shm`. The format is ``. + `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes). + If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. + +**--sysctl**=SYSCTL + Configure namespaced kernel parameters at runtime + + IPC Namespace - current sysctls allowed: + + kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced + Sysctls beginning with fs.mqueue.* + + If you use the `--ipc=host` option these sysctls will not be allowed. + + Network Namespace - current sysctls allowed: + Sysctls beginning with net.* + + If you use the `--network=host` option these sysctls will not be allowed. + +**--sig-proxy**=*true*|*false* + Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. + +**--memory-swappiness**="" + Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + + When set to true Docker can allocate a pseudo-tty and attach to the standard +input of any container. This can be used, for example, to run a throwaway +interactive shell. The default is false. + +The **-t** option is incompatible with a redirection of the docker client +standard input. + +**--tmpfs**=[] Create a tmpfs mount + + Mount a temporary filesystem (`tmpfs`) mount into a container, for example: + + $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image + + This command mounts a `tmpfs` at `/tmp` within the container. The supported mount +options are the same as the Linux default `mount` flags. If you do not specify +any options, the systems uses the following options: +`rw,noexec,nosuid,nodev,size=65536k`. + +**-u**, **--user**="" + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument the command will be run as root in the container. + +**--ulimit**=[] + Ulimit options + +**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] + Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker + bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker + container. If 'HOST-DIR' is omitted, Docker automatically creates the new + volume on the host. The `OPTIONS` are a comma delimited list and can be: + + * [rw|ro] + * [z|Z] + * [`[r]shared`|`[r]slave`|`[r]private`] + * [nocopy] + +The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` +can be an absolute path or a `name` value. A `name` value must start with an +alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or +`-` (hyphen). An absolute path starts with a `/` (forward slash). + +If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the +path you specify. If you supply a `name`, Docker creates a named volume by that +`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` +value. If you supply the `/foo` value, Docker creates a bind-mount. If you +supply the `foo` specification, Docker creates a named volume. + +You can specify multiple **-v** options to mount one or more mounts to a +container. To use these same mounts in other containers, specify the +**--volumes-from** option also. + +You can add `:ro` or `:rw` suffix to a volume to mount it read-only or +read-write mode, respectively. By default, the volumes are mounted read-write. +See examples. + +Labeling systems like SELinux require that proper labels are placed on volume +content mounted into a container. Without a label, the security system might +prevent the processes running inside the container from using the content. By +default, Docker does not change the labels set by the OS. + +To change a label in the container context, you can add either of two suffixes +`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file +objects on the shared volumes. The `z` option tells Docker that two containers +share the volume content. As a result, Docker labels the content with a shared +content label. Shared volume labels allow all containers to read/write content. +The `Z` option tells Docker to label the content with a private unshared label. +Only the current container can use a private volume. + +By default bind mounted volumes are `private`. That means any mounts done +inside container will not be visible on host and vice-a-versa. One can change +this behavior by specifying a volume mount propagation property. Making a +volume `shared` mounts done under that volume inside container will be +visible on host and vice-a-versa. Making a volume `slave` enables only one +way mount propagation and that is mounts done on host under that volume +will be visible inside container but not the other way around. + +To control mount propagation property of volume one can use `:[r]shared`, +`:[r]slave` or `:[r]private` propagation flag. Propagation property can +be specified only for bind mounted volumes and not for internal volumes or +named volumes. For mount propagation to work source mount point (mount point +where source dir is mounted on) has to have right propagation properties. For +shared volumes, source mount point has to be shared. And for slave volumes, +source mount has to be either shared or slave. + +Use `df ` to figure out the source mount and then use +`findmnt -o TARGET,PROPAGATION ` to figure out propagation +properties of source mount. If `findmnt` utility is not available, then one +can look at mount entry for source mount point in `/proc/self/mountinfo`. Look +at `optional fields` and see if any propagaion properties are specified. +`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if +nothing is there that means mount is `private`. + +To change propagation properties of a mount point use `mount` command. For +example, if one wants to bind mount source directory `/foo` one can do +`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This +will convert /foo into a `shared` mount point. Alternatively one can directly +change propagation properties of source mount. Say `/` is source mount for +`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. + +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. The value of this setting may cause Docker to not +> see mount propagation changes made on the mount point. For example, if this value +> is `slave`, you may not be able to use the `shared` or `rshared` propagation on +> a volume. + +To disable automatic copying of data from the container path to the volume, use +the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. + +**--volume-driver**="" + Container's volume driver. This driver creates volumes specified either from + a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. + See **docker-volume-create(1)** for full details. + +**--volumes-from**=[] + Mount volumes from the specified container(s) + + Mounts already mounted volumes from a source container onto another + container. You must supply the source's container-id. To share + a volume, use the **--volumes-from** option when running + the target container. You can share volumes even if the source container + is not running. + + By default, Docker mounts the volumes in the same mode (read-write or + read-only) as it is mounted in the source container. Optionally, you + can change this by suffixing the container-id with either the `:ro` or + `:rw ` keyword. + + If the location of the volume from the source container overlaps with + data residing on a target container, then the volume hides + that data on the target. + +**-w**, **--workdir**="" + Working directory inside the container + + The default working directory for +running binaries within a container is the root directory (/). The developer can +set a different default with the Dockerfile WORKDIR instruction. The operator +can override the working directory by using the **-w** option. + +# Exit Status + +The exit code from `docker run` gives information about why the container +failed to run or why it exited. When `docker run` exits with a non-zero code, +the exit codes follow the `chroot` standard, see below: + +**_125_** if the error is with Docker daemon **_itself_** + + $ docker run --foo busybox; echo $? + # flag provided but not defined: --foo + See 'docker run --help'. + 125 + +**_126_** if the **_contained command_** cannot be invoked + + $ docker run busybox /etc; echo $? + # exec: "/etc": permission denied + docker: Error response from daemon: Contained command could not be invoked + 126 + +**_127_** if the **_contained command_** cannot be found + + $ docker run busybox foo; echo $? + # exec: "foo": executable file not found in $PATH + docker: Error response from daemon: Contained command not found or does not exist + 127 + +**_Exit code_** of **_contained command_** otherwise + + $ docker run busybox /bin/sh -c 'exit 3' + # 3 + +# EXAMPLES + +## Running container in read-only mode + +During container image development, containers often need to write to the image +content. Installing packages into /usr, for example. In production, +applications seldom need to write to the image. Container applications write +to volumes if they need to write to file systems at all. Applications can be +made more secure by running them in read-only mode using the --read-only switch. +This protects the containers image from modification. Read only containers may +still need to write temporary data. The best way to handle this is to mount +tmpfs directories on /run and /tmp. + + # docker run --read-only --tmpfs /run --tmpfs /tmp -i -t fedora /bin/bash + +## Exposing log messages from the container to the host's log + +If you want messages that are logged in your container to show up in the host's +syslog/journal then you should bind mount the /dev/log directory as follows. + + # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash + +From inside the container you can test this by sending a message to the log. + + (bash)# logger "Hello from my container" + +Then exit and check the journal. + + # exit + + # journalctl -b | grep Hello + +This should list the message sent to logger. + +## Attaching to one or more from STDIN, STDOUT, STDERR + +If you do not specify -a then Docker will attach everything (stdin,stdout,stderr) +. You can specify to which of the three standard streams (stdin, stdout, stderr) +you'd like to connect instead, as in: + + # docker run -a stdin -a stdout -i -t fedora /bin/bash + +## Sharing IPC between containers + +Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html + +Testing `--ipc=host` mode: + +Host shows a shared memory segment with 7 pids attached, happens to be from httpd: + +``` + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` + +Now run a regular container, and it correctly does NOT see the shared memory segment from the host: + +``` + $ docker run -it shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd: + + ``` + $ docker run -it --ipc=host shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` +Testing `--ipc=container:CONTAINERID` mode: + +Start a container with a program to create a shared memory segment: +``` + $ docker run -it shm bash + $ sudo shm/shm_server & + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` +Create a 2nd container correctly shows no shared memory segment from 1st container: +``` + $ docker run shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Create a 3rd container using the new --ipc=container:CONTAINERID option, now it shows the shared memory segment from the first: + +``` + $ docker run -it --ipc=container:ed735b2264ac shm ipcs -m + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` + +## Linking Containers + +> **Note**: This section describes linking between containers on the +> default (bridge) network, also known as "legacy links". Using `--link` +> on user-defined networks uses the DNS-based discovery, which does not add +> entries to `/etc/hosts`, and does not set environment variables for +> discovery. + +The link feature allows multiple containers to communicate with each other. For +example, a container whose Dockerfile has exposed port 80 can be run and named +as follows: + + # docker run --name=link-test -d -i -t fedora/httpd + +A second container, in this case called linker, can communicate with the httpd +container, named link-test, by running with the **--link=:** + + # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash + +Now the container linker is linked to container link-test with the alias lt. +Running the **env** command in the linker container shows environment variables + with the LT (alias) context (**LT_**) + + # env + HOSTNAME=668231cb0978 + TERM=xterm + LT_PORT_80_TCP=tcp://172.17.0.3:80 + LT_PORT_80_TCP_PORT=80 + LT_PORT_80_TCP_PROTO=tcp + LT_PORT=tcp://172.17.0.3:80 + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + PWD=/ + LT_NAME=/linker/lt + SHLVL=1 + HOME=/ + LT_PORT_80_TCP_ADDR=172.17.0.3 + _=/usr/bin/env + +When linking two containers Docker will use the exposed ports of the container +to create a secure tunnel for the parent to access. + +If a container is connected to the default bridge network and `linked` +with other containers, then the container's `/etc/hosts` file is updated +with the linked container's name. + +> **Note** Since Docker may live update the container's `/etc/hosts` file, there +may be situations when processes inside the container can end up reading an +empty or incomplete `/etc/hosts` file. In most cases, retrying the read again +should fix the problem. + + +## Mapping Ports for External Usage + +The exposed port of an application can be mapped to a host port using the **-p** +flag. For example, an httpd port 80 can be mapped to the host port 8080 using the +following: + + # docker run -p 8080:80 -d -i -t fedora/httpd + +## Creating and Mounting a Data Volume Container + +Many applications require the sharing of persistent data across several +containers. Docker allows you to create a Data Volume Container that other +containers can mount from. For example, create a named container that contains +directories /var/volume1 and /tmp/volume2. The image will need to contain these +directories so a couple of RUN mkdir instructions might be required for you +fedora-data image: + + # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true + # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash + +Multiple --volumes-from parameters will bring together multiple data volumes from +multiple containers. And it's possible to mount the volumes that came from the +DATA container in yet another container via the fedora-container1 intermediary +container, allowing to abstract the actual data source from users of that data: + + # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash + +## Mounting External Volumes + +To mount a host directory as a container volume, specify the absolute path to +the directory and the absolute path for the container directory separated by a +colon: + + # docker run -v /var/db:/data1 -i -t fedora bash + +When using SELinux, be aware that the host has no knowledge of container SELinux +policy. Therefore, in the above example, if SELinux policy is enforced, the +`/var/db` directory is not writable to the container. A "Permission Denied" +message will occur and an avc: message in the host's syslog. + + +To work around this, at time of writing this man page, the following command +needs to be run in order for the proper SELinux policy type label to be attached +to the host directory: + + # chcon -Rt svirt_sandbox_file_t /var/db + + +Now, writing to the /data1 volume in the container will be allowed and the +changes will also be reflected on the host in /var/db. + +## Using alternative security labeling + +You can override the default labeling scheme for each container by specifying +the `--security-opt` flag. For example, you can specify the MCS/MLS level, a +requirement for MLS systems. Specifying the level in the following command +allows you to share the same content between containers. + + # docker run --security-opt label=level:s0:c100,c200 -i -t fedora bash + +An MLS example might be: + + # docker run --security-opt label=level:TopSecret -i -t rhel7 bash + +To disable the security labeling for this container versus running with the +`--permissive` flag, use the following command: + + # docker run --security-opt label=disable -i -t fedora bash + +If you want a tighter security policy on the processes within a container, +you can specify an alternate type for the container. You could run a container +that is only allowed to listen on Apache ports by executing the following +command: + + # docker run --security-opt label=type:svirt_apache_t -i -t centos bash + +Note: + +You would have to write policy defining a `svirt_apache_t` type. + +## Setting device weight + +If you want to set `/dev/sda` device weight to `200`, you can specify the device +weight by `--blkio-weight-device` flag. Use the following command: + + # docker run -it --blkio-weight-device "/dev/sda:200" ubuntu + +## Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Microsoft Windows. The `--isolation ` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. These two commands are equivalent on Linux: + +``` +$ docker run -d busybox top +$ docker run -d --isolation default busybox top +``` + +On Microsoft Windows, can take any of these values: + +* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. +* `process`: Namespace isolation only. +* `hyperv`: Hyper-V hypervisor partition-based isolation. + +In practice, when running on Microsoft Windows without a `daemon` option set, these two commands are equivalent: + +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation process busybox top +``` + +If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, any of these commands also result in `hyperv` isolation: + +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation hyperv busybox top +``` + +## Setting Namespaced Kernel Parameters (Sysctls) + +The `--sysctl` sets namespaced kernel parameters (sysctls) in the +container. For example, to turn on IP forwarding in the containers +network namespace, run this command: + + $ docker run --sysctl net.ipv4.ip_forward=1 someimage + +Note: + +Not all sysctls are namespaced. Docker does not support changing sysctls +inside of a container that also modify the host system. As the kernel +evolves we expect to see more sysctls become namespaced. + +See the definition of the `--sysctl` option above for the current list of +supported sysctls. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +November 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-save.1.md b/vendor/github.com/docker/docker/man/docker-save.1.md new file mode 100644 index 0000000..1d1de8a --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-save.1.md @@ -0,0 +1,45 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-save - Save one or more images to a tar archive (streamed to STDOUT by default) + +# SYNOPSIS +**docker save** +[**--help**] +[**-o**|**--output**[=*OUTPUT*]] +IMAGE [IMAGE...] + +# DESCRIPTION +Produces a tarred repository to the standard output stream. Contains all +parent layers, and all tags + versions, or specified repo:tag. + +Stream to a file instead of STDOUT by using **-o**. + +# OPTIONS +**--help** + Print usage statement + +**-o**, **--output**="" + Write to a file, instead of STDOUT + +# EXAMPLES + +Save all fedora repository images to a fedora-all.tar and save the latest +fedora image to a fedora-latest.tar: + + $ docker save fedora > fedora-all.tar + $ docker save --output=fedora-latest.tar fedora:latest + $ ls -sh fedora-all.tar + 721M fedora-all.tar + $ ls -sh fedora-latest.tar + 367M fedora-latest.tar + +# See also +**docker-load(1)** to load an image from a tar archive on STDIN. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-search.1.md b/vendor/github.com/docker/docker/man/docker-search.1.md new file mode 100644 index 0000000..ad8bbc7 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-search.1.md @@ -0,0 +1,70 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-search - Search the Docker Hub for images + +# SYNOPSIS +**docker search** +[**-f**|**--filter**[=*[]*]] +[**--help**] +[**--limit**[=*LIMIT*]] +[**--no-trunc**] +TERM + +# DESCRIPTION + +Search Docker Hub for images that match the specified `TERM`. The table +of images returned displays the name, description (truncated by default), number +of stars awarded, whether the image is official, and whether it is automated. + +*Note* - Search queries will only return up to 25 results + +# OPTIONS + +**-f**, **--filter**=[] + Filter output based on these conditions: + - stars= + - is-automated=(true|false) + - is-official=(true|false) + +**--help** + Print usage statement + +**--limit**=*LIMIT* + Maximum returned search results. The default is 25. + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +# EXAMPLES + +## Search Docker Hub for ranked images + +Search a registry for the term 'fedora' and only display those images +ranked 3 or higher: + + $ docker search --filter=stars=3 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + mattdm/fedora A basic Fedora image corresponding roughly... 50 + fedora (Semi) Official Fedora base image. 38 + mattdm/fedora-small A small Fedora image on which to build. Co... 8 + goldmann/wildfly A WildFly application server running on a ... 3 [OK] + +## Search Docker Hub for automated images + +Search Docker Hub for the term 'fedora' and only display automated images +ranked 1 or higher: + + $ docker search --filter=is-automated=true --filter=stars=1 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + goldmann/wildfly A WildFly application server running on a ... 3 [OK] + tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +April 2016, updated by Vincent Demeester + diff --git a/vendor/github.com/docker/docker/man/docker-start.1.md b/vendor/github.com/docker/docker/man/docker-start.1.md new file mode 100644 index 0000000..c00b0a1 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-start.1.md @@ -0,0 +1,39 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-start - Start one or more containers + +# SYNOPSIS +**docker start** +[**-a**|**--attach**] +[**--detach-keys**[=*[]*]] +[**--help**] +[**-i**|**--interactive**] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +Start one or more containers. + +# OPTIONS +**-a**, **--attach**=*true*|*false* + Attach container's STDOUT and STDERR and forward all signals to the + process. The default is *false*. + +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**--help** + Print usage statement + +**-i**, **--interactive**=*true*|*false* + Attach container's STDIN. The default is *false*. + +# See also +**docker-stop(1)** to stop a container. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-stats.1.md b/vendor/github.com/docker/docker/man/docker-stats.1.md new file mode 100644 index 0000000..0f022cd --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-stats.1.md @@ -0,0 +1,57 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-stats - Display a live stream of one or more containers' resource usage statistics + +# SYNOPSIS +**docker stats** +[**-a**|**--all**] +[**--help**] +[**--no-stream**] +[**--format[="*TEMPLATE*"]**] +[CONTAINER...] + +# DESCRIPTION + +Display a live stream of one or more containers' resource usage statistics + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all containers. Only running containers are shown by default. The default is *false*. + +**--help** + Print usage statement + +**--no-stream**=*true*|*false* + Disable streaming stats and only pull the first result, default setting is false. + +**--format**="*TEMPLATE*" + Pretty-print containers statistics using a Go template. + Valid placeholders: + .Container - Container name or ID. + .Name - Container name. + .ID - Container ID. + .CPUPerc - CPU percentage. + .MemUsage - Memory usage. + .NetIO - Network IO. + .BlockIO - Block IO. + .MemPerc - Memory percentage (Not available on Windows). + .PIDs - Number of PIDs (Not available on Windows). + +# EXAMPLES + +Running `docker stats` on all running containers + + $ docker stats + CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O + 1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB + 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B + d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B + +Running `docker stats` on multiple containers by name and id. + + $ docker stats fervent_panini 5acfcb1b4fd1 + CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O + 5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B + fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B diff --git a/vendor/github.com/docker/docker/man/docker-stop.1.md b/vendor/github.com/docker/docker/man/docker-stop.1.md new file mode 100644 index 0000000..fa377c9 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-stop.1.md @@ -0,0 +1,30 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-stop - Stop a container by sending SIGTERM and then SIGKILL after a grace period + +# SYNOPSIS +**docker stop** +[**--help**] +[**-t**|**--time**[=*10*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION +Stop a container (Send SIGTERM, and then SIGKILL after + grace period) + +# OPTIONS +**--help** + Print usage statement + +**-t**, **--time**=*10* + Number of seconds to wait for the container to stop before killing it. Default is 10 seconds. + +#See also +**docker-start(1)** to restart a stopped container. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-tag.1.md b/vendor/github.com/docker/docker/man/docker-tag.1.md new file mode 100644 index 0000000..7f27e1b --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-tag.1.md @@ -0,0 +1,76 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-tag - Create a tag `TARGET_IMAGE` that refers to `SOURCE_IMAGE` + +# SYNOPSIS +**docker tag** +[**--help**] +SOURCE_NAME[:TAG] TARGET_NAME[:TAG] + +# DESCRIPTION +Assigns a new alias to an image in a registry. An alias refers to the +entire image name including the optional `TAG` after the ':'. + +# "OPTIONS" +**--help** + Print usage statement. + +**NAME** + The image name which is made up of slash-separated name components, + optionally prefixed by a registry hostname. The hostname must comply with + standard DNS rules, but may not contain underscores. If a hostname is + present, it may optionally be followed by a port number in the format + `:8080`. If not present, the command uses Docker's public registry located at + `registry-1.docker.io` by default. Name components may contain lowercase + characters, digits and separators. A separator is defined as a period, one or + two underscores, or one or more dashes. A name component may not start or end + with a separator. + +**TAG** + The tag assigned to the image to version and distinguish images with the same + name. The tag name may contain lowercase and uppercase characters, digits, + underscores, periods and dashes. A tag name may not start with a period or a + dash and may contain a maximum of 128 characters. + +# EXAMPLES + +## Tagging an image referenced by ID + +To tag a local image with ID "0e5574283393" into the "fedora" repository with +"version1.0": + + docker tag 0e5574283393 fedora/httpd:version1.0 + +## Tagging an image referenced by Name + +To tag a local image with name "httpd" into the "fedora" repository with +"version1.0": + + docker tag httpd fedora/httpd:version1.0 + +Note that since the tag name is not specified, the alias is created for an +existing local version `httpd:latest`. + +## Tagging an image referenced by Name and Tag + +To tag a local image with name "httpd" and tag "test" into the "fedora" +repository with "version1.0.test": + + docker tag httpd:test fedora/httpd:version1.0.test + +## Tagging an image for a private repository + +To push an image to a private registry and not the central Docker +registry you must tag it with the registry hostname and port (if needed). + + docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-top.1.md b/vendor/github.com/docker/docker/man/docker-top.1.md new file mode 100644 index 0000000..a666f7c --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-top.1.md @@ -0,0 +1,36 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-top - Display the running processes of a container + +# SYNOPSIS +**docker top** +[**--help**] +CONTAINER [ps OPTIONS] + +# DESCRIPTION + +Display the running process of the container. ps-OPTION can be any of the options you would pass to a Linux ps command. + +All displayed information is from host's point of view. + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + +Run **docker top** with the ps option of -x: + + $ docker top 8601afda2b -x + PID TTY STAT TIME COMMAND + 16623 ? Ss 0:00 sleep 99999 + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +June 2015, updated by Ma Shimiao +December 2015, updated by Pavel Pospisil diff --git a/vendor/github.com/docker/docker/man/docker-unpause.1.md b/vendor/github.com/docker/docker/man/docker-unpause.1.md new file mode 100644 index 0000000..e6fd3c4 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-unpause.1.md @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-unpause - Unpause all processes within one or more containers + +# SYNOPSIS +**docker unpause** +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The `docker unpause` command un-suspends all processes in the specified containers. +On Linux, it does this using the cgroups freezer. + +See the [cgroups freezer documentation] +(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for +further details. + +# OPTIONS +**--help** + Print usage statement + +# See also +**docker-pause(1)** to pause all processes within one or more containers. + +# HISTORY +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-update.1.md b/vendor/github.com/docker/docker/man/docker-update.1.md new file mode 100644 index 0000000..85f3dd0 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-update.1.md @@ -0,0 +1,171 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-update - Update configuration of one or more containers + +# SYNOPSIS +**docker update** +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**--cpu-shares**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpu-rt-period**[=*0*]] +[**--cpu-rt-runtime**[=*0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--help**] +[**--kernel-memory**[=*KERNEL-MEMORY*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--memory-reservation**[=*MEMORY-RESERVATION*]] +[**--memory-swap**[=*MEMORY-SWAP*]] +[**--restart**[=*""*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The **docker update** command dynamically updates container configuration. +You can use this command to prevent containers from consuming too many +resources from their Docker host. With a single command, you can place +limits on a single container or on many. To specify more than one container, +provide space-separated list of container names or IDs. + +With the exception of the **--kernel-memory** option, you can specify these +options on a running or a stopped container. On kernel version older than +4.6, You can only update **--kernel-memory** on a stopped container or on +a running container with kernel memory initialized. + +# OPTIONS + +**--blkio-weight**=0 + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**--cpu-shares**=0 + CPU shares (relative weight) + +**--cpu-period**=0 + Limit the CPU CFS (Completely Fair Scheduler) period + + Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. + +**--cpu-quota**=0 + Limit the CPU CFS (Completely Fair Scheduler) quota + +**--cpu-rt-period**=0 + Limit the CPU real-time period in microseconds + + Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. + +**--cpu-rt-runtime**=0 + Limit the CPU real-time runtime in microseconds + + Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: + Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. + + The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. + +**--cpuset-cpus**="" + CPUs in which to allow execution (0-3, 0,1) + +**--cpuset-mems**="" + Memory nodes(MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + +**--help** + Print usage statement + +**--kernel-memory**="" + Kernel memory limit (format: `[]`, where unit = b, k, m or g) + + Note that on kernel version older than 4.6, you can not update kernel memory on + a running container if the container is started without kernel memory initialized, + in this case, it can only be updated after it's stopped. The new setting takes + effect when the container is started. + +**-m**, **--memory**="" + Memory limit (format: , where unit = b, k, m or g) + + Note that the memory should be smaller than the already set swap memory limit. + If you want update a memory limit bigger than the already set swap memory limit, + you should update swap memory limit at the same time. If you don't set swap memory + limit on docker create/run but only memory limit, the swap memory is double + the memory limit. + +**--memory-reservation**="" + Memory soft limit (format: [], where unit = b, k, m or g) + +**--memory-swap**="" + Total memory limit (memory + swap) + +**--restart**="" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). + +# EXAMPLES + +The following sections illustrate ways to use this command. + +### Update a container's cpu-shares + +To limit a container's cpu-shares to 512, first identify the container +name or ID. You can use **docker ps** to find these values. You can also +use the ID returned from the **docker run** command. Then, do the following: + +```bash +$ docker update --cpu-shares 512 abebf7571666 +``` + +### Update a container with cpu-shares and memory + +To update multiple resource configurations for multiple containers: + +```bash +$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse +``` + +### Update a container's kernel memory constraints + +You can update a container's kernel memory limit using the **--kernel-memory** +option. On kernel version older than 4.6, this option can be updated on a +running container only if the container was started with **--kernel-memory**. +If the container was started *without* **--kernel-memory** you need to stop +the container before updating kernel memory. + +For example, if you started a container with this command: + +```bash +$ docker run -dit --name test --kernel-memory 50M ubuntu bash +``` + +You can update kernel memory while the container is running: + +```bash +$ docker update --kernel-memory 80M test +``` + +If you started a container *without* kernel memory initialized: + +```bash +$ docker run -dit --name test2 --memory 300M ubuntu bash +``` + +Update kernel memory of running container `test2` will fail. You need to stop +the container before updating the **--kernel-memory** setting. The next time you +start it, the container uses the new value. + +Kernel version newer than (include) 4.6 does not have this limitation, you +can use `--kernel-memory` the same way as other options. + +### Update a container's restart policy + +You can change a container's restart policy on a running container. The new +restart policy takes effect instantly after you run `docker update` on a +container. + +To update restart policy for one or more containers: + +```bash +$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse +``` + +Note that if the container is started with "--rm" flag, you cannot update the restart +policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the +container. diff --git a/vendor/github.com/docker/docker/man/docker-version.1.md b/vendor/github.com/docker/docker/man/docker-version.1.md new file mode 100644 index 0000000..1838f82 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-version.1.md @@ -0,0 +1,62 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2015 +# NAME +docker-version - Show the Docker version information. + +# SYNOPSIS +**docker version** +[**--help**] +[**-f**|**--format**[=*FORMAT*]] + +# DESCRIPTION +This command displays version information for both the Docker client and +daemon. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--format**="" + Format the output using the given Go template. + +# EXAMPLES + +## Display Docker version information + +The default output: + + $ docker version + Client: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + + Server: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + +Get server version: + + $ docker version --format '{{.Server.Version}}' + 1.8.0 + +Dump raw data: + +To view all available fields, you can use the format `{{json .}}`. + + $ docker version --format '{{json .}}' + {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} + + +# HISTORY +June 2014, updated by Sven Dowideit +June 2015, updated by John Howard +June 2015, updated by Patrick Hemmer diff --git a/vendor/github.com/docker/docker/man/docker-wait.1.md b/vendor/github.com/docker/docker/man/docker-wait.1.md new file mode 100644 index 0000000..6788009 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-wait.1.md @@ -0,0 +1,30 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-wait - Block until one or more containers stop, then print their exit codes + +# SYNOPSIS +**docker wait** +[**--help**] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +Block until one or more containers stop, then print their exit codes. + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + + $ docker run -d fedora sleep 99 + 079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622 + $ docker wait 079b83f558a2bc + 0 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker.1.md b/vendor/github.com/docker/docker/man/docker.1.md new file mode 100644 index 0000000..2a96184 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker.1.md @@ -0,0 +1,237 @@ +% DOCKER(1) Docker User Manuals +% William Henry +% APRIL 2014 +# NAME +docker \- Docker image and container command line interface + +# SYNOPSIS +**docker** [OPTIONS] COMMAND [ARG...] + +**docker** daemon [--help|...] + +**docker** [--help|-v|--version] + +# DESCRIPTION +is a client for interacting with the daemon (see **dockerd(8)**) through the CLI. + +The Docker CLI has over 30 commands. The commands are listed below and each has +its own man page which explain usage and arguments. + +To see the man page for a command run **man docker **. + +# OPTIONS +**--help** + Print usage statement + +**--config**="" + Specifies the location of the Docker client configuration files. The default is '~/.docker'. + +**-D**, **--debug**=*true*|*false* + Enable debug mode. Default is false. + +**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host]:[port][path] to bind or +unix://[/path/to/socket] to use. + The socket(s) to bind to in daemon mode specified using one or more + tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd. + If the tcp port is not specified, then it will default to either `2375` when + `--tls` is off, or `2376` when `--tls` is on, or `--tlsverify` is specified. + +**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" + Set the logging level. Default is `info`. + +**--tls**=*true*|*false* + Use TLS; implied by --tlsverify. Default is false. + +**--tlscacert**=*~/.docker/ca.pem* + Trust certs signed only by this CA. + +**--tlscert**=*~/.docker/cert.pem* + Path to TLS certificate file. + +**--tlskey**=*~/.docker/key.pem* + Path to TLS key file. + +**--tlsverify**=*true*|*false* + Use TLS and verify the remote (daemon: verify client, client: verify daemon). + Default is false. + +**-v**, **--version**=*true*|*false* + Print version information and quit. Default is false. + +# COMMANDS +**attach** + Attach to a running container + See **docker-attach(1)** for full documentation on the **attach** command. + +**build** + Build an image from a Dockerfile + See **docker-build(1)** for full documentation on the **build** command. + +**commit** + Create a new image from a container's changes + See **docker-commit(1)** for full documentation on the **commit** command. + +**cp** + Copy files/folders between a container and the local filesystem + See **docker-cp(1)** for full documentation on the **cp** command. + +**create** + Create a new container + See **docker-create(1)** for full documentation on the **create** command. + +**diff** + Inspect changes on a container's filesystem + See **docker-diff(1)** for full documentation on the **diff** command. + +**events** + Get real time events from the server + See **docker-events(1)** for full documentation on the **events** command. + +**exec** + Run a command in a running container + See **docker-exec(1)** for full documentation on the **exec** command. + +**export** + Stream the contents of a container as a tar archive + See **docker-export(1)** for full documentation on the **export** command. + +**history** + Show the history of an image + See **docker-history(1)** for full documentation on the **history** command. + +**images** + List images + See **docker-images(1)** for full documentation on the **images** command. + +**import** + Create a new filesystem image from the contents of a tarball + See **docker-import(1)** for full documentation on the **import** command. + +**info** + Display system-wide information + See **docker-info(1)** for full documentation on the **info** command. + +**inspect** + Return low-level information on a container or image + See **docker-inspect(1)** for full documentation on the **inspect** command. + +**kill** + Kill a running container (which includes the wrapper process and everything +inside it) + See **docker-kill(1)** for full documentation on the **kill** command. + +**load** + Load an image from a tar archive + See **docker-load(1)** for full documentation on the **load** command. + +**login** + Log in to a Docker Registry + See **docker-login(1)** for full documentation on the **login** command. + +**logout** + Log the user out of a Docker Registry + See **docker-logout(1)** for full documentation on the **logout** command. + +**logs** + Fetch the logs of a container + See **docker-logs(1)** for full documentation on the **logs** command. + +**pause** + Pause all processes within a container + See **docker-pause(1)** for full documentation on the **pause** command. + +**port** + Lookup the public-facing port which is NAT-ed to PRIVATE_PORT + See **docker-port(1)** for full documentation on the **port** command. + +**ps** + List containers + See **docker-ps(1)** for full documentation on the **ps** command. + +**pull** + Pull an image or a repository from a Docker Registry + See **docker-pull(1)** for full documentation on the **pull** command. + +**push** + Push an image or a repository to a Docker Registry + See **docker-push(1)** for full documentation on the **push** command. + +**rename** + Rename a container. + See **docker-rename(1)** for full documentation on the **rename** command. + +**restart** + Restart one or more containers + See **docker-restart(1)** for full documentation on the **restart** command. + +**rm** + Remove one or more containers + See **docker-rm(1)** for full documentation on the **rm** command. + +**rmi** + Remove one or more images + See **docker-rmi(1)** for full documentation on the **rmi** command. + +**run** + Run a command in a new container + See **docker-run(1)** for full documentation on the **run** command. + +**save** + Save an image to a tar archive + See **docker-save(1)** for full documentation on the **save** command. + +**search** + Search for an image in the Docker index + See **docker-search(1)** for full documentation on the **search** command. + +**start** + Start a container + See **docker-start(1)** for full documentation on the **start** command. + +**stats** + Display a live stream of one or more containers' resource usage statistics + See **docker-stats(1)** for full documentation on the **stats** command. + +**stop** + Stop a container + See **docker-stop(1)** for full documentation on the **stop** command. + +**tag** + Tag an image into a repository + See **docker-tag(1)** for full documentation on the **tag** command. + +**top** + Lookup the running processes of a container + See **docker-top(1)** for full documentation on the **top** command. + +**unpause** + Unpause all processes within a container + See **docker-unpause(1)** for full documentation on the **unpause** command. + +**version** + Show the Docker version information + See **docker-version(1)** for full documentation on the **version** command. + +**wait** + Block until a container stops, then print its exit code + See **docker-wait(1)** for full documentation on the **wait** command. + + +# RUNTIME EXECUTION OPTIONS + +Use the **--exec-opt** flags to specify options to the execution driver. +The following options are available: + +#### native.cgroupdriver +Specifies the management of the container's `cgroups`. You can specify `cgroupfs` +or `systemd`. If you specify `systemd` and it is not available, the system errors +out. + +#### Client +For specific client examples please see the man page for the specific Docker +command. For example: + + man docker-run + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. diff --git a/vendor/github.com/docker/docker/man/dockerd.8.md b/vendor/github.com/docker/docker/man/dockerd.8.md new file mode 100644 index 0000000..761dc6b --- /dev/null +++ b/vendor/github.com/docker/docker/man/dockerd.8.md @@ -0,0 +1,710 @@ +% DOCKER(8) Docker User Manuals +% Shishir Mahajan +% SEPTEMBER 2015 +# NAME +dockerd - Enable daemon mode + +# SYNOPSIS +**dockerd** +[**--add-runtime**[=*[]*]] +[**--api-cors-header**=[=*API-CORS-HEADER*]] +[**--authorization-plugin**[=*[]*]] +[**-b**|**--bridge**[=*BRIDGE*]] +[**--bip**[=*BIP*]] +[**--cgroup-parent**[=*[]*]] +[**--cluster-store**[=*[]*]] +[**--cluster-advertise**[=*[]*]] +[**--cluster-store-opt**[=*map[]*]] +[**--config-file**[=*/etc/docker/daemon.json*]] +[**--containerd**[=*SOCKET-PATH*]] +[**-D**|**--debug**] +[**--default-gateway**[=*DEFAULT-GATEWAY*]] +[**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]] +[**--default-runtime**[=*runc*]] +[**--default-ulimit**[=*[]*]] +[**--disable-legacy-registry**] +[**--dns**[=*[]*]] +[**--dns-opt**[=*[]*]] +[**--dns-search**[=*[]*]] +[**--exec-opt**[=*[]*]] +[**--exec-root**[=*/var/run/docker*]] +[**--experimental**[=*false*]] +[**--fixed-cidr**[=*FIXED-CIDR*]] +[**--fixed-cidr-v6**[=*FIXED-CIDR-V6*]] +[**-G**|**--group**[=*docker*]] +[**-g**|**--graph**[=*/var/lib/docker*]] +[**-H**|**--host**[=*[]*]] +[**--help**] +[**--icc**[=*true*]] +[**--init**[=*false*]] +[**--init-path**[=*""*]] +[**--insecure-registry**[=*[]*]] +[**--ip**[=*0.0.0.0*]] +[**--ip-forward**[=*true*]] +[**--ip-masq**[=*true*]] +[**--iptables**[=*true*]] +[**--ipv6**] +[**--isolation**[=*default*]] +[**-l**|**--log-level**[=*info*]] +[**--label**[=*[]*]] +[**--live-restore**[=*false*]] +[**--log-driver**[=*json-file*]] +[**--log-opt**[=*map[]*]] +[**--mtu**[=*0*]] +[**--max-concurrent-downloads**[=*3*]] +[**--max-concurrent-uploads**[=*5*]] +[**-p**|**--pidfile**[=*/var/run/docker.pid*]] +[**--raw-logs**] +[**--registry-mirror**[=*[]*]] +[**-s**|**--storage-driver**[=*STORAGE-DRIVER*]] +[**--seccomp-profile**[=*SECCOMP-PROFILE-PATH*]] +[**--selinux-enabled**] +[**--shutdown-timeout**[=*15*]] +[**--storage-opt**[=*[]*]] +[**--swarm-default-advertise-addr**[=*IP|INTERFACE*]] +[**--tls**] +[**--tlscacert**[=*~/.docker/ca.pem*]] +[**--tlscert**[=*~/.docker/cert.pem*]] +[**--tlskey**[=*~/.docker/key.pem*]] +[**--tlsverify**] +[**--userland-proxy**[=*true*]] +[**--userland-proxy-path**[=*""*]] +[**--userns-remap**[=*default*]] + +# DESCRIPTION +**dockerd** is used for starting the Docker daemon (i.e., to command the daemon +to manage images, containers etc). So **dockerd** is a server, as a daemon. + +To run the Docker daemon you can specify **dockerd**. +You can check the daemon options using **dockerd --help**. +Daemon options should be specified after the **dockerd** keyword in the +following format. + +**dockerd [OPTIONS]** + +# OPTIONS + +**--add-runtime**=[] + Runtimes can be registered with the daemon either via the +configuration file or using the `--add-runtime` command line argument. + + The following is an example adding 2 runtimes via the configuration: + +```json +{ + "default-runtime": "runc", + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + + This is the same example via the command line: + +```bash +$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement +``` + + **Note**: defining runtime arguments via the command line is not supported. + +**--api-cors-header**="" + Set CORS headers in the Engine API. Default is cors disabled. Give urls like + "http://foo, http://bar, ...". Give "*" to allow all. + +**--authorization-plugin**="" + Set authorization plugins to load + +**-b**, **--bridge**="" + Attach containers to a pre\-existing network bridge; use 'none' to disable + container networking + +**--bip**="" + Use the provided CIDR notation address for the dynamically created bridge + (docker0); Mutually exclusive of \-b + +**--cgroup-parent**="" + Set parent cgroup for all containers. Default is "/docker" for fs cgroup + driver and "system.slice" for systemd cgroup driver. + +**--cluster-store**="" + URL of the distributed storage backend + +**--cluster-advertise**="" + Specifies the 'host:port' or `interface:port` combination that this + particular daemon instance should use when advertising itself to the cluster. + The daemon is reached through this value. + +**--cluster-store-opt**="" + Specifies options for the Key/Value store. + +**--config-file**="/etc/docker/daemon.json" + Specifies the JSON file path to load the configuration from. + +**--containerd**="" + Path to containerd socket. + +**-D**, **--debug**=*true*|*false* + Enable debug mode. Default is false. + +**--default-gateway**="" + IPv4 address of the container default gateway; this address must be part of + the bridge subnet (which is defined by \-b or \--bip) + +**--default-gateway-v6**="" + IPv6 address of the container default gateway + +**--default-runtime**="runc" + Set default runtime if there're more than one specified by `--add-runtime`. + +**--default-ulimit**=[] + Default ulimits for containers. + +**--disable-legacy-registry**=*true*|*false* + Disable contacting legacy registries + +**--dns**="" + Force Docker to use specific DNS servers + +**--dns-opt**="" + DNS options to use. + +**--dns-search**=[] + DNS search domains to use. + +**--exec-opt**=[] + Set runtime execution options. See RUNTIME EXECUTION OPTIONS. + +**--exec-root**="" + Path to use as the root of the Docker execution state files. Default is + `/var/run/docker`. + +**--experimental**="" + Enable the daemon experimental features. + +**--fixed-cidr**="" + IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in + the bridge subnet (which is defined by \-b or \-\-bip). + +**--fixed-cidr-v6**="" + IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64) + +**-G**, **--group**="" + Group to assign the unix socket specified by -H when running in daemon mode. + use '' (the empty string) to disable setting of a group. Default is `docker`. + +**-g**, **--graph**="" + Path to use as the root of the Docker runtime. Default is `/var/lib/docker`. + +**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host:port] to bind or +unix://[/path/to/socket] to use. + The socket(s) to bind to in daemon mode specified using one or more + tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. + +**--help** + Print usage statement + +**--icc**=*true*|*false* + Allow unrestricted inter\-container and Docker daemon host communication. If + disabled, containers can still be linked together using the **--link** option + (see **docker-run(1)**). Default is true. + +**--init** + Run an init process inside containers for signal forwarding and process + reaping. + +**--init-path** + Path to the docker-init binary. + +**--insecure-registry**=[] + Enable insecure registry communication, i.e., enable un-encrypted and/or + untrusted communication. + + List of insecure registries can contain an element with CIDR notation to + specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS + with certificates from unknown CAs. + + Enabling `--insecure-registry` is useful when running a local registry. + However, because its use creates security vulnerabilities it should ONLY be + enabled for testing purposes. For increased security, users should add their + CA to their system's list of trusted CAs instead of using + `--insecure-registry`. + +**--ip**="" + Default IP address to use when binding container ports. Default is `0.0.0.0`. + +**--ip-forward**=*true*|*false* + Enables IP forwarding on the Docker host. The default is `true`. This flag + interacts with the IP forwarding setting on your host system's kernel. If + your system has IP forwarding disabled, this setting enables it. If your + system has IP forwarding enabled, setting this flag to `--ip-forward=false` + has no effect. + + This setting will also enable IPv6 forwarding if you have both + `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject + Router Advertisements and interfere with the host's existing IPv6 + configuration. For more information, please consult the documentation about + "Advanced Networking - IPv6". + +**--ip-masq**=*true*|*false* + Enable IP masquerading for bridge's IP range. Default is true. + +**--iptables**=*true*|*false* + Enable Docker's addition of iptables rules. Default is true. + +**--ipv6**=*true*|*false* + Enable IPv6 support. Default is false. Docker will create an IPv6-enabled + bridge with address fe80::1 which will allow you to create IPv6-enabled + containers. Use together with `--fixed-cidr-v6` to provide globally routable + IPv6 addresses. IPv6 forwarding will be enabled if not used with + `--ip-forward=false`. This may collide with your host's current IPv6 + settings. For more information please consult the documentation about + "Advanced Networking - IPv6". + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. + Note that the default on Windows server is `process`, and the default on + Windows client is `hyperv`. Linux only supports `default`. + +**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" + Set the logging level. Default is `info`. + +**--label**="[]" + Set key=value labels to the daemon (displayed in `docker info`) + +**--live-restore**=*false* + Enable live restore of running containers when the daemon starts so that they + are not restarted. This option is applicable only for docker daemon running + on Linux host. + +**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" + Default driver for container logs. Default is `json-file`. + **Warning**: `docker logs` command works only for `json-file` logging driver. + +**--log-opt**=[] + Logging driver specific options. + +**--mtu**=*0* + Set the containers network mtu. Default is `0`. + +**--max-concurrent-downloads**=*3* + Set the max concurrent downloads for each pull. Default is `3`. + +**--max-concurrent-uploads**=*5* + Set the max concurrent uploads for each push. Default is `5`. + +**-p**, **--pidfile**="" + Path to use for daemon PID file. Default is `/var/run/docker.pid` + +**--raw-logs** + Output daemon logs in full timestamp format without ANSI coloring. If this + flag is not set, the daemon outputs condensed, colorized logs if a terminal + is detected, or full ("raw") output otherwise. + +**--registry-mirror**=*://* + Prepend a registry mirror to be used for image pulls. May be specified + multiple times. + +**-s**, **--storage-driver**="" + Force the Docker runtime to use a specific storage driver. + +**--seccomp-profile**="" + Path to seccomp profile. + +**--selinux-enabled**=*true*|*false* + Enable selinux support. Default is false. + +**--shutdown-timeout**=*15* + Set the shutdown timeout value in seconds. Default is `15`. + +**--storage-opt**=[] + Set storage driver options. See STORAGE DRIVER OPTIONS. + +**--swarm-default-advertise-addr**=*IP|INTERFACE* + Set default address or interface for swarm to advertise as its + externally-reachable address to other cluster members. This can be a + hostname, an IP address, or an interface such as `eth0`. A port cannot be + specified with this option. + +**--tls**=*true*|*false* + Use TLS; implied by --tlsverify. Default is false. + +**--tlscacert**=*~/.docker/ca.pem* + Trust certs signed only by this CA. + +**--tlscert**=*~/.docker/cert.pem* + Path to TLS certificate file. + +**--tlskey**=*~/.docker/key.pem* + Path to TLS key file. + +**--tlsverify**=*true*|*false* + Use TLS and verify the remote (daemon: verify client, client: verify daemon). + Default is false. + +**--userland-proxy**=*true*|*false* + Rely on a userland proxy implementation for inter-container and + outside-to-container loopback communications. Default is true. + +**--userland-proxy-path**="" + Path to the userland proxy binary. + +**--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid* + Enable user namespaces for containers on the daemon. Specifying "default" + will cause a new user and group to be created to handle UID and GID range + remapping for the user namespace mappings used for contained processes. + Specifying a user (or uid) and optionally a group (or gid) will cause the + daemon to lookup the user and group's subordinate ID ranges for use as the + user namespace mappings for contained processes. + +# STORAGE DRIVER OPTIONS + +Docker uses storage backends (known as "graphdrivers" in the Docker +internals) to create writable containers from images. Many of these +backends use operating system level technologies and can be +configured. + +Specify options to the storage backend with **--storage-opt** flags. The +backends that currently take options are *devicemapper*, *zfs* and *btrfs*. +Options for *devicemapper* are prefixed with *dm*, options for *zfs* +start with *zfs* and options for *btrfs* start with *btrfs*. + +Specifically for devicemapper, the default is a "loopback" model which +requires no pre-configuration, but is extremely inefficient. Do not +use it in production. + +To make the best use of Docker with the devicemapper backend, you must +have a recent version of LVM. Use `lvm` to create a thin pool; for +more information see `man lvmthin`. Then, use `--storage-opt +dm.thinpooldev` to tell the Docker engine to use that pool for +allocating images and container snapshots. + +## Devicemapper options + +#### dm.thinpooldev + +Specifies a custom block storage device to use for the thin pool. + +If using a block device for device mapper storage, it is best to use `lvm` +to create and manage the thin-pool volume. This volume is then handed to Docker +to exclusively create snapshot volumes needed for images and containers. + +Managing the thin-pool outside of Engine makes for the most feature-rich +method of having Docker utilize device mapper thin provisioning as the +backing storage for Docker containers. The highlights of the lvm-based +thin-pool management feature include: automatic or interactive thin-pool +resize support, dynamically changing thin-pool features, automatic thinp +metadata checking when lvm activates the thin-pool, etc. + +As a fallback if no thin pool is provided, loopback files are +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Engine daemon has a +`--storage-opt dm.thinpooldev` argument provided. + +Example use: + + $ dockerd \ + --storage-opt dm.thinpooldev=/dev/mapper/thin-pool + +#### dm.basesize + +Specifies the size to use when creating the base device, which limits +the size of images and containers. The default value is 10G. Note, +thin devices are inherently "sparse", so a 10G device which is mostly +empty doesn't use 10 GB of space on the pool. However, the filesystem +will use more space for base images the larger the device +is. + +The base device size can be increased at daemon restart which will allow +all future images and containers (based on those new images) to be of the +new base device size. + +Example use: `dockerd --storage-opt dm.basesize=50G` + +This will increase the base device size to 50G. The Docker daemon will throw an +error if existing base device size is larger than 50G. A user can use +this option to expand the base device size however shrinking is not permitted. + +This value affects the system-wide "base" empty filesystem that may already +be initialized and inherited by pulled images. Typically, a change to this +value requires additional steps to take effect: + + $ sudo service docker stop + $ sudo rm -rf /var/lib/docker + $ sudo service docker start + +Example use: `dockerd --storage-opt dm.basesize=20G` + +#### dm.fs + +Specifies the filesystem type to use for the base device. The +supported options are `ext4` and `xfs`. The default is `ext4`. + +Example use: `dockerd --storage-opt dm.fs=xfs` + +#### dm.mkfsarg + +Specifies extra mkfs arguments to be used when creating the base device. + +Example use: `dockerd --storage-opt "dm.mkfsarg=-O ^has_journal"` + +#### dm.mountopt + +Specifies extra mount options used when mounting the thin devices. + +Example use: `dockerd --storage-opt dm.mountopt=nodiscard` + +#### dm.use_deferred_removal + +Enables use of deferred device removal if `libdm` and the kernel driver +support the mechanism. + +Deferred device removal means that if device is busy when devices are +being removed/deactivated, then a deferred removal is scheduled on +device. And devices automatically go away when last user of the device +exits. + +For example, when a container exits, its associated thin device is removed. If +that device has leaked into some other mount namespace and can't be removed, +the container exit still succeeds and this option causes the system to schedule +the device for deferred removal. It does not wait in a loop trying to remove a +busy device. + +Example use: `dockerd --storage-opt dm.use_deferred_removal=true` + +#### dm.use_deferred_deletion + +Enables use of deferred device deletion for thin pool devices. By default, +thin pool device deletion is synchronous. Before a container is deleted, the +Docker daemon removes any associated devices. If the storage driver can not +remove a device, the container deletion fails and daemon returns. + +`Error deleting container: Error response from daemon: Cannot destroy container` + +To avoid this failure, enable both deferred device deletion and deferred +device removal on the daemon. + +`dockerd --storage-opt dm.use_deferred_deletion=true --storage-opt dm.use_deferred_removal=true` + +With these two options enabled, if a device is busy when the driver is +deleting a container, the driver marks the device as deleted. Later, when the +device isn't in use, the driver deletes it. + +In general it should be safe to enable this option by default. It will help +when unintentional leaking of mount point happens across multiple mount +namespaces. + +#### dm.loopdatasize + +**Note**: This option configures devicemapper loopback, which should not be +used in production. + +Specifies the size to use when creating the loopback file for the "data" device +which is used for the thin pool. The default size is 100G. The file is sparse, +so it will not initially take up this much space. + +Example use: `dockerd --storage-opt dm.loopdatasize=200G` + +#### dm.loopmetadatasize + +**Note**: This option configures devicemapper loopback, which should not be +used in production. + +Specifies the size to use when creating the loopback file for the "metadata" +device which is used for the thin pool. The default size is 2G. The file is +sparse, so it will not initially take up this much space. + +Example use: `dockerd --storage-opt dm.loopmetadatasize=4G` + +#### dm.datadev + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for data for a Docker-managed thin pool. +It is better to use `dm.thinpooldev` - see the documentation for it above for +discussion of the advantages. + +#### dm.metadatadev + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for metadata for a Docker-managed thin +pool. See `dm.datadev` for why this is deprecated. + +#### dm.blocksize + +Specifies a custom blocksize to use for the thin pool. The default +blocksize is 64K. + +Example use: `dockerd --storage-opt dm.blocksize=512K` + +#### dm.blkdiscard + +Enables or disables the use of `blkdiscard` when removing devicemapper devices. +This is disabled by default due to the additional latency, but as a special +case with loopback devices it will be enabled, in order to re-sparsify the +loopback file on image/container removal. + +Disabling this on loopback can lead to *much* faster container removal times, +but it also prevents the space used in `/var/lib/docker` directory from being +returned to the system for other use when containers are removed. + +Example use: `dockerd --storage-opt dm.blkdiscard=false` + +#### dm.override_udev_sync_check + +By default, the devicemapper backend attempts to synchronize with the `udev` +device manager for the Linux kernel. This option allows disabling that +synchronization, to continue even though the configuration may be buggy. + +To view the `udev` sync support of a Docker daemon that is using the +`devicemapper` driver, run: + + $ docker info + [...] + Udev Sync Supported: true + [...] + +When `udev` sync support is `true`, then `devicemapper` and `udev` can +coordinate the activation and deactivation of devices for containers. + +When `udev` sync support is `false`, a race condition occurs between the +`devicemapper` and `udev` during create and cleanup. The race condition results +in errors and failures. (For information on these failures, see +[docker#4036](https://github.com/docker/docker/issues/4036)) + +To allow the `docker` daemon to start, regardless of whether `udev` sync is +`false`, set `dm.override_udev_sync_check` to true: + + $ dockerd --storage-opt dm.override_udev_sync_check=true + +When this value is `true`, the driver continues and simply warns you the errors +are happening. + +**Note**: The ideal is to pursue a `docker` daemon and environment that does +support synchronizing with `udev`. For further discussion on this topic, see +[docker#4036](https://github.com/docker/docker/issues/4036). +Otherwise, set this flag for migrating existing Docker daemons to a daemon with +a supported environment. + +#### dm.min_free_space + +Specifies the min free space percent in a thin pool require for new device +creation to succeed. This check applies to both free data space as well +as free metadata space. Valid values are from 0% - 99%. Value 0% disables +free space checking logic. If user does not specify a value for this option, +the Engine uses a default value of 10%. + +Whenever a new a thin pool device is created (during `docker pull` or during +container creation), the Engine checks if the minimum free space is available. +If the space is unavailable, then device creation fails and any relevant +`docker` operation fails. + +To recover from this error, you must create more free space in the thin pool to +recover from the error. You can create free space by deleting some images and +containers from tge thin pool. You can also add more storage to the thin pool. + +To add more space to an LVM (logical volume management) thin pool, just add +more storage to the group container thin pool; this should automatically +resolve any errors. If your configuration uses loop devices, then stop the +Engine daemon, grow the size of loop files and restart the daemon to resolve +the issue. + +Example use:: `dockerd --storage-opt dm.min_free_space=10%` + +#### dm.xfs_nospace_max_retries + +Specifies the maximum number of retries XFS should attempt to complete IO when +ENOSPC (no space) error is returned by underlying storage device. + +By default XFS retries infinitely for IO to finish and this can result in +unkillable process. To change this behavior one can set xfs_nospace_max_retries +to say 0 and XFS will not retry IO after getting ENOSPC and will shutdown +filesystem. + +Example use: + + $ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0 + + +## ZFS options + +#### zfs.fsname + +Set zfs filesystem under which docker will create its own datasets. By default +docker will pick up the zfs filesystem where docker graph (`/var/lib/docker`) +is located. + +Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker` + +## Btrfs options + +#### btrfs.min_space + +Specifies the mininum size to use when creating the subvolume which is used for +containers. If user uses disk quota for btrfs when creating or running a +container with **--storage-opt size** option, docker should ensure the **size** +cannot be smaller than **btrfs.min_space**. + +Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G` + +# CLUSTER STORE OPTIONS + +The daemon uses libkv to advertise the node within the cluster. Some Key/Value +backends support mutual TLS, and the client TLS settings used by the daemon can +be configured using the **--cluster-store-opt** flag, specifying the paths to +PEM encoded files. + +#### kv.cacertfile + +Specifies the path to a local file with PEM encoded CA certificates to trust + +#### kv.certfile + +Specifies the path to a local file with a PEM encoded certificate. This +certificate is used as the client cert for communication with the Key/Value +store. + +#### kv.keyfile + +Specifies the path to a local file with a PEM encoded private key. This +private key is used as the client key for communication with the Key/Value +store. + +# Access authorization + +Docker's access authorization can be extended by authorization plugins that +your organization can purchase or build themselves. You can install one or more +authorization plugins when you start the Docker `daemon` using the +`--authorization-plugin=PLUGIN_ID` option. + +```bash +dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +The `PLUGIN_ID` value is either the plugin's name or a path to its +specification file. The plugin's implementation determines whether you can +specify a name or path. Consult with your Docker administrator to get +information about the plugins available to you. + +Once a plugin is installed, requests made to the `daemon` through the command +line or Docker's Engine API are allowed or denied by the plugin. If you have +multiple plugins installed, at least one must allow the request for it to +complete. + +For information about how to create an authorization plugin, see [authorization +plugin](https://docs.docker.com/engine/extend/authorization/) section in the +Docker extend section of this documentation. + + +# HISTORY +Sept 2015, Originally compiled by Shishir Mahajan +based on docker.com source material and internal work. diff --git a/vendor/github.com/docker/docker/man/generate.go b/vendor/github.com/docker/docker/man/generate.go new file mode 100644 index 0000000..f21614d --- /dev/null +++ b/vendor/github.com/docker/docker/man/generate.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/commands" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func generateManPages(path string) error { + header := &doc.GenManHeader{ + Title: "DOCKER", + Section: "1", + Source: "Docker Community", + } + + stdin, stdout, stderr := term.StdStreams() + dockerCli := command.NewDockerCli(stdin, stdout, stderr) + cmd := &cobra.Command{Use: "docker"} + commands.AddCommands(cmd, dockerCli) + + cmd.DisableAutoGenTag = true + return doc.GenManTreeFromOpts(cmd, doc.GenManTreeOptions{ + Header: header, + Path: path, + CommandSeparator: "-", + }) +} + +func main() { + path := "/tmp" + if len(os.Args) > 1 { + path = os.Args[1] + } + fmt.Printf("Generating man pages into %s\n", path) + if err := generateManPages(path); err != nil { + fmt.Fprintf(os.Stderr, "Failed to generate man pages: %s\n", err.Error()) + } +} diff --git a/vendor/github.com/docker/docker/man/generate.sh b/vendor/github.com/docker/docker/man/generate.sh new file mode 100755 index 0000000..e4126ba --- /dev/null +++ b/vendor/github.com/docker/docker/man/generate.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# +# Generate man pages for docker/docker +# + +set -eu + +mkdir -p ./man/man1 + +# Generate man pages from cobra commands +go build -o /tmp/gen-manpages ./man +/tmp/gen-manpages ./man/man1 + +# Generate legacy pages from markdown +./man/md2man-all.sh -q diff --git a/vendor/github.com/docker/docker/man/glide.lock b/vendor/github.com/docker/docker/man/glide.lock new file mode 100644 index 0000000..5ec765a --- /dev/null +++ b/vendor/github.com/docker/docker/man/glide.lock @@ -0,0 +1,52 @@ +hash: ead3ea293a6143fe41069ebec814bf197d8c43a92cc7666b1f7e21a419b46feb +updated: 2016-06-20T21:53:35.420817456Z +imports: +- name: github.com/BurntSushi/toml + version: f0aeabca5a127c4078abb8c8d64298b147264b55 +- name: github.com/cpuguy83/go-md2man + version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa + subpackages: + - md2man +- name: github.com/fsnotify/fsnotify + version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8 +- name: github.com/hashicorp/hcl + version: da486364306ed66c218be9b7953e19173447c18b + subpackages: + - hcl/ast + - hcl/parser + - hcl/token + - json/parser + - hcl/scanner + - hcl/strconv + - json/scanner + - json/token +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/magiconair/properties + version: c265cfa48dda6474e208715ca93e987829f572f8 +- name: github.com/mitchellh/mapstructure + version: d2dd0262208475919e1a362f675cfc0e7c10e905 +- name: github.com/russross/blackfriday + version: 1d6b8e9301e720b08a8938b8c25c018285885438 +- name: github.com/shurcooL/sanitized_anchor_name + version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 +- name: github.com/spf13/cast + version: 27b586b42e29bec072fe7379259cc719e1289da6 +- name: github.com/spf13/jwalterweatherman + version: 33c24e77fb80341fe7130ee7c594256ff08ccc46 +- name: github.com/spf13/pflag + version: dabebe21bf790f782ea4c7bbd2efc430de182afd +- name: github.com/spf13/viper + version: c1ccc378a054ea8d4e38d8c67f6938d4760b53dd +- name: golang.org/x/sys + version: 62bee037599929a6e9146f29d10dd5208c43507d + subpackages: + - unix +- name: gopkg.in/yaml.v2 + version: a83829b6f1293c91addabc89d0571c246397bbf4 +- name: github.com/spf13/cobra + repo: https://github.com/dnephin/cobra + subpackages: + - doc + version: v1.3 +devImports: [] diff --git a/vendor/github.com/docker/docker/man/glide.yaml b/vendor/github.com/docker/docker/man/glide.yaml new file mode 100644 index 0000000..e99b267 --- /dev/null +++ b/vendor/github.com/docker/docker/man/glide.yaml @@ -0,0 +1,12 @@ +package: github.com/docker/docker/man +import: +- package: github.com/cpuguy83/go-md2man + subpackages: + - md2man +- package: github.com/inconshreveable/mousetrap +- package: github.com/spf13/pflag +- package: github.com/spf13/viper +- package: github.com/spf13/cobra + repo: https://github.com/dnephin/cobra + subpackages: + - doc diff --git a/vendor/github.com/docker/docker/man/md2man-all.sh b/vendor/github.com/docker/docker/man/md2man-all.sh new file mode 100755 index 0000000..97c65c9 --- /dev/null +++ b/vendor/github.com/docker/docker/man/md2man-all.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +# get into this script's directory +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +[ "$1" = '-q' ] || { + set -x + pwd +} + +for FILE in *.md; do + base="$(basename "$FILE")" + name="${base%.md}" + num="${name##*.}" + if [ -z "$num" -o "$name" = "$num" ]; then + # skip files that aren't of the format xxxx.N.md (like README.md) + continue + fi + mkdir -p "./man${num}" + go-md2man -in "$FILE" -out "./man${num}/${name}" +done diff --git a/vendor/github.com/docker/docker/migrate/v1/migratev1.go b/vendor/github.com/docker/docker/migrate/v1/migratev1.go new file mode 100644 index 0000000..bc42dd2 --- /dev/null +++ b/vendor/github.com/docker/docker/migrate/v1/migratev1.go @@ -0,0 +1,504 @@ +package v1 + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "sync" + "time" + + "encoding/json" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + imagev1 "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/reference" +) + +type graphIDRegistrar interface { + RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type graphIDMounter interface { + CreateRWLayerByGraphID(string, string, layer.ChainID) error +} + +type checksumCalculator interface { + ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error) +} + +const ( + graphDirName = "graph" + tarDataFileName = "tar-data.json.gz" + migrationFileName = ".migration-v1-images.json" + migrationTagsFileName = ".migration-v1-tags" + migrationDiffIDFileName = ".migration-diffid" + migrationSizeFileName = ".migration-size" + migrationTarDataFileName = ".migration-tardata" + containersDirName = "containers" + configFileNameLegacy = "config.json" + configFileName = "config.v2.json" + repositoriesFilePrefixLegacy = "repositories-" +) + +var ( + errUnsupported = errors.New("migration is not supported") +) + +// Migrate takes an old graph directory and transforms the metadata into the +// new format. +func Migrate(root, driverName string, ls layer.Store, is image.Store, rs reference.Store, ms metadata.Store) error { + graphDir := filepath.Join(root, graphDirName) + if _, err := os.Lstat(graphDir); os.IsNotExist(err) { + return nil + } + + mappings, err := restoreMappings(root) + if err != nil { + return err + } + + if cc, ok := ls.(checksumCalculator); ok { + CalculateLayerChecksums(root, cc, mappings) + } + + if registrar, ok := ls.(graphIDRegistrar); !ok { + return errUnsupported + } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { + return err + } + + err = saveMappings(root, mappings) + if err != nil { + return err + } + + if mounter, ok := ls.(graphIDMounter); !ok { + return errUnsupported + } else if err := migrateContainers(root, mounter, is, mappings); err != nil { + return err + } + + if err := migrateRefs(root, driverName, rs, mappings); err != nil { + return err + } + + return nil +} + +// CalculateLayerChecksums walks an old graph directory and calculates checksums +// for each layer. These checksums are later used for migration. +func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) { + graphDir := filepath.Join(root, graphDirName) + // spawn some extra workers also for maximum performance because the process is bounded by both cpu and io + workers := runtime.NumCPU() * 3 + workQueue := make(chan string, workers) + + wg := sync.WaitGroup{} + + for i := 0; i < workers; i++ { + wg.Add(1) + go func() { + for id := range workQueue { + start := time.Now() + if err := calculateLayerChecksum(graphDir, id, ls); err != nil { + logrus.Errorf("could not calculate checksum for %q, %q", id, err) + } + elapsed := time.Since(start) + logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds()) + } + wg.Done() + }() + } + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + logrus.Errorf("could not read directory %q", graphDir) + return + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, ok := mappings[v1ID]; ok { // support old migrations without helper files + continue + } + workQueue <- v1ID + } + close(workQueue) + wg.Wait() +} + +func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error { + diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName) + if _, err := os.Lstat(diffIDFile); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + + parent, err := getParent(filepath.Join(graphDir, id)) + if err != nil { + return err + } + + diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName)) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil { + return err + } + + if err := ioutils.AtomicWriteFile(filepath.Join(graphDir, id, migrationDiffIDFileName), []byte(diffID), 0600); err != nil { + return err + } + + logrus.Infof("calculated checksum for layer %s: %s", id, diffID) + return nil +} + +func restoreMappings(root string) (map[string]image.ID, error) { + mappings := make(map[string]image.ID) + + mfile := filepath.Join(root, migrationFileName) + f, err := os.Open(mfile) + if err != nil && !os.IsNotExist(err) { + return nil, err + } else if err == nil { + err := json.NewDecoder(f).Decode(&mappings) + if err != nil { + f.Close() + return nil, err + } + f.Close() + } + + return mappings, nil +} + +func saveMappings(root string, mappings map[string]image.ID) error { + mfile := filepath.Join(root, migrationFileName) + f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(mappings); err != nil { + return err + } + return nil +} + +func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { + graphDir := filepath.Join(root, graphDirName) + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + return err + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, exists := mappings[v1ID]; exists { + continue + } + if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil { + continue + } + } + + return nil +} + +func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error { + containersDir := filepath.Join(root, containersDirName) + dir, err := ioutil.ReadDir(containersDir) + if err != nil { + return err + } + for _, v := range dir { + id := v.Name() + + if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil { + continue + } + + containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy)) + if err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(containerJSON, &c); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + imageStrJSON, ok := c["Image"] + if !ok { + return fmt.Errorf("invalid container configuration for %v", id) + } + + var image string + if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + imageID, ok := imageMappings[image] + if !ok { + logrus.Errorf("image not migrated %v", imageID) // non-fatal error + continue + } + + c["Image"] = rawJSON(imageID) + + containerJSON, err = json.Marshal(c) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil { + return err + } + + img, err := is.Get(imageID) + if err != nil { + return err + } + + if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + logrus.Infof("migrated container %s to point to %s", id, imageID) + + } + return nil +} + +type refAdder interface { + AddTag(ref reference.Named, id digest.Digest, force bool) error + AddDigest(ref reference.Canonical, id digest.Digest, force bool) error +} + +func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error { + migrationFile := filepath.Join(root, migrationTagsFileName) + if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) { + return err + } + + type repositories struct { + Repositories map[string]map[string]string + } + + var repos repositories + + f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&repos); err != nil { + return err + } + + for name, repo := range repos.Repositories { + for tag, id := range repo { + if strongID, exists := mappings[id]; exists { + ref, err := reference.WithName(name) + if err != nil { + logrus.Errorf("migrate tags: invalid name %q, %q", name, err) + continue + } + if dgst, err := digest.ParseDigest(tag); err == nil { + canonical, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) + continue + } + if err := rs.AddDigest(canonical, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate digest %q for %q, err: %q", ref.String(), strongID, err) + } + } else { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) + continue + } + if err := rs.AddTag(tagRef, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate tag %q for %q, err: %q", ref.String(), strongID, err) + } + } + logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) + } + } + } + + mf, err := os.Create(migrationFile) + if err != nil { + return err + } + mf.Close() + + return nil +} + +func getParent(confDir string) (string, error) { + jsonFile := filepath.Join(confDir, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return "", err + } + var parent struct { + Parent string + ParentID digest.Digest `json:"parent_id"` + } + if err := json.Unmarshal(imageJSON, &parent); err != nil { + return "", err + } + if parent.Parent == "" && parent.ParentID != "" { // v1.9 + parent.Parent = parent.ParentID.Hex() + } + // compatibilityID for parent + parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent")) + if err == nil && len(parentCompatibilityID) > 0 { + parent.Parent = string(parentCompatibilityID) + } + return parent.Parent, nil +} + +func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { + defer func() { + if err != nil { + logrus.Errorf("migration failed for %v, err: %v", id, err) + } + }() + + parent, err := getParent(filepath.Join(root, graphDirName, id)) + if err != nil { + return err + } + + var parentID image.ID + if parent != "" { + var exists bool + if parentID, exists = mappings[parent]; !exists { + if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil { + // todo: fail or allow broken chains? + return err + } + parentID = mappings[parent] + } + } + + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName)) + if err != nil { + return err + } + diffID, err := digest.ParseDigest(string(diffIDData)) + if err != nil { + return err + } + + sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName)) + if err != nil { + return err + } + size, err := strconv.ParseInt(string(sizeStr), 10, 64) + if err != nil { + return err + } + + layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size) + if err != nil { + return err + } + logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) + + jsonFile := filepath.Join(root, graphDirName, id, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return err + } + + h, err := imagev1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + rootFS.Append(layer.DiffID()) + + config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + strongID, err := is.Create(config) + if err != nil { + return err + } + logrus.Infof("migrated image %s to %s", id, strongID) + + if parentID != "" { + if err := is.SetParent(strongID, parentID); err != nil { + return err + } + } + + checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) + if err == nil { // best effort + dgst, err := digest.ParseDigest(string(checksum)) + if err == nil { + V2MetadataService := metadata.NewV2MetadataService(ms) + V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) + } + } + _, err = ls.Release(layer) + if err != nil { + return err + } + + mappings[id] = strongID + return +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go b/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go new file mode 100644 index 0000000..be82fdc --- /dev/null +++ b/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go @@ -0,0 +1,438 @@ +package v1 + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +func TestMigrateRefs(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-tags") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600) + + ta := &mockTagAdder{} + err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ + "5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + "abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + }) + if err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", + } + + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } + + // second migration is no-op + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600) + err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + }) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } +} + +func TestMigrateContainers(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-containers") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = addContainer(tmpdir, `{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + // container with invalid image + err = addContainer(tmpdir, `{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"e780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"4c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + ls := &mockMounter{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, ls) + if err != nil { + t.Fatal(err) + } + + imgID, err := is.Create([]byte(`{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["sh"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","history":[{"created":"2015-10-31T22:22:54.690851953Z","created_by":"/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"},{"created":"2015-10-31T22:22:55.613815829Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)) + if err != nil { + t.Fatal(err) + } + + err = migrateContainers(tmpdir, ls, is, map[string]image.ID{ + "2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093": imgID, + }) + if err != nil { + t.Fatal(err) + } + + expected := []mountInfo{{ + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "sha256:c3191d32a37d7159b2e30830937d2e30268ad6c375a773a8994911a3aba9b93f", + }} + if !reflect.DeepEqual(expected, ls.mounts) { + t.Fatalf("invalid mounts: expected %q, got %q", expected, ls.mounts) + } + + if actual, expected := ls.count, 0; actual != expected { + t.Fatalf("invalid active mounts: expected %d, got %d", expected, actual) + } + + config2, err := ioutil.ReadFile(filepath.Join(tmpdir, "containers", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "config.v2.json")) + if err != nil { + t.Fatal(err) + } + var config struct{ Image string } + err = json.Unmarshal(config2, &config) + if err != nil { + t.Fatal(err) + } + + if actual, expected := config.Image, string(imgID); actual != expected { + t.Fatalf("invalid image pointer in migrated config: expected %q, got %q", expected, actual) + } + +} + +func TestMigrateImages(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-images") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // busybox from 1.9 + id1, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "") + if err != nil { + t.Fatal(err) + } + + id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "") + if err != nil { + t.Fatal(err) + } + + ls := &mockRegistrar{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, ls) + if err != nil { + t.Fatal(err) + } + + ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution")) + if err != nil { + t.Fatal(err) + } + mappings := make(map[string]image.ID) + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected := map[string]image.ID{ + id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"), + id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"), + } + + if !reflect.DeepEqual(mappings, expected) { + t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings) + } + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + ls.count = 0 + + // next images are busybox from 1.8.2 + _, err = addImage(tmpdir, `{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + if err != nil { + t.Fatal(err) + } + + _, err = addImage(tmpdir, `{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57") + if err != nil { + t.Fatal(err) + } + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395") + expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07") + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + + v2MetadataService := metadata.NewV2MetadataService(ms) + receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID()) + if err != nil { + t.Fatal(err) + } + + expectedMetadata := []metadata.V2Metadata{ + {Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")}, + {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if !reflect.DeepEqual(expectedMetadata, receivedMetadata) { + t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata) + } + +} + +func TestMigrateUnsupported(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = os.MkdirAll(filepath.Join(tmpdir, "graph"), 0700) + if err != nil { + t.Fatal(err) + } + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != errUnsupported { + t.Fatalf("expected unsupported error, got %q", err) + } +} + +func TestMigrateEmptyDir(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func addImage(dest, jsonConfig, parent, checksum string) (string, error) { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return "", err + } + if config.ID == "" { + b := make([]byte, 32) + rand.Read(b) + config.ID = hex.EncodeToString(b) + } + contDir := filepath.Join(dest, "graph", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil { + return "", err + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-diffid"), []byte(layer.EmptyLayer.DiffID()), 0600); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-size"), []byte("0"), 0600); err != nil { + return "", err + } + if parent != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil { + return "", err + } + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + return config.ID, nil +} + +func addContainer(dest, jsonConfig string) error { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return err + } + contDir := filepath.Join(dest, "containers", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { + return err + } + return nil +} + +type mockTagAdder struct { + refs map[string]string +} + +func (t *mockTagAdder) AddTag(ref reference.Named, id digest.Digest, force bool) error { + if t.refs == nil { + t.refs = make(map[string]string) + } + t.refs[ref.String()] = id.String() + return nil +} +func (t *mockTagAdder) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return t.AddTag(ref, id, force) +} + +type mockRegistrar struct { + layers map[layer.ChainID]*mockLayer + count int +} + +func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, diffID layer.DiffID, tarDataFile string, size int64) (layer.Layer, error) { + r.count++ + l := &mockLayer{} + if parent != "" { + p, exists := r.layers[parent] + if !exists { + return nil, fmt.Errorf("invalid parent %q", parent) + } + l.parent = p + l.diffIDs = append(l.diffIDs, p.diffIDs...) + } + l.diffIDs = append(l.diffIDs, diffID) + if r.layers == nil { + r.layers = make(map[layer.ChainID]*mockLayer) + } + r.layers[l.ChainID()] = l + return l, nil +} +func (r *mockRegistrar) Release(l layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} +func (r *mockRegistrar) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +type mountInfo struct { + name, graphID, parent string +} +type mockMounter struct { + mounts []mountInfo + count int +} + +func (r *mockMounter) CreateRWLayerByGraphID(name string, graphID string, parent layer.ChainID) error { + r.mounts = append(r.mounts, mountInfo{name, graphID, string(parent)}) + return nil +} +func (r *mockMounter) Unmount(string) error { + r.count-- + return nil +} +func (r *mockMounter) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (r *mockMounter) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} + +type mockLayer struct { + diffIDs []layer.DiffID + parent *mockLayer +} + +func (l *mockLayer) TarStream() (io.ReadCloser, error) { + return nil, nil +} +func (l *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, nil +} + +func (l *mockLayer) ChainID() layer.ChainID { + return layer.CreateChainID(l.diffIDs) +} + +func (l *mockLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *mockLayer) Parent() layer.Layer { + if l.parent == nil { + return nil + } + return l.parent +} + +func (l *mockLayer) Size() (int64, error) { + return 0, nil +} + +func (l *mockLayer) DiffSize() (int64, error) { + return 0, nil +} + +func (l *mockLayer) Metadata() (map[string]string, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/oci/defaults_linux.go b/vendor/github.com/docker/docker/oci/defaults_linux.go new file mode 100644 index 0000000..8b3ce72 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_linux.go @@ -0,0 +1,168 @@ +package oci + +import ( + "os" + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +func sPtr(s string) *string { return &s } +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +// DefaultSpec returns default oci spec used by docker. +func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + s.Process.Capabilities = []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } + + s.Linux = &specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/sys/firmware", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.Namespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind-mount or symlink of the container's ptmx. + // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + Devices: []specs.Device{}, + Resources: &specs.Resources{ + Devices: []specs.DeviceCgroup{ + { + Allow: false, + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(5), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(3), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(9), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(8), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(0), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(1), + Access: sPtr("rwm"), + }, + { + Allow: false, + Type: sPtr("c"), + Major: iPtr(10), + Minor: iPtr(229), + Access: sPtr("rwm"), + }, + }, + }, + } + + return s +} diff --git a/vendor/github.com/docker/docker/oci/defaults_solaris.go b/vendor/github.com/docker/docker/oci/defaults_solaris.go new file mode 100644 index 0000000..85c8b68 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_solaris.go @@ -0,0 +1,20 @@ +package oci + +import ( + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultSpec returns default oci spec used by docker. +func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: "0.6.0", + Platform: specs.Platform{ + OS: "SunOS", + Arch: runtime.GOARCH, + }, + } + s.Solaris = &specs.Solaris{} + return s +} diff --git a/vendor/github.com/docker/docker/oci/defaults_windows.go b/vendor/github.com/docker/docker/oci/defaults_windows.go new file mode 100644 index 0000000..ab51904 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_windows.go @@ -0,0 +1,19 @@ +package oci + +import ( + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultSpec returns default spec used by docker. +func DefaultSpec() specs.Spec { + return specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + Windows: &specs.Windows{}, + } +} diff --git a/vendor/github.com/docker/docker/oci/devices_linux.go b/vendor/github.com/docker/docker/oci/devices_linux.go new file mode 100644 index 0000000..2840d25 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/devices_linux.go @@ -0,0 +1,86 @@ +package oci + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +func Device(d *configs.Device) specs.Device { + return specs.Device{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func deviceCgroup(d *configs.Device) specs.DeviceCgroup { + t := string(d.Type) + return specs.DeviceCgroup{ + Allow: true, + Type: &t, + Major: &d.Major, + Minor: &d.Minor, + Access: &d.Permissions, + } +} + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { + resolvedPathOnHost := pathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = pathInContainer + return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) + devs = append(devs, Device(childDevice)) + devPermissions = append(devPermissions, deviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) +} diff --git a/vendor/github.com/docker/docker/oci/devices_unsupported.go b/vendor/github.com/docker/docker/oci/devices_unsupported.go new file mode 100644 index 0000000..6252cab --- /dev/null +++ b/vendor/github.com/docker/docker/oci/devices_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package oci + +import ( + "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +// Not implemented +func Device(d *configs.Device) specs.Device { return specs.Device{} } + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +// Not implemented +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { + return nil, nil, errors.New("oci/devices: unsupported platform") +} diff --git a/vendor/github.com/docker/docker/oci/namespaces.go b/vendor/github.com/docker/docker/oci/namespaces.go new file mode 100644 index 0000000..4902482 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/namespaces.go @@ -0,0 +1,16 @@ +package oci + +import specs "github.com/opencontainers/runtime-spec/specs-go" + +// RemoveNamespace removes the `nsType` namespace from OCI spec `s` +func RemoveNamespace(s *specs.Spec, nsType specs.NamespaceType) { + idx := -1 + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + idx = i + } + } + if idx >= 0 { + s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) + } +} diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go new file mode 100644 index 0000000..266df1e --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts.go @@ -0,0 +1,151 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) + // DefaultNamedPipe defines the default named pipe used by docker on Windows + DefaultNamedPipe = `//./pipe/docker_engine` +) + +// ValidateHost validates that the specified string is a valid host and returns it. +func ValidateHost(val string) (string, error) { + host := strings.TrimSpace(val) + // The empty string means default and is not handled by parseDockerDaemonHost + if host != "" { + _, err := parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for tls + return val, nil +} + +// ParseHost and set defaults for a Daemon host string +func ParseHost(defaultToTLS bool, val string) (string, error) { + host := strings.TrimSpace(val) + if host == "" { + if defaultToTLS { + host = DefaultTLSHost + } else { + host = DefaultHost + } + } else { + var err error + host, err = parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + return host, nil +} + +// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. +func parseDockerDaemonHost(addr string) (string, error) { + addrParts := strings.SplitN(addr, "://", 2) + if len(addrParts) == 1 && addrParts[0] != "" { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], DefaultTCPHost) + case "unix": + return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) + case "npipe": + return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseSimpleProtoAddr parses and validates that the specified address is a valid +// socket address for simple protocols like unix and npipe. It returns a formatted +// socket address, either using the address parsed from addr, or the contents of +// defaultAddr if addr is a blank string. +func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, proto+"://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("%s://%s", proto, addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. +// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // try port addition once + host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) + } + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} diff --git a/vendor/github.com/docker/docker/opts/hosts_test.go b/vendor/github.com/docker/docker/opts/hosts_test.go new file mode 100644 index 0000000..a5bec30 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_test.go @@ -0,0 +1,148 @@ +package opts + +import ( + "fmt" + "testing" +) + +func TestParseHost(t *testing.T) { + invalid := []string{ + "something with spaces", + "://", + "unknown://", + "tcp://:port", + "tcp://invalid:port", + } + + valid := map[string]string{ + "": DefaultHost, + " ": DefaultHost, + " ": DefaultHost, + "fd://": "fd://", + "fd://something": "fd://something", + "tcp://host:": fmt.Sprintf("tcp://host:%d", DefaultHTTPPort), + "tcp://": DefaultTCPHost, + "tcp://:2375": fmt.Sprintf("tcp://%s:2375", DefaultHTTPHost), + "tcp://:2376": fmt.Sprintf("tcp://%s:2376", DefaultHTTPHost), + "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", + "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", + "tcp://192.168:8080": "tcp://192.168:8080", + "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P + " tcp://:7777/path ": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "tcp://docker.com:2375": "tcp://docker.com:2375", + "unix://": "unix://" + DefaultUnixSocket, + "unix://path/to/socket": "unix://path/to/socket", + "npipe://": "npipe://" + DefaultNamedPipe, + "npipe:////./pipe/foo": "npipe:////./pipe/foo", + } + + for _, value := range invalid { + if _, err := ParseHost(false, value); err == nil { + t.Errorf("Expected an error for %v, got [nil]", value) + } + } + + for value, expected := range valid { + if actual, err := ParseHost(false, value); err != nil || actual != expected { + t.Errorf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) + } + } +} + +func TestParseDockerDaemonHost(t *testing.T) { + invalids := map[string]string{ + + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock", + " tcp://:7777/path ": "Invalid bind address format: tcp://:7777/path ", + "": "Invalid bind address format: ", + } + valids := map[string]string{ + "0.0.0.1:": "tcp://0.0.0.1:2375", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + "[::1]:": "tcp://[::1]:2375", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + ":6666": fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost), + ":6666/path": fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost), + "tcp://": DefaultTCPHost, + "tcp://:7777": fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost), + "tcp://:7777/path": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix://" + DefaultUnixSocket, + "fd://": "fd://", + "fd://something": "fd://something", + "localhost:": "tcp://localhost:2375", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := parseDockerDaemonHost(validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseTCP(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + ) + invalids := map[string]string{ + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "": defaultHTTPHost, + "tcp://": defaultHTTPHost, + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "[::1]:": "tcp://[::1]:2376", + "[::1]:5555": "tcp://[::1]:5555", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", + "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + "localhost:": "tcp://localhost:2376", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := parseSimpleProtoAddr("unix", "tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if _, err := parseSimpleProtoAddr("unix", "unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if v, err := parseSimpleProtoAddr("unix", "", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { + t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") + } +} diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go new file mode 100644 index 0000000..611407a --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package opts + +import "fmt" + +// DefaultHost constant defines the default host string used by docker on other hosts than Windows +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go new file mode 100644 index 0000000..7c239e0 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_windows.go @@ -0,0 +1,6 @@ +// +build windows + +package opts + +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go new file mode 100644 index 0000000..fb03b50 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/ip.go @@ -0,0 +1,47 @@ +package opts + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parseable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. +func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} + +// Type returns the type of the option +func (o *IPOpt) Type() string { + return "ip" +} diff --git a/vendor/github.com/docker/docker/opts/ip_test.go b/vendor/github.com/docker/docker/opts/ip_test.go new file mode 100644 index 0000000..1027d84 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/ip_test.go @@ -0,0 +1,54 @@ +package opts + +import ( + "net" + "testing" +) + +func TestIpOptString(t *testing.T) { + addresses := []string{"", "0.0.0.0"} + var ip net.IP + + for _, address := range addresses { + stringAddress := NewIPOpt(&ip, address).String() + if stringAddress != address { + t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) + } + } +} + +func TestNewIpOptInvalidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "Not an ip" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "127.0.0.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestNewIpOptValidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "192.168.1.1" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "192.168.1.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestIpOptSetInvalidVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + ipOpt := &IPOpt{IP: &ip} + + invalidIP := "invalid ip" + expectedError := "invalid ip is not an ip address" + err := ipOpt.Set(invalidIP) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) + } +} diff --git a/vendor/github.com/docker/docker/opts/mount.go b/vendor/github.com/docker/docker/opts/mount.go new file mode 100644 index 0000000..ce6383d --- /dev/null +++ b/vendor/github.com/docker/docker/opts/mount.go @@ -0,0 +1,171 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "os" + "strconv" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/go-units" +) + +// MountOpt is a Value type for parsing mounts +type MountOpt struct { + values []mounttypes.Mount +} + +// Set a new mount value +func (m *MountOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + mount := mounttypes.Mount{} + + volumeOptions := func() *mounttypes.VolumeOptions { + if mount.VolumeOptions == nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + Labels: make(map[string]string), + } + } + if mount.VolumeOptions.DriverConfig == nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} + } + return mount.VolumeOptions + } + + bindOptions := func() *mounttypes.BindOptions { + if mount.BindOptions == nil { + mount.BindOptions = new(mounttypes.BindOptions) + } + return mount.BindOptions + } + + tmpfsOptions := func() *mounttypes.TmpfsOptions { + if mount.TmpfsOptions == nil { + mount.TmpfsOptions = new(mounttypes.TmpfsOptions) + } + return mount.TmpfsOptions + } + + setValueOnMap := func(target map[string]string, value string) { + parts := strings.SplitN(value, "=", 2) + if len(parts) == 1 { + target[value] = "" + } else { + target[parts[0]] = parts[1] + } + } + + mount.Type = mounttypes.TypeVolume // default to volume mounts + // Set writable as the default + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) == 1 { + switch key { + case "readonly", "ro": + mount.ReadOnly = true + continue + case "volume-nocopy": + volumeOptions().NoCopy = true + continue + } + } + + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "type": + mount.Type = mounttypes.Type(strings.ToLower(value)) + case "source", "src": + mount.Source = value + case "target", "dst", "destination": + mount.Target = value + case "readonly", "ro": + mount.ReadOnly, err = strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + case "bind-propagation": + bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value)) + case "volume-nocopy": + volumeOptions().NoCopy, err = strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for populate: %s", value) + } + case "volume-label": + setValueOnMap(volumeOptions().Labels, value) + case "volume-driver": + volumeOptions().DriverConfig.Name = value + case "volume-opt": + if volumeOptions().DriverConfig.Options == nil { + volumeOptions().DriverConfig.Options = make(map[string]string) + } + setValueOnMap(volumeOptions().DriverConfig.Options, value) + case "tmpfs-size": + sizeBytes, err := units.RAMInBytes(value) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + tmpfsOptions().SizeBytes = sizeBytes + case "tmpfs-mode": + ui64, err := strconv.ParseUint(value, 8, 32) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + tmpfsOptions().Mode = os.FileMode(ui64) + default: + return fmt.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + if mount.Type == "" { + return fmt.Errorf("type is required") + } + + if mount.Target == "" { + return fmt.Errorf("target is required") + } + + if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { + return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type) + } + if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { + return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) + } + if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { + return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) + } + + m.values = append(m.values, mount) + return nil +} + +// Type returns the type of this option +func (m *MountOpt) Type() string { + return "mount" +} + +// String returns a string repr of this option +func (m *MountOpt) String() string { + mounts := []string{} + for _, mount := range m.values { + repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) + mounts = append(mounts, repr) + } + return strings.Join(mounts, ", ") +} + +// Value returns the mounts +func (m *MountOpt) Value() []mounttypes.Mount { + return m.values +} diff --git a/vendor/github.com/docker/docker/opts/mount_test.go b/vendor/github.com/docker/docker/opts/mount_test.go new file mode 100644 index 0000000..59606c3 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/mount_test.go @@ -0,0 +1,184 @@ +package opts + +import ( + "os" + "testing" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestMountOptString(t *testing.T) { + mount := MountOpt{ + values: []mounttypes.Mount{ + { + Type: mounttypes.TypeBind, + Source: "/home/path", + Target: "/target", + }, + { + Type: mounttypes.TypeVolume, + Source: "foo", + Target: "/target/foo", + }, + }, + } + expected := "bind /home/path /target, volume foo /target/foo" + assert.Equal(t, mount.String(), expected) +} + +func TestMountOptSetBindNoErrorBind(t *testing.T) { + for _, testcase := range []string{ + // tests several aliases that should have same result. + "type=bind,target=/target,source=/source", + "type=bind,src=/source,dst=/target", + "type=bind,source=/source,dst=/target", + "type=bind,src=/source,target=/target", + } { + var mount MountOpt + + assert.NilError(t, mount.Set(testcase)) + + mounts := mount.Value() + assert.Equal(t, len(mounts), 1) + assert.Equal(t, mounts[0], mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/source", + Target: "/target", + }) + } +} + +func TestMountOptSetVolumeNoError(t *testing.T) { + for _, testcase := range []string{ + // tests several aliases that should have same result. + "type=volume,target=/target,source=/source", + "type=volume,src=/source,dst=/target", + "type=volume,source=/source,dst=/target", + "type=volume,src=/source,target=/target", + } { + var mount MountOpt + + assert.NilError(t, mount.Set(testcase)) + + mounts := mount.Value() + assert.Equal(t, len(mounts), 1) + assert.Equal(t, mounts[0], mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "/source", + Target: "/target", + }) + } +} + +// TestMountOptDefaultType ensures that a mount without the type defaults to a +// volume mount. +func TestMountOptDefaultType(t *testing.T) { + var mount MountOpt + assert.NilError(t, mount.Set("target=/target,source=/foo")) + assert.Equal(t, mount.values[0].Type, mounttypes.TypeVolume) +} + +func TestMountOptSetErrorNoTarget(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,source=/foo"), "target is required") +} + +func TestMountOptSetErrorInvalidKey(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,bogus=foo"), "unexpected key 'bogus'") +} + +func TestMountOptSetErrorInvalidField(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,bogus"), "invalid field 'bogus'") +} + +func TestMountOptSetErrorInvalidReadOnly(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,readonly=no"), "invalid value for readonly: no") + assert.Error(t, mount.Set("type=volume,readonly=invalid"), "invalid value for readonly: invalid") +} + +func TestMountOptDefaultEnableReadOnly(t *testing.T) { + var m MountOpt + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo")) + assert.Equal(t, m.values[0].ReadOnly, false) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly")) + assert.Equal(t, m.values[0].ReadOnly, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=1")) + assert.Equal(t, m.values[0].ReadOnly, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=true")) + assert.Equal(t, m.values[0].ReadOnly, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=0")) + assert.Equal(t, m.values[0].ReadOnly, false) +} + +func TestMountOptVolumeNoCopy(t *testing.T) { + var m MountOpt + assert.NilError(t, m.Set("type=volume,target=/foo,volume-nocopy")) + assert.Equal(t, m.values[0].Source, "") + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo")) + assert.Equal(t, m.values[0].VolumeOptions == nil, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=true")) + assert.Equal(t, m.values[0].VolumeOptions != nil, true) + assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy")) + assert.Equal(t, m.values[0].VolumeOptions != nil, true) + assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=1")) + assert.Equal(t, m.values[0].VolumeOptions != nil, true) + assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) +} + +func TestMountOptTypeConflict(t *testing.T) { + var m MountOpt + assert.Error(t, m.Set("type=bind,target=/foo,source=/foo,volume-nocopy=true"), "cannot mix") + assert.Error(t, m.Set("type=volume,target=/foo,source=/foo,bind-propagation=rprivate"), "cannot mix") +} + +func TestMountOptSetTmpfsNoError(t *testing.T) { + for _, testcase := range []string{ + // tests several aliases that should have same result. + "type=tmpfs,target=/target,tmpfs-size=1m,tmpfs-mode=0700", + "type=tmpfs,target=/target,tmpfs-size=1MB,tmpfs-mode=700", + } { + var mount MountOpt + + assert.NilError(t, mount.Set(testcase)) + + mounts := mount.Value() + assert.Equal(t, len(mounts), 1) + assert.DeepEqual(t, mounts[0], mounttypes.Mount{ + Type: mounttypes.TypeTmpfs, + Target: "/target", + TmpfsOptions: &mounttypes.TmpfsOptions{ + SizeBytes: 1024 * 1024, // not 1000 * 1000 + Mode: os.FileMode(0700), + }, + }) + } +} + +func TestMountOptSetTmpfsError(t *testing.T) { + var m MountOpt + assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-size=foo"), "invalid value for tmpfs-size") + assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-mode=foo"), "invalid value for tmpfs-mode") + assert.Error(t, m.Set("type=tmpfs"), "target is required") +} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go new file mode 100644 index 0000000..ae85153 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts.go @@ -0,0 +1,360 @@ +package opts + +import ( + "fmt" + "math/big" + "net" + "regexp" + "strings" + + "github.com/docker/docker/api/types/filters" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +// NewListOpts creates a new ListOpts with the specified validator. +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *NewListOptsRef(&values, validator) +} + +// NewListOptsRef creates a new ListOpts with the specified values and validator. +func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + return fmt.Sprintf("%v", []string((*opts.values))) +} + +// Set validates if needed the input value and adds it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + (*opts.values) = append((*opts.values), value) + return nil +} + +// Delete removes the specified element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values of slice. +func (opts *ListOpts) GetAll() []string { + return (*opts.values) +} + +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + +// Get checks the existence of the specified key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len((*opts.values)) +} + +// Type returns a string name for this Option type +func (opts *ListOpts) Type() string { + return "list" +} + +// NamedOption is an interface that list and map options +// with names implement. +type NamedOption interface { + Name() string +} + +// NamedListOpts is a ListOpts with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedListOpts struct { + name string + ListOpts +} + +var _ NamedOption = &NamedListOpts{} + +// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. +func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { + return &NamedListOpts{ + name: name, + ListOpts: *NewListOptsRef(values, validator), + } +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *NamedListOpts) Name() string { + return o.name +} + +// MapOpts holds a map of values and a validation function. +type MapOpts struct { + values map[string]string + validator ValidatorFctType +} + +// Set validates if needed the input value and add it to the +// internal map, by splitting on '='. +func (opts *MapOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + vals := strings.SplitN(value, "=", 2) + if len(vals) == 1 { + (opts.values)[vals[0]] = "" + } else { + (opts.values)[vals[0]] = vals[1] + } + return nil +} + +// GetAll returns the values of MapOpts as a map. +func (opts *MapOpts) GetAll() map[string]string { + return opts.values +} + +func (opts *MapOpts) String() string { + return fmt.Sprintf("%v", map[string]string((opts.values))) +} + +// Type returns a string name for this Option type +func (opts *MapOpts) Type() string { + return "map" +} + +// NewMapOpts creates a new MapOpts with the specified map of values and a validator. +func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } + return &MapOpts{ + values: values, + validator: validator, + } +} + +// NamedMapOpts is a MapOpts struct with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedMapOpts struct { + name string + MapOpts +} + +var _ NamedOption = &NamedMapOpts{} + +// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. +func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { + return &NamedMapOpts{ + name: name, + MapOpts: *NewMapOpts(values, validator), + } +} + +// Name returns the name of the NamedMapOpts in the configuration. +func (o *NamedMapOpts) Name() string { + return o.name +} + +// ValidatorFctType defines a validator function that returns a validated string and/or an error. +type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error +type ValidatorFctListType func(val string) ([]string, error) + +// ValidateIPAddress validates an Ip address. +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateDNSSearch validates domain for resolvconf search configuration. +// A zero length domain is represented by a dot (.). +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." { + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateLabel validates that the specified string is a valid label, and returns it. +// Labels are in the form on key=value. +func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} + +// ValidateSysctl validates a sysctl and returns it. +func ValidateSysctl(val string) (string, error) { + validSysctlMap := map[string]bool{ + "kernel.msgmax": true, + "kernel.msgmnb": true, + "kernel.msgmni": true, + "kernel.sem": true, + "kernel.shmall": true, + "kernel.shmmax": true, + "kernel.shmmni": true, + "kernel.shm_rmid_forced": true, + } + validSysctlPrefixes := []string{ + "net.", + "fs.mqueue.", + } + arr := strings.Split(val, "=") + if len(arr) < 2 { + return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) + } + if validSysctlMap[arr[0]] { + return val, nil + } + + for _, vp := range validSysctlPrefixes { + if strings.HasPrefix(arr[0], vp) { + return val, nil + } + } + return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) +} + +// FilterOpt is a flag type for validating filters +type FilterOpt struct { + filter filters.Args +} + +// NewFilterOpt returns a new FilterOpt +func NewFilterOpt() FilterOpt { + return FilterOpt{filter: filters.NewArgs()} +} + +func (o *FilterOpt) String() string { + repr, err := filters.ToParam(o.filter) + if err != nil { + return "invalid filters" + } + return repr +} + +// Set sets the value of the opt by parsing the command line value +func (o *FilterOpt) Set(value string) error { + var err error + o.filter, err = filters.ParseFlag(value, o.filter) + return err +} + +// Type returns the option type +func (o *FilterOpt) Type() string { + return "filter" +} + +// Value returns the value of this option +func (o *FilterOpt) Value() filters.Args { + return o.filter +} + +// NanoCPUs is a type for fixed point fractional number. +type NanoCPUs int64 + +// String returns the string format of the number +func (c *NanoCPUs) String() string { + return big.NewRat(c.Value(), 1e9).FloatString(3) +} + +// Set sets the value of the NanoCPU by passing a string +func (c *NanoCPUs) Set(value string) error { + cpus, err := ParseCPUs(value) + *c = NanoCPUs(cpus) + return err +} + +// Type returns the type +func (c *NanoCPUs) Type() string { + return "decimal" +} + +// Value returns the value in int64 +func (c *NanoCPUs) Value() int64 { + return int64(*c) +} + +// ParseCPUs takes a string ratio and returns an integer value of nano cpus +func ParseCPUs(value string) (int64, error) { + cpu, ok := new(big.Rat).SetString(value) + if !ok { + return 0, fmt.Errorf("failed to parse %v as a rational number", value) + } + nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) + if !nano.IsInt() { + return 0, fmt.Errorf("value is too precise") + } + return nano.Num().Int64(), nil +} diff --git a/vendor/github.com/docker/docker/opts/opts_test.go b/vendor/github.com/docker/docker/opts/opts_test.go new file mode 100644 index 0000000..9f41e47 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_test.go @@ -0,0 +1,232 @@ +package opts + +import ( + "fmt" + "strings" + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewMapOpts(tmpMap, logOptsValidator) + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + + o.Set("max-file=2") + if len(tmpMap) != 2 { + t.Errorf("map length %d != 2", len(tmpMap)) + } + + if tmpMap["max-file"] != "2" { + t.Errorf("max-file = %s != 2", tmpMap["max-file"]) + } + + if tmpMap["max-size"] != "1" { + t.Errorf("max-size = %s != 1", tmpMap["max-size"]) + } + if o.Set("dummy-val=3") == nil { + t.Errorf("validator is not being called") + } +} + +func TestListOptsWithoutValidator(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + o.Set("bar") + if o.Len() != 2 { + t.Errorf("%d != 2", o.Len()) + } + o.Set("bar") + if o.Len() != 3 { + t.Errorf("%d != 3", o.Len()) + } + if !o.Get("bar") { + t.Error("o.Get(\"bar\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("foo") + if o.String() != "[bar bar]" { + t.Errorf("%s != [bar bar]", o.String()) + } + listOpts := o.GetAll() + if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { + t.Errorf("Expected [[bar bar]], got [%v]", listOpts) + } + mapListOpts := o.GetMap() + if len(mapListOpts) != 1 { + t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) + } + +} + +func TestListOptsWithValidator(t *testing.T) { + // Re-using logOptsvalidator (used by MapOpts) + o := NewListOpts(logOptsValidator) + o.Set("foo") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("foo=bar") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("max-file=2") + if o.Len() != 1 { + t.Errorf("%d != 1", o.Len()) + } + if !o.Get("max-file=2") { + t.Error("o.Get(\"max-file=2\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("max-file=2") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } +} + +func TestValidateDNSSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, + } + + for _, domain := range valid { + if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} + +func TestValidateLabel(t *testing.T) { + if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { + t.Fatalf("Expected an error [bad attribute format: label], go %v", err) + } + if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { + t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) + } + // Validate it's working with more than one = + if actual, err := ValidateLabel("key1=value1=value2"); err != nil { + t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) + } + // Validate it's working with one more + if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { + t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) + } +} + +func logOptsValidator(val string) (string, error) { + allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} + vals := strings.Split(val, "=") + if allowedKeys[vals[0]] != "" { + return val, nil + } + return "", fmt.Errorf("invalid key %s", vals[0]) +} + +func TestNamedListOpts(t *testing.T) { + var v []string + o := NewNamedListOptsRef("foo-name", &v, nil) + + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + if o.Name() != "foo-name" { + t.Errorf("%s != foo-name", o.Name()) + } + if len(v) != 1 { + t.Errorf("expected foo to be in the values, got %v", v) + } +} + +func TestNamedMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewNamedMapOpts("max-name", tmpMap, nil) + + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + if o.Name() != "max-name" { + t.Errorf("%s != max-name", o.Name()) + } + if _, exist := tmpMap["max-size"]; !exist { + t.Errorf("expected map-size to be in the values, got %v", tmpMap) + } +} diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go new file mode 100644 index 0000000..f1ce844 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package opts + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go new file mode 100644 index 0000000..ebe40c9 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_windows.go @@ -0,0 +1,56 @@ +package opts + +// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. +// @jhowardmsft, @swernli. +// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP5. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... +// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/docker/opts/port.go b/vendor/github.com/docker/docker/opts/port.go new file mode 100644 index 0000000..020a5d1 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/port.go @@ -0,0 +1,146 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +const ( + portOptTargetPort = "target" + portOptPublishedPort = "published" + portOptProtocol = "protocol" + portOptMode = "mode" +) + +// PortOpt represents a port config in swarm mode. +type PortOpt struct { + ports []swarm.PortConfig +} + +// Set a new port value +func (p *PortOpt) Set(value string) error { + longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) + if err != nil { + return err + } + if longSyntax { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + pConfig := swarm.PortConfig{} + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid field %s", field) + } + + key := strings.ToLower(parts[0]) + value := strings.ToLower(parts[1]) + + switch key { + case portOptProtocol: + if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) { + return fmt.Errorf("invalid protocol value %s", value) + } + + pConfig.Protocol = swarm.PortConfigProtocol(value) + case portOptMode: + if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) { + return fmt.Errorf("invalid publish mode value %s", value) + } + + pConfig.PublishMode = swarm.PortConfigPublishMode(value) + case portOptTargetPort: + tPort, err := strconv.ParseUint(value, 10, 16) + if err != nil { + return err + } + + pConfig.TargetPort = uint32(tPort) + case portOptPublishedPort: + pPort, err := strconv.ParseUint(value, 10, 16) + if err != nil { + return err + } + + pConfig.PublishedPort = uint32(pPort) + default: + return fmt.Errorf("invalid field key %s", key) + } + } + + if pConfig.TargetPort == 0 { + return fmt.Errorf("missing mandatory field %q", portOptTargetPort) + } + + if pConfig.PublishMode == "" { + pConfig.PublishMode = swarm.PortConfigPublishModeIngress + } + + if pConfig.Protocol == "" { + pConfig.Protocol = swarm.PortConfigProtocolTCP + } + + p.ports = append(p.ports, pConfig) + } else { + // short syntax + portConfigs := []swarm.PortConfig{} + // We can ignore errors because the format was already validated by ValidatePort + ports, portBindings, _ := nat.ParsePortSpecs([]string{value}) + + for port := range ports { + portConfigs = append(portConfigs, ConvertPortToPortConfig(port, portBindings)...) + } + p.ports = append(p.ports, portConfigs...) + } + return nil +} + +// Type returns the type of this option +func (p *PortOpt) Type() string { + return "port" +} + +// String returns a string repr of this option +func (p *PortOpt) String() string { + ports := []string{} + for _, port := range p.ports { + repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) + ports = append(ports, repr) + } + return strings.Join(ports, ", ") +} + +// Value returns the ports +func (p *PortOpt) Value() []swarm.PortConfig { + return p.ports +} + +// ConvertPortToPortConfig converts ports to the swarm type +func ConvertPortToPortConfig( + port nat.Port, + portBindings map[nat.Port][]nat.PortBinding, +) []swarm.PortConfig { + ports := []swarm.PortConfig{} + + for _, binding := range portBindings[port] { + hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16) + ports = append(ports, swarm.PortConfig{ + //TODO Name: ? + Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), + TargetPort: uint32(port.Int()), + PublishedPort: uint32(hostPort), + PublishMode: swarm.PortConfigPublishModeIngress, + }) + } + return ports +} diff --git a/vendor/github.com/docker/docker/opts/port_test.go b/vendor/github.com/docker/docker/opts/port_test.go new file mode 100644 index 0000000..67bcf8f --- /dev/null +++ b/vendor/github.com/docker/docker/opts/port_test.go @@ -0,0 +1,259 @@ +package opts + +import ( + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestPortOptValidSimpleSyntax(t *testing.T) { + testCases := []struct { + value string + expected []swarm.PortConfig + }{ + { + value: "80", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80:8080", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "8080:80/tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 8080, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80:8080/udp", + expected: []swarm.PortConfig{ + { + Protocol: "udp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80-81:8080-8081/tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + { + Protocol: "tcp", + TargetPort: 8081, + PublishedPort: 81, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80-82:8080-8082/udp", + expected: []swarm.PortConfig{ + { + Protocol: "udp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + { + Protocol: "udp", + TargetPort: 8081, + PublishedPort: 81, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + { + Protocol: "udp", + TargetPort: 8082, + PublishedPort: 82, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + } + for _, tc := range testCases { + var port PortOpt + assert.NilError(t, port.Set(tc.value)) + assert.Equal(t, len(port.Value()), len(tc.expected)) + for _, expectedPortConfig := range tc.expected { + assertContains(t, port.Value(), expectedPortConfig) + } + } +} + +func TestPortOptValidComplexSyntax(t *testing.T) { + testCases := []struct { + value string + expected []swarm.PortConfig + }{ + { + value: "target=80", + expected: []swarm.PortConfig{ + { + TargetPort: 80, + Protocol: "tcp", + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "target=80,protocol=tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "target=80,published=8080,protocol=tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 8080, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "published=80,target=8080,protocol=tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "target=80,published=8080,protocol=tcp,mode=host", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 8080, + PublishMode: "host", + }, + }, + }, + { + value: "target=80,published=8080,mode=host", + expected: []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + PublishMode: "host", + Protocol: "tcp", + }, + }, + }, + { + value: "target=80,published=8080,mode=ingress", + expected: []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + PublishMode: "ingress", + Protocol: "tcp", + }, + }, + }, + } + for _, tc := range testCases { + var port PortOpt + assert.NilError(t, port.Set(tc.value)) + assert.Equal(t, len(port.Value()), len(tc.expected)) + for _, expectedPortConfig := range tc.expected { + assertContains(t, port.Value(), expectedPortConfig) + } + } +} + +func TestPortOptInvalidComplexSyntax(t *testing.T) { + testCases := []struct { + value string + expectedError string + }{ + { + value: "invalid,target=80", + expectedError: "invalid field", + }, + { + value: "invalid=field", + expectedError: "invalid field", + }, + { + value: "protocol=invalid", + expectedError: "invalid protocol value", + }, + { + value: "target=invalid", + expectedError: "invalid syntax", + }, + { + value: "published=invalid", + expectedError: "invalid syntax", + }, + { + value: "mode=invalid", + expectedError: "invalid publish mode value", + }, + { + value: "published=8080,protocol=tcp,mode=ingress", + expectedError: "missing mandatory field", + }, + { + value: `target=80,protocol="tcp,mode=ingress"`, + expectedError: "non-quoted-field", + }, + { + value: `target=80,"protocol=tcp,mode=ingress"`, + expectedError: "invalid protocol value", + }, + } + for _, tc := range testCases { + var port PortOpt + assert.Error(t, port.Set(tc.value), tc.expectedError) + } +} + +func assertContains(t *testing.T, portConfigs []swarm.PortConfig, expected swarm.PortConfig) { + var contains = false + for _, portConfig := range portConfigs { + if portConfig == expected { + contains = true + break + } + } + if !contains { + t.Errorf("expected %v to contain %v, did not", portConfigs, expected) + } +} diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go new file mode 100644 index 0000000..fb1e537 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/quotedstring.go @@ -0,0 +1,37 @@ +package opts + +// QuotedString is a string that may have extra quotes around the value. The +// quotes are stripped from the value. +type QuotedString struct { + value *string +} + +// Set sets a new value +func (s *QuotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +// Type returns the type of the value +func (s *QuotedString) Type() string { + return "string" +} + +func (s *QuotedString) String() string { + return string(*s.value) +} + +func trimQuotes(value string) string { + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} + +// NewQuotedString returns a new quoted string option +func NewQuotedString(value *string) *QuotedString { + return &QuotedString{value: value} +} diff --git a/vendor/github.com/docker/docker/opts/quotedstring_test.go b/vendor/github.com/docker/docker/opts/quotedstring_test.go new file mode 100644 index 0000000..0ebf04b --- /dev/null +++ b/vendor/github.com/docker/docker/opts/quotedstring_test.go @@ -0,0 +1,28 @@ +package opts + +import ( + "github.com/docker/docker/pkg/testutil/assert" + "testing" +) + +func TestQuotedStringSetWithQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NilError(t, qs.Set("\"something\"")) + assert.Equal(t, qs.String(), "something") + assert.Equal(t, value, "something") +} + +func TestQuotedStringSetWithMismatchedQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NilError(t, qs.Set("\"something'")) + assert.Equal(t, qs.String(), "\"something'") +} + +func TestQuotedStringSetWithNoQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NilError(t, qs.Set("something")) + assert.Equal(t, qs.String(), "something") +} diff --git a/vendor/github.com/docker/docker/opts/secret.go b/vendor/github.com/docker/docker/opts/secret.go new file mode 100644 index 0000000..1fefcf8 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/secret.go @@ -0,0 +1,107 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/api/types" +) + +// SecretOpt is a Value type for parsing secrets +type SecretOpt struct { + values []*types.SecretRequestOption +} + +// Set a new secret value +func (o *SecretOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + options := &types.SecretRequestOption{ + Source: "", + Target: "", + UID: "0", + GID: "0", + Mode: 0444, + } + + // support a simple syntax of --secret foo + if len(fields) == 1 { + options.Source = fields[0] + options.Target = fields[0] + o.values = append(o.values, options) + return nil + } + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "source", "src": + options.Source = value + case "target": + tDir, _ := filepath.Split(value) + if tDir != "" { + return fmt.Errorf("target must not be a path") + } + options.Target = value + case "uid": + options.UID = value + case "gid": + options.GID = value + case "mode": + m, err := strconv.ParseUint(value, 0, 32) + if err != nil { + return fmt.Errorf("invalid mode specified: %v", err) + } + + options.Mode = os.FileMode(m) + default: + if len(fields) == 1 && value == "" { + + } else { + return fmt.Errorf("invalid field in secret request: %s", key) + } + } + } + + if options.Source == "" { + return fmt.Errorf("source is required") + } + + o.values = append(o.values, options) + return nil +} + +// Type returns the type of this option +func (o *SecretOpt) Type() string { + return "secret" +} + +// String returns a string repr of this option +func (o *SecretOpt) String() string { + secrets := []string{} + for _, secret := range o.values { + repr := fmt.Sprintf("%s -> %s", secret.Source, secret.Target) + secrets = append(secrets, repr) + } + return strings.Join(secrets, ", ") +} + +// Value returns the secret requests +func (o *SecretOpt) Value() []*types.SecretRequestOption { + return o.values +} diff --git a/vendor/github.com/docker/docker/opts/secret_test.go b/vendor/github.com/docker/docker/opts/secret_test.go new file mode 100644 index 0000000..d978c86 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/secret_test.go @@ -0,0 +1,79 @@ +package opts + +import ( + "os" + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestSecretOptionsSimple(t *testing.T) { + var opt SecretOpt + + testCase := "app-secret" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "app-secret") + assert.Equal(t, req.Target, "app-secret") + assert.Equal(t, req.UID, "0") + assert.Equal(t, req.GID, "0") +} + +func TestSecretOptionsSourceTarget(t *testing.T) { + var opt SecretOpt + + testCase := "source=foo,target=testing" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") + assert.Equal(t, req.Target, "testing") +} + +func TestSecretOptionsShorthand(t *testing.T) { + var opt SecretOpt + + testCase := "src=foo,target=testing" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") +} + +func TestSecretOptionsCustomUidGid(t *testing.T) { + var opt SecretOpt + + testCase := "source=foo,target=testing,uid=1000,gid=1001" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") + assert.Equal(t, req.Target, "testing") + assert.Equal(t, req.UID, "1000") + assert.Equal(t, req.GID, "1001") +} + +func TestSecretOptionsCustomMode(t *testing.T) { + var opt SecretOpt + + testCase := "source=foo,target=testing,uid=1000,gid=1001,mode=0444" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") + assert.Equal(t, req.Target, "testing") + assert.Equal(t, req.UID, "1000") + assert.Equal(t, req.GID, "1001") + assert.Equal(t, req.Mode, os.FileMode(0444)) +} diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md new file mode 100644 index 0000000..c4b78a8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/README.md @@ -0,0 +1,11 @@ +pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. + +Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. +If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the +Docker organization, to facilitate re-use by other projects. However that is not the priority. + +The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core +Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good +place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go new file mode 100644 index 0000000..ffcc564 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go @@ -0,0 +1,91 @@ +// Package aaparser is a convenience package interacting with `apparmor_parser`. +package aaparser + +import ( + "fmt" + "os/exec" + "strconv" + "strings" +) + +const ( + binary = "apparmor_parser" +) + +// GetVersion returns the major and minor version of apparmor_parser. +func GetVersion() (int, error) { + output, err := cmd("", "--version") + if err != nil { + return -1, err + } + + return parseVersion(output) +} + +// LoadProfile runs `apparmor_parser -r` on a specified apparmor profile to +// replace the profile. +func LoadProfile(profilePath string) error { + _, err := cmd("", "-r", profilePath) + if err != nil { + return err + } + return nil +} + +// cmd runs `apparmor_parser` with the passed arguments. +func cmd(dir string, arg ...string) (string, error) { + c := exec.Command(binary, arg...) + c.Dir = dir + + output, err := c.CombinedOutput() + if err != nil { + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err) + } + + return string(output), nil +} + +// parseVersion takes the output from `apparmor_parser --version` and returns +// a representation of the {major, minor, patch} version as a single number of +// the form MMmmPPP {major, minor, patch}. +func parseVersion(output string) (int, error) { + // output is in the form of the following: + // AppArmor parser version 2.9.1 + // Copyright (C) 1999-2008 Novell Inc. + // Copyright 2009-2012 Canonical Ltd. + + lines := strings.SplitN(output, "\n", 2) + words := strings.Split(lines[0], " ") + version := words[len(words)-1] + + // split by major minor version + v := strings.Split(version, ".") + if len(v) == 0 || len(v) > 3 { + return -1, fmt.Errorf("parsing version failed for output: `%s`", output) + } + + // Default the versions to 0. + var majorVersion, minorVersion, patchLevel int + + majorVersion, err := strconv.Atoi(v[0]) + if err != nil { + return -1, err + } + + if len(v) > 1 { + minorVersion, err = strconv.Atoi(v[1]) + if err != nil { + return -1, err + } + } + if len(v) > 2 { + patchLevel, err = strconv.Atoi(v[2]) + if err != nil { + return -1, err + } + } + + // major*10^5 + minor*10^3 + patch*10^0 + numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel + return numericVersion, nil +} diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go new file mode 100644 index 0000000..69bc8d2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go @@ -0,0 +1,73 @@ +package aaparser + +import ( + "testing" +) + +type versionExpected struct { + output string + version int +} + +func TestParseVersion(t *testing.T) { + versions := []versionExpected{ + { + output: `AppArmor parser version 2.10 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 210000, + }, + { + output: `AppArmor parser version 2.8 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 208000, + }, + { + output: `AppArmor parser version 2.20 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 220000, + }, + { + output: `AppArmor parser version 2.05 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 205000, + }, + { + output: `AppArmor parser version 2.9.95 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 209095, + }, + { + output: `AppArmor parser version 3.14.159 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 314159, + }, + } + + for _, v := range versions { + version, err := parseVersion(v.output) + if err != nil { + t.Fatalf("expected error to be nil for %#v, got: %v", v, err) + } + if version != v.version { + t.Fatalf("expected version to be %d, was %d, for: %#v\n", v.version, version, v) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 0000000..7307d96 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 0000000..3261c4f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,1175 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + // TarChownOptions wraps the chown options UID and GID. + TarChownOptions struct { + UID, GID int + } + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *TarChownOptions + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. Also, to facilitate the passing of + // specific id mappings for untar, an archiver can be created with maps + // which will then be passed to Untar operations + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error +) + +var ( + // ErrNotImplemented is the error message of function not implemented. + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} +) + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.Command(args[0], args[1:]...), archive) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, chdone, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { + <-chdone + return readBufWrapper.Close() + }), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresseses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { + uid, gid, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + if err != nil { + return err + } + xGID, err := idtools.ToContainer(gid, ta.GIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + hdr.Gid = xGID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), + } + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (eg !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !exceptions { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range patterns { + if pat[0] != '!' { + continue + } + pat = pat[1:] + string(filepath.Separator) + if strings.HasPrefix(pat, dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. + if hdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if hdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + + // only perform mapping if the file being copied isn't already owned by the + // uid or gid of the remapped root in the container + if remappedRootUID != hdr.Uid { + xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if remappedRootGID != hdr.Gid { + xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// Destination handling is in an operating specific manner depending +// where the daemon is running. If `dst` ends with a trailing slash +// the final destination path will be `dst/base(src)` (Linux) or +// `dst\base(src)` (Windows). +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { + chdone := make(chan struct{}) + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(chdone) + }() + + return pipeR, chdone, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 0000000..6b2a31f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,95 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go new file mode 100644 index 0000000..d5f046e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go @@ -0,0 +1,187 @@ +package archive + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" + + "github.com/docker/docker/pkg/system" +) + +// setupOverlayTestDir creates files in a directory with overlay whiteouts +// Tree layout +// . +// ├── d1 # opaque, 0700 +// │   └── f1 # empty file, 0600 +// ├── d2 # opaque, 0750 +// │   └── f1 # empty file, 0660 +// └── d3 # 0700 +// └── f1 # whiteout, 0644 +func setupOverlayTestDir(t *testing.T, src string) { + // Create opaque directory containing single file and permission 0700 + if err := os.Mkdir(filepath.Join(src, "d1"), 0700); err != nil { + t.Fatal(err) + } + + if err := system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0600); err != nil { + t.Fatal(err) + } + + // Create another opaque directory containing single file but with permission 0750 + if err := os.Mkdir(filepath.Join(src, "d2"), 0750); err != nil { + t.Fatal(err) + } + + if err := system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660); err != nil { + t.Fatal(err) + } + + // Create regular directory with deleted file + if err := os.Mkdir(filepath.Join(src, "d3"), 0700); err != nil { + t.Fatal(err) + } + + if err := system.Mknod(filepath.Join(src, "d3", "f1"), syscall.S_IFCHR, 0); err != nil { + t.Fatal(err) + } +} + +func checkOpaqueness(t *testing.T, path string, opaque string) { + xattrOpaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + t.Fatal(err) + } + if string(xattrOpaque) != opaque { + t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque) + } + +} + +func checkOverlayWhiteout(t *testing.T, path string) { + stat, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + t.Fatalf("Unexpected type: %t, expected *syscall.Stat_t", stat.Sys()) + } + if statT.Rdev != 0 { + t.Fatalf("Non-zero device number for whiteout") + } +} + +func checkFileMode(t *testing.T, path string, perm os.FileMode) { + stat, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + if stat.Mode() != perm { + t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm) + } +} + +func TestOverlayTarUntar(t *testing.T) { + oldmask, err := system.Umask(0) + if err != nil { + t.Fatal(err) + } + defer system.Umask(oldmask) + + src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + + setupOverlayTestDir(t, src) + + dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + + options := &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: OverlayWhiteoutFormat, + } + archive, err := TarWithOptions(src, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, options); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", "f1"), os.ModeCharDevice|os.ModeDevice) + + checkOpaqueness(t, filepath.Join(dst, "d1"), "y") + checkOpaqueness(t, filepath.Join(dst, "d2"), "y") + checkOpaqueness(t, filepath.Join(dst, "d3"), "") + checkOverlayWhiteout(t, filepath.Join(dst, "d3", "f1")) +} + +func TestOverlayTarAUFSUntar(t *testing.T) { + oldmask, err := system.Umask(0) + if err != nil { + t.Fatal(err) + } + defer system.Umask(oldmask) + + src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + + setupOverlayTestDir(t, src) + + dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + + archive, err := TarWithOptions(src, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: OverlayWhiteoutFormat, + }) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: AUFSWhiteoutFormat, + }); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0700) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2", WhiteoutOpaqueDir), 0750) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", WhiteoutPrefix+"f1"), 0600) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 0000000..54acbf2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_test.go new file mode 100644 index 0000000..b883be3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_test.go @@ -0,0 +1,1162 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +var tmp string + +func init() { + tmp = "/tmp/" + if runtime.GOOS == "windows" { + tmp = os.Getenv("TEMP") + `\` + } +} + +func TestIsArchiveNilHeader(t *testing.T) { + out := IsArchive(nil) + if out { + t.Fatalf("isArchive should return false as nil is not a valid archive header") + } +} + +func TestIsArchiveInvalidHeader(t *testing.T) { + header := []byte{0x00, 0x01, 0x02} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is not a valid archive header", header) + } +} + +func TestIsArchiveBzip2(t *testing.T) { + header := []byte{0x42, 0x5A, 0x68} + out := IsArchive(header) + if !out { + t.Fatalf("isArchive should return true as %s is a bz2 header", header) + } +} + +func TestIsArchive7zip(t *testing.T) { + header := []byte{0x50, 0x4b, 0x03, 0x04} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) + } +} + +func TestIsArchivePathDir(t *testing.T) { + cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archivedir") { + t.Fatalf("Incorrectly recognised directory as an archive") + } +} + +func TestIsArchivePathInvalidFile(t *testing.T) { + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archive") { + t.Fatalf("Incorrectly recognised invalid tar path as archive") + } + if IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") + } +} + +func TestIsArchivePathTar(t *testing.T) { + var whichTar string + if runtime.GOOS == "solaris" { + whichTar = "gtar" + } else { + whichTar = "tar" + } + cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) + cmd := exec.Command("sh", "-c", cmdStr) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if !IsArchivePath(tmp + "/archive") { + t.Fatalf("Did not recognise valid tar path as archive") + } + if !IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Did not recognise valid compressed tar path as archive") + } +} + +func testDecompressStream(t *testing.T, ext, compressCommand string) { + cmd := exec.Command("sh", "-c", + fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to create an archive file for test : %s.", output) + } + filename := "archive." + ext + archive, err := os.Open(tmp + filename) + if err != nil { + t.Fatalf("Failed to open file %s: %v", filename, err) + } + defer archive.Close() + + r, err := DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress %s: %v", filename, err) + } + if _, err = ioutil.ReadAll(r); err != nil { + t.Fatalf("Failed to read the decompressed stream: %v ", err) + } + if err = r.Close(); err != nil { + t.Fatalf("Failed to close the decompressed stream: %v ", err) + } +} + +func TestDecompressStreamGzip(t *testing.T) { + testDecompressStream(t, "gz", "gzip -f") +} + +func TestDecompressStreamBzip2(t *testing.T) { + testDecompressStream(t, "bz2", "bzip2 -f") +} + +func TestDecompressStreamXz(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Xz not present in msys2") + } + testDecompressStream(t, "xz", "xz -f") +} + +func TestCompressStreamXzUnsuported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamBzip2Unsupported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamInvalid(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, -1) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestExtensionInvalid(t *testing.T) { + compression := Compression(-1) + output := compression.Extension() + if output != "" { + t.Fatalf("The extension of an invalid compression should be an empty string.") + } +} + +func TestExtensionUncompressed(t *testing.T) { + compression := Uncompressed + output := compression.Extension() + if output != "tar" { + t.Fatalf("The extension of an uncompressed archive should be 'tar'.") + } +} +func TestExtensionBzip2(t *testing.T) { + compression := Bzip2 + output := compression.Extension() + if output != "tar.bz2" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") + } +} +func TestExtensionGzip(t *testing.T) { + compression := Gzip + output := compression.Extension() + if output != "tar.gz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") + } +} +func TestExtensionXz(t *testing.T) { + compression := Xz + output := compression.Extension() + if output != "tar.xz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") + } +} + +func TestCmdStreamLargeStderr(t *testing.T) { + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + errCh := make(chan error) + go func() { + _, err := io.Copy(ioutil.Discard, out) + errCh <- err + }() + select { + case err := <-errCh: + if err != nil { + t.Fatalf("Command should not have failed (err=%.100s...)", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("Command did not complete in 5 seconds; probable deadlock") + } +} + +func TestCmdStreamBad(t *testing.T) { + // TODO Windows: Figure out why this is failing in CI but not locally + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows CI machines") + } + badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") + out, _, err := cmdStream(badCmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + if output, err := ioutil.ReadAll(out); err == nil { + t.Fatalf("Command should have failed") + } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { + t.Fatalf("Wrong error value (%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestCmdStreamGood(t *testing.T) { + cmd := exec.Command("sh", "-c", "echo hello; exit 0") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatal(err) + } + if output, err := ioutil.ReadAll(out); err != nil { + t.Fatalf("Command should not have failed (err=%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestUntarPathWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + invalidDestFolder := filepath.Join(tempFolder, "invalidDest") + // Create a src file + srcFile := filepath.Join(tempFolder, "src") + tarFile := filepath.Join(tempFolder, "src.tar") + os.Create(srcFile) + os.Create(invalidDestFolder) // being a file (not dir) should cause an error + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + err = UntarPath(tarFile, invalidDestFolder) + if err == nil { + t.Fatalf("UntarPath with invalid destination path should throw an error.") + } +} + +func TestUntarPathWithInvalidSrc(t *testing.T) { + dest, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer os.RemoveAll(dest) + err = UntarPath("/invalid/path", dest) + if err == nil { + t.Fatalf("UntarPath with invalid src path should throw an error.") + } +} + +func TestUntarPath(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(filepath.Join(tmpFolder, "src")) + + destFolder := filepath.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath shouldn't throw an error, %s.", err) + } + expectedFile := filepath.Join(destFolder, srcFileU) + _, err = os.Stat(expectedFile) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +// Do the same test as above but with the destination as file, it should fail +func TestUntarPathWithDestinationFile(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(filepath.Join(tmpFolder, "src")) + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFile := filepath.Join(tmpFolder, "dest") + _, err = os.Create(destFile) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFile) + if err == nil { + t.Fatalf("UntarPath should throw an error if the destination if a file") + } +} + +// Do the same test as above but with the destination folder already exists +// and the destination file is a directory +// It's working, see https://github.com/docker/docker/issues/10040 +func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(srcFile) + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := filepath.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination folder") + } + // Let's create a folder that will has the same path as the extracted file (from tar) + destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) + err = os.MkdirAll(destSrcFileAsFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") + } +} + +func TestCopyWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + destFolder := filepath.Join(tempFolder, "dest") + invalidSrc := filepath.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(invalidSrc, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + srcFolder := filepath.Join(tempFolder, "src") + inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(srcFolder, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } +} + +// Test CopyWithTar with a file as src +func TestCopyWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, filepath.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content + if err != nil { + t.Fatalf("Destination file should be the same as the source.") + } +} + +// Test CopyWithTar with a folder as src +func TestCopyWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + src := filepath.Join(folder, filepath.Join("src", "folder")) + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content (the file inside) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestCopyFileWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + destFolder := filepath.Join(tempFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + invalidFile := filepath.Join(tempFolder, "doesnotexists") + err = CopyFileWithTar(invalidFile, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + defer os.RemoveAll(tempFolder) + srcFile := filepath.Join(tempFolder, "src") + inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") + _, err = os.Create(srcFile) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(srcFile, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } + // FIXME Test the src file and content +} + +func TestCopyFileWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + src := filepath.Join(folder, "srcfolder") + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(src, dest) + if err == nil { + t.Fatalf("CopyFileWithTar should throw an error with a folder.") + } +} + +func TestCopyFileWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, filepath.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest+"/") + if err != nil { + t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestTarFiles(t *testing.T) { + // TODO Windows: Figure out how to port this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = TarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) + } + + tmp, err := ioutil.TempDir("", "docker-test-untar") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmp) + if err := Untar(wrap, tmp, nil); err != nil { + return nil, err + } + if _, err := os.Stat(tmp); err != nil { + return nil, err + } + + return ChangesDirs(origin, tmp) +} + +func TestTarUntar(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + } +} + +func TestTarWithOptions(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.TempDir(origin, "folder"); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + + cases := []struct { + opts *TarOptions + numChanges int + }{ + {&TarOptions{IncludeFiles: []string{"1"}}, 2}, + {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, + {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, + } + for _, testCase := range cases { + changes, err := tarUntar(t, origin, testCase.opts) + if err != nil { + t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, false) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func BenchmarkTarUntarWithLinks(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, true) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func TestUntarInvalidFilenames(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarHardlinkToSymlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 0000000..7083f2f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,118 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + inode = uint64(s.Ino) + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + } + return int(s.Uid), int(s.Gid), nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go new file mode 100644 index 0000000..4eeafdd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go @@ -0,0 +1,249 @@ +// +build !windows + +package archive + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "syscall" + "testing" + + "github.com/docker/docker/pkg/system" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct{ in, expected string }{ + {"foo", "foo"}, + {"foo/bar", "foo/bar"}, + {"foo/dir/", "foo/dir/"}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {"foo/bar", false, "foo/bar"}, + {"foo/bar", true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0000}, + {0777, 0777}, + {0644, 0644}, + {0755, 0755}, + {0444, 0444}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) + } + } +} + +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")); err != nil { + t.Fatal(err) + } + + var i1, i2 uint64 + if i1, err = getNlink(filepath.Join(origin, "1")); err != nil { + t.Fatal(err) + } + // sanity check that we can hardlink + if i1 != 2 { + t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + if i1, err = getInode(filepath.Join(dest, "1")); err != nil { + t.Fatal(err) + } + if i2, err = getInode(filepath.Join(dest, "2")); err != nil { + t.Fatal(err) + } + + if i1 != i2 { + t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + } +} + +func getNlink(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + // We need this conversion on ARM64 + return uint64(statT.Nlink), nil +} + +func getInode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Ino, nil +} + +func TestTarWithBlockCharFifo(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(origin, dest) + if err != nil { + t.Fatal(err) + } + if len(changes) > 0 { + t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) + } +} + +// TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows +func TestTarUntarWithXattr(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip() + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability") + if capability == nil && capability[0] != 0x00 { + t.Fatalf("Untar should have kept the 'security.capability' xattr.") + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 0000000..5c3a1be --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,70 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + // no notion of file ownership mapping yet on Windows + return 0, 0, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go new file mode 100644 index 0000000..0c6733d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go @@ -0,0 +1,91 @@ +// +build windows + +package archive + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestCopyFileWithInvalidDest(t *testing.T) { + // TODO Windows: This is currently failing. Not sure what has + // recently changed in CopyWithTar as used to pass. Further investigation + // is required. + t.Skip("Currently fails") + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := "c:dest" + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, "src", "src") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err == nil { + t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") + } +} + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct { + in, expected string + shouldFail bool + }{ + {"foo", "foo", false}, + {"foo/bar", "___", true}, // unix-styled windows path must fail + {`foo\bar`, "foo/bar", false}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if v.shouldFail && err == nil { + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) + } else if !v.shouldFail && out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {`foo\bar`, false, "foo/bar"}, + {`foo\bar`, true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0111}, + {0777, 0755}, + {0644, 0755}, + {0755, 0755}, + {0444, 0555}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 0000000..c07d55c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,446 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 0000000..fc5a9df --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,312 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 0000000..da70ed3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go new file mode 100644 index 0000000..095102e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go @@ -0,0 +1,132 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "runtime" + "sort" + "testing" +) + +func TestHardLinkOrder(t *testing.T) { + //TODO Should run for Solaris + if runtime.GOOS == "solaris" { + t.Skip("gcp failures on Solaris") + } + names := []string{"file1.txt", "file2.txt", "file3.txt"} + msg := []byte("Hey y'all") + + // Create dir + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + for _, name := range names { + func() { + fh, err := os.Create(path.Join(src, name)) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if _, err = fh.Write(msg); err != nil { + t.Fatal(err) + } + }() + } + // Create dest, with changes that includes hardlinks + dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(dest) // we just want the name, at first + if err := copyDir(src, dest); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + for _, name := range names { + for i := 0; i < 5; i++ { + if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { + t.Fatal(err) + } + } + } + + // get changes + changes, err := ChangesDirs(dest, src) + if err != nil { + t.Fatal(err) + } + + // sort + sort.Sort(changesByPath(changes)) + + // ExportChanges + ar, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrs, err := walkHeaders(ar) + if err != nil { + t.Fatal(err) + } + + // reverse sort + sort.Sort(sort.Reverse(changesByPath(changes))) + // ExportChanges + arRev, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrsRev, err := walkHeaders(arRev) + if err != nil { + t.Fatal(err) + } + + // line up the two sets + sort.Sort(tarHeaders(hdrs)) + sort.Sort(tarHeaders(hdrsRev)) + + // compare Size and LinkName + for i := range hdrs { + if hdrs[i].Name != hdrsRev[i].Name { + t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) + } + if hdrs[i].Size != hdrsRev[i].Size { + t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) + } + if hdrs[i].Typeflag != hdrsRev[i].Typeflag { + t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) + } + if hdrs[i].Linkname != hdrsRev[i].Linkname { + t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) + } + } + +} + +type tarHeaders []tar.Header + +func (th tarHeaders) Len() int { return len(th) } +func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } +func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } + +func walkHeaders(r io.Reader) ([]tar.Header, error) { + t := tar.NewReader(r) + headers := []tar.Header{} + for { + hdr, err := t.Next() + if err != nil { + if err == io.EOF { + break + } + return headers, err + } + headers = append(headers, *hdr) + } + return headers, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_test.go new file mode 100644 index 0000000..eae1d02 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_test.go @@ -0,0 +1,572 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "runtime" + "sort" + "testing" + "time" + + "github.com/docker/docker/pkg/system" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if runtime.GOOS == "solaris" { + cmd = exec.Command("gcp", "-a", src, dst) + } + + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + {Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + {Regular, "dir4/file3-2", "file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + {Symlink, "symlink3", root + "/file1", 0666}, + {Symlink, "symlink4", root + "/symlink3", 0666}, + {Symlink, "dirSymlink", root + "/dir1", 0740}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, atime for all files and dirs + if err := system.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +func TestChangeString(t *testing.T) { + modifiyChange := Change{"change", ChangeModify} + toString := modifiyChange.String() + if toString != "C change" { + t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + } + addChange := Change{"change", ChangeAdd} + toString = addChange.String() + if toString != "A change" { + t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) + } + deleteChange := Change{"change", ChangeDelete} + toString = deleteChange.String() + if toString != "D change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func TestChangesWithNoChanges(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + // Mock the readonly layer + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Mock the RW layer + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + + // Create a folder in RW layer + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, []byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1", ChangeModify}, + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) +} + +// See https://github.com/docker/docker/pull/13590 +func TestChangesWithChangesGH13590(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + defer os.RemoveAll(baseLayer) + + dir3 := path.Join(baseLayer, "dir1/dir2/dir3") + os.MkdirAll(dir3, 07400) + + file := path.Join(dir3, "file.txt") + ioutil.WriteFile(file, []byte("hello"), 0666) + + layer, err := ioutil.TempDir("", "docker-changes-test2.") + defer os.RemoveAll(layer) + + // Test creating a new file + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) + file = path.Join(layer, "dir1/dir2/dir3/file1.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err := Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1/dir2/dir3", ChangeModify}, + {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) + + // Now test changing a file + layer, err = ioutil.TempDir("", "docker-changes-test3.") + defer os.RemoveAll(layer) + + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + file = path.Join(layer, "dir1/dir2/dir3/file.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err = Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges = []Change{ + {"/dir1/dir2/dir3/file.txt", ChangeModify}, + } + checkChanges(expectedChanges, changes, t) +} + +// Create a directory, copy it, make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failure on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failures on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(changesByPath(changes)) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failures on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if _, err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} + +func TestChangesSizeWithHardlinks(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(destDir) + + creationSize, err := prepareUntarSourceDirectory(100, destDir, true) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + t.Fatal(err) + } + + got := ChangesSize(destDir, changes) + if got != int64(creationSize) { + t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) + } +} + +func TestChangesSizeWithNoChanges(t *testing.T) { + size := ChangesSize("/tmp", nil) + if size != 0 { + t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) + } +} + +func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { + changes := []Change{ + {Path: "deletedPath", Kind: ChangeDelete}, + } + size := ChangesSize("/tmp", changes) + if size != 0 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func TestChangesSize(t *testing.T) { + parentPath, err := ioutil.TempDir("", "docker-changes-test") + defer os.RemoveAll(parentPath) + addition := path.Join(parentPath, "addition") + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + modification := path.Join(parentPath, "modification") + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + changes := []Change{ + {Path: "addition", Kind: ChangeAdd}, + {Path: "modification", Kind: ChangeModify}, + } + size := ChangesSize(parentPath, changes) + if size != 6 { + t.Fatalf("Expected 6 bytes of changes, got %d", size) + } +} + +func checkChanges(expectedChanges, changes []Change, t *testing.T) { + sort.Sort(changesByPath(expectedChanges)) + sort.Sort(changesByPath(changes)) + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 0000000..3778b73 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package archive + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return uint64(fi.Sys().(*syscall.Stat_t).Ino) +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 0000000..af94243 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,30 @@ +package archive + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + + // Don't look at size for dirs, its not a good measure of change + if oldStat.ModTime() != newStat.ModTime() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 0000000..0614c67 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,458 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. + dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. + rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 0000000..e305b5e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go new file mode 100644 index 0000000..ecbfc17 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go @@ -0,0 +1,978 @@ +// +build !windows + +// TODO Windows: Some of these tests may be salvagable and portable to Windows. + +package archive + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +func removeAllPaths(paths ...string) { + for _, path := range paths { + os.RemoveAll(path) + } +} + +func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { + var err error + + if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + return +} + +func isNotDir(err error) bool { + return strings.Contains(err.Error(), "not a directory") +} + +func joinTrailingSep(pathElements ...string) string { + joined := filepath.Join(pathElements...) + + return fmt.Sprintf("%s%c", joined, filepath.Separator) +} + +func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { + t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) + + fileA, err := os.Open(filenameA) + if err != nil { + return + } + defer fileA.Close() + + fileB, err := os.Open(filenameB) + if err != nil { + return + } + defer fileB.Close() + + hasher := sha256.New() + + if _, err = io.Copy(hasher, fileA); err != nil { + return + } + + hashA := hasher.Sum(nil) + hasher.Reset() + + if _, err = io.Copy(hasher, fileB); err != nil { + return + } + + hashB := hasher.Sum(nil) + + if !bytes.Equal(hashA, hashB) { + err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) + } + + return +} + +func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { + t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) + + var changes []Change + + if changes, err = ChangesDirs(newDir, oldDir); err != nil { + return + } + + if len(changes) != 0 { + err = fmt.Errorf("expected no changes between directories, but got: %v", changes) + } + + return +} + +func logDirContents(t *testing.T, dirPath string) { + logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Errorf("stat error for path %q: %s", path, err) + return nil + } + + if info.IsDir() { + path = joinTrailingSep(path) + } + + t.Logf("\t%s", path) + + return nil + }) + + t.Logf("logging directory contents: %q", dirPath) + + if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { + t.Fatal(err) + } +} + +func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q (not follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, false) +} + +func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q (follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, true) +} + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func TestCopyErrSrcNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func TestCopyErrSrcNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func TestCopyErrDstParentNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + // Try with a file source. + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a file whose parent does not exist. + if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a directory whose parent does not exist. + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func TestCopyErrDstNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + // Try with a file source. + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func TestCopyCaseA(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "itWorks.txt") + + var err error + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + symlinkPath1 := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func TestCopyCaseB(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := joinTrailingSep(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + + if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseC(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // Ensure they start out different. + if err = fileContentsEqual(t, srcPath, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// C. Symbol link following version: +// SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseCFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + symlinkPathBad := filepath.Join(tmpDirA, "symlink1") + symlinkPath := filepath.Join(tmpDirA, "symlink3") + linkTarget := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // first to test broken link + if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + // test symbol link -> symbol link -> target + // Ensure they start out different. + if err = fileContentsEqual(t, linkTarget, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseD(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. Symbol link following version: +// SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseDFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "symlink4") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseE(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// E. Symbol link following version: +// SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseEFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func TestCopyCaseF(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + symSrcDir := filepath.Join(tmpDirA, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now test with symbol link + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseG(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// G. Symbol link version: +// SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseGFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dirSymlink") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseH(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// H. Symbol link following version: +// SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseHFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func TestCopyCaseI(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + symSrcDir := filepath.Join(tmpDirB, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now try with symbol link of dir + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJ(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// J. Symbol link following version: +// SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 0000000..2b775b4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 0000000..9e1a58c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,279 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return 0, err + } + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + if options == nil { + options = &TarOptions{} + } + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. + if srcHdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) + if err != nil { + return 0, err + } + srcHdr.Uid = xUID + } + if srcHdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) + if err != nil { + return 0, err + } + srcHdr.Gid = xGID + } + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_test.go b/vendor/github.com/docker/docker/pkg/archive/diff_test.go new file mode 100644 index 0000000..8167941 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_test.go @@ -0,0 +1,386 @@ +package archive + +import ( + "archive/tar" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/docker/docker/pkg/ioutils" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeSymLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerWhiteouts(t *testing.T) { + // TODO Windows: Figure out why this test fails + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + + wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") + if err != nil { + return + } + defer os.RemoveAll(wd) + + base := []string{ + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "baz", + "foo/", + "foo/.abc", + "foo/.bcd/", + "foo/.bcd/a", + "foo/cde/", + "foo/cde/def", + "foo/cde/efg", + "foo/fgh", + "foobar", + } + + type tcase struct { + change, expected []string + } + + tcases := []tcase{ + { + base, + base, + }, + { + []string{ + ".bay", + ".wh.baz", + "foo/", + "foo/.bce", + "foo/.wh..wh..opq", + "foo/cde/", + "foo/cde/efg", + }, + []string{ + ".bay", + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.bce", + "foo/cde/", + "foo/cde/efg", + "foobar", + }, + }, + { + []string{ + ".bay", + ".wh..baz", + ".wh.foobar", + "foo/", + "foo/.abc", + "foo/.wh.cde", + "bar/", + }, + []string{ + ".bay", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.abc", + "foo/.bce", + }, + }, + { + []string{ + ".abc", + ".wh..wh..opq", + "foobar", + }, + []string{ + ".abc", + "foobar", + }, + }, + } + + for i, tc := range tcases { + l, err := makeTestLayer(tc.change) + if err != nil { + t.Fatal(err) + } + + _, err = UnpackLayer(wd, l, nil) + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + + paths, err := readDirContents(wd) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, paths) { + t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) + } + } + +} + +func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { + tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + for _, p := range paths { + if p[len(p)-1] == filepath.Separator { + if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { + return + } + } else { + if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { + return + } + } + } + archive, err := Tar(tmpDir, Uncompressed) + if err != nil { + return + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + os.RemoveAll(tmpDir) + return err + }), nil +} + +func readDirContents(root string) ([]string, error) { + var files []string + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == root { + return nil + } + rel, err := filepath.Rel(root, path) + if err != nil { + return err + } + if info.IsDir() { + rel = rel + "/" + } + files = append(files, rel) + return nil + }) + if err != nil { + return nil, err + } + return files, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 0000000..cedd46a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/testdata/broken.tar b/vendor/github.com/docker/docker/pkg/archive/testdata/broken.tar new file mode 100644 index 0000000000000000000000000000000000000000..8f10ea6b87d3eb4fed572349dfe87695603b10a5 GIT binary patch literal 13824 zcmeHN>rxv>7UtLfn5Q@^l8gXrG&7O_li)0oQBai)6v9rjo&-ixOPXbFo(r;aaqT1Q zi|o_vJM6y3ey8Um2^?(fm~vH66==Hq^tqqYr_U$~f~3CkaX-4=)VFkfMbAE0zj=1W zFdGeXOK)!KtrgwSO|!8=t&huAhCPiFI|54|O6#g{AByje_D5`gZ4lbN_tD%y+P?+6 zW}mCyJbT6dM$<6v?SB_8uxS5j5M6u>C%C=+&BoS!{NIK7SFYLLXgq9fL;u??&1{)C_QVb?f0pB4xfD_C1pX2f z=LE&>$4O)llEszRik&8tAi~^>9~IXb2tQsXkop&XF!hz8gWXO)O@R9>nS~7H1w&*U zWf1ryXPidjED|qMClc|F!YuB;N}eT-8}IBqwJ!w!F&$m$r;a;(N7!YIEb7h<=ej}& zT~f;Cd!ZOC&mX2n zv4)UvkOa{z8}jxVC6bTq+3^R;Sok8c6EQsN&k9^`&h(Hc32JVwt-Hrj<{`vG3V< zCk?#){6BW>!9@+(L2u}{Jos}CZh!u_HaA;$dH(--^ZzaF-*=tS5&i^O)@Me!3BwBQ`@=VE zIl)Fp0MG z@%2K`G+^8HA?T&;xGZB%_q<@Vt&(_!w-gfXxk@mb9|fb)1BuBGk_ptuvx%G~pq0Kb zb&?6Szj_3#ClOiI_3vu1e+mOX z9k`Og2B5RmN7LGZ)c;3%E%Ip__9KKUf&G&zD9jkJNr-{ibNby{ds> zUrSU_0z^Wf<)}gE{Jb22kgArW_I#nO79{eFvL6rZP*4oJ7H%7}fn5i&1ZT@5hDK4~ z(U`5S#`Fws86Z{2P=gP6usiI=mKaOr@4W|(?6Ye5$Oayf(LUxEb zaN*HO8gZBg{sZJ1)pg4>36^kmC*dQ2;oE@^#)cw_*aI^!cM=y1Rqga(?Ey`Mja44@ zco?Vs7`J_y5ir%m6vXp*y&Gb{4lfBvR0R>wjxNBA^zHAzdc;~eK6(s=AB|{$OM8p} zp9LwiIkAyG5Q$+F3`7h$CPJbL(j-h1h61!ZViYo4dBXOg@lop12w4VYz!&$vL+Po-n0lE6B8Y;6$Ar89(FQ zU43m0VVC)g+}A0GY(H3=vGXH;5|6sFnZk+NN-WF&+)64KnDBNmlR?P<{j247c6ZGs zY`hF!K4&Hi(0r~#=6sH0f#>;~|6uT_GuPArovwt~PT&t2-pNh;x9aMe7i;!lK!(<$ z?d`g5*7a@bJ?(y(Y4ln98)|Cinp8V=gdKs-N$TT&k8N344C6y&*H}a~{9Pg&%cB8( zs3gwCMEH-=;aI?u+)#>TQj}R!`jyO-QsK*KZS|lK9+9#7oV0B(la+@sRbyfJf~*mY z#+u;OA2B@66aq^nOW6`=t5qYdRV{oFkE8T+GhJI-*NldTtcr!I|PQf({z2i zZs;`}x~m6ks)bXh@+($$(s>pJ`5X6~16{UfoJC(mW1b(MtJcpN$ZBT3r1B`&Cx9{-iF=!{A}z(ob033DW~d!*9$cfm zVNC%z6l$8Qz0LiPv&`A!8a*yd3zi-in+*e-!2$MiQNyE>1xX!65{vsnGKkf9!|0+OGBAb= z5*&U!Rl91sZq^%6Di#9<<87G)rv;99!{p6oE&}gq)LXeeJT)kYlsjz{ehkbMY(O`q zGvc6vviAh-6>EFt+I|*)$Z&%o;(ob2LAmI= zd);1Ux&vAHF3sW+ZYtInM5`7V!gWe@@A3}gzBN4OzKHcFXhsnBZ62vkM}c;c8?C16|}T)I>F_`E4y<`7O_Uv z_IIGuK3}j6k8x0(NE^)|N^6ztuoF5wcqyCPP4-b>1H5)kQM(q_kYzo37tjs2w1@@5 z)pou5q*BNKlggS#-4TOxF*--bZwQgZIP>8>Wh4R6qJg1trGj7P+M9C-U$bgV0-Bbc zM}8SyaI1`5o3Hn=gK~dij~yq2v7>PXETRIqq!En36W>+P9az*N;)5;FK054lzkPPH zcY4hR*Orc{l5us$Y*nZ!(@__9wdDn6|B~BL+;v!B^Cr(N`)UtH54-56s#rGO&e@Q}~KNYPdQ94MZxA|gP9PSIqe@Ff$9bNNvws)xH zUYfZ#^MIJly?f4ly_CL`QQoB~o&>3jKAlL=*#tHX$;*%#;^sVnJHGU0={L0dh$?du z$V*u|2o=sbG6HQV;$?~-5Xh?Gjf~m#{@1wY+1@T!Us<#xZ;2Rn{Y@!B=|jZ;TY#GL zQet9G=4h_z5?#7$NWf6BJyZ3f$1aFp02S_lpyVtB;|niLX54VbZP`xU1YMSiGnf#! zBhWBJBLfCg3eCtIG~av^x3Yo4twnBx#0a&E>6G9&~+z{;Wn%CtG>DYD1(pjqYiYL oJsf9Rk?Q4-IWqA2mih3}{ZBUT=3UD@m3s}`Yv5i3pOOat4?XSI`2YX_ literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 0000000..3448569 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 0000000..e85aac0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/utils_test.go b/vendor/github.com/docker/docker/pkg/archive/utils_test.go new file mode 100644 index 0000000..01b9e92 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + _, err := ApplyLayer(dest, r) + return err + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. +func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. + return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 0000000..d20478a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 0000000..b39d12c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go new file mode 100644 index 0000000..46ab366 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go @@ -0,0 +1,98 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" + "testing" +) + +func TestGenerateEmptyFile(t *testing.T) { + archive, err := Generate("emptyFile") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"emptyFile", ""}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} + +func TestGenerateWithContent(t *testing.T) { + archive, err := Generate("file", "content") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"file", "content"}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/api.go b/vendor/github.com/docker/docker/pkg/authorization/api.go new file mode 100644 index 0000000..05c75f1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/api.go @@ -0,0 +1,88 @@ +package authorization + +import ( + "crypto/x509" + "encoding/json" + "encoding/pem" +) + +const ( + // AuthZApiRequest is the url for daemon request authorization + AuthZApiRequest = "AuthZPlugin.AuthZReq" + + // AuthZApiResponse is the url for daemon response authorization + AuthZApiResponse = "AuthZPlugin.AuthZRes" + + // AuthZApiImplements is the name of the interface all AuthZ plugins implement + AuthZApiImplements = "authz" +) + +// PeerCertificate is a wrapper around x509.Certificate which provides a sane +// enconding/decoding to/from PEM format and JSON. +type PeerCertificate x509.Certificate + +// MarshalJSON returns the JSON encoded pem bytes of a PeerCertificate. +func (pc *PeerCertificate) MarshalJSON() ([]byte, error) { + b := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: pc.Raw}) + return json.Marshal(b) +} + +// UnmarshalJSON populates a new PeerCertificate struct from JSON data. +func (pc *PeerCertificate) UnmarshalJSON(b []byte) error { + var buf []byte + if err := json.Unmarshal(b, &buf); err != nil { + return err + } + derBytes, _ := pem.Decode(buf) + c, err := x509.ParseCertificate(derBytes.Bytes) + if err != nil { + return err + } + *pc = PeerCertificate(*c) + return nil +} + +// Request holds data required for authZ plugins +type Request struct { + // User holds the user extracted by AuthN mechanism + User string `json:"User,omitempty"` + + // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) + UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` + + // RequestMethod holds the HTTP method (GET/POST/PUT) + RequestMethod string `json:"RequestMethod,omitempty"` + + // RequestUri holds the full HTTP uri (e.g., /v1.21/version) + RequestURI string `json:"RequestUri,omitempty"` + + // RequestBody stores the raw request body sent to the docker daemon + RequestBody []byte `json:"RequestBody,omitempty"` + + // RequestHeaders stores the raw request headers sent to the docker daemon + RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` + + // RequestPeerCertificates stores the request's TLS peer certificates in PEM format + RequestPeerCertificates []*PeerCertificate `json:"RequestPeerCertificates,omitempty"` + + // ResponseStatusCode stores the status code returned from docker daemon + ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` + + // ResponseBody stores the raw response body sent from docker daemon + ResponseBody []byte `json:"ResponseBody,omitempty"` + + // ResponseHeaders stores the response headers sent to the docker daemon + ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` +} + +// Response represents authZ plugin response +type Response struct { + // Allow indicating whether the user is allowed or not + Allow bool `json:"Allow"` + + // Msg stores the authorization message + Msg string `json:"Msg,omitempty"` + + // Err stores a message in case there's an error + Err string `json:"Err,omitempty"` +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz.go b/vendor/github.com/docker/docker/pkg/authorization/authz.go new file mode 100644 index 0000000..dc9a9ae --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/authz.go @@ -0,0 +1,186 @@ +package authorization + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" +) + +const maxBodySize = 1048576 // 1MB + +// NewCtx creates new authZ context, it is used to store authorization information related to a specific docker +// REST http session +// A context provides two method: +// Authenticate Request: +// Call authZ plugins with current REST request and AuthN response +// Request contains full HTTP packet sent to the docker daemon +// https://docs.docker.com/engine/reference/api/ +// +// Authenticate Response: +// Call authZ plugins with full info about current REST request, REST response and AuthN response +// The response from this method may contains content that overrides the daemon response +// This allows authZ plugins to filter privileged content +// +// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results +// For response manipulation, the response from each plugin is piped between plugins. Plugin execution order +// is determined according to daemon parameters +func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { + return &Ctx{ + plugins: authZPlugins, + user: user, + userAuthNMethod: userAuthNMethod, + requestMethod: requestMethod, + requestURI: requestURI, + } +} + +// Ctx stores a single request-response interaction context +type Ctx struct { + user string + userAuthNMethod string + requestMethod string + requestURI string + plugins []Plugin + // authReq stores the cached request object for the current transaction + authReq *Request +} + +// AuthZRequest authorized the request to the docker daemon using authZ plugins +func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { + var body []byte + if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize { + var err error + body, r.Body, err = drainBody(r.Body) + if err != nil { + return err + } + } + + var h bytes.Buffer + if err := r.Header.Write(&h); err != nil { + return err + } + + ctx.authReq = &Request{ + User: ctx.user, + UserAuthNMethod: ctx.userAuthNMethod, + RequestMethod: ctx.requestMethod, + RequestURI: ctx.requestURI, + RequestBody: body, + RequestHeaders: headers(r.Header), + } + + if r.TLS != nil { + for _, c := range r.TLS.PeerCertificates { + pc := PeerCertificate(*c) + ctx.authReq.RequestPeerCertificates = append(ctx.authReq.RequestPeerCertificates, &pc) + } + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZRequest(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return newAuthorizationError(plugin.Name(), authRes.Msg) + } + } + + return nil +} + +// AuthZResponse authorized and manipulates the response from docker daemon using authZ plugins +func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { + ctx.authReq.ResponseStatusCode = rm.StatusCode() + ctx.authReq.ResponseHeaders = headers(rm.Header()) + + if sendBody(ctx.requestURI, rm.Header()) { + ctx.authReq.ResponseBody = rm.RawBody() + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZResponse(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return newAuthorizationError(plugin.Name(), authRes.Msg) + } + } + + rm.FlushAll() + + return nil +} + +// drainBody dump the body (if its length is less than 1MB) without modifying the request state +func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { + bufReader := bufio.NewReaderSize(body, maxBodySize) + newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + data, err := bufReader.Peek(maxBodySize) + // Body size exceeds max body size + if err == nil { + logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize) + return nil, newBody, nil + } + // Body size is less than maximum size + if err == io.EOF { + return data, newBody, nil + } + // Unknown error + return nil, newBody, err +} + +// sendBody returns true when request/response body should be sent to AuthZPlugin +func sendBody(url string, header http.Header) bool { + // Skip body for auth endpoint + if strings.HasSuffix(url, "/auth") { + return false + } + + // body is sent only for text or json messages + return header.Get("Content-Type") == "application/json" +} + +// headers returns flatten version of the http headers excluding authorization +func headers(header http.Header) map[string]string { + v := make(map[string]string, 0) + for k, values := range header { + // Skip authorization headers + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { + continue + } + for _, val := range values { + v[k] = val + } + } + return v +} + +// authorizationError represents an authorization deny error +type authorizationError struct { + error +} + +// HTTPErrorStatusCode returns the authorization error status code (forbidden) +func (e authorizationError) HTTPErrorStatusCode() int { + return http.StatusForbidden +} + +func newAuthorizationError(plugin, msg string) authorizationError { + return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)} +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go b/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go new file mode 100644 index 0000000..a787f3c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go @@ -0,0 +1,282 @@ +// +build !windows + +// TODO Windows: This uses a Unix socket for testing. This might be possible +// to port to Windows using a named pipe instead. + +package authorization + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-connections/tlsconfig" + "github.com/gorilla/mux" +) + +const ( + pluginAddress = "authz-test-plugin.sock" +) + +func TestAuthZRequestPluginError(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Err: "an error", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZRequestPlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Allow: true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZResponsePlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestURI: "someting.com/auth", + RequestBody: []byte("sample body"), + } + server.replayResponse = Response{ + Allow: true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZResponse(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestResponseModifier(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + m.FlushAll() + if r.Header().Get("h1") != "v1" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("body")) { + t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusInternalServerError { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +func TestDrainBody(t *testing.T) { + tests := []struct { + length int // length is the message length send to drainBody + expectedBodyLength int // expectedBodyLength is the expected body length after drainBody is called + }{ + {10, 10}, // Small message size + {maxBodySize - 1, maxBodySize - 1}, // Max message size + {maxBodySize * 2, 0}, // Large message size (skip copying body) + + } + + for _, test := range tests { + msg := strings.Repeat("a", test.length) + body, closer, err := drainBody(ioutil.NopCloser(bytes.NewReader([]byte(msg)))) + if err != nil { + t.Fatal(err) + } + if len(body) != test.expectedBodyLength { + t.Fatalf("Body must be copied, actual length: '%d'", len(body)) + } + if closer == nil { + t.Fatal("Closer must not be nil") + } + modified, err := ioutil.ReadAll(closer) + if err != nil { + t.Fatalf("Error must not be nil: '%v'", err) + } + if len(modified) != len(msg) { + t.Fatalf("Result should not be truncated. Original length: '%d', new length: '%d'", len(msg), len(modified)) + } + } +} + +func TestResponseModifierOverride(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + overrideHeader := make(http.Header) + overrideHeader.Add("h1", "v2") + overrideHeaderBytes, err := json.Marshal(overrideHeader) + if err != nil { + t.Fatalf("override header failed %v", err) + } + + m.OverrideHeader(overrideHeaderBytes) + m.OverrideBody([]byte("override body")) + m.OverrideStatusCode(http.StatusNotFound) + m.FlushAll() + if r.Header().Get("h1") != "v2" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("override body")) { + t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusNotFound { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +// createTestPlugin creates a new sample authorization plugin +func createTestPlugin(t *testing.T) *authorizationPlugin { + pwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + client, err := plugins.NewClient("unix:///"+path.Join(pwd, pluginAddress), &tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatalf("Failed to create client %v", err) + } + + return &authorizationPlugin{name: "plugin", plugin: client} +} + +// AuthZPluginTestServer is a simple server that implements the authZ plugin interface +type authZPluginTestServer struct { + listener net.Listener + t *testing.T + // request stores the request sent from the daemon to the plugin + recordedRequest Request + // response stores the response sent from the plugin to the daemon + replayResponse Response + server *httptest.Server +} + +// start starts the test server that implements the plugin +func (t *authZPluginTestServer) start() { + r := mux.NewRouter() + l, err := net.Listen("unix", pluginAddress) + if err != nil { + t.t.Fatal(err) + } + t.listener = l + r.HandleFunc("/Plugin.Activate", t.activate) + r.HandleFunc("/"+AuthZApiRequest, t.auth) + r.HandleFunc("/"+AuthZApiResponse, t.auth) + t.server = &httptest.Server{ + Listener: l, + Config: &http.Server{ + Handler: r, + Addr: pluginAddress, + }, + } + t.server.Start() +} + +// stop stops the test server that implements the plugin +func (t *authZPluginTestServer) stop() { + t.server.Close() + os.Remove(pluginAddress) + if t.listener != nil { + t.listener.Close() + } +} + +// auth is a used to record/replay the authentication api messages +func (t *authZPluginTestServer) auth(w http.ResponseWriter, r *http.Request) { + t.recordedRequest = Request{} + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.t.Fatal(err) + } + r.Body.Close() + json.Unmarshal(body, &t.recordedRequest) + b, err := json.Marshal(t.replayResponse) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} + +func (t *authZPluginTestServer) activate(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{AuthZApiImplements}}) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/middleware.go b/vendor/github.com/docker/docker/pkg/authorization/middleware.go new file mode 100644 index 0000000..52890dd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/middleware.go @@ -0,0 +1,84 @@ +package authorization + +import ( + "net/http" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugingetter" + "golang.org/x/net/context" +) + +// Middleware uses a list of plugins to +// handle authorization in the API requests. +type Middleware struct { + mu sync.Mutex + plugins []Plugin +} + +// NewMiddleware creates a new Middleware +// with a slice of plugins names. +func NewMiddleware(names []string, pg plugingetter.PluginGetter) *Middleware { + SetPluginGetter(pg) + return &Middleware{ + plugins: newPlugins(names), + } +} + +// SetPlugins sets the plugin used for authorization +func (m *Middleware) SetPlugins(names []string) { + m.mu.Lock() + m.plugins = newPlugins(names) + m.mu.Unlock() +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + + m.mu.Lock() + plugins := m.plugins + m.mu.Unlock() + if len(plugins) == 0 { + return handler(ctx, w, r, vars) + } + + user := "" + userAuthNMethod := "" + + // Default authorization using existing TLS connection credentials + // FIXME: Non trivial authorization mechanisms (such as advanced certificate validations, kerberos support + // and ldap) will be extracted using AuthN feature, which is tracked under: + // https://github.com/docker/docker/pull/20883 + if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { + user = r.TLS.PeerCertificates[0].Subject.CommonName + userAuthNMethod = "TLS" + } + + authCtx := NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI) + + if err := authCtx.AuthZRequest(w, r); err != nil { + logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + rw := NewResponseModifier(w) + + var errD error + + if errD = handler(ctx, rw, r, vars); errD != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, errD) + } + + if err := authCtx.AuthZResponse(rw, r); errD == nil && err != nil { + logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + if errD != nil { + return errD + } + + return nil + } +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/plugin.go b/vendor/github.com/docker/docker/pkg/authorization/plugin.go new file mode 100644 index 0000000..4b1c71b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/plugin.go @@ -0,0 +1,112 @@ +package authorization + +import ( + "sync" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +// Plugin allows third party plugins to authorize requests and responses +// in the context of docker API +type Plugin interface { + // Name returns the registered plugin name + Name() string + + // AuthZRequest authorizes the request from the client to the daemon + AuthZRequest(*Request) (*Response, error) + + // AuthZResponse authorizes the response from the daemon to the client + AuthZResponse(*Request) (*Response, error) +} + +// newPlugins constructs and initializes the authorization plugins based on plugin names +func newPlugins(names []string) []Plugin { + plugins := []Plugin{} + pluginsMap := make(map[string]struct{}) + for _, name := range names { + if _, ok := pluginsMap[name]; ok { + continue + } + pluginsMap[name] = struct{}{} + plugins = append(plugins, newAuthorizationPlugin(name)) + } + return plugins +} + +var getter plugingetter.PluginGetter + +// SetPluginGetter sets the plugingetter +func SetPluginGetter(pg plugingetter.PluginGetter) { + getter = pg +} + +// GetPluginGetter gets the plugingetter +func GetPluginGetter() plugingetter.PluginGetter { + return getter +} + +// authorizationPlugin is an internal adapter to docker plugin system +type authorizationPlugin struct { + plugin *plugins.Client + name string + once sync.Once +} + +func newAuthorizationPlugin(name string) Plugin { + return &authorizationPlugin{name: name} +} + +func (a *authorizationPlugin) Name() string { + return a.name +} + +func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiRequest, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiResponse, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +// initPlugin initializes the authorization plugin if needed +func (a *authorizationPlugin) initPlugin() error { + // Lazy loading of plugins + var err error + a.once.Do(func() { + if a.plugin == nil { + var plugin plugingetter.CompatPlugin + var e error + + if pg := GetPluginGetter(); pg != nil { + plugin, e = pg.Get(a.name, AuthZApiImplements, plugingetter.LOOKUP) + } else { + plugin, e = plugins.Get(a.name, AuthZApiImplements) + } + if e != nil { + err = e + return + } + a.plugin = plugin.Client() + } + }) + return err +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/response.go b/vendor/github.com/docker/docker/pkg/authorization/response.go new file mode 100644 index 0000000..129bf2f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/response.go @@ -0,0 +1,203 @@ +package authorization + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "net" + "net/http" + + "github.com/Sirupsen/logrus" +) + +// ResponseModifier allows authorization plugins to read and modify the content of the http.response +type ResponseModifier interface { + http.ResponseWriter + http.Flusher + http.CloseNotifier + + // RawBody returns the current http content + RawBody() []byte + + // RawHeaders returns the current content of the http headers + RawHeaders() ([]byte, error) + + // StatusCode returns the current status code + StatusCode() int + + // OverrideBody replaces the body of the HTTP reply + OverrideBody(b []byte) + + // OverrideHeader replaces the headers of the HTTP reply + OverrideHeader(b []byte) error + + // OverrideStatusCode replaces the status code of the HTTP reply + OverrideStatusCode(statusCode int) + + // FlushAll flushes all data to the HTTP response + FlushAll() error + + // Hijacked indicates the response has been hijacked by the Docker daemon + Hijacked() bool +} + +// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content +func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { + return &responseModifier{rw: rw, header: make(http.Header)} +} + +// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore +// the http request/response from docker daemon +type responseModifier struct { + // The original response writer + rw http.ResponseWriter + // body holds the response body + body []byte + // header holds the response header + header http.Header + // statusCode holds the response status code + statusCode int + // hijacked indicates the request has been hijacked + hijacked bool +} + +func (rm *responseModifier) Hijacked() bool { + return rm.hijacked +} + +// WriterHeader stores the http status code +func (rm *responseModifier) WriteHeader(s int) { + + // Use original request if hijacked + if rm.hijacked { + rm.rw.WriteHeader(s) + return + } + + rm.statusCode = s +} + +// Header returns the internal http header +func (rm *responseModifier) Header() http.Header { + + // Use original header if hijacked + if rm.hijacked { + return rm.rw.Header() + } + + return rm.header +} + +// StatusCode returns the http status code +func (rm *responseModifier) StatusCode() int { + return rm.statusCode +} + +// OverrideBody replaces the body of the HTTP response +func (rm *responseModifier) OverrideBody(b []byte) { + rm.body = b +} + +// OverrideStatusCode replaces the status code of the HTTP response +func (rm *responseModifier) OverrideStatusCode(statusCode int) { + rm.statusCode = statusCode +} + +// OverrideHeader replaces the headers of the HTTP response +func (rm *responseModifier) OverrideHeader(b []byte) error { + header := http.Header{} + if err := json.Unmarshal(b, &header); err != nil { + return err + } + rm.header = header + return nil +} + +// Write stores the byte array inside content +func (rm *responseModifier) Write(b []byte) (int, error) { + + if rm.hijacked { + return rm.rw.Write(b) + } + + rm.body = append(rm.body, b...) + return len(b), nil +} + +// Body returns the response body +func (rm *responseModifier) RawBody() []byte { + return rm.body +} + +func (rm *responseModifier) RawHeaders() ([]byte, error) { + var b bytes.Buffer + if err := rm.header.Write(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Hijack returns the internal connection of the wrapped http.ResponseWriter +func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { + + rm.hijacked = true + rm.FlushAll() + + hijacker, ok := rm.rw.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} + +// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter +func (rm *responseModifier) CloseNotify() <-chan bool { + closeNotifier, ok := rm.rw.(http.CloseNotifier) + if !ok { + logrus.Error("Internal response writer doesn't support the CloseNotifier interface") + return nil + } + return closeNotifier.CloseNotify() +} + +// Flush uses the internal flush API of the wrapped http.ResponseWriter +func (rm *responseModifier) Flush() { + flusher, ok := rm.rw.(http.Flusher) + if !ok { + logrus.Error("Internal response writer doesn't support the Flusher interface") + return + } + + rm.FlushAll() + flusher.Flush() +} + +// FlushAll flushes all data to the HTTP response +func (rm *responseModifier) FlushAll() error { + // Copy the header + for k, vv := range rm.header { + for _, v := range vv { + rm.rw.Header().Add(k, v) + } + } + + // Copy the status code + // Also WriteHeader needs to be done after all the headers + // have been copied (above). + if rm.statusCode > 0 { + rm.rw.WriteHeader(rm.statusCode) + } + + var err error + if len(rm.body) > 0 { + // Write body + _, err = rm.rw.Write(rm.body) + } + + // Clean previous data + rm.body = nil + rm.statusCode = 0 + rm.header = http.Header{} + return err +} diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go new file mode 100644 index 0000000..784d65d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go @@ -0,0 +1,49 @@ +package broadcaster + +import ( + "io" + "sync" +) + +// Unbuffered accumulates multiple io.WriteCloser by stream. +type Unbuffered struct { + mu sync.Mutex + writers []io.WriteCloser +} + +// Add adds new io.WriteCloser. +func (w *Unbuffered) Add(writer io.WriteCloser) { + w.mu.Lock() + w.writers = append(w.writers, writer) + w.mu.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call. +func (w *Unbuffered) Write(p []byte) (n int, err error) { + w.mu.Lock() + var evict []int + for i, sw := range w.writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + evict = append(evict, i) + } + } + for n, i := range evict { + w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) + } + w.mu.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers. Last non-eol-terminated part of data +// will be saved. +func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go new file mode 100644 index 0000000..9f8e72b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go @@ -0,0 +1,162 @@ +package broadcaster + +import ( + "bytes" + "errors" + "strings" + + "testing" +) + +type dummyWriter struct { + buffer bytes.Buffer + failOnWrite bool +} + +func (dw *dummyWriter) Write(p []byte) (n int, err error) { + if dw.failOnWrite { + return 0, errors.New("Fake fail") + } + return dw.buffer.Write(p) +} + +func (dw *dummyWriter) String() string { + return dw.buffer.String() +} + +func (dw *dummyWriter) Close() error { + return nil +} + +func TestUnbuffered(t *testing.T) { + writer := new(Unbuffered) + + // Test 1: Both bufferA and bufferB should contain "foo" + bufferA := &dummyWriter{} + writer.Add(bufferA) + bufferB := &dummyWriter{} + writer.Add(bufferB) + writer.Write([]byte("foo")) + + if bufferA.String() != "foo" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foo" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + // Test2: bufferA and bufferB should contain "foobar", + // while bufferC should only contain "bar" + bufferC := &dummyWriter{} + writer.Add(bufferC) + writer.Write([]byte("bar")) + + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + if bufferC.String() != "bar" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test3: Test eviction on failure + bufferA.failOnWrite = true + writer.Write([]byte("fail")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfail" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + // Even though we reset the flag, no more writes should go in there + bufferA.failOnWrite = false + writer.Write([]byte("test")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfailtest" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test4: Test eviction on multiple simultaneous failures + bufferB.failOnWrite = true + bufferC.failOnWrite = true + bufferD := &dummyWriter{} + writer.Add(bufferD) + writer.Write([]byte("yo")) + writer.Write([]byte("ink")) + if strings.Contains(bufferB.String(), "yoink") { + t.Errorf("bufferB received write. contents: %q", bufferB) + } + if strings.Contains(bufferC.String(), "yoink") { + t.Errorf("bufferC received write. contents: %q", bufferC) + } + if g, w := bufferD.String(), "yoink"; g != w { + t.Errorf("bufferD = %q, want %q", g, w) + } + + writer.Clean() +} + +type devNullCloser int + +func (d devNullCloser) Close() error { + return nil +} + +func (d devNullCloser) Write(buf []byte) (int, error) { + return len(buf), nil +} + +// This test checks for races. It is only useful when run with the race detector. +func TestRaceUnbuffered(t *testing.T) { + writer := new(Unbuffered) + c := make(chan bool) + go func() { + writer.Add(devNullCloser(0)) + c <- true + }() + writer.Write([]byte("hello")) + <-c +} + +func BenchmarkUnbuffered(b *testing.B) { + writer := new(Unbuffered) + setUpWriter := func() { + for i := 0; i < 100; i++ { + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + } + } + testLine := "Line that thinks that it is log line from docker" + var buf bytes.Buffer + for i := 0; i < 100; i++ { + buf.Write([]byte(testLine + "\n")) + } + // line without eol + buf.Write([]byte(testLine)) + testText := buf.Bytes() + b.SetBytes(int64(5 * len(testText))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + setUpWriter() + b.StartTimer() + + for j := 0; j < 5; j++ { + if _, err := writer.Write(testText); err != nil { + b.Fatal(err) + } + } + + b.StopTimer() + writer.Clean() + b.StartTimer() + } +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go new file mode 100644 index 0000000..a7814f5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -0,0 +1,97 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +var chrootArchiver = &archive.Archiver{Untar: Untar} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go new file mode 100644 index 0000000..d2d7e62 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go @@ -0,0 +1,394 @@ +package chrootarchive + +import ( + "bytes" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" +) + +func init() { + reexec.Init() +} + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} + +// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of +// local images) +func TestChrootUntarWithHugeExcludesList(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + options := &archive.TarOptions{} + //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow + //on most systems when passed via environment or command line arguments + excludes := make([]string, 65534, 65534) + for i := 0; i < 65534; i++ { + excludes[i] = strings.Repeat(string(i), 64) + } + options.ExcludePatterns = excludes + if err := Untar(stream, dest, options); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarEmptyArchive(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := Untar(nil, tmpdir, nil); err == nil { + t.Fatal("expected error on empty archive") + } +} + +func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeSymLinks { + if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func getHash(filename string) (uint32, error) { + stream, err := ioutil.ReadFile(filename) + if err != nil { + return 0, err + } + hash := crc32.NewIEEE() + hash.Write(stream) + return hash.Sum32(), nil +} + +func compareDirectories(src string, dest string) error { + changes, err := archive.ChangesDirs(dest, src) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("Unexpected differences after untar: %v", changes) + } + return nil +} + +func compareFiles(src string, dest string) error { + srcHash, err := getHash(src) + if err != nil { + return err + } + destHash, err := getHash(dest) + if err != nil { + return err + } + if srcHash != destHash { + return fmt.Errorf("%s is different from %s", src, dest) + } + return nil +} + +func TestChrootTarUntarWithSymlink(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := TarUntar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyWithTar(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("Failing on Windows and Solaris") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyWithTar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyFileWithTar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyFileWithTar(src, dest); err == nil { + t.Fatal("Expected error on copying directory") + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyFileWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarPath(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + // Untar a directory + if err := UntarPath(src, dest); err == nil { + t.Fatal("Expected error on untaring a directory") + } + + // Untar a tar file + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(stream) + tarfile := filepath.Join(tmpdir, "src.tar") + if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil { + t.Fatal(err) + } + if err := UntarPath(tarfile, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +type slowEmptyTarReader struct { + size int + offset int + chunkSize int +} + +// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") +func (s *slowEmptyTarReader) Read(p []byte) (int, error) { + time.Sleep(100 * time.Millisecond) + count := s.chunkSize + if len(p) < s.chunkSize { + count = len(p) + } + for i := 0; i < count; i++ { + p[i] = 0 + } + s.offset += count + if s.offset > s.size { + return count, io.EOF + } + return count, nil +} + +func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := Untar(stream, dest, nil); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyDotDotFile(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go new file mode 100644 index 0000000..f2325ab --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go new file mode 100644 index 0000000..0a500ed --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 0000000..f9d7fed --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,108 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. +// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := syscall.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 0000000..16354bf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive + +import "syscall" + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 0000000..49acad7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 0000000..eb0aacc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. +func applyLayer() { + + var ( + tmpDir = "" + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go new file mode 100644 index 0000000..9dd9988 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) + } + + return s, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go new file mode 100644 index 0000000..4f637f1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go new file mode 100644 index 0000000..fa17c9b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go new file mode 100644 index 0000000..94b5530 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go @@ -0,0 +1,828 @@ +// +build linux + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// DevmapperLogger defines methods for logging with devicemapper. +type DevmapperLogger interface { + DMLog(level int, file string, line int, dmError int, message string) +} + +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + deviceInfo + deviceDeps + deviceRename + deviceVersion + deviceStatus + deviceTable + deviceWaitevent + deviceList + deviceClear + deviceMknodes + deviceListVersions + deviceTargetMsg + deviceSetGeometry +) + +const ( + addNodeOnResume AddNodeType = iota + addNodeOnCreate +) + +// List of errors returned when using devicemapper. +var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") + ErrBusy = errors.New("Device is Busy") + ErrDeviceIDExists = errors.New("Device Id Exists") + ErrEnxio = errors.New("No such device or address") +) + +var ( + dmSawBusy bool + dmSawExist bool + dmSawEnxio bool // No Such Device or Address +) + +type ( + // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl + // command to execute. + Task struct { + unmanaged *cdmTask + } + // Deps represents dependents (layer) of a device. + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } + // Info represents information about a device. + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + DeferredRemove int + } + // TaskType represents a type of task + TaskType int + // AddNodeType represents a type of node to be added + AddNodeType int +) + +// DeviceIDExists returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. Replacing it with more robust implementation +// is desirable. +func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. +func UdevWait(cookie *uint) error { + if res := DmUdevWait(*cookie); res != 1 { + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) + return ErrUdevWait + } + return nil +} + +// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger + +// LogInit initializes the logger for the device mapper library. +func LogInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +// SetDevDir sets the dev folder for the device mapper library (usually /dev). +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + logrus.Debug("devicemapper: Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +// GetLibraryVersion returns the device mapper library version. +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// UdevSyncSupported returns whether device-mapper is able to sync with udev +// +// This is essential otherwise race conditions can arise where both udev and +// device-mapper attempt to create and destroy devices. +func UdevSyncSupported() bool { + return DmUdevGetSyncSupport() != 0 +} + +// UdevSetSyncSupport allows setting whether the udev sync should be enabled. +// The return bool indicates the state of whether the sync is enabled. +func UdevSetSyncSupport(enable bool) bool { + if enable { + DmUdevSetSyncSupport(1) + } else { + DmUdevSetSyncSupport(0) + } + + return UdevSyncSupported() +} + +// CookieSupported returns whether the version of device-mapper supports the +// use of cookie's in the tasks. +// This is largely a lower level call that other functions use. +func CookieSupported() bool { + return DmCookieSupported() != 0 +} + +// RemoveDevice is a useful helper for cleaning up a device. +func RemoveDevice(name string) error { + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + defer UdevWait(&cookie) + + dmSawBusy = false // reset before the task is run + if err = task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) + } + + return nil +} + +// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. +func RemoveDeviceDeferred(name string) error { + logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) + defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { + return ErrTaskDeferredRemove + } + + // set a task cookie and disable library fallback, or else libdevmapper will + // disable udev dm rules and delete the symlink under /dev/mapper by itself, + // even if the removal is deferred by the kernel. + var cookie uint + var flags uint16 + flags = DmUdevDisableLibraryFallback + if err := task.setCookie(&cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + + // libdevmapper and udev relies on System V semaphore for synchronization, + // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. + // So these two function call must come in pairs, otherwise semaphores will + // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` + // will be reached, which will eventually make all follwing calls to 'task.SetCookie' + // fail. + // this call will not wait for the deferred removal's final executing, since no + // udev event will be generated, and the semaphore's value will not be incremented + // by udev, what UdevWait is just cleaning up the semaphore. + defer UdevWait(&cookie) + + if err = task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) + } + + return nil +} + +// CancelDeferredRemove cancels a deferred remove for a device. +func CancelDeferredRemove(deviceName string) error { + task, err := TaskCreateNamed(deviceTargetMsg, deviceName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + dmSawEnxio = false + if err := task.run(); err != nil { + // A device might be being deleted already + if dmSawBusy { + return ErrBusy + } else if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) + + } + return nil +} + +// GetBlockDeviceSize returns the size of a block device identified by the specified file. +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +// BlockDeviceDiscard runs discard for the given path. +// This is used as a workaround for the kernel not discarding block so +// on the thin pool when we remove a thinp device, so we do it +// manually +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. + syscall.Sync() + + return nil +} + +// CreatePool is the programmatic example of "dmsetup create". +// It creates a device with the specified poolName, data and metadata file and block size. +func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + var cookie uint + var flags uint16 + flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.setCookie(&cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) + } + + return nil +} + +// ReloadPool is the programmatic example of "dmsetup reload". +// It reloads the table with the specified poolName, data and metadata file and block size. +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. +func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. +func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. +func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. +func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err) + } + + return nil +} + +// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, +func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + + if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { + if doSuspend { + if err2 := ResumeDevice(baseName); err2 != nil { + return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2) + } + } + return err + } + + if doSuspend { + if err := ResumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go new file mode 100644 index 0000000..8477e36 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go @@ -0,0 +1,35 @@ +// +build linux + +package devicemapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +// DevmapperLogCallback exports the devmapper log callback for cgo. +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + + if strings.Contains(msg, "No such device or address") { + dmSawEnxio = true + } + } + + if dmLogger != nil { + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) + } +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go new file mode 100644 index 0000000..91fbc85 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go @@ -0,0 +1,251 @@ +// +build linux + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include +#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +type ( + cdmTask C.struct_dm_task +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD +) + +// Devicemapper cookie flags. +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + hdr := reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), + Len: int(Cdeps.count), + Cap: int(Cdeps.count), + } + devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range devices { + deps.Device = append(deps.Device, uint64(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *cdmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) + return nextp +} + +func dmUdevSetSyncSupportFct(syncWithUdev int) { + (C.dm_udev_set_sync_support(C.int(syncWithUdev))) +} + +func dmUdevGetSyncSupportFct() int { + return int(C.dm_udev_get_sync_support()) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go new file mode 100644 index 0000000..dc361ea --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -0,0 +1,34 @@ +// +build linux,!libdm_no_deferred_remove + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include +*/ +import "C" + +// LibraryDeferredRemovalSupport is supported when statically linked. +const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 0000000..8249ccf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalSupport is not supported when statically linked. +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. + return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go b/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go new file mode 100644 index 0000000..581b57e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go @@ -0,0 +1,27 @@ +// +build linux + +package devicemapper + +import ( + "syscall" + "unsafe" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/log.go b/vendor/github.com/docker/docker/pkg/devicemapper/log.go new file mode 100644 index 0000000..cee5e54 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/vendor/github.com/docker/docker/pkg/directory/directory.go b/vendor/github.com/docker/docker/pkg/directory/directory.go new file mode 100644 index 0000000..1715ef4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory.go @@ -0,0 +1,26 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + + infos, err := ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_test.go b/vendor/github.com/docker/docker/pkg/directory/directory_test.go new file mode 100644 index 0000000..2b7a465 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory_test.go @@ -0,0 +1,192 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "testing" +) + +// Size of an empty directory should be 0 +func TestSizeEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("empty directory has size: %d", size) + } +} + +// Size of a directory with one empty file should be 0 +func TestSizeEmptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + var size int64 + if size, _ = Size(file.Name()); size != 0 { + t.Fatalf("directory with one file has size: %d", size) + } +} + +// Size of a directory with one 5-byte file should be 5 +func TestSizeNonemptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{97, 98, 99, 100, 101} + file.Write(d) + + var size int64 + if size, _ = Size(file.Name()); size != 5 { + t.Fatalf("directory with one 5-byte file has size: %d", size) + } +} + +// Size of a directory with one empty directory should be 0 +func TestSizeNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("directory with one empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 empty directory +func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{100, 111, 99, 107, 101, 114} + file.Write(d) + + var size int64 + if size, _ = Size(dir); size != 6 { + t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 non-empty directory +func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { + var dir, dirNested string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + data := []byte{100, 111, 99, 107, 101, 114} + file.Write(data) + + var nestedFile *os.File + if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil { + t.Fatalf("failed to create file in nested directory: %s", err) + } + + nestedData := []byte{100, 111, 99, 107, 101, 114} + nestedFile.Write(nestedData) + + var size int64 + if size, _ = Size(dir); size != 12 { + t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) + } +} + +// Test migration of directory to a subdir underneath itself +func TestMoveToSubdir(t *testing.T) { + var outerDir, subDir string + var err error + + if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil { + t.Fatalf("failed to create directory: %v", err) + } + + if subDir, err = ioutil.TempDir(outerDir, "testSub"); err != nil { + t.Fatalf("failed to create subdirectory: %v", err) + } + + // write 4 temp files in the outer dir to get moved + filesList := []string{"a", "b", "c", "d"} + for _, fName := range filesList { + if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil { + t.Fatalf("couldn't create temp file %q: %v", fName, err) + } else { + file.WriteString(fName) + file.Close() + } + } + + if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil { + t.Fatalf("Error during migration of content to subdirectory: %v", err) + } + // validate that the files were moved to the subdirectory + infos, err := ioutil.ReadDir(subDir) + if err != nil { + t.Fatal(err) + } + if len(infos) != 4 { + t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos)) + } + var results []string + for _, info := range infos { + results = append(results, info.Name()) + } + sort.Sort(sort.StringSlice(results)) + if !reflect.DeepEqual(filesList, results) { + t.Fatalf("Results after migration do not equal list of files: expected: %v, got: %v", filesList, results) + } +} + +// Test a non-existing directory +func TestSizeNonExistingDirectory(t *testing.T) { + if _, err := Size("/thisdirectoryshouldnotexist/TestSizeNonExistingDirectory"); err == nil { + t.Fatalf("error is expected") + } +} diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_unix.go b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go new file mode 100644 index 0000000..397251b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go @@ -0,0 +1,48 @@ +// +build linux freebsd solaris + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. + data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_windows.go b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go new file mode 100644 index 0000000..6fb0917 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package directory + +import ( + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/README.md b/vendor/github.com/docker/docker/pkg/discovery/README.md new file mode 100644 index 0000000..39777c2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/README.md @@ -0,0 +1,41 @@ +--- +page_title: Docker discovery +page_description: discovery +page_keywords: docker, clustering, discovery +--- + +# Discovery + +Docker comes with multiple Discovery backends. + +## Backends + +### Using etcd + +Point your Docker Engine instances to a common etcd instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ docker daemon -H= --cluster-advertise= --cluster-store etcd://,/ +``` + +### Using consul + +Point your Docker Engine instances to a common Consul instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ docker daemon -H= --cluster-advertise= --cluster-store consul:/// +``` + +### Using zookeeper + +Point your Docker Engine instances to a common Zookeeper instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ docker daemon -H= --cluster-advertise= --cluster-store zk://,/ +``` diff --git a/vendor/github.com/docker/docker/pkg/discovery/backends.go b/vendor/github.com/docker/docker/pkg/discovery/backends.go new file mode 100644 index 0000000..2eab550 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/backends.go @@ -0,0 +1,107 @@ +package discovery + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/Sirupsen/logrus" +) + +var ( + // Backends is a global map of discovery backends indexed by their + // associated scheme. + backends = make(map[string]Backend) +) + +// Register makes a discovery backend available by the provided scheme. +// If Register is called twice with the same scheme an error is returned. +func Register(scheme string, d Backend) error { + if _, exists := backends[scheme]; exists { + return fmt.Errorf("scheme already registered %s", scheme) + } + logrus.WithField("name", scheme).Debugf("Registering discovery service") + backends[scheme] = d + return nil +} + +func parse(rawurl string) (string, string) { + parts := strings.SplitN(rawurl, "://", 2) + + // nodes:port,node2:port => nodes://node1:port,node2:port + if len(parts) == 1 { + return "nodes", parts[0] + } + return parts[0], parts[1] +} + +// ParseAdvertise parses the --cluster-advertise daemon config which accepts +// : or : +func ParseAdvertise(advertise string) (string, error) { + var ( + iface *net.Interface + addrs []net.Addr + err error + ) + + addr, port, err := net.SplitHostPort(advertise) + + if err != nil { + return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err) + } + + ip := net.ParseIP(addr) + // If it is a valid ip-address, use it as is + if ip != nil { + return advertise, nil + } + + // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise + ifaceName := addr + iface, err = net.InterfaceByName(ifaceName) + if err != nil { + return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err) + } + + addrs, err = iface.Addrs() + if err != nil { + return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err) + } + + if len(addrs) == 0 { + return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise) + } + + addr = "" + for _, a := range addrs { + ip, _, err := net.ParseCIDR(a.String()) + if err != nil { + return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err) + } + if ip.To4() == nil || ip.IsLoopback() { + continue + } + addr = ip.String() + break + } + if addr == "" { + return "", fmt.Errorf("could not find a valid ip-address in interface %s", advertise) + } + + addr = net.JoinHostPort(addr, port) + return addr, nil +} + +// New returns a new Discovery given a URL, heartbeat and ttl settings. +// Returns an error if the URL scheme is not supported. +func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) { + scheme, uri := parse(rawurl) + if backend, exists := backends[scheme]; exists { + logrus.WithFields(logrus.Fields{"name": scheme, "uri": uri}).Debugf("Initializing discovery service") + err := backend.Initialize(uri, heartbeat, ttl, clusterOpts) + return backend, err + } + + return nil, ErrNotSupported +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery.go b/vendor/github.com/docker/docker/pkg/discovery/discovery.go new file mode 100644 index 0000000..ca7f587 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/discovery.go @@ -0,0 +1,35 @@ +package discovery + +import ( + "errors" + "time" +) + +var ( + // ErrNotSupported is returned when a discovery service is not supported. + ErrNotSupported = errors.New("discovery service not supported") + + // ErrNotImplemented is returned when discovery feature is not implemented + // by discovery backend. + ErrNotImplemented = errors.New("not implemented in this discovery service") +) + +// Watcher provides watching over a cluster for nodes joining and leaving. +type Watcher interface { + // Watch the discovery for entry changes. + // Returns a channel that will receive changes or an error. + // Providing a non-nil stopCh can be used to stop watching. + Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) +} + +// Backend is implemented by discovery backends which manage cluster entries. +type Backend interface { + // Watcher must be provided by every backend. + Watcher + + // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. + Initialize(string, time.Duration, time.Duration, map[string]string) error + + // Register to the discovery. + Register(string) error +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go b/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go new file mode 100644 index 0000000..6084f3e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go @@ -0,0 +1,137 @@ +package discovery + +import ( + "testing" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestNewEntry(c *check.C) { + entry, err := NewEntry("127.0.0.1:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "127.0.0.1:2375") + + entry, err = NewEntry("[2001:db8:0:f101::2]:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "[2001:db8:0:f101::2]:2375") + + _, err = NewEntry("127.0.0.1") + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestParse(c *check.C) { + scheme, uri := parse("127.0.0.1:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("localhost:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("scheme://127.0.0.1:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("scheme://localhost:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "") +} + +func (s *DiscoverySuite) TestCreateEntries(c *check.C) { + entries, err := CreateEntries(nil) + c.Assert(entries, check.DeepEquals, Entries{}) + c.Assert(err, check.IsNil) + + entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""}) + c.Assert(err, check.IsNil) + expected := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "2001:db8:0:f101::2", Port: "2375"}, + } + c.Assert(entries.Equals(expected), check.Equals, true) + + _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestContainsEntry(c *check.C) { + entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) + c.Assert(err, check.IsNil) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false) +} + +func (s *DiscoverySuite) TestEntriesEquality(c *check.C) { + entries := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + } + + // Same + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + }), check. + Equals, true) + + // Different size + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "127.0.0.3", Port: "2375"}, + }), check. + Equals, false) + + // Different content + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.42", Port: "2375"}, + }), check. + Equals, false) + +} + +func (s *DiscoverySuite) TestEntriesDiff(c *check.C) { + entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} + entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} + entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} + entries := Entries{entry1, entry2} + + // No diff + added, removed := entries.Diff(Entries{entry2, entry1}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 0) + + // Add + added, removed = entries.Diff(Entries{entry2, entry3, entry1}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 0) + + // Remove + added, removed = entries.Diff(Entries{entry2}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry1), check.Equals, true) + + // Add and remove + added, removed = entries.Diff(Entries{entry1, entry3}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry2), check.Equals, true) +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/entry.go b/vendor/github.com/docker/docker/pkg/discovery/entry.go new file mode 100644 index 0000000..ce23bbf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/entry.go @@ -0,0 +1,94 @@ +package discovery + +import "net" + +// NewEntry creates a new entry. +func NewEntry(url string) (*Entry, error) { + host, port, err := net.SplitHostPort(url) + if err != nil { + return nil, err + } + return &Entry{host, port}, nil +} + +// An Entry represents a host. +type Entry struct { + Host string + Port string +} + +// Equals returns true if cmp contains the same data. +func (e *Entry) Equals(cmp *Entry) bool { + return e.Host == cmp.Host && e.Port == cmp.Port +} + +// String returns the string form of an entry. +func (e *Entry) String() string { + return net.JoinHostPort(e.Host, e.Port) +} + +// Entries is a list of *Entry with some helpers. +type Entries []*Entry + +// Equals returns true if cmp contains the same data. +func (e Entries) Equals(cmp Entries) bool { + // Check if the file has really changed. + if len(e) != len(cmp) { + return false + } + for i := range e { + if !e[i].Equals(cmp[i]) { + return false + } + } + return true +} + +// Contains returns true if the Entries contain a given Entry. +func (e Entries) Contains(entry *Entry) bool { + for _, curr := range e { + if curr.Equals(entry) { + return true + } + } + return false +} + +// Diff compares two entries and returns the added and removed entries. +func (e Entries) Diff(cmp Entries) (Entries, Entries) { + added := Entries{} + for _, entry := range cmp { + if !e.Contains(entry) { + added = append(added, entry) + } + } + + removed := Entries{} + for _, entry := range e { + if !cmp.Contains(entry) { + removed = append(removed, entry) + } + } + + return added, removed +} + +// CreateEntries returns an array of entries based on the given addresses. +func CreateEntries(addrs []string) (Entries, error) { + entries := Entries{} + if addrs == nil { + return entries, nil + } + + for _, addr := range addrs { + if len(addr) == 0 { + continue + } + entry, err := NewEntry(addr) + if err != nil { + return nil, err + } + entries = append(entries, entry) + } + return entries, nil +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/file/file.go b/vendor/github.com/docker/docker/pkg/discovery/file/file.go new file mode 100644 index 0000000..2b8e27b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/file/file.go @@ -0,0 +1,107 @@ +package file + +import ( + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + heartbeat time.Duration + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("file", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error { + s.path = path + s.heartbeat = heartbeat + return nil +} + +func parseFileContent(content []byte) []string { + var result []string + for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { + line = strings.TrimSpace(line) + // Ignoring line starts with # + if strings.HasPrefix(line, "#") { + continue + } + // Inlined # comment also ignored. + if strings.Contains(line, "#") { + line = line[0:strings.Index(line, "#")] + // Trim additional spaces caused by above stripping. + line = strings.TrimSpace(line) + } + result = append(result, discovery.Generate(line)...) + } + return result +} + +func (s *Discovery) fetch() (discovery.Entries, error) { + fileContent, err := ioutil.ReadFile(s.path) + if err != nil { + return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) + } + return discovery.CreateEntries(parseFileContent(fileContent)) +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + currentEntries, err := s.fetch() + if err != nil { + errCh <- err + } else { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + newEntries, err := s.fetch() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. + if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go b/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go new file mode 100644 index 0000000..667f00b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go @@ -0,0 +1,114 @@ +package file + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("/path/to/file", 1000, 0, nil) + c.Assert(d.path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestNew(c *check.C) { + d, err := discovery.New("file:///path/to/file", 0, 0, nil) + c.Assert(err, check.IsNil) + c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestContent(c *check.C) { + data := ` +1.1.1.[1:2]:1111 +2.2.2.[2:4]:2222 +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 5) + c.Assert(ips[0], check.Equals, "1.1.1.1:1111") + c.Assert(ips[1], check.Equals, "1.1.1.2:1111") + c.Assert(ips[2], check.Equals, "2.2.2.2:2222") + c.Assert(ips[3], check.Equals, "2.2.2.3:2222") + c.Assert(ips[4], check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + discovery := &Discovery{path: "/path/to/file"} + c.Assert(discovery.Register("0.0.0.0"), check.NotNil) +} + +func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) { + data := ` +### test ### +1.1.1.1:1111 # inline comment +# 2.2.2.2:2222 + ### empty line with comment + 3.3.3.3:3333 +### test ### +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 2) + c.Assert("1.1.1.1:1111", check.Equals, ips[0]) + c.Assert("3.3.3.3:3333", check.Equals, ips[1]) +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + data := ` +1.1.1.1:1111 +2.2.2.2:2222 +` + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + // Create a temporary file and remove it. + tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test") + c.Assert(err, check.IsNil) + c.Assert(tmp.Close(), check.IsNil) + c.Assert(os.Remove(tmp.Name()), check.IsNil) + + // Set up file discovery. + d := &Discovery{} + d.Initialize(tmp.Name(), 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // Make sure it fires errors since the file doesn't exist. + c.Assert(<-errCh, check.NotNil) + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Write the file and make sure we get the expected value back. + c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry and look it up. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) + c.Assert(err, check.IsNil) + c.Assert(f, check.NotNil) + _, err = f.WriteString("\n3.3.3.3:3333\n") + c.Assert(err, check.IsNil) + f.Close() + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator.go b/vendor/github.com/docker/docker/pkg/discovery/generator.go new file mode 100644 index 0000000..d222982 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/generator.go @@ -0,0 +1,35 @@ +package discovery + +import ( + "fmt" + "regexp" + "strconv" +) + +// Generate takes care of IP generation +func Generate(pattern string) []string { + re, _ := regexp.Compile(`\[(.+):(.+)\]`) + submatch := re.FindStringSubmatch(pattern) + if submatch == nil { + return []string{pattern} + } + + from, err := strconv.Atoi(submatch[1]) + if err != nil { + return []string{pattern} + } + to, err := strconv.Atoi(submatch[2]) + if err != nil { + return []string{pattern} + } + + template := re.ReplaceAllString(pattern, "%d") + + var result []string + for val := from; val <= to; val++ { + entry := fmt.Sprintf(template, val) + result = append(result, entry) + } + + return result +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator_test.go b/vendor/github.com/docker/docker/pkg/discovery/generator_test.go new file mode 100644 index 0000000..6281c46 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/generator_test.go @@ -0,0 +1,53 @@ +package discovery + +import ( + "github.com/go-check/check" +) + +func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) { + ips := Generate("127.0.0.1") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1") +} + +func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) { + ips := Generate("127.0.0.1:8080") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1:8080") +} + +func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) { + ips := Generate("127.0.0.[1]") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.[1]") +} + +func (s *DiscoverySuite) TestGeneratorWithPort(c *check.C) { + ips := Generate("127.0.0.[1:11]:2375") + c.Assert(len(ips), check.Equals, 11) + c.Assert(ips[0], check.Equals, "127.0.0.1:2375") + c.Assert(ips[1], check.Equals, "127.0.0.2:2375") + c.Assert(ips[2], check.Equals, "127.0.0.3:2375") + c.Assert(ips[3], check.Equals, "127.0.0.4:2375") + c.Assert(ips[4], check.Equals, "127.0.0.5:2375") + c.Assert(ips[5], check.Equals, "127.0.0.6:2375") + c.Assert(ips[6], check.Equals, "127.0.0.7:2375") + c.Assert(ips[7], check.Equals, "127.0.0.8:2375") + c.Assert(ips[8], check.Equals, "127.0.0.9:2375") + c.Assert(ips[9], check.Equals, "127.0.0.10:2375") + c.Assert(ips[10], check.Equals, "127.0.0.11:2375") +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) { + malformedInput := "127.0.0.[x:11]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) { + malformedInput := "127.0.0.[1:x]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go new file mode 100644 index 0000000..77eee7d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go @@ -0,0 +1,192 @@ +package kv + +import ( + "fmt" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libkv/store/consul" + "github.com/docker/libkv/store/etcd" + "github.com/docker/libkv/store/zookeeper" +) + +const ( + defaultDiscoveryPath = "docker/nodes" +) + +// Discovery is exported +type Discovery struct { + backend store.Backend + store store.Store + heartbeat time.Duration + ttl time.Duration + prefix string + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + // Register to libkv + zookeeper.Register() + consul.Register() + etcd.Register() + + // Register to internal discovery service + discovery.Register("zk", &Discovery{backend: store.ZK}) + discovery.Register("consul", &Discovery{backend: store.CONSUL}) + discovery.Register("etcd", &Discovery{backend: store.ETCD}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { + var ( + parts = strings.SplitN(uris, "/", 2) + addrs = strings.Split(parts[0], ",") + err error + ) + + // A custom prefix to the path can be optionally used. + if len(parts) == 2 { + s.prefix = parts[1] + } + + s.heartbeat = heartbeat + s.ttl = ttl + + // Use a custom path if specified in discovery options + dpath := defaultDiscoveryPath + if clusterOpts["kv.path"] != "" { + dpath = clusterOpts["kv.path"] + } + + s.path = path.Join(s.prefix, dpath) + + var config *store.Config + if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { + logrus.Info("Initializing discovery with TLS") + tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ + CAFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }) + if err != nil { + return err + } + config = &store.Config{ + // Set ClientTLS to trigger https (bug in libkv/etcd) + ClientTLS: &store.ClientTLSConfig{ + CACertFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }, + // The actual TLS config that will be used + TLS: tlsConfig, + } + } else { + logrus.Info("Initializing discovery without TLS") + } + + // Creates a new store, will ignore options given + // if not supported by the chosen store + s.store, err = libkv.NewStore(s.backend, addrs, config) + return err +} + +// Watch the store until either there's a store error or we receive a stop request. +// Returns false if we shouldn't attempt watching the store anymore (stop request received). +func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { + for { + select { + case pairs := <-watchCh: + if pairs == nil { + return true + } + + logrus.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) + + // Convert `KVPair` into `discovery.Entry`. + addrs := make([]string, len(pairs)) + for _, pair := range pairs { + addrs = append(addrs, string(pair.Value)) + } + + entries, err := discovery.CreateEntries(addrs) + if err != nil { + errCh <- err + } else { + discoveryCh <- entries + } + case <-stopCh: + // We were requested to stop watching. + return false + } + } +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + + go func() { + defer close(ch) + defer close(errCh) + + // Forever: Create a store watch, watch until we get an error and then try again. + // Will only stop if we receive a stopCh request. + for { + // Create the path to watch if it does not exist yet + exists, err := s.store.Exists(s.path) + if err != nil { + errCh <- err + } + if !exists { + if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { + errCh <- err + } + } + + // Set up a watch. + watchCh, err := s.store.WatchTree(s.path, stopCh) + if err != nil { + errCh <- err + } else { + if !s.watchOnce(stopCh, watchCh, ch, errCh) { + return + } + } + + // If we get here it means the store watch channel was closed. This + // is unexpected so let's retry later. + errCh <- fmt.Errorf("Unexpected watch error") + time.Sleep(s.heartbeat) + } + }() + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + opts := &store.WriteOptions{TTL: s.ttl} + return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) +} + +// Store returns the underlying store used by KV discovery. +func (s *Discovery) Store() store.Store { + return s.store +} + +// Prefix returns the store prefix +func (s *Discovery) Prefix() string { + return s.prefix +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go new file mode 100644 index 0000000..dab3939 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go @@ -0,0 +1,324 @@ +package kv + +import ( + "errors" + "io/ioutil" + "os" + "path" + "testing" + "time" + + "github.com/docker/docker/pkg/discovery" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (ds *DiscoverySuite) TestInitialize(c *check.C) { + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1"}, + } + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1", 0, 0, nil) + d.store = storeMock + + s := d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1") + c.Assert(d.path, check.Equals, defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 3) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234") + c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234") + + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) +} + +// Extremely limited mock store so we can test initialization +type Mock struct { + // Endpoints passed to InitializeMock + Endpoints []string + + // Options passed to InitializeMock + Options *store.Config +} + +func NewMock(endpoints []string, options *store.Config) (store.Store, error) { + s := &Mock{} + s.Endpoints = endpoints + s.Options = options + return s, nil +} +func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { + return errors.New("Put not supported") +} +func (s *Mock) Get(key string) (*store.KVPair, error) { + return nil, errors.New("Get not supported") +} +func (s *Mock) Delete(key string) error { + return errors.New("Delete not supported") +} + +// Exists mock +func (s *Mock) Exists(key string) (bool, error) { + return false, errors.New("Exists not supported") +} + +// Watch mock +func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, errors.New("Watch not supported") +} + +// WatchTree mock +func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + return nil, errors.New("WatchTree not supported") +} + +// NewLock mock +func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, errors.New("NewLock not supported") +} + +// List mock +func (s *Mock) List(prefix string) ([]*store.KVPair, error) { + return nil, errors.New("List not supported") +} + +// DeleteTree mock +func (s *Mock) DeleteTree(prefix string) error { + return errors.New("DeleteTree not supported") +} + +// AtomicPut mock +func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { + return false, nil, errors.New("AtomicPut not supported") +} + +// AtomicDelete mock +func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return false, errors.New("AtomicDelete not supported") +} + +// Close mock +func (s *Mock) Close() { + return +} + +func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { + cert := `-----BEGIN CERTIFICATE----- +MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT +B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD +VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC +O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds ++J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q +V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb +UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55 +Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT +V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/ +BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j +BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz +7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI +xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M +ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY +8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn +t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX +FpTxDmJHEV4bzUzh +-----END CERTIFICATE----- +` + key := `-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4 ++zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR +SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr +pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe +rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj +xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj +i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx +qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO +1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5 +5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony +MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0 +ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP +L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N +XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT +Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B +LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU +t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+ +QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV +xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj +xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc +qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa +V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV +PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk +dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL +BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I= +-----END RSA PRIVATE KEY----- +` + certFile, err := ioutil.TempFile("", "cert") + c.Assert(err, check.IsNil) + defer os.Remove(certFile.Name()) + certFile.Write([]byte(cert)) + certFile.Close() + keyFile, err := ioutil.TempFile("", "key") + c.Assert(err, check.IsNil) + defer os.Remove(keyFile.Name()) + keyFile.Write([]byte(key)) + keyFile.Close() + + libkv.AddStore("mock", NewMock) + d := &Discovery{backend: "mock"} + err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{ + "kv.cacertfile": certFile.Name(), + "kv.certfile": certFile.Name(), + "kv.keyfile": keyFile.Name(), + }) + c.Assert(err, check.IsNil) + s := d.store.(*Mock) + c.Assert(s.Options.TLS, check.NotNil) + c.Assert(s.Options.TLS.RootCAs, check.NotNil) + c.Assert(s.Options.TLS.Certificates, check.HasLen, 1) +} + +func (ds *DiscoverySuite) TestWatch(c *check.C) { + mockCh := make(chan []*store.KVPair) + + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + mockKVChan: mockCh, + } + + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + kvs := []*store.KVPair{ + {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, + {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, + } + + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // It should fire an error since the first WatchTree call failed. + c.Assert(<-errCh, check.ErrorMatches, "test error") + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Push the entries into the store channel and make sure discovery emits. + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + close(mockCh) + // Give it enough time to call WatchTree. + time.Sleep(3 * time.Second) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} + +// FakeStore implements store.Store methods. It mocks all store +// function in a simple, naive way. +type FakeStore struct { + Endpoints []string + Options *store.Config + mockKVChan <-chan []*store.KVPair + + watchTreeCallCount int +} + +func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error { + return nil +} + +func (s *FakeStore) Get(key string) (*store.KVPair, error) { + return nil, nil +} + +func (s *FakeStore) Delete(key string) error { + return nil +} + +func (s *FakeStore) Exists(key string) (bool, error) { + return true, nil +} + +func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, nil +} + +// WatchTree will fail the first time, and return the mockKVchan afterwards. +// This is the behavior we need for testing.. If we need 'moar', should update this. +func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + if s.watchTreeCallCount == 0 { + s.watchTreeCallCount = 1 + return nil, errors.New("test error") + } + // First calls error + return s.mockKVChan, nil +} + +func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, nil +} + +func (s *FakeStore) List(directory string) ([]*store.KVPair, error) { + return []*store.KVPair{}, nil +} + +func (s *FakeStore) DeleteTree(directory string) error { + return nil +} + +func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + return true, nil, nil +} + +func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return true, nil +} + +func (s *FakeStore) Close() { +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go b/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go new file mode 100644 index 0000000..ba8b1f5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go @@ -0,0 +1,93 @@ +package memory + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery implements a discovery backend that keeps +// data in memory. +type Discovery struct { + heartbeat time.Duration + values []string + mu sync.Mutex +} + +func init() { + Init() +} + +// Init registers the memory backend on demand. +func Init() { + discovery.Register("memory", &Discovery{}) +} + +// Initialize sets the heartbeat for the memory backend. +func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error { + s.heartbeat = heartbeat + s.values = make([]string, 0) + return nil +} + +// Watch sends periodic discovery updates to a channel. +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + var currentEntries discovery.Entries + var err error + + s.mu.Lock() + if len(s.values) > 0 { + currentEntries, err = discovery.CreateEntries(s.values) + } + s.mu.Unlock() + + if err != nil { + errCh <- err + } else if currentEntries != nil { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + s.mu.Lock() + newEntries, err := discovery.CreateEntries(s.values) + s.mu.Unlock() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. + if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register adds a new address to the discovery. +func (s *Discovery) Register(addr string) error { + s.mu.Lock() + s.values = append(s.values, addr) + s.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go b/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go new file mode 100644 index 0000000..c2da0a0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go @@ -0,0 +1,48 @@ +package memory + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type discoverySuite struct{} + +var _ = check.Suite(&discoverySuite{}) + +func (s *discoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("foo", 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + } + + c.Assert(d.Register("1.1.1.1:1111"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + expected = discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + c.Assert(d.Register("2.2.2.2:2222"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go new file mode 100644 index 0000000..c0e3c07 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go @@ -0,0 +1,54 @@ +package nodes + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + entries discovery.Entries +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("nodes", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { + for _, input := range strings.Split(uris, ",") { + for _, ip := range discovery.Generate(input) { + entry, err := discovery.NewEntry(ip) + if err != nil { + return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) + } + s.entries = append(s.entries, entry) + } + } + + return nil +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + go func() { + defer close(ch) + ch <- s.entries + <-stopCh + }() + return ch, nil +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go new file mode 100644 index 0000000..e26568c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go @@ -0,0 +1,51 @@ +package nodes + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 2) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222") +} + +func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 5) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111") + c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222") + c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222") + c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + ch, _ := d.Watch(nil) + c.Assert(expected.Equals(<-ch), check.Equals, true) +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + d := &Discovery{} + c.Assert(d.Register("0.0.0.0"), check.NotNil) +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go new file mode 100644 index 0000000..7a81cbd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. +package filenotify + +import "github.com/fsnotify/fsnotify" + +// FileWatcher is an interface for implementing file notification watchers +type FileWatcher interface { + Events() <-chan fsnotify.Event + Errors() <-chan error + Add(name string) error + Remove(name string) error + Close() error +} + +// New tries to use an fs-event watcher, and falls back to the poller if there is an error +func New() (FileWatcher, error) { + if watcher, err := NewEventWatcher(); err == nil { + return watcher, nil + } + return NewPollingWatcher(), nil +} + +// NewPollingWatcher returns a poll-based file watcher +func NewPollingWatcher() FileWatcher { + return &filePoller{ + events: make(chan fsnotify.Event), + errors: make(chan error), + } +} + +// NewEventWatcher returns an fs-event based file watcher +func NewEventWatcher() (FileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + return &fsNotifyWatcher{watcher}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go new file mode 100644 index 0000000..5d08a99 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go @@ -0,0 +1,18 @@ +package filenotify + +import "github.com/fsnotify/fsnotify" + +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifer interface +type fsNotifyWatcher struct { + *fsnotify.Watcher +} + +// Events returns the fsnotify event channel receiver +func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { + return w.Watcher.Events +} + +// Errors returns the fsnotify error channel receiver +func (w *fsNotifyWatcher) Errors() <-chan error { + return w.Watcher.Errors +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go new file mode 100644 index 0000000..dc5ccd0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller.go @@ -0,0 +1,204 @@ +package filenotify + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/fsnotify/fsnotify" +) + +var ( + // errPollerClosed is returned when the poller is closed + errPollerClosed = errors.New("poller is closed") + // errNoSuchPoller is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("poller does not exist") +) + +// watchWaitTime is the time to wait between file poll loops +const watchWaitTime = 200 * time.Millisecond + +// filePoller is used to poll files for changes, especially in cases where fsnotify +// can't be run (e.g. when inotify handles are exhausted) +// filePoller satisfies the FileWatcher interface +type filePoller struct { + // watches is the list of files currently being polled, close the associated channel to stop the watch + watches map[string]chan struct{} + // events is the channel to listen to for watch events + events chan fsnotify.Event + // errors is the channel to listen to for watch errors + errors chan error + // mu locks the poller for modification + mu sync.Mutex + // closed is used to specify when the poller has already closed + closed bool +} + +// Add adds a filename to the list of watches +// once added the file is polled for changes in a separate goroutine +func (w *filePoller) Add(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed == true { + return errPollerClosed + } + + f, err := os.Open(name) + if err != nil { + return err + } + fi, err := os.Stat(name) + if err != nil { + return err + } + + if w.watches == nil { + w.watches = make(map[string]chan struct{}) + } + if _, exists := w.watches[name]; exists { + return fmt.Errorf("watch exists") + } + chClose := make(chan struct{}) + w.watches[name] = chClose + + go w.watch(f, fi, chClose) + return nil +} + +// Remove stops and removes watch with the specified name +func (w *filePoller) Remove(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.remove(name) +} + +func (w *filePoller) remove(name string) error { + if w.closed == true { + return errPollerClosed + } + + chClose, exists := w.watches[name] + if !exists { + return errNoSuchWatch + } + close(chClose) + delete(w.watches, name) + return nil +} + +// Events returns the event channel +// This is used for notifications on events about watched files +func (w *filePoller) Events() <-chan fsnotify.Event { + return w.events +} + +// Errors returns the errors channel +// This is used for notifications about errors on watched files +func (w *filePoller) Errors() <-chan error { + return w.errors +} + +// Close closes the poller +// All watches are stopped, removed, and the poller cannot be added to +func (w *filePoller) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil + } + + w.closed = true + for name := range w.watches { + w.remove(name) + delete(w.watches, name) + } + return nil +} + +// sendEvent publishes the specified event to the events channel +func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { + select { + case w.events <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// sendErr publishes the specified error to the errors channel +func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { + select { + case w.errors <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// watch is responsible for polling the specified file for changes +// upon finding changes to a file or errors, sendEvent/sendErr is called +func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { + defer f.Close() + for { + time.Sleep(watchWaitTime) + select { + case <-chClose: + logrus.Debugf("watch for %s closed", f.Name()) + return + default: + } + + fi, err := os.Stat(f.Name()) + if err != nil { + // if we got an error here and lastFi is not set, we can presume that nothing has changed + // This should be safe since before `watch()` is called, a stat is performed, there is any error `watch` is not called + if lastFi == nil { + continue + } + // If it doesn't exist at this point, it must have been removed + // no need to send the error here since this is a valid operation + if os.IsNotExist(err) { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { + return + } + lastFi = nil + continue + } + // at this point, send the error + if err := w.sendErr(err, chClose); err != nil { + return + } + continue + } + + if lastFi == nil { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.Mode() != lastFi.Mode() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go b/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go new file mode 100644 index 0000000..b4c7825 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go @@ -0,0 +1,119 @@ +package filenotify + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "testing" + "time" + + "github.com/fsnotify/fsnotify" +) + +func TestPollerAddRemove(t *testing.T) { + w := NewPollingWatcher() + + if err := w.Add("no-such-file"); err == nil { + t.Fatal("should have gotten error when adding a non-existent file") + } + if err := w.Remove("no-such-file"); err == nil { + t.Fatal("should have gotten error when removing non-existent watch") + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + if err := w.Remove(f.Name()); err != nil { + t.Fatal(err) + } +} + +func TestPollerEvent(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("No chmod on Windows") + } + w := NewPollingWatcher() + + f, err := ioutil.TempFile("", "test-poller") + if err != nil { + t.Fatal("error creating temp file") + } + defer os.RemoveAll(f.Name()) + f.Close() + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + select { + case <-w.Events(): + t.Fatal("got event before anything happened") + case <-w.Errors(): + t.Fatal("got error before anything happened") + default: + } + + if err := ioutil.WriteFile(f.Name(), []byte("hello"), 644); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Write); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(f.Name(), 600); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Chmod); err != nil { + t.Fatal(err) + } + + if err := os.Remove(f.Name()); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Remove); err != nil { + t.Fatal(err) + } +} + +func TestPollerClose(t *testing.T) { + w := NewPollingWatcher() + if err := w.Close(); err != nil { + t.Fatal(err) + } + // test double-close + if err := w.Close(); err != nil { + t.Fatal(err) + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + if err := w.Add(f.Name()); err == nil { + t.Fatal("should have gotten error adding watch for closed watcher") + } +} + +func assertEvent(w FileWatcher, eType fsnotify.Op) error { + var err error + select { + case e := <-w.Events(): + if e.Op != eType { + err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e) + } + case e := <-w.Errors(): + err = fmt.Errorf("got unexpected error waiting for events %v: %v", eType, e) + case <-time.After(watchWaitTime * 3): + err = fmt.Errorf("timeout waiting for event %v", eType) + } + return err +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 0000000..c63ae75 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,283 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/Sirupsen/logrus" +) + +// exclusion returns true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' +} + +// empty returns true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on its own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. + pattern = strings.TrimSpace(pattern) + if empty(pattern) { + continue + } + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doesn't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := regexpMatch(pattern, file) + if err != nil { + return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), + strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// regexpMatch tries to match the logic of filepath.Match but +// does so using regexp logic. We do this so that we can expand the +// wildcard set to include other things, like "**" to mean any number +// of directories. This means that we should be backwards compatible +// with filepath.Match(). We'll end up supporting more stuff, due to +// the fact that we're using regexp, but that's ok - it does no harm. +// +// As per the comment in golangs filepath.Match, on Windows, escaping +// is disabled. Instead, '\\' is treated as path separator. +func regexpMatch(pattern, path string) (bool, error) { + regStr := "^" + + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(pattern, path); err != nil { + return false, err + } + + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" + } + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + res, err := regexp.MatchString(regStr, path) + + // Map regexp's error to filepath's so no one knows we're not using filepath + if err != nil { + err = filepath.ErrBadPattern + } + + return res, err +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 0000000..ccd648f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 0000000..0f2cb7a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go new file mode 100644 index 0000000..6df1be8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -0,0 +1,585 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "testing" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. +func TestExclusion(t *testing.T) { + exclusion := exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Test lots of variants of patterns & strings +func TestMatches(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + tests := []struct { + pattern string + text string + pass bool + }{ + {"**", "file", true}, + {"**", "file/", true}, + {"**/", "file", true}, // weird one + {"**/", "file/", true}, + {"**", "/", true}, + {"**/", "/", true}, + {"**", "dir/file", true}, + {"**/", "dir/file", false}, + {"**", "dir/file/", true}, + {"**/", "dir/file/", true}, + {"**/**", "dir/file", true}, + {"**/**", "dir/file/", true}, + {"dir/**", "dir/file", true}, + {"dir/**", "dir/file/", true}, + {"dir/**", "dir/dir2/file", true}, + {"dir/**", "dir/dir2/file/", true}, + {"**/dir2/*", "dir/dir2/file", true}, + {"**/dir2/*", "dir/dir2/file/", false}, + {"**/dir2/**", "dir/dir2/dir3/file", true}, + {"**/dir2/**", "dir/dir2/dir3/file/", true}, + {"**file", "file", true}, + {"**file", "dir/file", true}, + {"**/file", "dir/file", true}, + {"**file", "dir/dir/file", true}, + {"**/file", "dir/dir/file", true}, + {"**/file*", "dir/dir/file", true}, + {"**/file*", "dir/dir/file.txt", true}, + {"**/file*txt", "dir/dir/file.txt", true}, + {"**/file*.txt", "dir/dir/file.txt", true}, + {"**/file*.txt*", "dir/dir/file.txt", true}, + {"**/**/*.txt", "dir/dir/file.txt", true}, + {"**/**/*.txt2", "dir/dir/file.txt", false}, + {"**/*.txt", "file.txt", true}, + {"**/**/*.txt", "file.txt", true}, + {"a**/*.txt", "a/file.txt", true}, + {"a**/*.txt", "a/dir/file.txt", true}, + {"a**/*.txt", "a/dir/dir/file.txt", true}, + {"a/*.txt", "a/dir/file.txt", false}, + {"a/*.txt", "a/file.txt", true}, + {"a/*.txt**", "a/file.txt", true}, + {"a[b-d]e", "ae", false}, + {"a[b-d]e", "ace", true}, + {"a[b-d]e", "aae", false}, + {"a[^b-d]e", "aze", true}, + {".*", ".foo", true}, + {".*", "foo", false}, + {"abc.def", "abcdef", false}, + {"abc.def", "abc.def", true}, + {"abc.def", "abcZdef", false}, + {"abc?def", "abcZdef", true}, + {"abc?def", "abcdef", false}, + {"a\\*b", "a*b", true}, + {"a\\", "a", false}, + {"a\\", "a\\", false}, + {"a\\\\", "a\\", true}, + {"**/foo/bar", "foo/bar", true}, + {"**/foo/bar", "dir/foo/bar", true}, + {"**/foo/bar", "dir/dir2/foo/bar", true}, + {"abc/**", "abc", false}, + {"abc/**", "abc/def", true}, + {"abc/**", "abc/def/ghi", true}, + } + + for _, test := range tests { + res, _ := regexpMatch(test.pattern, test.text) + if res != test.pass { + t.Fatalf("Failed: %v - res:%v", test, res) + } + } +} + +// An empty string should return true from Empty. +func TestEmpty(t *testing.T) { + empty := empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if err != nil { + t.Fatalf("Should have create a folder, got %v", err) + } + + if !fileinfo.IsDir() { + t.Fatalf("Should have been a dir, seems it's not") + } +} + +func TestCreateIfNotExistsFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + fileToCreate := filepath.Join(tempFolder, "file/to/create") + + if err := CreateIfNotExists(fileToCreate, false); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(fileToCreate) + if err != nil { + t.Fatalf("Should have create a file, got %v", err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, seems it's not") + } +} + +// These matchTests are stolen from go's filepath Match tests. +type matchTest struct { + pattern, s string + match bool + err error +} + +var matchTests = []matchTest{ + {"abc", "abc", true, nil}, + {"*", "abc", true, nil}, + {"*c", "abc", true, nil}, + {"a*", "a", true, nil}, + {"a*", "abc", true, nil}, + {"a*", "ab/c", false, nil}, + {"a*/b", "abc/b", true, nil}, + {"a*/b", "a/c/b", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil}, + {"a*b?c*x", "abxbbxdbxebxczzx", true, nil}, + {"a*b?c*x", "abxbbxdbxebxczzy", false, nil}, + {"ab[c]", "abc", true, nil}, + {"ab[b-d]", "abc", true, nil}, + {"ab[e-g]", "abc", false, nil}, + {"ab[^c]", "abc", false, nil}, + {"ab[^b-d]", "abc", false, nil}, + {"ab[^e-g]", "abc", true, nil}, + {"a\\*b", "a*b", true, nil}, + {"a\\*b", "ab", false, nil}, + {"a?b", "a☺b", true, nil}, + {"a[^a]b", "a☺b", true, nil}, + {"a???b", "a☺b", false, nil}, + {"a[^a][^a][^a]b", "a☺b", false, nil}, + {"[a-ζ]*", "α", true, nil}, + {"*[a-ζ]", "A", false, nil}, + {"a?b", "a/b", false, nil}, + {"a*b", "a/b", false, nil}, + {"[\\]a]", "]", true, nil}, + {"[\\-]", "-", true, nil}, + {"[x\\-]", "x", true, nil}, + {"[x\\-]", "-", true, nil}, + {"[x\\-]", "z", false, nil}, + {"[\\-x]", "x", true, nil}, + {"[\\-x]", "-", true, nil}, + {"[\\-x]", "a", false, nil}, + {"[]a]", "]", false, filepath.ErrBadPattern}, + {"[-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "x", false, filepath.ErrBadPattern}, + {"[x-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "z", false, filepath.ErrBadPattern}, + {"[-x]", "x", false, filepath.ErrBadPattern}, + {"[-x]", "-", false, filepath.ErrBadPattern}, + {"[-x]", "a", false, filepath.ErrBadPattern}, + {"\\", "a", false, filepath.ErrBadPattern}, + {"[a-b-c]", "a", false, filepath.ErrBadPattern}, + {"[", "a", false, filepath.ErrBadPattern}, + {"[^", "a", false, filepath.ErrBadPattern}, + {"[^bc", "a", false, filepath.ErrBadPattern}, + {"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO its wrong + {"a[", "ab", false, filepath.ErrBadPattern}, + {"*x", "xxx", true, nil}, +} + +func errp(e error) string { + if e == nil { + return "" + } + return e.Error() +} + +// TestMatch test's our version of filepath.Match, called regexpMatch. +func TestMatch(t *testing.T) { + for _, tt := range matchTests { + pattern := tt.pattern + s := tt.s + if runtime.GOOS == "windows" { + if strings.Index(pattern, "\\") >= 0 { + // no escape allowed on windows. + continue + } + pattern = filepath.Clean(pattern) + s = filepath.Clean(s) + } + ok, err := regexpMatch(pattern, s) + if ok != tt.match || err != tt.err { + t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 0000000..d5c3abf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 0000000..5ec21ca --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go new file mode 100644 index 0000000..9fd054e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,89 @@ +// +build linux + +package fsutils + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + if err = dummyFile.Close(); err != nil { + return name, err + } + return name, nil +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *syscall.Dirent) bool { + visited++ + if ent.Type == syscall.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := syscall.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go new file mode 100644 index 0000000..4a64823 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go @@ -0,0 +1,91 @@ +// +build linux + +package fsutils + +import ( + "io/ioutil" + "os" + "os/exec" + "syscall" + "testing" +) + +func testSupportsDType(t *testing.T, expected bool, mkfsCommand string, mkfsArg ...string) { + // check whether mkfs is installed + if _, err := exec.LookPath(mkfsCommand); err != nil { + t.Skipf("%s not installed: %v", mkfsCommand, err) + } + + // create a sparse image + imageSize := int64(32 * 1024 * 1024) + imageFile, err := ioutil.TempFile("", "fsutils-image") + if err != nil { + t.Fatal(err) + } + imageFileName := imageFile.Name() + defer os.Remove(imageFileName) + if _, err = imageFile.Seek(imageSize-1, 0); err != nil { + t.Fatal(err) + } + if _, err = imageFile.Write([]byte{0}); err != nil { + t.Fatal(err) + } + if err = imageFile.Close(); err != nil { + t.Fatal(err) + } + + // create a mountpoint + mountpoint, err := ioutil.TempDir("", "fsutils-mountpoint") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(mountpoint) + + // format the image + args := append(mkfsArg, imageFileName) + t.Logf("Executing `%s %v`", mkfsCommand, args) + out, err := exec.Command(mkfsCommand, args...).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Fatal(err) + } + + // loopback-mount the image. + // for ease of setting up loopback device, we use os/exec rather than syscall.Mount + out, err = exec.Command("mount", "-o", "loop", imageFileName, mountpoint).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Skip("skipping the test because mount failed") + } + defer func() { + if err := syscall.Unmount(mountpoint, 0); err != nil { + t.Fatal(err) + } + }() + + // check whether it supports d_type + result, err := SupportsDType(mountpoint) + if err != nil { + t.Fatal(err) + } + t.Logf("Supports d_type: %v", result) + if result != expected { + t.Fatalf("expected %v, got %v", expected, result) + } +} + +func TestSupportsDTypeWithFType0XFS(t *testing.T) { + testSupportsDType(t, false, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=0") +} + +func TestSupportsDTypeWithFType1XFS(t *testing.T) { + testSupportsDType(t, true, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=1") +} + +func TestSupportsDTypeWithExt4(t *testing.T) { + testSupportsDType(t, true, "mkfs.ext4") +} diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go new file mode 100644 index 0000000..ded091f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go @@ -0,0 +1,100 @@ +package gitutils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" +) + +// Clone clones a repository into a newly created directory which +// will be under "docker-build-git" +func Clone(remoteURL string) (string, error) { + if !urlutil.IsGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + u, err := url.Parse(remoteURL) + if err != nil { + return "", err + } + + fragment := u.Fragment + clone := cloneArgs(u, root) + + if output, err := git(clone...); err != nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + + return checkoutGit(fragment, root) +} + +func cloneArgs(remoteURL *url.URL, root string) []string { + args := []string{"clone", "--recursive"} + shallow := len(remoteURL.Fragment) == 0 + + if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { + res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) + if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + shallow = false + } + } + + if shallow { + args = append(args, "--depth", "1") + } + + if remoteURL.Fragment != "" { + remoteURL.Fragment = "" + } + + return append(args, remoteURL.String(), root) +} + +func checkoutGit(fragment, root string) (string, error) { + refAndDir := strings.SplitN(fragment, ":", 2) + + if len(refAndDir[0]) != 0 { + if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) + if err != nil { + return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go new file mode 100644 index 0000000..d197058 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go @@ -0,0 +1,220 @@ +package gitutils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" +) + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsGit(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsStripFragment(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker#test") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func gitGetConfig(name string) string { + b, err := git([]string{"config", "--get", name}...) + if err != nil { + // since we are interested in empty or non empty string, + // we can safely ignore the err here. + return "" + } + return strings.TrimSpace(string(b)) +} + +func TestCheckoutGit(t *testing.T) { + root, err := ioutil.TempDir("", "docker-build-git-checkout") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + autocrlf := gitGetConfig("core.autocrlf") + if !(autocrlf == "true" || autocrlf == "false" || + autocrlf == "input" || autocrlf == "") { + t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) + } + eol := "\n" + if autocrlf == "true" { + eol = "\r\n" + } + + gitDir := filepath.Join(root, "repo") + _, err = git("init", gitDir) + if err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { + t.Fatal(err) + } + + subDir := filepath.Join(gitDir, "subdir") + if err = os.Mkdir(subDir, 0755); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if runtime.GOOS != "windows" { + if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { + t.Fatal(err) + } + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { + t.Fatal(err) + } + + type singleCase struct { + frag string + exp string + fail bool + } + + cases := []singleCase{ + {"", "FROM scratch", false}, + {"master", "FROM scratch", false}, + {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {":nosubdir", "", true}, // missing directory error + {":Dockerfile", "", true}, // not a directory error + {"master:nosubdir", "", true}, + {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {"master:../subdir", "", true}, + {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, + } + + if runtime.GOOS != "windows" { + // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. Sample output below + // git --work-tree .\repo --git-dir .\repo\.git add -A + // error: readlink("absolutelink"): Function not implemented + // error: unable to index file absolutelink + // fatal: adding files failed + cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + } + + for _, c := range cases { + r, err := checkoutGit(c.frag, gitDir) + + fail := err != nil + if fail != c.fail { + t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) + } + if c.fail { + continue + } + + b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) + if err != nil { + t.Fatal(err) + } + + if string(b) != c.exp { + t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go new file mode 100644 index 0000000..8e61ff3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go @@ -0,0 +1,19 @@ +// +build cgo + +package graphdb + +import ( + "database/sql" + + _ "github.com/mattn/go-sqlite3" // registers sqlite +) + +// NewSqliteConn opens a connection to a sqlite +// database. +func NewSqliteConn(root string) (*Database, error) { + conn, err := sql.Open("sqlite3", root) + if err != nil { + return nil, err + } + return NewDatabase(conn) +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go new file mode 100644 index 0000000..eca433f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go @@ -0,0 +1,551 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "path" + "strings" + "sync" +) + +const ( + createEntityTable = ` + CREATE TABLE IF NOT EXISTS entity ( + id text NOT NULL PRIMARY KEY + );` + + createEdgeTable = ` + CREATE TABLE IF NOT EXISTS edge ( + "entity_id" text NOT NULL, + "parent_id" text NULL, + "name" text NOT NULL, + CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), + CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") + ); + ` + + createEdgeIndices = ` + CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); + ` +) + +// Entity with a unique id. +type Entity struct { + id string +} + +// An Edge connects two entities together. +type Edge struct { + EntityID string + Name string + ParentID string +} + +// Entities stores the list of entities. +type Entities map[string]*Entity + +// Edges stores the relationships between entities. +type Edges []*Edge + +// WalkFunc is a function invoked to process an individual entity. +type WalkFunc func(fullPath string, entity *Entity) error + +// Database is a graph database for storing entities and their relationships. +type Database struct { + conn *sql.DB + mux sync.RWMutex +} + +// IsNonUniqueNameError processes the error to check if it's caused by +// a constraint violation. +// This is necessary because the error isn't the same across various +// sqlite versions. +func IsNonUniqueNameError(err error) bool { + str := err.Error() + // sqlite 3.7.17-1ubuntu1 returns: + // Set failure: Abort due to constraint violation: columns parent_id, name are not unique + if strings.HasSuffix(str, "name are not unique") { + return true + } + // sqlite-3.8.3-1.fc20 returns: + // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name + if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { + return true + } + // sqlite-3.6.20-1.el6 returns: + // Set failure: Abort due to constraint violation: constraint failed + if strings.HasSuffix(str, "constraint failed") { + return true + } + return false +} + +// NewDatabase creates a new graph database initialized with a root entity. +func NewDatabase(conn *sql.DB) (*Database, error) { + if conn == nil { + return nil, fmt.Errorf("Database connection cannot be nil") + } + db := &Database{conn: conn} + + // Create root entities + tx, err := conn.Begin() + if err != nil { + return nil, err + } + + if _, err := tx.Exec(createEntityTable); err != nil { + return nil, err + } + if _, err := tx.Exec(createEdgeTable); err != nil { + return nil, err + } + if _, err := tx.Exec(createEdgeIndices); err != nil { + return nil, err + } + + if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { + tx.Rollback() + return nil, err + } + + if err := tx.Commit(); err != nil { + return nil, err + } + + return db, nil +} + +// Close the underlying connection to the database. +func (db *Database) Close() error { + return db.conn.Close() +} + +// Set the entity id for a given path. +func (db *Database) Set(fullPath, id string) (*Entity, error) { + db.mux.Lock() + defer db.mux.Unlock() + + tx, err := db.conn.Begin() + if err != nil { + return nil, err + } + + var entityID string + if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { + if err == sql.ErrNoRows { + if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { + tx.Rollback() + return nil, err + } + } else { + tx.Rollback() + return nil, err + } + } + e := &Entity{id} + + parentPath, name := splitPath(fullPath) + if err := db.setEdge(parentPath, name, e, tx); err != nil { + tx.Rollback() + return nil, err + } + + if err := tx.Commit(); err != nil { + return nil, err + } + return e, nil +} + +// Exists returns true if a name already exists in the database. +func (db *Database) Exists(name string) bool { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return false + } + return e != nil +} + +func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error { + parent, err := db.get(parentPath) + if err != nil { + return err + } + if parent.id == e.id { + return fmt.Errorf("Cannot set self as child") + } + + if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { + return err + } + return nil +} + +// RootEntity returns the root "/" entity for the database. +func (db *Database) RootEntity() *Entity { + return &Entity{ + id: "0", + } +} + +// Get returns the entity for a given path. +func (db *Database) Get(name string) *Entity { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil + } + return e +} + +func (db *Database) get(name string) (*Entity, error) { + e := db.RootEntity() + // We always know the root name so return it if + // it is requested + if name == "/" { + return e, nil + } + + parts := split(name) + for i := 1; i < len(parts); i++ { + p := parts[i] + if p == "" { + continue + } + + next := db.child(e, p) + if next == nil { + return nil, fmt.Errorf("Cannot find child for %s", name) + } + e = next + } + return e, nil + +} + +// List all entities by from the name. +// The key will be the full path of the entity. +func (db *Database) List(name string, depth int) Entities { + db.mux.RLock() + defer db.mux.RUnlock() + + out := Entities{} + e, err := db.get(name) + if err != nil { + return out + } + + children, err := db.children(e, name, depth, nil) + if err != nil { + return out + } + + for _, c := range children { + out[c.FullPath] = c.Entity + } + return out +} + +// Walk through the child graph of an entity, calling walkFunc for each child entity. +// It is safe for walkFunc to call graph functions. +func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { + children, err := db.Children(name, depth) + if err != nil { + return err + } + + // Note: the database lock must not be held while calling walkFunc + for _, c := range children { + if err := walkFunc(c.FullPath, c.Entity); err != nil { + return err + } + } + return nil +} + +// Children returns the children of the specified entity. +func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil, err + } + + return db.children(e, name, depth, nil) +} + +// Parents returns the parents of a specified entity. +func (db *Database) Parents(name string) ([]string, error) { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil, err + } + return db.parents(e) +} + +// Refs returns the reference count for a specified id. +func (db *Database) Refs(id string) int { + db.mux.RLock() + defer db.mux.RUnlock() + + var count int + if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { + return 0 + } + return count +} + +// RefPaths returns all the id's path references. +func (db *Database) RefPaths(id string) Edges { + db.mux.RLock() + defer db.mux.RUnlock() + + refs := Edges{} + + rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) + if err != nil { + return refs + } + defer rows.Close() + + for rows.Next() { + var name string + var parentID string + if err := rows.Scan(&name, &parentID); err != nil { + return refs + } + refs = append(refs, &Edge{ + EntityID: id, + Name: name, + ParentID: parentID, + }) + } + return refs +} + +// Delete the reference to an entity at a given path. +func (db *Database) Delete(name string) error { + db.mux.Lock() + defer db.mux.Unlock() + + if name == "/" { + return fmt.Errorf("Cannot delete root entity") + } + + parentPath, n := splitPath(name) + parent, err := db.get(parentPath) + if err != nil { + return err + } + + if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ? AND name = ?;", parent.id, n); err != nil { + return err + } + return nil +} + +// Purge removes the entity with the specified id +// Walk the graph to make sure all references to the entity +// are removed and return the number of references removed +func (db *Database) Purge(id string) (int, error) { + db.mux.Lock() + defer db.mux.Unlock() + + tx, err := db.conn.Begin() + if err != nil { + return -1, err + } + + // Delete all edges + rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id) + if err != nil { + tx.Rollback() + return -1, err + } + changes, err := rows.RowsAffected() + if err != nil { + return -1, err + } + + // Clear who's using this id as parent + refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id) + if err != nil { + tx.Rollback() + return -1, err + } + refsCount, err := refs.RowsAffected() + if err != nil { + return -1, err + } + + // Delete entity + if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { + tx.Rollback() + return -1, err + } + + if err := tx.Commit(); err != nil { + return -1, err + } + + return int(changes + refsCount), nil +} + +// Rename an edge for a given path +func (db *Database) Rename(currentName, newName string) error { + db.mux.Lock() + defer db.mux.Unlock() + + parentPath, name := splitPath(currentName) + newParentPath, newEdgeName := splitPath(newName) + + if parentPath != newParentPath { + return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) + } + + parent, err := db.get(parentPath) + if err != nil { + return err + } + + rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) + if err != nil { + return err + } + i, err := rows.RowsAffected() + if err != nil { + return err + } + if i == 0 { + return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) + } + return nil +} + +// WalkMeta stores the walk metadata. +type WalkMeta struct { + Parent *Entity + Entity *Entity + FullPath string + Edge *Edge +} + +func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { + if e == nil { + return entities, nil + } + + rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var entityID, entityName string + if err := rows.Scan(&entityID, &entityName); err != nil { + return nil, err + } + child := &Entity{entityID} + edge := &Edge{ + ParentID: e.id, + Name: entityName, + EntityID: child.id, + } + + meta := WalkMeta{ + Parent: e, + Entity: child, + FullPath: path.Join(name, edge.Name), + Edge: edge, + } + + entities = append(entities, meta) + + if depth != 0 { + nDepth := depth + if depth != -1 { + nDepth-- + } + entities, err = db.children(child, meta.FullPath, nDepth, entities) + if err != nil { + return nil, err + } + } + } + + return entities, nil +} + +func (db *Database) parents(e *Entity) (parents []string, err error) { + if e == nil { + return parents, nil + } + + rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var parentID string + if err := rows.Scan(&parentID); err != nil { + return nil, err + } + parents = append(parents, parentID) + } + + return parents, nil +} + +// Return the entity based on the parent path and name. +func (db *Database) child(parent *Entity, name string) *Entity { + var id string + if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil { + return nil + } + return &Entity{id} +} + +// ID returns the id used to reference this entity. +func (e *Entity) ID() string { + return e.id +} + +// Paths returns the paths sorted by depth. +func (e Entities) Paths() []string { + out := make([]string, len(e)) + var i int + for k := range e { + out[i] = k + i++ + } + sortByDepth(out) + + return out +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go new file mode 100644 index 0000000..f0fb074 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go @@ -0,0 +1,721 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "os" + "path" + "runtime" + "strconv" + "testing" + + _ "github.com/mattn/go-sqlite3" +) + +func newTestDb(t *testing.T) (*Database, string) { + p := path.Join(os.TempDir(), "sqlite.db") + conn, err := sql.Open("sqlite3", p) + db, err := NewDatabase(conn) + if err != nil { + t.Fatal(err) + } + return db, p +} + +func destroyTestDb(dbPath string) { + os.Remove(dbPath) +} + +func TestNewDatabase(t *testing.T) { + db, dbpath := newTestDb(t) + if db == nil { + t.Fatal("Database should not be nil") + } + db.Close() + defer destroyTestDb(dbpath) +} + +func TestCreateRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + root := db.RootEntity() + if root == nil { + t.Fatal("Root entity should not be nil") + } +} + +func TestGetRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + e := db.Get("/") + if e == nil { + t.Fatal("Entity should not be nil") + } + if e.ID() != "0" { + t.Fatalf("Entity id should be 0, got %s", e.ID()) + } +} + +func TestSetEntityWithDifferentName(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/test", "1") + if _, err := db.Set("/other", "1"); err != nil { + t.Fatal(err) + } +} + +func TestSetDuplicateEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/foo", "42"); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/foo", "43"); err == nil { + t.Fatalf("Creating an entry with a duplicate path did not cause an error") + } +} + +func TestCreateChild(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/db", "1") + if err != nil { + t.Fatal(err) + } + if child == nil { + t.Fatal("Child should not be nil") + } + if child.ID() != "1" { + t.Fail() + } +} + +func TestParents(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + + for i := 6; i < 11; i++ { + a := strconv.Itoa(i) + p := strconv.Itoa(i - 5) + + key := fmt.Sprintf("/%s/%s", p, a) + + if _, err := db.Set(key, a); err != nil { + t.Fatal(err) + } + + parents, err := db.Parents(key) + if err != nil { + t.Fatal(err) + } + + if len(parents) != 1 { + t.Fatalf("Expected 1 entry for %s got %d", key, len(parents)) + } + + if parents[0] != p { + t.Fatalf("ID %s received, %s expected", parents[0], p) + } + } +} + +func TestChildren(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + str := "/" + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + + str = "/" + for i := 10; i < 30; i++ { // 20 entities + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + entries, err := db.Children("/", 5) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 11 { + t.Fatalf("Expect 11 entries for / got %d", len(entries)) + } + + entries, err = db.Children("/", 20) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 25 { + t.Fatalf("Expect 25 entries for / got %d", len(entries)) + } +} + +func TestListAllRootChildren(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + entries := db.List("/", -1) + if len(entries) != 5 { + t.Fatalf("Expect 5 entries for / got %d", len(entries)) + } +} + +func TestListAllSubChildren(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + entries := db.List("/webapp", 1) + if len(entries) != 3 { + t.Fatalf("Expect 3 entries for / got %d", len(entries)) + } + + entries = db.List("/webapp", 0) + if len(entries) != 2 { + t.Fatalf("Expect 2 entries for / got %d", len(entries)) + } +} + +func TestAddSelfAsChild(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/test", "1") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/test/other", child.ID()); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestAddChildToNonExistentRoot(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/myapp", "1"); err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestWalkAll(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/db/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Walk("/", func(p string, e *Entity) error { + t.Logf("Path: %s Entity: %s", p, e.ID()) + return nil + }, -1); err != nil { + t.Fatal(err) + } +} + +func TestGetEntityByPath(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + entity := db.Get("/webapp/db/logs") + if entity == nil { + t.Fatal("Entity should not be nil") + } + if entity.ID() != "4" { + t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) + } +} + +func TestEnitiesPaths(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + out := db.List("/", -1) + for _, p := range out.Paths() { + t.Log(p) + } +} + +func TestDeleteRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if err := db.Delete("/"); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestDeleteEntity(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Delete("/webapp/sentry"); err != nil { + t.Fatal(err) + } + entity := db.Get("/webapp/sentry") + if entity != nil { + t.Fatal("Entity /webapp/sentry should be nil") + } +} + +func TestCountRefs(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + if db.Refs("2") != 2 { + t.Fatal("Expect reference count to be 2") + } +} + +func TestPurgeId(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if c := db.Refs("1"); c != 1 { + t.Fatalf("Expect reference count to be 1, got %d", c) + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + count, err := db.Purge("2") + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Fatalf("Expected 2 references to be removed, got %d", count) + } +} + +// Regression test https://github.com/docker/docker/issues/12334 +func TestPurgeIdRefPaths(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + db.Set("/db", "2") + + db.Set("/db/webapp", "1") + + if c := db.Refs("1"); c != 2 { + t.Fatalf("Expected 2 reference for webapp, got %d", c) + } + if c := db.Refs("2"); c != 1 { + t.Fatalf("Expected 1 reference for db, got %d", c) + } + + if rp := db.RefPaths("2"); len(rp) != 1 { + t.Fatalf("Expected 1 reference path for db, got %d", len(rp)) + } + + count, err := db.Purge("2") + if err != nil { + t.Fatal(err) + } + + if count != 2 { + t.Fatalf("Expected 2 rows to be removed, got %d", count) + } + + if c := db.Refs("2"); c != 0 { + t.Fatalf("Expected 0 reference for db, got %d", c) + } + if c := db.Refs("1"); c != 1 { + t.Fatalf("Expected 1 reference for webapp, got %d", c) + } +} + +func TestRename(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + if db.Get("/webapp/db") == nil { + t.Fatal("Cannot find entity at path /webapp/db") + } + + if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil { + t.Fatal(err) + } + if db.Get("/webapp/db") != nil { + t.Fatal("Entity should not exist at /webapp/db") + } + if db.Get("/webapp/newdb") == nil { + t.Fatal("Cannot find entity at path /webapp/newdb") + } + +} + +func TestCreateMultipleNames(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/db", "1") + if _, err := db.Set("/myapp", "1"); err != nil { + t.Fatal(err) + } + + db.Walk("/", func(p string, e *Entity) error { + t.Logf("%s\n", p) + return nil + }, -1) +} + +func TestRefPaths(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + refs := db.RefPaths("2") + if len(refs) != 2 { + t.Fatalf("Expected reference count to be 2, got %d", len(refs)) + } +} + +func TestExistsTrue(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/testing", "1") + + if !db.Exists("/testing") { + t.Fatalf("/tesing should exist") + } +} + +func TestExistsFalse(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/toerhe", "1") + + if db.Exists("/testing") { + t.Fatalf("/tesing should not exist") + } + +} + +func TestGetNameWithTrailingSlash(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/todo", "1") + + e := db.Get("/todo/") + if e == nil { + t.Fatalf("Entity should not be nil") + } +} + +func TestConcurrentWrites(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + errs := make(chan error, 2) + + save := func(name string, id string) { + if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil { + errs <- err + } + errs <- nil + } + purge := func(id string) { + if _, err := db.Purge(id); err != nil { + errs <- err + } + errs <- nil + } + + save("/1", "1") + + go purge("1") + go save("/2", "2") + + any := false + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + any = true + t.Log(err) + } + } + if any { + t.Fail() + } +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go new file mode 100644 index 0000000..c07df07 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go @@ -0,0 +1,27 @@ +package graphdb + +import "sort" + +type pathSorter struct { + paths []string + by func(i, j string) bool +} + +func sortByDepth(paths []string) { + s := &pathSorter{paths, func(i, j string) bool { + return PathDepth(i) > PathDepth(j) + }} + sort.Sort(s) +} + +func (s *pathSorter) Len() int { + return len(s.paths) +} + +func (s *pathSorter) Swap(i, j int) { + s.paths[i], s.paths[j] = s.paths[j], s.paths[i] +} + +func (s *pathSorter) Less(i, j int) bool { + return s.by(s.paths[i], s.paths[j]) +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go new file mode 100644 index 0000000..ddf2266 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go @@ -0,0 +1,29 @@ +package graphdb + +import ( + "testing" +) + +func TestSort(t *testing.T) { + paths := []string{ + "/", + "/myreallylongname", + "/app/db", + } + + sortByDepth(paths) + + if len(paths) != 3 { + t.Fatalf("Expected 3 parts got %d", len(paths)) + } + + if paths[0] != "/app/db" { + t.Fatalf("Expected /app/db got %s", paths[0]) + } + if paths[1] != "/myreallylongname" { + t.Fatalf("Expected /myreallylongname got %s", paths[1]) + } + if paths[2] != "/" { + t.Fatalf("Expected / got %s", paths[2]) + } +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/unsupported.go b/vendor/github.com/docker/docker/pkg/graphdb/unsupported.go new file mode 100644 index 0000000..2b8ba71 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/unsupported.go @@ -0,0 +1,3 @@ +// +build !cgo !linux + +package graphdb diff --git a/vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go new file mode 100644 index 0000000..9edd79c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go @@ -0,0 +1,32 @@ +package graphdb + +import ( + "path" + "strings" +) + +// Split p on / +func split(p string) []string { + return strings.Split(p, "/") +} + +// PathDepth returns the depth or number of / in a given path +func PathDepth(p string) int { + parts := split(p) + if len(parts) == 2 && parts[1] == "" { + return 1 + } + return len(parts) +} + +func splitPath(p string) (parent, name string) { + if p[0] != '/' { + p = "/" + p + } + parent, name = path.Split(p) + l := len(parent) + if parent[l-1] == '/' { + parent = parent[:l-1] + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 0000000..8154e83 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go new file mode 100644 index 0000000..7a95cb2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "path/filepath" + "testing" +) + +func TestGet(t *testing.T) { + home := Get() + if home == "" { + t.Fatal("returned home directory is empty") + } + + if !filepath.IsAbs(home) { + t.Fatalf("returned path is not absolute: %s", home) + } +} + +func TestGetShortcutString(t *testing.T) { + shortcut := GetShortcutString() + if shortcut == "" { + t.Fatal("returned shortcut string is empty") + } +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/github.com/docker/docker/pkg/httputils/httputils.go new file mode 100644 index 0000000..d7dc438 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/httputils.go @@ -0,0 +1,56 @@ +package httputils + +import ( + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/jsonmessage" +) + +var ( + headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) + errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") +) + +// Download requests a given URL and returns an io.Reader. +func Download(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) + } + return resp, nil +} + +// NewHTTPRequestError returns a JSON response error. +func NewHTTPRequestError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} + +// ServerHeader contains the server information. +type ServerHeader struct { + App string // docker + Ver string // 1.8.0-dev + OS string // windows or linux +} + +// ParseServerHeader extracts pieces from an HTTP server header +// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). +func ParseServerHeader(hdr string) (*ServerHeader, error) { + matches := headerRegexp.FindStringSubmatch(hdr) + if len(matches) != 4 { + return nil, errInvalidHeader + } + return &ServerHeader{ + App: strings.TrimSpace(matches[1]), + Ver: strings.TrimSpace(matches[2]), + OS: strings.TrimSpace(matches[3]), + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go new file mode 100644 index 0000000..d35d082 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go @@ -0,0 +1,115 @@ +package httputils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestDownload(t *testing.T) { + expected := "Hello, docker !" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, expected) + })) + defer ts.Close() + response, err := Download(ts.URL) + if err != nil { + t.Fatal(err) + } + + actual, err := ioutil.ReadAll(response.Body) + response.Body.Close() + + if err != nil || string(actual) != expected { + t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) + } +} + +func TestDownload400Errors(t *testing.T) { + expectedError := "Got HTTP status code >= 400: 403 Forbidden" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, "something failed (forbidden)", http.StatusForbidden) + })) + defer ts.Close() + // Expected status code = 403 + if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { + t.Fatalf("Expected the the error %q, got %v", expectedError, err) + } +} + +func TestDownloadOtherErrors(t *testing.T) { + if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { + t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) + } +} + +func TestNewHTTPRequestError(t *testing.T) { + errorMessage := "Some error message" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, errorMessage, http.StatusForbidden) + })) + defer ts.Close() + httpResponse, err := http.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { + t.Fatalf("Expected err to be %q, got %v", errorMessage, err) + } +} + +func TestParseServerHeader(t *testing.T) { + inputs := map[string][]string{ + "bad header": {"error"}, + "(bad header)": {"error"}, + "(without/spaces)": {"error"}, + "(header/with spaces)": {"error"}, + "foo/bar (baz)": {"foo", "bar", "baz"}, + "foo/bar": {"error"}, + "foo": {"error"}, + "foo/bar (baz space)": {"foo", "bar", "baz space"}, + " f f / b b ( b s ) ": {"f f", "b b", "b s"}, + "foo/bar (baz) ignore": {"foo", "bar", "baz"}, + "foo/bar ()": {"error"}, + "foo/bar()": {"error"}, + "foo/bar(baz)": {"foo", "bar", "baz"}, + "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, + "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, + "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, + } + + for header, values := range inputs { + serverHeader, err := ParseServerHeader(header) + if err != nil { + if err != errInvalidHeader { + t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) + } + if values[0] == "error" { + continue + } + t.Fatalf("Header %q failed to parse when it shouldn't have", header) + } + if values[0] == "error" { + t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) + } + + if serverHeader.App != values[0] { + t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) + } + + if serverHeader.Ver != values[1] { + t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) + } + + if serverHeader.OS != values[2] { + t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) + } + + } + +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go new file mode 100644 index 0000000..d5cf34e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go @@ -0,0 +1,30 @@ +package httputils + +import ( + "mime" + "net/http" +) + +// MimeTypes stores the MIME content type. +var MimeTypes = struct { + TextPlain string + Tar string + OctetStream string +}{"text/plain", "application/tar", "application/octet-stream"} + +// DetectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is guaranteed not be nil, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters. +func DetectContentType(c []byte) (string, map[string]string, error) { + + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + + return contentType, args, nil +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go new file mode 100644 index 0000000..9de433e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go @@ -0,0 +1,13 @@ +package httputils + +import ( + "testing" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { + t.Errorf("TestDetectContentType failed") + } +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go new file mode 100644 index 0000000..bebc860 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,95 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +// ResumableRequestReaderWithInitialResponse makes it possible to resume +// reading the body of an already initiated request. +func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go new file mode 100644 index 0000000..5a2906d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go @@ -0,0 +1,307 @@ +package httputils + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResumableRequestHeaderSimpleErrors(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, world !") + })) + defer ts.Close() + + client := &http.Client{} + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedError := "client and request can't be nil\n" + resreq := &resumableRequestReader{} + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + + resreq = &resumableRequestReader{ + client: client, + request: req, + totalSize: -1, + } + expectedError = "failed to auto detect content length" + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + +} + +// Not too much failures, bails out after some wait +func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 2, + } + read, err := resreq.Read([]byte{}) + if err != nil || read != 0 { + t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) + } +} + +// Too much failures, returns the error +func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 1, + } + defer resreq.Close() + + expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` + read, err := resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError || read != 0 { + t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) + } +} + +type errorReaderCloser struct{} + +func (errorReaderCloser) Close() error { return nil } + +func (errorReaderCloser) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("An error occurred") +} + +// If an unknown error is encountered, return 0, nil and log it +func TestResumableRequestReaderWithReadError(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "500 Internal Server", + StatusCode: 500, + ContentLength: 0, + Close: true, + Body: errorReaderCloser{}, + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + read, err := resreq.Read(buf) + if err != nil { + t.Fatal(err) + } + + if read != 0 { + t.Fatalf("Expected to have read nothing, but read %v", read) + } +} + +func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "416 Requested Range Not Satisfiable", + StatusCode: 416, + ContentLength: 0, + Close: true, + Body: ioutil.NopCloser(strings.NewReader("")), + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + _, err = resreq.Read(buf) + if err == nil || err != io.EOF { + t.Fatalf("Expected an io.EOF error, got %v", err) + } +} + +func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Range") == "" { + t.Fatalf("Expected a Range HTTP header, got nothing") + } + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + resreq := &resumableRequestReader{ + client: client, + request: req, + lastRange: 1, + } + defer resreq.Close() + + buf := make([]byte, 2) + _, err = resreq.Read(buf) + if err == nil || err.Error() != "the server doesn't support byte ranges" { + t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err) + } +} + +func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + + resreq := ResumableRequestReader(client, req, retries, 0) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReader(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + resreq := ResumableRequestReader(client, req, retries, imgSize) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReaderWithInitialResponse(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + res, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + + resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 0000000..6bca466 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,197 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAllNewAs creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + + if uidMap != nil { + xUID, err := ToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + uid = xUID + } + if gidMap != nil { + xGID, err := ToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + gid = xGID + } + return uid, gid, nil +} + +// ToContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func ToContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// ToHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func ToHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// CreateIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, nil, err + } + if len(subuidRanges) == 0 { + return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return createIDMap(subuidRanges), createIDMap(subgidRanges), nil +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 0000000..f9eb31c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,207 @@ +// +build !windows + +package idtools + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + if err := os.Chown(path, ownerUID, ownerGID); err != nil { + return err + } + // short-circuit--we were called with an existing directory and chown was requested + return nil + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, uid, gid int) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(uid), + statInfo.GID() == uint32(gid), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go new file mode 100644 index 0000000..540d307 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go @@ -0,0 +1,271 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +type node struct { + uid int + gid int +} + +func TestMkdirAllAs(t *testing.T) { + dirName, err := ioutil.TempDir("", "mkdirall") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should be chowned, but nothing else + if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAllNewAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdirnew") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should NOT be chowned + if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdir") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + } + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should just chown to the requested uid/gid + if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // create a subdir under a dir which doesn't exist--should fail + if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { + t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") + } + + // create a subdir under an existing dir; should only change the ownership of the new subdir + if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr/bin"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func buildTree(base string, tree map[string]node) error { + for path, node := range tree { + fullPath := filepath.Join(base, path) + if err := os.MkdirAll(fullPath, 0755); err != nil { + return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) + } + if err := os.Chown(fullPath, node.uid, node.gid); err != nil { + return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) + } + } + return nil +} + +func readTree(base, root string) (map[string]node, error) { + tree := make(map[string]node) + + dirInfos, err := ioutil.ReadDir(base) + if err != nil { + return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) + } + + for _, info := range dirInfos { + s := &syscall.Stat_t{} + if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { + return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) + } + tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} + if info.IsDir() { + // read the subdirectory + subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) + if err != nil { + return nil, err + } + for path, nodeinfo := range subtree { + tree[path] = nodeinfo + } + } + } + return tree, nil +} + +func compareTrees(left, right map[string]node) error { + if len(left) != len(right) { + return fmt.Errorf("Trees aren't the same size") + } + for path, nodeLeft := range left { + if nodeRight, ok := right[path]; ok { + if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { + // mismatch + return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, + nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) + } + continue + } + return fmt.Errorf("right tree didn't contain path %q", path) + } + return nil +} + +func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "parsesubid") + if err != nil { + t.Fatal(err) + } + fnamePath := filepath.Join(tmpDir, "testsubuid") + fcontent := `tss:100000:65536 +# empty default subuid/subgid file + +dockremap:231072:65536` + if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil { + t.Fatal(err) + } + ranges, err := parseSubidFile(fnamePath, "dockremap") + if err != nil { + t.Fatal(err) + } + if len(ranges) != 1 { + t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges)) + } + if ranges[0].Start != 231072 { + t.Fatalf("wanted 231072, got %d instead", ranges[0].Start) + } + if ranges[0].Length != 65536 { + t.Fatalf("wanted 65536, got %d instead", ranges[0].Length) + } +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 0000000..49f67e7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, uid, gid int) bool { + return true +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 0000000..9da7975 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 0000000..d98b354 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go new file mode 100644 index 0000000..9703ecb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/docker/docker/pkg/integration/checker/checker.go b/vendor/github.com/docker/docker/pkg/integration/checker/checker.go new file mode 100644 index 0000000..d1b703a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/checker/checker.go @@ -0,0 +1,46 @@ +// Package checker provides Docker specific implementations of the go-check.Checker interface. +package checker + +import ( + "github.com/go-check/check" + "github.com/vdemeester/shakers" +) + +// As a commodity, we bring all check.Checker variables into the current namespace to avoid having +// to think about check.X versus checker.X. +var ( + DeepEquals = check.DeepEquals + ErrorMatches = check.ErrorMatches + FitsTypeOf = check.FitsTypeOf + HasLen = check.HasLen + Implements = check.Implements + IsNil = check.IsNil + Matches = check.Matches + Not = check.Not + NotNil = check.NotNil + PanicMatches = check.PanicMatches + Panics = check.Panics + + Contains = shakers.Contains + ContainsAny = shakers.ContainsAny + Count = shakers.Count + Equals = shakers.Equals + EqualFold = shakers.EqualFold + False = shakers.False + GreaterOrEqualThan = shakers.GreaterOrEqualThan + GreaterThan = shakers.GreaterThan + HasPrefix = shakers.HasPrefix + HasSuffix = shakers.HasSuffix + Index = shakers.Index + IndexAny = shakers.IndexAny + IsAfter = shakers.IsAfter + IsBefore = shakers.IsBefore + IsBetween = shakers.IsBetween + IsLower = shakers.IsLower + IsUpper = shakers.IsUpper + LessOrEqualThan = shakers.LessOrEqualThan + LessThan = shakers.LessThan + TimeEquals = shakers.TimeEquals + True = shakers.True + TimeIgnore = shakers.TimeIgnore +) diff --git a/vendor/github.com/docker/docker/pkg/integration/cmd/command.go b/vendor/github.com/docker/docker/pkg/integration/cmd/command.go new file mode 100644 index 0000000..76d04e8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/cmd/command.go @@ -0,0 +1,294 @@ +package cmd + +import ( + "bytes" + "fmt" + "io" + "os/exec" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +type testingT interface { + Fatalf(string, ...interface{}) +} + +const ( + // None is a token to inform Result.Assert that the output should be empty + None string = "" +) + +type lockedBuffer struct { + m sync.RWMutex + buf bytes.Buffer +} + +func (buf *lockedBuffer) Write(b []byte) (int, error) { + buf.m.Lock() + defer buf.m.Unlock() + return buf.buf.Write(b) +} + +func (buf *lockedBuffer) String() string { + buf.m.RLock() + defer buf.m.RUnlock() + return buf.buf.String() +} + +// Result stores the result of running a command +type Result struct { + Cmd *exec.Cmd + ExitCode int + Error error + // Timeout is true if the command was killed because it ran for too long + Timeout bool + outBuffer *lockedBuffer + errBuffer *lockedBuffer +} + +// Assert compares the Result against the Expected struct, and fails the test if +// any of the expcetations are not met. +func (r *Result) Assert(t testingT, exp Expected) { + err := r.Compare(exp) + if err == nil { + return + } + + _, file, line, _ := runtime.Caller(1) + t.Fatalf("at %s:%d\n%s", filepath.Base(file), line, err.Error()) +} + +// Compare returns an formatted error with the command, stdout, stderr, exit +// code, and any failed expectations +func (r *Result) Compare(exp Expected) error { + errors := []string{} + add := func(format string, args ...interface{}) { + errors = append(errors, fmt.Sprintf(format, args...)) + } + + if exp.ExitCode != r.ExitCode { + add("ExitCode was %d expected %d", r.ExitCode, exp.ExitCode) + } + if exp.Timeout != r.Timeout { + if exp.Timeout { + add("Expected command to timeout") + } else { + add("Expected command to finish, but it hit the timeout") + } + } + if !matchOutput(exp.Out, r.Stdout()) { + add("Expected stdout to contain %q", exp.Out) + } + if !matchOutput(exp.Err, r.Stderr()) { + add("Expected stderr to contain %q", exp.Err) + } + switch { + // If a non-zero exit code is expected there is going to be an error. + // Don't require an error message as well as an exit code because the + // error message is going to be "exit status which is not useful + case exp.Error == "" && exp.ExitCode != 0: + case exp.Error == "" && r.Error != nil: + add("Expected no error") + case exp.Error != "" && r.Error == nil: + add("Expected error to contain %q, but there was no error", exp.Error) + case exp.Error != "" && !strings.Contains(r.Error.Error(), exp.Error): + add("Expected error to contain %q", exp.Error) + } + + if len(errors) == 0 { + return nil + } + return fmt.Errorf("%s\nFailures:\n%s\n", r, strings.Join(errors, "\n")) +} + +func matchOutput(expected string, actual string) bool { + switch expected { + case None: + return actual == "" + default: + return strings.Contains(actual, expected) + } +} + +func (r *Result) String() string { + var timeout string + if r.Timeout { + timeout = " (timeout)" + } + + return fmt.Sprintf(` +Command: %s +ExitCode: %d%s, Error: %s +Stdout: %v +Stderr: %v +`, + strings.Join(r.Cmd.Args, " "), + r.ExitCode, + timeout, + r.Error, + r.Stdout(), + r.Stderr()) +} + +// Expected is the expected output from a Command. This struct is compared to a +// Result struct by Result.Assert(). +type Expected struct { + ExitCode int + Timeout bool + Error string + Out string + Err string +} + +// Success is the default expected result +var Success = Expected{} + +// Stdout returns the stdout of the process as a string +func (r *Result) Stdout() string { + return r.outBuffer.String() +} + +// Stderr returns the stderr of the process as a string +func (r *Result) Stderr() string { + return r.errBuffer.String() +} + +// Combined returns the stdout and stderr combined into a single string +func (r *Result) Combined() string { + return r.outBuffer.String() + r.errBuffer.String() +} + +// SetExitError sets Error and ExitCode based on Error +func (r *Result) SetExitError(err error) { + if err == nil { + return + } + r.Error = err + r.ExitCode = system.ProcessExitCode(err) +} + +type matches struct{} + +// Info returns the CheckerInfo +func (m *matches) Info() *check.CheckerInfo { + return &check.CheckerInfo{ + Name: "CommandMatches", + Params: []string{"result", "expected"}, + } +} + +// Check compares a result against the expected +func (m *matches) Check(params []interface{}, names []string) (bool, string) { + result, ok := params[0].(*Result) + if !ok { + return false, fmt.Sprintf("result must be a *Result, not %T", params[0]) + } + expected, ok := params[1].(Expected) + if !ok { + return false, fmt.Sprintf("expected must be an Expected, not %T", params[1]) + } + + err := result.Compare(expected) + if err == nil { + return true, "" + } + return false, err.Error() +} + +// Matches is a gocheck.Checker for comparing a Result against an Expected +var Matches = &matches{} + +// Cmd contains the arguments and options for a process to run as part of a test +// suite. +type Cmd struct { + Command []string + Timeout time.Duration + Stdin io.Reader + Stdout io.Writer + Dir string + Env []string +} + +// RunCmd runs a command and returns a Result +func RunCmd(cmd Cmd) *Result { + result := StartCmd(cmd) + if result.Error != nil { + return result + } + return WaitOnCmd(cmd.Timeout, result) +} + +// RunCommand parses a command line and runs it, returning a result +func RunCommand(command string, args ...string) *Result { + return RunCmd(Cmd{Command: append([]string{command}, args...)}) +} + +// StartCmd starts a command, but doesn't wait for it to finish +func StartCmd(cmd Cmd) *Result { + result := buildCmd(cmd) + if result.Error != nil { + return result + } + result.SetExitError(result.Cmd.Start()) + return result +} + +func buildCmd(cmd Cmd) *Result { + var execCmd *exec.Cmd + switch len(cmd.Command) { + case 1: + execCmd = exec.Command(cmd.Command[0]) + default: + execCmd = exec.Command(cmd.Command[0], cmd.Command[1:]...) + } + outBuffer := new(lockedBuffer) + errBuffer := new(lockedBuffer) + + execCmd.Stdin = cmd.Stdin + execCmd.Dir = cmd.Dir + execCmd.Env = cmd.Env + if cmd.Stdout != nil { + execCmd.Stdout = io.MultiWriter(outBuffer, cmd.Stdout) + } else { + execCmd.Stdout = outBuffer + } + execCmd.Stderr = errBuffer + return &Result{ + Cmd: execCmd, + outBuffer: outBuffer, + errBuffer: errBuffer, + } +} + +// WaitOnCmd waits for a command to complete. If timeout is non-nil then +// only wait until the timeout. +func WaitOnCmd(timeout time.Duration, result *Result) *Result { + if timeout == time.Duration(0) { + result.SetExitError(result.Cmd.Wait()) + return result + } + + done := make(chan error, 1) + // Wait for command to exit in a goroutine + go func() { + done <- result.Cmd.Wait() + }() + + select { + case <-time.After(timeout): + killErr := result.Cmd.Process.Kill() + if killErr != nil { + fmt.Printf("failed to kill (pid=%d): %v\n", result.Cmd.Process.Pid, killErr) + } + result.Timeout = true + case err := <-done: + result.SetExitError(err) + } + return result +} diff --git a/vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go b/vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go new file mode 100644 index 0000000..df23442 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go @@ -0,0 +1,118 @@ +package cmd + +import ( + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestRunCommand(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + var cmd string + if runtime.GOOS == "solaris" { + cmd = "gls" + } else { + cmd = "ls" + } + result := RunCommand(cmd) + result.Assert(t, Expected{}) + + result = RunCommand("doesnotexists") + expectedError := `exec: "doesnotexists": executable file not found` + result.Assert(t, Expected{ExitCode: 127, Error: expectedError}) + + result = RunCommand(cmd, "-z") + result.Assert(t, Expected{ + ExitCode: 2, + Error: "exit status 2", + Err: "invalid option", + }) + assert.Contains(t, result.Combined(), "invalid option") +} + +func TestRunCommandWithCombined(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + result := RunCommand("ls", "-a") + result.Assert(t, Expected{}) + + assert.Contains(t, result.Combined(), "..") + assert.Contains(t, result.Stdout(), "..") +} + +func TestRunCommandWithTimeoutFinished(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + result := RunCmd(Cmd{ + Command: []string{"ls", "-a"}, + Timeout: 50 * time.Millisecond, + }) + result.Assert(t, Expected{Out: ".."}) +} + +func TestRunCommandWithTimeoutKilled(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + command := []string{"sh", "-c", "while true ; do echo 1 ; sleep .5 ; done"} + result := RunCmd(Cmd{Command: command, Timeout: 1250 * time.Millisecond}) + result.Assert(t, Expected{Timeout: true}) + + ones := strings.Split(result.Stdout(), "\n") + assert.Equal(t, len(ones), 4) +} + +func TestRunCommandWithErrors(t *testing.T) { + result := RunCommand("/foobar") + result.Assert(t, Expected{Error: "foobar", ExitCode: 127}) +} + +func TestRunCommandWithStdoutStderr(t *testing.T) { + result := RunCommand("echo", "hello", "world") + result.Assert(t, Expected{Out: "hello world\n", Err: None}) +} + +func TestRunCommandWithStdoutStderrError(t *testing.T) { + result := RunCommand("doesnotexists") + + expected := `exec: "doesnotexists": executable file not found` + result.Assert(t, Expected{Out: None, Err: None, ExitCode: 127, Error: expected}) + + switch runtime.GOOS { + case "windows": + expected = "ls: unknown option" + case "solaris": + expected = "gls: invalid option" + default: + expected = "ls: invalid option" + } + + var cmd string + if runtime.GOOS == "solaris" { + cmd = "gls" + } else { + cmd = "ls" + } + result = RunCommand(cmd, "-z") + result.Assert(t, Expected{ + Out: None, + Err: expected, + ExitCode: 2, + Error: "exit status 2", + }) +} diff --git a/vendor/github.com/docker/docker/pkg/integration/utils.go b/vendor/github.com/docker/docker/pkg/integration/utils.go new file mode 100644 index 0000000..f2089c4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/utils.go @@ -0,0 +1,227 @@ +package integration + +import ( + "archive/tar" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "syscall" + "time" + + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/system" +) + +// IsKilled process the specified error and returns whether the process was killed or not. +func IsKilled(err error) bool { + if exitErr, ok := err.(*exec.ExitError); ok { + status, ok := exitErr.Sys().(syscall.WaitStatus) + if !ok { + return false + } + // status.ExitStatus() is required on Windows because it does not + // implement Signal() nor Signaled(). Just check it had a bad exit + // status could mean it was killed (and in tests we do kill) + return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0 + } + return false +} + +func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { + exitCode = 0 + out, err := cmd.CombinedOutput() + exitCode = system.ProcessExitCode(err) + output = string(out) + return +} + +// RunCommandPipelineWithOutput runs the array of commands with the output +// of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do). +// It returns the final output, the exitCode different from 0 and the error +// if something bad happened. +func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { + if len(cmds) < 2 { + return "", 0, errors.New("pipeline does not have multiple cmds") + } + + // connect stdin of each cmd to stdout pipe of previous cmd + for i, cmd := range cmds { + if i > 0 { + prevCmd := cmds[i-1] + cmd.Stdin, err = prevCmd.StdoutPipe() + + if err != nil { + return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) + } + } + } + + // start all cmds except the last + for _, cmd := range cmds[:len(cmds)-1] { + if err = cmd.Start(); err != nil { + return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) + } + } + + defer func() { + var pipeErrMsgs []string + // wait all cmds except the last to release their resources + for _, cmd := range cmds[:len(cmds)-1] { + if pipeErr := cmd.Wait(); pipeErr != nil { + pipeErrMsgs = append(pipeErrMsgs, fmt.Sprintf("command %s failed with error: %v", cmd.Path, pipeErr)) + } + } + if len(pipeErrMsgs) > 0 && err == nil { + err = fmt.Errorf("pipelineError from Wait: %v", strings.Join(pipeErrMsgs, ", ")) + } + }() + + // wait on last cmd + return runCommandWithOutput(cmds[len(cmds)-1]) +} + +// ConvertSliceOfStringsToMap converts a slices of string in a map +// with the strings as key and an empty string as values. +func ConvertSliceOfStringsToMap(input []string) map[string]struct{} { + output := make(map[string]struct{}) + for _, v := range input { + output[v] = struct{}{} + } + return output +} + +// CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory) +// and returns an error if different. +func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { + var ( + e1Entries = make(map[string]struct{}) + e2Entries = make(map[string]struct{}) + ) + for _, e := range e1 { + e1Entries[e.Name()] = struct{}{} + } + for _, e := range e2 { + e2Entries[e.Name()] = struct{}{} + } + if !reflect.DeepEqual(e1Entries, e2Entries) { + return fmt.Errorf("entries differ") + } + return nil +} + +// ListTar lists the entries of a tar. +func ListTar(f io.Reader) ([]string, error) { + tr := tar.NewReader(f) + var entries []string + + for { + th, err := tr.Next() + if err == io.EOF { + // end of tar archive + return entries, nil + } + if err != nil { + return entries, err + } + entries = append(entries, th.Name) + } +} + +// RandomTmpDirPath provides a temporary path with rand string appended. +// does not create or checks if it exists. +func RandomTmpDirPath(s string, platform string) string { + tmp := "/tmp" + if platform == "windows" { + tmp = os.Getenv("TEMP") + } + path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10))) + if platform == "windows" { + return filepath.FromSlash(path) // Using \ + } + return filepath.ToSlash(path) // Using / +} + +// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping +// for interval duration. Returns total read bytes. Send true to the +// stop channel to return before reading to EOF on the reader. +func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { + buffer := make([]byte, chunkSize) + for { + var readBytes int + readBytes, err = reader.Read(buffer) + n += readBytes + if err != nil { + if err == io.EOF { + err = nil + } + return + } + select { + case <-stop: + return + case <-time.After(interval): + } + } +} + +// ParseCgroupPaths parses 'procCgroupData', which is output of '/proc//cgroup', and returns +// a map which cgroup name as key and path as value. +func ParseCgroupPaths(procCgroupData string) map[string]string { + cgroupPaths := map[string]string{} + for _, line := range strings.Split(procCgroupData, "\n") { + parts := strings.Split(line, ":") + if len(parts) != 3 { + continue + } + cgroupPaths[parts[1]] = parts[2] + } + return cgroupPaths +} + +// ChannelBuffer holds a chan of byte array that can be populate in a goroutine. +type ChannelBuffer struct { + C chan []byte +} + +// Write implements Writer. +func (c *ChannelBuffer) Write(b []byte) (int, error) { + c.C <- b + return len(b), nil +} + +// Close closes the go channel. +func (c *ChannelBuffer) Close() error { + close(c.C) + return nil +} + +// ReadTimeout reads the content of the channel in the specified byte array with +// the specified duration as timeout. +func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { + select { + case b := <-c.C: + return copy(p[0:], b), nil + case <-time.After(n): + return -1, fmt.Errorf("timeout reading from channel") + } +} + +// RunAtDifferentDate runs the specified function with the given time. +// It changes the date of the system, which can led to weird behaviors. +func RunAtDifferentDate(date time.Time, block func()) { + // Layout for date. MMDDhhmmYYYY + const timeLayout = "010203042006" + // Ensure we bring time back to now + now := time.Now().Format(timeLayout) + defer icmd.RunCommand("date", now) + + icmd.RunCommand("date", date.Format(timeLayout)) + block() + return +} diff --git a/vendor/github.com/docker/docker/pkg/integration/utils_test.go b/vendor/github.com/docker/docker/pkg/integration/utils_test.go new file mode 100644 index 0000000..0b2ef4a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/utils_test.go @@ -0,0 +1,363 @@ +package integration + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +func TestIsKilledFalseWithNonKilledProcess(t *testing.T) { + var lsCmd *exec.Cmd + if runtime.GOOS != "windows" { + lsCmd = exec.Command("ls") + } else { + lsCmd = exec.Command("cmd", "/c", "dir") + } + + err := lsCmd.Run() + if IsKilled(err) { + t.Fatalf("Expected the ls command to not be killed, was.") + } +} + +func TestIsKilledTrueWithKilledProcess(t *testing.T) { + var longCmd *exec.Cmd + if runtime.GOOS != "windows" { + longCmd = exec.Command("top") + } else { + longCmd = exec.Command("powershell", "while ($true) { sleep 1 }") + } + + // Start a command + err := longCmd.Start() + if err != nil { + t.Fatal(err) + } + // Capture the error when *dying* + done := make(chan error, 1) + go func() { + done <- longCmd.Wait() + }() + // Then kill it + longCmd.Process.Kill() + // Get the error + err = <-done + if !IsKilled(err) { + t.Fatalf("Expected the command to be killed, was not.") + } +} + +func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) { + _, _, err := RunCommandPipelineWithOutput(exec.Command("ls")) + expectedError := "pipeline does not have multiple cmds" + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with %s, got err:%s", expectedError, err) + } +} + +func TestRunCommandPipelineWithOutputErrors(t *testing.T) { + p := "$PATH" + if runtime.GOOS == "windows" { + p = "%PATH%" + } + cmd1 := exec.Command("ls") + cmd1.Stdout = os.Stdout + cmd2 := exec.Command("anything really") + _, _, err := RunCommandPipelineWithOutput(cmd1, cmd2) + if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" { + t.Fatalf("Expected an error, got %v", err) + } + + cmdWithError := exec.Command("doesnotexists") + cmdCat := exec.Command("cat") + _, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat) + if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in `+p { + t.Fatalf("Expected an error, got %v", err) + } +} + +func TestRunCommandPipelineWithOutput(t *testing.T) { + //TODO: Should run on Solaris + if runtime.GOOS == "solaris" { + t.Skip() + } + cmds := []*exec.Cmd{ + // Print 2 characters + exec.Command("echo", "-n", "11"), + // Count the number or char from stdin (previous command) + exec.Command("wc", "-m"), + } + out, exitCode, err := RunCommandPipelineWithOutput(cmds...) + expectedOutput := "2\n" + if out != expectedOutput || exitCode != 0 || err != nil { + t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err) + } +} + +func TestConvertSliceOfStringsToMap(t *testing.T) { + input := []string{"a", "b"} + actual := ConvertSliceOfStringsToMap(input) + for _, key := range input { + if _, ok := actual[key]; !ok { + t.Fatalf("Expected output to contains key %s, did not: %v", key, actual) + } + } +} + +func TestCompareDirectoryEntries(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + file1 := filepath.Join(tmpFolder, "file1") + file2 := filepath.Join(tmpFolder, "file2") + os.Create(file1) + os.Create(file2) + + fi1, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi1bis, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi2, err := os.Stat(file2) + if err != nil { + t.Fatal(err) + } + + cases := []struct { + e1 []os.FileInfo + e2 []os.FileInfo + shouldError bool + }{ + // Empty directories + { + []os.FileInfo{}, + []os.FileInfo{}, + false, + }, + // Same FileInfos + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1}, + false, + }, + // Different FileInfos but same names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1bis}, + false, + }, + // Different FileInfos, different names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi2}, + true, + }, + } + for _, elt := range cases { + err := CompareDirectoryEntries(elt.e1, elt.e2) + if elt.shouldError && err == nil { + t.Fatalf("Should have return an error, did not with %v and %v", elt.e1, elt.e2) + } + if !elt.shouldError && err != nil { + t.Fatalf("Should have not returned an error, but did : %v with %v and %v", err, elt.e1, elt.e2) + } + } +} + +// FIXME make an "unhappy path" test for ListTar without "panicking" :-) +func TestListTar(t *testing.T) { + // TODO Windows: Figure out why this fails. Should be portable. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows - needs further investigation") + } + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + // Let's create a Tar file + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + reader, err := os.Open(tarFile) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + entries, err := ListTar(reader) + if err != nil { + t.Fatal(err) + } + if len(entries) != 1 && entries[0] != "src" { + t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries) + } +} + +func TestRandomTmpDirPath(t *testing.T) { + path := RandomTmpDirPath("something", runtime.GOOS) + + prefix := "/tmp/something" + if runtime.GOOS == "windows" { + prefix = os.Getenv("TEMP") + `\something` + } + expectedSize := len(prefix) + 11 + + if !strings.HasPrefix(path, prefix) { + t.Fatalf("Expected generated path to have '%s' as prefix, got %s'", prefix, path) + } + if len(path) != expectedSize { + t.Fatalf("Expected generated path to be %d, got %d", expectedSize, len(path)) + } +} + +func TestConsumeWithSpeed(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 1*time.Second, nil) + if err != nil { + t.Fatal(err) + } + + if bytes1 != 10 { + t.Fatalf("Expected to have read 10 bytes, got %d", bytes1) + } + +} + +func TestConsumeWithSpeedWithStop(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + stopIt := make(chan bool) + + go func() { + time.Sleep(1 * time.Millisecond) + stopIt <- true + }() + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt) + if err != nil { + t.Fatal(err) + } + + if bytes1 != 2 { + t.Fatalf("Expected to have read 2 bytes, got %d", bytes1) + } + +} + +func TestParseCgroupPathsEmpty(t *testing.T) { + cgroupMap := ParseCgroupPaths("") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("\n") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("something:else\nagain:here") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } +} + +func TestParseCgroupPaths(t *testing.T) { + cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b") + if len(cgroupMap) != 2 { + t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap) + } + if value, ok := cgroupMap["memory"]; !ok || value != "/a" { + t.Fatalf("Expected cgroupMap to contains an entry for 'memory' with value '/a', got %v", cgroupMap) + } + if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" { + t.Fatalf("Expected cgroupMap to contains an entry for 'cpuset' with value '/b', got %v", cgroupMap) + } +} + +func TestChannelBufferTimeout(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + done := make(chan struct{}, 1) + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + done <- struct{}{} + }() + + // Wait long enough + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 50*time.Millisecond) + if err == nil && err.Error() != "timeout reading from channel" { + t.Fatalf("Expected an error, got %s", err) + } + <-done +} + +func TestChannelBuffer(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + }() + + // Wait long enough + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 200*time.Millisecond) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("Expected '%s', got '%s'", expected, string(b)) + } +} + +// FIXME doesn't work +// func TestRunAtDifferentDate(t *testing.T) { +// var date string + +// // Layout for date. MMDDhhmmYYYY +// const timeLayout = "20060102" +// expectedDate := "20100201" +// theDate, err := time.Parse(timeLayout, expectedDate) +// if err != nil { +// t.Fatal(err) +// } + +// RunAtDifferentDate(theDate, func() { +// cmd := exec.Command("date", "+%Y%M%d") +// out, err := cmd.Output() +// if err != nil { +// t.Fatal(err) +// } +// date = string(out) +// }) +// } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 0000000..3d737b3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go new file mode 100644 index 0000000..41098fa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go @@ -0,0 +1,75 @@ +package ioutils + +import ( + "bytes" + "testing" +) + +func TestFixedBufferWrite(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 64)} + n, err := buf.Write([]byte("hello")) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes written, got %d", n) + } + + if string(buf.buf[:5]) != "hello" { + t.Fatalf("expected \"hello\", got %q", string(buf.buf[:5])) + } + + n, err = buf.Write(bytes.Repeat([]byte{1}, 64)) + if err != errBufferFull { + t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64]) + } +} + +func TestFixedBufferRead(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 64)} + if _, err := buf.Write([]byte("hello world")); err != nil { + t.Fatal(err) + } + + b := make([]byte, 5) + n, err := buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes read, got %d - %s", n, buf.String()) + } + + if string(b) != "hello" { + t.Fatalf("expected \"hello\", got %q", string(b)) + } + + n, err = buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes read, got %d", n) + } + + if string(b) != " worl" { + t.Fatalf("expected \" worl\", got %s", string(b)) + } + + b = b[:1] + n, err = buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 1 { + t.Fatalf("expected 1 byte read, got %d - %s", n, buf.String()) + } + + if string(b) != "d" { + t.Fatalf("expected \"d\", got %s", string(b)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 0000000..72a04f3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package ioutils + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go new file mode 100644 index 0000000..300fb5f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go @@ -0,0 +1,159 @@ +package ioutils + +import ( + "crypto/sha1" + "encoding/hex" + "math/rand" + "testing" + "time" +) + +func TestBytesPipeRead(t *testing.T) { + buf := NewBytesPipe() + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + rd := make([]byte, 4) + n, err := buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "1234" { + t.Fatalf("Read %s, but must be %s", rd, "1234") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "5678" { + t.Fatalf("Read %s, but must be %s", rd, "5679") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 2 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) + } + if string(rd[:n]) != "90" { + t.Fatalf("Read %s, but must be %s", rd, "90") + } +} + +func TestBytesPipeWrite(t *testing.T) { + buf := NewBytesPipe() + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + if buf.buf[0].String() != "1234567890" { + t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890") + } +} + +// Write and read in different speeds/chunk sizes and check valid data is read. +func TestBytesPipeWriteRandomChunks(t *testing.T) { + cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ + {100, 10, 1}, + {1000, 10, 5}, + {1000, 100, 0}, + {1000, 5, 6}, + {10000, 50, 25}, + } + + testMessage := []byte("this is a random string for testing") + // random slice sizes to read and write + writeChunks := []int{25, 35, 15, 20} + readChunks := []int{5, 45, 20, 25} + + for _, c := range cases { + // first pass: write directly to hash + hash := sha1.New() + for i := 0; i < c.iterations*c.writesPerLoop; i++ { + if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { + t.Fatal(err) + } + } + expected := hex.EncodeToString(hash.Sum(nil)) + + // write/read through buffer + buf := NewBytesPipe() + hash.Reset() + + done := make(chan struct{}) + + go func() { + // random delay before read starts + <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) + for i := 0; ; i++ { + p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) + n, _ := buf.Read(p) + if n == 0 { + break + } + hash.Write(p[:n]) + } + + close(done) + }() + + for i := 0; i < c.iterations; i++ { + for w := 0; w < c.writesPerLoop; w++ { + buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) + } + } + buf.Close() + <-done + + actual := hex.EncodeToString(hash.Sum(nil)) + + if expected != actual { + t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual) + } + + } +} + +func BenchmarkBytesPipeWrite(b *testing.B) { + testData := []byte("pretty short line, because why not?") + for i := 0; i < b.N; i++ { + readBuf := make([]byte, 1024) + buf := NewBytesPipe() + go func() { + var err error + for err == nil { + _, err = buf.Read(readBuf) + } + }() + for j := 0; j < 1000; j++ { + buf.Write(testData) + } + buf.Close() + } +} + +func BenchmarkBytesPipeRead(b *testing.B) { + rd := make([]byte, 512) + for i := 0; i < b.N; i++ { + b.StopTimer() + buf := NewBytesPipe() + for j := 0; j < 500; j++ { + buf.Write(make([]byte, 1024)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + if n, _ := buf.Read(rd); n != 512 { + b.Fatalf("Wrong number of bytes: %d", n) + } + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go new file mode 100644 index 0000000..0b04b0b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go @@ -0,0 +1,22 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} + +// FprintfIfTrue prints the boolean value if it's true +func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { + if ok { + return fmt.Fprintf(w, format, ok) + } + return 0, nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go new file mode 100644 index 0000000..8968863 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go @@ -0,0 +1,17 @@ +package ioutils + +import "testing" + +func TestFprintfIfNotEmpty(t *testing.T) { + wc := NewWriteCounter(&NopWriter{}) + n, _ := FprintfIfNotEmpty(wc, "foo%s", "") + + if wc.Count != 0 || n != 0 { + t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) + } + + n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") + if wc.Count != 6 || n != 6 { + t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go new file mode 100644 index 0000000..a56c462 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go new file mode 100644 index 0000000..c4d1419 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go @@ -0,0 +1,132 @@ +package ioutils + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" +) + +var ( + testMode os.FileMode = 0640 +) + +func init() { + // Windows does not support full Linux file mode + if runtime.GOOS == "windows" { + testMode = 0666 + } +} + +func TestAtomicWriteToFile(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writers-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + expected := []byte("barbaz") + if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if bytes.Compare(actual, expected) != 0 { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } +} + +func TestAtomicWriteSetCommit(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + targetDir := filepath.Join(tmpDir, "target") + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } + + if err := ws.Commit(targetDir); err != nil { + t.Fatalf("Error committing file: %s", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if bytes.Compare(actual, expected) != 0 { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } + +} + +func TestAtomicWriteSetCancel(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if err := ws.Cancel(); err != nil { + t.Fatalf("Error committing file: %s", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(tmpDir, "target", "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } else if !os.IsNotExist(err) { + t.Fatalf("Unexpected error reading file: %s", err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go new file mode 100644 index 0000000..d7b9748 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go @@ -0,0 +1,223 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + + var offsetTo int64 + + for _, rdr := range r.readers { + size, err := getReadSeekerSize(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo+size > offset { + return rdr, offset - offsetTo, nil + } + if rdr == r.readers[len(r.readers)-1] { + return rdr, offsetTo + offset, nil + } + offsetTo += size + } + + return nil, 0, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bLen := int64(len(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bLen) + if err != nil && err != io.EOF { + return -1, err + } + bLen -= readBytes + + if bLen == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. +func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go new file mode 100644 index 0000000..65309a9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go @@ -0,0 +1,211 @@ +package ioutils + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) != expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} + +func TestMultiReadSeekerCurAfterSet(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + mid := int64(s1.Len() + s2.Len()/2) + + size, err := mr.Seek(mid, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if size != mid { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid) + } + + size, err = mr.Seek(3, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+3 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+3) + } + size, err = mr.Seek(5, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+8 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+8) + } + + size, err = mr.Seek(10, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+18 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+18) + } +} + +func TestMultiReadSeekerSmallReads(t *testing.T) { + readers := []io.ReadSeeker{} + for i := 0; i < 10; i++ { + integer := make([]byte, 4, 4) + binary.BigEndian.PutUint32(integer, uint32(i)) + readers = append(readers, bytes.NewReader(integer)) + } + + reader := MultiReadSeeker(readers...) + for i := 0; i < 10; i++ { + var integer uint32 + if err := binary.Read(reader, binary.BigEndian, &integer); err != nil { + t.Fatalf("Read from NewMultiReadSeeker failed: %v", err) + } + if uint32(i) != integer { + t.Fatalf("Read wrong value from NewMultiReadSeeker: %d != %d", i, integer) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 0000000..63f3c07 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,154 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go new file mode 100644 index 0000000..9abc105 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go @@ -0,0 +1,94 @@ +package ioutils + +import ( + "fmt" + "io/ioutil" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +// Implement io.Reader +type errorReader struct{} + +func (r *errorReader) Read(p []byte) (int, error) { + return 0, fmt.Errorf("Error reader always fail.") +} + +func TestReadCloserWrapperClose(t *testing.T) { + reader := strings.NewReader("A string reader") + wrapper := NewReadCloserWrapper(reader, func() error { + return fmt.Errorf("This will be called when closing") + }) + err := wrapper.Close() + if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { + t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") + } +} + +func TestReaderErrWrapperReadOnError(t *testing.T) { + called := false + reader := &errorReader{} + wrapper := NewReaderErrWrapper(reader, func() { + called = true + }) + _, err := wrapper.Read([]byte{}) + if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { + t.Fatalf("readErrWrapper should returned an error") + } + if !called { + t.Fatalf("readErrWrapper should have call the anonymous function on failure") + } +} + +func TestReaderErrWrapperRead(t *testing.T) { + reader := strings.NewReader("a string reader.") + wrapper := NewReaderErrWrapper(reader, func() { + t.Fatalf("readErrWrapper should not have called the anonymous function") + }) + // Read 20 byte (should be ok with the string above) + num, err := wrapper.Read(make([]byte, 20)) + if err != nil { + t.Fatal(err) + } + if num != 16 { + t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) + } +} + +func TestHashData(t *testing.T) { + reader := strings.NewReader("hash-me") + actual, err := HashData(reader) + if err != nil { + t.Fatal(err) + } + expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" + if actual != expected { + t.Fatalf("Expecting %s, got %s", expected, actual) + } +} + +type perpetualReader struct{} + +func (p *perpetualReader) Read(buf []byte) (n int, err error) { + for i := 0; i != len(buf); i++ { + buf[i] = 'a' + } + return len(buf), nil +} + +func TestCancelReadCloser(t *testing.T) { + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) + for { + var buf [128]byte + _, err := cancelReadCloser.Read(buf[:]) + if err == context.DeadlineExceeded { + break + } else if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 0000000..1539ad2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 0000000..c258e5f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package ioutils + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 0000000..52a4901 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 0000000..ccc7f9c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go new file mode 100644 index 0000000..564b1cd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go @@ -0,0 +1,65 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestWriteCloserWrapperClose(t *testing.T) { + called := false + writer := bytes.NewBuffer([]byte{}) + wrapper := NewWriteCloserWrapper(writer, func() error { + called = true + return nil + }) + if err := wrapper.Close(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("writeCloserWrapper should have call the anonymous function.") + } +} + +func TestNopWriteCloser(t *testing.T) { + writer := bytes.NewBuffer([]byte{}) + wrapper := NopWriteCloser(writer) + if err := wrapper.Close(); err != nil { + t.Fatal("NopWriteCloser always return nil on Close.") + } + +} + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go new file mode 100644 index 0000000..4734c31 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go @@ -0,0 +1,42 @@ +package jsonlog + +import ( + "encoding/json" + "fmt" + "time" +) + +// JSONLog represents a log message, typically a single entry from a given log stream. +// JSONLogs can be easily serialized to and from JSON and support custom formatting. +type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` + // Attrs is the list of extra attributes provided by the user + Attrs map[string]string `json:"attrs,omitempty"` +} + +// Format returns the log formatted according to format +// If format is nil, returns the log message +// If format is json, returns the log marshaled in json format +// By default, returns the log with the log time formatted according to format. +func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil +} + +// Reset resets the log to nil. +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go new file mode 100644 index 0000000..83ce684 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go @@ -0,0 +1,178 @@ +// This code was initially generated by ffjson +// This code was generated via the following steps: +// $ go get -u github.com/pquerna/ffjson +// $ make BIND_DIR=. shell +// $ ffjson pkg/jsonlog/jsonlog.go +// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go +// +// It has been modified to improve the performance of time marshalling to JSON +// and to clean it up. +// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = FastTimeMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } +// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// if len(mj.Log) != 0 { +// - if first == true { +// - first = false +// - } else { +// - buf.WriteString(`,`) +// - } +// + first = false +// buf.WriteString(`"log":`) +// ffjsonWriteJSONString(buf, mj.Log) +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" +) + +// MarshalJSON marshals the JSONLog. +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + if err := mj.MarshalJSONBuf(&buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. +func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = FastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go new file mode 100644 index 0000000..3edb271 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go @@ -0,0 +1,34 @@ +package jsonlog + +import ( + "regexp" + "testing" +) + +func TestJSONLogMarshalJSON(t *testing.T) { + logs := map[*JSONLog]string{ + &JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, + &JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, + &JSONLog{}: `^{\"time\":\".{20,}\"}$`, + // These ones are a little weird + &JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, + } + for jsonLog, expression := range logs { + data, err := jsonLog.MarshalJSON() + if err != nil { + t.Fatal(err) + } + res := string(data) + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go new file mode 100644 index 0000000..df522c0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go @@ -0,0 +1,122 @@ +package jsonlog + +import ( + "bytes" + "encoding/json" + "unicode/utf8" +) + +// JSONLogs is based on JSONLog. +// It allows marshalling JSONLog from Log as []byte +// and an already marshalled Created timestamp. +type JSONLogs struct { + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created string `json:"time"` + + // json-encoded bytes + RawAttrs json.RawMessage `json:"attrs,omitempty"` +} + +// MarshalJSONBuf is based on the same method from JSONLog +// It has been modified to take into account the necessary changes. +func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { + var first = true + + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONBytesAsString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if len(mj.RawAttrs) > 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"attrs":`) + buf.Write(mj.RawAttrs) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + buf.WriteString(mj.Created) + buf.WriteString(`}`) + return nil +} + +// This is based on ffjsonWriteJSONBytesAsString. It has been changed +// to accept a string passed as a slice of bytes. +func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go new file mode 100644 index 0000000..6d6ad21 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go @@ -0,0 +1,39 @@ +package jsonlog + +import ( + "bytes" + "regexp" + "testing" +) + +func TestJSONLogsMarshalJSONBuf(t *testing.T) { + logs := map[*JSONLogs]string{ + &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, + &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, + &JSONLogs{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, + &JSONLogs{Created: "time"}: `^{\"time\":time}$`, + &JSONLogs{}: `^{\"time\":}$`, + // These ones are a little weird + &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, + &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, + &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, + // with raw attributes + &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, + } + for jsonLog, expression := range logs { + var buf bytes.Buffer + if err := jsonLog.MarshalJSONBuf(&buf); err != nil { + t.Fatal(err) + } + res := buf.String() + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go new file mode 100644 index 0000000..2117338 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go @@ -0,0 +1,27 @@ +// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. +package jsonlog + +import ( + "errors" + "time" +) + +const ( + // RFC3339NanoFixed is our own version of RFC339Nano because we want one + // that pads the nano seconds part with zeros to ensure + // the timestamps are aligned in the logs. + RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + // JSONFormat is the format used by FastMarshalJSON + JSONFormat = `"` + time.RFC3339Nano + `"` +) + +// FastTimeMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. +func FastTimeMarshalJSON(t time.Time) (string, error) { + if y := t.Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. + return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") + } + return t.Format(JSONFormat), nil +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go new file mode 100644 index 0000000..02d0302 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go @@ -0,0 +1,47 @@ +package jsonlog + +import ( + "testing" + "time" +) + +// Testing to ensure 'year' fields is between 0 and 9999 +func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) { + aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) + json, err := FastTimeMarshalJSON(aTime) + if err == nil { + t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) + } + anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) + json, err = FastTimeMarshalJSON(anotherTime) + if err == nil { + t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) + } + +} + +func TestFastTimeMarshalJSON(t *testing.T) { + aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) + json, err := FastTimeMarshalJSON(aTime) + if err != nil { + t.Fatal(err) + } + expected := "\"2015-05-29T11:01:02.000000003Z\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } + + location, err := time.LoadLocation("Europe/Paris") + if err != nil { + t.Fatal(err) + } + aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) + json, err = FastTimeMarshalJSON(aTime) + if err != nil { + t.Fatal(err) + } + expected = "\"2015-05-29T11:01:02.000000003+02:00\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } +} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 0000000..5481433 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,225 @@ +package jsonmessage + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/term" + "github.com/docker/go-units" +) + +// JSONError wraps a concrete Code and Message, `Code` is +// is an integer error code, `Message` is the error message. +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, +// Start is the initial value for the operation. Current is the current status and +// value of the progress made towards Total. Total is the end value describing when +// we made 100% progress for an operation. +type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` +} + +func (p *JSONProgress) String() string { + var ( + width = 200 + pbBox string + numbersBox string + timeLeftBox string + ) + + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + width = int(ws.Width) + } + + if p.Current <= 0 && p.Total <= 0 { + return "" + } + current := units.HumanSize(float64(p.Current)) + if p.Total <= 0 { + return fmt.Sprintf("%8v", current) + } + total := units.HumanSize(float64(p.Total)) + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +// Display displays the JSONMessage to `out`. `isTerminal` describes if `out` +// is a terminal. If this is the case, it will erase the entire current line +// when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("Authentication is required.") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + // [2K = erase entire current line + fmt.Fprintf(out, "%c[2K\r", 27) + endl = "\r" + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]int) + ) + for { + diff := 0 + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm.Aux) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = len(ids) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + } + diff = len(ids) - line + if isTerminal && diff > 0 { + fmt.Fprintf(out, "%c[%dA", 27, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). + ids = make(map[string]int) + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal && diff > 0 { + fmt.Fprintf(out, "%c[%dB", 27, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go new file mode 100644 index 0000000..c6c5b0e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go @@ -0,0 +1,245 @@ +package jsonmessage + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/term" +) + +func TestError(t *testing.T) { + je := JSONError{404, "Not found"} + if je.Error() != "Not found" { + t.Fatalf("Expected 'Not found' got '%s'", je.Error()) + } +} + +func TestProgress(t *testing.T) { + termsz, err := term.GetWinsize(0) + if err != nil { + // we can safely ignore the err here + termsz = nil + } + jp := JSONProgress{} + if jp.String() != "" { + t.Fatalf("Expected empty string, got '%s'", jp.String()) + } + + expected := " 1 B" + jp2 := JSONProgress{Current: 1} + if jp2.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp2.String()) + } + + expectedStart := "[==========> ] 20 B/100 B" + if termsz != nil && termsz.Width <= 110 { + expectedStart = " 20 B/100 B" + } + jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} + // Just look at the start of the string + // (the remaining time is really hard to test -_-) + if jp3.String()[:len(expectedStart)] != expectedStart { + t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) + } + + expected = "[=========================> ] 50 B/100 B" + if termsz != nil && termsz.Width <= 110 { + expected = " 50 B/100 B" + } + jp4 := JSONProgress{Current: 50, Total: 100} + if jp4.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp4.String()) + } + + // this number can't be negative gh#7136 + expected = "[==================================================>] 50 B" + if termsz != nil && termsz.Width <= 110 { + expected = " 50 B" + } + jp5 := JSONProgress{Current: 50, Total: 40} + if jp5.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp5.String()) + } +} + +func TestJSONMessageDisplay(t *testing.T) { + now := time.Now() + messages := map[JSONMessage][]string{ + // Empty + JSONMessage{}: {"\n", "\n"}, + // Status + JSONMessage{ + Status: "status", + }: { + "status\n", + "status\n", + }, + // General + JSONMessage{ + Time: now.Unix(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with nano precision time + JSONMessage{ + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with both times Nano is preferred + JSONMessage{ + Time: now.Unix(), + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // Stream over status + JSONMessage{ + Status: "status", + Stream: "stream", + }: { + "stream", + "stream", + }, + // With progress message + JSONMessage{ + Status: "status", + ProgressMessage: "progressMessage", + }: { + "status progressMessage", + "status progressMessage", + }, + // With progress, stream empty + JSONMessage{ + Status: "status", + Stream: "", + Progress: &JSONProgress{Current: 1}, + }: { + "", + fmt.Sprintf("%c[2K\rstatus 1 B\r", 27), + }, + } + + // The tests :) + for jsonMessage, expectedMessages := range messages { + // Without terminal + data := bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, false); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String()) + } + // With terminal + data = bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, true); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) + } + } +} + +// Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code. +func TestJSONMessageDisplayWithJSONError(t *testing.T) { + data := bytes.NewBuffer([]byte{}) + jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} + + err := jsonMessage.Display(data, true) + if err == nil || err.Error() != "Can't find it" { + t.Fatalf("Expected a JSONError 404, got [%v]", err) + } + + jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} + err = jsonMessage.Display(data, true) + if err == nil || err.Error() != "Authentication is required." { + t.Fatalf("Expected an error [Authentication is required.], got [%v]", err) + } +} + +func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { + var ( + inFd uintptr + ) + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader("This is not a 'valid' JSON []") + inFd, _ = term.GetFdInfo(reader) + + if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err == nil && err.Error()[:17] != "invalid character" { + t.Fatalf("Should have thrown an error (invalid character in ..), got [%v]", err) + } +} + +func TestDisplayJSONMessagesStream(t *testing.T) { + var ( + inFd uintptr + ) + + messages := map[string][]string{ + // empty string + "": { + "", + ""}, + // Without progress & ID + "{ \"status\": \"status\" }": { + "status\n", + "status\n", + }, + // Without progress, with ID + "{ \"id\": \"ID\",\"status\": \"status\" }": { + "ID: status\n", + fmt.Sprintf("ID: status\n"), + }, + // With progress + "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": { + "ID: status ProgressMessage", + fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 1, 27, 1), + }, + // With progressDetail + "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { + "", // progressbar is disabled in non-terminal + fmt.Sprintf("\n%c[%dA%c[2K\rID: status 1 B\r%c[%dB", 27, 1, 27, 27, 1), + }, + } + for jsonMessage, expectedMessages := range messages { + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader(jsonMessage) + inFd, _ = term.GetFdInfo(reader) + + // Without terminal + if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String()) + } + + // With terminal + data = bytes.NewBuffer([]byte{}) + reader = strings.NewReader(jsonMessage) + if err := DisplayJSONMessagesStream(reader, data, inFd, true, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("Expected an [%v], got [%v]", expectedMessages[1], data.String()) + } + } + +} diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go new file mode 100644 index 0000000..ff833e3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go @@ -0,0 +1,31 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + + return +} diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go new file mode 100644 index 0000000..1bcae7a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go @@ -0,0 +1,94 @@ +// +build !windows,!solaris + +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/activation" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +// TODO: Clean up the fact that socketGroup and tlsConfig aren't always used. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { + ls := []net.Listener{} + + switch proto { + case "fd": + fds, err := listenFD(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, fds...) + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("invalid protocol format: %q", proto) + } + + return ls, nil +} + +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. +func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { + var ( + err error + listeners []net.Listener + ) + // socket activation + if tlsConfig != nil { + listeners, err = activation.TLSListeners(false, tlsConfig) + } else { + listeners, err = activation.Listeners(false) + } + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("too few socket activated files passed in by systemd") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + // TODO: We shouldn't log inside a library. Remove this or error out. + logrus.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) + } + } + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go new file mode 100644 index 0000000..5b5a470 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go @@ -0,0 +1,54 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { + ls := []net.Listener{} + + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + + case "npipe": + // allow Administrators and SYSTEM, plus whatever additional users or groups were specified + sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)" + if socketGroup != "" { + for _, g := range strings.Split(socketGroup, ",") { + sid, err := winio.LookupSidByName(g) + if err != nil { + return nil, err + } + sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid) + } + } + c := winio.PipeConfig{ + SecurityDescriptor: sddl, + MessageMode: true, // Use message mode so that CloseWrite() is supported + InputBufferSize: 65536, // Use 64KB buffers to improve performance + OutputBufferSize: 65536, + } + l, err := winio.ListenPipe(addr, &c) + if err != nil { + return nil, err + } + ls = append(ls, l) + + default: + return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe") + } + + return ls, nil +} diff --git a/vendor/github.com/docker/docker/pkg/locker/README.md b/vendor/github.com/docker/docker/pkg/locker/README.md new file mode 100644 index 0000000..e84a815 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. + + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + s.mu.Lock() + i.data[name] = data + s.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state), this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modfying it at the same time. +Since name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go new file mode 100644 index 0000000..0b22ddf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/locker/locker_test.go b/vendor/github.com/docker/docker/pkg/locker/locker_test.go new file mode 100644 index 0000000..5a297dd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/locker_test.go @@ -0,0 +1,124 @@ +package locker + +import ( + "sync" + "testing" + "time" +) + +func TestLockCounter(t *testing.T) { + l := &lockCtr{} + l.inc() + + if l.waiters != 1 { + t.Fatal("counter inc failed") + } + + l.dec() + if l.waiters != 0 { + t.Fatal("counter dec failed") + } +} + +func TestLockerLock(t *testing.T) { + l := New() + l.Lock("test") + ctr := l.locks["test"] + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters) + } + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + chWaiting := make(chan struct{}) + go func() { + for range time.Tick(1 * time.Millisecond) { + if ctr.count() == 1 { + close(chWaiting) + break + } + } + }() + + select { + case <-chWaiting: + case <-time.After(3 * time.Second): + t.Fatal("timed out waiting for lock waiters to be incremented") + } + + select { + case <-chDone: + t.Fatal("lock should not have returned while it was still held") + default: + } + + if err := l.Unlock("test"); err != nil { + t.Fatal(err) + } + + select { + case <-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should have completed") + } + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got: %d", ctr.count()) + } +} + +func TestLockerUnlock(t *testing.T) { + l := New() + + l.Lock("test") + l.Unlock("test") + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should not be blocked") + } +} + +func TestLockerConcurrency(t *testing.T) { + l := New() + + var wg sync.WaitGroup + for i := 0; i <= 10000; i++ { + wg.Add(1) + go func() { + l.Lock("test") + // if there is a concurrency issue, will very likely panic here + l.Unlock("test") + wg.Done() + }() + } + + chDone := make(chan struct{}) + go func() { + wg.Wait() + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for locks to complete") + } + + // Since everything has unlocked this should not exist anymore + if ctr, exists := l.locks["test"]; exists { + t.Fatalf("lock should not exist: %v", ctr) + } +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 0000000..9b15bff --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go new file mode 100644 index 0000000..01865ef --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go @@ -0,0 +1,22 @@ +package longpath + +import ( + "strings" + "testing" +) + +func TestStandardLongPath(t *testing.T) { + c := `C:\simple\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\C:\simple\path`) { + t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC) + } +} + +func TestUNCLongPath(t *testing.T) { + c := `\\server\share\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { + t.Errorf("Wrong UNC long path returned. Original = %s ; Long = %s", c, longC) + } +} diff --git a/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go b/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go new file mode 100644 index 0000000..971f45e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go @@ -0,0 +1,137 @@ +// +build linux + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. + startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + logrus.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &loopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + logrus.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + logrus.Error("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/vendor/github.com/docker/docker/pkg/loopback/ioctl.go b/vendor/github.com/docker/docker/pkg/loopback/ioctl.go new file mode 100644 index 0000000..0714eb5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/loopback/ioctl.go @@ -0,0 +1,53 @@ +// +build linux + +package loopback + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { + loopInfo := &loopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go b/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go new file mode 100644 index 0000000..e1100ce --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go @@ -0,0 +1,52 @@ +// +build linux + +package loopback + +/* +#include // FIXME: present only for defines, maybe we can remove it? + +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/vendor/github.com/docker/docker/pkg/loopback/loopback.go b/vendor/github.com/docker/docker/pkg/loopback/loopback.go new file mode 100644 index 0000000..bc04792 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/loopback/loopback.go @@ -0,0 +1,63 @@ +// +build linux + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("Error loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. +func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go new file mode 100644 index 0000000..607dbed --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := parseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go new file mode 100644 index 0000000..f166cb2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +// +build freebsd,cgo + +package mount + +/* +#include +*/ +import "C" + +const ( + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. + NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go new file mode 100644 index 0000000..dc696dc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go @@ -0,0 +1,85 @@ +package mount + +import ( + "syscall" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = syscall.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = syscall.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = syscall.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = syscall.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = syscall.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = syscall.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = syscall.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = syscall.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = syscall.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = syscall.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = syscall.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = syscall.MS_BIND | syscall.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = syscall.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = syscall.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = syscall.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = syscall.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = syscall.MS_SHARED | syscall.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = syscall.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = syscall.MS_STRICTATIME +) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go new file mode 100644 index 0000000..5564f7b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -0,0 +1,30 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 +) diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go new file mode 100644 index 0000000..66ac4bf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -0,0 +1,74 @@ +package mount + +import ( + "time" +) + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + if err := mount(device, target, mType, uintptr(flag), data); err != nil { + return err + } + return nil +} + +// Unmount will unmount the target filesystem, so long as it is mounted. +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return ForceUnmount(target) +} + +// ForceUnmount will force an unmount of the target filesystem, regardless if +// it is mounted or not. +func ForceUnmount(target string) (err error) { + // Simple retry logic for unmount + for i := 0; i < 10; i++ { + if err = unmount(target, 0); err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go new file mode 100644 index 0000000..253aff3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go @@ -0,0 +1,162 @@ +// +build !windows,!solaris + +package mount + +import ( + "os" + "path" + "testing" +) + +func TestMountOptionsParsing(t *testing.T) { + options := "noatime,ro,size=10k" + + flag, data := parseOptions(options) + + if data != "size=10k" { + t.Fatalf("Expected size=10 got %s", data) + } + + expectedFlag := NOATIME | RDONLY + + if flag != expectedFlag { + t.Fatalf("Expected %d got %d", expectedFlag, flag) + } +} + +func TestMounted(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + mounted, err := Mounted(targetDir) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatalf("Expected %s to be mounted", targetDir) + } + if _, err := os.Stat(targetDir); err != nil { + t.Fatal(err) + } +} + +func TestMountReadonly(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) + if err == nil { + t.Fatal("Should not be able to open a ro file as rw") + } +} + +func TestGetMounts(t *testing.T) { + mounts, err := GetMounts() + if err != nil { + t.Fatal(err) + } + + root := false + for _, entry := range mounts { + if entry.Mountpoint == "/" { + root = true + } + } + + if !root { + t.Fatal("/ should be mounted at least") + } +} + +func TestMergeTmpfsOptions(t *testing.T) { + options := []string{"noatime", "ro", "size=10k", "defaults", "atime", "defaults", "rw", "rprivate", "size=1024k", "slave"} + expected := []string{"atime", "rw", "size=1024k", "slave"} + merged, err := MergeTmpfsOptions(options) + if err != nil { + t.Fatal(err) + } + if len(expected) != len(merged) { + t.Fatalf("Expected %s got %s", expected, merged) + } + for index := range merged { + if merged[index] != expected[index] { + t.Fatalf("Expected %s for the %dth option, got %s", expected, index, merged) + } + } + + options = []string{"noatime", "ro", "size=10k", "atime", "rw", "rprivate", "size=1024k", "slave", "size"} + _, err = MergeTmpfsOptions(options) + if err == nil { + t.Fatal("Expected error got nil") + } +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go new file mode 100644 index 0000000..bb870e6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -0,0 +1,59 @@ +package mount + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go new file mode 100644 index 0000000..dd4280c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -0,0 +1,21 @@ +package mount + +import ( + "syscall" +) + +func mount(device, target, mType string, flag uintptr, data string) error { + if err := syscall.Mount(device, target, mType, flag, data); err != nil { + return err + } + + // If we have a bind mount or remount, remount... + if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { + return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go new file mode 100644 index 0000000..c684aa8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go @@ -0,0 +1,33 @@ +// +build solaris,cgo + +package mount + +import ( + "golang.org/x/sys/unix" + "unsafe" +) + +// #include +// #include +// #include +// int Mount(const char *spec, const char *dir, int mflag, +// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { +// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); +// } +import "C" + +func mount(device, target, mType string, flag uintptr, data string) error { + spec := C.CString(device) + dir := C.CString(target) + fstype := C.CString(mType) + _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) + C.free(unsafe.Pointer(spec)) + C.free(unsafe.Pointer(dir)) + C.free(unsafe.Pointer(fstype)) + return err +} + +func unmount(target string, flag int) error { + err := unix.Unmount(target, flag) + return err +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go new file mode 100644 index 0000000..a2a3bb4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go new file mode 100644 index 0000000..e3fc353 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -0,0 +1,40 @@ +package mount + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. + VfsOpts string +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 0000000..4f32edc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,41 @@ +package mount + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts. +func parseMountTable() ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go new file mode 100644 index 0000000..be69fee --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -0,0 +1,95 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go new file mode 100644 index 0000000..bd100e1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go @@ -0,0 +1,476 @@ +// +build linux + +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := Info{ + ID: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: "shared:5", + Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got %#v", mi, infos[0]) + } +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go new file mode 100644 index 0000000..ad9ab57 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include +#include +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret == 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp) + } + + C.fclose(mnttab) + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 0000000..7fbcf19 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go new file mode 100644 index 0000000..dab8a37 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 0000000..8ceec84 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,69 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go new file mode 100644 index 0000000..c183794 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go @@ -0,0 +1,331 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "syscall" + "testing" +) + +// nothing is propagated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared mount, +// then child mounts propagate to the source +func TestSubtreeShared(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath = path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is available in the source directory + if _, err := os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable", sourceDir) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go new file mode 100644 index 0000000..09f6b03 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go @@ -0,0 +1,58 @@ +// +build solaris + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + // TODO: Solaris does not support bind mounts. + // Evaluate lofs and also look at the relevant + // mount flags to be supported. + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go b/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go new file mode 100644 index 0000000..18a939b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go @@ -0,0 +1,11 @@ +package main + +import ( + "fmt" + + "github.com/docker/docker/pkg/namesgenerator" +) + +func main() { + fmt.Println(namesgenerator.GetRandomName(0)) +} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go new file mode 100644 index 0000000..cfb8157 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -0,0 +1,590 @@ +package namesgenerator + +import ( + "fmt" + + "github.com/docker/docker/pkg/random" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "affectionate", + "agitated", + "amazing", + "angry", + "awesome", + "blissful", + "boring", + "brave", + "clever", + "cocky", + "compassionate", + "competent", + "condescending", + "confident", + "cranky", + "dazzling", + "determined", + "distracted", + "dreamy", + "eager", + "ecstatic", + "elastic", + "elated", + "elegant", + "eloquent", + "epic", + "fervent", + "festive", + "flamboyant", + "focused", + "friendly", + "frosty", + "gallant", + "gifted", + "goofy", + "gracious", + "happy", + "hardcore", + "heuristic", + "hopeful", + "hungry", + "infallible", + "inspiring", + "jolly", + "jovial", + "keen", + "kickass", + "kind", + "laughing", + "loving", + "lucid", + "mystifying", + "modest", + "musing", + "naughty", + "nervous", + "nifty", + "nostalgic", + "objective", + "optimistic", + "peaceful", + "pedantic", + "pensive", + "practical", + "priceless", + "quirky", + "quizzical", + "relaxed", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "silly", + "sleepy", + "stoic", + "stupefied", + "suspicious", + "tender", + "thirsty", + "trusting", + "unruffled", + "upbeat", + "vibrant", + "vigilant", + "wizardly", + "wonderful", + "xenodochial", + "youthful", + "zealous", + "zen", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi + "agnesi", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver + "beaver", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) + "shannon", + + // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke + "clarke", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // This entry reflects a husband and wife team who worked together: + // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran + // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran + "curran", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison + "edison", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt + "haibt", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport + "lamport", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + //Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin + "lewin", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform + "nightingale", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) + "montalcini", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles + "wiles", + + // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { + rnd := random.Rand +begin: + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + } + return name +} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go new file mode 100644 index 0000000..d1a9497 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go @@ -0,0 +1,27 @@ +package namesgenerator + +import ( + "strings" + "testing" +) + +func TestNameFormat(t *testing.T) { + name := GetRandomName(0) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name contains numbers!") + } +} + +func TestNameRetries(t *testing.T) { + name := GetRandomName(1) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if !strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name doesn't contain a number") + } + +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go new file mode 100644 index 0000000..7738fc7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -0,0 +1,74 @@ +// +build !windows + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 it a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 0000000..71f205b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 0000000..744d5e1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,45 @@ +// +build linux freebsd solaris + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + + "github.com/Sirupsen/logrus" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. +func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go new file mode 100644 index 0000000..dc8c0e3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go @@ -0,0 +1,96 @@ +// +build !windows + +package kernel + +import ( + "fmt" + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { + var ( + a *VersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(*a, *b); r != result { + t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +// TestParseRelease tests the ParseRelease() function +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1) + // Errors + invalids := []string{ + "3", + "a", + "a.a", + "a.a.a-a", + } + for _, invalid := range invalids { + expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid) + if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage { + + } + } +} + +func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) + } +} + +// TestCompareKernelVersion tests the CompareKernelVersion() function +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 5}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 0, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 7, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 7, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + -1) +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 0000000..80fab8f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,69 @@ +// +build windows + +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 0000000..bb9b326 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is passthrough for syscall.Utsname in order to make it portable with +// other platforms where it is not available. +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 0000000..49370bd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 0000000..1da3f23 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!solaris + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 0000000..e04a349 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,77 @@ +// Package operatingsystem provides helper function to get the operating system +// name for different platforms. +package operatingsystem + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/mattn/go-shellwords" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" + + // used by stateless systems like Clear Linux + altOsRelease = "/usr/lib/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + osReleaseFile, err := os.Open(etcOsRelease) + if err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) + } + osReleaseFile, err = os.Open(altOsRelease) + if err != nil { + return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) + } + } + defer osReleaseFile.Close() + + var prettyName string + scanner := bufio.NewScanner(osReleaseFile) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PRETTY_NAME=") { + data := strings.SplitN(line, "=", 2) + prettyNames, err := shellwords.Parse(data[1]) + if err != nil { + return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) + } + if len(prettyNames) != 1 { + return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) + } + prettyName = prettyNames[0] + } + } + if prettyName != "" { + return prettyName, nil + } + // If not set, defaults to PRETTY_NAME="Linux" + // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html + return "Linux", nil +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go new file mode 100644 index 0000000..d08ad14 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package operatingsystem + +/* +#include +*/ +import "C" + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var etcOsRelease = "/etc/release" + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("\n")); i >= 0 { + b = bytes.Trim(b[:i], " ") + return string(b), nil + } + return "", errors.New("release not found") +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + if C.getzoneid() != 0 { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go new file mode 100644 index 0000000..bc91c3c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go @@ -0,0 +1,25 @@ +// +build freebsd darwin + +package operatingsystem + +import ( + "errors" + "os/exec" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + cmd := exec.Command("uname", "-s") + osName, err := cmd.Output() + if err != nil { + return "", err + } + return string(osName), nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD and Darwin, always returns false. +func IsContainerized() (bool, error) { + // TODO: Implement jail detection for freeBSD + return false, errors.New("Cannot detect if we are in container") +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go new file mode 100644 index 0000000..e7120c6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go @@ -0,0 +1,247 @@ +// +build linux freebsd + +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var backup = etcOsRelease + + invalids := []struct { + content string + errorExpected string + }{ + { + `PRETTY_NAME=Source Mage GNU/Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux", + }, + { + `PRETTY_NAME="Ubuntu Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME=Ubuntu' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS", + }, + } + + valids := []struct { + content string + expected string + }{ + { + `NAME="Ubuntu" +PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`, + "Gentoo/Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Ubuntu 14.04 LTS", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME='Ubuntu 14.04 LTS'`, + "Ubuntu 14.04 LTS", + }, + { + `PRETTY_NAME=Source +NAME="Source Mage"`, + "Source", + }, + { + `PRETTY_NAME=Source +PRETTY_NAME="Source Mage"`, + "Source Mage", + }, + } + + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + + defer func() { + os.Remove(etcOsRelease) + etcOsRelease = backup + }() + + for _, elt := range invalids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err == nil || err.Error() != elt.errorExpected { + t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err) + } + } + + for _, elt := range valids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != elt.expected { + t.Fatalf("Expected %q, got %q (err: %v)", elt.expected, s, err) + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + nonContainerizedProc1Cgroupsystemd226 = []byte(`9:memory:/init.scope +8:net_cls,net_prio:/ +7:cpuset:/ +6:freezer:/ +5:devices:/init.scope +4:blkio:/init.scope +3:cpu,cpuacct:/init.scope +2:perf_event:/ +1:name=systemd:/init.scope +`) + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroupsystemd226, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized for systemd /init.scope cgroup layout") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} + +func TestOsReleaseFallback(t *testing.T) { + var backup = etcOsRelease + var altBackup = altOsRelease + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + altOsRelease = filepath.Join(dir, "altOsRelease") + + defer func() { + os.Remove(dir) + etcOsRelease = backup + altOsRelease = altBackup + }() + content := `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +` + if err := ioutil.WriteFile(altOsRelease, []byte(content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != "Gentoo/Linux" { + t.Fatalf("Expected %q, got %q (err: %v)", "Gentoo/Linux", s, err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go new file mode 100644 index 0000000..3c86b6a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -0,0 +1,49 @@ +package operatingsystem + +import ( + "syscall" + "unsafe" +) + +// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c +// for a similar sample + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + + var h syscall.Handle + + // Default return value + ret := "Unknown Operating System" + + if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return ret, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err := syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("ProductName"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return ret, err + } + ret = syscall.UTF16ToString(buf[:]) + + return ret, nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on Windows, always returns false. +func IsContainerized() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go new file mode 100644 index 0000000..acc8971 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. +// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go new file mode 100644 index 0000000..7f19e90 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go @@ -0,0 +1,70 @@ +package parsers + +import ( + "reflect" + "testing" +) + +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": {"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + +func TestParseUintList(t *testing.T) { + valids := map[string]map[int]bool{ + "": {}, + "7": {7: true}, + "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true}, + "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true}, + "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true}, + "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true}, + "03,1-3": {1: true, 2: true, 3: true}, + "3,2,1": {1: true, 2: true, 3: true}, + "0-2,3,1": {0: true, 1: true, 2: true, 3: true}, + } + for k, v := range valids { + out, err := ParseUintList(k) + if err != nil { + t.Fatalf("Expected not to fail, got %v", err) + } + if !reflect.DeepEqual(out, v) { + t.Fatalf("Expected %v, got %v", v, out) + } + } + + invalids := []string{ + "this", + "1--", + "1-10,,10", + "10-1", + "-1", + "-1,0", + } + for _, v := range invalids { + if out, err := ParseUintList(v); err == nil { + t.Fatalf("Expected failure with %s but got %v", v, out) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go new file mode 100644 index 0000000..d832fea --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go @@ -0,0 +1,56 @@ +// Package pidfile provides structure and helper functions to create and remove +// PID file. A PID file is usually a file used to store the process ID of a +// running process. +package pidfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// PIDFile is a file used to store the process ID of a running process. +type PIDFile struct { + path string +} + +func checkPIDFileAlreadyExists(path string) error { + if pidByte, err := ioutil.ReadFile(path); err == nil { + pidString := strings.TrimSpace(string(pidByte)) + if pid, err := strconv.Atoi(pidString); err == nil { + if processExists(pid) { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) + } + } + } + return nil +} + +// New creates a PIDfile using the specified path. +func New(path string) (*PIDFile, error) { + if err := checkPIDFileAlreadyExists(path); err != nil { + return nil, err + } + // Note MkdirAll returns nil if a directory already exists + if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &PIDFile{path: path}, nil +} + +// Remove removes the PIDFile. +func (file PIDFile) Remove() error { + if err := os.Remove(file.path); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go new file mode 100644 index 0000000..5c1cd7a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go @@ -0,0 +1,18 @@ +// +build darwin + +package pidfile + +import ( + "syscall" +) + +func processExists(pid int) bool { + // OS X does not have a proc filesystem. + // Use kill -0 pid to judge if the process exists. + err := syscall.Kill(pid, 0) + if err != nil { + return false + } + + return true +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go new file mode 100644 index 0000000..73e8af7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go @@ -0,0 +1,38 @@ +package pidfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestNewAndRemove(t *testing.T) { + dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile") + if err != nil { + t.Fatal("Could not create test directory") + } + + path := filepath.Join(dir, "testfile") + file, err := New(path) + if err != nil { + t.Fatal("Could not create test file", err) + } + + _, err = New(path) + if err == nil { + t.Fatal("Test file creation not blocked") + } + + if err := file.Remove(); err != nil { + t.Fatal("Could not delete created test file") + } +} + +func TestRemoveInvalidPath(t *testing.T) { + file := PIDFile{path: filepath.Join("foo", "bar")} + + if err := file.Remove(); err == nil { + t.Fatal("Non-existing file doesn't give an error on delete") + } +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go new file mode 100644 index 0000000..1bf5221 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!darwin + +package pidfile + +import ( + "os" + "path/filepath" + "strconv" +) + +func processExists(pid int) bool { + if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go new file mode 100644 index 0000000..ae489c6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go @@ -0,0 +1,23 @@ +package pidfile + +import "syscall" + +const ( + processQueryLimitedInformation = 0x1000 + + stillActive = 259 +) + +func processExists(pid int) bool { + h, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) + if err != nil { + return false + } + var c uint32 + err = syscall.GetExitCodeProcess(h, &c) + syscall.Close(h) + if err != nil { + return c == stillActive + } + return true +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go new file mode 100644 index 0000000..2cdc2c5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go @@ -0,0 +1,16 @@ +// Package platform provides helper function to get the runtime architecture +// for different platforms. +package platform + +import ( + "syscall" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + utsname := &syscall.Utsname{} + if err := syscall.Uname(utsname); err != nil { + return "", err + } + return charsToString(utsname.Machine), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go b/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go new file mode 100644 index 0000000..45bbcf1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go @@ -0,0 +1,20 @@ +// +build freebsd solaris darwin + +// Package platform provides helper function to get the runtime architecture +// for different platforms. +package platform + +import ( + "os/exec" + "strings" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, i86pc, sun4v, ...) +func runtimeArchitecture() (string, error) { + cmd := exec.Command("/usr/bin/uname", "-m") + machine, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(machine)), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go b/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go new file mode 100644 index 0000000..c5f684d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go @@ -0,0 +1,60 @@ +package platform + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") +) + +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx +type systeminfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +// Constants +const ( + ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64 + ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64 + ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL + ProcessorArchitectureArm = 5 // PROCESSOR_ARCHITECTURE_ARM +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + switch sysinfo.wProcessorArchitecture { + case ProcessorArchitecture64, ProcessorArchitectureIA64: + return "x86_64", nil + case ProcessorArchitecture32: + return "i686", nil + case ProcessorArchitectureArm: + return "arm", nil + default: + return "", fmt.Errorf("Unknown processor architecture") + } +} + +// NumProcs returns the number of processors on the system +func NumProcs() uint32 { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + return sysinfo.dwNumberOfProcessors +} diff --git a/vendor/github.com/docker/docker/pkg/platform/platform.go b/vendor/github.com/docker/docker/pkg/platform/platform.go new file mode 100644 index 0000000..e4b0312 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/platform.go @@ -0,0 +1,23 @@ +package platform + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +var ( + // Architecture holds the runtime architecture of the process. + Architecture string + // OSType holds the runtime operating system type (Linux, …) of the process. + OSType string +) + +func init() { + var err error + Architecture, err = runtimeArchitecture() + if err != nil { + logrus.Errorf("Could not read system architecture info: %v", err) + } + OSType = runtime.GOOS +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go new file mode 100644 index 0000000..5dcbadf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go @@ -0,0 +1,18 @@ +// +build linux,386 linux,amd64 linux,arm64 +// see golang's sources src/syscall/ztypes_linux_*.go that use int8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of signed int8 +func charsToString(ca [65]int8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = uint8(ca[lens]) + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go new file mode 100644 index 0000000..c9875cf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go @@ -0,0 +1,18 @@ +// +build linux,arm linux,ppc64 linux,ppc64le s390x +// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of unsigned uint8 +func charsToString(ca [65]uint8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = ca[lens] + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go new file mode 100644 index 0000000..dde5f66 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go @@ -0,0 +1,35 @@ +package plugingetter + +import "github.com/docker/docker/pkg/plugins" + +const ( + // LOOKUP doesn't update RefCount + LOOKUP = 0 + // ACQUIRE increments RefCount + ACQUIRE = 1 + // RELEASE decrements RefCount + RELEASE = -1 +) + +// CompatPlugin is a abstraction to handle both v2(new) and v1(legacy) plugins. +type CompatPlugin interface { + Client() *plugins.Client + Name() string + BasePath() string + IsV1() bool +} + +// CountedPlugin is a plugin which is reference counted. +type CountedPlugin interface { + Acquire() + Release() + CompatPlugin +} + +// PluginGetter is the interface implemented by Store +type PluginGetter interface { + Get(name, capability string, mode int) (CompatPlugin, error) + GetAllByCap(capability string) ([]CompatPlugin, error) + GetAllManagedPluginsByCap(capability string) []CompatPlugin + Handle(capability string, callback func(string, *plugins.Client)) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go new file mode 100644 index 0000000..e8e730e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -0,0 +1,205 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeoutInSecs int) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeoutInSecs), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. +func newClientWithTransport(tr transport.Transport, timeoutInSecs int) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: time.Duration(timeoutInSecs) * time.Second, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. +func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + var retries int + start := time.Now() + + for { + resp, err := c.http.Do(req) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... + return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return resp.Body, nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/client_test.go b/vendor/github.com/docker/docker/pkg/plugins/client_test.go new file mode 100644 index 0000000..9faad86 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/client_test.go @@ -0,0 +1,134 @@ +package plugins + +import ( + "io" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + "time" + + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + mux *http.ServeMux + server *httptest.Server +) + +func setupRemotePluginServer() string { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + return server.URL +} + +func teardownRemotePluginServer() { + if server != nil { + server.Close() + } +} + +func TestFailedConnection(t *testing.T) { + c, _ := NewClient("tcp://127.0.0.1:1", &tlsconfig.Options{InsecureSkipVerify: true}) + _, err := c.callWithRetry("Service.Method", nil, false) + if err == nil { + t.Fatal("Unexpected successful connection") + } +} + +func TestEchoInputOutput(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(output, m) { + t.Fatalf("Expected %v, was %v\n", m, output) + } + err = c.Call("Test.Echo", nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestBackoff(t *testing.T) { + cases := []struct { + retries int + expTimeOff time.Duration + }{ + {0, time.Duration(1)}, + {1, time.Duration(2)}, + {2, time.Duration(4)}, + {4, time.Duration(16)}, + {6, time.Duration(30)}, + {10, time.Duration(30)}, + } + + for _, c := range cases { + s := c.expTimeOff * time.Second + if d := backoff(c.retries); d != s { + t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d) + } + } +} + +func TestAbortRetry(t *testing.T) { + cases := []struct { + timeOff time.Duration + expAbort bool + }{ + {time.Duration(1), false}, + {time.Duration(2), false}, + {time.Duration(10), false}, + {time.Duration(30), true}, + {time.Duration(40), true}, + } + + for _, c := range cases { + s := c.timeOff * time.Second + if a := abort(time.Now(), s); a != c.expAbort { + t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a) + } + } +} + +func TestClientScheme(t *testing.T) { + cases := map[string]string{ + "tcp://127.0.0.1:8080": "http", + "unix:///usr/local/plugins/foo": "http", + "http://127.0.0.1:8080": "http", + "https://127.0.0.1:8080": "https", + } + + for addr, scheme := range cases { + u, err := url.Parse(addr) + if err != nil { + t.Fatal(err) + } + s := httpScheme(u) + + if s != scheme { + t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, s) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go new file mode 100644 index 0000000..e99581c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -0,0 +1,131 @@ +package plugins + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). +type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return nil + } + + if fi.Mode()&os.ModeSocket != 0 { + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + } + return nil + }); err != nil { + return nil, err + } + + for _, path := range specsPaths { + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi.IsDir() { + return nil + } + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + return nil + }); err != nil { + return nil, err + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) + } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, ErrNotFound +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go new file mode 100644 index 0000000..03f9d00 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go @@ -0,0 +1,152 @@ +package plugins + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func Setup(t *testing.T) (string, func()) { + tmpdir, err := ioutil.TempDir("", "docker-test") + if err != nil { + t.Fatal(err) + } + backup := socketsPath + socketsPath = tmpdir + specsPaths = []string{tmpdir} + + return tmpdir, func() { + socketsPath = backup + os.RemoveAll(tmpdir) + } +} + +func TestFileSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []struct { + path string + name string + addr string + fail bool + }{ + // TODO Windows: Factor out the unix:// variants. + {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(c.name) + if c.fail && err == nil { + continue + } + + if err != nil { + t.Fatal(err) + } + + if p.name != c.name { + t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.name) + } + + if p.Addr != c.addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) + } + + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + } +} + +func TestFileJSONSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" { + t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile) + } + + if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" { + t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile) + } + + if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" { + t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile) + } +} + +func TestFileJSONSpecPluginWithoutTLSConfig(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin" +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig != nil { + t.Fatalf("Expected plugin TLSConfig nil, got %v\n", plugin.TLSConfig) + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go new file mode 100644 index 0000000..693a47e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go new file mode 100644 index 0000000..3e2d506 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go @@ -0,0 +1,61 @@ +// +build !windows + +package plugins + +import ( + "fmt" + "net" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestLocalSocket(t *testing.T) { + // TODO Windows: Enable a similar version for Windows named pipes + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []string{ + filepath.Join(tmpdir, "echo.sock"), + filepath.Join(tmpdir, "echo", "echo.sock"), + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil { + t.Fatal(err) + } + + l, err := net.Listen("unix", c) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + + pp, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(p, pp) { + t.Fatalf("Expected %v, was %v\n", p, pp) + } + + if p.name != "echo" { + t.Fatalf("Expected plugin `echo`, got %s\n", p.name) + } + + addr := fmt.Sprintf("unix://%s", c) + if p.Addr != addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr) + } + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + l.Close() + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go new file mode 100644 index 0000000..d7c1fe4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go b/vendor/github.com/docker/docker/pkg/plugins/errors.go new file mode 100644 index 0000000..7988471 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go b/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go new file mode 100644 index 0000000..b19c0d5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go @@ -0,0 +1,44 @@ +package plugins + +import ( + "errors" + "path/filepath" + "runtime" + "sync" + "testing" + "time" +) + +// regression test for deadlock in handlers +func TestPluginAddHandler(t *testing.T) { + // make a plugin which is pre-activated + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.Manifest = &Manifest{Implements: []string{"bananas"}} + storage.plugins["qwerty"] = p + + testActive(t, p) + Handle("bananas", func(_ string, _ *Client) {}) + testActive(t, p) +} + +func TestPluginWaitBadPlugin(t *testing.T) { + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.activateErr = errors.New("some junk happened") + testActive(t, p) +} + +func testActive(t *testing.T, p *Plugin) { + done := make(chan struct{}) + go func() { + p.waitActive() + close(done) + }() + + select { + case <-time.After(100 * time.Millisecond): + _, f, l, _ := runtime.Caller(1) + t.Fatalf("%s:%d: deadlock in waitActive", filepath.Base(f), l) + case <-done: + } + +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md new file mode 100644 index 0000000..0418a3e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md @@ -0,0 +1,58 @@ +Plugin RPC Generator +==================== + +Generates go code from a Go interface definition for proxying between the plugin +API and the subsystem being extended. + +## Usage + +Given an interface definition: + +```go +type volumeDriver interface { + Create(name string, opts opts) (err error) + Remove(name string) (err error) + Path(name string) (mountpoint string, err error) + Mount(name string) (mountpoint string, err error) + Unmount(name string) (err error) +} +``` + +**Note**: All function options and return values must be named in the definition. + +Run the generator: + +```bash +$ pluginrpc-gen --type volumeDriver --name VolumeDriver -i volumes/drivers/extpoint.go -o volumes/drivers/proxy.go +``` + +Where: +- `--type` is the name of the interface to use +- `--name` is the subsystem that the plugin "Implements" +- `-i` is the input file containing the interface definition +- `-o` is the output file where the the generated code should go + +**Note**: The generated code will use the same package name as the one defined in the input file + +Optionally, you can skip functions on the interface that should not be +implemented in the generated proxy code by passing in the function name to `--skip`. +This flag can be specified multiple times. + +You can also add build tags that should be prepended to the generated code by +supplying `--tag`. This flag can be specified multiple times. + +## Known issues + +## go-generate + +You can also use this with go-generate, which is pretty awesome. +To do so, place the code at the top of the file which contains the interface +definition (i.e., the input file): + +```go +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver +``` + +Then cd to the package dir and run `go generate` + +**Note**: the `pluginrpc-gen` binary must be within your `$PATH` diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go new file mode 100644 index 0000000..5695dcc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go @@ -0,0 +1,89 @@ +package foo + +import ( + "fmt" + + aliasedio "io" + + "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" +) + +var ( + errFakeImport = fmt.Errorf("just to import fmt for imports tests") +) + +type wobble struct { + Some string + Val string + Inception *wobble +} + +// Fooer is an empty interface used for tests. +type Fooer interface{} + +// Fooer2 is an interface used for tests. +type Fooer2 interface { + Foo() +} + +// Fooer3 is an interface used for tests. +type Fooer3 interface { + Foo() + Bar(a string) + Baz(a string) (err error) + Qux(a, b string) (val string, err error) + Wobble() (w *wobble) + Wiggle() (w wobble) + WiggleWobble(a []*wobble, b []wobble, c map[string]*wobble, d map[*wobble]wobble, e map[string][]wobble, f []*otherfixture.Spaceship) (g map[*wobble]wobble, h [][]*wobble, i otherfixture.Spaceship, j *otherfixture.Spaceship, k map[*otherfixture.Spaceship]otherfixture.Spaceship, l []otherfixture.Spaceship) +} + +// Fooer4 is an interface used for tests. +type Fooer4 interface { + Foo() error +} + +// Bar is an interface used for tests. +type Bar interface { + Boo(a string, b string) (s string, err error) +} + +// Fooer5 is an interface used for tests. +type Fooer5 interface { + Foo() + Bar +} + +// Fooer6 is an interface used for tests. +type Fooer6 interface { + Foo(a otherfixture.Spaceship) +} + +// Fooer7 is an interface used for tests. +type Fooer7 interface { + Foo(a *otherfixture.Spaceship) +} + +// Fooer8 is an interface used for tests. +type Fooer8 interface { + Foo(a map[string]otherfixture.Spaceship) +} + +// Fooer9 is an interface used for tests. +type Fooer9 interface { + Foo(a map[string]*otherfixture.Spaceship) +} + +// Fooer10 is an interface used for tests. +type Fooer10 interface { + Foo(a []otherfixture.Spaceship) +} + +// Fooer11 is an interface used for tests. +type Fooer11 interface { + Foo(a []*otherfixture.Spaceship) +} + +// Fooer12 is an interface used for tests. +type Fooer12 interface { + Foo(a aliasedio.Reader) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go new file mode 100644 index 0000000..1937d17 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go @@ -0,0 +1,4 @@ +package otherfixture + +// Spaceship is a fixture for tests +type Spaceship struct{} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go new file mode 100644 index 0000000..e77a7d4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "unicode" + "unicode/utf8" +) + +type stringSet struct { + values map[string]struct{} +} + +func (s stringSet) String() string { + return "" +} + +func (s stringSet) Set(value string) error { + s.values[value] = struct{}{} + return nil +} +func (s stringSet) GetValues() map[string]struct{} { + return s.values +} + +var ( + typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") + rpcName = flag.String("name", *typeName, "RPC name, set if different from type") + inputFile = flag.String("i", "", "input file path") + outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") + + skipFuncs map[string]struct{} + flSkipFuncs = stringSet{make(map[string]struct{})} + + flBuildTags = stringSet{make(map[string]struct{})} +) + +func errorOut(msg string, err error) { + if err == nil { + return + } + fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err) + os.Exit(1) +} + +func checkFlags() error { + if *outputFile == "" { + return fmt.Errorf("missing required flag `-o`") + } + if *inputFile == "" { + return fmt.Errorf("missing required flag `-i`") + } + return nil +} + +func main() { + flag.Var(flSkipFuncs, "skip", "skip parsing for function") + flag.Var(flBuildTags, "tag", "build tags to add to generated files") + flag.Parse() + skipFuncs = flSkipFuncs.GetValues() + + errorOut("error", checkFlags()) + + pkg, err := Parse(*inputFile, *typeName) + errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err) + + var analysis = struct { + InterfaceType string + RPCName string + BuildTags map[string]struct{} + *ParsedPkg + }{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg} + var buf bytes.Buffer + + errorOut("parser error", generatedTempl.Execute(&buf, analysis)) + src, err := format.Source(buf.Bytes()) + errorOut("error formatting generated source:\n"+buf.String(), err) + errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644)) +} + +func toLower(s string) string { + if s == "" { + return "" + } + r, n := utf8.DecodeRuneInString(s) + return string(unicode.ToLower(r)) + s[n:] +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go new file mode 100644 index 0000000..6c547e1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go @@ -0,0 +1,263 @@ +package main + +import ( + "errors" + "fmt" + "go/ast" + "go/parser" + "go/token" + "path" + "reflect" + "strings" +) + +var errBadReturn = errors.New("found return arg with no name: all args must be named") + +type errUnexpectedType struct { + expected string + actual interface{} +} + +func (e errUnexpectedType) Error() string { + return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual)) +} + +// ParsedPkg holds information about a package that has been parsed, +// its name and the list of functions. +type ParsedPkg struct { + Name string + Functions []function + Imports []importSpec +} + +type function struct { + Name string + Args []arg + Returns []arg + Doc string +} + +type arg struct { + Name string + ArgType string + PackageSelector string +} + +func (a *arg) String() string { + return a.Name + " " + a.ArgType +} + +type importSpec struct { + Name string + Path string +} + +func (s *importSpec) String() string { + var ss string + if len(s.Name) != 0 { + ss += s.Name + } + ss += s.Path + return ss +} + +// Parse parses the given file for an interface definition with the given name. +func Parse(filePath string, objName string) (*ParsedPkg, error) { + fs := token.NewFileSet() + pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors) + if err != nil { + return nil, err + } + p := &ParsedPkg{} + p.Name = pkg.Name.Name + obj, exists := pkg.Scope.Objects[objName] + if !exists { + return nil, fmt.Errorf("could not find object %s in %s", objName, filePath) + } + if obj.Kind != ast.Typ { + return nil, fmt.Errorf("exected type, got %s", obj.Kind) + } + spec, ok := obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type} + } + + p.Functions, err = parseInterface(iface) + if err != nil { + return nil, err + } + + // figure out what imports will be needed + imports := make(map[string]importSpec) + for _, f := range p.Functions { + args := append(f.Args, f.Returns...) + for _, arg := range args { + if len(arg.PackageSelector) == 0 { + continue + } + + for _, i := range pkg.Imports { + if i.Name != nil { + if i.Name.Name != arg.PackageSelector { + continue + } + imports[i.Path.Value] = importSpec{Name: arg.PackageSelector, Path: i.Path.Value} + break + } + + _, name := path.Split(i.Path.Value) + splitName := strings.Split(name, "-") + if len(splitName) > 1 { + name = splitName[len(splitName)-1] + } + // import paths have quotes already added in, so need to remove them for name comparison + name = strings.TrimPrefix(name, `"`) + name = strings.TrimSuffix(name, `"`) + if name == arg.PackageSelector { + imports[i.Path.Value] = importSpec{Path: i.Path.Value} + break + } + } + } + } + + for _, spec := range imports { + p.Imports = append(p.Imports, spec) + } + + return p, nil +} + +func parseInterface(iface *ast.InterfaceType) ([]function, error) { + var functions []function + for _, field := range iface.Methods.List { + switch f := field.Type.(type) { + case *ast.FuncType: + method, err := parseFunc(field) + if err != nil { + return nil, err + } + if method == nil { + continue + } + functions = append(functions, *method) + case *ast.Ident: + spec, ok := f.Obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", spec.Type} + } + funcs, err := parseInterface(iface) + if err != nil { + fmt.Println(err) + continue + } + functions = append(functions, funcs...) + default: + return nil, errUnexpectedType{"*astFuncType or *ast.Ident", f} + } + } + return functions, nil +} + +func parseFunc(field *ast.Field) (*function, error) { + f := field.Type.(*ast.FuncType) + method := &function{Name: field.Names[0].Name} + if _, exists := skipFuncs[method.Name]; exists { + fmt.Println("skipping:", method.Name) + return nil, nil + } + if f.Params != nil { + args, err := parseArgs(f.Params.List) + if err != nil { + return nil, err + } + method.Args = args + } + if f.Results != nil { + returns, err := parseArgs(f.Results.List) + if err != nil { + return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err) + } + method.Returns = returns + } + return method, nil +} + +func parseArgs(fields []*ast.Field) ([]arg, error) { + var args []arg + for _, f := range fields { + if len(f.Names) == 0 { + return nil, errBadReturn + } + for _, name := range f.Names { + p, err := parseExpr(f.Type) + if err != nil { + return nil, err + } + args = append(args, arg{name.Name, p.value, p.pkg}) + } + } + return args, nil +} + +type parsedExpr struct { + value string + pkg string +} + +func parseExpr(e ast.Expr) (parsedExpr, error) { + var parsed parsedExpr + switch i := e.(type) { + case *ast.Ident: + parsed.value += i.Name + case *ast.StarExpr: + p, err := parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.value += "*" + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.SelectorExpr: + p, err := parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.pkg = p.value + parsed.value += p.value + "." + parsed.value += i.Sel.Name + case *ast.MapType: + parsed.value += "map[" + p, err := parseExpr(i.Key) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.value += "]" + p, err = parseExpr(i.Value) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.ArrayType: + parsed.value += "[]" + p, err := parseExpr(i.Elt) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + default: + return parsed, errUnexpectedType{"*ast.Ident or *ast.StarExpr", i} + } + return parsed, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go new file mode 100644 index 0000000..a1b1ac9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "testing" +) + +const testFixture = "fixtures/foo.go" + +func TestParseEmptyInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 0, len(pkg.Functions)) +} + +func TestParseNonInterfaceType(t *testing.T) { + _, err := Parse(testFixture, "wobble") + if _, ok := err.(errUnexpectedType); !ok { + t.Fatal("expected type error when parsing non-interface type") + } +} + +func TestParseWithOneFunction(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer2") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 1, len(pkg.Functions)) + assertName(t, "Foo", pkg.Functions[0].Name) + assertNum(t, 0, len(pkg.Functions[0].Args)) + assertNum(t, 0, len(pkg.Functions[0].Returns)) +} + +func TestParseWithMultipleFuncs(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer3") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 7, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Bar", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + f = pkg.Functions[2] + assertName(t, "Baz", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[3] + assertName(t, "Qux", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", f.Args[0].Name) + assertName(t, "string", f.Args[0].ArgType) + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "val", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[4] + assertName(t, "Wobble", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "*wobble", arg.ArgType) + + f = pkg.Functions[5] + assertName(t, "Wiggle", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "wobble", arg.ArgType) + + f = pkg.Functions[6] + assertName(t, "WiggleWobble", f.Name) + assertNum(t, 6, len(f.Args)) + assertNum(t, 6, len(f.Returns)) + expectedArgs := [][]string{ + {"a", "[]*wobble"}, + {"b", "[]wobble"}, + {"c", "map[string]*wobble"}, + {"d", "map[*wobble]wobble"}, + {"e", "map[string][]wobble"}, + {"f", "[]*otherfixture.Spaceship"}, + } + for i, arg := range f.Args { + assertName(t, expectedArgs[i][0], arg.Name) + assertName(t, expectedArgs[i][1], arg.ArgType) + } + expectedReturns := [][]string{ + {"g", "map[*wobble]wobble"}, + {"h", "[][]*wobble"}, + {"i", "otherfixture.Spaceship"}, + {"j", "*otherfixture.Spaceship"}, + {"k", "map[*otherfixture.Spaceship]otherfixture.Spaceship"}, + {"l", "[]otherfixture.Spaceship"}, + } + for i, ret := range f.Returns { + assertName(t, expectedReturns[i][0], ret.Name) + assertName(t, expectedReturns[i][1], ret.ArgType) + } +} + +func TestParseWithUnamedReturn(t *testing.T) { + _, err := Parse(testFixture, "Fooer4") + if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { + t.Fatalf("expected ErrBadReturn, got %v", err) + } +} + +func TestEmbeddedInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer5") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 2, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Boo", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[0] + assertName(t, "s", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) +} + +func TestParsedImports(t *testing.T) { + cases := []string{"Fooer6", "Fooer7", "Fooer8", "Fooer9", "Fooer10", "Fooer11"} + for _, testCase := range cases { + pkg, err := Parse(testFixture, testCase) + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + importPath := strings.Split(pkg.Imports[0].Path, "/") + assertName(t, "otherfixture\"", importPath[len(importPath)-1]) + assertName(t, "", pkg.Imports[0].Name) + } +} + +func TestAliasedImports(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer12") + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + assertName(t, "aliasedio", pkg.Imports[0].Name) +} + +func assertName(t *testing.T, expected, actual string) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual)) + } +} + +func assertNum(t *testing.T, expected, actual int) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual)) + } +} + +func fatalOut(t *testing.T, msg string) { + _, file, ln, _ := runtime.Caller(2) + t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go new file mode 100644 index 0000000..50ed929 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go @@ -0,0 +1,118 @@ +package main + +import ( + "strings" + "text/template" +) + +func printArgs(args []arg) string { + var argStr []string + for _, arg := range args { + argStr = append(argStr, arg.String()) + } + return strings.Join(argStr, ", ") +} + +func buildImports(specs []importSpec) string { + if len(specs) == 0 { + return `import "errors"` + } + imports := "import(\n" + imports += "\t\"errors\"\n" + for _, i := range specs { + imports += "\t" + i.String() + "\n" + } + imports += ")" + return imports +} + +func marshalType(t string) string { + switch t { + case "error": + // convert error types to plain strings to ensure the values are encoded/decoded properly + return "string" + default: + return t + } +} + +func isErr(t string) bool { + switch t { + case "error": + return true + default: + return false + } +} + +// Need to use this helper due to issues with go-vet +func buildTag(s string) string { + return "+build " + s +} + +var templFuncs = template.FuncMap{ + "printArgs": printArgs, + "marshalType": marshalType, + "isErr": isErr, + "lower": strings.ToLower, + "title": title, + "tag": buildTag, + "imports": buildImports, +} + +func title(s string) string { + if strings.ToLower(s) == "id" { + return "ID" + } + return strings.Title(s) +} + +var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(` +// generated code - DO NOT EDIT +{{ range $k, $v := .BuildTags }} + // {{ tag $k }} {{ end }} + +package {{ .Name }} + +{{ imports .Imports }} + +type client interface{ + Call(string, interface{}, interface{}) error +} + +type {{ .InterfaceType }}Proxy struct { + client +} + +{{ range .Functions }} + type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{ + {{ range .Args }} + {{ title .Name }} {{ .ArgType }} {{ end }} + } + + type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{ + {{ range .Returns }} + {{ title .Name }} {{ marshalType .ArgType }} {{ end }} + } + + func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) { + var( + req {{ $.InterfaceType }}Proxy{{ .Name }}Request + ret {{ $.InterfaceType }}Proxy{{ .Name }}Response + ) + {{ range .Args }} + req.{{ title .Name }} = {{ lower .Name }} {{ end }} + if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil { + return + } + {{ range $r := .Returns }} + {{ if isErr .ArgType }} + if ret.{{ title .Name }} != "" { + {{ lower .Name }} = errors.New(ret.{{ title .Name }}) + } {{ end }} + {{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }} + + return + } +{{ end }} +`)) diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go new file mode 100644 index 0000000..c0059cb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -0,0 +1,329 @@ +// Package plugins provides structures and helper functions to manage Docker +// plugins. +// +// Docker discovers plugins by looking for them in the plugin directory whenever +// a user or container tries to use one by name. UNIX domain socket files must +// be located under /run/docker/plugins, whereas spec files can be located +// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled +// by the Registry interface, which lets you list all plugins or get a plugin by +// its name if it exists. +// +// The plugins need to implement an HTTP server and bind this to the UNIX socket +// or the address specified in the spec files. +// A handshake is send at /Plugin.Activate, and plugins are expected to return +// a Manifest with a list of of Docker subsystems which this plugin implements. +// +// In order to use a plugins, you can use the ``Get`` with the name of the +// plugin and the subsystem it implements. +// +// plugin, err := plugins.Get("example", "VolumeDriver") +// if err != nil { +// return fmt.Errorf("Error looking up volume plugin example: %v", err) +// } +package plugins + +import ( + "errors" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrNotImplements is returned if the plugin does not implement the requested driver. + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +type extpointHandlers struct { + sync.RWMutex + extpointHandlers map[string][]func(string, *Client) +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} +) + +// Manifest lists what a plugin implements. +type Manifest struct { + // List of subsystem the plugin implements. + Implements []string +} + +// Plugin is the definition of a docker plugin. +type Plugin struct { + // Name of the plugin + name string + // Address of the plugin + Addr string + // TLS configuration of the plugin + TLSConfig *tlsconfig.Options + // Client attached to the plugin + client *Client + // Manifest of the plugin (see above) + Manifest *Manifest `json:"-"` + + // wait for activation to finish + activateWait *sync.Cond + // error produced by activation + activateErr error + // keeps track of callback handlers run against this plugin + handlersRun bool +} + +// Name returns the name of the plugin. +func (p *Plugin) Name() string { + return p.name +} + +// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. +func (p *Plugin) Client() *Client { + return p.client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return true +} + +// NewLocalPlugin creates a new local plugin. +func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + + if p.activated() { + p.runHandlers() + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + + p.runHandlers() + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +// runHandlers runs the registered handlers for the implemented plugin types +// This should only be run after activation, and while the activation lock is held. +func (p *Plugin) runHandlers() { + if !p.activated() { + return + } + + handlers.RLock() + if !p.handlersRun { + for _, iface := range p.Manifest.Implements { + hdlrs, handled := handlers.extpointHandlers[iface] + if !handled { + continue + } + for _, handler := range hdlrs { + handler(p.name, p.client) + } + } + p.handlersRun = true + } + handlers.RUnlock() + +} + +// activated returns if the plugin has already been activated. +// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. +func Handle(iface string, fn func(string, *Client)) { + handlers.Lock() + hdlrs, ok := handlers.extpointHandlers[iface] + if !ok { + hdlrs = []func(string, *Client){} + } + + hdlrs = append(hdlrs, fn) + handlers.extpointHandlers[iface] = hdlrs + + storage.Lock() + for _, p := range storage.plugins { + p.activateWait.L.Lock() + if p.activated() && p.implements(iface) { + p.handlersRun = false + } + p.activateWait.L.Unlock() + } + storage.Unlock() + + handlers.Unlock() +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go new file mode 100644 index 0000000..9c5a0b5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go @@ -0,0 +1,7 @@ +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For v1 plugins, this always returns the host's root directory. +func (p *Plugin) BasePath() string { + return "/" +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go new file mode 100644 index 0000000..3c8d8fe --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go @@ -0,0 +1,8 @@ +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Windows v1 plugins, this returns an empty string, since the plugin is already aware +// of the absolute path of the mount. +func (p *Plugin) BasePath() string { + return "" +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go new file mode 100644 index 0000000..5be146a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. +func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. +func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go new file mode 100644 index 0000000..d7f1e21 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. +func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 0000000..5c5aead --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,116 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools_test.go b/vendor/github.com/docker/docker/pkg/pools/pools_test.go new file mode 100644 index 0000000..1661b78 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pools/pools_test.go @@ -0,0 +1,161 @@ +package pools + +import ( + "bufio" + "bytes" + "io" + "strings" + "testing" +) + +func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + reader := BufioReader32KPool.Get(nil) + if reader == nil { + t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") + } +} + +func TestBufioReaderPoolPutAndGet(t *testing.T) { + sr := bufio.NewReader(strings.NewReader("foobar")) + reader := BufioReader32KPool.Get(sr) + if reader == nil { + t.Fatalf("BufioReaderPool should not return a nil reader.") + } + // verify the first 3 byte + buf1 := make([]byte, 3) + _, err := reader.Read(buf1) + if err != nil { + t.Fatal(err) + } + if actual := string(buf1); actual != "foo" { + t.Fatalf("The first letter should have been 'foo' but was %v", actual) + } + BufioReader32KPool.Put(reader) + // Try to read the next 3 bytes + _, err = sr.Read(make([]byte, 3)) + if err == nil || err != io.EOF { + t.Fatalf("The buffer should have been empty, issue an EOF error.") + } +} + +type simpleReaderCloser struct { + io.Reader + closed bool +} + +func (r *simpleReaderCloser) Close() error { + r.closed = true + return nil +} + +func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { + br := bufio.NewReader(strings.NewReader("")) + sr := &simpleReaderCloser{ + Reader: strings.NewReader("foobar"), + closed: false, + } + reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) + if reader == nil { + t.Fatalf("NewReadCloserWrapper should not return a nil reader.") + } + // Verify the content of reader + buf := make([]byte, 3) + _, err := reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "foo" { + t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) + } + reader.Close() + // Read 3 more bytes "bar" + _, err = reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "bar" { + t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) + } + if !sr.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} + +func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + writer := BufioWriter32KPool.Get(nil) + if writer == nil { + t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") + } +} + +func TestBufioWriterPoolPutAndGet(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + writer := BufioWriter32KPool.Get(bw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + // Make sure we Flush all the way ? + writer.Flush() + bw.Flush() + if len(buf.Bytes()) != 6 { + t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) + } + // Reset the buffer + buf.Reset() + BufioWriter32KPool.Put(writer) + // Try to write something + if _, err = writer.Write([]byte("barfoo")); err != nil { + t.Fatal(err) + } + // If we now try to flush it, it should panic (the writer is nil) + // recover it + defer func() { + if r := recover(); r == nil { + t.Fatal("Trying to flush the writter should have 'paniced', did not.") + } + }() + writer.Flush() +} + +type simpleWriterCloser struct { + io.Writer + closed bool +} + +func (r *simpleWriterCloser) Close() error { + r.closed = true + return nil +} + +func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + sw := &simpleWriterCloser{ + Writer: new(bytes.Buffer), + closed: false, + } + bw.Flush() + writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + writer.Close() + if !sw.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go new file mode 100644 index 0000000..fcf3117 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progress.go @@ -0,0 +1,84 @@ +package progress + +import ( + "fmt" +) + +// Progress represents the progress of a transfer. +type Progress struct { + ID string + + // Progress contains a Message or... + Message string + + // ...progress of an action + Action string + Current int64 + Total int64 + + // Aux contains extra information not presented to the user, such as + // digests for push signing. + Aux interface{} + + LastUpdate bool +} + +// Output is an interface for writing progress information. It's +// like a writer for progress, but we don't call it Writer because +// that would be confusing next to ProgressReader (also, because it +// doesn't implement the io.Writer interface). +type Output interface { + WriteProgress(Progress) error +} + +type chanOutput chan<- Progress + +func (out chanOutput) WriteProgress(p Progress) error { + out <- p + return nil +} + +// ChanOutput returns an Output that writes progress updates to the +// supplied channel. +func ChanOutput(progressChan chan<- Progress) Output { + return chanOutput(progressChan) +} + +type discardOutput struct{} + +func (discardOutput) WriteProgress(Progress) error { + return nil +} + +// DiscardOutput returns an Output that discards progress +func DiscardOutput() Output { + return discardOutput{} +} + +// Update is a convenience function to write a progress update to the channel. +func Update(out Output, id, action string) { + out.WriteProgress(Progress{ID: id, Action: action}) +} + +// Updatef is a convenience function to write a printf-formatted progress update +// to the channel. +func Updatef(out Output, id, format string, a ...interface{}) { + Update(out, id, fmt.Sprintf(format, a...)) +} + +// Message is a convenience function to write a progress message to the channel. +func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. +func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go new file mode 100644 index 0000000..6b3927e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader.go @@ -0,0 +1,66 @@ +package progress + +import ( + "io" + "time" + + "golang.org/x/time/rate" +) + +// Reader is a Reader with progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string + rateLimiter *rate.Limiter +} + +// NewProgressReader creates a new ProgressReader. +func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) //512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. +func (p *Reader) Close() error { + if p.current < p.size { + // print a full progress bar when closing prematurely + p.current = p.size + p.updateProgress(false) + } + return p.in.Close() +} + +func (p *Reader) updateProgress(last bool) { + if last || p.current == p.size || p.rateLimiter.Allow() { + p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) + } +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go b/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go new file mode 100644 index 0000000..b14d401 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go @@ -0,0 +1,75 @@ +package progress + +import ( + "bytes" + "io" + "io/ioutil" + "testing" +) + +func TestOutputOnPrematureClose(t *testing.T) { + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + progressChan := make(chan Progress, 10) + + pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") + + part := make([]byte, 4, 4) + _, err := io.ReadFull(pr, part) + if err != nil { + pr.Close() + t.Fatal(err) + } + +drainLoop: + for { + select { + case <-progressChan: + default: + break drainLoop + } + } + + pr.Close() + + select { + case <-progressChan: + default: + t.Fatalf("Expected some output when closing prematurely") + } +} + +func TestCompleteSilently(t *testing.T) { + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + progressChan := make(chan Progress, 10) + + pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") + + out, err := ioutil.ReadAll(pr) + if err != nil { + pr.Close() + t.Fatal(err) + } + if string(out) != "TESTING" { + pr.Close() + t.Fatalf("Unexpected output %q from reader", string(out)) + } + +drainLoop: + for { + select { + case <-progressChan: + default: + break drainLoop + } + } + + pr.Close() + + select { + case <-progressChan: + t.Fatalf("Should have closed silently when read is complete") + default: + } +} diff --git a/vendor/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go new file mode 100644 index 0000000..dd52b90 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. +func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go new file mode 100644 index 0000000..0936461 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go @@ -0,0 +1,111 @@ +package pubsub + +import ( + "sync" + "time" +) + +var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} + +// NewPublisher creates a new pub/sub publisher to broadcast messages. +// The duration is used as the send timeout as to not block the publisher publishing +// messages to other clients if one client is slow or unresponsive. +// The buffer is used when creating new channels for subscribers. +func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { + return &Publisher{ + buffer: buffer, + timeout: publishTimeout, + subscribers: make(map[subscriber]topicFunc), + } +} + +type subscriber chan interface{} +type topicFunc func(v interface{}) bool + +// Publisher is basic pub/sub structure. Allows to send events and subscribe +// to them. Can be safely used from multiple goroutines. +type Publisher struct { + m sync.RWMutex + buffer int + timeout time.Duration + subscribers map[subscriber]topicFunc +} + +// Len returns the number of subscribers for the publisher +func (p *Publisher) Len() int { + p.m.RLock() + i := len(p.subscribers) + p.m.RUnlock() + return i +} + +// Subscribe adds a new subscriber to the publisher returning the channel. +func (p *Publisher) Subscribe() chan interface{} { + return p.SubscribeTopic(nil) +} + +// SubscribeTopic adds a new subscriber that filters messages sent by a topic. +func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { + ch := make(chan interface{}, p.buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + +// Evict removes the specified subscriber from receiving any more messages. +func (p *Publisher) Evict(sub chan interface{}) { + p.m.Lock() + delete(p.subscribers, sub) + close(sub) + p.m.Unlock() +} + +// Publish sends the data in v to all subscribers currently registered with the publisher. +func (p *Publisher) Publish(v interface{}) { + p.m.RLock() + if len(p.subscribers) == 0 { + p.m.RUnlock() + return + } + + wg := wgPool.Get().(*sync.WaitGroup) + for sub, topic := range p.subscribers { + wg.Add(1) + go p.sendTopic(sub, topic, v, wg) + } + wg.Wait() + wgPool.Put(wg) + p.m.RUnlock() +} + +// Close closes the channels to all subscribers registered with the publisher. +func (p *Publisher) Close() { + p.m.Lock() + for sub := range p.subscribers { + delete(p.subscribers, sub) + close(sub) + } + p.m.Unlock() +} + +func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { + defer wg.Done() + if topic != nil && !topic(v) { + return + } + + // send under a select as to not block if the receiver is unavailable + if p.timeout > 0 { + select { + case sub <- v: + case <-time.After(p.timeout): + } + return + } + + select { + case sub <- v: + default: + } +} diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go new file mode 100644 index 0000000..d6b0a1d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go @@ -0,0 +1,142 @@ +package pubsub + +import ( + "fmt" + "testing" + "time" +) + +func TestSendToOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + c := p.Subscribe() + + p.Publish("hi") + + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestSendToMultipleSubs(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) + + p.Publish("hi") + + for _, c := range subs { + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } + } +} + +func TestEvictOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + s1 := p.Subscribe() + s2 := p.Subscribe() + + p.Evict(s1) + p.Publish("hi") + if _, ok := <-s1; ok { + t.Fatal("expected s1 to not receive the published message") + } + + msg := <-s2 + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestClosePublisher(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) + p.Close() + + for _, c := range subs { + if _, ok := <-c; ok { + t.Fatal("expected all subscriber channels to be closed") + } + } +} + +const sampleText = "test" + +type testSubscriber struct { + dataCh chan interface{} + ch chan error +} + +func (s *testSubscriber) Wait() error { + return <-s.ch +} + +func newTestSubscriber(p *Publisher) *testSubscriber { + ts := &testSubscriber{ + dataCh: p.Subscribe(), + ch: make(chan error), + } + go func() { + for data := range ts.dataCh { + s, ok := data.(string) + if !ok { + ts.ch <- fmt.Errorf("Unexpected type %T", data) + break + } + if s != sampleText { + ts.ch <- fmt.Errorf("Unexpected text %s", s) + break + } + } + close(ts.ch) + }() + return ts +} + +// for testing with -race +func TestPubSubRace(t *testing.T) { + p := NewPublisher(0, 1024) + var subs [](*testSubscriber) + for j := 0; j < 50; j++ { + subs = append(subs, newTestSubscriber(p)) + } + for j := 0; j < 1000; j++ { + p.Publish(sampleText) + } + time.AfterFunc(1*time.Second, func() { + for _, s := range subs { + p.Evict(s.dataCh) + } + }) + for _, s := range subs { + s.Wait() + } +} + +func BenchmarkPubSub(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + p := NewPublisher(0, 1024) + var subs [](*testSubscriber) + for j := 0; j < 50; j++ { + subs = append(subs, newTestSubscriber(p)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + p.Publish(sampleText) + } + time.AfterFunc(1*time.Second, func() { + for _, s := range subs { + p.Evict(s.dataCh) + } + }) + for _, s := range subs { + if err := s.Wait(); err != nil { + b.Fatal(err) + } + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go new file mode 100644 index 0000000..70de4d1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/random/random.go @@ -0,0 +1,71 @@ +package random + +import ( + cryptorand "crypto/rand" + "io" + "math" + "math/big" + "math/rand" + "sync" + "time" +) + +// Rand is a global *rand.Rand instance, which initialized with NewSource() source. +var Rand = rand.New(NewSource()) + +// Reader is a global, shared instance of a pseudorandom bytes generator. +// It doesn't consume entropy. +var Reader io.Reader = &reader{rnd: Rand} + +// copypaste from standard math/rand +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// NewSource returns math/rand.Source safe for concurrent use and initialized +// with current unix-nano timestamp +func NewSource() rand.Source { + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + return &lockedSource{ + src: rand.NewSource(seed), + } +} + +type reader struct { + rnd *rand.Rand +} + +func (r *reader) Read(b []byte) (int, error) { + i := 0 + for { + val := r.rnd.Int63() + for val > 0 { + b[i] = byte(val) + i++ + if i == len(b) { + return i, nil + } + val >>= 8 + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/random/random_test.go b/vendor/github.com/docker/docker/pkg/random/random_test.go new file mode 100644 index 0000000..cf405f7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/random/random_test.go @@ -0,0 +1,22 @@ +package random + +import ( + "math/rand" + "sync" + "testing" +) + +// for go test -v -race +func TestConcurrency(t *testing.T) { + rnd := rand.New(NewSource()) + var wg sync.WaitGroup + + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + rnd.Int63() + wg.Done() + }() + } + wg.Wait() +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md new file mode 100644 index 0000000..45592ce --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/README.md @@ -0,0 +1,5 @@ +## reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go new file mode 100644 index 0000000..34ae2a9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which has Path as current binary. Also it setting +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go new file mode 100644 index 0000000..778a720 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd solaris darwin + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go new file mode 100644 index 0000000..76edd82 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!freebsd,!solaris,!darwin + +package reexec + +import ( + "os/exec" +) + +// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go new file mode 100644 index 0000000..ca871c4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go new file mode 100644 index 0000000..c56671d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar.go b/vendor/github.com/docker/docker/pkg/registrar/registrar.go new file mode 100644 index 0000000..1e75ee9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/registrar/registrar.go @@ -0,0 +1,127 @@ +// Package registrar provides name registration. It reserves a name to a given key. +package registrar + +import ( + "errors" + "sync" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") + // ErrNoSuchKey is returned when trying to find the names for a key which is not known + ErrNoSuchKey = errors.New("provided key does not exist") +) + +// Registrar stores indexes a list of keys and their registered names as well as indexes names and the key that they are registered to +// Names must be unique. +// Registrar is safe for concurrent access. +type Registrar struct { + idx map[string][]string + names map[string]string + mu sync.Mutex +} + +// NewRegistrar creates a new Registrar with the an empty index +func NewRegistrar() *Registrar { + return &Registrar{ + idx: make(map[string][]string), + names: make(map[string]string), + } +} + +// Reserve registers a key to a name +// Reserve is idempotent +// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` +// A name reservation is globally unique +func (r *Registrar) Reserve(name, key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if k, exists := r.names[name]; exists { + if k != key { + return ErrNameReserved + } + return nil + } + + r.idx[key] = append(r.idx[key], name) + r.names[name] = key + return nil +} + +// Release releases the reserved name +// Once released, a name can be reserved again +func (r *Registrar) Release(name string) { + r.mu.Lock() + defer r.mu.Unlock() + + key, exists := r.names[name] + if !exists { + return + } + + for i, n := range r.idx[key] { + if n != name { + continue + } + r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) + break + } + + delete(r.names, name) + + if len(r.idx[key]) == 0 { + delete(r.idx, key) + } +} + +// Delete removes all reservations for the passed in key. +// All names reserved to this key are released. +func (r *Registrar) Delete(key string) { + r.mu.Lock() + for _, name := range r.idx[key] { + delete(r.names, name) + } + delete(r.idx, key) + r.mu.Unlock() +} + +// GetNames lists all the reserved names for the given key +func (r *Registrar) GetNames(key string) ([]string, error) { + r.mu.Lock() + defer r.mu.Unlock() + + names, exists := r.idx[key] + if !exists { + return nil, ErrNoSuchKey + } + return names, nil +} + +// Get returns the key that the passed in name is reserved to +func (r *Registrar) Get(name string) (string, error) { + r.mu.Lock() + key, exists := r.names[name] + r.mu.Unlock() + + if !exists { + return "", ErrNameNotReserved + } + return key, nil +} + +// GetAll returns all registered names +func (r *Registrar) GetAll() map[string][]string { + out := make(map[string][]string) + + r.mu.Lock() + // copy index into out + for id, names := range r.idx { + out[id] = names + } + r.mu.Unlock() + return out +} diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go b/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go new file mode 100644 index 0000000..0c1ef31 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go @@ -0,0 +1,119 @@ +package registrar + +import ( + "reflect" + "testing" +) + +func TestReserve(t *testing.T) { + r := NewRegistrar() + + obj := "test1" + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } + + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } + + obj2 := "test2" + err := r.Reserve("test", obj2) + if err == nil { + t.Fatalf("expected error when reserving an already reserved name to another object") + } + if err != ErrNameReserved { + t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name") + } +} + +func TestRelease(t *testing.T) { + r := NewRegistrar() + obj := "testing" + + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } + r.Release("test") + r.Release("test") // Ensure there is no panic here + + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } +} + +func TestGetNames(t *testing.T) { + r := NewRegistrar() + obj := "testing" + names := []string{"test1", "test2"} + + for _, name := range names { + if err := r.Reserve(name, obj); err != nil { + t.Fatal(err) + } + } + r.Reserve("test3", "other") + + names2, err := r.GetNames(obj) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(names, names2) { + t.Fatalf("Exepected: %v, Got: %v", names, names2) + } +} + +func TestDelete(t *testing.T) { + r := NewRegistrar() + obj := "testing" + names := []string{"test1", "test2"} + for _, name := range names { + if err := r.Reserve(name, obj); err != nil { + t.Fatal(err) + } + } + + r.Reserve("test3", "other") + r.Delete(obj) + + _, err := r.GetNames(obj) + if err == nil { + t.Fatal("expected error getting names for deleted key") + } + + if err != ErrNoSuchKey { + t.Fatal("expected `ErrNoSuchKey`") + } +} + +func TestGet(t *testing.T) { + r := NewRegistrar() + obj := "testing" + name := "test" + + _, err := r.Get(name) + if err == nil { + t.Fatal("expected error when key does not exist") + } + if err != ErrNameNotReserved { + t.Fatal(err) + } + + if err := r.Reserve(name, obj); err != nil { + t.Fatal(err) + } + + if _, err = r.Get(name); err != nil { + t.Fatal(err) + } + + r.Delete(obj) + _, err = r.Get(name) + if err == nil { + t.Fatal("expected error when key does not exist") + } + if err != ErrNameNotReserved { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/signal/README.md b/vendor/github.com/docker/docker/pkg/signal/README.md new file mode 100644 index 0000000..2b237a5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go new file mode 100644 index 0000000..68bb77c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal.go @@ -0,0 +1,54 @@ +// Package signal provides helper functions for dealing with signals across +// various operating systems. +package signal + +import ( + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" +) + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := []os.Signal{} + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return signal, nil +} + +// ValidSignalForPlatform returns true if a signal is valid on the platform +func ValidSignalForPlatform(sig syscall.Signal) bool { + for _, v := range SignalMap { + if v == sig { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go new file mode 100644 index 0000000..946de87 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go @@ -0,0 +1,41 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Darwin signals. +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUG": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go new file mode 100644 index 0000000..6b9569b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go @@ -0,0 +1,43 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of FreeBSD signals. +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUF": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "LWP": syscall.SIGLWP, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "THR": syscall.SIGTHR, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go new file mode 100644 index 0000000..d418cbe --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go @@ -0,0 +1,80 @@ +package signal + +import ( + "syscall" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 +) + +// SignalMap is a map of Linux signals. +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUS": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CLD": syscall.SIGCLD, + "CONT": syscall.SIGCONT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "POLL": syscall.SIGPOLL, + "PROF": syscall.SIGPROF, + "PWR": syscall.SIGPWR, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STKFLT": syscall.SIGSTKFLT, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "UNUSED": syscall.SIGUNUSED, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go new file mode 100644 index 0000000..89576b9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go @@ -0,0 +1,42 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Solaris signals. +// SIGINFO and SIGTHR not defined for Solaris +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUF": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "LWP": syscall.SIGLWP, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go new file mode 100644 index 0000000..5d058fd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go @@ -0,0 +1,21 @@ +// +build !windows + +package signal + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) + +const ( + // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. + SIGCHLD = syscall.SIGCHLD + // SIGWINCH is a signal sent to a process when its controlling terminal changes its size + SIGWINCH = syscall.SIGWINCH + // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading + SIGPIPE = syscall.SIGPIPE + // DefaultStopSignal is the syscall signal used to stop a container in unix systems. + DefaultStopSignal = "SIGTERM" +) diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go new file mode 100644 index 0000000..c592d37 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package signal + +import ( + "syscall" +) + +// SignalMap is an empty map of signals for unsupported platform. +var SignalMap = map[string]syscall.Signal{} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go new file mode 100644 index 0000000..440f270 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package signal + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) +const ( + SIGCHLD = syscall.Signal(0xff) + SIGWINCH = syscall.Signal(0xff) + SIGPIPE = syscall.Signal(0xff) + // DefaultStopSignal is the syscall signal used to stop a container in windows systems. + DefaultStopSignal = "15" +) + +// SignalMap is a map of "supported" signals. As per the comment in GOLang's +// ztypes_windows.go: "More invented values for signals". Windows doesn't +// really support signals in any way, shape or form that Unix does. +// +// We have these so that docker kill can be used to gracefully (TERM) and +// forcibly (KILL) terminate a container on Windows. +var SignalMap = map[string]syscall.Signal{ + "KILL": syscall.SIGKILL, + "TERM": syscall.SIGTERM, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go new file mode 100644 index 0000000..638a1ab --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/trap.go @@ -0,0 +1,103 @@ +package signal + +import ( + "fmt" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is +// skipped and the process is terminated immediately (allows force quit of stuck daemon) +// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. +// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while +// the docker daemon is not restarted and also running under systemd. +// Fixes https://github.com/docker/docker/issues/19728 +// +func Trap(cleanup func()) { + c := make(chan os.Signal, 1) + // we will handle INT, TERM, QUIT, SIGPIPE here + signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} + gosignal.Notify(c, signals...) + go func() { + interruptCount := uint32(0) + for sig := range c { + if sig == syscall.SIGPIPE { + continue + } + + go func(sig os.Signal) { + logrus.Infof("Processing signal '%v'", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + if atomic.LoadUint32(&interruptCount) < 3 { + // Initiate the cleanup only once + if atomic.AddUint32(&interruptCount, 1) == 1 { + // Call the provided cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + // 3 SIGTERM/INT signals received; force exit without cleanup + logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + } + case syscall.SIGQUIT: + DumpStacks("") + logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") + } + //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} + +const stacksLogNameTemplate = "goroutine-stacks-%s.log" + +// DumpStacks appends the runtime stack into file in dir and returns full path +// to that file. +func DumpStacks(dir string) (string, error) { + var ( + buf []byte + stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + var f *os.File + if dir != "" { + path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) + var err error + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + return "", errors.Wrap(err, "failed to open file to write the goroutine stacks") + } + defer f.Close() + defer f.Sync() + } else { + f = os.Stderr + } + if _, err := f.Write(buf); err != nil { + return "", errors.Wrap(err, "failed to write goroutine stacks") + } + return f.Name(), nil +} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go new file mode 100644 index 0000000..be20765 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -0,0 +1,174 @@ +package stdcopy + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +// StdType is the type of standard stream +// a writer can multiplex to. +type StdType byte + +const ( + // Stdin represents standard input stream type. + Stdin StdType = iota + // Stdout represents standard output stream type. + Stdout + // Stderr represents standard error steam type. + Stderr + + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 +) + +var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} + +// stdWriter is wrapper of io.Writer with extra customized info. +type stdWriter struct { + io.Writer + prefix byte +} + +// Write sends the buffer to the underneath writer. +// It inserts the prefix header before the buffer, +// so stdcopy.StdCopy knows where to multiplex the output. +// It makes stdWriter to implement io.Writer. +func (w *stdWriter) Write(p []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + if p == nil { + return 0, nil + } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) + buf := bufPool.Get().(*bytes.Buffer) + buf.Write(header[:]) + buf.Write(p) + + n, err = w.Writer.Write(buf.Bytes()) + n -= stdWriterPrefixLen + if n < 0 { + n = 0 + } + + buf.Reset() + bufPool.Put(buf) + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), + } +} + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // Check the first byte to know where to write + switch StdType(buf[stdWriterFdIndex]) { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) + if ew != nil { + return 0, ew + } + // If the frame has not been fully written: error + if nw != frameSize { + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+stdWriterPrefixLen:]) + // Move the index + nr -= frameSize + stdWriterPrefixLen + } +} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go new file mode 100644 index 0000000..3137a75 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go @@ -0,0 +1,260 @@ +package stdcopy + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "strings" + "testing" +) + +func TestNewStdWriter(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + if writer == nil { + t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") + } +} + +func TestWriteWithUnitializedStdWriter(t *testing.T) { + writer := stdWriter{ + Writer: nil, + prefix: byte(Stdout), + } + n, err := writer.Write([]byte("Something here")) + if n != 0 || err == nil { + t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") + } +} + +func TestWriteWithNilBytes(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + n, err := writer.Write(nil) + if err != nil { + t.Fatalf("Shouldn't have fail when given no data") + } + if n > 0 { + t.Fatalf("Write should have written 0 byte, but has written %d", n) + } +} + +func TestWrite(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test StdWrite.Write") + n, err := writer.Write(data) + if err != nil { + t.Fatalf("Error while writing with StdWrite") + } + if n != len(data) { + t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n) + } +} + +type errWriter struct { + n int + err error +} + +func (f *errWriter) Write(buf []byte) (int, error) { + return f.n, f.err +} + +func TestWriteWithWriterError(t *testing.T) { + expectedError := errors.New("expected") + expectedReturnedBytes := 10 + writer := NewStdWriter(&errWriter{ + n: stdWriterPrefixLen + expectedReturnedBytes, + err: expectedError}, Stdout) + data := []byte("This won't get written, sigh") + n, err := writer.Write(data) + if err != expectedError { + t.Fatalf("Didn't get expected error.") + } + if n != expectedReturnedBytes { + t.Fatalf("Didn't get expected written bytes %d, got %d.", + expectedReturnedBytes, n) + } +} + +func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { + writer := NewStdWriter(&errWriter{n: -1}, Stdout) + data := []byte("This won't get written, sigh") + actual, _ := writer.Write(data) + if actual != 0 { + t.Fatalf("Expected returned written bytes equal to 0, got %d", actual) + } +} + +func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { + buffer = new(bytes.Buffer) + dstOut := NewStdWriter(buffer, Stdout) + _, err = dstOut.Write(stdOutBytes) + if err != nil { + return + } + dstErr := NewStdWriter(buffer, Stderr) + _, err = dstErr.Write(stdErrBytes) + return +} + +func TestStdCopyWriteAndRead(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) + if err != nil { + t.Fatal(err) + } + expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) + if written != int64(expectedTotalWritten) { + t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) + } +} + +type customReader struct { + n int + err error + totalCalls int + correctCalls int + src *bytes.Buffer +} + +func (f *customReader) Read(buf []byte) (int, error) { + f.totalCalls++ + if f.totalCalls <= f.correctCalls { + return f.src.Read(buf) + } + return f.n, f.err +} + +func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { + expectedError := errors.New("error") + reader := &customReader{ + err: expectedError} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { + expectedError := errors.New("error") + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: expectedError, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyDetectsCorruptedFrame(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: io.EOF, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != startingBufLen { + t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written) + } + if err != nil { + t.Fatal("Didn't get nil error") + } +} + +func TestStdCopyWithInvalidInputHeader(t *testing.T) { + dstOut := NewStdWriter(ioutil.Discard, Stdout) + dstErr := NewStdWriter(ioutil.Discard, Stderr) + src := strings.NewReader("Invalid input") + _, err := StdCopy(dstOut, dstErr, src) + if err == nil { + t.Fatal("StdCopy with invalid input header should fail.") + } +} + +func TestStdCopyWithCorruptedPrefix(t *testing.T) { + data := []byte{0x01, 0x02, 0x03} + src := bytes.NewReader(data) + written, err := StdCopy(nil, nil, src) + if err != nil { + t.Fatalf("StdCopy should not return an error with corrupted prefix.") + } + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } +} + +func TestStdCopyReturnsWriteErrors(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + expectedError := errors.New("expected") + + dstOut := &errWriter{err: expectedError} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error, got %v", err) + } +} + +func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + dstOut := &errWriter{n: startingBufLen - 10} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written) + } + if err != io.ErrShortWrite { + t.Fatalf("Didn't get expected io.ErrShortWrite error") + } +} + +func BenchmarkWrite(b *testing.B) { + w := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test line for testing stdwriter performance\n") + data = bytes.Repeat(data, 100) + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := w.Write(data); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go new file mode 100644 index 0000000..ce6ea79 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go @@ -0,0 +1,172 @@ +// Package streamformatter provides helper functions to format a stream. +package streamformatter + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" +) + +// StreamFormatter formats a stream, optionally using JSON. +type StreamFormatter struct { + json bool +} + +// NewStreamFormatter returns a simple StreamFormatter +func NewStreamFormatter() *StreamFormatter { + return &StreamFormatter{} +} + +// NewJSONStreamFormatter returns a StreamFormatter configured to stream json +func NewJSONStreamFormatter() *StreamFormatter { + return &StreamFormatter{true} +} + +const streamNewline = "\r\n" + +var streamNewlineBytes = []byte(streamNewline) + +// FormatStream formats the specified stream. +func (sf *StreamFormatter) FormatStream(str string) []byte { + if sf.json { + b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + "\r") +} + +// FormatStatus formats the specified objects according to the specified format (and id). +func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { + str := fmt.Sprintf(format, a...) + if sf.json { + b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + streamNewline) +} + +// FormatError formats the specified error. +func (sf *StreamFormatter) FormatError(err error) []byte { + if sf.json { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} + } + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return append(b, streamNewlineBytes...) + } + return []byte("{\"error\":\"format error\"}" + streamNewline) + } + return []byte("Error: " + err.Error() + streamNewline) +} + +// FormatProgress formats the progress information for a specified action. +func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} + } + if sf.json { + var auxJSON *json.RawMessage + if aux != nil { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return nil + } + auxJSON = new(json.RawMessage) + *auxJSON = auxJSONBytes + } + b, err := json.Marshal(&jsonmessage.JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + Aux: auxJSON, + }) + if err != nil { + return nil + } + return append(b, streamNewlineBytes...) + } + endl := "\r" + if progress.String() == "" { + endl += "\n" + } + return []byte(action + " " + progress.String() + endl) +} + +// NewProgressOutput returns a progress.Output object that can be passed to +// progress.NewProgressReader. +func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { + return &progressOutput{ + sf: sf, + out: out, + newLines: newLines, + } +} + +type progressOutput struct { + sf *StreamFormatter + out io.Writer + newLines bool +} + +// WriteProgress formats progress information from a ProgressReader. +func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.FormatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total} + formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.FormatStatus("", "")) + return err + } + + return nil +} + +// StdoutFormatter is a streamFormatter that writes to the standard output. +type StdoutFormatter struct { + io.Writer + *StreamFormatter +} + +func (sf *StdoutFormatter) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +// StderrFormatter is a streamFormatter that writes to the standard error. +type StderrFormatter struct { + io.Writer + *StreamFormatter +} + +func (sf *StderrFormatter) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go new file mode 100644 index 0000000..93ec90f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go @@ -0,0 +1,108 @@ +package streamformatter + +import ( + "encoding/json" + "errors" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/jsonmessage" +) + +func TestFormatStream(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatStream("stream") + if string(res) != "stream"+"\r" { + t.Fatalf("%q", res) + } +} + +func TestFormatJSONStatus(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != "a1\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatSimpleError(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != "Error: Error for formatter\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatStream(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatStream("stream") + if string(res) != `{"stream":"stream"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatStatus(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatSimpleError(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatJSONError(t *testing.T) { + sf := NewJSONStreamFormatter() + err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} + res := sf.FormatError(err) + if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatProgress(t *testing.T) { + sf := NewJSONStreamFormatter() + progress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.FormatProgress("id", "action", progress, nil) + msg := &jsonmessage.JSONMessage{} + if err := json.Unmarshal(res, msg); err != nil { + t.Fatal(err) + } + if msg.ID != "id" { + t.Fatalf("ID must be 'id', got: %s", msg.ID) + } + if msg.Status != "action" { + t.Fatalf("Status must be 'action', got: %s", msg.Status) + } + + // The progress will always be in the format of: + // [=========================> ] 15 B/30 B 404933h7m11s + // The last entry '404933h7m11s' is the timeLeftBox. + // However, the timeLeftBox field may change as progress.String() depends on time.Now(). + // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. + + // Compare the progress strings before the timeLeftBox + expectedProgress := "[=========================> ] 15 B/30 B" + // if terminal column is <= 110, expectedProgressShort is expected. + expectedProgressShort := " 15 B/30 B" + if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) || + strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) { + t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s", + expectedProgress, expectedProgressShort, msg.ProgressMessage) + } + + if !reflect.DeepEqual(msg.Progress, progress) { + t.Fatal("Original progress not equals progress from FormatProgress") + } +} diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md new file mode 100644 index 0000000..37a5098 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go new file mode 100644 index 0000000..fa35d8b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -0,0 +1,69 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + "crypto/rand" + "encoding/hex" + "io" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/pkg/random" +) + +const shortLen = 12 + +var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + if len(id) > shortLen { + id = id[:shortLen] + } + return id +} + +func generateID(crypto bool) string { + b := make([]byte, 32) + r := random.Reader + if crypto { + r = rand.Reader + } + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a unique id. +func GenerateRandomID() string { + return generateID(true) +} + +// GenerateNonCryptoID generates unique id without using cryptographically +// secure sources of random. +// It helps you to save entropy. +func GenerateNonCryptoID() string { + return generateID(false) +} diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go b/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go new file mode 100644 index 0000000..8ff6b43 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go @@ -0,0 +1,72 @@ +package stringid + +import ( + "strings" + "testing" +) + +func TestGenerateRandomID(t *testing.T) { + id := GenerateRandomID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestGenerateNonCryptoID(t *testing.T) { + id := GenerateNonCryptoID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestShortenId(t *testing.T) { + id := "90435eec5c4e124e741ef731e118be2fc799a68aba0466ec17717f24ce2ae6a2" + truncID := TruncateID(id) + if truncID != "90435eec5c4e" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenSha256Id(t *testing.T) { + id := "sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba" + truncID := TruncateID(id) + if truncID != "4e38e38c8ce0" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdEmpty(t *testing.T) { + id := "" + truncID := TruncateID(id) + if len(truncID) > len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdInvalid(t *testing.T) { + id := "1234" + truncID := TruncateID(id) + if len(truncID) != len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestIsShortIDNonHex(t *testing.T) { + id := "some non-hex value" + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} + +func TestIsShortIDNotCorrectSize(t *testing.T) { + id := strings.Repeat("a", shortLen+1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } + id = strings.Repeat("a", shortLen-1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/README.md b/vendor/github.com/docker/docker/pkg/stringutils/README.md new file mode 100644 index 0000000..b3e4545 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go new file mode 100644 index 0000000..8e1c812 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go @@ -0,0 +1,101 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" + + "github.com/docker/docker/pkg/random" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[random.Rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). +// For maxlen of 3 and lower, no ellipsis is appended. +func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and a open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go new file mode 100644 index 0000000..8af2bdc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go @@ -0,0 +1,121 @@ +package stringutils + +import "testing" + +func testLengthHelper(generator func(int) string, t *testing.T) { + expectedLength := 20 + s := generator(expectedLength) + if len(s) != expectedLength { + t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength) + } +} + +func testUniquenessHelper(generator func(int) string, t *testing.T) { + repeats := 25 + set := make(map[string]struct{}, repeats) + for i := 0; i < repeats; i = i + 1 { + str := generator(64) + if len(str) != 64 { + t.Fatalf("Id returned is incorrect: %s", str) + } + if _, ok := set[str]; ok { + t.Fatalf("Random number is repeated") + } + set[str] = struct{}{} + } +} + +func isASCII(s string) bool { + for _, c := range s { + if c > 127 { + return false + } + } + return true +} + +func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { + testLengthHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAsciiStringLength(t *testing.T) { + testLengthHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { + str := GenerateRandomASCIIString(64) + if !isASCII(str) { + t.Fatalf("%s contained non-ascii characters", str) + } +} + +func TestEllipsis(t *testing.T) { + str := "t🐳ststring" + newstr := Ellipsis(str, 3) + if newstr != "t🐳s" { + t.Fatalf("Expected t🐳s, got %s", newstr) + } + newstr = Ellipsis(str, 8) + if newstr != "t🐳sts..." { + t.Fatalf("Expected tests..., got %s", newstr) + } + newstr = Ellipsis(str, 20) + if newstr != "t🐳ststring" { + t.Fatalf("Expected t🐳ststring, got %s", newstr) + } +} + +func TestTruncate(t *testing.T) { + str := "t🐳ststring" + newstr := Truncate(str, 4) + if newstr != "t🐳st" { + t.Fatalf("Expected t🐳st, got %s", newstr) + } + newstr = Truncate(str, 20) + if newstr != "t🐳ststring" { + t.Fatalf("Expected t🐳ststring, got %s", newstr) + } +} + +func TestInSlice(t *testing.T) { + slice := []string{"t🐳st", "in", "slice"} + + test := InSlice(slice, "t🐳st") + if !test { + t.Fatalf("Expected string t🐳st to be in slice") + } + test = InSlice(slice, "SLICE") + if !test { + t.Fatalf("Expected string SLICE to be in slice") + } + test = InSlice(slice, "notinslice") + if test { + t.Fatalf("Expected string notinslice not to be in slice") + } +} + +func TestShellQuoteArgumentsEmpty(t *testing.T) { + actual := ShellQuoteArguments([]string{}) + expected := "" + if actual != expected { + t.Fatalf("Expected an empty string") + } +} + +func TestShellQuoteArguments(t *testing.T) { + simpleString := "simpleString" + complexString := "This is a 'more' complex $tring with some special char *" + actual := ShellQuoteArguments([]string{simpleString, complexString}) + expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'" + if actual != expected { + t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual) + } +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 0000000..34c4ea7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 0000000..9b4f4a2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/pkg/symlink/README.md b/vendor/github.com/docker/docker/pkg/symlink/README.md new file mode 100644 index 0000000..8dba54f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/README.md @@ -0,0 +1,6 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, +as well as a Windows long-path aware version of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go new file mode 100644 index 0000000..f6bc223 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an +// absolute path. This function handles paths in a platform-agnostic manner. +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(filepath.FromSlash(path)) + if err != nil { + return "", err + } + root, err = filepath.Abs(filepath.FromSlash(root)) + if err != nil { + return "", err + } + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. +// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. +// Trying to break out from `root` does not constitute an error. +// +// Example: +// If /foo/bar -> /outside, +// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" +// +// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks +// are created and not to create subsequently, additional symlinks that could potentially make a +// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") +// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should +// no longer be considered safely contained in "/foo". +func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it, we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if isDriveOrRoot(cleanP) { + // never Lstat "/" itself, or drive letters on Windows + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if system.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} + +// EvalSymlinks returns the path name after the evaluation of any symbolic +// links. +// If path is relative the result will be relative to the current directory, +// unless one of the components is an absolute symbolic link. +// This version has been updated to support long paths prepended with `\\?\`. +func EvalSymlinks(path string) (string, error) { + return evalSymlinks(path) +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go new file mode 100644 index 0000000..2270827 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package symlink + +import ( + "path/filepath" +) + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} + +func isDriveOrRoot(p string) bool { + return p == string(filepath.Separator) +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go new file mode 100644 index 0000000..7085c0b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go @@ -0,0 +1,407 @@ +// +build !windows + +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + +package symlink + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// TODO Windows: This needs some serious work to port to Windows. For now, +// turning off testing in this package. + +type dirOrLink struct { + path string + target string +} + +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} + +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("expected an error") + } +} + +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChain(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink g (pointed at by symlink h) take out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/b/h", target: "../g"}, + {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutPath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkToRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSlashDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatalf("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go new file mode 100644 index 0000000..241e531 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go @@ -0,0 +1,169 @@ +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/longpath" +) + +func toShort(path string) (string, error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetShortPathName says we can reuse buffer + n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { + return "", err + } + } + return syscall.UTF16ToString(b), nil +} + +func toLong(path string) (string, error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + b = b[:n] + return syscall.UTF16ToString(b), nil +} + +func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + + p, err := toShort(path) + if err != nil { + return "", err + } + p, err = toLong(p) + if err != nil { + return "", err + } + // syscall.GetLongPathName does not change the case of the drive letter, + // but the result of EvalSymlinks must be unique, so we have + // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). + // Make drive letter upper case. + if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { + p = string(p[0]+'A'-'a') + p[1:] + } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { + p = p[:3] + string(p[4]+'A'-'a') + p[5:] + } + return filepath.Clean(p), nil +} + +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { + const maxIter = 255 + originalPath := path + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("EvalSymlinks: too many links in " + originalPath) + } + + // A path beginning with `\\?\` represents the root, so automatically + // skip that part and begin processing the next segment. + if strings.HasPrefix(path, longpath.Prefix) { + b.WriteString(longpath.Prefix) + path = path[4:] + continue + } + + // find next path component, p + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + if b.Len() == 0 { + // must be absolute path + b.WriteRune(filepath.Separator) + } + continue + } + + // If this is the first segment after the long path prefix, accept the + // current segment as a volume root or UNC share and move on to the next. + if b.String() == longpath.Prefix { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + fi, err := os.Lstat(b.String() + p) + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { + b.WriteRune(filepath.Separator) + } + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(b.String() + p) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + return filepath.Clean(b.String()), nil +} + +func isDriveOrRoot(p string) bool { + if p == string(filepath.Separator) { + return true + } + + length := len(p) + if length >= 2 { + if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/README.md b/vendor/github.com/docker/docker/pkg/sysinfo/README.md new file mode 100644 index 0000000..c1530ce --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go new file mode 100644 index 0000000..aeb1a3a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go @@ -0,0 +1,12 @@ +// +build !linux,!windows + +package sysinfo + +import ( + "runtime" +) + +// NumCPU returns the number of CPUs +func NumCPU() int { + return runtime.NumCPU() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go new file mode 100644 index 0000000..5eacd35 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go @@ -0,0 +1,43 @@ +// +build linux + +package sysinfo + +import ( + "runtime" + "syscall" + "unsafe" +) + +// numCPU queries the system for the count of threads available +// for use to this process. +// +// Issues two syscalls. +// Returns 0 on errors. Use |runtime.NumCPU| in that case. +func numCPU() int { + // Gets the affinity mask for a process: The very one invoking this function. + pid, _, _ := syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0) + + var mask [1024 / 64]uintptr + _, _, err := syscall.RawSyscall(syscall.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) + if err != 0 { + return 0 + } + + // For every available thread a bit is set in the mask. + ncpu := 0 + for _, e := range mask { + if e == 0 { + continue + } + ncpu += int(popcnt(uint64(e))) + } + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go new file mode 100644 index 0000000..1d89dd5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package sysinfo + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + kernel32 = windows.NewLazySystemDLL("kernel32.dll") + getCurrentProcess = kernel32.NewProc("GetCurrentProcess") + getProcessAffinityMask = kernel32.NewProc("GetProcessAffinityMask") +) + +func numCPU() int { + // Gets the affinity mask for a process + var mask, sysmask uintptr + currentProcess, _, _ := getCurrentProcess.Call() + ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) + if ret == 0 { + return 0 + } + // For every available thread a bit is set in the mask. + ncpu := int(popcnt(uint64(mask))) + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go new file mode 100644 index 0000000..f046de4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go @@ -0,0 +1,144 @@ +package sysinfo + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. +type SysInfo struct { + // Whether the kernel supports AppArmor or not + AppArmor bool + // Whether the kernel supports Seccomp or not + Seccomp bool + + cgroupMemInfo + cgroupCPUInfo + cgroupBlkioInfo + cgroupCpusetInfo + cgroupPids + + // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work + IPv4ForwardingDisabled bool + + // Whether bridge-nf-call-iptables is supported or not + BridgeNFCallIPTablesDisabled bool + + // Whether bridge-nf-call-ip6tables is supported or not + BridgeNFCallIP6TablesDisabled bool + + // Whether the cgroup has the mountpoint of "devices" or not + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + // Whether memory limit is supported or not + MemoryLimit bool + + // Whether swap limit is supported or not + SwapLimit bool + + // Whether soft limit is supported or not + MemoryReservation bool + + // Whether OOM killer disable is supported or not + OomKillDisable bool + + // Whether memory swappiness is supported or not + MemorySwappiness bool + + // Whether kernel memory limit is supported or not + KernelMemory bool +} + +type cgroupCPUInfo struct { + // Whether CPU shares is supported or not + CPUShares bool + + // Whether CPU CFS(Completely Fair Scheduler) period is supported or not + CPUCfsPeriod bool + + // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not + CPUCfsQuota bool + + // Whether CPU real-time period is supported or not + CPURealtimePeriod bool + + // Whether CPU real-time runtime is supported or not + CPURealtimeRuntime bool +} + +type cgroupBlkioInfo struct { + // Whether Block IO weight is supported or not + BlkioWeight bool + + // Whether Block IO weight_device is supported or not + BlkioWeightDevice bool + + // Whether Block IO read limit in bytes per second is supported or not + BlkioReadBpsDevice bool + + // Whether Block IO write limit in bytes per second is supported or not + BlkioWriteBpsDevice bool + + // Whether Block IO read limit in IO per second is supported or not + BlkioReadIOpsDevice bool + + // Whether Block IO write limit in IO per second is supported or not + BlkioWriteIOpsDevice bool +} + +type cgroupCpusetInfo struct { + // Whether Cpuset is supported or not + Cpuset bool + + // Available Cpuset's cpus + Cpus string + + // Available Cpuset's memory nodes + Mems string +} + +type cgroupPids struct { + // Whether Pids Limit is supported or not + PidsLimit bool +} + +// IsCpusetCpusAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.cpus set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Cpus) +} + +// IsCpusetMemsAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.mems set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} + +// Returns bit count of 1, used by NumCPU +func popcnt(x uint64) (n byte) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return byte(x >> 56) +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 0000000..7ad84a8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,259 @@ +package sysinfo + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +const ( + // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. + SeccompModeFilter = uintptr(2) +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts(false) + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. +func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warn("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warn("Your kernel does not support oom control") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warn("Your kernel does not support memory swappiness") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warn("Your kernel does not support kernel memory limit") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. +func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + + cpuRealtimePeriod := cgroupEnabled(mountPoint, "cpu.rt_period_us") + if !quiet && !cpuRealtimePeriod { + logrus.Warn("Your kernel does not support cgroup rt period") + } + + cpuRealtimeRuntime := cgroupEnabled(mountPoint, "cpu.rt_runtime_us") + if !quiet && !cpuRealtimeRuntime { + logrus.Warn("Your kernel does not support cgroup rt runtime") + } + + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + CPURealtimePeriod: cpuRealtimePeriod, + CPURealtimeRuntime: cpuRealtimeRuntime, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. +func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warn("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. +func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. +func checkCgroupPids(quiet bool) cgroupPids { + _, err := cgroups.FindCgroupMountpoint("pids") + if err != nil { + if !quiet { + logrus.Warn(err) + } + return cgroupPids{} + } + + return cgroupPids{ + PidsLimit: true, + } +} + +func cgroupEnabled(mountPoint, name string) bool { + _, err := os.Stat(path.Join(mountPoint, name)) + return err == nil +} + +func readProcBool(path string) bool { + val, err := ioutil.ReadFile(path) + if err != nil { + return false + } + return strings.TrimSpace(string(val)) == "1" +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go new file mode 100644 index 0000000..fae0fdf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go @@ -0,0 +1,58 @@ +package sysinfo + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" +) + +func TestReadProcBool(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + procFile := filepath.Join(tmpDir, "read-proc-bool") + if err := ioutil.WriteFile(procFile, []byte("1"), 644); err != nil { + t.Fatal(err) + } + + if !readProcBool(procFile) { + t.Fatal("expected proc bool to be true, got false") + } + + if err := ioutil.WriteFile(procFile, []byte("0"), 644); err != nil { + t.Fatal(err) + } + if readProcBool(procFile) { + t.Fatal("expected proc bool to be false, got false") + } + + if readProcBool(path.Join(tmpDir, "no-exist")) { + t.Fatal("should be false for non-existent entry") + } + +} + +func TestCgroupEnabled(t *testing.T) { + cgroupDir, err := ioutil.TempDir("", "cgroup-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(cgroupDir) + + if cgroupEnabled(cgroupDir, "test") { + t.Fatal("cgroupEnabled should be false") + } + + if err := ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 644); err != nil { + t.Fatal(err) + } + + if !cgroupEnabled(cgroupDir, "test") { + t.Fatal("cgroupEnabled should be true") + } +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go new file mode 100644 index 0000000..c858d57 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go @@ -0,0 +1,121 @@ +// +build solaris,cgo + +package sysinfo + +import ( + "bytes" + "os/exec" + "strconv" + "strings" +) + +/* +#cgo LDFLAGS: -llgrp +#include +#include +#include +int getLgrpCount() { + lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE; + uint_t nlgrps; + + if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) { + return -1; + } + nlgrps = lgrp_nlgrps(lgrpcookie); + return nlgrps; +} +*/ +import "C" + +// IsCPUSharesAvailable returns whether CPUShares setting is supported. +// We need FSS to be set as default scheduling class to support CPU Shares +func IsCPUSharesAvailable() bool { + cmd := exec.Command("/usr/sbin/dispadmin", "-d") + outBuf := new(bytes.Buffer) + errBuf := new(bytes.Buffer) + cmd.Stderr = errBuf + cmd.Stdout = outBuf + + if err := cmd.Run(); err != nil { + return false + } + return (strings.Contains(outBuf.String(), "FSS")) +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. +//NOTE Solaris: If we change the below capabilities be sure +// to update verifyPlatformContainerSettings() in daemon_solaris.go +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + sysInfo.cgroupMemInfo = setCgroupMem(quiet) + sysInfo.cgroupCPUInfo = setCgroupCPU(quiet) + sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet) + sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet) + + sysInfo.IPv4ForwardingDisabled = false + + sysInfo.AppArmor = false + + return sysInfo +} + +// setCgroupMem reads the memory information for Solaris. +func setCgroupMem(quiet bool) cgroupMemInfo { + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: true, + MemoryReservation: false, + OomKillDisable: false, + MemorySwappiness: false, + KernelMemory: false, + } +} + +// setCgroupCPU reads the cpu information for Solaris. +func setCgroupCPU(quiet bool) cgroupCPUInfo { + + return cgroupCPUInfo{ + CPUShares: true, + CPUCfsPeriod: false, + CPUCfsQuota: true, + CPURealtimePeriod: false, + CPURealtimeRuntime: false, + } +} + +// blkio switches are not supported in Solaris. +func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { + + return cgroupBlkioInfo{ + BlkioWeight: false, + BlkioWeightDevice: false, + } +} + +// setCgroupCPUsetInfo reads the cpuset information for Solaris. +func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo { + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: getCPUCount(), + Mems: getLgrpCount(), + } +} + +func getCPUCount() string { + ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) + if ncpus <= 0 { + return "" + } + return strconv.FormatInt(int64(ncpus), 16) +} + +func getLgrpCount() string { + nlgrps := C.getLgrpCount() + if nlgrps <= 0 { + return "" + } + return strconv.FormatInt(int64(nlgrps), 16) +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go new file mode 100644 index 0000000..b61fbcf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go @@ -0,0 +1,26 @@ +package sysinfo + +import "testing" + +func TestIsCpusetListAvailable(t *testing.T) { + cases := []struct { + provided string + available string + res bool + err bool + }{ + {"1", "0-4", true, false}, + {"01,3", "0-4", true, false}, + {"", "0-7", true, false}, + {"1--42", "0-7", false, true}, + {"1-42", "00-1,8,,9", false, true}, + {"1,41-42", "43,45", false, false}, + {"0-3", "", false, false}, + } + for _, c := range cases { + r, err := isCpusetListAvailable(c.provided, c.available) + if (c.err && err == nil) && r != c.res { + t.Fatalf("Expected pair: %v, %v for %s, %s. Got %v, %v instead", c.res, c.err, c.provided, c.available, (c.err && err == nil), r) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go new file mode 100644 index 0000000..45f3ef1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris,!windows + +package sysinfo + +// New returns an empty SysInfo for non linux nor solaris for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go new file mode 100644 index 0000000..4e6255b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package sysinfo + +// New returns an empty SysInfo for windows for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 0000000..7637f12 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,52 @@ +package system + +import ( + "os" + "syscall" + "time" + "unsafe" +) + +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go new file mode 100644 index 0000000..5c87df3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go @@ -0,0 +1,94 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" +) + +// prepareTempFile creates a temporary file in a temporary directory. +func prepareTempFile(t *testing.T) (string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + return file, dir +} + +// TestChtimes tests Chtimes on a tempfile. Test only mTime, because aTime is OS dependent +func TestChtimes(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go new file mode 100644 index 0000000..09d58bc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go new file mode 100644 index 0000000..6ec9a71 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go @@ -0,0 +1,91 @@ +// +build !windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesLinux tests Chtimes access time on a tempfile on Linux +func TestChtimesLinux(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat := f.Sys().(*syscall.Stat_t) + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go new file mode 100644 index 0000000..2945868 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -0,0 +1,27 @@ +// +build windows + +package system + +import ( + "syscall" + "time" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) + pathp, e := syscall.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := syscall.CreateFile(pathp, + syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, + syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer syscall.Close(h) + c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) + return syscall.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go new file mode 100644 index 0000000..72d8a10 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go @@ -0,0 +1,86 @@ +// +build windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesWindows tests Chtimes access time on a tempfile on Windows +func TestChtimesWindows(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 0000000..2883189 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go new file mode 100644 index 0000000..3ec6d22 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go @@ -0,0 +1,85 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *windows.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go new file mode 100644 index 0000000..60f0514 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -0,0 +1,33 @@ +package system + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode process the specified error and returns the exit status code +// if the error was of type exec.ExitError, returns nothing otherwise. +func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 0000000..810c794 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,54 @@ +// +build !windows + +package system + +import ( + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// ACL'd for Builtin Administrators and Local System. +func MkdirAllWithACL(path string, perm os.FileMode) error { + return MkdirAll(path, perm) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os package. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 0000000..6094f01 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,236 @@ +// +build windows + +package system + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "syscall" + "unsafe" + + winio "github.com/Microsoft/go-winio" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// ACL'd for Builtin Administrators and Local System. +func MkdirAllWithACL(path string, perm os.FileMode) error { + return mkdirall(path, true) +} + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, _ os.FileMode) error { + return mkdirall(path, false) +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, adminAndLocalSystem bool) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = mkdirall(path[0:j-1], false) + if err != nil { + return err + } + } + + // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. + if adminAndLocalSystem { + err = mkdirWithACL(path) + } else { + err = os.Mkdir(path, 0) + } + + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// mkdirWithACL creates a new directory. If there is an error, it will be of +// type *PathError. . +// +// This is a modified and combined version of os.Mkdir and syscall.Mkdir +// in golang to cater for creating a directory am ACL permitting full +// access, with inheritance, to any subfolder/file for Built-in Administrators +// and Local System. +func mkdirWithACL(name string) error { + sa := syscall.SecurityAttributes{Length: 0} + sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + sd, err := winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + + namep, err := syscall.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := syscall.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} + +// The origin of the functions below here are the golang OS and syscall packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := syscallOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func syscallOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := syscallOpenSequential(name, flag|syscall.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go new file mode 100644 index 0000000..bd23c4d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go new file mode 100644 index 0000000..062cf53 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go @@ -0,0 +1,30 @@ +// +build linux freebsd + +package system + +import ( + "os" + "testing" +) + +// TestLstat tests Lstat for existing and non existing files +func TestLstat(t *testing.T) { + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 0000000..49e87eb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package system + +import ( + "os" +) + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +// Note the Linux version uses fromStatT to do the copy back, +// but that not strictly necessary when already in an OS specific module. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return &StatT{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 0000000..3b6e947 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 0000000..385f1d5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go new file mode 100644 index 0000000..7f4f84f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go @@ -0,0 +1,128 @@ +// +build solaris,cgo + +package system + +import ( + "fmt" + "unsafe" +) + +// #cgo LDFLAGS: -lkstat +// #include +// #include +// #include +// #include +// #include +// #include +// struct swaptable *allocSwaptable(int num) { +// struct swaptable *st; +// struct swapent *swapent; +// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); +// swapent = st->swt_ent; +// for (int i = 0; i < num; i++,swapent++) { +// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); +// } +// st->swt_n = num; +// return st; +//} +// void freeSwaptable (struct swaptable *st) { +// struct swapent *swapent = st->swt_ent; +// for (int i = 0; i < st->swt_n; i++,swapent++) { +// free(swapent->ste_path); +// } +// free(st); +// } +// swapent_t getSwapEnt(swapent_t *ent, int i) { +// return ent[i]; +// } +// int64_t getPpKernel() { +// int64_t pp_kernel = 0; +// kstat_ctl_t *ksc; +// kstat_t *ks; +// kstat_named_t *knp; +// kid_t kid; +// +// if ((ksc = kstat_open()) == NULL) { +// return -1; +// } +// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { +// return -1; +// } +// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || +// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { +// return -1; +// } +// switch (knp->data_type) { +// case KSTAT_DATA_UINT64: +// pp_kernel = knp->value.ui64; +// break; +// case KSTAT_DATA_UINT32: +// pp_kernel = knp->value.ui32; +// break; +// } +// pp_kernel *= sysconf(_SC_PAGESIZE); +// return (pp_kernel > 0 ? pp_kernel : -1); +// } +import "C" + +// Get the system memory info using sysconf same as prtconf +func getTotalMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_PHYS_PAGES) + return int64(pagesize * npages) +} + +func getFreeMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_AVPHYS_PAGES) + return int64(pagesize * npages) +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("error getting system memory info %v\n", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go new file mode 100644 index 0000000..44f5562 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go @@ -0,0 +1,40 @@ +// +build linux freebsd + +package system + +import ( + "strings" + "testing" + + "github.com/docker/go-units" +) + +// TestMemInfo tests parseMemInfo with a static meminfo string +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000..3ce019d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows,!solaris + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 0000000..883944a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 0000000..7395818 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 0000000..2e863c0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 0000000..c607c4d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 0000000..cbfe2c1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. Docker has no context of what the default path should be. +const DefaultPathEnv = "" + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be contatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows_test.go b/vendor/github.com/docker/docker/pkg/system/path_windows_test.go new file mode 100644 index 0000000..eccb26a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_windows_test.go @@ -0,0 +1,78 @@ +// +build windows + +package system + +import "testing" + +// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter +func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { + // Fails if not C drive. + path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`) + if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") { + t.Fatalf("Expected error for d:") + } + + // Single character is unchanged + if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil { + t.Fatalf("Single character should pass") + } + if path != "z" { + t.Fatalf("Single character should be unchanged") + } + + // Two characters without colon is unchanged + if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil { + t.Fatalf("2 characters without colon should pass") + } + if path != "AB" { + t.Fatalf("2 characters without colon should be unchanged") + } + + // Abs path without drive letter + if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil { + t.Fatalf("abs path no drive letter should pass") + } + if path != `\l` { + t.Fatalf("abs path without drive letter should be unchanged") + } + + // Abs path without drive letter, linux style + if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil { + t.Fatalf("abs path no drive letter linux style should pass") + } + if path != `\l` { + t.Fatalf("abs path without drive letter linux failed %s", path) + } + + // Drive-colon should be stripped + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil { + t.Fatalf("An absolute path should pass") + } + if path != `\` { + t.Fatalf(`An absolute path should have been shortened to \ %s`, path) + } + + // Verify with a linux-style path + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil { + t.Fatalf("An absolute path should pass") + } + if path != `\` { + t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path) + } + + // Failure on c: + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil { + t.Fatalf("c: should fail") + } + if err.Error() != `No relative path specified in "c:"` { + t.Fatalf(path, err) + } + + // Failure on d: + if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil { + t.Fatalf("c: should fail") + } + if err.Error() != `No relative path specified in "d:"` { + t.Fatalf(path, err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go new file mode 100644 index 0000000..087034c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat.go @@ -0,0 +1,53 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// GetLastModification returns file's last modification time. +func (s StatT) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go new file mode 100644 index 0000000..f0742f5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -0,0 +1,32 @@ +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// FromStatT loads a system.StatT from a syscall.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go new file mode 100644 index 0000000..d0fb6f1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -0,0 +1,27 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 0000000..8b1eded --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,33 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT exists only on linux, and loads a system.StatT from a +// syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 0000000..3c3b71f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -0,0 +1,15 @@ +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 0000000..0216985 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,34 @@ +// +build solaris + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} + +// FromStatT loads a system.StatT from a syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go new file mode 100644 index 0000000..dee8d30 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go @@ -0,0 +1,39 @@ +// +build linux freebsd + +package system + +import ( + "os" + "syscall" + "testing" +) + +// TestFromStatT tests fromStatT for a tempfile +func TestFromStatT(t *testing.T) { + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + t.Fatal("got invalid mode") + } + if stat.Uid != s.UID() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.GID() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go new file mode 100644 index 0000000..5d85f52 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -0,0 +1,17 @@ +// +build !linux,!windows,!freebsd,!solaris,!openbsd,!darwin + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 0000000..39490c6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like name, permission, size, etc about a file. +type StatT struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +// Name returns file's name. +func (s StatT) Name() string { + return s.name +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return s.mode +} + +// ModTime returns file's last modification time. +func (s StatT) ModTime() time.Time { + return s.modTime +} + +// IsDir returns whether file is actually a directory. +func (s StatT) IsDir() bool { + return s.isDir +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 0000000..3ae9128 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system + +import "syscall" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return syscall.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. +func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 0000000..1f31187 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -0,0 +1,105 @@ +package system + +import ( + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +var ( + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = syscall.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := syscall.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := syscall.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(syscall.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go new file mode 100644 index 0000000..4886b2b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go @@ -0,0 +1,9 @@ +package system + +import "testing" + +func TestHasWin32KSupport(t *testing.T) { + s := HasWin32KSupport() // make sure this doesn't panic + + t.Logf("win32k: %v", s) // will be different on different platforms -- informative only +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 0000000..3d0146b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 0000000..13f1de1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go new file mode 100644 index 0000000..e2eac3b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go new file mode 100644 index 0000000..fc8a1ab --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go @@ -0,0 +1,26 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + atFdCwd := -100 + atSymLinkNoFollow := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go new file mode 100644 index 0000000..a73ed11 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go @@ -0,0 +1,68 @@ +// +build linux freebsd + +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +// prepareFiles creates files for testing in the temp directory +func prepareFiles(t *testing.T) (string, string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink, dir +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + before, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + ts := []syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}} + if err := LUtimesNano(symlink, ts); err != nil { + t.Fatal(err) + } + + symlinkInfo, err := os.Lstat(symlink) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { + t.Fatal("The modification time of the symlink should be different") + } + + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() != fileInfo.ModTime().Unix() { + t.Fatal("The modification time of the file should be same") + } + + if err := LUtimesNano(invalid, ts); err == nil { + t.Fatal("Doesn't return an error on a non-existing file") + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 0000000..1397145 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd + +package system + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go new file mode 100644 index 0000000..d2e2c05 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -0,0 +1,63 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It will returns a nil slice and nil error if the xattr is not set. +func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + dest := make([]byte, 128) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { + return nil, nil + } + if errno == syscall.ERANGE { + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + } + if errno != 0 { + return nil, errno + } + + return dest[:sz], nil +} + +var _zero uintptr + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 0000000..0114f22 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go new file mode 100644 index 0000000..09eb393 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go @@ -0,0 +1,66 @@ +// Package tailfile provides helper functions to read the nth lines of any +// ReadSeeker. +package tailfile + +import ( + "bytes" + "errors" + "io" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n") + +// ErrNonPositiveLinesNumber is an error returned if the lines number was negative. +var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") + +//TailFile returns last n lines of reader f (could be a fil). +func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(left, os.SEEK_SET); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go new file mode 100644 index 0000000..31217c0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go @@ -0,0 +1,148 @@ +package tailfile + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestTailFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +third line +fourth line +fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last first line +next first line +next second line +next third line +next fourth line +next fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last second line +last third line +last fourth line +last fifth line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"last fourth line", "last fifth line"} + res, err := TailFile(f, 2) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailFileManyLines(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"first line", "second line"} + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailEmptyFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + if len(res) != 0 { + t.Fatal("Must be empty slice from empty file") + } +} + +func TestTailNegativeN(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } + if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } +} + +func BenchmarkTail(b *testing.B) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + b.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + for i := 0; i < 10000; i++ { + if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := TailFile(f, 1000); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go new file mode 100644 index 0000000..b42983e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go @@ -0,0 +1,21 @@ +package tarsum + +// BuilderContext is an interface extending TarSum by adding the Remove method. +// In general there was concern about adding this method to TarSum itself +// so instead it is being added just to "BuilderContext" which will then +// only be used during the .dockerignore file processing +// - see builder/evaluator.go +type BuilderContext interface { + TarSum + Remove(string) +} + +func (bc *tarSum) Remove(filename string) { + for i, fis := range bc.sums { + if fis.Name() == filename { + bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) + // Note, we don't just return because there could be + // more than one with this name + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go new file mode 100644 index 0000000..f54bf3a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go @@ -0,0 +1,67 @@ +package tarsum + +import ( + "io" + "io/ioutil" + "os" + "testing" +) + +// Try to remove tarsum (in the BuilderContext) that do not exists, won't change a thing +func TestTarSumRemoveNonExistent(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) + + ts.(BuilderContext).Remove("") + ts.(BuilderContext).Remove("Anything") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums()) + } +} + +// Remove a tarsum (in the BuilderContext) +func TestTarSumRemove(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) - 1 + + ts.(BuilderContext).Remove("etc/sudoers") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, len(ts.GetSums())) + } +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go new file mode 100644 index 0000000..5abf5e7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -0,0 +1,126 @@ +package tarsum + +import "sort" + +// FileInfoSumInterface provides an interface for accessing file checksum +// information within a tar file. This info is accessed through interface +// so the actual name and sum cannot be melded with. +type FileInfoSumInterface interface { + // File name + Name() string + // Checksum of this particular file and its headers + Sum() string + // Position of file in the tar + Pos() int64 +} + +type fileInfoSum struct { + name string + sum string + pos int64 +} + +func (fis fileInfoSum) Name() string { + return fis.name +} +func (fis fileInfoSum) Sum() string { + return fis.sum +} +func (fis fileInfoSum) Pos() int64 { + return fis.pos +} + +// FileInfoSums provides a list of FileInfoSumInterfaces. +type FileInfoSums []FileInfoSumInterface + +// GetFile returns the first FileInfoSumInterface with a matching name. +func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + for i := range fis { + if fis[i].Name() == name { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names. +func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. + for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +// Len returns the size of the FileInfoSums. +func (fis FileInfoSums) Len() int { return len(fis) } + +// Swap swaps two FileInfoSum values if a FileInfoSums list. +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +// SortByPos sorts FileInfoSums content by position. +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +// SortByNames sorts FileInfoSums content by name. +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +// SortBySums sorts FileInfoSums content by sums. +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. +// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 0000000..bb700d8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,62 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) + } + + fis := newFileInfoSums() + expected := "0abcdef1234567890" + fis.SortBySums() + got := fis[0].Sum() + if got != expected { + t.Errorf("Expected %q, got %q", expected, got) + } + + fis = newFileInfoSums() + expected = "dup1" + fis.SortByNames() + gotFis := fis[0] + if gotFis.Name() != expected { + t.Errorf("Expected %q, got %q", expected, gotFis.Name()) + } + // since a duplicate is first, ensure it is ordered first by position too + if gotFis.Pos() != 4 { + t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) + } + + fis = newFileInfoSums() + fis.SortByPos() + if fis[0].Pos() != 0 { + t.Errorf("sorted fileInfoSums by Pos should order them by position.") + } + + fis = newFileInfoSums() + expected = "deadbeef1" + gotFileInfoSum := fis.GetFile("dup1") + if gotFileInfoSum.Sum() != expected { + t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) + } + if fis.GetFile("noPresent") != nil { + t.Errorf("Should have return nil if name not found.") + } + +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go new file mode 100644 index 0000000..154788d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -0,0 +1,295 @@ +// Package tarsum provides algorithms to perform checksum calculation on +// filesystem layers. +// +// The transportation of filesystems, regarding Docker, is done with tar(1) +// archives. There are a variety of tar serialization formats [2], and a key +// concern here is ensuring a repeatable checksum given a set of inputs from a +// generic tar archive. Types of transportation include distribution to and from a +// registry endpoint, saving and loading through commands or Docker daemon APIs, +// transferring the build context from client to Docker daemon, and committing the +// filesystem of a container to become an image. +// +// As tar archives are used for transit, but not preserved in many situations, the +// focus of the algorithm is to ensure the integrity of the preserved filesystem, +// while maintaining a deterministic accountability. This includes neither +// constraining the ordering or manipulation of the files during the creation or +// unpacking of the archive, nor include additional metadata state about the file +// system attributes. +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "path" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than +// the DefaultTHash. +func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. +func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed time +// checksums of a tar archive. +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation. +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output gzip compressed. + tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// THash provides a hash.Hash type generator and its name. +type THash interface { + Hash() hash.Hash + Name() string +} + +// NewTHash is a convenience method for creating a THash. +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. + standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// DefaultTHash is default TarSum hashing algorithm - "sha256". +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = path.Clean(currentHeader.Name) + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md new file mode 100644 index 0000000..89b2e49 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,230 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithms used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on filesystem layers, the need for this method over existing +methods, and the versioning of this calculation. + +## Warning + +This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. + +This is _not_ a cryptographic attestation, and should not be considered secure. + +## Introduction + +The transportation of filesystems, regarding Docker, is done with tar(1) +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved filesystem, +while maintaining a deterministic accountability. This includes neither +constraining the ordering or manipulation of the files during the creation or +unpacking of the archive, nor include additional metadata state about the file +system attributes. + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for filesystems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + +## Concept + +The checksum mechanism must ensure the integrity and assurance of the +filesystem payload. + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* Associated hashing cipher - used to checksum each file payload and attribute + information. +* Checksum list - each file of the filesystem archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* Version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. +* Archive being calculated - the tar archive having its checksum calculated + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal +form. + +There are two delimiters used: +* '+' separates TarSum version from hashing cipher +* ':' separates calculation mechanics from expected hash + +Example: + +``` + "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" + | | \ | + | | \ | + |_version_|_cipher__|__ | + | \ | + |_calculation_mechanics_|______________________expected_sum_______________________| +``` + +## Versioning + +Versioning was introduced [0] to accommodate differences in calculation needed, +and ability to maintain reverse compatibility. + +The general algorithm will be describe further in the 'Calculation'. + +### Version0 + +This is the initial version of TarSum. + +Its element in the TarSum checksum string is `tarsum`. + +### Version1 + +Its element in the TarSum checksum is `tarsum.v1`. + +The notable changes in this version: +* Exclusion of file `mtime` from the file information headers, in each file + checksum calculation +* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax + tar file info headers) keys and values in each file checksum calculation + +### VersionDev + +*Do not use unless validating refinements to the checksum algorithm* + +Its element in the TarSum checksum is `tarsum.dev`. + +This is a floating place holder for a next version and grounds for testing +changes. The methods used for calculation are subject to change without notice, +and this version is for testing and not for production use. + +## Ciphers + +The official default and standard hashing cipher used in the calculation mechanic +is `sha256`. This refers to SHA256 hash algorithm as defined in FIPS 180-4. + +Though the TarSum algorithm itself is not exclusively bound to the single +hashing cipher `sha256`, support for alternate hashing ciphers was later added +[1]. Use cases for alternate cipher could include future-proofing TarSum +checksum format and using faster cipher hashes for tar filesystem checksums. + +## Calculation + +### Requirement + +As mentioned earlier, the calculation is such that it takes into consideration +the lifecycle of the tar archive. In that the tar archive is not an immutable, +permanent artifact. Otherwise options like relying on a known hashing cipher +checksum of the archive itself would be reliable enough. The tar archive of the +filesystem is used as a transportation medium for Docker images, and the +archive is discarded once its contents are extracted. Therefore, for consistent +validation items such as order of files in the tar archive and time stamps are +subject to change once an image is received. + +### Process + +The method is typically iterative due to reading tar info headers from the +archive stream, though this is not a strict requirement. + +#### Files + +Each file in the tar archive have their contents (headers and body) checksummed +individually using the designated associated hashing cipher. The ordered +headers of the file are written to the checksum calculation first, and then the +payload of the file body. + +The resulting checksum of the file is appended to the list of file sums. The +sum is encoded as a string of the hexadecimal digest. Additionally, the file +name and position in the archive is kept as reference for special ordering. + +#### Headers + +The following headers are read, in this +order ( and the corresponding representation of its value): +* 'name' - string +* 'mode' - string of the base10 integer +* 'uid' - string of the integer +* 'gid' - string of the integer +* 'size' - string of the integer +* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC +* 'typeflag' - string of the char +* 'linkname' - string +* 'uname' - string +* 'gname' - string +* 'devmajor' - string of the integer +* 'devminor' - string of the integer + +For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax +headers) included after the above list. These xattrs key/values are first +sorted by the keys. + +#### Header Format + +The ordered headers are written to the hash in the format of + + "{.key}{.value}" + +with no newline. + +#### Body + +After the order headers of the file have been added to the checksum for the +file, the body of the file is written to the hash. + +#### List of file sums + +The list of file sums is sorted by the string of the hexadecimal digest. + +If there are two files in the tar with matching paths, the order of occurrence +for that path is reflected for the sums of the corresponding file header and +body. + +#### Final Checksum + +Begin with a fresh or initial state of the associated hash cipher. If there is +additional payload to include in the TarSum calculation for the archive, it is +written first. Then each checksum from the ordered list of file sums is written +to the hash. + +The resulting digest is formatted per the Elements of TarSum checksum, +including the TarSum version, the associated hash cipher and the hexadecimal +encoded checksum digest. + +## Security Considerations + +The initial version of TarSum has undergone one update that could invalidate +handcrafted tar archives. The tar archive format supports appending of files +with same names as prior files in the archive. The latter file will clobber the +prior file of the same path. Due to this the algorithm now accounts for files +with matching paths, and orders the list of file sums accordingly [3]. + +## Footnotes + +* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 +* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e +* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 +* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 + +## Acknowledgments + +Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the +TarSum calculation. + diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go new file mode 100644 index 0000000..86df0e2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go @@ -0,0 +1,664 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. ensuring is has different hash than above + filename: "testdata/collision/collision-1.tar", + tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + { + // this tar has newer of collider-0.tar, ensuring is has different hash + filename: "testdata/collision/collision-2.tar", + tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + { + // this tar has newer of collider-1.tar, ensuring is has different hash + filename: "testdata/collision/collision-3.tar", + tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", + hash: md5THash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", + hash: sha1Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", + hash: sha224Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", + hash: sha384Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", + hash: sha512Hash, + }, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + defer tarW.Close() + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func emptyTarSum(gzip bool) (TarSum, error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. + go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// Test errors on NewTarsumForLabel +func TestNewTarSumForLabelInvalid(t *testing.T) { + reader := strings.NewReader("") + + if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + + if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } +} + +func TestNewTarSumForLabel(t *testing.T) { + + layer := testLayers[0] + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + label := strings.Split(layer.tarsum, ":")[0] + ts, err := NewTarSumForLabel(reader, false, label) + if err != nil { + t.Fatal(err) + } + + // Make sure it actually worked by reading a little bit of it + nbByteToRead := 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + } +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test without ever actually writing anything. + if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +// Test all the build-in read size : buf8K, buf16K, buf32K and more +func TestTarSumsReadSize(t *testing.T) { + // Test always on the same layer (that is big enough) + layer := testLayers[0] + + for i := 0; i < 5; i++ { + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, layer.version) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + nbByteToRead := (i + 1) * 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + continue + } + } +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! + ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + defer jfh.Close() + + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + var expectedHashName string + if layer.hash != nil { + expectedHashName = layer.hash.Name() + } else { + expectedHashName = DefaultTHash.Name() + } + if expectedHashName != ts.Hash().Name() { + t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) + } + } +} + +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", + Version0, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. + break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 0000000..48e2af3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000000000000000000000000000000000..dfd5c204aea77673f13fdd2f81cb4af1c155c00c GIT binary patch literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 0000000000000000000000000000000000000000..7b5c04a9644808851fcccab5c3c240bf342abd93 GIT binary patch literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go new file mode 100644 index 0000000..2882286 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go @@ -0,0 +1,150 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. +type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go new file mode 100644 index 0000000..88e0a57 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go @@ -0,0 +1,98 @@ +package tarsum + +import ( + "testing" +) + +func TestVersionLabelForChecksum(t *testing.T) { + version := VersionLabelForChecksum("tarsum+sha256:deadbeef") + if version != "tarsum" { + t.Fatalf("Version should have been 'tarsum', was %v", version) + } + version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") + if version != "tarsum.v1" { + t.Fatalf("Version should have been 'tarsum.v1', was %v", version) + } + version = VersionLabelForChecksum("something+somethingelse") + if version != "something" { + t.Fatalf("Version should have been 'something', was %v", version) + } + version = VersionLabelForChecksum("invalidChecksum") + if version != "" { + t.Fatalf("Version should have been empty, was %v", version) + } +} + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.v1" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct { + Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} + +func TestGetVersions(t *testing.T) { + expected := []Version{ + Version0, + Version1, + VersionDev, + } + versions := GetVersions() + if len(versions) != len(expected) { + t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) + } + if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { + t.Fatalf("Expected [%v], got [%v]", expected, versions) + } +} + +func containsVersion(versions []Version, version Version) bool { + for _, v := range versions { + if v == version { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go new file mode 100644 index 0000000..9727ecd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go new file mode 100644 index 0000000..f5262bc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/ascii.go @@ -0,0 +1,66 @@ +package term + +import ( + "fmt" + "strings" +) + +// ASCII list the possible supported ASCII key sequence +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. +func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, byte(key[0])) + } + } + return codes, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii_test.go b/vendor/github.com/docker/docker/pkg/term/ascii_test.go new file mode 100644 index 0000000..4a1e7f3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/ascii_test.go @@ -0,0 +1,43 @@ +package term + +import "testing" + +func TestToBytes(t *testing.T) { + codes, err := ToBytes("ctrl-a,a") + if err != nil { + t.Fatal(err) + } + if len(codes) != 2 { + t.Fatalf("Expected 2 codes, got %d", len(codes)) + } + if codes[0] != 1 || codes[1] != 97 { + t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1]) + } + + codes, err = ToBytes("shift-z") + if err == nil { + t.Fatalf("Expected error, got none") + } + + codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") + if err != nil { + t.Fatal(err) + } + if len(codes) != 4 { + t.Fatalf("Expected 4 codes, got %d", len(codes)) + } + if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 { + t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3]) + } + + codes, err = ToBytes("DEL,+") + if err != nil { + t.Fatal(err) + } + if len(codes) != 2 { + t.Fatalf("Expected 2 codes, got %d", len(codes)) + } + if codes[0] != 127 || codes[1] != 43 { + t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1]) + } +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go new file mode 100644 index 0000000..59dac5b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go @@ -0,0 +1,50 @@ +// +build linux,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +// Termios is the Unix API for terminal I/O. +// It is passthrough for syscall.Termios in order to make it portable with +// other platforms where it is not available or handled differently. +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go new file mode 100644 index 0000000..750d7c3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/tc_other.go @@ -0,0 +1,20 @@ +// +build !windows +// +build !linux !cgo +// +build !solaris !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go new file mode 100644 index 0000000..c9139d0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go @@ -0,0 +1,63 @@ +// +build solaris,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +// Termios is the Unix API for terminal I/O. +// It is passthrough for syscall.Termios in order to make it portable with +// other platforms where it is not available or handled differently. +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + /* + VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned + Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It + needs to be explicitly set to 1. + */ + newState.Cc[C.VMIN] = 1 + newState.Cc[C.VTIME] = 0 + + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go new file mode 100644 index 0000000..fe59faa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term.go @@ -0,0 +1,123 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + "syscall" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stedrr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= syscall.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + go func() { + for range sigchan { + // quit cleanly and the new terminal item is on a new line + fmt.Println() + signal.Stop(sigchan) + close(sigchan) + RestoreTerminal(fd, state) + os.Exit(1) + } + }() +} diff --git a/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/vendor/github.com/docker/docker/pkg/term/term_solaris.go new file mode 100644 index 0000000..112debb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term_solaris.go @@ -0,0 +1,41 @@ +// +build solaris + +package term + +import ( + "syscall" + "unsafe" +) + +/* +#include +#include +#include + +// Small wrapper to get rid of variadic args of ioctl() +int my_ioctl(int fd, int cmd, struct winsize *ws) { + return ioctl(fd, cmd, ws); +} +*/ +import "C" + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + // Skip retval = 0 + if ret == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + // Skip retval = 0 + if ret == 0 { + return nil + } + return err +} diff --git a/vendor/github.com/docker/docker/pkg/term/term_unix.go b/vendor/github.com/docker/docker/pkg/term/term_unix.go new file mode 100644 index 0000000..ddf87a0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term_unix.go @@ -0,0 +1,29 @@ +// +build !solaris,!windows + +package term + +import ( + "syscall" + "unsafe" +) + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) + // Skipp errno = 0 + if err == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) + // Skipp errno = 0 + if err == 0 { + return nil + } + return err +} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go new file mode 100644 index 0000000..a91f07e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -0,0 +1,233 @@ +// +build windows + +package term + +import ( + "io" + "os" + "os/signal" + "syscall" + + "github.com/Azure/go-ansiterm/winterm" + "github.com/docker/docker/pkg/term/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 +} + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + enableVirtualTerminalInput = 0x0200 + enableVirtualTerminalProcessing = 0x0004 + disableNewlineAutoReturn = 0x0008 +) + +// vtInputSupported is true if enableVirtualTerminalInput is supported by the console +var vtInputSupported bool + +// StdStreams returns the standard streams (stdin, stdout, stedrr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. This might + // fail, in which case we will fall back to terminal emulation. + var emulateStdin, emulateStdout, emulateStderr bool + fd := os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate that enableVirtualTerminalInput is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + winterm.SetConsoleMode(fd, mode) + } + + fd = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStdout = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + fd = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStderr = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { + // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. + emulateStdin = true + emulateStdout = false + emulateStderr = false + } + + if emulateStdin { + stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windows.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return windows.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winterm.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e + } + + return &State{mode: mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since disableNewlineAutoReturn might not be supported on this + // version of Windows. + winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. +func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= enableVirtualTerminalInput + } + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go new file mode 100644 index 0000000..480db90 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]byte + Ispeed uint64 + Ospeed uint64 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go new file mode 100644 index 0000000..ed843ad --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go new file mode 100644 index 0000000..22921b6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go @@ -0,0 +1,47 @@ +// +build !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TCGETS + setTermios = syscall.TCSETS +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go new file mode 100644 index 0000000..ed843ad --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go new file mode 100644 index 0000000..cb0b883 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -0,0 +1,263 @@ +// +build windows + +package windows + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + initLogger() + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. +func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar.fd, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + logger.Debug("No input events detected") + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. + recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. +func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { + var buffer bytes.Buffer + for _, event := range events { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { + buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) + } + } + + return buffer.Bytes() +} + +// keyToString maps the given input event record to the corresponding string. +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { + if keyEvent.UnicodeChar == 0 { + return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) + } + + _, alt, control := getControlKeys(keyEvent.ControlKeyState) + if control { + // TODO(azlinux): Implement following control sequences + // -D Signals the end of input from the keyboard; also exits current shell. + // -H Deletes the first character to the left of the cursor. Also called the ERASE key. + // -Q Restarts printing after it has been stopped with -s. + // -S Suspends printing on the screen (does not stop the program). + // -U Deletes all characters on the current line. Also called the KILL key. + // -E Quits current command and creates a core + + } + + // +Key generates ESC N Key + if !control && alt { + return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + } + + return string(keyEvent.UnicodeChar) +} + +// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. +func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState uint32) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. +func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go new file mode 100644 index 0000000..a3ce569 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go @@ -0,0 +1,64 @@ +// +build windows + +package windows + +import ( + "io" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. +type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + initLogger() + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. +func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) + return aw.parser.Parse(p) +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go new file mode 100644 index 0000000..ca5c3b2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go @@ -0,0 +1,35 @@ +// +build windows + +package windows + +import ( + "os" + + "github.com/Azure/go-ansiterm/winterm" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = IsConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +func IsConsole(fd uintptr) bool { + _, e := winterm.GetConsoleMode(fd) + return e == nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go new file mode 100644 index 0000000..ce4cb59 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -0,0 +1,33 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windows + +import ( + "io/ioutil" + "os" + "sync" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger +var initOnce sync.Once + +func initLogger() { + initOnce.Do(func() { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + }) +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go new file mode 100644 index 0000000..52aeab5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go @@ -0,0 +1,3 @@ +// This file is necessary to pass the Docker tests. + +package windows diff --git a/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go b/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go new file mode 100644 index 0000000..6da8518 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go @@ -0,0 +1,97 @@ +// Package assert contains functions for making assertions in unit tests +package assert + +import ( + "fmt" + "path/filepath" + "reflect" + "runtime" + "strings" + + "github.com/davecgh/go-spew/spew" +) + +// TestingT is an interface which defines the methods of testing.T that are +// required by this package +type TestingT interface { + Fatalf(string, ...interface{}) +} + +// Equal compare the actual value to the expected value and fails the test if +// they are not equal. +func Equal(t TestingT, actual, expected interface{}) { + if expected != actual { + fatal(t, "Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual) + } +} + +//EqualStringSlice compares two slices and fails the test if they do not contain +// the same items. +func EqualStringSlice(t TestingT, actual, expected []string) { + if len(actual) != len(expected) { + fatal(t, "Expected (length %d): %q\nActual (length %d): %q", + len(expected), expected, len(actual), actual) + } + for i, item := range actual { + if item != expected[i] { + fatal(t, "Slices differ at element %d, expected %q got %q", + i, expected[i], item) + } + } +} + +// NilError asserts that the error is nil, otherwise it fails the test. +func NilError(t TestingT, err error) { + if err != nil { + fatal(t, "Expected no error, got: %s", err.Error()) + } +} + +// DeepEqual compare the actual value to the expected value and fails the test if +// they are not "deeply equal". +func DeepEqual(t TestingT, actual, expected interface{}) { + if !reflect.DeepEqual(actual, expected) { + fatal(t, "Expected (%T):\n%v\n\ngot (%T):\n%s\n", + expected, spew.Sdump(expected), actual, spew.Sdump(actual)) + } +} + +// Error asserts that error is not nil, and contains the expected text, +// otherwise it fails the test. +func Error(t TestingT, err error, contains string) { + if err == nil { + fatal(t, "Expected an error, but error was nil") + } + + if !strings.Contains(err.Error(), contains) { + fatal(t, "Expected error to contain '%s', got '%s'", contains, err.Error()) + } +} + +// Contains asserts that the string contains a substring, otherwise it fails the +// test. +func Contains(t TestingT, actual, contains string) { + if !strings.Contains(actual, contains) { + fatal(t, "Expected '%s' to contain '%s'", actual, contains) + } +} + +// NotNil fails the test if the object is nil +func NotNil(t TestingT, obj interface{}) { + if obj == nil { + fatal(t, "Expected non-nil value.") + } +} + +func fatal(t TestingT, format string, args ...interface{}) { + t.Fatalf(errorSource()+format, args...) +} + +// See testing.decorate() +func errorSource() string { + _, filename, line, ok := runtime.Caller(3) + if !ok { + return "" + } + return fmt.Sprintf("%s:%d: ", filepath.Base(filename), line) +} diff --git a/vendor/github.com/docker/docker/pkg/testutil/pkg.go b/vendor/github.com/docker/docker/pkg/testutil/pkg.go new file mode 100644 index 0000000..110b2e6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/testutil/pkg.go @@ -0,0 +1 @@ +package testutil diff --git a/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go b/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go new file mode 100644 index 0000000..0e09d99 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go @@ -0,0 +1,36 @@ +package tempfile + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/testutil/assert" +) + +// TempFile is a temporary file that can be used with unit tests. TempFile +// reduces the boilerplate setup required in each test case by handling +// setup errors. +type TempFile struct { + File *os.File +} + +// NewTempFile returns a new temp file with contents +func NewTempFile(t assert.TestingT, prefix string, content string) *TempFile { + file, err := ioutil.TempFile("", prefix+"-") + assert.NilError(t, err) + + _, err = file.Write([]byte(content)) + assert.NilError(t, err) + file.Close() + return &TempFile{File: file} +} + +// Name returns the filename +func (f *TempFile) Name() string { + return f.File.Name() +} + +// Remove removes the file +func (f *TempFile) Remove() { + os.Remove(f.Name()) +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go new file mode 100644 index 0000000..e4dec3a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go @@ -0,0 +1,11 @@ +// +build go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return c.Clone() +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go new file mode 100644 index 0000000..0b81665 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go @@ -0,0 +1,31 @@ +// +build go1.6,!go1.7 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.6 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go new file mode 100644 index 0000000..0d5b448 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go @@ -0,0 +1,33 @@ +// +build go1.7,!go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go new file mode 100644 index 0000000..02610b8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go @@ -0,0 +1,137 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + + // Change patricia max prefix per node length, + // because our len(ID) always 64 + trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), + } + for _, id := range ids { + idx.addID(id) + } + return +} + +func (idx *TruncIndex) addID(id string) error { + if strings.Contains(id, " ") { + return ErrIllegalChar + } + if id == "" { + return ErrEmptyPrefix + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("failed to insert id: %s", id) + } + return nil +} + +// Add adds a new ID to the TruncIndex. +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + if err := idx.addID(id); err != nil { + return err + } + return nil +} + +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("no such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("no such id: '%s'", id) + } + return nil +} + +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. +func (idx *TruncIndex) Get(s string) (string, error) { + if s == "" { + return "", ErrEmptyPrefix + } + var ( + id string + ) + subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { + if id != "" { + // we haven't found the ID if there are two or more IDs + id = "" + return ErrAmbiguousPrefix{prefix: string(prefix)} + } + id = string(prefix) + return nil + } + + idx.RLock() + defer idx.RUnlock() + if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { + return "", err + } + if id != "" { + return id, nil + } + return "", ErrNotExist +} + +// Iterate iterates over all stored IDs, and passes each of them to the given handler. +func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go new file mode 100644 index 0000000..8197baf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go @@ -0,0 +1,429 @@ +package truncindex + +import ( + "math/rand" + "testing" + + "github.com/docker/docker/pkg/stringid" +) + +// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. +func TestTruncIndex(t *testing.T) { + ids := []string{} + index := NewTruncIndex(ids) + // Get on an empty index + if _, err := index.Get("foobar"); err == nil { + t.Fatal("Get on an empty index should return an error") + } + + // Spaces should be illegal in an id + if err := index.Add("I have a space"); err == nil { + t.Fatalf("Adding an id with ' ' should return an error") + } + + id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" + // Add an id + if err := index.Add(id); err != nil { + t.Fatal(err) + } + + // Add an empty id (should fail) + if err := index.Add(""); err == nil { + t.Fatalf("Adding an empty id should return an error") + } + + // Get a non-existing id + assertIndexGet(t, index, "abracadabra", "", true) + // Get an empty id + assertIndexGet(t, index, "", "", true) + // Get the exact id + assertIndexGet(t, index, id, id, false) + // The first letter should match + assertIndexGet(t, index, id[:1], id, false) + // The first half should match + assertIndexGet(t, index, id[:len(id)/2], id, false) + // The second half should NOT match + assertIndexGet(t, index, id[len(id)/2:], "", true) + + id2 := id[:6] + "blabla" + // Add an id + if err := index.Add(id2); err != nil { + t.Fatal(err) + } + // Both exact IDs should work + assertIndexGet(t, index, id, id, false) + assertIndexGet(t, index, id2, id2, false) + + // 6 characters or less should conflict + assertIndexGet(t, index, id[:6], "", true) + assertIndexGet(t, index, id[:4], "", true) + assertIndexGet(t, index, id[:1], "", true) + + // An ambiguous id prefix should return an error + if _, err := index.Get(id[:4]); err == nil { + t.Fatal("An ambiguous id prefix should return an error") + } + + // 7 characters should NOT conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id2[:7], id2, false) + + // Deleting a non-existing id should return an error + if err := index.Delete("non-existing"); err == nil { + t.Fatalf("Deleting a non-existing id should return an error") + } + + // Deleting an empty id should return an error + if err := index.Delete(""); err == nil { + t.Fatal("Deleting an empty id should return an error") + } + + // Deleting id2 should remove conflicts + if err := index.Delete(id2); err != nil { + t.Fatal(err) + } + // id2 should no longer work + assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) + + assertIndexIterate(t) +} + +func assertIndexIterate(t *testing.T) { + ids := []string{ + "19b36c2c326ccc11e726eee6ee78a0baf166ef96", + "28b36c2c326ccc11e726eee6ee78a0baf166ef96", + "37b36c2c326ccc11e726eee6ee78a0baf166ef96", + "46b36c2c326ccc11e726eee6ee78a0baf166ef96", + } + + index := NewTruncIndex(ids) + + index.Iterate(func(targetId string) { + for _, id := range ids { + if targetId == id { + return + } + } + + t.Fatalf("An unknown ID '%s'", targetId) + }) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func BenchmarkTruncIndexAdd100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexDelete100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexNew100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexAddGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go new file mode 100644 index 0000000..4415287 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -0,0 +1,50 @@ +// Package urlutil provides helper function to check urls kind. +// It supports http urls, git urls and transport url (tcp://, …) +package urlutil + +import ( + "regexp" + "strings" +) + +var ( + validPrefixes = map[string][]string{ + "url": {"http://", "https://"}, + "git": {"git://", "github.com/", "git@"}, + "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, + } + urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +) + +// IsURL returns true if the provided str is an HTTP(S) URL. +func IsURL(str string) bool { + return checkURL(str, "url") +} + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func IsGitTransport(str string) bool { + return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. +func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go new file mode 100644 index 0000000..75eb464 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go @@ -0,0 +1,70 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } + invalidGitUrls = []string{ + "http://github.com/docker/docker.git:#branch", + } + transportUrls = []string{ + "tcp://example.com", + "tcp+tls://example.com", + "udp://example.com", + "unix:///example", + "unixgram:///example", + } +) + +func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { + if IsGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range invalidGitUrls { + if IsGitURL(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsTransport(t *testing.T) { + for _, url := range transportUrls { + if IsTransportURL(url) == false { + t.Fatalf("%q should be detected as valid Transport url", url) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/useragent/README.md b/vendor/github.com/docker/docker/pkg/useragent/README.md new file mode 100644 index 0000000..d9cb367 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go new file mode 100644 index 0000000..1137db5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the string base. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is get from the name field, while +// version is get from the version field. Several pieces of version information +// will be concatenated and separated by space. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". +func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go b/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go new file mode 100644 index 0000000..0ad7243 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go @@ -0,0 +1,31 @@ +package useragent + +import "testing" + +func TestVersionInfo(t *testing.T) { + vi := VersionInfo{"foo", "bar"} + if !vi.isValid() { + t.Fatalf("VersionInfo should be valid") + } + vi = VersionInfo{"", "bar"} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } + vi = VersionInfo{"foo", ""} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } +} + +func TestAppendVersions(t *testing.T) { + vis := []VersionInfo{ + {"foo", "1.0"}, + {"bar", "0.1"}, + {"pi", "3.1.4"}, + } + v := AppendVersions("base", vis...) + expect := "base foo/1.0 bar/0.1 pi/3.1.4" + if v != expect { + t.Fatalf("expected %q, got %q", expect, v) + } +} diff --git a/vendor/github.com/docker/docker/plugin/backend_linux.go b/vendor/github.com/docker/docker/plugin/backend_linux.go new file mode 100644 index 0000000..33200d8 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/backend_linux.go @@ -0,0 +1,790 @@ +// +build linux + +package plugin + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/reference" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Disable deactivates a plugin. This means resources (volumes, networks) cant use them. +func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if !config.ForceDisable && p.GetRefCount() > 0 { + return fmt.Errorf("plugin %s is in use", p.Name()) + } + + if err := pm.disable(p, c); err != nil { + return err + } + pm.config.LogPluginEvent(p.GetID(), refOrID, "disable") + return nil +} + +// Enable activates a plugin, which implies that they are ready to be used by containers. +func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + + c := &controller{timeoutInSecs: config.Timeout} + if err := pm.enable(p, c, false); err != nil { + return err + } + pm.config.LogPluginEvent(p.GetID(), refOrID, "enable") + return nil +} + +// Inspect examines a plugin config +func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return nil, err + } + + return &p.PluginObj, nil +} + +func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error { + if outStream != nil { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + config.ProgressOutput = progress.ChanOutput(progressChan) + } else { + config.ProgressOutput = progress.DiscardOutput() + } + return distribution.Pull(ctx, ref, config) +} + +type tempConfigStore struct { + config []byte + configDigest digest.Digest +} + +func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) { + dgst := digest.FromBytes(c) + + s.config = c + s.configDigest = dgst + + return dgst, nil +} + +func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { + if d != s.configDigest { + return nil, digest.ErrDigestNotFound + } + return s.config, nil +} + +func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + return configToRootFS(c) +} + +func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) { + var privileges types.PluginPrivileges + if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" { + privileges = append(privileges, types.PluginPrivilege{ + Name: "network", + Description: "permissions to access a network", + Value: []string{c.Network.Type}, + }) + } + for _, mount := range c.Mounts { + if mount.Source != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "mount", + Description: "host path to mount", + Value: []string{*mount.Source}, + }) + } + } + for _, device := range c.Linux.Devices { + if device.Path != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "device", + Description: "host device to access", + Value: []string{*device.Path}, + }) + } + } + if c.Linux.AllowAllDevices { + privileges = append(privileges, types.PluginPrivilege{ + Name: "allow-all-devices", + Description: "allow 'rwm' access to all devices", + Value: []string{"true"}, + }) + } + if len(c.Linux.Capabilities) > 0 { + privileges = append(privileges, types.PluginPrivilege{ + Name: "capabilities", + Description: "list of additional capabilities required", + Value: c.Linux.Capabilities, + }) + } + + return privileges, nil +} + +// Privileges pulls a plugin config and computes the privileges required to install it. +func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { + // create image store instance + cs := &tempConfigStore{} + + // DownloadManager not defined because only pulling configuration. + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: func(string, string, string) {}, + ImageStore: cs, + }, + Schema2Types: distribution.PluginTypes, + } + + if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil { + return nil, err + } + + if cs.config == nil { + return nil, errors.New("no configuration pulled") + } + var config types.PluginConfig + if err := json.Unmarshal(cs.config, &config); err != nil { + return nil, err + } + + return computePrivileges(config) +} + +// Upgrade upgrades a plugin +func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return errors.Wrap(err, "plugin must be installed before upgrading") + } + + if p.IsEnabled() { + return fmt.Errorf("plugin must be disabled before upgrading") + } + + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + // revalidate because Pull is public + nameref, err := reference.ParseNamed(name) + if err != nil { + return errors.Wrapf(err, "failed to parse %q", name) + } + name = reference.WithDefaultTag(nameref).String() + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + defer os.RemoveAll(tmpRootFSDir) + + dm := &downloadManager{ + tmpDir: tmpRootFSDir, + blobStore: pm.blobStore, + } + + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: dm, + }, + DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead + Schema2Types: distribution.PluginTypes, + } + + err = pm.pull(ctx, ref, pluginPullConfig, outStream) + if err != nil { + go pm.GC() + return err + } + + if err := pm.upgradePlugin(p, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil { + return err + } + p.PluginObj.PluginReference = ref.String() + return nil +} + +// Pull pulls a plugin, check if the correct privileges are provided and install the plugin. +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + // revalidate because Pull is public + nameref, err := reference.ParseNamed(name) + if err != nil { + return errors.Wrapf(err, "failed to parse %q", name) + } + name = reference.WithDefaultTag(nameref).String() + + if err := pm.config.Store.validateName(name); err != nil { + return err + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + defer os.RemoveAll(tmpRootFSDir) + + dm := &downloadManager{ + tmpDir: tmpRootFSDir, + blobStore: pm.blobStore, + } + + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: dm, + }, + DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead + Schema2Types: distribution.PluginTypes, + } + + err = pm.pull(ctx, ref, pluginPullConfig, outStream) + if err != nil { + go pm.GC() + return err + } + + p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges) + if err != nil { + return err + } + p.PluginObj.PluginReference = ref.String() + + return nil +} + +// List displays the list of plugins and associated metadata. +func (pm *Manager) List() ([]types.Plugin, error) { + plugins := pm.config.Store.GetAll() + out := make([]types.Plugin, 0, len(plugins)) + for _, p := range plugins { + out = append(out, p.PluginObj) + } + return out, nil +} + +// Push pushes a plugin to the store. +func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return err + } + + ref, err := reference.ParseNamed(p.Name()) + if err != nil { + return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name()) + } + + var po progress.Output + if outStream != nil { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + po = progress.ChanOutput(progressChan) + } else { + po = progress.DiscardOutput() + } + + // TODO: replace these with manager + is := &pluginConfigStore{ + pm: pm, + plugin: p, + } + ls := &pluginLayerProvider{ + pm: pm, + plugin: p, + } + rs := &pluginReference{ + name: ref, + pluginID: p.Config, + } + + uploadManager := xfer.NewLayerUploadManager(3) + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + ProgressOutput: po, + RegistryService: pm.config.RegistryService, + ReferenceStore: rs, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: is, + RequireSchema2: true, + }, + ConfigMediaType: schema2.MediaTypePluginConfig, + LayerStore: ls, + UploadManager: uploadManager, + } + + return distribution.Push(ctx, ref, imagePushConfig) +} + +type pluginReference struct { + name reference.Named + pluginID digest.Digest +} + +func (r *pluginReference) References(id digest.Digest) []reference.Named { + if r.pluginID != id { + return nil + } + return []reference.Named{r.name} +} + +func (r *pluginReference) ReferencesByName(ref reference.Named) []reference.Association { + return []reference.Association{ + { + Ref: r.name, + ID: r.pluginID, + }, + } +} + +func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) { + if r.name.String() != ref.String() { + return digest.Digest(""), reference.ErrDoesNotExist + } + return r.pluginID, nil +} + +func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) Delete(ref reference.Named) (bool, error) { + // Read only, ignore + return false, nil +} + +type pluginConfigStore struct { + pm *Manager + plugin *v2.Plugin +} + +func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) { + return digest.Digest(""), errors.New("cannot store config on push") +} + +func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) { + if s.plugin.Config != d { + return nil, errors.New("plugin not found") + } + rwc, err := s.pm.blobStore.Get(d) + if err != nil { + return nil, err + } + defer rwc.Close() + return ioutil.ReadAll(rwc) +} + +func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + return configToRootFS(c) +} + +type pluginLayerProvider struct { + pm *Manager + plugin *v2.Plugin +} + +func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) { + rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs) + var i int + for i = 1; i <= len(rootFS.DiffIDs); i++ { + if layer.CreateChainID(rootFS.DiffIDs[:i]) == id { + break + } + } + if i > len(rootFS.DiffIDs) { + return nil, errors.New("layer not found") + } + return &pluginLayer{ + pm: p.pm, + diffIDs: rootFS.DiffIDs[:i], + blobs: p.plugin.Blobsums[:i], + }, nil +} + +type pluginLayer struct { + pm *Manager + diffIDs []layer.DiffID + blobs []digest.Digest +} + +func (l *pluginLayer) ChainID() layer.ChainID { + return layer.CreateChainID(l.diffIDs) +} + +func (l *pluginLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *pluginLayer) Parent() distribution.PushLayer { + if len(l.diffIDs) == 1 { + return nil + } + return &pluginLayer{ + pm: l.pm, + diffIDs: l.diffIDs[:len(l.diffIDs)-1], + blobs: l.blobs[:len(l.diffIDs)-1], + } +} + +func (l *pluginLayer) Open() (io.ReadCloser, error) { + return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1]) +} + +func (l *pluginLayer) Size() (int64, error) { + return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1]) +} + +func (l *pluginLayer) MediaType() string { + return schema2.MediaTypeLayer +} + +func (l *pluginLayer) Release() { + // Nothing needs to be release, no references held +} + +// Remove deletes plugin's root directory. +func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { + p, err := pm.config.Store.GetV2Plugin(name) + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if err != nil { + return err + } + + if !config.ForceRemove { + if p.GetRefCount() > 0 { + return fmt.Errorf("plugin %s is in use", p.Name()) + } + if p.IsEnabled() { + return fmt.Errorf("plugin %s is enabled", p.Name()) + } + } + + if p.IsEnabled() { + if err := pm.disable(p, c); err != nil { + logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err) + } + } + + defer func() { + go pm.GC() + }() + + id := p.GetID() + pm.config.Store.Remove(p) + pluginDir := filepath.Join(pm.config.Root, id) + if err := recursiveUnmount(pm.config.Root); err != nil { + logrus.WithField("dir", pm.config.Root).WithField("id", id).Warn(err) + } + if err := os.RemoveAll(pluginDir); err != nil { + logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err) + } + pm.config.LogPluginEvent(id, name, "remove") + return nil +} + +func getMounts(root string) ([]string, error) { + infos, err := mount.GetMounts() + if err != nil { + return nil, errors.Wrap(err, "failed to read mount table while performing recursive unmount") + } + + var mounts []string + for _, m := range infos { + if strings.HasPrefix(m.Mountpoint, root) { + mounts = append(mounts, m.Mountpoint) + } + } + + return mounts, nil +} + +func recursiveUnmount(root string) error { + mounts, err := getMounts(root) + if err != nil { + return err + } + + // sort in reverse-lexicographic order so the root mount will always be last + sort.Sort(sort.Reverse(sort.StringSlice(mounts))) + + for i, m := range mounts { + if err := mount.Unmount(m); err != nil { + if i == len(mounts)-1 { + return errors.Wrapf(err, "error performing recursive unmount on %s", root) + } + logrus.WithError(err).WithField("mountpoint", m).Warn("could not unmount") + } + } + + return nil +} + +// Set sets plugin args +func (pm *Manager) Set(name string, args []string) error { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return err + } + if err := p.Set(args); err != nil { + return err + } + return pm.save(p) +} + +// CreateFromContext creates a plugin from the given pluginDir which contains +// both the rootfs and the config.json and a repoName with optional tag. +func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) { + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + ref, err := reference.ParseNamed(options.RepoName) + if err != nil { + return errors.Wrapf(err, "failed to parse reference %v", options.RepoName) + } + if _, ok := ref.(reference.Canonical); ok { + return errors.Errorf("canonical references are not permitted") + } + taggedRef := reference.WithDefaultTag(ref) + name := taggedRef.String() + + if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin() + return err + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + defer os.RemoveAll(tmpRootFSDir) + if err != nil { + return errors.Wrap(err, "failed to create temp directory") + } + var configJSON []byte + rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON) + + rootFSBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer rootFSBlob.Close() + gzw := gzip.NewWriter(rootFSBlob) + layerDigester := digest.Canonical.New() + rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash())) + + if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil { + return err + } + if err := rootFS.Close(); err != nil { + return err + } + + if configJSON == nil { + return errors.New("config not found") + } + + if err := gzw.Close(); err != nil { + return errors.Wrap(err, "error closing gzip writer") + } + + var config types.PluginConfig + if err := json.Unmarshal(configJSON, &config); err != nil { + return errors.Wrap(err, "failed to parse config") + } + + if err := pm.validateConfig(config); err != nil { + return err + } + + pm.mu.Lock() + defer pm.mu.Unlock() + + rootFSBlobsum, err := rootFSBlob.Commit() + if err != nil { + return err + } + defer func() { + if err != nil { + go pm.GC() + } + }() + + config.Rootfs = &types.PluginConfigRootfs{ + Type: "layers", + DiffIds: []string{layerDigester.Digest().String()}, + } + + configBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer configBlob.Close() + if err := json.NewEncoder(configBlob).Encode(config); err != nil { + return errors.Wrap(err, "error encoding json config") + } + configBlobsum, err := configBlob.Commit() + if err != nil { + return err + } + + p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil) + if err != nil { + return err + } + p.PluginObj.PluginReference = taggedRef.String() + + pm.config.LogPluginEvent(p.PluginObj.ID, name, "create") + + return nil +} + +func (pm *Manager) validateConfig(config types.PluginConfig) error { + return nil // TODO: +} + +func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser { + pr, pw := io.Pipe() + go func() { + tarReader := tar.NewReader(in) + tarWriter := tar.NewWriter(pw) + defer in.Close() + + hasRootFS := false + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + if !hasRootFS { + pw.CloseWithError(errors.Wrap(err, "no rootfs found")) + return + } + // Signals end of archive. + tarWriter.Close() + pw.Close() + return + } + if err != nil { + pw.CloseWithError(errors.Wrap(err, "failed to read from tar")) + return + } + + content := io.Reader(tarReader) + name := path.Clean(hdr.Name) + if path.IsAbs(name) { + name = name[1:] + } + if name == configFileName { + dt, err := ioutil.ReadAll(content) + if err != nil { + pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName)) + return + } + *config = dt + } + if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName { + hdr.Name = path.Clean(path.Join(parts[1:]...)) + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") { + hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:] + } + if err := tarWriter.WriteHeader(hdr); err != nil { + pw.CloseWithError(errors.Wrap(err, "error writing tar header")) + return + } + if _, err := pools.Copy(tarWriter, content); err != nil { + pw.CloseWithError(errors.Wrap(err, "error copying tar data")) + return + } + hasRootFS = true + } else { + io.Copy(ioutil.Discard, content) + } + } + }() + return pr +} diff --git a/vendor/github.com/docker/docker/plugin/backend_unsupported.go b/vendor/github.com/docker/docker/plugin/backend_unsupported.go new file mode 100644 index 0000000..66e6dab --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/backend_unsupported.go @@ -0,0 +1,71 @@ +// +build !linux + +package plugin + +import ( + "errors" + "io" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +var errNotSupported = errors.New("plugins are not supported on this platform") + +// Disable deactivates a plugin, which implies that they cannot be used by containers. +func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error { + return errNotSupported +} + +// Enable activates a plugin, which implies that they are ready to be used by containers. +func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error { + return errNotSupported +} + +// Inspect examines a plugin config +func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { + return nil, errNotSupported +} + +// Privileges pulls a plugin config and computes the privileges required to install it. +func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { + return nil, errNotSupported +} + +// Pull pulls a plugin, check if the correct privileges are provided and install the plugin. +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer) error { + return errNotSupported +} + +// Upgrade pulls a plugin, check if the correct privileges are provided and install the plugin. +func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error { + return errNotSupported +} + +// List displays the list of plugins and associated metadata. +func (pm *Manager) List() ([]types.Plugin, error) { + return nil, errNotSupported +} + +// Push pushes a plugin to the store. +func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error { + return errNotSupported +} + +// Remove deletes plugin's root directory. +func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { + return errNotSupported +} + +// Set sets plugin args +func (pm *Manager) Set(name string, args []string) error { + return errNotSupported +} + +// CreateFromContext creates a plugin from the given pluginDir which contains +// both the rootfs and the config.json and a repoName with optional tag. +func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error { + return errNotSupported +} diff --git a/vendor/github.com/docker/docker/plugin/blobstore.go b/vendor/github.com/docker/docker/plugin/blobstore.go new file mode 100644 index 0000000..dc9e598 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/blobstore.go @@ -0,0 +1,181 @@ +package plugin + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/progress" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type blobstore interface { + New() (WriteCommitCloser, error) + Get(dgst digest.Digest) (io.ReadCloser, error) + Size(dgst digest.Digest) (int64, error) +} + +type basicBlobStore struct { + path string +} + +func newBasicBlobStore(p string) (*basicBlobStore, error) { + tmpdir := filepath.Join(p, "tmp") + if err := os.MkdirAll(tmpdir, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", p) + } + return &basicBlobStore{path: p}, nil +} + +func (b *basicBlobStore) New() (WriteCommitCloser, error) { + f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion") + if err != nil { + return nil, errors.Wrap(err, "failed to create temp file") + } + return newInsertion(f), nil +} + +func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) { + return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) +} + +func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) { + stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) + if err != nil { + return 0, err + } + return stat.Size(), nil +} + +func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) { + for _, alg := range []string{string(digest.Canonical)} { + items, err := ioutil.ReadDir(filepath.Join(b.path, alg)) + if err != nil { + continue + } + for _, fi := range items { + if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists { + p := filepath.Join(b.path, alg, fi.Name()) + err := os.RemoveAll(p) + logrus.Debugf("cleaned up blob %v: %v", p, err) + } + } + } + +} + +// WriteCommitCloser defines object that can be committed to blobstore. +type WriteCommitCloser interface { + io.WriteCloser + Commit() (digest.Digest, error) +} + +type insertion struct { + io.Writer + f *os.File + digester digest.Digester + closed bool +} + +func newInsertion(tempFile *os.File) *insertion { + digester := digest.Canonical.New() + return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())} +} + +func (i *insertion) Commit() (digest.Digest, error) { + p := i.f.Name() + d := filepath.Join(filepath.Join(p, "../../")) + i.f.Sync() + defer os.RemoveAll(p) + if err := i.f.Close(); err != nil { + return "", err + } + i.closed = true + dgst := i.digester.Digest() + if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %v", d) + } + if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil { + return "", errors.Wrapf(err, "failed to rename %v", p) + } + return dgst, nil +} + +func (i *insertion) Close() error { + if i.closed { + return nil + } + defer os.RemoveAll(i.f.Name()) + return i.f.Close() +} + +type downloadManager struct { + blobStore blobstore + tmpDir string + blobs []digest.Digest + configDigest digest.Digest +} + +func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + for _, l := range layers { + b, err := dm.blobStore.New() + if err != nil { + return initialRootFS, nil, err + } + defer b.Close() + rc, _, err := l.Download(ctx, progressOutput) + if err != nil { + return initialRootFS, nil, errors.Wrap(err, "failed to download") + } + defer rc.Close() + r := io.TeeReader(rc, b) + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return initialRootFS, nil, err + } + digester := digest.Canonical.New() + if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { + return initialRootFS, nil, err + } + initialRootFS.Append(layer.DiffID(digester.Digest())) + d, err := b.Commit() + if err != nil { + return initialRootFS, nil, err + } + dm.blobs = append(dm.blobs, d) + } + return initialRootFS, nil, nil +} + +func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { + b, err := dm.blobStore.New() + if err != nil { + return "", err + } + defer b.Close() + n, err := b.Write(dt) + if err != nil { + return "", err + } + if n != len(dt) { + return "", io.ErrShortWrite + } + d, err := b.Commit() + dm.configDigest = d + return d, err +} + +func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { + return nil, digest.ErrDigestNotFound +} +func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) { + return configToRootFS(c) +} diff --git a/vendor/github.com/docker/docker/plugin/defs.go b/vendor/github.com/docker/docker/plugin/defs.go new file mode 100644 index 0000000..927f639 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/defs.go @@ -0,0 +1,26 @@ +package plugin + +import ( + "sync" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" +) + +// Store manages the plugin inventory in memory and on-disk +type Store struct { + sync.RWMutex + plugins map[string]*v2.Plugin + /* handlers are necessary for transition path of legacy plugins + * to the new model. Legacy plugins use Handle() for registering an + * activation callback.*/ + handlers map[string][]func(string, *plugins.Client) +} + +// NewStore creates a Store. +func NewStore(libRoot string) *Store { + return &Store{ + plugins: make(map[string]*v2.Plugin), + handlers: make(map[string][]func(string, *plugins.Client)), + } +} diff --git a/vendor/github.com/docker/docker/plugin/manager.go b/vendor/github.com/docker/docker/plugin/manager.go new file mode 100644 index 0000000..f260aa6 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/manager.go @@ -0,0 +1,347 @@ +package plugin + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +const configFileName = "config.json" +const rootFSFileName = "rootfs" + +var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func (pm *Manager) restorePlugin(p *v2.Plugin) error { + if p.IsEnabled() { + return pm.restore(p) + } + return nil +} + +type eventLogger func(id, name, action string) + +// ManagerConfig defines configuration needed to start new manager. +type ManagerConfig struct { + Store *Store // remove + Executor libcontainerd.Remote + RegistryService registry.Service + LiveRestoreEnabled bool // TODO: remove + LogPluginEvent eventLogger + Root string + ExecRoot string +} + +// Manager controls the plugin subsystem. +type Manager struct { + config ManagerConfig + mu sync.RWMutex // protects cMap + muGC sync.RWMutex // protects blobstore deletions + cMap map[*v2.Plugin]*controller + containerdClient libcontainerd.Client + blobStore *basicBlobStore +} + +// controller represents the manager's control on a plugin. +type controller struct { + restart bool + exitChan chan bool + timeoutInSecs int +} + +// pluginRegistryService ensures that all resolved repositories +// are of the plugin class. +type pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { + repoInfo, err = s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return +} + +// NewManager returns a new plugin manager. +func NewManager(config ManagerConfig) (*Manager, error) { + if config.RegistryService != nil { + config.RegistryService = pluginRegistryService{config.RegistryService} + } + manager := &Manager{ + config: config, + } + if err := os.MkdirAll(manager.config.Root, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root) + } + if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot) + } + if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir()) + } + var err error + manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct + if err != nil { + return nil, errors.Wrap(err, "failed to create containerd client") + } + manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs")) + if err != nil { + return nil, err + } + + manager.cMap = make(map[*v2.Plugin]*controller) + if err := manager.reload(); err != nil { + return nil, errors.Wrap(err, "failed to restore plugins") + } + return manager, nil +} + +func (pm *Manager) tmpDir() string { + return filepath.Join(pm.config.Root, "tmp") +} + +// StateChanged updates plugin internals using libcontainerd events. +func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { + logrus.Debugf("plugin state changed %s %#v", id, e) + + switch e.State { + case libcontainerd.StateExit: + p, err := pm.config.Store.GetV2Plugin(id) + if err != nil { + return err + } + + pm.mu.RLock() + c := pm.cMap[p] + + if c.exitChan != nil { + close(c.exitChan) + } + restart := c.restart + pm.mu.RUnlock() + + os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)) + + if p.PropagatedMount != "" { + if err := mount.Unmount(p.PropagatedMount); err != nil { + logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) + } + propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + if err := mount.Unmount(propRoot); err != nil { + logrus.Warn("Could not unmount %s: %v", propRoot, err) + } + } + + if restart { + pm.enable(p, c, true) + } + } + + return nil +} + +func (pm *Manager) reload() error { // todo: restore + dir, err := ioutil.ReadDir(pm.config.Root) + if err != nil { + return errors.Wrapf(err, "failed to read %v", pm.config.Root) + } + plugins := make(map[string]*v2.Plugin) + for _, v := range dir { + if validFullID.MatchString(v.Name()) { + p, err := pm.loadPlugin(v.Name()) + if err != nil { + return err + } + plugins[p.GetID()] = p + } + } + + pm.config.Store.SetAll(plugins) + + var wg sync.WaitGroup + wg.Add(len(plugins)) + for _, p := range plugins { + c := &controller{} // todo: remove this + pm.cMap[p] = c + go func(p *v2.Plugin) { + defer wg.Done() + if err := pm.restorePlugin(p); err != nil { + logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err) + return + } + + if p.Rootfs != "" { + p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") + } + + // We should only enable rootfs propagation for certain plugin types that need it. + for _, typ := range p.PluginObj.Config.Interface.Types { + if (typ.Capability == "volumedriver" || typ.Capability == "graphdriver") && typ.Prefix == "docker" && strings.HasPrefix(typ.Version, "1.") { + if p.PluginObj.Config.PropagatedMount != "" { + propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + + // check if we need to migrate an older propagated mount from before + // these mounts were stored outside the plugin rootfs + if _, err := os.Stat(propRoot); os.IsNotExist(err) { + if _, err := os.Stat(p.PropagatedMount); err == nil { + // make sure nothing is mounted here + // don't care about errors + mount.Unmount(p.PropagatedMount) + if err := os.Rename(p.PropagatedMount, propRoot); err != nil { + logrus.WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage") + } + if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { + logrus.WithError(err).WithField("dir", p.PropagatedMount).Error("error migrating propagated mount storage") + } + } + } + + if err := os.MkdirAll(propRoot, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + } + // TODO: sanitize PropagatedMount and prevent breakout + p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", p.PropagatedMount, err) + return + } + } + } + } + + pm.save(p) + requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled() + + if requiresManualRestore { + // if liveRestore is not enabled, the plugin will be stopped now so we should enable it + if err := pm.enable(p, c, true); err != nil { + logrus.Errorf("failed to enable plugin '%s': %s", p.Name(), err) + } + } + }(p) + } + wg.Wait() + return nil +} + +func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) { + p := filepath.Join(pm.config.Root, id, configFileName) + dt, err := ioutil.ReadFile(p) + if err != nil { + return nil, errors.Wrapf(err, "error reading %v", p) + } + var plugin v2.Plugin + if err := json.Unmarshal(dt, &plugin); err != nil { + return nil, errors.Wrapf(err, "error decoding %v", p) + } + return &plugin, nil +} + +func (pm *Manager) save(p *v2.Plugin) error { + pluginJSON, err := json.Marshal(p) + if err != nil { + return errors.Wrap(err, "failed to marshal plugin json") + } + if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil { + return errors.Wrap(err, "failed to write atomically plugin json") + } + return nil +} + +// GC cleans up unrefrenced blobs. This is recommended to run in a goroutine +func (pm *Manager) GC() { + pm.muGC.Lock() + defer pm.muGC.Unlock() + + whitelist := make(map[digest.Digest]struct{}) + for _, p := range pm.config.Store.GetAll() { + whitelist[p.Config] = struct{}{} + for _, b := range p.Blobsums { + whitelist[b] = struct{}{} + } + } + + pm.blobStore.gc(whitelist) +} + +type logHook struct{ id string } + +func (logHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (l logHook) Fire(entry *logrus.Entry) error { + entry.Data = logrus.Fields{"plugin": l.id} + return nil +} + +func attachToLog(id string) func(libcontainerd.IOPipe) error { + return func(iop libcontainerd.IOPipe) error { + iop.Stdin.Close() + + logger := logrus.New() + logger.Hooks.Add(logHook{id}) + // TODO: cache writer per id + w := logger.Writer() + go func() { + io.Copy(w, iop.Stdout) + }() + go func() { + // TODO: update logrus and use logger.WriterLevel + io.Copy(w, iop.Stderr) + }() + return nil + } +} + +func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error { + // todo: make a better function that doesn't check order + if !reflect.DeepEqual(privileges, requiredPrivileges) { + return errors.New("incorrect privileges") + } + return nil +} + +func configToRootFS(c []byte) (*image.RootFS, error) { + var pluginConfig types.PluginConfig + if err := json.Unmarshal(c, &pluginConfig); err != nil { + return nil, err + } + // validation for empty rootfs is in distribution code + if pluginConfig.Rootfs == nil { + return nil, nil + } + + return rootFSFromPlugin(pluginConfig.Rootfs), nil +} + +func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS { + rootFS := image.RootFS{ + Type: pluginfs.Type, + DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)), + } + for i := range pluginfs.DiffIds { + rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i]) + } + + return &rootFS +} diff --git a/vendor/github.com/docker/docker/plugin/manager_linux.go b/vendor/github.com/docker/docker/plugin/manager_linux.go new file mode 100644 index 0000000..ad66616 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/manager_linux.go @@ -0,0 +1,284 @@ +// +build linux + +package plugin + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") + if p.IsEnabled() && !force { + return fmt.Errorf("plugin %s is already enabled", p.Name()) + } + spec, err := p.InitSpec(pm.config.ExecRoot) + if err != nil { + return err + } + + c.restart = true + c.exitChan = make(chan bool) + + pm.mu.Lock() + pm.cMap[p] = c + pm.mu.Unlock() + + var propRoot string + if p.PropagatedMount != "" { + propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + + if err := os.MkdirAll(propRoot, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + } + + if err := mount.MakeRShared(propRoot); err != nil { + return errors.Wrap(err, "error setting up propagated mount dir") + } + + if err := mount.Mount(propRoot, p.PropagatedMount, "none", "rbind"); err != nil { + return errors.Wrap(err, "error creating mount for propagated mount") + } + } + + if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), 0, 0); err != nil { + return errors.WithStack(err) + } + + if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil { + if p.PropagatedMount != "" { + if err := mount.Unmount(p.PropagatedMount); err != nil { + logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) + } + if err := mount.Unmount(propRoot); err != nil { + logrus.Warnf("Could not unmount %s: %v", propRoot, err) + } + } + return errors.WithStack(err) + } + + return pm.pluginPostStart(p, c) +} + +func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { + client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()), nil, c.timeoutInSecs) + if err != nil { + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + return errors.WithStack(err) + } + + p.SetPClient(client) + pm.config.Store.SetState(p, true) + pm.config.Store.CallHandler(p) + + return pm.save(p) +} + +func (pm *Manager) restore(p *v2.Plugin) error { + if err := pm.containerdClient.Restore(p.GetID(), attachToLog(p.GetID())); err != nil { + return err + } + + if pm.config.LiveRestoreEnabled { + c := &controller{} + if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 { + // plugin is not running, so follow normal startup procedure + return pm.enable(p, c, true) + } + + c.exitChan = make(chan bool) + c.restart = true + pm.mu.Lock() + pm.cMap[p] = c + pm.mu.Unlock() + return pm.pluginPostStart(p, c) + } + + return nil +} + +func shutdownPlugin(p *v2.Plugin, c *controller, containerdClient libcontainerd.Client) { + pluginID := p.GetID() + + err := containerdClient.Signal(pluginID, int(syscall.SIGTERM)) + if err != nil { + logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err) + } else { + select { + case <-c.exitChan: + logrus.Debug("Clean shutdown of plugin") + case <-time.After(time.Second * 10): + logrus.Debug("Force shutdown plugin") + if err := containerdClient.Signal(pluginID, int(syscall.SIGKILL)); err != nil { + logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err) + } + } + } +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + if !p.IsEnabled() { + return fmt.Errorf("plugin %s is already disabled", p.Name()) + } + + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + pm.config.Store.SetState(p, false) + return pm.save(p) +} + +// Shutdown stops all plugins and called during daemon shutdown. +func (pm *Manager) Shutdown() { + plugins := pm.config.Store.GetAll() + for _, p := range plugins { + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if pm.config.LiveRestoreEnabled && p.IsEnabled() { + logrus.Debug("Plugin active when liveRestore is set, skipping shutdown") + continue + } + if pm.containerdClient != nil && p.IsEnabled() { + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + } + } +} + +func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobsums []digest.Digest, tmpRootFSDir string, privileges *types.PluginPrivileges) (err error) { + config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) + if err != nil { + return err + } + + pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) + orig := filepath.Join(pdir, "rootfs") + backup := orig + "-old" + if err := os.Rename(orig, backup); err != nil { + return err + } + + defer func() { + if err != nil { + if rmErr := os.RemoveAll(orig); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up after failed upgrade") + return + } + + if err := os.Rename(backup, orig); err != nil { + err = errors.Wrap(err, "error restoring old plugin root on upgrade failure") + } + if rmErr := os.RemoveAll(tmpRootFSDir); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir) + } + } else { + if rmErr := os.RemoveAll(backup); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up old plugin root after successful upgrade") + } + + p.Config = configDigest + p.Blobsums = blobsums + } + }() + + if err := os.Rename(tmpRootFSDir, orig); err != nil { + return errors.Wrap(err, "error upgrading") + } + + p.PluginObj.Config = config + err = pm.save(p) + return errors.Wrap(err, "error saving upgraded plugin config") +} + +func (pm *Manager) setupNewPlugin(configDigest digest.Digest, blobsums []digest.Digest, privileges *types.PluginPrivileges) (types.PluginConfig, error) { + configRC, err := pm.blobStore.Get(configDigest) + if err != nil { + return types.PluginConfig{}, err + } + defer configRC.Close() + + var config types.PluginConfig + dec := json.NewDecoder(configRC) + if err := dec.Decode(&config); err != nil { + return types.PluginConfig{}, errors.Wrapf(err, "failed to parse config") + } + if dec.More() { + return types.PluginConfig{}, errors.New("invalid config json") + } + + requiredPrivileges, err := computePrivileges(config) + if err != nil { + return types.PluginConfig{}, err + } + if privileges != nil { + if err := validatePrivileges(requiredPrivileges, *privileges); err != nil { + return types.PluginConfig{}, err + } + } + + return config, nil +} + +// createPlugin creates a new plugin. take lock before calling. +func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges) (p *v2.Plugin, err error) { + if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. remove store + return nil, err + } + + config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) + if err != nil { + return nil, err + } + + p = &v2.Plugin{ + PluginObj: types.Plugin{ + Name: name, + ID: stringid.GenerateRandomID(), + Config: config, + }, + Config: configDigest, + Blobsums: blobsums, + } + p.InitEmptySettings() + + pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) + if err := os.MkdirAll(pdir, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", pdir) + } + + defer func() { + if err != nil { + os.RemoveAll(pdir) + } + }() + + if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil { + return nil, errors.Wrap(err, "failed to rename rootfs") + } + + if err := pm.save(p); err != nil { + return nil, err + } + + pm.config.Store.Add(p) // todo: remove + + return p, nil +} diff --git a/vendor/github.com/docker/docker/plugin/manager_solaris.go b/vendor/github.com/docker/docker/plugin/manager_solaris.go new file mode 100644 index 0000000..72ccae7 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/manager_solaris.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) restore(p *v2.Plugin) error { + return fmt.Errorf("Not implemented") +} + +// Shutdown plugins +func (pm *Manager) Shutdown() { +} diff --git a/vendor/github.com/docker/docker/plugin/manager_windows.go b/vendor/github.com/docker/docker/plugin/manager_windows.go new file mode 100644 index 0000000..4469a67 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/manager_windows.go @@ -0,0 +1,30 @@ +// +build windows + +package plugin + +import ( + "fmt" + + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) restore(p *v2.Plugin) error { + return fmt.Errorf("Not implemented") +} + +// Shutdown plugins +func (pm *Manager) Shutdown() { +} diff --git a/vendor/github.com/docker/docker/plugin/store.go b/vendor/github.com/docker/docker/plugin/store.go new file mode 100644 index 0000000..b7a96a9 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/store.go @@ -0,0 +1,263 @@ +package plugin + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/reference" + "github.com/pkg/errors" +) + +/* allowV1PluginsFallback determines daemon's support for V1 plugins. + * When the time comes to remove support for V1 plugins, flipping + * this bool is all that will be needed. + */ +const allowV1PluginsFallback bool = true + +/* defaultAPIVersion is the version of the plugin API for volume, network, + IPAM and authz. This is a very stable API. When we update this API, then + pluginType should include a version. eg "networkdriver/2.0". +*/ +const defaultAPIVersion string = "1.0" + +// ErrNotFound indicates that a plugin was not found locally. +type ErrNotFound string + +func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) } + +// ErrAmbiguous indicates that a plugin was not found locally. +type ErrAmbiguous string + +func (name ErrAmbiguous) Error() string { + return fmt.Sprintf("multiple plugins found for %q", string(name)) +} + +// GetV2Plugin retreives a plugin by name, id or partial ID. +func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { + ps.RLock() + defer ps.RUnlock() + + id, err := ps.resolvePluginID(refOrID) + if err != nil { + return nil, err + } + + p, idOk := ps.plugins[id] + if !idOk { + return nil, errors.WithStack(ErrNotFound(id)) + } + + return p, nil +} + +// validateName returns error if name is already reserved. always call with lock and full name +func (ps *Store) validateName(name string) error { + for _, p := range ps.plugins { + if p.Name() == name { + return errors.Errorf("plugin %q already exists", name) + } + } + return nil +} + +// GetAll retreives all plugins. +func (ps *Store) GetAll() map[string]*v2.Plugin { + ps.RLock() + defer ps.RUnlock() + return ps.plugins +} + +// SetAll initialized plugins during daemon restore. +func (ps *Store) SetAll(plugins map[string]*v2.Plugin) { + ps.Lock() + defer ps.Unlock() + ps.plugins = plugins +} + +func (ps *Store) getAllByCap(capability string) []plugingetter.CompatPlugin { + ps.RLock() + defer ps.RUnlock() + + result := make([]plugingetter.CompatPlugin, 0, 1) + for _, p := range ps.plugins { + if p.IsEnabled() { + if _, err := p.FilterByCap(capability); err == nil { + result = append(result, p) + } + } + } + return result +} + +// SetState sets the active state of the plugin and updates plugindb. +func (ps *Store) SetState(p *v2.Plugin, state bool) { + ps.Lock() + defer ps.Unlock() + + p.PluginObj.Enabled = state +} + +// Add adds a plugin to memory and plugindb. +// An error will be returned if there is a collision. +func (ps *Store) Add(p *v2.Plugin) error { + ps.Lock() + defer ps.Unlock() + + if v, exist := ps.plugins[p.GetID()]; exist { + return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name()) + } + ps.plugins[p.GetID()] = p + return nil +} + +// Remove removes a plugin from memory and plugindb. +func (ps *Store) Remove(p *v2.Plugin) { + ps.Lock() + delete(ps.plugins, p.GetID()) + ps.Unlock() +} + +// Get returns an enabled plugin matching the given name and capability. +func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { + var ( + p *v2.Plugin + err error + ) + + // Lookup using new model. + if ps != nil { + p, err = ps.GetV2Plugin(name) + if err == nil { + p.AddRefCount(mode) + if p.IsEnabled() { + return p.FilterByCap(capability) + } + // Plugin was found but it is disabled, so we should not fall back to legacy plugins + // but we should error out right away + return nil, ErrNotFound(name) + } + if _, ok := errors.Cause(err).(ErrNotFound); !ok { + return nil, err + } + } + + // Lookup using legacy model. + if allowV1PluginsFallback { + p, err := plugins.Get(name, capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + return p, nil + } + + return nil, err +} + +// GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability. +func (ps *Store) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { + return ps.getAllByCap(capability) +} + +// GetAllByCap returns a list of enabled plugins matching the given capability. +func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { + result := make([]plugingetter.CompatPlugin, 0, 1) + + /* Daemon start always calls plugin.Init thereby initializing a store. + * So store on experimental builds can never be nil, even while + * handling legacy plugins. However, there are legacy plugin unit + * tests where the volume subsystem directly talks with the plugin, + * bypassing the daemon. For such tests, this check is necessary. + */ + if ps != nil { + ps.RLock() + result = ps.getAllByCap(capability) + ps.RUnlock() + } + + // Lookup with legacy model + if allowV1PluginsFallback { + pl, err := plugins.GetAll(capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + for _, p := range pl { + result = append(result, p) + } + } + return result, nil +} + +// Handle sets a callback for a given capability. It is only used by network +// and ipam drivers during plugin registration. The callback registers the +// driver with the subsystem (network, ipam). +func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) { + pluginType := fmt.Sprintf("docker.%s/%s", strings.ToLower(capability), defaultAPIVersion) + + // Register callback with new plugin model. + ps.Lock() + handlers, ok := ps.handlers[pluginType] + if !ok { + handlers = []func(string, *plugins.Client){} + } + handlers = append(handlers, callback) + ps.handlers[pluginType] = handlers + ps.Unlock() + + // Register callback with legacy plugin model. + if allowV1PluginsFallback { + plugins.Handle(capability, callback) + } +} + +// CallHandler calls the registered callback. It is invoked during plugin enable. +func (ps *Store) CallHandler(p *v2.Plugin) { + for _, typ := range p.GetTypes() { + for _, handler := range ps.handlers[typ.String()] { + handler(p.Name(), p.Client()) + } + } +} + +func (ps *Store) resolvePluginID(idOrName string) (string, error) { + ps.RLock() // todo: fix + defer ps.RUnlock() + + if validFullID.MatchString(idOrName) { + return idOrName, nil + } + + ref, err := reference.ParseNamed(idOrName) + if err != nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + if _, ok := ref.(reference.Canonical); ok { + logrus.Warnf("canonical references cannot be resolved: %v", ref.String()) + return "", errors.WithStack(ErrNotFound(idOrName)) + } + + fullRef := reference.WithDefaultTag(ref) + + for _, p := range ps.plugins { + if p.PluginObj.Name == fullRef.String() { + return p.PluginObj.ID, nil + } + } + + var found *v2.Plugin + for id, p := range ps.plugins { // this can be optimized + if strings.HasPrefix(id, idOrName) { + if found != nil { + return "", errors.WithStack(ErrAmbiguous(idOrName)) + } + found = p + } + } + if found == nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + return found.PluginObj.ID, nil +} diff --git a/vendor/github.com/docker/docker/plugin/store_test.go b/vendor/github.com/docker/docker/plugin/store_test.go new file mode 100644 index 0000000..6b1f6a9 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/store_test.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/plugin/v2" +) + +func TestFilterByCapNeg(t *testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} + i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("foobar") + if err == nil { + t.Fatalf("expected inadequate error, got %v", err) + } +} + +func TestFilterByCapPos(t *testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + + iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} + i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("volumedriver") + if err != nil { + t.Fatalf("expected no error, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin.go b/vendor/github.com/docker/docker/plugin/v2/plugin.go new file mode 100644 index 0000000..93b489a --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin.go @@ -0,0 +1,244 @@ +package v2 + +import ( + "fmt" + "strings" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +// Plugin represents an individual plugin. +type Plugin struct { + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + PropagatedMount string // TODO: make private + Rootfs string // TODO: make private + + Config digest.Digest + Blobsums []digest.Digest +} + +const defaultPluginRuntimeDestination = "/run/docker/plugins" + +// ErrInadequateCapability indicates that the plugin did not have the requested capability. +type ErrInadequateCapability struct { + cap string +} + +func (e ErrInadequateCapability) Error() string { + return fmt.Sprintf("plugin does not provide %q capability", e.cap) +} + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Plugin objects this returns the host path of the plugin container's rootfs. +func (p *Plugin) BasePath() string { + return p.Rootfs +} + +// Client returns the plugin client. +func (p *Plugin) Client() *plugins.Client { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.pClient +} + +// SetPClient set the plugin client. +func (p *Plugin) SetPClient(client *plugins.Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.pClient = client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return false +} + +// Name returns the plugin name. +func (p *Plugin) Name() string { + return p.PluginObj.Name +} + +// FilterByCap query the plugin for a given capability. +func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { + capability = strings.ToLower(capability) + for _, typ := range p.PluginObj.Config.Interface.Types { + if typ.Capability == capability && typ.Prefix == "docker" { + return p, nil + } + } + return nil, ErrInadequateCapability{capability} +} + +// InitEmptySettings initializes empty settings for a plugin. +func (p *Plugin) InitEmptySettings() { + p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) + copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) + p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) + copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) + p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) + for _, env := range p.PluginObj.Config.Env { + if env.Value != nil { + p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) + } + } + p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) + copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) +} + +// Set is used to pass arguments to the plugin. +func (p *Plugin) Set(args []string) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.PluginObj.Enabled { + return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") + } + + sets, err := newSettables(args) + if err != nil { + return err + } + + // TODO(vieux): lots of code duplication here, needs to be refactored. + +next: + for _, s := range sets { + // range over all the envs in the config + for _, env := range p.PluginObj.Config.Env { + // found the env in the config + if env.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + // is it, so lets update the settings in memory + updateSettingsEnv(&p.PluginObj.Settings.Env, &s) + continue next + } + } + + // range over all the mounts in the config + for _, mount := range p.PluginObj.Config.Mounts { + // found the mount in the config + if mount.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + *mount.Source = s.value + continue next + } + } + + // range over all the devices in the config + for _, device := range p.PluginObj.Config.Linux.Devices { + // found the device in the config + if device.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + *device.Path = s.value + continue next + } + } + + // found the name in the config + if p.PluginObj.Config.Args.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + p.PluginObj.Settings.Args = strings.Split(s.value, " ") + continue next + } + + return fmt.Errorf("setting %q not found in the plugin configuration", s.name) + } + + return nil +} + +// IsEnabled returns the active state of the plugin. +func (p *Plugin) IsEnabled() bool { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Enabled +} + +// GetID returns the plugin's ID. +func (p *Plugin) GetID() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.ID +} + +// GetSocket returns the plugin socket. +func (p *Plugin) GetSocket() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Socket +} + +// GetTypes returns the interface types of a plugin. +func (p *Plugin) GetTypes() []types.PluginInterfaceType { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Types +} + +// GetRefCount returns the reference count. +func (p *Plugin) GetRefCount() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.refCount +} + +// AddRefCount adds to reference count. +func (p *Plugin) AddRefCount(count int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.refCount += count +} + +// Acquire increments the plugin's reference count +// This should be followed up by `Release()` when the plugin is no longer in use. +func (p *Plugin) Acquire() { + p.AddRefCount(plugingetter.ACQUIRE) +} + +// Release decrements the plugin's reference count +// This should only be called when the plugin is no longer in use, e.g. with +// via `Acquire()` or getter.Get("name", "type", plugingetter.ACQUIRE) +func (p *Plugin) Release() { + p.AddRefCount(plugingetter.RELEASE) +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go new file mode 100644 index 0000000..e980e7f --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -0,0 +1,121 @@ +// +build linux + +package v2 + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + s := oci.DefaultSpec() + s.Root = specs.Root{ + Path: p.Rootfs, + Readonly: false, // TODO: all plugins should be readonly? settable in config? + } + + userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) + for _, m := range p.PluginObj.Settings.Mounts { + userMounts[m.Destination] = struct{}{} + } + + execRoot = filepath.Join(execRoot, p.PluginObj.ID) + if err := os.MkdirAll(execRoot, 0700); err != nil { + return nil, errors.WithStack(err) + } + + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ + Source: &execRoot, + Destination: defaultPluginRuntimeDestination, + Type: "bind", + Options: []string{"rbind", "rshared"}, + }) + + if p.PluginObj.Config.Network.Type != "" { + // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) + if p.PluginObj.Config.Network.Type == "host" { + oci.RemoveNamespace(&s, specs.NamespaceType("network")) + } + etcHosts := "/etc/hosts" + resolvConf := "/etc/resolv.conf" + mounts = append(mounts, + types.PluginMount{ + Source: &etcHosts, + Destination: etcHosts, + Type: "bind", + Options: []string{"rbind", "ro"}, + }, + types.PluginMount{ + Source: &resolvConf, + Destination: resolvConf, + Type: "bind", + Options: []string{"rbind", "ro"}, + }) + } + + for _, mnt := range mounts { + m := specs.Mount{ + Destination: mnt.Destination, + Type: mnt.Type, + Options: mnt.Options, + } + if mnt.Source == nil { + return nil, errors.New("mount source is not specified") + } + m.Source = *mnt.Source + s.Mounts = append(s.Mounts, m) + } + + for i, m := range s.Mounts { + if strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) + } + } + } + + if p.PluginObj.Config.PropagatedMount != "" { + p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + s.Linux.RootfsPropagation = "rshared" + } + + if p.PluginObj.Config.Linux.AllowAllDevices { + rwm := "rwm" + s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}} + } + for _, dev := range p.PluginObj.Settings.Devices { + path := *dev.Path + d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") + if err != nil { + return nil, errors.WithStack(err) + } + s.Linux.Devices = append(s.Linux.Devices, d...) + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) + } + + envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) + envs[0] = "PATH=" + system.DefaultPathEnv + envs = append(envs, p.PluginObj.Settings.Env...) + + args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) + cwd := p.PluginObj.Config.WorkDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Terminal = false + s.Process.Args = args + s.Process.Cwd = cwd + s.Process.Env = envs + + s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...) + + return &s, nil +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go new file mode 100644 index 0000000..e60fb83 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux + +package v2 + +import ( + "errors" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + return nil, errors.New("not supported") +} diff --git a/vendor/github.com/docker/docker/plugin/v2/settable.go b/vendor/github.com/docker/docker/plugin/v2/settable.go new file mode 100644 index 0000000..79c6bef --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/settable.go @@ -0,0 +1,102 @@ +package v2 + +import ( + "errors" + "fmt" + "strings" +) + +type settable struct { + name string + field string + value string +} + +var ( + allowedSettableFieldsEnv = []string{"value"} + allowedSettableFieldsArgs = []string{"value"} + allowedSettableFieldsDevices = []string{"path"} + allowedSettableFieldsMounts = []string{"source"} + + errMultipleFields = errors.New("multiple fields are settable, one must be specified") + errInvalidFormat = errors.New("invalid format, must be [.][=]") +) + +func newSettables(args []string) ([]settable, error) { + sets := make([]settable, 0, len(args)) + for _, arg := range args { + set, err := newSettable(arg) + if err != nil { + return nil, err + } + sets = append(sets, set) + } + return sets, nil +} + +func newSettable(arg string) (settable, error) { + var set settable + if i := strings.Index(arg, "="); i == 0 { + return set, errInvalidFormat + } else if i < 0 { + set.name = arg + } else { + set.name = arg[:i] + set.value = arg[i+1:] + } + + if i := strings.LastIndex(set.name, "."); i > 0 { + set.field = set.name[i+1:] + set.name = arg[:i] + } + + return set, nil +} + +// prettyName return name.field if there is a field, otherwise name. +func (set *settable) prettyName() string { + if set.field != "" { + return fmt.Sprintf("%s.%s", set.name, set.field) + } + return set.name +} + +func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { + if set.field == "" { + if len(settable) == 1 { + // if field is not specified and there only one settable, default to it. + set.field = settable[0] + } else if len(settable) > 1 { + return false, errMultipleFields + } + } + + isAllowed := false + for _, allowedSettableField := range allowedSettableFields { + if set.field == allowedSettableField { + isAllowed = true + break + } + } + + if isAllowed { + for _, settableField := range settable { + if set.field == settableField { + return true, nil + } + } + } + + return false, nil +} + +func updateSettingsEnv(env *[]string, set *settable) { + for i, e := range *env { + if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { + (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) + return + } + } + + *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) +} diff --git a/vendor/github.com/docker/docker/plugin/v2/settable_test.go b/vendor/github.com/docker/docker/plugin/v2/settable_test.go new file mode 100644 index 0000000..7183f3a --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/settable_test.go @@ -0,0 +1,91 @@ +package v2 + +import ( + "reflect" + "testing" +) + +func TestNewSettable(t *testing.T) { + contexts := []struct { + arg string + name string + field string + value string + err error + }{ + {"name=value", "name", "", "value", nil}, + {"name", "name", "", "", nil}, + {"name.field=value", "name", "field", "value", nil}, + {"name.field", "name", "field", "", nil}, + {"=value", "", "", "", errInvalidFormat}, + {"=", "", "", "", errInvalidFormat}, + } + + for _, c := range contexts { + s, err := newSettable(c.arg) + if err != c.err { + t.Fatalf("expected error to be %v, got %v", c.err, err) + } + + if s.name != c.name { + t.Fatalf("expected name to be %q, got %q", c.name, s.name) + } + + if s.field != c.field { + t.Fatalf("expected field to be %q, got %q", c.field, s.field) + } + + if s.value != c.value { + t.Fatalf("expected value to be %q, got %q", c.value, s.value) + } + + } +} + +func TestIsSettable(t *testing.T) { + contexts := []struct { + allowedSettableFields []string + set settable + settable []string + result bool + err error + }{ + {allowedSettableFieldsEnv, settable{}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"value"}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"foo"}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value1", "value2"}, false, errMultipleFields}, + } + + for _, c := range contexts { + if res, err := c.set.isSettable(c.allowedSettableFields, c.settable); res != c.result { + t.Fatalf("expected result to be %t, got %t", c.result, res) + } else if err != c.err { + t.Fatalf("expected error to be %v, got %v", c.err, err) + } + } +} + +func TestUpdateSettinsEnv(t *testing.T) { + contexts := []struct { + env []string + set settable + newEnv []string + }{ + {[]string{}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"FOO=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0", "BAR=1"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1", "BAR=1"}}, + } + + for _, c := range contexts { + updateSettingsEnv(&c.env, &c.set) + + if !reflect.DeepEqual(c.env, c.newEnv) { + t.Fatalf("expected env to be %q, got %q", c.newEnv, c.env) + } + } +} diff --git a/vendor/github.com/docker/docker/poule.yml b/vendor/github.com/docker/docker/poule.yml new file mode 100644 index 0000000..61aab45 --- /dev/null +++ b/vendor/github.com/docker/docker/poule.yml @@ -0,0 +1,88 @@ +# Add a "status/0-triage" to every newly opened pull request. +- triggers: + pull_request: [ opened ] + operations: + - type: label + settings: { + patterns: { + status/0-triage: [ ".*" ], + } + } + +# For every newly created or modified issue, assign label based on matching regexp using the `label` +# operation, as well as an Engine-specific version label using `version-label`. +- triggers: + issues: [ edited, opened, reopened ] + operations: + - type: label + settings: { + patterns: { + area/builder: [ "dockerfile", "docker build" ], + area/distribution: [ "docker login", "docker logout", "docker pull", "docker push", "docker search" ], + area/plugins: [ "docker plugin" ], + area/networking: [ "docker network", "ipvs", "vxlan" ], + area/runtime: [ "oci runtime error" ], + area/security/trust: [ "docker_content_trust" ], + area/swarm: [ "docker node", "docker service", "docker swarm" ], + platform/desktop: [ "docker for mac", "docker for windows" ], + platform/freebsd: [ "freebsd" ], + platform/windows: [ "nanoserver", "windowsservercore", "windows server" ], + } + } + - type: version-label + +# When a pull request is closed, attach it to the currently active milestone. +- triggers: + pull_request: [ closed ] + operations: + - type: version-milestone + +# Labeling a PR with `rebuild/` triggers a rebuild job for the associated +# configuration. The label is automatically removed after the rebuild is initiated. There's no such +# thing as "templating" in this configuration, so we need one operation for each type of +# configuration that can be triggered. +- triggers: + pull_request: [ labeled ] + operations: + - type: rebuild + settings: { + # When configurations are empty, the `rebuild` operation rebuilds all the currently + # known statuses for that pull request. + configurations: [], + label: "rebuild/*", + } + - type: rebuild + settings: { + configurations: [ arm ], + label: "rebuild/arm", + } + - type: rebuild + settings: { + configurations: [ experimental ], + label: "rebuild/experimental", + } + - type: rebuild + settings: { + configurations: [ janky ], + label: "rebuild/janky", + } + - type: rebuild + settings: { + configurations: [ userns ], + label: "rebuild/userns", + } + - type: rebuild + settings: { + configurations: [ vendor ], + label: "rebuild/vendor", + } + - type: rebuild + settings: { + configurations: [ win2lin ], + label: "rebuild/win2lin", + } + - type: rebuild + settings: { + configurations: [ windowsRS1 ], + label: "rebuild/windowsRS1", + } diff --git a/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go new file mode 100644 index 0000000..5132ebe --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go @@ -0,0 +1,122 @@ +// +build linux + +package apparmor + +import ( + "bufio" + "io" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/docker/docker/pkg/aaparser" + "github.com/docker/docker/utils/templates" +) + +var ( + // profileDirectory is the file store for apparmor profiles and macros. + profileDirectory = "/etc/apparmor.d" +) + +// profileData holds information about the given profile for generation. +type profileData struct { + // Name is profile name. + Name string + // Imports defines the apparmor functions to import, before defining the profile. + Imports []string + // InnerImports defines the apparmor functions to import in the profile. + InnerImports []string + // Version is the {major, minor, patch} version of apparmor_parser as a single number. + Version int +} + +// generateDefault creates an apparmor profile from ProfileData. +func (p *profileData) generateDefault(out io.Writer) error { + compiled, err := templates.NewParse("apparmor_profile", baseTemplate) + if err != nil { + return err + } + + if macroExists("tunables/global") { + p.Imports = append(p.Imports, "#include ") + } else { + p.Imports = append(p.Imports, "@{PROC}=/proc/") + } + + if macroExists("abstractions/base") { + p.InnerImports = append(p.InnerImports, "#include ") + } + + ver, err := aaparser.GetVersion() + if err != nil { + return err + } + p.Version = ver + + if err := compiled.Execute(out, p); err != nil { + return err + } + return nil +} + +// macrosExists checks if the passed macro exists. +func macroExists(m string) bool { + _, err := os.Stat(path.Join(profileDirectory, m)) + return err == nil +} + +// InstallDefault generates a default profile in a temp directory determined by +// os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'. +func InstallDefault(name string) error { + p := profileData{ + Name: name, + } + + // Install to a temporary directory. + f, err := ioutil.TempFile("", name) + if err != nil { + return err + } + profilePath := f.Name() + + defer f.Close() + defer os.Remove(profilePath) + + if err := p.generateDefault(f); err != nil { + f.Close() + return err + } + + if err := aaparser.LoadProfile(profilePath); err != nil { + return err + } + + return nil +} + +// IsLoaded checks if a profile with the given name has been loaded into the +// kernel. +func IsLoaded(name string) (bool, error) { + file, err := os.Open("/sys/kernel/security/apparmor/profiles") + if err != nil { + return false, err + } + defer file.Close() + + r := bufio.NewReader(file) + for { + p, err := r.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + return false, err + } + if strings.HasPrefix(p, name+" ") { + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/template.go b/vendor/github.com/docker/docker/profiles/apparmor/template.go new file mode 100644 index 0000000..c5ea458 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/template.go @@ -0,0 +1,46 @@ +// +build linux + +package apparmor + +// baseTemplate defines the default apparmor profile for containers. +const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) + # deny write to files not in /proc//** or /proc/sys/** + deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, + deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) + deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/kcore rwklx, + + deny mount, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/** rwklx, + deny /sys/kernel/security/** rwklx, + +{{if ge .Version 208095}} + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer={{.Name}}, +{{end}} +} +` diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json new file mode 100755 index 0000000..ac129d3 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -0,0 +1,698 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + }, + { + "architecture": "SCMP_ARCH_AARCH64", + "subArchitectures": [ + "SCMP_ARCH_ARM" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64" + ] + }, + { + "architecture": "SCMP_ARCH_S390X", + "subArchitectures": [ + "SCMP_ARCH_S390" + ] + } + ], + "syscalls": [ + { + "names": [ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "arm_fadvise64_64", + "arm_sync_file_range", + "breakpoint", + "cacheflush", + "set_tls" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "arm", + "arm64" + ] + }, + "excludes": {} + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32" + ] + }, + "excludes": {} + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32", + "x86" + ] + }, + "excludes": {} + }, + { + "names": [ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": {} + }, + { + "names": [ + "open_by_handle_at" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_DAC_READ_SEARCH" + ] + }, + "excludes": {} + }, + { + "names": [ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + }, + "excludes": {} + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ], + "arches": [ + "s390", + "s390x" + ] + } + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 1, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "s390 parameter ordering for clone is different", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + } + }, + { + "names": [ + "reboot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_BOOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "chroot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_CHROOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "delete_module", + "init_module", + "finit_module", + "query_module" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_MODULE" + ] + }, + "excludes": {} + }, + { + "names": [ + "acct" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PACCT" + ] + }, + "excludes": {} + }, + { + "names": [ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PTRACE" + ] + }, + "excludes": {} + }, + { + "names": [ + "iopl", + "ioperm" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_RAWIO" + ] + }, + "excludes": {} + }, + { + "names": [ + "settimeofday", + "stime", + "adjtimex" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TIME" + ] + }, + "excludes": {} + }, + { + "names": [ + "vhangup" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TTY_CONFIG" + ] + }, + "excludes": {} + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/profiles/seccomp/fixtures/example.json b/vendor/github.com/docker/docker/profiles/seccomp/fixtures/example.json new file mode 100755 index 0000000..674ca50 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/fixtures/example.json @@ -0,0 +1,27 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "name": "open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "close", + "action": "SCMP_ACT_ALLOW", + "args": [] + } + ] +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go new file mode 100644 index 0000000..32f22bb --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/generate.go @@ -0,0 +1,32 @@ +// +build ignore + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/profiles/seccomp" +) + +// saves the default seccomp profile as a json file so people can use it as a +// base for their own custom profiles +func main() { + wd, err := os.Getwd() + if err != nil { + panic(err) + } + f := filepath.Join(wd, "default.json") + + // write the default profile to the file + b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t") + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile(f, b, 0644); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go new file mode 100644 index 0000000..a54ef50 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go @@ -0,0 +1,150 @@ +// +build linux + +package seccomp + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringutils" + "github.com/opencontainers/runtime-spec/specs-go" + libseccomp "github.com/seccomp/libseccomp-golang" +) + +//go:generate go run -tags 'seccomp' generate.go + +// GetDefaultProfile returns the default seccomp profile. +func GetDefaultProfile(rs *specs.Spec) (*specs.Seccomp, error) { + return setupSeccomp(DefaultProfile(), rs) +} + +// LoadProfile takes a file path and decodes the seccomp profile. +func LoadProfile(body string, rs *specs.Spec) (*specs.Seccomp, error) { + var config types.Seccomp + if err := json.Unmarshal([]byte(body), &config); err != nil { + return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) + } + return setupSeccomp(&config, rs) +} + +var nativeToSeccomp = map[string]types.Arch{ + "amd64": types.ArchX86_64, + "arm64": types.ArchAARCH64, + "mips64": types.ArchMIPS64, + "mips64n32": types.ArchMIPS64N32, + "mipsel64": types.ArchMIPSEL64, + "mipsel64n32": types.ArchMIPSEL64N32, + "s390x": types.ArchS390X, +} + +func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.Seccomp, error) { + if config == nil { + return nil, nil + } + + // No default action specified, no syscalls listed, assume seccomp disabled + if config.DefaultAction == "" && len(config.Syscalls) == 0 { + return nil, nil + } + + newConfig := &specs.Seccomp{} + + var arch string + var native, err = libseccomp.GetNativeArch() + if err == nil { + arch = native.String() + } + + if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { + return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + } + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) != 0 { + for _, a := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) + } + } + + if len(config.ArchMap) != 0 { + for _, a := range config.ArchMap { + seccompArch, ok := nativeToSeccomp[arch] + if ok { + if a.Arch == seccompArch { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch)) + for _, sa := range a.SubArches { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa)) + } + break + } + } + } + } + + newConfig.DefaultAction = specs.Action(config.DefaultAction) + +Loop: + // Loop through all syscall blocks and convert them to libcontainer format after filtering them + for _, call := range config.Syscalls { + if len(call.Excludes.Arches) > 0 { + if stringutils.InSlice(call.Excludes.Arches, arch) { + continue Loop + } + } + if len(call.Excludes.Caps) > 0 { + for _, c := range call.Excludes.Caps { + if stringutils.InSlice(rs.Process.Capabilities, c) { + continue Loop + } + } + } + if len(call.Includes.Arches) > 0 { + if !stringutils.InSlice(call.Includes.Arches, arch) { + continue Loop + } + } + if len(call.Includes.Caps) > 0 { + for _, c := range call.Includes.Caps { + if !stringutils.InSlice(rs.Process.Capabilities, c) { + continue Loop + } + } + } + + if call.Name != "" && len(call.Names) != 0 { + return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") + } + + if call.Name != "" { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args)) + } + + for _, n := range call.Names { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args)) + } + } + + return newConfig, nil +} + +func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.Syscall { + newCall := specs.Syscall{ + Name: name, + Action: specs.Action(action), + } + + // Loop through all the arguments of the syscall and convert them + for _, arg := range args { + newArg := specs.Arg{ + Index: arg.Index, + Value: arg.Value, + ValueTwo: arg.ValueTwo, + Op: specs.Operator(arg.Op), + } + + newCall.Args = append(newCall.Args, newArg) + } + return newCall +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go new file mode 100644 index 0000000..b84de82 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -0,0 +1,604 @@ +// +build linux,seccomp + +package seccomp + +import ( + "syscall" + + "github.com/docker/docker/api/types" +) + +func arches() []types.Architecture { + return []types.Architecture{ + { + Arch: types.ArchX86_64, + SubArches: []types.Arch{types.ArchX86, types.ArchX32}, + }, + { + Arch: types.ArchAARCH64, + SubArches: []types.Arch{types.ArchARM}, + }, + { + Arch: types.ArchMIPS64, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64N32}, + }, + { + Arch: types.ArchMIPS64N32, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64}, + }, + { + Arch: types.ArchMIPSEL64, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64N32}, + }, + { + Arch: types.ArchMIPSEL64N32, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64}, + }, + { + Arch: types.ArchS390X, + SubArches: []types.Arch{types.ArchS390}, + }, + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. +func DefaultProfile() *types.Seccomp { + syscalls := []*types.Syscall{ + { + Names: []string{ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0008, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0xffffffff, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{ + "arm_fadvise64_64", + "arm_sync_file_range", + "breakpoint", + "cacheflush", + "set_tls", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"arm", "arm64"}, + }, + }, + { + Names: []string{ + "arch_prctl", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32"}, + }, + }, + { + Names: []string{ + "modify_ldt", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32", "x86"}, + }, + }, + { + Names: []string{ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "open_by_handle_at", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_DAC_READ_SEARCH"}, + }, + }, + { + Names: []string{ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 1, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Comment: "s390 parameter ordering for clone is different", + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "reboot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_BOOT"}, + }, + }, + { + Names: []string{ + "chroot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_CHROOT"}, + }, + }, + { + Names: []string{ + "delete_module", + "init_module", + "finit_module", + "query_module", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_MODULE"}, + }, + }, + { + Names: []string{ + "acct", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_PACCT"}, + }, + }, + { + Names: []string{ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_PTRACE"}, + }, + }, + { + Names: []string{ + "iopl", + "ioperm", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_RAWIO"}, + }, + }, + { + Names: []string{ + "settimeofday", + "stime", + "adjtimex", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TIME"}, + }, + }, + { + Names: []string{ + "vhangup", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TTY_CONFIG"}, + }, + }, + } + + return &types.Seccomp{ + DefaultAction: types.ActErrno, + ArchMap: arches(), + Syscalls: syscalls, + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go new file mode 100644 index 0000000..1346921 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go @@ -0,0 +1,32 @@ +// +build linux + +package seccomp + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/oci" +) + +func TestLoadProfile(t *testing.T) { + f, err := ioutil.ReadFile("fixtures/example.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} + +func TestLoadDefaultProfile(t *testing.T) { + f, err := ioutil.ReadFile("default.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go new file mode 100644 index 0000000..f84b20b --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go @@ -0,0 +1,13 @@ +// +build linux,!seccomp + +package seccomp + +import ( + "github.com/docker/docker/api/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultProfile returns a nil pointer on unsupported systems. +func DefaultProfile(rs *specs.Spec) *types.Seccomp { + return nil +} diff --git a/vendor/github.com/docker/docker/project/ARM.md b/vendor/github.com/docker/docker/project/ARM.md new file mode 100644 index 0000000..c4d21bf --- /dev/null +++ b/vendor/github.com/docker/docker/project/ARM.md @@ -0,0 +1,45 @@ +# ARM support + +The ARM support should be considered experimental. It will be extended step by step in the coming weeks. + +Building a Docker Development Image works in the same fashion as for Intel platform (x86-64). +Currently we have initial support for 32bit ARMv7 devices. + +To work with the Docker Development Image you have to clone the Docker/Docker repo on a supported device. +It needs to have a Docker Engine installed to build the Docker Development Image. + +From the root of the Docker/Docker repo one can use make to execute the following make targets: +- make validate +- make binary +- make build +- make deb +- make bundles +- make default +- make shell +- make test-unit +- make test-integration-cli +- make + +The Makefile does include logic to determine on which OS and architecture the Docker Development Image is built. +Based on OS and architecture it chooses the correct Dockerfile. +For the ARM 32bit architecture it uses `Dockerfile.armhf`. + +So for example in order to build a Docker binary one has to +1. clone the Docker/Docker repository on an ARM device `git clone git@github.com:docker/docker.git` +2. change into the checked out repository with `cd docker` +3. execute `make binary` to create a Docker Engine binary for ARM + +## Kernel modules +A few libnetwork integration tests require that the kernel be +configured with "dummy" network interface and has the module +loaded. However, the dummy module may be not loaded automatically. + +To load the kernel module permanently, run these commands as `root`. + + modprobe dummy + echo "dummy" >> /etc/modules + +On some systems you also have to sync your kernel modules. + + oc-sync-kernel-modules + depmod diff --git a/vendor/github.com/docker/docker/project/BRANCHES-AND-TAGS.md b/vendor/github.com/docker/docker/project/BRANCHES-AND-TAGS.md new file mode 100644 index 0000000..1c6f232 --- /dev/null +++ b/vendor/github.com/docker/docker/project/BRANCHES-AND-TAGS.md @@ -0,0 +1,35 @@ +Branches and tags +================= + +Note: details of the release process for the Engine are documented in the +[RELEASE-CHECKLIST](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). + +# Branches + +The docker/docker repository should normally have only three living branches at all time, including +the regular `master` branch: + +## `docs` branch + +The `docs` branch supports documentation updates between product releases. This branch allow us to +decouple documentation releases from product releases. + +## `release` branch + +The `release` branch contains the last _released_ version of the code for the project. + +The `release` branch is only updated at each public release of the project. The mechanism for this +is that the release is materialized by a pull request against the `release` branch which lives for +the duration of the code freeze period. When this pull request is merged, the `release` branch gets +updated, and its new state is tagged accordingly. + +# Tags + +Any public release of a compiled binary, with the logical exception of nightly builds, should have +a corresponding tag in the repository. + +The general format of a tag is `vX.Y.Z[-suffix[N]]`: + +- All of `X`, `Y`, `Z` must be specified (example: `v1.0.0`) +- First release candidate for version `1.8.0` should be tagged `v1.8.0-rc1` +- Second alpha release of a product should be tagged `v1.0.0-alpha1` diff --git a/vendor/github.com/docker/docker/project/CONTRIBUTORS.md b/vendor/github.com/docker/docker/project/CONTRIBUTORS.md new file mode 120000 index 0000000..44fcc63 --- /dev/null +++ b/vendor/github.com/docker/docker/project/CONTRIBUTORS.md @@ -0,0 +1 @@ +../CONTRIBUTING.md \ No newline at end of file diff --git a/vendor/github.com/docker/docker/project/GOVERNANCE.md b/vendor/github.com/docker/docker/project/GOVERNANCE.md new file mode 100644 index 0000000..6ae7baf --- /dev/null +++ b/vendor/github.com/docker/docker/project/GOVERNANCE.md @@ -0,0 +1,17 @@ +# Docker Governance Advisory Board Meetings + +In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public. +All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. + +The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at +[Google Docs Folder](https://goo.gl/Alfj8r) + +These include: + +* First Meeting Notes +* DGAB Charter +* Presentation 1: Introductory Presentation, including State of The Project +* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal +* Presentation 3: Long Term Roadmap/Statement of Direction + + diff --git a/vendor/github.com/docker/docker/project/IRC-ADMINISTRATION.md b/vendor/github.com/docker/docker/project/IRC-ADMINISTRATION.md new file mode 100644 index 0000000..824a14b --- /dev/null +++ b/vendor/github.com/docker/docker/project/IRC-ADMINISTRATION.md @@ -0,0 +1,37 @@ +# Freenode IRC Administration Guidelines and Tips + +This is not meant to be a general "Here's how to IRC" document, so if you're +looking for that, check Google instead. ♥ + +If you've been charged with helping maintain one of Docker's now many IRC +channels, this might turn out to be useful. If there's information that you +wish you'd known about how a particular channel is organized, you should add +deets here! :) + +## `ChanServ` + +Most channel maintenance happens by talking to Freenode's `ChanServ` bot. For +example, `/msg ChanServ ACCESS LIST` will show you a list of everyone +with "access" privileges for a particular channel. + +A similar command is used to give someone a particular access level. For +example, to add a new maintainer to the `#docker-maintainers` access list so +that they can contribute to the discussions (after they've been merged +appropriately in a `MAINTAINERS` file, of course), one would use `/msg ChanServ +ACCESS #docker-maintainers ADD maintainer`. + +To setup a new channel with a similar `maintainer` access template, use a +command like `/msg ChanServ TEMPLATE maintainer +AV` (`+A` for letting +them view the `ACCESS LIST`, `+V` for auto-voice; see `/msg ChanServ HELP FLAGS` +for more details). + +## Troubleshooting + +The most common cause of not-getting-auto-`+v` woes is people not being +`IDENTIFY`ed with `NickServ` (or their current nickname not being `GROUP`ed with +their main nickname) -- often manifested by `ChanServ` responding to an `ACCESS +ADD` request with something like `xyz is not registered.`. + +This is easily fixed by doing `/msg NickServ IDENTIFY OldNick SecretPassword` +followed by `/msg NickServ GROUP` to group the two nicknames together. See +`/msg NickServ HELP GROUP` for more information. diff --git a/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md b/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md new file mode 100644 index 0000000..95cb2f1 --- /dev/null +++ b/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md @@ -0,0 +1,132 @@ +Triaging of issues +------------------ + +Triage provides an important way to contribute to an open source project. Triage helps ensure issues resolve quickly by: + +- Describing the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +- Giving a contributor the information they need before they commit to resolving an issue. +- Lowering the issue count by preventing duplicate issues. +- Streamlining the development process by preventing duplicate discussions. + +If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. + +### 1. Ensure the issue contains basic information + +Before triaging an issue very far, make sure that the issue's author provided the standard issue information. This will help you make an educated recommendation on how this to categorize the issue. Standard information that *must* be included in most issues are things such as: + +- the output of `docker version` +- the output of `docker info` +- the output of `uname -a` +- a reproducible case if this is a bug, Dockerfiles FTW +- host distribution and version ( ubuntu 14.04, RHEL, fedora 23 ) +- page URL if this is a docs issue or the name of a man page + +Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. + +If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. + +If the author does not respond requested information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be +reopened when the necessary information is provided. + +### 2. Classify the Issue + +An issue can have multiple of the following labels. Typically, a properly classified issues should +have: + +- One label identifying its kind (`kind/*`). +- One or multiple labels identifying the functional areas of interest (`area/*`). +- Where applicable, one label categorizing its difficulty (`exp/*`). + +#### Issue kind + +| Kind | Description | +|------------------|---------------------------------------------------------------------------------------------------------------------------------| +| kind/bug | Bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate. | +| kind/enhancement | Enhancement are not bugs or new features but can drastically improve usability or performance of a project component. | +| kind/feature | Functionality or other elements that the project does not currently support. Features are new and shiny. | +| kind/question | Contains a user or contributor question requiring a response. | + +#### Functional area + +| Area | +|---------------------------| +| area/api | +| area/builder | +| area/bundles | +| area/cli | +| area/daemon | +| area/distribution | +| area/docs | +| area/kernel | +| area/logging | +| area/networking | +| area/plugins | +| area/project | +| area/runtime | +| area/security | +| area/security/apparmor | +| area/security/seccomp | +| area/security/selinux | +| area/security/trust | +| area/storage | +| area/storage/aufs | +| area/storage/btrfs | +| area/storage/devicemapper | +| area/storage/overlay | +| area/storage/zfs | +| area/swarm | +| area/testing | +| area/volumes | + +#### Platform + +| Platform | +|---------------------------| +| platform/arm | +| platform/darwin | +| platform/ibm-power | +| platform/ibm-z | +| platform/windows | + +#### Experience level + +Experience level is a way for a contributor to find an issue based on their +skill set. Experience types are applied to the issue or pull request using +labels. + +| Level | Experience level guideline | +|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| exp/beginner | New to Docker, and possibly Golang, and is looking to help while learning the basics. | +| exp/intermediate | Comfortable with golang and understands the core concepts of Docker and looking to dive deeper into the project. | +| exp/expert | Proficient with Docker and Golang and has been following, and active in, the community to understand the rationale behind design decisions and where the project is headed. | + +As the table states, these labels are meant as guidelines. You might have +written a whole plugin for Docker in a personal project and never contributed to +Docker. With that kind of experience, you could take on an exp/expert level task. + +#### Triage status + +To communicate the triage status with other collaborators, you can apply status +labels to issues. These labels prevent duplicating effort. + +| Status | Description | +|-------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| status/confirmed | You triaged the issue, and were able to reproduce the issue. Always leave a comment how you reproduced, so that the person working on resolving the issue has a way to set up a test-case. +| status/accepted | Apply to enhancements / feature requests that we think are good to have. Adding this label helps contributors find things to work on. +| status/more-info-needed | Apply this to issues that are missing information (e.g. no `docker version` or `docker info` output, or no steps to reproduce), or require feedback from the reporter. If the issue is not updated after a week, it can generally be closed. +| status/needs-attention | Apply this label if an issue (or PR) needs more eyes. + +### 3. Prioritizing issue + +When, and only when, an issue is attached to a specific milestone, the issue can be labeled with the +following labels to indicate their degree of priority (from more urgent to less urgent). + +| Priority | Description | +|-------------|-----------------------------------------------------------------------------------------------------------------------------------| +| priority/P0 | Urgent: Security, critical bugs, blocking issues. P0 basically means drop everything you are doing until this issue is addressed. | +| priority/P1 | Important: P1 issues are a top priority and a must-have for the next release. | +| priority/P2 | Normal priority: default priority applied. | +| priority/P3 | Best effort: those are nice to have / minor issues. | + +And that's it. That should be all the information required for a new or existing contributor to come in an resolve an issue. diff --git a/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md b/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md new file mode 100644 index 0000000..3763f87 --- /dev/null +++ b/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md @@ -0,0 +1,74 @@ +# Apt & Yum Repository Maintenance +## A maintainer's guide to managing Docker's package repos + +### How to clean up old experimental debs and rpms + +We release debs and rpms for experimental nightly, so these can build up. +To remove old experimental debs and rpms, and _ONLY_ keep the latest, follow the +steps below. + +1. Checkout docker master + +2. Run clean scripts + +```bash +docker build --rm --force-rm -t docker-dev:master . +docker run --rm -it --privileged \ + -v /path/to/your/repos/dir:/volumes/repos \ + -v $HOME/.gnupg:/root/.gnupg \ + -e GPG_PASSPHRASE \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + docker-dev:master hack/make.sh clean-apt-repo clean-yum-repo generate-index-listing sign-repos +``` + +3. Upload the changed repos to `s3` (if you host on s3) + +4. Purge the cache, PURGE the cache, PURGE THE CACHE! + +### How to get out of a sticky situation + +Sh\*t happens. We know. Below are steps to get out of any "hash-sum mismatch" or +"gpg sig error" or the likes error that might happen to the apt repo. + +**NOTE:** These are apt repo specific, have had no experimence with anything similar +happening to the yum repo in the past so you can rest easy. + +For each step listed below, move on to the next if the previous didn't work. +Otherwise CELEBRATE! + +1. Purge the cache. + +2. Did you remember to sign the debs after releasing? + +Re-sign the repo with your gpg key: + +```bash +docker build --rm --force-rm -t docker-dev:master . +docker run --rm -it --privileged \ + -v /path/to/your/repos/dir:/volumes/repos \ + -v $HOME/.gnupg:/root/.gnupg \ + -e GPG_PASSPHRASE \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + docker-dev:master hack/make.sh sign-repos +``` + +Upload the changed repo to `s3` (if that is where you host) + +PURGE THE CACHE. + +3. Run Jess' magical, save all, only in case of extreme emergencies, "you are +going to have to break this glass to get it" script. + +```bash +docker build --rm --force-rm -t docker-dev:master . +docker run --rm -it --privileged \ + -v /path/to/your/repos/dir:/volumes/repos \ + -v $HOME/.gnupg:/root/.gnupg \ + -e GPG_PASSPHRASE \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + docker-dev:master hack/make.sh update-apt-repo generate-index-listing sign-repos +``` + +4. Upload the changed repo to `s3` (if that is where you host) + +PURGE THE CACHE. diff --git a/vendor/github.com/docker/docker/project/PACKAGERS.md b/vendor/github.com/docker/docker/project/PACKAGERS.md new file mode 100644 index 0000000..46ea8e7 --- /dev/null +++ b/vendor/github.com/docker/docker/project/PACKAGERS.md @@ -0,0 +1,307 @@ +# Dear Packager, + +If you are looking to make Docker available on your favorite software +distribution, this document is for you. It summarizes the requirements for +building and running the Docker client and the Docker daemon. + +## Getting Started + +We want to help you package Docker successfully. Before doing any packaging, a +good first step is to introduce yourself on the [docker-dev mailing +list](https://groups.google.com/d/forum/docker-dev), explain what you're trying +to achieve, and tell us how we can help. Don't worry, we don't bite! There might +even be someone already working on packaging for the same distro! + +You can also join the IRC channel - #docker and #docker-dev on Freenode are both +active and friendly. + +We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our +"Packagers Relations", since he's always working to make sure our packagers have +a good, healthy upstream to work with (both in our communication and in our +build scripts). If you're having any kind of trouble, feel free to ping him +directly. He also likes to keep track of what distributions we have packagers +for, so feel free to reach out to him even just to say "Hi!" + +## Package Name + +If possible, your package should be called "docker". If that name is already +taken, a second choice is "docker-engine". Another possible choice is "docker.io". + +## Official Build vs Distro Build + +The Docker project maintains its own build and release toolchain. It is pretty +neat and entirely based on Docker (surprise!). This toolchain is the canonical +way to build Docker. We encourage you to give it a try, and if the circumstances +allow you to use it, we recommend that you do. + +You might not be able to use the official build toolchain - usually because your +distribution has a toolchain and packaging policy of its own. We get it! Your +house, your rules. The rest of this document should give you the information you +need to package Docker your way, without denaturing it in the process. + +## Build Dependencies + +To build Docker, you will need the following: + +* A recent version of Git and Mercurial +* Go version 1.6 or later +* A clean checkout of the source added to a valid [Go + workspace](https://golang.org/doc/code.html#Workspaces) under the path + *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, + explained in more detail below) + +To build the Docker daemon, you will additionally need: + +* An amd64/x86_64 machine running Linux +* SQLite version 3.7.9 or later +* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version + 2.02.89 or later +* btrfs-progs version 3.16.1 or later (unless using an older version is + absolutely necessary, in which case 3.8 is the minimum) +* libseccomp version 2.2.1 or later (for build tag seccomp) + +Be sure to also check out Docker's Dockerfile for the most up-to-date list of +these build-time dependencies. + +### Go Dependencies + +All Go dependencies are vendored under "./vendor". They are used by the official +build, so the source of truth for the current version of each dependency is +whatever is in "./vendor". + +To use the vendored dependencies, simply make sure the path to "./vendor" is +included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). + +If you would rather (or must, due to distro policy) package these dependencies +yourself, take a look at "vendor.conf" for an easy-to-parse list of the +exact version for each. + +NOTE: if you're not able to package the exact version (to the exact commit) of a +given dependency, please get in touch so we can remediate! Who knows what +discrepancies can be caused by even the slightest deviation. We promise to do +our best to make everybody happy. + +## Stripping Binaries + +Please, please, please do not strip any compiled binaries. This is really +important. + +In our own testing, stripping the resulting binaries sometimes results in a +binary that appears to work, but more often causes random panics, segfaults, and +other issues. Even if the binary appears to work, please don't strip. + +See the following quotes from Dave Cheney, which explain this position better +from the upstream Golang perspective. + +### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) + +> Super super important: Do not strip go binaries or archives. It isn't tested, +> often breaks, and doesn't work. + +### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) + +> To quote myself: "Please do not strip Go binaries, it is not supported, not +> tested, is often broken, and doesn't do what you want" +> +> To unpack that a bit +> +> * not supported, as in, we don't support it, and recommend against it when +> asked +> * not tested, we don't test stripped binaries as part of the build CI process +> * is often broken, stripping a go binary will produce anywhere from no, to +> subtle, to outright execution failure, see above + +### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) + +> To clarify my previous statements. +> +> * I do not disagree with the debian policy, it is there for a good reason +> * Having said that, it stripping Go binaries doesn't work, and nobody is +> looking at making it work, so there is that. +> +> Thanks for patching the build formula. + +## Building Docker + +Please use our build script ("./hack/make.sh") for all your compilation of +Docker. If there's something you need that it isn't doing, or something it could +be doing to make your life as a packager easier, please get in touch with Tianon +and help us rectify the situation. Chances are good that other packagers have +probably run into the same problems and a fix might already be in the works, but +none of us will know for sure unless you harass Tianon about it. :) + +All the commands listed within this section should be run with the Docker source +checkout as the current working directory. + +### `AUTO_GOPATH` + +If you'd rather not be bothered with the hassles that setting up `GOPATH` +appropriately can be, and prefer to just get a "build that works", you should +add something similar to this to whatever script or process you're using to +build Docker: + +```bash +export AUTO_GOPATH=1 +``` + +This will cause the build scripts to set up a reasonable `GOPATH` that +automatically and properly includes both docker/docker from the local +directory, and the local "./vendor" directory as necessary. + +### `DOCKER_BUILDTAGS` + +If you're building a binary that may need to be used on platforms that include +AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: +```bash +export DOCKER_BUILDTAGS='apparmor' +``` + +If you're building a binary that may need to be used on platforms that include +SELinux, you will need to use the `selinux` build tag: +```bash +export DOCKER_BUILDTAGS='selinux' +``` + +If you're building a binary that may need to be used on platforms that include +seccomp, you will need to use the `seccomp` build tag: +```bash +export DOCKER_BUILDTAGS='seccomp' +``` + +There are build tags for disabling graphdrivers as well. By default, support +for all graphdrivers are built in. + +To disable btrfs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' +``` + +To disable devicemapper: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' +``` + +To disable aufs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' +``` + +NOTE: if you need to set more than one build tag, space separate them: +```bash +export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' +``` + +### Static Daemon + +If it is feasible within the constraints of your distribution, you should +seriously consider packaging Docker as a single static binary. A good comparison +is Busybox, which is often packaged statically as a feature to enable mass +portability. Because of the unique way Docker operates, being similarly static +is a "feature". + +To build a static Docker daemon binary, run the following command (first +ensuring that all the necessary libraries are available in static form for +linking - see the "Build Dependencies" section above, and the relevant lines +within Docker's own Dockerfile that set up our official build environment): + +```bash +./hack/make.sh binary +``` + +This will create a static binary under +"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of +the file "./VERSION". This binary is usually installed somewhere like +"/usr/bin/docker". + +### Dynamic Daemon / Client-only Binary + +If you are only interested in a Docker client binary, you can build using: + +```bash +./hack/make.sh binary-client +``` + +If you need to (due to distro policy, distro library availability, or for other +reasons) create a dynamically compiled daemon binary, or if you are only +interested in creating a client binary for Docker, use something similar to the +following: + +```bash +./hack/make.sh dynbinary-client +``` + +This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for +client-only builds is the important file to grab and install as appropriate. + +## System Dependencies + +### Runtime Dependencies + +To function properly, the Docker daemon needs the following software to be +installed and available at runtime: + +* iptables version 1.4 or later +* procps (or similar provider of a "ps" executable) +* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs) +* xfsprogs (in use: mkfs.xfs) +* XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) + +Additionally, the Docker client needs the following software to be installed and +available at runtime: + +* Git version 1.7 or later + +### Kernel Requirements + +The Docker daemon has very specific kernel requirements. Most pre-packaged +kernels already include the necessary options enabled. If you are building your +own kernel, you will either need to discover the options necessary via trial and +error, or check out the [Gentoo +ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), +in which a list is maintained (and if there are any issues or discrepancies in +that list, please contact Tianon so they can be rectified). + +Note that in client mode, there are no specific kernel requirements, and that +the client will even run on alternative platforms such as Mac OS X / Darwin. + +### Optional Dependencies + +Some of Docker's features are activated by using optional command-line flags or +by having support for them in the kernel or userspace. A few examples include: + +* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at + least the "auplink" utility from aufs-tools) +* BTRFS graph driver (requires BTRFS support enabled in the kernel) +* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module) +* Libseccomp to allow running seccomp profiles with containers + +## Daemon Init Script + +Docker expects to run as a daemon at machine startup. Your package will need to +include a script for your distro's process supervisor of choice. Be sure to +check out the "contrib/init" folder in case a suitable init script already +exists (and if one does not, contact Tianon about whether it might be +appropriate for your distro's init script to live there too!). + +In general, Docker should be run as root, similar to the following: + +```bash +docker daemon +``` + +Generally, a `DOCKER_OPTS` variable of some kind is available for adding more +flags (such as changing the graph driver to use BTRFS, switching the location of +"/var/lib/docker", etc). + +## Communicate + +As a final note, please do feel free to reach out to Tianon at any time for +pretty much anything. He really does love hearing from our packagers and wants +to make sure we're not being a "hostile upstream". As should be a given, we +appreciate the work our packagers do to make sure we have broad distribution! diff --git a/vendor/github.com/docker/docker/project/PATCH-RELEASES.md b/vendor/github.com/docker/docker/project/PATCH-RELEASES.md new file mode 100644 index 0000000..548db9a --- /dev/null +++ b/vendor/github.com/docker/docker/project/PATCH-RELEASES.md @@ -0,0 +1,68 @@ +# Docker patch (bugfix) release process + +Patch releases (the 'Z' in vX.Y.Z) are intended to fix major issues in a +release. Docker open source projects follow these procedures when creating a +patch release; + +After each release (both "major" (vX.Y.0) and "patch" releases (vX.Y.Z)), a +patch release milestone (vX.Y.Z + 1) is created. + +The creation of a patch release milestone is no obligation to actually +*create* a patch release. The purpose of these milestones is to collect +issues and pull requests that can *justify* a patch release; + +- Any maintainer is allowed to add issues and PR's to the milestone, when + doing so, preferably leave a comment on the issue or PR explaining *why* + you think it should be considered for inclusion in a patch release. +- Issues introduced in version vX.Y.0 get added to milestone X.Y.Z+1 +- Only *regressions* should be added. Issues *discovered* in version vX.Y.0, + but already present in version vX.Y-1.Z should not be added, unless + critical. +- Patch releases can *only* contain bug-fixes. New features should + *never* be added to a patch release. + +The release captain of the "major" (X.Y.0) release, is also responsible for +patch releases. The release captain, together with another maintainer, will +review issues and PRs on the milestone, and assigns `priority/`labels. These +review sessions take place on a weekly basis, more frequent if needed: + +- A P0 priority is assigned to critical issues. A maintainer *must* be + assigned to these issues. Maintainers should strive to fix a P0 within a week. +- A P1 priority is assigned to major issues, but not critical. A maintainer + *must* be assigned to these issues. +- P2 and P3 priorities are assigned to other issues. A maintainer can be + assigned. +- Non-critical issues and PR's can be removed from the milestone. Minor + changes, such as typo-fixes or omissions in the documentation can be + considered for inclusion in a patch release. + +## Deciding if a patch release should be done + +- Only a P0 can justify to proceed with the patch release. +- P1, P2, and P3 issues/PR's should not influence the decision, and + should be moved to the X.Y.Z+1 milestone, or removed from the + milestone. + +> **Note**: If the next "major" release is imminent, the release captain +> can decide to cancel a patch release, and include the patches in the +> upcoming major release. + +> **Note**: Security releases are also "patch releases", but follow +> a different procedure. Security releases are developed in a private +> repository, released and tested under embargo before they become +> publicly available. + +## Deciding on the content of a patch release + +When the criteria for moving forward with a patch release are met, the release +manager will decide on the exact content of the release. + +- Fixes to all P0 issues *must* be included in the release. +- Fixes to *some* P1, P2, and P3 issues *may* be included as part of the patch + release depending on the severity of the issue and the risk associated with + the patch. + +Any code delivered as part of a patch release should make life easier for a +significant amount of users with zero chance of degrading anybody's experience. +A good rule of thumb for that is to limit cherry-picking to small patches, which +fix well-understood issues, and which come with verifiable tests. diff --git a/vendor/github.com/docker/docker/project/PRINCIPLES.md b/vendor/github.com/docker/docker/project/PRINCIPLES.md new file mode 100644 index 0000000..53f0301 --- /dev/null +++ b/vendor/github.com/docker/docker/project/PRINCIPLES.md @@ -0,0 +1,19 @@ +# Docker principles + +In the design and development of Docker we try to follow these principles: + +(Work in progress) + +* Don't try to replace every tool. Instead, be an ingredient to improve them. +* Less code is better. +* Fewer components are better. Do you really need to add one more class? +* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. +* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. +* When hesitating between 2 options, choose the one that is easier to reverse. +* No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later. +* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. +* The less moving parts in a container, the better. +* Don't merge it unless you document it. +* Don't document it unless you can keep it up-to-date. +* Don't merge it unless you test it! +* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff --git a/vendor/github.com/docker/docker/project/README.md b/vendor/github.com/docker/docker/project/README.md new file mode 100644 index 0000000..3ed68cf --- /dev/null +++ b/vendor/github.com/docker/docker/project/README.md @@ -0,0 +1,24 @@ +# Hacking on Docker + +The `project/` directory holds information and tools for everyone involved in the process of creating and +distributing Docker, specifically: + +## Guides + +If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTORS.md](../CONTRIBUTING.md). + +If you're a *maintainer* or aspiring maintainer, you should read [MAINTAINERS](../MAINTAINERS). + +If you're a *packager* or aspiring packager, you should read [PACKAGERS.md](./PACKAGERS.md). + +If you're a maintainer in charge of a *release*, you should read [RELEASE-CHECKLIST.md](./RELEASE-CHECKLIST.md). + +## Roadmap + +A high-level roadmap is available at [ROADMAP.md](../ROADMAP.md). + + +## Build tools + +[hack/make.sh](../hack/make.sh) is the primary build tool for docker. It is used for compiling the official binary, +running the test suite, and pushing releases. diff --git a/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md b/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md new file mode 100644 index 0000000..84848ca --- /dev/null +++ b/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md @@ -0,0 +1,518 @@ +# Release Checklist +## A maintainer's guide to releasing Docker + +So you're in charge of a Docker release? Cool. Here's what to do. + +If your experience deviates from this document, please document the changes +to keep it up-to-date. + +It is important to note that this document assumes that the git remote in your +repository that corresponds to "https://github.com/docker/docker" is named +"origin". If yours is not (for example, if you've chosen to name it "upstream" +or something similar instead), be sure to adjust the listed snippets for your +local environment accordingly. If you are not sure what your upstream remote is +named, use a command like `git remote -v` to find out. + +If you don't have an upstream remote, you can add one easily using something +like: + +```bash +export GITHUBUSER="YOUR_GITHUB_USER" +git remote add origin https://github.com/docker/docker.git +git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git +``` + +### 1. Pull from master and create a release branch + +All releases version numbers will be of the form: vX.Y.Z where X is the major +version number, Y is the minor version number and Z is the patch release version number. + +#### Major releases + +The release branch name is just vX.Y because it's going to be the basis for all .Z releases. + +```bash +export BASE=vX.Y +export VERSION=vX.Y.Z +git fetch origin +git checkout --track origin/master +git checkout -b release/$BASE +``` + +This new branch is going to be the base for the release. We need to push it to origin so we +can track the cherry-picked changes and the version bump: + +```bash +git push origin release/$BASE +``` + +When you have the major release branch in origin, we need to create the bump fork branch +that we'll push to our fork: + +```bash +git checkout -b bump_$VERSION +``` + +#### Patch releases + +If we have the release branch in origin, we can create the forked bump branch from it directly: + +```bash +export VERSION=vX.Y.Z +export PATCH=vX.Y.Z+1 +git fetch origin +git checkout --track origin/release/$BASE +git checkout -b bump_$PATCH +``` + +We cherry-pick only the commits we want into the bump branch: + +```bash +# get the commits ids we want to cherry-pick +git log +# cherry-pick the commits starting from the oldest one, without including merge commits +git cherry-pick -s -x +git cherry-pick -s -x +... +``` + +### 2. Update the VERSION files and API version on master + +We don't want to stop contributions to master just because we are releasing. +So, after the release branch is up, we bump the VERSION and API version to mark +the start of the "next" release. + +#### 2.1 Update the VERSION files + +Update the content of the `VERSION` file to be the next minor (incrementing Y) +and add the `-dev` suffix. For example, after the release branch for 1.5.0 is +created, the `VERSION` file gets updated to `1.6.0-dev` (as in "1.6.0 in the +making"). + +#### 2.2 Update API version on master + +We don't want API changes to go to the now frozen API version. Create a new +entry in `docs/reference/api/` by copying the latest and bumping the version +number (in both the file's name and content), and submit this in a PR against +master. + +### 3. Update CHANGELOG.md + +You can run this command for reference with git 2.0: + +```bash +git fetch --tags +LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If you don't have git 2.0 but have a sort command that supports `-V`: +```bash +git fetch --tags +LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. +```markdown +#### Notable features since +* New docker command to do something useful +* Engine API change (deprecating old version) +* Performance improvements in some usecases +* ... +``` + +For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. +Each change should be listed under a category heading formatted as `#### CATEGORY`. + +`CATEGORY` should describe which part of the project is affected. + Valid categories are: + * Builder + * Documentation + * Hack + * Packaging + * Engine API + * Runtime + * Other (please use this category sparingly) + +Each change should be formatted as `BULLET DESCRIPTION`, given: + +* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or + upgrade, respectively. + +* DESCRIPTION: a concise description of the change that is relevant to the + end-user, using the present tense. Changes should be described in terms + of how they affect the user, for example "Add new feature X which allows Y", + "Fix bug which caused X", "Increase performance of Y". + +EXAMPLES: + +```markdown +## 0.3.6 (1995-12-25) + +#### Builder + ++ 'docker build -t FOO .' applies the tag FOO to the newly built image + +#### Engine API + +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version +``` + +If you need a list of contributors between the last major release and the +current bump branch, use something like: +```bash +git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf +``` +Obviously, you'll need to adjust version numbers as necessary. If you just need +a count, add a simple `| wc -l`. + +### 4. Change the contents of the VERSION file + +Before the big thing, you'll want to make successive release candidates and get +people to test. The release candidate number `N` should be part of the version: + +```bash +export RC_VERSION=${VERSION}-rcN +echo ${RC_VERSION#v} > VERSION +``` + +### 5. Test the docs + +Make sure that your tree includes documentation for any modified or +new features, syntax or semantic changes. + +To test locally: + +```bash +make docs +``` + +To make a shared test at https://beta-docs.docker.io: + +(You will need the `awsconfig` file added to the `docs/` dir) + +```bash +make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release +``` + +### 6. Commit and create a pull request to the "release" branch + +```bash +git add VERSION CHANGELOG.md +git commit -m "Bump version to $VERSION" +git push $GITHUBUSER bump_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:release/$BASE...$GITHUBUSER:bump_$VERSION?expand=1" +``` + +That last command will give you the proper link to visit to ensure that you +open the PR against the "release" branch instead of accidentally against +"master" (like so many brave souls before you already have). + +### 7. Create a PR to update the AUTHORS file for the release + +Update the AUTHORS file, by running the `hack/generate-authors.sh` on the +release branch. To prevent duplicate entries, you may need to update the +`.mailmap` file accordingly. + +### 8. Build release candidate rpms and debs + +**NOTE**: It will be a lot faster if you pass a different graphdriver with +`DOCKER_GRAPHDRIVER` than `vfs`. + +```bash +docker build -t docker . +docker run \ + --rm -t --privileged \ + -e DOCKER_GRAPHDRIVER=aufs \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + docker \ + hack/make.sh binary build-deb build-rpm +``` + +### 9. Publish release candidate rpms and debs + +With the rpms and debs you built from the last step you can release them on the +same server, or ideally, move them to a dedicated release box via scp into +another docker/docker directory in bundles. This next step assumes you have +a checkout of the docker source code at the same commit you used to build, with +the artifacts from the last step in `bundles`. + +**NOTE:** If you put a space before the command your `.bash_history` will not +save it. (for the `GPG_PASSPHRASE`). + +```bash +docker build -t docker . +docker run --rm -it --privileged \ + -v /volumes/repos:/volumes/repos \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + -v $HOME/.gnupg:/root/.gnupg \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + -e GPG_PASSPHRASE \ + -e KEEPBUNDLE=1 \ + docker \ + hack/make.sh release-deb release-rpm sign-repos generate-index-listing +``` + +### 10. Upload the changed repos to wherever you host + +For example, above we bind mounted `/volumes/repos` as the storage for +`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with +a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with +a s3 bucket for the yum repo. + +### 11. Publish release candidate binaries + +To run this you will need access to the release credentials. Get them from the +Core maintainers. + +```bash +docker build -t docker . + +# static binaries are still pushed to s3 +docker run \ + -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e AWS_DEFAULT_REGION \ + -i -t --privileged \ + docker \ + hack/release.sh +``` + +It will run the test suite, build the binaries and upload to the specified bucket, +so this is a good time to verify that you're running against **test**.docker.com. + +### 12. Purge the cache! + +After the binaries are uploaded to test.docker.com and the packages are on +apt.dockerproject.org and yum.dockerproject.org, make sure +they get tested in both Ubuntu and Debian for any obvious installation +issues or runtime issues. + +If everything looks good, it's time to create a git tag for this candidate: + +```bash +git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION +git push origin $RC_VERSION +``` + +Announcing on multiple medias is the best way to get some help testing! An easy +way to get some useful links for sharing: + +```bash +echo "Ubuntu/Debian: curl -sSL https://test.docker.com/ | sh" +echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" +echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}" +echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz" +echo "Windows 64bit client binary: https://test.docker.com/builds/Windows/x86_64/docker-${VERSION#v}.exe" +echo "Windows 32bit client binary: https://test.docker.com/builds/Windows/i386/docker-${VERSION#v}.exe" +``` + +We recommend announcing the release candidate on: + +- IRC on #docker, #docker-dev, #docker-maintainers +- In a comment on the pull request to notify subscribed people on GitHub +- The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group +- The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group +- Any social media that can bring some attention to the release candidate + +### 13. Iterate on successive release candidates + +Spend several days along with the community explicitly investing time and +resources to try and break Docker in every possible way, documenting any +findings pertinent to the release. This time should be spent testing and +finding ways in which the release might have caused various features or upgrade +environments to have issues, not coding. During this time, the release is in +code freeze, and any additional code changes will be pushed out to the next +release. + +It should include various levels of breaking Docker, beyond just using Docker +by the book. + +Any issues found may still remain issues for this release, but they should be +documented and give appropriate warnings. + +During this phase, the `bump_$VERSION` branch will keep evolving as you will +produce new release candidates. The frequency of new candidates is up to the +release manager: use your best judgement taking into account the severity of +reported issues, testers availability, and time to scheduled release date. + +Each time you'll want to produce a new release candidate, you will start by +adding commits to the branch, usually by cherry-picking from master: + +```bash +git cherry-pick -s -x -m0 +``` + +You want your "bump commit" (the one that updates the CHANGELOG and VERSION +files) to remain on top, so you'll have to `git rebase -i` to bring it back up. + +Now that your bump commit is back on top, you will need to update the CHANGELOG +file (if appropriate for this particular release candidate), and update the +VERSION file to increment the RC number: + +```bash +export RC_VERSION=$VERSION-rcN +echo $RC_VERSION > VERSION +``` + +You can now amend your last commit and update the bump branch: + +```bash +git commit --amend +git push -f $GITHUBUSER bump_$VERSION +``` + +Repeat step 6 to tag the code, publish new binaries, announce availability, and +get help testing. + +### 14. Finalize the bump branch + +When you're happy with the quality of a release candidate, you can move on and +create the real thing. + +You will first have to amend the "bump commit" to drop the release candidate +suffix in the VERSION file: + +```bash +echo $VERSION > VERSION +git add VERSION +git commit --amend +``` + +You will then repeat step 6 to publish the binaries to test + +### 15. Get 2 other maintainers to validate the pull request + +### 16. Build final rpms and debs + +```bash +docker build -t docker . +docker run \ + --rm -t --privileged \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + docker \ + hack/make.sh binary build-deb build-rpm +``` + +### 17. Publish final rpms and debs + +With the rpms and debs you built from the last step you can release them on the +same server, or ideally, move them to a dedicated release box via scp into +another docker/docker directory in bundles. This next step assumes you have +a checkout of the docker source code at the same commit you used to build, with +the artifacts from the last step in `bundles`. + +**NOTE:** If you put a space before the command your `.bash_history` will not +save it. (for the `GPG_PASSPHRASE`). + +```bash +docker build -t docker . +docker run --rm -it --privileged \ + -v /volumes/repos:/volumes/repos \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + -v $HOME/.gnupg:/root/.gnupg \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + -e GPG_PASSPHRASE \ + -e KEEPBUNDLE=1 \ + docker \ + hack/make.sh release-deb release-rpm sign-repos generate-index-listing +``` + +### 18. Upload the changed repos to wherever you host + +For example, above we bind mounted `/volumes/repos` as the storage for +`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with +a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with +a s3 bucket for the yum repo. + +### 19. Publish final binaries + +Once they're tested and reasonably believed to be working, run against +get.docker.com: + +```bash +docker build -t docker . +# static binaries are still pushed to s3 +docker run \ + -e AWS_S3_BUCKET=get.docker.com \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e AWS_DEFAULT_REGION \ + -i -t --privileged \ + docker \ + hack/release.sh +``` + +### 20. Purge the cache! + +### 21. Apply tag and create release + +It's very important that we don't make the tag until after the official +release is uploaded to get.docker.com! + +```bash +git tag -a $VERSION -m $VERSION bump_$VERSION +git push origin $VERSION +``` + +Once the tag is pushed, go to GitHub and create a [new release](https://github.com/docker/docker/releases/new). +If the tag is for an RC make sure you check `This is a pre-release` at the bottom of the form. + +Select the tag that you just pushed as the version and paste the changelog in the description of the release. +You can see examples in this two links: + +https://github.com/docker/docker/releases/tag/v1.8.0 +https://github.com/docker/docker/releases/tag/v1.8.0-rc3 + +### 22. Go to github to merge the `bump_$VERSION` branch into release + +Don't forget to push that pretty blue button to delete the leftover +branch afterwards! + +### 23. Update the docs branch + +You will need to point the docs branch to the newly created release tag: + +```bash +git checkout origin/docs +git reset --hard origin/$VERSION +git push -f origin docs +``` + +The docs will appear on https://docs.docker.com/ (though there may be cached +versions, so its worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/). +For more information about documentation releases, see `docs/README.md`. + +Note that the new docs will not appear live on the site until the cache (a complex, +distributed CDN system) is flushed. The `make docs-release` command will do this +_if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run +and you can check its progress with the CDN Cloudfront Chrome addon. + +### 24. Create a new pull request to merge your bump commit back into master + +```bash +git checkout master +git fetch +git reset --hard origin/master +git cherry-pick -s -x $VERSION +git push $GITHUBUSER merge_release_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1" +``` + +Again, get two maintainers to validate, then merge, then push that pretty +blue button to delete your branch. + +### 25. Rejoice and Evangelize! + +Congratulations! You're done. + +Go forth and announce the glad tidings of the new release in `#docker`, +`#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev), +the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce), +and on Twitter! diff --git a/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md b/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md new file mode 100644 index 0000000..d764e9d --- /dev/null +++ b/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md @@ -0,0 +1,78 @@ +# Docker Release Process + +This document describes how the Docker project is released. The Docker project +release process targets the Engine, Compose, Kitematic, Machine, Swarm, +Distribution, Notary and their underlying dependencies (libnetwork, libkv, +etc...). + +Step-by-step technical details of the process are described in +[RELEASE-CHECKLIST.md](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). + +## Release cycle + +The Docker project follows a **time-based release cycle** and ships every nine +weeks. A release cycle starts the same day the previous release cycle ends. + +The first six weeks of the cycle are dedicated to development and review. During +this phase, new features and bugfixes submitted to any of the projects are +**eligible** to be shipped as part of the next release. No changeset submitted +during this period is however guaranteed to be merged for the current release +cycle. + +## The freeze period + +Six weeks after the beginning of the cycle, the codebase is officially frozen +and the codebase reaches a state close to the final release. A Release Candidate +(RC) gets created at the same time. The freeze period is used to find bugs and +get feedback on the state of the RC before the release. + +During this freeze period, while the `master` branch will continue its normal +development cycle, no new features are accepted into the RC. As bugs are fixed +in `master` the release owner will selectively 'cherry-pick' critical ones to +be included into the RC. As the RC changes, new ones are made available for the +community to test and review. + +This period lasts for three weeks. + +## How to maximize chances of being merged before the freeze date? + +First of all, there is never a guarantee that a specific changeset is going to +be merged. However there are different actions to follow to maximize the chances +for a changeset to be merged: + +- The team gives priority to review the PRs aligned with the Roadmap (usually +defined by a ROADMAP.md file at the root of the repository). +- The earlier a PR is opened, the more time the maintainers have to review. For +example, if a PR is opened the day before the freeze date, it’s very unlikely +that it will be merged for the release. +- Constant communication with the maintainers (mailing-list, IRC, Github issues, +etc.) allows to get early feedback on the design before getting into the +implementation, which usually reduces the time needed to discuss a changeset. +- If the code is commented, fully tested and by extension follows every single +rules defined by the [CONTRIBUTING guide]( +https://github.com/docker/docker/blob/master/CONTRIBUTING.md), this will help +the maintainers by speeding up the review. + +## The release + +At the end of the freeze (nine weeks after the start of the cycle), all the +projects are released together. + +``` + Codebase Release +Start of is frozen (end of the +the Cycle (7th week) 9th week) ++---------------------------------------+---------------------+ +| | | +| Development phase | Freeze phase | +| | | ++---------------------------------------+---------------------+ + 6 weeks 3 weeks +<---------------------------------------><--------------------> +``` + +## Exceptions + +If a critical issue is found at the end of the freeze period and more time is +needed to address it, the release will be pushed back. When a release gets +pushed back, the next release cycle gets delayed as well. diff --git a/vendor/github.com/docker/docker/project/REVIEWING.md b/vendor/github.com/docker/docker/project/REVIEWING.md new file mode 100644 index 0000000..51ef4c5 --- /dev/null +++ b/vendor/github.com/docker/docker/project/REVIEWING.md @@ -0,0 +1,246 @@ +# Pull request reviewing process + +## Labels + +Labels are carefully picked to optimize for: + + - Readability: maintainers must immediately know the state of a PR + - Filtering simplicity: different labels represent many different aspects of + the reviewing work, and can even be targeted at different maintainers groups. + +A pull request should only be attributed labels documented in this section: other labels that may +exist on the repository should apply to issues. + +### DCO labels + + * `dco/no`: automatically set by a bot when one of the commits lacks proper signature + +### Status labels + + * `status/0-triage` + * `status/1-design-review` + * `status/2-code-review` + * `status/3-docs-review` + * `status/4-ready-to-merge` + +Special status labels: + + * `status/failing-ci`: indicates that the PR in its current state fails the test suite + * `status/needs-attention`: calls for a collective discussion during a review session + +### Impact labels (apply to merged pull requests) + + * `impact/api` + * `impact/changelog` + * `impact/cli` + * `impact/deprecation` + * `impact/distribution` + * `impact/dockerfile` + +### Process labels (apply to merged pull requests) + +Process labels are to assist in preparing (patch) releases. These labels should only be used for pull requests. + +Label | Use for +------------------------------- | ------------------------------------------------------------------------- +`process/cherry-pick` | PRs that should be cherry-picked in the bump/release branch. These pull-requests must also be assigned to a milestone. +`process/cherry-picked` | PRs that have been cherry-picked. This label is helpful to find PR's that have been added to release-candidates, and to update the change log +`process/docs-cherry-pick` | PRs that should be cherry-picked in the docs branch. Only apply this label for changes that apply to the *current* release, and generic documentation fixes, such as Markdown and spelling fixes. +`process/docs-cherry-picked` | PRs that have been cherry-picked in the docs branch +`process/merge-to-master` | PRs that are opened directly on the bump/release branch, but also need to be merged back to "master" +`process/merged-to-master` | PRs that have been merged back to "master" + + +## Workflow + +An opened pull request can be in 1 of 5 distinct states, for each of which there is a corresponding +label that needs to be applied. + +### Triage - `status/0-triage` + +Maintainers are expected to triage new incoming pull requests by removing the `status/0-triage` +label and adding the correct labels (e.g. `status/1-design-review`) before any other interaction +with the PR. The starting label may potentially skip some steps depending on the kind of pull +request: use your best judgement. + +Maintainers should perform an initial, high-level, overview of the pull request before moving it to +the next appropriate stage: + + - Has DCO + - Contains sufficient justification (e.g., usecases) for the proposed change + - References the Github issue it fixes (if any) in the commit or the first Github comment + +Possible transitions from this state: + + * Close: e.g., unresponsive contributor without DCO + * `status/1-design-review`: general case + * `status/2-code-review`: e.g. trivial bugfix + * `status/3-docs-review`: non-proposal documentation-only change + +### Design review - `status/1-design-review` + +Maintainers are expected to comment on the design of the pull request. Review of documentation is +expected only in the context of design validation, not for stylistic changes. + +Ideally, documentation should reflect the expected behavior of the code. No code review should +take place in this step. + +There are no strict rules on the way a design is validated: we usually aim for a consensus, +although a single maintainer approval is often sufficient for obviously reasonable changes. In +general, strong disagreement expressed by any of the maintainers should not be taken lightly. + +Once design is approved, a maintainer should make sure to remove this label and add the next one. + +Possible transitions from this state: + + * Close: design rejected + * `status/2-code-review`: general case + * `status/3-docs-review`: proposals with only documentation changes + +### Code review - `status/2-code-review` + +Maintainers are expected to review the code and ensure that it is good quality and in accordance +with the documentation in the PR. + +New testcases are expected to be added. Ideally, those testcases should fail when the new code is +absent, and pass when present. The testcases should strive to test as many variants, code paths, as +possible to ensure maximum coverage. + +Changes to code must be reviewed and approved (LGTM'd) by a minimum of two code maintainers. When +the author of a PR is a maintainer, he still needs the approval of two other maintainers. + +Once code is approved according to the rules of the subsystem, a maintainer should make sure to +remove this label and add the next one. If documentation is absent but expected, maintainers should +ask for documentation and move to status `status/3-docs-review` for docs maintainer to follow. + +Possible transitions from this state: + + * Close + * `status/1-design-review`: new design concerns are raised + * `status/3-docs-review`: general case + * `status/4-ready-to-merge`: change not impacting documentation + +### Docs review - `status/3-docs-review` + +Maintainers are expected to review the documentation in its bigger context, ensuring consistency, +completeness, validity, and breadth of coverage across all existing and new documentation. + +They should ask for any editorial change that makes the documentation more consistent and easier to +understand. + +The docker/docker repository only contains _reference documentation_, all +"narrative" documentation is kept in a [unified documentation +repository](https://github.com/docker/docker.github.io). Reviewers must +therefore verify which parts of the documentation need to be updated. Any +contribution that may require changing the narrative should get the +`impact/documentation` label: this is the signal for documentation maintainers +that a change will likely need to happen on the unified documentation +repository. When in doubt, it’s better to add the label and leave it to +documentation maintainers to decide whether it’s ok to skip. In all cases, +leave a comment to explain what documentation changes you think might be needed. + +- If the pull request does not impact the documentation at all, the docs review + step is skipped, and the pull request is ready to merge. +- If the changes in + the pull request require changes to the reference documentation (either + command-line reference, or API reference), those changes must be included as + part of the pull request and will be reviewed now. Keep in mind that the + narrative documentation may contain output examples of commands, so may need + to be updated as well, in which case the `impact/documentation` label must + be applied. +- If the PR has the `impact/documentation` label, merging is delayed until a + documentation maintainer acknowledges that a corresponding documentation PR + (or issue) is opened on the documentation repository. Once a documentation + maintainer acknowledges the change, she/he will move the PR to `status/4-merge` + for a code maintainer to push the green button. + +Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs +sub-project maintainers. If the docs change originates with a docs maintainer, only one additional +LGTM is required (since we assume a docs maintainer approves of their own PR). + +Once documentation is approved, a maintainer should make sure to remove this label and +add the next one. + +Possible transitions from this state: + + * Close + * `status/1-design-review`: new design concerns are raised + * `status/2-code-review`: requires more code changes + * `status/4-ready-to-merge`: general case + +### Merge - `status/4-ready-to-merge` + +Maintainers are expected to merge this pull request as soon as possible. They can ask for a rebase +or carry the pull request themselves. + +Possible transitions from this state: + + * Merge: general case + * Close: carry PR + +After merging a pull request, the maintainer should consider applying one or multiple impact labels +to ease future classification: + + * `impact/api` signifies the patch impacted the Engine API + * `impact/changelog` signifies the change is significant enough to make it in the changelog + * `impact/cli` signifies the patch impacted a CLI command + * `impact/dockerfile` signifies the patch impacted the Dockerfile syntax + * `impact/deprecation` signifies the patch participates in deprecating an existing feature + +### Close + +If a pull request is closed it is expected that sufficient justification will be provided. In +particular, if there are alternative ways of achieving the same net result then those needs to be +spelled out. If the pull request is trying to solve a use case that is not one that we (as a +community) want to support then a justification for why should be provided. + +The number of maintainers it takes to decide and close a PR is deliberately left unspecified. We +assume that the group of maintainers is bound by mutual trust and respect, and that opposition from +any single maintainer should be taken into consideration. Similarly, we expect maintainers to +justify their reasoning and to accept debating. + +## Escalation process + +Despite the previously described reviewing process, some PR might not show any progress for various +reasons: + + - No strong opinion for or against the proposed patch + - Debates about the proper way to solve the problem at hand + - Lack of consensus + - ... + +All these will eventually lead to stalled PR, where no apparent progress is made across several +weeks, or even months. + +Maintainers should use their best judgement and apply the `status/needs-attention` label. It must +be used sparingly, as each PR with such label will be discussed by a group of maintainers during a +review session. The goal of that session is to agree on one of the following outcomes for the PR: + + * Close, explaining the rationale for not pursuing further + * Continue, either by pushing the PR further in the workflow, or by deciding to carry the patch + (ideally, a maintainer should be immediately assigned to make sure that the PR keeps continued + attention) + * Escalate to Solomon by formulating a few specific questions on which his answers will allow + maintainers to decide. + +## Milestones + +Typically, every merged pull request get shipped naturally with the next release cut from the +`master` branch (either the next minor or major version, as indicated by the +[`VERSION`](https://github.com/docker/docker/blob/master/VERSION) file at the root of the +repository). However, the time-based nature of the release process provides no guarantee that a +given pull request will get merged in time. In other words, all open pull requests are implicitly +considered part of the next minor or major release milestone, and this won't be materialized on +GitHub. + +A merged pull request must be attached to the milestone corresponding to the release in which it +will be shipped: this is both useful for tracking, and to help the release manager with the +changelog generation. + +An open pull request may exceptionally get attached to a milestone to express a particular intent to +get it merged in time for that release. This may for example be the case for an important feature to +be included in a minor release, or a critical bugfix to be included in a patch release. + +Finally, and as documented by the [`PATCH-RELEASES.md`](PATCH-RELEASES.md) process, the existence of +a milestone is not a guarantee that a release will happen, as some milestones will be created purely +for the purpose of bookkeeping diff --git a/vendor/github.com/docker/docker/project/TOOLS.md b/vendor/github.com/docker/docker/project/TOOLS.md new file mode 100644 index 0000000..26303c3 --- /dev/null +++ b/vendor/github.com/docker/docker/project/TOOLS.md @@ -0,0 +1,63 @@ +# Tools + +This page describes the tools we use and infrastructure that is in place for +the Docker project. + +### CI + +The Docker project uses [Jenkins](https://jenkins.dockerproject.org/) as our +continuous integration server. Each Pull Request to Docker is tested by running the +equivalent of `make all`. We chose Jenkins because we can host it ourselves and +we run Docker in Docker to test. + +#### Leeroy + +Leeroy is a Go application which integrates Jenkins with +GitHub pull requests. Leeroy uses +[GitHub hooks](https://developer.github.com/v3/repos/hooks/) +to listen for pull request notifications and starts jobs on your Jenkins +server. Using the Jenkins +[notification plugin][https://wiki.jenkins-ci.org/display/JENKINS/Notification+Plugin], +Leeroy updates the pull request using GitHub's +[status API](https://developer.github.com/v3/repos/statuses/) +with pending, success, failure, or error statuses. + +The leeroy repository is maintained at +[github.com/docker/leeroy](https://github.com/docker/leeroy). + +#### GordonTheTurtle IRC Bot + +The GordonTheTurtle IRC Bot lives in the +[#docker-maintainers](https://botbot.me/freenode/docker-maintainers/) channel +on Freenode. He is built in Go and is based off the project at +[github.com/fabioxgn/go-bot](https://github.com/fabioxgn/go-bot). + +His main command is `!rebuild`, which rebuilds a given Pull Request for a repository. +This command works by integrating with Leroy. He has a few other commands too, such +as `!gif` or `!godoc`, but we are always looking for more fun commands to add. + +The gordon-bot repository is maintained at +[github.com/docker/gordon-bot](https://github.com/docker/gordon-bot) + +### NSQ + +We use [NSQ](https://github.com/bitly/nsq) for various aspects of the project +infrastructure. + +#### Hooks + +The hooks project, +[github.com/crosbymichael/hooks](https://github.com/crosbymichael/hooks), +is a small Go application that manages web hooks from github, hub.docker.com, or +other third party services. + +It can be used for listening to github webhooks & pushing them to a queue, +archiving hooks to rethinkdb for processing, and broadcasting hooks to various +jobs. + +#### Docker Master Binaries + +One of the things queued from the Hooks are the building of the Master +Binaries. This happens on every push to the master branch of Docker. The +repository for this is maintained at +[github.com/docker/docker-bb](https://github.com/docker/docker-bb). diff --git a/vendor/github.com/docker/docker/reference/reference.go b/vendor/github.com/docker/docker/reference/reference.go new file mode 100644 index 0000000..996fc50 --- /dev/null +++ b/vendor/github.com/docker/docker/reference/reference.go @@ -0,0 +1,216 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/image/v1" +) + +const ( + // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified + DefaultTag = "latest" + // DefaultHostname is the default built-in hostname + DefaultHostname = "docker.io" + // LegacyDefaultHostname is automatically converted to DefaultHostname + LegacyDefaultHostname = "index.docker.io" + // DefaultRepoPrefix is the prefix used for default repositories in default host + DefaultRepoPrefix = "library/" +) + +// Named is an object with a full name +type Named interface { + // Name returns normalized repository name, like "ubuntu". + Name() string + // String returns full reference, like "ubuntu@sha256:abcdef..." + String() string + // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" + FullName() string + // Hostname returns hostname for the reference, like "docker.io" + Hostname() string + // RemoteName returns the repository component of the full name, like "library/ubuntu" + RemoteName() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +func ParseNamed(s string) (Named, error) { + named, err := distreference.ParseNamed(s) + if err != nil { + return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag: %s", s, err) + } + r, err := WithName(named.Name()) + if err != nil { + return nil, err + } + if canonical, isCanonical := named.(distreference.Canonical); isCanonical { + return WithDigest(r, canonical.Digest()) + } + if tagged, isTagged := named.(distreference.NamedTagged); isTagged { + return WithTag(r, tagged.Tag()) + } + return r, nil +} + +// TrimNamed removes any tag or digest from the named reference +func TrimNamed(ref Named) Named { + return &namedRef{distreference.TrimNamed(ref)} +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + name, err := normalize(name) + if err != nil { + return nil, err + } + if err := validateName(name); err != nil { + return nil, err + } + r, err := distreference.WithName(name) + if err != nil { + return nil, err + } + return &namedRef{r}, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + r, err := distreference.WithTag(name, tag) + if err != nil { + return nil, err + } + return &taggedRef{namedRef{r}}, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + r, err := distreference.WithDigest(name, digest) + if err != nil { + return nil, err + } + return &canonicalRef{namedRef{r}}, nil +} + +type namedRef struct { + distreference.Named +} +type taggedRef struct { + namedRef +} +type canonicalRef struct { + namedRef +} + +func (r *namedRef) FullName() string { + hostname, remoteName := splitHostname(r.Name()) + return hostname + "/" + remoteName +} +func (r *namedRef) Hostname() string { + hostname, _ := splitHostname(r.Name()) + return hostname +} +func (r *namedRef) RemoteName() string { + _, remoteName := splitHostname(r.Name()) + return remoteName +} +func (r *taggedRef) Tag() string { + return r.namedRef.Named.(distreference.NamedTagged).Tag() +} +func (r *canonicalRef) Digest() digest.Digest { + return r.namedRef.Named.(distreference.Canonical).Digest() +} + +// WithDefaultTag adds a default tag to a reference if it only has a repo name. +func WithDefaultTag(ref Named) Named { + if IsNameOnly(ref) { + ref, _ = WithTag(ref, DefaultTag) + } + return ref +} + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// ParseIDOrReference parses string for an image ID or a reference. ID can be +// without a default prefix. +func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) { + if err := v1.ValidateID(idOrRef); err == nil { + idOrRef = "sha256:" + idOrRef + } + if dgst, err := digest.ParseDigest(idOrRef); err == nil { + return dgst, nil, nil + } + ref, err := ParseNamed(idOrRef) + return "", ref, err +} + +// splitHostname splits a repository name to hostname and remotename string. +// If no valid hostname is found, the default hostname is used. Repository name +// needs to be already validated before. +func splitHostname(name string) (hostname, remoteName string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + hostname, remoteName = DefaultHostname, name + } else { + hostname, remoteName = name[:i], name[i+1:] + } + if hostname == LegacyDefaultHostname { + hostname = DefaultHostname + } + if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { + remoteName = DefaultRepoPrefix + remoteName + } + return +} + +// normalize returns a repository name in its normalized form, meaning it +// will not contain default hostname nor library/ prefix for official images. +func normalize(name string) (string, error) { + host, remoteName := splitHostname(name) + if strings.ToLower(remoteName) != remoteName { + return "", errors.New("invalid reference format: repository name must be lowercase") + } + if host == DefaultHostname { + if strings.HasPrefix(remoteName, DefaultRepoPrefix) { + return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil + } + return remoteName, nil + } + return name, nil +} + +func validateName(name string) error { + if err := v1.ValidateID(name); err == nil { + return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) + } + return nil +} diff --git a/vendor/github.com/docker/docker/reference/reference_test.go b/vendor/github.com/docker/docker/reference/reference_test.go new file mode 100644 index 0000000..ff35ba3 --- /dev/null +++ b/vendor/github.com/docker/docker/reference/reference_test.go @@ -0,0 +1,275 @@ +package reference + +import ( + "testing" + + "github.com/docker/distribution/digest" +) + +func TestValidateReferenceName(t *testing.T) { + validRepoNames := []string{ + "docker/docker", + "library/debian", + "debian", + "docker.io/docker/docker", + "docker.io/library/debian", + "docker.io/debian", + "index.docker.io/docker/docker", + "index.docker.io/library/debian", + "index.docker.io/debian", + "127.0.0.1:5000/docker/docker", + "127.0.0.1:5000/library/debian", + "127.0.0.1:5000/debian", + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + } + invalidRepoNames := []string{ + "https://github.com/docker/docker", + "docker/Docker", + "-docker", + "-docker/docker", + "-docker.io/docker/docker", + "docker///docker", + "docker.io/docker/Docker", + "docker.io/docker///docker", + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + } + + for _, name := range invalidRepoNames { + _, err := ParseNamed(name) + if err == nil { + t.Fatalf("Expected invalid repo name for %q", name) + } + } + + for _, name := range validRepoNames { + _, err := ParseNamed(name) + if err != nil { + t.Fatalf("Error parsing repo name %s, got: %q", name, err) + } + } +} + +func TestValidateRemoteName(t *testing.T) { + validRepositoryNames := []string{ + // Sanity check. + "docker/docker", + + // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + + // Allow embedded hyphens. + "docker-rules/docker", + + // Allow multiple hyphens as well. + "docker---rules/docker", + + //Username doc and image name docker being tested. + "doc/docker", + + // single character names are now allowed. + "d/docker", + "jess/t", + + // Consecutive underscores. + "dock__er/docker", + } + for _, repositoryName := range validRepositoryNames { + _, err := ParseNamed(repositoryName) + if err != nil { + t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) + } + } + + invalidRepositoryNames := []string{ + // Disallow capital letters. + "docker/Docker", + + // Only allow one slash. + "docker///docker", + + // Disallow 64-character hexadecimal. + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + + // Disallow leading and trailing hyphens in namespace. + "-docker/docker", + "docker-/docker", + "-docker-/docker", + + // Don't allow underscores everywhere (as opposed to hyphens). + "____/____", + + "_docker/_docker", + + // Disallow consecutive periods. + "dock..er/docker", + "dock_.er/docker", + "dock-.er/docker", + + // No repository. + "docker/", + + //namespace too long + "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", + } + for _, repositoryName := range invalidRepositoryNames { + if _, err := ParseNamed(repositoryName); err == nil { + t.Errorf("Repository name should be invalid: %v", repositoryName) + } + } +} + +func TestParseRepositoryInfo(t *testing.T) { + type tcase struct { + RemoteName, NormalizedName, FullName, AmbiguousName, Hostname string + } + + tcases := []tcase{ + { + RemoteName: "fooo/bar", + NormalizedName: "fooo/bar", + FullName: "docker.io/fooo/bar", + AmbiguousName: "index.docker.io/fooo/bar", + Hostname: "docker.io", + }, + { + RemoteName: "library/ubuntu", + NormalizedName: "ubuntu", + FullName: "docker.io/library/ubuntu", + AmbiguousName: "library/ubuntu", + Hostname: "docker.io", + }, + { + RemoteName: "nonlibrary/ubuntu", + NormalizedName: "nonlibrary/ubuntu", + FullName: "docker.io/nonlibrary/ubuntu", + AmbiguousName: "", + Hostname: "docker.io", + }, + { + RemoteName: "other/library", + NormalizedName: "other/library", + FullName: "docker.io/other/library", + AmbiguousName: "", + Hostname: "docker.io", + }, + { + RemoteName: "private/moonbase", + NormalizedName: "127.0.0.1:8000/private/moonbase", + FullName: "127.0.0.1:8000/private/moonbase", + AmbiguousName: "", + Hostname: "127.0.0.1:8000", + }, + { + RemoteName: "privatebase", + NormalizedName: "127.0.0.1:8000/privatebase", + FullName: "127.0.0.1:8000/privatebase", + AmbiguousName: "", + Hostname: "127.0.0.1:8000", + }, + { + RemoteName: "private/moonbase", + NormalizedName: "example.com/private/moonbase", + FullName: "example.com/private/moonbase", + AmbiguousName: "", + Hostname: "example.com", + }, + { + RemoteName: "privatebase", + NormalizedName: "example.com/privatebase", + FullName: "example.com/privatebase", + AmbiguousName: "", + Hostname: "example.com", + }, + { + RemoteName: "private/moonbase", + NormalizedName: "example.com:8000/private/moonbase", + FullName: "example.com:8000/private/moonbase", + AmbiguousName: "", + Hostname: "example.com:8000", + }, + { + RemoteName: "privatebasee", + NormalizedName: "example.com:8000/privatebasee", + FullName: "example.com:8000/privatebasee", + AmbiguousName: "", + Hostname: "example.com:8000", + }, + { + RemoteName: "library/ubuntu-12.04-base", + NormalizedName: "ubuntu-12.04-base", + FullName: "docker.io/library/ubuntu-12.04-base", + AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", + Hostname: "docker.io", + }, + } + + for _, tcase := range tcases { + refStrings := []string{tcase.NormalizedName, tcase.FullName} + if tcase.AmbiguousName != "" { + refStrings = append(refStrings, tcase.AmbiguousName) + } + + var refs []Named + for _, r := range refStrings { + named, err := ParseNamed(r) + if err != nil { + t.Fatal(err) + } + refs = append(refs, named) + named, err = WithName(r) + if err != nil { + t.Fatal(err) + } + refs = append(refs, named) + } + + for _, r := range refs { + if expected, actual := tcase.NormalizedName, r.Name(); expected != actual { + t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.FullName, r.FullName(); expected != actual { + t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.Hostname, r.Hostname(); expected != actual { + t.Fatalf("Invalid hostname for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.RemoteName, r.RemoteName(); expected != actual { + t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) + } + + } + } +} + +func TestParseReferenceWithTagAndDigest(t *testing.T) { + ref, err := ParseNamed("busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa") + if err != nil { + t.Fatal(err) + } + if _, isTagged := ref.(NamedTagged); isTagged { + t.Fatalf("Reference from %q should not support tag", ref) + } + if _, isCanonical := ref.(Canonical); !isCanonical { + t.Fatalf("Reference from %q should not support digest", ref) + } + if expected, actual := "busybox@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa", ref.String(); actual != expected { + t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) + } +} + +func TestInvalidReferenceComponents(t *testing.T) { + if _, err := WithName("-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid name") + } + ref, err := WithName("busybox") + if err != nil { + t.Fatal(err) + } + if _, err := WithTag(ref, "-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid tag") + } + if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { + t.Fatal("Expected WithName to detect invalid digest") + } +} diff --git a/vendor/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go new file mode 100644 index 0000000..71ca236 --- /dev/null +++ b/vendor/github.com/docker/docker/reference/store.go @@ -0,0 +1,286 @@ +package reference + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ErrDoesNotExist is returned if a reference is not found in the + // store. + ErrDoesNotExist = errors.New("reference does not exist") +) + +// An Association is a tuple associating a reference with an image ID. +type Association struct { + Ref Named + ID digest.Digest +} + +// Store provides the set of methods which can operate on a tag store. +type Store interface { + References(id digest.Digest) []Named + ReferencesByName(ref Named) []Association + AddTag(ref Named, id digest.Digest, force bool) error + AddDigest(ref Canonical, id digest.Digest, force bool) error + Delete(ref Named) (bool, error) + Get(ref Named) (digest.Digest, error) +} + +type store struct { + mu sync.RWMutex + // jsonPath is the path to the file where the serialized tag data is + // stored. + jsonPath string + // Repositories is a map of repositories, indexed by name. + Repositories map[string]repository + // referencesByIDCache is a cache of references indexed by ID, to speed + // up References. + referencesByIDCache map[digest.Digest]map[string]Named +} + +// Repository maps tags to digests. The key is a stringified Reference, +// including the repository name. +type repository map[string]digest.Digest + +type lexicalRefs []Named + +func (a lexicalRefs) Len() int { return len(a) } +func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } + +type lexicalAssociations []Association + +func (a lexicalAssociations) Len() int { return len(a) } +func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } + +// NewReferenceStore creates a new reference store, tied to a file path where +// the set of references are serialized in JSON format. +func NewReferenceStore(jsonPath string) (Store, error) { + abspath, err := filepath.Abs(jsonPath) + if err != nil { + return nil, err + } + + store := &store{ + jsonPath: abspath, + Repositories: make(map[string]repository), + referencesByIDCache: make(map[digest.Digest]map[string]Named), + } + // Load the json file if it exists, otherwise create it. + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +// AddTag adds a tag reference to the store. If force is set to true, existing +// references can be overwritten. This only works for tags, not digests. +func (store *store) AddTag(ref Named, id digest.Digest, force bool) error { + if _, isCanonical := ref.(Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + return store.addReference(WithDefaultTag(ref), id, force) +} + +// AddDigest adds a digest reference to the store. +func (store *store) AddDigest(ref Canonical, id digest.Digest, force bool) error { + return store.addReference(ref, id, force) +} + +func (store *store) addReference(ref Named, id digest.Digest, force bool) error { + if ref.Name() == string(digest.Canonical) { + return errors.New("refusing to create an ambiguous tag using digest algorithm as name") + } + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + repository = make(map[string]digest.Digest) + store.Repositories[ref.Name()] = repository + } + + refStr := ref.String() + oldID, exists := repository[refStr] + + if exists { + // force only works for tags + if digested, isDigest := ref.(Canonical); isDigest { + return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + } + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. +func (store *store) Delete(ref Named) (bool, error) { + ref = WithDefaultTag(ref) + + store.mu.Lock() + defer store.mu.Unlock() + + repoName := ref.Name() + + repository, exists := store.Repositories[repoName] + if !exists { + return false, ErrDoesNotExist + } + + refStr := ref.String() + if id, exists := repository[refStr]; exists { + delete(repository, refStr) + if len(repository) == 0 { + delete(store.Repositories, repoName) + } + if store.referencesByIDCache[id] != nil { + delete(store.referencesByIDCache[id], refStr) + if len(store.referencesByIDCache[id]) == 0 { + delete(store.referencesByIDCache, id) + } + } + return true, store.save() + } + + return false, ErrDoesNotExist +} + +// Get retrieves an item from the store by reference +func (store *store) Get(ref Named) (digest.Digest, error) { + ref = WithDefaultTag(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + return "", ErrDoesNotExist + } + + id, exists := repository[ref.String()] + if !exists { + return "", ErrDoesNotExist + } + + return id, nil +} + +// References returns a slice of references to the given ID. The slice +// will be nil if there are no references to this ID. +func (store *store) References(id digest.Digest) []Named { + store.mu.RLock() + defer store.mu.RUnlock() + + // Convert the internal map to an array for two reasons: + // 1) We must not return a mutable + // 2) It would be ugly to expose the extraneous map keys to callers. + + var references []Named + for _, ref := range store.referencesByIDCache[id] { + references = append(references, ref) + } + + sort.Sort(lexicalRefs(references)) + + return references +} + +// ReferencesByName returns the references for a given repository name. +// If there are no references known for this repository name, +// ReferencesByName returns nil. +func (store *store) ReferencesByName(ref Named) []Association { + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists { + return nil + } + + var associations []Association + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + return nil + } + associations = append(associations, + Association{ + Ref: ref, + ID: refID, + }) + } + + sort.Sort(lexicalAssociations(associations)) + + return associations +} + +func (store *store) save() error { + // Store the json + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) +} + +func (store *store) reload() error { + f, err := os.Open(store.jsonPath) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + + for _, repository := range store.Repositories { + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + continue + } + if store.referencesByIDCache[refID] == nil { + store.referencesByIDCache[refID] = make(map[string]Named) + } + store.referencesByIDCache[refID][refStr] = ref + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/reference/store_test.go b/vendor/github.com/docker/docker/reference/store_test.go new file mode 100644 index 0000000..dd1d253 --- /dev/null +++ b/vendor/github.com/docker/docker/reference/store_test.go @@ -0,0 +1,356 @@ +package reference + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/docker/distribution/digest" +) + +var ( + saveLoadTestCases = map[string]digest.Digest{ + "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6", + "registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793", + "registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b", + "registry:5000/foobar:master": "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc", + "jess/hollywood:latest": "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe", + "registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c", + "busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + } + + marshalledSaveLoadTestCases = []byte(`{"Repositories":{"busybox":{"busybox:latest":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"},"jess/hollywood":{"jess/hollywood:latest":"sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe"},"registry":{"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6":"sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c"},"registry:5000/foobar":{"registry:5000/foobar:HEAD":"sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6","registry:5000/foobar:alternate":"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793","registry:5000/foobar:latest":"sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b","registry:5000/foobar:master":"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc"}}}`) +) + +func TestLoad(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + defer os.RemoveAll(jsonFile.Name()) + + // Write canned json to the temp file + _, err = jsonFile.Write(marshalledSaveLoadTestCases) + if err != nil { + t.Fatalf("error writing to temp file: %v", err) + } + jsonFile.Close() + + store, err := NewReferenceStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, expectedID := range saveLoadTestCases { + ref, err := ParseNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + id, err := store.Get(ref) + if err != nil { + t.Fatalf("could not find reference %s: %v", refStr, err) + } + if id != expectedID { + t.Fatalf("expected %s - got %s", expectedID, id) + } + } +} + +func TestSave(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, id := range saveLoadTestCases { + ref, err := ParseNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + if canonical, ok := ref.(Canonical); ok { + err = store.AddDigest(canonical, id, false) + if err != nil { + t.Fatalf("could not add digest reference %s: %v", refStr, err) + } + } else { + err = store.AddTag(ref, id, false) + if err != nil { + t.Fatalf("could not add reference %s: %v", refStr, err) + } + } + } + + jsonBytes, err := ioutil.ReadFile(jsonFile.Name()) + if err != nil { + t.Fatalf("could not read json file: %v", err) + } + + if !bytes.Equal(jsonBytes, marshalledSaveLoadTestCases) { + t.Fatalf("save output did not match expectations\nexpected:\n%s\ngot:\n%s", marshalledSaveLoadTestCases, jsonBytes) + } +} + +func TestAddDeleteGet(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + testImageID1 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c") + testImageID2 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d") + testImageID3 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") + + // Try adding a reference with no tag or digest + nameOnly, err := WithName("username/repo") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(nameOnly, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Add a few references + ref1, err := ParseNamed("username/repo1:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref1, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref2, err := ParseNamed("username/repo1:old") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref2, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref3, err := ParseNamed("username/repo1:alias") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref3, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref4, err := ParseNamed("username/repo2:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref4, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref5, err := ParseNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddDigest(ref5.(Canonical), testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Attempt to overwrite with force == false + if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") { + t.Fatalf("did not get expected error on overwrite attempt - got %v", err) + } + // Repeat to overwrite with force == true + if err = store.AddTag(ref4, testImageID3, true); err != nil { + t.Fatalf("failed to force tag overwrite: %v", err) + } + + // Check references so far + id, err := store.Get(nameOnly) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref1) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref2) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID2 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String()) + } + + id, err = store.Get(ref3) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref4) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID3 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) + } + + id, err = store.Get(ref5) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID2 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) + } + + // Get should return ErrDoesNotExist for a nonexistent repo + nonExistRepo, err := ParseNamed("username/nonexistrepo:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + + // Get should return ErrDoesNotExist for a nonexistent tag + nonExistTag, err := ParseNamed("username/repo1:nonexist") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if _, err = store.Get(nonExistTag); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + + // Check References + refs := store.References(testImageID1) + if len(refs) != 3 { + t.Fatal("unexpected number of references") + } + // Looking for the references in this order verifies that they are + // returned lexically sorted. + if refs[0].String() != ref3.String() { + t.Fatalf("unexpected reference: %v", refs[0].String()) + } + if refs[1].String() != ref1.String() { + t.Fatalf("unexpected reference: %v", refs[1].String()) + } + if refs[2].String() != nameOnly.String()+":latest" { + t.Fatalf("unexpected reference: %v", refs[2].String()) + } + + // Check ReferencesByName + repoName, err := WithName("username/repo1") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + associations := store.ReferencesByName(repoName) + if len(associations) != 3 { + t.Fatal("unexpected number of associations") + } + // Looking for the associations in this order verifies that they are + // returned lexically sorted. + if associations[0].Ref.String() != ref3.String() { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[0].ID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[1].Ref.String() != ref1.String() { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[1].ID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[2].Ref.String() != ref2.String() { + t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + if associations[2].ID != testImageID2 { + t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + + // Delete should return ErrDoesNotExist for a nonexistent repo + if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete should return ErrDoesNotExist for a nonexistent tag + if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete a few references + if deleted, err := store.Delete(ref1); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref1); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(ref5); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref5); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(nameOnly); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } +} + +func TestInvalidTags(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "tag-store-test") + defer os.RemoveAll(tmpDir) + + store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") + + // sha256 as repo name + ref, err := ParseNamed("sha256:abc") + if err != nil { + t.Fatal(err) + } + err = store.AddTag(ref, id, true) + if err == nil { + t.Fatalf("expected setting tag %q to fail", ref) + } + + // setting digest as a tag + ref, err = ParseNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") + if err != nil { + t.Fatal(err) + } + err = store.AddTag(ref, id, true) + if err == nil { + t.Fatalf("expected setting digest %q to fail", ref) + } + +} diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go new file mode 100644 index 0000000..8cadd51 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/auth.go @@ -0,0 +1,303 @@ +package registry + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +const ( + // AuthClientID is used the ClientID used for the token server + AuthClientID = "docker" +) + +// loginV1 tries to register/login to the v1 registry server. +func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { + registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil) + if err != nil { + return "", "", err + } + + serverAddress := registryEndpoint.String() + + logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) + + if serverAddress == "" { + return "", "", fmt.Errorf("Server Error: Server Address not set.") + } + + loginAgainstOfficialIndex := serverAddress == IndexServer + + req, err := http.NewRequest("GET", serverAddress+"users/", nil) + if err != nil { + return "", "", err + } + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := registryEndpoint.client.Do(req) + if err != nil { + // fallback when request could not be completed + return "", "", fallbackError{ + err: err, + } + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", "", err + } + if resp.StatusCode == http.StatusOK { + return "Login Succeeded", "", nil + } else if resp.StatusCode == http.StatusUnauthorized { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com") + } + return "", "", fmt.Errorf("Wrong login/password, please try again") + } else if resp.StatusCode == http.StatusForbidden { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.") + } + // *TODO: Use registry configuration to determine what this says, if anything? + return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) + } else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326 + logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) + return "", "", fmt.Errorf("Internal Server Error") + } + return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, + resp.StatusCode, resp.Header) +} + +type loginCredentialStore struct { + authConfig *types.AuthConfig +} + +func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { + return lcs.authConfig.Username, lcs.authConfig.Password +} + +func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { + return lcs.authConfig.IdentityToken +} + +func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { + lcs.authConfig.IdentityToken = token +} + +type staticCredentialStore struct { + auth *types.AuthConfig +} + +// NewStaticCredentialStore returns a credential store +// which always returns the same credential values. +func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { + return staticCredentialStore{ + auth: auth, + } +} + +func (scs staticCredentialStore) Basic(*url.URL) (string, string) { + if scs.auth == nil { + return "", "" + } + return scs.auth.Username, scs.auth.Password +} + +func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { + if scs.auth == nil { + return "" + } + return scs.auth.IdentityToken +} + +func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +type fallbackError struct { + err error +} + +func (err fallbackError) Error() string { + return err.err.Error() +} + +// loginV2 tries to login to the v2 registry server. The given registry +// endpoint will be pinged to get authorization challenges. These challenges +// will be used to authenticate against the registry to validate credentials. +func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { + logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") + + modifiers := DockerHeaders(userAgent, nil) + authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) + + credentialAuthConfig := *authConfig + creds := loginCredentialStore{ + authConfig: &credentialAuthConfig, + } + + loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) + if err != nil { + return "", "", err + } + + endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + + resp, err := loginClient.Do(req) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + // TODO(dmcgowan): Attempt to further interpret result, status code and error code string + err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + + return "Login Succeeded", credentialAuthConfig.IdentityToken, nil + +} + +func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { + challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return nil, foundV2, err + } + + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + OfflineAccess: true, + ClientID: AuthClientID, + Scopes: scopes, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + tr := transport.NewTransport(authTransport, modifiers...) + + return &http.Client{ + Transport: tr, + Timeout: 15 * time.Second, + }, foundV2, nil + +} + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} + +// ResolveAuthConfig matches an auth configuration to a server address or a URL +func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := GetAuthConfigKey(index) + // First try the happy case + if c, found := authConfigs[configKey]; found || index.Official { + return c + } + + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for registry, ac := range authConfigs { + if configKey == ConvertToHostname(registry) { + return ac + } + } + + // When all else fails, return an empty auth config + return types.AuthConfig{} +} + +// PingResponseError is used when the response from a ping +// was received but invalid. +type PingResponseError struct { + Err error +} + +func (err PingResponseError) Error() string { + return err.Err.Error() +} + +// PingV2Registry attempts to ping a v2 registry and on success return a +// challenge manager for the supported authentication types and +// whether v2 was confirmed by the response. If a response is received but +// cannot be interpreted a PingResponseError will be returned. +func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { + var ( + foundV2 = false + v2Version = auth.APIVersion{ + Type: "registry", + Version: "2.0", + } + ) + + pingClient := &http.Client{ + Transport: transport, + Timeout: 15 * time.Second, + } + endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, false, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, false, err + } + defer resp.Body.Close() + + versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) + for _, pingVersion := range versions { + if pingVersion == v2Version { + // The version header indicates we're definitely + // talking to a v2 registry. So don't allow future + // fallbacks to the v1 protocol. + + foundV2 = true + break + } + } + + challengeManager := challenge.NewSimpleManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, foundV2, PingResponseError{ + Err: err, + } + } + + return challengeManager, foundV2, nil +} diff --git a/vendor/github.com/docker/docker/registry/auth_test.go b/vendor/github.com/docker/docker/registry/auth_test.go new file mode 100644 index 0000000..9ab71aa --- /dev/null +++ b/vendor/github.com/docker/docker/registry/auth_test.go @@ -0,0 +1,124 @@ +// +build !solaris + +// TODO: Support Solaris + +package registry + +import ( + "testing" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +func buildAuthConfigs() map[string]types.AuthConfig { + authConfigs := map[string]types.AuthConfig{} + + for _, registry := range []string{"testIndex", IndexServer} { + authConfigs[registry] = types.AuthConfig{ + Username: "docker-user", + Password: "docker-pass", + } + } + + return authConfigs +} + +func TestSameAuthDataPostSave(t *testing.T) { + authConfigs := buildAuthConfigs() + authConfig := authConfigs["testIndex"] + if authConfig.Username != "docker-user" { + t.Fail() + } + if authConfig.Password != "docker-pass" { + t.Fail() + } + if authConfig.Auth != "" { + t.Fail() + } +} + +func TestResolveAuthConfigIndexServer(t *testing.T) { + authConfigs := buildAuthConfigs() + indexConfig := authConfigs[IndexServer] + + officialIndex := ®istrytypes.IndexInfo{ + Official: true, + } + privateIndex := ®istrytypes.IndexInfo{ + Official: false, + } + + resolved := ResolveAuthConfig(authConfigs, officialIndex) + assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") + + resolved = ResolveAuthConfig(authConfigs, privateIndex) + assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer") +} + +func TestResolveAuthConfigFullURL(t *testing.T) { + authConfigs := buildAuthConfigs() + + registryAuth := types.AuthConfig{ + Username: "foo-user", + Password: "foo-pass", + } + localAuth := types.AuthConfig{ + Username: "bar-user", + Password: "bar-pass", + } + officialAuth := types.AuthConfig{ + Username: "baz-user", + Password: "baz-pass", + } + authConfigs[IndexServer] = officialAuth + + expectedAuths := map[string]types.AuthConfig{ + "registry.example.com": registryAuth, + "localhost:8000": localAuth, + "registry.com": localAuth, + } + + validRegistries := map[string][]string{ + "registry.example.com": { + "https://registry.example.com/v1/", + "http://registry.example.com/v1/", + "registry.example.com", + "registry.example.com/v1/", + }, + "localhost:8000": { + "https://localhost:8000/v1/", + "http://localhost:8000/v1/", + "localhost:8000", + "localhost:8000/v1/", + }, + "registry.com": { + "https://registry.com/v1/", + "http://registry.com/v1/", + "registry.com", + "registry.com/v1/", + }, + } + + for configKey, registries := range validRegistries { + configured, ok := expectedAuths[configKey] + if !ok { + t.Fail() + } + index := ®istrytypes.IndexInfo{ + Name: configKey, + } + for _, registry := range registries { + authConfigs[registry] = configured + resolved := ResolveAuthConfig(authConfigs, index) + if resolved.Username != configured.Username || resolved.Password != configured.Password { + t.Errorf("%s -> %v != %v\n", registry, resolved, configured) + } + delete(authConfigs, registry) + resolved = ResolveAuthConfig(authConfigs, index) + if resolved.Username == configured.Username || resolved.Password == configured.Password { + t.Errorf("%s -> %v == %v\n", registry, resolved, configured) + } + } + } +} diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go new file mode 100644 index 0000000..9a4f6a9 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config.go @@ -0,0 +1,305 @@ +package registry + +import ( + "errors" + "fmt" + "net" + "net/url" + "strings" + + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/opts" + "github.com/docker/docker/reference" + "github.com/spf13/pflag" +) + +// ServiceOptions holds command line options. +type ServiceOptions struct { + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` + + // V2Only controls access to legacy registries. If it is set to true via the + // command line flag the daemon will not attempt to contact v1 legacy registries + V2Only bool `json:"disable-legacy-registry,omitempty"` +} + +// serviceConfig holds daemon configuration for the registry service. +type serviceConfig struct { + registrytypes.ServiceConfig + V2Only bool +} + +var ( + // DefaultNamespace is the default namespace + DefaultNamespace = "docker.io" + // DefaultRegistryVersionHeader is the name of the default HTTP header + // that carries Registry version info + DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" + + // IndexHostname is the index hostname + IndexHostname = "index.docker.io" + // IndexServer is used for user auth and image search + IndexServer = "https://" + IndexHostname + "/v1/" + // IndexName is the name of the index + IndexName = "docker.io" + + // NotaryServer is the endpoint serving the Notary trust server + NotaryServer = "https://notary.docker.io" + + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = &url.URL{ + Scheme: "https", + Host: "registry-1.docker.io", + } +) + +var ( + // ErrInvalidRepositoryName is an error returned if the repository name did + // not have the correct form + ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + + emptyServiceConfig = newServiceConfig(ServiceOptions{}) +) + +// for mocking in unit tests +var lookupIP = net.LookupIP + +// InstallCliFlags adds command-line options to the top-level flag parser for +// the current process. +func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) { + mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) + insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) + + flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") + flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") + + options.installCliPlatformFlags(flags) +} + +// newServiceConfig returns a new instance of ServiceConfig +func newServiceConfig(options ServiceOptions) *serviceConfig { + config := &serviceConfig{ + ServiceConfig: registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), + // Hack: Bypass setting the mirrors to IndexConfigs since they are going away + // and Mirrors are only for the official registry anyways. + Mirrors: options.Mirrors, + }, + V2Only: options.V2Only, + } + + config.LoadInsecureRegistries(options.InsecureRegistries) + + return config +} + +// LoadInsecureRegistries loads insecure registries to config +func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { + // Localhost is by default considered as an insecure registry + // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). + // + // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change + // daemon flags on boot2docker? + registries = append(registries, "127.0.0.0/8") + + // Store original InsecureRegistryCIDRs and IndexConfigs + // Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info. + originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs + originalIndexInfos := config.ServiceConfig.IndexConfigs + + config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) + config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0) + +skip: + for _, r := range registries { + // validate insecure registry + if _, err := ValidateIndexName(r); err != nil { + // before returning err, roll back to original data + config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs + config.ServiceConfig.IndexConfigs = originalIndexInfos + return err + } + // Check if CIDR was passed to --insecure-registry + _, ipnet, err := net.ParseCIDR(r) + if err == nil { + // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. + data := (*registrytypes.NetIPNet)(ipnet) + for _, value := range config.InsecureRegistryCIDRs { + if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { + continue skip + } + } + // ipnet is not found, add it in config.InsecureRegistryCIDRs + config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) + + } else { + // Assume `host:port` if not CIDR. + config.IndexConfigs[r] = ®istrytypes.IndexInfo{ + Name: r, + Mirrors: make([]string, 0), + Secure: false, + Official: false, + } + } + } + + // Configure public registry. + config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ + Name: IndexName, + Mirrors: config.Mirrors, + Secure: true, + Official: true, + } + + return nil +} + +// isSecureIndex returns false if the provided indexName is part of the list of insecure registries +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered +// insecure. +// +// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained +// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element +// of insecureRegistries. +func isSecureIndex(config *serviceConfig, indexName string) bool { + // Check for configured index, first. This is needed in case isSecureIndex + // is called from anything besides newIndexInfo, in order to honor per-index configurations. + if index, ok := config.IndexConfigs[indexName]; ok { + return index.Secure + } + + host, _, err := net.SplitHostPort(indexName) + if err != nil { + // assume indexName is of the form `host` without the port and go on. + host = indexName + } + + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip != nil { + addrs = []net.IP{ip} + } + + // if ip == nil, then `host` is neither an IP nor it could be looked up, + // either because the index is unreachable, or because the index is behind an HTTP proxy. + // So, len(addrs) == 0 and we're not aborting. + } + + // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. + for _, addr := range addrs { + for _, ipnet := range config.InsecureRegistryCIDRs { + // check if the addr falls in the subnet + if (*net.IPNet)(ipnet).Contains(addr) { + return false + } + } + } + + return true +} + +// ValidateMirror validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", fmt.Errorf("%s is not a valid URI", val) + } + + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) + } + + if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") + } + + return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil +} + +// ValidateIndexName validates an index name. +func ValidateIndexName(val string) (string, error) { + if val == reference.LegacyDefaultHostname { + val = reference.DefaultHostname + } + if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { + return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) + } + return val, nil +} + +func validateNoScheme(reposName string) error { + if strings.Contains(reposName, "://") { + // It cannot contain a scheme! + return ErrInvalidRepositoryName + } + return nil +} + +// newIndexInfo returns IndexInfo configuration from indexName +func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { + var err error + indexName, err = ValidateIndexName(indexName) + if err != nil { + return nil, err + } + + // Return any configured index info, first. + if index, ok := config.IndexConfigs[indexName]; ok { + return index, nil + } + + // Construct a non-configured index info. + index := ®istrytypes.IndexInfo{ + Name: indexName, + Mirrors: make([]string, 0), + Official: false, + } + index.Secure = isSecureIndex(config, indexName) + return index, nil +} + +// GetAuthConfigKey special-cases using the full index address of the official +// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. +func GetAuthConfigKey(index *registrytypes.IndexInfo) string { + if index.Official { + return IndexServer + } + return index.Name +} + +// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { + index, err := newIndexInfo(config, name.Hostname()) + if err != nil { + return nil, err + } + official := !strings.ContainsRune(name.Name(), '/') + return &RepositoryInfo{ + Named: name, + Index: index, + Official: official, + }, nil +} + +// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but +// lacks registry configuration. +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(emptyServiceConfig, reposName) +} + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) + if err != nil { + return nil, err + } + return indexInfo, nil +} diff --git a/vendor/github.com/docker/docker/registry/config_test.go b/vendor/github.com/docker/docker/registry/config_test.go new file mode 100644 index 0000000..25578a7 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_test.go @@ -0,0 +1,49 @@ +package registry + +import ( + "testing" +) + +func TestValidateMirror(t *testing.T) { + valid := []string{ + "http://mirror-1.com", + "https://mirror-1.com", + "http://localhost", + "https://localhost", + "http://localhost:5000", + "https://localhost:5000", + "http://127.0.0.1", + "https://127.0.0.1", + "http://127.0.0.1:5000", + "https://127.0.0.1:5000", + } + + invalid := []string{ + "!invalid!://%as%", + "ftp://mirror-1.com", + "http://mirror-1.com/", + "http://mirror-1.com/?q=foo", + "http://mirror-1.com/v1/", + "http://mirror-1.com/v1/?q=foo", + "http://mirror-1.com/v1/?q=foo#frag", + "http://mirror-1.com?q=foo", + "https://mirror-1.com#frag", + "https://mirror-1.com/", + "https://mirror-1.com/#frag", + "https://mirror-1.com/v1/", + "https://mirror-1.com/v1/#", + "https://mirror-1.com?q", + } + + for _, address := range valid { + if ret, err := ValidateMirror(address); err != nil || ret == "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } + + for _, address := range invalid { + if ret, err := ValidateMirror(address); err == nil || ret != "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } +} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go new file mode 100644 index 0000000..d692e8e --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_unix.go @@ -0,0 +1,25 @@ +// +build !windows + +package registry + +import ( + "github.com/spf13/pflag" +) + +var ( + // CertsDir is the directory where certificates are stored + CertsDir = "/etc/docker/certs.d" +) + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:/index.docker.io/v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return s +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + flags.BoolVar(&options.V2Only, "disable-legacy-registry", false, "Disable contacting legacy registries") +} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go new file mode 100644 index 0000000..d1b313d --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_windows.go @@ -0,0 +1,25 @@ +package registry + +import ( + "os" + "path/filepath" + "strings" + + "github.com/spf13/pflag" +) + +// CertsDir is the directory where certificates are stored +var CertsDir = os.Getenv("programdata") + `\docker\certs.d` + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:\index.docker.io\v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return filepath.FromSlash(strings.Replace(s, ":", "", -1)) +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + // No Windows specific flags. +} diff --git a/vendor/github.com/docker/docker/registry/endpoint_test.go b/vendor/github.com/docker/docker/registry/endpoint_test.go new file mode 100644 index 0000000..8451d3f --- /dev/null +++ b/vendor/github.com/docker/docker/registry/endpoint_test.go @@ -0,0 +1,78 @@ +package registry + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func TestEndpointParse(t *testing.T) { + testData := []struct { + str string + expected string + }{ + {IndexServer, IndexServer}, + {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v1/"}, + {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000/nonversion/", "http://0.0.0.0:5000/nonversion/v1/"}, + {"http://0.0.0.0:5000/v0/", "http://0.0.0.0:5000/v0/v1/"}, + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td.str, nil, "", nil) + if err != nil { + t.Errorf("%q: %s", td.str, err) + } + if e == nil { + t.Logf("something's fishy, endpoint for %q is nil", td.str) + continue + } + if e.String() != td.expected { + t.Errorf("expected %q, got %q", td.expected, e.String()) + } + } +} + +func TestEndpointParseInvalid(t *testing.T) { + testData := []string{ + "http://0.0.0.0:5000/v2/", + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td, nil, "", nil) + if err == nil { + t.Errorf("expected error parsing %q: parsed as %q", td, e) + } + } +} + +// Ensure that a registry endpoint that responds with a 401 only is determined +// to be a valid v1 registry endpoint +func TestValidateEndpoint(t *testing.T) { + requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) + w.WriteHeader(http.StatusUnauthorized) + }) + + // Make a test server which should validate as a v1 server. + testServer := httptest.NewServer(requireBasicAuthHandler) + defer testServer.Close() + + testServerURL, err := url.Parse(testServer.URL) + if err != nil { + t.Fatal(err) + } + + testEndpoint := V1Endpoint{ + URL: testServerURL, + client: HTTPClient(NewTransport(nil)), + } + + if err = validateEndpoint(&testEndpoint); err != nil { + t.Fatal(err) + } + + if testEndpoint.URL.Scheme != "http" { + t.Fatalf("expecting to validate endpoint as http, got url %s", testEndpoint.String()) + } +} diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go new file mode 100644 index 0000000..6bcf8c9 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/endpoint_v1.go @@ -0,0 +1,198 @@ +package registry + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + registrytypes "github.com/docker/docker/api/types/registry" +) + +// V1Endpoint stores basic information about a V1 registry endpoint. +type V1Endpoint struct { + client *http.Client + URL *url.URL + IsSecure bool +} + +// NewV1Endpoint parses the given address to return a registry endpoint. +func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + + endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + if err := validateEndpoint(endpoint); err != nil { + return nil, err + } + + return endpoint, nil +} + +func validateEndpoint(endpoint *V1Endpoint) error { + logrus.Debugf("pinging registry endpoint %s", endpoint) + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + + var err2 error + if _, err2 = endpoint.Ping(); err2 == nil { + return nil + } + + return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) + } + + return nil +} + +func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + endpoint := &V1Endpoint{ + IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), + URL: new(url.URL), + } + + *endpoint.URL = address + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := NewTransport(tlsConfig) + endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) + return endpoint, nil +} + +// trimV1Address trims the version off the address and returns the +// trimmed address or an error if there is a non-V1 version. +func trimV1Address(address string) (string, error) { + var ( + chunks []string + apiVersionStr string + ) + + if strings.HasSuffix(address, "/") { + address = address[:len(address)-1] + } + + chunks = strings.Split(address, "/") + apiVersionStr = chunks[len(chunks)-1] + if apiVersionStr == "v1" { + return strings.Join(chunks[:len(chunks)-1], "/"), nil + } + + for k, v := range apiVersions { + if k != APIVersion1 && apiVersionStr == v { + return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) + } + } + + return address, nil +} + +func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + + address, err := trimV1Address(address) + if err != nil { + return nil, err + } + + uri, err := url.Parse(address) + if err != nil { + return nil, err + } + + endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// Get the formatted URL for the root of this registry Endpoint +func (e *V1Endpoint) String() string { + return e.URL.String() + "/v1/" +} + +// Path returns a formatted string for the URL +// of this endpoint with the given path appended. +func (e *V1Endpoint) Path(path string) string { + return e.URL.String() + "/v1/" + path +} + +// Ping returns a PingResult which indicates whether the registry is standalone or not. +func (e *V1Endpoint) Ping() (PingResult, error) { + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fallback to http in case of error) + return PingResult{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.Path("_ping"), nil) + if err != nil { + return PingResult{Standalone: false}, err + } + + resp, err := e.client.Do(req) + if err != nil { + return PingResult{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := PingResult{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + logrus.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + logrus.Debugf("PingResult.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + logrus.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". + if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + logrus.Debugf("PingResult.Standalone: %t", info.Standalone) + return info, nil +} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go new file mode 100644 index 0000000..17fa97c --- /dev/null +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -0,0 +1,191 @@ +// Package registry contains client primitives to interact with a remote Docker registry. +package registry + +import ( + "crypto/tls" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrAlreadyExists is an error returned if an image being pushed + // already exists on the remote side + ErrAlreadyExists = errors.New("Image already exists") +) + +func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { + // PreferredServerCipherSuites should have no effect + tlsConfig := tlsconfig.ServerDefault() + + tlsConfig.InsecureSkipVerify = !isSecure + + if isSecure && CertsDir != "" { + hostDir := filepath.Join(CertsDir, cleanPath(hostname)) + logrus.Debugf("hostDir: %s", hostDir) + if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { + return nil, err + } + } + + return tlsConfig, nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// ReadCertsDirectory reads the directory for TLS certificates +// including roots and certificate pairs and updates the +// provided TLS configuration. +func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { + fs, err := ioutil.ReadDir(directory) + if err != nil && !os.IsNotExist(err) { + return err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if tlsConfig.RootCAs == nil { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return fmt.Errorf("unable to get system cert pool: %v", err) + } + tlsConfig.RootCAs = systemPool + } + logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) + if err != nil { + return err + } + tlsConfig.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, keyName) { + return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) + if err != nil { + return err + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, certName) { + return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) + } + } + } + + return nil +} + +// DockerHeaders returns request modifiers with a User-Agent and metaHeaders +func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{} + if userAgent != "" { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ + "User-Agent": []string{userAgent}, + })) + } + if metaHeaders != nil { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) + } + return modifiers +} + +// HTTPClient returns an HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func HTTPClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} + +// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the +// default TLS configuration. +func NewTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + tlsConfig = tlsconfig.ServerDefault() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + return base +} diff --git a/vendor/github.com/docker/docker/registry/registry_mock_test.go b/vendor/github.com/docker/docker/registry/registry_mock_test.go new file mode 100644 index 0000000..21fc1fd --- /dev/null +++ b/vendor/github.com/docker/docker/registry/registry_mock_test.go @@ -0,0 +1,478 @@ +// +build !solaris + +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + "time" + + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" + "github.com/gorilla/mux" + + "github.com/Sirupsen/logrus" +) + +var ( + testHTTPServer *httptest.Server + testHTTPSServer *httptest.Server + testLayers = map[string]map[string]string{ + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { + "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", + "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, + 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, + 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, + 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, + 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, + 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, + 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, + 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, + 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, + }), + }, + "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { + "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", + "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, + 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, + 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, + 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, + 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, + 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, + 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, + 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, + 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, + }), + }, + } + testRepositories = map[string]map[string]string{ + "foo42/bar": { + "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + }, + } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + "other.com": {net.ParseIP("43.43.43.43")}, + } +) + +func init() { + r := mux.NewRouter() + + // /v1/ + r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") + r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") + r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") + r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") + r.HandleFunc("/v1/search", handlerSearch).Methods("GET") + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return addrs, nil + } + for _, addr := range addrs { + if addr.String() == host { + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: no such host") + } +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func makeURL(req string) string { + return testHTTPServer.URL + req +} + +func makeHTTPSURL(req string) string { + return testHTTPSServer.URL + req +} + +func makeIndex(req string) *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: makeURL(req), + } + return index +} + +func makeHTTPSIndex(req string) *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: makeHTTPSURL(req), + } + return index +} + +func makePublicIndex() *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: IndexServer, + Secure: true, + Official: true, + } + return index +} + +func makeServiceConfig(mirrors []string, insecureRegistries []string) *serviceConfig { + options := ServiceOptions{ + Mirrors: mirrors, + InsecureRegistries: insecureRegistries, + } + + return newServiceConfig(options) +} + +func writeHeaders(w http.ResponseWriter) { + h := w.Header() + h.Add("Server", "docker-tests/mock") + h.Add("Expires", "-1") + h.Add("Content-Type", "application/json") + h.Add("Pragma", "no-cache") + h.Add("Cache-Control", "no-cache") + h.Add("X-Docker-Registry-Version", "0.0.0") + h.Add("X-Docker-Registry-Config", "mock") +} + +func writeResponse(w http.ResponseWriter, message interface{}, code int) { + writeHeaders(w) + w.WriteHeader(code) + body, err := json.Marshal(message) + if err != nil { + io.WriteString(w, err.Error()) + return + } + w.Write(body) +} + +func readJSON(r *http.Request, dest interface{}) error { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + return json.Unmarshal(body, dest) +} + +func apiError(w http.ResponseWriter, message string, code int) { + body := map[string]string{ + "error": message, + } + writeResponse(w, body, code) +} + +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a == b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v != %v", a, b) + } + t.Fatal(message) +} + +func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a != b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v == %v", a, b) + } + t.Fatal(message) +} + +// Similar to assertEqual, but does not stop test +func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a == b { + return + } + message := fmt.Sprintf("%v != %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +// Similar to assertNotEqual, but does not stop test +func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a != b { + return + } + message := fmt.Sprintf("%v == %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +func requiresAuth(w http.ResponseWriter, r *http.Request) bool { + writeCookie := func() { + value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) + cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} + http.SetCookie(w, cookie) + //FIXME(sam): this should be sent only on Index routes + value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) + w.Header().Add("X-Docker-Token", value) + } + if len(r.Cookies()) > 0 { + writeCookie() + return true + } + if len(r.Header.Get("Authorization")) > 0 { + writeCookie() + return true + } + w.Header().Add("WWW-Authenticate", "token") + apiError(w, "Wrong auth", 401) + return false +} + +func handlerGetPing(w http.ResponseWriter, r *http.Request) { + writeResponse(w, true, 200) +} + +func handlerGetImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + layer, exists := testLayers[vars["image_id"]] + if !exists { + http.NotFound(w, r) + return + } + writeHeaders(w) + layerSize := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) + io.WriteString(w, layer[vars["action"]]) +} + +func handlerPutImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + imageID := vars["image_id"] + action := vars["action"] + layer, exists := testLayers[imageID] + if !exists { + if action != "json" { + http.NotFound(w, r) + return + } + layer = make(map[string]string) + testLayers[imageID] = layer + } + if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { + if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { + apiError(w, "Wrong checksum", 400) + return + } + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + apiError(w, fmt.Sprintf("Error: %s", err), 500) + return + } + layer[action] = string(body) + writeResponse(w, true, 200) +} + +func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + repositoryName, err := reference.WithName(mux.Vars(r)["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tags, exists := testRepositories[repositoryName.String()] + if !exists { + apiError(w, "Repository not found", 404) + return + } + if r.Method == "DELETE" { + delete(testRepositories, repositoryName.String()) + writeResponse(w, true, 200) + return + } + writeResponse(w, tags, 200) +} + +func handlerGetTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName.String()] + if !exists { + apiError(w, "Repository not found", 404) + return + } + tag, exists := tags[tagName] + if !exists { + apiError(w, "Tag not found", 404) + return + } + writeResponse(w, tag, 200) +} + +func handlerPutTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName.String()] + if !exists { + tags = make(map[string]string) + testRepositories[repositoryName.String()] = tags + } + tagValue := "" + readJSON(r, tagValue) + tags[tagName] = tagValue + writeResponse(w, true, 200) +} + +func handlerUsers(w http.ResponseWriter, r *http.Request) { + code := 200 + if r.Method == "POST" { + code = 201 + } else if r.Method == "PUT" { + code = 204 + } + writeResponse(w, "", code) +} + +func handlerImages(w http.ResponseWriter, r *http.Request) { + u, _ := url.Parse(testHTTPServer.URL) + w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) + w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) + if r.Method == "PUT" { + if strings.HasSuffix(r.URL.Path, "images") { + writeResponse(w, "", 204) + return + } + writeResponse(w, "", 200) + return + } + if r.Method == "DELETE" { + writeResponse(w, "", 204) + return + } + images := []map[string]string{} + for imageID, layer := range testLayers { + image := make(map[string]string) + image["id"] = imageID + image["checksum"] = layer["checksum_tarsum"] + image["Tag"] = "latest" + images = append(images, image) + } + writeResponse(w, images, 200) +} + +func handlerAuth(w http.ResponseWriter, r *http.Request) { + writeResponse(w, "OK", 200) +} + +func handlerSearch(w http.ResponseWriter, r *http.Request) { + result := ®istrytypes.SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []registrytypes.SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) +} + +func TestPing(t *testing.T) { + res, err := http.Get(makeURL("/v1/_ping")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, res.StatusCode, 200, "") + assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", + "This is not a Mocked Registry") +} + +/* Uncomment this to test Mocked Registry locally with curl + * WARNING: Don't push on the repos uncommented, it'll block the tests + * +func TestWait(t *testing.T) { + logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) + c := make(chan int) + <-c +} + +//*/ diff --git a/vendor/github.com/docker/docker/registry/registry_test.go b/vendor/github.com/docker/docker/registry/registry_test.go new file mode 100644 index 0000000..786dfbe --- /dev/null +++ b/vendor/github.com/docker/docker/registry/registry_test.go @@ -0,0 +1,875 @@ +// +build !solaris + +package registry + +import ( + "fmt" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "testing" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" +) + +var ( + token = []string{"fake-token"} +) + +const ( + imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + REPO = "foo42/bar" +) + +func spawnTestRegistrySession(t *testing.T) *Session { + authConfig := &types.AuthConfig{} + endpoint, err := NewV1Endpoint(makeIndex("/v1/"), "", nil) + if err != nil { + t.Fatal(err) + } + userAgent := "docker test client" + var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} + tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...) + client := HTTPClient(tr) + r, err := NewSession(client, authConfig, endpoint) + if err != nil { + t.Fatal(err) + } + // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` + // header while authenticating, in order to retrieve a token that can be later used to + // perform authenticated actions. + // + // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, + // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. + // + // Because we know that the client's transport is an `*authTransport` we simply cast it, + // in order to set the internal cached token to the fake token, and thus send that fake token + // upon every subsequent requests. + r.client.Transport.(*authTransport).token = token + return r +} + +func TestPingRegistryEndpoint(t *testing.T) { + testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) { + ep, err := NewV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + regInfo, err := ep.Ping() + if err != nil { + t.Fatal(err) + } + + assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) + } + + testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makePublicIndex(), false, "Expected standalone to be false for public index") +} + +func TestEndpoint(t *testing.T) { + // Simple wrapper to fail test if err != nil + expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint { + endpoint, err := NewV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + return endpoint + } + + assertInsecureIndex := func(index *registrytypes.IndexInfo) { + index.Secure = true + _, err := NewV1Endpoint(index, "", nil) + assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") + assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") + index.Secure = false + } + + assertSecureIndex := func(index *registrytypes.IndexInfo) { + index.Secure = true + _, err := NewV1Endpoint(index, "", nil) + assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") + assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") + index.Secure = false + } + + index := ®istrytypes.IndexInfo{} + index.Name = makeURL("/v1/") + endpoint := expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + assertInsecureIndex(index) + + index.Name = makeURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + assertInsecureIndex(index) + + httpURL := makeURL("") + index.Name = strings.SplitN(httpURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") + assertInsecureIndex(index) + + index.Name = makeHTTPSURL("/v1/") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + assertSecureIndex(index) + + index.Name = makeHTTPSURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + assertSecureIndex(index) + + httpsURL := makeHTTPSURL("") + index.Name = strings.SplitN(httpsURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") + assertSecureIndex(index) + + badEndpoints := []string{ + "http://127.0.0.1/v1/", + "https://127.0.0.1/v1/", + "http://127.0.0.1", + "https://127.0.0.1", + "127.0.0.1", + } + for _, address := range badEndpoints { + index.Name = address + _, err := NewV1Endpoint(index, "", nil) + checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") + } +} + +func TestGetRemoteHistory(t *testing.T) { + r := spawnTestRegistrySession(t) + hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(hist), 2, "Expected 2 images in history") + assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry") + assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "Unexpected second ancestry") +} + +func TestLookupRemoteImage(t *testing.T) { + r := spawnTestRegistrySession(t) + err := r.LookupRemoteImage(imageID, makeURL("/v1/")) + assertEqual(t, err, nil, "Expected error of remote lookup to nil") + if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil { + t.Fatal("Expected error of remote lookup to not nil") + } +} + +func TestGetRemoteImageJSON(t *testing.T) { + r := spawnTestRegistrySession(t) + json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, size, int64(154), "Expected size 154") + if len(json) == 0 { + t.Fatal("Expected non-empty json") + } + + _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/")) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteImageLayer(t *testing.T) { + r := spawnTestRegistrySession(t) + data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0) + if err != nil { + t.Fatal(err) + } + if data == nil { + t.Fatal("Expected non-nil data result") + } + + _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteTag(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test") + if err != nil { + t.Fatal(err) + } + assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) + + bazRef, err := reference.ParseNamed("foo42/baz") + if err != nil { + t.Fatal(err) + } + _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo") + if err != ErrRepoNotFound { + t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") + } +} + +func TestGetRemoteTags(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(tags), 2, "Expected two tags") + assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) + assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) + + bazRef, err := reference.ParseNamed("foo42/baz") + if err != nil { + t.Fatal(err) + } + _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef) + if err != ErrRepoNotFound { + t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo") + } +} + +func TestGetRepositoryData(t *testing.T) { + r := spawnTestRegistrySession(t) + parsedURL, err := url.Parse(makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + host := "http://" + parsedURL.Host + "/v1/" + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + data, err := r.GetRepositoryData(repoRef) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") + assertEqual(t, len(data.Endpoints), 2, + fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) + assertEqual(t, data.Endpoints[0], host, + fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) + assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/", + fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) + +} + +func TestPushImageJSONRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := &ImgData{ + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + } + + err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageLayerRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + layer := strings.NewReader("") + _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{}) + if err != nil { + t.Fatal(err) + } +} + +func TestParseRepositoryInfo(t *testing.T) { + type staticRepositoryInfo struct { + Index *registrytypes.IndexInfo + RemoteName string + CanonicalName string + LocalName string + Official bool + } + + expectedRepoInfos := map[string]staticRepositoryInfo{ + "fooo/bar": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "fooo/bar", + LocalName: "fooo/bar", + CanonicalName: "docker.io/fooo/bar", + Official: false, + }, + "library/ubuntu": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "nonlibrary/ubuntu": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "nonlibrary/ubuntu", + LocalName: "nonlibrary/ubuntu", + CanonicalName: "docker.io/nonlibrary/ubuntu", + Official: false, + }, + "ubuntu": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "other/library": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "other/library", + LocalName: "other/library", + CanonicalName: "docker.io/other/library", + Official: false, + }, + "127.0.0.1:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "127.0.0.1:8000/private/moonbase", + CanonicalName: "127.0.0.1:8000/private/moonbase", + Official: false, + }, + "127.0.0.1:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "127.0.0.1:8000/privatebase", + CanonicalName: "127.0.0.1:8000/privatebase", + Official: false, + }, + "localhost:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost:8000/private/moonbase", + CanonicalName: "localhost:8000/private/moonbase", + Official: false, + }, + "localhost:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost:8000/privatebase", + CanonicalName: "localhost:8000/privatebase", + Official: false, + }, + "example.com/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com/private/moonbase", + CanonicalName: "example.com/private/moonbase", + Official: false, + }, + "example.com/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com/privatebase", + CanonicalName: "example.com/privatebase", + Official: false, + }, + "example.com:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com:8000/private/moonbase", + CanonicalName: "example.com:8000/private/moonbase", + Official: false, + }, + "example.com:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com:8000/privatebase", + CanonicalName: "example.com:8000/privatebase", + Official: false, + }, + "localhost/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost/private/moonbase", + CanonicalName: "localhost/private/moonbase", + Official: false, + }, + "localhost/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost/privatebase", + CanonicalName: "localhost/privatebase", + Official: false, + }, + IndexName + "/public/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "index." + IndexName + "/public/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + IndexName + "/ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + "index." + IndexName + "/ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + } + + for reposName, expectedRepoInfo := range expectedRepoInfos { + named, err := reference.WithName(reposName) + if err != nil { + t.Error(err) + } + + repoInfo, err := ParseRepositoryInfo(named) + if err != nil { + t.Error(err) + } else { + checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) + checkEqual(t, repoInfo.RemoteName(), expectedRepoInfo.RemoteName, reposName) + checkEqual(t, repoInfo.Name(), expectedRepoInfo.LocalName, reposName) + checkEqual(t, repoInfo.FullName(), expectedRepoInfo.CanonicalName, reposName) + checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) + checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) + } + } +} + +func TestNewIndexInfo(t *testing.T) { + testIndexInfo := func(config *serviceConfig, expectedIndexInfos map[string]*registrytypes.IndexInfo) { + for indexName, expectedIndexInfo := range expectedIndexInfos { + index, err := newIndexInfo(config, indexName) + if err != nil { + t.Fatal(err) + } else { + checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") + checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") + checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") + checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") + } + } + } + + config := newServiceConfig(ServiceOptions{}) + noMirrors := []string{} + expectedIndexInfos := map[string]*registrytypes.IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "index." + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} + config = makeServiceConfig(publicMirrors, []string{"example.com"}) + + expectedIndexInfos = map[string]*registrytypes.IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "index." + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) + expectedIndexInfos = map[string]*registrytypes.IndexInfo{ + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) +} + +func TestMirrorEndpointLookup(t *testing.T) { + containsMirror := func(endpoints []APIEndpoint) bool { + for _, pe := range endpoints { + if pe.URL.Host == "my.mirror" { + return true + } + } + return false + } + s := DefaultService{config: makeServiceConfig([]string{"my.mirror"}, nil)} + + imageName, err := reference.WithName(IndexName + "/test/image") + if err != nil { + t.Error(err) + } + pushAPIEndpoints, err := s.LookupPushEndpoints(imageName.Hostname()) + if err != nil { + t.Fatal(err) + } + if containsMirror(pushAPIEndpoints) { + t.Fatal("Push endpoint should not contain mirror") + } + + pullAPIEndpoints, err := s.LookupPullEndpoints(imageName.Hostname()) + if err != nil { + t.Fatal(err) + } + if !containsMirror(pullAPIEndpoints) { + t.Fatal("Pull endpoint should contain mirror") + } +} + +func TestPushRegistryTag(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageJSONIndex(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := []*ImgData{ + { + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + }, + { + ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + }, + } + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } + repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()}) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } +} + +func TestSearchRepositories(t *testing.T) { + r := spawnTestRegistrySession(t) + results, err := r.SearchRepositories("fakequery", 25) + if err != nil { + t.Fatal(err) + } + if results == nil { + t.Fatal("Expected non-nil SearchResults object") + } + assertEqual(t, results.NumResults, 1, "Expected 1 search results") + assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") + assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") +} + +func TestTrustedLocation(t *testing.T) { + for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == true { + t.Fatalf("'%s' shouldn't be detected as a trusted location", url) + } + } + + for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == false { + t.Fatalf("'%s' should be detected as a trusted location", url) + } + } +} + +func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { + for _, urls := range [][]string{ + {"http://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "http://bar.docker.com"}, + {"https://foo.docker.io", "https://example.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 1 { + t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "" { + t.Fatal("'Authorization' should be empty") + } + } + + for _, urls := range [][]string{ + {"https://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "https://bar.docker.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 2 { + t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "super_secret" { + t.Fatal("'Authorization' should be 'super_secret'") + } + } +} + +func TestIsSecureIndex(t *testing.T) { + tests := []struct { + addr string + insecureRegistries []string + expected bool + }{ + {IndexName, nil, true}, + {"example.com", []string{}, true}, + {"example.com", []string{"example.com"}, false}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, true}, + {"example.com", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, false}, + {"example.com", []string{"42.42.0.0/16"}, false}, + {"example.com:5000", []string{"42.42.42.42/8"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, + {"invalid.domain.com", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, + } + for _, tt := range tests { + config := makeServiceConfig(nil, tt.insecureRegistries) + if sec := isSecureIndex(config, tt.addr); sec != tt.expected { + t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + } + } +} + +type debugTransport struct { + http.RoundTripper + log func(...interface{}) +} + +func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) { + dump, err := httputil.DumpRequestOut(req, false) + if err != nil { + tr.log("could not dump request") + } + tr.log(string(dump)) + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + dump, err = httputil.DumpResponse(resp, false) + if err != nil { + tr.log("could not dump response") + } + tr.log(string(dump)) + return resp, err +} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go new file mode 100644 index 0000000..596a9c7 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service.go @@ -0,0 +1,304 @@ +package registry + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" +) + +const ( + // DefaultSearchLimit is the default value for maximum number of returned search results. + DefaultSearchLimit = 25 +) + +// Service is the interface defining what a registry service should implement. +type Service interface { + Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) + LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) + LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) + ResolveRepository(name reference.Named) (*RepositoryInfo, error) + Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) + ServiceConfig() *registrytypes.ServiceConfig + TLSConfig(hostname string) (*tls.Config, error) + LoadInsecureRegistries([]string) error +} + +// DefaultService is a registry service. It tracks configuration data such as a list +// of mirrors. +type DefaultService struct { + config *serviceConfig + mu sync.Mutex +} + +// NewService returns a new instance of DefaultService ready to be +// installed into an engine. +func NewService(options ServiceOptions) *DefaultService { + return &DefaultService{ + config: newServiceConfig(options), + } +} + +// ServiceConfig returns the public registry service configuration. +func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { + s.mu.Lock() + defer s.mu.Unlock() + + servConfig := registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), + IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), + Mirrors: make([]string, 0), + } + + // construct a new ServiceConfig which will not retrieve s.Config directly, + // and look up items in s.config with mu locked + servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) + + for key, value := range s.config.ServiceConfig.IndexConfigs { + servConfig.IndexConfigs[key] = value + } + + servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) + + return &servConfig +} + +// LoadInsecureRegistries loads insecure registries for Service +func (s *DefaultService) LoadInsecureRegistries(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadInsecureRegistries(registries) +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was successful. +// It can be used to verify the validity of a client's credentials. +func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { + // TODO Use ctx when searching for repositories + serverAddress := authConfig.ServerAddress + if serverAddress == "" { + serverAddress = IndexServer + } + if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { + serverAddress = "https://" + serverAddress + } + u, err := url.Parse(serverAddress) + if err != nil { + return "", "", fmt.Errorf("unable to parse server address: %v", err) + } + + endpoints, err := s.LookupPushEndpoints(u.Host) + if err != nil { + return "", "", err + } + + for _, endpoint := range endpoints { + login := loginV2 + if endpoint.Version == APIVersion1 { + login = loginV1 + } + + status, token, err = login(authConfig, endpoint, userAgent) + if err == nil { + return + } + if fErr, ok := err.(fallbackError); ok { + err = fErr.err + logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err) + continue + } + return "", "", err + } + + return "", "", err +} + +// splitReposSearchTerm breaks a search term into an index name and remote name +func splitReposSearchTerm(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + var indexName, remoteName string + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Index repos (ex: samalba/hipache or ubuntu) + // 'docker.io' + indexName = IndexName + remoteName = reposName + } else { + indexName = nameParts[0] + remoteName = nameParts[1] + } + return indexName, remoteName +} + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. +func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { + // TODO Use ctx when searching for repositories + if err := validateNoScheme(term); err != nil { + return nil, err + } + + indexName, remoteName := splitReposSearchTerm(term) + + // Search is a long-running operation, just lock s.config to avoid block others. + s.mu.Lock() + index, err := newIndexInfo(s.config, indexName) + s.mu.Unlock() + + if err != nil { + return nil, err + } + + // *TODO: Search multiple indexes. + endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) + if err != nil { + return nil, err + } + + var client *http.Client + if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { + creds := NewStaticCredentialStore(authConfig) + scopes := []auth.Scope{ + auth.RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + } + + modifiers := DockerHeaders(userAgent, nil) + v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) + if err != nil { + if fErr, ok := err.(fallbackError); ok { + logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) + } else { + return nil, err + } + } else if foundV2 { + // Copy non transport http client features + v2Client.Timeout = endpoint.client.Timeout + v2Client.CheckRedirect = endpoint.client.CheckRedirect + v2Client.Jar = endpoint.client.Jar + + logrus.Debugf("using v2 client for search to %s", endpoint.URL) + client = v2Client + } + } + + if client == nil { + client = endpoint.client + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + } + + r := newSession(client, authConfig, endpoint) + + if index.Official { + localName := remoteName + if strings.HasPrefix(localName, "library/") { + // If pull "library/foo", it's stored locally under "foo" + localName = strings.SplitN(localName, "/", 2)[1] + } + + return r.SearchRepositories(localName, limit) + } + return r.SearchRepositories(remoteName, limit) +} + +// ResolveRepository splits a repository name into its components +// and configuration of the associated registry. +func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + s.mu.Lock() + defer s.mu.Unlock() + return newRepositoryInfo(s.config, name) +} + +// APIEndpoint represents a remote API endpoint +type APIEndpoint struct { + Mirror bool + URL *url.URL + Version APIVersion + Official bool + TrimHostname bool + TLSConfig *tls.Config +} + +// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint +func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) +} + +// TLSConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { + s.mu.Lock() + defer s.mu.Unlock() + + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +// tlsConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { + return s.tlsConfig(mirrorURL.Host) +} + +// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. +// It gives preference to v2 endpoints over v1, mirrors over the actual +// registry, and HTTPS over plain HTTP. +func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + return s.lookupEndpoints(hostname) +} + +// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. +// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. +// Mirrors are not included. +func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + allEndpoints, err := s.lookupEndpoints(hostname) + if err == nil { + for _, endpoint := range allEndpoints { + if !endpoint.Mirror { + endpoints = append(endpoints, endpoint) + } + } + } + return endpoints, err +} + +func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + endpoints, err = s.lookupV2Endpoints(hostname) + if err != nil { + return nil, err + } + + if s.config.V2Only { + return endpoints, nil + } + + legacyEndpoints, err := s.lookupV1Endpoints(hostname) + if err != nil { + return nil, err + } + endpoints = append(endpoints, legacyEndpoints...) + + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go new file mode 100644 index 0000000..1d251ae --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v1.go @@ -0,0 +1,40 @@ +package registry + +import "net/url" + +func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname { + return []APIEndpoint{}, nil + } + + tlsConfig, err := s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ // or this + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/service_v1_test.go b/vendor/github.com/docker/docker/registry/service_v1_test.go new file mode 100644 index 0000000..bd15dff --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v1_test.go @@ -0,0 +1,23 @@ +package registry + +import "testing" + +func TestLookupV1Endpoints(t *testing.T) { + s := NewService(ServiceOptions{}) + + cases := []struct { + hostname string + expectedLen int + }{ + {"example.com", 1}, + {DefaultNamespace, 0}, + {DefaultV2Registry.Host, 0}, + {IndexHostname, 0}, + } + + for _, c := range cases { + if ret, err := s.lookupV1Endpoints(c.hostname); err != nil || len(ret) != c.expectedLen { + t.Errorf("lookupV1Endpoints(`"+c.hostname+"`) returned %+v and %+v", ret, err) + } + } +} diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go new file mode 100644 index 0000000..228d745 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -0,0 +1,78 @@ +package registry + +import ( + "net/url" + "strings" + + "github.com/docker/go-connections/tlsconfig" +) + +func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + tlsConfig := tlsconfig.ServerDefault() + if hostname == DefaultNamespace || hostname == IndexHostname { + // v2 mirrors + for _, mirror := range s.config.Mirrors { + if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { + mirror = "https://" + mirror + } + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, err + } + mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) + if err != nil { + return nil, err + } + endpoints = append(endpoints, APIEndpoint{ + URL: mirrorURL, + // guess mirrors are v2 + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + // v2 registry + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + + return endpoints, nil + } + + tlsConfig, err = s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion2, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion2, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go new file mode 100644 index 0000000..72e286a --- /dev/null +++ b/vendor/github.com/docker/docker/registry/session.go @@ -0,0 +1,783 @@ +package registry + +import ( + "bytes" + "crypto/sha256" + "errors" + "sync" + // this is required for some certificates + _ "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/reference" +) + +var ( + // ErrRepoNotFound is returned if the repository didn't exist on the + // remote side + ErrRepoNotFound = errors.New("Repository not found") +) + +// A Session is used to communicate with a V1 registry +type Session struct { + indexEndpoint *V1Endpoint + client *http.Client + // TODO(tiborvass): remove authConfig + authConfig *types.AuthConfig + id string +} + +type authTransport struct { + http.RoundTripper + *types.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. +// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. +// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. +func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes an HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. + if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + delete(tr.modReq, orig) + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.Ping() + if err != nil { + return err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. + client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return errors.New("cookiejar.New is not supposed to return an error") + } + client.Jar = jar + + return nil +} + +func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { + return &Session{ + authConfig: authConfig, + client: client, + indexEndpoint: endpoint, + id: stringid.GenerateRandomID(), + } +} + +// NewSession creates a new session +// TODO(tiborvass): remove authConfig param once registry client v2 is vendored +func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + + return newSession(client, authConfig, endpoint), nil +} + +// ID returns this registry session's ID. +func (r *Session) ID() string { + return r.id +} + +// GetRemoteHistory retrieves the history of a given image from the registry. +// It returns a list of the parent's JSON files (including the requested image). +func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + var history []string + if err := json.NewDecoder(res.Body).Decode(&history); err != nil { + return nil, fmt.Errorf("Error while reading the http response: %v", err) + } + + logrus.Debugf("Ancestry: %v", history) + return history, nil +} + +// LookupRemoteImage checks if an image exists in the registry +func (r *Session) LookupRemoteImage(imgID, registry string) error { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + return nil +} + +// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. +func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := int64(-1) + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.ParseInt(hdr, 10, 64) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +// GetRemoteImageLayer retrieves an image layer from the registry +func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { + var ( + statusCode = 0 + res *http.Response + err error + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := http.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + statusCode = 0 + res, err = r.client.Do(req) + if err != nil { + logrus.Debugf("Error contacting registry %s: %v", registry, err) + // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + logrus.Debug("server supports resume") + return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + } + logrus.Debug("server doesn't support resume") + return res.Body, nil +} + +// GetRemoteTag retrieves the tag named in the askedTag argument from the given +// repository. It queries each of the registries supplied in the registries +// argument, and returns data from the first one that answers the query +// successfully. +func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { + repository := repositoryRef.RemoteName() + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) + res, err := r.client.Get(endpoint) + if err != nil { + return "", err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return "", ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + var tagID string + if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { + return "", err + } + return tagID, nil + } + return "", fmt.Errorf("Could not reach any registry endpoint") +} + +// GetRemoteTags retrieves all tags from the given repository. It queries each +// of the registries supplied in the registries argument, and returns data from +// the first one that answers the query successfully. It returns a map with +// tag names as the keys and image IDs as the values. +func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { + repository := repositoryRef.RemoteName() + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) + res, err := r.client.Get(endpoint) + if err != nil { + return nil, err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return nil, ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + result := make(map[string]string) + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { + return nil, err + } + return result, nil + } + return nil, fmt.Errorf("Could not reach any registry endpoint") +} + +func buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedURL, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedURL.Scheme + // The registry's URL scheme has to match the Index' + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + +// GetRepositoryData returns lists of images and endpoints for the repository +func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), name.RemoteName()) + + logrus.Debugf("[registry] Calling GET %s", repositoryTarget) + + req, err := http.NewRequest("GET", repositoryTarget, nil) + if err != nil { + return nil, err + } + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + // check if the error is because of i/o timeout + // and return a non-obtuse error message for users + // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" + // was a top search on the docker user forum + if isTimeout(err) { + return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) + } + return nil, fmt.Errorf("Error while pulling image: %v", err) + } + defer res.Body.Close() + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + // TODO: Right now we're ignoring checksums in the response body. + // In the future, we need to use them to check image validity. + if res.StatusCode == 404 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } else if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + remoteChecksums := []*ImgData{} + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData, len(remoteChecksums)) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + }, nil +} + +// PushImageChecksumRegistry uploads checksums for an image +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { + u := registry + "images/" + imgData.ID + "/checksum" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, nil) + if err != nil { + return err + } + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %v", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.client.Jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) + } + return nil +} + +// PushImageJSONRegistry pushes JSON metadata for a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { + + u := registry + "images/" + imgData.ID + "/json" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + } + return nil +} + +// PushImageLayerRegistry sends the checksum of an image layer to the registry +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + u := registry + "images/" + imgID + "/layer" + + logrus.Debugf("[registry] Calling PUT %s", u) + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := http.NewRequest("PUT", u, checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + res, err := r.client.Do(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %v", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// PushRegistryTag pushes a tag on the registry. +// Remote has the format '/ +func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { + // "jsonify" the string + revision = "\"" + revision + "\"" + path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag) + + req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + req.ContentLength = int64(len(revision)) + res, err := r.client.Do(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 && res.StatusCode != 201 { + return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res) + } + return nil +} + +// PushImageJSONIndex uploads an image list to the repository +func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { + cleanImgList := []*ImgData{} + if validate { + for _, elem := range imgList { + if elem.Checksum != "" { + cleanImgList = append(cleanImgList, elem) + } + } + } else { + cleanImgList = imgList + } + + imgListJSON, err := json.Marshal(cleanImgList) + if err != nil { + return nil, err + } + var suffix string + if validate { + suffix = "images" + } + u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), remote.RemoteName(), suffix) + logrus.Debugf("[registry] PUT %s", u) + logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) + headers := map[string][]string{ + "Content-type": {"application/json"}, + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + "X-Docker-Token": {"true"}, + } + if validate { + headers["X-Docker-Endpoints"] = regs + } + + // Redirect if necessary + var res *http.Response + for { + if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { + return nil, err + } + if !shouldRedirect(res) { + break + } + res.Body.Close() + u = res.Header.Get("Location") + logrus.Debugf("Redirected to %s", u) + } + defer res.Body.Close() + + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + + var tokens, endpoints []string + if !validate { + if res.StatusCode != 200 && res.StatusCode != 201 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + } + tokens = res.Header["X-Docker-Token"] + logrus.Debugf("Auth token: %v", tokens) + + if res.Header.Get("X-Docker-Endpoints") == "" { + return nil, fmt.Errorf("Index response didn't contain any endpoints") + } + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) + if err != nil { + return nil, err + } + } else { + if res.StatusCode != 204 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + } + } + + return &RepositoryData{ + Endpoints: endpoints, + }, nil +} + +func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { + req, err := http.NewRequest("PUT", u, bytes.NewReader(body)) + if err != nil { + return nil, err + } + req.ContentLength = int64(len(body)) + for k, v := range headers { + req.Header[k] = v + } + response, err := r.client.Do(req) + if err != nil { + return nil, err + } + return response, nil +} + +func shouldRedirect(response *http.Response) bool { + return response.StatusCode >= 300 && response.StatusCode < 400 +} + +// SearchRepositories performs a search against the remote repository +func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { + if limit < 1 || limit > 100 { + return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit) + } + logrus.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + // Have the AuthTransport send authentication, when logged in. + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + } + result := new(registrytypes.SearchResults) + return result, json.NewDecoder(res.Body).Decode(result) +} + +// GetAuthConfig returns the authentication settings for a session +// TODO(tiborvass): remove this once registry client v2 is vendored +func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { + password := "" + if withPasswd { + password = r.authConfig.Password + } + return &types.AuthConfig{ + Username: r.authConfig.Username, + Password: password, + } +} + +func isTimeout(err error) bool { + type timeout interface { + Timeout() bool + } + e := err + switch urlErr := err.(type) { + case *url.Error: + e = urlErr.Err + } + t, ok := e.(timeout) + return ok && t.Timeout() +} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go new file mode 100644 index 0000000..49c123a --- /dev/null +++ b/vendor/github.com/docker/docker/registry/types.go @@ -0,0 +1,73 @@ +package registry + +import ( + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" +) + +// RepositoryData tracks the image list, list of endpoints, and list of tokens +// for a repository +type RepositoryData struct { + // ImgList is a list of images in the repository + ImgList map[string]*ImgData + // Endpoints is a list of endpoints returned in X-Docker-Endpoints + Endpoints []string + // Tokens is currently unused (remove it?) + Tokens []string +} + +// ImgData is used to transfer image checksums to and from the registry +type ImgData struct { + // ID is an opaque string that identifies the image + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +// PingResult contains the information returned when pinging a registry. It +// indicates the registry's version and whether the registry claims to be a +// standalone registry. +type PingResult struct { + // Version is the registry version supplied by the registry in an HTTP + // header + Version string `json:"version"` + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// APIVersion is an integral representation of an API version (presently +// either 1 or 2) +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +// API Version identifiers. +const ( + _ = iota + APIVersion1 APIVersion = iota + APIVersion2 +) + +var apiVersions = map[APIVersion]string{ + APIVersion1: "v1", + APIVersion2: "v2", +} + +// RepositoryInfo describes a repository +type RepositoryInfo struct { + reference.Named + // Index points to registry information + Index *registrytypes.IndexInfo + // Official indicates whether the repository is considered official. + // If the registry is official, and the normalized name does not + // contain a '/' (e.g. "foo"), then it is considered an official repo. + Official bool + // Class represents the class of the repository, such as "plugin" + // or "image". + Class string +} diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager.go b/vendor/github.com/docker/docker/restartmanager/restartmanager.go new file mode 100644 index 0000000..570fc93 --- /dev/null +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager.go @@ -0,0 +1,128 @@ +package restartmanager + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/docker/api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. +var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + restartCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartmanager based on a policy. +func New(policy container.RestartPolicy, restartCount int) RestartManager { + return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})} +} + +func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { + rm.Lock() + rm.policy = policy + rm.Unlock() +} + +func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { + if rm.policy.IsNone() { + return false, nil, nil + } + rm.Lock() + unlockOnExit := true + defer func() { + if unlockOnExit { + rm.Unlock() + } + }() + + if rm.canceled { + return false, nil, ErrRestartCanceled + } + + if rm.active { + return false, nil, fmt.Errorf("invalid call on active restartmanager") + } + // if the container ran for more than 10s, regardless of status and policy reset the + // the timeout back to the default. + if executionDuration.Seconds() >= 10 { + rm.timeout = 0 + } + if rm.timeout == 0 { + rm.timeout = defaultTimeout + } else { + rm.timeout *= backoffMultiplier + } + + var restart bool + switch { + case rm.policy.IsAlways(): + restart = true + case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped: + restart = true + case rm.policy.IsOnFailure(): + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max { + restart = exitCode != 0 + } + } + + if !restart { + rm.active = false + return false, nil, nil + } + + rm.restartCount++ + + unlockOnExit = false + rm.active = true + rm.Unlock() + + ch := make(chan error) + go func() { + select { + case <-rm.cancel: + ch <- ErrRestartCanceled + close(ch) + case <-time.After(rm.timeout): + rm.Lock() + close(ch) + rm.active = false + rm.Unlock() + } + }() + + return true, ch, nil +} + +func (rm *restartManager) Cancel() error { + rm.Do(func() { + rm.Lock() + rm.canceled = true + close(rm.cancel) + rm.Unlock() + }) + return nil +} diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go b/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go new file mode 100644 index 0000000..20eced5 --- /dev/null +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go @@ -0,0 +1,34 @@ +package restartmanager + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types/container" +) + +func TestRestartManagerTimeout(t *testing.T) { + rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) + should, _, err := rm.ShouldRestart(0, false, 1*time.Second) + if err != nil { + t.Fatal(err) + } + if !should { + t.Fatal("container should be restarted") + } + if rm.timeout != 100*time.Millisecond { + t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) + } +} + +func TestRestartManagerTimeoutReset(t *testing.T) { + rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) + rm.timeout = 5 * time.Second + _, _, err := rm.ShouldRestart(0, false, 10*time.Second) + if err != nil { + t.Fatal(err) + } + if rm.timeout != 100*time.Millisecond { + t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) + } +} diff --git a/vendor/github.com/docker/docker/runconfig/compare.go b/vendor/github.com/docker/docker/runconfig/compare.go new file mode 100644 index 0000000..708922f --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/compare.go @@ -0,0 +1,61 @@ +package runconfig + +import "github.com/docker/docker/api/types/container" + +// Compare two Config struct. Do not compare the "Image" nor "Hostname" fields +// If OpenStdin is set, then it differs +func Compare(a, b *container.Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty { + return false + } + + if len(a.Cmd) != len(b.Cmd) || + len(a.Env) != len(b.Env) || + len(a.Labels) != len(b.Labels) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for k, v := range a.Labels { + if v != b.Labels[k] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/runconfig/compare_test.go b/vendor/github.com/docker/docker/runconfig/compare_test.go new file mode 100644 index 0000000..6370d7a --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/compare_test.go @@ -0,0 +1,126 @@ +package runconfig + +import ( + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestCompare(t *testing.T) { + ports1 := make(nat.PortSet) + ports1[newPortNoError("tcp", "1111")] = struct{}{} + ports1[newPortNoError("tcp", "2222")] = struct{}{} + ports2 := make(nat.PortSet) + ports2[newPortNoError("tcp", "3333")] = struct{}{} + ports2[newPortNoError("tcp", "4444")] = struct{}{} + ports3 := make(nat.PortSet) + ports3[newPortNoError("tcp", "1111")] = struct{}{} + ports3[newPortNoError("tcp", "2222")] = struct{}{} + ports3[newPortNoError("tcp", "5555")] = struct{}{} + volumes1 := make(map[string]struct{}) + volumes1["/test1"] = struct{}{} + volumes2 := make(map[string]struct{}) + volumes2["/test2"] = struct{}{} + volumes3 := make(map[string]struct{}) + volumes3["/test1"] = struct{}{} + volumes3["/test3"] = struct{}{} + envs1 := []string{"ENV1=value1", "ENV2=value2"} + envs2 := []string{"ENV1=value1", "ENV3=value3"} + entrypoint1 := strslice.StrSlice{"/bin/sh", "-c"} + entrypoint2 := strslice.StrSlice{"/bin/sh", "-d"} + entrypoint3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} + cmd1 := strslice.StrSlice{"/bin/sh", "-c"} + cmd2 := strslice.StrSlice{"/bin/sh", "-d"} + cmd3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} + labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} + labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} + labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} + + sameConfigs := map[*container.Config]*container.Config{ + // Empty config + &container.Config{}: {}, + // Does not compare hostname, domainname & image + &container.Config{ + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user", + }: { + Hostname: "host2", + Domainname: "domain2", + Image: "image2", + User: "user", + }, + // only OpenStdin + &container.Config{OpenStdin: false}: {OpenStdin: false}, + // only env + &container.Config{Env: envs1}: {Env: envs1}, + // only cmd + &container.Config{Cmd: cmd1}: {Cmd: cmd1}, + // only labels + &container.Config{Labels: labels1}: {Labels: labels1}, + // only exposedPorts + &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, + // only entrypoints + &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, + // only volumes + &container.Config{Volumes: volumes1}: {Volumes: volumes1}, + } + differentConfigs := map[*container.Config]*container.Config{ + nil: nil, + &container.Config{ + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user1", + }: { + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user2", + }, + // only OpenStdin + &container.Config{OpenStdin: false}: {OpenStdin: true}, + &container.Config{OpenStdin: true}: {OpenStdin: false}, + // only env + &container.Config{Env: envs1}: {Env: envs2}, + // only cmd + &container.Config{Cmd: cmd1}: {Cmd: cmd2}, + // not the same number of parts + &container.Config{Cmd: cmd1}: {Cmd: cmd3}, + // only labels + &container.Config{Labels: labels1}: {Labels: labels2}, + // not the same number of labels + &container.Config{Labels: labels1}: {Labels: labels3}, + // only exposedPorts + &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, + // not the same number of ports + &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, + // only entrypoints + &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, + // not the same number of parts + &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, + // only volumes + &container.Config{Volumes: volumes1}: {Volumes: volumes2}, + // not the same number of labels + &container.Config{Volumes: volumes1}: {Volumes: volumes3}, + } + for config1, config2 := range sameConfigs { + if !Compare(config1, config2) { + t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) + } + } + for config1, config2 := range differentConfigs { + if Compare(config1, config2) { + t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) + } + } +} diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go new file mode 100644 index 0000000..508681c --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config.go @@ -0,0 +1,97 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/volume" +) + +// ContainerDecoder implements httputils.ContainerDecoder +// calling DecodeContainerConfig. +type ContainerDecoder struct{} + +// DecodeConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + return DecodeContainerConfig(src) +} + +// DecodeHostConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + return DecodeHostConfig(src) +} + +// DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper +// struct and returns both a Config and a HostConfig struct +// Be aware this function is not checking whether the resulted structs are nil, +// it's your business to do so +func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) + if err := decoder.Decode(&w); err != nil { + return nil, nil, nil, err + } + + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. + if w.Config != nil && hc != nil { + + // Initialize the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + + // Now validate all the volumes and binds + if err := validateMountSettings(w.Config, hc); err != nil { + return nil, nil, nil, err + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. + if err := ValidateNetMode(w.Config, hc); err != nil { + return nil, nil, nil, err + } + + // Validate isolation + if err := ValidateIsolation(hc); err != nil { + return nil, nil, nil, err + } + + // Validate QoS + if err := ValidateQoS(hc); err != nil { + return nil, nil, nil, err + } + + // Validate Resources + if err := ValidateResources(hc, sysinfo.New(true)); err != nil { + return nil, nil, nil, err + } + return w.Config, hc, w.NetworkingConfig, nil +} + +// validateMountSettings validates each of the volumes and bind settings +// passed by the caller to ensure they are valid. +func validateMountSettings(c *container.Config, hc *container.HostConfig) error { + // it is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len (c.Volumes) > 0 || len (hc.Tmpfs) > 0 ) + + // Ensure all volumes and binds are valid. + for spec := range c.Volumes { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid volume spec %q: %v", spec, err) + } + } + for _, spec := range hc.Binds { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid bind mount spec %q: %v", spec, err) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/config_test.go b/vendor/github.com/docker/docker/runconfig/config_test.go new file mode 100644 index 0000000..f1f9de5 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_test.go @@ -0,0 +1,139 @@ +package runconfig + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" +) + +type f struct { + file string + entrypoint strslice.StrSlice +} + +func TestDecodeContainerConfig(t *testing.T) { + + var ( + fixtures []f + image string + ) + + //TODO: Should run for Solaris + if runtime.GOOS == "solaris" { + t.Skip() + } + + if runtime.GOOS != "windows" { + image = "ubuntu" + fixtures = []f{ + {"fixtures/unix/container_config_1_14.json", strslice.StrSlice{}}, + {"fixtures/unix/container_config_1_17.json", strslice.StrSlice{"bash"}}, + {"fixtures/unix/container_config_1_19.json", strslice.StrSlice{"bash"}}, + } + } else { + image = "windows" + fixtures = []f{ + {"fixtures/windows/container_config_1_19.json", strslice.StrSlice{"cmd"}}, + } + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, h, _, err := DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Image != image { + t.Fatalf("Expected %s image, found %s\n", image, c.Image) + } + + if len(c.Entrypoint) != len(f.entrypoint) { + t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) + } + + if h != nil && h.Memory != 1000 { + t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) + } + } +} + +// TestDecodeContainerConfigIsolation validates isolation passed +// to the daemon in the hostConfig structure. Note this is platform specific +// as to what level of container isolation is supported. +func TestDecodeContainerConfigIsolation(t *testing.T) { + + // An invalid isolation level + if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { + t.Fatal(err) + } + } + + // Blank isolation (== default) + if _, _, _, err := callDecodeContainerConfigIsolation(""); err != nil { + t.Fatal("Blank isolation should have succeeded") + } + + // Default isolation + if _, _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { + t.Fatal("default isolation should have succeeded") + } + + // Process isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + t.Fatal("process isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "process"`) { + t.Fatal(err) + } + } + } + + // Hyper-V Containers isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + t.Fatal("hyperv isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { + t.Fatal(err) + } + } + } +} + +// callDecodeContainerConfigIsolation is a utility function to call +// DecodeContainerConfig for validating isolation +func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var ( + b []byte + err error + ) + w := ContainerConfigWrapper{ + Config: &container.Config{}, + HostConfig: &container.HostConfig{ + NetworkMode: "none", + Isolation: container.Isolation(isolation)}, + } + if b, err = json.Marshal(w); err != nil { + return nil, nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) + } + return DecodeContainerConfig(bytes.NewReader(b)) +} diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go new file mode 100644 index 0000000..4ccfc73 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. +// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. + hc = SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/config_windows.go b/vendor/github.com/docker/docker/runconfig/config_windows.go new file mode 100644 index 0000000..f2361b5 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_windows.go @@ -0,0 +1,19 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + HostConfig *container.HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} + +// getHostConfig gets the HostConfig of the Config. +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + return w.HostConfig +} diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go new file mode 100644 index 0000000..bb72c16 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/errors.go @@ -0,0 +1,46 @@ +package runconfig + +import ( + "fmt" + + "github.com/docker/docker/api/errors" +) + +var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") + // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links + ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. + ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. This would result in undefined behavior") + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname = fmt.Errorf("Conflicting options: hostname and the UTS mode") +) + +func conflictError(err error) error { + return errors.NewRequestConflictError(err) +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json new file mode 100644 index 0000000..b08334c --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json @@ -0,0 +1,30 @@ +{ + "Hostname":"", + "Domainname": "", + "User":"", + "Memory": 1000, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "bash" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json new file mode 100644 index 0000000..0d78087 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json @@ -0,0 +1,50 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpt": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json new file mode 100644 index 0000000..de49cf3 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json new file mode 100644 index 0000000..c72ac91 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json @@ -0,0 +1,18 @@ +{ + "Binds": ["/tmp:/tmp"], + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json new file mode 100644 index 0000000..5ca8aa7 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json @@ -0,0 +1,30 @@ +{ + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json b/vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json new file mode 100644 index 0000000..724320c --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "cmd", + "Image": "windows", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "c:/windows": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["c:/windows:d:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "default", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go new file mode 100644 index 0000000..2b81d02 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -0,0 +1,35 @@ +package runconfig + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. +func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go b/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go new file mode 100644 index 0000000..83ad32e --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go @@ -0,0 +1,41 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return false +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + return nil +} + +// ValidateIsolation performs platform specific validation of the +// isolation level in the hostconfig structure. +// This setting is currently discarded for Solaris so this is a no-op. +func ValidateIsolation(hc *container.HostConfig) error { + return nil +} + +// ValidateQoS performs platform specific validation of the QoS settings +func ValidateQoS(hc *container.HostConfig) error { + return nil +} + +// ValidateResources performs platform specific validation of the resource settings +func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go new file mode 100644 index 0000000..a6a2b34 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go @@ -0,0 +1,283 @@ +// +build !windows + +package runconfig + +import ( + "bytes" + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// TODO Windows: This will need addressing for a Windows daemon. +func TestNetworkModeTest(t *testing.T) { + networkModes := map[container.NetworkMode][]bool{ + // private, bridge, host, container, none, default + "": {true, false, false, false, false, false}, + "something:weird": {true, false, false, false, false, false}, + "bridge": {true, true, false, false, false, false}, + DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, + "host": {false, false, true, false, false, false}, + "container:name": {false, false, false, true, false, false}, + "none": {true, false, false, false, true, false}, + "default": {true, false, false, false, false, true}, + } + networkModeNames := map[container.NetworkMode]string{ + "": "", + "something:weird": "something:weird", + "bridge": "bridge", + DefaultDaemonNetworkMode(): "bridge", + "host": "host", + "container:name": "container", + "none": "none", + "default": "default", + } + for networkMode, state := range networkModes { + if networkMode.IsPrivate() != state[0] { + t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) + } + if networkMode.IsBridge() != state[1] { + t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) + } + if networkMode.IsHost() != state[2] { + t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) + } + if networkMode.IsContainer() != state[3] { + t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) + } + if networkMode.IsNone() != state[4] { + t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) + } + if networkMode.IsDefault() != state[5] { + t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) + } + if networkMode.NetworkName() != networkModeNames[networkMode] { + t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) + } + } +} + +func TestIpcModeTest(t *testing.T) { + ipcModes := map[container.IpcMode][]bool{ + // private, host, container, valid + "": {true, false, false, true}, + "something:weird": {true, false, false, false}, + ":weird": {true, false, false, true}, + "host": {false, true, false, true}, + "container:name": {false, false, true, true}, + "container:name:something": {false, false, true, false}, + "container:": {false, false, true, false}, + } + for ipcMode, state := range ipcModes { + if ipcMode.IsPrivate() != state[0] { + t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) + } + if ipcMode.IsHost() != state[1] { + t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) + } + if ipcMode.IsContainer() != state[2] { + t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) + } + if ipcMode.Valid() != state[3] { + t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) + } + } + containerIpcModes := map[container.IpcMode]string{ + "": "", + "something": "", + "something:weird": "weird", + "container": "", + "container:": "", + "container:name": "name", + "container:name1:name2": "name1:name2", + } + for ipcMode, container := range containerIpcModes { + if ipcMode.Container() != container { + t.Fatalf("Expected %v for %v but was %v", container, ipcMode, ipcMode.Container()) + } + } +} + +func TestUTSModeTest(t *testing.T) { + utsModes := map[container.UTSMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for utsMode, state := range utsModes { + if utsMode.IsPrivate() != state[0] { + t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) + } + if utsMode.IsHost() != state[1] { + t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) + } + if utsMode.Valid() != state[2] { + t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) + } + } +} + +func TestUsernsModeTest(t *testing.T) { + usrensMode := map[container.UsernsMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for usernsMode, state := range usrensMode { + if usernsMode.IsPrivate() != state[0] { + t.Fatalf("UsernsMode.IsPrivate for %v should have been %v but was %v", usernsMode, state[0], usernsMode.IsPrivate()) + } + if usernsMode.IsHost() != state[1] { + t.Fatalf("UsernsMode.IsHost for %v should have been %v but was %v", usernsMode, state[1], usernsMode.IsHost()) + } + if usernsMode.Valid() != state[2] { + t.Fatalf("UsernsMode.Valid for %v should have been %v but was %v", usernsMode, state[2], usernsMode.Valid()) + } + } +} + +func TestPidModeTest(t *testing.T) { + pidModes := map[container.PidMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for pidMode, state := range pidModes { + if pidMode.IsPrivate() != state[0] { + t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) + } + if pidMode.IsHost() != state[1] { + t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) + } + if pidMode.Valid() != state[2] { + t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) + } + } +} + +func TestRestartPolicy(t *testing.T) { + restartPolicies := map[container.RestartPolicy][]bool{ + // none, always, failure + container.RestartPolicy{}: {true, false, false}, + container.RestartPolicy{"something", 0}: {false, false, false}, + container.RestartPolicy{"no", 0}: {true, false, false}, + container.RestartPolicy{"always", 0}: {false, true, false}, + container.RestartPolicy{"on-failure", 0}: {false, false, true}, + } + for restartPolicy, state := range restartPolicies { + if restartPolicy.IsNone() != state[0] { + t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) + } + if restartPolicy.IsAlways() != state[1] { + t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) + } + if restartPolicy.IsOnFailure() != state[2] { + t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) + } + } +} +func TestDecodeHostConfig(t *testing.T) { + fixtures := []struct { + file string + }{ + {"fixtures/unix/container_hostconfig_1_14.json"}, + {"fixtures/unix/container_hostconfig_1_19.json"}, + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, err := DecodeHostConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Privileged != false { + t.Fatalf("Expected privileged false, found %v\n", c.Privileged) + } + + if l := len(c.Binds); l != 1 { + t.Fatalf("Expected 1 bind, found %d\n", l) + } + + if len(c.CapAdd) != 1 && c.CapAdd[0] != "NET_ADMIN" { + t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) + } + + if len(c.CapDrop) != 1 && c.CapDrop[0] != "NET_ADMIN" { + t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) + } + } +} + +func TestValidateResources(t *testing.T) { + type resourceTest struct { + ConfigCPURealtimePeriod int64 + ConfigCPURealtimeRuntime int64 + SysInfoCPURealtimePeriod bool + SysInfoCPURealtimeRuntime bool + ErrorExpected bool + FailureMsg string + } + + tests := []resourceTest{ + { + ConfigCPURealtimePeriod: 1000, + ConfigCPURealtimeRuntime: 1000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: false, + FailureMsg: "Expected valid configuration", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: false, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-period is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 10000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is greater than cpu-rt-period", + }, + } + + for _, rt := range tests { + var hc container.HostConfig + hc.Resources.CPURealtimePeriod = rt.ConfigCPURealtimePeriod + hc.Resources.CPURealtimeRuntime = rt.ConfigCPURealtimeRuntime + + var si sysinfo.SysInfo + si.CPURealtimePeriod = rt.SysInfoCPURealtimePeriod + si.CPURealtimeRuntime = rt.SysInfoCPURealtimeRuntime + + if err := ValidateResources(&hc, &si); (err != nil) != rt.ErrorExpected { + t.Fatal(rt.FailureMsg, err) + } + } +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go new file mode 100644 index 0000000..6e2b7f5 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -0,0 +1,129 @@ +// +build !windows,!solaris + +package runconfig + +import ( + "fmt" + "runtime" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() || network == "ingress" +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return fmt.Errorf("--net: invalid net mode: invalid container format container:") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} + +// ValidateIsolation performs platform specific validation of +// isolation in the hostconfig structure. Linux only supports "default" +// which is LXC container isolation +func ValidateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} + +// ValidateQoS performs platform specific validation of the QoS settings +func ValidateQoS(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.IOMaximumBandwidth != 0 { + return fmt.Errorf("invalid QoS settings: %s does not support --io-maxbandwidth", runtime.GOOS) + } + + if hc.IOMaximumIOps != 0 { + return fmt.Errorf("invalid QoS settings: %s does not support --io-maxiops", runtime.GOOS) + } + return nil +} + +// ValidateResources performs platform specific validation of the resource settings +// cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice +func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { + return fmt.Errorf("invalid --cpu-rt-period: Your kernel does not support cgroup rt period") + } + + if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { + return fmt.Errorf("invalid --cpu-rt-runtime: Your kernel does not support cgroup rt runtime") + } + + if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { + return fmt.Errorf("invalid --cpu-rt-runtime: rt runtime cannot be higher than rt period") + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go new file mode 100644 index 0000000..91bd6dc --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go @@ -0,0 +1,68 @@ +package runconfig + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("nat") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return !container.NetworkMode(network).IsUserDefined() +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if len(parts) > 1 { + return fmt.Errorf("invalid --net: %s", hc.NetworkMode) + } + return nil +} + +// ValidateIsolation performs platform specific validation of the +// isolation in the hostconfig structure. Windows supports 'default' (or +// blank), 'process', or 'hyperv'. +func ValidateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + } + return nil +} + +// ValidateQoS performs platform specific validation of the Qos settings +func ValidateQoS(hc *container.HostConfig) error { + return nil +} + +// ValidateResources performs platform specific validation of the resource settings +func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod != 0 { + return fmt.Errorf("invalid --cpu-rt-period: Windows does not support this feature") + } + if hc.Resources.CPURealtimeRuntime != 0 { + return fmt.Errorf("invalid --cpu-rt-runtime: Windows does not support this feature") + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/envfile.go b/vendor/github.com/docker/docker/runconfig/opts/envfile.go new file mode 100644 index 0000000..f723799 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/envfile.go @@ -0,0 +1,81 @@ +package opts + +import ( + "bufio" + "bytes" + "fmt" + "os" + "strings" + "unicode" + "unicode/utf8" +) + +// ParseEnvFile reads a file with environment variables enumerated by lines +// +// ``Environment variable names used by the utilities in the Shell and +// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase +// letters, digits, and the '_' (underscore) from the characters defined in +// Portable Character Set and do not begin with a digit. *But*, other +// characters may be permitted by an implementation; applications shall +// tolerate the presence of such names.'' +// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html +// +// As of #16585, it's up to application inside docker to validate or not +// environment variables, that's why we just strip leading whitespace and +// nothing more. +func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + currentLine := 0 + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + if !utf8.Valid(scannedBytes) { + return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes) + } + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + // trim the line from all leading whitespace first + line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) + currentLine++ + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + data := strings.SplitN(line, "=", 2) + + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + if strings.ContainsAny(variable, whiteSpaces) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} + } + + if len(data) > 1 { + + // pass the value through, no trimming + lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) + } else { + // if only a pass-through variable is given, clean it up. + lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) + } + } + } + return lines, scanner.Err() +} + +var whiteSpaces = " \t" + +// ErrBadEnvVariable typed error for bad environment variable +type ErrBadEnvVariable struct { + msg string +} + +func (e ErrBadEnvVariable) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go b/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go new file mode 100644 index 0000000..5dd7078 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go @@ -0,0 +1,142 @@ +package opts + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +func tmpFileWithContent(content string, t *testing.T) string { + tmpFile, err := ioutil.TempFile("", "envfile-test") + if err != nil { + t.Fatal(err) + } + defer tmpFile.Close() + + tmpFile.WriteString(content) + return tmpFile.Name() +} + +// Test ParseEnvFile for a file with a few well formatted lines +func TestParseEnvFileGoodFile(t *testing.T) { + content := `foo=bar + baz=quux +# comment + +_foobar=foobaz +with.dots=working +and_underscore=working too +` + // Adding a newline + a line with pure whitespace. + // This is being done like this instead of the block above + // because it's common for editors to trim trailing whitespace + // from lines, which becomes annoying since that's the + // exact thing we need to test. + content += "\n \t " + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + expectedLines := []string{ + "foo=bar", + "baz=quux", + "_foobar=foobaz", + "with.dots=working", + "and_underscore=working too", + } + + if !reflect.DeepEqual(lines, expectedLines) { + t.Fatal("lines not equal to expected_lines") + } +} + +// Test ParseEnvFile for an empty file +func TestParseEnvFileEmptyFile(t *testing.T) { + tmpFile := tmpFileWithContent("", t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + if len(lines) != 0 { + t.Fatal("lines not empty; expected empty") + } +} + +// Test ParseEnvFile for a non existent file +func TestParseEnvFileNonExistentFile(t *testing.T) { + _, err := ParseEnvFile("foo_bar_baz") + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("Expected a PathError, got [%v]", err) + } +} + +// Test ParseEnvFile for a badly formatted file +func TestParseEnvFileBadlyFormattedFile(t *testing.T) { + content := `foo=bar + f =quux +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatalf("Expected an ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} + +// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize +func TestParseEnvFileLineTooLongFile(t *testing.T) { + content := strings.Repeat("a", bufio.MaxScanTokenSize+42) + content = fmt.Sprint("foo=", content) + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } +} + +// ParseEnvFile with a random file, pass through +func TestParseEnvFileRandomFile(t *testing.T) { + content := `first line +another invalid line` + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + + if err == nil { + t.Fatalf("Expected an ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected an ErrBadEnvvariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16.env b/vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16.env new file mode 100755 index 0000000000000000000000000000000000000000..3a73358fffbc0d5d3d4df985ccf2f4a1a29cdb2a GIT binary patch literal 54 ucmezW&yB$!2yGdh7#tab7 1 { + return val, nil + } + if !doesEnvExist(val) { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if runtime.GOOS == "windows" { + // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent. + if strings.EqualFold(parts[0], name) { + return true + } + } + if parts[0] == name { + return true + } + } + return false +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := fopts.ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} + +// ValidateMACAddress validates a MAC address. +func ValidateMACAddress(val string) (string, error) { + _, err := net.ParseMAC(strings.TrimSpace(val)) + if err != nil { + return "", err + } + return val, nil +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/opts_test.go b/vendor/github.com/docker/docker/runconfig/opts/opts_test.go new file mode 100644 index 0000000..43f8730 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/opts_test.go @@ -0,0 +1,113 @@ +package opts + +import ( + "fmt" + "os" + "runtime" + "strings" + "testing" +) + +func TestValidateAttach(t *testing.T) { + valid := []string{ + "stdin", + "stdout", + "stderr", + "STDIN", + "STDOUT", + "STDERR", + } + if _, err := ValidateAttach("invalid"); err == nil { + t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") + } + + for _, attach := range valid { + value, err := ValidateAttach(attach) + if err != nil { + t.Fatal(err) + } + if value != strings.ToLower(attach) { + t.Fatalf("Expected [%v], got [%v]", attach, value) + } + } +} + +func TestValidateEnv(t *testing.T) { + valids := map[string]string{ + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + "asd!qwe": "asd!qwe", + "1asd": "1asd", + "123": "123", + "some space": "some space", + " some space before": " some space before", + "some space after ": "some space after ", + } + // Environment variables are case in-sensitive on Windows + if runtime.GOOS == "windows" { + valids["PaTh"] = fmt.Sprintf("PaTh=%v", os.Getenv("PATH")) + } + for value, expected := range valids { + actual, err := ValidateEnv(value) + if err != nil { + t.Fatal(err) + } + if actual != expected { + t.Fatalf("Expected [%v], got [%v]", expected, actual) + } + } +} + +func TestValidateExtraHosts(t *testing.T) { + valid := []string{ + `myhost:192.168.0.1`, + `thathost:10.0.2.1`, + `anipv6host:2003:ab34:e::1`, + `ipv6local:::1`, + } + + invalid := map[string]string{ + `myhost:192.notanipaddress.1`: `invalid IP`, + `thathost-nosemicolon10.0.0.1`: `bad format`, + `anipv6host:::::1`: `invalid IP`, + `ipv6local:::0::`: `invalid IP`, + } + + for _, extrahost := range valid { + if _, err := ValidateExtraHost(extrahost); err != nil { + t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) + } + } + + for extraHost, expectedError := range invalid { + if _, err := ValidateExtraHost(extraHost); err == nil { + t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) + } + } + } +} + +func TestValidateMACAddress(t *testing.T) { + if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) + } + + if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") + } + + if _, err := ValidateMACAddress(`random invalid string`); err == nil { + t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") + } +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse.go b/vendor/github.com/docker/docker/runconfig/opts/parse.go new file mode 100644 index 0000000..71a8927 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/parse.go @@ -0,0 +1,995 @@ +package opts + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "path" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +// ContainerOptions is a data object with all the options for creating a container +type ContainerOptions struct { + attach opts.ListOpts + volumes opts.ListOpts + tmpfs opts.ListOpts + blkioWeightDevice WeightdeviceOpt + deviceReadBps ThrottledeviceOpt + deviceWriteBps ThrottledeviceOpt + links opts.ListOpts + aliases opts.ListOpts + linkLocalIPs opts.ListOpts + deviceReadIOps ThrottledeviceOpt + deviceWriteIOps ThrottledeviceOpt + env opts.ListOpts + labels opts.ListOpts + devices opts.ListOpts + ulimits *UlimitOpt + sysctls *opts.MapOpts + publish opts.ListOpts + expose opts.ListOpts + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOptions opts.ListOpts + extraHosts opts.ListOpts + volumesFrom opts.ListOpts + envFile opts.ListOpts + capAdd opts.ListOpts + capDrop opts.ListOpts + groupAdd opts.ListOpts + securityOpt opts.ListOpts + storageOpt opts.ListOpts + labelsFile opts.ListOpts + loggingOpts opts.ListOpts + privileged bool + pidMode string + utsMode string + usernsMode string + publishAll bool + stdin bool + tty bool + oomKillDisable bool + oomScoreAdj int + containerIDFile string + entrypoint string + hostname string + memoryString string + memoryReservation string + memorySwap string + kernelMemory string + user string + workingDir string + cpuCount int64 + cpuShares int64 + cpuPercent int64 + cpuPeriod int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpuQuota int64 + cpus opts.NanoCPUs + cpusetCpus string + cpusetMems string + blkioWeight uint16 + ioMaxBandwidth string + ioMaxIOps uint64 + swappiness int64 + netMode string + macAddress string + ipv4Address string + ipv6Address string + ipcMode string + pidsLimit int64 + restartPolicy string + readonlyRootfs bool + loggingDriver string + cgroupParent string + volumeDriver string + stopSignal string + stopTimeout int + isolation string + shmSize string + noHealthcheck bool + healthCmd string + healthInterval time.Duration + healthTimeout time.Duration + healthRetries int + runtime string + autoRemove bool + init bool + initPath string + credentialSpec string + + Image string + Args []string +} + +// AddFlags adds all command line flags that will be used by Parse to the FlagSet +func AddFlags(flags *pflag.FlagSet) *ContainerOptions { + copts := &ContainerOptions{ + aliases: opts.NewListOpts(nil), + attach: opts.NewListOpts(ValidateAttach), + blkioWeightDevice: NewWeightdeviceOpt(ValidateWeightDevice), + capAdd: opts.NewListOpts(nil), + capDrop: opts.NewListOpts(nil), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOptions: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + deviceReadBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), + deviceReadIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), + deviceWriteBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), + deviceWriteIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), + devices: opts.NewListOpts(ValidateDevice), + env: opts.NewListOpts(ValidateEnv), + envFile: opts.NewListOpts(nil), + expose: opts.NewListOpts(nil), + extraHosts: opts.NewListOpts(ValidateExtraHost), + groupAdd: opts.NewListOpts(nil), + labels: opts.NewListOpts(ValidateEnv), + labelsFile: opts.NewListOpts(nil), + linkLocalIPs: opts.NewListOpts(nil), + links: opts.NewListOpts(ValidateLink), + loggingOpts: opts.NewListOpts(nil), + publish: opts.NewListOpts(nil), + securityOpt: opts.NewListOpts(nil), + storageOpt: opts.NewListOpts(nil), + sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), + tmpfs: opts.NewListOpts(nil), + ulimits: NewUlimitOpt(nil), + volumes: opts.NewListOpts(nil), + volumesFrom: opts.NewListOpts(nil), + } + + // General purpose flags + flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") + flags.Var(&copts.devices, "device", "Add a host device to the container") + flags.VarP(&copts.env, "env", "e", "Set environment variables") + flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") + flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") + flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") + flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") + flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") + flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") + flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") + flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") + flags.StringVar(&copts.stopSignal, "stop-signal", signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) + flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container") + flags.SetAnnotation("stop-timeout", "version", []string{"1.25"}) + flags.Var(copts.sysctls, "sysctl", "Sysctl options") + flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.Var(copts.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: [:])") + flags.StringVarP(&copts.workingDir, "workdir", "w", "", "Working directory inside the container") + flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits") + + // Security + flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities") + flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities") + flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container") + flags.Var(&copts.securityOpt, "security-opt", "Security Options") + flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use") + flags.StringVar(&copts.credentialSpec, "credentialspec", "", "Credential spec for managed service account (Windows only)") + + // Network and port publishing flag + flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") + flags.Var(&copts.dns, "dns", "Set custom DNS servers") + // We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way. + // This is to be consistent with service create/update + flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options") + flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options") + flags.MarkHidden("dns-opt") + flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains") + flags.Var(&copts.expose, "expose", "Expose a port or a range of ports") + flags.StringVar(&copts.ipv4Address, "ip", "", "Container IPv4 address (e.g. 172.30.100.104)") + flags.StringVar(&copts.ipv6Address, "ip6", "", "Container IPv6 address (e.g. 2001:db8::33)") + flags.Var(&copts.links, "link", "Add link to another container") + flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") + flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") + flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host") + flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") + // We allow for both "--net" and "--network", although the latter is the recommended way. + flags.StringVar(&copts.netMode, "net", "default", "Connect a container to a network") + flags.StringVar(&copts.netMode, "network", "default", "Connect a container to a network") + flags.MarkHidden("net") + // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way. + flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") + flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") + flags.MarkHidden("net-alias") + + // Logging and storage + flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") + flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") + flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") + flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") + flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") + flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") + flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") + + // Health-checking + flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") + flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ns|us|ms|s|m|h) (default 0s)") + flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") + flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s)") + flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK") + + // Resource management + flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") + flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file") + flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") + flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") + flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") + flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") + flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Var(&copts.cpus, "cpus", "Number of CPUs") + flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") + flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") + flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") + flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") + flags.StringVar(&copts.ioMaxBandwidth, "io-maxbandwidth", "", "Maximum IO bandwidth limit for the system drive (Windows only)") + flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") + flags.StringVar(&copts.kernelMemory, "kernel-memory", "", "Kernel memory limit") + flags.StringVarP(&copts.memoryString, "memory", "m", "", "Memory limit") + flags.StringVar(&copts.memoryReservation, "memory-reservation", "", "Memory soft limit") + flags.StringVar(&copts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") + flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") + flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") + flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") + + // Low-level execution (cgroups, namespaces, ...) + flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&copts.ipcMode, "ipc", "", "IPC namespace to use") + flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") + flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") + flags.StringVar(&copts.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") + flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") + flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") + + flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") + flags.StringVar(&copts.initPath, "init-path", "", "Path to the docker-init binary") + return copts +} + +// Parse parses the args for the specified command and generates a Config, +// a HostConfig and returns them with the specified command. +// If the specified args are not valid, it will return an error. +func Parse(flags *pflag.FlagSet, copts *ContainerOptions) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var ( + attachStdin = copts.attach.Get("stdin") + attachStdout = copts.attach.Get("stdout") + attachStderr = copts.attach.Get("stderr") + ) + + // Validate the input mac address + if copts.macAddress != "" { + if _, err := ValidateMACAddress(copts.macAddress); err != nil { + return nil, nil, nil, fmt.Errorf("%s is not a valid mac address", copts.macAddress) + } + } + if copts.stdin { + attachStdin = true + } + // If -a is not set, attach to stdout and stderr + if copts.attach.Len() == 0 { + attachStdout = true + attachStderr = true + } + + var err error + + var memory int64 + if copts.memoryString != "" { + memory, err = units.RAMInBytes(copts.memoryString) + if err != nil { + return nil, nil, nil, err + } + } + + var memoryReservation int64 + if copts.memoryReservation != "" { + memoryReservation, err = units.RAMInBytes(copts.memoryReservation) + if err != nil { + return nil, nil, nil, err + } + } + + var memorySwap int64 + if copts.memorySwap != "" { + if copts.memorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(copts.memorySwap) + if err != nil { + return nil, nil, nil, err + } + } + } + + var kernelMemory int64 + if copts.kernelMemory != "" { + kernelMemory, err = units.RAMInBytes(copts.kernelMemory) + if err != nil { + return nil, nil, nil, err + } + } + + swappiness := copts.swappiness + if swappiness != -1 && (swappiness < 0 || swappiness > 100) { + return nil, nil, nil, fmt.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness) + } + + var shmSize int64 + if copts.shmSize != "" { + shmSize, err = units.RAMInBytes(copts.shmSize) + if err != nil { + return nil, nil, nil, err + } + } + + // TODO FIXME units.RAMInBytes should have a uint64 version + var maxIOBandwidth int64 + if copts.ioMaxBandwidth != "" { + maxIOBandwidth, err = units.RAMInBytes(copts.ioMaxBandwidth) + if err != nil { + return nil, nil, nil, err + } + if maxIOBandwidth < 0 { + return nil, nil, nil, fmt.Errorf("invalid value: %s. Maximum IO Bandwidth must be positive", copts.ioMaxBandwidth) + } + } + + var binds []string + volumes := copts.volumes.GetMap() + // add any bind targets to the list of container volumes + for bind := range copts.volumes.GetMap() { + if arr := volumeSplitN(bind, 2); len(arr) > 1 { + // after creating the bind mount we want to delete it from the copts.volumes values because + // we do not want bind mounts being committed to image configs + binds = append(binds, bind) + // We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if + // there are duplicates entries. + delete(volumes, bind) + } + } + + // Can't evaluate options passed into --tmpfs until we actually mount + tmpfs := make(map[string]string) + for _, t := range copts.tmpfs.GetAll() { + if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { + tmpfs[arr[0]] = arr[1] + } else { + tmpfs[arr[0]] = "" + } + } + + var ( + runCmd strslice.StrSlice + entrypoint strslice.StrSlice + ) + + if len(copts.Args) > 0 { + runCmd = strslice.StrSlice(copts.Args) + } + + if copts.entrypoint != "" { + entrypoint = strslice.StrSlice{copts.entrypoint} + } else if flags.Changed("entrypoint") { + // if `--entrypoint=` is parsed then Entrypoint is reset + entrypoint = []string{""} + } + + ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + // Merge in exposed ports to the map of published ports + for _, e := range copts.expose.GetAll() { + if strings.Contains(e, ":") { + return nil, nil, nil, fmt.Errorf("invalid port format for --expose: %s", e) + } + //support two formats for expose, original format /[] or /[] + proto, port := nat.SplitProtoPort(e) + //parse the start and end port and create a sequence of ports to expose + //if expose a port, the start and end port are the same + start, end, err := nat.ParsePortRange(port) + if err != nil { + return nil, nil, nil, fmt.Errorf("invalid range format for --expose: %s, error: %s", e, err) + } + for i := start; i <= end; i++ { + p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) + if err != nil { + return nil, nil, nil, err + } + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + } + + // parse device mappings + deviceMappings := []container.DeviceMapping{} + for _, device := range copts.devices.GetAll() { + deviceMapping, err := ParseDevice(device) + if err != nil { + return nil, nil, nil, err + } + deviceMappings = append(deviceMappings, deviceMapping) + } + + // collect all the environment variables for the container + envVariables, err := ReadKVStrings(copts.envFile.GetAll(), copts.env.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + // collect all the labels for the container + labels, err := ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + ipcMode := container.IpcMode(copts.ipcMode) + if !ipcMode.Valid() { + return nil, nil, nil, fmt.Errorf("--ipc: invalid IPC mode") + } + + pidMode := container.PidMode(copts.pidMode) + if !pidMode.Valid() { + return nil, nil, nil, fmt.Errorf("--pid: invalid PID mode") + } + + utsMode := container.UTSMode(copts.utsMode) + if !utsMode.Valid() { + return nil, nil, nil, fmt.Errorf("--uts: invalid UTS mode") + } + + usernsMode := container.UsernsMode(copts.usernsMode) + if !usernsMode.Valid() { + return nil, nil, nil, fmt.Errorf("--userns: invalid USER mode") + } + + restartPolicy, err := ParseRestartPolicy(copts.restartPolicy) + if err != nil { + return nil, nil, nil, err + } + + loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + // Healthcheck + var healthConfig *container.HealthConfig + haveHealthSettings := copts.healthCmd != "" || + copts.healthInterval != 0 || + copts.healthTimeout != 0 || + copts.healthRetries != 0 + if copts.noHealthcheck { + if haveHealthSettings { + return nil, nil, nil, fmt.Errorf("--no-healthcheck conflicts with --health-* options") + } + test := strslice.StrSlice{"NONE"} + healthConfig = &container.HealthConfig{Test: test} + } else if haveHealthSettings { + var probe strslice.StrSlice + if copts.healthCmd != "" { + args := []string{"CMD-SHELL", copts.healthCmd} + probe = strslice.StrSlice(args) + } + if copts.healthInterval < 0 { + return nil, nil, nil, fmt.Errorf("--health-interval cannot be negative") + } + if copts.healthTimeout < 0 { + return nil, nil, nil, fmt.Errorf("--health-timeout cannot be negative") + } + + healthConfig = &container.HealthConfig{ + Test: probe, + Interval: copts.healthInterval, + Timeout: copts.healthTimeout, + Retries: copts.healthRetries, + } + } + + resources := container.Resources{ + CgroupParent: copts.cgroupParent, + Memory: memory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + MemorySwappiness: &copts.swappiness, + KernelMemory: kernelMemory, + OomKillDisable: &copts.oomKillDisable, + NanoCPUs: copts.cpus.Value(), + CPUCount: copts.cpuCount, + CPUPercent: copts.cpuPercent, + CPUShares: copts.cpuShares, + CPUPeriod: copts.cpuPeriod, + CpusetCpus: copts.cpusetCpus, + CpusetMems: copts.cpusetMems, + CPUQuota: copts.cpuQuota, + CPURealtimePeriod: copts.cpuRealtimePeriod, + CPURealtimeRuntime: copts.cpuRealtimeRuntime, + PidsLimit: copts.pidsLimit, + BlkioWeight: copts.blkioWeight, + BlkioWeightDevice: copts.blkioWeightDevice.GetList(), + BlkioDeviceReadBps: copts.deviceReadBps.GetList(), + BlkioDeviceWriteBps: copts.deviceWriteBps.GetList(), + BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(), + BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(), + IOMaximumIOps: copts.ioMaxIOps, + IOMaximumBandwidth: uint64(maxIOBandwidth), + Ulimits: copts.ulimits.GetList(), + Devices: deviceMappings, + } + + config := &container.Config{ + Hostname: copts.hostname, + ExposedPorts: ports, + User: copts.user, + Tty: copts.tty, + // TODO: deprecated, it comes from -n, --networking + // it's still needed internally to set the network to disabled + // if e.g. bridge is none in daemon opts, and in inspect + NetworkDisabled: false, + OpenStdin: copts.stdin, + AttachStdin: attachStdin, + AttachStdout: attachStdout, + AttachStderr: attachStderr, + Env: envVariables, + Cmd: runCmd, + Image: copts.Image, + Volumes: volumes, + MacAddress: copts.macAddress, + Entrypoint: entrypoint, + WorkingDir: copts.workingDir, + Labels: ConvertKVStringsToMap(labels), + Healthcheck: healthConfig, + } + if flags.Changed("stop-signal") { + config.StopSignal = copts.stopSignal + } + if flags.Changed("stop-timeout") { + config.StopTimeout = &copts.stopTimeout + } + + hostConfig := &container.HostConfig{ + Binds: binds, + ContainerIDFile: copts.containerIDFile, + OomScoreAdj: copts.oomScoreAdj, + AutoRemove: copts.autoRemove, + Privileged: copts.privileged, + PortBindings: portBindings, + Links: copts.links.GetAll(), + PublishAllPorts: copts.publishAll, + // Make sure the dns fields are never nil. + // New containers don't ever have those fields nil, + // but pre created containers can still have those nil values. + // See https://github.com/docker/docker/pull/17779 + // for a more detailed explanation on why we don't want that. + DNS: copts.dns.GetAllOrEmpty(), + DNSSearch: copts.dnsSearch.GetAllOrEmpty(), + DNSOptions: copts.dnsOptions.GetAllOrEmpty(), + ExtraHosts: copts.extraHosts.GetAll(), + VolumesFrom: copts.volumesFrom.GetAll(), + NetworkMode: container.NetworkMode(copts.netMode), + IpcMode: ipcMode, + PidMode: pidMode, + UTSMode: utsMode, + UsernsMode: usernsMode, + CapAdd: strslice.StrSlice(copts.capAdd.GetAll()), + CapDrop: strslice.StrSlice(copts.capDrop.GetAll()), + GroupAdd: copts.groupAdd.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: securityOpts, + StorageOpt: storageOpts, + ReadonlyRootfs: copts.readonlyRootfs, + LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts}, + VolumeDriver: copts.volumeDriver, + Isolation: container.Isolation(copts.isolation), + ShmSize: shmSize, + Resources: resources, + Tmpfs: tmpfs, + Sysctls: copts.sysctls.GetAll(), + Runtime: copts.runtime, + } + + // only set this value if the user provided the flag, else it should default to nil + if flags.Changed("init") { + hostConfig.Init = &copts.init + } + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + + networkingConfig := &networktypes.NetworkingConfig{ + EndpointsConfig: make(map[string]*networktypes.EndpointSettings), + } + + if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 { + epConfig := &networktypes.EndpointSettings{} + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + + epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: copts.ipv4Address, + IPv6Address: copts.ipv6Address, + } + + if copts.linkLocalIPs.Len() > 0 { + epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len()) + copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll()) + } + } + + if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Links = make([]string, len(hostConfig.Links)) + copy(epConfig.Links, hostConfig.Links) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + if copts.aliases.Len() > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Aliases = make([]string, copts.aliases.Len()) + copy(epConfig.Aliases, copts.aliases.GetAll()) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + return config, hostConfig, networkingConfig, nil +} + +// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys +// present in the file with additional pairs specified in the override parameter +func ReadKVStrings(files []string, override []string) ([]string, error) { + envVariables := []string{} + for _, ef := range files { + parsedVars, err := ParseEnvFile(ef) + if err != nil { + return nil, err + } + envVariables = append(envVariables, parsedVars...) + } + // parse the '-e' and '--env' after, to allow override + envVariables = append(envVariables, override...) + + return envVariables, nil +} + +// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = "" + } else { + result[kv[0]] = kv[1] + } + } + + return result +} + +// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"} +// but set unset keys to nil - meaning the ones with no "=" in them. +// We use this in cases where we need to distinguish between +// FOO= and FOO +// where the latter case just means FOO was mentioned but not given a value +func ConvertKVStringsToMapWithNil(values []string) map[string]*string { + result := make(map[string]*string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = nil + } else { + result[kv[0]] = &kv[1] + } + } + + return result +} + +func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { + loggingOptsMap := ConvertKVStringsToMap(loggingOpts) + if loggingDriver == "none" && len(loggingOpts) > 0 { + return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver) + } + return loggingOptsMap, nil +} + +// takes a local seccomp daemon, reads the file contents for sending to the daemon +func parseSecurityOpts(securityOpts []string) ([]string, error) { + for key, opt := range securityOpts { + con := strings.SplitN(opt, "=", 2) + if len(con) == 1 && con[0] != "no-new-privileges" { + if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + } else { + return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt) + } + } + if con[0] == "seccomp" && con[1] != "unconfined" { + f, err := ioutil.ReadFile(con[1]) + if err != nil { + return securityOpts, fmt.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) + } + b := bytes.NewBuffer(nil) + if err := json.Compact(b, f); err != nil { + return securityOpts, fmt.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) + } + securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) + } + } + + return securityOpts, nil +} + +// parses storage options per container into a map +func parseStorageOpts(storageOpts []string) (map[string]string, error) { + m := make(map[string]string) + for _, option := range storageOpts { + if strings.Contains(option, "=") { + opt := strings.SplitN(option, "=", 2) + m[opt[0]] = opt[1] + } else { + return nil, fmt.Errorf("invalid storage option") + } + } + return m, nil +} + +// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { + p := container.RestartPolicy{} + + if policy == "" { + return p, nil + } + + parts := strings.Split(policy, ":") + + if len(parts) > 2 { + return p, fmt.Errorf("invalid restart policy format") + } + if len(parts) == 2 { + count, err := strconv.Atoi(parts[1]) + if err != nil { + return p, fmt.Errorf("maximum retry count must be an integer") + } + + p.MaximumRetryCount = count + } + + p.Name = parts[0] + + return p, nil +} + +// ParseDevice parses a device mapping string to a container.DeviceMapping struct +func ParseDevice(device string) (container.DeviceMapping, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + if ValidDeviceMode(arr[1]) { + permissions = arr[1] + } else { + dst = arr[1] + } + fallthrough + case 1: + src = arr[0] + default: + return container.DeviceMapping{}, fmt.Errorf("invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + + deviceMapping := container.DeviceMapping{ + PathOnHost: src, + PathInContainer: dst, + CgroupPermissions: permissions, + } + return deviceMapping, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get a HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} + +// ValidateLink validates that the specified string has a valid link format (containerName:alias). +func ValidateLink(val string) (string, error) { + if _, _, err := ParseLink(val); err != nil { + return val, err + } + return val, nil +} + +// ValidDeviceMode checks if the mode for device is valid or not. +// Valid mode is a composition of r (read), w (write), and m (mknod). +func ValidDeviceMode(mode string) bool { + var legalDeviceMode = map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} + +// ValidateDevice validates a path for devices +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:mode] +// It also validates the device mode. +func ValidateDevice(val string) (string, error) { + return validatePath(val, ValidDeviceMode) +} + +func validatePath(val string, validator func(string) bool) (string, error) { + var containerPath string + var mode string + + if strings.Count(val, ":") > 2 { + return val, fmt.Errorf("bad format for path: %s", val) + } + + split := strings.SplitN(val, ":", 3) + if split[0] == "" { + return val, fmt.Errorf("bad format for path: %s", val) + } + switch len(split) { + case 1: + containerPath = split[0] + val = path.Clean(containerPath) + case 2: + if isValid := validator(split[1]); isValid { + containerPath = split[0] + mode = split[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = split[1] + val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) + } + case 3: + containerPath = split[1] + mode = split[2] + if isValid := validator(split[2]); !isValid { + return val, fmt.Errorf("bad mode specified: %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) + } + + if !path.IsAbs(containerPath) { + return val, fmt.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon. +// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). +// In Windows driver letter appears in two situations: +// a. `^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option) +// b. A string in the format like `\\?\C:\Windows\...` (UNC). +// Therefore, a driver letter can only follow either a `:` or `\\` +// This allows to correctly split strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo`. +func volumeSplitN(raw string, n int) []string { + var array []string + if len(raw) == 0 || raw[0] == ':' { + // invalid + return nil + } + // numberOfParts counts the number of parts separated by a separator colon + numberOfParts := 0 + // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. + left := 0 + // right represents the right-most cursor in raw incremented with the loop. Note this + // starts at index 1 as index 0 is already handle above as a special case. + for right := 1; right < len(raw); right++ { + // stop parsing if reached maximum number of parts + if n >= 0 && numberOfParts >= n { + break + } + if raw[right] != ':' { + continue + } + potentialDriveLetter := raw[right-1] + if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { + if right > 1 { + beforePotentialDriveLetter := raw[right-2] + // Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`) + if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' { + // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. + array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. + } + // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. + } else { + // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. + array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + } + // need to take care of the last part + if left < len(raw) { + if n >= 0 && numberOfParts >= n { + // if the maximum number of parts is reached, just append the rest to the last part + // left-1 is at the last `:` that needs to be included since not considered a separator. + array[n-1] += raw[left-1:] + } else { + array = append(array, raw[left:]) + } + } + return array +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse_test.go b/vendor/github.com/docker/docker/runconfig/opts/parse_test.go new file mode 100644 index 0000000..a1be379 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/parse_test.go @@ -0,0 +1,894 @@ +package opts + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/spf13/pflag" +) + +func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + flags := pflag.NewFlagSet("run", pflag.ContinueOnError) + flags.SetOutput(ioutil.Discard) + flags.Usage = nil + copts := AddFlags(flags) + if err := flags.Parse(args); err != nil { + return nil, nil, nil, err + } + return Parse(flags, copts) +} + +func parse(t *testing.T, args string) (*container.Config, *container.HostConfig, error) { + config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) + return config, hostConfig, err +} + +func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) { + config, hostConfig, err := parse(t, args) + if err != nil { + t.Fatal(err) + } + return config, hostConfig +} + +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + + if _, _, err := parse(t, "-a"); err == nil { + t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid"); err == nil { + t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdin -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-d --rm"); err == nil { + t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") + } +} + +func TestParseRunVolumes(t *testing.T) { + + // A single volume + arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) + } + + // Two volumes + arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) + } + + // A single bind-mount + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) + } + + // Two bind-mounts. + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Two bind-mounts, first read-only, second read-write. + // TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4 + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Similar to previous test but with alternate modes which are only supported by Linux + if runtime.GOOS != "windows" { + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + } + + // One bind mount and one volume + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) + } + + // Root to non-c: drive letter (Windows specific) + if runtime.GOOS == "windows" { + arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { + t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0]) + } + } + +} + +// This tests the cases for binds which are generated through +// DecodeContainerConfig rather than Parse() +func TestDecodeContainerConfigVolumes(t *testing.T) { + + // Root to root + bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // No destination path + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // // No destination path or mode + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // A whole lot of nothing + bindsOrVols = []string{`:`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // A whole lot of nothing with no mode + bindsOrVols = []string{`::`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Too much including an invalid mode + wTmp := os.Getenv("TEMP") + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Windows specific error tests + if runtime.GOOS == "windows" { + // Volume which does not include a drive letter + bindsOrVols = []string{`\tmp`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Root to C-Drive + bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Container path that does not include a drive letter + bindsOrVols = []string{`c:\windows:\somewhere`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + } + + // Linux-specific error tests + if runtime.GOOS != "windows" { + // Just root + bindsOrVols = []string{`/`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // A single volume that looks like a bind mount passed in Volumes. + // This should be handled as a bind mount, not a volume. + vols := []string{`/foo:/bar`} + if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { + t.Fatal("Volume /foo:/bar should have succeeded as a volume name") + } else if hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes[vols[0]]; !exists { + t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes) + } + + } +} + +// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes +// to call DecodeContainerConfig. It effectively does what a client would +// do when calling the daemon by constructing a JSON stream of a +// ContainerConfigWrapper which is populated by the set of volume specs +// passed into it. It returns a config and a hostconfig which can be +// validated to ensure DecodeContainerConfig has manipulated the structures +// correctly. +func callDecodeContainerConfig(volumes []string, binds []string) (*container.Config, *container.HostConfig, error) { + var ( + b []byte + err error + c *container.Config + h *container.HostConfig + ) + w := runconfig.ContainerConfigWrapper{ + Config: &container.Config{ + Volumes: map[string]struct{}{}, + }, + HostConfig: &container.HostConfig{ + NetworkMode: "none", + Binds: binds, + }, + } + for _, v := range volumes { + w.Config.Volumes[v] = struct{}{} + } + if b, err = json.Marshal(w); err != nil { + return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) + } + c, h, _, err = runconfig.DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + return nil, nil, fmt.Errorf("Error parsing %s: %v", string(b), err) + } + if c == nil || h == nil { + return nil, nil, fmt.Errorf("Empty config or hostconfig") + } + + return c, h, err +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return fmt.Errorf("strings don't match") +} + +// setupPlatformVolume takes two arrays of volume specs - a Unix style +// spec and a Windows style spec. Depending on the platform being unit tested, +// it returns one of them, along with a volume string that would be passed +// on the docker CLI (eg -v /bar -v /foo). +func setupPlatformVolume(u []string, w []string) ([]string, string) { + var a []string + if runtime.GOOS == "windows" { + a = w + } else { + a = u + } + s := "" + for _, v := range a { + s = s + "-v " + v + " " + } + return a, s +} + +// Simple parse with MacAddress validation +func TestParseWithMacAddress(t *testing.T) { + invalidMacAddress := "--mac-address=invalidMacAddress" + validMacAddress := "--mac-address=92:d0:c6:0a:29:33" + if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { + t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) + } + if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { + t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) + } +} + +func TestParseWithMemory(t *testing.T) { + invalidMemory := "--memory=invalid" + validMemory := "--memory=1G" + if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err != nil && err.Error() != "invalid size: 'invalid'" { + t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err) + } + if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 { + t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory) + } +} + +func TestParseWithMemorySwap(t *testing.T) { + invalidMemory := "--memory-swap=invalid" + validMemory := "--memory-swap=1G" + anotherValidMemory := "--memory-swap=-1" + if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" { + t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err) + } + if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 { + t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap) + } + if _, hostconfig := mustParse(t, anotherValidMemory); hostconfig.MemorySwap != -1 { + t.Fatalf("Expected the config to have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap) + } +} + +func TestParseHostname(t *testing.T) { + validHostnames := map[string]string{ + "hostname": "hostname", + "host-name": "host-name", + "hostname123": "hostname123", + "123hostname": "123hostname", + "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", + } + hostnameWithDomain := "--hostname=hostname.domainname" + hostnameWithDomainTld := "--hostname=hostname.domainname.tld" + for hostname, expectedHostname := range validHostnames { + if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { + t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) + } + } + if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" && config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got '%v'", config.Hostname) + } + if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" && config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got '%v'", config.Hostname) + } +} + +func TestParseWithExpose(t *testing.T) { + invalids := map[string]string{ + ":": "invalid port format for --expose: :", + "8080:9090": "invalid port format for --expose: 8080:9090", + "/tcp": "invalid range format for --expose: /tcp, error: Empty string specified for ports.", + "/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.", + "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, + } + valids := map[string][]nat.Port{ + "8080/tcp": {"8080/tcp"}, + "8080/udp": {"8080/udp"}, + "8080/ncp": {"8080/ncp"}, + "8080-8080/udp": {"8080/udp"}, + "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, + } + for expose, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) + } + } + for expose, exposedPorts := range valids { + config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != len(exposedPorts) { + t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) + } + for _, port := range exposedPorts { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) + } + } + } + // Merge with actual published port + config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != 2 { + t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) + } + ports := []nat.Port{"80/tcp", "81/tcp"} + for _, port := range ports { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) + } + } +} + +func TestParseDevice(t *testing.T) { + valids := map[string]container.DeviceMapping{ + "/dev/snd": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rwm", + }, + "/dev/snd:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rw", + }, + "/dev/snd:/something": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rwm", + }, + "/dev/snd:/something:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rw", + }, + } + for device, deviceMapping := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(hostconfig.Devices) != 1 { + t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) + } + if hostconfig.Devices[0] != deviceMapping { + t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) + } + } + +} + +func TestParseModes(t *testing.T) { + // ipc ko + if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { + t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) + } + // ipc ok + _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.IpcMode.Valid() { + t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode) + } + // pid ko + if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { + t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) + } + // pid ok + _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.PidMode.Valid() { + t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) + } + // uts ko + if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { + t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) + } + // uts ok + _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.UTSMode.Valid() { + t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) + } + // shm-size ko + if _, _, _, err = parseRun([]string{"--shm-size=a128m", "img", "cmd"}); err == nil || err.Error() != "invalid size: 'a128m'" { + t.Fatalf("Expected an error with message 'invalid size: a128m', got %v", err) + } + // shm-size ok + _, hostconfig, _, err = parseRun([]string{"--shm-size=128m", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.ShmSize != 134217728 { + t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) + } +} + +func TestParseRestartPolicy(t *testing.T) { + invalids := map[string]string{ + "always:2:3": "invalid restart policy format", + "on-failure:invalid": "maximum retry count must be an integer", + } + valids := map[string]container.RestartPolicy{ + "": {}, + "always": { + Name: "always", + MaximumRetryCount: 0, + }, + "on-failure:1": { + Name: "on-failure", + MaximumRetryCount: 1, + }, + } + for restart, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) + } + } + for restart, expected := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.RestartPolicy != expected { + t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) + } + } +} + +func TestParseHealth(t *testing.T) { + checkOk := func(args ...string) *container.HealthConfig { + config, _, _, err := parseRun(args) + if err != nil { + t.Fatalf("%#v: %v", args, err) + } + return config.Healthcheck + } + checkError := func(expected string, args ...string) { + config, _, _, err := parseRun(args) + if err == nil { + t.Fatalf("Expected error, but got %#v", config) + } + if err.Error() != expected { + t.Fatalf("Expected %#v, got %#v", expected, err) + } + } + health := checkOk("--no-healthcheck", "img", "cmd") + if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { + t.Fatalf("--no-healthcheck failed: %#v", health) + } + + health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") + if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { + t.Fatalf("--health-cmd: got %#v", health.Test) + } + if health.Timeout != 0 { + t.Fatalf("--health-cmd: timeout = %f", health.Timeout) + } + + checkError("--no-healthcheck conflicts with --health-* options", + "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") + + health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "img", "cmd") + if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond { + t.Fatalf("--health-*: got %#v", health) + } +} + +func TestParseLoggingOpts(t *testing.T) { + // logging opts ko + if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { + t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) + } + // logging opts ok + _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { + t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) + } +} + +func TestParseEnvfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." + } + // env ko + if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // env ok + config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { + t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) + } + config, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { + t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) + } +} + +func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) { + // UTF8 with BOM + config, _, _, err := parseRun([]string{"--env-file=fixtures/utf8.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + env := []string{"FOO=BAR", "HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"} + if len(config.Env) != len(env) { + t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env) + } + for i, v := range env { + if config.Env[i] != v { + t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i])) + } + } + + // UTF16 with BOM + e := "contains invalid utf8 bytes at line" + if _, _, _, err := parseRun([]string{"--env-file=fixtures/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // UTF16BE with BOM + if _, _, _, err := parseRun([]string{"--env-file=fixtures/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } +} + +func TestParseLabelfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." + } + // label ko + if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // label ok + config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { + t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) + } + config, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { + t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) + } +} + +func TestParseEntryPoint(t *testing.T) { + config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) + if err != nil { + t.Fatal(err) + } + if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { + t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) + } +} + +func TestValidateLink(t *testing.T) { + valid := []string{ + "name", + "dcdfbe62ecd0:alias", + "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", + "angry_torvalds:linus", + } + invalid := map[string]string{ + "": "empty string specified for links", + "too:much:of:it": "bad format for links: too:much:of:it", + } + + for _, link := range valid { + if _, err := ValidateLink(link); err != nil { + t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) + } + } + + for link, expectedError := range invalid { + if _, err := ValidateLink(link); err == nil { + t.Fatalf("ValidateLink(`%q`) should have failed validation", link) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) + } + } + } +} + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} + +func TestValidateDevice(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:r", + "/hostPath:/containerPath:rw", + "/hostPath:/containerPath:mrw", + } + invalid := map[string]string{ + "": "bad format for path: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for path: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for path: :test", + ":/test": "bad format for path: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for path: :test:", + "::": "bad format for path: ::", + ":::": "bad format for path: :::", + "/tmp:::": "bad format for path: /tmp:::", + ":/tmp::": "bad format for path: :/tmp::", + "path:ro": "ro is not an absolute path", + "path:rr": "rr is not an absolute path", + "a:/b:ro": "bad mode specified: ro", + "a:/b:rr": "bad mode specified: rr", + } + + for _, path := range valid { + if _, err := ValidateDevice(path); err != nil { + t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ValidateDevice(path); err == nil { + t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} + +func TestVolumeSplitN(t *testing.T) { + for _, x := range []struct { + input string + n int + expected []string + }{ + {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, + {`:C:\foo:d:`, -1, nil}, + {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, + {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, + {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, + + {`d:\`, -1, []string{`d:\`}}, + {`d:`, -1, []string{`d:`}}, + {`d:\path`, -1, []string{`d:\path`}}, + {`d:\path with space`, -1, []string{`d:\path with space`}}, + {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, + {`c:\:d:\`, -1, []string{`c:\`, `d:\`}}, + {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, + {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, + {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, + {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, + {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, + {`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, + {`name:D:`, -1, []string{`name`, `D:`}}, + {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, + {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, + {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, + {`c:\Windows`, -1, []string{`c:\Windows`}}, + {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, + + {``, -1, nil}, + {`.`, -1, []string{`.`}}, + {`..\`, -1, []string{`..\`}}, + {`c:\:..\`, -1, []string{`c:\`, `..\`}}, + {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, + + // Cover directories with one-character name + {`/tmp/x/y:/foo/x/y`, -1, []string{`/tmp/x/y`, `/foo/x/y`}}, + } { + res := volumeSplitN(x.input, x.n) + if len(res) < len(x.expected) { + t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) + } + for i, e := range res { + if e != x.expected[i] { + t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) + } + } + } +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/runtime.go b/vendor/github.com/docker/docker/runconfig/opts/runtime.go new file mode 100644 index 0000000..4361b3c --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/runtime.go @@ -0,0 +1,79 @@ +package opts + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" +) + +// RuntimeOpt defines a map of Runtimes +type RuntimeOpt struct { + name string + stockRuntimeName string + values *map[string]types.Runtime +} + +// NewNamedRuntimeOpt creates a new RuntimeOpt +func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { + if ref == nil { + ref = &map[string]types.Runtime{} + } + return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *RuntimeOpt) Name() string { + return o.name +} + +// Set validates and updates the list of Runtimes +func (o *RuntimeOpt) Set(val string) error { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.TrimSpace(parts[0]) + parts[1] = strings.TrimSpace(parts[1]) + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.ToLower(parts[0]) + if parts[0] == o.stockRuntimeName { + return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) + } + + if _, ok := (*o.values)[parts[0]]; ok { + return fmt.Errorf("runtime '%s' was already defined", parts[0]) + } + + (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} + + return nil +} + +// String returns Runtime values as a string. +func (o *RuntimeOpt) String() string { + var out []string + for k := range *o.values { + out = append(out, k) + } + + return fmt.Sprintf("%v", out) +} + +// GetMap returns a map of Runtimes (name: path) +func (o *RuntimeOpt) GetMap() map[string]types.Runtime { + if o.values != nil { + return *o.values + } + + return map[string]types.Runtime{} +} + +// Type returns the type of the option +func (o *RuntimeOpt) Type() string { + return "runtime" +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go b/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go new file mode 100644 index 0000000..5024324 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go @@ -0,0 +1,111 @@ +package opts + +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/go-units" +) + +// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. +type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) + +// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. +func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := units.RAMInBytes(split[1]) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + } + if rate < 0 { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + } + + return &blkiodev.ThrottleDevice{ + Path: split[0], + Rate: uint64(rate), + }, nil +} + +// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. +func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := strconv.ParseUint(split[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) + } + if rate < 0 { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) + } + + return &blkiodev.ThrottleDevice{ + Path: split[0], + Rate: uint64(rate), + }, nil +} + +// ThrottledeviceOpt defines a map of ThrottleDevices +type ThrottledeviceOpt struct { + values []*blkiodev.ThrottleDevice + validator ValidatorThrottleFctType +} + +// NewThrottledeviceOpt creates a new ThrottledeviceOpt +func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { + values := []*blkiodev.ThrottleDevice{} + return ThrottledeviceOpt{ + values: values, + validator: validator, + } +} + +// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt +func (opt *ThrottledeviceOpt) Set(val string) error { + var value *blkiodev.ThrottleDevice + if opt.validator != nil { + v, err := opt.validator(val) + if err != nil { + return err + } + value = v + } + (opt.values) = append((opt.values), value) + return nil +} + +// String returns ThrottledeviceOpt values as a string. +func (opt *ThrottledeviceOpt) String() string { + var out []string + for _, v := range opt.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to ThrottleDevices. +func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { + var throttledevice []*blkiodev.ThrottleDevice + throttledevice = append(throttledevice, opt.values...) + + return throttledevice +} + +// Type returns the option type +func (opt *ThrottledeviceOpt) Type() string { + return "throttled-device" +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/ulimit.go b/vendor/github.com/docker/docker/runconfig/opts/ulimit.go new file mode 100644 index 0000000..5adfe30 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/ulimit.go @@ -0,0 +1,57 @@ +package opts + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UlimitOpt defines a map of Ulimits +type UlimitOpt struct { + values *map[string]*units.Ulimit +} + +// NewUlimitOpt creates a new UlimitOpt +func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &UlimitOpt{ref} +} + +// Set validates a Ulimit and sets its name as a key in UlimitOpt +func (o *UlimitOpt) Set(val string) error { + l, err := units.ParseUlimit(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +// String returns Ulimit values as a string. +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to Ulimits. +func (o *UlimitOpt) GetList() []*units.Ulimit { + var ulimits []*units.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} + +// Type returns the option type +func (o *UlimitOpt) Type() string { + return "ulimit" +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go b/vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go new file mode 100644 index 0000000..0aa3fac --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "testing" + + "github.com/docker/go-units" +) + +func TestUlimitOpt(t *testing.T) { + ulimitMap := map[string]*units.Ulimit{ + "nofile": {"nofile", 1024, 512}, + } + + ulimitOpt := NewUlimitOpt(&ulimitMap) + + expected := "[nofile=512:1024]" + if ulimitOpt.String() != expected { + t.Fatalf("Expected %v, got %v", expected, ulimitOpt) + } + + // Valid ulimit append to opts + if err := ulimitOpt.Set("core=1024:1024"); err != nil { + t.Fatal(err) + } + + // Invalid ulimit type returns an error and do not append to opts + if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { + t.Fatalf("Expected error on invalid ulimit type") + } + expected = "[nofile=512:1024 core=1024:1024]" + expected2 := "[core=1024:1024 nofile=512:1024]" + result := ulimitOpt.String() + if result != expected && result != expected2 { + t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) + } + + // And test GetList + ulimits := ulimitOpt.GetList() + if len(ulimits) != 2 { + t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) + } +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go b/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go new file mode 100644 index 0000000..2a5da6d --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go @@ -0,0 +1,89 @@ +package opts + +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/api/types/blkiodev" +) + +// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. +type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) + +// ValidateWeightDevice validates that the specified string has a valid device-weight format. +func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + weight, err := strconv.ParseUint(split[1], 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + if weight > 0 && (weight < 10 || weight > 1000) { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + + return &blkiodev.WeightDevice{ + Path: split[0], + Weight: uint16(weight), + }, nil +} + +// WeightdeviceOpt defines a map of WeightDevices +type WeightdeviceOpt struct { + values []*blkiodev.WeightDevice + validator ValidatorWeightFctType +} + +// NewWeightdeviceOpt creates a new WeightdeviceOpt +func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { + values := []*blkiodev.WeightDevice{} + return WeightdeviceOpt{ + values: values, + validator: validator, + } +} + +// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt +func (opt *WeightdeviceOpt) Set(val string) error { + var value *blkiodev.WeightDevice + if opt.validator != nil { + v, err := opt.validator(val) + if err != nil { + return err + } + value = v + } + (opt.values) = append((opt.values), value) + return nil +} + +// String returns WeightdeviceOpt values as a string. +func (opt *WeightdeviceOpt) String() string { + var out []string + for _, v := range opt.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to WeightDevices. +func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { + var weightdevice []*blkiodev.WeightDevice + for _, v := range opt.values { + weightdevice = append(weightdevice, v) + } + + return weightdevice +} + +// Type returns the option type +func (opt *WeightdeviceOpt) Type() string { + return "weighted-device" +} diff --git a/vendor/github.com/docker/docker/utils/debug.go b/vendor/github.com/docker/docker/utils/debug.go new file mode 100644 index 0000000..d203891 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/debug.go @@ -0,0 +1,26 @@ +package utils + +import ( + "os" + + "github.com/Sirupsen/logrus" +) + +// EnableDebug sets the DEBUG env var to true +// and makes the logger to log at debug level. +func EnableDebug() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// DisableDebug sets the DEBUG env var to false +// and makes the logger to log at info level. +func DisableDebug() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsDebugEnabled checks whether the debug flag is set or not. +func IsDebugEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/vendor/github.com/docker/docker/utils/debug_test.go b/vendor/github.com/docker/docker/utils/debug_test.go new file mode 100644 index 0000000..6f9c4df --- /dev/null +++ b/vendor/github.com/docker/docker/utils/debug_test.go @@ -0,0 +1,43 @@ +package utils + +import ( + "os" + "testing" + + "github.com/Sirupsen/logrus" +) + +func TestEnableDebug(t *testing.T) { + defer func() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) + }() + EnableDebug() + if os.Getenv("DEBUG") != "1" { + t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.DebugLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.DebugLevel, logrus.GetLevel()) + } +} + +func TestDisableDebug(t *testing.T) { + DisableDebug() + if os.Getenv("DEBUG") != "" { + t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.InfoLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.InfoLevel, logrus.GetLevel()) + } +} + +func TestDebugEnabled(t *testing.T) { + EnableDebug() + if !IsDebugEnabled() { + t.Fatal("expected debug enabled, got false") + } + DisableDebug() + if IsDebugEnabled() { + t.Fatal("expected debug disabled, got true") + } +} diff --git a/vendor/github.com/docker/docker/utils/names.go b/vendor/github.com/docker/docker/utils/names.go new file mode 100644 index 0000000..6320628 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/names.go @@ -0,0 +1,9 @@ +package utils + +import "regexp" + +// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. +const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + +// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. +var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/vendor/github.com/docker/docker/utils/process_unix.go b/vendor/github.com/docker/docker/utils/process_unix.go new file mode 100644 index 0000000..fc0b1c8 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/process_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd solaris + +package utils + +import ( + "syscall" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := syscall.Kill(pid, syscall.Signal(0)) + if err == nil || err == syscall.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + syscall.Kill(pid, syscall.SIGKILL) +} diff --git a/vendor/github.com/docker/docker/utils/process_windows.go b/vendor/github.com/docker/docker/utils/process_windows.go new file mode 100644 index 0000000..03cb855 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/process_windows.go @@ -0,0 +1,20 @@ +package utils + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + // TODO Windows containerd. Not sure this is needed + // p, err := os.FindProcess(pid) + // if err == nil { + // return true + // } + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + // TODO Windows containerd. Not sure this is needed + // p, err := os.FindProcess(pid) + // if err == nil { + // p.Kill() + // } +} diff --git a/vendor/github.com/docker/docker/utils/templates/templates.go b/vendor/github.com/docker/docker/utils/templates/templates.go new file mode 100644 index 0000000..91c376f --- /dev/null +++ b/vendor/github.com/docker/docker/utils/templates/templates.go @@ -0,0 +1,42 @@ +package templates + +import ( + "encoding/json" + "strings" + "text/template" +) + +// basicFunctions are the set of initial +// functions provided to every template. +var basicFunctions = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, + "split": strings.Split, + "join": strings.Join, + "title": strings.Title, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "pad": padWithSpace, +} + +// Parse creates a new annonymous template with the basic functions +// and parses the given format. +func Parse(format string) (*template.Template, error) { + return NewParse("", format) +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. +func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} + +// padWithSpace adds whitespace to the input if the input is non-empty +func padWithSpace(source string, prefix, suffix int) string { + if source == "" { + return source + } + return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) +} diff --git a/vendor/github.com/docker/docker/utils/templates/templates_test.go b/vendor/github.com/docker/docker/utils/templates/templates_test.go new file mode 100644 index 0000000..dd42901 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/templates/templates_test.go @@ -0,0 +1,38 @@ +package templates + +import ( + "bytes" + "testing" +) + +func TestParseStringFunctions(t *testing.T) { + tm, err := Parse(`{{join (split . ":") "/"}}`) + if err != nil { + t.Fatal(err) + } + + var b bytes.Buffer + if err := tm.Execute(&b, "text:with:colon"); err != nil { + t.Fatal(err) + } + want := "text/with/colon" + if b.String() != want { + t.Fatalf("expected %s, got %s", want, b.String()) + } +} + +func TestNewParse(t *testing.T) { + tm, err := NewParse("foo", "this is a {{ . }}") + if err != nil { + t.Fatal(err) + } + + var b bytes.Buffer + if err := tm.Execute(&b, "string"); err != nil { + t.Fatal(err) + } + want := "this is a string" + if b.String() != want { + t.Fatalf("expected %s, got %s", want, b.String()) + } +} diff --git a/vendor/github.com/docker/docker/utils/utils.go b/vendor/github.com/docker/docker/utils/utils.go new file mode 100644 index 0000000..d3dd00a --- /dev/null +++ b/vendor/github.com/docker/docker/utils/utils.go @@ -0,0 +1,87 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +var globalTestID string + +// TestDirectory creates a new temporary directory and returns its path. +// The contents of directory at path `templateDir` is copied into the +// new directory. +func TestDirectory(templateDir string) (dir string, err error) { + if globalTestID == "" { + globalTestID = stringid.GenerateNonCryptoID()[:4] + } + prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = archive.CopyWithTar(templateDir, dir); err != nil { + return + } + } + return +} + +// GetCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func GetCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. + pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) + i-- + } + } + + return defaults +} diff --git a/vendor/github.com/docker/docker/utils/utils_test.go b/vendor/github.com/docker/docker/utils/utils_test.go new file mode 100644 index 0000000..ab3911e --- /dev/null +++ b/vendor/github.com/docker/docker/utils/utils_test.go @@ -0,0 +1,21 @@ +package utils + +import "testing" + +func TestReplaceAndAppendEnvVars(t *testing.T) { + var ( + d = []string{"HOME=/"} + o = []string{"HOME=/root", "TERM=xterm"} + ) + + env := ReplaceOrAppendEnvValues(d, o) + if len(env) != 2 { + t.Fatalf("expected len of 2 got %d", len(env)) + } + if env[0] != "HOME=/root" { + t.Fatalf("expected HOME=/root got '%s'", env[0]) + } + if env[1] != "TERM=xterm" { + t.Fatalf("expected TERM=xterm got '%s'", env[1]) + } +} diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf new file mode 100644 index 0000000..bb7718b --- /dev/null +++ b/vendor/github.com/docker/docker/vendor.conf @@ -0,0 +1,140 @@ +# the following lines are in sorted order, FYI +github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 +github.com/Microsoft/hcsshim v0.5.9 +github.com/Microsoft/go-winio v0.3.8 +github.com/Sirupsen/logrus v0.11.0 +github.com/davecgh/go-spew 6d212800a42e8ab5c146b8ace3490ee17e5225f9 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git +github.com/gorilla/context v1.1 +github.com/gorilla/mux v1.1 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.0 +github.com/mattn/go-sqlite3 v1.1.0 +github.com/tchap/go-patricia v2.2.6 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +# forked golang.org/x/net package includes a patch for lazy loading trace templates +golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git +golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 +github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97 +github.com/docker/go-connections ecb4cb2dd420ada7df7f2593d6c25441f65f69f2 + +github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 +github.com/imdario/mergo 0.2.1 + +#get libnetwork packages +github.com/docker/libnetwork 45b40861e677e37cf27bc184eca5af92f8cdd32d +github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 +github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 +github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e +github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 +github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +github.com/vishvananda/netlink 482f7a52b758233521878cb6c5904b6bd63f3457 +github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707 +github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 +github.com/hashicorp/consul v0.5.2 +github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 + +# get graph and distribution packages +github.com/docker/distribution 28602af35aceda2f8d571bad7ca37a54cf0250bc +github.com/vbatts/tar-split v0.10.1 + +# get go-zfs packages +github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa +github.com/pborman/uuid v1.0 + +# get desired notary commit, might also need to be updated in Dockerfile +github.com/docker/notary v0.4.2 + +google.golang.org/grpc v1.0.2 +github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f +github.com/docker/go v1.5.1-1-1-gbaf439e +github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c + +# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly +github.com/opencontainers/runc 9df8b306d01f59d3a8029be411de015b7304dd8f https://github.com/docker/runc.git # libcontainer +github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 +# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) +github.com/coreos/go-systemd v4 +github.com/godbus/dbus v4.0.0 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a + +# gelf logging driver deps +github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 + +github.com/fluent/fluent-logger-golang v1.2.1 +# fluent-logger-golang deps +github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa +github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c + +# fsnotify +github.com/fsnotify/fsnotify v1.2.11 + +# awslogs deps +github.com/aws/aws-sdk-go v1.4.22 +github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 + +# logentries +github.com/bsphere/le_go d3308aafe090956bc89a65f0769f58251a1b4f03 + +# gcplogs deps +golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be +google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 +google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 + +# native credentials +github.com/docker/docker-credential-helpers f72c04f1d8e71959a6d103f808c50ccbad79b9fd + +# containerd +github.com/docker/containerd aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1 +github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 + +# cluster +github.com/docker/swarmkit 1c7f003d75f091d5f7051ed982594420e4515f77 +github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 +github.com/gogo/protobuf v0.3 +github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a +github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e +golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47 +github.com/hashicorp/go-memdb 608dda3b1410a73eaf3ac8b517c9ae7ebab6aa87 +github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 +github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 +github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 +github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 +github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 +github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 +github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 +bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675 +github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 + +# cli +github.com/spf13/cobra v1.5 https://github.com/dnephin/cobra.git +github.com/spf13/pflag dabebe21bf790f782ea4c7bbd2efc430de182afd +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff + +# metrics +github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 + +# composefile +github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 +github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a +github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 +github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d +gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 diff --git a/vendor/github.com/docker/docker/volume/drivers/adapter.go b/vendor/github.com/docker/docker/volume/drivers/adapter.go new file mode 100644 index 0000000..62ef7df --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/adapter.go @@ -0,0 +1,177 @@ +package volumedrivers + +import ( + "errors" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/volume" +) + +var ( + errNoSuchVolume = errors.New("no such volume") +) + +type volumeDriverAdapter struct { + name string + baseHostPath string + capabilities *volume.Capability + proxy *volumeDriverProxy +} + +func (a *volumeDriverAdapter) Name() string { + return a.name +} + +func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := a.proxy.Create(name, opts); err != nil { + return nil, err + } + return &volumeAdapter{ + proxy: a.proxy, + name: name, + driverName: a.name, + baseHostPath: a.baseHostPath, + }, nil +} + +func (a *volumeDriverAdapter) Remove(v volume.Volume) error { + return a.proxy.Remove(v.Name()) +} + +func hostPath(baseHostPath, path string) string { + if baseHostPath != "" { + path = filepath.Join(baseHostPath, path) + } + return path +} + +func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { + ls, err := a.proxy.List() + if err != nil { + return nil, err + } + + var out []volume.Volume + for _, vp := range ls { + out = append(out, &volumeAdapter{ + proxy: a.proxy, + name: vp.Name, + baseHostPath: a.baseHostPath, + driverName: a.name, + eMount: hostPath(a.baseHostPath, vp.Mountpoint), + }) + } + return out, nil +} + +func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) { + v, err := a.proxy.Get(name) + if err != nil { + return nil, err + } + + // plugin may have returned no volume and no error + if v == nil { + return nil, errNoSuchVolume + } + + return &volumeAdapter{ + proxy: a.proxy, + name: v.Name, + driverName: a.Name(), + eMount: v.Mountpoint, + status: v.Status, + baseHostPath: a.baseHostPath, + }, nil +} + +func (a *volumeDriverAdapter) Scope() string { + cap := a.getCapabilities() + return cap.Scope +} + +func (a *volumeDriverAdapter) getCapabilities() volume.Capability { + if a.capabilities != nil { + return *a.capabilities + } + cap, err := a.proxy.Capabilities() + if err != nil { + // `GetCapabilities` is a not a required endpoint. + // On error assume it's a local-only driver + logrus.Warnf("Volume driver %s returned an error while trying to query its capabilities, using default capabilties: %v", a.name, err) + return volume.Capability{Scope: volume.LocalScope} + } + + // don't spam the warn log below just because the plugin didn't provide a scope + if len(cap.Scope) == 0 { + cap.Scope = volume.LocalScope + } + + cap.Scope = strings.ToLower(cap.Scope) + if cap.Scope != volume.LocalScope && cap.Scope != volume.GlobalScope { + logrus.Warnf("Volume driver %q returned an invalid scope: %q", a.Name(), cap.Scope) + cap.Scope = volume.LocalScope + } + + a.capabilities = &cap + return cap +} + +type volumeAdapter struct { + proxy *volumeDriverProxy + name string + baseHostPath string + driverName string + eMount string // ephemeral host volume path + status map[string]interface{} +} + +type proxyVolume struct { + Name string + Mountpoint string + Status map[string]interface{} +} + +func (a *volumeAdapter) Name() string { + return a.name +} + +func (a *volumeAdapter) DriverName() string { + return a.driverName +} + +func (a *volumeAdapter) Path() string { + if len(a.eMount) == 0 { + mountpoint, _ := a.proxy.Path(a.name) + a.eMount = hostPath(a.baseHostPath, mountpoint) + } + return a.eMount +} + +func (a *volumeAdapter) CachedPath() string { + return a.eMount +} + +func (a *volumeAdapter) Mount(id string) (string, error) { + mountpoint, err := a.proxy.Mount(a.name, id) + a.eMount = hostPath(a.baseHostPath, mountpoint) + return a.eMount, err +} + +func (a *volumeAdapter) Unmount(id string) error { + err := a.proxy.Unmount(a.name, id) + if err == nil { + a.eMount = "" + } + return err +} + +func (a *volumeAdapter) Status() map[string]interface{} { + out := make(map[string]interface{}, len(a.status)) + for k, v := range a.status { + out[k] = v + } + return out +} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint.go b/vendor/github.com/docker/docker/volume/drivers/extpoint.go new file mode 100644 index 0000000..576dee8 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint.go @@ -0,0 +1,215 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver + +package volumedrivers + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" + getter "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/volume" +) + +// currently created by hand. generation tool would generate this like: +// $ extpoint-gen Driver > volume/extpoint.go + +var drivers = &driverExtpoint{ + extensions: make(map[string]volume.Driver), + driverLock: &locker.Locker{}, +} + +const extName = "VolumeDriver" + +// NewVolumeDriver returns a driver has the given name mapped on the given client. +func NewVolumeDriver(name string, baseHostPath string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name: name, baseHostPath: baseHostPath, proxy: proxy} +} + +// volumeDriver defines the available functions that volume plugins must implement. +// This interface is only defined to generate the proxy objects. +// It's not intended to be public or reused. +type volumeDriver interface { + // Create a volume with the given name + Create(name string, opts map[string]string) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name, id string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name, id string) (err error) + // List lists all the volumes known to the driver + List() (volumes []*proxyVolume, err error) + // Get retrieves the volume with the requested name + Get(name string) (volume *proxyVolume, err error) + // Capabilities gets the list of capabilities of the driver + Capabilities() (capabilities volume.Capability, err error) +} + +type driverExtpoint struct { + extensions map[string]volume.Driver + sync.Mutex + driverLock *locker.Locker + plugingetter getter.PluginGetter +} + +// RegisterPluginGetter sets the plugingetter +func RegisterPluginGetter(plugingetter getter.PluginGetter) { + drivers.plugingetter = plugingetter +} + +// Register associates the given driver to the given name, checking if +// the name is already associated +func Register(extension volume.Driver, name string) bool { + if name == "" { + return false + } + + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if exists { + return false + } + + if err := validateDriver(extension); err != nil { + return false + } + + drivers.extensions[name] = extension + + return true +} + +// Unregister dissociates the name from its driver, if the association exists. +func Unregister(name string) bool { + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if !exists { + return false + } + delete(drivers.extensions, name) + return true +} + +// lookup returns the driver associated with the given name. If a +// driver with the given name has not been registered it checks if +// there is a VolumeDriver plugin available with the given name. +func lookup(name string, mode int) (volume.Driver, error) { + drivers.driverLock.Lock(name) + defer drivers.driverLock.Unlock(name) + + drivers.Lock() + ext, ok := drivers.extensions[name] + drivers.Unlock() + if ok { + return ext, nil + } + if drivers.plugingetter != nil { + p, err := drivers.plugingetter.Get(name, extName, mode) + if err != nil { + return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + } + + d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client()) + if err := validateDriver(d); err != nil { + return nil, err + } + + if p.IsV1() { + drivers.Lock() + drivers.extensions[name] = d + drivers.Unlock() + } + return d, nil + } + return nil, fmt.Errorf("Error looking up volume plugin %s", name) +} + +func validateDriver(vd volume.Driver) error { + scope := vd.Scope() + if scope != volume.LocalScope && scope != volume.GlobalScope { + return fmt.Errorf("Driver %q provided an invalid capability scope: %s", vd.Name(), scope) + } + return nil +} + +// GetDriver returns a volume driver by its name. +// If the driver is empty, it looks for the local driver. +func GetDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.LOOKUP) +} + +// CreateDriver returns a volume driver by its name and increments RefCount. +// If the driver is empty, it looks for the local driver. +func CreateDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.ACQUIRE) +} + +// RemoveDriver returns a volume driver by its name and decrements RefCount.. +// If the driver is empty, it looks for the local driver. +func RemoveDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.RELEASE) +} + +// GetDriverList returns list of volume drivers registered. +// If no driver is registered, empty string list will be returned. +func GetDriverList() []string { + var driverList []string + drivers.Lock() + for driverName := range drivers.extensions { + driverList = append(driverList, driverName) + } + drivers.Unlock() + return driverList +} + +// GetAllDrivers lists all the registered drivers +func GetAllDrivers() ([]volume.Driver, error) { + var plugins []getter.CompatPlugin + if drivers.plugingetter != nil { + var err error + plugins, err = drivers.plugingetter.GetAllByCap(extName) + if err != nil { + return nil, fmt.Errorf("error listing plugins: %v", err) + } + } + var ds []volume.Driver + + drivers.Lock() + defer drivers.Unlock() + + for _, d := range drivers.extensions { + ds = append(ds, d) + } + + for _, p := range plugins { + name := p.Name() + ext, ok := drivers.extensions[name] + if ok { + continue + } + + ext = NewVolumeDriver(name, p.BasePath(), p.Client()) + if p.IsV1() { + drivers.extensions[name] = ext + } + ds = append(ds, ext) + } + return ds, nil +} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go b/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go new file mode 100644 index 0000000..428b075 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go @@ -0,0 +1,23 @@ +package volumedrivers + +import ( + "testing" + + volumetestutils "github.com/docker/docker/volume/testutils" +) + +func TestGetDriver(t *testing.T) { + _, err := GetDriver("missing") + if err == nil { + t.Fatal("Expected error, was nil") + } + Register(volumetestutils.NewFakeDriver("fake"), "fake") + + d, err := GetDriver("fake") + if err != nil { + t.Fatal(err) + } + if d.Name() != "fake" { + t.Fatalf("Expected fake driver, got %s\n", d.Name()) + } +} diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy.go b/vendor/github.com/docker/docker/volume/drivers/proxy.go new file mode 100644 index 0000000..b23db62 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/proxy.go @@ -0,0 +1,242 @@ +// generated code - DO NOT EDIT + +package volumedrivers + +import ( + "errors" + + "github.com/docker/docker/volume" +) + +type client interface { + Call(string, interface{}, interface{}) error +} + +type volumeDriverProxy struct { + client +} + +type volumeDriverProxyCreateRequest struct { + Name string + Opts map[string]string +} + +type volumeDriverProxyCreateResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Create(name string, opts map[string]string) (err error) { + var ( + req volumeDriverProxyCreateRequest + ret volumeDriverProxyCreateResponse + ) + + req.Name = name + req.Opts = opts + if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyRemoveRequest struct { + Name string +} + +type volumeDriverProxyRemoveResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Remove(name string) (err error) { + var ( + req volumeDriverProxyRemoveRequest + ret volumeDriverProxyRemoveResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyPathRequest struct { + Name string +} + +type volumeDriverProxyPathResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyPathRequest + ret volumeDriverProxyPathResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyMountRequest struct { + Name string + ID string +} + +type volumeDriverProxyMountResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Mount(name string, id string) (mountpoint string, err error) { + var ( + req volumeDriverProxyMountRequest + ret volumeDriverProxyMountResponse + ) + + req.Name = name + req.ID = id + if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyUnmountRequest struct { + Name string + ID string +} + +type volumeDriverProxyUnmountResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Unmount(name string, id string) (err error) { + var ( + req volumeDriverProxyUnmountRequest + ret volumeDriverProxyUnmountResponse + ) + + req.Name = name + req.ID = id + if err = pp.Call("VolumeDriver.Unmount", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyListRequest struct { +} + +type volumeDriverProxyListResponse struct { + Volumes []*proxyVolume + Err string +} + +func (pp *volumeDriverProxy) List() (volumes []*proxyVolume, err error) { + var ( + req volumeDriverProxyListRequest + ret volumeDriverProxyListResponse + ) + + if err = pp.Call("VolumeDriver.List", req, &ret); err != nil { + return + } + + volumes = ret.Volumes + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyGetRequest struct { + Name string +} + +type volumeDriverProxyGetResponse struct { + Volume *proxyVolume + Err string +} + +func (pp *volumeDriverProxy) Get(name string) (volume *proxyVolume, err error) { + var ( + req volumeDriverProxyGetRequest + ret volumeDriverProxyGetResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Get", req, &ret); err != nil { + return + } + + volume = ret.Volume + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyCapabilitiesRequest struct { +} + +type volumeDriverProxyCapabilitiesResponse struct { + Capabilities volume.Capability + Err string +} + +func (pp *volumeDriverProxy) Capabilities() (capabilities volume.Capability, err error) { + var ( + req volumeDriverProxyCapabilitiesRequest + ret volumeDriverProxyCapabilitiesResponse + ) + + if err = pp.Call("VolumeDriver.Capabilities", req, &ret); err != nil { + return + } + + capabilities = ret.Capabilities + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy_test.go b/vendor/github.com/docker/docker/volume/drivers/proxy_test.go new file mode 100644 index 0000000..b78c46a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/proxy_test.go @@ -0,0 +1,132 @@ +package volumedrivers + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-connections/tlsconfig" +) + +func TestVolumeRequestError(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot create volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot remove volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot mount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot unmount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Unknown volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot list volumes"}`) + }) + + mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot get volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + http.Error(w, "error", 500) + }) + + u, _ := url.Parse(server.URL) + client, err := plugins.NewClient("tcp://"+u.Host, &tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatal(err) + } + + driver := volumeDriverProxy{client} + + if err = driver.Create("volume", nil); err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot create volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Mount("volume", "123") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot mount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Unmount("volume", "123") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot unmount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Remove("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot remove volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Path("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Unknown volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.List() + if err == nil { + t.Fatal("Expected error, was nil") + } + if !strings.Contains(err.Error(), "Cannot list volumes") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Get("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + if !strings.Contains(err.Error(), "Cannot get volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Capabilities() + if err == nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/volume/local/local.go b/vendor/github.com/docker/docker/volume/local/local.go new file mode 100644 index 0000000..62c45e6 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local.go @@ -0,0 +1,364 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" +) + +// VolumeDataPathName is the name of the directory where the volume data is stored. +// It uses a very distinctive name to avoid collisions migrating data between +// Docker versions. +const ( + VolumeDataPathName = "_data" + volumesPathName = "volumes" +) + +var ( + // ErrNotFound is the typed error returned when the requested volume name can't be found + ErrNotFound = fmt.Errorf("volume not found") + // volumeNameRegex ensures the name assigned for the volume is valid. + // This name is used to create the bind directory, so we need to avoid characters that + // would make the path to escape the root directory. + volumeNameRegex = utils.RestrictedNamePattern +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +type activeMount struct { + count uint64 + mounted bool +} + +// New instantiates a new Root instance with the provided scope. Scope +// is the base path that the Root instance uses to store its +// volumes. The base path is created here if it does not exist. +func New(scope string, rootUID, rootGID int) (*Root, error) { + rootDirectory := filepath.Join(scope, volumesPathName) + + if err := idtools.MkdirAllAs(rootDirectory, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + r := &Root{ + scope: scope, + path: rootDirectory, + volumes: make(map[string]*localVolume), + rootUID: rootUID, + rootGID: rootGID, + } + + dirs, err := ioutil.ReadDir(rootDirectory) + if err != nil { + return nil, err + } + + mountInfos, err := mount.GetMounts() + if err != nil { + logrus.Debugf("error looking up mounts for local volume cleanup: %v", err) + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + + name := filepath.Base(d.Name()) + v := &localVolume{ + driverName: r.Name(), + name: name, + path: r.DataPath(name), + } + r.volumes[name] = v + optsFilePath := filepath.Join(rootDirectory, name, "opts.json") + if b, err := ioutil.ReadFile(optsFilePath); err == nil { + opts := optsConfig{} + if err := json.Unmarshal(b, &opts); err != nil { + return nil, errors.Wrapf(err, "error while unmarshaling volume options for volume: %s", name) + } + // Make sure this isn't an empty optsConfig. + // This could be empty due to buggy behavior in older versions of Docker. + if !reflect.DeepEqual(opts, optsConfig{}) { + v.opts = &opts + } + + // unmount anything that may still be mounted (for example, from an unclean shutdown) + for _, info := range mountInfos { + if info.Mountpoint == v.path { + mount.Unmount(v.path) + break + } + } + } + } + + return r, nil +} + +// Root implements the Driver interface for the volume package and +// manages the creation/removal of volumes. It uses only standard vfs +// commands to create/remove dirs within its provided scope. +type Root struct { + m sync.Mutex + scope string + path string + volumes map[string]*localVolume + rootUID int + rootGID int +} + +// List lists all the volumes +func (r *Root) List() ([]volume.Volume, error) { + var ls []volume.Volume + r.m.Lock() + for _, v := range r.volumes { + ls = append(ls, v) + } + r.m.Unlock() + return ls, nil +} + +// DataPath returns the constructed path of this volume. +func (r *Root) DataPath(volumeName string) string { + return filepath.Join(r.path, volumeName, VolumeDataPathName) +} + +// Name returns the name of Root, defined in the volume package in the DefaultDriverName constant. +func (r *Root) Name() string { + return volume.DefaultDriverName +} + +// Create creates a new volume.Volume with the provided name, creating +// the underlying directory tree required for this volume in the +// process. +func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := r.validateName(name); err != nil { + return nil, err + } + + r.m.Lock() + defer r.m.Unlock() + + v, exists := r.volumes[name] + if exists { + return v, nil + } + + path := r.DataPath(name) + if err := idtools.MkdirAllAs(path, 0755, r.rootUID, r.rootGID); err != nil { + if os.IsExist(err) { + return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) + } + return nil, errors.Wrapf(err, "error while creating volume path '%s'", path) + } + + var err error + defer func() { + if err != nil { + os.RemoveAll(filepath.Dir(path)) + } + }() + + v = &localVolume{ + driverName: r.Name(), + name: name, + path: path, + } + + if len(opts) != 0 { + if err = setOpts(v, opts); err != nil { + return nil, err + } + var b []byte + b, err = json.Marshal(v.opts) + if err != nil { + return nil, err + } + if err = ioutil.WriteFile(filepath.Join(filepath.Dir(path), "opts.json"), b, 600); err != nil { + return nil, errors.Wrap(err, "error while persisting volume options") + } + } + + r.volumes[name] = v + return v, nil +} + +// Remove removes the specified volume and all underlying data. If the +// given volume does not belong to this driver and an error is +// returned. The volume is reference counted, if all references are +// not released then the volume is not removed. +func (r *Root) Remove(v volume.Volume) error { + r.m.Lock() + defer r.m.Unlock() + + lv, ok := v.(*localVolume) + if !ok { + return fmt.Errorf("unknown volume type %T", v) + } + + realPath, err := filepath.EvalSymlinks(lv.path) + if err != nil { + if !os.IsNotExist(err) { + return err + } + realPath = filepath.Dir(lv.path) + } + + if !r.scopedPath(realPath) { + return fmt.Errorf("Unable to remove a directory of out the Docker root %s: %s", r.scope, realPath) + } + + if err := removePath(realPath); err != nil { + return err + } + + delete(r.volumes, lv.name) + return removePath(filepath.Dir(lv.path)) +} + +func removePath(path string) error { + if err := os.RemoveAll(path); err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "error removing volume path '%s'", path) + } + return nil +} + +// Get looks up the volume for the given name and returns it if found +func (r *Root) Get(name string) (volume.Volume, error) { + r.m.Lock() + v, exists := r.volumes[name] + r.m.Unlock() + if !exists { + return nil, ErrNotFound + } + return v, nil +} + +// Scope returns the local volume scope +func (r *Root) Scope() string { + return volume.LocalScope +} + +func (r *Root) validateName(name string) error { + if len(name) == 1 { + return validationError{fmt.Errorf("volume name is too short, names should be at least two alphanumeric characters")} + } + if !volumeNameRegex.MatchString(name) { + return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed. If you intented to pass a host directory, use absolute path", name, utils.RestrictedNameChars)} + } + return nil +} + +// localVolume implements the Volume interface from the volume package and +// represents the volumes created by Root. +type localVolume struct { + m sync.Mutex + // unique name of the volume + name string + // path is the path on the host where the data lives + path string + // driverName is the name of the driver that created the volume. + driverName string + // opts is the parsed list of options used to create the volume + opts *optsConfig + // active refcounts the active mounts + active activeMount +} + +// Name returns the name of the given Volume. +func (v *localVolume) Name() string { + return v.name +} + +// DriverName returns the driver that created the given Volume. +func (v *localVolume) DriverName() string { + return v.driverName +} + +// Path returns the data location. +func (v *localVolume) Path() string { + return v.path +} + +// Mount implements the localVolume interface, returning the data location. +func (v *localVolume) Mount(id string) (string, error) { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + if !v.active.mounted { + if err := v.mount(); err != nil { + return "", err + } + v.active.mounted = true + } + v.active.count++ + } + return v.path, nil +} + +// Umount is for satisfying the localVolume interface and does not do anything in this driver. +func (v *localVolume) Unmount(id string) error { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + v.active.count-- + if v.active.count == 0 { + if err := mount.Unmount(v.path); err != nil { + v.active.count++ + return errors.Wrapf(err, "error while unmounting volume path '%s'", v.path) + } + v.active.mounted = false + } + } + return nil +} + +func validateOpts(opts map[string]string) error { + for opt := range opts { + if !validOpts[opt] { + return validationError{fmt.Errorf("invalid option key: %q", opt)} + } + } + return nil +} + +func (v *localVolume) Status() map[string]interface{} { + return nil +} + +// getAddress finds out address/hostname from options +func getAddress(opts string) string { + optsList := strings.Split(opts, ",") + for i := 0; i < len(optsList); i++ { + if strings.HasPrefix(optsList[i], "addr=") { + addr := (strings.SplitN(optsList[i], "=", 2)[1]) + return addr + } + } + return "" +} diff --git a/vendor/github.com/docker/docker/volume/local/local_test.go b/vendor/github.com/docker/docker/volume/local/local_test.go new file mode 100644 index 0000000..f5a519b --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_test.go @@ -0,0 +1,344 @@ +package local + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/mount" +) + +func TestGetAddress(t *testing.T) { + cases := map[string]string{ + "addr=11.11.11.1": "11.11.11.1", + " ": "", + "addr=": "", + "addr=2001:db8::68": "2001:db8::68", + } + for name, success := range cases { + v := getAddress(name) + if v != success { + t.Errorf("Test case failed for %s actual: %s expected : %s", name, v, success) + } + } + +} + +func TestRemove(t *testing.T) { + // TODO Windows: Investigate why this test fails on Windows under CI + // but passes locally. + if runtime.GOOS == "windows" { + t.Skip("Test failing on Windows CI") + } + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + vol, err = r.Create("testing2", nil) + if err != nil { + t.Fatal(err) + } + if err := os.RemoveAll(vol.Path()); err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(vol.Path()); err != nil && !os.IsNotExist(err) { + t.Fatal("volume dir not removed") + } + + if l, _ := r.List(); len(l) != 0 { + t.Fatal("expected there to be no volumes") + } +} + +func TestInitializeWithVolumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + v, err := r.Get(vol.Name()) + if err != nil { + t.Fatal(err) + } + + if v.Path() != vol.Path() { + t.Fatal("expected to re-initialize root with existing volumes") + } +} + +func TestCreate(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + cases := map[string]bool{ + "name": true, + "name-with-dash": true, + "name_with_underscore": true, + "name/with/slash": false, + "name/with/../../slash": false, + "./name": false, + "../name": false, + "./": false, + "../": false, + "~": false, + ".": false, + "..": false, + "...": false, + } + + for name, success := range cases { + v, err := r.Create(name, nil) + if success { + if err != nil { + t.Fatal(err) + } + if v.Name() != name { + t.Fatalf("Expected volume with name %s, got %s", name, v.Name()) + } + } else { + if err == nil { + t.Fatalf("Expected error creating volume with name %s, got nil", name) + } + } + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } +} + +func TestValidateName(t *testing.T) { + r := &Root{} + names := map[string]bool{ + "x": false, + "/testvol": false, + "thing.d": true, + "hello-world": true, + "./hello": false, + ".hello": false, + } + + for vol, expected := range names { + err := r.validateName(vol) + if expected && err != nil { + t.Fatalf("expected %s to be valid got %v", vol, err) + } + if !expected && err == nil { + t.Fatalf("expected %s to be invalid", vol) + } + } +} + +func TestCreateWithOpts(t *testing.T) { + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip() + } + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test", map[string]string{"invalidopt": "notsupported"}); err == nil { + t.Fatal("expected invalid opt to cause error") + } + + vol, err := r.Create("test", map[string]string{"device": "tmpfs", "type": "tmpfs", "o": "size=1m,uid=1000"}) + if err != nil { + t.Fatal(err) + } + v := vol.(*localVolume) + + dir, err := v.Mount("1234") + if err != nil { + t.Fatal(err) + } + defer func() { + if err := v.Unmount("1234"); err != nil { + t.Fatal(err) + } + }() + + mountInfos, err := mount.GetMounts() + if err != nil { + t.Fatal(err) + } + + var found bool + for _, info := range mountInfos { + if info.Mountpoint == dir { + found = true + if info.Fstype != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Fstype) + } + if info.Source != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Source) + } + if !strings.Contains(info.VfsOpts, "uid=1000") { + t.Fatalf("expected mount info to have uid=1000: %q", info.VfsOpts) + } + if !strings.Contains(info.VfsOpts, "size=1024k") { + t.Fatalf("expected mount info to have size=1024k: %q", info.VfsOpts) + } + break + } + } + + if !found { + t.Fatal("mount not found") + } + + if v.active.count != 1 { + t.Fatalf("Expected active mount count to be 1, got %d", v.active.count) + } + + // test double mount + if _, err := v.Mount("1234"); err != nil { + t.Fatal(err) + } + if v.active.count != 2 { + t.Fatalf("Expected active mount count to be 2, got %d", v.active.count) + } + + if err := v.Unmount("1234"); err != nil { + t.Fatal(err) + } + if v.active.count != 1 { + t.Fatalf("Expected active mount count to be 1, got %d", v.active.count) + } + + mounted, err := mount.Mounted(v.path) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatal("expected mount to still be active") + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + v2, exists := r.volumes["test"] + if !exists { + t.Fatal("missing volume on restart") + } + + if !reflect.DeepEqual(v.opts, v2.opts) { + t.Fatal("missing volume options on restart") + } +} + +func TestRealodNoOpts(t *testing.T) { + rootDir, err := ioutil.TempDir("", "volume-test-reload-no-opts") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test1", nil); err != nil { + t.Fatal(err) + } + if _, err := r.Create("test2", nil); err != nil { + t.Fatal(err) + } + // make sure a file with `null` (.e.g. empty opts map from older daemon) is ok + if err := ioutil.WriteFile(filepath.Join(rootDir, "test2"), []byte("null"), 600); err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test3", nil); err != nil { + t.Fatal(err) + } + // make sure an empty opts file doesn't break us too + if err := ioutil.WriteFile(filepath.Join(rootDir, "test3"), nil, 600); err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test4", map[string]string{}); err != nil { + t.Fatal(err) + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + for _, name := range []string{"test1", "test2", "test3", "test4"} { + v, err := r.Get(name) + if err != nil { + t.Fatal(err) + } + lv, ok := v.(*localVolume) + if !ok { + t.Fatalf("expected *localVolume got: %v", reflect.TypeOf(v)) + } + if lv.opts != nil { + t.Fatalf("expected opts to be nil, got: %v", lv.opts) + } + if _, err := lv.Mount("1234"); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/volume/local/local_unix.go b/vendor/github.com/docker/docker/volume/local/local_unix.go new file mode 100644 index 0000000..fb08862 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_unix.go @@ -0,0 +1,87 @@ +// +build linux freebsd solaris + +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "fmt" + "net" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "github.com/docker/docker/pkg/mount" +) + +var ( + oldVfsDir = filepath.Join("vfs", "dir") + + validOpts = map[string]bool{ + "type": true, // specify the filesystem type for mount, e.g. nfs + "o": true, // generic mount options + "device": true, // device to mount from + } +) + +type optsConfig struct { + MountType string + MountOpts string + MountDevice string +} + +func (o *optsConfig) String() string { + return fmt.Sprintf("type='%s' device='%s' o='%s'", o.MountType, o.MountDevice, o.MountOpts) +} + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + // Volumes path for Docker version >= 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + + // Volumes path for Docker version < 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { + return true + } + + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) == 0 { + return nil + } + if err := validateOpts(opts); err != nil { + return err + } + + v.opts = &optsConfig{ + MountType: opts["type"], + MountOpts: opts["o"], + MountDevice: opts["device"], + } + return nil +} + +func (v *localVolume) mount() error { + if v.opts.MountDevice == "" { + return fmt.Errorf("missing device in volume options") + } + mountOpts := v.opts.MountOpts + if v.opts.MountType == "nfs" { + if addrValue := getAddress(v.opts.MountOpts); addrValue != "" && net.ParseIP(addrValue).To4() == nil { + ipAddr, err := net.ResolveIPAddr("ip", addrValue) + if err != nil { + return errors.Wrapf(err, "error resolving passed in nfs address") + } + mountOpts = strings.Replace(mountOpts, "addr="+addrValue, "addr="+ipAddr.String(), 1) + } + } + err := mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, mountOpts) + return errors.Wrapf(err, "error while mounting volume with options: %s", v.opts) +} diff --git a/vendor/github.com/docker/docker/volume/local/local_windows.go b/vendor/github.com/docker/docker/volume/local/local_windows.go new file mode 100644 index 0000000..1bdb368 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_windows.go @@ -0,0 +1,34 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "fmt" + "path/filepath" + "strings" +) + +type optsConfig struct{} + +var validOpts map[string]bool + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) > 0 { + return fmt.Errorf("options are not supported on this platform") + } + return nil +} + +func (v *localVolume) mount() error { + return nil +} diff --git a/vendor/github.com/docker/docker/volume/store/db.go b/vendor/github.com/docker/docker/volume/store/db.go new file mode 100644 index 0000000..c5fd164 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/db.go @@ -0,0 +1,88 @@ +package store + +import ( + "encoding/json" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/pkg/errors" +) + +var volumeBucketName = []byte("volumes") + +type volumeMetadata struct { + Name string + Driver string + Labels map[string]string + Options map[string]string +} + +func (s *VolumeStore) setMeta(name string, meta volumeMetadata) error { + return s.db.Update(func(tx *bolt.Tx) error { + return setMeta(tx, name, meta) + }) +} + +func setMeta(tx *bolt.Tx, name string, meta volumeMetadata) error { + metaJSON, err := json.Marshal(meta) + if err != nil { + return err + } + b := tx.Bucket(volumeBucketName) + return errors.Wrap(b.Put([]byte(name), metaJSON), "error setting volume metadata") +} + +func (s *VolumeStore) getMeta(name string) (volumeMetadata, error) { + var meta volumeMetadata + err := s.db.View(func(tx *bolt.Tx) error { + return getMeta(tx, name, &meta) + }) + return meta, err +} + +func getMeta(tx *bolt.Tx, name string, meta *volumeMetadata) error { + b := tx.Bucket(volumeBucketName) + val := b.Get([]byte(name)) + if string(val) == "" { + return nil + } + if err := json.Unmarshal(val, meta); err != nil { + return errors.Wrap(err, "error unmarshaling volume metadata") + } + return nil +} + +func (s *VolumeStore) removeMeta(name string) error { + return s.db.Update(func(tx *bolt.Tx) error { + return removeMeta(tx, name) + }) +} + +func removeMeta(tx *bolt.Tx, name string) error { + b := tx.Bucket(volumeBucketName) + return errors.Wrap(b.Delete([]byte(name)), "error removing volume metadata") +} + +// listMeta is used during restore to get the list of volume metadata +// from the on-disk database. +// Any errors that occur are only logged. +func listMeta(tx *bolt.Tx) []volumeMetadata { + var ls []volumeMetadata + b := tx.Bucket(volumeBucketName) + b.ForEach(func(k, v []byte) error { + if len(v) == 0 { + // don't try to unmarshal an empty value + return nil + } + + var m volumeMetadata + if err := json.Unmarshal(v, &m); err != nil { + // Just log the error + logrus.Errorf("Error while reading volume metadata for volume %q: %v", string(k), err) + return nil + } + ls = append(ls, m) + return nil + }) + return ls +} diff --git a/vendor/github.com/docker/docker/volume/store/errors.go b/vendor/github.com/docker/docker/volume/store/errors.go new file mode 100644 index 0000000..980175f --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/errors.go @@ -0,0 +1,76 @@ +package store + +import ( + "strings" + + "github.com/pkg/errors" +) + +var ( + // errVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container + errVolumeInUse = errors.New("volume is in use") + // errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store + errNoSuchVolume = errors.New("no such volume") + // errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform + errInvalidName = errors.New("volume name is not valid on this platform") + // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver + errNameConflict = errors.New("volume name must be unique") +) + +// OpErr is the error type returned by functions in the store package. It describes +// the operation, volume name, and error. +type OpErr struct { + // Err is the error that occurred during the operation. + Err error + // Op is the operation which caused the error, such as "create", or "list". + Op string + // Name is the name of the resource being requested for this op, typically the volume name or the driver name. + Name string + // Refs is the list of references associated with the resource. + Refs []string +} + +// Error satisfies the built-in error interface type. +func (e *OpErr) Error() string { + if e == nil { + return "" + } + s := e.Op + if e.Name != "" { + s = s + " " + e.Name + } + + s = s + ": " + e.Err.Error() + if len(e.Refs) > 0 { + s = s + " - " + "[" + strings.Join(e.Refs, ", ") + "]" + } + return s +} + +// IsInUse returns a boolean indicating whether the error indicates that a +// volume is in use +func IsInUse(err error) bool { + return isErr(err, errVolumeInUse) +} + +// IsNotExist returns a boolean indicating whether the error indicates that the volume does not exist +func IsNotExist(err error) bool { + return isErr(err, errNoSuchVolume) +} + +// IsNameConflict returns a boolean indicating whether the error indicates that a +// volume name is already taken +func IsNameConflict(err error) bool { + return isErr(err, errNameConflict) +} + +func isErr(err error, expected error) bool { + err = errors.Cause(err) + switch pe := err.(type) { + case nil: + return false + case *OpErr: + err = errors.Cause(pe.Err) + } + return err == expected +} diff --git a/vendor/github.com/docker/docker/volume/store/restore.go b/vendor/github.com/docker/docker/volume/store/restore.go new file mode 100644 index 0000000..c0c5b51 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/restore.go @@ -0,0 +1,83 @@ +package store + +import ( + "sync" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +// restore is called when a new volume store is created. +// It's primary purpose is to ensure that all drivers' refcounts are set based +// on known volumes after a restart. +// This only attempts to track volumes that are actually stored in the on-disk db. +// It does not probe the available drivers to find anything that may have been added +// out of band. +func (s *VolumeStore) restore() { + var ls []volumeMetadata + s.db.View(func(tx *bolt.Tx) error { + ls = listMeta(tx) + return nil + }) + + chRemove := make(chan *volumeMetadata, len(ls)) + var wg sync.WaitGroup + for _, meta := range ls { + wg.Add(1) + // this is potentially a very slow operation, so do it in a goroutine + go func(meta volumeMetadata) { + defer wg.Done() + + var v volume.Volume + var err error + if meta.Driver != "" { + v, err = lookupVolume(meta.Driver, meta.Name) + if err != nil && err != errNoSuchVolume { + logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", meta.Name).Warn("Error restoring volume") + return + } + if v == nil { + // doesn't exist in the driver, remove it from the db + chRemove <- &meta + return + } + } else { + v, err = s.getVolume(meta.Name) + if err != nil { + if err == errNoSuchVolume { + chRemove <- &meta + } + return + } + + meta.Driver = v.DriverName() + if err := s.setMeta(v.Name(), meta); err != nil { + logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", v.Name()).Warn("Error updating volume metadata on restore") + } + } + + // increment driver refcount + volumedrivers.CreateDriver(meta.Driver) + + // cache the volume + s.globalLock.Lock() + s.options[v.Name()] = meta.Options + s.labels[v.Name()] = meta.Labels + s.names[v.Name()] = v + s.globalLock.Unlock() + }(meta) + } + + wg.Wait() + close(chRemove) + s.db.Update(func(tx *bolt.Tx) error { + for meta := range chRemove { + if err := removeMeta(tx, meta.Name); err != nil { + logrus.WithField("volume", meta.Name).Warnf("Error removing stale entry from volume db: %v", err) + } + } + return nil + }) +} diff --git a/vendor/github.com/docker/docker/volume/store/store.go b/vendor/github.com/docker/docker/volume/store/store.go new file mode 100644 index 0000000..38afd86 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store.go @@ -0,0 +1,649 @@ +package store + +import ( + "net" + "os" + "path/filepath" + "sync" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +const ( + volumeDataDir = "volumes" +) + +type volumeWrapper struct { + volume.Volume + labels map[string]string + scope string + options map[string]string +} + +func (v volumeWrapper) Options() map[string]string { + options := map[string]string{} + for key, value := range v.options { + options[key] = value + } + return options +} + +func (v volumeWrapper) Labels() map[string]string { + return v.labels +} + +func (v volumeWrapper) Scope() string { + return v.scope +} + +func (v volumeWrapper) CachedPath() string { + if vv, ok := v.Volume.(interface { + CachedPath() string + }); ok { + return vv.CachedPath() + } + return v.Volume.Path() +} + +// New initializes a VolumeStore to keep +// reference counting of volumes in the system. +func New(rootPath string) (*VolumeStore, error) { + vs := &VolumeStore{ + locks: &locker.Locker{}, + names: make(map[string]volume.Volume), + refs: make(map[string][]string), + labels: make(map[string]map[string]string), + options: make(map[string]map[string]string), + } + + if rootPath != "" { + // initialize metadata store + volPath := filepath.Join(rootPath, volumeDataDir) + if err := os.MkdirAll(volPath, 750); err != nil { + return nil, err + } + + dbPath := filepath.Join(volPath, "metadata.db") + + var err error + vs.db, err = bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, errors.Wrap(err, "error while opening volume store metadata database") + } + + // initialize volumes bucket + if err := vs.db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists(volumeBucketName); err != nil { + return errors.Wrap(err, "error while setting up volume store metadata database") + } + return nil + }); err != nil { + return nil, err + } + } + + vs.restore() + + return vs, nil +} + +func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) { + s.globalLock.RLock() + v, exists := s.names[name] + s.globalLock.RUnlock() + return v, exists +} + +func (s *VolumeStore) setNamed(v volume.Volume, ref string) { + s.globalLock.Lock() + s.names[v.Name()] = v + if len(ref) > 0 { + s.refs[v.Name()] = append(s.refs[v.Name()], ref) + } + s.globalLock.Unlock() +} + +// getRefs gets the list of refs for a given name +// Callers of this function are expected to hold the name lock. +func (s *VolumeStore) getRefs(name string) []string { + s.globalLock.RLock() + refs := s.refs[name] + s.globalLock.RUnlock() + return refs +} + +// Purge allows the cleanup of internal data on docker in case +// the internal data is out of sync with volumes driver plugins. +func (s *VolumeStore) Purge(name string) { + s.globalLock.Lock() + v, exists := s.names[name] + if exists { + if _, err := volumedrivers.RemoveDriver(v.DriverName()); err != nil { + logrus.Error("Error dereferencing volume driver: %v", err) + } + } + if err := s.removeMeta(name); err != nil { + logrus.Errorf("Error removing volume metadata for volume %q: %v", name, err) + } + delete(s.names, name) + delete(s.refs, name) + delete(s.labels, name) + delete(s.options, name) + s.globalLock.Unlock() +} + +// VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts +type VolumeStore struct { + // locks ensures that only one action is being performed on a particular volume at a time without locking the entire store + // since actions on volumes can be quite slow, this ensures the store is free to handle requests for other volumes. + locks *locker.Locker + // globalLock is used to protect access to mutable structures used by the store object + globalLock sync.RWMutex + // names stores the volume name -> volume relationship. + // This is used for making lookups faster so we don't have to probe all drivers + names map[string]volume.Volume + // refs stores the volume name and the list of things referencing it + refs map[string][]string + // labels stores volume labels for each volume + labels map[string]map[string]string + // options stores volume options for each volume + options map[string]map[string]string + db *bolt.DB +} + +// List proxies to all registered volume drivers to get the full list of volumes +// If a driver returns a volume that has name which conflicts with another volume from a different driver, +// the first volume is chosen and the conflicting volume is dropped. +func (s *VolumeStore) List() ([]volume.Volume, []string, error) { + vols, warnings, err := s.list() + if err != nil { + return nil, nil, &OpErr{Err: err, Op: "list"} + } + var out []volume.Volume + + for _, v := range vols { + name := normaliseVolumeName(v.Name()) + + s.locks.Lock(name) + storedV, exists := s.getNamed(name) + // Note: it's not safe to populate the cache here because the volume may have been + // deleted before we acquire a lock on its name + if exists && storedV.DriverName() != v.DriverName() { + logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) + s.locks.Unlock(v.Name()) + continue + } + + out = append(out, v) + s.locks.Unlock(v.Name()) + } + return out, warnings, nil +} + +// list goes through each volume driver and asks for its list of volumes. +func (s *VolumeStore) list() ([]volume.Volume, []string, error) { + var ( + ls []volume.Volume + warnings []string + ) + + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, nil, err + } + + type vols struct { + vols []volume.Volume + err error + driverName string + } + chVols := make(chan vols, len(drivers)) + + for _, vd := range drivers { + go func(d volume.Driver) { + vs, err := d.List() + if err != nil { + chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} + return + } + for i, v := range vs { + s.globalLock.RLock() + vs[i] = volumeWrapper{v, s.labels[v.Name()], d.Scope(), s.options[v.Name()]} + s.globalLock.RUnlock() + } + + chVols <- vols{vols: vs} + }(vd) + } + + badDrivers := make(map[string]struct{}) + for i := 0; i < len(drivers); i++ { + vs := <-chVols + + if vs.err != nil { + warnings = append(warnings, vs.err.Error()) + badDrivers[vs.driverName] = struct{}{} + logrus.Warn(vs.err) + } + ls = append(ls, vs.vols...) + } + + if len(badDrivers) > 0 { + s.globalLock.RLock() + for _, v := range s.names { + if _, exists := badDrivers[v.DriverName()]; exists { + ls = append(ls, v) + } + } + s.globalLock.RUnlock() + } + return ls, warnings, nil +} + +// CreateWithRef creates a volume with the given name and driver and stores the ref +// This ensures there's no race between creating a volume and then storing a reference. +func (s *VolumeStore) CreateWithRef(name, driverName, ref string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + + s.setNamed(v, ref) + return v, nil +} + +// Create creates a volume with the given name and driver. +// This is just like CreateWithRef() except we don't store a reference while holding the lock. +func (s *VolumeStore) Create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + return s.CreateWithRef(name, driverName, "", opts, labels) +} + +// checkConflict checks the local cache for name collisions with the passed in name, +// for existing volumes with the same name but in a different driver. +// This is used by `Create` as a best effort to prevent name collisions for volumes. +// If a matching volume is found that is not a conflict that is returned so the caller +// does not need to perform an additional lookup. +// When no matching volume is found, both returns will be nil +// +// Note: This does not probe all the drivers for name collisions because v1 plugins +// are very slow, particularly if the plugin is down, and cause other issues, +// particularly around locking the store. +// TODO(cpuguy83): With v2 plugins this shouldn't be a problem. Could also potentially +// use a connect timeout for this kind of check to ensure we aren't blocking for a +// long time. +func (s *VolumeStore) checkConflict(name, driverName string) (volume.Volume, error) { + // check the local cache + v, _ := s.getNamed(name) + if v == nil { + return nil, nil + } + + vDriverName := v.DriverName() + var conflict bool + if driverName != "" { + // Retrieve canonical driver name to avoid inconsistencies (for example + // "plugin" vs. "plugin:latest") + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, err + } + + if vDriverName != vd.Name() { + conflict = true + } + } + + // let's check if the found volume ref + // is stale by checking with the driver if it still exists + exists, err := volumeExists(v) + if err != nil { + return nil, errors.Wrapf(errNameConflict, "found reference to volume '%s' in driver '%s', but got an error while checking the driver: %v", name, vDriverName, err) + } + + if exists { + if conflict { + return nil, errors.Wrapf(errNameConflict, "driver '%s' already has volume '%s'", vDriverName, name) + } + return v, nil + } + + if len(s.getRefs(v.Name())) > 0 { + // Containers are referencing this volume but it doesn't seem to exist anywhere. + // Return a conflict error here, the user can fix this with `docker volume rm -f` + return nil, errors.Wrapf(errNameConflict, "found references to volume '%s' in driver '%s' but the volume was not found in the driver -- you may need to remove containers referencing this volume or force remove the volume to re-create it", name, vDriverName) + } + + // doesn't exist, so purge it from the cache + s.Purge(name) + return nil, nil +} + +// volumeExists returns if the volume is still present in the driver. +// An error is returned if there was an issue communicating with the driver. +func volumeExists(v volume.Volume) (bool, error) { + exists, err := lookupVolume(v.DriverName(), v.Name()) + if err != nil { + return false, err + } + return exists != nil, nil +} + +// create asks the given driver to create a volume with the name/opts. +// If a volume with the name is already known, it will ask the stored driver for the volume. +// If the passed in driver name does not match the driver name which is stored +// for the given volume name, an error is returned after checking if the reference is stale. +// If the reference is stale, it will be purged and this create can continue. +// It is expected that callers of this function hold any necessary locks. +func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + // Validate the name in a platform-specific manner + valid, err := volume.IsVolumeNameValid(name) + if err != nil { + return nil, err + } + if !valid { + return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"} + } + + v, err := s.checkConflict(name, driverName) + if err != nil { + return nil, err + } + + if v != nil { + return v, nil + } + + // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name + if driverName == "" { + v, _ := s.getVolume(name) + if v != nil { + return v, nil + } + } + + vd, err := volumedrivers.CreateDriver(driverName) + + if err != nil { + return nil, &OpErr{Op: "create", Name: name, Err: err} + } + + logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) + + if v, _ := vd.Get(name); v != nil { + return v, nil + } + v, err = vd.Create(name, opts) + if err != nil { + return nil, err + } + s.globalLock.Lock() + s.labels[name] = labels + s.options[name] = opts + s.globalLock.Unlock() + + metadata := volumeMetadata{ + Name: name, + Driver: vd.Name(), + Labels: labels, + Options: opts, + } + + if err := s.setMeta(name, metadata); err != nil { + return nil, err + } + return volumeWrapper{v, labels, vd.Scope(), opts}, nil +} + +// GetWithRef gets a volume with the given name from the passed in driver and stores the ref +// This is just like Get(), but we store the reference while holding the lock. +// This makes sure there are no races between checking for the existence of a volume and adding a reference for it +func (s *VolumeStore) GetWithRef(name, driverName, ref string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + v, err := vd.Get(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + s.setNamed(v, ref) + + s.globalLock.RLock() + defer s.globalLock.RUnlock() + return volumeWrapper{v, s.labels[name], vd.Scope(), s.options[name]}, nil +} + +// Get looks if a volume with the given name exists and returns it if so +func (s *VolumeStore) Get(name string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.getVolume(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + s.setNamed(v, "") + return v, nil +} + +// getVolume requests the volume, if the driver info is stored it just accesses that driver, +// if the driver is unknown it probes all drivers until it finds the first volume with that name. +// it is expected that callers of this function hold any necessary locks +func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { + var meta volumeMetadata + meta, err := s.getMeta(name) + if err != nil { + return nil, err + } + + driverName := meta.Driver + if driverName == "" { + s.globalLock.RLock() + v, exists := s.names[name] + s.globalLock.RUnlock() + if exists { + meta.Driver = v.DriverName() + if err := s.setMeta(name, meta); err != nil { + return nil, err + } + } + } + + if meta.Driver != "" { + vol, err := lookupVolume(meta.Driver, name) + if err != nil { + return nil, err + } + if vol == nil { + s.Purge(name) + return nil, errNoSuchVolume + } + + var scope string + vd, err := volumedrivers.GetDriver(meta.Driver) + if err == nil { + scope = vd.Scope() + } + return volumeWrapper{vol, meta.Labels, scope, meta.Options}, nil + } + + logrus.Debugf("Probing all drivers for volume with name: %s", name) + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, err + } + + for _, d := range drivers { + v, err := d.Get(name) + if err != nil || v == nil { + continue + } + meta.Driver = v.DriverName() + if err := s.setMeta(name, meta); err != nil { + return nil, err + } + return volumeWrapper{v, meta.Labels, d.Scope(), meta.Options}, nil + } + return nil, errNoSuchVolume +} + +// lookupVolume gets the specified volume from the specified driver. +// This will only return errors related to communications with the driver. +// If the driver returns an error that is not communication related the +// error is logged but not returned. +// If the volume is not found it will return `nil, nil`` +func lookupVolume(driverName, volumeName string) (volume.Volume, error) { + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) + } + v, err := vd.Get(volumeName) + if err != nil { + err = errors.Cause(err) + if _, ok := err.(net.Error); ok { + return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", v.Name(), v.DriverName()) + } + + // At this point, the error could be anything from the driver, such as "no such volume" + // Let's not check an error here, and instead check if the driver returned a volume + logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Warnf("Error while looking up volume") + } + return v, nil +} + +// Remove removes the requested volume. A volume is not removed if it has any refs +func (s *VolumeStore) Remove(v volume.Volume) error { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + refs := s.getRefs(name) + if len(refs) > 0 { + return &OpErr{Err: errVolumeInUse, Name: v.Name(), Op: "remove", Refs: refs} + } + + vd, err := volumedrivers.GetDriver(v.DriverName()) + if err != nil { + return &OpErr{Err: err, Name: vd.Name(), Op: "remove"} + } + + logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) + vol := unwrapVolume(v) + if err := vd.Remove(vol); err != nil { + return &OpErr{Err: err, Name: name, Op: "remove"} + } + + s.Purge(name) + return nil +} + +// Dereference removes the specified reference to the volume +func (s *VolumeStore) Dereference(v volume.Volume, ref string) { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + s.globalLock.Lock() + defer s.globalLock.Unlock() + var refs []string + + for _, r := range s.refs[v.Name()] { + if r != ref { + refs = append(refs, r) + } + } + s.refs[v.Name()] = refs +} + +// Refs gets the current list of refs for the given volume +func (s *VolumeStore) Refs(v volume.Volume) []string { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + refs := s.getRefs(v.Name()) + refsOut := make([]string, len(refs)) + copy(refsOut, refs) + return refsOut +} + +// FilterByDriver returns the available volumes filtered by driver name +func (s *VolumeStore) FilterByDriver(name string) ([]volume.Volume, error) { + vd, err := volumedrivers.GetDriver(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + ls, err := vd.List() + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + for i, v := range ls { + options := map[string]string{} + s.globalLock.RLock() + for key, value := range s.options[v.Name()] { + options[key] = value + } + ls[i] = volumeWrapper{v, s.labels[v.Name()], vd.Scope(), options} + s.globalLock.RUnlock() + } + return ls, nil +} + +// FilterByUsed returns the available volumes filtered by if they are in use or not. +// `used=true` returns only volumes that are being used, while `used=false` returns +// only volumes that are not being used. +func (s *VolumeStore) FilterByUsed(vols []volume.Volume, used bool) []volume.Volume { + return s.filter(vols, func(v volume.Volume) bool { + s.locks.Lock(v.Name()) + l := len(s.getRefs(v.Name())) + s.locks.Unlock(v.Name()) + if (used && l > 0) || (!used && l == 0) { + return true + } + return false + }) +} + +// filterFunc defines a function to allow filter volumes in the store +type filterFunc func(vol volume.Volume) bool + +// filter returns the available volumes filtered by a filterFunc function +func (s *VolumeStore) filter(vols []volume.Volume, f filterFunc) []volume.Volume { + var ls []volume.Volume + for _, v := range vols { + if f(v) { + ls = append(ls, v) + } + } + return ls +} + +func unwrapVolume(v volume.Volume) volume.Volume { + if vol, ok := v.(volumeWrapper); ok { + return vol.Volume + } + + return v +} + +// Shutdown releases all resources used by the volume store +// It does not make any changes to volumes, drivers, etc. +func (s *VolumeStore) Shutdown() error { + return s.db.Close() +} diff --git a/vendor/github.com/docker/docker/volume/store/store_test.go b/vendor/github.com/docker/docker/volume/store/store_test.go new file mode 100644 index 0000000..b52f720 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_test.go @@ -0,0 +1,234 @@ +package store + +import ( + "errors" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/volume/drivers" + volumetestutils "github.com/docker/docker/volume/testutils" +) + +func TestCreate(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + defer volumedrivers.Unregister("fake") + dir, err := ioutil.TempDir("", "test-create") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + v, err := s.Create("fake1", "fake", nil, nil) + if err != nil { + t.Fatal(err) + } + if v.Name() != "fake1" { + t.Fatalf("Expected fake1 volume, got %v", v) + } + if l, _, _ := s.List(); len(l) != 1 { + t.Fatalf("Expected 1 volume in the store, got %v: %v", len(l), l) + } + + if _, err := s.Create("none", "none", nil, nil); err == nil { + t.Fatalf("Expected unknown driver error, got nil") + } + + _, err = s.Create("fakeerror", "fake", map[string]string{"error": "create error"}, nil) + expected := &OpErr{Op: "create", Name: "fakeerror", Err: errors.New("create error")} + if err != nil && err.Error() != expected.Error() { + t.Fatalf("Expected create fakeError: create error, got %v", err) + } +} + +func TestRemove(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("noop") + dir, err := ioutil.TempDir("", "test-remove") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + // doing string compare here since this error comes directly from the driver + expected := "no such volume" + if err := s.Remove(volumetestutils.NoopVolume{}); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Expected error %q, got %v", expected, err) + } + + v, err := s.CreateWithRef("fake1", "fake", "fake", nil, nil) + if err != nil { + t.Fatal(err) + } + + if err := s.Remove(v); !IsInUse(err) { + t.Fatalf("Expected ErrVolumeInUse error, got %v", err) + } + s.Dereference(v, "fake") + if err := s.Remove(v); err != nil { + t.Fatal(err) + } + if l, _, _ := s.List(); len(l) != 0 { + t.Fatalf("Expected 0 volumes in the store, got %v, %v", len(l), l) + } +} + +func TestList(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("fake2"), "fake2") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("fake2") + dir, err := ioutil.TempDir("", "test-list") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + if _, err := s.Create("test", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("test2", "fake2", nil, nil); err != nil { + t.Fatal(err) + } + + ls, _, err := s.List() + if err != nil { + t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } + if err := s.Shutdown(); err != nil { + t.Fatal(err) + } + + // and again with a new store + s, err = New(dir) + if err != nil { + t.Fatal(err) + } + ls, _, err = s.List() + if err != nil { + t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } +} + +func TestFilterByDriver(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("noop") + dir, err := ioutil.TempDir("", "test-filter-driver") + if err != nil { + t.Fatal(err) + } + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + if _, err := s.Create("fake1", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake2", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake3", "noop", nil, nil); err != nil { + t.Fatal(err) + } + + if l, _ := s.FilterByDriver("fake"); len(l) != 2 { + t.Fatalf("Expected 2 volumes, got %v, %v", len(l), l) + } + + if l, _ := s.FilterByDriver("noop"); len(l) != 1 { + t.Fatalf("Expected 1 volume, got %v, %v", len(l), l) + } +} + +func TestFilterByUsed(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + dir, err := ioutil.TempDir("", "test-filter-used") + if err != nil { + t.Fatal(err) + } + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + if _, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake2", "fake", nil, nil); err != nil { + t.Fatal(err) + } + + vols, _, err := s.List() + if err != nil { + t.Fatal(err) + } + + dangling := s.FilterByUsed(vols, false) + if len(dangling) != 1 { + t.Fatalf("expected 1 danging volume, got %v", len(dangling)) + } + if dangling[0].Name() != "fake2" { + t.Fatalf("expected danging volume fake2, got %s", dangling[0].Name()) + } + + used := s.FilterByUsed(vols, true) + if len(used) != 1 { + t.Fatalf("expected 1 used volume, got %v", len(used)) + } + if used[0].Name() != "fake1" { + t.Fatalf("expected used volume fake1, got %s", used[0].Name()) + } +} + +func TestDerefMultipleOfSameRef(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + dir, err := ioutil.TempDir("", "test-same-deref") + if err != nil { + t.Fatal(err) + } + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + v, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil) + if err != nil { + t.Fatal(err) + } + + if _, err := s.GetWithRef("fake1", "fake", "volReference"); err != nil { + t.Fatal(err) + } + + s.Dereference(v, "volReference") + if err := s.Remove(v); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/volume/store/store_unix.go b/vendor/github.com/docker/docker/volume/store/store_unix.go new file mode 100644 index 0000000..8ebc1f2 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd solaris + +package store + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. This is a no-op on Unix-like platforms +func normaliseVolumeName(name string) string { + return name +} diff --git a/vendor/github.com/docker/docker/volume/store/store_windows.go b/vendor/github.com/docker/docker/volume/store/store_windows.go new file mode 100644 index 0000000..8601cdd --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_windows.go @@ -0,0 +1,12 @@ +package store + +import "strings" + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. On Windows, as NTFS is case insensitive, under +// c:\ProgramData\Docker\Volumes\, the folders John and john would be synonymous. +// Hence we can't allow the volume "John" and "john" to be created as separate +// volumes. +func normaliseVolumeName(name string) string { + return strings.ToLower(name) +} diff --git a/vendor/github.com/docker/docker/volume/testutils/testutils.go b/vendor/github.com/docker/docker/volume/testutils/testutils.go new file mode 100644 index 0000000..2dbac02 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/testutils/testutils.go @@ -0,0 +1,116 @@ +package testutils + +import ( + "fmt" + + "github.com/docker/docker/volume" +) + +// NoopVolume is a volume that doesn't perform any operation +type NoopVolume struct{} + +// Name is the name of the volume +func (NoopVolume) Name() string { return "noop" } + +// DriverName is the name of the driver +func (NoopVolume) DriverName() string { return "noop" } + +// Path is the filesystem path to the volume +func (NoopVolume) Path() string { return "noop" } + +// Mount mounts the volume in the container +func (NoopVolume) Mount(_ string) (string, error) { return "noop", nil } + +// Unmount unmounts the volume from the container +func (NoopVolume) Unmount(_ string) error { return nil } + +// Status proivdes low-level details about the volume +func (NoopVolume) Status() map[string]interface{} { return nil } + +// FakeVolume is a fake volume with a random name +type FakeVolume struct { + name string + driverName string +} + +// NewFakeVolume creates a new fake volume for testing +func NewFakeVolume(name string, driverName string) volume.Volume { + return FakeVolume{name: name, driverName: driverName} +} + +// Name is the name of the volume +func (f FakeVolume) Name() string { return f.name } + +// DriverName is the name of the driver +func (f FakeVolume) DriverName() string { return f.driverName } + +// Path is the filesystem path to the volume +func (FakeVolume) Path() string { return "fake" } + +// Mount mounts the volume in the container +func (FakeVolume) Mount(_ string) (string, error) { return "fake", nil } + +// Unmount unmounts the volume from the container +func (FakeVolume) Unmount(_ string) error { return nil } + +// Status proivdes low-level details about the volume +func (FakeVolume) Status() map[string]interface{} { return nil } + +// FakeDriver is a driver that generates fake volumes +type FakeDriver struct { + name string + vols map[string]volume.Volume +} + +// NewFakeDriver creates a new FakeDriver with the specified name +func NewFakeDriver(name string) volume.Driver { + return &FakeDriver{ + name: name, + vols: make(map[string]volume.Volume), + } +} + +// Name is the name of the driver +func (d *FakeDriver) Name() string { return d.name } + +// Create initializes a fake volume. +// It returns an error if the options include an "error" key with a message +func (d *FakeDriver) Create(name string, opts map[string]string) (volume.Volume, error) { + if opts != nil && opts["error"] != "" { + return nil, fmt.Errorf(opts["error"]) + } + v := NewFakeVolume(name, d.name) + d.vols[name] = v + return v, nil +} + +// Remove deletes a volume. +func (d *FakeDriver) Remove(v volume.Volume) error { + if _, exists := d.vols[v.Name()]; !exists { + return fmt.Errorf("no such volume") + } + delete(d.vols, v.Name()) + return nil +} + +// List lists the volumes +func (d *FakeDriver) List() ([]volume.Volume, error) { + var vols []volume.Volume + for _, v := range d.vols { + vols = append(vols, v) + } + return vols, nil +} + +// Get gets the volume +func (d *FakeDriver) Get(name string) (volume.Volume, error) { + if v, exists := d.vols[name]; exists { + return v, nil + } + return nil, fmt.Errorf("no such volume") +} + +// Scope returns the local scope +func (*FakeDriver) Scope() string { + return "local" +} diff --git a/vendor/github.com/docker/docker/volume/validate.go b/vendor/github.com/docker/docker/volume/validate.go new file mode 100644 index 0000000..27a8c5d --- /dev/null +++ b/vendor/github.com/docker/docker/volume/validate.go @@ -0,0 +1,125 @@ +package volume + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/api/types/mount" +) + +var errBindNotExist = errors.New("bind source path does not exist") + +type validateOpts struct { + skipBindSourceCheck bool + skipAbsolutePathCheck bool +} + +func validateMountConfig(mnt *mount.Mount, options ...func(*validateOpts)) error { + opts := validateOpts{} + for _, o := range options { + o(&opts) + } + + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := validateNotRoot(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + if !opts.skipAbsolutePathCheck { + if err := validateAbsolute(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 && len(propagationModes) > 0 { + if _, ok := propagationModes[opts.Propagation]; !ok { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := validateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + // Do not allow binding to non-existent path + if !opts.skipBindSourceCheck { + fi, err := os.Stat(mnt.Source) + if err != nil { + if !os.IsNotExist(err) { + return &errMountConfig{mnt, err} + } + return &errMountConfig{mnt, errBindNotExist} + } + if err := validateStat(fi); err != nil { + return &errMountConfig{mnt, err} + } + } + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + + if len(mnt.Source) != 0 { + if valid, err := IsVolumeNameValid(mnt.Source); !valid { + if err == nil { + err = errors.New("invalid volume name") + } + return &errMountConfig{mnt, err} + } + } + case mount.TypeTmpfs: + if len(mnt.Source) != 0 { + return &errMountConfig{mnt, errExtraField("Source")} + } + if _, err := ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { + return &errMountConfig{mnt, err} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} + +type errMountConfig struct { + mount *mount.Mount + err error +} + +func (e *errMountConfig) Error() string { + return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) +} + +func errExtraField(name string) error { + return fmt.Errorf("field %s must not be specified", name) +} +func errMissingField(name string) error { + return fmt.Errorf("field %s must not be empty", name) +} + +func validateAbsolute(p string) error { + p = convertSlash(p) + if filepath.IsAbs(p) { + return nil + } + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) +} diff --git a/vendor/github.com/docker/docker/volume/validate_test.go b/vendor/github.com/docker/docker/volume/validate_test.go new file mode 100644 index 0000000..8732500 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/validate_test.go @@ -0,0 +1,43 @@ +package volume + +import ( + "errors" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestValidateMount(t *testing.T) { + testDir, err := ioutil.TempDir("", "test-validate-mount") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []struct { + input mount.Mount + expected error + }{ + {mount.Mount{Type: mount.TypeVolume}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath, Source: "hello"}, nil}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, nil}, + {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath}, errMissingField("Source")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath, Source: testSourcePath, VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, + {mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, nil}, + {mount.Mount{Type: "invalid", Target: testDestinationPath}, errors.New("mount type unknown")}, + } + for i, x := range cases { + err := validateMountConfig(&x.input) + if err == nil && x.expected == nil { + continue + } + if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) { + t.Fatalf("expected %q, got %q, case: %d", x.expected, err, i) + } + } +} diff --git a/vendor/github.com/docker/docker/volume/validate_test_unix.go b/vendor/github.com/docker/docker/volume/validate_test_unix.go new file mode 100644 index 0000000..dd1de2f --- /dev/null +++ b/vendor/github.com/docker/docker/volume/validate_test_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package volume + +var ( + testDestinationPath = "/foo" + testSourcePath = "/foo" +) diff --git a/vendor/github.com/docker/docker/volume/validate_test_windows.go b/vendor/github.com/docker/docker/volume/validate_test_windows.go new file mode 100644 index 0000000..d5f86ac --- /dev/null +++ b/vendor/github.com/docker/docker/volume/validate_test_windows.go @@ -0,0 +1,6 @@ +package volume + +var ( + testDestinationPath = `c:\foo` + testSourcePath = `c:\foo` +) diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go new file mode 100644 index 0000000..f3227fe --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume.go @@ -0,0 +1,323 @@ +package volume + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/pkg/errors" +) + +// DefaultDriverName is the driver name used for the driver +// implemented in the local package. +const DefaultDriverName = "local" + +// Scopes define if a volume has is cluster-wide (global) or local only. +// Scopes are returned by the volume driver when it is queried for capabilities and then set on a volume +const ( + LocalScope = "local" + GlobalScope = "global" +) + +// Driver is for creating and removing volumes. +type Driver interface { + // Name returns the name of the volume driver. + Name() string + // Create makes a new volume with the given id. + Create(name string, opts map[string]string) (Volume, error) + // Remove deletes the volume. + Remove(vol Volume) (err error) + // List lists all the volumes the driver has + List() ([]Volume, error) + // Get retrieves the volume with the requested name + Get(name string) (Volume, error) + // Scope returns the scope of the driver (e.g. `global` or `local`). + // Scope determines how the driver is handled at a cluster level + Scope() string +} + +// Capability defines a set of capabilities that a driver is able to handle. +type Capability struct { + // Scope is the scope of the driver, `global` or `local` + // A `global` scope indicates that the driver manages volumes across the cluster + // A `local` scope indicates that the driver only manages volumes resources local to the host + // Scope is declared by the driver + Scope string +} + +// Volume is a place to store data. It is backed by a specific driver, and can be mounted. +type Volume interface { + // Name returns the name of the volume + Name() string + // DriverName returns the name of the driver which owns this volume. + DriverName() string + // Path returns the absolute path to the volume. + Path() string + // Mount mounts the volume and returns the absolute path to + // where it can be consumed. + Mount(id string) (string, error) + // Unmount unmounts the volume when it is no longer in use. + Unmount(id string) error + // Status returns low-level status information about a volume + Status() map[string]interface{} +} + +// DetailedVolume wraps a Volume with user-defined labels, options, and cluster scope (e.g., `local` or `global`) +type DetailedVolume interface { + Labels() map[string]string + Options() map[string]string + Scope() string + Volume +} + +// MountPoint is the intersection point between a volume and a container. It +// specifies which volume is to be used and where inside a container it should +// be mounted. +type MountPoint struct { + // Source is the source path of the mount. + // E.g. `mount --bind /foo /bar`, `/foo` is the `Source`. + Source string + // Destination is the path relative to the container root (`/`) to the mount point + // It is where the `Source` is mounted to + Destination string + // RW is set to true when the mountpoint should be mounted as read-write + RW bool + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name + Name string + // Driver is the volume driver used to create the volume (if it is a volume) + Driver string + // Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount + Type mounttypes.Type `json:",omitempty"` + // Volume is the volume providing data to this mountpoint. + // This is nil unless `Type` is set to `TypeVolume` + Volume Volume `json:"-"` + + // Mode is the comma separated list of options supplied by the user when creating + // the bind/volume mount. + // Note Mode is not used on Windows + Mode string `json:"Relabel,omitempty"` // Originally field was `Relabel`" + + // Propagation describes how the mounts are propagated from the host into the + // mount point, and vice-versa. + // See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // Note Propagation is not used on Windows + Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string + + // Specifies if data should be copied from the container before the first mount + // Use a pointer here so we can tell if the user set this value explicitly + // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated + CopyData bool `json:"-"` + // ID is the opaque ID used to pass to the volume driver. + // This should be set by calls to `Mount` and unset by calls to `Unmount` + ID string `json:",omitempty"` + + // Sepc is a copy of the API request that created this mount. + Spec mounttypes.Mount +} + +// Setup sets up a mount point by either mounting the volume if it is +// configured, or creating the source directory if supplied. +func (m *MountPoint) Setup(mountLabel string, rootUID, rootGID int) (string, error) { + if m.Volume != nil { + id := m.ID + if id == "" { + id = stringid.GenerateNonCryptoID() + } + path, err := m.Volume.Mount(id) + if err != nil { + return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) + } + m.ID = id + return path, nil + } + if len(m.Source) == 0 { + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") + } + // system.MkdirAll() produces an error if m.Source exists and is a file (not a directory), + if m.Type == mounttypes.TypeBind { + // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) + // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it + if err := idtools.MkdirAllNewAs(m.Source, 0755, rootUID, rootGID); err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err != syscall.ENOTDIR { + return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) + } + } + } + } + if label.RelabelNeeded(m.Mode) { + if err := label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode)); err != nil { + return "", errors.Wrapf(err, "error setting label on mount source '%s'", m.Source) + } + } + return m.Source, nil +} + +// Path returns the path of a volume in a mount point. +func (m *MountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + return m.Source +} + +// ParseVolumesFrom ensures that the supplied volumes-from is valid. +func ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !ValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. + if HasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +// ParseMountRaw parses a raw volume spec (e.g. `-v /foo:/bar:shared`) into a +// structured spec. Once the raw spec is parsed it relies on `ParseMountSpec` to +// validate the spec and create a MountPoint +func ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + arr, err := splitRawSpec(convertSlash(raw)) + if err != nil { + return nil, err + } + + var spec mounttypes.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if ValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. eg /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = arr[0] + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = arr[0] + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + + if !ValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + if filepath.IsAbs(spec.Source) { + spec.Type = mounttypes.TypeBind + } else { + spec.Type = mounttypes.TypeVolume + } + + spec.ReadOnly = !ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mounttypes.TypeVolume { + spec.VolumeOptions = &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mounttypes.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + if HasPropagation(mode) { + spec.BindOptions = &mounttypes.BindOptions{ + Propagation: GetPropagation(mode), + } + } + + mp, err := ParseMountSpec(spec, platformRawValidationOpts...) + if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} + +// ParseMountSpec reads a mount config, validates it, and configures a mountpoint from it. +func ParseMountSpec(cfg mounttypes.Mount, options ...func(*validateOpts)) (*MountPoint, error) { + if err := validateMountConfig(&cfg, options...); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: clean(convertSlash(cfg.Target)), + Type: cfg.Type, + Spec: cfg, + } + + switch cfg.Type { + case mounttypes.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = DefaultCopyMode + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mounttypes.TypeBind: + mp.Source = clean(convertSlash(cfg.Source)) + if cfg.BindOptions != nil { + if len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } + } + case mounttypes.TypeTmpfs: + // NOP + } + return mp, nil +} + +func errInvalidMode(mode string) error { + return fmt.Errorf("invalid mode: %v", mode) +} + +func errInvalidSpec(spec string) error { + return fmt.Errorf("invalid volume specification: '%s'", spec) +} diff --git a/vendor/github.com/docker/docker/volume/volume_copy.go b/vendor/github.com/docker/docker/volume/volume_copy.go new file mode 100644 index 0000000..77f06a0 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_copy.go @@ -0,0 +1,23 @@ +package volume + +import "strings" + +// {=isEnabled} +var copyModes = map[string]bool{ + "nocopy": false, +} + +func copyModeExists(mode string) bool { + _, exists := copyModes[mode] + return exists +} + +// GetCopyMode gets the copy mode from the mode string for mounts +func getCopyMode(mode string) (bool, bool) { + for _, o := range strings.Split(mode, ",") { + if isEnabled, exists := copyModes[o]; exists { + return isEnabled, true + } + } + return DefaultCopyMode, false +} diff --git a/vendor/github.com/docker/docker/volume/volume_copy_unix.go b/vendor/github.com/docker/docker/volume/volume_copy_unix.go new file mode 100644 index 0000000..ad66e17 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_copy_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package volume + +const ( + // DefaultCopyMode is the copy mode used by default for normal/named volumes + DefaultCopyMode = true +) diff --git a/vendor/github.com/docker/docker/volume/volume_copy_windows.go b/vendor/github.com/docker/docker/volume/volume_copy_windows.go new file mode 100644 index 0000000..798638c --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_copy_windows.go @@ -0,0 +1,6 @@ +package volume + +const ( + // DefaultCopyMode is the copy mode used by default for normal/named volumes + DefaultCopyMode = false +) diff --git a/vendor/github.com/docker/docker/volume/volume_linux.go b/vendor/github.com/docker/docker/volume/volume_linux.go new file mode 100644 index 0000000..d4b4d80 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_linux.go @@ -0,0 +1,56 @@ +// +build linux + +package volume + +import ( + "fmt" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string +// for mount(2). +func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { + var rawOpts []string + if readOnly { + rawOpts = append(rawOpts, "ro") + } + + if opt != nil && opt.Mode != 0 { + rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) + } + + if opt != nil && opt.SizeBytes != 0 { + // calculate suffix here, making this linux specific, but that is + // okay, since API is that way anyways. + + // we do this by finding the suffix that divides evenly into the + // value, returing the value itself, with no suffix, if it fails. + // + // For the most part, we don't enforce any semantic to this values. + // The operating system will usually align this and enforce minimum + // and maximums. + var ( + size = opt.SizeBytes + suffix string + ) + for _, r := range []struct { + suffix string + divisor int64 + }{ + {"g", 1 << 30}, + {"m", 1 << 20}, + {"k", 1 << 10}, + } { + if size%r.divisor == 0 { + size = size / r.divisor + suffix = r.suffix + break + } + } + + rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) + } + return strings.Join(rawOpts, ","), nil +} diff --git a/vendor/github.com/docker/docker/volume/volume_linux_test.go b/vendor/github.com/docker/docker/volume/volume_linux_test.go new file mode 100644 index 0000000..40ce552 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_linux_test.go @@ -0,0 +1,51 @@ +// +build linux + +package volume + +import ( + "strings" + "testing" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +func TestConvertTmpfsOptions(t *testing.T) { + type testCase struct { + opt mounttypes.TmpfsOptions + readOnly bool + expectedSubstrings []string + unexpectedSubstrings []string + } + cases := []testCase{ + { + opt: mounttypes.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700}, + readOnly: false, + expectedSubstrings: []string{"size=1m", "mode=700"}, + unexpectedSubstrings: []string{"ro"}, + }, + { + opt: mounttypes.TmpfsOptions{}, + readOnly: true, + expectedSubstrings: []string{"ro"}, + unexpectedSubstrings: []string{}, + }, + } + for _, c := range cases { + data, err := ConvertTmpfsOptions(&c.opt, c.readOnly) + if err != nil { + t.Fatalf("could not convert %+v (readOnly: %v) to string: %v", + c.opt, c.readOnly, err) + } + t.Logf("data=%q", data) + for _, s := range c.expectedSubstrings { + if !strings.Contains(data, s) { + t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c) + } + } + for _, s := range c.unexpectedSubstrings { + if strings.Contains(data, s) { + t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c) + } + } + } +} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_linux.go b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go new file mode 100644 index 0000000..1de57ab --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go @@ -0,0 +1,47 @@ +// +build linux + +package volume + +import ( + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +// DefaultPropagationMode defines what propagation mode should be used by +// default if user has not specified one explicitly. +// propagation modes +const DefaultPropagationMode = mounttypes.PropagationRPrivate + +var propagationModes = map[mounttypes.Propagation]bool{ + mounttypes.PropagationPrivate: true, + mounttypes.PropagationRPrivate: true, + mounttypes.PropagationSlave: true, + mounttypes.PropagationRSlave: true, + mounttypes.PropagationShared: true, + mounttypes.PropagationRShared: true, +} + +// GetPropagation extracts and returns the mount propagation mode. If there +// are no specifications, then by default it is "private". +func GetPropagation(mode string) mounttypes.Propagation { + for _, o := range strings.Split(mode, ",") { + prop := mounttypes.Propagation(o) + if propagationModes[prop] { + return prop + } + } + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. Returns true if a valid propagation mode specifier is +// present, false otherwise. +func HasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if propagationModes[mounttypes.Propagation(o)] { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go b/vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go new file mode 100644 index 0000000..46d0265 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go @@ -0,0 +1,65 @@ +// +build linux + +package volume + +import ( + "strings" + "testing" +) + +func TestParseMountRawPropagation(t *testing.T) { + var ( + valid []string + invalid map[string]string + ) + + valid = []string{ + "/hostPath:/containerPath:shared", + "/hostPath:/containerPath:rshared", + "/hostPath:/containerPath:slave", + "/hostPath:/containerPath:rslave", + "/hostPath:/containerPath:private", + "/hostPath:/containerPath:rprivate", + "/hostPath:/containerPath:ro,shared", + "/hostPath:/containerPath:ro,slave", + "/hostPath:/containerPath:ro,private", + "/hostPath:/containerPath:ro,z,shared", + "/hostPath:/containerPath:ro,Z,slave", + "/hostPath:/containerPath:Z,ro,slave", + "/hostPath:/containerPath:slave,Z,ro", + "/hostPath:/containerPath:Z,slave,ro", + "/hostPath:/containerPath:slave,ro,Z", + "/hostPath:/containerPath:rslave,ro,Z", + "/hostPath:/containerPath:ro,rshared,Z", + "/hostPath:/containerPath:ro,Z,rprivate", + } + invalid = map[string]string{ + "/path:/path:ro,rshared,rslave": `invalid mode`, + "/path:/path:ro,z,rshared,rslave": `invalid mode`, + "/path:shared": "invalid volume specification", + "/path:slave": "invalid volume specification", + "/path:private": "invalid volume specification", + "name:/absolute-path:shared": "invalid volume specification", + "name:/absolute-path:rshared": "invalid volume specification", + "name:/absolute-path:slave": "invalid volume specification", + "name:/absolute-path:rslave": "invalid volume specification", + "name:/absolute-path:private": "invalid volume specification", + "name:/absolute-path:rprivate": "invalid volume specification", + } + + for _, path := range valid { + if _, err := ParseMountRaw(path, "local"); err != nil { + t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ParseMountRaw(path, "local"); err == nil { + t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err %v", path, err) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } +} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go new file mode 100644 index 0000000..7311ffc --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go @@ -0,0 +1,24 @@ +// +build !linux + +package volume + +import mounttypes "github.com/docker/docker/api/types/mount" + +// DefaultPropagationMode is used only in linux. In other cases it returns +// empty string. +const DefaultPropagationMode mounttypes.Propagation = "" + +// propagation modes not supported on this platform. +var propagationModes = map[mounttypes.Propagation]bool{} + +// GetPropagation is not supported. Return empty string. +func GetPropagation(mode string) mounttypes.Propagation { + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. Returns true if a valid propagation mode specifier is +// present, false otherwise. +func HasPropagation(mode string) bool { + return false +} diff --git a/vendor/github.com/docker/docker/volume/volume_test.go b/vendor/github.com/docker/docker/volume/volume_test.go new file mode 100644 index 0000000..54df380 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_test.go @@ -0,0 +1,269 @@ +package volume + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestParseMountRaw(t *testing.T) { + var ( + valid []string + invalid map[string]string + ) + + if runtime.GOOS == "windows" { + valid = []string{ + `d:\`, + `d:`, + `d:\path`, + `d:\path with space`, + // TODO Windows post TP5 - readonly support `d:\pathandmode:ro`, + `c:\:d:\`, + `c:\windows\:d:`, + `c:\windows:d:\s p a c e`, + `c:\windows:d:\s p a c e:RW`, + `c:\program files:d:\s p a c e i n h o s t d i r`, + `0123456789name:d:`, + `MiXeDcAsEnAmE:d:`, + `name:D:`, + `name:D::rW`, + `name:D::RW`, + // TODO Windows post TP5 - readonly support `name:D::RO`, + `c:/:d:/forward/slashes/are/good/too`, + // TODO Windows post TP5 - readonly support `c:/:d:/including with/spaces:ro`, + `c:\Windows`, // With capital + `c:\Program Files (x86)`, // With capitals and brackets + } + invalid = map[string]string{ + ``: "invalid volume specification: ", + `.`: "invalid volume specification: ", + `..\`: "invalid volume specification: ", + `c:\:..\`: "invalid volume specification: ", + `c:\:d:\:xyzzy`: "invalid volume specification: ", + `c:`: "cannot be `c:`", + `c:\`: "cannot be `c:`", + `c:\notexist:d:`: `source path does not exist`, + `c:\windows\system32\ntdll.dll:d:`: `source path must be a directory`, + `name<:d:`: `invalid volume specification`, + `name>:d:`: `invalid volume specification`, + `name::d:`: `invalid volume specification`, + `name":d:`: `invalid volume specification`, + `name\:d:`: `invalid volume specification`, + `name*:d:`: `invalid volume specification`, + `name|:d:`: `invalid volume specification`, + `name?:d:`: `invalid volume specification`, + `name/:d:`: `invalid volume specification`, + `d:\pathandmode:rw`: `invalid volume specification`, + `con:d:`: `cannot be a reserved word for Windows filenames`, + `PRN:d:`: `cannot be a reserved word for Windows filenames`, + `aUx:d:`: `cannot be a reserved word for Windows filenames`, + `nul:d:`: `cannot be a reserved word for Windows filenames`, + `com1:d:`: `cannot be a reserved word for Windows filenames`, + `com2:d:`: `cannot be a reserved word for Windows filenames`, + `com3:d:`: `cannot be a reserved word for Windows filenames`, + `com4:d:`: `cannot be a reserved word for Windows filenames`, + `com5:d:`: `cannot be a reserved word for Windows filenames`, + `com6:d:`: `cannot be a reserved word for Windows filenames`, + `com7:d:`: `cannot be a reserved word for Windows filenames`, + `com8:d:`: `cannot be a reserved word for Windows filenames`, + `com9:d:`: `cannot be a reserved word for Windows filenames`, + `lpt1:d:`: `cannot be a reserved word for Windows filenames`, + `lpt2:d:`: `cannot be a reserved word for Windows filenames`, + `lpt3:d:`: `cannot be a reserved word for Windows filenames`, + `lpt4:d:`: `cannot be a reserved word for Windows filenames`, + `lpt5:d:`: `cannot be a reserved word for Windows filenames`, + `lpt6:d:`: `cannot be a reserved word for Windows filenames`, + `lpt7:d:`: `cannot be a reserved word for Windows filenames`, + `lpt8:d:`: `cannot be a reserved word for Windows filenames`, + `lpt9:d:`: `cannot be a reserved word for Windows filenames`, + `c:\windows\system32\ntdll.dll`: `Only directories can be mapped on this platform`, + } + + } else { + valid = []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/rw:/ro", + } + invalid = map[string]string{ + "": "invalid volume specification", + "./": "mount path must be absolute", + "../": "mount path must be absolute", + "/:../": "mount path must be absolute", + "/:path": "mount path must be absolute", + ":": "invalid volume specification", + "/tmp:": "invalid volume specification", + ":test": "invalid volume specification", + ":/test": "invalid volume specification", + "tmp:": "invalid volume specification", + ":test:": "invalid volume specification", + "::": "invalid volume specification", + ":::": "invalid volume specification", + "/tmp:::": "invalid volume specification", + ":/tmp::": "invalid volume specification", + "/path:rw": "invalid volume specification", + "/path:ro": "invalid volume specification", + "/rw:rw": "invalid volume specification", + "path:ro": "invalid volume specification", + "/path:/path:sw": `invalid mode`, + "/path:/path:rwz": `invalid mode`, + } + } + + for _, path := range valid { + if _, err := ParseMountRaw(path, "local"); err != nil { + t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if mp, err := ParseMountRaw(path, "local"); err == nil { + t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } +} + +// testParseMountRaw is a structure used by TestParseMountRawSplit for +// specifying test cases for the ParseMountRaw() function. +type testParseMountRaw struct { + bind string + driver string + expDest string + expSource string + expName string + expDriver string + expRW bool + fail bool +} + +func TestParseMountRawSplit(t *testing.T) { + var cases []testParseMountRaw + if runtime.GOOS == "windows" { + cases = []testParseMountRaw{ + {`c:\:d:`, "local", `d:`, `c:\`, ``, "", true, false}, + {`c:\:d:\`, "local", `d:\`, `c:\`, ``, "", true, false}, + // TODO Windows post TP5 - Add readonly support {`c:\:d:\:ro`, "local", `d:\`, `c:\`, ``, "", false, false}, + {`c:\:d:\:rw`, "local", `d:\`, `c:\`, ``, "", true, false}, + {`c:\:d:\:foo`, "local", `d:\`, `c:\`, ``, "", false, true}, + {`name:d::rw`, "local", `d:`, ``, `name`, "local", true, false}, + {`name:d:`, "local", `d:`, ``, `name`, "local", true, false}, + // TODO Windows post TP5 - Add readonly support {`name:d::ro`, "local", `d:`, ``, `name`, "local", false, false}, + {`name:c:`, "", ``, ``, ``, "", true, true}, + {`driver/name:c:`, "", ``, ``, ``, "", true, true}, + } + } else { + cases = []testParseMountRaw{ + {"/tmp:/tmp1", "", "/tmp1", "/tmp", "", "", true, false}, + {"/tmp:/tmp2:ro", "", "/tmp2", "/tmp", "", "", false, false}, + {"/tmp:/tmp3:rw", "", "/tmp3", "/tmp", "", "", true, false}, + {"/tmp:/tmp4:foo", "", "", "", "", "", false, true}, + {"name:/named1", "", "/named1", "", "name", "", true, false}, + {"name:/named2", "external", "/named2", "", "name", "external", true, false}, + {"name:/named3:ro", "local", "/named3", "", "name", "local", false, false}, + {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "", true, false}, + {"/tmp:tmp", "", "", "", "", "", true, true}, + } + } + + for i, c := range cases { + t.Logf("case %d", i) + m, err := ParseMountRaw(c.bind, c.driver) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) + } + continue + } + + if m == nil || err != nil { + t.Fatalf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error()) + continue + } + + if m.Destination != c.expDest { + t.Fatalf("Expected destination '%s, was %s', for spec '%s'", c.expDest, m.Destination, c.bind) + } + + if m.Source != c.expSource { + t.Fatalf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind) + } + + if m.Name != c.expName { + t.Fatalf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind) + } + + if m.Driver != c.expDriver { + t.Fatalf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind) + } + + if m.RW != c.expRW { + t.Fatalf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind) + } + } +} + +func TestParseMountSpec(t *testing.T) { + type c struct { + input mount.Mount + expected MountPoint + } + testDir, err := ioutil.TempDir("", "test-mount-config") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []c{ + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, + } + + for i, c := range cases { + t.Logf("case %d", i) + mp, err := ParseMountSpec(c.input) + if err != nil { + t.Fatal(err) + } + + if c.expected.Type != mp.Type { + t.Fatalf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type) + } + if c.expected.Destination != mp.Destination { + t.Fatalf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination) + } + if c.expected.Source != mp.Source { + t.Fatalf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source) + } + if c.expected.RW != mp.RW { + t.Fatalf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW) + } + if c.expected.Propagation != mp.Propagation { + t.Fatalf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation) + } + if c.expected.Driver != mp.Driver { + t.Fatalf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver) + } + if c.expected.CopyData != mp.CopyData { + t.Fatalf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData) + } + } +} diff --git a/vendor/github.com/docker/docker/volume/volume_unix.go b/vendor/github.com/docker/docker/volume/volume_unix.go new file mode 100644 index 0000000..0256ebb --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_unix.go @@ -0,0 +1,138 @@ +// +build linux freebsd darwin solaris + +package volume + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +var platformRawValidationOpts = []func(o *validateOpts){ + // need to make sure to not error out if the bind source does not exist on unix + // this is supported for historical reasons, the path will be automatically + // created later. + func(o *validateOpts) { o.skipBindSourceCheck = true }, +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var labelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Only bind mounts and local volumes can be used in old versions of Docker. +func (m *MountPoint) BackwardsCompatible() bool { + return len(m.Source) > 0 || m.Driver == DefaultDriverName +} + +// HasResource checks whether the given absolute path for a container is in +// this mount point. If the relative path starts with `../` then the resource +// is outside of this mount point, but we can't simply check for this prefix +// because it misses `..` which is also outside of the mount, so check both. +func (m *MountPoint) HasResource(absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func IsVolumeNameValid(name string) (bool, error) { + return true, nil +} + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. +func ValidMountMode(mode string) bool { + if mode == "" { + return true + } + + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case labelModes[o]: + labelModeCount++ + case propagationModes[mounttypes.Propagation(o)]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. + if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 { + return false + } + return true +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +// If there are no specifications w.r.t read write mode, then by default +// it returns true. +func ReadWrite(mode string) bool { + if !ValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + return true +} + +func validateNotRoot(p string) error { + p = filepath.Clean(convertSlash(p)) + if p == "/" { + return fmt.Errorf("invalid specification: destination can't be '/'") + } + return nil +} + +func validateCopyMode(mode bool) error { + return nil +} + +func convertSlash(p string) string { + return filepath.ToSlash(p) +} + +func splitRawSpec(raw string) ([]string, error) { + if strings.Count(raw, ":") > 2 { + return nil, errInvalidSpec(raw) + } + + arr := strings.SplitN(raw, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(raw) + } + return arr, nil +} + +func clean(p string) string { + return filepath.Clean(p) +} + +func validateStat(fi os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/docker/docker/volume/volume_unsupported.go b/vendor/github.com/docker/docker/volume/volume_unsupported.go new file mode 100644 index 0000000..ff9d6af --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package volume + +import ( + "fmt" + "runtime" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string +// for mount(2). +func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { + return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) +} diff --git a/vendor/github.com/docker/docker/volume/volume_windows.go b/vendor/github.com/docker/docker/volume/volume_windows.go new file mode 100644 index 0000000..22f6fc7 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_windows.go @@ -0,0 +1,201 @@ +package volume + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" +) + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, +} + +// read-only modes +var roModes = map[string]bool{ + "ro": true, +} + +var platformRawValidationOpts = []func(*validateOpts){ + // filepath.IsAbs is weird on Windows: + // `c:` is not considered an absolute path + // `c:\` is considered an absolute path + // In any case, the regex matching below ensures absolute paths + // TODO: consider this a bug with filepath.IsAbs (?) + func(o *validateOpts) { o.skipAbsolutePathCheck = true }, +} + +const ( + // Spec should be in the format [source:]destination[:mode] + // + // Examples: c:\foo bar:d:rw + // c:\foo:d:\bar + // myname:d: + // d:\ + // + // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See + // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to + // test is https://regex-golang.appspot.com/assets/html/index.html + // + // Useful link for referencing named capturing groups: + // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex + // + // There are three match groups: source, destination and mode. + // + + // RXHostDir is the first option of a source + RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*` + // RXName is the second option of a source + RXName = `[^\\/:*?"<>|\r\n]+` + // RXReservedNames are reserved names not possible on Windows + RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` + + // RXSource is the combined possibilities for a source + RXSource = `((?P((` + RXHostDir + `)|(` + RXName + `))):)?` + + // Source. Can be either a host directory, a name, or omitted: + // HostDir: + // - Essentially using the folder solution from + // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html + // but adding case insensitivity. + // - Must be an absolute path such as c:\path + // - Can include spaces such as `c:\program files` + // - And then followed by a colon which is not in the capture group + // - And can be optional + // Name: + // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) + // - And then followed by a colon which is not in the capture group + // - And can be optional + + // RXDestination is the regex expression for the mount destination + RXDestination = `(?P([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?))` + // Destination (aka container path): + // - Variation on hostdir but can be a drive followed by colon as well + // - If a path, must be absolute. Can include spaces + // - Drive cannot be c: (explicitly checked in code, not RegEx) + + // RXMode is the regex expression for the mode of the mount + // Mode (optional): + // - Hopefully self explanatory in comparison to above regex's. + // - Colon is not in the capture group + RXMode = `(:(?P(?i)ro|rw))?` +) + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Windows volumes are never backwards compatible. +func (m *MountPoint) BackwardsCompatible() bool { + return false +} + +func splitRawSpec(raw string) ([]string, error) { + specExp := regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`) + match := specExp.FindStringSubmatch(strings.ToLower(raw)) + + // Must have something back + if len(match) == 0 { + return nil, errInvalidSpec(raw) + } + + var split []string + matchgroups := make(map[string]string) + // Pull out the sub expressions from the named capture groups + for i, name := range specExp.SubexpNames() { + matchgroups[name] = strings.ToLower(match[i]) + } + if source, exists := matchgroups["source"]; exists { + if source != "" { + split = append(split, source) + } + } + if destination, exists := matchgroups["destination"]; exists { + if destination != "" { + split = append(split, destination) + } + } + if mode, exists := matchgroups["mode"]; exists { + if mode != "" { + split = append(split, mode) + } + } + // Fix #26329. If the destination appears to be a file, and the source is null, + // it may be because we've fallen through the possible naming regex and hit a + // situation where the user intention was to map a file into a container through + // a local volume, but this is not supported by the platform. + if matchgroups["source"] == "" && matchgroups["destination"] != "" { + validName, err := IsVolumeNameValid(matchgroups["destination"]) + if err != nil { + return nil, err + } + if !validName { + if fi, err := os.Stat(matchgroups["destination"]); err == nil { + if !fi.IsDir() { + return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"]) + } + } + } + } + return split, nil +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func IsVolumeNameValid(name string) (bool, error) { + nameExp := regexp.MustCompile(`^` + RXName + `$`) + if !nameExp.MatchString(name) { + return false, nil + } + nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`) + if nameExp.MatchString(name) { + return false, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) + } + return true, nil +} + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. +func ValidMountMode(mode string) bool { + if mode == "" { + return true + } + return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)] +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +func ReadWrite(mode string) bool { + return rwModes[strings.ToLower(mode)] || mode == "" +} + +func validateNotRoot(p string) error { + p = strings.ToLower(convertSlash(p)) + if p == "c:" || p == `c:\` { + return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) + } + return nil +} + +func validateCopyMode(mode bool) error { + if mode { + return fmt.Errorf("Windows does not support copying image path content") + } + return nil +} + +func convertSlash(p string) string { + return filepath.FromSlash(p) +} + +func clean(p string) string { + if match, _ := regexp.MatchString("^[a-z]:$", p); match { + return p + } + return filepath.Clean(p) +} + +func validateStat(fi os.FileInfo) error { + if !fi.IsDir() { + return fmt.Errorf("source path must be a directory") + } + return nil +} diff --git a/vendor/github.com/docker/go-connections/CONTRIBUTING.md b/vendor/github.com/docker/go-connections/CONTRIBUTING.md new file mode 100644 index 0000000..926dcc9 --- /dev/null +++ b/vendor/github.com/docker/go-connections/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing to Docker + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 0000000..b55b37b --- /dev/null +++ b/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-connections/MAINTAINERS b/vendor/github.com/docker/go-connections/MAINTAINERS new file mode 100644 index 0000000..477be8b --- /dev/null +++ b/vendor/github.com/docker/go-connections/MAINTAINERS @@ -0,0 +1,27 @@ +# go-connections maintainers file +# +# This file describes who runs the docker/go-connections project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/vendor/github.com/docker/go-connections/README.md b/vendor/github.com/docker/go-connections/README.md new file mode 100644 index 0000000..d257e44 --- /dev/null +++ b/vendor/github.com/docker/go-connections/README.md @@ -0,0 +1,13 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-connections?status.svg)](https://godoc.org/github.com/docker/go-connections) + +# Introduction + +go-connections provides common package to work with network connections. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-connections) for examples and documentation. + +## License + +go-connections is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-connections/circle.yml b/vendor/github.com/docker/go-connections/circle.yml new file mode 100644 index 0000000..8a82ee8 --- /dev/null +++ b/vendor/github.com/docker/go-connections/circle.yml @@ -0,0 +1,14 @@ +dependencies: + pre: + # setup ipv6 + - sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0 net.ipv6.conf.default.disable_ipv6=0 net.ipv6.conf.all.disable_ipv6=0 + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-connections/doc.go b/vendor/github.com/docker/go-connections/doc.go new file mode 100644 index 0000000..43e2724 --- /dev/null +++ b/vendor/github.com/docker/go-connections/doc.go @@ -0,0 +1,3 @@ +// Package connections provides libraries to work with network connections. +// This library is divided in several components for specific usage. +package connections diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 0000000..4d5f5ae --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,242 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. +package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. + + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + } + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/nat_test.go b/vendor/github.com/docker/go-connections/nat/nat_test.go new file mode 100644 index 0000000..787d5ac --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat_test.go @@ -0,0 +1,583 @@ +package nat + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParsePort(t *testing.T) { + var ( + p int + err error + ) + + p, err = ParsePort("1234") + + if err != nil || p != 1234 { + t.Fatal("Parsing '1234' did not succeed") + } + + // FIXME currently this is a valid port. I don't think it should be. + // I'm leaving this test commented out until we make a decision. + // - erikh + + /* + p, err = ParsePort("0123") + + if err != nil { + t.Fatal("Successfully parsed port '0123' to '123'") + } + */ + + p, err = ParsePort("asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port 'asdf' succeeded") + } + + p, err = ParsePort("1asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port '1asdf' succeeded") + } +} + +func TestParsePortRangeToInt(t *testing.T) { + var ( + begin int + end int + err error + ) + + type TestRange struct { + Range string + Begin int + End int + } + validRanges := []TestRange{ + {"1234", 1234, 1234}, + {"1234-1234", 1234, 1234}, + {"1234-1235", 1234, 1235}, + {"8000-9000", 8000, 9000}, + {"0", 0, 0}, + {"0-0", 0, 0}, + } + + for _, r := range validRanges { + begin, end, err = ParsePortRangeToInt(r.Range) + + if err != nil || begin != r.Begin { + t.Fatalf("Parsing port range '%s' did not succeed. Expected begin %d, got %d", r.Range, r.Begin, begin) + } + if err != nil || end != r.End { + t.Fatalf("Parsing port range '%s' did not succeed. Expected end %d, got %d", r.Range, r.End, end) + } + } + + invalidRanges := []string{ + "asdf", + "1asdf", + "9000-8000", + "9000-", + "-8000", + "-8000-", + } + + for _, r := range invalidRanges { + begin, end, err = ParsePortRangeToInt(r) + + if err == nil || begin != 0 || end != 0 { + t.Fatalf("Parsing port range '%s' succeeded", r) + } + } +} + +func TestPort(t *testing.T) { + p, err := NewPort("tcp", "1234") + + if err != nil { + t.Fatalf("tcp, 1234 had a parsing issue: %v", err) + } + + if string(p) != "1234/tcp" { + t.Fatal("tcp, 1234 did not result in the string 1234/tcp") + } + + if p.Proto() != "tcp" { + t.Fatal("protocol was not tcp") + } + + if p.Port() != "1234" { + t.Fatal("port string value was not 1234") + } + + if p.Int() != 1234 { + t.Fatal("port int value was not 1234") + } + + p, err = NewPort("tcp", "asd1234") + if err == nil { + t.Fatal("tcp, asd1234 was supposed to fail") + } + + p, err = NewPort("tcp", "1234-1230") + if err == nil { + t.Fatal("tcp, 1234-1230 was supposed to fail") + } + + p, err = NewPort("tcp", "1234-1242") + if err != nil { + t.Fatalf("tcp, 1234-1242 had a parsing issue: %v", err) + } + + if string(p) != "1234-1242/tcp" { + t.Fatal("tcp, 1234-1242 did not result in the string 1234-1242/tcp") + } +} + +func TestSplitProtoPort(t *testing.T) { + var ( + proto string + port string + ) + + proto, port = SplitProtoPort("1234/tcp") + + if proto != "tcp" || port != "1234" { + t.Fatal("Could not split 1234/tcp properly") + } + + proto, port = SplitProtoPort("") + + if proto != "" || port != "" { + t.Fatal("parsing an empty string yielded surprising results", proto, port) + } + + proto, port = SplitProtoPort("1234") + + if proto != "tcp" || port != "1234" { + t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) + } + + proto, port = SplitProtoPort("1234/") + + if proto != "tcp" || port != "1234" { + t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) + } + + proto, port = SplitProtoPort("/tcp") + + if proto != "" || port != "" { + t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) + } +} + +func TestParsePortSpecFull(t *testing.T) { + portMappings, err := ParsePortSpec("0.0.0.0:1234-1235:3333-3334/tcp") + assert.Nil(t, err) + + expected := []PortMapping{ + { + Port: "3333/tcp", + Binding: PortBinding{ + HostIP: "0.0.0.0", + HostPort: "1234", + }, + }, + { + Port: "3334/tcp", + Binding: PortBinding{ + HostIP: "0.0.0.0", + HostPort: "1235", + }, + }, + } + + assert.Equal(t, expected, portMappings) +} + +func TestPartPortSpecIPV6(t *testing.T) { + portMappings, err := ParsePortSpec("[2001:4860:0:2001::68]::333") + assert.Nil(t, err) + + expected := []PortMapping{ + { + Port: "333/tcp", + Binding: PortBinding{ + HostIP: "2001:4860:0:2001::68", + HostPort: "", + }, + }, + } + assert.Equal(t, expected, portMappings) +} + +func TestPartPortSpecIPV6WithHostPort(t *testing.T) { + portMappings, err := ParsePortSpec("[::1]:80:80") + assert.Nil(t, err) + + expected := []PortMapping{ + { + Port: "80/tcp", + Binding: PortBinding{ + HostIP: "::1", + HostPort: "80", + }, + }, + } + assert.Equal(t, expected, portMappings) +} + +func TestParsePortSpecs(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "0.0.0.0" { + t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} + +func TestParsePortSpecsWithRange(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236/tcp", "2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236:1234-1236/tcp", "2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234-1236:1234-1236/tcp", "0.0.0.0:2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port { + t.Fatalf("Expect single binding to port %s but found %s", port, bindings) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234-1236:1234-1236/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublicNoPort(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100"}) + + if err == nil { + t.Logf("Expected error Invalid containerPort") + t.Fail() + } + if ports != nil { + t.Logf("Expected nil got %s", ports) + t.Fail() + } + if bindings != nil { + t.Logf("Expected nil got %s", bindings) + t.Fail() + } +} + +func TestParseNetworkOptsNegativePorts(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) + + if err == nil { + t.Fail() + } + if len(ports) != 0 { + t.Logf("Expected nil got %d", len(ports)) + t.Fail() + } + if len(bindings) != 0 { + t.Logf("Expected 0 got %d", len(bindings)) + t.Fail() + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 0000000..892adf8 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/parse_test.go b/vendor/github.com/docker/go-connections/nat/parse_test.go new file mode 100644 index 0000000..2ac204a --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse_test.go @@ -0,0 +1,54 @@ +package nat + +import ( + "strings" + "testing" +) + +func TestParsePortRange(t *testing.T) { + if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { + t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) + } +} + +func TestParsePortRangeEmpty(t *testing.T) { + if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." { + t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) + } +} + +func TestParsePortRangeWithNoRange(t *testing.T) { + start, end, err := ParsePortRange("8080") + if err != nil { + t.Fatal(err) + } + if start != 8080 || end != 8080 { + t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) + } +} + +func TestParsePortRangeIncorrectRange(t *testing.T) { + if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectEndRange(t *testing.T) { + if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectStartRange(t *testing.T) { + if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go new file mode 100644 index 0000000..ce95017 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -0,0 +1,96 @@ +package nat + +import ( + "sort" + "strings" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the port so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respected mapping. The ports +// will explicit HostPort will be placed first. +func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/vendor/github.com/docker/go-connections/nat/sort_test.go b/vendor/github.com/docker/go-connections/nat/sort_test.go new file mode 100644 index 0000000..88ed911 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/sort_test.go @@ -0,0 +1,85 @@ +package nat + +import ( + "fmt" + "reflect" + "testing" +) + +func TestSortUniquePorts(t *testing.T) { + ports := []Port{ + Port("6379/tcp"), + Port("22/tcp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "22/tcp" { + t.Log(fmt.Sprint(first)) + t.Fail() + } +} + +func TestSortSamePortWithDifferentProto(t *testing.T) { + ports := []Port{ + Port("8888/tcp"), + Port("8888/udp"), + Port("6379/tcp"), + Port("6379/udp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "6379/tcp" { + t.Fail() + } +} + +func TestSortPortMap(t *testing.T) { + ports := []Port{ + Port("22/tcp"), + Port("22/udp"), + Port("8000/tcp"), + Port("6379/tcp"), + Port("9999/tcp"), + } + + portMap := PortMap{ + Port("22/tcp"): []PortBinding{ + {}, + }, + Port("8000/tcp"): []PortBinding{ + {}, + }, + Port("6379/tcp"): []PortBinding{ + {}, + {HostIP: "0.0.0.0", HostPort: "32749"}, + }, + Port("9999/tcp"): []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "40000"}, + }, + } + + SortPortMap(ports, portMap) + if !reflect.DeepEqual(ports, []Port{ + Port("9999/tcp"), + Port("6379/tcp"), + Port("8000/tcp"), + Port("22/tcp"), + Port("22/udp"), + }) { + t.Errorf("failed to prioritize port with explicit mappings, got %v", ports) + } + if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "32749"}, + {}, + }) { + t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm) + } +} diff --git a/vendor/github.com/docker/go-connections/proxy/logger.go b/vendor/github.com/docker/go-connections/proxy/logger.go new file mode 100644 index 0000000..cd8b5fd --- /dev/null +++ b/vendor/github.com/docker/go-connections/proxy/logger.go @@ -0,0 +1,11 @@ +package proxy + +type logger interface { + Printf(format string, args ...interface{}) +} + +type noopLogger struct{} + +func (l *noopLogger) Printf(_ string, _ ...interface{}) { + // Do nothing :) +} diff --git a/vendor/github.com/docker/go-connections/proxy/network_proxy_test.go b/vendor/github.com/docker/go-connections/proxy/network_proxy_test.go new file mode 100644 index 0000000..9a73548 --- /dev/null +++ b/vendor/github.com/docker/go-connections/proxy/network_proxy_test.go @@ -0,0 +1,216 @@ +package proxy + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "testing" + "time" +) + +var testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo") +var testBufSize = len(testBuf) + +type EchoServer interface { + Run() + Close() + LocalAddr() net.Addr +} + +type TCPEchoServer struct { + listener net.Listener + testCtx *testing.T +} + +type UDPEchoServer struct { + conn net.PacketConn + testCtx *testing.T +} + +func NewEchoServer(t *testing.T, proto, address string) EchoServer { + var server EchoServer + if strings.HasPrefix(proto, "tcp") { + listener, err := net.Listen(proto, address) + if err != nil { + t.Fatal(err) + } + server = &TCPEchoServer{listener: listener, testCtx: t} + } else { + socket, err := net.ListenPacket(proto, address) + if err != nil { + t.Fatal(err) + } + server = &UDPEchoServer{conn: socket, testCtx: t} + } + return server +} + +func (server *TCPEchoServer) Run() { + go func() { + for { + client, err := server.listener.Accept() + if err != nil { + return + } + go func(client net.Conn) { + if _, err := io.Copy(client, client); err != nil { + server.testCtx.Logf("can't echo to the client: %v\n", err.Error()) + } + client.Close() + }(client) + } + }() +} + +func (server *TCPEchoServer) LocalAddr() net.Addr { return server.listener.Addr() } +func (server *TCPEchoServer) Close() { server.listener.Close() } + +func (server *UDPEchoServer) Run() { + go func() { + readBuf := make([]byte, 1024) + for { + read, from, err := server.conn.ReadFrom(readBuf) + if err != nil { + return + } + for i := 0; i != read; { + written, err := server.conn.WriteTo(readBuf[i:read], from) + if err != nil { + break + } + i += written + } + } + }() +} + +func (server *UDPEchoServer) LocalAddr() net.Addr { return server.conn.LocalAddr() } +func (server *UDPEchoServer) Close() { server.conn.Close() } + +func testProxyAt(t *testing.T, proto string, proxy Proxy, addr string) { + defer proxy.Close() + go proxy.Run() + client, err := net.Dial(proto, addr) + if err != nil { + t.Fatalf("Can't connect to the proxy: %v", err) + } + defer client.Close() + client.SetDeadline(time.Now().Add(10 * time.Second)) + if _, err = client.Write(testBuf); err != nil { + t.Fatal(err) + } + recvBuf := make([]byte, testBufSize) + if _, err = client.Read(recvBuf); err != nil { + t.Fatal(err) + } + if !bytes.Equal(testBuf, recvBuf) { + t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) + } +} + +func testProxy(t *testing.T, proto string, proxy Proxy) { + testProxyAt(t, proto, proxy, proxy.FrontendAddr().String()) +} + +func TestTCP4Proxy(t *testing.T) { + backend := NewEchoServer(t, "tcp", "127.0.0.1:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "tcp", proxy) +} + +func TestTCP6Proxy(t *testing.T) { + backend := NewEchoServer(t, "tcp", "[::1]:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "tcp", proxy) +} + +func TestTCPDualStackProxy(t *testing.T) { + // If I understand `godoc -src net favoriteAddrFamily` (used by the + // net.Listen* functions) correctly this should work, but it doesn't. + t.Skip("No support for dual stack yet") + backend := NewEchoServer(t, "tcp", "[::1]:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + ipv4ProxyAddr := &net.TCPAddr{ + IP: net.IPv4(127, 0, 0, 1), + Port: proxy.FrontendAddr().(*net.TCPAddr).Port, + } + testProxyAt(t, "tcp", proxy, ipv4ProxyAddr.String()) +} + +func TestUDP4Proxy(t *testing.T) { + backend := NewEchoServer(t, "udp", "127.0.0.1:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "udp", proxy) +} + +func TestUDP6Proxy(t *testing.T) { + backend := NewEchoServer(t, "udp", "[::1]:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.UDPAddr{IP: net.IPv6loopback, Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "udp", proxy) +} + +func TestUDPWriteError(t *testing.T) { + frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} + // Hopefully, this port will be free: */ + backendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 25587} + proxy, err := NewProxy(frontendAddr, backendAddr) + if err != nil { + t.Fatal(err) + } + defer proxy.Close() + go proxy.Run() + client, err := net.Dial("udp", "127.0.0.1:25587") + if err != nil { + t.Fatalf("Can't connect to the proxy: %v", err) + } + defer client.Close() + // Make sure the proxy doesn't stop when there is no actual backend: + client.Write(testBuf) + client.Write(testBuf) + backend := NewEchoServer(t, "udp", "127.0.0.1:25587") + defer backend.Close() + backend.Run() + client.SetDeadline(time.Now().Add(10 * time.Second)) + if _, err = client.Write(testBuf); err != nil { + t.Fatal(err) + } + recvBuf := make([]byte, testBufSize) + if _, err = client.Read(recvBuf); err != nil { + t.Fatal(err) + } + if !bytes.Equal(testBuf, recvBuf) { + t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) + } +} diff --git a/vendor/github.com/docker/go-connections/proxy/proxy.go b/vendor/github.com/docker/go-connections/proxy/proxy.go new file mode 100644 index 0000000..537aebb --- /dev/null +++ b/vendor/github.com/docker/go-connections/proxy/proxy.go @@ -0,0 +1,36 @@ +// Package proxy provides a network Proxy interface and implementations for TCP and UDP. +package proxy + +import ( + "fmt" + "net" +) + +// Proxy defines the behavior of a proxy. It forwards traffic back and forth +// between two endpoints : the frontend and the backend. +// It can be used to do software port-mapping between two addresses. +// e.g. forward all traffic between the frontend (host) 127.0.0.1:3000 +// to the backend (container) at 172.17.42.108:4000. +type Proxy interface { + // Run starts forwarding traffic back and forth between the front + // and back-end addresses. + Run() + // Close stops forwarding traffic and close both ends of the Proxy. + Close() + // FrontendAddr returns the address on which the proxy is listening. + FrontendAddr() net.Addr + // BackendAddr returns the proxied address. + BackendAddr() net.Addr +} + +// NewProxy creates a Proxy according to the specified frontendAddr and backendAddr. +func NewProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { + switch frontendAddr.(type) { + case *net.UDPAddr: + return NewUDPProxy(frontendAddr.(*net.UDPAddr), backendAddr.(*net.UDPAddr)) + case *net.TCPAddr: + return NewTCPProxy(frontendAddr.(*net.TCPAddr), backendAddr.(*net.TCPAddr)) + default: + panic(fmt.Errorf("Unsupported protocol")) + } +} diff --git a/vendor/github.com/docker/go-connections/proxy/stub_proxy.go b/vendor/github.com/docker/go-connections/proxy/stub_proxy.go new file mode 100644 index 0000000..571749e --- /dev/null +++ b/vendor/github.com/docker/go-connections/proxy/stub_proxy.go @@ -0,0 +1,31 @@ +package proxy + +import ( + "net" +) + +// StubProxy is a proxy that is a stub (does nothing). +type StubProxy struct { + frontendAddr net.Addr + backendAddr net.Addr +} + +// Run does nothing. +func (p *StubProxy) Run() {} + +// Close does nothing. +func (p *StubProxy) Close() {} + +// FrontendAddr returns the frontend address. +func (p *StubProxy) FrontendAddr() net.Addr { return p.frontendAddr } + +// BackendAddr returns the backend address. +func (p *StubProxy) BackendAddr() net.Addr { return p.backendAddr } + +// NewStubProxy creates a new StubProxy +func NewStubProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { + return &StubProxy{ + frontendAddr: frontendAddr, + backendAddr: backendAddr, + }, nil +} diff --git a/vendor/github.com/docker/go-connections/proxy/tcp_proxy.go b/vendor/github.com/docker/go-connections/proxy/tcp_proxy.go new file mode 100644 index 0000000..8d97196 --- /dev/null +++ b/vendor/github.com/docker/go-connections/proxy/tcp_proxy.go @@ -0,0 +1,105 @@ +package proxy + +import ( + "io" + "net" + "syscall" +) + +// TCPProxy is a proxy for TCP connections. It implements the Proxy interface to +// handle TCP traffic forwarding between the frontend and backend addresses. +type TCPProxy struct { + Logger logger + listener *net.TCPListener + frontendAddr *net.TCPAddr + backendAddr *net.TCPAddr +} + +// NewTCPProxy creates a new TCPProxy. +func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr, ops ...func(*TCPProxy)) (*TCPProxy, error) { + listener, err := net.ListenTCP("tcp", frontendAddr) + if err != nil { + return nil, err + } + // If the port in frontendAddr was 0 then ListenTCP will have a picked + // a port to listen on, hence the call to Addr to get that actual port: + proxy := &TCPProxy{ + listener: listener, + frontendAddr: listener.Addr().(*net.TCPAddr), + backendAddr: backendAddr, + Logger: &noopLogger{}, + } + + for _, op := range ops { + op(proxy) + } + + return proxy, nil +} + +func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { + backend, err := net.DialTCP("tcp", nil, proxy.backendAddr) + if err != nil { + proxy.Logger.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err) + client.Close() + return + } + + event := make(chan int64) + var broker = func(to, from *net.TCPConn) { + written, err := io.Copy(to, from) + if err != nil { + // If the socket we are writing to is shutdown with + // SHUT_WR, forward it to the other end of the pipe: + if err, ok := err.(*net.OpError); ok && err.Err == syscall.EPIPE { + from.CloseWrite() + } + } + to.CloseRead() + event <- written + } + + go broker(client, backend) + go broker(backend, client) + + var transferred int64 + for i := 0; i < 2; i++ { + select { + case written := <-event: + transferred += written + case <-quit: + // Interrupt the two brokers and "join" them. + client.Close() + backend.Close() + for ; i < 2; i++ { + transferred += <-event + } + return + } + } + client.Close() + backend.Close() +} + +// Run starts forwarding the traffic using TCP. +func (proxy *TCPProxy) Run() { + quit := make(chan bool) + defer close(quit) + for { + client, err := proxy.listener.Accept() + if err != nil { + proxy.Logger.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) + return + } + go proxy.clientLoop(client.(*net.TCPConn), quit) + } +} + +// Close stops forwarding the traffic. +func (proxy *TCPProxy) Close() { proxy.listener.Close() } + +// FrontendAddr returns the TCP address on which the proxy is listening. +func (proxy *TCPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } + +// BackendAddr returns the TCP proxied address. +func (proxy *TCPProxy) BackendAddr() net.Addr { return proxy.backendAddr } diff --git a/vendor/github.com/docker/go-connections/proxy/udp_proxy.go b/vendor/github.com/docker/go-connections/proxy/udp_proxy.go new file mode 100644 index 0000000..71ddf12 --- /dev/null +++ b/vendor/github.com/docker/go-connections/proxy/udp_proxy.go @@ -0,0 +1,176 @@ +package proxy + +import ( + "encoding/binary" + "net" + "strings" + "sync" + "syscall" + "time" +) + +const ( + // UDPConnTrackTimeout is the timeout used for UDP connection tracking + UDPConnTrackTimeout = 90 * time.Second + // UDPBufSize is the buffer size for the UDP proxy + UDPBufSize = 65507 +) + +// A net.Addr where the IP is split into two fields so you can use it as a key +// in a map: +type connTrackKey struct { + IPHigh uint64 + IPLow uint64 + Port int +} + +func newConnTrackKey(addr *net.UDPAddr) *connTrackKey { + if len(addr.IP) == net.IPv4len { + return &connTrackKey{ + IPHigh: 0, + IPLow: uint64(binary.BigEndian.Uint32(addr.IP)), + Port: addr.Port, + } + } + return &connTrackKey{ + IPHigh: binary.BigEndian.Uint64(addr.IP[:8]), + IPLow: binary.BigEndian.Uint64(addr.IP[8:]), + Port: addr.Port, + } +} + +type connTrackMap map[connTrackKey]*net.UDPConn + +// UDPProxy is proxy for which handles UDP datagrams. It implements the Proxy +// interface to handle UDP traffic forwarding between the frontend and backend +// addresses. +type UDPProxy struct { + Logger logger + listener *net.UDPConn + frontendAddr *net.UDPAddr + backendAddr *net.UDPAddr + connTrackTable connTrackMap + connTrackLock sync.Mutex +} + +// NewUDPProxy creates a new UDPProxy. +func NewUDPProxy(frontendAddr, backendAddr *net.UDPAddr, ops ...func(*UDPProxy)) (*UDPProxy, error) { + listener, err := net.ListenUDP("udp", frontendAddr) + if err != nil { + return nil, err + } + + proxy := &UDPProxy{ + listener: listener, + frontendAddr: listener.LocalAddr().(*net.UDPAddr), + backendAddr: backendAddr, + connTrackTable: make(connTrackMap), + Logger: &noopLogger{}, + } + + for _, op := range ops { + op(proxy) + } + + return proxy, nil +} + +func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr, clientKey *connTrackKey) { + defer func() { + proxy.connTrackLock.Lock() + delete(proxy.connTrackTable, *clientKey) + proxy.connTrackLock.Unlock() + proxyConn.Close() + }() + + readBuf := make([]byte, UDPBufSize) + for { + proxyConn.SetReadDeadline(time.Now().Add(UDPConnTrackTimeout)) + again: + read, err := proxyConn.Read(readBuf) + if err != nil { + if err, ok := err.(*net.OpError); ok && err.Err == syscall.ECONNREFUSED { + // This will happen if the last write failed + // (e.g: nothing is actually listening on the + // proxied port on the container), ignore it + // and continue until UDPConnTrackTimeout + // expires: + goto again + } + return + } + for i := 0; i != read; { + written, err := proxy.listener.WriteToUDP(readBuf[i:read], clientAddr) + if err != nil { + return + } + i += written + } + } +} + +// Run starts forwarding the traffic using UDP. +func (proxy *UDPProxy) Run() { + readBuf := make([]byte, UDPBufSize) + for { + read, from, err := proxy.listener.ReadFromUDP(readBuf) + if err != nil { + // NOTE: Apparently ReadFrom doesn't return + // ECONNREFUSED like Read do (see comment in + // UDPProxy.replyLoop) + if !isClosedError(err) { + proxy.Logger.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) + } + break + } + + fromKey := newConnTrackKey(from) + proxy.connTrackLock.Lock() + proxyConn, hit := proxy.connTrackTable[*fromKey] + if !hit { + proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr) + if err != nil { + proxy.Logger.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) + proxy.connTrackLock.Unlock() + continue + } + proxy.connTrackTable[*fromKey] = proxyConn + go proxy.replyLoop(proxyConn, from, fromKey) + } + proxy.connTrackLock.Unlock() + for i := 0; i != read; { + written, err := proxyConn.Write(readBuf[i:read]) + if err != nil { + proxy.Logger.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) + break + } + i += written + } + } +} + +// Close stops forwarding the traffic. +func (proxy *UDPProxy) Close() { + proxy.listener.Close() + proxy.connTrackLock.Lock() + defer proxy.connTrackLock.Unlock() + for _, conn := range proxy.connTrackTable { + conn.Close() + } +} + +// FrontendAddr returns the UDP address on which the proxy is listening. +func (proxy *UDPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } + +// BackendAddr returns the proxied UDP address. +func (proxy *UDPProxy) BackendAddr() net.Addr { return proxy.backendAddr } + +func isClosedError(err error) bool { + /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. + * See: + * http://golang.org/src/pkg/net/net.go + * https://code.google.com/p/go/issues/detail?id=4337 + * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ + */ + return strings.HasSuffix(err.Error(), "use of closed network connection") +} diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 0000000..99846ff --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,81 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed. +func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go new file mode 100644 index 0000000..24dc1d1 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go @@ -0,0 +1,39 @@ +package sockets + +import "testing" + +func TestInmemSocket(t *testing.T) { + l := NewInmemSocket("test", 0) + defer l.Close() + go func() { + for { + conn, err := l.Accept() + if err != nil { + return + } + conn.Write([]byte("hello")) + conn.Close() + } + }() + + conn, err := l.Dial("test", "test") + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, 5) + _, err = conn.Read(buf) + if err != nil { + t.Fatal(err) + } + + if string(buf) != "hello" { + t.Fatalf("expected `hello`, got %s", string(buf)) + } + + l.Close() + conn, err = l.Dial("test", "test") + if err != errClosed { + t.Fatalf("expected `errClosed` error, got %v", err) + } +} diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 0000000..98e9a1d --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. +func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 0000000..a1d7beb --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,38 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "errors" + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. +func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + return configureUnixTransport(tr, proto, addr) + case "npipe": + return configureNpipeTransport(tr, proto, addr) + default: + tr.Proxy = http.ProxyFromEnvironment + dialer, err := DialerFromEnvironment(&net.Dialer{ + Timeout: defaultTimeout, + }) + if err != nil { + return err + } + tr.Dial = dialer.Dial + } + return nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go new file mode 100644 index 0000000..386cf0d --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package sockets + +import ( + "fmt" + "net" + "net/http" + "syscall" + "time" +) + +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +// DialPipe connects to a Windows named pipe. +// This is not supported on other OSes. +func DialPipe(_ string, _ time.Duration) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go new file mode 100644 index 0000000..5c21644 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -0,0 +1,27 @@ +package sockets + +import ( + "net" + "net/http" + "time" + + "github.com/Microsoft/go-winio" +) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + +// DialPipe connects to a Windows named pipe. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go new file mode 100644 index 0000000..53cbb6c --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -0,0 +1,22 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "crypto/tls" + "net" +) + +// NewTCPSocket creates a TCP socket listener with the specified address and +// the specified tls configuration. If TLSConfig is set, will encapsulate the +// TCP listener inside a TLS one. +func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"http/1.1"} + l = tls.NewListener(l, tlsConfig) + } + return l, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go new file mode 100644 index 0000000..a8b5dbb --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -0,0 +1,32 @@ +// +build !windows + +package sockets + +import ( + "net" + "os" + "syscall" +) + +// NewUnixSocket creates a unix socket with the specified path and group. +func NewUnixSocket(path string, gid int) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + if err := os.Chown(path, 0, gid); err != nil { + l.Close() + return nil, err + } + if err := os.Chmod(path, 0660); err != nil { + l.Close() + return nil, err + } + return l, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go new file mode 100644 index 0000000..1ca0965 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go @@ -0,0 +1,18 @@ +// +build go1.7 + +package tlsconfig + +import ( + "crypto/x509" + "runtime" +) + +// SystemCertPool returns a copy of the system cert pool, +// returns an error if failed to load or empty pool on windows. +func SystemCertPool() (*x509.CertPool, error) { + certpool, err := x509.SystemCertPool() + if err != nil && runtime.GOOS == "windows" { + return x509.NewCertPool(), nil + } + return certpool, err +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go new file mode 100644 index 0000000..9ca9745 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package tlsconfig + +import ( + "crypto/x509" + +) + +// SystemCertPool returns an new empty cert pool, +// accessing system cert pool is supported in go 1.7 +func SystemCertPool() (*x509.CertPool, error) { + return x509.NewCertPool(), nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go new file mode 100644 index 0000000..1b31bbb --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -0,0 +1,244 @@ +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +// As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. +// A Config may be reused; the tls package will also not modify it. +package tlsconfig + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + + "github.com/pkg/errors" +) + +// Options represents the information needed to create client and server TLS configurations. +type Options struct { + CAFile string + + // If either CertFile or KeyFile is empty, Client() will not load them + // preventing the client from authenticating to the server. + // However, Server() requires them and will error out if they are empty. + CertFile string + KeyFile string + + // client-only option + InsecureSkipVerify bool + // server-only option + ClientAuth tls.ClientAuthType + // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS + // creds will include exclusively the roots in that CA file. If no CA file is provided, + // the system pool will be used. + ExclusiveRootPools bool + MinVersion uint16 + // If Passphrase is set, it will be used to decrypt a TLS private key + // if the key is encrypted + Passphrase string +} + +// Extra (server-side) accepted CBC cipher suites - will phase out in the future +var acceptedCBCCiphers = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, +} + +// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls +// options struct but wants to use a commonly accepted set of TLS cipher suites, with +// known weak algorithms removed. +var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionSSL30: {}, + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} + +// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. +func ServerDefault() *tls.Config { + return &tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: DefaultServerAcceptedCiphers, + } +} + +// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. +func ClientDefault() *tls.Config { + return &tls.Config{ + // Prefer TLS1.2 as the client minimum + MinVersion: tls.VersionTLS12, + CipherSuites: clientCipherSuites, + } +} + +// certPool returns an X.509 certificate pool from `caFile`, the certificate file. +func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { + // If we should verify the server, we need to load a trusted ca + var ( + certPool *x509.CertPool + err error + ) + if exclusivePool { + certPool = x509.NewCertPool() + } else { + certPool, err = SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to read system certificates: %v", err) + } + } + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) + } + if !certPool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) + } + return certPool, nil +} + +// isValidMinVersion checks that the input value is a valid tls minimum version +func isValidMinVersion(version uint16) bool { + _, ok := allTLSVersions[version] + return ok +} + +// adjustMinVersion sets the MinVersion on `config`, the input configuration. +// It assumes the current MinVersion on the `config` is the lowest allowed. +func adjustMinVersion(options Options, config *tls.Config) error { + if options.MinVersion > 0 { + if !isValidMinVersion(options.MinVersion) { + return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + } + if options.MinVersion < config.MinVersion { + return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) + } + config.MinVersion = options.MinVersion + } + + return nil +} + +// IsErrEncryptedKey returns true if the 'err' is an error of incorrect +// password when tryin to decrypt a TLS private key +func IsErrEncryptedKey(err error) bool { + return errors.Cause(err) == x509.IncorrectPasswordError +} + +// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. +// If the private key is encrypted, 'passphrase' is used to decrypted the +// private key. +func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { + // this section makes some small changes to code from notary/tuf/utils/x509.go + pemBlock, _ := pem.Decode(keyBytes) + if pemBlock == nil { + return nil, fmt.Errorf("no valid private key found") + } + + var err error + if x509.IsEncryptedPEMBlock(pemBlock) { + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if err != nil { + return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + } + keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) + } + + return keyBytes, nil +} + +// getCert returns a Certificate from the CertFile and KeyFile in 'options', +// if the key is encrypted, the Passphrase in 'options' will be used to +// decrypt it. +func getCert(options Options) ([]tls.Certificate, error) { + if options.CertFile == "" && options.KeyFile == "" { + return nil, nil + } + + errMessage := "Could not load X509 key pair" + + cert, err := ioutil.ReadFile(options.CertFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + return []tls.Certificate{tlsCert}, nil +} + +// Client returns a TLS configuration meant to be used by a client. +func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault() + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + tlsCerts, err := getCert(options) + if err != nil { + return nil, err + } + tlsConfig.Certificates = tlsCerts + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. +func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault() + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 0000000..6b4c6a7 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 0000000..ee22df4 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_test.go b/vendor/github.com/docker/go-connections/tlsconfig/config_test.go new file mode 100644 index 0000000..02131d6 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_test.go @@ -0,0 +1,651 @@ +package tlsconfig + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "io/ioutil" + "os" + "reflect" + "testing" +) + +// This is the currently active LetsEncrypt IdenTrust cross-signed CA cert. It expires Mar 17, 2021. +const ( + systemRootTrustedCert = ` +-----BEGIN CERTIFICATE----- +MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc2oLheynCDANBgkqhkiG9w0BAQsFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDA0NloXDTIxMDMxNzE2NDA0Nlow +SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT +GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAnNMM8FrlLke3cl03g7NoYzDq1zUmGSXhvb418XCSL7e4S0EF +q6meNQhY7LEqxGiHC6PjdeTm86dicbp5gWAf15Gan/PQeGdxyGkOlZHP/uaZ6WA8 +SMx+yk13EiSdRxta67nsHjcAHJyse6cF6s5K671B5TaYucv9bTyWaN8jKkKQDIZ0 +Z8h/pZq4UmEUEz9l6YKHy9v6Dlb2honzhT+Xhq+w3Brvaw2VFn3EK6BlspkENnWA +a6xK8xuQSXgvopZPKiAlKQTGdMDQMc2PMTiVFrqoM7hD8bEfwzB/onkxEz0tNvjj +/PIzark5McWvxI0NHWQWM6r6hCm21AvA2H3DkwIDAQABo4IBfTCCAXkwEgYDVR0T +AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG +CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv +bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k +c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw +VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC +ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz +MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu +Y3JsMB0GA1UdDgQWBBSoSmpjBH3duubRObemRWXv86jsoTANBgkqhkiG9w0BAQsF +AAOCAQEA3TPXEfNjWDjdGBX7CVW+dla5cEilaUcne8IkCJLxWh9KEik3JHRRHGJo +uM2VcGfl96S8TihRzZvoroed6ti6WqEBmtzw3Wodatg+VyOeph4EYpr/1wXKtx8/ +wApIvJSwtmVi4MFU5aMqrSDE6ea73Mj2tcMyo5jMd6jmeWUHK8so/joWUoHOUgwu +X4Po1QYz+3dszkDqMp4fklxBwXRsW10KXzPMTZ+sOPAveyxindmjkW8lGy+QsRlG +PfZ+G6Z6h7mjem0Y+iWlkYcV4PIWL1iwBi8saCbGS5jN2p8M+X+Q7UNKEkROb3N6 +KOqkqm57TH2H3eDJAkSnh6/DNFu0Qg== +-----END CERTIFICATE----- +` + rsaPrivateKeyFile = "fixtures/key.pem" + certificateFile = "fixtures/cert.pem" + multiCertificateFile = "fixtures/multi.pem" + rsaEncryptedPrivateKeyFile = "fixtures/encrypted_key.pem" + certificateOfEncryptedKeyFile = "fixtures/cert_of_encrypted_key.pem" +) + +// returns the name of a pre-generated, multiple-certificate CA file +// with both RSA and ECDSA certs. +func getMultiCert() string { + return multiCertificateFile +} + +// returns the names of pre-generated key and certificate files. +func getCertAndKey() (string, string) { + return rsaPrivateKeyFile, certificateFile +} + +// returns the names of pre-generated, encrypted private key and +// corresponding certificate file +func getCertAndEncryptedKey() (string, string) { + return rsaEncryptedPrivateKeyFile, certificateOfEncryptedKeyFile +} + +// If the cert files and directory are provided but are invalid, an error is +// returned. +func TestConfigServerTLSFailsIfUnableToLoadCerts(t *testing.T) { + key, cert := getCertAndKey() + ca := getMultiCert() + + tempFile, err := ioutil.TempFile("", "cert-test") + if err != nil { + t.Fatal("Unable to create temporary empty file") + } + defer os.RemoveAll(tempFile.Name()) + tempFile.Close() + + for _, badFile := range []string{"not-a-file", tempFile.Name()} { + for i := 0; i < 3; i++ { + files := []string{cert, key, ca} + files[i] = badFile + + result, err := Server(Options{ + CertFile: files[0], + KeyFile: files[1], + CAFile: files[2], + ClientAuth: tls.VerifyClientCertIfGiven, + }) + if err == nil || result != nil { + t.Fatal("Expected a non-real file to error and return a nil TLS config") + } + } + } +} + +// If server cert and key are provided and client auth and client CA are not +// set, a tls config with only the server certs will be returned. +func TestConfigServerTLSServerCertsOnly(t *testing.T) { + key, cert := getCertAndKey() + + keypair, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + t.Fatal("Unable to load the generated cert and key") + } + + tlsConfig, err := Server(Options{ + CertFile: cert, + KeyFile: key, + }) + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected server certificates") + } + if len(tlsConfig.Certificates[0].Certificate) != len(keypair.Certificate) { + t.Fatal("Unexpected server certificates") + } + for i, cert := range tlsConfig.Certificates[0].Certificate { + if !bytes.Equal(cert, keypair.Certificate[i]) { + t.Fatal("Unexpected server certificates") + } + } + + if !reflect.DeepEqual(tlsConfig.CipherSuites, DefaultServerAcceptedCiphers) { + t.Fatal("Unexpected server cipher suites") + } + if !tlsConfig.PreferServerCipherSuites { + t.Fatal("Expected server to prefer cipher suites") + } + if tlsConfig.MinVersion != tls.VersionTLS10 { + t.Fatal("Unexpected server TLS version") + } +} + +// If client CA is provided, it will only be used if the client auth is >= +// VerifyClientCertIfGiven +func TestConfigServerTLSClientCANotSetIfClientAuthTooLow(t *testing.T) { + key, cert := getCertAndKey() + ca := getMultiCert() + + tlsConfig, err := Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.RequestClientCert, + CAFile: ca, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected server certificates") + } + if tlsConfig.ClientAuth != tls.RequestClientCert { + t.Fatal("ClientAuth was not set to what was in the options") + } + if tlsConfig.ClientCAs != nil { + t.Fatalf("Client CAs should never have been set") + } +} + +// If client CA is provided, it will only be used if the client auth is >= +// VerifyClientCertIfGiven +func TestConfigServerTLSClientCASet(t *testing.T) { + key, cert := getCertAndKey() + ca := getMultiCert() + + tlsConfig, err := Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.VerifyClientCertIfGiven, + CAFile: ca, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected server certificates") + } + if tlsConfig.ClientAuth != tls.VerifyClientCertIfGiven { + t.Fatal("ClientAuth was not set to what was in the options") + } + basePool, err := SystemCertPool() + if err != nil { + basePool = x509.NewCertPool() + } + // because we are not enabling `ExclusiveRootPools`, any root pool will also contain the system roots + if tlsConfig.ClientCAs == nil || len(tlsConfig.ClientCAs.Subjects()) != len(basePool.Subjects())+2 { + t.Fatalf("Client CAs were never set correctly") + } +} + +// Exclusive root pools determines whether the CA pool will be a union of the system +// certificate pool and custom certs, or an exclusive or of the custom certs and system pool +func TestConfigServerExclusiveRootPools(t *testing.T) { + key, cert := getCertAndKey() + ca := getMultiCert() + + caBytes, err := ioutil.ReadFile(ca) + if err != nil { + t.Fatal("Unable to read CA certs", err) + } + + var testCerts []*x509.Certificate + for _, pemBytes := range [][]byte{caBytes, []byte(systemRootTrustedCert)} { + pemBlock, _ := pem.Decode(pemBytes) + if pemBlock == nil { + t.Fatal("Malformed certificate") + } + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + t.Fatal("Unable to parse certificate") + } + testCerts = append(testCerts, cert) + } + + // ExclusiveRootPools not set, so should be able to verify both system-signed certs + // and custom CA-signed certs + tlsConfig, err := Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.VerifyClientCertIfGiven, + CAFile: ca, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + for i, cert := range testCerts { + if _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}); err != nil { + t.Fatalf("Unable to verify certificate %d: %v", i, err) + } + } + + // ExclusiveRootPools set and custom CA provided, so system certs should not be verifiable + // and custom CA-signed certs should be verifiable + tlsConfig, err = Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.VerifyClientCertIfGiven, + CAFile: ca, + ExclusiveRootPools: true, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}) + switch { + case i == 0 && err != nil: + t.Fatal("Unable to verify custom certificate, even though the root pool should have only the custom CA", err) + case i == 1 && err == nil: + t.Fatal("Successfully verified system root-signed certificate though the root pool should have only the cusotm CA", err) + } + } + + // No CA file provided, system cert should be verifiable only + tlsConfig, err = Server(Options{ + CertFile: cert, + KeyFile: key, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}) + switch { + case i == 1 && err != nil: + t.Fatal("Unable to verify system root-signed certificate, even though the root pool should be the system pool only", err) + case i == 0 && err == nil: + t.Fatal("Successfully verified custom certificate though the root pool should be the system pool only", err) + } + } +} + +// If a valid minimum version is specified in the options, the server's +// minimum version should be set accordingly +func TestConfigServerTLSMinVersionIsSetBasedOnOptions(t *testing.T) { + versions := []uint16{ + tls.VersionTLS11, + tls.VersionTLS12, + } + key, cert := getCertAndKey() + + for _, v := range versions { + tlsConfig, err := Server(Options{ + MinVersion: v, + CertFile: cert, + KeyFile: key, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + if tlsConfig.MinVersion != v { + t.Fatal("Unexpected minimum TLS version: ", tlsConfig.MinVersion) + } + } +} + +// An error should be returned if the specified minimum version for the server +// is too low, i.e. less than VersionTLS10 +func TestConfigServerTLSMinVersionNotSetIfMinVersionIsTooLow(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Server(Options{ + MinVersion: tls.VersionSSL30, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned an error for minimum version below TLS10") + } +} + +// An error should be returned if an invalid minimum version for the server is +// in the options struct +func TestConfigServerTLSMinVersionNotSetIfMinVersionIsInvalid(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Server(Options{ + MinVersion: 1, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned error on invalid minimum version option") + } +} + +// The root CA is never set if InsecureSkipBoolean is set to true, but the +// default client options are set +func TestConfigClientTLSNoVerify(t *testing.T) { + ca := getMultiCert() + + tlsConfig, err := Client(Options{CAFile: ca, InsecureSkipVerify: true}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if tlsConfig.RootCAs != nil { + t.Fatal("Should not have set Root CAs", err) + } + + if !reflect.DeepEqual(tlsConfig.CipherSuites, clientCipherSuites) { + t.Fatal("Unexpected client cipher suites") + } + if tlsConfig.MinVersion != tls.VersionTLS12 { + t.Fatal("Unexpected client TLS version") + } + + if tlsConfig.Certificates != nil { + t.Fatal("Somehow client certificates were set") + } +} + +// The root CA is never set if InsecureSkipBoolean is set to false and root CA +// is not provided. +func TestConfigClientTLSNoRoot(t *testing.T) { + tlsConfig, err := Client(Options{}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if tlsConfig.RootCAs != nil { + t.Fatal("Should not have set Root CAs", err) + } + + if !reflect.DeepEqual(tlsConfig.CipherSuites, clientCipherSuites) { + t.Fatal("Unexpected client cipher suites") + } + if tlsConfig.MinVersion != tls.VersionTLS12 { + t.Fatal("Unexpected client TLS version") + } + + if tlsConfig.Certificates != nil { + t.Fatal("Somehow client certificates were set") + } +} + +// The RootCA is set if the file is provided and InsecureSkipVerify is false +func TestConfigClientTLSRootCAFileWithOneCert(t *testing.T) { + ca := getMultiCert() + + tlsConfig, err := Client(Options{CAFile: ca}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + basePool, err := SystemCertPool() + if err != nil { + basePool = x509.NewCertPool() + } + // because we are not enabling `ExclusiveRootPools`, any root pool will also contain the system roots + if tlsConfig.RootCAs == nil || len(tlsConfig.RootCAs.Subjects()) != len(basePool.Subjects())+2 { + t.Fatal("Root CAs not set properly", err) + } + if tlsConfig.Certificates != nil { + t.Fatal("Somehow client certificates were set") + } +} + +// An error is returned if a root CA is provided but the file doesn't exist. +func TestConfigClientTLSNonexistentRootCAFile(t *testing.T) { + tlsConfig, err := Client(Options{CAFile: "nonexistent"}) + + if err == nil || tlsConfig != nil { + t.Fatal("Should not have been able to configure client TLS", err) + } +} + +// An error is returned if either the client cert or the key are provided +// but invalid or blank. +func TestConfigClientTLSClientCertOrKeyInvalid(t *testing.T) { + key, cert := getCertAndKey() + + tempFile, err := ioutil.TempFile("", "cert-test") + if err != nil { + t.Fatal("Unable to create temporary empty file") + } + defer os.Remove(tempFile.Name()) + tempFile.Close() + + for i := 0; i < 2; i++ { + for _, invalid := range []string{"not-a-file", "", tempFile.Name()} { + files := []string{cert, key} + files[i] = invalid + + tlsConfig, err := Client(Options{CertFile: files[0], KeyFile: files[1]}) + if err == nil || tlsConfig != nil { + t.Fatal("Should not have been able to configure client TLS", err) + } + } + } +} + +// The certificate is set if the client cert and client key are provided and +// valid. +func TestConfigClientTLSValidClientCertAndKey(t *testing.T) { + key, cert := getCertAndKey() + + keypair, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + t.Fatal("Unable to load the generated cert and key") + } + + tlsConfig, err := Client(Options{CertFile: cert, KeyFile: key}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected client certificates") + } + if len(tlsConfig.Certificates[0].Certificate) != len(keypair.Certificate) { + t.Fatal("Unexpected client certificates") + } + for i, cert := range tlsConfig.Certificates[0].Certificate { + if !bytes.Equal(cert, keypair.Certificate[i]) { + t.Fatal("Unexpected client certificates") + } + } + + if tlsConfig.RootCAs != nil { + t.Fatal("Root CAs should not have been set", err) + } +} + +// The certificate is set if the client cert and encrypted client key are +// provided and valid and passphrase can decrypt the key +func TestConfigClientTLSValidClientCertAndEncryptedKey(t *testing.T) { + key, cert := getCertAndEncryptedKey() + + tlsConfig, err := Client(Options{ + CertFile: cert, + KeyFile: key, + Passphrase: "FooBar123", + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected client certificates") + } +} + +// The certificate is not set if the provided passphrase cannot decrypt +// the encrypted key. +func TestConfigClientTLSNotSetWithInvalidPassphrase(t *testing.T) { + key, cert := getCertAndEncryptedKey() + + tlsConfig, err := Client(Options{ + CertFile: cert, + KeyFile: key, + Passphrase: "InvalidPassphrase", + }) + + if !IsErrEncryptedKey(err) || tlsConfig != nil { + t.Fatal("Expected failure due to incorrect passphrase.") + } +} + +// Exclusive root pools determines whether the CA pool will be a union of the system +// certificate pool and custom certs, or an exclusive or of the custom certs and system pool +func TestConfigClientExclusiveRootPools(t *testing.T) { + ca := getMultiCert() + + caBytes, err := ioutil.ReadFile(ca) + if err != nil { + t.Fatal("Unable to read CA certs", err) + } + + var testCerts []*x509.Certificate + for _, pemBytes := range [][]byte{caBytes, []byte(systemRootTrustedCert)} { + pemBlock, _ := pem.Decode(pemBytes) + if pemBlock == nil { + t.Fatal("Malformed certificate") + } + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + t.Fatal("Unable to parse certificate") + } + testCerts = append(testCerts, cert) + } + + // ExclusiveRootPools not set, so should be able to verify both system-signed certs + // and custom CA-signed certs + tlsConfig, err := Client(Options{CAFile: ca}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + for i, cert := range testCerts { + if _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}); err != nil { + t.Fatalf("Unable to verify certificate %d: %v", i, err) + } + } + + // ExclusiveRootPools set and custom CA provided, so system certs should not be verifiable + // and custom CA-signed certs should be verifiable + tlsConfig, err = Client(Options{ + CAFile: ca, + ExclusiveRootPools: true, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}) + switch { + case i == 0 && err != nil: + t.Fatal("Unable to verify custom certificate, even though the root pool should have only the custom CA", err) + case i == 1 && err == nil: + t.Fatal("Successfully verified system root-signed certificate though the root pool should have only the cusotm CA", err) + } + } + + // No CA file provided, system cert should be verifiable only + tlsConfig, err = Client(Options{}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}) + switch { + case i == 1 && err != nil: + t.Fatal("Unable to verify system root-signed certificate, even though the root pool should be the system pool only", err) + case i == 0 && err == nil: + t.Fatal("Successfully verified custom certificate though the root pool should be the system pool only", err) + } + } +} + +// If a valid MinVersion is specified in the options, the client's +// minimum version should be set accordingly +func TestConfigClientTLSMinVersionIsSetBasedOnOptions(t *testing.T) { + key, cert := getCertAndKey() + + tlsConfig, err := Client(Options{ + MinVersion: tls.VersionTLS12, + CertFile: cert, + KeyFile: key, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if tlsConfig.MinVersion != tls.VersionTLS12 { + t.Fatal("Unexpected minimum TLS version: ", tlsConfig.MinVersion) + } +} + +// An error should be returned if the specified minimum version for the client +// is too low, i.e. less than VersionTLS12 +func TestConfigClientTLSMinVersionNotSetIfMinVersionIsTooLow(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Client(Options{ + MinVersion: tls.VersionTLS11, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned an error for minimum version below TLS12") + } +} + +// An error should be returned if an invalid minimum version for the client is +// in the options struct +func TestConfigClientTLSMinVersionNotSetIfMinVersionIsInvalid(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Client(Options{ + MinVersion: 1, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned error on invalid minimum version option") + } +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/fixtures/cert.pem b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/cert.pem new file mode 100644 index 0000000..09bd69e --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC1jCCAb6gAwIBAgIDAw0/MA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMTBHRl +c3QwHhcNMTYwMzI4MTg0MTQ3WhcNMjcwMzI4MTg0MTQ3WjAPMQ0wCwYDVQQDEwR0 +ZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1k1NO4wzCpxZ71Bo +SiYSWh8SE9jHtg6lz0QjMQXzFuLhpedjHJYx9fYbD+JVk5vnRbUqNUeZVKAGahfR +9vhm5I+cm359gYU0gHawLw91oh4JCiwUu77U2obHvtvcXLf6Fb/+MoSA5wH7vbL3 +T4vR1+hLt+R+kILAEHq/IlSdLD8CA0iA+ypHfCPOi5F2wVjAyMnQXgVDkAhzefpu +JkhN1yUgb5WK4qoSuOUDUYq/bRosLdHXDJiWRuqaU2zxO5cHVlrNAE5RuspfEzl4 +YP6boZTOomLEDbBTSJWgX2/ybvY7o4sCw7KrvyBIqSK9HbfaK1nFMFGoiSH6+1m4 +amWKrwIDAQABozswOTAOBgNVHQ8BAf8EBAMCBaAwGQYDVR0lBBIwEAYIKwYBBQUH +AwMGBFUdJQAwDAYDVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEADuXjLtGk +tU5ql+LFB32Cc2Laa0iO8aqJccOcXYKg4FD0um+1+YQO1CBZZqWjItH4CuJl5+2j +Tc9sFgrIVH5CmvUkOUFPCNDAJtxBvF6RQqRpehjheHDaNsYo9JNKHKEJB6OJrDgy +N5krM5FKyAp/EDTbIrGIZFMdxQGxK5MfpfPkKK44JgOQM3QWeR+LqIpfd34MD1jZ +jjYdl0+quIHiIdFR0a4Uam7o9GfUmcWe1VFthLb5pNhV6t+wyuLyMXVMNacKZSz/ +nOMWVQfgViZk6rHOPSMrFMc7Pp488I907MJKCryd21LcLqMuhb4BpWcJghnY8Lbs +uIPLsUHr3Pfp9Q== +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/go-connections/tlsconfig/fixtures/cert_of_encrypted_key.pem b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/cert_of_encrypted_key.pem new file mode 100644 index 0000000..d071076 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/cert_of_encrypted_key.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC1jCCAb6gAwIBAgIDAw0/MA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMTBHRl +c3QwHhcNMTYwNDIyMDQyMjM1WhcNMTgwNDIyMDQyMjM1WjAPMQ0wCwYDVQQDEwR0 +ZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4GRTos+Ik6kQG7wn +8E4HqPwgWXbY0T59UQsrbR+YbyxbUKV67Pgl4VImuUmYaism6Tm3EFYzeom5baMc +vW0hC+WbwVr1rq5ddBE8akYhlPY40SxFlh563vOi7lcFGM7xuUbTlhtAhYa5xc5U +thHYa8Mdqc2kMrmU4JBhNHoRk2mnRBo2J2/8RfOfioM6mH0t/MVtB/jSGpcwbbfj +2twKOpB9CoX57szVo7+DCFHpLxeuop+69REu5Egc2a5BtBuUf0fkUBKuF7yUy2xI +IbgjCiGb3Z+PCIC0CjNt9wExowPAGfxAJ8s1nNlpZav3707VZRtz7Js1skRjm9aU +8fhYNQIDAQABozswOTAOBgNVHQ8BAf8EBAMCBaAwGQYDVR0lBBIwEAYIKwYBBQUH +AwMGBFUdJQAwDAYDVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAcKCCV5Os +O2U7Ekp0jzOusV2+ykZzUe4sEds+ikblxK9SHV/pAPIVuAevdyE1LKmJ6ZGgeU2M +4MC6jC/XTlYNhsYCfKaJn53UscKI2urXFlk1Gv5VQP5EOrMWb76A5uj1nElxKe2C +bMVoUuMwRd9jnz6594D80jGGYpHRaF7yLtGbiflDjB+yv1OU6WnuVNr0nOb9ShR6 +WPlrQj5TUSpRHF/oKy9LVWuxYA9aiY1YREDZhhauw9pGAMx1lImfJcJ077MdxN4A +DwKAx3ooajAu1n3McY1oncWW+rWs2Ptvp6lKMGoZ50ElEPCMw4/hPtPMLq/DTWNj +l342KLVWgchlIA== +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/go-connections/tlsconfig/fixtures/encrypted_key.pem b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/encrypted_key.pem new file mode 100644 index 0000000..64e949d --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/encrypted_key.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-256-CBC,68ce1d54f187b663e152d9c5dc900fd3 + +ZVBeXx7kWiF0yPOORntrN6BsyIJE7krqTVhRfk6GAllaLQv0jvb31XHB1oWOaqnx +tb7kUuoBeQdl1hs/iAnkDMc59WJfEK9A9cAD/SgxTgdENOrzFSRNEfqketLA4eHZ +2sOLkSfv58HwA0p0gzqSrLQBo/6ZtF/57HxH166PtErPNTS1Usu/f4Oj0UqxTfbZ +B5LHsepyNLt6q/15fcY0TFYJwvgEXa4SridjT+8bTz2T+bx3QFijGnl7EdkTElni +FIwnDjFZaAULqoyUIB1y8guEZVkaWKncxPdRfhId84HklWdrrLtP5D6db1xNNpsp +LzGdciD3phJp6K0hpl+WrhYxuCKURa27tXMCuYOFd1hw/kM29jFbxSIlNBGN4OLL +v4wYrJFM21iWsz9c7Cqw5Yls2Rsx0QrXRFIxwT25z+HNx1fysQxYuxf3r+e2oz8e +8Os7hvcxG2XDz01/zpx8kzxUcLuh+3o5UOYlo9z6qsjaD5NUXY+X90PUrVO9fk5y +8o8pnElPnV88Ihrog5YTYy6egiQWHhDk2I4qlYPOBQNKTLg3KulAcmC9vQ8mR5Sy +p3c3MTgh0A3Zk5Dib+sQ0tdbwDcB2JCTqGal1FNEW5Z7qTHA4Bdm2l7hGs8cRpy4 +Ehkhv3s5wWmKcbwwlPuJ0UfPeDn6v9qE2/IkOy+jWgTpaFyWtXHc1/XdqMsJ8xN0 +thJw/GMtNabB1+zuayJnvmbJd2qW1smsFTHqX3BovXIH4vx1hE2d0lJpEBynk+wr +gpPgrRoEiqsPcsRoVjvKH3qwJLRdcGYhKqhbvRdynlagCLmE8iAI99r82u6t+03h +YNpRbafY4ceAYyK0IlRiJvGkBMfH7bMXcBMmXyQSBF27ZpNidyZSCHrU5xyHqJZO +XWUhl9GHplBfueh5E831S7mDqobd8RqnUvKVygyEOol5VUFDrggTAAKKN9VzM3uT +MaVymt6fA7stzf01fT+Wi7uCm5legTXG3Ca+XxD6TdE0dNzewd5jDsuqwXnt1iC4 +slvuLRZeRZDNvBd0G7Ohhp6jb2HHwkv9kQTZ+UEDbR/Gwxty4oT1MnwSE0mi9ZFN +6PTjrSxpIKe+mAhgzrepLMfATGayYQzucEArPG7Vp+NJva+j6FKloqrzXMjlP0hN +XSBr7AL+j+OR/tzDOoUG3xdsCl/u5hFTpjsW2ti870zoRUcK0fqJ9UIYjh66L7yT +KNkXsC+OcGuGkhtQ0gxx60OI7wp4bh2pKdT6e111/WTvXxVR2C3XhFBLUfNIz/7A +Oj+s0CaV4pBmCjIobLYpxC0ofLplwBLGf9xnsBiQF5dsgKgOhACeDmDMwqAJ3U/t +54hK/8Yb8W46Tjgbm0Qsj5gFXHofnyqDeQxAjsdCXsdMaPB8nyZpEkuQSEj9HlKW +xIEErVufkvqyrzhX1pxPs+C839Ueyeob6ZWQurqCLTdZh+3bhKcvi5iP+aLLjMWK +JT9tmAuFVkbPerqObVQFbnM4/re33YYD7QXCqta5bxcVeBI8N1HdwMYrDVhXelEx +mqGleUkkDHTWzAa3u1GKOzLXAYnD0TsTwml0+k+Rf0QMBiDJiKujfy7fGqfZF2vR +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/go-connections/tlsconfig/fixtures/key.pem b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/key.pem new file mode 100644 index 0000000..6a5e20a --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA1k1NO4wzCpxZ71BoSiYSWh8SE9jHtg6lz0QjMQXzFuLhpedj +HJYx9fYbD+JVk5vnRbUqNUeZVKAGahfR9vhm5I+cm359gYU0gHawLw91oh4JCiwU +u77U2obHvtvcXLf6Fb/+MoSA5wH7vbL3T4vR1+hLt+R+kILAEHq/IlSdLD8CA0iA ++ypHfCPOi5F2wVjAyMnQXgVDkAhzefpuJkhN1yUgb5WK4qoSuOUDUYq/bRosLdHX +DJiWRuqaU2zxO5cHVlrNAE5RuspfEzl4YP6boZTOomLEDbBTSJWgX2/ybvY7o4sC +w7KrvyBIqSK9HbfaK1nFMFGoiSH6+1m4amWKrwIDAQABAoIBAQC802wj9grbZJzS +A1WBUD6Hbi0tk6uVPR7YnD8t6QIivlL5LgLko2ruQKXjvxiMcai8gT7pp2bxa/d6 +7/Yv2PxAlFH3qOLJhyeVsf7X2JVb/X8VmXXDYAiJbI0AHRX0FJ+lHoDK3nn+En9Q +zSqgyqBhz+s343uptauqWZ2kkE3VNyqlPBhmKc5NcbR7Sgb4nJ3CkNAcxRkl1NeI +BRFdsTUYRNR3Vd++OvOzI4uzZfCIeUVqx+r7/SeLW0UwqeprMm7g+hFQLfH+e9SA +9lx0EIRoQFwgvKju2eogpSwvkSlObXnESu5OHYtnc+jpsOC0EbQgO0d6CqVZiqjR +2dRYsZkhAoGBAO69loXSAsyqUj0rT5iq59PuMlBEAlW6hQTfl6c8bnu1JUo2s/CH +OJfswxfHN32qmi99WbK2iLyrnznNYsyPnYKW0ObwuoqAdrlydfu7Fq9HSOACoIvK +jRMOsiJtM3JX2bHHV7yIwJ1+h++o2Ly803j7tKtYsrRQVZiWeTcR2IRZAoGBAOXL +bJFLbAhm3zRqhbiWuORqqyLxrDmIB6RY8vTdX47vwzkFGZJlCuL+vs6877I6eOc9 +wjH9qcOiJQJ4DWkAE+VS5PAPoj0UDRw7AkE9v3RwnmxvAfP5rPo5KimYxKq4yX6r ++Qc4ixwftCj0rxFoG4lnipwBFq4NXuHtIhbZXMZHAoGBAOGfatGtV9f0XyRP+jld +yxoO0p3oqAw86dlhNgFmq0NePo+UgxmdsW5i4z1lmJu6z1xyKoMq3q7vwtrtr6GD +WGhB/8tBVgnuvkUkVzw/44Bi7gxGb1OtaQXJra+7ZBN70tCgg9o5o080dWOZPruf ++Hst5eDJQpoGEd7S1lulEeqBAoGBAKAqdIak6izE/wg6wu+Q5lgW3SejCOakoKb1 +dIoljkhDZ2/j1RoLoVXsNzRDzlIMnV6X1jYf1ubLqj4ZTUeFTVjGuVl1nCA0TJsD +qiOtFTfkkxeDG/pgaSeTFocdut4/o/nNhep5h8RXeKwfN7LLPH4+FAd+Xr98BEk2 +jk8cu6RbAoGAHI9yRXKjlADBZLvxxMGHRfe7eK4PgABmluZLdsXzNmXxybrZDvdC +teipvIUSym7tvdDB6LHXKVp4mYeqHe/ktRatlhbQyPso2VPoMFQyuRBYKKFFAh0V +3d6EyTRnIxn/NW+XdcCUeufFfd+3BHyux68PyUsTtKRCJYfhExzJf70= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/go-connections/tlsconfig/fixtures/multi.pem b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/multi.pem new file mode 100644 index 0000000..9d61d2c --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/fixtures/multi.pem @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIC3DCCAcSgAwIBAgIDAw0/MA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMTBHRl +c3QwHhcNMTYwMzI4MTg0MTQ3WhcNMjcwMzI4MTg0MTQ3WjAPMQ0wCwYDVQQDEwR0 +ZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArVIJDnNnM1iX7Xj8 +bja4WsgHuENRBsBCROTDjQL1w7Ksin2jmCl/D7Gk9ifRJZ/HPE3BKo6B+3CDXygJ +Qvoe8SGWi6ae8lN4VgPoW7xDViAWhVmjIr+dNQXWD0hCq0YZuXyYSi5iXWeRaTvx +2eoG2VSkNnkc/0weEhX1nBGBscuz1UZqWp53m09eL7otngcNcdjmvLPiw4E3cric +UoLVonzf4ZE84Q7nNmfWfMKh4zJUyn8N766GAAoC6RAKsJ0xSDeRjkzSy7vGJKBv +nTBe6X1xyFZaN0mAjtRkYaxI9ZfI8K41Trhd88s4B4G61p70DY3dMLmuF8wGHVCF +lMMV6wIDAQABo0EwPzAOBgNVHQ8BAf8EBAMCAqQwGQYDVR0lBBIwEAYIKwYBBQUH +AwMGBFUdJQAwEgYDVR0TAQH/BAgwBgEB/wIBATANBgkqhkiG9w0BAQsFAAOCAQEA +LriCH0FTaOFIBl+kxAKjs7puhIZoYLwQ8IReXdEU7kYjPff3X/eiO82A0GwMM9Fp +/RdMlZGDSLyZ1a/gKCz55j9J4MW8ZH7RSEQs3dJQCvEPDO6UdgKy4Ft9yNh/ba1J +8/n0CqR+0QNov6Qp7eMDkQaDvKgCaABn8at6VLtuifJXFKDGt0LrR7wkQBJ85SZB +9GdfNSPzEZkb4FQ2gPgAk7ySoQ6Hi6mogEORbtJ7+Xiq57J+cEZQV6TOuwYgBG4e +MW3h37+7V5a/absybik1F/gcx4IbEBd/7an6a+a2l5FeTED5kpzvD4+yrQAoY8lT +gccRdP0O4CsLn7zlLRidPQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIBTzCB9qADAgECAgMDDT8wCgYIKoZIzj0EAwIwDzENMAsGA1UEAxMEdGVzdDAe +Fw0xNjAzMjgxODQxNDdaFw0yNzAzMjgxODQxNDdaMA8xDTALBgNVBAMTBHRlc3Qw +WTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQy8xfFkSiJA10EC1MMJzkLgu6csocC +UNyix7zOqijLsASE4an5LQsZ1PuhgVYnL+B9rAcnXgJaLM8YOmLRPqNdo0EwPzAO +BgNVHQ8BAf8EBAMCAqQwGQYDVR0lBBIwEAYIKwYBBQUHAwMGBFUdJQAwEgYDVR0T +AQH/BAgwBgEB/wIBATAKBggqhkjOPQQDAgNIADBFAiEAwUrZY7fHwr4FWONiBJo6 +97V9GAbj70ZJqV5M7rt+hMECIFY66kUrv0sG2vlhicSIGwSOdB3VcijdZSelzLn1 +iRk5 +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 0000000..9ea86d7 --- /dev/null +++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 0000000..b55b37b --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 0000000..477be8b --- /dev/null +++ b/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,27 @@ +# go-connections maintainers file +# +# This file describes who runs the docker/go-connections project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 0000000..4f70a4e --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 0000000..9043b35 --- /dev/null +++ b/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 0000000..ba02af2 --- /dev/null +++ b/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). +func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 46 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/docker/go-units/duration_test.go b/vendor/github.com/docker/go-units/duration_test.go new file mode 100644 index 0000000..e436c38 --- /dev/null +++ b/vendor/github.com/docker/go-units/duration_test.go @@ -0,0 +1,95 @@ +package units + +import ( + "fmt" + "testing" + "time" +) + +func ExampleHumanDuration() { + fmt.Println(HumanDuration(450 * time.Millisecond)) + fmt.Println(HumanDuration(47 * time.Second)) + fmt.Println(HumanDuration(1 * time.Minute)) + fmt.Println(HumanDuration(3 * time.Minute)) + fmt.Println(HumanDuration(35 * time.Minute)) + fmt.Println(HumanDuration(35*time.Minute + 40*time.Second)) + fmt.Println(HumanDuration(1 * time.Hour)) + fmt.Println(HumanDuration(1*time.Hour + 45*time.Minute)) + fmt.Println(HumanDuration(3 * time.Hour)) + fmt.Println(HumanDuration(3*time.Hour + 59*time.Minute)) + fmt.Println(HumanDuration(3*time.Hour + 60*time.Minute)) + fmt.Println(HumanDuration(24 * time.Hour)) + fmt.Println(HumanDuration(24*time.Hour + 12*time.Hour)) + fmt.Println(HumanDuration(2 * 24 * time.Hour)) + fmt.Println(HumanDuration(7 * 24 * time.Hour)) + fmt.Println(HumanDuration(13*24*time.Hour + 5*time.Hour)) + fmt.Println(HumanDuration(2 * 7 * 24 * time.Hour)) + fmt.Println(HumanDuration(2*7*24*time.Hour + 4*24*time.Hour)) + fmt.Println(HumanDuration(3 * 7 * 24 * time.Hour)) + fmt.Println(HumanDuration(4 * 7 * 24 * time.Hour)) + fmt.Println(HumanDuration(4*7*24*time.Hour + 3*24*time.Hour)) + fmt.Println(HumanDuration(1 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(1*30*24*time.Hour + 2*7*24*time.Hour)) + fmt.Println(HumanDuration(2 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(3*30*24*time.Hour + 1*7*24*time.Hour)) + fmt.Println(HumanDuration(5*30*24*time.Hour + 2*7*24*time.Hour)) + fmt.Println(HumanDuration(13 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(23 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(24 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(24*30*24*time.Hour + 2*7*24*time.Hour)) + fmt.Println(HumanDuration(3*365*24*time.Hour + 2*30*24*time.Hour)) +} + +func TestHumanDuration(t *testing.T) { + // Useful duration abstractions + day := 24 * time.Hour + week := 7 * day + month := 30 * day + year := 365 * day + + assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) + assertEquals(t, "1 second", HumanDuration(1*time.Second)) + assertEquals(t, "45 seconds", HumanDuration(45*time.Second)) + assertEquals(t, "46 seconds", HumanDuration(46*time.Second)) + assertEquals(t, "59 seconds", HumanDuration(59*time.Second)) + assertEquals(t, "About a minute", HumanDuration(60*time.Second)) + assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) + assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) + assertEquals(t, "45 minutes", HumanDuration(45*time.Minute)) + assertEquals(t, "45 minutes", HumanDuration(45*time.Minute+40*time.Second)) + assertEquals(t, "About an hour", HumanDuration(46*time.Minute)) + assertEquals(t, "About an hour", HumanDuration(59*time.Minute)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour+29*time.Minute)) + assertEquals(t, "2 hours", HumanDuration(1*time.Hour+31*time.Minute)) + assertEquals(t, "2 hours", HumanDuration(1*time.Hour+59*time.Minute)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour+29*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+31*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+59*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) + assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) + assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) + assertEquals(t, "2 days", HumanDuration(2*day)) + assertEquals(t, "7 days", HumanDuration(7*day)) + assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) + assertEquals(t, "2 weeks", HumanDuration(2*week)) + assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) + assertEquals(t, "3 weeks", HumanDuration(3*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) + assertEquals(t, "4 weeks", HumanDuration(1*month)) + assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) + assertEquals(t, "2 months", HumanDuration(2*month)) + assertEquals(t, "2 months", HumanDuration(2*month+2*week)) + assertEquals(t, "3 months", HumanDuration(3*month)) + assertEquals(t, "3 months", HumanDuration(3*month+1*week)) + assertEquals(t, "5 months", HumanDuration(5*month+2*week)) + assertEquals(t, "13 months", HumanDuration(13*month)) + assertEquals(t, "23 months", HumanDuration(23*month)) + assertEquals(t, "24 months", HumanDuration(24*month)) + assertEquals(t, "2 years", HumanDuration(24*month+2*week)) + assertEquals(t, "3 years", HumanDuration(3*year+2*month)) +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go new file mode 100644 index 0000000..44616c2 --- /dev/null +++ b/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,108 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + size, unit := getSizeAndUnit(size, base, _map) + return fmt.Sprintf(format, size, unit) +} + +// HumanSizeWithPrecision allows the size to be in any precision, +// instead of 4 digit precision used in units.HumanSize. +func HumanSizeWithPrecision(size float64, precision int) string { + size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) + return fmt.Sprintf("%.*g%s", precision, size, unit) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return HumanSizeWithPrecision(size, 4) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 4 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[3]) + if mul, ok := uMap[unitPrefix]; ok { + size *= float64(mul) + } + + return int64(size), nil +} diff --git a/vendor/github.com/docker/go-units/size_test.go b/vendor/github.com/docker/go-units/size_test.go new file mode 100644 index 0000000..8923e50 --- /dev/null +++ b/vendor/github.com/docker/go-units/size_test.go @@ -0,0 +1,165 @@ +package units + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "testing" +) + +func ExampleBytesSize() { + fmt.Println(BytesSize(1024)) + fmt.Println(BytesSize(1024 * 1024)) + fmt.Println(BytesSize(1048576)) + fmt.Println(BytesSize(2 * MiB)) + fmt.Println(BytesSize(3.42 * GiB)) + fmt.Println(BytesSize(5.372 * TiB)) + fmt.Println(BytesSize(2.22 * PiB)) +} + +func ExampleHumanSize() { + fmt.Println(HumanSize(1000)) + fmt.Println(HumanSize(1024)) + fmt.Println(HumanSize(1000000)) + fmt.Println(HumanSize(1048576)) + fmt.Println(HumanSize(2 * MB)) + fmt.Println(HumanSize(float64(3.42 * GB))) + fmt.Println(HumanSize(float64(5.372 * TB))) + fmt.Println(HumanSize(float64(2.22 * PB))) +} + +func ExampleFromHumanSize() { + fmt.Println(FromHumanSize("32")) + fmt.Println(FromHumanSize("32b")) + fmt.Println(FromHumanSize("32B")) + fmt.Println(FromHumanSize("32k")) + fmt.Println(FromHumanSize("32K")) + fmt.Println(FromHumanSize("32kb")) + fmt.Println(FromHumanSize("32Kb")) + fmt.Println(FromHumanSize("32Mb")) + fmt.Println(FromHumanSize("32Gb")) + fmt.Println(FromHumanSize("32Tb")) + fmt.Println(FromHumanSize("32Pb")) +} + +func ExampleRAMInBytes() { + fmt.Println(RAMInBytes("32")) + fmt.Println(RAMInBytes("32b")) + fmt.Println(RAMInBytes("32B")) + fmt.Println(RAMInBytes("32k")) + fmt.Println(RAMInBytes("32K")) + fmt.Println(RAMInBytes("32kb")) + fmt.Println(RAMInBytes("32Kb")) + fmt.Println(RAMInBytes("32Mb")) + fmt.Println(RAMInBytes("32Gb")) + fmt.Println(RAMInBytes("32Tb")) + fmt.Println(RAMInBytes("32Pb")) + fmt.Println(RAMInBytes("32PB")) + fmt.Println(RAMInBytes("32P")) +} + +func TestBytesSize(t *testing.T) { + assertEquals(t, "1KiB", BytesSize(1024)) + assertEquals(t, "1MiB", BytesSize(1024*1024)) + assertEquals(t, "1MiB", BytesSize(1048576)) + assertEquals(t, "2MiB", BytesSize(2*MiB)) + assertEquals(t, "3.42GiB", BytesSize(3.42*GiB)) + assertEquals(t, "5.372TiB", BytesSize(5.372*TiB)) + assertEquals(t, "2.22PiB", BytesSize(2.22*PiB)) + assertEquals(t, "1.049e+06YiB", BytesSize(KiB*KiB*KiB*KiB*KiB*PiB)) +} + +func TestHumanSize(t *testing.T) { + assertEquals(t, "1kB", HumanSize(1000)) + assertEquals(t, "1.024kB", HumanSize(1024)) + assertEquals(t, "1MB", HumanSize(1000000)) + assertEquals(t, "1.049MB", HumanSize(1048576)) + assertEquals(t, "2MB", HumanSize(2*MB)) + assertEquals(t, "3.42GB", HumanSize(float64(3.42*GB))) + assertEquals(t, "5.372TB", HumanSize(float64(5.372*TB))) + assertEquals(t, "2.22PB", HumanSize(float64(2.22*PB))) + assertEquals(t, "1e+04YB", HumanSize(float64(10000000000000*PB))) +} + +func TestFromHumanSize(t *testing.T) { + assertSuccessEquals(t, 32, FromHumanSize, "32") + assertSuccessEquals(t, 32, FromHumanSize, "32b") + assertSuccessEquals(t, 32, FromHumanSize, "32B") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") + assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") + assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") + assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") + assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") + + assertSuccessEquals(t, 32.5*KB, FromHumanSize, "32.5kB") + assertSuccessEquals(t, 32.5*KB, FromHumanSize, "32.5 kB") + assertSuccessEquals(t, 32, FromHumanSize, "32.5 B") + + assertError(t, FromHumanSize, "") + assertError(t, FromHumanSize, "hello") + assertError(t, FromHumanSize, "-32") + assertError(t, FromHumanSize, ".3kB") + assertError(t, FromHumanSize, " 32 ") + assertError(t, FromHumanSize, "32m b") + assertError(t, FromHumanSize, "32bm") +} + +func TestRAMInBytes(t *testing.T) { + assertSuccessEquals(t, 32, RAMInBytes, "32") + assertSuccessEquals(t, 32, RAMInBytes, "32b") + assertSuccessEquals(t, 32, RAMInBytes, "32B") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") + assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") + assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") + assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") + + assertSuccessEquals(t, 32, RAMInBytes, "32.3") + tmp := 32.3 * MiB + assertSuccessEquals(t, int64(tmp), RAMInBytes, "32.3 mb") + + assertError(t, RAMInBytes, "") + assertError(t, RAMInBytes, "hello") + assertError(t, RAMInBytes, "-32") + assertError(t, RAMInBytes, " 32 ") + assertError(t, RAMInBytes, "32m b") + assertError(t, RAMInBytes, "32bm") +} + +func assertEquals(t *testing.T, expected, actual interface{}) { + if expected != actual { + t.Errorf("Expected '%v' but got '%v'", expected, actual) + } +} + +// func that maps to the parse function signatures as testing abstraction +type parseFn func(string) (int64, error) + +// Define 'String()' for pretty-print +func (fn parseFn) String() string { + fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() + return fnName[strings.LastIndex(fnName, ".")+1:] +} + +func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { + res, err := fn(arg) + if err != nil || res != expected { + t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) + } +} + +func assertError(t *testing.T, fn parseFn, arg string) { + res, err := fn(arg) + if err == nil && res != -1 { + t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) + } +} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 0000000..5ac7fd8 --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,118 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. +func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/docker/go-units/ulimit_test.go b/vendor/github.com/docker/go-units/ulimit_test.go new file mode 100644 index 0000000..902e023 --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit_test.go @@ -0,0 +1,131 @@ +package units + +import ( + "fmt" + "strconv" + "testing" +) + +func ExampleParseUlimit() { + fmt.Println(ParseUlimit("nofile=512:1024")) + fmt.Println(ParseUlimit("nofile=1024")) + fmt.Println(ParseUlimit("cpu=2:4")) + fmt.Println(ParseUlimit("cpu=6")) +} + +func TestParseUlimitValid(t *testing.T) { + u1 := &Ulimit{"nofile", 1024, 512} + if u2, _ := ParseUlimit("nofile=512:1024"); *u1 != *u2 { + t.Fatalf("expected %q, but got %q", u1, u2) + } +} + +func TestParseUlimitInvalidLimitType(t *testing.T) { + if _, err := ParseUlimit("notarealtype=1024:1024"); err == nil { + t.Fatalf("expected error on invalid ulimit type") + } +} + +func TestParseUlimitBadFormat(t *testing.T) { + if _, err := ParseUlimit("nofile:1024:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := ParseUlimit("nofile"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := ParseUlimit("nofile="); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := ParseUlimit("nofile=:"); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := ParseUlimit("nofile=:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } +} + +func TestParseUlimitHardLessThanSoft(t *testing.T) { + if _, err := ParseUlimit("nofile=1024:1"); err == nil { + t.Fatal("expected error on hard limit less than soft limit") + } +} + +func TestParseUlimitInvalidValueType(t *testing.T) { + if _, err := ParseUlimit("nofile=asdf"); err == nil { + t.Fatal("expected error on bad value type, but got no error") + } else if _, ok := err.(*strconv.NumError); !ok { + t.Fatalf("expected error on bad value type, but got `%s`", err) + } + + if _, err := ParseUlimit("nofile=1024:asdf"); err == nil { + t.Fatal("expected error on bad value type, but got no error") + } else if _, ok := err.(*strconv.NumError); !ok { + t.Fatalf("expected error on bad value type, but got `%s`", err) + } +} + +func TestParseUlimitTooManyValueArgs(t *testing.T) { + if _, err := ParseUlimit("nofile=1024:1:50"); err == nil { + t.Fatalf("expected error on more than two value arguments") + } +} + +func TestUlimitStringOutput(t *testing.T) { + u := &Ulimit{"nofile", 1024, 512} + if s := u.String(); s != "nofile=512:1024" { + t.Fatal("expected String to return nofile=512:1024, but got", s) + } +} + +func TestGetRlimit(t *testing.T) { + tt := []struct { + ulimit Ulimit + rlimit Rlimit + }{ + {Ulimit{"core", 10, 12}, Rlimit{rlimitCore, 10, 12}}, + {Ulimit{"cpu", 1, 10}, Rlimit{rlimitCPU, 1, 10}}, + {Ulimit{"data", 5, 0}, Rlimit{rlimitData, 5, 0}}, + {Ulimit{"fsize", 2, 2}, Rlimit{rlimitFsize, 2, 2}}, + {Ulimit{"locks", 0, 0}, Rlimit{rlimitLocks, 0, 0}}, + {Ulimit{"memlock", 10, 10}, Rlimit{rlimitMemlock, 10, 10}}, + {Ulimit{"msgqueue", 9, 1}, Rlimit{rlimitMsgqueue, 9, 1}}, + {Ulimit{"nice", 9, 9}, Rlimit{rlimitNice, 9, 9}}, + {Ulimit{"nofile", 4, 100}, Rlimit{rlimitNofile, 4, 100}}, + {Ulimit{"nproc", 5, 5}, Rlimit{rlimitNproc, 5, 5}}, + {Ulimit{"rss", 0, 5}, Rlimit{rlimitRss, 0, 5}}, + {Ulimit{"rtprio", 100, 65}, Rlimit{rlimitRtprio, 100, 65}}, + {Ulimit{"rttime", 55, 102}, Rlimit{rlimitRttime, 55, 102}}, + {Ulimit{"sigpending", 14, 20}, Rlimit{rlimitSigpending, 14, 20}}, + {Ulimit{"stack", 1, 1}, Rlimit{rlimitStack, 1, 1}}, + } + + for _, te := range tt { + res, err := te.ulimit.GetRlimit() + if err != nil { + t.Errorf("expected not to fail: %s", err) + } + if res.Type != te.rlimit.Type { + t.Errorf("expected Type to be %d but got %d", + te.rlimit.Type, res.Type) + } + if res.Soft != te.rlimit.Soft { + t.Errorf("expected Soft to be %d but got %d", + te.rlimit.Soft, res.Soft) + } + if res.Hard != te.rlimit.Hard { + t.Errorf("expected Hard to be %d but got %d", + te.rlimit.Hard, res.Hard) + } + + } +} + +func TestGetRlimitBadUlimitName(t *testing.T) { + name := "bla" + uLimit := Ulimit{name, 0, 0} + if _, err := uLimit.GetRlimit(); err == nil { + t.Error("expected error on bad Ulimit name") + } +} diff --git a/vendor/github.com/jessevdk/go-flags/.travis.yml b/vendor/github.com/jessevdk/go-flags/.travis.yml new file mode 100644 index 0000000..e7c4be0 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/.travis.yml @@ -0,0 +1,38 @@ +language: go + +go: + - 1.6.x + - 1.7.x + +install: + # go-flags + - go get -d -v ./... + - go build -v ./... + + # linting + - go get github.com/golang/lint + - go install github.com/golang/lint/golint + + # code coverage + - go get golang.org/x/tools/cmd/cover + - go get github.com/onsi/ginkgo/ginkgo + - go get github.com/modocache/gover + - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then go get github.com/mattn/goveralls; fi + +script: + # go-flags + - $(exit $(gofmt -l . | wc -l)) + - go test -v ./... + + # linting + - go tool vet -all=true -v=true . || true + - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/golint ./... + + # code coverage + - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/ginkgo -r -cover + - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/gover + - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi + +env: + # coveralls.io + secure: "RCYbiB4P0RjQRIoUx/vG/AjP3mmYCbzOmr86DCww1Z88yNcy3hYr3Cq8rpPtYU5v0g7wTpu4adaKIcqRE9xknYGbqj3YWZiCoBP1/n4Z+9sHW3Dsd9D/GRGeHUus0laJUGARjWoCTvoEtOgTdGQDoX7mH+pUUY0FBltNYUdOiiU=" diff --git a/vendor/github.com/jessevdk/go-flags/LICENSE b/vendor/github.com/jessevdk/go-flags/LICENSE new file mode 100644 index 0000000..bcca0d5 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2012 Jesse van den Kieboom. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/jessevdk/go-flags/README.md b/vendor/github.com/jessevdk/go-flags/README.md new file mode 100644 index 0000000..9378b76 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/README.md @@ -0,0 +1,135 @@ +go-flags: a go library for parsing command line arguments +========================================================= + +[![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags) [![Build Status](https://travis-ci.org/jessevdk/go-flags.svg?branch=master)](https://travis-ci.org/jessevdk/go-flags) [![Coverage Status](https://img.shields.io/coveralls/jessevdk/go-flags.svg)](https://coveralls.io/r/jessevdk/go-flags?branch=master) + +This library provides similar functionality to the builtin flag library of +go, but provides much more functionality and nicer formatting. From the +documentation: + +Package flags provides an extensive command line option parser. +The flags package is similar in functionality to the go builtin flag package +but provides more options and uses reflection to provide a convenient and +succinct way of specifying command line options. + +Supported features: +* Options with short names (-v) +* Options with long names (--verbose) +* Options with and without arguments (bool v.s. other type) +* Options with optional arguments and default values +* Multiple option groups each containing a set of options +* Generate and print well-formatted help message +* Passing remaining command line arguments after -- (optional) +* Ignoring unknown command line options (optional) +* Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification +* Supports multiple short options -aux +* Supports all primitive go types (string, int{8..64}, uint{8..64}, float) +* Supports same option multiple times (can store in slice or last option counts) +* Supports maps +* Supports function callbacks +* Supports namespaces for (nested) option groups + +The flags package uses structs, reflection and struct field tags +to allow users to specify command line options. This results in very simple +and concise specification of your application options. For example: + +```go +type Options struct { + Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` +} +``` + +This specifies one option with a short name -v and a long name --verbose. +When either -v or --verbose is found on the command line, a 'true' value +will be appended to the Verbose field. e.g. when specifying -vvv, the +resulting value of Verbose will be {[true, true, true]}. + +Example: +-------- +```go +var opts struct { + // Slice of bool will append 'true' each time the option + // is encountered (can be set multiple times, like -vvv) + Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` + + // Example of automatic marshalling to desired type (uint) + Offset uint `long:"offset" description:"Offset"` + + // Example of a callback, called each time the option is found. + Call func(string) `short:"c" description:"Call phone number"` + + // Example of a required flag + Name string `short:"n" long:"name" description:"A name" required:"true"` + + // Example of a value name + File string `short:"f" long:"file" description:"A file" value-name:"FILE"` + + // Example of a pointer + Ptr *int `short:"p" description:"A pointer to an integer"` + + // Example of a slice of strings + StringSlice []string `short:"s" description:"A slice of strings"` + + // Example of a slice of pointers + PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` + + // Example of a map + IntMap map[string]int `long:"intmap" description:"A map from string to int"` +} + +// Callback which will invoke callto: to call a number. +// Note that this works just on OS X (and probably only with +// Skype) but it shows the idea. +opts.Call = func(num string) { + cmd := exec.Command("open", "callto:"+num) + cmd.Start() + cmd.Process.Release() +} + +// Make some fake arguments to parse. +args := []string{ + "-vv", + "--offset=5", + "-n", "Me", + "-p", "3", + "-s", "hello", + "-s", "world", + "--ptrslice", "hello", + "--ptrslice", "world", + "--intmap", "a:1", + "--intmap", "b:5", + "arg1", + "arg2", + "arg3", +} + +// Parse flags from `args'. Note that here we use flags.ParseArgs for +// the sake of making a working example. Normally, you would simply use +// flags.Parse(&opts) which uses os.Args +args, err := flags.ParseArgs(&opts, args) + +if err != nil { + panic(err) + os.Exit(1) +} + +fmt.Printf("Verbosity: %v\n", opts.Verbose) +fmt.Printf("Offset: %d\n", opts.Offset) +fmt.Printf("Name: %s\n", opts.Name) +fmt.Printf("Ptr: %d\n", *opts.Ptr) +fmt.Printf("StringSlice: %v\n", opts.StringSlice) +fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1]) +fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"]) +fmt.Printf("Remaining args: %s\n", strings.Join(args, " ")) + +// Output: Verbosity: [true true] +// Offset: 5 +// Name: Me +// Ptr: 3 +// StringSlice: [hello world] +// PtrSlice: [hello world] +// IntMap: [a:1 b:5] +// Remaining args: arg1 arg2 arg3 +``` + +More information can be found in the godocs: diff --git a/vendor/github.com/jessevdk/go-flags/arg.go b/vendor/github.com/jessevdk/go-flags/arg.go new file mode 100644 index 0000000..8ec6204 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/arg.go @@ -0,0 +1,27 @@ +package flags + +import ( + "reflect" +) + +// Arg represents a positional argument on the command line. +type Arg struct { + // The name of the positional argument (used in the help) + Name string + + // A description of the positional argument (used in the help) + Description string + + // The minimal number of required positional arguments + Required int + + // The maximum number of required positional arguments + RequiredMaximum int + + value reflect.Value + tag multiTag +} + +func (a *Arg) isRemaining() bool { + return a.value.Type().Kind() == reflect.Slice +} diff --git a/vendor/github.com/jessevdk/go-flags/arg_test.go b/vendor/github.com/jessevdk/go-flags/arg_test.go new file mode 100644 index 0000000..c7c0a61 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/arg_test.go @@ -0,0 +1,163 @@ +package flags + +import ( + "testing" +) + +func TestPositional(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Positional struct { + Command int + Filename string + Rest []string + } `positional-args:"yes" required:"yes"` + }{} + + p := NewParser(&opts, Default) + ret, err := p.ParseArgs([]string{"10", "arg_test.go", "a", "b"}) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + if opts.Positional.Command != 10 { + t.Fatalf("Expected opts.Positional.Command to be 10, but got %v", opts.Positional.Command) + } + + if opts.Positional.Filename != "arg_test.go" { + t.Fatalf("Expected opts.Positional.Filename to be \"arg_test.go\", but got %v", opts.Positional.Filename) + } + + assertStringArray(t, opts.Positional.Rest, []string{"a", "b"}) + assertStringArray(t, ret, []string{}) +} + +func TestPositionalRequired(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Positional struct { + Command int + Filename string + Rest []string + } `positional-args:"yes" required:"yes"` + }{} + + p := NewParser(&opts, None) + _, err := p.ParseArgs([]string{"10"}) + + assertError(t, err, ErrRequired, "the required argument `Filename` was not provided") +} + +func TestPositionalRequiredRest1Fail(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Positional struct { + Rest []string `required:"yes"` + } `positional-args:"yes"` + }{} + + p := NewParser(&opts, None) + _, err := p.ParseArgs([]string{}) + + assertError(t, err, ErrRequired, "the required argument `Rest (at least 1 argument)` was not provided") +} + +func TestPositionalRequiredRest1Pass(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Positional struct { + Rest []string `required:"yes"` + } `positional-args:"yes"` + }{} + + p := NewParser(&opts, None) + _, err := p.ParseArgs([]string{"rest1"}) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + if len(opts.Positional.Rest) != 1 { + t.Fatalf("Expected 1 positional rest argument") + } + + assertString(t, opts.Positional.Rest[0], "rest1") +} + +func TestPositionalRequiredRest2Fail(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Positional struct { + Rest []string `required:"2"` + } `positional-args:"yes"` + }{} + + p := NewParser(&opts, None) + _, err := p.ParseArgs([]string{"rest1"}) + + assertError(t, err, ErrRequired, "the required argument `Rest (at least 2 arguments, but got only 1)` was not provided") +} + +func TestPositionalRequiredRest2Pass(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Positional struct { + Rest []string `required:"2"` + } `positional-args:"yes"` + }{} + + p := NewParser(&opts, None) + _, err := p.ParseArgs([]string{"rest1", "rest2", "rest3"}) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + if len(opts.Positional.Rest) != 3 { + t.Fatalf("Expected 3 positional rest argument") + } + + assertString(t, opts.Positional.Rest[0], "rest1") + assertString(t, opts.Positional.Rest[1], "rest2") + assertString(t, opts.Positional.Rest[2], "rest3") +} + +func TestPositionalRequiredRestRangeFail(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Positional struct { + Rest []string `required:"1-2"` + } `positional-args:"yes"` + }{} + + p := NewParser(&opts, None) + _, err := p.ParseArgs([]string{"rest1", "rest2", "rest3"}) + + assertError(t, err, ErrRequired, "the required argument `Rest (at most 2 arguments, but got 3)` was not provided") +} + +func TestPositionalRequiredRestRangeEmptyFail(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Positional struct { + Rest []string `required:"0-0"` + } `positional-args:"yes"` + }{} + + p := NewParser(&opts, None) + _, err := p.ParseArgs([]string{"some", "thing"}) + + assertError(t, err, ErrRequired, "the required argument `Rest (zero arguments)` was not provided") +} diff --git a/vendor/github.com/jessevdk/go-flags/assert_test.go b/vendor/github.com/jessevdk/go-flags/assert_test.go new file mode 100644 index 0000000..8e06636 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/assert_test.go @@ -0,0 +1,177 @@ +package flags + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "runtime" + "testing" +) + +func assertCallerInfo() (string, int) { + ptr := make([]uintptr, 15) + n := runtime.Callers(1, ptr) + + if n == 0 { + return "", 0 + } + + mef := runtime.FuncForPC(ptr[0]) + mefile, meline := mef.FileLine(ptr[0]) + + for i := 2; i < n; i++ { + f := runtime.FuncForPC(ptr[i]) + file, line := f.FileLine(ptr[i]) + + if file != mefile { + return file, line + } + } + + return mefile, meline +} + +func assertErrorf(t *testing.T, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + + file, line := assertCallerInfo() + + t.Errorf("%s:%d: %s", path.Base(file), line, msg) +} + +func assertFatalf(t *testing.T, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + + file, line := assertCallerInfo() + + t.Fatalf("%s:%d: %s", path.Base(file), line, msg) +} + +func assertString(t *testing.T, a string, b string) { + if a != b { + assertErrorf(t, "Expected %#v, but got %#v", b, a) + } +} + +func assertStringArray(t *testing.T, a []string, b []string) { + if len(a) != len(b) { + assertErrorf(t, "Expected %#v, but got %#v", b, a) + return + } + + for i, v := range a { + if b[i] != v { + assertErrorf(t, "Expected %#v, but got %#v", b, a) + return + } + } +} + +func assertBoolArray(t *testing.T, a []bool, b []bool) { + if len(a) != len(b) { + assertErrorf(t, "Expected %#v, but got %#v", b, a) + return + } + + for i, v := range a { + if b[i] != v { + assertErrorf(t, "Expected %#v, but got %#v", b, a) + return + } + } +} + +func assertParserSuccess(t *testing.T, data interface{}, args ...string) (*Parser, []string) { + parser := NewParser(data, Default&^PrintErrors) + ret, err := parser.ParseArgs(args) + + if err != nil { + t.Fatalf("Unexpected parse error: %s", err) + return nil, nil + } + + return parser, ret +} + +func assertParseSuccess(t *testing.T, data interface{}, args ...string) []string { + _, ret := assertParserSuccess(t, data, args...) + return ret +} + +func assertError(t *testing.T, err error, typ ErrorType, msg string) { + if err == nil { + assertFatalf(t, "Expected error: %s", msg) + return + } + + if e, ok := err.(*Error); !ok { + assertFatalf(t, "Expected Error type, but got %#v", err) + } else { + if e.Type != typ { + assertErrorf(t, "Expected error type {%s}, but got {%s}", typ, e.Type) + } + + if e.Message != msg { + assertErrorf(t, "Expected error message %#v, but got %#v", msg, e.Message) + } + } +} + +func assertParseFail(t *testing.T, typ ErrorType, msg string, data interface{}, args ...string) []string { + parser := NewParser(data, Default&^PrintErrors) + ret, err := parser.ParseArgs(args) + + assertError(t, err, typ, msg) + return ret +} + +func diff(a, b string) (string, error) { + atmp, err := ioutil.TempFile("", "help-diff") + + if err != nil { + return "", err + } + + btmp, err := ioutil.TempFile("", "help-diff") + + if err != nil { + return "", err + } + + if _, err := io.WriteString(atmp, a); err != nil { + return "", err + } + + if _, err := io.WriteString(btmp, b); err != nil { + return "", err + } + + ret, err := exec.Command("diff", "-u", "-d", "--label", "got", atmp.Name(), "--label", "expected", btmp.Name()).Output() + + os.Remove(atmp.Name()) + os.Remove(btmp.Name()) + + if err.Error() == "exit status 1" { + return string(ret), nil + } + + return string(ret), err +} + +func assertDiff(t *testing.T, actual, expected, msg string) { + if actual == expected { + return + } + + ret, err := diff(actual, expected) + + if err != nil { + assertErrorf(t, "Unexpected diff error: %s", err) + assertErrorf(t, "Unexpected %s, expected:\n\n%s\n\nbut got\n\n%s", msg, expected, actual) + } else { + assertErrorf(t, "Unexpected %s:\n\n%s", msg, ret) + } +} diff --git a/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh new file mode 100755 index 0000000..c494f61 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +echo '# linux arm7' +GOARM=7 GOARCH=arm GOOS=linux go build +echo '# linux arm5' +GOARM=5 GOARCH=arm GOOS=linux go build +echo '# windows 386' +GOARCH=386 GOOS=windows go build +echo '# windows amd64' +GOARCH=amd64 GOOS=windows go build +echo '# darwin' +GOARCH=amd64 GOOS=darwin go build +echo '# freebsd' +GOARCH=amd64 GOOS=freebsd go build diff --git a/vendor/github.com/jessevdk/go-flags/closest.go b/vendor/github.com/jessevdk/go-flags/closest.go new file mode 100644 index 0000000..3b51875 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/closest.go @@ -0,0 +1,59 @@ +package flags + +func levenshtein(s string, t string) int { + if len(s) == 0 { + return len(t) + } + + if len(t) == 0 { + return len(s) + } + + dists := make([][]int, len(s)+1) + for i := range dists { + dists[i] = make([]int, len(t)+1) + dists[i][0] = i + } + + for j := range t { + dists[0][j] = j + } + + for i, sc := range s { + for j, tc := range t { + if sc == tc { + dists[i+1][j+1] = dists[i][j] + } else { + dists[i+1][j+1] = dists[i][j] + 1 + if dists[i+1][j] < dists[i+1][j+1] { + dists[i+1][j+1] = dists[i+1][j] + 1 + } + if dists[i][j+1] < dists[i+1][j+1] { + dists[i+1][j+1] = dists[i][j+1] + 1 + } + } + } + } + + return dists[len(s)][len(t)] +} + +func closestChoice(cmd string, choices []string) (string, int) { + if len(choices) == 0 { + return "", 0 + } + + mincmd := -1 + mindist := -1 + + for i, c := range choices { + l := levenshtein(cmd, c) + + if mincmd < 0 || l < mindist { + mindist = l + mincmd = i + } + } + + return choices[mincmd], mindist +} diff --git a/vendor/github.com/jessevdk/go-flags/command.go b/vendor/github.com/jessevdk/go-flags/command.go new file mode 100644 index 0000000..2662843 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/command.go @@ -0,0 +1,455 @@ +package flags + +import ( + "reflect" + "sort" + "strconv" + "strings" + "unsafe" +) + +// Command represents an application command. Commands can be added to the +// parser (which itself is a command) and are selected/executed when its name +// is specified on the command line. The Command type embeds a Group and +// therefore also carries a set of command specific options. +type Command struct { + // Embedded, see Group for more information + *Group + + // The name by which the command can be invoked + Name string + + // The active sub command (set by parsing) or nil + Active *Command + + // Whether subcommands are optional + SubcommandsOptional bool + + // Aliases for the command + Aliases []string + + // Whether positional arguments are required + ArgsRequired bool + + commands []*Command + hasBuiltinHelpGroup bool + args []*Arg +} + +// Commander is an interface which can be implemented by any command added in +// the options. When implemented, the Execute method will be called for the last +// specified (sub)command providing the remaining command line arguments. +type Commander interface { + // Execute will be called for the last active (sub)command. The + // args argument contains the remaining command line arguments. The + // error that Execute returns will be eventually passed out of the + // Parse method of the Parser. + Execute(args []string) error +} + +// Usage is an interface which can be implemented to show a custom usage string +// in the help message shown for a command. +type Usage interface { + // Usage is called for commands to allow customized printing of command + // usage in the generated help message. + Usage() string +} + +type lookup struct { + shortNames map[string]*Option + longNames map[string]*Option + + commands map[string]*Command +} + +// AddCommand adds a new command to the parser with the given name and data. The +// data needs to be a pointer to a struct from which the fields indicate which +// options are in the command. The provided data can implement the Command and +// Usage interfaces. +func (c *Command) AddCommand(command string, shortDescription string, longDescription string, data interface{}) (*Command, error) { + cmd := newCommand(command, shortDescription, longDescription, data) + + cmd.parent = c + + if err := cmd.scan(); err != nil { + return nil, err + } + + c.commands = append(c.commands, cmd) + return cmd, nil +} + +// AddGroup adds a new group to the command with the given name and data. The +// data needs to be a pointer to a struct from which the fields indicate which +// options are in the group. +func (c *Command) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) { + group := newGroup(shortDescription, longDescription, data) + + group.parent = c + + if err := group.scanType(c.scanSubcommandHandler(group)); err != nil { + return nil, err + } + + c.groups = append(c.groups, group) + return group, nil +} + +// Commands returns a list of subcommands of this command. +func (c *Command) Commands() []*Command { + return c.commands +} + +// Find locates the subcommand with the given name and returns it. If no such +// command can be found Find will return nil. +func (c *Command) Find(name string) *Command { + for _, cc := range c.commands { + if cc.match(name) { + return cc + } + } + + return nil +} + +// FindOptionByLongName finds an option that is part of the command, or any of +// its parent commands, by matching its long name (including the option +// namespace). +func (c *Command) FindOptionByLongName(longName string) (option *Option) { + for option == nil && c != nil { + option = c.Group.FindOptionByLongName(longName) + + c, _ = c.parent.(*Command) + } + + return option +} + +// FindOptionByShortName finds an option that is part of the command, or any of +// its parent commands, by matching its long name (including the option +// namespace). +func (c *Command) FindOptionByShortName(shortName rune) (option *Option) { + for option == nil && c != nil { + option = c.Group.FindOptionByShortName(shortName) + + c, _ = c.parent.(*Command) + } + + return option +} + +// Args returns a list of positional arguments associated with this command. +func (c *Command) Args() []*Arg { + ret := make([]*Arg, len(c.args)) + copy(ret, c.args) + + return ret +} + +func newCommand(name string, shortDescription string, longDescription string, data interface{}) *Command { + return &Command{ + Group: newGroup(shortDescription, longDescription, data), + Name: name, + } +} + +func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler { + f := func(realval reflect.Value, sfield *reflect.StructField) (bool, error) { + mtag := newMultiTag(string(sfield.Tag)) + + if err := mtag.Parse(); err != nil { + return true, err + } + + positional := mtag.Get("positional-args") + + if len(positional) != 0 { + stype := realval.Type() + + for i := 0; i < stype.NumField(); i++ { + field := stype.Field(i) + + m := newMultiTag((string(field.Tag))) + + if err := m.Parse(); err != nil { + return true, err + } + + name := m.Get("positional-arg-name") + + if len(name) == 0 { + name = field.Name + } + + required := -1 + requiredMaximum := -1 + + sreq := m.Get("required") + + if sreq != "" { + required = 1 + + rng := strings.SplitN(sreq, "-", 2) + + if len(rng) > 1 { + if preq, err := strconv.ParseInt(rng[0], 10, 32); err == nil { + required = int(preq) + } + + if preq, err := strconv.ParseInt(rng[1], 10, 32); err == nil { + requiredMaximum = int(preq) + } + } else { + if preq, err := strconv.ParseInt(sreq, 10, 32); err == nil { + required = int(preq) + } + } + } + + arg := &Arg{ + Name: name, + Description: m.Get("description"), + Required: required, + RequiredMaximum: requiredMaximum, + + value: realval.Field(i), + tag: m, + } + + c.args = append(c.args, arg) + + if len(mtag.Get("required")) != 0 { + c.ArgsRequired = true + } + } + + return true, nil + } + + subcommand := mtag.Get("command") + + if len(subcommand) != 0 { + ptrval := reflect.NewAt(realval.Type(), unsafe.Pointer(realval.UnsafeAddr())) + + shortDescription := mtag.Get("description") + longDescription := mtag.Get("long-description") + subcommandsOptional := mtag.Get("subcommands-optional") + aliases := mtag.GetMany("alias") + + subc, err := c.AddCommand(subcommand, shortDescription, longDescription, ptrval.Interface()) + if err != nil { + return true, err + } + + subc.Hidden = mtag.Get("hidden") != "" + + if len(subcommandsOptional) > 0 { + subc.SubcommandsOptional = true + } + + if len(aliases) > 0 { + subc.Aliases = aliases + } + + return true, nil + } + + return parentg.scanSubGroupHandler(realval, sfield) + } + + return f +} + +func (c *Command) scan() error { + return c.scanType(c.scanSubcommandHandler(c.Group)) +} + +func (c *Command) eachOption(f func(*Command, *Group, *Option)) { + c.eachCommand(func(c *Command) { + c.eachGroup(func(g *Group) { + for _, option := range g.options { + f(c, g, option) + } + }) + }, true) +} + +func (c *Command) eachCommand(f func(*Command), recurse bool) { + f(c) + + for _, cc := range c.commands { + if recurse { + cc.eachCommand(f, true) + } else { + f(cc) + } + } +} + +func (c *Command) eachActiveGroup(f func(cc *Command, g *Group)) { + c.eachGroup(func(g *Group) { + f(c, g) + }) + + if c.Active != nil { + c.Active.eachActiveGroup(f) + } +} + +func (c *Command) addHelpGroups(showHelp func() error) { + if !c.hasBuiltinHelpGroup { + c.addHelpGroup(showHelp) + c.hasBuiltinHelpGroup = true + } + + for _, cc := range c.commands { + cc.addHelpGroups(showHelp) + } +} + +func (c *Command) makeLookup() lookup { + ret := lookup{ + shortNames: make(map[string]*Option), + longNames: make(map[string]*Option), + commands: make(map[string]*Command), + } + + parent := c.parent + + var parents []*Command + + for parent != nil { + if cmd, ok := parent.(*Command); ok { + parents = append(parents, cmd) + parent = cmd.parent + } else { + parent = nil + } + } + + for i := len(parents) - 1; i >= 0; i-- { + parents[i].fillLookup(&ret, true) + } + + c.fillLookup(&ret, false) + return ret +} + +func (c *Command) fillLookup(ret *lookup, onlyOptions bool) { + c.eachGroup(func(g *Group) { + for _, option := range g.options { + if option.ShortName != 0 { + ret.shortNames[string(option.ShortName)] = option + } + + if len(option.LongName) > 0 { + ret.longNames[option.LongNameWithNamespace()] = option + } + } + }) + + if onlyOptions { + return + } + + for _, subcommand := range c.commands { + ret.commands[subcommand.Name] = subcommand + + for _, a := range subcommand.Aliases { + ret.commands[a] = subcommand + } + } +} + +func (c *Command) groupByName(name string) *Group { + if grp := c.Group.groupByName(name); grp != nil { + return grp + } + + for _, subc := range c.commands { + prefix := subc.Name + "." + + if strings.HasPrefix(name, prefix) { + if grp := subc.groupByName(name[len(prefix):]); grp != nil { + return grp + } + } else if name == subc.Name { + return subc.Group + } + } + + return nil +} + +type commandList []*Command + +func (c commandList) Less(i, j int) bool { + return c[i].Name < c[j].Name +} + +func (c commandList) Len() int { + return len(c) +} + +func (c commandList) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +func (c *Command) sortedVisibleCommands() []*Command { + ret := commandList(c.visibleCommands()) + sort.Sort(ret) + + return []*Command(ret) +} + +func (c *Command) visibleCommands() []*Command { + ret := make([]*Command, 0, len(c.commands)) + + for _, cmd := range c.commands { + if !cmd.Hidden { + ret = append(ret, cmd) + } + } + + return ret +} + +func (c *Command) match(name string) bool { + if c.Name == name { + return true + } + + for _, v := range c.Aliases { + if v == name { + return true + } + } + + return false +} + +func (c *Command) hasCliOptions() bool { + ret := false + + c.eachGroup(func(g *Group) { + if g.isBuiltinHelp { + return + } + + for _, opt := range g.options { + if opt.canCli() { + ret = true + } + } + }) + + return ret +} + +func (c *Command) fillParseState(s *parseState) { + s.positional = make([]*Arg, len(c.args)) + copy(s.positional, c.args) + + s.lookup = c.makeLookup() + s.command = c +} diff --git a/vendor/github.com/jessevdk/go-flags/command_test.go b/vendor/github.com/jessevdk/go-flags/command_test.go new file mode 100644 index 0000000..dc04b66 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/command_test.go @@ -0,0 +1,582 @@ +package flags + +import ( + "fmt" + "testing" +) + +func TestCommandInline(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command struct { + G bool `short:"g"` + } `command:"cmd"` + }{} + + p, ret := assertParserSuccess(t, &opts, "-v", "cmd", "-g") + + assertStringArray(t, ret, []string{}) + + if p.Active == nil { + t.Errorf("Expected active command") + } + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !opts.Command.G { + t.Errorf("Expected Command.G to be true") + } + + if p.Command.Find("cmd") != p.Active { + t.Errorf("Expected to find command `cmd' to be active") + } +} + +func TestCommandInlineMulti(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + C1 struct { + } `command:"c1"` + + C2 struct { + G bool `short:"g"` + } `command:"c2"` + }{} + + p, ret := assertParserSuccess(t, &opts, "-v", "c2", "-g") + + assertStringArray(t, ret, []string{}) + + if p.Active == nil { + t.Errorf("Expected active command") + } + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !opts.C2.G { + t.Errorf("Expected C2.G to be true") + } + + if p.Command.Find("c1") == nil { + t.Errorf("Expected to find command `c1'") + } + + if c2 := p.Command.Find("c2"); c2 == nil { + t.Errorf("Expected to find command `c2'") + } else if c2 != p.Active { + t.Errorf("Expected to find command `c2' to be active") + } +} + +func TestCommandFlagOrder1(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command struct { + G bool `short:"g"` + } `command:"cmd"` + }{} + + assertParseFail(t, ErrUnknownFlag, "unknown flag `g'", &opts, "-v", "-g", "cmd") +} + +func TestCommandFlagOrder2(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command struct { + G bool `short:"g"` + } `command:"cmd"` + }{} + + assertParseSuccess(t, &opts, "cmd", "-v", "-g") + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !opts.Command.G { + t.Errorf("Expected Command.G to be true") + } +} + +func TestCommandFlagOrderSub(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command struct { + G bool `short:"g"` + + SubCommand struct { + B bool `short:"b"` + } `command:"sub"` + } `command:"cmd"` + }{} + + assertParseSuccess(t, &opts, "cmd", "sub", "-v", "-g", "-b") + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !opts.Command.G { + t.Errorf("Expected Command.G to be true") + } + + if !opts.Command.SubCommand.B { + t.Errorf("Expected Command.SubCommand.B to be true") + } +} + +func TestCommandFlagOverride1(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command struct { + Value bool `short:"v"` + } `command:"cmd"` + }{} + + assertParseSuccess(t, &opts, "-v", "cmd") + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if opts.Command.Value { + t.Errorf("Expected Command.Value to be false") + } +} + +func TestCommandFlagOverride2(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command struct { + Value bool `short:"v"` + } `command:"cmd"` + }{} + + assertParseSuccess(t, &opts, "cmd", "-v") + + if opts.Value { + t.Errorf("Expected Value to be false") + } + + if !opts.Command.Value { + t.Errorf("Expected Command.Value to be true") + } +} + +func TestCommandFlagOverrideSub(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command struct { + Value bool `short:"v"` + + SubCommand struct { + Value bool `short:"v"` + } `command:"sub"` + } `command:"cmd"` + }{} + + assertParseSuccess(t, &opts, "cmd", "sub", "-v") + + if opts.Value { + t.Errorf("Expected Value to be false") + } + + if opts.Command.Value { + t.Errorf("Expected Command.Value to be false") + } + + if !opts.Command.SubCommand.Value { + t.Errorf("Expected Command.Value to be true") + } +} + +func TestCommandFlagOverrideSub2(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command struct { + Value bool `short:"v"` + + SubCommand struct { + G bool `short:"g"` + } `command:"sub"` + } `command:"cmd"` + }{} + + assertParseSuccess(t, &opts, "cmd", "sub", "-v") + + if opts.Value { + t.Errorf("Expected Value to be false") + } + + if !opts.Command.Value { + t.Errorf("Expected Command.Value to be true") + } +} + +func TestCommandEstimate(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Cmd1 struct { + } `command:"remove"` + + Cmd2 struct { + } `command:"add"` + }{} + + p := NewParser(&opts, None) + _, err := p.ParseArgs([]string{}) + + assertError(t, err, ErrCommandRequired, "Please specify one command of: add or remove") +} + +func TestCommandEstimate2(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Cmd1 struct { + } `command:"remove"` + + Cmd2 struct { + } `command:"add"` + }{} + + p := NewParser(&opts, None) + _, err := p.ParseArgs([]string{"rmive"}) + + assertError(t, err, ErrUnknownCommand, "Unknown command `rmive', did you mean `remove'?") +} + +type testCommand struct { + G bool `short:"g"` + Executed bool + EArgs []string +} + +func (c *testCommand) Execute(args []string) error { + c.Executed = true + c.EArgs = args + + return nil +} + +func TestCommandExecute(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command testCommand `command:"cmd"` + }{} + + assertParseSuccess(t, &opts, "-v", "cmd", "-g", "a", "b") + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !opts.Command.Executed { + t.Errorf("Did not execute command") + } + + if !opts.Command.G { + t.Errorf("Expected Command.C to be true") + } + + assertStringArray(t, opts.Command.EArgs, []string{"a", "b"}) +} + +func TestCommandClosest(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Cmd1 struct { + } `command:"remove"` + + Cmd2 struct { + } `command:"add"` + }{} + + args := assertParseFail(t, ErrUnknownCommand, "Unknown command `addd', did you mean `add'?", &opts, "-v", "addd") + + assertStringArray(t, args, []string{"addd"}) +} + +func TestCommandAdd(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + }{} + + var cmd = struct { + G bool `short:"g"` + }{} + + p := NewParser(&opts, Default) + c, err := p.AddCommand("cmd", "", "", &cmd) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + ret, err := p.ParseArgs([]string{"-v", "cmd", "-g", "rest"}) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + assertStringArray(t, ret, []string{"rest"}) + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !cmd.G { + t.Errorf("Expected Command.G to be true") + } + + if p.Command.Find("cmd") != c { + t.Errorf("Expected to find command `cmd'") + } + + if p.Commands()[0] != c { + t.Errorf("Expected command %#v, but got %#v", c, p.Commands()[0]) + } + + if c.Options()[0].ShortName != 'g' { + t.Errorf("Expected short name `g' but got %v", c.Options()[0].ShortName) + } +} + +func TestCommandNestedInline(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command struct { + G bool `short:"g"` + + Nested struct { + N string `long:"n"` + } `command:"nested"` + } `command:"cmd"` + }{} + + p, ret := assertParserSuccess(t, &opts, "-v", "cmd", "-g", "nested", "--n", "n", "rest") + + assertStringArray(t, ret, []string{"rest"}) + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !opts.Command.G { + t.Errorf("Expected Command.G to be true") + } + + assertString(t, opts.Command.Nested.N, "n") + + if c := p.Command.Find("cmd"); c == nil { + t.Errorf("Expected to find command `cmd'") + } else { + if c != p.Active { + t.Errorf("Expected `cmd' to be the active parser command") + } + + if nested := c.Find("nested"); nested == nil { + t.Errorf("Expected to find command `nested'") + } else if nested != c.Active { + t.Errorf("Expected to find command `nested' to be the active `cmd' command") + } + } +} + +func TestRequiredOnCommand(t *testing.T) { + var opts = struct { + Value bool `short:"v" required:"true"` + + Command struct { + G bool `short:"g"` + } `command:"cmd"` + }{} + + assertParseFail(t, ErrRequired, fmt.Sprintf("the required flag `%cv' was not specified", defaultShortOptDelimiter), &opts, "cmd") +} + +func TestRequiredAllOnCommand(t *testing.T) { + var opts = struct { + Value bool `short:"v" required:"true"` + Missing bool `long:"missing" required:"true"` + + Command struct { + G bool `short:"g"` + } `command:"cmd"` + }{} + + assertParseFail(t, ErrRequired, fmt.Sprintf("the required flags `%smissing' and `%cv' were not specified", defaultLongOptDelimiter, defaultShortOptDelimiter), &opts, "cmd") +} + +func TestDefaultOnCommand(t *testing.T) { + var opts = struct { + Command struct { + G string `short:"g" default:"value"` + } `command:"cmd"` + }{} + + assertParseSuccess(t, &opts, "cmd") + + if opts.Command.G != "value" { + t.Errorf("Expected G to be \"value\"") + } +} + +func TestAfterNonCommand(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Cmd1 struct { + } `command:"remove"` + + Cmd2 struct { + } `command:"add"` + }{} + + assertParseFail(t, ErrUnknownCommand, "Unknown command `nocmd'. Please specify one command of: add or remove", &opts, "nocmd", "remove") +} + +func TestSubcommandsOptional(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Cmd1 struct { + } `command:"remove"` + + Cmd2 struct { + } `command:"add"` + }{} + + p := NewParser(&opts, None) + p.SubcommandsOptional = true + + _, err := p.ParseArgs([]string{"-v"}) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + if !opts.Value { + t.Errorf("Expected Value to be true") + } +} + +func TestSubcommandsOptionalAfterNonCommand(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Cmd1 struct { + } `command:"remove"` + + Cmd2 struct { + } `command:"add"` + }{} + + p := NewParser(&opts, None) + p.SubcommandsOptional = true + + retargs, err := p.ParseArgs([]string{"nocmd", "remove"}) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + assertStringArray(t, retargs, []string{"nocmd", "remove"}) +} + +func TestCommandAlias(t *testing.T) { + var opts = struct { + Command struct { + G string `short:"g" default:"value"` + } `command:"cmd" alias:"cm"` + }{} + + assertParseSuccess(t, &opts, "cm") + + if opts.Command.G != "value" { + t.Errorf("Expected G to be \"value\"") + } +} + +func TestSubCommandFindOptionByLongFlag(t *testing.T) { + var opts struct { + Testing bool `long:"testing" description:"Testing"` + } + + var cmd struct { + Other bool `long:"other" description:"Other"` + } + + p := NewParser(&opts, Default) + c, _ := p.AddCommand("command", "Short", "Long", &cmd) + + opt := c.FindOptionByLongName("other") + + if opt == nil { + t.Errorf("Expected option, but found none") + } + + assertString(t, opt.LongName, "other") + + opt = c.FindOptionByLongName("testing") + + if opt == nil { + t.Errorf("Expected option, but found none") + } + + assertString(t, opt.LongName, "testing") +} + +func TestSubCommandFindOptionByShortFlag(t *testing.T) { + var opts struct { + Testing bool `short:"t" description:"Testing"` + } + + var cmd struct { + Other bool `short:"o" description:"Other"` + } + + p := NewParser(&opts, Default) + c, _ := p.AddCommand("command", "Short", "Long", &cmd) + + opt := c.FindOptionByShortName('o') + + if opt == nil { + t.Errorf("Expected option, but found none") + } + + if opt.ShortName != 'o' { + t.Errorf("Expected 'o', but got %v", opt.ShortName) + } + + opt = c.FindOptionByShortName('t') + + if opt == nil { + t.Errorf("Expected option, but found none") + } + + if opt.ShortName != 't' { + t.Errorf("Expected 'o', but got %v", opt.ShortName) + } +} diff --git a/vendor/github.com/jessevdk/go-flags/completion.go b/vendor/github.com/jessevdk/go-flags/completion.go new file mode 100644 index 0000000..7a7a08b --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/completion.go @@ -0,0 +1,309 @@ +package flags + +import ( + "fmt" + "path/filepath" + "reflect" + "sort" + "strings" + "unicode/utf8" +) + +// Completion is a type containing information of a completion. +type Completion struct { + // The completed item + Item string + + // A description of the completed item (optional) + Description string +} + +type completions []Completion + +func (c completions) Len() int { + return len(c) +} + +func (c completions) Less(i, j int) bool { + return c[i].Item < c[j].Item +} + +func (c completions) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// Completer is an interface which can be implemented by types +// to provide custom command line argument completion. +type Completer interface { + // Complete receives a prefix representing a (partial) value + // for its type and should provide a list of possible valid + // completions. + Complete(match string) []Completion +} + +type completion struct { + parser *Parser +} + +// Filename is a string alias which provides filename completion. +type Filename string + +func completionsWithoutDescriptions(items []string) []Completion { + ret := make([]Completion, len(items)) + + for i, v := range items { + ret[i].Item = v + } + + return ret +} + +// Complete returns a list of existing files with the given +// prefix. +func (f *Filename) Complete(match string) []Completion { + ret, _ := filepath.Glob(match + "*") + return completionsWithoutDescriptions(ret) +} + +func (c *completion) skipPositional(s *parseState, n int) { + if n >= len(s.positional) { + s.positional = nil + } else { + s.positional = s.positional[n:] + } +} + +func (c *completion) completeOptionNames(s *parseState, prefix string, match string, short bool) []Completion { + if short && len(match) != 0 { + return []Completion{ + Completion{ + Item: prefix + match, + }, + } + } + + var results []Completion + repeats := map[string]bool{} + + for name, opt := range s.lookup.longNames { + if strings.HasPrefix(name, match) && !opt.Hidden { + results = append(results, Completion{ + Item: defaultLongOptDelimiter + name, + Description: opt.Description, + }) + + if short { + repeats[string(opt.ShortName)] = true + } + } + } + + if short { + for name, opt := range s.lookup.shortNames { + if _, exist := repeats[name]; !exist && strings.HasPrefix(name, match) && !opt.Hidden { + results = append(results, Completion{ + Item: string(defaultShortOptDelimiter) + name, + Description: opt.Description, + }) + } + } + } + + return results +} + +func (c *completion) completeNamesForLongPrefix(s *parseState, prefix string, match string) []Completion { + return c.completeOptionNames(s, prefix, match, false) +} + +func (c *completion) completeNamesForShortPrefix(s *parseState, prefix string, match string) []Completion { + return c.completeOptionNames(s, prefix, match, true) +} + +func (c *completion) completeCommands(s *parseState, match string) []Completion { + n := make([]Completion, 0, len(s.command.commands)) + + for _, cmd := range s.command.commands { + if cmd.data != c && strings.HasPrefix(cmd.Name, match) { + n = append(n, Completion{ + Item: cmd.Name, + Description: cmd.ShortDescription, + }) + } + } + + return n +} + +func (c *completion) completeValue(value reflect.Value, prefix string, match string) []Completion { + if value.Kind() == reflect.Slice { + value = reflect.New(value.Type().Elem()) + } + i := value.Interface() + + var ret []Completion + + if cmp, ok := i.(Completer); ok { + ret = cmp.Complete(match) + } else if value.CanAddr() { + if cmp, ok = value.Addr().Interface().(Completer); ok { + ret = cmp.Complete(match) + } + } + + for i, v := range ret { + ret[i].Item = prefix + v.Item + } + + return ret +} + +func (c *completion) complete(args []string) []Completion { + if len(args) == 0 { + args = []string{""} + } + + s := &parseState{ + args: args, + } + + c.parser.fillParseState(s) + + var opt *Option + + for len(s.args) > 1 { + arg := s.pop() + + if (c.parser.Options&PassDoubleDash) != None && arg == "--" { + opt = nil + c.skipPositional(s, len(s.args)-1) + + break + } + + if argumentIsOption(arg) { + prefix, optname, islong := stripOptionPrefix(arg) + optname, _, argument := splitOption(prefix, optname, islong) + + if argument == nil { + var o *Option + canarg := true + + if islong { + o = s.lookup.longNames[optname] + } else { + for i, r := range optname { + sname := string(r) + o = s.lookup.shortNames[sname] + + if o == nil { + break + } + + if i == 0 && o.canArgument() && len(optname) != len(sname) { + canarg = false + break + } + } + } + + if o == nil && (c.parser.Options&PassAfterNonOption) != None { + opt = nil + c.skipPositional(s, len(s.args)-1) + + break + } else if o != nil && o.canArgument() && !o.OptionalArgument && canarg { + if len(s.args) > 1 { + s.pop() + } else { + opt = o + } + } + } + } else { + if len(s.positional) > 0 { + if !s.positional[0].isRemaining() { + // Don't advance beyond a remaining positional arg (because + // it consumes all subsequent args). + s.positional = s.positional[1:] + } + } else if cmd, ok := s.lookup.commands[arg]; ok { + cmd.fillParseState(s) + } + + opt = nil + } + } + + lastarg := s.args[len(s.args)-1] + var ret []Completion + + if opt != nil { + // Completion for the argument of 'opt' + ret = c.completeValue(opt.value, "", lastarg) + } else if argumentStartsOption(lastarg) { + // Complete the option + prefix, optname, islong := stripOptionPrefix(lastarg) + optname, split, argument := splitOption(prefix, optname, islong) + + if argument == nil && !islong { + rname, n := utf8.DecodeRuneInString(optname) + sname := string(rname) + + if opt := s.lookup.shortNames[sname]; opt != nil && opt.canArgument() { + ret = c.completeValue(opt.value, prefix+sname, optname[n:]) + } else { + ret = c.completeNamesForShortPrefix(s, prefix, optname) + } + } else if argument != nil { + if islong { + opt = s.lookup.longNames[optname] + } else { + opt = s.lookup.shortNames[optname] + } + + if opt != nil { + ret = c.completeValue(opt.value, prefix+optname+split, *argument) + } + } else if islong { + ret = c.completeNamesForLongPrefix(s, prefix, optname) + } else { + ret = c.completeNamesForShortPrefix(s, prefix, optname) + } + } else if len(s.positional) > 0 { + // Complete for positional argument + ret = c.completeValue(s.positional[0].value, "", lastarg) + } else if len(s.command.commands) > 0 { + // Complete for command + ret = c.completeCommands(s, lastarg) + } + + sort.Sort(completions(ret)) + return ret +} + +func (c *completion) print(items []Completion, showDescriptions bool) { + if showDescriptions && len(items) > 1 { + maxl := 0 + + for _, v := range items { + if len(v.Item) > maxl { + maxl = len(v.Item) + } + } + + for _, v := range items { + fmt.Printf("%s", v.Item) + + if len(v.Description) > 0 { + fmt.Printf("%s # %s", strings.Repeat(" ", maxl-len(v.Item)), v.Description) + } + + fmt.Printf("\n") + } + } else { + for _, v := range items { + fmt.Println(v.Item) + } + } +} diff --git a/vendor/github.com/jessevdk/go-flags/completion_test.go b/vendor/github.com/jessevdk/go-flags/completion_test.go new file mode 100644 index 0000000..26f70e4 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/completion_test.go @@ -0,0 +1,315 @@ +package flags + +import ( + "bytes" + "io" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" +) + +type TestComplete struct { +} + +func (t *TestComplete) Complete(match string) []Completion { + options := []string{ + "hello world", + "hello universe", + "hello multiverse", + } + + ret := make([]Completion, 0, len(options)) + + for _, o := range options { + if strings.HasPrefix(o, match) { + ret = append(ret, Completion{ + Item: o, + }) + } + } + + return ret +} + +var completionTestOptions struct { + Verbose bool `short:"v" long:"verbose" description:"Verbose messages"` + Debug bool `short:"d" long:"debug" description:"Enable debug"` + Info bool `short:"i" description:"Display info"` + Version bool `long:"version" description:"Show version"` + Required bool `long:"required" required:"true" description:"This is required"` + Hidden bool `long:"hidden" hidden:"true" description:"This is hidden"` + + AddCommand struct { + Positional struct { + Filename Filename + } `positional-args:"yes"` + } `command:"add" description:"add an item"` + + AddMultiCommand struct { + Positional struct { + Filename []Filename + } `positional-args:"yes"` + Extra []Filename `short:"f"` + } `command:"add-multi" description:"add multiple items"` + + AddMultiCommandFlag struct { + Files []Filename `short:"f"` + } `command:"add-multi-flag" description:"add multiple items via flags"` + + RemoveCommand struct { + Other bool `short:"o"` + File Filename `short:"f" long:"filename"` + } `command:"rm" description:"remove an item"` + + RenameCommand struct { + Completed TestComplete `short:"c" long:"completed"` + } `command:"rename" description:"rename an item"` +} + +type completionTest struct { + Args []string + Completed []string + ShowDescriptions bool +} + +var completionTests []completionTest + +func init() { + _, sourcefile, _, _ := runtime.Caller(0) + completionTestSourcedir := filepath.Join(filepath.SplitList(path.Dir(sourcefile))...) + + completionTestFilename := []string{filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion_test.go")} + + completionTests = []completionTest{ + { + // Short names + []string{"-"}, + []string{"--debug", "--required", "--verbose", "--version", "-i"}, + false, + }, + + { + // Short names full + []string{"-i"}, + []string{"-i"}, + false, + }, + + { + // Short names concatenated + []string{"-dv"}, + []string{"-dv"}, + false, + }, + + { + // Long names + []string{"--"}, + []string{"--debug", "--required", "--verbose", "--version"}, + false, + }, + + { + // Long names with descriptions + []string{"--"}, + []string{ + "--debug # Enable debug", + "--required # This is required", + "--verbose # Verbose messages", + "--version # Show version", + }, + true, + }, + + { + // Long names partial + []string{"--ver"}, + []string{"--verbose", "--version"}, + false, + }, + + { + // Commands + []string{""}, + []string{"add", "add-multi", "add-multi-flag", "rename", "rm"}, + false, + }, + + { + // Commands with descriptions + []string{""}, + []string{ + "add # add an item", + "add-multi # add multiple items", + "add-multi-flag # add multiple items via flags", + "rename # rename an item", + "rm # remove an item", + }, + true, + }, + + { + // Commands partial + []string{"r"}, + []string{"rename", "rm"}, + false, + }, + + { + // Positional filename + []string{"add", filepath.Join(completionTestSourcedir, "completion")}, + completionTestFilename, + false, + }, + + { + // Multiple positional filename (1 arg) + []string{"add-multi", filepath.Join(completionTestSourcedir, "completion")}, + completionTestFilename, + false, + }, + { + // Multiple positional filename (2 args) + []string{"add-multi", filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion")}, + completionTestFilename, + false, + }, + { + // Multiple positional filename (3 args) + []string{"add-multi", filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion")}, + completionTestFilename, + false, + }, + + { + // Flag filename + []string{"rm", "-f", path.Join(completionTestSourcedir, "completion")}, + completionTestFilename, + false, + }, + + { + // Flag short concat last filename + []string{"rm", "-of", path.Join(completionTestSourcedir, "completion")}, + completionTestFilename, + false, + }, + + { + // Flag concat filename + []string{"rm", "-f" + path.Join(completionTestSourcedir, "completion")}, + []string{"-f" + completionTestFilename[0], "-f" + completionTestFilename[1]}, + false, + }, + + { + // Flag equal concat filename + []string{"rm", "-f=" + path.Join(completionTestSourcedir, "completion")}, + []string{"-f=" + completionTestFilename[0], "-f=" + completionTestFilename[1]}, + false, + }, + + { + // Flag concat long filename + []string{"rm", "--filename=" + path.Join(completionTestSourcedir, "completion")}, + []string{"--filename=" + completionTestFilename[0], "--filename=" + completionTestFilename[1]}, + false, + }, + + { + // Flag long filename + []string{"rm", "--filename", path.Join(completionTestSourcedir, "completion")}, + completionTestFilename, + false, + }, + + { + // Custom completed + []string{"rename", "-c", "hello un"}, + []string{"hello universe"}, + false, + }, + { + // Multiple flag filename + []string{"add-multi-flag", "-f", filepath.Join(completionTestSourcedir, "completion")}, + completionTestFilename, + false, + }, + } +} + +func TestCompletion(t *testing.T) { + p := NewParser(&completionTestOptions, Default) + c := &completion{parser: p} + + for _, test := range completionTests { + if test.ShowDescriptions { + continue + } + + ret := c.complete(test.Args) + items := make([]string, len(ret)) + + for i, v := range ret { + items[i] = v.Item + } + + if !reflect.DeepEqual(items, test.Completed) { + t.Errorf("Args: %#v, %#v\n Expected: %#v\n Got: %#v", test.Args, test.ShowDescriptions, test.Completed, items) + } + } +} + +func TestParserCompletion(t *testing.T) { + for _, test := range completionTests { + if test.ShowDescriptions { + os.Setenv("GO_FLAGS_COMPLETION", "verbose") + } else { + os.Setenv("GO_FLAGS_COMPLETION", "1") + } + + tmp := os.Stdout + + r, w, _ := os.Pipe() + os.Stdout = w + + out := make(chan string) + + go func() { + var buf bytes.Buffer + + io.Copy(&buf, r) + + out <- buf.String() + }() + + p := NewParser(&completionTestOptions, None) + + p.CompletionHandler = func(items []Completion) { + comp := &completion{parser: p} + comp.print(items, test.ShowDescriptions) + } + + _, err := p.ParseArgs(test.Args) + + w.Close() + + os.Stdout = tmp + + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + got := strings.Split(strings.Trim(<-out, "\n"), "\n") + + if !reflect.DeepEqual(got, test.Completed) { + t.Errorf("Expected: %#v\nGot: %#v", test.Completed, got) + } + } + + os.Setenv("GO_FLAGS_COMPLETION", "") +} diff --git a/vendor/github.com/jessevdk/go-flags/convert.go b/vendor/github.com/jessevdk/go-flags/convert.go new file mode 100644 index 0000000..984aac8 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/convert.go @@ -0,0 +1,348 @@ +// Copyright 2012 Jesse van den Kieboom. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flags + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Marshaler is the interface implemented by types that can marshal themselves +// to a string representation of the flag. +type Marshaler interface { + // MarshalFlag marshals a flag value to its string representation. + MarshalFlag() (string, error) +} + +// Unmarshaler is the interface implemented by types that can unmarshal a flag +// argument to themselves. The provided value is directly passed from the +// command line. +type Unmarshaler interface { + // UnmarshalFlag unmarshals a string value representation to the flag + // value (which therefore needs to be a pointer receiver). + UnmarshalFlag(value string) error +} + +func getBase(options multiTag, base int) (int, error) { + sbase := options.Get("base") + + var err error + var ivbase int64 + + if sbase != "" { + ivbase, err = strconv.ParseInt(sbase, 10, 32) + base = int(ivbase) + } + + return base, err +} + +func convertMarshal(val reflect.Value) (bool, string, error) { + // Check first for the Marshaler interface + if val.Type().NumMethod() > 0 && val.CanInterface() { + if marshaler, ok := val.Interface().(Marshaler); ok { + ret, err := marshaler.MarshalFlag() + return true, ret, err + } + } + + return false, "", nil +} + +func convertToString(val reflect.Value, options multiTag) (string, error) { + if ok, ret, err := convertMarshal(val); ok { + return ret, err + } + + tp := val.Type() + + // Support for time.Duration + if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() { + stringer := val.Interface().(fmt.Stringer) + return stringer.String(), nil + } + + switch tp.Kind() { + case reflect.String: + return val.String(), nil + case reflect.Bool: + if val.Bool() { + return "true", nil + } + + return "false", nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + base, err := getBase(options, 10) + + if err != nil { + return "", err + } + + return strconv.FormatInt(val.Int(), base), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + base, err := getBase(options, 10) + + if err != nil { + return "", err + } + + return strconv.FormatUint(val.Uint(), base), nil + case reflect.Float32, reflect.Float64: + return strconv.FormatFloat(val.Float(), 'g', -1, tp.Bits()), nil + case reflect.Slice: + if val.Len() == 0 { + return "", nil + } + + ret := "[" + + for i := 0; i < val.Len(); i++ { + if i != 0 { + ret += ", " + } + + item, err := convertToString(val.Index(i), options) + + if err != nil { + return "", err + } + + ret += item + } + + return ret + "]", nil + case reflect.Map: + ret := "{" + + for i, key := range val.MapKeys() { + if i != 0 { + ret += ", " + } + + keyitem, err := convertToString(key, options) + + if err != nil { + return "", err + } + + item, err := convertToString(val.MapIndex(key), options) + + if err != nil { + return "", err + } + + ret += keyitem + ":" + item + } + + return ret + "}", nil + case reflect.Ptr: + return convertToString(reflect.Indirect(val), options) + case reflect.Interface: + if !val.IsNil() { + return convertToString(val.Elem(), options) + } + } + + return "", nil +} + +func convertUnmarshal(val string, retval reflect.Value) (bool, error) { + if retval.Type().NumMethod() > 0 && retval.CanInterface() { + if unmarshaler, ok := retval.Interface().(Unmarshaler); ok { + if retval.IsNil() { + retval.Set(reflect.New(retval.Type().Elem())) + + // Re-assign from the new value + unmarshaler = retval.Interface().(Unmarshaler) + } + + return true, unmarshaler.UnmarshalFlag(val) + } + } + + if retval.Type().Kind() != reflect.Ptr && retval.CanAddr() { + return convertUnmarshal(val, retval.Addr()) + } + + if retval.Type().Kind() == reflect.Interface && !retval.IsNil() { + return convertUnmarshal(val, retval.Elem()) + } + + return false, nil +} + +func convert(val string, retval reflect.Value, options multiTag) error { + if ok, err := convertUnmarshal(val, retval); ok { + return err + } + + tp := retval.Type() + + // Support for time.Duration + if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() { + parsed, err := time.ParseDuration(val) + + if err != nil { + return err + } + + retval.SetInt(int64(parsed)) + return nil + } + + switch tp.Kind() { + case reflect.String: + retval.SetString(val) + case reflect.Bool: + if val == "" { + retval.SetBool(true) + } else { + b, err := strconv.ParseBool(val) + + if err != nil { + return err + } + + retval.SetBool(b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + base, err := getBase(options, 10) + + if err != nil { + return err + } + + parsed, err := strconv.ParseInt(val, base, tp.Bits()) + + if err != nil { + return err + } + + retval.SetInt(parsed) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + base, err := getBase(options, 10) + + if err != nil { + return err + } + + parsed, err := strconv.ParseUint(val, base, tp.Bits()) + + if err != nil { + return err + } + + retval.SetUint(parsed) + case reflect.Float32, reflect.Float64: + parsed, err := strconv.ParseFloat(val, tp.Bits()) + + if err != nil { + return err + } + + retval.SetFloat(parsed) + case reflect.Slice: + elemtp := tp.Elem() + + elemvalptr := reflect.New(elemtp) + elemval := reflect.Indirect(elemvalptr) + + if err := convert(val, elemval, options); err != nil { + return err + } + + retval.Set(reflect.Append(retval, elemval)) + case reflect.Map: + parts := strings.SplitN(val, ":", 2) + + key := parts[0] + var value string + + if len(parts) == 2 { + value = parts[1] + } + + keytp := tp.Key() + keyval := reflect.New(keytp) + + if err := convert(key, keyval, options); err != nil { + return err + } + + valuetp := tp.Elem() + valueval := reflect.New(valuetp) + + if err := convert(value, valueval, options); err != nil { + return err + } + + if retval.IsNil() { + retval.Set(reflect.MakeMap(tp)) + } + + retval.SetMapIndex(reflect.Indirect(keyval), reflect.Indirect(valueval)) + case reflect.Ptr: + if retval.IsNil() { + retval.Set(reflect.New(retval.Type().Elem())) + } + + return convert(val, reflect.Indirect(retval), options) + case reflect.Interface: + if !retval.IsNil() { + return convert(val, retval.Elem(), options) + } + } + + return nil +} + +func isPrint(s string) bool { + for _, c := range s { + if !strconv.IsPrint(c) { + return false + } + } + + return true +} + +func quoteIfNeeded(s string) string { + if !isPrint(s) { + return strconv.Quote(s) + } + + return s +} + +func quoteIfNeededV(s []string) []string { + ret := make([]string, len(s)) + + for i, v := range s { + ret[i] = quoteIfNeeded(v) + } + + return ret +} + +func quoteV(s []string) []string { + ret := make([]string, len(s)) + + for i, v := range s { + ret[i] = strconv.Quote(v) + } + + return ret +} + +func unquoteIfPossible(s string) (string, error) { + if len(s) == 0 || s[0] != '"' { + return s, nil + } + + return strconv.Unquote(s) +} diff --git a/vendor/github.com/jessevdk/go-flags/convert_test.go b/vendor/github.com/jessevdk/go-flags/convert_test.go new file mode 100644 index 0000000..ef131dc --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/convert_test.go @@ -0,0 +1,159 @@ +package flags + +import ( + "testing" + "time" +) + +func expectConvert(t *testing.T, o *Option, expected string) { + s, err := convertToString(o.value, o.tag) + + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + + assertString(t, s, expected) +} + +func TestConvertToString(t *testing.T) { + d, _ := time.ParseDuration("1h2m4s") + + var opts = struct { + String string `long:"string"` + + Int int `long:"int"` + Int8 int8 `long:"int8"` + Int16 int16 `long:"int16"` + Int32 int32 `long:"int32"` + Int64 int64 `long:"int64"` + + Uint uint `long:"uint"` + Uint8 uint8 `long:"uint8"` + Uint16 uint16 `long:"uint16"` + Uint32 uint32 `long:"uint32"` + Uint64 uint64 `long:"uint64"` + + Float32 float32 `long:"float32"` + Float64 float64 `long:"float64"` + + Duration time.Duration `long:"duration"` + + Bool bool `long:"bool"` + + IntSlice []int `long:"int-slice"` + IntFloatMap map[int]float64 `long:"int-float-map"` + + PtrBool *bool `long:"ptr-bool"` + Interface interface{} `long:"interface"` + + Int32Base int32 `long:"int32-base" base:"16"` + Uint32Base uint32 `long:"uint32-base" base:"16"` + }{ + "string", + + -2, + -1, + 0, + 1, + 2, + + 1, + 2, + 3, + 4, + 5, + + 1.2, + -3.4, + + d, + true, + + []int{-3, 4, -2}, + map[int]float64{-2: 4.5}, + + new(bool), + float32(5.2), + + -5823, + 4232, + } + + p := NewNamedParser("test", Default) + grp, _ := p.AddGroup("test group", "", &opts) + + expects := []string{ + "string", + "-2", + "-1", + "0", + "1", + "2", + + "1", + "2", + "3", + "4", + "5", + + "1.2", + "-3.4", + + "1h2m4s", + "true", + + "[-3, 4, -2]", + "{-2:4.5}", + + "false", + "5.2", + + "-16bf", + "1088", + } + + for i, v := range grp.Options() { + expectConvert(t, v, expects[i]) + } +} + +func TestConvertToStringInvalidIntBase(t *testing.T) { + var opts = struct { + Int int `long:"int" base:"no"` + }{ + 2, + } + + p := NewNamedParser("test", Default) + grp, _ := p.AddGroup("test group", "", &opts) + o := grp.Options()[0] + + _, err := convertToString(o.value, o.tag) + + if err != nil { + err = newErrorf(ErrMarshal, "%v", err) + } + + assertError(t, err, ErrMarshal, "strconv.ParseInt: parsing \"no\": invalid syntax") +} + +func TestConvertToStringInvalidUintBase(t *testing.T) { + var opts = struct { + Uint uint `long:"uint" base:"no"` + }{ + 2, + } + + p := NewNamedParser("test", Default) + grp, _ := p.AddGroup("test group", "", &opts) + o := grp.Options()[0] + + _, err := convertToString(o.value, o.tag) + + if err != nil { + err = newErrorf(ErrMarshal, "%v", err) + } + + assertError(t, err, ErrMarshal, "strconv.ParseInt: parsing \"no\": invalid syntax") +} diff --git a/vendor/github.com/jessevdk/go-flags/error.go b/vendor/github.com/jessevdk/go-flags/error.go new file mode 100644 index 0000000..05528d8 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/error.go @@ -0,0 +1,134 @@ +package flags + +import ( + "fmt" +) + +// ErrorType represents the type of error. +type ErrorType uint + +const ( + // ErrUnknown indicates a generic error. + ErrUnknown ErrorType = iota + + // ErrExpectedArgument indicates that an argument was expected. + ErrExpectedArgument + + // ErrUnknownFlag indicates an unknown flag. + ErrUnknownFlag + + // ErrUnknownGroup indicates an unknown group. + ErrUnknownGroup + + // ErrMarshal indicates a marshalling error while converting values. + ErrMarshal + + // ErrHelp indicates that the built-in help was shown (the error + // contains the help message). + ErrHelp + + // ErrNoArgumentForBool indicates that an argument was given for a + // boolean flag (which don't not take any arguments). + ErrNoArgumentForBool + + // ErrRequired indicates that a required flag was not provided. + ErrRequired + + // ErrShortNameTooLong indicates that a short flag name was specified, + // longer than one character. + ErrShortNameTooLong + + // ErrDuplicatedFlag indicates that a short or long flag has been + // defined more than once + ErrDuplicatedFlag + + // ErrTag indicates an error while parsing flag tags. + ErrTag + + // ErrCommandRequired indicates that a command was required but not + // specified + ErrCommandRequired + + // ErrUnknownCommand indicates that an unknown command was specified. + ErrUnknownCommand + + // ErrInvalidChoice indicates an invalid option value which only allows + // a certain number of choices. + ErrInvalidChoice + + // ErrInvalidTag indicates an invalid tag or invalid use of an existing tag + ErrInvalidTag +) + +func (e ErrorType) String() string { + switch e { + case ErrUnknown: + return "unknown" + case ErrExpectedArgument: + return "expected argument" + case ErrUnknownFlag: + return "unknown flag" + case ErrUnknownGroup: + return "unknown group" + case ErrMarshal: + return "marshal" + case ErrHelp: + return "help" + case ErrNoArgumentForBool: + return "no argument for bool" + case ErrRequired: + return "required" + case ErrShortNameTooLong: + return "short name too long" + case ErrDuplicatedFlag: + return "duplicated flag" + case ErrTag: + return "tag" + case ErrCommandRequired: + return "command required" + case ErrUnknownCommand: + return "unknown command" + case ErrInvalidChoice: + return "invalid choice" + case ErrInvalidTag: + return "invalid tag" + } + + return "unrecognized error type" +} + +// Error represents a parser error. The error returned from Parse is of this +// type. The error contains both a Type and Message. +type Error struct { + // The type of error + Type ErrorType + + // The error message + Message string +} + +// Error returns the error's message +func (e *Error) Error() string { + return e.Message +} + +func newError(tp ErrorType, message string) *Error { + return &Error{ + Type: tp, + Message: message, + } +} + +func newErrorf(tp ErrorType, format string, args ...interface{}) *Error { + return newError(tp, fmt.Sprintf(format, args...)) +} + +func wrapError(err error) *Error { + ret, ok := err.(*Error) + + if !ok { + return newError(ErrUnknown, err.Error()) + } + + return ret +} diff --git a/vendor/github.com/jessevdk/go-flags/example_test.go b/vendor/github.com/jessevdk/go-flags/example_test.go new file mode 100644 index 0000000..4321ed8 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/example_test.go @@ -0,0 +1,110 @@ +// Example of use of the flags package. +package flags + +import ( + "fmt" + "os/exec" +) + +func Example() { + var opts struct { + // Slice of bool will append 'true' each time the option + // is encountered (can be set multiple times, like -vvv) + Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` + + // Example of automatic marshalling to desired type (uint) + Offset uint `long:"offset" description:"Offset"` + + // Example of a callback, called each time the option is found. + Call func(string) `short:"c" description:"Call phone number"` + + // Example of a required flag + Name string `short:"n" long:"name" description:"A name" required:"true"` + + // Example of a value name + File string `short:"f" long:"file" description:"A file" value-name:"FILE"` + + // Example of a pointer + Ptr *int `short:"p" description:"A pointer to an integer"` + + // Example of a slice of strings + StringSlice []string `short:"s" description:"A slice of strings"` + + // Example of a slice of pointers + PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` + + // Example of a map + IntMap map[string]int `long:"intmap" description:"A map from string to int"` + + // Example of a filename (useful for completion) + Filename Filename `long:"filename" description:"A filename"` + + // Example of positional arguments + Args struct { + ID string + Num int + Rest []string + } `positional-args:"yes" required:"yes"` + } + + // Callback which will invoke callto: to call a number. + // Note that this works just on OS X (and probably only with + // Skype) but it shows the idea. + opts.Call = func(num string) { + cmd := exec.Command("open", "callto:"+num) + cmd.Start() + cmd.Process.Release() + } + + // Make some fake arguments to parse. + args := []string{ + "-vv", + "--offset=5", + "-n", "Me", + "-p", "3", + "-s", "hello", + "-s", "world", + "--ptrslice", "hello", + "--ptrslice", "world", + "--intmap", "a:1", + "--intmap", "b:5", + "--filename", "hello.go", + "id", + "10", + "remaining1", + "remaining2", + } + + // Parse flags from `args'. Note that here we use flags.ParseArgs for + // the sake of making a working example. Normally, you would simply use + // flags.Parse(&opts) which uses os.Args + _, err := ParseArgs(&opts, args) + + if err != nil { + panic(err) + } + + fmt.Printf("Verbosity: %v\n", opts.Verbose) + fmt.Printf("Offset: %d\n", opts.Offset) + fmt.Printf("Name: %s\n", opts.Name) + fmt.Printf("Ptr: %d\n", *opts.Ptr) + fmt.Printf("StringSlice: %v\n", opts.StringSlice) + fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1]) + fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"]) + fmt.Printf("Filename: %v\n", opts.Filename) + fmt.Printf("Args.ID: %s\n", opts.Args.ID) + fmt.Printf("Args.Num: %d\n", opts.Args.Num) + fmt.Printf("Args.Rest: %v\n", opts.Args.Rest) + + // Output: Verbosity: [true true] + // Offset: 5 + // Name: Me + // Ptr: 3 + // StringSlice: [hello world] + // PtrSlice: [hello world] + // IntMap: [a:1 b:5] + // Filename: hello.go + // Args.ID: id + // Args.Num: 10 + // Args.Rest: [remaining1 remaining2] +} diff --git a/vendor/github.com/jessevdk/go-flags/examples/add.go b/vendor/github.com/jessevdk/go-flags/examples/add.go new file mode 100644 index 0000000..57d8f23 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/examples/add.go @@ -0,0 +1,23 @@ +package main + +import ( + "fmt" +) + +type AddCommand struct { + All bool `short:"a" long:"all" description:"Add all files"` +} + +var addCommand AddCommand + +func (x *AddCommand) Execute(args []string) error { + fmt.Printf("Adding (all=%v): %#v\n", x.All, args) + return nil +} + +func init() { + parser.AddCommand("add", + "Add a file", + "The add command adds a file to the repository. Use -a to add all files.", + &addCommand) +} diff --git a/vendor/github.com/jessevdk/go-flags/examples/bash-completion b/vendor/github.com/jessevdk/go-flags/examples/bash-completion new file mode 100644 index 0000000..974f52a --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/examples/bash-completion @@ -0,0 +1,9 @@ +_examples() { + args=("${COMP_WORDS[@]:1:$COMP_CWORD}") + + local IFS=$'\n' + COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) + return 1 +} + +complete -F _examples examples diff --git a/vendor/github.com/jessevdk/go-flags/examples/main.go b/vendor/github.com/jessevdk/go-flags/examples/main.go new file mode 100644 index 0000000..632c331 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/examples/main.go @@ -0,0 +1,79 @@ +package main + +import ( + "errors" + "fmt" + "github.com/jessevdk/go-flags" + "os" + "strconv" + "strings" +) + +type EditorOptions struct { + Input flags.Filename `short:"i" long:"input" description:"Input file" default:"-"` + Output flags.Filename `short:"o" long:"output" description:"Output file" default:"-"` +} + +type Point struct { + X, Y int +} + +func (p *Point) UnmarshalFlag(value string) error { + parts := strings.Split(value, ",") + + if len(parts) != 2 { + return errors.New("expected two numbers separated by a ,") + } + + x, err := strconv.ParseInt(parts[0], 10, 32) + + if err != nil { + return err + } + + y, err := strconv.ParseInt(parts[1], 10, 32) + + if err != nil { + return err + } + + p.X = int(x) + p.Y = int(y) + + return nil +} + +func (p Point) MarshalFlag() (string, error) { + return fmt.Sprintf("%d,%d", p.X, p.Y), nil +} + +type Options struct { + // Example of verbosity with level + Verbose []bool `short:"v" long:"verbose" description:"Verbose output"` + + // Example of optional value + User string `short:"u" long:"user" description:"User name" optional:"yes" optional-value:"pancake"` + + // Example of map with multiple default values + Users map[string]string `long:"users" description:"User e-mail map" default:"system:system@example.org" default:"admin:admin@example.org"` + + // Example of option group + Editor EditorOptions `group:"Editor Options"` + + // Example of custom type Marshal/Unmarshal + Point Point `long:"point" description:"A x,y point" default:"1,2"` +} + +var options Options + +var parser = flags.NewParser(&options, flags.Default) + +func main() { + if _, err := parser.Parse(); err != nil { + if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp { + os.Exit(0) + } else { + os.Exit(1) + } + } +} diff --git a/vendor/github.com/jessevdk/go-flags/examples/rm.go b/vendor/github.com/jessevdk/go-flags/examples/rm.go new file mode 100644 index 0000000..c9c1dd0 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/examples/rm.go @@ -0,0 +1,23 @@ +package main + +import ( + "fmt" +) + +type RmCommand struct { + Force bool `short:"f" long:"force" description:"Force removal of files"` +} + +var rmCommand RmCommand + +func (x *RmCommand) Execute(args []string) error { + fmt.Printf("Removing (force=%v): %#v\n", x.Force, args) + return nil +} + +func init() { + parser.AddCommand("rm", + "Remove a file", + "The rm command removes a file to the repository. Use -f to force removal of files.", + &rmCommand) +} diff --git a/vendor/github.com/jessevdk/go-flags/flags.go b/vendor/github.com/jessevdk/go-flags/flags.go new file mode 100644 index 0000000..889762d --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/flags.go @@ -0,0 +1,258 @@ +// Copyright 2012 Jesse van den Kieboom. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package flags provides an extensive command line option parser. +The flags package is similar in functionality to the go built-in flag package +but provides more options and uses reflection to provide a convenient and +succinct way of specifying command line options. + + +Supported features + +The following features are supported in go-flags: + + Options with short names (-v) + Options with long names (--verbose) + Options with and without arguments (bool v.s. other type) + Options with optional arguments and default values + Option default values from ENVIRONMENT_VARIABLES, including slice and map values + Multiple option groups each containing a set of options + Generate and print well-formatted help message + Passing remaining command line arguments after -- (optional) + Ignoring unknown command line options (optional) + Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification + Supports multiple short options -aux + Supports all primitive go types (string, int{8..64}, uint{8..64}, float) + Supports same option multiple times (can store in slice or last option counts) + Supports maps + Supports function callbacks + Supports namespaces for (nested) option groups + +Additional features specific to Windows: + Options with short names (/v) + Options with long names (/verbose) + Windows-style options with arguments use a colon as the delimiter + Modify generated help message with Windows-style / options + Windows style options can be disabled at build time using the "forceposix" + build tag + + +Basic usage + +The flags package uses structs, reflection and struct field tags +to allow users to specify command line options. This results in very simple +and concise specification of your application options. For example: + + type Options struct { + Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` + } + +This specifies one option with a short name -v and a long name --verbose. +When either -v or --verbose is found on the command line, a 'true' value +will be appended to the Verbose field. e.g. when specifying -vvv, the +resulting value of Verbose will be {[true, true, true]}. + +Slice options work exactly the same as primitive type options, except that +whenever the option is encountered, a value is appended to the slice. + +Map options from string to primitive type are also supported. On the command +line, you specify the value for such an option as key:value. For example + + type Options struct { + AuthorInfo string[string] `short:"a"` + } + +Then, the AuthorInfo map can be filled with something like +-a name:Jesse -a "surname:van den Kieboom". + +Finally, for full control over the conversion between command line argument +values and options, user defined types can choose to implement the Marshaler +and Unmarshaler interfaces. + + +Available field tags + +The following is a list of tags for struct fields supported by go-flags: + + short: the short name of the option (single character) + long: the long name of the option + required: if non empty, makes the option required to appear on the command + line. If a required option is not present, the parser will + return ErrRequired (optional) + description: the description of the option (optional) + long-description: the long description of the option. Currently only + displayed in generated man pages (optional) + no-flag: if non-empty, this field is ignored as an option (optional) + + optional: if non-empty, makes the argument of the option optional. When an + argument is optional it can only be specified using + --option=argument (optional) + optional-value: the value of an optional option when the option occurs + without an argument. This tag can be specified multiple + times in the case of maps or slices (optional) + default: the default value of an option. This tag can be specified + multiple times in the case of slices or maps (optional) + default-mask: when specified, this value will be displayed in the help + instead of the actual default value. This is useful + mostly for hiding otherwise sensitive information from + showing up in the help. If default-mask takes the special + value "-", then no default value will be shown at all + (optional) + env: the default value of the option is overridden from the + specified environment variable, if one has been defined. + (optional) + env-delim: the 'env' default value from environment is split into + multiple values with the given delimiter string, use with + slices and maps (optional) + value-name: the name of the argument value (to be shown in the help) + (optional) + choice: limits the values for an option to a set of values. + This tag can be specified multiple times (optional) + hidden: if non-empty, the option is not visible in the help or man page. + + base: a base (radix) used to convert strings to integer values, the + default base is 10 (i.e. decimal) (optional) + + ini-name: the explicit ini option name (optional) + no-ini: if non-empty this field is ignored as an ini option + (optional) + + group: when specified on a struct field, makes the struct + field a separate group with the given name (optional) + namespace: when specified on a group struct field, the namespace + gets prepended to every option's long name and + subgroup's namespace of this group, separated by + the parser's namespace delimiter (optional) + command: when specified on a struct field, makes the struct + field a (sub)command with the given name (optional) + subcommands-optional: when specified on a command struct field, makes + any subcommands of that command optional (optional) + alias: when specified on a command struct field, adds the + specified name as an alias for the command. Can be + be specified multiple times to add more than one + alias (optional) + positional-args: when specified on a field with a struct type, + uses the fields of that struct to parse remaining + positional command line arguments into (in order + of the fields). If a field has a slice type, + then all remaining arguments will be added to it. + Positional arguments are optional by default, + unless the "required" tag is specified together + with the "positional-args" tag. The "required" tag + can also be set on the individual rest argument + fields, to require only the first N positional + arguments. If the "required" tag is set on the + rest arguments slice, then its value determines + the minimum amount of rest arguments that needs to + be provided (e.g. `required:"2"`) (optional) + positional-arg-name: used on a field in a positional argument struct; name + of the positional argument placeholder to be shown in + the help (optional) + +Either the `short:` tag or the `long:` must be specified to make the field eligible as an +option. + + +Option groups + +Option groups are a simple way to semantically separate your options. All +options in a particular group are shown together in the help under the name +of the group. Namespaces can be used to specify option long names more +precisely and emphasize the options affiliation to their group. + +There are currently three ways to specify option groups. + + 1. Use NewNamedParser specifying the various option groups. + 2. Use AddGroup to add a group to an existing parser. + 3. Add a struct field to the top-level options annotated with the + group:"group-name" tag. + + + +Commands + +The flags package also has basic support for commands. Commands are often +used in monolithic applications that support various commands or actions. +Take git for example, all of the add, commit, checkout, etc. are called +commands. Using commands you can easily separate multiple functions of your +application. + +There are currently two ways to specify a command. + + 1. Use AddCommand on an existing parser. + 2. Add a struct field to your options struct annotated with the + command:"command-name" tag. + +The most common, idiomatic way to implement commands is to define a global +parser instance and implement each command in a separate file. These +command files should define a go init function which calls AddCommand on +the global parser. + +When parsing ends and there is an active command and that command implements +the Commander interface, then its Execute method will be run with the +remaining command line arguments. + +Command structs can have options which become valid to parse after the +command has been specified on the command line, in addition to the options +of all the parent commands. I.e. considering a -v flag on the parser and an +add command, the following are equivalent: + + ./app -v add + ./app add -v + +However, if the -v flag is defined on the add command, then the first of +the two examples above would fail since the -v flag is not defined before +the add command. + + +Completion + +go-flags has builtin support to provide bash completion of flags, commands +and argument values. To use completion, the binary which uses go-flags +can be invoked in a special environment to list completion of the current +command line argument. It should be noted that this `executes` your application, +and it is up to the user to make sure there are no negative side effects (for +example from init functions). + +Setting the environment variable `GO_FLAGS_COMPLETION=1` enables completion +by replacing the argument parsing routine with the completion routine which +outputs completions for the passed arguments. The basic invocation to +complete a set of arguments is therefore: + + GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3 + +where `completion-example` is the binary, `arg1` and `arg2` are +the current arguments, and `arg3` (the last argument) is the argument +to be completed. If the GO_FLAGS_COMPLETION is set to "verbose", then +descriptions of possible completion items will also be shown, if there +are more than 1 completion items. + +To use this with bash completion, a simple file can be written which +calls the binary which supports go-flags completion: + + _completion_example() { + # All arguments except the first one + args=("${COMP_WORDS[@]:1:$COMP_CWORD}") + + # Only split on newlines + local IFS=$'\n' + + # Call completion (note that the first element of COMP_WORDS is + # the executable itself) + COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) + return 0 + } + + complete -F _completion_example completion-example + +Completion requires the parser option PassDoubleDash and is therefore enforced if the environment variable GO_FLAGS_COMPLETION is set. + +Customized completion for argument values is supported by implementing +the flags.Completer interface for the argument value type. An example +of a type which does so is the flags.Filename type, an alias of string +allowing simple filename completion. A slice or array argument value +whose element type implements flags.Completer will also be completed. +*/ +package flags diff --git a/vendor/github.com/jessevdk/go-flags/group.go b/vendor/github.com/jessevdk/go-flags/group.go new file mode 100644 index 0000000..6133a71 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/group.go @@ -0,0 +1,395 @@ +// Copyright 2012 Jesse van den Kieboom. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flags + +import ( + "errors" + "reflect" + "strings" + "unicode/utf8" + "unsafe" +) + +// ErrNotPointerToStruct indicates that a provided data container is not +// a pointer to a struct. Only pointers to structs are valid data containers +// for options. +var ErrNotPointerToStruct = errors.New("provided data is not a pointer to struct") + +// Group represents an option group. Option groups can be used to logically +// group options together under a description. Groups are only used to provide +// more structure to options both for the user (as displayed in the help message) +// and for you, since groups can be nested. +type Group struct { + // A short description of the group. The + // short description is primarily used in the built-in generated help + // message + ShortDescription string + + // A long description of the group. The long + // description is primarily used to present information on commands + // (Command embeds Group) in the built-in generated help and man pages. + LongDescription string + + // The namespace of the group + Namespace string + + // If true, the group is not displayed in the help or man page + Hidden bool + + // The parent of the group or nil if it has no parent + parent interface{} + + // All the options in the group + options []*Option + + // All the subgroups + groups []*Group + + // Whether the group represents the built-in help group + isBuiltinHelp bool + + data interface{} +} + +type scanHandler func(reflect.Value, *reflect.StructField) (bool, error) + +// AddGroup adds a new group to the command with the given name and data. The +// data needs to be a pointer to a struct from which the fields indicate which +// options are in the group. +func (g *Group) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) { + group := newGroup(shortDescription, longDescription, data) + + group.parent = g + + if err := group.scan(); err != nil { + return nil, err + } + + g.groups = append(g.groups, group) + return group, nil +} + +// Groups returns the list of groups embedded in this group. +func (g *Group) Groups() []*Group { + return g.groups +} + +// Options returns the list of options in this group. +func (g *Group) Options() []*Option { + return g.options +} + +// Find locates the subgroup with the given short description and returns it. +// If no such group can be found Find will return nil. Note that the description +// is matched case insensitively. +func (g *Group) Find(shortDescription string) *Group { + lshortDescription := strings.ToLower(shortDescription) + + var ret *Group + + g.eachGroup(func(gg *Group) { + if gg != g && strings.ToLower(gg.ShortDescription) == lshortDescription { + ret = gg + } + }) + + return ret +} + +func (g *Group) findOption(matcher func(*Option) bool) (option *Option) { + g.eachGroup(func(g *Group) { + for _, opt := range g.options { + if option == nil && matcher(opt) { + option = opt + } + } + }) + + return option +} + +// FindOptionByLongName finds an option that is part of the group, or any of its +// subgroups, by matching its long name (including the option namespace). +func (g *Group) FindOptionByLongName(longName string) *Option { + return g.findOption(func(option *Option) bool { + return option.LongNameWithNamespace() == longName + }) +} + +// FindOptionByShortName finds an option that is part of the group, or any of +// its subgroups, by matching its short name. +func (g *Group) FindOptionByShortName(shortName rune) *Option { + return g.findOption(func(option *Option) bool { + return option.ShortName == shortName + }) +} + +func newGroup(shortDescription string, longDescription string, data interface{}) *Group { + return &Group{ + ShortDescription: shortDescription, + LongDescription: longDescription, + + data: data, + } +} + +func (g *Group) optionByName(name string, namematch func(*Option, string) bool) *Option { + prio := 0 + var retopt *Option + + g.eachGroup(func(g *Group) { + for _, opt := range g.options { + if namematch != nil && namematch(opt, name) && prio < 4 { + retopt = opt + prio = 4 + } + + if name == opt.field.Name && prio < 3 { + retopt = opt + prio = 3 + } + + if name == opt.LongNameWithNamespace() && prio < 2 { + retopt = opt + prio = 2 + } + + if opt.ShortName != 0 && name == string(opt.ShortName) && prio < 1 { + retopt = opt + prio = 1 + } + } + }) + + return retopt +} + +func (g *Group) eachGroup(f func(*Group)) { + f(g) + + for _, gg := range g.groups { + gg.eachGroup(f) + } +} + +func isStringFalsy(s string) bool { + return s == "" || s == "false" || s == "no" || s == "0" +} + +func (g *Group) scanStruct(realval reflect.Value, sfield *reflect.StructField, handler scanHandler) error { + stype := realval.Type() + + if sfield != nil { + if ok, err := handler(realval, sfield); err != nil { + return err + } else if ok { + return nil + } + } + + for i := 0; i < stype.NumField(); i++ { + field := stype.Field(i) + + // PkgName is set only for non-exported fields, which we ignore + if field.PkgPath != "" && !field.Anonymous { + continue + } + + mtag := newMultiTag(string(field.Tag)) + + if err := mtag.Parse(); err != nil { + return err + } + + // Skip fields with the no-flag tag + if mtag.Get("no-flag") != "" { + continue + } + + // Dive deep into structs or pointers to structs + kind := field.Type.Kind() + fld := realval.Field(i) + + if kind == reflect.Struct { + if err := g.scanStruct(fld, &field, handler); err != nil { + return err + } + } else if kind == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { + flagCountBefore := len(g.options) + len(g.groups) + + if fld.IsNil() { + fld = reflect.New(fld.Type().Elem()) + } + + if err := g.scanStruct(reflect.Indirect(fld), &field, handler); err != nil { + return err + } + + if len(g.options)+len(g.groups) != flagCountBefore { + realval.Field(i).Set(fld) + } + } + + longname := mtag.Get("long") + shortname := mtag.Get("short") + + // Need at least either a short or long name + if longname == "" && shortname == "" && mtag.Get("ini-name") == "" { + continue + } + + short := rune(0) + rc := utf8.RuneCountInString(shortname) + + if rc > 1 { + return newErrorf(ErrShortNameTooLong, + "short names can only be 1 character long, not `%s'", + shortname) + + } else if rc == 1 { + short, _ = utf8.DecodeRuneInString(shortname) + } + + description := mtag.Get("description") + def := mtag.GetMany("default") + + optionalValue := mtag.GetMany("optional-value") + valueName := mtag.Get("value-name") + defaultMask := mtag.Get("default-mask") + + optional := !isStringFalsy(mtag.Get("optional")) + required := !isStringFalsy(mtag.Get("required")) + choices := mtag.GetMany("choice") + hidden := !isStringFalsy(mtag.Get("hidden")) + + option := &Option{ + Description: description, + ShortName: short, + LongName: longname, + Default: def, + EnvDefaultKey: mtag.Get("env"), + EnvDefaultDelim: mtag.Get("env-delim"), + OptionalArgument: optional, + OptionalValue: optionalValue, + Required: required, + ValueName: valueName, + DefaultMask: defaultMask, + Choices: choices, + Hidden: hidden, + + group: g, + + field: field, + value: realval.Field(i), + tag: mtag, + } + + if option.isBool() && option.Default != nil { + return newErrorf(ErrInvalidTag, + "boolean flag `%s' may not have default values, they always default to `false' and can only be turned on", + option.shortAndLongName()) + } + + g.options = append(g.options, option) + } + + return nil +} + +func (g *Group) checkForDuplicateFlags() *Error { + shortNames := make(map[rune]*Option) + longNames := make(map[string]*Option) + + var duplicateError *Error + + g.eachGroup(func(g *Group) { + for _, option := range g.options { + if option.LongName != "" { + longName := option.LongNameWithNamespace() + + if otherOption, ok := longNames[longName]; ok { + duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same long name as option `%s'", option, otherOption) + return + } + longNames[longName] = option + } + if option.ShortName != 0 { + if otherOption, ok := shortNames[option.ShortName]; ok { + duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same short name as option `%s'", option, otherOption) + return + } + shortNames[option.ShortName] = option + } + } + }) + + return duplicateError +} + +func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.StructField) (bool, error) { + mtag := newMultiTag(string(sfield.Tag)) + + if err := mtag.Parse(); err != nil { + return true, err + } + + subgroup := mtag.Get("group") + + if len(subgroup) != 0 { + ptrval := reflect.NewAt(realval.Type(), unsafe.Pointer(realval.UnsafeAddr())) + description := mtag.Get("description") + + group, err := g.AddGroup(subgroup, description, ptrval.Interface()) + if err != nil { + return true, err + } + + group.Namespace = mtag.Get("namespace") + group.Hidden = mtag.Get("hidden") != "" + + return true, nil + } + + return false, nil +} + +func (g *Group) scanType(handler scanHandler) error { + // Get all the public fields in the data struct + ptrval := reflect.ValueOf(g.data) + + if ptrval.Type().Kind() != reflect.Ptr { + panic(ErrNotPointerToStruct) + } + + stype := ptrval.Type().Elem() + + if stype.Kind() != reflect.Struct { + panic(ErrNotPointerToStruct) + } + + realval := reflect.Indirect(ptrval) + + if err := g.scanStruct(realval, nil, handler); err != nil { + return err + } + + if err := g.checkForDuplicateFlags(); err != nil { + return err + } + + return nil +} + +func (g *Group) scan() error { + return g.scanType(g.scanSubGroupHandler) +} + +func (g *Group) groupByName(name string) *Group { + if len(name) == 0 { + return g + } + + return g.Find(name) +} diff --git a/vendor/github.com/jessevdk/go-flags/group_test.go b/vendor/github.com/jessevdk/go-flags/group_test.go new file mode 100644 index 0000000..18cd6c1 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/group_test.go @@ -0,0 +1,255 @@ +package flags + +import ( + "testing" +) + +func TestGroupInline(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Group struct { + G bool `short:"g"` + } `group:"Grouped Options"` + }{} + + p, ret := assertParserSuccess(t, &opts, "-v", "-g") + + assertStringArray(t, ret, []string{}) + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !opts.Group.G { + t.Errorf("Expected Group.G to be true") + } + + if p.Command.Group.Find("Grouped Options") == nil { + t.Errorf("Expected to find group `Grouped Options'") + } +} + +func TestGroupAdd(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + }{} + + var grp = struct { + G bool `short:"g"` + }{} + + p := NewParser(&opts, Default) + g, err := p.AddGroup("Grouped Options", "", &grp) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + ret, err := p.ParseArgs([]string{"-v", "-g", "rest"}) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + assertStringArray(t, ret, []string{"rest"}) + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !grp.G { + t.Errorf("Expected Group.G to be true") + } + + if p.Command.Group.Find("Grouped Options") != g { + t.Errorf("Expected to find group `Grouped Options'") + } + + if p.Groups()[1] != g { + t.Errorf("Expected group %#v, but got %#v", g, p.Groups()[0]) + } + + if g.Options()[0].ShortName != 'g' { + t.Errorf("Expected short name `g' but got %v", g.Options()[0].ShortName) + } +} + +func TestGroupNestedInline(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Group struct { + G bool `short:"g"` + + Nested struct { + N string `long:"n"` + } `group:"Nested Options"` + } `group:"Grouped Options"` + }{} + + p, ret := assertParserSuccess(t, &opts, "-v", "-g", "--n", "n", "rest") + + assertStringArray(t, ret, []string{"rest"}) + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + if !opts.Group.G { + t.Errorf("Expected Group.G to be true") + } + + assertString(t, opts.Group.Nested.N, "n") + + if p.Command.Group.Find("Grouped Options") == nil { + t.Errorf("Expected to find group `Grouped Options'") + } + + if p.Command.Group.Find("Nested Options") == nil { + t.Errorf("Expected to find group `Nested Options'") + } +} + +func TestGroupNestedInlineNamespace(t *testing.T) { + var opts = struct { + Opt string `long:"opt"` + + Group struct { + Opt string `long:"opt"` + Group struct { + Opt string `long:"opt"` + } `group:"Subsubgroup" namespace:"sap"` + } `group:"Subgroup" namespace:"sip"` + }{} + + p, ret := assertParserSuccess(t, &opts, "--opt", "a", "--sip.opt", "b", "--sip.sap.opt", "c", "rest") + + assertStringArray(t, ret, []string{"rest"}) + + assertString(t, opts.Opt, "a") + assertString(t, opts.Group.Opt, "b") + assertString(t, opts.Group.Group.Opt, "c") + + for _, name := range []string{"Subgroup", "Subsubgroup"} { + if p.Command.Group.Find(name) == nil { + t.Errorf("Expected to find group '%s'", name) + } + } +} + +func TestDuplicateShortFlags(t *testing.T) { + var opts struct { + Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` + Variables []string `short:"v" long:"variable" description:"Set a variable value."` + } + + args := []string{ + "--verbose", + "-v", "123", + "-v", "456", + } + + _, err := ParseArgs(&opts, args) + + if err == nil { + t.Errorf("Expected an error with type ErrDuplicatedFlag") + } else { + err2 := err.(*Error) + if err2.Type != ErrDuplicatedFlag { + t.Errorf("Expected an error with type ErrDuplicatedFlag") + } + } +} + +func TestDuplicateLongFlags(t *testing.T) { + var opts struct { + Test1 []bool `short:"a" long:"testing" description:"Test 1"` + Test2 []string `short:"b" long:"testing" description:"Test 2."` + } + + args := []string{ + "--testing", + } + + _, err := ParseArgs(&opts, args) + + if err == nil { + t.Errorf("Expected an error with type ErrDuplicatedFlag") + } else { + err2 := err.(*Error) + if err2.Type != ErrDuplicatedFlag { + t.Errorf("Expected an error with type ErrDuplicatedFlag") + } + } +} + +func TestFindOptionByLongFlag(t *testing.T) { + var opts struct { + Testing bool `long:"testing" description:"Testing"` + } + + p := NewParser(&opts, Default) + opt := p.FindOptionByLongName("testing") + + if opt == nil { + t.Errorf("Expected option, but found none") + } + + assertString(t, opt.LongName, "testing") +} + +func TestFindOptionByShortFlag(t *testing.T) { + var opts struct { + Testing bool `short:"t" description:"Testing"` + } + + p := NewParser(&opts, Default) + opt := p.FindOptionByShortName('t') + + if opt == nil { + t.Errorf("Expected option, but found none") + } + + if opt.ShortName != 't' { + t.Errorf("Expected 't', but got %v", opt.ShortName) + } +} + +func TestFindOptionByLongFlagInSubGroup(t *testing.T) { + var opts struct { + Group struct { + Testing bool `long:"testing" description:"Testing"` + } `group:"sub-group"` + } + + p := NewParser(&opts, Default) + opt := p.FindOptionByLongName("testing") + + if opt == nil { + t.Errorf("Expected option, but found none") + } + + assertString(t, opt.LongName, "testing") +} + +func TestFindOptionByShortFlagInSubGroup(t *testing.T) { + var opts struct { + Group struct { + Testing bool `short:"t" description:"Testing"` + } `group:"sub-group"` + } + + p := NewParser(&opts, Default) + opt := p.FindOptionByShortName('t') + + if opt == nil { + t.Errorf("Expected option, but found none") + } + + if opt.ShortName != 't' { + t.Errorf("Expected 't', but got %v", opt.ShortName) + } +} diff --git a/vendor/github.com/jessevdk/go-flags/help.go b/vendor/github.com/jessevdk/go-flags/help.go new file mode 100644 index 0000000..d380305 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/help.go @@ -0,0 +1,491 @@ +// Copyright 2012 Jesse van den Kieboom. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flags + +import ( + "bufio" + "bytes" + "fmt" + "io" + "runtime" + "strings" + "unicode/utf8" +) + +type alignmentInfo struct { + maxLongLen int + hasShort bool + hasValueName bool + terminalColumns int + indent bool +} + +const ( + paddingBeforeOption = 2 + distanceBetweenOptionAndDescription = 2 +) + +func (a *alignmentInfo) descriptionStart() int { + ret := a.maxLongLen + distanceBetweenOptionAndDescription + + if a.hasShort { + ret += 2 + } + + if a.maxLongLen > 0 { + ret += 4 + } + + if a.hasValueName { + ret += 3 + } + + return ret +} + +func (a *alignmentInfo) updateLen(name string, indent bool) { + l := utf8.RuneCountInString(name) + + if indent { + l = l + 4 + } + + if l > a.maxLongLen { + a.maxLongLen = l + } +} + +func (p *Parser) getAlignmentInfo() alignmentInfo { + ret := alignmentInfo{ + maxLongLen: 0, + hasShort: false, + hasValueName: false, + terminalColumns: getTerminalColumns(), + } + + if ret.terminalColumns <= 0 { + ret.terminalColumns = 80 + } + + var prevcmd *Command + + p.eachActiveGroup(func(c *Command, grp *Group) { + if c != prevcmd { + for _, arg := range c.args { + ret.updateLen(arg.Name, c != p.Command) + } + } + + for _, info := range grp.options { + if !info.canCli() { + continue + } + + if info.ShortName != 0 { + ret.hasShort = true + } + + if len(info.ValueName) > 0 { + ret.hasValueName = true + } + + l := info.LongNameWithNamespace() + info.ValueName + + if len(info.Choices) != 0 { + l += "[" + strings.Join(info.Choices, "|") + "]" + } + + ret.updateLen(l, c != p.Command) + } + }) + + return ret +} + +func wrapText(s string, l int, prefix string) string { + var ret string + + if l < 10 { + l = 10 + } + + // Basic text wrapping of s at spaces to fit in l + lines := strings.Split(s, "\n") + + for _, line := range lines { + var retline string + + line = strings.TrimSpace(line) + + for len(line) > l { + // Try to split on space + suffix := "" + + pos := strings.LastIndex(line[:l], " ") + + if pos < 0 { + pos = l - 1 + suffix = "-\n" + } + + if len(retline) != 0 { + retline += "\n" + prefix + } + + retline += strings.TrimSpace(line[:pos]) + suffix + line = strings.TrimSpace(line[pos:]) + } + + if len(line) > 0 { + if len(retline) != 0 { + retline += "\n" + prefix + } + + retline += line + } + + if len(ret) > 0 { + ret += "\n" + + if len(retline) > 0 { + ret += prefix + } + } + + ret += retline + } + + return ret +} + +func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alignmentInfo) { + line := &bytes.Buffer{} + + prefix := paddingBeforeOption + + if info.indent { + prefix += 4 + } + + if option.Hidden { + return + } + + line.WriteString(strings.Repeat(" ", prefix)) + + if option.ShortName != 0 { + line.WriteRune(defaultShortOptDelimiter) + line.WriteRune(option.ShortName) + } else if info.hasShort { + line.WriteString(" ") + } + + descstart := info.descriptionStart() + paddingBeforeOption + + if len(option.LongName) > 0 { + if option.ShortName != 0 { + line.WriteString(", ") + } else if info.hasShort { + line.WriteString(" ") + } + + line.WriteString(defaultLongOptDelimiter) + line.WriteString(option.LongNameWithNamespace()) + } + + if option.canArgument() { + line.WriteRune(defaultNameArgDelimiter) + + if len(option.ValueName) > 0 { + line.WriteString(option.ValueName) + } + + if len(option.Choices) > 0 { + line.WriteString("[" + strings.Join(option.Choices, "|") + "]") + } + } + + written := line.Len() + line.WriteTo(writer) + + if option.Description != "" { + dw := descstart - written + writer.WriteString(strings.Repeat(" ", dw)) + + var def string + + if len(option.DefaultMask) != 0 { + if option.DefaultMask != "-" { + def = option.DefaultMask + } + } else { + def = option.defaultLiteral + } + + var envDef string + if option.EnvDefaultKey != "" { + var envPrintable string + if runtime.GOOS == "windows" { + envPrintable = "%" + option.EnvDefaultKey + "%" + } else { + envPrintable = "$" + option.EnvDefaultKey + } + envDef = fmt.Sprintf(" [%s]", envPrintable) + } + + var desc string + + if def != "" { + desc = fmt.Sprintf("%s (default: %v)%s", option.Description, def, envDef) + } else { + desc = option.Description + envDef + } + + writer.WriteString(wrapText(desc, + info.terminalColumns-descstart, + strings.Repeat(" ", descstart))) + } + + writer.WriteString("\n") +} + +func maxCommandLength(s []*Command) int { + if len(s) == 0 { + return 0 + } + + ret := len(s[0].Name) + + for _, v := range s[1:] { + l := len(v.Name) + + if l > ret { + ret = l + } + } + + return ret +} + +// WriteHelp writes a help message containing all the possible options and +// their descriptions to the provided writer. Note that the HelpFlag parser +// option provides a convenient way to add a -h/--help option group to the +// command line parser which will automatically show the help messages using +// this method. +func (p *Parser) WriteHelp(writer io.Writer) { + if writer == nil { + return + } + + wr := bufio.NewWriter(writer) + aligninfo := p.getAlignmentInfo() + + cmd := p.Command + + for cmd.Active != nil { + cmd = cmd.Active + } + + if p.Name != "" { + wr.WriteString("Usage:\n") + wr.WriteString(" ") + + allcmd := p.Command + + for allcmd != nil { + var usage string + + if allcmd == p.Command { + if len(p.Usage) != 0 { + usage = p.Usage + } else if p.Options&HelpFlag != 0 { + usage = "[OPTIONS]" + } + } else if us, ok := allcmd.data.(Usage); ok { + usage = us.Usage() + } else if allcmd.hasCliOptions() { + usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name) + } + + if len(usage) != 0 { + fmt.Fprintf(wr, " %s %s", allcmd.Name, usage) + } else { + fmt.Fprintf(wr, " %s", allcmd.Name) + } + + if len(allcmd.args) > 0 { + fmt.Fprintf(wr, " ") + } + + for i, arg := range allcmd.args { + if i != 0 { + fmt.Fprintf(wr, " ") + } + + name := arg.Name + + if arg.isRemaining() { + name = name + "..." + } + + if !allcmd.ArgsRequired { + fmt.Fprintf(wr, "[%s]", name) + } else { + fmt.Fprintf(wr, "%s", name) + } + } + + if allcmd.Active == nil && len(allcmd.commands) > 0 { + var co, cc string + + if allcmd.SubcommandsOptional { + co, cc = "[", "]" + } else { + co, cc = "<", ">" + } + + visibleCommands := allcmd.visibleCommands() + + if len(visibleCommands) > 3 { + fmt.Fprintf(wr, " %scommand%s", co, cc) + } else { + subcommands := allcmd.sortedVisibleCommands() + names := make([]string, len(subcommands)) + + for i, subc := range subcommands { + names[i] = subc.Name + } + + fmt.Fprintf(wr, " %s%s%s", co, strings.Join(names, " | "), cc) + } + } + + allcmd = allcmd.Active + } + + fmt.Fprintln(wr) + + if len(cmd.LongDescription) != 0 { + fmt.Fprintln(wr) + + t := wrapText(cmd.LongDescription, + aligninfo.terminalColumns, + "") + + fmt.Fprintln(wr, t) + } + } + + c := p.Command + + for c != nil { + printcmd := c != p.Command + + c.eachGroup(func(grp *Group) { + first := true + + // Skip built-in help group for all commands except the top-level + // parser + if grp.Hidden || (grp.isBuiltinHelp && c != p.Command) { + return + } + + for _, info := range grp.options { + if !info.canCli() || info.Hidden { + continue + } + + if printcmd { + fmt.Fprintf(wr, "\n[%s command options]\n", c.Name) + aligninfo.indent = true + printcmd = false + } + + if first && cmd.Group != grp { + fmt.Fprintln(wr) + + if aligninfo.indent { + wr.WriteString(" ") + } + + fmt.Fprintf(wr, "%s:\n", grp.ShortDescription) + first = false + } + + p.writeHelpOption(wr, info, aligninfo) + } + }) + + var args []*Arg + for _, arg := range c.args { + if arg.Description != "" { + args = append(args, arg) + } + } + + if len(args) > 0 { + if c == p.Command { + fmt.Fprintf(wr, "\nArguments:\n") + } else { + fmt.Fprintf(wr, "\n[%s command arguments]\n", c.Name) + } + + descStart := aligninfo.descriptionStart() + paddingBeforeOption + + for _, arg := range args { + argPrefix := strings.Repeat(" ", paddingBeforeOption) + argPrefix += arg.Name + + if len(arg.Description) > 0 { + argPrefix += ":" + wr.WriteString(argPrefix) + + // Space between "arg:" and the description start + descPadding := strings.Repeat(" ", descStart-len(argPrefix)) + // How much space the description gets before wrapping + descWidth := aligninfo.terminalColumns - 1 - descStart + // Whitespace to which we can indent new description lines + descPrefix := strings.Repeat(" ", descStart) + + wr.WriteString(descPadding) + wr.WriteString(wrapText(arg.Description, descWidth, descPrefix)) + } else { + wr.WriteString(argPrefix) + } + + fmt.Fprintln(wr) + } + } + + c = c.Active + } + + scommands := cmd.sortedVisibleCommands() + + if len(scommands) > 0 { + maxnamelen := maxCommandLength(scommands) + + fmt.Fprintln(wr) + fmt.Fprintln(wr, "Available commands:") + + for _, c := range scommands { + fmt.Fprintf(wr, " %s", c.Name) + + if len(c.ShortDescription) > 0 { + pad := strings.Repeat(" ", maxnamelen-len(c.Name)) + fmt.Fprintf(wr, "%s %s", pad, c.ShortDescription) + + if len(c.Aliases) > 0 { + fmt.Fprintf(wr, " (aliases: %s)", strings.Join(c.Aliases, ", ")) + } + + } + + fmt.Fprintln(wr) + } + } + + wr.Flush() +} diff --git a/vendor/github.com/jessevdk/go-flags/help_test.go b/vendor/github.com/jessevdk/go-flags/help_test.go new file mode 100644 index 0000000..bb76640 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/help_test.go @@ -0,0 +1,538 @@ +package flags + +import ( + "bufio" + "bytes" + "fmt" + "os" + "runtime" + "strings" + "testing" + "time" +) + +type helpOptions struct { + Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information" ini-name:"verbose"` + Call func(string) `short:"c" description:"Call phone number" ini-name:"call"` + PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` + EmptyDescription bool `long:"empty-description"` + + Default string `long:"default" default:"Some\nvalue" description:"Test default value"` + DefaultArray []string `long:"default-array" default:"Some value" default:"Other\tvalue" description:"Test default array value"` + DefaultMap map[string]string `long:"default-map" default:"some:value" default:"another:value" description:"Testdefault map value"` + EnvDefault1 string `long:"env-default1" default:"Some value" env:"ENV_DEFAULT" description:"Test env-default1 value"` + EnvDefault2 string `long:"env-default2" env:"ENV_DEFAULT" description:"Test env-default2 value"` + OptionWithArgName string `long:"opt-with-arg-name" value-name:"something" description:"Option with named argument"` + OptionWithChoices string `long:"opt-with-choices" value-name:"choice" choice:"dog" choice:"cat" description:"Option with choices"` + Hidden string `long:"hidden" description:"Hidden option" hidden:"yes"` + + OnlyIni string `ini-name:"only-ini" description:"Option only available in ini"` + + Other struct { + StringSlice []string `short:"s" default:"some" default:"value" description:"A slice of strings"` + IntMap map[string]int `long:"intmap" default:"a:1" description:"A map from string to int" ini-name:"int-map"` + } `group:"Other Options"` + + HiddenGroup struct { + InsideHiddenGroup string `long:"inside-hidden-group" description:"Inside hidden group"` + } `group:"Hidden group" hidden:"yes"` + + Group struct { + Opt string `long:"opt" description:"This is a subgroup option"` + HiddenInsideGroup string `long:"hidden-inside-group" description:"Hidden inside group" hidden:"yes"` + NotHiddenInsideGroup string `long:"not-hidden-inside-group" description:"Not hidden inside group" hidden:"false"` + + Group struct { + Opt string `long:"opt" description:"This is a subsubgroup option"` + } `group:"Subsubgroup" namespace:"sap"` + } `group:"Subgroup" namespace:"sip"` + + Command struct { + ExtraVerbose []bool `long:"extra-verbose" description:"Use for extra verbosity"` + } `command:"command" alias:"cm" alias:"cmd" description:"A command"` + + HiddenCommand struct { + ExtraVerbose []bool `long:"extra-verbose" description:"Use for extra verbosity"` + } `command:"hidden-command" description:"A hidden command" hidden:"yes"` + + Args struct { + Filename string `positional-arg-name:"filename" description:"A filename with a long description to trigger line wrapping"` + Number int `positional-arg-name:"num" description:"A number"` + HiddenInHelp float32 `positional-arg-name:"hidden-in-help" required:"yes"` + } `positional-args:"yes"` +} + +func TestHelp(t *testing.T) { + oldEnv := EnvSnapshot() + defer oldEnv.Restore() + os.Setenv("ENV_DEFAULT", "env-def") + + var opts helpOptions + p := NewNamedParser("TestHelp", HelpFlag) + p.AddGroup("Application Options", "The application options", &opts) + + _, err := p.ParseArgs([]string{"--help"}) + + if err == nil { + t.Fatalf("Expected help error") + } + + if e, ok := err.(*Error); !ok { + t.Fatalf("Expected flags.Error, but got %T", err) + } else { + if e.Type != ErrHelp { + t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) + } + + var expected string + + if runtime.GOOS == "windows" { + expected = `Usage: + TestHelp [OPTIONS] [filename] [num] [hidden-in-help] + +Application Options: + /v, /verbose Show verbose debug information + /c: Call phone number + /ptrslice: A slice of pointers to string + /empty-description + /default: Test default value (default: + "Some\nvalue") + /default-array: Test default array value (default: + Some value, "Other\tvalue") + /default-map: Testdefault map value (default: + some:value, another:value) + /env-default1: Test env-default1 value (default: + Some value) [%ENV_DEFAULT%] + /env-default2: Test env-default2 value + [%ENV_DEFAULT%] + /opt-with-arg-name:something Option with named argument + /opt-with-choices:choice[dog|cat] Option with choices + +Other Options: + /s: A slice of strings (default: some, + value) + /intmap: A map from string to int (default: + a:1) + +Subgroup: + /sip.opt: This is a subgroup option + /sip.not-hidden-inside-group: Not hidden inside group + +Subsubgroup: + /sip.sap.opt: This is a subsubgroup option + +Help Options: + /? Show this help message + /h, /help Show this help message + +Arguments: + filename: A filename with a long description + to trigger line wrapping + num: A number + +Available commands: + command A command (aliases: cm, cmd) +` + } else { + expected = `Usage: + TestHelp [OPTIONS] [filename] [num] [hidden-in-help] + +Application Options: + -v, --verbose Show verbose debug information + -c= Call phone number + --ptrslice= A slice of pointers to string + --empty-description + --default= Test default value (default: + "Some\nvalue") + --default-array= Test default array value (default: + Some value, "Other\tvalue") + --default-map= Testdefault map value (default: + some:value, another:value) + --env-default1= Test env-default1 value (default: + Some value) [$ENV_DEFAULT] + --env-default2= Test env-default2 value + [$ENV_DEFAULT] + --opt-with-arg-name=something Option with named argument + --opt-with-choices=choice[dog|cat] Option with choices + +Other Options: + -s= A slice of strings (default: some, + value) + --intmap= A map from string to int (default: + a:1) + +Subgroup: + --sip.opt= This is a subgroup option + --sip.not-hidden-inside-group= Not hidden inside group + +Subsubgroup: + --sip.sap.opt= This is a subsubgroup option + +Help Options: + -h, --help Show this help message + +Arguments: + filename: A filename with a long description + to trigger line wrapping + num: A number + +Available commands: + command A command (aliases: cm, cmd) +` + } + + assertDiff(t, e.Message, expected, "help message") + } +} + +func TestMan(t *testing.T) { + oldEnv := EnvSnapshot() + defer oldEnv.Restore() + os.Setenv("ENV_DEFAULT", "env-def") + + var opts helpOptions + p := NewNamedParser("TestMan", HelpFlag) + p.ShortDescription = "Test manpage generation" + p.LongDescription = "This is a somewhat `longer' description of what this does" + p.AddGroup("Application Options", "The application options", &opts) + + p.Commands()[0].LongDescription = "Longer `command' description" + + var buf bytes.Buffer + p.WriteManPage(&buf) + + got := buf.String() + + tt := time.Now() + + var envDefaultName string + + if runtime.GOOS == "windows" { + envDefaultName = "%ENV_DEFAULT%" + } else { + envDefaultName = "$ENV_DEFAULT" + } + + expected := fmt.Sprintf(`.TH TestMan 1 "%s" +.SH NAME +TestMan \- Test manpage generation +.SH SYNOPSIS +\fBTestMan\fP [OPTIONS] +.SH DESCRIPTION +This is a somewhat \fBlonger\fP description of what this does +.SH OPTIONS +.SS Application Options +The application options +.TP +\fB\fB\-v\fR, \fB\-\-verbose\fR\fP +Show verbose debug information +.TP +\fB\fB\-c\fR\fP +Call phone number +.TP +\fB\fB\-\-ptrslice\fR\fP +A slice of pointers to string +.TP +\fB\fB\-\-empty-description\fR\fP +.TP +\fB\fB\-\-default\fR \fP +Test default value +.TP +\fB\fB\-\-default-array\fR \fP +Test default array value +.TP +\fB\fB\-\-default-map\fR \fP +Testdefault map value +.TP +\fB\fB\-\-env-default1\fR \fP +Test env-default1 value +.TP +\fB\fB\-\-env-default2\fR \fP +Test env-default2 value +.TP +\fB\fB\-\-opt-with-arg-name\fR \fIsomething\fR\fP +Option with named argument +.TP +\fB\fB\-\-opt-with-choices\fR \fIchoice\fR\fP +Option with choices +.SS Other Options +.TP +\fB\fB\-s\fR \fP +A slice of strings +.TP +\fB\fB\-\-intmap\fR \fP +A map from string to int +.SS Subgroup +.TP +\fB\fB\-\-sip.opt\fR\fP +This is a subgroup option +.TP +\fB\fB\-\-sip.not-hidden-inside-group\fR\fP +Not hidden inside group +.SS Subsubgroup +.TP +\fB\fB\-\-sip.sap.opt\fR\fP +This is a subsubgroup option +.SH COMMANDS +.SS command +A command + +Longer \fBcommand\fP description + +\fBUsage\fP: TestMan [OPTIONS] command [command-OPTIONS] +.TP + +\fBAliases\fP: cm, cmd + +.TP +\fB\fB\-\-extra-verbose\fR\fP +Use for extra verbosity +`, tt.Format("2 January 2006"), envDefaultName) + + assertDiff(t, got, expected, "man page") +} + +type helpCommandNoOptions struct { + Command struct { + } `command:"command" description:"A command"` +} + +func TestHelpCommand(t *testing.T) { + oldEnv := EnvSnapshot() + defer oldEnv.Restore() + os.Setenv("ENV_DEFAULT", "env-def") + + var opts helpCommandNoOptions + p := NewNamedParser("TestHelpCommand", HelpFlag) + p.AddGroup("Application Options", "The application options", &opts) + + _, err := p.ParseArgs([]string{"command", "--help"}) + + if err == nil { + t.Fatalf("Expected help error") + } + + if e, ok := err.(*Error); !ok { + t.Fatalf("Expected flags.Error, but got %T", err) + } else { + if e.Type != ErrHelp { + t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) + } + + var expected string + + if runtime.GOOS == "windows" { + expected = `Usage: + TestHelpCommand [OPTIONS] command + +Help Options: + /? Show this help message + /h, /help Show this help message +` + } else { + expected = `Usage: + TestHelpCommand [OPTIONS] command + +Help Options: + -h, --help Show this help message +` + } + + assertDiff(t, e.Message, expected, "help message") + } +} + +func TestHelpDefaults(t *testing.T) { + var expected string + + if runtime.GOOS == "windows" { + expected = `Usage: + TestHelpDefaults [OPTIONS] + +Application Options: + /with-default: With default (default: default-value) + /without-default: Without default + /with-programmatic-default: With programmatic default (default: + default-value) + +Help Options: + /? Show this help message + /h, /help Show this help message +` + } else { + expected = `Usage: + TestHelpDefaults [OPTIONS] + +Application Options: + --with-default= With default (default: default-value) + --without-default= Without default + --with-programmatic-default= With programmatic default (default: + default-value) + +Help Options: + -h, --help Show this help message +` + } + + tests := []struct { + Args []string + Output string + }{ + { + Args: []string{"-h"}, + Output: expected, + }, + { + Args: []string{"--with-default", "other-value", "--with-programmatic-default", "other-value", "-h"}, + Output: expected, + }, + } + + for _, test := range tests { + var opts struct { + WithDefault string `long:"with-default" default:"default-value" description:"With default"` + WithoutDefault string `long:"without-default" description:"Without default"` + WithProgrammaticDefault string `long:"with-programmatic-default" description:"With programmatic default"` + } + + opts.WithProgrammaticDefault = "default-value" + + p := NewNamedParser("TestHelpDefaults", HelpFlag) + p.AddGroup("Application Options", "The application options", &opts) + + _, err := p.ParseArgs(test.Args) + + if err == nil { + t.Fatalf("Expected help error") + } + + if e, ok := err.(*Error); !ok { + t.Fatalf("Expected flags.Error, but got %T", err) + } else { + if e.Type != ErrHelp { + t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) + } + + assertDiff(t, e.Message, test.Output, "help message") + } + } +} + +func TestHelpRestArgs(t *testing.T) { + opts := struct { + Verbose bool `short:"v"` + }{} + + p := NewNamedParser("TestHelpDefaults", HelpFlag) + p.AddGroup("Application Options", "The application options", &opts) + + retargs, err := p.ParseArgs([]string{"-h", "-v", "rest"}) + + if err == nil { + t.Fatalf("Expected help error") + } + + assertStringArray(t, retargs, []string{"-v", "rest"}) +} + +func TestWrapText(t *testing.T) { + s := "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." + + got := wrapText(s, 60, " ") + expected := `Lorem ipsum dolor sit amet, consectetur adipisicing elit, + sed do eiusmod tempor incididunt ut labore et dolore magna + aliqua. Ut enim ad minim veniam, quis nostrud exercitation + ullamco laboris nisi ut aliquip ex ea commodo consequat. + Duis aute irure dolor in reprehenderit in voluptate velit + esse cillum dolore eu fugiat nulla pariatur. Excepteur sint + occaecat cupidatat non proident, sunt in culpa qui officia + deserunt mollit anim id est laborum.` + + assertDiff(t, got, expected, "wrapped text") +} + +func TestWrapParagraph(t *testing.T) { + s := "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\n\n" + s += "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\n\n" + s += "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\n\n" + s += "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n" + + got := wrapText(s, 60, " ") + expected := `Lorem ipsum dolor sit amet, consectetur adipisicing elit, + sed do eiusmod tempor incididunt ut labore et dolore magna + aliqua. + + Ut enim ad minim veniam, quis nostrud exercitation ullamco + laboris nisi ut aliquip ex ea commodo consequat. + + Duis aute irure dolor in reprehenderit in voluptate velit + esse cillum dolore eu fugiat nulla pariatur. + + Excepteur sint occaecat cupidatat non proident, sunt in + culpa qui officia deserunt mollit anim id est laborum. +` + + assertDiff(t, got, expected, "wrapped paragraph") +} + +func TestHelpDefaultMask(t *testing.T) { + var tests = []struct { + opts interface{} + present string + }{ + { + opts: &struct { + Value string `short:"v" default:"123" description:"V"` + }{}, + present: "V (default: 123)\n", + }, + { + opts: &struct { + Value string `short:"v" default:"123" default-mask:"abc" description:"V"` + }{}, + present: "V (default: abc)\n", + }, + { + opts: &struct { + Value string `short:"v" default:"123" default-mask:"-" description:"V"` + }{}, + present: "V\n", + }, + { + opts: &struct { + Value string `short:"v" description:"V"` + }{Value: "123"}, + present: "V (default: 123)\n", + }, + { + opts: &struct { + Value string `short:"v" default-mask:"abc" description:"V"` + }{Value: "123"}, + present: "V (default: abc)\n", + }, + { + opts: &struct { + Value string `short:"v" default-mask:"-" description:"V"` + }{Value: "123"}, + present: "V\n", + }, + } + + for _, test := range tests { + p := NewParser(test.opts, HelpFlag) + _, err := p.ParseArgs([]string{"-h"}) + if flagsErr, ok := err.(*Error); ok && flagsErr.Type == ErrHelp { + err = nil + } + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + h := &bytes.Buffer{} + w := bufio.NewWriter(h) + p.writeHelpOption(w, p.FindOptionByShortName('v'), p.getAlignmentInfo()) + w.Flush() + if strings.Index(h.String(), test.present) < 0 { + t.Errorf("Not present %q\n%s", test.present, h.String()) + } + } +} diff --git a/vendor/github.com/jessevdk/go-flags/ini.go b/vendor/github.com/jessevdk/go-flags/ini.go new file mode 100644 index 0000000..e714d3d --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/ini.go @@ -0,0 +1,597 @@ +package flags + +import ( + "bufio" + "fmt" + "io" + "os" + "reflect" + "sort" + "strconv" + "strings" +) + +// IniError contains location information on where an error occurred. +type IniError struct { + // The error message. + Message string + + // The filename of the file in which the error occurred. + File string + + // The line number at which the error occurred. + LineNumber uint +} + +// Error provides a "file:line: message" formatted message of the ini error. +func (x *IniError) Error() string { + return fmt.Sprintf( + "%s:%d: %s", + x.File, + x.LineNumber, + x.Message, + ) +} + +// IniOptions for writing +type IniOptions uint + +const ( + // IniNone indicates no options. + IniNone IniOptions = 0 + + // IniIncludeDefaults indicates that default values should be written. + IniIncludeDefaults = 1 << iota + + // IniCommentDefaults indicates that if IniIncludeDefaults is used + // options with default values are written but commented out. + IniCommentDefaults + + // IniIncludeComments indicates that comments containing the description + // of an option should be written. + IniIncludeComments + + // IniDefault provides a default set of options. + IniDefault = IniIncludeComments +) + +// IniParser is a utility to read and write flags options from and to ini +// formatted strings. +type IniParser struct { + ParseAsDefaults bool // override default flags + + parser *Parser +} + +type iniValue struct { + Name string + Value string + Quoted bool + LineNumber uint +} + +type iniSection []iniValue + +type ini struct { + File string + Sections map[string]iniSection +} + +// NewIniParser creates a new ini parser for a given Parser. +func NewIniParser(p *Parser) *IniParser { + return &IniParser{ + parser: p, + } +} + +// IniParse is a convenience function to parse command line options with default +// settings from an ini formatted file. The provided data is a pointer to a struct +// representing the default option group (named "Application Options"). For +// more control, use flags.NewParser. +func IniParse(filename string, data interface{}) error { + p := NewParser(data, Default) + + return NewIniParser(p).ParseFile(filename) +} + +// ParseFile parses flags from an ini formatted file. See Parse for more +// information on the ini file format. The returned errors can be of the type +// flags.Error or flags.IniError. +func (i *IniParser) ParseFile(filename string) error { + ini, err := readIniFromFile(filename) + + if err != nil { + return err + } + + return i.parse(ini) +} + +// Parse parses flags from an ini format. You can use ParseFile as a +// convenience function to parse from a filename instead of a general +// io.Reader. +// +// The format of the ini file is as follows: +// +// [Option group name] +// option = value +// +// Each section in the ini file represents an option group or command in the +// flags parser. The default flags parser option group (i.e. when using +// flags.Parse) is named 'Application Options'. The ini option name is matched +// in the following order: +// +// 1. Compared to the ini-name tag on the option struct field (if present) +// 2. Compared to the struct field name +// 3. Compared to the option long name (if present) +// 4. Compared to the option short name (if present) +// +// Sections for nested groups and commands can be addressed using a dot `.' +// namespacing notation (i.e [subcommand.Options]). Group section names are +// matched case insensitive. +// +// The returned errors can be of the type flags.Error or flags.IniError. +func (i *IniParser) Parse(reader io.Reader) error { + ini, err := readIni(reader, "") + + if err != nil { + return err + } + + return i.parse(ini) +} + +// WriteFile writes the flags as ini format into a file. See Write +// for more information. The returned error occurs when the specified file +// could not be opened for writing. +func (i *IniParser) WriteFile(filename string, options IniOptions) error { + return writeIniToFile(i, filename, options) +} + +// Write writes the current values of all the flags to an ini format. +// See Parse for more information on the ini file format. You typically +// call this only after settings have been parsed since the default values of each +// option are stored just before parsing the flags (this is only relevant when +// IniIncludeDefaults is _not_ set in options). +func (i *IniParser) Write(writer io.Writer, options IniOptions) { + writeIni(i, writer, options) +} + +func readFullLine(reader *bufio.Reader) (string, error) { + var line []byte + + for { + l, more, err := reader.ReadLine() + + if err != nil { + return "", err + } + + if line == nil && !more { + return string(l), nil + } + + line = append(line, l...) + + if !more { + break + } + } + + return string(line), nil +} + +func optionIniName(option *Option) string { + name := option.tag.Get("_read-ini-name") + + if len(name) != 0 { + return name + } + + name = option.tag.Get("ini-name") + + if len(name) != 0 { + return name + } + + return option.field.Name +} + +func writeGroupIni(cmd *Command, group *Group, namespace string, writer io.Writer, options IniOptions) { + var sname string + + if len(namespace) != 0 { + sname = namespace + } + + if cmd.Group != group && len(group.ShortDescription) != 0 { + if len(sname) != 0 { + sname += "." + } + + sname += group.ShortDescription + } + + sectionwritten := false + comments := (options & IniIncludeComments) != IniNone + + for _, option := range group.options { + if option.isFunc() || option.Hidden { + continue + } + + if len(option.tag.Get("no-ini")) != 0 { + continue + } + + val := option.value + + if (options&IniIncludeDefaults) == IniNone && option.valueIsDefault() { + continue + } + + if !sectionwritten { + fmt.Fprintf(writer, "[%s]\n", sname) + sectionwritten = true + } + + if comments && len(option.Description) != 0 { + fmt.Fprintf(writer, "; %s\n", option.Description) + } + + oname := optionIniName(option) + + commentOption := (options&(IniIncludeDefaults|IniCommentDefaults)) == IniIncludeDefaults|IniCommentDefaults && option.valueIsDefault() + + kind := val.Type().Kind() + switch kind { + case reflect.Slice: + kind = val.Type().Elem().Kind() + + if val.Len() == 0 { + writeOption(writer, oname, kind, "", "", true, option.iniQuote) + } else { + for idx := 0; idx < val.Len(); idx++ { + v, _ := convertToString(val.Index(idx), option.tag) + + writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote) + } + } + case reflect.Map: + kind = val.Type().Elem().Kind() + + if val.Len() == 0 { + writeOption(writer, oname, kind, "", "", true, option.iniQuote) + } else { + mkeys := val.MapKeys() + keys := make([]string, len(val.MapKeys())) + kkmap := make(map[string]reflect.Value) + + for i, k := range mkeys { + keys[i], _ = convertToString(k, option.tag) + kkmap[keys[i]] = k + } + + sort.Strings(keys) + + for _, k := range keys { + v, _ := convertToString(val.MapIndex(kkmap[k]), option.tag) + + writeOption(writer, oname, kind, k, v, commentOption, option.iniQuote) + } + } + default: + v, _ := convertToString(val, option.tag) + + writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote) + } + + if comments { + fmt.Fprintln(writer) + } + } + + if sectionwritten && !comments { + fmt.Fprintln(writer) + } +} + +func writeOption(writer io.Writer, optionName string, optionType reflect.Kind, optionKey string, optionValue string, commentOption bool, forceQuote bool) { + if forceQuote || (optionType == reflect.String && !isPrint(optionValue)) { + optionValue = strconv.Quote(optionValue) + } + + comment := "" + if commentOption { + comment = "; " + } + + fmt.Fprintf(writer, "%s%s =", comment, optionName) + + if optionKey != "" { + fmt.Fprintf(writer, " %s:%s", optionKey, optionValue) + } else if optionValue != "" { + fmt.Fprintf(writer, " %s", optionValue) + } + + fmt.Fprintln(writer) +} + +func writeCommandIni(command *Command, namespace string, writer io.Writer, options IniOptions) { + command.eachGroup(func(group *Group) { + if !group.Hidden { + writeGroupIni(command, group, namespace, writer, options) + } + }) + + for _, c := range command.commands { + var nns string + + if c.Hidden { + continue + } + + if len(namespace) != 0 { + nns = c.Name + "." + nns + } else { + nns = c.Name + } + + writeCommandIni(c, nns, writer, options) + } +} + +func writeIni(parser *IniParser, writer io.Writer, options IniOptions) { + writeCommandIni(parser.parser.Command, "", writer, options) +} + +func writeIniToFile(parser *IniParser, filename string, options IniOptions) error { + file, err := os.Create(filename) + + if err != nil { + return err + } + + defer file.Close() + + writeIni(parser, file, options) + + return nil +} + +func readIniFromFile(filename string) (*ini, error) { + file, err := os.Open(filename) + + if err != nil { + return nil, err + } + + defer file.Close() + + return readIni(file, filename) +} + +func readIni(contents io.Reader, filename string) (*ini, error) { + ret := &ini{ + File: filename, + Sections: make(map[string]iniSection), + } + + reader := bufio.NewReader(contents) + + // Empty global section + section := make(iniSection, 0, 10) + sectionname := "" + + ret.Sections[sectionname] = section + + var lineno uint + + for { + line, err := readFullLine(reader) + + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + lineno++ + line = strings.TrimSpace(line) + + // Skip empty lines and lines starting with ; (comments) + if len(line) == 0 || line[0] == ';' || line[0] == '#' { + continue + } + + if line[0] == '[' { + if line[0] != '[' || line[len(line)-1] != ']' { + return nil, &IniError{ + Message: "malformed section header", + File: filename, + LineNumber: lineno, + } + } + + name := strings.TrimSpace(line[1 : len(line)-1]) + + if len(name) == 0 { + return nil, &IniError{ + Message: "empty section name", + File: filename, + LineNumber: lineno, + } + } + + sectionname = name + section = ret.Sections[name] + + if section == nil { + section = make(iniSection, 0, 10) + ret.Sections[name] = section + } + + continue + } + + // Parse option here + keyval := strings.SplitN(line, "=", 2) + + if len(keyval) != 2 { + return nil, &IniError{ + Message: fmt.Sprintf("malformed key=value (%s)", line), + File: filename, + LineNumber: lineno, + } + } + + name := strings.TrimSpace(keyval[0]) + value := strings.TrimSpace(keyval[1]) + quoted := false + + if len(value) != 0 && value[0] == '"' { + if v, err := strconv.Unquote(value); err == nil { + value = v + + quoted = true + } else { + return nil, &IniError{ + Message: err.Error(), + File: filename, + LineNumber: lineno, + } + } + } + + section = append(section, iniValue{ + Name: name, + Value: value, + Quoted: quoted, + LineNumber: lineno, + }) + + ret.Sections[sectionname] = section + } + + return ret, nil +} + +func (i *IniParser) matchingGroups(name string) []*Group { + if len(name) == 0 { + var ret []*Group + + i.parser.eachGroup(func(g *Group) { + ret = append(ret, g) + }) + + return ret + } + + g := i.parser.groupByName(name) + + if g != nil { + return []*Group{g} + } + + return nil +} + +func (i *IniParser) parse(ini *ini) error { + p := i.parser + + var quotesLookup = make(map[*Option]bool) + + for name, section := range ini.Sections { + groups := i.matchingGroups(name) + + if len(groups) == 0 { + return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name) + } + + for _, inival := range section { + var opt *Option + + for _, group := range groups { + opt = group.optionByName(inival.Name, func(o *Option, n string) bool { + return strings.ToLower(o.tag.Get("ini-name")) == strings.ToLower(n) + }) + + if opt != nil && len(opt.tag.Get("no-ini")) != 0 { + opt = nil + } + + if opt != nil { + break + } + } + + if opt == nil { + if (p.Options & IgnoreUnknown) == None { + return &IniError{ + Message: fmt.Sprintf("unknown option: %s", inival.Name), + File: ini.File, + LineNumber: inival.LineNumber, + } + } + + continue + } + + // ini value is ignored if override is set and + // value was previously set from non default + if i.ParseAsDefaults && !opt.isSetDefault { + continue + } + + pval := &inival.Value + + if !opt.canArgument() && len(inival.Value) == 0 { + pval = nil + } else { + if opt.value.Type().Kind() == reflect.Map { + parts := strings.SplitN(inival.Value, ":", 2) + + // only handle unquoting + if len(parts) == 2 && parts[1][0] == '"' { + if v, err := strconv.Unquote(parts[1]); err == nil { + parts[1] = v + + inival.Quoted = true + } else { + return &IniError{ + Message: err.Error(), + File: ini.File, + LineNumber: inival.LineNumber, + } + } + + s := parts[0] + ":" + parts[1] + + pval = &s + } + } + } + + if err := opt.set(pval); err != nil { + return &IniError{ + Message: err.Error(), + File: ini.File, + LineNumber: inival.LineNumber, + } + } + + // either all INI values are quoted or only values who need quoting + if _, ok := quotesLookup[opt]; !inival.Quoted || !ok { + quotesLookup[opt] = inival.Quoted + } + + opt.tag.Set("_read-ini-name", inival.Name) + } + } + + for opt, quoted := range quotesLookup { + opt.iniQuote = quoted + } + + return nil +} diff --git a/vendor/github.com/jessevdk/go-flags/ini_test.go b/vendor/github.com/jessevdk/go-flags/ini_test.go new file mode 100644 index 0000000..ad4852e --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/ini_test.go @@ -0,0 +1,1053 @@ +package flags + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +func TestWriteIni(t *testing.T) { + oldEnv := EnvSnapshot() + defer oldEnv.Restore() + os.Setenv("ENV_DEFAULT", "env-def") + + var tests = []struct { + args []string + options IniOptions + expected string + }{ + { + []string{"-vv", "--intmap=a:2", "--intmap", "b:3", "filename", "0", "3.14", "command"}, + IniDefault, + `[Application Options] +; Show verbose debug information +verbose = true +verbose = true + +; Test env-default1 value +EnvDefault1 = env-def + +; Test env-default2 value +EnvDefault2 = env-def + +[Other Options] +; A map from string to int +int-map = a:2 +int-map = b:3 + +`, + }, + { + []string{"-vv", "--intmap=a:2", "--intmap", "b:3", "filename", "0", "3.14", "command"}, + IniDefault | IniIncludeDefaults, + `[Application Options] +; Show verbose debug information +verbose = true +verbose = true + +; A slice of pointers to string +; PtrSlice = + +EmptyDescription = false + +; Test default value +Default = "Some\nvalue" + +; Test default array value +DefaultArray = Some value +DefaultArray = "Other\tvalue" + +; Testdefault map value +DefaultMap = another:value +DefaultMap = some:value + +; Test env-default1 value +EnvDefault1 = env-def + +; Test env-default2 value +EnvDefault2 = env-def + +; Option with named argument +OptionWithArgName = + +; Option with choices +OptionWithChoices = + +; Option only available in ini +only-ini = + +[Other Options] +; A slice of strings +StringSlice = some +StringSlice = value + +; A map from string to int +int-map = a:2 +int-map = b:3 + +[Subgroup] +; This is a subgroup option +Opt = + +; Not hidden inside group +NotHiddenInsideGroup = + +[Subsubgroup] +; This is a subsubgroup option +Opt = + +[command] +; Use for extra verbosity +; ExtraVerbose = + +`, + }, + { + []string{"filename", "0", "3.14", "command"}, + IniDefault | IniIncludeDefaults | IniCommentDefaults, + `[Application Options] +; Show verbose debug information +; verbose = + +; A slice of pointers to string +; PtrSlice = + +; EmptyDescription = false + +; Test default value +; Default = "Some\nvalue" + +; Test default array value +; DefaultArray = Some value +; DefaultArray = "Other\tvalue" + +; Testdefault map value +; DefaultMap = another:value +; DefaultMap = some:value + +; Test env-default1 value +EnvDefault1 = env-def + +; Test env-default2 value +EnvDefault2 = env-def + +; Option with named argument +; OptionWithArgName = + +; Option with choices +; OptionWithChoices = + +; Option only available in ini +; only-ini = + +[Other Options] +; A slice of strings +; StringSlice = some +; StringSlice = value + +; A map from string to int +; int-map = a:1 + +[Subgroup] +; This is a subgroup option +; Opt = + +; Not hidden inside group +; NotHiddenInsideGroup = + +[Subsubgroup] +; This is a subsubgroup option +; Opt = + +[command] +; Use for extra verbosity +; ExtraVerbose = + +`, + }, + { + []string{"--default=New value", "--default-array=New value", "--default-map=new:value", "filename", "0", "3.14", "command"}, + IniDefault | IniIncludeDefaults | IniCommentDefaults, + `[Application Options] +; Show verbose debug information +; verbose = + +; A slice of pointers to string +; PtrSlice = + +; EmptyDescription = false + +; Test default value +Default = New value + +; Test default array value +DefaultArray = New value + +; Testdefault map value +DefaultMap = new:value + +; Test env-default1 value +EnvDefault1 = env-def + +; Test env-default2 value +EnvDefault2 = env-def + +; Option with named argument +; OptionWithArgName = + +; Option with choices +; OptionWithChoices = + +; Option only available in ini +; only-ini = + +[Other Options] +; A slice of strings +; StringSlice = some +; StringSlice = value + +; A map from string to int +; int-map = a:1 + +[Subgroup] +; This is a subgroup option +; Opt = + +; Not hidden inside group +; NotHiddenInsideGroup = + +[Subsubgroup] +; This is a subsubgroup option +; Opt = + +[command] +; Use for extra verbosity +; ExtraVerbose = + +`, + }, + } + + for _, test := range tests { + var opts helpOptions + + p := NewNamedParser("TestIni", Default) + p.AddGroup("Application Options", "The application options", &opts) + + _, err := p.ParseArgs(test.args) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + inip := NewIniParser(p) + + var b bytes.Buffer + inip.Write(&b, test.options) + + got := b.String() + expected := test.expected + + msg := fmt.Sprintf("with arguments %+v and ini options %b", test.args, test.options) + assertDiff(t, got, expected, msg) + } +} + +func TestReadIni_flagEquivalent(t *testing.T) { + type options struct { + Opt1 bool `long:"opt1"` + + Group1 struct { + Opt2 bool `long:"opt2"` + } `group:"group1"` + + Group2 struct { + Opt3 bool `long:"opt3"` + } `group:"group2" namespace:"ns1"` + + Cmd1 struct { + Opt4 bool `long:"opt4"` + Opt5 bool `long:"foo.opt5"` + + Group1 struct { + Opt6 bool `long:"opt6"` + Opt7 bool `long:"foo.opt7"` + } `group:"group1"` + + Group2 struct { + Opt8 bool `long:"opt8"` + } `group:"group2" namespace:"ns1"` + } `command:"cmd1"` + } + + a := ` +opt1=true + +[group1] +opt2=true + +[group2] +ns1.opt3=true + +[cmd1] +opt4=true +foo.opt5=true + +[cmd1.group1] +opt6=true +foo.opt7=true + +[cmd1.group2] +ns1.opt8=true +` + b := ` +opt1=true +opt2=true +ns1.opt3=true + +[cmd1] +opt4=true +foo.opt5=true +opt6=true +foo.opt7=true +ns1.opt8=true +` + + parse := func(readIni string) (opts options, writeIni string) { + p := NewNamedParser("TestIni", Default) + p.AddGroup("Application Options", "The application options", &opts) + + inip := NewIniParser(p) + err := inip.Parse(strings.NewReader(readIni)) + + if err != nil { + t.Fatalf("Unexpected error: %s\n\nFile:\n%s", err, readIni) + } + + var b bytes.Buffer + inip.Write(&b, Default) + + return opts, b.String() + } + + aOpt, aIni := parse(a) + bOpt, bIni := parse(b) + + assertDiff(t, aIni, bIni, "") + if !reflect.DeepEqual(aOpt, bOpt) { + t.Errorf("not equal") + } +} + +func TestReadIni(t *testing.T) { + var opts helpOptions + + p := NewNamedParser("TestIni", Default) + p.AddGroup("Application Options", "The application options", &opts) + + inip := NewIniParser(p) + + inic := ` +; Show verbose debug information +verbose = true +verbose = true + +DefaultMap = another:"value\n1" +DefaultMap = some:value 2 + +[Application Options] +; A slice of pointers to string +; PtrSlice = + +; Test default value +Default = "New\nvalue" + +; Test env-default1 value +EnvDefault1 = New value + +[Other Options] +# A slice of strings +StringSlice = "some\nvalue" +StringSlice = another value + +; A map from string to int +int-map = a:2 +int-map = b:3 + +` + + b := strings.NewReader(inic) + err := inip.Parse(b) + + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + assertBoolArray(t, opts.Verbose, []bool{true, true}) + + if v := map[string]string{"another": "value\n1", "some": "value 2"}; !reflect.DeepEqual(opts.DefaultMap, v) { + t.Fatalf("Expected %#v for DefaultMap but got %#v", v, opts.DefaultMap) + } + + assertString(t, opts.Default, "New\nvalue") + + assertString(t, opts.EnvDefault1, "New value") + + assertStringArray(t, opts.Other.StringSlice, []string{"some\nvalue", "another value"}) + + if v, ok := opts.Other.IntMap["a"]; !ok { + t.Errorf("Expected \"a\" in Other.IntMap") + } else if v != 2 { + t.Errorf("Expected Other.IntMap[\"a\"] = 2, but got %v", v) + } + + if v, ok := opts.Other.IntMap["b"]; !ok { + t.Errorf("Expected \"b\" in Other.IntMap") + } else if v != 3 { + t.Errorf("Expected Other.IntMap[\"b\"] = 3, but got %v", v) + } +} + +func TestReadAndWriteIni(t *testing.T) { + var tests = []struct { + options IniOptions + read string + write string + }{ + { + IniIncludeComments, + `[Application Options] +; Show verbose debug information +verbose = true +verbose = true + +; Test default value +Default = "quote me" + +; Test default array value +DefaultArray = 1 +DefaultArray = "2" +DefaultArray = 3 + +; Testdefault map value +; DefaultMap = + +; Test env-default1 value +EnvDefault1 = env-def + +; Test env-default2 value +EnvDefault2 = env-def + +[Other Options] +; A slice of strings +; StringSlice = + +; A map from string to int +int-map = a:2 +int-map = b:"3" + +`, + `[Application Options] +; Show verbose debug information +verbose = true +verbose = true + +; Test default value +Default = "quote me" + +; Test default array value +DefaultArray = 1 +DefaultArray = 2 +DefaultArray = 3 + +; Testdefault map value +; DefaultMap = + +; Test env-default1 value +EnvDefault1 = env-def + +; Test env-default2 value +EnvDefault2 = env-def + +[Other Options] +; A slice of strings +; StringSlice = + +; A map from string to int +int-map = a:2 +int-map = b:3 + +`, + }, + { + IniIncludeComments, + `[Application Options] +; Show verbose debug information +verbose = true +verbose = true + +; Test default value +Default = "quote me" + +; Test default array value +DefaultArray = "1" +DefaultArray = "2" +DefaultArray = "3" + +; Testdefault map value +; DefaultMap = + +; Test env-default1 value +EnvDefault1 = env-def + +; Test env-default2 value +EnvDefault2 = env-def + +[Other Options] +; A slice of strings +; StringSlice = + +; A map from string to int +int-map = a:"2" +int-map = b:"3" + +`, + `[Application Options] +; Show verbose debug information +verbose = true +verbose = true + +; Test default value +Default = "quote me" + +; Test default array value +DefaultArray = "1" +DefaultArray = "2" +DefaultArray = "3" + +; Testdefault map value +; DefaultMap = + +; Test env-default1 value +EnvDefault1 = env-def + +; Test env-default2 value +EnvDefault2 = env-def + +[Other Options] +; A slice of strings +; StringSlice = + +; A map from string to int +int-map = a:"2" +int-map = b:"3" + +`, + }, + } + + for _, test := range tests { + var opts helpOptions + + p := NewNamedParser("TestIni", Default) + p.AddGroup("Application Options", "The application options", &opts) + + inip := NewIniParser(p) + + read := strings.NewReader(test.read) + err := inip.Parse(read) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + var write bytes.Buffer + inip.Write(&write, test.options) + + got := write.String() + + msg := fmt.Sprintf("with ini options %b", test.options) + assertDiff(t, got, test.write, msg) + } +} + +func TestReadIniWrongQuoting(t *testing.T) { + var tests = []struct { + iniFile string + lineNumber uint + }{ + { + iniFile: `Default = "New\nvalue`, + lineNumber: 1, + }, + { + iniFile: `StringSlice = "New\nvalue`, + lineNumber: 1, + }, + { + iniFile: `StringSlice = "New\nvalue" + StringSlice = "Second\nvalue`, + lineNumber: 2, + }, + { + iniFile: `DefaultMap = some:"value`, + lineNumber: 1, + }, + { + iniFile: `DefaultMap = some:value + DefaultMap = another:"value`, + lineNumber: 2, + }, + } + + for _, test := range tests { + var opts helpOptions + + p := NewNamedParser("TestIni", Default) + p.AddGroup("Application Options", "The application options", &opts) + + inip := NewIniParser(p) + + inic := test.iniFile + + b := strings.NewReader(inic) + err := inip.Parse(b) + + if err == nil { + t.Fatalf("Expect error") + } + + iniError := err.(*IniError) + + if iniError.LineNumber != test.lineNumber { + t.Fatalf("Expect error on line %d", test.lineNumber) + } + } +} + +func TestIniCommands(t *testing.T) { + var opts struct { + Value string `short:"v" long:"value"` + + Add struct { + Name int `short:"n" long:"name" ini-name:"AliasName"` + + Other struct { + O string `short:"o" long:"other"` + } `group:"Other Options"` + } `command:"add"` + } + + p := NewNamedParser("TestIni", Default) + p.AddGroup("Application Options", "The application options", &opts) + + inip := NewIniParser(p) + + inic := `[Application Options] +value = some value + +[add] +AliasName = 5 + +[add.Other Options] +other = subgroup + +` + + b := strings.NewReader(inic) + err := inip.Parse(b) + + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + assertString(t, opts.Value, "some value") + + if opts.Add.Name != 5 { + t.Errorf("Expected opts.Add.Name to be 5, but got %v", opts.Add.Name) + } + + assertString(t, opts.Add.Other.O, "subgroup") + + // Test writing it back + buf := &bytes.Buffer{} + + inip.Write(buf, IniDefault) + + assertDiff(t, buf.String(), inic, "ini contents") +} + +func TestIniNoIni(t *testing.T) { + var opts struct { + NoValue string `short:"n" long:"novalue" no-ini:"yes"` + Value string `short:"v" long:"value"` + } + + p := NewNamedParser("TestIni", Default) + p.AddGroup("Application Options", "The application options", &opts) + + inip := NewIniParser(p) + + // read INI + inic := `[Application Options] +novalue = some value +value = some other value +` + + b := strings.NewReader(inic) + err := inip.Parse(b) + + if err == nil { + t.Fatalf("Expected error") + } + + iniError := err.(*IniError) + + if v := uint(2); iniError.LineNumber != v { + t.Errorf("Expected opts.Add.Name to be %d, but got %d", v, iniError.LineNumber) + } + + if v := "unknown option: novalue"; iniError.Message != v { + t.Errorf("Expected opts.Add.Name to be %s, but got %s", v, iniError.Message) + } + + // write INI + opts.NoValue = "some value" + opts.Value = "some other value" + + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("Cannot create temporary file: %s", err) + } + defer os.Remove(file.Name()) + + err = inip.WriteFile(file.Name(), IniIncludeDefaults) + if err != nil { + t.Fatalf("Could not write ini file: %s", err) + } + + found, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatalf("Could not read written ini file: %s", err) + } + + expected := "[Application Options]\nValue = some other value\n\n" + + assertDiff(t, string(found), expected, "ini content") +} + +func TestIniParse(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("Cannot create temporary file: %s", err) + } + defer os.Remove(file.Name()) + + _, err = file.WriteString("value = 123") + if err != nil { + t.Fatalf("Cannot write to temporary file: %s", err) + } + + file.Close() + + var opts struct { + Value int `long:"value"` + } + + err = IniParse(file.Name(), &opts) + if err != nil { + t.Fatalf("Could not parse ini: %s", err) + } + + if opts.Value != 123 { + t.Fatalf("Expected Value to be \"123\" but was \"%d\"", opts.Value) + } +} + +func TestIniCliOverrides(t *testing.T) { + file, err := ioutil.TempFile("", "") + + if err != nil { + t.Fatalf("Cannot create temporary file: %s", err) + } + + defer os.Remove(file.Name()) + + _, err = file.WriteString("values = 123\n") + _, err = file.WriteString("values = 456\n") + + if err != nil { + t.Fatalf("Cannot write to temporary file: %s", err) + } + + file.Close() + + var opts struct { + Values []int `long:"values"` + } + + p := NewParser(&opts, Default) + err = NewIniParser(p).ParseFile(file.Name()) + + if err != nil { + t.Fatalf("Could not parse ini: %s", err) + } + + _, err = p.ParseArgs([]string{"--values", "111", "--values", "222"}) + + if err != nil { + t.Fatalf("Failed to parse arguments: %s", err) + } + + if len(opts.Values) != 2 { + t.Fatalf("Expected Values to contain two elements, but got %d", len(opts.Values)) + } + + if opts.Values[0] != 111 { + t.Fatalf("Expected Values[0] to be 111, but got '%d'", opts.Values[0]) + } + + if opts.Values[1] != 222 { + t.Fatalf("Expected Values[1] to be 222, but got '%d'", opts.Values[1]) + } +} + +func TestIniOverrides(t *testing.T) { + file, err := ioutil.TempFile("", "") + + if err != nil { + t.Fatalf("Cannot create temporary file: %s", err) + } + + defer os.Remove(file.Name()) + + _, err = file.WriteString("value-with-default = \"ini-value\"\n") + _, err = file.WriteString("value-with-default-override-cli = \"ini-value\"\n") + + if err != nil { + t.Fatalf("Cannot write to temporary file: %s", err) + } + + file.Close() + + var opts struct { + ValueWithDefault string `long:"value-with-default" default:"value"` + ValueWithDefaultOverrideCli string `long:"value-with-default-override-cli" default:"value"` + } + + p := NewParser(&opts, Default) + err = NewIniParser(p).ParseFile(file.Name()) + + if err != nil { + t.Fatalf("Could not parse ini: %s", err) + } + + _, err = p.ParseArgs([]string{"--value-with-default-override-cli", "cli-value"}) + + if err != nil { + t.Fatalf("Failed to parse arguments: %s", err) + } + + assertString(t, opts.ValueWithDefault, "ini-value") + assertString(t, opts.ValueWithDefaultOverrideCli, "cli-value") +} + +func TestIniRequired(t *testing.T) { + var opts struct { + Required string `short:"r" required:"yes" description:"required"` + Config func(s string) error `long:"config" default:"no-ini-file" no-ini:"true"` + } + + p := NewParser(&opts, Default) + + opts.Config = func(s string) error { + inip := NewIniParser(p) + inip.ParseAsDefaults = true + return inip.Parse(strings.NewReader("Required = ini-value\n")) + } + + _, err := p.ParseArgs([]string{"-r", "cli-value"}) + + if err != nil { + t.Fatalf("Failed to parse arguments: %s", err) + } + + assertString(t, opts.Required, "cli-value") +} + +func TestWriteFile(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("Cannot create temporary file: %s", err) + } + defer os.Remove(file.Name()) + + var opts struct { + Value int `long:"value"` + } + + opts.Value = 123 + + p := NewParser(&opts, Default) + ini := NewIniParser(p) + + err = ini.WriteFile(file.Name(), IniIncludeDefaults) + if err != nil { + t.Fatalf("Could not write ini file: %s", err) + } + + found, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatalf("Could not read written ini file: %s", err) + } + + expected := "[Application Options]\nValue = 123\n\n" + + assertDiff(t, string(found), expected, "ini content") +} + +func TestOverwriteRequiredOptions(t *testing.T) { + var tests = []struct { + args []string + expected []string + }{ + { + args: []string{"--value", "from CLI"}, + expected: []string{ + "from CLI", + "from default", + }, + }, + { + args: []string{"--value", "from CLI", "--default", "from CLI"}, + expected: []string{ + "from CLI", + "from CLI", + }, + }, + { + args: []string{"--config", "no file name"}, + expected: []string{ + "from INI", + "from INI", + }, + }, + { + args: []string{"--value", "from CLI before", "--default", "from CLI before", "--config", "no file name"}, + expected: []string{ + "from INI", + "from INI", + }, + }, + { + args: []string{"--value", "from CLI before", "--default", "from CLI before", "--config", "no file name", "--value", "from CLI after", "--default", "from CLI after"}, + expected: []string{ + "from CLI after", + "from CLI after", + }, + }, + } + + for _, test := range tests { + var opts struct { + Config func(s string) error `long:"config" no-ini:"true"` + Value string `long:"value" required:"true"` + Default string `long:"default" required:"true" default:"from default"` + } + + p := NewParser(&opts, Default) + + opts.Config = func(s string) error { + ini := NewIniParser(p) + + return ini.Parse(bytes.NewBufferString("value = from INI\ndefault = from INI")) + } + + _, err := p.ParseArgs(test.args) + if err != nil { + t.Fatalf("Unexpected error %s with args %+v", err, test.args) + } + + if opts.Value != test.expected[0] { + t.Fatalf("Expected Value to be \"%s\" but was \"%s\" with args %+v", test.expected[0], opts.Value, test.args) + } + + if opts.Default != test.expected[1] { + t.Fatalf("Expected Default to be \"%s\" but was \"%s\" with args %+v", test.expected[1], opts.Default, test.args) + } + } +} + +func TestIniOverwriteOptions(t *testing.T) { + var tests = []struct { + args []string + expected string + toggled bool + }{ + { + args: []string{}, + expected: "from default", + }, + { + args: []string{"--value", "from CLI"}, + expected: "from CLI", + }, + { + args: []string{"--config", "no file name"}, + expected: "from INI", + toggled: true, + }, + { + args: []string{"--value", "from CLI before", "--config", "no file name"}, + expected: "from CLI before", + toggled: true, + }, + { + args: []string{"--config", "no file name", "--value", "from CLI after"}, + expected: "from CLI after", + toggled: true, + }, + { + args: []string{"--toggle"}, + toggled: true, + expected: "from default", + }, + } + + for _, test := range tests { + var opts struct { + Config string `long:"config" no-ini:"true"` + Value string `long:"value" default:"from default"` + Toggle bool `long:"toggle"` + } + + p := NewParser(&opts, Default) + + _, err := p.ParseArgs(test.args) + if err != nil { + t.Fatalf("Unexpected error %s with args %+v", err, test.args) + } + + if opts.Config != "" { + inip := NewIniParser(p) + inip.ParseAsDefaults = true + + err = inip.Parse(bytes.NewBufferString("value = from INI\ntoggle = true")) + if err != nil { + t.Fatalf("Unexpected error %s with args %+v", err, test.args) + } + } + + if opts.Value != test.expected { + t.Fatalf("Expected Value to be \"%s\" but was \"%s\" with args %+v", test.expected, opts.Value, test.args) + } + + if opts.Toggle != test.toggled { + t.Fatalf("Expected Toggle to be \"%v\" but was \"%v\" with args %+v", test.toggled, opts.Toggle, test.args) + } + + } +} diff --git a/vendor/github.com/jessevdk/go-flags/long_test.go b/vendor/github.com/jessevdk/go-flags/long_test.go new file mode 100644 index 0000000..02fc8c7 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/long_test.go @@ -0,0 +1,85 @@ +package flags + +import ( + "testing" +) + +func TestLong(t *testing.T) { + var opts = struct { + Value bool `long:"value"` + }{} + + ret := assertParseSuccess(t, &opts, "--value") + + assertStringArray(t, ret, []string{}) + + if !opts.Value { + t.Errorf("Expected Value to be true") + } +} + +func TestLongArg(t *testing.T) { + var opts = struct { + Value string `long:"value"` + }{} + + ret := assertParseSuccess(t, &opts, "--value", "value") + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "value") +} + +func TestLongArgEqual(t *testing.T) { + var opts = struct { + Value string `long:"value"` + }{} + + ret := assertParseSuccess(t, &opts, "--value=value") + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "value") +} + +func TestLongDefault(t *testing.T) { + var opts = struct { + Value string `long:"value" default:"value"` + }{} + + ret := assertParseSuccess(t, &opts) + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "value") +} + +func TestLongOptional(t *testing.T) { + var opts = struct { + Value string `long:"value" optional:"yes" optional-value:"value"` + }{} + + ret := assertParseSuccess(t, &opts, "--value") + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "value") +} + +func TestLongOptionalArg(t *testing.T) { + var opts = struct { + Value string `long:"value" optional:"yes" optional-value:"value"` + }{} + + ret := assertParseSuccess(t, &opts, "--value", "no") + + assertStringArray(t, ret, []string{"no"}) + assertString(t, opts.Value, "value") +} + +func TestLongOptionalArgEqual(t *testing.T) { + var opts = struct { + Value string `long:"value" optional:"yes" optional-value:"value"` + }{} + + ret := assertParseSuccess(t, &opts, "--value=value", "no") + + assertStringArray(t, ret, []string{"no"}) + assertString(t, opts.Value, "value") +} diff --git a/vendor/github.com/jessevdk/go-flags/man.go b/vendor/github.com/jessevdk/go-flags/man.go new file mode 100644 index 0000000..0cb114e --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/man.go @@ -0,0 +1,205 @@ +package flags + +import ( + "fmt" + "io" + "runtime" + "strings" + "time" +) + +func manQuote(s string) string { + return strings.Replace(s, "\\", "\\\\", -1) +} + +func formatForMan(wr io.Writer, s string) { + for { + idx := strings.IndexRune(s, '`') + + if idx < 0 { + fmt.Fprintf(wr, "%s", manQuote(s)) + break + } + + fmt.Fprintf(wr, "%s", manQuote(s[:idx])) + + s = s[idx+1:] + idx = strings.IndexRune(s, '\'') + + if idx < 0 { + fmt.Fprintf(wr, "%s", manQuote(s)) + break + } + + fmt.Fprintf(wr, "\\fB%s\\fP", manQuote(s[:idx])) + s = s[idx+1:] + } +} + +func writeManPageOptions(wr io.Writer, grp *Group) { + grp.eachGroup(func(group *Group) { + if group.Hidden || len(group.options) == 0 { + return + } + + // If the parent (grp) has any subgroups, display their descriptions as + // subsection headers similar to the output of --help. + if group.ShortDescription != "" && len(grp.groups) > 0 { + fmt.Fprintf(wr, ".SS %s\n", group.ShortDescription) + + if group.LongDescription != "" { + formatForMan(wr, group.LongDescription) + fmt.Fprintln(wr, "") + } + } + + for _, opt := range group.options { + if !opt.canCli() || opt.Hidden { + continue + } + + fmt.Fprintln(wr, ".TP") + fmt.Fprintf(wr, "\\fB") + + if opt.ShortName != 0 { + fmt.Fprintf(wr, "\\fB\\-%c\\fR", opt.ShortName) + } + + if len(opt.LongName) != 0 { + if opt.ShortName != 0 { + fmt.Fprintf(wr, ", ") + } + + fmt.Fprintf(wr, "\\fB\\-\\-%s\\fR", manQuote(opt.LongNameWithNamespace())) + } + + if len(opt.ValueName) != 0 || opt.OptionalArgument { + if opt.OptionalArgument { + fmt.Fprintf(wr, " [\\fI%s=%s\\fR]", manQuote(opt.ValueName), manQuote(strings.Join(quoteV(opt.OptionalValue), ", "))) + } else { + fmt.Fprintf(wr, " \\fI%s\\fR", manQuote(opt.ValueName)) + } + } + + if len(opt.Default) != 0 { + fmt.Fprintf(wr, " ", manQuote(strings.Join(quoteV(opt.Default), ", "))) + } else if len(opt.EnvDefaultKey) != 0 { + if runtime.GOOS == "windows" { + fmt.Fprintf(wr, " ", manQuote(opt.EnvDefaultKey)) + } else { + fmt.Fprintf(wr, " ", manQuote(opt.EnvDefaultKey)) + } + } + + if opt.Required { + fmt.Fprintf(wr, " (\\fIrequired\\fR)") + } + + fmt.Fprintln(wr, "\\fP") + + if len(opt.Description) != 0 { + formatForMan(wr, opt.Description) + fmt.Fprintln(wr, "") + } + } + }) +} + +func writeManPageSubcommands(wr io.Writer, name string, root *Command) { + commands := root.sortedVisibleCommands() + + for _, c := range commands { + var nn string + + if c.Hidden { + continue + } + + if len(name) != 0 { + nn = name + " " + c.Name + } else { + nn = c.Name + } + + writeManPageCommand(wr, nn, root, c) + } +} + +func writeManPageCommand(wr io.Writer, name string, root *Command, command *Command) { + fmt.Fprintf(wr, ".SS %s\n", name) + fmt.Fprintln(wr, command.ShortDescription) + + if len(command.LongDescription) > 0 { + fmt.Fprintln(wr, "") + + cmdstart := fmt.Sprintf("The %s command", manQuote(command.Name)) + + if strings.HasPrefix(command.LongDescription, cmdstart) { + fmt.Fprintf(wr, "The \\fI%s\\fP command", manQuote(command.Name)) + + formatForMan(wr, command.LongDescription[len(cmdstart):]) + fmt.Fprintln(wr, "") + } else { + formatForMan(wr, command.LongDescription) + fmt.Fprintln(wr, "") + } + } + + var usage string + if us, ok := command.data.(Usage); ok { + usage = us.Usage() + } else if command.hasCliOptions() { + usage = fmt.Sprintf("[%s-OPTIONS]", command.Name) + } + + var pre string + if root.hasCliOptions() { + pre = fmt.Sprintf("%s [OPTIONS] %s", root.Name, command.Name) + } else { + pre = fmt.Sprintf("%s %s", root.Name, command.Name) + } + + if len(usage) > 0 { + fmt.Fprintf(wr, "\n\\fBUsage\\fP: %s %s\n.TP\n", manQuote(pre), manQuote(usage)) + } + + if len(command.Aliases) > 0 { + fmt.Fprintf(wr, "\n\\fBAliases\\fP: %s\n\n", manQuote(strings.Join(command.Aliases, ", "))) + } + + writeManPageOptions(wr, command.Group) + writeManPageSubcommands(wr, name, command) +} + +// WriteManPage writes a basic man page in groff format to the specified +// writer. +func (p *Parser) WriteManPage(wr io.Writer) { + t := time.Now() + + fmt.Fprintf(wr, ".TH %s 1 \"%s\"\n", manQuote(p.Name), t.Format("2 January 2006")) + fmt.Fprintln(wr, ".SH NAME") + fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuote(p.ShortDescription)) + fmt.Fprintln(wr, ".SH SYNOPSIS") + + usage := p.Usage + + if len(usage) == 0 { + usage = "[OPTIONS]" + } + + fmt.Fprintf(wr, "\\fB%s\\fP %s\n", manQuote(p.Name), manQuote(usage)) + fmt.Fprintln(wr, ".SH DESCRIPTION") + + formatForMan(wr, p.LongDescription) + fmt.Fprintln(wr, "") + + fmt.Fprintln(wr, ".SH OPTIONS") + + writeManPageOptions(wr, p.Command.Group) + + if len(p.visibleCommands()) > 0 { + fmt.Fprintln(wr, ".SH COMMANDS") + + writeManPageSubcommands(wr, "", p.Command) + } +} diff --git a/vendor/github.com/jessevdk/go-flags/marshal_test.go b/vendor/github.com/jessevdk/go-flags/marshal_test.go new file mode 100644 index 0000000..4cfe865 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/marshal_test.go @@ -0,0 +1,119 @@ +package flags + +import ( + "fmt" + "testing" +) + +type marshalled string + +func (m *marshalled) UnmarshalFlag(value string) error { + if value == "yes" { + *m = "true" + } else if value == "no" { + *m = "false" + } else { + return fmt.Errorf("`%s' is not a valid value, please specify `yes' or `no'", value) + } + + return nil +} + +func (m marshalled) MarshalFlag() (string, error) { + if m == "true" { + return "yes", nil + } + + return "no", nil +} + +type marshalledError bool + +func (m marshalledError) MarshalFlag() (string, error) { + return "", newErrorf(ErrMarshal, "Failed to marshal") +} + +func TestUnmarshal(t *testing.T) { + var opts = struct { + Value marshalled `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v=yes") + + assertStringArray(t, ret, []string{}) + + if opts.Value != "true" { + t.Errorf("Expected Value to be \"true\"") + } +} + +func TestUnmarshalDefault(t *testing.T) { + var opts = struct { + Value marshalled `short:"v" default:"yes"` + }{} + + ret := assertParseSuccess(t, &opts) + + assertStringArray(t, ret, []string{}) + + if opts.Value != "true" { + t.Errorf("Expected Value to be \"true\"") + } +} + +func TestUnmarshalOptional(t *testing.T) { + var opts = struct { + Value marshalled `short:"v" optional:"yes" optional-value:"yes"` + }{} + + ret := assertParseSuccess(t, &opts, "-v") + + assertStringArray(t, ret, []string{}) + + if opts.Value != "true" { + t.Errorf("Expected Value to be \"true\"") + } +} + +func TestUnmarshalError(t *testing.T) { + var opts = struct { + Value marshalled `short:"v"` + }{} + + assertParseFail(t, ErrMarshal, fmt.Sprintf("invalid argument for flag `%cv' (expected flags.marshalled): `invalid' is not a valid value, please specify `yes' or `no'", defaultShortOptDelimiter), &opts, "-vinvalid") +} + +func TestUnmarshalPositionalError(t *testing.T) { + var opts = struct { + Args struct { + Value marshalled + } `positional-args:"yes"` + }{} + + parser := NewParser(&opts, Default&^PrintErrors) + _, err := parser.ParseArgs([]string{"invalid"}) + + msg := "`invalid' is not a valid value, please specify `yes' or `no'" + + if err == nil { + assertFatalf(t, "Expected error: %s", msg) + return + } + + if err.Error() != msg { + assertErrorf(t, "Expected error message %#v, but got %#v", msg, err.Error()) + } +} + +func TestMarshalError(t *testing.T) { + var opts = struct { + Value marshalledError `short:"v"` + }{} + + p := NewParser(&opts, Default) + o := p.Command.Groups()[0].Options()[0] + + _, err := convertToString(o.value, o.tag) + + assertError(t, err, ErrMarshal, "Failed to marshal") +} diff --git a/vendor/github.com/jessevdk/go-flags/multitag.go b/vendor/github.com/jessevdk/go-flags/multitag.go new file mode 100644 index 0000000..96bb1a3 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/multitag.go @@ -0,0 +1,140 @@ +package flags + +import ( + "strconv" +) + +type multiTag struct { + value string + cache map[string][]string +} + +func newMultiTag(v string) multiTag { + return multiTag{ + value: v, + } +} + +func (x *multiTag) scan() (map[string][]string, error) { + v := x.value + + ret := make(map[string][]string) + + // This is mostly copied from reflect.StructTag.Get + for v != "" { + i := 0 + + // Skip whitespace + for i < len(v) && v[i] == ' ' { + i++ + } + + v = v[i:] + + if v == "" { + break + } + + // Scan to colon to find key + i = 0 + + for i < len(v) && v[i] != ' ' && v[i] != ':' && v[i] != '"' { + i++ + } + + if i >= len(v) { + return nil, newErrorf(ErrTag, "expected `:' after key name, but got end of tag (in `%v`)", x.value) + } + + if v[i] != ':' { + return nil, newErrorf(ErrTag, "expected `:' after key name, but got `%v' (in `%v`)", v[i], x.value) + } + + if i+1 >= len(v) { + return nil, newErrorf(ErrTag, "expected `\"' to start tag value at end of tag (in `%v`)", x.value) + } + + if v[i+1] != '"' { + return nil, newErrorf(ErrTag, "expected `\"' to start tag value, but got `%v' (in `%v`)", v[i+1], x.value) + } + + name := v[:i] + v = v[i+1:] + + // Scan quoted string to find value + i = 1 + + for i < len(v) && v[i] != '"' { + if v[i] == '\n' { + return nil, newErrorf(ErrTag, "unexpected newline in tag value `%v' (in `%v`)", name, x.value) + } + + if v[i] == '\\' { + i++ + } + i++ + } + + if i >= len(v) { + return nil, newErrorf(ErrTag, "expected end of tag value `\"' at end of tag (in `%v`)", x.value) + } + + val, err := strconv.Unquote(v[:i+1]) + + if err != nil { + return nil, newErrorf(ErrTag, "Malformed value of tag `%v:%v` => %v (in `%v`)", name, v[:i+1], err, x.value) + } + + v = v[i+1:] + + ret[name] = append(ret[name], val) + } + + return ret, nil +} + +func (x *multiTag) Parse() error { + vals, err := x.scan() + x.cache = vals + + return err +} + +func (x *multiTag) cached() map[string][]string { + if x.cache == nil { + cache, _ := x.scan() + + if cache == nil { + cache = make(map[string][]string) + } + + x.cache = cache + } + + return x.cache +} + +func (x *multiTag) Get(key string) string { + c := x.cached() + + if v, ok := c[key]; ok { + return v[len(v)-1] + } + + return "" +} + +func (x *multiTag) GetMany(key string) []string { + c := x.cached() + return c[key] +} + +func (x *multiTag) Set(key string, value string) { + c := x.cached() + c[key] = []string{value} +} + +func (x *multiTag) SetMany(key string, value []string) { + c := x.cached() + c[key] = value +} diff --git a/vendor/github.com/jessevdk/go-flags/option.go b/vendor/github.com/jessevdk/go-flags/option.go new file mode 100644 index 0000000..ea09fb4 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/option.go @@ -0,0 +1,461 @@ +package flags + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "syscall" + "unicode/utf8" +) + +// Option flag information. Contains a description of the option, short and +// long name as well as a default value and whether an argument for this +// flag is optional. +type Option struct { + // The description of the option flag. This description is shown + // automatically in the built-in help. + Description string + + // The short name of the option (a single character). If not 0, the + // option flag can be 'activated' using -. Either ShortName + // or LongName needs to be non-empty. + ShortName rune + + // The long name of the option. If not "", the option flag can be + // activated using --. Either ShortName or LongName needs + // to be non-empty. + LongName string + + // The default value of the option. + Default []string + + // The optional environment default value key name. + EnvDefaultKey string + + // The optional delimiter string for EnvDefaultKey values. + EnvDefaultDelim string + + // If true, specifies that the argument to an option flag is optional. + // When no argument to the flag is specified on the command line, the + // value of OptionalValue will be set in the field this option represents. + // This is only valid for non-boolean options. + OptionalArgument bool + + // The optional value of the option. The optional value is used when + // the option flag is marked as having an OptionalArgument. This means + // that when the flag is specified, but no option argument is given, + // the value of the field this option represents will be set to + // OptionalValue. This is only valid for non-boolean options. + OptionalValue []string + + // If true, the option _must_ be specified on the command line. If the + // option is not specified, the parser will generate an ErrRequired type + // error. + Required bool + + // A name for the value of an option shown in the Help as --flag [ValueName] + ValueName string + + // A mask value to show in the help instead of the default value. This + // is useful for hiding sensitive information in the help, such as + // passwords. + DefaultMask string + + // If non empty, only a certain set of values is allowed for an option. + Choices []string + + // If true, the option is not displayed in the help or man page + Hidden bool + + // The group which the option belongs to + group *Group + + // The struct field which the option represents. + field reflect.StructField + + // The struct field value which the option represents. + value reflect.Value + + // Determines if the option will be always quoted in the INI output + iniQuote bool + + tag multiTag + isSet bool + isSetDefault bool + preventDefault bool + + defaultLiteral string +} + +// LongNameWithNamespace returns the option's long name with the group namespaces +// prepended by walking up the option's group tree. Namespaces and the long name +// itself are separated by the parser's namespace delimiter. If the long name is +// empty an empty string is returned. +func (option *Option) LongNameWithNamespace() string { + if len(option.LongName) == 0 { + return "" + } + + // fetch the namespace delimiter from the parser which is always at the + // end of the group hierarchy + namespaceDelimiter := "" + g := option.group + + for { + if p, ok := g.parent.(*Parser); ok { + namespaceDelimiter = p.NamespaceDelimiter + + break + } + + switch i := g.parent.(type) { + case *Command: + g = i.Group + case *Group: + g = i + } + } + + // concatenate long name with namespace + longName := option.LongName + g = option.group + + for g != nil { + if g.Namespace != "" { + longName = g.Namespace + namespaceDelimiter + longName + } + + switch i := g.parent.(type) { + case *Command: + g = i.Group + case *Group: + g = i + case *Parser: + g = nil + } + } + + return longName +} + +// String converts an option to a human friendly readable string describing the +// option. +func (option *Option) String() string { + var s string + var short string + + if option.ShortName != 0 { + data := make([]byte, utf8.RuneLen(option.ShortName)) + utf8.EncodeRune(data, option.ShortName) + short = string(data) + + if len(option.LongName) != 0 { + s = fmt.Sprintf("%s%s, %s%s", + string(defaultShortOptDelimiter), short, + defaultLongOptDelimiter, option.LongNameWithNamespace()) + } else { + s = fmt.Sprintf("%s%s", string(defaultShortOptDelimiter), short) + } + } else if len(option.LongName) != 0 { + s = fmt.Sprintf("%s%s", defaultLongOptDelimiter, option.LongNameWithNamespace()) + } + + return s +} + +// Value returns the option value as an interface{}. +func (option *Option) Value() interface{} { + return option.value.Interface() +} + +// Field returns the reflect struct field of the option. +func (option *Option) Field() reflect.StructField { + return option.field +} + +// IsSet returns true if option has been set +func (option *Option) IsSet() bool { + return option.isSet +} + +// IsSetDefault returns true if option has been set via the default option tag +func (option *Option) IsSetDefault() bool { + return option.isSetDefault +} + +// Set the value of an option to the specified value. An error will be returned +// if the specified value could not be converted to the corresponding option +// value type. +func (option *Option) set(value *string) error { + kind := option.value.Type().Kind() + + if (kind == reflect.Map || kind == reflect.Slice) && !option.isSet { + option.empty() + } + + option.isSet = true + option.preventDefault = true + + if len(option.Choices) != 0 { + found := false + + for _, choice := range option.Choices { + if choice == *value { + found = true + break + } + } + + if !found { + allowed := strings.Join(option.Choices[0:len(option.Choices)-1], ", ") + + if len(option.Choices) > 1 { + allowed += " or " + option.Choices[len(option.Choices)-1] + } + + return newErrorf(ErrInvalidChoice, + "Invalid value `%s' for option `%s'. Allowed values are: %s", + *value, option, allowed) + } + } + + if option.isFunc() { + return option.call(value) + } else if value != nil { + return convert(*value, option.value, option.tag) + } + + return convert("", option.value, option.tag) +} + +func (option *Option) canCli() bool { + return option.ShortName != 0 || len(option.LongName) != 0 +} + +func (option *Option) canArgument() bool { + if u := option.isUnmarshaler(); u != nil { + return true + } + + return !option.isBool() +} + +func (option *Option) emptyValue() reflect.Value { + tp := option.value.Type() + + if tp.Kind() == reflect.Map { + return reflect.MakeMap(tp) + } + + return reflect.Zero(tp) +} + +func (option *Option) empty() { + if !option.isFunc() { + option.value.Set(option.emptyValue()) + } +} + +func (option *Option) clearDefault() { + usedDefault := option.Default + + if envKey := option.EnvDefaultKey; envKey != "" { + // os.Getenv() makes no distinction between undefined and + // empty values, so we use syscall.Getenv() + if value, ok := syscall.Getenv(envKey); ok { + if option.EnvDefaultDelim != "" { + usedDefault = strings.Split(value, + option.EnvDefaultDelim) + } else { + usedDefault = []string{value} + } + } + } + + option.isSetDefault = true + + if len(usedDefault) > 0 { + option.empty() + + for _, d := range usedDefault { + option.set(&d) + option.isSetDefault = true + } + } else { + tp := option.value.Type() + + switch tp.Kind() { + case reflect.Map: + if option.value.IsNil() { + option.empty() + } + case reflect.Slice: + if option.value.IsNil() { + option.empty() + } + } + } +} + +func (option *Option) valueIsDefault() bool { + // Check if the value of the option corresponds to its + // default value + emptyval := option.emptyValue() + + checkvalptr := reflect.New(emptyval.Type()) + checkval := reflect.Indirect(checkvalptr) + + checkval.Set(emptyval) + + if len(option.Default) != 0 { + for _, v := range option.Default { + convert(v, checkval, option.tag) + } + } + + return reflect.DeepEqual(option.value.Interface(), checkval.Interface()) +} + +func (option *Option) isUnmarshaler() Unmarshaler { + v := option.value + + for { + if !v.CanInterface() { + break + } + + i := v.Interface() + + if u, ok := i.(Unmarshaler); ok { + return u + } + + if !v.CanAddr() { + break + } + + v = v.Addr() + } + + return nil +} + +func (option *Option) isBool() bool { + tp := option.value.Type() + + for { + switch tp.Kind() { + case reflect.Slice, reflect.Ptr: + tp = tp.Elem() + case reflect.Bool: + return true + case reflect.Func: + return tp.NumIn() == 0 + default: + return false + } + } +} + +func (option *Option) isSignedNumber() bool { + tp := option.value.Type() + + for { + switch tp.Kind() { + case reflect.Slice, reflect.Ptr: + tp = tp.Elem() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64: + return true + default: + return false + } + } +} + +func (option *Option) isFunc() bool { + return option.value.Type().Kind() == reflect.Func +} + +func (option *Option) call(value *string) error { + var retval []reflect.Value + + if value == nil { + retval = option.value.Call(nil) + } else { + tp := option.value.Type().In(0) + + val := reflect.New(tp) + val = reflect.Indirect(val) + + if err := convert(*value, val, option.tag); err != nil { + return err + } + + retval = option.value.Call([]reflect.Value{val}) + } + + if len(retval) == 1 && retval[0].Type() == reflect.TypeOf((*error)(nil)).Elem() { + if retval[0].Interface() == nil { + return nil + } + + return retval[0].Interface().(error) + } + + return nil +} + +func (option *Option) updateDefaultLiteral() { + defs := option.Default + def := "" + + if len(defs) == 0 && option.canArgument() { + var showdef bool + + switch option.field.Type.Kind() { + case reflect.Func, reflect.Ptr: + showdef = !option.value.IsNil() + case reflect.Slice, reflect.String, reflect.Array: + showdef = option.value.Len() > 0 + case reflect.Map: + showdef = !option.value.IsNil() && option.value.Len() > 0 + default: + zeroval := reflect.Zero(option.field.Type) + showdef = !reflect.DeepEqual(zeroval.Interface(), option.value.Interface()) + } + + if showdef { + def, _ = convertToString(option.value, option.tag) + } + } else if len(defs) != 0 { + l := len(defs) - 1 + + for i := 0; i < l; i++ { + def += quoteIfNeeded(defs[i]) + ", " + } + + def += quoteIfNeeded(defs[l]) + } + + option.defaultLiteral = def +} + +func (option *Option) shortAndLongName() string { + ret := &bytes.Buffer{} + + if option.ShortName != 0 { + ret.WriteRune(defaultShortOptDelimiter) + ret.WriteRune(option.ShortName) + } + + if len(option.LongName) != 0 { + if option.ShortName != 0 { + ret.WriteRune('/') + } + + ret.WriteString(option.LongName) + } + + return ret.String() +} diff --git a/vendor/github.com/jessevdk/go-flags/options_test.go b/vendor/github.com/jessevdk/go-flags/options_test.go new file mode 100644 index 0000000..b0fe9f4 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/options_test.go @@ -0,0 +1,45 @@ +package flags + +import ( + "testing" +) + +func TestPassDoubleDash(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + }{} + + p := NewParser(&opts, PassDoubleDash) + ret, err := p.ParseArgs([]string{"-v", "--", "-v", "-g"}) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + assertStringArray(t, ret, []string{"-v", "-g"}) +} + +func TestPassAfterNonOption(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + }{} + + p := NewParser(&opts, PassAfterNonOption) + ret, err := p.ParseArgs([]string{"-v", "arg", "-v", "-g"}) + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + return + } + + if !opts.Value { + t.Errorf("Expected Value to be true") + } + + assertStringArray(t, ret, []string{"arg", "-v", "-g"}) +} diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_other.go b/vendor/github.com/jessevdk/go-flags/optstyle_other.go new file mode 100644 index 0000000..56dfdae --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/optstyle_other.go @@ -0,0 +1,67 @@ +// +build !windows forceposix + +package flags + +import ( + "strings" +) + +const ( + defaultShortOptDelimiter = '-' + defaultLongOptDelimiter = "--" + defaultNameArgDelimiter = '=' +) + +func argumentStartsOption(arg string) bool { + return len(arg) > 0 && arg[0] == '-' +} + +func argumentIsOption(arg string) bool { + if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' { + return true + } + + if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' { + return true + } + + return false +} + +// stripOptionPrefix returns the option without the prefix and whether or +// not the option is a long option or not. +func stripOptionPrefix(optname string) (prefix string, name string, islong bool) { + if strings.HasPrefix(optname, "--") { + return "--", optname[2:], true + } else if strings.HasPrefix(optname, "-") { + return "-", optname[1:], false + } + + return "", optname, false +} + +// splitOption attempts to split the passed option into a name and an argument. +// When there is no argument specified, nil will be returned for it. +func splitOption(prefix string, option string, islong bool) (string, string, *string) { + pos := strings.Index(option, "=") + + if (islong && pos >= 0) || (!islong && pos == 1) { + rest := option[pos+1:] + return option[:pos], "=", &rest + } + + return option, "", nil +} + +// addHelpGroup adds a new group that contains default help parameters. +func (c *Command) addHelpGroup(showHelp func() error) *Group { + var help struct { + ShowHelp func() error `short:"h" long:"help" description:"Show this help message"` + } + + help.ShowHelp = showHelp + ret, _ := c.AddGroup("Help Options", "", &help) + ret.isBuiltinHelp = true + + return ret +} diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_windows.go b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go new file mode 100644 index 0000000..f3f28ae --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go @@ -0,0 +1,108 @@ +// +build !forceposix + +package flags + +import ( + "strings" +) + +// Windows uses a front slash for both short and long options. Also it uses +// a colon for name/argument delimter. +const ( + defaultShortOptDelimiter = '/' + defaultLongOptDelimiter = "/" + defaultNameArgDelimiter = ':' +) + +func argumentStartsOption(arg string) bool { + return len(arg) > 0 && (arg[0] == '-' || arg[0] == '/') +} + +func argumentIsOption(arg string) bool { + // Windows-style options allow front slash for the option + // delimiter. + if len(arg) > 1 && arg[0] == '/' { + return true + } + + if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' { + return true + } + + if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' { + return true + } + + return false +} + +// stripOptionPrefix returns the option without the prefix and whether or +// not the option is a long option or not. +func stripOptionPrefix(optname string) (prefix string, name string, islong bool) { + // Determine if the argument is a long option or not. Windows + // typically supports both long and short options with a single + // front slash as the option delimiter, so handle this situation + // nicely. + possplit := 0 + + if strings.HasPrefix(optname, "--") { + possplit = 2 + islong = true + } else if strings.HasPrefix(optname, "-") { + possplit = 1 + islong = false + } else if strings.HasPrefix(optname, "/") { + possplit = 1 + islong = len(optname) > 2 + } + + return optname[:possplit], optname[possplit:], islong +} + +// splitOption attempts to split the passed option into a name and an argument. +// When there is no argument specified, nil will be returned for it. +func splitOption(prefix string, option string, islong bool) (string, string, *string) { + if len(option) == 0 { + return option, "", nil + } + + // Windows typically uses a colon for the option name and argument + // delimiter while POSIX typically uses an equals. Support both styles, + // but don't allow the two to be mixed. That is to say /foo:bar and + // --foo=bar are acceptable, but /foo=bar and --foo:bar are not. + var pos int + var sp string + + if prefix == "/" { + sp = ":" + pos = strings.Index(option, sp) + } else if len(prefix) > 0 { + sp = "=" + pos = strings.Index(option, sp) + } + + if (islong && pos >= 0) || (!islong && pos == 1) { + rest := option[pos+1:] + return option[:pos], sp, &rest + } + + return option, "", nil +} + +// addHelpGroup adds a new group that contains default help parameters. +func (c *Command) addHelpGroup(showHelp func() error) *Group { + // Windows CLI applications typically use /? for help, so make both + // that available as well as the POSIX style h and help. + var help struct { + ShowHelpWindows func() error `short:"?" description:"Show this help message"` + ShowHelpPosix func() error `short:"h" long:"help" description:"Show this help message"` + } + + help.ShowHelpWindows = showHelp + help.ShowHelpPosix = showHelp + + ret, _ := c.AddGroup("Help Options", "", &help) + ret.isBuiltinHelp = true + + return ret +} diff --git a/vendor/github.com/jessevdk/go-flags/parser.go b/vendor/github.com/jessevdk/go-flags/parser.go new file mode 100644 index 0000000..0a7922a --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/parser.go @@ -0,0 +1,700 @@ +// Copyright 2012 Jesse van den Kieboom. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flags + +import ( + "bytes" + "fmt" + "os" + "path" + "sort" + "strings" + "unicode/utf8" +) + +// A Parser provides command line option parsing. It can contain several +// option groups each with their own set of options. +type Parser struct { + // Embedded, see Command for more information + *Command + + // A usage string to be displayed in the help message. + Usage string + + // Option flags changing the behavior of the parser. + Options Options + + // NamespaceDelimiter separates group namespaces and option long names + NamespaceDelimiter string + + // UnknownOptionsHandler is a function which gets called when the parser + // encounters an unknown option. The function receives the unknown option + // name, a SplitArgument which specifies its value if set with an argument + // separator, and the remaining command line arguments. + // It should return a new list of remaining arguments to continue parsing, + // or an error to indicate a parse failure. + UnknownOptionHandler func(option string, arg SplitArgument, args []string) ([]string, error) + + // CompletionHandler is a function gets called to handle the completion of + // items. By default, the items are printed and the application is exited. + // You can override this default behavior by specifying a custom CompletionHandler. + CompletionHandler func(items []Completion) + + // CommandHandler is a function that gets called to handle execution of a + // command. By default, the command will simply be executed. This can be + // overridden to perform certain actions (such as applying global flags) + // just before the command is executed. Note that if you override the + // handler it is your responsibility to call the command.Execute function. + // + // The command passed into CommandHandler may be nil in case there is no + // command to be executed when parsing has finished. + CommandHandler func(command Commander, args []string) error + + internalError error +} + +// SplitArgument represents the argument value of an option that was passed using +// an argument separator. +type SplitArgument interface { + // String returns the option's value as a string, and a boolean indicating + // if the option was present. + Value() (string, bool) +} + +type strArgument struct { + value *string +} + +func (s strArgument) Value() (string, bool) { + if s.value == nil { + return "", false + } + + return *s.value, true +} + +// Options provides parser options that change the behavior of the option +// parser. +type Options uint + +const ( + // None indicates no options. + None Options = 0 + + // HelpFlag adds a default Help Options group to the parser containing + // -h and --help options. When either -h or --help is specified on the + // command line, the parser will return the special error of type + // ErrHelp. When PrintErrors is also specified, then the help message + // will also be automatically printed to os.Stdout. + HelpFlag = 1 << iota + + // PassDoubleDash passes all arguments after a double dash, --, as + // remaining command line arguments (i.e. they will not be parsed for + // flags). + PassDoubleDash + + // IgnoreUnknown ignores any unknown options and passes them as + // remaining command line arguments instead of generating an error. + IgnoreUnknown + + // PrintErrors prints any errors which occurred during parsing to + // os.Stderr. In the special case of ErrHelp, the message will be printed + // to os.Stdout. + PrintErrors + + // PassAfterNonOption passes all arguments after the first non option + // as remaining command line arguments. This is equivalent to strict + // POSIX processing. + PassAfterNonOption + + // Default is a convenient default set of options which should cover + // most of the uses of the flags package. + Default = HelpFlag | PrintErrors | PassDoubleDash +) + +type parseState struct { + arg string + args []string + retargs []string + positional []*Arg + err error + + command *Command + lookup lookup +} + +// Parse is a convenience function to parse command line options with default +// settings. The provided data is a pointer to a struct representing the +// default option group (named "Application Options"). For more control, use +// flags.NewParser. +func Parse(data interface{}) ([]string, error) { + return NewParser(data, Default).Parse() +} + +// ParseArgs is a convenience function to parse command line options with default +// settings. The provided data is a pointer to a struct representing the +// default option group (named "Application Options"). The args argument is +// the list of command line arguments to parse. If you just want to parse the +// default program command line arguments (i.e. os.Args), then use flags.Parse +// instead. For more control, use flags.NewParser. +func ParseArgs(data interface{}, args []string) ([]string, error) { + return NewParser(data, Default).ParseArgs(args) +} + +// NewParser creates a new parser. It uses os.Args[0] as the application +// name and then calls Parser.NewNamedParser (see Parser.NewNamedParser for +// more details). The provided data is a pointer to a struct representing the +// default option group (named "Application Options"), or nil if the default +// group should not be added. The options parameter specifies a set of options +// for the parser. +func NewParser(data interface{}, options Options) *Parser { + p := NewNamedParser(path.Base(os.Args[0]), options) + + if data != nil { + g, err := p.AddGroup("Application Options", "", data) + + if err == nil { + g.parent = p + } + + p.internalError = err + } + + return p +} + +// NewNamedParser creates a new parser. The appname is used to display the +// executable name in the built-in help message. Option groups and commands can +// be added to this parser by using AddGroup and AddCommand. +func NewNamedParser(appname string, options Options) *Parser { + p := &Parser{ + Command: newCommand(appname, "", "", nil), + Options: options, + NamespaceDelimiter: ".", + } + + p.Command.parent = p + + return p +} + +// Parse parses the command line arguments from os.Args using Parser.ParseArgs. +// For more detailed information see ParseArgs. +func (p *Parser) Parse() ([]string, error) { + return p.ParseArgs(os.Args[1:]) +} + +// ParseArgs parses the command line arguments according to the option groups that +// were added to the parser. On successful parsing of the arguments, the +// remaining, non-option, arguments (if any) are returned. The returned error +// indicates a parsing error and can be used with PrintError to display +// contextual information on where the error occurred exactly. +// +// When the common help group has been added (AddHelp) and either -h or --help +// was specified in the command line arguments, a help message will be +// automatically printed if the PrintErrors option is enabled. +// Furthermore, the special error type ErrHelp is returned. +// It is up to the caller to exit the program if so desired. +func (p *Parser) ParseArgs(args []string) ([]string, error) { + if p.internalError != nil { + return nil, p.internalError + } + + p.eachOption(func(c *Command, g *Group, option *Option) { + option.isSet = false + option.isSetDefault = false + option.updateDefaultLiteral() + }) + + // Add built-in help group to all commands if necessary + if (p.Options & HelpFlag) != None { + p.addHelpGroups(p.showBuiltinHelp) + } + + compval := os.Getenv("GO_FLAGS_COMPLETION") + + if len(compval) != 0 { + comp := &completion{parser: p} + items := comp.complete(args) + + if p.CompletionHandler != nil { + p.CompletionHandler(items) + } else { + comp.print(items, compval == "verbose") + os.Exit(0) + } + + return nil, nil + } + + s := &parseState{ + args: args, + retargs: make([]string, 0, len(args)), + } + + p.fillParseState(s) + + for !s.eof() { + arg := s.pop() + + // When PassDoubleDash is set and we encounter a --, then + // simply append all the rest as arguments and break out + if (p.Options&PassDoubleDash) != None && arg == "--" { + s.addArgs(s.args...) + break + } + + if !argumentIsOption(arg) { + // Note: this also sets s.err, so we can just check for + // nil here and use s.err later + if p.parseNonOption(s) != nil { + break + } + + continue + } + + var err error + + prefix, optname, islong := stripOptionPrefix(arg) + optname, _, argument := splitOption(prefix, optname, islong) + + if islong { + err = p.parseLong(s, optname, argument) + } else { + err = p.parseShort(s, optname, argument) + } + + if err != nil { + ignoreUnknown := (p.Options & IgnoreUnknown) != None + parseErr := wrapError(err) + + if parseErr.Type != ErrUnknownFlag || (!ignoreUnknown && p.UnknownOptionHandler == nil) { + s.err = parseErr + break + } + + if ignoreUnknown { + s.addArgs(arg) + } else if p.UnknownOptionHandler != nil { + modifiedArgs, err := p.UnknownOptionHandler(optname, strArgument{argument}, s.args) + + if err != nil { + s.err = err + break + } + + s.args = modifiedArgs + } + } + } + + if s.err == nil { + p.eachOption(func(c *Command, g *Group, option *Option) { + if option.preventDefault { + return + } + + option.clearDefault() + }) + + s.checkRequired(p) + } + + var reterr error + + if s.err != nil { + reterr = s.err + } else if len(s.command.commands) != 0 && !s.command.SubcommandsOptional { + reterr = s.estimateCommand() + } else if cmd, ok := s.command.data.(Commander); ok { + if p.CommandHandler != nil { + reterr = p.CommandHandler(cmd, s.retargs) + } else { + reterr = cmd.Execute(s.retargs) + } + } else if p.CommandHandler != nil { + reterr = p.CommandHandler(nil, s.retargs) + } + + if reterr != nil { + var retargs []string + + if ourErr, ok := reterr.(*Error); !ok || ourErr.Type != ErrHelp { + retargs = append([]string{s.arg}, s.args...) + } else { + retargs = s.args + } + + return retargs, p.printError(reterr) + } + + return s.retargs, nil +} + +func (p *parseState) eof() bool { + return len(p.args) == 0 +} + +func (p *parseState) pop() string { + if p.eof() { + return "" + } + + p.arg = p.args[0] + p.args = p.args[1:] + + return p.arg +} + +func (p *parseState) peek() string { + if p.eof() { + return "" + } + + return p.args[0] +} + +func (p *parseState) checkRequired(parser *Parser) error { + c := parser.Command + + var required []*Option + + for c != nil { + c.eachGroup(func(g *Group) { + for _, option := range g.options { + if !option.isSet && option.Required { + required = append(required, option) + } + } + }) + + c = c.Active + } + + if len(required) == 0 { + if len(p.positional) > 0 { + var reqnames []string + + for _, arg := range p.positional { + argRequired := (!arg.isRemaining() && p.command.ArgsRequired) || arg.Required != -1 || arg.RequiredMaximum != -1 + + if !argRequired { + continue + } + + if arg.isRemaining() { + if arg.value.Len() < arg.Required { + var arguments string + + if arg.Required > 1 { + arguments = "arguments, but got only " + fmt.Sprintf("%d", arg.value.Len()) + } else { + arguments = "argument" + } + + reqnames = append(reqnames, "`"+arg.Name+" (at least "+fmt.Sprintf("%d", arg.Required)+" "+arguments+")`") + } else if arg.RequiredMaximum != -1 && arg.value.Len() > arg.RequiredMaximum { + if arg.RequiredMaximum == 0 { + reqnames = append(reqnames, "`"+arg.Name+" (zero arguments)`") + } else { + var arguments string + + if arg.RequiredMaximum > 1 { + arguments = "arguments, but got " + fmt.Sprintf("%d", arg.value.Len()) + } else { + arguments = "argument" + } + + reqnames = append(reqnames, "`"+arg.Name+" (at most "+fmt.Sprintf("%d", arg.RequiredMaximum)+" "+arguments+")`") + } + } + } else { + reqnames = append(reqnames, "`"+arg.Name+"`") + } + } + + if len(reqnames) == 0 { + return nil + } + + var msg string + + if len(reqnames) == 1 { + msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0]) + } else { + msg = fmt.Sprintf("the required arguments %s and %s were not provided", + strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1]) + } + + p.err = newError(ErrRequired, msg) + return p.err + } + + return nil + } + + names := make([]string, 0, len(required)) + + for _, k := range required { + names = append(names, "`"+k.String()+"'") + } + + sort.Strings(names) + + var msg string + + if len(names) == 1 { + msg = fmt.Sprintf("the required flag %s was not specified", names[0]) + } else { + msg = fmt.Sprintf("the required flags %s and %s were not specified", + strings.Join(names[:len(names)-1], ", "), names[len(names)-1]) + } + + p.err = newError(ErrRequired, msg) + return p.err +} + +func (p *parseState) estimateCommand() error { + commands := p.command.sortedVisibleCommands() + cmdnames := make([]string, len(commands)) + + for i, v := range commands { + cmdnames[i] = v.Name + } + + var msg string + var errtype ErrorType + + if len(p.retargs) != 0 { + c, l := closestChoice(p.retargs[0], cmdnames) + msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0]) + errtype = ErrUnknownCommand + + if float32(l)/float32(len(c)) < 0.5 { + msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c) + } else if len(cmdnames) == 1 { + msg = fmt.Sprintf("%s. You should use the %s command", + msg, + cmdnames[0]) + } else if len(cmdnames) > 1 { + msg = fmt.Sprintf("%s. Please specify one command of: %s or %s", + msg, + strings.Join(cmdnames[:len(cmdnames)-1], ", "), + cmdnames[len(cmdnames)-1]) + } + } else { + errtype = ErrCommandRequired + + if len(cmdnames) == 1 { + msg = fmt.Sprintf("Please specify the %s command", cmdnames[0]) + } else if len(cmdnames) > 1 { + msg = fmt.Sprintf("Please specify one command of: %s or %s", + strings.Join(cmdnames[:len(cmdnames)-1], ", "), + cmdnames[len(cmdnames)-1]) + } + } + + return newError(errtype, msg) +} + +func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) { + if !option.canArgument() { + if argument != nil { + return newErrorf(ErrNoArgumentForBool, "bool flag `%s' cannot have an argument", option) + } + + err = option.set(nil) + } else if argument != nil || (canarg && !s.eof()) { + var arg string + + if argument != nil { + arg = *argument + } else { + arg = s.pop() + + if argumentIsOption(arg) && !(option.isSignedNumber() && len(arg) > 1 && arg[0] == '-' && arg[1] >= '0' && arg[1] <= '9') { + return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got option `%s'", option, arg) + } else if p.Options&PassDoubleDash != 0 && arg == "--" { + return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got double dash `--'", option) + } + } + + if option.tag.Get("unquote") != "false" { + arg, err = unquoteIfPossible(arg) + } + + if err == nil { + err = option.set(&arg) + } + } else if option.OptionalArgument { + option.empty() + + for _, v := range option.OptionalValue { + err = option.set(&v) + + if err != nil { + break + } + } + } else { + err = newErrorf(ErrExpectedArgument, "expected argument for flag `%s'", option) + } + + if err != nil { + if _, ok := err.(*Error); !ok { + err = newErrorf(ErrMarshal, "invalid argument for flag `%s' (expected %s): %s", + option, + option.value.Type(), + err.Error()) + } + } + + return err +} + +func (p *Parser) parseLong(s *parseState, name string, argument *string) error { + if option := s.lookup.longNames[name]; option != nil { + // Only long options that are required can consume an argument + // from the argument list + canarg := !option.OptionalArgument + + return p.parseOption(s, name, option, canarg, argument) + } + + return newErrorf(ErrUnknownFlag, "unknown flag `%s'", name) +} + +func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) { + c, n := utf8.DecodeRuneInString(optname) + + if n == len(optname) { + return optname, nil + } + + first := string(c) + + if option := s.lookup.shortNames[first]; option != nil && option.canArgument() { + arg := optname[n:] + return first, &arg + } + + return optname, nil +} + +func (p *Parser) parseShort(s *parseState, optname string, argument *string) error { + if argument == nil { + optname, argument = p.splitShortConcatArg(s, optname) + } + + for i, c := range optname { + shortname := string(c) + + if option := s.lookup.shortNames[shortname]; option != nil { + // Only the last short argument can consume an argument from + // the arguments list, and only if it's non optional + canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument + + if err := p.parseOption(s, shortname, option, canarg, argument); err != nil { + return err + } + } else { + return newErrorf(ErrUnknownFlag, "unknown flag `%s'", shortname) + } + + // Only the first option can have a concatted argument, so just + // clear argument here + argument = nil + } + + return nil +} + +func (p *parseState) addArgs(args ...string) error { + for len(p.positional) > 0 && len(args) > 0 { + arg := p.positional[0] + + if err := convert(args[0], arg.value, arg.tag); err != nil { + p.err = err + return err + } + + if !arg.isRemaining() { + p.positional = p.positional[1:] + } + + args = args[1:] + } + + p.retargs = append(p.retargs, args...) + return nil +} + +func (p *Parser) parseNonOption(s *parseState) error { + if len(s.positional) > 0 { + return s.addArgs(s.arg) + } + + if len(s.command.commands) > 0 && len(s.retargs) == 0 { + if cmd := s.lookup.commands[s.arg]; cmd != nil { + s.command.Active = cmd + cmd.fillParseState(s) + + return nil + } else if !s.command.SubcommandsOptional { + s.addArgs(s.arg) + return newErrorf(ErrUnknownCommand, "Unknown command `%s'", s.arg) + } + } + + if (p.Options & PassAfterNonOption) != None { + // If PassAfterNonOption is set then all remaining arguments + // are considered positional + if err := s.addArgs(s.arg); err != nil { + return err + } + + if err := s.addArgs(s.args...); err != nil { + return err + } + + s.args = []string{} + } else { + return s.addArgs(s.arg) + } + + return nil +} + +func (p *Parser) showBuiltinHelp() error { + var b bytes.Buffer + + p.WriteHelp(&b) + return newError(ErrHelp, b.String()) +} + +func (p *Parser) printError(err error) error { + if err != nil && (p.Options&PrintErrors) != None { + flagsErr, ok := err.(*Error) + + if ok && flagsErr.Type == ErrHelp { + fmt.Fprintln(os.Stdout, err) + } else { + fmt.Fprintln(os.Stderr, err) + } + } + + return err +} + +func (p *Parser) clearIsSet() { + p.eachCommand(func(c *Command) { + c.eachGroup(func(g *Group) { + for _, option := range g.options { + option.isSet = false + } + }) + }, true) +} diff --git a/vendor/github.com/jessevdk/go-flags/parser_test.go b/vendor/github.com/jessevdk/go-flags/parser_test.go new file mode 100644 index 0000000..374f21c --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/parser_test.go @@ -0,0 +1,612 @@ +package flags + +import ( + "fmt" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +type defaultOptions struct { + Int int `long:"i"` + IntDefault int `long:"id" default:"1"` + + Float64 float64 `long:"f"` + Float64Default float64 `long:"fd" default:"-3.14"` + + NumericFlag bool `short:"3"` + + String string `long:"str"` + StringDefault string `long:"strd" default:"abc"` + StringNotUnquoted string `long:"strnot" unquote:"false"` + + Time time.Duration `long:"t"` + TimeDefault time.Duration `long:"td" default:"1m"` + + Map map[string]int `long:"m"` + MapDefault map[string]int `long:"md" default:"a:1"` + + Slice []int `long:"s"` + SliceDefault []int `long:"sd" default:"1" default:"2"` +} + +func TestDefaults(t *testing.T) { + var tests = []struct { + msg string + args []string + expected defaultOptions + }{ + { + msg: "no arguments, expecting default values", + args: []string{}, + expected: defaultOptions{ + Int: 0, + IntDefault: 1, + + Float64: 0.0, + Float64Default: -3.14, + + NumericFlag: false, + + String: "", + StringDefault: "abc", + + Time: 0, + TimeDefault: time.Minute, + + Map: map[string]int{}, + MapDefault: map[string]int{"a": 1}, + + Slice: []int{}, + SliceDefault: []int{1, 2}, + }, + }, + { + msg: "non-zero value arguments, expecting overwritten arguments", + args: []string{"--i=3", "--id=3", "--f=-2.71", "--fd=2.71", "-3", "--str=def", "--strd=def", "--t=3ms", "--td=3ms", "--m=c:3", "--md=c:3", "--s=3", "--sd=3"}, + expected: defaultOptions{ + Int: 3, + IntDefault: 3, + + Float64: -2.71, + Float64Default: 2.71, + + NumericFlag: true, + + String: "def", + StringDefault: "def", + + Time: 3 * time.Millisecond, + TimeDefault: 3 * time.Millisecond, + + Map: map[string]int{"c": 3}, + MapDefault: map[string]int{"c": 3}, + + Slice: []int{3}, + SliceDefault: []int{3}, + }, + }, + { + msg: "zero value arguments, expecting overwritten arguments", + args: []string{"--i=0", "--id=0", "--f=0", "--fd=0", "--str", "", "--strd=\"\"", "--t=0ms", "--td=0s", "--m=:0", "--md=:0", "--s=0", "--sd=0"}, + expected: defaultOptions{ + Int: 0, + IntDefault: 0, + + Float64: 0, + Float64Default: 0, + + String: "", + StringDefault: "", + + Time: 0, + TimeDefault: 0, + + Map: map[string]int{"": 0}, + MapDefault: map[string]int{"": 0}, + + Slice: []int{0}, + SliceDefault: []int{0}, + }, + }, + } + + for _, test := range tests { + var opts defaultOptions + + _, err := ParseArgs(&opts, test.args) + if err != nil { + t.Fatalf("%s:\nUnexpected error: %v", test.msg, err) + } + + if opts.Slice == nil { + opts.Slice = []int{} + } + + if !reflect.DeepEqual(opts, test.expected) { + t.Errorf("%s:\nUnexpected options with arguments %+v\nexpected\n%+v\nbut got\n%+v\n", test.msg, test.args, test.expected, opts) + } + } +} + +func TestNoDefaultsForBools(t *testing.T) { + var opts struct { + DefaultBool bool `short:"d" default:"true"` + } + + if runtime.GOOS == "windows" { + assertParseFail(t, ErrInvalidTag, "boolean flag `/d' may not have default values, they always default to `false' and can only be turned on", &opts) + } else { + assertParseFail(t, ErrInvalidTag, "boolean flag `-d' may not have default values, they always default to `false' and can only be turned on", &opts) + } +} + +func TestUnquoting(t *testing.T) { + var tests = []struct { + arg string + err error + value string + }{ + { + arg: "\"abc", + err: strconv.ErrSyntax, + value: "", + }, + { + arg: "\"\"abc\"", + err: strconv.ErrSyntax, + value: "", + }, + { + arg: "\"abc\"", + err: nil, + value: "abc", + }, + { + arg: "\"\\\"abc\\\"\"", + err: nil, + value: "\"abc\"", + }, + { + arg: "\"\\\"abc\"", + err: nil, + value: "\"abc", + }, + } + + for _, test := range tests { + var opts defaultOptions + + for _, delimiter := range []bool{false, true} { + p := NewParser(&opts, None) + + var err error + if delimiter { + _, err = p.ParseArgs([]string{"--str=" + test.arg, "--strnot=" + test.arg}) + } else { + _, err = p.ParseArgs([]string{"--str", test.arg, "--strnot", test.arg}) + } + + if test.err == nil { + if err != nil { + t.Fatalf("Expected no error but got: %v", err) + } + + if test.value != opts.String { + t.Fatalf("Expected String to be %q but got %q", test.value, opts.String) + } + if q := strconv.Quote(test.value); q != opts.StringNotUnquoted { + t.Fatalf("Expected StringDefault to be %q but got %q", q, opts.StringNotUnquoted) + } + } else { + if err == nil { + t.Fatalf("Expected error") + } else if e, ok := err.(*Error); ok { + if strings.HasPrefix(e.Message, test.err.Error()) { + t.Fatalf("Expected error message to end with %q but got %v", test.err.Error(), e.Message) + } + } + } + } + } +} + +// EnvRestorer keeps a copy of a set of env variables and can restore the env from them +type EnvRestorer struct { + env map[string]string +} + +func (r *EnvRestorer) Restore() { + os.Clearenv() + + for k, v := range r.env { + os.Setenv(k, v) + } +} + +// EnvSnapshot returns a snapshot of the currently set env variables +func EnvSnapshot() *EnvRestorer { + r := EnvRestorer{make(map[string]string)} + + for _, kv := range os.Environ() { + parts := strings.SplitN(kv, "=", 2) + + if len(parts) != 2 { + panic("got a weird env variable: " + kv) + } + + r.env[parts[0]] = parts[1] + } + + return &r +} + +type envDefaultOptions struct { + Int int `long:"i" default:"1" env:"TEST_I"` + Time time.Duration `long:"t" default:"1m" env:"TEST_T"` + Map map[string]int `long:"m" default:"a:1" env:"TEST_M" env-delim:";"` + Slice []int `long:"s" default:"1" default:"2" env:"TEST_S" env-delim:","` +} + +func TestEnvDefaults(t *testing.T) { + var tests = []struct { + msg string + args []string + expected envDefaultOptions + env map[string]string + }{ + { + msg: "no arguments, no env, expecting default values", + args: []string{}, + expected: envDefaultOptions{ + Int: 1, + Time: time.Minute, + Map: map[string]int{"a": 1}, + Slice: []int{1, 2}, + }, + }, + { + msg: "no arguments, env defaults, expecting env default values", + args: []string{}, + expected: envDefaultOptions{ + Int: 2, + Time: 2 * time.Minute, + Map: map[string]int{"a": 2, "b": 3}, + Slice: []int{4, 5, 6}, + }, + env: map[string]string{ + "TEST_I": "2", + "TEST_T": "2m", + "TEST_M": "a:2;b:3", + "TEST_S": "4,5,6", + }, + }, + { + msg: "non-zero value arguments, expecting overwritten arguments", + args: []string{"--i=3", "--t=3ms", "--m=c:3", "--s=3"}, + expected: envDefaultOptions{ + Int: 3, + Time: 3 * time.Millisecond, + Map: map[string]int{"c": 3}, + Slice: []int{3}, + }, + env: map[string]string{ + "TEST_I": "2", + "TEST_T": "2m", + "TEST_M": "a:2;b:3", + "TEST_S": "4,5,6", + }, + }, + { + msg: "zero value arguments, expecting overwritten arguments", + args: []string{"--i=0", "--t=0ms", "--m=:0", "--s=0"}, + expected: envDefaultOptions{ + Int: 0, + Time: 0, + Map: map[string]int{"": 0}, + Slice: []int{0}, + }, + env: map[string]string{ + "TEST_I": "2", + "TEST_T": "2m", + "TEST_M": "a:2;b:3", + "TEST_S": "4,5,6", + }, + }, + } + + oldEnv := EnvSnapshot() + defer oldEnv.Restore() + + for _, test := range tests { + var opts envDefaultOptions + oldEnv.Restore() + for envKey, envValue := range test.env { + os.Setenv(envKey, envValue) + } + _, err := ParseArgs(&opts, test.args) + if err != nil { + t.Fatalf("%s:\nUnexpected error: %v", test.msg, err) + } + + if opts.Slice == nil { + opts.Slice = []int{} + } + + if !reflect.DeepEqual(opts, test.expected) { + t.Errorf("%s:\nUnexpected options with arguments %+v\nexpected\n%+v\nbut got\n%+v\n", test.msg, test.args, test.expected, opts) + } + } +} + +func TestOptionAsArgument(t *testing.T) { + var tests = []struct { + args []string + expectError bool + errType ErrorType + errMsg string + rest []string + }{ + { + // short option must not be accepted as argument + args: []string{"--string-slice", "foobar", "--string-slice", "-o"}, + expectError: true, + errType: ErrExpectedArgument, + errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-o'", + }, + { + // long option must not be accepted as argument + args: []string{"--string-slice", "foobar", "--string-slice", "--other-option"}, + expectError: true, + errType: ErrExpectedArgument, + errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `--other-option'", + }, + { + // long option must not be accepted as argument + args: []string{"--string-slice", "--"}, + expectError: true, + errType: ErrExpectedArgument, + errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got double dash `--'", + }, + { + // quoted and appended option should be accepted as argument (even if it looks like an option) + args: []string{"--string-slice", "foobar", "--string-slice=\"--other-option\""}, + }, + { + // Accept any single character arguments including '-' + args: []string{"--string-slice", "-"}, + }, + { + // Do not accept arguments which start with '-' even if the next character is a digit + args: []string{"--string-slice", "-3.14"}, + expectError: true, + errType: ErrExpectedArgument, + errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-3.14'", + }, + { + // Do not accept arguments which start with '-' if the next character is not a digit + args: []string{"--string-slice", "-character"}, + expectError: true, + errType: ErrExpectedArgument, + errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-character'", + }, + { + args: []string{"-o", "-", "-"}, + rest: []string{"-", "-"}, + }, + { + // Accept arguments which start with '-' if the next character is a digit, for number options only + args: []string{"--int-slice", "-3"}, + }, + { + // Accept arguments which start with '-' if the next character is a digit, for number options only + args: []string{"--int16", "-3"}, + }, + { + // Accept arguments which start with '-' if the next character is a digit, for number options only + args: []string{"--float32", "-3.2"}, + }, + { + // Accept arguments which start with '-' if the next character is a digit, for number options only + args: []string{"--float32ptr", "-3.2"}, + }, + } + + var opts struct { + StringSlice []string `long:"string-slice"` + IntSlice []int `long:"int-slice"` + Int16 int16 `long:"int16"` + Float32 float32 `long:"float32"` + Float32Ptr *float32 `long:"float32ptr"` + OtherOption bool `long:"other-option" short:"o"` + } + + for _, test := range tests { + if test.expectError { + assertParseFail(t, test.errType, test.errMsg, &opts, test.args...) + } else { + args := assertParseSuccess(t, &opts, test.args...) + + assertStringArray(t, args, test.rest) + } + } +} + +func TestUnknownFlagHandler(t *testing.T) { + + var opts struct { + Flag1 string `long:"flag1"` + Flag2 string `long:"flag2"` + } + + p := NewParser(&opts, None) + + var unknownFlag1 string + var unknownFlag2 bool + var unknownFlag3 string + + // Set up a callback to intercept unknown options during parsing + p.UnknownOptionHandler = func(option string, arg SplitArgument, args []string) ([]string, error) { + if option == "unknownFlag1" { + if argValue, ok := arg.Value(); ok { + unknownFlag1 = argValue + return args, nil + } + // consume a value from remaining args list + unknownFlag1 = args[0] + return args[1:], nil + } else if option == "unknownFlag2" { + // treat this one as a bool switch, don't consume any args + unknownFlag2 = true + return args, nil + } else if option == "unknownFlag3" { + if argValue, ok := arg.Value(); ok { + unknownFlag3 = argValue + return args, nil + } + // consume a value from remaining args list + unknownFlag3 = args[0] + return args[1:], nil + } + + return args, fmt.Errorf("Unknown flag: %v", option) + } + + // Parse args containing some unknown flags, verify that + // our callback can handle all of them + _, err := p.ParseArgs([]string{"--flag1=stuff", "--unknownFlag1", "blah", "--unknownFlag2", "--unknownFlag3=baz", "--flag2=foo"}) + + if err != nil { + assertErrorf(t, "Parser returned unexpected error %v", err) + } + + assertString(t, opts.Flag1, "stuff") + assertString(t, opts.Flag2, "foo") + assertString(t, unknownFlag1, "blah") + assertString(t, unknownFlag3, "baz") + + if !unknownFlag2 { + assertErrorf(t, "Flag should have been set by unknown handler, but had value: %v", unknownFlag2) + } + + // Parse args with unknown flags that callback doesn't handle, verify it returns error + _, err = p.ParseArgs([]string{"--flag1=stuff", "--unknownFlagX", "blah", "--flag2=foo"}) + + if err == nil { + assertErrorf(t, "Parser should have returned error, but returned nil") + } +} + +func TestChoices(t *testing.T) { + var opts struct { + Choice string `long:"choose" choice:"v1" choice:"v2"` + } + + assertParseFail(t, ErrInvalidChoice, "Invalid value `invalid' for option `"+defaultLongOptDelimiter+"choose'. Allowed values are: v1 or v2", &opts, "--choose", "invalid") + assertParseSuccess(t, &opts, "--choose", "v2") + assertString(t, opts.Choice, "v2") +} + +func TestEmbedded(t *testing.T) { + type embedded struct { + V bool `short:"v"` + } + var opts struct { + embedded + } + + assertParseSuccess(t, &opts, "-v") + + if !opts.V { + t.Errorf("Expected V to be true") + } +} + +type command struct { +} + +func (c *command) Execute(args []string) error { + return nil +} + +func TestCommandHandlerNoCommand(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + }{} + + parser := NewParser(&opts, Default&^PrintErrors) + + var executedCommand Commander + var executedArgs []string + + executed := false + + parser.CommandHandler = func(command Commander, args []string) error { + executed = true + + executedCommand = command + executedArgs = args + + return nil + } + + _, err := parser.ParseArgs([]string{"arg1", "arg2"}) + + if err != nil { + t.Fatalf("Unexpected parse error: %s", err) + } + + if !executed { + t.Errorf("Expected command handler to be executed") + } + + if executedCommand != nil { + t.Errorf("Did not exect an executed command") + } + + assertStringArray(t, executedArgs, []string{"arg1", "arg2"}) +} + +func TestCommandHandler(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + + Command command `command:"cmd"` + }{} + + parser := NewParser(&opts, Default&^PrintErrors) + + var executedCommand Commander + var executedArgs []string + + executed := false + + parser.CommandHandler = func(command Commander, args []string) error { + executed = true + + executedCommand = command + executedArgs = args + + return nil + } + + _, err := parser.ParseArgs([]string{"cmd", "arg1", "arg2"}) + + if err != nil { + t.Fatalf("Unexpected parse error: %s", err) + } + + if !executed { + t.Errorf("Expected command handler to be executed") + } + + if executedCommand == nil { + t.Errorf("Expected command handler to be executed") + } + + assertStringArray(t, executedArgs, []string{"arg1", "arg2"}) +} diff --git a/vendor/github.com/jessevdk/go-flags/pointer_test.go b/vendor/github.com/jessevdk/go-flags/pointer_test.go new file mode 100644 index 0000000..dc779c7 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/pointer_test.go @@ -0,0 +1,164 @@ +package flags + +import ( + "testing" +) + +func TestPointerBool(t *testing.T) { + var opts = struct { + Value *bool `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v") + + assertStringArray(t, ret, []string{}) + + if !*opts.Value { + t.Errorf("Expected Value to be true") + } +} + +func TestPointerString(t *testing.T) { + var opts = struct { + Value *string `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v", "value") + + assertStringArray(t, ret, []string{}) + assertString(t, *opts.Value, "value") +} + +func TestPointerSlice(t *testing.T) { + var opts = struct { + Value *[]string `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v", "value1", "-v", "value2") + + assertStringArray(t, ret, []string{}) + assertStringArray(t, *opts.Value, []string{"value1", "value2"}) +} + +func TestPointerMap(t *testing.T) { + var opts = struct { + Value *map[string]int `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v", "k1:2", "-v", "k2:-5") + + assertStringArray(t, ret, []string{}) + + if v, ok := (*opts.Value)["k1"]; !ok { + t.Errorf("Expected key \"k1\" to exist") + } else if v != 2 { + t.Errorf("Expected \"k1\" to be 2, but got %#v", v) + } + + if v, ok := (*opts.Value)["k2"]; !ok { + t.Errorf("Expected key \"k2\" to exist") + } else if v != -5 { + t.Errorf("Expected \"k2\" to be -5, but got %#v", v) + } +} + +type marshalledString string + +func (m *marshalledString) UnmarshalFlag(value string) error { + *m = marshalledString(value) + return nil +} + +func (m marshalledString) MarshalFlag() (string, error) { + return string(m), nil +} + +func TestPointerStringMarshalled(t *testing.T) { + var opts = struct { + Value *marshalledString `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v", "value") + + assertStringArray(t, ret, []string{}) + + if opts.Value == nil { + t.Error("Expected value not to be nil") + return + } + + assertString(t, string(*opts.Value), "value") +} + +type marshalledStruct struct { + Value string +} + +func (m *marshalledStruct) UnmarshalFlag(value string) error { + m.Value = value + return nil +} + +func (m marshalledStruct) MarshalFlag() (string, error) { + return m.Value, nil +} + +func TestPointerStructMarshalled(t *testing.T) { + var opts = struct { + Value *marshalledStruct `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v", "value") + + assertStringArray(t, ret, []string{}) + + if opts.Value == nil { + t.Error("Expected value not to be nil") + return + } + + assertString(t, opts.Value.Value, "value") +} + +type PointerGroup struct { + Value bool `short:"v"` +} + +func TestPointerGroup(t *testing.T) { + var opts = struct { + Group *PointerGroup `group:"Group Options"` + }{} + + ret := assertParseSuccess(t, &opts, "-v") + + assertStringArray(t, ret, []string{}) + + if !opts.Group.Value { + t.Errorf("Expected Group.Value to be true") + } +} + +func TestDoNotChangeNonTaggedFields(t *testing.T) { + var opts struct { + A struct { + Pointer *int + } + B *struct { + Pointer *int + } + } + + ret := assertParseSuccess(t, &opts) + + assertStringArray(t, ret, []string{}) + + if opts.A.Pointer != nil { + t.Error("Expected A.Pointer to be nil") + } + if opts.B != nil { + t.Error("Expected B to be nil") + } + if opts.B != nil && opts.B.Pointer != nil { + t.Error("Expected B.Pointer to be nil") + } +} diff --git a/vendor/github.com/jessevdk/go-flags/short_test.go b/vendor/github.com/jessevdk/go-flags/short_test.go new file mode 100644 index 0000000..5f4106b --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/short_test.go @@ -0,0 +1,234 @@ +package flags + +import ( + "fmt" + "testing" +) + +func TestShort(t *testing.T) { + var opts = struct { + Value bool `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v") + + assertStringArray(t, ret, []string{}) + + if !opts.Value { + t.Errorf("Expected Value to be true") + } +} + +func TestShortTooLong(t *testing.T) { + var opts = struct { + Value bool `short:"vv"` + }{} + + assertParseFail(t, ErrShortNameTooLong, "short names can only be 1 character long, not `vv'", &opts) +} + +func TestShortRequired(t *testing.T) { + var opts = struct { + Value bool `short:"v" required:"true"` + }{} + + assertParseFail(t, ErrRequired, fmt.Sprintf("the required flag `%cv' was not specified", defaultShortOptDelimiter), &opts) +} + +func TestShortRequiredFalsy1(t *testing.T) { + var opts = struct { + Value bool `short:"v" required:"false"` + }{} + + assertParseSuccess(t, &opts) +} + +func TestShortRequiredFalsy2(t *testing.T) { + var opts = struct { + Value bool `short:"v" required:"no"` + }{} + + assertParseSuccess(t, &opts) +} + +func TestShortMultiConcat(t *testing.T) { + var opts = struct { + V bool `short:"v"` + O bool `short:"o"` + F bool `short:"f"` + }{} + + ret := assertParseSuccess(t, &opts, "-vo", "-f") + + assertStringArray(t, ret, []string{}) + + if !opts.V { + t.Errorf("Expected V to be true") + } + + if !opts.O { + t.Errorf("Expected O to be true") + } + + if !opts.F { + t.Errorf("Expected F to be true") + } +} + +func TestShortMultiRequiredConcat(t *testing.T) { + var opts = struct { + V bool `short:"v" required:"true"` + O bool `short:"o" required:"true"` + F bool `short:"f" required:"true"` + }{} + + ret := assertParseSuccess(t, &opts, "-vo", "-f") + + assertStringArray(t, ret, []string{}) + + if !opts.V { + t.Errorf("Expected V to be true") + } + + if !opts.O { + t.Errorf("Expected O to be true") + } + + if !opts.F { + t.Errorf("Expected F to be true") + } +} + +func TestShortMultiSlice(t *testing.T) { + var opts = struct { + Values []bool `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v", "-v") + + assertStringArray(t, ret, []string{}) + assertBoolArray(t, opts.Values, []bool{true, true}) +} + +func TestShortMultiSliceConcat(t *testing.T) { + var opts = struct { + Values []bool `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-vvv") + + assertStringArray(t, ret, []string{}) + assertBoolArray(t, opts.Values, []bool{true, true, true}) +} + +func TestShortWithEqualArg(t *testing.T) { + var opts = struct { + Value string `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v=value") + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "value") +} + +func TestShortWithArg(t *testing.T) { + var opts = struct { + Value string `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-vvalue") + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "value") +} + +func TestShortArg(t *testing.T) { + var opts = struct { + Value string `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-v", "value") + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "value") +} + +func TestShortMultiWithEqualArg(t *testing.T) { + var opts = struct { + F []bool `short:"f"` + Value string `short:"v"` + }{} + + assertParseFail(t, ErrExpectedArgument, fmt.Sprintf("expected argument for flag `%cv'", defaultShortOptDelimiter), &opts, "-ffv=value") +} + +func TestShortMultiArg(t *testing.T) { + var opts = struct { + F []bool `short:"f"` + Value string `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-ffv", "value") + + assertStringArray(t, ret, []string{}) + assertBoolArray(t, opts.F, []bool{true, true}) + assertString(t, opts.Value, "value") +} + +func TestShortMultiArgConcatFail(t *testing.T) { + var opts = struct { + F []bool `short:"f"` + Value string `short:"v"` + }{} + + assertParseFail(t, ErrExpectedArgument, fmt.Sprintf("expected argument for flag `%cv'", defaultShortOptDelimiter), &opts, "-ffvvalue") +} + +func TestShortMultiArgConcat(t *testing.T) { + var opts = struct { + F []bool `short:"f"` + Value string `short:"v"` + }{} + + ret := assertParseSuccess(t, &opts, "-vff") + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "ff") +} + +func TestShortOptional(t *testing.T) { + var opts = struct { + F []bool `short:"f"` + Value string `short:"v" optional:"yes" optional-value:"value"` + }{} + + ret := assertParseSuccess(t, &opts, "-fv", "f") + + assertStringArray(t, ret, []string{"f"}) + assertString(t, opts.Value, "value") +} + +func TestShortOptionalFalsy1(t *testing.T) { + var opts = struct { + F []bool `short:"f"` + Value string `short:"v" optional:"false" optional-value:"value"` + }{} + + ret := assertParseSuccess(t, &opts, "-fv", "f") + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "f") +} + +func TestShortOptionalFalsy2(t *testing.T) { + var opts = struct { + F []bool `short:"f"` + Value string `short:"v" optional:"no" optional-value:"value"` + }{} + + ret := assertParseSuccess(t, &opts, "-fv", "f") + + assertStringArray(t, ret, []string{}) + assertString(t, opts.Value, "f") +} diff --git a/vendor/github.com/jessevdk/go-flags/tag_test.go b/vendor/github.com/jessevdk/go-flags/tag_test.go new file mode 100644 index 0000000..9daa740 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/tag_test.go @@ -0,0 +1,38 @@ +package flags + +import ( + "testing" +) + +func TestTagMissingColon(t *testing.T) { + var opts = struct { + Value bool `short` + }{} + + assertParseFail(t, ErrTag, "expected `:' after key name, but got end of tag (in `short`)", &opts, "") +} + +func TestTagMissingValue(t *testing.T) { + var opts = struct { + Value bool `short:` + }{} + + assertParseFail(t, ErrTag, "expected `\"' to start tag value at end of tag (in `short:`)", &opts, "") +} + +func TestTagMissingQuote(t *testing.T) { + var opts = struct { + Value bool `short:"v` + }{} + + assertParseFail(t, ErrTag, "expected end of tag value `\"' at end of tag (in `short:\"v`)", &opts, "") +} + +func TestTagNewline(t *testing.T) { + var opts = struct { + Value bool `long:"verbose" description:"verbose +something"` + }{} + + assertParseFail(t, ErrTag, "unexpected newline in tag value `description' (in `long:\"verbose\" description:\"verbose\nsomething\"`)", &opts, "") +} diff --git a/vendor/github.com/jessevdk/go-flags/termsize.go b/vendor/github.com/jessevdk/go-flags/termsize.go new file mode 100644 index 0000000..df97e7e --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/termsize.go @@ -0,0 +1,28 @@ +// +build !windows,!plan9,!solaris + +package flags + +import ( + "syscall" + "unsafe" +) + +type winsize struct { + row, col uint16 + xpixel, ypixel uint16 +} + +func getTerminalColumns() int { + ws := winsize{} + + if tIOCGWINSZ != 0 { + syscall.Syscall(syscall.SYS_IOCTL, + uintptr(0), + uintptr(tIOCGWINSZ), + uintptr(unsafe.Pointer(&ws))) + + return int(ws.col) + } + + return 80 +} diff --git a/vendor/github.com/jessevdk/go-flags/termsize_linux.go b/vendor/github.com/jessevdk/go-flags/termsize_linux.go new file mode 100644 index 0000000..e3975e2 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/termsize_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package flags + +const ( + tIOCGWINSZ = 0x5413 +) diff --git a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go new file mode 100644 index 0000000..2a9bbe0 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go @@ -0,0 +1,7 @@ +// +build windows plan9 solaris + +package flags + +func getTerminalColumns() int { + return 80 +} diff --git a/vendor/github.com/jessevdk/go-flags/termsize_other.go b/vendor/github.com/jessevdk/go-flags/termsize_other.go new file mode 100644 index 0000000..3082151 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/termsize_other.go @@ -0,0 +1,7 @@ +// +build !darwin,!freebsd,!netbsd,!openbsd,!linux + +package flags + +const ( + tIOCGWINSZ = 0 +) diff --git a/vendor/github.com/jessevdk/go-flags/termsize_unix.go b/vendor/github.com/jessevdk/go-flags/termsize_unix.go new file mode 100644 index 0000000..fcc1186 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/termsize_unix.go @@ -0,0 +1,7 @@ +// +build darwin freebsd netbsd openbsd + +package flags + +const ( + tIOCGWINSZ = 0x40087468 +) diff --git a/vendor/github.com/jessevdk/go-flags/unknown_test.go b/vendor/github.com/jessevdk/go-flags/unknown_test.go new file mode 100644 index 0000000..858be45 --- /dev/null +++ b/vendor/github.com/jessevdk/go-flags/unknown_test.go @@ -0,0 +1,66 @@ +package flags + +import ( + "testing" +) + +func TestUnknownFlags(t *testing.T) { + var opts = struct { + Verbose []bool `short:"v" long:"verbose" description:"Verbose output"` + }{} + + args := []string{ + "-f", + } + + p := NewParser(&opts, 0) + args, err := p.ParseArgs(args) + + if err == nil { + t.Fatal("Expected error for unknown argument") + } +} + +func TestIgnoreUnknownFlags(t *testing.T) { + var opts = struct { + Verbose []bool `short:"v" long:"verbose" description:"Verbose output"` + }{} + + args := []string{ + "hello", + "world", + "-v", + "--foo=bar", + "--verbose", + "-f", + } + + p := NewParser(&opts, IgnoreUnknown) + args, err := p.ParseArgs(args) + + if err != nil { + t.Fatal(err) + } + + exargs := []string{ + "hello", + "world", + "--foo=bar", + "-f", + } + + issame := (len(args) == len(exargs)) + + if issame { + for i := 0; i < len(args); i++ { + if args[i] != exargs[i] { + issame = false + break + } + } + } + + if !issame { + t.Fatalf("Expected %v but got %v", exargs, args) + } +} diff --git a/vendor/github.com/moby/moby/.dockerignore b/vendor/github.com/moby/moby/.dockerignore new file mode 100644 index 0000000..082cac9 --- /dev/null +++ b/vendor/github.com/moby/moby/.dockerignore @@ -0,0 +1,4 @@ +bundles +.gopath +vendor/pkg +.go-pkg-cache diff --git a/vendor/github.com/moby/moby/.github/ISSUE_TEMPLATE.md b/vendor/github.com/moby/moby/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..7362480 --- /dev/null +++ b/vendor/github.com/moby/moby/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,64 @@ + + +**Description** + + + +**Steps to reproduce the issue:** +1. +2. +3. + +**Describe the results you received:** + + +**Describe the results you expected:** + + +**Additional information you deem important (e.g. issue happens only occasionally):** + +**Output of `docker version`:** + +``` +(paste your output here) +``` + +**Output of `docker info`:** + +``` +(paste your output here) +``` + +**Additional environment details (AWS, VirtualBox, physical, etc.):** diff --git a/vendor/github.com/moby/moby/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/moby/moby/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..4269818 --- /dev/null +++ b/vendor/github.com/moby/moby/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,30 @@ + + +**- What I did** + +**- How I did it** + +**- How to verify it** + +**- Description for the changelog** + + + +**- A picture of a cute animal (not mandatory but encouraged)** + diff --git a/vendor/github.com/moby/moby/.gitignore b/vendor/github.com/moby/moby/.gitignore new file mode 100644 index 0000000..be8b03d --- /dev/null +++ b/vendor/github.com/moby/moby/.gitignore @@ -0,0 +1,33 @@ +# Docker project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +*.exe +*.exe~ +*.orig +*.test +.*.swp +.DS_Store +# a .bashrc may be added to customize the build environment +.bashrc +.editorconfig +.gopath/ +.go-pkg-cache/ +autogen/ +bundles/ +cmd/dockerd/dockerd +cmd/docker/docker +dockerversion/version_autogen.go +dockerversion/version_autogen_unix.go +docs/AWS_S3_BUCKET +docs/GITCOMMIT +docs/GIT_BRANCH +docs/VERSION +docs/_build +docs/_static +docs/_templates +docs/changed-files +# generated by man/md2man-all.sh +man/man1 +man/man5 +man/man8 +vendor/pkg/ diff --git a/vendor/github.com/moby/moby/.mailmap b/vendor/github.com/moby/moby/.mailmap new file mode 100644 index 0000000..014cc0e --- /dev/null +++ b/vendor/github.com/moby/moby/.mailmap @@ -0,0 +1,320 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + + + + +Guillaume J. Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni jpetazzo +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit +Sven Dowideit + +Alexander Morozov +Alexander Morozov + +O.S. Tezer + +Roberto G. Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + + +Francisco Carriedo + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle + + + + +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Jacob Atzen +Jeff Nickoloff +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +Madhu Venugopal +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin +Allen Sun +Adrien Gallouët + +Anuj Bahuguna +Anusha Ragunathan +Avi Miller +Brent Salisbury +Chander G +Chun Chen +Ying Li +Daehyeok Mun + +Daniel, Dao Quang Minh +Daniel Nephin +Dave Tucker +Doug Tangren +Frederick F. Kautz IV +Ben Golub +Harold Cooper +hsinko <21551195@zju.edu.cn> +Josh Hawn +Justin Cormack + + +Kamil Domański +Lei Jitang + +Linus Heckemann + +Lynda O'Leary + +Marianna Tessel +Michael Huettermann +Moysés Borges + +Nigel Poulton +Qiang Huang + +Boaz Shuster +Shuwei Hao + +Soshi Katsuta + +Stefan Berger + +Stephen Day + +Toli Kuznets +Tristan Carel + +Vincent Demeester + +Vishnu Kannan +xlgao-zju xlgao +yuchangchun y00277921 + + + + +Hao Shu Wei + + + + + + + +Shengbo Song mYmNeo +Shengbo Song + +Sylvain Bellemare + + + +Arnaud Porterie + +David M. Karr + + + +Kenfe-Mickaël Laventure + + + + + +Runshen Zhu +Tom Barlow +Xianlu Bird +Dan Feldman +Harry Zhang +Alex Chen alexchen +Alex Ellis +Alicia Lauerman +Ben Bonnefoy +Bhumika Bayani +Bingshen Wang +Chen Chuanliang +Chen Mingjie +CUI Wei cuiwei13 +Dattatraya Kumbhar +Diego Siqueira +Evelyn Xu +Felix Ruess +Gabriel Nicolas Avellaneda +Gang Qiao <1373319223@qq.com> +Helen Xie +Jacob Tomlinson +Jiuyue Ma +Jose Diaz-Gonzalez +Josh Eveleth +Josh Wilson +Kevin Kern +Kunal Kushwaha +Lajos Papp +Lyn +Michael Käufl +Michal Minář +Michael Hudson-Doyle +Milind Chawre +Ma Müller +Roberto Muñoz Fernández +Stefan S. +Sun Gengze <690388648@qq.com> +Tim Zju <21651152@zju.edu.cn> +Tõnis Tiigi +Wang Ping +Wang Yuexiao +Wewang Xiaorenfine +Wei Wu cizixs +Ying Li +Yu Peng +Yu Peng +Zhenkun Bi +Zhu Kunjia diff --git a/vendor/github.com/moby/moby/AUTHORS b/vendor/github.com/moby/moby/AUTHORS new file mode 100644 index 0000000..2aedbbd --- /dev/null +++ b/vendor/github.com/moby/moby/AUTHORS @@ -0,0 +1,1731 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Avilla +Adam Kunk +Adam Miller +Adam Mills +Adam Singer +Adam Walz +Addam Hardy +Aditi Rajagopal +Aditya +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Suda +Akira Koyasu +Akshay Karle +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Ali Dehghani +Alicia Lauerman +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Richards +amangoel +Amen Belayneh +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Hsu +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew Munsell +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +Arash Deshmeh +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill W +bin liu +Blake Geno +Boaz Shuster +bobby abbott +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Gavin +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Hiltgen +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elena Morozova +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Windisch +Eric Yang +Eric-Olivier Lamey +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik St. Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabiano Rosas +Fabio Falci +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +fonglh +fortinux +Francesc Campoy +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +frosforever +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gaurav +gautam, prasanna +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +gs11 +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +Harshal Patil +He Simei +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou <1187766782@qq.com> +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Lee +Ian Main +Ian Truslove +Iavael +Icaro Seara +Igor Dolzhikov +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +J Bruni +J. Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jake Champlin +Jake Moshenko +Jake Sanders +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Janonymous +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Ji.Zhilong +Jian Zhang +jianbosun +Jilles Oldenbeuving +Jim Alateras +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John Warwick +John Willis +johnharris85 +Jon Wedaman +Jonas Pfenniger +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Menard +Kevin P. Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill Kolyshkin +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin L +Konstantin Pelykh +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leo Gallucci +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Marshall +Lewis Peckover +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liaoqingwei +Lily Guo +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +lukemarsden +Lynda O'Leary +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mahesh Tiyyagura +malnick +Malte Janduda +manchoz +Manfred Touron +Manfred Zabarauskas +Mansi Nahar +mansinahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Robenolt +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minář +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Mickaël FORTUNATO +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +Muayyad Alsadi +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +nick +Nick DeCoursin +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +Nicola Kabar +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +NikolaMandic +nikolas +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +orkaa +Oskar Niburski +Otto Kekäläinen +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +perhapszzy@sina.com +pestophagous +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Prayag Verma +Przemek Hejman +pysqz +qhuang +Qiang Huang +Qinglan Peng +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +sakeven +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +Satoshi Tagomori +scaleoutsean +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean McIntyre +Sean OMeara +Sean P. Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Serhat Gülçiçek +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Silvan Jegen +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +skaasten +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Wilkie +Tom X. Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonny Xu +Tony Daws +Tony Miller +toogley +Torstein Husebø +Tõnis Tiigi +tpng +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Algaze +Victor Coisne +Victor Costan +Victor I. Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Ward Vandewege +WarheadsSE +Wayne Chang +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenkai Yin +Wentao Zhang +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +Wonjun Kim +xamyzhao +Xianglin Gao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +yangshukui +Yanqiang Miao +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +Youcef YEKHLEF +Yu Peng +Yuan Sun +yuchangchun +yuchengxia +Yunxiang Huang +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +Zhenan Ye <21551168@zju.edu.cn> +zhouhao +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +搏通 diff --git a/vendor/github.com/moby/moby/CHANGELOG.md b/vendor/github.com/moby/moby/CHANGELOG.md new file mode 100644 index 0000000..cc0381b --- /dev/null +++ b/vendor/github.com/moby/moby/CHANGELOG.md @@ -0,0 +1,3437 @@ +# Changelog + +Items starting with `DEPRECATE` are important deprecation notices. For more +information on the list of deprecated flags and APIs please have a look at +https://docs.docker.com/engine/deprecated/ where target removal dates can also +be found. + +## 17.03.2-ce (2017-05-29) + +### Networking + +- Fix a concurrency issue preventing network creation [#33273](https://github.com/moby/moby/pull/33273) + +### Runtime + +- Relabel secrets path to avoid a Permission Denied on selinux enabled systems [#33236](https://github.com/moby/moby/pull/33236) (ref [#32529](https://github.com/moby/moby/pull/32529) +- Fix cases where local volume were not properly relabeled if needed [#33236](https://github.com/moby/moby/pull/33236) (ref [#29428](https://github.com/moby/moby/pull/29428)) +- Fix an issue while upgrading if a plugin rootfs was still mounted [#33236](https://github.com/moby/moby/pull/33236) (ref [#32525](https://github.com/moby/moby/pull/32525)) +- Fix an issue where volume wouldn't default to the `rprivate` propagation mode [#33236](https://github.com/moby/moby/pull/33236) (ref [#32851](https://github.com/moby/moby/pull/32851)) +- Fix a panic that could occur when a volume driver could not be retrieved [#33236](https://github.com/moby/moby/pull/33236) (ref [#32347](https://github.com/moby/moby/pull/32347)) ++ Add a warning in `docker info` when the `overlay` or `overlay2` graphdriver is used on a filesystem without `d_type` support [#33236](https://github.com/moby/moby/pull/33236) (ref [#31290](https://github.com/moby/moby/pull/31290)) +- Fix an issue with backporting mount spec to older volumes [#33207](https://github.com/moby/moby/pull/33207) +- Fix issue where a failed unmount can lead to data loss on local volume remove [#33120](https://github.com/moby/moby/pull/33120) + +### Swarm Mode + +- Fix a case where tasks could get killed unexpectedly [#33118](https://github.com/moby/moby/pull/33118) +- Fix an issue preventing to deploy services if the registry cannot be reached despite the needed images being locally present [#33117](https://github.com/moby/moby/pull/33117) + +## 17.03.1-ce (2017-03-27) + +### Remote API (v1.27) & Client + +* Fix autoremove on older api [#31692](https://github.com/docker/docker/pull/31692) +* Fix default network customization for a stack [#31258](https://github.com/docker/docker/pull/31258/) +* Correct CPU usage calculation in presence of offline CPUs and newer Linux [#31802](https://github.com/docker/docker/pull/31802) +* Fix issue where service healthcheck is `{}` in remote API [#30197](https://github.com/docker/docker/pull/30197) + +### Runtime + +* Update runc to 54296cf40ad8143b62dbcaa1d90e520a2136ddfe [#31666](https://github.com/docker/docker/pull/31666) + * Ignore cgroup2 mountpoints [opencontainers/runc#1266](https://github.com/opencontainers/runc/pull/1266) +* Update containerd to 4ab9917febca54791c5f071a9d1f404867857fcc [#31662](https://github.com/docker/docker/pull/31662) [#31852](https://github.com/docker/docker/pull/31852) + * Register healtcheck service before calling restore() [docker/containerd#609](https://github.com/docker/containerd/pull/609) +* Fix `docker exec` not working after unattended upgrades that reload apparmor profiles [#31773](https://github.com/docker/docker/pull/31773) +* Fix unmounting layer without merge dir with Overlay2 [#31069](https://github.com/docker/docker/pull/31069) +* Do not ignore "volume in use" errors when force-delete [#31450](https://github.com/docker/docker/pull/31450) + +### Swarm Mode + +* Update swarmkit to 17756457ad6dc4d8a639a1f0b7a85d1b65a617bb [#31807](https://github.com/docker/docker/pull/31807) + * Scheduler now correctly considers tasks which have been assigned to a node but aren't yet running [docker/swarmkit#1980](https://github.com/docker/swarmkit/pull/1980) + * Allow removal of a network when only dead tasks reference it [docker/swarmkit#2018](https://github.com/docker/swarmkit/pull/2018) + * Retry failed network allocations less aggressively [docker/swarmkit#2021](https://github.com/docker/swarmkit/pull/2021) + * Avoid network allocation for tasks that are no longer running [docker/swarmkit#2017](https://github.com/docker/swarmkit/pull/2017) + * Bookkeeping fixes inside network allocator allocator [docker/swarmkit#2019](https://github.com/docker/swarmkit/pull/2019) [docker/swarmkit#2020](https://github.com/docker/swarmkit/pull/2020) +* Avoid timing out service create or update when a registry is slow to respond [#31861](https://github.com/docker/docker/pull/31861) + +### Windows + +* Cleanup HCS on restore [#31503](https://github.com/docker/docker/pull/31503) + +## 17.03.0-ce (2017-03-01) + +**IMPORTANT**: Starting with this release, Docker is on a monthly release cycle and uses a +new YY.MM versioning scheme to reflect this. Two channels are available: monthly and quarterly. +Any given monthly release will only receive security and bugfixes until the next monthly +release is available. Quarterly releases receive security and bugfixes for 4 months after +initial release. This release includes bugfixes for 1.13.1 but +there are no major feature additions and the API version stays the same. +Upgrading from Docker 1.13.1 to 17.03.0 is expected to be simple and low-risk. + +### Client + +* Fix panic in `docker stats --format` [#30776](https://github.com/docker/docker/pull/30776) + +### Contrib + +* Update various `bash` and `zsh` completion scripts [#30823](https://github.com/docker/docker/pull/30823), [#30945](https://github.com/docker/docker/pull/30945) and more... +* Block obsolete socket families in default seccomp profile - mitigates unpatched kernels' CVE-2017-6074 [#29076](https://github.com/docker/docker/pull/29076) + +### Networking + +* Fix bug on overlay encryption keys rotation in cross-datacenter swarm [#30727](https://github.com/docker/docker/pull/30727) +* Fix side effect panic in overlay encryption and network control plane communication failure ("No installed keys could decrypt the message") on frequent swarm leader re-election [#25608](https://github.com/docker/docker/pull/25608) +* Several fixes around system responsiveness and datapath programming when using overlay network with external kv-store [docker/libnetwork#1639](https://github.com/docker/libnetwork/pull/1639), [docker/libnetwork#1632](https://github.com/docker/libnetwork/pull/1632) and more... +* Discard incoming plain vxlan packets for encrypted overlay network [#31170](https://github.com/docker/docker/pull/31170) +* Release the network attachment on allocation failure [#31073](https://github.com/docker/docker/pull/31073) +* Fix port allocation when multiple published ports map to the same target port [docker/swarmkit#1835](https://github.com/docker/swarmkit/pull/1835) + +### Runtime + +* Fix a deadlock in docker logs [#30223](https://github.com/docker/docker/pull/30223) +* Fix cpu spin waiting for log write events [#31070](https://github.com/docker/docker/pull/31070) +* Fix a possible crash when using journald [#31231](https://github.com/docker/docker/pull/31231) [#31263](https://github.com/docker/docker/pull/31263) +* Fix a panic on close of nil channel [#31274](https://github.com/docker/docker/pull/31274) +* Fix duplicate mount point for `--volumes-from` in `docker run` [#29563](https://github.com/docker/docker/pull/29563) +* Fix `--cache-from` does not cache last step [#31189](https://github.com/docker/docker/pull/31189) + +### Swarm Mode + +* Shutdown leaks an error when the container was never started [#31279](https://github.com/docker/docker/pull/31279) +* Fix possibility of tasks getting stuck in the "NEW" state during a leader failover [docker/swarmkit#1938](https://github.com/docker/swarmkit/pull/1938) +* Fix extraneous task creations for global services that led to confusing replica counts in `docker service ls` [docker/swarmkit#1957](https://github.com/docker/swarmkit/pull/1957) +* Fix problem that made rolling updates slow when `task-history-limit` was set to 1 [docker/swarmkit#1948](https://github.com/docker/swarmkit/pull/1948) +* Restart tasks elsewhere, if appropriate, when they are shut down as a result of nodes no longer satisfying constraints [docker/swarmkit#1958](https://github.com/docker/swarmkit/pull/1958) + +## 1.13.1 (2017-02-08) + +**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, +the `overlay2`, or `overlay` is now used by default (if the kernel supports it). +To use devicemapper, you can manually configure the storage driver to use through +the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` +configuration file. + +**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental +version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12 +_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. + +If you have already upgraded to Docker 1.13 without uninstalling +previously-installed plugins, you may see this message when the Docker daemon +starts: + + Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv + +To manually remove all plugins and resolve this problem, take the following steps: + +1. Remove plugins.json from: `/var/lib/docker/plugins/`. +2. Restart Docker. Verify that the Docker daemon starts with no errors. +3. Reinstall your plugins. + +### Contrib + +* Do not require a custom build of tini [#28454](https://github.com/docker/docker/pull/28454) +* Upgrade to Go 1.7.5 [#30489](https://github.com/docker/docker/pull/30489) + +### Remote API (v1.26) & Client + ++ Support secrets in docker stack deploy with compose file [#30144](https://github.com/docker/docker/pull/30144) + +### Runtime + +* Fix size issue in `docker system df` [#30378](https://github.com/docker/docker/pull/30378) +* Fix error on `docker inspect` when Swarm certificates were expired. [#29246](https://github.com/docker/docker/pull/29246) +* Fix deadlock on v1 plugin with activate error [#30408](https://github.com/docker/docker/pull/30408) +* Fix SELinux regression [#30649](https://github.com/docker/docker/pull/30649) + +### Plugins + +* Support global scoped network plugins (v2) in swarm mode [#30332](https://github.com/docker/docker/pull/30332) ++ Add `docker plugin upgrade` [#29414](https://github.com/docker/docker/pull/29414) + +### Windows + +* Fix small regression with old plugins in Windows [#30150](https://github.com/docker/docker/pull/30150) +* Fix warning on Windows [#30730](https://github.com/docker/docker/pull/30730) + +## 1.13.0 (2017-01-18) + +**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, +the `overlay2`, or `overlay` is now used by default (if the kernel supports it). +To use devicemapper, you can manually configure the storage driver to use through +the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` +configuration file. + +**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental +version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12 +_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. + +If you have already upgraded to Docker 1.13 without uninstalling +previously-installed plugins, you may see this message when the Docker daemon +starts: + + Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv + +To manually remove all plugins and resolve this problem, take the following steps: + +1. Remove plugins.json from: `/var/lib/docker/plugins/`. +2. Restart Docker. Verify that the Docker daemon starts with no errors. +3. Reinstall your plugins. + +### Builder + ++ Add capability to specify images used as a cache source on build. These images do not need to have local parent chain and can be pulled from other registries [#26839](https://github.com/docker/docker/pull/26839) ++ (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641) +* Fix dockerfile parser with empty line after escape [#24725](https://github.com/docker/docker/pull/24725) +- Add step number on `docker build` [#24978](https://github.com/docker/docker/pull/24978) ++ Add support for compressing build context during image build [#25837](https://github.com/docker/docker/pull/25837) ++ add `--network` to `docker build` [#27702](https://github.com/docker/docker/pull/27702) +- Fix inconsistent behavior between `--label` flag on `docker build` and `docker run` [#26027](https://github.com/docker/docker/issues/26027) +- Fix image layer inconsistencies when using the overlay storage driver [#27209](https://github.com/docker/docker/pull/27209) +* Unused build-args are now allowed. A warning is presented instead of an error and failed build [#27412](https://github.com/docker/docker/pull/27412) +- Fix builder cache issue on Windows [#27805](https://github.com/docker/docker/pull/27805) ++ Allow `USER` in builder on Windows [#28415](https://github.com/docker/docker/pull/28415) ++ Handle env case-insensitive on Windows [#28725](https://github.com/docker/docker/pull/28725) + +### Contrib + ++ Add support for building docker debs for Ubuntu 16.04 Xenial on PPC64LE [#23438](https://github.com/docker/docker/pull/23438) ++ Add support for building docker debs for Ubuntu 16.04 Xenial on s390x [#26104](https://github.com/docker/docker/pull/26104) ++ Add support for building docker debs for Ubuntu 16.10 Yakkety Yak on PPC64LE [#28046](https://github.com/docker/docker/pull/28046) +- Add RPM builder for VMWare Photon OS [#24116](https://github.com/docker/docker/pull/24116) ++ Add shell completions to tgz [#27735](https://github.com/docker/docker/pull/27735) +* Update the install script to allow using the mirror in China [#27005](https://github.com/docker/docker/pull/27005) ++ Add DEB builder for Ubuntu 16.10 Yakkety Yak [#27993](https://github.com/docker/docker/pull/27993) ++ Add RPM builder for Fedora 25 [#28222](https://github.com/docker/docker/pull/28222) ++ Add `make deb` support for aarch64 [#27625](https://github.com/docker/docker/pull/27625) + +### Distribution + +* Update notary dependency to 0.4.2 (full changelogs [here](https://github.com/docker/notary/releases/tag/v0.4.2)) [#27074](https://github.com/docker/docker/pull/27074) + - Support for compilation on windows [docker/notary#970](https://github.com/docker/notary/pull/970) + - Improved error messages for client authentication errors [docker/notary#972](https://github.com/docker/notary/pull/972) + - Support for finding keys that are anywhere in the `~/.docker/trust/private` directory, not just under `~/.docker/trust/private/root_keys` or `~/.docker/trust/private/tuf_keys` [docker/notary#981](https://github.com/docker/notary/pull/981) + - Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. [docker/notary#982](https://github.com/docker/notary/pull/982) + - Improve root validation and yubikey debug logging [docker/notary#858](https://github.com/docker/notary/pull/858) [docker/notary#891](https://github.com/docker/notary/pull/891) + - Warn if certificates for root or delegations are near expiry [docker/notary#802](https://github.com/docker/notary/pull/802) + - Warn if role metadata is near expiry [docker/notary#786](https://github.com/docker/notary/pull/786) + - Fix passphrase retrieval attempt counting and terminal detection [docker/notary#906](https://github.com/docker/notary/pull/906) +- Avoid unnecessary blob uploads when different users push same layers to authenticated registry [#26564](https://github.com/docker/docker/pull/26564) +* Allow external storage for registry credentials [#26354](https://github.com/docker/docker/pull/26354) + +### Logging + +* Standardize the default logging tag value in all logging drivers [#22911](https://github.com/docker/docker/pull/22911) +- Improve performance and memory use when logging of long log lines [#22982](https://github.com/docker/docker/pull/22982) ++ Enable syslog driver for windows [#25736](https://github.com/docker/docker/pull/25736) ++ Add Logentries Driver [#27471](https://github.com/docker/docker/pull/27471) ++ Update of AWS log driver to support tags [#27707](https://github.com/docker/docker/pull/27707) ++ Unix socket support for fluentd [#26088](https://github.com/docker/docker/pull/26088) +* Enable fluentd logging driver on Windows [#28189](https://github.com/docker/docker/pull/28189) +- Sanitize docker labels when used as journald field names [#23725](https://github.com/docker/docker/pull/23725) +- Fix an issue where `docker logs --tail` returned less lines than expected [#28203](https://github.com/docker/docker/pull/28203) +- Splunk Logging Driver: performance and reliability improvements [#26207](https://github.com/docker/docker/pull/26207) +- Splunk Logging Driver: configurable formats and skip for verifying connection [#25786](https://github.com/docker/docker/pull/25786) + +### Networking + ++ Add `--attachable` network support to enable `docker run` to work in swarm-mode overlay network [#25962](https://github.com/docker/docker/pull/25962) ++ Add support for host port PublishMode in services using the `--publish` option in `docker service create` [#27917](https://github.com/docker/docker/pull/27917) and [#28943](https://github.com/docker/docker/pull/28943) ++ Add support for Windows server 2016 overlay network driver (requires upcoming ws2016 update) [#28182](https://github.com/docker/docker/pull/28182) +* Change the default `FORWARD` policy to `DROP` [#28257](https://github.com/docker/docker/pull/28257) ++ Add support for specifying static IP addresses for predefined network on windows [#22208](https://github.com/docker/docker/pull/22208) +- Fix `--publish` flag on `docker run` not working with IPv6 addresses [#27860](https://github.com/docker/docker/pull/27860) +- Fix inspect network show gateway with mask [#25564](https://github.com/docker/docker/pull/25564) +- Fix an issue where multiple addresses in a bridge may cause `--fixed-cidr` to not have the correct addresses [#26659](https://github.com/docker/docker/pull/26659) ++ Add creation timestamp to `docker network inspect` [#26130](https://github.com/docker/docker/pull/26130) +- Show peer nodes in `docker network inspect` for swarm overlay networks [#28078](https://github.com/docker/docker/pull/28078) +- Enable ping for service VIP address [#28019](https://github.com/docker/docker/pull/28019) + +### Plugins + +- Move plugins out of experimental [#28226](https://github.com/docker/docker/pull/28226) +- Add `--force` on `docker plugin remove` [#25096](https://github.com/docker/docker/pull/25096) +* Add support for dynamically reloading authorization plugins [#22770](https://github.com/docker/docker/pull/22770) ++ Add description in `docker plugin ls` [#25556](https://github.com/docker/docker/pull/25556) ++ Add `-f`/`--format` to `docker plugin inspect` [#25990](https://github.com/docker/docker/pull/25990) ++ Add `docker plugin create` command [#28164](https://github.com/docker/docker/pull/28164) +* Send request's TLS peer certificates to authorization plugins [#27383](https://github.com/docker/docker/pull/27383) +* Support for global-scoped network and ipam plugins in swarm-mode [#27287](https://github.com/docker/docker/pull/27287) +* Split `docker plugin install` into two API call `/privileges` and `/pull` [#28963](https://github.com/docker/docker/pull/28963) + +### Remote API (v1.25) & Client + ++ Support `docker stack deploy` from a Compose file [#27998](https://github.com/docker/docker/pull/27998) ++ (experimental) Implement checkpoint and restore [#22049](https://github.com/docker/docker/pull/22049) ++ Add `--format` flag to `docker info` [#23808](https://github.com/docker/docker/pull/23808) +* Remove `--name` from `docker volume create` [#23830](https://github.com/docker/docker/pull/23830) ++ Add `docker stack ls` [#23886](https://github.com/docker/docker/pull/23886) ++ Add a new `is-task` ps filter [#24411](https://github.com/docker/docker/pull/24411) ++ Add `--env-file` flag to `docker service create` [#24844](https://github.com/docker/docker/pull/24844) ++ Add `--format` on `docker stats` [#24987](https://github.com/docker/docker/pull/24987) ++ Make `docker node ps` default to `self` in swarm node [#25214](https://github.com/docker/docker/pull/25214) ++ Add `--group` in `docker service create` [#25317](https://github.com/docker/docker/pull/25317) ++ Add `--no-trunc` to service/node/stack ps output [#25337](https://github.com/docker/docker/pull/25337) ++ Add Logs to `ContainerAttachOptions` so go clients can request to retrieve container logs as part of the attach process [#26718](https://github.com/docker/docker/pull/26718) ++ Allow client to talk to an older server [#27745](https://github.com/docker/docker/pull/27745) +* Inform user client-side that a container removal is in progress [#26074](https://github.com/docker/docker/pull/26074) ++ Add `Isolation` to the /info endpoint [#26255](https://github.com/docker/docker/pull/26255) ++ Add `userns` to the /info endpoint [#27840](https://github.com/docker/docker/pull/27840) +- Do not allow more than one mode be requested at once in the services endpoint [#26643](https://github.com/docker/docker/pull/26643) ++ Add capability to /containers/create API to specify mounts in a more granular and safer way [#22373](https://github.com/docker/docker/pull/22373) ++ Add `--format` flag to `network ls` and `volume ls` [#23475](https://github.com/docker/docker/pull/23475) +* Allow the top-level `docker inspect` command to inspect any kind of resource [#23614](https://github.com/docker/docker/pull/23614) ++ Add --cpus flag to control cpu resources for `docker run` and `docker create`, and add `NanoCPUs` to `HostConfig` [#27958](https://github.com/docker/docker/pull/27958) +- Allow unsetting the `--entrypoint` in `docker run` or `docker create` [#23718](https://github.com/docker/docker/pull/23718) +* Restructure CLI commands by adding `docker image` and `docker container` commands for more consistency [#26025](https://github.com/docker/docker/pull/26025) +- Remove `COMMAND` column from `service ls` output [#28029](https://github.com/docker/docker/pull/28029) ++ Add `--format` to `docker events` [#26268](https://github.com/docker/docker/pull/26268) +* Allow specifying multiple nodes on `docker node ps` [#26299](https://github.com/docker/docker/pull/26299) +* Restrict fractional digits to 2 decimals in `docker images` output [#26303](https://github.com/docker/docker/pull/26303) ++ Add `--dns-option` to `docker run` [#28186](https://github.com/docker/docker/pull/28186) ++ Add Image ID to container commit event [#28128](https://github.com/docker/docker/pull/28128) ++ Add external binaries version to docker info [#27955](https://github.com/docker/docker/pull/27955) ++ Add information for `Manager Addresses` in the output of `docker info` [#28042](https://github.com/docker/docker/pull/28042) ++ Add a new reference filter for `docker images` [#27872](https://github.com/docker/docker/pull/27872) + +### Runtime + ++ Add `--experimental` daemon flag to enable experimental features, instead of shipping them in a separate build [#27223](https://github.com/docker/docker/pull/27223) ++ Add a `--shutdown-timeout` daemon flag to specify the default timeout (in seconds) to stop containers gracefully before daemon exit [#23036](https://github.com/docker/docker/pull/23036) ++ Add `--stop-timeout` to specify the timeout value (in seconds) for individual containers to stop [#22566](https://github.com/docker/docker/pull/22566) ++ Add a new daemon flag `--userland-proxy-path` to allow configuring the userland proxy instead of using the hardcoded `docker-proxy` from `$PATH` [#26882](https://github.com/docker/docker/pull/26882) ++ Add boolean flag `--init` on `dockerd` and on `docker run` to use [tini](https://github.com/krallin/tini) a zombie-reaping init process as PID 1 [#26061](https://github.com/docker/docker/pull/26061) [#28037](https://github.com/docker/docker/pull/28037) ++ Add a new daemon flag `--init-path` to allow configuring the path to the `docker-init` binary [#26941](https://github.com/docker/docker/pull/26941) ++ Add support for live reloading insecure registry in configuration [#22337](https://github.com/docker/docker/pull/22337) ++ Add support for storage-opt size on Windows daemons [#23391](https://github.com/docker/docker/pull/23391) +* Improve reliability of `docker run --rm` by moving it from the client to the daemon [#20848](https://github.com/docker/docker/pull/20848) ++ Add support for `--cpu-rt-period` and `--cpu-rt-runtime` flags, allowing containers to run real-time threads when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel [#23430](https://github.com/docker/docker/pull/23430) +* Allow parallel stop, pause, unpause [#24761](https://github.com/docker/docker/pull/24761) / [#26778](https://github.com/docker/docker/pull/26778) +* Implement XFS quota for overlay2 [#24771](https://github.com/docker/docker/pull/24771) +- Fix partial/full filter issue in `service tasks --filter` [#24850](https://github.com/docker/docker/pull/24850) +- Allow engine to run inside a user namespace [#25672](https://github.com/docker/docker/pull/25672) +- Fix a race condition between device deferred removal and resume device, when using the devicemapper graphdriver [#23497](https://github.com/docker/docker/pull/23497) +- Add `docker stats` support in Windows [#25737](https://github.com/docker/docker/pull/25737) +- Allow using `--pid=host` and `--net=host` when `--userns=host` [#25771](https://github.com/docker/docker/pull/25771) ++ (experimental) Add metrics (Prometheus) output for basic `container`, `image`, and `daemon` operations [#25820](https://github.com/docker/docker/pull/25820) +- Fix issue in `docker stats` with `NetworkDisabled=true` [#25905](https://github.com/docker/docker/pull/25905) ++ Add `docker top` support in Windows [#25891](https://github.com/docker/docker/pull/25891) ++ Record pid of exec'd process [#27470](https://github.com/docker/docker/pull/27470) ++ Add support for looking up user/groups via `getent` [#27599](https://github.com/docker/docker/pull/27599) ++ Add new `docker system` command with `df` and `prune` subcommands for system resource management, as well as `docker {container,image,volume,network} prune` subcommands [#26108](https://github.com/docker/docker/pull/26108) [#27525](https://github.com/docker/docker/pull/27525) / [#27525](https://github.com/docker/docker/pull/27525) +- Fix an issue where containers could not be stopped or killed by setting xfs max_retries to 0 upon ENOSPC with devicemapper [#26212](https://github.com/docker/docker/pull/26212) +- Fix `docker cp` failing to copy to a container's volume dir on CentOS with devicemapper [#28047](https://github.com/docker/docker/pull/28047) +* Promote overlay(2) graphdriver [#27932](https://github.com/docker/docker/pull/27932) ++ Add `--seccomp-profile` daemon flag to specify a path to a seccomp profile that overrides the default [#26276](https://github.com/docker/docker/pull/26276) +- Fix ulimits in `docker inspect` when `--default-ulimit` is set on daemon [#26405](https://github.com/docker/docker/pull/26405) +- Add workaround for overlay issues during build in older kernels [#28138](https://github.com/docker/docker/pull/28138) ++ Add `TERM` environment variable on `docker exec -t` [#26461](https://github.com/docker/docker/pull/26461) +* Honor a container’s `--stop-signal` setting upon `docker kill` [#26464](https://github.com/docker/docker/pull/26464) + +### Swarm Mode + ++ Add secret management [#27794](https://github.com/docker/docker/pull/27794) ++ Add support for templating service options (hostname, mounts, and environment variables) [#28025](https://github.com/docker/docker/pull/28025) +* Display the endpoint mode in the output of `docker service inspect --pretty` [#26906](https://github.com/docker/docker/pull/26906) +* Make `docker service ps` output more bearable by shortening service IDs in task names [#28088](https://github.com/docker/docker/pull/28088) +* Make `docker node ps` default to the current node [#25214](https://github.com/docker/docker/pull/25214) ++ Add `--dns`, -`-dns-opt`, and `--dns-search` to service create. [#27567](https://github.com/docker/docker/pull/27567) ++ Add `--force` to `docker service update` [#27596](https://github.com/docker/docker/pull/27596) ++ Add `--health-*` and `--no-healthcheck` flags to `docker service create` and `docker service update` [#27369](https://github.com/docker/docker/pull/27369) ++ Add `-q` to `docker service ps` [#27654](https://github.com/docker/docker/pull/27654) +* Display number of global services in `docker service ls` [#27710](https://github.com/docker/docker/pull/27710) +- Remove `--name` flag from `docker service update`. This flag is only functional on `docker service create`, so was removed from the `update` command [#26988](https://github.com/docker/docker/pull/26988) +- Fix worker nodes failing to recover because of transient networking issues [#26646](https://github.com/docker/docker/issues/26646) +* Add support for health aware load balancing and DNS records [#27279](https://github.com/docker/docker/pull/27279) ++ Add `--hostname` to `docker service create` [#27857](https://github.com/docker/docker/pull/27857) ++ Add `--host` to `docker service create`, and `--host-add`, `--host-rm` to `docker service update` [#28031](https://github.com/docker/docker/pull/28031) ++ Add `--tty` flag to `docker service create`/`update` [#28076](https://github.com/docker/docker/pull/28076) +* Autodetect, store, and expose node IP address as seen by the manager [#27910](https://github.com/docker/docker/pull/27910) +* Encryption at rest of manager keys and raft data [#27967](https://github.com/docker/docker/pull/27967) ++ Add `--update-max-failure-ratio`, `--update-monitor` and `--rollback` flags to `docker service update` [#26421](https://github.com/docker/docker/pull/26421) +- Fix an issue with address autodiscovery on `docker swarm init` running inside a container [#26457](https://github.com/docker/docker/pull/26457) ++ (experimental) Add `docker service logs` command to view logs for a service [#28089](https://github.com/docker/docker/pull/28089) ++ Pin images by digest for `docker service create` and `update` [#28173](https://github.com/docker/docker/pull/28173) +* Add short (`-f`) flag for `docker node rm --force` and `docker swarm leave --force` [#28196](https://github.com/docker/docker/pull/28196) ++ Add options to customize Raft snapshots (`--max-snapshots`, `--snapshot-interval`) [#27997](https://github.com/docker/docker/pull/27997) +- Don't repull image if pinned by digest [#28265](https://github.com/docker/docker/pull/28265) ++ Swarm-mode support for Windows [#27838](https://github.com/docker/docker/pull/27838) ++ Allow hostname to be updated on service [#28771](https://github.com/docker/docker/pull/28771) ++ Support v2 plugins [#29433](https://github.com/docker/docker/pull/29433) ++ Add content trust for services [#29469](https://github.com/docker/docker/pull/29469) + +### Volume + ++ Add support for labels on volumes [#21270](https://github.com/docker/docker/pull/21270) ++ Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628) +* Add a `--force` flag in `docker volume rm` to forcefully purge the data of the volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436) +* Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671) +* Add support for local NFS volumes to resolve hostnames [#27329](https://github.com/docker/docker/pull/27329) + +### Security + +- Fix selinux labeling of volumes shared in a container [#23024](https://github.com/docker/docker/pull/23024) +- Prohibit `/sys/firmware/**` from being accessed with apparmor [#26618](https://github.com/docker/docker/pull/26618) + +### DEPRECATION + +- Marked the `docker daemon` command as deprecated. The daemon is moved to a separate binary (`dockerd`), and should be used instead [#26834](https://github.com/docker/docker/pull/26834) +- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208) +- Remove Ubuntu 15.10 (Wily Werewolf) as supported platform. Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042) +- Remove Fedora 22 as supported platform. Fedora 22 is EOL, and no longer receives updates [#27432](https://github.com/docker/docker/pull/27432) +- Remove Fedora 23 as supported platform. Fedora 23 is EOL, and no longer receives updates [#29455](https://github.com/docker/docker/pull/29455) +- Deprecate the `repo:shortid` syntax on `docker pull` [#27207](https://github.com/docker/docker/pull/27207) +- Deprecate backing filesystem without `d_type` for overlay and overlay2 storage drivers [#27433](https://github.com/docker/docker/pull/27433) +- Deprecate `MAINTAINER` in Dockerfile [#25466](https://github.com/docker/docker/pull/25466) +- Deprecate `filter` param for endpoint `/images/json` [#27872](https://github.com/docker/docker/pull/27872) +- Deprecate setting duplicate engine labels [#24533](https://github.com/docker/docker/pull/24533) +- Deprecate "top-level" network information in `NetworkSettings` [#28437](https://github.com/docker/docker/pull/28437) + +## 1.12.6 (2017-01-10) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + +**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or +that the IPAM driver can provide one when you specify the `--ipv6` option. + +If you are currently using the `--ipv6` option _without_ specifying the +`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the +following message: + +```none +Error starting daemon: Error initializing network controller: Error creating + default "bridge" network: failed to parse pool request + for address space "LocalDefault" pool " subpool ": + could not find an available, non-overlapping IPv6 address + pool among the defaults to assign to the network +``` + +To resolve this error, either remove the `--ipv6` flag (to preserve the same +behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the +value of the `--fixed-cidr-v6` flag. + +In a similar way, if you specify the `--ipv6` flag when creating a network +with the default IPAM driver, without providing an IPv6 `--subnet`, network +creation will fail with the following message: + +```none +Error response from daemon: failed to parse pool request for address space + "LocalDefault" pool "" subpool "": could not find an + available, non-overlapping IPv6 address pool among + the defaults to assign to the network +``` + +To resolve this, either remove the `--ipv6` flag (to preserve the same behavior +as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the +`--subnet` flag. + +The network network creation will instead succeed if you use an external IPAM driver +which supports automatic allocation of IPv6 subnets. + +### Runtime + +- Fix runC privilege escalation (CVE-2016-9962) + +## 1.12.5 (2016-12-15) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + +**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or +that the IPAM driver can provide one when you specify the `--ipv6` option. + +If you are currently using the `--ipv6` option _without_ specifying the +`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the +following message: + +```none +Error starting daemon: Error initializing network controller: Error creating + default "bridge" network: failed to parse pool request + for address space "LocalDefault" pool " subpool ": + could not find an available, non-overlapping IPv6 address + pool among the defaults to assign to the network +``` + +To resolve this error, either remove the `--ipv6` flag (to preserve the same +behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the +value of the `--fixed-cidr-v6` flag. + +In a similar way, if you specify the `--ipv6` flag when creating a network +with the default IPAM driver, without providing an IPv6 `--subnet`, network +creation will fail with the following message: + +```none +Error response from daemon: failed to parse pool request for address space + "LocalDefault" pool "" subpool "": could not find an + available, non-overlapping IPv6 address pool among + the defaults to assign to the network +``` + +To resolve this, either remove the `--ipv6` flag (to preserve the same behavior +as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the +`--subnet` flag. + +The network network creation will instead succeed if you use an external IPAM driver +which supports automatic allocation of IPv6 subnets. + +### Runtime + +- Fix race on sending stdin close event [#29424](https://github.com/docker/docker/pull/29424) + +### Networking + +- Fix panic in docker network ls when a network was created with `--ipv6` and no ipv6 `--subnet` in older docker versions [#29416](https://github.com/docker/docker/pull/29416) + +### Contrib + +- Fix compilation on Darwin [#29370](https://github.com/docker/docker/pull/29370) + +## 1.12.4 (2016-12-12) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + + +### Runtime + +- Fix issue where volume metadata was not removed [#29083](https://github.com/docker/docker/pull/29083) +- Asynchronously close streams to prevent holding container lock [#29050](https://github.com/docker/docker/pull/29050) +- Fix selinux labels for newly created container volumes [#29050](https://github.com/docker/docker/pull/29050) +- Remove hostname validation [#28990](https://github.com/docker/docker/pull/28990) +- Fix deadlocks caused by IO races [#29095](https://github.com/docker/docker/pull/29095) [#29141](https://github.com/docker/docker/pull/29141) +- Return an empty stats if the container is restarting [#29150](https://github.com/docker/docker/pull/29150) +- Fix volume store locking [#29151](https://github.com/docker/docker/pull/29151) +- Ensure consistent status code in API [#29150](https://github.com/docker/docker/pull/29150) +- Fix incorrect opaque directory permission in overlay2 [#29093](https://github.com/docker/docker/pull/29093) +- Detect plugin content and error out on `docker pull` [#29297](https://github.com/docker/docker/pull/29297) + +### Swarm Mode + +* Update Swarmkit [#29047](https://github.com/docker/docker/pull/29047) + - orchestrator/global: Fix deadlock on updates [docker/swarmkit#1760](https://github.com/docker/swarmkit/pull/1760) + - on leader switchover preserve the vxlan id for existing networks [docker/swarmkit#1773](https://github.com/docker/swarmkit/pull/1773) +- Refuse swarm spec not named "default" [#29152](https://github.com/docker/docker/pull/29152) + +### Networking + +* Update libnetwork [#29004](https://github.com/docker/docker/pull/29004) [#29146](https://github.com/docker/docker/pull/29146) + - Fix panic in embedded DNS [docker/libnetwork#1561](https://github.com/docker/libnetwork/pull/1561) + - Fix unmarhalling panic when passing --link-local-ip on global scope network [docker/libnetwork#1564](https://github.com/docker/libnetwork/pull/1564) + - Fix panic when network plugin returns nil StaticRoutes [docker/libnetwork#1563](https://github.com/docker/libnetwork/pull/1563) + - Fix panic in osl.(*networkNamespace).DeleteNeighbor [docker/libnetwork#1555](https://github.com/docker/libnetwork/pull/1555) + - Fix panic in swarm networking concurrent map read/write [docker/libnetwork#1570](https://github.com/docker/libnetwork/pull/1570) + * Allow encrypted networks when running docker inside a container [docker/libnetwork#1502](https://github.com/docker/libnetwork/pull/1502) + - Do not block autoallocation of IPv6 pool [docker/libnetwork#1538](https://github.com/docker/libnetwork/pull/1538) + - Set timeout for netlink calls [docker/libnetwork#1557](https://github.com/docker/libnetwork/pull/1557) + - Increase networking local store timeout to one minute [docker/libkv#140](https://github.com/docker/libkv/pull/140) + - Fix a panic in libnetwork.(*sandbox).execFunc [docker/libnetwork#1556](https://github.com/docker/libnetwork/pull/1556) + - Honor icc=false for internal networks [docker/libnetwork#1525](https://github.com/docker/libnetwork/pull/1525) + +### Logging + +* Update syslog log driver [#29150](https://github.com/docker/docker/pull/29150) + +### Contrib + +- Run "dnf upgrade" before installing in fedora [#29150](https://github.com/docker/docker/pull/29150) +- Add build-date back to RPM packages [#29150](https://github.com/docker/docker/pull/29150) +- deb package filename changed to include distro to distinguish between distro code names [#27829](https://github.com/docker/docker/pull/27829) + +## 1.12.3 (2016-10-26) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + + +### Runtime + +- Fix ambient capability usage in containers (CVE-2016-8867) [#27610](https://github.com/docker/docker/pull/27610) +- Prevent a deadlock in libcontainerd for Windows [#27136](https://github.com/docker/docker/pull/27136) +- Fix error reporting in CopyFileWithTar [#27075](https://github.com/docker/docker/pull/27075) +* Reset health status to starting when a container is restarted [#27387](https://github.com/docker/docker/pull/27387) +* Properly handle shared mount propagation in storage directory [#27609](https://github.com/docker/docker/pull/27609) +- Fix docker exec [#27610](https://github.com/docker/docker/pull/27610) +- Fix backward compatibility with containerd’s events log [#27693](https://github.com/docker/docker/pull/27693) + +### Swarm Mode + +- Fix conversion of restart-policy [#27062](https://github.com/docker/docker/pull/27062) +* Update Swarmkit [#27554](https://github.com/docker/docker/pull/27554) + * Avoid restarting a task that has already been restarted [docker/swarmkit#1305](https://github.com/docker/swarmkit/pull/1305) + * Allow duplicate published ports when they use different protocols [docker/swarmkit#1632](https://github.com/docker/swarmkit/pull/1632) + * Allow multiple randomly assigned published ports on service [docker/swarmkit#1657](https://github.com/docker/swarmkit/pull/1657) + - Fix panic when allocations happen at init time [docker/swarmkit#1651](https://github.com/docker/swarmkit/pull/1651) + +### Networking + +* Update libnetwork [#27559](https://github.com/docker/docker/pull/27559) + - Fix race in serializing sandbox to string [docker/libnetwork#1495](https://github.com/docker/libnetwork/pull/1495) + - Fix race during deletion [docker/libnetwork#1503](https://github.com/docker/libnetwork/pull/1503) + * Reset endpoint port info on connectivity revoke in bridge driver [docker/libnetwork#1504](https://github.com/docker/libnetwork/pull/1504) + - Fix a deadlock in networking code [docker/libnetwork#1507](https://github.com/docker/libnetwork/pull/1507) + - Fix a race in load balancer state [docker/libnetwork#1512](https://github.com/docker/libnetwork/pull/1512) + +### Logging + +* Update fluent-logger-golang to v1.2.1 [#27474](https://github.com/docker/docker/pull/27474) + +### Contrib + +* Update buildtags for armhf ubuntu-trusty [#27327](https://github.com/docker/docker/pull/27327) +* Add AppArmor to runc buildtags for armhf [#27421](https://github.com/docker/docker/pull/27421) + +## 1.12.2 (2016-10-11) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + + +### Runtime + +- Fix a panic due to a race condition filtering `docker ps` [#26049](https://github.com/docker/docker/pull/26049) +* Implement retry logic to prevent "Unable to remove filesystem" errors when using the aufs storage driver [#26536](https://github.com/docker/docker/pull/26536) +* Prevent devicemapper from removing device symlinks if `dm.use_deferred_removal` is enabled [#24740](https://github.com/docker/docker/pull/24740) +- Fix an issue where the CLI did not return correct exit codes if a command was run with invalid options [#26777](https://github.com/docker/docker/pull/26777) +- Fix a panic due to a bug in stdout / stderr processing in health checks [#26507](https://github.com/docker/docker/pull/26507) +- Fix exec's children handling [#26874](https://github.com/docker/docker/pull/26874) +- Fix exec form of HEALTHCHECK CMD [#26208](https://github.com/docker/docker/pull/26208) + +### Networking + +- Fix a daemon start panic on armv5 [#24315](https://github.com/docker/docker/issues/24315) +* Vendor libnetwork [#26879](https://github.com/docker/docker/pull/26879) [#26953](https://github.com/docker/docker/pull/26953) + * Avoid returning early on agent join failures [docker/libnetwork#1473](https://github.com/docker/libnetwork/pull/1473) + - Fix service published port cleanup issues [docker/libetwork#1432](https://github.com/docker/libnetwork/pull/1432) [docker/libnetwork#1433](https://github.com/docker/libnetwork/pull/1433) + * Recover properly from transient gossip failures [docker/libnetwork#1446](https://github.com/docker/libnetwork/pull/1446) + * Disambiguate node names known to gossip cluster to avoid node name collision [docker/libnetwork#1451](https://github.com/docker/libnetwork/pull/1451) + * Honor user provided listen address for gossip [docker/libnetwork#1460](https://github.com/docker/libnetwork/pull/1460) + * Allow reachability via published port across services on the same host [docker/libnetwork#1398](https://github.com/docker/libnetwork/pull/1398) + * Change the ingress sandbox name from random id to just `ingress_sbox` [docker/libnetwork#1449](https://github.com/docker/libnetwork/pull/1449) + - Disable service discovery in ingress network [docker/libnetwork#1489](https://github.com/docker/libnetwork/pull/1489) + +### Swarm Mode + +* Fix remote detection of a node's address when it joins the cluster [#26211](https://github.com/docker/docker/pull/26211) +* Vendor SwarmKit [#26765](https://github.com/docker/docker/pull/26765) + * Bounce session after failed status update [docker/swarmkit#1539](https://github.com/docker/swarmkit/pull/1539) + - Fix possible raft deadlocks [docker/swarmkit#1537](https://github.com/docker/swarmkit/pull/1537) + - Fix panic and endpoint leak when a service is updated with no endpoints [docker/swarmkit#1481](https://github.com/docker/swarmkit/pull/1481) + * Produce an error if the same port is published twice on `service create` or `service update` [docker/swarmkit#1495](https://github.com/docker/swarmkit/pull/1495) + - Fix an issue where changes to a service were not detected, resulting in the service not being updated [docker/swarmkit#1497](https://github.com/docker/swarmkit/pull/1497) + - Do not allow service creation on ingress network [docker/swarmkit#1600](https://github.com/docker/swarmkit/pull/1600) + +### Contrib + +* Update the debian sysv-init script to use `dockerd` instead of `docker daemon` [#25869](https://github.com/docker/docker/pull/25869) +* Improve stability when running the docker client on MacOS Sierra [#26875](https://github.com/docker/docker/pull/26875) +- Fix installation on debian stretch [#27184](https://github.com/docker/docker/pull/27184) + +### Windows + +- Fix an issue where arrow-navigation did not work when running the docker client in ConEmu [#25578](https://github.com/docker/docker/pull/25578) + +## 1.12.1 (2016-08-18) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + + +### Client + +* Add `Joined at` information in `node inspect --pretty` [#25512](https://github.com/docker/docker/pull/25512) +- Fix a crash on `service inspect` [#25454](https://github.com/docker/docker/pull/25454) +- Fix issue preventing `service update --env-add` to work as intended [#25427](https://github.com/docker/docker/pull/25427) +- Fix issue preventing `service update --publish-add` to work as intended [#25428](https://github.com/docker/docker/pull/25428) +- Remove `service update --network-add` and `service update --network-rm` flags + because this feature is not yet implemented in 1.12, but was inadvertently added + to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646) + +### Contrib + ++ Official ARM installation for Debian Jessie, Ubuntu Trusty, and Raspbian Jessie [#24815](https://github.com/docker/docker/pull/24815) [#25591](https://github.com/docker/docker/pull/25637) +- Add selinux policy per distro/version, fixing issue preventing successful installation on Fedora 24, and Oracle Linux [#25334](https://github.com/docker/docker/pull/25334) [#25593](https://github.com/docker/docker/pull/25593) + +### Networking + +- Fix issue that prevented containers to be accessed by hostname with Docker overlay driver in Swarm Mode [#25603](https://github.com/docker/docker/pull/25603) [#25648](https://github.com/docker/docker/pull/25648) +- Fix random network issues on service with published port [#25603](https://github.com/docker/docker/pull/25603) +- Fix unreliable inter-service communication after scaling down and up [#25603](https://github.com/docker/docker/pull/25603) +- Fix issue where removing all tasks on a node and adding them back breaks connectivity with other services [#25603](https://github.com/docker/docker/pull/25603) +- Fix issue where a task that fails to start results in a race, causing a `network xxx not found` error that masks the actual error [#25550](https://github.com/docker/docker/pull/25550) +- Relax validation of SRV records for external services that use SRV records not formatted according to RFC 2782 [#25739](https://github.com/docker/docker/pull/25739) + +### Plugins (experimental) + +* Make daemon events listen for plugin lifecycle events [#24760](https://github.com/docker/docker/pull/24760) +* Check for plugin state before enabling plugin [#25033](https://github.com/docker/docker/pull/25033) +- Remove plugin root from filesystem on `plugin rm` [#25187](https://github.com/docker/docker/pull/25187) +- Prevent deadlock when more than one plugin is installed [#25384](https://github.com/docker/docker/pull/25384) + +### Runtime + +* Mask join tokens in daemon logs [#25346](https://github.com/docker/docker/pull/25346) +- Fix `docker ps --filter` causing the results to no longer be sorted by creation time [#25387](https://github.com/docker/docker/pull/25387) +- Fix various crashes [#25053](https://github.com/docker/docker/pull/25053) + +### Security + +* Add `/proc/timer_list` to the masked paths list to prevent information leak from the host [#25630](https://github.com/docker/docker/pull/25630) +* Allow systemd to run with only `--cap-add SYS_ADMIN` rather than having to also add `--cap-add DAC_READ_SEARCH` or disabling seccomp filtering [#25567](https://github.com/docker/docker/pull/25567) + +### Swarm + +- Fix an issue where the swarm can get stuck electing a new leader after quorum is lost [#25055](https://github.com/docker/docker/issues/25055) +- Fix unwanted rescheduling of containers after a leader failover [#25017](https://github.com/docker/docker/issues/25017) +- Change swarm root CA key to P256 curve [swarmkit#1376](https://github.com/docker/swarmkit/pull/1376) +- Allow forced removal of a node from a swarm [#25159](https://github.com/docker/docker/pull/25159) +- Fix connection leak when a node leaves a swarm [swarmkit/#1277](https://github.com/docker/swarmkit/pull/1277) +- Backdate swarm certificates by one hour to tolerate more clock skew [swarmkit/#1243](https://github.com/docker/swarmkit/pull/1243) +- Avoid high CPU use with many unschedulable tasks [swarmkit/#1287](https://github.com/docker/swarmkit/pull/1287) +- Fix issue with global tasks not starting up [swarmkit/#1295](https://github.com/docker/swarmkit/pull/1295) +- Garbage collect raft logs [swarmkit/#1327](https://github.com/docker/swarmkit/pull/1327) + +### Volume + +- Persist local volume options after a daemon restart [#25316](https://github.com/docker/docker/pull/25316) +- Fix an issue where the mount ID was not returned on volume unmount [#25333](https://github.com/docker/docker/pull/25333) +- Fix an issue where a volume mount could inadvertently create a bind mount [#25309](https://github.com/docker/docker/pull/25309) +- `docker service create --mount type=bind,...` now correctly validates if the source path exists, instead of creating it [#25494](https://github.com/docker/docker/pull/25494) + +## 1.12.0 (2016-07-28) + + +**IMPORTANT**: Docker 1.12.0 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + +**IMPORTANT**: With Docker 1.12, a Linux docker installation now has two +additional binaries; `dockerd`, and `docker-proxy`. If you have scripts for +installing docker, please make sure to update them accordingly. + +### Builder + ++ New `HEALTHCHECK` Dockerfile instruction to support user-defined healthchecks [#23218](https://github.com/docker/docker/pull/23218) ++ New `SHELL` Dockerfile instruction to specify the default shell when using the shell form for commands in a Dockerfile [#22489](https://github.com/docker/docker/pull/22489) ++ Add `#escape=` Dockerfile directive to support platform-specific parsing of file paths in Dockerfile [#22268](https://github.com/docker/docker/pull/22268) ++ Add support for comments in `.dockerignore` [#23111](https://github.com/docker/docker/pull/23111) +* Support for UTF-8 in Dockerfiles [#23372](https://github.com/docker/docker/pull/23372) +* Skip UTF-8 BOM bytes from `Dockerfile` and `.dockerignore` if exist [#23234](https://github.com/docker/docker/pull/23234) +* Windows: support for `ARG` to match Linux [#22508](https://github.com/docker/docker/pull/22508) +- Fix error message when building using a daemon with the bridge network disabled [#22932](https://github.com/docker/docker/pull/22932) + +### Contrib + +* Enable seccomp for Centos 7 and Oracle Linux 7 [#22344](https://github.com/docker/docker/pull/22344) +- Remove MountFlags in systemd unit to allow shared mount propagation [#22806](https://github.com/docker/docker/pull/22806) + +### Distribution + ++ Add `--max-concurrent-downloads` and `--max-concurrent-uploads` daemon flags useful for situations where network connections don't support multiple downloads/uploads [#22445](https://github.com/docker/docker/pull/22445) +* Registry operations now honor the `ALL_PROXY` environment variable [#22316](https://github.com/docker/docker/pull/22316) +* Provide more information to the user on `docker load` [#23377](https://github.com/docker/docker/pull/23377) +* Always save registry digest metadata about images pushed and pulled [#23996](https://github.com/docker/docker/pull/23996) + +### Logging + ++ Syslog logging driver now supports DGRAM sockets [#21613](https://github.com/docker/docker/pull/21613) ++ Add `--details` option to `docker logs` to also display log tags [#21889](https://github.com/docker/docker/pull/21889) ++ Enable syslog logger to have access to env and labels [#21724](https://github.com/docker/docker/pull/21724) ++ An additional syslog-format option `rfc5424micro` to allow microsecond resolution in syslog timestamp [#21844](https://github.com/docker/docker/pull/21844) +* Inherit the daemon log options when creating containers [#21153](https://github.com/docker/docker/pull/21153) +* Remove `docker/` prefix from log messages tag and replace it with `{{.DaemonName}}` so that users have the option of changing the prefix [#22384](https://github.com/docker/docker/pull/22384) + +### Networking + ++ Built-in Virtual-IP based internal and ingress load-balancing using IPVS [#23361](https://github.com/docker/docker/pull/23361) ++ Routing Mesh using ingress overlay network [#23361](https://github.com/docker/docker/pull/23361) ++ Secured multi-host overlay networking using encrypted control-plane and Data-plane [#23361](https://github.com/docker/docker/pull/23361) ++ MacVlan driver is out of experimental [#23524](https://github.com/docker/docker/pull/23524) ++ Add `driver` filter to `network ls` [#22319](https://github.com/docker/docker/pull/22319) ++ Adding `network` filter to `docker ps --filter` [#23300](https://github.com/docker/docker/pull/23300) ++ Add `--link-local-ip` flag to `create`, `run` and `network connect` to specify a container's link-local address [#23415](https://github.com/docker/docker/pull/23415) ++ Add network label filter support [#21495](https://github.com/docker/docker/pull/21495) +* Removed dependency on external KV-Store for Overlay networking in Swarm-Mode [#23361](https://github.com/docker/docker/pull/23361) +* Add container's short-id as default network alias [#21901](https://github.com/docker/docker/pull/21901) +* `run` options `--dns` and `--net=host` are no longer mutually exclusive [#22408](https://github.com/docker/docker/pull/22408) +- Fix DNS issue when renaming containers with generated names [#22716](https://github.com/docker/docker/pull/22716) +- Allow both `network inspect -f {{.Id}}` and `network inspect -f {{.ID}}` to address inconsistency with inspect output [#23226](https://github.com/docker/docker/pull/23226) + +### Plugins (experimental) + ++ New `plugin` command to manager plugins with `install`, `enable`, `disable`, `rm`, `inspect`, `set` subcommands [#23446](https://github.com/docker/docker/pull/23446) + +### Remote API (v1.24) & Client + ++ Split the binary into two: `docker` (client) and `dockerd` (daemon) [#20639](https://github.com/docker/docker/pull/20639) ++ Add `before` and `since` filters to `docker images --filter` [#22908](https://github.com/docker/docker/pull/22908) ++ Add `--limit` option to `docker search` [#23107](https://github.com/docker/docker/pull/23107) ++ Add `--filter` option to `docker search` [#22369](https://github.com/docker/docker/pull/22369) ++ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) [#23520](https://github.com/docker/docker/pull/23520) ++ Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410) ++ Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556) ++ devicemapper: expose Mininum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945) +* API now returns a JSON object when an error occurs making it more consistent [#22880](https://github.com/docker/docker/pull/22880) +- Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777) +- Fix API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641) +- Fix discrepancy in the format of sizes in `stats` from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773) +- authz: when request is denied return forbbiden exit code (403) [#22448](https://github.com/docker/docker/pull/22448) +- Windows: fix tty-related displaying issues [#23878](https://github.com/docker/docker/pull/23878) + +### Runtime + ++ Split the userland proxy to a separate binary (`docker-proxy`) [#23312](https://github.com/docker/docker/pull/23312) ++ Add `--live-restore` daemon flag to keep containers running when daemon shuts down, and regain control on startup [#23213](https://github.com/docker/docker/pull/23213) ++ Ability to add OCI-compatible runtimes (via `--add-runtime` daemon flag) and select one with `--runtime` on `create` and `run` [#22983](https://github.com/docker/docker/pull/22983) ++ New `overlay2` graphdriver for Linux 4.0+ with multiple lower directory support [#22126](https://github.com/docker/docker/pull/22126) ++ New load/save image events [#22137](https://github.com/docker/docker/pull/22137) ++ Add support for reloading daemon configuration through systemd [#22446](https://github.com/docker/docker/pull/22446) ++ Add disk quota support for btrfs [#19651](https://github.com/docker/docker/pull/19651) ++ Add disk quota support for zfs [#21946](https://github.com/docker/docker/pull/21946) ++ Add support for `docker run --pid=container:` [#22481](https://github.com/docker/docker/pull/22481) ++ Align default seccomp profile with selected capabilities [#22554](https://github.com/docker/docker/pull/22554) ++ Add a `daemon reload` event when the daemon reloads its configuration [#22590](https://github.com/docker/docker/pull/22590) ++ Add `trace` capability in the pprof profiler to show execution traces in binary form [#22715](https://github.com/docker/docker/pull/22715) ++ Add a `detach` event [#22898](https://github.com/docker/docker/pull/22898) ++ Add support for setting sysctls with `--sysctl` [#19265](https://github.com/docker/docker/pull/19265) ++ Add `--storage-opt` flag to `create` and `run` allowing to set `size` on devicemapper [#19367](https://github.com/docker/docker/pull/19367) ++ Add `--oom-score-adjust` daemon flag with a default value of `-500` making the daemon less likely to be killed before containers [#24516](https://github.com/docker/docker/pull/24516) +* Undeprecate the `-c` short alias of `--cpu-shares` on `run`, `build`, `create`, `update` [#22621](https://github.com/docker/docker/pull/22621) +* Prevent from using aufs and overlay graphdrivers on an eCryptfs mount [#23121](https://github.com/docker/docker/pull/23121) +- Fix issues with tmpfs mount ordering [#22329](https://github.com/docker/docker/pull/22329) +- Created containers are no longer listed on `docker ps -a -f exited=0` [#21947](https://github.com/docker/docker/pull/21947) +- Fix an issue where containers are stuck in a "Removal In Progress" state [#22423](https://github.com/docker/docker/pull/22423) +- Fix bug that was returning an HTTP 500 instead of a 400 when not specifying a command on run/create [#22762](https://github.com/docker/docker/pull/22762) +- Fix bug with `--detach-keys` whereby input matching a prefix of the detach key was not preserved [#22943](https://github.com/docker/docker/pull/22943) +- SELinux labeling is now disabled when using `--privileged` mode [#22993](https://github.com/docker/docker/pull/22993) +- If volume-mounted into a container, `/etc/hosts`, `/etc/resolv.conf`, `/etc/hostname` are no longer SELinux-relabeled [#22993](https://github.com/docker/docker/pull/22993) +- Fix inconsistency in `--tmpfs` behavior regarding mount options [#22438](https://github.com/docker/docker/pull/22438) +- Fix an issue where daemon hangs at startup [#23148](https://github.com/docker/docker/pull/23148) +- Ignore SIGPIPE events to prevent journald restarts to crash docker in some cases [#22460](https://github.com/docker/docker/pull/22460) +- Containers are not removed from stats list on error [#20835](https://github.com/docker/docker/pull/20835) +- Fix `on-failure` restart policy when daemon restarts [#20853](https://github.com/docker/docker/pull/20853) +- Fix an issue with `stats` when a container is using another container's network [#21904](https://github.com/docker/docker/pull/21904) + +### Swarm Mode + ++ New `swarm` command to manage swarms with `init`, `join`, `join-token`, `leave`, `update` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#24823](https://github.com/docker/docker/pull/24823) ++ New `service` command to manage swarm-wide services with `create`, `inspect`, `update`, `rm`, `ps` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140) ++ New `node` command to manage nodes with `accept`, `promote`, `demote`, `inspect`, `update`, `ps`, `ls` and `rm` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140) ++ (experimental) New `stack` and `deploy` commands to manage and deploy multi-service applications [#23522](https://github.com/docker/docker/pull/23522) [#25140](https://github.com/docker/docker/pull/25140) + +### Volume + ++ Add support for local and global volume scopes (analogous to network scopes) [#22077](https://github.com/docker/docker/pull/22077) ++ Allow volume drivers to provide a `Status` field [#21006](https://github.com/docker/docker/pull/21006) ++ Add name/driver filter support for volume [#21361](https://github.com/docker/docker/pull/21361) +* Mount/Unmount operations now receives an opaque ID to allow volume drivers to differentiate between two callers [#21015](https://github.com/docker/docker/pull/21015) +- Fix issue preventing to remove a volume in a corner case [#22103](https://github.com/docker/docker/pull/22103) +- Windows: Enable auto-creation of host-path to match Linux [#22094](https://github.com/docker/docker/pull/22094) + + +### DEPRECATION +* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed + to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574) +* Remove deprecated `syslog-tag`, `gelf-tag`, `fluentd-tag` log option in favor of the more generic `tag` one [#22620](https://github.com/docker/docker/pull/22620) +* Remove deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570) +* Remove deprecated `-f`/`--force` flag on docker tag [#23090](https://github.com/docker/docker/pull/23090) +* Remove deprecated `/containers//copy` endpoint [#22149](https://github.com/docker/docker/pull/22149) +* Remove deprecated `docker ps` flags `--since` and `--before` [#22138](https://github.com/docker/docker/pull/22138) +* Deprecate the old 3-args form of `docker import` [#23273](https://github.com/docker/docker/pull/23273) + +## 1.11.2 (2016-05-31) + +### Networking + +- Fix a stale endpoint issue on overlay networks during ungraceful restart ([#23015](https://github.com/docker/docker/pull/23015)) +- Fix an issue where the wrong port could be reported by `docker inspect/ps/port` ([#22997](https://github.com/docker/docker/pull/22997)) + +### Runtime + +- Fix a potential panic when running `docker build` ([#23032](https://github.com/docker/docker/pull/23032)) +- Fix interpretation of `--user` parameter ([#22998](https://github.com/docker/docker/pull/22998)) +- Fix a bug preventing container statistics to be correctly reported ([#22955](https://github.com/docker/docker/pull/22955)) +- Fix an issue preventing container to be restarted after daemon restart ([#22947](https://github.com/docker/docker/pull/22947)) +- Fix issues when running 32 bit binaries on Ubuntu 16.04 ([#22922](https://github.com/docker/docker/pull/22922)) +- Fix a possible deadlock on image deletion and container attach ([#22918](https://github.com/docker/docker/pull/22918)) +- Fix an issue where containers fail to start after a daemon restart if they depend on a containerized cluster store ([#22561](https://github.com/docker/docker/pull/22561)) +- Fix an issue causing `docker ps` to hang on CentOS when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) +- Fix a bug preventing to `docker exec` into a container when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) + + +## 1.11.1 (2016-04-26) + +### Distribution + +- Fix schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949)) + +### Documentation + ++ Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048)) + +### Builder + +* Append label passed to `docker build` as arguments as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184)) + +### Networking + +- Fix a panic that would occur when forwarding DNS query ([#22261](https://github.com/docker/docker/pull/22261)) +- Fix an issue where OS threads could end up within an incorrect network namespace when using user defined networks ([#22261](https://github.com/docker/docker/pull/22261)) + +### Runtime + +- Fix a bug preventing labels configuration to be reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299)) +- Fix a regression where container mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256)) +- Fix an issue where it would be impossible to update both `memory-swap` and `memory` value together ([#22255](https://github.com/docker/docker/pull/22255)) +- Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254)) +- Add missing cleanup of container temporary files when cancelling a schedule restart ([#22237](https://github.com/docker/docker/pull/22237)) +- Remove scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993)) +- Fix a panic that would occur when the plugins were activated via the json spec ([#22191](https://github.com/docker/docker/pull/22191)) +- Fix restart backoff logic to correctly reset delay if container ran for at least 10secs ([#22125](https://github.com/docker/docker/pull/22125)) +- Remove error message when a container restart get cancelled ([#22123](https://github.com/docker/docker/pull/22123)) +- Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121)) +- Fix a panic that could occur when serving concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120))` +- Revert deprecation of non-existent host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065)) +- Hide misleading rpc error on daemon shutdown ([#22058](https://github.com/docker/docker/pull/22058)) + +## 1.11.0 (2016-04-13) + +**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binaries, please make sure to update them. Interaction with the daemon stay the same otherwise, the usage of the other binaries should be transparent. A Windows docker installation remains a single binary, `docker.exe`. + +### Builder + +- Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033)) +- Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162)) + +### Client + +* Usage of the `:` separator for security option has been deprecated. `=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232)) ++ The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373)) +* Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200)) +* Docker info will now warn users if it can not detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128)) +- Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803)) +- Fix a bug where some newly started container would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792)) +* Post processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587)) +- Values to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566)) ++ Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373)) ++ Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107)) +* `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017)) +* `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986)) +- Docker now prohibits login in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806)) +* Docker events attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761)) +* `docker ps` no longer shows exported port for stopped containers ([#19483](https://github.com/docker/docker/pull/19483)) +- Docker now cleans after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849)) +* Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#120078](https://github.com/docker/docker/pull/20078)) + +### Distribution + +- Fix a panic that occurred when pulling an image with 0 layers ([#21222](https://github.com/docker/docker/pull/21222)) +- Fix a panic that could occur on error while pushing to a registry with a misconfigured token service ([#21212](https://github.com/docker/docker/pull/21212)) ++ All first-level delegation roles are now signed when doing a trusted push ([#21046](https://github.com/docker/docker/pull/21046)) ++ OAuth support for registries was added ([#20970](https://github.com/docker/docker/pull/20970)) +* `docker login` now handles token using the implementation found in [docker/distribution](https://github.com/docker/distribution) ([#20832](https://github.com/docker/docker/pull/20832)) +* `docker login` will no longer prompt for an email ([#20565](https://github.com/docker/docker/pull/20565)) +* Docker will now fallback to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241)) +* Docker will now try to resume layer download where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840)) +- Fix generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509)) +- Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382)) + +### Logging + +- Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311)) +* Docker syslog driver now uses the RFC-5424 format when emitting logs ([#20121](https://github.com/docker/docker/pull/20121)) +* Docker GELF log driver now allows to specify the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options ([#19831](https://github.com/docker/docker/pull/19831)) +* Docker daemon learned to output uncolorized logs via the `--raw-logs` options ([#19794](https://github.com/docker/docker/pull/19794)) ++ Docker, on Windows platform, now includes an ETW (Event Tracing in Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689)) +* Journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564)) ++ The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` ([#19439](https://github.com/docker/docker/pull/19439)) ++ Docker learned to send log to Google Cloud via the new `gcplogs` logging driver. ([#18766](https://github.com/docker/docker/pull/18766)) + + +### Misc + ++ When saving linked images together with `docker save` a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385)) ++ Support for building the Docker cli for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325)) ++ Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270)) +* The `dockremap` is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266)) +- Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258)) +- Docker, when run as a service with systemd, will now properly manage its processes cgroups ([#20633](https://github.com/docker/docker/pull/20633)) +* `docker info` now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863)) +* `docker info` now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388)) +* Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894)) +* `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490),[#19851](https://github.com/docker/docker/pull/19851)) ++ Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013)) ++ Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348)) + +### Networking + +- Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671)) +- Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671)) +* `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160)) ++ Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122)) +* Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383)) +- Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431)) +* `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357)) ++ Control IPv6 via explicit option when creating a network (`docker network create --ipv6`). This shows up as a new `EnableIPv6` field in `docker network inspect` ([#17513](https://github.com/docker/docker/pull/17513)) +* Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396)) +- Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396)) +* Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix endpoint count inconsistency after an ungraceful dameon restart ([#21261](https://github.com/docker/docker/pull/21261)) +- Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed a bug which prevents docker reload when host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019)) +- Added inbuilt nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed bug in iptables.Exists() logic [#21019](https://github.com/docker/docker/pull/21019) +- Fixed a Veth interface leak when using overlay network ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed a bug which prevents docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214)) +- Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419)) +- Allow to pass global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419)) +- For anonymous containers use the alias name for IP to name mapping, ie:DNS PTR record ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix a panic when deleting an entry from /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019)) +- Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix to retain the network internal mode config for bridge networks on daemon reload ([#21780] (https://github.com/docker/docker/pull/21780)) +- Fix to retain IPAM driver option configs on daemon reload ([#21914] (https://github.com/docker/docker/pull/21914)) + +### Plugins + +- Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686)) +- Fix an issue where Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602)) + +### Runtime + +- Fix a panic that could occur when cleanup after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716)) +- Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692)) +- Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677)) +- Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in ([#21666](https://github.com/docker/docker/pull/21666)) + Docker 1.9, but was decided to be too much of a backward-incompatible change, so it was decided to keep the feature. ++ It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383)) ++ `docker inspect ` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370)) ++ Docker Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354)) +* Docker learned to report the faulty exe when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345)) +* Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097)) +- Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089)) +- Fix a hang that would happen on attach if initial start was to fail ([#21048](https://github.com/docker/docker/pull/21048)) +- Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045)) +- Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022)) +- Fix an issue where nanoseconds were not correctly taken in account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013)) +- Fix the handling of Docker command when passed a 64 bytes id ([#21002](https://github.com/docker/docker/pull/21002)) +* Docker will now return a `204` (i.e http.StatusNoContent) code when it successfully deleted a network ([#20977](https://github.com/docker/docker/pull/20977)) +- Fix a bug where the daemon would wait indefinitely in case the process it was about to killed had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967) +* The devmapper driver learned the `dm.min_free_space` option. If the mapped device free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786)) ++ Docker can now prevent processes in container to gain new privileges via the `--security-opt=no-new-privileges` flag ([#20727](https://github.com/docker/docker/pull/20727)) +- Starting a container with the `--device` option will now correctly resolves symlinks ([#20684](https://github.com/docker/docker/pull/20684)) ++ Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers. ([#20662](https://github.com/docker/docker/pull/20662)) +- Fix docker configuration reloading to only alter value present in the given config file ([#20604](https://github.com/docker/docker/pull/20604)) ++ Docker now allows setting a container hostname via the `--hostname` flag when `--net=host` ([#20177](https://github.com/docker/docker/pull/20177)) ++ Docker now allows executing privileged container while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111)) +- Fix Docker not cleaning up correctly old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679)) +* Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517)) +- Fix container loading, on daemon startup, when they depends on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500)) +* `docker update` learned how to change a container restart policy ([#19116](https://github.com/docker/docker/pull/19116)) +* `docker inspect` now also returns a new `State` field containing the container state in a human readable way (i.e. one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`)([#18966](https://github.com/docker/docker/pull/18966)) ++ Docker learned to limit the number of active pids (i.e. processes) within the container via the `pids-limit` flags. NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697)) +- `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078)) +- Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842)) +- Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802)) +- Fix a situation where a container cannot be stopped if the terminal is closed ([#21840](https://github.com/docker/docker/pull/21840)) + +### Security + +* Object with the `pcp_pmcd_t` selinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370)) +* `restart_syscall`, `copy_file_range`, `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262)) +* `send`, `recv` and `x32` were added to the list of allowed syscalls and arch in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432)) +* Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046)) +* Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591)) + +### Volumes + +* Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389)) +* Local volumes can now accept options similar to the unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262)) +- Fix an issue where one letter directory name could not be used as source for volumes ([#21106](https://github.com/docker/docker/pull/21106)) ++ `docker run -v` now accepts a new flag `nocopy`. This tells the runtime not to copy the container path content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223)) + +## 1.10.3 (2016-03-10) + +### Runtime + +- Fix Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706) +- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647) + +### Distribution + +- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831) +- Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030) + +### Plugin system + +- Fix issue preventing volume plugins to start when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834) +- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682) +- Fix plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680) + +### Security + +- Fix linux32 emulation to fail during docker build [#20672](https://github.com/docker/docker/pull/20672) + It was due to the `personality` syscall being blocked by the default seccomp profile. +- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981) + It was due to the `ipc` syscall being blocked by the default seccomp profile. +- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685) +- Fix issue preventing daemon to start if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725) + +## 1.10.2 (2016-02-22) + +### Runtime + +- Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518) +- Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333) +- Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446) +- Fix configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471) +- Fix occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522) + +### Distribution + +- Keep layer reference if deletion failed to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513) +- Handle gracefully a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372) +- Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367) +- Fix tar-split files corruption during migration that later cause docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458) + +### Networking + +- Fix daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510) + +### Volumes + +- Fix issue with multiple volume references with same name [#20381](https://github.com/docker/docker/pull/20381) + +### Security + +- Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523) + +## 1.10.1 (2016-02-11) + +### Runtime + +* Do not stop daemon on migration hard failure [#20156](https://github.com/docker/docker/pull/20156) +- Fix various issues with migration to content-addressable images [#20058](https://github.com/docker/docker/pull/20058) +- Fix ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045) +- Do not leak /dev/mqueue from the host to all containers, keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133) +- Fix `docker ps --filter before=...` to not show stopped containers without providing `-a` flag [#20135](https://github.com/docker/docker/pull/20135) + +### Security + +- Fix issue preventing docker events to work properly with authorization plugin [#20002](https://github.com/docker/docker/pull/20002) + +### Distribution + +* Add additional verifications and prevent from uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164) +- Fix regression preventing uppercase characters in image reference hostname [#20175](https://github.com/docker/docker/pull/20175) + +### Networking + +- Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060) +- Fix issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235) +- Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181) +- Fix port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181) + +### Logging + +- Fix bug where tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109) + +### Volumes + +- Fix issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983) + +### Misc + +- Remove TasksMax from Docker systemd service [#20167](https://github.com/docker/docker/pull/20167) + +## 1.10.0 (2016-02-04) + +**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers. +A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present. +Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration +We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime. +Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/ + +### Runtime + ++ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078) ++ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587) ++ Add `--format` flag to `docker images` command [#17692](https://github.com/docker/docker/pull/17692) ++ Allow to set daemon configuration in a file and hot-reload it with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587) ++ Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888) + This change is backward compatible in the API, but not on the CLI. ++ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959) ++ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466) ++ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879) ++ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277) ++ Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666) ++ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168) ++ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249) ++ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478) ++ Add `--cgroup-parent` flag on `daemon` to set cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062) ++ Add `-L` flag to docker cp to follow symlinks [#16613](https://github.com/docker/docker/pull/16613) ++ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908) +* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012) +* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495) +* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742) +* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704) +* Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115) +* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208) +* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780) +* Permit `OPTIONS` request against any url, thus fixing issue with CORS [#19569](https://github.com/docker/docker/pull/19569) +- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428) +- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326) +- Fix race condition causing autorestart turning off on restart [#17629](https://github.com/docker/docker/pull/17629) +- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216) +- Fix obscure bug preventing to start containers [#19751](https://github.com/docker/docker/pull/19751) +- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722) +- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123) + +### Security + ++ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187) ++ Add support for custom seccomp profiles in `--security-opt` [#17989](https://github.com/docker/docker/pull/17989) ++ Add default seccomp profile [#18780](https://github.com/docker/docker/pull/18780) ++ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365) ++ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887) + This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled. + Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled. +* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452) + +### Distribution + +* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924) + Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. + Images no longer depend on the parent chain but contain a list of layer references. + `docker load`/`docker save` tarballs now also contain content-addressable image configurations. + For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration +* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785) +* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109), [#18353](https://github.com/docker/docker/pull/18353) +* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590) +- Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743) + +### Networking + ++ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198) ++ Support for network-scoped alias using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242) ++ Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a container in a network [#19001](https://github.com/docker/docker/pull/19001) ++ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316) ++ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276) ++ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167) ++ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204) ++ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481) ++ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229) ++ Support for Capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775) ++ Add `--force` to `network disconnect` to force container to be disconnected from network [#19317](https://github.com/docker/docker/pull/19317) +* Support for multi-host networking using built-in overlay driver for all engine supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775) +* `--link` is now supported on `docker run` for containers in user-defined network [#19229](https://github.com/docker/docker/pull/19229) +* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489) +* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615) +* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316) +* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782) +* Add support for network connect/disconnect to stopped containers [#18906](https://github.com/docker/docker/pull/18906) +* Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323) +- Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108) +- Fix duplicate IP address for containers [#18106](https://github.com/docker/docker/pull/18106) +- Fix issue preventing sometimes docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338) +- Do not substitute 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573) + +### Logging + ++ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488) ++ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998) +* Enhance `docker logs --since` and `--until` to support nanoseconds and time [#17495](https://github.com/docker/docker/pull/17495) +* Enhance AWS logs to auto-detect region [#16640](https://github.com/docker/docker/pull/16640) + +### Volumes + ++ Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034) +* Add `ls` and `inspect` endpoints to volume plugin API [#16534](https://github.com/docker/docker/pull/16534) + Existing plugins need to make use of these new APIs to satisfy users' expectation + For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549) +- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175) +- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500) +- Fix `docker volumes ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671) +- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568) +- Allow external volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190) + +### Builder + ++ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090) +- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055) +- Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283) + +### Client + ++ Add support for overriding the API version to use via an `DOCKER_API_VERSION` environment-variable [#15964](https://github.com/docker/docker/pull/15964) +- Fix a bug preventing Windows clients to log in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891) + +### Misc + +* systemd: Set TasksMax in addition to LimitNPROC in systemd service file [#19391](https://github.com/docker/docker/pull/19391) + +### Deprecations + +* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700) +* Remove `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700) +* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724) +* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799) +* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809) +* Deprecate `-f` flag for docker tag [#18350](https://github.com/docker/docker/pull/18350) + +## 1.9.1 (2015-11-21) + +### Runtime + +- Do not prevent daemon from booting if images could not be restored (#17695) +- Force IPC mount to unmount on daemon shutdown/init (#17539) +- Turn IPC unmount errors into warnings (#17554) +- Fix `docker stats` performance regression (#17638) +- Clarify cryptic error message upon `docker logs` if `--log-driver=none` (#17767) +- Fix seldom panics (#17639, #17634, #17703) +- Fix opq whiteouts problems for files with dot prefix (#17819) +- devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918) +- devicemapper: fix displayed fs in docker info (#17974) +- selinux: only relabel if user requested so with the `z` option (#17450, #17834) +- Do not make network calls when normalizing names (#18014) + +### Client + +- Fix `docker login` on windows (#17738) +- Fix bug with `docker inspect` output when not connected to daemon (#17715) +- Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680) + +### Builder + +- Fix regression with symlink behavior in ADD/COPY (#17710) + +### Networking + +- Allow passing a network ID as an argument for `--net` (#17558) +- Fix connect to host and prevent disconnect from host for `host` network (#17476) +- Fix `--fixed-cidr` issue when gateway ip falls in ip-range and ip-range is + not the first block in the network (#17853) +- Restore deterministic `IPv6` generation from `MAC` address on default `bridge` network (#17890) +- Allow port-mapping only for endpoints created on docker run (#17858) +- Fixed an endpoint delete issue with a possible stale sbox (#18102) + +### Distribution + +- Correct parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047) + +## 1.9.0 (2015-11-03) + +### Runtime + ++ `docker stats` now returns block IO metrics (#15005) ++ `docker stats` now details network stats per interface (#15786) ++ Add `ancestor=` filter to `docker ps --filter` flag to filter +containers based on their ancestor images (#14570) ++ Add `label=` filter to `docker ps --filter` to filter containers +based on label (#16530) ++ Add `--kernel-memory` flag to `docker run` (#14006) ++ Add `--message` flag to `docker import` allowing to specify an optional +message (#15711) ++ Add `--privileged` flag to `docker exec` (#14113) ++ Add `--stop-signal` flag to `docker run` allowing to replace the container +process stopping signal (#15307) ++ Add a new `unless-stopped` restart policy (#15348) ++ Inspecting an image now returns tags (#13185) ++ Add container size information to `docker inspect` (#15796) ++ Add `RepoTags` and `RepoDigests` field to `/images/{name:.*}/json` (#17275) +- Remove the deprecated `/container/ps` endpoint from the API (#15972) +- Send and document correct HTTP codes for `/exec//start` (#16250) +- Share shm and mqueue between containers sharing IPC namespace (#15862) +- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235) +- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted +with `ro` option (#14965) +- Improve `rmi` performance (#16890) +- Do not update /etc/hosts for the default bridge network, except for links (#17325) +- Fix conflict with duplicate container names (#17389) +- Fix an issue with incorrect template execution in `docker inspect` (#17284) +- DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run (#16271) + +### Client + ++ Allow `docker import` to import from local files (#11907) + +### Builder + ++ Add a `STOPSIGNAL` Dockerfile instruction allowing to set a different +stop-signal for the container process (#15307) ++ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` +that allows to add build-time environment variables (#15182) +- Improve cache miss performance (#16890) + +### Storage + +- devicemapper: Implement deferred deletion capability (#16381) + +### Networking + ++ `docker network` exits experimental and is part of standard release (#16645) ++ New network top-level concept, with associated subcommands and API (#16645) + WARNING: the API is different from the experimental API ++ Support for multiple isolated/micro-segmented networks (#16645) ++ Built-in multihost networking using VXLAN based overlay driver (#14071) ++ Support for third-party network plugins (#13424) ++ Ability to dynamically connect containers to multiple networks (#16645) ++ Support for user-defined IP address management via pluggable IPAM drivers (#16910) ++ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in nodes discovery (#16229) ++ Add `--cluster-store-opt` for setting up TLS settings (#16644) ++ Add `--dns-opt` to the daemon (#16031) +- DEPRECATE following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, + `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`. + Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect + the networking settings of a container per network. + +### Volumes + ++ New top-level `volume` subcommand and API (#14242) +- Move API volume driver settings to host-specific config (#15798) +- Print an error message if volume name is not unique (#16009) +- Ensure volumes created from Dockerfiles always use the local volume driver +(#15507) +- DEPRECATE auto-creating missing host paths for bind mounts (#16349) + +### Logging + ++ Add `awslogs` logging driver for Amazon CloudWatch (#15495) ++ Add generic `tag` log option to allow customizing container/image +information passed to driver (e.g. show container names) (#15384) +- Implement the `docker logs` endpoint for the journald driver (#13707) +- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) (#15384) + +### Distribution + ++ `docker search` now works with partial names (#16509) +- Push optimization: avoid buffering to file (#15493) +- The daemon will display progress for images that were already being pulled +by another client (#15489) +- Only permissions required for the current action being performed are requested (#) ++ Renaming trust keys (and respective environment variables) from `offline` to +`root` and `tagging` to `repository` (#16894) +- DEPRECATE trust key environment variables +`DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and +`DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894) + +### Security + ++ Add SELinux profiles to the rpm package (#15832) +- Fix various issues with AppArmor profiles provided in the deb package +(#14609) +- Add AppArmor policy that prevents writing to /proc (#15571) + +## 1.8.3 (2015-10-12) + +### Distribution + +- Fix layer IDs lead to local graph poisoning (CVE-2014-8178) +- Fix manifest validation and parsing logic errors allow pull-by-digest validation bypass (CVE-2014-8179) ++ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry + +## 1.8.2 (2015-09-10) + +### Distribution + +- Fixes rare edge case of handling GNU LongLink and LongName entries. +- Fix ^C on docker pull. +- Fix docker pull issues on client disconnection. +- Fix issue that caused the daemon to panic when loggers weren't configured properly. +- Fix goroutine leak pulling images from registry V2. + +### Runtime + +- Fix a bug mounting cgroups for docker daemons running inside docker containers. +- Initialize log configuration properly. + +### Client: + +- Handle `-q` flag in `docker ps` properly when there is a default format. + +### Networking + +- Fix several corner cases with netlink. + +### Contrib + +- Fix several issues with bash completion. + +## 1.8.1 (2015-08-12) + +### Distribution + +* Fix a bug where pushing multiple tags would result in invalid images + +## 1.8.0 (2015-08-11) + +### Distribution + ++ Trusted pull, push and build, disabled by default +* Make tar layers deterministic between registries +* Don't allow deleting the image of running containers +* Check if a tag name to load is a valid digest +* Allow one character repository names +* Add a more accurate error description for invalid tag name +* Make build cache ignore mtime + +### Cli + ++ Add support for DOCKER_CONFIG/--config to specify config file dir ++ Add --type flag for docker inspect command ++ Add formatting options to `docker ps` with `--format` ++ Replace `docker -d` with new subcommand `docker daemon` +* Zsh completion updates and improvements +* Add some missing events to bash completion +* Support daemon urls with base paths in `docker -H` +* Validate status= filter to docker ps +* Display when a container is in --net=host in docker ps +* Extend docker inspect to export image metadata related to graph driver +* Restore --default-gateway{,-v6} daemon options +* Add missing unpublished ports in docker ps +* Allow duration strings in `docker events` as --since/--until +* Expose more mounts information in `docker inspect` + +### Runtime + ++ Add new Fluentd logging driver ++ Allow `docker import` to load from local files ++ Add logging driver for GELF via UDP ++ Allow to copy files from host to containers with `docker cp` ++ Promote volume drivers from experimental to master ++ Add rollover options to json-file log driver, and --log-driver-opts flag ++ Add memory swappiness tuning options +* Remove cgroup read-only flag when privileged +* Make /proc, /sys, & /dev readonly for readonly containers +* Add cgroup bind mount by default +* Overlay: Export metadata for container and image in `docker inspect` +* Devicemapper: external device activation +* Devicemapper: Compare uuid of base device on startup +* Remove RC4 from the list of registry cipher suites +* Add syslog-facility option +* LXC execdriver compatibility with recent LXC versions +* Mark LXC execriver as deprecated (to be removed with the migration to runc) + +### Plugins + +* Separate plugin sockets and specs locations +* Allow TLS connections to plugins + +### Bug fixes + +- Add missing 'Names' field to /containers/json API output +- Make `docker rmi` of dangling images safe while pulling +- Devicemapper: Change default basesize to 100G +- Go Scheduler issue with sync.Mutex and gcc +- Fix issue where Search API endpoint would panic due to empty AuthConfig +- Set image canonical names correctly +- Check dockerinit only if lxc driver is used +- Fix ulimit usage of nproc +- Always attach STDIN if -i,--interactive is specified +- Show error messages when saving container state fails +- Fixed incorrect assumption on --bridge=none treated as disable network +- Check for invalid port specifications in host configuration +- Fix endpoint leave failure for --net=host mode +- Fix goroutine leak in the stats API if the container is not running +- Check for apparmor file before reading it +- Fix DOCKER_TLS_VERIFY being ignored +- Set umask to the default on startup +- Correct the message of pause and unpause a non-running container +- Adjust disallowed CpuShares in container creation +- ZFS: correctly apply selinux context +- Display empty string instead of when IP opt is nil +- `docker kill` returns error when container is not running +- Fix COPY/ADD quoted/json form +- Fix goroutine leak on logs -f with no output +- Remove panic in nat package on invalid hostport +- Fix container linking in Fedora 22 +- Fix error caused using default gateways outside of the allocated range +- Format times in inspect command with a template as RFC3339Nano +- Make registry client to accept 2xx and 3xx http status responses as successful +- Fix race issue that caused the daemon to crash with certain layer downloads failed in a specific order. +- Fix error when the docker ps format was not valid. +- Remove redundant ip forward check. +- Fix issue trying to push images to repository mirrors. +- Fix error cleaning up network entrypoints when there is an initialization issue. + +## 1.7.1 (2015-07-14) + +#### Runtime + +- Fix default user spawning exec process with `docker exec` +- Make `--bridge=none` not to configure the network bridge +- Publish networking stats properly +- Fix implicit devicemapper selection with static binaries +- Fix socket connections that hung intermittently +- Fix bridge interface creation on CentOS/RHEL 6.6 +- Fix local dns lookups added to resolv.conf +- Fix copy command mounting volumes +- Fix read/write privileges in volumes mounted with --volumes-from + +#### Remote API + +- Fix unmarshalling of Command and Entrypoint +- Set limit for minimum client version supported +- Validate port specification +- Return proper errors when attach/reattach fail + +#### Distribution + +- Fix pulling private images +- Fix fallback between registry V2 and V1 + +## 1.7.0 (2015-06-16) + +#### Runtime ++ Experimental feature: support for out-of-process volume plugins +* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag +* The `exec` command supports the `-u|--user` flag to specify the new process owner ++ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags ++ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota` ++ Container block IO can be controlled in `docker run` using`--blkio-weight` ++ ZFS support ++ The `docker logs` command supports a `--since` argument ++ UTS namespace can be shared with the host with `docker run --uts=host` + +#### Quality +* Networking stack was entirely rewritten as part of the libnetwork effort +* Engine internals refactoring +* Volumes code was entirely rewritten to support the plugins effort ++ Sending SIGUSR1 to a daemon will dump all goroutines stacks without exiting + +#### Build ++ Support ${variable:-value} and ${variable:+value} syntax for environment variables ++ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems` ++ git context changes with branches and directories +* The .dockerignore file support exclusion rules + +#### Distribution ++ Client support for v2 mirroring support for the official registry + +#### Bugfixes +* Firewalld is now supported and will automatically be used when available +* mounting --device recursively + +## 1.6.2 (2015-05-13) + +#### Runtime +- Revert change prohibiting mounting into /sys + +## 1.6.1 (2015-05-07) + +#### Security +- Fix read/write /proc paths (CVE-2015-3630) +- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631) +- Fix opening of file-descriptor 1 (CVE-2015-3627) +- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629) +- Prohibit mount of /sys + +#### Runtime +- Update AppArmor policy to not allow mounts + +## 1.6.0 (2015-04-07) + +#### Builder ++ Building images from an image ID ++ Build containers with resource constraints, ie `docker build --cpu-shares=100 --memory=1024m...` ++ `commit --change` to apply specified Dockerfile instructions while committing the image ++ `import --change` to apply specified Dockerfile instructions while importing the image ++ Builds no longer continue in the background when canceled with CTRL-C + +#### Client ++ Windows Support + +#### Runtime ++ Container and image Labels ++ `--cgroup-parent` for specifying a parent cgroup to place container cgroup within ++ Logging drivers, `json-file`, `syslog`, or `none` ++ Pulling images by ID ++ `--ulimit` to set the ulimit on a container ++ `--default-ulimit` option on the daemon which applies to all created containers (and overwritten by `--ulimit` on run) + +## 1.5.0 (2015-02-10) + +#### Builder ++ Dockerfile to use for a given `docker build` can be specified with the `-f` flag +* Dockerfile and .dockerignore files can be themselves excluded as part of the .dockerignore file, thus preventing modifications to these files invalidating ADD or COPY instructions cache +* ADD and COPY instructions accept relative paths +* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier +* Improve performance when exposing a large number of ports + +#### Hack ++ Allow client-side only integration tests for Windows +* Include docker-py integration tests against Docker daemon as part of our test suites + +#### Packaging ++ Support for the new version of the registry HTTP API +* Speed up `docker push` for images with a majority of already existing layers +- Fixed contacting a private registry through a proxy + +#### Remote API ++ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command ++ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command +* Container `inspect` endpoint show the ID of `exec` commands running in this container +* Container `inspect` endpoint show the number of times Docker auto-restarted the container +* New types of event can be streamed by the `events` endpoint: ‘OOM’ (container died with out of memory), ‘exec_create’, and ‘exec_start' +- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes + +#### Runtime ++ Docker daemon has full IPv6 support ++ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools ++ The `docker run` command can take the `--read-only` flag to make the container’s root filesystem mounted as readonly, which can be used in combination with volumes to force a container’s processes to only write to locations that will be persisted ++ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag +* Major stability improvements for devicemapper storage driver +* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted +* Better integration with host system: per-container iptable rules are moved to the DOCKER chain +- Fixed container exiting on out of memory to return an invalid exit code + +#### Other +* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon + +## 1.4.1 (2014-12-15) + +#### Runtime +- Fix issue with volumes-from and bind mounts not being honored after create + +## 1.4.0 (2014-12-11) + +#### Notable Features since 1.3.0 ++ Set key=value labels to the daemon (displayed in `docker info`), applied with + new `-label` daemon flag ++ Add support for `ENV` in Dockerfile of the form: + `ENV name=value name2=value2...` ++ New Overlayfs Storage Driver ++ `docker info` now returns an `ID` and `Name` field ++ Filter events by event name, container, or image ++ `docker cp` now supports copying from container volumes +- Fixed `docker tag`, so it honors `--force` when overriding a tag for existing + image. + +## 1.3.3 (2014-12-11) + +#### Security +- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356) +- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357) +- Validate image IDs (CVE-2014-9358) + +#### Runtime +- Fix an issue when image archives are being read slowly + +#### Client +- Fix a regression related to stdin redirection +- Fix a regression with `docker cp` when destination is the current directory + +## 1.3.2 (2014-11-20) + +#### Security +- Fix tar breakout vulnerability +* Extractions are now sandboxed chroot +- Security options are no longer committed to images + +#### Runtime +- Fix deadlock in `docker ps -f exited=1` +- Fix a bug when `--volumes-from` references a container that failed to start + +#### Registry ++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 +* Private registries whose IPs fall in the 127.0.0.0/8 range do no need the `--insecure-registry` flag +- Skip the experimental registry v2 API when mirroring is enabled + +## 1.3.1 (2014-10-28) + +#### Security +* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry ++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified + +#### Runtime +- Fix issue where volumes would not be shared + +#### Client +- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false` +- Fix docker run output to non-TTY stdout + +#### Builder +- Fix escaping `$` for environment variables +- Fix issue with lowercase `onbuild` Dockerfile instruction +- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` + +## 1.3.0 (2014-10-14) + +#### Notable features since 1.2.0 ++ Docker `exec` allows you to run additional processes inside existing containers ++ Docker `create` gives you the ability to create a container via the CLI without executing a process ++ `--security-opts` options to allow user to customize container labels and apparmor profiles ++ Docker `ps` filters +- Wildcard support to COPY/ADD ++ Move production URLs to get.docker.com from get.docker.io ++ Allocate IP address on the bridge inside a valid CIDR ++ Use drone.io for PR and CI testing ++ Ability to setup an official registry mirror ++ Ability to save multiple images with docker `save` + +## 1.2.0 (2014-08-20) + +#### Runtime ++ Make /etc/hosts /etc/resolv.conf and /etc/hostname editable at runtime ++ Auto-restart containers using policies ++ Use /var/lib/docker/tmp for large temporary files ++ `--cap-add` and `--cap-drop` to tweak what linux capability you want ++ `--device` to use devices in containers + +#### Client ++ `docker search` on private registries ++ Add `exited` filter to `docker ps --filter` +* `docker rm -f` now kills instead of stop ++ Support for IPv6 addresses in `--dns` flag + +#### Proxy ++ Proxy instances in separate processes +* Small bug fix on UDP proxy + +## 1.1.2 (2014-07-23) + +#### Runtime ++ Fix port allocation for existing containers ++ Fix containers restart on daemon restart + +#### Packaging ++ Fix /etc/init.d/docker issue on Debian + +## 1.1.1 (2014-07-09) + +#### Builder +* Fix issue with ADD + +## 1.1.0 (2014-07-03) + +#### Notable features since 1.0.1 ++ Add `.dockerignore` support ++ Pause containers during `docker commit` ++ Add `--tail` to `docker logs` + +#### Builder ++ Allow a tar file as context for `docker build` +* Fix issue with white-spaces and multi-lines in `Dockerfiles` + +#### Runtime +* Overall performance improvements +* Allow `/` as source of `docker run -v` +* Fix port allocation +* Fix bug in `docker save` +* Add links information to `docker inspect` + +#### Client +* Improve command line parsing for `docker commit` + +#### Remote API +* Improve status code for the `start` and `stop` endpoints + +## 1.0.1 (2014-06-19) + +#### Notable features since 1.0.0 +* Enhance security for the LXC driver + +#### Builder +* Fix `ONBUILD` instruction passed to grandchildren + +#### Runtime +* Fix events subscription +* Fix /etc/hostname file with host networking +* Allow `-h` and `--net=none` +* Fix issue with hotplug devices in `--privileged` + +#### Client +* Fix artifacts with events +* Fix a panic with empty flags +* Fix `docker cp` on Mac OS X + +#### Miscellaneous +* Fix compilation on Mac OS X +* Fix several races + +## 1.0.0 (2014-06-09) + +#### Notable features since 0.12.0 +* Production support + +## 0.12.0 (2014-06-05) + +#### Notable features since 0.11.0 +* 40+ various improvements to stability, performance and usability +* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file +* Inherit file permissions from the host on `ADD` +* New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer +* The `images` command has a `-f`/`--filter` option to filter the list of images +* Add `--force-rm` to clean up after a failed build +* Standardize JSON keys in Remote API to CamelCase +* Pull from a docker run now assumes `latest` tag if not specified +* Enhance security on Linux capabilities and device nodes + +## 0.11.1 (2014-05-07) + +#### Registry +- Fix push and pull to private registry + +## 0.11.0 (2014-05-07) + +#### Notable features since 0.10.0 + +* SELinux support for mount and process labels +* Linked containers can be accessed by hostname +* Use the net `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces +* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon +* Logs can now be returned with an optional timestamp +* Docker now works with registries that support SHA-512 +* Multiple registry endpoints are supported to allow registry mirrors + +## 0.10.0 (2014-04-08) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. +- Follow symlinks inside container's root for ADD build instructions. +- Fix EXPOSE caching. + +#### Documentation +- Add the new options of `docker ps` to the documentation. +- Add the options of `docker restart` to the documentation. +- Update daemon docs and help messages for --iptables and --ip-forward. +- Updated apt-cacher-ng docs example. +- Remove duplicate description of --mtu from docs. +- Add missing -t and -v for `docker images` to the docs. +- Add fixes to the cli docs. +- Update libcontainer docs. +- Update images in docs to remove references to AUFS and LXC. +- Update the nodejs_web_app in the docs to use the new epel RPM address. +- Fix external link on security of containers. +- Update remote API docs. +- Add image size to history docs. +- Be explicit about binding to all interfaces in redis example. +- Document DisableNetwork flag in the 1.10 remote api. +- Document that `--lxc-conf` is lxc only. +- Add chef usage documentation. +- Add example for an image with multiple for `docker load`. +- Explain what `docker run -a` does in the docs. + +#### Contrib +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. +- Add check-config script to contrib. +- Fix fish shell completion. + +#### Hack +* Clean up "go test" output from "make test" to be much more readable/scannable. +* Exclude more "definitely not unit tested Go source code" directories from hack/make/test. ++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. +- Include contributed completions in Ubuntu PPA. ++ Add cli integration tests. +* Add tweaks to the hack scripts to make them simpler. + +#### Remote API ++ Add TLS auth support for API. +* Move git clone from daemon to client. +- Fix content-type detection in docker cp. +* Split API into 2 go packages. + +#### Runtime +* Support hairpin NAT without going through Docker server. +- devicemapper: succeed immediately when removing non-existent devices. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). +- devicemapper: increase timeout in waitClose to 10 seconds. +- devicemapper: ensure we shut down thin pool cleanly. +- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. +- devicemapper: avoid AB-BA deadlock. +- devicemapper: make shutdown better/faster. +- improve alpha sorting in mflag. +- Remove manual http cookie management because the cookiejar is being used. +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Add FreeBSD support for the client. +- Merge auth package into registry. +- Add deprecation warning for -t on `docker pull`. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. +- Fix attach exit on darwin. +- Improve deprecation message. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Only unshare the mount namespace for execin. +- Merge existing config when committing. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Mount cgroups automatically if they're not mounted already. +- Use mock for search tests. +- Update to double-dash everywhere. +- Move .dockerenv parsing to lxc driver. +- Move all bind-mounts in the container inside the namespace. +- Don't use separate bind mount for container. +- Always symlink /dev/ptmx for libcontainer. +- Don't kill by pid for other drivers. +- Add initial logging to libcontainer. +* Sort by port in `docker ps`. +- Move networking drivers into runtime top level package. ++ Add --no-prune to `docker rmi`. ++ Add time since exit in `docker ps`. +- graphdriver: add build tags. +- Prevent allocation of previously allocated ports & prevent improve port allocation. +* Add support for --since/--before in `docker ps`. +- Clean up container stop. ++ Add support for configurable dns search domains. +- Add support for relative WORKDIR instructions. +- Add --output flag for docker save. +- Remove duplication of DNS entries in config merging. +- Add cpuset.cpus to cgroups and native driver options. +- Remove docker-ci. +- Promote btrfs. btrfs is no longer considered experimental. +- Add --input flag to `docker load`. +- Return error when existing bridge doesn't match IP address. +- Strip comments before parsing line continuations to avoid interpreting instructions as comments. +- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. +- Add systemd implementation of cgroups and make containers show up as systemd units. +- Fix commit and import when no repository is specified. +- Remount /var/lib/docker as --private to fix scaling issue. +- Use the environment's proxy when pinging the remote registry. +- Reduce error level from harmless errors. +* Allow --volumes-from to be individual files. +- Fix expanding buffer in StdCopy. +- Set error regardless of attach or stdin. This fixes #3364. +- Add support for --env-file to load environment variables from files. +- Symlink /etc/mtab and /proc/mounts. +- Allow pushing a single tag. +- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. +- Don't throw error when starting an already running container. +- Fix dynamic port allocation limit. +- remove setupDev from libcontainer. +- Add API version to `docker version`. +- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. +- Fix --volumes-from mount failure. +- Allow non-privileged containers to create device nodes. +- Skip login tests because of external dependency on a hosted service. +- Deprecate `docker images --tree` and `docker images --viz`. +- Deprecate `docker insert`. +- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. +- Add specific error message when hitting 401 over HTTP on push. +- Fix absolute volume check. +- Remove volumes-from from the config. +- Move DNS options to hostconfig. +- Update the apparmor profile for libcontainer. +- Add deprecation notice for `docker commit -run`. + +## 0.9.1 (2014-03-24) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. + +#### Documentation +- Fix external link on security of containers. + +#### Contrib +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. + +#### Hack +- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. + +#### Remote API +- Fix content-type detection in `docker cp`. + +#### Runtime +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Only unshare the mount namespace for execin. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Merge existing config when committing. +- Fix panic in monitor. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Improve deprecation message. +- Fix attach exit on darwin. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). +- devicemapper: succeed immediately when removing non-existent devices. +- devicemapper: increase timeout in waitClose to 10 seconds. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. + +## 0.9.0 (2014-03-10) + +#### Builder +- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. +- Add error to docker build --rm. This adds missing error handling. +- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. +- Make `--rm` the default for `docker build`. + +#### Documentation +- Download the docker client binary for Mac over https. +- Update the titles of the install instructions & descriptions. +* Add instructions for upgrading boot2docker. +* Add port forwarding example in OS X install docs. +- Attempt to disentangle repository and registry. +- Update docs to explain more about `docker ps`. +- Update sshd example to use a Dockerfile. +- Rework some examples, including the Python examples. +- Update docs to include instructions for a container's lifecycle. +- Update docs documentation to discuss the docs branch. +- Don't skip cert check for an example & use HTTPS. +- Bring back the memory and swap accounting section which was lost when the kernel page was removed. +- Explain DNS warnings and how to fix them on systems running and using a local nameserver. + +#### Contrib +- Add Tanglu support for mkimage-debootstrap. +- Add SteamOS support for mkimage-debootstrap. + +#### Hack +- Get package coverage when running integration tests. +- Remove the Vagrantfile. This is being replaced with boot2docker. +- Fix tests on systems where aufs isn't available. +- Update packaging instructions and remove the dependency on lxc. + +#### Remote API +* Move code specific to the API to the api package. +- Fix header content type for the API. Makes all endpoints use proper content type. +- Fix registry auth & remove ping calls from CmdPush and CmdPull. +- Add newlines to the JSON stream functions. + +#### Runtime +* Do not ping the registry from the CLI. All requests to registries flow through the daemon. +- Check for nil information return in the lxc driver. This fixes panics with older lxc versions. +- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. +- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. +* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. +- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. +- Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`. +- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. +- Fix `--run` in `docker commit`. This makes `docker commit --run` work again. +- Fix custom bridge related options. This makes custom bridges work again. ++ Mount-bind the PTY as container console. This allows tmux/screen to run. ++ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. ++ Add native exec driver which uses libcontainer and make it the default exec driver. +- Add support for handling extended attributes in archives. +* Set the container MTU to be the same as the host MTU. ++ Add simple sha256 checksums for layers to speed up `docker push`. +* Improve kernel version parsing. +* Allow flag grouping (`docker run -it`). +- Remove chroot exec driver. +- Fix divide by zero to fix panic. +- Rewrite `docker rmi`. +- Fix docker info with lxc 1.0.0. +- Fix fedora tty with apparmor. +* Don't always append env vars, replace defaults with vars from config. +* Fix a goroutine leak. +* Switch to Go 1.2.1. +- Fix unique constraint error checks. +* Handle symlinks for Docker's data directory and for TMPDIR. +- Add deprecation warnings for flags (-flag is deprecated in favor of --flag) +- Add apparmor profile for the native execution driver. +* Move system specific code from archive to pkg/system. +- Fix duplicate signal for `docker run -i -t` (issue #3336). +- Return correct process pid for lxc. +- Add a -G option to specify the group which unix sockets belong to. ++ Add `-f` flag to `docker rm` to force removal of running containers. ++ Kill ghost containers and restart all ghost containers when the docker daemon restarts. ++ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. + +## 0.8.1 (2014-02-18) + +#### Builder + +- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system +- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported + +#### Documentation + +* Update issue filing instructions +* Warn against the use of symlinks for Docker's storage folder +* Replace the Firefox example with an IceWeasel example +* Rewrite the PostgreSQL example using a Dockerfile and add more details to it +* Improve the OS X documentation + +#### Remote API + +- Fix broken images API for version less than 1.7 +- Use the right encoding for all API endpoints which return JSON +- Move remote api client to api/ +- Queue calls to the API using generic socket wait + +#### Runtime + +- Fix the use of custom settings for bridges and custom bridges +- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures +- Remove two panics which could make Docker crash in some situations +- Don't ping registry from the CLI client +- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks +- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration +- Remove directory when removing devicemapper device. This cleans up leftover mount directories +- Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration +- Ensure `docker cp` stream is closed properly +- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Stop allowing tcp:// as a default tcp bin address which binds to 127.0.0.1:4243 and remove the default port ++ Mount-bind the PTY as container console. This allows tmux and screen to run in a container +- Clean up archive closing. This fixes and improves archive handling +- Fix engine tests on systems where temp directories are symlinked +- Add test methods for save and load +- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart +- Support submodules when building from a GitHub repository +- Quote volume path to allow spaces +- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs + +## 0.8.0 (2014-02-04) + +#### Notable features since 0.7.0 + +* Images and containers can be removed much faster +* Building an image from source with docker build is now much faster +* The Docker daemon starts and stops much faster +* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations +* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations +* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar +* Docker can now create, remove and modify larger numbers of containers and images graciously thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers +With the ongoing changes to the networking and execution subsystems of docker testing these areas have been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages +* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change + +* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed +* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build +* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write +* Docker is officially supported on Mac OS X +* The Docker daemon supports systemd socket activation + +## 0.7.6 (2014-01-14) + +#### Builder + +* Do not follow symlink outside of build context + +#### Runtime + +- Remount bind mounts when ro is specified +* Use https for fetching docker version + +#### Other + +* Inline the test.docker.io fingerprint +* Add ca-certificates to packaging documentation + +## 0.7.5 (2014-01-09) + +#### Builder + +* Disable compression for build. More space usage but a much faster upload +- Fix ADD caching for certain paths +- Do not compress archive from git build + +#### Documentation + +- Fix error in GROUP add example +* Make sure the GPG fingerprint is inline in the documentation +* Give more specific advice on setting up signing of commits for DCO + +#### Runtime + +- Fix misspelled container names +- Do not add hostname when networking is disabled +* Return most recent image from the cache by date +- Return all errors from docker wait +* Add Content-Type Header "application/json" to GET /version and /info responses + +#### Other + +* Update DCO to version 1.1 ++ Update Makefile to use "docker:GIT_BRANCH" as the generated image name +* Update Travis to check for new 1.1 DCO version + +## 0.7.4 (2014-01-07) + +#### Builder + +- Fix ADD caching issue with . prefixed path +- Fix docker build on devicemapper by reverting sparse file tar option +- Fix issue with file caching and prevent wrong cache hit +* Use same error handling while unmarshalling CMD and ENTRYPOINT + +#### Documentation + +* Simplify and streamline Amazon Quickstart +* Install instructions use unprefixed Fedora image +* Update instructions for mtu flag for Docker on GCE ++ Add Ubuntu Saucy to installation +- Fix for wrong version warning on master instead of latest + +#### Runtime + +- Only get the image's rootfs when we need to calculate the image size +- Correctly handle unmapping UDP ports +* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build +- Fix login message to say pull instead of push +- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN +* Make blank -H option default to the same as no -H was sent +* Extract cgroups utilities to own submodule + +#### Other + ++ Add Travis CI configuration to validate DCO and gofmt requirements ++ Add Developer Certificate of Origin Text +* Upgrade VBox Guest Additions +* Check standalone header when pinging a registry server + +## 0.7.3 (2014-01-02) + +#### Builder + ++ Update ADD to use the image cache, based on a hash of the added content +* Add error message for empty Dockerfile + +#### Documentation + +- Fix outdated link to the "Introduction" on www.docker.io ++ Update the docs to get wider when the screen does +- Add information about needing to install LXC when using raw binaries +* Update Fedora documentation to disentangle the docker and docker.io conflict +* Add a note about using the new `-mtu` flag in several GCE zones ++ Add FrugalWare installation instructions ++ Add a more complete example of `docker run` +- Fix API documentation for creating and starting Privileged containers +- Add missing "name" parameter documentation on "/containers/create" +* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration +- Update the 1.8 API documentation with some additions that were added to the docs for 1.7 + +#### Hack + +- Add missing libdevmapper dependency to the packagers documentation +* Update minimum Go requirement to a hard line at Go 1.2+ +* Many minor improvements to the Vagrantfile ++ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) ++ Add coverprofile generation reporting +- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually +* Update Dockerfile to be more canonical and have less spurious warnings during build +- Fix some miscellaneous `docker pull` progress bar display issues +* Migrate more miscellaneous packages under the "pkg" folder +* Update TextMate highlighting to automatically be enabled for files named "Dockerfile" +* Reorganize syntax highlighting files under a common "contrib/syntax" directory +* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation +* Add support for container names in bash completion + +#### Packaging + ++ Add an official Docker client binary for Darwin (Mac OS X) +* Remove empty "Vendor" string and added "License" on deb package ++ Add a stubbed version of "/etc/default/docker" in the deb package + +#### Runtime + +* Update layer application to extract tars in place, avoiding file churn while handling whiteouts +- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) +* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) ++ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions +- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files +* Update container name validation to include '.' +- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected +* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler +* Update to use proper box-drawing characters everywhere in `docker images -tree` +* Move MTU setting from LXC configuration to directly use netlink +* Add `-S` option to external tar invocation for more efficient spare file handling ++ Add arch/os info to User-Agent string, especially for registry requests ++ Add `-mtu` option to Docker daemon for configuring MTU +- Fix `docker build` to exit with a non-zero exit code on error ++ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation + +## 0.7.2 (2013-12-16) + +#### Runtime + ++ Validate container names on creation with standard regex +* Increase maximum image depth to 127 from 42 +* Continue to move api endpoints to the job api ++ Add -bip flag to allow specification of dynamic bridge IP via CIDR +- Allow bridge creation when ipv6 is not enabled on certain systems +* Set hostname and IP address from within dockerinit +* Drop capabilities from within dockerinit +- Fix volumes on host when symlink is present the image +- Prevent deletion of image if ANY container is depending on it even if the container is not running +* Update docker push to use new progress display +* Use os.Lstat to allow mounting unix sockets when inspecting volumes +- Adjust handling of inactive user login +- Add missing defines in devicemapper for older kernels +- Allow untag operations with no container validation +- Add auth config to docker build + +#### Documentation + +* Add more information about Docker logging ++ Add RHEL documentation +* Add a direct example for changing the CMD that is run in a container +* Update Arch installation documentation ++ Add section on Trusted Builds ++ Add Network documentation page + +#### Other + ++ Add new cover bundle for providing code coverage reporting +* Separate integration tests in bundles +* Make Tianon the hack maintainer +* Update mkimage-debootstrap with more tweaks for keeping images small +* Use https to get the install script +* Remove vendored dotcloud/tar now that Go 1.2 has been released + +## 0.7.1 (2013-12-05) + +#### Documentation + ++ Add @SvenDowideit as documentation maintainer ++ Add links example ++ Add documentation regarding ambassador pattern ++ Add Google Cloud Platform docs ++ Add dockerfile best practices +* Update doc for RHEL +* Update doc for registry +* Update Postgres examples +* Update doc for Ubuntu install +* Improve remote api doc + +#### Runtime + ++ Add hostconfig to docker inspect ++ Implement `docker log -f` to stream logs ++ Add env variable to disable kernel version warning ++ Add -format to `docker inspect` ++ Support bind-mount for files +- Fix bridge creation on RHEL +- Fix image size calculation +- Make sure iptables are called even if the bridge already exists +- Fix issue with stderr only attach +- Remove init layer when destroying a container +- Fix same port binding on different interfaces +- `docker build` now returns the correct exit code +- Fix `docker port` to display correct port +- `docker build` now check that the dockerfile exists client side +- `docker attach` now returns the correct exit code +- Remove the name entry when the container does not exist + +#### Registry + +* Improve progress bars, add ETA for downloads +* Simultaneous pulls now waits for the first to finish instead of failing +- Tag only the top-layer image when pushing to registry +- Fix issue with offline image transfer +- Fix issue preventing using ':' in password for registry + +#### Other + ++ Add pprof handler for debug ++ Create a Makefile +* Use stdlib tar that now includes fix +* Improve make.sh test script +* Handle SIGQUIT on the daemon +* Disable verbose during tests +* Upgrade to go1.2 for official build +* Improve unit tests +* The test suite now runs all tests even if one fails +* Refactor C in Go (Devmapper) +- Fix OS X compilation + +## 0.7.0 (2013-11-25) + +#### Notable features since 0.6.0 + +* Storage drivers: choose from aufs, device-mapper, or vfs. +* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. +* Links: compose complex software stacks by connecting containers to each other. +* Container naming: organize your containers by giving them memorable names. +* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. +* Offline transfer: push and pull images to the filesystem without losing information. +* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. + +## 0.6.7 (2013-11-21) + +#### Runtime + +* Improve stability, fixes some race conditions +* Skip the volumes mounted when deleting the volumes of container. +* Fix layer size computation: handle hard links correctly +* Use the work Path for docker cp CONTAINER:PATH +* Fix tmp dir never cleanup +* Speedup docker ps +* More informative error message on name collisions +* Fix nameserver regex +* Always return long id's +* Fix container restart race condition +* Keep published ports on docker stop;docker start +* Fix container networking on Fedora +* Correctly express "any address" to iptables +* Fix network setup when reconnecting to ghost container +* Prevent deletion if image is used by a running container +* Lock around read operations in graph + +#### RemoteAPI + +* Return full ID on docker rmi + +#### Client + ++ Add -tree option to images ++ Offline image transfer +* Exit with status 2 on usage error and display usage on stderr +* Do not forward SIGCHLD to container +* Use string timestamp for docker events -since + +#### Other + +* Update to go 1.2rc5 ++ Add /etc/default/docker support to upstart + +## 0.6.6 (2013-11-06) + +#### Runtime + +* Ensure container name on register +* Fix regression in /etc/hosts ++ Add lock around write operations in graph +* Check if port is valid +* Fix restart runtime error with ghost container networking ++ Add some more colors and animals to increase the pool of generated names +* Fix issues in docker inspect ++ Escape apparmor confinement ++ Set environment variables using a file. +* Prevent docker insert to erase something ++ Prevent DNS server conflicts in CreateBridgeIface ++ Validate bind mounts on the server side ++ Use parent image config in docker build +* Fix regression in /etc/hosts + +#### Client + ++ Add -P flag to publish all exposed ports ++ Add -notrunc and -q flags to docker history +* Fix docker commit, tag and import usage ++ Add stars, trusted builds and library flags in docker search +* Fix docker logs with tty + +#### RemoteAPI + +* Make /events API send headers immediately +* Do not split last column docker top ++ Add size to history + +#### Other + ++ Contrib: Desktop integration. Firefox usecase. ++ Dockerfile: bump to go1.2rc3 + +## 0.6.5 (2013-10-29) + +#### Runtime + ++ Containers can now be named ++ Containers can now be linked together for service discovery ++ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors ++ Automatically start crashed containers after a reboot ++ Expose IP, port, and proto as separate environment vars for container links +* Allow ports to be published to specific ips +* Prohibit inter-container communication by default +- Ignore ErrClosedPipe for stdin in Container.Attach +- Remove unused field kernelVersion +* Fix issue when mounting subdirectories of /mnt in container +- Fix untag during removal of images +* Check return value of syscall.Chdir when changing working directory inside dockerinit + +#### Client + +- Only pass stdin to hijack when needed to avoid closed pipe errors +* Use less reflection in command-line method invocation +- Monitor the tty size after starting the container, not prior +- Remove useless os.Exit() calls after log.Fatal + +#### Hack + ++ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian +* Add -p option to invoke debootstrap with http_proxy +- Update install.sh with $sh_c to get sudo/su for modprobe +* Update all the mkimage scripts to use --numeric-owner as a tar argument +* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues + +#### Other + +* Documentation: Fix the flags for nc in example +* Testing: Remove warnings and prevent mount issues +- Testing: Change logic for tty resize to avoid warning in tests +- Builder: Fix race condition in docker build with verbose output +- Registry: Fix content-type for PushImageJSONIndex method +* Contrib: Improve helper tools to generate debian and Arch linux server images + +## 0.6.4 (2013-10-16) + +#### Runtime + +- Add cleanup of container when Start() fails +* Add better comments to utils/stdcopy.go +* Add utils.Errorf for error logging ++ Add -rm to docker run for removing a container on exit +- Remove error messages which are not actually errors +- Fix `docker rm` with volumes +- Fix some error cases where an HTTP body might not be closed +- Fix panic with wrong dockercfg file +- Fix the attach behavior with -i +* Record termination time in state. +- Use empty string so TempDir uses the OS's temp dir automatically +- Make sure to close the network allocators ++ Autorestart containers by default +* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` +* lxc: Allow set_file_cap capability in container +- Move run -rm to the cli only +* Split stdout stderr +* Always create a new session for the container + +#### Testing + +- Add aggregated docker-ci email report +- Add cleanup to remove leftover containers +* Add nightly release to docker-ci +* Add more tests around auth.ResolveAuthConfig +- Remove a few errors in tests +- Catch errClosing error when TCP and UDP proxies are terminated +* Only run certain tests with TESTFLAGS='-run TestName' make.sh +* Prevent docker-ci to test closing PRs +* Replace panic by log.Fatal in tests +- Increase TestRunDetach timeout + +#### Documentation + +* Add initial draft of the Docker infrastructure doc +* Add devenvironment link to CONTRIBUTING.md +* Add `apt-get install curl` to Ubuntu docs +* Add explanation for export restrictions +* Add .dockercfg doc +* Remove Gentoo install notes about #1422 workaround +* Fix help text for -v option +* Fix Ping endpoint documentation +- Fix parameter names in docs for ADD command +- Fix ironic typo in changelog +* Various command fixes in postgres example +* Document how to edit and release docs +- Minor updates to `postgresql_service.rst` +* Clarify LGTM process to contributors +- Corrected error in the package name +* Document what `vagrant up` is actually doing ++ improve doc search results +* Cleanup whitespace in API 1.5 docs +* use angle brackets in MAINTAINER example email +* Update archlinux.rst ++ Changes to a new style for the docs. Includes version switcher. +* Formatting, add information about multiline json +* Improve registry and index REST API documentation +- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 +* Update Gentoo installation documentation now that we're in the portage tree proper +* Cleanup and reorganize docs and tooling for contributors and maintainers +- Minor spelling correction of protocoll -> protocol + +#### Contrib + +* Add vim syntax highlighting for Dockerfiles from @honza +* Add mkimage-arch.sh +* Reorganize contributed completion scripts to add zsh completion + +#### Hack + +* Add vagrant user to the docker group +* Add proper bash completion for "docker push" +* Add xz utils as a runtime dep +* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates ++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link +* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly ++ Add @tianon to hack/MAINTAINERS +* Improve network performance for VirtualBox +* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) +- Fix contrib/mkimage-debian.sh apt caching prevention ++ Add Dockerfile.tmLanguage to contrib +* Configured FPM to make /etc/init/docker.conf a config file +* Enable SSH Agent forwarding in Vagrant VM +* Several small tweaks/fixes for contrib/mkimage-debian.sh + +#### Other + +- Builder: Abort build if mergeConfig returns an error and fix duplicate error message +- Packaging: Remove deprecated packaging directory +- Registry: Use correct auth config when logging in. +- Registry: Fix the error message so it is the same as the regex + +## 0.6.3 (2013-09-23) + +#### Packaging + +* Add 'docker' group on install for ubuntu package +* Update tar vendor dependency +* Download apt key over HTTPS + +#### Runtime + +- Only copy and change permissions on non-bindmount volumes +* Allow multiple volumes-from +- Fix HTTP imports from STDIN + +#### Documentation + +* Update section on extracting the docker binary after build +* Update development environment docs for new build process +* Remove 'base' image from documentation + +#### Other + +- Client: Fix detach issue +- Registry: Update regular expression to match index + +## 0.6.2 (2013-09-17) + +#### Runtime + ++ Add domainname support ++ Implement image filtering with path.Match +* Remove unnecessary warnings +* Remove os/user dependency +* Only mount the hostname file when the config exists +* Handle signals within the `docker login` command +- UID and GID are now also applied to volumes +- `docker start` set error code upon error +- `docker run` set the same error code as the process started + +#### Builder + ++ Add -rm option in order to remove intermediate containers +* Allow multiline for the RUN instruction + +#### Registry + +* Implement login with private registry +- Fix push issues + +#### Other + ++ Hack: Vendor all dependencies +* Remote API: Bump to v1.5 +* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. +* Documentation: General improvements + +## 0.6.1 (2013-08-23) + +#### Registry + +* Pass "meta" headers in API calls to the registry + +#### Packaging + +- Use correct upstart script with new build tool +- Use libffi-dev, don`t build it from sources +- Remove duplicate mercurial install command + +## 0.6.0 (2013-08-22) + +#### Runtime + ++ Add lxc-conf flag to allow custom lxc options ++ Add an option to set the working directory +* Add Image name to LogEvent tests ++ Add -privileged flag and relevant tests, docs, and examples +* Add websocket support to /container//attach/ws +* Add warning when net.ipv4.ip_forwarding = 0 +* Add hostname to environment +* Add last stable version in `docker version` +- Fix race conditions in parallel pull +- Fix Graph ByParent() to generate list of child images per parent image. +- Fix typo: fmt.Sprint -> fmt.Sprintf +- Fix small \n error un docker build +* Fix to "Inject dockerinit at /.dockerinit" +* Fix #910. print user name to docker info output +* Use Go 1.1.2 for dockerbuilder +* Use ranged for loop on channels +- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete +- Improve CMD, ENTRYPOINT, and attach docs. +- Improve connect message with socket error +- Load authConfig only when needed and fix useless WARNING +- Show tag used when image is missing +* Apply volumes-from before creating volumes +- Make docker run handle SIGINT/SIGTERM +- Prevent crash when .dockercfg not readable +- Install script should be fetched over https, not http. +* API, issue 1471: Use groups for socket permissions +- Correctly detect IPv4 forwarding +* Mount /dev/shm as a tmpfs +- Switch from http to https for get.docker.io +* Let userland proxy handle container-bound traffic +* Update the Docker CLI to specify a value for the "Host" header. +- Change network range to avoid conflict with EC2 DNS +- Reduce connect and read timeout when pinging the registry +* Parallel pull +- Handle ip route showing mask-less IP addresses +* Allow ENTRYPOINT without CMD +- Always consider localhost as a domain name when parsing the FQN repos name +* Refactor checksum + +#### Documentation + +* Add MongoDB image example +* Add instructions for creating and using the docker group +* Add sudo to examples and installation to documentation +* Add ufw doc +* Add a reference to ps -a +* Add information about Docker`s high level tools over LXC. +* Fix typo in docs for docker run -dns +* Fix a typo in the ubuntu installation guide +* Fix to docs regarding adding docker groups +* Update default -H docs +* Update readme with dependencies for building +* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 +* PostgreSQL service example in documentation +* Suggest installing linux-headers by default. +* Change the twitter handle +* Clarify Amazon EC2 installation +* 'Base' image is deprecated and should no longer be referenced in the docs. +* Move note about officially supported kernel +- Solved the logo being squished in Safari + +#### Builder + ++ Add USER instruction do Dockerfile ++ Add workdir support for the Buildfile +* Add no cache for docker build +- Fix docker build and docker events output +- Only count known instructions as build steps +- Make sure ENV instruction within build perform a commit each time +- Forbid certain paths within docker build ADD +- Repository name (and optionally a tag) in build usage +- Make sure ADD will create everything in 0755 + +#### Remote API + +* Sort Images by most recent creation date. +* Reworking opaque requests in registry module +* Add image name in /events +* Use mime pkg to parse Content-Type +* 650 http utils and user agent field + +#### Hack + ++ Bash Completion: Limit commands to containers of a relevant state +* Add docker dependencies coverage testing into docker-ci + +#### Packaging + ++ Docker-brew 0.5.2 support and memory footprint reduction +* Add new docker dependencies into docker-ci +- Revert "docker.upstart: avoid spawning a `sh` process" ++ Docker-brew and Docker standard library ++ Release docker with docker +* Fix the upstart script generated by get.docker.io +* Enabled the docs to generate manpages. +* Revert Bind daemon to 0.0.0.0 in Vagrant. + +#### Register + +* Improve auth push +* Registry unit tests + mock registry + +#### Tests + +* Improve TestKillDifferentUser to prevent timeout on buildbot +- Fix typo in TestBindMounts (runContainer called without image) +* Improve TestGetContainersTop so it does not rely on sleep +* Relax the lo interface test to allow iface index != 1 +* Add registry functional test to docker-ci +* Add some tests in server and utils + +#### Other + +* Contrib: bash completion script +* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host +* Don`t read from stdout when only attached to stdin + +## 0.5.3 (2013-08-13) + +#### Runtime + +* Use docker group for socket permissions +- Spawn shell within upstart script +- Handle ip route showing mask-less IP addresses +- Add hostname to environment + +#### Builder + +- Make sure ENV instruction within build perform a commit each time + +## 0.5.2 (2013-08-08) + +* Builder: Forbid certain paths within docker build ADD +- Runtime: Change network range to avoid conflict with EC2 DNS +* API: Change daemon to listen on unix socket by default + +## 0.5.1 (2013-07-30) + +#### Runtime + ++ Add `ps` args to `docker top` ++ Add support for container ID files (pidfile like) ++ Add container=lxc in default env ++ Support networkless containers with `docker run -n` and `docker -d -b=none` +* Stdout/stderr logs are now stored in the same file as JSON +* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3. +* Change .dockercfg format to json and support multiple auth remote +- Do not override volumes from config +- Fix issue with EXPOSE override + +#### API + ++ Docker client now sets useragent (RFC 2616) ++ Add /events endpoint + +#### Builder + ++ ADD command now understands URLs ++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables +- Create directories with 755 instead of 700 within ADD instruction + +#### Hack + +* Simplify unit tests with helpers +* Improve docker.upstart event +* Add coverage testing into docker-ci + +## 0.5.0 (2013-07-17) + +#### Runtime + ++ List all processes running inside a container with 'docker top' ++ Host directories can be mounted as volumes with 'docker run -v' ++ Containers can expose public UDP ports (eg, '-p 123/udp') ++ Optionally specify an exact public port (eg. '-p 80:4500') +* 'docker login' supports additional options +- Don't save a container`s hostname when committing an image. + +#### Registry + ++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries +- Fix issues when uploading images to a private registry + +#### Builder + ++ ENTRYPOINT instruction sets a default binary entry point to a container ++ VOLUME instruction marks a part of the container as persistent data +* 'docker build' displays the full output of a build by default + +## 0.4.8 (2013-07-01) + ++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container. - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID. +- Tests: Fix issues in the test suite + +## 0.4.7 (2013-06-28) + +#### Remote API + +* The progress bar updates faster when downloading and uploading large files +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version ++ Host directories can be mounted as volumes with 'docker run -b' +- fix an issue when only attaching to stdin +* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts + +#### Hack + +* Improve test suite and dev environment +* Remove dependency on unit tests on 'os/user' + +#### Other + +* Registry: easier push/pull to a custom registry ++ Documentation: add terminology section + +## 0.4.6 (2013-06-22) + +- Runtime: fix a bug which caused creation of empty images (and volumes) to crash. + +## 0.4.5 (2013-06-21) + ++ Builder: 'docker build git://URL' fetches and builds a remote git repository +* Runtime: 'docker ps -s' optionally prints container size +* Tests: improved and simplified +- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. +- Builder: fix a regression when using ADD with single regular file. + +## 0.4.4 (2013-06-19) + +- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. + +## 0.4.3 (2013-06-19) + +#### Builder + ++ ADD of a local file will detect tar archives and unpack them +* ADD improvements: use tar for copy + automatically unpack local archives +* ADD uses tar/untar for copies instead of calling 'cp -ar' +* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. +- Fix a bug which caused builds to fail if ADD was the first command +* Nicer output for 'docker build' + +#### Runtime + +* Remove bsdtar dependency +* Add unix socket and multiple -H support +* Prevent rm of running containers +* Use go1.1 cookiejar +- Fix issue detaching from running TTY container +- Forbid parallel push/pull for a single image/repo. Fixes #311 +- Fix race condition within Run command when attaching. + +#### Client + +* HumanReadable ProgressBar sizes in pull +* Fix docker version`s git commit output + +#### API + +* Send all tags on History API call +* Add tag lookup to history command. Fixes #882 + +#### Documentation + +- Fix missing command in irc bouncer example + +## 0.4.2 (2013-06-17) + +- Packaging: Bumped version to work around an Ubuntu bug + +## 0.4.1 (2013-06-17) + +#### Remote Api + ++ Add flag to enable cross domain requests ++ Add images and containers sizes in docker ps and docker images + +#### Runtime + ++ Configure dns configuration host-wide with 'docker -d -dns' ++ Detect faulty DNS configuration and replace it with a public default ++ Allow docker run : ++ You can now specify public port (ex: -p 80:4500) +* Improve image removal to garbage-collect unreferenced parents + +#### Client + +* Allow multiple params in inspect +* Print the container id before the hijack in `docker run` + +#### Registry + +* Add regexp check on repo`s name +* Move auth to the client +- Remove login check on pull + +#### Other + +* Vagrantfile: Add the rest api port to vagrantfile`s port_forward +* Upgrade to Go 1.1 +- Builder: don`t ignore last line in Dockerfile when it doesn`t end with \n + +## 0.4.0 (2013-06-03) + +#### Builder + ++ Introducing Builder ++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile + +#### Remote API + ++ Introducing Remote API ++ control Docker programmatically using a simple HTTP/json API + +#### Runtime + +* Various reliability and usability improvements + +## 0.3.4 (2013-05-30) + +#### Builder + ++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile ++ 'docker build -t FOO' applies the tag FOO to the newly built container. + +#### Runtime + ++ Interactive TTYs correctly handle window resize +* Fix how configuration is merged between layers + +#### Remote API + ++ Split stdout and stderr on 'docker run' ++ Optionally listen on a different IP and port (use at your own risk) + +#### Documentation + +* Improve install instructions. + +## 0.3.3 (2013-05-23) + +- Registry: Fix push regression +- Various bugfixes + +## 0.3.2 (2013-05-09) + +#### Registry + +* Improve the checksum process +* Use the size to have a good progress bar while pushing +* Use the actual archive if it exists in order to speed up the push +- Fix error 400 on push + +#### Runtime + +* Store the actual archive on commit + +## 0.3.1 (2013-05-08) + +#### Builder + ++ Implement the autorun capability within docker builder ++ Add caching to docker builder ++ Add support for docker builder with native API as top level command ++ Implement ENV within docker builder +- Check the command existence prior create and add Unit tests for the case +* use any whitespaces instead of tabs + +#### Runtime + ++ Add go version to debug infos +* Kernel version - don`t show the dash if flavor is empty + +#### Registry + ++ Add docker search top level command in order to search a repository +- Fix pull for official images with specific tag +- Fix issue when login in with a different user and trying to push +* Improve checksum - async calculation + +#### Images + ++ Output graph of images to dot (graphviz) +- Fix ByParent function + +#### Documentation + ++ New introduction and high-level overview ++ Add the documentation for docker builder +- CSS fix for docker documentation to make REST API docs look better. +- Fix CouchDB example page header mistake +- Fix README formatting +* Update www.docker.io website. + +#### Other + ++ Website: new high-level overview +- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc +* Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker + +## 0.3.0 (2013-05-06) + +#### Runtime + +- Fix the command existence check +- strings.Split may return an empty string on no match +- Fix an index out of range crash if cgroup memory is not + +#### Documentation + +* Various improvements ++ New example: sharing data between 2 couchdb databases + +#### Other + +* Vagrant: Use only one deb line in /etc/apt ++ Registry: Implement the new registry + +## 0.2.2 (2013-05-03) + ++ Support for data volumes ('docker run -v=PATH') ++ Share data volumes between containers ('docker run -volumes-from') ++ Improve documentation +* Upgrade to Go 1.0.3 +* Various upgrades to the dev environment for contributors + +## 0.2.1 (2013-05-01) + ++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc. +* Improve install process on Vagrant ++ New Dockerfile operation: "maintainer" ++ New Dockerfile operation: "expose" ++ New Dockerfile operation: "cmd" ++ Contrib script to build a Debian base layer ++ 'docker -d -r': restart crashed containers at daemon startup +* Runtime: improve test coverage + +## 0.2.0 (2013-04-23) + +- Runtime: ghost containers can be killed and waited for +* Documentation: update install instructions +- Packaging: fix Vagrantfile +- Development: automate releasing binaries and ubuntu packages ++ Add a changelog +- Various bugfixes + +## 0.1.8 (2013-04-22) + +- Dynamically detect cgroup capabilities +- Issue stability warning on kernels <3.8 +- 'docker push' buffers on disk instead of memory +- Fix 'docker diff' for removed files +- Fix 'docker stop' for ghost containers +- Fix handling of pidfile +- Various bugfixes and stability improvements + +## 0.1.7 (2013-04-18) + +- Container ports are available on localhost +- 'docker ps' shows allocated TCP ports +- Contributors can run 'make hack' to start a continuous integration VM +- Streamline ubuntu packaging & uploading +- Various bugfixes and stability improvements + +## 0.1.6 (2013-04-17) + +- Record the author an image with 'docker commit -author' + +## 0.1.5 (2013-04-17) + +- Disable standalone mode +- Use a custom DNS resolver with 'docker -d -dns' +- Detect ghost containers +- Improve diagnosis of missing system capabilities +- Allow disabling memory limits at compile time +- Add debian packaging +- Documentation: installing on Arch Linux +- Documentation: running Redis on docker +- Fix lxc 0.9 compatibility +- Automatically load aufs module +- Various bugfixes and stability improvements + +## 0.1.4 (2013-04-09) + +- Full support for TTY emulation +- Detach from a TTY session with the escape sequence `C-p C-q` +- Various bugfixes and stability improvements +- Minor UI improvements +- Automatically create our own bridge interface 'docker0' + +## 0.1.3 (2013-04-04) + +- Choose TCP frontend port with '-p :PORT' +- Layer format is versioned +- Major reliability improvements to the process manager +- Various bugfixes and stability improvements + +## 0.1.2 (2013-04-03) + +- Set container hostname with 'docker run -h' +- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' +- Various bugfixes and stability improvements +- UI polish +- Progress bar on push/pull +- Use XZ compression by default +- Make IP allocator lazy + +## 0.1.1 (2013-03-31) + +- Display shorthand IDs for convenience +- Stabilize process management +- Layers can include a commit message +- Simplified 'docker attach' +- Fix support for re-attaching +- Various bugfixes and stability improvements +- Auto-download at run +- Auto-login on push +- Beefed up documentation + +## 0.1.0 (2013-03-23) + +Initial public release + +- Implement registry in order to push/pull images +- TCP port allocation +- Fix termcaps on Linux +- Add documentation +- Add Vagrant support with Vagrantfile +- Add unit tests +- Add repository/tags to ease image management +- Improve the layer implementation diff --git a/vendor/github.com/moby/moby/CONTRIBUTING.md b/vendor/github.com/moby/moby/CONTRIBUTING.md new file mode 100644 index 0000000..eb5f8ab --- /dev/null +++ b/vendor/github.com/moby/moby/CONTRIBUTING.md @@ -0,0 +1,401 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! We have a contributor's guide that explains +[setting up a Docker development environment and the contribution +process](https://docs.docker.com/opensource/project/who-written-for/). + +[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/) + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-other-issues) +* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + + +## Reporting other issues + +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + +Check that [our issue database](https://github.com/docker/docker/issues) +doesn't already include that problem or suggestion before submitting an issue. +If you find a match, you can use the "subscribe" button to get notified on +updates. Do *not* leave random "+1" or "I have this too" comments, as they +only clutter the discussion, and don't help resolving it. However, if you +have ways to reproduce the issue or have additional information that may help +resolving the issue, please leave a comment. + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). +Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). + +## Quick contribution tips and guidelines + +This section gives the experienced contributor some tips and guidelines. + +### Pull requests are always welcome + +Not sure if that typo is worth a pull request? Found a bug and know how to fix +it? Do it! We will appreciate it. Any significant improvement should be +documented as [a GitHub issue](https://github.com/docker/docker/issues) before +anybody starts working on it. + +We are always thrilled to receive pull requests. We do our best to process them +quickly. If your pull request is not accepted on the first try, +don't get discouraged! Our contributor's guide explains [the review process we +use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). + +### Design and cleanup proposals + +You can propose new designs for existing Docker features. You can also design +entirely new features. We really appreciate contributors who want to refactor or +otherwise cleanup our project. For information on making these types of +contributions, see [the advanced contribution +section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in +the contributors guide. + +We try hard to keep Docker lean and focused. Docker can't do everything for +everybody. This means that we might decide against incorporating a new feature. +However, there might be a way to implement that feature *on top of* Docker. + +### Talking to other Docker users and contributors + + + + + + + + + + + + + + + + + + + + + + + + +
Forums + A public forum for users to discuss questions and explore current design patterns and + best practices about Docker and related projects in the Docker Ecosystem. To participate, + just log in with your Docker Hub account on https://forums.docker.com. +
Internet Relay Chat (IRC) +

+ IRC a direct line to our most knowledgeable Docker users; we have + both the #docker and #docker-dev group on + irc.freenode.net. + IRC is a rich chat protocol but it can overwhelm new users. You can search + our chat archives. +

+

+ Read our IRC quickstart guide + for an easy way to get started. +

+
Google Group + The docker-dev + group is for contributors and other people contributing to the Docker project. + You can join them without a google account by sending an email to + docker-dev+subscribe@googlegroups.com. + After receiving the join-request message, you can simply reply to that to confirm the subscription. +
Twitter + You can follow Docker's Twitter feed + to get updates on our products. You can also tweet us questions or just + share blogs or stories. +
Stack Overflow + Stack Overflow has over 17000 Docker questions listed. We regularly + monitor Docker questions + and so do many other knowledgeable Docker users. +
+ + +### Conventions + +Fork the repository and make changes on your fork in a feature branch: + +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of + the issue. +- If it's a feature branch, create an enhancement issue to announce + your intentions, and name it XXXX-something where XXXX is the number of the + issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. [Run the full test +suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before +submitting a pull request. + +Update the documentation when creating or modifying features. Test your +documentation changes for clarity, concision, and correctness, as well as a +clean documentation build. See our contributors guide for [our style +guide](https://docs.docker.com/opensource/doc-style) and instructions on [building +the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plug-ins that do this automatically. + +Pull request descriptions should be as clear as possible and include a reference +to all the issues that they address. + +Commit messages must start with a capitalized and short summary (max. 50 chars) +written in the imperative, followed by an optional, more detailed explanatory +text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Post +a comment after pushing. New commits show up in the pull request automatically, +but the reviewers are notified only when you comment. + +Pull requests must be cleanly rebased on top of master without multiple branches +mixed into the PR. + +**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your +feature branch to update your pull request rather than `merge master`. + +Before you make a pull request, squash your commits into logical units of work +using `git rebase -i` and `git push -f`. A logical unit of work is a consistent +set of patches that should be reviewed together: for example, upgrading the +version of a vendored dependency and taking advantage of its now available new +feature constitute two separate units of work. Implementing a new function and +calling it in another file constitute a single logical unit of work. The very +high majority of submissions should have a single commit, so if in doubt: squash +down to one. + +After every commit, [make sure the test suite passes] +(https://docs.docker.com/opensource/project/test-and-docs/). Include documentation +changes in the same pull request so that a revert would remove all traces of +the feature or fix. + +Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that +close an issue. Including references automatically closes the issue on a merge. + +Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly +from the Git history. + +Please see the [Coding Style](#coding-style) for further guidelines. + +### Merge approval + +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to +indicate acceptance. + +A change requires LGTMs from an absolute majority of the maintainers of each +component affected. For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. + +For more details, see the [MAINTAINERS](MAINTAINERS) page. + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +### How can I become a maintainer? + +The procedures for adding new maintainers are explained in the +global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS) +file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/) +repository. + +Don't forget: being a maintainer is a time investment. Make sure you +will have time to make yourself available. You don't have to be a +maintainer to make a difference on the project! + +## Docker community guidelines + +We want to keep the Docker community awesome, growing and collaborative. We need +your help to keep it that way. To help with this we've come up with some general +guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members: + no regional, racial, gender, or other abuse will be tolerated. We like + nice people way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community feel + welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break + the law. + +* Stay on topic: Make sure that you are posting to the correct channel and + avoid off-topic discussions. Remember when you update an issue or respond + to an email you are potentially sending to a large number of people. Please + consider this before you update. Also remember that nobody likes spam. + +* Don't send email to the maintainers: There's no need to send email to the + maintainers to ask them to investigate an issue or to take a look at a + pull request. Instead of sending an email, GitHub mentions should be + used to ping maintainers to review a pull request, a proposal or an + issue. + +### Guideline violations — 3 strikes method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't hold a + grudge. + +* People who commit minor infractions will get some education, rather than + hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how much + you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or forgiveness. + +* Contact abuse@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with a + fair solution if there has been a misunderstanding. + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](https://golang.org/doc/effective_go.html). The +[Go Blog](https://blog.golang.org) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/moby/moby/Dockerfile b/vendor/github.com/moby/moby/Dockerfile new file mode 100644 index 0000000..354b47a --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile @@ -0,0 +1,246 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Add zfs ppa +COPY keys/launchpad-ppa-zfs.asc /go/src/github.com/docker/docker/keys/ +RUN apt-key add /go/src/github.com/docker/docker/keys/launchpad-ppa-zfs.asc +RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + binutils-mingw-w64 \ + bsdmainutils \ + btrfs-tools \ + build-essential \ + clang \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + gcc-mingw-w64 \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libnl-3-dev \ + libprotobuf-c0-dev \ + libprotobuf-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + net-tools \ + pkg-config \ + protobuf-compiler \ + protobuf-c-compiler \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + ubuntu-zfs \ + xfsprogs \ + vim-common \ + libzfs-dev \ + tar \ + zip \ + --no-install-recommends \ + && pip install awscli==1.10.15 +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Configure the container for OSX cross compilation +ENV OSX_SDK MacOSX10.11.sdk +ENV OSX_CROSS_COMMIT a9317c18a3a457ca0a657f08cc4d0d43c6cf8953 +RUN set -x \ + && export OSXCROSS_PATH="/osxcross" \ + && git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ + && ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \ + && curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \ + && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh +ENV PATH /osxcross/target/bin:$PATH + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 \ + freebsd/amd64 freebsd/386 freebsd/arm \ + windows/amd64 windows/386 \ + solaris/amd64 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install CRIU for checkpoint/restore support +ENV CRIU_VERSION 2.2 +RUN mkdir -p /usr/src/criu \ + && curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \ + && cd /usr/src/criu \ + && make \ + && make install-criu + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Install yamllint for validating swagger.yaml +RUN pip install yamllint==1.5.0 + +# Install go-swagger for validating swagger.yaml +ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb +RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \ + && (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \ + && go install -v github.com/go-swagger/go-swagger/cmd/swagger + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc +# Add integration helps to bashrc +RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \ + busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \ + debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \ + hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.aarch64 b/vendor/github.com/moby/moby/Dockerfile.aarch64 new file mode 100644 index 0000000..6112f80 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.aarch64 @@ -0,0 +1,175 @@ +# This file describes the standard way to build Docker on aarch64, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.aarch64 . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM aarch64/ubuntu:wily + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + g++ \ + gcc \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libc6-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-dev \ + mercurial \ + net-tools \ + parallel \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + gccgo \ + iproute2 \ + iputils-ping \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support aarch64 properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# We don't have official binary tarballs for ARM64, eigher for Go or bootstrap, +# so we use gccgo as bootstrap to build Go from source code. +# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because +# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH +ENV GOPATH /go + +# Only install one version of the registry, because old version which support +# schema1 manifests is not working on ARM64, we should skip integration-cli +# tests for schema1 manifests on ARM64. +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \ + aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \ + aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \ + aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.armhf b/vendor/github.com/moby/moby/Dockerfile.armhf new file mode 100644 index 0000000..1aebc16 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.armhf @@ -0,0 +1,182 @@ +# This file describes the standard way to build Docker on ARMv7, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.armhf . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + createrepo \ + curl \ + cmake \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends \ + && pip install awscli==1.10.15 + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \ + armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \ + armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \ + armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.ppc64le b/vendor/github.com/moby/moby/Dockerfile.ppc64le new file mode 100644 index 0000000..1f9f500 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.ppc64le @@ -0,0 +1,188 @@ +# This file describes the standard way to build Docker on ppc64le, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.ppc64le . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM ppc64le/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support ppc64le properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped in jessie is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + + +# Install Go +# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \ + ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \ + ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \ + ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.s390x b/vendor/github.com/moby/moby/Dockerfile.s390x new file mode 100644 index 0000000..ba94bc7 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.s390x @@ -0,0 +1,190 @@ +# This file describes the standard way to build Docker on s390x, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.s390x . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM s390x/gcc:6.1 + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# glibc in Debian has a bug specific to s390x that won't be fixed until Debian 8.6 is released +# - https://github.com/docker/docker/issues/24748 +# - https://sourceware.org/git/?p=glibc.git;a=commit;h=890b7a4b33d482b5c768ab47d70758b80227e9bc +# - https://sourceware.org/git/?p=glibc.git;a=commit;h=2e807f29595eb5b1e5d0decc6e356a3562ecc58e +RUN echo 'deb http://httpredir.debian.org/debian jessie-proposed-updates main' >> /etc/apt/sources.list.d/pu.list \ + && apt-get update \ + && apt-get install -y libc6 \ + && rm -rf /var/lib/apt/lists/* + +# Install seccomp: the version shipped in jessie is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support s390x properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux seccomp + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \ + s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \ + s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \ + s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.simple b/vendor/github.com/moby/moby/Dockerfile.simple new file mode 100644 index 0000000..8eeb3d9 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.simple @@ -0,0 +1,73 @@ +# docker build -t docker:simple -f Dockerfile.simple . +# docker run --rm docker:simple hack/make.sh dynbinary +# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit +# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli + +# This represents the bare minimum required to build and test Docker. + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Compile and runtime deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + btrfs-tools \ + build-essential \ + curl \ + cmake \ + gcc \ + git \ + libapparmor-dev \ + libdevmapper-dev \ + libsqlite3-dev \ + \ + ca-certificates \ + e2fsprogs \ + iptables \ + procps \ + xfsprogs \ + xz-utils \ + \ + aufs-tools \ + vim-common \ + && rm -rf /var/lib/apt/lists/* + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go +ENV CGO_LDFLAGS -L/lib + +# Install runc, containerd, tini and docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh runc containerd tini proxy + +ENV AUTO_GOPATH 1 +WORKDIR /usr/src/docker +COPY . /usr/src/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.solaris b/vendor/github.com/moby/moby/Dockerfile.solaris new file mode 100644 index 0000000..bb342e5 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.solaris @@ -0,0 +1,20 @@ +# Defines an image that hosts a native Docker build environment for Solaris +# TODO: Improve stub + +FROM solaris:latest + +# compile and runtime deps +RUN pkg install --accept \ + git \ + gnu-coreutils \ + gnu-make \ + gnu-tar \ + diagnostic/top \ + golang \ + library/golang/* \ + developer/gcc-* + +ENV GOPATH /go/:/usr/lib/gocode/1.5/ +ENV DOCKER_CROSSPLATFORMS solaris/amd64 +WORKDIR /go/src/github.com/docker/docker +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.windows b/vendor/github.com/moby/moby/Dockerfile.windows new file mode 100644 index 0000000..652d072 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.windows @@ -0,0 +1,267 @@ +# escape=` + +# ----------------------------------------------------------------------------------------- +# This file describes the standard way to build Docker in a container on Windows +# Server 2016 or Windows 10. +# +# Maintainer: @jhowardmsft +# ----------------------------------------------------------------------------------------- + + +# Prerequisites: +# -------------- +# +# 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major +# build number must be at least 14393. This can be confirmed, for example, by +# running the following from an elevated PowerShell prompt - this sample output +# is from a fully up to date machine as at mid-November 2016: +# +# >> PS C:\> $(gin).WindowsBuildLabEx +# >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 +# +# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. +# +# 3. The machine must be configured to run containers. For example, by following +# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or +# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md +# +# 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server +# containers as the default option, it is recommended you have at least 1GB +# of memory assigned; For Windows 10 where Hyper-V Containers are employed, you +# should have at least 4GB of memory assigned. Note also, to run Hyper-V +# containers in a VM, it is necessary to configure the VM for nested virtualization. + +# ----------------------------------------------------------------------------------------- + + +# Usage: +# ----- +# +# The following steps should be run from an (elevated*) Windows PowerShell prompt. +# +# (*In a default installation of containers on Windows following the quick-start guidance at +# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, +# the docker.exe client must run elevated to be able to connect to the daemon). +# +# 1. Clone the sources from github.com: +# +# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker +# >> Cloning into 'C:\go\src\github.com\docker\docker'... +# >> remote: Counting objects: 186216, done. +# >> remote: Compressing objects: 100% (21/21), done. +# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 +# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. +# >> Resolving deltas: 100% (123139/123139), done. +# >> Checking connectivity... done. +# >> Checking out files: 100% (3912/3912), done. +# >> PS C:\> +# +# +# 2. Change directory to the cloned docker sources: +# +# >> cd C:\go\src\github.com\docker\docker +# +# +# 3. Build a docker image with the components required to build the docker binaries from source +# by running one of the following: +# +# >> docker build -t nativebuildimage -f Dockerfile.windows . +# >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers) +# +# +# 4. Build the docker executable binaries by running one of the following: +# +# >> docker run --name binaries nativebuildimage hack\make.ps1 -Binary +# >> docker run --name binaries -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers) +# +# +# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination +# folder on the host system where you want the binaries to be located. +# +# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe +# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe +# +# +# 6. (Optional) Remove the interim container holding the built executable binaries: +# +# >> docker rm binaries +# +# +# 7. (Optional) Remove the image used for the container in which the executable +# binaries are build. Tip - it may be useful to keep this image around if you need to +# build multiple times. Then you can take advantage of the builder cache to have an +# image which has all the components required to build the binaries already installed. +# +# >> docker rmi nativebuildimage +# + +# ----------------------------------------------------------------------------------------- + + +# The validation tests can either run in a container, or directly on the host. To run in a +# container, ensure you have created the nativebuildimage above. Then run one of the +# following from an (elevated) Windows PowerShell prompt: +# +# >> docker run --rm nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat +# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat (if using Hyper-V containers) + +# To run the validation tests on the host, from the root of the repository, run the +# following from a Windows PowerShell prompt (elevation is not required): (Note Go +# must be installed to run these tests) +# +# >> hack\make.ps1 -DCO -PkgImports -GoFormat + +# ----------------------------------------------------------------------------------------- + + +# To run unit tests, ensure you have created the nativebuildimage above. Then run one of +# the following from an (elevated) Windows PowerShell prompt: +# +# >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit +# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers) + + +# ----------------------------------------------------------------------------------------- + + +# To run all tests and binary build, ensure you have created the nativebuildimage above. Then +# run one of the following from an (elevated) Windows PowerShell prompt: +# +# >> docker run nativebuildimage hack\make.ps1 -All +# >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers) + +# ----------------------------------------------------------------------------------------- + + +# Important notes: +# --------------- +# +# Don't attempt to use a bind-mount to pass a local directory as the bundles target +# directory. It does not work (golang attempts for follow a mapped folder incorrectly). +# Instead, use docker cp as per the example. +# +# go.zip is not removed from the image as it is used by the Windows CI servers +# to ensure the host and image are running consistent versions of go. +# +# Nanoserver support is a work in progress. Although the image will build if the +# FROM statement is updated, it will not work when running autogen through hack\make.ps1. +# It is suspected that the required GCC utilities (eg gcc, windres, windmc) silently +# quit due to the use of console hooks which are not available. +# +# The docker integration tests do not currently run in a container on Windows, predominantly +# due to Windows not supporting privileged mode, so anything using a volume would fail. +# They (along with the rest of the docker CI suite) can be run using +# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1. +# +# ----------------------------------------------------------------------------------------- + + +# The number of build steps below are explicitly minimised to improve performance. +FROM microsoft/windowsservercore + +# Use PowerShell as the default shell +SHELL ["powershell", "-command"] + +# Environment variable notes: +# - GO_VERSION must be consistent with 'Dockerfile' used by Linux. +# - FROM_DOCKERFILE is used for detection of building within a container. +ENV GO_VERSION=1.7.5 ` + GIT_VERSION=2.11.0 ` + GOPATH=C:\go ` + FROM_DOCKERFILE=1 + +RUN ` + $ErrorActionPreference = 'Stop'; ` + $ProgressPreference = 'SilentlyContinue'; ` + ` + Function Test-Nano() { ` + $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; ` + return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); ` + }` + ` + Function Download-File([string] $source, [string] $target) { ` + if (Test-Nano) { ` + $handler = New-Object System.Net.Http.HttpClientHandler; ` + $client = New-Object System.Net.Http.HttpClient($handler); ` + $client.Timeout = New-Object System.TimeSpan(0, 30, 0); ` + $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); ` + $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); ` + $responseMsg.Wait(); ` + if (!$responseMsg.IsCanceled) { ` + $response = $responseMsg.Result; ` + if ($response.IsSuccessStatusCode) { ` + $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); ` + $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); ` + $copyStreamOp.Wait(); ` + $downloadedFileStream.Close(); ` + if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } ` + } ` + } else { ` + Throw ("Failed to download " + $source) ` + }` + } else { ` + $webClient = New-Object System.Net.WebClient; ` + $webClient.DownloadFile($source, $target); ` + } ` + } ` + ` + setx /M PATH $('C:\git\bin;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); ` + ` + Write-Host INFO: Downloading git...; ` + $location='https://github.com/git-for-windows/git/releases/download/v'+$env:GIT_VERSION+'.windows.1/PortableGit-'+$env:GIT_VERSION+'-64-bit.7z.exe'; ` + Download-File $location C:\gitsetup.7z.exe; ` + ` + Write-Host INFO: Downloading go...; ` + Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; ` + ` + Write-Host INFO: Downloading compiler 1 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; ` + ` + Write-Host INFO: Downloading compiler 2 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` + ` + Write-Host INFO: Downloading compiler 3 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; ` + ` + Write-Host INFO: Installing PS7Zip package...; ` + Install-Package PS7Zip -Force | Out-Null; ` + Write-Host INFO: Importing PS7Zip...; ` + Import-Module PS7Zip -Force; ` + New-Item C:\git -ItemType Directory | Out-Null ; ` + cd C:\git; ` + Write-Host INFO: Extracting git...; ` + Expand-7Zip C:\gitsetup.7z.exe | Out-Null; ` + cd C:\; ` + ` + Write-Host INFO: Expanding go...; ` + Expand-Archive C:\go.zip -DestinationPath C:\; ` + ` + Write-Host INFO: Expanding compiler 1 of 3...; ` + Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 2 of 3...; ` + Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 3 of 3...; ` + Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` + ` + Write-Host INFO: Removing downloaded files...; ` + Remove-Item C:\gcc.zip; ` + Remove-Item C:\runtime.zip; ` + Remove-Item C:\binutils.zip; ` + Remove-Item C:\gitsetup.7z.exe; ` + ` + Write-Host INFO: Creating source directory...; ` + New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; ` + ` + Write-Host INFO: Configuring git core.autocrlf...; ` + C:\git\bin\git config --global core.autocrlf true; ` + ` + Write-Host INFO: Completed + +# Make PowerShell the default entrypoint +ENTRYPOINT ["powershell.exe"] + +# Set the working directory to the location of the sources +WORKDIR C:\go\src\github.com\docker\docker + +# Copy the sources into the container +COPY . . diff --git a/vendor/github.com/moby/moby/LICENSE b/vendor/github.com/moby/moby/LICENSE new file mode 100644 index 0000000..8f3fee6 --- /dev/null +++ b/vendor/github.com/moby/moby/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/moby/MAINTAINERS b/vendor/github.com/moby/moby/MAINTAINERS new file mode 100644 index 0000000..39bb8c1 --- /dev/null +++ b/vendor/github.com/moby/moby/MAINTAINERS @@ -0,0 +1,376 @@ +# Docker maintainers file +# +# This file describes who runs the docker/docker project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + # For each release (including minor releases), a "release captain" is assigned from the + # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure + # the release process is clear and up-to-date. + + people = [ + "aaronlehmann", + "akihirosuda", + "aluzzardi", + "anusha", + "coolljt0725", + "cpuguy83", + "crosbymichael", + "dnephin", + "duglin", + "estesp", + "icecrime", + "jhowardmsft", + "justincormack", + "lk4d4", + "mavenugo", + "mhbauer", + "mlaventure", + "mrjana", + "runcom", + "stevvooe", + "tianon", + "tibor", + "tonistiigi", + "unclejack", + "vdemeester", + "vieux" + ] + + [Org."Docs maintainers"] + + # TODO Describe the docs maintainers role. + + people = [ + "jamtur01", + "misty", + "sven", + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "aboch", + "andrewhsu", + "ehazlett", + "mgoelzer", + "programmerq", + "thajeztah" + ] + + [Org.Alumni] + + # This list contains maintainers that are no longer active on the project. + # It is thanks to these people that the project has become what it is today. + # Thank you! + + people = [ + # David Calavera contributed many features to Docker, such as an improved + # event system, dynamic configuration reloading, volume plugins, fancy + # new templating options, and an external client credential store. As a + # maintainer, David was release captain for Docker 1.8, and competing + # with Jess Frazelle to be "top dream killer". + # David is now doing amazing stuff as CTO for https://www.netlify.com, + # and tweets as @calavera. + "calavera", + + # As a maintainer, Erik was responsible for the "builder", and + # started the first designs for the new networking model in + # Docker. Erik is now working on all kinds of plugins for Docker + # (https://github.com/contiv) and various open source projects + # in his own repository https://github.com/erikh. You may + # still stumble into him in our issue tracker, or on IRC. + "erikh", + + # Jessica Frazelle, also known as the "Keyser Söze of containers", + # runs *everything* in containers. She started contributing to + # Docker with a (fun fun) change involving both iptables and regular + # expressions (coz, YOLO!) on July 10, 2014 + # https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995. + # Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed + # many features and improvement, among which "seccomp profiles" (making + # containers a lot more secure). Besides being a maintainer, she + # set up the CI infrastructure for the project, giving everyone + # something to shout at if a PR failed ("noooo Janky!"). + # Jess is currently working on the DCOS security team at Mesosphere, + # and contributing to various open source projects. + # Be sure you don't miss her talks at a conference near you (a must-see), + # read her blog at https://blog.jessfraz.com (a must-read), and + # check out her open source projects on GitHub https://github.com/jessfraz (a must-try). + "jessfraz", + + # As a docs maintainer, Mary Anthony contributed greatly to the Docker + # docs. She wrote the Docker Contributor Guide and Getting Started + # Guides. She helped create a doc build system independent of + # docker/docker project, and implemented a new docs.docker.com theme and + # nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub + # public repositories was originally referenced in + # maryatdocker/docker-whale back in May 2015. + "moxiegirl", + + # Vincent "vbatts!" Batts made his first contribution to the project + # in November 2013, to become a maintainer a few months later, on + # May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778). + # As a maintainer, Vincent made important contributions to core elements + # of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper). + # He also contributed the "tar-split" library, an important element + # for the content-addressable store. + # Vincent is currently a member of the Open Containers Initiative + # Technical Oversight Board (TOB), besides his work at Red Hat and + # Project Atomic. You can still find him regularly hanging out in + # our repository and the #docker-dev and #docker-maintainers IRC channels + # for a chat, as he's always a lot of fun. + "vbatts", + + # Vishnu became a maintainer to help out on the daemon codebase and + # libcontainer integration. He's currently involved in the + # Open Containers Initiative, working on the specifications, + # besides his work on cAdvisor and Kubernetes for Google. + "vishh" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aboch] + Name = "Alessandro Boch" + Email = "aboch@docker.com" + GitHub = "aboch" + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "suda.akihiro@lab.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.andrewhsu] + Name = "Andrew Hsu" + Email = "andrewhsu@docker.com" + GitHub = "andrewhsu" + + [people.anusha] + Name = "Anusha Ragunathan" + Email = "anusha@docker.com" + GitHub = "anusha-ragunathan" + + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" + + [people.coolljt0725] + Name = "Lei Jitang" + Email = "leijitang@huawei.com" + GitHub = "coolljt0725" + + [people.cpuguy83] + Name = "Brian Goff" + Email = "cpuguy83@gmail.com" + Github = "cpuguy83" + + [people.crosbymichael] + Name = "Michael Crosby" + Email = "crosbymichael@gmail.com" + GitHub = "crosbymichael" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.duglin] + Name = "Doug Davis" + Email = "dug@us.ibm.com" + GitHub = "duglin" + + [people.ehazlett] + Name = "Evan Hazlett" + Email = "ejhazlett@gmail.com" + GitHub = "ehazlett" + + [people.erikh] + Name = "Erik Hollensbe" + Email = "erik@docker.com" + GitHub = "erikh" + + [people.estesp] + Name = "Phil Estes" + Email = "estesp@linux.vnet.ibm.com" + GitHub = "estesp" + + [people.icecrime] + Name = "Arnaud Porterie" + Email = "arnaud@docker.com" + GitHub = "icecrime" + + [people.jamtur01] + Name = "James Turnbull" + Email = "james@lovedthanlost.net" + GitHub = "jamtur01" + + [people.jhowardmsft] + Name = "John Howard" + Email = "jhoward@microsoft.com" + GitHub = "jhowardmsft" + + [people.jessfraz] + Name = "Jessie Frazelle" + Email = "jess@linux.com" + GitHub = "jessfraz" + + [people.justincormack] + Name = "Justin Cormack" + Email = "justin.cormack@docker.com" + GitHub = "justincormack" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.mavenugo] + Name = "Madhu Venugopal" + Email = "madhu@docker.com" + GitHub = "mavenugo" + + [people.mgoelzer] + Name = "Mike Goelzer" + Email = "mike.goelzer@docker.com" + GitHub = "mgoelzer" + + [people.mhbauer] + Name = "Morgan Bauer" + Email = "mbauer@us.ibm.com" + GitHub = "mhbauer" + + [people.misty] + Name = "Misty Stanley-Jones" + Email = "misty@docker.com" + GitHub = "mstanleyjones" + + [people.mlaventure] + Name = "Kenfe-Mickaël Laventure" + Email = "mickael.laventure@docker.com" + GitHub = "mlaventure" + + [people.moxiegirl] + Name = "Mary Anthony" + Email = "mary.anthony@docker.com" + GitHub = "moxiegirl" + + [people.mrjana] + Name = "Jana Radhakrishnan" + Email = "mrjana@docker.com" + GitHub = "mrjana" + + [people.programmerq] + Name = "Jeff Anderson" + Email = "jeff@docker.com" + GitHub = "programmerq" + + [people.runcom] + Name = "Antonio Murdaca" + Email = "runcom@redhat.com" + GitHub = "runcom" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" + + [people.sven] + Name = "Sven Dowideit" + Email = "SvenDowideit@home.org.au" + GitHub = "SvenDowideit" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.tianon] + Name = "Tianon Gravi" + Email = "admwiggin@gmail.com" + GitHub = "tianon" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + GitHub = "tiborvass" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.unclejack] + Name = "Cristian Staretu" + Email = "cristian.staretu@gmail.com" + GitHub = "unclejack" + + [people.vbatts] + Name = "Vincent Batts" + Email = "vbatts@redhat.com" + GitHub = "vbatts" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + + [people.vishh] + Name = "Vishnu Kannan" + Email = "vishnuk@google.com" + GitHub = "vishh" diff --git a/vendor/github.com/moby/moby/Makefile b/vendor/github.com/moby/moby/Makefile new file mode 100644 index 0000000..1b1a129 --- /dev/null +++ b/vendor/github.com/moby/moby/Makefile @@ -0,0 +1,150 @@ +.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win + +# set the graph driver as the current graphdriver if not set +DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) + +# get OS/Arch of docker engine +DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}') +DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') + +# env vars passed through directly to Docker's build scripts +# to allow things like `make KEEPBUNDLE=1 binary` easily +# `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILD_APT_MIRROR \ + -e BUILDFLAGS \ + -e KEEPBUNDLE \ + -e DOCKER_BUILD_ARGS \ + -e DOCKER_BUILD_GOGC \ + -e DOCKER_BUILD_PKGS \ + -e DOCKER_DEBUG \ + -e DOCKER_EXPERIMENTAL \ + -e DOCKER_GITCOMMIT \ + -e DOCKER_GRAPHDRIVER=$(DOCKER_GRAPHDRIVER) \ + -e DOCKER_INCREMENTAL_BINARY \ + -e DOCKER_PORT \ + -e DOCKER_REMAP_ROOT \ + -e DOCKER_STORAGE_OPTS \ + -e DOCKER_USERLANDPROXY \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT \ + -e HTTP_PROXY \ + -e HTTPS_PROXY \ + -e NO_PROXY \ + -e http_proxy \ + -e https_proxy \ + -e no_proxy +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + +# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test` +# (default to no bind mount if DOCKER_HOST is set) +# note: BINDDIR is supported for backwards-compatibility here +BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) +DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") + +# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs. +# The volume will be cleaned up when the container is removed due to `--rm`. +# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set. +DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) + +# enable .go-pkg-cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set +PKGCACHE_DIR := $(if $(PKGCACHE_DIR),$(PKGCACHE_DIR),.go-pkg-cache) +PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo +DOCKER_MOUNT := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_MOUNT) $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(CURDIR)/$(PKGCACHE_DIR)/\1"@g'),$(DOCKER_MOUNT)) + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") +DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) +DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",) + +DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) +BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR)) +export BUILD_APT_MIRROR + +# if this session isn't interactive, then we don't want to allocate a +# TTY, which would fail, but if it is interactive, we do want to attach +# so that the user can send e.g. ^C through. +INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif + +DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" + +default: binary + +all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives + $(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh' + +binary: build ## build the linux binaries + $(DOCKER_RUN_DOCKER) hack/make.sh binary + +build: bundles init-go-pkg-cache + docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . + +bundles: + mkdir bundles + +cross: build ## cross build the binaries for darwin, freebsd and\nwindows + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross + +deb: build ## build the deb packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb + + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +init-go-pkg-cache: + mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g') + +install: ## install the linux binaries + KEEPBUNDLE=1 hack/make.sh install-binary + +manpages: ## Generate man pages from go source and markdown + docker build -t docker-manpage-dev -f "man/$(DOCKERFILE)" ./man + docker run --rm \ + -v $(PWD):/go/src/github.com/docker/docker/ \ + docker-manpage-dev + +rpm: build ## build the rpm packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm + +run: build ## run the docker daemon in a container + $(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run" + +shell: build ## start a shell inside the build env + $(DOCKER_RUN_DOCKER) bash + +yaml-docs-gen: build ## generate documentation YAML files consumed by docs repo + $(DOCKER_RUN_DOCKER) sh -c 'hack/make.sh yaml-docs-generator && ( root=$$(pwd); cd bundles/latest/yaml-docs-generator; mkdir docs; ./yaml-docs-generator --root $${root} --target $$(pwd)/docs )' + +test: build ## run the unit, integration and docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py + +test-docker-py: build ## run the docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py + +test-integration-cli: build ## run the integration tests + $(DOCKER_RUN_DOCKER) hack/make.sh build-integration-test-binary dynbinary test-integration-cli + +test-unit: build ## run the unit tests + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit + +tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz + +validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor + $(DOCKER_RUN_DOCKER) hack/validate/all + +win: build ## cross build the binary for windows + $(DOCKER_RUN_DOCKER) hack/make.sh win + +.PHONY: swagger-gen +swagger-gen: + docker run --rm -v $(PWD):/go/src/github.com/docker/docker \ + -w /go/src/github.com/docker/docker \ + --entrypoint hack/generate-swagger-api.sh \ + -e GOPATH=/go \ + quay.io/goswagger/swagger:0.7.4 diff --git a/vendor/github.com/moby/moby/NOTICE b/vendor/github.com/moby/moby/NOTICE new file mode 100644 index 0000000..8a37c1c --- /dev/null +++ b/vendor/github.com/moby/moby/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2016 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/moby/moby/README.md b/vendor/github.com/moby/moby/README.md new file mode 100644 index 0000000..0b33bdc --- /dev/null +++ b/vendor/github.com/moby/moby/README.md @@ -0,0 +1,304 @@ +Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) +============================ + +Docker is an open source project to pack, ship and run any application +as a lightweight container. + +Docker containers are both *hardware-agnostic* and *platform-agnostic*. +This means they can run anywhere, from your laptop to the largest +cloud compute instance and everything in between - and they don't require +you to use a particular language, framework or packaging system. That +makes them great building blocks for deploying and scaling web apps, +databases, and backend services without depending on a particular stack +or provider. + +Docker began as an open-source implementation of the deployment engine which +powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/), +a popular Platform-as-a-Service. It benefits directly from the experience +accumulated over several years of large-scale operation and support of hundreds +of thousands of applications and databases. + +![Docker logo](docs/static_files/docker-logo-compressed.png "Docker") + +## Security Disclosure + +Security is very important to us. If you have any issue regarding security, +please disclose the information responsibly by sending an email to +security@docker.com and not by creating a GitHub issue. + +## Better than VMs + +A common method for distributing applications and sandboxing their +execution is to use virtual machines, or VMs. Typical VM formats are +VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory +these formats should allow every developer to automatically package +their application into a "machine" for easy distribution and deployment. +In practice, that almost never happens, for a few reasons: + + * *Size*: VMs are very large which makes them impractical to store + and transfer. + * *Performance*: running VMs consumes significant CPU and memory, + which makes them impractical in many scenarios, for example local + development of multi-tier applications, and large-scale deployment + of cpu and memory-intensive applications on large numbers of + machines. + * *Portability*: competing VM environments don't play well with each + other. Although conversion tools do exist, they are limited and + add even more overhead. + * *Hardware-centric*: VMs were designed with machine operators in + mind, not software developers. As a result, they offer very + limited tooling for what developers need most: building, testing + and running their software. For example, VMs offer no facilities + for application versioning, monitoring, configuration, logging or + service discovery. + +By contrast, Docker relies on a different sandboxing method known as +*containerization*. Unlike traditional virtualization, containerization +takes place at the kernel level. Most modern operating system kernels +now support the primitives necessary for containerization, including +Linux with [openvz](https://openvz.org), +[vserver](http://linux-vserver.org) and more recently +[lxc](https://linuxcontainers.org/), Solaris with +[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), +and FreeBSD with +[Jails](https://www.freebsd.org/doc/handbook/jails.html). + +Docker builds on top of these low-level primitives to offer developers a +portable format and runtime environment that solves all four problems. +Docker containers are small (and their transfer can be optimized with +layers), they have basically zero memory and cpu overhead, they are +completely portable, and are designed from the ground up with an +application-centric design. + +Perhaps best of all, because Docker operates at the OS level, it can still be +run inside a VM! + +## Plays well with others + +Docker does not require you to buy into a particular programming +language, framework, packaging system, or configuration language. + +Is your application a Unix process? Does it use files, tcp connections, +environment variables, standard Unix streams and command-line arguments +as inputs and outputs? Then Docker can run it. + +Can your application's build be expressed as a sequence of such +commands? Then Docker can build it. + +## Escape dependency hell + +A common problem for developers is the difficulty of managing all +their application's dependencies in a simple and automated way. + +This is usually difficult for several reasons: + + * *Cross-platform dependencies*. Modern applications often depend on + a combination of system libraries and binaries, language-specific + packages, framework-specific modules, internal components + developed for another project, etc. These dependencies live in + different "worlds" and require different tools - these tools + typically don't work well with each other, requiring awkward + custom integrations. + + * *Conflicting dependencies*. Different applications may depend on + different versions of the same dependency. Packaging tools handle + these situations with various degrees of ease - but they all + handle them in different and incompatible ways, which again forces + the developer to do extra work. + + * *Custom dependencies*. A developer may need to prepare a custom + version of their application's dependency. Some packaging systems + can handle custom versions of a dependency, others can't - and all + of them handle it differently. + + +Docker solves the problem of dependency hell by giving the developer a simple +way to express *all* their application's dependencies in one place, while +streamlining the process of assembling them. If this makes you think of +[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't +*replace* your favorite packaging systems. It simply orchestrates +their use in a simple and repeatable way. How does it do that? With +layers. + +Docker defines a build as running a sequence of Unix commands, one +after the other, in the same container. Build commands modify the +contents of the container (usually by installing new files on the +filesystem), the next command modifies it some more, etc. Since each +build command inherits the result of the previous commands, the +*order* in which the commands are executed expresses *dependencies*. + +Here's a typical Docker build process: + +```bash +FROM ubuntu:12.04 +RUN apt-get update && apt-get install -y python python-pip curl +RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv +RUN cd helloflask-master && pip install -r requirements.txt +``` + +Note that Docker doesn't care *how* dependencies are built - as long +as they can be built by running a Unix command in a container. + + +Getting started +=============== + +Docker can be installed either on your computer for building applications or +on servers for running them. To get started, [check out the installation +instructions in the +documentation](https://docs.docker.com/engine/installation/). + +Usage examples +============== + +Docker can be used to run short-lived commands, long-running daemons +(app servers, databases, etc.), interactive shell sessions, etc. + +You can find a [list of real-world +examples](https://docs.docker.com/engine/examples/) in the +documentation. + +Under the hood +-------------- + +Under the hood, Docker is built on the following components: + +* The + [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt) + and + [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) + capabilities of the Linux kernel +* The [Go](https://golang.org) programming language +* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) +* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md) + +Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) +====================== + +| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** | +|------------------|----------------------|---------|---------| +| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) | + +Want to hack on Docker? Awesome! We have [instructions to help you get +started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/). + +These instructions are probably not perfect, please let us know if anything +feels wrong or incomplete. Better yet, submit a PR and improve them yourself. + +Getting the development builds +============================== + +Want to run Docker from a master build? You can download +master builds at [master.dockerproject.org](https://master.dockerproject.org). +They are updated with each commit merged into the master branch. + +Don't know how to use that super cool new feature in the master build? Check +out the master docs at +[docs.master.dockerproject.org](http://docs.master.dockerproject.org). + +How the project is run +====================== + +Docker is a very, very active project. If you want to learn more about how it is run, +or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project). + +We are always open to suggestions on process improvements, and are always looking for more maintainers. + +### Talking to other Docker users and contributors + + + + + + + + + + + + + + + + + + + + + + + + +
Internet Relay Chat (IRC) +

+ IRC is a direct line to our most knowledgeable Docker users; we have + both the #docker and #docker-dev group on + irc.freenode.net. + IRC is a rich chat protocol but it can overwhelm new users. You can search + our chat archives. +

+ Read our IRC quickstart guide for an easy way to get started. +
Docker Community Forums + The Docker Engine + group is for users of the Docker Engine project. +
Google Groups + The docker-dev group is for contributors and other people + contributing to the Docker project. You can join this group without a + Google account by sending an email to docker-dev+subscribe@googlegroups.com. + You'll receive a join-request message; simply reply to the message to + confirm your subscription. +
Twitter + You can follow Docker's Twitter feed + to get updates on our products. You can also tweet us questions or just + share blogs or stories. +
Stack Overflow + Stack Overflow has over 7000 Docker questions listed. We regularly + monitor Docker questions + and so do many other knowledgeable Docker users. +
+ +### Legal + +*Brought to you courtesy of our legal counsel. For more context, +please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.* + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. + +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + + +Licensing +========= +Docker is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +license text. + +Other Docker Related Projects +============================= +There are a number of projects under development that are based on Docker's +core technology. These projects expand the tooling built around the +Docker platform to broaden its application and utility. + +* [Docker Registry](https://github.com/docker/distribution): Registry +server for Docker (hosting/delivery of repositories and images) +* [Docker Machine](https://github.com/docker/machine): Machine management +for a container-centric world +* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering +system +* [Docker Compose](https://github.com/docker/compose) (formerly Fig): +Define and run multi-container apps +* [Kitematic](https://github.com/docker/kitematic): The easiest way to use +Docker on Mac and Windows + +If you know of another project underway that should be listed here, please help +us keep this list up-to-date by submitting a PR. + +Awesome-Docker +============== +You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there. diff --git a/vendor/github.com/moby/moby/ROADMAP.md b/vendor/github.com/moby/moby/ROADMAP.md new file mode 100644 index 0000000..21fe06d --- /dev/null +++ b/vendor/github.com/moby/moby/ROADMAP.md @@ -0,0 +1,118 @@ +Docker Engine Roadmap +===================== + +### How should I use this document? + +This document provides description of items that the project decided to prioritize. This should +serve as a reference point for Docker contributors to understand where the project is going, and +help determine if a contribution could be conflicting with some longer terms plans. + +The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be +refused (except for those mentioned as "frozen features" below)! We are always happy to receive +patches for new cool features we haven't thought about, or didn't judge priority. Please however +understand that such patches might take longer for us to review. + +### How can I help? + +Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described +in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our +goal is to split down the workload in such way that anybody can jump in and help. Please comment on +issues if you want to take it to avoid duplicating effort! Similarly, if a maintainer is already +assigned on an issue you'd like to participate in, pinging him on IRC or GitHub to offer your help is +the best way to go. + +### How can I add something to the roadmap? + +The roadmap process is new to the Docker Engine: we are only beginning to structure and document the +project objectives. Our immediate goal is to be more transparent, and work with our community to +focus our efforts on fewer prioritized topics. + +We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but +we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we +won't be accepting pull requests adding or removing items from this file. + +# 1. Features and refactoring + +## 1.1 Runtime improvements + +We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container +execution. The initial goal was to integrate runC as a replacement in the Engine for the traditional +default libcontainer `execdriver`, but the Engine internals were not ready for this. + +As runC continued evolving, and the OCI specification along with it, we created +[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC`. This is +the new target for Engine integration, as it can entirely replace the whole `execdriver` +architecture, and container monitoring along with it. + +Docker Engine will rely on a long-running `containerd` companion daemon for all container execution +related operations. This could open the door in the future for Engine restarts without interrupting +running containers. + +## 1.2 Plugins improvements + +Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volumes and networks +extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real +world feedback before optimizing for any particular workflow. + +In the future, we'd like plugins to become first class citizens, and encourage an ecosystem of +plugins. This implies in particular making it trivially easy to distribute plugins as containers +through any Registry instance, as well as solving the commonly heard pain points of plugins needing +to be treated as somewhat special (being active at all time, started before any other user +containers, and not as easily dismissed). + +## 1.3 Internal decoupling + +A lot of work has been done in trying to decouple the Docker Engine's internals. In particular, the +API implementation has been refactored, and the Builder side of the daemon is now +[fully independent](https://github.com/docker/docker/tree/master/builder) while still residing in +the same repository. + +We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the +runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support +with the concept of "special" containers opens the door for bootstrapping more Engine internals +using the same facilities. + +## 1.4 Cluster capable Engine + +The community has been pushing for a more cluster capable Docker Engine, and a huge effort was spent +adding features such as multihost networking, and node discovery down at the Engine level. Yet, the +Engine is currently incapable of taking scheduling decisions alone, and continues relying on Swarm +for that. + +We plan to complete this effort and make Engine fully cluster capable. Multiple instances of the +Docker Engine being already capable of discovering each other and establish overlay networking for +their container to communicate, the next step is for a given Engine to gain ability to dispatch work +to another node in the cluster. This will be introduced in a backward compatible way, such that a +`docker run` invocation on a particular node remains fully deterministic. + +# 2 Frozen features + +## 2.1 Docker exec + +We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a +*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort. + +## 2.2 Remote Registry Operations + +A large amount of work is ongoing in the area of image distribution and provenance. This includes +moving to the V2 Registry API and heavily refactoring the code that powers these features. The +desired result is more secure, reliable and easier to use image distribution. + +Part of the problem with this part of the code base is the lack of a stable and flexible interface. +If new features are added that access the registry without solidifying these interfaces, achieving +feature parity will continue to be elusive. While we get a handle on this situation, we are imposing +a moratorium on new code that accesses the Registry API in commands that don't already make remote +calls. + +Currently, only the following commands cause interaction with a remote registry: + + - push + - pull + - run + - build + - search + - login + +In the interest of stabilizing the registry access model during this ongoing work, we are not +accepting additions to other commands that will cause remote interaction with the Registry API. This +moratorium will lift when the goals of the distribution project have been met. diff --git a/vendor/github.com/moby/moby/VENDORING.md b/vendor/github.com/moby/moby/VENDORING.md new file mode 100644 index 0000000..3086f9d --- /dev/null +++ b/vendor/github.com/moby/moby/VENDORING.md @@ -0,0 +1,45 @@ +# Vendoring policies + +This document outlines recommended Vendoring policies for Docker repositories. +(Example, libnetwork is a Docker repo and logrus is not.) + +## Vendoring using tags + +Commit ID based vendoring provides little/no information about the updates +vendored. To fix this, vendors will now require that repositories use annotated +tags along with commit ids to snapshot commits. Annotated tags by themselves +are not sufficient, since the same tag can be force updated to reference +different commits. + +Each tag should: +- Follow Semantic Versioning rules (refer to section on "Semantic Versioning") +- Have a corresponding entry in the change tracking document. + +Each repo should: +- Have a change tracking document between tags/releases. Ex: CHANGELOG.md, +github releases file. + +The goal here is for consuming repos to be able to use the tag version and +changelog updates to determine whether the vendoring will cause any breaking or +backward incompatible changes. This also means that repos can specify having +dependency on a package of a specific version or greater up to the next major +release, without encountering breaking changes. + +## Semantic Versioning +Annotated version tags should follow Schema Versioning policies. +According to http://semver.org: + +"Given a version number MAJOR.MINOR.PATCH, increment the: + MAJOR version when you make incompatible API changes, + MINOR version when you add functionality in a backwards-compatible manner, and + PATCH version when you make backwards-compatible bug fixes. +Additional labels for pre-release and build metadata are available as extensions +to the MAJOR.MINOR.PATCH format." + +## Vendoring cadence +In order to avoid huge vendoring changes, it is recommended to have a regular +cadence for vendoring updates. e.g. monthly. + +## Pre-merge vendoring tests +All related repos will be vendored into docker/docker. +CI on docker/docker should catch any breaking changes involving multiple repos. diff --git a/vendor/github.com/moby/moby/VERSION b/vendor/github.com/moby/moby/VERSION new file mode 100644 index 0000000..bc606d7 --- /dev/null +++ b/vendor/github.com/moby/moby/VERSION @@ -0,0 +1 @@ +17.03.2-ce-rc1 diff --git a/vendor/github.com/moby/moby/api/README.md b/vendor/github.com/moby/moby/api/README.md new file mode 100644 index 0000000..464e056 --- /dev/null +++ b/vendor/github.com/moby/moby/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. +- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. To automatically generate documentation. +2. To automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919) + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful for when you are making edits to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. + +All the documentation generation is done in the documentation repository, [docker/docker.github.io](https://github.com/docker/docker.github.io). The Swagger definition is vendored periodically into this repository, but you can manually copy over the Swagger definition to test changes. + +Copy `api/swagger.yaml` in this repository to `engine/api/[VERSION_NUMBER]/swagger.yaml` in the documentation repository, overwriting what is already there. Then, run `docker-compose up` in the documentation repository and browse to [http://localhost:4000/engine/api/](http://localhost:4000/engine/api/) when it finishes rendering. diff --git a/vendor/github.com/moby/moby/api/common.go b/vendor/github.com/moby/moby/api/common.go new file mode 100644 index 0000000..cde250f --- /dev/null +++ b/vendor/github.com/moby/moby/api/common.go @@ -0,0 +1,166 @@ +package api + +import ( + "encoding/json" + "encoding/pem" + "fmt" + "mime" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion string = "1.27" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier string = "scratch" +) + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first uint16 + last uint16 + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) + return strings.Join(result, ", ") +} + +func formGroup(key string, start, last uint16) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(int(start)) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// MatchesContentType validates the content type against the expected one +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) + if err != nil { + return nil, fmt.Errorf("Error serializing key: %s", err) + } + if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} + +func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { + if ext == ".json" || ext == ".jwk" { + encoded, err = json.Marshal(key) + if err != nil { + return nil, fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + pemBlock, err := key.PEMBlock() + if err != nil { + return nil, fmt.Errorf("unable to encode private key PEM: %s", err) + } + encoded = pem.EncodeToMemory(pemBlock) + } + return +} diff --git a/vendor/github.com/moby/moby/api/common_test.go b/vendor/github.com/moby/moby/api/common_test.go new file mode 100644 index 0000000..31d6f58 --- /dev/null +++ b/vendor/github.com/moby/moby/api/common_test.go @@ -0,0 +1,341 @@ +package api + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "os" + + "github.com/docker/docker/api/types" +) + +type ports struct { + ports []types.Port + expected string +} + +// DisplayablePorts +func TestDisplayablePorts(t *testing.T) { + cases := []ports{ + { + []types.Port{ + { + PrivatePort: 9988, + Type: "tcp", + }, + }, + "9988/tcp"}, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp", + }, + { + []types.Port{ + { + IP: "0.0.0.0", + PrivatePort: 9988, + Type: "tcp", + }, + }, + "0.0.0.0:0->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "4.3.2.1:8899->9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 9988, + Type: "tcp", + }, + }, + "4.3.2.1:9988->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp, 9988/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 9998, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 9999, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:9998-9999->9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 8887, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 8888, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", + }, + { + []types.Port{ + { + PrivatePort: 9998, + Type: "udp", + }, { + PrivatePort: 9999, + Type: "udp", + }, + }, + "9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, + }, + "9988/udp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + PrivatePort: 80, + Type: "tcp", + }, { + PrivatePort: 1024, + Type: "tcp", + }, { + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, + }, + "80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", + }, + } + + for _, port := range cases { + actual := DisplayablePorts(port.ports) + if port.expected != actual { + t.Fatalf("Expected %s, got %s.", port.expected, actual) + } + } +} + +// MatchesContentType +func TestJsonContentType(t *testing.T) { + if !MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} + +// LoadOrCreateTrustKey +func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") + if err != nil { + t.Fatal(err) + } + + if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { + t.Fatalf("expected an error, got nothing.") + } + +} + +func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + // Without the need to create the folder hierarchy + tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With the need to create the folder hierarchy as tmpKeyFie is in a path + // where some folders do not exist. + tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With no path at all + defer os.Remove("keyfile") + if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat("keyfile"); err != nil { + t.Fatalf("Expected to find a file keyfile, got %v", err) + } +} + +func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { + tmpKeyFile := filepath.Join("fixtures", "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a key file, got : %v and %v", err, key) + } +} diff --git a/vendor/github.com/moby/moby/api/common_unix.go b/vendor/github.com/moby/moby/api/common_unix.go new file mode 100644 index 0000000..081e61c --- /dev/null +++ b/vendor/github.com/moby/moby/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api + +// MinVersion represents Minimum REST API version supported +const MinVersion string = "1.12" diff --git a/vendor/github.com/moby/moby/api/common_windows.go b/vendor/github.com/moby/moby/api/common_windows.go new file mode 100644 index 0000000..d930fa0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/common_windows.go @@ -0,0 +1,8 @@ +package api + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (eg docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/vendor/github.com/moby/moby/api/errors/errors.go b/vendor/github.com/moby/moby/api/errors/errors.go new file mode 100644 index 0000000..29fd254 --- /dev/null +++ b/vendor/github.com/moby/moby/api/errors/errors.go @@ -0,0 +1,47 @@ +package errors + +import "net/http" + +// apiError is an error wrapper that also +// holds information about response status codes. +type apiError struct { + error + statusCode int +} + +// HTTPErrorStatusCode returns a status code. +func (e apiError) HTTPErrorStatusCode() int { + return e.statusCode +} + +// NewErrorWithStatusCode allows you to associate +// a specific HTTP Status Code to an error. +// The Server will take that code and set +// it as the response status. +func NewErrorWithStatusCode(err error, code int) error { + return apiError{err, code} +} + +// NewBadRequestError creates a new API error +// that has the 400 HTTP status code associated to it. +func NewBadRequestError(err error) error { + return NewErrorWithStatusCode(err, http.StatusBadRequest) +} + +// NewRequestForbiddenError creates a new API error +// that has the 403 HTTP status code associated to it. +func NewRequestForbiddenError(err error) error { + return NewErrorWithStatusCode(err, http.StatusForbidden) +} + +// NewRequestNotFoundError creates a new API error +// that has the 404 HTTP status code associated to it. +func NewRequestNotFoundError(err error) error { + return NewErrorWithStatusCode(err, http.StatusNotFound) +} + +// NewRequestConflictError creates a new API error +// that has the 409 HTTP status code associated to it. +func NewRequestConflictError(err error) error { + return NewErrorWithStatusCode(err, http.StatusConflict) +} diff --git a/vendor/github.com/moby/moby/api/fixtures/keyfile b/vendor/github.com/moby/moby/api/fixtures/keyfile new file mode 100644 index 0000000..322f254 --- /dev/null +++ b/vendor/github.com/moby/moby/api/fixtures/keyfile @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY + +MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 +AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky +NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/api/server/httputils/decoder.go b/vendor/github.com/moby/moby/api/server/httputils/decoder.go new file mode 100644 index 0000000..458eac5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/decoder.go @@ -0,0 +1,16 @@ +package httputils + +import ( + "io" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// ContainerDecoder specifies how +// to translate an io.Reader into +// container configuration. +type ContainerDecoder interface { + DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) + DecodeHostConfig(src io.Reader) (*container.HostConfig, error) +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/errors.go b/vendor/github.com/moby/moby/api/server/httputils/errors.go new file mode 100644 index 0000000..2f87765 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/errors.go @@ -0,0 +1,103 @@ +package httputils + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/gorilla/mux" + "google.golang.org/grpc" +) + +// httpStatusError is an interface +// that errors with custom status codes +// implement to tell the api layer +// which response status to set. +type httpStatusError interface { + HTTPErrorStatusCode() int +} + +// inputValidationError is an interface +// that errors generated by invalid +// inputs can implement to tell the +// api layer to set a 400 status code +// in the response. +type inputValidationError interface { + IsValidationError() bool +} + +// GetHTTPErrorStatusCode retrieves status code from error message +func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + errMsg := err.Error() + + switch e := err.(type) { + case httpStatusError: + statusCode = e.HTTPErrorStatusCode() + case inputValidationError: + statusCode = http.StatusBadRequest + default: + // FIXME: this is brittle and should not be necessary, but we still need to identify if + // there are errors falling back into this logic. + // If we need to differentiate between different possible error types, + // we should create appropriate error types that implement the httpStatusError interface. + errStr := strings.ToLower(errMsg) + for _, status := range []struct { + keyword string + code int + }{ + {"not found", http.StatusNotFound}, + {"no such", http.StatusNotFound}, + {"bad parameter", http.StatusBadRequest}, + {"no command", http.StatusBadRequest}, + {"conflict", http.StatusConflict}, + {"impossible", http.StatusNotAcceptable}, + {"wrong login/password", http.StatusUnauthorized}, + {"unauthorized", http.StatusUnauthorized}, + {"hasn't been activated", http.StatusForbidden}, + {"this node", http.StatusServiceUnavailable}, + {"needs to be unlocked", http.StatusServiceUnavailable}, + {"certificates have expired", http.StatusServiceUnavailable}, + } { + if strings.Contains(errStr, status.keyword) { + statusCode = status.code + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + return statusCode +} + +func apiVersionSupportsJSONErrors(version string) bool { + const firstAPIVersionWithJSONErrors = "1.23" + return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors) +} + +// MakeErrorHandler makes an HTTP handler that decodes a Docker error and +// returns it in the response. +func MakeErrorHandler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + statusCode := GetHTTPErrorStatusCode(err) + vars := mux.Vars(r) + if apiVersionSupportsJSONErrors(vars["version"]) { + response := &types.ErrorResponse{ + Message: err.Error(), + } + WriteJSON(w, statusCode, response) + } else { + http.Error(w, grpc.ErrorDesc(err), statusCode) + } + } +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/form.go b/vendor/github.com/moby/moby/api/server/httputils/form.go new file mode 100644 index 0000000..20188c1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/form.go @@ -0,0 +1,73 @@ +package httputils + +import ( + "fmt" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. +func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + if err != nil { + return value, err + } + return value, nil + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. +// It fails if the archive name and path are not in the request. +func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/form_test.go b/vendor/github.com/moby/moby/api/server/httputils/form_test.go new file mode 100644 index 0000000..c56f7c1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/form_test.go @@ -0,0 +1,105 @@ +package httputils + +import ( + "net/http" + "net/url" + "testing" +) + +func TestBoolValue(t *testing.T) { + cases := map[string]bool{ + "": false, + "0": false, + "no": false, + "false": false, + "none": false, + "1": true, + "yes": true, + "true": true, + "one": true, + "100": true, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := BoolValue(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestBoolValueOrDefault(t *testing.T) { + r, _ := http.NewRequest("GET", "", nil) + if !BoolValueOrDefault(r, "queryparam", true) { + t.Fatal("Expected to get true default value, got false") + } + + v := url.Values{} + v.Set("param", "") + r, _ = http.NewRequest("GET", "", nil) + r.Form = v + if BoolValueOrDefault(r, "param", true) { + t.Fatal("Expected not to get true") + } +} + +func TestInt64ValueOrZero(t *testing.T) { + cases := map[string]int64{ + "": 0, + "asdf": 0, + "0": 0, + "1": 1, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := Int64ValueOrZero(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestInt64ValueOrDefault(t *testing.T) { + cases := map[string]int64{ + "": -1, + "-1": -1, + "42": 42, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a, err := Int64ValueOrDefault(r, "test", -1) + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + if err != nil { + t.Fatalf("Error should be nil, but received: %s", err) + } + } +} + +func TestInt64ValueOrDefaultWithError(t *testing.T) { + v := url.Values{} + v.Set("test", "invalid") + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + _, err := Int64ValueOrDefault(r, "test", -1) + if err == nil { + t.Fatalf("Expected an error.") + } +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/httputils.go b/vendor/github.com/moby/moby/api/server/httputils/httputils.go new file mode 100644 index 0000000..7930ff7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/httputils.go @@ -0,0 +1,90 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api" +) + +// APIVersionKey is the client's requested API version. +const APIVersionKey = "api-version" + +// UAStringKey is used as key type for user-agent string in net/context struct +const UAStringKey = "upstream-user-agent" + +// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). +type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +// HijackConnection interrupts the http response writer to get the +// underlying connection and operate with it. +func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// CloseStreams ensures that a list for http streams are properly closed. +func CloseStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// CheckForJSON makes sure that the request's Content-Type is application/json. +func CheckForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +// ParseForm ensures the request form is parsed even with invalid content types. +// If we don't do this, POST method without Content-type (even with empty body) will fail. +func ParseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// VersionFromContext returns an API version from the context using APIVersionKey. +// It panics if the context value does not have version.Version type. +func VersionFromContext(ctx context.Context) (ver string) { + if ctx == nil { + return + } + val := ctx.Value(APIVersionKey) + if val == nil { + return + } + return val.(string) +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/httputils_write_json.go b/vendor/github.com/moby/moby/api/server/httputils/httputils_write_json.go new file mode 100644 index 0000000..4787cc3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/httputils_write_json.go @@ -0,0 +1,17 @@ +// +build go1.7 + +package httputils + +import ( + "encoding/json" + "net/http" +) + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + return enc.Encode(v) +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/httputils_write_json_go16.go b/vendor/github.com/moby/moby/api/server/httputils/httputils_write_json_go16.go new file mode 100644 index 0000000..bdc6981 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/httputils_write_json_go16.go @@ -0,0 +1,16 @@ +// +build go1.6,!go1.7 + +package httputils + +import ( + "encoding/json" + "net/http" +) + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + return enc.Encode(v) +} diff --git a/vendor/github.com/moby/moby/api/server/middleware.go b/vendor/github.com/moby/moby/api/server/middleware.go new file mode 100644 index 0000000..537ce80 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware.go @@ -0,0 +1,24 @@ +package server + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" +) + +// handlerWithGlobalMiddlewares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + next := handler + + for _, m := range s.middlewares { + next = m.WrapHandler(next) + } + + if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { + next = middleware.DebugRequestMiddleware(next) + } + + return next +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/cors.go b/vendor/github.com/moby/moby/api/server/middleware/cors.go new file mode 100644 index 0000000..ea725db --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/cors.go @@ -0,0 +1,37 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" +) + +// CORSMiddleware injects CORS headers to each request +// when it's configured. +type CORSMiddleware struct { + defaultHeaders string +} + +// NewCORSMiddleware creates a new CORSMiddleware with default headers. +func NewCORSMiddleware(d string) CORSMiddleware { + return CORSMiddleware{defaultHeaders: d} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all head values will be passed to HTTP handler + corsHeaders := c.defaultHeaders + if corsHeaders == "" { + corsHeaders = "*" + } + + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/debug.go b/vendor/github.com/moby/moby/api/server/middleware/debug.go new file mode 100644 index 0000000..8c85676 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/debug.go @@ -0,0 +1,76 @@ +package middleware + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +// DebugRequestMiddleware dumps the request to logger +func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) + + if r.Method != "POST" { + return handler(ctx, w, r, vars) + } + if err := httputils.CheckForJSON(r); err != nil { + return handler(ctx, w, r, vars) + } + maxBodySize := 4096 // 4KB + if r.ContentLength > int64(maxBodySize) { + return handler(ctx, w, r, vars) + } + + body := r.Body + bufReader := bufio.NewReaderSize(body, maxBodySize) + r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + b, err := bufReader.Peek(maxBodySize) + if err != io.EOF { + // either there was an error reading, or the buffer is full (in which case the request is too large) + return handler(ctx, w, r, vars) + } + + var postForm map[string]interface{} + if err := json.Unmarshal(b, &postForm); err == nil { + maskSecretKeys(postForm) + formStr, errMarshal := json.Marshal(postForm) + if errMarshal == nil { + logrus.Debugf("form data: %s", string(formStr)) + } else { + logrus.Debugf("form data: %q", postForm) + } + } + + return handler(ctx, w, r, vars) + } +} + +func maskSecretKeys(inp interface{}) { + if arr, ok := inp.([]interface{}); ok { + for _, f := range arr { + maskSecretKeys(f) + } + return + } + if form, ok := inp.(map[string]interface{}); ok { + loop0: + for k, v := range form { + for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} { + if strings.EqualFold(m, k) { + form[k] = "*****" + continue loop0 + } + } + maskSecretKeys(v) + } + } +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/experimental.go b/vendor/github.com/moby/moby/api/server/middleware/experimental.go new file mode 100644 index 0000000..b8f56e8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/experimental.go @@ -0,0 +1,29 @@ +package middleware + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// ExperimentalMiddleware is a the middleware in charge of adding the +// 'Docker-Experimental' header to every outgoing request +type ExperimentalMiddleware struct { + experimental string +} + +// NewExperimentalMiddleware creates a new ExperimentalMiddleware +func NewExperimentalMiddleware(experimentalEnabled bool) ExperimentalMiddleware { + if experimentalEnabled { + return ExperimentalMiddleware{"true"} + } + return ExperimentalMiddleware{"false"} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (e ExperimentalMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Docker-Experimental", e.experimental) + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/middleware.go b/vendor/github.com/moby/moby/api/server/middleware/middleware.go new file mode 100644 index 0000000..dc1f5bf --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/middleware.go @@ -0,0 +1,13 @@ +package middleware + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// Middleware is an interface to allow the use of ordinary functions as Docker API filters. +// Any struct that has the appropriate signature can be registered as a middleware. +type Middleware interface { + WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/version.go b/vendor/github.com/moby/moby/api/server/middleware/version.go new file mode 100644 index 0000000..1101465 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/version.go @@ -0,0 +1,50 @@ +package middleware + +import ( + "fmt" + "net/http" + "runtime" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VersionMiddleware is a middleware that +// validates the client and server versions. +type VersionMiddleware struct { + serverVersion string + defaultVersion string + minVersion string +} + +// NewVersionMiddleware creates a new VersionMiddleware +// with the default versions. +func NewVersionMiddleware(s, d, m string) VersionMiddleware { + return VersionMiddleware{ + serverVersion: s, + defaultVersion: d, + minVersion: m, + } +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := vars["version"] + if apiVersion == "" { + apiVersion = v.defaultVersion + } + + if versions.LessThan(apiVersion, v.minVersion) { + return errors.NewBadRequestError(fmt.Errorf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion)) + } + + header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS) + w.Header().Set("Server", header) + w.Header().Set("API-Version", v.defaultVersion) + ctx = context.WithValue(ctx, "api-version", apiVersion) + return handler(ctx, w, r, vars) + } + +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/version_test.go b/vendor/github.com/moby/moby/api/server/middleware/version_test.go new file mode 100644 index 0000000..9e72efd --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/version_test.go @@ -0,0 +1,57 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +func TestVersionMiddleware(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + if err := h(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} + +func TestVersionMiddlewareWithErrors(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + vars := map[string]string{"version": "0.1"} + err := h(ctx, resp, req, vars) + + if !strings.Contains(err.Error(), "client version 0.1 is too old. Minimum supported API version is 1.2.0") { + t.Fatalf("Expected too old client error, got %v", err) + } +} diff --git a/vendor/github.com/moby/moby/api/server/profiler.go b/vendor/github.com/moby/moby/api/server/profiler.go new file mode 100644 index 0000000..8bf8384 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/profiler.go @@ -0,0 +1,41 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +const debugPathPrefix = "/debug/" + +func profilerSetup(mainRouter *mux.Router) { + var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/trace", pprof.Trace) + r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) + r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) + r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +// Replicated from expvar.go as not public. +func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/moby/moby/api/server/router/build/backend.go b/vendor/github.com/moby/moby/api/server/router/build/backend.go new file mode 100644 index 0000000..0f01c11 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/build/backend.go @@ -0,0 +1,20 @@ +package build + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "golang.org/x/net/context" +) + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. +type Backend interface { + // Build builds a Docker image referenced by an imageID string. + // + // Note: Tagging an image should not be done by a Builder, it should instead be done + // by the caller. + // + // TODO: make this return a reference instead of string + BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/build/build.go b/vendor/github.com/moby/moby/api/server/router/build/build.go new file mode 100644 index 0000000..959498e --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/build/build.go @@ -0,0 +1,29 @@ +package build + +import "github.com/docker/docker/api/server/router" + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend) router.Router { + r := &buildRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + router.Cancellable(router.NewPostRoute("/build", r.postBuild)), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/build/build_routes.go b/vendor/github.com/moby/moby/api/server/router/build/build_routes.go new file mode 100644 index 0000000..75425b1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/build/build_routes.go @@ -0,0 +1,225 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + options.NetworkMode = r.FormValue("networkmode") + options.Tags = r.Form["t"] + options.SecurityOpt = r.Form["securityopt"] + options.Squash = httputils.BoolValue(r, "squash") + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.Isolation(r.FormValue("isolation")); i != "" { + if !container.Isolation.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.Isolation = i + } + + if runtime.GOOS != "windows" && options.SecurityOpt != nil { + return nil, fmt.Errorf("the daemon on this platform does not support --security-opt to build") + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + var buildArgs = map[string]*string{} + buildArgsJSON := r.FormValue("buildargs") + + // Note that there are two ways a --build-arg might appear in the + // json of the query param: + // "foo":"bar" + // and "foo":nil + // The first is the normal case, ie. --build-arg foo=bar + // or --build-arg foo + // where foo's value was picked up from an env var. + // The second ("foo":nil) is where they put --build-arg foo + // but "foo" isn't set as an env var. In that case we can't just drop + // the fact they mentioned it, we need to pass that along to the builder + // so that it can print a warning about "foo" being unused if there is + // no "ARG foo" in the Dockerfile. + if buildArgsJSON != "" { + if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + + var labels = map[string]string{} + labelsJSON := r.FormValue("labels") + if labelsJSON != "" { + if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil { + return nil, err + } + options.Labels = labels + } + + var cacheFrom = []string{} + cacheFromJSON := r.FormValue("cachefrom") + if cacheFromJSON != "" { + if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil { + return nil, err + } + options.CacheFrom = cacheFrom + } + + return options, nil +} + +type syncWriter struct { + w io.Writer + mu sync.Mutex +} + +func (s *syncWriter) Write(b []byte) (count int, err error) { + s.mu.Lock() + count, err = s.w.Write(b) + s.mu.Unlock() + return +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfigs = map[string]types.AuthConfig{} + authConfigsEncoded = r.Header.Get("X-Registry-Config") + notVerboseBuffer = bytes.NewBuffer(nil) + ) + + if authConfigsEncoded != "" { + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting + // to be empty. + } + } + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + sf := streamformatter.NewJSONStreamFormatter() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an internal error. + if !output.Flushed() { + return err + } + _, err = w.Write(sf.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + buildOptions.AuthConfigs = authConfigs + + remoteURL := r.FormValue("remote") + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. + createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := sf.NewProgressOutput(output, true) + if buildOptions.SuppressOutput { + progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) + } + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) + } + + out := io.Writer(output) + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + out = &syncWriter{w: out} + stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf} + stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf} + + pg := backend.ProgressWriter{ + Output: out, + StdoutFormatter: stdout, + StderrFormatter: stderr, + ProgressReaderFunc: createProgressReader, + } + + imgID, err := br.backend.BuildFromContext(ctx, r.Body, remoteURL, buildOptions, pg) + if err != nil { + return errf(err) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if buildOptions.SuppressOutput { + stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} + fmt.Fprintf(stdout, "%s\n", string(imgID)) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/checkpoint/backend.go b/vendor/github.com/moby/moby/api/server/router/checkpoint/backend.go new file mode 100644 index 0000000..8810f88 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/checkpoint/backend.go @@ -0,0 +1,10 @@ +package checkpoint + +import "github.com/docker/docker/api/types" + +// Backend for Checkpoint +type Backend interface { + CheckpointCreate(container string, config types.CheckpointCreateOptions) error + CheckpointDelete(container string, config types.CheckpointDeleteOptions) error + CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint.go b/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint.go new file mode 100644 index 0000000..c1e9392 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint.go @@ -0,0 +1,36 @@ +package checkpoint + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// checkpointRouter is a router to talk with the checkpoint controller +type checkpointRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new checkpoint router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &checkpointRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the checkpoint controller +func (r *checkpointRouter) Routes() []router.Route { + return r.routes +} + +func (r *checkpointRouter) initRoutes() { + r.routes = []router.Route{ + router.Experimental(router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints)), + router.Experimental(router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint)), + router.Experimental(router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint)), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint_routes.go b/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint_routes.go new file mode 100644 index 0000000..f988431 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint_routes.go @@ -0,0 +1,65 @@ +package checkpoint + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var options types.CheckpointCreateOptions + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&options); err != nil { + return err + } + + err := s.backend.CheckpointCreate(vars["name"], options) + if err != nil { + return err + } + + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{ + CheckpointDir: r.Form.Get("dir"), + }) + + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, checkpoints) +} + +func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{ + CheckpointDir: r.Form.Get("dir"), + CheckpointID: vars["checkpoint"], + }) + + if err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/backend.go b/vendor/github.com/moby/moby/api/server/router/container/backend.go new file mode 100644 index 0000000..0d20188 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/backend.go @@ -0,0 +1,79 @@ +package container + +import ( + "io" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/archive" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(name string, config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*backend.ExecInspect, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. +type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. +type stateBackend interface { + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds *int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(name string, timeout time.Duration) (int, error) +} + +// monitorBackend includes functions to implement to provide containers monitoring functionality. +type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version string) (interface{}, error) + ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error + ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) + + Containers(config *types.ContainerListOptions) ([]*types.Container, error) +} + +// attachBackend includes function to implement to provide container attaching functionality. +type attachBackend interface { + ContainerAttach(name string, c *backend.ContainerAttachConfig) error +} + +// systemBackend includes functions to implement to provide system wide containers functionality +type systemBackend interface { + ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. +type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend + systemBackend +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/container.go b/vendor/github.com/moby/moby/api/server/router/container/container.go new file mode 100644 index 0000000..bbed7e9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/container.go @@ -0,0 +1,77 @@ +package container + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &containerRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + router.NewGetRoute("/containers/json", r.getContainersJSON), + router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + router.Cancellable(router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs)), + router.Cancellable(router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats)), + router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), + // POST + router.NewPostRoute("/containers/create", r.postContainersCreate), + router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), + router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12 + router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + router.NewPostRoute("/containers/prune", r.postContainersPrune), + // PUT + router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/container_routes.go b/vendor/github.com/moby/moby/api/server/router/container/container_routes.go new file mode 100644 index 0000000..3c38924 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/container_routes.go @@ -0,0 +1,559 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + config := &types.ContainerListOptions{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: filter, + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + if !stream { + w.Header().Set("Content-Type", "application/json") + } + + config := &backend.ContainerStatsConfig{ + Stream: stream, + OutStream: w, + Version: string(httputils.VersionFromContext(ctx)), + } + + return s.backend.ContainerStats(ctx, vars["name"], config) +} + +func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + containerName := vars["name"] + logsConfig := &backend.ContainerLogsConfig{ + ContainerLogsOptions: types.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + }, + OutStream: w, + } + + chStarted := make(chan struct{}) + if err := s.backend.ContainerLogs(ctx, containerName, logsConfig, chStarted); err != nil { + select { + case <-chStarted: + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that if needed. + fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %v\n", err) + default: + return err + } + } + + return nil +} + +func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.backend.ContainerExport(vars["name"], w) +} + +func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assumed chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + + version := httputils.VersionFromContext(ctx) + var hostConfig *container.HostConfig + // A non-nil json object is at least 7 characters. + if r.ContentLength > 7 || r.ContentLength == -1 { + if versions.GreaterThanOrEqualTo(version, "1.24") { + return validationError{fmt.Errorf("starting container with non-empty request body was deprecated since v1.10 and removed in v1.12")} + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := s.decoder.DecodeHostConfig(r.Body) + if err != nil { + return err + } + hostConfig = c + } + + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoint := r.Form.Get("checkpoint") + checkpointDir := r.Form.Get("checkpoint-dir") + if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +type errContainerIsRunning interface { + ContainerIsRunning() bool +} + +func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it. Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { + var isStopped bool + if e, ok := err.(errContainerIsRunning); ok { + isStopped = !e.ContainerIsRunning() + } + + // Return error that's not caused because the container is stopped. + // Return error if the container is not running and the api is >= 1.20 + // to keep backwards compatibility. + version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &container.ContainerWaitOKBody{ + StatusCode: int64(status), + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + RestartPolicy: updateConfig.RestartPolicy, + } + + name := vars["name"] + resp, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := versions.LessThan(version, "1.19") + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(version, "1.25") { + hostConfig.AutoRemove = false + } + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + detachKeys := r.FormValue("detachKeys") + + hijacker, ok := w.(http.Hijacker) + if !ok { + return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) + } + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + conn, _, err := hijacker.Hijack() + if err != nil { + return nil, nil, nil, err + } + + // set raw mode + conn.Write([]byte{}) + + if upgrade { + fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + closer := func() error { + httputils.CloseStreams(conn) + return nil + } + return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + MuxStreams: true, + } + + if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + // Remember to close stream if error happens + conn, _, errHijack := hijacker.Hijack() + if errHijack == nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + statusText := http.StatusText(statusCode) + fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error()) + httputils.CloseStreams(conn) + } else { + logrus.Errorf("Error Hijacking: %v", err) + } + } + return nil +} + +func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + var err error + detachKeys := r.FormValue("detachKeys") + + done := make(chan struct{}) + started := make(chan struct{}) + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + wsChan := make(chan *websocket.Conn) + h := func(conn *websocket.Conn) { + wsChan <- conn + <-done + } + + srv := websocket.Server{Handler: h, Handshake: nil} + go func() { + close(started) + srv.ServeHTTP(w, r) + }() + + conn := <-wsChan + return conn, conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + UseStdin: true, + UseStdout: true, + UseStderr: true, + MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr + } + + err = s.backend.ContainerAttach(containerName, attachConfig) + close(done) + select { + case <-started: + logrus.Errorf("Error attaching websocket: %s", err) + return nil + default: + } + return err +} + +func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ContainersPrune(pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/copy.go b/vendor/github.com/moby/moby/api/server/router/container/copy.go new file mode 100644 index 0000000..ede6dff --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/copy.go @@ -0,0 +1,119 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of getContainersArchive. +func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // Deprecated since 1.8, Errors out since 1.12 + version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.24") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + if _, err := io.Copy(w, data); err != nil { + return err + } + + return nil +} + +// // Encode the stat to JSON, base64 encode, and place in a header. +func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/exec.go b/vendor/github.com/moby/moby/api/server/router/container/exec.go new file mode 100644 index 0000000..1134a0e --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/exec.go @@ -0,0 +1,140 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/stdcopy" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.backend.ContainerExecCreate(name, execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %v", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. +func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if versions.GreaterThan(version, "1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n") + } else { + fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n") + } + + // copy headers that were removed as part of hijack + if err := w.Header().WriteSubset(outStream, nil); err != nil { + return err + } + fmt.Fprint(outStream, "\r\n") + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } + + // Now run the user process in container. + // Maybe we should we pass ctx here if we're not detaching? + if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + stdout.Write([]byte(err.Error() + "\r\n")) + logrus.Errorf("Error running exec in container: %v", err) + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/inspect.go b/vendor/github.com/moby/moby/api/server/router/container/inspect.go new file mode 100644 index 0000000..dbbced7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects container's configuration and serializes it as json. +func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/vendor/github.com/moby/moby/api/server/router/experimental.go b/vendor/github.com/moby/moby/api/server/router/experimental.go new file mode 100644 index 0000000..51385c2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/experimental.go @@ -0,0 +1,67 @@ +package router + +import ( + "errors" + "net/http" + + "golang.org/x/net/context" + + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" +) + +var ( + errExperimentalFeature = errors.New("This experimental feature is disabled by default. Start the Docker daemon with --experimental in order to enable it.") +) + +// ExperimentalRoute defines an experimental API route that can be enabled or disabled. +type ExperimentalRoute interface { + Route + + Enable() + Disable() +} + +// experimentalRoute defines an experimental API route that can be enabled or disabled. +// It implements ExperimentalRoute +type experimentalRoute struct { + local Route + handler httputils.APIFunc +} + +// Enable enables this experimental route +func (r *experimentalRoute) Enable() { + r.handler = r.local.Handler() +} + +// Disable disables the experimental route +func (r *experimentalRoute) Disable() { + r.handler = experimentalHandler +} + +func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return apierrors.NewErrorWithStatusCode(errExperimentalFeature, http.StatusNotImplemented) +} + +// Handler returns returns the APIFunc to let the server wrap it in middlewares. +func (r *experimentalRoute) Handler() httputils.APIFunc { + return r.handler +} + +// Method returns the http method that the route responds to. +func (r *experimentalRoute) Method() string { + return r.local.Method() +} + +// Path returns the subpath where the route responds to. +func (r *experimentalRoute) Path() string { + return r.local.Path() +} + +// Experimental will mark a route as experimental. +func Experimental(r Route) Route { + return &experimentalRoute{ + local: r, + handler: experimentalHandler, + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/image/backend.go b/vendor/github.com/moby/moby/api/server/router/image/backend.go new file mode 100644 index 0000000..19a67a5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/image/backend.go @@ -0,0 +1,45 @@ +package image + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. +type Backend interface { + containerBackend + imageBackend + importExportBackend + registryBackend +} + +type containerBackend interface { + Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) +} + +type imageBackend interface { + ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) + ImageHistory(imageName string) ([]*types.ImageHistory, error) + Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) + LookupImage(name string) (*types.ImageInspect, error) + TagImage(imageName, repository, tag string) error + ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) +} + +type importExportBackend interface { + LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ExportImage(names []string, outStream io.Writer) error +} + +type registryBackend interface { + PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/image/image.go b/vendor/github.com/moby/moby/api/server/router/image/image.go new file mode 100644 index 0000000..54a4d51 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/image/image.go @@ -0,0 +1,50 @@ +package image + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// imageRouter is a router to talk with the image controller +type imageRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new image router +func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router { + r := &imageRouter{ + backend: backend, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the image controller +func (r *imageRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the image router +func (r *imageRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/images/json", r.getImagesJSON), + router.NewGetRoute("/images/search", r.getImagesSearch), + router.NewGetRoute("/images/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + router.NewPostRoute("/commit", r.postCommit), + router.NewPostRoute("/images/load", r.postImagesLoad), + router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)), + router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)), + router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + router.NewPostRoute("/images/prune", r.postImagesPrune), + // DELETE + router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/image/image_routes.go b/vendor/github.com/moby/moby/api/server/router/image/image_routes.go new file mode 100644 index 0000000..6940365 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/image/image_routes.go @@ -0,0 +1,344 @@ +package image + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") { + pause = true + } + + c, _, _, err := s.decoder.DecodeConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. + return err + } + if c == nil { + c = &container.Config{} + } + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Config: c, + MergeConfigs: true, + }, + Changes: r.Form["changes"], + } + + imgID, err := s.backend.Commit(cname, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } + + err = s.backend.PullImage(ctx, image, tag, metaHeaders, authConfig, output) + } else { //import + src := r.Form.Get("fromSrc") + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + err = s.backend.ImportImage(src, repo, tag, message, r.Body, output, r.Form["changes"]) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil +} + +func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &types.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + image := vars["name"] + tag := r.Form.Get("tag") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.backend.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + quiet := httputils.BoolValueOrDefault(r, "quiet", true) + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + return nil +} + +func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.backend.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.backend.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + imageFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + filterParam := r.Form.Get("filter") + if versions.LessThan(version, "1.28") && filterParam != "" { + imageFilters.Add("reference", filterParam) + } + + images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.backend.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + var ( + config *types.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + limit := registry.DefaultSearchLimit + if r.Form.Get("limit") != "" { + limitValue, err := strconv.Atoi(r.Form.Get("limit")) + if err != nil { + return err + } + limit = limitValue + } + query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} + +func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ImagesPrune(pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/moby/moby/api/server/router/local.go b/vendor/github.com/moby/moby/api/server/router/local.go new file mode 100644 index 0000000..7cb2a5a --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/local.go @@ -0,0 +1,96 @@ +package router + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// localRoute defines an individual API route to connect +// with the docker daemon. It implements Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath where the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc) Route { + return localRoute{method, path, handler} +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("GET", path, handler) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("POST", path, handler) +} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("PUT", path, handler) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("DELETE", path, handler) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS. +func NewOptionsRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("OPTIONS", path, handler) +} + +// NewHeadRoute initializes a new route with the http method HEAD. +func NewHeadRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("HEAD", path, handler) +} + +func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if notifier, ok := w.(http.CloseNotifier); ok { + notify := notifier.CloseNotify() + notifyCtx, cancel := context.WithCancel(ctx) + finished := make(chan struct{}) + defer close(finished) + ctx = notifyCtx + go func() { + select { + case <-notify: + cancel() + case <-finished: + } + }() + } + return h(ctx, w, r, vars) + } +} + +// Cancellable makes new route which embeds http.CloseNotifier feature to +// context.Context of handler. +func Cancellable(r Route) Route { + return localRoute{ + method: r.Method(), + path: r.Path(), + handler: cancellableHandler(r.Handler()), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/network/backend.go b/vendor/github.com/moby/moby/api/server/router/network/backend.go new file mode 100644 index 0000000..0d1dfb0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/network/backend.go @@ -0,0 +1,22 @@ +package network + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/libnetwork" +) + +// Backend is all the methods that need to be implemented +// to provide network specific functionality. +type Backend interface { + FindNetwork(idName string) (libnetwork.Network, error) + GetNetworkByName(idName string) (libnetwork.Network, error) + GetNetworksByID(partialID string) []libnetwork.Network + GetNetworks() []libnetwork.Network + CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error + DeleteNetwork(name string) error + NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/network/filter.go b/vendor/github.com/moby/moby/api/server/router/network/filter.go new file mode 100644 index 0000000..94affb8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/network/filter.go @@ -0,0 +1,96 @@ +package network + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/runconfig" +) + +var ( + // AcceptedFilters is an acceptable filters for validation + AcceptedFilters = map[string]bool{ + "driver": true, + "type": true, + "name": true, + "id": true, + "label": true, + } +) + +func filterNetworkByType(nws []types.NetworkResource, netType string) (retNws []types.NetworkResource, err error) { + switch netType { + case "builtin": + for _, nw := range nws { + if runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + case "custom": + for _, nw := range nws { + if !runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + default: + return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) + } + return retNws, nil +} + +// filterNetworks filters network list according to user specified filter +// and returns user chosen networks +func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) { + // if filter is empty, return original network list + if filter.Len() == 0 { + return nws, nil + } + + if err := filter.Validate(AcceptedFilters); err != nil { + return nil, err + } + + displayNet := []types.NetworkResource{} + for _, nw := range nws { + if filter.Include("driver") { + if !filter.ExactMatch("driver", nw.Driver) { + continue + } + } + if filter.Include("name") { + if !filter.Match("name", nw.Name) { + continue + } + } + if filter.Include("id") { + if !filter.Match("id", nw.ID) { + continue + } + } + if filter.Include("label") { + if !filter.MatchKVList("label", nw.Labels) { + continue + } + } + displayNet = append(displayNet, nw) + } + + if filter.Include("type") { + var typeNet []types.NetworkResource + errFilter := filter.WalkValues("type", func(fval string) error { + passList, err := filterNetworkByType(displayNet, fval) + if err != nil { + return err + } + typeNet = append(typeNet, passList...) + return nil + }) + if errFilter != nil { + return nil, errFilter + } + displayNet = typeNet + } + + return displayNet, nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/network/network.go b/vendor/github.com/moby/moby/api/server/router/network/network.go new file mode 100644 index 0000000..08a5c8c --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/network/network.go @@ -0,0 +1,44 @@ +package network + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon/cluster" +) + +// networkRouter is a router to talk with the network controller +type networkRouter struct { + backend Backend + clusterProvider *cluster.Cluster + routes []router.Route +} + +// NewRouter initializes a new network router +func NewRouter(b Backend, c *cluster.Cluster) router.Router { + r := &networkRouter{ + backend: b, + clusterProvider: c, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the network controller +func (r *networkRouter) Routes() []router.Route { + return r.routes +} + +func (r *networkRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/networks", r.getNetworksList), + router.NewGetRoute("/networks/", r.getNetworksList), + router.NewGetRoute("/networks/{id:.+}", r.getNetwork), + // POST + router.NewPostRoute("/networks/create", r.postNetworkCreate), + router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect), + router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect), + router.NewPostRoute("/networks/prune", r.postNetworksPrune), + // DELETE + router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/network/network_routes.go b/vendor/github.com/moby/moby/api/server/router/network/network_routes.go new file mode 100644 index 0000000..7bfc499 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/network/network_routes.go @@ -0,0 +1,308 @@ +package network + +import ( + "encoding/json" + "net/http" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/networkdb" +) + +func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + filter := r.Form.Get("filters") + netFilters, err := filters.FromParam(filter) + if err != nil { + return err + } + + list := []types.NetworkResource{} + + if nr, err := n.clusterProvider.GetNetworks(); err == nil { + list = append(list, nr...) + } + + // Combine the network list returned by Docker daemon if it is not already + // returned by the cluster manager +SKIP: + for _, nw := range n.backend.GetNetworks() { + for _, nl := range list { + if nl.ID == nw.ID() { + continue SKIP + } + } + list = append(list, *n.buildNetworkResource(nw)) + } + + list, err = filterNetworks(list, netFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + if nr, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { + return httputils.WriteJSON(w, http.StatusOK, nr) + } + return err + } + return httputils.WriteJSON(w, http.StatusOK, n.buildNetworkResource(nw)) +} + +func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var create types.NetworkCreateRequest + + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&create); err != nil { + return err + } + + if nws, err := n.clusterProvider.GetNetworksByName(create.Name); err == nil && len(nws) > 0 { + return libnetwork.NetworkNameError(create.Name) + } + + nw, err := n.backend.CreateNetwork(create) + if err != nil { + if _, ok := err.(libnetwork.ManagerRedirectError); !ok { + return err + } + id, err := n.clusterProvider.CreateNetwork(create) + if err != nil { + return err + } + nw = &types.NetworkCreateResponse{ID: id} + } + + return httputils.WriteJSON(w, http.StatusCreated, nw) +} + +func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var connect types.NetworkConnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { + return err + } + + return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig) +} + +func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var disconnect types.NetworkDisconnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { + return err + } + + return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force) +} + +func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if _, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { + if err = n.clusterProvider.RemoveNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil + } + if err := n.backend.DeleteNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { + r := &types.NetworkResource{} + if nw == nil { + return r + } + + info := nw.Info() + r.Name = nw.Name() + r.ID = nw.ID() + r.Created = info.Created() + r.Scope = info.Scope() + if n.clusterProvider.IsManager() { + if _, err := n.clusterProvider.GetNetwork(nw.ID()); err == nil { + r.Scope = "swarm" + } + } else if info.Dynamic() { + r.Scope = "swarm" + } + r.Driver = nw.Type() + r.EnableIPv6 = info.IPv6Enabled() + r.Internal = info.Internal() + r.Attachable = info.Attachable() + r.Options = info.DriverOptions() + r.Containers = make(map[string]types.EndpointResource) + buildIpamResources(r, info) + r.Labels = info.Labels() + + peers := info.Peers() + if len(peers) != 0 { + r.Peers = buildPeerInfoResources(peers) + } + + epl := nw.Endpoints() + for _, e := range epl { + ei := e.Info() + if ei == nil { + continue + } + sb := ei.Sandbox() + tmpID := e.ID() + key := "ep-" + tmpID + if sb != nil { + key = sb.ContainerID() + } + + r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei) + } + return r +} + +func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo { + peerInfo := make([]network.PeerInfo, 0, len(peers)) + for _, peer := range peers { + peerInfo = append(peerInfo, network.PeerInfo{ + Name: peer.Name, + IP: peer.IP, + }) + } + return peerInfo +} + +func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) { + id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig() + + ipv4Info, ipv6Info := nwInfo.IpamInfo() + + r.IPAM.Driver = id + + r.IPAM.Options = opts + + r.IPAM.Config = []network.IPAMConfig{} + for _, ip4 := range ipv4conf { + if ip4.PreferredPool == "" { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip4.PreferredPool + iData.IPRange = ip4.SubPool + iData.Gateway = ip4.Gateway + iData.AuxAddress = ip4.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if len(r.IPAM.Config) == 0 { + for _, ip4Info := range ipv4Info { + iData := network.IPAMConfig{} + iData.Subnet = ip4Info.IPAMData.Pool.String() + iData.Gateway = ip4Info.IPAMData.Gateway.IP.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } + + hasIpv6Conf := false + for _, ip6 := range ipv6conf { + if ip6.PreferredPool == "" { + continue + } + hasIpv6Conf = true + iData := network.IPAMConfig{} + iData.Subnet = ip6.PreferredPool + iData.IPRange = ip6.SubPool + iData.Gateway = ip6.Gateway + iData.AuxAddress = ip6.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if !hasIpv6Conf { + for _, ip6Info := range ipv6Info { + if ip6Info.IPAMData.Pool == nil { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip6Info.IPAMData.Pool.String() + iData.Gateway = ip6Info.IPAMData.Gateway.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } +} + +func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource { + er := types.EndpointResource{} + + er.EndpointID = id + er.Name = name + ei := info + if ei == nil { + return er + } + + if iface := ei.Iface(); iface != nil { + if mac := iface.MacAddress(); mac != nil { + er.MacAddress = mac.String() + } + if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { + er.IPv4Address = ip.String() + } + + if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { + er.IPv6Address = ipv6.String() + } + } + return er +} + +func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneReport, err := n.backend.NetworksPrune(filters.Args{}) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/moby/moby/api/server/router/plugin/backend.go b/vendor/github.com/moby/moby/api/server/router/plugin/backend.go new file mode 100644 index 0000000..ab006b2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/plugin/backend.go @@ -0,0 +1,25 @@ +package plugin + +import ( + "io" + "net/http" + + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +// Backend for Plugin +type Backend interface { + Disable(name string, config *enginetypes.PluginDisableConfig) error + Enable(name string, config *enginetypes.PluginEnableConfig) error + List() ([]enginetypes.Plugin, error) + Inspect(name string) (*enginetypes.Plugin, error) + Remove(name string, config *enginetypes.PluginRmConfig) error + Set(name string, args []string) error + Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error + Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error +} diff --git a/vendor/github.com/moby/moby/api/server/router/plugin/plugin.go b/vendor/github.com/moby/moby/api/server/router/plugin/plugin.go new file mode 100644 index 0000000..e4ea9e2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/plugin/plugin.go @@ -0,0 +1,39 @@ +package plugin + +import "github.com/docker/docker/api/server/router" + +// pluginRouter is a router to talk with the plugin controller +type pluginRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new plugin router +func NewRouter(b Backend) router.Router { + r := &pluginRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the plugin controller +func (r *pluginRouter) Routes() []router.Route { + return r.routes +} + +func (r *pluginRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/plugins", r.listPlugins), + router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin), + router.NewGetRoute("/plugins/privileges", r.getPrivileges), + router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), + router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH? + router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), + router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)), + router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)), + router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin)), + router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), + router.NewPostRoute("/plugins/create", r.createPlugin), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/plugin/plugin_routes.go b/vendor/github.com/moby/moby/api/server/router/plugin/plugin_routes.go new file mode 100644 index 0000000..693fa95 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/plugin/plugin_routes.go @@ -0,0 +1,314 @@ +package plugin + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strconv" + "strings" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) { + + metaHeaders := map[string][]string{} + for k, v := range headers { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + // Get X-Registry-Auth + authEncoded := headers.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + authConfig = &types.AuthConfig{} + } + } + + return metaHeaders, authConfig +} + +// parseRemoteRef parses the remote reference into a reference.Named +// returning the tag associated with the reference. In the case the +// given reference string includes both digest and tag, the returned +// reference will have the digest without the tag, but the tag will +// be returned. +func parseRemoteRef(remote string) (reference.Named, string, error) { + // Parse remote reference, supporting remotes with name and tag + // NOTE: Using distribution reference to handle references + // containing both a name and digest + remoteRef, err := distreference.ParseNamed(remote) + if err != nil { + return nil, "", err + } + + var tag string + if t, ok := remoteRef.(distreference.Tagged); ok { + tag = t.Tag() + } + + // Convert distribution reference to docker reference + // TODO: remove when docker reference changes reconciled upstream + ref, err := reference.WithName(remoteRef.Name()) + if err != nil { + return nil, "", err + } + if d, ok := remoteRef.(distreference.Digested); ok { + ref, err = reference.WithDigest(ref, d.Digest()) + if err != nil { + return nil, "", err + } + } else if tag != "" { + ref, err = reference.WithTag(ref, tag) + if err != nil { + return nil, "", err + } + } else { + ref = reference.WithDefaultTag(ref) + } + + return ref, tag, nil +} + +func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + ref, _, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, privileges) +} + +func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, vars["name"]) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Upgrade(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + + return nil +} + +func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, r.FormValue("name")) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + + return nil +} + +func getName(ref reference.Named, tag, name string) (string, error) { + if name == "" { + if _, ok := ref.(reference.Canonical); ok { + trimmed := reference.TrimNamed(ref) + if tag != "" { + nt, err := reference.WithTag(trimmed, tag) + if err != nil { + return "", err + } + name = nt.String() + } else { + name = reference.WithDefaultTag(trimmed).String() + } + } else { + name = ref.String() + } + } else { + localRef, err := reference.ParseNamed(name) + if err != nil { + return "", err + } + if _, ok := localRef.(reference.Canonical); ok { + return "", errors.New("cannot use digest in plugin tag") + } + if distreference.IsNameOnly(localRef) { + // TODO: log change in name to out stream + name = reference.WithDefaultTag(localRef).String() + } + } + return name, nil +} + +func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + options := &types.PluginCreateOptions{ + RepoName: r.FormValue("name")} + + if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil { + return err + } + //TODO: send progress bar + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + timeout, err := strconv.Atoi(r.Form.Get("timeout")) + if err != nil { + return err + } + config := &types.PluginEnableConfig{Timeout: timeout} + + return pr.backend.Enable(name, config) +} + +func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginDisableConfig{ + ForceDisable: httputils.BoolValue(r, "force"), + } + + return pr.backend.Disable(name, config) +} + +func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + } + return pr.backend.Remove(name, config) +} + +func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + return nil +} + +func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var args []string + if err := json.NewDecoder(r.Body).Decode(&args); err != nil { + return err + } + if err := pr.backend.Set(vars["name"], args); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + l, err := pr.backend.List() + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, l) +} + +func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + result, err := pr.backend.Inspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, result) +} diff --git a/vendor/github.com/moby/moby/api/server/router/router.go b/vendor/github.com/moby/moby/api/server/router/router.go new file mode 100644 index 0000000..2de25c2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/router.go @@ -0,0 +1,19 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + // Routes returns the list of routes to add to the docker server. + Routes() []Route +} + +// Route defines an individual API route in the docker server. +type Route interface { + // Handler returns the raw function to create the http handler. + Handler() httputils.APIFunc + // Method returns the http method that the route responds to. + Method() string + // Path returns the subpath where the route responds to. + Path() string +} diff --git a/vendor/github.com/moby/moby/api/server/router/swarm/backend.go b/vendor/github.com/moby/moby/api/server/router/swarm/backend.go new file mode 100644 index 0000000..33840f0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/swarm/backend.go @@ -0,0 +1,36 @@ +package swarm + +import ( + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + types "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// Backend abstracts an swarm commands manager. +type Backend interface { + Init(req types.InitRequest) (string, error) + Join(req types.JoinRequest) error + Leave(force bool) error + Inspect() (types.Swarm, error) + Update(uint64, types.Spec, types.UpdateFlags) error + GetUnlockKey() (string, error) + UnlockSwarm(req types.UnlockRequest) error + GetServices(basictypes.ServiceListOptions) ([]types.Service, error) + GetService(string) (types.Service, error) + CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error) + UpdateService(string, uint64, types.ServiceSpec, string, string) (*basictypes.ServiceUpdateResponse, error) + RemoveService(string) error + ServiceLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error + GetNodes(basictypes.NodeListOptions) ([]types.Node, error) + GetNode(string) (types.Node, error) + UpdateNode(string, uint64, types.NodeSpec) error + RemoveNode(string, bool) error + GetTasks(basictypes.TaskListOptions) ([]types.Task, error) + GetTask(string) (types.Task, error) + GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error) + CreateSecret(s types.SecretSpec) (string, error) + RemoveSecret(id string) error + GetSecret(id string) (types.Secret, error) + UpdateSecret(id string, version uint64, spec types.SecretSpec) error +} diff --git a/vendor/github.com/moby/moby/api/server/router/swarm/cluster.go b/vendor/github.com/moby/moby/api/server/router/swarm/cluster.go new file mode 100644 index 0000000..e2d5ad1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/swarm/cluster.go @@ -0,0 +1,52 @@ +package swarm + +import "github.com/docker/docker/api/server/router" + +// swarmRouter is a router to talk with the build controller +type swarmRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend) router.Router { + r := &swarmRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the swarm controller +func (sr *swarmRouter) Routes() []router.Route { + return sr.routes +} + +func (sr *swarmRouter) initRoutes() { + sr.routes = []router.Route{ + router.NewPostRoute("/swarm/init", sr.initCluster), + router.NewPostRoute("/swarm/join", sr.joinCluster), + router.NewPostRoute("/swarm/leave", sr.leaveCluster), + router.NewGetRoute("/swarm", sr.inspectCluster), + router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey), + router.NewPostRoute("/swarm/update", sr.updateCluster), + router.NewPostRoute("/swarm/unlock", sr.unlockCluster), + router.NewGetRoute("/services", sr.getServices), + router.NewGetRoute("/services/{id}", sr.getService), + router.NewPostRoute("/services/create", sr.createService), + router.NewPostRoute("/services/{id}/update", sr.updateService), + router.NewDeleteRoute("/services/{id}", sr.removeService), + router.Experimental(router.Cancellable(router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs))), + router.NewGetRoute("/nodes", sr.getNodes), + router.NewGetRoute("/nodes/{id}", sr.getNode), + router.NewDeleteRoute("/nodes/{id}", sr.removeNode), + router.NewPostRoute("/nodes/{id}/update", sr.updateNode), + router.NewGetRoute("/tasks", sr.getTasks), + router.NewGetRoute("/tasks/{id}", sr.getTask), + router.NewGetRoute("/secrets", sr.getSecrets), + router.NewPostRoute("/secrets/create", sr.createSecret), + router.NewDeleteRoute("/secrets/{id}", sr.removeSecret), + router.NewGetRoute("/secrets/{id}", sr.getSecret), + router.NewPostRoute("/secrets/{id}/update", sr.updateSecret), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/swarm/cluster_routes.go b/vendor/github.com/moby/moby/api/server/router/swarm/cluster_routes.go new file mode 100644 index 0000000..59420fe --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/swarm/cluster_routes.go @@ -0,0 +1,423 @@ +package swarm + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.InitRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + nodeID, err := sr.backend.Init(req) + if err != nil { + logrus.Errorf("Error initializing swarm: %v", err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, nodeID) +} + +func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.JoinRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + return sr.backend.Join(req) +} + +func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + return sr.backend.Leave(force) +} + +func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + swarm, err := sr.backend.Inspect() + if err != nil { + logrus.Errorf("Error getting swarm: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, swarm) +} + +func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var swarm types.Spec + if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid swarm version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + var flags types.UpdateFlags + + if value := r.URL.Query().Get("rotateWorkerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for rotateWorkerToken: %s", value) + return errors.NewBadRequestError(err) + } + + flags.RotateWorkerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for rotateManagerToken: %s", value) + return errors.NewBadRequestError(err) + } + + flags.RotateManagerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerUnlockKey"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value)) + } + + flags.RotateManagerUnlockKey = rot + } + + if err := sr.backend.Update(version, swarm, flags); err != nil { + logrus.Errorf("Error configuring swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.UnlockRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + if err := sr.backend.UnlockSwarm(req); err != nil { + logrus.Errorf("Error unlocking swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + unlockKey, err := sr.backend.GetUnlockKey() + if err != nil { + logrus.WithError(err).Errorf("Error retrieving swarm unlock key") + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + }) +} + +func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting services: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, services) +} + +func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + service, err := sr.backend.GetService(vars["id"]) + if err != nil { + logrus.Errorf("Error getting service %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, service) +} + +func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + // Get returns "" if the header does not exist + encodedAuth := r.Header.Get("X-Registry-Auth") + + resp, err := sr.backend.CreateService(service, encodedAuth) + if err != nil { + logrus.Errorf("Error creating service %s: %v", service.Name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, resp) +} + +func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid service version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + // Get returns "" if the header does not exist + encodedAuth := r.Header.Get("X-Registry-Auth") + + registryAuthFrom := r.URL.Query().Get("registryAuthFrom") + + resp, err := sr.backend.UpdateService(vars["id"], version, service, encodedAuth, registryAuthFrom) + if err != nil { + logrus.Errorf("Error updating service %s: %v", vars["id"], err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveService(vars["id"]); err != nil { + logrus.Errorf("Error removing service %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + serviceName := vars["id"] + logsConfig := &backend.ContainerLogsConfig{ + ContainerLogsOptions: basictypes.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + }, + OutStream: w, + } + + if logsConfig.Details { + return fmt.Errorf("Bad parameters: details is not currently supported") + } + + chStarted := make(chan struct{}) + if err := sr.backend.ServiceLogs(ctx, serviceName, logsConfig, chStarted); err != nil { + select { + case <-chStarted: + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that if needed. + fmt.Fprintf(logsConfig.OutStream, "Error grabbing service logs: %v\n", err) + default: + return err + } + } + + return nil +} + +func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting nodes: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, nodes) +} + +func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + node, err := sr.backend.GetNode(vars["id"]) + if err != nil { + logrus.Errorf("Error getting node %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, node) +} + +func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var node types.NodeSpec + if err := json.NewDecoder(r.Body).Decode(&node); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid node version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { + logrus.Errorf("Error updating node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + + if err := sr.backend.RemoveNode(vars["id"], force); err != nil { + logrus.Errorf("Error removing node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting tasks: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, tasks) +} + +func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + task, err := sr.backend.GetTask(vars["id"]) + if err != nil { + logrus.Errorf("Error getting task %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, task) +} + +func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secrets) +} + +func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return err + } + + id, err := sr.backend.CreateSecret(secret) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.SecretCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveSecret(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + secret, err := sr.backend.GetSecret(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secret) +} + +func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid secret version")) + } + + id := vars["id"] + if err := sr.backend.UpdateSecret(id, version, secret); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/system/backend.go b/vendor/github.com/moby/moby/api/server/router/system/backend.go new file mode 100644 index 0000000..6946c4e --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/system/backend.go @@ -0,0 +1,21 @@ +package system + +import ( + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// Backend is the methods that need to be implemented to provide +// system specific functionality. +type Backend interface { + SystemInfo() (*types.Info, error) + SystemVersion() types.Version + SystemDiskUsage() (*types.DiskUsage, error) + SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(chan interface{}) + AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/system/system.go b/vendor/github.com/moby/moby/api/server/router/system/system.go new file mode 100644 index 0000000..ed23d3b --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/system/system.go @@ -0,0 +1,39 @@ +package system + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon/cluster" +) + +// systemRouter provides information about the Docker system overall. +// It gathers information about host, daemon and container events. +type systemRouter struct { + backend Backend + clusterProvider *cluster.Cluster + routes []router.Route +} + +// NewRouter initializes a new system router +func NewRouter(b Backend, c *cluster.Cluster) router.Router { + r := &systemRouter{ + backend: b, + clusterProvider: c, + } + + r.routes = []router.Route{ + router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), + router.NewGetRoute("/_ping", pingHandler), + router.Cancellable(router.NewGetRoute("/events", r.getEvents)), + router.NewGetRoute("/info", r.getInfo), + router.NewGetRoute("/version", r.getVersion), + router.NewGetRoute("/system/df", r.getDiskUsage), + router.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system +func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/vendor/github.com/moby/moby/api/server/router/system/system_routes.go b/vendor/github.com/moby/moby/api/server/router/system/system_routes.go new file mode 100644 index 0000000..0d851b6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/system/system_routes.go @@ -0,0 +1,186 @@ +package system + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + if s.clusterProvider != nil { + info.Swarm = s.clusterProvider.Info() + } + + if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") { + // TODO: handle this conversion in engine-api + type oldInfo struct { + *types.Info + ExecutionDriver string + } + old := &oldInfo{ + Info: info, + ExecutionDriver: "", + } + nameOnlySecurityOptions := []string{} + kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) + if err != nil { + return err + } + for _, s := range kvSecOpts { + nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) + } + old.SecurityOptions = nameOnlySecurityOptions + return httputils.WriteJSON(w, http.StatusOK, old) + } + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info := s.backend.SystemVersion() + info.APIVersion = api.DefaultVersion + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + du, err := s.backend.SystemDiskUsage() + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, du) +} + +func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + since, err := eventTime(r.Form.Get("since")) + if err != nil { + return err + } + until, err := eventTime(r.Form.Get("until")) + if err != nil { + return err + } + + var ( + timeout <-chan time.Time + onlyPastEvents bool + ) + if !until.IsZero() { + if until.Before(since) { + return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))) + } + + now := time.Now() + + onlyPastEvents = until.Before(now) + + if !onlyPastEvents { + dur := until.Sub(now) + timeout = time.NewTimer(dur).C + } + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + defer output.Close() + output.Flush() + + enc := json.NewEncoder(output) + + buffered, l := s.backend.SubscribeToEvents(since, until, ef) + defer s.backend.UnsubscribeFromEvents(l) + + for _, ev := range buffered { + if err := enc.Encode(ev); err != nil { + return err + } + } + + if onlyPastEvents { + return nil + } + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + logrus.Warnf("unexpected event message: %q", ev) + continue + } + if err := enc.Encode(jev); err != nil { + return err + } + case <-timeout: + return nil + case <-ctx.Done(): + logrus.Debug("Client context cancelled, stop sending events") + return nil + } + } +} + +func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *types.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, token, err := s.backend.AuthenticateToRegistry(ctx, config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, ®istry.AuthenticateOKBody{ + Status: status, + IdentityToken: token, + }) +} + +func eventTime(formTime string) (time.Time, error) { + t, tNano, err := timetypes.ParseTimestamps(formTime, -1) + if err != nil { + return time.Time{}, err + } + if t == -1 { + return time.Time{}, nil + } + return time.Unix(t, tNano), nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/volume/backend.go b/vendor/github.com/moby/moby/api/server/router/volume/backend.go new file mode 100644 index 0000000..180c06e --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/volume/backend.go @@ -0,0 +1,17 @@ +package volume + +import ( + // TODO return types need to be refactored into pkg + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// Backend is the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + VolumeRm(name string, force bool) error + VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/volume/volume.go b/vendor/github.com/moby/moby/api/server/router/volume/volume.go new file mode 100644 index 0000000..4e9f972 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/volume/volume.go @@ -0,0 +1,36 @@ +package volume + +import "github.com/docker/docker/api/server/router" + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volume router +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/volumes", r.getVolumesList), + router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + router.NewPostRoute("/volumes/create", r.postVolumesCreate), + router.NewPostRoute("/volumes/prune", r.postVolumesPrune), + // DELETE + router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/volume/volume_routes.go b/vendor/github.com/moby/moby/api/server/router/volume/volume_routes.go new file mode 100644 index 0000000..cfd4618 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/volume/volume_routes.go @@ -0,0 +1,80 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &volumetypes.VolumesListOKBody{Volumes: volumes, Warnings: warnings}) +} + +func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volume, err := v.backend.VolumeInspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, volume) +} + +func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var req volumetypes.VolumesCreateBody + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusCreated, volume) +} + +func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + force := httputils.BoolValue(r, "force") + if err := v.backend.VolumeRm(vars["name"], force); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneReport, err := v.backend.VolumesPrune(filters.Args{}) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/moby/moby/api/server/router_swapper.go b/vendor/github.com/moby/moby/api/server/router_swapper.go new file mode 100644 index 0000000..1ecc7a7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router_swapper.go @@ -0,0 +1,30 @@ +package server + +import ( + "net/http" + "sync" + + "github.com/gorilla/mux" +) + +// routerSwapper is an http.Handler that allows you to swap +// mux routers. +type routerSwapper struct { + mu sync.Mutex + router *mux.Router +} + +// Swap changes the old router with the new one. +func (rs *routerSwapper) Swap(newRouter *mux.Router) { + rs.mu.Lock() + rs.router = newRouter + rs.mu.Unlock() +} + +// ServeHTTP makes the routerSwapper to implement the http.Handler interface. +func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { + rs.mu.Lock() + router := rs.router + rs.mu.Unlock() + router.ServeHTTP(w, r) +} diff --git a/vendor/github.com/moby/moby/api/server/server.go b/vendor/github.com/moby/moby/api/server/server.go new file mode 100644 index 0000000..60ee075 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/server.go @@ -0,0 +1,210 @@ +package server + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// versionMatcher defines a variable matcher to be parsed by the router +// when a request is about to be served. +const versionMatcher = "/v{version:[0-9.]+}" + +// Config provides the configuration for the API server +type Config struct { + Logging bool + EnableCors bool + CorsHeaders string + Version string + SocketGroup string + TLSConfig *tls.Config +} + +// Server contains instance details for the server +type Server struct { + cfg *Config + servers []*HTTPServer + routers []router.Router + routerSwapper *routerSwapper + middlewares []middleware.Middleware +} + +// New returns a new instance of the server based on the specified configuration. +// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). +func New(cfg *Config) *Server { + return &Server{ + cfg: cfg, + } +} + +// UseMiddleware appends a new middleware to the request chain. +// This needs to be called before the API routes are configured. +func (s *Server) UseMiddleware(m middleware.Middleware) { + s.middlewares = append(s.middlewares, m) +} + +// Accept sets a listener the server accepts connections into. +func (s *Server) Accept(addr string, listeners ...net.Listener) { + for _, listener := range listeners { + httpServer := &HTTPServer{ + srv: &http.Server{ + Addr: addr, + }, + l: listener, + } + s.servers = append(s.servers, httpServer) + } +} + +// Close closes servers and thus stop receiving requests +func (s *Server) Close() { + for _, srv := range s.servers { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + } +} + +// serveAPI loops through all initialized servers and spawns goroutine +// with Serve method for each. It sets createMux() as Handler also. +func (s *Server) serveAPI() error { + var chErrors = make(chan error, len(s.servers)) + for _, srv := range s.servers { + srv.srv.Handler = s.routerSwapper + go func(srv *HTTPServer) { + var err error + logrus.Infof("API listen on %s", srv.l.Addr()) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(srv) + } + + for i := 0; i < len(s.servers); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +// HTTPServer contains an instance of http server and the listener. +// srv *http.Server, contains configuration to create an http server and a mux router with all api end points. +// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. +type HTTPServer struct { + srv *http.Server + l net.Listener +} + +// Serve starts listening for inbound requests. +func (s *HTTPServer) Serve() error { + return s.srv.Serve(s.l) +} + +// Close closes the HTTPServer from listening for the inbound requests. +func (s *HTTPServer) Close() error { + return s.l.Close() +} + +func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Define the context that we'll pass around to share info + // like the docker-request-id. + // + // The 'context' will be used for global data that should + // apply to all requests. Data that is specific to the + // immediate function being called should still be passed + // as 'args' on the function call. + ctx := context.WithValue(context.Background(), httputils.UAStringKey, r.Header.Get("User-Agent")) + handlerFunc := s.handlerWithGlobalMiddlewares(handler) + + vars := mux.Vars(r) + if vars == nil { + vars = make(map[string]string) + } + + if err := handlerFunc(ctx, w, r, vars); err != nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + errFormat := "%v" + if statusCode == http.StatusInternalServerError { + errFormat = "%+v" + } + logrus.Errorf("Handler for %s %s returned error: "+errFormat, r.Method, r.URL.Path, err) + httputils.MakeErrorHandler(err)(w, r) + } + } +} + +// InitRouter initializes the list of routers for the server. +// This method also enables the Go profiler if enableProfiler is true. +func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) { + s.routers = append(s.routers, routers...) + + m := s.createMux() + if enableProfiler { + profilerSetup(m) + } + s.routerSwapper = &routerSwapper{ + router: m, + } +} + +// createMux initializes the main router the server uses. +func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + + logrus.Debug("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + err := errors.NewRequestNotFoundError(fmt.Errorf("page not found")) + notFoundHandler := httputils.MakeErrorHandler(err) + m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) + m.NotFoundHandler = notFoundHandler + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution. +func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} + +// DisableProfiler reloads the server mux without adding the profiler routes. +func (s *Server) DisableProfiler() { + s.routerSwapper.Swap(s.createMux()) +} + +// EnableProfiler reloads the server mux adding the profiler routes. +func (s *Server) EnableProfiler() { + m := s.createMux() + profilerSetup(m) + s.routerSwapper.Swap(m) +} diff --git a/vendor/github.com/moby/moby/api/server/server_test.go b/vendor/github.com/moby/moby/api/server/server_test.go new file mode 100644 index 0000000..11831c1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/server_test.go @@ -0,0 +1,46 @@ +package server + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + + "golang.org/x/net/context" +) + +func TestMiddlewares(t *testing.T) { + cfg := &Config{ + Version: "0.1omega2", + } + srv := &Server{ + cfg: cfg, + } + + srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion)) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + + if sv := w.Header().Get("Server"); !strings.Contains(sv, "Docker/0.1omega2") { + t.Fatalf("Expected server version in the header `Docker/0.1omega2`, got %s", sv) + } + + return nil + } + + handlerFunc := srv.handlerWithGlobalMiddlewares(localHandler) + if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/api/swagger-gen.yaml b/vendor/github.com/moby/moby/api/swagger-gen.yaml new file mode 100644 index 0000000..f07a027 --- /dev/null +++ b/vendor/github.com/moby/moby/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/moby/moby/api/swagger.yaml b/vendor/github.com/moby/moby/api/swagger.yaml new file mode 100644 index 0000000..acbbe47 --- /dev/null +++ b/vendor/github.com/moby/moby/api/swagger.yaml @@ -0,0 +1,7939 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.27" +info: + title: "Docker Engine API" + version: "1.27" + x-logo: + url: "https://docs.docker.com/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. + + For Docker Engine >= 17.03.1, the API version is 1.27. To lock to this version, you prefix the URL with `/v1.27`. For example, calling `/info` is the same as calling `/v1.27/info`. + + Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. + + In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated will be removed in a future version of Docker. + + The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. + + This documentation is for version 1.27 of the API, which was introduced with Docker 17.03.1. Use this table to find documentation for previous versions of the API: + + Docker version | API version | Changes + ----------------|-------------|--------- + 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes) + 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes) + 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes) + 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes) + 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes) + 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes) + 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes) + 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes) + 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes) + + # Authentication + + Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldly. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + + To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + default: {} + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DiskQuota: + description: "Disk limit (in bytes)." + type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + PidsLimit: + description: "Tune a container's pids limit. Set -1 for unlimited." + type: "integer" + format: "int64" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: "Maximum IO in bytes per second for the container system drive (Windows only)" + type: "integer" + format: "int64" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: "The time to wait between checks in nanoseconds. 0 means inherit." + type: "integer" + Timeout: + description: "The time to wait before considering the check to have hung. 0 means inherit." + type: "integer" + Retries: + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + type: "integer" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. + - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to." + PortBindings: + type: "object" + description: "A map of exposed container ports and the host port they should map to." + additionalProperties: + type: "object" + properties: + HostIp: + type: "string" + description: "The host IP address" + HostPort: + type: "string" + description: "The host port number, as a string" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `[:]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container." + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container." + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: "IPC namespace to use for the container." + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: "Allocates a random host port for all of a container's exposed ports." + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + ShmSize: + type: "integer" + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array. (Windows only)" + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: "Isolation technology of the container. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + + Config: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]` + type: "array" + items: + type: "string" + Cmd: + description: "Command to run specified as a string or an array of strings." + type: + - "array" + - "string" + items: + type: "string" + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + properties: + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: + - "array" + - "string" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." + type: "array" + items: + type: "string" + + NetworkConfig: + description: "TODO: check is correct" + type: "object" + properties: + Bridge: + type: "string" + Gateway: + type: "string" + Address: + type: "string" + IPPrefixLen: + type: "integer" + MacAddress: + type: "string" + PortMapping: + type: "string" + Ports: + type: "array" + items: + $ref: "#/definitions/Port" + + GraphDriver: + description: "Information about this container's graph driver." + type: "object" + properties: + Name: + type: "string" + Data: + type: "object" + additionalProperties: + type: "string" + + Image: + type: "object" + properties: + Id: + type: "string" + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + Comment: + type: "string" + Created: + type: "string" + Container: + type: "string" + ContainerConfig: + $ref: "#/definitions/Config" + DockerVersion: + type: "string" + Author: + type: "string" + Config: + $ref: "#/definitions/Config" + Architecture: + type: "string" + Os: + type: "string" + Size: + type: "integer" + format: "int64" + VirtualSize: + type: "integer" + format: "int64" + GraphDriver: + $ref: "#/definitions/GraphDriver" + RootFS: + type: "object" + properties: + Type: + type: "string" + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver specific options used when creating the volume." + additionalProperties: + type: "string" + UsageData: + type: "object" + required: [Size, RefCount] + properties: + Size: + type: "integer" + description: "The disk space used by the volume (local driver only)" + default: -1 + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: "The number of containers referencing this volume." + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Attachable: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Attachable: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: + type: "object" + properties: + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + CreateImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + ProgressDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + IPAMConfig: + description: "IPAM configurations for the endpoint" + type: "object" + properties: + IPv4Address: + type: "string" + IPv6Address: + type: "string" + LinkLocalIPs: + type: "array" + items: + type: "string" + Links: + type: "array" + items: + type: "string" + Aliases: + type: "array" + items: + type: "string" + NetworkID: + type: "string" + EndpointID: + type: "string" + Gateway: + type: "string" + IPAddress: + type: "string" + IPPrefixLen: + type: "integer" + IPv6Gateway: + type: "string" + GlobalIPv6Address: + type: "string" + GlobalIPv6PrefixLen: + type: "integer" + format: "int64" + MacAddress: + type: "string" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + Destination: + type: "string" + x-nullable: false + Type: + type: "string" + x-nullable: false + Options: + type: "array" + items: + type: "string" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + Name: + type: "string" + x-nullable: false + Enabled: + description: "True when the plugin is running. False when the plugin is not running, only installed." + type: "boolean" + x-nullable: false + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PropagatedMount + - Mounts + - Env + - Args + properties: + Description: + type: "string" + x-nullable: false + Documentation: + type: "string" + x-nullable: false + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + Socket: + type: "string" + x-nullable: false + Entrypoint: + type: "array" + items: + type: "string" + WorkDir: + type: "string" + x-nullable: false + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + GID: + type: "integer" + format: "uint32" + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + AllowAllDevices: + type: "boolean" + x-nullable: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + diff_ids: + type: "array" + items: + type: "string" + example: + Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. + The client must send the version number along with the modified specification when updating these objects. + This approach ensures safe concurrency and determinism in that the change on the object + may not be applied if the version number has changed from the last read. In other words, + if two update requests specify the same base version, only one of the requests can succeed. + As a result, two separate update requests that happen at the same time will not + unintentially overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "int64" + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + Node: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + type: "object" + properties: + Hostname: + type: "string" + Platform: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + Resources: + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + MemoryBytes: + type: "integer" + format: "int64" + Engine: + type: "object" + properties: + EngineVersion: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + ID: "24ifsmvkjbyhk" + Version: + Index: 8 + CreatedAt: "2016-06-07T20:31:11.853781916Z" + UpdatedAt: "2016-06-07T20:31:11.999868824Z" + Spec: + Name: "my-node" + Role: "manager" + Availability: "active" + Labels: + foo: "bar" + Description: + Hostname: "bf3067039e47" + Platform: + Architecture: "x86_64" + OS: "linux" + Resources: + NanoCPUs: 4000000000 + MemoryBytes: 8272408576 + Engine: + EngineVersion: "1.13.0" + Labels: + foo: "bar" + Plugins: + - Type: "Volume" + Name: "local" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + Status: + State: "ready" + Addr: "172.17.0.2" + ManagerStatus: + Leader: true + Reachability: "reachable" + Addr: "172.17.0.2:2377" + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Orchestration: + description: "Orchestration configuration." + type: "object" + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "int64" + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "int64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." + type: "integer" + format: "int64" + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + Dispatcher: + description: "Dispatcher configuration." + type: "object" + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + CAConfig: + description: "CA configuration." + type: "object" + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." + type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: "If set, generate a key and use it to lock data stored on the managers." + type: "boolean" + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if unspecified by a service. + + Updating this value will only have an affect on new tasks. Old tasks will continue use their previously configured log driver until recreated. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + example: + Name: "default" + Orchestration: + TaskHistoryRetentionLimit: 10 + Raft: + SnapshotInterval: 10000 + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + ElectionTick: 3 + Dispatcher: + HeartbeatPeriod: 5000000000 + CAConfig: + NodeCertExpiry: 7776000000000000 + JoinTokens: + Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + EncryptionConfig: + AutoLockManagers: false + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/SwarmSpec" + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + ContainerSpec: + type: "object" + properties: + Image: + description: "The image name to use for the container." + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: "Specification for mounts to be added to containers created as part of the service." + type: "array" + items: + $ref: "#/definitions/Mount" + StopGracePeriod: + description: "Amount of time to wait for the container to terminate before forcefully killing it." + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. + The format of extra hosts on swarmkit is specified in: + http://man7.org/linux/man-pages/man5/hosts.5.html + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." + type: "array" + items: + type: "string" + Secrets: + description: "Secrets contains references to zero or more secrets that will be exposed to the service." + type: "array" + items: + type: "object" + properties: + File: + description: "File represents a specific target that is backed by a file." + type: "object" + properties: + Name: + description: "Name represents the final filename in the filesystem." + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: "SecretID represents the ID of the specific secret that we're referencing." + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, but this is just provided for + lookup/display purposes. The secret in the reference will be identified by its ID. + type: "string" + + Resources: + description: "Resource requirements which apply to each individual container created as part of the service." + type: "object" + properties: + Limits: + description: "Define resources limits." + type: "object" + properties: + NanoCPUs: + description: "CPU limit in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory limit in Bytes." + type: "integer" + format: "int64" + Reservation: + description: "Define resources reservation." + properties: + NanoCPUs: + description: "CPU reservation in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory reservation in Bytes." + type: "integer" + format: "int64" + RestartPolicy: + description: "Specification for the restart policy which applies to containers created as part of this service." + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + type: "integer" + format: "int64" + default: 0 + Window: + description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: "An array of constraints." + type: "array" + items: + type: "string" + ForceUpdate: + description: "A counter that triggers an update even if no relevant parameters have been changed." + type: "integer" + Networks: + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + LogDriver: + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an updated task fails to run, or stops running during the update." + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: "Amount of time to monitor each updated task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Networks: + description: "Array of network names or IDs to attach the service to." + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: "The mode of resolution to use for internal load balancing + between tasks." + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + ImageDeleteResponse: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: "Base64-url-safe-encoded secret data" + type: "array" + items: + type: "string" + Secret: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" +paths: + /containers/json: + get: + summary: "List containers" + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: "Return all containers. By default, only running containers are shown" + type: "boolean" + default: false + - name: "limit" + in: "query" + description: "Return this number of most recently created containers, including non-running ones." + type: "integer" + - name: "size" + in: "query" + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters: + + - `ancestor`=(`[:]`, ``, or ``) + - `before`=(`` or ``) + - `exited=` containers with exit code of `` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=` a container's name + - `network`=(`` or ``) + - `since`=(`` or ``) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`` or ``) + type: "string" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/Config" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: -1 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: "object" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 406: + description: "impossible to attach" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: "The status of the container. For example, `running` or `exited`." + type: "string" + Running: + description: "Whether this container is running." + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." + type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." + type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + type: "string" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriver" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." + type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/Config" + NetworkSettings: + $ref: "#/definitions/NetworkConfig" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + SecondaryIPAddresses: null + SecondaryIPv6Addresses: null + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. + operationId: "ContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + type: "object" + properties: + Path: + description: "Path to file that has changed" + type: "string" + Kind: + description: "Kind of change" + type: "integer" + enum: + - 0 + - 1 + - 2 + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage statistics. + + The `precpu_stats` is the CPU statistic of last read, which is used + for calculating the CPU usage percentage. It is not the same as the + `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + operationId: "ContainerStats" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." + type: "boolean" + default: true + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing to the container." + operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Stream attached streams from the the time the request was made onwards" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`." + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or use -f" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." + operationId: "ContainerArchiveHead" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "TODO" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get an tar archive of a resource in the filesystem of container id." + operationId: "ContainerGetArchive" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "ContainerPutArchive" + consumes: + - "application/x-tar" + - "application/octet-stream" + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `before`=(`[:]`, `` or ``) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`[:]`) + - `since`=(`[:]`, `` or ``) + type: "string" + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)" + type: "integer" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:`. Any other value is taken as a custom network's + name to which this container should connect to." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/tar" + default: "application/tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." + operationId: "ImageHistory" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + Id: + type: "string" + Created: + type: "integer" + format: "int64" + CreatedBy: + type: "string" + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + Comment: + type: "string" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were referenced by that image. + + Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + Architecture: + type: "string" + Containers: + type: "integer" + ContainersRunning: + type: "integer" + ContainersStopped: + type: "integer" + ContainersPaused: + type: "integer" + CpuCfsPeriod: + type: "boolean" + CpuCfsQuota: + type: "boolean" + Debug: + type: "boolean" + DiscoveryBackend: + type: "string" + DockerRootDir: + type: "string" + Driver: + type: "string" + DriverStatus: + type: "array" + items: + type: "array" + items: + type: "string" + SystemStatus: + type: "array" + items: + type: "array" + items: + type: "string" + Plugins: + type: "object" + properties: + Volume: + type: "array" + items: + type: "string" + Network: + type: "array" + items: + type: "string" + ExperimentalBuild: + type: "boolean" + HttpProxy: + type: "string" + HttpsProxy: + type: "string" + ID: + type: "string" + IPv4Forwarding: + type: "boolean" + Images: + type: "integer" + IndexServerAddress: + type: "string" + InitPath: + type: "string" + InitSha1: + type: "string" + KernelVersion: + type: "string" + Labels: + type: "array" + items: + type: "string" + MemTotal: + type: "integer" + MemoryLimit: + type: "boolean" + NCPU: + type: "integer" + NEventsListener: + type: "integer" + NFd: + type: "integer" + NGoroutines: + type: "integer" + Name: + type: "string" + NoProxy: + type: "string" + OomKillDisable: + type: "boolean" + OSType: + type: "string" + OomScoreAdj: + type: "integer" + OperatingSystem: + type: "string" + RegistryConfig: + type: "object" + properties: + IndexConfigs: + type: "object" + additionalProperties: + type: "object" + properties: + Mirrors: + type: "array" + items: + type: "string" + Name: + type: "string" + Official: + type: "boolean" + Secure: + type: "boolean" + InsecureRegistryCIDRs: + type: "array" + items: + type: "string" + SwapLimit: + type: "boolean" + SystemTime: + type: "string" + ServerVersion: + type: "string" + examples: + application/json: + Architecture: "x86_64" + ClusterStore: "etcd://localhost:2379" + CgroupDriver: "cgroupfs" + Containers: 11 + ContainersRunning: 7 + ContainersStopped: 3 + ContainersPaused: 1 + CpuCfsPeriod: true + CpuCfsQuota: true + Debug: false + DockerRootDir: "/var/lib/docker" + Driver: "btrfs" + DriverStatus: + - + - "" + ExperimentalBuild: false + HttpProxy: "http://test:test@localhost:8080" + HttpsProxy: "https://test:test@localhost:8080" + ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + IPv4Forwarding: true + Images: 16 + IndexServerAddress: "https://index.docker.io/v1/" + InitPath: "/usr/bin/docker" + InitSha1: "" + KernelMemory: true + KernelVersion: "3.12.0-1-amd64" + Labels: + - "storage=ssd" + MemTotal: 2099236864 + MemoryLimit: true + NCPU: 1 + NEventsListener: 0 + NFd: 11 + NGoroutines: 21 + Name: "prod-server-42" + NoProxy: "9.81.1.160" + OomKillDisable: true + OSType: "linux" + OperatingSystem: "Boot2Docker" + Plugins: + Volume: + - "local" + Network: + - "null" + - "host" + - "bridge" + RegistryConfig: + IndexConfigs: + docker.io: + Name: "docker.io" + Official: true + Secure: true + InsecureRegistryCIDRs: + - "127.0.0.0/8" + SecurityOptions: + - Key: "Name" + Value: "seccomp" + - Key: "Profile" + Value: "default" + - Key: "Name" + Value: "apparmor" + - Key: "Name" + Value: "selinux" + - Key: "Name" + Value: "userns" + ServerVersion: "1.9.0" + SwapLimit: false + SystemStatus: + - + - "State" + - "Healthy" + SystemTime: "2015-03-10T11:11:23.730591467-07:00" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "1.13.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.6.3" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.25" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: + - "text/plain" + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/Config" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update` + + Images report these events: `delete, import, load, pull, push, save, tag, untag` + + Volumes report these events: `create, mount, unmount, destroy` + + Networks report these events: `create, connect, disconnect, destroy` + + The Docker daemon reports these events: `reload` + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `plugin`= plugin name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon` + - `volume=` volume name or ID + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "" + Labels: null + Scope: "" + Options: null + UsageData: + Size: 0 + RefCount: 0 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." + operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches volumes based on their driver. + - `label=` or `label=:` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Containers: + 39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867: + EndpointID: "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda" + MacAddress: "02:42:ac:11:00:02" + IPv4Address: "172.17.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + consumes: + - "application/json" + produces: + - "application/json" + operationId: "NetworkPrune" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + example: + - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: "The version number of the node object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + description: "The tokens workers and managers need to join the swarm." + type: "object" + properties: + Worker: + description: "The token workers can use to join the swarm." + type: "string" + Manager: + description: "The token managers can use to join the swarm." + type: "string" + example: + CreatedAt: "2016-08-15T16:00:20.349727406Z" + Spec: + Dispatcher: + HeartbeatPeriod: 5000000000 + Orchestration: + TaskHistoryRetentionLimit: 10 + CAConfig: + NodeCertExpiry: 7776000000000000 + Raft: + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + SnapshotInterval: 10000 + ElectionTick: 3 + TaskDefaults: {} + EncryptionConfig: + AutoLockManagers: false + Name: "default" + JoinTokens: + Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" + Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + ID: "70ilmkj2f6sp2137c753w2nmt" + UpdatedAt: "2016-08-15T16:32:09.623207604Z" + Version: + Index: 51 + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or that it will break the cluster." + in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=` + - `label=` + - `name=` + tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created service." + type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Delay: 30000000000 + Parallelism: 2 + FailureAction: "pause" + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ImageDeleteResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: "The version number of the service object being updated. This is required to avoid conflicting writes." + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + type: "string" + description: "If the X-Registry-Auth header is not specified, this + parameter indicates where to find registry authorization credentials. The + valid values are `spec` and `previous-spec`." + default: "spec" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + operationId: "ServiceLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "details" + in: "query" + description: "Show extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created secret." + type: "string" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + - name: "version" + in: "query" + description: "The version number of the secret object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Secret"] diff --git a/vendor/github.com/moby/moby/api/templates/server/operation.gotmpl b/vendor/github.com/moby/moby/api/templates/server/operation.gotmpl new file mode 100644 index 0000000..3a3d752 --- /dev/null +++ b/vendor/github.com/moby/moby/api/templates/server/operation.gotmpl @@ -0,0 +1,26 @@ +package {{ .Package }} + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +import ( + "net/http" + + context "golang.org/x/net/context" + + {{ range .DefaultImports }}{{ printf "%q" . }} + {{ end }} + {{ range $key, $value := .Imports }}{{ $key }} {{ printf "%q" $value }} + {{ end }} +) + + +{{ range .ExtraSchemas }} +// {{ .Name }} {{ template "docstring" . }} +// swagger:model {{ .Name }} +{{ template "schema" . }} +{{ end }} diff --git a/vendor/github.com/moby/moby/api/types/auth.go b/vendor/github.com/moby/moby/api/types/auth.go new file mode 100644 index 0000000..056af6b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/auth.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/backend/backend.go b/vendor/github.com/moby/moby/api/types/backend/backend.go new file mode 100644 index 0000000..abc0bba --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/backend/backend.go @@ -0,0 +1,84 @@ +// Package backend includes types to send information to server backends. +package backend + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/streamformatter" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys string + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/sderr messages accordingly. + // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... + // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// ContainerLogsConfig holds configs for logging operations. Exists +// for users of the backend to to pass it a logging configuration. +type ContainerLogsConfig struct { + types.ContainerLogsOptions + OutStream io.Writer +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. +type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Pid int +} + +// ExecProcessConfig holds information about the exec process +// running on the host. +type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} + +// ContainerCommitConfig is a wrapper around +// types.ContainerCommitConfig that also +// transports configuration changes for a container. +type ContainerCommitConfig struct { + types.ContainerCommitConfig + Changes []string +} + +// ProgressWriter is an interface +// to transport progress streams. +type ProgressWriter struct { + Output io.Writer + StdoutFormatter *streamformatter.StdoutFormatter + StderrFormatter *streamformatter.StderrFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} diff --git a/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go new file mode 100644 index 0000000..931ae10 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/moby/moby/api/types/client.go b/vendor/github.com/moby/moby/api/types/client.go new file mode 100644 index 0000000..7900d64 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/client.go @@ -0,0 +1,378 @@ +package types + +import ( + "bufio" + "io" + "net" + "os" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // See the parsing of buildArgs in api/server/router/build/build_routes.go + // for an explaination of why BuildArgs needs to use *string instead of + // just a string + BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) + SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +//ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// VersionResponse holds version information for the client and the server +type VersionResponse struct { + Client *Version + Server *Version +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v VersionResponse) ServerOK() bool { + return v.Server != nil +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SecretRequestOption is a type for requesting secrets +type SecretRequestOption struct { + Source string + Target string + UID string + GID string + Mode os.FileMode +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/moby/moby/api/types/configs.go b/vendor/github.com/moby/moby/api/types/configs.go new file mode 100644 index 0000000..20c19f2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/configs.go @@ -0,0 +1,69 @@ +package types + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. +type PluginDisableConfig struct { + ForceDisable bool +} diff --git a/vendor/github.com/moby/moby/api/types/container/config.go b/vendor/github.com/moby/moby/api/types/container/config.go new file mode 100644 index 0000000..fc050e5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/config.go @@ -0,0 +1,62 @@ +package container + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/moby/moby/api/types/container/container_create.go b/vendor/github.com/moby/moby/api/types/container/container_create.go new file mode 100644 index 0000000..d028e3b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody container create created body +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/container_update.go b/vendor/github.com/moby/moby/api/types/container/container_update.go new file mode 100644 index 0000000..81ee12c --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody container update o k body +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/container_wait.go b/vendor/github.com/moby/moby/api/types/container/container_wait.go new file mode 100644 index 0000000..16cf335 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container_wait.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBody container wait o k body +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/host_config.go b/vendor/github.com/moby/moby/api/types/container/host_config.go new file mode 100644 index 0000000..0c82d62 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/host_config.go @@ -0,0 +1,333 @@ +package container + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + +// NetworkMode represents the container network stack. +type NetworkMode string + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its private ipc stack. +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's ipc stack. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's ipc stack. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the ipc stack is valid. +func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses the a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart of exiting with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless user has put it to stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DiskQuota int64 // Disk limit (in bytes) + KernelMemory int64 // Kernel memory limit (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit int64 // Setting pids limit for a container + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig the non-portable Config structure of a container. +// Here, "non-portable" means "dependent of the host we are running on". +// Portable information *should* appear in Config. +type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + ConsoleSize [2]uint // Initial console size (height,width) + Isolation Isolation // Isolation technology of the container (eg default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` + + // Custom init path + InitPath string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go new file mode 100644 index 0000000..9fb79be --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package container + +import "strings" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go new file mode 100644 index 0000000..0ee332b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go @@ -0,0 +1,87 @@ +package container + +import ( + "strings" +) + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsContainer indicates whether container uses a container network stack. +// Returns false as windows doesn't support this mode +func (n NetworkMode) IsContainer() bool { + return false +} + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether container uses the host network stack. +// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// ConnectedContainer is the id of the container which network this container is connected to. +// Returns blank string on windows +func (n NetworkMode) ConnectedContainer() string { + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/moby/moby/api/types/error_response.go b/vendor/github.com/moby/moby/api/types/error_response.go new file mode 100644 index 0000000..dc942d9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/moby/moby/api/types/events/events.go b/vendor/github.com/moby/moby/api/types/events/events.go new file mode 100644 index 0000000..7129a65 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/events/events.go @@ -0,0 +1,42 @@ +package events + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // DaemonEventType is the event type that daemon generate + DaemonEventType = "daemon" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" + // PluginEventType is the event type that plugins generate + PluginEventType = "plugin" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set or attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + + Type string + Action string + Actor Actor + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/filters/parse.go b/vendor/github.com/moby/moby/api/types/filters/parse.go new file mode 100644 index 0000000..e01a41d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/filters/parse.go @@ -0,0 +1,310 @@ +// Package filters provides helper function to parse and handle command line +// filter, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" +) + +// Args stores filter arguments as map key:{map key: bool}. +// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values. +// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +type Args struct { + fields map[string]map[string]bool +} + +// NewArgs initializes a new Args struct. +func NewArgs() Args { + return Args{fields: map[string]map[string]bool{}} +} + +// ParseFlag parses the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into a string for easy transport from client to server. +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + buf, err := json.Marshal(a.fields) + if err != nil { + return "", err + } + return string(buf), nil +} + +// ToParamWithVersion packs the Args into a string for easy transport from client to server. +// The generated string will depend on the specified version (corresponding to the API version). +func ToParamWithVersion(version string, a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + // for daemons older than v1.10, filter must be of the form map[string][]string + buf := []byte{} + err := errors.New("") + if version != "" && versions.LessThan(version, "1.22") { + buf, err = json.Marshal(convertArgsToSlice(a.fields)) + } else { + buf, err = json.Marshal(a.fields) + } + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + if len(p) == 0 { + return NewArgs(), nil + } + + r := strings.NewReader(p) + d := json.NewDecoder(r) + + m := map[string]map[string]bool{} + if err := d.Decode(&m); err != nil { + r.Seek(0, 0) + + // Allow parsing old arguments in slice format. + // Because other libraries might be sending them in this format. + deprecated := map[string][]string{} + if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { + m = deprecatedArgs(deprecated) + } else { + return NewArgs(), err + } + } + return Args{m}, nil +} + +// Get returns the list of values associates with a field. +// It returns a slice of strings to keep backwards compatibility with old code. +func (filters Args) Get(field string) []string { + values := filters.fields[field] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add adds a new value to a filter field. +func (filters Args) Add(name, value string) { + if _, ok := filters.fields[name]; ok { + filters.fields[name][value] = true + } else { + filters.fields[name] = map[string]bool{value: true} + } +} + +// Del removes a value from a filter field. +func (filters Args) Del(name, value string) { + if _, ok := filters.fields[name]; ok { + delete(filters.fields[name], value) + if len(filters.fields[name]) == 0 { + delete(filters.fields, name) + } + } +} + +// Len returns the number of fields in the arguments. +func (filters Args) Len() int { + return len(filters.fields) +} + +// MatchKVList returns true if the values for the specified field matches the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'label' and sources are {'label1': '1', 'label2': '2'} +// it returns true. +func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters.fields[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if the values for the specified field matches the source string +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. +func (filters Args) Match(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the filters. +func (filters Args) ExactMatch(field, source string) bool { + fieldValues, ok := filters.fields[field] + //do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. +func (filters Args) UniqueExactMatch(field, source string) bool { + fieldValues := filters.fields[field] + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + if len(filters.fields[field]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// FuzzyMatch returns true if the source matches exactly one of the filters, +// or the source has one of the filters as a prefix. +func (filters Args) FuzzyMatch(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Include returns true if the name of the field to filter is in the filters. +func (filters Args) Include(field string) bool { + _, ok := filters.fields[field] + return ok +} + +// Validate ensures that all the fields in the filter are valid. +// It returns an error as soon as it finds an invalid field. +func (filters Args) Validate(accepted map[string]bool) error { + for name := range filters.fields { + if !accepted[name] { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + return nil +} + +// WalkValues iterates over the list of filtered values for a field. +// It stops the iteration if it finds an error and it returns that error. +func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { + return nil + } + for v := range filters.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/moby/moby/api/types/filters/parse_test.go b/vendor/github.com/moby/moby/api/types/filters/parse_test.go new file mode 100644 index 0000000..b2ed27b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/filters/parse_test.go @@ -0,0 +1,417 @@ +package filters + +import ( + "fmt" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = NewArgs() + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args.Get("created")) != 1 { + t.Errorf("failed to set this arg") + } + if len(args.Get("image.name")) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args.Len() != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat { + t.Fatalf("Expected ErrBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func TestToParamWithVersion(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + str1, err := ToParamWithVersion("1.21", a) + if err != nil { + t.Errorf("failed to marshal the filters with version < 1.22: %s", err) + } + str2, err := ToParamWithVersion("1.22", a) + if err != nil { + t.Errorf("failed to marshal the filters with version >= 1.22: %s", err) + } + if str1 != `{"created":["today"],"image.name":["*untu","ubuntu*"]}` && + str1 != `{"created":["today"],"image.name":["ubuntu*","*untu"]}` { + t.Errorf("incorrectly marshaled the filters: %s", str1) + } + if str2 != `{"created":{"today":true},"image.name":{"*untu":true,"ubuntu*":true}}` && + str2 != `{"created":{"today":true},"image.name":{"ubuntu*":true,"*untu":true}}` { + t.Errorf("incorrectly marshaled the filters: %s", str2) + } +} + +func TestFromParam(t *testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valid := map[*Args][]string{ + &Args{fields: map[string]map[string]bool{"key": {"value": true}}}: { + `{"key": ["value"]}`, + `{"key": {"value": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { + `{"key": ["value1", "value2"]}`, + `{"key": {"value1": true, "value2": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { + `{"key1": ["value1"], "key2": ["value2"]}`, + `{"key1": {"value1": true}, "key2": {"value2": true}}`, + }, + } + + for _, invalid := range invalids { + if _, err := FromParam(invalid); err == nil { + t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + + for expectedArgs, matchers := range valid { + for _, json := range matchers { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if args.Len() != expectedArgs.Len() { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + for key, expectedValues := range expectedArgs.fields { + values := args.Get(key) + + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + + for _, v := range values { + if !expectedValues[v] { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if a.Len() != v1.Len() { + t.Errorf("these should both be empty sets") + } +} + +func TestArgsMatchKVListEmptySources(t *testing.T) { + args := NewArgs() + if !args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected true for (%v,created), got true", args) + } + + args = Args{map[string]map[string]bool{"created": {"today": true}}} + if args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected false for (%v,created), got true", args) + } +} + +func TestArgsMatchKVList(t *testing.T) { + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value1": true}}, + }: "labels", + } + + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key4": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value3": true}}, + }: "labels", + } + + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "today", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to*": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tod": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"anyting": true, "to*": true}}, + }: "created", + } + + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tomorrow": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(day": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today1": true}, + "labels": map[string]bool{"today": true}}, + }: "created", + } + + for args, field := range differs { + if args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} + +func TestAdd(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + v := f.fields["status"] + if len(v) != 1 || !v["running"] { + t.Fatalf("Expected to include a running status, got %v", v) + } + + f.Add("status", "paused") + if len(v) != 2 || !v["paused"] { + t.Fatalf("Expected to include a paused status, got %v", v) + } +} + +func TestDel(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Del("status", "running") + v := f.fields["status"] + if v["running"] { + t.Fatalf("Expected to not include a running status filter, got true") + } +} + +func TestLen(t *testing.T) { + f := NewArgs() + if f.Len() != 0 { + t.Fatalf("Expected to not include any field") + } + f.Add("status", "running") + if f.Len() != 1 { + t.Fatalf("Expected to include one field") + } +} + +func TestExactMatch(t *testing.T) { + f := NewArgs() + + if !f.ExactMatch("status", "running") { + t.Fatalf("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + f.Add("status", "pause*") + + if !f.ExactMatch("status", "running") { + t.Fatalf("Expected to match `running` with one of the filters, got false") + } + + if f.ExactMatch("status", "paused") { + t.Fatalf("Expected to not match `paused` with one of the filters, got true") + } +} + +func TestOnlyOneExactMatch(t *testing.T) { + f := NewArgs() + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` with one of the filters, got false") + } + + if f.UniqueExactMatch("status", "paused") { + t.Fatalf("Expected to not match `paused` with one of the filters, got true") + } + + f.Add("status", "pause") + if f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to not match only `running` with two filters, got true") + } +} + +func TestInclude(t *testing.T) { + f := NewArgs() + if f.Include("status") { + t.Fatalf("Expected to not include a status key, got true") + } + f.Add("status", "running") + if !f.Include("status") { + t.Fatalf("Expected to include a status key, got false") + } +} + +func TestValidate(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + + valid := map[string]bool{ + "status": true, + "dangling": true, + } + + if err := f.Validate(valid); err != nil { + t.Fatal(err) + } + + f.Add("bogus", "running") + if err := f.Validate(valid); err == nil { + t.Fatalf("Expected to return an error, got nil") + } +} + +func TestWalkValues(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Add("status", "paused") + + f.WalkValues("status", func(value string) error { + if value != "running" && value != "paused" { + t.Fatalf("Unexpected value %s", value) + } + return nil + }) + + err := f.WalkValues("status", func(value string) error { + return fmt.Errorf("return") + }) + if err == nil { + t.Fatalf("Expected to get an error, got nil") + } + + err = f.WalkValues("foo", func(value string) error { + return fmt.Errorf("return") + }) + if err != nil { + t.Fatalf("Expected to not iterate when the field doesn't exist, got %v", err) + } +} + +func TestFuzzyMatch(t *testing.T) { + f := NewArgs() + f.Add("container", "foo") + + cases := map[string]bool{ + "foo": true, + "foobar": true, + "barfoo": false, + "bar": false, + } + for source, match := range cases { + got := f.FuzzyMatch("container", source) + if got != match { + t.Fatalf("Expected %v, got %v: %s", match, got, source) + } + } +} diff --git a/vendor/github.com/moby/moby/api/types/id_response.go b/vendor/github.com/moby/moby/api/types/id_response.go new file mode 100644 index 0000000..7592d2f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/moby/moby/api/types/image_summary.go b/vendor/github.com/moby/moby/api/types/image_summary.go new file mode 100644 index 0000000..e145b3d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/moby/moby/api/types/mount/mount.go b/vendor/github.com/moby/moby/api/types/mount/mount.go new file mode 100644 index 0000000..31f2365 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/mount/mount.go @@ -0,0 +1,113 @@ +package mount + +import ( + "os" +) + +// Type represents the type of a mount. +type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be convered to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} diff --git a/vendor/github.com/moby/moby/api/types/network/network.go b/vendor/github.com/moby/moby/api/types/network/network.go new file mode 100644 index 0000000..832b3ed --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/network.go @@ -0,0 +1,59 @@ +package network + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string //Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// PeerInfo represents one peer of a overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} diff --git a/vendor/github.com/moby/moby/api/types/plugin.go b/vendor/github.com/moby/moby/api/types/plugin.go new file mode 100644 index 0000000..6cc7a23 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin.go @@ -0,0 +1,189 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True when the plugin is running. False when the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. +// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. +// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_device.go b/vendor/github.com/moby/moby/api/types/plugin_device.go new file mode 100644 index 0000000..5699010 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_env.go b/vendor/github.com/moby/moby/api/types/plugin_env.go new file mode 100644 index 0000000..32962dc --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_interface_type.go b/vendor/github.com/moby/moby/api/types/plugin_interface_type.go new file mode 100644 index 0000000..c82f204 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_mount.go b/vendor/github.com/moby/moby/api/types/plugin_mount.go new file mode 100644 index 0000000..5c031cf --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_responses.go b/vendor/github.com/moby/moby/api/types/plugin_responses.go new file mode 100644 index 0000000..d6f7553 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_responses.go @@ -0,0 +1,64 @@ +package types + +import ( + "encoding/json" + "fmt" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = "NetworkDriver" + volumeDriver = "VolumeDriver" +) + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege diff --git a/vendor/github.com/moby/moby/api/types/port.go b/vendor/github.com/moby/moby/api/types/port.go new file mode 100644 index 0000000..ad52d46 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // IP + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/moby/moby/api/types/reference/image_reference.go b/vendor/github.com/moby/moby/api/types/reference/image_reference.go new file mode 100644 index 0000000..be9cf8e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/reference/image_reference.go @@ -0,0 +1,34 @@ +package reference + +import ( + distreference "github.com/docker/distribution/reference" +) + +// Parse parses the given references and returns the repository and +// tag (if present) from it. If there is an error during parsing, it will +// return an error. +func Parse(ref string) (string, string, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return "", "", err + } + + tag := GetTagFromNamedRef(distributionRef) + return distributionRef.Name(), tag, nil +} + +// GetTagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api makes the distinction between repository +// and tags. +func GetTagFromNamedRef(ref distreference.Named) string { + var tag string + switch x := ref.(type) { + case distreference.Digested: + tag = x.Digest().String() + case distreference.NamedTagged: + tag = x.Tag() + default: + tag = "latest" + } + return tag +} diff --git a/vendor/github.com/moby/moby/api/types/reference/image_reference_test.go b/vendor/github.com/moby/moby/api/types/reference/image_reference_test.go new file mode 100644 index 0000000..61fb676 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/reference/image_reference_test.go @@ -0,0 +1,72 @@ +package reference + +import ( + "testing" +) + +func TestParse(t *testing.T) { + testCases := []struct { + ref string + expectedName string + expectedTag string + expectedError bool + }{ + { + ref: "", + expectedName: "", + expectedTag: "", + expectedError: true, + }, + { + ref: "repository", + expectedName: "repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "repository:tag", + expectedName: "repository", + expectedTag: "tag", + expectedError: false, + }, + { + ref: "test.com/repository", + expectedName: "test.com/repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "test.com:5000/test/repository", + expectedName: "test.com:5000/test/repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "test.com:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedName: "test.com:5000/repo", + expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedError: false, + }, + { + ref: "test.com:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedName: "test.com:5000/repo", + expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedError: false, + }, + } + + for _, c := range testCases { + name, tag, err := Parse(c.ref) + if err != nil && c.expectedError { + continue + } else if err != nil { + t.Fatalf("error with %s: %s", c.ref, err.Error()) + } + if name != c.expectedName { + t.Fatalf("expected name %s, got %s", c.expectedName, name) + } + if tag != c.expectedTag { + t.Fatalf("expected tag %s, got %s", c.expectedTag, tag) + } + } +} diff --git a/vendor/github.com/moby/moby/api/types/registry/authenticate.go b/vendor/github.com/moby/moby/api/types/registry/authenticate.go new file mode 100644 index 0000000..5e37d19 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/moby/moby/api/types/registry/registry.go b/vendor/github.com/moby/moby/api/types/registry/registry.go new file mode 100644 index 0000000..28fafab --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/registry.go @@ -0,0 +1,104 @@ +package registry + +import ( + "encoding/json" + "net" +) + +// ServiceConfig stores daemon registry services configuration. +type ServiceConfig struct { + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. + IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} diff --git a/vendor/github.com/moby/moby/api/types/seccomp.go b/vendor/github.com/moby/moby/api/types/seccomp.go new file mode 100644 index 0000000..4f02ef3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/seccomp.go @@ -0,0 +1,93 @@ +package types + +// Seccomp represents the config for a seccomp profile for syscall restriction. +type Seccomp struct { + DefaultAction Action `json:"defaultAction"` + // Architectures is kept to maintain backward compatibility with the old + // seccomp profile. + Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Architecture is used to represent an specific architecture +// and its sub-architectures +type Architecture struct { + Arch Arch `json:"architecture"` + SubArches []Arch `json:"subArchitectures"` +} + +// Arch used for architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + ActKill Action = "SCMP_ACT_KILL" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = "SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Filter is used to conditionally apply Seccomp rules +type Filter struct { + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` +} + +// Syscall is used to match a group of syscalls in Seccomp +type Syscall struct { + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Action Action `json:"action"` + Args []*Arg `json:"args"` + Comment string `json:"comment"` + Includes Filter `json:"includes"` + Excludes Filter `json:"excludes"` +} diff --git a/vendor/github.com/moby/moby/api/types/service_update_response.go b/vendor/github.com/moby/moby/api/types/service_update_response.go new file mode 100644 index 0000000..74ea64b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/stats.go b/vendor/github.com/moby/moby/api/types/stats.go new file mode 100644 index 0000000..7ca76a5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/stats.go @@ -0,0 +1,181 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). + // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. Note that we dont `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. + RxDropped uint64 `json:"rx_dropped"` + // Bytes sent. Windows and Linux. + TxBytes uint64 `json:"tx_bytes"` + // Packets sent. Windows and Linux. + TxPackets uint64 `json:"tx_packets"` + // Sent errors. Not used on Windows. Note that we dont `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + TxErrors uint64 `json:"tx_errors"` + // Outgoing packets dropped. Windows and Linux. + TxDropped uint64 `json:"tx_dropped"` + // Endpoint ID. Not used on Linux. + EndpointID string `json:"endpoint_id,omitempty"` + // Instance ID. Not used on Linux. + InstanceID string `json:"instance_id,omitempty"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// Stats is Ultimate struct aggregating all types of stats of one container +type Stats struct { + // Common stats + Read time.Time `json:"read"` + PreRead time.Time `json:"preread"` + + // Linux specific stats, not populated on Windows. + PidsStats PidsStats `json:"pids_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + + // Windows specific stats, not populated on Linux. + NumProcs uint32 `json:"num_procs"` + StorageStats StorageStats `json:"storage_stats,omitempty"` + + // Shared stats + CPUStats CPUStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" + MemoryStats MemoryStats `json:"memory_stats,omitempty"` +} + +// StatsJSON is newly used Networks +type StatsJSON struct { + Stats + + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + + // Networks request version >=1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/strslice/strslice.go b/vendor/github.com/moby/moby/api/types/strslice/strslice.go new file mode 100644 index 0000000..bad493f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/moby/moby/api/types/strslice/strslice_test.go b/vendor/github.com/moby/moby/api/types/strslice/strslice_test.go new file mode 100644 index 0000000..1163b36 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/strslice/strslice_test.go @@ -0,0 +1,86 @@ +package strslice + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestStrSliceMarshalJSON(t *testing.T) { + for _, testcase := range []struct { + input StrSlice + expected string + }{ + // MADNESS(stevvooe): No clue why nil would be "" but empty would be + // "null". Had to make a change here that may affect compatibility. + {input: nil, expected: "null"}, + {StrSlice{}, "[]"}, + {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`}, + } { + data, err := json.Marshal(testcase.input) + if err != nil { + t.Fatal(err) + } + if string(data) != testcase.expected { + t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data)) + } + } +} + +func TestStrSliceUnmarshalJSON(t *testing.T) { + parts := map[string][]string{ + "": {"default", "values"}, + "[]": {}, + `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, + } + for json, expectedParts := range parts { + strs := StrSlice{"default", "values"} + if err := strs.UnmarshalJSON([]byte(json)); err != nil { + t.Fatal(err) + } + + actualParts := []string(strs) + if !reflect.DeepEqual(actualParts, expectedParts) { + t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts) + } + + } +} + +func TestStrSliceUnmarshalString(t *testing.T) { + var e StrSlice + echo, err := json.Marshal("echo") + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + if len(e) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", e) + } + + if e[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", e[0]) + } +} + +func TestStrSliceUnmarshalSlice(t *testing.T) { + var e StrSlice + echo, err := json.Marshal([]string{"echo"}) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + if len(e) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", e) + } + + if e[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", e[0]) + } +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/common.go b/vendor/github.com/moby/moby/api/types/swarm/common.go new file mode 100644 index 0000000..64a648b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/common.go @@ -0,0 +1,27 @@ +package swarm + +import "time" + +// Version represents the internal object version. +type Version struct { + Index uint64 `json:",omitempty"` +} + +// Meta is a base object inherited by most of the other once. +type Meta struct { + Version Version `json:",omitempty"` + CreatedAt time.Time `json:",omitempty"` + UpdatedAt time.Time `json:",omitempty"` +} + +// Annotations represents how to describe an object. +type Annotations struct { + Name string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` +} + +// Driver represents a driver (network, logging). +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/container.go b/vendor/github.com/moby/moby/api/types/swarm/container.go new file mode 100644 index 0000000..4ab476c --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/container.go @@ -0,0 +1,46 @@ +package swarm + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// ContainerSpec represents the spec of a container. +type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/network.go b/vendor/github.com/moby/moby/api/types/swarm/network.go new file mode 100644 index 0000000..5a5e11b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/network.go @@ -0,0 +1,111 @@ +package swarm + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. +type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/node.go b/vendor/github.com/moby/moby/api/types/swarm/node.go new file mode 100644 index 0000000..379e17a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/node.go @@ -0,0 +1,114 @@ +package swarm + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. +type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/vendor/github.com/moby/moby/api/types/swarm/secret.go b/vendor/github.com/moby/moby/api/types/swarm/secret.go new file mode 100644 index 0000000..fdb2388 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/secret.go @@ -0,0 +1,31 @@ +package swarm + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/service.go b/vendor/github.com/moby/moby/api/types/swarm/service.go new file mode 100644 index 0000000..2cf2642 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/service.go @@ -0,0 +1,105 @@ +package swarm + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus UpdateStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt time.Time `json:",omitempty"` + CompletedAt time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update failures. + FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/swarm.go b/vendor/github.com/moby/moby/api/types/swarm/swarm.go new file mode 100644 index 0000000..0b42219 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/swarm.go @@ -0,0 +1,197 @@ +package swarm + +import "time" + +// ClusterInfo represents info about the cluster for outputing in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an affect on new tasks. Old tasks + // will continue use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` +} + +// ExternalCAProtocol represents type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + ForceNewCluster bool + Spec Spec + AutoLockManagers bool +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + RemoteAddrs []string + JoinToken string // accept by secret +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int + Managers int + + Cluster ClusterInfo +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/task.go b/vendor/github.com/moby/moby/api/types/swarm/task.go new file mode 100644 index 0000000..ace12cc --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/task.go @@ -0,0 +1,128 @@ +package swarm + +import "time" + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + ContainerSpec ContainerSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 +} + +// Resources represents resources (CPU/Memory). +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Resources `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. +type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. +type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + ContainerID string `json:",omitempty"` + PID int `json:",omitempty"` + ExitCode int `json:",omitempty"` +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/time/duration_convert.go b/vendor/github.com/moby/moby/api/types/time/duration_convert.go new file mode 100644 index 0000000..63e1eec --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/time/duration_convert.go @@ -0,0 +1,12 @@ +package time + +import ( + "strconv" + "time" +) + +// DurationToSecondsString converts the specified duration to the number +// seconds it represents, formatted as a string. +func DurationToSecondsString(duration time.Duration) string { + return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) +} diff --git a/vendor/github.com/moby/moby/api/types/time/duration_convert_test.go b/vendor/github.com/moby/moby/api/types/time/duration_convert_test.go new file mode 100644 index 0000000..869c08f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/time/duration_convert_test.go @@ -0,0 +1,26 @@ +package time + +import ( + "testing" + "time" +) + +func TestDurationToSecondsString(t *testing.T) { + cases := []struct { + in time.Duration + expected string + }{ + {0 * time.Second, "0"}, + {1 * time.Second, "1"}, + {1 * time.Minute, "60"}, + {24 * time.Hour, "86400"}, + } + + for _, c := range cases { + s := DurationToSecondsString(c.in) + if s != c.expected { + t.Errorf("wrong value for input `%v`: expected `%s`, got `%s`", c.in, c.expected, s) + t.Fail() + } + } +} diff --git a/vendor/github.com/moby/moby/api/types/time/timestamp.go b/vendor/github.com/moby/moby/api/types/time/timestamp.go new file mode 100644 index 0000000..d3695ba --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/time/timestamp.go @@ -0,0 +1,124 @@ +package time + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. +func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + var parseInLocation bool + + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation + parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) +// if the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// seconds will be used to create a time variable. For example: +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// returns seconds as def(aultSeconds) if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseonds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/vendor/github.com/moby/moby/api/types/time/timestamp_test.go b/vendor/github.com/moby/moby/api/types/time/timestamp_test.go new file mode 100644 index 0000000..a165130 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/time/timestamp_test.go @@ -0,0 +1,93 @@ +package time + +import ( + "fmt" + "testing" + "time" +) + +func TestGetTimestamp(t *testing.T) { + now := time.Now().In(time.UTC) + cases := []struct { + in, expected string + expectedErr bool + }{ + // Partial RFC3339 strings get parsed with second precision + {"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false}, + {"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false}, + {"2006-01-02T15:04:05.999999999", "1136214245.999999999", false}, + {"2006-01-02T15:04:05Z", "1136214245.000000000", false}, + {"2006-01-02T15:04:05", "1136214245.000000000", false}, + {"2006-01-02T15:04:0Z", "", true}, + {"2006-01-02T15:04:0", "", true}, + {"2006-01-02T15:04Z", "1136214240.000000000", false}, + {"2006-01-02T15:04+00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04-00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04", "1136214240.000000000", false}, + {"2006-01-02T15:0Z", "", true}, + {"2006-01-02T15:0", "", true}, + {"2006-01-02T15Z", "1136214000.000000000", false}, + {"2006-01-02T15+00:00", "1136214000.000000000", false}, + {"2006-01-02T15-00:00", "1136214000.000000000", false}, + {"2006-01-02T15", "1136214000.000000000", false}, + {"2006-01-02T1Z", "1136163600.000000000", false}, + {"2006-01-02T1", "1136163600.000000000", false}, + {"2006-01-02TZ", "", true}, + {"2006-01-02T", "", true}, + {"2006-01-02+00:00", "1136160000.000000000", false}, + {"2006-01-02-00:00", "1136160000.000000000", false}, + {"2006-01-02-00:01", "1136160060.000000000", false}, + {"2006-01-02Z", "1136160000.000000000", false}, + {"2006-01-02", "1136160000.000000000", false}, + {"2015-05-13T20:39:09Z", "1431549549.000000000", false}, + + // unix timestamps returned as is + {"1136073600", "1136073600", false}, + {"1136073600.000000001", "1136073600.000000001", false}, + // Durations + {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false}, + {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + + // String fallback + {"invalid", "invalid", false}, + } + + for _, c := range cases { + o, err := GetTimestamp(c.in, now) + if o != c.expected || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong value for '%s'. expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err) + t.Fail() + } + } +} + +func TestParseTimestamps(t *testing.T) { + cases := []struct { + in string + def, expectedS, expectedN int64 + expectedErr bool + }{ + // unix timestamps + {"1136073600", 0, 1136073600, 0, false}, + {"1136073600.000000001", 0, 1136073600, 1, false}, + {"1136073600.0000000010", 0, 1136073600, 1, false}, + {"1136073600.00000001", 0, 1136073600, 10, false}, + {"foo.bar", 0, 0, 0, true}, + {"1136073600.bar", 0, 1136073600, 0, true}, + {"", -1, -1, 0, false}, + } + + for _, c := range cases { + s, n, err := ParseTimestamps(c.in, c.def) + if s != c.expectedS || + n != c.expectedN || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err) + t.Fail() + } + } +} diff --git a/vendor/github.com/moby/moby/api/types/types.go b/vendor/github.com/moby/moby/api/types/types.go new file mode 100644 index 0000000..a82c3e8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/types.go @@ -0,0 +1,549 @@ +package types + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// ContainerChange contains response of Engine API: +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// ImageHistory contains response of Engine API: +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// ImageDelete contains response of Engine API: +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// GraphDriverData returns Image's graph driver config info +// when calling inspect command +type GraphDriverData struct { + Name string + Data map[string]string +} + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. +type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// ContainerProcessList contains response of Engine API: +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + Experimental bool +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit records a external tool actual commit id version along the +// one expect by dockerd as set at build time +type Commit struct { + ID string + Expected string +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. It's only available in Docker Swarm +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. +type MountPoint struct { + Type mount.Type `json:",omitempty"` + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + CheckDuplicate bool + Driver string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. +type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDelete + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/moby/moby/api/types/versions/README.md b/vendor/github.com/moby/moby/api/types/versions/README.md new file mode 100644 index 0000000..cdac50a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/README.md @@ -0,0 +1,14 @@ +## Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. + +### Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/moby/moby/api/types/versions/compare.go b/vendor/github.com/moby/moby/api/types/versions/compare.go new file mode 100644 index 0000000..611d4fe --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/compare.go @@ -0,0 +1,62 @@ +package versions + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. +func compare(v1, v2 string) int { + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/moby/moby/api/types/versions/compare_test.go b/vendor/github.com/moby/moby/api/types/versions/compare_test.go new file mode 100644 index 0000000..c2b9686 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/compare_test.go @@ -0,0 +1,26 @@ +package versions + +import ( + "testing" +) + +func assertVersion(t *testing.T, a, b string, result int) { + if r := compare(a, b); r != result { + t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) +} diff --git a/vendor/github.com/moby/moby/api/types/versions/v1p19/types.go b/vendor/github.com/moby/moby/api/types/versions/v1p19/types.go new file mode 100644 index 0000000..dc13150 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/v1p19/types.go @@ -0,0 +1,35 @@ +// Package v1p19 provides specific API types for the API version 1, patch 19. +package v1p19 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. +// Note this is not used by the Windows daemon. +type ContainerJSON struct { + *types.ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig + NetworkSettings *v1p20.NetworkSettings +} + +// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string + Memory int64 + MemorySwap int64 + CPUShares int64 `json:"CpuShares"` + CPUSet string `json:"Cpuset"` +} diff --git a/vendor/github.com/moby/moby/api/types/versions/v1p20/types.go b/vendor/github.com/moby/moby/api/types/versions/v1p20/types.go new file mode 100644 index 0000000..94a06d7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/v1p20/types.go @@ -0,0 +1,40 @@ +// Package v1p20 provides specific API types for the API version 1, patch 20. +package v1p20 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/vendor/github.com/moby/moby/api/types/volume.go b/vendor/github.com/moby/moby/api/types/volume.go new file mode 100644 index 0000000..da4f8eb --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume.go @@ -0,0 +1,58 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData volume usage data +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. + // Required: true + RefCount int64 `json:"RefCount"` + + // The disk space used by the volume (local driver only) + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/volumes_create.go b/vendor/github.com/moby/moby/api/types/volume/volumes_create.go new file mode 100644 index 0000000..679c160 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/volumes_create.go @@ -0,0 +1,29 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// VolumesCreateBody volumes create body +// swagger:model VolumesCreateBody +type VolumesCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/volumes_list.go b/vendor/github.com/moby/moby/api/types/volume/volumes_list.go new file mode 100644 index 0000000..7770bcb --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/volumes_list.go @@ -0,0 +1,23 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +import "github.com/docker/docker/api/types" + +// VolumesListOKBody volumes list o k body +// swagger:model VolumesListOKBody +type VolumesListOKBody struct { + + // List of volumes + // Required: true + Volumes []*types.Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/builder/builder.go b/vendor/github.com/moby/moby/builder/builder.go new file mode 100644 index 0000000..ced19e8 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/builder.go @@ -0,0 +1,169 @@ +// Package builder defines interfaces for any Docker builder to implement. +// +// Historically, only server-side Dockerfile interpreters existed. +// This package allows for other implementations of Docker builders. +package builder + +import ( + "io" + "os" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/image" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +const ( + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) + +// Context represents a file system tree. +type Context interface { + // Close allows to signal that the filesystem tree won't be used anymore. + // For Context implementations using a temporary directory, it is recommended to + // delete the temporary directory in Close(). + Close() error + // Stat returns an entry corresponding to path if any. + // It is recommended to return an error if path was not found. + // If path is a symlink it also returns the path to the target file. + Stat(path string) (string, FileInfo, error) + // Open opens path from the context and returns a readable stream of it. + Open(path string) (io.ReadCloser, error) + // Walk walks the tree of the context with the function passed to it. + Walk(root string, walkFn WalkFunc) error +} + +// WalkFunc is the type of the function called for each file or directory visited by Context.Walk(). +type WalkFunc func(path string, fi FileInfo, err error) error + +// ModifiableContext represents a modifiable Context. +// TODO: remove this interface once we can get rid of Remove() +type ModifiableContext interface { + Context + // Remove deletes the entry specified by `path`. + // It is usual for directory entries to delete all its subentries. + Remove(path string) error +} + +// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file. +// TODO: remove this interface once pkg/archive exposes a walk function that Context can use. +type FileInfo interface { + os.FileInfo + Path() string +} + +// PathFileInfo is a convenience struct that implements the FileInfo interface. +type PathFileInfo struct { + os.FileInfo + // FilePath holds the absolute path to the file. + FilePath string + // Name holds the basename for the file. + FileName string +} + +// Path returns the absolute path to the file. +func (fi PathFileInfo) Path() string { + return fi.FilePath +} + +// Name returns the basename of the file. +func (fi PathFileInfo) Name() string { + if fi.FileName != "" { + return fi.FileName + } + return fi.FileInfo.Name() +} + +// Hashed defines an extra method intended for implementations of os.FileInfo. +type Hashed interface { + // Hash returns the hash of a file. + Hash() string + SetHash(string) +} + +// HashedFileInfo is a convenient struct that augments FileInfo with a field. +type HashedFileInfo struct { + FileInfo + // FileHash represents the hash of a file. + FileHash string +} + +// Hash returns the hash of a file. +func (fi HashedFileInfo) Hash() string { + return fi.FileHash +} + +// SetHash sets the hash of a file. +func (fi *HashedFileInfo) SetHash(h string) { + fi.FileHash = h +} + +// Backend abstracts calls to a Docker Daemon. +type Backend interface { + // TODO: use digest reference instead of name + + // GetImageOnBuild looks up a Docker image referenced by `name`. + GetImageOnBuild(name string) (Image, error) + // TagImage tags an image with newTag + TagImageWithReference(image.ID, reference.Named) error + // PullOnBuild tells Docker to pull image referenced by `name`. + PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error) + // ContainerAttachRaw attaches to container. + ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error + // ContainerCreate creates a new Docker container and returns potential warnings + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + // ContainerRm removes a container specified by `id`. + ContainerRm(name string, config *types.ContainerRmConfig) error + // Commit creates a new Docker image from an existing Docker container. + Commit(string, *backend.ContainerCommitConfig) (string, error) + // ContainerKill stops the container execution abruptly. + ContainerKill(containerID string, sig uint64) error + // ContainerStart starts a new container + ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + // ContainerWait stops processing until the given container is stopped. + ContainerWait(containerID string, timeout time.Duration) (int, error) + // ContainerUpdateCmdOnBuild updates container.Path and container.Args + ContainerUpdateCmdOnBuild(containerID string, cmd []string) error + // ContainerCreateWorkdir creates the workdir (currently only used on Windows) + ContainerCreateWorkdir(containerID string) error + + // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container + // specified by a container object. + // TODO: make an Extract method instead of passing `decompress` + // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used + // with Context.Walk + // ContainerCopy(name string, res string) (io.ReadCloser, error) + // TODO: use copyBackend api + CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error + + // HasExperimental checks if the backend supports experimental features + HasExperimental() bool + + // SquashImage squashes the fs layers from the provided image down to the specified `to` image + SquashImage(from string, to string) (string, error) +} + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config +} + +// ImageCacheBuilder represents a generator for stateful image cache. +type ImageCacheBuilder interface { + // MakeImageCache creates a stateful image cache. + MakeImageCache(cacheFrom []string) ImageCache +} + +// ImageCache abstracts an image cache. +// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. + GetCache(parentID string, cfg *container.Config) (imageID string, err error) +} diff --git a/vendor/github.com/moby/moby/builder/context.go b/vendor/github.com/moby/moby/builder/context.go new file mode 100644 index 0000000..600f423 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/context.go @@ -0,0 +1,260 @@ +package builder + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/gitutils" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// ValidateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read +// symlinks which point to non-existing files don't trigger an error +func ValidateContextDirectory(srcPath string, excludes []string) error { + contextRoot, err := getContextRoot(srcPath) + if err != nil { + return err + } + return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { + return err + } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hanging on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +// GetContextFromReader will read the contents of the given reader as either a +// Dockerfile or tar archive. Returns a tar archive used as a context and a +// path to the Dockerfile inside the tar. +func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { + buf := bufio.NewReader(r) + + magic, err := buf.Peek(archive.HeaderSize) + if err != nil && err != io.EOF { + return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) + } + + if archive.IsArchive(magic) { + return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil + } + + // Input should be read as a Dockerfile. + tmpDir, err := ioutil.TempDir("", "docker-build-context-") + if err != nil { + return nil, "", fmt.Errorf("unbale to create temporary context directory: %v", err) + } + + f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName)) + if err != nil { + return nil, "", err + } + _, err = io.Copy(f, buf) + if err != nil { + f.Close() + return nil, "", err + } + + if err := f.Close(); err != nil { + return nil, "", err + } + if err := r.Close(); err != nil { + return nil, "", err + } + + tar, err := archive.Tar(tmpDir, archive.Uncompressed) + if err != nil { + return nil, "", err + } + + return ioutils.NewReadCloserWrapper(tar, func() error { + err := tar.Close() + os.RemoveAll(tmpDir) + return err + }), DefaultDockerfileName, nil + +} + +// GetContextFromGitURL uses a Git URL as context for a `docker build`. The +// git repo is cloned into a temporary directory used as the context directory. +// Returns the absolute path to the temporary context directory, the relative +// path of the dockerfile in that context directory, and a non-nil error on +// success. +func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { + if _, err := exec.LookPath("git"); err != nil { + return "", "", fmt.Errorf("unable to find 'git': %v", err) + } + if absContextDir, err = gitutils.Clone(gitURL); err != nil { + return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) + } + + return getDockerfileRelPath(absContextDir, dockerfileName) +} + +// GetContextFromURL uses a remote URL as context for a `docker build`. The +// remote resource is downloaded as either a Dockerfile or a tar archive. +// Returns the tar archive used for the context and a path of the +// dockerfile inside the tar. +func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { + response, err := httputils.Download(remoteURL) + if err != nil { + return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) + } + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) + + // Pass the response body through a progress reader. + progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) + + return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) +} + +// GetContextFromLocalDir uses the given local directory as context for a +// `docker build`. Returns the absolute path to the local context directory, +// the relative path of the dockerfile in that context directory, and a non-nil +// error on success. +func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { + // When using a local context directory, when the Dockerfile is specified + // with the `-f/--file` option then it is considered relative to the + // current directory and not the context directory. + if dockerfileName != "" { + if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { + return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) + } + } + + return getDockerfileRelPath(localDir, dockerfileName) +} + +// getDockerfileRelPath uses the given context directory for a `docker build` +// and returns the absolute path to the context directory, the relative path of +// the dockerfile in that context directory, and a non-nil error on success. +func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { + if absContextDir, err = filepath.Abs(givenContextDir); err != nil { + return "", "", fmt.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) + } + + // The context dir might be a symbolic link, so follow it to the actual + // target directory. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. + if !isUNC(absContextDir) { + absContextDir, err = filepath.EvalSymlinks(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) + } + } + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) + } + + if !stat.IsDir() { + return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) + } + + absDockerfile := givenDockerfile + if absDockerfile == "" { + // No -f/--file was specified so use the default relative to the + // context directory. + absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) + + // Just to be nice ;-) look for 'dockerfile' too but only + // use it if we found it, otherwise ignore this check + if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { + altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) + if _, err = os.Lstat(altPath); err == nil { + absDockerfile = altPath + } + } + } + + // If not already an absolute path, the Dockerfile path should be joined to + // the base directory. + if !filepath.IsAbs(absDockerfile) { + absDockerfile = filepath.Join(absContextDir, absDockerfile) + } + + // Evaluate symlinks in the path to the Dockerfile too. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. + if !isUNC(absDockerfile) { + absDockerfile, err = filepath.EvalSymlinks(absDockerfile) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) + } + } + + if _, err := os.Lstat(absDockerfile); err != nil { + if os.IsNotExist(err) { + return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) + } + return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) + } + + if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { + return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) + } + + if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { + return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) + } + + return absContextDir, relDockerfile, nil +} + +// isUNC returns true if the path is UNC (one starting \\). It always returns +// false on Linux. +func isUNC(path string) bool { + return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) +} diff --git a/vendor/github.com/moby/moby/builder/context_test.go b/vendor/github.com/moby/moby/builder/context_test.go new file mode 100644 index 0000000..27d29d7 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/context_test.go @@ -0,0 +1,307 @@ +package builder + +import ( + "archive/tar" + "bytes" + "io" + "io/ioutil" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +var prepareEmpty = func(t *testing.T) (string, func()) { + return "", func() {} +} + +var prepareNoFiles = func(t *testing.T) (string, func()) { + return createTestTempDir(t, "", "builder-context-test") +} + +var prepareOneFile = func(t *testing.T) (string, func()) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + return contextDir, cleanup +} + +func testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) (string, func()), excludes []string) { + contextDir, cleanup := prepare(t) + defer cleanup() + + err := ValidateContextDirectory(contextDir, excludes) + + if err != nil { + t.Fatalf("Error should be nil, got: %s", err) + } +} + +func TestGetContextFromLocalDirNoDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirNotExistingDir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + absContextDir, relDockerfile, err := GetContextFromLocalDir(fakePath, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirNotExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, fakePath) + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirWithNoDirectory(t *testing.T) { + contextDir, dirCleanup := createTestTempDir(t, "", "builder-context-test") + defer dirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + + if err != nil { + t.Fatalf("Error when getting context from local dir: %s", err) + } + + if absContextDir != contextDir { + t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromLocalDirWithDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + + if err != nil { + t.Fatalf("Error when getting context from local dir: %s", err) + } + + if absContextDir != contextDir { + t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromLocalDirLocalFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + testFilename := createTestTempFile(t, contextDir, "tmpTest", "test", 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(testFilename, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirWithCustomDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, DefaultDockerfileName) + + if err != nil { + t.Fatalf("Error when getting context from local dir: %s", err) + } + + if absContextDir != contextDir { + t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) + } + +} + +func TestGetContextFromReaderString(t *testing.T) { + tarArchive, relDockerfile, err := GetContextFromReader(ioutil.NopCloser(strings.NewReader(dockerfileContents)), "") + + if err != nil { + t.Fatalf("Error when executing GetContextFromReader: %s", err) + } + + tarReader := tar.NewReader(tarArchive) + + _, err = tarReader.Next() + + if err != nil { + t.Fatalf("Error when reading tar archive: %s", err) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + if err = tarArchive.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromReaderTar(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar: %s", err) + } + + tarArchive, relDockerfile, err := GetContextFromReader(tarStream, DefaultDockerfileName) + + if err != nil { + t.Fatalf("Error when executing GetContextFromReader: %s", err) + } + + tarReader := tar.NewReader(tarArchive) + + header, err := tarReader.Next() + + if err != nil { + t.Fatalf("Error when reading tar archive: %s", err) + } + + if header.Name != DefaultDockerfileName { + t.Fatalf("Dockerfile name should be: %s, got: %s", DefaultDockerfileName, header.Name) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + if err = tarArchive.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestValidateContextDirectoryEmptyContext(t *testing.T) { + // This isn't a valid test on Windows. See https://play.golang.org/p/RR6z6jxR81. + // The test will ultimately end up calling filepath.Abs(""). On Windows, + // golang will error. On Linux, golang will return /. Due to there being + // drive letters on Windows, this is probably the correct behaviour for + // Windows. + if runtime.GOOS == "windows" { + t.Skip("Invalid test on Windows") + } + testValidateContextDirectory(t, prepareEmpty, []string{}) +} + +func TestValidateContextDirectoryContextWithNoFiles(t *testing.T) { + testValidateContextDirectory(t, prepareNoFiles, []string{}) +} + +func TestValidateContextDirectoryWithOneFile(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{}) +} + +func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{DefaultDockerfileName}) +} diff --git a/vendor/github.com/moby/moby/builder/context_unix.go b/vendor/github.com/moby/moby/builder/context_unix.go new file mode 100644 index 0000000..d1f72e0 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/context_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package builder + +import ( + "path/filepath" +) + +func getContextRoot(srcPath string) (string, error) { + return filepath.Join(srcPath, "."), nil +} diff --git a/vendor/github.com/moby/moby/builder/context_windows.go b/vendor/github.com/moby/moby/builder/context_windows.go new file mode 100644 index 0000000..b8ba2ba --- /dev/null +++ b/vendor/github.com/moby/moby/builder/context_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package builder + +import ( + "path/filepath" + + "github.com/docker/docker/pkg/longpath" +) + +func getContextRoot(srcPath string) (string, error) { + cr, err := filepath.Abs(srcPath) + if err != nil { + return "", err + } + return longpath.AddPrefix(cr), nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/bflag.go b/vendor/github.com/moby/moby/builder/dockerfile/bflag.go new file mode 100644 index 0000000..1e03693 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/bflag.go @@ -0,0 +1,176 @@ +package dockerfile + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType FlagType = iota + stringType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} + +// NewBFlags returns the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +// addFlag is a generic func used by the other AddXXX() func +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid. +// Any error noticed during the AddXXX() funcs will be generated/returned +// here. We do this because an error during AddXXX() is more like a +// compile time error so it doesn't matter too much when we stop our +// processing as long as we do stop it, so this allows the code +// around AddXXX() to be just: +// defFlag := AddString("description", "") +// w/o needing to add an if-statement around each one. +func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value + + default: + panic(fmt.Errorf("No idea what kind of flag we have! Should never get here!")) + } + + } + + return nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/bflag_test.go b/vendor/github.com/moby/moby/builder/dockerfile/bflag_test.go new file mode 100644 index 0000000..65cfcea --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/bflag_test.go @@ -0,0 +1,187 @@ +package dockerfile + +import ( + "testing" +) + +func TestBuilderFlags(t *testing.T) { + var expected string + var err error + + // --- + + bf := NewBFlags() + bf.Args = []string{} + if err := bf.Parse(); err != nil { + t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + bf.Args = []string{"--"} + if err := bf.Parse(); err != nil { + t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + flStr1 := bf.AddString("str1", "") + flBool1 := bf.AddBool("bool1", false) + bf.Args = []string{} + if err = bf.Parse(); err != nil { + t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.IsUsed() == true { + t.Fatalf("Test3 - str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatalf("Test3 - bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "HI" { + t.Fatalf("Str1 was supposed to default to: HI") + } + if flBool1.IsTrue() { + t.Fatalf("Bool1 was supposed to default to: false") + } + if flStr1.IsUsed() == true { + t.Fatalf("Str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatalf("Bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1="} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "BYE" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatalf("Test-b1 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=true"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatalf("Test-b2 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flBool1.IsTrue() { + t.Fatalf("Test-b3 Bool1 was supposed to be false") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool2"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1", "--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "BYE" { + t.Fatalf("Teset %s, str1 should be BYE", bf.Args) + } + if !flBool1.IsTrue() { + t.Fatalf("Teset %s, bool1 should be true", bf.Args) + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/builder.go b/vendor/github.com/moby/moby/builder/dockerfile/builder.go new file mode 100644 index 0000000..da43513 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/builder.go @@ -0,0 +1,370 @@ +package dockerfile + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + perrors "github.com/pkg/errors" + "golang.org/x/net/context" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "healthcheck": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +// BuiltinAllowedBuildArgs is list of built-in allowed build args +var BuiltinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. +type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Output io.Writer + + docker builder.Backend + context builder.Context + clientCtx context.Context + cancel context.CancelFunc + + dockerfile *parser.Node + runConfig *container.Config // runconfig for cmd, run, entrypoint etc. + flags *BFlags + tmpContainers map[string]struct{} + image string // imageID + noBaseImage bool + maintainer string + cmdSet bool + disableCommit bool + cacheBusted bool + allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. + directive parser.Directive + + // TODO: remove once docker.Commit can receive a tag + id string + + imageCache builder.ImageCache + from builder.Image +} + +// BuildManager implements builder.Backend and is shared across all Builder objects. +type BuildManager struct { + backend builder.Backend +} + +// NewBuildManager creates a BuildManager. +func NewBuildManager(b builder.Backend) (bm *BuildManager) { + return &BuildManager{backend: b} +} + +// BuildFromContext builds a new image from a given context. +func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) { + if buildOptions.Squash && !bm.backend.HasExperimental() { + return "", apierrors.NewBadRequestError(errors.New("squash is only supported with experimental mode")) + } + buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc) + if err != nil { + return "", err + } + defer func() { + if err := buildContext.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + }() + + if len(dockerfileName) > 0 { + buildOptions.Dockerfile = dockerfileName + } + b, err := NewBuilder(ctx, buildOptions, bm.backend, builder.DockerIgnoreContext{ModifiableContext: buildContext}, nil) + if err != nil { + return "", err + } + return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output) +} + +// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. +// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, +// will be read from the Context passed to Build(). +func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, buildContext builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) { + if config == nil { + config = new(types.ImageBuildOptions) + } + if config.BuildArgs == nil { + config.BuildArgs = make(map[string]*string) + } + ctx, cancel := context.WithCancel(clientCtx) + b = &Builder{ + clientCtx: ctx, + cancel: cancel, + options: config, + Stdout: os.Stdout, + Stderr: os.Stderr, + docker: backend, + context: buildContext, + runConfig: new(container.Config), + tmpContainers: map[string]struct{}{}, + id: stringid.GenerateNonCryptoID(), + allowedBuildArgs: make(map[string]bool), + directive: parser.Directive{ + EscapeSeen: false, + LookingForDirectives: true, + }, + } + if icb, ok := backend.(builder.ImageCacheBuilder); ok { + b.imageCache = icb.MakeImageCache(config.CacheFrom) + } + + parser.SetEscapeToken(parser.DefaultEscapeToken, &b.directive) // Assume the default token for escape + + if dockerfile != nil { + b.dockerfile, err = parser.Parse(dockerfile, &b.directive) + if err != nil { + return nil, err + } + } + + return b, nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. + uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNamed(repo) + if err != nil { + return nil, err + } + + ref = reference.WithDefaultTag(ref) + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + if _, isTagged := ref.(reference.NamedTagged); !isTagged { + ref, err = reference.WithTag(ref, reference.DefaultTag) + if err != nil { + return nil, err + } + } + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} + +// build runs the Dockerfile builder from a context and a docker object that allows to make calls +// to Docker. +// +// This will (barring errors): +// +// * read the dockerfile from context +// * parse the dockerfile if not already parsed +// * walk the AST and execute it by dispatching to handlers. If Remove +// or ForceRemove is set, additional cleanup around containers happens after +// processing. +// * Tag image, if applicable. +// * Print a happy message and return the image ID. +// +func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) { + b.Stdout = stdout + b.Stderr = stderr + b.Output = out + + // If Dockerfile was not parsed yet, extract it from the Context + if b.dockerfile == nil { + if err := b.readDockerfile(); err != nil { + return "", err + } + } + + repoAndTags, err := sanitizeRepoAndTags(b.options.Tags) + if err != nil { + return "", err + } + + if len(b.options.Labels) > 0 { + line := "LABEL " + for k, v := range b.options.Labels { + line += fmt.Sprintf("%q='%s' ", k, v) + } + _, node, err := parser.ParseLine(line, &b.directive, false) + if err != nil { + return "", err + } + b.dockerfile.Children = append(b.dockerfile.Children, node) + } + + var shortImgID string + total := len(b.dockerfile.Children) + for _, n := range b.dockerfile.Children { + if err := b.checkDispatch(n, false); err != nil { + return "", err + } + } + + for i, n := range b.dockerfile.Children { + select { + case <-b.clientCtx.Done(): + logrus.Debug("Builder: build cancelled!") + fmt.Fprintf(b.Stdout, "Build cancelled") + return "", fmt.Errorf("Build cancelled") + default: + // Not cancelled yet, keep going... + } + + if err := b.dispatch(i, total, n); err != nil { + if b.options.ForceRemove { + b.clearTmp() + } + return "", err + } + + shortImgID = stringid.TruncateID(b.image) + fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) + if b.options.Remove { + b.clearTmp() + } + } + + // check if there are any leftover build-args that were passed but not + // consumed during build. Return a warning, if there are any. + leftoverArgs := []string{} + for arg := range b.options.BuildArgs { + if !b.isBuildArgAllowed(arg) { + leftoverArgs = append(leftoverArgs, arg) + } + } + + if len(leftoverArgs) > 0 { + fmt.Fprintf(b.Stderr, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } + + if b.image == "" { + return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") + } + + if b.options.Squash { + var fromID string + if b.from != nil { + fromID = b.from.ImageID() + } + b.image, err = b.docker.SquashImage(b.image, fromID) + if err != nil { + return "", perrors.Wrap(err, "error squashing image") + } + } + + imageID := image.ID(b.image) + for _, rt := range repoAndTags { + if err := b.docker.TagImageWithReference(imageID, rt); err != nil { + return "", err + } + } + + fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) + return b.image, nil +} + +// Cancel cancels an ongoing Dockerfile build. +func (b *Builder) Cancel() { + b.cancel() +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? +func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { + b, err := NewBuilder(context.Background(), nil, nil, nil, nil) + if err != nil { + return nil, err + } + + ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")), &b.directive) + if err != nil { + return nil, err + } + + // ensure that the commands are valid + for _, n := range ast.Children { + if !validCommitCommands[n.Value] { + return nil, fmt.Errorf("%s is not a valid change command", n.Value) + } + } + + b.runConfig = config + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + total := len(ast.Children) + for _, n := range ast.Children { + if err := b.checkDispatch(n, false); err != nil { + return nil, err + } + } + + for i, n := range ast.Children { + if err := b.dispatch(i, total, n); err != nil { + return nil, err + } + } + + return b.runConfig, nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/builder_unix.go b/vendor/github.com/moby/moby/builder/dockerfile/builder_unix.go new file mode 100644 index 0000000..76a7ce7 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/builder_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package dockerfile + +var defaultShell = []string{"/bin/sh", "-c"} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/builder_windows.go b/vendor/github.com/moby/moby/builder/dockerfile/builder_windows.go new file mode 100644 index 0000000..37e9fbc --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/builder_windows.go @@ -0,0 +1,3 @@ +package dockerfile + +var defaultShell = []string{"cmd", "/S", "/C"} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/command/command.go b/vendor/github.com/moby/moby/builder/dockerfile/command/command.go new file mode 100644 index 0000000..f23c687 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/command/command.go @@ -0,0 +1,46 @@ +// Package command contains the set of Dockerfile commands. +package command + +// Define constants for the command strings +const ( + Add = "add" + Arg = "arg" + Cmd = "cmd" + Copy = "copy" + Entrypoint = "entrypoint" + Env = "env" + Expose = "expose" + From = "from" + Healthcheck = "healthcheck" + Label = "label" + Maintainer = "maintainer" + Onbuild = "onbuild" + Run = "run" + Shell = "shell" + StopSignal = "stopsignal" + User = "user" + Volume = "volume" + Workdir = "workdir" +) + +// Commands is list of all Dockerfile commands +var Commands = map[string]struct{}{ + Add: {}, + Arg: {}, + Cmd: {}, + Copy: {}, + Entrypoint: {}, + Env: {}, + Expose: {}, + From: {}, + Healthcheck: {}, + Label: {}, + Maintainer: {}, + Onbuild: {}, + Run: {}, + Shell: {}, + StopSignal: {}, + User: {}, + Volume: {}, + Workdir: {}, +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers.go new file mode 100644 index 0000000..3e78abd --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers.go @@ -0,0 +1,821 @@ +package dockerfile + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "fmt" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/signal" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. +// +func env(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ENV") + } + + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("ENV") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + // TODO/FIXME/NOT USED + // Just here to show how to use the builder flags stuff within the + // context of a builder command. Will remove once we actually add + // a builder command to something! + /* + flBool1 := b.flags.AddBool("bool1", false) + flStr1 := b.flags.AddString("str1", "HI") + + if err := b.flags.Parse(); err != nil { + return err + } + + fmt.Printf("Bool1:%v\n", flBool1) + fmt.Printf("Str1:%v\n", flStr1) + */ + + commitStr := "ENV" + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + + if len(args[j]) == 0 { + return errBlankCommandNames("ENV") + } + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + gotOne := false + for i, envVar := range b.runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + compareFrom := envParts[0] + compareTo := args[j] + if runtime.GOOS == "windows" { + // Case insensitive environment variables on Windows + compareFrom = strings.ToUpper(compareFrom) + compareTo = strings.ToUpper(compareTo) + } + if compareFrom == compareTo { + b.runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + b.runConfig.Env = append(b.runConfig.Env, newVar) + } + j++ + } + + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. +func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("MAINTAINER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.maintainer = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("LABEL") + } + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("LABEL") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + + if b.runConfig.Labels == nil { + b.runConfig.Labels = map[string]string{} + } + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + + if len(args[j]) == 0 { + return errBlankCommandNames("LABEL") + } + + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + b.runConfig.Labels[args[j]] = args[j+1] + j++ + } + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastTwoArguments("ADD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, true, true, "ADD") +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastTwoArguments("COPY") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, false, false, "COPY") +} + +// FROM imagename +// +// This sets the image the dockerfile will build on top of. +// +func from(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("FROM") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + name := args[0] + + var ( + image builder.Image + err error + ) + + // Windows cannot support a container with no base image. + if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + return fmt.Errorf("Windows does not support FROM scratch") + } + b.image = "" + b.noBaseImage = true + } else { + // TODO: don't use `name`, instead resolve it to a digest + if !b.options.PullParent { + image, err = b.docker.GetImageOnBuild(name) + // TODO: shouldn't we error out if error is different from "not found" ? + } + if image == nil { + image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) + if err != nil { + return err + } + } + } + b.from = image + + return b.processImageFrom(image) +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. +// +func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ONBUILD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") + + b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("WORKDIR") + } + + err := b.flags.Parse() + if err != nil { + return err + } + + // This is from the Dockerfile and will not necessarily be in platform + // specific semantics, hence ensure it is converted. + b.runConfig.WorkingDir, err = normaliseWorkdir(b.runConfig.WorkingDir, args[0]) + if err != nil { + return err + } + + // For performance reasons, we explicitly do a create/mkdir now + // This avoids having an unnecessary expensive mount/unmount calls + // (on Windows in particular) during each container create. + // Prior to 1.13, the mkdir was deferred and not executed at this step. + if b.disableCommit { + // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". + // We've already updated the runConfig and that's enough. + return nil + } + b.runConfig.Image = b.image + + cmd := b.runConfig.Cmd + comment := "WORKDIR " + b.runConfig.WorkingDir + // reset the command for cache detection + b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) "+comment)) + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + if hit, err := b.probeCache(); err != nil { + return err + } else if hit { + return nil + } + + container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + if err := b.docker.ContainerCreateWorkdir(container.ID); err != nil { + return err + } + + return b.commit(container.ID, cmd, comment) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under +// Windows, in the event there is only one argument The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func run(b *Builder, args []string, attributes map[string]bool, original string) error { + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + args = handleJSONArgs(args, attributes) + + if !attributes["json"] { + args = append(getShell(b.runConfig), args...) + } + config := &container.Config{ + Cmd: strslice.StrSlice(args), + Image: b.image, + } + + // stash the cmd + cmd := b.runConfig.Cmd + if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { + b.runConfig.Cmd = config.Cmd + } + + // stash the config environment + env := b.runConfig.Env + + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + defer func(env []string) { b.runConfig.Env = env }(env) + + // derive the net build-time environment for this run. We let config + // environment override the build time environment. + // This means that we take the b.buildArgs list of env vars and remove + // any of those variables that are defined as part of the container. In other + // words, anything in b.Config.Env. What's left is the list of build-time env + // vars that we need to add to each RUN command - note the list could be empty. + // + // We don't persist the build time environment with container's config + // environment, but just sort and prepend it to the command string at time + // of commit. + // This helps with tracing back the image's actual environment at the time + // of RUN, without leaking it to the final image. It also aids cache + // lookup for same image built with same build time environment. + cmdBuildEnv := []string{} + configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + if _, ok := configEnv[key]; !ok && val != nil { + cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, *val)) + } + } + + // derive the command to use for probeCache() and to commit in this container. + // Note that we only do this if there are any build-time env vars. Also, we + // use the special argument "|#" at the start of the args array. This will + // avoid conflicts with any RUN command since commands can not + // start with | (vertical bar). The "#" (number of build envs) is there to + // help ensure proper cache matches. We don't want a RUN command + // that starts with "foo=abc" to be considered part of a build-time env var. + saveCmd := config.Cmd + if len(cmdBuildEnv) > 0 { + sort.Strings(cmdBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) + saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...)) + } + + b.runConfig.Cmd = saveCmd + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + // set Cmd manually, this is special case only for Dockerfiles + b.runConfig.Cmd = config.Cmd + // set build-time environment for 'run'. + b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) + // set config as already being escaped, this prevents double escaping on windows + b.runConfig.ArgsEscaped = true + + logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) + + cID, err := b.create() + if err != nil { + return err + } + + if err := b.run(cID); err != nil { + return err + } + + // revert to original config environment and set the command string to + // have the build-time env vars in it (if any) so that future cache look-ups + // properly match it. + b.runConfig.Env = env + b.runConfig.Cmd = saveCmd + return b.commit(cID, cmd, "run") +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + cmdSlice := handleJSONArgs(args, attributes) + + if !attributes["json"] { + cmdSlice = append(getShell(b.runConfig), cmdSlice...) + } + + b.runConfig.Cmd = strslice.StrSlice(cmdSlice) + // set config as already being escaped, this prevents double escaping on windows + b.runConfig.ArgsEscaped = true + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + return err + } + + if len(args) != 0 { + b.cmdSet = true + } + + return nil +} + +// parseOptInterval(flag) is the duration of flag.Value, or 0 if +// empty. An error is reported if the value is given and is not positive. +func parseOptInterval(f *Flag) (time.Duration, error) { + s := f.Value + if s == "" { + return 0, nil + } + d, err := time.ParseDuration(s) + if err != nil { + return 0, err + } + if d <= 0 { + return 0, fmt.Errorf("Interval %#v must be positive", f.name) + } + return d, nil +} + +// HEALTHCHECK foo +// +// Set the default healthcheck command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func healthcheck(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("HEALTHCHECK") + } + typ := strings.ToUpper(args[0]) + args = args[1:] + if typ == "NONE" { + if len(args) != 0 { + return fmt.Errorf("HEALTHCHECK NONE takes no arguments") + } + test := strslice.StrSlice{typ} + b.runConfig.Healthcheck = &container.HealthConfig{ + Test: test, + } + } else { + if b.runConfig.Healthcheck != nil { + oldCmd := b.runConfig.Healthcheck.Test + if len(oldCmd) > 0 && oldCmd[0] != "NONE" { + fmt.Fprintf(b.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) + } + } + + healthcheck := container.HealthConfig{} + + flInterval := b.flags.AddString("interval", "") + flTimeout := b.flags.AddString("timeout", "") + flRetries := b.flags.AddString("retries", "") + + if err := b.flags.Parse(); err != nil { + return err + } + + switch typ { + case "CMD": + cmdSlice := handleJSONArgs(args, attributes) + if len(cmdSlice) == 0 { + return fmt.Errorf("Missing command after HEALTHCHECK CMD") + } + + if !attributes["json"] { + typ = "CMD-SHELL" + } + + healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) + default: + return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) + } + + interval, err := parseOptInterval(flInterval) + if err != nil { + return err + } + healthcheck.Interval = interval + + timeout, err := parseOptInterval(flTimeout) + if err != nil { + return err + } + healthcheck.Timeout = timeout + + if flRetries.Value != "" { + retries, err := strconv.ParseInt(flRetries.Value, 10, 32) + if err != nil { + return err + } + if retries < 1 { + return fmt.Errorf("--retries must be at least 1 (not %d)", retries) + } + healthcheck.Retries = int(retries) + } else { + healthcheck.Retries = 0 + } + + b.runConfig.Healthcheck = &healthcheck + } + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("HEALTHCHECK %q", b.runConfig.Healthcheck)) +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments +// to /usr/sbin/nginx. Uses the default shell if not in JSON format. +// +// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint +// is initialized at NewBuilder time instead of through argument parsing. +// +func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + parsed := handleJSONArgs(args, attributes) + + switch { + case attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + b.runConfig.Entrypoint = strslice.StrSlice(parsed) + case len(parsed) == 0: + // ENTRYPOINT [] + b.runConfig.Entrypoint = nil + default: + // ENTRYPOINT echo hi + b.runConfig.Entrypoint = strslice.StrSlice(append(getShell(b.runConfig), parsed[0])) + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !b.cmdSet { + b.runConfig.Cmd = nil + } + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { + return err + } + + return nil +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// b.runConfig.ExposedPorts for runconfig. +// +func expose(b *Builder, args []string, attributes map[string]bool, original string) error { + portsTab := args + + if len(args) == 0 { + return errAtLeastOneArgument("EXPOSE") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.ExposedPorts == nil { + b.runConfig.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(portsTab) + if err != nil { + return err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + if _, exists := b.runConfig.ExposedPorts[port]; !exists { + b.runConfig.ExposedPorts[port] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func user(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("USER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.runConfig.User = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func volume(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("VOLUME") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.Volumes == nil { + b.runConfig.Volumes = map[string]struct{}{} + } + for _, v := range args { + v = strings.TrimSpace(v) + if v == "" { + return fmt.Errorf("VOLUME specified can not be an empty string") + } + b.runConfig.Volumes[v] = struct{}{} + } + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { + return err + } + return nil +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. +func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("STOPSIGNAL") + } + + sig := args[0] + _, err := signal.ParseSignal(sig) + if err != nil { + return err + } + + b.runConfig.StopSignal = sig + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) +} + +// ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. +// Dockerfile author may optionally set a default value of this variable. +func arg(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("ARG") + } + + var ( + name string + newValue string + hasDefault bool + ) + + arg := args[0] + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + if len(parts[0]) == 0 { + return errBlankCommandNames("ARG") + } + + name = parts[0] + newValue = parts[1] + hasDefault = true + } else { + name = arg + hasDefault = false + } + // add the arg to allowed list of build-time args from this step on. + b.allowedBuildArgs[name] = true + + // If there is a default value associated with this arg then add it to the + // b.buildArgs if one is not already passed to the builder. The args passed + // to builder override the default value of 'arg'. Note that a 'nil' for + // a value means that the user specified "--build-arg FOO" and "FOO" wasn't + // defined as an env var - and in that case we DO want to use the default + // value specified in the ARG cmd. + if baValue, ok := b.options.BuildArgs[name]; (!ok || baValue == nil) && hasDefault { + b.options.BuildArgs[name] = &newValue + } + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) +} + +// SHELL powershell -command +// +// Set the non-default shell to use. +func shell(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + shellSlice := handleJSONArgs(args, attributes) + switch { + case len(shellSlice) == 0: + // SHELL [] + return errAtLeastOneArgument("SHELL") + case attributes["json"]: + // SHELL ["powershell", "-command"] + b.runConfig.Shell = strslice.StrSlice(shellSlice) + default: + // SHELL powershell -command - not JSON + return errNotJSON("SHELL", original) + } + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("SHELL %v", shellSlice)) +} + +func errAtLeastOneArgument(command string) error { + return fmt.Errorf("%s requires at least one argument", command) +} + +func errExactlyOneArgument(command string) error { + return fmt.Errorf("%s requires exactly one argument", command) +} + +func errAtLeastTwoArguments(command string) error { + return fmt.Errorf("%s requires at least two arguments", command) +} + +func errBlankCommandNames(command string) error { + return fmt.Errorf("%s names can not be blank", command) +} + +func errTooManyArguments(command string) error { + return fmt.Errorf("Bad input to %s, too many arguments", command) +} + +// getShell is a helper function which gets the right shell for prefixing the +// shell-form of RUN, ENTRYPOINT and CMD instructions +func getShell(c *container.Config) []string { + if 0 == len(c.Shell) { + return defaultShell[:] + } + return c.Shell[:] +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_test.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_test.go new file mode 100644 index 0000000..f7c57f7 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_test.go @@ -0,0 +1,517 @@ +package dockerfile + +import ( + "fmt" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +type commandWithFunction struct { + name string + function func(args []string) error +} + +func TestCommandsExactlyOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"MAINTAINER", func(args []string) error { return maintainer(nil, args, nil, "") }}, + {"FROM", func(args []string) error { return from(nil, args, nil, "") }}, + {"WORKDIR", func(args []string) error { return workdir(nil, args, nil, "") }}, + {"USER", func(args []string) error { return user(nil, args, nil, "") }}, + {"STOPSIGNAL", func(args []string) error { return stopSignal(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errExactlyOneArgument(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandsAtLeastOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, + {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}, + {"ONBUILD", func(args []string) error { return onbuild(nil, args, nil, "") }}, + {"HEALTHCHECK", func(args []string) error { return healthcheck(nil, args, nil, "") }}, + {"EXPOSE", func(args []string) error { return expose(nil, args, nil, "") }}, + {"VOLUME", func(args []string) error { return volume(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errAtLeastOneArgument(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandsAtLeastTwoArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ADD", func(args []string) error { return add(nil, args, nil, "") }}, + {"COPY", func(args []string) error { return dispatchCopy(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{"arg1"}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errAtLeastTwoArguments(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandsTooManyArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, + {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{"arg1", "arg2", "arg3"}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errTooManyArguments(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandseBlankNames(t *testing.T) { + bflags := &BFlags{} + config := &container.Config{} + + b := &Builder{flags: bflags, runConfig: config, disableCommit: true} + + commands := []commandWithFunction{ + {"ENV", func(args []string) error { return env(b, args, nil, "") }}, + {"LABEL", func(args []string) error { return label(b, args, nil, "") }}, + } + + for _, command := range commands { + err := command.function([]string{"", ""}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errBlankCommandNames(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestEnv2Variables(t *testing.T) { + variables := []string{"var1", "val1", "var2", "val2"} + + bflags := &BFlags{} + config := &container.Config{} + + b := &Builder{flags: bflags, runConfig: config, disableCommit: true} + + if err := env(b, variables, nil, ""); err != nil { + t.Fatalf("Error when executing env: %s", err.Error()) + } + + expectedVar1 := fmt.Sprintf("%s=%s", variables[0], variables[1]) + expectedVar2 := fmt.Sprintf("%s=%s", variables[2], variables[3]) + + if b.runConfig.Env[0] != expectedVar1 { + t.Fatalf("Wrong env output for first variable. Got: %s. Should be: %s", b.runConfig.Env[0], expectedVar1) + } + + if b.runConfig.Env[1] != expectedVar2 { + t.Fatalf("Wrong env output for second variable. Got: %s, Should be: %s", b.runConfig.Env[1], expectedVar2) + } +} + +func TestMaintainer(t *testing.T) { + maintainerEntry := "Some Maintainer " + + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + if err := maintainer(b, []string{maintainerEntry}, nil, ""); err != nil { + t.Fatalf("Error when executing maintainer: %s", err.Error()) + } + + if b.maintainer != maintainerEntry { + t.Fatalf("Maintainer in builder should be set to %s. Got: %s", maintainerEntry, b.maintainer) + } +} + +func TestLabel(t *testing.T) { + labelName := "label" + labelValue := "value" + + labelEntry := []string{labelName, labelValue} + + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + if err := label(b, labelEntry, nil, ""); err != nil { + t.Fatalf("Error when executing label: %s", err.Error()) + } + + if val, ok := b.runConfig.Labels[labelName]; ok { + if val != labelValue { + t.Fatalf("Label %s should have value %s, had %s instead", labelName, labelValue, val) + } + } else { + t.Fatalf("Label %s should be present but it is not", labelName) + } +} + +func TestFrom(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + err := from(b, []string{"scratch"}, nil, "") + + if runtime.GOOS == "windows" { + if err == nil { + t.Fatalf("Error not set on Windows") + } + + expectedError := "Windows does not support FROM scratch" + + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Error message not correct on Windows. Should be: %s, got: %s", expectedError, err.Error()) + } + } else { + if err != nil { + t.Fatalf("Error when executing from: %s", err.Error()) + } + + if b.image != "" { + t.Fatalf("Image shoule be empty, got: %s", b.image) + } + + if b.noBaseImage != true { + t.Fatalf("Image should not have any base image, got: %v", b.noBaseImage) + } + } +} + +func TestOnbuildIllegalTriggers(t *testing.T) { + triggers := []struct{ command, expectedError string }{ + {"ONBUILD", "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed"}, + {"MAINTAINER", "MAINTAINER isn't allowed as an ONBUILD trigger"}, + {"FROM", "FROM isn't allowed as an ONBUILD trigger"}} + + for _, trigger := range triggers { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + err := onbuild(b, []string{trigger.command}, nil, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if !strings.Contains(err.Error(), trigger.expectedError) { + t.Fatalf("Error message not correct. Should be: %s, got: %s", trigger.expectedError, err.Error()) + } + } +} + +func TestOnbuild(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + err := onbuild(b, []string{"ADD", ".", "/app/src"}, nil, "ONBUILD ADD . /app/src") + + if err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + expectedOnbuild := "ADD . /app/src" + + if b.runConfig.OnBuild[0] != expectedOnbuild { + t.Fatalf("Wrong ONBUILD command. Expected: %s, got: %s", expectedOnbuild, b.runConfig.OnBuild[0]) + } +} + +func TestWorkdir(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + workingDir := "/app" + + if runtime.GOOS == "windows" { + workingDir = "C:\app" + } + + err := workdir(b, []string{workingDir}, nil, "") + + if err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.WorkingDir != workingDir { + t.Fatalf("WorkingDir should be set to %s, got %s", workingDir, b.runConfig.WorkingDir) + } + +} + +func TestCmd(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + command := "./executable" + + err := cmd(b, []string{command}, nil, "") + + if err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + var expectedCommand strslice.StrSlice + + if runtime.GOOS == "windows" { + expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command)) + } else { + expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command)) + } + + if !compareStrSlice(b.runConfig.Cmd, expectedCommand) { + t.Fatalf("Command should be set to %s, got %s", command, b.runConfig.Cmd) + } + + if !b.cmdSet { + t.Fatalf("Command should be marked as set") + } +} + +func compareStrSlice(slice1, slice2 strslice.StrSlice) bool { + if len(slice1) != len(slice2) { + return false + } + + for i := range slice1 { + if slice1[i] != slice2[i] { + return false + } + } + + return true +} + +func TestHealthcheckNone(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + if err := healthcheck(b, []string{"NONE"}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Healthcheck == nil { + t.Fatal("Healthcheck should be set, got nil") + } + + expectedTest := strslice.StrSlice(append([]string{"NONE"})) + + if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) { + t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) + } +} + +func TestHealthcheckCmd(t *testing.T) { + b := &Builder{flags: &BFlags{flags: make(map[string]*Flag)}, runConfig: &container.Config{}, disableCommit: true} + + if err := healthcheck(b, []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Healthcheck == nil { + t.Fatal("Healthcheck should be set, got nil") + } + + expectedTest := strslice.StrSlice(append([]string{"CMD-SHELL"}, "curl -f http://localhost/ || exit 1")) + + if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) { + t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) + } +} + +func TestEntrypoint(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + entrypointCmd := "/usr/sbin/nginx" + + if err := entrypoint(b, []string{entrypointCmd}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Entrypoint == nil { + t.Fatalf("Entrypoint should be set") + } + + var expectedEntrypoint strslice.StrSlice + + if runtime.GOOS == "windows" { + expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd)) + } else { + expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd)) + } + + if !compareStrSlice(expectedEntrypoint, b.runConfig.Entrypoint) { + t.Fatalf("Entrypoint command should be set to %s, got %s", expectedEntrypoint, b.runConfig.Entrypoint) + } +} + +func TestExpose(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + exposedPort := "80" + + if err := expose(b, []string{exposedPort}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.ExposedPorts == nil { + t.Fatalf("ExposedPorts should be set") + } + + if len(b.runConfig.ExposedPorts) != 1 { + t.Fatalf("ExposedPorts should contain only 1 element. Got %s", b.runConfig.ExposedPorts) + } + + portsMapping, err := nat.ParsePortSpec(exposedPort) + + if err != nil { + t.Fatalf("Error when parsing port spec: %s", err.Error()) + } + + if _, ok := b.runConfig.ExposedPorts[portsMapping[0].Port]; !ok { + t.Fatalf("Port %s should be present. Got %s", exposedPort, b.runConfig.ExposedPorts) + } +} + +func TestUser(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + userCommand := "foo" + + if err := user(b, []string{userCommand}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.User != userCommand { + t.Fatalf("User should be set to %s, got %s", userCommand, b.runConfig.User) + } +} + +func TestVolume(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + exposedVolume := "/foo" + + if err := volume(b, []string{exposedVolume}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Volumes == nil { + t.Fatalf("Volumes should be set") + } + + if len(b.runConfig.Volumes) != 1 { + t.Fatalf("Volumes should contain only 1 element. Got %s", b.runConfig.Volumes) + } + + if _, ok := b.runConfig.Volumes[exposedVolume]; !ok { + t.Fatalf("Volume %s should be present. Got %s", exposedVolume, b.runConfig.Volumes) + } +} + +func TestStopSignal(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + signal := "SIGKILL" + + if err := stopSignal(b, []string{signal}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.StopSignal != signal { + t.Fatalf("StopSignal should be set to %s, got %s", signal, b.runConfig.StopSignal) + } +} + +func TestArg(t *testing.T) { + buildOptions := &types.ImageBuildOptions{BuildArgs: make(map[string]*string)} + + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true, allowedBuildArgs: make(map[string]bool), options: buildOptions} + + argName := "foo" + argVal := "bar" + argDef := fmt.Sprintf("%s=%s", argName, argVal) + + if err := arg(b, []string{argDef}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + allowed, ok := b.allowedBuildArgs[argName] + + if !ok { + t.Fatalf("%s argument should be allowed as a build arg", argName) + } + + if !allowed { + t.Fatalf("%s argument was present in map but disallowed as a build arg", argName) + } + + val, ok := b.options.BuildArgs[argName] + + if !ok { + t.Fatalf("%s argument should be a build arg", argName) + } + + if *val != "bar" { + t.Fatalf("%s argument should have default value 'bar', got %s", argName, val) + } +} + +func TestShell(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + shellCmd := "powershell" + + attrs := make(map[string]bool) + attrs["json"] = true + + if err := shell(b, []string{shellCmd}, attrs, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Shell == nil { + t.Fatalf("Shell should be set") + } + + expectedShell := strslice.StrSlice([]string{shellCmd}) + + if !compareStrSlice(expectedShell, b.runConfig.Shell) { + t.Fatalf("Shell should be set to %s, got %s", expectedShell, b.runConfig.Shell) + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix.go new file mode 100644 index 0000000..8b0dfc3 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" +) + +// normaliseWorkdir normalises a user requested working directory in a +// platform sematically consistent way. +func normaliseWorkdir(current string, requested string) (string, error) { + if requested == "" { + return "", fmt.Errorf("cannot normalise nothing") + } + current = filepath.FromSlash(current) + requested = filepath.FromSlash(requested) + if !filepath.IsAbs(requested) { + return filepath.Join(string(os.PathSeparator), current, requested), nil + } + return requested, nil +} + +func errNotJSON(command, _ string) error { + return fmt.Errorf("%s requires the arguments to be in JSON form", command) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix_test.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix_test.go new file mode 100644 index 0000000..4aae6b4 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix_test.go @@ -0,0 +1,33 @@ +// +build !windows + +package dockerfile + +import ( + "testing" +) + +func TestNormaliseWorkdir(t *testing.T) { + testCases := []struct{ current, requested, expected, expectedError string }{ + {``, ``, ``, `cannot normalise nothing`}, + {``, `foo`, `/foo`, ``}, + {``, `/foo`, `/foo`, ``}, + {`/foo`, `bar`, `/foo/bar`, ``}, + {`/foo`, `/bar`, `/bar`, ``}, + } + + for _, test := range testCases { + normalised, err := normaliseWorkdir(test.current, test.requested) + + if test.expectedError != "" && err == nil { + t.Fatalf("NormaliseWorkdir should return an error %s, got nil", test.expectedError) + } + + if test.expectedError != "" && err.Error() != test.expectedError { + t.Fatalf("NormaliseWorkdir returned wrong error. Expected %s, got %s", test.expectedError, err.Error()) + } + + if normalised != test.expected { + t.Fatalf("NormaliseWorkdir error. Expected %s for current %s and requested %s, got %s", test.expected, test.current, test.requested, normalised) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows.go new file mode 100644 index 0000000..e890c3a --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows.go @@ -0,0 +1,86 @@ +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/pkg/system" +) + +var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`) + +// normaliseWorkdir normalises a user requested working directory in a +// platform sematically consistent way. +func normaliseWorkdir(current string, requested string) (string, error) { + if requested == "" { + return "", fmt.Errorf("cannot normalise nothing") + } + + // `filepath.Clean` will replace "" with "." so skip in that case + if current != "" { + current = filepath.Clean(current) + } + if requested != "" { + requested = filepath.Clean(requested) + } + + // If either current or requested in Windows is: + // C: + // C:. + // then an error will be thrown as the definition for the above + // refers to `current directory on drive C:` + // Since filepath.Clean() will automatically normalize the above + // to `C:.`, we only need to check the last format + if pattern.MatchString(current) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current) + } + if pattern.MatchString(requested) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested) + } + + // Target semantics is C:\somefolder, specifically in the format: + // UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already + // guaranteed that `current`, if set, is consistent. This allows us to + // cope correctly with any of the following in a Dockerfile: + // WORKDIR a --> C:\a + // WORKDIR c:\\foo --> C:\foo + // WORKDIR \\foo --> C:\foo + // WORKDIR /foo --> C:\foo + // WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar + // WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar + if len(current) == 0 || system.IsAbs(requested) { + if (requested[0] == os.PathSeparator) || + (len(requested) > 1 && string(requested[1]) != ":") || + (len(requested) == 1) { + requested = filepath.Join(`C:\`, requested) + } + } else { + requested = filepath.Join(current, requested) + } + // Upper-case drive letter + return (strings.ToUpper(string(requested[0])) + requested[1:]), nil +} + +func errNotJSON(command, original string) error { + // For Windows users, give a hint if it looks like it might contain + // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], + // as JSON must be escaped. Unfortunate... + // + // Specifically looking for quote-driveletter-colon-backslash, there's no + // double backslash and a [] pair. No, this is not perfect, but it doesn't + // have to be. It's simply a hint to make life a little easier. + extra := "" + original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) + if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && + !strings.Contains(original, `\\`) && + strings.Contains(original, "[") && + strings.Contains(original, "]") { + extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) + } + return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows_test.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows_test.go new file mode 100644 index 0000000..3319c06 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows_test.go @@ -0,0 +1,40 @@ +// +build windows + +package dockerfile + +import "testing" + +func TestNormaliseWorkdir(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, ``, ``, `cannot normalise nothing`}, + {``, `C:`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {``, `C:.`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:.`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {``, `a`, `C:\a`, ``}, + {``, `c:\foo`, `C:\foo`, ``}, + {``, `c:\\foo`, `C:\foo`, ``}, + {``, `\foo`, `C:\foo`, ``}, + {``, `\\foo`, `C:\foo`, ``}, + {``, `/foo`, `C:\foo`, ``}, + {``, `C:/foo`, `C:\foo`, ``}, + {`C:\foo`, `bar`, `C:\foo\bar`, ``}, + {`C:\foo`, `/bar`, `C:\bar`, ``}, + {`C:\foo`, `\bar`, `C:\bar`, ``}, + } + for _, i := range tests { + r, e := normaliseWorkdir(i.current, i.requested) + + if i.etext != "" && e == nil { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got no error", i.etext, i.current, i.requested) + } + + if i.etext != "" && e.Error() != i.etext { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got %s", i.etext, i.current, i.requested, e.Error()) + } + + if r != i.expected { + t.Fatalf("TestNormaliseWorkingDir Expected '%s' for '%s' '%s', got '%s'", i.expected, i.current, i.requested, r) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/envVarTest b/vendor/github.com/moby/moby/builder/dockerfile/envVarTest new file mode 100644 index 0000000..067dca9 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/envVarTest @@ -0,0 +1,116 @@ +A|hello | hello +A|he'll'o | hello +A|he'llo | hello +A|he\'llo | he'llo +A|he\\'llo | he\llo +A|abc\tdef | abctdef +A|"abc\tdef" | abc\tdef +A|'abc\tdef' | abc\tdef +A|hello\ | hello +A|hello\\ | hello\ +A|"hello | hello +A|"hello\" | hello" +A|"hel'lo" | hel'lo +A|'hello | hello +A|'hello\' | hello\ +A|"''" | '' +A|$. | $. +A|$1 | +A|he$1x | hex +A|he$.x | he$.x +# Next one is different on Windows as $pwd==$PWD +U|he$pwd. | he. +W|he$pwd. | he/home. +A|he$PWD | he/home +A|he\$PWD | he$PWD +A|he\\$PWD | he\/home +A|he\${} | he${} +A|he\${}xx | he${}xx +A|he${} | he +A|he${}xx | hexx +A|he${hi} | he +A|he${hi}xx | hexx +A|he${PWD} | he/home +A|he${.} | error +A|he${XXX:-000}xx | he000xx +A|he${PWD:-000}xx | he/homexx +A|he${XXX:-$PWD}xx | he/homexx +A|he${XXX:-${PWD:-yyy}}xx | he/homexx +A|he${XXX:-${YYY:-yyy}}xx | heyyyxx +A|he${XXX:YYY} | error +A|he${XXX:+${PWD}}xx | hexx +A|he${PWD:+${XXX}}xx | hexx +A|he${PWD:+${SHELL}}xx | hebashxx +A|he${XXX:+000}xx | hexx +A|he${PWD:+000}xx | he000xx +A|'he${XX}' | he${XX} +A|"he${PWD}" | he/home +A|"he'$PWD'" | he'/home' +A|"$PWD" | /home +A|'$PWD' | $PWD +A|'\$PWD' | \$PWD +A|'"hello"' | "hello" +A|he\$PWD | he$PWD +A|"he\$PWD" | he$PWD +A|'he\$PWD' | he\$PWD +A|he${PWD | error +A|he${PWD:=000}xx | error +A|he${PWD:+${PWD}:}xx | he/home:xx +A|he${XXX:-\$PWD:}xx | he$PWD:xx +A|he${XXX:-\${PWD}z}xx | he${PWDz}xx +A|안녕하세요 | 안녕하세요 +A|안'녕'하세요 | 안녕하세요 +A|안'녕하세요 | 안녕하세요 +A|안녕\'하세요 | 안녕'하세요 +A|안\\'녕하세요 | 안\녕하세요 +A|안녕\t하세요 | 안녕t하세요 +A|"안녕\t하세요" | 안녕\t하세요 +A|'안녕\t하세요 | 안녕\t하세요 +A|안녕하세요\ | 안녕하세요 +A|안녕하세요\\ | 안녕하세요\ +A|"안녕하세요 | 안녕하세요 +A|"안녕하세요\" | 안녕하세요" +A|"안녕'하세요" | 안녕'하세요 +A|'안녕하세요 | 안녕하세요 +A|'안녕하세요\' | 안녕하세요\ +A|안녕$1x | 안녕x +A|안녕$.x | 안녕$.x +# Next one is different on Windows as $pwd==$PWD +U|안녕$pwd. | 안녕. +W|안녕$pwd. | 안녕/home. +A|안녕$PWD | 안녕/home +A|안녕\$PWD | 안녕$PWD +A|안녕\\$PWD | 안녕\/home +A|안녕\${} | 안녕${} +A|안녕\${}xx | 안녕${}xx +A|안녕${} | 안녕 +A|안녕${}xx | 안녕xx +A|안녕${hi} | 안녕 +A|안녕${hi}xx | 안녕xx +A|안녕${PWD} | 안녕/home +A|안녕${.} | error +A|안녕${XXX:-000}xx | 안녕000xx +A|안녕${PWD:-000}xx | 안녕/homexx +A|안녕${XXX:-$PWD}xx | 안녕/homexx +A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +A|안녕${XXX:YYY} | error +A|안녕${XXX:+${PWD}}xx | 안녕xx +A|안녕${PWD:+${XXX}}xx | 안녕xx +A|안녕${PWD:+${SHELL}}xx | 안녕bashxx +A|안녕${XXX:+000}xx | 안녕xx +A|안녕${PWD:+000}xx | 안녕000xx +A|'안녕${XX}' | 안녕${XX} +A|"안녕${PWD}" | 안녕/home +A|"안녕'$PWD'" | 안녕'/home' +A|'"안녕"' | "안녕" +A|안녕\$PWD | 안녕$PWD +A|"안녕\$PWD" | 안녕$PWD +A|'안녕\$PWD' | 안녕\$PWD +A|안녕${PWD | error +A|안녕${PWD:=000}xx | error +A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx +A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +A|$KOREAN | 한국어 +A|안녕$KOREAN | 안녕한국어 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/evaluator.go b/vendor/github.com/moby/moby/builder/dockerfile/evaluator.go new file mode 100644 index 0000000..f5997c9 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/evaluator.go @@ -0,0 +1,244 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling NewBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). +package dockerfile + +import ( + "fmt" + "strings" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]bool{ + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, +} + +// Certain commands are allowed to have their args split into more +// words after env var replacements. Meaning: +// ENV foo="123 456" +// EXPOSE $foo +// should result in the same thing as: +// EXPOSE 123 456 +// and not treat "123 456" as a single word. +// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. +// Quotes will cause it to still be treated as single word. +var allowWordExpansion = map[string]bool{ + command.Expose: true, +} + +var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error + +func init() { + evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ + command.Add: add, + command.Arg: arg, + command.Cmd: cmd, + command.Copy: dispatchCopy, // copy() is a go builtin + command.Entrypoint: entrypoint, + command.Env: env, + command.Expose: expose, + command.From: from, + command.Healthcheck: healthcheck, + command.Label: label, + command.Maintainer: maintainer, + command.Onbuild: onbuild, + command.Run: run, + command.Shell: shell, + command.StopSignal: stopSignal, + command.User: user, + command.Volume: volume, + command.Workdir: workdir, + } +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. +func (b *Builder) dispatch(stepN int, stepTotal int, ast *parser.Node) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + attrs := ast.Attributes + original := ast.Original + flags := ast.Flags + strList := []string{} + msg := fmt.Sprintf("Step %d/%d : %s", stepN+1, stepTotal, upperCasedCmd) + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + if cmd == "onbuild" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + ast = ast.Next.Children[0] + strList = append(strList, ast.Value) + msg += " " + ast.Value + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + } + + // count the number of nodes that we are going to traverse first + // so we can pre-create the argument and message array. This speeds up the + // allocation of those list a lot when they have a lot of arguments + cursor := ast + var n int + for cursor.Next != nil { + cursor = cursor.Next + n++ + } + msgList := make([]string, n) + + var i int + // Append the build-time args to config-environment. + // This allows builder config to override the variables, making the behavior similar to + // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build + // context. But `ENV foo $foo` will use the value from build context if one + // isn't already been defined by a previous ENV primitive. + // Note, we get this behavior because we know that ProcessWord() will + // stop on the first occurrence of a variable name and not notice + // a subsequent one. So, putting the buildArgs list after the Config.Env + // list, in 'envs', is safe. + envs := b.runConfig.Env + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + envs = append(envs, fmt.Sprintf("%s=%s", key, *val)) + } + for ast.Next != nil { + ast = ast.Next + var str string + str = ast.Value + if replaceEnvAllowed[cmd] { + var err error + var words []string + + if allowWordExpansion[cmd] { + words, err = ProcessWords(str, envs, b.directive.EscapeToken) + if err != nil { + return err + } + strList = append(strList, words...) + } else { + str, err = ProcessWord(str, envs, b.directive.EscapeToken) + if err != nil { + return err + } + strList = append(strList, str) + } + } else { + strList = append(strList, str) + } + msgList[i] = ast.Value + i++ + } + + msg += " " + strings.Join(msgList, " ") + fmt.Fprintln(b.Stdout, msg) + + // XXX yes, we skip any cmds that are not valid; the parser should have + // picked these out already. + if f, ok := evaluateTable[cmd]; ok { + b.flags = NewBFlags() + b.flags.Args = flags + return f(b, strList, attrs, original) + } + + return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +} + +// checkDispatch does a simple check for syntax errors of the Dockerfile. +// Because some of the instructions can only be validated through runtime, +// arg, env, etc., this syntax check will not be complete and could not replace +// the runtime check. Instead, this function is only a helper that allows +// user to find out the obvious error in Dockerfile earlier on. +// onbuild bool: indicate if instruction XXX is part of `ONBUILD XXX` trigger +func (b *Builder) checkDispatch(ast *parser.Node, onbuild bool) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + // The instruction itself is ONBUILD, we will make sure it follows with at + // least one argument + if upperCasedCmd == "ONBUILD" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + } + + // The instruction is part of ONBUILD trigger (not the instruction itself) + if onbuild { + switch upperCasedCmd { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd) + } + } + + if _, ok := evaluateTable[cmd]; ok { + return nil + } + + return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/evaluator_test.go b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_test.go new file mode 100644 index 0000000..4340a2f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_test.go @@ -0,0 +1,197 @@ +package dockerfile + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +type dispatchTestCase struct { + name, dockerfile, expectedError string + files map[string]string +} + +func init() { + reexec.Init() +} + +func initDispatchTestCases() []dispatchTestCase { + dispatchTestCases := []dispatchTestCase{{ + name: "copyEmptyWhitespace", + dockerfile: `COPY + quux \ + bar`, + expectedError: "COPY requires at least two arguments", + }, + { + name: "ONBUILD forbidden FROM", + dockerfile: "ONBUILD FROM scratch", + expectedError: "FROM isn't allowed as an ONBUILD trigger", + files: nil, + }, + { + name: "ONBUILD forbidden MAINTAINER", + dockerfile: "ONBUILD MAINTAINER docker.io", + expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger", + files: nil, + }, + { + name: "ARG two arguments", + dockerfile: "ARG foo bar", + expectedError: "ARG requires exactly one argument", + files: nil, + }, + { + name: "MAINTAINER unknown flag", + dockerfile: "MAINTAINER --boo joe@example.com", + expectedError: "Unknown flag: boo", + files: nil, + }, + { + name: "ADD multiple files to file", + dockerfile: "ADD file1.txt file2.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON ADD multiple files to file", + dockerfile: `ADD ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard ADD multiple files to file", + dockerfile: "ADD file*.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard JSON ADD multiple files to file", + dockerfile: `ADD ["file*.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file", + dockerfile: "COPY file1.txt file2.txt test", + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON COPY multiple files to file", + dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "ADD multiple files to file with whitespace", + dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file with whitespace", + dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY wildcard no files", + dockerfile: `COPY file*.txt /tmp/`, + expectedError: "No source files were specified", + files: nil, + }, + { + name: "COPY url", + dockerfile: `COPY https://index.docker.io/robots.txt /`, + expectedError: "Source can't be a URL for COPY", + files: nil, + }, + { + name: "Chaining ONBUILD", + dockerfile: `ONBUILD ONBUILD RUN touch foobar`, + expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", + files: nil, + }, + { + name: "Invalid instruction", + dockerfile: `foo bar`, + expectedError: "Unknown instruction: FOO", + files: nil, + }} + + return dispatchTestCases +} + +func TestDispatch(t *testing.T) { + testCases := initDispatchTestCases() + + for _, testCase := range testCases { + executeTestCase(t, testCase) + } +} + +func executeTestCase(t *testing.T, testCase dispatchTestCase) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + for filename, content := range testCase.files { + createTestTempFile(t, contextDir, filename, content, 0777) + } + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar stream: %s", err) + } + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + context, err := builder.MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when creating tar context: %s", err) + } + + defer func() { + if err = context.Close(); err != nil { + t.Fatalf("Error when closing tar context: %s", err) + } + }() + + r := strings.NewReader(testCase.dockerfile) + d := parser.Directive{} + parser.SetEscapeToken(parser.DefaultEscapeToken, &d) + n, err := parser.Parse(r, &d) + + if err != nil { + t.Fatalf("Error when parsing Dockerfile: %s", err) + } + + config := &container.Config{} + options := &types.ImageBuildOptions{} + + b := &Builder{runConfig: config, options: options, Stdout: ioutil.Discard, context: context} + + err = b.dispatch(0, len(n.Children), n.Children[0]) + + if err == nil { + t.Fatalf("No error when executing test %s", testCase.name) + } + + if !strings.Contains(err.Error(), testCase.expectedError) { + t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", testCase.expectedError, err.Error()) + } + +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/evaluator_unix.go b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_unix.go new file mode 100644 index 0000000..28fd5b1 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package dockerfile + +// platformSupports is a short-term function to give users a quality error +// message if a Dockerfile uses a command not supported on the platform. +func platformSupports(command string) error { + return nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/evaluator_windows.go b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_windows.go new file mode 100644 index 0000000..72483a2 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_windows.go @@ -0,0 +1,13 @@ +package dockerfile + +import "fmt" + +// platformSupports is gives users a quality error message if a Dockerfile uses +// a command not supported on the platform. +func platformSupports(command string) error { + switch command { + case "stopsignal": + return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) + } + return nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals.go b/vendor/github.com/moby/moby/builder/dockerfile/internals.go new file mode 100644 index 0000000..6f0a367 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals.go @@ -0,0 +1,669 @@ +package dockerfile + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. + +import ( + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/runconfig/opts" +) + +func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { + if b.disableCommit { + return nil + } + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.runConfig.Image = b.image + + if id == "" { + cmd := b.runConfig.Cmd + b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) ", comment)) + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } else if hit { + return nil + } + id, err = b.create() + if err != nil { + return err + } + } + + // Note: Actually copy the struct + autoConfig := *b.runConfig + autoConfig.Cmd = autoCmd + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Author: b.maintainer, + Pause: true, + Config: &autoConfig, + }, + } + + // Commit the container + imageID, err := b.docker.Commit(id, commitCfg) + if err != nil { + return err + } + + b.image = imageID + return nil +} + +type copyInfo struct { + builder.FileInfo + decompress bool +} + +func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { + if b.context == nil { + return fmt.Errorf("No context given. Impossible to use %s", cmdName) + } + + if len(args) < 2 { + return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + // Work in daemon-specific filepath semantics + dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest + + b.runConfig.Image = b.image + + var infos []copyInfo + + // Loop through each src file and calculate the info we need to + // do the copy (e.g. hash value if cached). Don't actually do + // the copy until we've looked at all src files + var err error + for _, orig := range args[0 : len(args)-1] { + var fi builder.FileInfo + decompress := allowLocalDecompression + if urlutil.IsURL(orig) { + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + fi, err = b.download(orig) + if err != nil { + return err + } + defer os.RemoveAll(filepath.Dir(fi.Path())) + decompress = false + infos = append(infos, copyInfo{fi, decompress}) + continue + } + // not a URL + subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) + if err != nil { + return err + } + + infos = append(infos, subInfos...) + } + + if len(infos) == 0 { + return fmt.Errorf("No source files were specified") + } + if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { + return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + + // For backwards compat, if there's just one info then use it as the + // cache look-up string, otherwise hash 'em all into one + var srcHash string + var origPaths string + + if len(infos) == 1 { + fi := infos[0].FileInfo + origPaths = fi.Name() + if hfi, ok := fi.(builder.Hashed); ok { + srcHash = hfi.Hash() + } + } else { + var hashs []string + var origs []string + for _, info := range infos { + fi := info.FileInfo + origs = append(origs, fi.Name()) + if hfi, ok := fi.(builder.Hashed); ok { + hashs = append(hashs, hfi.Hash()) + } + } + hasher := sha256.New() + hasher.Write([]byte(strings.Join(hashs, ","))) + srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) + origPaths = strings.Join(origs, " ") + } + + cmd := b.runConfig.Cmd + b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), fmt.Sprintf("#(nop) %s %s in %s ", cmdName, srcHash, dest))) + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + if hit, err := b.probeCache(); err != nil { + return err + } else if hit { + return nil + } + + container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + + comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) + + // Twiddle the destination when its a relative path - meaning, make it + // relative to the WORKINGDIR + if dest, err = normaliseDest(cmdName, b.runConfig.WorkingDir, dest); err != nil { + return err + } + + for _, info := range infos { + if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil { + return err + } + } + + return b.commit(container.ID, cmd, comment) +} + +func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { + // get filename from URL + u, err := url.Parse(srcURL) + if err != nil { + return + } + path := filepath.FromSlash(u.Path) // Ensure in platform semantics + if strings.HasSuffix(path, string(os.PathSeparator)) { + path = path[:len(path)-1] + } + parts := strings.Split(path, string(os.PathSeparator)) + filename := parts[len(parts)-1] + if filename == "" { + err = fmt.Errorf("cannot determine filename from url: %s", u) + return + } + + // Initiate the download + resp, err := httputils.Download(srcURL) + if err != nil { + return + } + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + tmpFileName := filepath.Join(tmpDir, filename) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) + progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + fmt.Fprintln(b.Stdout) + // ignoring error because the file was already opened successfully + tmpFileSt, err := tmpFile.Stat() + if err != nil { + tmpFile.Close() + return + } + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + tmpFile.Close() + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + // Calc the checksum, even if we're using the cache + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + if err != nil { + return + } + if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { + return + } + hash := tarSum.Sum(nil) + r.Close() + return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil +} + +func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { + + // Work in daemon-specific OS filepath semantics + origPath = filepath.FromSlash(origPath) + + if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + var copyInfos []copyInfo + if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + if info.Name() == "" { + // Why are we doing this check? + return nil + } + if match, _ := filepath.Match(origPath, path); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) + return nil + }); err != nil { + return nil, err + } + return copyInfos, nil + } + + // Must be a dir or a file + + statPath, fi, err := b.context.Stat(origPath) + if err != nil { + return nil, err + } + + copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} + + hfi, handleHash := fi.(builder.Hashed) + if !handleHash { + return copyInfos, nil + } + + // Deal with the single file case + if !fi.IsDir() { + hfi.SetHash("file:" + hfi.Hash()) + return copyInfos, nil + } + // Must be a dir + var subfiles []string + err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + // we already checked handleHash above + subfiles = append(subfiles, info.(builder.Hashed).Hash()) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) + + return copyInfos, nil +} + +func (b *Builder) processImageFrom(img builder.Image) error { + if img != nil { + b.image = img.ImageID() + + if img.RunConfig() != nil { + b.runConfig = img.RunConfig() + } + } + + // Check to see if we have a default PATH, note that windows won't + // have one as its set by HCS + if system.DefaultPathEnv != "" { + // Convert the slice of strings that represent the current list + // of env vars into a map so we can see if PATH is already set. + // If its not set then go ahead and give it our default value + configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) + if _, ok := configEnv["PATH"]; !ok { + b.runConfig.Env = append(b.runConfig.Env, + "PATH="+system.DefaultPathEnv) + } + } + + if img == nil { + // Typically this means they used "FROM scratch" + return nil + } + + // Process ONBUILD triggers if they exist + if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := b.runConfig.OnBuild + b.runConfig.OnBuild = []string{} + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + ast, err := parser.Parse(strings.NewReader(step), &b.directive) + if err != nil { + return err + } + + total := len(ast.Children) + for _, n := range ast.Children { + if err := b.checkDispatch(n, true); err != nil { + return err + } + } + for i, n := range ast.Children { + if err := b.dispatch(i, total, n); err != nil { + return err + } + } + } + + return nil +} + +// probeCache checks if cache match can be found for current build instruction. +// If an image is found, probeCache returns `(true, nil)`. +// If no image is found, it returns `(false, nil)`. +// If there is any error, it returns `(false, err)`. +func (b *Builder) probeCache() (bool, error) { + c := b.imageCache + if c == nil || b.options.NoCache || b.cacheBusted { + return false, nil + } + cache, err := c.GetCache(b.image, b.runConfig) + if err != nil { + return false, err + } + if len(cache) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) + b.cacheBusted = true + return false, nil + } + + fmt.Fprintf(b.Stdout, " ---> Using cache\n") + logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) + b.image = string(cache) + + return true, nil +} + +func (b *Builder) create() (string, error) { + if b.image == "" && !b.noBaseImage { + return "", fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.runConfig.Image = b.image + + resources := container.Resources{ + CgroupParent: b.options.CgroupParent, + CPUShares: b.options.CPUShares, + CPUPeriod: b.options.CPUPeriod, + CPUQuota: b.options.CPUQuota, + CpusetCpus: b.options.CPUSetCPUs, + CpusetMems: b.options.CPUSetMems, + Memory: b.options.Memory, + MemorySwap: b.options.MemorySwap, + Ulimits: b.options.Ulimits, + } + + // TODO: why not embed a hostconfig in builder? + hostConfig := &container.HostConfig{ + SecurityOpt: b.options.SecurityOpt, + Isolation: b.options.Isolation, + ShmSize: b.options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(b.options.NetworkMode), + } + + config := *b.runConfig + + // Create the container + c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ + Config: b.runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return "", err + } + for _, warning := range c.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + + b.tmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { + return "", err + } + + return c.ID, nil +} + +var errCancelled = errors.New("build cancelled") + +func (b *Builder) run(cID string) (err error) { + errCh := make(chan error) + go func() { + errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) + }() + + finished := make(chan struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-b.clientCtx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + b.docker.ContainerKill(cID, 0) + b.removeContainer(cID) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() + + if err := b.docker.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got an error from ContainerStart: %v", + cancelErr, err) + } + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got an error from errCh: %v", + cancelErr, err) + } + return err + } + + if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d", + cancelErr, ret) + } + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + return &jsonmessage.JSONError{ + Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), + Code: ret, + } + } + close(finished) + return <-cancelErrCh +} + +func (b *Builder) removeContainer(c string) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := b.docker.ContainerRm(c, rmConfig); err != nil { + fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) + return err + } + return nil +} + +func (b *Builder) clearTmp() { + for c := range b.tmpContainers { + if err := b.removeContainer(c); err != nil { + return + } + delete(b.tmpContainers, c) + fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) + } +} + +// readDockerfile reads a Dockerfile from the current context. +func (b *Builder) readDockerfile() error { + // If no -f was specified then look for 'Dockerfile'. If we can't find + // that then look for 'dockerfile'. If neither are found then default + // back to 'Dockerfile' and use that in the error message. + if b.options.Dockerfile == "" { + b.options.Dockerfile = builder.DefaultDockerfileName + if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { + lowercase := strings.ToLower(b.options.Dockerfile) + if _, _, err := b.context.Stat(lowercase); err == nil { + b.options.Dockerfile = lowercase + } + } + } + + err := b.parseDockerfile() + + if err != nil { + return err + } + + // After the Dockerfile has been parsed, we need to check the .dockerignore + // file for either "Dockerfile" or ".dockerignore", and if either are + // present then erase them from the build context. These files should never + // have been sent from the client but we did send them to make sure that + // we had the Dockerfile to actually parse, and then we also need the + // .dockerignore file to know whether either file should be removed. + // Note that this assumes the Dockerfile has been read into memory and + // is now safe to be removed. + if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { + dockerIgnore.Process([]string{b.options.Dockerfile}) + } + return nil +} + +func (b *Builder) parseDockerfile() error { + f, err := b.context.Open(b.options.Dockerfile) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) + } + return err + } + defer f.Close() + if f, ok := f.(*os.File); ok { + // ignoring error because Open already succeeded + fi, err := f.Stat() + if err != nil { + return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) + } + if fi.Size() == 0 { + return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) + } + } + b.dockerfile, err = parser.Parse(f, &b.directive) + if err != nil { + return err + } + + return nil +} + +// determine if build arg is part of built-in args or user +// defined args in Dockerfile at any point in time. +func (b *Builder) isBuildArgAllowed(arg string) bool { + if _, ok := BuiltinAllowedBuildArgs[arg]; ok { + return true + } + if _, ok := b.allowedBuildArgs[arg]; ok { + return true + } + return false +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals_test.go b/vendor/github.com/moby/moby/builder/dockerfile/internals_test.go new file mode 100644 index 0000000..d170d8e --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals_test.go @@ -0,0 +1,95 @@ +package dockerfile + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" +) + +func TestEmptyDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) + + readAndCheckDockerfile(t, "emptyDockefile", contextDir, "", "The Dockerfile (Dockerfile) cannot be empty") +} + +func TestSymlinkDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestSymlink(t, contextDir, builder.DefaultDockerfileName, "/etc/passwd") + + // The reason the error is "Cannot locate specified Dockerfile" is because + // in the builder, the symlink is resolved within the context, therefore + // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is + // a nonexistent file. + expectedError := fmt.Sprintf("Cannot locate specified Dockerfile: %s", builder.DefaultDockerfileName) + + readAndCheckDockerfile(t, "symlinkDockerfile", contextDir, builder.DefaultDockerfileName, expectedError) +} + +func TestDockerfileOutsideTheBuildContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Forbidden path outside the build context" + + readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError) +} + +func TestNonExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Cannot locate specified Dockerfile: Dockerfile" + + readAndCheckDockerfile(t, "NonExistingDockerfile", contextDir, "Dockerfile", expectedError) +} + +func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar stream: %s", err) + } + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + context, err := builder.MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when creating tar context: %s", err) + } + + defer func() { + if err = context.Close(); err != nil { + t.Fatalf("Error when closing tar context: %s", err) + } + }() + + options := &types.ImageBuildOptions{ + Dockerfile: dockerfilePath, + } + + b := &Builder{options: options, context: context} + + err = b.readDockerfile() + + if err == nil { + t.Fatalf("No error when executing test: %s", testName) + } + + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", expectedError, err.Error()) + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals_unix.go b/vendor/github.com/moby/moby/builder/dockerfile/internals_unix.go new file mode 100644 index 0000000..a8a47c3 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals_unix.go @@ -0,0 +1,38 @@ +// +build !windows + +package dockerfile + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normaliseDest(cmdName, workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) + if !system.IsAbs(requested) { + dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals_windows.go b/vendor/github.com/moby/moby/builder/dockerfile/internals_windows.go new file mode 100644 index 0000000..f60b112 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals_windows.go @@ -0,0 +1,66 @@ +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normaliseDest(cmdName, workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) + + // We are guaranteed that the working directory is already consistent, + // However, Windows also has, for now, the limitation that ADD/COPY can + // only be done to the system drive, not any drives that might be present + // as a result of a bind mount. + // + // So... if the path requested is Linux-style absolute (/foo or \\foo), + // we assume it is the system drive. If it is a Windows-style absolute + // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we + // strip any configured working directories drive letter so that it + // can be subsequently legitimately converted to a Windows volume-style + // pathname. + + // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as + // we only want to validate where the DriveColon part has been supplied. + if filepath.IsAbs(dest) { + if strings.ToUpper(string(dest[0])) != "C" { + return "", fmt.Errorf("Windows does not support %s with a destinations not on the system drive (C:)", cmdName) + } + dest = dest[2:] // Strip the drive letter + } + + // Cannot handle relative where WorkingDir is not the system drive. + if len(workingDir) > 0 { + if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { + return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) + } + if !system.IsAbs(dest) { + if string(workingDir[0]) != "C" { + return "", fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName) + } + dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals_windows_test.go b/vendor/github.com/moby/moby/builder/dockerfile/internals_windows_test.go new file mode 100644 index 0000000..868a667 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals_windows_test.go @@ -0,0 +1,51 @@ +// +build windows + +package dockerfile + +import "testing" + +func TestNormaliseDest(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, `D:\`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, + {``, `e:/`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, + {`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`}, + {`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`}, + {`C`, ``, ``, `Current WorkingDir C is not platform consistent`}, + {`D:\`, `.`, ``, "Windows does not support TEST with relative paths when WORKDIR is not the system drive"}, + {``, `D`, `D`, ``}, + {``, `./a1`, `.\a1`, ``}, + {``, `.\b1`, `.\b1`, ``}, + {``, `/`, `\`, ``}, + {``, `\`, `\`, ``}, + {``, `c:/`, `\`, ``}, + {``, `c:\`, `\`, ``}, + {``, `.`, `.`, ``}, + {`C:\wdd`, `./a1`, `\wdd\a1`, ``}, + {`C:\wde`, `.\b1`, `\wde\b1`, ``}, + {`C:\wdf`, `/`, `\`, ``}, + {`C:\wdg`, `\`, `\`, ``}, + {`C:\wdh`, `c:/`, `\`, ``}, + {`C:\wdi`, `c:\`, `\`, ``}, + {`C:\wdj`, `.`, `\wdj`, ``}, + {`C:\wdk`, `foo/bar`, `\wdk\foo\bar`, ``}, + {`C:\wdl`, `foo\bar`, `\wdl\foo\bar`, ``}, + {`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``}, + {`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``}, + } + for _, i := range tests { + got, err := normaliseDest("TEST", i.current, i.requested) + if err != nil && i.etext == "" { + t.Fatalf("TestNormaliseDest Got unexpected error %q for %s %s. ", err.Error(), i.current, i.requested) + } + if i.etext != "" && ((err == nil) || (err != nil && err.Error() != i.etext)) { + if err == nil { + t.Fatalf("TestNormaliseDest Expected an error for %s %s but didn't get one", i.current, i.requested) + } else { + t.Fatalf("TestNormaliseDest Wrong error text for %s %s - %s", i.current, i.requested, err.Error()) + } + } + if i.etext == "" && got != i.expected { + t.Fatalf("TestNormaliseDest Expected %q for %q and %q. Got %q", i.expected, i.current, i.requested, got) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/dumper/main.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/dumper/main.go new file mode 100644 index 0000000..fff3046 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/dumper/main.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/builder/dockerfile/parser" +) + +func main() { + var f *os.File + var err error + + if len(os.Args) < 2 { + fmt.Println("please supply filename(s)") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err = os.Open(fn) + if err != nil { + panic(err) + } + defer f.Close() + + d := parser.Directive{LookingForDirectives: true} + parser.SetEscapeToken(parser.DefaultEscapeToken, &d) + + ast, err := parser.Parse(f, &d) + if err != nil { + panic(err) + } else { + fmt.Println(ast.Dump()) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/json_test.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/json_test.go new file mode 100644 index 0000000..60d74d9 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/json_test.go @@ -0,0 +1,61 @@ +package parser + +import ( + "testing" +) + +var invalidJSONArraysOfStrings = []string{ + `["a",42,"b"]`, + `["a",123.456,"b"]`, + `["a",{},"b"]`, + `["a",{"c": "d"},"b"]`, + `["a",["c"],"b"]`, + `["a",true,"b"]`, + `["a",false,"b"]`, + `["a",null,"b"]`, +} + +var validJSONArraysOfStrings = map[string][]string{ + `[]`: {}, + `[""]`: {""}, + `["a"]`: {"a"}, + `["a","b"]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + ` [ "a", "b" ] `: {"a", "b"}, + `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, +} + +func TestJSONArraysOfStrings(t *testing.T) { + for json, expected := range validJSONArraysOfStrings { + d := Directive{} + SetEscapeToken(DefaultEscapeToken, &d) + + if node, _, err := parseJSON(json, &d); err != nil { + t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) + } else { + i := 0 + for node != nil { + if i >= len(expected) { + t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) + } + if node.Value != expected[i] { + t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) + } + node = node.Next + i++ + } + if i != len(expected) { + t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) + } + } + } + for _, json := range invalidJSONArraysOfStrings { + d := Directive{} + SetEscapeToken(DefaultEscapeToken, &d) + + if _, _, err := parseJSON(json, &d); err != errDockerfileNotStringArray { + t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers.go new file mode 100644 index 0000000..d2bf2b0 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers.go @@ -0,0 +1,361 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + _, child, err := ParseLine(rest, d, false) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. +// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). +func parseWords(rest string, d *Directive) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + var chWidth int + + for pos := 0; pos <= len(rest); pos += chWidth { + if pos != len(rest) { + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == d.EscapeToken { + if pos+chWidth == len(rest) { + continue // just skip an escape token at end of line + } + // If we're not quoted and we see an escape token, then always just + // add the escape token plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos += chWidth + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // The escape token is special except for ' quotes - can't escape anything for ' + if ch == d.EscapeToken && quote != '\'' { + if pos+chWidth == len(rest) { + phase = inWord + continue // just skip the escape token at end + } + pos += chWidth + word += string(ch) + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string, d *Directive) (*Node, map[string]bool, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var rootnode *Node + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + node := &Node{} + rootnode = node + strs := tokenWhitespace.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf(key + " must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + } else { + var prevNode *Node + for i, word := range words { + if !strings.Contains(word, "=") { + return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) + } + parts := strings.SplitN(word, "=", 2) + + name := &Node{} + value := &Node{} + + name.Next = value + name.Value = parts[0] + value.Value = parts[1] + + if i == 0 { + rootnode = name + } else { + prevNode.Next = name + } + prevNode = value + } + } + + return rootnode, nil, nil +} + +func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { + return parseNameVal(rest, "ENV", d) +} + +func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { + return parseNameVal(rest, "LABEL", d) +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. +// Note that this is a stricter format than the old format of assignment, +// allowed by parseNameVal(), in a way that this only allows assignment of the +// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. +// In addition, a keyword definition alone is of the form `keyword` like `name1` +// above. And the assignments `name2=` and `name3=""` are equivalent and +// assign an empty value to the respective keywords. +func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var ( + rootnode *Node + prevNode *Node + ) + for i, word := range words { + node := &Node{} + node.Value = word + if i == 0 { + rootnode = node + } else { + prevNode.Next = node + } + prevNode = node + } + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node := &Node{} + rootnode := node + prevnode := node + for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parsestring just wraps the string in quotes and returns a working node. +func parseString(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + rest = strings.TrimLeftFunc(rest, unicode.IsSpace) + if !strings.HasPrefix(rest, "[") { + return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) + } + + var myJSON []interface{} + if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { + return nil, nil, err + } + + var top, prev *Node + for _, str := range myJSON { + s, ok := str.(string) + if !ok { + return nil, nil, errDockerfileNotStringArray + } + + node := &Node{Value: s} + if prev == nil { + top = node + } else { + prev.Next = node + } + prev = node + } + + return top, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. +func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string. +func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) { + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest, d) +} + +// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. +func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { + // Find end of first argument + var sep int + for ; sep < len(rest); sep++ { + if unicode.IsSpace(rune(rest[sep])) { + break + } + } + next := sep + for ; next < len(rest); next++ { + if !unicode.IsSpace(rune(rest[next])) { + break + } + } + + if sep == 0 { + return nil, nil, nil + } + + typ := rest[:sep] + cmd, attrs, err := parseMaybeJSON(rest[next:], d) + if err != nil { + return nil, nil, err + } + + return &Node{Value: typ, Next: cmd}, attrs, err +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/parser.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/parser.go new file mode 100644 index 0000000..e534644 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/parser.go @@ -0,0 +1,221 @@ +// Package parser implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strings" + "unicode" + + "github.com/docker/docker/builder/dockerfile/command" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. +// +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing + Flags []string // only top Node should have this set + StartLine int // the line in the original dockerfile where the node begins + EndLine int // the line in the original dockerfile where the node ends +} + +// Directive is the structure used during a build run to hold the state of +// parsing directives. +type Directive struct { + EscapeToken rune // Current escape token + LineContinuationRegex *regexp.Regexp // Current line contination regex + LookingForDirectives bool // Whether we are currently looking for directives + EscapeSeen bool // Whether the escape directive has been seen +} + +var ( + dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) + tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) + tokenComment = regexp.MustCompile(`^#.*$`) +) + +// DefaultEscapeToken is the default escape token +const DefaultEscapeToken = "\\" + +// SetEscapeToken sets the default token for escaping characters in a Dockerfile. +func SetEscapeToken(s string, d *Directive) error { + if s != "`" && s != "\\" { + return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s) + } + d.EscapeToken = rune(s[0]) + d.LineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) + return nil +} + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ + command.Add: parseMaybeJSONToList, + command.Arg: parseNameOrNameVal, + command.Cmd: parseMaybeJSON, + command.Copy: parseMaybeJSONToList, + command.Entrypoint: parseMaybeJSON, + command.Env: parseEnv, + command.Expose: parseStringsWhitespaceDelimited, + command.From: parseString, + command.Healthcheck: parseHealthConfig, + command.Label: parseLabel, + command.Maintainer: parseString, + command.Onbuild: parseSubCommand, + command.Run: parseMaybeJSON, + command.Shell: parseMaybeJSON, + command.StopSignal: parseString, + command.User: parseString, + command.Volume: parseMaybeJSONToList, + command.Workdir: parseString, + } +} + +// ParseLine parses a line and returns the remainder. +func ParseLine(line string, d *Directive, ignoreCont bool) (string, *Node, error) { + // Handle the parser directive '# escape=. Parser directives must precede + // any builder instruction or other comments, and cannot be repeated. + if d.LookingForDirectives { + tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tecMatch) > 0 { + if d.EscapeSeen == true { + return "", nil, fmt.Errorf("only one escape parser directive can be used") + } + for i, n := range tokenEscapeCommand.SubexpNames() { + if n == "escapechar" { + if err := SetEscapeToken(tecMatch[i], d); err != nil { + return "", nil, err + } + d.EscapeSeen = true + return "", nil, nil + } + } + } + } + + d.LookingForDirectives = false + + if line = stripComments(line); line == "" { + return "", nil, nil + } + + if !ignoreCont && d.LineContinuationRegex.MatchString(line) { + line = d.LineContinuationRegex.ReplaceAllString(line, "") + return line, nil, nil + } + + cmd, flags, args, err := splitCommand(line) + if err != nil { + return "", nil, err + } + + node := &Node{} + node.Value = cmd + + sexp, attrs, err := fullDispatch(cmd, args, d) + if err != nil { + return "", nil, err + } + + node.Next = sexp + node.Attributes = attrs + node.Original = line + node.Flags = flags + + return "", node, nil +} + +// Parse is the main parse routine. +// It handles an io.ReadWriteCloser and returns the root of the AST. +func Parse(rwc io.Reader, d *Directive) (*Node, error) { + currentLine := 0 + root := &Node{} + root.StartLine = -1 + scanner := bufio.NewScanner(rwc) + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + scannedLine := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) + currentLine++ + line, child, err := ParseLine(scannedLine, d, false) + if err != nil { + return nil, err + } + startLine := currentLine + + if line != "" && child == nil { + for scanner.Scan() { + newline := scanner.Text() + currentLine++ + + if stripComments(strings.TrimSpace(newline)) == "" { + continue + } + + line, child, err = ParseLine(line+newline, d, false) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + if child == nil && line != "" { + // When we call ParseLine we'll pass in 'true' for + // the ignoreCont param if we're at the EOF. This will + // prevent the func from returning immediately w/o + // parsing the line thinking that there's more input + // to come. + + _, child, err = ParseLine(line, d, scanner.Err() == nil) + if err != nil { + return nil, err + } + } + } + + if child != nil { + // Update the line information for the current child. + child.StartLine = startLine + child.EndLine = currentLine + // Update the line information for the root. The starting line of the root is always the + // starting line of the first child and the ending line is the ending line of the last child. + if root.StartLine < 0 { + root.StartLine = currentLine + } + root.EndLine = currentLine + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/parser_test.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/parser_test.go new file mode 100644 index 0000000..e8e2696 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/parser_test.go @@ -0,0 +1,173 @@ +package parser + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" +) + +const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" +const testFileLineInfo = "testfile-line/Dockerfile" + +func getDirs(t *testing.T, dir string) []string { + f, err := os.Open(dir) + if err != nil { + t.Fatal(err) + } + + defer f.Close() + + dirs, err := f.Readdirnames(0) + if err != nil { + t.Fatal(err) + } + + return dirs +} + +func TestTestNegative(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", dir, err) + } + defer df.Close() + + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + _, err = Parse(df, &d) + if err == nil { + t.Fatalf("No error parsing broken dockerfile for %s", dir) + } + } +} + +func TestTestData(t *testing.T) { + for _, dir := range getDirs(t, testDir) { + dockerfile := filepath.Join(testDir, dir, "Dockerfile") + resultfile := filepath.Join(testDir, dir, "result") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", dir, err) + } + defer df.Close() + + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + ast, err := Parse(df, &d) + if err != nil { + t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) + } + + content, err := ioutil.ReadFile(resultfile) + if err != nil { + t.Fatalf("Error reading %s's result file: %v", dir, err) + } + + if runtime.GOOS == "windows" { + // CRLF --> CR to match Unix behavior + content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) + } + + if ast.Dump()+"\n" != string(content) { + fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) + fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) + t.Fatalf("%s: AST dump of dockerfile does not match result", dir) + } + } +} + +func TestParseWords(t *testing.T) { + tests := []map[string][]string{ + { + "input": {"foo"}, + "expect": {"foo"}, + }, + { + "input": {"foo bar"}, + "expect": {"foo", "bar"}, + }, + { + "input": {"foo\\ bar"}, + "expect": {"foo\\ bar"}, + }, + { + "input": {"foo=bar"}, + "expect": {"foo=bar"}, + }, + { + "input": {"foo bar 'abc xyz'"}, + "expect": {"foo", "bar", "'abc xyz'"}, + }, + { + "input": {`foo bar "abc xyz"`}, + "expect": {"foo", "bar", `"abc xyz"`}, + }, + { + "input": {"àöû"}, + "expect": {"àöû"}, + }, + { + "input": {`föo bàr "âbc xÿz"`}, + "expect": {"föo", "bàr", `"âbc xÿz"`}, + }, + } + + for _, test := range tests { + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + words := parseWords(test["input"][0], &d) + if len(words) != len(test["expect"]) { + t.Fatalf("length check failed. input: %v, expect: %q, output: %q", test["input"][0], test["expect"], words) + } + for i, word := range words { + if word != test["expect"][i] { + t.Fatalf("word check failed for word: %q. input: %q, expect: %q, output: %q", word, test["input"][0], test["expect"], words) + } + } + } +} + +func TestLineInformation(t *testing.T) { + df, err := os.Open(testFileLineInfo) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", testFileLineInfo, err) + } + defer df.Close() + + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + ast, err := Parse(df, &d) + if err != nil { + t.Fatalf("Error parsing dockerfile %s: %v", testFileLineInfo, err) + } + + if ast.StartLine != 5 || ast.EndLine != 31 { + fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.EndLine) + t.Fatalf("Root line information doesn't match result.") + } + if len(ast.Children) != 3 { + fmt.Fprintf(os.Stderr, "Wrong number of child: expected(%d), actual(%d)\n", 3, len(ast.Children)) + t.Fatalf("Root line information doesn't match result for %s", testFileLineInfo) + } + expected := [][]int{ + {5, 5}, + {11, 12}, + {17, 31}, + } + for i, child := range ast.Children { + if child.StartLine != expected[i][0] || child.EndLine != expected[i][1] { + t.Logf("Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n", + i, expected[i][0], expected[i][1], child.StartLine, child.EndLine) + t.Fatalf("Root line information doesn't match result.") + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfile-line/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfile-line/Dockerfile new file mode 100644 index 0000000..c7601c9 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfile-line/Dockerfile @@ -0,0 +1,35 @@ +# ESCAPE=\ + + + +FROM brimstone/ubuntu:14.04 + + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + + +ENV GOPATH \ +/go + + + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + + + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH + + + + diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile new file mode 100644 index 0000000..1d65578 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +ENV PATH diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile new file mode 100644 index 0000000..d1be459 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile @@ -0,0 +1 @@ +CMD [ "echo", [ "nested json" ] ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile new file mode 100644 index 0000000..00b444c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile @@ -0,0 +1,11 @@ +FROM ubuntu:14.04 +MAINTAINER Seongyeol Lim + +COPY . /go/src/github.com/docker/docker +ADD . / +ADD null / +COPY nullfile /tmp +ADD [ "vimrc", "/tmp" ] +COPY [ "bashrc", "/tmp" ] +COPY [ "test file", "/tmp" ] +ADD [ "test file", "/tmp/test file" ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result new file mode 100644 index 0000000..85aee64 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(maintainer "Seongyeol Lim ") +(copy "." "/go/src/github.com/docker/docker") +(add "." "/") +(add "null" "/") +(copy "nullfile" "/tmp") +(add "vimrc" "/tmp") +(copy "bashrc" "/tmp") +(copy "test file" "/tmp") +(add "test file" "/tmp/test file") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile new file mode 100644 index 0000000..0364ef9 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile @@ -0,0 +1,26 @@ +#escape=\ +FROM brimstone/ubuntu:14.04 + +MAINTAINER brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/result new file mode 100644 index 0000000..227f748 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/result @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(maintainer "brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile new file mode 100644 index 0000000..25ae352 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result new file mode 100644 index 0000000..16492e5 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile new file mode 100644 index 0000000..42b324e --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:14.04 + +RUN echo hello\ + world\ + goodnight \ + moon\ + light\ +ning +RUN echo hello \ + world +RUN echo hello \ +world +RUN echo hello \ +goodbye\ +frog +RUN echo hello \ +world +RUN echo hi \ + \ + world \ +\ + good\ +\ +night +RUN echo goodbye\ +frog +RUN echo good\ +bye\ +frog + +RUN echo hello \ +# this is a comment + +# this is a comment with a blank line surrounding it + +this is some more useful stuff diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/result new file mode 100644 index 0000000..268ae07 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(run "echo hello world goodnight moon lightning") +(run "echo hello world") +(run "echo hello world") +(run "echo hello goodbyefrog") +(run "echo hello world") +(run "echo hi world goodnight") +(run "echo goodbyefrog") +(run "echo goodbyefrog") +(run "echo hello this is some more useful stuff") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile new file mode 100644 index 0000000..8ccb71a --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf +ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init /etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result new file mode 100644 index 0000000..25dd3dd --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env "APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/Dockerfile new file mode 100644 index 0000000..99fbe55 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/Dockerfile @@ -0,0 +1,103 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get golang.org/x/tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/result new file mode 100644 index 0000000..d032f9b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/result @@ -0,0 +1,24 @@ +(from "ubuntu:14.04") +(maintainer "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get golang.org/x/tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." "/go/src/github.com/docker/docker") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/Dockerfile new file mode 100644 index 0000000..08fa18a --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu +ENV name value +ENV name=value +ENV name=value name2=value2 +ENV name="value value1" +ENV name=value\ value2 +ENV name="value'quote space'value2" +ENV name='value"double quote"value2' +ENV name=value\ value2 name2=value2\ value3 +ENV name="a\"b" +ENV name="a\'b" +ENV name='a\'b' +ENV name='a\'b'' +ENV name='a\"b' +ENV name="''" +# don't put anything after the next line - it must be the last line of the +# Dockerfile and it must end with \ +ENV name=value \ + name1=value1 \ + name2="value2a \ + value2b" \ + name3="value3a\n\"value3b\"" \ + name4="value4a\\nvalue4b" \ diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/result new file mode 100644 index 0000000..ba0a6dd --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/result @@ -0,0 +1,16 @@ +(from "ubuntu") +(env "name" "value") +(env "name" "value") +(env "name" "value" "name2" "value2") +(env "name" "\"value value1\"") +(env "name" "value\\ value2") +(env "name" "\"value'quote space'value2\"") +(env "name" "'value\"double quote\"value2'") +(env "name" "value\\ value2" "name2" "value2\\ value3") +(env "name" "\"a\\\"b\"") +(env "name" "\"a\\'b\"") +(env "name" "'a\\'b'") +(env "name" "'a\\'b''") +(env "name" "'a\\\"b'") +(env "name" "\"''\"") +(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile new file mode 100644 index 0000000..6def7ef --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile @@ -0,0 +1,9 @@ +# Comment here. Should not be looking for the following parser directive. +# Hence the following line will be ignored, and the subsequent backslash +# continuation will be the default. +# escape = ` + +FROM image +MAINTAINER foo@bar.com +ENV GOPATH \ +\go \ No newline at end of file diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/result new file mode 100644 index 0000000..21522a8 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/result @@ -0,0 +1,3 @@ +(from "image") +(maintainer "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile new file mode 100644 index 0000000..08a8cc4 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile @@ -0,0 +1,7 @@ +# escape = `` +# There is no white space line after the directives. This still succeeds, but goes +# against best practices. +FROM image +MAINTAINER foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/result new file mode 100644 index 0000000..21522a8 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/result @@ -0,0 +1,3 @@ +(from "image") +(maintainer "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/Dockerfile new file mode 100644 index 0000000..ef30414 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/Dockerfile @@ -0,0 +1,6 @@ +#escape = ` + +FROM image +MAINTAINER foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/result new file mode 100644 index 0000000..21522a8 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/result @@ -0,0 +1,3 @@ +(from "image") +(maintainer "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/Dockerfile new file mode 100644 index 0000000..1ffb17e --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +RUN foo \ + +bar \ + +baz + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/result new file mode 100644 index 0000000..13e409c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(maintainer "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(run "foo bar baz") +(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/Dockerfile new file mode 100644 index 0000000..2418e0f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/Dockerfile @@ -0,0 +1,10 @@ +FROM scratch +COPY foo /tmp/ +COPY --user=me foo /tmp/ +COPY --doit=true foo /tmp/ +COPY --user=me --doit=true foo /tmp/ +COPY --doit=true -- foo /tmp/ +COPY -- foo /tmp/ +CMD --doit [ "a", "b" ] +CMD --doit=true -- [ "a", "b" ] +CMD --doit -- [ ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/result new file mode 100644 index 0000000..4578f4c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/result @@ -0,0 +1,10 @@ +(from "scratch") +(copy "foo" "/tmp/") +(copy ["--user=me"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy ["--user=me" "--doit=true"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy "foo" "/tmp/") +(cmd ["--doit"] "a" "b") +(cmd ["--doit=true"] "a" "b") +(cmd ["--doit"]) diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/Dockerfile new file mode 100644 index 0000000..081e442 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/Dockerfile @@ -0,0 +1,10 @@ +FROM debian +ADD check.sh main.sh /app/ +CMD /app/main.sh +HEALTHCHECK +HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ + CMD /app/check.sh --quiet +HEALTHCHECK CMD +HEALTHCHECK CMD a b +HEALTHCHECK --timeout=3s CMD ["foo"] +HEALTHCHECK CONNECT TCP 7000 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/result new file mode 100644 index 0000000..092924f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/result @@ -0,0 +1,9 @@ +(from "debian") +(add "check.sh" "main.sh" "/app/") +(cmd "/app/main.sh") +(healthcheck) +(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") +(healthcheck "CMD") +(healthcheck "CMD" "a b") +(healthcheck ["--timeout=3s"] "CMD" "foo") +(healthcheck "CONNECT" "TCP 7000") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 0000000..587fb9b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/result new file mode 100644 index 0000000..0998e87 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 0000000..39fe27d --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 0000000..afc220c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 0000000..eaae081 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 0000000..484804e --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 0000000..c3ac63c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 0000000..6147891 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 0000000..5fd4afa --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 0000000..1ffbb8f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 0000000..30cc4bb --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 0000000..3204814 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/Dockerfile new file mode 100644 index 0000000..a586917 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/Dockerfile @@ -0,0 +1,8 @@ +CMD [] +CMD [""] +CMD ["a"] +CMD ["a","b"] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/result new file mode 100644 index 0000000..c6553e6 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/result @@ -0,0 +1,8 @@ +(cmd) +(cmd "") +(cmd "a") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 0000000..35f9c24 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 0000000..b5ac6fe --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(maintainer "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 0000000..188395f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +MAINTAINER docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . / + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . copy diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 0000000..6f7d57a --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(maintainer "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." "copy") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/Dockerfile new file mode 100644 index 0000000..f64c116 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/result new file mode 100644 index 0000000..a0efcf0 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile new file mode 100644 index 0000000..57bb597 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile @@ -0,0 +1,3 @@ +FROM foo + +VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/result new file mode 100644 index 0000000..18dbdee --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/result @@ -0,0 +1,2 @@ +(from "foo") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/Dockerfile new file mode 100644 index 0000000..5b9ec06 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/result new file mode 100644 index 0000000..a0036a9 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/Dockerfile new file mode 100644 index 0000000..bf8368e --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/result new file mode 100644 index 0000000..56ddb6f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/Dockerfile new file mode 100644 index 0000000..72b79bd --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/result new file mode 100644 index 0000000..d4f94cd --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") +(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/Dockerfile new file mode 100644 index 0000000..4842088 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD zsh -c weechat diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/result new file mode 100644 index 0000000..c3abb4c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/Dockerfile new file mode 100644 index 0000000..3a4da6e --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install znc -y +ADD conf /.znc + +CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/result new file mode 100644 index 0000000..5493b25 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install znc -y") +(add "conf" "/.znc") +(cmd "/usr/bin/znc" "-f" "-r") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/utils.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/utils.go new file mode 100644 index 0000000..cd7af75 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/utils.go @@ -0,0 +1,176 @@ +package parser + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +// Dump dumps the AST defined by `node` as a list of sexps. +// Returns a string suitable for printing. +func (node *Node) Dump() string { + str := "" + str += node.Value + + if len(node.Flags) > 0 { + str += fmt.Sprintf(" %q", node.Flags) + } + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + strconv.Quote(n.Value) + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string, d *Directive) (*Node, map[string]bool, error) { + fn := dispatch[cmd] + + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + + sexp, attrs, err := fn(args, d) + if err != nil { + return nil, nil, err + } + + return sexp, attrs, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +// covers comments and empty lines. Lines should be trimmed before passing to +// this function. +func stripComments(line string) string { + // string is already trimmed at this point + if tokenComment.MatchString(line) { + return tokenComment.ReplaceAllString(line, "") + } + + return line +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found someting with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/shell_parser.go b/vendor/github.com/moby/moby/builder/dockerfile/shell_parser.go new file mode 100644 index 0000000..189afd1 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/shell_parser.go @@ -0,0 +1,329 @@ +package dockerfile + +// This will take a single word and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. +// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section + +import ( + "fmt" + "runtime" + "strings" + "text/scanner" + "unicode" +) + +type shellWord struct { + word string + scanner scanner.Scanner + envs []string + pos int + escapeToken rune +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. +func ProcessWord(word string, env []string, escapeToken rune) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + escapeToken: escapeToken, + } + sw.scanner.Init(strings.NewReader(word)) + word, _, err := sw.process() + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word' then it will also +// return a slice of strings which represents the 'word' +// split up based on spaces - taking into account quotes. Note that +// this splitting is done **after** the env var substitutions are done. +// Note, each one is trimmed to remove leading and trailing spaces (unless +// they are quoted", but ProcessWord retains spaces between words. +func ProcessWords(word string, env []string, escapeToken rune) ([]string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + escapeToken: escapeToken, + } + sw.scanner.Init(strings.NewReader(word)) + _, words, err := sw.process() + return words, err +} + +func (sw *shellWord) process() (string, []string, error) { + return sw.processStopOn(scanner.EOF) +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + var scan scanner.Scanner + scan.Init(strings.NewReader(str)) + for scan.Peek() != scanner.EOF { + w.addChar(scan.Next()) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return w.words +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result string + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } + result += tmp + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just add it to the result + ch = sw.scanner.Next() + + if ch == sw.escapeToken { + // '\' (default escape token, but ` allowed) escapes, except end of line + + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result += string(ch) + } + } + + return result, words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.scanner.Next() + + for { + ch := sw.scanner.Next() + if ch == '\'' || ch == scanner.EOF { + break + } + result += string(ch) + } + + return result, nil +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ (or ` if escape token set accordingly) + var result string + + sw.scanner.Next() + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if ch == '"' { + sw.scanner.Next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } + result += tmp + } else { + ch = sw.scanner.Next() + if ch == sw.escapeToken { + chNext := sw.scanner.Peek() + + if chNext == scanner.EOF { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.scanner.Next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + ch := sw.scanner.Peek() + if ch == '{' { + sw.scanner.Next() + name := sw.processName() + ch = sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) + } + } + return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name += string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + if runtime.GOOS == "windows" { + // Case-insensitive environment variables on Windows + name = strings.ToUpper(name) + } + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if runtime.GOOS == "windows" { + env = strings.ToUpper(env) + } + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + compareName := env[:i] + if runtime.GOOS == "windows" { + compareName = strings.ToUpper(compareName) + } + if name != compareName { + continue + } + return env[i+1:] + } + return "" +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/shell_parser_test.go b/vendor/github.com/moby/moby/builder/dockerfile/shell_parser_test.go new file mode 100644 index 0000000..6cf691c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/shell_parser_test.go @@ -0,0 +1,155 @@ +package dockerfile + +import ( + "bufio" + "os" + "runtime" + "strings" + "testing" +) + +func TestShellParser4EnvVars(t *testing.T) { + fn := "envVarTest" + lineCount := 0 + + file, err := os.Open(fn) + if err != nil { + t.Fatalf("Can't open '%s': %s", err, fn) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} + for scanner.Scan() { + line := scanner.Text() + lineCount++ + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + if len(words) != 3 { + t.Fatalf("Error in '%s' - should be exactly one | in:%q", fn, line) + } + + words[0] = strings.TrimSpace(words[0]) + words[1] = strings.TrimSpace(words[1]) + words[2] = strings.TrimSpace(words[2]) + + // Key W=Windows; A=All; U=Unix + if (words[0] != "W") && (words[0] != "A") && (words[0] != "U") { + t.Fatalf("Invalid tag %s at line %d of %s. Must be W, A or U", words[0], lineCount, fn) + } + + if ((words[0] == "W" || words[0] == "A") && runtime.GOOS == "windows") || + ((words[0] == "U" || words[0] == "A") && runtime.GOOS != "windows") { + newWord, err := ProcessWord(words[1], envs, '\\') + + if err != nil { + newWord = "error" + } + + if newWord != words[2] { + t.Fatalf("Error. Src: %s Calc: %s Expected: %s at line %d", words[1], newWord, words[2], lineCount) + } + } + } +} + +func TestShellParser4Words(t *testing.T) { + fn := "wordsTest" + + file, err := os.Open(fn) + if err != nil { + t.Fatalf("Can't open '%s': %s", err, fn) + } + defer file.Close() + + envs := []string{} + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + + if strings.HasPrefix(line, "#") { + continue + } + + if strings.HasPrefix(line, "ENV ") { + line = strings.TrimLeft(line[3:], " ") + envs = append(envs, line) + continue + } + + words := strings.Split(line, "|") + if len(words) != 2 { + t.Fatalf("Error in '%s' - should be exactly one | in: %q", fn, line) + } + test := strings.TrimSpace(words[0]) + expected := strings.Split(strings.TrimLeft(words[1], " "), ",") + + result, err := ProcessWords(test, envs, '\\') + + if err != nil { + result = []string{"error"} + } + + if len(result) != len(expected) { + t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) + } + for i, w := range expected { + if w != result[i] { + t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) + } + } + } +} + +func TestGetEnv(t *testing.T) { + sw := &shellWord{ + word: "", + envs: nil, + pos: 0, + } + + sw.envs = []string{} + if sw.getEnv("foo") != "" { + t.Fatalf("2 - 'foo' should map to ''") + } + + sw.envs = []string{"foo"} + if sw.getEnv("foo") != "" { + t.Fatalf("3 - 'foo' should map to ''") + } + + sw.envs = []string{"foo="} + if sw.getEnv("foo") != "" { + t.Fatalf("4 - 'foo' should map to ''") + } + + sw.envs = []string{"foo=bar"} + if sw.getEnv("foo") != "bar" { + t.Fatalf("5 - 'foo' should map to 'bar'") + } + + sw.envs = []string{"foo=bar", "car=hat"} + if sw.getEnv("foo") != "bar" { + t.Fatalf("6 - 'foo' should map to 'bar'") + } + if sw.getEnv("car") != "hat" { + t.Fatalf("7 - 'car' should map to 'hat'") + } + + // Make sure we grab the first 'car' in the list + sw.envs = []string{"foo=bar", "car=hat", "car=bike"} + if sw.getEnv("car") != "hat" { + t.Fatalf("8 - 'car' should map to 'hat'") + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/support.go b/vendor/github.com/moby/moby/builder/dockerfile/support.go new file mode 100644 index 0000000..e875889 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/support.go @@ -0,0 +1,19 @@ +package dockerfile + +import "strings" + +// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile +// for exec form it returns untouched args slice +// for shell form it returns concatenated args as the first element of a slice +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/support_test.go b/vendor/github.com/moby/moby/builder/dockerfile/support_test.go new file mode 100644 index 0000000..7cc6fe9 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/support_test.go @@ -0,0 +1,65 @@ +package dockerfile + +import "testing" + +type testCase struct { + name string + args []string + attributes map[string]bool + expected []string +} + +func initTestCases() []testCase { + testCases := []testCase{} + + testCases = append(testCases, testCase{ + name: "empty args", + args: []string{}, + attributes: make(map[string]bool), + expected: []string{}, + }) + + jsonAttributes := make(map[string]bool) + jsonAttributes["json"] = true + + testCases = append(testCases, testCase{ + name: "json attribute with one element", + args: []string{"foo"}, + attributes: jsonAttributes, + expected: []string{"foo"}, + }) + + testCases = append(testCases, testCase{ + name: "json attribute with two elements", + args: []string{"foo", "bar"}, + attributes: jsonAttributes, + expected: []string{"foo", "bar"}, + }) + + testCases = append(testCases, testCase{ + name: "no attributes", + args: []string{"foo", "bar"}, + attributes: nil, + expected: []string{"foo bar"}, + }) + + return testCases +} + +func TestHandleJSONArgs(t *testing.T) { + testCases := initTestCases() + + for _, test := range testCases { + arguments := handleJSONArgs(test.args, test.attributes) + + if len(arguments) != len(test.expected) { + t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) + } + + for i := range test.expected { + if arguments[i] != test.expected[i] { + t.Fatalf("In test \"%s\": element as position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) + } + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/utils_test.go b/vendor/github.com/moby/moby/builder/dockerfile/utils_test.go new file mode 100644 index 0000000..80a3f1b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/utils_test.go @@ -0,0 +1,50 @@ +package dockerfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} + +// createTestSymlink creates a symlink file within dir which points to oldname +func createTestSymlink(t *testing.T, dir, filename, oldname string) string { + filePath := filepath.Join(dir, filename) + if err := os.Symlink(oldname, filePath); err != nil { + t.Fatalf("Error when creating %s symlink to %s: %s", filename, oldname, err) + } + + return filePath +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/wordsTest b/vendor/github.com/moby/moby/builder/dockerfile/wordsTest new file mode 100644 index 0000000..fa916c6 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/wordsTest @@ -0,0 +1,25 @@ +hello | hello +hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! +hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | hello there +hello\" there | hello",there diff --git a/vendor/github.com/moby/moby/builder/dockerignore.go b/vendor/github.com/moby/moby/builder/dockerignore.go new file mode 100644 index 0000000..3da7913 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerignore.go @@ -0,0 +1,48 @@ +package builder + +import ( + "os" + + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" +) + +// DockerIgnoreContext wraps a ModifiableContext to add a method +// for handling the .dockerignore file at the root of the context. +type DockerIgnoreContext struct { + ModifiableContext +} + +// Process reads the .dockerignore file at the root of the embedded context. +// If .dockerignore does not exist in the context, then nil is returned. +// +// It can take a list of files to be removed after .dockerignore is removed. +// This is used for server-side implementations of builders that need to send +// the .dockerignore file as well as the special files specified in filesToRemove, +// but expect them to be excluded from the context after they were processed. +// +// For example, server-side Dockerfile builders are expected to pass in the name +// of the Dockerfile to be removed after it was parsed. +// +// TODO: Don't require a ModifiableContext (use Context instead) and don't remove +// files, instead handle a list of files to be excluded from the context. +func (c DockerIgnoreContext) Process(filesToRemove []string) error { + f, err := c.Open(".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + excludes, _ := dockerignore.ReadAll(f) + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) + for _, fileToRemove := range filesToRemove { + rm, _ := fileutils.Matches(fileToRemove, excludes) + if rm { + c.Remove(fileToRemove) + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerignore/dockerignore.go b/vendor/github.com/moby/moby/builder/dockerignore/dockerignore.go new file mode 100644 index 0000000..2db67be --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerignore/dockerignore.go @@ -0,0 +1,49 @@ +package dockerignore + +import ( + "bufio" + "bytes" + "fmt" + "io" + "path/filepath" + "strings" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use GO's "clean" func to get the shortest/cleanest path for each. +func ReadAll(reader io.Reader) ([]string, error) { + if reader == nil { + return nil, nil + } + + scanner := bufio.NewScanner(reader) + var excludes []string + currentLine := 0 + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + pattern := string(scannedBytes) + currentLine++ + // Lines starting with # (comments) are ignored before processing + if strings.HasPrefix(pattern, "#") { + continue + } + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + excludes = append(excludes, pattern) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading .dockerignore: %v", err) + } + return excludes, nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerignore/dockerignore_test.go b/vendor/github.com/moby/moby/builder/dockerignore/dockerignore_test.go new file mode 100644 index 0000000..612a139 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerignore/dockerignore_test.go @@ -0,0 +1,57 @@ +package dockerignore + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestReadAll(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "dockerignore-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + di, err := ReadAll(nil) + if err != nil { + t.Fatalf("Expected not to have error, got %v", err) + } + + if diLen := len(di); diLen != 0 { + t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) + } + + diName := filepath.Join(tmpDir, ".dockerignore") + content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") + err = ioutil.WriteFile(diName, []byte(content), 0777) + if err != nil { + t.Fatal(err) + } + + diFd, err := os.Open(diName) + if err != nil { + t.Fatal(err) + } + defer diFd.Close() + + di, err = ReadAll(diFd) + if err != nil { + t.Fatal(err) + } + + if di[0] != "test1" { + t.Fatalf("First element is not test1") + } + if di[1] != "/test2" { + t.Fatalf("Second element is not /test2") + } + if di[2] != "/a/file/here" { + t.Fatalf("Third element is not /a/file/here") + } + if di[3] != "lastfile" { + t.Fatalf("Fourth element is not lastfile") + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerignore_test.go b/vendor/github.com/moby/moby/builder/dockerignore_test.go new file mode 100644 index 0000000..3c0ceda --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerignore_test.go @@ -0,0 +1,95 @@ +package builder + +import ( + "io/ioutil" + "log" + "os" + "sort" + "testing" +) + +const shouldStayFilename = "should_stay" + +func extractFilenames(files []os.FileInfo) []string { + filenames := make([]string, len(files), len(files)) + + for i, file := range files { + filenames[i] = file.Name() + } + + return filenames +} + +func checkDirectory(t *testing.T, dir string, expectedFiles []string) { + files, err := ioutil.ReadDir(dir) + + if err != nil { + t.Fatalf("Could not read directory: %s", err) + } + + if len(files) != len(expectedFiles) { + log.Fatalf("Directory should contain exactly %d file(s), got %d", len(expectedFiles), len(files)) + } + + filenames := extractFilenames(files) + sort.Strings(filenames) + sort.Strings(expectedFiles) + + for i, filename := range filenames { + if filename != expectedFiles[i] { + t.Fatalf("File %s should be in the directory, got: %s", expectedFiles[i], filename) + } + } +} + +func executeProcess(t *testing.T, contextDir string) { + modifiableCtx := &tarSumContext{root: contextDir} + ctx := DockerIgnoreContext{ModifiableContext: modifiableCtx} + + err := ctx.Process([]string{DefaultDockerfileName}) + + if err != nil { + t.Fatalf("Error when executing Process: %s", err) + } +} + +func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename}) + +} + +func TestProcessNoDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName}) + +} + +func TestProcessShouldLeaveAllFiles(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName, dockerignoreFilename}) + +} diff --git a/vendor/github.com/moby/moby/builder/git.go b/vendor/github.com/moby/moby/builder/git.go new file mode 100644 index 0000000..74df244 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/git.go @@ -0,0 +1,28 @@ +package builder + +import ( + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/gitutils" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. +func MakeGitContext(gitURL string) (ModifiableContext, error) { + root, err := gitutils.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + // TODO: print errors? + c.Close() + os.RemoveAll(root) + }() + return MakeTarSumContext(c) +} diff --git a/vendor/github.com/moby/moby/builder/remote.go b/vendor/github.com/moby/moby/builder/remote.go new file mode 100644 index 0000000..f3a4329 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remote.go @@ -0,0 +1,157 @@ +package builder + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "regexp" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/urlutil" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// MakeRemoteContext downloads a context from remoteURL and returns it. +// +// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of +// maxPreambleLength bytes from the body to help detecting the MIME type. +// Look at acceptableRemoteMIME for more details. +// +// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected +// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). +// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. +func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { + f, err := httputils.Download(remoteURL) + if err != nil { + return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) + } + defer f.Body.Close() + + var contextReader io.ReadCloser + if contentTypeHandlers != nil { + contentType := f.Header.Get("Content-Type") + clen := f.ContentLength + + contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) + if err != nil { + return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err) + } + defer contextReader.Close() + + // This loop tries to find a content-type handler for the detected content-type. + // If it could not find one from the caller-supplied map, it tries the empty content-type `""` + // which is interpreted as a fallback handler (usually used for raw tar contexts). + for _, ct := range []string{contentType, ""} { + if fn, ok := contentTypeHandlers[ct]; ok { + defer contextReader.Close() + if contextReader, err = fn(contextReader); err != nil { + return nil, err + } + break + } + } + } + + // Pass through - this is a pre-packaged context, presumably + // with a Dockerfile with the right name inside it. + return MakeTarSumContext(contextReader) +} + +// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used +// irrespective of user input. +// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). +func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) { + switch { + case remoteURL == "": + context, err = MakeTarSumContext(r) + case urlutil.IsGitURL(remoteURL): + context, err = MakeGitContext(remoteURL) + case urlutil.IsURL(remoteURL): + context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller + // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input. + dockerfileName = DefaultDockerfileName + + // TODO: return a context without tarsum + r, err := archive.Generate(dockerfileName, string(dockerfile)) + if err != nil { + return nil, err + } + + return ioutil.NopCloser(r), nil + }, + // fallback handler (tar context) + "": func(rc io.ReadCloser) (io.ReadCloser, error) { + return createProgressReader(rc), nil + }, + }) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. +// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. +func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble) + bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g. 'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { + contentType, _, err = httputils.DetectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/vendor/github.com/moby/moby/builder/remote_test.go b/vendor/github.com/moby/moby/builder/remote_test.go new file mode 100644 index 0000000..691a084 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remote_test.go @@ -0,0 +1,213 @@ +package builder + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" +) + +var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic + +func TestSelectAcceptableMIME(t *testing.T) { + validMimeStrings := []string{ + "application/x-bzip2", + "application/bzip2", + "application/gzip", + "application/x-gzip", + "application/x-xz", + "application/xz", + "application/tar", + "application/x-tar", + "application/octet-stream", + "text/plain", + } + + invalidMimeStrings := []string{ + "", + "application/octet", + "application/json", + } + + for _, m := range invalidMimeStrings { + if len(selectAcceptableMIME(m)) > 0 { + t.Fatalf("Should not have accepted %q", m) + } + } + + for _, m := range validMimeStrings { + if str := selectAcceptableMIME(m); str == "" { + t.Fatalf("Should have accepted %q", m) + } + } +} + +func TestInspectEmptyResponse(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader([]byte(""))) + contentType, bReader, err := inspectResponse(ct, br, 0) + if err == nil { + t.Fatalf("Should have generated an error for an empty response") + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != 0 { + t.Fatal("response body should remain empty") + } +} + +func TestInspectResponseBinary(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader(binaryContext)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) + if err != nil { + t.Fatal(err) + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != len(binaryContext) { + t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) + } + for i := range body { + if body[i] != binaryContext[i] { + t.Fatalf("Corrupted response body at byte index %d", i) + } + } +} + +func TestResponseUnsupportedContentType(t *testing.T) { + content := []byte(dockerfileContents) + ct := "application/json" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(dockerfileContents))) + + if err == nil { + t.Fatal("Should have returned an error on content-type 'application/json'") + } + if contentType != ct { + t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseTextSimple(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseEmptyContentType(t *testing.T) { + content := []byte(dockerfileContents) + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bodyReader, err := inspectResponse("", br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bodyReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestMakeRemoteContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/" + DefaultDockerfileName + remoteURL := serverURL.String() + + mux.Handle("/", http.FileServer(http.Dir(contextDir))) + + remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + r, err := archive.Generate(DefaultDockerfileName, string(dockerfile)) + if err != nil { + return nil, err + } + return ioutil.NopCloser(r), nil + }, + }) + + if err != nil { + t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err) + } + + if remoteContext == nil { + t.Fatalf("Remote context should not be nil") + } + + tarSumCtx, ok := remoteContext.(*tarSumContext) + + if !ok { + t.Fatalf("Cast error, remote context should be casted to tarSumContext") + } + + fileInfoSums := tarSumCtx.sums + + if fileInfoSums.Len() != 1 { + t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len()) + } + + fileInfo := fileInfoSums.GetFile(DefaultDockerfileName) + + if fileInfo == nil { + t.Fatalf("There should be file named %s in fileInfoSums", DefaultDockerfileName) + } + + if fileInfo.Pos() != 0 { + t.Fatalf("File %s should have position 0, got %d", DefaultDockerfileName, fileInfo.Pos()) + } +} diff --git a/vendor/github.com/moby/moby/builder/tarsum.go b/vendor/github.com/moby/moby/builder/tarsum.go new file mode 100644 index 0000000..77d3142 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/tarsum.go @@ -0,0 +1,159 @@ +package builder + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/tarsum" +) + +type tarSumContext struct { + root string + sums tarsum.FileInfoSums +} + +func (c *tarSumContext) Close() error { + return os.RemoveAll(c.root) +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { + cleanpath, fullpath, err := c.normalize(path) + if err != nil { + return nil, err + } + r, err := os.Open(fullpath) + if err != nil { + return nil, convertPathError(err, cleanpath) + } + return r, nil +} + +func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { + cleanpath, fullpath, err := c.normalize(path) + if err != nil { + return "", nil, err + } + + st, err := os.Lstat(fullpath) + if err != nil { + return "", nil, convertPathError(err, cleanpath) + } + + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return "", nil, convertPathError(err, cleanpath) + } + + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is if relative path is empty. + sum := path + // Use the checksum of the followed path(not the possible symlink) because + // this is the file that is actually copied. + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + sum = tsInfo.Sum() + } + fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} + return rel, fi, nil +} + +// MakeTarSumContext returns a build Context from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. +// +// Closing tarStream has to be done by the caller. +func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + tsc := &tarSumContext{root: root} + + // Make sure we clean-up upon error. In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + err = chrootarchive.Untar(sum, root, nil) + if err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + + return tsc, nil +} + +func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) { + cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) + if err != nil { + return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) + } + _, err = os.Lstat(fullpath) + if err != nil { + return "", "", convertPathError(err, path) + } + return +} + +func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { + root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) + return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return err + } + if rel == "." { + return nil + } + + sum := rel + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + sum = tsInfo.Sum() + } + fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} + if err := walkFn(rel, fi, nil); err != nil { + return err + } + return nil + }) +} + +func (c *tarSumContext) Remove(path string) error { + _, fullpath, err := c.normalize(path) + if err != nil { + return err + } + return os.RemoveAll(fullpath) +} diff --git a/vendor/github.com/moby/moby/builder/tarsum_test.go b/vendor/github.com/moby/moby/builder/tarsum_test.go new file mode 100644 index 0000000..278e583 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/tarsum_test.go @@ -0,0 +1,265 @@ +package builder + +import ( + "bufio" + "bytes" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +const ( + filename = "test" + contents = "contents test" +) + +func init() { + reexec.Init() +} + +func TestCloseRootDirectory(t *testing.T) { + contextDir, err := ioutil.TempDir("", "builder-tarsum-test") + + if err != nil { + t.Fatalf("Error with creating temporary directory: %s", err) + } + + tarsum := &tarSumContext{root: contextDir} + + err = tarsum.Close() + + if err != nil { + t.Fatalf("Error while executing Close: %s", err) + } + + _, err = os.Stat(contextDir) + + if !os.IsNotExist(err) { + t.Fatalf("Directory should not exist at this point") + defer os.RemoveAll(contextDir) + } +} + +func TestOpenFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + file, err := tarSum.Open(filename) + + if err != nil { + t.Fatalf("Error when executing Open: %s", err) + } + + defer file.Close() + + scanner := bufio.NewScanner(file) + buff := bytes.NewBufferString("") + + for scanner.Scan() { + buff.WriteString(scanner.Text()) + } + + if contents != buff.String() { + t.Fatalf("Contents are not equal. Expected: %s, got: %s", contents, buff.String()) + } + +} + +func TestOpenNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + tarSum := &tarSumContext{root: contextDir} + + file, err := tarSum.Open("not-existing") + + if file != nil { + t.Fatal("Opened file should be nil") + } + + if !os.IsNotExist(err) { + t.Fatalf("Error when executing Open: %s", err) + } +} + +func TestStatFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + testFilename := createTestTempFile(t, contextDir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + relPath, fileInfo, err := tarSum.Stat(filename) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if relPath != filename { + t.Fatalf("Relative path should be equal to %s, got %s", filename, relPath) + } + + if fileInfo.Path() != testFilename { + t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) + } +} + +func TestStatSubdir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + relativePath, err := filepath.Rel(contextDir, testFilename) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + relPath, fileInfo, err := tarSum.Stat(relativePath) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if relPath != relativePath { + t.Fatalf("Relative path should be equal to %s, got %s", relativePath, relPath) + } + + if fileInfo.Path() != testFilename { + t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) + } +} + +func TestStatNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + tarSum := &tarSumContext{root: contextDir} + + relPath, fileInfo, err := tarSum.Stat("not-existing") + + if relPath != "" { + t.Fatal("Relative path should be nil") + } + + if fileInfo != nil { + t.Fatalf("File info should be nil") + } + + if !os.IsNotExist(err) { + t.Fatalf("This file should not exist: %s", err) + } +} + +func TestRemoveDirectory(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + relativePath, err := filepath.Rel(contextDir, contextSubdir) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + tarSum := &tarSumContext{root: contextDir} + + err = tarSum.Remove(relativePath) + + if err != nil { + t.Fatalf("Error when executing Remove: %s", err) + } + + _, err = os.Stat(contextSubdir) + + if !os.IsNotExist(err) { + t.Fatalf("Directory should not exist at this point") + } +} + +func TestMakeTarSumContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, filename, contents, 0777) + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("error: %s", err) + } + + defer tarStream.Close() + + tarSum, err := MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when executing MakeTarSumContext: %s", err) + } + + if tarSum == nil { + t.Fatalf("Tar sum context should not be nil") + } +} + +func TestWalkWithoutError(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + createTestTempFile(t, contextSubdir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + walkFun := func(path string, fi FileInfo, err error) error { + return nil + } + + err := tarSum.Walk(contextSubdir, walkFun) + + if err != nil { + t.Fatalf("Error when executing Walk: %s", err) + } +} + +type WalkError struct { +} + +func (we WalkError) Error() string { + return "Error when executing Walk" +} + +func TestWalkWithError(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + tarSum := &tarSumContext{root: contextDir} + + walkFun := func(path string, fi FileInfo, err error) error { + return WalkError{} + } + + err := tarSum.Walk(contextSubdir, walkFun) + + if err == nil { + t.Fatalf("Error should not be nil") + } +} diff --git a/vendor/github.com/moby/moby/builder/utils_test.go b/vendor/github.com/moby/moby/builder/utils_test.go new file mode 100644 index 0000000..1101ff1 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/utils_test.go @@ -0,0 +1,87 @@ +package builder + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const ( + dockerfileContents = "FROM busybox" + dockerignoreFilename = ".dockerignore" + testfileContents = "test" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempSubdir creates a temporary directory for testing. +// It returns the created path but doesn't provide a cleanup function, +// so createTestTempSubdir should be used only for creating temporary subdirectories +// whose parent directories are properly cleaned up. +// When an error occurs, it terminates the test. +func createTestTempSubdir(t *testing.T, dir, prefix string) string { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} + +// chdir changes current working directory to dir. +// It returns a function which changes working directory back to the previous one. +// This function is meant to be executed as a deferred call. +// When an error occurs, it terminates the test. +func chdir(t *testing.T, dir string) func() { + workingDirectory, err := os.Getwd() + + if err != nil { + t.Fatalf("Error when retrieving working directory: %s", err) + } + + err = os.Chdir(dir) + + if err != nil { + t.Fatalf("Error when changing directory to %s: %s", dir, err) + } + + return func() { + err = os.Chdir(workingDirectory) + + if err != nil { + t.Fatalf("Error when changing back to working directory (%s): %s", workingDirectory, err) + } + } +} diff --git a/vendor/github.com/moby/moby/cli/cobra.go b/vendor/github.com/moby/moby/cli/cobra.go new file mode 100644 index 0000000..139845c --- /dev/null +++ b/vendor/github.com/moby/moby/cli/cobra.go @@ -0,0 +1,139 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. +func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/docker/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return err + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return fmt.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{.Flags.FlagUsages | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. +{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/moby/moby/cli/command/bundlefile/bundlefile.go b/vendor/github.com/moby/moby/cli/command/bundlefile/bundlefile.go new file mode 100644 index 0000000..7fd1e4f --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/bundlefile/bundlefile.go @@ -0,0 +1,69 @@ +package bundlefile + +import ( + "encoding/json" + "fmt" + "io" +) + +// Bundlefile stores the contents of a bundlefile +type Bundlefile struct { + Version string + Services map[string]Service +} + +// Service is a service from a bundlefile +type Service struct { + Image string + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Env []string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Ports []Port `json:",omitempty"` + WorkingDir *string `json:",omitempty"` + User *string `json:",omitempty"` + Networks []string `json:",omitempty"` +} + +// Port is a port as defined in a bundlefile +type Port struct { + Protocol string + Port uint32 +} + +// LoadFile loads a bundlefile from a path to the file +func LoadFile(reader io.Reader) (*Bundlefile, error) { + bundlefile := &Bundlefile{} + + decoder := json.NewDecoder(reader) + if err := decoder.Decode(bundlefile); err != nil { + switch jsonErr := err.(type) { + case *json.SyntaxError: + return nil, fmt.Errorf( + "JSON syntax error at byte %v: %s", + jsonErr.Offset, + jsonErr.Error()) + case *json.UnmarshalTypeError: + return nil, fmt.Errorf( + "Unexpected type at byte %v. Expected %s but received %s.", + jsonErr.Offset, + jsonErr.Type, + jsonErr.Value) + } + return nil, err + } + + return bundlefile, nil +} + +// Print writes the contents of the bundlefile to the output writer +// as human readable json +func Print(out io.Writer, bundle *Bundlefile) error { + bytes, err := json.MarshalIndent(*bundle, "", " ") + if err != nil { + return err + } + + _, err = out.Write(bytes) + return err +} diff --git a/vendor/github.com/moby/moby/cli/command/bundlefile/bundlefile_test.go b/vendor/github.com/moby/moby/cli/command/bundlefile/bundlefile_test.go new file mode 100644 index 0000000..c343410 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/bundlefile/bundlefile_test.go @@ -0,0 +1,77 @@ +package bundlefile + +import ( + "bytes" + "strings" + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestLoadFileV01Success(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "redis": { + "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", + "Networks": ["default"] + }, + "web": { + "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", + "Networks": ["default"], + "User": "web" + } + } + }`) + + bundle, err := LoadFile(reader) + assert.NilError(t, err) + assert.Equal(t, bundle.Version, "0.1") + assert.Equal(t, len(bundle.Services), 2) +} + +func TestLoadFileSyntaxError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": unquoted string + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "syntax error at byte 37: invalid character 'u'") +} + +func TestLoadFileTypeError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "web": { + "Image": "redis", + "Networks": "none" + } + } + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "Unexpected type at byte 94. Expected []string but received string") +} + +func TestPrint(t *testing.T) { + var buffer bytes.Buffer + bundle := &Bundlefile{ + Version: "0.1", + Services: map[string]Service{ + "web": { + Image: "image", + Command: []string{"echo", "something"}, + }, + }, + } + assert.NilError(t, Print(&buffer, bundle)) + output := buffer.String() + assert.Contains(t, output, "\"Image\": \"image\"") + assert.Contains(t, output, + `"Command": [ + "echo", + "something" + ]`) +} diff --git a/vendor/github.com/moby/moby/cli/command/checkpoint/cmd.go b/vendor/github.com/moby/moby/cli/command/checkpoint/cmd.go new file mode 100644 index 0000000..d5705a4 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/checkpoint/cmd.go @@ -0,0 +1,24 @@ +package checkpoint + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +// NewCheckpointCommand returns the `checkpoint` subcommand (only in experimental) +func NewCheckpointCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "checkpoint", + Short: "Manage checkpoints", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + Tags: map[string]string{"experimental": "", "version": "1.25"}, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/checkpoint/create.go b/vendor/github.com/moby/moby/cli/command/checkpoint/create.go new file mode 100644 index 0000000..473a941 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/checkpoint/create.go @@ -0,0 +1,58 @@ +package checkpoint + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type createOptions struct { + container string + checkpoint string + checkpointDir string + leaveRunning bool +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts createOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] CONTAINER CHECKPOINT", + Short: "Create a checkpoint from a running container", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + opts.checkpoint = args[1] + return runCreate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.leaveRunning, "leave-running", false, "Leave the container running after checkpoint") + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts createOptions) error { + client := dockerCli.Client() + + checkpointOpts := types.CheckpointCreateOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + Exit: !opts.leaveRunning, + } + + err := client.CheckpointCreate(context.Background(), opts.container, checkpointOpts) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint) + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/checkpoint/list.go b/vendor/github.com/moby/moby/cli/command/checkpoint/list.go new file mode 100644 index 0000000..daf8349 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/checkpoint/list.go @@ -0,0 +1,62 @@ +package checkpoint + +import ( + "fmt" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type listOptions struct { + checkpointDir string +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts listOptions + + cmd := &cobra.Command{ + Use: "ls [OPTIONS] CONTAINER", + Aliases: []string{"list"}, + Short: "List checkpoints for a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, args[0], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd + +} + +func runList(dockerCli *command.DockerCli, container string, opts listOptions) error { + client := dockerCli.Client() + + listOpts := types.CheckpointListOptions{ + CheckpointDir: opts.checkpointDir, + } + + checkpoints, err := client.CheckpointList(context.Background(), container, listOpts) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintf(w, "CHECKPOINT NAME") + fmt.Fprintf(w, "\n") + + for _, checkpoint := range checkpoints { + fmt.Fprintf(w, "%s\t", checkpoint.Name) + fmt.Fprint(w, "\n") + } + + w.Flush() + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/checkpoint/remove.go b/vendor/github.com/moby/moby/cli/command/checkpoint/remove.go new file mode 100644 index 0000000..ec39fa7 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/checkpoint/remove.go @@ -0,0 +1,44 @@ +package checkpoint + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + checkpointDir string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER CHECKPOINT", + Aliases: []string{"remove"}, + Short: "Remove a checkpoint", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args[0], args[1], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runRemove(dockerCli *command.DockerCli, container string, checkpoint string, opts removeOptions) error { + client := dockerCli.Client() + + removeOpts := types.CheckpointDeleteOptions{ + CheckpointID: checkpoint, + CheckpointDir: opts.checkpointDir, + } + + return client.CheckpointDelete(context.Background(), container, removeOpts) +} diff --git a/vendor/github.com/moby/moby/cli/command/cli.go b/vendor/github.com/moby/moby/cli/command/cli.go new file mode 100644 index 0000000..6d1dd74 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/cli.go @@ -0,0 +1,260 @@ +package command + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/cliconfig/credentials" + "github.com/docker/docker/client" + "github.com/docker/docker/dockerversion" + dopts "github.com/docker/docker/opts" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// Streams is an interface which exposes the standard input and output streams +type Streams interface { + In() *InStream + Out() *OutStream + Err() io.Writer +} + +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + configFile *configfile.ConfigFile + in *InStream + out *OutStream + err io.Writer + keyFile string + client client.APIClient + hasExperimental bool + defaultVersion string +} + +// HasExperimental returns true if experimental features are accessible. +func (cli *DockerCli) HasExperimental() bool { + return cli.hasExperimental +} + +// DefaultVersion returns api.defaultVersion of DOCKER_API_VERSION if specified. +func (cli *DockerCli) DefaultVersion() string { + return cli.defaultVersion +} + +// Client returns the APIClient +func (cli *DockerCli) Client() client.APIClient { + return cli.client +} + +// Out returns the writer used for stdout +func (cli *DockerCli) Out() *OutStream { + return cli.out +} + +// Err returns the writer used for stderr +func (cli *DockerCli) Err() io.Writer { + return cli.err +} + +// In returns the reader used for stdin +func (cli *DockerCli) In() *InStream { + return cli.in +} + +// ShowHelp shows the command help. +func (cli *DockerCli) ShowHelp(cmd *cobra.Command, args []string) error { + cmd.SetOutput(cli.err) + cmd.HelpFunc()(cmd, args) + return nil +} + +// ConfigFile returns the ConfigFile +func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { + return cli.configFile +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. +func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + for registry := range cli.configFile.CredentialHelpers { + helper := cli.CredentialsStore(registry) + newAuths, err := helper.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + } + defaultStore := cli.CredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + return auths, nil +} + +func addAll(to, from map[string]types.AuthConfig) { + for reg, ac := range from { + to[reg] = ac + } +} + +// CredentialsStore returns a new credentials store based +// on the settings provided in the configuration file. Empty string returns +// the default credential store. +func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store { + if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" { + return credentials.NewNativeStore(cli.configFile, helper) + } + return credentials.NewFileStore(cli.configFile) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string { + if c.CredentialHelpers != nil && serverAddress != "" { + if helper, exists := c.CredentialHelpers[serverAddress]; exists { + return helper + } + } + return c.CredentialsStore +} + +// Initialize the dockerCli runs initialization that must happen after command +// line flags are parsed. +func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { + cli.configFile = LoadDefaultConfigFile(cli.err) + + var err error + cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) + if err != nil { + return err + } + + cli.defaultVersion = cli.client.ClientVersion() + + if opts.Common.TrustKey == "" { + cli.keyFile = filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) + } else { + cli.keyFile = opts.Common.TrustKey + } + + if ping, err := cli.client.Ping(context.Background()); err == nil { + cli.hasExperimental = ping.Experimental + + // since the new header was added in 1.25, assume server is 1.24 if header is not present. + if ping.APIVersion == "" { + ping.APIVersion = "1.24" + } + + // if server version is lower than the current cli, downgrade + if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) { + cli.client.UpdateClientVersion(ping.APIVersion) + } + } + return nil +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { + return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. +func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { + configFile, e := cliconfig.Load(cliconfig.ConfigDir()) + if e != nil { + fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) + } + if !configFile.ContainsAuth() { + credentials.DetectDefaultStore(configFile) + } + return configFile +} + +// NewAPIClientFromFlags creates a new APIClient from command line flags +func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { + host, err := getServerHost(opts.Hosts, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + customHeaders := configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = UserAgent() + + verStr := api.DefaultVersion + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + httpClient, err := newHTTPClient(host, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + return client.NewClient(host, verStr, httpClient, customHeaders) +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + host, err = dopts.ParseHost(tlsOptions != nil, host) + return +} + +func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { + if tlsOptions == nil { + // let the api client configure the default transport. + return nil, nil + } + + config, err := tlsconfig.Client(*tlsOptions) + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: config, + } + proto, addr, _, err := client.ParseHost(host) + if err != nil { + return nil, err + } + + sockets.ConfigureTransport(tr, proto, addr) + + return &http.Client{ + Transport: tr, + }, nil +} + +// UserAgent returns the user agent string used for making API requests +func UserAgent() string { + return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" +} diff --git a/vendor/github.com/moby/moby/cli/command/commands/commands.go b/vendor/github.com/moby/moby/cli/command/commands/commands.go new file mode 100644 index 0000000..d64d568 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/commands/commands.go @@ -0,0 +1,91 @@ +package commands + +import ( + "os" + + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/checkpoint" + "github.com/docker/docker/cli/command/container" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/cli/command/network" + "github.com/docker/docker/cli/command/node" + "github.com/docker/docker/cli/command/plugin" + "github.com/docker/docker/cli/command/registry" + "github.com/docker/docker/cli/command/secret" + "github.com/docker/docker/cli/command/service" + "github.com/docker/docker/cli/command/stack" + "github.com/docker/docker/cli/command/swarm" + "github.com/docker/docker/cli/command/system" + "github.com/docker/docker/cli/command/volume" + "github.com/spf13/cobra" +) + +// AddCommands adds all the commands from cli/command to the root command +func AddCommands(cmd *cobra.Command, dockerCli *command.DockerCli) { + cmd.AddCommand( + node.NewNodeCommand(dockerCli), + service.NewServiceCommand(dockerCli), + swarm.NewSwarmCommand(dockerCli), + secret.NewSecretCommand(dockerCli), + container.NewContainerCommand(dockerCli), + image.NewImageCommand(dockerCli), + system.NewSystemCommand(dockerCli), + container.NewRunCommand(dockerCli), + image.NewBuildCommand(dockerCli), + network.NewNetworkCommand(dockerCli), + hide(system.NewEventsCommand(dockerCli)), + registry.NewLoginCommand(dockerCli), + registry.NewLogoutCommand(dockerCli), + registry.NewSearchCommand(dockerCli), + system.NewVersionCommand(dockerCli), + volume.NewVolumeCommand(dockerCli), + hide(system.NewInfoCommand(dockerCli)), + hide(container.NewAttachCommand(dockerCli)), + hide(container.NewCommitCommand(dockerCli)), + hide(container.NewCopyCommand(dockerCli)), + hide(container.NewCreateCommand(dockerCli)), + hide(container.NewDiffCommand(dockerCli)), + hide(container.NewExecCommand(dockerCli)), + hide(container.NewExportCommand(dockerCli)), + hide(container.NewKillCommand(dockerCli)), + hide(container.NewLogsCommand(dockerCli)), + hide(container.NewPauseCommand(dockerCli)), + hide(container.NewPortCommand(dockerCli)), + hide(container.NewPsCommand(dockerCli)), + hide(container.NewRenameCommand(dockerCli)), + hide(container.NewRestartCommand(dockerCli)), + hide(container.NewRmCommand(dockerCli)), + hide(container.NewStartCommand(dockerCli)), + hide(container.NewStatsCommand(dockerCli)), + hide(container.NewStopCommand(dockerCli)), + hide(container.NewTopCommand(dockerCli)), + hide(container.NewUnpauseCommand(dockerCli)), + hide(container.NewUpdateCommand(dockerCli)), + hide(container.NewWaitCommand(dockerCli)), + hide(image.NewHistoryCommand(dockerCli)), + hide(image.NewImagesCommand(dockerCli)), + hide(image.NewImportCommand(dockerCli)), + hide(image.NewLoadCommand(dockerCli)), + hide(image.NewPullCommand(dockerCli)), + hide(image.NewPushCommand(dockerCli)), + hide(image.NewRemoveCommand(dockerCli)), + hide(image.NewSaveCommand(dockerCli)), + hide(image.NewTagCommand(dockerCli)), + hide(system.NewInspectCommand(dockerCli)), + stack.NewStackCommand(dockerCli), + stack.NewTopLevelDeployCommand(dockerCli), + checkpoint.NewCheckpointCommand(dockerCli), + plugin.NewPluginCommand(dockerCli), + ) + +} + +func hide(cmd *cobra.Command) *cobra.Command { + if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" { + return cmd + } + cmdCopy := *cmd + cmdCopy.Hidden = true + cmdCopy.Aliases = []string{} + return &cmdCopy +} diff --git a/vendor/github.com/moby/moby/cli/command/container/attach.go b/vendor/github.com/moby/moby/cli/command/container/attach.go new file mode 100644 index 0000000..31bb109 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/attach.go @@ -0,0 +1,130 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/signal" + "github.com/spf13/cobra" +) + +type attachOptions struct { + noStdin bool + proxy bool + detachKeys string + + container string +} + +// NewAttachCommand creates a new cobra.Command for `docker attach` +func NewAttachCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts attachOptions + + cmd := &cobra.Command{ + Use: "attach [OPTIONS] CONTAINER", + Short: "Attach to a running container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runAttach(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") + flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + return cmd +} + +func runAttach(dockerCli *command.DockerCli, opts *attachOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + c, err := client.ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if !c.State.Running { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + if c.State.Paused { + return fmt.Errorf("You cannot attach to a paused container, unpause it first") + } + + if err := dockerCli.In().CheckTty(!opts.noStdin, c.Config.Tty); err != nil { + return err + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: !opts.noStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = dockerCli.In() + } + + if opts.proxy && !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, opts.container) + defer signal.StopCatch(sigc) + } + + resp, errAttach := client.ContainerAttach(ctx, opts.container, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return errAttach + } + defer resp.Close() + + if c.Config.Tty && dockerCli.Out().IsTerminal() { + height, width := dockerCli.Out().GetTtySize() + // To handle the case where a user repeatedly attaches/detaches without resizing their + // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially + // resize it, then go back to normal. Without this, every attach after the first will + // require the user to manually resize or hit enter. + resizeTtyTo(ctx, client, opts.container, height+1, width+1, false) + + // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back + // to the actual size. + if err := MonitorTtySize(ctx, dockerCli, opts.container, false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } + } + if err := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil { + return err + } + + if errAttach != nil { + return errAttach + } + + _, status, err := getExitCode(ctx, dockerCli, opts.container) + if err != nil { + return err + } + if status != 0 { + return cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/cmd.go b/vendor/github.com/moby/moby/cli/command/container/cmd.go new file mode 100644 index 0000000..3e9b488 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/cmd.go @@ -0,0 +1,46 @@ +package container + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewContainerCommand returns a cobra command for `container` subcommands +func NewContainerCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "container", + Short: "Manage containers", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + NewAttachCommand(dockerCli), + NewCommitCommand(dockerCli), + NewCopyCommand(dockerCli), + NewCreateCommand(dockerCli), + NewDiffCommand(dockerCli), + NewExecCommand(dockerCli), + NewExportCommand(dockerCli), + NewKillCommand(dockerCli), + NewLogsCommand(dockerCli), + NewPauseCommand(dockerCli), + NewPortCommand(dockerCli), + NewRenameCommand(dockerCli), + NewRestartCommand(dockerCli), + NewRmCommand(dockerCli), + NewRunCommand(dockerCli), + NewStartCommand(dockerCli), + NewStatsCommand(dockerCli), + NewStopCommand(dockerCli), + NewTopCommand(dockerCli), + NewUnpauseCommand(dockerCli), + NewUpdateCommand(dockerCli), + NewWaitCommand(dockerCli), + newListCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/container/commit.go b/vendor/github.com/moby/moby/cli/command/container/commit.go new file mode 100644 index 0000000..cf8d010 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/commit.go @@ -0,0 +1,76 @@ +package container + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + dockeropts "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type commitOptions struct { + container string + reference string + + pause bool + comment string + author string + changes dockeropts.ListOpts +} + +// NewCommitCommand creates a new cobra.Command for `docker commit` +func NewCommitCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts commitOptions + + cmd := &cobra.Command{ + Use: "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]", + Short: "Create a new image from a container's changes", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + if len(args) > 1 { + opts.reference = args[1] + } + return runCommit(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.BoolVarP(&opts.pause, "pause", "p", true, "Pause container during commit") + flags.StringVarP(&opts.comment, "message", "m", "", "Commit message") + flags.StringVarP(&opts.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith \")") + + opts.changes = dockeropts.NewListOpts(nil) + flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") + + return cmd +} + +func runCommit(dockerCli *command.DockerCli, opts *commitOptions) error { + ctx := context.Background() + + name := opts.container + reference := opts.reference + + options := types.ContainerCommitOptions{ + Reference: reference, + Comment: opts.comment, + Author: opts.author, + Changes: opts.changes.GetAll(), + Pause: opts.pause, + } + + response, err := dockerCli.Client().ContainerCommit(ctx, name, options) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), response.ID) + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/cp.go b/vendor/github.com/moby/moby/cli/command/container/cp.go new file mode 100644 index 0000000..17ab2ac --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/cp.go @@ -0,0 +1,303 @@ +package container + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/spf13/cobra" +) + +type copyOptions struct { + source string + destination string + followLink bool +} + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool +} + +// NewCopyCommand creates a new `docker cp` command +func NewCopyCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts copyOptions + + cmd := &cobra.Command{ + Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`, + Short: "Copy files/folders between a container and the local filesystem", + Long: strings.Join([]string{ + "Copy files/folders between a container and the local filesystem\n", + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + if args[0] == "" { + return fmt.Errorf("source can not be empty") + } + if args[1] == "" { + return fmt.Errorf("destination can not be empty") + } + opts.source = args[0] + opts.destination = args[1] + return runCopy(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH") + + return cmd +} + +func runCopy(dockerCli *command.DockerCli, opts copyOptions) error { + srcContainer, srcPath := splitCpArg(opts.source) + dstContainer, dstPath := splitCpArg(opts.destination) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + cpParam := &cpConfig{ + followLink: opts.followLink, + } + + ctx := context.Background() + + switch direction { + case fromContainer: + return copyFromContainer(ctx, dockerCli, srcContainer, srcPath, dstPath, cpParam) + case toContainer: + return copyToContainer(ctx, dockerCli, srcPath, dstContainer, dstPath, cpParam) + case acrossContainers: + // Copying between containers isn't supported. + return fmt.Errorf("copying between containers is not supported") + default: + // User didn't specify any container. + return fmt.Errorf("must specify at least one container source") + } +} + +func statContainerPath(ctx context.Context, dockerCli *command.DockerCli, containerName, path string) (types.ContainerPathStat, error) { + return dockerCli.Client().ContainerStatPath(ctx, containerName, path) +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + + return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil +} + +func copyFromContainer(ctx context.Context, dockerCli *command.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + // if client requests to follow symbol link, then must decide target file to be copied + var rebaseName string + if cpParam.followLink { + srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath) + + // If the destination is a symbolic link, we should follow it. + if err == nil && srcStat.Mode&os.ModeSymlink != 0 { + linkTarget := srcStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + srcParent, _ := archive.SplitPathDirEntry(srcPath) + linkTarget = filepath.Join(srcParent, linkTarget) + } + + linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) + srcPath = linkTarget + } + + } + + content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath) + if err != nil { + return err + } + defer content.Close() + + if dstPath == "-" { + // Send the response to STDOUT. + _, err = io.Copy(os.Stdout, content) + + return err + } + + // Prepare source copy info. + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + RebaseName: rebaseName, + } + + preArchive := content + if len(srcInfo.RebaseName) != 0 { + _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) + preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) + } + // See comments in the implementation of `archive.CopyTo` for exactly what + // goes into deciding how and whether the source archive needs to be + // altered for the correct copy behavior. + return archive.CopyTo(preArchive, srcInfo, dstPath) +} + +func copyToContainer(ctx context.Context, dockerCli *command.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + // In order to get the copy behavior right, we need to know information + // about both the source and destination. The API is a simple tar + // archive/extract API but we can use the stat info header about the + // destination to be more informed about exactly what the destination is. + + // Prepare destination copy info by stat-ing the container path. + dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget) + } + + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed. If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + // Use STDIN. + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) + } + } else { + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + } + + return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options) +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately proceeded by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`. + return "", arg + } + + return parts[0], parts[1] +} diff --git a/vendor/github.com/moby/moby/cli/command/container/create.go b/vendor/github.com/moby/moby/cli/command/container/create.go new file mode 100644 index 0000000..d5e63bd --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/create.go @@ -0,0 +1,218 @@ +package container + +import ( + "fmt" + "io" + "os" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/pkg/jsonmessage" + // FIXME migrate to docker/distribution/reference + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + apiclient "github.com/docker/docker/client" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type createOptions struct { + name string +} + +// NewCreateCommand creates a new cobra.Command for `docker create` +func NewCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts createOptions + var copts *runconfigopts.ContainerOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runCreate(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddTrustedFlags(flags, true) + copts = runconfigopts.AddFlags(flags) + return cmd +} + +func runCreate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *createOptions, copts *runconfigopts.ContainerOptions) error { + config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) + if err != nil { + reportError(dockerCli.Err(), "create", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + response, err := createContainer(context.Background(), dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) + return nil +} + +func pullImage(ctx context.Context, dockerCli *command.DockerCli, image string, out io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageCreateOptions{ + RegistryAuth: encodedAuth, + } + + responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream( + responseBody, + out, + dockerCli.Out().FD(), + dockerCli.Out().IsTerminal(), + nil) +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func createContainer(ctx context.Context, dockerCli *command.DockerCli, config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*container.ContainerCreateCreatedBody, error) { + stderr := dockerCli.Err() + + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + var trustedRef reference.Canonical + _, ref, err := reference.ParseIDOrReference(config.Image) + if err != nil { + return nil, err + } + if ref != nil { + ref = reference.WithDefaultTag(ref) + + if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { + var err error + trustedRef, err = image.TrustedReference(ctx, dockerCli, ref, nil) + if err != nil { + return nil, err + } + config.Image = trustedRef.String() + } + } + + //create the container + response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) + + //if image not found try to pull it + if err != nil { + if apiclient.IsErrImageNotFound(err) && ref != nil { + fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", ref.String()) + + // we don't want to write to stdout anything apart from container.ID + if err = pullImage(ctx, dockerCli, config.Image, stderr); err != nil { + return nil, err + } + if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil { + if err := image.TagTrusted(ctx, dockerCli, trustedRef, ref); err != nil { + return nil, err + } + } + // Retry + var retryErr error + response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(stderr, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/diff.go b/vendor/github.com/moby/moby/cli/command/container/diff.go new file mode 100644 index 0000000..168af74 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/diff.go @@ -0,0 +1,58 @@ +package container + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/archive" + "github.com/spf13/cobra" +) + +type diffOptions struct { + container string +} + +// NewDiffCommand creates a new cobra.Command for `docker diff` +func NewDiffCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts diffOptions + + return &cobra.Command{ + Use: "diff CONTAINER", + Short: "Inspect changes to files or directories on a container's filesystem", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runDiff(dockerCli, &opts) + }, + } +} + +func runDiff(dockerCli *command.DockerCli, opts *diffOptions) error { + if opts.container == "" { + return fmt.Errorf("Container name cannot be empty") + } + ctx := context.Background() + + changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) + if err != nil { + return err + } + + for _, change := range changes { + var kind string + switch change.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(dockerCli.Out(), "%s %s\n", kind, change.Path) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/exec.go b/vendor/github.com/moby/moby/cli/command/container/exec.go new file mode 100644 index 0000000..f038149 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/exec.go @@ -0,0 +1,207 @@ +package container + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + apiclient "github.com/docker/docker/client" + options "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type execOptions struct { + detachKeys string + interactive bool + tty bool + detach bool + user string + privileged bool + env *options.ListOpts +} + +func newExecOptions() *execOptions { + var values []string + return &execOptions{ + env: options.NewListOptsRef(&values, runconfigopts.ValidateEnv), + } +} + +// NewExecCommand creats a new cobra.Command for `docker exec` +func NewExecCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := newExecOptions() + + cmd := &cobra.Command{ + Use: "exec [OPTIONS] CONTAINER COMMAND [ARG...]", + Short: "Run a command in a running container", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + container := args[0] + execCmd := args[1:] + return runExec(dockerCli, opts, container, execCmd) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVarP(&opts.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching a container") + flags.BoolVarP(&opts.interactive, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.BoolVarP(&opts.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.BoolVarP(&opts.detach, "detach", "d", false, "Detached mode: run command in the background") + flags.StringVarP(&opts.user, "user", "u", "", "Username or UID (format: [:])") + flags.BoolVarP(&opts.privileged, "privileged", "", false, "Give extended privileges to the command") + flags.VarP(opts.env, "env", "e", "Set environment variables") + flags.SetAnnotation("env", "version", []string{"1.25"}) + + return cmd +} + +func runExec(dockerCli *command.DockerCli, opts *execOptions, container string, execCmd []string) error { + execConfig, err := parseExec(opts, execCmd) + // just in case the ParseExec does not exit + if container == "" || err != nil { + return cli.StatusError{StatusCode: 1} + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + // Send client escape keys + execConfig.DetachKeys = dockerCli.ConfigFile().DetachKeys + + ctx := context.Background() + client := dockerCli.Client() + + response, err := client.ContainerExecCreate(ctx, container, *execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + fmt.Fprintf(dockerCli.Out(), "exec ID empty") + return nil + } + + //Temp struct for execStart so that we don't need to transfer all the execConfig + if !execConfig.Detach { + if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + if err := client.ContainerExecStart(ctx, execID, execStartCheck); err != nil { + return err + } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(dockerCli.Out(), "%s\n", execID) + return nil + } + + // Interactive exec requested. + var ( + out, stderr io.Writer + in io.ReadCloser + errCh chan error + ) + + if execConfig.AttachStdin { + in = dockerCli.In() + } + if execConfig.AttachStdout { + out = dockerCli.Out() + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = dockerCli.Out() + } else { + stderr = dockerCli.Err() + } + } + + resp, err := client.ContainerExecAttach(ctx, execID, *execConfig) + if err != nil { + return err + } + defer resp.Close() + errCh = promise.Go(func() error { + return holdHijackedConnection(ctx, dockerCli, execConfig.Tty, in, out, stderr, resp) + }) + + if execConfig.Tty && dockerCli.In().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil { + fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(ctx, client, execID); err != nil { + return err + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + + return nil +} + +// getExecExitCode perform an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(ctx context.Context, client apiclient.ContainerAPIClient, execID string) (bool, int, error) { + resp, err := client.ContainerExecInspect(ctx, execID) + if err != nil { + // If we can't connect, then the daemon probably died. + if !apiclient.IsErrConnectionFailed(err) { + return false, -1, err + } + return false, -1, nil + } + + return resp.Running, resp.ExitCode, nil +} + +// parseExec parses the specified args for the specified command and generates +// an ExecConfig from it. +func parseExec(opts *execOptions, execCmd []string) (*types.ExecConfig, error) { + execConfig := &types.ExecConfig{ + User: opts.user, + Privileged: opts.privileged, + Tty: opts.tty, + Cmd: execCmd, + Detach: opts.detach, + } + + // If -d is not set, attach to everything by default + if !opts.detach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if opts.interactive { + execConfig.AttachStdin = true + } + } + + if opts.env != nil { + execConfig.Env = opts.env.GetAll() + } + + return execConfig, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/exec_test.go b/vendor/github.com/moby/moby/cli/command/container/exec_test.go new file mode 100644 index 0000000..baeeaf1 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/exec_test.go @@ -0,0 +1,116 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +type arguments struct { + options execOptions + execCmd []string +} + +func TestParseExec(t *testing.T) { + valids := map[*arguments]*types.ExecConfig{ + &arguments{ + execCmd: []string{"command"}, + }: { + Cmd: []string{"command"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + execCmd: []string{"command1", "command2"}, + }: { + Cmd: []string{"command1", "command2"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + options: execOptions{ + interactive: true, + tty: true, + user: "uid", + }, + execCmd: []string{"command"}, + }: { + User: "uid", + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + Tty: true, + Cmd: []string{"command"}, + }, + &arguments{ + options: execOptions{ + detach: true, + }, + execCmd: []string{"command"}, + }: { + AttachStdin: false, + AttachStdout: false, + AttachStderr: false, + Detach: true, + Cmd: []string{"command"}, + }, + &arguments{ + options: execOptions{ + tty: true, + interactive: true, + detach: true, + }, + execCmd: []string{"command"}, + }: { + AttachStdin: false, + AttachStdout: false, + AttachStderr: false, + Detach: true, + Tty: true, + Cmd: []string{"command"}, + }, + } + + for valid, expectedExecConfig := range valids { + execConfig, err := parseExec(&valid.options, valid.execCmd) + if err != nil { + t.Fatal(err) + } + if !compareExecConfig(expectedExecConfig, execConfig) { + t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) + } + } +} + +func compareExecConfig(config1 *types.ExecConfig, config2 *types.ExecConfig) bool { + if config1.AttachStderr != config2.AttachStderr { + return false + } + if config1.AttachStdin != config2.AttachStdin { + return false + } + if config1.AttachStdout != config2.AttachStdout { + return false + } + if config1.Detach != config2.Detach { + return false + } + if config1.Privileged != config2.Privileged { + return false + } + if config1.Tty != config2.Tty { + return false + } + if config1.User != config2.User { + return false + } + if len(config1.Cmd) != len(config2.Cmd) { + return false + } + for index, value := range config1.Cmd { + if value != config2.Cmd[index] { + return false + } + } + return true +} diff --git a/vendor/github.com/moby/moby/cli/command/container/export.go b/vendor/github.com/moby/moby/cli/command/container/export.go new file mode 100644 index 0000000..8fa2e5d --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/export.go @@ -0,0 +1,59 @@ +package container + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type exportOptions struct { + container string + output string +} + +// NewExportCommand creates a new `docker export` command +func NewExportCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts exportOptions + + cmd := &cobra.Command{ + Use: "export [OPTIONS] CONTAINER", + Short: "Export a container's filesystem as a tar archive", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runExport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runExport(dockerCli *command.DockerCli, opts exportOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ContainerExport(context.Background(), opts.container) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/vendor/github.com/moby/moby/cli/command/container/hijack.go b/vendor/github.com/moby/moby/cli/command/container/hijack.go new file mode 100644 index 0000000..ca136f0 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/hijack.go @@ -0,0 +1,116 @@ +package container + +import ( + "io" + "runtime" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stdcopy" + "golang.org/x/net/context" +) + +// holdHijackedConnection handles copying input to and output from streams to the +// connection +func holdHijackedConnection(ctx context.Context, streams command.Streams, tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { + var ( + err error + restoreOnce sync.Once + ) + if inputStream != nil && tty { + if err := setRawTerminal(streams); err != nil { + return err + } + defer func() { + restoreOnce.Do(func() { + restoreTerminal(streams, inputStream) + }) + }() + } + + receiveStdout := make(chan error, 1) + if outputStream != nil || errorStream != nil { + go func() { + // When TTY is ON, use regular copy + if tty && outputStream != nil { + _, err = io.Copy(outputStream, resp.Reader) + // we should restore the terminal as soon as possible once connection end + // so any following print messages will be in normal type. + if inputStream != nil { + restoreOnce.Do(func() { + restoreTerminal(streams, inputStream) + }) + } + } else { + _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) + } + + logrus.Debug("[hijack] End of stdout") + receiveStdout <- err + }() + } + + stdinDone := make(chan struct{}) + go func() { + if inputStream != nil { + io.Copy(resp.Conn, inputStream) + // we should restore the terminal as soon as possible once connection end + // so any following print messages will be in normal type. + if tty { + restoreOnce.Do(func() { + restoreTerminal(streams, inputStream) + }) + } + logrus.Debug("[hijack] End of stdin") + } + + if err := resp.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + close(stdinDone) + }() + + select { + case err := <-receiveStdout: + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + case <-stdinDone: + if outputStream != nil || errorStream != nil { + select { + case err := <-receiveStdout: + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + case <-ctx.Done(): + } + } + case <-ctx.Done(): + } + + return nil +} + +func setRawTerminal(streams command.Streams) error { + if err := streams.In().SetRawTerminal(); err != nil { + return err + } + return streams.Out().SetRawTerminal() +} + +func restoreTerminal(streams command.Streams, in io.Closer) error { + streams.In().RestoreTerminal() + streams.Out().RestoreTerminal() + // WARNING: DO NOT REMOVE THE OS CHECK !!! + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. + if in != nil && runtime.GOOS != "darwin" { + return in.Close() + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/inspect.go b/vendor/github.com/moby/moby/cli/command/container/inspect.go new file mode 100644 index 0000000..08a8d24 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/inspect.go @@ -0,0 +1,47 @@ +package container + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + size bool + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker container inspect` +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Display detailed information on one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ContainerInspectWithRaw(ctx, ref, opts.size) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/vendor/github.com/moby/moby/cli/command/container/kill.go b/vendor/github.com/moby/moby/cli/command/container/kill.go new file mode 100644 index 0000000..6da91a4 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/kill.go @@ -0,0 +1,56 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type killOptions struct { + signal string + + containers []string +} + +// NewKillCommand creates a new cobra.Command for `docker kill` +func NewKillCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts killOptions + + cmd := &cobra.Command{ + Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Kill one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runKill(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") + return cmd +} + +func runKill(dockerCli *command.DockerCli, opts *killOptions) error { + var errs []string + ctx := context.Background() + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + return dockerCli.Client().ContainerKill(ctx, container, opts.signal) + }) + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/list.go b/vendor/github.com/moby/moby/cli/command/container/list.go new file mode 100644 index 0000000..5bbf419 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/list.go @@ -0,0 +1,141 @@ +package container + +import ( + "io/ioutil" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/docker/docker/utils/templates" + "github.com/spf13/cobra" +) + +type psOptions struct { + quiet bool + size bool + all bool + noTrunc bool + nLatest bool + last int + format string + filter opts.FilterOpt +} + +// NewPsCommand creates a new cobra.Command for `docker ps` +func NewPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS]", + Short: "List containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPs(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display numeric IDs") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") + flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") + flags.IntVarP(&opts.last, "last", "n", -1, "Show n last created containers (includes all states)") + flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := *NewPsCommand(dockerCli) + cmd.Aliases = []string{"ps", "list"} + cmd.Use = "ls [OPTIONS]" + return &cmd +} + +// listOptionsProcessor is used to set any container list options which may only +// be embedded in the format template. +// This is passed directly into tmpl.Execute in order to allow the preprocessor +// to set any list options that were not provided by flags (e.g. `.Size`). +// It is using a `map[string]bool` so that unknown fields passed into the +// template format do not cause errors. These errors will get picked up when +// running through the actual template processor. +type listOptionsProcessor map[string]bool + +// Size sets the size of the map when called by a template execution. +func (o listOptionsProcessor) Size() bool { + o["size"] = true + return true +} + +// Label is needed here as it allows the correct pre-processing +// because Label() is a method with arguments +func (o listOptionsProcessor) Label(name string) string { + return "" +} + +func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) { + options := &types.ContainerListOptions{ + All: opts.all, + Limit: opts.last, + Size: opts.size, + Filters: opts.filter.Value(), + } + + if opts.nLatest && opts.last == -1 { + options.Limit = 1 + } + + tmpl, err := templates.Parse(opts.format) + + if err != nil { + return nil, err + } + + optionsProcessor := listOptionsProcessor{} + // This shouldn't error out but swallowing the error makes it harder + // to track down if preProcessor issues come up. Ref #24696 + if err := tmpl.Execute(ioutil.Discard, optionsProcessor); err != nil { + return nil, err + } + // At the moment all we need is to capture .Size for preprocessor + options.Size = opts.size || optionsProcessor["size"] + + return options, nil +} + +func runPs(dockerCli *command.DockerCli, opts *psOptions) error { + ctx := context.Background() + + listOptions, err := buildContainerListOptions(opts) + if err != nil { + return err + } + + containers, err := dockerCli.Client().ContainerList(ctx, *listOptions) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().PsFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().PsFormat + } else { + format = formatter.TableFormatKey + } + } + + containerCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewContainerFormat(format, opts.quiet, listOptions.Size), + Trunc: !opts.noTrunc, + } + return formatter.ContainerWrite(containerCtx, containers) +} diff --git a/vendor/github.com/moby/moby/cli/command/container/logs.go b/vendor/github.com/moby/moby/cli/command/container/logs.go new file mode 100644 index 0000000..3a37ced --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/logs.go @@ -0,0 +1,87 @@ +package container + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stdcopy" + "github.com/spf13/cobra" +) + +var validDrivers = map[string]bool{ + "json-file": true, + "journald": true, +} + +type logsOptions struct { + follow bool + since string + timestamps bool + details bool + tail string + + container string +} + +// NewLogsCommand creates a new cobra.Command for `docker logs` +func NewLogsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] CONTAINER", + Short: "Fetch the logs of a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runLogs(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if !validDrivers[c.HostConfig.LogConfig.Type] { + return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) + } + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + Details: opts.details, + } + responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options) + if err != nil { + return err + } + defer responseBody.Close() + + if c.Config.Tty { + _, err = io.Copy(dockerCli.Out(), responseBody) + } else { + _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) + } + return err +} diff --git a/vendor/github.com/moby/moby/cli/command/container/pause.go b/vendor/github.com/moby/moby/cli/command/container/pause.go new file mode 100644 index 0000000..6817cf6 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/pause.go @@ -0,0 +1,49 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type pauseOptions struct { + containers []string +} + +// NewPauseCommand creates a new cobra.Command for `docker pause` +func NewPauseCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pauseOptions + + return &cobra.Command{ + Use: "pause CONTAINER [CONTAINER...]", + Short: "Pause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runPause(dockerCli, &opts) + }, + } +} + +func runPause(dockerCli *command.DockerCli, opts *pauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerPause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/port.go b/vendor/github.com/moby/moby/cli/command/container/port.go new file mode 100644 index 0000000..ea15290 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/port.go @@ -0,0 +1,78 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/go-connections/nat" + "github.com/spf13/cobra" +) + +type portOptions struct { + container string + + port string +} + +// NewPortCommand creates a new cobra.Command for `docker port` +func NewPortCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts portOptions + + cmd := &cobra.Command{ + Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", + Short: "List port mappings or a specific mapping for the container", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + if len(args) > 1 { + opts.port = args[1] + } + return runPort(dockerCli, &opts) + }, + } + return cmd +} + +func runPort(dockerCli *command.DockerCli, opts *portOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if opts.port != "" { + port := opts.port + proto := "tcp" + parts := strings.SplitN(port, "/", 2) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/prune.go b/vendor/github.com/moby/moby/cli/command/container/prune.go new file mode 100644 index 0000000..064f4c0 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/prune.go @@ -0,0 +1,75 @@ +package container + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool +} + +// NewPruneCommand returns a new cobra prune command for containers +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all stopped containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + + return cmd +} + +const warning = `WARNING! This will remove all stopped containers. +Are you sure you want to continue?` + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().ContainersPrune(context.Background(), filters.Args{}) + if err != nil { + return + } + + if len(report.ContainersDeleted) > 0 { + output = "Deleted Containers:\n" + for _, id := range report.ContainersDeleted { + output += id + "\n" + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Container Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true}) +} diff --git a/vendor/github.com/moby/moby/cli/command/container/ps_test.go b/vendor/github.com/moby/moby/cli/command/container/ps_test.go new file mode 100644 index 0000000..62b0545 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/ps_test.go @@ -0,0 +1,118 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestBuildContainerListOptions(t *testing.T) { + filters := opts.NewFilterOpt() + assert.NilError(t, filters.Set("foo=bar")) + assert.NilError(t, filters.Set("baz=foo")) + + contexts := []struct { + psOpts *psOptions + expectedAll bool + expectedSize bool + expectedLimit int + expectedFilters map[string]string + }{ + { + psOpts: &psOptions{ + all: true, + size: true, + last: 5, + filter: filters, + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: true, + last: -1, + nLatest: true, + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 1, + expectedFilters: make(map[string]string), + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}} {{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // Without .Size, size should be false + format: "{{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: false, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + } + + for _, c := range contexts { + options, err := buildContainerListOptions(c.psOpts) + assert.NilError(t, err) + + assert.Equal(t, c.expectedAll, options.All) + assert.Equal(t, c.expectedSize, options.Size) + assert.Equal(t, c.expectedLimit, options.Limit) + assert.Equal(t, options.Filters.Len(), len(c.expectedFilters)) + + for k, v := range c.expectedFilters { + f := options.Filters + if !f.ExactMatch(k, v) { + t.Fatalf("Expected filter with key %s to be %s but got %s", k, v, f.Get(k)) + } + } + } +} diff --git a/vendor/github.com/moby/moby/cli/command/container/rename.go b/vendor/github.com/moby/moby/cli/command/container/rename.go new file mode 100644 index 0000000..346fb7b --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/rename.go @@ -0,0 +1,51 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type renameOptions struct { + oldName string + newName string +} + +// NewRenameCommand creates a new cobra.Command for `docker rename` +func NewRenameCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts renameOptions + + cmd := &cobra.Command{ + Use: "rename CONTAINER NEW_NAME", + Short: "Rename a container", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.oldName = args[0] + opts.newName = args[1] + return runRename(dockerCli, &opts) + }, + } + return cmd +} + +func runRename(dockerCli *command.DockerCli, opts *renameOptions) error { + ctx := context.Background() + + oldName := strings.TrimSpace(opts.oldName) + newName := strings.TrimSpace(opts.newName) + + if oldName == "" || newName == "" { + return fmt.Errorf("Error: Neither old nor new names may be empty") + } + + if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + return fmt.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/restart.go b/vendor/github.com/moby/moby/cli/command/container/restart.go new file mode 100644 index 0000000..fc3ba93 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/restart.go @@ -0,0 +1,62 @@ +package container + +import ( + "fmt" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type restartOptions struct { + nSeconds int + nSecondsChanged bool + + containers []string +} + +// NewRestartCommand creates a new cobra.Command for `docker restart` +func NewRestartCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts restartOptions + + cmd := &cobra.Command{ + Use: "restart [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Restart one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.nSecondsChanged = cmd.Flags().Changed("time") + return runRestart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") + return cmd +} + +func runRestart(dockerCli *command.DockerCli, opts *restartOptions) error { + ctx := context.Background() + var errs []string + var timeout *time.Duration + if opts.nSecondsChanged { + timeoutValue := time.Duration(opts.nSeconds) * time.Second + timeout = &timeoutValue + } + + for _, name := range opts.containers { + if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/rm.go b/vendor/github.com/moby/moby/cli/command/container/rm.go new file mode 100644 index 0000000..60724f1 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/rm.go @@ -0,0 +1,73 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type rmOptions struct { + rmVolumes bool + rmLink bool + force bool + + containers []string +} + +// NewRmCommand creates a new cobra.Command for `docker rm` +func NewRmCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Remove one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runRm(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") + flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)") + return cmd +} + +func runRm(dockerCli *command.DockerCli, opts *rmOptions) error { + ctx := context.Background() + + var errs []string + options := types.ContainerRemoveOptions{ + RemoveVolumes: opts.rmVolumes, + RemoveLinks: opts.rmLink, + Force: opts.force, + } + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + if container == "" { + return fmt.Errorf("Container name cannot be empty") + } + container = strings.Trim(container, "/") + return dockerCli.Client().ContainerRemove(ctx, container, options) + }) + + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/run.go b/vendor/github.com/moby/moby/cli/command/container/run.go new file mode 100644 index 0000000..2a11516 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/run.go @@ -0,0 +1,284 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + "os" + "runtime" + "strings" + "syscall" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + opttypes "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/libnetwork/resolvconf/dns" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type runOptions struct { + detach bool + sigProxy bool + name string + detachKeys string +} + +// NewRunCommand create a new `docker run` command +func NewRunCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts runOptions + var copts *runconfigopts.ContainerOptions + + cmd := &cobra.Command{ + Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Run a command in a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runRun(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + // These are flags not stored in Config/HostConfig + flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") + flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddTrustedFlags(flags, true) + copts = runconfigopts.AddFlags(flags) + return cmd +} + +func runRun(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *runconfigopts.ContainerOptions) error { + stdout, stderr, stdin := dockerCli.Out(), dockerCli.Err(), dockerCli.In() + client := dockerCli.Client() + // TODO: pass this as an argument + cmdPath := "run" + + var ( + flAttach *opttypes.ListOpts + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ) + + config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) + + // just in case the Parse does not exit + if err != nil { + reportError(stderr, cmdPath, err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintf(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") + } + + if len(hostConfig.DNS) > 0 { + // check the DNS settings passed via --dns against + // localhost regexp to warn if they are trying to + // set a DNS to a localhost address + for _, dnsIP := range hostConfig.DNS { + if dns.IsLocalhost(dnsIP) { + fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) + break + } + } + } + + config.ArgsEscaped = false + + if !opts.detach { + if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if fl := flags.Lookup("attach"); fl != nil { + flAttach = fl.Value.(*opttypes.ListOpts) + if flAttach.Len() != 0 { + return ErrConflictAttachDetach + } + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable sigProxy when in TTY mode + if config.Tty { + opts.sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. + if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize() + } + + ctx, cancelFun := context.WithCancel(context.Background()) + + // preserve AutoRemove state. createContainer() / ContainerCreate() disables daemon-side auto-remove on API < 1.25 + autoRemove := hostConfig.AutoRemove + + createResponse, err := createContainer(ctx, dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) + if err != nil { + reportError(stderr, cmdPath, err.Error(), true) + return runStartContainerErr(err) + } + if opts.sigProxy { + sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID) + defer signal.StopCatch(sigc) + } + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintf(stdout, "%s\n", createResponse.ID) + }() + } + attach := config.AttachStdin || config.AttachStdout || config.AttachStderr + if attach { + var ( + out, cerr io.Writer + in io.ReadCloser + ) + if config.AttachStdin { + in = stdin + } + if config.AttachStdout { + out = stdout + } + if config.AttachStderr { + if config.Tty { + cerr = stdout + } else { + cerr = stderr + } + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: config.AttachStdin, + Stdout: config.AttachStdout, + Stderr: config.AttachStderr, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + resp, errAttach := client.ContainerAttach(ctx, createResponse.ID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return errAttach + } + defer resp.Close() + + errCh = promise.Go(func() error { + errHijack := holdHijackedConnection(ctx, dockerCli, config.Tty, in, out, cerr, resp) + if errHijack == nil { + return errAttach + } + return errHijack + }) + } + + statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, autoRemove) + + //start the container + if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil { + // If we have holdHijackedConnection, we should notify + // holdHijackedConnection we are going to exit and wait + // to avoid the terminal are not restored. + if attach { + cancelFun() + <-errCh + } + + reportError(stderr, cmdPath, err.Error(), false) + if autoRemove { + // wait container to be removed + <-statusChan + } + return runStartContainerErr(err) + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil { + fmt.Fprintf(stderr, "Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. + if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + status := <-statusChan + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +// reportError is a utility method that prints a user-friendly message +// containing the error that occurred during parsing and a suggestion to get help +func reportError(stderr io.Writer, name string, str string, withHelp bool) { + if withHelp { + str += ".\nSee '" + os.Args[0] + " " + name + " --help'" + } + fmt.Fprintf(stderr, "%s: %s.\n", os.Args[0], str) +} + +// if container start fails with 'not found'/'no such' error, return 127 +// if container start fails with 'permission denied' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") + statusError := cli.StatusError{StatusCode: 125} + if strings.Contains(trimmedErr, "executable file not found") || + strings.Contains(trimmedErr, "no such file or directory") || + strings.Contains(trimmedErr, "system cannot find the file specified") { + statusError = cli.StatusError{StatusCode: 127} + } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { + statusError = cli.StatusError{StatusCode: 126} + } + + return statusError +} diff --git a/vendor/github.com/moby/moby/cli/command/container/start.go b/vendor/github.com/moby/moby/cli/command/container/start.go new file mode 100644 index 0000000..3521a41 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/start.go @@ -0,0 +1,179 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/spf13/cobra" +) + +type startOptions struct { + attach bool + openStdin bool + detachKeys string + checkpoint string + checkpointDir string + + containers []string +} + +// NewStartCommand creates a new cobra.Command for `docker start` +func NewStartCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts startOptions + + cmd := &cobra.Command{ + Use: "start [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Start one or more stopped containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") + flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + flags.StringVar(&opts.checkpoint, "checkpoint", "", "Restore from this checkpoint") + flags.SetAnnotation("checkpoint", "experimental", nil) + flags.StringVar(&opts.checkpointDir, "checkpoint-dir", "", "Use a custom checkpoint storage directory") + flags.SetAnnotation("checkpoint-dir", "experimental", nil) + return cmd +} + +func runStart(dockerCli *command.DockerCli, opts *startOptions) error { + ctx, cancelFun := context.WithCancel(context.Background()) + + if opts.attach || opts.openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if len(opts.containers) > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + // 2. Attach to the container. + container := opts.containers[0] + c, err := dockerCli.Client().ContainerInspect(ctx, container) + if err != nil { + return err + } + + // We always use c.ID instead of container to maintain consistency during `docker start` + if !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, c.ID) + defer signal.StopCatch(sigc) + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: opts.openStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + + if options.Stdin { + in = dockerCli.In() + } + + resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach return an ErrPersistEOF (connection closed) + // means server met an error and already put it in Hijacked connection, + // we would keep the error and read the detailed error message from hijacked connection + return errAttach + } + defer resp.Close() + cErr := promise.Go(func() error { + errHijack := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp) + if errHijack == nil { + return errAttach + } + return errHijack + }) + + // 3. We should open a channel for receiving status code of the container + // no matter it's detached, removed on daemon side(--rm) or exit normally. + statusChan := waitExitOrRemoved(ctx, dockerCli, c.ID, c.HostConfig.AutoRemove) + startOptions := types.ContainerStartOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + } + + // 4. Start the container. + if err := dockerCli.Client().ContainerStart(ctx, c.ID, startOptions); err != nil { + cancelFun() + <-cErr + if c.HostConfig.AutoRemove { + // wait container to be removed + <-statusChan + } + return err + } + + // 5. Wait for attachment to break. + if c.Config.Tty && dockerCli.Out().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, c.ID, false); err != nil { + fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) + } + } + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + + if status := <-statusChan; status != 0 { + return cli.StatusError{StatusCode: status} + } + } else if opts.checkpoint != "" { + if len(opts.containers) > 1 { + return fmt.Errorf("You cannot restore multiple containers at once.") + } + container := opts.containers[0] + startOptions := types.ContainerStartOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + } + return dockerCli.Client().ContainerStart(ctx, container, startOptions) + + } else { + // We're not going to attach to anything. + // Start as many containers as we want. + return startContainersWithoutAttachments(ctx, dockerCli, opts.containers) + } + + return nil +} + +func startContainersWithoutAttachments(ctx context.Context, dockerCli *command.DockerCli, containers []string) error { + var failedContainers []string + for _, container := range containers { + if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + failedContainers = append(failedContainers, container) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + + if len(failedContainers) > 0 { + return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/stats.go b/vendor/github.com/moby/moby/cli/command/container/stats.go new file mode 100644 index 0000000..12d5c68 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/stats.go @@ -0,0 +1,243 @@ +package container + +import ( + "fmt" + "io" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/spf13/cobra" +) + +type statsOptions struct { + all bool + noStream bool + format string + containers []string +} + +// NewStatsCommand creates a new cobra.Command for `docker stats` +func NewStatsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts statsOptions + + cmd := &cobra.Command{ + Use: "stats [OPTIONS] [CONTAINER...]", + Short: "Display a live stream of container(s) resource usage statistics", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStats(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + return cmd +} + +// runStats displays a live stream of resource usage statistics for one or more containers. +// This shows real-time information on CPU usage, memory usage, and network I/O. +func runStats(dockerCli *command.DockerCli, opts *statsOptions) error { + showAll := len(opts.containers) == 0 + closeChan := make(chan error) + + ctx := context.Background() + + // monitorContainerEvents watches for container creation and removal (only + // used when calling `docker stats` without arguments). + monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + + eventq, errq := dockerCli.Client().Events(ctx, options) + + // Whether we successfully subscribed to eventq or not, we can now + // unblock the main goroutine. + close(started) + + for { + select { + case event := <-eventq: + c <- event + case err := <-errq: + closeChan <- err + return + } + } + } + + // Get the daemonOSType if not set already + if daemonOSType == "" { + svctx := context.Background() + sv, err := dockerCli.Client().ServerVersion(svctx) + if err != nil { + return err + } + daemonOSType = sv.Os + } + + // waitFirst is a WaitGroup to wait first stat data's reach for each container + waitFirst := &sync.WaitGroup{} + + cStats := stats{} + // getContainerList simulates creation event for all previously existing + // containers (only used when calling `docker stats` without arguments). + getContainerList := func() { + options := types.ContainerListOptions{ + All: opts.all, + } + cs, err := dockerCli.Client().ContainerList(ctx, options) + if err != nil { + closeChan <- err + } + for _, container := range cs { + s := formatter.NewContainerStats(container.ID[:12], daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + } + + if showAll { + // If no names were specified, start a long running goroutine which + // monitors container events. We make sure we're subscribed before + // retrieving the list of running containers to avoid a race where we + // would "miss" a creation. + started := make(chan struct{}) + eh := command.InitEventHandler() + eh.Handle("create", func(e events.Message) { + if opts.all { + s := formatter.NewContainerStats(e.ID[:12], daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + }) + + eh.Handle("start", func(e events.Message) { + s := formatter.NewContainerStats(e.ID[:12], daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + }) + + eh.Handle("die", func(e events.Message) { + if !opts.all { + cStats.remove(e.ID[:12]) + } + }) + + eventChan := make(chan events.Message) + go eh.Watch(eventChan) + go monitorContainerEvents(started, eventChan) + defer close(eventChan) + <-started + + // Start a short-lived goroutine to retrieve the initial list of + // containers. + getContainerList() + } else { + // Artificially send creation events for the containers we were asked to + // monitor (same code path than we use when monitoring all containers). + for _, name := range opts.containers { + s := formatter.NewContainerStats(name, daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + + // We don't expect any asynchronous errors: closeChan can be closed. + close(closeChan) + + // Do a quick pause to detect any error with the provided list of + // container names. + time.Sleep(1500 * time.Millisecond) + var errs []string + cStats.mu.Lock() + for _, c := range cStats.cs { + cErr := c.GetError() + if cErr != nil { + errs = append(errs, fmt.Sprintf("%s: %v", c.Name, cErr)) + } + } + cStats.mu.Unlock() + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, ", ")) + } + } + + // before print to screen, make sure each container get at least one valid stat data + waitFirst.Wait() + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().StatsFormat) > 0 { + format = dockerCli.ConfigFile().StatsFormat + } else { + format = formatter.TableFormatKey + } + } + statsCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewStatsFormat(format, daemonOSType), + } + cleanScreen := func() { + if !opts.noStream { + fmt.Fprint(dockerCli.Out(), "\033[2J") + fmt.Fprint(dockerCli.Out(), "\033[H") + } + } + + var err error + for range time.Tick(500 * time.Millisecond) { + cleanScreen() + ccstats := []formatter.StatsEntry{} + cStats.mu.Lock() + for _, c := range cStats.cs { + ccstats = append(ccstats, c.GetStatistics()) + } + cStats.mu.Unlock() + if err = formatter.ContainerStatsWrite(statsCtx, ccstats); err != nil { + break + } + if len(cStats.cs) == 0 && !showAll { + break + } + if opts.noStream { + break + } + select { + case err, ok := <-closeChan: + if ok { + if err != nil { + // this is suppressing "unexpected EOF" in the cli when the + // daemon restarts so it shutdowns cleanly + if err == io.ErrUnexpectedEOF { + return nil + } + return err + } + } + default: + // just skip + } + } + return err +} diff --git a/vendor/github.com/moby/moby/cli/command/container/stats_helpers.go b/vendor/github.com/moby/moby/cli/command/container/stats_helpers.go new file mode 100644 index 0000000..b1a7740 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/stats_helpers.go @@ -0,0 +1,230 @@ +package container + +import ( + "encoding/json" + "errors" + "io" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +type stats struct { + ostype string + mu sync.Mutex + cs []*formatter.ContainerStats +} + +// daemonOSType is set once we have at least one stat for a container +// from the daemon. It is used to ensure we print the right header based +// on the daemon platform. +var daemonOSType string + +func (s *stats) add(cs *formatter.ContainerStats) bool { + s.mu.Lock() + defer s.mu.Unlock() + if _, exists := s.isKnownContainer(cs.Container); !exists { + s.cs = append(s.cs, cs) + return true + } + return false +} + +func (s *stats) remove(id string) { + s.mu.Lock() + if i, exists := s.isKnownContainer(id); exists { + s.cs = append(s.cs[:i], s.cs[i+1:]...) + } + s.mu.Unlock() +} + +func (s *stats) isKnownContainer(cid string) (int, bool) { + for i, c := range s.cs { + if c.Container == cid { + return i, true + } + } + return -1, false +} + +func collect(ctx context.Context, s *formatter.ContainerStats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { + logrus.Debugf("collecting stats for %s", s.Container) + var ( + getFirst bool + previousCPU uint64 + previousSystem uint64 + u = make(chan error, 1) + ) + + defer func() { + // if error happens and we get nothing of stats, release wait group whatever + if !getFirst { + getFirst = true + waitFirst.Done() + } + }() + + response, err := cli.ContainerStats(ctx, s.Container, streamStats) + if err != nil { + s.SetError(err) + return + } + defer response.Body.Close() + + dec := json.NewDecoder(response.Body) + go func() { + for { + var ( + v *types.StatsJSON + memPercent = 0.0 + cpuPercent = 0.0 + blkRead, blkWrite uint64 // Only used on Linux + mem = 0.0 + memLimit = 0.0 + memPerc = 0.0 + pidsStatsCurrent uint64 + ) + + if err := dec.Decode(&v); err != nil { + dec = json.NewDecoder(io.MultiReader(dec.Buffered(), response.Body)) + u <- err + if err == io.EOF { + break + } + time.Sleep(100 * time.Millisecond) + continue + } + + daemonOSType = response.OSType + + if daemonOSType != "windows" { + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if v.MemoryStats.Limit != 0 { + memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 + } + previousCPU = v.PreCPUStats.CPUUsage.TotalUsage + previousSystem = v.PreCPUStats.SystemUsage + cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v) + blkRead, blkWrite = calculateBlockIO(v.BlkioStats) + mem = float64(v.MemoryStats.Usage) + memLimit = float64(v.MemoryStats.Limit) + memPerc = memPercent + pidsStatsCurrent = v.PidsStats.Current + } else { + cpuPercent = calculateCPUPercentWindows(v) + blkRead = v.StorageStats.ReadSizeBytes + blkWrite = v.StorageStats.WriteSizeBytes + mem = float64(v.MemoryStats.PrivateWorkingSet) + } + netRx, netTx := calculateNetwork(v.Networks) + s.SetStatistics(formatter.StatsEntry{ + Name: v.Name, + ID: v.ID, + CPUPercentage: cpuPercent, + Memory: mem, + MemoryPercentage: memPerc, + MemoryLimit: memLimit, + NetworkRx: netRx, + NetworkTx: netTx, + BlockRead: float64(blkRead), + BlockWrite: float64(blkWrite), + PidsCurrent: pidsStatsCurrent, + }) + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the specified duration. + s.SetErrorAndReset(errors.New("timeout waiting for stats")) + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + case err := <-u: + if err != nil { + s.SetError(err) + continue + } + s.SetError(nil) + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + } + if !streamStats { + return + } + } +} + +func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) + onlineCPUs = float64(v.CPUStats.OnlineCPUs) + ) + + if onlineCPUs == 0.0 { + onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage)) + } + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0 + } + return cpuPercent +} + +func calculateCPUPercentWindows(v *types.StatsJSON) float64 { + // Max number of 100ns intervals between the previous time read and now + possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals + possIntervals /= 100 // Convert to number of 100ns intervals + possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors + + // Intervals used + intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage + + // Percentage avoiding divide-by-zero + if possIntervals > 0 { + return float64(intervalsUsed) / float64(possIntervals) * 100.0 + } + return 0.00 +} + +func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { + for _, bioEntry := range blkio.IoServiceBytesRecursive { + switch strings.ToLower(bioEntry.Op) { + case "read": + blkRead = blkRead + bioEntry.Value + case "write": + blkWrite = blkWrite + bioEntry.Value + } + } + return +} + +func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { + var rx, tx float64 + + for _, v := range network { + rx += float64(v.RxBytes) + tx += float64(v.TxBytes) + } + return rx, tx +} diff --git a/vendor/github.com/moby/moby/cli/command/container/stats_unit_test.go b/vendor/github.com/moby/moby/cli/command/container/stats_unit_test.go new file mode 100644 index 0000000..828d634 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/stats_unit_test.go @@ -0,0 +1,20 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestCalculateBlockIO(t *testing.T) { + blkio := types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}}, + } + blkRead, blkWrite := calculateBlockIO(blkio) + if blkRead != 5801 { + t.Fatalf("blkRead = %d, want 5801", blkRead) + } + if blkWrite != 579 { + t.Fatalf("blkWrite = %d, want 579", blkWrite) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/container/stop.go b/vendor/github.com/moby/moby/cli/command/container/stop.go new file mode 100644 index 0000000..c68ede5 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/stop.go @@ -0,0 +1,67 @@ +package container + +import ( + "fmt" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type stopOptions struct { + time int + timeChanged bool + + containers []string +} + +// NewStopCommand creates a new cobra.Command for `docker stop` +func NewStopCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts stopOptions + + cmd := &cobra.Command{ + Use: "stop [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Stop one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.timeChanged = cmd.Flags().Changed("time") + return runStop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it") + return cmd +} + +func runStop(dockerCli *command.DockerCli, opts *stopOptions) error { + ctx := context.Background() + + var timeout *time.Duration + if opts.timeChanged { + timeoutValue := time.Duration(opts.time) * time.Second + timeout = &timeoutValue + } + + var errs []string + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error { + return dockerCli.Client().ContainerStop(ctx, id, timeout) + }) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/top.go b/vendor/github.com/moby/moby/cli/command/container/top.go new file mode 100644 index 0000000..160153b --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/top.go @@ -0,0 +1,58 @@ +package container + +import ( + "fmt" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type topOptions struct { + container string + + args []string +} + +// NewTopCommand creates a new cobra.Command for `docker top` +func NewTopCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts topOptions + + cmd := &cobra.Command{ + Use: "top CONTAINER [ps OPTIONS]", + Short: "Display the running processes of a container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + opts.args = args[1:] + return runTop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTop(dockerCli *command.DockerCli, opts *topOptions) error { + ctx := context.Background() + + procList, err := dockerCli.Client().ContainerTop(ctx, opts.container, opts.args) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/tty.go b/vendor/github.com/moby/moby/cli/command/container/tty.go new file mode 100644 index 0000000..6af8e2b --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/tty.go @@ -0,0 +1,103 @@ +package container + +import ( + "fmt" + "os" + gosignal "os/signal" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" +) + +// resizeTtyTo resizes tty to specific height and width +func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) { + if height == 0 && width == 0 { + return + } + + options := types.ResizeOptions{ + Height: height, + Width: width, + } + + var err error + if isExec { + err = client.ContainerExecResize(ctx, id, options) + } else { + err = client.ContainerResize(ctx, id, options) + } + + if err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +// MonitorTtySize updates the container tty size when the terminal tty changes size +func MonitorTtySize(ctx context.Context, cli *command.DockerCli, id string, isExec bool) error { + resizeTty := func() { + height, width := cli.Out().GetTtySize() + resizeTtyTo(ctx, cli.Client(), id, height, width, isExec) + } + + resizeTty() + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := cli.Out().GetTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.Out().GetTtySize() + + if prevW != w || prevH != h { + resizeTty() + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + resizeTty() + } + }() + } + return nil +} + +// ForwardAllSignals forwards signals to the container +func ForwardAllSignals(ctx context.Context, cli *command.DockerCli, cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD || s == signal.SIGPIPE { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.Err(), "Unsupported signal: %v. Discarding.\n", s) + continue + } + + if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} diff --git a/vendor/github.com/moby/moby/cli/command/container/unpause.go b/vendor/github.com/moby/moby/cli/command/container/unpause.go new file mode 100644 index 0000000..c4d8d48 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/unpause.go @@ -0,0 +1,50 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type unpauseOptions struct { + containers []string +} + +// NewUnpauseCommand creates a new cobra.Command for `docker unpause` +func NewUnpauseCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts unpauseOptions + + cmd := &cobra.Command{ + Use: "unpause CONTAINER [CONTAINER...]", + Short: "Unpause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runUnpause(dockerCli, &opts) + }, + } + return cmd +} + +func runUnpause(dockerCli *command.DockerCli, opts *unpauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerUnpause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/update.go b/vendor/github.com/moby/moby/cli/command/container/update.go new file mode 100644 index 0000000..7576585 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/update.go @@ -0,0 +1,163 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type updateOptions struct { + blkioWeight uint16 + cpuPeriod int64 + cpuQuota int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpusetCpus string + cpusetMems string + cpuShares int64 + memoryString string + memoryReservation string + memorySwap string + kernelMemory string + restartPolicy string + + nFlag int + + containers []string +} + +// NewUpdateCommand creates a new cobra.Command for `docker update` +func NewUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts updateOptions + + cmd := &cobra.Command{ + Use: "update [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Update configuration of one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.nFlag = cmd.Flags().NFlag() + return runUpdate(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.Uint16Var(&opts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Int64Var(&opts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&opts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&opts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&opts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&opts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&opts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64VarP(&opts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.StringVarP(&opts.memoryString, "memory", "m", "", "Memory limit") + flags.StringVar(&opts.memoryReservation, "memory-reservation", "", "Memory soft limit") + flags.StringVar(&opts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.StringVar(&opts.kernelMemory, "kernel-memory", "", "Kernel memory limit") + flags.StringVar(&opts.restartPolicy, "restart", "", "Restart policy to apply when a container exits") + + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, opts *updateOptions) error { + var err error + + if opts.nFlag == 0 { + return fmt.Errorf("You must provide one or more flags when using this command.") + } + + var memory int64 + if opts.memoryString != "" { + memory, err = units.RAMInBytes(opts.memoryString) + if err != nil { + return err + } + } + + var memoryReservation int64 + if opts.memoryReservation != "" { + memoryReservation, err = units.RAMInBytes(opts.memoryReservation) + if err != nil { + return err + } + } + + var memorySwap int64 + if opts.memorySwap != "" { + if opts.memorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(opts.memorySwap) + if err != nil { + return err + } + } + } + + var kernelMemory int64 + if opts.kernelMemory != "" { + kernelMemory, err = units.RAMInBytes(opts.kernelMemory) + if err != nil { + return err + } + } + + var restartPolicy containertypes.RestartPolicy + if opts.restartPolicy != "" { + restartPolicy, err = runconfigopts.ParseRestartPolicy(opts.restartPolicy) + if err != nil { + return err + } + } + + resources := containertypes.Resources{ + BlkioWeight: opts.blkioWeight, + CpusetCpus: opts.cpusetCpus, + CpusetMems: opts.cpusetMems, + CPUShares: opts.cpuShares, + Memory: memory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + KernelMemory: kernelMemory, + CPUPeriod: opts.cpuPeriod, + CPUQuota: opts.cpuQuota, + CPURealtimePeriod: opts.cpuRealtimePeriod, + CPURealtimeRuntime: opts.cpuRealtimeRuntime, + } + + updateConfig := containertypes.UpdateConfig{ + Resources: resources, + RestartPolicy: restartPolicy, + } + + ctx := context.Background() + + var ( + warns []string + errs []string + ) + for _, container := range opts.containers { + r, err := dockerCli.Client().ContainerUpdate(ctx, container, updateConfig) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + warns = append(warns, r.Warnings...) + } + if len(warns) > 0 { + fmt.Fprintf(dockerCli.Out(), "%s", strings.Join(warns, "\n")) + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/container/utils.go b/vendor/github.com/moby/moby/cli/command/container/utils.go new file mode 100644 index 0000000..6bef924 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/utils.go @@ -0,0 +1,143 @@ +package container + +import ( + "strconv" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/cli/command" + clientapi "github.com/docker/docker/client" +) + +func waitExitOrRemoved(ctx context.Context, dockerCli *command.DockerCli, containerID string, waitRemove bool) chan int { + if len(containerID) == 0 { + // containerID can never be empty + panic("Internal Error: waitExitOrRemoved needs a containerID as parameter") + } + + var removeErr error + statusChan := make(chan int) + exitCode := 125 + + // Get events via Events API + f := filters.NewArgs() + f.Add("type", "container") + f.Add("container", containerID) + options := types.EventsOptions{ + Filters: f, + } + eventCtx, cancel := context.WithCancel(ctx) + eventq, errq := dockerCli.Client().Events(eventCtx, options) + + eventProcessor := func(e events.Message) bool { + stopProcessing := false + switch e.Status { + case "die": + if v, ok := e.Actor.Attributes["exitCode"]; ok { + code, cerr := strconv.Atoi(v) + if cerr != nil { + logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr) + } else { + exitCode = code + } + } + if !waitRemove { + stopProcessing = true + } else { + // If we are talking to an older daemon, `AutoRemove` is not supported. + // We need to fall back to the old behavior, which is client-side removal + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") { + go func() { + removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true}) + if removeErr != nil { + logrus.Errorf("error removing container: %v", removeErr) + cancel() // cancel the event Q + } + }() + } + } + case "detach": + exitCode = 0 + stopProcessing = true + case "destroy": + stopProcessing = true + } + return stopProcessing + } + + go func() { + defer func() { + statusChan <- exitCode // must always send an exit code or the caller will block + cancel() + }() + + for { + select { + case <-eventCtx.Done(): + if removeErr != nil { + return + } + case evt := <-eventq: + if eventProcessor(evt) { + return + } + case err := <-errq: + logrus.Errorf("error getting events from daemon: %v", err) + return + } + } + }() + + return statusChan +} + +// getExitCode performs an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(ctx context.Context, dockerCli *command.DockerCli, containerID string) (bool, int, error) { + c, err := dockerCli.Client().ContainerInspect(ctx, containerID) + if err != nil { + // If we can't connect, then the daemon probably died. + if !clientapi.IsErrConnectionFailed(err) { + return false, -1, err + } + return false, -1, nil + } + return c.State.Running, c.State.ExitCode, nil +} + +func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error { + if len(containers) == 0 { + return nil + } + const defaultParallel int = 50 + sem := make(chan struct{}, defaultParallel) + errChan := make(chan error) + + // make sure result is printed in correct order + output := map[string]chan error{} + for _, c := range containers { + output[c] = make(chan error, 1) + } + go func() { + for _, c := range containers { + err := <-output[c] + errChan <- err + } + }() + + go func() { + for _, c := range containers { + sem <- struct{}{} // Wait for active queue sem to drain. + go func(container string) { + output[container] <- op(ctx, container) + <-sem + }(c) + } + }() + return errChan +} diff --git a/vendor/github.com/moby/moby/cli/command/container/wait.go b/vendor/github.com/moby/moby/cli/command/container/wait.go new file mode 100644 index 0000000..19ccf7a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/container/wait.go @@ -0,0 +1,50 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type waitOptions struct { + containers []string +} + +// NewWaitCommand creates a new cobra.Command for `docker wait` +func NewWaitCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts waitOptions + + cmd := &cobra.Command{ + Use: "wait CONTAINER [CONTAINER...]", + Short: "Block until one or more containers stop, then print their exit codes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runWait(dockerCli, &opts) + }, + } + return cmd +} + +func runWait(dockerCli *command.DockerCli, opts *waitOptions) error { + ctx := context.Background() + + var errs []string + for _, container := range opts.containers { + status, err := dockerCli.Client().ContainerWait(ctx, container) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%d\n", status) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/events_utils.go b/vendor/github.com/moby/moby/cli/command/events_utils.go new file mode 100644 index 0000000..e710c97 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/events_utils.go @@ -0,0 +1,49 @@ +package command + +import ( + "sync" + + "github.com/Sirupsen/logrus" + eventtypes "github.com/docker/docker/api/types/events" +) + +type eventProcessor func(eventtypes.Message, error) error + +// EventHandler is abstract interface for user to customize +// own handle functions of each type of events +type EventHandler interface { + Handle(action string, h func(eventtypes.Message)) + Watch(c <-chan eventtypes.Message) +} + +// InitEventHandler initializes and returns an EventHandler +func InitEventHandler() EventHandler { + return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} +} + +type eventHandler struct { + handlers map[string]func(eventtypes.Message) + mu sync.Mutex +} + +func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { + w.mu.Lock() + w.handlers[action] = h + w.mu.Unlock() +} + +// Watch ranges over the passed in event chan and processes the events based on the +// handlers created for a given action. +// To stop watching, close the event chan. +func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/container.go b/vendor/github.com/moby/moby/cli/command/formatter/container.go new file mode 100644 index 0000000..6273453 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/container.go @@ -0,0 +1,235 @@ +package formatter + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + units "github.com/docker/go-units" +) + +const ( + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + + containerIDHeader = "CONTAINER ID" + namesHeader = "NAMES" + commandHeader = "COMMAND" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + mountsHeader = "MOUNTS" + localVolumes = "LOCAL VOLUMES" + networksHeader = "NETWORKS" +) + +// NewContainerFormat returns a Format for rendering using a Context +func NewContainerFormat(source string, quiet bool, size bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + format := defaultContainerTableFormat + if size { + format += `\t{{.Size}}` + } + return Format(format) + case RawFormatKey: + if quiet { + return `container_id: {{.ID}}` + } + format := `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{- pad .Status 1 0}} +names: {{.Names}} +labels: {{- pad .Labels 1 0}} +ports: {{- pad .Ports 1 0}} +` + if size { + format += `size: {{.Size}}\n` + } + return Format(format) + } + return Format(source) +} + +// ContainerWrite renders the context for a list of containers +func ContainerWrite(ctx Context, containers []types.Container) error { + render := func(format func(subContext subContext) error) error { + for _, container := range containers { + err := format(&containerContext{trunc: ctx.Trunc, c: container}) + if err != nil { + return err + } + } + return nil + } + return ctx.Write(&containerContext{}, render) +} + +type containerContext struct { + HeaderContext + trunc bool + c types.Container +} + +func (c *containerContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *containerContext) ID() string { + c.AddHeader(containerIDHeader) + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + c.AddHeader(namesHeader) + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + c.AddHeader(imageHeader) + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + } + return c.c.Image +} + +func (c *containerContext) Command() string { + c.AddHeader(commandHeader) + command := c.c.Command + if c.trunc { + command = stringutils.Ellipsis(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + c.AddHeader(createdAtHeader) + return time.Unix(int64(c.c.Created), 0).String() +} + +func (c *containerContext) RunningFor() string { + c.AddHeader(runningForHeader) + createdAt := time.Unix(int64(c.c.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *containerContext) Ports() string { + c.AddHeader(portsHeader) + return api.DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + c.AddHeader(statusHeader) + return c.c.Status +} + +func (c *containerContext) Size() string { + c.AddHeader(sizeHeader) + srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3) + sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + c.AddHeader(labelsHeader) + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.AddHeader(h) + + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) Mounts() string { + c.AddHeader(mountsHeader) + + var name string + var mounts []string + for _, m := range c.c.Mounts { + if m.Name == "" { + name = m.Source + } else { + name = m.Name + } + if c.trunc { + name = stringutils.Ellipsis(name, 15) + } + mounts = append(mounts, name) + } + return strings.Join(mounts, ",") +} + +func (c *containerContext) LocalVolumes() string { + c.AddHeader(localVolumes) + + count := 0 + for _, m := range c.c.Mounts { + if m.Driver == "local" { + count++ + } + } + + return fmt.Sprintf("%d", count) +} + +func (c *containerContext) Networks() string { + c.AddHeader(networksHeader) + + if c.c.NetworkSettings == nil { + return "" + } + + networks := []string{} + for k := range c.c.NetworkSettings.Networks { + networks = append(networks, k) + } + + return strings.Join(networks, ",") +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/container_test.go b/vendor/github.com/moby/moby/cli/command/formatter/container_test.go new file mode 100644 index 0000000..1613789 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/container_test.go @@ -0,0 +1,398 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestContainerPsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + unix := time.Now().Add(-65 * time.Second).Unix() + + var ctx containerContext + cases := []struct { + container types.Container + trunc bool + expValue string + expHeader string + call func() string + }{ + {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), containerIDHeader, ctx.ID}, + {types.Container{ID: containerID}, false, containerID, containerIDHeader, ctx.ID}, + {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, + {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, + {types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image}, + {types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image}, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + true, + "a5a665ff33ec", + imageHeader, + ctx.Image, + }, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + false, + "a5a665ff33eced1e0803148700880edab4", + imageHeader, + ctx.Image, + }, + {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, + {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, + {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, + {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports}, + {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, + {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, + {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, + {types.Container{}, true, "", labelsHeader, ctx.Labels}, + {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, + {types.Container{Created: unix}, true, "About a minute", runningForHeader, ctx.RunningFor}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: "this-is-a-long-volume-name-and-will-be-truncated-if-trunc-is-set", + Driver: "local", + Source: "/a/path", + }, + }, + }, true, "this-is-a-lo...", mountsHeader, ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Driver: "local", + Source: "/a/path", + }, + }, + }, false, "/a/path", mountsHeader, ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", + Driver: "local", + Source: "/a/path", + }, + }, + }, false, "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", mountsHeader, ctx.Mounts}, + } + + for _, c := range cases { + ctx = containerContext{c: c.container, trunc: c.trunc} + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } + + c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} + ctx = containerContext{c: c1, trunc: true} + + sid := ctx.Label("com.docker.swarm.swarm-id") + node := ctx.Label("com.docker.swarm.node_name") + if sid != "33" { + t.Fatalf("Expected 33, was %s\n", sid) + } + + if node != "ubuntu" { + t.Fatalf("Expected ubuntu, was %s\n", node) + } + + h := ctx.FullHeader() + if h != "SWARM ID\tNODE NAME" { + t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) + + } + + c2 := types.Container{} + ctx = containerContext{c: c2, trunc: true} + + label := ctx.Label("anything.really") + if label != "" { + t.Fatalf("Expected an empty string, was %s", label) + } + + ctx = containerContext{c: c2, trunc: true} + FullHeader := ctx.FullHeader() + if FullHeader != "" { + t.Fatalf("Expected FullHeader to be empty, was %s", FullHeader) + } + +} + +func TestContainerContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + Context{Format: NewContainerFormat("table", false, true)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE +containerID1 ubuntu "" 24 hours ago foobar_baz 0 B +containerID2 ubuntu "" 24 hours ago foobar_bar 0 B +`, + }, + { + Context{Format: NewContainerFormat("table", false, false)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +containerID1 ubuntu "" 24 hours ago foobar_baz +containerID2 ubuntu "" 24 hours ago foobar_bar +`, + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, true)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", true, false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table", true, false)}, + "containerID1\ncontainerID2\n", + }, + // Raw Format + { + Context{Format: NewContainerFormat("raw", false, false)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", false, true)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: +size: 0 B + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: +size: 0 B + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", true, false)}, + "container_id: containerID1\ncontainer_id: containerID2\n", + }, + // Custom Format + { + Context{Format: "{{.Image}}"}, + "ubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("{{.Image}}", false, true)}, + "ubuntu\nubuntu\n", + }, + } + + for _, testcase := range cases { + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ContainerWrite(testcase.context, containers) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestContainerContextWriteWithNoContainers(t *testing.T) { + out := bytes.NewBufferString("") + containers := []types.Container{} + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Image}}", + Output: out, + }, + "", + }, + { + Context{ + Format: "table {{.Image}}", + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: NewContainerFormat("{{.Image}}", false, true), + Output: out, + }, + "", + }, + { + Context{ + Format: NewContainerFormat("table {{.Image}}", false, true), + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: "table {{.Image}}\t{{.Size}}", + Output: out, + }, + "IMAGE SIZE\n", + }, + { + Context{ + Format: NewContainerFormat("table {{.Image}}\t{{.Size}}", false, true), + Output: out, + }, + "IMAGE SIZE\n", + }, + } + + for _, context := range contexts { + ContainerWrite(context.context, containers) + assert.Equal(t, context.expected, out.String()) + // Clean buffer + out.Reset() + } +} + +func TestContainerContextWriteJSON(t *testing.T) { + unix := time.Now().Add(-65 * time.Second).Unix() + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unix}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unix}, + } + expectedCreated := time.Unix(unix, 0).String() + expectedJSONs := []map[string]interface{}{ + {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID1", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_baz", "Networks": "", "Ports": "", "RunningFor": "About a minute", "Size": "0 B", "Status": ""}, + {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID2", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_bar", "Networks": "", "Ports": "", "RunningFor": "About a minute", "Size": "0 B", "Status": ""}, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, m, expectedJSONs[i]) + } +} + +func TestContainerContextWriteJSONField(t *testing.T) { + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu"}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"}, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .ID}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Equal(t, s, containers[i].ID) + } +} + +func TestContainerBackCompat(t *testing.T) { + containers := []types.Container{{ID: "brewhaha"}} + cases := []string{ + "ID", + "Names", + "Image", + "Command", + "CreatedAt", + "RunningFor", + "Ports", + "Status", + "Size", + "Labels", + "Mounts", + } + buf := bytes.NewBuffer(nil) + for _, c := range cases { + ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf} + if err := ContainerWrite(ctx, containers); err != nil { + t.Logf("could not render template for field '%s': %v", c, err) + t.Fail() + } + buf.Reset() + } +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/custom.go b/vendor/github.com/moby/moby/cli/command/formatter/custom.go new file mode 100644 index 0000000..df32684 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/custom.go @@ -0,0 +1,51 @@ +package formatter + +import ( + "strings" +) + +const ( + imageHeader = "IMAGE" + createdSinceHeader = "CREATED" + createdAtHeader = "CREATED AT" + sizeHeader = "SIZE" + labelsHeader = "LABELS" + nameHeader = "NAME" + driverHeader = "DRIVER" + scopeHeader = "SCOPE" +) + +type subContext interface { + FullHeader() string + AddHeader(header string) +} + +// HeaderContext provides the subContext interface for managing headers +type HeaderContext struct { + header []string +} + +// FullHeader returns the header as a string +func (c *HeaderContext) FullHeader() string { + if c.header == nil { + return "" + } + return strings.Join(c.header, "\t") +} + +// AddHeader adds another column to the header +func (c *HeaderContext) AddHeader(header string) { + if c.header == nil { + c.header = []string{} + } + c.header = append(c.header, strings.ToUpper(header)) +} + +func stripNamePrefix(ss []string) []string { + sss := make([]string, len(ss)) + for i, s := range ss { + sss[i] = s[1:] + } + + return sss +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/custom_test.go b/vendor/github.com/moby/moby/cli/command/formatter/custom_test.go new file mode 100644 index 0000000..da42039 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/custom_test.go @@ -0,0 +1,28 @@ +package formatter + +import ( + "reflect" + "strings" + "testing" +) + +func compareMultipleValues(t *testing.T, value, expected string) { + // comma-separated values means probably a map input, which won't + // be guaranteed to have the same order as our expected value + // We'll create maps and use reflect.DeepEquals to check instead: + entriesMap := make(map[string]string) + expMap := make(map[string]string) + entries := strings.Split(value, ",") + expectedEntries := strings.Split(expected, ",") + for _, entry := range entries { + keyval := strings.Split(entry, "=") + entriesMap[keyval[0]] = keyval[1] + } + for _, expected := range expectedEntries { + keyval := strings.Split(expected, "=") + expMap[keyval[0]] = keyval[1] + } + if !reflect.DeepEqual(expMap, entriesMap) { + t.Fatalf("Expected entries: %v, got: %v", expected, value) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/disk_usage.go b/vendor/github.com/moby/moby/cli/command/formatter/disk_usage.go new file mode 100644 index 0000000..5309d88 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/disk_usage.go @@ -0,0 +1,334 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" +) + +const ( + defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" + defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}" + defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" + defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" + + typeHeader = "TYPE" + totalHeader = "TOTAL" + activeHeader = "ACTIVE" + reclaimableHeader = "RECLAIMABLE" + containersHeader = "CONTAINERS" + sharedSizeHeader = "SHARED SIZE" + uniqueSizeHeader = "UNIQUE SiZE" +) + +// DiskUsageContext contains disk usage specific information required by the formater, encapsulate a Context struct. +type DiskUsageContext struct { + Context + Verbose bool + LayersSize int64 + Images []*types.ImageSummary + Containers []*types.Container + Volumes []*types.Volume +} + +func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) { + ctx.buffer = bytes.NewBufferString("") + ctx.header = "" + ctx.Format = Format(format) + ctx.preFormat() + + return ctx.parseFormat() +} + +func (ctx *DiskUsageContext) Write() { + if ctx.Verbose == false { + ctx.buffer = bytes.NewBufferString("") + ctx.Format = defaultDiskUsageTableFormat + ctx.preFormat() + + tmpl, err := ctx.parseFormat() + if err != nil { + return + } + + err = ctx.contextFormat(tmpl, &diskUsageImagesContext{ + totalSize: ctx.LayersSize, + images: ctx.Images, + }) + if err != nil { + return + } + err = ctx.contextFormat(tmpl, &diskUsageContainersContext{ + containers: ctx.Containers, + }) + if err != nil { + return + } + + err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{ + volumes: ctx.Volumes, + }) + if err != nil { + return + } + + ctx.postFormat(tmpl, &diskUsageContainersContext{containers: []*types.Container{}}) + + return + } + + // First images + tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat) + if err != nil { + return + } + + ctx.Output.Write([]byte("Images space usage:\n\n")) + for _, i := range ctx.Images { + repo := "" + tag := "" + if len(i.RepoTags) > 0 && !isDangling(*i) { + // Only show the first tag + ref, err := reference.ParseNamed(i.RepoTags[0]) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + repo = ref.Name() + tag = nt.Tag() + } + } + + err = ctx.contextFormat(tmpl, &imageContext{ + repo: repo, + tag: tag, + trunc: true, + i: *i, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, &imageContext{}) + + // Now containers + ctx.Output.Write([]byte("\nContainers space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat) + if err != nil { + return + } + for _, c := range ctx.Containers { + // Don't display the virtual size + c.SizeRootFs = 0 + err = ctx.contextFormat(tmpl, &containerContext{ + trunc: true, + c: *c, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, &containerContext{}) + + // And volumes + ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat) + if err != nil { + return + } + for _, v := range ctx.Volumes { + err = ctx.contextFormat(tmpl, &volumeContext{ + v: *v, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, &volumeContext{v: types.Volume{}}) +} + +type diskUsageImagesContext struct { + HeaderContext + totalSize int64 + images []*types.ImageSummary +} + +func (c *diskUsageImagesContext) Type() string { + c.AddHeader(typeHeader) + return "Images" +} + +func (c *diskUsageImagesContext) TotalCount() string { + c.AddHeader(totalHeader) + return fmt.Sprintf("%d", len(c.images)) +} + +func (c *diskUsageImagesContext) Active() string { + c.AddHeader(activeHeader) + used := 0 + for _, i := range c.images { + if i.Containers > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageImagesContext) Size() string { + c.AddHeader(sizeHeader) + return units.HumanSize(float64(c.totalSize)) + +} + +func (c *diskUsageImagesContext) Reclaimable() string { + var used int64 + + c.AddHeader(reclaimableHeader) + for _, i := range c.images { + if i.Containers != 0 { + if i.VirtualSize == -1 || i.SharedSize == -1 { + continue + } + used += i.VirtualSize - i.SharedSize + } + } + + reclaimable := c.totalSize - used + if c.totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize) + } + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} + +type diskUsageContainersContext struct { + HeaderContext + verbose bool + containers []*types.Container +} + +func (c *diskUsageContainersContext) Type() string { + c.AddHeader(typeHeader) + return "Containers" +} + +func (c *diskUsageContainersContext) TotalCount() string { + c.AddHeader(totalHeader) + return fmt.Sprintf("%d", len(c.containers)) +} + +func (c *diskUsageContainersContext) isActive(container types.Container) bool { + return strings.Contains(container.State, "running") || + strings.Contains(container.State, "paused") || + strings.Contains(container.State, "restarting") +} + +func (c *diskUsageContainersContext) Active() string { + c.AddHeader(activeHeader) + used := 0 + for _, container := range c.containers { + if c.isActive(*container) { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageContainersContext) Size() string { + var size int64 + + c.AddHeader(sizeHeader) + for _, container := range c.containers { + size += container.SizeRw + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageContainersContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + c.AddHeader(reclaimableHeader) + for _, container := range c.containers { + if !c.isActive(*container) { + reclaimable += container.SizeRw + } + totalSize += container.SizeRw + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} + +type diskUsageVolumesContext struct { + HeaderContext + verbose bool + volumes []*types.Volume +} + +func (c *diskUsageVolumesContext) Type() string { + c.AddHeader(typeHeader) + return "Local Volumes" +} + +func (c *diskUsageVolumesContext) TotalCount() string { + c.AddHeader(totalHeader) + return fmt.Sprintf("%d", len(c.volumes)) +} + +func (c *diskUsageVolumesContext) Active() string { + c.AddHeader(activeHeader) + + used := 0 + for _, v := range c.volumes { + if v.UsageData.RefCount > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageVolumesContext) Size() string { + var size int64 + + c.AddHeader(sizeHeader) + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + size += v.UsageData.Size + } + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageVolumesContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + c.AddHeader(reclaimableHeader) + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + if v.UsageData.RefCount == 0 { + reclaimable += v.UsageData.Size + } + totalSize += v.UsageData.Size + } + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/formatter.go b/vendor/github.com/moby/moby/cli/command/formatter/formatter.go new file mode 100644 index 0000000..e859a1c --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/formatter.go @@ -0,0 +1,123 @@ +package formatter + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/tabwriter" + "text/template" + + "github.com/docker/docker/utils/templates" +) + +// Format keys used to specify certain kinds of output formats +const ( + TableFormatKey = "table" + RawFormatKey = "raw" + PrettyFormatKey = "pretty" + + defaultQuietFormat = "{{.ID}}" +) + +// Format is the format string rendered using the Context +type Format string + +// IsTable returns true if the format is a table-type format +func (f Format) IsTable() bool { + return strings.HasPrefix(string(f), TableFormatKey) +} + +// Contains returns true if the format contains the substring +func (f Format) Contains(sub string) bool { + return strings.Contains(string(f), sub) +} + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format Format + // Trunc when set to true will truncate the output of certain fields such as Container ID. + Trunc bool + + // internal element + finalFormat string + header string + buffer *bytes.Buffer +} + +func (c *Context) preFormat() { + c.finalFormat = string(c.Format) + + // TODO: handle this in the Format type + if c.Format.IsTable() { + c.finalFormat = c.finalFormat[len(TableFormatKey):] + } + + c.finalFormat = strings.Trim(c.finalFormat, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + c.finalFormat = r.Replace(c.finalFormat) +} + +func (c *Context) parseFormat() (*template.Template, error) { + tmpl, err := templates.Parse(c.finalFormat) + if err != nil { + return tmpl, fmt.Errorf("Template parsing error: %v\n", err) + } + return tmpl, err +} + +func (c *Context) postFormat(tmpl *template.Template, subContext subContext) { + if c.Format.IsTable() { + if len(c.header) == 0 { + // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template + tmpl.Execute(bytes.NewBufferString(""), subContext) + c.header = subContext.FullHeader() + } + + t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) + t.Write([]byte(c.header)) + t.Write([]byte("\n")) + c.buffer.WriteTo(t) + t.Flush() + } else { + c.buffer.WriteTo(c.Output) + } +} + +func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { + if err := tmpl.Execute(c.buffer, subContext); err != nil { + return fmt.Errorf("Template parsing error: %v\n", err) + } + if c.Format.IsTable() && len(c.header) == 0 { + c.header = subContext.FullHeader() + } + c.buffer.WriteString("\n") + return nil +} + +// SubFormat is a function type accepted by Write() +type SubFormat func(func(subContext) error) error + +// Write the template to the buffer using this Context +func (c *Context) Write(sub subContext, f SubFormat) error { + c.buffer = bytes.NewBufferString("") + c.preFormat() + + tmpl, err := c.parseFormat() + if err != nil { + return err + } + + subFormat := func(subContext subContext) error { + return c.contextFormat(tmpl, subContext) + } + if err := f(subFormat); err != nil { + return err + } + + c.postFormat(tmpl, sub) + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/image.go b/vendor/github.com/moby/moby/cli/command/formatter/image.go new file mode 100644 index 0000000..5c7de82 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/image.go @@ -0,0 +1,259 @@ +package formatter + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + units "github.com/docker/go-units" +) + +const ( + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" +) + +// ImageContext contains image specific information required by the formater, encapsulate a Context struct. +type ImageContext struct { + Context + Digest bool +} + +func isDangling(image types.ImageSummary) bool { + return len(image.RepoTags) == 1 && image.RepoTags[0] == ":" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "@" +} + +// NewImageFormat returns a format for rendering an ImageContext +func NewImageFormat(source string, quiet bool, digest bool) Format { + switch source { + case TableFormatKey: + switch { + case quiet: + return defaultQuietFormat + case digest: + return defaultImageTableFormatWithDigest + default: + return defaultImageTableFormat + } + case RawFormatKey: + switch { + case quiet: + return `image_id: {{.ID}}` + case digest: + return `repository: {{ .Repository }} +tag: {{.Tag}} +digest: {{.Digest}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + default: + return `repository: {{ .Repository }} +tag: {{.Tag}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } + } + + format := Format(source) + if format.IsTable() && digest && !format.Contains("{{.Digest}}") { + format += "\t{{.Digest}}" + } + return format +} + +// ImageWrite writes the formatter images using the ImageContext +func ImageWrite(ctx ImageContext, images []types.ImageSummary) error { + render := func(format func(subContext subContext) error) error { + return imageFormat(ctx, images, format) + } + return ctx.Write(&imageContext{}, render) +} + +func imageFormat(ctx ImageContext, images []types.ImageSummary, format func(subContext subContext) error) error { + for _, image := range images { + images := []*imageContext{} + if isDangling(image) { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: "", + tag: "", + digest: "", + }) + } else { + repoTags := map[string][]string{} + repoDigests := map[string][]string{} + + for _, refString := range append(image.RepoTags) { + ref, err := reference.ParseNamed(refString) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + repoTags[ref.Name()] = append(repoTags[ref.Name()], nt.Tag()) + } + } + for _, refString := range append(image.RepoDigests) { + ref, err := reference.ParseNamed(refString) + if err != nil { + continue + } + if c, ok := ref.(reference.Canonical); ok { + repoDigests[ref.Name()] = append(repoDigests[ref.Name()], c.Digest().String()) + } + } + + for repo, tags := range repoTags { + digests := repoDigests[repo] + + // Do not display digests as their own row + delete(repoDigests, repo) + + if !ctx.Digest { + // Ignore digest references, just show tag once + digests = nil + } + + for _, tag := range tags { + if len(digests) == 0 { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: "", + }) + continue + } + // Display the digests for each tag + for _, dgst := range digests { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: dgst, + }) + } + + } + } + + // Show rows for remaining digest only references + for repo, digests := range repoDigests { + // If digests are displayed, show row per digest + if ctx.Digest { + for _, dgst := range digests { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: "", + digest: dgst, + }) + } + } else { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: "", + }) + } + } + } + for _, imageCtx := range images { + if err := format(imageCtx); err != nil { + return err + } + } + } + return nil +} + +type imageContext struct { + HeaderContext + trunc bool + i types.ImageSummary + repo string + tag string + digest string +} + +func (c *imageContext) ID() string { + c.AddHeader(imageIDHeader) + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + c.AddHeader(repositoryHeader) + return c.repo +} + +func (c *imageContext) Tag() string { + c.AddHeader(tagHeader) + return c.tag +} + +func (c *imageContext) Digest() string { + c.AddHeader(digestHeader) + return c.digest +} + +func (c *imageContext) CreatedSince() string { + c.AddHeader(createdSinceHeader) + createdAt := time.Unix(int64(c.i.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *imageContext) CreatedAt() string { + c.AddHeader(createdAtHeader) + return time.Unix(int64(c.i.Created), 0).String() +} + +func (c *imageContext) Size() string { + c.AddHeader(sizeHeader) + return units.HumanSizeWithPrecision(float64(c.i.Size), 3) +} + +func (c *imageContext) Containers() string { + c.AddHeader(containersHeader) + if c.i.Containers == -1 { + return "N/A" + } + return fmt.Sprintf("%d", c.i.Containers) +} + +func (c *imageContext) VirtualSize() string { + c.AddHeader(sizeHeader) + return units.HumanSize(float64(c.i.VirtualSize)) +} + +func (c *imageContext) SharedSize() string { + c.AddHeader(sharedSizeHeader) + if c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.SharedSize)) +} + +func (c *imageContext) UniqueSize() string { + c.AddHeader(uniqueSizeHeader) + if c.i.VirtualSize == -1 || c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.VirtualSize - c.i.SharedSize)) +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/image_test.go b/vendor/github.com/moby/moby/cli/command/formatter/image_test.go new file mode 100644 index 0000000..ffe77f6 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/image_test.go @@ -0,0 +1,333 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestImageContext(t *testing.T) { + imageID := stringid.GenerateRandomID() + unix := time.Now().Unix() + + var ctx imageContext + cases := []struct { + imageCtx imageContext + expValue string + expHeader string + call func() string + }{ + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: true, + }, stringid.TruncateID(imageID), imageIDHeader, ctx.ID}, + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: false, + }, imageID, imageIDHeader, ctx.ID}, + {imageContext{ + i: types.ImageSummary{Size: 10, VirtualSize: 10}, + trunc: true, + }, "10 B", sizeHeader, ctx.Size}, + {imageContext{ + i: types.ImageSummary{Created: unix}, + trunc: true, + }, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, + // FIXME + // {imageContext{ + // i: types.ImageSummary{Created: unix}, + // trunc: true, + // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince}, + {imageContext{ + i: types.ImageSummary{}, + repo: "busybox", + }, "busybox", repositoryHeader, ctx.Repository}, + {imageContext{ + i: types.ImageSummary{}, + tag: "latest", + }, "latest", tagHeader, ctx.Tag}, + {imageContext{ + i: types.ImageSummary{}, + digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", + }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", digestHeader, ctx.Digest}, + } + + for _, c := range cases { + ctx = c.imageCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } +} + +func TestImageContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context ImageContext + expected string + }{ + // Errors + { + ImageContext{ + Context: Context{ + Format: "{{InvalidFunction}}", + }, + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + ImageContext{ + Context: Context{ + Format: "{{nil}}", + }, + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, false), + }, + }, + `REPOSITORY TAG IMAGE ID CREATED SIZE +image tag1 imageID1 24 hours ago 0 B +image tag2 imageID2 24 hours ago 0 B + imageID3 24 hours ago 0 B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, false), + }, + }, + "REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + }, + Digest: true, + }, + `REPOSITORY DIGEST +image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image + +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", true, false), + }, + }, + "REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, false), + }, + }, + "imageID1\nimageID2\nimageID3\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, true), + }, + Digest: true, + }, + `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0 B +image tag2 imageID2 24 hours ago 0 B + imageID3 24 hours ago 0 B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, true), + }, + Digest: true, + }, + "imageID1\nimageID2\nimageID3\n", + }, + // Raw Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", false, false), + }, + }, + fmt.Sprintf(`repository: image +tag: tag1 +image_id: imageID1 +created_at: %s +virtual_size: 0 B + +repository: image +tag: tag2 +image_id: imageID2 +created_at: %s +virtual_size: 0 B + +repository: +tag: +image_id: imageID3 +created_at: %s +virtual_size: 0 B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", false, true), + }, + Digest: true, + }, + fmt.Sprintf(`repository: image +tag: tag1 +digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image_id: imageID1 +created_at: %s +virtual_size: 0 B + +repository: image +tag: tag2 +digest: +image_id: imageID2 +created_at: %s +virtual_size: 0 B + +repository: +tag: +digest: +image_id: imageID3 +created_at: %s +virtual_size: 0 B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", true, false), + }, + }, + `image_id: imageID1 +image_id: imageID2 +image_id: imageID3 +`, + }, + // Custom Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + }, + }, + "image\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + }, + Digest: true, + }, + "image\nimage\n\n", + }, + } + + for _, testcase := range cases { + images := []types.ImageSummary{ + {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, + {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, + {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ImageWrite(testcase.context, images) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestImageContextWriteWithNoImage(t *testing.T) { + out := bytes.NewBufferString("") + images := []types.ImageSummary{} + + contexts := []struct { + context ImageContext + expected string + }{ + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + Output: out, + }, + }, + "", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, false), + Output: out, + }, + }, + "REPOSITORY\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + Output: out, + }, + }, + "", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + Output: out, + }, + }, + "REPOSITORY DIGEST\n", + }, + } + + for _, context := range contexts { + ImageWrite(context.context, images) + assert.Equal(t, out.String(), context.expected) + // Clean buffer + out.Reset() + } +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/network.go b/vendor/github.com/moby/moby/cli/command/formatter/network.go new file mode 100644 index 0000000..7fbad7d --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/network.go @@ -0,0 +1,117 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +const ( + defaultNetworkTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}" + + networkIDHeader = "NETWORK ID" + ipv6Header = "IPV6" + internalHeader = "INTERNAL" +) + +// NewNetworkFormat returns a Format for rendering using a network Context +func NewNetworkFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultNetworkTableFormat + case RawFormatKey: + if quiet { + return `network_id: {{.ID}}` + } + return `network_id: {{.ID}}\nname: {{.Name}}\ndriver: {{.Driver}}\nscope: {{.Scope}}\n` + } + return Format(source) +} + +// NetworkWrite writes the context +func NetworkWrite(ctx Context, networks []types.NetworkResource) error { + render := func(format func(subContext subContext) error) error { + for _, network := range networks { + networkCtx := &networkContext{trunc: ctx.Trunc, n: network} + if err := format(networkCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(&networkContext{}, render) +} + +type networkContext struct { + HeaderContext + trunc bool + n types.NetworkResource +} + +func (c *networkContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *networkContext) ID() string { + c.AddHeader(networkIDHeader) + if c.trunc { + return stringid.TruncateID(c.n.ID) + } + return c.n.ID +} + +func (c *networkContext) Name() string { + c.AddHeader(nameHeader) + return c.n.Name +} + +func (c *networkContext) Driver() string { + c.AddHeader(driverHeader) + return c.n.Driver +} + +func (c *networkContext) Scope() string { + c.AddHeader(scopeHeader) + return c.n.Scope +} + +func (c *networkContext) IPv6() string { + c.AddHeader(ipv6Header) + return fmt.Sprintf("%v", c.n.EnableIPv6) +} + +func (c *networkContext) Internal() string { + c.AddHeader(internalHeader) + return fmt.Sprintf("%v", c.n.Internal) +} + +func (c *networkContext) Labels() string { + c.AddHeader(labelsHeader) + if c.n.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.n.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *networkContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.AddHeader(h) + + if c.n.Labels == nil { + return "" + } + return c.n.Labels[name] +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/network_test.go b/vendor/github.com/moby/moby/cli/command/formatter/network_test.go new file mode 100644 index 0000000..b40a534 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/network_test.go @@ -0,0 +1,208 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestNetworkContext(t *testing.T) { + networkID := stringid.GenerateRandomID() + + var ctx networkContext + cases := []struct { + networkCtx networkContext + expValue string + expHeader string + call func() string + }{ + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: false, + }, networkID, networkIDHeader, ctx.ID}, + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: true, + }, stringid.TruncateID(networkID), networkIDHeader, ctx.ID}, + {networkContext{ + n: types.NetworkResource{Name: "network_name"}, + }, "network_name", nameHeader, ctx.Name}, + {networkContext{ + n: types.NetworkResource{Driver: "driver_name"}, + }, "driver_name", driverHeader, ctx.Driver}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: true}, + }, "true", ipv6Header, ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: false}, + }, "false", ipv6Header, ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{Internal: true}, + }, "true", internalHeader, ctx.Internal}, + {networkContext{ + n: types.NetworkResource{Internal: false}, + }, "false", internalHeader, ctx.Internal}, + {networkContext{ + n: types.NetworkResource{}, + }, "", labelsHeader, ctx.Labels}, + {networkContext{ + n: types.NetworkResource{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", labelsHeader, ctx.Labels}, + } + + for _, c := range cases { + ctx = c.networkCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } +} + +func TestNetworkContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewNetworkFormat("table", false)}, + `NETWORK ID NAME DRIVER SCOPE +networkID1 foobar_baz foo local +networkID2 foobar_bar bar local +`, + }, + { + Context{Format: NewNetworkFormat("table", true)}, + `networkID1 +networkID2 +`, + }, + { + Context{Format: NewNetworkFormat("table {{.Name}}", false)}, + `NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewNetworkFormat("table {{.Name}}", true)}, + `NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewNetworkFormat("raw", false)}, + `network_id: networkID1 +name: foobar_baz +driver: foo +scope: local + +network_id: networkID2 +name: foobar_bar +driver: bar +scope: local + +`, + }, + { + Context{Format: NewNetworkFormat("raw", true)}, + `network_id: networkID1 +network_id: networkID2 +`, + }, + // Custom Format + { + Context{Format: NewNetworkFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz", Driver: "foo", Scope: "local"}, + {ID: "networkID2", Name: "foobar_bar", Driver: "bar", Scope: "local"}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := NetworkWrite(testcase.context, networks) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestNetworkContextWriteJSON(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Driver": "", "ID": "networkID1", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_baz", "Scope": ""}, + {"Driver": "", "ID": "networkID2", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_bar", "Scope": ""}, + } + + out := bytes.NewBufferString("") + err := NetworkWrite(Context{Format: "{{json .}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, m, expectedJSONs[i]) + } +} + +func TestNetworkContextWriteJSONField(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := NetworkWrite(Context{Format: "{{json .ID}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Equal(t, s, networks[i].ID) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/reflect.go b/vendor/github.com/moby/moby/cli/command/formatter/reflect.go new file mode 100644 index 0000000..d1d8737 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/reflect.go @@ -0,0 +1,65 @@ +package formatter + +import ( + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +func marshalJSON(x interface{}) ([]byte, error) { + m, err := marshalMap(x) + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// marshalMap marshals x to map[string]interface{} +func marshalMap(x interface{}) (map[string]interface{}, error) { + val := reflect.ValueOf(x) + if val.Kind() != reflect.Ptr { + return nil, fmt.Errorf("expected a pointer to a struct, got %v", val.Kind()) + } + if val.IsNil() { + return nil, fmt.Errorf("expxected a pointer to a struct, got nil pointer") + } + valElem := val.Elem() + if valElem.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) + } + typ := val.Type() + m := make(map[string]interface{}) + for i := 0; i < val.NumMethod(); i++ { + k, v, err := marshalForMethod(typ.Method(i), val.Method(i)) + if err != nil { + return nil, err + } + if k != "" { + m[k] = v + } + } + return m, nil +} + +var unmarshallableNames = map[string]struct{}{"FullHeader": {}} + +// marshalForMethod returns the map key and the map value for marshalling the method. +// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. "unexportedFunc()") +func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) { + if val.Kind() != reflect.Func { + return "", nil, fmt.Errorf("expected func, got %v", val.Kind()) + } + name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut() + _, blackListed := unmarshallableNames[name] + // FIXME: In text/template, (numOut == 2) is marshallable, + // if the type of the second param is error. + marshallable := unicode.IsUpper(rune(name[0])) && !blackListed && + numIn == 0 && numOut == 1 + if !marshallable { + return "", nil, nil + } + result := val.Call(make([]reflect.Value, numIn)) + intf := result[0].Interface() + return name, intf, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/reflect_test.go b/vendor/github.com/moby/moby/cli/command/formatter/reflect_test.go new file mode 100644 index 0000000..e547b18 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/reflect_test.go @@ -0,0 +1,66 @@ +package formatter + +import ( + "reflect" + "testing" +) + +type dummy struct { +} + +func (d *dummy) Func1() string { + return "Func1" +} + +func (d *dummy) func2() string { + return "func2(should not be marshalled)" +} + +func (d *dummy) Func3() (string, int) { + return "Func3(should not be marshalled)", -42 +} + +func (d *dummy) Func4() int { + return 4 +} + +type dummyType string + +func (d *dummy) Func5() dummyType { + return dummyType("Func5") +} + +func (d *dummy) FullHeader() string { + return "FullHeader(should not be marshalled)" +} + +var dummyExpected = map[string]interface{}{ + "Func1": "Func1", + "Func4": 4, + "Func5": dummyType("Func5"), +} + +func TestMarshalMap(t *testing.T) { + d := dummy{} + m, err := marshalMap(&d) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(dummyExpected, m) { + t.Fatalf("expected %+v, got %+v", + dummyExpected, m) + } +} + +func TestMarshalMapBad(t *testing.T) { + if _, err := marshalMap(nil); err == nil { + t.Fatal("expected an error (argument is nil)") + } + if _, err := marshalMap(dummy{}); err == nil { + t.Fatal("expected an error (argument is non-pointer)") + } + x := 42 + if _, err := marshalMap(&x); err == nil { + t.Fatal("expected an error (argument is a pointer to non-struct)") + } +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/service.go b/vendor/github.com/moby/moby/cli/command/formatter/service.go new file mode 100644 index 0000000..aaa7838 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/service.go @@ -0,0 +1,322 @@ +package formatter + +import ( + "fmt" + "strings" + "time" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command/inspect" + units "github.com/docker/go-units" +) + +const serviceInspectPrettyTemplate Format = ` +ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Service Mode: +{{- if .IsModeGlobal }} Global +{{- else if .IsModeReplicated }} Replicated +{{- if .ModeReplicatedReplicas }} + Replicas: {{ .ModeReplicatedReplicas }} +{{- end }}{{ end }} +{{- if .HasUpdateStatus }} +UpdateStatus: + State: {{ .UpdateStatusState }} + Started: {{ .UpdateStatusStarted }} +{{- if .UpdateIsCompleted }} + Completed: {{ .UpdateStatusCompleted }} +{{- end }} + Message: {{ .UpdateStatusMessage }} +{{- end }} +Placement: +{{- if .TaskPlacementConstraints -}} + Contraints: {{ .TaskPlacementConstraints }} +{{- end }} +{{- if .HasUpdateConfig }} +UpdateConfig: + Parallelism: {{ .UpdateParallelism }} +{{- if .HasUpdateDelay}} + Delay: {{ .UpdateDelay }} +{{- end }} + On failure: {{ .UpdateOnFailure }} +{{- if .HasUpdateMonitor}} + Monitoring Period: {{ .UpdateMonitor }} +{{- end }} + Max failure ratio: {{ .UpdateMaxFailureRatio }} +{{- end }} +ContainerSpec: + Image: {{ .ContainerImage }} +{{- if .ContainerArgs }} + Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }} +{{- end -}} +{{- if .ContainerEnv }} + Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }} +{{- end -}} +{{- if .ContainerWorkDir }} + Dir: {{ .ContainerWorkDir }} +{{- end -}} +{{- if .ContainerUser }} + User: {{ .ContainerUser }} +{{- end }} +{{- if .ContainerMounts }} +Mounts: +{{- end }} +{{- range $mount := .ContainerMounts }} + Target = {{ $mount.Target }} + Source = {{ $mount.Source }} + ReadOnly = {{ $mount.ReadOnly }} + Type = {{ $mount.Type }} +{{- end -}} +{{- if .HasResources }} +Resources: +{{- if .HasResourceReservations }} + Reservations: +{{- if gt .ResourceReservationNanoCPUs 0.0 }} + CPU: {{ .ResourceReservationNanoCPUs }} +{{- end }} +{{- if .ResourceReservationMemory }} + Memory: {{ .ResourceReservationMemory }} +{{- end }}{{ end }} +{{- if .HasResourceLimits }} + Limits: +{{- if gt .ResourceLimitsNanoCPUs 0.0 }} + CPU: {{ .ResourceLimitsNanoCPUs }} +{{- end }} +{{- if .ResourceLimitMemory }} + Memory: {{ .ResourceLimitMemory }} +{{- end }}{{ end }}{{ end }} +{{- if .Networks }} +Networks: +{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }} +Endpoint Mode: {{ .EndpointMode }} +{{- if .Ports }} +Ports: +{{- range $port := .Ports }} + PublishedPort {{ $port.PublishedPort }} + Protocol = {{ $port.Protocol }} + TargetPort = {{ $port.TargetPort }} +{{- end }} {{ end -}} +` + +// NewServiceFormat returns a Format for rendering using a Context +func NewServiceFormat(source string) Format { + switch source { + case PrettyFormatKey: + return serviceInspectPrettyTemplate + default: + return Format(strings.TrimPrefix(source, RawFormatKey)) + } +} + +// ServiceInspectWrite renders the context for a list of services +func ServiceInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != serviceInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + serviceI, _, err := getRef(ref) + if err != nil { + return err + } + service, ok := serviceI.(swarm.Service) + if !ok { + return fmt.Errorf("got wrong object to inspect") + } + if err := format(&serviceInspectContext{Service: service}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&serviceInspectContext{}, render) +} + +type serviceInspectContext struct { + swarm.Service + subContext +} + +func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) { + return marshalJSON(ctx) +} + +func (ctx *serviceInspectContext) ID() string { + return ctx.Service.ID +} + +func (ctx *serviceInspectContext) Name() string { + return ctx.Service.Spec.Name +} + +func (ctx *serviceInspectContext) Labels() map[string]string { + return ctx.Service.Spec.Labels +} + +func (ctx *serviceInspectContext) IsModeGlobal() bool { + return ctx.Service.Spec.Mode.Global != nil +} + +func (ctx *serviceInspectContext) IsModeReplicated() bool { + return ctx.Service.Spec.Mode.Replicated != nil +} + +func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 { + return ctx.Service.Spec.Mode.Replicated.Replicas +} + +func (ctx *serviceInspectContext) HasUpdateStatus() bool { + return ctx.Service.UpdateStatus.State != "" +} + +func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState { + return ctx.Service.UpdateStatus.State +} + +func (ctx *serviceInspectContext) UpdateStatusStarted() string { + return units.HumanDuration(time.Since(ctx.Service.UpdateStatus.StartedAt)) +} + +func (ctx *serviceInspectContext) UpdateIsCompleted() bool { + return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted +} + +func (ctx *serviceInspectContext) UpdateStatusCompleted() string { + return units.HumanDuration(time.Since(ctx.Service.UpdateStatus.CompletedAt)) +} + +func (ctx *serviceInspectContext) UpdateStatusMessage() string { + return ctx.Service.UpdateStatus.Message +} + +func (ctx *serviceInspectContext) TaskPlacementConstraints() []string { + if ctx.Service.Spec.TaskTemplate.Placement != nil { + return ctx.Service.Spec.TaskTemplate.Placement.Constraints + } + return nil +} + +func (ctx *serviceInspectContext) HasUpdateConfig() bool { + return ctx.Service.Spec.UpdateConfig != nil +} + +func (ctx *serviceInspectContext) UpdateParallelism() uint64 { + return ctx.Service.Spec.UpdateConfig.Parallelism +} + +func (ctx *serviceInspectContext) HasUpdateDelay() bool { + return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateDelay() time.Duration { + return ctx.Service.Spec.UpdateConfig.Delay +} + +func (ctx *serviceInspectContext) UpdateOnFailure() string { + return ctx.Service.Spec.UpdateConfig.FailureAction +} + +func (ctx *serviceInspectContext) HasUpdateMonitor() bool { + return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateMonitor() time.Duration { + return ctx.Service.Spec.UpdateConfig.Monitor +} + +func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 { + return ctx.Service.Spec.UpdateConfig.MaxFailureRatio +} + +func (ctx *serviceInspectContext) ContainerImage() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image +} + +func (ctx *serviceInspectContext) ContainerArgs() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args +} + +func (ctx *serviceInspectContext) ContainerEnv() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env +} + +func (ctx *serviceInspectContext) ContainerWorkDir() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir +} + +func (ctx *serviceInspectContext) ContainerUser() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.User +} + +func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts +} + +func (ctx *serviceInspectContext) HasResources() bool { + return ctx.Service.Spec.TaskTemplate.Resources != nil +} + +func (ctx *serviceInspectContext) HasResourceReservations() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceReservationNanoCPUs() float64 { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 { + return float64(0) + } + return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceReservationMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes)) +} + +func (ctx *serviceInspectContext) HasResourceLimits() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 { + return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceLimitMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes)) +} + +func (ctx *serviceInspectContext) Networks() []string { + var out []string + for _, n := range ctx.Service.Spec.Networks { + out = append(out, n.Target) + } + return out +} + +func (ctx *serviceInspectContext) EndpointMode() string { + if ctx.Service.Spec.EndpointSpec == nil { + return "" + } + + return string(ctx.Service.Spec.EndpointSpec.Mode) +} + +func (ctx *serviceInspectContext) Ports() []swarm.PortConfig { + return ctx.Service.Endpoint.Ports +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/stats.go b/vendor/github.com/moby/moby/cli/command/formatter/stats.go new file mode 100644 index 0000000..30c9e30 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/stats.go @@ -0,0 +1,213 @@ +package formatter + +import ( + "fmt" + "sync" + + units "github.com/docker/go-units" +) + +const ( + winOSType = "windows" + defaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" + winDefaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" + + containerHeader = "CONTAINER" + cpuPercHeader = "CPU %" + netIOHeader = "NET I/O" + blockIOHeader = "BLOCK I/O" + memPercHeader = "MEM %" // Used only on Linux + winMemUseHeader = "PRIV WORKING SET" // Used only on Windows + memUseHeader = "MEM USAGE / LIMIT" // Used only on Linux + pidsHeader = "PIDS" // Used only on Linux +) + +// StatsEntry represents represents the statistics data collected from a container +type StatsEntry struct { + Container string + Name string + ID string + CPUPercentage float64 + Memory float64 // On Windows this is the private working set + MemoryLimit float64 // Not used on Windows + MemoryPercentage float64 // Not used on Windows + NetworkRx float64 + NetworkTx float64 + BlockRead float64 + BlockWrite float64 + PidsCurrent uint64 // Not used on Windows + IsInvalid bool + OSType string +} + +// ContainerStats represents an entity to store containers statistics synchronously +type ContainerStats struct { + mutex sync.Mutex + StatsEntry + err error +} + +// GetError returns the container statistics error. +// This is used to determine whether the statistics are valid or not +func (cs *ContainerStats) GetError() error { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.err +} + +// SetErrorAndReset zeroes all the container statistics and store the error. +// It is used when receiving time out error during statistics collecting to reduce lock overhead +func (cs *ContainerStats) SetErrorAndReset(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.CPUPercentage = 0 + cs.Memory = 0 + cs.MemoryPercentage = 0 + cs.MemoryLimit = 0 + cs.NetworkRx = 0 + cs.NetworkTx = 0 + cs.BlockRead = 0 + cs.BlockWrite = 0 + cs.PidsCurrent = 0 + cs.err = err + cs.IsInvalid = true +} + +// SetError sets container statistics error +func (cs *ContainerStats) SetError(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.err = err + if err != nil { + cs.IsInvalid = true + } +} + +// SetStatistics set the container statistics +func (cs *ContainerStats) SetStatistics(s StatsEntry) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + s.Container = cs.Container + s.OSType = cs.OSType + cs.StatsEntry = s +} + +// GetStatistics returns container statistics with other meta data such as the container name +func (cs *ContainerStats) GetStatistics() StatsEntry { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.StatsEntry +} + +// NewStatsFormat returns a format for rendering an CStatsContext +func NewStatsFormat(source, osType string) Format { + if source == TableFormatKey { + if osType == winOSType { + return Format(winDefaultStatsTableFormat) + } + return Format(defaultStatsTableFormat) + } + return Format(source) +} + +// NewContainerStats returns a new ContainerStats entity and sets in it the given name +func NewContainerStats(container, osType string) *ContainerStats { + return &ContainerStats{ + StatsEntry: StatsEntry{Container: container, OSType: osType}, + } +} + +// ContainerStatsWrite renders the context for a list of containers statistics +func ContainerStatsWrite(ctx Context, containerStats []StatsEntry) error { + render := func(format func(subContext subContext) error) error { + for _, cstats := range containerStats { + containerStatsCtx := &containerStatsContext{ + s: cstats, + } + if err := format(containerStatsCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(&containerStatsContext{}, render) +} + +type containerStatsContext struct { + HeaderContext + s StatsEntry +} + +func (c *containerStatsContext) Container() string { + c.AddHeader(containerHeader) + return c.s.Container +} + +func (c *containerStatsContext) Name() string { + c.AddHeader(nameHeader) + if len(c.s.Name) > 1 { + return c.s.Name[1:] + } + return "--" +} + +func (c *containerStatsContext) ID() string { + c.AddHeader(containerIDHeader) + return c.s.ID +} + +func (c *containerStatsContext) CPUPerc() string { + c.AddHeader(cpuPercHeader) + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.CPUPercentage) +} + +func (c *containerStatsContext) MemUsage() string { + header := memUseHeader + if c.s.OSType == winOSType { + header = winMemUseHeader + } + c.AddHeader(header) + if c.s.IsInvalid { + return fmt.Sprintf("-- / --") + } + if c.s.OSType == winOSType { + return fmt.Sprintf("%s", units.BytesSize(c.s.Memory)) + } + return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit)) +} + +func (c *containerStatsContext) MemPerc() string { + header := memPercHeader + c.AddHeader(header) + if c.s.IsInvalid || c.s.OSType == winOSType { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage) +} + +func (c *containerStatsContext) NetIO() string { + c.AddHeader(netIOHeader) + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.NetworkRx, 3), units.HumanSizeWithPrecision(c.s.NetworkTx, 3)) +} + +func (c *containerStatsContext) BlockIO() string { + c.AddHeader(blockIOHeader) + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.BlockRead, 3), units.HumanSizeWithPrecision(c.s.BlockWrite, 3)) +} + +func (c *containerStatsContext) PIDs() string { + c.AddHeader(pidsHeader) + if c.s.IsInvalid || c.s.OSType == winOSType { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%d", c.s.PidsCurrent) +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/stats_test.go b/vendor/github.com/moby/moby/cli/command/formatter/stats_test.go new file mode 100644 index 0000000..f5c6cae --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/stats_test.go @@ -0,0 +1,236 @@ +package formatter + +import ( + "bytes" + "testing" + + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestContainerStatsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + + var ctx containerStatsContext + tt := []struct { + stats StatsEntry + expValue string + expHeader string + call func() string + }{ + {StatsEntry{Container: containerID}, containerID, containerHeader, ctx.Container}, + {StatsEntry{CPUPercentage: 5.5}, "5.50%", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{CPUPercentage: 5.5, IsInvalid: true}, "--", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3}, "0.31 B / 12.3 B", netIOHeader, ctx.NetIO}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3, IsInvalid: true}, "--", netIOHeader, ctx.NetIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3}, "0.1 B / 2.3 B", blockIOHeader, ctx.BlockIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3, IsInvalid: true}, "--", blockIOHeader, ctx.BlockIO}, + {StatsEntry{MemoryPercentage: 10.2}, "10.20%", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2, IsInvalid: true}, "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2, OSType: "windows"}, "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{Memory: 24, MemoryLimit: 30}, "24 B / 30 B", memUseHeader, ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30, IsInvalid: true}, "-- / --", memUseHeader, ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30, OSType: "windows"}, "24 B", winMemUseHeader, ctx.MemUsage}, + {StatsEntry{PidsCurrent: 10}, "10", pidsHeader, ctx.PIDs}, + {StatsEntry{PidsCurrent: 10, IsInvalid: true}, "--", pidsHeader, ctx.PIDs}, + {StatsEntry{PidsCurrent: 10, OSType: "windows"}, "--", pidsHeader, ctx.PIDs}, + } + + for _, te := range tt { + ctx = containerStatsContext{s: te.stats} + if v := te.call(); v != te.expValue { + t.Fatalf("Expected %q, got %q", te.expValue, v) + } + + h := ctx.FullHeader() + if h != te.expHeader { + t.Fatalf("Expected %q, got %q", te.expHeader, h) + } + } +} + +func TestContainerStatsContextWrite(t *testing.T) { + tt := []struct { + context Context + expected string + }{ + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + { + Context{Format: "table {{.MemUsage}}"}, + `MEM USAGE / LIMIT +20 B / 20 B +-- / -- +`, + }, + { + Context{Format: "{{.Container}} {{.ID}} {{.Name}}"}, + `container1 abcdef foo +container2 -- +`, + }, + { + Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + ID: "abcdef", + Name: "/foo", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + OSType: "linux", + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + OSType: "linux", + }, + } + var out bytes.Buffer + te.context.Output = &out + err := ContainerStatsWrite(te.context, stats) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Equal(t, out.String(), te.expected) + } + } +} + +func TestContainerStatsContextWriteWindows(t *testing.T) { + tt := []struct { + context Context + expected string + }{ + { + Context{Format: "table {{.MemUsage}}"}, + `PRIV WORKING SET +20 B +-- / -- +`, + }, + { + Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + { + Context{Format: "{{.Container}} {{.MemPerc}} {{.PIDs}}"}, + `container1 -- -- +container2 -- -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + OSType: "windows", + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + OSType: "windows", + }, + } + var out bytes.Buffer + te.context.Output = &out + err := ContainerStatsWrite(te.context, stats) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Equal(t, out.String(), te.expected) + } + } +} + +func TestContainerStatsContextWriteWithNoStats(t *testing.T) { + var out bytes.Buffer + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Container}}", + Output: &out, + }, + "", + }, + { + Context{ + Format: "table {{.Container}}", + Output: &out, + }, + "CONTAINER\n", + }, + { + Context{ + Format: "table {{.Container}}\t{{.CPUPerc}}", + Output: &out, + }, + "CONTAINER CPU %\n", + }, + } + + for _, context := range contexts { + ContainerStatsWrite(context.context, []StatsEntry{}) + assert.Equal(t, context.expected, out.String()) + // Clean buffer + out.Reset() + } +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/volume.go b/vendor/github.com/moby/moby/cli/command/formatter/volume.go new file mode 100644 index 0000000..90c9b13 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/volume.go @@ -0,0 +1,121 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" +) + +const ( + defaultVolumeQuietFormat = "{{.Name}}" + defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}" + + volumeNameHeader = "VOLUME NAME" + mountpointHeader = "MOUNTPOINT" + linksHeader = "LINKS" + // Status header ? +) + +// NewVolumeFormat returns a format for use with a volume Context +func NewVolumeFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultVolumeQuietFormat + } + return defaultVolumeTableFormat + case RawFormatKey: + if quiet { + return `name: {{.Name}}` + } + return `name: {{.Name}}\ndriver: {{.Driver}}\n` + } + return Format(source) +} + +// VolumeWrite writes formatted volumes using the Context +func VolumeWrite(ctx Context, volumes []*types.Volume) error { + render := func(format func(subContext subContext) error) error { + for _, volume := range volumes { + if err := format(&volumeContext{v: *volume}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&volumeContext{}, render) +} + +type volumeContext struct { + HeaderContext + v types.Volume +} + +func (c *volumeContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *volumeContext) Name() string { + c.AddHeader(volumeNameHeader) + return c.v.Name +} + +func (c *volumeContext) Driver() string { + c.AddHeader(driverHeader) + return c.v.Driver +} + +func (c *volumeContext) Scope() string { + c.AddHeader(scopeHeader) + return c.v.Scope +} + +func (c *volumeContext) Mountpoint() string { + c.AddHeader(mountpointHeader) + return c.v.Mountpoint +} + +func (c *volumeContext) Labels() string { + c.AddHeader(labelsHeader) + if c.v.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.v.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *volumeContext) Label(name string) string { + + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.AddHeader(h) + + if c.v.Labels == nil { + return "" + } + return c.v.Labels[name] +} + +func (c *volumeContext) Links() string { + c.AddHeader(linksHeader) + if c.v.UsageData == nil { + return "N/A" + } + return fmt.Sprintf("%d", c.v.UsageData.RefCount) +} + +func (c *volumeContext) Size() string { + c.AddHeader(sizeHeader) + if c.v.UsageData == nil { + return "N/A" + } + return units.HumanSize(float64(c.v.UsageData.Size)) +} diff --git a/vendor/github.com/moby/moby/cli/command/formatter/volume_test.go b/vendor/github.com/moby/moby/cli/command/formatter/volume_test.go new file mode 100644 index 0000000..9ec18b6 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/formatter/volume_test.go @@ -0,0 +1,189 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestVolumeContext(t *testing.T) { + volumeName := stringid.GenerateRandomID() + + var ctx volumeContext + cases := []struct { + volumeCtx volumeContext + expValue string + expHeader string + call func() string + }{ + {volumeContext{ + v: types.Volume{Name: volumeName}, + }, volumeName, volumeNameHeader, ctx.Name}, + {volumeContext{ + v: types.Volume{Driver: "driver_name"}, + }, "driver_name", driverHeader, ctx.Driver}, + {volumeContext{ + v: types.Volume{Scope: "local"}, + }, "local", scopeHeader, ctx.Scope}, + {volumeContext{ + v: types.Volume{Mountpoint: "mountpoint"}, + }, "mountpoint", mountpointHeader, ctx.Mountpoint}, + {volumeContext{ + v: types.Volume{}, + }, "", labelsHeader, ctx.Labels}, + {volumeContext{ + v: types.Volume{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", labelsHeader, ctx.Labels}, + } + + for _, c := range cases { + ctx = c.volumeCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } +} + +func TestVolumeContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewVolumeFormat("table", false)}, + `DRIVER VOLUME NAME +foo foobar_baz +bar foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table", true)}, + `foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", false)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", true)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewVolumeFormat("raw", false)}, + `name: foobar_baz +driver: foo + +name: foobar_bar +driver: bar + +`, + }, + { + Context{Format: NewVolumeFormat("raw", true)}, + `name: foobar_baz +name: foobar_bar +`, + }, + // Custom Format + { + Context{Format: NewVolumeFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + volumes := []*types.Volume{ + {Name: "foobar_baz", Driver: "foo"}, + {Name: "foobar_bar", Driver: "bar"}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := VolumeWrite(testcase.context, volumes) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestVolumeContextWriteJSON(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"}, + {"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A"}, + } + out := bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, m, expectedJSONs[i]) + } +} + +func TestVolumeContextWriteJSONField(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .Name}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Equal(t, s, volumes[i].Name) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/idresolver/idresolver.go b/vendor/github.com/moby/moby/cli/command/idresolver/idresolver.go new file mode 100644 index 0000000..511b1a8 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/idresolver/idresolver.go @@ -0,0 +1,90 @@ +package idresolver + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stringid" +) + +// IDResolver provides ID to Name resolution. +type IDResolver struct { + client client.APIClient + noResolve bool + cache map[string]string +} + +// New creates a new IDResolver. +func New(client client.APIClient, noResolve bool) *IDResolver { + return &IDResolver{ + client: client, + noResolve: noResolve, + cache: make(map[string]string), + } +} + +func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) { + switch t := t.(type) { + case swarm.Node: + node, _, err := r.client.NodeInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + if node.Spec.Annotations.Name != "" { + return node.Spec.Annotations.Name, nil + } + if node.Description.Hostname != "" { + return node.Description.Hostname, nil + } + return id, nil + case swarm.Service: + service, _, err := r.client.ServiceInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + return service.Spec.Annotations.Name, nil + case swarm.Task: + // If the caller passes the full task there's no need to do a lookup. + if t.ID == "" { + var err error + + t, _, err = r.client.TaskInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + } + taskID := stringid.TruncateID(t.ID) + if t.ServiceID == "" { + return taskID, nil + } + service, err := r.Resolve(ctx, swarm.Service{}, t.ServiceID) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%d.%s", service, t.Slot, taskID), nil + default: + return "", fmt.Errorf("unsupported type") + } + +} + +// Resolve will attempt to resolve an ID to a Name by querying the manager. +// Results are stored into a cache. +// If the `-n` flag is used in the command-line, resolution is disabled. +func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) { + if r.noResolve { + return id, nil + } + if name, ok := r.cache[id]; ok { + return name, nil + } + name, err := r.get(ctx, t, id) + if err != nil { + return "", err + } + r.cache[id] = name + return name, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/image/build.go b/vendor/github.com/moby/moby/cli/command/image/build.go new file mode 100644 index 0000000..0c88af5 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/build.go @@ -0,0 +1,477 @@ +package image + +import ( + "archive/tar" + "bufio" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + + "golang.org/x/net/context" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type buildOptions struct { + context string + dockerfileName string + tags opts.ListOpts + labels opts.ListOpts + buildArgs opts.ListOpts + ulimits *runconfigopts.UlimitOpt + memory string + memorySwap string + shmSize string + cpuShares int64 + cpuPeriod int64 + cpuQuota int64 + cpuSetCpus string + cpuSetMems string + cgroupParent string + isolation string + quiet bool + noCache bool + rm bool + forceRm bool + pull bool + cacheFrom []string + compress bool + securityOpt []string + networkMode string + squash bool +} + +// NewBuildCommand creates a new `docker build` command +func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command { + ulimits := make(map[string]*units.Ulimit) + options := buildOptions{ + tags: opts.NewListOpts(validateTag), + buildArgs: opts.NewListOpts(runconfigopts.ValidateEnv), + ulimits: runconfigopts.NewUlimitOpt(&ulimits), + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "build [OPTIONS] PATH | URL | -", + Short: "Build an image from a Dockerfile", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.context = args[0] + return runBuild(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") + flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") + flags.Var(options.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flags.StringVarP(&options.memory, "memory", "m", "", "Memory limit") + flags.StringVar(&options.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.StringVar(&options.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") + flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") + flags.Var(&options.labels, "label", "Set metadata for an image") + flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") + flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") + flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") + flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") + flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources") + flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip") + flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options") + flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build") + + command.AddTrustedFlags(flags, true) + + flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer") + flags.SetAnnotation("squash", "experimental", nil) + flags.SetAnnotation("squash", "version", []string{"1.25"}) + + return cmd +} + +// lastProgressOutput is the same as progress.Output except +// that it only output with the last update. It is used in +// non terminal scenarios to depresss verbose messages +type lastProgressOutput struct { + output progress.Output +} + +// WriteProgress formats progress information from a ProgressReader. +func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { + if !prog.LastUpdate { + return nil + } + + return out.output.WriteProgress(prog) +} + +func runBuild(dockerCli *command.DockerCli, options buildOptions) error { + + var ( + buildCtx io.ReadCloser + err error + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + ) + + specifiedContext := options.context + progBuff = dockerCli.Out() + buildBuff = dockerCli.Out() + if options.quiet { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + + switch { + case specifiedContext == "-": + buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName) + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName) + case urlutil.IsURL(specifiedContext): + buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) + default: + contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName) + } + + if err != nil { + if options.quiet && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(dockerCli.Err(), progBuff) + } + return fmt.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + contextDir = tempDir + } + + if buildCtx == nil { + // And canonicalize dockerfile name to a platform-independent one + relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) + if err != nil { + return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) + } + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return err + } + defer f.Close() + + var excludes []string + if err == nil { + excludes, err = dockerignore.ReadAll(f) + if err != nil { + return err + } + } + + if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil { + return fmt.Errorf("Error checking context: '%s'.", err) + } + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The daemon will remove them for us, if needed, after it + // parses the Dockerfile. Ignore errors here, as they will have been + // caught by validateContextDirectory above. + var includes = []string{"."} + keepThem1, _ := fileutils.Matches(".dockerignore", excludes) + keepThem2, _ := fileutils.Matches(relDockerfile, excludes) + if keepThem1 || keepThem2 { + includes = append(includes, ".dockerignore", relDockerfile) + } + + compression := archive.Uncompressed + if options.compress { + compression = archive.Gzip + } + buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + Compression: compression, + ExcludePatterns: excludes, + IncludeFiles: includes, + }) + if err != nil { + return err + } + } + + ctx := context.Background() + + var resolvedTags []*resolvedTag + if command.IsTrusted() { + translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { + return TrustedReference(ctx, dockerCli, ref, nil) + } + // Wrap the tar archive to replace the Dockerfile entry with the rewritten + // Dockerfile which uses trusted pulls. + buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags) + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) + if !dockerCli.Out().IsTerminal() { + progressOutput = &lastProgressOutput{output: progressOutput} + } + + var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") + + var memory int64 + if options.memory != "" { + parsedMemory, err := units.RAMInBytes(options.memory) + if err != nil { + return err + } + memory = parsedMemory + } + + var memorySwap int64 + if options.memorySwap != "" { + if options.memorySwap == "-1" { + memorySwap = -1 + } else { + parsedMemorySwap, err := units.RAMInBytes(options.memorySwap) + if err != nil { + return err + } + memorySwap = parsedMemorySwap + } + } + + var shmSize int64 + if options.shmSize != "" { + shmSize, err = units.RAMInBytes(options.shmSize) + if err != nil { + return err + } + } + + authConfigs, _ := dockerCli.GetAllCredentials() + buildOptions := types.ImageBuildOptions{ + Memory: memory, + MemorySwap: memorySwap, + Tags: options.tags.GetAll(), + SuppressOutput: options.quiet, + NoCache: options.noCache, + Remove: options.rm, + ForceRemove: options.forceRm, + PullParent: options.pull, + Isolation: container.Isolation(options.isolation), + CPUSetCPUs: options.cpuSetCpus, + CPUSetMems: options.cpuSetMems, + CPUShares: options.cpuShares, + CPUQuota: options.cpuQuota, + CPUPeriod: options.cpuPeriod, + CgroupParent: options.cgroupParent, + Dockerfile: relDockerfile, + ShmSize: shmSize, + Ulimits: options.ulimits.GetList(), + BuildArgs: runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()), + AuthConfigs: authConfigs, + Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), + CacheFrom: options.cacheFrom, + SecurityOpt: options.securityOpt, + NetworkMode: options.networkMode, + Squash: options.squash, + } + + response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) + if err != nil { + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s", progBuff) + } + return err + } + defer response.Body.Close() + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), nil) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) + } + return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { + fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if options.quiet { + fmt.Fprintf(dockerCli.Out(), "%s", buildBuff) + } + + if command.IsTrusted() { + // Since the build was successful, now we must tag any of the resolved + // images from the above Dockerfile rewrite. + for _, resolved := range resolvedTags { + if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil { + return err + } + } + } + + return nil +} + +type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) + +// validateTag checks if the given image name can be resolved. +func validateTag(rawRepo string) (string, error) { + _, err := reference.ParseNamed(rawRepo) + if err != nil { + return "", err + } + + return rawRepo, nil +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite. +type resolvedTag struct { + digestRef reference.Canonical + tagRef reference.NamedTagged +} + +// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in +// "FROM " instructions to a digest reference. `translator` is a +// function that takes a repository name and tag reference and returns a +// trusted digest reference. +func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { + scanner := bufio.NewScanner(dockerfile) + buf := bytes.NewBuffer(nil) + + // Scan the lines of the Dockerfile, looking for a "FROM" line. + for scanner.Scan() { + line := scanner.Text() + + matches := dockerfileFromLinePattern.FindStringSubmatch(line) + if matches != nil && matches[1] != api.NoBaseImageSpecifier { + // Replace the line with a resolved "FROM repo@digest" + ref, err := reference.ParseNamed(matches[1]) + if err != nil { + return nil, nil, err + } + ref = reference.WithDefaultTag(ref) + if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { + trustedRef, err := translator(ctx, ref) + if err != nil { + return nil, nil, err + } + + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String())) + resolvedTags = append(resolvedTags, &resolvedTag{ + digestRef: trustedRef, + tagRef: ref, + }) + } + } + + _, err := fmt.Fprintln(buf, line) + if err != nil { + return nil, nil, err + } + } + + return buf.Bytes(), resolvedTags, scanner.Err() +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + content := io.Reader(tarReader) + if hdr.Name == dockerfileName { + // This entry is the Dockerfile. Since the tar archive was + // generated from a directory on the local filesystem, the + // Dockerfile will only appear once in the archive. + var newDockerfile []byte + newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) + if err != nil { + pipeWriter.CloseWithError(err) + return + } + hdr.Size = int64(len(newDockerfile)) + content = bytes.NewBuffer(newDockerfile) + } + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/vendor/github.com/moby/moby/cli/command/image/cmd.go b/vendor/github.com/moby/moby/cli/command/image/cmd.go new file mode 100644 index 0000000..c3ca61f --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/cmd.go @@ -0,0 +1,33 @@ +package image + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewImageCommand returns a cobra command for `image` subcommands +func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "image", + Short: "Manage images", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + NewBuildCommand(dockerCli), + NewHistoryCommand(dockerCli), + NewImportCommand(dockerCli), + NewLoadCommand(dockerCli), + NewPullCommand(dockerCli), + NewPushCommand(dockerCli), + NewSaveCommand(dockerCli), + NewTagCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/image/history.go b/vendor/github.com/moby/moby/cli/command/image/history.go new file mode 100644 index 0000000..91c8f75 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/history.go @@ -0,0 +1,99 @@ +package image + +import ( + "fmt" + "strconv" + "strings" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type historyOptions struct { + image string + + human bool + quiet bool + noTrunc bool +} + +// NewHistoryCommand creates a new `docker history` command +func NewHistoryCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts historyOptions + + cmd := &cobra.Command{ + Use: "history [OPTIONS] IMAGE", + Short: "Show the history of an image", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + return runHistory(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + + return cmd +} + +func runHistory(dockerCli *command.DockerCli, opts historyOptions) error { + ctx := context.Background() + + history, err := dockerCli.Client().ImageHistory(ctx, opts.image) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + + if opts.quiet { + for _, entry := range history { + if opts.noTrunc { + fmt.Fprintf(w, "%s\n", entry.ID) + } else { + fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) + } + } + w.Flush() + return nil + } + + var imageID string + var createdBy string + var created string + var size string + + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") + for _, entry := range history { + imageID = entry.ID + createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) + if !opts.noTrunc { + createdBy = stringutils.Ellipsis(createdBy, 45) + imageID = stringid.TruncateID(entry.ID) + } + + if opts.human { + created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" + size = units.HumanSizeWithPrecision(float64(entry.Size), 3) + } else { + created = time.Unix(entry.Created, 0).Format(time.RFC3339) + size = strconv.FormatInt(entry.Size, 10) + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/image/import.go b/vendor/github.com/moby/moby/cli/command/image/import.go new file mode 100644 index 0000000..60024fb --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/import.go @@ -0,0 +1,88 @@ +package image + +import ( + "io" + "os" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + dockeropts "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/urlutil" + "github.com/spf13/cobra" +) + +type importOptions struct { + source string + reference string + changes dockeropts.ListOpts + message string +} + +// NewImportCommand creates a new `docker import` command +func NewImportCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts importOptions + + cmd := &cobra.Command{ + Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", + Short: "Import the contents from a tarball to create a filesystem image", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.source = args[0] + if len(args) > 1 { + opts.reference = args[1] + } + return runImport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + opts.changes = dockeropts.NewListOpts(nil) + flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") + flags.StringVarP(&opts.message, "message", "m", "", "Set commit message for imported image") + + return cmd +} + +func runImport(dockerCli *command.DockerCli, opts importOptions) error { + var ( + in io.Reader + srcName = opts.source + ) + + if opts.source == "-" { + in = dockerCli.In() + } else if !urlutil.IsURL(opts.source) { + srcName = "-" + file, err := os.Open(opts.source) + if err != nil { + return err + } + defer file.Close() + in = file + } + + source := types.ImageImportSource{ + Source: in, + SourceName: srcName, + } + + options := types.ImageImportOptions{ + Message: opts.message, + Changes: opts.changes.GetAll(), + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ImageImport(context.Background(), source, opts.reference, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/moby/moby/cli/command/image/inspect.go b/vendor/github.com/moby/moby/cli/command/image/inspect.go new file mode 100644 index 0000000..217863c --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/inspect.go @@ -0,0 +1,44 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker image inspect` +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] IMAGE [IMAGE...]", + Short: "Display detailed information on one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ImageInspectWithRaw(ctx, ref) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/vendor/github.com/moby/moby/cli/command/image/list.go b/vendor/github.com/moby/moby/cli/command/image/list.go new file mode 100644 index 0000000..679604f --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/list.go @@ -0,0 +1,96 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type imagesOptions struct { + matchName string + + quiet bool + all bool + noTrunc bool + showDigests bool + format string + filter opts.FilterOpt +} + +// NewImagesCommand creates a new `docker images` command +func NewImagesCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := imagesOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "images [OPTIONS] [REPOSITORY[:TAG]]", + Short: "List images", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + opts.matchName = args[0] + } + return runImages(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVarP(&opts.all, "all", "a", false, "Show all images (default hides intermediate images)") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVar(&opts.showDigests, "digests", false, "Show digests") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := *NewImagesCommand(dockerCli) + cmd.Aliases = []string{"images", "list"} + cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]" + return &cmd +} + +func runImages(dockerCli *command.DockerCli, opts imagesOptions) error { + ctx := context.Background() + + filters := opts.filter.Value() + if opts.matchName != "" { + filters.Add("reference", opts.matchName) + } + + options := types.ImageListOptions{ + All: opts.all, + Filters: filters, + } + + images, err := dockerCli.Client().ImageList(ctx, options) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().ImagesFormat + } else { + format = formatter.TableFormatKey + } + } + + imageCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewImageFormat(format, opts.quiet, opts.showDigests), + Trunc: !opts.noTrunc, + }, + Digest: opts.showDigests, + } + return formatter.ImageWrite(imageCtx, images) +} diff --git a/vendor/github.com/moby/moby/cli/command/image/load.go b/vendor/github.com/moby/moby/cli/command/image/load.go new file mode 100644 index 0000000..988f510 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/load.go @@ -0,0 +1,77 @@ +package image + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/system" + "github.com/spf13/cobra" +) + +type loadOptions struct { + input string + quiet bool +} + +// NewLoadCommand creates a new `docker load` command +func NewLoadCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts loadOptions + + cmd := &cobra.Command{ + Use: "load [OPTIONS]", + Short: "Load an image from a tar archive or STDIN", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLoad(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") + + return cmd +} + +func runLoad(dockerCli *command.DockerCli, opts loadOptions) error { + + var input io.Reader = dockerCli.In() + if opts.input != "" { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(opts.input) + if err != nil { + return err + } + defer file.Close() + input = file + } + + // To avoid getting stuck, verify that a tar file is given either in + // the input flag or through stdin and if not display an error message and exit. + if opts.input == "" && dockerCli.In().IsTerminal() { + return fmt.Errorf("requested load from stdin, but stdin is empty") + } + + if !dockerCli.Out().IsTerminal() { + opts.quiet = true + } + response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) + } + + _, err = io.Copy(dockerCli.Out(), response.Body) + return err +} diff --git a/vendor/github.com/moby/moby/cli/command/image/prune.go b/vendor/github.com/moby/moby/cli/command/image/prune.go new file mode 100644 index 0000000..82c28fc --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/prune.go @@ -0,0 +1,92 @@ +package image + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool +} + +// NewPruneCommand returns a new cobra prune command for images +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused images", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images, not just dangling ones") + + return cmd +} + +const ( + allImageWarning = `WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue?` + danglingWarning = `WARNING! This will remove all dangling images. +Are you sure you want to continue?` +) + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { + pruneFilters := filters.NewArgs() + pruneFilters.Add("dangling", fmt.Sprintf("%v", !opts.all)) + + warning := danglingWarning + if opts.all { + warning = allImageWarning + } + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) + if err != nil { + return + } + + if len(report.ImagesDeleted) > 0 { + output = "Deleted Images:\n" + for _, st := range report.ImagesDeleted { + if st.Untagged != "" { + output += fmt.Sprintln("untagged:", st.Untagged) + } else { + output += fmt.Sprintln("deleted:", st.Deleted) + } + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Image Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true, all: all}) +} diff --git a/vendor/github.com/moby/moby/cli/command/image/pull.go b/vendor/github.com/moby/moby/cli/command/image/pull.go new file mode 100644 index 0000000..24933fe --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/pull.go @@ -0,0 +1,84 @@ +package image + +import ( + "errors" + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type pullOptions struct { + remote string + all bool +} + +// NewPullCommand creates a new `docker pull` command +func NewPullCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pullOptions + + cmd := &cobra.Command{ + Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", + Short: "Pull an image or a repository from a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runPull(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") + command.AddTrustedFlags(flags, true) + + return cmd +} + +func runPull(dockerCli *command.DockerCli, opts pullOptions) error { + distributionRef, err := reference.ParseNamed(opts.remote) + if err != nil { + return err + } + if opts.all && !reference.IsNameOnly(distributionRef) { + return errors.New("tag can't be used with --all-tags/-a") + } + + if !opts.all && reference.IsNameOnly(distributionRef) { + distributionRef = reference.WithDefaultTag(distributionRef) + fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", reference.DefaultTag) + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "pull") + + // Check if reference has a digest + _, isCanonical := distributionRef.(reference.Canonical) + if command.IsTrusted() && !isCanonical { + err = trustedPull(ctx, dockerCli, repoInfo, distributionRef, authConfig, requestPrivilege) + } else { + err = imagePullPrivileged(ctx, dockerCli, authConfig, distributionRef.String(), requestPrivilege, opts.all) + } + if err != nil { + if strings.Contains(err.Error(), "target is plugin") { + return errors.New(err.Error() + " - Use `docker plugin install`") + } + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/image/push.go b/vendor/github.com/moby/moby/cli/command/image/push.go new file mode 100644 index 0000000..a8ce494 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/push.go @@ -0,0 +1,61 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewPushCommand creates a new `docker push` command +func NewPushCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "push [OPTIONS] NAME[:TAG]", + Short: "Push an image or a repository to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPush(dockerCli, args[0]) + }, + } + + flags := cmd.Flags() + + command.AddTrustedFlags(flags, true) + + return cmd +} + +func runPush(dockerCli *command.DockerCli, remote string) error { + ref, err := reference.ParseNamed(remote) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + ctx := context.Background() + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push") + + if command.IsTrusted() { + return trustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) + } + + responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref.String(), requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/moby/moby/cli/command/image/remove.go b/vendor/github.com/moby/moby/cli/command/image/remove.go new file mode 100644 index 0000000..c79ceba --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/remove.go @@ -0,0 +1,77 @@ +package image + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool + noPrune bool +} + +// NewRemoveCommand creates a new `docker remove` command +func NewRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rmi [OPTIONS] IMAGE [IMAGE...]", + Short: "Remove one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, opts, args) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") + flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") + + return cmd +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := *NewRemoveCommand(dockerCli) + cmd.Aliases = []string{"rmi", "remove"} + cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]" + return &cmd +} + +func runRemove(dockerCli *command.DockerCli, opts removeOptions, images []string) error { + client := dockerCli.Client() + ctx := context.Background() + + options := types.ImageRemoveOptions{ + Force: opts.force, + PruneChildren: !opts.noPrune, + } + + var errs []string + for _, image := range images { + dels, err := client.ImageRemove(ctx, image, options) + if err != nil { + errs = append(errs, err.Error()) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) + } + } + } + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/image/save.go b/vendor/github.com/moby/moby/cli/command/image/save.go new file mode 100644 index 0000000..bbe82d2 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/save.go @@ -0,0 +1,57 @@ +package image + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type saveOptions struct { + images []string + output string +} + +// NewSaveCommand creates a new `docker save` command +func NewSaveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts saveOptions + + cmd := &cobra.Command{ + Use: "save [OPTIONS] IMAGE [IMAGE...]", + Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.images = args + return runSave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runSave(dockerCli *command.DockerCli, opts saveOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/vendor/github.com/moby/moby/cli/command/image/tag.go b/vendor/github.com/moby/moby/cli/command/image/tag.go new file mode 100644 index 0000000..fb2b703 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/tag.go @@ -0,0 +1,41 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type tagOptions struct { + image string + name string +} + +// NewTagCommand creates a new `docker tag` command +func NewTagCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts tagOptions + + cmd := &cobra.Command{ + Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", + Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + opts.name = args[1] + return runTag(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTag(dockerCli *command.DockerCli, opts tagOptions) error { + ctx := context.Background() + + return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) +} diff --git a/vendor/github.com/moby/moby/cli/command/image/trust.go b/vendor/github.com/moby/moby/cli/command/image/trust.go new file mode 100644 index 0000000..5136a22 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/trust.go @@ -0,0 +1,381 @@ +package image + +import ( + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "path" + "sort" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/trust" + "github.com/docker/docker/distribution" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/notary/client" + "github.com/docker/notary/tuf/data" +) + +type target struct { + name string + digest digest.Digest + size int64 +} + +// trustedPush handles content trust pushing of an image +func trustedPush(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref.String(), requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) +} + +// PushTrustedReference pushes a canonical reference to the trust server. +func PushTrustedReference(cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { + // If it is a trusted push we would like to find the target entry which match the + // tag provided in the function and then do an AddTarget later. + target := &client.Target{} + // Count the times of calling for handleTarget, + // if it is called more that once, that should be considered an error in a trusted push. + cnt := 0 + handleTarget := func(aux *json.RawMessage) { + cnt++ + if cnt > 1 { + // handleTarget should only be called one. This will be treated as an error. + return + } + + var pushResult distribution.PushResult + err := json.Unmarshal(*aux, &pushResult) + if err == nil && pushResult.Tag != "" && pushResult.Digest.Validate() == nil { + h, err := hex.DecodeString(pushResult.Digest.Hex()) + if err != nil { + target = nil + return + } + target.Name = pushResult.Tag + target.Hashes = data.Hashes{string(pushResult.Digest.Algorithm()): h} + target.Length = int64(pushResult.Size) + } + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + default: + // We want trust signatures to always take an explicit tag, + // otherwise it will act as an untrusted push. + if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), nil); err != nil { + return err + } + fmt.Fprintln(cli.Out(), "No tag specified, skipping trust metadata push") + return nil + } + + if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), handleTarget); err != nil { + return err + } + + if cnt > 1 { + return fmt.Errorf("internal error: only one call to handleTarget expected") + } + + if target == nil { + fmt.Fprintln(cli.Out(), "No targets found, please provide a specific tag in order to sign it") + return nil + } + + fmt.Fprintln(cli.Out(), "Signing and pushing trust metadata") + + repo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "push", "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to notary repository: %s\n", err) + return err + } + + // get the latest repository metadata so we can figure out which roles to sign + err = repo.Update(false) + + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) + var rootKeyID string + // always select the first root key + if len(keys) > 0 { + sort.Strings(keys) + rootKeyID = keys[0] + } else { + rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) + if err != nil { + return err + } + rootKeyID = rootPublicKey.ID() + } + + // Initialize the notary repository with a remotely managed snapshot key + if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { + return trust.NotaryError(repoInfo.FullName(), err) + } + fmt.Fprintf(cli.Out(), "Finished initializing %q\n", repoInfo.FullName()) + err = repo.AddTarget(target, data.CanonicalTargetsRole) + case nil: + // already initialized and we have successfully downloaded the latest metadata + err = addTargetToAllSignableRoles(repo, target) + default: + return trust.NotaryError(repoInfo.FullName(), err) + } + + if err == nil { + err = repo.Publish() + } + + if err != nil { + fmt.Fprintf(cli.Out(), "Failed to sign %q:%s - %s\n", repoInfo.FullName(), tag, err.Error()) + return trust.NotaryError(repoInfo.FullName(), err) + } + + fmt.Fprintf(cli.Out(), "Successfully signed %q:%s\n", repoInfo.FullName(), tag) + return nil +} + +// Attempt to add the image target to all the top level delegation roles we can +// (based on whether we have the signing key and whether the role's path allows +// us to). +// If there are no delegation roles, we add to the targets role. +func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { + var signableRoles []string + + // translate the full key names, which includes the GUN, into just the key IDs + allCanonicalKeyIDs := make(map[string]struct{}) + for fullKeyID := range repo.CryptoService.ListAllKeys() { + allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} + } + + allDelegationRoles, err := repo.GetDelegationRoles() + if err != nil { + return err + } + + // if there are no delegation roles, then just try to sign it into the targets role + if len(allDelegationRoles) == 0 { + return repo.AddTarget(target, data.CanonicalTargetsRole) + } + + // there are delegation roles, find every delegation role we have a key for, and + // attempt to sign into into all those roles. + for _, delegationRole := range allDelegationRoles { + // We do not support signing any delegation role that isn't a direct child of the targets role. + // Also don't bother checking the keys if we can't add the target + // to this role due to path restrictions + if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { + continue + } + + for _, canonicalKeyID := range delegationRole.KeyIDs { + if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { + signableRoles = append(signableRoles, delegationRole.Name) + break + } + } + } + + if len(signableRoles) == 0 { + return fmt.Errorf("no valid signing keys for delegation roles") + } + + return repo.AddTarget(target, signableRoles...) +} + +// imagePushPrivileged push the image +func imagePushPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + } + + return cli.Client().ImagePush(ctx, ref, options) +} + +// trustedPull handles content trust pulling of an image +func trustedPull(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + var refs []target + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return err + } + + if tagged, isTagged := ref.(reference.NamedTagged); !isTagged { + // List all targets + targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(repoInfo.FullName(), err) + } + for _, tgt := range targets { + t, err := convertTarget(tgt.Target) + if err != nil { + fmt.Fprintf(cli.Out(), "Skipping target for %q\n", repoInfo.Name()) + continue + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { + continue + } + refs = append(refs, t) + } + if len(refs) == 0 { + return trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trusted tags for %s", repoInfo.FullName())) + } + } else { + t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(repoInfo.FullName(), err) + } + // Only get the tag if it's in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", tagged.Tag())) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + r, err := convertTarget(t.Target) + if err != nil { + return err + + } + refs = append(refs, r) + } + + for i, r := range refs { + displayTag := r.name + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.Name(), displayTag, r.digest) + + ref, err := reference.WithDigest(reference.TrimNamed(repoInfo), r.digest) + if err != nil { + return err + } + if err := imagePullPrivileged(ctx, cli, authConfig, ref.String(), requestPrivilege, false); err != nil { + return err + } + + tagged, err := reference.WithTag(repoInfo, r.name) + if err != nil { + return err + } + trustedRef, err := reference.WithDigest(reference.TrimNamed(repoInfo), r.digest) + if err != nil { + return err + } + if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { + return err + } + } + return nil +} + +// imagePullPrivileged pulls the image and displays it to the output +func imagePullPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + options := types.ImagePullOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + All: all, + } + + responseBody, err := cli.Client().ImagePull(ctx, ref, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil) +} + +// TrustedReference returns the canonical trusted reference for an image reference +func TrustedReference(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { + var ( + repoInfo *registry.RepositoryInfo + err error + ) + if rs != nil { + repoInfo, err = rs.ResolveRepository(ref) + } else { + repoInfo, err = registry.ParseRepositoryInfo(ref) + } + if err != nil { + return nil, err + } + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return nil, err + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.FullName(), err) + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.Tag())) + } + r, err := convertTarget(t.Target) + if err != nil { + return nil, err + + } + + return reference.WithDigest(reference.TrimNamed(ref), r.digest) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + name: t.Name, + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +// TagTrusted tags a trusted ref +func TagTrusted(ctx context.Context, cli *command.DockerCli, trustedRef reference.Canonical, ref reference.NamedTagged) error { + fmt.Fprintf(cli.Out(), "Tagging %s as %s\n", trustedRef.String(), ref.String()) + + return cli.Client().ImageTag(ctx, trustedRef.String(), ref.String()) +} diff --git a/vendor/github.com/moby/moby/cli/command/image/trust_test.go b/vendor/github.com/moby/moby/cli/command/image/trust_test.go new file mode 100644 index 0000000..7814646 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/image/trust_test.go @@ -0,0 +1,57 @@ +package image + +import ( + "os" + "testing" + + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli/trust" + "github.com/docker/docker/registry" +) + +func unsetENV() { + os.Unsetenv("DOCKER_CONTENT_TRUST") + os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") +} + +func TestENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + output, err := trust.Server(indexInfo) + expectedStr := "https://notary-test.com:5000" + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} + +func TestHTTPENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + _, err := trust.Server(indexInfo) + if err == nil { + t.Fatal("Expected error with invalid scheme") + } +} + +func TestOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: true} + output, err := trust.Server(indexInfo) + if err != nil || output != registry.NotaryServer { + t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output) + } +} + +func TestNonOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: false} + output, err := trust.Server(indexInfo) + expectedStr := "https://" + indexInfo.Name + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/in.go b/vendor/github.com/moby/moby/cli/command/in.go new file mode 100644 index 0000000..7204b7a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/in.go @@ -0,0 +1,75 @@ +package command + +import ( + "errors" + "io" + "os" + "runtime" + + "github.com/docker/docker/pkg/term" +) + +// InStream is an input stream used by the DockerCli to read user input +type InStream struct { + in io.ReadCloser + fd uintptr + isTerminal bool + state *term.State +} + +func (i *InStream) Read(p []byte) (int, error) { + return i.in.Read(p) +} + +// Close implements the Closer interface +func (i *InStream) Close() error { + return i.in.Close() +} + +// FD returns the file descriptor number for this stream +func (i *InStream) FD() uintptr { + return i.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (i *InStream) IsTerminal() bool { + return i.isTerminal +} + +// SetRawTerminal sets raw mode on the input terminal +func (i *InStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !i.isTerminal { + return nil + } + i.state, err = term.SetRawTerminal(i.fd) + return err +} + +// RestoreTerminal restores normal mode to the terminal +func (i *InStream) RestoreTerminal() { + if i.state != nil { + term.RestoreTerminal(i.fd, i.state) + } +} + +// CheckTty checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !i.isTerminal { + eText := "the input device is not a TTY" + if runtime.GOOS == "windows" { + return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") + } + return errors.New(eText) + } + return nil +} + +// NewInStream returns a new InStream object from a ReadCloser +func NewInStream(in io.ReadCloser) *InStream { + fd, isTerminal := term.GetFdInfo(in) + return &InStream{in: in, fd: fd, isTerminal: isTerminal} +} diff --git a/vendor/github.com/moby/moby/cli/command/inspect/inspector.go b/vendor/github.com/moby/moby/cli/command/inspect/inspector.go new file mode 100644 index 0000000..1d81643 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/inspect/inspector.go @@ -0,0 +1,195 @@ +package inspect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/template" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/utils/templates" +) + +// Inspector defines an interface to implement to process elements +type Inspector interface { + Inspect(typedElement interface{}, rawElement []byte) error + Flush() error +} + +// TemplateInspector uses a text template to inspect elements. +type TemplateInspector struct { + outputStream io.Writer + buffer *bytes.Buffer + tmpl *template.Template +} + +// NewTemplateInspector creates a new inspector with a template. +func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { + return &TemplateInspector{ + outputStream: outputStream, + buffer: new(bytes.Buffer), + tmpl: tmpl, + } +} + +// NewTemplateInspectorFromString creates a new TemplateInspector from a string +// which is compiled into a template. +func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { + if tmplStr == "" { + return NewIndentedInspector(out), nil + } + + tmpl, err := templates.Parse(tmplStr) + if err != nil { + return nil, fmt.Errorf("Template parsing error: %s", err) + } + return NewTemplateInspector(out, tmpl), nil +} + +// GetRefFunc is a function which used by Inspect to fetch an object from a +// reference +type GetRefFunc func(ref string) (interface{}, []byte, error) + +// Inspect fetches objects by reference using GetRefFunc and writes the json +// representation to the output writer. +func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { + inspector, err := NewTemplateInspectorFromString(out, tmplStr) + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + var inspectErr error + for _, ref := range references { + element, raw, err := getRef(ref) + if err != nil { + inspectErr = err + break + } + + if err := inspector.Inspect(element, raw); err != nil { + inspectErr = err + break + } + } + + if err := inspector.Flush(); err != nil { + logrus.Errorf("%s\n", err) + } + + if inspectErr != nil { + return cli.StatusError{StatusCode: 1, Status: inspectErr.Error()} + } + return nil +} + +// Inspect executes the inspect template. +// It decodes the raw element into a map if the initial execution fails. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { + buffer := new(bytes.Buffer) + if err := i.tmpl.Execute(buffer, typedElement); err != nil { + if rawElement == nil { + return fmt.Errorf("Template parsing error: %v", err) + } + return i.tryRawInspectFallback(rawElement) + } + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// tryRawInspectFallback executes the inspect template with a raw interface. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error { + var raw interface{} + buffer := new(bytes.Buffer) + rdr := bytes.NewReader(rawElement) + dec := json.NewDecoder(rdr) + + if rawErr := dec.Decode(&raw); rawErr != nil { + return fmt.Errorf("unable to read inspect data: %v", rawErr) + } + + tmplMissingKey := i.tmpl.Option("missingkey=error") + if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { + return fmt.Errorf("Template parsing error: %v", rawErr) + } + + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. +func (i *TemplateInspector) Flush() error { + if i.buffer.Len() == 0 { + _, err := io.WriteString(i.outputStream, "\n") + return err + } + _, err := io.Copy(i.outputStream, i.buffer) + return err +} + +// IndentedInspector uses a buffer to stop the indented representation of an element. +type IndentedInspector struct { + outputStream io.Writer + elements []interface{} + rawElements [][]byte +} + +// NewIndentedInspector generates a new IndentedInspector. +func NewIndentedInspector(outputStream io.Writer) Inspector { + return &IndentedInspector{ + outputStream: outputStream, + } +} + +// Inspect writes the raw element with an indented json format. +func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { + if rawElement != nil { + i.rawElements = append(i.rawElements, rawElement) + } else { + i.elements = append(i.elements, typedElement) + } + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. +func (i *IndentedInspector) Flush() error { + if len(i.elements) == 0 && len(i.rawElements) == 0 { + _, err := io.WriteString(i.outputStream, "[]\n") + return err + } + + var buffer io.Reader + if len(i.rawElements) > 0 { + bytesBuffer := new(bytes.Buffer) + bytesBuffer.WriteString("[") + for idx, r := range i.rawElements { + bytesBuffer.Write(r) + if idx < len(i.rawElements)-1 { + bytesBuffer.WriteString(",") + } + } + bytesBuffer.WriteString("]") + indented := new(bytes.Buffer) + if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { + return err + } + buffer = indented + } else { + b, err := json.MarshalIndent(i.elements, "", " ") + if err != nil { + return err + } + buffer = bytes.NewReader(b) + } + + if _, err := io.Copy(i.outputStream, buffer); err != nil { + return err + } + _, err := io.WriteString(i.outputStream, "\n") + return err +} diff --git a/vendor/github.com/moby/moby/cli/command/inspect/inspector_test.go b/vendor/github.com/moby/moby/cli/command/inspect/inspector_test.go new file mode 100644 index 0000000..1ce1593 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/inspect/inspector_test.go @@ -0,0 +1,221 @@ +package inspect + +import ( + "bytes" + "strings" + "testing" + + "github.com/docker/docker/utils/templates" +) + +type testElement struct { + DNS string `json:"Dns"` +} + +func TestTemplateInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "\n" { + t.Fatalf("Expected `\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorTemplateError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Foo}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + err = i.Inspect(testElement{"0.0.0.0"}, nil) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorRawFallback(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorRawFallbackError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n1.1.1.1\n" { + t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, got `%s`", b.String()) + } +} + +func TestIndentedInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + }, + { + "Dns": "1.1.1.1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := "[]\n" + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorRawElements(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0", + "Node": "0" + }, + { + "Dns": "1.1.1.1", + "Node": "1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/network/cmd.go b/vendor/github.com/moby/moby/cli/command/network/cmd.go new file mode 100644 index 0000000..ab8393c --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/network/cmd.go @@ -0,0 +1,28 @@ +package network + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewNetworkCommand returns a cobra command for `network` subcommands +func NewNetworkCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "network", + Short: "Manage networks", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newConnectCommand(dockerCli), + newCreateCommand(dockerCli), + newDisconnectCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/network/connect.go b/vendor/github.com/moby/moby/cli/command/network/connect.go new file mode 100644 index 0000000..c4b676e --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/network/connect.go @@ -0,0 +1,64 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type connectOptions struct { + network string + container string + ipaddress string + ipv6address string + links opts.ListOpts + aliases []string + linklocalips []string +} + +func newConnectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := connectOptions{ + links: opts.NewListOpts(runconfigopts.ValidateLink), + } + + cmd := &cobra.Command{ + Use: "connect [OPTIONS] NETWORK CONTAINER", + Short: "Connect a container to a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.network = args[0] + opts.container = args[1] + return runConnect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.ipaddress, "ip", "", "IP Address") + flags.StringVar(&opts.ipv6address, "ip6", "", "IPv6 Address") + flags.Var(&opts.links, "link", "Add link to another container") + flags.StringSliceVar(&opts.aliases, "alias", []string{}, "Add network-scoped alias for the container") + flags.StringSliceVar(&opts.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") + + return cmd +} + +func runConnect(dockerCli *command.DockerCli, opts connectOptions) error { + client := dockerCli.Client() + + epConfig := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: opts.ipaddress, + IPv6Address: opts.ipv6address, + LinkLocalIPs: opts.linklocalips, + }, + Links: opts.links.GetAll(), + Aliases: opts.aliases, + } + + return client.NetworkConnect(context.Background(), opts.network, opts.container, epConfig) +} diff --git a/vendor/github.com/moby/moby/cli/command/network/create.go b/vendor/github.com/moby/moby/cli/command/network/create.go new file mode 100644 index 0000000..abc494e --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/network/create.go @@ -0,0 +1,226 @@ +package network + +import ( + "fmt" + "net" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts + internal bool + ipv6 bool + attachable bool + + ipamDriver string + ipamSubnet []string + ipamIPRange []string + ipamGateway []string + ipamAux opts.MapOpts + ipamOpt opts.MapOpts +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + ipamAux: *opts.NewMapOpts(nil, nil), + ipamOpt: *opts.NewMapOpts(nil, nil), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] NETWORK", + Short: "Create a network", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.name = args[0] + return runCreate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.driver, "driver", "d", "bridge", "Driver to manage the Network") + flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&opts.labels, "label", "Set metadata on a network") + flags.BoolVar(&opts.internal, "internal", false, "Restrict external access to the network") + flags.BoolVar(&opts.ipv6, "ipv6", false, "Enable IPv6 networking") + flags.BoolVar(&opts.attachable, "attachable", false, "Enable manual container attachment") + + flags.StringVar(&opts.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") + flags.StringSliceVar(&opts.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") + flags.StringSliceVar(&opts.ipamIPRange, "ip-range", []string{}, "Allocate container ip from a sub-range") + flags.StringSliceVar(&opts.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 Gateway for the master subnet") + + flags.Var(&opts.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") + flags.Var(&opts.ipamOpt, "ipam-opt", "Set IPAM driver specific options") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts createOptions) error { + client := dockerCli.Client() + + ipamCfg, err := consolidateIpam(opts.ipamSubnet, opts.ipamIPRange, opts.ipamGateway, opts.ipamAux.GetAll()) + if err != nil { + return err + } + + // Construct network create request body + nc := types.NetworkCreate{ + Driver: opts.driver, + Options: opts.driverOpts.GetAll(), + IPAM: &network.IPAM{ + Driver: opts.ipamDriver, + Config: ipamCfg, + Options: opts.ipamOpt.GetAll(), + }, + CheckDuplicate: true, + Internal: opts.internal, + EnableIPv6: opts.ipv6, + Attachable: opts.attachable, + Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), + } + + resp, err := client.NetworkCreate(context.Background(), opts.name, nc) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) + return nil +} + +// Consolidates the ipam configuration as a group from different related configurations +// user can configure network with multiple non-overlapping subnets and hence it is +// possible to correlate the various related parameters and consolidate them. +// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into +// structured ipam data. +func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { + if len(subnets) < len(ranges) || len(subnets) < len(gateways) { + return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") + } + iData := map[string]*network.IPAMConfig{} + + // Populate non-overlapping subnets into consolidation map + for _, s := range subnets { + for k := range iData { + ok1, err := subnetMatches(s, k) + if err != nil { + return nil, err + } + ok2, err := subnetMatches(k, s) + if err != nil { + return nil, err + } + if ok1 || ok2 { + return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") + } + } + iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} + } + + // Validate and add valid ip ranges + for _, r := range ranges { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, r) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].IPRange != "" { + return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) + } + d := iData[s] + d.IPRange = r + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for range %s", r) + } + } + + // Validate and add valid gateways + for _, g := range gateways { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, g) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].Gateway != "" { + return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) + } + d := iData[s] + d.Gateway = g + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for gateway %s", g) + } + } + + // Validate and add aux-addresses + for key, aa := range auxaddrs { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, aa) + if err != nil { + return nil, err + } + if !ok { + continue + } + iData[s].AuxAddress[key] = aa + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) + } + } + + idl := []network.IPAMConfig{} + for _, v := range iData { + idl = append(idl, *v) + } + return idl, nil +} + +func subnetMatches(subnet, data string) (bool, error) { + var ( + ip net.IP + ) + + _, s, err := net.ParseCIDR(subnet) + if err != nil { + return false, fmt.Errorf("Invalid subnet %s : %v", s, err) + } + + if strings.Contains(data, "/") { + ip, _, err = net.ParseCIDR(data) + if err != nil { + return false, fmt.Errorf("Invalid cidr %s : %v", data, err) + } + } else { + ip = net.ParseIP(data) + } + + return s.Contains(ip), nil +} diff --git a/vendor/github.com/moby/moby/cli/command/network/disconnect.go b/vendor/github.com/moby/moby/cli/command/network/disconnect.go new file mode 100644 index 0000000..c9d9c14 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/network/disconnect.go @@ -0,0 +1,41 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type disconnectOptions struct { + network string + container string + force bool +} + +func newDisconnectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := disconnectOptions{} + + cmd := &cobra.Command{ + Use: "disconnect [OPTIONS] NETWORK CONTAINER", + Short: "Disconnect a container from a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.network = args[0] + opts.container = args[1] + return runDisconnect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") + + return cmd +} + +func runDisconnect(dockerCli *command.DockerCli, opts disconnectOptions) error { + client := dockerCli.Client() + + return client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) +} diff --git a/vendor/github.com/moby/moby/cli/command/network/inspect.go b/vendor/github.com/moby/moby/cli/command/network/inspect.go new file mode 100644 index 0000000..1a86855 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/network/inspect.go @@ -0,0 +1,45 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NETWORK [NETWORK...]", + Short: "Display detailed information on one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getNetFunc := func(name string) (interface{}, []byte, error) { + return client.NetworkInspectWithRaw(ctx, name) + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) +} diff --git a/vendor/github.com/moby/moby/cli/command/network/list.go b/vendor/github.com/moby/moby/cli/command/network/list.go new file mode 100644 index 0000000..1a5d285 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/network/list.go @@ -0,0 +1,76 @@ +package network + +import ( + "sort" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type byNetworkName []types.NetworkResource + +func (r byNetworkName) Len() int { return len(r) } +func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } + +type listOptions struct { + quiet bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display network IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate the output") + flags.StringVar(&opts.format, "format", "", "Pretty-print networks using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 'driver=bridge')") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + options := types.NetworkListOptions{Filters: opts.filter.Value()} + networkResources, err := client.NetworkList(context.Background(), options) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().NetworksFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byNetworkName(networkResources)) + + networksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewNetworkFormat(format, opts.quiet), + Trunc: !opts.noTrunc, + } + return formatter.NetworkWrite(networksCtx, networkResources) +} diff --git a/vendor/github.com/moby/moby/cli/command/network/prune.go b/vendor/github.com/moby/moby/cli/command/network/prune.go new file mode 100644 index 0000000..9f1979e --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/network/prune.go @@ -0,0 +1,73 @@ +package network + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool +} + +// NewPruneCommand returns a new cobra prune command for networks +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + + return cmd +} + +const warning = `WARNING! This will remove all networks not used by at least one container. +Are you sure you want to continue?` + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (output string, err error) { + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().NetworksPrune(context.Background(), filters.Args{}) + if err != nil { + return + } + + if len(report.NetworksDeleted) > 0 { + output = "Deleted Networks:\n" + for _, id := range report.NetworksDeleted { + output += id + "\n" + } + } + + return +} + +// RunPrune calls the Network Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { + output, err := runPrune(dockerCli, pruneOptions{force: true}) + return 0, output, err +} diff --git a/vendor/github.com/moby/moby/cli/command/network/remove.go b/vendor/github.com/moby/moby/cli/command/network/remove.go new file mode 100644 index 0000000..2034b87 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/network/remove.go @@ -0,0 +1,43 @@ +package network + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "rm NETWORK [NETWORK...]", + Aliases: []string{"remove"}, + Short: "Remove one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } +} + +func runRemove(dockerCli *command.DockerCli, networks []string) error { + client := dockerCli.Client() + ctx := context.Background() + status := 0 + + for _, name := range networks { + if err := client.NetworkRemove(ctx, name); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/node/cmd.go b/vendor/github.com/moby/moby/cli/command/node/cmd.go new file mode 100644 index 0000000..e71b919 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/node/cmd.go @@ -0,0 +1,43 @@ +package node + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// NewNodeCommand returns a cobra command for `node` subcommands +func NewNodeCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "node", + Short: "Manage Swarm nodes", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newDemoteCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newPromoteCommand(dockerCli), + newRemoveCommand(dockerCli), + newPsCommand(dockerCli), + newUpdateCommand(dockerCli), + ) + return cmd +} + +// Reference returns the reference of a node. The special value "self" for a node +// reference is mapped to the current node, hence the node ID is retrieved using +// the `/info` endpoint. +func Reference(ctx context.Context, client apiclient.APIClient, ref string) (string, error) { + if ref == "self" { + info, err := client.Info(ctx) + if err != nil { + return "", err + } + return info.Swarm.NodeID, nil + } + return ref, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/node/demote.go b/vendor/github.com/moby/moby/cli/command/node/demote.go new file mode 100644 index 0000000..33f86c6 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/node/demote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newDemoteCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "demote NODE [NODE...]", + Short: "Demote one or more nodes from manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDemote(dockerCli, args) + }, + } +} + +func runDemote(dockerCli *command.DockerCli, nodes []string) error { + demote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleWorker { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a worker.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleWorker + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, demote, success) +} diff --git a/vendor/github.com/moby/moby/cli/command/node/inspect.go b/vendor/github.com/moby/moby/cli/command/node/inspect.go new file mode 100644 index 0000000..fde7018 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/node/inspect.go @@ -0,0 +1,144 @@ +package node + +import ( + "fmt" + "io" + "sort" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + nodeIds []string + format string + pretty bool +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] self|NODE [NODE...]", + Short: "Display detailed information on one or more nodes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.nodeIds = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + getRef := func(ref string) (interface{}, []byte, error) { + nodeRef, err := Reference(ctx, client, ref) + if err != nil { + return nil, nil, err + } + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + return node, nil, err + } + + if !opts.pretty { + return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef) + } + return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef) +} + +func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error { + for idx, ref := range refs { + obj, _, err := getRef(ref) + if err != nil { + return err + } + printNode(out, obj.(swarm.Node)) + + // TODO: better way to do this? + // print extra space between objects, but not after the last one + if idx+1 != len(refs) { + fmt.Fprintf(out, "\n\n") + } else { + fmt.Fprintf(out, "\n") + } + } + return nil +} + +// TODO: use a template +func printNode(out io.Writer, node swarm.Node) { + fmt.Fprintf(out, "ID:\t\t\t%s\n", node.ID) + ioutils.FprintfIfNotEmpty(out, "Name:\t\t\t%s\n", node.Spec.Name) + if node.Spec.Labels != nil { + fmt.Fprintln(out, "Labels:") + for k, v := range node.Spec.Labels { + fmt.Fprintf(out, " - %s = %s\n", k, v) + } + } + + ioutils.FprintfIfNotEmpty(out, "Hostname:\t\t%s\n", node.Description.Hostname) + fmt.Fprintf(out, "Joined at:\t\t%s\n", command.PrettyPrint(node.CreatedAt)) + fmt.Fprintln(out, "Status:") + fmt.Fprintf(out, " State:\t\t\t%s\n", command.PrettyPrint(node.Status.State)) + ioutils.FprintfIfNotEmpty(out, " Message:\t\t%s\n", command.PrettyPrint(node.Status.Message)) + fmt.Fprintf(out, " Availability:\t\t%s\n", command.PrettyPrint(node.Spec.Availability)) + ioutils.FprintfIfNotEmpty(out, " Address:\t\t%s\n", command.PrettyPrint(node.Status.Addr)) + + if node.ManagerStatus != nil { + fmt.Fprintln(out, "Manager Status:") + fmt.Fprintf(out, " Address:\t\t%s\n", node.ManagerStatus.Addr) + fmt.Fprintf(out, " Raft Status:\t\t%s\n", command.PrettyPrint(node.ManagerStatus.Reachability)) + leader := "No" + if node.ManagerStatus.Leader { + leader = "Yes" + } + fmt.Fprintf(out, " Leader:\t\t%s\n", leader) + } + + fmt.Fprintln(out, "Platform:") + fmt.Fprintf(out, " Operating System:\t%s\n", node.Description.Platform.OS) + fmt.Fprintf(out, " Architecture:\t\t%s\n", node.Description.Platform.Architecture) + + fmt.Fprintln(out, "Resources:") + fmt.Fprintf(out, " CPUs:\t\t\t%d\n", node.Description.Resources.NanoCPUs/1e9) + fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(node.Description.Resources.MemoryBytes))) + + var pluginTypes []string + pluginNamesByType := map[string][]string{} + for _, p := range node.Description.Engine.Plugins { + // append to pluginTypes only if not done previously + if _, ok := pluginNamesByType[p.Type]; !ok { + pluginTypes = append(pluginTypes, p.Type) + } + pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name) + } + + if len(pluginTypes) > 0 { + fmt.Fprintln(out, "Plugins:") + sort.Strings(pluginTypes) // ensure stable output + for _, pluginType := range pluginTypes { + fmt.Fprintf(out, " %s:\t\t%s\n", pluginType, strings.Join(pluginNamesByType[pluginType], ", ")) + } + } + fmt.Fprintf(out, "Engine Version:\t\t%s\n", node.Description.Engine.EngineVersion) + + if len(node.Description.Engine.Labels) != 0 { + fmt.Fprintln(out, "Engine Labels:") + for k, v := range node.Description.Engine.Labels { + fmt.Fprintf(out, " - %s = %s\n", k, v) + } + } +} diff --git a/vendor/github.com/moby/moby/cli/command/node/list.go b/vendor/github.com/moby/moby/cli/command/node/list.go new file mode 100644 index 0000000..9cacdcf --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/node/list.go @@ -0,0 +1,115 @@ +package node + +import ( + "fmt" + "io" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +const ( + listItemFmt = "%s\t%s\t%s\t%s\t%s\n" +) + +type listOptions struct { + quiet bool + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List nodes in the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + out := dockerCli.Out() + ctx := context.Background() + + nodes, err := client.NodeList( + ctx, + types.NodeListOptions{Filters: opts.filter.Value()}) + if err != nil { + return err + } + + if len(nodes) > 0 && !opts.quiet { + // only non-empty nodes and not quiet, should we call /info api + info, err := client.Info(ctx) + if err != nil { + return err + } + printTable(out, nodes, info) + } else if !opts.quiet { + // no nodes and not quiet, print only one line with columns ID, HOSTNAME, ... + printTable(out, nodes, types.Info{}) + } else { + printQuiet(out, nodes) + } + + return nil +} + +func printTable(out io.Writer, nodes []swarm.Node, info types.Info) { + writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + + fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "STATUS", "AVAILABILITY", "MANAGER STATUS") + for _, node := range nodes { + name := node.Description.Hostname + availability := string(node.Spec.Availability) + + reachability := "" + if node.ManagerStatus != nil { + if node.ManagerStatus.Leader { + reachability = "Leader" + } else { + reachability = string(node.ManagerStatus.Reachability) + } + } + + ID := node.ID + if node.ID == info.Swarm.NodeID { + ID = ID + " *" + } + + fmt.Fprintf( + writer, + listItemFmt, + ID, + name, + command.PrettyPrint(string(node.Status.State)), + command.PrettyPrint(availability), + command.PrettyPrint(reachability)) + } +} + +func printQuiet(out io.Writer, nodes []swarm.Node) { + for _, node := range nodes { + fmt.Fprintln(out, node.ID) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/node/opts.go b/vendor/github.com/moby/moby/cli/command/node/opts.go new file mode 100644 index 0000000..7e6c55d --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/node/opts.go @@ -0,0 +1,60 @@ +package node + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" +) + +type nodeOptions struct { + annotations + role string + availability string +} + +type annotations struct { + name string + labels opts.ListOpts +} + +func newNodeOptions() *nodeOptions { + return &nodeOptions{ + annotations: annotations{ + labels: opts.NewListOpts(nil), + }, + } +} + +func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) { + var spec swarm.NodeSpec + + spec.Annotations.Name = opts.annotations.name + spec.Annotations.Labels = runconfigopts.ConvertKVStringsToMap(opts.annotations.labels.GetAll()) + + switch swarm.NodeRole(strings.ToLower(opts.role)) { + case swarm.NodeRoleWorker: + spec.Role = swarm.NodeRoleWorker + case swarm.NodeRoleManager: + spec.Role = swarm.NodeRoleManager + case "": + default: + return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role) + } + + switch swarm.NodeAvailability(strings.ToLower(opts.availability)) { + case swarm.NodeAvailabilityActive: + spec.Availability = swarm.NodeAvailabilityActive + case swarm.NodeAvailabilityPause: + spec.Availability = swarm.NodeAvailabilityPause + case swarm.NodeAvailabilityDrain: + spec.Availability = swarm.NodeAvailabilityDrain + case "": + default: + return swarm.NodeSpec{}, fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) + } + + return spec, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/node/promote.go b/vendor/github.com/moby/moby/cli/command/node/promote.go new file mode 100644 index 0000000..f47d783 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/node/promote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newPromoteCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "promote NODE [NODE...]", + Short: "Promote one or more nodes to manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPromote(dockerCli, args) + }, + } +} + +func runPromote(dockerCli *command.DockerCli, nodes []string) error { + promote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleManager { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a manager.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleManager + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, promote, success) +} diff --git a/vendor/github.com/moby/moby/cli/command/node/ps.go b/vendor/github.com/moby/moby/cli/command/node/ps.go new file mode 100644 index 0000000..a034721 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/node/ps.go @@ -0,0 +1,93 @@ +package node + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/cli/command/task" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type psOptions struct { + nodeIDs []string + noResolve bool + noTrunc bool + filter opts.FilterOpt +} + +func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] [NODE...]", + Short: "List tasks running on one or more nodes, defaults to current node", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + opts.nodeIDs = []string{"self"} + + if len(args) != 0 { + opts.nodeIDs = args + } + + return runPs(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPs(dockerCli *command.DockerCli, opts psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var ( + errs []string + tasks []swarm.Task + ) + + for _, nodeID := range opts.nodeIDs { + nodeRef, err := Reference(ctx, client, nodeID) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + filter := opts.filter.Value() + filter.Add("node", node.ID) + + nodeTasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + tasks = append(tasks, nodeTasks...) + } + + if err := task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc); err != nil { + errs = append(errs, err.Error()) + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/node/remove.go b/vendor/github.com/moby/moby/cli/command/node/remove.go new file mode 100644 index 0000000..19b4a96 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/node/remove.go @@ -0,0 +1,56 @@ +package node + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := removeOptions{} + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] NODE [NODE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more nodes from the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force remove a node from the swarm") + return cmd +} + +func runRemove(dockerCli *command.DockerCli, args []string, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + + for _, nodeID := range args { + err := client.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: opts.force}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID) + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/node/update.go b/vendor/github.com/moby/moby/cli/command/node/update.go new file mode 100644 index 0000000..65339e1 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/node/update.go @@ -0,0 +1,121 @@ +package node + +import ( + "errors" + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/net/context" +) + +var ( + errNoRoleChange = errors.New("role was already set to the requested value") +) + +func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + nodeOpts := newNodeOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] NODE", + Short: "Update a node", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), args[0]) + }, + } + + flags := cmd.Flags() + flags.StringVar(&nodeOpts.role, flagRole, "", "Role of the node (worker/manager)") + flags.StringVar(&nodeOpts.availability, flagAvailability, "", "Availability of the node (active/pause/drain)") + flags.Var(&nodeOpts.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)") + labelKeys := opts.NewListOpts(nil) + flags.Var(&labelKeys, flagLabelRemove, "Remove a node label if exists") + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, nodeID string) error { + success := func(_ string) { + fmt.Fprintln(dockerCli.Out(), nodeID) + } + return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success) +} + +func updateNodes(dockerCli *command.DockerCli, nodes []string, mergeNode func(node *swarm.Node) error, success func(nodeID string)) error { + client := dockerCli.Client() + ctx := context.Background() + + for _, nodeID := range nodes { + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + err = mergeNode(&node) + if err != nil { + if err == errNoRoleChange { + continue + } + return err + } + err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec) + if err != nil { + return err + } + success(nodeID) + } + return nil +} + +func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error { + return func(node *swarm.Node) error { + spec := &node.Spec + + if flags.Changed(flagRole) { + str, err := flags.GetString(flagRole) + if err != nil { + return err + } + spec.Role = swarm.NodeRole(str) + } + if flags.Changed(flagAvailability) { + str, err := flags.GetString(flagAvailability) + if err != nil { + return err + } + spec.Availability = swarm.NodeAvailability(str) + } + if spec.Annotations.Labels == nil { + spec.Annotations.Labels = make(map[string]string) + } + if flags.Changed(flagLabelAdd) { + labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for k, v := range runconfigopts.ConvertKVStringsToMap(labels) { + spec.Annotations.Labels[k] = v + } + } + if flags.Changed(flagLabelRemove) { + keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, k := range keys { + // if a key doesn't exist, fail the command explicitly + if _, exists := spec.Annotations.Labels[k]; !exists { + return fmt.Errorf("key %s doesn't exist in node's labels", k) + } + delete(spec.Annotations.Labels, k) + } + } + return nil + } +} + +const ( + flagRole = "role" + flagAvailability = "availability" + flagLabelAdd = "label-add" + flagLabelRemove = "label-rm" +) diff --git a/vendor/github.com/moby/moby/cli/command/out.go b/vendor/github.com/moby/moby/cli/command/out.go new file mode 100644 index 0000000..85718d7 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/out.go @@ -0,0 +1,69 @@ +package command + +import ( + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/term" +) + +// OutStream is an output stream used by the DockerCli to write normal program +// output. +type OutStream struct { + out io.Writer + fd uintptr + isTerminal bool + state *term.State +} + +func (o *OutStream) Write(p []byte) (int, error) { + return o.out.Write(p) +} + +// FD returns the file descriptor number for this stream +func (o *OutStream) FD() uintptr { + return o.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (o *OutStream) IsTerminal() bool { + return o.isTerminal +} + +// SetRawTerminal sets raw mode on the output terminal +func (o *OutStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !o.isTerminal { + return nil + } + o.state, err = term.SetRawTerminalOutput(o.fd) + return err +} + +// RestoreTerminal restores normal mode to the terminal +func (o *OutStream) RestoreTerminal() { + if o.state != nil { + term.RestoreTerminal(o.fd, o.state) + } +} + +// GetTtySize returns the height and width in characters of the tty +func (o *OutStream) GetTtySize() (uint, uint) { + if !o.isTerminal { + return 0, 0 + } + ws, err := term.GetWinsize(o.fd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return uint(ws.Height), uint(ws.Width) +} + +// NewOutStream returns a new OutStream object from a Writer +func NewOutStream(out io.Writer) *OutStream { + fd, isTerminal := term.GetFdInfo(out) + return &OutStream{out: out, fd: fd, isTerminal: isTerminal} +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/cmd.go b/vendor/github.com/moby/moby/cli/command/plugin/cmd.go new file mode 100644 index 0000000..92c990a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/cmd.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +// NewPluginCommand returns a cobra command for `plugin` subcommands +func NewPluginCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "plugin", + Short: "Manage plugins", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + + cmd.AddCommand( + newDisableCommand(dockerCli), + newEnableCommand(dockerCli), + newInspectCommand(dockerCli), + newInstallCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newSetCommand(dockerCli), + newPushCommand(dockerCli), + newCreateCommand(dockerCli), + newUpgradeCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/create.go b/vendor/github.com/moby/moby/cli/command/plugin/create.go new file mode 100644 index 0000000..2aab1e9 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/create.go @@ -0,0 +1,125 @@ +package plugin + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/reference" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// validateTag checks if the given repoName can be resolved. +func validateTag(rawRepo string) error { + _, err := reference.ParseNamed(rawRepo) + + return err +} + +// validateConfig ensures that a valid config.json is available in the given path +func validateConfig(path string) error { + dt, err := os.Open(filepath.Join(path, "config.json")) + if err != nil { + return err + } + + m := types.PluginConfig{} + err = json.NewDecoder(dt).Decode(&m) + dt.Close() + + return err +} + +// validateContextDir validates the given dir and returns abs path on success. +func validateContextDir(contextDir string) (string, error) { + absContextDir, err := filepath.Abs(contextDir) + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", err + } + + if !stat.IsDir() { + return "", fmt.Errorf("context must be a directory") + } + + return absContextDir, nil +} + +type pluginCreateOptions struct { + repoName string + context string + compress bool +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + options := pluginCreateOptions{} + + cmd := &cobra.Command{ + Use: "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR", + Short: "Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.repoName = args[0] + options.context = args[1] + return runCreate(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&options.compress, "compress", false, "Compress the context using gzip") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, options pluginCreateOptions) error { + var ( + createCtx io.ReadCloser + err error + ) + + if err := validateTag(options.repoName); err != nil { + return err + } + + absContextDir, err := validateContextDir(options.context) + if err != nil { + return err + } + + if err := validateConfig(options.context); err != nil { + return err + } + + compression := archive.Uncompressed + if options.compress { + logrus.Debugf("compression enabled") + compression = archive.Gzip + } + + createCtx, err = archive.TarWithOptions(absContextDir, &archive.TarOptions{ + Compression: compression, + }) + + if err != nil { + return err + } + + ctx := context.Background() + + createOptions := types.PluginCreateOptions{RepoName: options.repoName} + if err = dockerCli.Client().PluginCreate(ctx, createCtx, createOptions); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), options.repoName) + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/disable.go b/vendor/github.com/moby/moby/cli/command/plugin/disable.go new file mode 100644 index 0000000..07b0ec2 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/disable.go @@ -0,0 +1,36 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +func newDisableCommand(dockerCli *command.DockerCli) *cobra.Command { + var force bool + + cmd := &cobra.Command{ + Use: "disable [OPTIONS] PLUGIN", + Short: "Disable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDisable(dockerCli, args[0], force) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&force, "force", "f", false, "Force the disable of an active plugin") + return cmd +} + +func runDisable(dockerCli *command.DockerCli, name string, force bool) error { + if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/enable.go b/vendor/github.com/moby/moby/cli/command/plugin/enable.go new file mode 100644 index 0000000..77762f4 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/enable.go @@ -0,0 +1,47 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type enableOpts struct { + timeout int + name string +} + +func newEnableCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts enableOpts + + cmd := &cobra.Command{ + Use: "enable [OPTIONS] PLUGIN", + Short: "Enable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.name = args[0] + return runEnable(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVar(&opts.timeout, "timeout", 0, "HTTP client timeout (in seconds)") + return cmd +} + +func runEnable(dockerCli *command.DockerCli, opts *enableOpts) error { + name := opts.name + if opts.timeout < 0 { + return fmt.Errorf("negative timeout %d is invalid", opts.timeout) + } + + if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/inspect.go b/vendor/github.com/moby/moby/cli/command/plugin/inspect.go new file mode 100644 index 0000000..c2c7a0d --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/inspect.go @@ -0,0 +1,42 @@ +package plugin + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + pluginNames []string + format string +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Display detailed information on one or more plugins", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.pluginNames = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + getRef := func(ref string) (interface{}, []byte, error) { + return client.PluginInspectWithRaw(ctx, ref) + } + + return inspect.Inspect(dockerCli.Out(), opts.pluginNames, opts.format, getRef) +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/install.go b/vendor/github.com/moby/moby/cli/command/plugin/install.go new file mode 100644 index 0000000..2c3170c --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/install.go @@ -0,0 +1,208 @@ +package plugin + +import ( + "bufio" + "errors" + "fmt" + "strings" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/net/context" +) + +type pluginOptions struct { + remote string + localName string + grantPerms bool + disable bool + args []string + skipRemoteCheck bool +} + +func loadPullFlags(opts *pluginOptions, flags *pflag.FlagSet) { + flags.BoolVar(&opts.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin") + command.AddTrustedFlags(flags, true) +} + +func newInstallCommand(dockerCli *command.DockerCli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "install [OPTIONS] PLUGIN [KEY=VALUE...]", + Short: "Install a plugin", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.remote = args[0] + if len(args) > 1 { + options.args = args[1:] + } + return runInstall(dockerCli, options) + }, + } + + flags := cmd.Flags() + loadPullFlags(&options, flags) + flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install") + flags.StringVar(&options.localName, "alias", "", "Local name for plugin") + return cmd +} + +func getRepoIndexFromUnnormalizedRef(ref distreference.Named) (*registrytypes.IndexInfo, error) { + named, err := reference.ParseNamed(ref.Name()) + if err != nil { + return nil, err + } + + repoInfo, err := registry.ParseRepositoryInfo(named) + if err != nil { + return nil, err + } + + return repoInfo.Index, nil +} + +type pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { + repoInfo, err = s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return +} + +func newRegistryService() registry.Service { + return pluginRegistryService{ + Service: registry.NewService(registry.ServiceOptions{V2Only: true}), + } +} + +func buildPullConfig(ctx context.Context, dockerCli *command.DockerCli, opts pluginOptions, cmdName string) (types.PluginInstallOptions, error) { + // Parse name using distribution reference package to support name + // containing both tag and digest. Names with both tag and digest + // will be treated by the daemon as a pull by digest with + // an alias for the tag (if no alias is provided). + ref, err := distreference.ParseNamed(opts.remote) + if err != nil { + return types.PluginInstallOptions{}, err + } + + index, err := getRepoIndexFromUnnormalizedRef(ref) + if err != nil { + return types.PluginInstallOptions{}, err + } + + repoInfoIndex, err := getRepoIndexFromUnnormalizedRef(ref) + if err != nil { + return types.PluginInstallOptions{}, err + } + remote := ref.String() + + _, isCanonical := ref.(distreference.Canonical) + if command.IsTrusted() && !isCanonical { + var nt reference.NamedTagged + named, err := reference.ParseNamed(ref.Name()) + if err != nil { + return types.PluginInstallOptions{}, err + } + if tagged, ok := ref.(distreference.Tagged); ok { + nt, err = reference.WithTag(named, tagged.Tag()) + if err != nil { + return types.PluginInstallOptions{}, err + } + } else { + named = reference.WithDefaultTag(named) + nt = named.(reference.NamedTagged) + } + + ctx := context.Background() + trusted, err := image.TrustedReference(ctx, dockerCli, nt, newRegistryService()) + if err != nil { + return types.PluginInstallOptions{}, err + } + remote = trusted.String() + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return types.PluginInstallOptions{}, err + } + + registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfoIndex, cmdName) + + options := types.PluginInstallOptions{ + RegistryAuth: encodedAuth, + RemoteRef: remote, + Disabled: opts.disable, + AcceptAllPermissions: opts.grantPerms, + AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.remote), + // TODO: Rename PrivilegeFunc, it has nothing to do with privileges + PrivilegeFunc: registryAuthFunc, + Args: opts.args, + } + return options, nil +} + +func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error { + var localName string + if opts.localName != "" { + aref, err := reference.ParseNamed(opts.localName) + if err != nil { + return err + } + aref = reference.WithDefaultTag(aref) + if _, ok := aref.(reference.NamedTagged); !ok { + return fmt.Errorf("invalid name: %s", opts.localName) + } + localName = aref.String() + } + + ctx := context.Background() + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin install") + if err != nil { + return err + } + responseBody, err := dockerCli.Client().PluginInstall(ctx, localName, options) + if err != nil { + if strings.Contains(err.Error(), "target is image") { + return errors.New(err.Error() + " - Use `docker image pull`") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.remote) // todo: return proper values from the API for this result + return nil +} + +func acceptPrivileges(dockerCli *command.DockerCli, name string) func(privileges types.PluginPrivileges) (bool, error) { + return func(privileges types.PluginPrivileges) (bool, error) { + fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name) + for _, privilege := range privileges { + fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value) + } + + fmt.Fprint(dockerCli.Out(), "Do you grant the above permissions? [y/N] ") + reader := bufio.NewReader(dockerCli.In()) + line, _, err := reader.ReadLine() + if err != nil { + return false, err + } + return strings.ToLower(string(line)) == "y", nil + } +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/list.go b/vendor/github.com/moby/moby/cli/command/plugin/list.go new file mode 100644 index 0000000..8fd16da --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/list.go @@ -0,0 +1,63 @@ +package plugin + +import ( + "fmt" + "strings" + "text/tabwriter" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type listOptions struct { + noTrunc bool +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts listOptions + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Short: "List plugins", + Aliases: []string{"list"}, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + plugins, err := dockerCli.Client().PluginList(context.Background()) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintf(w, "ID \tNAME \tDESCRIPTION\tENABLED") + fmt.Fprintf(w, "\n") + + for _, p := range plugins { + id := p.ID + desc := strings.Replace(p.Config.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !opts.noTrunc { + id = stringid.TruncateID(p.ID) + desc = stringutils.Ellipsis(desc, 45) + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%v\n", id, p.Name, desc, p.Enabled) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/push.go b/vendor/github.com/moby/moby/cli/command/plugin/push.go new file mode 100644 index 0000000..9abb38e --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/push.go @@ -0,0 +1,71 @@ +package plugin + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +func newPushCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "push [OPTIONS] PLUGIN[:TAG]", + Short: "Push a plugin to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPush(dockerCli, args[0]) + }, + } + + flags := cmd.Flags() + + command.AddTrustedFlags(flags, true) + + return cmd +} + +func runPush(dockerCli *command.DockerCli, name string) error { + named, err := reference.ParseNamed(name) // FIXME: validate + if err != nil { + return err + } + if reference.IsNameOnly(named) { + named = reference.WithDefaultTag(named) + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid name: %s", named.String()) + } + + ctx := context.Background() + + repoInfo, err := registry.ParseRepositoryInfo(named) + if err != nil { + return err + } + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + responseBody, err := dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth) + if err != nil { + return err + } + defer responseBody.Close() + + if command.IsTrusted() { + repoInfo.Class = "plugin" + return image.PushTrustedReference(dockerCli, repoInfo, named, authConfig, responseBody) + } + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/remove.go b/vendor/github.com/moby/moby/cli/command/plugin/remove.go new file mode 100644 index 0000000..9f3aba9 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/remove.go @@ -0,0 +1,55 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type rmOptions struct { + force bool + + plugins []string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Remove one or more plugins", + Aliases: []string{"remove"}, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.plugins = args + return runRemove(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of an active plugin") + return cmd +} + +func runRemove(dockerCli *command.DockerCli, opts *rmOptions) error { + ctx := context.Background() + + var errs cli.Errors + for _, name := range opts.plugins { + // TODO: pass names to api instead of making multiple api calls + if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil { + errs = append(errs, err) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + // Do not simplify to `return errs` because even if errs == nil, it is not a nil-error interface value. + if errs != nil { + return errs + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/set.go b/vendor/github.com/moby/moby/cli/command/plugin/set.go new file mode 100644 index 0000000..52b09fb --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/set.go @@ -0,0 +1,22 @@ +package plugin + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newSetCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "set PLUGIN KEY=VALUE [KEY=VALUE...]", + Short: "Change settings for a plugin", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:]) + }, + } + + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/plugin/upgrade.go b/vendor/github.com/moby/moby/cli/command/plugin/upgrade.go new file mode 100644 index 0000000..d212cd7 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/plugin/upgrade.go @@ -0,0 +1,100 @@ +package plugin + +import ( + "bufio" + "context" + "fmt" + "strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +func newUpgradeCommand(dockerCli *command.DockerCli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "upgrade [OPTIONS] PLUGIN [REMOTE]", + Short: "Upgrade an existing plugin", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.localName = args[0] + if len(args) == 2 { + options.remote = args[1] + } + return runUpgrade(dockerCli, options) + }, + } + + flags := cmd.Flags() + loadPullFlags(&options, flags) + flags.BoolVar(&options.skipRemoteCheck, "skip-remote-check", false, "Do not check if specified remote plugin matches existing plugin image") + return cmd +} + +func runUpgrade(dockerCli *command.DockerCli, opts pluginOptions) error { + ctx := context.Background() + p, _, err := dockerCli.Client().PluginInspectWithRaw(ctx, opts.localName) + if err != nil { + return fmt.Errorf("error reading plugin data: %v", err) + } + + if p.Enabled { + return fmt.Errorf("the plugin must be disabled before upgrading") + } + + opts.localName = p.Name + if opts.remote == "" { + opts.remote = p.PluginReference + } + remote, err := reference.ParseNamed(opts.remote) + if err != nil { + return errors.Wrap(err, "error parsing remote upgrade image reference") + } + remote = reference.WithDefaultTag(remote) + + old, err := reference.ParseNamed(p.PluginReference) + if err != nil { + return errors.Wrap(err, "error parsing current image reference") + } + old = reference.WithDefaultTag(old) + + fmt.Fprintf(dockerCli.Out(), "Upgrading plugin %s from %s to %s\n", p.Name, old, remote) + if !opts.skipRemoteCheck && remote.String() != old.String() { + _, err := fmt.Fprint(dockerCli.Out(), "Plugin images do not match, are you sure? ") + if err != nil { + return errors.Wrap(err, "error writing to stdout") + } + + rdr := bufio.NewReader(dockerCli.In()) + line, _, err := rdr.ReadLine() + if err != nil { + return errors.Wrap(err, "error reading from stdin") + } + if strings.ToLower(string(line)) != "y" { + return errors.New("canceling upgrade request") + } + } + + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin upgrade") + if err != nil { + return err + } + + responseBody, err := dockerCli.Client().PluginUpgrade(ctx, opts.localName, options) + if err != nil { + if strings.Contains(err.Error(), "target is image") { + return errors.New(err.Error() + " - Use `docker image pull`") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Upgraded plugin %s to %s\n", opts.localName, opts.remote) // todo: return proper values from the API for this result + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/prune/prune.go b/vendor/github.com/moby/moby/cli/command/prune/prune.go new file mode 100644 index 0000000..a022487 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/prune/prune.go @@ -0,0 +1,50 @@ +package prune + +import ( + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/container" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/cli/command/network" + "github.com/docker/docker/cli/command/volume" + "github.com/spf13/cobra" +) + +// NewContainerPruneCommand returns a cobra prune command for containers +func NewContainerPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return container.NewPruneCommand(dockerCli) +} + +// NewVolumePruneCommand returns a cobra prune command for volumes +func NewVolumePruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return volume.NewPruneCommand(dockerCli) +} + +// NewImagePruneCommand returns a cobra prune command for images +func NewImagePruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return image.NewPruneCommand(dockerCli) +} + +// NewNetworkPruneCommand returns a cobra prune command for Networks +func NewNetworkPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return network.NewPruneCommand(dockerCli) +} + +// RunContainerPrune executes a prune command for containers +func RunContainerPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return container.RunPrune(dockerCli) +} + +// RunVolumePrune executes a prune command for volumes +func RunVolumePrune(dockerCli *command.DockerCli) (uint64, string, error) { + return volume.RunPrune(dockerCli) +} + +// RunImagePrune executes a prune command for images +func RunImagePrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) { + return image.RunPrune(dockerCli, all) +} + +// RunNetworkPrune executes a prune command for networks +func RunNetworkPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return network.RunPrune(dockerCli) +} diff --git a/vendor/github.com/moby/moby/cli/command/registry.go b/vendor/github.com/moby/moby/cli/command/registry.go new file mode 100644 index 0000000..65f6b33 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/registry.go @@ -0,0 +1,186 @@ +package command + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" +) + +// ElectAuthServer returns the default registry to use (by asking the daemon) +func ElectAuthServer(ctx context.Context, cli *DockerCli) string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.Client().Info(ctx); err != nil { + fmt.Fprintf(cli.Out(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + +// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info +// for the given command. +func RegistryAuthenticationPrivilegedFunc(cli *DockerCli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) + authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry) + if err != nil { + return "", err + } + return EncodeAuthToBase64(authConfig) + } +} + +// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. +func ResolveAuthConfig(ctx context.Context, cli *DockerCli, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = ElectAuthServer(ctx, cli) + } + + a, _ := cli.CredentialsStore(configKey).Get(configKey) + return a +} + +// ConfigureAuth returns an AuthConfig from the specified user, password and server. +func ConfigureAuth(cli *DockerCli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { + // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.in = NewInStream(os.Stdin) + } + + if !isDefaultRegistry { + serverAddress = registry.ConvertToHostname(serverAddress) + } + + authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress) + if err != nil { + return authconfig, err + } + + // Some links documenting this: + // - https://code.google.com/archive/p/mintty/issues/56 + // - https://github.com/docker/docker/issues/15272 + // - https://mintty.github.io/ (compatibility) + // Linux will hit this if you attempt `cat | docker login`, and Windows + // will hit this if you attempt docker login from mintty where stdin + // is a pipe, not a character based console. + if flPassword == "" && !cli.In().IsTerminal() { + return authconfig, fmt.Errorf("Error: Cannot perform an interactive login from a non TTY device") + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a default registry (docker hub), then display the following message. + fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + promptWithDefault(cli.Out(), "Username", authconfig.Username) + flUser = readInput(cli.In(), cli.Out()) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + if flUser == "" { + return authconfig, fmt.Errorf("Error: Non-null Username Required") + } + if flPassword == "" { + oldState, err := term.SaveState(cli.In().FD()) + if err != nil { + return authconfig, err + } + fmt.Fprintf(cli.Out(), "Password: ") + term.DisableEcho(cli.In().FD(), oldState) + + flPassword = readInput(cli.In(), cli.Out()) + fmt.Fprint(cli.Out(), "\n") + + term.RestoreTerminal(cli.In().FD(), oldState) + if flPassword == "" { + return authconfig, fmt.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + + return authconfig, nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +func promptWithDefault(out io.Writer, prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(out, "%s: ", prompt) + } else { + fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) + } +} + +// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image +func RetrieveAuthTokenFromImage(ctx context.Context, cli *DockerCli, image string) (string, error) { + // Retrieve encoded auth token from the image reference + authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) + if err != nil { + return "", err + } + encodedAuth, err := EncodeAuthToBase64(authConfig) + if err != nil { + return "", err + } + return encodedAuth, nil +} + +// resolveAuthConfigFromImage retrieves that AuthConfig using the image string +func resolveAuthConfigFromImage(ctx context.Context, cli *DockerCli, image string) (types.AuthConfig, error) { + registryRef, err := reference.ParseNamed(image) + if err != nil { + return types.AuthConfig{}, err + } + repoInfo, err := registry.ParseRepositoryInfo(registryRef) + if err != nil { + return types.AuthConfig{}, err + } + return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil +} diff --git a/vendor/github.com/moby/moby/cli/command/registry/login.go b/vendor/github.com/moby/moby/cli/command/registry/login.go new file mode 100644 index 0000000..05b3bb0 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/registry/login.go @@ -0,0 +1,85 @@ +package registry + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type loginOptions struct { + serverAddress string + user string + password string + email string +} + +// NewLoginCommand creates a new `docker login` command +func NewLoginCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts loginOptions + + cmd := &cobra.Command{ + Use: "login [OPTIONS] [SERVER]", + Short: "Log in to a Docker registry", + Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + opts.serverAddress = args[0] + } + return runLogin(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.user, "username", "u", "", "Username") + flags.StringVarP(&opts.password, "password", "p", "", "Password") + + // Deprecated in 1.11: Should be removed in docker 1.14 + flags.StringVarP(&opts.email, "email", "e", "", "Email") + flags.MarkDeprecated("email", "will be removed in 1.14.") + + return cmd +} + +func runLogin(dockerCli *command.DockerCli, opts loginOptions) error { + ctx := context.Background() + clnt := dockerCli.Client() + + var ( + serverAddress string + authServer = command.ElectAuthServer(ctx, dockerCli) + ) + if opts.serverAddress != "" { + serverAddress = opts.serverAddress + } else { + serverAddress = authServer + } + + isDefaultRegistry := serverAddress == authServer + + authConfig, err := command.ConfigureAuth(dockerCli, opts.user, opts.password, serverAddress, isDefaultRegistry) + if err != nil { + return err + } + response, err := clnt.RegistryLogin(ctx, authConfig) + if err != nil { + return err + } + if response.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = response.IdentityToken + } + if err := dockerCli.CredentialsStore(serverAddress).Store(authConfig); err != nil { + return fmt.Errorf("Error saving credentials: %v", err) + } + + if response.Status != "" { + fmt.Fprintln(dockerCli.Out(), response.Status) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/registry/logout.go b/vendor/github.com/moby/moby/cli/command/registry/logout.go new file mode 100644 index 0000000..877e60e --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/registry/logout.go @@ -0,0 +1,77 @@ +package registry + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewLogoutCommand creates a new `docker login` command +func NewLogoutCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "logout [SERVER]", + Short: "Log out from a Docker registry", + Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var serverAddress string + if len(args) > 0 { + serverAddress = args[0] + } + return runLogout(dockerCli, serverAddress) + }, + } + + return cmd +} + +func runLogout(dockerCli *command.DockerCli, serverAddress string) error { + ctx := context.Background() + var isDefaultRegistry bool + + if serverAddress == "" { + serverAddress = command.ElectAuthServer(ctx, dockerCli) + isDefaultRegistry = true + } + + var ( + loggedIn bool + regsToLogout []string + hostnameAddress = serverAddress + regsToTry = []string{serverAddress} + ) + if !isDefaultRegistry { + hostnameAddress = registry.ConvertToHostname(serverAddress) + // the tries below are kept for backward compatibility where a user could have + // saved the registry in one of the following format. + regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress) + } + + // check if we're logged in based on the records in the config file + // which means it couldn't have user/pass cause they may be in the creds store + for _, s := range regsToTry { + if _, ok := dockerCli.ConfigFile().AuthConfigs[s]; ok { + loggedIn = true + regsToLogout = append(regsToLogout, s) + } + } + + if !loggedIn { + fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", hostnameAddress) + return nil + } + + fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress) + for _, r := range regsToLogout { + if err := dockerCli.CredentialsStore(r).Erase(r); err != nil { + fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/registry/search.go b/vendor/github.com/moby/moby/cli/command/registry/search.go new file mode 100644 index 0000000..124b4ae --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/registry/search.go @@ -0,0 +1,126 @@ +package registry + +import ( + "fmt" + "sort" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type searchOptions struct { + term string + noTrunc bool + limit int + filter opts.FilterOpt + + // Deprecated + stars uint + automated bool +} + +// NewSearchCommand creates a new `docker search` command +func NewSearchCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := searchOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "search [OPTIONS] TERM", + Short: "Search the Docker Hub for images", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.term = args[0] + return runSearch(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + flags.IntVar(&opts.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") + + flags.BoolVar(&opts.automated, "automated", false, "Only show automated builds") + flags.UintVarP(&opts.stars, "stars", "s", 0, "Only displays with at least x stars") + + flags.MarkDeprecated("automated", "use --filter=automated=true instead") + flags.MarkDeprecated("stars", "use --filter=stars=3 instead") + + return cmd +} + +func runSearch(dockerCli *command.DockerCli, opts searchOptions) error { + indexInfo, err := registry.ParseSearchIndexInfo(opts.term) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, indexInfo) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, indexInfo, "search") + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageSearchOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + Filters: opts.filter.Value(), + Limit: opts.limit, + } + + clnt := dockerCli.Client() + + unorderedResults, err := clnt.ImageSearch(ctx, opts.term, options) + if err != nil { + return err + } + + results := searchResultsByStars(unorderedResults) + sort.Sort(results) + + w := tabwriter.NewWriter(dockerCli.Out(), 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + // --automated and -s, --stars are deprecated since Docker 1.12 + if (opts.automated && !res.IsAutomated) || (int(opts.stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !opts.noTrunc { + desc = stringutils.Ellipsis(desc, 45) + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if res.IsAutomated { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// SearchResultsByStars sorts search results in descending order by number of stars. +type searchResultsByStars []registrytypes.SearchResult + +func (r searchResultsByStars) Len() int { return len(r) } +func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/vendor/github.com/moby/moby/cli/command/secret/cmd.go b/vendor/github.com/moby/moby/cli/command/secret/cmd.go new file mode 100644 index 0000000..79e6698 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/secret/cmd.go @@ -0,0 +1,25 @@ +package secret + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewSecretCommand returns a cobra command for `secret` subcommands +func NewSecretCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "secret", + Short: "Manage Docker secrets", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newSecretListCommand(dockerCli), + newSecretCreateCommand(dockerCli), + newSecretInspectCommand(dockerCli), + newSecretRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/secret/create.go b/vendor/github.com/moby/moby/cli/command/secret/create.go new file mode 100644 index 0000000..f4683a6 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/secret/create.go @@ -0,0 +1,79 @@ +package secret + +import ( + "fmt" + "io" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/system" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type createOptions struct { + name string + file string + labels opts.ListOpts +} + +func newSecretCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + createOpts := createOptions{ + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] SECRET file|-", + Short: "Create a secret from a file or STDIN as content", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + createOpts.name = args[0] + createOpts.file = args[1] + return runSecretCreate(dockerCli, createOpts) + }, + } + flags := cmd.Flags() + flags.VarP(&createOpts.labels, "label", "l", "Secret labels") + + return cmd +} + +func runSecretCreate(dockerCli *command.DockerCli, options createOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var in io.Reader = dockerCli.In() + if options.file != "-" { + file, err := system.OpenSequential(options.file) + if err != nil { + return err + } + in = file + defer file.Close() + } + + secretData, err := ioutil.ReadAll(in) + if err != nil { + return fmt.Errorf("Error reading content from %q: %v", options.file, err) + } + + spec := swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: options.name, + Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), + }, + Data: secretData, + } + + r, err := client.SecretCreate(ctx, spec) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), r.ID) + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/secret/inspect.go b/vendor/github.com/moby/moby/cli/command/secret/inspect.go new file mode 100644 index 0000000..0a8bd4a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/secret/inspect.go @@ -0,0 +1,45 @@ +package secret + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + names []string + format string +} + +func newSecretInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := inspectOptions{} + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SECRET [SECRET...]", + Short: "Display detailed information on one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runSecretInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runSecretInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + ids, err := getCliRequestedSecretIDs(ctx, client, opts.names) + if err != nil { + return err + } + getRef := func(id string) (interface{}, []byte, error) { + return client.SecretInspectWithRaw(ctx, id) + } + + return inspect.Inspect(dockerCli.Out(), ids, opts.format, getRef) +} diff --git a/vendor/github.com/moby/moby/cli/command/secret/ls.go b/vendor/github.com/moby/moby/cli/command/secret/ls.go new file mode 100644 index 0000000..faeab31 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/secret/ls.go @@ -0,0 +1,68 @@ +package secret + +import ( + "fmt" + "text/tabwriter" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type listOptions struct { + quiet bool +} + +func newSecretListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List secrets", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runSecretList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + + return cmd +} + +func runSecretList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + secrets, err := client.SecretList(ctx, types.SecretListOptions{}) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + if opts.quiet { + for _, s := range secrets { + fmt.Fprintf(w, "%s\n", s.ID) + } + } else { + fmt.Fprintf(w, "ID\tNAME\tCREATED\tUPDATED") + fmt.Fprintf(w, "\n") + + for _, s := range secrets { + created := units.HumanDuration(time.Now().UTC().Sub(s.Meta.CreatedAt)) + " ago" + updated := units.HumanDuration(time.Now().UTC().Sub(s.Meta.UpdatedAt)) + " ago" + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", s.ID, s.Spec.Annotations.Name, created, updated) + } + } + + w.Flush() + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/secret/remove.go b/vendor/github.com/moby/moby/cli/command/secret/remove.go new file mode 100644 index 0000000..f45a619 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/secret/remove.go @@ -0,0 +1,57 @@ +package secret + +import ( + "fmt" + "strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type removeOptions struct { + names []string +} + +func newSecretRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "rm SECRET [SECRET...]", + Aliases: []string{"remove"}, + Short: "Remove one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts := removeOptions{ + names: args, + } + return runSecretRemove(dockerCli, opts) + }, + } +} + +func runSecretRemove(dockerCli *command.DockerCli, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + ids, err := getCliRequestedSecretIDs(ctx, client, opts.names) + if err != nil { + return err + } + + var errs []string + + for _, id := range ids { + if err := client.SecretRemove(ctx, id); err != nil { + errs = append(errs, err.Error()) + continue + } + + fmt.Fprintln(dockerCli.Out(), id) + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/secret/utils.go b/vendor/github.com/moby/moby/cli/command/secret/utils.go new file mode 100644 index 0000000..11d31ff --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/secret/utils.go @@ -0,0 +1,76 @@ +package secret + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +// GetSecretsByNameOrIDPrefixes returns secrets given a list of ids or names +func GetSecretsByNameOrIDPrefixes(ctx context.Context, client client.APIClient, terms []string) ([]swarm.Secret, error) { + args := filters.NewArgs() + for _, n := range terms { + args.Add("names", n) + args.Add("id", n) + } + + return client.SecretList(ctx, types.SecretListOptions{ + Filters: args, + }) +} + +func getCliRequestedSecretIDs(ctx context.Context, client client.APIClient, terms []string) ([]string, error) { + secrets, err := GetSecretsByNameOrIDPrefixes(ctx, client, terms) + if err != nil { + return nil, err + } + + if len(secrets) > 0 { + found := make(map[string]struct{}) + next: + for _, term := range terms { + // attempt to lookup secret by full ID + for _, s := range secrets { + if s.ID == term { + found[s.ID] = struct{}{} + continue next + } + } + // attempt to lookup secret by full name + for _, s := range secrets { + if s.Spec.Annotations.Name == term { + found[s.ID] = struct{}{} + continue next + } + } + // attempt to lookup secret by partial ID (prefix) + // return error if more than one matches found (ambiguous) + n := 0 + for _, s := range secrets { + if strings.HasPrefix(s.ID, term) { + found[s.ID] = struct{}{} + n++ + } + } + if n > 1 { + return nil, fmt.Errorf("secret %s is ambiguous (%d matches found)", term, n) + } + } + + // We already collected all the IDs found. + // Now we will remove duplicates by converting the map to slice + ids := []string{} + for id := range found { + ids = append(ids, id) + } + + return ids, nil + } + + return terms, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/service/cmd.go b/vendor/github.com/moby/moby/cli/command/service/cmd.go new file mode 100644 index 0000000..796fe92 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/cmd.go @@ -0,0 +1,29 @@ +package service + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewServiceCommand returns a cobra command for `service` subcommands +func NewServiceCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "service", + Short: "Manage services", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newPsCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newScaleCommand(dockerCli), + newUpdateCommand(dockerCli), + newLogsCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/service/create.go b/vendor/github.com/moby/moby/cli/command/service/create.go new file mode 100644 index 0000000..1355c19 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/create.go @@ -0,0 +1,100 @@ +package service + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := newServiceOptions() + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new service", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + if len(args) > 1 { + opts.args = args[1:] + } + return runCreate(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)") + flags.StringVar(&opts.name, flagName, "", "Service name") + + addServiceFlags(cmd, opts) + + flags.VarP(&opts.labels, flagLabel, "l", "Service labels") + flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels") + flags.VarP(&opts.env, flagEnv, "e", "Set environment variables") + flags.Var(&opts.envFile, flagEnvFile, "Read in a file of environment variables") + flags.Var(&opts.mounts, flagMount, "Attach a filesystem mount to the service") + flags.Var(&opts.constraints, flagConstraint, "Placement constraints") + flags.Var(&opts.networks, flagNetwork, "Network attachments") + flags.Var(&opts.secrets, flagSecret, "Specify secrets to expose to the service") + flags.VarP(&opts.endpoint.publishPorts, flagPublish, "p", "Publish a port as a node port") + flags.Var(&opts.groups, flagGroup, "Set one or more supplementary user groups for the container") + flags.Var(&opts.dns, flagDNS, "Set custom DNS servers") + flags.Var(&opts.dnsOption, flagDNSOption, "Set DNS options") + flags.Var(&opts.dnsSearch, flagDNSSearch, "Set custom DNS search domains") + flags.Var(&opts.hosts, flagHost, "Set one or more custom host-to-IP mappings (host:ip)") + + flags.SetInterspersed(false) + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts *serviceOptions) error { + apiClient := dockerCli.Client() + createOpts := types.ServiceCreateOptions{} + + service, err := opts.ToService() + if err != nil { + return err + } + + specifiedSecrets := opts.secrets.Value() + if len(specifiedSecrets) > 0 { + // parse and validate secrets + secrets, err := ParseSecrets(apiClient, specifiedSecrets) + if err != nil { + return err + } + service.TaskTemplate.ContainerSpec.Secrets = secrets + + } + + ctx := context.Background() + + if err := resolveServiceImageDigest(dockerCli, &service); err != nil { + return err + } + + // only send auth if flag was set + if opts.registryAuth { + // Retrieve encoded auth token from the image reference + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, opts.image) + if err != nil { + return err + } + createOpts.EncodedRegistryAuth = encodedAuth + } + + response, err := apiClient.ServiceCreate(ctx, service, createOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/service/inspect.go b/vendor/github.com/moby/moby/cli/command/service/inspect.go new file mode 100644 index 0000000..deb701b --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/inspect.go @@ -0,0 +1,84 @@ +package service + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + refs []string + format string + pretty bool +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SERVICE [SERVICE...]", + Short: "Display detailed information on one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + + if opts.pretty && len(opts.format) > 0 { + return fmt.Errorf("--format is incompatible with human friendly format") + } + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.pretty { + opts.format = "pretty" + } + + getRef := func(ref string) (interface{}, []byte, error) { + service, _, err := client.ServiceInspectWithRaw(ctx, ref) + if err == nil || !apiclient.IsErrServiceNotFound(err) { + return service, nil, err + } + return nil, nil, fmt.Errorf("Error: no such service: %s", ref) + } + + f := opts.format + if len(f) == 0 { + f = "raw" + if len(dockerCli.ConfigFile().ServiceInspectFormat) > 0 { + f = dockerCli.ConfigFile().ServiceInspectFormat + } + } + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return fmt.Errorf("Cannot supply extra formatting options to the pretty template") + } + + serviceCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewServiceFormat(f), + } + + if err := formatter.ServiceInspectWrite(serviceCtx, opts.refs, getRef); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/service/inspect_test.go b/vendor/github.com/moby/moby/cli/command/service/inspect_test.go new file mode 100644 index 0000000..04a6508 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/inspect_test.go @@ -0,0 +1,129 @@ +package service + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/pkg/testutil/assert" +) + +func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string { + b := new(bytes.Buffer) + + endpointSpec := &swarm.EndpointSpec{ + Mode: "vip", + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + }, + }, + } + + two := uint64(2) + + s := swarm.Service{ + ID: "de179gar9d0o7ltdybungplod", + Meta: swarm.Meta{ + Version: swarm.Version{Index: 315}, + CreatedAt: now, + UpdatedAt: now, + }, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "my_service", + Labels: map[string]string{"com.label": "foo"}, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "foo/bar@sha256:this_is_a_test", + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &two, + }, + }, + UpdateConfig: nil, + Networks: []swarm.NetworkAttachmentConfig{ + { + Target: "5vpyomhb6ievnk0i0o60gcnei", + Aliases: []string{"web"}, + }, + }, + EndpointSpec: endpointSpec, + }, + Endpoint: swarm.Endpoint{ + Spec: *endpointSpec, + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + PublishedPort: 30000, + }, + }, + VirtualIPs: []swarm.EndpointVirtualIP{ + { + NetworkID: "6o4107cj2jx9tihgb0jyts6pj", + Addr: "10.255.0.4/16", + }, + }, + }, + UpdateStatus: swarm.UpdateStatus{ + StartedAt: now, + CompletedAt: now, + }, + } + + ctx := formatter.Context{ + Output: b, + Format: format, + } + + err := formatter.ServiceInspectWrite(ctx, []string{"de179gar9d0o7ltdybungplod"}, func(ref string) (interface{}, []byte, error) { + return s, nil, nil + }) + if err != nil { + t.Fatal(err) + } + return b.String() +} + +func TestPrettyPrintWithNoUpdateConfig(t *testing.T) { + s := formatServiceInspect(t, formatter.NewServiceFormat("pretty"), time.Now()) + if strings.Contains(s, "UpdateStatus") { + t.Fatal("Pretty print failed before parsing UpdateStatus") + } +} + +func TestJSONFormatWithNoUpdateConfig(t *testing.T) { + now := time.Now() + // s1: [{"ID":..}] + // s2: {"ID":..} + s1 := formatServiceInspect(t, formatter.NewServiceFormat(""), now) + t.Log("// s1") + t.Logf("%s", s1) + s2 := formatServiceInspect(t, formatter.NewServiceFormat("{{json .}}"), now) + t.Log("// s2") + t.Logf("%s", s2) + var m1Wrap []map[string]interface{} + if err := json.Unmarshal([]byte(s1), &m1Wrap); err != nil { + t.Fatal(err) + } + if len(m1Wrap) != 1 { + t.Fatalf("strange s1=%s", s1) + } + m1 := m1Wrap[0] + t.Logf("m1=%+v", m1) + var m2 map[string]interface{} + if err := json.Unmarshal([]byte(s2), &m2); err != nil { + t.Fatal(err) + } + t.Logf("m2=%+v", m2) + assert.DeepEqual(t, m2, m1) +} diff --git a/vendor/github.com/moby/moby/cli/command/service/list.go b/vendor/github.com/moby/moby/cli/command/service/list.go new file mode 100644 index 0000000..7241260 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/list.go @@ -0,0 +1,158 @@ +package service + +import ( + "fmt" + "io" + "text/tabwriter" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/stringid" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +const ( + listItemFmt = "%s\t%s\t%s\t%s\t%s\n" +) + +type listOptions struct { + quiet bool + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List services", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + ctx := context.Background() + client := dockerCli.Client() + out := dockerCli.Out() + + services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: opts.filter.Value()}) + if err != nil { + return err + } + + if len(services) > 0 && !opts.quiet { + // only non-empty services and not quiet, should we call TaskList and NodeList api + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + + PrintNotQuiet(out, services, nodes, tasks) + } else if !opts.quiet { + // no services and not quiet, print only one line with columns ID, NAME, MODE, REPLICAS... + PrintNotQuiet(out, services, []swarm.Node{}, []swarm.Task{}) + } else { + PrintQuiet(out, services) + } + + return nil +} + +// PrintNotQuiet shows service list in a non-quiet way. +// Besides this, command `docker stack services xxx` will call this, too. +func PrintNotQuiet(out io.Writer, services []swarm.Service, nodes []swarm.Node, tasks []swarm.Task) { + activeNodes := make(map[string]struct{}) + for _, n := range nodes { + if n.Status.State != swarm.NodeStateDown { + activeNodes[n.ID] = struct{}{} + } + } + + running := map[string]int{} + tasksNoShutdown := map[string]int{} + + for _, task := range tasks { + if task.DesiredState != swarm.TaskStateShutdown { + tasksNoShutdown[task.ServiceID]++ + } + + if _, nodeActive := activeNodes[task.NodeID]; nodeActive && task.Status.State == swarm.TaskStateRunning { + running[task.ServiceID]++ + } + } + + printTable(out, services, running, tasksNoShutdown) +} + +func printTable(out io.Writer, services []swarm.Service, running, tasksNoShutdown map[string]int) { + writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + + fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "MODE", "REPLICAS", "IMAGE") + + for _, service := range services { + mode := "" + replicas := "" + if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + mode = "replicated" + replicas = fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas) + } else if service.Spec.Mode.Global != nil { + mode = "global" + replicas = fmt.Sprintf("%d/%d", running[service.ID], tasksNoShutdown[service.ID]) + } + image := service.Spec.TaskTemplate.ContainerSpec.Image + ref, err := distreference.ParseNamed(image) + if err == nil { + // update image string for display + namedTagged, ok := ref.(distreference.NamedTagged) + if ok { + image = namedTagged.Name() + ":" + namedTagged.Tag() + } + } + + fmt.Fprintf( + writer, + listItemFmt, + stringid.TruncateID(service.ID), + service.Spec.Name, + mode, + replicas, + image) + } +} + +// PrintQuiet shows service list in a quiet way. +// Besides this, command `docker stack services xxx` will call this, too. +func PrintQuiet(out io.Writer, services []swarm.Service) { + for _, service := range services { + fmt.Fprintln(out, service.ID) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/service/logs.go b/vendor/github.com/moby/moby/cli/command/service/logs.go new file mode 100644 index 0000000..19d3d9a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/logs.go @@ -0,0 +1,163 @@ +package service + +import ( + "bytes" + "fmt" + "io" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/pkg/stdcopy" + "github.com/spf13/cobra" +) + +type logsOptions struct { + noResolve bool + follow bool + since string + timestamps bool + details bool + tail string + + service string +} + +func newLogsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] SERVICE", + Short: "Fetch the logs of a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.service = args[0] + return runLogs(dockerCli, &opts) + }, + Tags: map[string]string{"experimental": ""}, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { + ctx := context.Background() + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + Details: opts.details, + } + + client := dockerCli.Client() + responseBody, err := client.ServiceLogs(ctx, opts.service, options) + if err != nil { + return err + } + defer responseBody.Close() + + resolver := idresolver.New(client, opts.noResolve) + + stdout := &logWriter{ctx: ctx, opts: opts, r: resolver, w: dockerCli.Out()} + stderr := &logWriter{ctx: ctx, opts: opts, r: resolver, w: dockerCli.Err()} + + // TODO(aluzzardi): Do an io.Copy for services with TTY enabled. + _, err = stdcopy.StdCopy(stdout, stderr, responseBody) + return err +} + +type logWriter struct { + ctx context.Context + opts *logsOptions + r *idresolver.IDResolver + w io.Writer +} + +func (lw *logWriter) Write(buf []byte) (int, error) { + contextIndex := 0 + numParts := 2 + if lw.opts.timestamps { + contextIndex++ + numParts++ + } + + parts := bytes.SplitN(buf, []byte(" "), numParts) + if len(parts) != numParts { + return 0, fmt.Errorf("invalid context in log message: %v", string(buf)) + } + + taskName, nodeName, err := lw.parseContext(string(parts[contextIndex])) + if err != nil { + return 0, err + } + + output := []byte{} + for i, part := range parts { + // First part doesn't get space separation. + if i > 0 { + output = append(output, []byte(" ")...) + } + + if i == contextIndex { + // TODO(aluzzardi): Consider constant padding. + output = append(output, []byte(fmt.Sprintf("%s@%s |", taskName, nodeName))...) + } else { + output = append(output, part...) + } + } + _, err = lw.w.Write(output) + if err != nil { + return 0, err + } + + return len(buf), nil +} + +func (lw *logWriter) parseContext(input string) (string, string, error) { + context := make(map[string]string) + + components := strings.Split(input, ",") + for _, component := range components { + parts := strings.SplitN(component, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid context: %s", input) + } + context[parts[0]] = parts[1] + } + + taskID, ok := context["com.docker.swarm.task.id"] + if !ok { + return "", "", fmt.Errorf("missing task id in context: %s", input) + } + taskName, err := lw.r.Resolve(lw.ctx, swarm.Task{}, taskID) + if err != nil { + return "", "", err + } + + nodeID, ok := context["com.docker.swarm.node.id"] + if !ok { + return "", "", fmt.Errorf("missing node id in context: %s", input) + } + nodeName, err := lw.r.Resolve(lw.ctx, swarm.Node{}, nodeID) + if err != nil { + return "", "", err + } + + return taskName, nodeName, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/service/opts.go b/vendor/github.com/moby/moby/cli/command/service/opts.go new file mode 100644 index 0000000..cbe544a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/opts.go @@ -0,0 +1,648 @@ +package service + +import ( + "encoding/csv" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type int64Value interface { + Value() int64 +} + +type memBytes int64 + +func (m *memBytes) String() string { + return units.BytesSize(float64(m.Value())) +} + +func (m *memBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = memBytes(val) + return err +} + +func (m *memBytes) Type() string { + return "bytes" +} + +func (m *memBytes) Value() int64 { + return int64(*m) +} + +// PositiveDurationOpt is an option type for time.Duration that uses a pointer. +// It bahave similarly to DurationOpt but only allows positive duration values. +type PositiveDurationOpt struct { + DurationOpt +} + +// Set a new value on the option. Setting a negative duration value will cause +// an error to be returned. +func (d *PositiveDurationOpt) Set(s string) error { + err := d.DurationOpt.Set(s) + if err != nil { + return err + } + if *d.DurationOpt.value < 0 { + return fmt.Errorf("duration cannot be negative") + } + return nil +} + +// DurationOpt is an option type for time.Duration that uses a pointer. This +// allows us to get nil values outside, instead of defaulting to 0 +type DurationOpt struct { + value *time.Duration +} + +// Set a new value on the option +func (d *DurationOpt) Set(s string) error { + v, err := time.ParseDuration(s) + d.value = &v + return err +} + +// Type returns the type of this option, which will be displayed in `--help` output +func (d *DurationOpt) Type() string { + return "duration" +} + +// String returns a string repr of this option +func (d *DurationOpt) String() string { + if d.value != nil { + return d.value.String() + } + return "" +} + +// Value returns the time.Duration +func (d *DurationOpt) Value() *time.Duration { + return d.value +} + +// Uint64Opt represents a uint64. +type Uint64Opt struct { + value *uint64 +} + +// Set a new value on the option +func (i *Uint64Opt) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + i.value = &v + return err +} + +// Type returns the type of this option, which will be displayed in `--help` output +func (i *Uint64Opt) Type() string { + return "uint" +} + +// String returns a string repr of this option +func (i *Uint64Opt) String() string { + if i.value != nil { + return fmt.Sprintf("%v", *i.value) + } + return "" +} + +// Value returns the uint64 +func (i *Uint64Opt) Value() *uint64 { + return i.value +} + +type floatValue float32 + +func (f *floatValue) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = floatValue(v) + return err +} + +func (f *floatValue) Type() string { + return "float" +} + +func (f *floatValue) String() string { + return strconv.FormatFloat(float64(*f), 'g', -1, 32) +} + +func (f *floatValue) Value() float32 { + return float32(*f) +} + +// SecretRequestSpec is a type for requesting secrets +type SecretRequestSpec struct { + source string + target string + uid string + gid string + mode os.FileMode +} + +// SecretOpt is a Value type for parsing secrets +type SecretOpt struct { + values []*SecretRequestSpec +} + +// Set a new secret value +func (o *SecretOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + spec := &SecretRequestSpec{ + source: "", + target: "", + uid: "0", + gid: "0", + mode: 0444, + } + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "source", "src": + spec.source = value + case "target": + tDir, _ := filepath.Split(value) + if tDir != "" { + return fmt.Errorf("target must not have a path") + } + spec.target = value + case "uid": + spec.uid = value + case "gid": + spec.gid = value + case "mode": + m, err := strconv.ParseUint(value, 0, 32) + if err != nil { + return fmt.Errorf("invalid mode specified: %v", err) + } + + spec.mode = os.FileMode(m) + default: + return fmt.Errorf("invalid field in secret request: %s", key) + } + } + + if spec.source == "" { + return fmt.Errorf("source is required") + } + + o.values = append(o.values, spec) + return nil +} + +// Type returns the type of this option +func (o *SecretOpt) Type() string { + return "secret" +} + +// String returns a string repr of this option +func (o *SecretOpt) String() string { + secrets := []string{} + for _, secret := range o.values { + repr := fmt.Sprintf("%s -> %s", secret.source, secret.target) + secrets = append(secrets, repr) + } + return strings.Join(secrets, ", ") +} + +// Value returns the secret requests +func (o *SecretOpt) Value() []*SecretRequestSpec { + return o.values +} + +type updateOptions struct { + parallelism uint64 + delay time.Duration + monitor time.Duration + onFailure string + maxFailureRatio floatValue +} + +type resourceOptions struct { + limitCPU opts.NanoCPUs + limitMemBytes memBytes + resCPU opts.NanoCPUs + resMemBytes memBytes +} + +func (r *resourceOptions) ToResourceRequirements() *swarm.ResourceRequirements { + return &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: r.limitCPU.Value(), + MemoryBytes: r.limitMemBytes.Value(), + }, + Reservations: &swarm.Resources{ + NanoCPUs: r.resCPU.Value(), + MemoryBytes: r.resMemBytes.Value(), + }, + } +} + +type restartPolicyOptions struct { + condition string + delay DurationOpt + maxAttempts Uint64Opt + window DurationOpt +} + +func (r *restartPolicyOptions) ToRestartPolicy() *swarm.RestartPolicy { + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyCondition(r.condition), + Delay: r.delay.Value(), + MaxAttempts: r.maxAttempts.Value(), + Window: r.window.Value(), + } +} + +func convertNetworks(networks []string) []swarm.NetworkAttachmentConfig { + nets := []swarm.NetworkAttachmentConfig{} + for _, network := range networks { + nets = append(nets, swarm.NetworkAttachmentConfig{Target: network}) + } + return nets +} + +type endpointOptions struct { + mode string + publishPorts opts.PortOpt +} + +func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec { + return &swarm.EndpointSpec{ + Mode: swarm.ResolutionMode(strings.ToLower(e.mode)), + Ports: e.publishPorts.Value(), + } +} + +type logDriverOptions struct { + name string + opts opts.ListOpts +} + +func newLogDriverOptions() logDriverOptions { + return logDriverOptions{opts: opts.NewListOpts(runconfigopts.ValidateEnv)} +} + +func (ldo *logDriverOptions) toLogDriver() *swarm.Driver { + if ldo.name == "" { + return nil + } + + // set the log driver only if specified. + return &swarm.Driver{ + Name: ldo.name, + Options: runconfigopts.ConvertKVStringsToMap(ldo.opts.GetAll()), + } +} + +type healthCheckOptions struct { + cmd string + interval PositiveDurationOpt + timeout PositiveDurationOpt + retries int + noHealthcheck bool +} + +func (opts *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) { + var healthConfig *container.HealthConfig + haveHealthSettings := opts.cmd != "" || + opts.interval.Value() != nil || + opts.timeout.Value() != nil || + opts.retries != 0 + if opts.noHealthcheck { + if haveHealthSettings { + return nil, fmt.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + healthConfig = &container.HealthConfig{Test: []string{"NONE"}} + } else if haveHealthSettings { + var test []string + if opts.cmd != "" { + test = []string{"CMD-SHELL", opts.cmd} + } + var interval, timeout time.Duration + if ptr := opts.interval.Value(); ptr != nil { + interval = *ptr + } + if ptr := opts.timeout.Value(); ptr != nil { + timeout = *ptr + } + healthConfig = &container.HealthConfig{ + Test: test, + Interval: interval, + Timeout: timeout, + Retries: opts.retries, + } + } + return healthConfig, nil +} + +// ValidatePort validates a string is in the expected format for a port definition +func ValidatePort(value string) (string, error) { + portMappings, err := nat.ParsePortSpec(value) + for _, portMapping := range portMappings { + if portMapping.Binding.HostIP != "" { + return "", fmt.Errorf("HostIP is not supported by a service.") + } + } + return value, err +} + +// convertExtraHostsToSwarmHosts converts an array of extra hosts in cli +// : +// into a swarmkit host format: +// IP_address canonical_hostname [aliases...] +// This assumes input value (:) has already been validated +func convertExtraHostsToSwarmHosts(extraHosts []string) []string { + hosts := []string{} + for _, extraHost := range extraHosts { + parts := strings.SplitN(extraHost, ":", 2) + hosts = append(hosts, fmt.Sprintf("%s %s", parts[1], parts[0])) + } + return hosts +} + +type serviceOptions struct { + name string + labels opts.ListOpts + containerLabels opts.ListOpts + image string + args []string + hostname string + env opts.ListOpts + envFile opts.ListOpts + workdir string + user string + groups opts.ListOpts + tty bool + mounts opts.MountOpt + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOption opts.ListOpts + hosts opts.ListOpts + + resources resourceOptions + stopGrace DurationOpt + + replicas Uint64Opt + mode string + + restartPolicy restartPolicyOptions + constraints opts.ListOpts + update updateOptions + networks opts.ListOpts + endpoint endpointOptions + + registryAuth bool + + logDriver logDriverOptions + + healthcheck healthCheckOptions + secrets opts.SecretOpt +} + +func newServiceOptions() *serviceOptions { + return &serviceOptions{ + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + constraints: opts.NewListOpts(nil), + containerLabels: opts.NewListOpts(runconfigopts.ValidateEnv), + env: opts.NewListOpts(runconfigopts.ValidateEnv), + envFile: opts.NewListOpts(nil), + groups: opts.NewListOpts(nil), + logDriver: newLogDriverOptions(), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOption: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + hosts: opts.NewListOpts(runconfigopts.ValidateExtraHost), + networks: opts.NewListOpts(nil), + } +} + +func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) { + var service swarm.ServiceSpec + + envVariables, err := runconfigopts.ReadKVStrings(opts.envFile.GetAll(), opts.env.GetAll()) + if err != nil { + return service, err + } + + currentEnv := make([]string, 0, len(envVariables)) + for _, env := range envVariables { // need to process each var, in order + k := strings.SplitN(env, "=", 2)[0] + for i, current := range currentEnv { // remove duplicates + if current == env { + continue // no update required, may hide this behind flag to preserve order of envVariables + } + if strings.HasPrefix(current, k+"=") { + currentEnv = append(currentEnv[:i], currentEnv[i+1:]...) + } + } + currentEnv = append(currentEnv, env) + } + + service = swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: opts.name, + Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: opts.image, + Args: opts.args, + Env: currentEnv, + Hostname: opts.hostname, + Labels: runconfigopts.ConvertKVStringsToMap(opts.containerLabels.GetAll()), + Dir: opts.workdir, + User: opts.user, + Groups: opts.groups.GetAll(), + TTY: opts.tty, + Mounts: opts.mounts.Value(), + DNSConfig: &swarm.DNSConfig{ + Nameservers: opts.dns.GetAll(), + Search: opts.dnsSearch.GetAll(), + Options: opts.dnsOption.GetAll(), + }, + Hosts: convertExtraHostsToSwarmHosts(opts.hosts.GetAll()), + StopGracePeriod: opts.stopGrace.Value(), + Secrets: nil, + }, + Networks: convertNetworks(opts.networks.GetAll()), + Resources: opts.resources.ToResourceRequirements(), + RestartPolicy: opts.restartPolicy.ToRestartPolicy(), + Placement: &swarm.Placement{ + Constraints: opts.constraints.GetAll(), + }, + LogDriver: opts.logDriver.toLogDriver(), + }, + Networks: convertNetworks(opts.networks.GetAll()), + Mode: swarm.ServiceMode{}, + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: opts.update.parallelism, + Delay: opts.update.delay, + Monitor: opts.update.monitor, + FailureAction: opts.update.onFailure, + MaxFailureRatio: opts.update.maxFailureRatio.Value(), + }, + EndpointSpec: opts.endpoint.ToEndpointSpec(), + } + + healthConfig, err := opts.healthcheck.toHealthConfig() + if err != nil { + return service, err + } + service.TaskTemplate.ContainerSpec.Healthcheck = healthConfig + + switch opts.mode { + case "global": + if opts.replicas.Value() != nil { + return service, fmt.Errorf("replicas can only be used with replicated mode") + } + + service.Mode.Global = &swarm.GlobalService{} + case "replicated": + service.Mode.Replicated = &swarm.ReplicatedService{ + Replicas: opts.replicas.Value(), + } + default: + return service, fmt.Errorf("Unknown mode: %s", opts.mode) + } + return service, nil +} + +// addServiceFlags adds all flags that are common to both `create` and `update`. +// Any flags that are not common are added separately in the individual command +func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) { + flags := cmd.Flags() + + flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container") + flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: [:])") + flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname") + + flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs") + flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory") + flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs") + flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory") + flags.Var(&opts.stopGrace, flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)") + + flags.Var(&opts.replicas, flagReplicas, "Number of tasks") + + flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", "Restart when condition is met (none, on-failure, or any)") + flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)") + flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up") + flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)") + + flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, 1, "Maximum number of tasks updated simultaneously (0 to update all at once)") + flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates (ns|us|ms|s|m|h) (default 0s)") + flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, time.Duration(0), "Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s)") + flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "pause", "Action on update failure (pause|continue)") + flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update") + + flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode (vip or dnsrr)") + + flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents") + + flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service") + flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options") + + flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health") + flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check (ns|us|ms|s|m|h)") + flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ns|us|ms|s|m|h)") + flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy") + flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK") + + flags.BoolVarP(&opts.tty, flagTTY, "t", false, "Allocate a pseudo-TTY") +} + +const ( + flagConstraint = "constraint" + flagConstraintRemove = "constraint-rm" + flagConstraintAdd = "constraint-add" + flagContainerLabel = "container-label" + flagContainerLabelRemove = "container-label-rm" + flagContainerLabelAdd = "container-label-add" + flagDNS = "dns" + flagDNSRemove = "dns-rm" + flagDNSAdd = "dns-add" + flagDNSOption = "dns-option" + flagDNSOptionRemove = "dns-option-rm" + flagDNSOptionAdd = "dns-option-add" + flagDNSSearch = "dns-search" + flagDNSSearchRemove = "dns-search-rm" + flagDNSSearchAdd = "dns-search-add" + flagEndpointMode = "endpoint-mode" + flagHost = "host" + flagHostAdd = "host-add" + flagHostRemove = "host-rm" + flagHostname = "hostname" + flagEnv = "env" + flagEnvFile = "env-file" + flagEnvRemove = "env-rm" + flagEnvAdd = "env-add" + flagGroup = "group" + flagGroupAdd = "group-add" + flagGroupRemove = "group-rm" + flagLabel = "label" + flagLabelRemove = "label-rm" + flagLabelAdd = "label-add" + flagLimitCPU = "limit-cpu" + flagLimitMemory = "limit-memory" + flagMode = "mode" + flagMount = "mount" + flagMountRemove = "mount-rm" + flagMountAdd = "mount-add" + flagName = "name" + flagNetwork = "network" + flagPublish = "publish" + flagPublishRemove = "publish-rm" + flagPublishAdd = "publish-add" + flagReplicas = "replicas" + flagReserveCPU = "reserve-cpu" + flagReserveMemory = "reserve-memory" + flagRestartCondition = "restart-condition" + flagRestartDelay = "restart-delay" + flagRestartMaxAttempts = "restart-max-attempts" + flagRestartWindow = "restart-window" + flagStopGracePeriod = "stop-grace-period" + flagTTY = "tty" + flagUpdateDelay = "update-delay" + flagUpdateFailureAction = "update-failure-action" + flagUpdateMaxFailureRatio = "update-max-failure-ratio" + flagUpdateMonitor = "update-monitor" + flagUpdateParallelism = "update-parallelism" + flagUser = "user" + flagWorkdir = "workdir" + flagRegistryAuth = "with-registry-auth" + flagLogDriver = "log-driver" + flagLogOpt = "log-opt" + flagHealthCmd = "health-cmd" + flagHealthInterval = "health-interval" + flagHealthRetries = "health-retries" + flagHealthTimeout = "health-timeout" + flagNoHealthcheck = "no-healthcheck" + flagSecret = "secret" + flagSecretAdd = "secret-add" + flagSecretRemove = "secret-rm" +) diff --git a/vendor/github.com/moby/moby/cli/command/service/opts_test.go b/vendor/github.com/moby/moby/cli/command/service/opts_test.go new file mode 100644 index 0000000..78b956a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/opts_test.go @@ -0,0 +1,107 @@ +package service + +import ( + "reflect" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestMemBytesString(t *testing.T) { + var mem memBytes = 1048576 + assert.Equal(t, mem.String(), "1 MiB") +} + +func TestMemBytesSetAndValue(t *testing.T) { + var mem memBytes + assert.NilError(t, mem.Set("5kb")) + assert.Equal(t, mem.Value(), int64(5120)) +} + +func TestNanoCPUsString(t *testing.T) { + var cpus opts.NanoCPUs = 6100000000 + assert.Equal(t, cpus.String(), "6.100") +} + +func TestNanoCPUsSetAndValue(t *testing.T) { + var cpus opts.NanoCPUs + assert.NilError(t, cpus.Set("0.35")) + assert.Equal(t, cpus.Value(), int64(350000000)) +} + +func TestDurationOptString(t *testing.T) { + dur := time.Duration(300 * 10e8) + duration := DurationOpt{value: &dur} + assert.Equal(t, duration.String(), "5m0s") +} + +func TestDurationOptSetAndValue(t *testing.T) { + var duration DurationOpt + assert.NilError(t, duration.Set("300s")) + assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) + assert.NilError(t, duration.Set("-300s")) + assert.Equal(t, *duration.Value(), time.Duration(-300*10e8)) +} + +func TestPositiveDurationOptSetAndValue(t *testing.T) { + var duration PositiveDurationOpt + assert.NilError(t, duration.Set("300s")) + assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) + assert.Error(t, duration.Set("-300s"), "cannot be negative") +} + +func TestUint64OptString(t *testing.T) { + value := uint64(2345678) + opt := Uint64Opt{value: &value} + assert.Equal(t, opt.String(), "2345678") + + opt = Uint64Opt{} + assert.Equal(t, opt.String(), "") +} + +func TestUint64OptSetAndValue(t *testing.T) { + var opt Uint64Opt + assert.NilError(t, opt.Set("14445")) + assert.Equal(t, *opt.Value(), uint64(14445)) +} + +func TestHealthCheckOptionsToHealthConfig(t *testing.T) { + dur := time.Second + opt := healthCheckOptions{ + cmd: "curl", + interval: PositiveDurationOpt{DurationOpt{value: &dur}}, + timeout: PositiveDurationOpt{DurationOpt{value: &dur}}, + retries: 10, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ + Test: []string{"CMD-SHELL", "curl"}, + Interval: time.Second, + Timeout: time.Second, + Retries: 10, + }), true) +} + +func TestHealthCheckOptionsToHealthConfigNoHealthcheck(t *testing.T) { + opt := healthCheckOptions{ + noHealthcheck: true, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ + Test: []string{"NONE"}, + }), true) +} + +func TestHealthCheckOptionsToHealthConfigConflict(t *testing.T) { + opt := healthCheckOptions{ + cmd: "curl", + noHealthcheck: true, + } + _, err := opt.toHealthConfig() + assert.Error(t, err, "--no-healthcheck conflicts with --health-* options") +} diff --git a/vendor/github.com/moby/moby/cli/command/service/parse.go b/vendor/github.com/moby/moby/cli/command/service/parse.go new file mode 100644 index 0000000..ce9b454 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/parse.go @@ -0,0 +1,68 @@ +package service + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +// ParseSecrets retrieves the secrets from the requested names and converts +// them to secret references to use with the spec +func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*types.SecretRequestOption) ([]*swarmtypes.SecretReference, error) { + secretRefs := make(map[string]*swarmtypes.SecretReference) + ctx := context.Background() + + for _, secret := range requestedSecrets { + if _, exists := secretRefs[secret.Target]; exists { + return nil, fmt.Errorf("duplicate secret target for %s not allowed", secret.Source) + } + secretRef := &swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + Name: secret.Target, + UID: secret.UID, + GID: secret.GID, + Mode: secret.Mode, + }, + SecretName: secret.Source, + } + + secretRefs[secret.Target] = secretRef + } + + args := filters.NewArgs() + for _, s := range secretRefs { + args.Add("names", s.SecretName) + } + + secrets, err := client.SecretList(ctx, types.SecretListOptions{ + Filters: args, + }) + if err != nil { + return nil, err + } + + foundSecrets := make(map[string]string) + for _, secret := range secrets { + foundSecrets[secret.Spec.Annotations.Name] = secret.ID + } + + addedSecrets := []*swarmtypes.SecretReference{} + + for _, ref := range secretRefs { + id, ok := foundSecrets[ref.SecretName] + if !ok { + return nil, fmt.Errorf("secret not found: %s", ref.SecretName) + } + + // set the id for the ref to properly assign in swarm + // since swarm needs the ID instead of the name + ref.SecretID = id + addedSecrets = append(addedSecrets, ref) + } + + return addedSecrets, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/service/ps.go b/vendor/github.com/moby/moby/cli/command/service/ps.go new file mode 100644 index 0000000..cf94ad7 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/ps.go @@ -0,0 +1,76 @@ +package service + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/cli/command/node" + "github.com/docker/docker/cli/command/task" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type psOptions struct { + serviceID string + quiet bool + noResolve bool + noTrunc bool + filter opts.FilterOpt +} + +func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] SERVICE", + Short: "List the tasks of a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.serviceID = args[0] + return runPS(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display task IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPS(dockerCli *command.DockerCli, opts psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + service, _, err := client.ServiceInspectWithRaw(ctx, opts.serviceID) + if err != nil { + return err + } + + filter := opts.filter.Value() + filter.Add("service", service.ID) + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + nodeReference, err := node.Reference(ctx, client, nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", nodeReference) + } + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + if opts.quiet { + return task.PrintQuiet(dockerCli, tasks) + } + return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc) +} diff --git a/vendor/github.com/moby/moby/cli/command/service/remove.go b/vendor/github.com/moby/moby/cli/command/service/remove.go new file mode 100644 index 0000000..c3fbbab --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/remove.go @@ -0,0 +1,47 @@ +package service + +import ( + "fmt" + "strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + + cmd := &cobra.Command{ + Use: "rm SERVICE [SERVICE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } + cmd.Flags() + + return cmd +} + +func runRemove(dockerCli *command.DockerCli, sids []string) error { + client := dockerCli.Client() + + ctx := context.Background() + + var errs []string + for _, sid := range sids { + err := client.ServiceRemove(ctx, sid) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", sid) + } + if len(errs) > 0 { + return fmt.Errorf(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/service/scale.go b/vendor/github.com/moby/moby/cli/command/service/scale.go new file mode 100644 index 0000000..cf89e90 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/scale.go @@ -0,0 +1,96 @@ +package service + +import ( + "fmt" + "strconv" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newScaleCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]", + Short: "Scale one or multiple replicated services", + Args: scaleArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runScale(dockerCli, args) + }, + } +} + +func scaleArgs(cmd *cobra.Command, args []string) error { + if err := cli.RequiresMinArgs(1)(cmd, args); err != nil { + return err + } + for _, arg := range args { + if parts := strings.SplitN(arg, "=", 2); len(parts) != 2 { + return fmt.Errorf( + "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s", + arg, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } + } + return nil +} + +func runScale(dockerCli *command.DockerCli, args []string) error { + var errors []string + for _, arg := range args { + parts := strings.SplitN(arg, "=", 2) + serviceID, scaleStr := parts[0], parts[1] + + // validate input arg scale number + scale, err := strconv.ParseUint(scaleStr, 10, 64) + if err != nil { + errors = append(errors, fmt.Sprintf("%s: invalid replicas value %s: %v", serviceID, scaleStr, err)) + continue + } + + if err := runServiceScale(dockerCli, serviceID, scale); err != nil { + errors = append(errors, fmt.Sprintf("%s: %v", serviceID, err)) + } + } + + if len(errors) == 0 { + return nil + } + return fmt.Errorf(strings.Join(errors, "\n")) +} + +func runServiceScale(dockerCli *command.DockerCli, serviceID string, scale uint64) error { + client := dockerCli.Client() + ctx := context.Background() + + service, _, err := client.ServiceInspectWithRaw(ctx, serviceID) + if err != nil { + return err + } + + serviceMode := &service.Spec.Mode + if serviceMode.Replicated == nil { + return fmt.Errorf("scale can only be used with replicated mode") + } + + serviceMode.Replicated.Replicas = &scale + + response, err := client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s scaled to %d\n", serviceID, scale) + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/service/trust.go b/vendor/github.com/moby/moby/cli/command/service/trust.go new file mode 100644 index 0000000..052d49c --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/trust.go @@ -0,0 +1,96 @@ +package service + +import ( + "encoding/hex" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/trust" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/notary/tuf/data" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func resolveServiceImageDigest(dockerCli *command.DockerCli, service *swarm.ServiceSpec) error { + if !command.IsTrusted() { + // Digests are resolved by the daemon when not using content + // trust. + return nil + } + + image := service.TaskTemplate.ContainerSpec.Image + + // We only attempt to resolve the digest if the reference + // could be parsed as a digest reference. Specifying an image ID + // is valid but not resolvable. There is no warning message for + // an image ID because it's valid to use one. + if _, err := digest.ParseDigest(image); err == nil { + return nil + } + + ref, err := reference.ParseNamed(image) + if err != nil { + return fmt.Errorf("Could not parse image reference %s", service.TaskTemplate.ContainerSpec.Image) + } + if _, ok := ref.(reference.Canonical); !ok { + ref = reference.WithDefaultTag(ref) + + taggedRef, ok := ref.(reference.NamedTagged) + if !ok { + // This should never happen because a reference either + // has a digest, or WithDefaultTag would give it a tag. + return errors.New("Failed to resolve image digest using content trust: reference is missing a tag") + } + + resolvedImage, err := trustedResolveDigest(context.Background(), dockerCli, taggedRef) + if err != nil { + return fmt.Errorf("Failed to resolve image digest using content trust: %v", err) + } + logrus.Debugf("resolved image tag to %s using content trust", resolvedImage.String()) + service.TaskTemplate.ContainerSpec.Image = resolvedImage.String() + } + return nil +} + +func trustedResolveDigest(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged) (distreference.Canonical, error) { + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return nil, err + } + + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + return nil, errors.Wrap(err, "error establishing connection to trust repository") + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.FullName(), err) + } + // Only get the tag if it's in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.String())) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + h, ok := t.Hashes["sha256"] + if !ok { + return nil, errors.New("no valid hash, expecting sha256") + } + + dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h)) + + // Using distribution reference package to make sure that adding a + // digest does not erase the tag. When the two reference packages + // are unified, this will no longer be an issue. + return distreference.WithDigest(ref, dgst) +} diff --git a/vendor/github.com/moby/moby/cli/command/service/update.go b/vendor/github.com/moby/moby/cli/command/service/update.go new file mode 100644 index 0000000..d56de10 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/update.go @@ -0,0 +1,849 @@ +package service + +import ( + "fmt" + "sort" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" + shlex "github.com/flynn-archive/go-shlex" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + serviceOpts := newServiceOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] SERVICE", + Short: "Update a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), args[0]) + }, + } + + flags := cmd.Flags() + flags.String("image", "", "Service image tag") + flags.String("args", "", "Service command args") + flags.Bool("rollback", false, "Rollback to previous specification") + flags.Bool("force", false, "Force update even if no changes require it") + addServiceFlags(cmd, serviceOpts) + + flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") + flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container") + flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") + flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key") + flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") + // flags.Var(newListOptsVar().WithValidator(validatePublishRemove), flagPublishRemove, "Remove a published port by its target port") + flags.Var(&opts.PortOpt{}, flagPublishRemove, "Remove a published port by its target port") + flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") + flags.Var(newListOptsVar(), flagDNSRemove, "Remove a custom DNS server") + flags.Var(newListOptsVar(), flagDNSOptionRemove, "Remove a DNS option") + flags.Var(newListOptsVar(), flagDNSSearchRemove, "Remove a DNS search domain") + flags.Var(newListOptsVar(), flagHostRemove, "Remove a custom host-to-IP mapping (host:ip)") + flags.Var(&serviceOpts.labels, flagLabelAdd, "Add or update a service label") + flags.Var(&serviceOpts.containerLabels, flagContainerLabelAdd, "Add or update a container label") + flags.Var(&serviceOpts.env, flagEnvAdd, "Add or update an environment variable") + flags.Var(newListOptsVar(), flagSecretRemove, "Remove a secret") + flags.Var(&serviceOpts.secrets, flagSecretAdd, "Add or update a secret on a service") + flags.Var(&serviceOpts.mounts, flagMountAdd, "Add or update a mount on a service") + flags.Var(&serviceOpts.constraints, flagConstraintAdd, "Add or update a placement constraint") + flags.Var(&serviceOpts.endpoint.publishPorts, flagPublishAdd, "Add or update a published port") + flags.Var(&serviceOpts.groups, flagGroupAdd, "Add an additional supplementary user group to the container") + flags.Var(&serviceOpts.dns, flagDNSAdd, "Add or update a custom DNS server") + flags.Var(&serviceOpts.dnsOption, flagDNSOptionAdd, "Add or update a DNS option") + flags.Var(&serviceOpts.dnsSearch, flagDNSSearchAdd, "Add or update a custom DNS search domain") + flags.Var(&serviceOpts.hosts, flagHostAdd, "Add or update a custom host-to-IP mapping (host:ip)") + + return cmd +} + +func newListOptsVar() *opts.ListOpts { + return opts.NewListOptsRef(&[]string{}, nil) +} + +func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, serviceID string) error { + apiClient := dockerCli.Client() + ctx := context.Background() + updateOpts := types.ServiceUpdateOptions{} + + service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID) + if err != nil { + return err + } + + rollback, err := flags.GetBool("rollback") + if err != nil { + return err + } + + spec := &service.Spec + if rollback { + spec = service.PreviousSpec + if spec == nil { + return fmt.Errorf("service does not have a previous specification to roll back to") + } + } + + err = updateService(flags, spec) + if err != nil { + return err + } + + if flags.Changed("image") { + if err := resolveServiceImageDigest(dockerCli, spec); err != nil { + return err + } + } + + updatedSecrets, err := getUpdatedSecrets(apiClient, flags, spec.TaskTemplate.ContainerSpec.Secrets) + if err != nil { + return err + } + + spec.TaskTemplate.ContainerSpec.Secrets = updatedSecrets + + // only send auth if flag was set + sendAuth, err := flags.GetBool(flagRegistryAuth) + if err != nil { + return err + } + if sendAuth { + // Retrieve encoded auth token from the image reference + // This would be the old image if it didn't change in this update + image := spec.TaskTemplate.ContainerSpec.Image + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err != nil { + return err + } + updateOpts.EncodedRegistryAuth = encodedAuth + } else if rollback { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec + } else { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec + } + + response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) + return nil +} + +func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { + updateString := func(flag string, field *string) { + if flags.Changed(flag) { + *field, _ = flags.GetString(flag) + } + } + + updateInt64Value := func(flag string, field *int64) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(int64Value).Value() + } + } + + updateFloatValue := func(flag string, field *float32) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(*floatValue).Value() + } + } + + updateDuration := func(flag string, field *time.Duration) { + if flags.Changed(flag) { + *field, _ = flags.GetDuration(flag) + } + } + + updateDurationOpt := func(flag string, field **time.Duration) { + if flags.Changed(flag) { + val := *flags.Lookup(flag).Value.(*DurationOpt).Value() + *field = &val + } + } + + updateUint64 := func(flag string, field *uint64) { + if flags.Changed(flag) { + *field, _ = flags.GetUint64(flag) + } + } + + updateUint64Opt := func(flag string, field **uint64) { + if flags.Changed(flag) { + val := *flags.Lookup(flag).Value.(*Uint64Opt).Value() + *field = &val + } + } + + cspec := &spec.TaskTemplate.ContainerSpec + task := &spec.TaskTemplate + + taskResources := func() *swarm.ResourceRequirements { + if task.Resources == nil { + task.Resources = &swarm.ResourceRequirements{} + } + return task.Resources + } + + updateLabels(flags, &spec.Labels) + updateContainerLabels(flags, &cspec.Labels) + updateString("image", &cspec.Image) + updateStringToSlice(flags, "args", &cspec.Args) + updateEnvironment(flags, &cspec.Env) + updateString(flagWorkdir, &cspec.Dir) + updateString(flagUser, &cspec.User) + updateString(flagHostname, &cspec.Hostname) + if err := updateMounts(flags, &cspec.Mounts); err != nil { + return err + } + + if flags.Changed(flagLimitCPU) || flags.Changed(flagLimitMemory) { + taskResources().Limits = &swarm.Resources{} + updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) + updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) + } + if flags.Changed(flagReserveCPU) || flags.Changed(flagReserveMemory) { + taskResources().Reservations = &swarm.Resources{} + updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) + updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) + } + + updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod) + + if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { + if task.RestartPolicy == nil { + task.RestartPolicy = &swarm.RestartPolicy{} + } + + if flags.Changed(flagRestartCondition) { + value, _ := flags.GetString(flagRestartCondition) + task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) + } + updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay) + updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts) + updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window) + } + + if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { + if task.Placement == nil { + task.Placement = &swarm.Placement{} + } + updatePlacement(flags, task.Placement) + } + + if err := updateReplicas(flags, &spec.Mode); err != nil { + return err + } + + if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio) { + if spec.UpdateConfig == nil { + spec.UpdateConfig = &swarm.UpdateConfig{} + } + updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) + updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) + updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor) + updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction) + updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio) + } + + if flags.Changed(flagEndpointMode) { + value, _ := flags.GetString(flagEndpointMode) + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + spec.EndpointSpec.Mode = swarm.ResolutionMode(value) + } + + if anyChanged(flags, flagGroupAdd, flagGroupRemove) { + if err := updateGroups(flags, &cspec.Groups); err != nil { + return err + } + } + + if anyChanged(flags, flagPublishAdd, flagPublishRemove) { + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil { + return err + } + } + + if anyChanged(flags, flagDNSAdd, flagDNSRemove, flagDNSOptionAdd, flagDNSOptionRemove, flagDNSSearchAdd, flagDNSSearchRemove) { + if cspec.DNSConfig == nil { + cspec.DNSConfig = &swarm.DNSConfig{} + } + if err := updateDNSConfig(flags, &cspec.DNSConfig); err != nil { + return err + } + } + + if anyChanged(flags, flagHostAdd, flagHostRemove) { + if err := updateHosts(flags, &cspec.Hosts); err != nil { + return err + } + } + + if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil { + return err + } + + force, err := flags.GetBool("force") + if err != nil { + return err + } + + if force { + spec.TaskTemplate.ForceUpdate++ + } + + if err := updateHealthcheck(flags, cspec); err != nil { + return err + } + + if flags.Changed(flagTTY) { + tty, err := flags.GetBool(flagTTY) + if err != nil { + return err + } + cspec.TTY = tty + } + + return nil +} + +func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) error { + if !flags.Changed(flag) { + return nil + } + + value, _ := flags.GetString(flag) + valueSlice, err := shlex.Split(value) + *field = valueSlice + return err +} + +func anyChanged(flags *pflag.FlagSet, fields ...string) bool { + for _, flag := range fields { + if flags.Changed(flag) { + return true + } + } + return false +} + +func updatePlacement(flags *pflag.FlagSet, placement *swarm.Placement) { + if flags.Changed(flagConstraintAdd) { + values := flags.Lookup(flagConstraintAdd).Value.(*opts.ListOpts).GetAll() + placement.Constraints = append(placement.Constraints, values...) + } + toRemove := buildToRemoveSet(flags, flagConstraintRemove) + + newConstraints := []string{} + for _, constraint := range placement.Constraints { + if _, exists := toRemove[constraint]; !exists { + newConstraints = append(newConstraints, constraint) + } + } + // Sort so that result is predictable. + sort.Strings(newConstraints) + + placement.Constraints = newConstraints +} + +func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagContainerLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range runconfigopts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagContainerLabelRemove) { + toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } +} + +func updateLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range runconfigopts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagLabelRemove) { + toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } +} + +func updateEnvironment(flags *pflag.FlagSet, field *[]string) { + envSet := map[string]string{} + for _, v := range *field { + envSet[envKey(v)] = v + } + if flags.Changed(flagEnvAdd) { + value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts) + for _, v := range value.GetAll() { + envSet[envKey(v)] = v + } + } + + *field = []string{} + for _, v := range envSet { + *field = append(*field, v) + } + + toRemove := buildToRemoveSet(flags, flagEnvRemove) + *field = removeItems(*field, toRemove, envKey) +} + +func getUpdatedSecrets(apiClient client.SecretAPIClient, flags *pflag.FlagSet, secrets []*swarm.SecretReference) ([]*swarm.SecretReference, error) { + if flags.Changed(flagSecretAdd) { + values := flags.Lookup(flagSecretAdd).Value.(*opts.SecretOpt).Value() + + addSecrets, err := ParseSecrets(apiClient, values) + if err != nil { + return nil, err + } + secrets = append(secrets, addSecrets...) + } + toRemove := buildToRemoveSet(flags, flagSecretRemove) + newSecrets := []*swarm.SecretReference{} + for _, secret := range secrets { + if _, exists := toRemove[secret.SecretName]; !exists { + newSecrets = append(newSecrets, secret) + } + } + + return newSecrets, nil +} + +func envKey(value string) string { + kv := strings.SplitN(value, "=", 2) + return kv[0] +} + +func itemKey(value string) string { + return value +} + +func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} { + var empty struct{} + toRemove := make(map[string]struct{}) + + if !flags.Changed(flag) { + return toRemove + } + + toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll() + for _, key := range toRemoveSlice { + toRemove[key] = empty + } + return toRemove +} + +func removeItems( + seq []string, + toRemove map[string]struct{}, + keyFunc func(string) string, +) []string { + newSeq := []string{} + for _, item := range seq { + if _, exists := toRemove[keyFunc(item)]; !exists { + newSeq = append(newSeq, item) + } + } + return newSeq +} + +type byMountSource []mounttypes.Mount + +func (m byMountSource) Len() int { return len(m) } +func (m byMountSource) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byMountSource) Less(i, j int) bool { + a, b := m[i], m[j] + + if a.Source == b.Source { + return a.Target < b.Target + } + + return a.Source < b.Source +} + +func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error { + + mountsByTarget := map[string]mounttypes.Mount{} + + if flags.Changed(flagMountAdd) { + values := flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value() + for _, mount := range values { + if _, ok := mountsByTarget[mount.Target]; ok { + return fmt.Errorf("duplicate mount target") + } + mountsByTarget[mount.Target] = mount + } + } + + // Add old list of mount points minus updated one. + for _, mount := range *mounts { + if _, ok := mountsByTarget[mount.Target]; !ok { + mountsByTarget[mount.Target] = mount + } + } + + newMounts := []mounttypes.Mount{} + + toRemove := buildToRemoveSet(flags, flagMountRemove) + + for _, mount := range mountsByTarget { + if _, exists := toRemove[mount.Target]; !exists { + newMounts = append(newMounts, mount) + } + } + sort.Sort(byMountSource(newMounts)) + *mounts = newMounts + return nil +} + +func updateGroups(flags *pflag.FlagSet, groups *[]string) error { + if flags.Changed(flagGroupAdd) { + values := flags.Lookup(flagGroupAdd).Value.(*opts.ListOpts).GetAll() + *groups = append(*groups, values...) + } + toRemove := buildToRemoveSet(flags, flagGroupRemove) + + newGroups := []string{} + for _, group := range *groups { + if _, exists := toRemove[group]; !exists { + newGroups = append(newGroups, group) + } + } + // Sort so that result is predictable. + sort.Strings(newGroups) + + *groups = newGroups + return nil +} + +func removeDuplicates(entries []string) []string { + hit := map[string]bool{} + newEntries := []string{} + for _, v := range entries { + if !hit[v] { + newEntries = append(newEntries, v) + hit[v] = true + } + } + return newEntries +} + +func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error { + newConfig := &swarm.DNSConfig{} + + nameservers := (*config).Nameservers + if flags.Changed(flagDNSAdd) { + values := flags.Lookup(flagDNSAdd).Value.(*opts.ListOpts).GetAll() + nameservers = append(nameservers, values...) + } + nameservers = removeDuplicates(nameservers) + toRemove := buildToRemoveSet(flags, flagDNSRemove) + for _, nameserver := range nameservers { + if _, exists := toRemove[nameserver]; !exists { + newConfig.Nameservers = append(newConfig.Nameservers, nameserver) + + } + } + // Sort so that result is predictable. + sort.Strings(newConfig.Nameservers) + + search := (*config).Search + if flags.Changed(flagDNSSearchAdd) { + values := flags.Lookup(flagDNSSearchAdd).Value.(*opts.ListOpts).GetAll() + search = append(search, values...) + } + search = removeDuplicates(search) + toRemove = buildToRemoveSet(flags, flagDNSSearchRemove) + for _, entry := range search { + if _, exists := toRemove[entry]; !exists { + newConfig.Search = append(newConfig.Search, entry) + } + } + // Sort so that result is predictable. + sort.Strings(newConfig.Search) + + options := (*config).Options + if flags.Changed(flagDNSOptionAdd) { + values := flags.Lookup(flagDNSOptionAdd).Value.(*opts.ListOpts).GetAll() + options = append(options, values...) + } + options = removeDuplicates(options) + toRemove = buildToRemoveSet(flags, flagDNSOptionRemove) + for _, option := range options { + if _, exists := toRemove[option]; !exists { + newConfig.Options = append(newConfig.Options, option) + } + } + // Sort so that result is predictable. + sort.Strings(newConfig.Options) + + *config = newConfig + return nil +} + +type byPortConfig []swarm.PortConfig + +func (r byPortConfig) Len() int { return len(r) } +func (r byPortConfig) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortConfig) Less(i, j int) bool { + // We convert PortConfig into `port/protocol`, e.g., `80/tcp` + // In updatePorts we already filter out with map so there is duplicate entries + return portConfigToString(&r[i]) < portConfigToString(&r[j]) +} + +func portConfigToString(portConfig *swarm.PortConfig) string { + protocol := portConfig.Protocol + mode := portConfig.PublishMode + return fmt.Sprintf("%v:%v/%s/%s", portConfig.PublishedPort, portConfig.TargetPort, protocol, mode) +} + +// FIXME(vdemeester) port to opts.PortOpt +// This validation is only used for `--publish-rm`. +// The `--publish-rm` takes: +// [/] (e.g., 80, 80/tcp, 53/udp) +func validatePublishRemove(val string) (string, error) { + proto, port := nat.SplitProtoPort(val) + if proto != "tcp" && proto != "udp" { + return "", fmt.Errorf("invalid protocol '%s' for %s", proto, val) + } + if strings.Contains(port, ":") { + return "", fmt.Errorf("invalid port format: '%s', should be [/] (e.g., 80, 80/tcp, 53/udp)", port) + } + if _, err := nat.ParsePort(port); err != nil { + return "", err + } + return val, nil +} + +func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error { + // The key of the map is `port/protocol`, e.g., `80/tcp` + portSet := map[string]swarm.PortConfig{} + + // Build the current list of portConfig + for _, entry := range *portConfig { + if _, ok := portSet[portConfigToString(&entry)]; !ok { + portSet[portConfigToString(&entry)] = entry + } + } + + newPorts := []swarm.PortConfig{} + + // Clean current ports + toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value() +portLoop: + for _, port := range portSet { + for _, pConfig := range toRemove { + if equalProtocol(port.Protocol, pConfig.Protocol) && + port.TargetPort == pConfig.TargetPort && + equalPublishMode(port.PublishMode, pConfig.PublishMode) { + continue portLoop + } + } + + newPorts = append(newPorts, port) + } + + // Check to see if there are any conflict in flags. + if flags.Changed(flagPublishAdd) { + ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value() + + for _, port := range ports { + if v, ok := portSet[portConfigToString(&port)]; ok { + if v != port { + fmt.Println("v", v) + return fmt.Errorf("conflicting port mapping between %v:%v/%s and %v:%v/%s", port.PublishedPort, port.TargetPort, port.Protocol, v.PublishedPort, v.TargetPort, v.Protocol) + } + continue + } + //portSet[portConfigToString(&port)] = port + newPorts = append(newPorts, port) + } + } + + // Sort the PortConfig to avoid unnecessary updates + sort.Sort(byPortConfig(newPorts)) + *portConfig = newPorts + return nil +} + +func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool { + return prot1 == prot2 || + (prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) || + (prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP) +} + +func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool { + return mode1 == mode2 || + (mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) || + (mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress) +} + +func equalPort(targetPort nat.Port, port swarm.PortConfig) bool { + return (string(port.Protocol) == targetPort.Proto() && + port.TargetPort == uint32(targetPort.Int())) +} + +func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error { + if !flags.Changed(flagReplicas) { + return nil + } + + if serviceMode == nil || serviceMode.Replicated == nil { + return fmt.Errorf("replicas can only be used with replicated mode") + } + serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value() + return nil +} + +func updateHosts(flags *pflag.FlagSet, hosts *[]string) error { + // Combine existing Hosts (in swarmkit format) with the host to add (convert to swarmkit format) + if flags.Changed(flagHostAdd) { + values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) + *hosts = append(*hosts, values...) + } + // Remove duplicate + *hosts = removeDuplicates(*hosts) + + keysToRemove := make(map[string]struct{}) + if flags.Changed(flagHostRemove) { + var empty struct{} + extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll() + for _, entry := range extraHostsToRemove { + key := strings.SplitN(entry, ":", 2)[0] + keysToRemove[key] = empty + } + } + + newHosts := []string{} + for _, entry := range *hosts { + // Since this is in swarmkit format, we need to find the key, which is canonical_hostname of: + // IP_address canonical_hostname [aliases...] + parts := strings.Fields(entry) + if len(parts) > 1 { + key := parts[1] + if _, exists := keysToRemove[key]; !exists { + newHosts = append(newHosts, entry) + } + } else { + newHosts = append(newHosts, entry) + } + } + + // Sort so that result is predictable. + sort.Strings(newHosts) + + *hosts = newHosts + return nil +} + +// updateLogDriver updates the log driver only if the log driver flag is set. +// All options will be replaced with those provided on the command line. +func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error { + if !flags.Changed(flagLogDriver) { + return nil + } + + name, err := flags.GetString(flagLogDriver) + if err != nil { + return err + } + + if name == "" { + return nil + } + + taskTemplate.LogDriver = &swarm.Driver{ + Name: name, + Options: runconfigopts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()), + } + + return nil +} + +func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) error { + if !anyChanged(flags, flagNoHealthcheck, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout) { + return nil + } + if containerSpec.Healthcheck == nil { + containerSpec.Healthcheck = &container.HealthConfig{} + } + noHealthcheck, err := flags.GetBool(flagNoHealthcheck) + if err != nil { + return err + } + if noHealthcheck { + if !anyChanged(flags, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout) { + containerSpec.Healthcheck = &container.HealthConfig{ + Test: []string{"NONE"}, + } + return nil + } + return fmt.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + if len(containerSpec.Healthcheck.Test) > 0 && containerSpec.Healthcheck.Test[0] == "NONE" { + containerSpec.Healthcheck.Test = nil + } + if flags.Changed(flagHealthInterval) { + val := *flags.Lookup(flagHealthInterval).Value.(*PositiveDurationOpt).Value() + containerSpec.Healthcheck.Interval = val + } + if flags.Changed(flagHealthTimeout) { + val := *flags.Lookup(flagHealthTimeout).Value.(*PositiveDurationOpt).Value() + containerSpec.Healthcheck.Timeout = val + } + if flags.Changed(flagHealthRetries) { + containerSpec.Healthcheck.Retries, _ = flags.GetInt(flagHealthRetries) + } + if flags.Changed(flagHealthCmd) { + cmd, _ := flags.GetString(flagHealthCmd) + if cmd != "" { + containerSpec.Healthcheck.Test = []string{"CMD-SHELL", cmd} + } else { + containerSpec.Healthcheck.Test = nil + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/service/update_test.go b/vendor/github.com/moby/moby/cli/command/service/update_test.go new file mode 100644 index 0000000..08fe248 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/service/update_test.go @@ -0,0 +1,384 @@ +package service + +import ( + "reflect" + "sort" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestUpdateServiceArgs(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("args", "the \"new args\"") + + spec := &swarm.ServiceSpec{} + cspec := &spec.TaskTemplate.ContainerSpec + cspec.Args = []string{"old", "args"} + + updateService(flags, spec) + assert.EqualStringSlice(t, cspec.Args, []string{"the", "new args"}) +} + +func TestUpdateLabels(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-add", "toadd=newlabel") + flags.Set("label-rm", "toremove") + + labels := map[string]string{ + "toremove": "thelabeltoremove", + "tokeep": "value", + } + + updateLabels(flags, &labels) + assert.Equal(t, len(labels), 2) + assert.Equal(t, labels["tokeep"], "value") + assert.Equal(t, labels["toadd"], "newlabel") +} + +func TestUpdateLabelsRemoveALabelThatDoesNotExist(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-rm", "dne") + + labels := map[string]string{"foo": "theoldlabel"} + updateLabels(flags, &labels) + assert.Equal(t, len(labels), 1) +} + +func TestUpdatePlacement(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("constraint-add", "node=toadd") + flags.Set("constraint-rm", "node!=toremove") + + placement := &swarm.Placement{ + Constraints: []string{"node!=toremove", "container=tokeep"}, + } + + updatePlacement(flags, placement) + assert.Equal(t, len(placement.Constraints), 2) + assert.Equal(t, placement.Constraints[0], "container=tokeep") + assert.Equal(t, placement.Constraints[1], "node=toadd") +} + +func TestUpdateEnvironment(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "toadd=newenv") + flags.Set("env-rm", "toremove") + + envs := []string{"toremove=theenvtoremove", "tokeep=value"} + + updateEnvironment(flags, &envs) + assert.Equal(t, len(envs), 2) + // Order has been removed in updateEnvironment (map) + sort.Strings(envs) + assert.Equal(t, envs[0], "toadd=newenv") + assert.Equal(t, envs[1], "tokeep=value") +} + +func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "foo=newenv") + flags.Set("env-add", "foo=dupe") + flags.Set("env-rm", "foo") + + envs := []string{"foo=value"} + + updateEnvironment(flags, &envs) + assert.Equal(t, len(envs), 0) +} + +func TestUpdateEnvironmentWithDuplicateKeys(t *testing.T) { + // Test case for #25404 + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "A=b") + + envs := []string{"A=c"} + + updateEnvironment(flags, &envs) + assert.Equal(t, len(envs), 1) + assert.Equal(t, envs[0], "A=b") +} + +func TestUpdateGroups(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("group-add", "wheel") + flags.Set("group-add", "docker") + flags.Set("group-rm", "root") + flags.Set("group-add", "foo") + flags.Set("group-rm", "docker") + + groups := []string{"bar", "root"} + + updateGroups(flags, &groups) + assert.Equal(t, len(groups), 3) + assert.Equal(t, groups[0], "bar") + assert.Equal(t, groups[1], "foo") + assert.Equal(t, groups[2], "wheel") +} + +func TestUpdateDNSConfig(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + + // IPv4, with duplicates + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "2.2.2.2") + flags.Set("dns-rm", "3.3.3.3") + flags.Set("dns-rm", "2.2.2.2") + // IPv6 + flags.Set("dns-add", "2001:db8:abc8::1") + // Invalid dns record + assert.Error(t, flags.Set("dns-add", "x.y.z.w"), "x.y.z.w is not an ip address") + + // domains with duplicates + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.org") + flags.Set("dns-search-rm", "example.org") + // Invalid dns search domain + assert.Error(t, flags.Set("dns-search-add", "example$com"), "example$com is not a valid domain") + + flags.Set("dns-option-add", "ndots:9") + flags.Set("dns-option-rm", "timeout:3") + + config := &swarm.DNSConfig{ + Nameservers: []string{"3.3.3.3", "5.5.5.5"}, + Search: []string{"localdomain"}, + Options: []string{"timeout:3"}, + } + + updateDNSConfig(flags, &config) + + assert.Equal(t, len(config.Nameservers), 3) + assert.Equal(t, config.Nameservers[0], "1.1.1.1") + assert.Equal(t, config.Nameservers[1], "2001:db8:abc8::1") + assert.Equal(t, config.Nameservers[2], "5.5.5.5") + + assert.Equal(t, len(config.Search), 2) + assert.Equal(t, config.Search[0], "example.com") + assert.Equal(t, config.Search[1], "localdomain") + + assert.Equal(t, len(config.Options), 1) + assert.Equal(t, config.Options[0], "ndots:9") +} + +func TestUpdateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol2,target=/toadd") + flags.Set("mount-rm", "/toremove") + + mounts := []mounttypes.Mount{ + {Target: "/toremove", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/tokeep", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Equal(t, len(mounts), 2) + assert.Equal(t, mounts[0].Target, "/toadd") + assert.Equal(t, mounts[1].Target, "/tokeep") + +} + +func TestUpdateMountsWithDuplicateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol4,target=/toadd") + + mounts := []mounttypes.Mount{ + {Target: "/tokeep1", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/toadd", Source: "vol2", Type: mounttypes.TypeBind}, + {Target: "/tokeep2", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Equal(t, len(mounts), 3) + assert.Equal(t, mounts[0].Target, "/tokeep1") + assert.Equal(t, mounts[1].Target, "/tokeep2") + assert.Equal(t, mounts[2].Target, "/toadd") +} + +func TestUpdatePorts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "1000:1000") + flags.Set("publish-rm", "333/udp") + + portConfigs := []swarm.PortConfig{ + {TargetPort: 333, Protocol: swarm.PortConfigProtocolUDP}, + {TargetPort: 555}, + } + + err := updatePorts(flags, &portConfigs) + assert.Equal(t, err, nil) + assert.Equal(t, len(portConfigs), 2) + // Do a sort to have the order (might have changed by map) + targetPorts := []int{int(portConfigs[0].TargetPort), int(portConfigs[1].TargetPort)} + sort.Ints(targetPorts) + assert.Equal(t, targetPorts[0], 555) + assert.Equal(t, targetPorts[1], 1000) +} + +func TestUpdatePortsDuplicate(t *testing.T) { + // Test case for #25375 + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "80:80") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 80, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.Equal(t, err, nil) + assert.Equal(t, len(portConfigs), 1) + assert.Equal(t, portConfigs[0].TargetPort, uint32(80)) +} + +func TestUpdateHealthcheckTable(t *testing.T) { + type test struct { + flags [][2]string + initial *container.HealthConfig + expected *container.HealthConfig + err string + } + testCases := []test{ + { + flags: [][2]string{{"no-healthcheck", "true"}}, + initial: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"NONE"}}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}}, + }, + { + flags: [][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + }, + { + flags: [][2]string{{"health-interval", "1m"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Interval: time.Minute}, + }, + { + flags: [][2]string{{"health-cmd", ""}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "0"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-interval", "10m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-timeout", "1m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + } + for i, c := range testCases { + flags := newUpdateCommand(nil).Flags() + for _, flag := range c.flags { + flags.Set(flag[0], flag[1]) + } + cspec := &swarm.ContainerSpec{ + Healthcheck: c.initial, + } + err := updateHealthcheck(flags, cspec) + if c.err != "" { + assert.Error(t, err, c.err) + } else { + assert.NilError(t, err) + if !reflect.DeepEqual(cspec.Healthcheck, c.expected) { + t.Errorf("incorrect result for test %d, expected health config:\n\t%#v\ngot:\n\t%#v", i, c.expected, cspec.Healthcheck) + } + } + } +} + +func TestUpdateHosts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "example.net:2.2.2.2") + flags.Set("host-add", "ipv6.net:2001:db8:abc8::1") + // remove with ipv6 should work + flags.Set("host-rm", "example.net:2001:db8:abc8::1") + // just hostname should work as well + flags.Set("host-rm", "example.net") + // bad format error + assert.Error(t, flags.Set("host-add", "$example.com$"), "bad format for add-host:") + + hosts := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2001:db8:abc8::1 example.net"} + + updateHosts(flags, &hosts) + assert.Equal(t, len(hosts), 3) + assert.Equal(t, hosts[0], "1.2.3.4 example.com") + assert.Equal(t, hosts[1], "2001:db8:abc8::1 ipv6.net") + assert.Equal(t, hosts[2], "4.3.2.1 example.org") +} + +func TestUpdatePortsRmWithProtocol(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "8081:81") + flags.Set("publish-add", "8082:82") + flags.Set("publish-rm", "80") + flags.Set("publish-rm", "81/tcp") + flags.Set("publish-rm", "82/udp") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.Equal(t, err, nil) + assert.Equal(t, len(portConfigs), 2) + assert.Equal(t, portConfigs[0].TargetPort, uint32(81)) + assert.Equal(t, portConfigs[1].TargetPort, uint32(82)) +} + +// FIXME(vdemeester) port to opts.PortOpt +func TestValidatePort(t *testing.T) { + validPorts := []string{"80/tcp", "80", "80/udp"} + invalidPorts := map[string]string{ + "9999999": "out of range", + "80:80/tcp": "invalid port format", + "53:53/udp": "invalid port format", + "80:80": "invalid port format", + "80/xyz": "invalid protocol", + "tcp": "invalid syntax", + "udp": "invalid syntax", + "": "invalid protocol", + } + for _, port := range validPorts { + _, err := validatePublishRemove(port) + assert.Equal(t, err, nil) + } + for port, e := range invalidPorts { + _, err := validatePublishRemove(port) + assert.Error(t, err, e) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/stack/cmd.go b/vendor/github.com/moby/moby/cli/command/stack/cmd.go new file mode 100644 index 0000000..860bfed --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/stack/cmd.go @@ -0,0 +1,35 @@ +package stack + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +// NewStackCommand returns a cobra command for `stack` subcommands +func NewStackCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "stack", + Short: "Manage Docker stacks", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + Tags: map[string]string{"version": "1.25"}, + } + cmd.AddCommand( + newDeployCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newServicesCommand(dockerCli), + newPsCommand(dockerCli), + ) + return cmd +} + +// NewTopLevelDeployCommand returns a command for `docker deploy` +func NewTopLevelDeployCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := newDeployCommand(dockerCli) + // Remove the aliases at the top level + cmd.Aliases = []string{} + cmd.Tags = map[string]string{"experimental": "", "version": "1.25"} + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/stack/common.go b/vendor/github.com/moby/moby/cli/command/stack/common.go new file mode 100644 index 0000000..72719f9 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/stack/common.go @@ -0,0 +1,60 @@ +package stack + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/compose/convert" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" +) + +func getStackFilter(namespace string) filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getStackFilterFromOpt(namespace string, opt opts.FilterOpt) filters.Args { + filter := opt.Value() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getAllStacksFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace) + return filter +} + +func getServices( + ctx context.Context, + apiclient client.APIClient, + namespace string, +) ([]swarm.Service, error) { + return apiclient.ServiceList( + ctx, + types.ServiceListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackNetworks( + ctx context.Context, + apiclient client.APIClient, + namespace string, +) ([]types.NetworkResource, error) { + return apiclient.NetworkList( + ctx, + types.NetworkListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackSecrets( + ctx context.Context, + apiclient client.APIClient, + namespace string, +) ([]swarm.Secret, error) { + return apiclient.SecretList( + ctx, + types.SecretListOptions{Filters: getStackFilter(namespace)}) +} diff --git a/vendor/github.com/moby/moby/cli/command/stack/deploy.go b/vendor/github.com/moby/moby/cli/command/stack/deploy.go new file mode 100644 index 0000000..980876a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/stack/deploy.go @@ -0,0 +1,357 @@ +package stack + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + secretcli "github.com/docker/docker/cli/command/secret" + "github.com/docker/docker/cli/compose/convert" + "github.com/docker/docker/cli/compose/loader" + composetypes "github.com/docker/docker/cli/compose/types" + dockerclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +const ( + defaultNetworkDriver = "overlay" +) + +type deployOptions struct { + bundlefile string + composefile string + namespace string + sendRegistryAuth bool +} + +func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts deployOptions + + cmd := &cobra.Command{ + Use: "deploy [OPTIONS] STACK", + Aliases: []string{"up"}, + Short: "Deploy a new stack or update an existing stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runDeploy(dockerCli, opts) + }, + } + + flags := cmd.Flags() + addBundlefileFlag(&opts.bundlefile, flags) + addComposefileFlag(&opts.composefile, flags) + addRegistryAuthFlag(&opts.sendRegistryAuth, flags) + return cmd +} + +func runDeploy(dockerCli *command.DockerCli, opts deployOptions) error { + ctx := context.Background() + + switch { + case opts.bundlefile == "" && opts.composefile == "": + return fmt.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).") + case opts.bundlefile != "" && opts.composefile != "": + return fmt.Errorf("You cannot specify both a bundle file and a Compose file.") + case opts.bundlefile != "": + return deployBundle(ctx, dockerCli, opts) + default: + return deployCompose(ctx, dockerCli, opts) + } +} + +// checkDaemonIsSwarmManager does an Info API call to verify that the daemon is +// a swarm manager. This is necessary because we must create networks before we +// create services, but the API call for creating a network does not return a +// proper status code when it can't create a network in the "global" scope. +func checkDaemonIsSwarmManager(ctx context.Context, dockerCli *command.DockerCli) error { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if !info.Swarm.ControlAvailable { + return errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + return nil +} + +func deployCompose(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { + configDetails, err := getConfigDetails(opts) + if err != nil { + return err + } + + config, err := loader.Load(configDetails) + if err != nil { + if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { + return fmt.Errorf("Compose file contains unsupported options:\n\n%s\n", + propertyWarnings(fpe.Properties)) + } + + return err + } + + unsupportedProperties := loader.GetUnsupportedProperties(configDetails) + if len(unsupportedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n", + strings.Join(unsupportedProperties, ", ")) + } + + deprecatedProperties := loader.GetDeprecatedProperties(configDetails) + if len(deprecatedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n", + propertyWarnings(deprecatedProperties)) + } + + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.namespace) + + serviceNetworks := getServicesDeclaredNetworks(config.Services) + networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks) + if err := validateExternalNetworks(ctx, dockerCli, externalNetworks); err != nil { + return err + } + if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { + return err + } + + secrets, err := convert.Secrets(namespace, config.Secrets) + if err != nil { + return err + } + if err := createSecrets(ctx, dockerCli, namespace, secrets); err != nil { + return err + } + + services, err := convert.Services(namespace, config, dockerCli.Client()) + if err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) +} +func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} { + serviceNetworks := map[string]struct{}{} + for _, serviceConfig := range serviceConfigs { + if len(serviceConfig.Networks) == 0 { + serviceNetworks["default"] = struct{}{} + continue + } + for network := range serviceConfig.Networks { + serviceNetworks[network] = struct{}{} + } + } + return serviceNetworks +} + +func propertyWarnings(properties map[string]string) string { + var msgs []string + for name, description := range properties { + msgs = append(msgs, fmt.Sprintf("%s: %s", name, description)) + } + sort.Strings(msgs) + return strings.Join(msgs, "\n\n") +} + +func getConfigDetails(opts deployOptions) (composetypes.ConfigDetails, error) { + var details composetypes.ConfigDetails + var err error + + details.WorkingDir, err = os.Getwd() + if err != nil { + return details, err + } + + configFile, err := getConfigFile(opts.composefile) + if err != nil { + return details, err + } + // TODO: support multiple files + details.ConfigFiles = []composetypes.ConfigFile{*configFile} + return details, nil +} + +func getConfigFile(filename string) (*composetypes.ConfigFile, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + config, err := loader.ParseYAML(bytes) + if err != nil { + return nil, err + } + return &composetypes.ConfigFile{ + Filename: filename, + Config: config, + }, nil +} + +func validateExternalNetworks( + ctx context.Context, + dockerCli *command.DockerCli, + externalNetworks []string) error { + client := dockerCli.Client() + + for _, networkName := range externalNetworks { + network, err := client.NetworkInspect(ctx, networkName) + if err != nil { + if dockerclient.IsErrNetworkNotFound(err) { + return fmt.Errorf("network %q is declared as external, but could not be found. You need to create the network before the stack is deployed (with overlay driver)", networkName) + } + return err + } + if network.Scope != "swarm" { + return fmt.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of %q", networkName, network.Scope, "swarm") + } + } + + return nil +} + +func createSecrets( + ctx context.Context, + dockerCli *command.DockerCli, + namespace convert.Namespace, + secrets []swarm.SecretSpec, +) error { + client := dockerCli.Client() + + for _, secretSpec := range secrets { + // TODO: fix this after https://github.com/docker/docker/pull/29218 + secrets, err := secretcli.GetSecretsByNameOrIDPrefixes(ctx, client, []string{secretSpec.Name}) + switch { + case err != nil: + return err + case len(secrets) > 1: + return errors.Errorf("ambiguous secret name: %s", secretSpec.Name) + case len(secrets) == 0: + fmt.Fprintf(dockerCli.Out(), "Creating secret %s\n", secretSpec.Name) + _, err = client.SecretCreate(ctx, secretSpec) + default: + secret := secrets[0] + // Update secret to ensure that the local data hasn't changed + err = client.SecretUpdate(ctx, secret.ID, secret.Meta.Version, secretSpec) + } + if err != nil { + return err + } + } + return nil +} + +func createNetworks( + ctx context.Context, + dockerCli *command.DockerCli, + namespace convert.Namespace, + networks map[string]types.NetworkCreate, +) error { + client := dockerCli.Client() + + existingNetworks, err := getStackNetworks(ctx, client, namespace.Name()) + if err != nil { + return err + } + + existingNetworkMap := make(map[string]types.NetworkResource) + for _, network := range existingNetworks { + existingNetworkMap[network.Name] = network + } + + for internalName, createOpts := range networks { + name := namespace.Scope(internalName) + if _, exists := existingNetworkMap[name]; exists { + continue + } + + if createOpts.Driver == "" { + createOpts.Driver = defaultNetworkDriver + } + + fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name) + if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil { + return err + } + } + + return nil +} + +func deployServices( + ctx context.Context, + dockerCli *command.DockerCli, + services map[string]swarm.ServiceSpec, + namespace convert.Namespace, + sendAuth bool, +) error { + apiClient := dockerCli.Client() + out := dockerCli.Out() + + existingServices, err := getServices(ctx, apiClient, namespace.Name()) + if err != nil { + return err + } + + existingServiceMap := make(map[string]swarm.Service) + for _, service := range existingServices { + existingServiceMap[service.Spec.Name] = service + } + + for internalName, serviceSpec := range services { + name := namespace.Scope(internalName) + + encodedAuth := "" + if sendAuth { + // Retrieve encoded auth token from the image reference + image := serviceSpec.TaskTemplate.ContainerSpec.Image + encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err != nil { + return err + } + } + + if service, exists := existingServiceMap[name]; exists { + fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID) + + updateOpts := types.ServiceUpdateOptions{} + if sendAuth { + updateOpts.EncodedRegistryAuth = encodedAuth + } + response, err := apiClient.ServiceUpdate( + ctx, + service.ID, + service.Version, + serviceSpec, + updateOpts, + ) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + } else { + fmt.Fprintf(out, "Creating service %s\n", name) + + createOpts := types.ServiceCreateOptions{} + if sendAuth { + createOpts.EncodedRegistryAuth = encodedAuth + } + if _, err := apiClient.ServiceCreate(ctx, serviceSpec, createOpts); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/stack/deploy_bundlefile.go b/vendor/github.com/moby/moby/cli/command/stack/deploy_bundlefile.go new file mode 100644 index 0000000..5a178c4 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/stack/deploy_bundlefile.go @@ -0,0 +1,83 @@ +package stack + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/compose/convert" +) + +func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { + bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile) + if err != nil { + return err + } + + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.namespace) + + networks := make(map[string]types.NetworkCreate) + for _, service := range bundle.Services { + for _, networkName := range service.Networks { + networks[networkName] = types.NetworkCreate{ + Labels: convert.AddStackLabel(namespace, nil), + } + } + } + + services := make(map[string]swarm.ServiceSpec) + for internalName, service := range bundle.Services { + name := namespace.Scope(internalName) + + var ports []swarm.PortConfig + for _, portSpec := range service.Ports { + ports = append(ports, swarm.PortConfig{ + Protocol: swarm.PortConfigProtocol(portSpec.Protocol), + TargetPort: portSpec.Port, + }) + } + + nets := []swarm.NetworkAttachmentConfig{} + for _, networkName := range service.Networks { + nets = append(nets, swarm.NetworkAttachmentConfig{ + Target: namespace.Scope(networkName), + Aliases: []string{networkName}, + }) + } + + serviceSpec := swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: convert.AddStackLabel(namespace, service.Labels), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: service.Image, + Command: service.Command, + Args: service.Args, + Env: service.Env, + // Service Labels will not be copied to Containers + // automatically during the deployment so we apply + // it here. + Labels: convert.AddStackLabel(namespace, nil), + }, + }, + EndpointSpec: &swarm.EndpointSpec{ + Ports: ports, + }, + Networks: nets, + } + + services[internalName] = serviceSpec + } + + if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) +} diff --git a/vendor/github.com/moby/moby/cli/command/stack/list.go b/vendor/github.com/moby/moby/cli/command/stack/list.go new file mode 100644 index 0000000..9b6c645 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/stack/list.go @@ -0,0 +1,113 @@ +package stack + +import ( + "fmt" + "io" + "strconv" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/compose/convert" + "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +const ( + listItemFmt = "%s\t%s\n" +) + +type listOptions struct { +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{} + + cmd := &cobra.Command{ + Use: "ls", + Aliases: []string{"list"}, + Short: "List stacks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + stacks, err := getStacks(ctx, client) + if err != nil { + return err + } + + out := dockerCli.Out() + printTable(out, stacks) + return nil +} + +func printTable(out io.Writer, stacks []*stack) { + writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + + fmt.Fprintf(writer, listItemFmt, "NAME", "SERVICES") + for _, stack := range stacks { + fmt.Fprintf( + writer, + listItemFmt, + stack.Name, + strconv.Itoa(stack.Services), + ) + } +} + +type stack struct { + // Name is the name of the stack + Name string + // Services is the number of the services + Services int +} + +func getStacks( + ctx context.Context, + apiclient client.APIClient, +) ([]*stack, error) { + services, err := apiclient.ServiceList( + ctx, + types.ServiceListOptions{Filters: getAllStacksFilter()}) + if err != nil { + return nil, err + } + m := make(map[string]*stack, 0) + for _, service := range services { + labels := service.Spec.Labels + name, ok := labels[convert.LabelNamespace] + if !ok { + return nil, fmt.Errorf("cannot get label %s for service %s", + convert.LabelNamespace, service.ID) + } + ztack, ok := m[name] + if !ok { + m[name] = &stack{ + Name: name, + Services: 1, + } + } else { + ztack.Services++ + } + } + var stacks []*stack + for _, stack := range m { + stacks = append(stacks, stack) + } + return stacks, nil +} diff --git a/vendor/github.com/moby/moby/cli/command/stack/opts.go b/vendor/github.com/moby/moby/cli/command/stack/opts.go new file mode 100644 index 0000000..74fe4f5 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/stack/opts.go @@ -0,0 +1,49 @@ +package stack + +import ( + "fmt" + "io" + "os" + + "github.com/docker/docker/cli/command/bundlefile" + "github.com/spf13/pflag" +) + +func addComposefileFlag(opt *string, flags *pflag.FlagSet) { + flags.StringVarP(opt, "compose-file", "c", "", "Path to a Compose file") +} + +func addBundlefileFlag(opt *string, flags *pflag.FlagSet) { + flags.StringVar(opt, "bundle-file", "", "Path to a Distributed Application Bundle file") + flags.SetAnnotation("bundle-file", "experimental", nil) +} + +func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) { + flags.BoolVar(opt, "with-registry-auth", false, "Send registry authentication details to Swarm agents") +} + +func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { + defaultPath := fmt.Sprintf("%s.dab", namespace) + + if path == "" { + path = defaultPath + } + if _, err := os.Stat(path); err != nil { + return nil, fmt.Errorf( + "Bundle %s not found. Specify the path with --file", + path) + } + + fmt.Fprintf(stderr, "Loading bundle from %s\n", path) + reader, err := os.Open(path) + if err != nil { + return nil, err + } + defer reader.Close() + + bundle, err := bundlefile.LoadFile(reader) + if err != nil { + return nil, fmt.Errorf("Error reading %s: %v\n", path, err) + } + return bundle, err +} diff --git a/vendor/github.com/moby/moby/cli/command/stack/ps.go b/vendor/github.com/moby/moby/cli/command/stack/ps.go new file mode 100644 index 0000000..e4351bf --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/stack/ps.go @@ -0,0 +1,61 @@ +package stack + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/cli/command/task" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type psOptions struct { + filter opts.FilterOpt + noTrunc bool + namespace string + noResolve bool +} + +func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] STACK", + Short: "List the tasks in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runPS(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPS(dockerCli *command.DockerCli, opts psOptions) error { + namespace := opts.namespace + client := dockerCli.Client() + ctx := context.Background() + + filter := getStackFilterFromOpt(opts.namespace, opts.filter) + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + if len(tasks) == 0 { + fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) + return nil + } + + return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc) +} diff --git a/vendor/github.com/moby/moby/cli/command/stack/remove.go b/vendor/github.com/moby/moby/cli/command/stack/remove.go new file mode 100644 index 0000000..966c1aa --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/stack/remove.go @@ -0,0 +1,112 @@ +package stack + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type removeOptions struct { + namespace string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm STACK", + Aliases: []string{"remove", "down"}, + Short: "Remove the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runRemove(dockerCli, opts) + }, + } + return cmd +} + +func runRemove(dockerCli *command.DockerCli, opts removeOptions) error { + namespace := opts.namespace + client := dockerCli.Client() + ctx := context.Background() + + services, err := getServices(ctx, client, namespace) + if err != nil { + return err + } + + networks, err := getStackNetworks(ctx, client, namespace) + if err != nil { + return err + } + + secrets, err := getStackSecrets(ctx, client, namespace) + if err != nil { + return err + } + + if len(services)+len(networks)+len(secrets) == 0 { + fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) + return nil + } + + hasError := removeServices(ctx, dockerCli, services) + hasError = removeSecrets(ctx, dockerCli, secrets) || hasError + hasError = removeNetworks(ctx, dockerCli, networks) || hasError + + if hasError { + return fmt.Errorf("Failed to remove some resources") + } + return nil +} + +func removeServices( + ctx context.Context, + dockerCli *command.DockerCli, + services []swarm.Service, +) bool { + var err error + for _, service := range services { + fmt.Fprintf(dockerCli.Err(), "Removing service %s\n", service.Spec.Name) + if err = dockerCli.Client().ServiceRemove(ctx, service.ID); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to remove service %s: %s", service.ID, err) + } + } + return err != nil +} + +func removeNetworks( + ctx context.Context, + dockerCli *command.DockerCli, + networks []types.NetworkResource, +) bool { + var err error + for _, network := range networks { + fmt.Fprintf(dockerCli.Err(), "Removing network %s\n", network.Name) + if err = dockerCli.Client().NetworkRemove(ctx, network.ID); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to remove network %s: %s", network.ID, err) + } + } + return err != nil +} + +func removeSecrets( + ctx context.Context, + dockerCli *command.DockerCli, + secrets []swarm.Secret, +) bool { + var err error + for _, secret := range secrets { + fmt.Fprintf(dockerCli.Err(), "Removing secret %s\n", secret.Spec.Name) + if err = dockerCli.Client().SecretRemove(ctx, secret.ID); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to remove secret %s: %s", secret.ID, err) + } + } + return err != nil +} diff --git a/vendor/github.com/moby/moby/cli/command/stack/services.go b/vendor/github.com/moby/moby/cli/command/stack/services.go new file mode 100644 index 0000000..a46652d --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/stack/services.go @@ -0,0 +1,79 @@ +package stack + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/service" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type servicesOptions struct { + quiet bool + filter opts.FilterOpt + namespace string +} + +func newServicesCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := servicesOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "services [OPTIONS] STACK", + Short: "List the services in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runServices(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runServices(dockerCli *command.DockerCli, opts servicesOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + filter := getStackFilterFromOpt(opts.namespace, opts.filter) + services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) + if err != nil { + return err + } + + out := dockerCli.Out() + + // if no services in this stack, print message and exit 0 + if len(services) == 0 { + fmt.Fprintf(out, "Nothing found in stack: %s\n", opts.namespace) + return nil + } + + if opts.quiet { + service.PrintQuiet(out, services) + } else { + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + service.PrintNotQuiet(out, services, nodes, tasks) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/cmd.go b/vendor/github.com/moby/moby/cli/command/swarm/cmd.go new file mode 100644 index 0000000..632679c --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/cmd.go @@ -0,0 +1,28 @@ +package swarm + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewSwarmCommand returns a cobra command for `swarm` subcommands +func NewSwarmCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "swarm", + Short: "Manage Swarm", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newInitCommand(dockerCli), + newJoinCommand(dockerCli), + newJoinTokenCommand(dockerCli), + newUnlockKeyCommand(dockerCli), + newUpdateCommand(dockerCli), + newLeaveCommand(dockerCli), + newUnlockCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/init.go b/vendor/github.com/moby/moby/cli/command/swarm/init.go new file mode 100644 index 0000000..2550fee --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/init.go @@ -0,0 +1,85 @@ +package swarm + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type initOptions struct { + swarmOptions + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. + advertiseAddr string + forceNewCluster bool +} + +func newInitCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := initOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "init [OPTIONS]", + Short: "Initialize a swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInit(dockerCli, cmd.Flags(), opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state") + flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)") + addSwarmFlags(flags, &opts.swarmOptions) + return cmd +} + +func runInit(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts initOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + req := swarm.InitRequest{ + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + ForceNewCluster: opts.forceNewCluster, + Spec: opts.swarmOptions.ToSpec(flags), + AutoLockManagers: opts.swarmOptions.autolock, + } + + nodeID, err := client.SwarmInit(ctx, req) + if err != nil { + if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") { + return errors.New(err.Error() + " - specify one with --advertise-addr") + } + return err + } + + fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID) + + if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil { + return err + } + + fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n") + + if req.AutoLockManagers { + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/join.go b/vendor/github.com/moby/moby/cli/command/swarm/join.go new file mode 100644 index 0000000..004313b --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/join.go @@ -0,0 +1,69 @@ +package swarm + +import ( + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type joinOptions struct { + remote string + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. + advertiseAddr string + token string +} + +func newJoinCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := joinOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "join [OPTIONS] HOST:PORT", + Short: "Join a swarm as a node and/or manager", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runJoin(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm") + return cmd +} + +func runJoin(dockerCli *command.DockerCli, opts joinOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + req := swarm.JoinRequest{ + JoinToken: opts.token, + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + RemoteAddrs: []string{opts.remote}, + } + err := client.SwarmJoin(ctx, req) + if err != nil { + return err + } + + info, err := client.Info(ctx) + if err != nil { + return err + } + + if info.Swarm.ControlAvailable { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.") + } else { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.") + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/join_token.go b/vendor/github.com/moby/moby/cli/command/swarm/join_token.go new file mode 100644 index 0000000..3a17a80 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/join_token.go @@ -0,0 +1,105 @@ +package swarm + +import ( + "errors" + "fmt" + + "github.com/spf13/cobra" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "golang.org/x/net/context" +) + +func newJoinTokenCommand(dockerCli *command.DockerCli) *cobra.Command { + var rotate, quiet bool + + cmd := &cobra.Command{ + Use: "join-token [OPTIONS] (worker|manager)", + Short: "Manage join tokens", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + worker := args[0] == "worker" + manager := args[0] == "manager" + + if !worker && !manager { + return errors.New("unknown role " + args[0]) + } + + client := dockerCli.Client() + ctx := context.Background() + + if rotate { + var flags swarm.UpdateFlags + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + flags.RotateWorkerToken = worker + flags.RotateManagerToken = manager + + err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags) + if err != nil { + return err + } + if !quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", args[0]) + } + } + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if quiet { + if worker { + fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Worker) + } else { + fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Manager) + } + } else { + info, err := client.Info(ctx) + if err != nil { + return err + } + return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, worker, manager) + } + return nil + }, + } + + flags := cmd.Flags() + flags.BoolVar(&rotate, flagRotate, false, "Rotate join token") + flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func printJoinCommand(ctx context.Context, dockerCli *command.DockerCli, nodeID string, worker bool, manager bool) error { + client := dockerCli.Client() + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + if node.ManagerStatus != nil { + if worker { + fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Worker, node.ManagerStatus.Addr) + } + if manager { + fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Manager, node.ManagerStatus.Addr) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/leave.go b/vendor/github.com/moby/moby/cli/command/swarm/leave.go new file mode 100644 index 0000000..e2cfa0a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/leave.go @@ -0,0 +1,44 @@ +package swarm + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type leaveOptions struct { + force bool +} + +func newLeaveCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := leaveOptions{} + + cmd := &cobra.Command{ + Use: "leave [OPTIONS]", + Short: "Leave the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLeave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force this node to leave the swarm, ignoring warnings") + return cmd +} + +func runLeave(dockerCli *command.DockerCli, opts leaveOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if err := client.SwarmLeave(ctx, opts.force); err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Node left the swarm.") + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/opts.go b/vendor/github.com/moby/moby/cli/command/swarm/opts.go new file mode 100644 index 0000000..9db46dc --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/opts.go @@ -0,0 +1,209 @@ +package swarm + +import ( + "encoding/csv" + "errors" + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" +) + +const ( + defaultListenAddr = "0.0.0.0:2377" + + flagCertExpiry = "cert-expiry" + flagDispatcherHeartbeat = "dispatcher-heartbeat" + flagListenAddr = "listen-addr" + flagAdvertiseAddr = "advertise-addr" + flagQuiet = "quiet" + flagRotate = "rotate" + flagToken = "token" + flagTaskHistoryLimit = "task-history-limit" + flagExternalCA = "external-ca" + flagMaxSnapshots = "max-snapshots" + flagSnapshotInterval = "snapshot-interval" + flagLockKey = "lock-key" + flagAutolock = "autolock" +) + +type swarmOptions struct { + taskHistoryLimit int64 + dispatcherHeartbeat time.Duration + nodeCertExpiry time.Duration + externalCA ExternalCAOption + maxSnapshots uint64 + snapshotInterval uint64 + autolock bool +} + +// NodeAddrOption is a pflag.Value for listening addresses +type NodeAddrOption struct { + addr string +} + +// String prints the representation of this flag +func (a *NodeAddrOption) String() string { + return a.Value() +} + +// Set the value for this flag +func (a *NodeAddrOption) Set(value string) error { + addr, err := opts.ParseTCPAddr(value, a.addr) + if err != nil { + return err + } + a.addr = addr + return nil +} + +// Type returns the type of this flag +func (a *NodeAddrOption) Type() string { + return "node-addr" +} + +// Value returns the value of this option as addr:port +func (a *NodeAddrOption) Value() string { + return strings.TrimPrefix(a.addr, "tcp://") +} + +// NewNodeAddrOption returns a new node address option +func NewNodeAddrOption(addr string) NodeAddrOption { + return NodeAddrOption{addr} +} + +// NewListenAddrOption returns a NodeAddrOption with default values +func NewListenAddrOption() NodeAddrOption { + return NewNodeAddrOption(defaultListenAddr) +} + +// ExternalCAOption is a Value type for parsing external CA specifications. +type ExternalCAOption struct { + values []*swarm.ExternalCA +} + +// Set parses an external CA option. +func (m *ExternalCAOption) Set(value string) error { + parsed, err := parseExternalCA(value) + if err != nil { + return err + } + + m.values = append(m.values, parsed) + return nil +} + +// Type returns the type of this option. +func (m *ExternalCAOption) Type() string { + return "external-ca" +} + +// String returns a string repr of this option. +func (m *ExternalCAOption) String() string { + externalCAs := []string{} + for _, externalCA := range m.values { + repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) + externalCAs = append(externalCAs, repr) + } + return strings.Join(externalCAs, ", ") +} + +// Value returns the external CAs +func (m *ExternalCAOption) Value() []*swarm.ExternalCA { + return m.values +} + +// parseExternalCA parses an external CA specification from the command line, +// such as protocol=cfssl,url=https://example.com. +func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) { + csvReader := csv.NewReader(strings.NewReader(caSpec)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + + externalCA := swarm.ExternalCA{ + Options: make(map[string]string), + } + + var ( + hasProtocol bool + hasURL bool + ) + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + + if len(parts) != 2 { + return nil, fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key, value := parts[0], parts[1] + + switch strings.ToLower(key) { + case "protocol": + hasProtocol = true + if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) { + externalCA.Protocol = swarm.ExternalCAProtocolCFSSL + } else { + return nil, fmt.Errorf("unrecognized external CA protocol %s", value) + } + case "url": + hasURL = true + externalCA.URL = value + default: + externalCA.Options[key] = value + } + } + + if !hasProtocol { + return nil, errors.New("the external-ca option needs a protocol= parameter") + } + if !hasURL { + return nil, errors.New("the external-ca option needs a url= parameter") + } + + return &externalCA, nil +} + +func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) { + flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit") + flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period (ns|us|ms|s|m|h)") + flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates (ns|us|ms|s|m|h)") + flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints") + flags.Uint64Var(&opts.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain") + flags.Uint64Var(&opts.snapshotInterval, flagSnapshotInterval, 10000, "Number of log entries between Raft snapshots") +} + +func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet) { + if flags.Changed(flagTaskHistoryLimit) { + spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit + } + if flags.Changed(flagDispatcherHeartbeat) { + spec.Dispatcher.HeartbeatPeriod = opts.dispatcherHeartbeat + } + if flags.Changed(flagCertExpiry) { + spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry + } + if flags.Changed(flagExternalCA) { + spec.CAConfig.ExternalCAs = opts.externalCA.Value() + } + if flags.Changed(flagMaxSnapshots) { + spec.Raft.KeepOldSnapshots = &opts.maxSnapshots + } + if flags.Changed(flagSnapshotInterval) { + spec.Raft.SnapshotInterval = opts.snapshotInterval + } + if flags.Changed(flagAutolock) { + spec.EncryptionConfig.AutoLockManagers = opts.autolock + } +} + +func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec { + var spec swarm.Spec + opts.mergeSwarmSpec(&spec, flags) + return spec +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/opts_test.go b/vendor/github.com/moby/moby/cli/command/swarm/opts_test.go new file mode 100644 index 0000000..568dc87 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/opts_test.go @@ -0,0 +1,37 @@ +package swarm + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestNodeAddrOptionSetHostAndPort(t *testing.T) { + opt := NewNodeAddrOption("old:123") + addr := "newhost:5555" + assert.NilError(t, opt.Set(addr)) + assert.Equal(t, opt.Value(), addr) +} + +func TestNodeAddrOptionSetHostOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("newhost")) + assert.Equal(t, opt.Value(), "newhost:2377") +} + +func TestNodeAddrOptionSetHostOnlyIPv6(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("::1")) + assert.Equal(t, opt.Value(), "[::1]:2377") +} + +func TestNodeAddrOptionSetPortOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set(":4545")) + assert.Equal(t, opt.Value(), "0.0.0.0:4545") +} + +func TestNodeAddrOptionSetInvalidFormat(t *testing.T) { + opt := NewListenAddrOption() + assert.Error(t, opt.Set("http://localhost:4545"), "Invalid") +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/unlock.go b/vendor/github.com/moby/moby/cli/command/swarm/unlock.go new file mode 100644 index 0000000..048fb56 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/unlock.go @@ -0,0 +1,54 @@ +package swarm + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/spf13/cobra" + "golang.org/x/crypto/ssh/terminal" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "golang.org/x/net/context" +) + +func newUnlockCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "unlock", + Short: "Unlock swarm", + Args: cli.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + client := dockerCli.Client() + ctx := context.Background() + + key, err := readKey(dockerCli.In(), "Please enter unlock key: ") + if err != nil { + return err + } + req := swarm.UnlockRequest{ + UnlockKey: key, + } + + return client.SwarmUnlock(ctx, req) + }, + } + + return cmd +} + +func readKey(in *command.InStream, prompt string) (string, error) { + if in.IsTerminal() { + fmt.Print(prompt) + dt, err := terminal.ReadPassword(int(in.FD())) + fmt.Println() + return string(dt), err + } + key, err := bufio.NewReader(in).ReadString('\n') + if err == io.EOF { + err = nil + } + return strings.TrimSpace(key), err +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/unlock_key.go b/vendor/github.com/moby/moby/cli/command/swarm/unlock_key.go new file mode 100644 index 0000000..96450f5 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/unlock_key.go @@ -0,0 +1,79 @@ +package swarm + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func newUnlockKeyCommand(dockerCli *command.DockerCli) *cobra.Command { + var rotate, quiet bool + + cmd := &cobra.Command{ + Use: "unlock-key [OPTIONS]", + Short: "Manage the unlock key", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client := dockerCli.Client() + ctx := context.Background() + + if rotate { + flags := swarm.UpdateFlags{RotateManagerUnlockKey: true} + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if !swarm.Spec.EncryptionConfig.AutoLockManagers { + return errors.New("cannot rotate because autolock is not turned on") + } + + err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags) + if err != nil { + return err + } + if !quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated manager unlock key.\n\n") + } + } + + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + + if unlockKeyResp.UnlockKey == "" { + return errors.New("no unlock key is set") + } + + if quiet { + fmt.Fprintln(dockerCli.Out(), unlockKeyResp.UnlockKey) + } else { + printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) + } + return nil + }, + } + + flags := cmd.Flags() + flags.BoolVar(&rotate, flagRotate, false, "Rotate unlock key") + flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func printUnlockCommand(ctx context.Context, dockerCli *command.DockerCli, unlockKey string) { + if len(unlockKey) == 0 { + return + } + + fmt.Fprintf(dockerCli.Out(), "To unlock a swarm manager after it restarts, run the `docker swarm unlock`\ncommand and provide the following key:\n\n %s\n\nPlease remember to store this key in a password manager, since without it you\nwill not be able to restart the manager.\n", unlockKey) + return +} diff --git a/vendor/github.com/moby/moby/cli/command/swarm/update.go b/vendor/github.com/moby/moby/cli/command/swarm/update.go new file mode 100644 index 0000000..dbbd268 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/swarm/update.go @@ -0,0 +1,72 @@ +package swarm + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := swarmOptions{} + + cmd := &cobra.Command{ + Use: "update [OPTIONS]", + Short: "Update the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), opts) + }, + PreRunE: func(cmd *cobra.Command, args []string) error { + if cmd.Flags().NFlag() == 0 { + return pflag.ErrHelp + } + return nil + }, + } + + cmd.Flags().BoolVar(&opts.autolock, flagAutolock, false, "Change manager autolocking setting (true|false)") + addSwarmFlags(cmd.Flags(), &opts) + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var updateFlags swarm.UpdateFlags + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + prevAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers + + opts.mergeSwarmSpec(&swarm.Spec, flags) + + curAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers + + err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, updateFlags) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Swarm updated.") + + if curAutoLock && !prevAutoLock { + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/system/cmd.go b/vendor/github.com/moby/moby/cli/command/system/cmd.go new file mode 100644 index 0000000..ab3beb8 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/system/cmd.go @@ -0,0 +1,26 @@ +package system + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewSystemCommand returns a cobra command for `system` subcommands +func NewSystemCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "system", + Short: "Manage Docker", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + NewEventsCommand(dockerCli), + NewInfoCommand(dockerCli), + NewDiskUsageCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + + return cmd +} diff --git a/vendor/github.com/moby/moby/cli/command/system/df.go b/vendor/github.com/moby/moby/cli/command/system/df.go new file mode 100644 index 0000000..9f71248 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/system/df.go @@ -0,0 +1,56 @@ +package system + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type diskUsageOptions struct { + verbose bool +} + +// NewDiskUsageCommand creates a new cobra.Command for `docker df` +func NewDiskUsageCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts diskUsageOptions + + cmd := &cobra.Command{ + Use: "df [OPTIONS]", + Short: "Show docker disk usage", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runDiskUsage(dockerCli, opts) + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Show detailed information on space usage") + + return cmd +} + +func runDiskUsage(dockerCli *command.DockerCli, opts diskUsageOptions) error { + du, err := dockerCli.Client().DiskUsage(context.Background()) + if err != nil { + return err + } + + duCtx := formatter.DiskUsageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + }, + LayersSize: du.LayersSize, + Images: du.Images, + Containers: du.Containers, + Volumes: du.Volumes, + Verbose: opts.verbose, + } + + duCtx.Write() + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/system/events.go b/vendor/github.com/moby/moby/cli/command/system/events.go new file mode 100644 index 0000000..0875230 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/system/events.go @@ -0,0 +1,140 @@ +package system + +import ( + "fmt" + "io" + "io/ioutil" + "sort" + "strings" + "text/template" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/utils/templates" + "github.com/spf13/cobra" +) + +type eventsOptions struct { + since string + until string + filter opts.FilterOpt + format string +} + +// NewEventsCommand creates a new cobra.Command for `docker events` +func NewEventsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := eventsOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "events [OPTIONS]", + Short: "Get real time events from the server", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runEvents(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.since, "since", "", "Show all events created since timestamp") + flags.StringVar(&opts.until, "until", "", "Stream events until this timestamp") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + flags.StringVar(&opts.format, "format", "", "Format the output using the given Go template") + + return cmd +} + +func runEvents(dockerCli *command.DockerCli, opts *eventsOptions) error { + tmpl, err := makeTemplate(opts.format) + if err != nil { + return cli.StatusError{ + StatusCode: 64, + Status: "Error parsing format: " + err.Error()} + } + options := types.EventsOptions{ + Since: opts.since, + Until: opts.until, + Filters: opts.filter.Value(), + } + + ctx, cancel := context.WithCancel(context.Background()) + events, errs := dockerCli.Client().Events(ctx, options) + defer cancel() + + out := dockerCli.Out() + + for { + select { + case event := <-events: + if err := handleEvent(out, event, tmpl); err != nil { + return err + } + case err := <-errs: + if err == io.EOF { + return nil + } + return err + } + } +} + +func handleEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + if tmpl == nil { + return prettyPrintEvent(out, event) + } + + return formatEvent(out, event, tmpl) +} + +func makeTemplate(format string) (*template.Template, error) { + if format == "" { + return nil, nil + } + tmpl, err := templates.Parse(format) + if err != nil { + return tmpl, err + } + // we execute the template for an empty message, so as to validate + // a bad template like "{{.badFieldString}}" + return tmpl, tmpl.Execute(ioutil.Discard, &eventtypes.Message{}) +} + +// prettyPrintEvent prints all types of event information. +// Each output includes the event type, actor id, name and action. +// Actor attributes are printed at the end if the actor has any. +func prettyPrintEvent(out io.Writer, event eventtypes.Message) error { + if event.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if event.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + + fmt.Fprintf(out, "%s %s %s", event.Type, event.Action, event.Actor.ID) + + if len(event.Actor.Attributes) > 0 { + var attrs []string + var keys []string + for k := range event.Actor.Attributes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := event.Actor.Attributes[k] + attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) + } + fmt.Fprintf(out, " (%s)", strings.Join(attrs, ", ")) + } + fmt.Fprint(out, "\n") + return nil +} + +func formatEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + defer out.Write([]byte{'\n'}) + return tmpl.Execute(out, event) +} diff --git a/vendor/github.com/moby/moby/cli/command/system/info.go b/vendor/github.com/moby/moby/cli/command/system/info.go new file mode 100644 index 0000000..ea4af76 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/system/info.go @@ -0,0 +1,365 @@ +package system + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/utils" + "github.com/docker/docker/utils/templates" + "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type infoOptions struct { + format string +} + +// NewInfoCommand creates a new cobra.Command for `docker info` +func NewInfoCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts infoOptions + + cmd := &cobra.Command{ + Use: "info [OPTIONS]", + Short: "Display system-wide information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInfo(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInfo(dockerCli *command.DockerCli, opts *infoOptions) error { + ctx := context.Background() + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if opts.format == "" { + return prettyPrintInfo(dockerCli, info) + } + return formatInfo(dockerCli, info, opts.format) +} + +func prettyPrintInfo(dockerCli *command.DockerCli, info types.Info) error { + fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers) + fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning) + fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused) + fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped) + fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) + } + + } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1]) + } + } + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver) + + fmt.Fprintf(dockerCli.Out(), "Plugins: \n") + fmt.Fprintf(dockerCli.Out(), " Volume:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + fmt.Fprintf(dockerCli.Out(), " Network:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintf(dockerCli.Out(), " Authorization:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + } + + fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState) + if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive && info.Swarm.LocalNodeState != swarm.LocalNodeStateLocked { + fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID) + if info.Swarm.Error != "" { + fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error) + } + fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable) + if info.Swarm.ControlAvailable { + fmt.Fprintf(dockerCli.Out(), " ClusterID: %s\n", info.Swarm.Cluster.ID) + fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers) + fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes) + fmt.Fprintf(dockerCli.Out(), " Orchestration:\n") + taskHistoryRetentionLimit := int64(0) + if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil { + taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit + } + fmt.Fprintf(dockerCli.Out(), " Task History Retention Limit: %d\n", taskHistoryRetentionLimit) + fmt.Fprintf(dockerCli.Out(), " Raft:\n") + fmt.Fprintf(dockerCli.Out(), " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval) + if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil { + fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots) + } + fmt.Fprintf(dockerCli.Out(), " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick) + fmt.Fprintf(dockerCli.Out(), " Election Tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick) + fmt.Fprintf(dockerCli.Out(), " Dispatcher:\n") + fmt.Fprintf(dockerCli.Out(), " Heartbeat Period: %s\n", units.HumanDuration(time.Duration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod))) + fmt.Fprintf(dockerCli.Out(), " CA Configuration:\n") + fmt.Fprintf(dockerCli.Out(), " Expiry Duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry)) + if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 { + fmt.Fprintf(dockerCli.Out(), " External CAs:\n") + for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL) + } + } + } + fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr) + managers := []string{} + for _, entry := range info.Swarm.RemoteManagers { + managers = append(managers, entry.Addr) + } + if len(managers) > 0 { + sort.Strings(managers) + fmt.Fprintf(dockerCli.Out(), " Manager Addresses:\n") + for _, entry := range managers { + fmt.Fprintf(dockerCli.Out(), " %s\n", entry) + } + } + } + + if len(info.Runtimes) > 0 { + fmt.Fprintf(dockerCli.Out(), "Runtimes:") + for name := range info.Runtimes { + fmt.Fprintf(dockerCli.Out(), " %s", name) + } + fmt.Fprint(dockerCli.Out(), "\n") + fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime) + } + + if info.OSType == "linux" { + fmt.Fprintf(dockerCli.Out(), "Init Binary: %v\n", info.InitBinary) + + for _, ci := range []struct { + Name string + Commit types.Commit + }{ + {"containerd", info.ContainerdCommit}, + {"runc", info.RuncCommit}, + {"init", info.InitCommit}, + } { + fmt.Fprintf(dockerCli.Out(), "%s version: %s", ci.Name, ci.Commit.ID) + if ci.Commit.ID != ci.Commit.Expected { + fmt.Fprintf(dockerCli.Out(), " (expected: %s)", ci.Commit.Expected) + } + fmt.Fprintf(dockerCli.Out(), "\n") + } + if len(info.SecurityOptions) != 0 { + kvs, err := types.DecodeSecurityOptions(info.SecurityOptions) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Security Options:\n") + for _, so := range kvs { + fmt.Fprintf(dockerCli.Out(), " %s\n", so.Name) + for _, o := range so.Options { + switch o.Key { + case "profile": + if o.Value != "default" { + fmt.Fprintf(dockerCli.Err(), " WARNING: You're not using the default seccomp profile\n") + } + fmt.Fprintf(dockerCli.Out(), " Profile: %s\n", o.Value) + } + } + } + } + } + + // Isolation only has meaning on a Windows daemon. + if info.OSType == "windows" { + fmt.Fprintf(dockerCli.Out(), "Default Isolation: %v\n", info.Isolation) + } + + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture) + fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU) + fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID) + fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir) + fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", utils.IsDebugEnabled()) + fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug) + + if info.Debug { + fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd) + fmt.Fprintf(dockerCli.Out(), " Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime) + fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener) + } + + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u) + } + fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress) + } + + if info.Labels != nil { + fmt.Fprintln(dockerCli.Out(), "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(dockerCli.Out(), " %s\n", attribute) + } + // TODO: Engine labels with duplicate keys has been deprecated in 1.13 and will be error out + // after 3 release cycles (17.12). For now, a WARNING will be generated. The following will + // be removed eventually. + labelMap := map[string]string{} + for _, label := range info.Labels { + stringSlice := strings.SplitN(label, "=", 2) + if len(stringSlice) > 1 { + // If there is a conflict we will throw out an warning + if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { + fmt.Fprintln(dockerCli.Err(), "WARNING: labels with duplicate keys and conflicting values have been deprecated") + break + } + labelMap[stringSlice[0]] = stringSlice[1] + } + } + } + + fmt.Fprintf(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild) + if info.ClusterStore != "" { + fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore) + } + + if info.ClusterAdvertise != "" { + fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise) + } + + if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { + fmt.Fprintln(dockerCli.Out(), "Insecure Registries:") + for _, registry := range info.RegistryConfig.IndexConfigs { + if registry.Secure == false { + fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name) + } + } + + for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { + mask, _ := registry.Mask.Size() + fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask) + } + } + + if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 { + fmt.Fprintln(dockerCli.Out(), "Registry Mirrors:") + for _, mirror := range info.RegistryConfig.Mirrors { + fmt.Fprintf(dockerCli.Out(), " %s\n", mirror) + } + } + + fmt.Fprintf(dockerCli.Out(), "Live Restore Enabled: %v\n\n", info.LiveRestoreEnabled) + + // Only output these warnings if the server does not support these features + if info.OSType != "windows" { + printStorageDriverWarnings(dockerCli, info) + + if !info.MemoryLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support") + } + if !info.SwapLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support") + } + if !info.KernelMemory { + fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support") + } + if !info.CPUCfsPeriod { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled") + } + } + + return nil +} + +func printStorageDriverWarnings(dockerCli *command.DockerCli, info types.Info) { + if info.DriverStatus == nil { + return + } + + for _, pair := range info.DriverStatus { + if pair[0] == "Data loop file" { + fmt.Fprintf(dockerCli.Err(), "WARNING: %s: usage of loopback devices is strongly discouraged for production use.\n Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.\n", info.Driver) + } + if pair[0] == "Supports d_type" && pair[1] == "false" { + backingFs := getBackingFs(info) + + msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", info.Driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" + } + msg += " Running without d_type support will not be supported in future releases." + fmt.Fprintln(dockerCli.Err(), msg) + } + } +} + +func getBackingFs(info types.Info) string { + if info.DriverStatus == nil { + return "" + } + + for _, pair := range info.DriverStatus { + if pair[0] == "Backing Filesystem" { + return pair[1] + } + } + return "" +} + +func formatInfo(dockerCli *command.DockerCli, info types.Info, format string) error { + tmpl, err := templates.Parse(format) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + err = tmpl.Execute(dockerCli.Out(), info) + dockerCli.Out().Write([]byte{'\n'}) + return err +} diff --git a/vendor/github.com/moby/moby/cli/command/system/inspect.go b/vendor/github.com/moby/moby/cli/command/system/inspect.go new file mode 100644 index 0000000..c86e858 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/system/inspect.go @@ -0,0 +1,203 @@ +package system + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + inspectType string + size bool + ids []string +} + +// NewInspectCommand creates a new cobra.Command for `docker inspect` +func NewInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NAME|ID [NAME|ID...]", + Short: "Return low-level information on Docker objects", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.ids = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.StringVar(&opts.inspectType, "type", "", "Return JSON for specified type") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes if the type is container") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + var elementSearcher inspect.GetRefFunc + switch opts.inspectType { + case "", "container", "image", "node", "network", "service", "volume", "task", "plugin": + elementSearcher = inspectAll(context.Background(), dockerCli, opts.size, opts.inspectType) + default: + return fmt.Errorf("%q is not a valid value for --type", opts.inspectType) + } + return inspect.Inspect(dockerCli.Out(), opts.ids, opts.format, elementSearcher) +} + +func inspectContainers(ctx context.Context, dockerCli *command.DockerCli, getSize bool) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ContainerInspectWithRaw(ctx, ref, getSize) + } +} + +func inspectImages(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ImageInspectWithRaw(ctx, ref) + } +} + +func inspectNetwork(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NetworkInspectWithRaw(ctx, ref) + } +} + +func inspectNode(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NodeInspectWithRaw(ctx, ref) + } +} + +func inspectService(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ServiceInspectWithRaw(ctx, ref) + } +} + +func inspectTasks(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().TaskInspectWithRaw(ctx, ref) + } +} + +func inspectVolume(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().VolumeInspectWithRaw(ctx, ref) + } +} + +func inspectPlugin(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().PluginInspectWithRaw(ctx, ref) + } +} + +func inspectAll(ctx context.Context, dockerCli *command.DockerCli, getSize bool, typeConstraint string) inspect.GetRefFunc { + var inspectAutodetect = []struct { + objectType string + isSizeSupported bool + isSwarmObject bool + objectInspector func(string) (interface{}, []byte, error) + }{ + { + objectType: "container", + isSizeSupported: true, + objectInspector: inspectContainers(ctx, dockerCli, getSize), + }, + { + objectType: "image", + objectInspector: inspectImages(ctx, dockerCli), + }, + { + objectType: "network", + objectInspector: inspectNetwork(ctx, dockerCli), + }, + { + objectType: "volume", + objectInspector: inspectVolume(ctx, dockerCli), + }, + { + objectType: "service", + isSwarmObject: true, + objectInspector: inspectService(ctx, dockerCli), + }, + { + objectType: "task", + isSwarmObject: true, + objectInspector: inspectTasks(ctx, dockerCli), + }, + { + objectType: "node", + isSwarmObject: true, + objectInspector: inspectNode(ctx, dockerCli), + }, + { + objectType: "plugin", + objectInspector: inspectPlugin(ctx, dockerCli), + }, + } + + // isSwarmManager does an Info API call to verify that the daemon is + // a swarm manager. + isSwarmManager := func() bool { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return false + } + return info.Swarm.ControlAvailable + } + + isErrNotSupported := func(err error) bool { + return strings.Contains(err.Error(), "not supported") + } + + return func(ref string) (interface{}, []byte, error) { + const ( + swarmSupportUnknown = iota + swarmSupported + swarmUnsupported + ) + + isSwarmSupported := swarmSupportUnknown + + for _, inspectData := range inspectAutodetect { + if typeConstraint != "" && inspectData.objectType != typeConstraint { + continue + } + if typeConstraint == "" && inspectData.isSwarmObject { + if isSwarmSupported == swarmSupportUnknown { + if isSwarmManager() { + isSwarmSupported = swarmSupported + } else { + isSwarmSupported = swarmUnsupported + } + } + if isSwarmSupported == swarmUnsupported { + continue + } + } + v, raw, err := inspectData.objectInspector(ref) + if err != nil { + if typeConstraint == "" && (apiclient.IsErrNotFound(err) || isErrNotSupported(err)) { + continue + } + return v, raw, err + } + if getSize && !inspectData.isSizeSupported { + fmt.Fprintf(dockerCli.Err(), "WARNING: --size ignored for %s\n", inspectData.objectType) + } + return v, raw, err + } + return nil, nil, fmt.Errorf("Error: No such object: %s", ref) + } +} diff --git a/vendor/github.com/moby/moby/cli/command/system/prune.go b/vendor/github.com/moby/moby/cli/command/system/prune.go new file mode 100644 index 0000000..92dddbd --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/system/prune.go @@ -0,0 +1,93 @@ +package system + +import ( + "fmt" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/prune" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool +} + +// NewPruneCommand creates a new cobra.Command for `docker prune` +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused data", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPrune(dockerCli, opts) + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images not just dangling ones") + + return cmd +} + +const ( + warning = `WARNING! This will remove: + - all stopped containers + - all volumes not used by at least one container + - all networks not used by at least one container + %s +Are you sure you want to continue?` + + danglingImageDesc = "- all dangling images" + allImageDesc = `- all images without at least one container associated to them` +) + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) error { + var message string + + if opts.all { + message = fmt.Sprintf(warning, allImageDesc) + } else { + message = fmt.Sprintf(warning, danglingImageDesc) + } + + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), message) { + return nil + } + + var spaceReclaimed uint64 + + for _, pruneFn := range []func(dockerCli *command.DockerCli) (uint64, string, error){ + prune.RunContainerPrune, + prune.RunVolumePrune, + prune.RunNetworkPrune, + } { + spc, output, err := pruneFn(dockerCli) + if err != nil { + return err + } + spaceReclaimed += spc + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + } + + spc, output, err := prune.RunImagePrune(dockerCli, opts.all) + if err != nil { + return err + } + if spc > 0 { + spaceReclaimed += spc + fmt.Fprintln(dockerCli.Out(), output) + } + + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/system/version.go b/vendor/github.com/moby/moby/cli/command/system/version.go new file mode 100644 index 0000000..ded4f4d --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/system/version.go @@ -0,0 +1,113 @@ +package system + +import ( + "fmt" + "runtime" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/utils/templates" + "github.com/spf13/cobra" +) + +var versionTemplate = `Client: + Version: {{.Client.Version}} + API version: {{.Client.APIVersion}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.APIVersion}} (minimum version {{.Server.MinAPIVersion}}) + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}} + Experimental: {{.Server.Experimental}}{{end}}` + +type versionOptions struct { + format string +} + +// NewVersionCommand creates a new cobra.Command for `docker version` +func NewVersionCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts versionOptions + + cmd := &cobra.Command{ + Use: "version [OPTIONS]", + Short: "Show the Docker version information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runVersion(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runVersion(dockerCli *command.DockerCli, opts *versionOptions) error { + ctx := context.Background() + + templateFormat := versionTemplate + if opts.format != "" { + templateFormat = opts.format + } + + tmpl, err := templates.Parse(templateFormat) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + APIVersion := dockerCli.Client().ClientVersion() + if defaultAPIVersion := dockerCli.DefaultVersion(); APIVersion != defaultAPIVersion { + APIVersion = fmt.Sprintf("%s (downgraded from %s)", APIVersion, defaultAPIVersion) + } + + vd := types.VersionResponse{ + Client: &types.Version{ + Version: dockerversion.Version, + APIVersion: APIVersion, + GoVersion: runtime.Version(), + GitCommit: dockerversion.GitCommit, + BuildTime: dockerversion.BuildTime, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + + serverVersion, err := dockerCli.Client().ServerVersion(ctx) + if err == nil { + vd.Server = &serverVersion + } + + // first we need to make BuildTime more human friendly + t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) + if errTime == nil { + vd.Client.BuildTime = t.Format(time.ANSIC) + } + + if vd.ServerOK() { + t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) + if errTime == nil { + vd.Server.BuildTime = t.Format(time.ANSIC) + } + } + + if err2 := tmpl.Execute(dockerCli.Out(), vd); err2 != nil && err == nil { + err = err2 + } + dockerCli.Out().Write([]byte{'\n'}) + return err +} diff --git a/vendor/github.com/moby/moby/cli/command/task/print.go b/vendor/github.com/moby/moby/cli/command/task/print.go new file mode 100644 index 0000000..0f1c2cf --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/task/print.go @@ -0,0 +1,161 @@ +package task + +import ( + "fmt" + "io" + "sort" + "strings" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +const ( + psTaskItemFmt = "%s\t%s\t%s\t%s\t%s\t%s %s ago\t%s\t%s\n" + maxErrLength = 30 +) + +type portStatus swarm.PortStatus + +func (ps portStatus) String() string { + if len(ps.Ports) == 0 { + return "" + } + + str := fmt.Sprintf("*:%d->%d/%s", ps.Ports[0].PublishedPort, ps.Ports[0].TargetPort, ps.Ports[0].Protocol) + for _, pConfig := range ps.Ports[1:] { + str += fmt.Sprintf(",*:%d->%d/%s", pConfig.PublishedPort, pConfig.TargetPort, pConfig.Protocol) + } + + return str +} + +type tasksBySlot []swarm.Task + +func (t tasksBySlot) Len() int { + return len(t) +} + +func (t tasksBySlot) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +func (t tasksBySlot) Less(i, j int) bool { + // Sort by slot. + if t[i].Slot != t[j].Slot { + return t[i].Slot < t[j].Slot + } + + // If same slot, sort by most recent. + return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) +} + +// Print task information in a table format. +// Besides this, command `docker node ps ` +// and `docker stack ps` will call this, too. +func Print(dockerCli *command.DockerCli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error { + sort.Stable(tasksBySlot(tasks)) + + writer := tabwriter.NewWriter(dockerCli.Out(), 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "IMAGE", "NODE", "DESIRED STATE", "CURRENT STATE", "ERROR", "PORTS"}, "\t")) + + if err := print(writer, ctx, tasks, resolver, noTrunc); err != nil { + return err + } + + return nil +} + +// PrintQuiet shows task list in a quiet way. +func PrintQuiet(dockerCli *command.DockerCli, tasks []swarm.Task) error { + sort.Stable(tasksBySlot(tasks)) + + out := dockerCli.Out() + + for _, task := range tasks { + fmt.Fprintln(out, task.ID) + } + + return nil +} + +func print(out io.Writer, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error { + prevName := "" + for _, task := range tasks { + id := task.ID + if !noTrunc { + id = stringid.TruncateID(id) + } + + serviceName, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID) + if err != nil { + return err + } + + nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID) + if err != nil { + return err + } + + name := "" + if task.Slot != 0 { + name = fmt.Sprintf("%v.%v", serviceName, task.Slot) + } else { + name = fmt.Sprintf("%v.%v", serviceName, task.NodeID) + } + + // Indent the name if necessary + indentedName := name + if name == prevName { + indentedName = fmt.Sprintf(" \\_ %s", indentedName) + } + prevName = name + + // Trim and quote the error message. + taskErr := task.Status.Err + if !noTrunc && len(taskErr) > maxErrLength { + taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1]) + } + if len(taskErr) > 0 { + taskErr = fmt.Sprintf("\"%s\"", taskErr) + } + + image := task.Spec.ContainerSpec.Image + if !noTrunc { + ref, err := distreference.ParseNamed(image) + if err == nil { + // update image string for display + namedTagged, ok := ref.(distreference.NamedTagged) + if ok { + image = namedTagged.Name() + ":" + namedTagged.Tag() + } + } + } + + fmt.Fprintf( + out, + psTaskItemFmt, + id, + indentedName, + image, + nodeValue, + command.PrettyPrint(task.DesiredState), + command.PrettyPrint(task.Status.State), + strings.ToLower(units.HumanDuration(time.Since(task.Status.Timestamp))), + taskErr, + portStatus(task.Status.PortStatus), + ) + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/command/trust.go b/vendor/github.com/moby/moby/cli/command/trust.go new file mode 100644 index 0000000..b4c8a84 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/trust.go @@ -0,0 +1,39 @@ +package command + +import ( + "os" + "strconv" + + "github.com/spf13/pflag" +) + +var ( + // TODO: make this not global + untrusted bool +) + +// AddTrustedFlags adds content trust flags to the current command flagset +func AddTrustedFlags(fs *pflag.FlagSet, verify bool) { + trusted, message := setupTrustedFlag(verify) + fs.BoolVar(&untrusted, "disable-content-trust", !trusted, message) +} + +func setupTrustedFlag(verify bool) (bool, string) { + var trusted bool + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + trusted = true + } + } + message := "Skip image signing" + if verify { + message = "Skip image verification" + } + return trusted, message +} + +// IsTrusted returns true if content trust is enabled +func IsTrusted() bool { + return !untrusted +} diff --git a/vendor/github.com/moby/moby/cli/command/utils.go b/vendor/github.com/moby/moby/cli/command/utils.go new file mode 100644 index 0000000..1837ca4 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/utils.go @@ -0,0 +1,87 @@ +package command + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" +) + +// CopyToFile writes the content of the reader to the specified file +func CopyToFile(outfile string, r io.Reader) error { + tmpFile, err := ioutil.TempFile(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// capitalizeFirst capitalizes the first character of string +func capitalizeFirst(s string) string { + switch l := len(s); l { + case 0: + return s + case 1: + return strings.ToLower(s) + default: + return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) + } +} + +// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. +func PrettyPrint(i interface{}) string { + switch t := i.(type) { + case nil: + return "None" + case string: + return capitalizeFirst(t) + default: + return capitalizeFirst(fmt.Sprintf("%s", t)) + } +} + +// PromptForConfirmation requests and checks confirmation from user. +// This will display the provided message followed by ' [y/N] '. If +// the user input 'y' or 'Y' it returns true other false. If no +// message is provided "Are you sure you want to proceed? [y/N] " +// will be used instead. +func PromptForConfirmation(ins *InStream, outs *OutStream, message string) bool { + if message == "" { + message = "Are you sure you want to proceed?" + } + message += " [y/N] " + + fmt.Fprintf(outs, message) + + // On Windows, force the use of the regular OS stdin stream. + if runtime.GOOS == "windows" { + ins = NewInStream(os.Stdin) + } + + answer := "" + n, _ := fmt.Fscan(ins, &answer) + if n != 1 || (answer != "y" && answer != "Y") { + return false + } + + return true +} diff --git a/vendor/github.com/moby/moby/cli/command/volume/cmd.go b/vendor/github.com/moby/moby/cli/command/volume/cmd.go new file mode 100644 index 0000000..40862f2 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/volume/cmd.go @@ -0,0 +1,45 @@ +package volume + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewVolumeCommand returns a cobra command for `volume` subcommands +func NewVolumeCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "volume COMMAND", + Short: "Manage volumes", + Long: volumeDescription, + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} + +var volumeDescription = ` +The **docker volume** command has subcommands for managing data volumes. A data +volume is a specially-designated directory that by-passes storage driver +management. + +Data volumes persist data independent of a container's life cycle. When you +delete a container, the Docker daemon does not delete any data volumes. You can +share volumes across multiple containers. Moreover, you can share data volumes +with other computing resources in your system. + +To see help for a subcommand, use: + + docker volume COMMAND --help + +For full details on using docker volume visit Docker's online documentation. + +` diff --git a/vendor/github.com/moby/moby/cli/command/volume/create.go b/vendor/github.com/moby/moby/cli/command/volume/create.go new file mode 100644 index 0000000..7b2a7e3 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/volume/create.go @@ -0,0 +1,111 @@ +package volume + +import ( + "fmt" + + "golang.org/x/net/context" + + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] [VOLUME]", + Short: "Create a volume", + Long: createDescription, + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 1 { + if opts.name != "" { + fmt.Fprint(dockerCli.Err(), "Conflicting options: either specify --name or provide positional arg, not both\n") + return cli.StatusError{StatusCode: 1} + } + opts.name = args[0] + } + return runCreate(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.StringVarP(&opts.driver, "driver", "d", "local", "Specify volume driver name") + flags.StringVar(&opts.name, "name", "", "Specify volume name") + flags.Lookup("name").Hidden = true + flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&opts.labels, "label", "Set metadata for a volume") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts createOptions) error { + client := dockerCli.Client() + + volReq := volumetypes.VolumesCreateBody{ + Driver: opts.driver, + DriverOpts: opts.driverOpts.GetAll(), + Name: opts.name, + Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), + } + + vol, err := client.VolumeCreate(context.Background(), volReq) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name) + return nil +} + +var createDescription = ` +Creates a new volume that containers can consume and store data in. If a name +is not specified, Docker generates a random name. You create a volume and then +configure the container to use it, for example: + + $ docker volume create hello + hello + $ docker run -d -v hello:/world busybox ls /world + +The mount is created inside the container's **/src** directory. Docker doesn't +not support relative paths for mount points inside the container. + +Multiple containers can use the same volume in the same time period. This is +useful if two containers need access to shared data. For example, if one +container writes and the other reads the data. + +## Driver specific options + +Some volume drivers may take options to customize the volume creation. Use the +**-o** or **--opt** flags to pass driver options: + + $ docker volume create --driver fake --opt tardis=blue --opt timey=wimey + +These options are passed directly to the volume driver. Options for different +volume drivers may do different things (or nothing at all). + +The built-in **local** driver on Windows does not support any options. + +The built-in **local** driver on Linux accepts options similar to the linux +**mount** command: + + $ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 + +Another example: + + $ docker volume create --driver local --opt type=btrfs --opt device=/dev/sda2 + +` diff --git a/vendor/github.com/moby/moby/cli/command/volume/inspect.go b/vendor/github.com/moby/moby/cli/command/volume/inspect.go new file mode 100644 index 0000000..5eb8ad2 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/volume/inspect.go @@ -0,0 +1,55 @@ +package volume + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] VOLUME [VOLUME...]", + Short: "Display detailed information on one or more volumes", + Long: inspectDescription, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getVolFunc := func(name string) (interface{}, []byte, error) { + i, err := client.VolumeInspect(ctx, name) + return i, nil, err + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc) +} + +var inspectDescription = ` +Returns information about one or more volumes. By default, this command renders +all results in a JSON array. You can specify an alternate format to execute a +given template is executed for each result. Go's https://golang.org/pkg/text/template/ +package describes all the details of the format. + +` diff --git a/vendor/github.com/moby/moby/cli/command/volume/list.go b/vendor/github.com/moby/moby/cli/command/volume/list.go new file mode 100644 index 0000000..d76006a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/volume/list.go @@ -0,0 +1,91 @@ +package volume + +import ( + "sort" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type byVolumeName []*types.Volume + +func (r byVolumeName) Len() int { return len(r) } +func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byVolumeName) Less(i, j int) bool { + return r[i].Name < r[j].Name +} + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List volumes", + Long: listDescription, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display volume names") + flags.StringVar(&opts.format, "format", "", "Pretty-print volumes using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 'dangling=true')") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + volumes, err := client.VolumeList(context.Background(), opts.filter.Value()) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().VolumesFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byVolumeName(volumes.Volumes)) + + volumeCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewVolumeFormat(format, opts.quiet), + } + return formatter.VolumeWrite(volumeCtx, volumes.Volumes) +} + +var listDescription = ` + +Lists all the volumes Docker manages. You can filter using the **-f** or +**--filter** flag. The filtering format is a **key=value** pair. To specify +more than one filter, pass multiple flags (for example, +**--filter "foo=bar" --filter "bif=baz"**) + +The currently supported filters are: + +* **dangling** (boolean - **true** or **false**, **1** or **0**) +* **driver** (a volume driver's name) +* **label** (**label=** or **label==**) +* **name** (a volume's name) + +` diff --git a/vendor/github.com/moby/moby/cli/command/volume/prune.go b/vendor/github.com/moby/moby/cli/command/volume/prune.go new file mode 100644 index 0000000..405fbeb --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/volume/prune.go @@ -0,0 +1,75 @@ +package volume + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool +} + +// NewPruneCommand returns a new cobra prune command for volumes +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused volumes", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + + return cmd +} + +const warning = `WARNING! This will remove all volumes not used by at least one container. +Are you sure you want to continue?` + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().VolumesPrune(context.Background(), filters.Args{}) + if err != nil { + return + } + + if len(report.VolumesDeleted) > 0 { + output = "Deleted Volumes:\n" + for _, id := range report.VolumesDeleted { + output += id + "\n" + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Volume Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true}) +} diff --git a/vendor/github.com/moby/moby/cli/command/volume/remove.go b/vendor/github.com/moby/moby/cli/command/volume/remove.go new file mode 100644 index 0000000..f464bb3 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/command/volume/remove.go @@ -0,0 +1,68 @@ +package volume + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool + + volumes []string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] VOLUME [VOLUME...]", + Aliases: []string{"remove"}, + Short: "Remove one or more volumes", + Long: removeDescription, + Example: removeExample, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.volumes = args + return runRemove(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of one or more volumes") + flags.SetAnnotation("force", "version", []string{"1.25"}) + return cmd +} + +func runRemove(dockerCli *command.DockerCli, opts *removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + status := 0 + + for _, name := range opts.volumes { + if err := client.VolumeRemove(ctx, name, opts.force); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +var removeDescription = ` +Remove one or more volumes. You cannot remove a volume that is in use by a container. +` + +var removeExample = ` +$ docker volume rm hello +hello +` diff --git a/vendor/github.com/moby/moby/cli/compose/convert/compose.go b/vendor/github.com/moby/moby/cli/compose/convert/compose.go new file mode 100644 index 0000000..8122326 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/convert/compose.go @@ -0,0 +1,116 @@ +package convert + +import ( + "io/ioutil" + + "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/swarm" + composetypes "github.com/docker/docker/cli/compose/types" +) + +const ( + // LabelNamespace is the label used to track stack resources + LabelNamespace = "com.docker.stack.namespace" +) + +// Namespace mangles names by prepending the name +type Namespace struct { + name string +} + +// Scope prepends the namespace to a name +func (n Namespace) Scope(name string) string { + return n.name + "_" + name +} + +// Name returns the name of the namespace +func (n Namespace) Name() string { + return n.name +} + +// NewNamespace returns a new Namespace for scoping of names +func NewNamespace(name string) Namespace { + return Namespace{name: name} +} + +// AddStackLabel returns labels with the namespace label added +func AddStackLabel(namespace Namespace, labels map[string]string) map[string]string { + if labels == nil { + labels = make(map[string]string) + } + labels[LabelNamespace] = namespace.name + return labels +} + +type networkMap map[string]composetypes.NetworkConfig + +// Networks converts networks from the compose-file type to the engine API type +func Networks( + namespace Namespace, + networks networkMap, + servicesNetworks map[string]struct{}, +) (map[string]types.NetworkCreate, []string) { + if networks == nil { + networks = make(map[string]composetypes.NetworkConfig) + } + + externalNetworks := []string{} + result := make(map[string]types.NetworkCreate) + + for internalName := range servicesNetworks { + network := networks[internalName] + if network.External.External { + externalNetworks = append(externalNetworks, network.External.Name) + continue + } + + createOpts := types.NetworkCreate{ + Labels: AddStackLabel(namespace, network.Labels), + Driver: network.Driver, + Options: network.DriverOpts, + Internal: network.Internal, + } + + if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 { + createOpts.IPAM = &networktypes.IPAM{} + } + + if network.Ipam.Driver != "" { + createOpts.IPAM.Driver = network.Ipam.Driver + } + for _, ipamConfig := range network.Ipam.Config { + config := networktypes.IPAMConfig{ + Subnet: ipamConfig.Subnet, + } + createOpts.IPAM.Config = append(createOpts.IPAM.Config, config) + } + result[internalName] = createOpts + } + + return result, externalNetworks +} + +// Secrets converts secrets from the Compose type to the engine API type +func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) ([]swarm.SecretSpec, error) { + result := []swarm.SecretSpec{} + for name, secret := range secrets { + if secret.External.External { + continue + } + + data, err := ioutil.ReadFile(secret.File) + if err != nil { + return nil, err + } + + result = append(result, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: namespace.Scope(name), + Labels: AddStackLabel(namespace, secret.Labels), + }, + Data: data, + }) + } + return result, nil +} diff --git a/vendor/github.com/moby/moby/cli/compose/convert/compose_test.go b/vendor/github.com/moby/moby/cli/compose/convert/compose_test.go new file mode 100644 index 0000000..f333d73 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/convert/compose_test.go @@ -0,0 +1,122 @@ +package convert + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil/tempfile" +) + +func TestNamespaceScope(t *testing.T) { + scoped := Namespace{name: "foo"}.Scope("bar") + assert.Equal(t, scoped, "foo_bar") +} + +func TestAddStackLabel(t *testing.T) { + labels := map[string]string{ + "something": "labeled", + } + actual := AddStackLabel(Namespace{name: "foo"}, labels) + expected := map[string]string{ + "something": "labeled", + LabelNamespace: "foo", + } + assert.DeepEqual(t, actual, expected) +} + +func TestNetworks(t *testing.T) { + namespace := Namespace{name: "foo"} + source := networkMap{ + "normal": composetypes.NetworkConfig{ + Driver: "overlay", + DriverOpts: map[string]string{ + "opt": "value", + }, + Ipam: composetypes.IPAMConfig{ + Driver: "driver", + Config: []*composetypes.IPAMPool{ + { + Subnet: "10.0.0.0", + }, + }, + }, + Labels: map[string]string{ + "something": "labeled", + }, + }, + "outside": composetypes.NetworkConfig{ + External: composetypes.External{ + External: true, + Name: "special", + }, + }, + } + expected := map[string]types.NetworkCreate{ + "default": { + Labels: map[string]string{ + LabelNamespace: "foo", + }, + }, + "normal": { + Driver: "overlay", + IPAM: &network.IPAM{ + Driver: "driver", + Config: []network.IPAMConfig{ + { + Subnet: "10.0.0.0", + }, + }, + }, + Options: map[string]string{ + "opt": "value", + }, + Labels: map[string]string{ + LabelNamespace: "foo", + "something": "labeled", + }, + }, + } + + serviceNetworks := map[string]struct{}{ + "default": {}, + "normal": {}, + "outside": {}, + } + networks, externals := Networks(namespace, source, serviceNetworks) + assert.DeepEqual(t, networks, expected) + assert.DeepEqual(t, externals, []string{"special"}) +} + +func TestSecrets(t *testing.T) { + namespace := Namespace{name: "foo"} + + secretText := "this is the first secret" + secretFile := tempfile.NewTempFile(t, "convert-secrets", secretText) + defer secretFile.Remove() + + source := map[string]composetypes.SecretConfig{ + "one": { + File: secretFile.Name(), + Labels: map[string]string{"monster": "mash"}, + }, + "ext": { + External: composetypes.External{ + External: true, + }, + }, + } + + specs, err := Secrets(namespace, source) + assert.NilError(t, err) + assert.Equal(t, len(specs), 1) + secret := specs[0] + assert.Equal(t, secret.Name, "foo_one") + assert.DeepEqual(t, secret.Labels, map[string]string{ + "monster": "mash", + LabelNamespace: "foo", + }) + assert.DeepEqual(t, secret.Data, []byte(secretText)) +} diff --git a/vendor/github.com/moby/moby/cli/compose/convert/service.go b/vendor/github.com/moby/moby/cli/compose/convert/service.go new file mode 100644 index 0000000..875a3b8 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/convert/service.go @@ -0,0 +1,423 @@ +package convert + +import ( + "fmt" + "os" + "sort" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + servicecli "github.com/docker/docker/cli/command/service" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" +) + +const defaultNetwork = "default" + +// Services from compose-file types to engine API types +// TODO: fix secrets API so that SecretAPIClient is not required here +func Services( + namespace Namespace, + config *composetypes.Config, + client client.SecretAPIClient, +) (map[string]swarm.ServiceSpec, error) { + result := make(map[string]swarm.ServiceSpec) + + services := config.Services + volumes := config.Volumes + networks := config.Networks + + for _, service := range services { + + secrets, err := convertServiceSecrets(client, namespace, service.Secrets, config.Secrets) + if err != nil { + return nil, err + } + serviceSpec, err := convertService(namespace, service, networks, volumes, secrets) + if err != nil { + return nil, err + } + result[service.Name] = serviceSpec + } + + return result, nil +} + +func convertService( + namespace Namespace, + service composetypes.ServiceConfig, + networkConfigs map[string]composetypes.NetworkConfig, + volumes map[string]composetypes.VolumeConfig, + secrets []*swarm.SecretReference, +) (swarm.ServiceSpec, error) { + name := namespace.Scope(service.Name) + + endpoint, err := convertEndpointSpec(service.Ports) + if err != nil { + return swarm.ServiceSpec{}, err + } + + mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas) + if err != nil { + return swarm.ServiceSpec{}, err + } + + mounts, err := Volumes(service.Volumes, volumes, namespace) + if err != nil { + // TODO: better error message (include service name) + return swarm.ServiceSpec{}, err + } + + resources, err := convertResources(service.Deploy.Resources) + if err != nil { + return swarm.ServiceSpec{}, err + } + + restartPolicy, err := convertRestartPolicy( + service.Restart, service.Deploy.RestartPolicy) + if err != nil { + return swarm.ServiceSpec{}, err + } + + healthcheck, err := convertHealthcheck(service.HealthCheck) + if err != nil { + return swarm.ServiceSpec{}, err + } + + networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name) + if err != nil { + return swarm.ServiceSpec{}, err + } + + var logDriver *swarm.Driver + if service.Logging != nil { + logDriver = &swarm.Driver{ + Name: service.Logging.Driver, + Options: service.Logging.Options, + } + } + + serviceSpec := swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: AddStackLabel(namespace, service.Deploy.Labels), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: service.Image, + Command: service.Entrypoint, + Args: service.Command, + Hostname: service.Hostname, + Hosts: sortStrings(convertExtraHosts(service.ExtraHosts)), + Healthcheck: healthcheck, + Env: sortStrings(convertEnvironment(service.Environment)), + Labels: AddStackLabel(namespace, service.Labels), + Dir: service.WorkingDir, + User: service.User, + Mounts: mounts, + StopGracePeriod: service.StopGracePeriod, + TTY: service.Tty, + OpenStdin: service.StdinOpen, + Secrets: secrets, + }, + LogDriver: logDriver, + Resources: resources, + RestartPolicy: restartPolicy, + Placement: &swarm.Placement{ + Constraints: service.Deploy.Placement.Constraints, + }, + }, + EndpointSpec: endpoint, + Mode: mode, + Networks: networks, + UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig), + } + + return serviceSpec, nil +} + +func sortStrings(strs []string) []string { + sort.Strings(strs) + return strs +} + +type byNetworkTarget []swarm.NetworkAttachmentConfig + +func (a byNetworkTarget) Len() int { return len(a) } +func (a byNetworkTarget) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byNetworkTarget) Less(i, j int) bool { return a[i].Target < a[j].Target } + +func convertServiceNetworks( + networks map[string]*composetypes.ServiceNetworkConfig, + networkConfigs networkMap, + namespace Namespace, + name string, +) ([]swarm.NetworkAttachmentConfig, error) { + if len(networks) == 0 { + networks = map[string]*composetypes.ServiceNetworkConfig{ + defaultNetwork: {}, + } + } + + nets := []swarm.NetworkAttachmentConfig{} + for networkName, network := range networks { + networkConfig, ok := networkConfigs[networkName] + if !ok && networkName != defaultNetwork { + return []swarm.NetworkAttachmentConfig{}, fmt.Errorf( + "service %q references network %q, which is not declared", name, networkName) + } + var aliases []string + if network != nil { + aliases = network.Aliases + } + target := namespace.Scope(networkName) + if networkConfig.External.External { + target = networkConfig.External.Name + } + nets = append(nets, swarm.NetworkAttachmentConfig{ + Target: target, + Aliases: append(aliases, name), + }) + } + + sort.Sort(byNetworkTarget(nets)) + + return nets, nil +} + +// TODO: fix secrets API so that SecretAPIClient is not required here +func convertServiceSecrets( + client client.SecretAPIClient, + namespace Namespace, + secrets []composetypes.ServiceSecretConfig, + secretSpecs map[string]composetypes.SecretConfig, +) ([]*swarm.SecretReference, error) { + opts := []*types.SecretRequestOption{} + for _, secret := range secrets { + target := secret.Target + if target == "" { + target = secret.Source + } + + source := namespace.Scope(secret.Source) + secretSpec := secretSpecs[secret.Source] + if secretSpec.External.External { + source = secretSpec.External.Name + } + + uid := secret.UID + gid := secret.GID + if uid == "" { + uid = "0" + } + if gid == "" { + gid = "0" + } + mode := secret.Mode + if mode == nil { + mode = uint32Ptr(0444) + } + + opts = append(opts, &types.SecretRequestOption{ + Source: source, + Target: target, + UID: uid, + GID: gid, + Mode: os.FileMode(*mode), + }) + } + + return servicecli.ParseSecrets(client, opts) +} + +func uint32Ptr(value uint32) *uint32 { + return &value +} + +func convertExtraHosts(extraHosts map[string]string) []string { + hosts := []string{} + for host, ip := range extraHosts { + hosts = append(hosts, fmt.Sprintf("%s %s", ip, host)) + } + return hosts +} + +func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) { + if healthcheck == nil { + return nil, nil + } + var ( + err error + timeout, interval time.Duration + retries int + ) + if healthcheck.Disable { + if len(healthcheck.Test) != 0 { + return nil, fmt.Errorf("test and disable can't be set at the same time") + } + return &container.HealthConfig{ + Test: []string{"NONE"}, + }, nil + + } + if healthcheck.Timeout != "" { + timeout, err = time.ParseDuration(healthcheck.Timeout) + if err != nil { + return nil, err + } + } + if healthcheck.Interval != "" { + interval, err = time.ParseDuration(healthcheck.Interval) + if err != nil { + return nil, err + } + } + if healthcheck.Retries != nil { + retries = int(*healthcheck.Retries) + } + return &container.HealthConfig{ + Test: healthcheck.Test, + Timeout: timeout, + Interval: interval, + Retries: retries, + }, nil +} + +func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) { + // TODO: log if restart is being ignored + if source == nil { + policy, err := runconfigopts.ParseRestartPolicy(restart) + if err != nil { + return nil, err + } + switch { + case policy.IsNone(): + return nil, nil + case policy.IsAlways(), policy.IsUnlessStopped(): + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionAny, + }, nil + case policy.IsOnFailure(): + attempts := uint64(policy.MaximumRetryCount) + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionOnFailure, + MaxAttempts: &attempts, + }, nil + default: + return nil, fmt.Errorf("unknown restart policy: %s", restart) + } + } + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyCondition(source.Condition), + Delay: source.Delay, + MaxAttempts: source.MaxAttempts, + Window: source.Window, + }, nil +} + +func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig { + if source == nil { + return nil + } + parallel := uint64(1) + if source.Parallelism != nil { + parallel = *source.Parallelism + } + return &swarm.UpdateConfig{ + Parallelism: parallel, + Delay: source.Delay, + FailureAction: source.FailureAction, + Monitor: source.Monitor, + MaxFailureRatio: source.MaxFailureRatio, + } +} + +func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) { + resources := &swarm.ResourceRequirements{} + var err error + if source.Limits != nil { + var cpus int64 + if source.Limits.NanoCPUs != "" { + cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs) + if err != nil { + return nil, err + } + } + resources.Limits = &swarm.Resources{ + NanoCPUs: cpus, + MemoryBytes: int64(source.Limits.MemoryBytes), + } + } + if source.Reservations != nil { + var cpus int64 + if source.Reservations.NanoCPUs != "" { + cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs) + if err != nil { + return nil, err + } + } + resources.Reservations = &swarm.Resources{ + NanoCPUs: cpus, + MemoryBytes: int64(source.Reservations.MemoryBytes), + } + } + return resources, nil + +} + +type byPublishedPort []swarm.PortConfig + +func (a byPublishedPort) Len() int { return len(a) } +func (a byPublishedPort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPublishedPort) Less(i, j int) bool { return a[i].PublishedPort < a[j].PublishedPort } + +func convertEndpointSpec(source []string) (*swarm.EndpointSpec, error) { + portConfigs := []swarm.PortConfig{} + ports, portBindings, err := nat.ParsePortSpecs(source) + if err != nil { + return nil, err + } + + for port := range ports { + portConfigs = append( + portConfigs, + opts.ConvertPortToPortConfig(port, portBindings)...) + } + + // Sorting to make sure these are always in the same order + sort.Sort(byPublishedPort(portConfigs)) + + return &swarm.EndpointSpec{Ports: portConfigs}, nil +} + +func convertEnvironment(source map[string]string) []string { + var output []string + + for name, value := range source { + output = append(output, fmt.Sprintf("%s=%s", name, value)) + } + + return output +} + +func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) { + serviceMode := swarm.ServiceMode{} + + switch mode { + case "global": + if replicas != nil { + return serviceMode, fmt.Errorf("replicas can only be used with replicated mode") + } + serviceMode.Global = &swarm.GlobalService{} + case "replicated", "": + serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas} + default: + return serviceMode, fmt.Errorf("Unknown mode: %s", mode) + } + return serviceMode, nil +} diff --git a/vendor/github.com/moby/moby/cli/compose/convert/service_test.go b/vendor/github.com/moby/moby/cli/compose/convert/service_test.go new file mode 100644 index 0000000..8153b65 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/convert/service_test.go @@ -0,0 +1,240 @@ +package convert + +import ( + "sort" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestConvertRestartPolicyFromNone(t *testing.T) { + policy, err := convertRestartPolicy("no", nil) + assert.NilError(t, err) + assert.Equal(t, policy, (*swarm.RestartPolicy)(nil)) +} + +func TestConvertRestartPolicyFromUnknown(t *testing.T) { + _, err := convertRestartPolicy("unknown", nil) + assert.Error(t, err, "unknown restart policy: unknown") +} + +func TestConvertRestartPolicyFromAlways(t *testing.T) { + policy, err := convertRestartPolicy("always", nil) + expected := &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionAny, + } + assert.NilError(t, err) + assert.DeepEqual(t, policy, expected) +} + +func TestConvertRestartPolicyFromFailure(t *testing.T) { + policy, err := convertRestartPolicy("on-failure:4", nil) + attempts := uint64(4) + expected := &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionOnFailure, + MaxAttempts: &attempts, + } + assert.NilError(t, err) + assert.DeepEqual(t, policy, expected) +} + +func TestConvertEnvironment(t *testing.T) { + source := map[string]string{ + "foo": "bar", + "key": "value", + } + env := convertEnvironment(source) + sort.Strings(env) + assert.DeepEqual(t, env, []string{"foo=bar", "key=value"}) +} + +func TestConvertResourcesFull(t *testing.T) { + source := composetypes.Resources{ + Limits: &composetypes.Resource{ + NanoCPUs: "0.003", + MemoryBytes: composetypes.UnitBytes(300000000), + }, + Reservations: &composetypes.Resource{ + NanoCPUs: "0.002", + MemoryBytes: composetypes.UnitBytes(200000000), + }, + } + resources, err := convertResources(source) + assert.NilError(t, err) + + expected := &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: 3000000, + MemoryBytes: 300000000, + }, + Reservations: &swarm.Resources{ + NanoCPUs: 2000000, + MemoryBytes: 200000000, + }, + } + assert.DeepEqual(t, resources, expected) +} + +func TestConvertResourcesOnlyMemory(t *testing.T) { + source := composetypes.Resources{ + Limits: &composetypes.Resource{ + MemoryBytes: composetypes.UnitBytes(300000000), + }, + Reservations: &composetypes.Resource{ + MemoryBytes: composetypes.UnitBytes(200000000), + }, + } + resources, err := convertResources(source) + assert.NilError(t, err) + + expected := &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + MemoryBytes: 300000000, + }, + Reservations: &swarm.Resources{ + MemoryBytes: 200000000, + }, + } + assert.DeepEqual(t, resources, expected) +} + +func TestConvertHealthcheck(t *testing.T) { + retries := uint64(10) + source := &composetypes.HealthCheckConfig{ + Test: []string{"EXEC", "touch", "/foo"}, + Timeout: "30s", + Interval: "2ms", + Retries: &retries, + } + expected := &container.HealthConfig{ + Test: source.Test, + Timeout: 30 * time.Second, + Interval: 2 * time.Millisecond, + Retries: 10, + } + + healthcheck, err := convertHealthcheck(source) + assert.NilError(t, err) + assert.DeepEqual(t, healthcheck, expected) +} + +func TestConvertHealthcheckDisable(t *testing.T) { + source := &composetypes.HealthCheckConfig{Disable: true} + expected := &container.HealthConfig{ + Test: []string{"NONE"}, + } + + healthcheck, err := convertHealthcheck(source) + assert.NilError(t, err) + assert.DeepEqual(t, healthcheck, expected) +} + +func TestConvertHealthcheckDisableWithTest(t *testing.T) { + source := &composetypes.HealthCheckConfig{ + Disable: true, + Test: []string{"EXEC", "touch"}, + } + _, err := convertHealthcheck(source) + assert.Error(t, err, "test and disable can't be set") +} + +func TestConvertServiceNetworksOnlyDefault(t *testing.T) { + networkConfigs := networkMap{} + + configs, err := convertServiceNetworks( + nil, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: "foo_default", + Aliases: []string{"service"}, + }, + } + + assert.NilError(t, err) + assert.DeepEqual(t, configs, expected) +} + +func TestConvertServiceNetworks(t *testing.T) { + networkConfigs := networkMap{ + "front": composetypes.NetworkConfig{ + External: composetypes.External{ + External: true, + Name: "fronttier", + }, + }, + "back": composetypes.NetworkConfig{}, + } + networks := map[string]*composetypes.ServiceNetworkConfig{ + "front": { + Aliases: []string{"something"}, + }, + "back": { + Aliases: []string{"other"}, + }, + } + + configs, err := convertServiceNetworks( + networks, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: "foo_back", + Aliases: []string{"other", "service"}, + }, + { + Target: "fronttier", + Aliases: []string{"something", "service"}, + }, + } + + sortedConfigs := byTargetSort(configs) + sort.Sort(&sortedConfigs) + + assert.NilError(t, err) + assert.DeepEqual(t, []swarm.NetworkAttachmentConfig(sortedConfigs), expected) +} + +func TestConvertServiceNetworksCustomDefault(t *testing.T) { + networkConfigs := networkMap{ + "default": composetypes.NetworkConfig{ + External: composetypes.External{ + External: true, + Name: "custom", + }, + }, + } + networks := map[string]*composetypes.ServiceNetworkConfig{} + + configs, err := convertServiceNetworks( + networks, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: "custom", + Aliases: []string{"service"}, + }, + } + + assert.NilError(t, err) + assert.DeepEqual(t, []swarm.NetworkAttachmentConfig(configs), expected) +} + +type byTargetSort []swarm.NetworkAttachmentConfig + +func (s byTargetSort) Len() int { + return len(s) +} + +func (s byTargetSort) Less(i, j int) bool { + return strings.Compare(s[i].Target, s[j].Target) < 0 +} + +func (s byTargetSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/moby/moby/cli/compose/convert/volume.go b/vendor/github.com/moby/moby/cli/compose/convert/volume.go new file mode 100644 index 0000000..24442d4 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/convert/volume.go @@ -0,0 +1,128 @@ +package convert + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/mount" + composetypes "github.com/docker/docker/cli/compose/types" +) + +type volumes map[string]composetypes.VolumeConfig + +// Volumes from compose-file types to engine api types +func Volumes(serviceVolumes []string, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) { + var mounts []mount.Mount + + for _, volumeSpec := range serviceVolumes { + mount, err := convertVolumeToMount(volumeSpec, stackVolumes, namespace) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + return mounts, nil +} + +func convertVolumeToMount(volumeSpec string, stackVolumes volumes, namespace Namespace) (mount.Mount, error) { + var source, target string + var mode []string + + // TODO: split Windows path mappings properly + parts := strings.SplitN(volumeSpec, ":", 3) + + for _, part := range parts { + if strings.TrimSpace(part) == "" { + return mount.Mount{}, fmt.Errorf("invalid volume: %s", volumeSpec) + } + } + + switch len(parts) { + case 3: + source = parts[0] + target = parts[1] + mode = strings.Split(parts[2], ",") + case 2: + source = parts[0] + target = parts[1] + case 1: + target = parts[0] + } + + if source == "" { + // Anonymous volume + return mount.Mount{ + Type: mount.TypeVolume, + Target: target, + }, nil + } + + // TODO: catch Windows paths here + if strings.HasPrefix(source, "/") { + return mount.Mount{ + Type: mount.TypeBind, + Source: source, + Target: target, + ReadOnly: isReadOnly(mode), + BindOptions: getBindOptions(mode), + }, nil + } + + stackVolume, exists := stackVolumes[source] + if !exists { + return mount.Mount{}, fmt.Errorf("undefined volume: %s", source) + } + + var volumeOptions *mount.VolumeOptions + if stackVolume.External.Name != "" { + source = stackVolume.External.Name + } else { + volumeOptions = &mount.VolumeOptions{ + Labels: AddStackLabel(namespace, stackVolume.Labels), + NoCopy: isNoCopy(mode), + } + + if stackVolume.Driver != "" { + volumeOptions.DriverConfig = &mount.Driver{ + Name: stackVolume.Driver, + Options: stackVolume.DriverOpts, + } + } + source = namespace.Scope(source) + } + return mount.Mount{ + Type: mount.TypeVolume, + Source: source, + Target: target, + ReadOnly: isReadOnly(mode), + VolumeOptions: volumeOptions, + }, nil +} + +func modeHas(mode []string, field string) bool { + for _, item := range mode { + if item == field { + return true + } + } + return false +} + +func isReadOnly(mode []string) bool { + return modeHas(mode, "ro") +} + +func isNoCopy(mode []string) bool { + return modeHas(mode, "nocopy") +} + +func getBindOptions(mode []string) *mount.BindOptions { + for _, item := range mode { + for _, propagation := range mount.Propagations { + if mount.Propagation(item) == propagation { + return &mount.BindOptions{Propagation: mount.Propagation(item)} + } + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/cli/compose/convert/volume_test.go b/vendor/github.com/moby/moby/cli/compose/convert/volume_test.go new file mode 100644 index 0000000..113ab1e --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/convert/volume_test.go @@ -0,0 +1,133 @@ +package convert + +import ( + "testing" + + "github.com/docker/docker/api/types/mount" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestIsReadOnly(t *testing.T) { + assert.Equal(t, isReadOnly([]string{"foo", "bar", "ro"}), true) + assert.Equal(t, isReadOnly([]string{"ro"}), true) + assert.Equal(t, isReadOnly([]string{}), false) + assert.Equal(t, isReadOnly([]string{"foo", "rw"}), false) + assert.Equal(t, isReadOnly([]string{"foo"}), false) +} + +func TestIsNoCopy(t *testing.T) { + assert.Equal(t, isNoCopy([]string{"foo", "bar", "nocopy"}), true) + assert.Equal(t, isNoCopy([]string{"nocopy"}), true) + assert.Equal(t, isNoCopy([]string{}), false) + assert.Equal(t, isNoCopy([]string{"foo", "rw"}), false) +} + +func TestGetBindOptions(t *testing.T) { + opts := getBindOptions([]string{"slave"}) + expected := mount.BindOptions{Propagation: mount.PropagationSlave} + assert.Equal(t, *opts, expected) +} + +func TestGetBindOptionsNone(t *testing.T) { + opts := getBindOptions([]string{"ro"}) + assert.Equal(t, opts, (*mount.BindOptions)(nil)) +} + +func TestConvertVolumeToMountNamedVolume(t *testing.T) { + stackVolumes := volumes{ + "normal": composetypes.VolumeConfig{ + Driver: "glusterfs", + DriverOpts: map[string]string{ + "opt": "value", + }, + Labels: map[string]string{ + "something": "labeled", + }, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "foo_normal", + Target: "/foo", + ReadOnly: true, + VolumeOptions: &mount.VolumeOptions{ + Labels: map[string]string{ + LabelNamespace: "foo", + "something": "labeled", + }, + DriverConfig: &mount.Driver{ + Name: "glusterfs", + Options: map[string]string{ + "opt": "value", + }, + }, + }, + } + mount, err := convertVolumeToMount("normal:/foo:ro", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mount, expected) +} + +func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) { + stackVolumes := volumes{ + "outside": composetypes.VolumeConfig{ + External: composetypes.External{ + External: true, + Name: "special", + }, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "special", + Target: "/foo", + } + mount, err := convertVolumeToMount("outside:/foo", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mount, expected) +} + +func TestConvertVolumeToMountBind(t *testing.T) { + stackVolumes := volumes{} + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeBind, + Source: "/bar", + Target: "/foo", + ReadOnly: true, + BindOptions: &mount.BindOptions{Propagation: mount.PropagationShared}, + } + mount, err := convertVolumeToMount("/bar:/foo:ro,shared", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mount, expected) +} + +func TestConvertVolumeToMountVolumeDoesNotExist(t *testing.T) { + namespace := NewNamespace("foo") + _, err := convertVolumeToMount("unknown:/foo:ro", volumes{}, namespace) + assert.Error(t, err, "undefined volume: unknown") +} + +func TestConvertVolumeToMountAnonymousVolume(t *testing.T) { + stackVolumes := map[string]composetypes.VolumeConfig{} + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Target: "/foo/bar", + } + mnt, err := convertVolumeToMount("/foo/bar", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mnt, expected) +} + +func TestConvertVolumeToMountInvalidFormat(t *testing.T) { + namespace := NewNamespace("foo") + invalids := []string{"::", "::cc", ":bb:", "aa::", "aa::cc", "aa:bb:", " : : ", " : :cc", " :bb: ", "aa: : ", "aa: :cc", "aa:bb: "} + for _, vol := range invalids { + _, err := convertVolumeToMount(vol, map[string]composetypes.VolumeConfig{}, namespace) + assert.Error(t, err, "invalid volume: "+vol) + } +} diff --git a/vendor/github.com/moby/moby/cli/compose/interpolation/interpolation.go b/vendor/github.com/moby/moby/cli/compose/interpolation/interpolation.go new file mode 100644 index 0000000..734f28e --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/interpolation/interpolation.go @@ -0,0 +1,90 @@ +package interpolation + +import ( + "fmt" + + "github.com/docker/docker/cli/compose/template" + "github.com/docker/docker/cli/compose/types" +) + +// Interpolate replaces variables in a string with the values from a mapping +func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) { + out := types.Dict{} + + for name, item := range config { + if item == nil { + out[name] = nil + continue + } + interpolatedItem, err := interpolateSectionItem(name, item.(types.Dict), section, mapping) + if err != nil { + return nil, err + } + out[name] = interpolatedItem + } + + return out, nil +} + +func interpolateSectionItem( + name string, + item types.Dict, + section string, + mapping template.Mapping, +) (types.Dict, error) { + + out := types.Dict{} + + for key, value := range item { + interpolatedValue, err := recursiveInterpolate(value, mapping) + if err != nil { + return nil, fmt.Errorf( + "Invalid interpolation format for %#v option in %s %#v: %#v", + key, section, name, err.Template, + ) + } + out[key] = interpolatedValue + } + + return out, nil + +} + +func recursiveInterpolate( + value interface{}, + mapping template.Mapping, +) (interface{}, *template.InvalidTemplateError) { + + switch value := value.(type) { + + case string: + return template.Substitute(value, mapping) + + case types.Dict: + out := types.Dict{} + for key, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, mapping) + if err != nil { + return nil, err + } + out[key] = interpolatedElem + } + return out, nil + + case []interface{}: + out := make([]interface{}, len(value)) + for i, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, mapping) + if err != nil { + return nil, err + } + out[i] = interpolatedElem + } + return out, nil + + default: + return value, nil + + } + +} diff --git a/vendor/github.com/moby/moby/cli/compose/interpolation/interpolation_test.go b/vendor/github.com/moby/moby/cli/compose/interpolation/interpolation_test.go new file mode 100644 index 0000000..c392170 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/interpolation/interpolation_test.go @@ -0,0 +1,59 @@ +package interpolation + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/docker/docker/cli/compose/types" +) + +var defaults = map[string]string{ + "USER": "jenny", + "FOO": "bar", +} + +func defaultMapping(name string) (string, bool) { + val, ok := defaults[name] + return val, ok +} + +func TestInterpolate(t *testing.T) { + services := types.Dict{ + "servicea": types.Dict{ + "image": "example:${USER}", + "volumes": []interface{}{"$FOO:/target"}, + "logging": types.Dict{ + "driver": "${FOO}", + "options": types.Dict{ + "user": "$USER", + }, + }, + }, + } + expected := types.Dict{ + "servicea": types.Dict{ + "image": "example:jenny", + "volumes": []interface{}{"bar:/target"}, + "logging": types.Dict{ + "driver": "bar", + "options": types.Dict{ + "user": "jenny", + }, + }, + }, + } + result, err := Interpolate(services, "service", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, expected, result) +} + +func TestInvalidInterpolation(t *testing.T) { + services := types.Dict{ + "servicea": types.Dict{ + "image": "${", + }, + } + _, err := Interpolate(services, "service", defaultMapping) + assert.EqualError(t, err, `Invalid interpolation format for "image" option in service "servicea": "${"`) +} diff --git a/vendor/github.com/moby/moby/cli/compose/loader/example1.env b/vendor/github.com/moby/moby/cli/compose/loader/example1.env new file mode 100644 index 0000000..3e7a059 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/loader/example1.env @@ -0,0 +1,8 @@ +# passed through +FOO=1 + +# overridden in example2.env +BAR=1 + +# overridden in full-example.yml +BAZ=1 diff --git a/vendor/github.com/moby/moby/cli/compose/loader/example2.env b/vendor/github.com/moby/moby/cli/compose/loader/example2.env new file mode 100644 index 0000000..0920d5a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/loader/example2.env @@ -0,0 +1 @@ +BAR=2 diff --git a/vendor/github.com/moby/moby/cli/compose/loader/full-example.yml b/vendor/github.com/moby/moby/cli/compose/loader/full-example.yml new file mode 100644 index 0000000..fb5686a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/loader/full-example.yml @@ -0,0 +1,287 @@ +version: "3" + +services: + foo: + cap_add: + - ALL + + cap_drop: + - NET_ADMIN + - SYS_ADMIN + + cgroup_parent: m-executor-abcd + + # String or list + command: bundle exec thin -p 3000 + # command: ["bundle", "exec", "thin", "-p", "3000"] + + container_name: my-web-container + + depends_on: + - db + - redis + + deploy: + mode: replicated + replicas: 6 + labels: [FOO=BAR] + update_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + resources: + limits: + cpus: '0.001' + memory: 50M + reservations: + cpus: '0.0001' + memory: 20M + restart_policy: + condition: on_failure + delay: 5s + max_attempts: 3 + window: 120s + placement: + constraints: [node=foo] + + devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + + # String or list + # dns: 8.8.8.8 + dns: + - 8.8.8.8 + - 9.9.9.9 + + # String or list + # dns_search: example.com + dns_search: + - dc1.example.com + - dc2.example.com + + domainname: foo.com + + # String or list + # entrypoint: /code/entrypoint.sh -p 3000 + entrypoint: ["/code/entrypoint.sh", "-p", "3000"] + + # String or list + # env_file: .env + env_file: + - ./example1.env + - ./example2.env + + # Mapping or list + # Mapping values can be strings, numbers or null + # Booleans are not allowed - must be quoted + environment: + RACK_ENV: development + SHOW: 'true' + SESSION_SECRET: + BAZ: 3 + # environment: + # - RACK_ENV=development + # - SHOW=true + # - SESSION_SECRET + + # Items can be strings or numbers + expose: + - "3000" + - 8000 + + external_links: + - redis_1 + - project_db_1:mysql + - project_db_1:postgresql + + # Mapping or list + # Mapping values must be strings + # extra_hosts: + # somehost: "162.242.195.82" + # otherhost: "50.31.209.229" + extra_hosts: + - "somehost:162.242.195.82" + - "otherhost:50.31.209.229" + + hostname: foo + + healthcheck: + test: echo "hello world" + interval: 10s + timeout: 1s + retries: 5 + + # Any valid image reference - repo, tag, id, sha + image: redis + # image: ubuntu:14.04 + # image: tutum/influxdb + # image: example-registry.com:4000/postgresql + # image: a4bc65fd + # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d + + ipc: host + + # Mapping or list + # Mapping values can be strings, numbers or null + labels: + com.example.description: "Accounting webapp" + com.example.number: 42 + com.example.empty-label: + # labels: + # - "com.example.description=Accounting webapp" + # - "com.example.number=42" + # - "com.example.empty-label" + + links: + - db + - db:database + - redis + + logging: + driver: syslog + options: + syslog-address: "tcp://192.168.0.42:123" + + mac_address: 02:42:ac:11:65:43 + + # network_mode: "bridge" + # network_mode: "host" + # network_mode: "none" + # Use the network mode of an arbitrary container from another service + # network_mode: "service:db" + # Use the network mode of another container, specified by name or id + # network_mode: "container:some-container" + network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b" + + networks: + some-network: + aliases: + - alias1 + - alias3 + other-network: + ipv4_address: 172.16.238.10 + ipv6_address: 2001:3984:3989::10 + other-other-network: + + pid: "host" + + ports: + - 3000 + - "3000-3005" + - "8000:8000" + - "9090-9091:8080-8081" + - "49100:22" + - "127.0.0.1:8001:8001" + - "127.0.0.1:5000-5010:5000-5010" + + privileged: true + + read_only: true + + restart: always + + security_opt: + - label=level:s0:c100,c200 + - label=type:svirt_apache_t + + stdin_open: true + + stop_grace_period: 20s + + stop_signal: SIGUSR1 + + # String or list + # tmpfs: /run + tmpfs: + - /run + - /tmp + + tty: true + + ulimits: + # Single number or mapping with soft + hard limits + nproc: 65535 + nofile: + soft: 20000 + hard: 40000 + + user: someone + + volumes: + # Just specify a path and let the Engine create a volume + - /var/lib/mysql + # Specify an absolute path mapping + - /opt/data:/var/lib/mysql + # Path on the host, relative to the Compose file + - .:/code + - ./static:/var/www/html + # User-relative path + - ~/configs:/etc/configs/:ro + # Named volume + - datavolume:/var/lib/mysql + + working_dir: /code + +networks: + # Entries can be null, which specifies simply that a network + # called "{project name}_some-network" should be created and + # use the default driver + some-network: + + other-network: + driver: overlay + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + ipam: + driver: overlay + # driver_opts: + # # Values can be strings or numbers + # com.docker.network.enable_ipv6: "true" + # com.docker.network.numeric_value: 1 + config: + - subnet: 172.16.238.0/24 + # gateway: 172.16.238.1 + - subnet: 2001:3984:3989::/64 + # gateway: 2001:3984:3989::1 + + external-network: + # Specifies that a pre-existing network called "external-network" + # can be referred to within this file as "external-network" + external: true + + other-external-network: + # Specifies that a pre-existing network called "my-cool-network" + # can be referred to within this file as "other-external-network" + external: + name: my-cool-network + +volumes: + # Entries can be null, which specifies simply that a volume + # called "{project name}_some-volume" should be created and + # use the default driver + some-volume: + + other-volume: + driver: flocker + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + external-volume: + # Specifies that a pre-existing volume called "external-volume" + # can be referred to within this file as "external-volume" + external: true + + other-external-volume: + # Specifies that a pre-existing volume called "my-cool-volume" + # can be referred to within this file as "other-external-volume" + external: + name: my-cool-volume diff --git a/vendor/github.com/moby/moby/cli/compose/loader/loader.go b/vendor/github.com/moby/moby/cli/compose/loader/loader.go new file mode 100644 index 0000000..39f69a0 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/loader/loader.go @@ -0,0 +1,653 @@ +package loader + +import ( + "fmt" + "os" + "path" + "reflect" + "regexp" + "sort" + "strings" + + "github.com/docker/docker/cli/compose/interpolation" + "github.com/docker/docker/cli/compose/schema" + "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/runconfig/opts" + units "github.com/docker/go-units" + shellwords "github.com/mattn/go-shellwords" + "github.com/mitchellh/mapstructure" + yaml "gopkg.in/yaml.v2" +) + +var ( + fieldNameRegexp = regexp.MustCompile("[A-Z][a-z0-9]+") +) + +// ParseYAML reads the bytes from a file, parses the bytes into a mapping +// structure, and returns it. +func ParseYAML(source []byte) (types.Dict, error) { + var cfg interface{} + if err := yaml.Unmarshal(source, &cfg); err != nil { + return nil, err + } + cfgMap, ok := cfg.(map[interface{}]interface{}) + if !ok { + return nil, fmt.Errorf("Top-level object must be a mapping") + } + converted, err := convertToStringKeysRecursive(cfgMap, "") + if err != nil { + return nil, err + } + return converted.(types.Dict), nil +} + +// Load reads a ConfigDetails and returns a fully loaded configuration +func Load(configDetails types.ConfigDetails) (*types.Config, error) { + if len(configDetails.ConfigFiles) < 1 { + return nil, fmt.Errorf("No files specified") + } + if len(configDetails.ConfigFiles) > 1 { + return nil, fmt.Errorf("Multiple files are not yet supported") + } + + configDict := getConfigDict(configDetails) + + if services, ok := configDict["services"]; ok { + if servicesDict, ok := services.(types.Dict); ok { + forbidden := getProperties(servicesDict, types.ForbiddenProperties) + + if len(forbidden) > 0 { + return nil, &ForbiddenPropertiesError{Properties: forbidden} + } + } + } + + if err := schema.Validate(configDict, schema.Version(configDict)); err != nil { + return nil, err + } + + cfg := types.Config{} + if services, ok := configDict["services"]; ok { + servicesConfig, err := interpolation.Interpolate(services.(types.Dict), "service", os.LookupEnv) + if err != nil { + return nil, err + } + + servicesList, err := loadServices(servicesConfig, configDetails.WorkingDir) + if err != nil { + return nil, err + } + + cfg.Services = servicesList + } + + if networks, ok := configDict["networks"]; ok { + networksConfig, err := interpolation.Interpolate(networks.(types.Dict), "network", os.LookupEnv) + if err != nil { + return nil, err + } + + networksMapping, err := loadNetworks(networksConfig) + if err != nil { + return nil, err + } + + cfg.Networks = networksMapping + } + + if volumes, ok := configDict["volumes"]; ok { + volumesConfig, err := interpolation.Interpolate(volumes.(types.Dict), "volume", os.LookupEnv) + if err != nil { + return nil, err + } + + volumesMapping, err := loadVolumes(volumesConfig) + if err != nil { + return nil, err + } + + cfg.Volumes = volumesMapping + } + + if secrets, ok := configDict["secrets"]; ok { + secretsConfig, err := interpolation.Interpolate(secrets.(types.Dict), "secret", os.LookupEnv) + if err != nil { + return nil, err + } + + secretsMapping, err := loadSecrets(secretsConfig, configDetails.WorkingDir) + if err != nil { + return nil, err + } + + cfg.Secrets = secretsMapping + } + + return &cfg, nil +} + +// GetUnsupportedProperties returns the list of any unsupported properties that are +// used in the Compose files. +func GetUnsupportedProperties(configDetails types.ConfigDetails) []string { + unsupported := map[string]bool{} + + for _, service := range getServices(getConfigDict(configDetails)) { + serviceDict := service.(types.Dict) + for _, property := range types.UnsupportedProperties { + if _, isSet := serviceDict[property]; isSet { + unsupported[property] = true + } + } + } + + return sortedKeys(unsupported) +} + +func sortedKeys(set map[string]bool) []string { + var keys []string + for key := range set { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +// GetDeprecatedProperties returns the list of any deprecated properties that +// are used in the compose files. +func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string { + return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties) +} + +func getProperties(services types.Dict, propertyMap map[string]string) map[string]string { + output := map[string]string{} + + for _, service := range services { + if serviceDict, ok := service.(types.Dict); ok { + for property, description := range propertyMap { + if _, isSet := serviceDict[property]; isSet { + output[property] = description + } + } + } + } + + return output +} + +// ForbiddenPropertiesError is returned when there are properties in the Compose +// file that are forbidden. +type ForbiddenPropertiesError struct { + Properties map[string]string +} + +func (e *ForbiddenPropertiesError) Error() string { + return "Configuration contains forbidden properties" +} + +// TODO: resolve multiple files into a single config +func getConfigDict(configDetails types.ConfigDetails) types.Dict { + return configDetails.ConfigFiles[0].Config +} + +func getServices(configDict types.Dict) types.Dict { + if services, ok := configDict["services"]; ok { + if servicesDict, ok := services.(types.Dict); ok { + return servicesDict + } + } + + return types.Dict{} +} + +func transform(source map[string]interface{}, target interface{}) error { + data := mapstructure.Metadata{} + config := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + transformHook, + mapstructure.StringToTimeDurationHookFunc()), + Result: target, + Metadata: &data, + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + err = decoder.Decode(source) + // TODO: log unused keys + return err +} + +func transformHook( + source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + switch target { + case reflect.TypeOf(types.External{}): + return transformExternal(data) + case reflect.TypeOf(make(map[string]string, 0)): + return transformMapStringString(source, target, data) + case reflect.TypeOf(types.UlimitsConfig{}): + return transformUlimits(data) + case reflect.TypeOf(types.UnitBytes(0)): + return loadSize(data) + case reflect.TypeOf(types.ServiceSecretConfig{}): + return transformServiceSecret(data) + } + switch target.Kind() { + case reflect.Struct: + return transformStruct(source, target, data) + } + return data, nil +} + +// keys needs to be converted to strings for jsonschema +// TODO: don't use types.Dict +func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) { + if mapping, ok := value.(map[interface{}]interface{}); ok { + dict := make(types.Dict) + for key, entry := range mapping { + str, ok := key.(string) + if !ok { + var location string + if keyPrefix == "" { + location = "at top level" + } else { + location = fmt.Sprintf("in %s", keyPrefix) + } + return nil, fmt.Errorf("Non-string key %s: %#v", location, key) + } + var newKeyPrefix string + if keyPrefix == "" { + newKeyPrefix = str + } else { + newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str) + } + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + dict[str] = convertedEntry + } + return dict, nil + } + if list, ok := value.([]interface{}); ok { + var convertedList []interface{} + for index, entry := range list { + newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index) + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + convertedList = append(convertedList, convertedEntry) + } + return convertedList, nil + } + return value, nil +} + +func loadServices(servicesDict types.Dict, workingDir string) ([]types.ServiceConfig, error) { + var services []types.ServiceConfig + + for name, serviceDef := range servicesDict { + serviceConfig, err := loadService(name, serviceDef.(types.Dict), workingDir) + if err != nil { + return nil, err + } + services = append(services, *serviceConfig) + } + + return services, nil +} + +func loadService(name string, serviceDict types.Dict, workingDir string) (*types.ServiceConfig, error) { + serviceConfig := &types.ServiceConfig{} + if err := transform(serviceDict, serviceConfig); err != nil { + return nil, err + } + serviceConfig.Name = name + + if err := resolveEnvironment(serviceConfig, serviceDict, workingDir); err != nil { + return nil, err + } + + if err := resolveVolumePaths(serviceConfig.Volumes, workingDir); err != nil { + return nil, err + } + + return serviceConfig, nil +} + +func resolveEnvironment(serviceConfig *types.ServiceConfig, serviceDict types.Dict, workingDir string) error { + environment := make(map[string]string) + + if envFileVal, ok := serviceDict["env_file"]; ok { + envFiles := loadStringOrListOfStrings(envFileVal) + + var envVars []string + + for _, file := range envFiles { + filePath := absPath(workingDir, file) + fileVars, err := opts.ParseEnvFile(filePath) + if err != nil { + return err + } + envVars = append(envVars, fileVars...) + } + + for k, v := range opts.ConvertKVStringsToMap(envVars) { + environment[k] = v + } + } + + for k, v := range serviceConfig.Environment { + environment[k] = v + } + + serviceConfig.Environment = environment + + return nil +} + +func resolveVolumePaths(volumes []string, workingDir string) error { + for i, mapping := range volumes { + parts := strings.SplitN(mapping, ":", 2) + if len(parts) == 1 { + continue + } + + if strings.HasPrefix(parts[0], ".") { + parts[0] = absPath(workingDir, parts[0]) + } + parts[0] = expandUser(parts[0]) + + volumes[i] = strings.Join(parts, ":") + } + + return nil +} + +// TODO: make this more robust +func expandUser(path string) string { + if strings.HasPrefix(path, "~") { + return strings.Replace(path, "~", os.Getenv("HOME"), 1) + } + return path +} + +func transformUlimits(data interface{}) (interface{}, error) { + switch value := data.(type) { + case int: + return types.UlimitsConfig{Single: value}, nil + case types.Dict: + ulimit := types.UlimitsConfig{} + ulimit.Soft = value["soft"].(int) + ulimit.Hard = value["hard"].(int) + return ulimit, nil + default: + return data, fmt.Errorf("invalid type %T for ulimits", value) + } +} + +func loadNetworks(source types.Dict) (map[string]types.NetworkConfig, error) { + networks := make(map[string]types.NetworkConfig) + err := transform(source, &networks) + if err != nil { + return networks, err + } + for name, network := range networks { + if network.External.External && network.External.Name == "" { + network.External.Name = name + networks[name] = network + } + } + return networks, nil +} + +func loadVolumes(source types.Dict) (map[string]types.VolumeConfig, error) { + volumes := make(map[string]types.VolumeConfig) + err := transform(source, &volumes) + if err != nil { + return volumes, err + } + for name, volume := range volumes { + if volume.External.External && volume.External.Name == "" { + volume.External.Name = name + volumes[name] = volume + } + } + return volumes, nil +} + +// TODO: remove duplicate with networks/volumes +func loadSecrets(source types.Dict, workingDir string) (map[string]types.SecretConfig, error) { + secrets := make(map[string]types.SecretConfig) + if err := transform(source, &secrets); err != nil { + return secrets, err + } + for name, secret := range secrets { + if secret.External.External && secret.External.Name == "" { + secret.External.Name = name + secrets[name] = secret + } + if secret.File != "" { + secret.File = absPath(workingDir, secret.File) + } + } + return secrets, nil +} + +func absPath(workingDir string, filepath string) string { + if path.IsAbs(filepath) { + return filepath + } + return path.Join(workingDir, filepath) +} + +func transformStruct( + source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + structValue, ok := data.(map[string]interface{}) + if !ok { + // FIXME: this is necessary because of convertToStringKeysRecursive + structValue, ok = data.(types.Dict) + if !ok { + panic(fmt.Sprintf( + "transformStruct called with non-map type: %T, %s", data, data)) + } + } + + var err error + for i := 0; i < target.NumField(); i++ { + field := target.Field(i) + fieldTag := field.Tag.Get("compose") + + yamlName := toYAMLName(field.Name) + value, ok := structValue[yamlName] + if !ok { + continue + } + + structValue[yamlName], err = convertField( + fieldTag, reflect.TypeOf(value), field.Type, value) + if err != nil { + return nil, fmt.Errorf("field %s: %s", yamlName, err.Error()) + } + } + return structValue, nil +} + +func transformMapStringString( + source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + switch value := data.(type) { + case map[string]interface{}: + return toMapStringString(value), nil + case types.Dict: + return toMapStringString(value), nil + case map[string]string: + return value, nil + default: + return data, fmt.Errorf("invalid type %T for map[string]string", value) + } +} + +func convertField( + fieldTag string, + source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + switch fieldTag { + case "": + return data, nil + case "healthcheck": + return loadHealthcheck(data) + case "list_or_dict_equals": + return loadMappingOrList(data, "="), nil + case "list_or_dict_colon": + return loadMappingOrList(data, ":"), nil + case "list_or_struct_map": + return loadListOrStructMap(data, target) + case "string_or_list": + return loadStringOrListOfStrings(data), nil + case "list_of_strings_or_numbers": + return loadListOfStringsOrNumbers(data), nil + case "shell_command": + return loadShellCommand(data) + case "size": + return loadSize(data) + case "-": + return nil, nil + } + return data, nil +} + +func transformExternal(data interface{}) (interface{}, error) { + switch value := data.(type) { + case bool: + return map[string]interface{}{"external": value}, nil + case types.Dict: + return map[string]interface{}{"external": true, "name": value["name"]}, nil + case map[string]interface{}: + return map[string]interface{}{"external": true, "name": value["name"]}, nil + default: + return data, fmt.Errorf("invalid type %T for external", value) + } +} + +func transformServiceSecret(data interface{}) (interface{}, error) { + switch value := data.(type) { + case string: + return map[string]interface{}{"source": value}, nil + case types.Dict: + return data, nil + case map[string]interface{}: + return data, nil + default: + return data, fmt.Errorf("invalid type %T for external", value) + } + +} + +func toYAMLName(name string) string { + nameParts := fieldNameRegexp.FindAllString(name, -1) + for i, p := range nameParts { + nameParts[i] = strings.ToLower(p) + } + return strings.Join(nameParts, "_") +} + +func loadListOrStructMap(value interface{}, target reflect.Type) (interface{}, error) { + if list, ok := value.([]interface{}); ok { + mapValue := map[interface{}]interface{}{} + for _, name := range list { + mapValue[name] = nil + } + return mapValue, nil + } + + return value, nil +} + +func loadListOfStringsOrNumbers(value interface{}) []string { + list := value.([]interface{}) + result := make([]string, len(list)) + for i, item := range list { + result[i] = fmt.Sprint(item) + } + return result +} + +func loadStringOrListOfStrings(value interface{}) []string { + if list, ok := value.([]interface{}); ok { + result := make([]string, len(list)) + for i, item := range list { + result[i] = fmt.Sprint(item) + } + return result + } + return []string{value.(string)} +} + +func loadMappingOrList(mappingOrList interface{}, sep string) map[string]string { + if mapping, ok := mappingOrList.(types.Dict); ok { + return toMapStringString(mapping) + } + if list, ok := mappingOrList.([]interface{}); ok { + result := make(map[string]string) + for _, value := range list { + parts := strings.SplitN(value.(string), sep, 2) + if len(parts) == 1 { + result[parts[0]] = "" + } else { + result[parts[0]] = parts[1] + } + } + return result + } + panic(fmt.Errorf("expected a map or a slice, got: %#v", mappingOrList)) +} + +func loadShellCommand(value interface{}) (interface{}, error) { + if str, ok := value.(string); ok { + return shellwords.Parse(str) + } + return value, nil +} + +func loadHealthcheck(value interface{}) (interface{}, error) { + if str, ok := value.(string); ok { + return append([]string{"CMD-SHELL"}, str), nil + } + return value, nil +} + +func loadSize(value interface{}) (int64, error) { + switch value := value.(type) { + case int: + return int64(value), nil + case string: + return units.RAMInBytes(value) + } + panic(fmt.Errorf("invalid type for size %T", value)) +} + +func toMapStringString(value map[string]interface{}) map[string]string { + output := make(map[string]string) + for key, value := range value { + output[key] = toString(value) + } + return output +} + +func toString(value interface{}) string { + if value == nil { + return "" + } + return fmt.Sprint(value) +} diff --git a/vendor/github.com/moby/moby/cli/compose/loader/loader_test.go b/vendor/github.com/moby/moby/cli/compose/loader/loader_test.go new file mode 100644 index 0000000..f7fee89 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/loader/loader_test.go @@ -0,0 +1,800 @@ +package loader + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "testing" + "time" + + "github.com/docker/docker/cli/compose/types" + "github.com/stretchr/testify/assert" +) + +func buildConfigDetails(source types.Dict) types.ConfigDetails { + workingDir, err := os.Getwd() + if err != nil { + panic(err) + } + + return types.ConfigDetails{ + WorkingDir: workingDir, + ConfigFiles: []types.ConfigFile{ + {Filename: "filename.yml", Config: source}, + }, + Environment: nil, + } +} + +var sampleYAML = ` +version: "3" +services: + foo: + image: busybox + networks: + with_me: + bar: + image: busybox + environment: + - FOO=1 + networks: + - with_ipam +volumes: + hello: + driver: default + driver_opts: + beep: boop +networks: + default: + driver: bridge + driver_opts: + beep: boop + with_ipam: + ipam: + driver: default + config: + - subnet: 172.28.0.0/16 +` + +var sampleDict = types.Dict{ + "version": "3", + "services": types.Dict{ + "foo": types.Dict{ + "image": "busybox", + "networks": types.Dict{"with_me": nil}, + }, + "bar": types.Dict{ + "image": "busybox", + "environment": []interface{}{"FOO=1"}, + "networks": []interface{}{"with_ipam"}, + }, + }, + "volumes": types.Dict{ + "hello": types.Dict{ + "driver": "default", + "driver_opts": types.Dict{ + "beep": "boop", + }, + }, + }, + "networks": types.Dict{ + "default": types.Dict{ + "driver": "bridge", + "driver_opts": types.Dict{ + "beep": "boop", + }, + }, + "with_ipam": types.Dict{ + "ipam": types.Dict{ + "driver": "default", + "config": []interface{}{ + types.Dict{ + "subnet": "172.28.0.0/16", + }, + }, + }, + }, + }, +} + +var sampleConfig = types.Config{ + Services: []types.ServiceConfig{ + { + Name: "foo", + Image: "busybox", + Environment: map[string]string{}, + Networks: map[string]*types.ServiceNetworkConfig{ + "with_me": nil, + }, + }, + { + Name: "bar", + Image: "busybox", + Environment: map[string]string{"FOO": "1"}, + Networks: map[string]*types.ServiceNetworkConfig{ + "with_ipam": nil, + }, + }, + }, + Networks: map[string]types.NetworkConfig{ + "default": { + Driver: "bridge", + DriverOpts: map[string]string{ + "beep": "boop", + }, + }, + "with_ipam": { + Ipam: types.IPAMConfig{ + Driver: "default", + Config: []*types.IPAMPool{ + { + Subnet: "172.28.0.0/16", + }, + }, + }, + }, + }, + Volumes: map[string]types.VolumeConfig{ + "hello": { + Driver: "default", + DriverOpts: map[string]string{ + "beep": "boop", + }, + }, + }, +} + +func TestParseYAML(t *testing.T) { + dict, err := ParseYAML([]byte(sampleYAML)) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, sampleDict, dict) +} + +func TestLoad(t *testing.T) { + actual, err := Load(buildConfigDetails(sampleDict)) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) + assert.Equal(t, sampleConfig.Networks, actual.Networks) + assert.Equal(t, sampleConfig.Volumes, actual.Volumes) +} + +func TestLoadV31(t *testing.T) { + actual, err := loadYAML(` +version: "3.1" +services: + foo: + image: busybox + secrets: [super] +secrets: + super: + external: true +`) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, len(actual.Services), 1) + assert.Equal(t, len(actual.Secrets), 1) +} + +func TestParseAndLoad(t *testing.T) { + actual, err := loadYAML(sampleYAML) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) + assert.Equal(t, sampleConfig.Networks, actual.Networks) + assert.Equal(t, sampleConfig.Volumes, actual.Volumes) +} + +func TestInvalidTopLevelObjectType(t *testing.T) { + _, err := loadYAML("1") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") + + _, err = loadYAML("\"hello\"") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") + + _, err = loadYAML("[\"hello\"]") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") +} + +func TestNonStringKeys(t *testing.T) { + _, err := loadYAML(` +version: "3" +123: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key at top level: 123") + + _, err = loadYAML(` +version: "3" +services: + foo: + image: busybox + 123: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in services: 123") + + _, err = loadYAML(` +version: "3" +services: + foo: + image: busybox +networks: + default: + ipam: + config: + - 123: oh dear +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in networks.default.ipam.config[0]: 123") + + _, err = loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + 1: FOO +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in services.dict-env.environment: 1") +} + +func TestSupportedVersion(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: busybox +`) + assert.NoError(t, err) + + _, err = loadYAML(` +version: "3.0" +services: + foo: + image: busybox +`) + assert.NoError(t, err) +} + +func TestUnsupportedVersion(t *testing.T) { + _, err := loadYAML(` +version: "2" +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version") + + _, err = loadYAML(` +version: "2.0" +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version") +} + +func TestInvalidVersion(t *testing.T) { + _, err := loadYAML(` +version: 3 +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version must be a string") +} + +func TestV1Unsupported(t *testing.T) { + _, err := loadYAML(` +foo: + image: busybox +`) + assert.Error(t, err) +} + +func TestNonMappingObject(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + - foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services must be a mapping") + + _, err = loadYAML(` +version: "3" +services: + foo: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.foo must be a mapping") + + _, err = loadYAML(` +version: "3" +networks: + - default: + driver: bridge +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "networks must be a mapping") + + _, err = loadYAML(` +version: "3" +networks: + default: bridge +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "networks.default must be a mapping") + + _, err = loadYAML(` +version: "3" +volumes: + - data: + driver: local +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "volumes must be a mapping") + + _, err = loadYAML(` +version: "3" +volumes: + data: local +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "volumes.data must be a mapping") +} + +func TestNonStringImage(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: ["busybox", "latest"] +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.foo.image must be a string") +} + +func TestValidEnvironment(t *testing.T) { + config, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + FOO: "1" + BAR: 2 + BAZ: 2.5 + QUUX: + list-env: + image: busybox + environment: + - FOO=1 + - BAR=2 + - BAZ=2.5 + - QUUX= +`) + assert.NoError(t, err) + + expected := map[string]string{ + "FOO": "1", + "BAR": "2", + "BAZ": "2.5", + "QUUX": "", + } + + assert.Equal(t, 2, len(config.Services)) + + for _, service := range config.Services { + assert.Equal(t, expected, service.Environment) + } +} + +func TestInvalidEnvironmentValue(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + FOO: ["1"] +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.dict-env.environment.FOO must be a string, number or null") +} + +func TestInvalidEnvironmentObject(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: "FOO=1" +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.dict-env.environment must be a mapping") +} + +func TestEnvironmentInterpolation(t *testing.T) { + config, err := loadYAML(` +version: "3" +services: + test: + image: busybox + labels: + - home1=$HOME + - home2=${HOME} + - nonexistent=$NONEXISTENT + - default=${NONEXISTENT-default} +networks: + test: + driver: $HOME +volumes: + test: + driver: $HOME +`) + + assert.NoError(t, err) + + home := os.Getenv("HOME") + + expectedLabels := map[string]string{ + "home1": home, + "home2": home, + "nonexistent": "", + "default": "default", + } + + assert.Equal(t, expectedLabels, config.Services[0].Labels) + assert.Equal(t, home, config.Networks["test"].Driver) + assert.Equal(t, home, config.Volumes["test"].Driver) +} + +func TestUnsupportedProperties(t *testing.T) { + dict, err := ParseYAML([]byte(` +version: "3" +services: + web: + image: web + build: ./web + links: + - bar + db: + image: db + build: ./db +`)) + assert.NoError(t, err) + + configDetails := buildConfigDetails(dict) + + _, err = Load(configDetails) + assert.NoError(t, err) + + unsupported := GetUnsupportedProperties(configDetails) + assert.Equal(t, []string{"build", "links"}, unsupported) +} + +func TestDeprecatedProperties(t *testing.T) { + dict, err := ParseYAML([]byte(` +version: "3" +services: + web: + image: web + container_name: web + db: + image: db + container_name: db + expose: ["5434"] +`)) + assert.NoError(t, err) + + configDetails := buildConfigDetails(dict) + + _, err = Load(configDetails) + assert.NoError(t, err) + + deprecated := GetDeprecatedProperties(configDetails) + assert.Equal(t, 2, len(deprecated)) + assert.Contains(t, deprecated, "container_name") + assert.Contains(t, deprecated, "expose") +} + +func TestForbiddenProperties(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: busybox + volumes: + - /data + volume_driver: some-driver + bar: + extends: + service: foo +`) + + assert.Error(t, err) + assert.IsType(t, &ForbiddenPropertiesError{}, err) + fmt.Println(err) + forbidden := err.(*ForbiddenPropertiesError).Properties + + assert.Equal(t, 2, len(forbidden)) + assert.Contains(t, forbidden, "volume_driver") + assert.Contains(t, forbidden, "extends") +} + +func durationPtr(value time.Duration) *time.Duration { + return &value +} + +func int64Ptr(value int64) *int64 { + return &value +} + +func uint64Ptr(value uint64) *uint64 { + return &value +} + +func TestFullExample(t *testing.T) { + bytes, err := ioutil.ReadFile("full-example.yml") + assert.NoError(t, err) + + config, err := loadYAML(string(bytes)) + if !assert.NoError(t, err) { + return + } + + workingDir, err := os.Getwd() + assert.NoError(t, err) + + homeDir := os.Getenv("HOME") + stopGracePeriod := time.Duration(20 * time.Second) + + expectedServiceConfig := types.ServiceConfig{ + Name: "foo", + + CapAdd: []string{"ALL"}, + CapDrop: []string{"NET_ADMIN", "SYS_ADMIN"}, + CgroupParent: "m-executor-abcd", + Command: []string{"bundle", "exec", "thin", "-p", "3000"}, + ContainerName: "my-web-container", + DependsOn: []string{"db", "redis"}, + Deploy: types.DeployConfig{ + Mode: "replicated", + Replicas: uint64Ptr(6), + Labels: map[string]string{"FOO": "BAR"}, + UpdateConfig: &types.UpdateConfig{ + Parallelism: uint64Ptr(3), + Delay: time.Duration(10 * time.Second), + FailureAction: "continue", + Monitor: time.Duration(60 * time.Second), + MaxFailureRatio: 0.3, + }, + Resources: types.Resources{ + Limits: &types.Resource{ + NanoCPUs: "0.001", + MemoryBytes: 50 * 1024 * 1024, + }, + Reservations: &types.Resource{ + NanoCPUs: "0.0001", + MemoryBytes: 20 * 1024 * 1024, + }, + }, + RestartPolicy: &types.RestartPolicy{ + Condition: "on_failure", + Delay: durationPtr(5 * time.Second), + MaxAttempts: uint64Ptr(3), + Window: durationPtr(2 * time.Minute), + }, + Placement: types.Placement{ + Constraints: []string{"node=foo"}, + }, + }, + Devices: []string{"/dev/ttyUSB0:/dev/ttyUSB0"}, + DNS: []string{"8.8.8.8", "9.9.9.9"}, + DNSSearch: []string{"dc1.example.com", "dc2.example.com"}, + DomainName: "foo.com", + Entrypoint: []string{"/code/entrypoint.sh", "-p", "3000"}, + Environment: map[string]string{ + "RACK_ENV": "development", + "SHOW": "true", + "SESSION_SECRET": "", + "FOO": "1", + "BAR": "2", + "BAZ": "3", + }, + Expose: []string{"3000", "8000"}, + ExternalLinks: []string{ + "redis_1", + "project_db_1:mysql", + "project_db_1:postgresql", + }, + ExtraHosts: map[string]string{ + "otherhost": "50.31.209.229", + "somehost": "162.242.195.82", + }, + HealthCheck: &types.HealthCheckConfig{ + Test: []string{ + "CMD-SHELL", + "echo \"hello world\"", + }, + Interval: "10s", + Timeout: "1s", + Retries: uint64Ptr(5), + }, + Hostname: "foo", + Image: "redis", + Ipc: "host", + Labels: map[string]string{ + "com.example.description": "Accounting webapp", + "com.example.number": "42", + "com.example.empty-label": "", + }, + Links: []string{ + "db", + "db:database", + "redis", + }, + Logging: &types.LoggingConfig{ + Driver: "syslog", + Options: map[string]string{ + "syslog-address": "tcp://192.168.0.42:123", + }, + }, + MacAddress: "02:42:ac:11:65:43", + NetworkMode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b", + Networks: map[string]*types.ServiceNetworkConfig{ + "some-network": { + Aliases: []string{"alias1", "alias3"}, + Ipv4Address: "", + Ipv6Address: "", + }, + "other-network": { + Ipv4Address: "172.16.238.10", + Ipv6Address: "2001:3984:3989::10", + }, + "other-other-network": nil, + }, + Pid: "host", + Ports: []string{ + "3000", + "3000-3005", + "8000:8000", + "9090-9091:8080-8081", + "49100:22", + "127.0.0.1:8001:8001", + "127.0.0.1:5000-5010:5000-5010", + }, + Privileged: true, + ReadOnly: true, + Restart: "always", + SecurityOpt: []string{ + "label=level:s0:c100,c200", + "label=type:svirt_apache_t", + }, + StdinOpen: true, + StopSignal: "SIGUSR1", + StopGracePeriod: &stopGracePeriod, + Tmpfs: []string{"/run", "/tmp"}, + Tty: true, + Ulimits: map[string]*types.UlimitsConfig{ + "nproc": { + Single: 65535, + }, + "nofile": { + Soft: 20000, + Hard: 40000, + }, + }, + User: "someone", + Volumes: []string{ + "/var/lib/mysql", + "/opt/data:/var/lib/mysql", + fmt.Sprintf("%s:/code", workingDir), + fmt.Sprintf("%s/static:/var/www/html", workingDir), + fmt.Sprintf("%s/configs:/etc/configs/:ro", homeDir), + "datavolume:/var/lib/mysql", + }, + WorkingDir: "/code", + } + + assert.Equal(t, []types.ServiceConfig{expectedServiceConfig}, config.Services) + + expectedNetworkConfig := map[string]types.NetworkConfig{ + "some-network": {}, + + "other-network": { + Driver: "overlay", + DriverOpts: map[string]string{ + "foo": "bar", + "baz": "1", + }, + Ipam: types.IPAMConfig{ + Driver: "overlay", + Config: []*types.IPAMPool{ + {Subnet: "172.16.238.0/24"}, + {Subnet: "2001:3984:3989::/64"}, + }, + }, + }, + + "external-network": { + External: types.External{ + Name: "external-network", + External: true, + }, + }, + + "other-external-network": { + External: types.External{ + Name: "my-cool-network", + External: true, + }, + }, + } + + assert.Equal(t, expectedNetworkConfig, config.Networks) + + expectedVolumeConfig := map[string]types.VolumeConfig{ + "some-volume": {}, + "other-volume": { + Driver: "flocker", + DriverOpts: map[string]string{ + "foo": "bar", + "baz": "1", + }, + }, + "external-volume": { + External: types.External{ + Name: "external-volume", + External: true, + }, + }, + "other-external-volume": { + External: types.External{ + Name: "my-cool-volume", + External: true, + }, + }, + } + + assert.Equal(t, expectedVolumeConfig, config.Volumes) +} + +func loadYAML(yaml string) (*types.Config, error) { + dict, err := ParseYAML([]byte(yaml)) + if err != nil { + return nil, err + } + + return Load(buildConfigDetails(dict)) +} + +func serviceSort(services []types.ServiceConfig) []types.ServiceConfig { + sort.Sort(servicesByName(services)) + return services +} + +type servicesByName []types.ServiceConfig + +func (sbn servicesByName) Len() int { return len(sbn) } +func (sbn servicesByName) Swap(i, j int) { sbn[i], sbn[j] = sbn[j], sbn[i] } +func (sbn servicesByName) Less(i, j int) bool { return sbn[i].Name < sbn[j].Name } diff --git a/vendor/github.com/moby/moby/cli/compose/schema/bindata.go b/vendor/github.com/moby/moby/cli/compose/schema/bindata.go new file mode 100644 index 0000000..129f544 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/schema/bindata.go @@ -0,0 +1,260 @@ +// Code generated by go-bindata. +// sources: +// data/config_schema_v3.0.json +// data/config_schema_v3.1.json +// DO NOT EDIT! + +package schema + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _dataConfig_schema_v30Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5a\x4b\x8f\xdb\x38\x12\xbe\xfb\x57\x08\x4a\x6e\x71\x77\x07\xd8\x60\x81\xcd\x6d\x8f\x7b\xda\x39\x4f\x43\x11\x68\xa9\x6c\x33\x4d\x91\x4c\x91\x72\xda\x09\xfc\xdf\x07\xd4\xcb\x14\x4d\x8a\xb2\xad\x3c\x30\x98\x53\xb7\xc5\xaa\x62\xbd\xf8\x55\xb1\xa4\xef\xab\x24\x49\xdf\xaa\x62\x0f\x15\x49\x3f\x26\xe9\x5e\x6b\xf9\xf1\xe9\xe9\xb3\x12\xfc\xa1\x7d\xfa\x28\x70\xf7\x54\x22\xd9\xea\x87\xf7\x1f\x9e\xda\x67\x6f\xd2\xb5\xe1\xa3\xa5\x61\x29\x04\xdf\xd2\x5d\xde\xae\xe4\x87\x7f\x3d\xbe\x7f\x34\xec\x2d\x89\x3e\x4a\x30\x44\x62\xf3\x19\x0a\xdd\x3e\x43\xf8\x52\x53\x04\xc3\xfc\x9c\x1e\x00\x15\x15\x3c\xcd\xd6\x2b\xb3\x26\x51\x48\x40\x4d\x41\xa5\x1f\x13\xa3\x5c\x92\x0c\x24\xfd\x03\x4b\xac\xd2\x48\xf9\x2e\x6d\x1e\x9f\x1a\x09\x49\x92\x2a\xc0\x03\x2d\x2c\x09\x83\xaa\x6f\x9e\xce\xf2\x9f\x06\xb2\xb5\x2b\xd5\x52\xb6\x79\x2e\x89\xd6\x80\xfc\x8f\x4b\xdd\x9a\xe5\x4f\xcf\xe4\xe1\xdb\x7f\x1f\xfe\x7c\xff\xf0\x9f\xc7\xfc\x21\x7b\xf7\x76\xb4\x6c\xfc\x8b\xb0\x6d\xb7\x2f\x61\x4b\x39\xd5\x54\xf0\x61\xff\x74\xa0\x3c\x75\xff\x9d\x86\x8d\x49\x59\x36\xc4\x84\x8d\xf6\xde\x12\xa6\x60\x6c\x33\x07\xfd\x55\xe0\x4b\xcc\xe6\x81\xec\x17\xd9\xdc\xed\xef\xb1\x79\x6c\xce\x41\xb0\xba\x8a\x46\xb0\xa7\xfa\x45\xc6\xb4\xdb\xdf\x17\xbf\x55\x6f\xf4\x24\x6d\x4b\x61\xed\xdd\x28\x38\xca\x76\x9f\xab\x7c\xd9\x16\xf6\xd5\xe0\xac\x80\x97\x4a\x90\x4c\x1c\xcd\xb3\x80\x3f\x5a\x82\x0a\xb8\x4e\x07\x17\x24\x49\xba\xa9\x29\x2b\x5d\x8f\x0a\x0e\xff\x37\x22\x9e\xad\x87\x49\xf2\xdd\x3d\xd8\x96\x9c\x66\x7d\xf4\x2b\x1c\xf0\x61\x3d\x60\xcb\xb0\x5e\x08\xae\xe1\x55\x37\x46\x4d\x6f\xdd\xba\x40\x14\x2f\x80\x5b\xca\x60\x2e\x07\xc1\x9d\x9a\x70\x19\xa3\x4a\xe7\x02\xf3\x92\x16\x3a\x3d\x39\xec\x17\xf2\xe2\xf9\x34\xb0\x5a\xbf\xb2\x95\x47\x60\x5a\x10\x99\x93\xb2\x1c\xd9\x41\x10\xc9\x31\x5d\x27\x29\xd5\x50\x29\xbf\x89\x49\x5a\x73\xfa\xa5\x86\xff\x75\x24\x1a\x6b\x70\xe5\x96\x28\xe4\xf2\x82\x77\x28\x6a\x99\x4b\x82\x26\xc1\xa6\xdd\x9f\x16\xa2\xaa\x08\x5f\x2a\xeb\xae\xb1\x63\x86\xe7\x05\xd7\x84\x72\xc0\x9c\x93\x2a\x96\x48\xe6\xd4\x01\x2f\x55\xde\xd6\xbf\xc9\x34\xda\xe6\x2d\xbf\x72\x04\x0c\xc5\x70\xd1\x78\x94\x7c\x2a\xb1\x5b\x31\x26\xb5\x8d\x6e\xa9\xc3\x98\x2b\x20\x58\xec\x6f\xe4\x17\x15\xa1\x7c\x8e\xef\x80\x6b\x3c\x4a\x41\xdb\x7c\xf9\xed\x12\x01\xf8\x21\x1f\xb0\xe4\x6a\x37\x00\x3f\x50\x14\xbc\xea\x4f\xc3\x1c\x80\x19\x40\xde\xf0\xbf\x4a\xa1\xc0\x75\x8c\x63\xa0\xbd\x34\x98\x3a\xf2\x49\xcf\xf1\xdc\x1b\xbe\x4e\x52\x5e\x57\x1b\x40\xd3\xd2\x8d\x28\xb7\x02\x2b\x62\x94\xed\xf7\xb6\x96\x47\x9e\xf6\x64\x9e\xed\x40\xdb\x06\x53\xd6\x09\xcb\x19\xe5\x2f\xcb\xa7\x38\xbc\x6a\x24\xf9\x5e\x28\x3d\x1f\xc3\x2d\xf6\x3d\x10\xa6\xf7\xc5\x1e\x8a\x97\x09\x76\x9b\x6a\xc4\x2d\x94\x9e\x93\xe4\xb4\x22\xbb\x38\x91\x2c\x62\x24\x8c\x6c\x80\xdd\x64\xe7\xa2\xce\xb7\xc4\x8a\xdd\xce\x90\x86\x32\xee\xa2\x73\xe9\x96\x63\x35\xbf\x44\x7a\x00\x9c\x5b\xc0\x85\x3c\x37\x5c\xee\x62\xbc\x01\x49\xe2\xdd\xe7\x88\xf4\xd3\x63\xdb\x7c\x4e\x9c\xaa\xe6\x3f\xc6\xd2\xcc\x6d\x17\x12\xa7\xee\xfb\x9e\x38\x16\xce\x6b\x28\x46\x51\xa9\x48\x61\xfa\x06\x04\x15\x88\xeb\x99\xb4\x6b\xf6\xf3\x4a\x94\xa1\x04\xbd\x20\x76\x7d\x13\x44\xea\xab\x0b\x61\x72\x53\xff\x38\x2b\x74\xd1\x0b\x44\xc4\x9a\x90\x7a\x73\xd5\x3c\xab\x1b\x4f\xb1\x86\x8e\x30\x4a\x14\xc4\x0f\x7b\xd0\x91\x23\x69\x54\x1e\x3e\xcc\xcc\x09\x1f\xef\xbf\x27\x79\x03\xac\x41\x99\xf3\x7b\xe4\x88\xa8\xb3\x2a\xcd\x71\xf3\x29\x92\x45\x4e\xdb\x0f\x6e\xe1\x25\x2d\xc3\x58\xd1\x20\x84\x7d\xc0\xa4\x40\x7d\x71\xba\x7e\x4e\xb9\x6f\xb7\xbe\xbb\xda\x4b\xa4\x07\xca\x60\x07\xe3\x5b\xcb\x46\x08\x06\x84\x8f\xa0\x07\x81\x94\xb9\xe0\xec\x38\x83\x52\x69\x82\xd1\x0b\x85\x82\xa2\x46\xaa\x8f\xb9\x90\x7a\xf1\x3e\x43\xed\xab\x5c\xd1\x6f\x30\x8e\xe6\x19\xef\x3b\x41\xd9\x88\xe7\xa8\x0a\x7d\x5b\xbd\x56\xba\xa4\x3c\x17\x12\x78\xd4\x3b\x4a\x0b\x99\xef\x90\x14\x90\x4b\x40\x2a\x4a\x9f\x81\x6b\x3b\xd6\x65\x8d\xc4\xec\x7f\x29\x46\xd1\x1d\x27\x2c\xe6\x68\x5d\xc9\xed\x8d\x17\x0b\xad\xe3\xe1\xae\x19\xad\x68\xf8\x1c\x78\x00\x76\x46\x0d\x68\xf1\xdf\x0f\xfb\x13\x90\x7f\xd6\x94\x72\x0d\x3b\x40\x1f\x52\x4e\x74\x1d\xd3\x4d\xc7\x8c\x6e\x63\x4f\x70\x1c\xd0\x09\x3d\x1a\x06\x25\xb6\xda\xcf\xe0\xeb\x45\xbc\x7a\x8d\x86\xbf\x8d\xbc\x75\xa7\x48\xe6\xa5\xbf\x0a\xce\x5d\x35\xb2\x20\xa2\x9e\xbc\x88\x5a\xab\x68\x63\xd8\xd0\x70\x35\xd5\xd4\x0c\xa4\xd6\x14\x73\x51\xbc\x30\x8d\x92\x39\x04\x25\xf5\x6b\xbb\x72\x2c\xbb\x62\x8e\xec\xdc\x59\x7a\x01\xbe\x89\xa2\x4d\x1a\x9d\xc0\x4e\x4f\x37\x3b\xa2\xe0\xe4\x91\x2a\xb2\x71\x66\x6e\xbe\xc3\x6d\xb2\x11\x0f\x71\x8c\x41\xd0\x48\x9d\xb8\x74\x68\x3b\xc2\x13\x50\xbf\xe7\xe0\x40\xd3\x0a\x44\xed\xaf\x59\x2b\x3b\xbf\x3b\xa6\xd4\x9a\xcc\x46\x82\x6a\x51\xba\x31\x7d\x1e\x82\xda\xf7\x17\xd1\xc0\xcd\x39\x24\x08\x92\xd1\x82\xa8\x18\x10\xdd\x71\x41\xad\x65\x49\x34\xe4\xed\x8b\xaa\xab\xa0\x7f\x02\xf3\x25\x41\xc2\x18\x30\xaa\xaa\x39\x18\x9a\x96\xc0\xc8\xf1\xa6\xf2\xd9\xb0\x6f\x09\x65\x35\x42\x4e\x0a\xdd\xbd\x0b\x8b\xe4\x5c\x5a\x09\x4e\xb5\xf0\x22\xc4\xbc\x2d\x2b\xf2\x9a\xf7\xdb\x36\x24\xde\x03\x13\x6c\xeb\xe6\xde\x2d\xad\x4c\x50\xa2\xc6\xe2\xc2\xd9\x37\x87\xe8\x5c\xeb\x03\x19\xd3\xef\x78\x61\x3a\x82\x32\x48\x32\x5c\xfd\xa3\xfc\xd1\xd2\xd2\xf5\x99\xb9\x14\x8c\x16\xc7\xa5\x2c\x2c\x04\x6f\x9d\x3c\x27\x21\xee\xcc\x40\x93\x0e\xa6\x15\xaa\xa4\x8e\x1e\xd6\x86\xe1\x2b\xe5\xa5\xf8\x7a\xc5\x86\xcb\xa5\x92\x64\xa4\x00\x07\xef\xee\x75\xb4\xd2\x48\x28\xd7\x57\x97\xf3\x7b\xcd\xba\xa3\x9a\x0f\xf9\x19\x41\xfd\x81\x2e\xfe\x26\x35\x80\xf4\x85\xac\xa3\xf3\xa0\x0a\x2a\x81\xde\x04\x5c\xe0\xcd\x77\xcc\xc4\x9e\x6c\x81\xaa\x36\x6b\x80\xd8\x51\x99\xfb\xe2\xe2\xb7\x8d\xf8\x90\x30\x8b\x03\x12\x95\xa4\x5a\xea\x74\xcc\x1e\xa9\xa6\xde\x1a\x9c\x4c\x8f\x22\x92\xf0\x38\x22\xa6\x75\x5c\xf7\x8e\x42\xd5\x1b\x0e\x93\x1d\x95\xe5\x4f\xdf\x7b\xde\xf9\xd7\x94\x53\xf8\x52\x72\x1f\xe8\xf5\x6f\x43\x02\x51\x7d\x1e\x7a\xe6\xf5\xe0\xab\x6c\x76\x88\x83\xaf\x22\x96\xd3\xbf\x69\xdf\xdd\x11\x81\xaf\xcf\xbf\xb2\x13\xbc\x03\x5c\xba\x4f\x3c\x22\xd8\xd2\x51\xfd\x03\x2d\x7f\x93\x44\xfc\x79\xf9\xe5\x4c\xb3\xac\x3c\xbb\xbc\x68\x4e\xa5\xc4\xec\x31\x7e\xc7\x91\x8d\xd5\x70\xc9\x3c\x5f\xda\x8d\x61\x79\x6a\x7a\xd1\x93\x04\xc6\xba\xce\xa6\x9d\x13\xa7\x2d\x5f\x30\xc3\x1f\xdf\x4d\x14\x9f\xa9\xd7\x6d\x3f\x08\xb5\x17\x98\x0c\xf9\x63\xea\x74\xac\xbd\x77\x2f\x3f\x17\x0b\x80\x9a\xc5\x7f\xf1\xf1\x98\xb1\x93\x1f\x2f\x06\x21\xdf\xc7\xd3\xbd\xf6\xc3\xaf\x6c\xe4\x1f\x87\xa4\x7d\x79\x6d\x41\x4a\x66\x37\xf1\xa1\x30\x7a\x3f\x29\x73\x67\x8b\xfd\xa7\x5d\x99\x1f\xae\x56\xf6\xdf\xe6\x33\xbc\xd5\x69\xf5\x57\x00\x00\x00\xff\xff\x78\x30\xec\x51\x0e\x2b\x00\x00") + +func dataConfig_schema_v30JsonBytes() ([]byte, error) { + return bindataRead( + _dataConfig_schema_v30Json, + "data/config_schema_v3.0.json", + ) +} + +func dataConfig_schema_v30Json() (*asset, error) { + bytes, err := dataConfig_schema_v30JsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/config_schema_v3.0.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataConfig_schema_v31Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x1a\xcb\x8e\xdb\x36\xf0\xee\xaf\x10\x94\xdc\xe2\xdd\x4d\xd1\xa0\x40\x73\xeb\xb1\xa7\xf6\xdc\x85\x23\xd0\xd2\x58\x66\x96\x22\x19\x92\x72\xd6\x09\xfc\xef\x05\xf5\x32\x45\x91\x22\x6d\x2b\xd9\x45\xd1\xd3\xae\xc5\x99\xe1\xbc\x67\x38\xe4\xf7\x55\x92\xa4\x6f\x65\xbe\x87\x0a\xa5\x1f\x93\x74\xaf\x14\xff\xf8\xf0\xf0\x59\x32\x7a\xd7\x7e\xbd\x67\xa2\x7c\x28\x04\xda\xa9\xbb\xf7\x1f\x1e\xda\x6f\x6f\xd2\xb5\xc6\xc3\x85\x46\xc9\x19\xdd\xe1\x32\x6b\x57\xb2\xc3\xaf\xf7\xbf\xdc\x6b\xf4\x16\x44\x1d\x39\x68\x20\xb6\xfd\x0c\xb9\x6a\xbf\x09\xf8\x52\x63\x01\x1a\xf9\x31\x3d\x80\x90\x98\xd1\x74\xb3\x5e\xe9\x35\x2e\x18\x07\xa1\x30\xc8\xf4\x63\xa2\x99\x4b\x92\x01\xa4\xff\x60\x90\x95\x4a\x60\x5a\xa6\xcd\xe7\x53\x43\x21\x49\x52\x09\xe2\x80\x73\x83\xc2\xc0\xea\x9b\x87\x33\xfd\x87\x01\x6c\x6d\x53\x35\x98\x6d\xbe\x73\xa4\x14\x08\xfa\xf7\x94\xb7\x66\xf9\xd3\x23\xba\xfb\xf6\xc7\xdd\x3f\xef\xef\x7e\xbf\xcf\xee\x36\xef\xde\x8e\x96\xb5\x7e\x05\xec\xda\xed\x0b\xd8\x61\x8a\x15\x66\x74\xd8\x3f\x1d\x20\x4f\xdd\x7f\xa7\x61\x63\x54\x14\x0d\x30\x22\xa3\xbd\x77\x88\x48\x18\xcb\x4c\x41\x7d\x65\xe2\x29\x24\xf3\x00\xf6\x42\x32\x77\xfb\x3b\x64\x1e\x8b\x73\x60\xa4\xae\x82\x16\xec\xa1\x5e\x48\x98\x76\xfb\x65\xec\x27\x21\x17\xa0\xc2\x2e\xdb\x42\xbd\x98\xc7\xea\xed\x6f\x13\x78\xd5\x0b\x3d\x0b\xdb\x42\x18\x7b\x37\x0c\x8e\xc2\xdb\xa5\x2a\x57\x78\xf9\x75\x35\x28\xcb\xa3\xa5\x02\x38\x61\x47\xfd\xcd\xa3\x8f\x16\xa0\x02\xaa\xd2\x41\x05\x49\x92\x6e\x6b\x4c\x0a\x5b\xa3\x8c\xc2\x5f\x9a\xc4\xa3\xf1\x31\x49\xbe\xdb\x99\xcc\xa0\xd3\xac\x8f\x7e\xf9\x0d\x3e\xac\x7b\x64\x19\xd6\x73\x46\x15\x3c\xab\x46\xa8\xf9\xad\x5b\x15\xb0\xfc\x09\xc4\x0e\x13\x88\xc5\x40\xa2\x94\x33\x2a\x23\x58\xaa\x8c\x89\xac\xc0\xb9\x4a\x4f\x16\xfa\x84\x5e\xd8\x9f\x06\x54\xe3\xd7\x66\xe5\x20\x98\xe6\x88\x67\xa8\x28\x46\x72\x20\x21\xd0\x31\x5d\x27\x29\x56\x50\x49\xb7\x88\x49\x5a\x53\xfc\xa5\x86\x3f\x3b\x10\x25\x6a\xb0\xe9\x16\x82\xf1\xe5\x09\x97\x82\xd5\x3c\xe3\x48\x68\x07\x9b\x57\x7f\x9a\xb3\xaa\x42\x74\x29\xaf\xbb\x44\x8e\x08\xcd\x33\xaa\x10\xa6\x20\x32\x8a\xaa\x90\x23\xe9\xa8\x03\x5a\xc8\xac\x2d\xf8\xb3\x6e\xb4\xcb\x5a\x7c\x69\x11\x18\xaa\xff\xa2\xf6\x28\xe8\x9c\x63\xb7\x64\xb4\x6b\x6b\xde\x52\x0b\x31\x93\x80\x44\xbe\xbf\x12\x9f\x55\x08\xd3\x18\xdd\x01\x55\xe2\xc8\x19\x6e\xfd\xe5\xd5\x39\x02\xd0\x43\x36\xe4\x92\x8b\xd5\x00\xf4\x80\x05\xa3\x55\x1f\x0d\x31\x09\x66\x48\xf2\x1a\xff\x99\x33\x09\xb6\x62\x2c\x01\xcd\xa5\x41\xd4\x91\x4e\x7a\x8c\xc7\x5e\xf0\x75\x92\xd2\xba\xda\x82\xd0\x3d\xec\x08\x72\xc7\x44\x85\x34\xb3\xfd\xde\xc6\xf2\x48\xd3\x0e\xcf\x33\x15\x68\xca\xa0\xcb\x3a\x22\x19\xc1\xf4\x69\x79\x17\x87\x67\x25\x50\xb6\x67\x52\xc5\xe7\x70\x03\x7d\x0f\x88\xa8\x7d\xbe\x87\xfc\x69\x06\xdd\x84\x1a\x61\x33\xa9\x62\x9c\x1c\x57\xa8\x0c\x03\xf1\x3c\x04\x42\xd0\x16\xc8\x55\x72\x2e\xaa\x7c\x83\x2c\x2b\x4b\x0d\xea\xf3\xb8\x49\xe7\xd2\x2d\x87\x6a\x7e\x21\xf0\x01\x44\x6c\x01\x67\xfc\xdc\x70\xd9\x8b\xe1\x06\x24\x09\x77\x9f\x23\xd0\x4f\xf7\x6d\xf3\x39\x13\x55\xcd\x7f\x84\xa4\x1b\xbb\x5d\x48\xac\xba\xef\xfa\x62\x49\x18\xd7\x50\x8c\xac\x52\xa1\x5c\xf7\x0d\x02\xa4\xc7\xae\x67\xd0\xee\x74\x93\x55\xac\xf0\x39\xe8\x04\xd8\xd6\x8d\x37\x53\x5f\x5c\x08\x93\xab\xfa\xc7\x28\xd3\x05\x0f\x10\x01\x69\x7c\xec\xc5\xb2\x79\x66\x37\xec\x62\x0d\x1c\x22\x18\x49\x08\x07\xbb\x57\x91\x23\x6a\x98\x1f\x3e\x44\xfa\x84\x0b\xf7\xb7\x59\x5c\x0f\xaa\x97\x66\x7c\x8f\x1c\x20\x75\x66\xa5\x09\x37\x17\x23\x9b\x40\xb4\xfd\xe0\x16\x9e\xe3\xc2\x9f\x2b\x9a\x0c\x61\x06\x18\x67\x42\x4d\xa2\xeb\xe7\x94\xfb\x76\xeb\x9b\xab\x3d\x17\xf8\x80\x09\x94\x30\x3e\xb5\x6c\x19\x23\x80\xe8\x28\xf5\x08\x40\x45\xc6\x28\x39\x46\x40\x4a\x85\x44\xf0\x40\x21\x21\xaf\x05\x56\xc7\x8c\x71\xb5\x78\x9f\x21\xf7\x55\x26\xf1\x37\x18\x5b\xf3\x9c\xef\x3b\x42\x1b\x8b\x21\x6b\x42\x72\xa5\x41\x7d\x29\x29\x1c\xc6\x8e\x44\x18\x4c\x54\xe1\x14\x95\x4a\x56\x8b\x3c\xf6\x80\xad\xf7\x44\xa2\x84\xd8\x23\xbc\x76\xb7\x71\xd8\xcc\x03\x97\x97\x00\x4f\x0a\x5d\x67\xc2\x50\x55\xb6\x7f\x9b\x79\xe5\xe4\x0c\x7d\x79\x94\xb9\xba\xae\x5b\x93\xaa\xc0\x34\x63\x1c\x68\x30\x36\xa4\x62\x3c\x2b\x05\xca\x21\xe3\x20\x30\x73\xaa\x62\x6d\x46\x7a\x51\x0b\xa4\xf7\x9f\x92\x91\xb8\xa4\x88\x84\xc2\x4c\x55\x7c\x77\xe5\xb1\x52\xa9\x70\xb0\xd7\x04\x57\xd8\x1f\x34\x0e\xaf\x8d\xe8\x00\xda\xea\xef\x2e\xfa\x33\x05\xff\xcc\x29\xa6\x0a\x4a\xed\x26\x53\xa7\x9a\xe9\x39\xe7\x5b\xce\x88\x5e\x73\x8f\xc4\xd8\xa0\x33\x7c\x24\x6d\x60\xee\x94\x1b\xc1\xd5\x89\x3a\xf9\x1a\xdd\x75\x34\xf4\xd6\x1d\x23\x1b\x27\xfc\x45\xc5\xdc\x66\x63\xe3\xad\xa7\xee\xa0\xaa\x65\xf0\x58\xd0\xc0\x50\x39\xd7\xd2\x0e\xa0\xc6\xd0\x7e\xd1\x6a\xa1\xdb\x64\x1d\x04\x05\x76\x73\xbb\xb2\x24\xbb\x60\xec\x6e\x9d\x58\x7b\x02\xae\x79\xb2\x09\x1a\x9c\xbf\xcf\xcf\xb6\x3b\x20\xef\xdc\x19\x4b\xb4\xb5\x26\xae\xae\xe0\xd6\xde\x28\x0e\xe1\x1c\x23\x40\x09\x6c\xd9\xa5\x4f\xd4\x66\x3e\x01\xf9\x3a\xc7\x46\x0a\x57\xc0\x6a\x77\xc1\x5b\x99\xfe\xdd\x21\xa5\xc6\x5c\x3e\x60\x54\x03\xd2\xb6\xe9\xe3\x60\xd4\xbe\xbb\x0c\x1a\x2e\x26\x48\x04\x70\x82\x73\x24\x43\x89\xe8\x86\xf1\x44\xcd\x0b\xa4\x20\x6b\xef\x65\x2f\x4a\xfd\x33\x39\x9f\x23\x81\x08\x01\x82\x65\x15\x93\x43\xd3\x02\x08\x3a\x5e\x55\x3e\x1b\xf4\x1d\xc2\xa4\x16\x90\xa1\x5c\x75\x57\xbf\x01\x9f\x4b\x2b\x46\xb1\x62\xce\x0c\x11\xb7\x65\x85\x9e\xb3\x7e\xdb\x06\x24\xd4\xd9\x8c\x9b\xfa\xd8\xc9\x82\xe1\x09\x6d\xe3\x77\x59\x75\x9e\x31\xd1\xb9\xd6\x7b\x3c\xa6\xdf\x71\x22\xba\x00\xa9\x33\xc9\x30\xf8\x09\xe2\x07\x4b\x4b\x77\xca\xc8\x38\x23\x38\x3f\x2e\x25\x61\xce\x68\xab\xe4\x18\x87\xb8\xd1\x03\xb5\x3b\xe8\x56\xa8\xe2\x2a\x18\xac\x0d\xc2\x57\x4c\x0b\xf6\xf5\x82\x0d\x97\x73\x25\x4e\x50\x0e\x56\xbe\xbb\x55\xd1\x52\x09\x84\xa9\xba\xb8\x9c\xdf\x2a\xd6\x0d\xd5\x7c\xf0\xcf\x40\xd6\x1f\xe0\xc2\xf7\xe8\x9e\x4c\x9f\xf3\x3a\x38\x0d\xac\xa0\x62\xc2\xe9\x80\x0b\x3c\xf4\x08\x89\xd8\x83\x2d\x50\xd5\xa2\xc6\xc7\x1d\x54\xc6\xf8\xf2\xa7\x8d\xf0\x88\x78\x13\x4e\x48\x98\xa3\x6a\xa9\xe8\x88\x1e\xa8\xa7\xce\x1a\x9c\xcc\xcf\x2d\x12\xff\xec\x22\xc4\x75\x98\xf7\x0e\x42\xd6\x5b\xea\x19\x21\x4c\x4f\x19\xae\x5b\xfe\xf8\x63\xca\xc9\x7f\x28\xb9\x2d\xe9\xf5\x77\x61\x1e\xab\x3e\x0e\x3d\xf3\x7a\xd0\xd5\x26\xda\xc4\xde\x8b\xa8\xe5\xf8\x6f\xda\x77\x7b\x44\xe0\xea\xf3\x2f\xec\x04\x6f\x48\x2e\xdd\x8b\xa6\x40\x6e\xe9\xa0\xfe\x4f\x2d\xff\x11\x47\xfc\x79\xfe\xd5\x3d\x20\x0b\xbe\xdc\x6a\xa0\xae\x2e\xce\x11\xcf\x95\x5e\x81\xcd\x5e\xda\x14\xe3\xc1\xa2\x61\x92\xe9\x99\x7f\x4e\x93\xd1\xf7\x69\x1d\xc6\x66\xcc\x86\x0d\xe6\x78\xe3\x3b\xae\x90\x73\x83\xa4\x1e\xc4\x73\xbf\x62\x6d\xda\x29\x71\x5e\xf2\x05\x93\xcd\xfd\xbb\x99\x3e\x60\xee\xde\xfb\x07\x15\xd0\x05\x86\x74\x6e\x9b\x5a\x87\x87\x5e\xbb\xd3\x77\x9b\x9e\xf8\x37\xf0\x27\xaf\x38\xb5\x9c\xf4\x38\x99\x49\x7d\x1f\x0f\x5a\xdb\x17\x98\x9b\x91\x7e\x2c\x90\xf6\x15\x89\x91\xdd\x37\xe6\x79\xca\x67\x46\xe7\xdb\x4e\x7b\xcc\xdb\xbf\xb1\xf4\xdc\x6a\xac\xcc\xbf\xcd\x7b\xd8\xd5\x69\xf5\x6f\x00\x00\x00\xff\xff\xfc\xf3\x11\x6a\x88\x2f\x00\x00") + +func dataConfig_schema_v31JsonBytes() ([]byte, error) { + return bindataRead( + _dataConfig_schema_v31Json, + "data/config_schema_v3.1.json", + ) +} + +func dataConfig_schema_v31Json() (*asset, error) { + bytes, err := dataConfig_schema_v31JsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/config_schema_v3.1.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "data/config_schema_v3.0.json": dataConfig_schema_v30Json, + "data/config_schema_v3.1.json": dataConfig_schema_v31Json, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "data": &bintree{nil, map[string]*bintree{ + "config_schema_v3.0.json": &bintree{dataConfig_schema_v30Json, map[string]*bintree{}}, + "config_schema_v3.1.json": &bintree{dataConfig_schema_v31Json, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} + diff --git a/vendor/github.com/moby/moby/cli/compose/schema/data/config_schema_v3.0.json b/vendor/github.com/moby/moby/cli/compose/schema/data/config_schema_v3.0.json new file mode 100644 index 0000000..fbcd8bb --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/schema/data/config_schema_v3.0.json @@ -0,0 +1,383 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v3.0.json", + "type": "object", + "required": ["version"], + + "properties": { + "version": { + "type": "string" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + } + }, + + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "deploy": {"$ref": "#/definitions/deployment"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + } + ] + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "depends_on": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false + }, + + "mac_address": {"type": "string"}, + "network_mode": {"type": "string"}, + + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"} + }, + "additionalProperties": false + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "ports" + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "sysctls": {"$ref": "#/definitions/list_or_dict"}, + "stdin_open": {"type": "boolean"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "stop_signal": {"type": "string"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false + } + ] + } + } + }, + "user": {"type": "string"}, + "userns_mode": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": "object", + "additionalProperties": false, + "properties": { + "disable": {"type": "boolean"}, + "interval": {"type": "string"}, + "retries": {"type": "number"}, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "timeout": {"type": "string"} + } + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"} + }, + "additionalProperties": false + }, + "resources": { + "type": "object", + "properties": { + "limits": {"$ref": "#/definitions/resource"}, + "reservations": {"$ref": "#/definitions/resource"} + } + }, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + + "resource": { + "id": "#/definitions/resource", + "type": "object", + "properties": { + "cpus": {"type": "string"}, + "memory": {"type": "string"} + }, + "additionalProperties": false + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string"} + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "internal": {"type": "boolean"}, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} diff --git a/vendor/github.com/moby/moby/cli/compose/schema/data/config_schema_v3.1.json b/vendor/github.com/moby/moby/cli/compose/schema/data/config_schema_v3.1.json new file mode 100644 index 0000000..b703748 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/schema/data/config_schema_v3.1.json @@ -0,0 +1,428 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v3.1.json", + "type": "object", + "required": ["version"], + + "properties": { + "version": { + "type": "string" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + }, + + "secrets": { + "id": "#/properties/secrets", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/secret" + } + }, + "additionalProperties": false + } + }, + + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "deploy": {"$ref": "#/definitions/deployment"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + } + ] + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "depends_on": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false + }, + + "mac_address": {"type": "string"}, + "network_mode": {"type": "string"}, + + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"} + }, + "additionalProperties": false + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "ports" + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "secrets": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "source": {"type": "string"}, + "target": {"type": "string"}, + "uid": {"type": "string"}, + "gid": {"type": "string"}, + "mode": {"type": "number"} + } + } + ] + } + }, + "sysctls": {"$ref": "#/definitions/list_or_dict"}, + "stdin_open": {"type": "boolean"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "stop_signal": {"type": "string"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false + } + ] + } + } + }, + "user": {"type": "string"}, + "userns_mode": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": "object", + "additionalProperties": false, + "properties": { + "disable": {"type": "boolean"}, + "interval": {"type": "string"}, + "retries": {"type": "number"}, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "timeout": {"type": "string"} + } + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"} + }, + "additionalProperties": false + }, + "resources": { + "type": "object", + "properties": { + "limits": {"$ref": "#/definitions/resource"}, + "reservations": {"$ref": "#/definitions/resource"} + } + }, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + + "resource": { + "id": "#/definitions/resource", + "type": "object", + "properties": { + "cpus": {"type": "string"}, + "memory": {"type": "string"} + }, + "additionalProperties": false + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string"} + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "internal": {"type": "boolean"}, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "secret": { + "id": "#/definitions/secret", + "type": "object", + "properties": { + "file": {"type": "string"}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} diff --git a/vendor/github.com/moby/moby/cli/compose/schema/schema.go b/vendor/github.com/moby/moby/cli/compose/schema/schema.go new file mode 100644 index 0000000..ae33c77 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/schema/schema.go @@ -0,0 +1,137 @@ +package schema + +//go:generate go-bindata -pkg schema -nometadata data + +import ( + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/xeipuuv/gojsonschema" +) + +const ( + defaultVersion = "1.0" + versionField = "version" +) + +type portsFormatChecker struct{} + +func (checker portsFormatChecker) IsFormat(input string) bool { + // TODO: implement this + return true +} + +type durationFormatChecker struct{} + +func (checker durationFormatChecker) IsFormat(input string) bool { + _, err := time.ParseDuration(input) + return err == nil +} + +func init() { + gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{}) +} + +// Version returns the version of the config, defaulting to version 1.0 +func Version(config map[string]interface{}) string { + version, ok := config[versionField] + if !ok { + return defaultVersion + } + return normalizeVersion(fmt.Sprintf("%v", version)) +} + +func normalizeVersion(version string) string { + switch version { + case "3": + return "3.0" + default: + return version + } +} + +// Validate uses the jsonschema to validate the configuration +func Validate(config map[string]interface{}, version string) error { + schemaData, err := Asset(fmt.Sprintf("data/config_schema_v%s.json", version)) + if err != nil { + return errors.Errorf("unsupported Compose file version: %s", version) + } + + schemaLoader := gojsonschema.NewStringLoader(string(schemaData)) + dataLoader := gojsonschema.NewGoLoader(config) + + result, err := gojsonschema.Validate(schemaLoader, dataLoader) + if err != nil { + return err + } + + if !result.Valid() { + return toError(result) + } + + return nil +} + +func toError(result *gojsonschema.Result) error { + err := getMostSpecificError(result.Errors()) + description := getDescription(err) + return fmt.Errorf("%s %s", err.Field(), description) +} + +func getDescription(err gojsonschema.ResultError) string { + if err.Type() == "invalid_type" { + if expectedType, ok := err.Details()["expected"].(string); ok { + return fmt.Sprintf("must be a %s", humanReadableType(expectedType)) + } + } + + return err.Description() +} + +func humanReadableType(definition string) string { + if definition[0:1] == "[" { + allTypes := strings.Split(definition[1:len(definition)-1], ",") + for i, t := range allTypes { + allTypes[i] = humanReadableType(t) + } + return fmt.Sprintf( + "%s or %s", + strings.Join(allTypes[0:len(allTypes)-1], ", "), + allTypes[len(allTypes)-1], + ) + } + if definition == "object" { + return "mapping" + } + if definition == "array" { + return "list" + } + return definition +} + +func getMostSpecificError(errors []gojsonschema.ResultError) gojsonschema.ResultError { + var mostSpecificError gojsonschema.ResultError + + for _, err := range errors { + if mostSpecificError == nil { + mostSpecificError = err + } else if specificity(err) > specificity(mostSpecificError) { + mostSpecificError = err + } else if specificity(err) == specificity(mostSpecificError) { + // Invalid type errors win in a tie-breaker for most specific field name + if err.Type() == "invalid_type" && mostSpecificError.Type() != "invalid_type" { + mostSpecificError = err + } + } + } + + return mostSpecificError +} + +func specificity(err gojsonschema.ResultError) int { + return len(strings.Split(err.Field(), ".")) +} diff --git a/vendor/github.com/moby/moby/cli/compose/schema/schema_test.go b/vendor/github.com/moby/moby/cli/compose/schema/schema_test.go new file mode 100644 index 0000000..0935d40 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/schema/schema_test.go @@ -0,0 +1,52 @@ +package schema + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type dict map[string]interface{} + +func TestValidate(t *testing.T) { + config := dict{ + "version": "3.0", + "services": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + assert.NoError(t, Validate(config, "3.0")) +} + +func TestValidateUndefinedTopLevelOption(t *testing.T) { + config := dict{ + "version": "3.0", + "helicopters": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + err := Validate(config, "3.0") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Additional property helicopters is not allowed") +} + +func TestValidateInvalidVersion(t *testing.T) { + config := dict{ + "version": "2.1", + "services": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + err := Validate(config, "2.1") + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported Compose file version: 2.1") +} diff --git a/vendor/github.com/moby/moby/cli/compose/template/template.go b/vendor/github.com/moby/moby/cli/compose/template/template.go new file mode 100644 index 0000000..28495ba --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/template/template.go @@ -0,0 +1,100 @@ +package template + +import ( + "fmt" + "regexp" + "strings" +) + +var delimiter = "\\$" +var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?" + +var patternString = fmt.Sprintf( + "%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))", + delimiter, delimiter, substitution, substitution, +) + +var pattern = regexp.MustCompile(patternString) + +// InvalidTemplateError is returned when a variable template is not in a valid +// format +type InvalidTemplateError struct { + Template string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("Invalid template: %#v", e.Template) +} + +// Mapping is a user-supplied function which maps from variable names to values. +// Returns the value as a string and a bool indicating whether +// the value is present, to distinguish between an empty string +// and the absence of a value. +type Mapping func(string) (string, bool) + +// Substitute variables in the string with their values +func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) { + result = pattern.ReplaceAllStringFunc(template, func(substring string) string { + matches := pattern.FindStringSubmatch(substring) + groups := make(map[string]string) + for i, name := range pattern.SubexpNames() { + if i != 0 { + groups[name] = matches[i] + } + } + + substitution := groups["named"] + if substitution == "" { + substitution = groups["braced"] + } + if substitution != "" { + // Soft default (fall back if unset or empty) + if strings.Contains(substitution, ":-") { + name, defaultValue := partition(substitution, ":-") + value, ok := mapping(name) + if !ok || value == "" { + return defaultValue + } + return value + } + + // Hard default (fall back if-and-only-if empty) + if strings.Contains(substitution, "-") { + name, defaultValue := partition(substitution, "-") + value, ok := mapping(name) + if !ok { + return defaultValue + } + return value + } + + // No default (fall back to empty string) + value, ok := mapping(substitution) + if !ok { + return "" + } + return value + } + + if escaped := groups["escaped"]; escaped != "" { + return escaped + } + + err = &InvalidTemplateError{Template: template} + return "" + }) + + return result, err +} + +// Split the string at the first occurrence of sep, and return the part before the separator, +// and the part after the separator. +// +// If the separator is not found, return the string itself, followed by an empty string. +func partition(s, sep string) (string, string) { + if strings.Contains(s, sep) { + parts := strings.SplitN(s, sep, 2) + return parts[0], parts[1] + } + return s, "" +} diff --git a/vendor/github.com/moby/moby/cli/compose/template/template_test.go b/vendor/github.com/moby/moby/cli/compose/template/template_test.go new file mode 100644 index 0000000..6b81bf0 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/template/template_test.go @@ -0,0 +1,83 @@ +package template + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var defaults = map[string]string{ + "FOO": "first", + "BAR": "", +} + +func defaultMapping(name string) (string, bool) { + val, ok := defaults[name] + return val, ok +} + +func TestEscaped(t *testing.T) { + result, err := Substitute("$${foo}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "${foo}", result) +} + +func TestInvalid(t *testing.T) { + invalidTemplates := []string{ + "${", + "$}", + "${}", + "${ }", + "${ foo}", + "${foo }", + "${foo!}", + } + + for _, template := range invalidTemplates { + _, err := Substitute(template, defaultMapping) + assert.Error(t, err) + assert.IsType(t, &InvalidTemplateError{}, err) + } +} + +func TestNoValueNoDefault(t *testing.T) { + for _, template := range []string{"This ${missing} var", "This ${BAR} var"} { + result, err := Substitute(template, defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "This var", result) + } +} + +func TestValueNoDefault(t *testing.T) { + for _, template := range []string{"This $FOO var", "This ${FOO} var"} { + result, err := Substitute(template, defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "This first var", result) + } +} + +func TestNoValueWithDefault(t *testing.T) { + for _, template := range []string{"ok ${missing:-def}", "ok ${missing-def}"} { + result, err := Substitute(template, defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok def", result) + } +} + +func TestEmptyValueWithSoftDefault(t *testing.T) { + result, err := Substitute("ok ${BAR:-def}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok def", result) +} + +func TestEmptyValueWithHardDefault(t *testing.T) { + result, err := Substitute("ok ${BAR-def}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok ", result) +} + +func TestNonAlphanumericDefault(t *testing.T) { + result, err := Substitute("ok ${BAR:-/non:-alphanumeric}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok /non:-alphanumeric", result) +} diff --git a/vendor/github.com/moby/moby/cli/compose/types/types.go b/vendor/github.com/moby/moby/cli/compose/types/types.go new file mode 100644 index 0000000..d1371d2 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/compose/types/types.go @@ -0,0 +1,253 @@ +package types + +import ( + "time" +) + +// UnsupportedProperties not yet supported by this implementation of the compose file +var UnsupportedProperties = []string{ + "build", + "cap_add", + "cap_drop", + "cgroup_parent", + "devices", + "dns", + "dns_search", + "domainname", + "external_links", + "ipc", + "links", + "mac_address", + "network_mode", + "privileged", + "read_only", + "restart", + "security_opt", + "shm_size", + "stop_signal", + "sysctls", + "tmpfs", + "userns_mode", +} + +// DeprecatedProperties that were removed from the v3 format, but their +// use should not impact the behaviour of the application. +var DeprecatedProperties = map[string]string{ + "container_name": "Setting the container name is not supported.", + "expose": "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.", +} + +// ForbiddenProperties that are not supported in this implementation of the +// compose file. +var ForbiddenProperties = map[string]string{ + "extends": "Support for `extends` is not implemented yet. Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.", + "volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.", + "volumes_from": "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.", + "cpu_quota": "Set resource limits using deploy.resources", + "cpu_shares": "Set resource limits using deploy.resources", + "cpuset": "Set resource limits using deploy.resources", + "mem_limit": "Set resource limits using deploy.resources", + "memswap_limit": "Set resource limits using deploy.resources", +} + +// Dict is a mapping of strings to interface{} +type Dict map[string]interface{} + +// ConfigFile is a filename and the contents of the file as a Dict +type ConfigFile struct { + Filename string + Config Dict +} + +// ConfigDetails are the details about a group of ConfigFiles +type ConfigDetails struct { + WorkingDir string + ConfigFiles []ConfigFile + Environment map[string]string +} + +// Config is a full compose file configuration +type Config struct { + Services []ServiceConfig + Networks map[string]NetworkConfig + Volumes map[string]VolumeConfig + Secrets map[string]SecretConfig +} + +// ServiceConfig is the configuration of one service +type ServiceConfig struct { + Name string + + CapAdd []string `mapstructure:"cap_add"` + CapDrop []string `mapstructure:"cap_drop"` + CgroupParent string `mapstructure:"cgroup_parent"` + Command []string `compose:"shell_command"` + ContainerName string `mapstructure:"container_name"` + DependsOn []string `mapstructure:"depends_on"` + Deploy DeployConfig + Devices []string + DNS []string `compose:"string_or_list"` + DNSSearch []string `mapstructure:"dns_search" compose:"string_or_list"` + DomainName string `mapstructure:"domainname"` + Entrypoint []string `compose:"shell_command"` + Environment map[string]string `compose:"list_or_dict_equals"` + Expose []string `compose:"list_of_strings_or_numbers"` + ExternalLinks []string `mapstructure:"external_links"` + ExtraHosts map[string]string `mapstructure:"extra_hosts" compose:"list_or_dict_colon"` + Hostname string + HealthCheck *HealthCheckConfig + Image string + Ipc string + Labels map[string]string `compose:"list_or_dict_equals"` + Links []string + Logging *LoggingConfig + MacAddress string `mapstructure:"mac_address"` + NetworkMode string `mapstructure:"network_mode"` + Networks map[string]*ServiceNetworkConfig `compose:"list_or_struct_map"` + Pid string + Ports []string `compose:"list_of_strings_or_numbers"` + Privileged bool + ReadOnly bool `mapstructure:"read_only"` + Restart string + Secrets []ServiceSecretConfig + SecurityOpt []string `mapstructure:"security_opt"` + StdinOpen bool `mapstructure:"stdin_open"` + StopGracePeriod *time.Duration `mapstructure:"stop_grace_period"` + StopSignal string `mapstructure:"stop_signal"` + Tmpfs []string `compose:"string_or_list"` + Tty bool `mapstructure:"tty"` + Ulimits map[string]*UlimitsConfig + User string + Volumes []string + WorkingDir string `mapstructure:"working_dir"` +} + +// LoggingConfig the logging configuration for a service +type LoggingConfig struct { + Driver string + Options map[string]string +} + +// DeployConfig the deployment configuration for a service +type DeployConfig struct { + Mode string + Replicas *uint64 + Labels map[string]string `compose:"list_or_dict_equals"` + UpdateConfig *UpdateConfig `mapstructure:"update_config"` + Resources Resources + RestartPolicy *RestartPolicy `mapstructure:"restart_policy"` + Placement Placement +} + +// HealthCheckConfig the healthcheck configuration for a service +type HealthCheckConfig struct { + Test []string `compose:"healthcheck"` + Timeout string + Interval string + Retries *uint64 + Disable bool +} + +// UpdateConfig the service update configuration +type UpdateConfig struct { + Parallelism *uint64 + Delay time.Duration + FailureAction string `mapstructure:"failure_action"` + Monitor time.Duration + MaxFailureRatio float32 `mapstructure:"max_failure_ratio"` +} + +// Resources the resource limits and reservations +type Resources struct { + Limits *Resource + Reservations *Resource +} + +// Resource is a resource to be limited or reserved +type Resource struct { + // TODO: types to convert from units and ratios + NanoCPUs string `mapstructure:"cpus"` + MemoryBytes UnitBytes `mapstructure:"memory"` +} + +// UnitBytes is the bytes type +type UnitBytes int64 + +// RestartPolicy the service restart policy +type RestartPolicy struct { + Condition string + Delay *time.Duration + MaxAttempts *uint64 `mapstructure:"max_attempts"` + Window *time.Duration +} + +// Placement constraints for the service +type Placement struct { + Constraints []string +} + +// ServiceNetworkConfig is the network configuration for a service +type ServiceNetworkConfig struct { + Aliases []string + Ipv4Address string `mapstructure:"ipv4_address"` + Ipv6Address string `mapstructure:"ipv6_address"` +} + +// ServiceSecretConfig is the secret configuration for a service +type ServiceSecretConfig struct { + Source string + Target string + UID string + GID string + Mode *uint32 +} + +// UlimitsConfig the ulimit configuration +type UlimitsConfig struct { + Single int + Soft int + Hard int +} + +// NetworkConfig for a network +type NetworkConfig struct { + Driver string + DriverOpts map[string]string `mapstructure:"driver_opts"` + Ipam IPAMConfig + External External + Internal bool + Labels map[string]string `compose:"list_or_dict_equals"` +} + +// IPAMConfig for a network +type IPAMConfig struct { + Driver string + Config []*IPAMPool +} + +// IPAMPool for a network +type IPAMPool struct { + Subnet string +} + +// VolumeConfig for a volume +type VolumeConfig struct { + Driver string + DriverOpts map[string]string `mapstructure:"driver_opts"` + External External + Labels map[string]string `compose:"list_or_dict_equals"` +} + +// External identifies a Volume or Network as a reference to a resource that is +// not managed, and should already exist. +type External struct { + Name string + External bool +} + +// SecretConfig for a secret +type SecretConfig struct { + File string + External External + Labels map[string]string `compose:"list_or_dict_equals"` +} diff --git a/vendor/github.com/moby/moby/cli/error.go b/vendor/github.com/moby/moby/cli/error.go new file mode 100644 index 0000000..62f6243 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/error.go @@ -0,0 +1,33 @@ +package cli + +import ( + "fmt" + "strings" +) + +// Errors is a list of errors. +// Useful in a loop if you don't want to return the error right away and you want to display after the loop, +// all the errors that happened during the loop. +type Errors []error + +func (errList Errors) Error() string { + if len(errList) < 1 { + return "" + } + + out := make([]string, len(errList)) + for i := range errList { + out[i] = errList[i].Error() + } + return strings.Join(out, ", ") +} + +// StatusError reports an unsuccessful exit by a command. +type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/moby/moby/cli/flags/client.go b/vendor/github.com/moby/moby/cli/flags/client.go new file mode 100644 index 0000000..9b6940f --- /dev/null +++ b/vendor/github.com/moby/moby/cli/flags/client.go @@ -0,0 +1,13 @@ +package flags + +// ClientOptions are the options used to configure the client cli +type ClientOptions struct { + Common *CommonOptions + ConfigDir string + Version bool +} + +// NewClientOptions returns a new ClientOptions +func NewClientOptions() *ClientOptions { + return &ClientOptions{Common: NewCommonOptions()} +} diff --git a/vendor/github.com/moby/moby/cli/flags/common.go b/vendor/github.com/moby/moby/cli/flags/common.go new file mode 100644 index 0000000..e2f9da0 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/flags/common.go @@ -0,0 +1,120 @@ +package flags + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/pflag" +) + +const ( + // DefaultTrustKeyFile is the default filename for the trust key + DefaultTrustKeyFile = "key.json" + // DefaultCaFile is the default filename for the CA pem file + DefaultCaFile = "ca.pem" + // DefaultKeyFile is the default filename for the key pem file + DefaultKeyFile = "key.pem" + // DefaultCertFile is the default filename for the cert pem file + DefaultCertFile = "cert.pem" + // FlagTLSVerify is the flag name for the tls verification option + FlagTLSVerify = "tlsverify" +) + +var ( + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +// CommonOptions are options common to both the client and the daemon. +type CommonOptions struct { + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options + TrustKey string +} + +// NewCommonOptions returns a new CommonOptions +func NewCommonOptions() *CommonOptions { + return &CommonOptions{} +} + +// InstallFlags adds flags for the common options on the FlagSet +func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) { + if dockerCertPath == "" { + dockerCertPath = cliconfig.ConfigDir() + } + + flags.BoolVarP(&commonOpts.Debug, "debug", "D", false, "Enable debug mode") + flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", "Set the logging level (\"debug\", \"info\", \"warn\", \"error\", \"fatal\")") + flags.BoolVar(&commonOpts.TLS, "tls", false, "Use TLS; implied by --tlsverify") + flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") + + // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") + + commonOpts.TLSOptions = &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, DefaultCaFile), + CertFile: filepath.Join(dockerCertPath, DefaultCertFile), + KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), + } + tlsOptions := commonOpts.TLSOptions + flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") + flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") + flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") + + hostOpt := opts.NewNamedListOptsRef("hosts", &commonOpts.Hosts, opts.ValidateHost) + flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") +} + +// SetDefaultOptions sets default values for options after flag parsing is +// complete +func (commonOpts *CommonOptions) SetDefaultOptions(flags *pflag.FlagSet) { + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on tls + // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need + // to check that here as well + if flags.Changed(FlagTLSVerify) || commonOpts.TLSVerify { + commonOpts.TLS = true + } + + if !commonOpts.TLS { + commonOpts.TLSOptions = nil + } else { + tlsOptions := commonOpts.TLSOptions + tlsOptions.InsecureSkipVerify = !commonOpts.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. + if !flags.Changed("tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !flags.Changed("tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +// SetLogLevel sets the logrus logging level +func SetLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/vendor/github.com/moby/moby/cli/flags/common_test.go b/vendor/github.com/moby/moby/cli/flags/common_test.go new file mode 100644 index 0000000..81eaa38 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/flags/common_test.go @@ -0,0 +1,42 @@ +package flags + +import ( + "path/filepath" + "testing" + + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/spf13/pflag" +) + +func TestCommonOptionsInstallFlags(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := NewCommonOptions() + opts.InstallFlags(flags) + + err := flags.Parse([]string{ + "--tlscacert=\"/foo/cafile\"", + "--tlscert=\"/foo/cert\"", + "--tlskey=\"/foo/key\"", + }) + assert.NilError(t, err) + assert.Equal(t, opts.TLSOptions.CAFile, "/foo/cafile") + assert.Equal(t, opts.TLSOptions.CertFile, "/foo/cert") + assert.Equal(t, opts.TLSOptions.KeyFile, "/foo/key") +} + +func defaultPath(filename string) string { + return filepath.Join(cliconfig.ConfigDir(), filename) +} + +func TestCommonOptionsInstallFlagsWithDefaults(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := NewCommonOptions() + opts.InstallFlags(flags) + + err := flags.Parse([]string{}) + assert.NilError(t, err) + assert.Equal(t, opts.TLSOptions.CAFile, defaultPath("ca.pem")) + assert.Equal(t, opts.TLSOptions.CertFile, defaultPath("cert.pem")) + assert.Equal(t, opts.TLSOptions.KeyFile, defaultPath("key.pem")) +} diff --git a/vendor/github.com/moby/moby/cli/required.go b/vendor/github.com/moby/moby/cli/required.go new file mode 100644 index 0000000..8ee02c8 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/required.go @@ -0,0 +1,96 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// NoArgs validates args and returns an error if there are any args +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + + if cmd.HasSubCommands() { + return fmt.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + } + + return fmt.Errorf( + "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) +} + +// RequiresMinArgs returns an error if there is not at least min args +func RequiresMinArgs(min int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min { + return nil + } + return fmt.Errorf( + "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresMaxArgs returns an error if there is not at most max args +func RequiresMaxArgs(max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) <= max { + return nil + } + return fmt.Errorf( + "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresRangeArgs returns an error if there is not at least min args and at most max args +func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min && len(args) <= max { + return nil + } + return fmt.Errorf( + "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// ExactArgs returns an error if there is not the exact number of args +func ExactArgs(number int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) == number { + return nil + } + return fmt.Errorf( + "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + number, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} diff --git a/vendor/github.com/moby/moby/cli/trust/trust.go b/vendor/github.com/moby/moby/cli/trust/trust.go new file mode 100644 index 0000000..51914f7 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/trust/trust.go @@ -0,0 +1,232 @@ +package trust + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/notary" + "github.com/docker/notary/client" + "github.com/docker/notary/passphrase" + "github.com/docker/notary/storage" + "github.com/docker/notary/trustmanager" + "github.com/docker/notary/trustpinning" + "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/signed" +) + +var ( + // ReleasesRole is the role named "releases" + ReleasesRole = path.Join(data.CanonicalTargetsRole, "releases") +) + +func trustDirectory() string { + return filepath.Join(cliconfig.ConfigDir(), "trust") +} + +// certificateDirectory returns the directory containing +// TLS certificates for the given server. An error is +// returned if there was an error parsing the server string. +func certificateDirectory(server string) (string, error) { + u, err := url.Parse(server) + if err != nil { + return "", err + } + + return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil +} + +// Server returns the base URL for the trust server. +func Server(index *registrytypes.IndexInfo) (string, error) { + if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { + urlObj, err := url.Parse(s) + if err != nil || urlObj.Scheme != "https" { + return "", fmt.Errorf("valid https URL required for trust server, got %s", s) + } + + return s, nil + } + if index.Official { + return registry.NotaryServer, nil + } + return "https://" + index.Name, nil +} + +type simpleCredentialStore struct { + auth types.AuthConfig +} + +func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { + return scs.auth.Username, scs.auth.Password +} + +func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string { + return scs.auth.IdentityToken +} + +func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +// GetNotaryRepository returns a NotaryRepository which stores all the +// information needed to operate on a notary repository. +// It creates an HTTP transport providing authentication support. +func GetNotaryRepository(streams command.Streams, repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, actions ...string) (*client.NotaryRepository, error) { + server, err := Server(repoInfo.Index) + if err != nil { + return nil, err + } + + var cfg = tlsconfig.ClientDefault() + cfg.InsecureSkipVerify = !repoInfo.Index.Secure + + // Get certificate base directory + certDir, err := certificateDirectory(server) + if err != nil { + return nil, err + } + logrus.Debugf("reading certificate directory: %s", certDir) + + if err := registry.ReadCertsDirectory(cfg, certDir); err != nil { + return nil, err + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + DisableKeepAlives: true, + } + + // Skip configuration headers since request is not going to Docker daemon + modifiers := registry.DockerHeaders(command.UserAgent(), http.Header{}) + authTransport := transport.NewTransport(base, modifiers...) + pingClient := &http.Client{ + Transport: authTransport, + Timeout: 5 * time.Second, + } + endpointStr := server + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, err + } + + challengeManager := challenge.NewSimpleManager() + + resp, err := pingClient.Do(req) + if err != nil { + // Ignore error on ping to operate in offline mode + logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) + } else { + defer resp.Body.Close() + + // Add response to the challenge manager to parse out + // authentication header and register authentication method + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } + } + + scope := auth.RepositoryScope{ + Repository: repoInfo.FullName(), + Actions: actions, + Class: repoInfo.Class, + } + creds := simpleCredentialStore{auth: authConfig} + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) + tr := transport.NewTransport(base, modifiers...) + + return client.NewNotaryRepository( + trustDirectory(), + repoInfo.FullName(), + server, + tr, + getPassphraseRetriever(streams), + trustpinning.TrustPinConfig{}) +} + +func getPassphraseRetriever(streams command.Streams) notary.PassRetriever { + aliasMap := map[string]string{ + "root": "root", + "snapshot": "repository", + "targets": "repository", + "default": "repository", + } + baseRetriever := passphrase.PromptRetrieverWithInOut(streams.In(), streams.Out(), aliasMap) + env := map[string]string{ + "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), + "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + } + + return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { + if v := env[alias]; v != "" { + return v, numAttempts > 1, nil + } + // For non-root roles, we can also try the "default" alias if it is specified + if v := env["default"]; v != "" && alias != data.CanonicalRootRole { + return v, numAttempts > 1, nil + } + return baseRetriever(keyName, alias, createNew, numAttempts) + } +} + +// NotaryError formats an error message received from the notary service +func NotaryError(repoName string, err error) error { + switch err.(type) { + case *json.SyntaxError: + logrus.Debugf("Notary syntax error: %s", err) + return fmt.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) + case signed.ErrExpired: + return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) + case trustmanager.ErrKeyNotFound: + return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) + case storage.NetworkError: + return fmt.Errorf("Error: error contacting notary server: %v", err) + case storage.ErrMetaNotFound: + return fmt.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) + case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType: + return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) + case signed.ErrNoKeys: + return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) + case signed.ErrLowVersion: + return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) + case signed.ErrRoleThreshold: + return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) + case client.ErrRepositoryNotExist: + return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) + case signed.ErrInsufficientSignatures: + return fmt.Errorf("Error: could not produce valid signature for %s. If Yubikey was used, was touch input provided?: %v", repoName, err) + } + + return err +} diff --git a/vendor/github.com/moby/moby/cliconfig/config.go b/vendor/github.com/moby/moby/cliconfig/config.go new file mode 100644 index 0000000..d81bf86 --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/config.go @@ -0,0 +1,120 @@ +package cliconfig + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/pkg/homedir" +) + +const ( + // ConfigFileName is the name of config file + ConfigFileName = "config.json" + configFileDir = ".docker" + oldConfigfile = ".dockercfg" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") +) + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } +} + +// ConfigDir returns the directory the configuration file is stored in +func ConfigDir() string { + return configDir +} + +// SetConfigDir sets the directory the configuration file is stored in +func SetConfigDir(dir string) { + configDir = dir +} + +// NewConfigFile initializes an empty configuration file for the given filename 'fn' +func NewConfigFile(fn string) *configfile.ConfigFile { + return &configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + HTTPHeaders: make(map[string]string), + Filename: fn, + } +} + +// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from +// a non-nested reader +func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LegacyLoadFromReader(configData) + return &configFile, err +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// Load reads the configuration files in the given directory, and sets up +// the auth config information and returns values. +// FIXME: use the internal golang config parser +func Load(configDir string) (*configfile.ConfigFile, error) { + if configDir == "" { + configDir = ConfigDir() + } + + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + Filename: filepath.Join(configDir, ConfigFileName), + } + + // Try happy path first - latest config file + if _, err := os.Stat(configFile.Filename); err == nil { + file, err := os.Open(configFile.Filename) + if err != nil { + return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) + } + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = fmt.Errorf("%s - %v", configFile.Filename, err) + } + return &configFile, err + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) + } + + // Can't find latest config file so check for the old one + confFile := filepath.Join(homedir.Get(), oldConfigfile) + if _, err := os.Stat(confFile); err != nil { + return &configFile, nil //missing file is not an error + } + file, err := os.Open(confFile) + if err != nil { + return &configFile, fmt.Errorf("%s - %v", confFile, err) + } + defer file.Close() + err = configFile.LegacyLoadFromReader(file) + if err != nil { + return &configFile, fmt.Errorf("%s - %v", confFile, err) + } + + if configFile.HTTPHeaders == nil { + configFile.HTTPHeaders = map[string]string{} + } + return &configFile, nil +} diff --git a/vendor/github.com/moby/moby/cliconfig/config_test.go b/vendor/github.com/moby/moby/cliconfig/config_test.go new file mode 100644 index 0000000..d8a099a --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/config_test.go @@ -0,0 +1,621 @@ +package cliconfig + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/pkg/homedir" +) + +func TestEmptyConfigDir(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + SetConfigDir(tmpHome) + + config, err := Load("") + if err != nil { + t.Fatalf("Failed loading on empty config dir: %q", err) + } + + expectedConfigFilename := filepath.Join(tmpHome, ConfigFileName) + if config.Filename != expectedConfigFilename { + t.Fatalf("Expected config filename %s, got %s", expectedConfigFilename, config.Filename) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestMissingFile(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestSaveFileToDirs(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + tmpHome += "/.docker" + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestEmptyFile(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + if err := ioutil.WriteFile(fn, []byte(""), 0600); err != nil { + t.Fatal(err) + } + + _, err = Load(tmpHome) + if err == nil { + t.Fatalf("Was supposed to fail") + } +} + +func TestEmptyJSON(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + if err := ioutil.WriteFile(fn, []byte("{}"), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestOldInvalidsAuth(t *testing.T) { + invalids := map[string]string{ + `username = test`: "The Auth config file is empty", + `username +password`: "Invalid Auth config file", + `username = test +email`: "Invalid auth configuration file", + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + for content, expectedError := range invalids { + fn := filepath.Join(tmpHome, oldConfigfile) + if err := ioutil.WriteFile(fn, []byte(content), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + // Use Contains instead of == since the file name will change each time + if err == nil || !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Should have failed\nConfig: %v\nGot: %v\nExpected: %v", config, err, expectedError) + } + + } +} + +func TestOldValidAuth(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `username = am9lam9lOmhlbGxv + email = user@example.com` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatal(err) + } + + // defaultIndexserver is https://index.docker.io/v1/ + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) + } +} + +func TestOldJSONInvalid(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `{"https://index.docker.io/v1/":{"auth":"test","email":"user@example.com"}}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + // Use Contains instead of == since the file name will change each time + if err == nil || !strings.Contains(err.Error(), "Invalid auth configuration file") { + t.Fatalf("Expected an error got : %v, %v", config, err) + } +} + +func TestOldJSON(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv", + "email": "user@example.com" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n'%s'\n not \n'%s'\n", configStr, expConfStr) + } +} + +func TestNewJSON(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) + } +} + +func TestNewJSONNoEmail(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) + } +} + +func TestJSONWithPsFormat(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { + t.Fatalf("Unknown ps format: %s\n", config.PsFormat) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"psFormat":`) || + !strings.Contains(configStr, "{{.ID}}") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +func TestJSONWithCredentialStore(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "credsStore": "crazy-secure-storage" +}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.CredentialsStore != "crazy-secure-storage" { + t.Fatalf("Unknown credential store: %s\n", config.CredentialsStore) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"credsStore":`) || + !strings.Contains(configStr, "crazy-secure-storage") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +func TestJSONWithCredentialHelpers(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "credHelpers": { "images.io": "images-io", "containers.com": "crazy-secure-storage" } +}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.CredentialHelpers == nil { + t.Fatal("config.CredentialHelpers was nil") + } else if config.CredentialHelpers["images.io"] != "images-io" || + config.CredentialHelpers["containers.com"] != "crazy-secure-storage" { + t.Fatalf("Credential helpers not deserialized properly: %v\n", config.CredentialHelpers) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"credHelpers":`) || + !strings.Contains(configStr, "images.io") || + !strings.Contains(configStr, "images-io") || + !strings.Contains(configStr, "containers.com") || + !strings.Contains(configStr, "crazy-secure-storage") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +// Save it and make sure it shows up in new form +func saveConfigAndValidateNewFormat(t *testing.T, config *configfile.ConfigFile, homeFolder string) string { + if err := config.Save(); err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(homeFolder, ConfigFileName)) + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } + return string(buf) +} + +func TestConfigDir(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + if ConfigDir() == tmpHome { + t.Fatalf("Expected ConfigDir to be different than %s by default, but was the same", tmpHome) + } + + // Update configDir + SetConfigDir(tmpHome) + + if ConfigDir() != tmpHome { + t.Fatalf("Expected ConfigDir to %s, but was %s", tmpHome, ConfigDir()) + } +} + +func TestConfigFile(t *testing.T) { + configFilename := "configFilename" + configFile := NewConfigFile(configFilename) + + if configFile.Filename != configFilename { + t.Fatalf("Expected %s, got %s", configFilename, configFile.Filename) + } +} + +func TestJSONReaderNoFile(t *testing.T) { + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` + + config, err := LoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + +} + +func TestOldJSONReaderNoFile(t *testing.T) { + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + + config, err := LegacyLoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } +} + +func TestJSONWithPsFormatNoFile(t *testing.T) { + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + config, err := LoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { + t.Fatalf("Unknown ps format: %s\n", config.PsFormat) + } + +} + +func TestJSONSaveWithNoFile(t *testing.T) { + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + config, err := LoadFromReader(strings.NewReader(js)) + err = config.Save() + if err == nil { + t.Fatalf("Expected error. File should not have been able to save with no file name.") + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatalf("Failed to create a temp dir: %q", err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + defer f.Close() + + err = config.SaveToWriter(f) + if err != nil { + t.Fatalf("Failed saving to file: %q", err) + } + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if err != nil { + t.Fatal(err) + } + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + if string(buf) != expConfStr { + t.Fatalf("Should have save in new form: \n%s\nnot \n%s", string(buf), expConfStr) + } +} + +func TestLegacyJSONSaveWithNoFile(t *testing.T) { + + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + config, err := LegacyLoadFromReader(strings.NewReader(js)) + err = config.Save() + if err == nil { + t.Fatalf("Expected error. File should not have been able to save with no file name.") + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatalf("Failed to create a temp dir: %q", err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + defer f.Close() + + if err = config.SaveToWriter(f); err != nil { + t.Fatalf("Failed saving to file: %q", err) + } + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if err != nil { + t.Fatal(err) + } + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv", + "email": "user@example.com" + } + } +}` + + if string(buf) != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", string(buf), expConfStr) + } +} diff --git a/vendor/github.com/moby/moby/cliconfig/configfile/file.go b/vendor/github.com/moby/moby/cliconfig/configfile/file.go new file mode 100644 index 0000000..3909713 --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/configfile/file.go @@ -0,0 +1,183 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" +) + +const ( + // This constant is only used for really old config files when the + // URL wasn't saved as part of the config file and it was just + // assumed to be this value. + defaultIndexserver = "https://index.docker.io/v1/" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` +} + +// LegacyLoadFromReader reads the non-nested configuration data given and sets up the +// auth config information with given directory and populates the receiver object +func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { + b, err := ioutil.ReadAll(configData) + if err != nil { + return err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return fmt.Errorf("The Auth config file is empty") + } + authConfig := types.AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return err + } + authConfig.ServerAddress = defaultIndexserver + configFile.AuthConfigs[defaultIndexserver] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return err + } + authConfig.Auth = "" + authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return nil +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(&configFile); err != nil { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return nil +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. +func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + len(configFile.CredentialHelpers) > 0 || + len(configFile.AuthConfigs) > 0 +} + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() error { + if configFile.Filename == "" { + return fmt.Errorf("Can't save config with empty filename") + } + + if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil { + return err + } + f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + return configFile.SaveToWriter(f) +} + +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", fmt.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} diff --git a/vendor/github.com/moby/moby/cliconfig/configfile/file_test.go b/vendor/github.com/moby/moby/cliconfig/configfile/file_test.go new file mode 100644 index 0000000..435797f --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/configfile/file_test.go @@ -0,0 +1,27 @@ +package configfile + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestEncodeAuth(t *testing.T) { + newAuthConfig := &types.AuthConfig{Username: "ken", Password: "test"} + authStr := encodeAuth(newAuthConfig) + decAuthConfig := &types.AuthConfig{} + var err error + decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) + if err != nil { + t.Fatal(err) + } + if newAuthConfig.Username != decAuthConfig.Username { + t.Fatal("Encode Username doesn't match decoded Username") + } + if newAuthConfig.Password != decAuthConfig.Password { + t.Fatal("Encode Password doesn't match decoded Password") + } + if authStr != "a2VuOnRlc3Q=" { + t.Fatal("AuthString encoding isn't correct.") + } +} diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/credentials.go b/vendor/github.com/moby/moby/cliconfig/credentials/credentials.go new file mode 100644 index 0000000..ca874ca --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/docker/api/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. + GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/default_store.go b/vendor/github.com/moby/moby/cliconfig/credentials/default_store.go new file mode 100644 index 0000000..b473370 --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/default_store.go @@ -0,0 +1,22 @@ +package credentials + +import ( + "os/exec" + + "github.com/docker/docker/cliconfig/configfile" +) + +// DetectDefaultStore sets the default credentials store +// if the host includes the default store helper program. +func DetectDefaultStore(c *configfile.ConfigFile) { + if c.CredentialsStore != "" { + // user defined + return + } + + if defaultCredentialsStore != "" { + if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { + c.CredentialsStore = defaultCredentialsStore + } + } +} diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/default_store_darwin.go b/vendor/github.com/moby/moby/cliconfig/credentials/default_store_darwin.go new file mode 100644 index 0000000..63e8ed4 --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/default_store_darwin.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "osxkeychain" diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/default_store_linux.go b/vendor/github.com/moby/moby/cliconfig/credentials/default_store_linux.go new file mode 100644 index 0000000..864c540 --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/default_store_linux.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "secretservice" diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/default_store_unsupported.go b/vendor/github.com/moby/moby/cliconfig/credentials/default_store_unsupported.go new file mode 100644 index 0000000..519ef53 --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/default_store_unsupported.go @@ -0,0 +1,5 @@ +// +build !windows,!darwin,!linux + +package credentials + +const defaultCredentialsStore = "" diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/default_store_windows.go b/vendor/github.com/moby/moby/cliconfig/credentials/default_store_windows.go new file mode 100644 index 0000000..fb6a974 --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/default_store_windows.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "wincred" diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/file_store.go b/vendor/github.com/moby/moby/cliconfig/credentials/file_store.go new file mode 100644 index 0000000..ca73a38 --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/file_store.go @@ -0,0 +1,53 @@ +package credentials + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/registry" +) + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file *configfile.ConfigFile +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file *configfile.ConfigFile) Store { + return &fileStore{ + file: file, + } +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.AuthConfigs, serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. +func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.AuthConfigs[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for r, ac := range c.file.AuthConfigs { + if serverAddress == registry.ConvertToHostname(r) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.AuthConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *fileStore) Store(authConfig types.AuthConfig) error { + c.file.AuthConfigs[authConfig.ServerAddress] = authConfig + return c.file.Save() +} diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/file_store_test.go b/vendor/github.com/moby/moby/cliconfig/credentials/file_store_test.go new file mode 100644 index 0000000..efed4e9 --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/file_store_test.go @@ -0,0 +1,139 @@ +package credentials + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/configfile" +) + +func newConfigFile(auths map[string]types.AuthConfig) *configfile.ConfigFile { + tmp, _ := ioutil.TempFile("", "docker-test") + name := tmp.Name() + tmp.Close() + + c := cliconfig.NewConfigFile(name) + c.AuthConfigs = auths + return c +} + +func TestFileStoreAddCredentials(t *testing.T) { + f := newConfigFile(make(map[string]types.AuthConfig)) + + s := NewFileStore(f) + err := s.Store(types.AuthConfig{ + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }) + + if err != nil { + t.Fatal(err) + } + + if len(f.AuthConfigs) != 1 { + t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) + } + + a, ok := f.AuthConfigs["https://example.com"] + if !ok { + t.Fatalf("expected auth for https://example.com, got %v", f.AuthConfigs) + } + if a.Auth != "super_secret_token" { + t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestFileStoreGet(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + "https://example.com": { + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }, + }) + + s := NewFileStore(f) + a, err := s.Get("https://example.com") + if err != nil { + t.Fatal(err) + } + if a.Auth != "super_secret_token" { + t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestFileStoreGetAll(t *testing.T) { + s1 := "https://example.com" + s2 := "https://example2.com" + f := newConfigFile(map[string]types.AuthConfig{ + s1: { + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }, + s2: { + Auth: "super_secret_token2", + Email: "foo@example2.com", + ServerAddress: "https://example2.com", + }, + }) + + s := NewFileStore(f) + as, err := s.GetAll() + if err != nil { + t.Fatal(err) + } + if len(as) != 2 { + t.Fatalf("wanted 2, got %d", len(as)) + } + if as[s1].Auth != "super_secret_token" { + t.Fatalf("expected auth `super_secret_token`, got %s", as[s1].Auth) + } + if as[s1].Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", as[s1].Email) + } + if as[s2].Auth != "super_secret_token2" { + t.Fatalf("expected auth `super_secret_token2`, got %s", as[s2].Auth) + } + if as[s2].Email != "foo@example2.com" { + t.Fatalf("expected email `foo@example2.com`, got %s", as[s2].Email) + } +} + +func TestFileStoreErase(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + "https://example.com": { + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }, + }) + + s := NewFileStore(f) + err := s.Erase("https://example.com") + if err != nil { + t.Fatal(err) + } + + // file store never returns errors, check that the auth config is empty + a, err := s.Get("https://example.com") + if err != nil { + t.Fatal(err) + } + + if a.Auth != "" { + t.Fatalf("expected empty auth token, got %s", a.Auth) + } + if a.Email != "" { + t.Fatalf("expected empty email, got %s", a.Email) + } +} diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/native_store.go b/vendor/github.com/moby/moby/cliconfig/credentials/native_store.go new file mode 100644 index 0000000..dec2dbc --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/native_store.go @@ -0,0 +1,144 @@ +package credentials + +import ( + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig/configfile" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + tokenUsername = "" +) + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + programFunc client.ProgramFunc + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file *configfile.ConfigFile, helperSuffix string) Store { + name := remoteCredentialsPrefix + helperSuffix + return &nativeStore{ + programFunc: client.NewShellProgramFunc(name), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := client.Erase(c.programFunc, serverAddress); err != nil { + return err + } + + // Fallback to plain text store to remove email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load user email if it exist or an empty auth config. + auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store. +func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, err := c.listCredentialsInStore() + if err != nil { + return nil, err + } + + // Emails are only stored in the file store. + // This call can be safely eliminated when emails are removed. + fileConfigs, _ := c.fileStore.GetAll() + + authConfigs := make(map[string]types.AuthConfig) + for registry := range auths { + creds, err := c.getCredentialsFromStore(registry) + if err != nil { + return nil, err + } + ac, _ := fileConfigs[registry] // might contain Email + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + authConfigs[registry] = ac + } + + return authConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fallback to old credential in plain text to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. +func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + creds := &credentials.Credentials{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + return client.Store(c.programFunc, creds) +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + creds, err := client.Get(c.programFunc, serverAddress) + if err != nil { + if credentials.IsErrCredentialsNotFound(err) { + // do not return an error if the credentials are not + // in the keyckain. Let docker ask for new credentials. + return ret, nil + } + return ret, err + } + + if creds.Username == tokenUsername { + ret.IdentityToken = creds.Secret + } else { + ret.Password = creds.Secret + ret.Username = creds.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// listCredentialsInStore returns a listing of stored credentials as a map of +// URL -> username. +func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { + return client.List(c.programFunc) +} diff --git a/vendor/github.com/moby/moby/cliconfig/credentials/native_store_test.go b/vendor/github.com/moby/moby/cliconfig/credentials/native_store_test.go new file mode 100644 index 0000000..7664faf --- /dev/null +++ b/vendor/github.com/moby/moby/cliconfig/credentials/native_store_test.go @@ -0,0 +1,355 @@ +package credentials + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" + "github.com/docker/docker/api/types" +) + +const ( + validServerAddress = "https://index.docker.io/v1" + validServerAddress2 = "https://example.com:5002" + invalidServerAddress = "https://foobar.example.com" + missingCredsAddress = "https://missing.docker.io/v1" +) + +var errCommandExited = fmt.Errorf("exited 1") + +// mockCommand simulates interactions between the docker client and a remote +// credentials helper. +// Unit tests inject this mocked command into the remote to control execution. +type mockCommand struct { + arg string + input io.Reader +} + +// Output returns responses from the remote credentials helper. +// It mocks those responses based in the input in the mock. +func (m *mockCommand) Output() ([]byte, error) { + in, err := ioutil.ReadAll(m.input) + if err != nil { + return nil, err + } + inS := string(in) + + switch m.arg { + case "erase": + switch inS { + case validServerAddress: + return nil, nil + default: + return []byte("program failed"), errCommandExited + } + case "get": + switch inS { + case validServerAddress: + return []byte(`{"Username": "foo", "Secret": "bar"}`), nil + case validServerAddress2: + return []byte(`{"Username": "", "Secret": "abcd1234"}`), nil + case missingCredsAddress: + return []byte(credentials.NewErrCredentialsNotFound().Error()), errCommandExited + case invalidServerAddress: + return []byte("program failed"), errCommandExited + } + case "store": + var c credentials.Credentials + err := json.NewDecoder(strings.NewReader(inS)).Decode(&c) + if err != nil { + return []byte("program failed"), errCommandExited + } + switch c.ServerURL { + case validServerAddress: + return nil, nil + default: + return []byte("program failed"), errCommandExited + } + case "list": + return []byte(fmt.Sprintf(`{"%s": "%s", "%s": "%s"}`, validServerAddress, "foo", validServerAddress2, "")), nil + } + + return []byte(fmt.Sprintf("unknown argument %q with %q", m.arg, inS)), errCommandExited +} + +// Input sets the input to send to a remote credentials helper. +func (m *mockCommand) Input(in io.Reader) { + m.input = in +} + +func mockCommandFn(args ...string) client.Program { + return &mockCommand{ + arg: args[0], + } +} + +func TestNativeStoreAddCredentials(t *testing.T) { + f := newConfigFile(make(map[string]types.AuthConfig)) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Store(types.AuthConfig{ + Username: "foo", + Password: "bar", + Email: "foo@example.com", + ServerAddress: validServerAddress, + }) + + if err != nil { + t.Fatal(err) + } + + if len(f.AuthConfigs) != 1 { + t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) + } + + a, ok := f.AuthConfigs[validServerAddress] + if !ok { + t.Fatalf("expected auth for %s, got %v", validServerAddress, f.AuthConfigs) + } + if a.Auth != "" { + t.Fatalf("expected auth to be empty, got %s", a.Auth) + } + if a.Username != "" { + t.Fatalf("expected username to be empty, got %s", a.Username) + } + if a.Password != "" { + t.Fatalf("expected password to be empty, got %s", a.Password) + } + if a.IdentityToken != "" { + t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestNativeStoreAddInvalidCredentials(t *testing.T) { + f := newConfigFile(make(map[string]types.AuthConfig)) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Store(types.AuthConfig{ + Username: "foo", + Password: "bar", + Email: "foo@example.com", + ServerAddress: invalidServerAddress, + }) + + if err == nil { + t.Fatal("expected error, got nil") + } + + if !strings.Contains(err.Error(), "program failed") { + t.Fatalf("expected `program failed`, got %v", err) + } + + if len(f.AuthConfigs) != 0 { + t.Fatalf("expected 0 auth config, got %d", len(f.AuthConfigs)) + } +} + +func TestNativeStoreGet(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + a, err := s.Get(validServerAddress) + if err != nil { + t.Fatal(err) + } + + if a.Username != "foo" { + t.Fatalf("expected username `foo`, got %s", a.Username) + } + if a.Password != "bar" { + t.Fatalf("expected password `bar`, got %s", a.Password) + } + if a.IdentityToken != "" { + t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestNativeStoreGetIdentityToken(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress2: { + Email: "foo@example2.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + a, err := s.Get(validServerAddress2) + if err != nil { + t.Fatal(err) + } + + if a.Username != "" { + t.Fatalf("expected username to be empty, got %s", a.Username) + } + if a.Password != "" { + t.Fatalf("expected password to be empty, got %s", a.Password) + } + if a.IdentityToken != "abcd1234" { + t.Fatalf("expected identity token `abcd1234`, got %s", a.IdentityToken) + } + if a.Email != "foo@example2.com" { + t.Fatalf("expected email `foo@example2.com`, got %s", a.Email) + } +} + +func TestNativeStoreGetAll(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + as, err := s.GetAll() + if err != nil { + t.Fatal(err) + } + + if len(as) != 2 { + t.Fatalf("wanted 2, got %d", len(as)) + } + + if as[validServerAddress].Username != "foo" { + t.Fatalf("expected username `foo` for %s, got %s", validServerAddress, as[validServerAddress].Username) + } + if as[validServerAddress].Password != "bar" { + t.Fatalf("expected password `bar` for %s, got %s", validServerAddress, as[validServerAddress].Password) + } + if as[validServerAddress].IdentityToken != "" { + t.Fatalf("expected identity to be empty for %s, got %s", validServerAddress, as[validServerAddress].IdentityToken) + } + if as[validServerAddress].Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com` for %s, got %s", validServerAddress, as[validServerAddress].Email) + } + if as[validServerAddress2].Username != "" { + t.Fatalf("expected username to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Username) + } + if as[validServerAddress2].Password != "" { + t.Fatalf("expected password to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Password) + } + if as[validServerAddress2].IdentityToken != "abcd1234" { + t.Fatalf("expected identity token `abcd1324` for %s, got %s", validServerAddress2, as[validServerAddress2].IdentityToken) + } + if as[validServerAddress2].Email != "" { + t.Fatalf("expected no email for %s, got %s", validServerAddress2, as[validServerAddress2].Email) + } +} + +func TestNativeStoreGetMissingCredentials(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + _, err := s.Get(missingCredsAddress) + if err != nil { + // missing credentials do not produce an error + t.Fatal(err) + } +} + +func TestNativeStoreGetInvalidAddress(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + _, err := s.Get(invalidServerAddress) + if err == nil { + t.Fatal("expected error, got nil") + } + + if !strings.Contains(err.Error(), "program failed") { + t.Fatalf("expected `program failed`, got %v", err) + } +} + +func TestNativeStoreErase(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Erase(validServerAddress) + if err != nil { + t.Fatal(err) + } + + if len(f.AuthConfigs) != 0 { + t.Fatalf("expected 0 auth configs, got %d", len(f.AuthConfigs)) + } +} + +func TestNativeStoreEraseInvalidAddress(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Erase(invalidServerAddress) + if err == nil { + t.Fatal("expected error, got nil") + } + + if !strings.Contains(err.Error(), "program failed") { + t.Fatalf("expected `program failed`, got %v", err) + } +} diff --git a/vendor/github.com/moby/moby/client/README.md b/vendor/github.com/moby/moby/client/README.md new file mode 100644 index 0000000..059dfb3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. + +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/moby/moby/client/checkpoint_create.go b/vendor/github.com/moby/moby/client/checkpoint_create.go new file mode 100644 index 0000000..0effe49 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_create.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_create_test.go b/vendor/github.com/moby/moby/client/checkpoint_create_test.go new file mode 100644 index 0000000..96e5187 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_create_test.go @@ -0,0 +1,73 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CheckpointCreate(context.Background(), "nothing", types.CheckpointCreateOptions{ + CheckpointID: "noting", + Exit: true, + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointCreate(t *testing.T) { + expectedContainerID := "container_id" + expectedCheckpointID := "checkpoint_id" + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + createOptions := &types.CheckpointCreateOptions{} + if err := json.NewDecoder(req.Body).Decode(createOptions); err != nil { + return nil, err + } + + if createOptions.CheckpointID != expectedCheckpointID { + return nil, fmt.Errorf("expected CheckpointID to be 'checkpoint_id', got %v", createOptions.CheckpointID) + } + + if !createOptions.Exit { + return nil, fmt.Errorf("expected Exit to be true") + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointCreate(context.Background(), expectedContainerID, types.CheckpointCreateOptions{ + CheckpointID: expectedCheckpointID, + Exit: true, + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_delete.go b/vendor/github.com/moby/moby/client/checkpoint_delete.go new file mode 100644 index 0000000..e6e7558 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_delete_test.go b/vendor/github.com/moby/moby/client/checkpoint_delete_test.go new file mode 100644 index 0000000..a78b050 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_delete_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointDeleteError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointDelete(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints/checkpoint_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_list.go b/vendor/github.com/moby/moby/client/checkpoint_list.go new file mode 100644 index 0000000..8eb720a --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointList returns the volumes configured in the docker host. +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + if err != nil { + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + ensureReaderClosed(resp) + return checkpoints, err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_list_test.go b/vendor/github.com/moby/moby/client/checkpoint_list_test.go new file mode 100644 index 0000000..6c90f61 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_list_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointList(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]types.Checkpoint{ + { + Name: "checkpoint", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + checkpoints, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(checkpoints) != 1 { + t.Fatalf("expected 1 checkpoint, got %v", checkpoints) + } +} diff --git a/vendor/github.com/moby/moby/client/client.go b/vendor/github.com/moby/moby/client/client.go new file mode 100644 index 0000000..50f9fe1 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client.go @@ -0,0 +1,246 @@ +/* +Package client is a Go client for the Docker Engine API. + +The "docker" command uses this package to communicate with the daemon. It can also +be used by your own Go applications to do anything the command-line interface does +– running containers, pulling images, managing swarms, etc. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/reference/api/ + +Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewEnvClient, or +configured manually with NewClient. + +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } + +*/ +package client + +import ( + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +// DefaultVersion is the version of the current stable API +const DefaultVersion string = "1.27" + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool +} + +// NewEnvClient initializes a new API client based on environment variables. +// Use DOCKER_HOST to set the url to the docker server. +// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// Use DOCKER_CERT_PATH to load the tls certificates from. +// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +func NewEnvClient() (*Client, error) { + var client *http.Client + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + } + } + + host := os.Getenv("DOCKER_HOST") + if host == "" { + host = DefaultDockerHost + } + version := os.Getenv("DOCKER_API_VERSION") + if version == "" { + version = DefaultVersion + } + + cli, err := NewClient(host, version, client, nil) + if err != nil { + return cli, err + } + if os.Getenv("DOCKER_API_VERSION") != "" { + cli.manualOverride = true + } + return cli, nil +} + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + proto, addr, basePath, err := ParseHost(host) + if err != nil { + return nil, err + } + + if client != nil { + if _, ok := client.Transport.(*http.Transport); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) + } + } else { + transport := new(http.Transport) + sockets.ConfigureTransport(transport, proto, addr) + client = &http.Client{ + Transport: transport, + } + } + + scheme := "http" + tlsConfig := resolveTLSConfig(client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + scheme = "https" + } + + return &Client{ + scheme: scheme, + host: host, + proto: proto, + addr: addr, + basePath: basePath, + client: client, + version: version, + customHTTPHeaders: httpHeaders, + }, nil +} + +// Close ensures that transport.Client is closed +// especially needed while using NewClient with *http.Client = nil +// for example +// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"}) +func (cli *Client) Close() error { + + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + + return nil +} + +// getAPIPath returns the versioned request path to call the api. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(p string, query url.Values) string { + var apiPath string + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) + } else { + apiPath = fmt.Sprintf("%s%s", cli.basePath, p) + } + + u := &url.URL{ + Path: apiPath, + } + if len(query) > 0 { + u.RawQuery = query.Encode() + } + return u.String() +} + +// ClientVersion returns the version string associated with this +// instance of the Client. Note that this value can be changed +// via the DOCKER_API_VERSION env var. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// UpdateClientVersion updates the version string associated with this +// instance of the Client. +func (cli *Client) UpdateClientVersion(v string) { + if !cli.manualOverride { + cli.version = v + } + +} + +// ParseHost verifies that the given host strings is valid. +func ParseHost(host string) (string, string, string, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return "", "", "", err + } + addr = parsed.Host + basePath = parsed.Path + } + return proto, addr, basePath, nil +} diff --git a/vendor/github.com/moby/moby/client/client_mock_test.go b/vendor/github.com/moby/moby/client/client_mock_test.go new file mode 100644 index 0000000..0ab935d --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_mock_test.go @@ -0,0 +1,45 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" +) + +func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client { + return &http.Client{ + Transport: transportFunc(doer), + } +} + +func errorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", "application/json") + + body, err := json.Marshal(&types.ErrorResponse{ + Message: message, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader(body)), + Header: header, + }, nil + } +} + +func plainTextErrorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader([]byte(message))), + }, nil + } +} diff --git a/vendor/github.com/moby/moby/client/client_test.go b/vendor/github.com/moby/moby/client/client_test.go new file mode 100644 index 0000000..ee199c2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_test.go @@ -0,0 +1,283 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNewEnvClient(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping unix only test for windows") + } + cases := []struct { + envs map[string]string + expectedError string + expectedVersion string + }{ + { + envs: map[string]string{}, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "invalid/path", + }, + expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory. Make sure the key is not encrypted", + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_TLS_VERIFY": "1", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_HOST": "https://notaunixsocket", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_HOST": "host", + }, + expectedError: "unable to parse docker host `host`", + }, + { + envs: map[string]string{ + "DOCKER_HOST": "invalid://url", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "anything", + }, + expectedVersion: "anything", + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "1.22", + }, + expectedVersion: "1.22", + }, + } + for _, c := range cases { + recoverEnvs := setupEnvs(t, c.envs) + apiclient, err := NewEnvClient() + if c.expectedError != "" { + if err == nil { + t.Errorf("expected an error for %v", c) + } else if err.Error() != c.expectedError { + t.Errorf("expected an error %s, got %s, for %v", c.expectedError, err.Error(), c) + } + } else { + if err != nil { + t.Error(err) + } + version := apiclient.ClientVersion() + if version != c.expectedVersion { + t.Errorf("expected %s, got %s, for %v", c.expectedVersion, version, c) + } + } + + if c.envs["DOCKER_TLS_VERIFY"] != "" { + // pedantic checking that this is handled correctly + tr := apiclient.client.Transport.(*http.Transport) + if tr.TLSClientConfig == nil { + t.Errorf("no tls config found when DOCKER_TLS_VERIFY enabled") + } + + if tr.TLSClientConfig.InsecureSkipVerify { + t.Errorf("tls verification should be enabled") + } + } + + recoverEnvs(t) + } +} + +func setupEnvs(t *testing.T, envs map[string]string) func(*testing.T) { + oldEnvs := map[string]string{} + for key, value := range envs { + oldEnv := os.Getenv(key) + oldEnvs[key] = oldEnv + err := os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + return func(t *testing.T) { + for key, value := range oldEnvs { + err := os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + } +} + +func TestGetAPIPath(t *testing.T) { + cases := []struct { + v string + p string + q url.Values + e string + }{ + {"", "/containers/json", nil, "/containers/json"}, + {"", "/containers/json", url.Values{}, "/containers/json"}, + {"", "/containers/json", url.Values{"s": []string{"c"}}, "/containers/json?s=c"}, + {"1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, + } + + for _, cs := range cases { + c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) + if err != nil { + t.Fatal(err) + } + g := c.getAPIPath(cs.p, cs.q) + if g != cs.e { + t.Fatalf("Expected %s, got %s", cs.e, g) + } + + err = c.Close() + if nil != err { + t.Fatalf("close client failed, error message: %s", err) + } + } +} + +func TestParseHost(t *testing.T) { + cases := []struct { + host string + proto string + addr string + base string + err bool + }{ + {"", "", "", "", true}, + {"foobar", "", "", "", true}, + {"foo://bar", "foo", "bar", "", false}, + {"tcp://localhost:2476", "tcp", "localhost:2476", "", false}, + {"tcp://localhost:2476/path", "tcp", "localhost:2476", "/path", false}, + } + + for _, cs := range cases { + p, a, b, e := ParseHost(cs.host) + if cs.err && e == nil { + t.Fatalf("expected error, got nil") + } + if !cs.err && e != nil { + t.Fatal(e) + } + if cs.proto != p { + t.Fatalf("expected proto %s, got %s", cs.proto, p) + } + if cs.addr != a { + t.Fatalf("expected addr %s, got %s", cs.addr, a) + } + if cs.base != b { + t.Fatalf("expected base %s, got %s", cs.base, b) + } + } +} + +func TestUpdateClientVersion(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + splitQuery := strings.Split(req.URL.Path, "/") + queryVersion := splitQuery[1] + b, err := json.Marshal(types.Version{ + APIVersion: queryVersion, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + cases := []struct { + v string + }{ + {"1.20"}, + {"v1.21"}, + {"1.22"}, + {"v1.22"}, + } + + for _, cs := range cases { + client.UpdateClientVersion(cs.v) + r, err := client.ServerVersion(context.Background()) + if err != nil { + t.Fatal(err) + } + if strings.TrimPrefix(r.APIVersion, "v") != strings.TrimPrefix(cs.v, "v") { + t.Fatalf("Expected %s, got %s", cs.v, r.APIVersion) + } + } +} + +func TestNewEnvClientSetsDefaultVersion(t *testing.T) { + // Unset environment variables + envVarKeys := []string{ + "DOCKER_HOST", + "DOCKER_API_VERSION", + "DOCKER_TLS_VERIFY", + "DOCKER_CERT_PATH", + } + envVarValues := make(map[string]string) + for _, key := range envVarKeys { + envVarValues[key] = os.Getenv(key) + os.Setenv(key, "") + } + + client, err := NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != DefaultVersion { + t.Fatalf("Expected %s, got %s", DefaultVersion, client.version) + } + + expected := "1.22" + os.Setenv("DOCKER_API_VERSION", expected) + client, err = NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != expected { + t.Fatalf("Expected %s, got %s", expected, client.version) + } + + // Restore environment variables + for _, key := range envVarKeys { + os.Setenv(key, envVarValues[key]) + } +} diff --git a/vendor/github.com/moby/moby/client/client_unix.go b/vendor/github.com/moby/moby/client/client_unix.go new file mode 100644 index 0000000..89de892 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_unix.go @@ -0,0 +1,6 @@ +// +build linux freebsd solaris openbsd darwin + +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/moby/moby/client/client_windows.go b/vendor/github.com/moby/moby/client/client_windows.go new file mode 100644 index 0000000..07c0c7a --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_windows.go @@ -0,0 +1,4 @@ +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/moby/moby/client/container_attach.go b/vendor/github.com/moby/moby/client/container_attach.go new file mode 100644 index 0000000..eea4682 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_attach.go @@ -0,0 +1,37 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerAttach attaches a connection to a container in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. +func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{"Content-Type": {"text/plain"}} + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/moby/moby/client/container_commit.go b/vendor/github.com/moby/moby/client/container_commit.go new file mode 100644 index 0000000..c766d62 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_commit.go @@ -0,0 +1,53 @@ +package client + +import ( + "encoding/json" + "errors" + "net/url" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" + "golang.org/x/net/context" +) + +// ContainerCommit applies changes into a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + distributionRef, err := distreference.ParseNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + + tag = reference.GetTagFromNamedRef(distributionRef) + repository = distributionRef.Name() + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if options.Pause != true { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/container_commit_test.go b/vendor/github.com/moby/moby/client/container_commit_test.go new file mode 100644 index 0000000..a844675 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_commit_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerCommitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCommit(context.Background(), "nothing", types.ContainerCommitOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerCommit(t *testing.T) { + expectedURL := "/commit" + expectedContainerID := "container_id" + specifiedReference := "repository_name:tag" + expectedRepositoryName := "repository_name" + expectedTag := "tag" + expectedComment := "comment" + expectedAuthor := "author" + expectedChanges := []string{"change1", "change2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + containerID := query.Get("container") + if containerID != expectedContainerID { + return nil, fmt.Errorf("container id not set in URL query properly. Expected '%s', got %s", expectedContainerID, containerID) + } + repo := query.Get("repo") + if repo != expectedRepositoryName { + return nil, fmt.Errorf("container repo not set in URL query properly. Expected '%s', got %s", expectedRepositoryName, repo) + } + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("container tag not set in URL query properly. Expected '%s', got %s'", expectedTag, tag) + } + comment := query.Get("comment") + if comment != expectedComment { + return nil, fmt.Errorf("container comment not set in URL query properly. Expected '%s', got %s'", expectedComment, comment) + } + author := query.Get("author") + if author != expectedAuthor { + return nil, fmt.Errorf("container author not set in URL query properly. Expected '%s', got %s'", expectedAuthor, author) + } + pause := query.Get("pause") + if pause != "0" { + return nil, fmt.Errorf("container pause not set in URL query properly. Expected 'true', got %v'", pause) + } + changes := query["changes"] + if len(changes) != len(expectedChanges) { + return nil, fmt.Errorf("expected container changes size to be '%d', got %d", len(expectedChanges), len(changes)) + } + b, err := json.Marshal(types.IDResponse{ + ID: "new_container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCommit(context.Background(), expectedContainerID, types.ContainerCommitOptions{ + Reference: specifiedReference, + Comment: expectedComment, + Author: expectedAuthor, + Changes: expectedChanges, + Pause: false, + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "new_container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/container_copy.go b/vendor/github.com/moby/moby/client/container_copy.go new file mode 100644 index 0000000..8380eea --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_copy.go @@ -0,0 +1,97 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns Stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + + urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + response, err := cli.head(ctx, urlStr, query, nil) + if err != nil { + return types.ContainerPathStat{}, err + } + defer ensureReaderClosed(response) + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + if err != nil { + return err + } + defer ensureReaderClosed(response) + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. + stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/moby/moby/client/container_copy_test.go b/vendor/github.com/moby/moby/client/container_copy_test.go new file mode 100644 index 0000000..706a20c --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_copy_test.go @@ -0,0 +1,244 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStatPathError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestContainerStatPathNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestContainerStatPath(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "HEAD" { + return nil, fmt.Errorf("expected HEAD method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly") + } + content, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(content) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + stat, err := client.ContainerStatPath(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } +} + +func TestCopyToContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyToContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyToContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "PUT" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + noOverwriteDirNonDir := query.Get("noOverwriteDirNonDir") + if noOverwriteDirNonDir != "true" { + return nil, fmt.Errorf("noOverwriteDirNonDir not set in URL query properly, expected true, got %s", noOverwriteDirNonDir) + } + + content, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + if err := req.Body.Close(); err != nil { + return nil, err + } + if string(content) != "content" { + return nil, fmt.Errorf("expected content to be 'content', got %s", string(content)) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestCopyFromContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyFromContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyFromContainerNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestCopyFromContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + + headercontent, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(headercontent) + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("content"))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + r, stat, err := client.CopyFromContainer(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } + content, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if err := r.Close(); err != nil { + t.Fatal(err) + } + if string(content) != "content" { + t.Fatalf("expected content to be 'content', got %s", string(content)) + } +} diff --git a/vendor/github.com/moby/moby/client/container_create.go b/vendor/github.com/moby/moby/client/container_create.go new file mode 100644 index 0000000..6841b0b --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based in the given configuration. +// It can be associated with a name, but it's not mandatory. +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { + var response container.ContainerCreateCreatedBody + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { + hostConfig.AutoRemove = false + } + + query := url.Values{} + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + if err != nil { + if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { + return response, imageNotFoundError{config.Image} + } + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/container_create_test.go b/vendor/github.com/moby/moby/client/container_create_test.go new file mode 100644 index 0000000..73474cf --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create_test.go @@ -0,0 +1,118 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusInternalServerError, got %v", err) + } + + // 404 doesn't automagitally means an unknown image + client = &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + _, err = client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusNotFound, got %v", err) + } +} + +func TestContainerCreateImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "No such image")), + } + _, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestContainerCreateWithName(t *testing.T) { + expectedURL := "/containers/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "container_name" { + return nil, fmt.Errorf("container name not set in URL query properly. Expected `container_name`, got %s", name) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCreate(context.Background(), nil, nil, nil, "container_name") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} + +// TestContainerCreateAutoRemove validates that a client using API 1.24 always disables AutoRemove. When using API 1.25 +// or up, AutoRemove should not be disabled. +func TestContainerCreateAutoRemove(t *testing.T) { + autoRemoveValidator := func(expectedValue bool) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + var config configWrapper + + if err := json.NewDecoder(req.Body).Decode(&config); err != nil { + return nil, err + } + if config.HostConfig.AutoRemove != expectedValue { + return nil, fmt.Errorf("expected AutoRemove to be %v, got %v", expectedValue, config.HostConfig.AutoRemove) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } + } + + client := &Client{ + client: newMockClient(autoRemoveValidator(false)), + version: "1.24", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } + client = &Client{ + client: newMockClient(autoRemoveValidator(true)), + version: "1.25", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_diff.go b/vendor/github.com/moby/moby/client/container_diff.go new file mode 100644 index 0000000..1e3e554 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_diff.go @@ -0,0 +1,23 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerDiff shows differences in a container filesystem since it was started. +func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { + var changes []types.ContainerChange + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + ensureReaderClosed(serverResp) + return changes, err +} diff --git a/vendor/github.com/moby/moby/client/container_diff_test.go b/vendor/github.com/moby/moby/client/container_diff_test.go new file mode 100644 index 0000000..1ce1117 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_diff_test.go @@ -0,0 +1,61 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerDiffError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerDiff(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + +} + +func TestContainerDiff(t *testing.T) { + expectedURL := "/containers/container_id/changes" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal([]types.ContainerChange{ + { + Kind: 0, + Path: "/path/1", + }, + { + Kind: 1, + Path: "/path/2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + changes, err := client.ContainerDiff(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if len(changes) != 2 { + t.Fatalf("expected an array of 2 changes, got %v", changes) + } +} diff --git a/vendor/github.com/moby/moby/client/container_exec.go b/vendor/github.com/moby/moby/client/container_exec.go new file mode 100644 index 0000000..0665c54 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_exec.go @@ -0,0 +1,54 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + if err != nil { + return response, err + } + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} + +// ContainerExecStart starts an exec process already created in the docker host. +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) + ensureReaderClosed(resp) + return err +} + +// ContainerExecAttach attaches a connection to an exec process in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { + headers := map[string][]string{"Content-Type": {"application/json"}} + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/container_exec_test.go b/vendor/github.com/moby/moby/client/container_exec_test.go new file mode 100644 index 0000000..0e296a5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_exec_test.go @@ -0,0 +1,157 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerExecCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecCreate(t *testing.T) { + expectedURL := "/containers/container_id/exec" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + // FIXME validate the content is the given ExecConfig ? + if err := req.ParseForm(); err != nil { + return nil, err + } + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil { + return nil, err + } + if execConfig.User != "user" { + return nil, fmt.Errorf("expected an execConfig with User == 'user', got %v", execConfig) + } + b, err := json.Marshal(types.IDResponse{ + ID: "exec_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{ + User: "user", + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "exec_id" { + t.Fatalf("expected `exec_id`, got %s", r.ID) + } +} + +func TestContainerExecStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecStart(t *testing.T) { + expectedURL := "/exec/exec_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if err := req.ParseForm(); err != nil { + return nil, err + } + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil { + return nil, err + } + if execStartCheck.Tty || !execStartCheck.Detach { + return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{ + Detach: true, + Tty: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecInspect(t *testing.T) { + expectedURL := "/exec/exec_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(types.ContainerExecInspect{ + ExecID: "exec_id", + ContainerID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + inspect, err := client.ContainerExecInspect(context.Background(), "exec_id") + if err != nil { + t.Fatal(err) + } + if inspect.ExecID != "exec_id" { + t.Fatalf("expected ExecID to be `exec_id`, got %s", inspect.ExecID) + } + if inspect.ContainerID != "container_id" { + t.Fatalf("expected ContainerID `container_id`, got %s", inspect.ContainerID) + } +} diff --git a/vendor/github.com/moby/moby/client/container_export.go b/vendor/github.com/moby/moby/client/container_export.go new file mode 100644 index 0000000..52194f3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_export.go @@ -0,0 +1,20 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/container_export_test.go b/vendor/github.com/moby/moby/client/container_export_test.go new file mode 100644 index 0000000..5849fe9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_export_test.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerExportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExport(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExport(t *testing.T) { + expectedURL := "/containers/container_id/export" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerExport(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } +} diff --git a/vendor/github.com/moby/moby/client/container_inspect.go b/vendor/github.com/moby/moby/client/container_inspect.go new file mode 100644 index 0000000..17f1809 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_inspect.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. +func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, nil, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/container_inspect_test.go b/vendor/github.com/moby/moby/client/container_inspect_test.go new file mode 100644 index 0000000..f1a6f4a --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_inspect_test.go @@ -0,0 +1,125 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "unknown") + if err == nil || !IsErrContainerNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} + +func TestContainerInspect(t *testing.T) { + expectedURL := "/containers/container_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.ID) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.ID) + } +} + +func TestContainerInspectNode(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + Node: &types.ContainerNode{ + ID: "container_node_id", + Addr: "container_node", + Labels: map[string]string{"foo": "bar"}, + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.ID) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.ID) + } + if r.Node.ID != "container_node_id" { + t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) + } + if r.Node.Addr != "container_node" { + t.Fatalf("expected `container_node`, got %s", r.Node.Addr) + } + foo, ok := r.Node.Labels["foo"] + if foo != "bar" || !ok { + t.Fatalf("expected `bar` for label `foo`") + } +} diff --git a/vendor/github.com/moby/moby/client/container_kill.go b/vendor/github.com/moby/moby/client/container_kill.go new file mode 100644 index 0000000..29f80c7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_kill.go @@ -0,0 +1,17 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_kill_test.go b/vendor/github.com/moby/moby/client/container_kill_test.go new file mode 100644 index 0000000..9477b0a --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_kill_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerKillError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerKill(context.Background(), "nothing", "SIGKILL") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerKill(t *testing.T) { + expectedURL := "/containers/container_id/kill" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + signal := req.URL.Query().Get("signal") + if signal != "SIGKILL" { + return nil, fmt.Errorf("signal not set in URL query properly. Expected 'SIGKILL', got %s", signal) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerKill(context.Background(), "container_id", "SIGKILL") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_list.go b/vendor/github.com/moby/moby/client/container_list.go new file mode 100644 index 0000000..4398912 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_list.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainerList returns the list of containers in the docker host. +func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + ensureReaderClosed(resp) + return containers, err +} diff --git a/vendor/github.com/moby/moby/client/container_list_test.go b/vendor/github.com/moby/moby/client/container_list_test.go new file mode 100644 index 0000000..e41c687 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_list_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestContainerListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerList(t *testing.T) { + expectedURL := "/containers/json" + expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}` + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + all := query.Get("all") + if all != "1" { + return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all) + } + limit := query.Get("limit") + if limit != "0" { + return nil, fmt.Errorf("limit should have not be present in query. Expected '0', got %s", limit) + } + since := query.Get("since") + if since != "container" { + return nil, fmt.Errorf("since not set in URL query properly. Expected 'container', got %s", since) + } + before := query.Get("before") + if before != "" { + return nil, fmt.Errorf("before should have not be present in query, go %s", before) + } + size := query.Get("size") + if size != "1" { + return nil, fmt.Errorf("size not set in URL query properly. Expected '1', got %s", size) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("expected filters incoherent '%v' with actual filters %v", expectedFilters, filters) + } + + b, err := json.Marshal([]types.Container{ + { + ID: "container_id1", + }, + { + ID: "container_id2", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("before", "container") + containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{ + Size: true, + All: true, + Since: "container", + Filters: filters, + }) + if err != nil { + t.Fatal(err) + } + if len(containers) != 2 { + t.Fatalf("expected 2 containers, got %v", containers) + } +} diff --git a/vendor/github.com/moby/moby/client/container_logs.go b/vendor/github.com/moby/moby/client/container_logs.go new file mode 100644 index 0000000..69056b6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/container_logs_test.go b/vendor/github.com/moby/moby/client/container_logs_test.go new file mode 100644 index 0000000..99e3184 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestContainerLogsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + _, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{ + Since: "2006-01-02TZ", + }) + if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { + t.Fatalf("expected a 'parsing time' error, got %v", err) + } +} + +func TestContainerLogs(t *testing.T) { + expectedURL := "/containers/container_id/logs" + cases := []struct { + options types.ContainerLogsOptions + expectedQueryParams map[string]string + }{ + { + expectedQueryParams: map[string]string{ + "tail": "", + }, + }, + { + options: types.ContainerLogsOptions{ + Tail: "any", + }, + expectedQueryParams: map[string]string{ + "tail": "any", + }, + }, + { + options: types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, + Follow: true, + }, + expectedQueryParams: map[string]string{ + "tail": "", + "stdout": "1", + "stderr": "1", + "timestamps": "1", + "details": "1", + "follow": "1", + }, + }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date, timestamp or go duration will be + // passed as is + Since: "invalid but valid", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "invalid but valid", + }, + }, + } + for _, logCase := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check query parameters + query := r.URL.Query() + for key, expected := range logCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerLogs(context.Background(), "container_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ContainerLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ContainerLogs(ctx, "container_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_pause.go b/vendor/github.com/moby/moby/client/container_pause.go new file mode 100644 index 0000000..412067a --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_pause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_pause_test.go b/vendor/github.com/moby/moby/client/container_pause_test.go new file mode 100644 index 0000000..0ee2f05 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_pause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerPauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerPause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerPause(t *testing.T) { + expectedURL := "/containers/container_id/pause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerPause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_prune.go b/vendor/github.com/moby/moby/client/container_prune.go new file mode 100644 index 0000000..b582170 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/moby/moby/client/container_remove.go b/vendor/github.com/moby/moby/client/container_remove.go new file mode 100644 index 0000000..3a79590 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_remove.go @@ -0,0 +1,27 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerRemove kills and removes a container from the docker host. +func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_remove_test.go b/vendor/github.com/moby/moby/client/container_remove_test.go new file mode 100644 index 0000000..798c08b --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_remove_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRemove(t *testing.T) { + expectedURL := "/containers/container_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + volume := query.Get("v") + if volume != "1" { + return nil, fmt.Errorf("v (volume) not set in URL query properly. Expected '1', got %s", volume) + } + force := query.Get("force") + if force != "1" { + return nil, fmt.Errorf("force not set in URL query properly. Expected '1', got %s", force) + } + link := query.Get("link") + if link != "" { + return nil, fmt.Errorf("link should have not be present in query, go %s", link) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_rename.go b/vendor/github.com/moby/moby/client/container_rename.go new file mode 100644 index 0000000..0e718da --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_rename.go @@ -0,0 +1,16 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_rename_test.go b/vendor/github.com/moby/moby/client/container_rename_test.go new file mode 100644 index 0000000..732ebff --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_rename_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerRenameError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRename(context.Background(), "nothing", "newNothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRename(t *testing.T) { + expectedURL := "/containers/container_id/rename" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "newName" { + return nil, fmt.Errorf("name not set in URL query properly. Expected 'newName', got %s", name) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRename(context.Background(), "container_id", "newName") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_resize.go b/vendor/github.com/moby/moby/client/container_resize.go new file mode 100644 index 0000000..66c3cc1 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_resize.go @@ -0,0 +1,29 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_resize_test.go b/vendor/github.com/moby/moby/client/container_resize_test.go new file mode 100644 index 0000000..5b2efec --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_resize_test.go @@ -0,0 +1,82 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/containers/container_id/resize")), + } + + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/exec/exec_id/resize")), + } + + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func resizeTransport(expectedURL string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + h := query.Get("h") + if h != "500" { + return nil, fmt.Errorf("h not set in URL query properly. Expected '500', got %s", h) + } + w := query.Get("w") + if w != "600" { + return nil, fmt.Errorf("w not set in URL query properly. Expected '600', got %s", w) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + } +} diff --git a/vendor/github.com/moby/moby/client/container_restart.go b/vendor/github.com/moby/moby/client/container_restart.go new file mode 100644 index 0000000..74d7455 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_restart.go @@ -0,0 +1,22 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerRestart stops and starts a container again. +// It makes the daemon to wait for the container to be up again for +// a specific amount of time, given the timeout. +func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_restart_test.go b/vendor/github.com/moby/moby/client/container_restart_test.go new file mode 100644 index 0000000..8c3cfd6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_restart_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerRestartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerRestart(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRestart(t *testing.T) { + expectedURL := "/containers/container_id/restart" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerRestart(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_start.go b/vendor/github.com/moby/moby/client/container_start.go new file mode 100644 index 0000000..b1f08de --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_start.go @@ -0,0 +1,24 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_start_test.go b/vendor/github.com/moby/moby/client/container_start_test.go new file mode 100644 index 0000000..5826fa8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_start_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerStart(context.Background(), "nothing", types.ContainerStartOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStart(t *testing.T) { + expectedURL := "/containers/container_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + // we're not expecting any payload, but if one is supplied, check it is valid. + if req.Header.Get("Content-Type") == "application/json" { + var startConfig interface{} + if err := json.NewDecoder(req.Body).Decode(&startConfig); err != nil { + return nil, fmt.Errorf("Unable to parse json: %s", err) + } + } + + checkpoint := req.URL.Query().Get("checkpoint") + if checkpoint != "checkpoint_id" { + return nil, fmt.Errorf("checkpoint not set in URL query properly. Expected 'checkpoint_id', got %s", checkpoint) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerStart(context.Background(), "container_id", types.ContainerStartOptions{CheckpointID: "checkpoint_id"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_stats.go b/vendor/github.com/moby/moby/client/container_stats.go new file mode 100644 index 0000000..4758c66 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stats.go @@ -0,0 +1,26 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. +func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/moby/moby/client/container_stats_test.go b/vendor/github.com/moby/moby/client/container_stats_test.go new file mode 100644 index 0000000..7414f13 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stats_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerStatsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStats(context.Background(), "nothing", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStats(t *testing.T) { + expectedURL := "/containers/container_id/stats" + cases := []struct { + stream bool + expectedStream string + }{ + { + expectedStream: "0", + }, + { + stream: true, + expectedStream: "1", + }, + } + for _, c := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + query := r.URL.Query() + stream := query.Get("stream") + if stream != c.expectedStream { + return nil, fmt.Errorf("stream not set in URL query properly. Expected '%s', got %s", c.expectedStream, stream) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + resp, err := client.ContainerStats(context.Background(), "container_id", c.stream) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} diff --git a/vendor/github.com/moby/moby/client/container_stop.go b/vendor/github.com/moby/moby/client/container_stop.go new file mode 100644 index 0000000..b5418ae --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stop.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerStop stops a container without terminating the process. +// The process is blocked until the container stops or the timeout expires. +func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_stop_test.go b/vendor/github.com/moby/moby/client/container_stop_test.go new file mode 100644 index 0000000..c32cd69 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stop_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerStopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerStop(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStop(t *testing.T) { + expectedURL := "/containers/container_id/stop" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerStop(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_top.go b/vendor/github.com/moby/moby/client/container_top.go new file mode 100644 index 0000000..4e7270e --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_top.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerTop shows process information from within a container. +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { + var response types.ContainerProcessList + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/container_top_test.go b/vendor/github.com/moby/moby/client/container_top_test.go new file mode 100644 index 0000000..7802be0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_top_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerTopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerTop(context.Background(), "nothing", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerTop(t *testing.T) { + expectedURL := "/containers/container_id/top" + expectedProcesses := [][]string{ + {"p1", "p2"}, + {"p3"}, + } + expectedTitles := []string{"title1", "title2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + args := query.Get("ps_args") + if args != "arg1 arg2" { + return nil, fmt.Errorf("args not set in URL query properly. Expected 'arg1 arg2', got %v", args) + } + + b, err := json.Marshal(types.ContainerProcessList{ + Processes: [][]string{ + {"p1", "p2"}, + {"p3"}, + }, + Titles: []string{"title1", "title2"}, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expectedProcesses, processList.Processes) { + t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes) + } + if !reflect.DeepEqual(expectedTitles, processList.Titles) { + t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles) + } +} diff --git a/vendor/github.com/moby/moby/client/container_unpause.go b/vendor/github.com/moby/moby/client/container_unpause.go new file mode 100644 index 0000000..5c76211 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_unpause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_unpause_test.go b/vendor/github.com/moby/moby/client/container_unpause_test.go new file mode 100644 index 0000000..2c42727 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_unpause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerUnpauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerUnpause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUnpause(t *testing.T) { + expectedURL := "/containers/container_id/unpause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerUnpause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_update.go b/vendor/github.com/moby/moby/client/container_update.go new file mode 100644 index 0000000..5082f22 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerUpdate updates resources of a container +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/container_update_test.go b/vendor/github.com/moby/moby/client/container_update_test.go new file mode 100644 index 0000000..715bb7c --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_update_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerUpdate(context.Background(), "nothing", container.UpdateConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUpdate(t *testing.T) { + expectedURL := "/containers/container_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + b, err := json.Marshal(container.ContainerUpdateOKBody{}) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + _, err := client.ContainerUpdate(context.Background(), "container_id", container.UpdateConfig{ + Resources: container.Resources{ + CPUPeriod: 1, + }, + RestartPolicy: container.RestartPolicy{ + Name: "always", + }, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_wait.go b/vendor/github.com/moby/moby/client/container_wait.go new file mode 100644 index 0000000..93212c7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_wait.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/container" +) + +// ContainerWait pauses execution until a container exits. +// It returns the API status code as response of its readiness. +func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int64, error) { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + return -1, err + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + return -1, err + } + + return res.StatusCode, nil +} diff --git a/vendor/github.com/moby/moby/client/container_wait_test.go b/vendor/github.com/moby/moby/client/container_wait_test.go new file mode 100644 index 0000000..9300bc0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_wait_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + + "golang.org/x/net/context" +) + +func TestContainerWaitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + code, err := client.ContainerWait(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + if code != -1 { + t.Fatalf("expected a status code equal to '-1', got %d", code) + } +} + +func TestContainerWait(t *testing.T) { + expectedURL := "/containers/container_id/wait" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(container.ContainerWaitOKBody{ + StatusCode: 15, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + code, err := client.ContainerWait(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if code != 15 { + t.Fatalf("expected a status code equal to '15', got %d", code) + } +} + +func ExampleClient_ContainerWait_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + _, err := client.ContainerWait(ctx, "container_id") + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/disk_usage.go b/vendor/github.com/moby/moby/client/disk_usage.go new file mode 100644 index 0000000..03c80b3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/disk_usage.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + if err != nil { + return du, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git a/vendor/github.com/moby/moby/client/errors.go b/vendor/github.com/moby/moby/client/errors.go new file mode 100644 index 0000000..bf6923f --- /dev/null +++ b/vendor/github.com/moby/moby/client/errors.go @@ -0,0 +1,278 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// errConnectionFailed implements an error returned when connection failed. +type errConnectionFailed struct { + host string +} + +// Error returns a string representation of an errConnectionFailed +func (err errConnectionFailed) Error() string { + if err.host == "" { + return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" + } + return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +} + +// IsErrConnectionFailed returns true if the error is caused by connection failed. +func IsErrConnectionFailed(err error) bool { + _, ok := errors.Cause(err).(errConnectionFailed) + return ok +} + +// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +func ErrorConnectionFailed(host string) error { + return errConnectionFailed{host: host} +} + +type notFound interface { + error + NotFound() bool // Is the error a NotFound error +} + +// IsErrNotFound returns true if the error is caused with an +// object (image, container, network, volume, …) is not found in the docker host. +func IsErrNotFound(err error) bool { + te, ok := err.(notFound) + return ok && te.NotFound() +} + +// imageNotFoundError implements an error returned when an image is not in the docker host. +type imageNotFoundError struct { + imageID string +} + +// NotFound indicates that this error type is of NotFound +func (e imageNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of an imageNotFoundError +func (e imageNotFoundError) Error() string { + return fmt.Sprintf("Error: No such image: %s", e.imageID) +} + +// IsErrImageNotFound returns true if the error is caused +// when an image is not found in the docker host. +func IsErrImageNotFound(err error) bool { + return IsErrNotFound(err) +} + +// containerNotFoundError implements an error returned when a container is not in the docker host. +type containerNotFoundError struct { + containerID string +} + +// NotFound indicates that this error type is of NotFound +func (e containerNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a containerNotFoundError +func (e containerNotFoundError) Error() string { + return fmt.Sprintf("Error: No such container: %s", e.containerID) +} + +// IsErrContainerNotFound returns true if the error is caused +// when a container is not found in the docker host. +func IsErrContainerNotFound(err error) bool { + return IsErrNotFound(err) +} + +// networkNotFoundError implements an error returned when a network is not in the docker host. +type networkNotFoundError struct { + networkID string +} + +// NotFound indicates that this error type is of NotFound +func (e networkNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a networkNotFoundError +func (e networkNotFoundError) Error() string { + return fmt.Sprintf("Error: No such network: %s", e.networkID) +} + +// IsErrNetworkNotFound returns true if the error is caused +// when a network is not found in the docker host. +func IsErrNetworkNotFound(err error) bool { + return IsErrNotFound(err) +} + +// volumeNotFoundError implements an error returned when a volume is not in the docker host. +type volumeNotFoundError struct { + volumeID string +} + +// NotFound indicates that this error type is of NotFound +func (e volumeNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a volumeNotFoundError +func (e volumeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such volume: %s", e.volumeID) +} + +// IsErrVolumeNotFound returns true if the error is caused +// when a volume is not found in the docker host. +func IsErrVolumeNotFound(err error) bool { + return IsErrNotFound(err) +} + +// unauthorizedError represents an authorization error in a remote registry. +type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +func IsErrUnauthorized(err error) bool { + _, ok := err.(unauthorizedError) + return ok +} + +// nodeNotFoundError implements an error returned when a node is not found. +type nodeNotFoundError struct { + nodeID string +} + +// Error returns a string representation of a nodeNotFoundError +func (e nodeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such node: %s", e.nodeID) +} + +// NotFound indicates that this error type is of NotFound +func (e nodeNotFoundError) NotFound() bool { + return true +} + +// IsErrNodeNotFound returns true if the error is caused +// when a node is not found. +func IsErrNodeNotFound(err error) bool { + _, ok := err.(nodeNotFoundError) + return ok +} + +// serviceNotFoundError implements an error returned when a service is not found. +type serviceNotFoundError struct { + serviceID string +} + +// Error returns a string representation of a serviceNotFoundError +func (e serviceNotFoundError) Error() string { + return fmt.Sprintf("Error: No such service: %s", e.serviceID) +} + +// NotFound indicates that this error type is of NotFound +func (e serviceNotFoundError) NotFound() bool { + return true +} + +// IsErrServiceNotFound returns true if the error is caused +// when a service is not found. +func IsErrServiceNotFound(err error) bool { + _, ok := err.(serviceNotFoundError) + return ok +} + +// taskNotFoundError implements an error returned when a task is not found. +type taskNotFoundError struct { + taskID string +} + +// Error returns a string representation of a taskNotFoundError +func (e taskNotFoundError) Error() string { + return fmt.Sprintf("Error: No such task: %s", e.taskID) +} + +// NotFound indicates that this error type is of NotFound +func (e taskNotFoundError) NotFound() bool { + return true +} + +// IsErrTaskNotFound returns true if the error is caused +// when a task is not found. +func IsErrTaskNotFound(err error) bool { + _, ok := err.(taskNotFoundError) + return ok +} + +type pluginPermissionDenied struct { + name string +} + +func (e pluginPermissionDenied) Error() string { + return "Permission denied while installing plugin " + e.name +} + +// IsErrPluginPermissionDenied returns true if the error is caused +// when a user denies a plugin's permissions +func IsErrPluginPermissionDenied(err error) bool { + _, ok := err.(pluginPermissionDenied) + return ok +} + +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker server is version %s", feature, APIrequired, cli.version) + } + return nil +} + +// secretNotFoundError implements an error returned when a secret is not found. +type secretNotFoundError struct { + name string +} + +// Error returns a string representation of a secretNotFoundError +func (e secretNotFoundError) Error() string { + return fmt.Sprintf("Error: no such secret: %s", e.name) +} + +// NoFound indicates that this error type is of NotFound +func (e secretNotFoundError) NotFound() bool { + return true +} + +// IsErrSecretNotFound returns true if the error is caused +// when a secret is not found. +func IsErrSecretNotFound(err error) bool { + _, ok := err.(secretNotFoundError) + return ok +} + +// pluginNotFoundError implements an error returned when a plugin is not in the docker host. +type pluginNotFoundError struct { + name string +} + +// NotFound indicates that this error type is of NotFound +func (e pluginNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a pluginNotFoundError +func (e pluginNotFoundError) Error() string { + return fmt.Sprintf("Error: No such plugin: %s", e.name) +} + +// IsErrPluginNotFound returns true if the error is caused +// when a plugin is not found in the docker host. +func IsErrPluginNotFound(err error) bool { + return IsErrNotFound(err) +} diff --git a/vendor/github.com/moby/moby/client/events.go b/vendor/github.com/moby/moby/client/events.go new file mode 100644 index 0000000..af47aef --- /dev/null +++ b/vendor/github.com/moby/moby/client/events.go @@ -0,0 +1,102 @@ +package client + +import ( + "encoding/json" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" +) + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an io.EOF error will +// be sent over the error channel. If an error is sent all processing will be stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. +func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/events_test.go b/vendor/github.com/moby/moby/client/events_test.go new file mode 100644 index 0000000..ba82d2f --- /dev/null +++ b/vendor/github.com/moby/moby/client/events_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" +) + +func TestEventsErrorInOptions(t *testing.T) { + errorCases := []struct { + options types.EventsOptions + expectedError string + }{ + { + options: types.EventsOptions{ + Since: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + { + options: types.EventsOptions{ + Until: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + } + for _, e := range errorCases { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), e.options) + err := <-errs + if err == nil || !strings.Contains(err.Error(), e.expectedError) { + t.Fatalf("expected an error %q, got %v", e.expectedError, err) + } + } +} + +func TestEventsErrorFromServer(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), types.EventsOptions{}) + err := <-errs + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestEvents(t *testing.T) { + + expectedURL := "/events" + + filters := filters.NewArgs() + filters.Add("type", events.ContainerEventType) + expectedFiltersJSON := fmt.Sprintf(`{"type":{"%s":true}}`, events.ContainerEventType) + + eventsCases := []struct { + options types.EventsOptions + events []events.Message + expectedEvents map[string]bool + expectedQueryParams map[string]string + }{ + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{}, + expectedEvents: make(map[string]bool), + }, + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{ + { + Type: "container", + ID: "1", + Action: "create", + }, + { + Type: "container", + ID: "2", + Action: "die", + }, + { + Type: "container", + ID: "3", + Action: "create", + }, + }, + expectedEvents: map[string]bool{ + "1": true, + "2": true, + "3": true, + }, + }, + } + + for _, eventsCase := range eventsCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + + for key, expected := range eventsCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + buffer := new(bytes.Buffer) + + for _, e := range eventsCase.events { + b, _ := json.Marshal(e) + buffer.Write(b) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(buffer), + }, nil + }), + } + + messages, errs := client.Events(context.Background(), eventsCase.options) + + loop: + for { + select { + case err := <-errs: + if err != nil && err != io.EOF { + t.Fatal(err) + } + + break loop + case e := <-messages: + _, ok := eventsCase.expectedEvents[e.ID] + if !ok { + t.Fatalf("event received not expected with action %s & id %s", e.Action, e.ID) + } + } + } + } +} diff --git a/vendor/github.com/moby/moby/client/hijack.go b/vendor/github.com/moby/moby/client/hijack.go new file mode 100644 index 0000000..74c53f5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/hijack.go @@ -0,0 +1,177 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// tlsClientCon holds tls information and a dialed connection. +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if conn, ok := c.rawConn.(types.CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + apiPath := cli.getAPIPath(path, query) + req, err := http.NewRequest("POST", apiPath, bodyEncoded) + if err != nil { + return types.HijackedResponse{}, err + } + req = cli.addHeaders(req, headers) + + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") + + conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + return types.HijackedResponse{}, err + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + _, err = clientconn.Do(req) + + rwc, br := clientconn.Hijack() + + return types.HijackedResponse{Conn: rwc, Reader: br}, err +} + +func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { + return tlsDialWithDialer(new(net.Dialer), network, addr, config) +} + +// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in +// order to return our custom tlsClientCon struct which holds both the tls.Conn +// object _and_ its underlying raw connection. The rationale for this is that +// we need to be able to close the write end of the connection when attaching, +// which tls.Conn does not provide. +func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { + // We want the Timeout and Deadline values from dialer to cover the + // whole process: TCP connection and TLS handshake. This means that we + // also need to start our own timers now. + timeout := dialer.Timeout + + if !dialer.Deadline.IsZero() { + deadlineTimeout := dialer.Deadline.Sub(time.Now()) + if timeout == 0 || deadlineTimeout < timeout { + timeout = deadlineTimeout + } + } + + var errChannel chan error + + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- errors.New("") + }) + } + + proxyDialer, err := sockets.DialerFromEnvironment(dialer) + if err != nil { + return nil, err + } + + rawConn, err := proxyDialer.Dial(network, addr) + if err != nil { + return nil, err + } + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := rawConn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + config = tlsconfig.Clone(config) + config.ServerName = hostname + } + + conn := tls.Client(rawConn, config) + + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + + err = <-errChannel + } + + if err != nil { + rawConn.Close() + return nil, err + } + + // This is Docker difference with standard's crypto/tls package: returned a + // wrapper which holds both the TLS and raw connections. + return &tlsClientCon{conn, rawConn}, nil +} + +func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + // Notice this isn't Go standard's tls.Dial function + return tlsDial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} diff --git a/vendor/github.com/moby/moby/client/image_build.go b/vendor/github.com/moby/moby/client/image_build.go new file mode 100644 index 0000000..6fde75d --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_build.go @@ -0,0 +1,123 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ImageBuild sends request to the daemon to build images. +// The Body in the response implement an io.ReadCloser and it's up to the caller to +// close it. +func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers.Set("Content-Type", "application/tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/image_build_test.go b/vendor/github.com/moby/moby/client/image_build_test.go new file mode 100644 index 0000000..b9d04f8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_build_test.go @@ -0,0 +1,233 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-units" +) + +func TestImageBuildError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageBuild(context.Background(), nil, types.ImageBuildOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageBuild(t *testing.T) { + v1 := "value1" + v2 := "value2" + emptyRegistryConfig := "bnVsbA==" + buildCases := []struct { + buildOptions types.ImageBuildOptions + expectedQueryParams map[string]string + expectedTags []string + expectedRegistryConfig string + }{ + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: true, + NoCache: true, + Remove: true, + ForceRemove: true, + PullParent: true, + }, + expectedQueryParams: map[string]string{ + "q": "1", + "nocache": "1", + "rm": "1", + "forcerm": "1", + "pull": "1", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: false, + NoCache: false, + Remove: false, + ForceRemove: false, + PullParent: false, + }, + expectedQueryParams: map[string]string{ + "q": "", + "nocache": "", + "rm": "0", + "forcerm": "", + "pull": "", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + RemoteContext: "remoteContext", + Isolation: container.Isolation("isolation"), + CPUSetCPUs: "2", + CPUSetMems: "12", + CPUShares: 20, + CPUQuota: 10, + CPUPeriod: 30, + Memory: 256, + MemorySwap: 512, + ShmSize: 10, + CgroupParent: "cgroup_parent", + Dockerfile: "Dockerfile", + }, + expectedQueryParams: map[string]string{ + "remote": "remoteContext", + "isolation": "isolation", + "cpusetcpus": "2", + "cpusetmems": "12", + "cpushares": "20", + "cpuquota": "10", + "cpuperiod": "30", + "memory": "256", + "memswap": "512", + "shmsize": "10", + "cgroupparent": "cgroup_parent", + "dockerfile": "Dockerfile", + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + BuildArgs: map[string]*string{ + "ARG1": &v1, + "ARG2": &v2, + "ARG3": nil, + }, + }, + expectedQueryParams: map[string]string{ + "buildargs": `{"ARG1":"value1","ARG2":"value2","ARG3":null}`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + Ulimits: []*units.Ulimit{ + { + Name: "nproc", + Hard: 65557, + Soft: 65557, + }, + { + Name: "nofile", + Hard: 20000, + Soft: 40000, + }, + }, + }, + expectedQueryParams: map[string]string{ + "ulimits": `[{"Name":"nproc","Hard":65557,"Soft":65557},{"Name":"nofile","Hard":20000,"Soft":40000}]`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + AuthConfigs: map[string]types.AuthConfig{ + "https://index.docker.io/v1/": { + Auth: "dG90bwo=", + }, + }, + }, + expectedQueryParams: map[string]string{ + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289In19", + }, + } + for _, buildCase := range buildCases { + expectedURL := "/build" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check request headers + registryConfig := r.Header.Get("X-Registry-Config") + if registryConfig != buildCase.expectedRegistryConfig { + return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig) + } + contentType := r.Header.Get("Content-Type") + if contentType != "application/tar" { + return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/tar', got %s", contentType) + } + + // Check query parameters + query := r.URL.Query() + for key, expected := range buildCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + // Check tags + if len(buildCase.expectedTags) > 0 { + tags := query["t"] + if !reflect.DeepEqual(tags, buildCase.expectedTags) { + return nil, fmt.Errorf("t (tags) not set in URL query properly. Expected '%s', got %s", buildCase.expectedTags, tags) + } + } + + headers := http.Header{} + headers.Add("Server", "Docker/v1.23 (MyOS)") + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + Header: headers, + }, nil + }), + } + buildResponse, err := client.ImageBuild(context.Background(), nil, buildCase.buildOptions) + if err != nil { + t.Fatal(err) + } + if buildResponse.OSType != "MyOS" { + t.Fatalf("expected OSType to be 'MyOS', got %s", buildResponse.OSType) + } + response, err := ioutil.ReadAll(buildResponse.Body) + if err != nil { + t.Fatal(err) + } + buildResponse.Body.Close() + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } + } +} + +func TestGetDockerOS(t *testing.T) { + cases := map[string]string{ + "Docker/v1.22 (linux)": "linux", + "Docker/v1.22 (windows)": "windows", + "Foo/v1.22 (bar)": "", + } + for header, os := range cases { + g := getDockerOS(header) + if g != os { + t.Fatalf("Expected %s, got %s", os, g) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_create.go b/vendor/github.com/moby/moby/client/image_create.go new file mode 100644 index 0000000..cf023a7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" +) + +// ImageCreate creates a new image based in the parent options. +// It returns the JSON content in the response body. +func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + repository, tag, err := reference.Parse(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", repository) + query.Set("tag", tag) + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/vendor/github.com/moby/moby/client/image_create_test.go b/vendor/github.com/moby/moby/client/image_create_test.go new file mode 100644 index 0000000..5c2edd2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_create_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImageCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageCreate(t *testing.T) { + expectedURL := "/images/create" + expectedImage := "test:5000/my_image" + expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag) + expectedRegistryAuth := "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0=" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + registryAuth := r.Header.Get("X-Registry-Auth") + if registryAuth != expectedRegistryAuth { + return nil, fmt.Errorf("X-Registry-Auth header not properly set in the request. Expected '%s', got %s", expectedRegistryAuth, registryAuth) + } + + query := r.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != expectedImage { + return nil, fmt.Errorf("fromImage not set in URL query properly. Expected '%s', got %s", expectedImage, fromImage) + } + + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", expectedTag, tag) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{ + RegistryAuth: expectedRegistryAuth, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(createResponse) + if err != nil { + t.Fatal(err) + } + if err = createResponse.Close(); err != nil { + t.Fatal(err) + } + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } +} diff --git a/vendor/github.com/moby/moby/client/image_history.go b/vendor/github.com/moby/moby/client/image_history.go new file mode 100644 index 0000000..acb1ee9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_history.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageHistory returns the changes in an image in history format. +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { + var history []types.ImageHistory + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + ensureReaderClosed(serverResp) + return history, err +} diff --git a/vendor/github.com/moby/moby/client/image_history_test.go b/vendor/github.com/moby/moby/client/image_history_test.go new file mode 100644 index 0000000..729edb1 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_history_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageHistoryError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageHistory(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageHistory(t *testing.T) { + expectedURL := "/images/image_id/history" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + b, err := json.Marshal([]types.ImageHistory{ + { + ID: "image_id1", + Tags: []string{"tag1", "tag2"}, + }, + { + ID: "image_id2", + Tags: []string{"tag1", "tag2"}, + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageHistories, err := client.ImageHistory(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if len(imageHistories) != 2 { + t.Fatalf("expected 2 containers, got %v", imageHistories) + } +} diff --git a/vendor/github.com/moby/moby/client/image_import.go b/vendor/github.com/moby/moby/client/image_import.go new file mode 100644 index 0000000..c6f154b --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_import.go @@ -0,0 +1,37 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based in the source options. +// It returns the JSON content in the response body. +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/image_import_test.go b/vendor/github.com/moby/moby/client/image_import_test.go new file mode 100644 index 0000000..e309be7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_import_test.go @@ -0,0 +1,81 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageImportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageImport(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + fromSrc := query.Get("fromSrc") + if fromSrc != "image_source" { + return nil, fmt.Errorf("fromSrc not set in URL query properly. Expected 'image_source', got %s", fromSrc) + } + repo := query.Get("repo") + if repo != "repository_name:imported" { + return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name', got %s", repo) + } + tag := query.Get("tag") + if tag != "imported" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected 'imported', got %s", tag) + } + message := query.Get("message") + if message != "A message" { + return nil, fmt.Errorf("message not set in URL query properly. Expected 'A message', got %s", message) + } + changes := query["changes"] + expectedChanges := []string{"change1", "change2"} + if !reflect.DeepEqual(expectedChanges, changes) { + return nil, fmt.Errorf("changes not set in URL query properly. Expected %v, got %v", expectedChanges, changes) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{ + Source: strings.NewReader("source"), + SourceName: "image_source", + }, "repository_name:imported", types.ImageImportOptions{ + Tag: "imported", + Message: "A message", + Changes: []string{"change1", "change2"}, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(importResponse) + if err != nil { + t.Fatal(err) + } + importResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/moby/moby/client/image_inspect.go b/vendor/github.com/moby/moby/client/image_inspect.go new file mode 100644 index 0000000..b3a64ce --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ImageInspect{}, nil, imageNotFoundError{imageID} + } + return types.ImageInspect{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/image_inspect_test.go b/vendor/github.com/moby/moby/client/image_inspect_test.go new file mode 100644 index 0000000..74a4e49 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_inspect_test.go @@ -0,0 +1,71 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageInspectImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestImageInspect(t *testing.T) { + expectedURL := "/images/image_id/json" + expectedTags := []string{"tag1", "tag2"} + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ImageInspect{ + ID: "image_id", + RepoTags: expectedTags, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + imageInspect, _, err := client.ImageInspectWithRaw(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if imageInspect.ID != "image_id" { + t.Fatalf("expected `image_id`, got %s", imageInspect.ID) + } + if !reflect.DeepEqual(imageInspect.RepoTags, expectedTags) { + t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags) + } +} diff --git a/vendor/github.com/moby/moby/client/image_list.go b/vendor/github.com/moby/moby/client/image_list.go new file mode 100644 index 0000000..f26464f --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_list.go @@ -0,0 +1,45 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// ImageList returns a list of images in the docker host. +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/vendor/github.com/moby/moby/client/image_list_test.go b/vendor/github.com/moby/moby/client/image_list_test.go new file mode 100644 index 0000000..7c4a464 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_list_test.go @@ -0,0 +1,159 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestImageListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageList(context.Background(), types.ImageListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageList(t *testing.T) { + expectedURL := "/images/json" + + noDanglingfilters := filters.NewArgs() + noDanglingfilters.Add("dangling", "false") + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("dangling", "true") + + listCases := []struct { + options types.ImageListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ImageListOptions{}, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + options: types.ImageListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1":true,"label2":true}}`, + }, + }, + { + options: types.ImageListOptions{ + Filters: noDanglingfilters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + images, err := client.ImageList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } + } +} + +func TestImageListApiBefore125(t *testing.T) { + expectedFilter := "image:tag" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + query := req.URL.Query() + actualFilter := query.Get("filter") + if actualFilter != expectedFilter { + return nil, fmt.Errorf("filter not set in URL query properly. Expected '%s', got %s", expectedFilter, actualFilter) + } + actualFilters := query.Get("filters") + if actualFilters != "" { + return nil, fmt.Errorf("filters should have not been present, were with value: %s", actualFilters) + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.24", + } + + filters := filters.NewArgs() + filters.Add("reference", "image:tag") + + options := types.ImageListOptions{ + Filters: filters, + } + + images, err := client.ImageList(context.Background(), options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } +} diff --git a/vendor/github.com/moby/moby/client/image_load.go b/vendor/github.com/moby/moby/client/image_load.go new file mode 100644 index 0000000..77aaf1a --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_load.go @@ -0,0 +1,30 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/moby/moby/client/image_load_test.go b/vendor/github.com/moby/moby/client/image_load_test.go new file mode 100644 index 0000000..68dc14f --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_load_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageLoadError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageLoad(context.Background(), nil, true) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageLoad(t *testing.T) { + expectedURL := "/images/load" + expectedInput := "inputBody" + expectedOutput := "outputBody" + loadCases := []struct { + quiet bool + responseContentType string + expectedResponseJSON bool + expectedQueryParams map[string]string + }{ + { + quiet: false, + responseContentType: "text/plain", + expectedResponseJSON: false, + expectedQueryParams: map[string]string{ + "quiet": "0", + }, + }, + { + quiet: true, + responseContentType: "application/json", + expectedResponseJSON: true, + expectedQueryParams: map[string]string{ + "quiet": "1", + }, + }, + } + for _, loadCase := range loadCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + contentType := req.Header.Get("Content-Type") + if contentType != "application/x-tar" { + return nil, fmt.Errorf("content-type not set in URL headers properly. Expected 'application/x-tar', got %s", contentType) + } + query := req.URL.Query() + for key, expected := range loadCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + headers := http.Header{} + headers.Add("Content-Type", loadCase.responseContentType) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + Header: headers, + }, nil + }), + } + + input := bytes.NewReader([]byte(expectedInput)) + imageLoadResponse, err := client.ImageLoad(context.Background(), input, loadCase.quiet) + if err != nil { + t.Fatal(err) + } + if imageLoadResponse.JSON != loadCase.expectedResponseJSON { + t.Fatalf("expected a JSON response, was not.") + } + body, err := ioutil.ReadAll(imageLoadResponse.Body) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected %s, got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_prune.go b/vendor/github.com/moby/moby/client/image_prune.go new file mode 100644 index 0000000..5ef98b7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/moby/moby/client/image_pull.go b/vendor/github.com/moby/moby/client/image_pull.go new file mode 100644 index 0000000..3bffdb7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_pull.go @@ -0,0 +1,46 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + repository, tag, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", repository) + if tag != "" && !options.All { + query.Set("tag", tag) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/image_pull_test.go b/vendor/github.com/moby/moby/client/image_pull_test.go new file mode 100644 index 0000000..fe6bafe --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_pull_test.go @@ -0,0 +1,199 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePullReferenceParseError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) + if err == nil || err.Error() != "repository name must have at least one component" { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePullAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePullStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != "myimage" { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage) + } + tag := query.Get("tag") + if tag != "latest" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "latest", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePullWithoutErrors(t *testing.T) { + expectedURL := "/images/create" + expectedOutput := "hello world" + pullCases := []struct { + all bool + reference string + expectedImage string + expectedTag string + }{ + { + all: false, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "latest", + }, + { + all: false, + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + { + all: true, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + all: true, + reference: "myimage:anything", + expectedImage: "myimage", + expectedTag: "", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != pullCase.expectedImage { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage) + } + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{ + All: pullCase.all, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_push.go b/vendor/github.com/moby/moby/client/image_push.go new file mode 100644 index 0000000..8e73d28 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_push.go @@ -0,0 +1,54 @@ +package client + +import ( + "errors" + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return nil, err + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + var tag = "" + if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { + tag = nameTaggedRef.Tag() + } + + query := url.Values{} + query.Set("tag", tag) + + resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/vendor/github.com/moby/moby/client/image_push_test.go b/vendor/github.com/moby/moby/client/image_push_test.go new file mode 100644 index 0000000..b52da8b --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_push_test.go @@ -0,0 +1,180 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePushReferenceError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{}) + if err == nil || err.Error() != "repository name must have at least one component" { + t.Fatalf("expected an error, got %v", err) + } + // An canonical reference cannot be pushed + _, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{}) + if err == nil || err.Error() != "cannot push a digest reference" { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePushAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePushStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/myimage/push" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != "tag" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "tag", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePushWithoutErrors(t *testing.T) { + expectedOutput := "hello world" + expectedURLFormat := "/images/%s/push" + pullCases := []struct { + reference string + expectedImage string + expectedTag string + }{ + { + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + expectedURL := fmt.Sprintf(expectedURLFormat, pullCase.expectedImage) + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePush(context.Background(), pullCase.reference, types.ImagePushOptions{}) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_remove.go b/vendor/github.com/moby/moby/client/image_remove.go new file mode 100644 index 0000000..839e531 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_remove.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + if err != nil { + return nil, err + } + + var dels []types.ImageDelete + err = json.NewDecoder(resp.body).Decode(&dels) + ensureReaderClosed(resp) + return dels, err +} diff --git a/vendor/github.com/moby/moby/client/image_remove_test.go b/vendor/github.com/moby/moby/client/image_remove_test.go new file mode 100644 index 0000000..7b004f7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_remove_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageRemove(t *testing.T) { + expectedURL := "/images/image_id" + removeCases := []struct { + force bool + pruneChildren bool + expectedQueryParams map[string]string + }{ + { + force: false, + pruneChildren: false, + expectedQueryParams: map[string]string{ + "force": "", + "noprune": "1", + }, + }, { + force: true, + pruneChildren: true, + expectedQueryParams: map[string]string{ + "force": "1", + "noprune": "", + }, + }, + } + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range removeCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + b, err := json.Marshal([]types.ImageDelete{ + { + Untagged: "image_id1", + }, + { + Deleted: "image_id", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{ + Force: removeCase.force, + PruneChildren: removeCase.pruneChildren, + }) + if err != nil { + t.Fatal(err) + } + if len(imageDeletes) != 2 { + t.Fatalf("expected 2 deleted images, got %v", imageDeletes) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_save.go b/vendor/github.com/moby/moby/client/image_save.go new file mode 100644 index 0000000..ecac880 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_save.go @@ -0,0 +1,22 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. +func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/image_save_test.go b/vendor/github.com/moby/moby/client/image_save_test.go new file mode 100644 index 0000000..8f0cf88 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_save_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "testing" + + "golang.org/x/net/context" + + "strings" +) + +func TestImageSaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSave(context.Background(), []string{"nothing"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageSave(t *testing.T) { + expectedURL := "/images/get" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + names := query["names"] + expectedNames := []string{"image_id1", "image_id2"} + if !reflect.DeepEqual(names, expectedNames) { + return nil, fmt.Errorf("names not set in URL query properly. Expected %v, got %v", names, expectedNames) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + saveResponse, err := client.ImageSave(context.Background(), []string{"image_id1", "image_id2"}) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(saveResponse) + if err != nil { + t.Fatal(err) + } + saveResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/moby/moby/client/image_search.go b/vendor/github.com/moby/moby/client/image_search.go new file mode 100644 index 0000000..b0fcd5c --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_search.go @@ -0,0 +1,51 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// ImageSearch makes the docker host to search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/moby/moby/client/image_search_test.go b/vendor/github.com/moby/moby/client/image_search_test.go new file mode 100644 index 0000000..b17bbd8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_search_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "encoding/json" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" +) + +func TestImageSearchAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageSearchStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/search" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected 'IAmValid', got %s", auth) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected 1 result, got %v", results) + } +} + +func TestImageSearchWithoutErrors(t *testing.T) { + expectedURL := "/images/search" + filterArgs := filters.NewArgs() + filterArgs.Add("is-automated", "true") + filterArgs.Add("stars", "3") + + expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}` + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", expectedFilters, filters) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + Filters: filterArgs, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected a result, got %v", results) + } +} diff --git a/vendor/github.com/moby/moby/client/image_tag.go b/vendor/github.com/moby/moby/client/image_tag.go new file mode 100644 index 0000000..bdbf94a --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_tag.go @@ -0,0 +1,34 @@ +package client + +import ( + "errors" + "fmt" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/reference" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, imageID, ref string) error { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + tag := reference.GetTagFromNamedRef(distributionRef) + + query := url.Values{} + query.Set("repo", distributionRef.Name()) + query.Set("tag", tag) + + resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/image_tag_test.go b/vendor/github.com/moby/moby/client/image_tag_test.go new file mode 100644 index 0000000..7925db9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_tag_test.go @@ -0,0 +1,121 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageTagError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "repo:tag") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +// Note: this is not testing all the InvalidReference as it's the reponsability +// of distribution/reference package. +func TestImageTagInvalidReference(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa") + if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag` { + t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) + } +} + +func TestImageTag(t *testing.T) { + expectedURL := "/images/image_id/tag" + tagCases := []struct { + reference string + expectedQueryParams map[string]string + }{ + { + reference: "repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "repository", + "tag": "tag1", + }, + }, { + reference: "another_repository:latest", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "another_repository", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "latest", + }, + }, + } + for _, tagCase := range tagCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range tagCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ImageTag(context.Background(), "image_id", tagCase.reference) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/client/info.go b/vendor/github.com/moby/moby/client/info.go new file mode 100644 index 0000000..ac07961 --- /dev/null +++ b/vendor/github.com/moby/moby/client/info.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/moby/moby/client/info_test.go b/vendor/github.com/moby/moby/client/info_test.go new file mode 100644 index 0000000..79f23c8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/info_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestInfoServerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.Info(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestInfoInvalidResponseJSONError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("invalid json"))), + }, nil + }), + } + _, err := client.Info(context.Background()) + if err == nil || !strings.Contains(err.Error(), "invalid character") { + t.Fatalf("expected a 'invalid character' error, got %v", err) + } +} + +func TestInfo(t *testing.T) { + expectedURL := "/info" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + info := &types.Info{ + ID: "daemonID", + Containers: 3, + } + b, err := json.Marshal(info) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + info, err := client.Info(context.Background()) + if err != nil { + t.Fatal(err) + } + + if info.ID != "daemonID" { + t.Fatalf("expected daemonID, got %s", info.ID) + } + + if info.Containers != 3 { + t.Fatalf("expected 3 containers, got %d", info.Containers) + } +} diff --git a/vendor/github.com/moby/moby/client/interface.go b/vendor/github.com/moby/moby/client/interface.go new file mode 100644 index 0000000..0597803 --- /dev/null +++ b/vendor/github.com/moby/moby/client/interface.go @@ -0,0 +1,171 @@ +package client + +import ( + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. +type CommonAPIClient interface { + ContainerAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + ServerVersion(ctx context.Context) (types.Version, error) + UpdateClientVersion(v string) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string) (int64, error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} diff --git a/vendor/github.com/moby/moby/client/interface_experimental.go b/vendor/github.com/moby/moby/client/interface_experimental.go new file mode 100644 index 0000000..51da98e --- /dev/null +++ b/vendor/github.com/moby/moby/client/interface_experimental.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/moby/moby/client/interface_stable.go b/vendor/github.com/moby/moby/client/interface_stable.go new file mode 100644 index 0000000..cc90a3c --- /dev/null +++ b/vendor/github.com/moby/moby/client/interface_stable.go @@ -0,0 +1,10 @@ +package client + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} diff --git a/vendor/github.com/moby/moby/client/login.go b/vendor/github.com/moby/moby/client/login.go new file mode 100644 index 0000000..600dc71 --- /dev/null +++ b/vendor/github.com/moby/moby/client/login.go @@ -0,0 +1,29 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns UnauthorizerError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp.statusCode == http.StatusUnauthorized { + return registry.AuthenticateOKBody{}, unauthorizedError{err} + } + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/network_connect.go b/vendor/github.com/moby/moby/client/network_connect.go new file mode 100644 index 0000000..c022c17 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_connect.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/network_connect_test.go b/vendor/github.com/moby/moby/client/network_connect_test.go new file mode 100644 index 0000000..d472f45 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_connect_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +func TestNetworkConnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkConnectEmptyNilEndpointSettings(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig != nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be nil, got %v", connect.EndpointConfig) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err != nil { + t.Fatal(err) + } +} + +func TestNetworkConnect(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig.NetworkID != "NetworkID" { + return nil, fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", &network.EndpointSettings{ + NetworkID: "NetworkID", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/network_create.go b/vendor/github.com/moby/moby/client/network_create.go new file mode 100644 index 0000000..4067a54 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + if err != nil { + return response, err + } + + json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/network_create_test.go b/vendor/github.com/moby/moby/client/network_create_test.go new file mode 100644 index 0000000..0e2457f --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_create_test.go @@ -0,0 +1,72 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkCreate(t *testing.T) { + expectedURL := "/networks/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkCreateResponse{ + ID: "network_id", + Warning: "warning", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResponse, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{ + CheckDuplicate: true, + Driver: "mydriver", + EnableIPv6: true, + Internal: true, + Options: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if networkResponse.ID != "network_id" { + t.Fatalf("expected networkResponse.ID to be 'network_id', got %s", networkResponse.ID) + } + if networkResponse.Warning != "warning" { + t.Fatalf("expected networkResponse.Warning to be 'warning', got %s", networkResponse.Warning) + } +} diff --git a/vendor/github.com/moby/moby/client/network_disconnect.go b/vendor/github.com/moby/moby/client/network_disconnect.go new file mode 100644 index 0000000..24b58e3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_disconnect.go @@ -0,0 +1,14 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/network_disconnect_test.go b/vendor/github.com/moby/moby/client/network_disconnect_test.go new file mode 100644 index 0000000..b54a2b1 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_disconnect_test.go @@ -0,0 +1,64 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkDisconnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkDisconnect(t *testing.T) { + expectedURL := "/networks/network_id/disconnect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var disconnect types.NetworkDisconnect + if err := json.NewDecoder(req.Body).Decode(&disconnect); err != nil { + return nil, err + } + + if disconnect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", disconnect.Container) + } + + if !disconnect.Force { + return nil, fmt.Errorf("expected Force to be true, got %v", disconnect.Force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", true) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/network_inspect.go b/vendor/github.com/moby/moby/client/network_inspect.go new file mode 100644 index 0000000..5ad4ea5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) { + var networkResource types.NetworkResource + resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return networkResource, nil, networkNotFoundError{networkID} + } + return networkResource, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/moby/moby/client/network_inspect_test.go b/vendor/github.com/moby/moby/client/network_inspect_test.go new file mode 100644 index 0000000..1f926d6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "unknown") + if err == nil || !IsErrNetworkNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} + +func TestNetworkInspect(t *testing.T) { + expectedURL := "/networks/network_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkResource{ + Name: "mynetwork", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.NetworkInspect(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } +} diff --git a/vendor/github.com/moby/moby/client/network_list.go b/vendor/github.com/moby/moby/client/network_list.go new file mode 100644 index 0000000..e566a93 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworkList returns the list of networks configured in the docker host. +func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/vendor/github.com/moby/moby/client/network_list_test.go b/vendor/github.com/moby/moby/client/network_list_test.go new file mode 100644 index 0000000..4d44349 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list_test.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestNetworkListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ + Filters: filters.NewArgs(), + }) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkList(t *testing.T) { + expectedURL := "/networks" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + options types.NetworkListOptions + expectedFilters string + }{ + { + options: types.NetworkListOptions{ + Filters: filters.NewArgs(), + }, + expectedFilters: "", + }, { + options: types.NetworkListOptions{ + Filters: noDanglingFilters, + }, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: danglingFilters, + }, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: labelFilters, + }, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal([]types.NetworkResource{ + { + Name: "network", + Driver: "bridge", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResources, err := client.NetworkList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(networkResources) != 1 { + t.Fatalf("expected 1 network resource, got %v", networkResources) + } + } +} diff --git a/vendor/github.com/moby/moby/client/network_prune.go b/vendor/github.com/moby/moby/client/network_prune.go new file mode 100644 index 0000000..7352a7f --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/moby/moby/client/network_remove.go b/vendor/github.com/moby/moby/client/network_remove.go new file mode 100644 index 0000000..6bd6748 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/network_remove_test.go b/vendor/github.com/moby/moby/client/network_remove_test.go new file mode 100644 index 0000000..2a7b964 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestNetworkRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkRemove(t *testing.T) { + expectedURL := "/networks/network_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/node_inspect.go b/vendor/github.com/moby/moby/client/node_inspect.go new file mode 100644 index 0000000..abf505d --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeInspectWithRaw returns the node information. +func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Node{}, nil, nodeNotFoundError{nodeID} + } + return swarm.Node{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/node_inspect_test.go b/vendor/github.com/moby/moby/client/node_inspect_test.go new file mode 100644 index 0000000..fc13283 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeInspectNodeNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrNodeNotFound(err) { + t.Fatalf("expected an nodeNotFoundError error, got %v", err) + } +} + +func TestNodeInspect(t *testing.T) { + expectedURL := "/nodes/node_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Node{ + ID: "node_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodeInspect, _, err := client.NodeInspectWithRaw(context.Background(), "node_id") + if err != nil { + t.Fatal(err) + } + if nodeInspect.ID != "node_id" { + t.Fatalf("expected `node_id`, got %s", nodeInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/node_list.go b/vendor/github.com/moby/moby/client/node_list.go new file mode 100644 index 0000000..3e8440f --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + ensureReaderClosed(resp) + return nodes, err +} diff --git a/vendor/github.com/moby/moby/client/node_list_test.go b/vendor/github.com/moby/moby/client/node_list_test.go new file mode 100644 index 0000000..0251b5c --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NodeList(context.Background(), types.NodeListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeList(t *testing.T) { + expectedURL := "/nodes" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.NodeListOptions + expectedQueryParams map[string]string + }{ + { + options: types.NodeListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.NodeListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Node{ + { + ID: "node_id1", + }, + { + ID: "node_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodes, err := client.NodeList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %v", nodes) + } + } +} diff --git a/vendor/github.com/moby/moby/client/node_remove.go b/vendor/github.com/moby/moby/client/node_remove.go new file mode 100644 index 0000000..0a77f3d --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/node_remove_test.go b/vendor/github.com/moby/moby/client/node_remove_test.go new file mode 100644 index 0000000..f2f8adc --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_remove_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestNodeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: false}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeRemove(t *testing.T) { + expectedURL := "/nodes/node_id" + + removeCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != removeCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", removeCase.expectedForce, force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: removeCase.force}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/client/node_update.go b/vendor/github.com/moby/moby/client/node_update.go new file mode 100644 index 0000000..3ca9760 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeUpdate updates a Node. +func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/node_update_test.go b/vendor/github.com/moby/moby/client/node_update_test.go new file mode 100644 index 0000000..613ff10 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestNodeUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeUpdate(t *testing.T) { + expectedURL := "/nodes/node_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/ping.go b/vendor/github.com/moby/moby/client/ping.go new file mode 100644 index 0000000..22dcda2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/ping.go @@ -0,0 +1,30 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Ping pings the server and return the value of the "Docker-Experimental" & "API-Version" headers +func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err != nil { + return ping, err + } + defer ensureReaderClosed(serverResp) + + ping.APIVersion = serverResp.header.Get("API-Version") + + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + + return ping, nil +} diff --git a/vendor/github.com/moby/moby/client/plugin_create.go b/vendor/github.com/moby/moby/client/plugin_create.go new file mode 100644 index 0000000..a660ba5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_create.go @@ -0,0 +1,26 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + if err != nil { + return err + } + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/plugin_disable.go b/vendor/github.com/moby/moby/client/plugin_disable.go new file mode 100644 index 0000000..30467db --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/plugin_disable_test.go b/vendor/github.com/moby/moby/client/plugin_disable_test.go new file mode 100644 index 0000000..a4de45b --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_disable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginDisableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginDisable(t *testing.T) { + expectedURL := "/plugins/plugin_name/disable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_enable.go b/vendor/github.com/moby/moby/client/plugin_enable.go new file mode 100644 index 0000000..95517c4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/plugin_enable_test.go b/vendor/github.com/moby/moby/client/plugin_enable_test.go new file mode 100644 index 0000000..b276813 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_enable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginEnableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginEnable(t *testing.T) { + expectedURL := "/plugins/plugin_name/enable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_inspect.go b/vendor/github.com/moby/moby/client/plugin_inspect.go new file mode 100644 index 0000000..89f39ee --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_inspect.go @@ -0,0 +1,32 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return nil, nil, pluginNotFoundError{name} + } + return nil, nil, err + } + + defer ensureReaderClosed(resp) + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_inspect_test.go b/vendor/github.com/moby/moby/client/plugin_inspect_test.go new file mode 100644 index 0000000..fae407e --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.PluginInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginInspect(t *testing.T) { + expectedURL := "/plugins/plugin_name" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.Plugin{ + ID: "plugin_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + pluginInspect, _, err := client.PluginInspectWithRaw(context.Background(), "plugin_name") + if err != nil { + t.Fatal(err) + } + if pluginInspect.ID != "plugin_id" { + t.Fatalf("expected `plugin_id`, got %s", pluginInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_install.go b/vendor/github.com/moby/moby/client/plugin_install.go new file mode 100644 index 0000000..3217c4c --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_install.go @@ -0,0 +1,113 @@ +package client + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(err) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/vendor/github.com/moby/moby/client/plugin_list.go b/vendor/github.com/moby/moby/client/plugin_list.go new file mode 100644 index 0000000..88c480a --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_list.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + resp, err := cli.get(ctx, "/plugins", nil, nil) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + ensureReaderClosed(resp) + return plugins, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_list_test.go b/vendor/github.com/moby/moby/client/plugin_list_test.go new file mode 100644 index 0000000..173e4b8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_list_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginList(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginList(t *testing.T) { + expectedURL := "/plugins" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]*types.Plugin{ + { + ID: "plugin_id1", + }, + { + ID: "plugin_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + plugins, err := client.PluginList(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(plugins) != 2 { + t.Fatalf("expected 2 plugins, got %v", plugins) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_push.go b/vendor/github.com/moby/moby/client/plugin_push.go new file mode 100644 index 0000000..1e5f963 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_push.go @@ -0,0 +1,17 @@ +package client + +import ( + "io" + + "golang.org/x/net/context" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/plugin_push_test.go b/vendor/github.com/moby/moby/client/plugin_push_test.go new file mode 100644 index 0000000..d9f70cd --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_push_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginPushError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginPush(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + auth := req.Header.Get("X-Registry-Auth") + if auth != "authtoken" { + return nil, fmt.Errorf("Invalid auth header : expected 'authtoken', got %s", auth) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "authtoken") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_remove.go b/vendor/github.com/moby/moby/client/plugin_remove.go new file mode 100644 index 0000000..b017e4d --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/plugin_remove_test.go b/vendor/github.com/moby/moby/client/plugin_remove_test.go new file mode 100644 index 0000000..a15f166 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_remove_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestPluginRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginRemove(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_set.go b/vendor/github.com/moby/moby/client/plugin_set.go new file mode 100644 index 0000000..3260d2a --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_set.go @@ -0,0 +1,12 @@ +package client + +import ( + "golang.org/x/net/context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/plugin_set_test.go b/vendor/github.com/moby/moby/client/plugin_set_test.go new file mode 100644 index 0000000..2450254 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_set_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginSetError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginSet(t *testing.T) { + expectedURL := "/plugins/plugin_name/set" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{"arg1"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_upgrade.go b/vendor/github.com/moby/moby/client/plugin_upgrade.go new file mode 100644 index 0000000..95a4356 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_upgrade.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers) +} diff --git a/vendor/github.com/moby/moby/client/request.go b/vendor/github.com/moby/moby/client/request.go new file mode 100644 index 0000000..ac05363 --- /dev/null +++ b/vendor/github.com/moby/moby/client/request.go @@ -0,0 +1,247 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) +} + +// getWithContext sends an http request to the docker API using the method GET with a specific go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "GET", path, query, nil, headers) +} + +// postWithContext sends an http request to the docker API using the method POST with a specific go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. (See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + return cli.doRequest(ctx, req) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1} + + resp, err := ctxhttp.Do(ctx, cli.client, req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. + switch err { + case context.Canceled, context.DeadlineExceeded: + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if !err.Temporary() { + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.25/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. + // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.") + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + } + + if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return serverResp, err + } + if len(body) == 0 { + return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && + resp.Header.Get("Content-Type") == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return serverResp, fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) + } + + serverResp.body = resp.Body + serverResp.header = resp.Header + return serverResp, nil +} + +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + continue + } + req.Header.Set(k, v) + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if body := response.body; body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(ioutil.Discard, body, 512) + response.body.Close() + } +} diff --git a/vendor/github.com/moby/moby/client/request_test.go b/vendor/github.com/moby/moby/client/request_test.go new file mode 100644 index 0000000..63908ae --- /dev/null +++ b/vendor/github.com/moby/moby/client/request_test.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// TestSetHostHeader should set fake host for local communications, set real host +// for normal communications. +func TestSetHostHeader(t *testing.T) { + testURL := "/test" + testCases := []struct { + host string + expectedHost string + expectedURLHost string + }{ + { + "unix:///var/run/docker.sock", + "docker", + "/var/run/docker.sock", + }, + { + "npipe:////./pipe/docker_engine", + "docker", + "//./pipe/docker_engine", + }, + { + "tcp://0.0.0.0:4243", + "", + "0.0.0.0:4243", + }, + { + "tcp://localhost:4243", + "", + "localhost:4243", + }, + } + + for c, test := range testCases { + proto, addr, basePath, err := ParseHost(test.host) + if err != nil { + t.Fatal(err) + } + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, testURL) { + return nil, fmt.Errorf("Test Case #%d: Expected URL %q, got %q", c, testURL, req.URL) + } + if req.Host != test.expectedHost { + return nil, fmt.Errorf("Test Case #%d: Expected host %q, got %q", c, test.expectedHost, req.Host) + } + if req.URL.Host != test.expectedURLHost { + return nil, fmt.Errorf("Test Case #%d: Expected URL host %q, got %q", c, test.expectedURLHost, req.URL.Host) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), + }, nil + }), + + proto: proto, + addr: addr, + basePath: basePath, + } + + _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) + if err != nil { + t.Fatal(err) + } + } +} + +// TestPlainTextError tests the server returning an error in plain text for +// backwards compatibility with API versions <1.24. All other tests use +// errors returned as JSON +func TestPlainTextError(t *testing.T) { + client := &Client{ + client: newMockClient(plainTextErrorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} diff --git a/vendor/github.com/moby/moby/client/secret_create.go b/vendor/github.com/moby/moby/client/secret_create.go new file mode 100644 index 0000000..de8b041 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_create.go @@ -0,0 +1,24 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretCreate creates a new Secret. +func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var headers map[string][]string + + var response types.SecretCreateResponse + resp, err := cli.post(ctx, "/secrets/create", nil, secret, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/secret_create_test.go b/vendor/github.com/moby/moby/client/secret_create_test.go new file mode 100644 index 0000000..cb378c7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretCreate(t *testing.T) { + expectedURL := "/secrets/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.SecretCreateResponse{ + ID: "test_secret", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_secret" { + t.Fatalf("expected `test_secret`, got %s", r.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/secret_inspect.go b/vendor/github.com/moby/moby/client/secret_inspect.go new file mode 100644 index 0000000..f774576 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Secret{}, nil, secretNotFoundError{id} + } + return swarm.Secret{}, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/moby/moby/client/secret_inspect_test.go b/vendor/github.com/moby/moby/client/secret_inspect_test.go new file mode 100644 index 0000000..423d986 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretInspectSecretNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrSecretNotFound(err) { + t.Fatalf("expected an secretNotFoundError error, got %v", err) + } +} + +func TestSecretInspect(t *testing.T) { + expectedURL := "/secrets/secret_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Secret{ + ID: "secret_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secretInspect, _, err := client.SecretInspectWithRaw(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } + if secretInspect.ID != "secret_id" { + t.Fatalf("expected `secret_id`, got %s", secretInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/secret_list.go b/vendor/github.com/moby/moby/client/secret_list.go new file mode 100644 index 0000000..7e9d5ec --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretList returns the list of secrets. +func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + ensureReaderClosed(resp) + return secrets, err +} diff --git a/vendor/github.com/moby/moby/client/secret_list_test.go b/vendor/github.com/moby/moby/client/secret_list_test.go new file mode 100644 index 0000000..1ac11cd --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SecretList(context.Background(), types.SecretListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretList(t *testing.T) { + expectedURL := "/secrets" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.SecretListOptions + expectedQueryParams map[string]string + }{ + { + options: types.SecretListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.SecretListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Secret{ + { + ID: "secret_id1", + }, + { + ID: "secret_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secrets, err := client.SecretList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(secrets) != 2 { + t.Fatalf("expected 2 secrets, got %v", secrets) + } + } +} diff --git a/vendor/github.com/moby/moby/client/secret_remove.go b/vendor/github.com/moby/moby/client/secret_remove.go new file mode 100644 index 0000000..1955b98 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// SecretRemove removes a Secret. +func (cli *Client) SecretRemove(ctx context.Context, id string) error { + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/secret_remove_test.go b/vendor/github.com/moby/moby/client/secret_remove_test.go new file mode 100644 index 0000000..f269f78 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSecretRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretRemove(t *testing.T) { + expectedURL := "/secrets/secret_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/secret_update.go b/vendor/github.com/moby/moby/client/secret_update.go new file mode 100644 index 0000000..b94e24a --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_update.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretUpdate updates a Secret. Currently, the only part of a secret spec +// which can be updated is Labels. +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/secret_update_test.go b/vendor/github.com/moby/moby/client/secret_update_test.go new file mode 100644 index 0000000..c620985 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSecretUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretUpdate(t *testing.T) { + expectedURL := "/secrets/secret_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/service_create.go b/vendor/github.com/moby/moby/client/service_create.go new file mode 100644 index 0000000..3d1be22 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_create.go @@ -0,0 +1,30 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceCreate creates a new Service. +func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var headers map[string][]string + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {options.EncodedRegistryAuth}, + } + } + + var response types.ServiceCreateResponse + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/service_create_test.go b/vendor/github.com/moby/moby/client/service_create_test.go new file mode 100644 index 0000000..1e07382 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceCreate(t *testing.T) { + expectedURL := "/services/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/service_inspect.go b/vendor/github.com/moby/moby/client/service_inspect.go new file mode 100644 index 0000000..ca71cbd --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceInspectWithRaw returns the service information and the raw data. +func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) { + serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Service{}, nil, serviceNotFoundError{serviceID} + } + return swarm.Service{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/service_inspect_test.go b/vendor/github.com/moby/moby/client/service_inspect_test.go new file mode 100644 index 0000000..e235cf0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceInspectServiceNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrServiceNotFound(err) { + t.Fatalf("expected an serviceNotFoundError error, got %v", err) + } +} + +func TestServiceInspect(t *testing.T) { + expectedURL := "/services/service_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Service{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } + if serviceInspect.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", serviceInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/service_list.go b/vendor/github.com/moby/moby/client/service_list.go new file mode 100644 index 0000000..c29e6d4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/services", query, nil) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + ensureReaderClosed(resp) + return services, err +} diff --git a/vendor/github.com/moby/moby/client/service_list_test.go b/vendor/github.com/moby/moby/client/service_list_test.go new file mode 100644 index 0000000..213981e --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceList(t *testing.T) { + expectedURL := "/services" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ServiceListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ServiceListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ServiceListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Service{ + { + ID: "service_id1", + }, + { + ID: "service_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + services, err := client.ServiceList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(services) != 2 { + t.Fatalf("expected 2 services, got %v", services) + } + } +} diff --git a/vendor/github.com/moby/moby/client/service_logs.go b/vendor/github.com/moby/moby/client/service_logs.go new file mode 100644 index 0000000..24384e3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/service_logs_test.go b/vendor/github.com/moby/moby/client/service_logs_test.go new file mode 100644 index 0000000..a6d002b --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestServiceLogsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + _, err = client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{ + Since: "2006-01-02TZ", + }) + if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { + t.Fatalf("expected a 'parsing time' error, got %v", err) + } +} + +func TestServiceLogs(t *testing.T) { + expectedURL := "/services/service_id/logs" + cases := []struct { + options types.ContainerLogsOptions + expectedQueryParams map[string]string + }{ + { + expectedQueryParams: map[string]string{ + "tail": "", + }, + }, + { + options: types.ContainerLogsOptions{ + Tail: "any", + }, + expectedQueryParams: map[string]string{ + "tail": "any", + }, + }, + { + options: types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, + Follow: true, + }, + expectedQueryParams: map[string]string{ + "tail": "", + "stdout": "1", + "stderr": "1", + "timestamps": "1", + "details": "1", + "follow": "1", + }, + }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date, timestamp or go duration will be + // passed as is + Since: "invalid but valid", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "invalid but valid", + }, + }, + } + for _, logCase := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check query parameters + query := r.URL.Query() + for key, expected := range logCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ServiceLogs(context.Background(), "service_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ServiceLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ServiceLogs(ctx, "service_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/service_remove.go b/vendor/github.com/moby/moby/client/service_remove.go new file mode 100644 index 0000000..a9331f9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/service_remove_test.go b/vendor/github.com/moby/moby/client/service_remove_test.go new file mode 100644 index 0000000..8e2ac25 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestServiceRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceRemove(t *testing.T) { + expectedURL := "/services/service_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/service_update.go b/vendor/github.com/moby/moby/client/service_update.go new file mode 100644 index 0000000..afa94d4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_update.go @@ -0,0 +1,41 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceUpdate updates a Service. +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + headers map[string][]string + query = url.Values{} + ) + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {options.EncodedRegistryAuth}, + } + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + var response types.ServiceUpdateResponse + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/service_update_test.go b/vendor/github.com/moby/moby/client/service_update_test.go new file mode 100644 index 0000000..76bea17 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_update_test.go @@ -0,0 +1,77 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +func TestServiceUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", swarm.Version{}, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceUpdate(t *testing.T) { + expectedURL := "/services/service_id/update" + + updateCases := []struct { + swarmVersion swarm.Version + expectedVersion string + }{ + { + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 0, + }, + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 10, + }, + expectedVersion: "10", + }, + } + + for _, updateCase := range updateCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + version := req.URL.Query().Get("version") + if version != updateCase.expectedVersion { + return nil, fmt.Errorf("version not set in URL query properly, expected '%s', got %s", updateCase.expectedVersion, version) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + }, nil + }), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", updateCase.swarmVersion, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go new file mode 100644 index 0000000..be28d32 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_init.go b/vendor/github.com/moby/moby/client/swarm_init.go new file mode 100644 index 0000000..fd45d06 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_init.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInit initializes the Swarm. +func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_init_test.go b/vendor/github.com/moby/moby/client/swarm_init_test.go new file mode 100644 index 0000000..811155a --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_init_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmInitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInit(context.Background(), swarm.InitRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInit(t *testing.T) { + expectedURL := "/swarm/init" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(`"body"`))), + }, nil + }), + } + + resp, err := client.SwarmInit(context.Background(), swarm.InitRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } + if resp != "body" { + t.Fatalf("Expected 'body', got %s", resp) + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_inspect.go b/vendor/github.com/moby/moby/client/swarm_inspect.go new file mode 100644 index 0000000..6d95cfc --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInspect inspects the Swarm. +func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_inspect_test.go b/vendor/github.com/moby/moby/client/swarm_inspect_test.go new file mode 100644 index 0000000..6432d17 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_inspect_test.go @@ -0,0 +1,56 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSwarmInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInspect(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInspect(t *testing.T) { + expectedURL := "/swarm" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + ID: "swarm_id", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + swarmInspect, err := client.SwarmInspect(context.Background()) + if err != nil { + t.Fatal(err) + } + if swarmInspect.ID != "swarm_id" { + t.Fatalf("expected `swarm_id`, got %s", swarmInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_join.go b/vendor/github.com/moby/moby/client/swarm_join.go new file mode 100644 index 0000000..cda9993 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_join.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmJoin joins the Swarm. +func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/swarm_join_test.go b/vendor/github.com/moby/moby/client/swarm_join_test.go new file mode 100644 index 0000000..31ef2a7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_join_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmJoinError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmJoin(t *testing.T) { + expectedURL := "/swarm/join" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_leave.go b/vendor/github.com/moby/moby/client/swarm_leave.go new file mode 100644 index 0000000..a4df732 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_leave.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// SwarmLeave leaves the Swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/swarm_leave_test.go b/vendor/github.com/moby/moby/client/swarm_leave_test.go new file mode 100644 index 0000000..c96dac8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_leave_test.go @@ -0,0 +1,66 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSwarmLeaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmLeave(context.Background(), false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmLeave(t *testing.T) { + expectedURL := "/swarm/leave" + + leaveCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, leaveCase := range leaveCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != leaveCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", leaveCase.expectedForce, force) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmLeave(context.Background(), leaveCase.force) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_unlock.go b/vendor/github.com/moby/moby/client/swarm_unlock.go new file mode 100644 index 0000000..addfb59 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_unlock.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUnlock unlockes locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + if err != nil { + return err + } + + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/moby/moby/client/swarm_update.go b/vendor/github.com/moby/moby/client/swarm_update.go new file mode 100644 index 0000000..cc8eeb6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUpdate updates the Swarm. +func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) + query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/swarm_update_test.go b/vendor/github.com/moby/moby/client/swarm_update_test.go new file mode 100644 index 0000000..3b23db0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmUpdate(t *testing.T) { + expectedURL := "/swarm/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/task_inspect.go b/vendor/github.com/moby/moby/client/task_inspect.go new file mode 100644 index 0000000..bc8058f --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + + "golang.org/x/net/context" +) + +// TaskInspectWithRaw returns the task information and its raw representation.. +func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Task{}, nil, taskNotFoundError{taskID} + } + return swarm.Task{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/task_inspect_test.go b/vendor/github.com/moby/moby/client/task_inspect_test.go new file mode 100644 index 0000000..148cdad --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.TaskInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskInspect(t *testing.T) { + expectedURL := "/tasks/task_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Task{ + ID: "task_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + taskInspect, _, err := client.TaskInspectWithRaw(context.Background(), "task_id") + if err != nil { + t.Fatal(err) + } + if taskInspect.ID != "task_id" { + t.Fatalf("expected `task_id`, got %s", taskInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/task_list.go b/vendor/github.com/moby/moby/client/task_list.go new file mode 100644 index 0000000..66324da --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// TaskList returns the list of tasks. +func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + ensureReaderClosed(resp) + return tasks, err +} diff --git a/vendor/github.com/moby/moby/client/task_list_test.go b/vendor/github.com/moby/moby/client/task_list_test.go new file mode 100644 index 0000000..2a9a4c4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.TaskList(context.Background(), types.TaskListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskList(t *testing.T) { + expectedURL := "/tasks" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.TaskListOptions + expectedQueryParams map[string]string + }{ + { + options: types.TaskListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.TaskListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Task{ + { + ID: "task_id1", + }, + { + ID: "task_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + tasks, err := client.TaskList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(tasks) != 2 { + t.Fatalf("expected 2 tasks, got %v", tasks) + } + } +} diff --git a/vendor/github.com/moby/moby/client/testdata/ca.pem b/vendor/github.com/moby/moby/client/testdata/ca.pem new file mode 100644 index 0000000..ad14d47 --- /dev/null +++ b/vendor/github.com/moby/moby/client/testdata/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC0jCCAbqgAwIBAgIRAILlP5WWLaHkQ/m2ASHP7SowDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMBIxEDAOBgNVBAoTB3ZpbmNlbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD0yZPKAGncoaxaU/QW9tWEHbrvDoGVF/65L8Si/jBrlAgLjhmmV1di +vKG9QPzuU8snxHro3/uCwyA6kTqw0U8bGwHxJq2Bpa6JBYj8N2jMJ+M+sjXgSo2t +E0zIzjTW2Pir3C8qwfrVL6NFp9xClwMD23SFZ0UsEH36NkfyrKBVeM8IOjJd4Wjs +xIcuvF3BTVkji84IJBW2JIKf9ZrzJwUlSCPgptRp4Evdbyp5d+UPxtwxD7qjW4lM +yQQ8vfcC4lKkVx5s/RNJ4fzd5uEgLdEbZ20qt7Zt/bLcxFHpUhH2teA0QjmrOWFh +gbL83s95/+hbSVhsO4hoFW7vTeiCCY4xAgMBAAGjIzAhMA4GA1UdDwEB/wQEAwIC +rDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBY51RHajuDuhO2 +tcm26jeNROzfffnjhvbOVPjSEdo9vI3JpMU/RuQw+nbNcLwJrdjL6UH7tD/36Y+q +NXH+xSIjWFH0zXGxrIUsVrvt6f8CbOvw7vD+gygOG+849PDQMbL6czP8rvXY7vZV +9pdpQfrENk4b5kePRW/6HaGSTvtgN7XOrYD9fp3pm/G534T2e3IxgYMRNwdB9Ul9 +bLwMqQqf4eiqqMs6x4IVmZUkGVMKiFKcvkNg9a+Ozx5pMizHeAezWMcZ5V+QJZVT +8lElSCKZ2Yy2xkcl7aeQMLwcAeZwfTp+Yu9dVzlqXiiBTLd1+LtAQCuKHzmw4Q8k +EvD5m49l +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/client/testdata/cert.pem b/vendor/github.com/moby/moby/client/testdata/cert.pem new file mode 100644 index 0000000..9000ffb --- /dev/null +++ b/vendor/github.com/moby/moby/client/testdata/cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC8DCCAdigAwIBAgIRAJAS1glgcke4q7eCaretwgUwDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMB4xHDAaBgNVBAoME3ZpbmNlbnQuPGJvb3RzdHJhcD4wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQClpvG442dGEvrRgmCrqY4kBml1LVlw2Y7ZDn6B +TKa52+MuGDmfXbO1UhclNqTXjLgAwKjPz/OvnPRxNEUoQEDbBd+Xev7rxTY5TvYI +27YH3fMH2LL2j62jum649abfhZ6ekD5eD8tCn3mnrEOgqRIlK7efPIVixq/ZqU1H +7ez0ggB7dmWHlhnUaxyQOCSnAX/7nKYQXqZgVvGhDeR2jp7GcnhbK/qPrZ/mOm83 +2IjCeYN145opYlzTSp64GYIZz7uqMNcnDKK37ZbS8MYcTjrRaHEiqZVVdIC+ghbx +qYqzbZRVfgztI9jwmifn0mYrN4yt+nhNYwBcRJ4Pv3uLFbo7AgMBAAGjNTAzMA4G +A1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA +MA0GCSqGSIb3DQEBCwUAA4IBAQDg1r7nksjYgDFYEcBbrRrRHddIoK+RVmSBTTrq +8giC77m0srKdh9XTVWK1PUbGfODV1oD8m9QhPE8zPDyYQ8jeXNRSU5wXdkrTRmmY +w/T3SREqmE7CObMtusokHidjYFuqqCR07sJzqBKRlzr3o0EGe3tuEhUlF5ARY028 +eipaDcVlT5ChGcDa6LeJ4e05u4cVap0dd6Rp1w3Rx1AYAecdgtgBMnw1iWdl/nrC +sp26ZXNaAhFOUovlY9VY257AMd9hQV7WvAK4yNEHcckVu3uXTBmDgNSOPtl0QLsL +Kjlj75ksCx8nCln/hCut/0+kGTsGZqdV5c6ktgcGYRir/5Hs +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/client/testdata/key.pem b/vendor/github.com/moby/moby/client/testdata/key.pem new file mode 100644 index 0000000..c0869df --- /dev/null +++ b/vendor/github.com/moby/moby/client/testdata/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApabxuONnRhL60YJgq6mOJAZpdS1ZcNmO2Q5+gUymudvjLhg5 +n12ztVIXJTak14y4AMCoz8/zr5z0cTRFKEBA2wXfl3r+68U2OU72CNu2B93zB9iy +9o+to7puuPWm34WenpA+Xg/LQp95p6xDoKkSJSu3nzyFYsav2alNR+3s9IIAe3Zl +h5YZ1GsckDgkpwF/+5ymEF6mYFbxoQ3kdo6exnJ4Wyv6j62f5jpvN9iIwnmDdeOa +KWJc00qeuBmCGc+7qjDXJwyit+2W0vDGHE460WhxIqmVVXSAvoIW8amKs22UVX4M +7SPY8Jon59JmKzeMrfp4TWMAXESeD797ixW6OwIDAQABAoIBAHfyAAleL8NfrtnR +S+pApbmUIvxD0AWUooispBE/zWG6xC72P5MTqDJctIGvpYCmVf3Fgvamns7EGYN2 +07Sngc6V3Ca1WqyhaffpIuGbJZ1gqr89u6gotRRexBmNVj13ZTlvPJmjWgxtqQsu +AvHsOkVL+HOGwRaaw24Z1umEcBVCepl7PGTqsLeJUtBUZBiqdJTu4JYLAB6BggBI +OxhHoTWvlNWwzezo2C/IXkXcXD/tp3i5vTn5rAXHSMQkdMAUh7/xJ73Fl36gxZhp +W7NoPKaS9qNh8jhs6p54S7tInb6+mrKtvRFKl5XAR3istXrXteT5UaukpuBbQ/5d +qf4BXuECgYEAzoOKxMee5tG/G9iC6ImNq5xGAZm0OnmteNgIEQj49If1Q68av525 +FioqdC9zV+blfHQqXEIUeum4JAou4xqmB8Lw2H0lYwOJ1IkpUy3QJjU1IrI+U5Qy +ryZuA9cxSTLf1AJFbROsoZDpjaBh0uUQkD/4PHpwXMgHu/3CaJ4nTEkCgYEAzVjE +VWgczWJGyRxmHSeR51ft1jrlChZHEd3HwgLfo854JIj+MGUH4KPLSMIkYNuyiwNQ +W7zdXCB47U8afSL/lPTv1M5+ZsWY6sZAT6gtp/IeU0Va943h9cj10fAOBJaz1H6M +jnZS4jjWhVInE7wpCDVCwDRoHHJ84kb6JeflamMCgYBDQDcKie9HP3q6uLE4xMKr +5gIuNz2n5UQGnGNUGNXp2/SVDArr55MEksqsd19aesi01KeOz74XoNDke6R1NJJo +6KTB+08XhWl3GwuoGL02FBGvsNf3I8W1oBAnlAZqzfRx+CNfuA55ttU318jDgvD3 +6L0QBNdef411PNf4dbhacQKBgAd/e0PHFm4lbYJAaDYeUMSKwGN3KQ/SOmwblgSu +iC36BwcGfYmU1tHMCUsx05Q50W4kA9Ylskt/4AqCPexdz8lHnE4/7/uesXO5I3YF +JQ2h2Jufx6+MXbjUyq0Mv+ZI/m3+5PD6vxIFk0ew9T5SO4lSMIrGHxsSzx6QCuhB +bG4TAoGBAJ5PWG7d2CyCjLtfF8J4NxykRvIQ8l/3kDvDdNrXiXbgonojo2lgRYaM +5LoK9ApN8KHdedpTRipBaDA22Sp5SjMcUE7A6q42PJCL9r+BRYF0foFQx/rqpCff +pVWKgwIPoKnfxDqN1RUgyFcx1jbA3XVJZCuT+wbMuDQ9nlvulD1W +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/client/transport.go b/vendor/github.com/moby/moby/client/transport.go new file mode 100644 index 0000000..f04e601 --- /dev/null +++ b/vendor/github.com/moby/moby/client/transport.go @@ -0,0 +1,28 @@ +package client + +import ( + "crypto/tls" + "errors" + "net/http" +) + +var errTLSConfigUnavailable = errors.New("TLSConfig unavailable") + +// transportFunc allows us to inject a mock transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. +type transportFunc func(*http.Request) (*http.Response, error) + +func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +// resolveTLSConfig attempts to resolve the tls configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/moby/moby/client/utils.go b/vendor/github.com/moby/moby/client/utils.go new file mode 100644 index 0000000..23d520e --- /dev/null +++ b/vendor/github.com/moby/moby/client/utils.go @@ -0,0 +1,33 @@ +package client + +import ( + "github.com/docker/docker/api/types/filters" + "net/url" + "regexp" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. +func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToParam(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/version.go b/vendor/github.com/moby/moby/client/version.go new file mode 100644 index 0000000..933ceb4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/version.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ServerVersion returns information of the docker client and server host. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/vendor/github.com/moby/moby/client/volume_create.go b/vendor/github.com/moby/moby/client/volume_create.go new file mode 100644 index 0000000..9620c87 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_create.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/vendor/github.com/moby/moby/client/volume_create_test.go b/vendor/github.com/moby/moby/client/volume_create_test.go new file mode 100644 index 0000000..9f1b254 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_create_test.go @@ -0,0 +1,75 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeCreate(t *testing.T) { + expectedURL := "/volumes/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.Volume{ + Name: "volume", + Driver: "local", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volume, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{ + Name: "myvolume", + Driver: "mydriver", + DriverOpts: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if volume.Name != "volume" { + t.Fatalf("expected volume.Name to be 'volume', got %s", volume.Name) + } + if volume.Driver != "local" { + t.Fatalf("expected volume.Driver to be 'local', got %s", volume.Driver) + } + if volume.Mountpoint != "mountpoint" { + t.Fatalf("expected volume.Mountpoint to be 'mountpoint', got %s", volume.Mountpoint) + } +} diff --git a/vendor/github.com/moby/moby/client/volume_inspect.go b/vendor/github.com/moby/moby/client/volume_inspect.go new file mode 100644 index 0000000..3860e9b --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, nil, volumeNotFoundError{volumeID} + } + return volume, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/vendor/github.com/moby/moby/client/volume_inspect_test.go b/vendor/github.com/moby/moby/client/volume_inspect_test.go new file mode 100644 index 0000000..0d1d118 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_inspect_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestVolumeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeInspectNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "unknown") + if err == nil || !IsErrVolumeNotFound(err) { + t.Fatalf("expected a volumeNotFound error, got %v", err) + } +} + +func TestVolumeInspect(t *testing.T) { + expectedURL := "/volumes/volume_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + content, err := json.Marshal(types.Volume{ + Name: "name", + Driver: "driver", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + v, err := client.VolumeInspect(context.Background(), "volume_id") + if err != nil { + t.Fatal(err) + } + if v.Name != "name" { + t.Fatalf("expected `name`, got %s", v.Name) + } + if v.Driver != "driver" { + t.Fatalf("expected `driver`, got %s", v.Driver) + } + if v.Mountpoint != "mountpoint" { + t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) + } +} diff --git a/vendor/github.com/moby/moby/client/volume_list.go b/vendor/github.com/moby/moby/client/volume_list.go new file mode 100644 index 0000000..32247ce --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { + var volumes volumetypes.VolumesListOKBody + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/vendor/github.com/moby/moby/client/volume_list_test.go b/vendor/github.com/moby/moby/client/volume_list_test.go new file mode 100644 index 0000000..f29639b --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_list_test.go @@ -0,0 +1,98 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeList(t *testing.T) { + expectedURL := "/volumes" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + filters filters.Args + expectedFilters string + }{ + { + filters: filters.NewArgs(), + expectedFilters: "", + }, { + filters: noDanglingFilters, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + filters: danglingFilters, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + filters: labelFilters, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal(volumetypes.VolumesListOKBody{ + Volumes: []*types.Volume{ + { + Name: "volume", + Driver: "local", + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volumeResponse, err := client.VolumeList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(volumeResponse.Volumes) != 1 { + t.Fatalf("expected 1 volume, got %v", volumeResponse.Volumes) + } + } +} diff --git a/vendor/github.com/moby/moby/client/volume_prune.go b/vendor/github.com/moby/moby/client/volume_prune.go new file mode 100644 index 0000000..a07e4ce --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_remove.go b/vendor/github.com/moby/moby/client/volume_remove.go new file mode 100644 index 0000000..6c26575 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/volume_remove_test.go b/vendor/github.com/moby/moby/client/volume_remove_test.go new file mode 100644 index 0000000..1fe6573 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestVolumeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeRemove(t *testing.T) { + expectedURL := "/volumes/volume_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/cmd/docker/daemon_none.go b/vendor/github.com/moby/moby/cmd/docker/daemon_none.go new file mode 100644 index 0000000..65f9f37 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/docker/daemon_none.go @@ -0,0 +1,27 @@ +// +build !daemon + +package main + +import ( + "fmt" + "runtime" + "strings" + + "github.com/spf13/cobra" +) + +func newDaemonCommand() *cobra.Command { + return &cobra.Command{ + Use: "daemon", + Hidden: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDaemon() + }, + } +} + +func runDaemon() error { + return fmt.Errorf( + "`docker daemon` is not supported on %s. Please run `dockerd` directly", + strings.Title(runtime.GOOS)) +} diff --git a/vendor/github.com/moby/moby/cmd/docker/daemon_none_test.go b/vendor/github.com/moby/moby/cmd/docker/daemon_none_test.go new file mode 100644 index 0000000..32032fe --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/docker/daemon_none_test.go @@ -0,0 +1,17 @@ +// +build !daemon + +package main + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestDaemonCommand(t *testing.T) { + cmd := newDaemonCommand() + cmd.SetArgs([]string{"--help"}) + err := cmd.Execute() + + assert.Error(t, err, "Please run `dockerd`") +} diff --git a/vendor/github.com/moby/moby/cmd/docker/daemon_unit_test.go b/vendor/github.com/moby/moby/cmd/docker/daemon_unit_test.go new file mode 100644 index 0000000..26348a8 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/docker/daemon_unit_test.go @@ -0,0 +1,30 @@ +// +build daemon + +package main + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/assert" + "github.com/spf13/cobra" +) + +func stubRun(cmd *cobra.Command, args []string) error { + return nil +} + +func TestDaemonCommandHelp(t *testing.T) { + cmd := newDaemonCommand() + cmd.RunE = stubRun + cmd.SetArgs([]string{"--help"}) + err := cmd.Execute() + assert.NilError(t, err) +} + +func TestDaemonCommand(t *testing.T) { + cmd := newDaemonCommand() + cmd.RunE = stubRun + cmd.SetArgs([]string{"--containerd", "/foo"}) + err := cmd.Execute() + assert.NilError(t, err) +} diff --git a/vendor/github.com/moby/moby/cmd/docker/daemon_unix.go b/vendor/github.com/moby/moby/cmd/docker/daemon_unix.go new file mode 100644 index 0000000..f68d220 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/docker/daemon_unix.go @@ -0,0 +1,79 @@ +// +build daemon + +package main + +import ( + "fmt" + + "os" + "os/exec" + "path/filepath" + "syscall" + + "github.com/spf13/cobra" +) + +const daemonBinary = "dockerd" + +func newDaemonCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "daemon", + Hidden: true, + Args: cobra.ArbitraryArgs, + DisableFlagParsing: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDaemon() + }, + Deprecated: "and will be removed in Docker 1.16. Please run `dockerd` directly.", + } + cmd.SetHelpFunc(helpFunc) + return cmd +} + +// CmdDaemon execs dockerd with the same flags +func runDaemon() error { + // Use os.Args[1:] so that "global" args are passed to dockerd + return execDaemon(stripDaemonArg(os.Args[1:])) +} + +func execDaemon(args []string) error { + binaryPath, err := findDaemonBinary() + if err != nil { + return err + } + + return syscall.Exec( + binaryPath, + append([]string{daemonBinary}, args...), + os.Environ()) +} + +func helpFunc(cmd *cobra.Command, args []string) { + if err := execDaemon([]string{"--help"}); err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err.Error()) + } +} + +// findDaemonBinary looks for the path to the dockerd binary starting with +// the directory of the current executable (if one exists) and followed by $PATH +func findDaemonBinary() (string, error) { + execDirname := filepath.Dir(os.Args[0]) + if execDirname != "" { + binaryPath := filepath.Join(execDirname, daemonBinary) + if _, err := os.Stat(binaryPath); err == nil { + return binaryPath, nil + } + } + + return exec.LookPath(daemonBinary) +} + +// stripDaemonArg removes the `daemon` argument from the list +func stripDaemonArg(args []string) []string { + for i, arg := range args { + if arg == "daemon" { + return append(args[:i], args[i+1:]...) + } + } + return args +} diff --git a/vendor/github.com/moby/moby/cmd/docker/docker.go b/vendor/github.com/moby/moby/cmd/docker/docker.go new file mode 100644 index 0000000..d4847a9 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/docker/docker.go @@ -0,0 +1,180 @@ +package main + +import ( + "errors" + "fmt" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/commands" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newDockerCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := cliflags.NewClientOptions() + var flags *pflag.FlagSet + + cmd := &cobra.Command{ + Use: "docker [OPTIONS] COMMAND [ARG...]", + Short: "A self-sufficient runtime for containers", + SilenceUsage: true, + SilenceErrors: true, + TraverseChildren: true, + Args: noArgs, + RunE: func(cmd *cobra.Command, args []string) error { + if opts.Version { + showVersion() + return nil + } + return dockerCli.ShowHelp(cmd, args) + }, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + // daemon command is special, we redirect directly to another binary + if cmd.Name() == "daemon" { + return nil + } + // flags must be the top-level command flags, not cmd.Flags() + opts.Common.SetDefaultOptions(flags) + dockerPreRun(opts) + if err := dockerCli.Initialize(opts); err != nil { + return err + } + return isSupported(cmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()) + }, + } + cli.SetupRootCommand(cmd) + + cmd.SetHelpFunc(func(ccmd *cobra.Command, args []string) { + if dockerCli.Client() == nil { // when using --help, PersistenPreRun is not called, so initialization is needed. + // flags must be the top-level command flags, not cmd.Flags() + opts.Common.SetDefaultOptions(flags) + dockerPreRun(opts) + dockerCli.Initialize(opts) + } + + if err := isSupported(ccmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()); err != nil { + ccmd.Println(err) + return + } + + hideUnsupportedFeatures(ccmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()) + + if err := ccmd.Help(); err != nil { + ccmd.Println(err) + } + }) + + flags = cmd.Flags() + flags.BoolVarP(&opts.Version, "version", "v", false, "Print version information and quit") + flags.StringVar(&opts.ConfigDir, "config", cliconfig.ConfigDir(), "Location of client config files") + opts.Common.InstallFlags(flags) + + cmd.SetOutput(dockerCli.Out()) + cmd.AddCommand(newDaemonCommand()) + commands.AddCommands(cmd, dockerCli) + + return cmd +} + +func noArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + return fmt.Errorf( + "docker: '%s' is not a docker command.\nSee 'docker --help'", args[0]) +} + +func main() { + // Set terminal emulation based on platform as required. + stdin, stdout, stderr := term.StdStreams() + logrus.SetOutput(stderr) + + dockerCli := command.NewDockerCli(stdin, stdout, stderr) + cmd := newDockerCommand(dockerCli) + + if err := cmd.Execute(); err != nil { + if sterr, ok := err.(cli.StatusError); ok { + if sterr.Status != "" { + fmt.Fprintln(stderr, sterr.Status) + } + // StatusError should only be used for errors, and all errors should + // have a non-zero exit status, so never exit with 0 + if sterr.StatusCode == 0 { + os.Exit(1) + } + os.Exit(sterr.StatusCode) + } + fmt.Fprintln(stderr, err) + os.Exit(1) + } +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) +} + +func dockerPreRun(opts *cliflags.ClientOptions) { + cliflags.SetLogLevel(opts.Common.LogLevel) + + if opts.ConfigDir != "" { + cliconfig.SetConfigDir(opts.ConfigDir) + } + + if opts.Common.Debug { + utils.EnableDebug() + } +} + +func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) { + cmd.Flags().VisitAll(func(f *pflag.Flag) { + // hide experimental flags + if !hasExperimental { + if _, ok := f.Annotations["experimental"]; ok { + f.Hidden = true + } + } + + // hide flags not supported by the server + if flagVersion, ok := f.Annotations["version"]; ok && len(flagVersion) == 1 && versions.LessThan(clientVersion, flagVersion[0]) { + f.Hidden = true + } + + }) + + for _, subcmd := range cmd.Commands() { + // hide experimental subcommands + if !hasExperimental { + if _, ok := subcmd.Tags["experimental"]; ok { + subcmd.Hidden = true + } + } + + // hide subcommands not supported by the server + if subcmdVersion, ok := subcmd.Tags["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) { + subcmd.Hidden = true + } + } +} + +func isSupported(cmd *cobra.Command, clientVersion string, hasExperimental bool) error { + if !hasExperimental { + if _, ok := cmd.Tags["experimental"]; ok { + return errors.New("only supported with experimental daemon") + } + } + + if cmdVersion, ok := cmd.Tags["version"]; ok && versions.LessThan(clientVersion, cmdVersion) { + return fmt.Errorf("only supported with daemon version >= %s", cmdVersion) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/cmd/docker/docker_test.go b/vendor/github.com/moby/moby/cmd/docker/docker_test.go new file mode 100644 index 0000000..8738f60 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/docker/docker_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/utils" +) + +func TestClientDebugEnabled(t *testing.T) { + defer utils.DisableDebug() + + cmd := newDockerCommand(&command.DockerCli{}) + cmd.Flags().Set("debug", "true") + + err := cmd.PersistentPreRunE(cmd, []string{}) + assert.NilError(t, err) + assert.Equal(t, os.Getenv("DEBUG"), "1") + assert.Equal(t, logrus.GetLevel(), logrus.DebugLevel) +} + +func TestExitStatusForInvalidSubcommandWithHelpFlag(t *testing.T) { + discard := ioutil.Discard + cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard)) + cmd.SetArgs([]string{"help", "invalid"}) + err := cmd.Execute() + assert.Error(t, err, "unknown help topic: invalid") +} diff --git a/vendor/github.com/moby/moby/cmd/docker/docker_windows.go b/vendor/github.com/moby/moby/cmd/docker/docker_windows.go new file mode 100644 index 0000000..9bc507e --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/docker/docker_windows.go @@ -0,0 +1,18 @@ +package main + +import ( + "sync/atomic" + + _ "github.com/docker/docker/autogen/winresources/docker" +) + +//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" + +var dummy uintptr + +func init() { + // Ensure that this import is not removed by the linker. This is used to + // ensure that shell32.dll is loaded by the system loader, preventing + // go#15286 from triggering on Nano Server TP5. + atomic.LoadUintptr(&dummy) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/README.md b/vendor/github.com/moby/moby/cmd/dockerd/README.md new file mode 100644 index 0000000..a8c20b3 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker daemon's main function. + +This file provides first line CLI argument parsing and environment variable setting. diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon.go new file mode 100644 index 0000000..09c7bbb --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon.go @@ -0,0 +1,524 @@ +package main + +import ( + "crypto/tls" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/docker/docker/api" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" + "github.com/docker/docker/api/server/router/container" + "github.com/docker/docker/api/server/router/image" + "github.com/docker/docker/api/server/router/network" + pluginrouter "github.com/docker/docker/api/server/router/plugin" + swarmrouter "github.com/docker/docker/api/server/router/swarm" + systemrouter "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/builder/dockerfile" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/cluster" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/libcontainerd" + dopts "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/listeners" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/pflag" +) + +const ( + flagDaemonConfigFile = "config-file" +) + +// DaemonCli represents the daemon CLI. +type DaemonCli struct { + *daemon.Config + configFile *string + flags *pflag.FlagSet + + api *apiserver.Server + d *daemon.Daemon + authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins +} + +// NewDaemonCli returns a daemon CLI +func NewDaemonCli() *DaemonCli { + return &DaemonCli{} +} + +func migrateKey(config *daemon.Config) (err error) { + // No migration necessary on Windows + if runtime.GOOS == "windows" { + return nil + } + + // Migrate trust key if exists at ~/.docker/key.json and owned by current user + oldPath := filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) + newPath := filepath.Join(getDaemonConfDir(config.Root), cliflags.DefaultTrustKeyFile) + if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) { + defer func() { + // Ensure old path is removed if no error occurred + if err == nil { + err = os.Remove(oldPath) + } else { + logrus.Warnf("Key migration failed, key file not removed at %s", oldPath) + os.Remove(newPath) + } + }() + + if err := system.MkdirAll(getDaemonConfDir(config.Root), os.FileMode(0644)); err != nil { + return fmt.Errorf("Unable to create daemon configuration directory: %s", err) + } + + newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return fmt.Errorf("error creating key file %q: %s", newPath, err) + } + defer newFile.Close() + + oldFile, err := os.Open(oldPath) + if err != nil { + return fmt.Errorf("error opening key file %q: %s", oldPath, err) + } + defer oldFile.Close() + + if _, err := io.Copy(newFile, oldFile); err != nil { + return fmt.Errorf("error copying key: %s", err) + } + + logrus.Infof("Migrated key from %s to %s", oldPath, newPath) + } + + return nil +} + +func (cli *DaemonCli) start(opts daemonOptions) (err error) { + stopc := make(chan bool) + defer close(stopc) + + // warn from uuid package when running the daemon + uuid.Loggerf = logrus.Warnf + + opts.common.SetDefaultOptions(opts.flags) + + if cli.Config, err = loadDaemonCliConfig(opts); err != nil { + return err + } + cli.configFile = &opts.configFile + cli.flags = opts.flags + + if opts.common.TrustKey == "" { + opts.common.TrustKey = filepath.Join( + getDaemonConfDir(cli.Config.Root), + cliflags.DefaultTrustKeyFile) + } + + if cli.Config.Debug { + utils.EnableDebug() + } + + if cli.Config.Experimental { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: jsonlog.RFC3339NanoFixed, + DisableColors: cli.Config.RawLogs, + }) + + if err := setDefaultUmask(); err != nil { + return fmt.Errorf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + return fmt.Errorf("Failed to set log opts: %v", err) + } + } + + // Create the daemon root before we create ANY other files (PID, or migrate keys) + // to ensure the appropriate ACL is set (particularly relevant on Windows) + if err := daemon.CreateDaemonRoot(cli.Config); err != nil { + return err + } + + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + defer func() { + if err := pf.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + serverConfig := &apiserver.Config{ + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + EnableCors: cli.Config.EnableCors, + CorsHeaders: cli.Config.CorsHeaders, + } + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + return err + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + api := apiserver.New(serverConfig) + cli.api = api + + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + return fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) + } + + proto := protoAddrParts[0] + addr := protoAddrParts[1] + + // It's a bad idea to bind to TCP without tlsverify. + if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { + logrus.Warn("[!] DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") + } + ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + return err + } + ls = wrapListeners(proto, ls) + // If we're binding to a TCP port, make sure that a container doesn't try to use it. + if proto == "tcp" { + if err := allocateDaemonPort(addr); err != nil { + return err + } + } + logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) + api.Accept(addr, ls...) + } + + if err := migrateKey(cli.Config); err != nil { + return err + } + + // FIXME: why is this down here instead of with the other TrustKey logic above? + cli.TrustKeyPath = opts.common.TrustKey + + registryService := registry.NewService(cli.Config.ServiceOptions) + containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) + if err != nil { + return err + } + signal.Trap(func() { + cli.stop() + <-stopc // wait for daemonCli.start() to return + }) + + d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + + if cli.Config.MetricsAddress != "" { + if !d.HasExperimental() { + return fmt.Errorf("metrics-addr is only supported when experimental is enabled") + } + if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { + return err + } + } + + name, _ := os.Hostname() + + c, err := cluster.New(cluster.Config{ + Root: cli.Config.Root, + Name: name, + Backend: d, + NetworkSubnetsProvider: d, + DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, + RuntimeRoot: cli.getSwarmRunRoot(), + }) + if err != nil { + logrus.Fatalf("Error creating cluster component: %v", err) + } + + // Restart all autostart containers which has a swarm endpoint + // and is not yet running now that we have successfully + // initialized the cluster. + d.RestartSwarmContainers() + + logrus.Info("Daemon has completed initialization") + + logrus.WithFields(logrus.Fields{ + "version": dockerversion.Version, + "commit": dockerversion.GitCommit, + "graphdriver": d.GraphDriverName(), + }).Info("Docker daemon") + + cli.d = d + + // initMiddlewares needs cli.d to be populated. Dont change this init order. + if err := cli.initMiddlewares(api, serverConfig); err != nil { + logrus.Fatalf("Error creating middlewares: %v", err) + } + d.SetCluster(c) + initRouter(api, d, c) + + cli.setupConfigReloadTrap() + + // The serve API routine never exits unless an error occurs + // We need to start it as a goroutine and wait on it so + // daemon doesn't exit + serveAPIWait := make(chan error) + go api.Wait(serveAPIWait) + + // after the daemon is done setting up we can notify systemd api + notifySystem() + + // Daemon is fully initialized and handling API traffic + // Wait for serve API to complete + errAPI := <-serveAPIWait + c.Cleanup() + shutdownDaemon(d) + containerdRemote.Cleanup() + if errAPI != nil { + return fmt.Errorf("Shutting down due to ServeAPI error: %v", errAPI) + } + + return nil +} + +func (cli *DaemonCli) reloadConfig() { + reload := func(config *daemon.Config) { + + // Revalidate and reload the authorization plugins + if err := validateAuthzPlugins(config.AuthorizationPlugins, cli.d.PluginStore); err != nil { + logrus.Fatalf("Error validating authorization plugin: %v", err) + return + } + cli.authzMiddleware.SetPlugins(config.AuthorizationPlugins) + + if err := cli.d.Reload(config); err != nil { + logrus.Errorf("Error reconfiguring the daemon: %v", err) + return + } + + if config.IsValueSet("debug") { + debugEnabled := utils.IsDebugEnabled() + switch { + case debugEnabled && !config.Debug: // disable debug + utils.DisableDebug() + cli.api.DisableProfiler() + case config.Debug && !debugEnabled: // enable debug + utils.EnableDebug() + cli.api.EnableProfiler() + } + + } + } + + if err := daemon.ReloadConfiguration(*cli.configFile, cli.flags, reload); err != nil { + logrus.Error(err) + } +} + +func (cli *DaemonCli) stop() { + cli.api.Close() +} + +// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case +// d.Shutdown() is waiting too long to kill container or worst it's +// blocked there +func shutdownDaemon(d *daemon.Daemon) { + shutdownTimeout := d.ShutdownTimeout() + ch := make(chan struct{}) + go func() { + d.Shutdown() + close(ch) + }() + if shutdownTimeout < 0 { + <-ch + logrus.Debug("Clean shutdown succeeded") + return + } + select { + case <-ch: + logrus.Debug("Clean shutdown succeeded") + case <-time.After(time.Duration(shutdownTimeout) * time.Second): + logrus.Error("Force shutdown daemon") + } +} + +func loadDaemonCliConfig(opts daemonOptions) (*daemon.Config, error) { + config := opts.daemonConfig + flags := opts.flags + config.Debug = opts.common.Debug + config.Hosts = opts.common.Hosts + config.LogLevel = opts.common.LogLevel + config.TLS = opts.common.TLS + config.TLSVerify = opts.common.TLSVerify + config.CommonTLSOptions = daemon.CommonTLSOptions{} + + if opts.common.TLSOptions != nil { + config.CommonTLSOptions.CAFile = opts.common.TLSOptions.CAFile + config.CommonTLSOptions.CertFile = opts.common.TLSOptions.CertFile + config.CommonTLSOptions.KeyFile = opts.common.TLSOptions.KeyFile + } + + if opts.configFile != "" { + c, err := daemon.MergeDaemonConfigurations(config, flags, opts.configFile) + if err != nil { + if flags.Changed(flagDaemonConfigFile) || !os.IsNotExist(err) { + return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", opts.configFile, err) + } + } + // the merged configuration can be nil if the config file didn't exist. + // leave the current configuration as it is if when that happens. + if c != nil { + config = c + } + } + + if err := daemon.ValidateConfiguration(config); err != nil { + return nil, err + } + + // Labels of the docker engine used to allow multiple values associated with the same key. + // This is deprecated in 1.13, and, be removed after 3 release cycles. + // The following will check the conflict of labels, and report a warning for deprecation. + // + // TODO: After 3 release cycles (17.12) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := daemon.GetConflictFreeLabels(config.Labels) + // if err != nil { + // return nil, err + // } + // config.Labels = newLabels + // + if _, err := daemon.GetConflictFreeLabels(config.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + // Regardless of whether the user sets it to true or false, if they + // specify TLSVerify at all then we need to turn on TLS + if config.IsValueSet(cliflags.FlagTLSVerify) { + config.TLS = true + } + + // ensure that the log level is the one set after merging configurations + cliflags.SetLogLevel(config.LogLevel) + + return config, nil +} + +func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { + decoder := runconfig.ContainerDecoder{} + + routers := []router.Router{ + // we need to add the checkpoint router before the container router or the DELETE gets masked + checkpointrouter.NewRouter(d, decoder), + container.NewRouter(d, decoder), + image.NewRouter(d, decoder), + systemrouter.NewRouter(d, c), + volume.NewRouter(d), + build.NewRouter(dockerfile.NewBuildManager(d)), + swarmrouter.NewRouter(c), + pluginrouter.NewRouter(d.PluginManager()), + } + + if d.NetworkControllerEnabled() { + routers = append(routers, network.NewRouter(d, c)) + } + + if d.HasExperimental() { + for _, r := range routers { + for _, route := range r.Routes() { + if experimental, ok := route.(router.ExperimentalRoute); ok { + experimental.Enable() + } + } + } + } + + s.InitRouter(utils.IsDebugEnabled(), routers...) +} + +func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config) error { + v := cfg.Version + + exp := middleware.NewExperimentalMiddleware(cli.d.HasExperimental()) + s.UseMiddleware(exp) + + vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) + s.UseMiddleware(vm) + + if cfg.EnableCors { + c := middleware.NewCORSMiddleware(cfg.CorsHeaders) + s.UseMiddleware(c) + } + + if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, cli.d.PluginStore); err != nil { + return fmt.Errorf("Error validating authorization plugin: %v", err) + } + cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, cli.d.PluginStore) + s.UseMiddleware(cli.authzMiddleware) + return nil +} + +// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver +// plugins present on the host and available to the daemon +func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { + for _, reqPlugin := range requestedPlugins { + if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.LOOKUP); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_freebsd.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_freebsd.go new file mode 100644 index 0000000..623aaf4 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_freebsd.go @@ -0,0 +1,5 @@ +package main + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_linux.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_linux.go new file mode 100644 index 0000000..a556daa --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_linux.go @@ -0,0 +1,11 @@ +// +build linux + +package main + +import systemdDaemon "github.com/coreos/go-systemd/daemon" + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { + // Tell the init daemon we are accepting requests + go systemdDaemon.SdNotify("READY=1") +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_solaris.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_solaris.go new file mode 100644 index 0000000..974ba16 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_solaris.go @@ -0,0 +1,85 @@ +// +build solaris + +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" +) + +const defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir(_ string) string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{} + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return filepath.Join(cli.Config.ExecRoot, "swarm") +} + +func allocateDaemonPort(addr string) error { + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_test.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_test.go new file mode 100644 index 0000000..b364f87 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_test.go @@ -0,0 +1,145 @@ +package main + +import ( + "testing" + + "github.com/Sirupsen/logrus" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/spf13/pflag" +) + +func defaultOptions(configFile string) daemonOptions { + opts := daemonOptions{ + daemonConfig: &daemon.Config{}, + flags: &pflag.FlagSet{}, + common: cliflags.NewCommonOptions(), + } + opts.common.InstallFlags(opts.flags) + opts.daemonConfig.InstallFlags(opts.flags) + opts.flags.StringVar(&opts.configFile, flagDaemonConfigFile, defaultDaemonConfigFile, "") + opts.configFile = configFile + return opts +} + +func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { + opts := defaultOptions("") + opts.common.Debug = true + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + if !loadedConfig.Debug { + t.Fatalf("expected debug to be copied from the common flags, got false") + } +} + +func TestLoadDaemonCliConfigWithTLS(t *testing.T) { + opts := defaultOptions("") + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + opts.common.TLS = true + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.CommonTLSOptions.CAFile, "/tmp/ca.pem") +} + +func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"labels": ["l3=foo"]}`) + defer tempFile.Remove() + configFile := tempFile.Name() + + opts := defaultOptions(configFile) + flags := opts.flags + + assert.NilError(t, flags.Set(flagDaemonConfigFile, configFile)) + assert.NilError(t, flags.Set("label", "l1=bar")) + assert.NilError(t, flags.Set("label", "l2=baz")) + + _, err := loadDaemonCliConfig(opts) + assert.Error(t, err, "as a flag and in the configuration file: labels") +} + +func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": true}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, true) +} + +func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": false}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, true) +} + +func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, false) +} + +func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"log-level": "warn"}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.LogLevel, "warn") + assert.Equal(t, logrus.GetLevel(), logrus.WarnLevel) +} + +func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { + content := `{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.CommonTLSOptions.CAFile, "/etc/certs/ca.pem") + assert.Equal(t, loadedConfig.LogConfig.Type, "syslog") +} + +func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { + content := `{ + "registry-mirrors": ["https://mirrors.docker.com"], + "insecure-registries": ["https://insecure.docker.com"] + }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + + assert.Equal(t, len(loadedConfig.Mirrors), 1) + assert.Equal(t, len(loadedConfig.InsecureRegistries), 1) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix.go new file mode 100644 index 0000000..bdce98b --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix.go @@ -0,0 +1,137 @@ +// +build !windows,!solaris + +package main + +import ( + "fmt" + "net" + "os" + "os/signal" + "path/filepath" + "strconv" + "syscall" + + "github.com/docker/docker/cmd/dockerd/hack" + "github.com/docker/docker/daemon" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" + "github.com/docker/libnetwork/portallocator" +) + +const defaultDaemonConfigFile = "/etc/docker/daemon.json" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir(_ string) string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + go func() { + for range c { + cli.reloadConfig() + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{ + libcontainerd.WithDebugLog(cli.Config.Debug), + libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust), + } + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + if daemon.UsingSystemd(cli.Config) { + args := []string{"--systemd-cgroup=true"} + opts = append(opts, libcontainerd.WithRuntimeArgs(args)) + } + if cli.Config.LiveRestoreEnabled { + opts = append(opts, libcontainerd.WithLiveRestore(true)) + } + opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary)) + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return filepath.Join(cli.Config.ExecRoot, "swarm") +} + +// allocateDaemonPort ensures that there are no containers +// that try to use any port allocated for the docker server. +func allocateDaemonPort(addr string) error { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + intPort, err := strconv.Atoi(port) + if err != nil { + return err + } + + var hostIPs []net.IP + if parsedIP := net.ParseIP(host); parsedIP != nil { + hostIPs = append(hostIPs, parsedIP) + } else if hostIPs, err = net.LookupIP(host); err != nil { + return fmt.Errorf("failed to lookup %s address in host specification", host) + } + + pa := portallocator.Get() + for _, hostIP := range hostIPs { + if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { + return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) + } + } + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + switch proto { + case "unix": + ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} + case "fd": + for i := range ls { + ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} + } + } + return ls +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix_test.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix_test.go new file mode 100644 index 0000000..d66dba7 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix_test.go @@ -0,0 +1,114 @@ +// +build !windows,!solaris + +// TODO: Create new file for Solaris which tests config parameters +// as described in daemon/config_solaris.go + +package main + +import ( + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil/tempfile" + "testing" +) + +func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { + content := `{"log-opts": {"max-size": "1k"}}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.Debug = true + opts.common.LogLevel = "info" + assert.NilError(t, opts.flags.Set("selinux-enabled", "true")) + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + + assert.Equal(t, loadedConfig.Debug, true) + assert.Equal(t, loadedConfig.LogLevel, "info") + assert.Equal(t, loadedConfig.EnableSelinuxSupport, true) + assert.Equal(t, loadedConfig.LogConfig.Type, "json-file") + assert.Equal(t, loadedConfig.LogConfig.Config["max-size"], "1k") +} + +func TestLoadDaemonConfigWithNetwork(t *testing.T) { + content := `{"bip": "127.0.0.2", "ip": "127.0.0.1"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + + assert.Equal(t, loadedConfig.IP, "127.0.0.2") + assert.Equal(t, loadedConfig.DefaultIP.String(), "127.0.0.1") +} + +func TestLoadDaemonConfigWithMapOptions(t *testing.T) { + content := `{ + "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, + "log-opts": {"tag": "test"} +}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + expectedPath := "/var/lib/docker/discovery_certs/ca.pem" + assert.Equal(t, loadedConfig.ClusterOpts["kv.cacertfile"], expectedPath) + assert.NotNil(t, loadedConfig.LogConfig.Config) + assert.Equal(t, loadedConfig.LogConfig.Config["tag"], "test") +} + +func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { + content := `{ "userland-proxy": false }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + assert.Equal(t, loadedConfig.EnableUserlandProxy, false) + + // make sure reloading doesn't generate configuration + // conflicts after normalizing boolean values. + reload := func(reloadedConfig *daemon.Config) { + assert.Equal(t, reloadedConfig.EnableUserlandProxy, false) + } + assert.NilError(t, daemon.ReloadConfiguration(opts.configFile, opts.flags, reload)) +} + +func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + assert.Equal(t, loadedConfig.EnableUserlandProxy, true) +} + +func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) { + content := `{"disable-legacy-registry": true}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.V2Only, true) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_windows.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_windows.go new file mode 100644 index 0000000..4cccd32 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_windows.go @@ -0,0 +1,92 @@ +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" +) + +var defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + return false +} + +// setDefaultUmask doesn't do anything on windows +func setDefaultUmask() error { + return nil +} + +func getDaemonConfDir(root string) string { + return filepath.Join(root, `\config`) +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { + if service != nil { + err := service.started() + if err != nil { + logrus.Fatal(err) + } + } +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { + if service != nil { + if err != nil { + logrus.Fatal(err) + } + service.stopped(err) + } +} + +// setupConfigReloadTrap configures a Win32 event to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + go func() { + sa := syscall.SecurityAttributes{ + Length: 0, + } + ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) + if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { + logrus.Debugf("Config reload - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + cli.reloadConfig() + } + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + return nil +} + +// getLibcontainerdRoot gets the root directory for libcontainerd to store its +// state. The Windows libcontainerd implementation does not need to write a spec +// or state to disk, so this is a no-op. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return "" +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return "" +} + +func allocateDaemonPort(addr string) error { + return nil +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/docker.go b/vendor/github.com/moby/moby/cmd/dockerd/docker.go new file mode 100644 index 0000000..60742ae --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/docker.go @@ -0,0 +1,110 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/daemon" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type daemonOptions struct { + version bool + configFile string + daemonConfig *daemon.Config + common *cliflags.CommonOptions + flags *pflag.FlagSet +} + +func newDaemonCommand() *cobra.Command { + opts := daemonOptions{ + daemonConfig: daemon.NewConfig(), + common: cliflags.NewCommonOptions(), + } + + cmd := &cobra.Command{ + Use: "dockerd [OPTIONS]", + Short: "A self-sufficient runtime for containers.", + SilenceUsage: true, + SilenceErrors: true, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + opts.flags = cmd.Flags() + return runDaemon(opts) + }, + } + cli.SetupRootCommand(cmd) + + flags := cmd.Flags() + flags.BoolVarP(&opts.version, "version", "v", false, "Print version information and quit") + flags.StringVar(&opts.configFile, flagDaemonConfigFile, defaultDaemonConfigFile, "Daemon configuration file") + opts.common.InstallFlags(flags) + opts.daemonConfig.InstallFlags(flags) + installServiceFlags(flags) + + return cmd +} + +func runDaemon(opts daemonOptions) error { + if opts.version { + showVersion() + return nil + } + + daemonCli := NewDaemonCli() + + // Windows specific settings as these are not defaulted. + if runtime.GOOS == "windows" { + if opts.daemonConfig.Pidfile == "" { + opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") + } + if opts.configFile == "" { + opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) + } + } + + // On Windows, this may be launching as a service or with an option to + // register the service. + stop, err := initService(daemonCli) + if err != nil { + logrus.Fatal(err) + } + + if stop { + return nil + } + + err = daemonCli.start(opts) + notifyShutdown(err) + return err +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) +} + +func main() { + if reexec.Init() { + return + } + + // Set terminal emulation based on platform as required. + _, stdout, stderr := term.StdStreams() + logrus.SetOutput(stderr) + + cmd := newDaemonCommand() + cmd.SetOutput(stdout) + if err := cmd.Execute(); err != nil { + fmt.Fprintf(stderr, "%s\n", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/docker_windows.go b/vendor/github.com/moby/moby/cmd/dockerd/docker_windows.go new file mode 100644 index 0000000..19c5587 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/docker_windows.go @@ -0,0 +1,18 @@ +package main + +import ( + "sync/atomic" + + _ "github.com/docker/docker/autogen/winresources/dockerd" +) + +//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" + +var dummy uintptr + +func init() { + // Ensure that this import is not removed by the linker. This is used to + // ensure that shell32.dll is loaded by the system loader, preventing + // go#15286 from triggering on Nano Server TP5. + atomic.LoadUintptr(&dummy) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override.go b/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override.go new file mode 100644 index 0000000..d4aa3dd --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override.go @@ -0,0 +1,121 @@ +// +build !windows + +package hack + +import "net" + +// MalformedHostHeaderOverride is a wrapper to be able +// to overcome the 400 Bad request coming from old docker +// clients that send an invalid Host header. +type MalformedHostHeaderOverride struct { + net.Listener +} + +// MalformedHostHeaderOverrideConn wraps the underlying unix +// connection and keeps track of the first read from http.Server +// which just reads the headers. +type MalformedHostHeaderOverrideConn struct { + net.Conn + first bool +} + +var closeConnHeader = []byte("\r\nConnection: close\r") + +// Read reads the first *read* request from http.Server to inspect +// the Host header. If the Host starts with / then we're talking to +// an old docker client which send an invalid Host header. To not +// error out in http.Server we rewrite the first bytes of the request +// to sanitize the Host header itself. +// In case we're not dealing with old docker clients the data is just passed +// to the server w/o modification. +func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) { + // http.Server uses a 4k buffer + if l.first && len(b) == 4096 { + // This keeps track of the first read from http.Server which just reads + // the headers + l.first = false + // The first read of the connection by http.Server is done limited to + // DefaultMaxHeaderBytes (usually 1 << 20) + 4096. + // Here we do the first read which gets us all the http headers to + // be inspected and modified below. + c, err := l.Conn.Read(b) + if err != nil { + return c, err + } + + var ( + start, end int + firstLineFeed = -1 + buf []byte + ) + for i := 0; i <= c-1-7; i++ { + if b[i] == '\n' && firstLineFeed == -1 { + firstLineFeed = i + } + if b[i] != '\n' { + continue + } + + if b[i+1] == '\r' && b[i+2] == '\n' { + return c, nil + } + + if b[i+1] != 'H' { + continue + } + if b[i+2] != 'o' { + continue + } + if b[i+3] != 's' { + continue + } + if b[i+4] != 't' { + continue + } + if b[i+5] != ':' { + continue + } + if b[i+6] != ' ' { + continue + } + if b[i+7] != '/' { + continue + } + // ensure clients other than the docker clients do not get this hack + if i != firstLineFeed { + return c, nil + } + start = i + 7 + // now find where the value ends + for ii, bbb := range b[start:c] { + if bbb == '\n' { + end = start + ii + break + } + } + buf = make([]byte, 0, c+len(closeConnHeader)-(end-start)) + // strip the value of the host header and + // inject `Connection: close` to ensure we don't reuse this connection + buf = append(buf, b[:start]...) + buf = append(buf, closeConnHeader...) + buf = append(buf, b[end:c]...) + copy(b, buf) + break + } + if len(buf) == 0 { + return c, nil + } + return len(buf), nil + } + return l.Conn.Read(b) +} + +// Accept makes the listener accepts connections and wraps the connection +// in a MalformedHostHeaderOverrideConn initilizing first to true. +func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return c, err + } + return &MalformedHostHeaderOverrideConn{c, true}, nil +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override_test.go b/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override_test.go new file mode 100644 index 0000000..1a0a60b --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override_test.go @@ -0,0 +1,124 @@ +// +build !windows + +package hack + +import ( + "bytes" + "io" + "net" + "strings" + "testing" +) + +type bufConn struct { + net.Conn + buf *bytes.Buffer +} + +func (bc *bufConn) Read(b []byte) (int, error) { + return bc.buf.Read(b) +} + +func TestHeaderOverrideHack(t *testing.T) { + tests := [][2][]byte{ + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), + }, + { + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + }, + } + + // Test for https://github.com/docker/docker/issues/23045 + h0 := "GET /foo\nUser-Agent: Docker\r\n\r\n" + h0 = h0 + strings.Repeat("a", 4096-len(h0)-1) + "\n" + tests = append(tests, [2][]byte{[]byte(h0), []byte(h0)}) + + for _, pair := range tests { + read := make([]byte, 4096) + client := &bufConn{ + buf: bytes.NewBuffer(pair[0]), + } + l := MalformedHostHeaderOverrideConn{client, true} + + n, err := l.Read(read) + if err != nil && err != io.EOF { + t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n])) + } + if !bytes.Equal(read[:n], pair[1][:n]) { + t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n]) + } + } +} + +func BenchmarkWithHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + l := MalformedHostHeaderOverrideConn{client, true} + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + l.first = true // make sure each subsequent run uses the hack parsing + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if n, err := l.Read(read); err != nil && err != io.EOF { + b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n])) + } + } + } + l.Close() + <-done +} + +func BenchmarkNoHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if _, err := client.Read(read); err != nil && err != io.EOF { + b.Fatal(err) + } + } + } + client.Close() + <-done +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/metrics.go b/vendor/github.com/moby/moby/cmd/dockerd/metrics.go new file mode 100644 index 0000000..0c88604 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/metrics.go @@ -0,0 +1,27 @@ +package main + +import ( + "net" + "net/http" + + "github.com/Sirupsen/logrus" + metrics "github.com/docker/go-metrics" +) + +func startMetricsServer(addr string) error { + if err := allocateDaemonPort(addr); err != nil { + return err + } + l, err := net.Listen("tcp", addr) + if err != nil { + return err + } + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + if err := http.Serve(l, mux); err != nil { + logrus.Errorf("serve metrics api: %s", err) + } + }() + return nil +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/service_unsupported.go b/vendor/github.com/moby/moby/cmd/dockerd/service_unsupported.go new file mode 100644 index 0000000..64ad7fc --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/service_unsupported.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +import ( + "github.com/spf13/pflag" +) + +func initService(daemonCli *DaemonCli) (bool, error) { + return false, nil +} + +func installServiceFlags(flags *pflag.FlagSet) { +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/service_windows.go b/vendor/github.com/moby/moby/cmd/dockerd/service_windows.go new file mode 100644 index 0000000..dd37abc --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/service_windows.go @@ -0,0 +1,426 @@ +package main + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/spf13/pflag" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/debug" + "golang.org/x/sys/windows/svc/eventlog" + "golang.org/x/sys/windows/svc/mgr" +) + +var ( + flServiceName *string + flRegisterService *bool + flUnregisterService *bool + flRunService *bool + + setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle") + oldStderr syscall.Handle + panicFile *os.File + + service *handler +) + +const ( + // These should match the values in event_messages.mc. + eventInfo = 1 + eventWarn = 1 + eventError = 1 + eventDebug = 2 + eventPanic = 3 + eventFatal = 4 + + eventExtraOffset = 10 // Add this to any event to get a string that supports extended data +) + +func installServiceFlags(flags *pflag.FlagSet) { + flServiceName = flags.String("service-name", "docker", "Set the Windows service name") + flRegisterService = flags.Bool("register-service", false, "Register the service and exit") + flUnregisterService = flags.Bool("unregister-service", false, "Unregister the service and exit") + flRunService = flags.Bool("run-service", false, "") + flags.MarkHidden("run-service") +} + +type handler struct { + tosvc chan bool + fromsvc chan error + daemonCli *DaemonCli +} + +type etwHook struct { + log *eventlog.Log +} + +func (h *etwHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} + +func (h *etwHook) Fire(e *logrus.Entry) error { + var ( + etype uint16 + eid uint32 + ) + + switch e.Level { + case logrus.PanicLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventPanic + case logrus.FatalLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventFatal + case logrus.ErrorLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventError + case logrus.WarnLevel: + etype = windows.EVENTLOG_WARNING_TYPE + eid = eventWarn + case logrus.InfoLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventInfo + case logrus.DebugLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventDebug + default: + return errors.New("unknown level") + } + + // If there is additional data, include it as a second string. + exts := "" + if len(e.Data) > 0 { + fs := bytes.Buffer{} + for k, v := range e.Data { + fs.WriteString(k) + fs.WriteByte('=') + fmt.Fprint(&fs, v) + fs.WriteByte(' ') + } + + exts = fs.String()[:fs.Len()-1] + eid += eventExtraOffset + } + + if h.log == nil { + fmt.Fprintf(os.Stderr, "%s [%s]\n", e.Message, exts) + return nil + } + + var ( + ss [2]*uint16 + err error + ) + + ss[0], err = syscall.UTF16PtrFromString(e.Message) + if err != nil { + return err + } + + count := uint16(1) + if exts != "" { + ss[1], err = syscall.UTF16PtrFromString(exts) + if err != nil { + return err + } + + count++ + } + + return windows.ReportEvent(h.log.Handle, etype, 0, eid, 0, count, 0, &ss[0], nil) +} + +func getServicePath() (string, error) { + p, err := exec.LookPath(os.Args[0]) + if err != nil { + return "", err + } + return filepath.Abs(p) +} + +func registerService() error { + p, err := getServicePath() + if err != nil { + return err + } + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + depends := []string{} + + // This dependency is required on build 14393 (RS1) + // it is added to the platform in newer builds + if system.GetOSVersion().Build == 14393 { + depends = append(depends, "ConDrv") + } + + c := mgr.Config{ + ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, + StartType: mgr.StartAutomatic, + ErrorControl: mgr.ErrorNormal, + Dependencies: depends, + DisplayName: "Docker Engine", + } + + // Configure the service to launch with the arguments that were just passed. + args := []string{"--run-service"} + for _, a := range os.Args[1:] { + if a != "--register-service" && a != "--unregister-service" { + args = append(args, a) + } + } + + s, err := m.CreateService(*flServiceName, p, c, args...) + if err != nil { + return err + } + defer s.Close() + + // See http://stackoverflow.com/questions/35151052/how-do-i-configure-failure-actions-of-a-windows-service-written-in-go + const ( + scActionNone = 0 + scActionRestart = 1 + scActionReboot = 2 + scActionRunCommand = 3 + + serviceConfigFailureActions = 2 + ) + + type serviceFailureActions struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions uintptr + } + + type scAction struct { + Type uint32 + Delay uint32 + } + t := []scAction{ + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionNone}, + } + lpInfo := serviceFailureActions{ResetPeriod: uint32(24 * time.Hour / time.Second), ActionsCount: uint32(3), Actions: uintptr(unsafe.Pointer(&t[0]))} + err = windows.ChangeServiceConfig2(s.Handle, serviceConfigFailureActions, (*byte)(unsafe.Pointer(&lpInfo))) + if err != nil { + return err + } + + err = eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) + if err != nil { + return err + } + + return nil +} + +func unregisterService() error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + s, err := m.OpenService(*flServiceName) + if err != nil { + return err + } + defer s.Close() + + eventlog.Remove(*flServiceName) + err = s.Delete() + if err != nil { + return err + } + return nil +} + +func initService(daemonCli *DaemonCli) (bool, error) { + if *flUnregisterService { + if *flRegisterService { + return true, errors.New("--register-service and --unregister-service cannot be used together") + } + return true, unregisterService() + } + + if *flRegisterService { + return true, registerService() + } + + if !*flRunService { + return false, nil + } + + interactive, err := svc.IsAnInteractiveSession() + if err != nil { + return false, err + } + + h := &handler{ + tosvc: make(chan bool), + fromsvc: make(chan error), + daemonCli: daemonCli, + } + + var log *eventlog.Log + if !interactive { + log, err = eventlog.Open(*flServiceName) + if err != nil { + return false, err + } + } + + logrus.AddHook(&etwHook{log}) + logrus.SetOutput(ioutil.Discard) + + service = h + go func() { + if interactive { + err = debug.Run(*flServiceName, h) + } else { + err = svc.Run(*flServiceName, h) + } + + h.fromsvc <- err + }() + + // Wait for the first signal from the service handler. + err = <-h.fromsvc + if err != nil { + return false, err + } + return false, nil +} + +func (h *handler) started() error { + // This must be delayed until daemonCli initializes Config.Root + err := initPanicFile(filepath.Join(h.daemonCli.Config.Root, "panic.log")) + if err != nil { + return err + } + + h.tosvc <- false + return nil +} + +func (h *handler) stopped(err error) { + logrus.Debugf("Stopping service: %v", err) + h.tosvc <- err != nil + <-h.fromsvc +} + +func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { + s <- svc.Status{State: svc.StartPending, Accepts: 0} + // Unblock initService() + h.fromsvc <- nil + + // Wait for initialization to complete. + failed := <-h.tosvc + if failed { + logrus.Debug("Aborting service start due to failure during initialization") + return true, 1 + } + + s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} + logrus.Debug("Service running") +Loop: + for { + select { + case failed = <-h.tosvc: + break Loop + case c := <-r: + switch c.Cmd { + case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE): + h.daemonCli.reloadConfig() + case svc.Interrogate: + s <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + s <- svc.Status{State: svc.StopPending, Accepts: 0} + h.daemonCli.stop() + } + } + } + + removePanicFile() + if failed { + return true, 1 + } + return false, 0 +} + +func initPanicFile(path string) error { + var err error + panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) + if err != nil { + return err + } + + st, err := panicFile.Stat() + if err != nil { + return err + } + + // If there are contents in the file already, move the file out of the way + // and replace it. + if st.Size() > 0 { + panicFile.Close() + os.Rename(path, path+".old") + panicFile, err = os.Create(path) + if err != nil { + return err + } + } + + // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to + // it when it panics. Remember the old stderr to restore it before removing + // the panic file. + sh := syscall.STD_ERROR_HANDLE + h, err := syscall.GetStdHandle(sh) + if err != nil { + return err + } + + oldStderr = h + + r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd())) + if r == 0 && err != nil { + return err + } + + return nil +} + +func removePanicFile() { + if st, err := panicFile.Stat(); err == nil { + if st.Size() == 0 { + sh := syscall.STD_ERROR_HANDLE + setStdHandle.Call(uintptr(sh), uintptr(oldStderr)) + panicFile.Close() + os.Remove(panicFile.Name()) + } + } +} diff --git a/vendor/github.com/moby/moby/container/archive.go b/vendor/github.com/moby/moby/container/archive.go new file mode 100644 index 0000000..56e6598 --- /dev/null +++ b/vendor/github.com/moby/moby/container/archive.go @@ -0,0 +1,76 @@ +package container + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" +) + +// ResolvePath resolves the given path in the container to a resource on the +// host. Returns a resolved path (absolute path to the resource on the host), +// the absolute path to the resource relative to the container's rootfs, and +// an error if the path points to outside the container's rootfs. +func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + if err != nil { + return "", "", err + } + + // Consider the given path as an absolute path in the container. + absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // Split the absPath into its Directory and Base components. We will + // resolve the dir in the scope of the container then append the base. + dirPath, basePath := filepath.Split(absPath) + + resolvedDirPath, err := container.GetResourcePath(dirPath) + if err != nil { + return "", "", err + } + + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + + return resolvedPath, absPath, nil +} + +// StatPath is the unexported version of StatPath. Locks and mounts should +// be acquired before calling this method and the given path should be fully +// resolved to a path on the host corresponding to the given absolute path +// inside the container. +func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + lstat, err := os.Lstat(resolvedPath) + if err != nil { + return nil, err + } + + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs. + hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = filepath.Rel(container.BaseFS, hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/vendor/github.com/moby/moby/container/container.go b/vendor/github.com/moby/moby/container/container.go new file mode 100644 index 0000000..fc4fe27 --- /dev/null +++ b/vendor/github.com/moby/moby/container/container.go @@ -0,0 +1,1103 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + agentexec "github.com/docker/swarmkit/agent/exec" + "github.com/opencontainers/runc/libcontainer/label" +) + +const configFileName = "config.v2.json" + +const ( + // DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container. + DefaultStopTimeout = 10 +) + +var ( + errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") + errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") +) + +// DetachError is special error which returned in case of container detach. +type DetachError struct{} + +func (DetachError) Error() string { + return "detached from container" +} + +// CommonContainer holds the fields for a container which are +// applicable across all platforms supported by the daemon. +type CommonContainer struct { + StreamConfig *stream.Config + // embed for Container to support states directly. + *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. + BaseFS string `json:"-"` // Path to the graphdriver mountpoint + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Managed bool + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + SecretStore agentexec.SecretGetter `json:"-"` + SecretReferences []*swarmtypes.SecretReference + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext +} + +// NewBaseContainer creates a new container with its +// basic configuration. +func NewBaseContainer(id, root string) *Container { + return &Container{ + CommonContainer: CommonContainer{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volume.MountPoint), + StreamConfig: stream.NewConfig(), + attachContext: &attachContext{}, + }, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +// ToDisk saves the container configuration on disk. +func (container *Container) ToDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return err + } + defer jsonSource.Close() + + enc := json.NewEncoder(jsonSource) + + // Save container settings + if err := enc.Encode(container); err != nil { + return err + } + + return container.WriteHostConfig() +} + +// ToDiskLocking saves the container configuration on disk in a thread safe way. +func (container *Container) ToDiskLocking() error { + container.Lock() + err := container.ToDisk() + container.Unlock() + return err +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container. +func (container *Container) WriteHostConfig() error { + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(&container.HostConfig) +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { + if container.Config.WorkingDir == "" { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + + cleanPath := cleanResourcePath(path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + + // Log this here on the daemon side as there's otherwise no indication apart + // from the error being propagated all the way back to the client. This makes + // debugging significantly easier and clearly indicates the error comes from the daemon. + if e != nil { + logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) + } + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + container.RestartManager().Cancel() +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// CheckpointDir returns the directory checkpoints are stored in +func (container *Container) CheckpointDir() string { + return filepath.Join(container.Root, "checkpoints") +} + +// StartLogger starts a new logger driver for the container. +func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { + c, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("Failed to get logging factory: %v", err) + } + ctx := logger.Context{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + DaemonName: "docker", + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + return c(ctx) +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// Attach connects to the container's TTY, delegating to standard +// streams or websockets depending on the configuration. +func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + ctx := container.InitAttachContext() + return AttachStreams(ctx, container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) +} + +// AttachStreams connects streams to a TTY. +// Used by exec too. Should this move somewhere else? +func AttachStreams(ctx context.Context, streamConfig *stream.Config, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + var ( + cStdout, cStderr io.ReadCloser + cStdin io.WriteCloser + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if stdin != nil && openStdin { + cStdin = streamConfig.StdinPipe() + wg.Add(1) + } + + if stdout != nil { + cStdout = streamConfig.StdoutPipe() + wg.Add(1) + } + + if stderr != nil { + cStderr = streamConfig.StderrPipe() + wg.Add(1) + } + + // Connect stdin of container to the http conn. + go func() { + if stdin == nil || !openStdin { + return + } + logrus.Debug("attach: stdin: begin") + + var err error + if tty { + _, err = copyEscapable(cStdin, stdin, keys) + } else { + _, err = io.Copy(cStdin, stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if stdinOnce && !tty { + cStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + } + logrus.Debug("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := io.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if stdin != nil { + stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", stdout, cStdout) + go attachStream("stderr", stderr, cStderr) + + return promise.Go(func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cStdin != nil { + cStdin.Close() + } + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +// Code c/c from io.Copy() modified to handle escape sequence +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + // Default keys : ctrl-p ctrl-q + keys = []byte{16, 17} + } + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + preservBuf := []byte{} + for i, key := range keys { + preservBuf = append(preservBuf, buf[0:nr]...) + if nr != 1 || buf[0] != key { + break + } + if i == len(keys)-1 { + src.Close() + return 0, DetachError{} + } + nr, er = src.Read(buf) + } + var nw int + var ew error + if len(preservBuf) > 0 { + nw, ew = dst.Write(preservBuf) + nr = len(preservBuf) + } else { + // ---- End of docker + nw, ew = dst.Write(buf[0:nr]) + } + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} + +// ShouldRestart decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestart() bool { + shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) + return shouldRestart +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. +func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + CopyData: volume.DefaultCopyMode, + } +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error { + var errors []string + for _, volumeMount := range container.MountPoints { + // Check if the mounpoint has an ID, this is currently the best way to tell if it's actually mounted + // TODO(cpuguyh83): there should be a better way to handle this + if volumeMount.Volume != nil && volumeMount.ID != "" { + if err := volumeMount.Volume.Unmount(volumeMount.ID); err != nil { + errors = append(errors, err.Error()) + continue + } + volumeMount.ID = "" + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + } + if len(errors) > 0 { + return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; ")) + } + return nil +} + +// IsDestinationMounted checks whether a path is mounted on the container or not. +func (container *Container) IsDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +// StopSignal returns the signal used to stop the container. +func (container *Container) StopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) +} + +// StopTimeout returns the timeout (in seconds) used to stop the container. +func (container *Container) StopTimeout() int { + if container.Config.StopTimeout != nil { + return *container.Config.StopTimeout + } + return DefaultStopTimeout +} + +// InitDNSHostConfig ensures that the dns fields are never nil. +// New containers don't ever have those fields nil, +// but pre created containers can still have those nil values. +// The non-recommended host configuration in the start api can +// make these fields nil again, this corrects that issue until +// we remove that behavior for good. +// See https://github.com/docker/docker/pull/17779 +// for a more detailed explanation on why we don't want that. +func (container *Container) InitDNSHostConfig() { + container.Lock() + defer container.Unlock() + if container.HostConfig.DNS == nil { + container.HostConfig.DNS = make([]string, 0) + } + + if container.HostConfig.DNSSearch == nil { + container.HostConfig.DNSSearch = make([]string, 0) + } + + if container.HostConfig.DNSOptions == nil { + container.HostConfig.DNSOptions = make([]string, 0) + } +} + +// GetEndpointInNetwork returns the container's endpoint to the provided network. +func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { + endpointName := strings.TrimPrefix(container.Name, "/") + return n.EndpointByName(endpointName) +} + +func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + if len(networkSettings.Ports) == 0 { + pm, err := getEndpointPortMapInfo(ep) + if err != nil { + return err + } + networkSettings.Ports = pm + } + return nil +} + +func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { + pm := nat.PortMap{} + driverInfo, err := ep.DriverInfo() + if err != nil { + return pm, err + } + + if driverInfo == nil { + // It is not an error for epInfo to be nil + return pm, nil + } + + if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { + if exposedPorts, ok := expData.([]types.TransportPort); ok { + for _, tp := range exposedPorts { + natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) + if err != nil { + return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) + } + pm[natPort] = nil + } + } + } + + mapData, ok := driverInfo[netlabel.PortMap] + if !ok { + return pm, nil + } + + if portMapping, ok := mapData.([]types.PortBinding); ok { + for _, pp := range portMapping { + natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) + if err != nil { + return pm, err + } + natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} + pm[natPort] = append(pm[natPort], natBndg) + } + } + + return pm, nil +} + +// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox +func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { + pm := nat.PortMap{} + if sb == nil { + return pm + } + + for _, ep := range sb.Endpoints() { + pm, _ = getEndpointPortMapInfo(ep) + if len(pm) > 0 { + break + } + } + return pm +} + +// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. +func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + + if _, ok := networkSettings.Networks[n.Name()]; !ok { + networkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + networkSettings.Networks[n.Name()].NetworkID = n.ID() + networkSettings.Networks[n.Name()].EndpointID = ep.ID() + + iface := epInfo.Iface() + if iface == nil { + return nil + } + + if iface.MacAddress() != nil { + networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() + } + + if iface.Address() != nil { + ones, _ := iface.Address().Mask.Size() + networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() + networkSettings.Networks[n.Name()].IPPrefixLen = ones + } + + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 + } + + return nil +} + +// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. +func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.buildPortMapInfo(ep); err != nil { + return err + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + if epInfo.Gateway() != nil { + container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() + } + if epInfo.GatewayIPv6().To16() != nil { + container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() + } + + return nil +} + +// UpdateSandboxNetworkSettings updates the sandbox ID and Key. +func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { + container.NetworkSettings.SandboxID = sb.ID() + container.NetworkSettings.SandboxKey = sb.Key() + return nil +} + +// BuildJoinOptions builds endpoint Join options from a given network. +func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { + var joinOptions []libnetwork.EndpointOption + if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { + for _, str := range epConfig.Links { + name, alias, err := runconfigopts.ParseLink(str) + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) + } + } + return joinOptions, nil +} + +// BuildCreateEndpointOptions builds endpoint options from a given network. +func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) { + var ( + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + createOptions []libnetwork.EndpointOption + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + + if (!container.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) || + container.NetworkSettings.IsAnonymousEndpoint { + createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) + } + + if epConfig != nil { + ipam := epConfig.IPAMConfig + if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "" || len(ipam.LinkLocalIPs) > 0) { + var ipList []net.IP + for _, ips := range ipam.LinkLocalIPs { + if ip := net.ParseIP(ips); ip != nil { + ipList = append(ipList, ip) + } + } + createOptions = append(createOptions, + libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), ipList, nil)) + } + + for _, alias := range epConfig.Aliases { + createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) + } + } + + if container.NetworkSettings.Service != nil { + svcCfg := container.NetworkSettings.Service + + var vip string + if svcCfg.VirtualAddresses[n.ID()] != nil { + vip = svcCfg.VirtualAddresses[n.ID()].IPv4 + } + + var portConfigs []*libnetwork.PortConfig + for _, portConfig := range svcCfg.ExposedPorts { + portConfigs = append(portConfigs, &libnetwork.PortConfig{ + Name: portConfig.Name, + Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + + createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()])) + } + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) + } + + // configs that are applicable only for the endpoint in the network + // to which container was connected to on docker run. + // Ideally all these network-specific endpoint configurations must be moved under + // container.NetworkSettings.Networks[n.Name()] + if n.Name() == container.HostConfig.NetworkMode.NetworkName() || + (n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) { + if container.Config.MacAddress != "" { + mac, err := net.ParseMAC(container.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + } + + // Port-mapping rules belong to the container & applicable only to non-internal networks + portmaps := GetSandboxPortMapInfo(sb) + if n.Info().Internal() || len(portmaps) > 0 { + return createOptions, nil + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + var dns []string + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemonDNS) > 0 { + dns = daemonDNS + } + + if len(dns) > 0 { + createOptions = append(createOptions, + libnetwork.CreateOptionDNS(dns)) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionPortMapping(pbList), + libnetwork.CreateOptionExposedPorts(exposeList)) + + return createOptions, nil +} + +// UpdateMonitor updates monitor configure for running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager().(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns hostname and optional domain appended to it. +func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instance connected to container. +func (container *Container) RestartManager() restartmanager.RestartManager { + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) + } + return container.restartManager +} + +// ResetRestartManager initializes new restartmanager based on container config +func (container *Container) ResetRestartManager(resetCount bool) { + if container.restartManager != nil { + container.restartManager.Cancel() + } + if resetCount { + container.RestartCount = 0 + } + container.restartManager = nil +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initializes or returns existing context for attach calls to +// track container liveness. +func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancels attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} + +func (container *Container) startLogging() error { + if container.HostConfig.LogConfig.Type == "none" { + return nil // do not start logging routines + } + + l, err := container.StartLogger(container.HostConfig.LogConfig) + if err != nil { + return fmt.Errorf("Failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +// StdinPipe gets the stdin stream of the container +func (container *Container) StdinPipe() io.WriteCloser { + return container.StreamConfig.StdinPipe() +} + +// StdoutPipe gets the stdout stream of the container +func (container *Container) StdoutPipe() io.ReadCloser { + return container.StreamConfig.StdoutPipe() +} + +// StderrPipe gets the stderr stream of the container +func (container *Container) StderrPipe() io.ReadCloser { + return container.StreamConfig.StderrPipe() +} + +// CloseStreams closes the container's stdio streams +func (container *Container) CloseStreams() error { + return container.StreamConfig.CloseStreams() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error { + if err := container.startLogging(); err != nil { + container.Reset(false) + return err + } + + container.StreamConfig.CopyToPipe(iop) + + if container.StreamConfig.Stdin() == nil && !container.Config.Tty { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("error closing stdin: %+v", err) + } + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/container/container_linux.go b/vendor/github.com/moby/moby/container/container_linux.go new file mode 100644 index 0000000..4d4c16b --- /dev/null +++ b/vendor/github.com/moby/moby/container/container_linux.go @@ -0,0 +1,9 @@ +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + return unix.Unmount(path, unix.MNT_DETACH) +} diff --git a/vendor/github.com/moby/moby/container/container_notlinux.go b/vendor/github.com/moby/moby/container/container_notlinux.go new file mode 100644 index 0000000..f65653e --- /dev/null +++ b/vendor/github.com/moby/moby/container/container_notlinux.go @@ -0,0 +1,23 @@ +// +build solaris freebsd + +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + //Solaris and FreeBSD do not support the lazy unmount or MNT_DETACH feature. + // Therefore there are separate definitions for this. + return unix.Unmount(path, 0) +} + +// SecretMount returns the mount for the secret path +func (container *Container) SecretMount() *Mount { + return nil +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + return nil +} diff --git a/vendor/github.com/moby/moby/container/container_unit_test.go b/vendor/github.com/moby/moby/container/container_unit_test.go new file mode 100644 index 0000000..f301f25 --- /dev/null +++ b/vendor/github.com/moby/moby/container/container_unit_test.go @@ -0,0 +1,60 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/signal" +) + +func TestContainerStopSignal(t *testing.T) { + c := &Container{ + CommonContainer: CommonContainer{ + Config: &container.Config{}, + }, + } + + def, err := signal.ParseSignal(signal.DefaultStopSignal) + if err != nil { + t.Fatal(err) + } + + s := c.StopSignal() + if s != int(def) { + t.Fatalf("Expected %v, got %v", def, s) + } + + c = &Container{ + CommonContainer: CommonContainer{ + Config: &container.Config{StopSignal: "SIGKILL"}, + }, + } + s = c.StopSignal() + if s != 9 { + t.Fatalf("Expected 9, got %v", s) + } +} + +func TestContainerStopTimeout(t *testing.T) { + c := &Container{ + CommonContainer: CommonContainer{ + Config: &container.Config{}, + }, + } + + s := c.StopTimeout() + if s != DefaultStopTimeout { + t.Fatalf("Expected %v, got %v", DefaultStopTimeout, s) + } + + stopTimeout := 15 + c = &Container{ + CommonContainer: CommonContainer{ + Config: &container.Config{StopTimeout: &stopTimeout}, + }, + } + s = c.StopSignal() + if s != 15 { + t.Fatalf("Expected 15, got %v", s) + } +} diff --git a/vendor/github.com/moby/moby/container/container_unix.go b/vendor/github.com/moby/moby/container/container_unix.go new file mode 100644 index 0000000..4f6b795 --- /dev/null +++ b/vendor/github.com/moby/moby/container/container_unix.go @@ -0,0 +1,448 @@ +// +build linux freebsd solaris + +package container + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/label" + "golang.org/x/sys/unix" +) + +const ( + // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container + DefaultSHMSize int64 = 67108864 + containerSecretMountPath = "/run/secrets" +) + +// Container holds the fields specific to unixen implementations. +// See CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. + OOMKilled bool +} + +// CreateDaemonEnvironment returns the list of all environment variables given the list of +// environment variables related to links. +// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. +// The defaults set here do not override the values in container.Config.Env +func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { + // Setup environment + env := []string{ + "PATH=" + system.DefaultPathEnv, + "HOSTNAME=" + container.Config.Hostname, + } + if tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// NetworkMounts returns the list of network mounts. +func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + if !container.HasMountFor("/etc/resolv.conf") { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + if !container.HasMountFor("/etc/hostname") { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + if !container.HasMountFor("/etc/hosts") { + label.Relabel(container.HostsPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + return mounts +} + +// SecretMountPath returns the path of the secret mount for the container +func (container *Container) SecretMountPath() string { + return filepath.Join(container.Root, "secrets") +} + +// CopyImagePathContent copies files in destination to the volume. +func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + id := stringid.GenerateNonCryptoID() + path, err := v.Mount(id) + if err != nil { + return err + } + + defer func() { + if err := v.Unmount(id); err != nil { + logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + } + }() + if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP { + return err + } + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.GetRootResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + return exists +} + +// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { + if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { + return + } + + var warnings []string + + if !container.HasMountFor("/dev/shm") { + shmPath, err := container.ShmResourcePath() + if err != nil { + logrus.Error(err) + warnings = append(warnings, err.Error()) + } else if shmPath != "" { + if err := unmount(shmPath); err != nil && !os.IsNotExist(err) { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) + } + + } + } + + if len(warnings) > 0 { + logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + } +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + + if !container.HasMountFor("/dev/shm") { + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: string(volume.DefaultPropagationMode), + }) + } + + return mounts +} + +// SecretMount returns the mount for the secret path +func (container *Container) SecretMount() *Mount { + if len(container.SecretReferences) > 0 { + return &Mount{ + Source: container.SecretMountPath(), + Destination: containerSecretMountPath, + Writable: false, + } + } + + return nil +} + +// UnmountSecrets unmounts the local tmpfs for secrets +func (container *Container) UnmountSecrets() error { + if _, err := os.Stat(container.SecretMountPath()); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return detachMounted(container.SecretMountPath()) +} + +// UpdateContainer updates configuration of a container. +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + + // update resources of container + resources := hostConfig.Resources + cResources := &container.HostConfig.Resources + if resources.BlkioWeight != 0 { + cResources.BlkioWeight = resources.BlkioWeight + } + if resources.CPUShares != 0 { + cResources.CPUShares = resources.CPUShares + } + if resources.CPUPeriod != 0 { + cResources.CPUPeriod = resources.CPUPeriod + } + if resources.CPUQuota != 0 { + cResources.CPUQuota = resources.CPUQuota + } + if resources.CpusetCpus != "" { + cResources.CpusetCpus = resources.CpusetCpus + } + if resources.CpusetMems != "" { + cResources.CpusetMems = resources.CpusetMems + } + if resources.Memory != 0 { + // if memory limit smaller than already set memoryswap limit and doesn't + // update the memoryswap limit, then error out. + if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 { + return fmt.Errorf("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time") + } + cResources.Memory = resources.Memory + } + if resources.MemorySwap != 0 { + cResources.MemorySwap = resources.MemorySwap + } + if resources.MemoryReservation != 0 { + cResources.MemoryReservation = resources.MemoryReservation + } + if resources.KernelMemory != 0 { + cResources.KernelMemory = resources.KernelMemory + } + + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving updated container: %v", err) + return err + } + + return nil +} + +// DetachAndUnmount uses a detached mount on all mount destinations, then +// unmounts each volume normally. +// This is used from daemon/archive for `docker cp` +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + networkMounts := container.NetworkMounts() + mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts)) + + for _, mntPoint := range container.MountPoints { + dest, err := container.GetResourcePath(mntPoint.Destination) + if err != nil { + logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err) + continue + } + mountPaths = append(mountPaths, dest) + } + + for _, m := range networkMounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err) + continue + } + mountPaths = append(mountPaths, dest) + } + + for _, mountPath := range mountPaths { + if err := detachMounted(mountPath); err != nil { + logrus.Warnf("%s unmountVolumes: Failed to do lazy umount fo volume '%s': %v", container.ID, mountPath, err) + } + } + return container.UnmountVolumes(volumeEventLog) +} + +// copyExistingContents copies from the source to the destination and +// ensures the ownership is appropriately set. +func copyExistingContents(source, destination string) error { + volList, err := ioutil.ReadDir(source) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the source volume is empty, copies files from the root into the volume + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + } + } + return copyOwnership(source, destination) +} + +// copyOwnership copies the permissions and uid:gid of the source file +// to the destination file +func copyOwnership(source, destination string) error { + stat, err := system.Stat(source) + if err != nil { + return err + } + + if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { + return err + } + + return os.Chmod(destination, os.FileMode(stat.Mode())) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + for dest, data := range container.HostConfig.Tmpfs { + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + for dest, mnt := range container.MountPoints { + if mnt.Type == mounttypes.TypeTmpfs { + data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + } + return mounts, nil +} + +// cleanResourcePath cleans a resource path and prepares to combine with mnt path +func cleanResourcePath(path string) string { + return filepath.Join(string(os.PathSeparator), path) +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return false +} diff --git a/vendor/github.com/moby/moby/container/container_windows.go b/vendor/github.com/moby/moby/container/container_windows.go new file mode 100644 index 0000000..1025836 --- /dev/null +++ b/vendor/github.com/moby/moby/container/container_windows.go @@ -0,0 +1,111 @@ +// +build windows + +package container + +import ( + "fmt" + "os" + "path/filepath" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/utils" +) + +// Container holds fields specific to the Windows implementation. See +// CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int +} + +// CreateDaemonEnvironment creates a new environment variable slice for this container. +func (container *Container) CreateDaemonEnvironment(_ bool, linkedEnv []string) []string { + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + return utils.ReplaceOrAppendEnvValues(linkedEnv, container.Config.Env) +} + +// UnmountIpcMounts unmounts Ipc related mounts. +// This is a NOOP on windows. +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { +} + +// IpcMounts returns the list of Ipc related mounts. +func (container *Container) IpcMounts() []Mount { + return nil +} + +// SecretMount returns the mount for the secret path +func (container *Container) SecretMount() *Mount { + return nil +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + return nil +} + +// DetachAndUnmount unmounts all volumes. +// On Windows it only delegates to `UnmountVolumes` since there is nothing to +// force unmount. +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + return container.UnmountVolumes(volumeEventLog) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + return mounts, nil +} + +// UpdateContainer updates configuration of a container +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + resources := hostConfig.Resources + if resources.BlkioWeight != 0 || resources.CPUShares != 0 || + resources.CPUPeriod != 0 || resources.CPUQuota != 0 || + resources.CpusetCpus != "" || resources.CpusetMems != "" || + resources.Memory != 0 || resources.MemorySwap != 0 || + resources.MemoryReservation != 0 || resources.KernelMemory != 0 { + return fmt.Errorf("Resource updating isn't supported on Windows") + } + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + return nil +} + +// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares +// to combine with a volume path +func cleanResourcePath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(os.PathSeparator), path) +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + return nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return true +} diff --git a/vendor/github.com/moby/moby/container/health.go b/vendor/github.com/moby/moby/container/health.go new file mode 100644 index 0000000..6e3cd12 --- /dev/null +++ b/vendor/github.com/moby/moby/container/health.go @@ -0,0 +1,49 @@ +package container + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" +) + +// Health holds the current container health-check state +type Health struct { + types.Health + stop chan struct{} // Write struct{} to stop the monitor +} + +// String returns a human-readable description of the health-check state +func (s *Health) String() string { + // This happens when the container is being shutdown and the monitor has stopped + // or the monitor has yet to be setup. + if s.stop == nil { + return types.Unhealthy + } + + switch s.Status { + case types.Starting: + return "health: starting" + default: // Healthy and Unhealthy are clear on their own + return s.Status + } +} + +// OpenMonitorChannel creates and returns a new monitor channel. If there already is one, +// it returns nil. +func (s *Health) OpenMonitorChannel() chan struct{} { + if s.stop == nil { + logrus.Debug("OpenMonitorChannel") + s.stop = make(chan struct{}) + return s.stop + } + return nil +} + +// CloseMonitorChannel closes any existing monitor channel. +func (s *Health) CloseMonitorChannel() { + if s.stop != nil { + logrus.Debug("CloseMonitorChannel: waiting for probe to stop") + close(s.stop) + s.stop = nil + logrus.Debug("CloseMonitorChannel done") + } +} diff --git a/vendor/github.com/moby/moby/container/history.go b/vendor/github.com/moby/moby/container/history.go new file mode 100644 index 0000000..c80c2aa --- /dev/null +++ b/vendor/github.com/moby/moby/container/history.go @@ -0,0 +1,30 @@ +package container + +import "sort" + +// History is a convenience type for storing a list of containers, +// sorted by creation date in descendant order. +type History []*Container + +// Len returns the number of containers in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two containers and returns true if the second one +// was created before the first one. +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +// Swap switches containers i and j positions in the history. +func (history *History) Swap(i, j int) { + containers := *history + containers[i], containers[j] = containers[j], containers[i] +} + +// sort orders the history by creation date in descendant order. +func (history *History) sort() { + sort.Sort(history) +} diff --git a/vendor/github.com/moby/moby/container/memory_store.go b/vendor/github.com/moby/moby/container/memory_store.go new file mode 100644 index 0000000..706407a --- /dev/null +++ b/vendor/github.com/moby/moby/container/memory_store.go @@ -0,0 +1,95 @@ +package container + +import ( + "sync" +) + +// memoryStore implements a Store in memory. +type memoryStore struct { + s map[string]*Container + sync.RWMutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Store { + return &memoryStore{ + s: make(map[string]*Container), + } +} + +// Add appends a new container to the memory store. +// It overrides the id if it existed before. +func (c *memoryStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a container from the store by id. +func (c *memoryStore) Get(id string) *Container { + var res *Container + c.RLock() + res = c.s[id] + c.RUnlock() + return res +} + +// Delete removes a container from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of containers from the store. +// The containers are ordered by creation date. +func (c *memoryStore) List() []*Container { + containers := History(c.all()) + containers.sort() + return containers +} + +// Size returns the number of containers in the store. +func (c *memoryStore) Size() int { + c.RLock() + defer c.RUnlock() + return len(c.s) +} + +// First returns the first container found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Container { + for _, cont := range c.all() { + if filter(cont) { + return cont + } + } + return nil +} + +// ApplyAll calls the reducer function with every container in the store. +// This operation is asynchronous in the memory store. +// NOTE: Modifications to the store MUST NOT be done by the StoreReducer. +func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Container { + c.RLock() + containers := make([]*Container, 0, len(c.s)) + for _, cont := range c.s { + containers = append(containers, cont) + } + c.RUnlock() + return containers +} + +var _ Store = &memoryStore{} diff --git a/vendor/github.com/moby/moby/container/memory_store_test.go b/vendor/github.com/moby/moby/container/memory_store_test.go new file mode 100644 index 0000000..f81738f --- /dev/null +++ b/vendor/github.com/moby/moby/container/memory_store_test.go @@ -0,0 +1,106 @@ +package container + +import ( + "testing" + "time" +) + +func TestNewMemoryStore(t *testing.T) { + s := NewMemoryStore() + m, ok := s.(*memoryStore) + if !ok { + t.Fatalf("store is not a memory store %v", s) + } + if m.s == nil { + t.Fatal("expected store map to not be nil") + } +} + +func TestAddContainers(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + if s.Size() != 1 { + t.Fatalf("expected store size 1, got %v", s.Size()) + } +} + +func TestGetContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + c := s.Get("id") + if c == nil { + t.Fatal("expected container to not be nil") + } +} + +func TestDeleteContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + s.Delete("id") + if c := s.Get("id"); c != nil { + t.Fatalf("expected container to be nil after removal, got %v", c) + } + + if s.Size() != 0 { + t.Fatalf("expected store size to be 0, got %v", s.Size()) + } +} + +func TestListContainers(t *testing.T) { + s := NewMemoryStore() + + cont := NewBaseContainer("id", "root") + cont.Created = time.Now() + cont2 := NewBaseContainer("id2", "root") + cont2.Created = time.Now().Add(24 * time.Hour) + + s.Add("id", cont) + s.Add("id2", cont2) + + list := s.List() + if len(list) != 2 { + t.Fatalf("expected list size 2, got %v", len(list)) + } + if list[0].ID != "id2" { + t.Fatalf("expected older container to be first, got %v", list[0].ID) + } +} + +func TestFirstContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + first := s.First(func(cont *Container) bool { + return cont.ID == "id2" + }) + + if first == nil { + t.Fatal("expected container to not be nil") + } + if first.ID != "id2" { + t.Fatalf("expected id2, got %v", first) + } +} + +func TestApplyAllContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + s.ApplyAll(func(cont *Container) { + if cont.ID == "id2" { + cont.ID = "newID" + } + }) + + cont := s.Get("id2") + if cont == nil { + t.Fatal("expected container to not be nil") + } + if cont.ID != "newID" { + t.Fatalf("expected newID, got %v", cont) + } +} diff --git a/vendor/github.com/moby/moby/container/monitor.go b/vendor/github.com/moby/moby/container/monitor.go new file mode 100644 index 0000000..f05e72b --- /dev/null +++ b/vendor/github.com/moby/moby/container/monitor.go @@ -0,0 +1,46 @@ +package container + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + loggerCloseTimeout = 10 * time.Second +) + +// Reset puts a container into a state where it can be restarted again. +func (container *Container) Reset(lock bool) { + if lock { + container.Lock() + defer container.Unlock() + } + + if err := container.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", container.ID, err) + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.StreamConfig.NewInputPipes() + } + + if container.LogDriver != nil { + if container.LogCopier != nil { + exit := make(chan struct{}) + go func() { + container.LogCopier.Wait() + close(exit) + }() + select { + case <-time.After(loggerCloseTimeout): + logrus.Warn("Logger didn't exit in time: logs may be truncated") + case <-exit: + } + } + container.LogDriver.Close() + container.LogCopier = nil + container.LogDriver = nil + } +} diff --git a/vendor/github.com/moby/moby/container/mounts_unix.go b/vendor/github.com/moby/moby/container/mounts_unix.go new file mode 100644 index 0000000..c52abed --- /dev/null +++ b/vendor/github.com/moby/moby/container/mounts_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package container + +// Mount contains information for a mount operation. +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Data string `json:"data"` + Propagation string `json:"mountpropagation"` +} diff --git a/vendor/github.com/moby/moby/container/mounts_windows.go b/vendor/github.com/moby/moby/container/mounts_windows.go new file mode 100644 index 0000000..01b327f --- /dev/null +++ b/vendor/github.com/moby/moby/container/mounts_windows.go @@ -0,0 +1,8 @@ +package container + +// Mount contains information for a mount operation. +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` +} diff --git a/vendor/github.com/moby/moby/container/state.go b/vendor/github.com/moby/moby/container/state.go new file mode 100644 index 0000000..4dd2ece --- /dev/null +++ b/vendor/github.com/moby/moby/container/state.go @@ -0,0 +1,343 @@ +package container + +import ( + "fmt" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/go-units" +) + +// State holds the current container state, and has methods to get and +// set the state. Container has an embed, which allows all of the +// functions defined against State to run against Container. +type State struct { + sync.Mutex + // FIXME: Why do we have both paused and running if a + // container cannot be paused and running at the same time? + Running bool + Paused bool + Restarting bool + OOMKilled bool + RemovalInProgress bool // Not need for this to be persistent on disk. + Dead bool + Pid int + ExitCodeValue int `json:"ExitCode"` + ErrorMsg string `json:"Error"` // contains last known error when starting the container + StartedAt time.Time + FinishedAt time.Time + waitChan chan struct{} + Health *Health +} + +// StateStatus is used to return an error type implementing both +// exec.ExitCode and error. +// This type is needed as State include a sync.Mutex field which make +// copying it unsafe. +type StateStatus struct { + exitCode int + error string +} + +func newStateStatus(ec int, err string) *StateStatus { + return &StateStatus{ + exitCode: ec, + error: err, + } +} + +// ExitCode returns current exitcode for the state. +func (ss *StateStatus) ExitCode() int { + return ss.exitCode +} + +// Error returns current error for the state. +func (ss *StateStatus) Error() string { + return ss.error +} + +// NewState creates a default state object with a fresh channel for state changes. +func NewState() *State { + return &State{ + waitChan: make(chan struct{}), + } +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + if h := s.Health; h != nil { + return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String()) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.RemovalInProgress { + return "Removal In Progress" + } + + if s.Dead { + return "Dead" + } + + if s.StartedAt.IsZero() { + return "Created" + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// HealthString returns a single string to describe health status. +func (s *State) HealthString() string { + if s.Health == nil { + return types.NoHealthcheck + } + + return s.Health.String() +} + +// IsValidHealthString checks if the provided string is a valid container health status or not. +func IsValidHealthString(s string) bool { + return s == types.Starting || + s == types.Healthy || + s == types.Unhealthy || + s == types.NoHealthcheck +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + + if s.RemovalInProgress { + return "removing" + } + + if s.Dead { + return "dead" + } + + if s.StartedAt.IsZero() { + return "created" + } + + return "exited" +} + +// IsValidStateString checks if the provided string is a valid container state or not. +func IsValidStateString(s string) bool { + if s != "paused" && + s != "restarting" && + s != "removing" && + s != "running" && + s != "dead" && + s != "created" && + s != "exited" { + return false + } + return true +} + +func wait(waitChan <-chan struct{}, timeout time.Duration) error { + if timeout < 0 { + <-waitChan + return nil + } + select { + case <-time.After(timeout): + return fmt.Errorf("Timed out: %v", timeout) + case <-waitChan: + return nil + } +} + +// WaitStop waits until state is stopped. If state already stopped it returns +// immediately. If you want wait forever you must supply negative timeout. +// Returns exit code, that was passed to SetStopped +func (s *State) WaitStop(timeout time.Duration) (int, error) { + s.Lock() + if !s.Running { + exitCode := s.ExitCodeValue + s.Unlock() + return exitCode, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + s.Lock() + defer s.Unlock() + return s.ExitCode(), nil +} + +// WaitWithContext waits for the container to stop. Optional context can be +// passed for canceling the request. +func (s *State) WaitWithContext(ctx context.Context) error { + // todo(tonistiigi): make other wait functions use this + s.Lock() + if !s.Running { + state := newStateStatus(s.ExitCode(), s.Error()) + defer s.Unlock() + if state.ExitCode() == 0 { + return nil + } + return state + } + waitChan := s.waitChan + s.Unlock() + select { + case <-waitChan: + s.Lock() + state := newStateStatus(s.ExitCode(), s.Error()) + s.Unlock() + if state.ExitCode() == 0 { + return nil + } + return state + case <-ctx.Done(): + return ctx.Err() + } +} + +// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. +func (s *State) IsRunning() bool { + s.Lock() + res := s.Running + s.Unlock() + return res +} + +// GetPID holds the process id of a container. +func (s *State) GetPID() int { + s.Lock() + res := s.Pid + s.Unlock() + return res +} + +// ExitCode returns current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) ExitCode() int { + return s.ExitCodeValue +} + +// SetExitCode sets current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) SetExitCode(ec int) { + s.ExitCodeValue = ec +} + +// SetRunning sets the state of the container to "running". +func (s *State) SetRunning(pid int, initial bool) { + s.ErrorMsg = "" + s.Running = true + s.Restarting = false + s.ExitCodeValue = 0 + s.Pid = pid + if initial { + s.StartedAt = time.Now().UTC() + } +} + +// SetStopped sets the container state to "stopped" without locking. +func (s *State) SetStopped(exitStatus *ExitStatus) { + s.Running = false + s.Paused = false + s.Restarting = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetRestarting sets the container state to "restarting" without locking. +// It also sets the container PID to 0. +func (s *State) SetRestarting(exitStatus *ExitStatus) { + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetError sets the container's error state. This is useful when we want to +// know the error that occurred when container transits to another state +// when inspecting it +func (s *State) SetError(err error) { + s.ErrorMsg = err.Error() +} + +// IsPaused returns whether the container is paused or not. +func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} + +// IsRestarting returns whether the container is restarting or not. +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + +// SetRemovalInProgress sets the container state as being removed. +// It returns true if the container was already in that state. +func (s *State) SetRemovalInProgress() bool { + s.Lock() + defer s.Unlock() + if s.RemovalInProgress { + return true + } + s.RemovalInProgress = true + return false +} + +// ResetRemovalInProgress makes the RemovalInProgress state to false. +func (s *State) ResetRemovalInProgress() { + s.Lock() + s.RemovalInProgress = false + s.Unlock() +} + +// SetDead sets the container state to "dead" +func (s *State) SetDead() { + s.Lock() + s.Dead = true + s.Unlock() +} + +// Error returns current error for the state. +func (s *State) Error() string { + return s.ErrorMsg +} diff --git a/vendor/github.com/moby/moby/container/state_solaris.go b/vendor/github.com/moby/moby/container/state_solaris.go new file mode 100644 index 0000000..1229650 --- /dev/null +++ b/vendor/github.com/moby/moby/container/state_solaris.go @@ -0,0 +1,7 @@ +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. +func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCodeValue = exitStatus.ExitCode +} diff --git a/vendor/github.com/moby/moby/container/state_test.go b/vendor/github.com/moby/moby/container/state_test.go new file mode 100644 index 0000000..c9a7bb4 --- /dev/null +++ b/vendor/github.com/moby/moby/container/state_test.go @@ -0,0 +1,113 @@ +package container + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/api/types" +) + +func TestIsValidHealthString(t *testing.T) { + contexts := []struct { + Health string + Expected bool + }{ + {types.Healthy, true}, + {types.Unhealthy, true}, + {types.Starting, true}, + {types.NoHealthcheck, true}, + {"fail", false}, + } + + for _, c := range contexts { + v := IsValidHealthString(c.Health) + if v != c.Expected { + t.Fatalf("Expected %t, but got %t", c.Expected, v) + } + } +} + +func TestStateRunStop(t *testing.T) { + s := NewState() + for i := 1; i < 3; i++ { // full lifecycle two times + s.Lock() + s.SetRunning(i+100, false) + s.Unlock() + + if !s.IsRunning() { + t.Fatal("State not running") + } + if s.Pid != i+100 { + t.Fatalf("Pid %v, expected %v", s.Pid, i+100) + } + if s.ExitCode() != 0 { + t.Fatalf("ExitCode %v, expected 0", s.ExitCode()) + } + + stopped := make(chan struct{}) + var exit int64 + go func() { + exitCode, _ := s.WaitStop(-1 * time.Second) + atomic.StoreInt64(&exit, int64(exitCode)) + close(stopped) + }() + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: i}) + s.Unlock() + if s.IsRunning() { + t.Fatal("State is running") + } + if s.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i) + } + if s.Pid != 0 { + t.Fatalf("Pid %v, expected 0", s.Pid) + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + exitCode := int(atomic.LoadInt64(&exit)) + if exitCode != i { + t.Fatalf("ExitCode %v, expected %v", exitCode, i) + } + if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { + t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) + } + } +} + +func TestStateTimeoutWait(t *testing.T) { + s := NewState() + stopped := make(chan struct{}) + go func() { + s.WaitStop(100 * time.Millisecond) + close(stopped) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 200 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: 1}) + s.Unlock() + + stopped = make(chan struct{}) + go func() { + s.WaitStop(100 * time.Millisecond) + close(stopped) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + +} diff --git a/vendor/github.com/moby/moby/container/state_unix.go b/vendor/github.com/moby/moby/container/state_unix.go new file mode 100644 index 0000000..a2fa5af --- /dev/null +++ b/vendor/github.com/moby/moby/container/state_unix.go @@ -0,0 +1,10 @@ +// +build linux freebsd + +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. +func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCodeValue = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled +} diff --git a/vendor/github.com/moby/moby/container/state_windows.go b/vendor/github.com/moby/moby/container/state_windows.go new file mode 100644 index 0000000..1229650 --- /dev/null +++ b/vendor/github.com/moby/moby/container/state_windows.go @@ -0,0 +1,7 @@ +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. +func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCodeValue = exitStatus.ExitCode +} diff --git a/vendor/github.com/moby/moby/container/store.go b/vendor/github.com/moby/moby/container/store.go new file mode 100644 index 0000000..042fb1a --- /dev/null +++ b/vendor/github.com/moby/moby/container/store.go @@ -0,0 +1,28 @@ +package container + +// StoreFilter defines a function to filter +// container in the store. +type StoreFilter func(*Container) bool + +// StoreReducer defines a function to +// manipulate containers in the store +type StoreReducer func(*Container) + +// Store defines an interface that +// any container store must implement. +type Store interface { + // Add appends a new container to the store. + Add(string, *Container) + // Get returns a container from the store by the identifier it was stored with. + Get(string) *Container + // Delete removes a container from the store by the identifier it was stored with. + Delete(string) + // List returns a list of containers from the store. + List() []*Container + // Size returns the number of containers in the store. + Size() int + // First returns the first container found in the store by a given filter. + First(StoreFilter) *Container + // ApplyAll calls the reducer function with every container in the store. + ApplyAll(StoreReducer) +} diff --git a/vendor/github.com/moby/moby/container/stream/streams.go b/vendor/github.com/moby/moby/container/stream/streams.go new file mode 100644 index 0000000..79f366a --- /dev/null +++ b/vendor/github.com/moby/moby/container/stream/streams.go @@ -0,0 +1,143 @@ +package stream + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" +) + +// Config holds information about I/O streams managed together. +// +// config.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// config.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type Config struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. +func NewConfig() *Config { + return &Config{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (c *Config) Stdout() *broadcaster.Unbuffered { + return c.stdout +} + +// Stderr returns the standard error in the configuration. +func (c *Config) Stderr() *broadcaster.Unbuffered { + return c.stderr +} + +// Stdin returns the standard input in the configuration. +func (c *Config) Stdin() io.ReadCloser { + return c.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (c *Config) StdinPipe() io.WriteCloser { + return c.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +func (c *Config) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. +func (c *Config) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (c *Config) NewInputPipes() { + c.stdin, c.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (c *Config) NewNopInputPipe() { + c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. +func (c *Config) CloseStreams() error { + var errors []string + + if c.stdin != nil { + if err := c.stdin.Close(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + } + } + + if err := c.stdout.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + } + + if err := c.stderr.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + + return nil +} + +// CopyToPipe connects streamconfig with a libcontainerd.IOPipe +func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) { + copyFunc := func(w io.Writer, r io.Reader) { + c.Add(1) + go func() { + if _, err := pools.Copy(w, r); err != nil { + logrus.Errorf("stream copy error: %+v", err) + } + c.Done() + }() + } + + if iop.Stdout != nil { + copyFunc(c.Stdout(), iop.Stdout) + } + if iop.Stderr != nil { + copyFunc(c.Stderr(), iop.Stderr) + } + + if stdin := c.Stdin(); stdin != nil { + if iop.Stdin != nil { + go func() { + pools.Copy(iop.Stdin, stdin) + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("failed to close stdin: %+v", err) + } + }() + } + } +} diff --git a/vendor/github.com/moby/moby/contrib/README.md b/vendor/github.com/moby/moby/contrib/README.md new file mode 100644 index 0000000..92b1d94 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/README.md @@ -0,0 +1,4 @@ +The `contrib` directory contains scripts, images, and other helpful things +which are not part of the core docker distribution. Please note that they +could be out of date, since they do not receive the same attention as the +rest of the repository. diff --git a/vendor/github.com/moby/moby/contrib/REVIEWERS b/vendor/github.com/moby/moby/contrib/REVIEWERS new file mode 100644 index 0000000..18e05a3 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/REVIEWERS @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff --git a/vendor/github.com/moby/moby/contrib/apparmor/main.go b/vendor/github.com/moby/moby/contrib/apparmor/main.go new file mode 100644 index 0000000..f4a2978 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/apparmor/main.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "log" + "os" + "path" + "text/template" + + "github.com/docker/docker/pkg/aaparser" +) + +type profileData struct { + Version int +} + +func main() { + if len(os.Args) < 2 { + log.Fatal("pass a filename to save the profile in.") + } + + // parse the arg + apparmorProfilePath := os.Args[1] + + version, err := aaparser.GetVersion() + if err != nil { + log.Fatal(err) + } + data := profileData{ + Version: version, + } + fmt.Printf("apparmor_parser is of version %+v\n", data) + + // parse the template + compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate) + if err != nil { + log.Fatalf("parsing template failed: %v", err) + } + + // make sure /etc/apparmor.d exists + if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil { + log.Fatal(err) + } + + f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + log.Fatal(err) + } + defer f.Close() + + if err := compiled.Execute(f, data); err != nil { + log.Fatalf("executing template failed: %v", err) + } + + fmt.Printf("created apparmor profile for version %+v at %q\n", data, apparmorProfilePath) +} diff --git a/vendor/github.com/moby/moby/contrib/apparmor/template.go b/vendor/github.com/moby/moby/contrib/apparmor/template.go new file mode 100644 index 0000000..e5e1c8b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/apparmor/template.go @@ -0,0 +1,268 @@ +package main + +const dockerProfileTemplate = `@{DOCKER_GRAPH_PATH}=/var/lib/docker + +profile /usr/bin/docker (attach_disconnected, complain) { + # Prevent following links to these files during container setup. + deny /etc/** mkl, + deny /dev/** kl, + deny /sys/** mkl, + deny /proc/** mkl, + + mount -> @{DOCKER_GRAPH_PATH}/**, + mount -> /, + mount -> /proc/**, + mount -> /sys/**, + mount -> /run/docker/netns/**, + mount -> /.pivot_root[0-9]*/, + + / r, + + umount, + pivot_root, +{{if ge .Version 209000}} + signal (receive) peer=@{profile_name}, + signal (receive) peer=unconfined, + signal (send), +{{end}} + network, + capability, + owner /** rw, + @{DOCKER_GRAPH_PATH}/** rwl, + @{DOCKER_GRAPH_PATH}/linkgraph.db k, + @{DOCKER_GRAPH_PATH}/network/files/boltdb.db k, + @{DOCKER_GRAPH_PATH}/network/files/local-kv.db k, + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/linkgraph.db k, + + # For non-root client use: + /dev/urandom r, + /dev/null rw, + /dev/pts/[0-9]* rw, + /run/docker.sock rw, + /proc/** r, + /proc/[0-9]*/attr/exec w, + /sys/kernel/mm/hugepages/ r, + /etc/localtime r, + /etc/ld.so.cache r, + /etc/passwd r, + +{{if ge .Version 209000}} + ptrace peer=@{profile_name}, + ptrace (read) peer=docker-default, + deny ptrace (trace) peer=docker-default, + deny ptrace peer=/usr/bin/docker///bin/ps, +{{end}} + + /usr/lib/** rm, + /lib/** rm, + + /usr/bin/docker pix, + /sbin/xtables-multi rCx, + /sbin/iptables rCx, + /sbin/modprobe rCx, + /sbin/auplink rCx, + /sbin/mke2fs rCx, + /sbin/tune2fs rCx, + /sbin/blkid rCx, + /bin/kmod rCx, + /usr/bin/xz rCx, + /bin/ps rCx, + /bin/tar rCx, + /bin/cat rCx, + /sbin/zfs rCx, + /sbin/apparmor_parser rCx, + +{{if ge .Version 209000}} + # Transitions + change_profile -> docker-*, + change_profile -> unconfined, +{{end}} + + profile /bin/cat (complain) { + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /proc r, + /bin/cat mr, + + # For reading in 'docker stats': + /proc/[0-9]*/net/dev r, + } + profile /bin/ps (complain) { + /etc/ld.so.cache r, + /etc/localtime r, + /etc/passwd r, + /etc/nsswitch.conf r, + /lib/** rm, + /proc/[0-9]*/** r, + /dev/null rw, + /bin/ps mr, + +{{if ge .Version 209000}} + # We don't need ptrace so we'll deny and ignore the error. + deny ptrace (read, trace), +{{end}} + + # Quiet dac_override denials + deny capability dac_override, + deny capability dac_read_search, + deny capability sys_ptrace, + + /dev/tty r, + /proc/stat r, + /proc/cpuinfo r, + /proc/meminfo r, + /proc/uptime r, + /sys/devices/system/cpu/online r, + /proc/sys/kernel/pid_max r, + /proc/ r, + /proc/tty/drivers r, + } + profile /sbin/iptables (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability net_admin, + } + profile /sbin/auplink flags=(attach_disconnected, complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_admin, + capability dac_override, + + @{DOCKER_GRAPH_PATH}/aufs/** rw, + @{DOCKER_GRAPH_PATH}/tmp/** rw, + # For user namespaces: + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, + + /sys/fs/aufs/** r, + /lib/** rm, + /apparmor/.null r, + /dev/null rw, + /etc/ld.so.cache r, + /sbin/auplink rm, + /proc/fs/aufs/** rw, + /proc/[0-9]*/mounts rw, + } + profile /sbin/modprobe /bin/kmod (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_module, + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /apparmor/.null rw, + /sbin/modprobe rm, + /bin/kmod rm, + /proc/cmdline r, + /sys/module/** r, + /etc/modprobe.d{/,/**} r, + } + # xz works via pipes, so we do not need access to the filesystem. + profile /usr/bin/xz (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + /etc/ld.so.cache r, + /lib/** rm, + /usr/bin/xz rm, + deny /proc/** rw, + deny /sys/** rw, + } + profile /sbin/xtables-multi (attach_disconnected, complain) { + /etc/ld.so.cache r, + /lib/** rm, + /sbin/xtables-multi rm, + /apparmor/.null w, + /dev/null rw, + + /proc r, + + capability net_raw, + capability net_admin, + network raw, + } + profile /sbin/zfs (attach_disconnected, complain) { + file, + capability, + } + profile /sbin/mke2fs (complain) { + /sbin/mke2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/mke2fs.conf r, + /etc/mtab r, + + /dev/dm-* rw, + /dev/urandom r, + /dev/null rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/tune2fs (complain) { + /sbin/tune2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/blkid.conf r, + /etc/mtab r, + /etc/ld.so.cache r, + + /dev/null rw, + /dev/.blkid.tab r, + /dev/dm-* rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/blkid (complain) { + /sbin/blkid rm, + + /lib/** rm, + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/blkid.conf r, + + /dev/null rw, + /dev/.blkid.tab rl, + /dev/.blkid.tab* rwl, + /dev/dm-* r, + + /sys/devices/virtual/block/** r, + + capability mknod, + + mount -> @{DOCKER_GRAPH_PATH}/**, + } + profile /sbin/apparmor_parser (complain) { + /sbin/apparmor_parser rm, + + /lib/** rm, + + /etc/ld.so.cache r, + /etc/apparmor/** r, + /etc/apparmor.d/** r, + /etc/apparmor.d/cache/** w, + + /dev/null rw, + + /sys/kernel/security/apparmor/** r, + /sys/kernel/security/apparmor/.replace w, + + /proc/[0-9]*/mounts r, + /proc/sys/kernel/osrelease r, + /proc r, + + capability mac_admin, + } +}` diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/build.sh b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/build.sh new file mode 100755 index 0000000..8271d9d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/generate.sh new file mode 100755 index 0000000..b5040b7 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/generate.sh @@ -0,0 +1,118 @@ +#!/bin/bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-trusty +# to only update ubuntu-trusty/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it +# +# Note: non-LTS versions are not guaranteed to work. + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="aarch64/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! + # + + FROM $from + + EOF + + dockerBuildTags='apparmor pkcs11 selinux' + runcBuildTags='apparmor selinux' + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + # aarch64 doesn't have an official downloadable binary for go. + # And gccgo for trusty only includes Go 1.2 implementation which + # is too old to build current go source, fortunately trusty has + # golang-1.6-go package can be used as bootstrap. + packages+=( golang-1.6-go ) + ;; + xenial) + packages+=( libsystemd-dev ) + packages+=( golang-go libseccomp-dev) + + dockerBuildTags="$dockerBuildTags seccomp" + runcBuildTags="$runcBuildTags seccomp" + ;; + *) + echo "Unsupported distro:" $distro:$suite + rm -fr "$version" + exit 1 + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$suite" in + trusty) + echo 'RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) + ;; + esac + + echo "# Install Go" >> "$version/Dockerfile" + echo "# aarch64 doesn't have official go binaries, so use the version of go installed from" >> "$version/Dockerfile" + echo "# the image to build go from source." >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.aarch64 >> "$version/Dockerfile" + echo 'RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \' >> "$version/Dockerfile" + echo ' && cd /usr/src/go/src \' >> "$version/Dockerfile" + echo ' && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV PATH $PATH:/usr/src/go/bin' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV AUTO_GOPATH 1" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV DOCKER_BUILDTAGS $dockerBuildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..d04860c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile @@ -0,0 +1,24 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go --no-install-recommends && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH $PATH:/usr/src/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..3cd8442 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH $PATH:/usr/src/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/README.md b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/README.md new file mode 100644 index 0000000..20a0ff1 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-deb` + +This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. + +To add new tags, see [`contrib/builder/deb/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/build.sh b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/build.sh new file mode 100755 index 0000000..8271d9d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-jessie/Dockerfile new file mode 100644 index 0000000..42aaa56 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-stretch/Dockerfile new file mode 100644 index 0000000..c052be5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-stretch/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:stretch + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-wheezy/Dockerfile new file mode 100644 index 0000000..bcedb47 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-wheezy/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:wheezy-backports + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + +RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/generate.sh new file mode 100755 index 0000000..765db5d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/generate.sh @@ -0,0 +1,149 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + debian:wheezy) + # add -backports, like our users have to + from+='-backports' + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! + # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + if [ "$distro" = "debian" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + # allow replacing httpredir or deb mirror + ARG APT_MIRROR=deb.debian.org + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + EOF + + if [ "$suite" = "wheezy" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + EOF + fi + + echo "" >> "$version/Dockerfile" + fi + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + # packaging for "sd-journal.h" and libraries varies + case "$suite" in + precise|wheezy) ;; + jessie|trusty) packages+=( libsystemd-journal-dev );; + *) packages+=( libsystemd-dev );; + esac + + # debian wheezy & ubuntu precise do not have the right libseccomp libs + # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( + case "$suite" in + precise|wheezy|jessie|trusty) + packages=( "${packages[@]/libseccomp-dev}" ) + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + + if [ "$suite" = 'precise' ]; then + # precise has a few package issues + + # - dh-systemd doesn't exist at all + packages=( "${packages[@]/dh-systemd}" ) + + # - libdevmapper-dev is missing critical structs (too old) + packages=( "${packages[@]/libdevmapper-dev}" ) + extraBuildTags+=' exclude_graphdriver_devicemapper' + + # - btrfs-tools is missing "ioctl.h" (too old), so it's useless + # (since kernels on precise are old too, just skip btrfs entirely) + packages=( "${packages[@]/btrfs-tools}" ) + extraBuildTags+=' exclude_graphdriver_btrfs' + fi + + if [ "$suite" = 'wheezy' ]; then + # pull a couple packages from backports explicitly + # (build failures otherwise) + backportsPackages=( btrfs-tools ) + for pkg in "${backportsPackages[@]}"; do + packages=( "${packages[@]/$pkg}" ) + done + echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + fi + + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile new file mode 100644 index 0000000..aa027f8 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:precise + +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor git libapparmor-dev libltdl-dev libsqlite3-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor exclude_graphdriver_btrfs exclude_graphdriver_devicemapper pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..b03a853 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..af03f62 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..5ac1edf --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/debian-jessie/Dockerfile new file mode 100644 index 0000000..a4ac781 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/generate.sh new file mode 100755 index 0000000..e110a21 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/generate.sh @@ -0,0 +1,158 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + raspbian:jessie) + from="resin/rpi-raspbian:jessie" + ;; + *) + from="armhf/$from" + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! + # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + if [[ "$distro" = "debian" || "$distro" = "raspbian" ]]; then + cat >> "$version/Dockerfile" <<-'EOF' + # allow replacing httpredir or deb mirror + ARG APT_MIRROR=deb.debian.org + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + EOF + + if [ "$suite" = "wheezy" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + EOF + fi + + echo "" >> "$version/Dockerfile" + fi + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + # packaging for "sd-journal.h" and libraries varies + case "$suite" in + precise|wheezy) ;; + jessie|trusty) packages+=( libsystemd-journal-dev );; + *) packages+=( libsystemd-dev );; + esac + + # debian wheezy & ubuntu precise do not have the right libseccomp libs + # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( + case "$suite" in + precise|wheezy|jessie|trusty) + packages=( "${packages[@]/libseccomp-dev}" ) + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + + if [ "$suite" = 'precise' ]; then + # precise has a few package issues + + # - dh-systemd doesn't exist at all + packages=( "${packages[@]/dh-systemd}" ) + + # - libdevmapper-dev is missing critical structs (too old) + packages=( "${packages[@]/libdevmapper-dev}" ) + extraBuildTags+=' exclude_graphdriver_devicemapper' + + # - btrfs-tools is missing "ioctl.h" (too old), so it's useless + # (since kernels on precise are old too, just skip btrfs entirely) + packages=( "${packages[@]/btrfs-tools}" ) + extraBuildTags+=' exclude_graphdriver_btrfs' + fi + + if [ "$suite" = 'wheezy' ]; then + # pull a couple packages from backports explicitly + # (build failures otherwise) + backportsPackages=( btrfs-tools ) + for pkg in "${backportsPackages[@]}"; do + packages=( "${packages[@]/$pkg}" ) + done + echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + fi + + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + if [ "$distro" == 'raspbian' ]; + then + cat <> "$version/Dockerfile" +# GOARM is the ARM architecture version which is unrelated to the above Golang version +ENV GOARM 6 +EOF + fi + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile new file mode 100644 index 0000000..4dbfd09 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM resin/rpi-raspbian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +# GOARM is the ARM architecture version which is unrelated to the above Golang version +ENV GOARM 6 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..b36c1da --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..b5e55ad --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..69c2e7f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/build.sh b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/build.sh new file mode 100755 index 0000000..7d22e8c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/generate.sh new file mode 100755 index 0000000..0e20b9c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/generate.sh @@ -0,0 +1,103 @@ +#!/bin/bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="ppc64le/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! + # + + FROM $from + + EOF + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + ;; + *) + # libseccomp isn't available until ubuntu xenial and is required for "seccomp.h" & "libseccomp.so" + packages+=( libseccomp-dev ) + packages+=( libsystemd-dev ) + ;; + esac + + # buildtags + case "$suite" in + # trusty has no seccomp package + trusty) + runcBuildTags="apparmor selinux" + ;; + # ppc64le support was backported into libseccomp 2.2.3-2, + # so enable seccomp by default + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..4182d68 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! +# + +FROM ppc64le/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..f1521db --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! +# + +FROM ppc64le/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..4f8cc66 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! +# + +FROM ppc64le/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/s390x/build.sh b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/build.sh new file mode 100755 index 0000000..8271d9d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/s390x/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/generate.sh new file mode 100755 index 0000000..b8f5860 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/generate.sh @@ -0,0 +1,96 @@ +#!/bin/bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="s390x/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! + # + + FROM $from + + EOF + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + libsystemd-dev + vim-common # tini dep + ) + + case "$suite" in + # s390x needs libseccomp 2.3.1 + xenial) + # Ubuntu Xenial has libseccomp 2.2.3 + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor selinux seccomp" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..6d7e4c5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! +# + +FROM s390x/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/README.md b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/README.md new file mode 100644 index 0000000..5f2e888 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-rpm` + +This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. + +To add new tags, see [`contrib/builder/rpm/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/build.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/build.sh new file mode 100755 index 0000000..558f7ee --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/centos-7/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/centos-7/Dockerfile new file mode 100644 index 0000000..1f84163 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/centos-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM centos:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-24/Dockerfile new file mode 100644 index 0000000..af040c5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-24/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM fedora:24 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-25/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-25/Dockerfile new file mode 100644 index 0000000..98e57a9 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-25/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM fedora:25 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/generate.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/generate.sh new file mode 100755 index 0000000..6f93afa --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/generate.sh @@ -0,0 +1,189 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + if [[ "$distro" == "photon" ]]; then + installer=tdnf + fi + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! + # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags='pkcs11' + runcBuildTags= + + case "$from" in + oraclelinux:6) + # We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version + # This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo + echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile" + echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + fedora:*) + echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" + ;; + *) ;; + esac + + case "$from" in + centos:*) + # get "Development Tools" packages dependencies + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + + if [[ "$version" == "centos-7" ]]; then + echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" + fi + ;; + oraclelinux:*) + # get "Development Tools" packages and dependencies + # we also need yum-utils for yum-config-manager to pull the latest repo file + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + ;; + opensuse:*) + # get rpm-build and curl packages and dependencies + echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" + ;; + photon:*) + echo "RUN ${installer} install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + libtool-ltdl-devel # for pkcs11 "ltdl.h" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + case "$from" in + oraclelinux:7) + # Enable the optional repository + packages=( --enablerepo=ol7_optional_latest "${packages[*]}" ) + ;; + esac + + case "$from" in + oraclelinux:6) + # doesn't use systemd, doesn't have a devel package for it + packages=( "${packages[@]/systemd-devel}" ) + ;; + esac + + # opensuse & oraclelinx:6 do not have the right libseccomp libs + case "$from" in + opensuse:*|oraclelinux:6) + packages=( "${packages[@]/libseccomp-devel}" ) + runcBuildTags="selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + ;; + esac + + case "$from" in + opensuse:*) + packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + packages=( "${packages[@]/vim-common/vim}" ) + if [[ "$from" == "opensuse:13."* ]]; then + packages+=( systemd-rpm-macros ) + fi + + # use zypper + echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" + ;; + photon:*) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$from" in + oraclelinux:6) + # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. + # The ordering is very important and should not be changed. + echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) ;; + esac + + +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile new file mode 100644 index 0000000..addd431 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM opensuse:13.2 + +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim systemd-rpm-macros + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 selinux +ENV RUNC_BUILDTAGS selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile new file mode 100644 index 0000000..c34d304 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile @@ -0,0 +1,28 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM oraclelinux:6 + +RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 +RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 selinux +ENV RUNC_BUILDTAGS selinux + +ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile new file mode 100644 index 0000000..378536b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM oraclelinux:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/photon-1.0/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/photon-1.0/Dockerfile new file mode 100644 index 0000000..b77d573 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/photon-1.0/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM photon:1.0 + +RUN tdnf install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils +RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/check-config.sh b/vendor/github.com/moby/moby/contrib/check-config.sh new file mode 100755 index 0000000..d07e4ce --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/check-config.sh @@ -0,0 +1,354 @@ +#!/usr/bin/env bash +set -e + +EXITCODE=0 + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) + +if [ $# -gt 0 ]; then + CONFIG="$1" +else + : ${CONFIG:="${possibleConfigs[0]}"} +fi + +if ! command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi + +kernelVersion="$(uname -r)" +kernelMajor="${kernelVersion%%.*}" +kernelMinor="${kernelVersion#$kernelMajor.}" +kernelMinor="${kernelMinor%%.*}" + +is_set() { + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null +} +is_set_in_kernel() { + zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null +} +is_set_as_module() { + zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null +} + +color() { + local codes=() + if [ "$1" = 'bold' ]; then + codes=( "${codes[@]}" '1' ) + shift + fi + if [ "$#" -gt 0 ]; then + local code= + case "$1" in + # see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors + black) code=30 ;; + red) code=31 ;; + green) code=32 ;; + yellow) code=33 ;; + blue) code=34 ;; + magenta) code=35 ;; + cyan) code=36 ;; + white) code=37 ;; + esac + if [ "$code" ]; then + codes=( "${codes[@]}" "$code" ) + fi + fi + local IFS=';' + echo -en '\033['"${codes[*]}"'m' +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red +} + +check_flag() { + if is_set_in_kernel "$1"; then + wrap_good "CONFIG_$1" 'enabled' + elif is_set_as_module "$1"; then + wrap_good "CONFIG_$1" 'enabled (as module)' + else + wrap_bad "CONFIG_$1" 'missing' + EXITCODE=1 + fi +} + +check_flags() { + for flag in "$@"; do + echo -n "- "; check_flag "$flag" + done +} + +check_command() { + if command -v "$1" >/dev/null 2>&1; then + wrap_good "$1 command" 'available' + else + wrap_bad "$1 command" 'missing' + EXITCODE=1 + fi +} + +check_device() { + if [ -c "$1" ]; then + wrap_good "$1" 'present' + else + wrap_bad "$1" 'missing' + EXITCODE=1 + fi +} + +check_distro_userns() { + source /etc/os-release 2>/dev/null || /bin/true + if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then + # this is a CentOS7 or RHEL7 system + grep -q "user_namespace.enable=1" /proc/cmdline || { + # no user namespace support enabled + wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)" + EXITCODE=1 + } + fi +} + +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..." + for tryConfig in "${possibleConfigs[@]}"; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! -e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" + exit 1 + fi +fi + +wrap_color "info: reading kernel config from $CONFIG ..." white +echo + +echo 'Generally Necessary:' + +echo -n '- ' +cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" +cgroupDir="$(dirname "$cgroupSubsystemDir")" +if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + if [ "$cgroupSubsystemDir" ]; then + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" + else + echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" + fi + EXITCODE=1 + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + EXITCODE=1 + fi +fi + +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG + KEYS + VETH BRIDGE BRIDGE_NETFILTER + NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE + NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS} + IP_NF_NAT NF_NAT NF_NAT_NEEDED + + # required for bind-mounting /dev/mqueue into containers + POSIX_MQUEUE +) +check_flags "${flags[@]}" +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -lt 8 ]; then + check_flags DEVPTS_MULTIPLE_INSTANCES +fi + +echo + +echo 'Optional Features:' +{ + check_flags USER_NS + check_distro_userns +} +{ + check_flags SECCOMP +} +{ + check_flags CGROUP_PIDS +} +{ + check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED + if is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then + echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)" + fi +} +{ + if is_set LEGACY_VSYSCALL_NATIVE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled' + echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)" + elif is_set LEGACY_VSYSCALL_EMULATE; then + echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled' + elif is_set LEGACY_VSYSCALL_NONE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled' + echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)" + echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)" + echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)" + echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)" + # else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do + # not have these LEGACY_VSYSCALL options and are effectively + # LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably + # effectively LEGACY_VSYSCALL_NATIVE. + fi +} + +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 5 ]; then + check_flags MEMCG_KMEM +fi + +if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then + check_flags RESOURCE_COUNTERS +fi + +if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then + netprio=NETPRIO_CGROUP +else + netprio=CGROUP_NET_PRIO +fi + +flags=( + BLK_CGROUP BLK_DEV_THROTTLING IOSCHED_CFQ CFQ_GROUP_IOSCHED + CGROUP_PERF + CGROUP_HUGETLB + NET_CLS_CGROUP $netprio + CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED + IP_VS + IP_VS_NFCT + IP_VS_RR +) +check_flags "${flags[@]}" + +if ! is_set EXT4_USE_FOR_EXT2; then + check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY + if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then + echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)" + fi +fi + +check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY +if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then + if is_set EXT4_USE_FOR_EXT2; then + echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)" + else + echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)" + fi +fi + +echo '- Network Drivers:' +echo ' - "'$(wrap_color 'overlay' blue)'":' +check_flags VXLAN | sed 's/^/ /' +echo ' Optional (for encrypted networks):' +check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \ + XFRM XFRM_USER XFRM_ALGO INET_ESP INET_XFRM_MODE_TRANSPORT | sed 's/^/ /' +echo ' - "'$(wrap_color 'ipvlan' blue)'":' +check_flags IPVLAN | sed 's/^/ /' +echo ' - "'$(wrap_color 'macvlan' blue)'":' +check_flags MACVLAN DUMMY | sed 's/^/ /' + +# only fail if no storage drivers available +CODE=${EXITCODE} +EXITCODE=0 +STORAGE=1 + +echo '- Storage Drivers:' +echo ' - "'$(wrap_color 'aufs' blue)'":' +check_flags AUFS_FS | sed 's/^/ /' +if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then + echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" +fi +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'btrfs' blue)'":' +check_flags BTRFS_FS | sed 's/^/ /' +check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'devicemapper' blue)'":' +check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'overlay' blue)'":' +check_flags OVERLAY_FS | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'zfs' blue)'":' +echo -n " - "; check_device /dev/zfs +echo -n " - "; check_command zfs +echo -n " - "; check_command zpool +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +EXITCODE=$CODE +[ "$STORAGE" = 1 ] && EXITCODE=1 + +echo + +check_limit_over() +{ + if [ $(cat "$1") -le "$2" ]; then + wrap_bad "- $1" "$(cat $1)" + wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black + EXITCODE=1 + else + wrap_good "- $1" "$(cat $1)" + fi +} + +echo 'Limits:' +check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000 +echo + +exit $EXITCODE diff --git a/vendor/github.com/moby/moby/contrib/completion/REVIEWERS b/vendor/github.com/moby/moby/contrib/completion/REVIEWERS new file mode 100644 index 0000000..03ee2dd --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/completion/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/moby/moby/contrib/completion/bash/docker b/vendor/github.com/moby/moby/contrib/completion/bash/docker new file mode 100644 index 0000000..afd99a9 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/completion/bash/docker @@ -0,0 +1,4316 @@ +#!/bin/bash +# +# bash completion file for core docker commands +# +# This script provides completion of: +# - commands and their options +# - container ids and names +# - image repos and tags +# - filepaths +# +# To enable the completions either: +# - place this file in /etc/bash_completion.d +# or +# - copy this file to e.g. ~/.docker-completion.sh and add the line +# below to your .bashrc after bash completion features are loaded +# . ~/.docker-completion.sh +# +# Configuration: +# +# For several commands, the amount of completions can be configured by +# setting environment variables. +# +# DOCKER_COMPLETION_SHOW_CONTAINER_IDS +# DOCKER_COMPLETION_SHOW_NETWORK_IDS +# DOCKER_COMPLETION_SHOW_NODE_IDS +# DOCKER_COMPLETION_SHOW_PLUGIN_IDS +# DOCKER_COMPLETION_SHOW_SECRET_IDS +# DOCKER_COMPLETION_SHOW_SERVICE_IDS +# "no" - Show names only (default) +# "yes" - Show names and ids +# +# You can tailor completion for the "events", "history", "inspect", "run", +# "rmi" and "save" commands by settings the following environment +# variables: +# +# DOCKER_COMPLETION_SHOW_IMAGE_IDS +# "none" - Show names only (default) +# "non-intermediate" - Show names and ids, but omit intermediate image IDs +# "all" - Show names and ids, including intermediate image IDs +# +# DOCKER_COMPLETION_SHOW_TAGS +# "yes" - include tags in completion options (default) +# "no" - don't include tags in completion options + +# +# Note: +# Currently, the completions will not work if the docker daemon is not +# bound to the default communication port/socket +# If the docker daemon is using a unix socket for communication your user +# must have access to the socket for the completions to function correctly +# +# Note for developers: +# Please arrange options sorted alphabetically by long name with the short +# options immediately following their corresponding long form. +# This order should be applied to lists, alternatives and code blocks. + +__docker_previous_extglob_setting=$(shopt -p extglob) +shopt -s extglob + +__docker_q() { + docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@" +} + +# __docker_containers returns a list of containers. Additional options to +# `docker ps` may be specified in order to filter the list, e.g. +# `__docker_containers --filter status=running` +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_CONTAINER_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_containers() { + local format + if [ "$1" = "--id" ] ; then + format='{{.ID}}' + shift + elif [ "$1" = "--name" ] ; then + format='{{.Names}}' + shift + elif [ "${DOCKER_COMPLETION_SHOW_CONTAINER_IDS}" = yes ] ; then + format='{{.ID}} {{.Names}}' + else + format='{{.Names}}' + fi + __docker_q ps --format "$format" "$@" +} + +# __docker_complete_containers applies completion of containers based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_containers`. +__docker_complete_containers() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_containers "$@")" -- "$current") ) +} + +__docker_complete_containers_all() { + __docker_complete_containers "$@" --all +} + +__docker_complete_containers_running() { + __docker_complete_containers "$@" --filter status=running +} + +__docker_complete_containers_stopped() { + __docker_complete_containers "$@" --filter status=exited +} + +__docker_complete_containers_unpauseable() { + __docker_complete_containers "$@" --filter status=paused +} + +__docker_complete_container_names() { + local containers=( $(__docker_q ps -aq --no-trunc) ) + local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) + names=( "${names[@]#/}" ) # trim off the leading "/" from the container names + COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) +} + +__docker_complete_container_ids() { + local containers=( $(__docker_q ps -aq) ) + COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") ) +} + +__docker_images() { + local images_args="" + + case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in + all) + images_args="--no-trunc -a" + ;; + non-intermediate) + images_args="--no-trunc" + ;; + esac + + local repo_print_command + if [ "${DOCKER_COMPLETION_SHOW_TAGS:-yes}" = "yes" ]; then + repo_print_command='print $1; print $1":"$2' + else + repo_print_command='print $1' + fi + + local awk_script + case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in + all|non-intermediate) + awk_script='NR>1 { print $3; if ($1 != "") { '"$repo_print_command"' } }' + ;; + none|*) + awk_script='NR>1 && $1 != "" { '"$repo_print_command"' }' + ;; + esac + + __docker_q images $images_args | awk "$awk_script" | grep -v '$' +} + +__docker_complete_images() { + COMPREPLY=( $(compgen -W "$(__docker_images)" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +__docker_complete_image_repos() { + local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" + COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) +} + +__docker_complete_image_repos_and_tags() { + local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" + COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +# __docker_networks returns a list of all networks. Additional options to +# `docker network ls` may be specified in order to filter the list, e.g. +# `__docker_networks --filter type=custom` +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_NETWORK_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_networks() { + local format + if [ "$1" = "--id" ] ; then + format='{{.ID}}' + shift + elif [ "$1" = "--name" ] ; then + format='{{.Name}}' + shift + elif [ "${DOCKER_COMPLETION_SHOW_NETWORK_IDS}" = yes ] ; then + format='{{.ID}} {{.Name}}' + else + format='{{.Name}}' + fi + __docker_q network ls --format "$format" "$@" +} + +# __docker_complete_networks applies completion of networks based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_networks`. +__docker_complete_networks() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_networks "$@")" -- "$current") ) +} + +__docker_complete_containers_in_network() { + local containers=$(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1") + COMPREPLY=( $(compgen -W "$containers" -- "$cur") ) +} + +# __docker_volumes returns a list of all volumes. Additional options to +# `docker volume ls` may be specified in order to filter the list, e.g. +# `__docker_volumes --filter dangling=true` +# Because volumes do not have IDs, this function does not distinguish between +# IDs and names. +__docker_volumes() { + __docker_q volume ls -q "$@" +} + +# __docker_complete_volumes applies completion of volumes based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_volumes`. +__docker_complete_volumes() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_volumes "$@")" -- "$current") ) +} + +# __docker_plugins_bundled returns a list of all plugins of a given type. +# The type has to be specified with the mandatory option `--type`. +# Valid types are: Network, Volume, Authorization. +# Completions may be added or removed with `--add` and `--remove` +# This function only deals with plugins that come bundled with Docker. +# For plugins managed by `docker plugin`, see `__docker_plugins_installed`. +__docker_plugins_bundled() { + local type add=() remove=() + while true ; do + case "$1" in + --type) + type="$2" + shift 2 + ;; + --add) + add+=("$2") + shift 2 + ;; + --remove) + remove+=("$2") + shift 2 + ;; + *) + break + ;; + esac + done + + local plugins=($(__docker_q info | sed -n "/^Plugins/,/^[^ ]/s/ $type: //p")) + for del in "${remove[@]}" ; do + plugins=(${plugins[@]/$del/}) + done + echo "${plugins[@]} ${add[@]}" +} + +# __docker_complete_plugins_bundled applies completion of plugins based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# The plugin type has to be specified with the next option `--type`. +# This function only deals with plugins that come bundled with Docker. +# For completion of plugins managed by `docker plugin`, see +# `__docker_complete_plugins_installed`. +__docker_complete_plugins_bundled() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_plugins_bundled "$@")" -- "$current") ) +} + +# __docker_plugins_installed returns a list of all plugins that were installed with +# the Docker plugin API. +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_PLUGIN_IDS=yes to also complete IDs. +# For built-in pugins, see `__docker_plugins_bundled`. +__docker_plugins_installed() { + local fields + if [ "$DOCKER_COMPLETION_SHOW_PLUGIN_IDS" = yes ] ; then + fields='$1,$2' + else + fields='$2' + fi + __docker_q plugin ls | awk "NR>1 {print $fields}" +} + +# __docker_complete_plugins_installed applies completion of plugins that were installed +# with the Docker plugin API, based on the current value of `$cur` or the value of +# the optional first option `--cur`, if given. +# For completion of built-in pugins, see `__docker_complete_plugins_bundled`. +__docker_complete_plugins_installed() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_plugins_installed "$@")" -- "$current") ) +} + +__docker_runtimes() { + __docker_q info | sed -n 's/^Runtimes: \(.*\)/\1/p' +} + +__docker_complete_runtimes() { + COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") ) +} + +# __docker_secrets returns a list of all secrets. +# By default, only names of secrets are returned. +# Set DOCKER_COMPLETION_SHOW_SECRET_IDS=yes to also complete IDs of secrets. +__docker_secrets() { + local fields='$2' # default: name only + [ "${DOCKER_COMPLETION_SHOW_SECRET_IDS}" = yes ] && fields='$1,$2' # ID and name + + __docker_q secret ls | awk "NR>1 {print $fields}" +} + +# __docker_complete_secrets applies completion of secrets based on the current value +# of `$cur`. +__docker_complete_secrets() { + COMPREPLY=( $(compgen -W "$(__docker_secrets)" -- "$cur") ) +} + +# __docker_stacks returns a list of all stacks. +__docker_stacks() { + __docker_q stack ls | awk 'NR>1 {print $1}' +} + +# __docker_complete_stacks applies completion of stacks based on the current value +# of `$cur` or the value of the optional first option `--cur`, if given. +__docker_complete_stacks() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_stacks "$@")" -- "$current") ) +} + +# __docker_nodes returns a list of all nodes. Additional options to +# `docker node ls` may be specified in order to filter the list, e.g. +# `__docker_nodes --filter role=manager` +# By default, only node names are returned. +# Set DOCKER_COMPLETION_SHOW_NODE_IDS=yes to also complete node IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +# Completions may be added with `--add`, e.g. `--add self`. +__docker_nodes() { + local add=() + local fields='$2' # default: node name only + [ "${DOCKER_COMPLETION_SHOW_NODE_IDS}" = yes ] && fields='$1,$2' # ID and name + + while true ; do + case "$1" in + --id) + fields='$1' # IDs only + shift + ;; + --name) + fields='$2' # names only + shift + ;; + --add) + add+=("$2") + shift 2 + ;; + *) + break + ;; + esac + done + + echo $(__docker_q node ls "$@" | tr -d '*' | awk "NR>1 {print $fields}") "${add[@]}" +} + +# __docker_complete_nodes applies completion of nodes based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_nodes`. +__docker_complete_nodes() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_nodes "$@")" -- "$current") ) +} + +__docker_complete_nodes_plus_self() { + __docker_complete_nodes --add self "$@" +} + +# __docker_services returns a list of all services. Additional options to +# `docker service ls` may be specified in order to filter the list, e.g. +# `__docker_services --filter name=xxx` +# By default, only node names are returned. +# Set DOCKER_COMPLETION_SHOW_SERVICE_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_services() { + local fields='$2' # default: service name only + [ "${DOCKER_COMPLETION_SHOW_SERVICE_IDS}" = yes ] && fields='$1,$2' # ID & name + + if [ "$1" = "--id" ] ; then + fields='$1' # IDs only + shift + elif [ "$1" = "--name" ] ; then + fields='$2' # names only + shift + fi + __docker_q service ls "$@" | awk "NR>1 {print $fields}" +} + +# __docker_complete_services applies completion of services based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_services`. +__docker_complete_services() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") ) +} + +# __docker_append_to_completions appends the word passed as an argument to every +# word in `$COMPREPLY`. +# Normally you do this with `compgen -S` while generating the completions. +# This function allows you to append a suffix later. It allows you to use +# the __docker_complete_XXX functions in cases where you need a suffix. +__docker_append_to_completions() { + COMPREPLY=( ${COMPREPLY[@]/%/"$1"} ) +} + +# __docker_is_experimental tests whether the currently configured Docker daemon +# runs in experimental mode. If so, the function exits with 0 (true). +# Otherwise, or if the result cannot be determined, the exit value is 1 (false). +__docker_is_experimental() { + [ "$(__docker_q version -f '{{.Server.Experimental}}')" = "true" ] +} + +# __docker_pos_first_nonflag finds the position of the first word that is neither +# option nor an option's argument. If there are options that require arguments, +# you should pass a glob describing those options, e.g. "--option1|-o|--option2" +# Use this function to restrict completions to exact positions after the argument list. +__docker_pos_first_nonflag() { + local argument_flags=$1 + + local counter=$((${subcommand_pos:-${command_pos}} + 1)) + while [ $counter -le $cword ]; do + if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then + (( counter++ )) + # eat "=" in case of --option=arg syntax + [ "${words[$counter]}" = "=" ] && (( counter++ )) + else + case "${words[$counter]}" in + -*) + ;; + *) + break + ;; + esac + fi + + # Bash splits words at "=", retaining "=" as a word, examples: + # "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words + while [ "${words[$counter + 1]}" = "=" ] ; do + counter=$(( counter + 2)) + done + + (( counter++ )) + done + + echo $counter +} + +# __docker_map_key_of_current_option returns `key` if we are currently completing the +# value of a map option (`key=value`) which matches the extglob given as an argument. +# This function is needed for key-specific completions. +__docker_map_key_of_current_option() { + local glob="$1" + + local key glob_pos + if [ "$cur" = "=" ] ; then # key= case + key="$prev" + glob_pos=$((cword - 2)) + elif [[ $cur == *=* ]] ; then # key=value case (OSX) + key=${cur%=*} + glob_pos=$((cword - 1)) + elif [ "$prev" = "=" ] ; then + key=${words[$cword - 2]} # key=value case + glob_pos=$((cword - 3)) + else + return + fi + + [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax + + [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key" +} + +# __docker_value_of_option returns the value of the first option matching `option_glob`. +# Valid values for `option_glob` are option names like `--log-level` and globs like +# `--log-level|-l` +# Only positions between the command and the current word are considered. +__docker_value_of_option() { + local option_extglob=$(__docker_to_extglob "$1") + + local counter=$((command_pos + 1)) + while [ $counter -lt $cword ]; do + case ${words[$counter]} in + $option_extglob ) + echo ${words[$counter + 1]} + break + ;; + esac + (( counter++ )) + done +} + +# __docker_to_alternatives transforms a multiline list of strings into a single line +# string with the words separated by `|`. +# This is used to prepare arguments to __docker_pos_first_nonflag(). +__docker_to_alternatives() { + local parts=( $1 ) + local IFS='|' + echo "${parts[*]}" +} + +# __docker_to_extglob transforms a multiline list of options into an extglob pattern +# suitable for use in case statements. +__docker_to_extglob() { + local extglob=$( __docker_to_alternatives "$1" ) + echo "@($extglob)" +} + +# __docker_subcommands processes subcommands +# Locates the first occurrence of any of the subcommands contained in the +# first argument. In case of a match, calls the corresponding completion +# function and returns 0. +# If no match is found, 1 is returned. The calling function can then +# continue processing its completion. +# +# TODO if the preceding command has options that accept arguments and an +# argument is equal ot one of the subcommands, this is falsely detected as +# a match. +__docker_subcommands() { + local subcommands="$1" + + local counter=$(($command_pos + 1)) + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + $(__docker_to_extglob "$subcommands") ) + subcommand_pos=$counter + local subcommand=${words[$counter]} + local completions_func=_docker_${command}_${subcommand//-/_} + declare -F $completions_func >/dev/null && $completions_func + return 0 + ;; + esac + (( counter++ )) + done + return 1 +} + +# __docker_nospace suppresses trailing whitespace +__docker_nospace() { + # compopt is not available in ancient bash versions + type compopt &>/dev/null && compopt -o nospace +} + +__docker_complete_resolved_hostname() { + command -v host >/dev/null 2>&1 || return + COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) +} + +__docker_local_interfaces() { + command -v ip >/dev/null 2>&1 || return + ip addr show scope global 2>/dev/null | sed -n 's| \+inet \([0-9.]\+\).* \([^ ]\+\)|\1 \2|p' +} + +__docker_complete_local_interfaces() { + local additional_interface + if [ "$1" = "--add" ] ; then + additional_interface="$2" + fi + + COMPREPLY=( $( compgen -W "$(__docker_local_interfaces) $additional_interface" -- "$cur" ) ) +} + +# __docker_complete_capabilities_addable completes Linux capabilities which are +# not granted by default and may be added. +# see https://docs.docker.com/engine/reference/run/#/runtime-privilege-and-linux-capabilities +__docker_complete_capabilities_addable() { + COMPREPLY=( $( compgen -W " + ALL + AUDIT_CONTROL + BLOCK_SUSPEND + DAC_READ_SEARCH + IPC_LOCK + IPC_OWNER + LEASE + LINUX_IMMUTABLE + MAC_ADMIN + MAC_OVERRIDE + NET_ADMIN + NET_BROADCAST + SYS_ADMIN + SYS_BOOT + SYSLOG + SYS_MODULE + SYS_NICE + SYS_PACCT + SYS_PTRACE + SYS_RAWIO + SYS_RESOURCE + SYS_TIME + SYS_TTY_CONFIG + WAKE_ALARM + " -- "$cur" ) ) +} + +# __docker_complete_capabilities_droppable completes Linux capability options which are +# allowed by default and can be dropped. +# see https://docs.docker.com/engine/reference/run/#/runtime-privilege-and-linux-capabilities +__docker_complete_capabilities_droppable() { + COMPREPLY=( $( compgen -W " + ALL + AUDIT_WRITE + CHOWN + DAC_OVERRIDE + FOWNER + FSETID + KILL + MKNOD + NET_BIND_SERVICE + NET_RAW + SETFCAP + SETGID + SETPCAP + SETUID + SYS_CHROOT + " -- "$cur" ) ) +} + +__docker_complete_detach_keys() { + case "$prev" in + --detach-keys) + case "$cur" in + *,) + COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) ) + ;; + esac + + __docker_nospace + return + ;; + esac + return 1 +} + +__docker_complete_isolation() { + COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) ) +} + +__docker_complete_log_drivers() { + COMPREPLY=( $( compgen -W " + awslogs + etwlogs + fluentd + gcplogs + gelf + journald + json-file + logentries + none + splunk + syslog + " -- "$cur" ) ) +} + +__docker_complete_log_options() { + # see docs/reference/logging/index.md + local awslogs_options="awslogs-region awslogs-group awslogs-stream" + local fluentd_options="env fluentd-address fluentd-async-connect fluentd-buffer-limit fluentd-retry-wait fluentd-max-retries labels tag" + local gcplogs_options="env gcp-log-cmd gcp-project labels" + local gelf_options="env gelf-address gelf-compression-level gelf-compression-type labels tag" + local journald_options="env labels tag" + local json_file_options="env labels max-file max-size" + local logentries_options="logentries-token" + local syslog_options="env labels syslog-address syslog-facility syslog-format syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify tag" + local splunk_options="env labels splunk-caname splunk-capath splunk-format splunk-gzip splunk-gzip-level splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url splunk-verify-connection tag" + + local all_options="$fluentd_options $gcplogs_options $gelf_options $journald_options $logentries_options $json_file_options $syslog_options $splunk_options" + + case $(__docker_value_of_option --log-driver) in + '') + COMPREPLY=( $( compgen -W "$all_options" -S = -- "$cur" ) ) + ;; + awslogs) + COMPREPLY=( $( compgen -W "$awslogs_options" -S = -- "$cur" ) ) + ;; + fluentd) + COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) ) + ;; + gcplogs) + COMPREPLY=( $( compgen -W "$gcplogs_options" -S = -- "$cur" ) ) + ;; + gelf) + COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) ) + ;; + journald) + COMPREPLY=( $( compgen -W "$journald_options" -S = -- "$cur" ) ) + ;; + json-file) + COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) ) + ;; + logentries) + COMPREPLY=( $( compgen -W "$logentries_options" -S = -- "$cur" ) ) + ;; + syslog) + COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) ) + ;; + splunk) + COMPREPLY=( $( compgen -W "$splunk_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + + __docker_nospace +} + +__docker_complete_log_driver_options() { + local key=$(__docker_map_key_of_current_option '--log-opt') + case "$key" in + fluentd-async-connect) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + gelf-address) + COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur##*=}" ) ) + __docker_nospace + return + ;; + gelf-compression-level) + COMPREPLY=( $( compgen -W "1 2 3 4 5 6 7 8 9" -- "${cur##*=}" ) ) + return + ;; + gelf-compression-type) + COMPREPLY=( $( compgen -W "gzip none zlib" -- "${cur##*=}" ) ) + return + ;; + syslog-address) + COMPREPLY=( $( compgen -W "tcp:// tcp+tls:// udp:// unix://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + syslog-facility) + COMPREPLY=( $( compgen -W " + auth + authpriv + cron + daemon + ftp + kern + local0 + local1 + local2 + local3 + local4 + local5 + local6 + local7 + lpr + mail + news + syslog + user + uucp + " -- "${cur##*=}" ) ) + return + ;; + syslog-format) + COMPREPLY=( $( compgen -W "rfc3164 rfc5424 rfc5424micro" -- "${cur##*=}" ) ) + return + ;; + syslog-tls-ca-cert|syslog-tls-cert|syslog-tls-key) + _filedir + return + ;; + syslog-tls-skip-verify) + COMPREPLY=( $( compgen -W "true" -- "${cur##*=}" ) ) + return + ;; + splunk-url) + COMPREPLY=( $( compgen -W "http:// https://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + splunk-gzip|splunk-insecureskipverify|splunk-verify-connection) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + splunk-format) + COMPREPLY=( $( compgen -W "inline json raw" -- "${cur##*=}" ) ) + return + ;; + esac + return 1 +} + +__docker_complete_log_levels() { + COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) +} + +__docker_complete_restart() { + case "$prev" in + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "always no on-failure on-failure: unless-stopped" -- "$cur") ) + ;; + esac + return + ;; + esac + return 1 +} + +# __docker_complete_signals returns a subset of the available signals that is most likely +# relevant in the context of docker containers +__docker_complete_signals() { + local signals=( + SIGCONT + SIGHUP + SIGINT + SIGKILL + SIGQUIT + SIGSTOP + SIGTERM + SIGUSR1 + SIGUSR2 + ) + COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) +} + +__docker_complete_user_group() { + if [[ $cur == *:* ]] ; then + COMPREPLY=( $(compgen -g -- "${cur#*:}") ) + else + COMPREPLY=( $(compgen -u -S : -- "$cur") ) + __docker_nospace + fi +} + +_docker_docker() { + # global options that may appear after the docker command + local boolean_options=" + $global_boolean_options + --help + --version -v + " + + case "$prev" in + --config) + _filedir -d + return + ;; + --log-level|-l) + __docker_complete_log_levels + return + ;; + $(__docker_to_extglob "$global_options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag "$(__docker_to_extglob "$global_options_with_args")" ) + if [ $cword -eq $counter ]; then + __docker_is_experimental && commands+=(${experimental_commands[*]}) + COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_attach() { + _docker_container_attach +} + +_docker_build() { + _docker_image_build +} + + +_docker_checkpoint() { + local subcommands=" + create + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_checkpoint_create() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help --leave-running" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_checkpoint_ls() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_checkpoint_rm() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + elif [ $cword -eq $(($counter + 1)) ]; then + COMPREPLY=( $( compgen -W "$(__docker_q checkpoint ls "$prev" | sed 1d)" -- "$cur" ) ) + fi + ;; + esac +} + + +_docker_container() { + local subcommands=" + attach + commit + cp + create + diff + exec + export + inspect + kill + logs + ls + pause + port + prune + rename + restart + rm + run + start + stats + stop + top + unpause + update + wait + " + local aliases=" + list + ps + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_container_attach() { + __docker_complete_detach_keys && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy=false" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--detach-keys') + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_container_commit() { + case "$prev" in + --author|-a|--change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause=false -p=false" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m') + + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_container_cp() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + case "$cur" in + *:) + return + ;; + *) + # combined container and filename completion + _filedir + local files=( ${COMPREPLY[@]} ) + + __docker_complete_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + local containers=( ${COMPREPLY[@]} ) + + COMPREPLY=( $( compgen -W "${files[*]} ${containers[*]}" -- "$cur" ) ) + if [[ "$COMPREPLY" == *: ]]; then + __docker_nospace + fi + return + ;; + esac + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + if [ -e "$prev" ]; then + __docker_complete_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + else + _filedir + fi + return + fi + ;; + esac +} + +_docker_container_create() { + _docker_container_run +} + +_docker_container_diff() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_exec() { + __docker_complete_detach_keys && return + + case "$prev" in + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--detach -d --detach-keys --env -e --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_export() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_inspect() { + _docker_inspect --type container +} + +_docker_container_kill() { + case "$prev" in + --signal|-s) + __docker_complete_signals + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--details --follow -f --help --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_list() { + _docker_container_ls +} + +_docker_container_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + ancestor) + cur="${cur##*=}" + __docker_complete_images + return + ;; + before) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + id) + __docker_complete_containers_all --cur "${cur##*=}" --id + return + ;; + health) + COMPREPLY=( $( compgen -W "healthy starting none unhealthy" -- "${cur##*=}" ) ) + return + ;; + is-task) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_containers_all --cur "${cur##*=}" --name + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + since) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + status) + COMPREPLY=( $( compgen -W "created dead exited paused restarting running removing" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "ancestor before exited health id is-task label name network since status volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --format|--last|-n) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --format --help --last -n --latest -l --no-trunc --quiet -q --size -s" -- "$cur" ) ) + ;; + esac +} + +_docker_container_pause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_port() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_container_ps() { + _docker_container_ls +} + +_docker_container_rename() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_restart() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) + ;; + *) + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --force|-f) + __docker_complete_containers_all + return + ;; + esac + done + __docker_complete_containers_stopped + ;; + esac +} + +_docker_container_run() { + local options_with_args=" + --add-host + --attach -a + --blkio-weight + --blkio-weight-device + --cap-add + --cap-drop + --cgroup-parent + --cidfile + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpuset-cpus + --cpus + --cpuset-mems + --cpu-shares -c + --device + --device-read-bps + --device-read-iops + --device-write-bps + --device-write-iops + --dns + --dns-option + --dns-search + --entrypoint + --env -e + --env-file + --expose + --group-add + --hostname -h + --init-path + --ip + --ip6 + --ipc + --isolation + --kernel-memory + --label-file + --label -l + --link + --link-local-ip + --log-driver + --log-opt + --mac-address + --memory -m + --memory-swap + --memory-swappiness + --memory-reservation + --name + --network + --network-alias + --oom-score-adj + --pid + --pids-limit + --publish -p + --restart + --runtime + --security-opt + --shm-size + --stop-signal + --stop-timeout + --storage-opt + --tmpfs + --sysctl + --ulimit + --user -u + --userns + --uts + --volume-driver + --volumes-from + --volume -v + --workdir -w + " + + local boolean_options=" + --disable-content-trust=false + --help + --init + --interactive -i + --oom-kill-disable + --privileged + --publish-all -P + --read-only + --tty -t + " + + if [ "$command" = "run" -o "$subcommand" = "run" ] ; then + options_with_args="$options_with_args + --detach-keys + --health-cmd + --health-interval + --health-retries + --health-timeout + " + boolean_options="$boolean_options + --detach -d + --no-healthcheck + --rm + --sig-proxy=false + " + __docker_complete_detach_keys && return + fi + + local all_options="$options_with_args $boolean_options" + + + __docker_complete_log_driver_options && return + __docker_complete_restart && return + + local key=$(__docker_map_key_of_current_option '--security-opt') + case "$key" in + label) + [[ $cur == *: ]] && return + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "${cur##*=}") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; then + __docker_nospace + fi + return + ;; + seccomp) + local cur=${cur##*=} + _filedir + COMPREPLY+=( $( compgen -W "unconfined" -- "$cur" ) ) + return + ;; + esac + + case "$prev" in + --add-host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --attach|-a) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cap-add) + __docker_complete_capabilities_addable + return + ;; + --cap-drop) + __docker_complete_capabilities_droppable + return + ;; + --cidfile|--env-file|--init-path|--label-file) + _filedir + return + ;; + --device|--tmpfs|--volume|-v) + case "$cur" in + *:*) + # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) + ;; + '') + COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) + __docker_nospace + ;; + /*) + _filedir + __docker_nospace + ;; + esac + return + ;; + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --ipc) + case "$cur" in + *:*) + cur="${cur#*:}" + __docker_complete_containers_running + ;; + *) + COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) + if [ "$COMPREPLY" = "container:" ]; then + __docker_nospace + fi + ;; + esac + return + ;; + --isolation) + __docker_complete_isolation + return + ;; + --link) + case "$cur" in + *:*) + ;; + *) + __docker_complete_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + ;; + esac + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --network) + case "$cur" in + container:*) + __docker_complete_containers_all --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + __docker_nospace + fi + ;; + esac + return + ;; + --pid) + case "$cur" in + *:*) + __docker_complete_containers_running --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) + if [ "$COMPREPLY" = "container:" ]; then + __docker_nospace + fi + ;; + esac + return + ;; + --runtime) + __docker_complete_runtimes + return + ;; + --security-opt) + COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp=" -- "$cur") ) + if [ "${COMPREPLY[*]}" != "no-new-privileges" ] ; then + __docker_nospace + fi + return + ;; + --storage-opt) + COMPREPLY=( $( compgen -W "size" -S = -- "$cur") ) + __docker_nospace + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + --userns) + COMPREPLY=( $( compgen -W "host" -- "$cur" ) ) + return + ;; + --volume-driver) + __docker_complete_plugins_bundled --type Volume + return + ;; + --volumes-from) + __docker_complete_containers_all + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + ;; + esac +} + +_docker_container_start() { + __docker_complete_detach_keys && return + + case "$prev" in + --checkpoint) + if [ __docker_is_experimental ] ; then + return + fi + ;; + --checkpoint-dir) + if [ __docker_is_experimental ] ; then + _filedir -d + return + fi + ;; + esac + + case "$cur" in + -*) + local options="--attach -a --detach-keys --help --interactive -i" + __docker_is_experimental && options+=" --checkpoint --checkpoint-dir" + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_stopped + ;; + esac +} + +_docker_container_stats() { + case "$prev" in + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --format --help --no-stream" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_stop() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_top() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_container_unpause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_unpauseable + fi + ;; + esac +} + +_docker_container_update() { + local options_with_args=" + --blkio-weight + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --kernel-memory + --memory -m + --memory-reservation + --memory-swap + --restart + " + + local boolean_options=" + --help + " + + local all_options="$options_with_args $boolean_options" + + __docker_complete_restart && return + + case "$prev" in + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_wait() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + + +_docker_commit() { + _docker_container_commit +} + +_docker_cp() { + _docker_container_cp +} + +_docker_create() { + _docker_container_run +} + +_docker_daemon() { + local boolean_options=" + $global_boolean_options + --disable-legacy-registry + --experimental + --help + --icc=false + --init + --ip-forward=false + --ip-masq=false + --iptables=false + --ipv6 + --live-restore + --raw-logs + --selinux-enabled + --userland-proxy=false + " + local options_with_args=" + $global_options_with_args + --add-runtime + --api-cors-header + --authorization-plugin + --bip + --bridge -b + --cgroup-parent + --cluster-advertise + --cluster-store + --cluster-store-opt + --config-file + --containerd + --default-gateway + --default-gateway-v6 + --default-ulimit + --dns + --dns-search + --dns-opt + --exec-opt + --exec-root + --fixed-cidr + --fixed-cidr-v6 + --graph -g + --group -G + --init-path + --insecure-registry + --ip + --label + --log-driver + --log-opt + --max-concurrent-downloads + --max-concurrent-uploads + --mtu + --oom-score-adjust + --pidfile -p + --registry-mirror + --seccomp-profile + --shutdown-timeout + --storage-driver -s + --storage-opt + --userland-proxy-path + --userns-remap + " + + __docker_complete_log_driver_options && return + + key=$(__docker_map_key_of_current_option '--cluster-store-opt') + case "$key" in + kv.*file) + cur=${cur##*=} + _filedir + return + ;; + esac + + local key=$(__docker_map_key_of_current_option '--storage-opt') + case "$key" in + dm.blkdiscard|dm.override_udev_sync_check|dm.use_deferred_removal|dm.use_deferred_deletion) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + dm.fs) + COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur##*=}" ) ) + return + ;; + dm.thinpooldev) + cur=${cur##*=} + _filedir + return + ;; + esac + + case "$prev" in + --authorization-plugin) + __docker_complete_plugins_bundled --type Authorization + return + ;; + --cluster-store) + COMPREPLY=( $( compgen -W "consul etcd zk" -S "://" -- "$cur" ) ) + __docker_nospace + return + ;; + --cluster-store-opt) + COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --config-file|--containerd|--init-path|--pidfile|-p|--tlscacert|--tlscert|--tlskey|--userland-proxy-path) + _filedir + return + ;; + --exec-root|--graph|-g) + _filedir -d + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --storage-driver|-s) + COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay overlay2 vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) + return + ;; + --storage-opt) + local btrfs_options="btrfs.min_space" + local devicemapper_options=" + dm.basesize + dm.blkdiscard + dm.blocksize + dm.fs + dm.loopdatasize + dm.loopmetadatasize + dm.min_free_space + dm.mkfsarg + dm.mountopt + dm.override_udev_sync_check + dm.thinpooldev + dm.use_deferred_deletion + dm.use_deferred_removal + " + local zfs_options="zfs.fsname" + + case $(__docker_value_of_option '--storage-driver|-s') in + '') + COMPREPLY=( $( compgen -W "$btrfs_options $devicemapper_options $zfs_options" -S = -- "$cur" ) ) + ;; + btrfs) + COMPREPLY=( $( compgen -W "$btrfs_options" -S = -- "$cur" ) ) + ;; + devicemapper) + COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) ) + ;; + zfs) + COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + __docker_nospace + return + ;; + --log-level|-l) + __docker_complete_log_levels + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --seccomp-profile) + _filedir json + return + ;; + --userns-remap) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + esac +} + +_docker_deploy() { + __docker_is_experimental && _docker_stack_deploy +} + +_docker_diff() { + _docker_container_diff +} + +_docker_events() { + _docker_system_events +} + +_docker_exec() { + _docker_container_exec +} + +_docker_export() { + _docker_container_export +} + +_docker_help() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) + fi +} + +_docker_history() { + _docker_image_history +} + + +_docker_image() { + local subcommands=" + build + history + import + inspect + load + ls + prune + pull + push + rm + save + tag + " + local aliases=" + images + list + remove + rmi + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_image_build() { + local options_with_args=" + --build-arg + --cache-from + --cgroup-parent + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --cpu-period + --cpu-quota + --file -f + --isolation + --label + --memory -m + --memory-swap + --network + --shm-size + --tag -t + --ulimit + " + + local boolean_options=" + --compress + --disable-content-trust=false + --force-rm + --help + --no-cache + --pull + --quiet -q + --rm + " + __docker_is_experimental && boolean_options+="--squash" + + local all_options="$options_with_args $boolean_options" + + case "$prev" in + --build-arg) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --cache-from) + __docker_complete_image_repos_and_tags + return + ;; + --file|-f) + _filedir + return + ;; + --isolation) + __docker_complete_isolation + return + ;; + --network) + case "$cur" in + container:*) + __docker_complete_containers_all --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W "$(__docker_plugins --type Network) $(__docker_networks) container:" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + __docker_nospace + fi + ;; + esac + return + ;; + --tag|-t) + __docker_complete_image_repos_and_tags + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + _filedir -d + fi + ;; + esac +} + +_docker_image_history() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --human=false -H=false --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + ;; + esac +} + +_docker_image_images() { + _docker_image_ls +} + +_docker_image_import() { + case "$prev" in + --change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m') + if [ $cword -eq $counter ]; then + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_image_inspect() { + _docker_inspect --type image +} + +_docker_image_load() { + case "$prev" in + --input|-i) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --input -i --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_image_list() { + _docker_image_ls +} + +_docker_image_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + before|since|reference) + cur="${cur##*=}" + __docker_complete_images + return + ;; + dangling) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + label) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "before dangling label reference since" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + =) + return + ;; + *) + __docker_complete_image_repos + ;; + esac +} + +_docker_image_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_image_pull() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all-tags -a --disable-content-trust=false --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --all-tags|-a) + __docker_complete_image_repos + return + ;; + esac + done + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--disable-content-trust=false --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_remove() { + _docker_image_rm +} + +_docker_image_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_rmi() { + _docker_image_rm +} + +_docker_image_save() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_tag() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + + +_docker_images() { + _docker_image_ls +} + +_docker_import() { + _docker_image_import +} + +_docker_info() { + _docker_system_info +} + +_docker_inspect() { + local preselected_type + local type + + if [ "$1" = "--type" ] ; then + preselected_type=yes + type="$2" + else + type=$(__docker_value_of_option --type) + fi + + case "$prev" in + --format|-f) + return + ;; + --type) + if [ -z "$preselected_type" ] ; then + COMPREPLY=( $( compgen -W "container image network node plugin service volume" -- "$cur" ) ) + return + fi + ;; + esac + + case "$cur" in + -*) + local options="--format -f --help --size -s" + if [ -z "$preselected_type" ] ; then + options+=" --type" + fi + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + *) + case "$type" in + '') + COMPREPLY=( $( compgen -W " + $(__docker_containers --all) + $(__docker_images) + $(__docker_networks) + $(__docker_nodes) + $(__docker_plugins_installed) + $(__docker_services) + $(__docker_volumes) + " -- "$cur" ) ) + ;; + container) + __docker_complete_containers_all + ;; + image) + __docker_complete_images + ;; + network) + __docker_complete_networks + ;; + node) + __docker_complete_nodes + ;; + plugin) + __docker_complete_plugins_installed + ;; + service) + __docker_complete_services + ;; + volume) + __docker_complete_volumes + ;; + esac + esac +} + +_docker_kill() { + _docker_container_kill +} + +_docker_load() { + _docker_image_load +} + +_docker_login() { + case "$prev" in + --password|-p|--username|-u) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --password -p --username -u" -- "$cur" ) ) + ;; + esac +} + +_docker_logout() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_logs() { + _docker_container_logs +} + +_docker_network_connect() { + local options_with_args=" + --alias + --ip + --ip6 + --link + --link-local-ip + " + + local boolean_options=" + --help + " + + case "$prev" in + --link) + case "$cur" in + *:*) + ;; + *) + __docker_complete_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + ;; + esac + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_network_create() { + case "$prev" in + --aux-address|--gateway|--internal|--ip-range|--ipam-opt|--ipv6|--opt|-o|--subnet) + return + ;; + --ipam-driver) + COMPREPLY=( $( compgen -W "default" -- "$cur" ) ) + return + ;; + --driver|-d) + # remove drivers that allow one instance only, add drivers missing in `docker info` + __docker_complete_plugins_bundled --type Network --remove host --remove null --add macvlan + return + ;; + --label) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--attachable --aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt --ipv6 --label --opt -o --subnet" -- "$cur" ) ) + ;; + esac +} + +_docker_network_disconnect() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_in_network "$prev" + fi + ;; + esac +} + +_docker_network_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_networks + esac +} + +_docker_network_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Network --add macvlan + return + ;; + id) + __docker_complete_networks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_networks --cur "${cur##*=}" --name + return + ;; + type) + COMPREPLY=( $( compgen -W "builtin custom" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "driver id label name type" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_network_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_network_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_networks --filter type=custom + esac +} + +_docker_network() { + local subcommands=" + connect + create + disconnect + inspect + ls + prune + rm + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_service() { + local subcommands=" + create + inspect + ls list + rm remove + scale + ps + update + " + __docker_is_experimental && subcommands+="logs" + + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_service_create() { + _docker_service_update +} + +_docker_service_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--details --follow -f --help --no-resolve --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + ;; + esac +} + +_docker_service_list() { + _docker_service_ls +} + +_docker_service_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_service_remove() { + _docker_service_rm +} + +_docker_service_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_scale() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + __docker_append_to_completions "=" + __docker_nospace + ;; + esac +} + +_docker_service_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + node) + __docker_complete_nodes_plus_self --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id name node" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + ;; + esac +} + +_docker_service_update() { + local $subcommand="${words[$subcommand_pos]}" + + local options_with_args=" + --constraint + --endpoint-mode + --env -e + --force + --health-cmd + --health-interval + --health-retries + --health-timeout + --hostname + --label -l + --limit-cpu + --limit-memory + --log-driver + --log-opt + --mount + --network + --no-healthcheck + --replicas + --reserve-cpu + --reserve-memory + --restart-condition + --restart-delay + --restart-max-attempts + --restart-window + --rollback + --stop-grace-period + --update-delay + --update-failure-action + --update-max-failure-ratio + --update-monitor + --update-parallelism + --user -u + --workdir -w + " + + local boolean_options=" + --help + --tty -t + --with-registry-auth + " + + __docker_complete_log_driver_options && return + + if [ "$subcommand" = "create" ] ; then + options_with_args="$options_with_args + --container-label + --dns + --dns-option + --dns-search + --env-file + --group + --host + --mode + --name + --publish -p + --secret + " + + case "$prev" in + --env-file) + _filedir + return + ;; + --host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --mode) + COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) ) + return + ;; + --secret) + __docker_complete_secrets + return + ;; + --group) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + esac + fi + if [ "$subcommand" = "update" ] ; then + options_with_args="$options_with_args + --arg + --container-label-add + --container-label-rm + --dns-add + --dns-option-add + --dns-option-rm + --dns-rm + --dns-search-add + --dns-search-rm + --group-add + --group-rm + --host-add + --host-rm + --image + --publish-add + --publish-rm + --secret-add + --secret-rm + " + + case "$prev" in + --group-add) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + --group-rm) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + --host-add|--host-rm) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --image) + __docker_complete_image_repos_and_tags + return + ;; + --secret-add|--secret-rm) + __docker_complete_secrets + return + ;; + esac + fi + + case "$prev" in + --endpoint-mode) + COMPREPLY=( $( compgen -W "dnsrr vip" -- "$cur" ) ) + return + ;; + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --network) + __docker_complete_networks + return + ;; + --restart-condition) + COMPREPLY=( $( compgen -W "any none on-failure" -- "$cur" ) ) + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ "$subcommand" = "update" ] ; then + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + else + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + fi + ;; + esac +} + +_docker_swarm() { + local subcommands=" + init + join + join-token + leave + unlock + unlock-key + update + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_init() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --autolock --availability --cert-expiry --dispatcher-heartbeat --external-ca --force-new-cluster --help --listen-addr --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_join() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + --token) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --help --listen-addr --token" -- "$cur" ) ) + ;; + *:) + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + ;; + esac +} + +_docker_swarm_join_token() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag ) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_swarm_leave() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock_key() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_update() { + case "$prev" in + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--autolock --cert-expiry --dispatcher-heartbeat --external-ca --help --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_node() { + local subcommands=" + demote + inspect + ls list + promote + rm remove + ps + update + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_node_demote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=manager + esac +} + +_docker_node_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes_plus_self + esac +} + +_docker_node_list() { + _docker_node_ls +} + +_docker_node_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_nodes --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_nodes --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_node_promote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=worker + esac +} + +_docker_node_remove() { + _docker_node_rm +} + +_docker_node_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes + esac +} + +_docker_node_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --no-resolve --no-trunc" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes_plus_self + ;; + esac +} + +_docker_node_update() { + case "$prev" in + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --role) + COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) + return + ;; + --label-add|--label-rm) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--availability --help --label-add --label-rm --role" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes + esac +} + +_docker_pause() { + _docker_container_pause +} + +_docker_plugin() { + local subcommands=" + create + disable + enable + inspect + install + ls + push + rm + set + upgrade + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_create() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--compress --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + # reponame + return + elif [ $cword -eq $((counter + 1)) ]; then + _filedir -d + fi + ;; + esac +} + +_docker_plugin_disable() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_enable() { + case "$prev" in + --timeout) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --timeout" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--timeout') + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_inspect() { + case "$prev" in + --format|f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_plugins_installed + ;; + esac +} + +_docker_plugin_install() { + case "$prev" in + --alias) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--alias --disable --disable-content-trust=false --grant-all-permissions --help" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_list() { + _docker_plugin_ls +} + +_docker_plugin_ls() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --no-trunc" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_remove() { + _docker_plugin_rm +} + +_docker_plugin_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_plugins_installed + ;; + esac +} + +_docker_plugin_set() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_upgrade() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--disable-content-trust --grant-all-permissions --help --skip-remote-check" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + __ltrim_colon_completions "$cur" + elif [ $cword -eq $((counter + 1)) ]; then + local plugin_images="$(__docker_plugins_installed)" + COMPREPLY=( $(compgen -S : -W "${plugin_images%:*}" -- "$cur") ) + __docker_nospace + fi + ;; + esac +} + + +_docker_port() { + _docker_container_port +} + +_docker_ps() { + _docker_container_ls +} + +_docker_pull() { + _docker_image_pull +} + +_docker_push() { + _docker_image_push +} + +_docker_rename() { + _docker_container_rename +} + +_docker_restart() { + _docker_container_restart +} + +_docker_rm() { + _docker_container_rm +} + +_docker_rmi() { + _docker_image_rm +} + +_docker_run() { + _docker_container_run +} + +_docker_save() { + _docker_image_save +} + + +_docker_secret() { + local subcommands=" + create + inspect + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_create() { + case "$prev" in + --label|-l) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --label -l" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_list() { + _docker_secret_ls +} + +_docker_secret_ls() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_remove() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_rm() { + _docker_secret_remove +} + + + +_docker_search() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + is-automated) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + is-official) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "is-automated is-official stars" -- "$cur" ) ) + __docker_nospace + return + ;; + --limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter --help --limit --no-trunc" -- "$cur" ) ) + ;; + esac +} + + +_docker_stack() { + local subcommands=" + deploy + ls + ps + rm + services + " + local aliases=" + down + list + remove + up + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_deploy() { + case "$prev" in + --bundle-file) + if __docker_is_experimental ; then + _filedir dab + return + fi + ;; + --compose-file|-c) + _filedir yml + return + ;; + esac + + case "$cur" in + -*) + local options="--compose-file -c --help --with-registry-auth" + __docker_is_experimental && options+=" --bundle-file" + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_down() { + _docker_stack_rm +} + +_docker_stack_list() { + _docker_stack_ls +} + +_docker_stack_ls() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) + return + ;; + id) + __docker_complete_stacks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_stacks --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id name desired-state" -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve --no-trunc" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_remove() { + _docker_stack_rm +} + +_docker_stack_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_services() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + label) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id label name" -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_up() { + _docker_stack_deploy +} + + +_docker_start() { + _docker_container_start +} + +_docker_stats() { + _docker_container_stats +} + +_docker_stop() { + _docker_container_stop +} + + +_docker_system() { + local subcommands=" + df + events + info + prune + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_system_df() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --verbose -v" -- "$cur" ) ) + ;; + esac +} + +_docker_system_events() { + local key=$(__docker_map_key_of_current_option '-f|--filter') + case "$key" in + container) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + daemon) + local name=$(__docker_q info | sed -n 's/^\(ID\|Name\): //p') + COMPREPLY=( $( compgen -W "$name" -- "${cur##*=}" ) ) + return + ;; + event) + COMPREPLY=( $( compgen -W " + attach + commit + connect + copy + create + delete + destroy + detach + die + disconnect + exec_create + exec_detach + exec_start + export + health_status + import + kill + load + mount + oom + pause + pull + push + reload + rename + resize + restart + save + start + stop + tag + top + unmount + unpause + untag + update + " -- "${cur##*=}" ) ) + return + ;; + image) + cur="${cur##*=}" + __docker_complete_images + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + type) + COMPREPLY=( $( compgen -W "container daemon image network volume" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --since|--until) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --since --until --format" -- "$cur" ) ) + ;; + esac +} + +_docker_system_info() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_system_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --help" -- "$cur" ) ) + ;; + esac +} + + +_docker_tag() { + _docker_image_tag +} + +_docker_unpause() { + _docker_container_unpause +} + +_docker_update() { + _docker_container_update +} + +_docker_top() { + _docker_container_top +} + +_docker_version() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_create() { + case "$prev" in + --driver|-d) + __docker_complete_plugins_bundled --type Volume + return + ;; + --label|--opt|-o) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--driver -d --help --label --opt -o" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + dangling) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Volume + return + ;; + name) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "dangling driver label name" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume() { + local subcommands=" + create + inspect + ls + prune + rm + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_wait() { + _docker_container_wait +} + +_docker() { + local previous_extglob_setting=$(shopt -p extglob) + shopt -s extglob + + local management_commands=( + container + image + network + node + plugin + secret + service + stack + system + volume + ) + + local top_level_commands=( + build + login + logout + run + search + version + ) + + local legacy_commands=( + commit + cp + create + diff + events + exec + export + history + images + import + info + inspect + kill + load + logs + pause + port + ps + pull + push + rename + restart + rm + rmi + save + start + stats + stop + swarm + tag + top + unpause + update + wait + ) + + local experimental_commands=( + checkpoint + deploy + ) + + local commands=(${management_commands[*]} ${top_level_commands[*]}) + [ -z "$DOCKER_HIDE_LEGACY_COMMANDS" ] && commands+=(${legacy_commands[*]}) + + # These options are valid as global options for all client commands + # and valid as command options for `docker daemon` + local global_boolean_options=" + --debug -D + --tls + --tlsverify + " + local global_options_with_args=" + --config + --host -H + --log-level -l + --tlscacert + --tlscert + --tlskey + " + + local host config + + COMPREPLY=() + local cur prev words cword + _get_comp_words_by_ref -n : cur prev words cword + + local command='docker' command_pos=0 subcommand_pos + local counter=1 + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + # save host so that completion can use custom daemon + --host|-H) + (( counter++ )) + host="${words[$counter]}" + ;; + # save config so that completion can use custom configuration directories + --config) + (( counter++ )) + config="${words[$counter]}" + ;; + $(__docker_to_extglob "$global_options_with_args") ) + (( counter++ )) + ;; + -*) + ;; + =) + (( counter++ )) + ;; + *) + command="${words[$counter]}" + command_pos=$counter + break + ;; + esac + (( counter++ )) + done + + local binary="${words[0]}" + if [[ $binary == ?(*/)dockerd ]] ; then + # for the dockerd binary, we reuse completion of `docker daemon`. + # dockerd does not have subcommands and global options. + command=daemon + command_pos=0 + fi + + local completions_func=_docker_${command//-/_} + declare -F $completions_func >/dev/null && $completions_func + + eval "$previous_extglob_setting" + return 0 +} + +eval "$__docker_previous_extglob_setting" +unset __docker_previous_extglob_setting + +complete -F _docker docker dockerd diff --git a/vendor/github.com/moby/moby/contrib/completion/fish/docker.fish b/vendor/github.com/moby/moby/contrib/completion/fish/docker.fish new file mode 100644 index 0000000..2715cb1 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/completion/fish/docker.fish @@ -0,0 +1,405 @@ +# docker.fish - docker completions for fish shell +# +# This file is generated by gen_docker_fish_completions.py from: +# https://github.com/barnybug/docker-fish-completion +# +# To install the completions: +# mkdir -p ~/.config/fish/completions +# cp docker.fish ~/.config/fish/completions +# +# Completion supported: +# - parameters +# - commands +# - containers +# - images +# - repositories + +function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' + for i in (commandline -opc) + if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats + return 1 + end + end + return 0 +end + +function __fish_print_docker_containers --description 'Print a list of docker containers' -a select + switch $select + case running + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF)}' | tr ',' '\n' + case stopped + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF)}' | tr ',' '\n' + case all + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF)}' | tr ',' '\n' + end +end + +function __fish_print_docker_images --description 'Print a list of docker images' + docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1":"$2}' +end + +function __fish_print_docker_repositories --description 'Print a list of docker repositories' + docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1}' | command sort | command uniq +end + +# common options +complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the Engine API. Default is cors disabled" +complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge' +complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" +complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains' +complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' +complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' +complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" +complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" +complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' +complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level ("debug", "info", "warn", "error", "fatal")' +complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' +complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' +complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. SELinux does not presently support the BTRFS storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' + +# subcommands +# attach +complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" + +# build +complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile(Default is 'Dockerfile' at context root)" +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the build output and print image ID on success' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' + +# commit +complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith ")' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" + +# cp +complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders between a container and the local filesystem" +complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' + +# create +complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of :alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image" + +# diff +complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" + +# events +complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l format -d 'Format the output using the given go template' + +# exec +complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" + +# export +complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" + +# history +complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" + +# images +complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" + +# import +complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' +complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' + +# info +complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' +complete -c docker -A -f -n '__fish_seen_subcommand_from info' -s f -l format -d 'Format the output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from info' -l help -d 'Print usage' + +# inspect +complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s s -l size -d 'Display total file sizes if the type is container.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" + +# kill +complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" + +# load +complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' + +# login +complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Log in to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' + +# logout +complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' + +# logs +complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" + +# port +complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" + +# pause +complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container" + +# ps +complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' + +# pull +complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" + +# push +complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" + +# rename +complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' + +# restart +complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" + +# rm +complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -a '(__fish_print_docker_containers all)' -d "Container" + +# rmi +complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" + +# run +complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of :alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l stop-signal -d 'Signal to kill a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l tmpfs -d 'Mount tmpfs on a directory' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" + +# save +complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to an file, instead of STDOUT' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" + +# search +complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least x stars' + +# start +complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" + +# stats +complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" + +# stop +complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" + +# tag +complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' + +# top +complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" + +# unpause +complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' +complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" + +# version +complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -s f -l format -d 'Format the output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -l help -d 'Print usage' + +# wait +complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" diff --git a/vendor/github.com/moby/moby/contrib/completion/powershell/readme.txt b/vendor/github.com/moby/moby/contrib/completion/powershell/readme.txt new file mode 100644 index 0000000..18e1b53 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/completion/powershell/readme.txt @@ -0,0 +1 @@ +See https://github.com/samneirinck/posh-docker \ No newline at end of file diff --git a/vendor/github.com/moby/moby/contrib/completion/zsh/REVIEWERS b/vendor/github.com/moby/moby/contrib/completion/zsh/REVIEWERS new file mode 100644 index 0000000..03ee2dd --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/completion/zsh/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/moby/moby/contrib/completion/zsh/_docker b/vendor/github.com/moby/moby/contrib/completion/zsh/_docker new file mode 100644 index 0000000..60cfe05 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/completion/zsh/_docker @@ -0,0 +1,2929 @@ +#compdef docker dockerd +# +# zsh completion for docker (http://docker.com) +# +# version: 0.3.0 +# github: https://github.com/felixr/docker-zsh-completion +# +# contributors: +# - Felix Riedel +# - Steve Durrheimer +# - Vincent Bernat +# +# license: +# +# Copyright (c) 2013, Felix Riedel +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# Short-option stacking can be enabled with: +# zstyle ':completion:*:*:docker:*' option-stacking yes +# zstyle ':completion:*:*:docker-*:*' option-stacking yes +__docker_arguments() { + if zstyle -t ":completion:${curcontext}:" option-stacking; then + print -- -s + fi +} + +__docker_get_containers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local kind type line s + declare -a running stopped lines args names + + kind=$1; shift + type=$1; shift + [[ $kind = (stopped|all) ]] && args=($args -a) + + lines=(${(f)${:-"$(_call_program commands docker $docker_options ps --format 'table' --no-trunc $args)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 # Last column, should go to the end of the line + lines=(${lines[2,-1]}) + + # Container ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}[0,12]}" + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + fi + + # Names: we only display the one without slash. All other names + # are generated and may clutter the completion. However, with + # Swarm, all names may be prefixed by the swarm node name. + if [[ $type = (names|all) ]]; then + for line in $lines; do + names=(${(ps:,:)${${line[${begin[NAMES]},${end[NAMES]}]}%% *}}) + # First step: find a common prefix and strip it (swarm node case) + (( ${#${(u)names%%/*}} == 1 )) && names=${names#${names[1]%%/*}/} + # Second step: only keep the first name without a / + s=${${names:#*/*}[1]} + # If no name, well give up. + (( $#s != 0 )) || continue + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + fi + + [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0 + [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0 + return ret +} + +__docker_complete_stopped_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers stopped all "$@" +} + +__docker_complete_running_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers running all "$@" +} + +__docker_complete_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all all "$@" +} + +__docker_complete_containers_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all ids "$@" +} + +__docker_complete_containers_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all names "$@" +} + +__docker_complete_info_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + emulate -L zsh + setopt extendedglob + local -a plugins + plugins=(${(ps: :)${(M)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Plugins:}%%$'\n'^ *}}:# $1: *}## $1: }) + _describe -t plugins "$1 plugins" plugins && ret=0 + return ret +} + +__docker_complete_images() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a images + images=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) + _describe -t docker-images "images" images && ret=0 + __docker_complete_repositories_with_tags && ret=0 + return ret +} + +__docker_complete_repositories() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos + repos=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}%% *}[2,-1]}) + repos=(${repos#}) + _describe -t docker-repos "repositories" repos && ret=0 + return ret +} + +__docker_complete_repositories_with_tags() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos onlyrepos matched + declare m + repos=(${${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/ ##/:::}%% *}) + repos=(${${repos%:::}#}) + # Check if we have a prefix-match for the current prefix. + onlyrepos=(${repos%::*}) + for m in $onlyrepos; do + [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && { + # Yes, complete with tags + repos=(${${repos/:::/:}/:/\\:}) + _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0 + return ret + } + done + # No, only complete repositories + onlyrepos=(${${repos%:::*}/:/\\:}) + _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0 + + return ret +} + +__docker_search() { + [[ $PREFIX = -* ]] && return 1 + local cache_policy + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + local searchterm cachename + searchterm="${words[$CURRENT]%/}" + cachename=_docker-search-$searchterm + + local expl + local -a result + if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ + && ! _retrieve_cache ${cachename#_}; then + _message "Searching for ${searchterm}..." + result=(${${${(f)${:-"$(_call_program commands docker $docker_options search $searchterm)"$'\n'}}%% *}[2,-1]}) + _store_cache ${cachename#_} result + fi + _wanted dockersearch expl 'available images' compadd -a result +} + +__docker_get_log_options() { + [[ $PREFIX = -* ]] && return 1 + + integer ret=1 + local log_driver=${opt_args[--log-driver]:-"all"} + local -a awslogs_options fluentd_options gelf_options journald_options json_file_options logentries_options syslog_options splunk_options + + awslogs_options=("awslogs-region" "awslogs-group" "awslogs-stream") + fluentd_options=("env" "fluentd-address" "fluentd-async-connect" "fluentd-buffer-limit" "fluentd-retry-wait" "fluentd-max-retries" "labels" "tag") + gcplogs_options=("env" "gcp-log-cmd" "gcp-project" "labels") + gelf_options=("env" "gelf-address" "gelf-compression-level" "gelf-compression-type" "labels" "tag") + journald_options=("env" "labels" "tag") + json_file_options=("env" "labels" "max-file" "max-size") + logentries_options=("logentries-token") + syslog_options=("env" "labels" "syslog-address" "syslog-facility" "syslog-format" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "tag") + splunk_options=("env" "labels" "splunk-caname" "splunk-capath" "splunk-format" "splunk-gzip" "splunk-gzip-level" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "splunk-verify-connection" "tag") + + [[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0 + [[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0 + [[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0 + [[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0 + [[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0 + [[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0 + [[ $log_driver = (logentries|all) ]] && _describe -t logentries-options "logentries options" logentries_options "$@" && ret=0 + [[ $log_driver = (syslog|all) ]] && _describe -t syslog-options "syslog options" syslog_options "$@" && ret=0 + [[ $log_driver = (splunk|all) ]] && _describe -t splunk-options "splunk options" splunk_options "$@" && ret=0 + + return ret +} + +__docker_complete_log_drivers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + drivers=(awslogs etwlogs fluentd gcplogs gelf journald json-file none splunk syslog) + _describe -t log-drivers "log drivers" drivers && ret=0 + return ret +} + +__docker_complete_log_options() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (syslog-format) + syslog_format_opts=('rfc3164' 'rfc5424' 'rfc5424micro') + _describe -t syslog-format-opts "Syslog format Options" syslog_format_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + __docker_get_log_options -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_detach_keys() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + compset -P "*," + keys=(${:-{a-z}}) + ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}}) + _describe -t detach_keys "[a-z]" keys -qS "," && ret=0 + _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0 +} + +__docker_complete_pid() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local -a opts vopts + + opts=('host') + vopts=('container') + + if compset -P '*:'; then + case "${${words[-1]%:*}#*=}" in + (container) + __docker_complete_running_containers && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t pid-value-opts "PID Options with value" vopts -qS ":" && ret=0 + _describe -t pid-opts "PID Options" opts && ret=0 + fi + + return ret +} + +__docker_complete_runtimes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + emulate -L zsh + setopt extendedglob + local -a runtimes_opts + runtimes_opts=(${(ps: :)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Runtimes: }%%$'\n'^ *}}}) + _describe -t runtimes-opts "runtimes options" runtimes_opts && ret=0 +} + +__docker_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (ancestor) + __docker_complete_images && ret=0 + ;; + (before|since) + __docker_complete_containers && ret=0 + ;; + (health) + health_opts=('healthy' 'none' 'starting' 'unhealthy') + _describe -t health-filter-opts "health filter options" health_opts && ret=0 + ;; + (id) + __docker_complete_containers_ids && ret=0 + ;; + (is-task) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + (name) + __docker_complete_containers_names && ret=0 + ;; + (network) + __docker_complete_networks && ret=0 + ;; + (status) + status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running' 'removing') + _describe -t status-filter-opts "status filter options" status_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('ancestor' 'before' 'exited' 'health' 'id' 'label' 'name' 'network' 'since' 'status' 'volume') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_search_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('is-automated' 'is-official' 'stars') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (is-automated|is-official) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_images_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('before' 'dangling' 'label' 'reference' 'since') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (before|reference|since) + __docker_complete_images && ret=0 + ;; + (dangling) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_events_filter() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('container' 'daemon' 'event' 'image' 'label' 'network' 'type' 'volume') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (container) + __docker_complete_containers && ret=0 + ;; + (daemon) + emulate -L zsh + setopt extendedglob + local -a daemon_opts + daemon_opts=( + ${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Name: }%%$'\n'^ *}} + ${${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'ID: }%%$'\n'^ *}}//:/\\:} + ) + _describe -t daemon-filter-opts "daemon filter options" daemon_opts && ret=0 + ;; + (event) + local -a event_opts + event_opts=('attach' 'commit' 'connect' 'copy' 'create' 'delete' 'destroy' 'detach' 'die' 'disconnect' 'exec_create' 'exec_detach' + 'exec_start' 'export' 'health_status' 'import' 'kill' 'load' 'mount' 'oom' 'pause' 'pull' 'push' 'reload' 'rename' 'resize' 'restart' 'save' 'start' + 'stop' 'tag' 'top' 'unmount' 'unpause' 'untag' 'update') + _describe -t event-filter-opts "event filter options" event_opts && ret=0 + ;; + (image) + __docker_complete_images && ret=0 + ;; + (network) + __docker_complete_networks && ret=0 + ;; + (type) + local -a type_opts + type_opts=('container' 'daemon' 'image' 'network' 'volume') + _describe -t type-filter-opts "type filter options" type_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_prune_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('until') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +# BO checkpoint + +__docker_checkpoint_commands() { + local -a _docker_checkpoint_subcommands + _docker_checkpoint_subcommands=( + "create:Create a checkpoint from a running container" + "ls:List checkpoints for a container" + "rm:Remove a checkpoint" + ) + _describe -t docker-checkpoint-commands "docker checkpoint command" _docker_checkpoint_subcommands +} + +__docker_checkpoint_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--checkpoint-dir=[Use a custom checkpoint storage directory]:dir:_directories" \ + "($help)--leave-running[Leave the container running after checkpoint]" \ + "($help -)1:container:__docker_complete_running_containers" \ + "($help -)2:checkpoint: " && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--checkpoint-dir=[Use a custom checkpoint storage directory]:dir:_directories" \ + "($help -)1:container:__docker_complete_containers" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--checkpoint-dir=[Use a custom checkpoint storage directory]:dir:_directories" \ + "($help -)1:container:__docker_complete_containers" \ + "($help -)2:checkpoint: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_checkpoint_commands" && ret=0 + ;; + esac + + return ret +} + +# EO checkpoint + +# BO container + +__docker_container_commands() { + local -a _docker_container_subcommands + _docker_container_subcommands=( + "attach:Attach to a running container" + "commit:Create a new image from a container's changes" + "cp:Copy files/folders between a container and the local filesystem" + "create:Create a new container" + "diff:Inspect changes on a container's filesystem" + "exec:Run a command in a running container" + "export:Export a container's filesystem as a tar archive" + "inspect:Display detailed information on one or more containers" + "kill:Kill one or more running containers" + "logs:Fetch the logs of a container" + "ls:List containers" + "pause:Pause all processes within one or more containers" + "port:List port mappings or a specific mapping for the container" + "prune:Remove all stopped containers" + "rename:Rename a container" + "restart:Restart one or more containers" + "rm:Remove one or more containers" + "run:Run a command in a new container" + "start:Start one or more stopped containers" + "stats:Display a live stream of container(s) resource usage statistics" + "stop:Stop one or more running containers" + "top:Display the running processes of a container" + "unpause:Unpause all processes within one or more containers" + "update:Update configuration of one or more containers" + "wait:Block until one or more containers stop, then print their exit codes" + ) + _describe -t docker-container-commands "docker container command" _docker_container_subcommands +} + +__docker_container_subcommand() { + local -a _command_args opts_help opts_attach_exec_run_start opts_create_run opts_create_run_update + local expl help="--help" + integer ret=1 + + opts_attach_exec_run_start=( + "($help)--detach-keys=[Escape key sequence used to detach a container]:sequence:__docker_complete_detach_keys" + ) + opts_create_run=( + "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)" + "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " + "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: " + "($help)*--cap-add=[Add Linux capabilities]:capability: " + "($help)*--cap-drop=[Drop Linux capabilities]:capability: " + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " + "($help)--cidfile=[Write the container ID to the file]:CID file:_files" + "($help)--cpus=[Number of CPUs (default 0.000)]:cpus: " + "($help)*--device=[Add a host device to the container]:device:_files" + "($help)*--device-read-bps=[Limit the read rate (bytes per second) from a device]:device:IO rate: " + "($help)*--device-read-iops=[Limit the read rate (IO per second) from a device]:device:IO rate: " + "($help)*--device-write-bps=[Limit the write rate (bytes per second) to a device]:device:IO rate: " + "($help)*--device-write-iops=[Limit the write rate (IO per second) to a device]:device:IO rate: " + "($help)--disable-content-trust[Skip image verification]" + "($help)*--dns=[Custom DNS servers]:DNS server: " + "($help)*--dns-option=[Custom DNS options]:DNS option: " + "($help)*--dns-search=[Custom DNS search domains]:DNS domains: " + "($help)*"{-e=,--env=}"[Environment variables]:environment variable: " + "($help)--entrypoint=[Overwrite the default entrypoint of the image]:entry point: " + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" + "($help)*--expose=[Expose a port from the container without publishing it]: " + "($help)*--group=[Set one or more supplementary user groups for the container]:group:_groups" + "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts" + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" + "($help)--init[Run an init inside the container that forwards signals and reaps processes]" + "($help)--ip=[Container IPv4 address]:IPv4: " + "($help)--ip6=[Container IPv6 address]:IPv6: " + "($help)--ipc=[IPC namespace to use]:IPC namespace: " + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" + "($help)*--link=[Add link to another container]:link:->link" + "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " + "($help)*"{-l=,--label=}"[Container metadata]:label: " + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" + "($help)*--log-opt=[Log driver specific options]:log driver options:__docker_complete_log_options" + "($help)--mac-address=[Container MAC address]:MAC address: " + "($help)--name=[Container name]:name: " + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" + "($help)*--network-alias=[Add network-scoped alias for the container]:alias: " + "($help)--oom-kill-disable[Disable OOM Killer]" + "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]" + "($help)--pids-limit[Tune container pids limit (set -1 for unlimited)]" + "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" + "($help)*"{-p=,--publish=}"[Expose a container's port to the host]:port:_ports" + "($help)--pid=[PID namespace to use]:PID namespace:__docker_complete_pid" + "($help)--privileged[Give extended privileges to this container]" + "($help)--read-only[Mount the container's root filesystem as read only]" + "($help)*--security-opt=[Security options]:security option: " + "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " + "($help)--stop-timeout=[Timeout (in seconds) to stop a container]:time: " + "($help)*--sysctl=-[sysctl options]:sysctl: " + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" + "($help)*--ulimit=[ulimit options]:ulimit: " + "($help)--userns=[Container user namespace]:user namespace:(host)" + "($help)--tmpfs[mount tmpfs]" + "($help)*-v[Bind mount a volume]:volume: " + "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)" + "($help)*--volumes-from=[Mount volumes from the specified container]:volume: " + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + opts_create_run_update=( + "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)" + "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " + "($help)--kernel-memory=[Kernel memory limit in bytes]:Memory limit: " + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " + "($help)--memory-reservation=[Memory soft limit]:Memory limit: " + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " + "($help)--restart=[Restart policy]:restart policy:(no on-failure always unless-stopped)" + ) + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help)--no-stdin[Do not attach stdin]" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help -):containers:__docker_complete_running_containers" && ret=0 + ;; + (commit) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --author)"{-a=,--author=}"[Author]:author: " \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message]:message: " \ + "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \ + "($help -):container:__docker_complete_containers" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (cp) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbol link]" \ + "($help -)1:container:->container" \ + "($help -)2:hostpath:_files" && ret=0 + case $state in + (container) + if compset -P "*:"; then + _files && ret=0 + else + __docker_complete_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (diff) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (exec) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)*"{-e=,--env=}"[Set environment variables]:environment variable: " \ + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ + "($help)--privileged[Give extended Linux capabilities to the command]" \ + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \ + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" \ + "($help -):containers:__docker_complete_running_containers" \ + "($help -)*::command:->anycommand" && ret=0 + case $state in + (anycommand) + shift 1 words + (( CURRENT-- )) + _normal && ret=0 + ;; + esac + ;; + (export) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to a file, instead of stdout]:output file:_files" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (kill) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -s --signal)"{-s=,--signal=}"[Signal to send]:signal:_signals" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (logs) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--details[Show extra details provided to logs]" \ + "($help -f --follow)"{-f,--follow}"[Follow log output]" \ + "($help -s --since)"{-s=,--since=}"[Show logs since this timestamp]:timestamp: " \ + "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ + "($help)--tail=[Output the last K lines]:lines:(1 10 20 50 all)" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers]" \ + "($help)--before=[Show only container created before...]:containers:__docker_complete_containers" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_ps_filters" \ + "($help)--format=[Pretty-print containers using a Go template]:template: " \ + "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \ + "($help -n --last)"{-n=,--last=}"[Show n last created containers (includes all states)]:n:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help)--since=[Show only containers created since...]:containers:__docker_complete_containers" && ret=0 + ;; + (pause|unpause) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (port) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)2:port:_ports" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rename) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):old name:__docker_complete_containers" \ + "($help -):new name: " && ret=0 + ;; + (restart) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_containers_ids" && ret=0 + ;; + (rm) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \ + "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \ + "($help -)*:containers:->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)-f]} == -f || ${words[(r)--force]} == --force ]]; then + __docker_complete_containers && ret=0 + else + __docker_complete_stopped_containers && ret=0 + fi + ;; + esac + ;; + (run) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)--health-cmd=[Command to run to check health]:command: " \ + "($help)--health-interval=[Time between running the check]:time: " \ + "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" \ + "($help)--health-timeout=[Maximum time to allow one check to run]:time: " \ + "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \ + "($help)--rm[Remove intermediate containers when it exits]" \ + "($help)--runtime=[Name of the runtime to be used for that container]:runtime:__docker_complete_runtimes" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help)--stop-signal=[Signal to kill a container]:signal:_signals" \ + "($help)--storage-opt=[Storage driver options for the container]:storage options:->storage-opt" \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + (storage-opt) + if compset -P "*="; then + _message "value" && ret=0 + else + opts=('size') + _describe -t filter-opts "storage options" opts -qS "=" && ret=0 + fi + ;; + esac + ;; + (start) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \ + "($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \ + "($help -)*:containers:__docker_complete_stopped_containers" && ret=0 + ;; + (stats) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers (default shows just running)]" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-stream[Disable streaming stats and only pull the first result]" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (stop) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (top) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)*:: :->ps-arguments" && ret=0 + case $state in + (ps-arguments) + _ps && ret=0 + ;; + esac + ;; + (update) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + opts_create_run_update \ + "($help -)*: :->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then + __docker_complete_stopped_containers && ret=0 + else + __docker_complete_containers && ret=0 + fi + ;; + esac + ;; + (wait) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO container + +# BO image + +__docker_image_commands() { + local -a _docker_image_subcommands + _docker_image_subcommands=( + "build:Build an image from a Dockerfile" + "history:Show the history of an image" + "import:Import the contents from a tarball to create a filesystem image" + "inspect:Display detailed information on one or more images" + "load:Load an image from a tar archive or STDIN" + "ls:List images" + "prune:Remove unused images" + "pull:Pull an image or a repository from a registry" + "push:Push an image or a repository to a registry" + "rm:Remove one or more images" + "save:Save one or more images to a tar archive (streamed to STDOUT by default)" + "tag:Tag an image into a repository" + ) + _describe -t docker-image-commands "docker image command" _docker_image_subcommands +} + +__docker_image_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (build) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--build-arg=[Build-time variables]:=: " \ + "($help)*--cache-from=[Images to consider as cache sources]: :__docker_complete_repositories_with_tags" \ + "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" \ + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " \ + "($help)--compress[Compress the build context using gzip]" \ + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " \ + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " \ + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " \ + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " \ + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " \ + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \ + "($help)--force-rm[Always remove intermediate containers]" \ + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" \ + "($help)*--label=[Set metadata for an image]:label=value: " \ + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " \ + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " \ + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" \ + "($help)--no-cache[Do not use cache when building the image]" \ + "($help)--pull[Attempt to pull a newer version of the image]" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \ + "($help)--rm[Remove intermediate containers after a successful build]" \ + "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " \ + "($help)--squash[Squash newly built layers into a single new layer]" \ + "($help -t --tag)*"{-t=,--tag=}"[Repository, name and tag for the image]: :__docker_complete_repositories_with_tags" \ + "($help)*--ulimit=[ulimit options]:ulimit: " \ + "($help)--userns=[Container user namespace]:user namespace:(host)" \ + "($help -):path or URL:_directories" && ret=0 + ;; + (history) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (import) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message for imported image]:message: " \ + "($help -):URL:(- http:// file://)" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)*:images:__docker_complete_images" && ret=0 + ;; + (load) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -i --input)"{-i=,--input=}"[Read from tar archive file]:archive file:_files -g \"*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)\"" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress the load output]" && ret=0 + ;; + (ls|list) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all images]" \ + "($help)--digests[Show digests]" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -): :__docker_complete_repositories" && ret=0 + case $state in + (filter-options) + __docker_complete_images_filters && ret=0 + ;; + esac + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Remove all unused images, not just dangling ones]" \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (pull) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -):name:__docker_search" && ret=0 + ;; + (push) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image signing]" \ + "($help -): :__docker_complete_images" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help)--no-prune[Do not delete untagged parents]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (save) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to file]:file:_files" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (tag) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):source:__docker_complete_images"\ + "($help -):destination:__docker_complete_repositories_with_tags" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO image + +# BO network + +__docker_network_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (driver) + __docker_complete_info_plugins Network && ret=0 + ;; + (id) + __docker_complete_networks_ids && ret=0 + ;; + (name) + __docker_complete_networks_names && ret=0 + ;; + (type) + type_opts=('builtin' 'custom') + _describe -t type-filter-opts "Type Filter Options" type_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('driver' 'id' 'label' 'name' 'type') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_get_networks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines networks + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options network ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Network ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + _describe -t networks-list "networks" networks "$@" && ret=0 + return ret +} + +__docker_complete_networks() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks all "$@" +} + +__docker_complete_networks_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks ids "$@" +} + +__docker_complete_networks_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks names "$@" +} + +__docker_network_commands() { + local -a _docker_network_subcommands + _docker_network_subcommands=( + "connect:Connect a container to a network" + "create:Creates a new network with a name specified by the user" + "disconnect:Disconnects a container from a network" + "inspect:Displays detailed information on a network" + "ls:Lists all the networks created by the user" + "prune:Remove all unused networks" + "rm:Deletes one or more networks" + ) + _describe -t docker-network-commands "docker network command" _docker_network_subcommands +} + +__docker_network_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (connect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--alias=[Add network-scoped alias for the container]:alias: " \ + "($help)--ip=[Container IPv4 address]:IPv4: " \ + "($help)--ip6=[Container IPv6 address]:IPv6: " \ + "($help)*--link=[Add a link to another container]:link:->link" \ + "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " \ + "($help -)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--attachable[Enable manual container attachment]" \ + "($help)*--aux-address[Auxiliary IPv4 or IPv6 addresses used by network driver]:key=IP: " \ + "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ + "($help)*--gateway=[IPv4 or IPv6 Gateway for the master subnet]:IP: " \ + "($help)--internal[Restricts external access to the network]" \ + "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ + "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \ + "($help)*--ipam-opt=[Custom IPAM plugin options]:opt=value: " \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help)*--label=[Set metadata on a network]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:opt=value: " \ + "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ + "($help -)1:Network Name: " && ret=0 + ;; + (disconnect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--no-trunc[Do not truncate the output]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--format=[Pretty-print networks using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display numeric IDs]" && ret=0 + case $state in + (filter-options) + __docker_network_complete_ls_filters && ret=0 + ;; + esac + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 + ;; + esac + + return ret +} + +# EO network + +# BO node + +__docker_node_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_nodes_ids && ret=0 + ;; + (membership) + membership_opts=('accepted' 'pending' 'rejected') + _describe -t membership-opts "membership options" membership_opts && ret=0 + ;; + (name) + __docker_complete_nodes_names && ret=0 + ;; + (role) + role_opts=('manager' 'worker') + _describe -t role-opts "role options" role_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'membership' 'name' 'role') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_node_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_nodes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines nodes args + + type=$1; shift + filter=$1; shift + [[ $filter != "none" ]] && args=("-f $filter") + + lines=(${(f)${:-"$(_call_program commands docker $docker_options node ls $args)"$'\n'}}) + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Node ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + nodes=($nodes $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + nodes=($nodes $s) + done + fi + + _describe -t nodes-list "nodes" nodes "$@" && ret=0 + return ret +} + +__docker_complete_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all none "$@" +} + +__docker_complete_nodes_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes ids none "$@" +} + +__docker_complete_nodes_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes names none "$@" +} + +__docker_complete_pending_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "membership=pending" "$@" +} + +__docker_complete_manager_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=manager" "$@" +} + +__docker_complete_worker_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=worker" "$@" +} + +__docker_node_commands() { + local -a _docker_node_subcommands + _docker_node_subcommands=( + "demote:Demote a node as manager in the swarm" + "inspect:Display detailed information on one or more nodes" + "ls:List nodes in the swarm" + "promote:Promote a node as manager in the swarm" + "rm:Remove one or more nodes from the swarm" + "ps:List tasks running on one or more nodes, defaults to current node" + "update:Update a node" + ) + _describe -t docker-node-commands "docker node command" _docker_node_subcommands +} + +__docker_node_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force remove a node from the swarm]" \ + "($help -)*:node:__docker_complete_pending_nodes" && ret=0 + ;; + (demote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_manager_nodes" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + case $state in + (filter-options) + __docker_node_complete_ls_filters && ret=0 + ;; + esac + ;; + (promote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_worker_nodes" && ret=0 + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all instances]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + case $state in + (filter-options) + __docker_node_complete_ps_filters && ret=0 + ;; + esac + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--availability=[Availability of the node]:availability:(active pause drain)" \ + "($help)*--label-add=[Add or update a node label]:key=value: " \ + "($help)*--label-rm=[Remove a node label if exists]:label: " \ + "($help)--role=[Role of the node]:role:(manager worker)" \ + "($help -)1:node:__docker_complete_nodes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_node_commands" && ret=0 + ;; + esac + + return ret +} + +# EO node + +# BO plugin + +__docker_complete_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines plugins + + lines=(${(f)${:-"$(_call_program commands docker $docker_options plugin ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Name + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[TAG]},${end[TAG]}]}%% ##}}" + plugins=($plugins $s) + done + + _describe -t plugins-list "plugins" plugins "$@" && ret=0 + return ret +} + +__docker_plugin_commands() { + local -a _docker_plugin_subcommands + _docker_plugin_subcommands=( + "disable:Disable a plugin" + "enable:Enable a plugin" + "inspect:Return low-level information about a plugin" + "install:Install a plugin" + "ls:List plugins" + "push:Push a plugin" + "rm:Remove a plugin" + "set:Change settings for a plugin" + "upgrade:Upgrade an existing plugin" + ) + _describe -t docker-plugin-commands "docker plugin command" _docker_plugin_subcommands +} + +__docker_plugin_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (disable) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the disable of an active plugin]" \ + "($help -)1:plugin:__docker_complete_plugins" && ret=0 + ;; + (enable) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--timeout=[HTTP client timeout (in seconds)]:timeout: " \ + "($help -)1:plugin:__docker_complete_plugins" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \ + "($help -)*:plugin:__docker_complete_plugins" && ret=0 + ;; + (install) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--alias=[Local name for plugin]:alias: " \ + "($help)--disable[Do not enable the plugin on install]" \ + "($help)--disable-content-trust[Skip image verification (default true)]" \ + "($help)--grant-all-permissions[Grant all permissions necessary to run the plugin]" \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help -)*:key=value: " && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--no-trunc[Don't truncate output]" && ret=0 + ;; + (push) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image verification (default true)]" \ + "($help -)1:plugin:__docker_complete_plugins" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the removal of an active plugin]" \ + "($help -)*:plugin:__docker_complete_plugins" && ret=0 + ;; + (set) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help -)*:key=value: " && ret=0 + ;; + (upgrade) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image verification (default true)]" \ + "($help)--grant-all-permissions[Grant all permissions necessary to run the plugin]" \ + "($help)--skip-remote-check[Do not check if specified remote plugin matches existing plugin image]" \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help -):remote: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_plugin_commands" && ret=0 + ;; + esac + + return ret +} + +# EO plugin + +# BO secret + +__docker_secrets() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines secrets + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options secret ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + secrets=($secrets $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + secrets=($secrets $s) + done + fi + + _describe -t secrets-list "secrets" secrets "$@" && ret=0 + return ret +} + +__docker_complete_secrets() { + [[ $PREFIX = -* ]] && return 1 + __docker_secrets all "$@" +} + +__docker_secret_commands() { + local -a _docker_secret_subcommands + _docker_secret_subcommands=( + "create:Create a secret using stdin as content" + "inspect:Display detailed information on one or more secrets" + "ls:List secrets" + "rm:Remove one or more secrets" + ) + _describe -t docker-secret-commands "docker secret command" _docker_secret_subcommands +} + +__docker_secret_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-l=,--label=}"[Secret labels]:label: " \ + "($help -):secret: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_secret_commands" && ret=0 + ;; + esac + + return ret +} + +# EO secret + +# BO service + +__docker_service_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_services_ids && ret=0 + ;; + (name) + __docker_complete_services_names && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_service_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_services() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines services + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options service ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Service ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + _describe -t services-list "services" services "$@" && ret=0 + return ret +} + +__docker_complete_services() { + [[ $PREFIX = -* ]] && return 1 + __docker_services all "$@" +} + +__docker_complete_services_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_services ids "$@" +} + +__docker_complete_services_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_services names "$@" +} + +__docker_service_commands() { + local -a _docker_service_subcommands + _docker_service_subcommands=( + "create:Create a new service" + "inspect:Display detailed information on one or more services" + "logs:Fetch the logs of a service" + "ls:List services" + "rm:Remove one or more services" + "scale:Scale one or multiple replicated services" + "ps:List the tasks of a service" + "update:Update a service" + ) + _describe -t docker-service-commands "docker service command" _docker_service_subcommands +} + +__docker_service_subcommand() { + local -a _command_args opts_help opts_create_update + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + opts_create_update=( + "($help)*--constraint=[Placement constraints]:constraint: " + "($help)--endpoint-mode=[Placement constraints]:mode:(dnsrr vip)" + "($help)*"{-e=,--env=}"[Set environment variables]:env: " + "($help)--health-cmd=[Command to run to check health]:command: " + "($help)--health-interval=[Time between running the check]:time: " + "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" + "($help)--health-timeout=[Maximum time to allow one check to run]:time: " + "($help)--hostname=[Service container hostname]:hostname: " \ + "($help)*--label=[Service labels]:label: " + "($help)--limit-cpu=[Limit CPUs]:value: " + "($help)--limit-memory=[Limit Memory]:value: " + "($help)--log-driver=[Logging driver for service]:logging driver:__docker_complete_log_drivers" + "($help)*--log-opt=[Logging driver options]:log driver options:__docker_complete_log_options" + "($help)*--mount=[Attach a filesystem mount to the service]:mount: " + "($help)*--network=[Network attachments]:network: " + "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" + "($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: " + "($help)--replicas=[Number of tasks]:replicas: " + "($help)--reserve-cpu=[Reserve CPUs]:value: " + "($help)--reserve-memory=[Reserve Memory]:value: " + "($help)--restart-condition=[Restart when condition is met]:mode:(any none on-failure)" + "($help)--restart-delay=[Delay between restart attempts]:delay: " + "($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: " + "($help)--restart-window=[Window used to evaluate the restart policy]:window: " + "($help)*--secret=[Specify secrets to expose to the service]:secret:__docker_complete_secrets" + "($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: " + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-TTY]" + "($help)--update-delay=[Delay between updates]:delay: " + "($help)--update-failure-action=[Action on update failure]:mode:(pause continue)" + "($help)--update-max-failure-ratio=[Failure rate to tolerate during an update]:fraction: " + "($help)--update-monitor=[Duration after each task update to monitor for failure]:window: " + "($help)--update-parallelism=[Maximum number of tasks updated simultaneously]:number: " + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" + "($help)--with-registry-auth[Send registry authentication details to swarm agents]" + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)*--container-label=[Container labels]:label: " \ + "($help)*--dns=[Set custom DNS servers]:DNS: " \ + "($help)*--dns-option=[Set DNS options]:DNS option: " \ + "($help)*--dns-search=[Set custom DNS search domains]:DNS search: " \ + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" \ + "($help)--mode=[Service Mode]:mode:(global replicated)" \ + "($help)--name=[Service name]:name: " \ + "($help)*--publish=[Publish a port]:port: " \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (logs) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--details[Show extra details provided to logs]" \ + "($help -f --follow)"{-f,--follow}"[Follow log output]" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--since=[Show logs since timestamp]:timestamp: " \ + "($help)--tail=[Number of lines to show from the end of the logs]:lines:(1 10 20 50 all)" \ + "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:->filter-options" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + case $state in + (filter-options) + __docker_service_complete_ls_filters && ret=0 + ;; + esac + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (scale) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:->values" && ret=0 + case $state in + (values) + if compset -P '*='; then + _message 'replicas' && ret=0 + else + __docker_complete_services -qS "=" + fi + ;; + esac + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display task IDs]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + case $state in + (filter-options) + __docker_service_complete_ps_filters && ret=0 + ;; + esac + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)--arg=[Service command args]:arguments: _normal" \ + "($help)*--container-label-add=[Add or update container labels]:label: " \ + "($help)*--container-label-rm=[Remove a container label by its key]:label: " \ + "($help)*--dns-add=[Add or update custom DNS servers]:DNS: " \ + "($help)*--dns-rm=[Remove custom DNS servers]:DNS: " \ + "($help)*--dns-option-add=[Add or update DNS options]:DNS option: " \ + "($help)*--dns-option-rm=[Remove DNS options]:DNS option: " \ + "($help)*--dns-search-add=[Add or update custom DNS search domains]:DNS search: " \ + "($help)*--dns-search-rm=[Remove DNS search domains]:DNS search: " \ + "($help)--force[Force update]" \ + "($help)*--group-add=[Add additional supplementary user groups to the container]:group:_groups" \ + "($help)*--group-rm=[Remove previously added supplementary user groups from the container]:group:_groups" \ + "($help)--image=[Service image tag]:image:__docker_complete_repositories" \ + "($help)*--publish-add=[Add or update a port]:port: " \ + "($help)*--publish-rm=[Remove a port(target-port mandatory)]:port: " \ + "($help)--rollback[Rollback to previous specification]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_service_commands" && ret=0 + ;; + esac + + return ret +} + +# EO service + +# BO stack + +__docker_stack_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stack_complete_services_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stacks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines stacks + + lines=(${(f)${:-"$(_call_program commands docker $docker_options stack ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Service ID + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + stacks=($stacks $s) + done + + _describe -t stacks-list "stacks" stacks "$@" && ret=0 + return ret +} + +__docker_complete_stacks() { + [[ $PREFIX = -* ]] && return 1 + __docker_stacks "$@" +} + +__docker_stack_commands() { + local -a _docker_stack_subcommands + _docker_stack_subcommands=( + "deploy:Deploy a new stack or update an existing stack" + "ls:List stacks" + "ps:List the tasks in the stack" + "rm:Remove the stack" + "services:List the services in the stack" + ) + _describe -t docker-stack-commands "docker stack command" _docker_stack_subcommands +} + +__docker_stack_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (deploy|up) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--bundle-file=[Path to a Distributed Application Bundle file]:dab:_files -g \"*.dab\"" \ + "($help -c --compose-file)"{-c=,--compose-file=}"[Path to a Compose file]:compose file:_files -g \"*.(yml|yaml)\"" \ + "($help)--with-registry-auth[Send registry authentication details to Swarm agents]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help && ret=0 + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all tasks]" \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_ps_filters" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (rm|remove|down) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (services) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_services_filters" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_stack_commands" && ret=0 + ;; + esac + + return ret +} + +# EO stack + +# BO swarm + +__docker_swarm_commands() { + local -a _docker_swarm_subcommands + _docker_swarm_subcommands=( + "init:Initialize a swarm" + "join:Join a swarm as a node and/or manager" + "join-token:Manage join tokens" + "leave:Leave a swarm" + "unlock:Unlock swarm" + "unlock-key:Manage the unlock key" + "update:Update the swarm" + ) + _describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands +} + +__docker_swarm_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (init) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--advertise-addr=[Advertised address]:ip\:port: " \ + "($help)--autolock[Enable manager autolocking]" \ + "($help)--availability=[Availability of the node]:availability:(active drain pause)" \ + "($help)--cert-expiry=[Validity period for node certificates]:duration: " \ + "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \ + "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ + "($help)--force-new-cluster[Force create a new cluster from current state]" \ + "($help)--listen-addr=[Listen address]:ip\:port: " \ + "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ + "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ + "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 + ;; + (join) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--advertise-addr=[Advertised address]:ip\:port: " \ + "($help)--availability=[Availability of the node]:availability:(active drain pause)" \ + "($help)--listen-addr=[Listen address]:ip\:port: " \ + "($help)--token=[Token for entry into the swarm]:secret: " \ + "($help -):host\:port: " && ret=0 + ;; + (join-token) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display token]" \ + "($help)--rotate[Rotate join token]" \ + "($help -):role:(manager worker)" && ret=0 + ;; + (leave) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force this node to leave the swarm, ignoring warnings]" && ret=0 + ;; + (unlock) + _arguments $(__docker_arguments) \ + $opts_help && ret=0 + ;; + (unlock-key) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display token]" \ + "($help)--rotate[Rotate unlock token]" && ret=0 + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--autolock[Enable manager autolocking]" \ + "($help)--cert-expiry=[Validity period for node certificates]:duration: " \ + "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \ + "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ + "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ + "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ + "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 + ;; + esac + + return ret +} + +# EO swarm + +# BO system + +__docker_system_commands() { + local -a _docker_system_subcommands + _docker_system_subcommands=( + "df:Show docker filesystem usage" + "events:Get real time events from the server" + "info:Display system-wide information" + "prune:Remove unused data" + ) + _describe -t docker-system-commands "docker system command" _docker_system_subcommands +} + +__docker_system_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (df) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -v --verbose)"{-v,--verbose}"[Show detailed information on space usage]" && ret=0 + ;; + (events) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_events_filter" \ + "($help)--since=[Events created since this timestamp]:timestamp: " \ + "($help)--until=[Events created until this timestamp]:timestamp: " \ + "($help)--format=[Format the output using the given go template]:template: " && ret=0 + ;; + (info) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Remove all unused data, not just dangling ones]" \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 + ;; + esac + + return ret +} + +# EO system + +# BO volume + +__docker_volume_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (dangling) + dangling_opts=('true' 'false') + _describe -t dangling-filter-opts "Dangling Filter Options" dangling_opts && ret=0 + ;; + (driver) + __docker_complete_info_plugins Volume && ret=0 + ;; + (name) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('dangling' 'driver' 'label' 'name') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_volumes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a lines volumes + + lines=(${(f)${:-"$(_call_program commands docker $docker_options volume ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Names + local line s + for line in $lines; do + s="${line[${begin[VOLUME NAME]},${end[VOLUME NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + volumes=($volumes $s) + done + + _describe -t volumes-list "volumes" volumes && ret=0 + return ret +} + +__docker_volume_commands() { + local -a _docker_volume_subcommands + _docker_volume_subcommands=( + "create:Create a volume" + "inspect:Display detailed information on one or more volumes" + "ls:List volumes" + "prune:Remove all unused volumes" + "rm:Remove one or more volumes" + ) + _describe -t docker-volume-commands "docker volume command" _docker_volume_subcommands +} + +__docker_volume_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -d --driver)"{-d=,--driver=}"[Volume driver name]:Driver name:(local)" \ + "($help)*--label=[Set metadata for a volume]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:Driver option: " \ + "($help -)1:Volume name: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)1:volume:__docker_complete_volumes" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--format=[Pretty-print volumes using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display volume names]" && ret=0 + case $state in + (filter-options) + __docker_volume_complete_ls_filters && ret=0 + ;; + esac + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the removal of one or more volumes]" \ + "($help -):volume:__docker_complete_volumes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 + ;; + esac + + return ret +} + +# EO volume + +__docker_caching_policy() { + oldp=( "$1"(Nmh+1) ) # 1 hour + (( $#oldp )) +} + +__docker_commands() { + local cache_policy + + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ + && ! _retrieve_cache docker_subcommands; + then + local -a lines + lines=(${(f)"$(_call_program commands docker 2>&1)"}) + _docker_subcommands=(${${${(M)${lines[$((${lines[(i)*Commands:]} + 1)),-1]}:# *}## #}/ ##/:}) + _docker_subcommands=($_docker_subcommands 'daemon:Enable daemon mode' 'help:Show help for a command') + (( $#_docker_subcommands > 2 )) && _store_cache docker_subcommands _docker_subcommands + fi + _describe -t docker-commands "docker command" _docker_subcommands +} + +__docker_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach|commit|cp|create|diff|exec|export|kill|logs|pause|unpause|port|rename|restart|rm|run|start|stats|stop|top|update|wait) + __docker_container_subcommand && ret=0 + ;; + (build|history|import|load|pull|push|save|tag) + __docker_image_subcommand && ret=0 + ;; + (checkpoint) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_checkpoint_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_checkpoint_subcommand && ret=0 + ;; + esac + ;; + (container) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_container_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_container_subcommand && ret=0 + ;; + esac + ;; + (daemon) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--add-runtime=[Register an additional OCI compatible runtime]:runtime:__docker_complete_runtimes" \ + "($help)--api-cors-header=[CORS headers in the Engine API]:CORS headers: " \ + "($help)*--authorization-plugin=[Authorization plugins to load]" \ + "($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \ + "($help)--bip=[Network bridge IP]:IP address: " \ + "($help)--cgroup-parent=[Parent cgroup for all containers]:cgroup: " \ + "($help)--cluster-advertise=[Address or interface name to advertise]:Instance to advertise (host\:port): " \ + "($help)--cluster-store=[URL of the distributed storage backend]:Cluster Store:->cluster-store" \ + "($help)*--cluster-store-opt=[Cluster store options]:Cluster options:->cluster-store-options" \ + "($help)--config-file=[Path to daemon configuration file]:Config File:_files" \ + "($help)--containerd=[Path to containerd socket]:socket:_files -g \"*.sock\"" \ + "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ + "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \ + "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \ + "($help)*--default-ulimit=[Default ulimits for containers]:ulimit: " \ + "($help)--disable-legacy-registry[Disable contacting legacy registries]" \ + "($help)*--dns=[DNS server to use]:DNS: " \ + "($help)*--dns-opt=[DNS options to use]:DNS option: " \ + "($help)*--dns-search=[DNS search domains to use]:DNS search: " \ + "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \ + "($help)--exec-root=[Root directory for execution state files]:path:_directories" \ + "($help)--experimental[Enable experimental features]" \ + "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \ + "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \ + "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \ + "($help -g --graph)"{-g=,--graph=}"[Root of the Docker runtime]:path:_directories" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help)--icc[Enable inter-container communication]" \ + "($help)--init[Run an init inside containers to forward signals and reap processes]" \ + "($help)--init-path=[Path to the docker-init binary]:docker-init binary:_files" \ + "($help)*--insecure-registry=[Enable insecure registry communication]:registry: " \ + "($help)--ip=[Default IP when binding container ports]" \ + "($help)--ip-forward[Enable net.ipv4.ip_forward]" \ + "($help)--ip-masq[Enable IP masquerading]" \ + "($help)--iptables[Enable addition of iptables rules]" \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)*--label=[Key=value labels]:label: " \ + "($help)--live-restore[Enable live restore of docker when containers are still running]" \ + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" \ + "($help)*--log-opt=[Default log driver options for containers]:log driver options:__docker_complete_log_options" \ + "($help)--max-concurrent-downloads[Set the max concurrent downloads for each pull]" \ + "($help)--max-concurrent-uploads[Set the max concurrent uploads for each push]" \ + "($help)--mtu=[Network MTU]:mtu:(0 576 1420 1500 9000)" \ + "($help)--oom-score-adjust=[Set the oom_score_adj for the daemon]:oom-score:(-500)" \ + "($help -p --pidfile)"{-p=,--pidfile=}"[Path to use for daemon PID file]:PID file:_files" \ + "($help)--raw-logs[Full timestamps without ANSI coloring]" \ + "($help)*--registry-mirror=[Preferred Docker registry mirror]:registry mirror: " \ + "($help)--seccomp-profile=[Path to seccomp profile]:path:_files -g \"*.json\"" \ + "($help -s --storage-driver)"{-s=,--storage-driver=}"[Storage driver to use]:driver:(aufs btrfs devicemapper overlay overlay2 vfs zfs)" \ + "($help)--selinux-enabled[Enable selinux support]" \ + "($help)--shutdown-timeout=[Set the shutdown timeout value in seconds]:time: " \ + "($help)*--storage-opt=[Storage driver options]:storage driver options: " \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help)--userland-proxy-path=[Path to the userland proxy binary]:binary:_files" && ret=0 + + case $state in + (cluster-store) + if compset -P '*://'; then + _message 'host:port' && ret=0 + else + store=('consul' 'etcd' 'zk') + _describe -t cluster-store "Cluster Store" store -qS "://" && ret=0 + fi + ;; + (cluster-store-options) + if compset -P '*='; then + _files && ret=0 + else + opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path') + _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0 + fi + ;; + (users-groups) + if compset -P '*:'; then + _groups && ret=0 + else + _describe -t userns-default "default Docker user management" '(default)' && ret=0 + _users && ret=0 + fi + ;; + esac + ;; + (events|info) + __docker_system_subcommand && ret=0 + ;; + (image) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_image_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_image_subcommand && ret=0 + ;; + esac + ;; + (images) + words[1]='ls' + __docker_image_subcommand && ret=0 + ;; + (inspect) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \ + "($help)--type=[Return JSON for specified type]:type:(container image network node plugin service volume)" \ + "($help -)*: :->values" && ret=0 + + case $state in + (values) + if [[ ${words[(r)--type=container]} == --type=container ]]; then + __docker_complete_containers && ret=0 + elif [[ ${words[(r)--type=image]} == --type=image ]]; then + __docker_complete_images && ret=0 + elif [[ ${words[(r)--type=network]} == --type=network ]]; then + __docker_complete_networks && ret=0 + elif [[ ${words[(r)--type=node]} == --type=node ]]; then + __docker_complete_nodes && ret=0 + elif [[ ${words[(r)--type=plugin]} == --type=plugin ]]; then + __docker_complete_plugins && ret=0 + elif [[ ${words[(r)--type=service]} == --type=service ]]; then + __docker_complete_services && ret=0 + elif [[ ${words[(r)--type=volume]} == --type=volume ]]; then + __docker_complete_volumes && ret=0 + else + __docker_complete_containers + __docker_complete_images + __docker_complete_networks + __docker_complete_nodes + __docker_complete_plugins + __docker_complete_services + __docker_complete_volumes && ret=0 + fi + ;; + esac + ;; + (login) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -p --password)"{-p=,--password=}"[Password]:password: " \ + "($help -u --user)"{-u=,--user=}"[Username]:username: " \ + "($help -)1:server: " && ret=0 + ;; + (logout) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -)1:server: " && ret=0 + ;; + (network) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_network_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_network_subcommand && ret=0 + ;; + esac + ;; + (node) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_node_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_node_subcommand && ret=0 + ;; + esac + ;; + (plugin) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_plugin_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_plugin_subcommand && ret=0 + ;; + esac + ;; + (ps) + words[1]='ls' + __docker_container_subcommand && ret=0 + ;; + (rmi) + words[1]='rm' + __docker_image_subcommand && ret=0 + ;; + (search) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ + "($help)--limit=[Maximum returned search results]:limit:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -):term: " && ret=0 + + case $state in + (filter-options) + __docker_complete_search_filters && ret=0 + ;; + esac + ;; + (secret) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_secret_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_secret_subcommand && ret=0 + ;; + esac + ;; + (service) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_service_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_service_subcommand && ret=0 + ;; + esac + ;; + (stack) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_stack_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_stack_subcommand && ret=0 + ;; + esac + ;; + (swarm) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_swarm_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_swarm_subcommand && ret=0 + ;; + esac + ;; + (system) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_system_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_system_subcommand && ret=0 + ;; + esac + ;; + (version) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 + ;; + (volume) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_volume_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_volume_subcommand && ret=0 + ;; + esac + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_commands" && ret=0 + ;; + esac + + return ret +} + +_docker() { + # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. + # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. + if [[ $service != docker ]]; then + _call_function - _$service + return + fi + + local curcontext="$curcontext" state line help="-h --help" + integer ret=1 + typeset -A opt_args + + _arguments $(__docker_arguments) -C \ + "(: -)"{-h,--help}"[Print usage]" \ + "($help)--config[Location of client config files]:path:_directories" \ + "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help -v --version)"{-v,--version}"[Print version information and quit]" \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + local host=${opt_args[-H]}${opt_args[--host]} + local config=${opt_args[--config]} + local docker_options="${host:+--host $host} ${config:+--config $config}" + + case $state in + (command) + __docker_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-$words[1]: + __docker_subcommand && ret=0 + ;; + esac + + return ret +} + +_dockerd() { + integer ret=1 + words[1]='daemon' + __docker_subcommand && ret=0 + return ret +} + +_docker "$@" + +# Local Variables: +# mode: Shell-Script +# sh-indentation: 4 +# indent-tabs-mode: nil +# sh-basic-offset: 4 +# End: +# vim: ft=zsh sw=4 ts=4 et diff --git a/vendor/github.com/moby/moby/contrib/desktop-integration/README.md b/vendor/github.com/moby/moby/contrib/desktop-integration/README.md new file mode 100644 index 0000000..85a01b9 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/desktop-integration/README.md @@ -0,0 +1,11 @@ +Desktop Integration +=================== + +The ./contrib/desktop-integration contains examples of typical dockerized +desktop applications. + +Examples +======== + +* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application +* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application w devices diff --git a/vendor/github.com/moby/moby/contrib/desktop-integration/chromium/Dockerfile b/vendor/github.com/moby/moby/contrib/desktop-integration/chromium/Dockerfile new file mode 100644 index 0000000..5cacd1f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/desktop-integration/chromium/Dockerfile @@ -0,0 +1,36 @@ +# VERSION: 0.1 +# DESCRIPTION: Create chromium container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a Chromium container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download Chromium Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile +# +# # Build chromium image +# docker build -t chromium . +# +# # Run stateful data-on-host chromium. For ephemeral, remove -v /data/chromium:/data +# docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# # To run stateful dockerized data containers +# docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Chromium +RUN apt-get update && apt-get install -y \ + chromium \ + chromium-l10n \ + libcanberra-gtk-module \ + libexif-dev \ + --no-install-recommends + +# Autorun chromium +CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] diff --git a/vendor/github.com/moby/moby/contrib/desktop-integration/gparted/Dockerfile b/vendor/github.com/moby/moby/contrib/desktop-integration/gparted/Dockerfile new file mode 100644 index 0000000..3ddb232 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/desktop-integration/gparted/Dockerfile @@ -0,0 +1,31 @@ +# VERSION: 0.1 +# DESCRIPTION: Create gparted container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a gparted container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download gparted Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile +# +# # Build gparted image +# docker build -t gparted . +# +# docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ +# --device=/dev/sda:/dev/sda \ +# -e DISPLAY=unix$DISPLAY gparted +# + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Gparted and its dependencies +RUN apt-get update && apt-get install -y \ + gparted \ + libcanberra-gtk-module \ + --no-install-recommends + +# Autorun gparted +CMD ["/usr/sbin/gparted"] diff --git a/vendor/github.com/moby/moby/contrib/docker-device-tool/README.md b/vendor/github.com/moby/moby/contrib/docker-device-tool/README.md new file mode 100644 index 0000000..6c54d59 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/docker-device-tool/README.md @@ -0,0 +1,14 @@ +Docker device tool for devicemapper storage driver backend +=================== + +The ./contrib/docker-device-tool contains a tool to manipulate devicemapper thin-pool. + +Compile +======== + + $ make shell + ## inside build container + $ go build contrib/docker-device-tool/device_tool.go + + # if devicemapper version is old and compilation fails, compile with `libdm_no_deferred_remove` tag + $ go build -tags libdm_no_deferred_remove contrib/docker-device-tool/device_tool.go diff --git a/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool.go b/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool.go new file mode 100644 index 0000000..906d064 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool.go @@ -0,0 +1,176 @@ +// +build !windows,!solaris + +package main + +import ( + "flag" + "fmt" + "os" + "path" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/pkg/devicemapper" +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(1) +} + +func byteSizeFromString(arg string) (int64, error) { + digits := "" + rest := "" + last := strings.LastIndexAny(arg, "0123456789") + if last >= 0 { + digits = arg[:last+1] + rest = arg[last+1:] + } + + val, err := strconv.ParseInt(digits, 10, 64) + if err != nil { + return val, err + } + + rest = strings.ToLower(strings.TrimSpace(rest)) + + var multiplier int64 = 1 + switch rest { + case "": + multiplier = 1 + case "k", "kb": + multiplier = 1024 + case "m", "mb": + multiplier = 1024 * 1024 + case "g", "gb": + multiplier = 1024 * 1024 * 1024 + case "t", "tb": + multiplier = 1024 * 1024 * 1024 * 1024 + default: + return 0, fmt.Errorf("Unknown size unit: %s", rest) + } + + return val * multiplier, nil +} + +func main() { + root := flag.String("r", "/var/lib/docker", "Docker root dir") + flDebug := flag.Bool("D", false, "Debug mode") + + flag.Parse() + + if *flDebug { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) + } + + if flag.NArg() < 1 { + usage() + } + + args := flag.Args() + + home := path.Join(*root, "devicemapper") + devices, err := devmapper.NewDeviceSet(home, false, nil, nil, nil) + if err != nil { + fmt.Println("Can't initialize device mapper: ", err) + os.Exit(1) + } + + switch args[0] { + case "status": + status := devices.Status() + fmt.Printf("Pool name: %s\n", status.PoolName) + fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) + fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) + fmt.Printf("Sector size: %d\n", status.SectorSize) + fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) + fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) + break + case "list": + ids := devices.List() + sort.Strings(ids) + for _, id := range ids { + fmt.Println(id) + } + break + case "device": + if flag.NArg() < 2 { + usage() + } + status, err := devices.GetDeviceStatus(args[1]) + if err != nil { + fmt.Println("Can't get device info: ", err) + os.Exit(1) + } + fmt.Printf("Id: %d\n", status.DeviceID) + fmt.Printf("Size: %d\n", status.Size) + fmt.Printf("Transaction Id: %d\n", status.TransactionID) + fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) + fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) + fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) + break + case "resize": + if flag.NArg() < 2 { + usage() + } + + size, err := byteSizeFromString(args[1]) + if err != nil { + fmt.Println("Invalid size: ", err) + os.Exit(1) + } + + err = devices.ResizePool(size) + if err != nil { + fmt.Println("Error resizing pool: ", err) + os.Exit(1) + } + + break + case "snap": + if flag.NArg() < 3 { + usage() + } + + err := devices.AddDevice(args[1], args[2], nil) + if err != nil { + fmt.Println("Can't create snap device: ", err) + os.Exit(1) + } + break + case "remove": + if flag.NArg() < 2 { + usage() + } + + err := devicemapper.RemoveDevice(args[1]) + if err != nil { + fmt.Println("Can't remove device: ", err) + os.Exit(1) + } + break + case "mount": + if flag.NArg() < 3 { + usage() + } + + err := devices.MountDevice(args[1], args[2], "") + if err != nil { + fmt.Println("Can't create snap device: ", err) + os.Exit(1) + } + break + default: + fmt.Printf("Unknown command %s\n", args[0]) + usage() + + os.Exit(1) + } + + return +} diff --git a/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool_windows.go b/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool_windows.go new file mode 100644 index 0000000..da29a2c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool_windows.go @@ -0,0 +1,4 @@ +package main + +func main() { +} diff --git a/vendor/github.com/moby/moby/contrib/dockerize-disk.sh b/vendor/github.com/moby/moby/contrib/dockerize-disk.sh new file mode 100755 index 0000000..444e243 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/dockerize-disk.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +set -e + +if ! command -v qemu-nbd &> /dev/null; then + echo >&2 'error: "qemu-nbd" not found!' + exit 1 +fi + +usage() { + echo "Convert disk image to docker image" + echo "" + echo "usage: $0 image-name disk-image-file [ base-image ]" + echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img" + echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04" +} + +if [ "$#" -lt 2 ]; then + usage + exit 1 +fi + +CURDIR=$(pwd) + +image_name="${1%:*}" +image_tag="${1#*:}" +if [ "$image_tag" == "$1" ]; then + image_tag="latest" +fi + +disk_image_file="$2" +docker_base_image="$3" + +block_device=/dev/nbd0 + +builddir=$(mktemp -d) + +cleanup() { + umount "$builddir/disk_image" || true + umount "$builddir/workdir" || true + qemu-nbd -d $block_device &> /dev/null || true + rm -rf $builddir +} +trap cleanup EXIT + +# Mount disk image +modprobe nbd max_part=63 +qemu-nbd -rc ${block_device} -P 1 "$disk_image_file" +mkdir "$builddir/disk_image" +mount -o ro ${block_device} "$builddir/disk_image" + +mkdir "$builddir/workdir" +mkdir "$builddir/diff" + +base_image_mounts="" + +# Unpack base image +if [ -n "$docker_base_image" ]; then + mkdir -p "$builddir/base" + docker pull "$docker_base_image" + docker save "$docker_base_image" | tar -xC "$builddir/base" + + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + while [ -n "$image_id" ]; do + mkdir -p "$builddir/base/$image_id/layer" + tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer" + + base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh" + image_id=$(docker inspect -f "{{.Parent}}" "$image_id") + done +fi + +# Mount work directory +mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir" + +# Update files +cd $builddir +LC_ALL=C diff -rq disk_image workdir \ + | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \ + | while read action entry; do + case "$action" in + ADD|UPDATE) + cp -a "disk_image$entry" "workdir$entry" + ;; + DEL) + rm -rf "workdir$entry" + ;; + *) + echo "Error: unknown diff line: $action $entry" >&2 + ;; + esac + done + +# Pack new image +new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)" +mkdir -p $builddir/result/$new_image_id +cd diff +tar -cf $builddir/result/$new_image_id/layer.tar * +echo "1.0" > $builddir/result/$new_image_id/VERSION +cat > $builddir/result/$new_image_id/json <<-EOS +{ "docker_version": "1.4.1" +, "id": "$new_image_id" +, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)" +EOS + +if [ -n "$docker_base_image" ]; then + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json +fi + +echo "}" >> $builddir/result/$new_image_id/json + +echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories + +cd $builddir/result + +# mkdir -p $CURDIR/$image_name +# cp -r * $CURDIR/$image_name +tar -c * | docker load diff --git a/vendor/github.com/moby/moby/contrib/download-frozen-image-v1.sh b/vendor/github.com/moby/moby/contrib/download-frozen-image-v1.sh new file mode 100755 index 0000000..29d7ff5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/download-frozen-image-v1.sh @@ -0,0 +1,108 @@ +#!/bin/bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@image-id] ..." + echo " ie: $0 /tmp/hello-world hello-world" + echo " $0 /tmp/debian-jessie debian:jessie" + echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" + echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + tag="${imageTag#*:}" + imageId="${tag##*@}" + [ "$imageId" != "$tag" ] || imageId= + [ "$tag" != "$imageTag" ] || tag='latest' + tag="${tag%@*}" + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" + + if [ -z "$imageId" ]; then + imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" + imageId="${imageId//\"/}" + fi + + ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" + if [ "${ancestryJson:0:1}" != '[' ]; then + echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" + echo >&2 " $ancestryJson" + exit 1 + fi + + IFS=',' + ancestry=( ${ancestryJson//[\[\] \"]/} ) + unset IFS + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '$imageTag' (${#ancestry[@]} layers)..." + for imageId in "${ancestry[@]}"; do + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . | docker load" diff --git a/vendor/github.com/moby/moby/contrib/download-frozen-image-v2.sh b/vendor/github.com/moby/moby/contrib/download-frozen-image-v2.sh new file mode 100755 index 0000000..111e3fa --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/download-frozen-image-v2.sh @@ -0,0 +1,121 @@ +#!/bin/bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@digest] ..." + echo " $0 /tmp/old-hello-world hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + imageTag="${imageTag#*:}" + digest="${imageTag##*@}" + tag="${imageTag%%@*}" + + # add prefix library if passed official image + if [[ "$image" != *"/"* ]]; then + image="library/$image" + fi + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" + + manifestJson="$(curl -sSL -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/manifests/$digest")" + if [ "${manifestJson:0:1}" != '{' ]; then + echo >&2 "error: /v2/$image/manifests/$digest returned something unexpected:" + echo >&2 " $manifestJson" + exit 1 + fi + + layersFs=$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum') + + IFS=$'\n' + # bash v4 on Windows CI requires CRLF separator + if [ "$(go env GOHOSTOS)" = 'windows' ]; then + major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. -f1) + if [ "$major" -ge 4 ]; then + IFS=$'\r\n' + fi + fi + layers=( ${layersFs} ) + unset IFS + + history=$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]') + imageId=$(echo "$history" | jq --raw-output .[0] | jq --raw-output .id) + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '${image}:${tag}@${digest}' (${#layers[@]} layers)..." + for i in "${!layers[@]}"; do + imageJson=$(echo "$history" | jq --raw-output .[${i}]) + imageId=$(echo "$imageJson" | jq --raw-output .id) + imageLayer=${layers[$i]} + + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + echo "$imageJson" > "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" + curl -SL --progress -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/blobs/$imageLayer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + image="${image#library\/}" + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . | docker load" diff --git a/vendor/github.com/moby/moby/contrib/editorconfig b/vendor/github.com/moby/moby/contrib/editorconfig new file mode 100644 index 0000000..97eda89 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = tab +indent_size = 4 +trim_trailing_whitespace = true + +[*.md] +indent_size = 2 +indent_style = space diff --git a/vendor/github.com/moby/moby/contrib/gitdm/aliases b/vendor/github.com/moby/moby/contrib/gitdm/aliases new file mode 100644 index 0000000..dd5dd34 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/gitdm/aliases @@ -0,0 +1,148 @@ +Danny.Yates@mailonline.co.uk danny@codeaholics.org +KenCochrane@gmail.com kencochrane@gmail.com +LÉVEIL thomasleveil@gmail.com +Vincent.Bernat@exoscale.ch bernat@luffy.cx +acidburn@docker.com jess@docker.com +admin@jtlebi.fr jt@yadutaf.fr +ahmetalpbalkan@gmail.com ahmetb@microsoft.com +aj@gandi.net aj@gandi.net +albers@users.noreply.github.com github@albersweb.de +alexander.larsson@gmail.com alexl@redhat.com +amurdaca@redhat.com antonio.murdaca@gmail.com +amy@gandi.net aj@gandi.net +andrew.weiss@microsoft.com andrew.weiss@outlook.com +angt@users.noreply.github.com adrien@gallouet.fr +ankushagarwal@users.noreply.github.com ankushagarwal11@gmail.com +anonymouse2048@gmail.com lheckemann@twig-world.com +anusha@docker.com anusha.ragunathan@docker.com +asarai@suse.com asarai@suse.de +avi.miller@gmail.com avi.miller@oracle.com +bernat@luffy.cx Vincent.Bernat@exoscale.ch +bgoff@cpuguy83-mbp.home cpuguy83@gmail.com +brandon@ifup.co brandon@ifup.org +brent@docker.com brent.salisbury@docker.com +charmes.guillaume@gmail.com guillaume.charmes@docker.com +chenchun.feed@gmail.com ramichen@tencent.com +chooper@plumata.com charles.hooper@dotcloud.com +crosby.michael@gmail.com michael@docker.com +crosbymichael@gmail.com michael@docker.com +cyphar@cyphar.com asarai@suse.de +daehyeok@daehyeok-ui-MacBook-Air.local daehyeok@gmail.com +daehyeok@daehyeokui-MacBook-Air.local daehyeok@gmail.com +daniel.norberg@gmail.com dano@spotify.com +daniel@dotcloud.com daniel.mizyrycki@dotcloud.com +darren@rancher.com darren.s.shepherd@gmail.com +dave@dtucker.co.uk dt@docker.com +dev@vvieux.com victor.vieux@docker.com +dgasienica@zynga.com daniel@gasienica.ch +dnephin@gmail.com dnephin@docker.com +dominikh@fork-bomb.org dominik@honnef.co +dqminh89@gmail.com dqminh@cloudflare.com +dsxiao@dataman-inc.com dxiao@redhat.com +duglin@users.noreply.github.com dug@us.ibm.com +eric.hanchrow@gmail.com ehanchrow@ine.com +erik+github@hollensbe.org github@hollensbe.org +estesp@gmail.com estesp@linux.vnet.ibm.com +ewindisch@docker.com eric@windisch.us +f.joffrey@gmail.com joffrey@docker.com +fkautz@alumni.cmu.edu fkautz@redhat.com +frank.rosquin@gmail.com frank.rosquin+github@gmail.com +gh@mattyw.net mattyw@me.com +git@julienbordellier.com julienbordellier@gmail.com +github@metaliveblog.com github@developersupport.net +github@srid.name sridharr@activestate.com +guillaume.charmes@dotcloud.com guillaume.charmes@docker.com +guillaume@charmes.net guillaume.charmes@docker.com +guillaume@docker.com guillaume.charmes@docker.com +guillaume@dotcloud.com guillaume.charmes@docker.com +haoshuwei24@gmail.com haosw@cn.ibm.com +hollie.teal@docker.com hollie@docker.com +hollietealok@users.noreply.github.com hollie@docker.com +hsinko@users.noreply.github.com 21551195@zju.edu.cn +iamironbob@gmail.com altsysrq@gmail.com +icecrime@gmail.com arnaud.porterie@docker.com +jatzen@gmail.com jacob@jacobatzen.dk +jeff@allingeek.com jeff.nickoloff@gmail.com +jefferya@programmerq.net jeff@docker.com +jerome.petazzoni@dotcloud.com jerome.petazzoni@dotcloud.com +jfrazelle@users.noreply.github.com jess@docker.com +jhoward@microsoft.com John.Howard@microsoft.com +jlhawn@berkeley.edu josh.hawn@docker.com +joffrey@dotcloud.com joffrey@docker.com +john.howard@microsoft.com John.Howard@microsoft.com +jp@enix.org jerome.petazzoni@dotcloud.com +justin.cormack@unikernel.com justin.cormack@docker.com +justin.simonelis@PTS-JSIMON2.toronto.exclamation.com justin.p.simonelis@gmail.com +justin@specialbusservice.com justin.cormack@docker.com +katsuta_soshi@cyberagent.co.jp soshi.katsuta@gmail.com +kuehnle@online.de git.nivoc@neverbox.com +kwk@users.noreply.github.com konrad.wilhelm.kleine@gmail.com +leijitang@gmail.com leijitang@huawei.com +liubin0329@gmail.com liubin0329@users.noreply.github.com +lk4d4math@gmail.com lk4d4@docker.com +louis@dotcloud.com kalessin@kalessin.fr +lsm5@redhat.com lsm5@fedoraproject.org +lyndaoleary@hotmail.com lyndaoleary29@gmail.com +madhu@socketplane.io madhu@docker.com +martins@noironetworks.com aanm90@gmail.com +mary@docker.com mary.anthony@docker.com +mastahyeti@users.noreply.github.com mastahyeti@gmail.com +maztaim@users.noreply.github.com taim@bosboot.org +me@runcom.ninja antonio.murdaca@gmail.com +mheon@mheonlaptop.redhat.com mheon@redhat.com +michael@crosbymichael.com michael@docker.com +mohitsoni1989@gmail.com mosoni@ebay.com +moxieandmore@gmail.com mary.anthony@docker.com +moyses.furtado@wplex.com.br moysesb@gmail.com +msabramo@gmail.com marc@marc-abramowitz.com +mzdaniel@glidelink.net daniel.mizyrycki@dotcloud.com +nathan.leclaire@gmail.com nathan.leclaire@docker.com +nathanleclaire@gmail.com nathan.leclaire@docker.com +ostezer@users.noreply.github.com ostezer@gmail.com +peter@scraperwiki.com p@pwaller.net +princess@docker.com jess@docker.com +proppy@aminche.com proppy@google.com +qhuang@10.0.2.15 h.huangqiang@huawei.com +resouer@gmail.com resouer@163.com +roberto_hashioka@hotmail.com roberto.hashioka@docker.com +root@vagrant-ubuntu-12.10.vagrantup.com daniel.mizyrycki@dotcloud.com +runcom@linux.com antonio.murdaca@gmail.com +runcom@redhat.com antonio.murdaca@gmail.com +runcom@users.noreply.github.com antonio.murdaca@gmail.com +s@docker.com solomon@docker.com +shawnlandden@gmail.com shawn@churchofgit.com +singh.gurjeet@gmail.com gurjeet@singh.im +sjoerd@byte.nl sjoerd-github@linuxonly.nl +smahajan@redhat.com shishir.mahajan@redhat.com +solomon.hykes@dotcloud.com solomon@docker.com +solomon@dotcloud.com solomon@docker.com +stefanb@us.ibm.com stefanb@linux.vnet.ibm.com +stevvooe@users.noreply.github.com stephen.day@docker.com +superbaloo+registrations.github@superbaloo.net baloo@gandi.net +tangicolin@gmail.com tangicolin@gmail.com +thaJeztah@users.noreply.github.com github@gone.nl +thatcher@dotcloud.com thatcher@docker.com +thatcher@gmx.net thatcher@docker.com +tibor@docker.com teabee89@gmail.com +tiborvass@users.noreply.github.com teabee89@gmail.com +timruffles@googlemail.com oi@truffles.me.uk +tintypemolly@Ohui-MacBook-Pro.local tintypemolly@gmail.com +tj@init.me tejesh.mehta@gmail.com +tristan.carel@gmail.com tristan@cogniteev.com +unclejack@users.noreply.github.com cristian.staretu@gmail.com +unclejacksons@gmail.com cristian.staretu@gmail.com +vbatts@hashbangbash.com vbatts@redhat.com +victor.vieux@dotcloud.com victor.vieux@docker.com +victor@docker.com victor.vieux@docker.com +victor@dotcloud.com victor.vieux@docker.com +victorvieux@gmail.com victor.vieux@docker.com +vieux@docker.com victor.vieux@docker.com +vincent+github@demeester.fr vincent@sbr.pm +vincent@bernat.im bernat@luffy.cx +vojnovski@gmail.com viktor.vojnovski@amadeus.com +whoshuu@gmail.com huu@prismskylabs.com +xiaods@gmail.com dxiao@redhat.com +xlgao@zju.edu.cn xlgao@zju.edu.cn +yestin.sun@polyera.com sunyi0804@gmail.com +yuchangchun1@huawei.com yuchangchun1@huawei.com +zjaffee@us.ibm.com zij@case.edu diff --git a/vendor/github.com/moby/moby/contrib/gitdm/domain-map b/vendor/github.com/moby/moby/contrib/gitdm/domain-map new file mode 100644 index 0000000..1f1849e --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/gitdm/domain-map @@ -0,0 +1,39 @@ +# +# Docker +# + +docker.com Docker +dotcloud.com Docker + +aluzzardi@gmail.com Docker +cpuguy83@gmail.com Docker +derek@mcgstyle.net Docker +github@gone.nl Docker +kencochrane@gmail.com Docker +mickael.laventure@gmail.com Docker +sam.alba@gmail.com Docker +svendowideit@fosiki.com Docker +svendowideit@home.org.au Docker +tonistiigi@gmail.com Docker + +cristian.staretu@gmail.com Docker < 2015-01-01 +cristian.staretu@gmail.com Cisco + +github@hollensbe.org Docker < 2015-01-01 +github@hollensbe.org Cisco + +david.calavera@gmail.com Docker < 2016-04-01 +david.calavera@gmail.com Netlify + +# +# Others +# + +cisco.com Cisco +google.com Google +ibm.com IBM +huawei.com Huawei +microsoft.com Microsoft + +redhat.com Red Hat +mrunalp@gmail.com Red Hat diff --git a/vendor/github.com/moby/moby/contrib/gitdm/generate_aliases.sh b/vendor/github.com/moby/moby/contrib/gitdm/generate_aliases.sh new file mode 100755 index 0000000..dd6a564 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/gitdm/generate_aliases.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# +# This script generates a gitdm compatible email aliases file from a git +# formatted .mailmap file. +# +# Usage: +# $> ./generate_aliases > aliases +# + +cat $1 | \ + grep -v '^#' | \ + sed 's/^[^<]*<\([^>]*\)>/\1/' | \ + grep '<.*>' | sed -e 's/[<>]/ /g' | \ + awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' | \ + sort | uniq diff --git a/vendor/github.com/moby/moby/contrib/gitdm/gitdm.config b/vendor/github.com/moby/moby/contrib/gitdm/gitdm.config new file mode 100644 index 0000000..d9b62b0 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/gitdm/gitdm.config @@ -0,0 +1,17 @@ +# +# EmailAliases lets us cope with developers who use more +# than one address. +# +EmailAliases aliases + +# +# EmailMap does the main work of mapping addresses onto +# employers. +# +EmailMap domain-map + +# +# Use GroupMap to map a file full of addresses to the +# same employer +# +# GroupMap company-Docker Docker diff --git a/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile b/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile new file mode 100644 index 0000000..747dc91 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile @@ -0,0 +1,4 @@ +FROM busybox +EXPOSE 80/tcp +COPY httpserver . +CMD ["./httpserver"] diff --git a/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile.solaris b/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile.solaris new file mode 100644 index 0000000..3d0d691 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile.solaris @@ -0,0 +1,4 @@ +FROM solaris +EXPOSE 80/tcp +COPY httpserver . +CMD ["./httpserver"] diff --git a/vendor/github.com/moby/moby/contrib/httpserver/server.go b/vendor/github.com/moby/moby/contrib/httpserver/server.go new file mode 100644 index 0000000..a75d5ab --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/httpserver/server.go @@ -0,0 +1,12 @@ +package main + +import ( + "log" + "net/http" +) + +func main() { + fs := http.FileServer(http.Dir("/static")) + http.Handle("/", fs) + log.Panic(http.ListenAndServe(":80", nil)) +} diff --git a/vendor/github.com/moby/moby/contrib/init/openrc/docker.confd b/vendor/github.com/moby/moby/contrib/init/openrc/docker.confd new file mode 100644 index 0000000..2444031 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/openrc/docker.confd @@ -0,0 +1,13 @@ +# /etc/conf.d/docker: config file for /etc/init.d/docker + +# where the docker daemon output gets piped +#DOCKER_LOGFILE="/var/log/docker.log" + +# where docker's pid get stored +#DOCKER_PIDFILE="/run/docker.pid" + +# where the docker daemon itself is run from +#DOCKERD_BINARY="/usr/bin/dockerd" + +# any other random options you want to pass to docker +DOCKER_OPTS="" diff --git a/vendor/github.com/moby/moby/contrib/init/openrc/docker.initd b/vendor/github.com/moby/moby/contrib/init/openrc/docker.initd new file mode 100644 index 0000000..5d31603 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/openrc/docker.initd @@ -0,0 +1,22 @@ +#!/sbin/openrc-run +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +command="${DOCKERD_BINARY:-/usr/bin/dockerd}" +pidfile="${DOCKER_PIDFILE:-/run/${RC_SVCNAME}.pid}" +command_args="-p \"${pidfile}\" ${DOCKER_OPTS}" +DOCKER_LOGFILE="${DOCKER_LOGFILE:-/var/log/${RC_SVCNAME}.log}" +start_stop_daemon_args="--background \ + --stderr \"${DOCKER_LOGFILE}\" --stdout \"${DOCKER_LOGFILE}\"" + +start_pre() { + checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + ulimit -u unlimited + + return 0 +} diff --git a/vendor/github.com/moby/moby/contrib/init/systemd/REVIEWERS b/vendor/github.com/moby/moby/contrib/init/systemd/REVIEWERS new file mode 100644 index 0000000..b9ba55b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/systemd/REVIEWERS @@ -0,0 +1,3 @@ +Lokesh Mandvekar (@lsm5) +Brandon Philips (@philips) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/moby/moby/contrib/init/systemd/docker.service b/vendor/github.com/moby/moby/contrib/init/systemd/docker.service new file mode 100644 index 0000000..8bfed93 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/systemd/docker.service @@ -0,0 +1,29 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network.target docker.socket firewalld.service +Requires=docker.socket + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd -H fd:// +ExecReload=/bin/kill -s HUP $MAINPID +LimitNOFILE=1048576 +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. +#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/vendor/github.com/moby/moby/contrib/init/systemd/docker.service.rpm b/vendor/github.com/moby/moby/contrib/init/systemd/docker.service.rpm new file mode 100644 index 0000000..6e41892 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/systemd/docker.service.rpm @@ -0,0 +1,28 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network.target firewalld.service + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd +ExecReload=/bin/kill -s HUP $MAINPID +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. +#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/vendor/github.com/moby/moby/contrib/init/systemd/docker.socket b/vendor/github.com/moby/moby/contrib/init/systemd/docker.socket new file mode 100644 index 0000000..7dd9509 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/systemd/docker.socket @@ -0,0 +1,12 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker.service + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker b/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker new file mode 100755 index 0000000..4f9d38d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker @@ -0,0 +1,152 @@ +#!/bin/sh +set -e + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. +### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=docker + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKERD=/usr/bin/dockerd +# This is the pid file managed by docker itself +DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# Check docker is present +if [ ! -x $DOCKERD ]; then + log_failure_msg "$DOCKERD not present or not executable" + exit 1 +fi + +check_init() { + # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly) + if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 + fi +} + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + check_init + + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + if [ "$BASH" ]; then + ulimit -u unlimited + else + ulimit -p unlimited + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKERD" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ + -- \ + -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + check_init + fail_unless_root + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 + log_end_msg $? + ;; + + restart) + check_init + fail_unless_root + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + check_init + fail_unless_root + $0 restart + ;; + + status) + check_init + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKERD" "$DOCKER_DESC" + ;; + + *) + echo "Usage: service docker {start|stop|restart|status}" + exit 1 + ;; +esac diff --git a/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker.default b/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker.default new file mode 100644 index 0000000..c4e9319 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker.default @@ -0,0 +1,20 @@ +# Docker Upstart and SysVinit configuration file + +# +# THIS FILE DOES NOT APPLY TO SYSTEMD +# +# Please see the documentation for "systemd drop-ins": +# https://docs.docker.com/engine/admin/systemd/ +# + +# Customize location of Docker binary (especially for development testing). +#DOCKERD="/usr/local/bin/dockerd" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export DOCKER_TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker b/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker new file mode 100755 index 0000000..df9b02a --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker @@ -0,0 +1,153 @@ +#!/bin/sh +# +# /etc/rc.d/init.d/docker +# +# Daemon for docker.com +# +# chkconfig: 2345 95 95 +# description: Daemon for docker.com + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $network cgconfig +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: start and stop docker +# Description: Daemon for docker.com +### END INIT INFO + +# Source function library. +. /etc/rc.d/init.d/functions + +prog="docker" +unshare=/usr/bin/unshare +exec="/usr/bin/dockerd" +pidfile="/var/run/$prog.pid" +lockfile="/var/lock/subsys/$prog" +logfile="/var/log/$prog" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +prestart() { + service cgconfig status > /dev/null + + if [[ $? != 0 ]]; then + service cgconfig start + fi + +} + +start() { + if [ ! -x $exec ]; then + if [ ! -e $exec ]; then + echo "Docker executable $exec not found" + else + echo "You do not have permission to execute the Docker executable $exec" + fi + exit 5 + fi + + check_for_cleanup + + if ! [ -f $pidfile ]; then + prestart + printf "Starting $prog:\t" + echo "\n$(date)\n" >> $logfile + "$unshare" -m -- $exec $other_args >> $logfile 2>&1 & + pid=$! + touch $lockfile + # wait up to 10 seconds for the pidfile to exist. see + # https://github.com/docker/docker/issues/5359 + tries=0 + while [ ! -f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + echo -n '.' + done + if [ ! -f $pidfile ]; then + failure + echo + exit 1 + fi + success + echo + else + failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile -d 300 $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +check_for_cleanup() { + if [ -f ${pidfile} ]; then + /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} + fi +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? diff --git a/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker.sysconfig b/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker.sysconfig new file mode 100644 index 0000000..0864b3d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker.sysconfig @@ -0,0 +1,7 @@ +# /etc/sysconfig/docker +# +# Other arguments to pass to the docker daemon process +# These will be parsed by the sysv initscript and appended +# to the arguments list passed to docker daemon + +other_args="" diff --git a/vendor/github.com/moby/moby/contrib/init/upstart/REVIEWERS b/vendor/github.com/moby/moby/contrib/init/upstart/REVIEWERS new file mode 100644 index 0000000..03ee2dd --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/upstart/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/moby/moby/contrib/init/upstart/docker.conf b/vendor/github.com/moby/moby/contrib/init/upstart/docker.conf new file mode 100644 index 0000000..d58f7d6 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/upstart/docker.conf @@ -0,0 +1,72 @@ +description "Docker daemon" + +start on (filesystem and net-device-up IFACE!=lo) +stop on runlevel [!2345] + +limit nofile 524288 1048576 + +# Having non-zero limits causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +limit nproc unlimited unlimited + +respawn + +kill timeout 20 + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKERD=/usr/bin/dockerd + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + exec "$DOCKERD" $DOCKER_OPTS --raw-logs +end script + +# Don't emit "started" event until docker.sock is ready. +# See https://github.com/docker/docker/issues/6647 +post-start script + DOCKER_OPTS= + DOCKER_SOCKET= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + + if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then + DOCKER_SOCKET=/var/run/docker.sock + else + DOCKER_SOCKET=$(printf "%s" "$DOCKER_OPTS" | grep -oP -e '(-H|--host)\W*unix://\K(\S+)' | sed 1q) + fi + + if [ -n "$DOCKER_SOCKET" ]; then + while ! [ -e "$DOCKER_SOCKET" ]; do + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 + echo "Waiting for $DOCKER_SOCKET" + sleep 0.1 + done + echo "$DOCKER_SOCKET is up" + fi +end script diff --git a/vendor/github.com/moby/moby/contrib/mac-install-bundle.sh b/vendor/github.com/moby/moby/contrib/mac-install-bundle.sh new file mode 100755 index 0000000..2110d04 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mac-install-bundle.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +set -e + +errexit() { + echo "$1" + exit 1 +} + +[ "$(uname -s)" == "Darwin" ] || errexit "This script can only be used on a Mac" + +[ $# -eq 1 ] || errexit "Usage: $0 install|undo" + +BUNDLE="bundles/$(cat VERSION)" +BUNDLE_PATH="$PWD/$BUNDLE" +CLIENT_PATH="$BUNDLE_PATH/cross/darwin/amd64/docker" +DATABASE="$HOME/Library/Containers/com.docker.docker/Data/database" +DATABASE_KEY="$DATABASE/com.docker.driver.amd64-linux/bundle" + +[ -d "$DATABASE" ] || errexit "Docker for Mac must be installed for this script" + +case "$1" in +"install") + [ -d "$BUNDLE" ] || errexit "cannot find bundle $BUNDLE" + [ -e "$CLIENT_PATH" ] || errexit "you need to run make cross first" + [ -e "$BUNDLE/binary-daemon/dockerd" ] || errexit "you need to build binaries first" + [ -f "$BUNDLE/binary-client/docker" ] || errexit "you need to build binaries first" + git -C "$DATABASE" reset --hard >/dev/null + echo "$BUNDLE_PATH" > "$DATABASE_KEY" + git -C "$DATABASE" add "$DATABASE_KEY" + git -C "$DATABASE" commit -m "update bundle to $BUNDLE_PATH" + rm -f /usr/local/bin/docker + cp "$CLIENT_PATH" /usr/local/bin + echo "Bundle installed. Restart Docker to use. To uninstall, reset Docker to factory defaults." + ;; +"undo") + git -C "$DATABASE" reset --hard >/dev/null + [ -f "$DATABASE_KEY" ] || errexit "bundle not set" + git -C "$DATABASE" rm "$DATABASE_KEY" + git -C "$DATABASE" commit -m "remove bundle" + rm -f /usr/local/bin/docker + ln -s "$HOME/Library/Group Containers/group.com.docker/bin/docker" /usr/local/bin + echo "Bundle removed. Using dev versions may cause issues, a reset to factory defaults is recommended." + ;; +esac diff --git a/vendor/github.com/moby/moby/contrib/mkimage-alpine.sh b/vendor/github.com/moby/moby/contrib/mkimage-alpine.sh new file mode 100755 index 0000000..47cd35c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-alpine.sh @@ -0,0 +1,87 @@ +#!/bin/sh + +set -e + +[ $(id -u) -eq 0 ] || { + printf >&2 '%s requires root\n' "$0" + exit 1 +} + +usage() { + printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository]\n' "$0" + exit 1 +} + +tmp() { + TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) + ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) + trap "rm -rf $TMP $ROOTFS" EXIT TERM INT +} + +apkv() { + curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | + grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 +} + +getapk() { + curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk | + tar -xz -C $TMP sbin/apk.static +} + +mkbase() { + $TMP/sbin/apk.static --repository $MAINREPO --update-cache --allow-untrusted \ + --root $ROOTFS --initdb add alpine-base +} + +conf() { + printf '%s\n' $MAINREPO > $ROOTFS/etc/apk/repositories + printf '%s\n' $ADDITIONALREPO >> $ROOTFS/etc/apk/repositories +} + +pack() { + local id + id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) + + docker tag $id alpine:latest + docker run -i -t --rm alpine printf 'alpine:%s with id=%s created!\n' $REL $id +} + +save() { + [ $SAVE -eq 1 ] || return + + tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz +} + +while getopts "hr:m:s" opt; do + case $opt in + r) + REL=$OPTARG + ;; + m) + MIRROR=$OPTARG + ;; + s) + SAVE=1 + ;; + c) + ADDITIONALREPO=community + ;; + *) + usage + ;; + esac +done + +REL=${REL:-edge} +MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} +SAVE=${SAVE:-0} +MAINREPO=$MIRROR/$REL/main +ADDITIONALREPO=$MIRROR/$REL/community +ARCH=${ARCH:-$(uname -m)} + +tmp +getapk +mkbase +conf +pack +save diff --git a/vendor/github.com/moby/moby/contrib/mkimage-arch-pacman.conf b/vendor/github.com/moby/moby/contrib/mkimage-arch-pacman.conf new file mode 100644 index 0000000..45fe03d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-arch-pacman.conf @@ -0,0 +1,92 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = auto + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/vendor/github.com/moby/moby/contrib/mkimage-arch.sh b/vendor/github.com/moby/moby/contrib/mkimage-arch.sh new file mode 100755 index 0000000..f941177 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-arch.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for archlinux and load it into the local +# docker as "archlinux" +# requires root +set -e + +hash pacstrap &>/dev/null || { + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 +} + +hash expect &>/dev/null || { + echo "Could not find expect. Run pacman -S expect" + exit 1 +} + + +export LANG="C.UTF-8" + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) +chmod 755 $ROOTFS + +# packages to ignore for space savings +PKGIGNORE=( + cryptsetup + device-mapper + dhcpcd + iproute2 + jfsutils + linux + lvm2 + man-db + man-pages + mdadm + nano + netctl + openresolv + pciutils + pcmciautils + reiserfsprogs + s-nail + systemd-sysvcompat + usbutils + vi + xfsprogs +) +IFS=',' +PKGIGNORE="${PKGIGNORE[*]}" +unset IFS + +arch="$(uname -m)" +case "$arch" in + armv*) + if pacman -Q archlinuxarm-keyring >/dev/null 2>&1; then + pacman-key --init + pacman-key --populate archlinuxarm + else + echo "Could not find archlinuxarm-keyring. Please, install it and run pacman-key --populate archlinuxarm" + exit 1 + fi + PACMAN_CONF=$(mktemp ${TMPDIR:-/var/tmp}/pacman-conf-archlinux-XXXXXXXXX) + version="$(echo $arch | cut -c 5)" + sed "s/Architecture = armv/Architecture = armv${version}h/g" './mkimage-archarm-pacman.conf' > "${PACMAN_CONF}" + PACMAN_MIRRORLIST='Server = http://mirror.archlinuxarm.org/$arch/$repo' + PACMAN_EXTRA_PKGS='archlinuxarm-keyring' + EXPECT_TIMEOUT=1800 # Most armv* based devices can be very slow (e.g. RPiv1) + ARCH_KEYRING=archlinuxarm + DOCKER_IMAGE_NAME="armv${version}h/archlinux" + ;; + *) + PACMAN_CONF='./mkimage-arch-pacman.conf' + PACMAN_MIRRORLIST='Server = https://mirrors.kernel.org/archlinux/$repo/os/$arch' + PACMAN_EXTRA_PKGS='' + EXPECT_TIMEOUT=60 + ARCH_KEYRING=archlinux + DOCKER_IMAGE_NAME=archlinux + ;; +esac + +export PACMAN_MIRRORLIST + +expect < $ROOTFS/etc/locale.gen +arch-chroot $ROOTFS locale-gen +arch-chroot $ROOTFS /bin/sh -c 'echo $PACMAN_MIRRORLIST > /etc/pacman.d/mirrorlist' + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 +ln -sf /proc/self/fd $DEV/fd + +tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - $DOCKER_IMAGE_NAME +docker run --rm -t $DOCKER_IMAGE_NAME echo Success. +rm -rf $ROOTFS diff --git a/vendor/github.com/moby/moby/contrib/mkimage-archarm-pacman.conf b/vendor/github.com/moby/moby/contrib/mkimage-archarm-pacman.conf new file mode 100644 index 0000000..f4b45f5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-archarm-pacman.conf @@ -0,0 +1,98 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = armv + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +[alarm] +Include = /etc/pacman.d/mirrorlist + +[aur] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/vendor/github.com/moby/moby/contrib/mkimage-busybox.sh b/vendor/github.com/moby/moby/contrib/mkimage-busybox.sh new file mode 100755 index 0000000..b11a6bb --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-busybox.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Generate a very minimal filesystem based on busybox-static, +# and load it into the local docker under the name "busybox". + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' +echo >&2 + +BUSYBOX=$(which busybox) +[ "$BUSYBOX" ] || { + echo "Sorry, I could not locate busybox." + echo "Try 'apt-get install busybox-static'?" + exit 1 +} + +set -e +ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM +mkdir $ROOTFS +cd $ROOTFS + +mkdir bin etc dev dev/pts lib proc sys tmp +touch etc/resolv.conf +cp /etc/nsswitch.conf etc/nsswitch.conf +echo root:x:0:0:root:/:/bin/sh > etc/passwd +echo root:x:0: > etc/group +ln -s lib lib64 +ln -s bin sbin +cp $BUSYBOX bin +for X in $(busybox --list) +do + ln -s busybox bin/$X +done +rm bin/init +ln bin/busybox bin/init +cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib +cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib +for X in console null ptmx random stdin stdout stderr tty urandom zero +do + cp -a /dev/$X dev +done + +tar --numeric-owner -cf- . | docker import - busybox +docker run -i -u root busybox /bin/echo Success. diff --git a/vendor/github.com/moby/moby/contrib/mkimage-crux.sh b/vendor/github.com/moby/moby/contrib/mkimage-crux.sh new file mode 100755 index 0000000..3f0bdca --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-crux.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for CRUX/Linux and load it into the local +# docker as "cruxlinux" +# requires root and the crux iso (http://crux.nu) + +set -e + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" + +ISO=${1} + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) +TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) + +VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') + +# Mount the ISO +mount -o ro,loop $ISO $CRUX + +# Extract pkgutils +tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz + +# Put pkgadd in the $PATH +export PATH="$TMP/usr/bin:$PATH" + +# Install core packages +mkdir -p $ROOTFS/var/lib/pkg +touch $ROOTFS/var/lib/pkg/db +for pkg in $CRUX/crux/core/*; do + pkgadd -r $ROOTFS $pkg +done + +# Remove agetty and inittab config +if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then + echo "Removing agetty from /etc/inittab ..." + chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab +fi + +# Remove kernel source +rm -rf $ROOTFS/usr/src/* + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 + +IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) +docker tag $IMAGE_ID crux:latest +docker run -i -t crux echo Success. + +# Cleanup +umount $CRUX +rm -rf $ROOTFS +rm -rf $CRUX +rm -rf $TMP diff --git a/vendor/github.com/moby/moby/contrib/mkimage-debootstrap.sh b/vendor/github.com/moby/moby/contrib/mkimage-debootstrap.sh new file mode 100755 index 0000000..412a5ce --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-debootstrap.sh @@ -0,0 +1,297 @@ +#!/usr/bin/env bash +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' +echo >&2 + +variant='minbase' +include='iproute,iputils-ping' +arch='amd64' # intentionally undocumented for now +skipDetection= +strictDebootstrap= +justTar= + +usage() { + echo >&2 + + echo >&2 "usage: $0 [options] repo suite [mirror]" + + echo >&2 + echo >&2 'options: (not recommended)' + echo >&2 " -p set an http_proxy for debootstrap" + echo >&2 " -v $variant # change default debootstrap variant" + echo >&2 " -i $include # change default package includes" + echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" + echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" + echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" + echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" + + echo >&2 + echo >&2 " ie: $0 username/debian squeeze" + echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" + + echo >&2 + echo >&2 " ie: $0 username/ubuntu precise" + echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" + + echo >&2 + echo >&2 " ie: $0 -t precise.tar.bz2 precise" + echo >&2 " $0 -t wheezy.tgz wheezy" + echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" + + echo >&2 +} + +# these should match the names found at http://www.debian.org/releases/ +debianStable=wheezy +debianUnstable=sid +# this should match the name found at http://releases.ubuntu.com/ +ubuntuLatestLTS=trusty +# this should match the name found at http://releases.tanglu.org/ +tangluLatest=aequorea + +while getopts v:i:a:p:dst name; do + case "$name" in + p) + http_proxy="$OPTARG" + ;; + v) + variant="$OPTARG" + ;; + i) + include="$OPTARG" + ;; + a) + arch="$OPTARG" + ;; + d) + strictDebootstrap=1 + ;; + s) + skipDetection=1 + ;; + t) + justTar=1 + ;; + ?) + usage + exit 0 + ;; + esac +done +shift $(($OPTIND - 1)) + +repo="$1" +suite="$2" +mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided + +if [ ! "$repo" ] || [ ! "$suite" ]; then + usage + exit 1 +fi + +# some rudimentary detection for whether we need to "sudo" our docker calls +docker='' +if docker version > /dev/null 2>&1; then + docker='docker' +elif sudo docker version > /dev/null 2>&1; then + docker='sudo docker' +elif command -v docker > /dev/null 2>&1; then + docker='docker' +else + echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" + echo >&2 " this script is not likely to work as expected" + sleep 3 + docker='docker' # give us a command-not-found later +fi + +# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory +if [ "$justTar" ]; then + if [ ! -d "$(dirname "$repo")" ]; then + echo >&2 "error: $(dirname "$repo") does not exist" + exit 1 + fi + repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" +fi + +# will be filled in later, if [ -z "$skipDetection" ] +lsbDist='' + +target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +if [ "$suite" = 'lucid' ]; then + # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails + include+=',gpgv' +fi + +set -x + +# bootstrap +mkdir -p "$target" +sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" + +cd "$target" + +if [ -z "$strictDebootstrap" ]; then + # prevent init scripts from running during install/update + # policy-rc.d (for most scripts) + echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null + sudo chmod +x usr/sbin/policy-rc.d + # initctl (for some pesky upstart scripts) + sudo chroot . dpkg-divert --local --rename --add /sbin/initctl + sudo ln -sf /bin/true sbin/initctl + # see https://github.com/docker/docker/issues/446#issuecomment-16953173 + + # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) + sudo chroot . apt-get clean + + if strings usr/bin/dpkg | grep -q unsafe-io; then + # while we're at it, apt is unnecessarily slow inside containers + # this forces dpkg not to call sync() after package extraction and speeds up install + # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization + echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null + # we have this wrapped up in an "if" because the "force-unsafe-io" + # option was added in dpkg 1.15.8.6 + # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), + # and ubuntu lucid/10.04 only has 1.15.5.6 + fi + + # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) + { + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo "DPkg::Post-Invoke { ${aptGetClean} };" + echo "APT::Update::Post-Invoke { ${aptGetClean} };" + echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' + } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null + + # and remove the translations, too + echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null + + # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): + # rm /usr/sbin/policy-rc.d + # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl + # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup + # rm /etc/apt/apt.conf.d/no-cache + # rm /etc/apt/apt.conf.d/no-languages + + if [ -z "$skipDetection" ]; then + # see also rudimentary platform detection in hack/install.sh + lsbDist='' + if [ -r etc/lsb-release ]; then + lsbDist="$(. etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then + lsbDist='Debian' + fi + + case "$lsbDist" in + Debian) + # add the updates and security repositories + if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then + # ${suite}-updates only applies to non-unstable + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + + # same for security updates + echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null + fi + ;; + Ubuntu) + # add the universe, updates, and security repositories + sudo sed -i " + s/ $suite main$/ $suite main universe/; p; + s/ $suite main/ ${suite}-updates main/; p; + s/ $suite-updates main/ ${suite}-security main/ + " etc/apt/sources.list + ;; + Tanglu) + # add the updates repository + if [ "$suite" = "$tangluLatest" ]; then + # ${suite}-updates only applies to stable Tanglu versions + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + fi + ;; + SteamOS) + # add contrib and non-free + sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list + ;; + esac + fi + + # make sure our packages lists are as up to date as we can get them + sudo chroot . apt-get update + sudo chroot . apt-get dist-upgrade -y +fi + +if [ "$justTar" ]; then + # create the tarball file so it has the right permissions (ie, not root) + touch "$repo" + + # fill the tarball + sudo tar --numeric-owner -caf "$repo" . +else + # create the image (and tag $repo:$suite) + sudo tar --numeric-owner -c . | $docker import - $repo:$suite + + # test the image + $docker run -i -t $repo:$suite echo success + + if [ -z "$skipDetection" ]; then + case "$lsbDist" in + Debian) + if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + + if [ -r etc/debian_version ]; then + # tag the specific debian release version (which is only reasonable to tag on debian stable) + ver=$(cat etc/debian_version) + $docker tag $repo:$suite $repo:$ver + fi + fi + ;; + Ubuntu) + if [ "$suite" = "$ubuntuLatestLTS" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Ubuntu version number, if available (12.04, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + Tanglu) + if [ "$suite" = "$tangluLatest" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Tanglu version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + SteamOS) + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific SteamOS version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + esac + fi +fi + +# cleanup +cd "$returnTo" +sudo rm -rf "$target" diff --git a/vendor/github.com/moby/moby/contrib/mkimage-pld.sh b/vendor/github.com/moby/moby/contrib/mkimage-pld.sh new file mode 100755 index 0000000..615c203 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-pld.sh @@ -0,0 +1,73 @@ +#!/bin/sh +# +# Generate a minimal filesystem for PLD Linux and load it into the local docker as "pld". +# https://www.pld-linux.org/packages/docker +# +set -e + +if [ "$(id -u)" != "0" ]; then + echo >&2 "$0: requires root" + exit 1 +fi + +image_name=pld + +tmpdir=$(mktemp -d ${TMPDIR:-/var/tmp}/pld-docker-XXXXXX) +root=$tmpdir/rootfs +install -d -m 755 $root + +# to clean up: +docker rmi $image_name || : + +# build +rpm -r $root --initdb + +set +e +install -d $root/dev/pts +mknod $root/dev/random c 1 8 -m 644 +mknod $root/dev/urandom c 1 9 -m 644 +mknod $root/dev/full c 1 7 -m 666 +mknod $root/dev/null c 1 3 -m 666 +mknod $root/dev/zero c 1 5 -m 666 +mknod $root/dev/console c 5 1 -m 660 +set -e + +poldek -r $root --up --noask -u \ + --noignore \ + -O 'rpmdef=_install_langs C' \ + -O 'rpmdef=_excludedocs 1' \ + vserver-packages \ + bash iproute2 coreutils grep poldek + +# fix netsharedpath, so containers would be able to install when some paths are mounted +sed -i -e 's;^#%_netsharedpath.*;%_netsharedpath /dev/shm:/sys:/proc:/dev:/etc/hostname;' $root/etc/rpm/macros + +# no need for alternatives +poldek-config -c $root/etc/poldek/poldek.conf ignore systemd-init + +# this makes initscripts to believe network is up +touch $root/var/lock/subsys/network + +# cleanup large optional packages +remove_packages="ca-certificates" +for pkg in $remove_packages; do + rpm -r $root -q $pkg && rpm -r $root -e $pkg --nodeps +done + +# cleanup more +rm -v $root/etc/ld.so.cache +rm -rfv $root/var/cache/hrmib/* +rm -rfv $root/usr/share/man/man?/* +rm -rfv $root/usr/share/locale/*/ +rm -rfv $root/usr/share/help/*/ +rm -rfv $root/usr/share/doc/* +rm -rfv $root/usr/src/examples/* +rm -rfv $root/usr/share/pixmaps/* + +# and import +tar --numeric-owner --xattrs --acls -C $root -c . | docker import - $image_name + +# and test +docker run -i -u root $image_name /bin/echo Success. + +rm -r $tmpdir diff --git a/vendor/github.com/moby/moby/contrib/mkimage-rinse.sh b/vendor/github.com/moby/moby/contrib/mkimage-rinse.sh new file mode 100755 index 0000000..7e09350 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-rinse.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. + +# This script is useful on systems with rinse available (e.g., +# building a CentOS image on Debian). See contrib/mkimage-yum.sh for +# a way to build CentOS images on systems with yum installed. + +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' +echo >&2 + +repo="$1" +distro="$2" +mirror="$3" + +if [ ! "$repo" ] || [ ! "$distro" ]; then + self="$(basename $0)" + echo >&2 "usage: $self repo distro [mirror]" + echo >&2 + echo >&2 " ie: $self username/centos centos-5" + echo >&2 " $self username/centos centos-6" + echo >&2 + echo >&2 " ie: $self username/slc slc-5" + echo >&2 " $self username/slc slc-6" + echo >&2 + echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" + echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" + echo >&2 + echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' + echo >&2 ' expected values of "mirror".' + echo >&2 + echo >&2 'This script is tested to work with the original upstream version of rinse,' + echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' + echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' + echo >&2 + exit 1 +fi + +target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" ) +if [ "$mirror" ]; then + rinseArgs+=( --mirror "$mirror" ) +fi + +set -x + +mkdir -p "$target" + +sudo rinse "${rinseArgs[@]}" + +cd "$target" + +# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own +sudo rm -rf dev +sudo mkdir -m 755 dev +( + cd dev + sudo ln -sf /proc/self/fd ./ + sudo mkdir -m 755 pts + sudo mkdir -m 1777 shm + sudo mknod -m 600 console c 5 1 + sudo mknod -m 600 initctl p + sudo mknod -m 666 full c 1 7 + sudo mknod -m 666 null c 1 3 + sudo mknod -m 666 ptmx c 5 2 + sudo mknod -m 666 random c 1 8 + sudo mknod -m 666 tty c 5 0 + sudo mknod -m 666 tty0 c 4 0 + sudo mknod -m 666 urandom c 1 9 + sudo mknod -m 666 zero c 1 5 +) + +# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" +# locales +sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} +# docs and man pages +sudo rm -rf usr/share/{man,doc,info,gnome/help} +# cracklib +sudo rm -rf usr/share/cracklib +# i18n +sudo rm -rf usr/share/i18n +# yum cache +sudo rm -rf var/cache/yum +sudo mkdir -p --mode=0755 var/cache/yum +# sln +sudo rm -rf sbin/sln +# ldconfig +#sudo rm -rf sbin/ldconfig +sudo rm -rf etc/ld.so.cache var/cache/ldconfig +sudo mkdir -p --mode=0755 var/cache/ldconfig + +# allow networking init scripts inside the container to work without extra steps +echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null + +# to restore locales later: +# yum reinstall glibc-common + +version= +if [ -r etc/redhat-release ]; then + version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)" +elif [ -r etc/SuSE-release ]; then + version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)" +fi + +if [ -z "$version" ]; then + echo >&2 "warning: cannot autodetect OS version, using $distro as tag" + sleep 20 + version="$distro" +fi + +sudo tar --numeric-owner -c . | docker import - $repo:$version + +docker run -i -t $repo:$version echo success + +cd "$returnTo" +sudo rm -rf "$target" diff --git a/vendor/github.com/moby/moby/contrib/mkimage-yum.sh b/vendor/github.com/moby/moby/contrib/mkimage-yum.sh new file mode 100755 index 0000000..29da170 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-yum.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. +# +# This script is useful on systems with yum installed (e.g., building +# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way +# to build CentOS images on other systems. + +set -e + +usage() { + cat < +OPTIONS: + -p "" The list of packages to install in the container. + The default is blank. + -g "" The groups of packages to install in the container. + The default is "Core". + -y The path to the yum config to install packages from. The + default is /etc/yum.conf for Centos/RHEL and /etc/dnf/dnf.conf for Fedora +EOOPTS + exit 1 +} + +# option defaults +yum_config=/etc/yum.conf +if [ -f /etc/dnf/dnf.conf ] && command -v dnf &> /dev/null; then + yum_config=/etc/dnf/dnf.conf + alias yum=dnf +fi +install_groups="Core" +while getopts ":y:p:g:h" opt; do + case $opt in + y) + yum_config=$OPTARG + ;; + h) + usage + ;; + p) + install_packages="$OPTARG" + ;; + g) + install_groups="$OPTARG" + ;; + \?) + echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +mkdir -m 755 "$target"/dev +mknod -m 600 "$target"/dev/console c 5 1 +mknod -m 600 "$target"/dev/initctl p +mknod -m 666 "$target"/dev/full c 1 7 +mknod -m 666 "$target"/dev/null c 1 3 +mknod -m 666 "$target"/dev/ptmx c 5 2 +mknod -m 666 "$target"/dev/random c 1 8 +mknod -m 666 "$target"/dev/tty c 5 0 +mknod -m 666 "$target"/dev/tty0 c 4 0 +mknod -m 666 "$target"/dev/urandom c 1 9 +mknod -m 666 "$target"/dev/zero c 1 5 + +# amazon linux yum will fail without vars set +if [ -d /etc/yum/vars ]; then + mkdir -p -m 755 "$target"/etc/yum + cp -a /etc/yum/vars "$target"/etc/yum/ +fi + +if [[ -n "$install_groups" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall $install_groups +fi + +if [[ -n "$install_packages" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y install $install_packages +fi + +yum -c "$yum_config" --installroot="$target" -y clean all + +cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . | docker import - $name:$version + +docker run -i -t --rm $name:$version /bin/bash -c 'echo success' + +rm -rf "$target" diff --git a/vendor/github.com/moby/moby/contrib/mkimage.sh b/vendor/github.com/moby/moby/contrib/mkimage.sh new file mode 100755 index 0000000..13298c8 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] [--compression algo| --no-compression] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" + echo >&2 " $mkimg -t someuser/solaris solaris" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +os= +os=$(uname -o) + +# set up path to gnu tools if solaris +[[ $os == "Solaris" ]] && export PATH=/usr/gnu/bin:$PATH +# TODO check for gnu-tar, gnu-getopt + +# TODO requires root/sudo due to some pkg operations. sigh. +[[ $os == "Solaris" && $EUID != "0" ]] && echo >&2 "image create on Solaris requires superuser privilege" + +optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +compression="auto" +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + --compression) compression="$2" ; shift 2 ;; + --no-compression) compression="none" ; shift 1 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ "$compression" == 'auto' ] || [ -z "$compression" ] +then + compression='xz' +fi + +[ "$compression" == 'none' ] && compression='' + +if [ ! -x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar${compression:+.$compression}" +touch "$tarFile" + +( + set -x + tar --numeric-owner --create --auto-compress --file "$tarFile" --directory "$rootfsDir" --transform='s,^./,,' . +) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/vendor/github.com/moby/moby/contrib/mkimage/.febootstrap-minimize b/vendor/github.com/moby/moby/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 0000000..7749e63 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs and man pages + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/vendor/github.com/moby/moby/contrib/mkimage/busybox-static b/vendor/github.com/moby/moby/contrib/mkimage/busybox-static new file mode 100755 index 0000000..e15322b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/vendor/github.com/moby/moby/contrib/mkimage/debootstrap b/vendor/github.com/moby/moby/contrib/mkimage/debootstrap new file mode 100755 index 0000000..7d56d8e --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/debootstrap @@ -0,0 +1,226 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +shift + +# get path to "chroot" in our current PATH +chrootPath="$(type -P chroot)" +rootfs_chroot() { + # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! + + # set PATH and chroot away! + PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ + "$chrootPath" "$rootfsDir" "$@" +} + +# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... +: ${DEBOOTSTRAP:=debootstrap} + +( + set -x + $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF' + #!/bin/sh + + # For most Docker users, "apt-get install" only happens during "docker build", + # where starting services doesn't work and often fails in humorous ways. This + # prevents those failures by stopping the services from attempting to start. + + exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl + cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" + sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" +) + +# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) +( set -x; rootfs_chroot apt-get clean ) + +# this file is one APT creates to make sure we don't "autoremove" our currently +# in-use kernel, which doesn't really apply to debootstraps/Docker images that +# don't even have kernels installed +rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" + +# Ubuntu 10.04 sucks... :) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' + # For most Docker users, package installs happen during "docker build", which + # doesn't survive power loss and gets restarted clean afterwards anyhow, so + # this minor tweak gives us a nice speedup (much nicer on spinning disks, + # obviously). + + force-unsafe-io + EOF +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + # Since for most Docker users, package installs happen in "docker build" steps, + # they essentially become individual layers due to the way Docker handles + # layering, especially using CoW filesystems. What this means for us is that + # the caches that APT keeps end up just wasting space in those layers, making + # our layers unnecessarily large (especially since we'll normally never use + # these caches again and will instead just "docker build" again and make a brand + # new image). + + # Ideally, these would just be invoking "apt-get clean", but in our testing, + # that ended up being cyclic and we got stuck on APT's lock, so we get this fun + # creation that's essentially just "apt-get clean". + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + + # Note that we do realize this isn't the ideal way to do this, and are always + # open to better suggestions (https://github.com/docker/docker/issues). + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' + # In Docker, we don't often need the "Translations" files, so we're just wasting + # time and space by downloading them, and this inhibits that. For users that do + # need them, it's a simple matter to delete this file and "apt-get update". :) + + Acquire::Languages "none"; + EOF + + echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. + + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". + + Acquire::GzipIndexes "true"; + Acquire::CompressionTypes::Order:: "gz"; + EOF + + # update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed + echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF' + # Since Docker users are looking for the smallest possible final images, the + # following emerges as a very common pattern: + + # RUN apt-get update \ + # && apt-get install -y \ + # && \ + # && apt-get purge -y --auto-remove + + # By default, APT will actually _keep_ packages installed via Recommends or + # Depends if another package Suggests them, even and including if the package + # that originally caused them to be installed is removed. Setting this to + # "false" ensures that APT is appropriately aggressive about removing the + # packages it added. + + # https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant + Apt::AutoRemove::SuggestsImportant "false"; + EOF +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + # normalize to lowercase for easier matching + lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" + case "$lsbDist" in + debian) + # updates and security! + if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + ubuntu) + # add the updates and security repositories + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates /; p; + s/ $suite-updates / ${suite}-security / + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos) + # add contrib and non-free if "main" is the only component + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +( + set -x + + # make sure we're fully up-to-date + rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y' + + # delete all the apt list files since they're big and get stale quickly + rm -rf "$rootfsDir/var/lib/apt/lists"/* + # this forces "apt-get update" in dependent images, which is also good + + mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." +) diff --git a/vendor/github.com/moby/moby/contrib/mkimage/mageia-urpmi b/vendor/github.com/moby/moby/contrib/mkimage/mageia-urpmi new file mode 100755 index 0000000..93fb289 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/mageia-urpmi @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# +# Needs to be run from Mageia 4 or greater for kernel support for docker. +# +# Mageia 4 does not have docker available in official repos, so please +# install and run the docker binary manually. +# +# Tested working versions are for Mageia 2 onwards (inc. cauldron). +# +set -e + +rootfsDir="$1" +shift + +optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@") +eval set -- "$optTemp" +unset optTemp + +installversion= +mirror= +while true; do + case "$1" in + -v|--version) installversion="$2" ; shift 2 ;; + -m|--mirror) mirror="$2" ; shift 2 ;; + --) shift ; break ;; + esac +done + +if [ -z $installversion ]; then + # Attempt to match host version + if [ -r /etc/mageia-release ]; then + installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)" + else + echo "Error: no version supplied and unable to detect host mageia version" + exit 1 + fi +fi + +if [ -z $mirror ]; then + # No mirror provided, default to mirrorlist + mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list" +fi + +( + set -x + urpmi.addmedia --distrib \ + $mirror \ + --urpmi-root "$rootfsDir" + urpmi basesystem-minimal urpmi \ + --auto \ + --no-suggests \ + --urpmi-root "$rootfsDir" \ + --root "$rootfsDir" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi diff --git a/vendor/github.com/moby/moby/contrib/mkimage/rinse b/vendor/github.com/moby/moby/contrib/mkimage/rinse new file mode 100755 index 0000000..75eb4f0 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/rinse @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# specifying --arch below is safe because "$@" can override it and the "latest" one wins :) + +( + set -x + rinse --directory "$rootfsDir" --arch amd64 "$@" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" yum update -y +) diff --git a/vendor/github.com/moby/moby/contrib/mkimage/solaris b/vendor/github.com/moby/moby/contrib/mkimage/solaris new file mode 100755 index 0000000..158970e --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/solaris @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# +# Solaris 12 base image build script. +# +set -e + +# TODO add optional package publisher origin + +rootfsDir="$1" +shift + +# base install +( + set -x + + pkg image-create --full --zone \ + --facet facet.locale.*=false \ + --facet facet.locale.POSIX=true \ + --facet facet.doc=false \ + --facet facet.doc.*=false \ + "$rootfsDir" + + pkg -R "$rootfsDir" set-property use-system-repo true + + pkg -R "$rootfsDir" set-property flush-content-cache-on-success true + + pkg -R "$rootfsDir" install core-os +) + +# Lay in stock configuration, set up milestone +# XXX This all may become optional in a base image +( + # faster to build repository database on tmpfs + REPO_DB=/system/volatile/repository.$$ + export SVCCFG_REPOSITORY=${REPO_DB} + export SVCCFG_DOOR_PATH=$rootfsDir/system/volatile/tmp_repo_door + + # Import base manifests. NOTE These are a combination of basic requirement + # and gleaned from container milestone manifest. They may change. + for m in $rootfsDir/lib/svc/manifest/system/environment.xml \ + $rootfsDir/lib/svc/manifest/system/svc/global.xml \ + $rootfsDir/lib/svc/manifest/system/svc/restarter.xml \ + $rootfsDir/lib/svc/manifest/network/dns/client.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/switch.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/cache.xml \ + $rootfsDir/lib/svc/manifest/milestone/container.xml ; do + svccfg import $m + done + + # Apply system layer profile, deleting unnecessary dependencies + svccfg apply $rootfsDir/etc/svc/profile/generic_container.xml + + # XXX Even if we keep a repo in the base image, this is definitely optional + svccfg apply $rootfsDir/etc/svc/profile/sysconfig/container_sc.xml + + for s in svc:/system/svc/restarter \ + svc:/system/environment \ + svc:/network/dns/client \ + svc:/system/name-service/switch \ + svc:/system/name-service/cache \ + svc:/system/svc/global \ + svc:/milestone/container ;do + svccfg -s $s refresh + done + + # now copy the built up repository into the base rootfs + mv $REPO_DB $rootfsDir/etc/svc/repository.db +) + +# pkg(1) needs the zoneproxy-client running in the container. +# use a simple wrapper to run it as needed. +# XXX maybe we go back to running this in SMF? +mv "$rootfsDir/usr/bin/pkg" "$rootfsDir/usr/bin/wrapped_pkg" +cat > "$rootfsDir/usr/bin/pkg" <<-'EOF' +#!/bin/sh +# +# THIS FILE CREATED DURING DOCKER BASE IMAGE CREATION +# +# The Solaris base image uses the sysrepo proxy mechanism. The +# IPS client pkg(1) requires the zoneproxy-client to reach the +# remote publisher origins through the host. This wrapper script +# enables and disables the proxy client as needed. This is a +# temporary solution. + +/usr/lib/zones/zoneproxy-client -s localhost:1008 +PKG_SYSREPO_URL=http://localhost:1008 /usr/bin/wrapped_pkg "$@" +pkill -9 zoneproxy-client +EOF +chmod +x "$rootfsDir/usr/bin/pkg" diff --git a/vendor/github.com/moby/moby/contrib/nnp-test/Dockerfile b/vendor/github.com/moby/moby/contrib/nnp-test/Dockerfile new file mode 100644 index 0000000..026d869 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/nnp-test/Dockerfile @@ -0,0 +1,9 @@ +FROM buildpack-deps:jessie + +COPY . /usr/src/ + +WORKDIR /usr/src/ + +RUN gcc -g -Wall -static nnp-test.c -o /usr/bin/nnp-test + +RUN chmod +s /usr/bin/nnp-test diff --git a/vendor/github.com/moby/moby/contrib/nnp-test/nnp-test.c b/vendor/github.com/moby/moby/contrib/nnp-test/nnp-test.c new file mode 100644 index 0000000..b767da7 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/nnp-test/nnp-test.c @@ -0,0 +1,10 @@ +#include +#include +#include + +int main(int argc, char *argv[]) +{ + printf("EUID=%d\n", geteuid()); + return 0; +} + diff --git a/vendor/github.com/moby/moby/contrib/nuke-graph-directory.sh b/vendor/github.com/moby/moby/contrib/nuke-graph-directory.sh new file mode 100755 index 0000000..5eeb45c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/nuke-graph-directory.sh @@ -0,0 +1,64 @@ +#!/bin/sh +set -e + +dir="$1" + +if [ -z "$dir" ]; then + { + echo 'This script is for destroying old /var/lib/docker directories more safely than' + echo ' "rm -rf", which can cause data loss or other serious issues.' + echo + echo "usage: $0 directory" + echo " ie: $0 /var/lib/docker" + } >&2 + exit 1 +fi + +if [ "$(id -u)" != 0 ]; then + echo >&2 "error: $0 must be run as root" + exit 1 +fi + +if [ ! -d "$dir" ]; then + echo >&2 "error: $dir is not a directory" + exit 1 +fi + +dir="$(readlink -f "$dir")" + +echo +echo "Nuking $dir ..." +echo ' (if this is wrong, press Ctrl+C NOW!)' +echo + +( set -x; sleep 10 ) +echo + +dir_in_dir() { + inner="$1" + outer="$2" + [ "${inner#$outer}" != "$inner" ] +} + +# let's start by unmounting any submounts in $dir +# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) +for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do + mount="$(readlink -f "$mount" || true)" + if [ "$dir" != "$mount" ] && dir_in_dir "$mount" "$dir"; then + ( set -x; umount -f "$mount" ) + fi +done + +# now, let's go destroy individual btrfs subvolumes, if any exist +if command -v btrfs > /dev/null 2>&1; then + # Find btrfs subvolumes under $dir checking for inode 256 + # Source: http://stackoverflow.com/a/32865333 + for subvol in $(find "$dir" -type d -inum 256 | sort -r); do + if [ "$dir" != "$subvol" ]; then + ( set -x; btrfs subvolume delete "$subvol" ) + fi + done +fi + +# finally, DESTROY ALL THINGS +( shopt -s dotglob; set -x; rm -rf "$dir"/* ) diff --git a/vendor/github.com/moby/moby/contrib/project-stats.sh b/vendor/github.com/moby/moby/contrib/project-stats.sh new file mode 100755 index 0000000..2691c72 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/project-stats.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +## Run this script from the root of the docker repository +## to query project stats useful to the maintainers. +## You will need to install `pulls` and `issues` from +## https://github.com/crosbymichael/pulls + +set -e + +echo -n "Open pulls: " +PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 +echo $PULLS + +echo -n "Pulls alru: " +pulls alru + +echo -n "Open issues: " +ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 +echo $ISSUES + +echo -n "Issues alru: " +issues alru diff --git a/vendor/github.com/moby/moby/contrib/report-issue.sh b/vendor/github.com/moby/moby/contrib/report-issue.sh new file mode 100755 index 0000000..cb54f1a --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/report-issue.sh @@ -0,0 +1,105 @@ +#!/bin/sh + +# This is a convenience script for reporting issues that include a base +# template of information. See https://github.com/docker/docker/pull/8845 + +set -e + +DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"} +DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "} +DOCKER=${DOCKER:-"docker"} +DOCKER_COMMAND="${DOCKER}" +export DOCKER_COMMAND + +# pulled from https://gist.github.com/cdown/1163649 +function urlencode() { + # urlencode + + local length="${#1}" + for (( i = 0; i < length; i++ )); do + local c="${1:i:1}" + case $c in + [a-zA-Z0-9.~_-]) printf "$c" ;; + *) printf '%%%02X' "'$c" + esac + done +} + +function template() { +# this should always match the template from CONTRIBUTING.md + cat <<- EOM + Description of problem: + + + \`docker version\`: + `${DOCKER_COMMAND} -D version` + + + \`docker info\`: + `${DOCKER_COMMAND} -D info` + + + \`uname -a\`: + `uname -a` + + + Environment details (AWS, VirtualBox, physical, etc.): + + + How reproducible: + + + Steps to Reproduce: + 1. + 2. + 3. + + + Actual Results: + + + Expected Results: + + + Additional info: + + + EOM +} + +function format_issue_url() { + if [ ${#@} -ne 2 ] ; then + return 1 + fi + local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}") + local issue_body=$(urlencode "${2}") + echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}" +} + + +echo -ne "Do you use \`sudo\` to call docker? [y|N]: " +read -r -n 1 use_sudo +echo "" + +if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then + export DOCKER_COMMAND="sudo ${DOCKER}" +fi + +echo -ne "Title of new issue?: " +read -r issue_title +echo "" + +issue_url=$(format_issue_url "${issue_title}" "$(template)") + +if which xdg-open 2>/dev/null >/dev/null ; then + echo -ne "Would like to launch this report in your browser? [Y|n]: " + read -r -n 1 launch_now + echo "" + + if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then + xdg-open "${issue_url}" + fi +fi + +echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}" + diff --git a/vendor/github.com/moby/moby/contrib/reprepro/suites.sh b/vendor/github.com/moby/moby/contrib/reprepro/suites.sh new file mode 100755 index 0000000..9ecf99d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/reprepro/suites.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +cd "$(dirname "$BASH_SOURCE")/../.." + +targets_from() { + git fetch -q https://github.com/docker/docker.git "$1" + git ls-tree -r --name-only "$(git rev-parse FETCH_HEAD)" contrib/builder/deb/ | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|^contrib/builder/deb/amd64/|-debootstrap|/Dockerfile$!!g' | grep -v / +} + +release_branch=$(git ls-remote --heads https://github.com/docker/docker.git | awk -F 'refs/heads/' '$2 ~ /^release/ { print $2 }' | sort -V | tail -1) +{ targets_from master; targets_from "$release_branch"; } | sort -u diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE new file mode 100644 index 0000000..d511905 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/Makefile b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/Makefile new file mode 100644 index 0000000..16df33e --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/Makefile @@ -0,0 +1,23 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz + +man: install + sepolicy manpage --domain ${TARGETS}_t + +install: + semodule -i ${TARGETS} + diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/README.md b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/README.md new file mode 100644 index 0000000..7ea3117 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc new file mode 100644 index 0000000..d6cb0e5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc @@ -0,0 +1,29 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) +/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) +/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) +/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) + +/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) + +/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) + +/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.if b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.if new file mode 100644 index 0000000..e087e8b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.if @@ -0,0 +1,523 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + list_dirs_pattern($1, docker_share_t, docker_share_t) + read_files_pattern($1, docker_share_t, docker_share_t) + read_lnk_files_pattern($1, docker_share_t, docker_share_t) +') + +###################################### +## +## Allow the specified domain to execute apache +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`apache_exec',` + gen_require(` + type httpd_exec_t; + ') + + can_exec($1, httpd_exec_t) +') + +###################################### +## +## Allow the specified domain to execute docker shared files +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_share_files',` + gen_require(` + type docker_share_t; + ') + + can_exec($1, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + logging_log_filetrans($1, docker_log_t, dir, "lxc") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +######################################## +## +## Execute docker_auth_exec_t in the docker_auth domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_auth_domtrans',` + gen_require(` + type docker_auth_t, docker_auth_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) +') + +###################################### +## +## Execute docker_auth in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_exec',` + gen_require(` + type docker_auth_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_auth_exec_t) +') + +######################################## +## +## Connect to docker_auth over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_stream_connect',` + gen_require(` + type docker_auth_t, docker_plugin_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) +') + +######################################## +## +## docker domain typebounds calling domain. +## +## +## +## Domain to be typebound. +## +## +# +interface(`docker_typebounds',` + gen_require(` + type docker_t; + ') + + typebounds docker_t $1; +') + +######################################## +## +## Allow any docker_exec_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`docker_entrypoint',` + gen_require(` + type docker_exec_t; + ') + allow $1 docker_exec_t:file entrypoint; +') diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.te b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.te new file mode 100644 index 0000000..4231688 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.te @@ -0,0 +1,399 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +##

+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type docker_auth_t; +type docker_auth_exec_t; +init_daemon_domain(docker_auth_t, docker_auth_exec_t) + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_plugin_var_run_t; +files_pid_file(docker_plugin_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +docker_auth_stream_connect(docker_t) + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) +files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +# +# lxc rules +# + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + systemd_dbus_chat_machined(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + unconfined_domain(docker_t) + unconfined_typebounds(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + # for lxc + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; + virt_sandbox_entrypoint(docker_t) +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; +role system_r types spc_t; + +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; +filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") + +optional_policy(` + systemd_dbus_chat_machined(spc_t) +') + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) + virt_sandbox_entrypoint(spc_t) +') + +######################################## +# +# docker_auth local policy +# +allow docker_auth_t self:fifo_file rw_fifo_file_perms; +allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; +dontaudit docker_auth_t self:capability net_admin; + +docker_stream_connect(docker_auth_t) + +manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) + +domain_use_interactive_fds(docker_auth_t) + +kernel_read_net_sysctls(docker_auth_t) + +auth_use_nsswitch(docker_auth_t) + +files_read_etc_files(docker_auth_t) + +miscfiles_read_localization(docker_auth_t) + +sysnet_dns_name_resolve(docker_auth_t) diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE new file mode 100644 index 0000000..d511905 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile new file mode 100644 index 0000000..16df33e --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile @@ -0,0 +1,23 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz + +man: install + sepolicy manpage --domain ${TARGETS}_t + +install: + semodule -i ${TARGETS} + diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md new file mode 100644 index 0000000..7ea3117 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc new file mode 100644 index 0000000..10b7d52 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc @@ -0,0 +1,33 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) +/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) +/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) +/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) + +/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) + +/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) + +/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) + +# OL7.2 systemd selinux update +/var/run/systemd/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_run_t,s0) +/var/lib/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_lib_t,s0) diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if new file mode 100644 index 0000000..4780af0 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if @@ -0,0 +1,659 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + list_dirs_pattern($1, docker_share_t, docker_share_t) + read_files_pattern($1, docker_share_t, docker_share_t) + read_lnk_files_pattern($1, docker_share_t, docker_share_t) +') + +###################################### +## +## Allow the specified domain to execute docker shared files +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_share_files',` + gen_require(` + type docker_share_t; + ') + + can_exec($1, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + logging_log_filetrans($1, docker_log_t, dir, "lxc") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +######################################## +## +## Execute docker_auth_exec_t in the docker_auth domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_auth_domtrans',` + gen_require(` + type docker_auth_t, docker_auth_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) +') + +###################################### +## +## Execute docker_auth in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_exec',` + gen_require(` + type docker_auth_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_auth_exec_t) +') + +######################################## +## +## Connect to docker_auth over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_stream_connect',` + gen_require(` + type docker_auth_t, docker_plugin_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) +') + +######################################## +## +## docker domain typebounds calling domain. +## +## +## +## Domain to be typebound. +## +## +# +interface(`docker_typebounds',` + gen_require(` + type docker_t; + ') + + typebounds docker_t $1; +') + +######################################## +## +## Allow any docker_exec_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`docker_entrypoint',` + gen_require(` + type docker_exec_t; + ') + allow $1 docker_exec_t:file entrypoint; +') + +######################################## +## +## Send and receive messages from +## systemd machined over dbus. +## +## +## +## Domain allowed access. +## +## +# +interface(`systemd_dbus_chat_machined',` + gen_require(` + type systemd_machined_t; + class dbus send_msg; + ') + + allow $1 systemd_machined_t:dbus send_msg; + allow systemd_machined_t $1:dbus send_msg; + ps_process_pattern(systemd_machined_t, $1) +') + +######################################## +## +## Allow any svirt_sandbox_file_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`virt_sandbox_entrypoint',` + gen_require(` + type svirt_sandbox_file_t; + ') + allow $1 svirt_sandbox_file_t:file entrypoint; +') + +######################################## +## +## Send and receive messages from +## virt over dbus. +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_dbus_chat',` + gen_require(` + type virtd_t; + class dbus send_msg; + ') + + allow $1 virtd_t:dbus send_msg; + allow virtd_t $1:dbus send_msg; + ps_process_pattern(virtd_t, $1) +') + +####################################### +## +## Read the process state of virt sandbox containers +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_sandbox_read_state',` + gen_require(` + attribute svirt_sandbox_domain; + ') + + ps_process_pattern($1, svirt_sandbox_domain) +') + +###################################### +## +## Send a signal to sandbox domains +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_signal_sandbox',` + gen_require(` + attribute svirt_sandbox_domain; + ') + + allow $1 svirt_sandbox_domain:process signal; +') + +####################################### +## +## Getattr Sandbox File systems +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_getattr_sandbox_filesystem',` + gen_require(` + type svirt_sandbox_file_t; + ') + + allow $1 svirt_sandbox_file_t:filesystem getattr; +') + +####################################### +## +## Read Sandbox Files +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_read_sandbox_files',` + gen_require(` + type svirt_sandbox_file_t; + ') + + list_dirs_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) + read_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) + read_lnk_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) +') + +####################################### +## +## Read the process state of spc containers +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_read_state',` + gen_require(` + type spc_t; + ') + + ps_process_pattern($1, spc_t) +') + diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te new file mode 100644 index 0000000..d4de36f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te @@ -0,0 +1,465 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +##

+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type docker_auth_t; +type docker_auth_exec_t; +init_daemon_domain(docker_auth_t, docker_auth_exec_t) + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_plugin_var_run_t; +files_pid_file(docker_plugin_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +# OL7 systemd selinux update +type systemd_machined_t; +type systemd_machined_exec_t; +init_daemon_domain(systemd_machined_t, systemd_machined_exec_t) + +# /run/systemd/machines +type systemd_machined_var_run_t; +files_pid_file(systemd_machined_var_run_t) + +# /var/lib/machines +type systemd_machined_var_lib_t; +files_type(systemd_machined_var_lib_t) + + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +docker_auth_stream_connect(docker_t) + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) +files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +# +# lxc rules +# + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + systemd_dbus_chat_machined(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + unconfined_domain(docker_t) + # unconfined_typebounds(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + # for lxc + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; + virt_sandbox_entrypoint(docker_t) +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; +role system_r types spc_t; + +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; +filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") + +optional_policy(` + systemd_dbus_chat_machined(spc_t) +') + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) + virt_sandbox_entrypoint(spc_t) +') + +######################################## +# +# docker_auth local policy +# +allow docker_auth_t self:fifo_file rw_fifo_file_perms; +allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; +dontaudit docker_auth_t self:capability net_admin; + +docker_stream_connect(docker_auth_t) + +manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) + +domain_use_interactive_fds(docker_auth_t) + +kernel_read_net_sysctls(docker_auth_t) + +auth_use_nsswitch(docker_auth_t) + +files_read_etc_files(docker_auth_t) + +miscfiles_read_localization(docker_auth_t) + +sysnet_dns_name_resolve(docker_auth_t) + +######################################## +# +# OL7.2 systemd selinux update +# systemd_machined local policy +# +allow systemd_machined_t self:capability { dac_override setgid sys_admin sys_chroot sys_ptrace }; +allow systemd_machined_t systemd_unit_file_t:service { status start }; +allow systemd_machined_t self:unix_dgram_socket create_socket_perms; + +manage_dirs_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) +manage_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) +manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) +init_pid_filetrans(systemd_machined_t, systemd_machined_var_run_t, dir, "machines") + +manage_dirs_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) +manage_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) +manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) +init_var_lib_filetrans(systemd_machined_t, systemd_machined_var_lib_t, dir, "machines") + +kernel_dgram_send(systemd_machined_t) +# This is a bug, but need for now. +kernel_read_unlabeled_state(systemd_machined_t) + +init_dbus_chat(systemd_machined_t) +init_status(systemd_machined_t) + +userdom_dbus_send_all_users(systemd_machined_t) + +term_use_ptmx(systemd_machined_t) + +optional_policy(` + dbus_connect_system_bus(systemd_machined_t) + dbus_system_bus_client(systemd_machined_t) +') + +optional_policy(` + docker_read_share_files(systemd_machined_t) + docker_spc_read_state(systemd_machined_t) +') + +optional_policy(` + virt_dbus_chat(systemd_machined_t) + virt_sandbox_read_state(systemd_machined_t) + virt_signal_sandbox(systemd_machined_t) + virt_stream_connect_sandbox(systemd_machined_t) + virt_rw_svirt_dev(systemd_machined_t) + virt_getattr_sandbox_filesystem(systemd_machined_t) + virt_read_sandbox_files(systemd_machined_t) +') + + diff --git a/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/LICENSE b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/LICENSE new file mode 100644 index 0000000..5b6e7c6 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/LICENSE @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/Makefile b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/Makefile new file mode 100644 index 0000000..1bdc695 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/Makefile @@ -0,0 +1,16 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz diff --git a/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.fc b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.fc new file mode 100644 index 0000000..467d659 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.fc @@ -0,0 +1,18 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/dockerd -- gen_context(system_u:object_r:docker_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) + +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.if b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.if new file mode 100644 index 0000000..ca075c0 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.if @@ -0,0 +1,461 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_share_t, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +interface(`domain_stub_named_filetrans_domain',` + gen_require(` + attribute named_filetrans_domain; + ') +') + +interface(`lvm_stub',` + gen_require(` + type lvm_t; + ') +') +interface(`staff_stub',` + gen_require(` + type staff_t; + ') +') +interface(`virt_stub_svirt_sandbox_domain',` + gen_require(` + attribute svirt_sandbox_domain; + ') +') +interface(`virt_stub_svirt_sandbox_file',` + gen_require(` + type svirt_sandbox_file_t; + ') +') +interface(`fs_dontaudit_remount_tmpfs',` + gen_require(` + type tmpfs_t; + ') + + dontaudit $1 tmpfs_t:filesystem remount; +') +interface(`dev_dontaudit_list_all_dev_nodes',` + gen_require(` + type device_t; + ') + + dontaudit $1 device_t:dir list_dir_perms; +') +interface(`kernel_unlabeled_entry_type',` + gen_require(` + type unlabeled_t; + ') + + domain_entry_file($1, unlabeled_t) +') +interface(`kernel_unlabeled_domtrans',` + gen_require(` + type unlabeled_t; + ') + + read_lnk_files_pattern($1, unlabeled_t, unlabeled_t) + domain_transition_pattern($1, unlabeled_t, $2) + type_transition $1 unlabeled_t:process $2; +') +interface(`files_write_all_pid_sockets',` + gen_require(` + attribute pidfile; + ') + + allow $1 pidfile:sock_file write_sock_file_perms; +') +interface(`dev_dontaudit_mounton_sysfs',` + gen_require(` + type sysfs_t; + ') + + dontaudit $1 sysfs_t:dir mounton; +') diff --git a/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.te b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.te new file mode 100644 index 0000000..bad0bb6 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker.te @@ -0,0 +1,407 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +##

+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +role system_r types spc_t; + +domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + unconfined_domain(docker_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) +') + +######################################## +# +# docker upstream policy +# + +optional_policy(` +# domain_stub_named_filetrans_domain() + gen_require(` + attribute named_filetrans_domain; + ') + + docker_filetrans_named_content(named_filetrans_domain) +') + +optional_policy(` + lvm_stub() + docker_rw_sem(lvm_t) +') + +optional_policy(` + staff_stub() + docker_stream_connect(staff_t) + docker_exec(staff_t) +') + +optional_policy(` + virt_stub_svirt_sandbox_domain() + virt_stub_svirt_sandbox_file() + allow svirt_sandbox_domain self:netlink_kobject_uevent_socket create_socket_perms; + docker_read_share_files(svirt_sandbox_domain) + docker_lib_filetrans(svirt_sandbox_domain,svirt_sandbox_file_t, sock_file) + docker_use_ptys(svirt_sandbox_domain) + docker_spc_stream_connect(svirt_sandbox_domain) + fs_list_tmpfs(svirt_sandbox_domain) + fs_rw_hugetlbfs_files(svirt_sandbox_domain) + fs_dontaudit_remount_tmpfs(svirt_sandbox_domain) + dev_dontaudit_mounton_sysfs(svirt_sandbox_domain) + + tunable_policy(`virt_sandbox_use_fusefs',` + fs_manage_fusefs_dirs(svirt_sandbox_domain) + fs_manage_fusefs_files(svirt_sandbox_domain) + fs_manage_fusefs_symlinks(svirt_sandbox_domain) + ') + gen_require(` + attribute domain; + ') + + dontaudit svirt_sandbox_domain domain:key {search link}; +') + +optional_policy(` + gen_require(` + type pcp_pmcd_t; + ') + docker_manage_lib_files(pcp_pmcd_t) +') diff --git a/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz b/vendor/github.com/moby/moby/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz new file mode 100644 index 0000000000000000000000000000000000000000..ab5d59445ac1601ca378aaa3e71fb9cff43a1592 GIT binary patch literal 2847 zcmV+)3*hu0iwFo7v)okz17vSwYh`j@b7gF4ZgqGrH~__3TaVke5`I4V6@*`!6l=S| zL4lsU6wa>G7)ZQ6Yo}<61q526ZDJ)-B`NQ^I6wZ(@S=+?ue_UwK6D$4GsAB#9L|h1 zT74p9kjmtNshEi^7cAB+)14KO+rU$c!fk;-5#O zmV?vz9JoRUq(p7=UrB&Q;!Mydm$39gew3ZrB;ilS8)GkXHjhLJ~Z zb`9~dA;BW%P_PmCCQFh~L6RLy9thu%13cK#JwqnV8WL401Q%PfK6v5y10~;YJ{0bIQB&IB4h8PX!L;;nhe>W6UN2*vY){HP=m;wW%^*n~)VL%-lM6=;wQLDcf$Tqah4DzZ&A-OQ5 zpk}9!d<;9LGN)V+s;qrrJQSwpk3}bnLm)E<+bWW&HyOZUN+Z8! zrYvwTu1*6Pt*!k*0iAMYb~43Bh141ajx6w1(;G*YMQ=Hqr`EP^4~)I(9~ggC#Eqs? zD{L+egeI(L1_4dCa15BrIqU}t4{6QdV-7S)QIVWJI5#x+uY;!+G9tCH0HBZ%Sxi)C z8$>lWY$jj8Y28@j&axe-pr4r%%d53zgn1bWHS|f79H5xC)H0jgI zs0uU)bj^^I3>RHe-bFS9yYM?pRYwLc4Vmq2q_6juX;@({ab1JGR{4 zfw)WDORWuVA|@%o>a>8w)TaS@706>x{ypfAMZF9;#_*bFSi{*fL({Pf9G43qVP2w% zIefPU=Gn9DQa}6`GQB;xg;6xIw?0G;TbJ9dJ-0w6?P0F2$a6Y?)YuAX#LrY*3cta9 znbBSK62e9L9P1w1f-7Y@QM`a6_IzSR@_3V?)m{U-#s5;+q3R`&mIcd5CTWU?x6D`% zV8;+6L+lw|cO#sY_EKF!`4838h8Nl%`!hOJ>#s0)&D)<2@%Y(r-jGtktqviMNwER^ z48UzB*EEZ@e$}nh;O;eIRpUakf#QQ=iR`XhC_rrG0lrx?CC@<(%RcL-uQ2I}h+fpL z9caOv&z5Hp3f=+ka%(o(UvEx4okAy2e(WgrYB{7zbvTC@2yGVCyZlv@2@b z=9Ay1H{|2&^EC99RZZMk!DF%LI|5gCWOU6CMOBv8JxJALYABUav}-9d4&F+u4l=Z! zt$tIpHaGSo&AtLQ{yMwy<-K68`LOBhW^!G%4q$9F$y%XFlC6?ufnD{##t<;$jUKy4 zmY|~YRRVg>(K3^a{nIz&(T{I`?WEsR6=!_ySm4JPevFGmrwyKZ;Z$C|CJP8Jt~=KX zH>2s6DdEf(n*uUl@{rz-3Z5RXd1H00sjUle)ye1-ZW8H-mPzWaX2Z92 z2)V}{CiL_>nKMVNq%`CEQ5d3}lA>0PK!ac7>?t`f8d{Df`Sy8gn~~aa>{ftd?6kTc zF|j|25>LYg?~WqBj^i1)>7ay0aXYDvzLZeVoOJ<)sByEhPdb$d6PF5P+?nTA#c=PA@scfsdyQ}Y5_8NS&t1%dCZ80_lCjU~9q zjg5~^n4io*sRMUjw&e-&PXRHliX zY20}XrJnZnS;I@=y@9J_Lt)mmQkSDND_Fue2MBz)TLn7JCJNW?r(tTxn%2Mxv7ZB5 zT8-6m%Jsu2I&X4wlF-Qy)}Z;JzP3%3&VP8`3&%{68=F@ZwA>hn8)wGbGNbs`rvOw+Ii6L#f+~(-Xebl9tVa4Q=RZL4J-Fm?7Us&zIL%JG!WlDgL{mh6HshYGQfieib=4EQndS1#f<%XMS1_3R~RknrMThpB*A zCYMWHDccRA1lxy7yBA1<_@xnpti)d@-AL;Gr58s<`kYA`A6_^qr^Q>}F{(R=iy*ms z_mz-vzy~*6=s#M}x=+}dR_%&(wP_tsZHrdF6ek~>)suhy9Ri#~Hp*p+-+449V#y8* z2Vd{B>(20^n+gC1%*l?5Ejz8!n$?sqc{{6|sNQ9TW$Yu)$1I|Q6&gyDs=UJFgs-`Q z0ehv#<~$8HY8O8d4mOJ-J2c8J|4Myuef#ALRG`bj8C+l}nrYeoSfF~{9fp7{rG2HY zM<;c3{cS*>;P9>+Vg|o4pzX0HSg7$y!pS!7-9zUVZUj6|-4GUXvo>%SjTOt~zIt=- z-(4J4q<(_iha62Dz7?pde3zq!?w%O>PqiXYgOcCA&On092;Ebjh1w>3&(N6b`qqva z_kj(bC9a^$msVm=VkX>UOUv6-Ye@!LXc8$>j6$ xb`W`pZ+>}u<]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!" + +## Strings, double-quoted +color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!" + +## Single and double quotes +color brightyellow "('|\")" diff --git a/vendor/github.com/moby/moby/contrib/syntax/nano/README.md b/vendor/github.com/moby/moby/contrib/syntax/nano/README.md new file mode 100644 index 0000000..5985208 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/nano/README.md @@ -0,0 +1,32 @@ +Dockerfile.nanorc +================= + +Dockerfile syntax highlighting for nano + +Single User Installation +------------------------ +1. Create a nano syntax directory in your home directory: + * `mkdir -p ~/.nano/syntax` + +2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/` + * `cp Dockerfile.nanorc ~/.nano/syntax/` + +3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file + ``` +## Dockerfile files +include "~/.nano/syntax/Dockerfile.nanorc" + ``` + +System Wide Installation +------------------------ +1. Create a nano syntax directory: + * `mkdir /usr/local/share/nano` + +2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano` + * `cp Dockerfile.nanorc /usr/local/share/nano/` + +3. Add the following to your `/etc/nanorc`: + ``` +## Dockerfile files +include "/usr/local/share/nano/Dockerfile.nanorc" + ``` diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences new file mode 100644 index 0000000..20f0d04 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences @@ -0,0 +1,24 @@ + + + + + name + Comments + scope + source.dockerfile + settings + + shellVariables + + + name + TM_COMMENT_START + value + # + + + + uuid + 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 + + diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage new file mode 100644 index 0000000..948a9bf --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage @@ -0,0 +1,143 @@ + + + + + fileTypes + + Dockerfile + + name + Dockerfile + patterns + + + captures + + 1 + + name + keyword.control.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s + + + captures + + 1 + + name + keyword.operator.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s + + + begin + " + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + " + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.double.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + begin + ' + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + ' + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.single.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + captures + + 1 + + name + punctuation.whitespace.comment.leading.dockerfile + + 2 + + name + comment.line.number-sign.dockerfile + + 3 + + name + punctuation.definition.comment.dockerfile + + + comment + comment.line + match + ^(\s*)((#).*$\n?) + + + scopeName + source.dockerfile + uuid + a39d8795-59d2-49af-aa00-fe74ee29576e + + diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/info.plist b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/info.plist new file mode 100644 index 0000000..239f4b0 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/info.plist @@ -0,0 +1,16 @@ + + + + + contactEmailRot13 + germ@andz.com.ar + contactName + GermanDZ + description + Helpers for Docker. + name + Docker + uuid + 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 + + diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/README.md b/vendor/github.com/moby/moby/contrib/syntax/textmate/README.md new file mode 100644 index 0000000..ce61101 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/README.md @@ -0,0 +1,17 @@ +# Docker.tmbundle + +Dockerfile syntax highlighting for TextMate and Sublime Text. + +## Install + +### Sublime Text + +Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). +Search for *Dockerfile Syntax Highlighting* + +### TextMate 2 + +You can install this bundle in TextMate by opening the preferences and going to the bundles tab. After installation it will be automatically updated for you. + +enjoy. + diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/REVIEWERS b/vendor/github.com/moby/moby/contrib/syntax/textmate/REVIEWERS new file mode 100644 index 0000000..965743d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/REVIEWERS @@ -0,0 +1 @@ +Asbjorn Enge (@asbjornenge) diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/LICENSE b/vendor/github.com/moby/moby/contrib/syntax/vim/LICENSE new file mode 100644 index 0000000..e67cdab --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/README.md b/vendor/github.com/moby/moby/contrib/syntax/vim/README.md new file mode 100644 index 0000000..5aa9bd8 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/vim/README.md @@ -0,0 +1,26 @@ +dockerfile.vim +============== + +Syntax highlighting for Dockerfiles + +Installation +------------ +With [pathogen](https://github.com/tpope/vim-pathogen), the usual way... + +With [Vundle](https://github.com/gmarik/Vundle.vim) + + Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'} + +Features +-------- + +The syntax highlighting includes: + +* The directives (e.g. `FROM`) +* Strings +* Comments + +License +------- + +BSD, short and sweet diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/doc/dockerfile.txt b/vendor/github.com/moby/moby/contrib/syntax/vim/doc/dockerfile.txt new file mode 100644 index 0000000..e69e2b7 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/vim/doc/dockerfile.txt @@ -0,0 +1,18 @@ +*dockerfile.txt* Syntax highlighting for Dockerfiles + +Author: Honza Pokorny +License: BSD + +INSTALLATION *installation* + +Drop it on your Pathogen path and you're all set. + +FEATURES *features* + +The syntax highlighting includes: + +* The directives (e.g. FROM) +* Strings +* Comments + + vim:tw=78:et:ft=help:norl: diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/ftdetect/dockerfile.vim b/vendor/github.com/moby/moby/contrib/syntax/vim/ftdetect/dockerfile.vim new file mode 100644 index 0000000..ee10e5d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/vim/ftdetect/dockerfile.vim @@ -0,0 +1 @@ +au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/syntax/dockerfile.vim b/vendor/github.com/moby/moby/contrib/syntax/vim/syntax/dockerfile.vim new file mode 100644 index 0000000..a067e6a --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/vim/syntax/dockerfile.vim @@ -0,0 +1,31 @@ +" dockerfile.vim - Syntax highlighting for Dockerfiles +" Maintainer: Honza Pokorny +" Version: 0.5 + + +if exists("b:current_syntax") + finish +endif + +let b:current_syntax = "dockerfile" + +syntax case ignore + +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s/ +highlight link dockerfileKeyword Keyword + +syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ +highlight link dockerfileString String + +syntax match dockerfileComment "\v^\s*#.*$" +highlight link dockerfileComment Comment + +set commentstring=#\ %s + +" match "RUN", "CMD", and "ENTRYPOINT" lines, and parse them as shell +let s:current_syntax = b:current_syntax +unlet b:current_syntax +syntax include @SH syntax/sh.vim +let b:current_syntax = s:current_syntax +syntax region shLine matchgroup=dockerfileKeyword start=/\v^\s*(RUN|CMD|ENTRYPOINT)\s/ end=/\v$/ contains=@SH +" since @SH will handle "\" as part of the same line automatically, this "just works" for line continuation too, but with the caveat that it will highlight "RUN echo '" followed by a newline as if it were a block because the "'" is shell line continuation... not sure how to fix that just yet (TODO) diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/Dockerfile b/vendor/github.com/moby/moby/contrib/syscall-test/Dockerfile new file mode 100644 index 0000000..fcf5892 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/Dockerfile @@ -0,0 +1,16 @@ +FROM buildpack-deps:jessie + +COPY . /usr/src/ + +WORKDIR /usr/src/ + +RUN gcc -g -Wall -static userns.c -o /usr/bin/userns-test \ + && gcc -g -Wall -static ns.c -o /usr/bin/ns-test \ + && gcc -g -Wall -static acct.c -o /usr/bin/acct-test \ + && gcc -g -Wall -static setuid.c -o /usr/bin/setuid-test \ + && gcc -g -Wall -static setgid.c -o /usr/bin/setgid-test \ + && gcc -g -Wall -static socket.c -o /usr/bin/socket-test \ + && gcc -g -Wall -static raw.c -o /usr/bin/raw-test \ + && gcc -g -Wall -static appletalk.c -o /usr/bin/appletalk-test + +RUN [ "$(uname -m)" = "x86_64" ] && gcc -s -m32 -nostdlib exit32.s -o /usr/bin/exit32-test || true diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/acct.c b/vendor/github.com/moby/moby/contrib/syscall-test/acct.c new file mode 100644 index 0000000..88ac287 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/acct.c @@ -0,0 +1,16 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + int err = acct("/tmp/t"); + if (err == -1) { + fprintf(stderr, "acct failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/appletalk.c b/vendor/github.com/moby/moby/contrib/syscall-test/appletalk.c new file mode 100644 index 0000000..0001dd4 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/appletalk.c @@ -0,0 +1,12 @@ +#include +#include + +int main() { + + if (socket(AF_APPLETALK, SOCK_DGRAM, 0) != -1) { + fprintf(stderr, "Opening Appletalk socket worked, should be blocked\n"); + return 1; + } + + return 0; +} diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/exit32.s b/vendor/github.com/moby/moby/contrib/syscall-test/exit32.s new file mode 100644 index 0000000..8bbb5c5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/exit32.s @@ -0,0 +1,7 @@ +.globl _start +.text +_start: + xorl %eax, %eax + incl %eax + movb $0, %bl + int $0x80 diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/ns.c b/vendor/github.com/moby/moby/contrib/syscall-test/ns.c new file mode 100644 index 0000000..33684e1 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/ns.c @@ -0,0 +1,63 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */ + +struct clone_args { + char **argv; +}; + +// child_exec is the func that will be executed as the result of clone +static int child_exec(void *stuff) +{ + struct clone_args *args = (struct clone_args *)stuff; + if (execvp(args->argv[0], args->argv) != 0) { + fprintf(stderr, "failed to execvp argments %s\n", + strerror(errno)); + exit(-1); + } + // we should never reach here! + exit(EXIT_FAILURE); +} + +int main(int argc, char **argv) +{ + struct clone_args args; + args.argv = &argv[1]; + + int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD; + + // allocate stack for child + char *stack; /* Start of stack buffer */ + char *child_stack; /* End of stack buffer */ + stack = + mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) { + fprintf(stderr, "mmap failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ + + // the result of this call is that our child_exec will be run in another + // process returning its pid + pid_t pid = clone(child_exec, child_stack, clone_flags, &args); + if (pid < 0) { + fprintf(stderr, "clone failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + // lets wait on our child process here before we, the parent, exits + if (waitpid(pid, NULL, 0) == -1) { + fprintf(stderr, "failed to wait pid %d\n", pid); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/raw.c b/vendor/github.com/moby/moby/contrib/syscall-test/raw.c new file mode 100644 index 0000000..7995a0d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/raw.c @@ -0,0 +1,14 @@ +#include +#include +#include +#include +#include + +int main() { + if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) { + perror("socket"); + return 1; + } + + return 0; +} diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/setgid.c b/vendor/github.com/moby/moby/contrib/syscall-test/setgid.c new file mode 100644 index 0000000..df9680c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/setgid.c @@ -0,0 +1,11 @@ +#include +#include +#include + +int main() { + if (setgid(1) == -1) { + perror("setgid"); + return 1; + } + return 0; +} diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/setuid.c b/vendor/github.com/moby/moby/contrib/syscall-test/setuid.c new file mode 100644 index 0000000..5b93967 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/setuid.c @@ -0,0 +1,11 @@ +#include +#include +#include + +int main() { + if (setuid(1) == -1) { + perror("setuid"); + return 1; + } + return 0; +} diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/socket.c b/vendor/github.com/moby/moby/contrib/syscall-test/socket.c new file mode 100644 index 0000000..d26c82f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/socket.c @@ -0,0 +1,30 @@ +#include +#include +#include +#include +#include +#include + +int main() { + int s; + struct sockaddr_in sin; + + s = socket(AF_INET, SOCK_STREAM, 0); + if (s == -1) { + perror("socket"); + return 1; + } + + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = INADDR_ANY; + sin.sin_port = htons(80); + + if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) { + perror("bind"); + return 1; + } + + close(s); + + return 0; +} diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/userns.c b/vendor/github.com/moby/moby/contrib/syscall-test/userns.c new file mode 100644 index 0000000..2af36f4 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/userns.c @@ -0,0 +1,63 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */ + +struct clone_args { + char **argv; +}; + +// child_exec is the func that will be executed as the result of clone +static int child_exec(void *stuff) +{ + struct clone_args *args = (struct clone_args *)stuff; + if (execvp(args->argv[0], args->argv) != 0) { + fprintf(stderr, "failed to execvp argments %s\n", + strerror(errno)); + exit(-1); + } + // we should never reach here! + exit(EXIT_FAILURE); +} + +int main(int argc, char **argv) +{ + struct clone_args args; + args.argv = &argv[1]; + + int clone_flags = CLONE_NEWUSER | SIGCHLD; + + // allocate stack for child + char *stack; /* Start of stack buffer */ + char *child_stack; /* End of stack buffer */ + stack = + mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) { + fprintf(stderr, "mmap failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ + + // the result of this call is that our child_exec will be run in another + // process returning its pid + pid_t pid = clone(child_exec, child_stack, clone_flags, &args); + if (pid < 0) { + fprintf(stderr, "clone failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + // lets wait on our child process here before we, the parent, exits + if (waitpid(pid, NULL, 0) == -1) { + fprintf(stderr, "failed to wait pid %d\n", pid); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/moby/moby/contrib/udev/80-docker.rules b/vendor/github.com/moby/moby/contrib/udev/80-docker.rules new file mode 100644 index 0000000..f934c01 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/udev/80-docker.rules @@ -0,0 +1,3 @@ +# hide docker's loopback devices from udisks, and thus from user desktops +SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" +SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" diff --git a/vendor/github.com/moby/moby/contrib/vagrant-docker/README.md b/vendor/github.com/moby/moby/contrib/vagrant-docker/README.md new file mode 100644 index 0000000..66e2fa5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/vagrant-docker/README.md @@ -0,0 +1,50 @@ +# Vagrant integration + +Currently there are at least 4 different projects that we are aware of that deals +with integration with [Vagrant](http://vagrantup.com/) at different levels. One +approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html) +which means you can create containers and pull base images on VMs using Docker's +CLI and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html), +meaning you can use Vagrant to control Docker containers. + + +### Provisioners + +* [Vocker](https://github.com/fgrehm/vocker) +* [Ventriloquist](https://github.com/fgrehm/ventriloquist) + +### Providers + +* [docker-provider](https://github.com/fgrehm/docker-provider) +* [vagrant-shell](https://github.com/destructuring/vagrant-shell) + +## Setting up Vagrant-docker with the Engine API + +The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to connect to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this: + +``` +description "Docker daemon" + +start on filesystem +stop on runlevel [!2345] + +respawn + +script + /usr/bin/docker daemon -H=tcp://0.0.0.0:2375 +end script +``` + +Once that's done, you need to set up an SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: + +``` +ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost +``` + +(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) + +Note that because the port has been changed, to run docker commands from within the command line you must run them like this: + +``` +sudo docker -H 0.0.0.0:2375 < commands for docker > +``` diff --git a/vendor/github.com/moby/moby/daemon/apparmor_default.go b/vendor/github.com/moby/moby/daemon/apparmor_default.go new file mode 100644 index 0000000..09dd054 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/apparmor_default.go @@ -0,0 +1,36 @@ +// +build linux + +package daemon + +import ( + "fmt" + + aaprofile "github.com/docker/docker/profiles/apparmor" + "github.com/opencontainers/runc/libcontainer/apparmor" +) + +// Define constants for native driver +const ( + defaultApparmorProfile = "docker-default" +) + +func ensureDefaultAppArmorProfile() error { + if apparmor.IsEnabled() { + loaded, err := aaprofile.IsLoaded(defaultApparmorProfile) + if err != nil { + return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", defaultApparmorProfile, err) + } + + // Nothing to do. + if loaded { + return nil + } + + // Load the profile. + if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { + return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", defaultApparmorProfile) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/apparmor_default_unsupported.go b/vendor/github.com/moby/moby/daemon/apparmor_default_unsupported.go new file mode 100644 index 0000000..cd2dd97 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/apparmor_default_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package daemon + +func ensureDefaultAppArmorProfile() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/archive.go b/vendor/github.com/moby/moby/daemon/archive.go new file mode 100644 index 0000000..1999f12 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/archive.go @@ -0,0 +1,436 @@ +package daemon + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" +) + +// ErrExtractPointNotDirectory is used to convey that the operation to extract +// a tar archive to a directory in a container has failed because the specified +// path does not refer to a directory. +var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") + +// ContainerCopy performs a deprecated operation of archiving the resource at +// the specified path in the container identified by the given name. +func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if res[0] == '/' || res[0] == '\\' { + res = res[1:] + } + + return daemon.containerCopy(container, res) +} + +// ContainerStatPath stats the filesystem resource at the specified path in the +// container identified by the given name. +func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + return daemon.containerStatPath(container, path) +} + +// ContainerArchivePath creates an archive of the filesystem resource at the +// specified path in the container identified by the given name. Returns a +// tar archive of the resource and whether it was a directory or a single file. +func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, nil, err + } + + return daemon.containerArchivePath(container, path) +} + +// ContainerExtractToDir extracts the given archive to the specified location +// in the filesystem of the container identified by the given name. The given +// path must be of a directory in the container. If it is not, the error will +// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will +// be an error if unpacking the given content would cause an existing directory +// to be replaced with a non-directory and vice versa. +func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) +} + +// containerStatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. +func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return nil, err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, err + } + + return container.StatPath(resolvedPath, absPath) +} + +// containerArchivePath creates an archive of the filesystem resource at the specified +// path in this container. Returns a tar archive of the resource and stat info +// about the resource. +func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err = daemon.Mount(container); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err = daemon.mountVolumes(container); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.StatPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + + daemon.LogContainerEvent(container, "archive-path") + + return content, stat, nil +} + +// containerExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. +func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return err + } + + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.ResolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. + resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.BaseFS) { + baseRel = resolvedPath[len(container.BaseFS):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.HostConfig.ReadonlyRootfs { + return ErrRootFSReadOnly + } + + uid, gid := daemon.GetRemappedUIDGID() + options := &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &archive.TarChownOptions{ + UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? + }, + } + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + daemon.LogContainerEvent(container, "extract-to-dir") + + return nil +} + +func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} + +// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container +// specified by a container object. +// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). +// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. +func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { + srcPath := src.Path() + destExists := true + destDir := false + rootUID, rootGID := daemon.GetRemappedUIDGID() + + // Work in daemon-local OS specific file paths + destPath = filepath.FromSlash(destPath) + + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(c) + if err != nil { + return err + } + defer daemon.Unmount(c) + + dest, err := c.GetResourcePath(destPath) + if err != nil { + return err + } + + // Preserve the trailing slash + // TODO: why are we appending another path separator if there was already one? + if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { + destDir = true + dest += string(os.PathSeparator) + } + + destPath = dest + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) + return err + } + destExists = false + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archiver := &archive.Archiver{ + Untar: chrootarchive.Untar, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + + if src.IsDir() { + // copy as directory + if err := archiver.CopyWithTar(srcPath, destPath); err != nil { + return err + } + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) + } + if decompress && archive.IsArchivePath(srcPath) { + // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) + + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in slash + tarDest := destPath + if strings.HasSuffix(tarDest, string(os.PathSeparator)) { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + err := archiver.UntarPath(srcPath, tarDest) + /* + if err != nil { + logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) + } + */ + return err + } + + // only needed for fixPermissions, but might as well put it before CopyFileWithTar + if destDir || (destExists && destStat.IsDir()) { + destPath = filepath.Join(destPath, src.Name()) + } + + if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { + return err + } + if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { + return err + } + + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) +} diff --git a/vendor/github.com/moby/moby/daemon/archive_unix.go b/vendor/github.com/moby/moby/daemon/archive_unix.go new file mode 100644 index 0000000..47666fe --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/archive_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package daemon + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/container" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.HasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. + return err + } + doChownDestination := !destExisted || !destStat.IsDir() + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { + return nil + } + + // Path is prefixed by source: substitute with destination instead. + cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) + }) +} diff --git a/vendor/github.com/moby/moby/daemon/archive_windows.go b/vendor/github.com/moby/moby/daemon/archive_windows.go new file mode 100644 index 0000000..b3a1045 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/archive_windows.go @@ -0,0 +1,18 @@ +package daemon + +import "github.com/docker/docker/container" + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +// +// This is a no-op on Windows which does not support read-only volumes, or +// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5 +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + return false, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // chown is not supported on Windows + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/attach.go b/vendor/github.com/moby/moby/daemon/attach.go new file mode 100644 index 0000000..917237d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/attach.go @@ -0,0 +1,147 @@ +package daemon + +import ( + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" +) + +// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. +func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { + keys := []byte{} + var err error + if c.DetachKeys != "" { + keys, err = term.ToBytes(c.DetachKeys) + if err != nil { + return fmt.Errorf("Invalid escape keys (%s) provided", c.DetachKeys) + } + } + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + if container.IsPaused() { + err := fmt.Errorf("Container %s is paused. Unpause the container before attach", prefixOrName) + return errors.NewRequestConflictError(err) + } + + inStream, outStream, errStream, err := c.GetStreams() + if err != nil { + return err + } + defer inStream.Close() + + if !container.Config.Tty && c.MuxStreams { + errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + var stdin io.ReadCloser + var stdout, stderr io.Writer + + if c.UseStdin { + stdin = inStream + } + if c.UseStdout { + stdout = outStream + } + if c.UseStderr { + stderr = errStream + } + + if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, keys); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + return nil +} + +// ContainerAttachRaw attaches the provided streams to the container's stdio +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + return daemon.containerAttach(container, stdin, stdout, stderr, false, stream, nil) +} + +func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { + if logs { + logDriver, err := daemon.getLogger(c) + if err != nil { + return err + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && stdout != nil { + stdout.Write(msg.Line) + } + if msg.Source == "stderr" && stderr != nil { + stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(c, "attach") + + //stream + if stream { + var stdinPipe io.ReadCloser + if stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + io.Copy(w, stdin) + }() + stdinPipe = r + } + + waitChan := make(chan struct{}) + if c.Config.StdinOnce && !c.Config.Tty { + go func() { + c.WaitStop(-1 * time.Second) + close(waitChan) + }() + } + + err := <-c.Attach(stdinPipe, stdout, stderr, keys) + if err != nil { + if _, ok := err.(container.DetachError); ok { + daemon.LogContainerEvent(c, "detach") + } else { + logrus.Errorf("attach failed with error: %v", err) + } + } + + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if c.Config.StdinOnce && !c.Config.Tty { + <-waitChan + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/auth.go b/vendor/github.com/moby/moby/daemon/auth.go new file mode 100644 index 0000000..f5f4d7b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/auth.go @@ -0,0 +1,13 @@ +package daemon + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" +) + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { + return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx)) +} diff --git a/vendor/github.com/moby/moby/daemon/bindmount_solaris.go b/vendor/github.com/moby/moby/daemon/bindmount_solaris.go new file mode 100644 index 0000000..87bf3ef --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/bindmount_solaris.go @@ -0,0 +1,5 @@ +// +build solaris + +package daemon + +const bindMountType = "lofs" diff --git a/vendor/github.com/moby/moby/daemon/bindmount_unix.go b/vendor/github.com/moby/moby/daemon/bindmount_unix.go new file mode 100644 index 0000000..3966bab --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/bindmount_unix.go @@ -0,0 +1,5 @@ +// +build linux freebsd + +package daemon + +const bindMountType = "bind" diff --git a/vendor/github.com/moby/moby/daemon/cache.go b/vendor/github.com/moby/moby/daemon/cache.go new file mode 100644 index 0000000..5ea13fd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cache.go @@ -0,0 +1,254 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/runconfig" + "github.com/pkg/errors" +) + +// getLocalCachedImage returns the most recent created image that is a child +// of the image with imgID, that had the same config when it was +// created. nil is returned if a child cannot be found. An error is +// returned if the parent image cannot be found. +func (daemon *Daemon) getLocalCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { + // Loop on the children of the given image and check the config + getMatch := func(siblings []image.ID) (*image.Image, error) { + var match *image.Image + for _, id := range siblings { + img, err := daemon.imageStore.Get(id) + if err != nil { + return nil, fmt.Errorf("unable to find image %q", id) + } + + if runconfig.Compare(&img.ContainerConfig, config) { + // check for the most up to date match + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil + } + + // In this case, this is `FROM scratch`, which isn't an actual image. + if imgID == "" { + images := daemon.imageStore.Map() + var siblings []image.ID + for id, img := range images { + if img.Parent == imgID { + siblings = append(siblings, id) + } + } + return getMatch(siblings) + } + + // find match from child images + siblings := daemon.imageStore.Children(imgID) + return getMatch(siblings) +} + +// MakeImageCache creates a stateful image cache. +func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache { + if len(sourceRefs) == 0 { + return &localImageCache{daemon} + } + + cache := &imageCache{daemon: daemon, localImageCache: &localImageCache{daemon}} + + for _, ref := range sourceRefs { + img, err := daemon.GetImage(ref) + if err != nil { + logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) + continue + } + cache.sources = append(cache.sources, img) + } + + return cache +} + +// localImageCache is cache based on parent chain. +type localImageCache struct { + daemon *Daemon +} + +func (lic *localImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) { + return getImageIDAndError(lic.daemon.getLocalCachedImage(image.ID(imgID), config)) +} + +// imageCache is cache based on history objects. Requires initial set of images. +type imageCache struct { + sources []*image.Image + daemon *Daemon + localImageCache *localImageCache +} + +func (ic *imageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) { + var history []image.History + rootFS := image.NewRootFS() + lenHistory := 0 + if parent != nil { + history = parent.History + rootFS = parent.RootFS + lenHistory = len(parent.History) + } + history = append(history, target.History[lenHistory]) + if layer := getLayerForHistoryIndex(target, lenHistory); layer != "" { + rootFS.Append(layer) + } + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: cfg, + Architecture: target.Architecture, + OS: target.OS, + Author: target.Author, + Created: history[len(history)-1].Created, + }, + RootFS: rootFS, + History: history, + OSFeatures: target.OSFeatures, + OSVersion: target.OSVersion, + }) + if err != nil { + return "", errors.Wrap(err, "failed to marshal image config") + } + + imgID, err := ic.daemon.imageStore.Create(config) + if err != nil { + return "", errors.Wrap(err, "failed to create cache image") + } + + if parent != nil { + if err := ic.daemon.imageStore.SetParent(imgID, parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return imgID, nil +} + +func (ic *imageCache) isParent(imgID, parentID image.ID) bool { + nextParent, err := ic.daemon.imageStore.GetParent(imgID) + if err != nil { + return false + } + if nextParent == parentID { + return true + } + return ic.isParent(nextParent, parentID) +} + +func (ic *imageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) { + imgID, err := ic.localImageCache.GetCache(parentID, cfg) + if err != nil { + return "", err + } + if imgID != "" { + for _, s := range ic.sources { + if ic.isParent(s.ID(), image.ID(imgID)) { + return imgID, nil + } + } + } + + var parent *image.Image + lenHistory := 0 + if parentID != "" { + parent, err = ic.daemon.imageStore.Get(image.ID(parentID)) + if err != nil { + return "", errors.Wrapf(err, "unable to find image %v", parentID) + } + lenHistory = len(parent.History) + } + + for _, target := range ic.sources { + if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) { + continue + } + + if len(target.History)-1 == lenHistory { // last + if parent != nil { + if err := ic.daemon.imageStore.SetParent(target.ID(), parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return target.ID().String(), nil + } + + imgID, err := ic.restoreCachedImage(parent, target, cfg) + if err != nil { + return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID()) + } + + ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm + return imgID.String(), nil + } + + return "", nil +} + +func getImageIDAndError(img *image.Image, err error) (string, error) { + if img == nil || err != nil { + return "", err + } + return img.ID().String(), nil +} + +func isValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 { + return false + } + if parent == nil || len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0 { + return true + } + if len(parent.History) >= len(img.History) { + return false + } + if len(parent.RootFS.DiffIDs) > len(img.RootFS.DiffIDs) { + return false + } + + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + for i, d := range parent.RootFS.DiffIDs { + if d != img.RootFS.DiffIDs[i] { + return false + } + } + return true +} + +func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID { + layerIndex := 0 + for i, h := range image.History { + if i == index { + if h.EmptyLayer { + return "" + } + break + } + if !h.EmptyLayer { + layerIndex++ + } + } + return image.RootFS.DiffIDs[layerIndex] // validate? +} + +func isValidConfig(cfg *containertypes.Config, h image.History) bool { + // todo: make this format better than join that loses data + return strings.Join(cfg.Cmd, " ") == h.CreatedBy +} diff --git a/vendor/github.com/moby/moby/daemon/caps/utils_unix.go b/vendor/github.com/moby/moby/daemon/caps/utils_unix.go new file mode 100644 index 0000000..c99485f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/caps/utils_unix.go @@ -0,0 +1,131 @@ +// +build !windows + +package caps + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/stringutils" + "github.com/syndtr/gocapability/capability" +) + +var capabilityList Capabilities + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capabilityList = append(capabilityList, + &CapabilityMapping{ + Key: "CAP_" + strings.ToUpper(cap.String()), + Value: cap, + }, + ) + } +} + +type ( + // CapabilityMapping maps linux capability name to its value of capability.Cap type + // Capabilities is one of the security systems in Linux Security Module (LSM) + // framework provided by the kernel. + // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + // Capabilities contains all CapabilityMapping + Capabilities []*CapabilityMapping +) + +// String returns of CapabilityMapping +func (c *CapabilityMapping) String() string { + return c.Key +} + +// GetCapability returns CapabilityMapping which contains specific key +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +// GetAllCapabilities returns all of the capabilities +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// TweakCapabilities can tweak capabilities by adding or dropping capabilities +// based on the basics capabilities. +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = GetAllCapabilities() + ) + + // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix + // Currently they are mixed in here. We should do conversion in one place. + + // look for invalid cap in the drop list + for _, cap := range drops { + if strings.ToLower(cap) == "all" { + continue + } + + if !stringutils.InSlice(allCaps, "CAP_"+cap) { + return nil, fmt.Errorf("Unknown capability drop: %q", cap) + } + } + + // handle --cap-add=all + if stringutils.InSlice(adds, "all") { + basics = allCaps + } + + if !stringutils.InSlice(drops, "all") { + for _, cap := range basics { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + // if we don't drop `all`, add back all the non-dropped caps + if !stringutils.InSlice(drops, cap[4:]) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + } + + for _, cap := range adds { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + cap = "CAP_" + cap + + if !stringutils.InSlice(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability to add: %q", cap) + } + + // add cap if not already in the list + if !stringutils.InSlice(newCaps, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + return newCaps, nil +} diff --git a/vendor/github.com/moby/moby/daemon/changes.go b/vendor/github.com/moby/moby/daemon/changes.go new file mode 100644 index 0000000..fc8cd27 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/changes.go @@ -0,0 +1,31 @@ +package daemon + +import ( + "errors" + "runtime" + "time" + + "github.com/docker/docker/pkg/archive" +) + +// ContainerChanges returns a list of container fs changes +func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if runtime.GOOS == "windows" && container.IsRunning() { + return nil, errors.New("Windows does not support diff of a running container") + } + + container.Lock() + defer container.Unlock() + c, err := container.RWLayer.Changes() + if err != nil { + return nil, err + } + containerActions.WithValues("changes").UpdateSince(start) + return c, nil +} diff --git a/vendor/github.com/moby/moby/daemon/checkpoint.go b/vendor/github.com/moby/moby/daemon/checkpoint.go new file mode 100644 index 0000000..2718174 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/checkpoint.go @@ -0,0 +1,110 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/utils" +) + +var ( + validCheckpointNameChars = utils.RestrictedNameChars + validCheckpointNamePattern = utils.RestrictedNamePattern +) + +// CheckpointCreate checkpoints the process running in a container with CRIU +func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreateOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return fmt.Errorf("Container %s not running", name) + } + + var checkpointDir string + if config.CheckpointDir != "" { + checkpointDir = config.CheckpointDir + } else { + checkpointDir = container.CheckpointDir() + } + + if !validCheckpointNamePattern.MatchString(config.CheckpointID) { + return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars) + } + + err = daemon.containerd.CreateCheckpoint(container.ID, config.CheckpointID, checkpointDir, config.Exit) + if err != nil { + return fmt.Errorf("Cannot checkpoint container %s: %s", name, err) + } + + daemon.LogContainerEvent(container, "checkpoint") + + return nil +} + +// CheckpointDelete deletes the specified checkpoint +func (daemon *Daemon) CheckpointDelete(name string, config types.CheckpointDeleteOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + var checkpointDir string + if config.CheckpointDir != "" { + checkpointDir = config.CheckpointDir + } else { + checkpointDir = container.CheckpointDir() + } + + return os.RemoveAll(filepath.Join(checkpointDir, config.CheckpointID)) +} + +// CheckpointList lists all checkpoints of the specified container +func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOptions) ([]types.Checkpoint, error) { + var out []types.Checkpoint + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + var checkpointDir string + if config.CheckpointDir != "" { + checkpointDir = config.CheckpointDir + } else { + checkpointDir = container.CheckpointDir() + } + + if err := os.MkdirAll(checkpointDir, 0755); err != nil { + return nil, err + } + + dirs, err := ioutil.ReadDir(checkpointDir) + if err != nil { + return nil, err + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + path := filepath.Join(checkpointDir, d.Name(), "config.json") + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var cpt types.Checkpoint + if err := json.Unmarshal(data, &cpt); err != nil { + return nil, err + } + out = append(out, cpt) + } + + return out, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster.go b/vendor/github.com/moby/moby/daemon/cluster.go new file mode 100644 index 0000000..98b2aa1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster.go @@ -0,0 +1,12 @@ +package daemon + +import ( + apitypes "github.com/docker/docker/api/types" +) + +// Cluster is the interface for github.com/docker/docker/daemon/cluster.(*Cluster). +type Cluster interface { + GetNetwork(input string) (apitypes.NetworkResource, error) + GetNetworks() ([]apitypes.NetworkResource, error) + RemoveNetwork(input string) error +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/cluster.go b/vendor/github.com/moby/moby/daemon/cluster/cluster.go new file mode 100644 index 0000000..4a255ac --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/cluster.go @@ -0,0 +1,2006 @@ +package cluster + +import ( + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/daemon/cluster/executor/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" + swarmnode "github.com/docker/swarmkit/node" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +const swarmDirName = "swarm" +const controlSocket = "control.sock" +const swarmConnectTimeout = 20 * time.Second +const swarmRequestTimeout = 20 * time.Second +const stateFile = "docker-state.json" +const defaultAddr = "0.0.0.0:2377" + +const ( + initialReconnectDelay = 100 * time.Millisecond + maxReconnectDelay = 30 * time.Second + contextPrefix = "com.docker.swarm" +) + +// ErrNoSwarm is returned on leaving a cluster that was never initialized +var ErrNoSwarm = fmt.Errorf("This node is not part of a swarm") + +// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated +var ErrSwarmExists = fmt.Errorf("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.") + +// ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet. +var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.") + +// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. +var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.") + +// ErrSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it. +var ErrSwarmLocked = fmt.Errorf("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.") + +// ErrSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically. +var ErrSwarmCertificatesExpired = errors.New("Swarm certificates have expired. To replace them, leave the swarm and join again.") + +// NetworkSubnetsProvider exposes functions for retrieving the subnets +// of networks managed by Docker, so they can be filtered. +type NetworkSubnetsProvider interface { + V4Subnets() []net.IPNet + V6Subnets() []net.IPNet +} + +// Config provides values for Cluster. +type Config struct { + Root string + Name string + Backend executorpkg.Backend + NetworkSubnetsProvider NetworkSubnetsProvider + + // DefaultAdvertiseAddr is the default host/IP or network interface to use + // if no AdvertiseAddr value is specified. + DefaultAdvertiseAddr string + + // path to store runtime state, such as the swarm control socket + RuntimeRoot string +} + +// Cluster provides capabilities to participate in a cluster as a worker or a +// manager. +type Cluster struct { + sync.RWMutex + *node + root string + runtimeRoot string + config Config + configEvent chan struct{} // todo: make this array and goroutine safe + actualLocalAddr string // after resolution, not persisted + stop bool + err error + cancelDelay func() + attachers map[string]*attacher + locked bool + lastNodeConfig *nodeStartConfig +} + +// attacher manages the in-memory attachment state of a container +// attachment to a global scope network managed by swarm manager. It +// helps in identifying the attachment ID via the taskID and the +// corresponding attachment configuration obtained from the manager. +type attacher struct { + taskID string + config *network.NetworkingConfig + attachWaitCh chan *network.NetworkingConfig + attachCompleteCh chan struct{} + detachWaitCh chan struct{} +} + +type node struct { + *swarmnode.Node + done chan struct{} + ready bool + conn *grpc.ClientConn + client swarmapi.ControlClient + logs swarmapi.LogsClient + reconnectDelay time.Duration + config nodeStartConfig +} + +// nodeStartConfig holds configuration needed to start a new node. Exported +// fields of this structure are saved to disk in json. Unexported fields +// contain data that shouldn't be persisted between daemon reloads. +type nodeStartConfig struct { + // LocalAddr is this machine's local IP or hostname, if specified. + LocalAddr string + // RemoteAddr is the address that was given to "swarm join". It is used + // to find LocalAddr if necessary. + RemoteAddr string + // ListenAddr is the address we bind to, including a port. + ListenAddr string + // AdvertiseAddr is the address other nodes should connect to, + // including a port. + AdvertiseAddr string + joinAddr string + forceNewCluster bool + joinToken string + lockKey []byte + autolock bool +} + +// New creates a new Cluster instance using provided config. +func New(config Config) (*Cluster, error) { + root := filepath.Join(config.Root, swarmDirName) + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + if config.RuntimeRoot == "" { + config.RuntimeRoot = root + } + if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil { + return nil, err + } + c := &Cluster{ + root: root, + config: config, + configEvent: make(chan struct{}, 10), + runtimeRoot: config.RuntimeRoot, + attachers: make(map[string]*attacher), + } + + nodeConfig, err := c.loadState() + if err != nil { + if os.IsNotExist(err) { + return c, nil + } + return nil, err + } + + n, err := c.startNewNode(*nodeConfig) + if err != nil { + return nil, err + } + + select { + case <-time.After(swarmConnectTimeout): + logrus.Error("swarm component could not be started before timeout was reached") + case <-n.Ready(): + case <-n.done: + if errors.Cause(c.err) == ErrSwarmLocked { + return c, nil + } + if err, ok := errors.Cause(c.err).(x509.CertificateInvalidError); ok && err.Reason == x509.Expired { + c.err = ErrSwarmCertificatesExpired + return c, nil + } + return nil, fmt.Errorf("swarm component could not be started: %v", c.err) + } + go c.reconnectOnFailure(n) + return c, nil +} + +func (c *Cluster) loadState() (*nodeStartConfig, error) { + dt, err := ioutil.ReadFile(filepath.Join(c.root, stateFile)) + if err != nil { + return nil, err + } + // missing certificate means no actual state to restore from + if _, err := os.Stat(filepath.Join(c.root, "certificates/swarm-node.crt")); err != nil { + if os.IsNotExist(err) { + c.clearState() + } + return nil, err + } + var st nodeStartConfig + if err := json.Unmarshal(dt, &st); err != nil { + return nil, err + } + return &st, nil +} + +func (c *Cluster) saveState(config nodeStartConfig) error { + dt, err := json.Marshal(config) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600) +} + +func (c *Cluster) reconnectOnFailure(n *node) { + for { + <-n.done + c.Lock() + if c.stop || c.node != nil { + c.Unlock() + return + } + n.reconnectDelay *= 2 + if n.reconnectDelay > maxReconnectDelay { + n.reconnectDelay = maxReconnectDelay + } + logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) + delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) + c.cancelDelay = cancel + c.Unlock() + <-delayCtx.Done() + if delayCtx.Err() != context.DeadlineExceeded { + return + } + c.Lock() + if c.node != nil { + c.Unlock() + return + } + var err error + config := n.config + config.RemoteAddr = c.getRemoteAddress() + config.joinAddr = config.RemoteAddr + n, err = c.startNewNode(config) + if err != nil { + c.err = err + close(n.done) + } + c.Unlock() + } +} + +func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) { + if err := c.config.Backend.IsSwarmCompatible(); err != nil { + return nil, err + } + + actualLocalAddr := conf.LocalAddr + if actualLocalAddr == "" { + // If localAddr was not specified, resolve it automatically + // based on the route to joinAddr. localAddr can only be left + // empty on "join". + listenHost, _, err := net.SplitHostPort(conf.ListenAddr) + if err != nil { + return nil, fmt.Errorf("could not parse listen address: %v", err) + } + + listenAddrIP := net.ParseIP(listenHost) + if listenAddrIP == nil || !listenAddrIP.IsUnspecified() { + actualLocalAddr = listenHost + } else { + if conf.RemoteAddr == "" { + // Should never happen except using swarms created by + // old versions that didn't save remoteAddr. + conf.RemoteAddr = "8.8.8.8:53" + } + conn, err := net.Dial("udp", conf.RemoteAddr) + if err != nil { + return nil, fmt.Errorf("could not find local IP address: %v", err) + } + localHostPort := conn.LocalAddr().String() + actualLocalAddr, _, _ = net.SplitHostPort(localHostPort) + conn.Close() + } + } + + var control string + if runtime.GOOS == "windows" { + control = `\\.\pipe\` + controlSocket + } else { + control = filepath.Join(c.runtimeRoot, controlSocket) + } + + c.node = nil + c.cancelDelay = nil + c.stop = false + n, err := swarmnode.New(&swarmnode.Config{ + Hostname: c.config.Name, + ForceNewCluster: conf.forceNewCluster, + ListenControlAPI: control, + ListenRemoteAPI: conf.ListenAddr, + AdvertiseRemoteAPI: conf.AdvertiseAddr, + JoinAddr: conf.joinAddr, + StateDir: c.root, + JoinToken: conf.joinToken, + Executor: container.NewExecutor(c.config.Backend), + HeartbeatTick: 1, + ElectionTick: 3, + UnlockKey: conf.lockKey, + AutoLockManagers: conf.autolock, + PluginGetter: c.config.Backend.PluginGetter(), + }) + + if err != nil { + return nil, err + } + ctx := context.Background() + if err := n.Start(ctx); err != nil { + return nil, err + } + node := &node{ + Node: n, + done: make(chan struct{}), + reconnectDelay: initialReconnectDelay, + config: conf, + } + c.node = node + c.actualLocalAddr = actualLocalAddr // not saved + c.saveState(conf) + + c.config.Backend.DaemonJoinsCluster(c) + go func() { + err := detectLockedError(n.Err(ctx)) + if err != nil { + logrus.Errorf("cluster exited with error: %v", err) + } + c.Lock() + c.node = nil + c.err = err + if errors.Cause(err) == ErrSwarmLocked { + c.locked = true + confClone := conf + c.lastNodeConfig = &confClone + } + c.Unlock() + close(node.done) + }() + + go func() { + select { + case <-n.Ready(): + c.Lock() + node.ready = true + c.err = nil + c.Unlock() + case <-ctx.Done(): + } + c.configEvent <- struct{}{} + }() + + go func() { + for conn := range n.ListenControlSocket(ctx) { + c.Lock() + if node.conn != conn { + if conn == nil { + node.client = nil + node.logs = nil + } else { + node.client = swarmapi.NewControlClient(conn) + node.logs = swarmapi.NewLogsClient(conn) + } + } + node.conn = conn + c.Unlock() + c.configEvent <- struct{}{} + } + }() + + return node, nil +} + +// Init initializes new cluster from user provided request. +func (c *Cluster) Init(req types.InitRequest) (string, error) { + c.Lock() + if c.swarmExists() { + if !req.ForceNewCluster { + c.Unlock() + return "", ErrSwarmExists + } + if err := c.stopNode(); err != nil { + c.Unlock() + return "", err + } + } + + if err := validateAndSanitizeInitRequest(&req); err != nil { + c.Unlock() + return "", apierrors.NewBadRequestError(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + c.Unlock() + return "", err + } + + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + if err != nil { + c.Unlock() + return "", err + } + + localAddr := listenHost + + // If the local address is undetermined, the advertise address + // will be used as local address, if it belongs to this system. + // If the advertise address is not local, then we try to find + // a system address to use as local address. If this fails, + // we give up and ask user to pass the listen address. + if net.ParseIP(localAddr).IsUnspecified() { + advertiseIP := net.ParseIP(advertiseHost) + + found := false + for _, systemIP := range listSystemIPs() { + if systemIP.Equal(advertiseIP) { + localAddr = advertiseIP.String() + found = true + break + } + } + + if !found { + ip, err := c.resolveSystemAddr() + if err != nil { + c.Unlock() + logrus.Warnf("Could not find a local address: %v", err) + return "", errMustSpecifyListenAddr + } + localAddr = ip.String() + } + } + + // todo: check current state existing + n, err := c.startNewNode(nodeStartConfig{ + forceNewCluster: req.ForceNewCluster, + autolock: req.AutoLockManagers, + LocalAddr: localAddr, + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), + }) + if err != nil { + c.Unlock() + return "", err + } + c.Unlock() + + select { + case <-n.Ready(): + if err := initClusterSpec(n, req.Spec); err != nil { + return "", err + } + go c.reconnectOnFailure(n) + return n.NodeID(), nil + case <-n.done: + c.RLock() + defer c.RUnlock() + if !req.ForceNewCluster { // if failure on first attempt don't keep state + if err := c.clearState(); err != nil { + return "", err + } + } + return "", c.err + } +} + +// Join makes current Cluster part of an existing swarm cluster. +func (c *Cluster) Join(req types.JoinRequest) error { + c.Lock() + if c.swarmExists() { + c.Unlock() + return ErrSwarmExists + } + if err := validateAndSanitizeJoinRequest(&req); err != nil { + c.Unlock() + return apierrors.NewBadRequestError(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + c.Unlock() + return err + } + + var advertiseAddr string + if req.AdvertiseAddr != "" { + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + // For joining, we don't need to provide an advertise address, + // since the remote side can detect it. + if err == nil { + advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) + } + } + + // todo: check current state existing + n, err := c.startNewNode(nodeStartConfig{ + RemoteAddr: req.RemoteAddrs[0], + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: advertiseAddr, + joinAddr: req.RemoteAddrs[0], + joinToken: req.JoinToken, + }) + if err != nil { + c.Unlock() + return err + } + c.Unlock() + + select { + case <-time.After(swarmConnectTimeout): + // attempt to connect will continue in background, but reconnect only if it didn't fail + go func() { + select { + case <-n.Ready(): + c.reconnectOnFailure(n) + case <-n.done: + logrus.Errorf("failed to join the cluster: %+v", c.err) + } + }() + return ErrSwarmJoinTimeoutReached + case <-n.Ready(): + go c.reconnectOnFailure(n) + return nil + case <-n.done: + c.RLock() + defer c.RUnlock() + return c.err + } +} + +// GetUnlockKey returns the unlock key for the swarm. +func (c *Cluster) GetUnlockKey() (string, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return "", c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + client := swarmapi.NewCAClient(c.conn) + + r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{}) + if err != nil { + return "", err + } + + if len(r.UnlockKey) == 0 { + // no key + return "", nil + } + + return encryption.HumanReadableKey(r.UnlockKey), nil +} + +// UnlockSwarm provides a key to decrypt data that is encrypted at rest. +func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error { + c.RLock() + if !c.isActiveManager() { + if err := c.errNoManager(); err != ErrSwarmLocked { + c.RUnlock() + return err + } + } + + if c.node != nil || c.locked != true { + c.RUnlock() + return errors.New("swarm is not locked") + } + c.RUnlock() + + key, err := encryption.ParseHumanReadableKey(req.UnlockKey) + if err != nil { + return err + } + + c.Lock() + config := *c.lastNodeConfig + config.lockKey = key + n, err := c.startNewNode(config) + if err != nil { + c.Unlock() + return err + } + c.Unlock() + select { + case <-n.Ready(): + case <-n.done: + if errors.Cause(c.err) == ErrSwarmLocked { + return errors.New("swarm could not be unlocked: invalid key provided") + } + return fmt.Errorf("swarm component could not be started: %v", c.err) + } + go c.reconnectOnFailure(n) + return nil +} + +// stopNode is a helper that stops the active c.node and waits until it has +// shut down. Call while keeping the cluster lock. +func (c *Cluster) stopNode() error { + if c.node == nil { + return nil + } + c.stop = true + if c.cancelDelay != nil { + c.cancelDelay() + c.cancelDelay = nil + } + node := c.node + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + // TODO: can't hold lock on stop because it calls back to network + c.Unlock() + defer c.Lock() + if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { + return err + } + <-node.done + return nil +} + +func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool { + return reachable-2 <= unreachable +} + +func isLastManager(reachable, unreachable int) bool { + return reachable == 1 && unreachable == 0 +} + +// Leave shuts down Cluster and removes current state. +func (c *Cluster) Leave(force bool) error { + c.Lock() + node := c.node + if node == nil { + if c.locked { + c.locked = false + c.lastNodeConfig = nil + c.Unlock() + } else if c.err == ErrSwarmCertificatesExpired { + c.err = nil + c.Unlock() + } else { + c.Unlock() + return ErrNoSwarm + } + } else { + if node.Manager() != nil && !force { + msg := "You are attempting to leave the swarm on a node that is participating as a manager. " + if c.isActiveManager() { + active, reachable, unreachable, err := c.managerStats() + if err == nil { + if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { + if isLastManager(reachable, unreachable) { + msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " + c.Unlock() + return fmt.Errorf(msg) + } + msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) + } + } + } else { + msg += "Doing so may lose the consensus of your cluster. " + } + + msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." + c.Unlock() + return fmt.Errorf(msg) + } + if err := c.stopNode(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") + c.Unlock() + return err + } + c.Unlock() + if nodeID := node.NodeID(); nodeID != "" { + nodeContainers, err := c.listContainerForNode(nodeID) + if err != nil { + return err + } + for _, id := range nodeContainers { + if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("error removing %v: %v", id, err) + } + } + } + } + c.configEvent <- struct{}{} + // todo: cleanup optional? + if err := c.clearState(); err != nil { + return err + } + + return nil +} + +func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { + var ids []string + filters := filters.NewArgs() + filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) + containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ + Filters: filters, + }) + if err != nil { + return []string{}, err + } + for _, c := range containers { + ids = append(ids, c.ID) + } + return ids, nil +} + +func (c *Cluster) clearState() error { + // todo: backup this data instead of removing? + if err := os.RemoveAll(c.root); err != nil { + return err + } + if err := os.MkdirAll(c.root, 0700); err != nil { + return err + } + c.config.Backend.DaemonLeavesCluster() + return nil +} + +func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on qourum lost + return context.WithTimeout(context.Background(), swarmRequestTimeout) +} + +// Inspect retrieves the configuration properties of a managed swarm cluster. +func (c *Cluster) Inspect() (types.Swarm, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Swarm{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + swarm, err := getSwarm(ctx, c.client) + if err != nil { + return types.Swarm{}, err + } + + return convert.SwarmFromGRPC(*swarm), nil +} + +// Update updates configuration of a managed swarm cluster. +func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + swarm, err := getSwarm(ctx, c.client) + if err != nil { + return err + } + + // In update, client should provide the complete spec of the swarm, including + // Name and Labels. If a field is specified with 0 or nil, then the default value + // will be used to swarmkit. + clusterSpec, err := convert.SwarmSpecToGRPC(spec) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + _, err = c.client.UpdateCluster( + ctx, + &swarmapi.UpdateClusterRequest{ + ClusterID: swarm.ID, + Spec: &clusterSpec, + ClusterVersion: &swarmapi.Version{ + Index: version, + }, + Rotation: swarmapi.KeyRotation{ + WorkerJoinToken: flags.RotateWorkerToken, + ManagerJoinToken: flags.RotateManagerToken, + ManagerUnlockKey: flags.RotateManagerUnlockKey, + }, + }, + ) + return err +} + +// IsManager returns true if Cluster is participating as a manager. +func (c *Cluster) IsManager() bool { + c.RLock() + defer c.RUnlock() + return c.isActiveManager() +} + +// IsAgent returns true if Cluster is participating as a worker/agent. +func (c *Cluster) IsAgent() bool { + c.RLock() + defer c.RUnlock() + return c.node != nil && c.ready +} + +// GetLocalAddress returns the local address. +func (c *Cluster) GetLocalAddress() string { + c.RLock() + defer c.RUnlock() + return c.actualLocalAddr +} + +// GetListenAddress returns the listen address. +func (c *Cluster) GetListenAddress() string { + c.RLock() + defer c.RUnlock() + if c.node != nil { + return c.node.config.ListenAddr + } + return "" +} + +// GetAdvertiseAddress returns the remotely reachable address of this node. +func (c *Cluster) GetAdvertiseAddress() string { + c.RLock() + defer c.RUnlock() + if c.node != nil && c.node.config.AdvertiseAddr != "" { + advertiseHost, _, _ := net.SplitHostPort(c.node.config.AdvertiseAddr) + return advertiseHost + } + return c.actualLocalAddr +} + +// GetRemoteAddress returns a known advertise address of a remote manager if +// available. +// todo: change to array/connect with info +func (c *Cluster) GetRemoteAddress() string { + c.RLock() + defer c.RUnlock() + return c.getRemoteAddress() +} + +func (c *Cluster) getRemoteAddress() string { + if c.node == nil { + return "" + } + nodeID := c.node.NodeID() + for _, r := range c.node.Remotes() { + if r.NodeID != nodeID { + return r.Addr + } + } + return "" +} + +// ListenClusterEvents returns a channel that receives messages on cluster +// participation changes. +// todo: make cancelable and accessible to multiple callers +func (c *Cluster) ListenClusterEvents() <-chan struct{} { + return c.configEvent +} + +// Info returns information about the current cluster state. +func (c *Cluster) Info() types.Info { + info := types.Info{ + NodeAddr: c.GetAdvertiseAddress(), + } + + c.RLock() + defer c.RUnlock() + + if c.node == nil { + info.LocalNodeState = types.LocalNodeStateInactive + if c.cancelDelay != nil { + info.LocalNodeState = types.LocalNodeStateError + } + if c.locked { + info.LocalNodeState = types.LocalNodeStateLocked + } else if c.err == ErrSwarmCertificatesExpired { + info.LocalNodeState = types.LocalNodeStateError + } + } else { + info.LocalNodeState = types.LocalNodeStatePending + if c.ready == true { + info.LocalNodeState = types.LocalNodeStateActive + } else if c.locked { + info.LocalNodeState = types.LocalNodeStateLocked + } + } + if c.err != nil { + info.Error = c.err.Error() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + if c.isActiveManager() { + info.ControlAvailable = true + swarm, err := c.Inspect() + if err != nil { + info.Error = err.Error() + } + + // Strip JoinTokens + info.Cluster = swarm.ClusterInfo + + if r, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err == nil { + info.Nodes = len(r.Nodes) + for _, n := range r.Nodes { + if n.ManagerStatus != nil { + info.Managers = info.Managers + 1 + } + } + } + } + + if c.node != nil { + for _, r := range c.node.Remotes() { + info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) + } + info.NodeID = c.node.NodeID() + } + + return info +} + +// isActiveManager should not be called without a read lock +func (c *Cluster) isActiveManager() bool { + return c.node != nil && c.conn != nil +} + +// swarmExists should not be called without a read lock +func (c *Cluster) swarmExists() bool { + return c.node != nil || c.locked || c.err == ErrSwarmCertificatesExpired +} + +// errNoManager returns error describing why manager commands can't be used. +// Call with read lock. +func (c *Cluster) errNoManager() error { + if c.node == nil { + if c.locked { + return ErrSwarmLocked + } + if c.err == ErrSwarmCertificatesExpired { + return ErrSwarmCertificatesExpired + } + return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + if c.node.Manager() != nil { + return fmt.Errorf("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.") + } + return fmt.Errorf("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.") +} + +// GetServices returns all services of a managed swarm cluster. +func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + filters, err := newListServicesFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListServices( + ctx, + &swarmapi.ListServicesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + services := []types.Service{} + + for _, service := range r.Services { + services = append(services, convert.ServiceFromGRPC(*service)) + } + + return services, nil +} + +// imageWithDigestString takes an image such as name or name:tag +// and returns the image pinned to a digest, such as name@sha256:34234... +// Due to the difference between the docker/docker/reference, and the +// docker/distribution/reference packages, we're parsing the image twice. +// As the two packages converge, this function should be simplified. +// TODO(nishanttotla): After the packages converge, the function must +// convert distreference.Named -> distreference.Canonical, and the logic simplified. +func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) { + if _, err := digest.ParseDigest(image); err == nil { + return "", errors.New("image reference is an image ID") + } + ref, err := distreference.ParseNamed(image) + if err != nil { + return "", err + } + // only query registry if not a canonical reference (i.e. with digest) + if _, ok := ref.(distreference.Canonical); !ok { + // create a docker/docker/reference Named object because GetRepository needs it + dockerRef, err := reference.ParseNamed(image) + if err != nil { + return "", err + } + dockerRef = reference.WithDefaultTag(dockerRef) + namedTaggedRef, ok := dockerRef.(reference.NamedTagged) + if !ok { + return "", fmt.Errorf("unable to cast image to NamedTagged reference object") + } + + repo, _, err := c.config.Backend.GetRepository(ctx, namedTaggedRef, authConfig) + if err != nil { + return "", err + } + dscrptr, err := repo.Tags(ctx).Get(ctx, namedTaggedRef.Tag()) + if err != nil { + return "", err + } + + namedDigestedRef, err := distreference.WithDigest(distreference.EnsureTagged(ref), dscrptr.Digest) + if err != nil { + return "", err + } + return namedDigestedRef.String(), nil + } + // reference already contains a digest, so just return it + return ref.String(), nil +} + +// CreateService creates a new service in a managed swarm cluster. +func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apitypes.ServiceCreateResponse, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + err := c.populateNetworkID(ctx, c.client, &s) + if err != nil { + return nil, err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(s) + if err != nil { + return nil, apierrors.NewBadRequestError(err) + } + + ctnr := serviceSpec.Task.GetContainer() + if ctnr == nil { + return nil, fmt.Errorf("service does not use container tasks") + } + + if encodedAuth != "" { + ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + resp := &apitypes.ServiceCreateResponse{} + + // pin image by digest + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) + resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())) + } else if ctnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) + ctnr.Image = digestImage + } else { + logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to create a service + // if the registry is slow or unresponsive. + var newCancel func() + ctx, newCancel = c.getRequestContext() + defer newCancel() + } + + r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return nil, err + } + + resp.ID = r.Service.ID + return resp, nil +} + +// GetService returns a service based on an ID or name. +func (c *Cluster) GetService(input string) (types.Service, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Service{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + service, err := getService(ctx, c.client, input) + if err != nil { + return types.Service{}, err + } + return convert.ServiceFromGRPC(*service), nil +} + +// UpdateService updates existing service to match new properties. +func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, encodedAuth string, registryAuthFrom string) (*apitypes.ServiceUpdateResponse, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + err := c.populateNetworkID(ctx, c.client, &spec) + if err != nil { + return nil, err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(spec) + if err != nil { + return nil, apierrors.NewBadRequestError(err) + } + + currentService, err := getService(ctx, c.client, serviceIDOrName) + if err != nil { + return nil, err + } + + newCtnr := serviceSpec.Task.GetContainer() + if newCtnr == nil { + return nil, fmt.Errorf("service does not use container tasks") + } + + if encodedAuth != "" { + newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } else { + // this is needed because if the encodedAuth isn't being updated then we + // shouldn't lose it, and continue to use the one that was already present + var ctnr *swarmapi.ContainerSpec + switch registryAuthFrom { + case apitypes.RegistryAuthFromSpec, "": + ctnr = currentService.Spec.Task.GetContainer() + case apitypes.RegistryAuthFromPreviousSpec: + if currentService.PreviousSpec == nil { + return nil, fmt.Errorf("service does not have a previous spec") + } + ctnr = currentService.PreviousSpec.Task.GetContainer() + default: + return nil, fmt.Errorf("unsupported registryAuthFromValue") + } + if ctnr == nil { + return nil, fmt.Errorf("service does not use container tasks") + } + newCtnr.PullOptions = ctnr.PullOptions + // update encodedAuth so it can be used to pin image by digest + if ctnr.PullOptions != nil { + encodedAuth = ctnr.PullOptions.RegistryAuth + } + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + resp := &apitypes.ServiceUpdateResponse{} + + // pin image by digest + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) + resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())) + } else if newCtnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) + newCtnr.Image = digestImage + } else { + logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to create a service + // if the registry is slow or unresponsive. + var newCancel func() + ctx, newCancel = c.getRequestContext() + defer newCancel() + } + + _, err = c.client.UpdateService( + ctx, + &swarmapi.UpdateServiceRequest{ + ServiceID: currentService.ID, + Spec: &serviceSpec, + ServiceVersion: &swarmapi.Version{ + Index: version, + }, + }, + ) + + return resp, err +} + +// RemoveService removes a service from a managed swarm cluster. +func (c *Cluster) RemoveService(input string) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + service, err := getService(ctx, c.client, input) + if err != nil { + return err + } + + if _, err := c.client.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil { + return err + } + return nil +} + +// ServiceLogs collects service logs and writes them back to `config.OutStream` +func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend.ContainerLogsConfig, started chan struct{}) error { + c.RLock() + if !c.isActiveManager() { + c.RUnlock() + return c.errNoManager() + } + + service, err := getService(ctx, c.client, input) + if err != nil { + c.RUnlock() + return err + } + + stream, err := c.logs.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{ + Selector: &swarmapi.LogSelector{ + ServiceIDs: []string{service.ID}, + }, + Options: &swarmapi.LogSubscriptionOptions{ + Follow: config.Follow, + }, + }) + if err != nil { + c.RUnlock() + return err + } + + wf := ioutils.NewWriteFlusher(config.OutStream) + defer wf.Close() + close(started) + wf.Flush() + + outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout) + errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr) + + // Release the lock before starting the stream. + c.RUnlock() + for { + // Check the context before doing anything. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + subscribeMsg, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + for _, msg := range subscribeMsg.Messages { + data := []byte{} + + if config.Timestamps { + ts, err := ptypes.Timestamp(msg.Timestamp) + if err != nil { + return err + } + data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...) + } + + data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ", + contextPrefix, msg.Context.NodeID, + contextPrefix, msg.Context.ServiceID, + contextPrefix, msg.Context.TaskID, + ))...) + + data = append(data, msg.Data...) + + switch msg.Stream { + case swarmapi.LogStreamStdout: + outStream.Write(data) + case swarmapi.LogStreamStderr: + errStream.Write(data) + } + } + } +} + +// GetNodes returns a list of all nodes known to a cluster. +func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + filters, err := newListNodesFilters(options.Filters) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListNodes( + ctx, + &swarmapi.ListNodesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + nodes := []types.Node{} + + for _, node := range r.Nodes { + nodes = append(nodes, convert.NodeFromGRPC(*node)) + } + return nodes, nil +} + +// GetNode returns a node based on an ID. +func (c *Cluster) GetNode(input string) (types.Node, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Node{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + node, err := getNode(ctx, c.client, input) + if err != nil { + return types.Node{}, err + } + return convert.NodeFromGRPC(*node), nil +} + +// UpdateNode updates existing nodes properties. +func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + nodeSpec, err := convert.NodeSpecToGRPC(spec) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + currentNode, err := getNode(ctx, c.client, input) + if err != nil { + return err + } + + _, err = c.client.UpdateNode( + ctx, + &swarmapi.UpdateNodeRequest{ + NodeID: currentNode.ID, + Spec: &nodeSpec, + NodeVersion: &swarmapi.Version{ + Index: version, + }, + }, + ) + return err +} + +// RemoveNode removes a node from a cluster +func (c *Cluster) RemoveNode(input string, force bool) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + node, err := getNode(ctx, c.client, input) + if err != nil { + return err + } + + if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}); err != nil { + return err + } + return nil +} + +// GetTasks returns a list of tasks matching the filter options. +func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + byName := func(filter filters.Args) error { + if filter.Include("service") { + serviceFilters := filter.Get("service") + for _, serviceFilter := range serviceFilters { + service, err := c.GetService(serviceFilter) + if err != nil { + return err + } + filter.Del("service", serviceFilter) + filter.Add("service", service.ID) + } + } + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + node, err := c.GetNode(nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", node.ID) + } + } + return nil + } + + filters, err := newListTasksFilters(options.Filters, byName) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListTasks( + ctx, + &swarmapi.ListTasksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + tasks := []types.Task{} + + for _, task := range r.Tasks { + if task.Spec.GetContainer() != nil { + tasks = append(tasks, convert.TaskFromGRPC(*task)) + } + } + return tasks, nil +} + +// GetTask returns a task by an ID. +func (c *Cluster) GetTask(input string) (types.Task, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Task{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + task, err := getTask(ctx, c.client, input) + if err != nil { + return types.Task{}, err + } + return convert.TaskFromGRPC(*task), nil +} + +// GetNetwork returns a cluster network by an ID. +func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return apitypes.NetworkResource{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + network, err := getNetwork(ctx, c.client, input) + if err != nil { + return apitypes.NetworkResource{}, err + } + return convert.BasicNetworkFromGRPC(*network), nil +} + +func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + var networks []apitypes.NetworkResource + + for _, network := range r.Networks { + networks = append(networks, convert.BasicNetworkFromGRPC(*network)) + } + + return networks, nil +} + +// GetNetworks returns all current cluster managed networks. +func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { + return c.getNetworks(nil) +} + +// GetNetworksByName returns cluster managed networks by name. +// It is ok to have multiple networks here. #18864 +func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) { + // Note that swarmapi.GetNetworkRequest.Name is not functional. + // So we cannot just use that with c.GetNetwork. + return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{ + Names: []string{name}, + }) +} + +func attacherKey(target, containerID string) string { + return containerID + ":" + target +} + +// UpdateAttachment signals the attachment config to the attachment +// waiter who is trying to start or attach the container to the +// network. +func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error { + c.RLock() + attacher, ok := c.attachers[attacherKey(target, containerID)] + c.RUnlock() + if !ok || attacher == nil { + return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target) + } + + attacher.attachWaitCh <- config + close(attacher.attachWaitCh) + return nil +} + +// WaitForDetachment waits for the container to stop or detach from +// the network. +func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + c.RLock() + attacher, ok := c.attachers[attacherKey(networkName, containerID)] + if !ok { + attacher, ok = c.attachers[attacherKey(networkID, containerID)] + } + if c.node == nil || c.node.Agent() == nil { + c.RUnlock() + return fmt.Errorf("invalid cluster node while waiting for detachment") + } + + agent := c.node.Agent() + c.RUnlock() + + if ok && attacher != nil && + attacher.detachWaitCh != nil && + attacher.attachCompleteCh != nil { + // Attachment may be in progress still so wait for + // attachment to complete. + select { + case <-attacher.attachCompleteCh: + case <-ctx.Done(): + return ctx.Err() + } + + if attacher.taskID == taskID { + select { + case <-attacher.detachWaitCh: + case <-ctx.Done(): + return ctx.Err() + } + } + } + + return agent.ResourceAllocator().DetachNetwork(ctx, taskID) +} + +// AttachNetwork generates an attachment request towards the manager. +func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) { + aKey := attacherKey(target, containerID) + c.Lock() + if c.node == nil || c.node.Agent() == nil { + c.Unlock() + return nil, fmt.Errorf("invalid cluster node while attaching to network") + } + if attacher, ok := c.attachers[aKey]; ok { + c.Unlock() + return attacher.config, nil + } + + agent := c.node.Agent() + attachWaitCh := make(chan *network.NetworkingConfig) + detachWaitCh := make(chan struct{}) + attachCompleteCh := make(chan struct{}) + c.attachers[aKey] = &attacher{ + attachWaitCh: attachWaitCh, + attachCompleteCh: attachCompleteCh, + detachWaitCh: detachWaitCh, + } + c.Unlock() + + ctx, cancel := c.getRequestContext() + defer cancel() + + taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses) + if err != nil { + c.Lock() + delete(c.attachers, aKey) + c.Unlock() + return nil, fmt.Errorf("Could not attach to network %s: %v", target, err) + } + + c.Lock() + c.attachers[aKey].taskID = taskID + close(attachCompleteCh) + c.Unlock() + + logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID) + + release := func() { + ctx, cancel := c.getRequestContext() + defer cancel() + if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil { + logrus.Errorf("Failed remove network attachment %s to network %s on allocation failure: %v", + taskID, target, err) + } + } + + var config *network.NetworkingConfig + select { + case config = <-attachWaitCh: + case <-ctx.Done(): + release() + return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err()) + } + + c.Lock() + c.attachers[aKey].config = config + c.Unlock() + + logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID) + + return config, nil +} + +// DetachNetwork unblocks the waiters waiting on WaitForDetachment so +// that a request to detach can be generated towards the manager. +func (c *Cluster) DetachNetwork(target string, containerID string) error { + aKey := attacherKey(target, containerID) + + c.Lock() + attacher, ok := c.attachers[aKey] + delete(c.attachers, aKey) + c.Unlock() + + if !ok { + return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target) + } + + close(attacher.detachWaitCh) + return nil +} + +// CreateNetwork creates a new cluster managed network. +func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return "", c.errNoManager() + } + + if runconfig.IsPreDefinedNetwork(s.Name) { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name) + return "", apierrors.NewRequestForbiddenError(err) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + networkSpec := convert.BasicNetworkCreateToGRPC(s) + r, err := c.client.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec}) + if err != nil { + return "", err + } + + return r.Network.ID, nil +} + +// RemoveNetwork removes a cluster network. +func (c *Cluster) RemoveNetwork(input string) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + network, err := getNetwork(ctx, c.client, input) + if err != nil { + return err + } + + if _, err := c.client.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil { + return err + } + return nil +} + +func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { + // Always prefer NetworkAttachmentConfigs from TaskTemplate + // but fallback to service spec for backward compatibility + networks := s.TaskTemplate.Networks + if len(networks) == 0 { + networks = s.Networks + } + + for i, n := range networks { + apiNetwork, err := getNetwork(ctx, client, n.Target) + if err != nil { + if ln, _ := c.config.Backend.FindNetwork(n.Target); ln != nil && !ln.Info().Dynamic() { + err = fmt.Errorf("The network %s cannot be used with services. Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) + return apierrors.NewRequestForbiddenError(err) + } + return err + } + networks[i].Target = apiNetwork.ID + } + return nil +} + +func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { + // GetNetwork to match via full ID. + rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}) + if err != nil { + // If any error (including NotFound), ListNetworks to match via ID prefix and full name. + rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}}) + if err != nil || len(rl.Networks) == 0 { + rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Networks) == 0 { + return nil, fmt.Errorf("network %s not found", input) + } + + if l := len(rl.Networks); l > 1 { + return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l) + } + + return rl.Networks[0], nil + } + return rg.Network, nil +} + +// Cleanup stops active swarm node. This is run before daemon shutdown. +func (c *Cluster) Cleanup() { + c.Lock() + node := c.node + if node == nil { + c.Unlock() + return + } + defer c.Unlock() + if c.isActiveManager() { + active, reachable, unreachable, err := c.managerStats() + if err == nil { + singlenode := active && isLastManager(reachable, unreachable) + if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) { + logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable) + } + } + } + c.stopNode() +} + +func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}) + if err != nil { + return false, 0, 0, err + } + for _, n := range nodes.Nodes { + if n.ManagerStatus != nil { + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE { + reachable++ + if n.ID == c.node.NodeID() { + current = true + } + } + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE { + unreachable++ + } + } + } + return +} + +func validateAndSanitizeInitRequest(req *types.InitRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + + if req.Spec.Annotations.Name == "" { + req.Spec.Annotations.Name = "default" + } else if req.Spec.Annotations.Name != "default" { + return errors.New(`swarm spec must be named "default"`) + } + + return nil +} + +func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + if len(req.RemoteAddrs) == 0 { + return fmt.Errorf("at least 1 RemoteAddr is required to join") + } + for i := range req.RemoteAddrs { + req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) + if err != nil { + return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) + } + } + return nil +} + +func validateAddr(addr string) (string, error) { + if addr == "" { + return addr, fmt.Errorf("invalid empty address") + } + newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) + if err != nil { + return addr, nil + } + return strings.TrimPrefix(newaddr, "tcp://"), nil +} + +func initClusterSpec(node *node, spec types.Spec) error { + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + for conn := range node.ListenControlSocket(ctx) { + if ctx.Err() != nil { + return ctx.Err() + } + if conn != nil { + client := swarmapi.NewControlClient(conn) + var cluster *swarmapi.Cluster + for i := 0; ; i++ { + lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return fmt.Errorf("error on listing clusters: %v", err) + } + if len(lcr.Clusters) == 0 { + if i < 10 { + time.Sleep(200 * time.Millisecond) + continue + } + return fmt.Errorf("empty list of clusters was returned") + } + cluster = lcr.Clusters[0] + break + } + // In init, we take the initial default values from swarmkit, and merge + // any non nil or 0 value from spec to GRPC spec. This will leave the + // default value alone. + // Note that this is different from Update(), as in Update() we expect + // user to specify the complete spec of the cluster (as they already know + // the existing one and knows which field to update) + clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: &clusterSpec, + }) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + return nil + } + } + return ctx.Err() +} + +func detectLockedError(err error) error { + if err == swarmnode.ErrInvalidUnlockKey { + return errors.WithStack(ErrSwarmLocked) + } + return err +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/container.go b/vendor/github.com/moby/moby/daemon/cluster/convert/container.go new file mode 100644 index 0000000..10383f7 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/container.go @@ -0,0 +1,235 @@ +package convert + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + container "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { + containerSpec := types.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + Hosts: c.Hosts, + Secrets: secretReferencesFromGRPC(c.Secrets), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &types.DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + // Mounts + for _, m := range c.Mounts { + mount := mounttypes.Mount{ + Target: m.Target, + Source: m.Source, + Type: mounttypes.Type(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])), + ReadOnly: m.ReadOnly, + } + + if m.BindOptions != nil { + mount.BindOptions = &mounttypes.BindOptions{ + Propagation: mounttypes.Propagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])), + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &mounttypes.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.StopGracePeriod != nil { + grace, _ := ptypes.Duration(c.StopGracePeriod) + containerSpec.StopGracePeriod = &grace + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigFromGRPC(c.Healthcheck) + } + + return containerSpec +} + +func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference { + refs := make([]*swarmapi.SecretReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.SecretReference{ + SecretID: s.SecretID, + SecretName: s.SecretName, + } + if s.File != nil { + ref.Target = &swarmapi.SecretReference_File{ + File: &swarmapi.SecretReference_FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} +func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { + refs := make([]*types.SecretReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("secret target not a file: secret=%s", s.SecretID) + continue + } + refs = append(refs, &types.SecretReference{ + File: &types.SecretReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + SecretID: s.SecretID, + SecretName: s.SecretName, + }) + } + + return refs +} + +func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { + containerSpec := &swarmapi.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + Hosts: c.Hosts, + Secrets: secretReferencesToGRPC(c.Secrets), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &swarmapi.ContainerSpec_DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + if c.StopGracePeriod != nil { + containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod) + } + + // Mounts + for _, m := range c.Mounts { + mount := swarmapi.Mount{ + Target: m.Target, + Source: m.Source, + ReadOnly: m.ReadOnly, + } + + if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok { + mount.Type = swarmapi.Mount_MountType(mountType) + } else if string(m.Type) != "" { + return nil, fmt.Errorf("invalid MountType: %q", m.Type) + } + + if m.BindOptions != nil { + if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok { + mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)} + } else if string(m.BindOptions.Propagation) != "" { + return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation) + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &swarmapi.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &swarmapi.Mount_TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigToGRPC(c.Healthcheck) + } + + return containerSpec, nil +} + +func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig { + interval, _ := ptypes.Duration(h.Interval) + timeout, _ := ptypes.Duration(h.Timeout) + return &container.HealthConfig{ + Test: h.Test, + Interval: interval, + Timeout: timeout, + Retries: int(h.Retries), + } +} + +func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig { + return &swarmapi.HealthConfig{ + Test: h.Test, + Interval: ptypes.DurationProto(h.Interval), + Timeout: ptypes.DurationProto(h.Timeout), + Retries: int32(h.Retries), + } +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/network.go b/vendor/github.com/moby/moby/daemon/cluster/convert/network.go new file mode 100644 index 0000000..4d21b4d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/network.go @@ -0,0 +1,210 @@ +package convert + +import ( + "strings" + + basictypes "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +func networkAttachementFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { + if na != nil { + return types.NetworkAttachment{ + Network: networkFromGRPC(na.Network), + Addresses: na.Addresses, + } + } + return types.NetworkAttachment{} +} + +func networkFromGRPC(n *swarmapi.Network) types.Network { + if n != nil { + network := types.Network{ + ID: n.ID, + Spec: types.NetworkSpec{ + IPv6Enabled: n.Spec.Ipv6Enabled, + Internal: n.Spec.Internal, + Attachable: n.Spec.Attachable, + IPAMOptions: ipamFromGRPC(n.Spec.IPAM), + }, + IPAMOptions: ipamFromGRPC(n.IPAM), + } + + // Meta + network.Version.Index = n.Meta.Version.Index + network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) + network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) + + //Annotations + network.Spec.Name = n.Spec.Annotations.Name + network.Spec.Labels = n.Spec.Annotations.Labels + + //DriverConfiguration + if n.Spec.DriverConfig != nil { + network.Spec.DriverConfiguration = &types.Driver{ + Name: n.Spec.DriverConfig.Name, + Options: n.Spec.DriverConfig.Options, + } + } + + //DriverState + if n.DriverState != nil { + network.DriverState = types.Driver{ + Name: n.DriverState.Name, + Options: n.DriverState.Options, + } + } + + return network + } + return types.Network{} +} + +func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions { + var ipam *types.IPAMOptions + if i != nil { + ipam = &types.IPAMOptions{} + if i.Driver != nil { + ipam.Driver.Name = i.Driver.Name + ipam.Driver.Options = i.Driver.Options + } + + for _, config := range i.Configs { + ipam.Configs = append(ipam.Configs, types.IPAMConfig{ + Subnet: config.Subnet, + Range: config.Range, + Gateway: config.Gateway, + }) + } + } + return ipam +} + +func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { + var endpointSpec *types.EndpointSpec + if es != nil { + endpointSpec = &types.EndpointSpec{} + endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) + + for _, portState := range es.Ports { + endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{ + Name: portState.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])), + TargetPort: portState.TargetPort, + PublishedPort: portState.PublishedPort, + }) + } + } + return endpointSpec +} + +func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { + endpoint := types.Endpoint{} + if e != nil { + if espec := endpointSpecFromGRPC(e.Spec); espec != nil { + endpoint.Spec = *espec + } + + for _, portState := range e.Ports { + endpoint.Ports = append(endpoint.Ports, types.PortConfig{ + Name: portState.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])), + TargetPort: portState.TargetPort, + PublishedPort: portState.PublishedPort, + }) + } + + for _, v := range e.VirtualIPs { + endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{ + NetworkID: v.NetworkID, + Addr: v.Addr}) + } + + } + + return endpoint +} + +// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. +func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { + spec := n.Spec + var ipam networktypes.IPAM + if spec.IPAM != nil { + if spec.IPAM.Driver != nil { + ipam.Driver = spec.IPAM.Driver.Name + ipam.Options = spec.IPAM.Driver.Options + } + ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs)) + for _, ic := range spec.IPAM.Configs { + ipamConfig := networktypes.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + AuxAddress: ic.Reserved, + } + ipam.Config = append(ipam.Config, ipamConfig) + } + } + + nr := basictypes.NetworkResource{ + ID: n.ID, + Name: n.Spec.Annotations.Name, + Scope: "swarm", + EnableIPv6: spec.Ipv6Enabled, + IPAM: ipam, + Internal: spec.Internal, + Attachable: spec.Attachable, + Labels: n.Spec.Annotations.Labels, + } + + if n.DriverState != nil { + nr.Driver = n.DriverState.Name + nr.Options = n.DriverState.Options + } + + return nr +} + +// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec. +func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec { + ns := swarmapi.NetworkSpec{ + Annotations: swarmapi.Annotations{ + Name: create.Name, + Labels: create.Labels, + }, + DriverConfig: &swarmapi.Driver{ + Name: create.Driver, + Options: create.Options, + }, + Ipv6Enabled: create.EnableIPv6, + Internal: create.Internal, + Attachable: create.Attachable, + } + if create.IPAM != nil { + driver := create.IPAM.Driver + if driver == "" { + driver = "default" + } + ns.IPAM = &swarmapi.IPAMOptions{ + Driver: &swarmapi.Driver{ + Name: driver, + Options: create.IPAM.Options, + }, + } + ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config)) + for _, ipamConfig := range create.IPAM.Config { + ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{ + Subnet: ipamConfig.Subnet, + Range: ipamConfig.IPRange, + Gateway: ipamConfig.Gateway, + }) + } + ns.IPAM.Configs = ipamSpec + } + return ns +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/node.go b/vendor/github.com/moby/moby/daemon/cluster/convert/node.go new file mode 100644 index 0000000..306f34e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/node.go @@ -0,0 +1,89 @@ +package convert + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// NodeFromGRPC converts a grpc Node to a Node. +func NodeFromGRPC(n swarmapi.Node) types.Node { + node := types.Node{ + ID: n.ID, + Spec: types.NodeSpec{ + Role: types.NodeRole(strings.ToLower(n.Spec.Role.String())), + Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), + }, + Status: types.NodeStatus{ + State: types.NodeState(strings.ToLower(n.Status.State.String())), + Message: n.Status.Message, + Addr: n.Status.Addr, + }, + } + + // Meta + node.Version.Index = n.Meta.Version.Index + node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) + node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) + + //Annotations + node.Spec.Name = n.Spec.Annotations.Name + node.Spec.Labels = n.Spec.Annotations.Labels + + //Description + if n.Description != nil { + node.Description.Hostname = n.Description.Hostname + if n.Description.Platform != nil { + node.Description.Platform.Architecture = n.Description.Platform.Architecture + node.Description.Platform.OS = n.Description.Platform.OS + } + if n.Description.Resources != nil { + node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs + node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes + } + if n.Description.Engine != nil { + node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion + node.Description.Engine.Labels = n.Description.Engine.Labels + for _, plugin := range n.Description.Engine.Plugins { + node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) + } + } + } + + //Manager + if n.ManagerStatus != nil { + node.ManagerStatus = &types.ManagerStatus{ + Leader: n.ManagerStatus.Leader, + Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())), + Addr: n.ManagerStatus.Addr, + } + } + + return node +} + +// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec. +func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { + spec := swarmapi.NodeSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + } + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { + spec.Role = swarmapi.NodeRole(role) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) + } + + if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok { + spec.Availability = swarmapi.NodeSpec_Availability(availability) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability) + } + + return spec, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/secret.go b/vendor/github.com/moby/moby/daemon/cluster/convert/secret.go new file mode 100644 index 0000000..3e96687 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/secret.go @@ -0,0 +1,64 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// SecretFromGRPC converts a grpc Secret to a Secret. +func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret { + secret := swarmtypes.Secret{ + ID: s.ID, + Spec: swarmtypes.SecretSpec{ + Annotations: swarmtypes.Annotations{ + Name: s.Spec.Annotations.Name, + Labels: s.Spec.Annotations.Labels, + }, + Data: s.Spec.Data, + }, + } + + secret.Version.Index = s.Meta.Version.Index + // Meta + secret.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt) + secret.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt) + + return secret +} + +// SecretSpecToGRPC converts Secret to a grpc Secret. +func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec { + return swarmapi.SecretSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } +} + +// SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference +func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference { + refs := []*swarmtypes.SecretReference{} + + for _, r := range s { + ref := &swarmtypes.SecretReference{ + SecretID: r.SecretID, + SecretName: r.SecretName, + } + + if t, ok := r.Target.(*swarmapi.SecretReference_File); ok { + ref.File = &swarmtypes.SecretReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/service.go b/vendor/github.com/moby/moby/daemon/cluster/convert/service.go new file mode 100644 index 0000000..aa68e01 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/service.go @@ -0,0 +1,366 @@ +package convert + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/namesgenerator" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// ServiceFromGRPC converts a grpc Service to a Service. +func ServiceFromGRPC(s swarmapi.Service) types.Service { + service := types.Service{ + ID: s.ID, + Spec: *serviceSpecFromGRPC(&s.Spec), + PreviousSpec: serviceSpecFromGRPC(s.PreviousSpec), + + Endpoint: endpointFromGRPC(s.Endpoint), + } + + // Meta + service.Version.Index = s.Meta.Version.Index + service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt) + service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt) + + // UpdateStatus + service.UpdateStatus = types.UpdateStatus{} + if s.UpdateStatus != nil { + switch s.UpdateStatus.State { + case swarmapi.UpdateStatus_UPDATING: + service.UpdateStatus.State = types.UpdateStateUpdating + case swarmapi.UpdateStatus_PAUSED: + service.UpdateStatus.State = types.UpdateStatePaused + case swarmapi.UpdateStatus_COMPLETED: + service.UpdateStatus.State = types.UpdateStateCompleted + } + + service.UpdateStatus.StartedAt, _ = ptypes.Timestamp(s.UpdateStatus.StartedAt) + service.UpdateStatus.CompletedAt, _ = ptypes.Timestamp(s.UpdateStatus.CompletedAt) + service.UpdateStatus.Message = s.UpdateStatus.Message + } + + return service +} + +func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) *types.ServiceSpec { + if spec == nil { + return nil + } + + serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) + for _, n := range spec.Networks { + serviceNetworks = append(serviceNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Task.Networks)) + for _, n := range spec.Task.Networks { + taskNetworks = append(taskNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container + convertedSpec := &types.ServiceSpec{ + Annotations: types.Annotations{ + Name: spec.Annotations.Name, + Labels: spec.Annotations.Labels, + }, + + TaskTemplate: types.TaskSpec{ + ContainerSpec: containerSpecFromGRPC(containerConfig), + Resources: resourcesFromGRPC(spec.Task.Resources), + RestartPolicy: restartPolicyFromGRPC(spec.Task.Restart), + Placement: placementFromGRPC(spec.Task.Placement), + LogDriver: driverFromGRPC(spec.Task.LogDriver), + Networks: taskNetworks, + ForceUpdate: spec.Task.ForceUpdate, + }, + + Networks: serviceNetworks, + EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), + } + + // UpdateConfig + if spec.Update != nil { + convertedSpec.UpdateConfig = &types.UpdateConfig{ + Parallelism: spec.Update.Parallelism, + MaxFailureRatio: spec.Update.MaxFailureRatio, + } + + convertedSpec.UpdateConfig.Delay, _ = ptypes.Duration(&spec.Update.Delay) + if spec.Update.Monitor != nil { + convertedSpec.UpdateConfig.Monitor, _ = ptypes.Duration(spec.Update.Monitor) + } + + switch spec.Update.FailureAction { + case swarmapi.UpdateConfig_PAUSE: + convertedSpec.UpdateConfig.FailureAction = types.UpdateFailureActionPause + case swarmapi.UpdateConfig_CONTINUE: + convertedSpec.UpdateConfig.FailureAction = types.UpdateFailureActionContinue + } + } + + // Mode + switch t := spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + convertedSpec.Mode.Global = &types.GlobalService{} + case *swarmapi.ServiceSpec_Replicated: + convertedSpec.Mode.Replicated = &types.ReplicatedService{ + Replicas: &t.Replicated.Replicas, + } + } + + return convertedSpec +} + +// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. +func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { + name := s.Name + if name == "" { + name = namesgenerator.GetRandomName(0) + } + + serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) + for _, n := range s.Networks { + serviceNetworks = append(serviceNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) + for _, n := range s.TaskTemplate.Networks { + taskNetworks = append(taskNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + spec := swarmapi.ServiceSpec{ + Annotations: swarmapi.Annotations{ + Name: name, + Labels: s.Labels, + }, + Task: swarmapi.TaskSpec{ + Resources: resourcesToGRPC(s.TaskTemplate.Resources), + LogDriver: driverToGRPC(s.TaskTemplate.LogDriver), + Networks: taskNetworks, + ForceUpdate: s.TaskTemplate.ForceUpdate, + }, + Networks: serviceNetworks, + } + + containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} + + restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Restart = restartPolicy + + if s.TaskTemplate.Placement != nil { + spec.Task.Placement = &swarmapi.Placement{ + Constraints: s.TaskTemplate.Placement.Constraints, + } + } + + if s.UpdateConfig != nil { + var failureAction swarmapi.UpdateConfig_FailureAction + switch s.UpdateConfig.FailureAction { + case types.UpdateFailureActionPause, "": + failureAction = swarmapi.UpdateConfig_PAUSE + case types.UpdateFailureActionContinue: + failureAction = swarmapi.UpdateConfig_CONTINUE + default: + return swarmapi.ServiceSpec{}, fmt.Errorf("unrecongized update failure action %s", s.UpdateConfig.FailureAction) + } + spec.Update = &swarmapi.UpdateConfig{ + Parallelism: s.UpdateConfig.Parallelism, + Delay: *ptypes.DurationProto(s.UpdateConfig.Delay), + FailureAction: failureAction, + MaxFailureRatio: s.UpdateConfig.MaxFailureRatio, + } + if s.UpdateConfig.Monitor != 0 { + spec.Update.Monitor = ptypes.DurationProto(s.UpdateConfig.Monitor) + } + } + + if s.EndpointSpec != nil { + if s.EndpointSpec.Mode != "" && + s.EndpointSpec.Mode != types.ResolutionModeVIP && + s.EndpointSpec.Mode != types.ResolutionModeDNSRR { + return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode) + } + + spec.Endpoint = &swarmapi.EndpointSpec{} + + spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))]) + + for _, portConfig := range s.EndpointSpec.Ports { + spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{ + Name: portConfig.Name, + Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]), + PublishMode: swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + } + + // Mode + if s.Mode.Global != nil && s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode") + } + + if s.Mode.Global != nil { + spec.Mode = &swarmapi.ServiceSpec_Global{ + Global: &swarmapi.GlobalService{}, + } + } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas}, + } + } else { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: 1}, + } + } + + return spec, nil +} + +func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements { + var resources *types.ResourceRequirements + if res != nil { + resources = &types.ResourceRequirements{} + if res.Limits != nil { + resources.Limits = &types.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + resources.Reservations = &types.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + } + } + + return resources +} + +func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { + var reqs *swarmapi.ResourceRequirements + if res != nil { + reqs = &swarmapi.ResourceRequirements{} + if res.Limits != nil { + reqs.Limits = &swarmapi.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + reqs.Reservations = &swarmapi.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + + } + } + return reqs +} + +func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { + var rp *types.RestartPolicy + if p != nil { + rp = &types.RestartPolicy{} + + switch p.Condition { + case swarmapi.RestartOnNone: + rp.Condition = types.RestartPolicyConditionNone + case swarmapi.RestartOnFailure: + rp.Condition = types.RestartPolicyConditionOnFailure + case swarmapi.RestartOnAny: + rp.Condition = types.RestartPolicyConditionAny + default: + rp.Condition = types.RestartPolicyConditionAny + } + + if p.Delay != nil { + delay, _ := ptypes.Duration(p.Delay) + rp.Delay = &delay + } + if p.Window != nil { + window, _ := ptypes.Duration(p.Window) + rp.Window = &window + } + + rp.MaxAttempts = &p.MaxAttempts + } + return rp +} + +func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) { + var rp *swarmapi.RestartPolicy + if p != nil { + rp = &swarmapi.RestartPolicy{} + + switch p.Condition { + case types.RestartPolicyConditionNone: + rp.Condition = swarmapi.RestartOnNone + case types.RestartPolicyConditionOnFailure: + rp.Condition = swarmapi.RestartOnFailure + case types.RestartPolicyConditionAny: + rp.Condition = swarmapi.RestartOnAny + default: + if string(p.Condition) != "" { + return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition) + } + rp.Condition = swarmapi.RestartOnAny + } + + if p.Delay != nil { + rp.Delay = ptypes.DurationProto(*p.Delay) + } + if p.Window != nil { + rp.Window = ptypes.DurationProto(*p.Window) + } + if p.MaxAttempts != nil { + rp.MaxAttempts = *p.MaxAttempts + + } + } + return rp, nil +} + +func placementFromGRPC(p *swarmapi.Placement) *types.Placement { + var r *types.Placement + if p != nil { + r = &types.Placement{} + r.Constraints = p.Constraints + } + + return r +} + +func driverFromGRPC(p *swarmapi.Driver) *types.Driver { + if p == nil { + return nil + } + + return &types.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func driverToGRPC(p *types.Driver) *swarmapi.Driver { + if p == nil { + return nil + } + + return &swarmapi.Driver{ + Name: p.Name, + Options: p.Options, + } +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/swarm.go b/vendor/github.com/moby/moby/daemon/cluster/convert/swarm.go new file mode 100644 index 0000000..606e00a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/swarm.go @@ -0,0 +1,122 @@ +package convert + +import ( + "fmt" + "strings" + "time" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// SwarmFromGRPC converts a grpc Cluster to a Swarm. +func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { + swarm := types.Swarm{ + ClusterInfo: types.ClusterInfo{ + ID: c.ID, + Spec: types.Spec{ + Orchestration: types.OrchestrationConfig{ + TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit, + }, + Raft: types.RaftConfig{ + SnapshotInterval: c.Spec.Raft.SnapshotInterval, + KeepOldSnapshots: &c.Spec.Raft.KeepOldSnapshots, + LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers, + HeartbeatTick: int(c.Spec.Raft.HeartbeatTick), + ElectionTick: int(c.Spec.Raft.ElectionTick), + }, + EncryptionConfig: types.EncryptionConfig{ + AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, + }, + }, + }, + JoinTokens: types.JoinTokens{ + Worker: c.RootCA.JoinTokens.Worker, + Manager: c.RootCA.JoinTokens.Manager, + }, + } + + heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod) + swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod + + swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry) + + for _, ca := range c.Spec.CAConfig.ExternalCAs { + swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ + Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), + URL: ca.URL, + Options: ca.Options, + }) + } + + // Meta + swarm.Version.Index = c.Meta.Version.Index + swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt) + swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt) + + // Annotations + swarm.Spec.Name = c.Spec.Annotations.Name + swarm.Spec.Labels = c.Spec.Annotations.Labels + + return swarm +} + +// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec. +func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) { + return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{}) +} + +// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec +func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) { + // We take the initSpec (either created from scratch, or returned by swarmkit), + // and will only change the value if the one taken from types.Spec is not nil or 0. + // In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo. + if s.Annotations.Name != "" { + spec.Annotations.Name = s.Annotations.Name + } + if len(s.Annotations.Labels) != 0 { + spec.Annotations.Labels = s.Annotations.Labels + } + + if s.Orchestration.TaskHistoryRetentionLimit != nil { + spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit + } + if s.Raft.SnapshotInterval != 0 { + spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval + } + if s.Raft.KeepOldSnapshots != nil { + spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots + } + if s.Raft.LogEntriesForSlowFollowers != 0 { + spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers + } + if s.Raft.HeartbeatTick != 0 { + spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick) + } + if s.Raft.ElectionTick != 0 { + spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick) + } + if s.Dispatcher.HeartbeatPeriod != 0 { + spec.Dispatcher.HeartbeatPeriod = ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)) + } + if s.CAConfig.NodeCertExpiry != 0 { + spec.CAConfig.NodeCertExpiry = ptypes.DurationProto(s.CAConfig.NodeCertExpiry) + } + + for _, ca := range s.CAConfig.ExternalCAs { + protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] + if !ok { + return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol) + } + spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{ + Protocol: swarmapi.ExternalCA_CAProtocol(protocol), + URL: ca.URL, + Options: ca.Options, + }) + } + + spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers + + return spec, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/task.go b/vendor/github.com/moby/moby/daemon/cluster/convert/task.go new file mode 100644 index 0000000..d0cf89c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/task.go @@ -0,0 +1,81 @@ +package convert + +import ( + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// TaskFromGRPC converts a grpc Task to a Task. +func TaskFromGRPC(t swarmapi.Task) types.Task { + if t.Spec.GetAttachment() != nil { + return types.Task{} + } + containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container + containerStatus := t.Status.GetContainer() + networks := make([]types.NetworkAttachmentConfig, 0, len(t.Spec.Networks)) + for _, n := range t.Spec.Networks { + networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + task := types.Task{ + ID: t.ID, + Annotations: types.Annotations{ + Name: t.Annotations.Name, + Labels: t.Annotations.Labels, + }, + ServiceID: t.ServiceID, + Slot: int(t.Slot), + NodeID: t.NodeID, + Spec: types.TaskSpec{ + ContainerSpec: containerSpecFromGRPC(containerConfig), + Resources: resourcesFromGRPC(t.Spec.Resources), + RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart), + Placement: placementFromGRPC(t.Spec.Placement), + LogDriver: driverFromGRPC(t.Spec.LogDriver), + Networks: networks, + }, + Status: types.TaskStatus{ + State: types.TaskState(strings.ToLower(t.Status.State.String())), + Message: t.Status.Message, + Err: t.Status.Err, + }, + DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), + } + + // Meta + task.Version.Index = t.Meta.Version.Index + task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt) + task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt) + + task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp) + + if containerStatus != nil { + task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID + task.Status.ContainerStatus.PID = int(containerStatus.PID) + task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode) + } + + // NetworksAttachments + for _, na := range t.Networks { + task.NetworksAttachments = append(task.NetworksAttachments, networkAttachementFromGRPC(na)) + } + + if t.Status.PortStatus == nil { + return task + } + + for _, p := range t.Status.PortStatus.Ports { + task.Status.PortStatus.Ports = append(task.Status.PortStatus.Ports, types.PortConfig{ + Name: p.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(p.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(p.PublishMode)])), + TargetPort: p.TargetPort, + PublishedPort: p.PublishedPort, + }) + } + + return task +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/backend.go b/vendor/github.com/moby/moby/daemon/cluster/executor/backend.go new file mode 100644 index 0000000..0f1da38 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/backend.go @@ -0,0 +1,61 @@ +package executor + +import ( + "io" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/plugin" + "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/cluster" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent/exec" + "golang.org/x/net/context" +) + +// Backend defines the executor component for a swarm agent. +type Backend interface { + CreateManagedNetwork(clustertypes.NetworkCreateRequest) error + DeleteManagedNetwork(name string) error + FindNetwork(idName string) (libnetwork.Network, error) + SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error + PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + ActivateContainerServiceBinding(containerName string) error + DeactivateContainerServiceBinding(containerName string) error + UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error + ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) + ContainerWaitWithContext(ctx context.Context, name string) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerKill(name string, sig uint64) error + SetContainerSecretStore(name string, store exec.SecretGetter) error + SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error + SystemInfo() (*types.Info, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + Containers(config *types.ContainerListOptions) ([]*types.Container, error) + SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error + DaemonJoinsCluster(provider cluster.Provider) + DaemonLeavesCluster() + IsSwarmCompatible() error + SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(listener chan interface{}) + UpdateAttachment(string, string, string, *network.NetworkingConfig) error + WaitForDetachment(context.Context, string, string, string, string) error + GetRepository(context.Context, reference.NamedTagged, *types.AuthConfig) (distribution.Repository, bool, error) + LookupImage(name string) (*types.ImageInspect, error) + PluginManager() *plugin.Manager + PluginGetter() *plugin.Store +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/adapter.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/adapter.go new file mode 100644 index 0000000..f82f8b5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/adapter.go @@ -0,0 +1,463 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/protobuf/ptypes" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// containerAdapter conducts remote operations for a container. All calls +// are mostly naked calls to the client API, seeded with information from +// containerConfig. +type containerAdapter struct { + backend executorpkg.Backend + container *containerConfig + secrets exec.SecretGetter +} + +func newContainerAdapter(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) { + ctnr, err := newContainerConfig(task) + if err != nil { + return nil, err + } + + return &containerAdapter{ + container: ctnr, + backend: b, + secrets: secrets, + }, nil +} + +func (c *containerAdapter) pullImage(ctx context.Context) error { + spec := c.container.spec() + + // Skip pulling if the image is referenced by image ID. + if _, err := digest.ParseDigest(spec.Image); err == nil { + return nil + } + + // Skip pulling if the image is referenced by digest and already + // exists locally. + named, err := reference.ParseNamed(spec.Image) + if err == nil { + if _, ok := named.(reference.Canonical); ok { + _, err := c.backend.LookupImage(spec.Image) + if err == nil { + return nil + } + } + } + + // if the image needs to be pulled, the auth config will be retrieved and updated + var encodedAuthConfig string + if spec.PullOptions != nil { + encodedAuthConfig = spec.PullOptions.RegistryAuth + } + + authConfig := &types.AuthConfig{} + if encodedAuthConfig != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + pr, pw := io.Pipe() + metaHeaders := map[string][]string{} + go func() { + err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw) + pw.CloseWithError(err) + }() + + dec := json.NewDecoder(pr) + dec.UseNumber() + m := map[string]interface{}{} + spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1) + + lastStatus := "" + for { + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return err + } + l := log.G(ctx) + // limit pull progress logs unless the status changes + if spamLimiter.Allow() || lastStatus != m["status"] { + // if we have progress details, we have everything we need + if progress, ok := m["progressDetail"].(map[string]interface{}); ok { + // first, log the image and status + l = l.WithFields(logrus.Fields{ + "image": c.container.image(), + "status": m["status"], + }) + // then, if we have progress, log the progress + if progress["current"] != nil && progress["total"] != nil { + l = l.WithFields(logrus.Fields{ + "current": progress["current"], + "total": progress["total"], + }) + } + } + l.Debug("pull in progress") + } + // sometimes, we get no useful information at all, and add no fields + if status, ok := m["status"].(string); ok { + lastStatus = status + } + } + + // if the final stream object contained an error, return it + if errMsg, ok := m["error"]; ok { + return fmt.Errorf("%v", errMsg) + } + return nil +} + +func (c *containerAdapter) createNetworks(ctx context.Context) error { + for _, network := range c.container.networks() { + ncr, err := c.container.networkCreateRequest(network) + if err != nil { + return err + } + + if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing + if _, ok := err.(libnetwork.NetworkNameError); ok { + continue + } + + return err + } + } + + return nil +} + +func (c *containerAdapter) removeNetworks(ctx context.Context) error { + for _, nid := range c.container.networks() { + if err := c.backend.DeleteManagedNetwork(nid); err != nil { + switch err.(type) { + case *libnetwork.ActiveEndpointsError: + continue + case libnetwork.ErrNoSuchNetwork: + continue + default: + log.G(ctx).Errorf("network %s remove failed: %v", nid, err) + return err + } + } + } + + return nil +} + +func (c *containerAdapter) networkAttach(ctx context.Context) error { + config := c.container.createNetworkingConfig() + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config) +} + +func (c *containerAdapter) waitForDetach(ctx context.Context) error { + config := c.container.createNetworkingConfig() + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id()) +} + +func (c *containerAdapter) create(ctx context.Context) error { + var cr containertypes.ContainerCreateCreatedBody + var err error + + if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ + Name: c.container.name(), + Config: c.container.config(), + HostConfig: c.container.hostConfig(), + // Use the first network in container create + NetworkingConfig: c.container.createNetworkingConfig(), + }); err != nil { + return err + } + + // Docker daemon currently doesn't support multiple networks in container create + // Connect to all other networks + nc := c.container.connectNetworkingConfig() + + if nc != nil { + for n, ep := range nc.EndpointsConfig { + if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil { + return err + } + } + } + + container := c.container.task.Spec.GetContainer() + if container == nil { + return fmt.Errorf("unable to get container from task spec") + } + + // configure secrets + if err := c.backend.SetContainerSecretStore(cr.ID, c.secrets); err != nil { + return err + } + + refs := convert.SecretReferencesFromGRPC(container.Secrets) + if err := c.backend.SetContainerSecretReferences(cr.ID, refs); err != nil { + return err + } + + if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil { + return err + } + + return nil +} + +// checkMounts ensures that the provided mounts won't have any host-specific +// problems at start up. For example, we disallow bind mounts without an +// existing path, which slightly different from the container API. +func (c *containerAdapter) checkMounts() error { + spec := c.container.spec() + for _, mount := range spec.Mounts { + switch mount.Type { + case api.MountTypeBind: + if _, err := os.Stat(mount.Source); os.IsNotExist(err) { + return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source) + } + } + } + + return nil +} + +func (c *containerAdapter) start(ctx context.Context) error { + if err := c.checkMounts(); err != nil { + return err + } + + return c.backend.ContainerStart(c.container.name(), nil, "", "") +} + +func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { + cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) + if ctx.Err() != nil { + return types.ContainerJSON{}, ctx.Err() + } + if err != nil { + return types.ContainerJSON{}, err + } + return *cs, nil +} + +// events issues a call to the events API and returns a channel with all +// events. The stream of events can be shutdown by cancelling the context. +func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { + log.G(ctx).Debugf("waiting on events") + buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter()) + eventsq := make(chan events.Message, len(buffer)) + + for _, event := range buffer { + eventsq <- event + } + + go func() { + defer c.backend.UnsubscribeFromEvents(l) + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + log.G(ctx).Warnf("unexpected event message: %q", ev) + continue + } + select { + case eventsq <- jev: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + + return eventsq +} + +func (c *containerAdapter) wait(ctx context.Context) error { + return c.backend.ContainerWaitWithContext(ctx, c.container.nameOrID()) +} + +func (c *containerAdapter) shutdown(ctx context.Context) error { + // Default stop grace period to nil (daemon will use the stopTimeout of the container) + var stopgrace *int + spec := c.container.spec() + if spec.StopGracePeriod != nil { + stopgraceValue := int(spec.StopGracePeriod.Seconds) + stopgrace = &stopgraceValue + } + return c.backend.ContainerStop(c.container.name(), stopgrace) +} + +func (c *containerAdapter) terminate(ctx context.Context) error { + return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL)) +} + +func (c *containerAdapter) remove(ctx context.Context) error { + return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{ + RemoveVolume: true, + ForceRemove: true, + }) +} + +func (c *containerAdapter) createVolumes(ctx context.Context) error { + // Create plugin volumes that are embedded inside a Mount + for _, mount := range c.container.task.Spec.GetContainer().Mounts { + if mount.Type != api.MountTypeVolume { + continue + } + + if mount.VolumeOptions == nil { + continue + } + + if mount.VolumeOptions.DriverConfig == nil { + continue + } + + req := c.container.volumeCreateRequest(&mount) + + // Check if this volume exists on the engine + if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil { + // TODO(amitshukla): Today, volume create through the engine api does not return an error + // when the named volume with the same parameters already exists. + // It returns an error if the driver name is different - that is a valid error + return err + } + + } + + return nil +} + +func (c *containerAdapter) activateServiceBinding() error { + return c.backend.ActivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) deactivateServiceBinding() error { + return c.backend.DeactivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) { + reader, writer := io.Pipe() + + apiOptions := &backend.ContainerLogsConfig{ + ContainerLogsOptions: types.ContainerLogsOptions{ + Follow: options.Follow, + + // TODO(stevvooe): Parse timestamp out of message. This + // absolutely needs to be done before going to production with + // this, at it is completely redundant. + Timestamps: true, + Details: false, // no clue what to do with this, let's just deprecate it. + }, + OutStream: writer, + } + + if options.Since != nil { + since, err := ptypes.Timestamp(options.Since) + if err != nil { + return nil, err + } + apiOptions.Since = since.Format(time.RFC3339Nano) + } + + if options.Tail < 0 { + // See protobuf documentation for details of how this works. + apiOptions.Tail = fmt.Sprint(-options.Tail - 1) + } else if options.Tail > 0 { + return nil, fmt.Errorf("tail relative to start of logs not supported via docker API") + } + + if len(options.Streams) == 0 { + // empty == all + apiOptions.ShowStdout, apiOptions.ShowStderr = true, true + } else { + for _, stream := range options.Streams { + switch stream { + case api.LogStreamStdout: + apiOptions.ShowStdout = true + case api.LogStreamStderr: + apiOptions.ShowStderr = true + } + } + } + + chStarted := make(chan struct{}) + go func() { + defer writer.Close() + c.backend.ContainerLogs(ctx, c.container.name(), apiOptions, chStarted) + }() + + return reader, nil +} + +// todo: typed/wrapped errors +func isContainerCreateNameConflict(err error) bool { + return strings.Contains(err.Error(), "Conflict. The name") +} + +func isUnknownContainer(err error) bool { + return strings.Contains(err.Error(), "No such container:") +} + +func isStoppedContainer(err error) bool { + return strings.Contains(err.Error(), "is already stopped") +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/attachment.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/attachment.go new file mode 100644 index 0000000..e0ee81a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/attachment.go @@ -0,0 +1,81 @@ +package container + +import ( + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// networkAttacherController implements agent.Controller against docker's API. +// +// networkAttacherController manages the lifecycle of network +// attachment of a docker unmanaged container managed as a task from +// agent point of view. It provides network attachment information to +// the unmanaged container for it to attach to the network and run. +type networkAttacherController struct { + backend executorpkg.Backend + task *api.Task + adapter *containerAdapter + closed chan struct{} +} + +func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*networkAttacherController, error) { + adapter, err := newContainerAdapter(b, task, secrets) + if err != nil { + return nil, err + } + + return &networkAttacherController{ + backend: b, + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) error { + return nil +} + +func (nc *networkAttacherController) Prepare(ctx context.Context) error { + // Make sure all the networks that the task needs are created. + if err := nc.adapter.createNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Start(ctx context.Context) error { + return nc.adapter.networkAttach(ctx) +} + +func (nc *networkAttacherController) Wait(pctx context.Context) error { + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + return nc.adapter.waitForDetach(ctx) +} + +func (nc *networkAttacherController) Shutdown(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Terminate(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Remove(ctx context.Context) error { + // Try removing the network referenced in this task in case this + // task is the last one referencing it + if err := nc.adapter.removeNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Close() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/container.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/container.go new file mode 100644 index 0000000..f033ad5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/container.go @@ -0,0 +1,598 @@ +package container + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/api/types" + enginecontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + enginemount "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + volumetypes "github.com/docker/docker/api/types/volume" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/reference" + "github.com/docker/go-connections/nat" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/docker/swarmkit/template" +) + +const ( + // Explicitly use the kernel's default setting for CPU quota of 100ms. + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + cpuQuotaPeriod = 100 * time.Millisecond + + // systemLabelPrefix represents the reserved namespace for system labels. + systemLabelPrefix = "com.docker.swarm" +) + +// containerConfig converts task properties into docker container compatible +// components. +type containerConfig struct { + task *api.Task + networksAttachments map[string]*api.NetworkAttachment +} + +// newContainerConfig returns a validated container config. No methods should +// return an error if this function returns without error. +func newContainerConfig(t *api.Task) (*containerConfig, error) { + var c containerConfig + return &c, c.setTask(t) +} + +func (c *containerConfig) setTask(t *api.Task) error { + if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil { + return exec.ErrRuntimeUnsupported + } + + container := t.Spec.GetContainer() + if container != nil { + if container.Image == "" { + return ErrImageRequired + } + + if err := validateMounts(container.Mounts); err != nil { + return err + } + } + + // index the networks by name + c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) + for _, attachment := range t.Networks { + c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment + } + + c.task = t + + if t.Spec.GetContainer() != nil { + preparedSpec, err := template.ExpandContainerSpec(t) + if err != nil { + return err + } + c.task.Spec.Runtime = &api.TaskSpec_Container{ + Container: preparedSpec, + } + } + + return nil +} + +func (c *containerConfig) id() string { + attachment := c.task.Spec.GetAttachment() + if attachment == nil { + return "" + } + + return attachment.ContainerID +} + +func (c *containerConfig) taskID() string { + return c.task.ID +} + +func (c *containerConfig) endpoint() *api.Endpoint { + return c.task.Endpoint +} + +func (c *containerConfig) spec() *api.ContainerSpec { + return c.task.Spec.GetContainer() +} + +func (c *containerConfig) nameOrID() string { + if c.task.Spec.GetContainer() != nil { + return c.name() + } + + return c.id() +} + +func (c *containerConfig) name() string { + if c.task.Annotations.Name != "" { + // if set, use the container Annotations.Name field, set in the orchestrator. + return c.task.Annotations.Name + } + + slot := fmt.Sprint(c.task.Slot) + if slot == "" || c.task.Slot == 0 { + slot = c.task.NodeID + } + + // fallback to service.slot.id. + return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID) +} + +func (c *containerConfig) image() string { + raw := c.spec().Image + ref, err := reference.ParseNamed(raw) + if err != nil { + return raw + } + return reference.WithDefaultTag(ref).String() +} + +func (c *containerConfig) portBindings() nat.PortMap { + portBindings := nat.PortMap{} + if c.task.Endpoint == nil { + return portBindings + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + binding := []nat.PortBinding{ + {}, + } + + if portConfig.PublishedPort != 0 { + binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort)) + } + portBindings[port] = binding + } + + return portBindings +} + +func (c *containerConfig) exposedPorts() map[nat.Port]struct{} { + exposedPorts := make(map[nat.Port]struct{}) + if c.task.Endpoint == nil { + return exposedPorts + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + exposedPorts[port] = struct{}{} + } + + return exposedPorts +} + +func (c *containerConfig) config() *enginecontainer.Config { + config := &enginecontainer.Config{ + Labels: c.labels(), + Tty: c.spec().TTY, + OpenStdin: c.spec().OpenStdin, + User: c.spec().User, + Env: c.spec().Env, + Hostname: c.spec().Hostname, + WorkingDir: c.spec().Dir, + Image: c.image(), + ExposedPorts: c.exposedPorts(), + Healthcheck: c.healthcheck(), + } + + if len(c.spec().Command) > 0 { + // If Command is provided, we replace the whole invocation with Command + // by replacing Entrypoint and specifying Cmd. Args is ignored in this + // case. + config.Entrypoint = append(config.Entrypoint, c.spec().Command...) + config.Cmd = append(config.Cmd, c.spec().Args...) + } else if len(c.spec().Args) > 0 { + // In this case, we assume the image has an Entrypoint and Args + // specifies the arguments for that entrypoint. + config.Cmd = c.spec().Args + } + + return config +} + +func (c *containerConfig) labels() map[string]string { + var ( + system = map[string]string{ + "task": "", // mark as cluster task + "task.id": c.task.ID, + "task.name": c.name(), + "node.id": c.task.NodeID, + "service.id": c.task.ServiceID, + "service.name": c.task.ServiceAnnotations.Name, + } + labels = make(map[string]string) + ) + + // base labels are those defined in the spec. + for k, v := range c.spec().Labels { + labels[k] = v + } + + // we then apply the overrides from the task, which may be set via the + // orchestrator. + for k, v := range c.task.Annotations.Labels { + labels[k] = v + } + + // finally, we apply the system labels, which override all labels. + for k, v := range system { + labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v + } + + return labels +} + +func (c *containerConfig) mounts() []enginemount.Mount { + var r []enginemount.Mount + for _, mount := range c.spec().Mounts { + r = append(r, convertMount(mount)) + } + return r +} + +func convertMount(m api.Mount) enginemount.Mount { + mount := enginemount.Mount{ + Source: m.Source, + Target: m.Target, + ReadOnly: m.ReadOnly, + } + + switch m.Type { + case api.MountTypeBind: + mount.Type = enginemount.TypeBind + case api.MountTypeVolume: + mount.Type = enginemount.TypeVolume + case api.MountTypeTmpfs: + mount.Type = enginemount.TypeTmpfs + } + + if m.BindOptions != nil { + mount.BindOptions = &enginemount.BindOptions{} + switch m.BindOptions.Propagation { + case api.MountPropagationRPrivate: + mount.BindOptions.Propagation = enginemount.PropagationRPrivate + case api.MountPropagationPrivate: + mount.BindOptions.Propagation = enginemount.PropagationPrivate + case api.MountPropagationRSlave: + mount.BindOptions.Propagation = enginemount.PropagationRSlave + case api.MountPropagationSlave: + mount.BindOptions.Propagation = enginemount.PropagationSlave + case api.MountPropagationRShared: + mount.BindOptions.Propagation = enginemount.PropagationRShared + case api.MountPropagationShared: + mount.BindOptions.Propagation = enginemount.PropagationShared + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &enginemount.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + } + if m.VolumeOptions.Labels != nil { + mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels)) + for k, v := range m.VolumeOptions.Labels { + mount.VolumeOptions.Labels[k] = v + } + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &enginemount.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + } + if m.VolumeOptions.DriverConfig.Options != nil { + mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options)) + for k, v := range m.VolumeOptions.DriverConfig.Options { + mount.VolumeOptions.DriverConfig.Options[k] = v + } + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &enginemount.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + return mount +} + +func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig { + hcSpec := c.spec().Healthcheck + if hcSpec == nil { + return nil + } + interval, _ := ptypes.Duration(hcSpec.Interval) + timeout, _ := ptypes.Duration(hcSpec.Timeout) + return &enginecontainer.HealthConfig{ + Test: hcSpec.Test, + Interval: interval, + Timeout: timeout, + Retries: int(hcSpec.Retries), + } +} + +func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { + hc := &enginecontainer.HostConfig{ + Resources: c.resources(), + GroupAdd: c.spec().Groups, + PortBindings: c.portBindings(), + Mounts: c.mounts(), + } + + if c.spec().DNSConfig != nil { + hc.DNS = c.spec().DNSConfig.Nameservers + hc.DNSSearch = c.spec().DNSConfig.Search + hc.DNSOptions = c.spec().DNSConfig.Options + } + + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // However, the format of ExtraHosts in HostConfig is + // : + // We need to do the conversion here + // (Alias is ignored for now) + for _, entry := range c.spec().Hosts { + parts := strings.Fields(entry) + if len(parts) > 1 { + hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0])) + } + } + + if c.task.LogDriver != nil { + hc.LogConfig = enginecontainer.LogConfig{ + Type: c.task.LogDriver.Name, + Config: c.task.LogDriver.Options, + } + } + + return hc +} + +// This handles the case of volumes that are defined inside a service Mount +func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumesCreateBody { + var ( + driverName string + driverOpts map[string]string + labels map[string]string + ) + + if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { + driverName = mount.VolumeOptions.DriverConfig.Name + driverOpts = mount.VolumeOptions.DriverConfig.Options + labels = mount.VolumeOptions.Labels + } + + if mount.VolumeOptions != nil { + return &volumetypes.VolumesCreateBody{ + Name: mount.Source, + Driver: driverName, + DriverOpts: driverOpts, + Labels: labels, + } + } + return nil +} + +func (c *containerConfig) resources() enginecontainer.Resources { + resources := enginecontainer.Resources{} + + // If no limits are specified let the engine use its defaults. + // + // TODO(aluzzardi): We might want to set some limits anyway otherwise + // "unlimited" tasks will step over the reservation of other tasks. + r := c.task.Spec.Resources + if r == nil || r.Limits == nil { + return resources + } + + if r.Limits.MemoryBytes > 0 { + resources.Memory = r.Limits.MemoryBytes + } + + if r.Limits.NanoCPUs > 0 { + // CPU Period must be set in microseconds. + resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) + resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 + } + + return resources +} + +// Docker daemon supports just 1 network during container create. +func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil { + networks = c.task.Networks + } + + epConfig := make(map[string]*network.EndpointSettings) + if len(networks) > 0 { + epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0]) + } + + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create +func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil { + networks = c.task.Networks + } + + // First network is used during container create. Other networks are used in "docker network connect" + if len(networks) < 2 { + return nil + } + + epConfig := make(map[string]*network.EndpointSettings) + for _, na := range networks[1:] { + epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na) + } + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings { + var ipv4, ipv6 string + for _, addr := range na.Addresses { + ip, _, err := net.ParseCIDR(addr) + if err != nil { + continue + } + + if ip.To4() != nil { + ipv4 = ip.String() + continue + } + + if ip.To16() != nil { + ipv6 = ip.String() + } + } + + return &network.EndpointSettings{ + NetworkID: na.Network.ID, + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: ipv4, + IPv6Address: ipv6, + }, + } +} + +func (c *containerConfig) virtualIP(networkID string) string { + if c.task.Endpoint == nil { + return "" + } + + for _, eVip := range c.task.Endpoint.VirtualIPs { + // We only support IPv4 VIPs for now. + if eVip.NetworkID == networkID { + vip, _, err := net.ParseCIDR(eVip.Addr) + if err != nil { + return "" + } + + return vip.String() + } + } + + return "" +} + +func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { + if len(c.task.Networks) == 0 { + return nil + } + + logrus.Debugf("Creating service config in agent for t = %+v", c.task) + svcCfg := &clustertypes.ServiceConfig{ + Name: c.task.ServiceAnnotations.Name, + Aliases: make(map[string][]string), + ID: c.task.ServiceID, + VirtualAddresses: make(map[string]*clustertypes.VirtualAddress), + } + + for _, na := range c.task.Networks { + svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{ + // We support only IPv4 virtual IP for now. + IPv4: c.virtualIP(na.Network.ID), + } + if len(na.Aliases) > 0 { + svcCfg.Aliases[na.Network.ID] = na.Aliases + } + } + + if c.task.Endpoint != nil { + for _, ePort := range c.task.Endpoint.Ports { + if ePort.PublishMode != api.PublishModeIngress { + continue + } + + svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{ + Name: ePort.Name, + Protocol: int32(ePort.Protocol), + TargetPort: ePort.TargetPort, + PublishedPort: ePort.PublishedPort, + }) + } + } + + return svcCfg +} + +// networks returns a list of network names attached to the container. The +// returned name can be used to lookup the corresponding network create +// options. +func (c *containerConfig) networks() []string { + var networks []string + + for name := range c.networksAttachments { + networks = append(networks, name) + } + + return networks +} + +func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) { + na, ok := c.networksAttachments[name] + if !ok { + return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced") + } + + options := types.NetworkCreate{ + // ID: na.Network.ID, + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + Options: na.Network.IPAM.Driver.Options, + }, + Options: na.Network.DriverState.Options, + Labels: na.Network.Spec.Annotations.Labels, + Internal: na.Network.Spec.Internal, + Attachable: na.Network.Spec.Attachable, + EnableIPv6: na.Network.Spec.Ipv6Enabled, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil +} + +func (c containerConfig) eventFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("type", events.ContainerEventType) + filter.Add("name", c.name()) + filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) + return filter +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/controller.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/controller.go new file mode 100644 index 0000000..e97df18 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/controller.go @@ -0,0 +1,668 @@ +package container + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// controller implements agent.Controller against docker's API. +// +// Most operations against docker's API are done through the container name, +// which is unique to the task. +type controller struct { + task *api.Task + adapter *containerAdapter + closed chan struct{} + err error + + pulled chan struct{} // closed after pull + cancelPull func() // cancels pull context if not nil + pullErr error // pull error, only read after pulled closed +} + +var _ exec.Controller = &controller{} + +// NewController returns a docker exec runner for the provided task. +func newController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*controller, error) { + adapter, err := newContainerAdapter(b, task, secrets) + if err != nil { + return nil, err + } + + return &controller{ + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (r *controller) Task() (*api.Task, error) { + return r.task, nil +} + +// ContainerStatus returns the container-specific status for the task. +func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + return nil, err + } + return parseContainerStatus(ctnr) +} + +func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + + return nil, err + } + + return parsePortStatus(ctnr) +} + +// Update tasks a recent task update and applies it to the container. +func (r *controller) Update(ctx context.Context, t *api.Task) error { + // TODO(stevvooe): While assignment of tasks is idempotent, we do allow + // updates of metadata, such as labelling, as well as any other properties + // that make sense. + return nil +} + +// Prepare creates a container and ensures the image is pulled. +// +// If the container has already be created, exec.ErrTaskPrepared is returned. +func (r *controller) Prepare(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + // Make sure all the networks that the task needs are created. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + // Make sure all the volumes that the task needs are created. + if err := r.adapter.createVolumes(ctx); err != nil { + return err + } + + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + if r.pulled == nil { + // Fork the pull to a different context to allow pull to continue + // on re-entrant calls to Prepare. This ensures that Prepare can be + // idempotent and not incur the extra cost of pulling when + // cancelled on updates. + var pctx context.Context + + r.pulled = make(chan struct{}) + pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller. + + go func() { + defer close(r.pulled) + r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled + }() + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-r.pulled: + if r.pullErr != nil { + // NOTE(stevvooe): We always try to pull the image to make sure we have + // the most up to date version. This will return an error, but we only + // log it. If the image truly doesn't exist, the create below will + // error out. + // + // This gives us some nice behavior where we use up to date versions of + // mutable tags, but will still run if the old image is available but a + // registry is down. + // + // If you don't want this behavior, lock down your image to an + // immutable tag or digest. + log.G(ctx).WithError(r.pullErr).Error("pulling image failed") + } + } + } + + if err := r.adapter.create(ctx); err != nil { + if isContainerCreateNameConflict(err) { + if _, err := r.adapter.inspect(ctx); err != nil { + return err + } + + // container is already created. success! + return exec.ErrTaskPrepared + } + + return err + } + + return nil +} + +// Start the container. An error will be returned if the container is already started. +func (r *controller) Start(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return err + } + + // Detect whether the container has *ever* been started. If so, we don't + // issue the start. + // + // TODO(stevvooe): This is very racy. While reading inspect, another could + // start the process and we could end up starting it twice. + if ctnr.State.Status != "created" { + return exec.ErrTaskStarted + } + + for { + if err := r.adapter.start(ctx); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + // Retry network creation again if we + // failed because some of the networks + // were not found. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + continue + } + + return errors.Wrap(err, "starting container failed") + } + + break + } + + // no health check + if ctnr.Config == nil || ctnr.Config.Healthcheck == nil || len(ctnr.Config.Healthcheck.Test) == 0 || ctnr.Config.Healthcheck.Test[0] == "NONE" { + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name()) + return err + } + return nil + } + + // wait for container to be healthy + eventq := r.adapter.events(ctx) + + var healthErr error + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "die": // exit on terminal events + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return errors.Wrap(err, "die event received") + } else if ctnr.State.ExitCode != 0 { + return &exitError{code: ctnr.State.ExitCode, cause: healthErr} + } + + return nil + case "destroy": + // If we get here, something has gone wrong but we want to exit + // and report anyways. + return ErrContainerDestroyed + case "health_status: unhealthy": + // in this case, we stop the container and report unhealthy status + if err := r.Shutdown(ctx); err != nil { + return errors.Wrap(err, "unhealthy container shutdown failed") + } + // set health check error, and wait for container to fully exit ("die" event) + healthErr = ErrContainerUnhealthy + case "health_status: healthy": + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name()) + return err + } + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +// Wait on the container to exit. +func (r *controller) Wait(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + healthErr := make(chan error, 1) + go func() { + ectx, cancel := context.WithCancel(ctx) // cancel event context on first event + defer cancel() + if err := r.checkHealth(ectx); err == ErrContainerUnhealthy { + healthErr <- ErrContainerUnhealthy + if err := r.Shutdown(ectx); err != nil { + log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy") + } + } + }() + + err := r.adapter.wait(ctx) + if ctx.Err() != nil { + return ctx.Err() + } + + if err != nil { + ee := &exitError{} + if ec, ok := err.(exec.ExitCoder); ok { + ee.code = ec.ExitCode() + } + select { + case e := <-healthErr: + ee.cause = e + default: + if err.Error() != "" { + ee.cause = err + } + } + return ee + } + + return nil +} + +// Shutdown the container cleanly. +func (r *controller) Shutdown(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + // remove container from service binding + if err := r.adapter.deactivateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name()) + // Don't return an error here, because failure to deactivate + // the service binding is expected if the container was never + // started. + } + + if err := r.adapter.shutdown(ctx); err != nil { + if isUnknownContainer(err) || isStoppedContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Terminate the container, with force. +func (r *controller) Terminate(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + if err := r.adapter.terminate(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Remove the container and its resources. +func (r *controller) Remove(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + // It may be necessary to shut down the task before removing it. + if err := r.Shutdown(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + // This may fail if the task was already shut down. + log.G(ctx).WithError(err).Debug("shutdown failed on removal") + } + + // Try removing networks referenced in this task in case this + // task is the last one referencing it + if err := r.adapter.removeNetworks(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + return err + } + + if err := r.adapter.remove(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + return nil +} + +// waitReady waits for a container to be "ready". +// Ready means it's past the started state. +func (r *controller) waitReady(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + eventq := r.adapter.events(ctx) + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if !isUnknownContainer(err) { + return errors.Wrap(err, "inspect container failed") + } + } else { + switch ctnr.State.Status { + case "running", "exited", "dead": + return nil + } + } + + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "start": + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error { + if err := r.checkClosed(); err != nil { + return err + } + + if err := r.waitReady(ctx); err != nil { + return errors.Wrap(err, "container not ready for logs") + } + + rc, err := r.adapter.logs(ctx, options) + if err != nil { + return errors.Wrap(err, "failed getting container logs") + } + defer rc.Close() + + var ( + // use a rate limiter to keep things under control but also provides some + // ability coalesce messages. + limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s + msgctx = api.LogContext{ + NodeID: r.task.NodeID, + ServiceID: r.task.ServiceID, + TaskID: r.task.ID, + } + ) + + brd := bufio.NewReader(rc) + for { + // so, message header is 8 bytes, treat as uint64, pull stream off MSB + var header uint64 + if err := binary.Read(brd, binary.BigEndian, &header); err != nil { + if err == io.EOF { + return nil + } + + return errors.Wrap(err, "failed reading log header") + } + + stream, size := (header>>(7<<3))&0xFF, header & ^(uint64(0xFF)<<(7<<3)) + + // limit here to decrease allocation back pressure. + if err := limiter.WaitN(ctx, int(size)); err != nil { + return errors.Wrap(err, "failed rate limiter") + } + + buf := make([]byte, size) + _, err := io.ReadFull(brd, buf) + if err != nil { + return errors.Wrap(err, "failed reading buffer") + } + + // Timestamp is RFC3339Nano with 1 space after. Lop, parse, publish + parts := bytes.SplitN(buf, []byte(" "), 2) + if len(parts) != 2 { + return fmt.Errorf("invalid timestamp in log message: %v", buf) + } + + ts, err := time.Parse(time.RFC3339Nano, string(parts[0])) + if err != nil { + return errors.Wrap(err, "failed to parse timestamp") + } + + tsp, err := ptypes.TimestampProto(ts) + if err != nil { + return errors.Wrap(err, "failed to convert timestamp") + } + + if err := publisher.Publish(ctx, api.LogMessage{ + Context: msgctx, + Timestamp: tsp, + Stream: api.LogStream(stream), + + Data: parts[1], + }); err != nil { + return errors.Wrap(err, "failed to publish log message") + } + } +} + +// Close the runner and clean up any ephemeral resources. +func (r *controller) Close() error { + select { + case <-r.closed: + return r.err + default: + if r.cancelPull != nil { + r.cancelPull() + } + + r.err = exec.ErrControllerClosed + close(r.closed) + } + return nil +} + +func (r *controller) matchevent(event events.Message) bool { + if event.Type != events.ContainerEventType { + return false + } + + // TODO(stevvooe): Filter based on ID matching, in addition to name. + + // Make sure the events are for this container. + if event.Actor.Attributes["name"] != r.adapter.container.name() { + return false + } + + return true +} + +func (r *controller) checkClosed() error { + select { + case <-r.closed: + return r.err + default: + return nil + } +} + +func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) { + status := &api.ContainerStatus{ + ContainerID: ctnr.ID, + PID: int32(ctnr.State.Pid), + ExitCode: int32(ctnr.State.ExitCode), + } + + return status, nil +} + +func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) { + status := &api.PortStatus{} + + if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 { + exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports) + if err != nil { + return nil, err + } + status.Ports = exposedPorts + } + + return status, nil +} + +func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) { + exposedPorts := make([]*api.PortConfig, 0, len(portMap)) + + for portProtocol, mapping := range portMap { + parts := strings.SplitN(string(portProtocol), "/", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid port mapping: %s", portProtocol) + } + + port, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return nil, err + } + + protocol := api.ProtocolTCP + switch strings.ToLower(parts[1]) { + case "tcp": + protocol = api.ProtocolTCP + case "udp": + protocol = api.ProtocolUDP + default: + return nil, fmt.Errorf("invalid protocol: %s", parts[1]) + } + + for _, binding := range mapping { + hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) + if err != nil { + return nil, err + } + + // TODO(aluzzardi): We're losing the port `name` here since + // there's no way to retrieve it back from the Engine. + exposedPorts = append(exposedPorts, &api.PortConfig{ + PublishMode: api.PublishModeHost, + Protocol: protocol, + TargetPort: uint32(port), + PublishedPort: uint32(hostPort), + }) + } + } + + return exposedPorts, nil +} + +type exitError struct { + code int + cause error +} + +func (e *exitError) Error() string { + if e.cause != nil { + return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) + } + + return fmt.Sprintf("task: non-zero exit (%v)", e.code) +} + +func (e *exitError) ExitCode() int { + return int(e.code) +} + +func (e *exitError) Cause() error { + return e.cause +} + +// checkHealth blocks until unhealthy container is detected or ctx exits +func (r *controller) checkHealth(ctx context.Context) error { + eventq := r.adapter.events(ctx) + + for { + select { + case <-ctx.Done(): + return nil + case <-r.closed: + return nil + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "health_status: unhealthy": + return ErrContainerUnhealthy + } + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/errors.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/errors.go new file mode 100644 index 0000000..63e1233 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/errors.go @@ -0,0 +1,15 @@ +package container + +import "fmt" + +var ( + // ErrImageRequired returned if a task is missing the image definition. + ErrImageRequired = fmt.Errorf("dockerexec: image required") + + // ErrContainerDestroyed returned when a container is prematurely destroyed + // during a wait call. + ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed") + + // ErrContainerUnhealthy returned if controller detects the health check failure + ErrContainerUnhealthy = fmt.Errorf("dockerexec: unhealthy container") +) diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/executor.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/executor.go new file mode 100644 index 0000000..f0dedd4 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/executor.go @@ -0,0 +1,194 @@ +package container + +import ( + "sort" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/agent/secrets" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +type executor struct { + backend executorpkg.Backend + secrets exec.SecretsManager +} + +// NewExecutor returns an executor from the docker client. +func NewExecutor(b executorpkg.Backend) exec.Executor { + return &executor{ + backend: b, + secrets: secrets.NewManager(), + } +} + +// Describe returns the underlying node description from the docker client. +func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { + info, err := e.backend.SystemInfo() + if err != nil { + return nil, err + } + + plugins := map[api.PluginDescription]struct{}{} + addPlugins := func(typ string, names []string) { + for _, name := range names { + plugins[api.PluginDescription{ + Type: typ, + Name: name, + }] = struct{}{} + } + } + + // add v1 plugins + addPlugins("Volume", info.Plugins.Volume) + // Add builtin driver "overlay" (the only builtin multi-host driver) to + // the plugin list by default. + addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) + addPlugins("Authorization", info.Plugins.Authorization) + + // add v2 plugins + v2Plugins, err := e.backend.PluginManager().List() + if err == nil { + for _, plgn := range v2Plugins { + for _, typ := range plgn.Config.Interface.Types { + if typ.Prefix != "docker" || !plgn.Enabled { + continue + } + plgnTyp := typ.Capability + if typ.Capability == "volumedriver" { + plgnTyp = "Volume" + } else if typ.Capability == "networkdriver" { + plgnTyp = "Network" + } + plugins[api.PluginDescription{ + Type: plgnTyp, + Name: plgn.Name, + }] = struct{}{} + } + } + } + + pluginFields := make([]api.PluginDescription, 0, len(plugins)) + for k := range plugins { + pluginFields = append(pluginFields, k) + } + + sort.Sort(sortedPlugins(pluginFields)) + + // parse []string labels into a map[string]string + labels := map[string]string{} + for _, l := range info.Labels { + stringSlice := strings.SplitN(l, "=", 2) + // this will take the last value in the list for a given key + // ideally, one shouldn't assign multiple values to the same key + if len(stringSlice) > 1 { + labels[stringSlice[0]] = stringSlice[1] + } + } + + description := &api.NodeDescription{ + Hostname: info.Name, + Platform: &api.Platform{ + Architecture: info.Architecture, + OS: info.OSType, + }, + Engine: &api.EngineDescription{ + EngineVersion: info.ServerVersion, + Labels: labels, + Plugins: pluginFields, + }, + Resources: &api.Resources{ + NanoCPUs: int64(info.NCPU) * 1e9, + MemoryBytes: info.MemTotal, + }, + } + + return description, nil +} + +func (e *executor) Configure(ctx context.Context, node *api.Node) error { + na := node.Attachment + if na == nil { + return nil + } + + options := types.NetworkCreate{ + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + }, + Options: na.Network.DriverState.Options, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + return e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ + na.Network.ID, + types.NetworkCreateRequest{ + Name: na.Network.Spec.Annotations.Name, + NetworkCreate: options, + }, + }, na.Addresses[0]) +} + +// Controller returns a docker container runner. +func (e *executor) Controller(t *api.Task) (exec.Controller, error) { + if t.Spec.GetAttachment() != nil { + return newNetworkAttacherController(e.backend, t, e.secrets) + } + + ctlr, err := newController(e.backend, t, e.secrets) + if err != nil { + return nil, err + } + + return ctlr, nil +} + +func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { + nwKeys := []*networktypes.EncryptionKey{} + for _, key := range keys { + nwKey := &networktypes.EncryptionKey{ + Subsystem: key.Subsystem, + Algorithm: int32(key.Algorithm), + Key: make([]byte, len(key.Key)), + LamportTime: key.LamportTime, + } + copy(nwKey.Key, key.Key) + nwKeys = append(nwKeys, nwKey) + } + e.backend.SetNetworkBootstrapKeys(nwKeys) + + return nil +} + +func (e *executor) Secrets() exec.SecretsManager { + return e.secrets +} + +type sortedPlugins []api.PluginDescription + +func (sp sortedPlugins) Len() int { return len(sp) } + +func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } + +func (sp sortedPlugins) Less(i, j int) bool { + if sp[i].Type != sp[j].Type { + return sp[i].Type < sp[j].Type + } + return sp[i].Name < sp[j].Name +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/health_test.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/health_test.go new file mode 100644 index 0000000..99cf750 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/health_test.go @@ -0,0 +1,102 @@ +// +build !windows + +package container + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/events" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func TestHealthStates(t *testing.T) { + + // set up environment: events, task, container .... + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + task := &api.Task{ + ID: "id", + ServiceID: "sid", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + }, + }, + Annotations: api.Annotations{Name: "name"}, + } + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "id", + Name: "name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + }, + } + + daemon := &daemon.Daemon{ + EventsService: e, + } + + controller, err := newController(daemon, task, nil) + if err != nil { + t.Fatalf("create controller fail %v", err) + } + + errChan := make(chan error, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // fire checkHealth + go func() { + err := controller.checkHealth(ctx) + select { + case errChan <- err: + case <-ctx.Done(): + } + }() + + // send an event and expect to get expectedErr + // if expectedErr is nil, shouldn't get any error + logAndExpect := func(msg string, expectedErr error) { + daemon.LogContainerEvent(c, msg) + + timer := time.NewTimer(1 * time.Second) + defer timer.Stop() + + select { + case err := <-errChan: + if err != expectedErr { + t.Fatalf("expect error %v, but get %v", expectedErr, err) + } + case <-timer.C: + if expectedErr != nil { + t.Fatalf("time limit exceeded, didn't get expected error") + } + } + } + + // events that are ignored by checkHealth + logAndExpect("health_status: running", nil) + logAndExpect("health_status: healthy", nil) + logAndExpect("die", nil) + + // unhealthy event will be caught by checkHealth + logAndExpect("health_status: unhealthy", ErrContainerUnhealthy) +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate.go new file mode 100644 index 0000000..5fda1f2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate.go @@ -0,0 +1,39 @@ +package container + +import ( + "fmt" + "path/filepath" + + "github.com/docker/swarmkit/api" +) + +func validateMounts(mounts []api.Mount) error { + for _, mount := range mounts { + // Target must always be absolute + if !filepath.IsAbs(mount.Target) { + return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target) + } + + switch mount.Type { + // The checks on abs paths are required due to the container API confusing + // volume mounts as bind mounts when the source is absolute (and vice-versa) + // See #25253 + // TODO: This is probably not necessary once #22373 is merged + case api.MountTypeBind: + if !filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source) + } + case api.MountTypeVolume: + if filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source) + } + case api.MountTypeTmpfs: + if mount.Source != "" { + return fmt.Errorf("invalid tmpfs source, source must be empty") + } + default: + return fmt.Errorf("invalid mount type: %s", mount.Type) + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_test.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_test.go new file mode 100644 index 0000000..9d98e2c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_test.go @@ -0,0 +1,141 @@ +package container + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/swarmkit/api" +) + +func newTestControllerWithMount(m api.Mount) (*controller, error) { + return newController(&daemon.Daemon{}, &api.Task{ + ID: stringid.GenerateRandomID(), + ServiceID: stringid.GenerateRandomID(), + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + Mounts: []api.Mount{m}, + }, + }, + }, + }, nil) +} + +func TestControllerValidateMountBind(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid bind mount source") { + t.Fatalf("expected error, got: %v", err) + } + + // with non-existing source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsNonExistent, + Target: testAbsPath, + }); err != nil { + t.Fatalf("controller should not error at creation: %v", err) + } + + // with proper source + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountBind") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected error, got: %v", err) + } +} + +func TestControllerValidateMountVolume(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: testAbsPath, + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid volume mount source") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: "foo", + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected error, got: %v", err) + } +} + +func TestControllerValidateMountTarget(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountTarget") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsPath, + Target: "foo", + }); err == nil || !strings.Contains(err.Error(), "invalid mount target") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountTmpfs(t *testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid tmpfs source") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountInvalidType(t *testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.Mount_MountType(9999), + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid mount type") { + t.Fatalf("expected error, got: %v", err) + } +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_unix_test.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_unix_test.go new file mode 100644 index 0000000..c616eee --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_unix_test.go @@ -0,0 +1,8 @@ +// +build !windows + +package container + +const ( + testAbsPath = "/foo" + testAbsNonExistent = "/some-non-existing-host-path/" +) diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_windows_test.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_windows_test.go new file mode 100644 index 0000000..c346451 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_windows_test.go @@ -0,0 +1,8 @@ +// +build windows + +package container + +const ( + testAbsPath = `c:\foo` + testAbsNonExistent = `c:\some-non-existing-host-path\` +) diff --git a/vendor/github.com/moby/moby/daemon/cluster/filters.go b/vendor/github.com/moby/moby/daemon/cluster/filters.go new file mode 100644 index 0000000..88668ed --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/filters.go @@ -0,0 +1,116 @@ +package cluster + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/filters" + runconfigopts "github.com/docker/docker/runconfig/opts" + swarmapi "github.com/docker/swarmkit/api" +) + +func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "role": true, + "membership": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + f := &swarmapi.ListNodesRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + } + + for _, r := range filter.Get("role") { + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok { + f.Roles = append(f.Roles, swarmapi.NodeRole(role)) + } else if r != "" { + return nil, fmt.Errorf("Invalid role filter: '%s'", r) + } + } + + for _, a := range filter.Get("membership") { + if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok { + f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership)) + } else if a != "" { + return nil, fmt.Errorf("Invalid membership filter: '%s'", a) + } + } + + return f, nil +} + +func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListServicesRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} + +func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "service": true, + "node": true, + "desired-state": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + if transformFunc != nil { + if err := transformFunc(filter); err != nil { + return nil, err + } + } + f := &swarmapi.ListTasksRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + ServiceIDs: filter.Get("service"), + NodeIDs: filter.Get("node"), + } + + for _, s := range filter.Get("desired-state") { + if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { + f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) + } else if s != "" { + return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) + } + } + + return f, nil +} + +func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Filters, error) { + accepted := map[string]bool{ + "names": true, + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListSecretsRequest_Filters{ + Names: filter.Get("names"), + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/helpers.go b/vendor/github.com/moby/moby/daemon/cluster/helpers.go new file mode 100644 index 0000000..09a1a5f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/helpers.go @@ -0,0 +1,112 @@ +package cluster + +import ( + "fmt" + + "github.com/docker/docker/api/errors" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) { + rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return nil, err + } + + if len(rl.Clusters) == 0 { + return nil, errors.NewRequestNotFoundError(ErrNoSwarm) + } + + // TODO: assume one cluster only + return rl.Clusters[0], nil +} + +func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) { + // GetNode to match via full ID. + rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}) + if err != nil { + // If any error (including NotFound), ListNodes to match via full name. + rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{Names: []string{input}}}) + + if err != nil || len(rl.Nodes) == 0 { + // If any error or 0 result, ListNodes to match via ID prefix. + rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Nodes) == 0 { + err := fmt.Errorf("node %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Nodes); l > 1 { + return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l) + } + + return rl.Nodes[0], nil + } + return rg.Node, nil +} + +func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) { + // GetService to match via full ID. + rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input}) + if err != nil { + // If any error (including NotFound), ListServices to match via full name. + rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{Names: []string{input}}}) + if err != nil || len(rl.Services) == 0 { + // If any error or 0 result, ListServices to match via ID prefix. + rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Services) == 0 { + err := fmt.Errorf("service %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Services); l > 1 { + return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) + } + + return rl.Services[0], nil + } + return rg.Service, nil +} + +func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { + // GetTask to match via full ID. + rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}) + if err != nil { + // If any error (including NotFound), ListTasks to match via full name. + rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{Names: []string{input}}}) + + if err != nil || len(rl.Tasks) == 0 { + // If any error or 0 result, ListTasks to match via ID prefix. + rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Tasks) == 0 { + err := fmt.Errorf("task %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Tasks); l > 1 { + return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l) + } + + return rl.Tasks[0], nil + } + return rg.Task, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/listen_addr.go b/vendor/github.com/moby/moby/daemon/cluster/listen_addr.go new file mode 100644 index 0000000..c24d486 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/listen_addr.go @@ -0,0 +1,278 @@ +package cluster + +import ( + "errors" + "fmt" + "net" +) + +var ( + errNoSuchInterface = errors.New("no such interface") + errNoIP = errors.New("could not find the system's IP address") + errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified") + errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)") + errBadAdvertiseAddr = errors.New("advertise address must be a non-zero IP address or network interface (with optional port number)") + errBadDefaultAdvertiseAddr = errors.New("default advertise address must be a non-zero IP address or network interface (without a port number)") +) + +func resolveListenAddr(specifiedAddr string) (string, string, error) { + specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr) + if err != nil { + return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) + } + + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. + interfaceAddr, err := resolveInterfaceAddr(specifiedHost) + if err == nil { + return interfaceAddr.String(), specifiedPort, nil + } + if err != errNoSuchInterface { + return "", "", err + } + + // If it's not an interface, it must be an IP (for now) + if net.ParseIP(specifiedHost) == nil { + return "", "", errBadListenAddr + } + + return specifiedHost, specifiedPort, nil +} + +func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) { + // Approach: + // - If an advertise address is specified, use that. Resolve the + // interface's address if an interface was specified in + // advertiseAddr. Fill in the port from listenAddrPort if necessary. + // - If DefaultAdvertiseAddr is not empty, use that with the port from + // listenAddrPort. Resolve the interface's address from + // if an interface name was specified in DefaultAdvertiseAddr. + // - Otherwise, try to autodetect the system's address. Use the port in + // listenAddrPort with this address if autodetection succeeds. + + if advertiseAddr != "" { + advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr) + if err != nil { + // Not a host:port specification + advertiseHost = advertiseAddr + advertisePort = listenAddrPort + } + + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. + interfaceAddr, err := resolveInterfaceAddr(advertiseHost) + if err == nil { + return interfaceAddr.String(), advertisePort, nil + } + if err != errNoSuchInterface { + return "", "", err + } + + // If it's not an interface, it must be an IP (for now) + if ip := net.ParseIP(advertiseHost); ip == nil || ip.IsUnspecified() { + return "", "", errBadAdvertiseAddr + } + + return advertiseHost, advertisePort, nil + } + + if c.config.DefaultAdvertiseAddr != "" { + // Does the default advertise address component match any of the + // interface names on the system? If so, use the address from + // that interface. + interfaceAddr, err := resolveInterfaceAddr(c.config.DefaultAdvertiseAddr) + if err == nil { + return interfaceAddr.String(), listenAddrPort, nil + } + if err != errNoSuchInterface { + return "", "", err + } + + // If it's not an interface, it must be an IP (for now) + if ip := net.ParseIP(c.config.DefaultAdvertiseAddr); ip == nil || ip.IsUnspecified() { + return "", "", errBadDefaultAdvertiseAddr + } + + return c.config.DefaultAdvertiseAddr, listenAddrPort, nil + } + + systemAddr, err := c.resolveSystemAddr() + if err != nil { + return "", "", err + } + return systemAddr.String(), listenAddrPort, nil +} + +func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { + // Use a specific interface's IP address. + intf, err := net.InterfaceByName(specifiedInterface) + if err != nil { + return nil, errNoSuchInterface + } + + addrs, err := intf.Addrs() + if err != nil { + return nil, err + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + if ipAddr.IP.To4() != nil { + // IPv4 + if interfaceAddr4 != nil { + return nil, fmt.Errorf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP) + } + interfaceAddr4 = ipAddr.IP + } else { + // IPv6 + if interfaceAddr6 != nil { + return nil, fmt.Errorf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP) + } + interfaceAddr6 = ipAddr.IP + } + } + } + + if interfaceAddr4 == nil && interfaceAddr6 == nil { + return nil, fmt.Errorf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface) + } + + // In the case that there's exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + return interfaceAddr4, nil + } + return interfaceAddr6, nil +} + +func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) { + // Use the system's only IP address, or fail if there are + // multiple addresses to choose from. Skip interfaces which + // are managed by docker via subnet check. + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + var systemAddr net.IP + var systemInterface string + + // List Docker-managed subnets + v4Subnets := c.config.NetworkSubnetsProvider.V4Subnets() + v6Subnets := c.config.NetworkSubnetsProvider.V6Subnets() + +ifaceLoop: + for _, intf := range interfaces { + // Skip inactive interfaces and loopback interfaces + if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 { + continue + } + + addrs, err := intf.Addrs() + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + // Skip loopback and link-local addresses + if !ok || !ipAddr.IP.IsGlobalUnicast() { + continue + } + + if ipAddr.IP.To4() != nil { + // IPv4 + + // Ignore addresses in subnets that are managed by Docker. + for _, subnet := range v4Subnets { + if subnet.Contains(ipAddr.IP) { + continue ifaceLoop + } + } + + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr4, ipAddr.IP) + } + + interfaceAddr4 = ipAddr.IP + } else { + // IPv6 + + // Ignore addresses in subnets that are managed by Docker. + for _, subnet := range v6Subnets { + if subnet.Contains(ipAddr.IP) { + continue ifaceLoop + } + } + + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr6, ipAddr.IP) + } + + interfaceAddr6 = ipAddr.IP + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Name + } + } + + if systemAddr == nil { + return nil, errNoIP + } + + return systemAddr, nil +} + +func listSystemIPs() []net.IP { + interfaces, err := net.Interfaces() + if err != nil { + return nil + } + + var systemAddrs []net.IP + + for _, intf := range interfaces { + addrs, err := intf.Addrs() + if err != nil { + continue + } + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + systemAddrs = append(systemAddrs, ipAddr.IP) + } + } + } + + return systemAddrs +} + +func errMultipleIPs(interfaceA, interfaceB string, addrA, addrB net.IP) error { + if interfaceA == interfaceB { + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB) + } + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB) +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/listen_addr_linux.go b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_linux.go new file mode 100644 index 0000000..3d4f239 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_linux.go @@ -0,0 +1,91 @@ +// +build linux + +package cluster + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + // Use the system's only device IP address, or fail if there are + // multiple addresses to choose from. + interfaces, err := netlink.LinkList() + if err != nil { + return nil, err + } + + var ( + systemAddr net.IP + systemInterface string + deviceFound bool + ) + + for _, intf := range interfaces { + // Skip non device or inactive interfaces + if intf.Type() != "device" || intf.Attrs().Flags&net.FlagUp == 0 { + continue + } + + addrs, err := netlink.AddrList(intf, netlink.FAMILY_ALL) + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr := addr.IPNet.IP + + // Skip loopback and link-local addresses + if !ipAddr.IsGlobalUnicast() { + continue + } + + // At least one non-loopback device is found and it is administratively up + deviceFound = true + + if ipAddr.To4() != nil { + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr4, ipAddr) + } + interfaceAddr4 = ipAddr + } else { + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr6, ipAddr) + } + interfaceAddr6 = ipAddr + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Attrs().Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Attrs().Name + } + } + + if systemAddr == nil { + if !deviceFound { + // If no non-loopback device type interface is found, + // fall back to the regular auto-detection mechanism. + // This is to cover the case where docker is running + // inside a container (eths are in fact veths). + return c.resolveSystemAddrViaSubnetCheck() + } + return nil, errNoIP + } + + return systemAddr, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/listen_addr_others.go b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_others.go new file mode 100644 index 0000000..4e845f5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_others.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris + +package cluster + +import "net" + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + return c.resolveSystemAddrViaSubnetCheck() +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/listen_addr_solaris.go b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_solaris.go new file mode 100644 index 0000000..57a894b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_solaris.go @@ -0,0 +1,57 @@ +package cluster + +import ( + "bufio" + "fmt" + "net" + "os/exec" + "strings" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + defRouteCmd := "/usr/sbin/ipadm show-addr -p -o addr " + + "`/usr/sbin/route get default | /usr/bin/grep interface | " + + "/usr/bin/awk '{print $2}'`" + out, err := exec.Command("/usr/bin/bash", "-c", defRouteCmd).Output() + if err != nil { + return nil, fmt.Errorf("cannot get default route: %v", err) + } + + defInterface := strings.SplitN(string(out), "/", 2) + defInterfaceIP := net.ParseIP(defInterface[0]) + + return defInterfaceIP, nil +} + +func listSystemIPs() []net.IP { + var systemAddrs []net.IP + cmd := exec.Command("/usr/sbin/ipadm", "show-addr", "-p", "-o", "addr") + cmdReader, err := cmd.StdoutPipe() + if err != nil { + return nil + } + + if err := cmd.Start(); err != nil { + return nil + } + + scanner := bufio.NewScanner(cmdReader) + go func() { + for scanner.Scan() { + text := scanner.Text() + nameAddrPair := strings.SplitN(text, "/", 2) + // Let go of loopback interfaces and docker interfaces + systemAddrs = append(systemAddrs, net.ParseIP(nameAddrPair[0])) + } + }() + + if err := scanner.Err(); err != nil { + fmt.Printf("scan underwent err: %+v\n", err) + } + + if err := cmd.Wait(); err != nil { + fmt.Printf("run command wait: %+v\n", err) + } + + return systemAddrs +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/provider/network.go b/vendor/github.com/moby/moby/daemon/cluster/provider/network.go new file mode 100644 index 0000000..f4c72ae --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/provider/network.go @@ -0,0 +1,37 @@ +package provider + +import "github.com/docker/docker/api/types" + +// NetworkCreateRequest is a request when creating a network. +type NetworkCreateRequest struct { + ID string + types.NetworkCreateRequest +} + +// NetworkCreateResponse is a response when creating a network. +type NetworkCreateResponse struct { + ID string `json:"Id"` +} + +// VirtualAddress represents a virtual address. +type VirtualAddress struct { + IPv4 string + IPv6 string +} + +// PortConfig represents a port configuration. +type PortConfig struct { + Name string + Protocol int32 + TargetPort uint32 + PublishedPort uint32 +} + +// ServiceConfig represents a service configuration. +type ServiceConfig struct { + ID string + Name string + Aliases map[string][]string + VirtualAddresses map[string]*VirtualAddress + ExposedPorts []*PortConfig +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/secrets.go b/vendor/github.com/moby/moby/daemon/cluster/secrets.go new file mode 100644 index 0000000..2b9eb5d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/secrets.go @@ -0,0 +1,133 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" +) + +// GetSecret returns a secret from a managed swarm cluster +func (c *Cluster) GetSecret(id string) (types.Secret, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Secret{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.node.client.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: id}) + if err != nil { + return types.Secret{}, err + } + + return convert.SecretFromGRPC(r.Secret), nil +} + +// GetSecrets returns all secrets of a managed swarm cluster. +func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + filters, err := newListSecretsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.node.client.ListSecrets(ctx, + &swarmapi.ListSecretsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + secrets := []types.Secret{} + + for _, secret := range r.Secrets { + secrets = append(secrets, convert.SecretFromGRPC(secret)) + } + + return secrets, nil +} + +// CreateSecret creates a new secret in a managed swarm cluster. +func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return "", c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + secretSpec := convert.SecretSpecToGRPC(s) + + r, err := c.node.client.CreateSecret(ctx, + &swarmapi.CreateSecretRequest{Spec: &secretSpec}) + if err != nil { + return "", err + } + + return r.Secret.ID, nil +} + +// RemoveSecret removes a secret from a managed swarm cluster. +func (c *Cluster) RemoveSecret(id string) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + req := &swarmapi.RemoveSecretRequest{ + SecretID: id, + } + + if _, err := c.node.client.RemoveSecret(ctx, req); err != nil { + return err + } + return nil +} + +// UpdateSecret updates a secret in a managed swarm cluster. +// Note: this is not exposed to the CLI but is available from the API only +func (c *Cluster) UpdateSecret(id string, version uint64, spec types.SecretSpec) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + secretSpec := convert.SecretSpecToGRPC(spec) + + if _, err := c.client.UpdateSecret(ctx, + &swarmapi.UpdateSecretRequest{ + SecretID: id, + SecretVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &secretSpec, + }); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/commit.go b/vendor/github.com/moby/moby/daemon/commit.go new file mode 100644 index 0000000..1e7bffb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/commit.go @@ -0,0 +1,271 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/reference" +) + +// merge merges two Config, the image container configuration (defaults values), +// and the user container configuration, either passed by the API or generated +// by the cli. +// It will mutate the specified user configuration (userConf) with the image +// configuration where the user configuration is incomplete. +func merge(userConf, imageConf *containertypes.Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if runtime.GOOS == "windows" { + // Case insensitive environment variables on Windows + imageEnvKey = strings.ToUpper(imageEnvKey) + userEnvKey = strings.ToUpper(userEnvKey) + } + if imageEnvKey == userEnvKey { + found = true + break + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if userConf.Labels == nil { + userConf.Labels = map[string]string{} + } + for l, v := range imageConf.Labels { + if _, ok := userConf.Labels[l]; !ok { + userConf.Labels[l] = v + } + } + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + userConf.ArgsEscaped = imageConf.ArgsEscaped + } + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } + } + if imageConf.Healthcheck != nil { + if userConf.Healthcheck == nil { + userConf.Healthcheck = imageConf.Healthcheck + } else { + if len(userConf.Healthcheck.Test) == 0 { + userConf.Healthcheck.Test = imageConf.Healthcheck.Test + } + if userConf.Healthcheck.Interval == 0 { + userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval + } + if userConf.Healthcheck.Timeout == 0 { + userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout + } + if userConf.Healthcheck.Retries == 0 { + userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries + } + } + } + + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + + if userConf.StopSignal == "" { + userConf.StopSignal = imageConf.StopSignal + } + return nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository. +func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return "", err + } + + // It is not possible to commit a running container on Windows and on Solaris. + if (runtime.GOOS == "windows" || runtime.GOOS == "solaris") && container.IsRunning() { + return "", fmt.Errorf("%+v does not support commit of a running container", runtime.GOOS) + } + + if c.Pause && !container.IsPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes) + if err != nil { + return "", err + } + + if c.MergeConfigs { + if err := merge(newConfig, container.Config); err != nil { + return "", err + } + } + + rwTar, err := daemon.exportContainerRw(container) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var history []image.History + rootFS := image.NewRootFS() + osVersion := "" + var osFeatures []string + + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + history = img.History + rootFS = img.RootFS + osVersion = img.OSVersion + osFeatures = img.OSFeatures + } + + l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + h := image.History{ + Author: c.Author, + Created: time.Now().UTC(), + CreatedBy: strings.Join(container.Config.Cmd, " "), + Comment: c.Comment, + EmptyLayer: true, + } + + if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { + h.EmptyLayer = false + rootFS.Append(diffID) + } + + history = append(history, h) + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: newConfig, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Container: container.ID, + ContainerConfig: *container.Config, + Author: c.Author, + Created: h.Created, + }, + RootFS: rootFS, + History: history, + OSFeatures: osFeatures, + OSVersion: osVersion, + }) + + if err != nil { + return "", err + } + + id, err := daemon.imageStore.Create(config) + if err != nil { + return "", err + } + + if container.ImageID != "" { + if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } + } + + imageRef := "" + if c.Repo != "" { + newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImageWithReference(id, newTag); err != nil { + return "", err + } + imageRef = newTag.String() + } + + attributes := map[string]string{ + "comment": c.Comment, + "imageID": id.String(), + "imageRef": imageRef, + } + daemon.LogContainerEventWithAttributes(container, "commit", attributes) + containerActions.WithValues("commit").UpdateSince(start) + return id.String(), nil +} + +func (daemon *Daemon) exportContainerRw(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.RWLayer.TarStream() + if err != nil { + daemon.Unmount(container) // logging is already handled in the `Unmount` function + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + return container.RWLayer.Unmount() + }), + nil +} diff --git a/vendor/github.com/moby/moby/daemon/config.go b/vendor/github.com/moby/moby/daemon/config.go new file mode 100644 index 0000000..7a69fe0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config.go @@ -0,0 +1,525 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/docker/registry" + "github.com/imdario/mergo" + "github.com/spf13/pflag" +) + +const ( + // defaultMaxConcurrentDownloads is the default value for + // maximum number of downloads that + // may take place at a time for each pull. + defaultMaxConcurrentDownloads = 3 + // defaultMaxConcurrentUploads is the default value for + // maximum number of uploads that + // may take place at a time for each push. + defaultMaxConcurrentUploads = 5 + // stockRuntimeName is the reserved name/alias used to represent the + // OCI runtime being shipped with the docker daemon package. + stockRuntimeName = "runc" +) + +const ( + defaultNetworkMtu = 1500 + disableNetworkBridge = "none" +) + +const ( + defaultShutdownTimeout = 15 +) + +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// with others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, + "runtimes": true, + "default-ulimits": true, +} + +// LogConfig represents the default log configuration. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type LogConfig struct { + Type string `json:"log-driver,omitempty"` + Config map[string]string `json:"log-opts,omitempty"` +} + +// commonBridgeConfig stores all the platform-common bridge driver specific +// configuration. +type commonBridgeConfig struct { + Iface string `json:"bridge,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` +} + +// CommonTLSOptions defines TLS configuration for the daemon server. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonTLSOptions struct { + CAFile string `json:"tlscacert,omitempty"` + CertFile string `json:"tlscert,omitempty"` + KeyFile string `json:"tlskey,omitempty"` +} + +// CommonConfig defines the configuration of a docker daemon which is +// common across platforms. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonConfig struct { + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + Root string `json:"graph,omitempty"` + SocketGroup string `json:"group,omitempty"` + TrustKeyPath string `json:"-"` + CorsHeaders string `json:"api-cors-header,omitempty"` + EnableCors bool `json:"api-enable-cors,omitempty"` + + // LiveRestoreEnabled determines whether we should keep containers + // alive upon daemon shutdown/start + LiveRestoreEnabled bool `json:"live-restore,omitempty"` + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string `json:"cluster-store,omitempty"` + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. + ClusterAdvertise string `json:"cluster-advertise,omitempty"` + + // MaxConcurrentDownloads is the maximum number of downloads that + // may take place at a time for each pull. + MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"` + + // MaxConcurrentUploads is the maximum number of uploads that + // may take place at a time for each push. + MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"` + + // ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container + // to stop when daemon is being shutdown + ShutdownTimeout int `json:"shutdown-timeout,omitempty"` + + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. + CommonTLSOptions + + // SwarmDefaultAdvertiseAddr is the default host/IP or network interface + // to use if a wildcard address is specified in the ListenAddr value + // given to the /swarm/init endpoint and no advertise address is + // specified. + SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"` + MetricsAddress string `json:"metrics-addr"` + + LogConfig + bridgeConfig // bridgeConfig holds bridge network specific configuration. + registry.ServiceOptions + + reloadLock sync.Mutex + valuesSet map[string]interface{} + + Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not +} + +// InstallCommonFlags adds flags to the pflag.FlagSet to configure the daemon +func (config *Config) InstallCommonFlags(flags *pflag.FlagSet) { + var maxConcurrentDownloads, maxConcurrentUploads int + + config.ServiceOptions.InstallCliFlags(flags) + + flags.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), "storage-opt", "Storage driver options") + flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") + flags.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), "exec-opt", "Runtime execution options") + flags.StringVarP(&config.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") + flags.StringVarP(&config.Root, "graph", "g", defaultGraph, "Root of the Docker runtime") + flags.BoolVarP(&config.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") + flags.MarkDeprecated("restart", "Please use a restart policy on docker run") + flags.StringVarP(&config.GraphDriver, "storage-driver", "s", "", "Storage driver to use") + flags.IntVar(&config.Mtu, "mtu", 0, "Set the containers network MTU") + flags.BoolVar(&config.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") + // FIXME: why the inconsistency between "hosts" and "sockets"? + flags.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") + flags.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), "dns-opt", "DNS options to use") + flags.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") + flags.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") + flags.StringVar(&config.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") + flags.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") + flags.StringVar(&config.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") + flags.StringVar(&config.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") + flags.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") + flags.StringVar(&config.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") + flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", defaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") + flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", defaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") + flags.IntVar(&config.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") + + flags.StringVar(&config.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") + flags.BoolVar(&config.Experimental, "experimental", false, "Enable experimental features") + + flags.StringVar(&config.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") + + config.MaxConcurrentDownloads = &maxConcurrentDownloads + config.MaxConcurrentUploads = &maxConcurrentUploads +} + +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (config *Config) IsValueSet(name string) bool { + if config.valuesSet == nil { + return false + } + _, ok := config.valuesSet[name] + return ok +} + +// NewConfig returns a new fully initialized Config struct +func NewConfig() *Config { + config := Config{} + config.LogConfig.Config = make(map[string]string) + config.ClusterOpts = make(map[string]string) + + if runtime.GOOS != "linux" { + config.V2Only = true + } + return &config +} + +func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { + if runtime.GOOS == "solaris" && (clusterAdvertise != "" || clusterStore != "") { + return "", errors.New("Cluster Advertise Settings not supported on Solaris") + } + if clusterAdvertise == "" { + return "", errDiscoveryDisabled + } + if clusterStore == "" { + return "", fmt.Errorf("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration") + } + + advertise, err := discovery.ParseAdvertise(clusterAdvertise) + if err != nil { + return "", fmt.Errorf("discovery advertise parsing failed (%v)", err) + } + return advertise, nil +} + +// GetConflictFreeLabels validate Labels for conflict +// In swarm the duplicates for labels are removed +// so we only take same values here, no conflict values +// If the key-value is the same we will only take the last label +func GetConflictFreeLabels(labels []string) ([]string, error) { + labelMap := map[string]string{} + for _, label := range labels { + stringSlice := strings.SplitN(label, "=", 2) + if len(stringSlice) > 1 { + // If there is a conflict we will return an error + if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { + return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v) + } + labelMap[stringSlice[0]] = stringSlice[1] + } + } + + newLabels := []string{} + for k, v := range labelMap { + newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v)) + } + return newLabels, nil +} + +// ReloadConfiguration reads the configuration in the host and reloads the daemon and server. +func ReloadConfiguration(configFile string, flags *pflag.FlagSet, reload func(*Config)) error { + logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) + newConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return err + } + + if err := ValidateConfiguration(newConfig); err != nil { + return fmt.Errorf("file configuration validation failed (%v)", err) + } + + // Labels of the docker engine used to allow multiple values associated with the same key. + // This is deprecated in 1.13, and, be removed after 3 release cycles. + // The following will check the conflict of labels, and report a warning for deprecation. + // + // TODO: After 3 release cycles (17.12) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := GetConflictFreeLabels(newConfig.Labels) + // if err != nil { + // return err + // } + // newConfig.Labels = newLabels + // + if _, err := GetConflictFreeLabels(newConfig.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + reload(newConfig) + return nil +} + +// boolValue is an interface that boolean value flags implement +// to tell the command line how to make -name equivalent to -name=true. +type boolValue interface { + IsBoolFlag() bool +} + +// MergeDaemonConfigurations reads a configuration file, +// loads the file configuration in an isolated structure, +// and merges the configuration provided from flags on top +// if there are no conflicts. +func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) { + fileConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return nil, err + } + + if err := ValidateConfiguration(fileConfig); err != nil { + return nil, fmt.Errorf("file configuration validation failed (%v)", err) + } + + // merge flags configuration on top of the file configuration + if err := mergo.Merge(fileConfig, flagsConfig); err != nil { + return nil, err + } + + // We need to validate again once both fileConfig and flagsConfig + // have been merged + if err := ValidateConfiguration(fileConfig); err != nil { + return nil, fmt.Errorf("file configuration validation failed (%v)", err) + } + + return fileConfig, nil +} + +// getConflictFreeConfiguration loads the configuration from a JSON file. +// It compares that configuration with the one provided by the flags, +// and returns an error if there are conflicts. +func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) { + b, err := ioutil.ReadFile(configFile) + if err != nil { + return nil, err + } + + var config Config + var reader io.Reader + if flags != nil { + var jsonConfig map[string]interface{} + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { + return nil, err + } + + configSet := configValuesSet(jsonConfig) + + if err := findConfigurationConflicts(configSet, flags); err != nil { + return nil, err + } + + // Override flag values to make sure the values set in the config file with nullable values, like `false`, + // are not overridden by default truthy values from the flags that were not explicitly set. + // See https://github.com/docker/docker/issues/20289 for an example. + // + // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. + namedOptions := make(map[string]interface{}) + for key, value := range configSet { + f := flags.Lookup(key) + if f == nil { // ignore named flags that don't match + namedOptions[key] = value + continue + } + + if _, ok := f.Value.(boolValue); ok { + f.Value.Set(fmt.Sprintf("%v", value)) + } + } + if len(namedOptions) > 0 { + // set also default for mergeVal flags that are boolValue at the same time. + flags.VisitAll(func(f *pflag.Flag) { + if opt, named := f.Value.(opts.NamedOption); named { + v, set := namedOptions[opt.Name()] + _, boolean := f.Value.(boolValue) + if set && boolean { + f.Value.Set(fmt.Sprintf("%v", v)) + } + } + }) + } + + config.valuesSet = configSet + } + + reader = bytes.NewReader(b) + err = json.NewDecoder(reader).Decode(&config) + return &config, err +} + +// configValuesSet returns the configuration values explicitly set in the file. +func configValuesSet(config map[string]interface{}) map[string]interface{} { + flatten := make(map[string]interface{}) + for k, v := range config { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { + for km, vm := range m { + flatten[km] = vm + } + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + if flag := flags.Lookup(key); flag == nil { + unknownKeys[key] = value + } + } + + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + if len(unknownKeys) > 0 { + unknownNamedConflicts := func(f *pflag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + } + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string + printConflict := func(name string, flagValue, fileValue interface{}) string { + return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) + } + + // 3. Search keys that are present as a flag and as a file option. + duplicatedConflicts := func(f *pflag.Flag) { + // search option name in the json configuration payload if the value is a named option + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if optsValue, ok := config[namedOption.Name()]; ok { + conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) + } + } else { + // search flag name in the json configuration payload + for _, name := range []string{f.Name, f.Shorthand} { + if value, ok := config[name]; ok { + conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) + break + } + } + } + } + + flags.Visit(duplicatedConflicts) + + if len(conflicts) > 0 { + return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + } + return nil +} + +// ValidateConfiguration validates some specific configs. +// such as config.DNS, config.Labels, config.DNSSearch, +// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. +func ValidateConfiguration(config *Config) error { + // validate DNS + for _, dns := range config.DNS { + if _, err := opts.ValidateIPAddress(dns); err != nil { + return err + } + } + + // validate DNSSearch + for _, dnsSearch := range config.DNSSearch { + if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { + return err + } + } + + // validate Labels + for _, label := range config.Labels { + if _, err := opts.ValidateLabel(label); err != nil { + return err + } + } + + // validate MaxConcurrentDownloads + if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { + return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) + } + + // validate MaxConcurrentUploads + if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { + return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) + } + + // validate that "default" runtime is not reset + if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { + if _, ok := runtimes[stockRuntimeName]; ok { + return fmt.Errorf("runtime name '%s' is reserved", stockRuntimeName) + } + } + + if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != stockRuntimeName { + runtimes := config.GetAllRuntimes() + if _, ok := runtimes[defaultRuntime]; !ok { + return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/config_common_unix.go b/vendor/github.com/moby/moby/daemon/config_common_unix.go new file mode 100644 index 0000000..ab76fe7 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config_common_unix.go @@ -0,0 +1,90 @@ +// +build solaris linux freebsd + +package daemon + +import ( + "net" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/pflag" +) + +// CommonUnixConfig defines configuration of a docker daemon that is +// common across Unix platforms. +type CommonUnixConfig struct { + ExecRoot string `json:"exec-root,omitempty"` + ContainerdAddr string `json:"containerd,omitempty"` + Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` + DefaultRuntime string `json:"default-runtime,omitempty"` +} + +type commonUnixBridgeConfig struct { + DefaultIP net.IP `json:"ip,omitempty"` + IP string `json:"bip,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` +} + +// InstallCommonUnixFlags adds command-line options to the top-level flag parser for +// the current process that are common across Unix platforms. +func (config *Config) InstallCommonUnixFlags(flags *pflag.FlagSet) { + config.Runtimes = make(map[string]types.Runtime) + + flags.StringVarP(&config.SocketGroup, "group", "G", "docker", "Group for the unix socket") + flags.StringVar(&config.bridgeConfig.IP, "bip", "", "Specify network bridge IP") + flags.StringVarP(&config.bridgeConfig.Iface, "bridge", "b", "", "Attach containers to a network bridge") + flags.StringVar(&config.bridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), "default-gateway", "Container default gateway IPv4 address") + flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), "default-gateway-v6", "Container default gateway IPv6 address") + flags.BoolVar(&config.bridgeConfig.InterContainerCommunication, "icc", true, "Enable inter-container communication") + flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), "ip", "Default IP when binding container ports") + flags.Var(runconfigopts.NewNamedRuntimeOpt("runtimes", &config.Runtimes, stockRuntimeName), "add-runtime", "Register an additional OCI compatible runtime") + flags.StringVar(&config.DefaultRuntime, "default-runtime", stockRuntimeName, "Default OCI runtime for containers") + +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (config *Config) GetRuntime(name string) *types.Runtime { + config.reloadLock.Lock() + defer config.reloadLock.Unlock() + if rt, ok := config.Runtimes[name]; ok { + return &rt + } + return nil +} + +// GetDefaultRuntimeName returns the current default runtime +func (config *Config) GetDefaultRuntimeName() string { + config.reloadLock.Lock() + rt := config.DefaultRuntime + config.reloadLock.Unlock() + + return rt +} + +// GetAllRuntimes returns a copy of the runtimes map +func (config *Config) GetAllRuntimes() map[string]types.Runtime { + config.reloadLock.Lock() + rts := config.Runtimes + config.reloadLock.Unlock() + return rts +} + +// GetExecRoot returns the user configured Exec-root +func (config *Config) GetExecRoot() string { + return config.ExecRoot +} + +// GetInitPath returns the configure docker-init path +func (config *Config) GetInitPath() string { + config.reloadLock.Lock() + defer config.reloadLock.Unlock() + if config.InitPath != "" { + return config.InitPath + } + return DefaultInitBinary +} diff --git a/vendor/github.com/moby/moby/daemon/config_experimental.go b/vendor/github.com/moby/moby/daemon/config_experimental.go new file mode 100644 index 0000000..963a51e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config_experimental.go @@ -0,0 +1,8 @@ +package daemon + +import ( + "github.com/spf13/pflag" +) + +func (config *Config) attachExperimentalFlags(cmd *pflag.FlagSet) { +} diff --git a/vendor/github.com/moby/moby/daemon/config_solaris.go b/vendor/github.com/moby/moby/daemon/config_solaris.go new file mode 100644 index 0000000..bc18ccd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config_solaris.go @@ -0,0 +1,47 @@ +package daemon + +import ( + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/system/volatile/docker/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExec = "zones" +) + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig +} + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig + + // Fields below here are platform specific. + commonUnixBridgeConfig +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +func (config *Config) InstallFlags(flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(flags) + + // Then install flags common to unix platforms + config.InstallCommonUnixFlags(flags) + + // Then platform-specific install flags + config.attachExperimentalFlags(flags) +} + +func (config *Config) isSwarmCompatible() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/config_test.go b/vendor/github.com/moby/moby/daemon/config_test.go new file mode 100644 index 0000000..90f6a12 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config_test.go @@ -0,0 +1,229 @@ +package daemon + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/spf13/pflag" +) + +func TestDaemonConfigurationNotFound(t *testing.T) { + _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("expected does not exist error, got %v", err) + } +} + +func TestDaemonBrokenConfiguration(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"Debug": tru`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatalf("expected error, got %v", err) + } +} + +func TestParseClusterAdvertiseSettings(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ClusterSettings not supported on Solaris\n") + } + _, err := parseClusterAdvertiseSettings("something", "") + if err != errDiscoveryDisabled { + t.Fatalf("expected discovery disabled error, got %v\n", err) + } + + _, err = parseClusterAdvertiseSettings("", "something") + if err == nil { + t.Fatalf("expected discovery store error, got %v\n", err) + } + + _, err = parseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") + if err != nil { + t.Fatal(err) + } +} + +func TestFindConfigurationConflicts(t *testing.T) { + config := map[string]interface{}{"authorization-plugins": "foobar"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.String("authorization-plugins", "", "") + assert.NilError(t, flags.Set("authorization-plugins", "asdf")) + + assert.Error(t, + findConfigurationConflicts(config, flags), + "authorization-plugins: (from flag: asdf, from file: foobar)") +} + +func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { + config := map[string]interface{}{"hosts": []string{"qwer"}} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var hosts []string + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") + assert.NilError(t, flags.Set("host", "tcp://127.0.0.1:4444")) + assert.NilError(t, flags.Set("host", "unix:///var/run/docker.sock")) + + assert.Error(t, findConfigurationConflicts(config, flags), "hosts") +} + +func TestDaemonConfigurationMergeConflicts(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"debug": true}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.Bool("debug", false, "") + flags.Set("debug", "false") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "debug") { + t.Fatalf("expected debug conflict, got %v", err) + } +} + +func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("tlscacert", "", "") + flags.Set("tlscacert", "~/.docker/ca.pem") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "tlscacert") { + t.Fatalf("expected tlscacert conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { + config := map[string]interface{}{"tls-verify": "true"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.Bool("tlsverify", false, "") + err := findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { + t.Fatalf("expected tls-verify conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { + var hosts []string + config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} + flags := pflag.NewFlagSet("base", pflag.ContinueOnError) + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, nil), "host", "H", "") + + err := findConfigurationConflicts(config, flags) + if err != nil { + t.Fatal(err) + } + + flags.Set("host", "unix:///var/run/docker.sock") + err = findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { + t.Fatalf("expected hosts conflict, got %v", err) + } +} + +func TestValidateConfiguration(t *testing.T) { + c1 := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one"}, + }, + } + + err := ValidateConfiguration(c1) + if err == nil { + t.Fatal("expected error, got nil") + } + + c2 := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one=two"}, + }, + } + + err = ValidateConfiguration(c2) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + + c3 := &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1"}, + }, + } + + err = ValidateConfiguration(c3) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + + c4 := &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1o"}, + }, + } + + err = ValidateConfiguration(c4) + if err == nil { + t.Fatal("expected error, got nil") + } + + c5 := &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c"}, + }, + } + + err = ValidateConfiguration(c5) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + + c6 := &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"123456"}, + }, + } + + err = ValidateConfiguration(c6) + if err == nil { + t.Fatal("expected error, got nil") + } +} diff --git a/vendor/github.com/moby/moby/daemon/config_unix.go b/vendor/github.com/moby/moby/daemon/config_unix.go new file mode 100644 index 0000000..d095788 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config_unix.go @@ -0,0 +1,104 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + + runconfigopts "github.com/docker/docker/runconfig/opts" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig + + // Fields below here are platform specific. + CgroupParent string `json:"cgroup-parent,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"` + CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"` + OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` + Init bool `json:"init,omitempty"` + InitPath string `json:"init-path,omitempty"` + SeccompProfile string `json:"seccomp-profile,omitempty"` +} + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig + + // These fields are common to all unix platforms. + commonUnixBridgeConfig + + // Fields below here are platform specific. + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-masq,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + UserlandProxyPath string `json:"userland-proxy-path,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` +} + +// InstallFlags adds flags to the pflag.FlagSet to configure the daemon +func (config *Config) InstallFlags(flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(flags) + + // Then install flags common to unix platforms + config.InstallCommonUnixFlags(flags) + + config.Ulimits = make(map[string]*units.Ulimit) + + // Then platform-specific install flags + flags.BoolVar(&config.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support") + flags.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), "default-ulimit", "Default ulimits for containers") + flags.BoolVar(&config.bridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules") + flags.BoolVar(&config.bridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward") + flags.BoolVar(&config.bridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading") + flags.BoolVar(&config.bridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking") + flags.StringVar(&config.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") + flags.StringVar(&config.bridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs") + flags.BoolVar(&config.bridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic") + flags.StringVar(&config.bridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary") + flags.BoolVar(&config.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header") + flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header") + flags.StringVar(&config.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers") + flags.StringVar(&config.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces") + flags.StringVar(&config.ContainerdAddr, "containerd", "", "Path to containerd socket") + flags.BoolVar(&config.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running") + flags.IntVar(&config.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon") + flags.BoolVar(&config.Init, "init", false, "Run an init in the container to forward signals and reap processes") + flags.StringVar(&config.InitPath, "init-path", "", "Path to the docker-init binary") + flags.Int64Var(&config.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&config.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&config.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile") + + config.attachExperimentalFlags(flags) +} + +func (config *Config) isSwarmCompatible() error { + if config.ClusterStore != "" || config.ClusterAdvertise != "" { + return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + } + if config.LiveRestoreEnabled { + return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/config_unix_test.go b/vendor/github.com/moby/moby/daemon/config_unix_test.go new file mode 100644 index 0000000..86c16f5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config_unix_test.go @@ -0,0 +1,80 @@ +// +build !windows + +package daemon + +import ( + "io/ioutil" + "testing" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + cc, err := MergeDaemonConfigurations(c, nil, configFile) + if err != nil { + t.Fatal(err) + } + if !cc.Debug { + t.Fatalf("expected %v, got %v\n", true, cc.Debug) + } + if !cc.AutoRestart { + t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) + } + if cc.LogConfig.Type != "syslog" { + t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + } + + if configValue, OK := cc.LogConfig.Config["tag"]; !OK { + t.Fatal("expected syslog config attributes, got nil\n") + } else { + if configValue != "test_tag" { + t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) + } + } + + if cc.Ulimits == nil { + t.Fatal("expected default ulimit config, got nil\n") + } else { + if _, OK := cc.Ulimits["nofile"]; OK { + if cc.Ulimits["nofile"].Name != "nofile" || + cc.Ulimits["nofile"].Hard != 2048 || + cc.Ulimits["nofile"].Soft != 1024 { + t.Fatalf("expected default ulimit name, hard and soft are nofile, 2048, 1024, got %s, %d, %d\n", cc.Ulimits["nofile"].Name, cc.Ulimits["nofile"].Hard, cc.Ulimits["nofile"].Soft) + } + } else { + t.Fatal("expected default ulimit name nofile, got nil\n") + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/config_windows.go b/vendor/github.com/moby/moby/daemon/config_windows.go new file mode 100644 index 0000000..df59dcf --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config_windows.go @@ -0,0 +1,71 @@ +package daemon + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile string + defaultGraph = filepath.Join(os.Getenv("programdata"), "docker") +) + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig +} + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker daemon -e windows` +type Config struct { + CommonConfig + + // Fields below here are platform specific. (There are none presently + // for the Windows daemon.) +} + +// InstallFlags adds flags to the pflag.FlagSet to configure the daemon +func (config *Config) InstallFlags(flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(flags) + + // Then platform-specific install flags. + flags.StringVar(&config.bridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.StringVarP(&config.bridgeConfig.Iface, "bridge", "b", "", "Attach containers to a virtual switch") + flags.StringVarP(&config.SocketGroup, "group", "G", "", "Users or groups that can access the named pipe") +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (config *Config) GetRuntime(name string) *types.Runtime { + return nil +} + +// GetInitPath returns the configure docker-init path +func (config *Config) GetInitPath() string { + return "" +} + +// GetDefaultRuntimeName returns the current default runtime +func (config *Config) GetDefaultRuntimeName() string { + return stockRuntimeName +} + +// GetAllRuntimes returns a copy of the runtimes map +func (config *Config) GetAllRuntimes() map[string]types.Runtime { + return map[string]types.Runtime{} +} + +// GetExecRoot returns the user configured Exec-root +func (config *Config) GetExecRoot() string { + return "" +} + +func (config *Config) isSwarmCompatible() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/config_windows_test.go b/vendor/github.com/moby/moby/daemon/config_windows_test.go new file mode 100644 index 0000000..4a7b95c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config_windows_test.go @@ -0,0 +1,59 @@ +// +build windows + +package daemon + +import ( + "io/ioutil" + "testing" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + cc, err := MergeDaemonConfigurations(c, nil, configFile) + if err != nil { + t.Fatal(err) + } + if !cc.Debug { + t.Fatalf("expected %v, got %v\n", true, cc.Debug) + } + if !cc.AutoRestart { + t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) + } + if cc.LogConfig.Type != "syslog" { + t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + } + + if configValue, OK := cc.LogConfig.Config["tag"]; !OK { + t.Fatal("expected syslog config attributes, got nil\n") + } else { + if configValue != "test_tag" { + t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/container.go b/vendor/github.com/moby/moby/daemon/container.go new file mode 100644 index 0000000..2a44800 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container.go @@ -0,0 +1,282 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" +) + +// GetContainer looks for a container using the provided information, which could be +// one of the following inputs from the caller: +// - A full container ID, which will exact match a container in daemon's list +// - A container name, which will only exact match via the GetByName() function +// - A partial container ID prefix (e.g. short ID) of any length that is +// unique enough to only return a single container object +// If none of these searches succeed, an error is returned +func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { + if len(prefixOrName) == 0 { + return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied")) + } + + if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { + // prefix is an exact match to a full container ID + return containerByID, nil + } + + // GetByName will match only an exact name provided; we ignore errors + if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { + // prefix is an exact match to a full container Name + return containerByName, nil + } + + containerID, indexError := daemon.idIndex.Get(prefixOrName) + if indexError != nil { + // When truncindex defines an error type, use that instead + if indexError == truncindex.ErrNotExist { + err := fmt.Errorf("No such container: %s", prefixOrName) + return nil, errors.NewRequestNotFoundError(err) + } + return nil, indexError + } + return daemon.containers.Get(containerID), nil +} + +// Exists returns a true if a container of the specified ID or name exists, +// false otherwise. +func (daemon *Daemon) Exists(id string) bool { + c, _ := daemon.GetContainer(id) + return c != nil +} + +// IsPaused returns a bool indicating if the specified container is paused. +func (daemon *Daemon) IsPaused(id string) bool { + c, _ := daemon.GetContainer(id) + return c.State.IsPaused() +} + +func (daemon *Daemon) containerRoot(id string) string { + return filepath.Join(daemon.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup. +func (daemon *Daemon) load(id string) (*container.Container, error) { + container := daemon.newBaseContainer(id) + + if err := container.FromDisk(); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + return container, nil +} + +// Register makes a container object usable by the daemon as +func (daemon *Daemon) Register(c *container.Container) error { + // Attach to stdout and stderr + if c.Config.OpenStdin { + c.StreamConfig.NewInputPipes() + } else { + c.StreamConfig.NewNopInputPipe() + } + + daemon.containers.Add(c.ID, c) + daemon.idIndex.Add(c.ID) + + return nil +} + +func (daemon *Daemon) newContainer(name string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { + var ( + id string + err error + noExplicitName = name == "" + ) + id, name, err = daemon.generateIDAndName(name) + if err != nil { + return nil, err + } + + if hostConfig.NetworkMode.IsHost() { + if config.Hostname == "" { + config.Hostname, err = os.Hostname() + if err != nil { + return nil, err + } + } + } else { + daemon.generateHostname(id, config) + } + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + base := daemon.newBaseContainer(id) + base.Created = time.Now().UTC() + base.Managed = managed + base.Path = entrypoint + base.Args = args //FIXME: de-duplicate from config + base.Config = config + base.HostConfig = &containertypes.HostConfig{} + base.ImageID = imgID + base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} + base.Name = name + base.Driver = daemon.GraphDriverName() + + return base, err +} + +// GetByName returns a container given a name. +func (daemon *Daemon) GetByName(name string) (*container.Container, error) { + if len(name) == 0 { + return nil, fmt.Errorf("No container name supplied") + } + fullName := name + if name[0] != '/' { + fullName = "/" + name + } + id, err := daemon.nameIndex.Get(fullName) + if err != nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(id) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", id) + } + return e, nil +} + +// newBaseContainer creates a new container with its initial +// configuration based on the root storage from the daemon. +func (daemon *Daemon) newBaseContainer(id string) *container.Container { + return container.NewBaseContainer(id, daemon.containerRoot(id)) +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) { + if len(configEntrypoint) != 0 { + return configEntrypoint[0], append(configEntrypoint[1:], configCmd...) + } + return configCmd[0], configCmd[1:] +} + +func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { + // Generate default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + return parseSecurityOpt(container, hostConfig) +} + +func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + // Register any links from the host config before starting the container + if err := daemon.registerLinks(container, hostConfig); err != nil { + return err + } + + // make sure links is not nil + // this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links + if hostConfig.Links == nil { + hostConfig.Links = []string{} + } + + container.HostConfig = hostConfig + return container.ToDisk() +} + +// verifyContainerSettings performs validation of the hostconfig and config +// structures. +func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + + // First perform verification of settings common across all platforms. + if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } + } + + // Validate if Env contains empty variable or not (e.g., ``, `=foo`) + for _, env := range config.Env { + if _, err := opts.ValidateEnv(env); err != nil { + return nil, err + } + } + } + + if hostConfig == nil { + return nil, nil + } + + if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy") + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("invalid port specification: %q", portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort) + } + } + } + + p := hostConfig.RestartPolicy + + switch p.Name { + case "always", "unless-stopped", "no": + if p.MaximumRetryCount != 0 { + return nil, fmt.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name) + } + case "on-failure": + if p.MaximumRetryCount < 0 { + return nil, fmt.Errorf("maximum retry count cannot be negative") + } + case "": + // do nothing + default: + return nil, fmt.Errorf("invalid restart policy '%s'", p.Name) + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config, update) +} diff --git a/vendor/github.com/moby/moby/daemon/container_operations.go b/vendor/github.com/moby/moby/daemon/container_operations.go new file mode 100644 index 0000000..c302506 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_operations.go @@ -0,0 +1,1049 @@ +package daemon + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" +) + +var ( + // ErrRootFSReadOnly is returned when a container + // rootfs is marked readonly. + ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") + getPortMapInfo = container.GetSandboxPortMapInfo +) + +func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { + var ( + sboxOptions []libnetwork.SandboxOption + err error + dns []string + dnsSearch []string + dnsOptions []string + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), + libnetwork.OptionDomainname(container.Config.Domainname)) + + if container.HostConfig.NetworkMode.IsHost() { + sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) + if len(container.HostConfig.ExtraHosts) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) + } + if len(container.HostConfig.DNS) == 0 && len(daemon.configStore.DNS) == 0 && + len(container.HostConfig.DNSSearch) == 0 && len(daemon.configStore.DNSSearch) == 0 && + len(container.HostConfig.DNSOptions) == 0 && len(daemon.configStore.DNSOptions) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) + } + } else { + // OptionUseExternalKey is mandatory for userns support. + // But optional for non-userns support + sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) + } + + if err = setupPathsAndSandboxOptions(container, &sboxOptions); err != nil { + return nil, err + } + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemon.configStore.DNS) > 0 { + dns = daemon.configStore.DNS + } + + for _, d := range dns { + sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) + } + + if len(container.HostConfig.DNSSearch) > 0 { + dnsSearch = container.HostConfig.DNSSearch + } else if len(daemon.configStore.DNSSearch) > 0 { + dnsSearch = daemon.configStore.DNSSearch + } + + for _, ds := range dnsSearch { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) + } + + if len(container.HostConfig.DNSOptions) > 0 { + dnsOptions = container.HostConfig.DNSOptions + } else if len(daemon.configStore.DNSOptions) > 0 { + dnsOptions = daemon.configStore.DNSOptions + } + + for _, ds := range dnsOptions { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) + } + + if container.NetworkSettings.SecondaryIPAddresses != nil { + name := container.Config.Hostname + if container.Config.Domainname != "" { + name = name + "." + container.Config.Domainname + } + + for _, a := range container.NetworkSettings.SecondaryIPAddresses { + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) + } + } + + for _, extraHost := range container.HostConfig.ExtraHosts { + // allow IPv6 addresses in extra hosts; only split on first ":" + parts := strings.SplitN(extraHost, ":", 2) + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + sboxOptions = append(sboxOptions, + libnetwork.OptionPortMapping(pbList), + libnetwork.OptionExposedPorts(exposeList)) + + // Legacy Link feature is supported only for the default bridge network. + // return if this call to build join options is not for default bridge network + // Legacy Link is only supported by docker run --link + bridgeSettings, ok := container.NetworkSettings.Networks[defaultNetName] + if !ok || bridgeSettings.EndpointSettings == nil { + return sboxOptions, nil + } + + if bridgeSettings.EndpointID == "" { + return sboxOptions, nil + } + + var ( + childEndpoints, parentEndpoints []string + cEndpointID string + ) + + children := daemon.children(container) + for linkAlias, child := range children { + if !isLinkable(child) { + return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) + } + _, alias := path.Split(linkAlias) + // allow access to the linked container via the alias, real name, and container hostname + aliasList := alias + " " + child.Config.Hostname + // only add the name if alias isn't equal to the name + if alias != child.Name[1:] { + aliasList = aliasList + " " + child.Name[1:] + } + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks[defaultNetName].IPAddress)) + cEndpointID = child.NetworkSettings.Networks[defaultNetName].EndpointID + if cEndpointID != "" { + childEndpoints = append(childEndpoints, cEndpointID) + } + } + + for alias, parent := range daemon.parents(container) { + if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { + continue + } + + _, alias = path.Split(alias) + logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) + sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( + parent.ID, + alias, + bridgeSettings.IPAddress, + )) + if cEndpointID != "" { + parentEndpoints = append(parentEndpoints, cEndpointID) + } + } + + linkOptions := options.Generic{ + netlabel.GenericData: options.Generic{ + "ParentEndpoints": parentEndpoints, + "ChildEndpoints": childEndpoints, + }, + } + + sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) + return sboxOptions, nil +} + +func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings) error { + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)} + } + + if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + for s := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(s) + if err != nil { + continue + } + + if sn.Name() == n.Name() { + // Avoid duplicate config + return nil + } + if !containertypes.NetworkMode(sn.Type()).IsPrivate() || + !containertypes.NetworkMode(n.Type()).IsPrivate() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(sn.Name()).IsNone() || + containertypes.NetworkMode(n.Name()).IsNone() { + return runconfig.ErrConflictNoNetwork + } + } + + if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + + return nil +} + +func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.BuildEndpointInfo(n, ep); err != nil { + return err + } + + if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() { + container.NetworkSettings.Bridge = daemon.configStore.bridgeConfig.Iface + } + + return nil +} + +// UpdateNetwork is used to update the container's network (e.g. when linked containers +// get removed/unlinked). +func (daemon *Daemon) updateNetwork(container *container.Container) error { + var ( + start = time.Now() + ctrl = daemon.netController + sid = container.NetworkSettings.SandboxID + ) + + sb, err := ctrl.SandboxByID(sid) + if err != nil { + return fmt.Errorf("error locating sandbox id %s: %v", sid, err) + } + + // Find if container is connected to the default bridge network + var n libnetwork.Network + for name := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(name) + if err != nil { + continue + } + if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() { + n = sn + break + } + } + + if n == nil { + // Not connected to the default bridge network; Nothing to do + return nil + } + + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return fmt.Errorf("Update network failed: %v", err) + } + + if err := sb.Refresh(options...); err != nil { + return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err) + } + + networkActions.WithValues("update").UpdateSince(start) + + return nil +} + +func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrName string, epConfig *networktypes.EndpointSettings) (libnetwork.Network, *networktypes.NetworkingConfig, error) { + n, err := daemon.FindNetwork(idOrName) + if err != nil { + // We should always be able to find the network for a + // managed container. + if container.Managed { + return nil, nil, err + } + } + + // If we found a network and if it is not dynamically created + // we should never attempt to attach to that network here. + if n != nil { + if container.Managed || !n.Info().Dynamic() { + return n, nil, nil + } + } + + var addresses []string + if epConfig != nil && epConfig.IPAMConfig != nil { + if epConfig.IPAMConfig.IPv4Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv4Address) + } + + if epConfig.IPAMConfig.IPv6Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv6Address) + } + } + + var ( + config *networktypes.NetworkingConfig + retryCount int + ) + + for { + // In all other cases, attempt to attach to the network to + // trigger attachment in the swarm cluster manager. + if daemon.clusterProvider != nil { + var err error + config, err = daemon.clusterProvider.AttachNetwork(idOrName, container.ID, addresses) + if err != nil { + return nil, nil, err + } + } + + n, err = daemon.FindNetwork(idOrName) + if err != nil { + if daemon.clusterProvider != nil { + if err := daemon.clusterProvider.DetachNetwork(idOrName, container.ID); err != nil { + logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) + } + } + + // Retry network attach again if we failed to + // find the network after successfull + // attachment because the only reason that + // would happen is if some other container + // attached to the swarm scope network went down + // and removed the network while we were in + // the process of attaching. + if config != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + if retryCount >= 5 { + return nil, nil, fmt.Errorf("could not find network %s after successful attachment", idOrName) + } + retryCount++ + continue + } + } + + return nil, nil, err + } + + break + } + + // This container has attachment to a swarm scope + // network. Update the container network settings accordingly. + container.NetworkSettings.HasSwarmEndpoint = true + return n, config, nil +} + +// updateContainerNetworkSettings update the network settings +func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) { + var n libnetwork.Network + + mode := container.HostConfig.NetworkMode + if container.Config.NetworkDisabled || mode.IsContainer() { + return + } + + networkName := mode.NetworkName() + if mode.IsDefault() { + networkName = daemon.netController.Config().Daemon.DefaultNetwork + } + + if mode.IsUserDefined() { + var err error + + n, err = daemon.FindNetwork(networkName) + if err == nil { + networkName = n.Name() + } + } + + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{} + } + + if len(endpointsConfig) > 0 { + if container.NetworkSettings.Networks == nil { + container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) + } + + for name, epConfig := range endpointsConfig { + container.NetworkSettings.Networks[name] = &network.EndpointSettings{ + EndpointSettings: epConfig, + } + } + } + + if container.NetworkSettings.Networks == nil { + container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) + container.NetworkSettings.Networks[networkName] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + + // Convert any settings added by client in default name to + // engine's default network name key + if mode.IsDefault() { + if nConf, ok := container.NetworkSettings.Networks[mode.NetworkName()]; ok { + container.NetworkSettings.Networks[networkName] = nConf + delete(container.NetworkSettings.Networks, mode.NetworkName()) + } + } + + if !mode.IsUserDefined() { + return + } + // Make sure to internally store the per network endpoint config by network name + if _, ok := container.NetworkSettings.Networks[networkName]; ok { + return + } + + if n != nil { + if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok { + container.NetworkSettings.Networks[networkName] = nwConfig + delete(container.NetworkSettings.Networks, n.ID()) + return + } + } +} + +func (daemon *Daemon) allocateNetwork(container *container.Container) error { + start := time.Now() + controller := daemon.netController + + if daemon.netController == nil { + return nil + } + + // Cleanup any stale sandbox left over due to ungraceful daemon shutdown + if err := controller.SandboxDestroy(container.ID); err != nil { + logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) + } + + updateSettings := false + if len(container.NetworkSettings.Networks) == 0 { + if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { + return nil + } + + daemon.updateContainerNetworkSettings(container, nil) + updateSettings = true + } + + // always connect default network first since only default + // network mode support link and we need do some setting + // on sandbox initialize for link, but the sandbox only be initialized + // on first network connecting. + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok { + cleanOperationalData(nConf) + if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil { + return err + } + + } + + // the intermediate map is necessary because "connectToNetwork" modifies "container.NetworkSettings.Networks" + networks := make(map[string]*network.EndpointSettings) + for n, epConf := range container.NetworkSettings.Networks { + if n == defaultNetName { + continue + } + + networks[n] = epConf + } + + for netName, epConf := range networks { + cleanOperationalData(epConf) + if err := daemon.connectToNetwork(container, netName, epConf.EndpointSettings, updateSettings); err != nil { + return err + } + } + + if err := container.WriteHostConfig(); err != nil { + return err + } + networkActions.WithValues("allocate").UpdateSince(start) + return nil +} + +func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox { + var sb libnetwork.Sandbox + daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { + if s.ContainerID() == container.ID { + sb = s + return true + } + return false + }) + return sb +} + +// hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration +func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { + return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0) +} + +// User specified ip address is acceptable only for networks with user specified subnets. +func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { + if n == nil || epConfig == nil { + return nil + } + if !hasUserDefinedIPAddress(epConfig) { + return nil + } + _, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig() + for _, s := range []struct { + ipConfigured bool + subnetConfigs []*libnetwork.IpamConf + }{ + { + ipConfigured: len(epConfig.IPAMConfig.IPv4Address) > 0, + subnetConfigs: nwIPv4Configs, + }, + { + ipConfigured: len(epConfig.IPAMConfig.IPv6Address) > 0, + subnetConfigs: nwIPv6Configs, + }, + } { + if s.ipConfigured { + foundSubnet := false + for _, cfg := range s.subnetConfigs { + if len(cfg.PreferredPool) > 0 { + foundSubnet = true + break + } + } + if !foundSubnet { + return runconfig.ErrUnsupportedNetworkNoSubnetAndIP + } + } + } + + return nil +} + +// cleanOperationalData resets the operational data from the passed endpoint settings +func cleanOperationalData(es *network.EndpointSettings) { + es.EndpointID = "" + es.Gateway = "" + es.IPAddress = "" + es.IPPrefixLen = 0 + es.IPv6Gateway = "" + es.GlobalIPv6Address = "" + es.GlobalIPv6PrefixLen = 0 + es.MacAddress = "" + if es.IPAMOperational { + es.IPAMConfig = nil + } +} + +func (daemon *Daemon) updateNetworkConfig(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error { + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + if hasUserDefinedIPAddress(endpointConfig) && !enableIPOnPredefinedNetwork() { + return runconfig.ErrUnsupportedNetworkAndIP + } + if endpointConfig != nil && len(endpointConfig.Aliases) > 0 && !container.EnableServiceDiscoveryOnDefaultNetwork() { + return runconfig.ErrUnsupportedNetworkAndAlias + } + } else { + addShortID := true + shortID := stringid.TruncateID(container.ID) + for _, alias := range endpointConfig.Aliases { + if alias == shortID { + addShortID = false + break + } + } + if addShortID { + endpointConfig.Aliases = append(endpointConfig.Aliases, shortID) + } + } + + if err := validateNetworkingConfig(n, endpointConfig); err != nil { + return err + } + + if updateSettings { + if err := daemon.updateNetworkSettings(container, n, endpointConfig); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { + start := time.Now() + if container.HostConfig.NetworkMode.IsContainer() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(idOrName).IsBridge() && + daemon.configStore.DisableBridge { + container.Config.NetworkDisabled = true + return nil + } + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + + n, config, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig) + if err != nil { + return err + } + if n == nil { + return nil + } + + var operIPAM bool + if config != nil { + if epConfig, ok := config.EndpointsConfig[n.Name()]; ok { + if endpointConfig.IPAMConfig == nil || + (endpointConfig.IPAMConfig.IPv4Address == "" && + endpointConfig.IPAMConfig.IPv6Address == "" && + len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) { + operIPAM = true + } + + // copy IPAMConfig and NetworkID from epConfig via AttachNetwork + endpointConfig.IPAMConfig = epConfig.IPAMConfig + endpointConfig.NetworkID = epConfig.NetworkID + } + } + + err = daemon.updateNetworkConfig(container, n, endpointConfig, updateSettings) + if err != nil { + return err + } + + controller := daemon.netController + sb := daemon.getNetworkSandbox(container) + createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb, daemon.configStore.DNS) + if err != nil { + return err + } + + endpointName := strings.TrimPrefix(container.Name, "/") + ep, err := n.CreateEndpoint(endpointName, createOptions...) + if err != nil { + return err + } + defer func() { + if err != nil { + if e := ep.Delete(false); e != nil { + logrus.Warnf("Could not rollback container connection to network %s", idOrName) + } + } + }() + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + IPAMOperational: operIPAM, + } + if _, ok := container.NetworkSettings.Networks[n.ID()]; ok { + delete(container.NetworkSettings.Networks, n.ID()) + } + + if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { + return err + } + + if sb == nil { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err = controller.NewSandbox(container.ID, options...) + if err != nil { + return err + } + + container.UpdateSandboxNetworkSettings(sb) + } + + joinOptions, err := container.BuildJoinOptions(n) + if err != nil { + return err + } + + if err := ep.Join(sb, joinOptions...); err != nil { + return err + } + + if !container.Managed { + // add container name/alias to DNS + if err := daemon.ActivateContainerServiceBinding(container.Name); err != nil { + return fmt.Errorf("Activate container service binding for %s failed: %v", container.Name, err) + } + } + + if err := container.UpdateJoinInfo(n, ep); err != nil { + return fmt.Errorf("Updating join info failed: %v", err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sb) + + daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID}) + networkActions.WithValues("connect").UpdateSince(start) + return nil +} + +// ForceEndpointDelete deletes an endpoint from a network forcefully +func (daemon *Daemon) ForceEndpointDelete(name string, networkName string) error { + n, err := daemon.FindNetwork(networkName) + if err != nil { + return err + } + + ep, err := n.EndpointByName(name) + if err != nil { + return err + } + return ep.Delete(true) +} + +func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { + var ( + ep libnetwork.Endpoint + sbox libnetwork.Sandbox + ) + + s := func(current libnetwork.Endpoint) bool { + epInfo := current.Info() + if epInfo == nil { + return false + } + if sb := epInfo.Sandbox(); sb != nil { + if sb.ContainerID() == container.ID { + ep = current + sbox = sb + return true + } + } + return false + } + n.WalkEndpoints(s) + + if ep == nil && force { + epName := strings.TrimPrefix(container.Name, "/") + ep, err := n.EndpointByName(epName) + if err != nil { + return err + } + return ep.Delete(force) + } + + if ep == nil { + return fmt.Errorf("container %s is not connected to the network", container.ID) + } + + if err := ep.Leave(sbox); err != nil { + return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sbox) + + if err := ep.Delete(false); err != nil { + return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) + } + + delete(container.NetworkSettings.Networks, n.Name()) + + if daemon.clusterProvider != nil && n.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(n.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", n.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(n.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", n.ID(), err) + } + } + } + + return nil +} + +func (daemon *Daemon) initializeNetworking(container *container.Container) error { + var err error + + if container.HostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + initializeNetworkingPaths(container, nc) + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.HostConfig.NetworkMode.IsHost() { + if container.Config.Hostname == "" { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + } + } + + if err := daemon.allocateNetwork(container); err != nil { + return err + } + + return container.BuildHostnameFile() +} + +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { + nc, err := daemon.GetContainer(connectedContainerID) + if err != nil { + return nil, err + } + if containerID == nc.ID { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) + return nil, derr.NewRequestConflictError(err) + } + if nc.IsRestarting() { + return nil, errContainerIsRestarting(connectedContainerID) + } + return nc, nil +} + +func (daemon *Daemon) releaseNetwork(container *container.Container) { + start := time.Now() + if daemon.netController == nil { + return + } + if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } + + sid := container.NetworkSettings.SandboxID + settings := container.NetworkSettings.Networks + container.NetworkSettings.Ports = nil + + if sid == "" || len(settings) == 0 { + return + } + + var networks []libnetwork.Network + for n, epSettings := range settings { + if nw, err := daemon.FindNetwork(n); err == nil { + networks = append(networks, nw) + } + + if epSettings.EndpointSettings == nil { + continue + } + + cleanOperationalData(epSettings) + } + + sb, err := daemon.netController.SandboxByID(sid) + if err != nil { + logrus.Warnf("error locating sandbox id %s: %v", sid, err) + return + } + + if err := sb.Delete(); err != nil { + logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) + } + + for _, nw := range networks { + if daemon.clusterProvider != nil && nw.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(nw.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", nw.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(nw.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", nw.ID(), err) + } + } + } + + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(nw, "disconnect", attributes) + } + networkActions.WithValues("release").UpdateSince(start) +} + +func errRemovalContainer(containerID string) error { + return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) +} + +// ConnectToNetwork connects a container to a network +func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + if !container.Running { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + + n, err := daemon.FindNetwork(idOrName) + if err == nil && n != nil { + if err := daemon.updateNetworkConfig(container, n, endpointConfig, true); err != nil { + return err + } + } else { + container.NetworkSettings.Networks[idOrName] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + } else if !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else { + if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { + return err + } + } + if err := container.ToDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + return nil +} + +// DisconnectFromNetwork disconnects container from network n. +func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { + n, err := daemon.FindNetwork(networkName) + if !container.Running || (err != nil && force) { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + // In case networkName is resolved we will use n.Name() + // this will cover the case where network id is passed. + if n != nil { + networkName = n.Name() + } + if _, ok := container.NetworkSettings.Networks[networkName]; !ok { + return fmt.Errorf("container %s is not connected to the network %s", container.ID, networkName) + } + delete(container.NetworkSettings.Networks, networkName) + } else if err == nil && !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else if err == nil { + if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + if err := daemon.disconnectFromNetwork(container, n, false); err != nil { + return err + } + } else { + return err + } + + if err := container.ToDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + + if n != nil { + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) + } + return nil +} + +// ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response +func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + return fmt.Errorf("network sandbox does not exist for container %s", containerName) + } + return sb.EnableService() +} + +// DeactivateContainerServiceBinding remove this container fromload balancer active rotation, and DNS response +func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + return fmt.Errorf("network sandbox does not exist for container %s", containerName) + } + return sb.DisableService() +} diff --git a/vendor/github.com/moby/moby/daemon/container_operations_solaris.go b/vendor/github.com/moby/moby/daemon/container_operations_solaris.go new file mode 100644 index 0000000..1653948 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_operations_solaris.go @@ -0,0 +1,46 @@ +// +build solaris + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +} diff --git a/vendor/github.com/moby/moby/daemon/container_operations_unix.go b/vendor/github.com/moby/moby/daemon/container_operations_unix.go new file mode 100644 index 0000000..68cb072 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_operations_unix.go @@ -0,0 +1,283 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/cloudflare/cfssl/log" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/links" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/pkg/errors" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + var env []string + children := daemon.children(container) + + bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if bridgeSettings == nil || bridgeSettings.EndpointSettings == nil { + return nil, nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if childBridgeSettings == nil || childBridgeSettings.EndpointSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) + } + + link := links.NewLink( + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + env = append(env, link.ToEnv()...) + } + + return env, nil +} + +func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.IpcMode.Container() + c, err := daemon.GetContainer(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + } + if c.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return c, nil +} + +func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.PidMode.Container() + c, err := daemon.GetContainer(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join PID of a non running container: %s", containerID) + } + if c.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return c, nil +} + +func (daemon *Daemon) setupIpcDirs(c *container.Container) error { + var err error + + c.ShmPath, err = c.ShmResourcePath() + if err != nil { + return err + } + + if c.HostConfig.IpcMode.IsContainer() { + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + c.ShmPath = ic.ShmPath + } else if c.HostConfig.IpcMode.IsHost() { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + } else { + rootUID, rootGID := daemon.GetRemappedUIDGID() + if !c.HasMountFor("/dev/shm") { + shmPath, err := c.ShmResourcePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + return err + } + + shmSize := container.DefaultSHMSize + if c.HostConfig.ShmSize != 0 { + shmSize = c.HostConfig.ShmSize + } + shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) + if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + return err + } + } + + } + + return nil +} + +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + defer func() { + if setupErr != nil { + // cleanup + _ = detachMounted(localMountPath) + + if err := os.RemoveAll(localMountPath); err != nil { + log.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + // retrieve possible remapped range start for root UID, GID + rootUID, rootGID := daemon.GetRemappedUIDGID() + // create tmpfs + if err := idtools.MkdirAllAs(localMountPath, 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating secret local mount path") + } + tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootUID, rootGID) + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to setup secret mount") + } + + for _, s := range c.SecretReferences { + if c.SecretStore == nil { + return fmt.Errorf("secret store is not initialized") + } + + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + return fmt.Errorf("secret target type is not a file target") + } + + targetPath := filepath.Clean(s.File.Name) + // ensure that the target is a filename only; no paths allowed + if targetPath != filepath.Base(targetPath) { + return fmt.Errorf("error creating secret: secret must not be a path") + } + + fPath := filepath.Join(localMountPath, targetPath) + if err := idtools.MkdirAllAs(filepath.Dir(fPath), 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating secret mount path") + } + + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret := c.SecretStore.Get(s.SecretID) + if secret == nil { + return fmt.Errorf("unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + + uid, err := strconv.Atoi(s.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(s.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootUID+uid, rootGID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for secret") + } + } + + label.Relabel(localMountPath, c.MountLabel, false) + + // remount secrets ro + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to remount secret dir as readonly") + } + + return nil +} + +func killProcessDirectly(container *container.Container) error { + if _, err := container.WaitStop(10 * time.Second); err != nil { + // Ensure that we don't kill ourselves + if pid := container.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + if err != syscall.ESRCH { + return err + } + e := errNoSuchProcess{pid, 9} + logrus.Debug(e) + return e + } + } + } + return nil +} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return true +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + var err error + + container.HostsPath, err = container.GetRootResourcePath("hosts") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) + + container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { + container.HostnamePath = nc.HostnamePath + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath +} diff --git a/vendor/github.com/moby/moby/daemon/container_operations_windows.go b/vendor/github.com/moby/moby/daemon/container_operations_windows.go new file mode 100644 index 0000000..d05f251 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_operations_windows.go @@ -0,0 +1,59 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +// getSize returns real size & virtual size +func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + // TODO Windows + return 0, 0 +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work +// against containers which have volumes. You will still be able to cp +// to somewhere on the container drive, but not to any mounted volumes +// inside the container. Without this fix, docker cp is broken to any +// container which has a volume, regardless of where the file is inside the +// container. +func (daemon *Daemon) mountVolumes(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func isLinkable(child *container.Container) bool { + return false +} + +func enableIPOnPredefinedNetwork() bool { + return true +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +} diff --git a/vendor/github.com/moby/moby/daemon/create.go b/vendor/github.com/moby/moby/daemon/create.go new file mode 100644 index 0000000..c71d14e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/create.go @@ -0,0 +1,290 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + volumestore "github.com/docker/docker/volume/store" + "github.com/opencontainers/runc/libcontainer/label" +) + +// CreateManagedContainer creates a container that is managed by a Service +func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, true) +} + +// ContainerCreate creates a regular container +func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, false) +} + +func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (containertypes.ContainerCreateCreatedBody, error) { + start := time.Now() + if params.Config == nil { + return containertypes.ContainerCreateCreatedBody{}, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + if params.HostConfig == nil { + params.HostConfig = &containertypes.HostConfig{} + } + err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + container, err := daemon.create(params, managed) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + } + containerActions.WithValues("create").UpdateSince(start) + + return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil +} + +// Create creates a new container from the given configuration with a given name. +func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) { + var ( + container *container.Container + img *image.Image + imgID image.ID + err error + ) + + if params.Config.Image != "" { + img, err = daemon.GetImage(params.Config.Image) + if err != nil { + return nil, err + } + + if runtime.GOOS == "solaris" && img.OS != "solaris " { + return nil, errors.New("Platform on which parent image was created is not Solaris") + } + imgID = img.ID() + } + + if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { + return nil, err + } + + if err := daemon.mergeAndVerifyLogConfig(¶ms.HostConfig.LogConfig); err != nil { + return nil, err + } + + if container, err = daemon.newContainer(params.Name, params.Config, params.HostConfig, imgID, managed); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.cleanupContainer(container, true, true); err != nil { + logrus.Errorf("failed to cleanup container on create error: %v", err) + } + } + }() + + if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { + return nil, err + } + + container.HostConfig.StorageOpt = params.HostConfig.StorageOpt + + // Set RWLayer for container after mount labels have been set + if err := daemon.setRWLayer(container); err != nil { + return nil, err + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.CheckpointDir(), 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { + return nil, err + } + + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { + return nil, err + } + + var endpointsConfigs map[string]*networktypes.EndpointSettings + if params.NetworkingConfig != nil { + endpointsConfigs = params.NetworkingConfig.EndpointsConfig + } + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. + container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + daemon.updateContainerNetworkSettings(container, endpointsConfigs) + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving new container to disk: %v", err) + return nil, err + } + if err := daemon.Register(container); err != nil { + return nil, err + } + daemon.LogContainerEvent(container, "create") + return container, nil +} + +func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode, privileged bool) ([]string, error) { + if ipcMode.IsHost() || pidMode.IsHost() || privileged { + return label.DisableSecOpt(), nil + } + + var ipcLabel []string + var pidLabel []string + ipcContainer := ipcMode.Container() + pidContainer := pidMode.Container() + if ipcContainer != "" { + c, err := daemon.GetContainer(ipcContainer) + if err != nil { + return nil, err + } + ipcLabel = label.DupSecOpt(c.ProcessLabel) + if pidContainer == "" { + return ipcLabel, err + } + } + if pidContainer != "" { + c, err := daemon.GetContainer(pidContainer) + if err != nil { + return nil, err + } + + pidLabel = label.DupSecOpt(c.ProcessLabel) + if ipcContainer == "" { + return pidLabel, err + } + } + + if pidLabel != nil && ipcLabel != nil { + for i := 0; i < len(pidLabel); i++ { + if pidLabel[i] != ipcLabel[i] { + return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") + } + } + return pidLabel, nil + } + return nil, nil +} + +func (daemon *Daemon) setRWLayer(container *container.Container) error { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = img.RootFS.ChainID() + } + + rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.getLayerInit(), container.HostConfig.StorageOpt) + + if err != nil { + return err + } + container.RWLayer = rwLayer + + return nil +} + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the Engine API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts, labels) + if err != nil { + if volumestore.IsNameConflict(err) { + return nil, fmt.Errorf("A volume named %s already exists. Choose a different volume name.", name) + } + return nil, err + } + + daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + return apiV, nil +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { + if img != nil && img.Config != nil { + if err := merge(config, img.Config); err != nil { + return err + } + } + // Reset the Entrypoint if it is [""] + if len(config.Entrypoint) == 1 && config.Entrypoint[0] == "" { + config.Entrypoint = nil + } + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return fmt.Errorf("No command specified") + } + return nil +} + +// Checks if the client set configurations for more than one network while creating a container +// Also checks if the IPAMConfig is valid +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 { + return nil + } + if len(nwConfig.EndpointsConfig) == 1 { + for _, v := range nwConfig.EndpointsConfig { + if v != nil && v.IPAMConfig != nil { + if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address)) + } + if v.IPAMConfig.IPv6Address != "" { + n := net.ParseIP(v.IPAMConfig.IPv6Address) + // if the address is an invalid network address (ParseIP == nil) or if it is + // an IPv4 address (To4() != nil), then it is an invalid IPv6 address + if n == nil || n.To4() != nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address)) + } + } + } + } + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) + return apierrors.NewBadRequestError(err) +} diff --git a/vendor/github.com/moby/moby/daemon/create_unix.go b/vendor/github.com/moby/moby/daemon/create_unix.go new file mode 100644 index 0000000..2fe5c98 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/create_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/runc/libcontainer/label" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + if err := daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + + for spec := range config.Volumes { + name := stringid.GenerateNonCryptoID() + destination := filepath.Clean(spec) + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(destination) { + continue + } + path, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil && !stat.IsDir() { + return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + } + + v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { + return err + } + + container.AddMountPointWithVolume(destination, v, true) + } + return daemon.populateVolumes(container) +} + +// populateVolumes copies data from the container's rootfs into the volume for non-binds. +// this is only called when the container is created. +func (daemon *Daemon) populateVolumes(c *container.Container) error { + for _, mnt := range c.MountPoints { + if mnt.Volume == nil { + continue + } + + if mnt.Type != mounttypes.TypeVolume || !mnt.CopyData { + continue + } + + logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) + if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/create_windows.go b/vendor/github.com/moby/moby/daemon/create_windows.go new file mode 100644 index 0000000..bbf0dbe --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/create_windows.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "fmt" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + // Make sure the host config has the default daemon isolation if not specified by caller. + if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { + hostConfig.Isolation = daemon.defaultIsolation + } + + for spec := range config.Volumes { + + mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver) + if err != nil { + return fmt.Errorf("Unrecognised volume spec: %v", err) + } + + // If the mountpoint doesn't have a name, generate one. + if len(mp.Name) == 0 { + mp.Name = stringid.GenerateNonCryptoID() + } + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(mp.Destination) { + continue + } + + volumeDriver := hostConfig.VolumeDriver + + // Create the volume in the volume driver. If it doesn't exist, + // a new one will be created. + v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + // FIXME Windows: This code block is present in the Linux version and + // allows the contents to be copied to the container FS prior to it + // being started. However, the function utilizes the FollowSymLinkInScope + // path which does not cope with Windows volume-style file paths. There + // is a separate effort to resolve this (@swernli), so this processing + // is deferred for now. A case where this would be useful is when + // a dockerfile includes a VOLUME statement, but something is created + // in that directory during the dockerfile processing. What this means + // on Windows for TP5 is that in that scenario, the contents will not + // copied, but that's (somewhat) OK as HCS will bomb out soon after + // at it doesn't support mapped directories which have contents in the + // destination path anyway. + // + // Example for repro later: + // FROM windowsservercore + // RUN mkdir c:\myvol + // RUN copy c:\windows\system32\ntdll.dll c:\myvol + // VOLUME "c:\myvol" + // + // Then + // docker build -t vol . + // docker run -it --rm vol cmd <-- This is where HCS will error out. + // + // // never attempt to copy existing content in a container FS to a shared volume + // if v.DriverName() == volume.DefaultDriverName { + // if err := container.CopyImagePathContent(v, mp.Destination); err != nil { + // return err + // } + // } + + // Add it to container.MountPoints + container.AddMountPointWithVolume(mp.Destination, v, mp.RW) + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/daemon.go b/vendor/github.com/moby/moby/daemon/daemon.go new file mode 100644 index 0000000..f124612 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon.go @@ -0,0 +1,1321 @@ +// Package daemon exposes the functions that occur on the host server +// that the Docker daemon is running. +// +// In implementing the various functions of the daemon, there is often +// a method-specific struct for configuring the runtime behavior. +package daemon + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/plugin" + "github.com/docker/libnetwork/cluster" + // register graph drivers + _ "github.com/docker/docker/daemon/graphdriver/register" + dmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/migrate/v1" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libtrust" + "github.com/pkg/errors" +) + +var ( + // DefaultRuntimeBinary is the default runtime to be used by + // containerd if none is specified + DefaultRuntimeBinary = "docker-runc" + + // DefaultInitBinary is the name of the default init binary + DefaultInitBinary = "docker-init" + + errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") +) + +// Daemon holds information about the Docker daemon. +type Daemon struct { + ID string + repository string + containers container.Store + execCommands *exec.Store + referenceStore reference.Store + downloadManager *xfer.LayerDownloadManager + uploadManager *xfer.LayerUploadManager + distributionMetadataStore dmetadata.Store + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *Config + statsCollector *statsCollector + defaultLogConfig containertypes.LogConfig + RegistryService registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + volumes *store.VolumeStore + discoveryWatcher discoveryReloader + root string + seccompEnabled bool + shutdown bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + layerStore layer.Store + imageStore image.Store + PluginStore *plugin.Store // todo: remove + pluginManager *plugin.Manager + nameIndex *registrar.Registrar + linkIndex *linkIndex + containerd libcontainerd.Client + containerdRemote libcontainerd.Remote + defaultIsolation containertypes.Isolation // Default isolation mode on Windows + clusterProvider cluster.Provider + cluster Cluster + + seccompProfile []byte + seccompProfilePath string +} + +// HasExperimental returns whether the experimental features of the daemon are enabled or not +func (daemon *Daemon) HasExperimental() bool { + if daemon.configStore != nil && daemon.configStore.Experimental { + return true + } + return false +} + +func (daemon *Daemon) restore() error { + var ( + currentDriver = daemon.GraphDriverName() + containers = make(map[string]*container.Container) + ) + + logrus.Info("Loading containers: start.") + + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if err != nil { + logrus.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) + if err != nil { + logrus.Errorf("Failed to load container mount %v: %v", id, err) + continue + } + container.RWLayer = rwlayer + logrus.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + removeContainers := make(map[string]*container.Container) + restartContainers := make(map[*container.Container]chan struct{}) + activeSandboxes := make(map[string]interface{}) + for id, c := range containers { + if err := daemon.registerName(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } + if err := daemon.Register(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } + + // verify that all volumes valid and have been migrated from the pre-1.7 layout + if err := daemon.verifyVolumesInfo(c); err != nil { + // don't skip the container due to error + logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err) + } + + // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. + // We should rewrite it to use the daemon defaults. + // Fixes https://github.com/docker/docker/issues/22536 + if c.HostConfig.LogConfig.Type == "" { + if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil { + logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err) + continue + } + } + } + + var migrateLegacyLinks bool // Not relevant on Windows + var wg sync.WaitGroup + var mapLock sync.Mutex + for _, c := range containers { + wg.Add(1) + go func(c *container.Container) { + defer wg.Done() + daemon.backportMountSpec(c) + if err := c.ToDiskLocking(); err != nil { + logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk") + } + + if c.IsRunning() || c.IsPaused() { + c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking + if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil { + logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err) + return + } + + // we call Mount and then Unmount to get BaseFs of the container + if err := daemon.Mount(c); err != nil { + // The mount is unlikely to fail. However, in case mount fails + // the container should be allowed to restore here. Some functionalities + // (like docker exec -u user) might be missing but container is able to be + // stopped/restarted/removed. + // See #29365 for related information. + // The error is only logged here. + logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) + } else { + if err := daemon.Unmount(c); err != nil { + logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) + } + } + + c.ResetRestartManager(false) + if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() { + options, err := daemon.buildSandboxOptions(c) + if err != nil { + logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err) + } + mapLock.Lock() + activeSandboxes[c.NetworkSettings.SandboxID] = options + mapLock.Unlock() + } + + } + // fixme: only if not running + // get list of containers we need to restart + if !c.IsRunning() && !c.IsPaused() { + // Do not autostart containers which + // has endpoints in a swarm scope + // network yet since the cluster is + // not initialized yet. We will start + // it after the cluster is + // initialized. + if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { + mapLock.Lock() + restartContainers[c] = make(chan struct{}) + mapLock.Unlock() + } else if c.HostConfig != nil && c.HostConfig.AutoRemove { + mapLock.Lock() + removeContainers[c.ID] = c + mapLock.Unlock() + } + } + + if c.RemovalInProgress { + // We probably crashed in the middle of a removal, reset + // the flag. + // + // We DO NOT remove the container here as we do not + // know if the user had requested for either the + // associated volumes, network links or both to also + // be removed. So we put the container in the "dead" + // state and leave further processing up to them. + logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) + c.ResetRemovalInProgress() + c.SetDead() + c.ToDisk() + } + + // if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated + if c.HostConfig != nil && c.HostConfig.Links == nil { + migrateLegacyLinks = true + } + }(c) + } + wg.Wait() + daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes) + if err != nil { + return fmt.Errorf("Error initializing network controller: %v", err) + } + + // Perform migration of legacy sqlite links (no-op on Windows) + if migrateLegacyLinks { + if err := daemon.sqliteMigration(containers); err != nil { + return err + } + } + + // Now that all the containers are registered, register the links + for _, c := range containers { + if err := daemon.registerLinks(c, c.HostConfig); err != nil { + logrus.Errorf("failed to register link for container %s: %v", c.ID, err) + } + } + + group := sync.WaitGroup{} + for c, notifier := range restartContainers { + group.Add(1) + + go func(c *container.Container, chNotify chan struct{}) { + defer group.Done() + + logrus.Debugf("Starting container %s", c.ID) + + // ignore errors here as this is a best effort to wait for children to be + // running before we try to start the container + children := daemon.children(c) + timeout := time.After(5 * time.Second) + for _, child := range children { + if notifier, exists := restartContainers[child]; exists { + select { + case <-notifier: + case <-timeout: + } + } + } + + // Make sure networks are available before starting + daemon.waitForNetworks(c) + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Errorf("Failed to start container %s: %s", c.ID, err) + } + close(chNotify) + }(c, notifier) + + } + group.Wait() + + removeGroup := sync.WaitGroup{} + for id := range removeContainers { + removeGroup.Add(1) + go func(cid string) { + if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("Failed to remove container %s: %s", cid, err) + } + removeGroup.Done() + }(id) + } + removeGroup.Wait() + + // any containers that were started above would already have had this done, + // however we need to now prepare the mountpoints for the rest of the containers as well. + // This shouldn't cause any issue running on the containers that already had this run. + // This must be run after any containers with a restart policy so that containerized plugins + // can have a chance to be running before we try to initialize them. + for _, c := range containers { + // if the container has restart policy, do not + // prepare the mountpoints since it has been done on restarting. + // This is to speed up the daemon start when a restart container + // has a volume and the volume dirver is not available. + if _, ok := restartContainers[c]; ok { + continue + } else if _, ok := removeContainers[c.ID]; ok { + // container is automatically removed, skip it. + continue + } + + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.prepareMountPoints(c); err != nil { + logrus.Error(err) + } + }(c) + } + + group.Wait() + + logrus.Info("Loading containers: done.") + + return nil +} + +// RestartSwarmContainers restarts any autostart container which has a +// swarm endpoint. +func (daemon *Daemon) RestartSwarmContainers() { + group := sync.WaitGroup{} + for _, c := range daemon.List() { + if !c.IsRunning() && !c.IsPaused() { + // Autostart all the containers which has a + // swarm endpoint now that the cluster is + // initialized. + if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint { + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Error(err) + } + }(c) + } + } + + } + group.Wait() +} + +// waitForNetworks is used during daemon initialization when starting up containers +// It ensures that all of a container's networks are available before the daemon tries to start the container. +// In practice it just makes sure the discovery service is available for containers which use a network that require discovery. +func (daemon *Daemon) waitForNetworks(c *container.Container) { + if daemon.discoveryWatcher == nil { + return + } + // Make sure if the container has a network that requires discovery that the discovery service is available before starting + for netName := range c.NetworkSettings.Networks { + // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready + // Most likely this is because the K/V store used for discovery is in a container and needs to be started + if _, err := daemon.netController.NetworkByName(netName); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + continue + } + // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host + // FIXME: why is this slow??? + logrus.Debugf("Container %s waiting for network to be ready", c.Name) + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(60 * time.Second): + } + return + } + } +} + +func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.children(c) +} + +// parents returns the names of the parent containers of the container +// with the given name. +func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.parents(c) +} + +func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { + if err == registrar.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } + return err + } + daemon.linkIndex.link(parent, child, fullName) + return nil +} + +// DaemonJoinsCluster informs the daemon has joined the cluster and provides +// the handler to query the cluster component +func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) { + daemon.setClusterProvider(clusterProvider) +} + +// DaemonLeavesCluster informs the daemon has left the cluster +func (daemon *Daemon) DaemonLeavesCluster() { + // Daemon is in charge of removing the attachable networks with + // connected containers when the node leaves the swarm + daemon.clearAttachableNetworks() + daemon.setClusterProvider(nil) +} + +// setClusterProvider sets a component for querying the current cluster state. +func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) { + daemon.clusterProvider = clusterProvider + // call this in a goroutine to allow netcontroller handle this event async + // and not block if it is in the middle of talking with cluster + go daemon.netController.SetClusterProvider(clusterProvider) +} + +// IsSwarmCompatible verifies if the current daemon +// configuration is compatible with the swarm mode +func (daemon *Daemon) IsSwarmCompatible() error { + if daemon.configStore == nil { + return nil + } + return daemon.configStore.isSwarmCompatible() +} + +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. +func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure that we have a correct root key limit for launching containers. + if err := ModifyRootKeyLimit(); err != nil { + logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) + } + + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if !platformSupported { + return nil, errSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := setupDaemonProcess(config); err != nil { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := tempDir(config.Root, rootUID, rootGID) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + d := &Daemon{configStore: config} + // Ensure the daemon is properly shutdown if there is a failure during + // initialization + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + if err := d.setupSeccompProfile(); err != nil { + return nil, err + } + + // Set the default isolation mode (only applicable on Windows) + if err := d.setDefaultIsolation(); err != nil { + return nil, fmt.Errorf("error setting default isolation mode: %v", err) + } + + logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) + + if err := configureMaxThreads(config); err != nil { + logrus.Warnf("Failed to configure golang's threads limit: %v", err) + } + + if err := ensureDefaultAppArmorProfile(); err != nil { + logrus.Errorf(err.Error()) + } + + daemonRepo := filepath.Join(config.Root, "containers") + if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if runtime.GOOS == "windows" { + if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) { + return nil, err + } + } + + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } + + d.RegistryService = registryService + d.PluginStore = plugin.NewStore(config.Root) // todo: remove + // Plugin system initialization should happen before restore. Do not change order. + d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ + Root: filepath.Join(config.Root, "plugins"), + ExecRoot: "/run/docker/plugins", // possibly needs fixing + Store: d.PluginStore, + Executor: containerdRemote, + RegistryService: registryService, + LiveRestoreEnabled: config.LiveRestoreEnabled, + LogPluginEvent: d.LogPluginEvent, // todo: make private + }) + if err != nil { + return nil, errors.Wrap(err, "couldn't create plugin manager") + } + + d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: driverName, + GraphDriverOptions: config.GraphOptions, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + PluginGetter: d.PluginStore, + ExperimentalEnabled: config.Experimental, + }) + if err != nil { + return nil, err + } + + graphDriver := d.layerStore.DriverName() + imageRoot := filepath.Join(config.Root, "image", graphDriver) + + // Configure and validate the kernels security support + if err := configureKernelSecuritySupport(config, graphDriver); err != nil { + return nil, err + } + + logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) + d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads) + logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) + d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) + + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + d.imageStore, err = image.NewImageStore(ifs, d.layerStore) + if err != nil { + return nil, err + } + + // Configure the volumes driver + volStore, err := d.configureVolumes(rootUID, rootGID) + if err != nil { + return nil, err + } + + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + + trustDir := filepath.Join(config.Root, "trust") + + if err := system.MkdirAll(trustDir, 0700); err != nil { + return nil, err + } + + distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) + if err != nil { + return nil, err + } + + eventsService := events.New() + + referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + + migrationStart := time.Now() + if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil { + logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) + } + logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) + + // Discovery is only enabled when the daemon is launched with an address to advertise. When + // initialized, the daemon is registered and we can store the discovery backend as its read-only + if err := d.initDiscovery(config); err != nil { + return nil, err + } + + sysInfo := sysinfo.New(false) + // Check if Devices cgroup is mounted, it is hard requirement for container security, + // on Linux. + if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { + return nil, fmt.Errorf("Devices cgroup isn't mounted") + } + + d.ID = trustKey.PublicKey().KeyID() + d.repository = daemonRepo + d.containers = container.NewMemoryStore() + d.execCommands = exec.NewStore() + d.referenceStore = referenceStore + d.distributionMetadataStore = distributionMetadataStore + d.trustKey = trustKey + d.idIndex = truncindex.NewTruncIndex([]string{}) + d.statsCollector = d.newStatsCollector(1 * time.Second) + d.defaultLogConfig = containertypes.LogConfig{ + Type: config.LogConfig.Type, + Config: config.LogConfig.Config, + } + d.EventsService = eventsService + d.volumes = volStore + d.root = config.Root + d.uidMaps = uidMaps + d.gidMaps = gidMaps + d.seccompEnabled = sysInfo.Seccomp + + d.nameIndex = registrar.NewRegistrar() + d.linkIndex = newLinkIndex() + d.containerdRemote = containerdRemote + + go d.execCommandGC() + + d.containerd, err = containerdRemote.Client(d) + if err != nil { + return nil, err + } + + if err := d.restore(); err != nil { + return nil, err + } + + // FIXME: this method never returns an error + info, _ := d.SystemInfo() + + engineVersion.WithValues( + dockerversion.Version, + dockerversion.GitCommit, + info.Architecture, + info.Driver, + info.KernelVersion, + info.OperatingSystem, + ).Set(1) + engineCpus.Set(float64(info.NCPU)) + engineMemory.Set(float64(info.MemTotal)) + + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + stackDumpDir := config.Root + if execRoot := config.GetExecRoot(); execRoot != "" { + stackDumpDir = execRoot + } + d.setupDumpStackTrap(stackDumpDir) + + return d, nil +} + +func (daemon *Daemon) shutdownContainer(c *container.Container) error { + stopTimeout := c.StopTimeout() + // TODO(windows): Handle docker restart with paused containers + if c.IsPaused() { + // To terminate a process in freezer cgroup, we should send + // SIGTERM to this process then unfreeze it, and the process will + // force to terminate immediately. + logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID) + sig, ok := signal.SignalMap["TERM"] + if !ok { + return fmt.Errorf("System does not support SIGTERM") + } + if err := daemon.kill(c, int(sig)); err != nil { + return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) + } + if err := daemon.containerUnpause(c); err != nil { + return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) + } + if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil { + logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout) + sig, ok := signal.SignalMap["KILL"] + if !ok { + return fmt.Errorf("System does not support SIGKILL") + } + if err := daemon.kill(c, int(sig)); err != nil { + logrus.Errorf("Failed to SIGKILL container %s", c.ID) + } + c.WaitStop(-1 * time.Second) + return err + } + } + // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force + if err := daemon.containerStop(c, stopTimeout); err != nil { + return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) + } + + c.WaitStop(-1 * time.Second) + return nil +} + +// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers, +// and is limited by daemon's ShutdownTimeout. +func (daemon *Daemon) ShutdownTimeout() int { + // By default we use daemon's ShutdownTimeout. + shutdownTimeout := daemon.configStore.ShutdownTimeout + + graceTimeout := 5 + if daemon.containers != nil { + for _, c := range daemon.containers.List() { + if shutdownTimeout >= 0 { + stopTimeout := c.StopTimeout() + if stopTimeout < 0 { + shutdownTimeout = -1 + } else { + if stopTimeout+graceTimeout > shutdownTimeout { + shutdownTimeout = stopTimeout + graceTimeout + } + } + } + } + } + return shutdownTimeout +} + +// Shutdown stops the daemon. +func (daemon *Daemon) Shutdown() error { + daemon.shutdown = true + // Keep mounts and networking running on daemon shutdown if + // we are to keep containers running and restore them. + + if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { + // check if there are any running containers, if none we should do some cleanup + if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + return nil + } + } + + if daemon.containers != nil { + logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout) + daemon.containers.ApplyAll(func(c *container.Container) { + if !c.IsRunning() { + return + } + logrus.Debugf("stopping %s", c.ID) + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + logrus.Debugf("container stopped %s", c.ID) + }) + } + + if daemon.volumes != nil { + if err := daemon.volumes.Shutdown(); err != nil { + logrus.Errorf("Error shutting down volume store: %v", err) + } + } + + if daemon.layerStore != nil { + if err := daemon.layerStore.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v", err) + } + } + + // Shutdown plugins after containers and layerstore. Don't change the order. + daemon.pluginShutdown() + + // trigger libnetwork Stop only if it's initialized + if daemon.netController != nil { + daemon.netController.Stop() + } + + if err := daemon.cleanupMounts(); err != nil { + return err + } + + return nil +} + +// Mount sets container.BaseFS +// (is it not set coming in? why is it unset?) +func (daemon *Daemon) Mount(container *container.Container) error { + dir, err := container.RWLayer.Mount(container.GetMountLabel()) + if err != nil { + return err + } + logrus.Debugf("container mounted via layerStore: %v", dir) + + if container.BaseFS != dir { + // The mount path reported by the graph driver should always be trusted on Windows, since the + // volume path for a given mounted layer may change over time. This should only be an error + // on non-Windows operating systems. + if container.BaseFS != "" && runtime.GOOS != "windows" { + daemon.Unmount(container) + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + daemon.GraphDriverName(), container.ID, container.BaseFS, dir) + } + } + container.BaseFS = dir // TODO: combine these fields + return nil +} + +// Unmount unsets the container base filesystem +func (daemon *Daemon) Unmount(container *container.Container) error { + if err := container.RWLayer.Unmount(); err != nil { + logrus.Errorf("Error unmounting container %s: %s", container.ID, err) + return err + } + + return nil +} + +// V4Subnets returns the IPv4 subnets of networks that are managed by Docker. +func (daemon *Daemon) V4Subnets() []net.IPNet { + var subnets []net.IPNet + + managedNetworks := daemon.netController.Networks() + + for _, managedNetwork := range managedNetworks { + v4Infos, _ := managedNetwork.Info().IpamInfo() + for _, v4Info := range v4Infos { + if v4Info.IPAMData.Pool != nil { + subnets = append(subnets, *v4Info.IPAMData.Pool) + } + } + } + + return subnets +} + +// V6Subnets returns the IPv6 subnets of networks that are managed by Docker. +func (daemon *Daemon) V6Subnets() []net.IPNet { + var subnets []net.IPNet + + managedNetworks := daemon.netController.Networks() + + for _, managedNetwork := range managedNetworks { + _, v6Infos := managedNetwork.Info().IpamInfo() + for _, v6Info := range v6Infos { + if v6Info.IPAMData.Pool != nil { + subnets = append(subnets, *v6Info.IPAMData.Pool) + } + } + } + + return subnets +} + +// GraphDriverName returns the name of the graph driver used by the layer.Store +func (daemon *Daemon) GraphDriverName() string { + return daemon.layerStore.DriverName() +} + +// GetUIDGIDMaps returns the current daemon's user namespace settings +// for the full uid and gid maps which will be applied to containers +// started in this instance. +func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { + return daemon.uidMaps, daemon.gidMaps +} + +// GetRemappedUIDGID returns the current daemon's uid and gid values +// if user namespaces are in use for this daemon instance. If not +// this function will return "real" root values of 0, 0. +func (daemon *Daemon) GetRemappedUIDGID() (int, int) { + uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + return uid, gid +} + +// tempDir returns the default directory to use for temporary files. +func tempDir(rootDir string, rootUID, rootGID int) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) +} + +func (daemon *Daemon) setupInitLayer(initPath string) error { + rootUID, rootGID := daemon.GetRemappedUIDGID() + return initlayer.Setup(initPath, rootUID, rootGID) +} + +func setDefaultMtu(config *Config) { + // do nothing if the config does not have the default 0 value. + if config.Mtu != 0 { + return + } + config.Mtu = defaultNetworkMtu +} + +func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) { + volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID) + if err != nil { + return nil, err + } + + volumedrivers.RegisterPluginGetter(daemon.PluginStore) + + if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { + return nil, fmt.Errorf("local volume driver could not be registered") + } + return store.New(daemon.configStore.Root) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// initDiscovery initializes the discovery watcher for this daemon. +func (daemon *Daemon) initDiscovery(config *Config) error { + advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise) + if err != nil { + if err == errDiscoveryDisabled { + return nil + } + return err + } + + config.ClusterAdvertise = advertise + discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts) + if err != nil { + return fmt.Errorf("discovery initialization failed (%v)", err) + } + + daemon.discoveryWatcher = discoveryWatcher + return nil +} + +// Reload reads configuration changes and modifies the +// daemon according to those changes. +// These are the settings that Reload changes: +// - Daemon labels. +// - Daemon debug log level. +// - Daemon insecure registries. +// - Daemon max concurrent downloads +// - Daemon max concurrent uploads +// - Cluster discovery (reconfigure and restart). +// - Daemon live restore +// - Daemon shutdown timeout (in seconds). +func (daemon *Daemon) Reload(config *Config) (err error) { + + daemon.configStore.reloadLock.Lock() + + attributes := daemon.platformReload(config) + + defer func() { + // we're unlocking here, because + // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() + // holds that lock too. + daemon.configStore.reloadLock.Unlock() + if err == nil { + daemon.LogDaemonEventWithAttributes("reload", attributes) + } + }() + + if err := daemon.reloadClusterDiscovery(config); err != nil { + return err + } + + if config.IsValueSet("labels") { + daemon.configStore.Labels = config.Labels + } + if config.IsValueSet("debug") { + daemon.configStore.Debug = config.Debug + } + if config.IsValueSet("insecure-registries") { + daemon.configStore.InsecureRegistries = config.InsecureRegistries + if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil { + return err + } + } + if config.IsValueSet("live-restore") { + daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled + if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil { + return err + } + } + + // If no value is set for max-concurrent-downloads we assume it is the default value + // We always "reset" as the cost is lightweight and easy to maintain. + if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil { + *daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads + } else { + maxConcurrentDownloads := defaultMaxConcurrentDownloads + daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads + } + logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) + if daemon.downloadManager != nil { + daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads) + } + + // If no value is set for max-concurrent-upload we assume it is the default value + // We always "reset" as the cost is lightweight and easy to maintain. + if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil { + *daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads + } else { + maxConcurrentUploads := defaultMaxConcurrentUploads + daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads + } + logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) + if daemon.uploadManager != nil { + daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) + } + + if config.IsValueSet("shutdown-timeout") { + daemon.configStore.ShutdownTimeout = config.ShutdownTimeout + logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) + } + + // We emit daemon reload event here with updatable configurations + attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) + attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) + + if daemon.configStore.InsecureRegistries != nil { + insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) + if err != nil { + return err + } + attributes["insecure-registries"] = string(insecureRegistries) + } else { + attributes["insecure-registries"] = "[]" + } + + attributes["cluster-store"] = daemon.configStore.ClusterStore + if daemon.configStore.ClusterOpts != nil { + opts, err := json.Marshal(daemon.configStore.ClusterOpts) + if err != nil { + return err + } + attributes["cluster-store-opts"] = string(opts) + } else { + attributes["cluster-store-opts"] = "{}" + } + attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise + + if daemon.configStore.Labels != nil { + labels, err := json.Marshal(daemon.configStore.Labels) + if err != nil { + return err + } + attributes["labels"] = string(labels) + } else { + attributes["labels"] = "[]" + } + + attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) + attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) + attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) + + return nil +} + +func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { + var err error + newAdvertise := daemon.configStore.ClusterAdvertise + newClusterStore := daemon.configStore.ClusterStore + if config.IsValueSet("cluster-advertise") { + if config.IsValueSet("cluster-store") { + newClusterStore = config.ClusterStore + } + newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise) + if err != nil && err != errDiscoveryDisabled { + return err + } + } + + if daemon.clusterProvider != nil { + if err := config.isSwarmCompatible(); err != nil { + return err + } + } + + // check discovery modifications + if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) { + return nil + } + + // enable discovery for the first time if it was not previously enabled + if daemon.discoveryWatcher == nil { + discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts) + if err != nil { + return fmt.Errorf("discovery initialization failed (%v)", err) + } + daemon.discoveryWatcher = discoveryWatcher + } else { + if err == errDiscoveryDisabled { + // disable discovery if it was previously enabled and it's disabled now + daemon.discoveryWatcher.Stop() + } else { + // reload discovery + if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil { + return err + } + } + } + + daemon.configStore.ClusterStore = newClusterStore + daemon.configStore.ClusterOpts = config.ClusterOpts + daemon.configStore.ClusterAdvertise = newAdvertise + + if daemon.netController == nil { + return nil + } + netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) + if err != nil { + logrus.WithError(err).Warnf("failed to get options with network controller") + return nil + } + err = daemon.netController.ReloadConfiguration(netOptions...) + if err != nil { + logrus.Warnf("Failed to reload configuration with network controller: %v", err) + } + + return nil +} + +func isBridgeNetworkDisabled(config *Config) bool { + return config.bridgeConfig.Iface == disableNetworkBridge +} + +func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { + options := []nwconfig.Option{} + if dconfig == nil { + return options, nil + } + + options = append(options, nwconfig.OptionExperimental(dconfig.Experimental)) + options = append(options, nwconfig.OptionDataDir(dconfig.Root)) + options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot())) + + dd := runconfig.DefaultDaemonNetworkMode() + dn := runconfig.DefaultDaemonNetworkMode().NetworkName() + options = append(options, nwconfig.OptionDefaultDriver(string(dd))) + options = append(options, nwconfig.OptionDefaultNetwork(dn)) + + if strings.TrimSpace(dconfig.ClusterStore) != "" { + kv := strings.Split(dconfig.ClusterStore, "://") + if len(kv) != 2 { + return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL") + } + options = append(options, nwconfig.OptionKVProvider(kv[0])) + options = append(options, nwconfig.OptionKVProviderURL(kv[1])) + } + if len(dconfig.ClusterOpts) > 0 { + options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) + } + + if daemon.discoveryWatcher != nil { + options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) + } + + if dconfig.ClusterAdvertise != "" { + options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) + } + + options = append(options, nwconfig.OptionLabels(dconfig.Labels)) + options = append(options, driverOptions(dconfig)...) + + if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 { + options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) + } + + if pg != nil { + options = append(options, nwconfig.OptionPluginGetter(pg)) + } + + return options, nil +} + +func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} + +// GetCluster returns the cluster +func (daemon *Daemon) GetCluster() Cluster { + return daemon.cluster +} + +// SetCluster sets the cluster +func (daemon *Daemon) SetCluster(cluster Cluster) { + daemon.cluster = cluster +} + +func (daemon *Daemon) pluginShutdown() { + manager := daemon.pluginManager + // Check for a valid manager object. In error conditions, daemon init can fail + // and shutdown called, before plugin manager is initialized. + if manager != nil { + manager.Shutdown() + } +} + +// PluginManager returns current pluginManager associated with the daemon +func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method + return daemon.pluginManager +} + +// PluginGetter returns current pluginStore associated with the daemon +func (daemon *Daemon) PluginGetter() *plugin.Store { + return daemon.PluginStore +} + +// CreateDaemonRoot creates the root for the daemon +func CreateDaemonRoot(config *Config) error { + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return err + } + + if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_experimental.go b/vendor/github.com/moby/moby/daemon/daemon_experimental.go new file mode 100644 index 0000000..fb0251d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_experimental.go @@ -0,0 +1,7 @@ +package daemon + +import "github.com/docker/docker/api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_linux.go b/vendor/github.com/moby/moby/daemon/daemon_linux.go new file mode 100644 index 0000000..9bdf6e2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_linux.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +func (daemon *Daemon) cleanupMountsByID(id string) error { + logrus.Debugf("Cleaning up old mountid %s: start.", id) + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer f.Close() + + return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount) +} + +func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error { + if daemon.root == "" { + return nil + } + var errors []string + + regexps := getCleanPatterns(id) + sc := bufio.NewScanner(reader) + for sc.Scan() { + if fields := strings.Fields(sc.Text()); len(fields) >= 4 { + if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) { + for _, p := range regexps { + if p.MatchString(mnt) { + if err := unmount(mnt); err != nil { + logrus.Error(err) + errors = append(errors, err.Error()) + } + } + } + } + } + } + + if err := sc.Err(); err != nil { + return err + } + + if len(errors) > 0 { + return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) + } + + logrus.Debugf("Cleaning up old mountid %v: done.", id) + return nil +} + +// cleanupMounts umounts shm/mqueue mounts for old containers +func (daemon *Daemon) cleanupMounts() error { + return daemon.cleanupMountsByID("") +} + +func getCleanPatterns(id string) (regexps []*regexp.Regexp) { + var patterns []string + if id == "" { + id = "[0-9a-f]{64}" + patterns = append(patterns, "containers/"+id+"/shm") + } + patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") + for _, p := range patterns { + r, err := regexp.Compile(p) + if err == nil { + regexps = append(regexps, r) + } + } + return +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_linux_test.go b/vendor/github.com/moby/moby/daemon/daemon_linux_test.go new file mode 100644 index 0000000..c40b13b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_linux_test.go @@ -0,0 +1,104 @@ +// +build linux + +package daemon + +import ( + "strings" + "testing" +) + +const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio +143 142 0:60 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +144 142 0:67 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755 +145 144 0:78 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 +146 144 0:49 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +147 142 0:84 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +148 147 0:86 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 +149 148 0:22 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset +150 148 0:25 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu +151 148 0:27 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuacct +152 148 0:28 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory +153 148 0:29 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices +154 148 0:30 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer +155 148 0:31 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio +156 148 0:32 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event +157 148 0:33 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb +158 148 0:35 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +159 142 8:4 /home/mlaventure/gopath /home/mlaventure/gopath rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +160 142 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +164 142 8:4 /home/mlaventure/gopath/src/github.com/docker/docker /go/src/github.com/docker/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +165 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +166 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +167 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +168 144 0:39 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +169 144 0:12 /14 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +83 147 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +89 142 0:87 / /tmp rw,relatime - tmpfs none rw +97 142 0:60 / /run/docker/netns/default rw,nosuid,nodev,noexec,relatime - proc proc rw +100 160 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data/aufs /var/lib/docker/aufs rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +115 100 0:102 / /var/lib/docker/aufs/mnt/0ecda1c63e5b58b3d89ff380bf646c95cc980252cf0b52466d43619aec7c8432 rw,relatime - aufs none rw,si=573b861dbc01905b,dio +116 160 0:107 / /var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +118 142 0:102 / /run/docker/libcontainerd/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/rootfs rw,relatime - aufs none rw,si=573b861dbc01905b,dio +242 142 0:60 / /run/docker/netns/c3664df2a0f7 rw,nosuid,nodev,noexec,relatime - proc proc rw +120 100 0:122 / /var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d rw,relatime - aufs none rw,si=573b861eb147805b,dio +171 142 0:122 / /run/docker/libcontainerd/e406ff6f3e18516d50e03dbca4de54767a69a403a6f7ec1edc2762812824521e/rootfs rw,relatime - aufs none rw,si=573b861eb147805b,dio +310 142 0:60 / /run/docker/netns/71a18572176b rw,nosuid,nodev,noexec,relatime - proc proc rw +` + +func TestCleanupMounts(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount) + + if unmounted != 1 { + t.Fatalf("Expected to unmount the shm (and the shm only)") + } +} + +func TestCleanupMountsByID(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount) + + if unmounted != 1 { + t.Fatalf("Expected to unmount the auf root (and that only)") + } +} + +func TestNotCleanupMounts(t *testing.T) { + d := &Daemon{ + repository: "", + } + var unmounted bool + unmount := func(target string) error { + unmounted = true + return nil + } + mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` + d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount) + if unmounted { + t.Fatalf("Expected not to clean up /dev/shm") + } +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_solaris.go b/vendor/github.com/moby/moby/daemon/daemon_solaris.go new file mode 100644 index 0000000..2b4d8d0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_solaris.go @@ -0,0 +1,523 @@ +// +build solaris,cgo + +package daemon + +import ( + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/solaris/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + lntypes "github.com/docker/libnetwork/types" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +//#include +import "C" + +const ( + defaultVirtualSwitch = "Virtual Switch" + platformSupported = true + solarisMinCPUShares = 1 + solarisMaxCPUShares = 65535 +) + +func getMemoryResources(config containertypes.Resources) specs.CappedMemory { + memory := specs.CappedMemory{} + + if config.Memory > 0 { + memory.Physical = strconv.FormatInt(config.Memory, 10) + } + + if config.MemorySwap != 0 { + memory.Swap = strconv.FormatInt(config.MemorySwap, 10) + } + + return memory +} + +func getCPUResources(config containertypes.Resources) specs.CappedCPU { + cpu := specs.CappedCPU{} + + if config.CpusetCpus != "" { + cpu.Ncpus = config.CpusetCpus + } + + return cpu +} + +func (daemon *Daemon) cleanupMountsByID(id string) error { + return nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + //Since config.SecurityOpt is specifically defined as a "List of string values to + //customize labels for MLs systems, such as SELinux" + //until we figure out how to map to Trusted Extensions + //this is being disabled for now on Solaris + var ( + labelOpts []string + err error + ) + + if len(config.SecurityOpt) > 0 { + return errors.New("Security options are not supported on Solaris") + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + return nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + // solaris can rely upon checkSystem() below, we don't skew kernel versions + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if hostConfig.CPUShares < 0 { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, solarisMinCPUShares) + hostConfig.CPUShares = solarisMinCPUShares + } else if hostConfig.CPUShares > solarisMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, solarisMaxCPUShares) + hostConfig.CPUShares = solarisMaxCPUShares + } + + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + + if hostConfig.ShmSize != 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return false +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + // NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and + // therefore we will not do that for Docker container either. + if hostConfig.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.") + hostConfig.Memory = 0 + hostConfig.MemorySwap = -1 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") + logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") + hostConfig.MemorySwap = -1 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") + } + // Solaris NOTE: We allow and encourage setting the swap without setting the memory limit. + + if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + hostConfig.MemorySwappiness = nil + } + if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.") + hostConfig.MemoryReservation = 0 + } + if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") + } + if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + hostConfig.KernelMemory = 0 + } + if hostConfig.CPUShares != 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") + logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.") + hostConfig.CPUShares = 0 + } + if hostConfig.CPUShares < 0 { + warnings = append(warnings, "Invalid CPUShares value. Must be positive. Discarding.") + logrus.Warnf("Invalid CPUShares value. Must be positive. Discarding.") + hostConfig.CPUQuota = 0 + } + if hostConfig.CPUShares > 0 && !sysinfo.IsCPUSharesAvailable() { + warnings = append(warnings, "Global zone default scheduling class not FSS. Discarding shares.") + logrus.Warnf("Global zone default scheduling class not FSS. Discarding shares.") + hostConfig.CPUShares = 0 + } + + // Solaris NOTE: Linux does not do negative checking for CPUShares and Quota here. But it makes sense to. + if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") + logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") + if hostConfig.CPUQuota > 0 { + warnings = append(warnings, "Quota will be applied on default period, not period specified.") + logrus.Warnf("Quota will be applied on default period, not period specified.") + } + hostConfig.CPUPeriod = 0 + } + if hostConfig.CPUQuota != 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") + logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") + hostConfig.CPUQuota = 0 + } + if hostConfig.CPUQuota < 0 { + warnings = append(warnings, "Invalid CPUQuota value. Must be positive. Discarding.") + logrus.Warnf("Invalid CPUQuota value. Must be positive. Discarding.") + hostConfig.CPUQuota = 0 + } + if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") + logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") + hostConfig.CpusetCpus = "" + hostConfig.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", hostConfig.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", hostConfig.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", hostConfig.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", hostConfig.CpusetMems, sysInfo.Mems) + } + if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") + logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.") + hostConfig.BlkioWeight = 0 + } + if hostConfig.OomKillDisable != nil && !sysInfo.OomKillDisable { + *hostConfig.OomKillDisable = false + // Don't warn; this is the default setting but only applicable to Linux + } + + if sysInfo.IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") + } + + // Solaris NOTE: We do not allow setting Linux specific options, so check and warn for all of them. + + if hostConfig.CapAdd != nil || hostConfig.CapDrop != nil { + warnings = append(warnings, "Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") + logrus.Warnf("Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") + hostConfig.CapAdd = nil + hostConfig.CapDrop = nil + } + + if hostConfig.GroupAdd != nil { + warnings = append(warnings, "Additional groups unsupported on Solaris.Discarding groups lists.") + logrus.Warnf("Additional groups unsupported on Solaris.Discarding groups lists.") + hostConfig.GroupAdd = nil + } + + if hostConfig.IpcMode != "" { + warnings = append(warnings, "IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") + logrus.Warnf("IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") + hostConfig.IpcMode = "" + } + + if hostConfig.PidMode != "" { + warnings = append(warnings, "PID namespace setting unsupported on Solaris. Running container in host PID namespace.") + logrus.Warnf("PID namespace setting unsupported on Solaris. Running container in host PID namespace.") + hostConfig.PidMode = "" + } + + if hostConfig.Privileged { + warnings = append(warnings, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.") + logrus.Warnf("Privileged mode unsupported on Solaris. Discarding privileged mode setting.") + hostConfig.Privileged = false + } + + if hostConfig.UTSMode != "" { + warnings = append(warnings, "UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") + logrus.Warnf("UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") + hostConfig.UTSMode = "" + } + + if hostConfig.CgroupParent != "" { + warnings = append(warnings, "Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.") + logrus.Warnf("Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.") + hostConfig.CgroupParent = "" + } + + if hostConfig.Ulimits != nil { + warnings = append(warnings, "Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") + logrus.Warnf("Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") + hostConfig.Ulimits = nil + } + + return warnings, nil +} + +// platformReload update configuration with platform specific options +func (daemon *Daemon) platformReload(config *Config) map[string]string { + return map[string]string{} +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + + if config.DefaultRuntime == "" { + config.DefaultRuntime = stockRuntimeName + } + if config.Runtimes == nil { + config.Runtimes = make(map[string]types.Runtime) + } + stockRuntimeOpts := []string{} + config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts} + + // checkSystem validates platform-specific requirements + return nil +} + +func checkSystem() error { + // check OS version for compatibility, ensure running in global zone + var err error + var id C.zoneid_t + + if id, err = C.getzoneid(); err != nil { + return fmt.Errorf("Exiting. Error getting zone id: %+v", err) + } + if int(id) != 0 { + return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone") + } + + v, err := kernel.GetKernelVersion() + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 { + return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. Please upgrade to 5.12.0", v.String()) + } + return err +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + // Initialize default network on "null" + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)); err != nil { + return nil, fmt.Errorf("Error creating default 'null' network: %v", err) + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + bridgeName := bridge.DefaultBridgeName + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + } + + // --ip processing + if config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + } + + var ipamV4Conf *libnetwork.IpamConf + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, _, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(false)) + if err != nil { + return fmt.Errorf("Error creating default 'bridge' network: %v", err) + } + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { + // Solaris has no custom images to register + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + return nil, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + return types.RootFS{} +} + +func setupDaemonProcess(config *Config) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_test.go b/vendor/github.com/moby/moby/daemon/daemon_test.go new file mode 100644 index 0000000..00817bd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_test.go @@ -0,0 +1,627 @@ +// +build !solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/discovery" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/registry" + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/go-connections/nat" +) + +// +// https://github.com/docker/docker/issues/8069 +// + +func TestGetContainer(t *testing.T) { + c1 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", + }, + } + + c2 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", + }, + } + + c3 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", + }, + } + + c4 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + }, + } + + c5 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", + }, + } + + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + store.Add(c3.ID, c3) + store.Add(c4.ID, c4) + store.Add(c5.ID, c5) + + index := truncindex.NewTruncIndex([]string{}) + index.Add(c1.ID) + index.Add(c2.ID) + index.Add(c3.ID) + index.Add(c4.ID) + index.Add(c5.ID) + + daemon := &Daemon{ + containers: store, + idIndex: index, + nameIndex: registrar.NewRegistrar(), + } + + daemon.reserveName(c1.ID, c1.Name) + daemon.reserveName(c2.ID, c2.Name) + daemon.reserveName(c3.ID, c3.Name) + daemon.reserveName(c4.ID, c4.Name) + daemon.reserveName(c5.ID, c5.Name) + + if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { + t.Fatal("Should explicitly match full container IDs") + } + + if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 { + t.Fatal("Should match a partial ID") + } + + if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 { + t.Fatal("Should match a full name") + } + + // c3.Name is a partial match for both c3.ID and c2.ID + if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 { + t.Fatal("Should match a full name even though it collides with another container's ID") + } + + if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 { + t.Fatal("Should match a container where the provided prefix is an exact match to the its name, and is also a prefix for its ID") + } + + if _, err := daemon.GetContainer("3cdbd1"); err == nil { + t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's") + } + + if _, err := daemon.GetContainer("nothing"); err == nil { + t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID") + } +} + +func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { + var err error + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + daemon.volumes, err = store.New(tmp) + if err != nil { + return nil, err + } + + volumesDriver, err := local.New(tmp, 0, 0) + if err != nil { + return nil, err + } + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + + return daemon, nil +} + +func TestValidContainerNames(t *testing.T) { + invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} + validNames := []string{"word-word", "word_word", "1weoid"} + + for _, name := range invalidNames { + if validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is not a valid container name and was returned as valid.", name) + } + } + + for _, name := range validNames { + if !validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is a valid container name and was returned as invalid.", name) + } + } +} + +func TestContainerInitDNS(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-container-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) + if err := os.MkdirAll(containerPath, 0755); err != nil { + t.Fatal(err) + } + + config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, +"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, +"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", +"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", +"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, +"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, +"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", +"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, +"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", +"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", +"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` + + // Container struct only used to retrieve path to config file + container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}} + configPath, err := container.ConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + + hostConfigPath, err := container.HostConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonWithVolumeStore(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerID) + if err != nil { + t.Fatal(err) + } + + if c.HostConfig.DNS == nil { + t.Fatal("Expected container DNS to not be nil") + } + + if c.HostConfig.DNSSearch == nil { + t.Fatal("Expected container DNSSearch to not be nil") + } + + if c.HostConfig.DNSOptions == nil { + t.Fatal("Expected container DNSOptions to not be nil") + } +} + +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + portsImage := make(nat.PortSet) + portsImage[newPortNoError("tcp", "1111")] = struct{}{} + portsImage[newPortNoError("tcp", "2222")] = struct{}{} + configImage := &containertypes.Config{ + ExposedPorts: portsImage, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, + } + + portsUser := make(nat.PortSet) + portsUser[newPortNoError("tcp", "2222")] = struct{}{} + portsUser[newPortNoError("tcp", "3333")] = struct{}{} + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &containertypes.Config{ + ExposedPorts: portsUser, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v != "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &containertypes.Config{ + ExposedPorts: ports, + } + + if err := merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) + } + } +} + +func TestDaemonReloadLabels(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:bar"}, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:baz"}, + valuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } +} + +func TestDaemonReloadInsecureRegistries(t *testing.T) { + daemon := &Daemon{} + // initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000" + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + InsecureRegistries: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // this will be removed when reloading + "docker1.com", + "docker2.com", // this will be removed when reloading + }, + }) + + daemon.configStore = &Config{} + + insecureRegistries := []string{ + "127.0.0.0/8", // this will be kept + "10.10.1.11:5000", // this will be kept + "10.10.1.33:5000", // this will be newly added + "docker1.com", // this will be kept + "docker3.com", // this will be newly added + } + + valuesSets := make(map[string]interface{}) + valuesSets["insecure-registries"] = insecureRegistries + + newConfig := &Config{ + CommonConfig: CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + InsecureRegistries: insecureRegistries, + }, + valuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + // After Reload, daemon.RegistryService will be changed which is useful + // for registry communication in daemon. + registries := daemon.RegistryService.ServiceConfig() + + // After Reload(), newConfig has come to registries.InsecureRegistryCIDRs and registries.IndexConfigs in daemon. + // Then collect registries.InsecureRegistryCIDRs in dataMap. + // When collecting, we need to convert CIDRS into string as a key, + // while the times of key appears as value. + dataMap := map[string]int{} + for _, value := range registries.InsecureRegistryCIDRs { + if _, ok := dataMap[value.String()]; !ok { + dataMap[value.String()] = 1 + } else { + dataMap[value.String()]++ + } + } + + for _, value := range registries.IndexConfigs { + if _, ok := dataMap[value.Name]; !ok { + dataMap[value.Name] = 1 + } else { + dataMap[value.Name]++ + } + } + + // Finally compare dataMap with the original insecureRegistries. + // Each value in insecureRegistries should appear in daemon's insecure registries, + // and each can only appear exactly ONCE. + for _, r := range insecureRegistries { + if value, ok := dataMap[r]; !ok { + t.Fatalf("Expected daemon insecure registry %s, got none", r) + } else if value != 1 { + t.Fatalf("Expected only 1 daemon insecure registry %s, got %d", r, value) + } + } + + // assert if "10.10.1.22:5000" is removed when reloading + if value, ok := dataMap["10.10.1.22:5000"]; ok { + t.Fatalf("Expected no insecure registry of 10.10.1.22:5000, got %d", value) + } + + // assert if "docker2.com" is removed when reloading + if value, ok := dataMap["docker2.com"]; ok { + t.Fatalf("Expected no insecure registry of docker2.com, got %d", value) + } +} + +func TestDaemonReloadNotAffectOthers(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:bar"}, + Debug: true, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:baz"}, + valuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } + debug := daemon.configStore.Debug + if !debug { + t.Fatalf("Expected debug 'enabled', got 'disabled'") + } +} + +func TestDaemonDiscoveryReload(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1", + ClusterAdvertise: "127.0.0.1:3333", + }, + } + + if err := daemon.initDiscovery(daemon.configStore); err != nil { + t.Fatal(err) + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } + + valuesSets := make(map[string]interface{}) + valuesSets["cluster-store"] = "memory://127.0.0.1:2222" + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + valuesSet: valuesSets, + }, + } + + expected = discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + ch, errCh = daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{} + + valuesSet := make(map[string]interface{}) + valuesSet["cluster-store"] = "memory://127.0.0.1:2222" + valuesSet["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + valuesSet: valuesSet, + }, + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1", + }, + } + valuesSets := make(map[string]interface{}) + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &Config{ + CommonConfig: CommonConfig{ + ClusterAdvertise: "127.0.0.1:5555", + valuesSet: valuesSets, + }, + } + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(10 * time.Second): + t.Fatal("Timeout waiting for discovery") + } + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } + +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_unix.go b/vendor/github.com/moby/moby/daemon/daemon_unix.go new file mode 100644 index 0000000..5b3ffeb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_unix.go @@ -0,0 +1,1237 @@ +// +build linux freebsd + +package daemon + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/blkiodev" + pblkiodev "github.com/docker/docker/api/types/blkiodev" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/runconfig" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/options" + lntypes "github.com/docker/libnetwork/types" + "github.com/golang/protobuf/ptypes" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/label" + rsystem "github.com/opencontainers/runc/libcontainer/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/vishvananda/netlink" +) + +const ( + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 + platformSupported = true + // It's not kernel limit, we want this 4M limit to supply a reasonable functional container + linuxMinMemory = 4194304 + // constants for remapped root settings + defaultIDSpecifier string = "default" + defaultRemappedID string = "dockremap" + + // constant for cgroup drivers + cgroupFsDriver = "cgroupfs" + cgroupSystemdDriver = "systemd" +) + +func getMemoryResources(config containertypes.Resources) *specs.Memory { + memory := specs.Memory{} + + if config.Memory > 0 { + limit := uint64(config.Memory) + memory.Limit = &limit + } + + if config.MemoryReservation > 0 { + reservation := uint64(config.MemoryReservation) + memory.Reservation = &reservation + } + + if config.MemorySwap != 0 { + swap := uint64(config.MemorySwap) + memory.Swap = &swap + } + + if config.MemorySwappiness != nil { + swappiness := uint64(*config.MemorySwappiness) + memory.Swappiness = &swappiness + } + + if config.KernelMemory != 0 { + kernelMemory := uint64(config.KernelMemory) + memory.Kernel = &kernelMemory + } + + return &memory +} + +func getCPUResources(config containertypes.Resources) *specs.CPU { + cpu := specs.CPU{} + + if config.CPUShares != 0 { + shares := uint64(config.CPUShares) + cpu.Shares = &shares + } + + if config.CpusetCpus != "" { + cpuset := config.CpusetCpus + cpu.Cpus = &cpuset + } + + if config.CpusetMems != "" { + cpuset := config.CpusetMems + cpu.Mems = &cpuset + } + + if config.NanoCPUs > 0 { + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + period := uint64(100 * time.Millisecond / time.Microsecond) + quota := uint64(config.NanoCPUs) * period / 1e9 + cpu.Period = &period + cpu.Quota = "a + } + + if config.CPUPeriod != 0 { + period := uint64(config.CPUPeriod) + cpu.Period = &period + } + + if config.CPUQuota != 0 { + quota := uint64(config.CPUQuota) + cpu.Quota = "a + } + + if config.CPURealtimePeriod != 0 { + period := uint64(config.CPURealtimePeriod) + cpu.RealtimePeriod = &period + } + + if config.CPURealtimeRuntime != 0 { + runtime := uint64(config.CPURealtimeRuntime) + cpu.RealtimeRuntime = &runtime + } + + return &cpu +} + +func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) { + var stat syscall.Stat_t + var blkioWeightDevices []specs.WeightDevice + + for _, weightDevice := range config.BlkioWeightDevice { + if err := syscall.Stat(weightDevice.Path, &stat); err != nil { + return nil, err + } + weight := weightDevice.Weight + d := specs.WeightDevice{Weight: &weight} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWeightDevices = append(blkioWeightDevices, d) + } + + return blkioWeightDevices, nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + if opt == "no-new-privileges" { + container.NoNewPrivileges = true + continue + } + + var con []string + if strings.Contains(opt, "=") { + con = strings.SplitN(opt, "=", 2) + } else if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 1.14, use `=` instead.") + } + + if len(con) != 2 { + return fmt.Errorf("invalid --security-opt 1: %q", opt) + } + + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + case "seccomp": + container.SeccompProfile = con[1] + default: + return fmt.Errorf("invalid --security-opt 2: %q", opt) + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.ThrottleDevice, error) { + var throttleDevices []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, d := range devs { + if err := syscall.Stat(d.Path, &stat); err != nil { + return nil, err + } + rate := d.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + throttleDevices = append(throttleDevices, d) + } + + return throttleDevices, nil +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + // Docker 1.11 and above doesn't actually run on kernels older than 3.4, + // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). + if !kernel.CheckKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) + } + } + return nil +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + if hostConfig.ShmSize == 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + var err error + opts, err := daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode, hostConfig.Privileged) + if err != nil { + return err + } + hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...) + if hostConfig.MemorySwappiness == nil { + defaultSwappiness := int64(-1) + hostConfig.MemorySwappiness = &defaultSwappiness + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { + warnings := []string{} + + // memory subsystem checks and adjustments + if resources.Memory != 0 && resources.Memory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.Memory = 0 + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") + logrus.Warn("Your kernel does not support swap limit capabilities,or the cgroup is not mounted. Memory limited without swap.") + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") + } + if resources.Memory == 0 && resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") + logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.") + resources.MemorySwappiness = nil + } + if resources.MemorySwappiness != nil { + swappiness := *resources.MemorySwappiness + if swappiness < -1 || swappiness > 100 { + return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) + } + } + if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.MemoryReservation = 0 + } + if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB") + } + if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") + } + if resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.KernelMemory = 0 + } + if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") + } + if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + } + if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + if *resources.OomKillDisable { + warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.") + logrus.Warn("Your kernel does not support OomKillDisable. OomKillDisable discarded.") + } + resources.OomKillDisable = nil + } + + if resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + logrus.Warn("Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + resources.PidsLimit = 0 + } + + // cpu subsystem checks and adjustments + if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") + } + if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) { + return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted") + } + // The highest precision we could get on Linux is 0.001, by setting + // cpu.cfs_period_us=1000ms + // cpu.cfs_quota=1ms + // See the following link for details: + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. + // The error message is 0.01 so that this is consistent with Windows + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + if resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + logrus.Warn("Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + resources.CPUShares = 0 + } + if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + logrus.Warn("Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + resources.CPUPeriod = 0 + } + if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { + return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") + } + if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + logrus.Warn("Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + resources.CPUQuota = 0 + } + if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { + return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") + } + if resources.CPUPercent > 0 { + warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) + logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS) + resources.CPUPercent = 0 + } + + // cpuset subsystem checks and adjustments + if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + logrus.Warn("Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + resources.CpusetCpus = "" + resources.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + logrus.Warn("Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + resources.BlkioWeight = 0 + } + if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") + } + if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { + return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) + } + if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + logrus.Warn("Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} + } + if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded") + resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O read limit in IO or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} + } + + return warnings, nil +} + +func (daemon *Daemon) getCgroupDriver() string { + cgroupDriver := cgroupFsDriver + + if UsingSystemd(daemon.configStore) { + cgroupDriver = cgroupSystemdDriver + } + return cgroupDriver +} + +// getCD gets the raw value of the native.cgroupdriver option, if set. +func getCD(config *Config) string { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + return val + } + return "" +} + +// VerifyCgroupDriver validates native.cgroupdriver +func VerifyCgroupDriver(config *Config) error { + cd := getCD(config) + if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { + return nil + } + return fmt.Errorf("native.cgroupdriver option %s not supported", cd) +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return getCD(config) == cgroupSystemdDriver +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err + } + + w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) + + // no matter err is nil or not, w could have data in itself. + warnings = append(warnings, w...) + + if err != nil { + return warnings, err + } + + if hostConfig.ShmSize < 0 { + return warnings, fmt.Errorf("SHM size can not be less than 0") + } + + if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { + return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) + } + + // ip-forwarding does not affect container with '--net=host' (or '--net=none') + if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warn("IPv4 forwarding is disabled. Networking will not work") + } + // check for various conflicting options with user namespaces + if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { + if hostConfig.Privileged { + return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + } + if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + } + if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + } + } + if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + if hostConfig.Runtime == "" { + hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() + } + + if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { + return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) + } + + return warnings, nil +} + +// platformReload update configuration with platform specific options +func (daemon *Daemon) platformReload(config *Config) map[string]string { + if config.IsValueSet("runtimes") { + daemon.configStore.Runtimes = config.Runtimes + // Always set the default one + daemon.configStore.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + } + + if config.DefaultRuntime != "" { + daemon.configStore.DefaultRuntime = config.DefaultRuntime + } + + // Update attributes + var runtimeList bytes.Buffer + for name, rt := range daemon.configStore.Runtimes { + if runtimeList.Len() > 0 { + runtimeList.WriteRune(' ') + } + runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) + } + + return map[string]string{ + "runtimes": runtimeList.String(), + "default-runtime": daemon.configStore.DefaultRuntime, + } +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + // Check for mutually incompatible config options + if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") + } + if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true") + } + if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { + config.bridgeConfig.EnableIPMasq = false + } + if err := VerifyCgroupDriver(config); err != nil { + return err + } + if config.CgroupParent != "" && UsingSystemd(config) { + if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + + if config.DefaultRuntime == "" { + config.DefaultRuntime = stockRuntimeName + } + if config.Runtimes == nil { + config.Runtimes = make(map[string]types.Runtime) + } + config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + return checkKernel() +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") + if err != nil { + return err + } + mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) + if err != nil { + return err + } + maxThreads := (mtint / 100) * 90 + debug.SetMaxThreads(maxThreads) + logrus.Debugf("Golang's threads limit set to %d", maxThreads) + return nil +} + +// configureKernelSecuritySupport configures and validates security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + if config.EnableSelinuxSupport { + if !selinuxEnabled() { + logrus.Warn("Docker could not enable SELinux on the host system") + } + } else { + selinuxSetDisabled() + } + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + if len(activeSandboxes) > 0 { + logrus.Info("There are old running containers, the network config will not take affect") + return controller, nil + } + + // Initialize default network on "null" + if n, _ := controller.NetworkByName("none"); n == nil { + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { + return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) + } + } + + // Initialize default network on "host" + if n, _ := controller.NetworkByName("host"); n == nil { + if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { + return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) + } + } + + // Clear stale bridge network + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return nil, fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } else { + removeDefaultBridgeInterface() + } + + return controller, nil +} + +func driverOptions(config *Config) []nwconfig.Option { + bridgeConfig := options.Generic{ + "EnableIPForwarding": config.bridgeConfig.EnableIPForward, + "EnableIPTables": config.bridgeConfig.EnableIPTables, + "EnableUserlandProxy": config.bridgeConfig.EnableUserlandProxy, + "UserlandProxyPath": config.bridgeConfig.UserlandProxyPath} + bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} + + dOptions := []nwconfig.Option{} + dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption)) + return dOptions +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + bridgeName := bridge.DefaultBridgeName + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableIPMasquerade: strconv.FormatBool(config.bridgeConfig.EnableIPMasq), + bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + } + + // --ip processing + if config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + } + + var ( + ipamV4Conf *libnetwork.IpamConf + ipamV6Conf *libnetwork.IpamConf + ) + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + } + + var deferIPv6Alloc bool + if config.bridgeConfig.FixedCIDRv6 != "" { + _, fCIDRv6, err := net.ParseCIDR(config.bridgeConfig.FixedCIDRv6) + if err != nil { + return err + } + + // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has + // at least 48 host bits, we need to guarantee the current behavior where the containers' + // IPv6 addresses will be constructed based on the containers' interface MAC address. + // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints + // on this network until after the driver has created the endpoint and returned the + // constructed address. Libnetwork will then reserve this address with the ipam driver. + ones, _ := fCIDRv6.Mask.Size() + deferIPv6Alloc = ones <= 80 + + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.PreferredPool = fCIDRv6.String() + + // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 + // address belongs to the same network, we need to inform libnetwork about it, so + // that it can be reserved with IPAM and it will not be given away to somebody else + for _, nw6 := range nw6List { + if fCIDRv6.Contains(nw6.IP) { + ipamV6Conf.Gateway = nw6.IP.String() + break + } + } + } + + if config.bridgeConfig.DefaultGatewayIPv6 != nil { + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.bridgeConfig.DefaultGatewayIPv6.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + if ipamV6Conf != nil { + v6Conf = append(v6Conf, ipamV6Conf) + } + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionEnableIPv6(config.bridgeConfig.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) + if err != nil { + return fmt.Errorf("Error creating default \"bridge\" network: %v", err) + } + return nil +} + +// Remove default bridge interface if present (--bridge=none use case) +func removeDefaultBridgeInterface() { + if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { + if err := netlink.LinkDel(lnk); err != nil { + logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) + } + } +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return daemon.setupInitLayer +} + +// Parse the remapped root (user namespace) option, which can be one of: +// username - valid username from /etc/passwd +// username:groupname - valid username; valid groupname from /etc/group +// uid - 32-bit unsigned int valid Linux UID value +// uid:gid - uid value; 32-bit unsigned int Linux GID value +// +// If no groupname is specified, and a username is specified, an attempt +// will be made to lookup a gid for that username as a groupname +// +// If names are used, they are verified to exist in passwd/group +func parseRemappedRoot(usergrp string) (string, string, error) { + + var ( + userID, groupID int + username, groupname string + ) + + idparts := strings.Split(usergrp, ":") + if len(idparts) > 2 { + return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) + } + + if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { + // must be a uid; take it as valid + userID = int(uid) + luser, err := idtools.LookupUID(userID) + if err != nil { + return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) + } + username = luser.Name + if len(idparts) == 1 { + // if the uid was numeric and no gid was specified, take the uid as the gid + groupID = userID + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } + } else { + lookupName := idparts[0] + // special case: if the user specified "default", they want Docker to create or + // use (after creation) the "dockremap" user/group for root remapping + if lookupName == defaultIDSpecifier { + lookupName = defaultRemappedID + } + luser, err := idtools.LookupUser(lookupName) + if err != nil && idparts[0] != defaultIDSpecifier { + // error if the name requested isn't the special "dockremap" ID + return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) + } else if err != nil { + // special case-- if the username == "default", then we have been asked + // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} + // ranges will be used for the user and group mappings in user namespaced containers + _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) + if err == nil { + return defaultRemappedID, defaultRemappedID, nil + } + return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) + } + username = luser.Name + if len(idparts) == 1 { + // we only have a string username, and no group specified; look up gid from username as group + group, err := idtools.LookupGroup(lookupName) + if err != nil { + return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) + } + groupID = group.Gid + groupname = group.Name + } + } + + if len(idparts) == 2 { + // groupname or gid is separately specified and must be resolved + // to an unsigned 32-bit gid + if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { + // must be a gid, take it as valid + groupID = int(gid) + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) + } + groupname = lgrp.Name + } else { + // not a number; attempt a lookup + if _, err := idtools.LookupGroup(idparts[1]); err != nil { + return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) + } + groupname = idparts[1] + } + } + return username, groupname, nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + if runtime.GOOS != "linux" && config.RemappedRoot != "" { + return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + } + + // if the daemon was started with remapped root option, parse + // the config option to the int uid,gid values + var ( + uidMaps, gidMaps []idtools.IDMap + ) + if config.RemappedRoot != "" { + username, groupname, err := parseRemappedRoot(config.RemappedRoot) + if err != nil { + return nil, nil, err + } + if username == "root" { + // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op + // effectively + logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") + return uidMaps, gidMaps, nil + } + logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) + // update remapped root setting now that we have resolved them to actual names + config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) + + uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + if err != nil { + return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + } + } + return uidMaps, gidMaps, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) + // so that syscalls executing as non-root, operating on subdirectories of the graph root + // (e.g. mounted layers of a container) can traverse this path. + // The user namespace support will create subdirectories for the remapped root host uid:gid + // pair owned by that same uid:gid pair for proper write access to those needed metadata and + // layer content subtrees. + if _, err := os.Stat(rootDir); err == nil { + // root current exists; verify the access bits are correct by setting them + if err = os.Chmod(rootDir, 0711); err != nil { + return err + } + } else if os.IsNotExist(err) { + // no root exists yet, create it 0711 with root:root ownership + if err := os.MkdirAll(rootDir, 0711); err != nil { + return err + } + } + + // if user namespaces are enabled we will create a subtree underneath the specified root + // with any/all specified remapped root uid/gid options on the daemon creating + // a new subdirectory with ownership set to the remapped uid/gid (so as to allow + // `chdir()` to work for containers namespaced to that uid/gid) + if config.RemappedRoot != "" { + config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) + logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) + // Create the root directory if it doesn't exist + if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) + } + // we also need to verify that any pre-existing directories in the path to + // the graphroot won't block access to remapped root--if any pre-existing directory + // has strict permissions that don't allow "x", container start will fail, so + // better to warn and fail now + dirPath := config.Root + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if !idtools.CanAccess(dirPath, rootUID, rootGID) { + return fmt.Errorf("A subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories.", config.Root) + } + } + } + return nil +} + +// registerLinks writes the links to a file. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := runconfigopts.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.GetContainer(name) + if err != nil { + return fmt.Errorf("Could not get container for %s", name) + } + for child.HostConfig.NetworkMode.IsContainer() { + parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) + child, err = daemon.GetContainer(parts[1]) + if err != nil { + return fmt.Errorf("Could not get container for %s", parts[1]) + } + } + if child.HostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } + if err := daemon.registerLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + return container.WriteHostConfig() +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + s := &types.StatsJSON{} + cgs := stats.CgroupStats + if cgs != nil { + s.BlkioStats = types.BlkioStats{ + IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), + IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), + } + cpu := cgs.CpuStats + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: cpu.CpuUsage.TotalUsage, + PercpuUsage: cpu.CpuUsage.PercpuUsage, + UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, + UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + }, + ThrottlingData: types.ThrottlingData{ + Periods: cpu.ThrottlingData.Periods, + ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, + ThrottledTime: cpu.ThrottlingData.ThrottledTime, + }, + } + mem := cgs.MemoryStats.Usage + s.MemoryStats = types.MemoryStats{ + Usage: mem.Usage, + MaxUsage: mem.MaxUsage, + Stats: cgs.MemoryStats.Stats, + Failcnt: mem.Failcnt, + Limit: mem.Limit, + } + // if the container does not set memory limit, use the machineMemory + if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 { + s.MemoryStats.Limit = daemon.statsCollector.machineMemory + } + if cgs.PidsStats != nil { + s.PidsStats = types.PidsStats{ + Current: cgs.PidsStats.Current, + } + } + } + s.Read, err = ptypes.Timestamp(stats.Timestamp) + if err != nil { + return nil, err + } + return s, nil +} + +// setDefaultIsolation determines the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} + +// setupDaemonProcess sets various settings for the daemon's process +func setupDaemonProcess(config *Config) error { + // setup the daemons oom_score_adj + return setupOOMScoreAdj(config.OOMScoreAdjust) +} + +func setupOOMScoreAdj(score int) error { + f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) + if err != nil { + return err + } + + stringScore := strconv.Itoa(score) + _, err = f.WriteString(stringScore) + if os.IsPermission(err) { + // Setting oom_score_adj does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. + if !rsystem.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) + } + return nil + } + f.Close() + return err +} + +func (daemon *Daemon) initCgroupsPath(path string) error { + if path == "/" || path == "." { + return nil + } + + if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 { + return nil + } + + // Recursively create cgroup to ensure that the system and all parent cgroups have values set + // for the period and runtime as this limits what the children can be set to. + daemon.initCgroupsPath(filepath.Dir(path)) + + _, root, err := cgroups.FindCgroupMountpointAndRoot("cpu") + if err != nil { + return err + } + + path = filepath.Join(root, path) + sysinfo := sysinfo.New(true) + if sysinfo.CPURealtimePeriod && daemon.configStore.CPURealtimePeriod != 0 { + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_period_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimePeriod, 10)), 0700); err != nil { + return err + } + } + if sysinfo.CPURealtimeRuntime && daemon.configStore.CPURealtimeRuntime != 0 { + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_runtime_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimeRuntime, 10)), 0700); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + if daemon.configStore.SeccompProfile != "" { + daemon.seccompProfilePath = daemon.configStore.SeccompProfile + b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile) + if err != nil { + return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err) + } + daemon.seccompProfile = b + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_unix_test.go b/vendor/github.com/moby/moby/daemon/daemon_unix_test.go new file mode 100644 index 0000000..6250d35 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_unix_test.go @@ -0,0 +1,283 @@ +// +build !windows,!solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" +) + +// Unix test as uses settings which are not available on Windows +func TestAdjustCPUShares(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMinCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMaxCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares) + } + + hostConfig.CPUShares = 0 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +// Unix test as uses settings which are not available on Windows +func TestAdjustCPUSharesNoAdjustment(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != linuxMinCPUShares-1 { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != linuxMaxCPUShares+1 { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1) + } + + hostConfig.CPUShares = 0 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +// Unix test as uses settings which are not available on Windows +func TestParseSecurityOptWithDeprecatedColon(t *testing.T) { + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor=test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test seccomp + sp := "/path/to/seccomp_test.json" + config.SecurityOpt = []string{"seccomp=" + sp} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.SeccompProfile != sp { + t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, container.SeccompProfile) + } + + // test valid label + config.SecurityOpt = []string{"label=user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} + +func TestParseSecurityOpt(t *testing.T) { + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor=test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test seccomp + sp := "/path/to/seccomp_test.json" + config.SecurityOpt = []string{"seccomp=" + sp} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.SeccompProfile != sp { + t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile) + } + + // test valid label + config.SecurityOpt = []string{"label=user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} + +func TestNetworkOptions(t *testing.T) { + daemon := &Daemon{} + dconfigCorrect := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "consul://localhost:8500", + ClusterAdvertise: "192.168.0.1:8000", + }, + } + + if _, err := daemon.networkOptions(dconfigCorrect, nil, nil); err != nil { + t.Fatalf("Expect networkOptions success, got error: %v", err) + } + + dconfigWrong := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "consul://localhost:8500://test://bbb", + }, + } + + if _, err := daemon.networkOptions(dconfigWrong, nil, nil); err == nil { + t.Fatalf("Expected networkOptions error, got nil") + } +} + +func TestMigratePre17Volumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "test-daemon-volumes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + volumeRoot := filepath.Join(rootDir, "volumes") + err = os.MkdirAll(volumeRoot, 0755) + if err != nil { + t.Fatal(err) + } + + containerRoot := filepath.Join(rootDir, "containers") + cid := "1234" + err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755) + + vid := "5678" + vfsPath := filepath.Join(rootDir, "vfs", "dir", vid) + err = os.MkdirAll(vfsPath, 0755) + if err != nil { + t.Fatal(err) + } + + config := []byte(` + { + "ID": "` + cid + `", + "Volumes": { + "/foo": "` + vfsPath + `", + "/bar": "/foo", + "/quux": "/quux" + }, + "VolumesRW": { + "/foo": true, + "/bar": true, + "/quux": false + } + } + `) + + volStore, err := store.New(volumeRoot) + if err != nil { + t.Fatal(err) + } + drv, err := local.New(volumeRoot, 0, 0) + if err != nil { + t.Fatal(err) + } + volumedrivers.Register(drv, volume.DefaultDriverName) + + daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore} + err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600) + if err != nil { + t.Fatal(err) + } + c, err := daemon.load(cid) + if err != nil { + t.Fatal(err) + } + if err := daemon.verifyVolumesInfo(c); err != nil { + t.Fatal(err) + } + + expected := map[string]volume.MountPoint{ + "/foo": {Destination: "/foo", RW: true, Name: vid}, + "/bar": {Source: "/foo", Destination: "/bar", RW: true}, + "/quux": {Source: "/quux", Destination: "/quux", RW: false}, + } + for id, mp := range c.MountPoints { + x, exists := expected[id] + if !exists { + t.Fatal("volume not migrated") + } + if mp.Source != x.Source || mp.Destination != x.Destination || mp.RW != x.RW || mp.Name != x.Name { + t.Fatalf("got unexpected mountpoint, expected: %+v, got: %+v", x, mp) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_unsupported.go b/vendor/github.com/moby/moby/daemon/daemon_unsupported.go new file mode 100644 index 0000000..cb1acf6 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux,!freebsd,!windows,!solaris + +package daemon + +const platformSupported = false diff --git a/vendor/github.com/moby/moby/daemon/daemon_windows.go b/vendor/github.com/moby/moby/daemon/daemon_windows.go new file mode 100644 index 0000000..51ad68b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_windows.go @@ -0,0 +1,604 @@ +package daemon + +import ( + "fmt" + "os" + "strings" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/datastore" + winlibnetwork "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + blkiodev "github.com/opencontainers/runc/libcontainer/configs" + "golang.org/x/sys/windows" +) + +const ( + defaultNetworkSpace = "172.16.0.0/12" + platformSupported = true + windowsMinCPUShares = 1 + windowsMaxCPUShares = 10000 + windowsMinCPUPercent = 1 + windowsMaxCPUPercent = 100 + windowsMinCPUCount = 1 +) + +func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) { + return nil, nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + return nil +} + +func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if hostConfig == nil { + return nil + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) { + warnings := []string{} + + if !isHyperv { + // The processor resource controls are mutually exclusive on + // Windows Server Containers, the order of precedence is + // CPUCount first, then CPUShares, and CPUPercent last. + if resources.CPUCount > 0 { + if resources.CPUShares > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + resources.CPUShares = 0 + } + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + resources.CPUPercent = 0 + } + } else if resources.CPUShares > 0 { + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + resources.CPUPercent = 0 + } + } + } + + if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { + return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) + } + if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { + return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) + } + if resources.CPUCount < 0 { + return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") + } + + if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUShares > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") + } + // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. + // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + if len(resources.BlkioDeviceReadBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") + } + if len(resources.BlkioDeviceReadIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") + } + if len(resources.BlkioDeviceWriteBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") + } + if len(resources.BlkioDeviceWriteIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") + } + if resources.BlkioWeight > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") + } + if len(resources.BlkioWeightDevice) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") + } + if resources.CgroupParent != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") + } + if resources.CPUPeriod != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") + } + if resources.CpusetCpus != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") + } + if resources.CpusetMems != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") + } + if resources.KernelMemory != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") + } + if resources.MemoryReservation != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") + } + if resources.MemorySwap != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") + } + if resources.OomKillDisable != nil && *resources.OomKillDisable { + return warnings, fmt.Errorf("invalid option: Windows does not support OomKillDisable") + } + if resources.PidsLimit != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") + } + if len(resources.Ulimits) != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") + } + return warnings, nil +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + + hyperv := daemon.runAsHyperVContainer(hostConfig) + if !hyperv && system.IsWindowsClient() { + // @engine maintainers. This block should not be removed. It partially enforces licensing + // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. + return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + + w, err := verifyContainerResources(&hostConfig.Resources, hyperv) + warnings = append(warnings, w...) + if err != nil { + return warnings, err + } + return warnings, nil +} + +// platformReload update configuration with platform specific options +func (daemon *Daemon) platformReload(config *Config) map[string]string { + return map[string]string{} +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + // Validate the OS version. Note that docker.exe must be manifested for this + // call to return the correct version. + osv := system.GetOSVersion() + if osv.MajorVersion < 10 { + return fmt.Errorf("This version of Windows does not support the docker daemon") + } + if osv.Build < 14393 { + return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") + } + + vmcompute := windows.NewLazySystemDLL("vmcompute.dll") + if vmcompute.Load() != nil { + return fmt.Errorf("Failed to load vmcompute.dll. Ensure that the Containers role is installed.") + } + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + return nil +} + +// configureMaxThreads sets the Go runtime max threads threshold +func configureMaxThreads(config *Config) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, nil, nil) + if err != nil { + return nil, err + } + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") + if err != nil { + return nil, err + } + + // Remove networks not present in HNS + for _, v := range controller.Networks() { + options := v.Info().DriverOptions() + hnsid := options[winlibnetwork.HNSID] + found := false + + for _, v := range hnsresponse { + if v.Id == hnsid { + found = true + break + } + } + + if !found { + // global networks should not be deleted by local HNS + if v.Info().Scope() != datastore.GlobalScope { + err = v.Delete() + if err != nil { + logrus.Errorf("Error occurred when removing network %v", err) + } + } + } + } + + _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) + if err != nil { + return nil, err + } + + defaultNetworkExists := false + + if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + options := network.Info().DriverOptions() + for _, v := range hnsresponse { + if options[winlibnetwork.HNSID] == v.Id { + defaultNetworkExists = true + break + } + } + } + + // discover and add HNS networks to windows + // network that exist are removed and added again + for _, v := range hnsresponse { + var n libnetwork.Network + s := func(current libnetwork.Network) bool { + options := current.Info().DriverOptions() + if options[winlibnetwork.HNSID] == v.Id { + n = current + return true + } + return false + } + + controller.WalkNetworks(s) + if n != nil { + // global networks should not be deleted by local HNS + if n.Info().Scope() == datastore.GlobalScope { + continue + } + v.Name = n.Name() + // This will not cause network delete from HNS as the network + // is not yet populated in the libnetwork windows driver + n.Delete() + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: v.Name, + winlibnetwork.HNSID: v.Id, + } + + v4Conf := []*libnetwork.IpamConf{} + for _, subnet := range v.Subnets { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnet.AddressPrefix + ipamV4Conf.Gateway = subnet.GatewayAddress + v4Conf = append(v4Conf, &ipamV4Conf) + } + + name := v.Name + + // If there is no nat network create one from the first NAT network + // encountered + if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) { + name = runconfig.DefaultDaemonNetworkMode().NetworkName() + defaultNetworkExists = true + } + + v6Conf := []*libnetwork.IpamConf{} + _, err := controller.NewNetwork(strings.ToLower(v.Type), name, "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + ) + + if err != nil { + logrus.Errorf("Error occurred when creating network %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + return nil + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), + } + + var ipamOption libnetwork.NetworkOption + var subnetPrefix string + + if config.bridgeConfig.FixedCIDR != "" { + subnetPrefix = config.bridgeConfig.FixedCIDR + } else { + // TP5 doesn't support properly detecting subnet + osv := system.GetOSVersion() + if osv.Build < 14360 { + subnetPrefix = defaultNetworkSpace + } + } + + if subnetPrefix != "" { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnetPrefix + v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) + } + + _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + ipamOption, + ) + + if err != nil { + return fmt.Errorf("Error creating default network: %v", err) + } + + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. As of Windows TP4, links are not supported. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMountsByID(in string) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // Create the root directory if it doesn't exists + if err := system.MkdirAllWithACL(config.Root, 0); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// runasHyperVContainer returns true if we are going to run as a Hyper-V container +func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { + if hostConfig.Isolation.IsDefault() { + // Container is set to use the default, so take the default from the daemon configuration + return daemon.defaultIsolation.IsHyperV() + } + + // Container is requesting an isolation mode. Honour it. + return hostConfig.Isolation.IsHyperV() + +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + // We do not mount if a Hyper-V container + if !daemon.runAsHyperVContainer(container.HostConfig) { + return daemon.Mount(container) + } + return nil +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + // We do not unmount if a Hyper-V container + if !daemon.runAsHyperVContainer(container.HostConfig) { + return daemon.Unmount(container) + } + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + + // Obtain the stats from HCS via libcontainerd + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + + // Start with an empty structure + s := &types.StatsJSON{} + + // Populate the CPU/processor statistics + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: stats.Processor.TotalRuntime100ns, + UsageInKernelmode: stats.Processor.RuntimeKernel100ns, + UsageInUsermode: stats.Processor.RuntimeKernel100ns, + }, + } + + // Populate the memory statistics + s.MemoryStats = types.MemoryStats{ + Commit: stats.Memory.UsageCommitBytes, + CommitPeak: stats.Memory.UsageCommitPeakBytes, + PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes, + } + + // Populate the storage statistics + s.StorageStats = types.StorageStats{ + ReadCountNormalized: stats.Storage.ReadCountNormalized, + ReadSizeBytes: stats.Storage.ReadSizeBytes, + WriteCountNormalized: stats.Storage.WriteCountNormalized, + WriteSizeBytes: stats.Storage.WriteSizeBytes, + } + + // Populate the network statistics + s.Networks = make(map[string]types.NetworkStats) + + for _, nstats := range stats.Network { + s.Networks[nstats.EndpointId] = types.NetworkStats{ + RxBytes: nstats.BytesReceived, + RxPackets: nstats.PacketsReceived, + RxDropped: nstats.DroppedPacketsIncoming, + TxBytes: nstats.BytesSent, + TxPackets: nstats.PacketsSent, + TxDropped: nstats.DroppedPacketsOutgoing, + } + } + + // Set the timestamp + s.Stats.Read = stats.Timestamp + s.Stats.NumProcs = platform.NumProcs() + + return s, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + daemon.defaultIsolation = containertypes.Isolation("process") + // On client SKUs, default to Hyper-V + if system.IsWindowsClient() { + daemon.defaultIsolation = containertypes.Isolation("hyperv") + } + for _, option := range daemon.configStore.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return err + } + key = strings.ToLower(key) + switch key { + + case "isolation": + if !containertypes.Isolation(val).IsValid() { + return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) + } + if containertypes.Isolation(val).IsHyperV() { + daemon.defaultIsolation = containertypes.Isolation("hyperv") + } + if containertypes.Isolation(val).IsProcess() { + if system.IsWindowsClient() { + // @engine maintainers. This block should not be removed. It partially enforces licensing + // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. + return fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + daemon.defaultIsolation = containertypes.Isolation("process") + } + default: + return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) + } + } + + logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} + +func setupDaemonProcess(config *Config) error { + return nil +} + +// verifyVolumesInfo is a no-op on windows. +// This is called during daemon initialization to migrate volumes from pre-1.7. +// volumes were not supported on windows pre-1.7 +func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/debugtrap.go b/vendor/github.com/moby/moby/daemon/debugtrap.go new file mode 100644 index 0000000..209048b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/debugtrap.go @@ -0,0 +1,62 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pkg/errors" +) + +const dataStructuresLogNameTemplate = "daemon-data-%s.log" + +// dumpDaemon appends the daemon datastructures into file in dir and returns full path +// to that file. +func (d *Daemon) dumpDaemon(dir string) (string, error) { + // Ensure we recover from a panic as we are doing this without any locking + defer func() { + recover() + }() + + path := filepath.Join(dir, fmt.Sprintf(dataStructuresLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + return "", errors.Wrap(err, "failed to open file to write the daemon datastructure dump") + } + defer f.Close() + + dump := struct { + containers interface{} + names interface{} + links interface{} + execs interface{} + volumes interface{} + images interface{} + layers interface{} + imageReferences interface{} + downloads interface{} + uploads interface{} + registry interface{} + plugins interface{} + }{ + containers: d.containers, + execs: d.execCommands, + volumes: d.volumes, + images: d.imageStore, + layers: d.layerStore, + imageReferences: d.referenceStore, + downloads: d.downloadManager, + uploads: d.uploadManager, + registry: d.RegistryService, + plugins: d.PluginStore, + names: d.nameIndex, + links: d.linkIndex, + } + + spew.Fdump(f, dump) // Does not return an error + f.Sync() + return path, nil +} diff --git a/vendor/github.com/moby/moby/daemon/debugtrap_unix.go b/vendor/github.com/moby/moby/daemon/debugtrap_unix.go new file mode 100644 index 0000000..d650eb7 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/debugtrap_unix.go @@ -0,0 +1,33 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + "syscall" + + "github.com/Sirupsen/logrus" + stackdump "github.com/docker/docker/pkg/signal" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + path, err := stackdump.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + path, err = d.dumpDaemon(root) + if err != nil { + logrus.WithError(err).Error("failed to write daemon datastructure dump") + } else { + logrus.Infof("daemon datastructure dump written to %s", path) + } + } + }() +} diff --git a/vendor/github.com/moby/moby/daemon/debugtrap_unsupported.go b/vendor/github.com/moby/moby/daemon/debugtrap_unsupported.go new file mode 100644 index 0000000..f5b9170 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package daemon + +func (d *Daemon) setupDumpStackTrap(_ string) { + return +} diff --git a/vendor/github.com/moby/moby/daemon/debugtrap_windows.go b/vendor/github.com/moby/moby/daemon/debugtrap_windows.go new file mode 100644 index 0000000..fb20c9d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/debugtrap_windows.go @@ -0,0 +1,52 @@ +package daemon + +import ( + "fmt" + "os" + "syscall" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + // Windows does not support signals like *nix systems. So instead of + // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be + // signaled. ACL'd to builtin administrators and local system + ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) + sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") + if err != nil { + logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", ev, err.Error()) + return + } + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + h, err := system.CreateEvent(&sa, false, false, ev) + if h == 0 || err != nil { + logrus.Errorf("failed to create debug stackdump event %s: %s", ev, err.Error()) + return + } + go func() { + logrus.Debugf("Stackdump - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + path, err := signal.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + path, err = d.dumpDaemon(root) + if err != nil { + logrus.WithError(err).Error("failed to write daemon datastructure dump") + } else { + logrus.Infof("daemon datastructure dump written to %s", path) + } + } + }() +} diff --git a/vendor/github.com/moby/moby/daemon/delete.go b/vendor/github.com/moby/moby/daemon/delete.go new file mode 100644 index 0000000..10028e1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/delete.go @@ -0,0 +1,168 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + volumestore "github.com/docker/docker/volume/store" + "github.com/pkg/errors" +) + +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. +func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Container state RemovalInProgress should be used to avoid races. + if inProgress := container.SetRemovalInProgress(); inProgress { + err := fmt.Errorf("removal of container %s is already in progress", name) + return apierrors.NewBadRequestError(err) + } + defer container.ResetRemovalInProgress() + + // check if container wasn't deregistered by previous rm since Get + if c := daemon.containers.Get(container.ID); c == nil { + return nil + } + + if config.RemoveLink { + return daemon.rmLink(container, name) + } + + err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume) + containerActions.WithValues("delete").UpdateSince(start) + + return err +} + +func (daemon *Daemon) rmLink(container *container.Container, name string) error { + if name[0] != '/' { + name = "/" + name + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + + parent = strings.TrimSuffix(parent, "/") + pe, err := daemon.nameIndex.Get(parent) + if err != nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + + daemon.releaseName(name) + parentContainer, _ := daemon.GetContainer(pe) + if parentContainer != nil { + daemon.linkIndex.unlink(name, container, parentContainer) + if err := daemon.updateNetwork(parentContainer); err != nil { + logrus.Debugf("Could not update network to remove link %s: %v", n, err) + } + } + return nil +} + +// cleanupContainer unregisters a container from the daemon, stops stats +// collection and cleanly removes contents and metadata from the filesystem. +func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) { + if container.IsRunning() { + if !forceRemove { + err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID) + return apierrors.NewRequestConflictError(err) + } + if err := daemon.Kill(container); err != nil { + return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) + } + } + + // stop collection of stats for the container regardless + // if stats are currently getting collected. + daemon.statsCollector.stopCollection(container) + + if err = daemon.containerStop(container, 3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.SetDead() + + // Save container state to disk. So that if error happens before + // container meta file got removed from disk, then a restart of + // docker should not make a dead container alive. + if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { + logrus.Errorf("Error saving dying container to disk: %v", err) + } + + // If force removal is required, delete container from various + // indexes even if removal failed. + defer func() { + if err == nil || forceRemove { + daemon.nameIndex.Delete(container.ID) + daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + if e := daemon.removeMountPoints(container, removeVolume); e != nil { + logrus.Error(e) + } + daemon.LogContainerEvent(container, "destroy") + } + }() + + if err = os.RemoveAll(container.Root); err != nil { + return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) + } + + // When container creation fails and `RWLayer` has not been created yet, we + // do not call `ReleaseRWLayer` + if container.RWLayer != nil { + metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) + layer.LogReleaseMetadata(metadata) + if err != nil && err != layer.ErrMountDoesNotExist { + return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) + } + } + + return nil +} + +// VolumeRm removes the volume with the given name. +// If the volume is referenced by a container it is not removed +// This is called directly from the Engine API +func (daemon *Daemon) VolumeRm(name string, force bool) error { + err := daemon.volumeRm(name) + if err != nil && volumestore.IsInUse(err) { + return apierrors.NewRequestConflictError(err) + } + if err == nil || force { + daemon.volumes.Purge(name) + return nil + } + return err +} + +func (daemon *Daemon) volumeRm(name string) error { + v, err := daemon.volumes.Get(name) + if err != nil { + return err + } + + if err := daemon.volumes.Remove(v); err != nil { + return errors.Wrap(err, "unable to remove volume") + } + daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/delete_test.go b/vendor/github.com/moby/moby/daemon/delete_test.go new file mode 100644 index 0000000..1fd27e1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/delete_test.go @@ -0,0 +1,43 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" +) + +func TestContainerDoubleDelete(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + daemon.containers = container.NewMemoryStore() + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "test", + State: container.NewState(), + Config: &containertypes.Config{}, + }, + } + daemon.containers.Add(container.ID, container) + + // Mark the container as having a delete in progress + container.SetRemovalInProgress() + + // Try to remove the container when its state is removalInProgress. + // It should return an error indicating it is under removal progress. + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err == nil { + t.Fatalf("expected err: %v, got nil", fmt.Sprintf("removal of container %s is already in progress", container.ID)) + } +} diff --git a/vendor/github.com/moby/moby/daemon/discovery.go b/vendor/github.com/moby/moby/daemon/discovery.go new file mode 100644 index 0000000..ee4ea87 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/discovery.go @@ -0,0 +1,215 @@ +package daemon + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + + // Register the libkv backends for discovery. + _ "github.com/docker/docker/pkg/discovery/kv" +) + +const ( + // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. + defaultDiscoveryHeartbeat = 20 * time.Second + // defaultDiscoveryTTLFactor is the default TTL factor for discovery + defaultDiscoveryTTLFactor = 3 +) + +var errDiscoveryDisabled = errors.New("discovery is disabled") + +type discoveryReloader interface { + discovery.Watcher + Stop() + Reload(backend, address string, clusterOpts map[string]string) error + ReadyCh() <-chan struct{} +} + +type daemonDiscoveryReloader struct { + backend discovery.Backend + ticker *time.Ticker + term chan bool + readyCh chan struct{} +} + +func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + return d.backend.Watch(stopCh) +} + +func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} { + return d.readyCh +} + +func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { + var ( + heartbeat = defaultDiscoveryHeartbeat + ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat + ) + + if hb, ok := clusterOpts["discovery.heartbeat"]; ok { + h, err := strconv.Atoi(hb) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if h <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.heartbeat must be positive") + } + + heartbeat = time.Duration(h) * time.Second + ttl = defaultDiscoveryTTLFactor * heartbeat + } + + if tstr, ok := clusterOpts["discovery.ttl"]; ok { + t, err := strconv.Atoi(tstr) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if t <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl must be positive") + } + + ttl = time.Duration(t) * time.Second + + if _, ok := clusterOpts["discovery.heartbeat"]; !ok { + h := int(t / defaultDiscoveryTTLFactor) + heartbeat = time.Duration(h) * time.Second + } + + if ttl <= heartbeat { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") + } + } + + return heartbeat, ttl, nil +} + +// initDiscovery initializes the nodes discovery subsystem by connecting to the specified backend +// and starts a registration loop to advertise the current node under the specified address. +func initDiscovery(backendAddress, advertiseAddress string, clusterOpts map[string]string) (discoveryReloader, error) { + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return nil, err + } + + reloader := &daemonDiscoveryReloader{ + backend: backend, + ticker: time.NewTicker(heartbeat), + term: make(chan bool), + readyCh: make(chan struct{}), + } + // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, + // but we never actually Watch() for nodes appearing and disappearing for the moment. + go reloader.advertiseHeartbeat(advertiseAddress) + return reloader, nil +} + +// advertiseHeartbeat registers the current node against the discovery backend using the specified +// address. The function never returns, as registration against the backend comes with a TTL and +// requires regular heartbeats. +func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) { + var ready bool + if err := d.initHeartbeat(address); err == nil { + ready = true + close(d.readyCh) + } + + for { + select { + case <-d.ticker.C: + if err := d.backend.Register(address); err != nil { + logrus.Warnf("Registering as %q in discovery failed: %v", address, err) + } else { + if !ready { + close(d.readyCh) + ready = true + } + } + case <-d.term: + return + } + } +} + +// initHeartbeat is used to do the first heartbeat. It uses a tight loop until +// either the timeout period is reached or the heartbeat is successful and returns. +func (d *daemonDiscoveryReloader) initHeartbeat(address string) error { + // Setup a short ticker until the first heartbeat has succeeded + t := time.NewTicker(500 * time.Millisecond) + defer t.Stop() + // timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service + timeout := time.After(60 * time.Second) + + for { + select { + case <-timeout: + return errors.New("timeout waiting for initial discovery") + case <-d.term: + return errors.New("terminated") + case <-t.C: + if err := d.backend.Register(address); err == nil { + return nil + } + } + } +} + +// Reload makes the watcher to stop advertising and reconfigures it to advertise in a new address. +func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error { + d.Stop() + + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return err + } + + d.backend = backend + d.ticker = time.NewTicker(heartbeat) + d.readyCh = make(chan struct{}) + + go d.advertiseHeartbeat(advertiseAddress) + return nil +} + +// Stop terminates the discovery advertising. +func (d *daemonDiscoveryReloader) Stop() { + d.ticker.Stop() + d.term <- true +} + +func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) { + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err != nil { + return 0, nil, err + } + + backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts) + if err != nil { + return 0, nil, err + } + return heartbeat, backend, nil +} + +// modifiedDiscoverySettings returns whether the discovery configuration has been modified or not. +func modifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { + if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { + return true + } + + if (config.ClusterOpts == nil && clusterOpts == nil) || + (config.ClusterOpts == nil && len(clusterOpts) == 0) || + (len(config.ClusterOpts) == 0 && clusterOpts == nil) { + return false + } + + return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) +} diff --git a/vendor/github.com/moby/moby/daemon/discovery_test.go b/vendor/github.com/moby/moby/daemon/discovery_test.go new file mode 100644 index 0000000..336973c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/discovery_test.go @@ -0,0 +1,164 @@ +package daemon + +import ( + "testing" + "time" +) + +func TestDiscoveryOpts(t *testing.T) { + clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("discovery.ttl < discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("discovery.ttl == discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("negative discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("negative discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("invalid discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.ttl": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("invalid discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + if ttl != 20*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + expected := 10 * defaultDiscoveryTTLFactor * time.Second + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } + + clusterOpts = map[string]string{"discovery.ttl": "30"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if ttl != 30*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) + } + + expected = 30 * time.Second / defaultDiscoveryTTLFactor + if heartbeat != expected { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) + } + + clusterOpts = map[string]string{} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != defaultDiscoveryHeartbeat { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) + } + + expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } +} + +func TestModifiedDiscoverySettings(t *testing.T) { + cases := []struct { + current *Config + modified *Config + expected bool + }{ + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", nil), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("baz", "bar", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "baz", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: true, + }, + } + + for _, c := range cases { + got := modifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) + if c.expected != got { + t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) + } + } +} + +func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { + return &Config{ + CommonConfig: CommonConfig{ + ClusterStore: backendAddr, + ClusterAdvertise: advertiseAddr, + ClusterOpts: opts, + }, + } +} diff --git a/vendor/github.com/moby/moby/daemon/disk_usage.go b/vendor/github.com/moby/moby/daemon/disk_usage.go new file mode 100644 index 0000000..c3b9186 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/disk_usage.go @@ -0,0 +1,100 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/volume" +) + +func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int { + tmpImages := daemon.imageStore.Map() + layerRefs := map[layer.ChainID]int{} + for id, img := range tmpImages { + dgst := digest.Digest(id) + if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { + continue + } + + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + } + } + + return layerRefs +} + +// SystemDiskUsage returns information about the daemon data disk usage +func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) { + // Retrieve container list + allContainers, err := daemon.Containers(&types.ContainerListOptions{ + Size: true, + All: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to retrieve container list: %v", err) + } + + // Get all top images with extra attributes + allImages, err := daemon.Images(filters.NewArgs(), false, true) + if err != nil { + return nil, fmt.Errorf("failed to retrieve image list: %v", err) + } + + // Get all local volumes + allVolumes := []*types.Volume{} + getLocalVols := func(v volume.Volume) error { + name := v.Name() + refs := daemon.volumes.Refs(v) + + tv := volumeToAPIType(v) + sz, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("failed to determine size of volume %v", name) + sz = -1 + } + tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))} + allVolumes = append(allVolumes, tv) + + return nil + } + + err = daemon.traverseLocalVolumes(getLocalVols) + if err != nil { + return nil, err + } + + // Get total layers size on disk + layerRefs := daemon.getLayerRefs() + allLayers := daemon.layerStore.Map() + var allLayersSize int64 + for _, l := range allLayers { + size, err := l.DiffSize() + if err == nil { + if _, ok := layerRefs[l.ChainID()]; ok { + allLayersSize += size + } else { + logrus.Warnf("found leaked image layer %v", l.ChainID()) + } + } else { + logrus.Warnf("failed to get diff size for layer %v", l.ChainID()) + } + + } + + return &types.DiskUsage{ + LayersSize: allLayersSize, + Containers: allContainers, + Volumes: allVolumes, + Images: allImages, + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/errors.go b/vendor/github.com/moby/moby/daemon/errors.go new file mode 100644 index 0000000..566a32f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/errors.go @@ -0,0 +1,57 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/reference" +) + +func (d *Daemon) imageNotExistToErrcode(err error) error { + if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { + if strings.Contains(dne.RefOrID, "@") { + e := fmt.Errorf("No such image: %s", dne.RefOrID) + return errors.NewRequestNotFoundError(e) + } + tag := reference.DefaultTag + ref, err := reference.ParseNamed(dne.RefOrID) + if err != nil { + e := fmt.Errorf("No such image: %s:%s", dne.RefOrID, tag) + return errors.NewRequestNotFoundError(e) + } + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + e := fmt.Errorf("No such image: %s:%s", ref.Name(), tag) + return errors.NewRequestNotFoundError(e) + } + return err +} + +type errNotRunning struct { + containerID string +} + +func (e errNotRunning) Error() string { + return fmt.Sprintf("Container %s is not running", e.containerID) +} + +func (e errNotRunning) ContainerIsRunning() bool { + return false +} + +func errContainerIsRestarting(containerID string) error { + err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) + return errors.NewRequestConflictError(err) +} + +func errExecNotFound(id string) error { + err := fmt.Errorf("No such exec instance '%s' found in daemon", id) + return errors.NewRequestNotFoundError(err) +} + +func errExecPaused(id string) error { + err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) + return errors.NewRequestConflictError(err) +} diff --git a/vendor/github.com/moby/moby/daemon/events.go b/vendor/github.com/moby/moby/daemon/events.go new file mode 100644 index 0000000..8fe8e1b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events.go @@ -0,0 +1,132 @@ +package daemon + +import ( + "strings" + "time" + + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + daemonevents "github.com/docker/docker/daemon/events" + "github.com/docker/libnetwork" +) + +// LogContainerEvent generates an event related to a container with only the default attributes. +func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { + daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) +} + +// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. +func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { + copyAttributes(attributes, container.Config.Labels) + if container.Config.Image != "" { + attributes["image"] = container.Config.Image + } + attributes["name"] = strings.TrimLeft(container.Name, "/") + + actor := events.Actor{ + ID: container.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.ContainerEventType, actor) +} + +// LogImageEvent generates an event related to an image with only the default attributes. +func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { + daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to an image with specific given attributes. +func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := daemon.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + daemon.EventsService.Log(action, events.ImageEventType, actor) +} + +// LogPluginEvent generates an event related to a plugin with only the default attributes. +func (daemon *Daemon) LogPluginEvent(pluginID, refName, action string) { + daemon.LogPluginEventWithAttributes(pluginID, refName, action, map[string]string{}) +} + +// LogPluginEventWithAttributes generates an event related to a plugin with specific given attributes. +func (daemon *Daemon) LogPluginEventWithAttributes(pluginID, refName, action string, attributes map[string]string) { + attributes["name"] = refName + actor := events.Actor{ + ID: pluginID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.PluginEventType, actor) +} + +// LogVolumeEvent generates an event related to a volume. +func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { + actor := events.Actor{ + ID: volumeID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.VolumeEventType, actor) +} + +// LogNetworkEvent generates an event related to a network with only the default attributes. +func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { + daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) +} + +// LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. +func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) { + attributes["name"] = nw.Name() + attributes["type"] = nw.Type() + actor := events.Actor{ + ID: nw.ID(), + Attributes: attributes, + } + daemon.EventsService.Log(action, events.NetworkEventType, actor) +} + +// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. +func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { + if daemon.EventsService != nil { + if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { + attributes["name"] = info.Name + } + actor := events.Actor{ + ID: daemon.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.DaemonEventType, actor) + } +} + +// SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. +func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { + ef := daemonevents.NewFilter(filter) + return daemon.EventsService.SubscribeTopic(since, until, ef) +} + +// UnsubscribeFromEvents stops the event subscription for a client by closing the +// channel where the daemon sends events to. +func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { + daemon.EventsService.Evict(listener) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. +func copyAttributes(attributes, labels map[string]string) { + if labels == nil { + return + } + for k, v := range labels { + attributes[k] = v + } +} diff --git a/vendor/github.com/moby/moby/daemon/events/events.go b/vendor/github.com/moby/moby/daemon/events/events.go new file mode 100644 index 0000000..0bf105f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/events.go @@ -0,0 +1,158 @@ +package events + +import ( + "sync" + "time" + + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/pkg/pubsub" +) + +const ( + eventsLimit = 64 + bufferSize = 1024 +) + +// Events is pubsub channel for events generated by the engine. +type Events struct { + mu sync.Mutex + events []eventtypes.Message + pub *pubsub.Publisher +} + +// New returns new *Events instance +func New() *Events { + return &Events{ + events: make([]eventtypes.Message, 0, eventsLimit), + pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), + } +} + +// Subscribe adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion), and a function to call +// to stop the stream of events. +func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { + eventSubscribers.Inc() + e.mu.Lock() + current := make([]eventtypes.Message, len(e.events)) + copy(current, e.events) + l := e.pub.Subscribe() + e.mu.Unlock() + + cancel := func() { + e.Evict(l) + } + return current, l, cancel +} + +// SubscribeTopic adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion). +func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) { + eventSubscribers.Inc() + e.mu.Lock() + + var topic func(m interface{}) bool + if ef != nil && ef.filter.Len() > 0 { + topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } + } + + buffered := e.loadBufferedEvents(since, until, topic) + + var ch chan interface{} + if topic != nil { + ch = e.pub.SubscribeTopic(topic) + } else { + // Subscribe to all events if there are no filters + ch = e.pub.Subscribe() + } + + e.mu.Unlock() + return buffered, ch +} + +// Evict evicts listener from pubsub +func (e *Events) Evict(l chan interface{}) { + eventSubscribers.Dec() + e.pub.Evict(l) +} + +// Log broadcasts event to listeners. Each listener has 100 millisecond for +// receiving event or it will be skipped. +func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { + eventsCounter.Inc() + now := time.Now().UTC() + jm := eventtypes.Message{ + Action: action, + Type: eventType, + Actor: actor, + Time: now.Unix(), + TimeNano: now.UnixNano(), + } + + // fill deprecated fields for container and images + switch eventType { + case eventtypes.ContainerEventType: + jm.ID = actor.ID + jm.Status = action + jm.From = actor.Attributes["image"] + case eventtypes.ImageEventType: + jm.ID = actor.ID + jm.Status = action + } + + e.mu.Lock() + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) +} + +// SubscribersCount returns number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} + +// loadBufferedEvents iterates over the cached events in the buffer +// and returns those that were emitted between two specific dates. +// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments. +// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. +func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message { + var buffered []eventtypes.Message + if since.IsZero() && until.IsZero() { + return buffered + } + + var sinceNanoUnix int64 + if !since.IsZero() { + sinceNanoUnix = since.UnixNano() + } + + var untilNanoUnix int64 + if !until.IsZero() { + untilNanoUnix = until.UnixNano() + } + + for i := len(e.events) - 1; i >= 0; i-- { + ev := e.events[i] + + if ev.TimeNano < sinceNanoUnix { + break + } + + if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix { + continue + } + + if topic == nil || topic(ev) { + buffered = append([]eventtypes.Message{ev}, buffered...) + } + } + return buffered +} diff --git a/vendor/github.com/moby/moby/daemon/events/events_test.go b/vendor/github.com/moby/moby/daemon/events/events_test.go new file mode 100644 index 0000000..bbd160f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/events_test.go @@ -0,0 +1,275 @@ +package events + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" + eventstestutils "github.com/docker/docker/daemon/events/testutils" +) + +func TestEventsLog(t *testing.T) { + e := New() + _, l1, _ := e.Subscribe() + _, l2, _ := e.Subscribe() + defer e.Evict(l1) + defer e.Evict(l2) + count := e.SubscribersCount() + if count != 2 { + t.Fatalf("Must be 2 subscribers, got %d", count) + } + actor := events.Actor{ + ID: "cont", + Attributes: map[string]string{"image": "image"}, + } + e.Log("test", events.ContainerEventType, actor) + select { + case msg := <-l1: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } + select { + case msg := <-l2: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } +} + +func TestEventsLogTimeout(t *testing.T) { + e := New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + c := make(chan struct{}) + go func() { + actor := events.Actor{ + ID: "image", + } + e.Log("test", events.ImageEventType, actor) + close(c) + }() + + select { + case <-c: + case <-time.After(time.Second): + t.Fatal("Timeout publishing message") + } +} + +func TestLogEvents(t *testing.T) { + e := New() + + for i := 0; i < eventsLimit+16; i++ { + action := fmt.Sprintf("action_%d", i) + id := fmt.Sprintf("cont_%d", i) + from := fmt.Sprintf("image_%d", i) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + time.Sleep(50 * time.Millisecond) + current, l, _ := e.Subscribe() + for i := 0; i < 10; i++ { + num := i + eventsLimit + 16 + action := fmt.Sprintf("action_%d", num) + id := fmt.Sprintf("cont_%d", num) + from := fmt.Sprintf("image_%d", num) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + if len(e.events) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) + } + + var msgs []events.Message + for len(msgs) < 10 { + m := <-l + jm, ok := (m).(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", m) + } + msgs = append(msgs, jm) + } + if len(current) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) + } + first := current[0] + if first.Status != "action_16" { + t.Fatalf("First action is %s, must be action_16", first.Status) + } + last := current[len(current)-1] + if last.Status != "action_79" { + t.Fatalf("Last action is %s, must be action_79", last.Status) + } + + firstC := msgs[0] + if firstC.Status != "action_80" { + t.Fatalf("First action is %s, must be action_80", firstC.Status) + } + lastC := msgs[len(msgs)-1] + if lastC.Status != "action_89" { + t.Fatalf("Last action is %s, must be action_89", lastC.Status) + } +} + +// https://github.com/docker/docker/issues/20999 +// Fixtures: +// +//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge) +//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +func TestLoadBufferedEvents(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Unix(s, sNano) + until := time.Time{} + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 1 { + t.Fatalf("expected 1 message, got %d: %v", len(out), out) + } +} + +func TestLoadBufferedEventsOnlyFromPast(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.090000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + f, err = timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + u, uNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Unix(s, sNano) + until := time.Unix(u, uNano) + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 1 { + t.Fatalf("expected 1 message, got %d: %v", len(out), out) + } + + if out[0].Type != "network" { + t.Fatalf("expected network event, got %s", out[0].Type) + } +} + +// #13753 +func TestIngoreBufferedWhenNoTimes(t *testing.T) { + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Time{} + until := time.Time{} + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 0 { + t.Fatalf("expected 0 buffered events, got %q", out) + } +} diff --git a/vendor/github.com/moby/moby/daemon/events/filter.go b/vendor/github.com/moby/moby/daemon/events/filter.go new file mode 100644 index 0000000..5c9c527 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/filter.go @@ -0,0 +1,110 @@ +package events + +import ( + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/reference" +) + +// Filter can filter out docker events from a stream +type Filter struct { + filter filters.Args +} + +// NewFilter creates a new Filter +func NewFilter(filter filters.Args) *Filter { + return &Filter{filter: filter} +} + +// Include returns true when the event ev is included by the filters +func (ef *Filter) Include(ev events.Message) bool { + return ef.matchEvent(ev) && + ef.filter.ExactMatch("type", ev.Type) && + ef.matchDaemon(ev) && + ef.matchContainer(ev) && + ef.matchPlugin(ev) && + ef.matchVolume(ev) && + ef.matchNetwork(ev) && + ef.matchImage(ev) && + ef.matchLabels(ev.Actor.Attributes) +} + +func (ef *Filter) matchEvent(ev events.Message) bool { + // #25798 if an event filter contains either health_status, exec_create or exec_start without a colon + // Let's to a FuzzyMatch instead of an ExactMatch. + if ef.filterContains("event", map[string]struct{}{"health_status": {}, "exec_create": {}, "exec_start": {}}) { + return ef.filter.FuzzyMatch("event", ev.Action) + } + return ef.filter.ExactMatch("event", ev.Action) +} + +func (ef *Filter) filterContains(field string, values map[string]struct{}) bool { + for _, v := range ef.filter.Get(field) { + if _, ok := values[v]; ok { + return true + } + } + return false +} + +func (ef *Filter) matchLabels(attributes map[string]string) bool { + if !ef.filter.Include("label") { + return true + } + return ef.filter.MatchKVList("label", attributes) +} + +func (ef *Filter) matchDaemon(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.DaemonEventType) +} + +func (ef *Filter) matchContainer(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ContainerEventType) +} + +func (ef *Filter) matchPlugin(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.PluginEventType) +} + +func (ef *Filter) matchVolume(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.VolumeEventType) +} + +func (ef *Filter) matchNetwork(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NetworkEventType) +} + +func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { + return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || + ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) +} + +// matchImage matches against both event.Actor.ID (for image events) +// and event.Actor.Attributes["image"] (for container events), so that any container that was created +// from an image will be included in the image events. Also compare both +// against the stripped repo name without any tags. +func (ef *Filter) matchImage(ev events.Message) bool { + id := ev.Actor.ID + nameAttr := "image" + var imageName string + + if ev.Type == events.ImageEventType { + nameAttr = "name" + } + + if n, ok := ev.Actor.Attributes[nameAttr]; ok { + imageName = n + } + return ef.filter.ExactMatch("image", id) || + ef.filter.ExactMatch("image", imageName) || + ef.filter.ExactMatch("image", stripTag(id)) || + ef.filter.ExactMatch("image", stripTag(imageName)) +} + +func stripTag(image string) string { + ref, err := reference.ParseNamed(image) + if err != nil { + return image + } + return ref.Name() +} diff --git a/vendor/github.com/moby/moby/daemon/events/metrics.go b/vendor/github.com/moby/moby/daemon/events/metrics.go new file mode 100644 index 0000000..c9a89ec --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/metrics.go @@ -0,0 +1,15 @@ +package events + +import "github.com/docker/go-metrics" + +var ( + eventsCounter metrics.Counter + eventSubscribers metrics.Gauge +) + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + eventsCounter = ns.NewCounter("events", "The number of events logged") + eventSubscribers = ns.NewGauge("events_subscribers", "The number of current subscribers to events", metrics.Total) + metrics.Register(ns) +} diff --git a/vendor/github.com/moby/moby/daemon/events/testutils/testutils.go b/vendor/github.com/moby/moby/daemon/events/testutils/testutils.go new file mode 100644 index 0000000..3544446 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/testutils/testutils.go @@ -0,0 +1,76 @@ +package testutils + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" +) + +var ( + reTimestamp = `(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z))` + reEventType = `(?P\w+)` + reAction = `(?P\w+)` + reID = `(?P[^\s]+)` + reAttributes = `(\s\((?P[^\)]+)\))?` + reString = fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes) + + // eventCliRegexp is a regular expression that matches all possible event outputs in the cli + eventCliRegexp = regexp.MustCompile(reString) +) + +// ScanMap turns an event string like the default ones formatted in the cli output +// and turns it into map. +func ScanMap(text string) map[string]string { + matches := eventCliRegexp.FindAllStringSubmatch(text, -1) + md := map[string]string{} + if len(matches) == 0 { + return md + } + + names := eventCliRegexp.SubexpNames() + for i, n := range matches[0] { + md[names[i]] = n + } + return md +} + +// Scan turns an event string like the default ones formatted in the cli output +// and turns it into an event message. +func Scan(text string) (*events.Message, error) { + md := ScanMap(text) + if len(md) == 0 { + return nil, fmt.Errorf("text is not an event: %s", text) + } + + f, err := timetypes.GetTimestamp(md["timestamp"], time.Now()) + if err != nil { + return nil, err + } + + t, tn, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + return nil, err + } + + attrs := make(map[string]string) + for _, a := range strings.SplitN(md["attributes"], ", ", -1) { + kv := strings.SplitN(a, "=", 2) + attrs[kv[0]] = kv[1] + } + + tu := time.Unix(t, tn) + return &events.Message{ + Time: t, + TimeNano: tu.UnixNano(), + Type: md["eventType"], + Action: md["action"], + Actor: events.Actor{ + ID: md["id"], + Attributes: attrs, + }, + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/events_test.go b/vendor/github.com/moby/moby/daemon/events_test.go new file mode 100644 index 0000000..2dbcc27 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events_test.go @@ -0,0 +1,94 @@ +package daemon + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func TestLogContainerEventCopyLabels(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + daemon.LogContainerEvent(container, "create") + + if _, mutated := container.Config.Labels["image"]; mutated { + t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels) + } + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "os": "alpine", + }) +} + +func TestLogContainerEventWithAttributes(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + attributes := map[string]string{ + "node": "2", + "foo": "bar", + } + daemon.LogContainerEventWithAttributes(container, "create", attributes) + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "foo": "bar", + }) +} + +func validateTestAttributes(t *testing.T, l chan interface{}, expectedAttributesToTest map[string]string) { + select { + case ev := <-l: + event, ok := ev.(eventtypes.Message) + if !ok { + t.Fatalf("Unexpected event message: %q", ev) + } + for key, expected := range expectedAttributesToTest { + actual, ok := event.Actor.Attributes[key] + if !ok || actual != expected { + t.Fatalf("Expected value for key %s to be %s, but was %s (event:%v)", key, expected, actual, event) + } + } + case <-time.After(10 * time.Second): + t.Fatalf("LogEvent test timed out") + } +} diff --git a/vendor/github.com/moby/moby/daemon/exec.go b/vendor/github.com/moby/moby/daemon/exec.go new file mode 100644 index 0000000..8197426 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec.go @@ -0,0 +1,280 @@ +package daemon + +import ( + "fmt" + "io" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" +) + +// Seconds to wait after sending TERM before trying KILL +const termProcessTimeout = 10 + +func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + container.ExecCommands.Add(config.ID, config) + // Storing execs in daemon for easy access via Engine API. + d.execCommands.Add(config.ID, config) +} + +// ExecExists looks up the exec instance and returns a bool if it exists or not. +// It will also return the error produced by `getConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. +func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { + ec := d.execCommands.Get(name) + + // If the exec is found but its container is not in the daemon's list of + // containers then it must have been deleted, in which case instead of + // saying the container isn't running, we should return a 404 so that + // the user sees the same error now that they will after the + // 5 minute clean-up loop is run which erases old/dead execs. + + if ec != nil { + if container := d.containers.Get(ec.ContainerID); container != nil { + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil + } + } + + return nil, errExecNotFound(name) +} + +func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { + container.ExecCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { + container, err := d.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + if container.IsPaused() { + return nil, errExecPaused(name) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return container, nil +} + +// ContainerExecCreate sets up an exec in a running container. +func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { + container, err := d.getActiveContainer(name) + if err != nil { + return "", err + } + + cmd := strslice.StrSlice(config.Cmd) + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) + + keys := []byte{} + if config.DetachKeys != "" { + keys, err = term.ToBytes(config.DetachKeys) + if err != nil { + err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) + return "", err + } + } + + execConfig := exec.NewConfig() + execConfig.OpenStdin = config.AttachStdin + execConfig.OpenStdout = config.AttachStdout + execConfig.OpenStderr = config.AttachStderr + execConfig.ContainerID = container.ID + execConfig.DetachKeys = keys + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = config.Tty + execConfig.Privileged = config.Privileged + execConfig.User = config.User + + linkedEnv, err := d.setupLinkedContainers(container) + if err != nil { + return "", err + } + execConfig.Env = utils.ReplaceOrAppendEnvValues(container.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) + if len(execConfig.User) == 0 { + execConfig.User = container.Config.User + } + + d.registerExecCommand(container, execConfig) + + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + return execConfig.ID, nil +} + +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. +// If ctx is cancelled, the process is terminated. +func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + ec, err := d.getExecConfig(name) + if err != nil { + return errExecNotFound(name) + } + + ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) + return errors.NewRequestConflictError(err) + } + + if ec.Running { + ec.Unlock() + return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + } + ec.Running = true + defer func() { + if err != nil { + ec.Running = false + exitCode := 126 + ec.ExitCode = &exitCode + } + }() + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) + + if ec.OpenStdin && stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if ec.OpenStdout { + cStdout = stdout + } + if ec.OpenStderr { + cStderr = stderr + } + + if ec.OpenStdin { + ec.StreamConfig.NewInputPipes() + } else { + ec.StreamConfig.NewNopInputPipe() + } + + p := libcontainerd.Process{ + Args: append([]string{ec.Entrypoint}, ec.Args...), + Env: ec.Env, + Terminal: ec.Tty, + } + + if err := execSetPlatformOpt(c, ec, &p); err != nil { + return err + } + + attachErr := container.AttachStreams(ctx, ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys) + + systemPid, err := d.containerd.AddProcess(ctx, c.ID, name, p, ec.InitializeStdio) + if err != nil { + return err + } + ec.Lock() + ec.Pid = systemPid + ec.Unlock() + + select { + case <-ctx.Done(): + logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID) + d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"])) + select { + case <-time.After(termProcessTimeout * time.Second): + logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout) + d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"])) + case <-attachErr: + // TERM signal worked + } + return fmt.Errorf("context cancelled") + case err := <-attachErr: + if err != nil { + if _, ok := err.(container.DetachError); !ok { + return fmt.Errorf("exec attach failed with error: %v", err) + } + d.LogContainerEvent(c, "exec_detach") + } + } + return nil +} + +// execCommandGC runs a ticker to clean up the daemon references +// of exec configs that are no longer part of the container. +func (d *Daemon) execCommandGC() { + for range time.Tick(5 * time.Minute) { + var ( + cleaned int + liveExecCommands = d.containerExecIds() + ) + for id, config := range d.execCommands.Commands() { + if config.CanRemove { + cleaned++ + d.execCommands.Delete(id) + } else { + if _, exists := liveExecCommands[id]; !exists { + config.CanRemove = true + } + } + } + if cleaned > 0 { + logrus.Debugf("clean %d unused exec commands", cleaned) + } + } +} + +// containerExecIds returns a list of all the current exec ids that are in use +// and running inside a container. +func (d *Daemon) containerExecIds() map[string]struct{} { + ids := map[string]struct{}{} + for _, c := range d.containers.List() { + for _, id := range c.ExecCommands.List() { + ids[id] = struct{}{} + } + } + return ids +} diff --git a/vendor/github.com/moby/moby/daemon/exec/exec.go b/vendor/github.com/moby/moby/daemon/exec/exec.go new file mode 100644 index 0000000..933136f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec/exec.go @@ -0,0 +1,118 @@ +package exec + +import ( + "runtime" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/stringid" +) + +// Config holds the configurations for execs. The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion. +type Config struct { + sync.Mutex + StreamConfig *stream.Config + ID string + Running bool + ExitCode *int + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Entrypoint string + Args []string + Tty bool + Privileged bool + User string + Env []string + Pid int +} + +// NewConfig initializes the a new exec configuration +func NewConfig() *Config { + return &Config{ + ID: stringid.GenerateNonCryptoID(), + StreamConfig: stream.NewConfig(), + } +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error { + c.StreamConfig.CopyToPipe(iop) + + if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Errorf("error closing exec stdin: %+v", err) + } + } + } + + return nil +} + +// CloseStreams closes the stdio streams for the exec +func (c *Config) CloseStreams() error { + return c.StreamConfig.CloseStreams() +} + +// Store keeps track of the exec configurations. +type Store struct { + commands map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{commands: make(map[string]*Config, 0)} +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, Config *Config) { + e.Lock() + e.commands[id] = Config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.commands[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string) { + e.Lock() + delete(e.commands, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.commands { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/vendor/github.com/moby/moby/daemon/exec_linux.go b/vendor/github.com/moby/moby/daemon/exec_linux.go new file mode 100644 index 0000000..bb11c11 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec_linux.go @@ -0,0 +1,50 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + if len(ec.User) > 0 { + uid, gid, additionalGids, err := getUser(c, ec.User) + if err != nil { + return err + } + p.User = &specs.User{ + UID: uid, + GID: gid, + AdditionalGids: additionalGids, + } + } + if ec.Privileged { + p.Capabilities = caps.GetAllCapabilities() + } + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. + if err := ensureDefaultAppArmorProfile(); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/exec_solaris.go b/vendor/github.com/moby/moby/daemon/exec_solaris.go new file mode 100644 index 0000000..7003355 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/exec_windows.go b/vendor/github.com/moby/moby/daemon/exec_windows.go new file mode 100644 index 0000000..1d6974c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec_windows.go @@ -0,0 +1,14 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + // Process arguments need to be escaped before sending to OCI. + p.Args = escapeArgs(p.Args) + p.User.Username = ec.User + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/export.go b/vendor/github.com/moby/moby/daemon/export.go new file mode 100644 index 0000000..5ef6dbb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/export.go @@ -0,0 +1,60 @@ +package daemon + +import ( + "fmt" + "io" + "runtime" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found. +func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { + if runtime.GOOS == "windows" { + return fmt.Errorf("the daemon on this platform does not support export of a container") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + data, err := daemon.containerExport(container) + if err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(out, data); err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + return nil +} + +func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + }) + if err != nil { + daemon.Unmount(container) + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + daemon.Unmount(container) + return err + }) + daemon.LogContainerEvent(container, "export") + return arch, err +} diff --git a/vendor/github.com/moby/moby/daemon/getsize_unix.go b/vendor/github.com/moby/moby/daemon/getsize_unix.go new file mode 100644 index 0000000..707323a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/getsize_unix.go @@ -0,0 +1,41 @@ +// +build linux freebsd solaris + +package daemon + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" +) + +// getSize returns the real size & virtual size of the container. +func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + ) + + if err := daemon.Mount(container); err != nil { + logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer daemon.Unmount(container) + + sizeRw, err = container.RWLayer.Size() + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + daemon.GraphDriverName(), container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. + sizeRw = -1 + } + + if parent := container.RWLayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { + sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw + } + } + return sizeRw, sizeRootfs +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs.go new file mode 100644 index 0000000..ec55ea4 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs.go @@ -0,0 +1,669 @@ +// +build linux + +/* + +aufs driver directory structure + + . + ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/idtools" + mountpk "github.com/docker/docker/pkg/mount" + + "github.com/opencontainers/runc/libcontainer/label" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + // ErrAufsNested means aufs cannot be used bc we are in a user namespace + ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +func init() { + graphdriver.Register("aufs", Init) +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + sync.Mutex + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + pathCacheLock sync.Mutex + pathCache map[string]string + naiveDiff graphdriver.DiffDriver +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. +func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + uidMaps: uidMaps, + gidMaps: gidMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if rsystem.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// GetMetadata not implemented +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return a.Create(id, parent, opts) +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for aufs") + } + + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIDs(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + + return nil +} + +// createDirsFor creates two directories for the given id. +// mnt and diff +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) + if err != nil { + return err + } + // Directory permission is 0755. + // The path of directories are /mnt/ + // and /diff/ + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { + return err + } + } + return nil +} + +// Helper function to debug EBUSY errors on remove. +func debugEBusy(mountPath string) (out []string, err error) { + // lsof is not part of GNU coreutils. This is a best effort + // attempt to detect offending processes. + c := exec.Command("lsof") + + r, err := c.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("Assigning pipes failed with %v", err) + } + + if err := c.Start(); err != nil { + return nil, fmt.Errorf("Starting %s failed with %v", c.Path, err) + } + + defer func() { + waiterr := c.Wait() + if waiterr != nil && err == nil { + err = fmt.Errorf("Waiting for %s failed with %v", c.Path, waiterr) + } + }() + + sc := bufio.NewScanner(r) + for sc.Scan() { + entry := sc.Text() + if strings.Contains(entry, mountPath) { + out = append(out, entry, "\n") + } + } + + return out, nil +} + +// Remove will unmount and remove the given id. +func (a *Driver) Remove(id string) error { + a.pathCacheLock.Lock() + mountpoint, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + if !exists { + mountpoint = a.getMountpoint(id) + } + + var retries int + for { + mounted, err := a.mounted(mountpoint) + if err != nil { + return err + } + if !mounted { + break + } + + if err := a.unmount(mountpoint); err != nil { + if err != syscall.EBUSY { + return fmt.Errorf("aufs: unmount error: %s: %v", mountpoint, err) + } + if retries >= 5 { + out, debugErr := debugEBusy(mountpoint) + if debugErr == nil { + logrus.Warnf("debugEBusy returned %v", out) + } + return fmt.Errorf("aufs: unmount error after retries: %s: %v", mountpoint, err) + } + // If unmount returns EBUSY, it could be a transient error. Sleep and retry. + retries++ + logrus.Warnf("unmount failed due to EBUSY: retry count: %d", retries) + time.Sleep(100 * time.Millisecond) + continue + } + break + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. + tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) { + if err == syscall.EBUSY { + logrus.Warn("os.Rename err due to EBUSY") + out, debugErr := debugEBusy(mountpoint) + if debugErr == nil { + logrus.Warnf("debugEBusy returned %v", out) + } + } + return err + } + defer os.RemoveAll(tmpMntPath) + + tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpDiffpath) + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err + } + + a.pathCacheLock.Lock() + delete(a.pathCache, id) + a.pathCacheLock.Unlock() + return nil +} + +// Get returns the rootfs path for the id. +// This will mount the dir at its given path +func (a *Driver) Get(id, mountLabel string) (string, error) { + parents, err := a.getParentLayerPaths(id) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + + if !exists { + m = a.getDiffPath(id) + if len(parents) > 0 { + m = a.getMountpoint(id) + } + } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + if len(parents) > 0 { + if err := a.mount(id, m, mountLabel, parents); err != nil { + return "", err + } + } + + a.pathCacheLock.Lock() + a.pathCache[id] = m + a.pathCacheLock.Unlock() + return m, nil +} + +// Put unmounts and updates list of active mounts. +func (a *Driver) Put(id string) error { + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } + + err := a.unmount(m) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + return err +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (a *Driver) isParent(id, parent string) bool { + parents, _ := getParentIDs(a.rootPath(), id) + if parent == "" && len(parents) > 0 { + return false + } + return !(len(parents) > 0 && parent != parents[0]) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Diff(id, parent) + } + + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := path.Join(a.rootPath(), "diff", id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +func (a *Driver) applyDiff(id string, diff io.Reader) error { + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id, parent string) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.DiffSize(id, parent) + } + // AUFS doesn't need the parent layer to calculate the diff size. + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.ApplyDiff(id, parent, diff) + } + + // AUFS doesn't need the parent id to apply the diff if it is the direct parent. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Changes(id, parent) + } + + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIDs(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", m, err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. + + offset := 54 + if useDirperm() { + offset += len("dirperm1") + } + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + index := 0 + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } + + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { + return + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. +func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "docker-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "docker-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs_test.go new file mode 100644 index 0000000..dc3c6a3 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs_test.go @@ -0,0 +1,802 @@ +// +build linux + +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path" + "sync" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/stringid" +) + +var ( + tmpOuter = path.Join(os.TempDir(), "aufs-tests") + tmp = path.Join(tmpOuter, "aufs") +) + +func init() { + reexec.Init() +} + +func testInit(dir string, t testing.TB) graphdriver.Driver { + d, err := Init(dir, nil, nil, nil) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t testing.TB) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer os.RemoveAll(tmp) + if d == nil { + t.Fatalf("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func TestNewDriverFromExistingDir(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + testInit(tmp, t) + testInit(tmp, t) + os.RemoveAll(tmp) +} + +func TestCreateNewDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } +} + +func TestCreateNewDirStructure(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { + t.Fatal(err) + } + } +} + +func TestRemoveImage(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { + t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) + } + } +} + +func TestGetWithoutParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + expected := path.Join(tmp, "diff", "1") + if diffPath != expected { + t.Fatalf("Expected path %s got %s", expected, diffPath) + } +} + +func TestCleanupWithNoDirs(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestCleanupWithDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestMountedFalseResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + response, err := d.mounted(d.getDiffPath("1")) + if err != nil { + t.Fatal(err) + } + + if response != false { + t.Fatalf("Response if dir id 1 is mounted should be false") + } +} + +func TestMountedTrueReponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + _, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + response, err := d.mounted(d.pathCache["2"]) + if err != nil { + t.Fatal(err) + } + + if response != true { + t.Fatalf("Response if dir id 2 is mounted should be true") + } +} + +func TestMountWithParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + expected := path.Join(tmp, "mnt", "2") + if mntPath != expected { + t.Fatalf("Expected %s got %s", expected, mntPath) + } +} + +func TestRemoveMountedDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted(d.pathCache["2"]) + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatalf("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker", nil); err == nil { + t.Fatalf("Error should not be nil with parent does not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatalf("Archive should not be nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.CreateReadWrite("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.CreateReadWrite("3", "2", nil); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "2") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + diffSize, err = d.DiffSize("2", "1") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id none should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[2] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2", nil); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func testMountMoreThan42Layers(t *testing.T, mountPath string) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(mountPath) + d := testInit(mountPath, t).(*Driver) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.CreateReadWrite(current, parent, nil); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f.Close() + + if i%10 == 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Error(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Error(err) + } + if len(files) != expected { + t.Errorf("Expected %d got %d", expected, len(files)) + } +} + +func TestMountMoreThan42Layers(t *testing.T) { + os.RemoveAll(tmpOuter) + testMountMoreThan42Layers(t, tmp) +} + +func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { + defer os.RemoveAll(tmpOuter) + zeroes := "0" + for { + // This finds a mount path so that when combined into aufs mount options + // 4096 byte boundary would be in between the paths or in permission + // section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' + mountPath := path.Join(tmpOuter, zeroes, "aufs") + pathLength := 77 + len(mountPath) + + if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { + t.Logf("Using path: %s", mountPath) + testMountMoreThan42Layers(t, mountPath) + return + } + zeroes += "0" + } +} + +func BenchmarkConcurrentAccess(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + d := newDriver(b) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + numConcurent := 256 + // create a bunch of ids + var ids []string + for i := 0; i < numConcurent; i++ { + ids = append(ids, stringid.GenerateNonCryptoID()) + } + + if err := d.Create(ids[0], "", nil); err != nil { + b.Fatal(err) + } + + if err := d.Create(ids[1], ids[0], nil); err != nil { + b.Fatal(err) + } + + parent := ids[1] + ids = append(ids[2:]) + + chErr := make(chan error, numConcurent) + var outerGroup sync.WaitGroup + outerGroup.Add(len(ids)) + b.StartTimer() + + // here's the actual bench + for _, id := range ids { + go func(id string) { + defer outerGroup.Done() + if err := d.Create(id, parent, nil); err != nil { + b.Logf("Create %s failed", id) + chErr <- err + return + } + var innerGroup sync.WaitGroup + for i := 0; i < b.N; i++ { + innerGroup.Add(1) + go func() { + d.Get(id, "") + d.Put(id) + innerGroup.Done() + }() + } + innerGroup.Wait() + d.Remove(id) + }(id) + } + + outerGroup.Wait() + b.StopTimer() + close(chErr) + for err := range chErr { + if err != nil { + b.Log(err) + b.Fail() + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/dirs.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/dirs.go new file mode 100644 index 0000000..d2325fc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIDs(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount.go new file mode 100644 index 0000000..da1e892 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Unmount the target specified. +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_linux.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_linux.go new file mode 100644 index 0000000..8062bae --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "syscall" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_unsupported.go new file mode 100644 index 0000000..d030b06 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. +const MsRemount = 0 + +func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + return errors.New("mount is not implemented on this platform") +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs.go new file mode 100644 index 0000000..44420f1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs.go @@ -0,0 +1,530 @@ +// +build linux + +package btrfs + +/* +#include +#include +#include +#include + +static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) { + snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value); +} +*/ +import "C" + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + "syscall" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/go-units" + "github.com/opencontainers/runc/libcontainer/label" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +var ( + quotaEnabled = false + userDiskQuota = false +) + +type btrfsOptions struct { + minSpace uint64 + size uint64 +} + +// Init returns a new BTRFS driver. +// An error is returned if BTRFS is not supported. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + + if fsMagic != graphdriver.FsMagicBtrfs { + return nil, graphdriver.ErrPrerequisites + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + opt, err := parseOptions(options) + if err != nil { + return nil, err + } + + if userDiskQuota { + if err := subvolEnableQuota(home); err != nil { + return nil, err + } + quotaEnabled = true + } + + driver := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + options: opt, + } + + return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (btrfsOptions, error) { + var options btrfsOptions + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "btrfs.min_space": + minSpace, err := units.RAMInBytes(val) + if err != nil { + return options, err + } + userDiskQuota = true + options.minSpace = uint64(minSpace) + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + //root of the file system + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions +} + +// String prints the name of the driver (btrfs). +func (d *Driver) String() string { + return "btrfs" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Build Version" and "Library Version" of the btrfs libraries used. +// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// GetMetadata returns empty metadata for this driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + if quotaEnabled { + if err := subvolDisableQuota(d.home); err != nil { + return err + } + } + + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat syscall.Stat_t + if err := syscall.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func subvolEnableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolDisableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_DISABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolRescanQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if !quotaEnabled { + if err := subvolEnableQuota(d.home); err != nil { + return err + } + quotaEnabled = true + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + if err := subvolRescanQuota(d.home); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// Exists checks if the id exists in the filesystem. +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirID(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 0000000..0038dbc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,63 @@ +// +build linux + +package btrfs + +import ( + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsSubvolDelete(t *testing.T) { + d := graphtest.GetDriver(t, "btrfs") + if err := d.CreateReadWrite("test", "", nil); err != nil { + t.Fatal(err) + } + defer graphtest.PutDriver(t) + + dir, err := d.Get("test", "") + if err != nil { + t.Fatal(err) + } + defer d.Put("test") + + if err := subvolCreate(dir, "subvoltest"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); err != nil { + t.Fatal(err) + } + + if err := d.Remove("test"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); !os.IsNotExist(err) { + t.Fatalf("expected not exist error on nested subvol, got: %v", err) + } +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/dummy_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 0000000..f070888 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version.go new file mode 100644 index 0000000..73d90cd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version.go @@ -0,0 +1,26 @@ +// +build linux,!btrfs_noversion + +package btrfs + +/* +#include + +// around version 3.16, they did not define lib version yet +#ifndef BTRFS_LIB_VERSION +#define BTRFS_LIB_VERSION -1 +#endif + +// upstream had removed it, but now it will be coming back +#ifndef BTRFS_BUILD_VERSION +#define BTRFS_BUILD_VERSION "-" +#endif +*/ +import "C" + +func btrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} + +func btrfsLibVersion() int { + return int(C.BTRFS_LIB_VERSION) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_none.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_none.go new file mode 100644 index 0000000..f802fbc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_none.go @@ -0,0 +1,14 @@ +// +build linux,btrfs_noversion + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_test.go new file mode 100644 index 0000000..15a6e75 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_test.go @@ -0,0 +1,13 @@ +// +build linux,!btrfs_noversion + +package btrfs + +import ( + "testing" +) + +func TestLibVersion(t *testing.T) { + if btrfsLibVersion() <= 0 { + t.Errorf("expected output from btrfs lib version > 0") + } +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/counter.go b/vendor/github.com/moby/moby/daemon/graphdriver/counter.go new file mode 100644 index 0000000..5ea604f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/counter.go @@ -0,0 +1,67 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increaes the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + m.count++ + c.mu.Unlock() + return m.count +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + m.count-- + c.mu.Unlock() + return m.count +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/README.md b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/README.md new file mode 100644 index 0000000..b23bbb1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,96 @@ +## devicemapper - a storage backend based on Device Mapper + +### Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. The preferred model is +to have a thin pool reserved outside of Docker and passed to the +daemon via the `--storage-opt dm.thinpooldev` option. + +As a fallback if no thin pool is provided, loopback files will be +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Docker daemon has a +`--storage-opt dm.thinpooldev` argument provided. + +In loopback, a thin pool is created at `/var/lib/docker/devicemapper` +(devicemapper graph location) based on two block devices, one for +data and one for metadata. By default these block devices are created +automatically by using loopback mounts of automatically created sparse +files. + +The default loopback files used are +`/var/lib/docker/devicemapper/devicemapper/data` and +`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata +required to map from docker entities to the corresponding devicemapper +volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json` +file (encoded as Json). + +In order to support multiple devicemapper graphs on a system, the thin +pool will be named something like: `docker-0:33-19478248-pool`, where +the `0:33` part is the minor/major device nr and `19478248` is the +inode number of the `/var/lib/docker/devicemapper` directory. + +On the thin pool, docker automatically creates a base thin device, +called something like `docker-0:33-19478248-base` of a fixed +size. This is automatically formatted with an empty filesystem on +creation. This device is the base of all docker images and +containers. All base images are snapshots of this device and those +images are then in turn used as snapshots for other images and +eventually containers. + +### Information on `docker info` + +As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver +will display something like: + + $ sudo docker info + [...] + Storage Driver: devicemapper + Pool Name: docker-253:1-17538953-pool + Pool Blocksize: 65.54 kB + Base Device Size: 107.4 GB + Data file: /dev/loop4 + Metadata file: /dev/loop4 + Data Space Used: 2.536 GB + Data Space Total: 107.4 GB + Data Space Available: 104.8 GB + Metadata Space Used: 7.93 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.14 GB + Udev Sync Supported: true + Data loop file: /home/docker/devicemapper/devicemapper/data + Metadata loop file: /home/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.82-git (2013-10-04) + [...] + +#### status items + +Each item in the indented section under `Storage Driver: devicemapper` are +status information about the driver. + * `Pool Name` name of the devicemapper pool for this driver. + * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation. + * `Base Device Size` tells the maximum size of a container and image + * `Data file` blockdevice file used for the devicemapper data + * `Metadata file` blockdevice file used for the devicemapper metadata + * `Data Space Used` tells how much of `Data file` is currently used + * `Data Space Total` tells max size the `Data file` + * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. + * `Metadata Space Used` tells how much of `Metadata file` is currently used + * `Metadata Space Total` tells max size the `Metadata file` + * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. + * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`. + * `Data loop file` file attached to `Data file`, if loopback device is used + * `Metadata loop file` file attached to `Metadata file`, if loopback device is used + * `Library Version` from the libdevmapper used + +### About the devicemapper options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the `--storage-opt` flags. +This uses the `dm` prefix and would be used something like `docker daemon --storage-opt dm.foo=bar`. + +These options are currently documented both in [the man +page](../../../man/docker.1.md) and in [the online +documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#/storage-driver-options). +If you add an options, update both the `man` page and the documentation. diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/deviceset.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/deviceset.go new file mode 100644 index 0000000..3103e19 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/deviceset.go @@ -0,0 +1,2727 @@ +// +build linux + +package devmapper + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/loopback" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + units "github.com/docker/go-units" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + // We retry device removal so many a times that even error messages + // will fill up console during normal operation. So only log Fatal + // messages by default. + logLevel = devicemapper.LogLevelFatal + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false + defaultMinFreeSpacePercent uint32 = 10 +) + +const deviceSetMetaFile string = "deviceset-metadata" +const transactionMetaFile string = "transaction-metadata" + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be acquired *before* the device lock, and + // multiple device locks should be acquired parent before child. + lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 //min free space percentage in thinpool + xfsNospaceRetries string // max retries when xfs receives ENOSPC +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status returns the information about the device. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for meta data. + Metadata DiskUsage + // BaseDeviceSize is base size of container and image + BaseDeviceSize uint64 + // BaseDeviceFS is backing filesystem. + BaseDeviceFS string + // SectorSize size of the vector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true then the device is not unmounted. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// Structure used to export image/container metadata in docker inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus returns information about device mounted containing its id, size and sector information. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. + Size uint64 + // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. + TransactionID uint64 + // SizeInSectors indicates the size of the sectors allocated. + SizeInSectors uint64 + // MappedSectors indicates number of mapped sectors. + MappedSectors uint64 + // HighestMappedSector is the pointer to the highest mapped sector. + HighestMappedSector uint64 +} + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *devInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = "base" + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *devInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *devInfo) string { + file := info.Hash + if file == "" { + file = "base" + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) transactionMetaFile() string { + return path.Join(devices.metadataDir(), transactionMetaFile) +} + +func (devices *DeviceSet) deviceSetMetaFile() string { + return path.Join(devices.metadataDir(), deviceSetMetaFile) +} + +func (devices *DeviceSet) oldMetadataFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + if devices.thinPoolDevice == "" { + return devices.devicePrefix + "-pool" + } + return devices.thinPoolDevice +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := os.Stat(filename) + return err == nil +} + +// ensureImage creates a sparse file of bytes at the path +// /devicemapper/. +// If the file already exists and new size is larger than its current size, it grows to the new size. +// Either way it returns the full path. +func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + if (devices.deviceIDMap[deviceID/8] & mask) != 0 { + return false + } + return true +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return false + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return true + } + } + + if err := s.Err(); err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + } + return false +} + +func determineDefaultFS() string { + if xfsSupported() { + return "xfs" + } + + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") + return "ext4" +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + args := []string{} + args = append(args, devices.mkfsArgs...) + + args = append(args, devname) + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. + if !devices.deferredDelete { + return + } + + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. + logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if err != devicemapper.ErrEnxio { + return err + } + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. + logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debug("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + } + + // Device does not exist. + if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save UUID for first time (for +// upgrade case or verify UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is nil (upgrade case), save it and return success. + if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/docker/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + } + + defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) + + switch devices.BaseDeviceFilesystem { + case "ext4": + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case "xfs": + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. + if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +// DMLog implements logging using DevMapperLogger interface. +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { + // By default libdm sends us all the messages including debug ones. + // We need to filter out messages here and figure out which one + // should be printed. + if level > logLevel { + return + } + + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + if level <= devicemapper.LogLevelErr { + logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else if level <= devicemapper.LogLevelInfo { + logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else { + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: %s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. + if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. + if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + /* + * Driver version 4.27.0 and greater support deferred activation + * feature. + */ + + logrus.Debugf("devicemapper: driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return graphdriver.ErrNotSupported + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return graphdriver.ErrNotSupported + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + stat, err := file.Stat() + if err != nil { + return 0, 0, err + } + + dev := stat.Sys().(*syscall.Stat_t).Rdev + majorNum := major(dev) + minorNum := minor(dev) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. +func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. + if enableDeferredRemoval { + if !driverDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") + } + if !devicemapper.LibraryDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") + } + logrus.Debug("devmapper: Deferred removal support enabled.") + devices.deferredRemove = true + } + + if enableDeferredDeletion { + if !devices.deferredRemove { + return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") + } + logrus.Debug("devmapper: Deferred deletion support enabled.") + devices.deferredDelete = true + } + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) error { + // give ourselves to libdm as a log handler + devicemapper.LogInit(devices) + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return graphdriver.ErrNotSupported + } + + if err := devices.enableDeferredRemovalDeletion(); err != nil { + return err + } + + // https://github.com/docker/docker/issues/4036 + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + if dockerversion.IAmStatic == "true" { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + } else { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + } + + if !devices.overrideUdevSyncCheck { + return graphdriver.ErrNotSupported + } + } + + //create the root dir of the devmapper driver ownership to match this + //daemon's remapped root uid/gid so containers can start properly + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + return err + } + + // Set the device prefix from the device id and inode of the docker root dir + + st, err := os.Stat(devices.root) + if err != nil { + return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) + } + sysSt := st.Sys().(*syscall.Stat_t) + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. + // docker-maj,min[-inode] stands for: + // - Managed by docker + // - The target of this device is at major and minor + // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. + devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) + logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the thin-pool device + poolExists, err := devices.thinPoolExists(devices.getPoolName()) + if err != nil { + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, + // so we add this badhack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in /devicemapper/data and + // /devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if !poolExists && devices.thinPoolDevice == "" { + logrus.Debug("devmapper: Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in /devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = loopback.AttachLoopDevice(data) + if err != nil { + return err + } + devices.dataLoopFile = data + devices.dataDevice = dataFile.Name() + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in /devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = loopback.AttachLoopDevice(metadata) + if err != nil { + return err + } + devices.metadataLoopFile = metadata + devices.metadataDevice = metadataFile.Name() + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + } + + // Pool already exists and caller did not pass us a pool. That means + // we probably created pool earlier and could not remove it as some + // containers were still using it. Detect some of the properties of + // pool, like is it using loop devices. + if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. + if err := devices.saveMetadata(info); err != nil { + info.Deleted = false + return err + } + + devices.nrDeletedDevices++ + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { + if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) + return err + } + + defer devices.closeTransaction() + + err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) + if err != nil { + // If syncDelete is true, we want to return error. If deferred + // deletion is not enabled, we return an error. If error is + // something other then EBUSY, return an error. + if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy { + logrus.Debugf("devmapper: Error deleting device: %s", err) + return err + } + } + + if err == nil { + if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil { + return err + } + // If device was already in deferred delete state that means + // deletion was being tried again later. Reduce the deleted + // device count. + if info.Deleted { + devices.nrDeletedDevices-- + } + devices.markDeviceIDFree(info.DeviceID) + } else { + if err := devices.markForDeferredDeletion(info); err != nil { + return err + } + } + + return nil +} + +// Issue discard only if device open count is zero. +func (devices *DeviceSet) issueDiscard(info *devInfo) error { + logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) + defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) + // This is a workaround for the kernel not discarding block so + // on the thin pool when we remove a thinp device, so we do it + // manually. + // Even if device is deferred deleted, activate it and issue + // discards. + if err := devices.activateDeviceIfNeeded(info, true); err != nil { + return err + } + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.OpenCount != 0 { + logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) + return nil + } + + if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { + logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) + } + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { + if devices.doBlkDiscard { + devices.issueDiscard(info) + } + + // Try to deactivate device in case it is active. + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Error deactivating device: %s", err) + return err + } + + if err := devices.deleteTransaction(info, syncDelete); err != nil { + return err + } + + return nil +} + +// DeleteDevice will return success if device has been marked for deferred +// removal. If one wants to override that and want DeleteDevice() to fail if +// device was busy and could not be deleted, set syncDelete=true. +func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { + logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) + defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + return devices.deleteDevice(info, syncDelete) +} + +func (devices *DeviceSet) deactivatePool() error { + logrus.Debug("devmapper: deactivatePool() START") + defer logrus.Debug("devmapper: deactivatePool() END") + devname := devices.getPoolDevName() + + devinfo, err := devicemapper.GetInfo(devname) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + if err := devicemapper.RemoveDevice(devname); err != nil { + return err + } + + if d, err := devicemapper.GetDeps(devname); err == nil { + logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(info *devInfo) error { + logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) + defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + + if devices.deferredRemove { + if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { + return err + } + } else { + if err := devices.removeDevice(info.Name()); err != nil { + return err + } + } + return nil +} + +// Issues the underlying dm remove operation. +func (devices *DeviceSet) removeDevice(devname string) error { + var err error + + logrus.Debugf("devmapper: removeDevice START(%s)", devname) + defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) + + for i := 0; i < 200; i++ { + err = devicemapper.RemoveDevice(devname) + if err == nil { + break + } + if err != devicemapper.ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + } + + return err +} + +func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { + if !devices.deferredRemove { + return nil + } + + logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name()) + + devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) + if err != nil { + return err + } + + if devinfo != nil && devinfo.DeferredRemove == 0 { + return nil + } + + // Cancel deferred remove + if err := devices.cancelDeferredRemoval(info); err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if err != devicemapper.ErrEnxio { + return err + } + } + return nil +} + +func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { + logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) + + var err error + + // Cancel deferred remove + for i := 0; i < 100; i++ { + err = devicemapper.CancelDeferredRemove(info.Name()) + if err != nil { + if err == devicemapper.ErrBusy { + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + continue + } + } + break + } + return err +} + +// Shutdown shuts down the device by unmounting the root. +func (devices *DeviceSet) Shutdown(home string) error { + logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) + logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) + defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) + + // Stop deletion worker. This should start delivering new events to + // ticker channel. That means no new instance of cleanupDeletedDevice() + // will run after this call. If one instance is already running at + // the time of the call, it must be holding devices.Lock() and + // we will block on this lock till cleanup function exits. + devices.deletionWorkerTicker.Stop() + + devices.Lock() + // Save DeviceSet Metadata first. Docker kills all threads if they + // don't finish in certain time. It is possible that Shutdown() + // routine does not finish in time as we loop trying to deactivate + // some devices while these are busy. In that case shutdown() routine + // will be killed and we will not get a chance to save deviceset + // metadata. Hence save this early before trying to deactivate devices. + devices.saveDeviceSetMetaData() + + // ignore the error since it's just a best effort to not try to unmount something that's mounted + mounts, _ := mount.GetMounts() + mounted := make(map[string]bool, len(mounts)) + for _, mnt := range mounts { + mounted[mnt.Mountpoint] = true + } + + if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + + if mounted[p] { + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies. + if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil { + logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err) + } + } + + if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil { + logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err) + } else { + if err := devices.deactivateDevice(devInfo); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err) + } + } + + return nil + }); err != nil && !os.IsNotExist(err) { + devices.Unlock() + return err + } + + devices.Unlock() + + info, _ := devices.lookupDeviceWithLock("") + if info != nil { + info.lock.Lock() + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err) + } + devices.Unlock() + info.lock.Unlock() + } + + devices.Lock() + if devices.thinPoolDevice == "" { + if err := devices.deactivatePool(); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err) + } + } + devices.Unlock() + + return nil +} + +// Recent XFS changes allow changing behavior of filesystem in case of errors. +// When thin pool gets full and XFS gets ENOSPC error, currently it tries +// IO infinitely and sometimes it can block the container process +// and process can't be killWith 0 value, XFS will not retry upon error +// and instead will shutdown filesystem. + +func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { + dmDevicePath, err := os.Readlink(info.DevName()) + if err != nil { + return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err) + } + + dmDeviceName := path.Base(dmDevicePath) + filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" + maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) + if err != nil { + return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err) + } + defer maxRetriesFile.Close() + + // Set max retries to 0 + _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) + if err != nil { + return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err) + } + return nil +} + +// MountDevice mounts the device if not already mounted. +func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + if info.Deleted { + return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + if fstype == "xfs" && devices.xfsNospaceRetries != "" { + if err := devices.xfsSetNospaceRetries(info); err != nil { + syscall.Unmount(path, syscall.MNT_DETACH) + devices.deactivateDevice(info) + return err + } + } + + return nil +} + +// UnmountDevice unmounts the device and removes it from hash. +func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { + return err + } + logrus.Debug("devmapper: Unmount done") + + if err := devices.deactivateDevice(info); err != nil { + return err + } + + return nil +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. +func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + status.DeferredDeletedDeviceCount = devices.nrDeletedDevices + status.BaseDeviceSize = devices.getBaseDeviceSize() + status.BaseDeviceFS = devices.getBaseDeviceFS() + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + status.Data.Available = status.Data.Total - status.Data.Used + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + status.Metadata.Available = status.Metadata.Total - status.Metadata.Used + + status.SectorSize = blockSizeInSectors * 512 + + if check, _ := devices.isRealFile(devices.dataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) + if err == nil && actualSpace < status.Data.Available { + status.Data.Available = actualSpace + } + } + + if check, _ := devices.isRealFile(devices.metadataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) + if err == nil && actualSpace < status.Metadata.Available { + status.Metadata.Available = actualSpace + } + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 + } + + return status +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} + return metadata, nil +} + +// NewDeviceSet creates the device set based on the options provided. +func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + foundBlkDiscard := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + + devices.minFreeSpacePercent = uint32(minFreeSpacePercent) + case "dm.xfs_nospace_max_retries": + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + devices.xfsNospaceRetries = val + default: + return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) + } + } + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_doc.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_doc.go new file mode 100644 index 0000000..9ab3e4f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. +// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_test.go new file mode 100644 index 0000000..5c2abce --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_test.go @@ -0,0 +1,110 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +func init() { + // Reduce the size the the base fs and loopback for the tests + defaultDataLoopbackSize = 300 * 1024 * 1024 + defaultMetaDataLoopbackSize = 200 * 1024 * 1024 + defaultBaseFsSize = 300 * 1024 * 1024 + defaultUdevSyncOverride = true + if err := graphtest.InitLoopbacks(); err != nil { + panic(err) + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown +func TestDevmapperSetup(t *testing.T) { + graphtest.GetDriver(t, "devicemapper") +} + +func TestDevmapperCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "devicemapper") +} + +func TestDevmapperCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "devicemapper") +} + +func TestDevmapperCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "devicemapper") +} + +func TestDevmapperTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +func TestDevmapperReduceLoopBackSize(t *testing.T) { + tenMB := int64(10 * 1024 * 1024) + testChangeLoopBackSize(t, -tenMB, defaultDataLoopbackSize, defaultMetaDataLoopbackSize) +} + +func TestDevmapperIncreaseLoopBackSize(t *testing.T) { + tenMB := int64(10 * 1024 * 1024) + testChangeLoopBackSize(t, tenMB, defaultDataLoopbackSize+tenMB, defaultMetaDataLoopbackSize+tenMB) +} + +func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataSize int64) { + driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + defer graphtest.PutDriver(t) + // make sure data or metadata loopback size are the default size + if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) { + t.Fatalf("data or metadata loop back size is incorrect") + } + if err := driver.Cleanup(); err != nil { + t.Fatal(err) + } + //Reload + d, err := Init(driver.home, []string{ + fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta), + fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta), + }, nil, nil) + if err != nil { + t.Fatalf("error creating devicemapper driver: %v", err) + } + driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) { + t.Fatalf("data or metadata loop back size is incorrect") + } + if err := driver.Cleanup(); err != nil { + t.Fatal(err) + } +} + +// Make sure devices.Lock() has been release upon return from cleanupDeletedDevices() function +func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) { + driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + defer graphtest.PutDriver(t) + + // Call cleanupDeletedDevices() and after the call take and release + // DeviceSet Lock. If lock has not been released, this will hang. + driver.DeviceSet.cleanupDeletedDevices() + + doneChan := make(chan bool) + + go func() { + driver.DeviceSet.Lock() + defer driver.DeviceSet.Unlock() + doneChan <- true + }() + + select { + case <-time.After(time.Second * 5): + // Timer expired. That means lock was not released upon + // function return and we are deadlocked. Release lock + // here so that cleanup could succeed and fail the test. + driver.DeviceSet.Unlock() + t.Fatalf("Could not acquire devices lock after call to cleanupDeletedDevices()") + case <-doneChan: + } +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/driver.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/driver.go new file mode 100644 index 0000000..7cf422c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/driver.go @@ -0,0 +1,231 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/go-units" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// GetMetadata returns a map of information about the device. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem. +func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + } + return err +} + +// Exists checks to see if the device exists. +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/mount.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/mount.go new file mode 100644 index 0000000..cca1fe1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/mount.go @@ -0,0 +1,89 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. +func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver.go new file mode 100644 index 0000000..f0bce56 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver.go @@ -0,0 +1,270 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites retuned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + pluginDriver, err := lookupPlugin(name, pg, config) + if err == nil { + return pluginDriver, nil + } + logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver + return GetDriver(name, pg, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". + logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver: %s", name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// isDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. +func isDriverNotSupported(err error) bool { + return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + driversMap[driver] = true + } + } + return driversMap +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_freebsd.go new file mode 100644 index 0000000..2891a84 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_freebsd.go @@ -0,0 +1,19 @@ +package graphdriver + +import "syscall" + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } +) + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_linux.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_linux.go new file mode 100644 index 0000000..5c8d0e2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_linux.go @@ -0,0 +1,135 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/mount" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicEcryptfs filesystem id for eCryptfs + FsMagicEcryptfs = FsMagic(0xf15f) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "btrfs", + "zfs", + "overlay2", + "overlay", + "devicemapper", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicGPFS: "gpfs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicOverlay: "overlayfs", + FsMagicRAMFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicVxFS: "vxfs", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} + +// NewFsChecker returns a checker configured for the provied FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_solaris.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_solaris.go new file mode 100644 index 0000000..7daf01c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_solaris.go @@ -0,0 +1,97 @@ +// +build solaris,cgo + +package graphdriver + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return 0, nil +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewFsChecker returns a checker configured for the provied FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on Solaris. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +//Solaris supports only ZFS for now +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + + cs := C.CString(filepath.Dir(mountPath)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) + C.free(unsafe.Pointer(buf)) + return false, ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return true, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_unsupported.go new file mode 100644 index 0000000..4a87560 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!freebsd,!solaris + +package graphdriver + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "unsupported", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_windows.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_windows.go new file mode 100644 index 0000000..ffd30c2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_windows.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/fsdiff.go b/vendor/github.com/moby/moby/daemon/graphdriver/fsdiff.go new file mode 100644 index 0000000..20826cd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/fsdiff.go @@ -0,0 +1,169 @@ +package graphdriver + +import ( + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphbench_unix.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphbench_unix.go new file mode 100644 index 0000000..def822b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphbench_unix.go @@ -0,0 +1,259 @@ +// +build linux freebsd + +package graphtest + +import ( + "bytes" + "io" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/stringid" +) + +// DriverBenchExists benchmarks calls to exist +func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if !driver.Exists(base) { + b.Fatal("Newly created image doesn't exist") + } + } +} + +// DriverBenchGetEmpty benchmarks calls to get on an empty layer +func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := driver.Get(base, "") + b.StopTimer() + if err != nil { + b.Fatalf("Error getting mount: %s", err) + } + if err := driver.Put(base); err != nil { + b.Fatalf("Error putting mount: %s", err) + } + b.StartTimer() + } +} + +// DriverBenchDiffBase benchmarks calls to diff on a root layer +func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 3); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(base, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffN benchmarks calls to diff on two layers with +// a provided number of files on the lower and upper layers. +func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, bottom, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, top, 6); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffApplyN benchmarks calls to diff and apply together +func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + b.Fatal(err) + } + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + b.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + b.Fatal(err) + } + + b.StartTimer() + + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, "", arch) + if err != nil { + b.Fatal(err) + } + + b.StopTimer() + arch.Close() + + if applyDiffSize != diffSize { + // TODO: enforce this + //b.Fatalf("Apply diff size different, got %d, expected %s", applyDiffSize, diffSize) + } + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + b.Fatal(err) + } + } +} + +// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers. +func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 50); err != nil { + b.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(topLayer, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers. +func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + content := []byte("test content") + if err := addFile(driver, base, "testfile.txt", content); err != nil { + b.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + b.Fatal(err) + } + + root, err := driver.Get(topLayer, "") + if err != nil { + b.Fatal(err) + } + defer driver.Put(topLayer) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + + // Read content + c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt")) + if err != nil { + b.Fatal(err) + } + + b.StopTimer() + if bytes.Compare(c, content) != 0 { + b.Fatalf("Wrong content in file %v, expected %v", c, content) + } + b.StartTimer() + } +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_unix.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_unix.go new file mode 100644 index 0000000..6e952de --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_unix.go @@ -0,0 +1,358 @@ +// +build linux freebsd solaris + +package graphtest + +import ( + "bytes" + "io/ioutil" + "math/rand" + "os" + "path" + "reflect" + "syscall" + "testing" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +var ( + drv *Driver +) + +// Driver conforms to graphdriver.Driver interface and +// contains information such as root and reference count of the number of clients using it. +// This helps in testing drivers added into the framework. +type Driver struct { + graphdriver.Driver + root string + refCount int +} + +func newDriver(t testing.TB, name string, options []string) *Driver { + root, err := ioutil.TempDir("", "docker-graphtest-") + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } + + d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root}) + if err != nil { + t.Logf("graphdriver: %v\n", err) + if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS { + t.Skipf("Driver %s not supported", name) + } + t.Fatal(err) + } + return &Driver{d, root, 1} +} + +func cleanup(t testing.TB, d *Driver) { + if err := drv.Cleanup(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.root) +} + +// GetDriver create a new driver with given name or return an existing driver with the name updating the reference count. +func GetDriver(t testing.TB, name string, options ...string) graphdriver.Driver { + if drv == nil { + drv = newDriver(t, name, options) + } else { + drv.refCount++ + } + return drv +} + +// PutDriver removes the driver if it is no longer used and updates the reference count. +func PutDriver(t testing.TB) { + if drv == nil { + t.Skip("No driver to put!") + } + drv.refCount-- + if drv.refCount == 0 { + cleanup(t, drv) + drv = nil + } +} + +// DriverTestCreateEmpty creates a new image and verifies it is empty and the right metadata +func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + if err := driver.Create("empty", "", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + }() + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") +} + +// DriverTestCreateBase create a base driver and verify. +func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + createBase(t, driver, "Base") + defer func() { + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } + }() + verifyBase(t, driver, "Base") +} + +// DriverTestCreateSnap Create a driver and snap and verify. +func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + createBase(t, driver, "Base") + + defer func() { + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } + }() + + if err := driver.Create("Snap", "Base", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + }() + + verifyBase(t, driver, "Snap") +} + +// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers +func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + content := []byte("test content") + if err := addFile(driver, base, "testfile.txt", content); err != nil { + t.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + t.Fatal(err) + } + + err = checkManyLayers(driver, topLayer, layerCount) + if err != nil { + t.Fatal(err) + } + + if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil { + t.Fatal(err) + } +} + +// DriverTestDiffApply tests diffing and applying produces the same layer +func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + deleteFile := "file-remove.txt" + deleteFileContent := []byte("This file should get removed in upper!") + deleteDir := "var/lib" + + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := addFile(driver, base, deleteFile, deleteFileContent); err != nil { + t.Fatal(err) + } + + if err := addDirectory(driver, base, deleteDir); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := removeAll(driver, upper, deleteFile, deleteDir); err != nil { + t.Fatal(err) + } + + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + t.Fatal(err) + } + + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + t.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := checkFile(driver, diff, deleteFile, deleteFileContent); err != nil { + t.Fatal(err) + } + + arch, err := driver.Diff(upper, base) + if err != nil { + t.Fatal(err) + } + + buf := bytes.NewBuffer(nil) + if _, err := buf.ReadFrom(arch); err != nil { + t.Fatal(err) + } + if err := arch.Close(); err != nil { + t.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes())) + if err != nil { + t.Fatal(err) + } + + if applyDiffSize != diffSize { + t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize) + } + + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteFile); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteDir); err != nil { + t.Fatal(err) + } +} + +// DriverTestChanges tests computed changes on a layer matches changes made +func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, 20, 3); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + expectedChanges, err := changeManyFiles(driver, upper, 20, 6) + if err != nil { + t.Fatal(err) + } + + changes, err := driver.Changes(upper, base) + if err != nil { + t.Fatal(err) + } + + if err = checkChanges(expectedChanges, changes); err != nil { + t.Fatal(err) + } +} + +func writeRandomFile(path string, size uint64) error { + buf := make([]int64, size/8) + + r := rand.NewSource(0) + for i := range buf { + buf[i] = r.Int63() + } + + // Cast to []byte + header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf)) + header.Len *= 8 + header.Cap *= 8 + data := *(*[]byte)(unsafe.Pointer(&header)) + + return ioutil.WriteFile(path, data, 0700) +} + +// DriverTestSetQuota Create a driver and test setting quota. +func DriverTestSetQuota(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + createOpts := &graphdriver.CreateOpts{} + createOpts.StorageOpt = make(map[string]string, 1) + createOpts.StorageOpt["size"] = "50M" + if err := driver.Create("zfsTest", "Base", createOpts); err != nil { + t.Fatal(err) + } + + mountPath, err := driver.Get("zfsTest", "") + if err != nil { + t.Fatal(err) + } + + quota := uint64(50 * units.MiB) + err = writeRandomFile(path.Join(mountPath, "file"), quota*2) + if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT { + t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err) + } + +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_windows.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_windows.go new file mode 100644 index 0000000..a50c521 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_windows.go @@ -0,0 +1 @@ +package graphtest diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil.go new file mode 100644 index 0000000..35bf6d1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil.go @@ -0,0 +1,342 @@ +package graphtest + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "sort" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +func randomContent(size int, seed int64) []byte { + s := rand.NewSource(seed) + content := make([]byte, size) + + for i := 0; i < len(content); i += 7 { + val := s.Int63() + for j := 0; i+j < len(content) && j < 7; j++ { + content[i+j] = byte(val) + val >>= 8 + } + } + + return content +} + +func addFiles(drv graphdriver.Driver, layer string, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil { + return err + } + if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { + return err + } + + return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755) +} + +func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + fileContent, err := ioutil.ReadFile(path.Join(root, filename)) + if err != nil { + return err + } + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + + return nil +} + +func addFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return ioutil.WriteFile(path.Join(root, filename), content, 0755) +} + +func addDirectory(drv graphdriver.Driver, layer, dir string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return os.MkdirAll(path.Join(root, dir), 0755) +} + +func removeAll(drv graphdriver.Driver, layer string, names ...string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for _, filename := range names { + if err := os.RemoveAll(path.Join(root, filename)); err != nil { + return err + } + } + return nil +} + +func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if _, err := os.Stat(path.Join(root, filename)); err == nil { + return fmt.Errorf("file still exists: %s", path.Join(root, filename)) + } else if !os.IsNotExist(err) { + return err + } + + return nil +} + +func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil { + return err + } + } + } + + return nil +} + +func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) ([]archive.Change, error) { + root, err := drv.Get(layer, "") + if err != nil { + return nil, err + } + defer drv.Put(layer) + + changes := []archive.Change{} + for i := 0; i < count; i += 100 { + archiveRoot := fmt.Sprintf("/directory-%d", i) + if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil { + return nil, err + } + for j := 0; i+j < count && j < 100; j++ { + if j == 0 { + changes = append(changes, archive.Change{ + Path: archiveRoot, + Kind: archive.ChangeModify, + }) + } + var change archive.Change + switch j % 3 { + // Update file + case 0: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeModify + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Add file + case 1: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) + change.Kind = archive.ChangeAdd + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Remove file + case 2: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeDelete + if err := os.Remove(path.Join(root, change.Path)); err != nil { + return nil, err + } + } + changes = append(changes, change) + } + } + + return changes, nil +} + +func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + fileContent, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + content := randomContent(64, seed+int64(i+j)) + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + } + } + + return nil +} + +type changeList []archive.Change + +func (c changeList) Less(i, j int) bool { + if c[i].Path == c[j].Path { + return c[i].Kind < c[j].Kind + } + return c[i].Path < c[j].Path +} +func (c changeList) Len() int { return len(c) } +func (c changeList) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +func checkChanges(expected, actual []archive.Change) error { + if len(expected) != len(actual) { + return fmt.Errorf("unexpected number of changes, expected %d, got %d", len(expected), len(actual)) + } + sort.Sort(changeList(expected)) + sort.Sort(changeList(actual)) + + for i := range expected { + if expected[i] != actual[i] { + return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i]) + } + } + + return nil +} + +func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil { + return err + } + layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + if err := os.MkdirAll(layerDir, 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil { + return err + } + + return nil +} + +func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) { + lastLayer := baseLayer + for i := 1; i <= count; i++ { + nextLayer := stringid.GenerateRandomID() + if err := drv.Create(nextLayer, lastLayer, nil); err != nil { + return "", err + } + if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil { + return "", err + } + + lastLayer = nextLayer + + } + return lastLayer, nil +} + +func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id")) + if err != nil { + return err + } + + if bytes.Compare(layerIDBytes, []byte(layer)) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer)) + } + + for i := count; i > 0; i-- { + layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + + thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id")) + if err != nil { + return err + } + if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes) + } + layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id")) + if err != nil { + return err + } + } + return nil +} + +// readDir reads a directory just like ioutil.ReadDir() +// then hides specific files (currently "lost+found") +// so the tests don't "see" it +func readDir(dir string) ([]os.FileInfo, error) { + a, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + b := a[:0] + for _, x := range a { + if x.Name() != "lost+found" { // ext4 always have this dir + b = append(b, x) + } + } + + return b, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil_unix.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil_unix.go new file mode 100644 index 0000000..49b0c2c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil_unix.go @@ -0,0 +1,143 @@ +// +build linux freebsd + +package graphtest + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" +) + +// InitLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. +func InitLoopbacks() error { + statT, err := getBaseLoopStats() + if err != nil { + return err + } + // create at least 8 loopback files, ya, that is a good number + for i := 0; i < 8; i++ { + loopPath := fmt.Sprintf("/dev/loop%d", i) + // only create new loopback files if they don't exist + if _, err := os.Stat(loopPath); err != nil { + if mkerr := syscall.Mknod(loopPath, + uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { + return mkerr + } + os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) + } + } + return nil +} + +// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the +// loop0 device on the system. If it does not exist we assume 0,0,0660 for the +// stat data +func getBaseLoopStats() (*syscall.Stat_t, error) { + loop0, err := os.Stat("/dev/loop0") + if err != nil { + if os.IsNotExist(err) { + return &syscall.Stat_t{ + Uid: 0, + Gid: 0, + Mode: 0660, + }, nil + } + return nil, err + } + return loop0.Sys().(*syscall.Stat_t), nil +} + +func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } + + if fi.Mode()&os.ModePerm != mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } + + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + if stat.Uid != uid { + t.Fatalf("%s no owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } + } +} + +func createBase(t testing.TB, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + if err := driver.CreateReadWrite(name, "", nil); err != nil { + t.Fatal(err) + } + + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + t.Fatal(err) + } + + file := path.Join(dir, "a file") + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } +} + +func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay/copy.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/copy.go new file mode 100644 index 0000000..666a5c0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/copy.go @@ -0,0 +1,174 @@ +// +build linux + +package overlay + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type copyFlags int + +const ( + copyHardlink copyFlags = 1 << iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags copyFlags) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags©Hardlink != 0 { + isHardlink = true + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 0000000..d30fdbf --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,462 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "strconv" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/opencontainers/runc/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") + backingFs = "" +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is a "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When an overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with an empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + supportsDType bool +} + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + supportsDType, err := fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v17.12 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + } +} + +// GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + return rootDir, nil + } + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + var ( + lowerDir = path.Join(d.dir(string(lowerID)), "root") + upperDir = path.Join(dir, "upper") + workDir = path.Join(dir, "work") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + ) + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. +func (d *Driver) Put(id string) error { + // If id has a root, just return + if _, err := os.Stat(path.Join(d.dir(id), "root")); err == nil { + return nil + } + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if err := syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return nil +} + +// ApplyDiff applies the new layer on top of the root, if parent does not exist with will return an ErrApplyDiffFallback error. +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer. This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + return 0, err + } + + options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} + if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_test.go new file mode 100644 index 0000000..34b6d80 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_test.go @@ -0,0 +1,93 @@ +// +build linux + +package overlay + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/archive" +) + +func init() { + // Do not sure chroot to speed run time and allow archive + // errors or hangs to be debugged directly from the test process. + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, "overlay") +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlay") +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlay") +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlay") +} + +func TestOverlay50LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 50, "overlay") +} + +// Fails due to bug in calculating changes after apply +// likely related to https://github.com/docker/docker/issues/21555 +func TestOverlayDiffApply10Files(t *testing.T) { + t.Skipf("Fails to compute changes after apply intermittently") + graphtest.DriverTestDiffApply(t, 10, "overlay") +} + +func TestOverlayChanges(t *testing.T) { + t.Skipf("Fails to compute changes intermittently") + graphtest.DriverTestChanges(t, "overlay") +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, "overlay") +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, "overlay") +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, "overlay") +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, "overlay") +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, "overlay") +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, "overlay") +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, "overlay") +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, "overlay") +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, "overlay") +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 0000000..3dbb4de --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/check.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/check.go new file mode 100644 index 0000000..53a7199 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/check.go @@ -0,0 +1,79 @@ +// +build linux + +package overlay2 + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// hasOpaqueCopyUpBug checks whether the filesystem has a bug +// which copies up the opaque flag when copying up an opaque +// directory. When this bug exists naive diff should be used. +func hasOpaqueCopyUpBug(d string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + if err := syscall.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := syscall.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/mount.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/mount.go new file mode 100644 index 0000000..60e248b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/mount.go @@ -0,0 +1,88 @@ +// +build linux + +package overlay2 + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-mountfrom", mountFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions struct { + Device string + Target string + Type string + Label string + Flag uint32 +} + +func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { + options := &mountOptions{ + Device: device, + Target: target, + Type: mType, + Flag: uint32(flags), + Label: label, + } + + cmd := reexec.Command("docker-mountfrom", dir) + w, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("mountfrom error on pipe creation: %v", err) + } + + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) + } + return nil +} + +// mountfromMain is the entry-point for docker-mountfrom on re-exec. +func mountFromMain() { + runtime.LockOSThread() + flag.Parse() + + var options *mountOptions + + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + if err := os.Chdir(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + + os.Exit(0) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay.go new file mode 100644 index 0000000..4984d68 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay.go @@ -0,0 +1,672 @@ +// +build linux + +package overlay2 + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/daemon/graphdriver/quota" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + units "github.com/docker/go-units" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // untar defines the untar method + untar = chrootarchive.UntarUncompressed +) + +// This backend uses the overlay union filesystem for containers +// with diff directories for each layer. + +// This version of the overlay driver requires at least kernel +// 4.0.0 in order to support mounting multiple diff directories. + +// Each container/image has at least a "diff" directory and "link" file. +// If there is also a "lower" file when there are diff layers +// below as well as "merged" and "work" directories. The "diff" directory +// has the upper layer of the overlay and is used to capture any +// changes to the layer. The "lower" file contains all the lower layer +// mounts separated by ":" and ordered from uppermost to lowermost +// layers. The overlay itself is mounted in the "merged" directory, +// and the "work" dir is needed for overlay to work. + +// The "link" file for each layer contains a unique string for the layer. +// Under the "l" directory at the root there will be a symbolic link +// with that unique string pointing the "diff" directory for the layer. +// The symbolic links are used to reference lower layers in the "lower" +// file and on mount. The links are used to shorten the total length +// of a layer reference without requiring changes to the layer identifier +// or root directory. Mounts are always done relative to root and +// referencing the symbolic links in order to ensure the number of +// lower directories can fit in a single page for making the mount +// syscall. A hard upper limit of 128 lower layers is enforced to ensure +// that mounts do not fail due to length. + +const ( + driverName = "overlay2" + linkDir = "l" + lowerFile = "lower" + maxDepth = 128 + + // idLength represents the number of random characters + // which can be used to create the unique link identifer + // for every layer. If this value is too long then the + // page size limit for the mount command may be exceeded. + // The idLength should be selected such that following equation + // is true (512 is a buffer for label metadata). + // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) + idLength = 26 +) + +type overlayOptions struct { + overrideKernelCheck bool + quota quota.Quota +} + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + quotaCtl *quota.Control + options overlayOptions + naiveDiff graphdriver.DiffDriver + supportsDType bool +} + +var ( + backingFs = "" + projectQuotaSupported = false + + useNaiveDiffLock sync.Once + useNaiveDiffOnly bool +) + +func init() { + graphdriver.Register(driverName, Init) +} + +// Init returns the a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + opts, err := parseOptions(options) + if err != nil { + return nil, err + } + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + // require kernel 4.0.0 to ensure multiple lower dirs are supported + v, err := kernel.GetKernelVersion() + if err != nil { + return nil, err + } + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + return nil, graphdriver.ErrNotSupported + } + logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay2' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + supportsDType, err := fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v17.12 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) + + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) + + return d, nil +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "overlay2.override_kernel_check": + o.overrideKernelCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("overlay2: Unknown option %s\n", key) + } + } + return o, nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func useNaiveDiff(home string) bool { + useNaiveDiffLock.Do(func() { + if err := hasOpaqueCopyUpBug(home); err != nil { + logrus.Warnf("Not using native diff for overlay2: %v", err) + useNaiveDiffOnly = true + } + }) + return useNaiveDiffOnly +} + +func (d *Driver) String() string { + return driverName +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, + } +} + +// GetMetadata returns meta data about the overlay driver such as +// LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := map[string]string{ + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + if len(lowerDirs) > 0 { + metadata["LowerDir"] = strings.Join(lowerDirs, ":") + } + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + + if driver.options.quota.Size > 0 { + // Set container disk quota limit + if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { + return err + } + } + } + + if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + return err + } + + // if no parent directory, done + if parent == "" { + return nil + } + + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower != "" { + if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + return err + } + } + + return nil +} + +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link fileA + parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) + } + if len(lowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lp, err := os.Readlink(path.Join(d.home, s)) + if err != nil { + return nil, err + } + lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + dir := d.dir(id) + lid, err := ioutil.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + diffDir := path.Join(dir, "diff") + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, just return diff directory + if os.IsNotExist(err) { + return diffDir, nil + } + return "", err + } + + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + + workDir := path.Join(dir, "work") + splitLowers := strings.Split(string(lowers), ":") + absLowers := make([]string, len(splitLowers)) + for i, s := range splitLowers { + absLowers[i] = path.Join(d.home, s) + } + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work")) + mountData := label.FormatMountLabel(opts, mountLabel) + mount := syscall.Mount + mountTarget := mergedDir + + pageSize := syscall.Getpagesize() + + // Go can return a larger page size than supported by the system + // as of go 1.7. This will be fixed in 1.8 and this block can be + // removed when building with 1.8. + // See https://github.com/golang/go/commit/1b9499b06989d2831e5b156161d6c07642926ee1 + // See https://github.com/docker/docker/issues/27384 + if pageSize > 4096 { + pageSize = 4096 + } + + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. + if len(mountData) > pageSize { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) + mountData = label.FormatMountLabel(opts, mountLabel) + if len(mountData) > pageSize { + return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) + } + + mount = func(source string, target string, mType string, flags uintptr, label string) error { + return mountFrom(d.home, source, target, mType, flags, label) + } + mountTarget = path.Join(id, "merged") + } + + if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. +func (d *Driver) Put(id string) error { + dir := d.dir(id) + _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, no mount happened and just return directly + if os.IsNotExist(err) { + return nil + } + return err + } + + mountpoint := path.Join(dir, "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if err := syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } + return nil +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (d *Driver) isParent(id, parent string) bool { + lowers, err := d.getLowerDirs(id) + if err != nil { + return false + } + if parent == "" && len(lowers) > 0 { + return false + } + + parentDir := d.dir(parent) + var ld string + if len(lowers) > 0 { + ld = filepath.Dir(lowers[0]) + } + if ld == "" && parent == "" { + return true + } + return ld == parentDir +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + if !d.isParent(id, parent) { + return d.naiveDiff.ApplyDiff(id, parent, diff) + } + + applyDir := d.getDiffPath(id) + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(diff, applyDir, &archive.TarOptions{ + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }); err != nil { + return 0, err + } + + return directory.Size(applyDir) +} + +func (d *Driver) getDiffPath(id string) string { + dir := d.dir(id) + + return path.Join(dir, "diff") +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.DiffSize(id, parent) + } + return directory.Size(d.getDiffPath(id)) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Diff(id, parent) + } + + diffPath := d.getDiffPath(id) + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Changes(id, parent) + } + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. + diffPath := d.getDiffPath(id) + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_test.go new file mode 100644 index 0000000..cf77ff2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_test.go @@ -0,0 +1,121 @@ +// +build linux + +package overlay2 + +import ( + "io/ioutil" + "os" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func init() { + // Do not sure chroot to speed run time and allow archive + // errors or hangs to be debugged directly from the test process. + untar = archive.UntarUncompressed + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer + + reexec.Init() +} + +func cdMountFrom(dir, device, target, mType, label string) error { + wd, err := os.Getwd() + if err != nil { + return err + } + os.Chdir(dir) + defer os.Chdir(wd) + + return syscall.Mount(device, target, mType, 0, label) +} + +func skipIfNaive(t *testing.T) { + td, err := ioutil.TempDir("", "naive-check-") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(td) + + if useNaiveDiff(td) { + t.Skipf("Cannot run test with naive diff") + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, driverName) +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, driverName) +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, driverName) +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, driverName) +} + +func TestOverlay128LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 128, driverName) +} + +func TestOverlayDiffApply10Files(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestDiffApply(t, 10, driverName) +} + +func TestOverlayChanges(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestChanges(t, driverName) +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, driverName) +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, driverName) +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, driverName) +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, driverName) +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, driverName) +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, driverName) +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, driverName) +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, driverName) +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, driverName) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_unsupported.go new file mode 100644 index 0000000..e5ac4ca --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay2 diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/randomid.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/randomid.go new file mode 100644 index 0000000..af5cb65 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/randomid.go @@ -0,0 +1,80 @@ +// +build linux + +package overlay2 + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/Sirupsen/logrus" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlayutils/overlayutils.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlayutils/overlayutils.go new file mode 100644 index 0000000..7491c34 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlayutils/overlayutils.go @@ -0,0 +1,18 @@ +// +build linux + +package overlayutils + +import ( + "errors" + "fmt" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. +func ErrDTypeNotSupported(driver, backingFs string) error { + msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support." + } + msg += " Running without d_type support will no longer be supported in Docker 17.12." + return errors.New(msg) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/plugin.go b/vendor/github.com/moby/moby/daemon/graphdriver/plugin.go new file mode 100644 index 0000000..7294bcc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/plugin.go @@ -0,0 +1,43 @@ +package graphdriver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/plugin/v2" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. + Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.ACQUIRE) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + proxy := &graphDriverProxy{name, pl} + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/proxy.go b/vendor/github.com/moby/moby/daemon/graphdriver/proxy.go new file mode 100644 index 0000000..bfe74cc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/proxy.go @@ -0,0 +1,252 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Create", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return filepath.Join(d.p.BasePath(), ret.Dir), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.p.Client().Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/quota/projectquota.go b/vendor/github.com/moby/moby/daemon/graphdriver/quota/projectquota.go new file mode 100644 index 0000000..e408d5f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/quota/projectquota.go @@ -0,0 +1,339 @@ +// +build linux + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. +// +// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR +// for both xfs/ext4 for kernel version >= v4.5 +// + +package quota + +/* +#include +#include +#include +#include +#include + +#ifndef FS_XFLAG_PROJINHERIT +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + unsigned char fsx_pad[12]; +}; +#define FS_XFLAG_PROJINHERIT 0x00000200 +#endif +#ifndef FS_IOC_FSGETXATTR +#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) +#endif +#ifndef FS_IOC_FSSETXATTR +#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) +#endif + +#ifndef PRJQUOTA +#define PRJQUOTA 2 +#endif +#ifndef XFS_PROJ_QUOTA +#define XFS_PROJ_QUOTA 2 +#endif +#ifndef Q_XSETPQLIM +#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) +#endif +#ifndef Q_XGETPQUOTA +#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) +#endif +*/ +import "C" +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// Quota limit params - currently we only control blocks hard limit +type Quota struct { + Size uint64 +} + +// Control - Context to be used by storage driver (e.g. overlay) +// who wants to apply project quotas to container dirs +type Control struct { + backingFsBlockDev string + nextProjectID uint32 + quotas map[string]uint32 +} + +// NewControl - initialize project quota support. +// Test to make sure that quota can be set on a test dir and find +// the first project id to be used for the next container create. +// +// Returns nil (and error) if project quota is not supported. +// +// First get the project id of the home directory. +// This test will fail if the backing fs is not xfs. +// +// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: +// echo 999:/var/lib/docker/overlay2 >> /etc/projects +// echo docker:999 >> /etc/projid +// xfs_quota -x -c 'project -s docker' / +// +// In that case, the home directory project id will be used as a "start offset" +// and all containers will be assigned larger project ids (e.g. >= 1000). +// This is a way to prevent xfs_quota management from conflicting with docker. +// +// Then try to create a test directory with the next project id and set a quota +// on it. If that works, continue to scan existing containers to map allocated +// project ids. +// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + minProjectID++ + + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.XFS_PROJ_QUOTA + + d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + return fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + var d C.fs_disk_quota_t + + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + + return nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID(home string) error { + files, err := ioutil.ReadDir(home) + if err != nil { + return fmt.Errorf("read directory failed : %s", home) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(home, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas[path] = projid + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + fileinfo, err := os.Stat(home) + if err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, "backingFsBlockDev") + // Re-create just in case comeone copied the home directory over to a new device + syscall.Unlink(backingFsBlockDev) + stat := fileinfo.Sys().(*syscall.Stat_t) + if err := syscall.Mknod(backingFsBlockDev, syscall.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_aufs.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_aufs.go new file mode 100644 index 0000000..262954d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/aufs" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_btrfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_btrfs.go new file mode 100644 index 0000000..f456cc5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_devicemapper.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_devicemapper.go new file mode 100644 index 0000000..bb2e9ef --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_overlay.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 0000000..9ba849c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,9 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" + _ "github.com/docker/docker/daemon/graphdriver/overlay2" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_vfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 0000000..98fad23 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_windows.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_windows.go new file mode 100644 index 0000000..efaa500 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/docker/docker/daemon/graphdriver/windows" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_zfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_zfs.go new file mode 100644 index 0000000..8f34e35 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris + +package register + +import ( + // register the zfs driver + _ "github.com/docker/docker/daemon/graphdriver/zfs" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/vfs/driver.go b/vendor/github.com/moby/moby/daemon/graphdriver/vfs/driver.go new file mode 100644 index 0000000..8832d11 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,145 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + dir := d.dir(id) + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/vfs/vfs_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 0000000..9ecf21d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,37 @@ +// +build linux + +package vfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/windows/windows.go b/vendor/github.com/moby/moby/daemon/graphdriver/windows/windows.go new file mode 100644 index 0000000..54641cb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/windows/windows.go @@ -0,0 +1,903 @@ +//+build windows + +package windows + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/longpath" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + units "github.com/docker/go-units" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph drivers to the register. +func init() { + graphdriver.Register("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. +func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = syscall.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = syscall.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + osv := system.GetOSVersion() + for { + // Get and terminate any template VMs that are currently using the layer. + // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. + // + // We have a retry loop for ErrVmcomputeOperationInvalidState and + // ErrVmcomputeOperationAccessIsDenied as there is a race condition + // in RS1 and RS2 building during enumeration when a silo is going away + // for example under it, in HCS. AccessIsDenied added to fix 30278. + // + // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider + // using platform APIs (if available) to get this more succinctly. Also + // consider enlighting the Remove() interface to have context of why + // the remove is being called - that could improve efficiency by not + // enumerating compute systems during a remove of a container as it's + // not required. + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if (osv.Build < 15139) && + ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { + if retryCount >= 500 { + break + } + retryCount++ + time.Sleep(10 * time.Millisecond) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. +func (d *Driver) Get(id, mountLabel string) (string, error) { + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) + var dir string + + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } + + // Getting the layer paths must be done outside of the lock. + layerChain, err := d.getLayerChain(rID) + if err != nil { + d.ctr.Decrement(rID) + return "", err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + d.ctr.Decrement(rID) + return "", err + } + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) + if err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + d.cacheMu.Lock() + d.cache[rID] = mountPath + d.cacheMu.Unlock() + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + return dir, nil +} + +// Put adds a new layer to the driver. +func (d *Driver) Put(id string) error { + logrus.Debugf("WindowsGraphDriver Put() id %s", id) + + rID, err := d.resolveID(id) + if err != nil { + return err + } + if count := d.ctr.Decrement(rID); count > 0 { + return nil + } + d.cacheMu.Lock() + delete(d.cache, rID) + d.cacheMu.Unlock() + + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return err + } + return hcsshim.DeactivateLayer(d.info, rID) +} + +// Cleanup ensures the information the driver stores is properly removed. +func (d *Driver) Cleanup() error { + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +// The layer should be mounted when calling this function +func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) { + rID, err := d.resolveID(id) + if err != nil { + return + } + + layerChain, err := d.getLayerChain(rID) + if err != nil { + return + } + + // this is assuming that the layer is unmounted + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return nil, err + } + prepare := func() { + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + logrus.Warnf("Failed to Deactivate %s: %s", rID, err) + } + } + + arch, err := d.exportLayer(rID, layerChain) + if err != nil { + prepare() + return + } + return ioutils.NewReadCloserWrapper(arch, func() error { + err := arch.Close() + prepare() + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +// The layer should not be mounted when calling this function. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + rID, err := d.resolveID(id) + if err != nil { + return nil, err + } + parentChain, err := d.getLayerChain(rID) + if err != nil { + return nil, err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + return nil, err + } + defer func() { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) + } + }() + + var changes []archive.Change + err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentChain) + if err != nil { + return err + } + defer r.Close() + + for { + name, _, fileInfo, err := r.Next() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + name = filepath.ToSlash(name) + if fileInfo == nil { + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) + } else { + // Currently there is no way to tell between an add and a modify. + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) + } + } + }) + if err != nil { + return nil, err + } + + return changes, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +// The layer should not be mounted when calling this function +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var layerChain []string + if parent != "" { + rPId, err := d.resolveID(parent) + if err != nil { + return 0, err + } + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return 0, err + } + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return 0, err + } + layerChain = append(layerChain, parentPath) + layerChain = append(layerChain, parentChain...) + } + + size, err := d.importLayer(id, diff, layerChain) + if err != nil { + return 0, err + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return 0, err + } + + return size, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, rPId) + if err != nil { + return + } + + layerFs, err := d.Get(id, "") + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. + hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. +func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) + output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return + } + + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + } + + return strconv.ParseInt(output.String(), 10, 64) + } + return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) +} + +// writeLayerReexec is the re-exec entry point for writing a layer from a tar file +func writeLayerReexec() { + size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + fmt.Fprint(os.Stdout, size) +} + +// writeLayer writes a layer from a tar file. +func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { + err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return 0, err + } + if noreexec { + defer func() { + if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { + // This should never happen, but just in case when in debugging mode. + // See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale. + panic("Failed to disabled process privileges while in non re-exec mode") + } + }() + } + + info := hcsshim.DriverInfo{ + Flavour: filterDriver, + HomeDir: home, + } + + w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) + if err != nil { + return 0, err + } + + size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id)) + if err != nil { + return 0, err + } + + err = w.Close() + if err != nil { + return 0, err + } + + return size, nil +} + +// resolveID computes the layerID information based on the given id. +func (d *Driver) resolveID(id string) (string, error) { + content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) + if os.IsNotExist(err) { + return id, nil + } else if err != nil { + return "", err + } + return string(content), nil +} + +// setID stores the layerId in disk. +func (d *Driver) setID(id, altID string) error { + err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + if err != nil { + return err + } + return nil +} + +// getLayerChain returns the layer chain information. +func (d *Driver) getLayerChain(id string) ([]string, error) { + jPath := filepath.Join(d.dir(id), "layerchain.json") + content, err := ioutil.ReadFile(jPath) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("Unable to read layerchain file - %s", err) + } + + var layerChain []string + err = json.Unmarshal(content, &layerChain) + if err != nil { + return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) + } + + return layerChain, nil +} + +// setLayerChain stores the layer chain information in disk. +func (d *Driver) setLayerChain(id string, chain []string) error { + content, err := json.Marshal(&chain) + if err != nil { + return fmt.Errorf("Failed to marshall layerchain json - %s", err) + } + + jPath := filepath.Join(d.dir(id), "layerchain.json") + err = ioutil.WriteFile(jPath, content, 0600) + if err != nil { + return fmt.Errorf("Unable to write layerchain file - %s", err) + } + + return nil +} + +type fileGetCloserWithBackupPrivileges struct { + path string +} + +func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { + if backupPath, ok := mutatedFiles[filename]; ok { + return os.Open(filepath.Join(fg.path, backupPath)) + } + + var f *os.File + // Open the file while holding the Windows backup privilege. This ensures that the + // file can be opened even if the caller does not actually have access to it according + // to the security descriptor. + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + path := longpath.AddPrefix(filepath.Join(fg.path, filename)) + p, err := syscall.UTF16FromString(path) + if err != nil { + return err + } + h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return &os.PathError{Op: "open", Path: path, Err: err} + } + f = os.NewFile(uintptr(h), path) + return nil + }) + return f, err +} + +func (fg *fileGetCloserWithBackupPrivileges) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + id, err := d.resolveID(id) + if err != nil { + return nil, err + } + + return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil +} + +type storageOptions struct { + size uint64 +} + +func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { + options := storageOptions{} + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + options.size = uint64(size) + default: + return nil, fmt.Errorf("Unknown storage option: %s", key) + } + } + return &options, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/MAINTAINERS b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/MAINTAINERS new file mode 100644 index 0000000..9c270c5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/MAINTAINERS @@ -0,0 +1,2 @@ +Jörg Thalheim (@Mic92) +Arthur Gautier (@baloose) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs.go new file mode 100644 index 0000000..8e283cc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs.go @@ -0,0 +1,417 @@ +// +build linux freebsd solaris + +package zfs + +import ( + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + zfs "github.com/mistifyio/go-zfs" + "github.com/opencontainers/runc/libcontainer/label" +) + +type zfsOptions struct { + fsName string + mountPath string +} + +func init() { + graphdriver.Register("zfs", Init) +} + +// Logger returns a zfs logger implementation. +type Logger struct{} + +// Log wraps log message from ZFS driver with a prefix '[zfs]'. +func (*Logger) Log(cmd []string) { + logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) +} + +// Init returns a new ZFS driver. +// It takes base mount path and an array of options which are represented as key value pairs. +// Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options. +func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + var err error + + if _, err := exec.LookPath("zfs"); err != nil { + logrus.Debugf("[zfs] zfs command is not available: %v", err) + return nil, graphdriver.ErrPrerequisites + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) + if err != nil { + logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) + return nil, graphdriver.ErrPrerequisites + } + defer file.Close() + + options, err := parseOptions(opt) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, fmt.Errorf("Failed to get root uid/guid: %v", err) + } + if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + return nil, fmt.Errorf("Failed to create '%s': %v", base, err) + } + + if err := mount.MakePrivate(base); err != nil { + return nil, err + } + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := syscall.Stat(m.Mountpoint, &stat); err != nil { + logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.Fstype == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. +type Driver struct { + dataset *zfs.Dataset + options zfsOptions + sync.Mutex // protects filesystem cache against concurrent access + filesystemsCache map[string]bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +func (d *Driver) String() string { + return "zfs" +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Status returns information about the ZFS filesystem. It returns a two dimensional array of information +// such as pool name, dataset name, disk usage, parent quota and compression used. +// Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', +// 'Space Available', 'Parent Quota' and 'Compression'. +func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health + } else { + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } + + return [][2]string{ + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +// GetMetadata returns image/container metadata related to graph driver +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +func (d *Driver) cloneFilesystem(name, parentName string) error { + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} + snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) + if err != nil { + return err + } + + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) + if err == nil { + d.Lock() + d.filesystemsCache[name] = true + d.Unlock() + } + + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + return snapshot.Destroy(zfs.DestroyDeferDeletion) +} + +func (d *Driver) zfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) mountPath(id string) string { + return path.Join(d.options.mountPath, "graph", getMountpoint(id)) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + err := d.create(id, parent, storageOpt) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, storageOpt) +} + +func (d *Driver) create(id, parent string, storageOpt map[string]string) error { + name := d.zfsPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. +func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + + filesystem := d.zfsPath(id) + options := label.FormatMountLabel("", mountLabel) + logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + // this could be our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + mount.Unmount(mountpoint) + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) + if err != nil || !mounted { + return err + } + + logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) + + if err := mount.Unmount(mountpoint); err != nil { + return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) + } + return nil +} + +// Exists checks to see if the cache entry exists for the given id. +func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] == true +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_freebsd.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_freebsd.go new file mode 100644 index 0000000..1c05fa7 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_freebsd.go @@ -0,0 +1,38 @@ +package zfs + +import ( + "fmt" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return id[:maxlen] + "-" + suffix[1] + } + + return id[:maxlen] +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_linux.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_linux.go new file mode 100644 index 0000000..52ed516 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_linux.go @@ -0,0 +1,27 @@ +package zfs + +import ( + "fmt" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_solaris.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_solaris.go new file mode 100644 index 0000000..bb4a85b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_solaris.go @@ -0,0 +1,59 @@ +// +build solaris,cgo + +package zfs + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "strings" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + + cs := C.CString(filepath.Dir(rootdir)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + C.free(unsafe.Pointer(buf)) + return graphdriver.ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return nil +} + +/* rootfs is introduced to comply with the OCI spec +which states that root filesystem must be mounted at /rootfs/ instead of / +*/ +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root") + } + + return filepath.Join(id[:maxlen], "rootfs", "root") +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_test.go new file mode 100644 index 0000000..3e22928 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_test.go @@ -0,0 +1,35 @@ +// +build linux + +package zfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestZfsSetup and TestZfsTeardown +func TestZfsSetup(t *testing.T) { + graphtest.GetDriver(t, "zfs") +} + +func TestZfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "zfs") +} + +func TestZfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "zfs") +} + +func TestZfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "zfs") +} + +func TestZfsSetQuota(t *testing.T) { + graphtest.DriverTestSetQuota(t, "zfs") +} + +func TestZfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_unsupported.go new file mode 100644 index 0000000..ce8daad --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/moby/moby/daemon/health.go b/vendor/github.com/moby/moby/daemon/health.go new file mode 100644 index 0000000..5b01dc0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/health.go @@ -0,0 +1,341 @@ +package daemon + +import ( + "bytes" + "fmt" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +const ( + // Longest healthcheck probe output message to store. Longer messages will be truncated. + maxOutputLen = 4096 + + // Default interval between probe runs (from the end of the first to the start of the second). + // Also the time before the first probe. + defaultProbeInterval = 30 * time.Second + + // The maximum length of time a single probe run should take. If the probe takes longer + // than this, the check is considered to have failed. + defaultProbeTimeout = 30 * time.Second + + // Default number of consecutive failures of the health check + // for the container to be considered unhealthy. + defaultProbeRetries = 3 + + // Maximum number of entries to record + maxLogEntries = 5 +) + +const ( + // Exit status codes that can be returned by the probe command. + + exitStatusHealthy = 0 // Container is healthy + exitStatusUnhealthy = 1 // Container is unhealthy +) + +// probe implementations know how to run a particular type of probe. +type probe interface { + // Perform one run of the check. Returns the exit code and an optional + // short diagnostic string. + run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error) +} + +// cmdProbe implements the "CMD" probe type. +type cmdProbe struct { + // Run the command with the system's default shell instead of execing it directly. + shell bool +} + +// exec the healthcheck command in the container. +// Returns the exit code and probe output (if any) +func (p *cmdProbe) run(ctx context.Context, d *Daemon, container *container.Container) (*types.HealthcheckResult, error) { + + cmdSlice := strslice.StrSlice(container.Config.Healthcheck.Test)[1:] + if p.shell { + cmdSlice = append(getShell(container.Config), cmdSlice...) + } + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) + execConfig := exec.NewConfig() + execConfig.OpenStdin = false + execConfig.OpenStdout = true + execConfig.OpenStderr = true + execConfig.ContainerID = container.ID + execConfig.DetachKeys = []byte{} + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = false + execConfig.Privileged = false + execConfig.User = container.Config.User + + d.registerExecCommand(container, execConfig) + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + output := &limitedBuffer{} + err := d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) + if err != nil { + return nil, err + } + info, err := d.getExecConfig(execConfig.ID) + if err != nil { + return nil, err + } + if info.ExitCode == nil { + return nil, fmt.Errorf("Healthcheck for container %s has no exit code!", container.ID) + } + // Note: Go's json package will handle invalid UTF-8 for us + out := output.String() + return &types.HealthcheckResult{ + End: time.Now(), + ExitCode: *info.ExitCode, + Output: out, + }, nil +} + +// Update the container's Status.Health struct based on the latest probe's result. +func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult, done chan struct{}) { + c.Lock() + defer c.Unlock() + + // probe may have been cancelled while waiting on lock. Ignore result then + select { + case <-done: + return + default: + } + + retries := c.Config.Healthcheck.Retries + if retries <= 0 { + retries = defaultProbeRetries + } + + h := c.State.Health + oldStatus := h.Status + + if len(h.Log) >= maxLogEntries { + h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) + } else { + h.Log = append(h.Log, result) + } + + if result.ExitCode == exitStatusHealthy { + h.FailingStreak = 0 + h.Status = types.Healthy + } else { + // Failure (including invalid exit code) + h.FailingStreak++ + if h.FailingStreak >= retries { + h.Status = types.Unhealthy + } + // Else we're starting or healthy. Stay in that state. + } + + if oldStatus != h.Status { + d.LogContainerEvent(c, "health_status: "+h.Status) + } +} + +// Run the container's monitoring thread until notified via "stop". +// There is never more than one monitor thread running per container at a time. +func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) { + probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout) + probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval) + for { + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID) + return + case <-time.After(probeInterval): + logrus.Debugf("Running health check for container %s ...", c.ID) + startTime := time.Now() + ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) + results := make(chan *types.HealthcheckResult) + go func() { + healthChecksCounter.Inc() + result, err := probe.run(ctx, d, c) + if err != nil { + healthChecksFailedCounter.Inc() + logrus.Warnf("Health check for container %s error: %v", c.ID, err) + results <- &types.HealthcheckResult{ + ExitCode: -1, + Output: err.Error(), + Start: startTime, + End: time.Now(), + } + } else { + result.Start = startTime + logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode) + results <- result + } + close(results) + }() + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) + // Stop timeout and kill probe, but don't wait for probe to exit. + cancelProbe() + return + case result := <-results: + handleProbeResult(d, c, result, stop) + // Stop timeout + cancelProbe() + case <-ctx.Done(): + logrus.Debugf("Health check for container %s taking too long", c.ID) + handleProbeResult(d, c, &types.HealthcheckResult{ + ExitCode: -1, + Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout), + Start: startTime, + End: time.Now(), + }, stop) + cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). + <-results + } + } + } +} + +// Get a suitable probe implementation for the container's healthcheck configuration. +// Nil will be returned if no healthcheck was configured or NONE was set. +func getProbe(c *container.Container) probe { + config := c.Config.Healthcheck + if config == nil || len(config.Test) == 0 { + return nil + } + switch config.Test[0] { + case "CMD": + return &cmdProbe{shell: false} + case "CMD-SHELL": + return &cmdProbe{shell: true} + default: + logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) + return nil + } +} + +// Ensure the health-check monitor is running or not, depending on the current +// state of the container. +// Called from monitor.go, with c locked. +func (d *Daemon) updateHealthMonitor(c *container.Container) { + h := c.State.Health + if h == nil { + return // No healthcheck configured + } + + probe := getProbe(c) + wantRunning := c.Running && !c.Paused && probe != nil + if wantRunning { + if stop := h.OpenMonitorChannel(); stop != nil { + go monitor(d, c, stop, probe) + } + } else { + h.CloseMonitorChannel() + } +} + +// Reset the health state for a newly-started, restarted or restored container. +// initHealthMonitor is called from monitor.go and we should never be running +// two instances at once. +// Called with c locked. +func (d *Daemon) initHealthMonitor(c *container.Container) { + // If no healthcheck is setup then don't init the monitor + if getProbe(c) == nil { + return + } + + // This is needed in case we're auto-restarting + d.stopHealthchecks(c) + + if h := c.State.Health; h != nil { + h.Status = types.Starting + h.FailingStreak = 0 + } else { + h := &container.Health{} + h.Status = types.Starting + c.State.Health = h + } + + d.updateHealthMonitor(c) +} + +// Called when the container is being stopped (whether because the health check is +// failing or for any other reason). +func (d *Daemon) stopHealthchecks(c *container.Container) { + h := c.State.Health + if h != nil { + h.CloseMonitorChannel() + } +} + +// Buffer up to maxOutputLen bytes. Further data is discarded. +type limitedBuffer struct { + buf bytes.Buffer + mu sync.Mutex + truncated bool // indicates that data has been lost +} + +// Append to limitedBuffer while there is room. +func (b *limitedBuffer) Write(data []byte) (int, error) { + b.mu.Lock() + defer b.mu.Unlock() + + bufLen := b.buf.Len() + dataLen := len(data) + keep := min(maxOutputLen-bufLen, dataLen) + if keep > 0 { + b.buf.Write(data[:keep]) + } + if keep < dataLen { + b.truncated = true + } + return dataLen, nil +} + +// The contents of the buffer, with "..." appended if it overflowed. +func (b *limitedBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + + out := b.buf.String() + if b.truncated { + out = out + "..." + } + return out +} + +// If configuredValue is zero, use defaultValue instead. +func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration { + if configuredValue == 0 { + return defaultValue + } + return configuredValue +} + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func getShell(config *containertypes.Config) []string { + if len(config.Shell) != 0 { + return config.Shell + } + if runtime.GOOS != "windows" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/vendor/github.com/moby/moby/daemon/health_test.go b/vendor/github.com/moby/moby/daemon/health_test.go new file mode 100644 index 0000000..7e82115 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/health_test.go @@ -0,0 +1,118 @@ +package daemon + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func reset(c *container.Container) { + c.State = &container.State{} + c.State.Health = &container.Health{} + c.State.Health.Status = types.Starting +} + +func TestNoneHealthcheck(t *testing.T) { + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Healthcheck: &containertypes.HealthConfig{ + Test: []string{"NONE"}, + }, + }, + State: &container.State{}, + }, + } + daemon := &Daemon{} + + daemon.initHealthMonitor(c) + if c.State.Health != nil { + t.Errorf("Expecting Health to be nil, but was not") + } +} + +func TestHealthStates(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + expect := func(expected string) { + select { + case event := <-l: + ev := event.(eventtypes.Message) + if ev.Status != expected { + t.Errorf("Expecting event %#v, but got %#v\n", expected, ev.Status) + } + case <-time.After(1 * time.Second): + t.Errorf("Expecting event %#v, but got nothing\n", expected) + } + } + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + + c.Config.Healthcheck = &containertypes.HealthConfig{ + Retries: 1, + } + + reset(c) + + handleResult := func(startTime time.Time, exitCode int) { + handleProbeResult(daemon, c, &types.HealthcheckResult{ + Start: startTime, + End: startTime, + ExitCode: exitCode, + }, nil) + } + + // starting -> failed -> success -> failed + + handleResult(c.State.StartedAt.Add(1*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(2*time.Second), 0) + expect("health_status: healthy") + + handleResult(c.State.StartedAt.Add(3*time.Second), 1) + expect("health_status: unhealthy") + + // Test retries + + reset(c) + c.Config.Healthcheck.Retries = 3 + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + handleResult(c.State.StartedAt.Add(40*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 2 { + t.Errorf("Expecting FailingStreak=2, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(60*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } +} diff --git a/vendor/github.com/moby/moby/daemon/image.go b/vendor/github.com/moby/moby/daemon/image.go new file mode 100644 index 0000000..32a8d77 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image.go @@ -0,0 +1,76 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + RefOrID string +} + +func (e ErrImageDoesNotExist) Error() string { + return fmt.Sprintf("no such id: %s", e.RefOrID) +} + +// GetImageID returns an image ID corresponding to the image referred to by +// refOrID. +func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { + id, ref, err := reference.ParseIDOrReference(refOrID) + if err != nil { + return "", err + } + if id != "" { + if _, err := daemon.imageStore.Get(image.IDFromDigest(id)); err != nil { + return "", ErrImageDoesNotExist{refOrID} + } + return image.IDFromDigest(id), nil + } + + if id, err := daemon.referenceStore.Get(ref); err == nil { + return image.IDFromDigest(id), nil + } + + // deprecated: repo:shortid https://github.com/docker/docker/pull/799 + if tagged, ok := ref.(reference.NamedTagged); ok { + if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) { + if id, err := daemon.imageStore.Search(tag); err == nil { + for _, namedRef := range daemon.referenceStore.References(id.Digest()) { + if namedRef.Name() == ref.Name() { + return id, nil + } + } + } + } + } + + // Search based on ID + if id, err := daemon.imageStore.Search(refOrID); err == nil { + return id, nil + } + + return "", ErrImageDoesNotExist{refOrID} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. +func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, err := daemon.GetImageID(refOrID) + if err != nil { + return nil, err + } + return daemon.imageStore.Get(imgID) +} + +// GetImageOnBuild looks up a Docker image referenced by `name`. +func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + return img, nil +} diff --git a/vendor/github.com/moby/moby/daemon/image_delete.go b/vendor/github.com/moby/moby/daemon/image_delete.go new file mode 100644 index 0000000..3e3c142 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_delete.go @@ -0,0 +1,412 @@ +package daemon + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" +) + +type conflictType int + +const ( + conflictDependentChild conflictType = (1 << iota) + conflictRunningContainer + conflictActiveReference + conflictStoppedContainer + conflictHard = conflictDependentChild | conflictRunningContainer + conflictSoft = conflictActiveReference | conflictStoppedContainer +) + +// ImageDelete deletes the image referenced by the given imageRef from this +// daemon. The given imageRef can be an image ID, ID prefix, or a repository +// reference (with an optional tag or digest, defaulting to the tag name +// "latest"). There is differing behavior depending on whether the given +// imageRef is a repository reference or not. +// +// If the given imageRef is a repository reference then that repository +// reference will be removed. However, if there exists any containers which +// were created using the same image reference then the repository reference +// cannot be removed unless either there are other repository references to the +// same image or force is true. Following removal of the repository reference, +// the referenced image itself will attempt to be deleted as described below +// but quietly, meaning any image delete conflicts will cause the image to not +// be deleted and the conflict will not be reported. +// +// There may be conflicts preventing deletion of an image and these conflicts +// are divided into two categories grouped by their severity: +// +// Hard Conflict: +// - a pull or build using the image. +// - any descendant image. +// - any running container using the image. +// +// Soft Conflict: +// - any stopped container using the image. +// - any repository tag or digest references to the image. +// +// The image cannot be removed if there are any hard conflicts and can be +// removed if there are soft conflicts only if force is true. +// +// If prune is true, ancestor images will each attempt to be deleted quietly, +// meaning any delete conflicts will cause the image to not be deleted and the +// conflict will not be reported. +// +// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph +// package. This would require that we no longer need the daemon to determine +// whether images are being used by a stopped or running container. +func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { + start := time.Now() + records := []types.ImageDelete{} + + imgID, err := daemon.GetImageID(imageRef) + if err != nil { + return nil, daemon.imageNotExistToErrcode(err) + } + + repoRefs := daemon.referenceStore.References(imgID.Digest()) + + var removedRepositoryRef bool + if !isImageIDPrefix(imgID.String(), imageRef) { + // A repository reference was given and should be removed + // first. We can only remove this reference if either force is + // true, there are multiple repository references to this + // image, or there are no containers using the given reference. + if !force && isSingleReference(repoRefs) { + if container := daemon.getContainerUsingImage(imgID); container != nil { + // If we removed the repository reference then + // this image would remain "dangling" and since + // we really want to avoid that the client must + // explicitly force its removal. + err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + return nil, errors.NewRequestConflictError(err) + } + } + + parsedRef, err := reference.ParseNamed(imageRef) + if err != nil { + return nil, err + } + + parsedRef, err = daemon.removeImageRef(parsedRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + + repoRefs = daemon.referenceStore.References(imgID.Digest()) + + // If a tag reference was removed and the only remaining + // references to the same repository are digest references, + // then clean up those digest references. + if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { + foundRepoTagRef := false + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + foundRepoTagRef = true + break + } + } + if !foundRepoTagRef { + // Remove canonical references from same repository + remainingRefs := []reference.Named{} + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + if _, err := daemon.removeImageRef(repoRef); err != nil { + return records, err + } + + untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} + records = append(records, untaggedRecord) + } else { + remainingRefs = append(remainingRefs, repoRef) + + } + } + repoRefs = remainingRefs + } + } + + // If it has remaining references then the untag finished the remove + if len(repoRefs) > 0 { + return records, nil + } + + removedRepositoryRef = true + } else { + // If an ID reference was given AND there is at most one tag + // reference to the image AND all references are within one + // repository, then remove all references. + if isSingleReference(repoRefs) { + c := conflictHard + if !force { + c |= conflictSoft &^ conflictActiveReference + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + return nil, conflict + } + + for _, repoRef := range repoRefs { + parsedRef, err := daemon.removeImageRef(repoRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + } + } + } + + if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { + return nil, err + } + + imageActions.WithValues("delete").UpdateSince(start) + + return records, nil +} + +// isSingleReference returns true when all references are from one repository +// and there is at most one tag. Returns false for empty input. +func isSingleReference(repoRefs []reference.Named) bool { + if len(repoRefs) <= 1 { + return len(repoRefs) == 1 + } + var singleRef reference.Named + canonicalRefs := map[string]struct{}{} + for _, repoRef := range repoRefs { + if _, isCanonical := repoRef.(reference.Canonical); isCanonical { + canonicalRefs[repoRef.Name()] = struct{}{} + } else if singleRef == nil { + singleRef = repoRef + } else { + return false + } + } + if singleRef == nil { + // Just use first canonical ref + singleRef = repoRefs[0] + } + _, ok := canonicalRefs[singleRef.Name()] + return len(canonicalRefs) == 1 && ok +} + +// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the +// given imageID. +func isImageIDPrefix(imageID, possiblePrefix string) bool { + if strings.HasPrefix(imageID, possiblePrefix) { + return true + } + + if i := strings.IndexRune(imageID, ':'); i >= 0 { + return strings.HasPrefix(imageID[i+1:], possiblePrefix) + } + + return false +} + +// getContainerUsingImage returns a container that was created using the given +// imageID. Returns nil if there is no such container. +func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { + return daemon.containers.First(func(c *container.Container) bool { + return c.ImageID == imageID + }) +} + +// removeImageRef attempts to parse and remove the given image reference from +// this daemon's store of repository tag/digest references. The given +// repositoryRef must not be an image ID but a repository name followed by an +// optional tag or digest reference. If tag or digest is omitted, the default +// tag is used. Returns the resolved image reference and an error. +func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { + ref = reference.WithDefaultTag(ref) + // Ignore the boolean value returned, as far as we're concerned, this + // is an idempotent operation and it's okay if the reference didn't + // exist in the first place. + _, err := daemon.referenceStore.Delete(ref) + + return ref, err +} + +// removeAllReferencesToImageID attempts to remove every reference to the given +// imgID from this daemon's store of repository tag/digest references. Returns +// on the first encountered error. Removed references are logged to this +// daemon's event service. An "Untagged" types.ImageDelete is added to the +// given list of records. +func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { + imageRefs := daemon.referenceStore.References(imgID.Digest()) + + for _, imageRef := range imageRefs { + parsedRef, err := daemon.removeImageRef(imageRef) + if err != nil { + return err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + *records = append(*records, untaggedRecord) + } + + return nil +} + +// ImageDeleteConflict holds a soft or hard conflict and an associated error. +// Implements the error interface. +type imageDeleteConflict struct { + hard bool + used bool + imgID image.ID + message string +} + +func (idc *imageDeleteConflict) Error() string { + var forceMsg string + if idc.hard { + forceMsg = "cannot be forced" + } else { + forceMsg = "must be forced" + } + + return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) +} + +// imageDeleteHelper attempts to delete the given image from this daemon. If +// the image has any hard delete conflicts (child images or running containers +// using the image) then it cannot be deleted. If the image has any soft delete +// conflicts (any tags/digests referencing the image or any stopped container +// using the image) then it can only be deleted if force is true. If the delete +// succeeds and prune is true, the parent images are also deleted if they do +// not have any soft or hard delete conflicts themselves. Any deleted images +// and untagged references are appended to the given records. If any error or +// conflict is encountered, it will be returned immediately without deleting +// the image. If quiet is true, any encountered conflicts will be ignored and +// the function will return nil immediately without deleting the image. +func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { + // First, determine if this image has any conflicts. Ignore soft conflicts + // if force is true. + c := conflictHard + if !force { + c |= conflictSoft + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { + // Ignore conflicts UNLESS the image is "dangling" or not being used in + // which case we want the user to know. + return nil + } + + // There was a conflict and it's either a hard conflict OR we are not + // forcing deletion on soft conflicts. + return conflict + } + + parent, err := daemon.imageStore.GetParent(imgID) + if err != nil { + // There may be no parent + parent = "" + } + + // Delete all repository tag/digest references to this image. + if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { + return err + } + + removedLayers, err := daemon.imageStore.Delete(imgID) + if err != nil { + return err + } + + daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") + *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) + for _, removedLayer := range removedLayers { + *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) + } + + if !prune || parent == "" { + return nil + } + + // We need to prune the parent image. This means delete it if there are + // no tags/digests referencing it and there are no containers using it ( + // either running or stopped). + // Do not force prunings, but do so quietly (stopping on any encountered + // conflicts). + return daemon.imageDeleteHelper(parent, records, false, true, true) +} + +// checkImageDeleteConflict determines whether there are any conflicts +// preventing deletion of the given image from this daemon. A hard conflict is +// any image which has the given image as a parent or any running container +// using the image. A soft conflict is any tags/digest referencing the given +// image or any stopped container using the image. If ignoreSoftConflicts is +// true, this function will not check for soft conflict conditions. +func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { + // Check if the image has any descendant images. + if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { + return &imageDeleteConflict{ + hard: true, + imgID: imgID, + message: "image has dependent child images", + } + } + + if mask&conflictRunningContainer != 0 { + // Check if any running container is using the image. + running := func(c *container.Container) bool { + return c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(running); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + hard: true, + used: true, + message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), + } + } + } + + // Check if any repository tags/digest reference this image. + if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID.Digest())) > 0 { + return &imageDeleteConflict{ + imgID: imgID, + message: "image is referenced in multiple repositories", + } + } + + if mask&conflictStoppedContainer != 0 { + // Check if any stopped containers reference this image. + stopped := func(c *container.Container) bool { + return !c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(stopped); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + used: true, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), + } + } + } + + return nil +} + +// imageIsDangling returns whether the given image is "dangling" which means +// that there are no repository references to the given image and it has no +// child images. +func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { + return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0) +} diff --git a/vendor/github.com/moby/moby/daemon/image_exporter.go b/vendor/github.com/moby/moby/daemon/image_exporter.go new file mode 100644 index 0000000..95d1d3d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_exporter.go @@ -0,0 +1,25 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/image/tarexport" +) + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Save(names, outStream) +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Load(inTar, outStream, quiet) +} diff --git a/vendor/github.com/moby/moby/daemon/image_history.go b/vendor/github.com/moby/moby/daemon/image_history.go new file mode 100644 index 0000000..839dd12 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_history.go @@ -0,0 +1,84 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. +func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { + start := time.Now() + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + history := []*types.ImageHistory{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, fmt.Errorf("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*types.ImageHistory{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) + } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.referenceStore.References(id.Digest()) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, r.String()) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + imageActions.WithValues("history").UpdateSince(start) + return history, nil +} diff --git a/vendor/github.com/moby/moby/daemon/image_inspect.go b/vendor/github.com/moby/moby/daemon/image_inspect.go new file mode 100644 index 0000000..ebf9124 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_inspect.go @@ -0,0 +1,82 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +// LookupImage looks up an image by name and returns it as an ImageInspect +// structure. +func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + refs := daemon.referenceStore.References(img.ID().Digest()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, ref.String()) + case reference.Canonical: + repoDigests = append(repoDigests, ref.String()) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + OsVersion: img.OSVersion, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName() + + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} diff --git a/vendor/github.com/moby/moby/daemon/image_pull.go b/vendor/github.com/moby/moby/daemon/image_pull.go new file mode 100644 index 0000000..2157d15 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_pull.go @@ -0,0 +1,149 @@ +package daemon + +import ( + "io" + "strings" + + dist "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + if tag != "" { + // The "tag" could actually be a digest. + var dgst digest.Digest + dgst, err = digest.ParseDigest(tag) + if err == nil { + ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + if err != nil { + return err + } + } + + return daemon.pullImageWithReference(ctx, ref, metaHeaders, authConfig, outStream) +} + +// PullOnBuild tells Docker to pull image referenced by `name`. +func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { + ref, err := reference.ParseNamed(name) + if err != nil { + return nil, err + } + ref = reference.WithDefaultTag(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config file, we prefer to use that + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig( + authConfigs, + repoInfo.Index, + ) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.pullImageWithReference(ctx, ref, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), + ReferenceStore: daemon.referenceStore, + }, + DownloadManager: daemon.downloadManager, + Schema2Types: distribution.ImageTypes, + } + + err := distribution.Pull(ctx, ref, imagePullConfig) + close(progressChan) + <-writesDone + return err +} + +// GetRepository returns a repository from the registry. +func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.NamedTagged, authConfig *types.AuthConfig) (dist.Repository, bool, error) { + // get repository info + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, false, err + } + // makes sure name is not empty or `scratch` + if err := distribution.ValidateRepoName(repoInfo.Name()); err != nil { + return nil, false, err + } + + // get endpoints + endpoints, err := daemon.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + if err != nil { + return nil, false, err + } + + // retrieve repository + var ( + confirmedV2 bool + repository dist.Repository + lastError error + ) + + for _, endpoint := range endpoints { + if endpoint.Version == registry.APIVersion1 { + continue + } + + repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull") + if lastError == nil && confirmedV2 { + break + } + } + return repository, confirmedV2, lastError +} diff --git a/vendor/github.com/moby/moby/daemon/image_push.go b/vendor/github.com/moby/moby/daemon/image_push.go new file mode 100644 index 0000000..e6382c7 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_push.go @@ -0,0 +1,63 @@ +package daemon + +import ( + "io" + + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +// PushImage initiates a push operation on the repository named localName. +func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + if tag != "" { + // Push by digest is not supported, so only tags are supported. + ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), + ReferenceStore: daemon.referenceStore, + }, + ConfigMediaType: schema2.MediaTypeImageConfig, + LayerStore: distribution.NewLayerProviderFromStore(daemon.layerStore), + TrustKey: daemon.trustKey, + UploadManager: daemon.uploadManager, + } + + err = distribution.Push(ctx, ref, imagePushConfig) + close(progressChan) + <-writesDone + return err +} diff --git a/vendor/github.com/moby/moby/daemon/image_tag.go b/vendor/github.com/moby/moby/daemon/image_tag.go new file mode 100644 index 0000000..36fa3b4 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_tag.go @@ -0,0 +1,37 @@ +package daemon + +import ( + "github.com/docker/docker/image" + "github.com/docker/docker/reference" +) + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). +func (daemon *Daemon) TagImage(imageName, repository, tag string) error { + imageID, err := daemon.GetImageID(imageName) + if err != nil { + return err + } + + newTag, err := reference.WithName(repository) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(newTag, tag); err != nil { + return err + } + } + + return daemon.TagImageWithReference(imageID, newTag) +} + +// TagImageWithReference adds the given reference to the image ID provided. +func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error { + if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { + return err + } + + daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/images.go b/vendor/github.com/moby/moby/daemon/images.go new file mode 100644 index 0000000..88fb8f8 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/images.go @@ -0,0 +1,331 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/pkg/errors" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" +) + +var acceptedImageFilterTags = map[string]bool{ + "dangling": true, + "label": true, + "before": true, + "since": true, + "reference": true, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. +type byCreated []*types.ImageSummary + +func (r byCreated) Len() int { return len(r) } +func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +// Map returns a map of all images in the ImageStore +func (daemon *Daemon) Map() map[image.ID]*image.Image { + return daemon.imageStore.Map() +} + +// Images returns a filtered list of images. filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by api/types/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. +func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { + var ( + allImages map[image.ID]*image.Image + err error + danglingOnly = false + ) + + if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { + return nil, err + } + + if imageFilters.Include("dangling") { + if imageFilters.ExactMatch("dangling", "true") { + danglingOnly = true + } else if !imageFilters.ExactMatch("dangling", "false") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) + } + } + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + + var beforeFilter, sinceFilter *image.Image + err = imageFilters.WalkValues("before", func(value string) error { + beforeFilter, err = daemon.GetImage(value) + return err + }) + if err != nil { + return nil, err + } + + err = imageFilters.WalkValues("since", func(value string) error { + sinceFilter, err = daemon.GetImage(value) + return err + }) + if err != nil { + return nil, err + } + + images := []*types.ImageSummary{} + var imagesMap map[*image.Image]*types.ImageSummary + var layerRefs map[layer.ChainID]int + var allLayers map[layer.ChainID]layer.Layer + var allContainers []*container.Container + + for id, img := range allImages { + if beforeFilter != nil { + if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { + continue + } + } + + if sinceFilter != nil { + if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { + continue + } + } + + if imageFilters.Include("label") { + // Very old image that do not have image.Config (or even labels) + if img.Config == nil { + continue + } + // We are now sure image.Config is not nil + if !imageFilters.MatchKVList("label", img.Config.Labels) { + continue + } + } + + layerID := img.RootFS.ChainID() + var size int64 + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + + size, err = l.Size() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + } + + newImage := newImage(img, size) + + for _, ref := range daemon.referenceStore.References(id.Digest()) { + if imageFilters.Include("reference") { + var found bool + var matchErr error + for _, pattern := range imageFilters.Get("reference") { + found, matchErr = reference.Match(pattern, ref) + if matchErr != nil { + return nil, matchErr + } + } + if !found { + continue + } + } + if _, ok := ref.(reference.Canonical); ok { + newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) + } + if _, ok := ref.(reference.NamedTagged); ok { + newImage.RepoTags = append(newImage.RepoTags, ref.String()) + } + } + if newImage.RepoDigests == nil && newImage.RepoTags == nil { + if all || len(daemon.imageStore.Children(id)) == 0 { + + if imageFilters.Include("dangling") && !danglingOnly { + //dangling=false case, so dangling image is not needed + continue + } + if imageFilters.Include("reference") { // skip images with no references if filtering by reference + continue + } + newImage.RepoDigests = []string{"@"} + newImage.RepoTags = []string{":"} + } else { + continue + } + } else if danglingOnly && len(newImage.RepoTags) > 0 { + continue + } + + if withExtraAttrs { + // lazyly init variables + if imagesMap == nil { + allContainers = daemon.List() + allLayers = daemon.layerStore.Map() + imagesMap = make(map[*image.Image]*types.ImageSummary) + layerRefs = make(map[layer.ChainID]int) + } + + // Get container count + newImage.Containers = 0 + for _, c := range allContainers { + if c.ImageID == id { + newImage.Containers++ + } + } + + // count layer references + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + if _, ok := allLayers[chid]; !ok { + return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) + } + } + imagesMap[img] = newImage + } + + images = append(images, newImage) + } + + if withExtraAttrs { + // Get Shared sizes + for img, newImage := range imagesMap { + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + newImage.SharedSize = 0 + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + + diffSize, err := allLayers[chid].DiffSize() + if err != nil { + return nil, err + } + + if layerRefs[chid] > 1 { + newImage.SharedSize += diffSize + } + } + } + } + + sort.Sort(sort.Reverse(byCreated(images))) + + return images, nil +} + +// SquashImage creates a new image with the diff of the specified image and the specified parent. +// This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between. +// The existing image(s) is not destroyed. +// If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. +func (daemon *Daemon) SquashImage(id, parent string) (string, error) { + img, err := daemon.imageStore.Get(image.ID(id)) + if err != nil { + return "", err + } + + var parentImg *image.Image + var parentChainID layer.ChainID + if len(parent) != 0 { + parentImg, err = daemon.imageStore.Get(image.ID(parent)) + if err != nil { + return "", errors.Wrap(err, "error getting specified parent layer") + } + parentChainID = parentImg.RootFS.ChainID() + } else { + rootFS := image.NewRootFS() + parentImg = &image.Image{RootFS: rootFS} + } + + l, err := daemon.layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return "", errors.Wrap(err, "error getting image layer") + } + defer daemon.layerStore.Release(l) + + ts, err := l.TarStreamFrom(parentChainID) + if err != nil { + return "", errors.Wrapf(err, "error getting tar stream to parent") + } + defer ts.Close() + + newL, err := daemon.layerStore.Register(ts, parentChainID) + if err != nil { + return "", errors.Wrap(err, "error registering layer") + } + defer daemon.layerStore.Release(newL) + + var newImage image.Image + newImage = *img + newImage.RootFS = nil + + var rootFS image.RootFS + rootFS = *parentImg.RootFS + rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) + newImage.RootFS = &rootFS + + for i, hi := range newImage.History { + if i >= len(parentImg.History) { + hi.EmptyLayer = true + } + newImage.History[i] = hi + } + + now := time.Now() + var historyComment string + if len(parent) > 0 { + historyComment = fmt.Sprintf("merge %s to %s", id, parent) + } else { + historyComment = fmt.Sprintf("create new from %s", id) + } + + newImage.History = append(newImage.History, image.History{ + Created: now, + Comment: historyComment, + }) + newImage.Created = now + + b, err := json.Marshal(&newImage) + if err != nil { + return "", errors.Wrap(err, "error marshalling image config") + } + + newImgID, err := daemon.imageStore.Create(b) + if err != nil { + return "", errors.Wrap(err, "error creating new image after squash") + } + return string(newImgID), nil +} + +func newImage(image *image.Image, virtualSize int64) *types.ImageSummary { + newImage := new(types.ImageSummary) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = virtualSize + newImage.VirtualSize = virtualSize + newImage.SharedSize = -1 + newImage.Containers = -1 + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/vendor/github.com/moby/moby/daemon/import.go b/vendor/github.com/moby/moby/daemon/import.go new file mode 100644 index 0000000..c93322b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/import.go @@ -0,0 +1,135 @@ +package daemon + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + "runtime" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" +) + +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. +func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + rc io.ReadCloser + resp *http.Response + newRef reference.Named + ) + + if repository != "" { + var err error + newRef, err = reference.ParseNamed(repository) + if err != nil { + return err + } + + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) + if err != nil { + return err + } + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + u, err := url.Parse(src) + if err != nil { + return err + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = httputils.Download(u.String()) + if err != nil { + return err + } + progressOutput := sf.NewProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") + } + + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + inflatedLayerData, err := archive.DecompressStream(rc) + if err != nil { + return err + } + // TODO: support windows baselayer? + l, err := daemon.layerStore.Register(inflatedLayerData, "") + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call refstore directly + if newRef != nil { + if err := daemon.TagImageWithReference(id, newRef); err != nil { + return err + } + } + + daemon.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(sf.FormatStatus("", id.String())) + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/info.go b/vendor/github.com/moby/moby/daemon/info.go new file mode 100644 index 0000000..1ab9f29 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/info.go @@ -0,0 +1,180 @@ +package daemon + +import ( + "fmt" + "os" + "runtime" + "sync/atomic" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume/drivers" + "github.com/docker/go-connections/sockets" +) + +// SystemInfo returns information about the host server the daemon is running on. +func (daemon *Daemon) SystemInfo() (*types.Info, error) { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err != nil { + logrus.Warnf("Could not get operating system name: %v", err) + } else { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + meminfo = &system.MemInfo{} + } + + sysInfo := sysinfo.New(true) + + var cRunning, cPaused, cStopped int32 + daemon.containers.ApplyAll(func(c *container.Container) { + switch c.StateString() { + case "paused": + atomic.AddInt32(&cPaused, 1) + case "running": + atomic.AddInt32(&cRunning, 1) + default: + atomic.AddInt32(&cStopped, 1) + } + }) + + securityOptions := []string{} + if sysInfo.AppArmor { + securityOptions = append(securityOptions, "name=apparmor") + } + if sysInfo.Seccomp && supportsSeccomp { + profile := daemon.seccompProfilePath + if profile == "" { + profile = "default" + } + securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile)) + } + if selinuxEnabled() { + securityOptions = append(securityOptions, "name=selinux") + } + uid, gid := daemon.GetRemappedUIDGID() + if uid != 0 || gid != 0 { + securityOptions = append(securityOptions, "name=userns") + } + + v := &types.Info{ + ID: daemon.ID, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: int(cPaused), + ContainersStopped: int(cStopped), + Images: len(daemon.imageStore.Map()), + Driver: daemon.GraphDriverName(), + DriverStatus: daemon.layerStore.DriverStatus(), + Plugins: daemon.showPluginsInfo(), + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, + Debug: utils.IsDebugEnabled(), + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + LoggingDriver: daemon.defaultLogConfig.Type, + CgroupDriver: daemon.getCgroupDriver(), + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + OSType: platform.OSType, + Architecture: platform.Architecture, + RegistryConfig: daemon.RegistryService.ServiceConfig(), + NCPU: sysinfo.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.configStore.Root, + Labels: daemon.configStore.Labels, + ExperimentalBuild: daemon.configStore.Experimental, + ServerVersion: dockerversion.Version, + ClusterStore: daemon.configStore.ClusterStore, + ClusterAdvertise: daemon.configStore.ClusterAdvertise, + HTTPProxy: sockets.GetProxyEnv("http_proxy"), + HTTPSProxy: sockets.GetProxyEnv("https_proxy"), + NoProxy: sockets.GetProxyEnv("no_proxy"), + LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled, + SecurityOptions: securityOptions, + Isolation: daemon.defaultIsolation, + } + + // Retrieve platform specific info + daemon.FillPlatformInfo(v, sysInfo) + + hostname := "" + if hn, err := os.Hostname(); err != nil { + logrus.Warnf("Could not get hostname: %v", err) + } else { + hostname = hn + } + v.Name = hostname + + return v, nil +} + +// SystemVersion returns version information about the daemon. +func (daemon *Daemon) SystemVersion() types.Version { + v := types.Version{ + Version: dockerversion.Version, + GitCommit: dockerversion.GitCommit, + MinAPIVersion: api.MinVersion, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + Experimental: daemon.configStore.Experimental, + } + + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v.KernelVersion = kernelVersion + + return v +} + +func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { + var pluginsInfo types.PluginsInfo + + pluginsInfo.Volume = volumedrivers.GetDriverList() + pluginsInfo.Network = daemon.GetNetworkDriverList() + pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + + return pluginsInfo +} diff --git a/vendor/github.com/moby/moby/daemon/info_unix.go b/vendor/github.com/moby/moby/daemon/info_unix.go new file mode 100644 index 0000000..9c41c0e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/info_unix.go @@ -0,0 +1,82 @@ +// +build !windows + +package daemon + +import ( + "context" + "os/exec" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/sysinfo" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.KernelMemory = sysInfo.KernelMemory + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota + v.CPUShares = sysInfo.CPUShares + v.CPUSet = sysInfo.Cpuset + v.Runtimes = daemon.configStore.GetAllRuntimes() + v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() + v.InitBinary = daemon.configStore.GetInitPath() + + v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID + if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil { + v.ContainerdCommit.ID = sv.Revision + } else { + logrus.Warnf("failed to retrieve containerd version: %v", err) + v.ContainerdCommit.ID = "N/A" + } + + v.RuncCommit.Expected = dockerversion.RuncCommitID + if rv, err := exec.Command(DefaultRuntimeBinary, "--version").Output(); err == nil { + parts := strings.Split(strings.TrimSpace(string(rv)), "\n") + if len(parts) == 3 { + parts = strings.Split(parts[1], ": ") + if len(parts) == 2 { + v.RuncCommit.ID = strings.TrimSpace(parts[1]) + } + } + + if v.RuncCommit.ID == "" { + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultRuntimeBinary, string(rv)) + v.RuncCommit.ID = "N/A" + } + } else { + logrus.Warnf("failed to retrieve %s version: %v", DefaultRuntimeBinary, err) + v.RuncCommit.ID = "N/A" + } + + v.InitCommit.Expected = dockerversion.InitCommitID + if rv, err := exec.Command(DefaultInitBinary, "--version").Output(); err == nil { + parts := strings.Split(strings.TrimSpace(string(rv)), " - ") + if len(parts) == 2 { + if dockerversion.InitCommitID[0] == 'v' { + vs := strings.TrimPrefix(parts[0], "tini version ") + v.InitCommit.ID = "v" + vs + } else { + // Get the sha1 + gitParts := strings.Split(parts[1], ".") + if len(gitParts) == 2 && gitParts[0] == "git" { + v.InitCommit.ID = gitParts[1] + v.InitCommit.Expected = dockerversion.InitCommitID[0:len(gitParts[1])] + } + } + } + + if v.InitCommit.ID == "" { + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultInitBinary, string(rv)) + v.InitCommit.ID = "N/A" + } + } else { + logrus.Warnf("failed to retrieve %s version", DefaultInitBinary) + v.InitCommit.ID = "N/A" + } +} diff --git a/vendor/github.com/moby/moby/daemon/info_windows.go b/vendor/github.com/moby/moby/daemon/info_windows.go new file mode 100644 index 0000000..c700911 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/info_windows.go @@ -0,0 +1,10 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/sysinfo" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { +} diff --git a/vendor/github.com/moby/moby/daemon/initlayer/setup_solaris.go b/vendor/github.com/moby/moby/daemon/initlayer/setup_solaris.go new file mode 100644 index 0000000..66d53f0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/initlayer/setup_solaris.go @@ -0,0 +1,13 @@ +// +build solaris,cgo + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/initlayer/setup_unix.go b/vendor/github.com/moby/moby/daemon/initlayer/setup_unix.go new file mode 100644 index 0000000..e83c275 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/initlayer/setup_unix.go @@ -0,0 +1,69 @@ +// +build linux freebsd + +package initlayer + +import ( + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/idtools" +) + +// Setup populates a directory with mountpoints suitable +// for bind-mounting things into the container. +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + syscall.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { + return err + } + switch typ { + case "dir": + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Chown(rootUID, rootGID) + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/initlayer/setup_windows.go b/vendor/github.com/moby/moby/daemon/initlayer/setup_windows.go new file mode 100644 index 0000000..48a9d71 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/initlayer/setup_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/inspect.go b/vendor/github.com/moby/moby/daemon/inspect.go new file mode 100644 index 0000000..557f639 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/inspect.go @@ -0,0 +1,264 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" +) + +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. +func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) { + switch { + case versions.LessThan(version, "1.20"): + return daemon.containerInspectPre120(name) + case versions.Equal(version, "1.20"): + return daemon.containerInspect120(name) + } + return daemon.ContainerInspectCurrent(name, size) +} + +// ContainerInspectCurrent returns low-level information about a +// container in a most recent api version. +func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, size) + if err != nil { + return nil, err + } + + apiNetworks := make(map[string]*networktypes.EndpointSettings) + for name, epConf := range container.NetworkSettings.Networks { + if epConf.EndpointSettings != nil { + apiNetworks[name] = epConf.EndpointSettings + } + } + + mountPoints := addMountPoints(container) + networkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: container.NetworkSettings.Bridge, + SandboxID: container.NetworkSettings.SandboxID, + HairpinMode: container.NetworkSettings.HairpinMode, + LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, + Ports: container.NetworkSettings.Ports, + SandboxKey: container.NetworkSettings.SandboxKey, + SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, + SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), + Networks: apiNetworks, + } + + return &types.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: container.Config, + NetworkSettings: networkSettings, + }, nil +} + +// containerInspect120 serializes the master version of a container into a json type. +func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.HostConfig + + children := daemon.children(container) + hostConfig.Links = nil // do not expose the internal structure + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + + // We merge the Ulimits from hostConfig with daemon default + daemon.mergeUlimits(&hostConfig) + + var containerHealth *types.Health + if container.State.Health != nil { + containerHealth = &types.Health{ + Status: container.State.Health.Status, + FailingStreak: container.State.Health.FailingStreak, + Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), + } + } + + containerState := &types.ContainerState{ + Status: container.State.StateString(), + Running: container.State.Running, + Paused: container.State.Paused, + Restarting: container.State.Restarting, + OOMKilled: container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode(), + Error: container.State.Error(), + StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), + FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), + Health: containerHealth, + } + + contJSONBase := &types.ContainerJSONBase{ + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID.String(), + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, + } + + var ( + sizeRw int64 + sizeRootFs int64 + ) + if size { + sizeRw, sizeRootFs = daemon.getSize(container) + contJSONBase.SizeRw = &sizeRw + contJSONBase.SizeRootFs = &sizeRootFs + } + + // Now set any platform-specific fields + contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) + + contJSONBase.GraphDriver.Name = container.Driver + + graphDriverData, err := container.RWLayer.Metadata() + // If container is marked as Dead, the container's graphdriver metadata + // could have been removed, it will cause error if we try to get the metadata, + // we can ignore the error if the container is dead. + if err != nil && !container.Dead { + return nil, err + } + contJSONBase.GraphDriver.Data = graphDriverData + + return contJSONBase, nil +} + +// ContainerExecInspect returns low-level information about the exec +// command. An error is returned if the exec cannot be found. +func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { + e, err := daemon.getExecConfig(id) + if err != nil { + return nil, err + } + + pc := inspectExecProcessConfig(e) + + return &backend.ExecInspect{ + ID: e.ID, + Running: e.Running, + ExitCode: e.ExitCode, + ProcessConfig: pc, + OpenStdin: e.OpenStdin, + OpenStdout: e.OpenStdout, + OpenStderr: e.OpenStderr, + CanRemove: e.CanRemove, + ContainerID: e.ContainerID, + DetachKeys: e.DetachKeys, + Pid: e.Pid, + }, nil +} + +// VolumeInspect looks up a volume by name. An error is returned if +// the volume cannot be found. +func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { + v, err := daemon.volumes.Get(name) + if err != nil { + return nil, err + } + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + apiV.Status = v.Status() + return apiV, nil +} + +func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { + result := &v1p20.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: settings.Bridge, + SandboxID: settings.SandboxID, + HairpinMode: settings.HairpinMode, + LinkLocalIPv6Address: settings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, + Ports: settings.Ports, + SandboxKey: settings.SandboxKey, + SecondaryIPAddresses: settings.SecondaryIPAddresses, + SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), + } + + return result +} + +// getDefaultNetworkSettings creates the deprecated structure that holds the information +// about the bridge network for a container. +func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*network.EndpointSettings) types.DefaultNetworkSettings { + var settings types.DefaultNetworkSettings + + if defaultNetwork, ok := networks["bridge"]; ok && defaultNetwork.EndpointSettings != nil { + settings.EndpointID = defaultNetwork.EndpointID + settings.Gateway = defaultNetwork.Gateway + settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address + settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen + settings.IPAddress = defaultNetwork.IPAddress + settings.IPPrefixLen = defaultNetwork.IPPrefixLen + settings.IPv6Gateway = defaultNetwork.IPv6Gateway + settings.MacAddress = defaultNetwork.MacAddress + } + return settings +} diff --git a/vendor/github.com/moby/moby/daemon/inspect_solaris.go b/vendor/github.com/moby/moby/daemon/inspect_solaris.go new file mode 100644 index 0000000..0e3dcc1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/inspect_solaris.go @@ -0,0 +1,41 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +// containerInspectPre120 get containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + return &v1p19.ContainerJSON{}, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/vendor/github.com/moby/moby/daemon/inspect_unix.go b/vendor/github.com/moby/moby/daemon/inspect_unix.go new file mode 100644 index 0000000..08a8223 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/inspect_unix.go @@ -0,0 +1,92 @@ +// +build !windows,!solaris + +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + contJSONBase.AppArmorProfile = container.AppArmorProfile + contJSONBase.ResolvConfPath = container.ResolvConfPath + contJSONBase.HostnamePath = container.HostnamePath + contJSONBase.HostsPath = container.HostsPath + + return contJSONBase +} + +// containerInspectPre120 gets containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + volumes := make(map[string]string) + volumesRW := make(map[string]bool) + for _, m := range container.MountPoints { + volumes[m.Destination] = m.Path() + volumesRW[m.Destination] = m.RW + } + + config := &v1p19.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + Memory: container.HostConfig.Memory, + MemorySwap: container.HostConfig.MemorySwap, + CPUShares: container.HostConfig.CPUShares, + CPUSet: container.HostConfig.CpusetCpus, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p19.ContainerJSON{ + ContainerJSONBase: base, + Volumes: volumes, + VolumesRW: volumesRW, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + Privileged: &e.Privileged, + User: e.User, + } +} diff --git a/vendor/github.com/moby/moby/daemon/inspect_windows.go b/vendor/github.com/moby/moby/daemon/inspect_windows.go new file mode 100644 index 0000000..b331c83 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/inspect_windows.go @@ -0,0 +1,41 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +// containerInspectPre120 get containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { + return daemon.ContainerInspectCurrent(name, false) +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/vendor/github.com/moby/moby/daemon/keys.go b/vendor/github.com/moby/moby/daemon/keys.go new file mode 100644 index 0000000..055d488 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/keys.go @@ -0,0 +1,59 @@ +// +build linux + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +const ( + rootKeyFile = "/proc/sys/kernel/keys/root_maxkeys" + rootBytesFile = "/proc/sys/kernel/keys/root_maxbytes" + rootKeyLimit = 1000000 + // it is standard configuration to allocate 25 bytes per key + rootKeyByteMultiplier = 25 +) + +// ModifyRootKeyLimit checks to see if the root key limit is set to +// at least 1000000 and changes it to that limit along with the maxbytes +// allocated to the keys at a 25 to 1 multiplier. +func ModifyRootKeyLimit() error { + value, err := readRootKeyLimit(rootKeyFile) + if err != nil { + return err + } + if value < rootKeyLimit { + return setRootKeyLimit(rootKeyLimit) + } + return nil +} + +func setRootKeyLimit(limit int) error { + keys, err := os.OpenFile(rootKeyFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer keys.Close() + if _, err := fmt.Fprintf(keys, "%d", limit); err != nil { + return err + } + bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer bytes.Close() + _, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier) + return err +} + +func readRootKeyLimit(path string) (int, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return -1, err + } + return strconv.Atoi(strings.Trim(string(data), "\n")) +} diff --git a/vendor/github.com/moby/moby/daemon/keys_unsupported.go b/vendor/github.com/moby/moby/daemon/keys_unsupported.go new file mode 100644 index 0000000..b172559 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/keys_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux + +package daemon + +// ModifyRootKeyLimit is an noop on unsupported platforms. +func ModifyRootKeyLimit() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/kill.go b/vendor/github.com/moby/moby/daemon/kill.go new file mode 100644 index 0000000..18d5bbb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/kill.go @@ -0,0 +1,164 @@ +package daemon + +import ( + "fmt" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/signal" +) + +type errNoSuchProcess struct { + pid int + signal int +} + +func (e errNoSuchProcess) Error() string { + return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) +} + +// isErrNoSuchProcess returns true if the error +// is an instance of errNoSuchProcess. +func isErrNoSuchProcess(err error) bool { + _, ok := err.(errNoSuchProcess) + return ok +} + +// ContainerKill sends signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(name string, sig uint64) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { + return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) + } + + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + return daemon.Kill(container) + } + return daemon.killWithSignal(container, int(sig)) +} + +// killWithSignal sends the container the given signal. This wrapper for the +// host specific kill command prepares the container before attempting +// to send the signal. An error is returned if the container is paused +// or not running, or if there is a problem returned from the +// underlying kill command. +func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error { + logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) + } + + if !container.Running { + return errNotRunning{container.ID} + } + + if container.Config.StopSignal != "" { + containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal) + if err != nil { + return err + } + if containerStopSignal == syscall.Signal(sig) { + container.ExitOnNext() + } + } else { + container.ExitOnNext() + } + + if !daemon.IsShuttingDown() { + container.HasBeenManuallyStopped = true + } + + // if the container is currently restarting we do not need to send the signal + // to the process. Telling the monitor that it should exit on its next event + // loop is enough + if container.Restarting { + return nil + } + + if err := daemon.kill(container, sig); err != nil { + err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err) + // if container or process not exists, ignore the error + if strings.Contains(err.Error(), "container not found") || + strings.Contains(err.Error(), "no such process") { + logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error()) + } else { + return err + } + } + + attributes := map[string]string{ + "signal": fmt.Sprintf("%d", sig), + } + daemon.LogContainerEventWithAttributes(container, "kill", attributes) + return nil +} + +// Kill forcefully terminates a container. +func (daemon *Daemon) Kill(container *container.Container) error { + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + // 1. Send SIGKILL + if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // its probably because its already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we return it to the caller. + if isErrNoSuchProcess(err) { + return nil + } + + if _, err2 := container.WaitStop(2 * time.Second); err2 != nil { + return err + } + } + + // 2. Wait for the process to die, in last resort, try to kill the process directly + if err := killProcessDirectly(container); err != nil { + if isErrNoSuchProcess(err) { + return nil + } + return err + } + + container.WaitStop(-1 * time.Second) + return nil +} + +// killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error. +func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error { + err := daemon.killWithSignal(container, sig) + if err == syscall.ESRCH { + e := errNoSuchProcess{container.GetPID(), sig} + logrus.Debug(e) + return e + } + return err +} + +func (daemon *Daemon) kill(c *container.Container, sig int) error { + return daemon.containerd.Signal(c.ID, sig) +} diff --git a/vendor/github.com/moby/moby/daemon/links.go b/vendor/github.com/moby/moby/daemon/links.go new file mode 100644 index 0000000..7f691d4 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/links.go @@ -0,0 +1,87 @@ +package daemon + +import ( + "sync" + + "github.com/docker/docker/container" +) + +// linkIndex stores link relationships between containers, including their specified alias +// The alias is the name the parent uses to reference the child +type linkIndex struct { + // idx maps a parent->alias->child relationship + idx map[*container.Container]map[string]*container.Container + // childIdx maps child->parent->aliases + childIdx map[*container.Container]map[*container.Container]map[string]struct{} + mu sync.Mutex +} + +func newLinkIndex() *linkIndex { + return &linkIndex{ + idx: make(map[*container.Container]map[string]*container.Container), + childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}), + } +} + +// link adds indexes for the passed in parent/child/alias relationships +func (l *linkIndex) link(parent, child *container.Container, alias string) { + l.mu.Lock() + + if l.idx[parent] == nil { + l.idx[parent] = make(map[string]*container.Container) + } + l.idx[parent][alias] = child + if l.childIdx[child] == nil { + l.childIdx[child] = make(map[*container.Container]map[string]struct{}) + } + if l.childIdx[child][parent] == nil { + l.childIdx[child][parent] = make(map[string]struct{}) + } + l.childIdx[child][parent][alias] = struct{}{} + + l.mu.Unlock() +} + +// unlink removes the requested alias for the given parent/child +func (l *linkIndex) unlink(alias string, child, parent *container.Container) { + l.mu.Lock() + delete(l.idx[parent], alias) + delete(l.childIdx[child], parent) + l.mu.Unlock() +} + +// children maps all the aliases-> children for the passed in parent +// aliases here are the aliases the parent uses to refer to the child +func (l *linkIndex) children(parent *container.Container) map[string]*container.Container { + l.mu.Lock() + children := l.idx[parent] + l.mu.Unlock() + return children +} + +// parents maps all the aliases->parent for the passed in child +// aliases here are the aliases the parents use to refer to the child +func (l *linkIndex) parents(child *container.Container) map[string]*container.Container { + l.mu.Lock() + + parents := make(map[string]*container.Container) + for parent, aliases := range l.childIdx[child] { + for alias := range aliases { + parents[alias] = parent + } + } + + l.mu.Unlock() + return parents +} + +// delete deletes all link relationships referencing this container +func (l *linkIndex) delete(container *container.Container) { + l.mu.Lock() + for _, child := range l.idx[container] { + delete(l.childIdx[child], container) + } + delete(l.idx, container) + delete(l.childIdx, container) + l.mu.Unlock() +} diff --git a/vendor/github.com/moby/moby/daemon/links/links.go b/vendor/github.com/moby/moby/daemon/links/links.go new file mode 100644 index 0000000..af15de0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/links/links.go @@ -0,0 +1,141 @@ +package links + +import ( + "fmt" + "path" + "strings" + + "github.com/docker/go-connections/nat" +) + +// Link struct holds informations about parent/child linked container +type Link struct { + // Parent container IP address + ParentIP string + // Child container IP address + ChildIP string + // Link name + Name string + // Child environments variables + ChildEnvironment []string + // Child exposed ports + Ports []nat.Port +} + +// NewLink initializes a new Link struct with the provided options. +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link { + var ( + i int + ports = make([]nat.Port, len(exposedPorts)) + ) + + for p := range exposedPorts { + ports[i] = p + i++ + } + + return &Link{ + Name: name, + ChildIP: childIP, + ParentIP: parentIP, + ChildEnvironment: env, + Ports: ports, + } +} + +// ToEnv creates a string's slice containing child container informations in +// the form of environment variables which will be later exported on container +// startup. +func (l *Link) ToEnv() []string { + env := []string{} + + _, n := path.Split(l.Name) + alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) + + if p := l.getDefaultPort(); p != nil { + env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) + } + + //sort the ports so that we can bulk the continuous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } else { + i++ + } + } + for _, p := range l.Ports { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + } + + // Load the linked container's name into the environment + env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) + + if l.ChildEnvironment != nil { + for _, v := range l.ChildEnvironment { + parts := strings.SplitN(v, "=", 2) + if len(parts) < 2 { + continue + } + // Ignore a few variables that are added during docker build (and not really relevant to linked containers) + if parts[0] == "HOME" || parts[0] == "PATH" { + continue + } + env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) + } + } + return env +} + +func nextContiguous(ports []nat.Port, value int, index int) int { + if index+1 == len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + +// Default port rules +func (l *Link) getDefaultPort() *nat.Port { + var p nat.Port + i := len(l.Ports) + + if i == 0 { + return nil + } else if i > 1 { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + } + p = l.Ports[0] + return &p +} diff --git a/vendor/github.com/moby/moby/daemon/links/links_test.go b/vendor/github.com/moby/moby/daemon/links/links_test.go new file mode 100644 index 0000000..0273f13 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/links/links_test.go @@ -0,0 +1,213 @@ +package links + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/go-connections/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestLinkNaming(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + value, ok := env["DOCKER_1_PORT"] + + if !ok { + t.Fatalf("DOCKER_1_PORT not found in env") + } + + if value != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) + } +} + +func TestLinkNew(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) + + if link.Name != "/db/docker" { + t.Fail() + } + if link.ParentIP != "172.0.17.3" { + t.Fail() + } + if link.ChildIP != "172.0.17.2" { + t.Fail() + } + for _, p := range link.Ports { + if p != newPortNoError("tcp", "6379") { + t.Fail() + } + } +} + +func TestLinkEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkMultipleEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkPortRangeEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } + for i := range []int{6379, 6380, 6381} { + tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) + tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP+PORT", i) + tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP+PROTO", i) + tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) + if env[tcpaddr] == "172.0.17.2" { + t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) + } + if env[tcpport] == fmt.Sprintf("%d", i) { + t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) + } + if env[tcpproto] == "tcp" { + t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) + } + if env[tcp] == fmt.Sprintf("tcp://172.0.17.2:%d", i) { + t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/links_linux.go b/vendor/github.com/moby/moby/daemon/links_linux.go new file mode 100644 index 0000000..2ea40d9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/links_linux.go @@ -0,0 +1,72 @@ +package daemon + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/graphdb" +) + +// migrateLegacySqliteLinks migrates sqlite links to use links from HostConfig +// when sqlite links were used, hostConfig.Links was set to nil +func (daemon *Daemon) migrateLegacySqliteLinks(db *graphdb.Database, container *container.Container) error { + // if links is populated (or an empty slice), then this isn't using sqlite links and can be skipped + if container.HostConfig == nil || container.HostConfig.Links != nil { + return nil + } + + logrus.Debugf("migrating legacy sqlite link info for container: %s", container.ID) + + fullName := container.Name + if fullName[0] != '/' { + fullName = "/" + fullName + } + + // don't use a nil slice, this ensures that the check above will skip once the migration has completed + links := []string{} + children, err := db.Children(fullName, 0) + if err != nil { + if !strings.Contains(err.Error(), "Cannot find child for") { + return err + } + // else continue... it's ok if we didn't find any children, it'll just be nil and we can continue the migration + } + + for _, child := range children { + c, err := daemon.GetContainer(child.Entity.ID()) + if err != nil { + return err + } + + links = append(links, c.Name+":"+child.Edge.Name) + } + + container.HostConfig.Links = links + return container.WriteHostConfig() +} + +// sqliteMigration performs the link graph DB migration. +func (daemon *Daemon) sqliteMigration(containers map[string]*container.Container) error { + // migrate any legacy links from sqlite + linkdbFile := filepath.Join(daemon.root, "linkgraph.db") + var ( + legacyLinkDB *graphdb.Database + err error + ) + + legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile) + if err != nil { + return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err) + } + defer legacyLinkDB.Close() + + for _, c := range containers { + if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/links_linux_test.go b/vendor/github.com/moby/moby/daemon/links_linux_test.go new file mode 100644 index 0000000..e2dbff2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/links_linux_test.go @@ -0,0 +1,98 @@ +package daemon + +import ( + "encoding/json" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/stringid" +) + +func TestMigrateLegacySqliteLinks(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "legacy-qlite-links-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + name1 := "test1" + c1 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: stringid.GenerateNonCryptoID(), + Name: name1, + HostConfig: &containertypes.HostConfig{}, + }, + } + c1.Root = tmpDir + + name2 := "test2" + c2 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: stringid.GenerateNonCryptoID(), + Name: name2, + }, + } + + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + + d := &Daemon{root: tmpDir, containers: store} + db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db")) + if err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/"+name1, c1.ID); err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/"+name2, c2.ID); err != nil { + t.Fatal(err) + } + + alias := "hello" + if _, err := db.Set(path.Join(c1.Name, alias), c2.ID); err != nil { + t.Fatal(err) + } + + if err := d.migrateLegacySqliteLinks(db, c1); err != nil { + t.Fatal(err) + } + + if len(c1.HostConfig.Links) != 1 { + t.Fatal("expected links to be populated but is empty") + } + + expected := name2 + ":" + alias + actual := c1.HostConfig.Links[0] + if actual != expected { + t.Fatalf("got wrong link value, expected: %q, got: %q", expected, actual) + } + + // ensure this is persisted + b, err := ioutil.ReadFile(filepath.Join(c1.Root, "hostconfig.json")) + if err != nil { + t.Fatal(err) + } + type hc struct { + Links []string + } + var cfg hc + if err := json.Unmarshal(b, &cfg); err != nil { + t.Fatal(err) + } + + if len(cfg.Links) != 1 { + t.Fatalf("expected one entry in links, got: %d", len(cfg.Links)) + } + if cfg.Links[0] != expected { // same expected as above + t.Fatalf("got wrong link value, expected: %q, got: %q", expected, cfg.Links[0]) + } +} diff --git a/vendor/github.com/moby/moby/daemon/links_notlinux.go b/vendor/github.com/moby/moby/daemon/links_notlinux.go new file mode 100644 index 0000000..12c226c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/links_notlinux.go @@ -0,0 +1,10 @@ +// +build !linux + +package daemon + +import "github.com/docker/docker/container" + +// sqliteMigration performs the link graph DB migration. No-op on platforms other than Linux +func (daemon *Daemon) sqliteMigration(_ map[string]*container.Container) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/list.go b/vendor/github.com/moby/moby/daemon/list.go new file mode 100644 index 0000000..02805ea --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/list.go @@ -0,0 +1,660 @@ +package daemon + +import ( + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" +) + +var acceptedVolumeFilterTags = map[string]bool{ + "dangling": true, + "name": true, + "driver": true, + "label": true, +} + +var acceptedPsFilterTags = map[string]bool{ + "ancestor": true, + "before": true, + "exited": true, + "id": true, + "isolation": true, + "label": true, + "name": true, + "status": true, + "health": true, + "since": true, + "volume": true, + "network": true, + "is-task": true, +} + +// iterationAction represents possible outcomes happening during the container iteration. +type iterationAction int + +// containerReducer represents a reducer for a container. +// Returns the object to serialize by the api. +type containerReducer func(*container.Container, *listContext) (*types.Container, error) + +const ( + // includeContainer is the action to include a container in the reducer. + includeContainer iterationAction = iota + // excludeContainer is the action to exclude a container in the reducer. + excludeContainer + // stopIteration is the action to stop iterating over the list of containers. + stopIteration +) + +// errStopIteration makes the iterator to stop without returning an error. +var errStopIteration = errors.New("container list iteration stopped") + +// List returns an array of all containers registered in the daemon. +func (daemon *Daemon) List() []*container.Container { + return daemon.containers.List() +} + +// listContext is the daemon generated filtering to iterate over containers. +// This is created based on the user specification from types.ContainerListOptions. +type listContext struct { + // idx is the container iteration index for this context + idx int + // ancestorFilter tells whether it should check ancestors or not + ancestorFilter bool + // names is a list of container names to filter with + names map[string][]string + // images is a list of images to filter with + images map[image.ID]bool + // filters is a collection of arguments to filter with, specified by the user + filters filters.Args + // exitAllowed is a list of exit codes allowed to filter with + exitAllowed []int + + // beforeFilter is a filter to ignore containers that appear before the one given + beforeFilter *container.Container + // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container + sinceFilter *container.Container + + // taskFilter tells if we should filter based on wether a container is part of a task + taskFilter bool + // isTask tells us if the we should filter container that are a task (true) or not (false) + isTask bool + // ContainerListOptions is the filters set by the user + *types.ContainerListOptions +} + +// byContainerCreated is a temporary type used to sort a list of containers by creation time. +type byContainerCreated []*container.Container + +func (r byContainerCreated) Len() int { return len(r) } +func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byContainerCreated) Less(i, j int) bool { + return r[i].Created.UnixNano() < r[j].Created.UnixNano() +} + +// Containers returns the list of containers to show given the user's filtering. +func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { + return daemon.reduceContainers(config, daemon.transformContainer) +} + +func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container { + idSearch := false + names := ctx.filters.Get("name") + ids := ctx.filters.Get("id") + if len(names)+len(ids) == 0 { + // if name or ID filters are not in use, return to + // standard behavior of walking the entire container + // list from the daemon's in-memory store + return daemon.List() + } + + // idSearch will determine if we limit name matching to the IDs + // matched from any IDs which were specified as filters + if len(ids) > 0 { + idSearch = true + } + + matches := make(map[string]bool) + // find ID matches; errors represent "not found" and can be ignored + for _, id := range ids { + if fullID, err := daemon.idIndex.Get(id); err == nil { + matches[fullID] = true + } + } + + // look for name matches; if ID filtering was used, then limit the + // search space to the matches map only; errors represent "not found" + // and can be ignored + if len(names) > 0 { + for id, idNames := range ctx.names { + // if ID filters were used and no matches on that ID were + // found, continue to next ID in the list + if idSearch && !matches[id] { + continue + } + for _, eachName := range idNames { + if ctx.filters.Match("name", eachName) { + matches[id] = true + } + } + } + } + + cntrs := make([]*container.Container, 0, len(matches)) + for id := range matches { + if c := daemon.containers.Get(id); c != nil { + cntrs = append(cntrs, c) + } + } + + // Restore sort-order after filtering + // Created gives us nanosec resolution for sorting + sort.Sort(sort.Reverse(byContainerCreated(cntrs))) + + return cntrs +} + +// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. +func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { + var ( + containers = []*types.Container{} + ) + + ctx, err := daemon.foldFilter(config) + if err != nil { + return nil, err + } + + // fastpath to only look at a subset of containers if specific name + // or ID matches were provided by the user--otherwise we potentially + // end up locking and querying many more containers than intended + containerList := daemon.filterByNameIDMatches(ctx) + + for _, container := range containerList { + t, err := daemon.reducePsContainer(container, ctx, reducer) + if err != nil { + if err != errStopIteration { + return nil, err + } + break + } + if t != nil { + containers = append(containers, t) + ctx.idx++ + } + } + + return containers, nil +} + +// reducePsContainer is the basic representation for a container as expected by the ps command. +func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { + container.Lock() + defer container.Unlock() + + // filter containers to return + action := includeContainerInList(container, ctx) + switch action { + case excludeContainer: + return nil, nil + case stopIteration: + return nil, errStopIteration + } + + // transform internal container struct into api structs + return reducer(container, ctx) +} + +// foldFilter generates the container filter based on the user's filtering options. +func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { + psFilters := config.Filters + + if err := psFilters.Validate(acceptedPsFilterTags); err != nil { + return nil, err + } + + var filtExited []int + + err := psFilters.WalkValues("exited", func(value string) error { + code, err := strconv.Atoi(value) + if err != nil { + return err + } + filtExited = append(filtExited, code) + return nil + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("status", func(value string) error { + if !container.IsValidStateString(value) { + return fmt.Errorf("Unrecognised filter value for status: %s", value) + } + + config.All = true + return nil + }) + if err != nil { + return nil, err + } + + var taskFilter, isTask bool + if psFilters.Include("is-task") { + if psFilters.ExactMatch("is-task", "true") { + taskFilter = true + isTask = true + } else if psFilters.ExactMatch("is-task", "false") { + taskFilter = true + isTask = false + } else { + return nil, fmt.Errorf("Invalid filter 'is-task=%s'", psFilters.Get("is-task")) + } + } + + err = psFilters.WalkValues("health", func(value string) error { + if !container.IsValidHealthString(value) { + return fmt.Errorf("Unrecognised filter value for health: %s", value) + } + + return nil + }) + if err != nil { + return nil, err + } + + var beforeContFilter, sinceContFilter *container.Container + + err = psFilters.WalkValues("before", func(value string) error { + beforeContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("since", func(value string) error { + sinceContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + imagesFilter := map[image.ID]bool{} + var ancestorFilter bool + if psFilters.Include("ancestor") { + ancestorFilter = true + psFilters.WalkValues("ancestor", func(ancestor string) error { + id, err := daemon.GetImageID(ancestor) + if err != nil { + logrus.Warnf("Error while looking up for image %v", ancestor) + return nil + } + if imagesFilter[id] { + // Already seen this ancestor, skip it + return nil + } + // Then walk down the graph and put the imageIds in imagesFilter + populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) + return nil + }) + } + + return &listContext{ + filters: psFilters, + ancestorFilter: ancestorFilter, + images: imagesFilter, + exitAllowed: filtExited, + beforeFilter: beforeContFilter, + sinceFilter: sinceContFilter, + taskFilter: taskFilter, + isTask: isTask, + ContainerListOptions: config, + names: daemon.nameIndex.GetAll(), + }, nil +} + +// includeContainerInList decides whether a container should be included in the output or not based in the filter. +// It also decides if the iteration should be stopped or not. +func includeContainerInList(container *container.Container, ctx *listContext) iterationAction { + // Do not include container if it's in the list before the filter container. + // Set the filter container to nil to include the rest of containers after this one. + if ctx.beforeFilter != nil { + if container.ID == ctx.beforeFilter.ID { + ctx.beforeFilter = nil + } + return excludeContainer + } + + // Stop iteration when the container arrives to the filter container + if ctx.sinceFilter != nil { + if container.ID == ctx.sinceFilter.ID { + return stopIteration + } + } + + // Do not include container if it's stopped and we're not filters + if !container.Running && !ctx.All && ctx.Limit <= 0 { + return excludeContainer + } + + // Do not include container if the name doesn't match + if !ctx.filters.Match("name", container.Name) { + return excludeContainer + } + + // Do not include container if the id doesn't match + if !ctx.filters.Match("id", container.ID) { + return excludeContainer + } + + if ctx.taskFilter { + if ctx.isTask != container.Managed { + return excludeContainer + } + } + + // Do not include container if any of the labels don't match + if !ctx.filters.MatchKVList("label", container.Config.Labels) { + return excludeContainer + } + + // Do not include container if isolation doesn't match + if excludeContainer == excludeByIsolation(container, ctx) { + return excludeContainer + } + + // Stop iteration when the index is over the limit + if ctx.Limit > 0 && ctx.idx == ctx.Limit { + return stopIteration + } + + // Do not include container if its exit code is not in the filter + if len(ctx.exitAllowed) > 0 { + shouldSkip := true + for _, code := range ctx.exitAllowed { + if code == container.ExitCode() && !container.Running && !container.StartedAt.IsZero() { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + // Do not include container if its status doesn't match the filter + if !ctx.filters.Match("status", container.State.StateString()) { + return excludeContainer + } + + // Do not include container if its health doesn't match the filter + if !ctx.filters.ExactMatch("health", container.State.HealthString()) { + return excludeContainer + } + + if ctx.filters.Include("volume") { + volumesByName := make(map[string]*volume.MountPoint) + for _, m := range container.MountPoints { + if m.Name != "" { + volumesByName[m.Name] = m + } else { + volumesByName[m.Source] = m + } + } + + volumeExist := fmt.Errorf("volume mounted in container") + err := ctx.filters.WalkValues("volume", func(value string) error { + if _, exist := container.MountPoints[value]; exist { + return volumeExist + } + if _, exist := volumesByName[value]; exist { + return volumeExist + } + return nil + }) + if err != volumeExist { + return excludeContainer + } + } + + if ctx.ancestorFilter { + if len(ctx.images) == 0 { + return excludeContainer + } + if !ctx.images[container.ImageID] { + return excludeContainer + } + } + + networkExist := fmt.Errorf("container part of network") + if ctx.filters.Include("network") { + err := ctx.filters.WalkValues("network", func(value string) error { + if _, ok := container.NetworkSettings.Networks[value]; ok { + return networkExist + } + for _, nw := range container.NetworkSettings.Networks { + if nw.EndpointSettings == nil { + continue + } + if nw.NetworkID == value { + return networkExist + } + } + return nil + }) + if err != networkExist { + return excludeContainer + } + } + + return includeContainer +} + +// transformContainer generates the container type expected by the docker ps command. +func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { + newC := &types.Container{ + ID: container.ID, + Names: ctx.names[container.ID], + ImageID: container.ImageID.String(), + } + if newC.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + newC.Names = []string{} + } + + image := container.Config.Image // if possible keep the original ref + if image != container.ImageID.String() { + id, err := daemon.GetImageID(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id != container.ImageID { + image = container.ImageID.String() + } + } + newC.Image = image + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + newC.Command = container.Path + } + newC.Created = container.Created.Unix() + newC.State = container.State.StateString() + newC.Status = container.State.String() + newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + // copy networks to avoid races + networks := make(map[string]*networktypes.EndpointSettings) + for name, network := range container.NetworkSettings.Networks { + if network == nil || network.EndpointSettings == nil { + continue + } + networks[name] = &networktypes.EndpointSettings{ + EndpointID: network.EndpointID, + Gateway: network.Gateway, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + IPv6Gateway: network.IPv6Gateway, + GlobalIPv6Address: network.GlobalIPv6Address, + GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, + MacAddress: network.MacAddress, + NetworkID: network.NetworkID, + } + if network.IPAMConfig != nil { + networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: network.IPAMConfig.IPv4Address, + IPv6Address: network.IPAMConfig.IPv6Address, + } + } + } + newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + newC.Ports = []types.Port{} + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + return nil, err + } + if len(bindings) == 0 { + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: uint16(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + return nil, err + } + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: uint16(p), + PublicPort: uint16(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(container) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + newC.Labels = container.Config.Labels + newC.Mounts = addMountPoints(container) + + return newC, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. +func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { + var ( + volumesOut []*types.Volume + ) + volFilters, err := filters.FromParam(filter) + if err != nil { + return nil, nil, err + } + + if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { + return nil, nil, err + } + + volumes, warnings, err := daemon.volumes.List() + if err != nil { + return nil, nil, err + } + + filterVolumes, err := daemon.filterVolumes(volumes, volFilters) + if err != nil { + return nil, nil, err + } + for _, v := range filterVolumes { + apiV := volumeToAPIType(v) + if vv, ok := v.(interface { + CachedPath() string + }); ok { + apiV.Mountpoint = vv.CachedPath() + } else { + apiV.Mountpoint = v.Path() + } + volumesOut = append(volumesOut, apiV) + } + return volumesOut, warnings, nil +} + +// filterVolumes filters volume list according to user specified filter +// and returns user chosen volumes +func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) { + // if filter is empty, return original volume list + if filter.Len() == 0 { + return vols, nil + } + + var retVols []volume.Volume + for _, vol := range vols { + if filter.Include("name") { + if !filter.Match("name", vol.Name()) { + continue + } + } + if filter.Include("driver") { + if !filter.Match("driver", vol.DriverName()) { + continue + } + } + if filter.Include("label") { + v, ok := vol.(volume.DetailedVolume) + if !ok { + continue + } + if !filter.MatchKVList("label", v.Labels()) { + continue + } + } + retVols = append(retVols, vol) + } + danglingOnly := false + if filter.Include("dangling") { + if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { + danglingOnly = true + } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling")) + } + retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly) + } + return retVols, nil +} + +func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { + if !ancestorMap[imageID] { + for _, id := range getChildren(imageID) { + populateImageFilterByParents(ancestorMap, id, getChildren) + } + ancestorMap[imageID] = true + } +} diff --git a/vendor/github.com/moby/moby/daemon/list_unix.go b/vendor/github.com/moby/moby/daemon/list_unix.go new file mode 100644 index 0000000..91c9cac --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/list_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd solaris + +package daemon + +import "github.com/docker/docker/container" + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + return includeContainer +} diff --git a/vendor/github.com/moby/moby/daemon/list_windows.go b/vendor/github.com/moby/moby/daemon/list_windows.go new file mode 100644 index 0000000..7fbcd3a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/list_windows.go @@ -0,0 +1,20 @@ +package daemon + +import ( + "strings" + + "github.com/docker/docker/container" +) + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + i := strings.ToLower(string(container.HostConfig.Isolation)) + if i == "" { + i = "default" + } + if !ctx.filters.Match("isolation", i) { + return excludeContainer + } + return includeContainer +} diff --git a/vendor/github.com/moby/moby/daemon/logdrivers_linux.go b/vendor/github.com/moby/moby/daemon/logdrivers_linux.go new file mode 100644 index 0000000..ad343c1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logdrivers_linux.go @@ -0,0 +1,15 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/gcplogs" + _ "github.com/docker/docker/daemon/logger/gelf" + _ "github.com/docker/docker/daemon/logger/journald" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/moby/moby/daemon/logdrivers_windows.go b/vendor/github.com/moby/moby/daemon/logdrivers_windows.go new file mode 100644 index 0000000..f3002b9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logdrivers_windows.go @@ -0,0 +1,13 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/etwlogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs.go b/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs.go new file mode 100644 index 0000000..fee518d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs.go @@ -0,0 +1,404 @@ +// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs +package awslogs + +import ( + "errors" + "fmt" + "os" + "runtime" + "sort" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" +) + +const ( + name = "awslogs" + regionKey = "awslogs-region" + regionEnvKey = "AWS_REGION" + logGroupKey = "awslogs-group" + logStreamKey = "awslogs-stream" + tagKey = "tag" + batchPublishFrequency = 5 * time.Second + + // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + perEventBytes = 26 + maximumBytesPerPut = 1048576 + maximumLogEventsPerPut = 10000 + + // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html + maximumBytesPerEvent = 262144 - perEventBytes + + resourceAlreadyExistsCode = "ResourceAlreadyExistsException" + dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" + invalidSequenceTokenCode = "InvalidSequenceTokenException" + + userAgentHeader = "User-Agent" +) + +type logStream struct { + logStreamName string + logGroupName string + client api + messages chan *logger.Message + lock sync.RWMutex + closed bool + sequenceToken *string +} + +type api interface { + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) +} + +type regionFinder interface { + Region() (string, error) +} + +type wrappedEvent struct { + inputLogEvent *cloudwatchlogs.InputLogEvent + insertOrder int +} +type byTimestamp []wrappedEvent + +// init registers the awslogs driver +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates an awslogs logger using the configuration passed in on the +// context. Supported context configuration variables are awslogs-region, +// awslogs-group, and awslogs-stream. When available, configuration is +// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, +// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and +// the EC2 Instance Metadata Service. +func New(ctx logger.Context) (logger.Logger, error) { + logGroupName := ctx.Config[logGroupKey] + logStreamName, err := loggerutils.ParseLogTag(ctx, "{{.FullID}}") + if err != nil { + return nil, err + } + + if ctx.Config[logStreamKey] != "" { + logStreamName = ctx.Config[logStreamKey] + } + client, err := newAWSLogsClient(ctx) + if err != nil { + return nil, err + } + containerStream := &logStream{ + logStreamName: logStreamName, + logGroupName: logGroupName, + client: client, + messages: make(chan *logger.Message, 4096), + } + err = containerStream.create() + if err != nil { + return nil, err + } + go containerStream.collectBatch() + + return containerStream, nil +} + +// newRegionFinder is a variable such that the implementation +// can be swapped out for unit tests. +var newRegionFinder = func() regionFinder { + return ec2metadata.New(session.New()) +} + +// newAWSLogsClient creates the service client for Amazon CloudWatch Logs. +// Customizations to the default client from the SDK include a Docker-specific +// User-Agent string and automatic region detection using the EC2 Instance +// Metadata Service when region is otherwise unspecified. +func newAWSLogsClient(ctx logger.Context) (api, error) { + var region *string + if os.Getenv(regionEnvKey) != "" { + region = aws.String(os.Getenv(regionEnvKey)) + } + if ctx.Config[regionKey] != "" { + region = aws.String(ctx.Config[regionKey]) + } + if region == nil || *region == "" { + logrus.Info("Trying to get region from EC2 Metadata") + ec2MetadataClient := newRegionFinder() + r, err := ec2MetadataClient.Region() + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + }).Error("Could not get region from EC2 metadata, environment, or log option") + return nil, errors.New("Cannot determine region for awslogs driver") + } + region = &r + } + logrus.WithFields(logrus.Fields{ + "region": *region, + }).Debug("Created awslogs client") + + client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region)) + + client.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "DockerUserAgentHandler", + Fn: func(r *request.Request) { + currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) + r.HTTPRequest.Header.Set(userAgentHeader, + fmt.Sprintf("Docker %s (%s) %s", + dockerversion.Version, runtime.GOOS, currentAgent)) + }, + }) + return client, nil +} + +// Name returns the name of the awslogs logging driver +func (l *logStream) Name() string { + return name +} + +// Log submits messages for logging by an instance of the awslogs logging driver +func (l *logStream) Log(msg *logger.Message) error { + l.lock.RLock() + defer l.lock.RUnlock() + if !l.closed { + // buffer up the data, making sure to copy the Line data + l.messages <- logger.CopyMessage(msg) + } + return nil +} + +// Close closes the instance of the awslogs logging driver +func (l *logStream) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if !l.closed { + close(l.messages) + } + l.closed = true + return nil +} + +// create creates a log stream for the instance of the awslogs logging driver +func (l *logStream) create() error { + input := &cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + + _, err := l.client.CreateLogStream(input) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + fields := logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + } + if awsErr.Code() == resourceAlreadyExistsCode { + // Allow creation to succeed + logrus.WithFields(fields).Info("Log stream already exists") + return nil + } + logrus.WithFields(fields).Error("Failed to create log stream") + } + } + return err +} + +// newTicker is used for time-based batching. newTicker is a variable such +// that the implementation can be swapped out for unit tests. +var newTicker = func(freq time.Duration) *time.Ticker { + return time.NewTicker(freq) +} + +// collectBatch executes as a goroutine to perform batching of log events for +// submission to the log stream. Batching is performed on time- and size- +// bases. Time-based batching occurs at a 5 second interval (defined in the +// batchPublishFrequency const). Size-based batching is performed on the +// maximum number of events per batch (defined in maximumLogEventsPerPut) and +// the maximum number of total bytes in a batch (defined in +// maximumBytesPerPut). Log messages are split by the maximum bytes per event +// (defined in maximumBytesPerEvent). There is a fixed per-event byte overhead +// (defined in perEventBytes) which is accounted for in split- and batch- +// calculations. +func (l *logStream) collectBatch() { + timer := newTicker(batchPublishFrequency) + var events []wrappedEvent + bytes := 0 + for { + select { + case <-timer.C: + l.publishBatch(events) + events = events[:0] + bytes = 0 + case msg, more := <-l.messages: + if !more { + l.publishBatch(events) + return + } + unprocessedLine := msg.Line + for len(unprocessedLine) > 0 { + // Split line length so it does not exceed the maximum + lineBytes := len(unprocessedLine) + if lineBytes > maximumBytesPerEvent { + lineBytes = maximumBytesPerEvent + } + line := unprocessedLine[:lineBytes] + unprocessedLine = unprocessedLine[lineBytes:] + if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { + // Publish an existing batch if it's already over the maximum number of events or if adding this + // event would push it over the maximum number of total bytes. + l.publishBatch(events) + events = events[:0] + bytes = 0 + } + events = append(events, wrappedEvent{ + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(string(line)), + Timestamp: aws.Int64(msg.Timestamp.UnixNano() / int64(time.Millisecond)), + }, + insertOrder: len(events), + }) + bytes += (lineBytes + perEventBytes) + } + } + } +} + +// publishBatch calls PutLogEvents for a given set of InputLogEvents, +// accounting for sequencing requirements (each request must reference the +// sequence token returned by the previous request). +func (l *logStream) publishBatch(events []wrappedEvent) { + if len(events) == 0 { + return + } + + // events in a batch must be sorted by timestamp + // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + sort.Sort(byTimestamp(events)) + cwEvents := unwrapEvents(events) + + nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dataAlreadyAcceptedCode { + // already submitted, just grab the correct sequence token + parts := strings.Split(awsErr.Message(), " ") + nextSequenceToken = &parts[len(parts)-1] + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Info("Data already accepted, ignoring error") + err = nil + } else if awsErr.Code() == invalidSequenceTokenCode { + // sequence code is bad, grab the correct one and retry + parts := strings.Split(awsErr.Message(), " ") + token := parts[len(parts)-1] + nextSequenceToken, err = l.putLogEvents(cwEvents, &token) + } + } + } + if err != nil { + logrus.Error(err) + } else { + l.sequenceToken = nextSequenceToken + } +} + +// putLogEvents wraps the PutLogEvents API +func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { + input := &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: sequenceToken, + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + resp, err := l.client.PutLogEvents(input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Error("Failed to put log events") + } + return nil, err + } + return resp.NextSequenceToken, nil +} + +// ValidateLogOpt looks for awslogs-specific log options awslogs-region, +// awslogs-group, and awslogs-stream +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case logGroupKey: + case logStreamKey: + case regionKey: + case tagKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) + } + } + if cfg[logGroupKey] == "" { + return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) + } + return nil +} + +// Len returns the length of a byTimestamp slice. Len is required by the +// sort.Interface interface. +func (slice byTimestamp) Len() int { + return len(slice) +} + +// Less compares two values in a byTimestamp slice by Timestamp. Less is +// required by the sort.Interface interface. +func (slice byTimestamp) Less(i, j int) bool { + iTimestamp, jTimestamp := int64(0), int64(0) + if slice != nil && slice[i].inputLogEvent.Timestamp != nil { + iTimestamp = *slice[i].inputLogEvent.Timestamp + } + if slice != nil && slice[j].inputLogEvent.Timestamp != nil { + jTimestamp = *slice[j].inputLogEvent.Timestamp + } + if iTimestamp == jTimestamp { + return slice[i].insertOrder < slice[j].insertOrder + } + return iTimestamp < jTimestamp +} + +// Swap swaps two values in a byTimestamp slice with each other. Swap is +// required by the sort.Interface interface. +func (slice byTimestamp) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { + cwEvents := []*cloudwatchlogs.InputLogEvent{} + for _, input := range events { + cwEvents = append(cwEvents, input.inputLogEvent) + } + return cwEvents +} diff --git a/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs_test.go b/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs_test.go new file mode 100644 index 0000000..d5b1aae --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -0,0 +1,724 @@ +package awslogs + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "runtime" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" +) + +const ( + groupName = "groupName" + streamName = "streamName" + sequenceToken = "sequenceToken" + nextSequenceToken = "nextSequenceToken" + logline = "this is a log line" +) + +func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{ + regionKey: "us-east-1", + }, + } + + client, err := newAWSLogsClient(ctx) + if err != nil { + t.Fatal(err) + } + realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs) + if !ok { + t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs") + } + buildHandlerList := realClient.Handlers.Build + request := &request.Request{ + HTTPRequest: &http.Request{ + Header: http.Header{}, + }, + } + buildHandlerList.Run(request) + expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s (%s; %s; %s)", + dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) + userAgent := request.HTTPRequest.Header.Get("User-Agent") + if userAgent != expectedUserAgentString { + t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"", + expectedUserAgentString, userAgent) + } +} + +func TestNewAWSLogsClientRegionDetect(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{}, + } + + mockMetadata := newMockMetadataClient() + newRegionFinder = func() regionFinder { + return mockMetadata + } + mockMetadata.regionResult <- ®ionResult{ + successResult: "us-east-1", + } + + _, err := newAWSLogsClient(ctx) + if err != nil { + t.Fatal(err) + } +} + +func TestCreateSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: errors.New("Error!"), + } + + err := stream.create() + + if err == nil { + t.Fatal("Expected non-nil err") + } +} + +func TestCreateAlreadyExists(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: awserr.New(resourceAlreadyExistsCode, "", nil), + } + + err := stream.create() + + if err != nil { + t.Fatal("Expected nil err") + } +} + +func TestPublishBatchSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: errors.New("Error!"), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != sequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", sequenceToken, *stream.sequenceToken) + } +} + +func TestPublishBatchInvalidSeqSuccess(t *testing.T) { + mockClient := newMockClientBuffered(2) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(invalidSequenceTokenCode, "use token token", nil), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != "token" { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", "token", *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchAlreadyAccepted(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(dataAlreadyAcceptedCode, "use token token", nil), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != "token" { + t.Errorf("Expected sequenceToken to be %s, but was %s", "token", *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestCollectBatchSimple(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchTicker(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline + " 1"), + Timestamp: time.Time{}, + }) + stream.Log(&logger.Message{ + Line: []byte(logline + " 2"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + + // Verify first batch + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline+" 1" { + t.Errorf("Expected message to be %s but was %s", logline+" 1", *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != logline+" 2" { + t.Errorf("Expected message to be %s but was %s", logline+" 2", *argument.LogEvents[0].Message) + } + + stream.Log(&logger.Message{ + Line: []byte(logline + " 3"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline+" 3" { + t.Errorf("Expected message to be %s but was %s", logline+" 3", *argument.LogEvents[0].Message) + } + + stream.Close() + +} + +func TestCollectBatchClose(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchLineSplit(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerEvent) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != longline { + t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != "B" { + t.Errorf("Expected message to be %s but was %s", "B", *argument.LogEvents[1].Message) + } +} + +func TestCollectBatchMaxEvents(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + line := "A" + for i := 0; i <= maximumLogEventsPerPut; i++ { + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: time.Time{}, + }) + } + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != maximumLogEventsPerPut { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", maximumLogEventsPerPut, len(argument.LogEvents)) + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", 1, len(argument.LogEvents)) + } +} + +func TestCollectBatchMaxTotalBytes(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerPut) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + bytes := 0 + for _, event := range argument.LogEvents { + bytes += len(*event.Message) + } + if bytes > maximumBytesPerPut { + t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes) + } + + argument = <-mockClient.putLogEventsArgument + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) + } + message := *argument.LogEvents[0].Message + if message[len(message)-1:] != "B" { + t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:]) + } +} + +func TestCollectBatchWithDuplicateTimestamps(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + times := maximumLogEventsPerPut + expectedEvents := []*cloudwatchlogs.InputLogEvent{} + timestamp := time.Now() + for i := 0; i < times; i++ { + line := fmt.Sprintf("%d", i) + if i%2 == 0 { + timestamp.Add(1 * time.Nanosecond) + } + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: timestamp, + }) + expectedEvents = append(expectedEvents, &cloudwatchlogs.InputLogEvent{ + Message: aws.String(line), + Timestamp: aws.Int64(timestamp.UnixNano() / int64(time.Millisecond)), + }) + } + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != times { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", times, len(argument.LogEvents)) + } + for i := 0; i < times; i++ { + if !reflect.DeepEqual(*argument.LogEvents[i], *expectedEvents[i]) { + t.Errorf("Expected event to be %v but was %v", *expectedEvents[i], *argument.LogEvents[i]) + } + } +} + +func TestCreateTagSuccess(t *testing.T) { + mockClient := newMockClient() + ctx := logger.Context{ + ContainerName: "/test-container", + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + Config: map[string]string{"tag": "{{.Name}}/{{.FullID}}"}, + } + logStreamName, e := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if e != nil { + t.Errorf("Error generating tag: %q", e) + } + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: logStreamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + + if *argument.LogStreamName != "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890" { + t.Errorf("Expected LogStreamName to be %s", "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890") + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/awslogs/cwlogsiface_mock_test.go b/vendor/github.com/moby/moby/daemon/logger/awslogs/cwlogsiface_mock_test.go new file mode 100644 index 0000000..b768a3d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/awslogs/cwlogsiface_mock_test.go @@ -0,0 +1,77 @@ +package awslogs + +import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + +type mockcwlogsclient struct { + createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput + createLogStreamResult chan *createLogStreamResult + putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput + putLogEventsResult chan *putLogEventsResult +} + +type createLogStreamResult struct { + successResult *cloudwatchlogs.CreateLogStreamOutput + errorResult error +} + +type putLogEventsResult struct { + successResult *cloudwatchlogs.PutLogEventsOutput + errorResult error +} + +func newMockClient() *mockcwlogsclient { + return &mockcwlogsclient{ + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), + createLogStreamResult: make(chan *createLogStreamResult, 1), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, 1), + putLogEventsResult: make(chan *putLogEventsResult, 1), + } +} + +func newMockClientBuffered(buflen int) *mockcwlogsclient { + return &mockcwlogsclient{ + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, buflen), + createLogStreamResult: make(chan *createLogStreamResult, buflen), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, buflen), + putLogEventsResult: make(chan *putLogEventsResult, buflen), + } +} + +func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { + m.createLogStreamArgument <- input + output := <-m.createLogStreamResult + return output.successResult, output.errorResult +} + +func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + events := make([]*cloudwatchlogs.InputLogEvent, len(input.LogEvents)) + copy(events, input.LogEvents) + m.putLogEventsArgument <- &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: input.SequenceToken, + LogGroupName: input.LogGroupName, + LogStreamName: input.LogStreamName, + } + output := <-m.putLogEventsResult + return output.successResult, output.errorResult +} + +type mockmetadataclient struct { + regionResult chan *regionResult +} + +type regionResult struct { + successResult string + errorResult error +} + +func newMockMetadataClient() *mockmetadataclient { + return &mockmetadataclient{ + regionResult: make(chan *regionResult, 1), + } +} + +func (m *mockmetadataclient) Region() (string, error) { + output := <-m.regionResult + return output.successResult, output.errorResult +} diff --git a/vendor/github.com/moby/moby/daemon/logger/context.go b/vendor/github.com/moby/moby/daemon/logger/context.go new file mode 100644 index 0000000..085ab01 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/context.go @@ -0,0 +1,111 @@ +package logger + +import ( + "fmt" + "os" + "strings" + "time" +) + +// Context provides enough information for a logging driver to do its function. +type Context struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string + DaemonName string +} + +// ExtraAttributes returns the user-defined extra attributes (labels, +// environment variables) in key-value format. This can be used by log drivers +// that support metadata to add more context to a log. +func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string { + extra := make(map[string]string) + labels, ok := ctx.Config["labels"] + if ok && len(labels) > 0 { + for _, l := range strings.Split(labels, ",") { + if v, ok := ctx.ContainerLabels[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + env, ok := ctx.Config["env"] + if ok && len(env) > 0 { + envMapping := make(map[string]string) + for _, e := range ctx.ContainerEnv { + if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { + envMapping[kv[0]] = kv[1] + } + } + for _, l := range strings.Split(env, ",") { + if v, ok := envMapping[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + return extra +} + +// Hostname returns the hostname from the underlying OS. +func (ctx *Context) Hostname() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("logger: can not resolve hostname: %v", err) + } + return hostname, nil +} + +// Command returns the command that the container being logged was +// started with. The Entrypoint is prepended to the container +// arguments. +func (ctx *Context) Command() string { + terms := []string{ctx.ContainerEntrypoint} + terms = append(terms, ctx.ContainerArgs...) + command := strings.Join(terms, " ") + return command +} + +// ID Returns the Container ID shortened to 12 characters. +func (ctx *Context) ID() string { + return ctx.ContainerID[:12] +} + +// FullID is an alias of ContainerID. +func (ctx *Context) FullID() string { + return ctx.ContainerID +} + +// Name returns the ContainerName without a preceding '/'. +func (ctx *Context) Name() string { + return ctx.ContainerName[1:] +} + +// ImageID returns the ContainerImageID shortened to 12 characters. +func (ctx *Context) ImageID() string { + return ctx.ContainerImageID[:12] +} + +// ImageFullID is an alias of ContainerImageID. +func (ctx *Context) ImageFullID() string { + return ctx.ContainerImageID +} + +// ImageName is an alias of ContainerImageName +func (ctx *Context) ImageName() string { + return ctx.ContainerImageName +} diff --git a/vendor/github.com/moby/moby/daemon/logger/copier.go b/vendor/github.com/moby/moby/daemon/logger/copier.go new file mode 100644 index 0000000..10ab46e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/copier.go @@ -0,0 +1,131 @@ +package logger + +import ( + "bytes" + "io" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + bufSize = 16 * 1024 + readSize = 2 * 1024 +) + +// Copier can copy logs from specified sources to Logger and attach Timestamp. +// Writes are concurrent, so you need implement some sync in your logger. +type Copier struct { + // srcs is map of name -> reader pairs, for example "stdout", "stderr" + srcs map[string]io.Reader + dst Logger + copyJobs sync.WaitGroup + closeOnce sync.Once + closed chan struct{} +} + +// NewCopier creates a new Copier +func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { + return &Copier{ + srcs: srcs, + dst: dst, + closed: make(chan struct{}), + } +} + +// Run starts logs copying +func (c *Copier) Run() { + for src, w := range c.srcs { + c.copyJobs.Add(1) + go c.copySrc(src, w) + } +} + +func (c *Copier) copySrc(name string, src io.Reader) { + defer c.copyJobs.Done() + buf := make([]byte, bufSize) + n := 0 + eof := false + msg := &Message{Source: name} + + for { + select { + case <-c.closed: + return + default: + // Work out how much more data we are okay with reading this time. + upto := n + readSize + if upto > cap(buf) { + upto = cap(buf) + } + // Try to read that data. + if upto > n { + read, err := src.Read(buf[n:upto]) + if err != nil { + if err != io.EOF { + logrus.Errorf("Error scanning log stream: %s", err) + return + } + eof = true + } + n += read + } + // If we have no data to log, and there's no more coming, we're done. + if n == 0 && eof { + return + } + // Break up the data that we've buffered up into lines, and log each in turn. + p := 0 + for q := bytes.Index(buf[p:n], []byte{'\n'}); q >= 0; q = bytes.Index(buf[p:n], []byte{'\n'}) { + msg.Line = buf[p : p+q] + msg.Timestamp = time.Now().UTC() + msg.Partial = false + select { + case <-c.closed: + return + default: + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + } + p += q + 1 + } + // If there's no more coming, or the buffer is full but + // has no newlines, log whatever we haven't logged yet, + // noting that it's a partial log line. + if eof || (p == 0 && n == len(buf)) { + if p < n { + msg.Line = buf[p:n] + msg.Timestamp = time.Now().UTC() + msg.Partial = true + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + p = 0 + n = 0 + } + if eof { + return + } + } + // Move any unlogged data to the front of the buffer in preparation for another read. + if p > 0 { + copy(buf[0:], buf[p:n]) + n -= p + } + } + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} + +// Close closes the copier +func (c *Copier) Close() { + c.closeOnce.Do(func() { + close(c.closed) + }) +} diff --git a/vendor/github.com/moby/moby/daemon/logger/copier_test.go b/vendor/github.com/moby/moby/daemon/logger/copier_test.go new file mode 100644 index 0000000..cfd816a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/copier_test.go @@ -0,0 +1,296 @@ +package logger + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" +) + +type TestLoggerJSON struct { + *json.Encoder + mu sync.Mutex + delay time.Duration +} + +func (l *TestLoggerJSON) Log(m *Message) error { + if l.delay > 0 { + time.Sleep(l.delay) + } + l.mu.Lock() + defer l.mu.Unlock() + return l.Encode(m) +} + +func (l *TestLoggerJSON) Close() error { return nil } + +func (l *TestLoggerJSON) Name() string { return "json" } + +func TestCopier(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + stderrLine := "Line that thinks that it is log line from docker stderr" + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { + t.Fatal(err) + } + } + + // Test remaining lines without line-endings + if _, err := stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stdoutLine, stdoutTrailingLine) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stderrLine, stderrTrailingLine) + } + } + } +} + +// TestCopierLongLines tests long lines without line breaks +func TestCopierLongLines(t *testing.T) { + // Long lines (should be split at "bufSize") + const bufSize = 16 * 1024 + stdoutLongLine := strings.Repeat("a", bufSize) + stderrLongLine := strings.Repeat("b", bufSize) + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + + for i := 0; i < 3; i++ { + if _, err := stdout.WriteString(stdoutLongLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLongLine); err != nil { + t.Fatal(err) + } + } + + if _, err := stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLongLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stdoutLongLine' or 'stdoutTrailingLine'", msg.Line) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLongLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stderrLongLine' or 'stderrTrailingLine'", msg.Line) + } + } + } +} + +func TestCopierSlow(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + var stdout bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + } + + var jsonBuf bytes.Buffer + //encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)} + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} + + c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + <-time.After(150 * time.Millisecond) + c.Close() + select { + case <-time.After(200 * time.Millisecond): + t.Fatalf("failed to exit in time after the copier is closed") + case <-wait: + } +} + +type BenchmarkLoggerDummy struct { +} + +func (l *BenchmarkLoggerDummy) Log(m *Message) error { return nil } + +func (l *BenchmarkLoggerDummy) Close() error { return nil } + +func (l *BenchmarkLoggerDummy) Name() string { return "dummy" } + +func BenchmarkCopier64(b *testing.B) { + benchmarkCopier(b, 1<<6) +} +func BenchmarkCopier128(b *testing.B) { + benchmarkCopier(b, 1<<7) +} +func BenchmarkCopier256(b *testing.B) { + benchmarkCopier(b, 1<<8) +} +func BenchmarkCopier512(b *testing.B) { + benchmarkCopier(b, 1<<9) +} +func BenchmarkCopier1K(b *testing.B) { + benchmarkCopier(b, 1<<10) +} +func BenchmarkCopier2K(b *testing.B) { + benchmarkCopier(b, 1<<11) +} +func BenchmarkCopier4K(b *testing.B) { + benchmarkCopier(b, 1<<12) +} +func BenchmarkCopier8K(b *testing.B) { + benchmarkCopier(b, 1<<13) +} +func BenchmarkCopier16K(b *testing.B) { + benchmarkCopier(b, 1<<14) +} +func BenchmarkCopier32K(b *testing.B) { + benchmarkCopier(b, 1<<15) +} +func BenchmarkCopier64K(b *testing.B) { + benchmarkCopier(b, 1<<16) +} +func BenchmarkCopier128K(b *testing.B) { + benchmarkCopier(b, 1<<17) +} +func BenchmarkCopier256K(b *testing.B) { + benchmarkCopier(b, 1<<18) +} + +func piped(b *testing.B, iterations int, delay time.Duration, buf []byte) io.Reader { + r, w, err := os.Pipe() + if err != nil { + b.Fatal(err) + return nil + } + go func() { + for i := 0; i < iterations; i++ { + time.Sleep(delay) + if n, err := w.Write(buf); err != nil || n != len(buf) { + if err != nil { + b.Fatal(err) + } + b.Fatal(fmt.Errorf("short write")) + } + } + w.Close() + }() + return r +} + +func benchmarkCopier(b *testing.B, length int) { + b.StopTimer() + buf := []byte{'A'} + for len(buf) < length { + buf = append(buf, buf...) + } + buf = append(buf[:length-1], []byte{'\n'}...) + b.StartTimer() + for i := 0; i < b.N; i++ { + c := NewCopier( + map[string]io.Reader{ + "buffer": piped(b, 10, time.Nanosecond, buf), + }, + &BenchmarkLoggerDummy{}) + c.Run() + c.Wait() + c.Close() + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/etwlogs/etwlogs_windows.go b/vendor/github.com/moby/moby/daemon/logger/etwlogs/etwlogs_windows.go new file mode 100644 index 0000000..f296d7f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/etwlogs/etwlogs_windows.go @@ -0,0 +1,170 @@ +// Package etwlogs provides a log driver for forwarding container logs +// as ETW events.(ETW stands for Event Tracing for Windows) +// A client can then create an ETW listener to listen for events that are sent +// by the ETW provider that we register, using the provider's GUID "a3693192-9ed6-46d2-a981-f8226c8363bd". +// Here is an example of how to do this using the logman utility: +// 1. logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl +// 2. Run container(s) and generate log messages +// 3. logman stop -ets DockerContainerLogs +// 4. You can then convert the etl log file to XML using: tracerpt -y trace.etl +// +// Each container log message generates an ETW event that also contains: +// the container name and ID, the timestamp, and the stream type. +package etwlogs + +import ( + "errors" + "fmt" + "sync" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "golang.org/x/sys/windows" +) + +type etwLogs struct { + containerName string + imageName string + containerID string + imageID string +} + +const ( + name = "etwlogs" + win32CallSuccess = 0 +) + +var ( + modAdvapi32 = windows.NewLazySystemDLL("Advapi32.dll") + procEventRegister = modAdvapi32.NewProc("EventRegister") + procEventWriteString = modAdvapi32.NewProc("EventWriteString") + procEventUnregister = modAdvapi32.NewProc("EventUnregister") +) +var providerHandle syscall.Handle +var refCount int +var mu sync.Mutex + +func init() { + providerHandle = syscall.InvalidHandle + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } +} + +// New creates a new etwLogs logger for the given container and registers the EWT provider. +func New(ctx logger.Context) (logger.Logger, error) { + if err := registerETWProvider(); err != nil { + return nil, err + } + logrus.Debugf("logging driver etwLogs configured for container: %s.", ctx.ContainerID) + + return &etwLogs{ + containerName: fixContainerName(ctx.ContainerName), + imageName: ctx.ContainerImageName, + containerID: ctx.ContainerID, + imageID: ctx.ContainerImageID, + }, nil +} + +// Log logs the message to the ETW stream. +func (etwLogger *etwLogs) Log(msg *logger.Message) error { + if providerHandle == syscall.InvalidHandle { + // This should never be hit, if it is, it indicates a programming error. + errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered." + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return callEventWriteString(createLogMessage(etwLogger, msg)) +} + +// Close closes the logger by unregistering the ETW provider. +func (etwLogger *etwLogs) Close() error { + unregisterETWProvider() + return nil +} + +func (etwLogger *etwLogs) Name() string { + return name +} + +func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { + return fmt.Sprintf("container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: %s, log: %s", + etwLogger.containerName, + etwLogger.imageName, + etwLogger.containerID, + etwLogger.imageID, + msg.Source, + msg.Line) +} + +// fixContainerName removes the initial '/' from the container name. +func fixContainerName(cntName string) string { + if len(cntName) > 0 && cntName[0] == '/' { + cntName = cntName[1:] + } + return cntName +} + +func registerETWProvider() error { + mu.Lock() + defer mu.Unlock() + if refCount == 0 { + var err error + if err = callEventRegister(); err != nil { + return err + } + } + + refCount++ + return nil +} + +func unregisterETWProvider() { + mu.Lock() + defer mu.Unlock() + if refCount == 1 { + if callEventUnregister() { + refCount-- + providerHandle = syscall.InvalidHandle + } + // Not returning an error if EventUnregister fails, because etwLogs will continue to work + } else { + refCount-- + } +} + +func callEventRegister() error { + // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} + guid := syscall.GUID{ + 0xa3693192, 0x9ed6, 0x46d2, + [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, + } + + ret, _, _ := procEventRegister.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("Failed to register ETW provider. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventWriteString(message string) error { + ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(message)))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventUnregister() bool { + ret, _, _ := procEventUnregister.Call(uintptr(providerHandle)) + if ret != win32CallSuccess { + return false + } + return true +} diff --git a/vendor/github.com/moby/moby/daemon/logger/factory.go b/vendor/github.com/moby/moby/daemon/logger/factory.go new file mode 100644 index 0000000..9cf716b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/factory.go @@ -0,0 +1,104 @@ +package logger + +import ( + "fmt" + "sync" +) + +// Creator builds a logging driver instance with given context. +type Creator func(Context) (Logger, error) + +// LogOptValidator checks the options specific to the underlying +// logging implementation. +type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + if lf.driverRegistered(name) { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + + lf.m.Lock() + lf.registry[name] = c + lf.m.Unlock() + return nil +} + +func (lf *logdriverFactory) driverRegistered(name string) bool { + lf.m.Lock() + _, ok := lf.registry[name] + lf.m.Unlock() + return ok +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if !ok { + return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + return c, nil +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c, _ := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. +func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation. +func ValidateLogOpts(name string, cfg map[string]string) error { + if name == "none" { + return nil + } + + if !factory.driverRegistered(name) { + return fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + + validator := factory.getLogOptValidator(name) + if validator != nil { + return validator(cfg) + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/fluentd/fluentd.go b/vendor/github.com/moby/moby/daemon/logger/fluentd/fluentd.go new file mode 100644 index 0000000..a8303cf --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/fluentd/fluentd.go @@ -0,0 +1,246 @@ +// Package fluentd provides the log driver for forwarding server logs +// to fluentd endpoints. +package fluentd + +import ( + "fmt" + "math" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-units" + "github.com/fluent/fluent-logger-golang/fluent" + "github.com/pkg/errors" +) + +type fluentd struct { + tag string + containerID string + containerName string + writer *fluent.Fluent + extra map[string]string +} + +type location struct { + protocol string + host string + port int + path string +} + +const ( + name = "fluentd" + + defaultProtocol = "tcp" + defaultHost = "127.0.0.1" + defaultPort = 24224 + defaultBufferLimit = 1024 * 1024 + + // logger tries to reconnect 2**32 - 1 times + // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] + defaultRetryWait = 1000 + defaultMaxRetries = math.MaxInt32 + + addressKey = "fluentd-address" + bufferLimitKey = "fluentd-buffer-limit" + retryWaitKey = "fluentd-retry-wait" + maxRetriesKey = "fluentd-max-retries" + asyncConnectKey = "fluentd-async-connect" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a fluentd logger using the configuration passed in on +// the context. The supported context configuration variable is +// fluentd-address. +func New(ctx logger.Context) (logger.Logger, error) { + loc, err := parseAddress(ctx.Config[addressKey]) + if err != nil { + return nil, err + } + + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra := ctx.ExtraAttributes(nil) + + bufferLimit := defaultBufferLimit + if ctx.Config[bufferLimitKey] != "" { + bl64, err := units.RAMInBytes(ctx.Config[bufferLimitKey]) + if err != nil { + return nil, err + } + bufferLimit = int(bl64) + } + + retryWait := defaultRetryWait + if ctx.Config[retryWaitKey] != "" { + rwd, err := time.ParseDuration(ctx.Config[retryWaitKey]) + if err != nil { + return nil, err + } + retryWait = int(rwd.Seconds() * 1000) + } + + maxRetries := defaultMaxRetries + if ctx.Config[maxRetriesKey] != "" { + mr64, err := strconv.ParseUint(ctx.Config[maxRetriesKey], 10, strconv.IntSize) + if err != nil { + return nil, err + } + maxRetries = int(mr64) + } + + asyncConnect := false + if ctx.Config[asyncConnectKey] != "" { + if asyncConnect, err = strconv.ParseBool(ctx.Config[asyncConnectKey]); err != nil { + return nil, err + } + } + + fluentConfig := fluent.Config{ + FluentPort: loc.port, + FluentHost: loc.host, + FluentNetwork: loc.protocol, + FluentSocketPath: loc.path, + BufferLimit: bufferLimit, + RetryWait: retryWait, + MaxRetry: maxRetries, + AsyncConnect: asyncConnect, + } + + logrus.WithField("container", ctx.ContainerID).WithField("config", fluentConfig). + Debug("logging driver fluentd configured") + + log, err := fluent.New(fluentConfig) + if err != nil { + return nil, err + } + return &fluentd{ + tag: tag, + containerID: ctx.ContainerID, + containerName: ctx.ContainerName, + writer: log, + extra: extra, + }, nil +} + +func (f *fluentd) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + // fluent-logger-golang buffers logs from failures and disconnections, + // and these are transferred again automatically. + return f.writer.PostWithTime(f.tag, msg.Timestamp, data) +} + +func (f *fluentd) Close() error { + return f.writer.Close() +} + +func (f *fluentd) Name() string { + return name +} + +// ValidateLogOpt looks for fluentd specific log option fluentd-address. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "labels": + case "tag": + case addressKey: + case bufferLimitKey: + case retryWaitKey: + case maxRetriesKey: + case asyncConnectKey: + // Accepted + default: + return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) + } + } + + if _, err := parseAddress(cfg["fluentd-address"]); err != nil { + return err + } + + return nil +} + +func parseAddress(address string) (*location, error) { + if address == "" { + return &location{ + protocol: defaultProtocol, + host: defaultHost, + port: defaultPort, + path: "", + }, nil + } + + protocol := defaultProtocol + givenAddress := address + if urlutil.IsTransportURL(address) { + url, err := url.Parse(address) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + // unix and unixgram socket + if url.Scheme == "unix" || url.Scheme == "unixgram" { + return &location{ + protocol: url.Scheme, + host: "", + port: 0, + path: url.Path, + }, nil + } + // tcp|udp + protocol = url.Scheme + address = url.Host + } + + host, port, err := net.SplitHostPort(address) + if err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: defaultPort, + path: "", + }, nil + } + + portnum, err := strconv.Atoi(port) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: portnum, + path: "", + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging.go b/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging.go new file mode 100644 index 0000000..9a8c1c9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging.go @@ -0,0 +1,200 @@ +package gcplogs + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/docker/docker/daemon/logger" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/cloud/compute/metadata" + "google.golang.org/cloud/logging" +) + +const ( + name = "gcplogs" + + projectOptKey = "gcp-project" + logLabelsKey = "labels" + logEnvKey = "env" + logCmdKey = "gcp-log-cmd" + logZoneKey = "gcp-meta-zone" + logNameKey = "gcp-meta-name" + logIDKey = "gcp-meta-id" +) + +var ( + // The number of logs the gcplogs driver has dropped. + droppedLogs uint64 + + onGCE bool + + // instance metadata populated from the metadata server if available + projectID string + zone string + instanceName string + instanceID string +) + +func init() { + + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + + if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil { + logrus.Fatal(err) + } +} + +type gcplogs struct { + client *logging.Client + instance *instanceInfo + container *containerInfo +} + +type dockerLogEntry struct { + Instance *instanceInfo `json:"instance,omitempty"` + Container *containerInfo `json:"container,omitempty"` + Data string `json:"data,omitempty"` +} + +type instanceInfo struct { + Zone string `json:"zone,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` +} + +type containerInfo struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + ImageName string `json:"imageName,omitempty"` + ImageID string `json:"imageId,omitempty"` + Created time.Time `json:"created,omitempty"` + Command string `json:"command,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +var initGCPOnce sync.Once + +func initGCP() { + initGCPOnce.Do(func() { + onGCE = metadata.OnGCE() + if onGCE { + // These will fail on instances if the metadata service is + // down or the client is compiled with an API version that + // has been removed. Since these are not vital, let's ignore + // them and make their fields in the dockeLogEntry ,omitempty + projectID, _ = metadata.ProjectID() + zone, _ = metadata.Zone() + instanceName, _ = metadata.InstanceName() + instanceID, _ = metadata.InstanceID() + } + }) +} + +// New creates a new logger that logs to Google Cloud Logging using the application +// default credentials. +// +// See https://developers.google.com/identity/protocols/application-default-credentials +func New(ctx logger.Context) (logger.Logger, error) { + initGCP() + + var project string + if projectID != "" { + project = projectID + } + if projectID, found := ctx.Config[projectOptKey]; found { + project = projectID + } + if project == "" { + return nil, fmt.Errorf("No project was specified and couldn't read project from the meatadata server. Please specify a project") + } + + c, err := logging.NewClient(context.Background(), project, "gcplogs-docker-driver") + if err != nil { + return nil, err + } + + if err := c.Ping(); err != nil { + return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) + } + + l := &gcplogs{ + client: c, + container: &containerInfo{ + Name: ctx.ContainerName, + ID: ctx.ContainerID, + ImageName: ctx.ContainerImageName, + ImageID: ctx.ContainerImageID, + Created: ctx.ContainerCreated, + Metadata: ctx.ExtraAttributes(nil), + }, + } + + if ctx.Config[logCmdKey] == "true" { + l.container.Command = ctx.Command() + } + + if onGCE { + l.instance = &instanceInfo{ + Zone: zone, + Name: instanceName, + ID: instanceID, + } + } else if ctx.Config[logZoneKey] != "" || ctx.Config[logNameKey] != "" || ctx.Config[logIDKey] != "" { + l.instance = &instanceInfo{ + Zone: ctx.Config[logZoneKey], + Name: ctx.Config[logNameKey], + ID: ctx.Config[logIDKey], + } + } + + // The logger "overflows" at a rate of 10,000 logs per second and this + // overflow func is called. We want to surface the error to the user + // without overly spamming /var/log/docker.log so we log the first time + // we overflow and every 1000th time after. + c.Overflow = func(_ *logging.Client, _ logging.Entry) error { + if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { + logrus.Errorf("gcplogs driver has dropped %v logs", i) + } + return nil + } + + return l, nil +} + +// ValidateLogOpts validates the opts passed to the gcplogs driver. Currently, the gcplogs +// driver doesn't take any arguments. +func ValidateLogOpts(cfg map[string]string) error { + for k := range cfg { + switch k { + case projectOptKey, logLabelsKey, logEnvKey, logCmdKey, logZoneKey, logNameKey, logIDKey: + default: + return fmt.Errorf("%q is not a valid option for the gcplogs driver", k) + } + } + return nil +} + +func (l *gcplogs) Log(m *logger.Message) error { + return l.client.Log(logging.Entry{ + Time: m.Timestamp, + Payload: &dockerLogEntry{ + Instance: l.instance, + Container: l.container, + Data: string(m.Line), + }, + }) +} + +func (l *gcplogs) Close() error { + return l.client.Flush() +} + +func (l *gcplogs) Name() string { + return name +} diff --git a/vendor/github.com/moby/moby/daemon/logger/gelf/gelf.go b/vendor/github.com/moby/moby/daemon/logger/gelf/gelf.go new file mode 100644 index 0000000..95860ac --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/gelf/gelf.go @@ -0,0 +1,209 @@ +// +build linux + +// Package gelf provides the log driver for forwarding server logs to +// endpoints that support the Graylog Extended Log Format. +package gelf + +import ( + "bytes" + "compress/flate" + "encoding/json" + "fmt" + "net" + "net/url" + "strconv" + "time" + + "github.com/Graylog2/go-gelf/gelf" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const name = "gelf" + +type gelfLogger struct { + writer *gelf.Writer + ctx logger.Context + hostname string + rawExtra json.RawMessage +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a gelf logger using the configuration passed in on the +// context. The supported context configuration variable is gelf-address. +func New(ctx logger.Context) (logger.Logger, error) { + // parse gelf address + address, err := parseAddress(ctx.Config["gelf-address"]) + if err != nil { + return nil, err + } + + // collect extra data for GELF message + hostname, err := ctx.Hostname() + if err != nil { + return nil, fmt.Errorf("gelf: cannot access hostname to set source field") + } + + // remove trailing slash from container name + containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/") + + // parse log tag + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra := map[string]interface{}{ + "_container_id": ctx.ContainerID, + "_container_name": string(containerName), + "_image_id": ctx.ContainerImageID, + "_image_name": ctx.ContainerImageName, + "_command": ctx.Command(), + "_tag": tag, + "_created": ctx.ContainerCreated, + } + + extraAttrs := ctx.ExtraAttributes(func(key string) string { + if key[0] == '_' { + return key + } + return "_" + key + }) + for k, v := range extraAttrs { + extra[k] = v + } + + rawExtra, err := json.Marshal(extra) + if err != nil { + return nil, err + } + + // create new gelfWriter + gelfWriter, err := gelf.NewWriter(address) + if err != nil { + return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) + } + + if v, ok := ctx.Config["gelf-compression-type"]; ok { + switch v { + case "gzip": + gelfWriter.CompressionType = gelf.CompressGzip + case "zlib": + gelfWriter.CompressionType = gelf.CompressZlib + case "none": + gelfWriter.CompressionType = gelf.CompressNone + default: + return nil, fmt.Errorf("gelf: invalid compression type %q", v) + } + } + + if v, ok := ctx.Config["gelf-compression-level"]; ok { + val, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) + } + gelfWriter.CompressionLevel = val + } + + return &gelfLogger{ + writer: gelfWriter, + ctx: ctx, + hostname: hostname, + rawExtra: rawExtra, + }, nil +} + +func (s *gelfLogger) Log(msg *logger.Message) error { + level := gelf.LOG_INFO + if msg.Source == "stderr" { + level = gelf.LOG_ERR + } + + m := gelf.Message{ + Version: "1.1", + Host: s.hostname, + Short: string(msg.Line), + TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, + Level: level, + RawExtra: s.rawExtra, + } + + if err := s.writer.WriteMessage(&m); err != nil { + return fmt.Errorf("gelf: cannot send GELF message: %v", err) + } + return nil +} + +func (s *gelfLogger) Close() error { + return s.writer.Close() +} + +func (s *gelfLogger) Name() string { + return name +} + +// ValidateLogOpt looks for gelf specific log option gelf-address. +func ValidateLogOpt(cfg map[string]string) error { + for key, val := range cfg { + switch key { + case "gelf-address": + case "tag": + case "labels": + case "env": + case "gelf-compression-level": + i, err := strconv.Atoi(val) + if err != nil || i < flate.DefaultCompression || i > flate.BestCompression { + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + case "gelf-compression-type": + switch val { + case "gzip", "zlib", "none": + default: + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + default: + return fmt.Errorf("unknown log opt %q for gelf log driver", key) + } + } + + if _, err := parseAddress(cfg["gelf-address"]); err != nil { + return err + } + + return nil +} + +func parseAddress(address string) (string, error) { + if address == "" { + return "", nil + } + if !urlutil.IsTransportURL(address) { + return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", err + } + + // we support only udp + if url.Scheme != "udp" { + return "", fmt.Errorf("gelf: endpoint needs to be UDP") + } + + // get host and port + if _, _, err = net.SplitHostPort(url.Host); err != nil { + return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port") + } + + return url.Host, nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/gelf/gelf_unsupported.go b/vendor/github.com/moby/moby/daemon/logger/gelf/gelf_unsupported.go new file mode 100644 index 0000000..266f73b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/gelf/gelf_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package gelf diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/journald.go b/vendor/github.com/moby/moby/daemon/logger/journald/journald.go new file mode 100644 index 0000000..9569859 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/journald.go @@ -0,0 +1,122 @@ +// +build linux + +// Package journald provides the log driver for forwarding server logs +// to endpoints that receive the systemd format. +package journald + +import ( + "fmt" + "sync" + "unicode" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" +) + +const name = "journald" + +type journald struct { + vars map[string]string // additional variables and values to send to the journal along with the log message + readers readerList +} + +type readerList struct { + mu sync.Mutex + readers map[*logger.LogWatcher]*logger.LogWatcher +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// sanitizeKeyMode returns the sanitized string so that it could be used in journald. +// In journald log, there are special requirements for fields. +// Fields must be composed of uppercase letters, numbers, and underscores, but must +// not start with an underscore. +func sanitizeKeyMod(s string) string { + n := "" + for _, v := range s { + if 'a' <= v && v <= 'z' { + v = unicode.ToUpper(v) + } else if ('Z' < v || v < 'A') && ('9' < v || v < '0') { + v = '_' + } + // If (n == "" && v == '_'), then we will skip as this is the beginning with '_' + if !(n == "" && v == '_') { + n += string(v) + } + } + return n +} + +// New creates a journald logger using the configuration passed in on +// the context. +func New(ctx logger.Context) (logger.Logger, error) { + if !journal.Enabled() { + return nil, fmt.Errorf("journald is not enabled on this host") + } + // Strip a leading slash so that people can search for + // CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo. + name := ctx.ContainerName + if name[0] == '/' { + name = name[1:] + } + + // parse log tag + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + vars := map[string]string{ + "CONTAINER_ID": ctx.ContainerID[:12], + "CONTAINER_ID_FULL": ctx.ContainerID, + "CONTAINER_NAME": name, + "CONTAINER_TAG": tag, + } + extraAttrs := ctx.ExtraAttributes(sanitizeKeyMod) + for k, v := range extraAttrs { + vars[k] = v + } + return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil +} + +// We don't actually accept any options, but we have to supply a callback for +// the factory to pass the (probably empty) configuration map to. +func validateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "labels": + case "env": + case "tag": + default: + return fmt.Errorf("unknown log opt '%s' for journald log driver", key) + } + } + return nil +} + +func (s *journald) Log(msg *logger.Message) error { + vars := map[string]string{} + for k, v := range s.vars { + vars[k] = v + } + if msg.Partial { + vars["CONTAINER_PARTIAL_MESSAGE"] = "true" + } + if msg.Source == "stderr" { + return journal.Send(string(msg.Line), journal.PriErr, vars) + } + return journal.Send(string(msg.Line), journal.PriInfo, vars) +} + +func (s *journald) Name() string { + return name +} diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/journald_test.go b/vendor/github.com/moby/moby/daemon/logger/journald/journald_test.go new file mode 100644 index 0000000..224423f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/journald_test.go @@ -0,0 +1,23 @@ +// +build linux + +package journald + +import ( + "testing" +) + +func TestSanitizeKeyMod(t *testing.T) { + entries := map[string]string{ + "io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io?.kubernetes.pod.name": "IO__KUBERNETES_POD_NAME", + "?io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "_io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "__io123_kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + } + for k, v := range entries { + if sanitizeKeyMod(k) != v { + t.Fatalf("Failed to sanitize %s, got %s, expected %s", k, sanitizeKeyMod(k), v) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/journald_unsupported.go b/vendor/github.com/moby/moby/daemon/logger/journald/journald_unsupported.go new file mode 100644 index 0000000..d52ca92 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/journald_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux + +package journald + +type journald struct { +} diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/read.go b/vendor/github.com/moby/moby/daemon/logger/journald/read.go new file mode 100644 index 0000000..2fbbfe1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/read.go @@ -0,0 +1,409 @@ +// +build linux,cgo,!static_build,journald + +package journald + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// +//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial) +//{ +// int rc; +// size_t plength; +// *msg = NULL; +// *length = 0; +// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true"); +// rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length); +// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0)); +// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length); +// if (rc == 0) { +// if (*length > 8) { +// (*msg) += 8; +// *length -= 8; +// } else { +// *msg = NULL; +// *length = 0; +// rc = -ENOENT; +// } +// } +// return rc; +//} +//static int get_priority(sd_journal *j, int *priority) +//{ +// const void *data; +// size_t i, length; +// int rc; +// *priority = -1; +// rc = sd_journal_get_data(j, "PRIORITY", &data, &length); +// if (rc == 0) { +// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) { +// *priority = 0; +// for (i = 9; i < length; i++) { +// *priority = *priority * 10 + ((const char *)data)[i] - '0'; +// } +// if (length > 9) { +// rc = 0; +// } +// } +// } +// return rc; +//} +//static int is_attribute_field(const char *msg, size_t length) +//{ +// static const struct known_field { +// const char *name; +// size_t length; +// } fields[] = { +// {"MESSAGE", sizeof("MESSAGE") - 1}, +// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1}, +// {"PRIORITY", sizeof("PRIORITY") - 1}, +// {"CODE_FILE", sizeof("CODE_FILE") - 1}, +// {"CODE_LINE", sizeof("CODE_LINE") - 1}, +// {"CODE_FUNC", sizeof("CODE_FUNC") - 1}, +// {"ERRNO", sizeof("ERRNO") - 1}, +// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1}, +// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1}, +// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1}, +// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1}, +// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1}, +// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1}, +// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1}, +// }; +// unsigned int i; +// void *p; +// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) { +// return -1; +// } +// length = ((const char *) p) - msg; +// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) { +// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) { +// return -1; +// } +// } +// return 0; +//} +//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length) +//{ +// int rc; +// *msg = NULL; +// *length = 0; +// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) { +// if (is_attribute_field(*msg, *length) == 0) { +// break; +// } +// rc = -ENOENT; +// } +// return rc; +//} +//static int wait_for_data_cancelable(sd_journal *j, int pipefd) +//{ +// struct pollfd fds[2]; +// uint64_t when = 0; +// int timeout, jevents, i; +// struct timespec ts; +// uint64_t now; +// +// memset(&fds, 0, sizeof(fds)); +// fds[0].fd = pipefd; +// fds[0].events = POLLHUP; +// fds[1].fd = sd_journal_get_fd(j); +// if (fds[1].fd < 0) { +// return fds[1].fd; +// } +// +// do { +// jevents = sd_journal_get_events(j); +// if (jevents < 0) { +// return jevents; +// } +// fds[1].events = jevents; +// sd_journal_get_timeout(j, &when); +// if (when == -1) { +// timeout = -1; +// } else { +// clock_gettime(CLOCK_MONOTONIC, &ts); +// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; +// timeout = when > now ? (int) ((when - now + 999) / 1000) : 0; +// } +// i = poll(fds, 2, timeout); +// if ((i == -1) && (errno != EINTR)) { +// /* An unexpected error. */ +// return (errno != 0) ? -errno : -EINTR; +// } +// if (fds[0].revents & POLLHUP) { +// /* The close notification pipe was closed. */ +// return 0; +// } +// if (sd_journal_process(j) == SD_JOURNAL_APPEND) { +// /* Data, which we might care about, was appended. */ +// return 1; +// } +// } while ((fds[0].revents & POLLHUP) == 0); +// return 0; +//} +import "C" + +import ( + "fmt" + "strings" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" +) + +func (s *journald) Close() error { + s.readers.mu.Lock() + for reader := range s.readers.readers { + reader.Close() + } + s.readers.mu.Unlock() + return nil +} + +func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor *C.char) *C.char { + var msg, data, cursor *C.char + var length C.size_t + var stamp C.uint64_t + var priority, partial C.int + + // Walk the journal from here forward until we run out of new entries. +drain: + for { + // Try not to send a given entry twice. + if oldCursor != nil { + for C.sd_journal_test_cursor(j, oldCursor) > 0 { + if C.sd_journal_next(j) <= 0 { + break drain + } + } + } + // Read and send the logged message, if there is one to read. + i := C.get_message(j, &msg, &length, &partial) + if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { + // Read the entry's timestamp. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } + // Set up the time and text of the entry. + timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) + line := C.GoBytes(unsafe.Pointer(msg), C.int(length)) + if partial == 0 { + line = append(line, "\n"...) + } + // Recover the stream name by mapping + // from the journal priority back to + // the stream that we would have + // assigned that value. + source := "" + if C.get_priority(j, &priority) != 0 { + source = "" + } else if priority == C.int(journal.PriErr) { + source = "stderr" + } else if priority == C.int(journal.PriInfo) { + source = "stdout" + } + // Retrieve the values of any variables we're adding to the journal. + attrs := make(map[string]string) + C.sd_journal_restart_data(j) + for C.get_attribute_field(j, &data, &length) > C.int(0) { + kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) + attrs[kv[0]] = kv[1] + } + if len(attrs) == 0 { + attrs = nil + } + // Send the log message. + logWatcher.Msg <- &logger.Message{ + Line: line, + Source: source, + Timestamp: timestamp.In(time.UTC), + Attrs: attrs, + } + } + // If we're at the end of the journal, we're done (for now). + if C.sd_journal_next(j) <= 0 { + break + } + } + + // free(NULL) is safe + C.free(unsafe.Pointer(oldCursor)) + if C.sd_journal_get_cursor(j, &cursor) != 0 { + // ensure that we won't be freeing an address that's invalid + cursor = nil + } + return cursor +} + +func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor *C.char) *C.char { + s.readers.mu.Lock() + s.readers.readers[logWatcher] = logWatcher + s.readers.mu.Unlock() + + newCursor := make(chan *C.char) + + go func() { + // Keep copying journal data out until we're notified to stop + // or we hit an error. + status := C.wait_for_data_cancelable(j, pfd[0]) + for status == 1 { + cursor = s.drainJournal(logWatcher, config, j, cursor) + status = C.wait_for_data_cancelable(j, pfd[0]) + } + if status < 0 { + cerrstr := C.strerror(C.int(-status)) + errstr := C.GoString(cerrstr) + fmtstr := "error %q while attempting to follow journal for container %q" + logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) + } + // Clean up. + C.close(pfd[0]) + s.readers.mu.Lock() + delete(s.readers.readers, logWatcher) + s.readers.mu.Unlock() + close(logWatcher.Msg) + newCursor <- cursor + }() + // Wait until we're told to stop. + select { + case <-logWatcher.WatchClose(): + // Notify the other goroutine that its work is done. + C.close(pfd[1]) + } + + cursor = <-newCursor + + return cursor +} + +func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + var j *C.sd_journal + var cmatch, cursor *C.char + var stamp C.uint64_t + var sinceUnixMicro uint64 + var pipes [2]C.int + + // Get a handle to the journal. + rc := C.sd_journal_open(&j, C.int(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error opening journal") + close(logWatcher.Msg) + return + } + // If we end up following the log, we can set the journal context + // pointer and the channel pointer to nil so that we won't close them + // here, potentially while the goroutine that uses them is still + // running. Otherwise, close them when we return from this function. + following := false + defer func(pfollowing *bool) { + if !*pfollowing { + close(logWatcher.Msg) + } + C.sd_journal_close(j) + }(&following) + // Remove limits on the size of data items that we'll retrieve. + rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal data threshold") + return + } + // Add a match to have the library do the searching for us. + cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) + defer C.free(unsafe.Pointer(cmatch)) + rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal match") + return + } + // If we have a cutoff time, convert it to Unix time once. + if !config.Since.IsZero() { + nano := config.Since.UnixNano() + sinceUnixMicro = uint64(nano / 1000) + } + if config.Tail > 0 { + lines := config.Tail + // Start at the end of the journal. + if C.sd_journal_seek_tail(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to end of journal") + return + } + if C.sd_journal_previous(j) < 0 { + logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") + return + } + // Walk backward. + for lines > 0 { + // Stop if the entry time is before our cutoff. + // We'll need the entry time if it isn't, so go + // ahead and parse it now. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } else { + // Compare the timestamp on the entry + // to our threshold value. + if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { + break + } + } + lines-- + // If we're at the start of the journal, or + // don't need to back up past any more entries, + // stop. + if lines == 0 || C.sd_journal_previous(j) <= 0 { + break + } + } + } else { + // Start at the beginning of the journal. + if C.sd_journal_seek_head(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start of journal") + return + } + // If we have a cutoff date, fast-forward to it. + if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start time in journal") + return + } + if C.sd_journal_next(j) < 0 { + logWatcher.Err <- fmt.Errorf("error skipping to next journal entry") + return + } + } + cursor = s.drainJournal(logWatcher, config, j, nil) + if config.Follow { + // Allocate a descriptor for following the journal, if we'll + // need one. Do it here so that we can report if it fails. + if fd := C.sd_journal_get_fd(j); fd < C.int(0) { + logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd))) + } else { + // Create a pipe that we can poll at the same time as + // the journald descriptor. + if C.pipe(&pipes[0]) == C.int(-1) { + logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe") + } else { + cursor = s.followJournal(logWatcher, config, j, pipes, cursor) + // Let followJournal handle freeing the journal context + // object and closing the channel. + following = true + } + } + } + + C.free(unsafe.Pointer(cursor)) + return +} + +func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + go s.readLogs(logWatcher, config) + return logWatcher +} diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/read_native.go b/vendor/github.com/moby/moby/daemon/logger/journald/read_native.go new file mode 100644 index 0000000..bba6de5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/read_native.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build,journald,!journald_compat + +package journald + +// #cgo pkg-config: libsystemd +import "C" diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/read_native_compat.go b/vendor/github.com/moby/moby/daemon/logger/journald/read_native_compat.go new file mode 100644 index 0000000..3f7a43c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/read_native_compat.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build,journald,journald_compat + +package journald + +// #cgo pkg-config: libsystemd-journal +import "C" diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/read_unsupported.go b/vendor/github.com/moby/moby/daemon/logger/journald/read_unsupported.go new file mode 100644 index 0000000..b43abdc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/read_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux !cgo static_build !journald + +package journald + +func (s *journald) Close() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog.go new file mode 100644 index 0000000..a429a08 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog.go @@ -0,0 +1,151 @@ +// Package jsonfilelog provides the default Logger implementation for +// Docker logging. This logger logs to files on the host server in the +// JSON format. +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/go-units" +) + +// Name is the name of the file that the jsonlogger logs to. +const Name = "json-file" + +// JSONFileLogger is Logger implementation for default Docker logging. +type JSONFileLogger struct { + buf *bytes.Buffer + writer *loggerutils.RotateFileWriter + mu sync.Mutex + readers map[*logger.LogWatcher]struct{} // stores the active log followers + extra []byte // json-encoded extra attributes +} + +func init() { + if err := logger.RegisterLogDriver(Name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates new JSONFileLogger which writes to filename passed in +// on given context. +func New(ctx logger.Context) (logger.Logger, error) { + var capval int64 = -1 + if capacity, ok := ctx.Config["max-size"]; ok { + var err error + capval, err = units.FromHumanSize(capacity) + if err != nil { + return nil, err + } + } + var maxFiles = 1 + if maxFileString, ok := ctx.Config["max-file"]; ok { + var err error + maxFiles, err = strconv.Atoi(maxFileString) + if err != nil { + return nil, err + } + if maxFiles < 1 { + return nil, fmt.Errorf("max-file cannot be less than 1") + } + } + + writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles) + if err != nil { + return nil, err + } + + var extra []byte + if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 { + var err error + extra, err = json.Marshal(attrs) + if err != nil { + return nil, err + } + } + + return &JSONFileLogger{ + buf: bytes.NewBuffer(nil), + writer: writer, + readers: make(map[*logger.LogWatcher]struct{}), + extra: extra, + }, nil +} + +// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. +func (l *JSONFileLogger) Log(msg *logger.Message) error { + timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) + if err != nil { + return err + } + l.mu.Lock() + logline := msg.Line + if !msg.Partial { + logline = append(msg.Line, '\n') + } + err = (&jsonlog.JSONLogs{ + Log: logline, + Stream: msg.Source, + Created: timestamp, + RawAttrs: l.extra, + }).MarshalJSONBuf(l.buf) + if err != nil { + l.mu.Unlock() + return err + } + + l.buf.WriteByte('\n') + _, err = l.writer.Write(l.buf.Bytes()) + l.buf.Reset() + l.mu.Unlock() + + return err +} + +// ValidateLogOpt looks for json specific log options max-file & max-size. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "max-file": + case "max-size": + case "labels": + case "env": + default: + return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) + } + } + return nil +} + +// LogPath returns the location the given json logger logs to. +func (l *JSONFileLogger) LogPath() string { + return l.writer.LogPath() +} + +// Close closes underlying file and signals all readers to stop. +func (l *JSONFileLogger) Close() error { + l.mu.Lock() + err := l.writer.Close() + for r := range l.readers { + r.Close() + delete(l.readers, r) + } + l.mu.Unlock() + return err +} + +// Name returns name of this logger. +func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog_test.go b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog_test.go new file mode 100644 index 0000000..b5b818a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -0,0 +1,248 @@ +package jsonfilelog + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strconv" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/jsonlog" +) + +func TestJSONFileLogger(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + if err := l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } +} + +func BenchmarkJSONFileLogger(b *testing.B) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + + testLine := "Line that thinks that it is log line from docker\n" + msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } + } +} + +func TestJSONFileLoggerWithOpts(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"max-file": "2", "max-size": "1k"} + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + Config: config, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + for i := 0; i < 20; i++ { + if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { + t.Fatal(err) + } + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + penUlt, err := ioutil.ReadFile(filename + ".1") + if err != nil { + t.Fatal(err) + } + + expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } + if string(penUlt) != expectedPenultimate { + t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) + } + +} + +func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl"} + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + Config: config, + ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, + ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true"}, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + if err := l.Log(&logger.Message{Line: []byte("line"), Source: "src1"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + + var jsonLog jsonlog.JSONLogs + if err := json.Unmarshal(res, &jsonLog); err != nil { + t.Fatal(err) + } + extra := make(map[string]string) + if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil { + t.Fatal(err) + } + expected := map[string]string{ + "rack": "101", + "dc": "lhr", + "environ": "production", + "debug": "false", + "ssl": "true", + } + if !reflect.DeepEqual(extra, expected) { + t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) + } +} + +func BenchmarkJSONFileLoggerWithReader(b *testing.B) { + b.StopTimer() + b.ResetTimer() + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + dir, err := ioutil.TempDir("", "json-logger-bench") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(dir) + + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filepath.Join(dir, "container.log"), + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + msg := &logger.Message{Line: []byte("line"), Source: "src1"} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + + b.StartTimer() + + go func() { + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + l.Log(msg) + } + } + l.Close() + }() + + lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true}) + watchClose := lw.WatchClose() + for { + select { + case <-lw.Msg: + case <-watchClose: + return + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/read.go b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/read.go new file mode 100644 index 0000000..30d533f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,326 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/fsnotify/fsnotify" + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/tailfile" +) + +const maxJSONDecodeRetry = 20000 + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + Attrs: l.Attrs, + } + return msg, nil +} + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + // lock so the read stream doesn't get corrupted due to rotations or other log data written while we read + // This will block writes!!! + l.mu.Lock() + + pth := l.writer.LogPath() + var files []io.ReadSeeker + for i := l.writer.MaxFiles(); i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + break + } + continue + } + defer f.Close() + + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- err + l.mu.Unlock() + return + } + defer latestFile.Close() + + if config.Tail != 0 { + tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) + tailFile(tailer, logWatcher, config.Tail, config.Since) + } + + // close all the rotated files + for _, f := range files { + if err := f.(io.Closer).Close(); err != nil { + logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) + } + } + + if !config.Follow { + if err := latestFile.Close(); err != nil { + logrus.Errorf("Error closing file: %v", err) + } + l.mu.Unlock() + return + } + + if config.Tail >= 0 { + latestFile.Seek(0, os.SEEK_END) + } + + l.readers[logWatcher] = struct{}{} + l.mu.Unlock() + + notifyRotate := l.writer.NotifyRotate() + followLogs(latestFile, logWatcher, notifyRotate, config.Since) + + l.mu.Lock() + delete(l.readers, logWatcher) + l.mu.Unlock() + + l.writer.NotifyRotateEvict(notifyRotate) +} + +func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { + var rdr io.Reader + rdr = f + if tail > 0 { + ls, err := tailfile.TailFile(f, tail) + if err != nil { + logWatcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + dec := json.NewDecoder(rdr) + l := &jsonlog.JSONLog{} + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + logWatcher.Err <- err + } + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case <-logWatcher.WatchClose(): + return + case logWatcher.Msg <- msg: + } + } +} + +func watchFile(name string) (filenotify.FileWatcher, error) { + fileWatcher, err := filenotify.New() + if err != nil { + return nil, err + } + + if err := fileWatcher.Add(name); err != nil { + logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + fileWatcher.Close() + logrus.Debugf("error watching log file for modifications: %v", err) + return nil, err + } + } + return fileWatcher, nil +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { + dec := json.NewDecoder(f) + l := &jsonlog.JSONLog{} + + name := f.Name() + fileWatcher, err := watchFile(name) + if err != nil { + logWatcher.Err <- err + return + } + defer func() { + f.Close() + fileWatcher.Remove(name) + fileWatcher.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-logWatcher.WatchClose(): + fileWatcher.Remove(name) + cancel() + case <-ctx.Done(): + return + } + }() + + var retries int + handleRotate := func() error { + f.Close() + fileWatcher.Remove(name) + + // retry when the file doesn't exist + for retries := 0; retries <= 5; retries++ { + f, err = os.Open(name) + if err == nil || !os.IsNotExist(err) { + break + } + } + if err != nil { + return err + } + if err := fileWatcher.Add(name); err != nil { + return err + } + dec = json.NewDecoder(f) + return nil + } + + errRetry := errors.New("retry") + errDone := errors.New("done") + waitRead := func() error { + select { + case e := <-fileWatcher.Events(): + switch e.Op { + case fsnotify.Write: + dec = json.NewDecoder(f) + return nil + case fsnotify.Rename, fsnotify.Remove: + select { + case <-notifyRotate: + case <-ctx.Done(): + return errDone + } + if err := handleRotate(); err != nil { + return err + } + return nil + } + return errRetry + case err := <-fileWatcher.Errors(): + logrus.Debug("logger got error watching file: %v", err) + // Something happened, let's try and stay alive and create a new watcher + if retries <= 5 { + fileWatcher.Close() + fileWatcher, err = watchFile(name) + if err != nil { + return err + } + retries++ + return errRetry + } + return err + case <-ctx.Done(): + return errDone + } + } + + handleDecodeErr := func(err error) error { + if err == io.EOF { + for { + err := waitRead() + if err == nil { + break + } + if err == errRetry { + continue + } + return err + } + return nil + } + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries++ + return nil + } + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. + if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { + reader := io.MultiReader(dec.Buffered(), f) + dec = json.NewDecoder(reader) + retries++ + return nil + } + return err + } + + // main loop + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err := handleDecodeErr(err); err != nil { + if err == errDone { + return + } + // we got an unrecoverable error, so return + logWatcher.Err <- err + return + } + // ready to try again + continue + } + + retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case logWatcher.Msg <- msg: + case <-ctx.Done(): + logWatcher.Msg <- msg + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/logentries/logentries.go b/vendor/github.com/moby/moby/daemon/logger/logentries/logentries.go new file mode 100644 index 0000000..e794b1e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/logentries/logentries.go @@ -0,0 +1,94 @@ +// Package logentries provides the log driver for forwarding server logs +// to logentries endpoints. +package logentries + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/bsphere/le_go" + "github.com/docker/docker/daemon/logger" +) + +type logentries struct { + tag string + containerID string + containerName string + writer *le_go.Logger + extra map[string]string +} + +const ( + name = "logentries" + token = "logentries-token" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a logentries logger using the configuration passed in on +// the context. The supported context configuration variable is +// logentries-token. +func New(ctx logger.Context) (logger.Logger, error) { + logrus.WithField("container", ctx.ContainerID). + WithField("token", ctx.Config[token]). + Debug("logging driver logentries configured") + + log, err := le_go.Connect(ctx.Config[token]) + if err != nil { + return nil, err + } + return &logentries{ + containerID: ctx.ContainerID, + containerName: ctx.ContainerName, + writer: log, + }, nil +} + +func (f *logentries) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + f.writer.Println(f.tag, msg.Timestamp, data) + return nil +} + +func (f *logentries) Close() error { + return f.writer.Close() +} + +func (f *logentries) Name() string { + return name +} + +// ValidateLogOpt looks for logentries specific log option logentries-address. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "labels": + case "tag": + case key: + default: + return fmt.Errorf("unknown log opt '%s' for logentries log driver", key) + } + } + + if cfg[token] == "" { + return fmt.Errorf("Missing logentries token") + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/logger.go b/vendor/github.com/moby/moby/daemon/logger/logger.go new file mode 100644 index 0000000..d091997 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/logger.go @@ -0,0 +1,134 @@ +// Package logger defines interfaces that logger drivers implement to +// log messages. +// +// The other half of a logger driver is the implementation of the +// factory, which holds the contextual instance information that +// allows multiple loggers of the same type to perform different +// actions, such as logging to different locations. +package logger + +import ( + "errors" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/jsonlog" +) + +// ErrReadLogsNotSupported is returned when the logger does not support reading logs. +var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading") + +const ( + // TimeFormat is the time format used for timestamps sent to log readers. + TimeFormat = jsonlog.RFC3339NanoFixed + logWatcherBufferSize = 4096 +) + +// Message is datastructure that represents piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +type Message struct { + Line []byte + Source string + Timestamp time.Time + Attrs LogAttributes + Partial bool +} + +// CopyMessage creates a copy of the passed-in Message which will remain +// unchanged if the original is changed. Log drivers which buffer Messages +// rather than dispatching them during their Log() method should use this +// function to obtain a Message whose Line member's contents won't change. +func CopyMessage(msg *Message) *Message { + m := new(Message) + m.Line = make([]byte, len(msg.Line)) + copy(m.Line, msg.Line) + m.Source = msg.Source + m.Timestamp = msg.Timestamp + m.Partial = msg.Partial + m.Attrs = make(LogAttributes) + for k, v := range msg.Attrs { + m.Attrs[k] = v + } + return m +} + +// LogAttributes is used to hold the extra attributes available in the log message +// Primarily used for converting the map type to string and sorting. +type LogAttributes map[string]string +type byKey []string + +func (s byKey) Len() int { return len(s) } +func (s byKey) Less(i, j int) bool { + keyI := strings.Split(s[i], "=") + keyJ := strings.Split(s[j], "=") + return keyI[0] < keyJ[0] +} +func (s byKey) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (a LogAttributes) String() string { + var ss byKey + for k, v := range a { + ss = append(ss, k+"="+v) + } + sort.Sort(ss) + return strings.Join(ss, ",") +} + +// Logger is the interface for docker logging drivers. +type Logger interface { + Log(*Message) error + Name() string + Close() error +} + +// ReadConfig is the configuration passed into ReadLogs. +type ReadConfig struct { + Since time.Time + Tail int + Follow bool +} + +// LogReader is the interface for reading log messages for loggers that support reading. +type LogReader interface { + // Read logs from underlying logging backend + ReadLogs(ReadConfig) *LogWatcher +} + +// LogWatcher is used when consuming logs read from the LogReader interface. +type LogWatcher struct { + // For sending log messages to a reader. + Msg chan *Message + // For sending error messages that occur while while reading logs. + Err chan error + closeOnce sync.Once + closeNotifier chan struct{} +} + +// NewLogWatcher returns a new LogWatcher. +func NewLogWatcher() *LogWatcher { + return &LogWatcher{ + Msg: make(chan *Message, logWatcherBufferSize), + Err: make(chan error, 1), + closeNotifier: make(chan struct{}), + } +} + +// Close notifies the underlying log reader to stop. +func (w *LogWatcher) Close() { + // only close if not already closed + w.closeOnce.Do(func() { + close(w.closeNotifier) + }) +} + +// WatchClose returns a channel receiver that receives notification +// when the watcher has been closed. This should only be called from +// one goroutine. +func (w *LogWatcher) WatchClose() <-chan struct{} { + return w.closeNotifier +} diff --git a/vendor/github.com/moby/moby/daemon/logger/logger_test.go b/vendor/github.com/moby/moby/daemon/logger/logger_test.go new file mode 100644 index 0000000..16e1514 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/logger_test.go @@ -0,0 +1,26 @@ +package logger + +import ( + "reflect" + "testing" + "time" +) + +func TestCopyMessage(t *testing.T) { + msg := &Message{ + Line: []byte("test line."), + Source: "stdout", + Timestamp: time.Now(), + Attrs: LogAttributes{ + "key1": "val1", + "key2": "val2", + "key3": "val3", + }, + Partial: true, + } + + m := CopyMessage(msg) + if !reflect.DeepEqual(m, msg) { + t.Fatalf("CopyMessage failed to copy message") + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag.go new file mode 100644 index 0000000..4752679 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag.go @@ -0,0 +1,31 @@ +package loggerutils + +import ( + "bytes" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/utils/templates" +) + +// DefaultTemplate defines the defaults template logger should use. +const DefaultTemplate = "{{.ID}}" + +// ParseLogTag generates a context aware tag for consistency across different +// log drivers based on the context of the running container. +func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) { + tagTemplate := ctx.Config["tag"] + if tagTemplate == "" { + tagTemplate = defaultTemplate + } + + tmpl, err := templates.NewParse("log-tag", tagTemplate) + if err != nil { + return "", err + } + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, &ctx); err != nil { + return "", err + } + + return buf.String(), nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag_test.go b/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag_test.go new file mode 100644 index 0000000..e2aa435 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag_test.go @@ -0,0 +1,47 @@ +package loggerutils + +import ( + "testing" + + "github.com/docker/docker/daemon/logger" +) + +func TestParseLogTagDefaultTag(t *testing.T) { + ctx := buildContext(map[string]string{}) + tag, e := ParseLogTag(ctx, "{{.ID}}") + assertTag(t, e, tag, ctx.ID()) +} + +func TestParseLogTag(t *testing.T) { + ctx := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) + tag, e := ParseLogTag(ctx, "{{.ID}}") + assertTag(t, e, tag, "test-image/test-container/container-ab") +} + +func TestParseLogTagEmptyTag(t *testing.T) { + ctx := buildContext(map[string]string{}) + tag, e := ParseLogTag(ctx, "{{.DaemonName}}/{{.ID}}") + assertTag(t, e, tag, "test-dockerd/container-ab") +} + +// Helpers + +func buildContext(cfg map[string]string) logger.Context { + return logger.Context{ + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerName: "/test-container", + ContainerImageID: "image-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerImageName: "test-image", + Config: cfg, + DaemonName: "test-dockerd", + } +} + +func assertTag(t *testing.T, e error, tag string, expected string) { + if e != nil { + t.Fatalf("Error generating tag: %q", e) + } + if tag != expected { + t.Fatalf("Wrong tag: %q, should be %q", tag, expected) + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/loggerutils/rotatefilewriter.go b/vendor/github.com/moby/moby/daemon/logger/loggerutils/rotatefilewriter.go new file mode 100644 index 0000000..99e0964 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/loggerutils/rotatefilewriter.go @@ -0,0 +1,124 @@ +package loggerutils + +import ( + "os" + "strconv" + "sync" + + "github.com/docker/docker/pkg/pubsub" +) + +// RotateFileWriter is Logger implementation for default Docker logging. +type RotateFileWriter struct { + f *os.File // store for closing + mu sync.Mutex + capacity int64 //maximum size of each file + currentSize int64 // current size of the latest file + maxFiles int //maximum number of files + notifyRotate *pubsub.Publisher +} + +//NewRotateFileWriter creates new RotateFileWriter +func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) { + log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) + if err != nil { + return nil, err + } + + size, err := log.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + + return &RotateFileWriter{ + f: log, + capacity: capacity, + currentSize: size, + maxFiles: maxFiles, + notifyRotate: pubsub.NewPublisher(0, 1), + }, nil +} + +//WriteLog write log message to File +func (w *RotateFileWriter) Write(message []byte) (int, error) { + w.mu.Lock() + if err := w.checkCapacityAndRotate(); err != nil { + w.mu.Unlock() + return -1, err + } + + n, err := w.f.Write(message) + if err == nil { + w.currentSize += int64(n) + } + w.mu.Unlock() + return n, err +} + +func (w *RotateFileWriter) checkCapacityAndRotate() error { + if w.capacity == -1 { + return nil + } + + if w.currentSize >= w.capacity { + name := w.f.Name() + if err := w.f.Close(); err != nil { + return err + } + if err := rotate(name, w.maxFiles); err != nil { + return err + } + file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 06400) + if err != nil { + return err + } + w.f = file + w.currentSize = 0 + w.notifyRotate.Publish(struct{}{}) + } + + return nil +} + +func rotate(name string, maxFiles int) error { + if maxFiles < 2 { + return nil + } + for i := maxFiles - 1; i > 1; i-- { + toPath := name + "." + strconv.Itoa(i) + fromPath := name + "." + strconv.Itoa(i-1) + if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { + return err + } + } + + if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// LogPath returns the location the given writer logs to. +func (w *RotateFileWriter) LogPath() string { + return w.f.Name() +} + +// MaxFiles return maximum number of files +func (w *RotateFileWriter) MaxFiles() int { + return w.maxFiles +} + +//NotifyRotate returns the new subscriber +func (w *RotateFileWriter) NotifyRotate() chan interface{} { + return w.notifyRotate.Subscribe() +} + +//NotifyRotateEvict removes the specified subscriber from receiving any more messages. +func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) { + w.notifyRotate.Evict(sub) +} + +// Close closes underlying file and signals all readers to stop. +func (w *RotateFileWriter) Close() error { + return w.f.Close() +} diff --git a/vendor/github.com/moby/moby/daemon/logger/splunk/splunk.go b/vendor/github.com/moby/moby/daemon/logger/splunk/splunk.go new file mode 100644 index 0000000..f858326 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/splunk/splunk.go @@ -0,0 +1,621 @@ +// Package splunk provides the log driver for forwarding server logs to +// Splunk HTTP Event Collector endpoint. +package splunk + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const ( + driverName = "splunk" + splunkURLKey = "splunk-url" + splunkTokenKey = "splunk-token" + splunkSourceKey = "splunk-source" + splunkSourceTypeKey = "splunk-sourcetype" + splunkIndexKey = "splunk-index" + splunkCAPathKey = "splunk-capath" + splunkCANameKey = "splunk-caname" + splunkInsecureSkipVerifyKey = "splunk-insecureskipverify" + splunkFormatKey = "splunk-format" + splunkVerifyConnectionKey = "splunk-verify-connection" + splunkGzipCompressionKey = "splunk-gzip" + splunkGzipCompressionLevelKey = "splunk-gzip-level" + envKey = "env" + labelsKey = "labels" + tagKey = "tag" +) + +const ( + // How often do we send messages (if we are not reaching batch size) + defaultPostMessagesFrequency = 5 * time.Second + // How big can be batch of messages + defaultPostMessagesBatchSize = 1000 + // Maximum number of messages we can store in buffer + defaultBufferMaximum = 10 * defaultPostMessagesBatchSize + // Number of messages allowed to be queued in the channel + defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize +) + +const ( + envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY" + envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE" + envVarBufferMaximum = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX" + envVarStreamChannelSize = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE" +) + +type splunkLoggerInterface interface { + logger.Logger + worker() +} + +type splunkLogger struct { + client *http.Client + transport *http.Transport + + url string + auth string + nullMessage *splunkMessage + + // http compression + gzipCompression bool + gzipCompressionLevel int + + // Advanced options + postMessagesFrequency time.Duration + postMessagesBatchSize int + bufferMaximum int + + // For synchronization between background worker and logger. + // We use channel to send messages to worker go routine. + // All other variables for blocking Close call before we flush all messages to HEC + stream chan *splunkMessage + lock sync.RWMutex + closed bool + closedCond *sync.Cond +} + +type splunkLoggerInline struct { + *splunkLogger + + nullEvent *splunkMessageEvent +} + +type splunkLoggerJSON struct { + *splunkLoggerInline +} + +type splunkLoggerRaw struct { + *splunkLogger + + prefix []byte +} + +type splunkMessage struct { + Event interface{} `json:"event"` + Time string `json:"time"` + Host string `json:"host"` + Source string `json:"source,omitempty"` + SourceType string `json:"sourcetype,omitempty"` + Index string `json:"index,omitempty"` +} + +type splunkMessageEvent struct { + Line interface{} `json:"line"` + Source string `json:"source"` + Tag string `json:"tag,omitempty"` + Attrs map[string]string `json:"attrs,omitempty"` +} + +const ( + splunkFormatRaw = "raw" + splunkFormatJSON = "json" + splunkFormatInline = "inline" +) + +func init() { + if err := logger.RegisterLogDriver(driverName, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates splunk logger driver using configuration passed in context +func New(ctx logger.Context) (logger.Logger, error) { + hostname, err := ctx.Hostname() + if err != nil { + return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) + } + + // Parse and validate Splunk URL + splunkURL, err := parseURL(ctx) + if err != nil { + return nil, err + } + + // Splunk Token is required parameter + splunkToken, ok := ctx.Config[splunkTokenKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) + } + + tlsConfig := &tls.Config{} + + // Splunk is using autogenerated certificates by default, + // allow users to trust them with skipping verification + if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok { + insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) + if err != nil { + return nil, err + } + tlsConfig.InsecureSkipVerify = insecureSkipVerify + } + + // If path to the root certificate is provided - load it + if caPath, ok := ctx.Config[splunkCAPathKey]; ok { + caCert, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, err + } + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caPool + } + + if caName, ok := ctx.Config[splunkCANameKey]; ok { + tlsConfig.ServerName = caName + } + + gzipCompression := false + if gzipCompressionStr, ok := ctx.Config[splunkGzipCompressionKey]; ok { + gzipCompression, err = strconv.ParseBool(gzipCompressionStr) + if err != nil { + return nil, err + } + } + + gzipCompressionLevel := gzip.DefaultCompression + if gzipCompressionLevelStr, ok := ctx.Config[splunkGzipCompressionLevelKey]; ok { + var err error + gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32) + if err != nil { + return nil, err + } + gzipCompressionLevel = int(gzipCompressionLevel64) + if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression { + err := fmt.Errorf("Not supported level '%s' for %s (supported values between %d and %d).", + gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression) + return nil, err + } + } + + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + client := &http.Client{ + Transport: transport, + } + + source := ctx.Config[splunkSourceKey] + sourceType := ctx.Config[splunkSourceTypeKey] + index := ctx.Config[splunkIndexKey] + + var nullMessage = &splunkMessage{ + Host: hostname, + Source: source, + SourceType: sourceType, + Index: index, + } + + // Allow user to remove tag from the messages by setting tag to empty string + tag := "" + if tagTemplate, ok := ctx.Config[tagKey]; !ok || tagTemplate != "" { + tag, err = loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + } + + attrs := ctx.ExtraAttributes(nil) + + var ( + postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency) + postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize) + bufferMaximum = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum) + streamChannelSize = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize) + ) + + logger := &splunkLogger{ + client: client, + transport: transport, + url: splunkURL.String(), + auth: "Splunk " + splunkToken, + nullMessage: nullMessage, + gzipCompression: gzipCompression, + gzipCompressionLevel: gzipCompressionLevel, + stream: make(chan *splunkMessage, streamChannelSize), + postMessagesFrequency: postMessagesFrequency, + postMessagesBatchSize: postMessagesBatchSize, + bufferMaximum: bufferMaximum, + } + + // By default we verify connection, but we allow use to skip that + verifyConnection := true + if verifyConnectionStr, ok := ctx.Config[splunkVerifyConnectionKey]; ok { + var err error + verifyConnection, err = strconv.ParseBool(verifyConnectionStr) + if err != nil { + return nil, err + } + } + if verifyConnection { + err = verifySplunkConnection(logger) + if err != nil { + return nil, err + } + } + + var splunkFormat string + if splunkFormatParsed, ok := ctx.Config[splunkFormatKey]; ok { + switch splunkFormatParsed { + case splunkFormatInline: + case splunkFormatJSON: + case splunkFormatRaw: + default: + return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormat) + } + splunkFormat = splunkFormatParsed + } else { + splunkFormat = splunkFormatInline + } + + var loggerWrapper splunkLoggerInterface + + switch splunkFormat { + case splunkFormatInline: + nullEvent := &splunkMessageEvent{ + Tag: tag, + Attrs: attrs, + } + + loggerWrapper = &splunkLoggerInline{logger, nullEvent} + case splunkFormatJSON: + nullEvent := &splunkMessageEvent{ + Tag: tag, + Attrs: attrs, + } + + loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}} + case splunkFormatRaw: + var prefix bytes.Buffer + if tag != "" { + prefix.WriteString(tag) + prefix.WriteString(" ") + } + for key, value := range attrs { + prefix.WriteString(key) + prefix.WriteString("=") + prefix.WriteString(value) + prefix.WriteString(" ") + } + + loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()} + default: + return nil, fmt.Errorf("Unexpected format %s", splunkFormat) + } + + go loggerWrapper.worker() + + return loggerWrapper, nil +} + +func (l *splunkLoggerInline) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + + event := *l.nullEvent + event.Line = string(msg.Line) + event.Source = msg.Source + + message.Event = &event + + return l.queueMessageAsync(message) +} + +func (l *splunkLoggerJSON) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + event := *l.nullEvent + + var rawJSONMessage json.RawMessage + if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil { + event.Line = &rawJSONMessage + } else { + event.Line = string(msg.Line) + } + + event.Source = msg.Source + + message.Event = &event + + return l.queueMessageAsync(message) +} + +func (l *splunkLoggerRaw) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + + message.Event = string(append(l.prefix, msg.Line...)) + + return l.queueMessageAsync(message) +} + +func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error { + l.lock.RLock() + defer l.lock.RUnlock() + if l.closedCond != nil { + return fmt.Errorf("%s: driver is closed", driverName) + } + l.stream <- message + return nil +} + +func (l *splunkLogger) worker() { + timer := time.NewTicker(l.postMessagesFrequency) + var messages []*splunkMessage + for { + select { + case message, open := <-l.stream: + if !open { + l.postMessages(messages, true) + l.lock.Lock() + defer l.lock.Unlock() + l.transport.CloseIdleConnections() + l.closed = true + l.closedCond.Signal() + return + } + messages = append(messages, message) + // Only sending when we get exactly to the batch size, + // This also helps not to fire postMessages on every new message, + // when previous try failed. + if len(messages)%l.postMessagesBatchSize == 0 { + messages = l.postMessages(messages, false) + } + case <-timer.C: + messages = l.postMessages(messages, false) + } + } +} + +func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage { + messagesLen := len(messages) + for i := 0; i < messagesLen; i += l.postMessagesBatchSize { + upperBound := i + l.postMessagesBatchSize + if upperBound > messagesLen { + upperBound = messagesLen + } + if err := l.tryPostMessages(messages[i:upperBound]); err != nil { + logrus.Error(err) + if messagesLen-i >= l.bufferMaximum || lastChance { + // If this is last chance - print them all to the daemon log + if lastChance { + upperBound = messagesLen + } + // Not all sent, but buffer has got to its maximum, let's log all messages + // we could not send and return buffer minus one batch size + for j := i; j < upperBound; j++ { + if jsonEvent, err := json.Marshal(messages[j]); err != nil { + logrus.Error(err) + } else { + logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent))) + } + } + return messages[upperBound:messagesLen] + } + // Not all sent, returning buffer from where we have not sent messages + return messages[i:messagesLen] + } + } + // All sent, return empty buffer + return messages[:0] +} + +func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error { + if len(messages) == 0 { + return nil + } + var buffer bytes.Buffer + var writer io.Writer + var gzipWriter *gzip.Writer + var err error + // If gzip compression is enabled - create gzip writer with specified compression + // level. If gzip compression is disabled, use standard buffer as a writer + if l.gzipCompression { + gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel) + if err != nil { + return err + } + writer = gzipWriter + } else { + writer = &buffer + } + for _, message := range messages { + jsonEvent, err := json.Marshal(message) + if err != nil { + return err + } + if _, err := writer.Write(jsonEvent); err != nil { + return err + } + } + // If gzip compression is enabled, tell it, that we are done + if l.gzipCompression { + err = gzipWriter.Close() + if err != nil { + return err + } + } + req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes())) + if err != nil { + return err + } + req.Header.Set("Authorization", l.auth) + // Tell if we are sending gzip compressed body + if l.gzipCompression { + req.Header.Set("Content-Encoding", "gzip") + } + res, err := l.client.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body) + } + io.Copy(ioutil.Discard, res.Body) + return nil +} + +func (l *splunkLogger) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if l.closedCond == nil { + l.closedCond = sync.NewCond(&l.lock) + close(l.stream) + for !l.closed { + l.closedCond.Wait() + } + } + return nil +} + +func (l *splunkLogger) Name() string { + return driverName +} + +func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage { + message := *l.nullMessage + message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second)) + return &message +} + +// ValidateLogOpt looks for all supported by splunk driver options +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case splunkURLKey: + case splunkTokenKey: + case splunkSourceKey: + case splunkSourceTypeKey: + case splunkIndexKey: + case splunkCAPathKey: + case splunkCANameKey: + case splunkInsecureSkipVerifyKey: + case splunkFormatKey: + case splunkVerifyConnectionKey: + case splunkGzipCompressionKey: + case splunkGzipCompressionLevelKey: + case envKey: + case labelsKey: + case tagKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName) + } + } + return nil +} + +func parseURL(ctx logger.Context) (*url.URL, error) { + splunkURLStr, ok := ctx.Config[splunkURLKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) + } + + splunkURL, err := url.Parse(splunkURLStr) + if err != nil { + return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey) + } + + if !urlutil.IsURL(splunkURLStr) || + !splunkURL.IsAbs() || + (splunkURL.Path != "" && splunkURL.Path != "/") || + splunkURL.RawQuery != "" || + splunkURL.Fragment != "" { + return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey) + } + + splunkURL.Path = "/services/collector/event/1.0" + + return splunkURL, nil +} + +func verifySplunkConnection(l *splunkLogger) error { + req, err := http.NewRequest(http.MethodOptions, l.url, nil) + if err != nil { + return err + } + res, err := l.client.Do(req) + if err != nil { + return err + } + if res.Body != nil { + defer res.Body.Close() + } + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body) + } + return nil +} + +func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration { + valueStr := os.Getenv(envName) + if valueStr == "" { + return defaultValue + } + parsedValue, err := time.ParseDuration(valueStr) + if err != nil { + logrus.Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err)) + return defaultValue + } + return parsedValue +} + +func getAdvancedOptionInt(envName string, defaultValue int) int { + valueStr := os.Getenv(envName) + if valueStr == "" { + return defaultValue + } + parsedValue, err := strconv.ParseInt(valueStr, 10, 32) + if err != nil { + logrus.Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err)) + return defaultValue + } + return int(parsedValue) +} diff --git a/vendor/github.com/moby/moby/daemon/logger/splunk/splunk_test.go b/vendor/github.com/moby/moby/daemon/logger/splunk/splunk_test.go new file mode 100644 index 0000000..df74cba --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/splunk/splunk_test.go @@ -0,0 +1,1302 @@ +package splunk + +import ( + "compress/gzip" + "fmt" + "os" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" +) + +// Validate options +func TestValidateLogOpt(t *testing.T) { + err := ValidateLogOpt(map[string]string{ + splunkURLKey: "http://127.0.0.1", + splunkTokenKey: "2160C7EF-2CE9-4307-A180-F852B99CF417", + splunkSourceKey: "mysource", + splunkSourceTypeKey: "mysourcetype", + splunkIndexKey: "myindex", + splunkCAPathKey: "/usr/cert.pem", + splunkCANameKey: "ca_name", + splunkInsecureSkipVerifyKey: "true", + splunkFormatKey: "json", + splunkVerifyConnectionKey: "true", + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + envKey: "a", + labelsKey: "b", + tagKey: "c", + }) + if err != nil { + t.Fatal(err) + } + + err = ValidateLogOpt(map[string]string{ + "not-supported-option": "a", + }) + if err == nil { + t.Fatal("Expecting error on unsupported options") + } +} + +// Driver require user to specify required options +func TestNewMissedConfig(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{}, + } + _, err := New(ctx) + if err == nil { + t.Fatal("Logger driver should fail when no required parameters specified") + } +} + +// Driver require user to specify splunk-url +func TestNewMissedUrl(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{ + splunkTokenKey: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + }, + } + _, err := New(ctx) + if err.Error() != "splunk: splunk-url is expected" { + t.Fatal("Logger driver should fail when no required parameters specified") + } +} + +// Driver require user to specify splunk-token +func TestNewMissedToken(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: "http://127.0.0.1:8088", + }, + } + _, err := New(ctx) + if err.Error() != "splunk: splunk-token is expected" { + t.Fatal("Logger driver should fail when no required parameters specified") + } +} + +// Test default settings +func TestDefault(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if loggerDriver.Name() != driverName { + t.Fatal("Unexpected logger driver name") + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Found not default values setup in Splunk Logging Driver.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notajson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + if *hec.gzipEnabled { + t.Fatal("Gzip should not be used") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "{\"a\":\"b\"}" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message2) + } + + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notajson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify inline format with a not default settings for most of options +func TestInlineFormatWithNonDefaultOptions(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkSourceKey: "mysource", + splunkSourceTypeKey: "mysourcetype", + splunkIndexKey: "myindex", + splunkFormatKey: splunkFormatInline, + splunkGzipCompressionKey: "true", + tagKey: "{{.ImageName}}/{{.Name}}", + labelsKey: "a", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "mysource" || + splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" || + splunkLoggerDriver.nullMessage.Index != "myindex" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + messageTime := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("1"), "stdout", messageTime, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 1 { + t.Fatal("Expected one message") + } + + if !*hec.gzipEnabled { + t.Fatal("Gzip should be used") + } + + message := hec.messages[0] + if message.Time != fmt.Sprintf("%f", float64(messageTime.UnixNano())/float64(time.Second)) || + message.Host != hostname || + message.Source != "mysource" || + message.SourceType != "mysourcetype" || + message.Index != "myindex" { + t.Fatalf("Unexpected values of message %v", message) + } + + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "1" || + event["source"] != "stdout" || + event["tag"] != "container_image_name/container_name" || + event["attrs"].(map[string]interface{})["a"] != "b" || + len(event) != 4 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify JSON format +func TestJsonFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatJSON, + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerJSON) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"].(map[string]interface{})["a"] != "b" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + // If message cannot be parsed as JSON - it should be sent as a line + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notjson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format +func TestRawFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid notjson" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format with labels +func TestRawFormatWithLabels(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + labelsKey: "a", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid a=b " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b notjson" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that Splunk Logging Driver can accept tag="" which will allow to send raw messages +// in the same way we get them in stdout/stderr +func TestRawFormatWithoutTag(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + tagKey: "", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "" { + t.Log(string(splunkLoggerDriver.prefix) + "a") + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "{\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "notjson" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that we will send messages in batches with default batching parameters, +// but change frequency to be sure that numOfRequests will match expected 17 requests +func TestBatching(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < defaultStreamChannelSize*4; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != defaultStreamChannelSize*4 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 16 batches + if hec.numOfRequests != 17 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that test is using time to fire events not rare than specified frequency +func TestFrequency(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "5ms"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + time.Sleep(15 * time.Millisecond) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 to verify that we have sent messages with required frequency, + // but because frequency is too small (to keep test quick), instead of 11, use 9 if context switches will be slow + if hec.numOfRequests < 9 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Simulate behavior similar to first version of Splunk Logging Driver, when we were sending one message +// per request +func TestOneMessagePerRequest(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 messages + if hec.numOfRequests != 11 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Driver should not be created when HEC is unresponsive +func TestVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + _, err := New(ctx) + if err == nil { + t.Fatal("Expecting driver to fail, when server is unresponsive") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that user can specify to skip verification that Splunk HEC is working. +// Also in this test we verify retry logic. +func TestSkipVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < defaultStreamChannelSize*2; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be accepted at this point") + } + + hec.simulateServerError = false + + for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != defaultStreamChannelSize*4 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify logic for when we filled whole buffer +func TestBufferMaximum(t *testing.T) { + if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "10"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < 11; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be accepted at this point") + } + + hec.simulateServerError = false + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 9 { + t.Fatalf("Expected # of messages %d, got %d", 9, len(hec.messages)) + } + + // First 1000 messages are written to daemon log when buffer was full + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i+2) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that we are not blocking close when HEC is down for the whole time +func TestServerAlwaysDown(t *testing.T) { + if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "4"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < 5; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be sent") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Cannot send messages after we close driver +func TestCannotSendAfterClose(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{[]byte("message1"), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{[]byte("message2"), "stdout", time.Now(), nil, false}); err == nil { + t.Fatal("Driver should not allow to send messages after close") + } + + if len(hec.messages) != 1 { + t.Fatal("Only one message should be sent") + } + + message := hec.messages[0] + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "message1" { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/splunk/splunkhecmock_test.go b/vendor/github.com/moby/moby/daemon/logger/splunk/splunkhecmock_test.go new file mode 100644 index 0000000..e508948 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/splunk/splunkhecmock_test.go @@ -0,0 +1,157 @@ +package splunk + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "testing" +) + +func (message *splunkMessage) EventAsString() (string, error) { + if val, ok := message.Event.(string); ok { + return val, nil + } + return "", fmt.Errorf("Cannot cast Event %v to string", message.Event) +} + +func (message *splunkMessage) EventAsMap() (map[string]interface{}, error) { + if val, ok := message.Event.(map[string]interface{}); ok { + return val, nil + } + return nil, fmt.Errorf("Cannot cast Event %v to map", message.Event) +} + +type HTTPEventCollectorMock struct { + tcpAddr *net.TCPAddr + tcpListener *net.TCPListener + + token string + simulateServerError bool + + test *testing.T + + connectionVerified bool + gzipEnabled *bool + messages []*splunkMessage + numOfRequests int +} + +func NewHTTPEventCollectorMock(t *testing.T) *HTTPEventCollectorMock { + tcpAddr := &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0, Zone: ""} + tcpListener, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + t.Fatal(err) + } + return &HTTPEventCollectorMock{ + tcpAddr: tcpAddr, + tcpListener: tcpListener, + token: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + simulateServerError: false, + test: t, + connectionVerified: false} +} + +func (hec *HTTPEventCollectorMock) URL() string { + return "http://" + hec.tcpListener.Addr().String() +} + +func (hec *HTTPEventCollectorMock) Serve() error { + return http.Serve(hec.tcpListener, hec) +} + +func (hec *HTTPEventCollectorMock) Close() error { + return hec.tcpListener.Close() +} + +func (hec *HTTPEventCollectorMock) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + var err error + + hec.numOfRequests++ + + if hec.simulateServerError { + if request.Body != nil { + defer request.Body.Close() + } + writer.WriteHeader(http.StatusInternalServerError) + return + } + + switch request.Method { + case http.MethodOptions: + // Verify that options method is getting called only once + if hec.connectionVerified { + hec.test.Errorf("Connection should not be verified more than once. Got second request with %s method.", request.Method) + } + hec.connectionVerified = true + writer.WriteHeader(http.StatusOK) + case http.MethodPost: + // Always verify that Driver is using correct path to HEC + if request.URL.String() != "/services/collector/event/1.0" { + hec.test.Errorf("Unexpected path %v", request.URL) + } + defer request.Body.Close() + + if authorization, ok := request.Header["Authorization"]; !ok || authorization[0] != ("Splunk "+hec.token) { + hec.test.Error("Authorization header is invalid.") + } + + gzipEnabled := false + if contentEncoding, ok := request.Header["Content-Encoding"]; ok && contentEncoding[0] == "gzip" { + gzipEnabled = true + } + + if hec.gzipEnabled == nil { + hec.gzipEnabled = &gzipEnabled + } else if gzipEnabled != *hec.gzipEnabled { + // Nothing wrong with that, but we just know that Splunk Logging Driver does not do that + hec.test.Error("Driver should not change Content Encoding.") + } + + var gzipReader *gzip.Reader + var reader io.Reader + if gzipEnabled { + gzipReader, err = gzip.NewReader(request.Body) + if err != nil { + hec.test.Fatal(err) + } + reader = gzipReader + } else { + reader = request.Body + } + + // Read body + var body []byte + body, err = ioutil.ReadAll(reader) + if err != nil { + hec.test.Fatal(err) + } + + // Parse message + messageStart := 0 + for i := 0; i < len(body); i++ { + if i == len(body)-1 || (body[i] == '}' && body[i+1] == '{') { + var message splunkMessage + err = json.Unmarshal(body[messageStart:i+1], &message) + if err != nil { + hec.test.Log(string(body[messageStart : i+1])) + hec.test.Fatal(err) + } + hec.messages = append(hec.messages, &message) + messageStart = i + 1 + } + } + + if gzipEnabled { + gzipReader.Close() + } + + writer.WriteHeader(http.StatusOK) + default: + hec.test.Errorf("Unexpected HTTP method %s", http.MethodOptions) + writer.WriteHeader(http.StatusBadRequest) + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/syslog/syslog.go b/vendor/github.com/moby/moby/daemon/logger/syslog/syslog.go new file mode 100644 index 0000000..fb9e867 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/syslog/syslog.go @@ -0,0 +1,262 @@ +// Package syslog provides the logdriver for forwarding server logs to syslog endpoints. +package syslog + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "os" + "strconv" + "strings" + "time" + + syslog "github.com/RackSec/srslog" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + name = "syslog" + secureProto = "tcp+tls" +) + +var facilities = map[string]syslog.Priority{ + "kern": syslog.LOG_KERN, + "user": syslog.LOG_USER, + "mail": syslog.LOG_MAIL, + "daemon": syslog.LOG_DAEMON, + "auth": syslog.LOG_AUTH, + "syslog": syslog.LOG_SYSLOG, + "lpr": syslog.LOG_LPR, + "news": syslog.LOG_NEWS, + "uucp": syslog.LOG_UUCP, + "cron": syslog.LOG_CRON, + "authpriv": syslog.LOG_AUTHPRIV, + "ftp": syslog.LOG_FTP, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, +} + +type syslogger struct { + writer *syslog.Writer +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// rsyslog uses appname part of syslog message to fill in an %syslogtag% template +// attribute in rsyslog.conf. In order to be backward compatible to rfc3164 +// tag will be also used as an appname +func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances +// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximium +// resolution is limited to "TIME-SECFRAC" which is 6 (microsecond resolution) +func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00") + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// New creates a syslog logger using the configuration passed in on +// the context. Supported context configuration variables are +// syslog-address, syslog-facility, syslog-format. +func New(ctx logger.Context) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + proto, address, err := parseAddress(ctx.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(ctx.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"], proto) + if err != nil { + return nil, err + } + + var log *syslog.Writer + if proto == secureProto { + tlsConfig, tlsErr := parseTLSConfig(ctx.Config) + if tlsErr != nil { + return nil, tlsErr + } + log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig) + } else { + log, err = syslog.Dial(proto, address, facility, tag) + } + + if err != nil { + return nil, err + } + + log.SetFormatter(syslogFormatter) + log.SetFramer(syslogFramer) + + return &syslogger{ + writer: log, + }, nil +} + +func (s *syslogger) Log(msg *logger.Message) error { + if msg.Source == "stderr" { + return s.writer.Err(string(msg.Line)) + } + return s.writer.Info(string(msg.Line)) +} + +func (s *syslogger) Close() error { + return s.writer.Close() +} + +func (s *syslogger) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if address == "" { + return "", "", nil + } + if !urlutil.IsTransportURL(address) { + return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix and unixgram socket validation + if url.Scheme == "unix" || url.Scheme == "unixgram" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", err + } + host = host + ":514" + } + + return url.Scheme, host, nil +} + +// ValidateLogOpt looks for syslog specific log options +// syslog-address, syslog-facility. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "labels": + case "syslog-address": + case "syslog-facility": + case "syslog-tls-ca-cert": + case "syslog-tls-cert": + case "syslog-tls-key": + case "syslog-tls-skip-verify": + case "tag": + case "syslog-format": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { + return err + } + if _, err := parseFacility(cfg["syslog-facility"]); err != nil { + return err + } + if _, _, err := parseLogFormat(cfg["syslog-format"], ""); err != nil { + return err + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} + +func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { + _, skipVerify := cfg["syslog-tls-skip-verify"] + + opts := tlsconfig.Options{ + CAFile: cfg["syslog-tls-ca-cert"], + CertFile: cfg["syslog-tls-cert"], + KeyFile: cfg["syslog-tls-key"], + InsecureSkipVerify: skipVerify, + } + + return tlsconfig.Client(opts) +} + +func parseLogFormat(logFormat, proto string) (syslog.Formatter, syslog.Framer, error) { + switch logFormat { + case "": + return syslog.UnixFormatter, syslog.DefaultFramer, nil + case "rfc3164": + return syslog.RFC3164Formatter, syslog.DefaultFramer, nil + case "rfc5424": + if proto == secureProto { + return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424formatterWithAppNameAsTag, syslog.DefaultFramer, nil + case "rfc5424micro": + if proto == secureProto { + return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424microformatterWithAppNameAsTag, syslog.DefaultFramer, nil + default: + return nil, nil, errors.New("Invalid syslog format") + } + +} diff --git a/vendor/github.com/moby/moby/daemon/logger/syslog/syslog_test.go b/vendor/github.com/moby/moby/daemon/logger/syslog/syslog_test.go new file mode 100644 index 0000000..5015610 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/syslog/syslog_test.go @@ -0,0 +1,62 @@ +package syslog + +import ( + "reflect" + "testing" + + syslog "github.com/RackSec/srslog" +) + +func functionMatches(expectedFun interface{}, actualFun interface{}) bool { + return reflect.ValueOf(expectedFun).Pointer() == reflect.ValueOf(actualFun).Pointer() +} + +func TestParseLogFormat(t *testing.T) { + formatter, framer, err := parseLogFormat("rfc5424", "udp") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424", "tcp+tls") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "udp") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "tcp+tls") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc3164", "") + if err != nil || !functionMatches(syslog.RFC3164Formatter, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc3164 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("", "") + if err != nil || !functionMatches(syslog.UnixFormatter, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse empty format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("invalid", "") + if err == nil { + t.Fatal("Failed to parse invalid format", err, formatter, framer) + } +} + +func TestValidateLogOptEmpty(t *testing.T) { + emptyConfig := make(map[string]string) + if err := ValidateLogOpt(emptyConfig); err != nil { + t.Fatal("Failed to parse empty config", err) + } +} diff --git a/vendor/github.com/moby/moby/daemon/logs.go b/vendor/github.com/moby/moby/daemon/logs.go new file mode 100644 index 0000000..004bf20 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logs.go @@ -0,0 +1,146 @@ +package daemon + +import ( + "fmt" + "io" + "strconv" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stdcopy" +) + +// ContainerLogs hooks up a container's stdout and stderr streams +// configured with the given struct. +func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + if !(config.ShowStdout || config.ShowStderr) { + return fmt.Errorf("You must choose at least one stream") + } + + cLog, err := daemon.getLogger(container) + if err != nil { + return err + } + logReader, ok := cLog.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + + follow := config.Follow && container.IsRunning() + tailLines, err := strconv.Atoi(config.Tail) + if err != nil { + tailLines = -1 + } + + logrus.Debug("logs: begin stream") + + var since time.Time + if config.Since != "" { + s, n, err := timetypes.ParseTimestamps(config.Since, 0) + if err != nil { + return err + } + since = time.Unix(s, n) + } + readConfig := logger.ReadConfig{ + Since: since, + Tail: tailLines, + Follow: follow, + } + logs := logReader.ReadLogs(readConfig) + // Close logWatcher on exit + defer func() { + logs.Close() + if cLog != container.LogDriver { + // Since the logger isn't cached in the container, which + // occurs if it is running, it must get explicitly closed + // here to avoid leaking it and any file handles it has. + if err := cLog.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + } + }() + + wf := ioutils.NewWriteFlusher(config.OutStream) + defer wf.Close() + close(started) + wf.Flush() + + var outStream io.Writer + outStream = wf + errStream := outStream + if !container.Config.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + for { + select { + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + return nil + case <-ctx.Done(): + logrus.Debugf("logs: end stream, ctx is done: %v", ctx.Err()) + return nil + case msg, ok := <-logs.Msg: + if !ok { + logrus.Debug("logs: end stream") + return nil + } + logLine := msg.Line + if config.Details { + logLine = append([]byte(msg.Attrs.String()+" "), logLine...) + } + if config.Timestamps { + logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) + } + if msg.Source == "stdout" && config.ShowStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.ShowStderr { + errStream.Write(logLine) + } + } + } +} + +func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { + if container.LogDriver != nil && container.IsRunning() { + return container.LogDriver, nil + } + return container.StartLogger(container.HostConfig.LogConfig) +} + +// mergeLogConfig merges the daemon log config to the container's log config if the container's log driver is not specified. +func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error { + if cfg.Type == "" { + cfg.Type = daemon.defaultLogConfig.Type + } + + if cfg.Config == nil { + cfg.Config = make(map[string]string) + } + + if cfg.Type == daemon.defaultLogConfig.Type { + for k, v := range daemon.defaultLogConfig.Config { + if _, ok := cfg.Config[k]; !ok { + cfg.Config[k] = v + } + } + } + + return logger.ValidateLogOpts(cfg.Type, cfg.Config) +} diff --git a/vendor/github.com/moby/moby/daemon/logs_test.go b/vendor/github.com/moby/moby/daemon/logs_test.go new file mode 100644 index 0000000..0c36299 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logs_test.go @@ -0,0 +1,15 @@ +package daemon + +import ( + "testing" + + containertypes "github.com/docker/docker/api/types/container" +) + +func TestMergeAndVerifyLogConfigNilConfig(t *testing.T) { + d := &Daemon{defaultLogConfig: containertypes.LogConfig{Type: "json-file", Config: map[string]string{"max-file": "1"}}} + cfg := containertypes.LogConfig{Type: d.defaultLogConfig.Type} + if err := d.mergeAndVerifyLogConfig(&cfg); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/daemon/metrics.go b/vendor/github.com/moby/moby/daemon/metrics.go new file mode 100644 index 0000000..69dbfd9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/metrics.go @@ -0,0 +1,42 @@ +package daemon + +import "github.com/docker/go-metrics" + +var ( + containerActions metrics.LabeledTimer + imageActions metrics.LabeledTimer + networkActions metrics.LabeledTimer + engineVersion metrics.LabeledGauge + engineCpus metrics.Gauge + engineMemory metrics.Gauge + healthChecksCounter metrics.Counter + healthChecksFailedCounter metrics.Counter +) + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action") + for _, a := range []string{ + "start", + "changes", + "commit", + "create", + "delete", + } { + containerActions.WithValues(a).Update(0) + } + networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action") + engineVersion = ns.NewLabeledGauge("engine", "The version and commit information for the engine process", metrics.Unit("info"), + "version", + "commit", + "architecture", + "graph_driver", "kernel", + "os", + ) + engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus")) + engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes) + healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks") + healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks") + imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action") + metrics.Register(ns) +} diff --git a/vendor/github.com/moby/moby/daemon/monitor.go b/vendor/github.com/moby/moby/daemon/monitor.go new file mode 100644 index 0000000..ee0d1fc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/monitor.go @@ -0,0 +1,132 @@ +package daemon + +import ( + "errors" + "fmt" + "runtime" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/restartmanager" +) + +// StateChanged updates daemon state changes from containerd +func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { + c := daemon.containers.Get(id) + if c == nil { + return fmt.Errorf("no such container: %s", id) + } + + switch e.State { + case libcontainerd.StateOOM: + // StateOOM is Linux specific and should never be hit on Windows + if runtime.GOOS == "windows" { + return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "oom") + case libcontainerd.StateExit: + // if container's AutoRemove flag is set, remove it after clean up + autoRemove := func() { + if c.HostConfig.AutoRemove { + if err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("can't remove container %s: %v", c.ID, err) + } + } + } + + c.Lock() + c.StreamConfig.Wait() + c.Reset(false) + + restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, false, time.Since(c.StartedAt)) + if err == nil && restart { + c.RestartCount++ + c.SetRestarting(platformConstructExitStatus(e)) + } else { + c.SetStopped(platformConstructExitStatus(e)) + defer autoRemove() + } + + daemon.updateHealthMonitor(c) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + daemon.Cleanup(c) + + if err == nil && restart { + go func() { + err := <-wait + if err == nil { + if err = daemon.containerStart(c, "", "", false); err != nil { + logrus.Debugf("failed to restart container: %+v", err) + } + } + if err != nil { + c.SetStopped(platformConstructExitStatus(e)) + defer autoRemove() + if err != restartmanager.ErrRestartCanceled { + logrus.Errorf("restartmanger wait error: %+v", err) + } + } + }() + } + + defer c.Unlock() + if err := c.ToDisk(); err != nil { + return err + } + return daemon.postRunProcessing(c, e) + case libcontainerd.StateExitProcess: + if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { + ec := int(e.ExitCode) + execConfig.Lock() + defer execConfig.Unlock() + execConfig.ExitCode = &ec + execConfig.Running = false + execConfig.StreamConfig.Wait() + if err := execConfig.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", c.ID, err) + } + + // remove the exec command from the container's store only and not the + // daemon's store so that the exec command can be inspected. + c.ExecCommands.Delete(execConfig.ID) + } else { + logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) + } + case libcontainerd.StateStart, libcontainerd.StateRestore: + // Container is already locked in this case + c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) + c.HasBeenManuallyStopped = false + c.HasBeenStartedBefore = true + if err := c.ToDisk(); err != nil { + c.Reset(false) + return err + } + daemon.initHealthMonitor(c) + daemon.LogContainerEvent(c, "start") + case libcontainerd.StatePause: + // Container is already locked in this case + c.Paused = true + if err := c.ToDisk(); err != nil { + return err + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "pause") + case libcontainerd.StateResume: + // Container is already locked in this case + c.Paused = false + if err := c.ToDisk(); err != nil { + return err + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "unpause") + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/monitor_linux.go b/vendor/github.com/moby/moby/daemon/monitor_linux.go new file mode 100644 index 0000000..09f5af5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/monitor_linux.go @@ -0,0 +1,19 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + OOMKilled: e.OOMKilled, + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/monitor_solaris.go b/vendor/github.com/moby/moby/daemon/monitor_solaris.go new file mode 100644 index 0000000..5ccfada --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/monitor_solaris.go @@ -0,0 +1,18 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/monitor_windows.go b/vendor/github.com/moby/moby/daemon/monitor_windows.go new file mode 100644 index 0000000..9648b1b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/monitor_windows.go @@ -0,0 +1,46 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + if e.ExitCode == 0 && e.UpdatePending { + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + newOpts := []libcontainerd.CreateOption{&libcontainerd.ServicingOption{ + IsServicing: true, + }} + + copts, err := daemon.getLibcontainerdCreateOptions(container) + if err != nil { + return err + } + + if copts != nil { + newOpts = append(newOpts, copts...) + } + + // Create a new servicing container, which will start, complete the update, and merge back the + // results if it succeeded, all as part of the below function call. + if err := daemon.containerd.Create((container.ID + "_servicing"), "", "", *spec, container.InitializeStdio, newOpts...); err != nil { + container.SetExitCode(-1) + return fmt.Errorf("Post-run update servicing failed: %s", err) + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/mounts.go b/vendor/github.com/moby/moby/daemon/mounts.go new file mode 100644 index 0000000..35c6ed5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/mounts.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "fmt" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + volumestore "github.com/docker/docker/volume/store" +) + +func (daemon *Daemon) prepareMountPoints(container *container.Container) error { + for _, config := range container.MountPoints { + if err := daemon.lazyInitializeVolume(container.ID, config); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { + var rmErrors []string + for _, m := range container.MountPoints { + if m.Type != mounttypes.TypeVolume || m.Volume == nil { + continue + } + daemon.volumes.Dereference(m.Volume, container.ID) + if !rm { + continue + } + + // Do not remove named mountpoints + // these are mountpoints specified like `docker run -v :/foo` + if m.Spec.Source != "" { + continue + } + + err := daemon.volumes.Remove(m.Volume) + // Ignore volume in use errors because having this + // volume being referenced by other container is + // not an error, but an implementation detail. + // This prevents docker from logging "ERROR: Volume in use" + // where there is another container using the volume. + if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) + } + } + + if len(rmErrors) > 0 { + return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/names.go b/vendor/github.com/moby/moby/daemon/names.go new file mode 100644 index 0000000..273d551 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/names.go @@ -0,0 +1,116 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/utils" +) + +var ( + validContainerNameChars = utils.RestrictedNameChars + validContainerNamePattern = utils.RestrictedNamePattern +) + +func (daemon *Daemon) registerName(container *container.Container) error { + if daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + + if err := container.ToDiskLocking(); err != nil { + logrus.Errorf("Error saving container name to disk: %v", err) + } + } + return daemon.nameIndex.Reserve(container.Name, container.ID) +} + +func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { + var ( + err error + id = stringid.GenerateNonCryptoID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(strings.TrimPrefix(name, "/")) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + id, err := daemon.nameIndex.Get(name) + if err != nil { + logrus.Errorf("got unexpected error while looking up reserved name: %v", err) + return "", err + } + return "", fmt.Errorf("Conflict. The container name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id) + } + return "", fmt.Errorf("error reserving name: %s, error: %v", name, err) + } + return name, nil +} + +func (daemon *Daemon) releaseName(name string) { + daemon.nameIndex.Release(name) +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + continue + } + return "", err + } + return name, nil + } + + name = "/" + stringid.TruncateID(id) + if err := daemon.nameIndex.Reserve(name, id); err != nil { + return "", err + } + return name, nil +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/network.go b/vendor/github.com/moby/moby/daemon/network.go new file mode 100644 index 0000000..ab8fd88 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/network.go @@ -0,0 +1,498 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + networktypes "github.com/docker/libnetwork/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// NetworkControllerEnabled checks if the networking stack is enabled. +// This feature depends on OS primitives and it's disabled in systems like Windows. +func (daemon *Daemon) NetworkControllerEnabled() bool { + return daemon.netController != nil +} + +// FindNetwork function finds a network for a given string that can represent network name or id +func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { + // Find by Name + n, err := daemon.GetNetworkByName(idName) + if err != nil && !isNoSuchNetworkError(err) { + return nil, err + } + + if n != nil { + return n, nil + } + + // Find by id + return daemon.GetNetworkByID(idName) +} + +func isNoSuchNetworkError(err error) bool { + _, ok := err.(libnetwork.ErrNoSuchNetwork) + return ok +} + +// GetNetworkByID function returns a network whose ID begins with the given prefix. +// It fails with an error if no matching, or more than one matching, networks are found. +func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) { + list := daemon.GetNetworksByID(partialID) + + if len(list) == 0 { + return nil, libnetwork.ErrNoSuchNetwork(partialID) + } + if len(list) > 1 { + return nil, libnetwork.ErrInvalidID(partialID) + } + return list[0], nil +} + +// GetNetworkByName function returns a network for a given network name. +// If no network name is given, the default network is returned. +func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) { + c := daemon.netController + if c == nil { + return nil, libnetwork.ErrNoSuchNetwork(name) + } + if name == "" { + name = c.Config().Daemon.DefaultNetwork + } + return c.NetworkByName(name) +} + +// GetNetworksByID returns a list of networks whose ID partially matches zero or more networks +func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { + c := daemon.netController + if c == nil { + return nil + } + list := []libnetwork.Network{} + l := func(nw libnetwork.Network) bool { + if strings.HasPrefix(nw.ID(), partialID) { + list = append(list, nw) + } + return false + } + c.WalkNetworks(l) + + return list +} + +// getAllNetworks returns a list containing all networks +func (daemon *Daemon) getAllNetworks() []libnetwork.Network { + c := daemon.netController + list := []libnetwork.Network{} + l := func(nw libnetwork.Network) bool { + list = append(list, nw) + return false + } + c.WalkNetworks(l) + + return list +} + +func isIngressNetwork(name string) bool { + return name == "ingress" +} + +var ingressChan = make(chan struct{}, 1) + +func ingressWait() func() { + ingressChan <- struct{}{} + return func() { <-ingressChan } +} + +// SetupIngress setups ingress networking. +func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) error { + ip, _, err := net.ParseCIDR(nodeIP) + if err != nil { + return err + } + + go func() { + controller := daemon.netController + controller.AgentInitWait() + + if n, err := daemon.GetNetworkByName(create.Name); err == nil && n != nil && n.ID() != create.ID { + if err := controller.SandboxDestroy("ingress-sbox"); err != nil { + logrus.Errorf("Failed to delete stale ingress sandbox: %v", err) + return + } + + // Cleanup any stale endpoints that might be left over during previous iterations + epList := n.Endpoints() + for _, ep := range epList { + if err := ep.Delete(true); err != nil { + logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err) + } + } + + if err := n.Delete(); err != nil { + logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err) + return + } + } + + if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil { + // If it is any other error other than already + // exists error log error and return. + if _, ok := err.(libnetwork.NetworkNameError); !ok { + logrus.Errorf("Failed creating ingress network: %v", err) + return + } + + // Otherwise continue down the call to create or recreate sandbox. + } + + n, err := daemon.GetNetworkByID(create.ID) + if err != nil { + logrus.Errorf("Failed getting ingress network by id after creating: %v", err) + return + } + + sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) + if err != nil { + if _, ok := err.(networktypes.ForbiddenError); !ok { + logrus.Errorf("Failed creating ingress sandbox: %v", err) + } + return + } + + ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) + if err != nil { + logrus.Errorf("Failed creating ingress endpoint: %v", err) + return + } + + if err := ep.Join(sb, nil); err != nil { + logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) + } + + if err := sb.EnableService(); err != nil { + logrus.WithError(err).Error("Failed enabling service for ingress sandbox") + } + }() + + return nil +} + +// SetNetworkBootstrapKeys sets the bootstrap keys. +func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { + return daemon.netController.SetKeys(keys) +} + +// UpdateAttachment notifies the attacher about the attachment config. +func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil { + return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config) + } + + return nil +} + +// WaitForDetachment makes the cluster manager wait for detachment of +// the container from the network. +func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID) +} + +// CreateManagedNetwork creates an agent network. +func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error { + _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true) + return err +} + +// CreateNetwork creates a network with the given name, driver and other optional parameters +func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { + resp, err := daemon.createNetwork(create, "", false) + if err != nil { + return nil, err + } + return resp, err +} + +func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { + // If there is a pending ingress network creation wait here + // since ingress network creation can happen via node download + // from manager or task download. + if isIngressNetwork(create.Name) { + defer ingressWait()() + } + + if runconfig.IsPreDefinedNetwork(create.Name) && !agent { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) + return nil, apierrors.NewRequestForbiddenError(err) + } + + var warning string + nw, err := daemon.GetNetworkByName(create.Name) + if err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + return nil, err + } + } + if nw != nil { + if create.CheckDuplicate { + return nil, libnetwork.NetworkNameError(create.Name) + } + warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) + } + + c := daemon.netController + driver := create.Driver + if driver == "" { + driver = c.Config().Daemon.DefaultDriver + } + + nwOptions := []libnetwork.NetworkOption{ + libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(create.Options), + libnetwork.NetworkOptionLabels(create.Labels), + libnetwork.NetworkOptionAttachable(create.Attachable), + } + + if create.IPAM != nil { + ipam := create.IPAM + v4Conf, v6Conf, err := getIpamConfig(ipam.Config) + if err != nil { + return nil, err + } + nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options)) + } + + if create.Internal { + nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) + } + if agent { + nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic()) + nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) + } + + if isIngressNetwork(create.Name) { + nwOptions = append(nwOptions, libnetwork.NetworkOptionIngress()) + } + + n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) + if err != nil { + return nil, err + } + + daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.ACQUIRE) + if create.IPAM != nil { + daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.ACQUIRE) + } + daemon.LogNetworkEvent(n, "create") + + return &types.NetworkCreateResponse{ + ID: n.ID(), + Warning: warning, + }, nil +} + +func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) { + var builtinDrivers []string + + if capability == driverapi.NetworkPluginEndpointType { + builtinDrivers = daemon.netController.BuiltinDrivers() + } else if capability == ipamapi.PluginEndpointType { + builtinDrivers = daemon.netController.BuiltinIPAMDrivers() + } + + for _, d := range builtinDrivers { + if d == driver { + return + } + } + + if daemon.PluginStore != nil { + _, err := daemon.PluginStore.Get(driver, capability, mode) + if err != nil { + logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation") + } + } +} + +func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { + ipamV4Cfg := []*libnetwork.IpamConf{} + ipamV6Cfg := []*libnetwork.IpamConf{} + for _, d := range data { + iCfg := libnetwork.IpamConf{} + iCfg.PreferredPool = d.Subnet + iCfg.SubPool = d.IPRange + iCfg.Gateway = d.Gateway + iCfg.AuxAddresses = d.AuxAddress + ip, _, err := net.ParseCIDR(d.Subnet) + if err != nil { + return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) + } + if ip.To4() != nil { + ipamV4Cfg = append(ipamV4Cfg, &iCfg) + } else { + ipamV6Cfg = append(ipamV6Cfg, &iCfg) + } + } + return ipamV4Cfg, ipamV6Cfg, nil +} + +// UpdateContainerServiceConfig updates a service configuration. +func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + container.NetworkSettings.Service = serviceConfig + return nil +} + +// ConnectContainerToNetwork connects the given container to the given +// network. If either cannot be found, an err is returned. If the +// network cannot be set up, an err is returned. +func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network connect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + return daemon.ConnectToNetwork(container, networkName, endpointConfig) +} + +// DisconnectContainerFromNetwork disconnects the given container from +// the given network. If either cannot be found, an err is returned. +func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network disconnect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + if force { + return daemon.ForceEndpointDelete(containerName, networkName) + } + return err + } + return daemon.DisconnectFromNetwork(container, networkName, force) +} + +// GetNetworkDriverList returns the list of plugins drivers +// registered for network. +func (daemon *Daemon) GetNetworkDriverList() []string { + if !daemon.NetworkControllerEnabled() { + return nil + } + + pluginList := daemon.netController.BuiltinDrivers() + + managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType) + + for _, plugin := range managedPlugins { + pluginList = append(pluginList, plugin.Name()) + } + + pluginMap := make(map[string]bool) + for _, plugin := range pluginList { + pluginMap[plugin] = true + } + + networks := daemon.netController.Networks() + + for _, network := range networks { + if !pluginMap[network.Type()] { + pluginList = append(pluginList, network.Type()) + pluginMap[network.Type()] = true + } + } + + sort.Strings(pluginList) + + return pluginList +} + +// DeleteManagedNetwork deletes an agent network. +func (daemon *Daemon) DeleteManagedNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, true) +} + +// DeleteNetwork destroys a network unless it's one of docker's predefined networks. +func (daemon *Daemon) DeleteNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, false) +} + +func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { + nw, err := daemon.FindNetwork(networkID) + if err != nil { + return err + } + + if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic { + err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name()) + return apierrors.NewRequestForbiddenError(err) + } + + if err := nw.Delete(); err != nil { + return err + } + daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.RELEASE) + ipamType, _, _, _ := nw.Info().IpamConfig() + daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.RELEASE) + daemon.LogNetworkEvent(nw, "destroy") + return nil +} + +// GetNetworks returns a list of all networks +func (daemon *Daemon) GetNetworks() []libnetwork.Network { + return daemon.getAllNetworks() +} + +// clearAttachableNetworks removes the attachable networks +// after disconnecting any connected container +func (daemon *Daemon) clearAttachableNetworks() { + for _, n := range daemon.GetNetworks() { + if !n.Info().Attachable() { + continue + } + for _, ep := range n.Endpoints() { + epInfo := ep.Info() + if epInfo == nil { + continue + } + sb := epInfo.Sandbox() + if sb == nil { + continue + } + containerID := sb.ContainerID() + if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil { + logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v", + containerID, n.Name(), err) + } + } + if err := daemon.DeleteManagedNetwork(n.ID()); err != nil { + logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/network/settings.go b/vendor/github.com/moby/moby/daemon/network/settings.go new file mode 100644 index 0000000..8f6b7dd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/network/settings.go @@ -0,0 +1,33 @@ +package network + +import ( + networktypes "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out., +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*EndpointSettings + Service *clustertypes.ServiceConfig + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool + HasSwarmEndpoint bool +} + +// EndpointSettings is a package local wrapper for +// networktypes.EndpointSettings which stores Endpoint state that +// needs to be persisted to disk but not exposed in the api. +type EndpointSettings struct { + *networktypes.EndpointSettings + IPAMOperational bool +} diff --git a/vendor/github.com/moby/moby/daemon/oci_linux.go b/vendor/github.com/moby/moby/daemon/oci_linux.go new file mode 100644 index 0000000..a72b0b8 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/oci_linux.go @@ -0,0 +1,790 @@ +package daemon + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/user" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + weightDevices, err := getBlkioWeightDevices(r) + if err != nil { + return err + } + readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) + if err != nil { + return err + } + writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) + if err != nil { + return err + } + readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) + if err != nil { + return err + } + writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) + if err != nil { + return err + } + + memoryRes := getMemoryResources(r) + cpuRes := getCPUResources(r) + blkioWeight := r.BlkioWeight + + specResources := &specs.Resources{ + Memory: memoryRes, + CPU: cpuRes, + BlockIO: &specs.BlockIO{ + Weight: &blkioWeight, + WeightDevice: weightDevices, + ThrottleReadBpsDevice: readBpsDevice, + ThrottleWriteBpsDevice: writeBpsDevice, + ThrottleReadIOPSDevice: readIOpsDevice, + ThrottleWriteIOPSDevice: writeIOpsDevice, + }, + DisableOOMKiller: r.OomKillDisable, + Pids: &specs.Pids{ + Limit: &r.PidsLimit, + }, + } + + if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { + specResources.Devices = s.Linux.Resources.Devices + } + + s.Linux.Resources = specResources + return nil +} + +func setDevices(s *specs.Spec, c *container.Container) error { + // Build lists of devices allowed and created within the container. + var devs []specs.Device + devPermissions := s.Linux.Resources.Devices + if c.HostConfig.Privileged { + hostDevices, err := devices.HostDevices() + if err != nil { + return err + } + for _, d := range hostDevices { + devs = append(devs, oci.Device(d)) + } + rwm := "rwm" + devPermissions = []specs.DeviceCgroup{ + { + Allow: true, + Access: &rwm, + }, + } + } else { + for _, deviceMapping := range c.HostConfig.Devices { + d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions) + if err != nil { + return err + } + devs = append(devs, d...) + devPermissions = append(devPermissions, dPermissions...) + } + } + + s.Linux.Devices = append(s.Linux.Devices, devs...) + s.Linux.Resources.Devices = devPermissions + return nil +} + +func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { + var rlimits []specs.Rlimit + + // We want to leave the original HostConfig alone so make a copy here + hostConfig := *c.HostConfig + // Merge with the daemon defaults + daemon.mergeUlimits(&hostConfig) + for _, ul := range hostConfig.Ulimits { + rlimits = append(rlimits, specs.Rlimit{ + Type: "RLIMIT_" + strings.ToUpper(ul.Name), + Soft: uint64(ul.Soft), + Hard: uint64(ul.Hard), + }) + } + + s.Process.Rlimits = rlimits + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + if err != nil { + return nil, err + } + return os.Open(fp) +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + passwdPath, err := user.GetPasswdPath() + if err != nil { + return 0, 0, nil, err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return 0, 0, nil, err + } + passwdFile, err := readUserFile(c, passwdPath) + if err == nil { + defer passwdFile.Close() + } + groupFile, err := readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + + execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) + if err != nil { + return 0, 0, nil, err + } + + // todo: fix this double read by a change to libcontainer/user pkg + groupFile, err = readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + var addGroups []int + if len(c.HostConfig.GroupAdd) > 0 { + addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) + if err != nil { + return 0, 0, nil, err + } + } + uid := uint32(execUser.Uid) + gid := uint32(execUser.Gid) + sgids := append(execUser.Sgids, addGroups...) + var additionalGids []uint32 + for _, g := range sgids { + additionalGids = append(additionalGids, uint32(g)) + } + return uid, gid, additionalGids, nil +} + +func setNamespace(s *specs.Spec, ns specs.Namespace) { + for i, n := range s.Linux.Namespaces { + if n.Type == ns.Type { + s.Linux.Namespaces[i] = ns + return + } + } + s.Linux.Namespaces = append(s.Linux.Namespaces, ns) +} + +func setCapabilities(s *specs.Spec, c *container.Container) error { + var caplist []string + var err error + if c.HostConfig.Privileged { + caplist = caps.GetAllCapabilities() + } else { + caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + if err != nil { + return err + } + } + s.Process.Capabilities = caplist + return nil +} + +func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { + userNS := false + // user + if c.HostConfig.UsernsMode.IsPrivate() { + uidMap, gidMap := daemon.GetUIDGIDMaps() + if uidMap != nil { + userNS = true + ns := specs.Namespace{Type: "user"} + setNamespace(s, ns) + s.Linux.UIDMappings = specMapping(uidMap) + s.Linux.GIDMappings = specMapping(gidMap) + } + } + // network + if !c.Config.NetworkDisabled { + ns := specs.Namespace{Type: "network"} + parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) + if parts[0] == "container" { + nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) + if userNS { + // to share a net namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.NetworkMode.IsHost() { + ns.Path = c.NetworkSettings.SandboxKey + } + setNamespace(s, ns) + } + // ipc + if c.HostConfig.IpcMode.IsContainer() { + ns := specs.Namespace{Type: "ipc"} + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share an IPC namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.IpcMode.IsHost() { + oci.RemoveNamespace(s, specs.NamespaceType("ipc")) + } else { + ns := specs.Namespace{Type: "ipc"} + setNamespace(s, ns) + } + // pid + if c.HostConfig.PidMode.IsContainer() { + ns := specs.Namespace{Type: "pid"} + pc, err := daemon.getPidContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share a PID namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.PidMode.IsHost() { + oci.RemoveNamespace(s, specs.NamespaceType("pid")) + } else { + ns := specs.Namespace{Type: "pid"} + setNamespace(s, ns) + } + // uts + if c.HostConfig.UTSMode.IsHost() { + oci.RemoveNamespace(s, specs.NamespaceType("uts")) + s.Hostname = "" + } + + return nil +} + +func specMapping(s []idtools.IDMap) []specs.IDMapping { + var ids []specs.IDMapping + for _, item := range s { + ids = append(ids, specs.IDMapping{ + HostID: uint32(item.HostID), + ContainerID: uint32(item.ContainerID), + Size: uint32(item.Size), + }) + } + return ids +} + +func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { + for _, m := range mountinfo { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +// Get the source mount point of directory passed in as argument. Also return +// optional fields. +func getSourceMount(source string) (string, string, error) { + // Ensure any symlinks are resolved. + sourcePath, err := filepath.EvalSymlinks(source) + if err != nil { + return "", "", err + } + + mountinfos, err := mount.GetMounts() + if err != nil { + return "", "", err + } + + mountinfo := getMountInfo(mountinfos, sourcePath) + if mountinfo != nil { + return sourcePath, mountinfo.Optional, nil + } + + path := sourcePath + for { + path = filepath.Dir(path) + + mountinfo = getMountInfo(mountinfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +// Ensure mount point on which path is mounted, is shared. +func ensureShared(path string) error { + sharedMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } + } + + if !sharedMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) + } + return nil +} + +// Ensure mount point on which path is mounted, is either shared or slave. +func ensureSharedOrSlave(path string) error { + sharedMount := false + slaveMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } else if strings.HasPrefix(opt, "master:") { + slaveMount = true + break + } + } + + if !sharedMount && !slaveMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) + } + return nil +} + +var ( + mountPropagationMap = map[string]int{ + "private": mount.PRIVATE, + "rprivate": mount.RPRIVATE, + "shared": mount.SHARED, + "rshared": mount.RSHARED, + "slave": mount.SLAVE, + "rslave": mount.RSLAVE, + } + + mountPropagationReverseMap = map[int]string{ + mount.PRIVATE: "private", + mount.RPRIVATE: "rprivate", + mount.SHARED: "shared", + mount.RSHARED: "rshared", + mount.SLAVE: "slave", + mount.RSLAVE: "rslave", + } +) + +func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { + userMounts := make(map[string]struct{}) + for _, m := range mounts { + userMounts[m.Destination] = struct{}{} + } + + // Filter out mounts that are overridden by user supplied mounts + var defaultMounts []specs.Mount + _, mountDev := userMounts["/dev"] + for _, m := range s.Mounts { + if _, ok := userMounts[m.Destination]; !ok { + if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + continue + } + defaultMounts = append(defaultMounts, m) + } + } + + s.Mounts = defaultMounts + for _, m := range mounts { + for _, cm := range s.Mounts { + if cm.Destination == m.Destination { + return fmt.Errorf("Duplicate mount point '%s'", m.Destination) + } + } + + if m.Source == "tmpfs" { + data := m.Data + options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)} + if data != "" { + options = append(options, strings.Split(data, ",")...) + } + + merged, err := mount.MergeTmpfsOptions(options) + if err != nil { + return err + } + + s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged}) + continue + } + + mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} + + // Determine property of RootPropagation based on volume + // properties. If a volume is shared, then keep root propagation + // shared. This should work for slave and private volumes too. + // + // For slave volumes, it can be either [r]shared/[r]slave. + // + // For private volumes any root propagation value should work. + pFlag := mountPropagationMap[m.Propagation] + if pFlag == mount.SHARED || pFlag == mount.RSHARED { + if err := ensureShared(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] + } + } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { + if err := ensureSharedOrSlave(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + } + } + + opts := []string{"rbind"} + if !m.Writable { + opts = append(opts, "ro") + } + if pFlag != 0 { + opts = append(opts, mountPropagationReverseMap[pFlag]) + } + + mt.Options = opts + s.Mounts = append(s.Mounts, mt) + } + + if s.Root.Readonly { + for i, m := range s.Mounts { + switch m.Destination { + case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc + continue + } + if _, ok := userMounts[m.Destination]; !ok { + if !stringutils.InSlice(m.Options, "ro") { + s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") + } + } + } + } + + if c.HostConfig.Privileged { + if !s.Root.Readonly { + // clear readonly for /sys + for i := range s.Mounts { + if s.Mounts[i].Destination == "/sys" { + clearReadOnly(&s.Mounts[i]) + } + } + } + s.Linux.ReadonlyPaths = nil + s.Linux.MaskedPaths = nil + } + + // TODO: until a kernel/mount solution exists for handling remount in a user namespace, + // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) + if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { + for i, m := range s.Mounts { + if m.Type == "cgroup" { + clearReadOnly(&s.Mounts[i]) + } + } + } + + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: c.BaseFS, + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Args = append([]string{c.Path}, c.Args...) + + // only add the custom init if it is specified and the container is running in its + // own private pid namespace. It does not make sense to add if it is running in the + // host namespace or another container's pid namespace where we already have an init + if c.HostConfig.PidMode.IsPrivate() { + if (c.HostConfig.Init != nil && *c.HostConfig.Init) || + (c.HostConfig.Init == nil && daemon.configStore.Init) { + s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...) + var path string + if daemon.configStore.InitPath == "" && c.HostConfig.InitPath == "" { + path, err = exec.LookPath(DefaultInitBinary) + if err != nil { + return err + } + } + if daemon.configStore.InitPath != "" { + path = daemon.configStore.InitPath + } + if c.HostConfig.InitPath != "" { + path = c.HostConfig.InitPath + } + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/dev/init", + Type: "bind", + Source: path, + Options: []string{"bind", "ro"}, + }) + } + } + s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + var cgroupsPath string + scopePrefix := "docker" + parent := "/docker" + useSystemd := UsingSystemd(daemon.configStore) + if useSystemd { + parent = "system.slice" + } + + if c.HostConfig.CgroupParent != "" { + parent = c.HostConfig.CgroupParent + } else if daemon.configStore.CgroupParent != "" { + parent = daemon.configStore.CgroupParent + } + + if useSystemd { + cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID + logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) + } else { + cgroupsPath = filepath.Join(parent, c.ID) + } + s.Linux.CgroupsPath = &cgroupsPath + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("linux runtime spec resources: %v", err) + } + s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj + s.Linux.Sysctl = c.HostConfig.Sysctls + + p := *s.Linux.CgroupsPath + if useSystemd { + initPath, err := cgroups.GetInitCgroupDir("cpu") + if err != nil { + return nil, err + } + p, _ = cgroups.GetThisCgroupDir("cpu") + if err != nil { + return nil, err + } + p = filepath.Join(initPath, p) + } + + // Clean path to guard against things like ../../../BAD + parentPath := filepath.Dir(p) + if !filepath.IsAbs(parentPath) { + parentPath = filepath.Clean("/" + parentPath) + } + + if err := daemon.initCgroupsPath(parentPath); err != nil { + return nil, fmt.Errorf("linux init cgroups path: %v", err) + } + if err := setDevices(&s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec devices: %v", err) + } + if err := setRlimits(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) + } + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("linux spec user: %v", err) + } + if err := setNamespaces(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux spec namespaces: %v", err) + } + if err := setCapabilities(&s, c); err != nil { + return nil, fmt.Errorf("linux spec capabilities: %v", err) + } + if err := setSeccomp(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux seccomp: %v", err) + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + if err := daemon.setupSecretDir(c); err != nil { + return nil, err + } + + ms, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + + ms = append(ms, c.IpcMounts()...) + + tmpfsMounts, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + ms = append(ms, tmpfsMounts...) + + if m := c.SecretMount(); m != nil { + ms = append(ms, *m) + } + + sort.Sort(mounts(ms)) + if err := setMounts(daemon, &s, c, ms); err != nil { + return nil, fmt.Errorf("linux mounts: %v", err) + } + + for _, ns := range s.Linux.Namespaces { + if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { + target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) + if err != nil { + return nil, err + } + + s.Hooks = specs.Hooks{ + Prestart: []specs.Hook{{ + Path: target, // FIXME: cross-platform + Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, + }}, + } + } + } + + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. + if err := ensureDefaultAppArmorProfile(); err != nil { + return nil, err + } + } + + s.Process.ApparmorProfile = appArmorProfile + } + s.Process.SelinuxLabel = c.GetProcessLabel() + s.Process.NoNewPrivileges = c.NoNewPrivileges + s.Linux.MountLabel = c.MountLabel + + return (*specs.Spec)(&s), nil +} + +func clearReadOnly(m *specs.Mount) { + var opt []string + for _, o := range m.Options { + if o != "ro" { + opt = append(opt, o) + } + } + m.Options = opt +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + ulimits := c.Ulimits + // Merge ulimits with daemon defaults + ulIdx := make(map[string]struct{}) + for _, ul := range ulimits { + ulIdx[ul.Name] = struct{}{} + } + for name, ul := range daemon.configStore.Ulimits { + if _, exists := ulIdx[name]; !exists { + ulimits = append(ulimits, ul) + } + } + c.Ulimits = ulimits +} diff --git a/vendor/github.com/moby/moby/daemon/oci_solaris.go b/vendor/github.com/moby/moby/daemon/oci_solaris.go new file mode 100644 index 0000000..0c757f9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/oci_solaris.go @@ -0,0 +1,188 @@ +package daemon + +import ( + "fmt" + "path/filepath" + "sort" + "strconv" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/libnetwork" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + mem := getMemoryResources(r) + s.Solaris.CappedMemory = &mem + + capCPU := getCPUResources(r) + s.Solaris.CappedCPU = &capCPU + + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + return 0, 0, nil, nil +} + +func (daemon *Daemon) getRunzAnet(ep libnetwork.Endpoint) (specs.Anet, error) { + var ( + linkName string + lowerLink string + defRouter string + ) + + epInfo := ep.Info() + if epInfo == nil { + return specs.Anet{}, fmt.Errorf("invalid endpoint") + } + + nw, err := daemon.GetNetworkByName(ep.Network()) + if err != nil { + return specs.Anet{}, fmt.Errorf("Failed to get network %s: %v", ep.Network(), err) + } + + // Evaluate default router, linkname and lowerlink for interface endpoint + switch nw.Type() { + case "bridge": + defRouter = epInfo.Gateway().String() + linkName = "net0" // Should always be net0 for a container + + // TODO We construct lowerlink here exactly as done for solaris bridge + // initialization. Need modular code to reuse. + options := nw.Info().DriverOptions() + nwName := options["com.docker.network.bridge.name"] + lastChar := nwName[len(nwName)-1:] + if _, err = strconv.Atoi(lastChar); err != nil { + lowerLink = nwName + "_0" + } else { + lowerLink = nwName + } + + case "overlay": + defRouter = "" + linkName = "net1" + + // TODO Follows generateVxlanName() in solaris overlay. + id := nw.ID() + if len(nw.ID()) > 12 { + id = nw.ID()[:12] + } + lowerLink = "vx_" + id + "_0" + } + + runzanet := specs.Anet{ + Linkname: linkName, + Lowerlink: lowerLink, + Allowedaddr: epInfo.Iface().Address().String(), + Configallowedaddr: "true", + Defrouter: defRouter, + Linkprotection: "mac-nospoof, ip-nospoof", + Macaddress: epInfo.Iface().MacAddress().String(), + } + + return runzanet, nil +} + +func (daemon *Daemon) setNetworkInterface(s *specs.Spec, c *container.Container) error { + var anets []specs.Anet + + sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID) + if err != nil { + return fmt.Errorf("Could not obtain sandbox for container") + } + + // Populate interfaces required for each endpoint + for _, ep := range sb.Endpoints() { + runzanet, err := daemon.getRunzAnet(ep) + if err != nil { + return fmt.Errorf("Failed to get interface information for endpoint %d: %v", ep.ID(), err) + } + anets = append(anets, runzanet) + } + + s.Solaris.Anet = anets + if anets != nil { + s.Solaris.Milestone = "svc:/milestone/container:default" + } + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: filepath.Dir(c.BaseFS), + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + s.Process.Args = append([]string{c.Path}, c.Args...) + s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("runtime spec resources: %v", err) + } + + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("spec user: %v", err) + } + + if err := daemon.setNetworkInterface(&s, c); err != nil { + return nil, err + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + ms, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + ms = append(ms, c.IpcMounts()...) + tmpfsMounts, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + ms = append(ms, tmpfsMounts...) + sort.Sort(mounts(ms)) + + return (*specs.Spec)(&s), nil +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +// It will do nothing on non-Linux platform +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/vendor/github.com/moby/moby/daemon/oci_windows.go b/vendor/github.com/moby/moby/daemon/oci_windows.go new file mode 100644 index 0000000..6e26424 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/oci_windows.go @@ -0,0 +1,122 @@ +package daemon + +import ( + "syscall" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/sysinfo" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return nil, err + } + + // Note, unlike Unix, we do NOT call into SetupWorkingDirectory as + // this is done in VMCompute. Further, we couldn't do it for Hyper-V + // containers anyway. + + // In base spec + s.Hostname = c.FullHostname() + + // In s.Mounts + mounts, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + for _, mount := range mounts { + m := specs.Mount{ + Source: mount.Source, + Destination: mount.Destination, + } + if !mount.Writable { + m.Options = append(m.Options, "ro") + } + s.Mounts = append(s.Mounts, m) + } + + // In s.Process + s.Process.Args = append([]string{c.Path}, c.Args...) + if !c.Config.ArgsEscaped { + s.Process.Args = escapeArgs(s.Process.Args) + } + s.Process.Cwd = c.Config.WorkingDir + if len(s.Process.Cwd) == 0 { + // We default to C:\ to workaround the oddity of the case that the + // default directory for cmd running as LocalSystem (or + // ContainerAdministrator) is c:\windows\system32. Hence docker run + // cmd will by default end in c:\windows\system32, rather + // than 'root' (/) on Linux. The oddity is that if you have a dockerfile + // which has no WORKDIR and has a COPY file ., . will be interpreted + // as c:\. Hence, setting it to default of c:\ makes for consistency. + s.Process.Cwd = `C:\` + } + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0] + s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1] + s.Process.Terminal = c.Config.Tty + s.Process.User.Username = c.Config.User + + // In spec.Root. This is not set for Hyper-V containers + isHyperV := false + if c.HostConfig.Isolation.IsDefault() { + // Container using default isolation, so take the default from the daemon configuration + isHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container may be requesting an explicit isolation mode. + isHyperV = c.HostConfig.Isolation.IsHyperV() + } + if !isHyperV { + s.Root.Path = c.BaseFS + } + s.Root.Readonly = false // Windows does not support a read-only root filesystem + + // In s.Windows.Resources + // @darrenstahlmsft implement these resources + cpuShares := uint16(c.HostConfig.CPUShares) + cpuPercent := uint8(c.HostConfig.CPUPercent) + if c.HostConfig.NanoCPUs > 0 { + cpuPercent = uint8(c.HostConfig.NanoCPUs * 100 / int64(sysinfo.NumCPU()) / 1e9) + } + cpuCount := uint64(c.HostConfig.CPUCount) + memoryLimit := uint64(c.HostConfig.Memory) + s.Windows.Resources = &specs.WindowsResources{ + CPU: &specs.WindowsCPUResources{ + Percent: &cpuPercent, + Shares: &cpuShares, + Count: &cpuCount, + }, + Memory: &specs.WindowsMemoryResources{ + Limit: &memoryLimit, + //TODO Reservation: ..., + }, + Network: &specs.WindowsNetworkResources{ + //TODO Bandwidth: ..., + }, + Storage: &specs.WindowsStorageResources{ + Bps: &c.HostConfig.IOMaximumBandwidth, + Iops: &c.HostConfig.IOMaximumIOps, + }, + } + return (*specs.Spec)(&s), nil +} + +func escapeArgs(args []string) []string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = syscall.EscapeArg(a) + } + return escapedArgs +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +// It will do nothing on non-Linux platform +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/vendor/github.com/moby/moby/daemon/pause.go b/vendor/github.com/moby/moby/daemon/pause.go new file mode 100644 index 0000000..dbfafbc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/pause.go @@ -0,0 +1,49 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerPause pauses a container +func (daemon *Daemon) ContainerPause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerPause(container); err != nil { + return err + } + + return nil +} + +// containerPause pauses the container execution without stopping the process. +// The execution can be resumed by calling containerUnpause. +func (daemon *Daemon) containerPause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot Pause the container which is not running + if !container.Running { + return errNotRunning{container.ID} + } + + // We cannot Pause the container which is already paused + if container.Paused { + return fmt.Errorf("Container %s is already paused", container.ID) + } + + // We cannot Pause the container which is restarting + if container.Restarting { + return errContainerIsRestarting(container.ID) + } + + if err := daemon.containerd.Pause(container.ID); err != nil { + return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/prune.go b/vendor/github.com/moby/moby/daemon/prune.go new file mode 100644 index 0000000..a693beb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/prune.go @@ -0,0 +1,236 @@ +package daemon + +import ( + "fmt" + "regexp" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/libnetwork" +) + +// ContainersPrune removes unused containers +func (daemon *Daemon) ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) { + rep := &types.ContainersPruneReport{} + + allContainers := daemon.List() + for _, c := range allContainers { + if !c.IsRunning() { + cSize, _ := daemon.getSize(c) + // TODO: sets RmLink to true? + err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) + if err != nil { + logrus.Warnf("failed to prune container %s: %v", c.ID, err) + continue + } + if cSize > 0 { + rep.SpaceReclaimed += uint64(cSize) + } + rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) + } + } + + return rep, nil +} + +// VolumesPrune removes unused local volumes +func (daemon *Daemon) VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) { + rep := &types.VolumesPruneReport{} + + pruneVols := func(v volume.Volume) error { + name := v.Name() + refs := daemon.volumes.Refs(v) + + if len(refs) == 0 { + vSize, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("could not determine size of volume %s: %v", name, err) + } + err = daemon.volumes.Remove(v) + if err != nil { + logrus.Warnf("could not remove volume %s: %v", name, err) + return nil + } + rep.SpaceReclaimed += uint64(vSize) + rep.VolumesDeleted = append(rep.VolumesDeleted, name) + } + + return nil + } + + err := daemon.traverseLocalVolumes(pruneVols) + + return rep, err +} + +// ImagesPrune removes unused images +func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) { + rep := &types.ImagesPruneReport{} + + danglingOnly := true + if pruneFilters.Include("dangling") { + if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { + danglingOnly = false + } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling")) + } + } + + var allImages map[image.ID]*image.Image + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + allContainers := daemon.List() + imageRefs := map[string]bool{} + for _, c := range allContainers { + imageRefs[c.ID] = true + } + + // Filter intermediary images and get their unique size + allLayers := daemon.layerStore.Map() + topImages := map[image.ID]*image.Image{} + for id, img := range allImages { + dgst := digest.Digest(id) + if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { + continue + } + topImages[id] = img + } + + for id := range topImages { + dgst := digest.Digest(id) + hex := dgst.Hex() + if _, ok := imageRefs[hex]; ok { + continue + } + + deletedImages := []types.ImageDelete{} + refs := daemon.referenceStore.References(dgst) + if len(refs) > 0 { + if danglingOnly { + // Not a dangling image + continue + } + + nrRefs := len(refs) + for _, ref := range refs { + // If nrRefs == 1, we have an image marked as myreponame: + // i.e. the tag content was changed + if _, ok := ref.(reference.Canonical); ok && nrRefs > 1 { + continue + } + imgDel, err := daemon.ImageDelete(ref.String(), false, true) + if err != nil { + logrus.Warnf("could not delete reference %s: %v", ref.String(), err) + continue + } + deletedImages = append(deletedImages, imgDel...) + } + } else { + imgDel, err := daemon.ImageDelete(hex, false, true) + if err != nil { + logrus.Warnf("could not delete image %s: %v", hex, err) + continue + } + deletedImages = append(deletedImages, imgDel...) + } + + rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...) + } + + // Compute how much space was freed + for _, d := range rep.ImagesDeleted { + if d.Deleted != "" { + chid := layer.ChainID(d.Deleted) + if l, ok := allLayers[chid]; ok { + diffSize, err := l.DiffSize() + if err != nil { + logrus.Warnf("failed to get layer %s size: %v", chid, err) + continue + } + rep.SpaceReclaimed += uint64(diffSize) + } + } + } + + return rep, nil +} + +// localNetworksPrune removes unused local networks +func (daemon *Daemon) localNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + var err error + // When the function returns true, the walk will stop. + l := func(nw libnetwork.Network) bool { + nwName := nw.Name() + predefined := runconfig.IsPreDefinedNetwork(nwName) + if !predefined && len(nw.Endpoints()) == 0 { + if err = daemon.DeleteNetwork(nw.ID()); err != nil { + logrus.Warnf("could not remove network %s: %v", nwName, err) + return false + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nwName) + } + return false + } + daemon.netController.WalkNetworks(l) + return rep, err +} + +// clusterNetworksPrune removes unused cluster networks +func (daemon *Daemon) clusterNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + cluster := daemon.GetCluster() + networks, err := cluster.GetNetworks() + if err != nil { + return rep, err + } + networkIsInUse := regexp.MustCompile(`network ([[:alnum:]]+) is in use`) + for _, nw := range networks { + if nw.Name == "ingress" { + continue + } + // https://github.com/docker/docker/issues/24186 + // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. + // So we try to remove it anyway and check the error + err = cluster.RemoveNetwork(nw.ID) + if err != nil { + // we can safely ignore the "network .. is in use" error + match := networkIsInUse.FindStringSubmatch(err.Error()) + if len(match) != 2 || match[1] != nw.ID { + logrus.Warnf("could not remove network %s: %v", nw.Name, err) + } + continue + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) + } + return rep, nil +} + +// NetworksPrune removes unused networks +func (daemon *Daemon) NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + clusterRep, err := daemon.clusterNetworksPrune(pruneFilters) + if err != nil { + logrus.Warnf("could not remove cluster networks: %v", err) + } else { + rep.NetworksDeleted = append(rep.NetworksDeleted, clusterRep.NetworksDeleted...) + } + localRep, err := daemon.localNetworksPrune(pruneFilters) + if err != nil { + logrus.Warnf("could not remove local networks: %v", err) + } else { + rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...) + } + return rep, err +} diff --git a/vendor/github.com/moby/moby/daemon/rename.go b/vendor/github.com/moby/moby/daemon/rename.go new file mode 100644 index 0000000..ffb7715 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/rename.go @@ -0,0 +1,122 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + dockercontainer "github.com/docker/docker/container" + "github.com/docker/libnetwork" +) + +// ContainerRename changes the name of a container, using the oldName +// to find the container. An error is returned if newName is already +// reserved. +func (daemon *Daemon) ContainerRename(oldName, newName string) error { + var ( + sid string + sb libnetwork.Sandbox + ) + + if oldName == "" || newName == "" { + return fmt.Errorf("Neither old nor new names may be empty") + } + + if newName[0] != '/' { + newName = "/" + newName + } + + container, err := daemon.GetContainer(oldName) + if err != nil { + return err + } + + oldName = container.Name + oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint + + if oldName == newName { + return fmt.Errorf("Renaming a container with the same name as its current name") + } + + container.Lock() + defer container.Unlock() + + links := map[string]*dockercontainer.Container{} + for k, v := range daemon.linkIndex.children(container) { + if !strings.HasPrefix(k, oldName) { + return fmt.Errorf("Linked container %s does not match parent %s", k, oldName) + } + links[strings.TrimPrefix(k, oldName)] = v + } + + if newName, err = daemon.reserveName(container.ID, newName); err != nil { + return fmt.Errorf("Error when allocating new name: %v", err) + } + + for k, v := range links { + daemon.nameIndex.Reserve(newName+k, v.ID) + daemon.linkIndex.link(container, v, newName+k) + } + + container.Name = newName + container.NetworkSettings.IsAnonymousEndpoint = false + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + daemon.reserveName(container.ID, oldName) + for k, v := range links { + daemon.nameIndex.Reserve(oldName+k, v.ID) + daemon.linkIndex.link(container, v, oldName+k) + daemon.linkIndex.unlink(newName+k, v, container) + daemon.nameIndex.Release(newName + k) + } + daemon.releaseName(newName) + } + }() + + for k, v := range links { + daemon.linkIndex.unlink(oldName+k, v, container) + daemon.nameIndex.Release(oldName + k) + } + daemon.releaseName(oldName) + if err = container.ToDisk(); err != nil { + return err + } + + attributes := map[string]string{ + "oldName": oldName, + } + + if !container.Running { + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil + } + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + if e := container.ToDisk(); e != nil { + logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + } + } + }() + + sid = container.NetworkSettings.SandboxID + if daemon.netController != nil { + sb, err = daemon.netController.SandboxByID(sid) + if err != nil { + return err + } + + err = sb.Rename(strings.TrimPrefix(container.Name, "/")) + if err != nil { + return err + } + } + + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/resize.go b/vendor/github.com/moby/moby/daemon/resize.go new file mode 100644 index 0000000..7473538 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/resize.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/libcontainerd" +) + +// ContainerResize changes the size of the TTY of the process running +// in the container with the given name to the given height and width. +func (daemon *Daemon) ContainerResize(name string, height, width int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { + attributes := map[string]string{ + "height": fmt.Sprintf("%d", height), + "width": fmt.Sprintf("%d", width), + } + daemon.LogContainerEventWithAttributes(container, "resize", attributes) + } + return err +} + +// ContainerExecResize changes the size of the TTY of the process +// running in the exec with the given name to the given height and +// width. +func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { + ec, err := daemon.getExecConfig(name) + if err != nil { + return err + } + return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) +} diff --git a/vendor/github.com/moby/moby/daemon/restart.go b/vendor/github.com/moby/moby/daemon/restart.go new file mode 100644 index 0000000..79292f3 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/restart.go @@ -0,0 +1,70 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" +) + +// ContainerRestart stops and starts a container. It attempts to +// gracefully stop the container within the given timeout, forcefully +// stopping it if the timeout is exceeded. If given a negative +// timeout, ContainerRestart will wait forever until a graceful +// stop. Returns an error if the container cannot be found, or if +// there is an underlying error at any stage of the restart. +func (daemon *Daemon) ContainerRestart(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerRestart(container, *seconds); err != nil { + return fmt.Errorf("Cannot restart container %s: %v", name, err) + } + return nil + +} + +// containerRestart attempts to gracefully stop and then start the +// container. When stopping, wait for the given duration in seconds to +// gracefully stop, before forcefully terminating the container. If +// given a negative duration, wait forever for a graceful stop. +func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := daemon.Mount(container); err == nil { + defer daemon.Unmount(container) + } + + if container.IsRunning() { + // set AutoRemove flag to false before stop so the container won't be + // removed during restart process + autoRemove := container.HostConfig.AutoRemove + + container.HostConfig.AutoRemove = false + err := daemon.containerStop(container, seconds) + // restore AutoRemove irrespective of whether the stop worked or not + container.HostConfig.AutoRemove = autoRemove + // containerStop will write HostConfig to disk, we shall restore AutoRemove + // in disk too + if toDiskErr := container.ToDiskLocking(); toDiskErr != nil { + logrus.Errorf("Write container to disk error: %v", toDiskErr) + } + + if err != nil { + return err + } + } + + if err := daemon.containerStart(container, "", "", true); err != nil { + return err + } + + daemon.LogContainerEvent(container, "restart") + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/search.go b/vendor/github.com/moby/moby/daemon/search.go new file mode 100644 index 0000000..5d2ac5d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/search.go @@ -0,0 +1,94 @@ +package daemon + +import ( + "fmt" + "strconv" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/dockerversion" +) + +var acceptedSearchFilterTags = map[string]bool{ + "is-automated": true, + "is-official": true, + "stars": true, +} + +// SearchRegistryForImages queries the registry for images matching +// term. authConfig is used to login. +func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, + authConfig *types.AuthConfig, + headers map[string][]string) (*registrytypes.SearchResults, error) { + + searchFilters, err := filters.FromParam(filtersArgs) + if err != nil { + return nil, err + } + if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { + return nil, err + } + + var isAutomated, isOfficial bool + var hasStarFilter = 0 + if searchFilters.Include("is-automated") { + if searchFilters.UniqueExactMatch("is-automated", "true") { + isAutomated = true + } else if !searchFilters.UniqueExactMatch("is-automated", "false") { + return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) + } + } + if searchFilters.Include("is-official") { + if searchFilters.UniqueExactMatch("is-official", "true") { + isOfficial = true + } else if !searchFilters.UniqueExactMatch("is-official", "false") { + return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) + } + } + if searchFilters.Include("stars") { + hasStars := searchFilters.Get("stars") + for _, hasStar := range hasStars { + iHasStar, err := strconv.Atoi(hasStar) + if err != nil { + return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) + } + if iHasStar > hasStarFilter { + hasStarFilter = iHasStar + } + } + } + + unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) + if err != nil { + return nil, err + } + + filteredResults := []registrytypes.SearchResult{} + for _, result := range unfilteredResult.Results { + if searchFilters.Include("is-automated") { + if isAutomated != result.IsAutomated { + continue + } + } + if searchFilters.Include("is-official") { + if isOfficial != result.IsOfficial { + continue + } + } + if searchFilters.Include("stars") { + if result.StarCount < hasStarFilter { + continue + } + } + filteredResults = append(filteredResults, result) + } + + return ®istrytypes.SearchResults{ + Query: unfilteredResult.Query, + NumResults: len(filteredResults), + Results: filteredResults, + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/search_test.go b/vendor/github.com/moby/moby/daemon/search_test.go new file mode 100644 index 0000000..f5aa85a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/search_test.go @@ -0,0 +1,358 @@ +package daemon + +import ( + "fmt" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/registry" +) + +type FakeService struct { + registry.DefaultService + + shouldReturnError bool + + term string + results []registrytypes.SearchResult +} + +func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { + if s.shouldReturnError { + return nil, fmt.Errorf("Search unknown error") + } + return ®istrytypes.SearchResults{ + Query: s.term, + NumResults: len(s.results), + Results: s.results, + }, nil +} + +func TestSearchRegistryForImagesErrors(t *testing.T) { + errorCases := []struct { + filtersArgs string + shouldReturnError bool + expectedError string + }{ + { + expectedError: "Search unknown error", + shouldReturnError: true, + }, + { + filtersArgs: "invalid json", + expectedError: "invalid character 'i' looking for beginning of value", + }, + { + filtersArgs: `{"type":{"custom":true}}`, + expectedError: "Invalid filter 'type'", + }, + { + filtersArgs: `{"is-automated":{"invalid":true}}`, + expectedError: "Invalid filter 'is-automated=[invalid]'", + }, + { + filtersArgs: `{"is-automated":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-automated", + }, + { + filtersArgs: `{"is-official":{"invalid":true}}`, + expectedError: "Invalid filter 'is-official=[invalid]'", + }, + { + filtersArgs: `{"is-official":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-official", + }, + { + filtersArgs: `{"stars":{"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + { + filtersArgs: `{"stars":{"1":true,"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + } + for index, e := range errorCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + shouldReturnError: e.shouldReturnError, + }, + } + _, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{}) + if err == nil { + t.Errorf("%d: expected an error, got nothing", index) + } + if !strings.Contains(err.Error(), e.expectedError) { + t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error()) + } + } +} + +func TestSearchRegistryForImages(t *testing.T) { + term := "term" + successCases := []struct { + filtersArgs string + registryResults []registrytypes.SearchResult + expectedResults []registrytypes.SearchResult + }{ + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{}, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + }, + { + filtersArgs: `{"stars":{"0":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + IsOfficial: true, + IsAutomated: true, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + IsOfficial: false, + IsAutomated: true, + }, + { + Name: "name2", + Description: "description2", + StarCount: 1, + IsOfficial: true, + IsAutomated: false, + }, + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + }, + } + for index, s := range successCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + term: term, + results: s.registryResults, + }, + } + results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{}) + if err != nil { + t.Errorf("%d: %v", index, err) + } + if results.Query != term { + t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query) + } + if results.NumResults != len(s.expectedResults) { + t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults) + } + for _, result := range results.Results { + found := false + for _, expectedResult := range s.expectedResults { + if expectedResult.Name == result.Name && + expectedResult.Description == result.Description && + expectedResult.IsAutomated == result.IsAutomated && + expectedResult.IsOfficial == result.IsOfficial && + expectedResult.StarCount == result.StarCount { + found = true + break + } + } + if !found { + t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results) + } + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/seccomp_disabled.go b/vendor/github.com/moby/moby/daemon/seccomp_disabled.go new file mode 100644 index 0000000..ff1127b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/seccomp_disabled.go @@ -0,0 +1,19 @@ +// +build linux,!seccomp + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = false + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile") + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/seccomp_linux.go b/vendor/github.com/moby/moby/daemon/seccomp_linux.go new file mode 100644 index 0000000..7f16733 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/seccomp_linux.go @@ -0,0 +1,55 @@ +// +build linux,seccomp + +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/profiles/seccomp" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = true + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + var profile *specs.Seccomp + var err error + + if c.HostConfig.Privileged { + return nil + } + + if !daemon.seccompEnabled { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") + } + logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") + c.SeccompProfile = "unconfined" + } + if c.SeccompProfile == "unconfined" { + return nil + } + if c.SeccompProfile != "" { + profile, err = seccomp.LoadProfile(c.SeccompProfile, rs) + if err != nil { + return err + } + } else { + if daemon.seccompProfile != nil { + profile, err = seccomp.LoadProfile(string(daemon.seccompProfile), rs) + if err != nil { + return err + } + } else { + profile, err = seccomp.GetDefaultProfile(rs) + if err != nil { + return err + } + } + } + + rs.Linux.Seccomp = profile + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/seccomp_unsupported.go b/vendor/github.com/moby/moby/daemon/seccomp_unsupported.go new file mode 100644 index 0000000..b3691e9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/seccomp_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux + +package daemon + +var supportsSeccomp = false diff --git a/vendor/github.com/moby/moby/daemon/secrets.go b/vendor/github.com/moby/moby/daemon/secrets.go new file mode 100644 index 0000000..355cb1e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/secrets.go @@ -0,0 +1,36 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/swarmkit/agent/exec" +) + +// SetContainerSecretStore sets the secret store backend for the container +func (daemon *Daemon) SetContainerSecretStore(name string, store exec.SecretGetter) error { + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.SecretStore = store + + return nil +} + +// SetContainerSecretReferences sets the container secret references needed +func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error { + if !secretsSupported() && len(refs) > 0 { + logrus.Warn("secrets are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.SecretReferences = refs + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/secrets_linux.go b/vendor/github.com/moby/moby/daemon/secrets_linux.go new file mode 100644 index 0000000..fca4e12 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/secrets_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package daemon + +func secretsSupported() bool { + return true +} diff --git a/vendor/github.com/moby/moby/daemon/secrets_unsupported.go b/vendor/github.com/moby/moby/daemon/secrets_unsupported.go new file mode 100644 index 0000000..d6f36fd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/secrets_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package daemon + +func secretsSupported() bool { + return false +} diff --git a/vendor/github.com/moby/moby/daemon/selinux_linux.go b/vendor/github.com/moby/moby/daemon/selinux_linux.go new file mode 100644 index 0000000..83a3447 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/selinux_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/opencontainers/runc/libcontainer/selinux" + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} + +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() +} diff --git a/vendor/github.com/moby/moby/daemon/selinux_unsupported.go b/vendor/github.com/moby/moby/daemon/selinux_unsupported.go new file mode 100644 index 0000000..25a56ad --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/selinux_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} + +func selinuxEnabled() bool { + return false +} diff --git a/vendor/github.com/moby/moby/daemon/start.go b/vendor/github.com/moby/moby/daemon/start.go new file mode 100644 index 0000000..6c94fd5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/start.go @@ -0,0 +1,230 @@ +package daemon + +import ( + "fmt" + "net/http" + "runtime" + "strings" + "syscall" + "time" + + "google.golang.org/grpc" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/runconfig" +) + +// ContainerStart starts a container. +func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { + if checkpoint != "" && !daemon.HasExperimental() { + return apierrors.NewBadRequestError(fmt.Errorf("checkpoint is only supported in experimental mode")) + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if container.IsPaused() { + return fmt.Errorf("Cannot start a paused container, try unpause instead.") + } + + if container.IsRunning() { + err := fmt.Errorf("Container already started") + return apierrors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + + // Windows does not have the backwards compatibility issue here. + if runtime.GOOS != "windows" { + // This is kept for backward compatibility - hostconfig should be passed when + // creating a container, not during start. + if hostConfig != nil { + logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") + oldNetworkMode := container.HostConfig.NetworkMode + if err := daemon.setSecurityOptions(container, hostConfig); err != nil { + return err + } + if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil { + return err + } + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return err + } + newNetworkMode := container.HostConfig.NetworkMode + if string(oldNetworkMode) != string(newNetworkMode) { + // if user has change the network mode on starting, clean up the + // old networks. It is a deprecated feature and has been removed in Docker 1.12 + container.NetworkSettings.Networks = nil + if err := container.ToDisk(); err != nil { + return err + } + } + container.InitDNSHostConfig() + } + } else { + if hostConfig != nil { + return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") + } + } + + // check if hostConfig is in line with the current system settings. + // It may happen cgroups are umounted or the like. + if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil { + return err + } + // Adapt for old containers in case we have updates in this function and + // old containers never have chance to call the new function in create stage. + if hostConfig != nil { + if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { + return err + } + } + + return daemon.containerStart(container, checkpoint, checkpointDir, true) +} + +// Start starts a container +func (daemon *Daemon) Start(container *container.Container) error { + return daemon.containerStart(container, "", "", true) +} + +// containerStart prepares the container to run by setting up everything the +// container needs, such as storage and networking, as well as links +// between containers. The container is left waiting for a signal to +// begin running. +func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) { + start := time.Now() + container.Lock() + defer container.Unlock() + + if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false + return nil + } + + if container.RemovalInProgress || container.Dead { + return fmt.Errorf("Container is marked for removal and cannot be started.") + } + + // if we encounter an error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.SetError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode() == 0 { + container.SetExitCode(128) + } + container.ToDisk() + + container.Reset(false) + + daemon.Cleanup(container) + // if containers AutoRemove flag is set, remove it after clean up + if container.HostConfig.AutoRemove { + container.Unlock() + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("can't remove container %s: %v", container.ID, err) + } + container.Lock() + } + } + }() + + if err := daemon.conditionalMountOnStart(container); err != nil { + return err + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. + container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + if err := daemon.initializeNetworking(container); err != nil { + return err + } + + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + createOptions, err := daemon.getLibcontainerdCreateOptions(container) + if err != nil { + return err + } + + if resetRestartManager { + container.ResetRestartManager(true) + } + + if checkpointDir == "" { + checkpointDir = container.CheckpointDir() + } + + if err := daemon.containerd.Create(container.ID, checkpoint, checkpointDir, *spec, container.InitializeStdio, createOptions...); err != nil { + errDesc := grpc.ErrorDesc(err) + contains := func(s1, s2 string) bool { + return strings.Contains(strings.ToLower(s1), s2) + } + logrus.Errorf("Create container failed with error: %s", errDesc) + // if we receive an internal error from the initial start of a container then lets + // return it instead of entering the restart loop + // set to 127 for container cmd not found/does not exist) + if contains(errDesc, container.Path) && + (contains(errDesc, "executable file not found") || + contains(errDesc, "no such file or directory") || + contains(errDesc, "system cannot find the file specified")) { + container.SetExitCode(127) + } + // set to 126 for container cmd can't be invoked errors + if contains(errDesc, syscall.EACCES.Error()) { + container.SetExitCode(126) + } + + // attempted to mount a file onto a directory, or a directory onto a file, maybe from user specified bind mounts + if contains(errDesc, syscall.ENOTDIR.Error()) { + errDesc += ": Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type" + container.SetExitCode(127) + } + + return fmt.Errorf("%s", errDesc) + } + + containerActions.WithValues("start").UpdateSince(start) + + return nil +} + +// Cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. +func (daemon *Daemon) Cleanup(container *container.Container) { + daemon.releaseNetwork(container) + + container.UnmountIpcMounts(detachMounted) + + if err := daemon.conditionalUnmountOnCleanup(container); err != nil { + // FIXME: remove once reference counting for graphdrivers has been refactored + // Ensure that all the mounts are gone + if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + } + + if err := container.UnmountSecrets(); err != nil { + logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err) + } + + for _, eConfig := range container.ExecCommands.Commands() { + daemon.unregisterExecCommand(container, eConfig) + } + + if container.BaseFS != "" { + if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil { + logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + } + } + container.CancelAttachContext() +} diff --git a/vendor/github.com/moby/moby/daemon/start_unix.go b/vendor/github.com/moby/moby/daemon/start_unix.go new file mode 100644 index 0000000..6bbe485 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/start_unix.go @@ -0,0 +1,31 @@ +// +build !windows + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + + // Ensure a runtime has been assigned to this container + if container.HostConfig.Runtime == "" { + container.HostConfig.Runtime = stockRuntimeName + container.ToDisk() + } + + rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime) + if rt == nil { + return nil, fmt.Errorf("no such runtime '%s'", container.HostConfig.Runtime) + } + if UsingSystemd(daemon.configStore) { + rt.Args = append(rt.Args, "--systemd-cgroup=true") + } + createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args)) + + return createOptions, nil +} diff --git a/vendor/github.com/moby/moby/daemon/start_windows.go b/vendor/github.com/moby/moby/daemon/start_windows.go new file mode 100644 index 0000000..faa7575 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/start_windows.go @@ -0,0 +1,205 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "golang.org/x/sys/windows/registry" +) + +const ( + credentialSpecRegistryLocation = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + credentialSpecFileLocation = "CredentialSpecs" +) + +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + + // Are we going to run as a Hyper-V container? + hvOpts := &libcontainerd.HyperVIsolationOption{} + if container.HostConfig.Isolation.IsDefault() { + // Container is set to use the default, so take the default from the daemon configuration + hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container is requesting an isolation mode. Honour it. + hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV() + } + + // Generate the layer folder of the layer options + layerOpts := &libcontainerd.LayerOption{} + m, err := container.RWLayer.Metadata() + if err != nil { + return nil, fmt.Errorf("failed to get layer metadata - %s", err) + } + if hvOpts.IsHyperV { + hvOpts.SandboxPath = filepath.Dir(m["dir"]) + } + + layerOpts.LayerFolderPath = m["dir"] + + // Generate the layer paths of the layer options + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err) + } + // Get the layer path for each layer. + max := len(img.RootFS.DiffIDs) + for i := 1; i <= max; i++ { + img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] + layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) + if err != nil { + return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err) + } + // Reverse order, expecting parent most first + layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...) + } + + // Get endpoints for the libnetwork allocated networks to the container + var epList []string + AllowUnqualifiedDNSQuery := false + gwHNSID := "" + if container.NetworkSettings != nil { + for n := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(n) + if err != nil { + continue + } + + ep, err := container.GetEndpointInNetwork(sn) + if err != nil { + continue + } + + data, err := ep.DriverInfo() + if err != nil { + continue + } + + if data["GW_INFO"] != nil { + gwInfo := data["GW_INFO"].(map[string]interface{}) + if gwInfo["hnsid"] != nil { + gwHNSID = gwInfo["hnsid"].(string) + } + } + + if data["hnsid"] != nil { + epList = append(epList, data["hnsid"].(string)) + } + + if data["AllowUnqualifiedDNSQuery"] != nil { + AllowUnqualifiedDNSQuery = true + } + } + } + + if gwHNSID != "" { + epList = append(epList, gwHNSID) + } + + // Read and add credentials from the security options if a credential spec has been provided. + if container.HostConfig.SecurityOpt != nil { + for _, sOpt := range container.HostConfig.SecurityOpt { + sOpt = strings.ToLower(sOpt) + if !strings.Contains(sOpt, "=") { + return nil, fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt) + } + var splitsOpt []string + splitsOpt = strings.SplitN(sOpt, "=", 2) + if len(splitsOpt) != 2 { + return nil, fmt.Errorf("invalid security option: %s", sOpt) + } + if splitsOpt[0] != "credentialspec" { + return nil, fmt.Errorf("security option not supported: %s", splitsOpt[0]) + } + + credentialsOpts := &libcontainerd.CredentialsOption{} + var ( + match bool + csValue string + err error + ) + if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for file:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecFile(container.ID, daemon.root, filepath.Clean(csValue)); err != nil { + return nil, err + } + } else if match, csValue = getCredentialSpec("registry://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for registry:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecRegistry(container.ID, csValue); err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("invalid credential spec security option - value must be prefixed file:// or registry:// followed by a value") + } + createOptions = append(createOptions, credentialsOpts) + } + } + + // Now add the remaining options. + createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore}) + createOptions = append(createOptions, hvOpts) + createOptions = append(createOptions, layerOpts) + if epList != nil { + createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{Endpoints: epList, AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery}) + } + + return createOptions, nil +} + +// getCredentialSpec is a helper function to get the value of a credential spec supplied +// on the CLI, stripping the prefix +func getCredentialSpec(prefix, value string) (bool, string) { + if strings.HasPrefix(value, prefix) { + return true, strings.TrimPrefix(value, prefix) + } + return false, "" +} + +// readCredentialSpecRegistry is a helper function to read a credential spec from +// the registry. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. +func readCredentialSpecRegistry(id, name string) (string, error) { + var ( + k registry.Key + err error + val string + ) + if k, err = registry.OpenKey(registry.LOCAL_MACHINE, credentialSpecRegistryLocation, registry.QUERY_VALUE); err != nil { + return "", fmt.Errorf("failed handling spec %q for container %s - %s could not be opened", name, id, credentialSpecRegistryLocation) + } + if val, _, err = k.GetStringValue(name); err != nil { + if err == registry.ErrNotExist { + return "", fmt.Errorf("credential spec %q for container %s as it was not found", name, id) + } + return "", fmt.Errorf("error %v reading credential spec %q from registry for container %s", err, name, id) + } + return val, nil +} + +// readCredentialSpecFile is a helper function to read a credential spec from +// a file. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. +func readCredentialSpecFile(id, root, location string) (string, error) { + if filepath.IsAbs(location) { + return "", fmt.Errorf("invalid credential spec - file:// path cannot be absolute") + } + base := filepath.Join(root, credentialSpecFileLocation) + full := filepath.Join(base, location) + if !strings.HasPrefix(full, base) { + return "", fmt.Errorf("invalid credential spec - file:// path must be under %s", base) + } + bcontents, err := ioutil.ReadFile(full) + if err != nil { + return "", fmt.Errorf("credential spec '%s' for container %s as the file could not be read: %q", full, id, err) + } + return string(bcontents[:]), nil +} diff --git a/vendor/github.com/moby/moby/daemon/stats.go b/vendor/github.com/moby/moby/daemon/stats.go new file mode 100644 index 0000000..1778adf --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats.go @@ -0,0 +1,160 @@ +package daemon + +import ( + "encoding/json" + "errors" + "fmt" + "runtime" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerStats writes information about the container to the stream +// given in the config object. +func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error { + if runtime.GOOS == "solaris" { + return fmt.Errorf("%+v does not support stats", runtime.GOOS) + } + // Engine API version (used for backwards compatibility) + apiVersion := config.Version + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + + // If the container is either not running or restarting and requires no stream, return an empty stats. + if (!container.IsRunning() || container.IsRestarting()) && !config.Stream { + return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{ + Name: container.Name, + ID: container.ID}) + } + + outStream := config.OutStream + if config.Stream { + wf := ioutils.NewWriteFlusher(outStream) + defer wf.Close() + wf.Flush() + outStream = wf + } + + var preCPUStats types.CPUStats + var preRead time.Time + getStatJSON := func(v interface{}) *types.StatsJSON { + ss := v.(types.StatsJSON) + ss.Name = container.Name + ss.ID = container.ID + ss.PreCPUStats = preCPUStats + ss.PreRead = preRead + preCPUStats = ss.CPUStats + preRead = ss.Read + return &ss + } + + enc := json.NewEncoder(outStream) + + updates := daemon.subscribeToContainerStats(container) + defer daemon.unsubscribeToContainerStats(container, updates) + + noStreamFirstFrame := true + for { + select { + case v, ok := <-updates: + if !ok { + return nil + } + + var statsJSON interface{} + statsJSONPost120 := getStatJSON(v) + if versions.LessThan(apiVersion, "1.21") { + if runtime.GOOS == "windows" { + return errors.New("API versions pre v1.21 do not support stats on Windows") + } + var ( + rxBytes uint64 + rxPackets uint64 + rxErrors uint64 + rxDropped uint64 + txBytes uint64 + txPackets uint64 + txErrors uint64 + txDropped uint64 + ) + for _, v := range statsJSONPost120.Networks { + rxBytes += v.RxBytes + rxPackets += v.RxPackets + rxErrors += v.RxErrors + rxDropped += v.RxDropped + txBytes += v.TxBytes + txPackets += v.TxPackets + txErrors += v.TxErrors + txDropped += v.TxDropped + } + statsJSON = &v1p20.StatsJSON{ + Stats: statsJSONPost120.Stats, + Network: types.NetworkStats{ + RxBytes: rxBytes, + RxPackets: rxPackets, + RxErrors: rxErrors, + RxDropped: rxDropped, + TxBytes: txBytes, + TxPackets: txPackets, + TxErrors: txErrors, + TxDropped: txDropped, + }, + } + } else { + statsJSON = statsJSONPost120 + } + + if !config.Stream && noStreamFirstFrame { + // prime the cpu stats so they aren't 0 in the final output + noStreamFirstFrame = false + continue + } + + if err := enc.Encode(statsJSON); err != nil { + return err + } + + if !config.Stream { + return nil + } + case <-ctx.Done(): + return nil + } + } +} + +func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { + return daemon.statsCollector.collect(c) +} + +func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { + daemon.statsCollector.unsubscribe(c, ch) +} + +// GetContainerStats collects all the stats published by a container +func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { + stats, err := daemon.stats(container) + if err != nil { + return nil, err + } + + // We already have the network stats on Windows directly from HCS. + if !container.Config.NetworkDisabled && runtime.GOOS != "windows" { + if stats.Networks, err = daemon.getNetworkStats(container); err != nil { + return nil, err + } + } + + return stats, nil +} diff --git a/vendor/github.com/moby/moby/daemon/stats_collector.go b/vendor/github.com/moby/moby/daemon/stats_collector.go new file mode 100644 index 0000000..6b2bf1a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats_collector.go @@ -0,0 +1,146 @@ +// +build !solaris + +package daemon + +import ( + "bufio" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/pubsub" +) + +type statsSupervisor interface { + // GetContainerStats collects all the stats related to a container + GetContainerStats(container *container.Container) (*types.StatsJSON, error) +} + +// newStatsCollector returns a new statsCollector that collections +// stats for a registered container at the specified interval. +// The collector allows non-running containers to be added +// and will start processing stats when they are started. +func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + s := &statsCollector{ + interval: interval, + supervisor: daemon, + publishers: make(map[*container.Container]*pubsub.Publisher), + bufReader: bufio.NewReaderSize(nil, 128), + } + platformNewStatsCollector(s) + go s.run() + return s +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { + m sync.Mutex + supervisor statsSupervisor + interval time.Duration + publishers map[*container.Container]*pubsub.Publisher + bufReader *bufio.Reader + + // The following fields are not set on Windows currently. + clockTicksPerSecond uint64 + machineMemory uint64 +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +func (s *statsCollector) run() { + type publishersPair struct { + container *container.Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. + // it will grow enough in first iteration + var pairs []publishersPair + + for range time.Tick(s.interval) { + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() + for container, publisher := range s.publishers { + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + if len(pairs) == 0 { + continue + } + + systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } + + onlineCPUs, err := s.getNumberOnlineCPUs() + if err != nil { + logrus.Errorf("collecting system online cpu count: %v", err) + continue + } + + for _, pair := range pairs { + stats, err := s.supervisor.GetContainerStats(pair.container) + if err != nil { + if _, ok := err.(errNotRunning); !ok { + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + continue + } + + // publish empty stats containing only name and ID if not running + pair.publisher.Publish(types.StatsJSON{ + Name: pair.container.Name, + ID: pair.container.ID, + }) + continue + } + // FIXME: move to containerd on Linux (not Windows) + stats.CPUStats.SystemUsage = systemUsage + stats.CPUStats.OnlineCPUs = onlineCPUs + + pair.publisher.Publish(*stats) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/stats_collector_solaris.go b/vendor/github.com/moby/moby/daemon/stats_collector_solaris.go new file mode 100644 index 0000000..9cf9f0a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats_collector_solaris.go @@ -0,0 +1,34 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "time" +) + +// newStatsCollector returns a new statsCollector for collection stats +// for a registered container at the specified interval. The collector allows +// non-running containers to be added and will start processing stats when +// they are started. +func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + return &statsCollector{} +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + return nil +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { +} diff --git a/vendor/github.com/moby/moby/daemon/stats_collector_unix.go b/vendor/github.com/moby/moby/daemon/stats_collector_unix.go new file mode 100644 index 0000000..0a81cb8 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats_collector_unix.go @@ -0,0 +1,84 @@ +// +build !windows,!solaris + +package daemon + +import ( + "fmt" + "os" + "strconv" + "strings" + + sysinfo "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/system" +) + +/* +#include +*/ +import "C" + +// platformNewStatsCollector performs platform specific initialisation of the +// statsCollector structure. +func platformNewStatsCollector(s *statsCollector) { + s.clockTicksPerSecond = uint64(system.GetClockTicks()) + meminfo, err := sysinfo.ReadMemInfo() + if err == nil && meminfo.MemTotal > 0 { + s.machineMemory = uint64(meminfo.MemTotal) + } +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat defined by POSIX. Looks for the cpu +// statistics line and then sums up the first seven fields +// provided. See `man 5 proc` for details on specific field +// information. +func (s *statsCollector) getSystemCPUUsage() (uint64, error) { + var line string + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + defer func() { + s.bufReader.Reset(nil) + f.Close() + }() + s.bufReader.Reset(f) + err = nil + for err == nil { + line, err = s.bufReader.ReadString('\n') + if err != nil { + break + } + parts := strings.Fields(line) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("invalid number of cpu fields") + } + var totalClockTicks uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) + } + totalClockTicks += v + } + return (totalClockTicks * nanoSecondsPerSecond) / + s.clockTicksPerSecond, nil + } + } + return 0, fmt.Errorf("invalid stat format. Error trying to parse the '/proc/stat' file") +} + +func (s *statsCollector) getNumberOnlineCPUs() (uint32, error) { + i, err := C.sysconf(C._SC_NPROCESSORS_ONLN) + if err != nil { + return 0, err + } + return uint32(i), nil +} diff --git a/vendor/github.com/moby/moby/daemon/stats_collector_windows.go b/vendor/github.com/moby/moby/daemon/stats_collector_windows.go new file mode 100644 index 0000000..435bc4b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats_collector_windows.go @@ -0,0 +1,19 @@ +// +build windows + +package daemon + +// platformNewStatsCollector performs platform specific initialisation of the +// statsCollector structure. This is a no-op on Windows. +func platformNewStatsCollector(s *statsCollector) { +} + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. This is a no-op on Windows. +func (s *statsCollector) getSystemCPUUsage() (uint64, error) { + return 0, nil +} + +func (s *statsCollector) getNumberOnlineCPUs() (uint32, error) { + return 0, nil +} diff --git a/vendor/github.com/moby/moby/daemon/stats_unix.go b/vendor/github.com/moby/moby/daemon/stats_unix.go new file mode 100644 index 0000000..d875607 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" +) + +// Resolve Network SandboxID in case the container reuse another container's network stack +func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) { + curr := c + for curr.HostConfig.NetworkMode.IsContainer() { + containerID := curr.HostConfig.NetworkMode.ConnectedContainer() + connected, err := daemon.GetContainer(containerID) + if err != nil { + return "", fmt.Errorf("Could not get container for %s", containerID) + } + curr = connected + } + return curr.NetworkSettings.SandboxID, nil +} + +func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + sandboxID, err := daemon.getNetworkSandboxID(c) + if err != nil { + return nil, err + } + + sb, err := daemon.netController.SandboxByID(sandboxID) + if err != nil { + return nil, err + } + + lnstats, err := sb.Statistics() + if err != nil { + return nil, err + } + + stats := make(map[string]types.NetworkStats) + // Convert libnetwork nw stats into api stats + for ifName, ifStats := range lnstats { + stats[ifName] = types.NetworkStats{ + RxBytes: ifStats.RxBytes, + RxPackets: ifStats.RxPackets, + RxErrors: ifStats.RxErrors, + RxDropped: ifStats.RxDropped, + TxBytes: ifStats.TxBytes, + TxPackets: ifStats.TxPackets, + TxErrors: ifStats.TxErrors, + TxDropped: ifStats.TxDropped, + } + } + + return stats, nil +} diff --git a/vendor/github.com/moby/moby/daemon/stats_windows.go b/vendor/github.com/moby/moby/daemon/stats_windows.go new file mode 100644 index 0000000..f8e6f6f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats_windows.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" +) + +// Windows network stats are obtained directly through HCS, hence this is a no-op. +func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + return make(map[string]types.NetworkStats), nil +} diff --git a/vendor/github.com/moby/moby/daemon/stop.go b/vendor/github.com/moby/moby/daemon/stop.go new file mode 100644 index 0000000..aa7b382 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stop.go @@ -0,0 +1,83 @@ +package daemon + +import ( + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/container" +) + +// ContainerStop looks for the given container and terminates it, +// waiting the given number of seconds before forcefully killing the +// container. If a negative number of seconds is given, ContainerStop +// will wait for a graceful termination. An error is returned if the +// container is not found, is already stopped, or if there is a +// problem stopping the container. +func (daemon *Daemon) ContainerStop(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if !container.IsRunning() { + err := fmt.Errorf("Container %s is already stopped", name) + return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerStop(container, *seconds); err != nil { + return fmt.Errorf("Cannot stop container %s: %v", name, err) + } + return nil +} + +// containerStop halts a container by sending a stop signal, waiting for the given +// duration in seconds, and then calling SIGKILL and waiting for the +// process to exit. If a negative duration is given, Stop will wait +// for the initial signal forever. If the container is not running Stop returns +// immediately. +func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { + if !container.IsRunning() { + return nil + } + + daemon.stopHealthchecks(container) + + stopSignal := container.StopSignal() + // 1. Send a stop signal + if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // its probably because its already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we force kill it. + if _, err := container.WaitStop(2 * time.Second); err != nil { + logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal) + if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { + return err + } + } + } + + // 2. Wait for the process to exit on its own + if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) + // 3. If it doesn't, then send SIGKILL + if err := daemon.Kill(container); err != nil { + container.WaitStop(-1 * time.Second) + logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it + } + } + + daemon.LogContainerEvent(container, "stop") + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/top_unix.go b/vendor/github.com/moby/moby/daemon/top_unix.go new file mode 100644 index 0000000..7fb81d0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/top_unix.go @@ -0,0 +1,126 @@ +//+build !windows + +package daemon + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types" +) + +func validatePSArgs(psArgs string) error { + // NOTE: \\s does not detect unicode whitespaces. + // So we use fieldsASCII instead of strings.Fields in parsePSOutput. + // See https://github.com/docker/docker/pull/24358 + re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") + for _, group := range re.FindAllStringSubmatch(psArgs, -1) { + if len(group) >= 3 { + k := group[1] + v := group[2] + if k != "pid" { + return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v) + } + } + } + return nil +} + +// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces +func fieldsASCII(s string) []string { + fn := func(r rune) bool { + switch r { + case '\t', '\n', '\f', '\r', ' ': + return true + } + return false + } + return strings.FieldsFunc(s, fn) +} + +func parsePSOutput(output []byte, pids []int) (*types.ContainerProcessList, error) { + procList := &types.ContainerProcessList{} + + lines := strings.Split(string(output), "\n") + procList.Titles = fieldsASCII(lines[0]) + + pidIndex := -1 + for i, name := range procList.Titles { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return nil, fmt.Errorf("Couldn't find PID field in ps output") + } + + // loop through the output and extract the PID from each line + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := fieldsASCII(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) + } + } + } + return procList, nil +} + +// ContainerTop lists the processes running inside of the given +// container by calling ps with the given args, or with the flags +// "-ef" if no args are given. An error is returned if the container +// is not found, or is not running, or if there are any problems +// running ps, or parsing the output. +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + if psArgs == "" { + psArgs = "-ef" + } + + if err := validatePSArgs(psArgs); err != nil { + return nil, err + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + + pids, err := daemon.containerd.GetPidsForContainer(container.ID) + if err != nil { + return nil, err + } + + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return nil, fmt.Errorf("Error running ps: %v", err) + } + procList, err := parsePSOutput(output, pids) + if err != nil { + return nil, err + } + daemon.LogContainerEvent(container, "top") + return procList, nil +} diff --git a/vendor/github.com/moby/moby/daemon/top_unix_test.go b/vendor/github.com/moby/moby/daemon/top_unix_test.go new file mode 100644 index 0000000..269ab6e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/top_unix_test.go @@ -0,0 +1,76 @@ +//+build !windows + +package daemon + +import ( + "testing" +) + +func TestContainerTopValidatePSArgs(t *testing.T) { + tests := map[string]bool{ + "ae -o uid=PID": true, + "ae -o \"uid= PID\"": true, // ascii space (0x20) + "ae -o \"uid= PID\"": false, // unicode space (U+2003, 0xe2 0x80 0x83) + "ae o uid=PID": true, + "aeo uid=PID": true, + "ae -O uid=PID": true, + "ae -o pid=PID2 -o uid=PID": true, + "ae -o pid=PID": false, + "ae -o pid=PID -o uid=PIDX": true, // FIXME: we do not need to prohibit this + "aeo pid=PID": false, + "ae": false, + "": false, + } + for psArgs, errExpected := range tests { + err := validatePSArgs(psArgs) + t.Logf("tested %q, got err=%v", psArgs, err) + if errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, psArgs) + } + if !errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, psArgs) + } + } +} + +func TestContainerTopParsePSOutput(t *testing.T) { + tests := []struct { + output []byte + pids []int + errExpected bool + }{ + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, false}, + {[]byte(` UID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + // unicode space (U+2003, 0xe2 0x80 0x83) + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + // the first space is U+2003, the second one is ascii. + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + } + + for _, f := range tests { + _, err := parsePSOutput(f.output, f.pids) + t.Logf("tested %q, got err=%v", string(f.output), err) + if f.errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, string(f.output)) + } + if !f.errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, string(f.output)) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/top_windows.go b/vendor/github.com/moby/moby/daemon/top_windows.go new file mode 100644 index 0000000..3dd8ead --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/top_windows.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "errors" + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/go-units" +) + +// ContainerTop handles `docker top` client requests. +// Future considerations: +// -- Windows users are far more familiar with CPU% total. +// Further, users on Windows rarely see user/kernel CPU stats split. +// The kernel returns everything in terms of 100ns. To obtain +// CPU%, we could do something like docker stats does which takes two +// samples, subtract the difference and do the maths. Unfortunately this +// would slow the stat call down and require two kernel calls. So instead, +// we do something similar to linux and display the CPU as combined HH:MM:SS.mmm. +// -- Perhaps we could add an argument to display "raw" stats +// -- "Memory" is an extremely overloaded term in Windows. Hence we do what +// task manager does and use the private working set as the memory counter. +// We could return more info for those who really understand how memory +// management works in Windows if we introduced a "raw" stats (above). +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + // It's not at all an equivalent to linux 'ps' on Windows + if psArgs != "" { + return nil, errors.New("Windows does not support arguments to top") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + s, err := daemon.containerd.Summary(container.ID) + if err != nil { + return nil, err + } + procList := &types.ContainerProcessList{} + procList.Titles = []string{"Name", "PID", "CPU", "Private Working Set"} + + for _, j := range s { + d := time.Duration((j.KernelTime100ns + j.UserTime100ns) * 100) // Combined time in nanoseconds + procList.Processes = append(procList.Processes, []string{ + j.ImageName, + fmt.Sprint(j.ProcessId), + fmt.Sprintf("%02d:%02d:%02d.%03d", int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60, int(d.Nanoseconds()/1000000)%1000), + units.HumanSize(float64(j.MemoryWorkingSetPrivateBytes))}) + } + return procList, nil +} diff --git a/vendor/github.com/moby/moby/daemon/unpause.go b/vendor/github.com/moby/moby/daemon/unpause.go new file mode 100644 index 0000000..e66b386 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/unpause.go @@ -0,0 +1,38 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerUnpause unpauses a container +func (daemon *Daemon) ContainerUnpause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerUnpause(container); err != nil { + return err + } + + return nil +} + +// containerUnpause resumes the container execution after the container is paused. +func (daemon *Daemon) containerUnpause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot unpause the container which is not paused + if !container.Paused { + return fmt.Errorf("Container %s is not paused", container.ID) + } + + if err := daemon.containerd.Resume(container.ID); err != nil { + return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/update.go b/vendor/github.com/moby/moby/daemon/update.go new file mode 100644 index 0000000..6e26eeb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/update.go @@ -0,0 +1,92 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates configuration of the container +func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { + var warnings []string + + warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true) + if err != nil { + return container.ContainerUpdateOKBody{Warnings: warnings}, err + } + + if err := daemon.update(name, hostConfig); err != nil { + return container.ContainerUpdateOKBody{Warnings: warnings}, err + } + + return container.ContainerUpdateOKBody{Warnings: warnings}, nil +} + +// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID. +func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error { + if len(cmd) == 0 { + return nil + } + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + c.Path = cmd[0] + c.Args = cmd[1:] + return nil +} + +func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { + if hostConfig == nil { + return nil + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + restoreConfig := false + backupHostConfig := *container.HostConfig + defer func() { + if restoreConfig { + container.Lock() + container.HostConfig = &backupHostConfig + container.ToDisk() + container.Unlock() + } + }() + + if container.RemovalInProgress || container.Dead { + return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\".")) + } + + if err := container.UpdateContainer(hostConfig); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + + // if Restart Policy changed, we need to update container monitor + if hostConfig.RestartPolicy.Name != "" { + container.UpdateMonitor(hostConfig.RestartPolicy) + } + + // If container is not running, update hostConfig struct is enough, + // resources will be updated when the container is started again. + // If container is running (including paused), we need to update configs + // to the real world. + if container.IsRunning() && !container.IsRestarting() { + if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + } + + daemon.LogContainerEvent(container, "update") + + return nil +} + +func errCannotUpdate(containerID string, err error) error { + return fmt.Errorf("Cannot update container %s: %v", containerID, err) +} diff --git a/vendor/github.com/moby/moby/daemon/update_linux.go b/vendor/github.com/moby/moby/daemon/update_linux.go new file mode 100644 index 0000000..f422325 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/update_linux.go @@ -0,0 +1,25 @@ +// +build linux + +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + r.BlkioWeight = uint64(resources.BlkioWeight) + r.CpuShares = uint64(resources.CPUShares) + r.CpuPeriod = uint64(resources.CPUPeriod) + r.CpuQuota = uint64(resources.CPUQuota) + r.CpusetCpus = resources.CpusetCpus + r.CpusetMems = resources.CpusetMems + r.MemoryLimit = uint64(resources.Memory) + if resources.MemorySwap > 0 { + r.MemorySwap = uint64(resources.MemorySwap) + } + r.MemoryReservation = uint64(resources.MemoryReservation) + r.KernelMemoryLimit = uint64(resources.KernelMemory) + return r +} diff --git a/vendor/github.com/moby/moby/daemon/update_solaris.go b/vendor/github.com/moby/moby/daemon/update_solaris.go new file mode 100644 index 0000000..f3b545c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/update_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/vendor/github.com/moby/moby/daemon/update_windows.go b/vendor/github.com/moby/moby/daemon/update_windows.go new file mode 100644 index 0000000..0146626 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/update_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/vendor/github.com/moby/moby/daemon/volumes.go b/vendor/github.com/moby/moby/daemon/volumes.go new file mode 100644 index 0000000..cb7591e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes.go @@ -0,0 +1,389 @@ +package daemon + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + dockererrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +var ( + // ErrVolumeReadonly is used to signal an error when trying to copy data into + // a volume mount that is not writable. + ErrVolumeReadonly = errors.New("mounted volume is marked read-only") +) + +type mounts []container.Mount + +// volumeToAPIType converts a volume.Volume to the type used by the Engine API +func volumeToAPIType(v volume.Volume) *types.Volume { + tv := &types.Volume{ + Name: v.Name(), + Driver: v.DriverName(), + } + if v, ok := v.(volume.DetailedVolume); ok { + tv.Labels = v.Labels() + tv.Options = v.Options() + tv.Scope = v.Scope() + } + + return tv +} + +// Len returns the number of mounts. Used in sorting. +func (m mounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m mounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m mounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. +func (m mounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} + +// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. +// It follows the next sequence to decide what to mount in each final destination: +// +// 1. Select the previously configured mount points for the containers, if any. +// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. +// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. +// 4. Cleanup old volumes that are about to be reassigned. +func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) { + binds := map[string]bool{} + mountPoints := map[string]*volume.MountPoint{} + defer func() { + // clean up the container mountpoints once return with error + if retErr != nil { + for _, m := range mountPoints { + if m.Volume == nil { + continue + } + daemon.volumes.Dereference(m.Volume, container.ID) + } + } + }() + + dereferenceIfExists := func(destination string) { + if v, ok := mountPoints[destination]; ok { + logrus.Debugf("Duplicate mount point '%s'", destination) + if v.Volume != nil { + daemon.volumes.Dereference(v.Volume, container.ID) + } + } + } + + // 1. Read already configured mount points. + for destination, point := range container.MountPoints { + mountPoints[destination] = point + } + + // 2. Read volumes from other containers. + for _, v := range hostConfig.VolumesFrom { + containerID, mode, err := volume.ParseVolumesFrom(v) + if err != nil { + return err + } + + c, err := daemon.GetContainer(containerID) + if err != nil { + return err + } + + for _, m := range c.MountPoints { + cp := &volume.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Source, + RW: m.RW && volume.ReadWrite(mode), + Driver: m.Driver, + Destination: m.Destination, + Propagation: m.Propagation, + Spec: m.Spec, + CopyData: false, + } + + if len(cp.Source) == 0 { + v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID) + if err != nil { + return err + } + cp.Volume = v + } + dereferenceIfExists(cp.Destination) + mountPoints[cp.Destination] = cp + } + } + + // 3. Read bind mounts + for _, b := range hostConfig.Binds { + bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver) + if err != nil { + return err + } + + // #10618 + _, tmpfsExists := hostConfig.Tmpfs[bind.Destination] + if binds[bind.Destination] || tmpfsExists { + return fmt.Errorf("Duplicate mount point '%s'", bind.Destination) + } + + if bind.Type == mounttypes.TypeVolume { + // create the volume + v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil) + if err != nil { + return err + } + bind.Volume = v + bind.Source = v.Path() + // bind.Name is an already existing volume, we need to use that here + bind.Driver = v.DriverName() + if bind.Driver == volume.DefaultDriverName { + setBindModeIfNull(bind) + } + } + + binds[bind.Destination] = true + dereferenceIfExists(bind.Destination) + mountPoints[bind.Destination] = bind + } + + for _, cfg := range hostConfig.Mounts { + mp, err := volume.ParseMountSpec(cfg) + if err != nil { + return dockererrors.NewBadRequestError(err) + } + + if binds[mp.Destination] { + return fmt.Errorf("Duplicate mount point '%s'", cfg.Target) + } + + if mp.Type == mounttypes.TypeVolume { + var v volume.Volume + if cfg.VolumeOptions != nil { + var driverOpts map[string]string + if cfg.VolumeOptions.DriverConfig != nil { + driverOpts = cfg.VolumeOptions.DriverConfig.Options + } + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels) + } else { + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil) + } + if err != nil { + return err + } + + mp.Volume = v + mp.Name = v.Name() + mp.Driver = v.DriverName() + + // only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow + if cv, ok := v.(interface { + CachedPath() string + }); ok { + mp.Source = cv.CachedPath() + } + } + + binds[mp.Destination] = true + dereferenceIfExists(mp.Destination) + mountPoints[mp.Destination] = mp + } + + container.Lock() + + // 4. Cleanup old volumes that are about to be reassigned. + for _, m := range mountPoints { + if m.BackwardsCompatible() { + if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { + daemon.volumes.Dereference(mp.Volume, container.ID) + } + } + } + container.MountPoints = mountPoints + + container.Unlock() + + return nil +} + +// lazyInitializeVolume initializes a mountpoint's volume if needed. +// This happens after a daemon restart. +func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error { + if len(m.Driver) > 0 && m.Volume == nil { + v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID) + if err != nil { + return err + } + m.Volume = v + } + return nil +} + +// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13 +// mount configurations +// The container lock should not be held when calling this function. +// Changes are only made in-memory and may make changes to containers referenced +// by `container.HostConfig.VolumesFrom` +func (daemon *Daemon) backportMountSpec(container *container.Container) { + container.Lock() + defer container.Unlock() + + maybeUpdate := make(map[string]bool) + for _, mp := range container.MountPoints { + if mp.Spec.Source != "" && mp.Type != "" { + continue + } + maybeUpdate[mp.Destination] = true + } + if len(maybeUpdate) == 0 { + return + } + + mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts)) + for _, m := range container.HostConfig.Mounts { + mountSpecs[m.Target] = true + } + + binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds)) + for _, rawSpec := range container.HostConfig.Binds { + mp, err := volume.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver) + if err != nil { + logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport") + continue + } + binds[mp.Destination] = mp + } + + volumesFrom := make(map[string]volume.MountPoint) + for _, fromSpec := range container.HostConfig.VolumesFrom { + from, _, err := volume.ParseVolumesFrom(fromSpec) + if err != nil { + logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport") + continue + } + fromC, err := daemon.GetContainer(from) + if err != nil { + logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container") + continue + } + + // make sure from container's specs have been backported + daemon.backportMountSpec(fromC) + + fromC.Lock() + for t, mp := range fromC.MountPoints { + volumesFrom[t] = *mp + } + fromC.Unlock() + } + + needsUpdate := func(containerMount, other *volume.MountPoint) bool { + if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) { + return true + } + return false + } + + // main + for _, cm := range container.MountPoints { + if !maybeUpdate[cm.Destination] { + continue + } + // nothing to backport if from hostconfig.Mounts + if mountSpecs[cm.Destination] { + continue + } + + if mp, exists := binds[cm.Destination]; exists { + if needsUpdate(cm, mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type + } + continue + } + + if cm.Name != "" { + if mp, exists := volumesFrom[cm.Destination]; exists { + if needsUpdate(cm, &mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type + } + continue + } + + if cm.Type != "" { + // probably specified via the hostconfig.Mounts + continue + } + + // anon volume + cm.Type = mounttypes.TypeVolume + cm.Spec.Type = mounttypes.TypeVolume + } else { + if cm.Type != "" { + // already updated + continue + } + + cm.Type = mounttypes.TypeBind + cm.Spec.Type = mounttypes.TypeBind + cm.Spec.Source = cm.Source + if cm.Propagation != "" { + cm.Spec.BindOptions = &mounttypes.BindOptions{ + Propagation: cm.Propagation, + } + } + } + + cm.Spec.Target = cm.Destination + cm.Spec.ReadOnly = !cm.RW + } +} + +func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error { + localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName) + if err != nil { + return fmt.Errorf("can't retrieve local volume driver: %v", err) + } + vols, err := localVolumeDriver.List() + if err != nil { + return fmt.Errorf("can't retrieve local volumes: %v", err) + } + + for _, v := range vols { + name := v.Name() + _, err := daemon.volumes.Get(name) + if err != nil { + logrus.Warnf("failed to retrieve volume %s from store: %v", name, err) + } + + err = fn(v) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/volumes_unit_test.go b/vendor/github.com/moby/moby/daemon/volumes_unit_test.go new file mode 100644 index 0000000..450d17f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes_unit_test.go @@ -0,0 +1,39 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/volume" +) + +func TestParseVolumesFrom(t *testing.T) { + cases := []struct { + spec string + expID string + expMode string + fail bool + }{ + {"", "", "", true}, + {"foobar", "foobar", "rw", false}, + {"foobar:rw", "foobar", "rw", false}, + {"foobar:ro", "foobar", "ro", false}, + {"foobar:baz", "", "", true}, + } + + for _, c := range cases { + id, mode, err := volume.ParseVolumesFrom(c.spec) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) + } + continue + } + + if id != c.expID { + t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec) + } + if mode != c.expMode { + t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/volumes_unix.go b/vendor/github.com/moby/moby/daemon/volumes_unix.go new file mode 100644 index 0000000..29dffa9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes_unix.go @@ -0,0 +1,219 @@ +// +build !windows + +// TODO(amitkris): We need to split this file for solaris. + +package daemon + +import ( + "encoding/json" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/pkg/errors" +) + +// setupMounts iterates through each of the mount points for a container and +// calls Setup() on each. It also looks to see if is a network mount such as +// /etc/resolv.conf, and if it is not, appends it to the array of mounts. +func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { + var mounts []container.Mount + // TODO: tmpfs mounts should be part of Mountpoints + tmpfsMounts := make(map[string]bool) + tmpfsMountInfo, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + for _, m := range tmpfsMountInfo { + tmpfsMounts[m.Destination] = true + } + for _, m := range c.MountPoints { + if tmpfsMounts[m.Destination] { + continue + } + if err := daemon.lazyInitializeVolume(c.ID, m); err != nil { + return nil, err + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + path, err := m.Setup(c.MountLabel, rootUID, rootGID) + if err != nil { + return nil, err + } + if !c.TrySetNetworkMount(m.Destination, path) { + mnt := container.Mount{ + Source: path, + Destination: m.Destination, + Writable: m.RW, + Propagation: string(m.Propagation), + } + if m.Volume != nil { + attributes := map[string]string{ + "driver": m.Volume.DriverName(), + "container": c.ID, + "destination": m.Destination, + "read/write": strconv.FormatBool(m.RW), + "propagation": string(m.Propagation), + } + daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes) + } + mounts = append(mounts, mnt) + } + } + + mounts = sortMounts(mounts) + netMounts := c.NetworkMounts() + // if we are going to mount any of the network files from container + // metadata, the ownership must be set properly for potential container + // remapped root (user namespaces) + rootUID, rootGID := daemon.GetRemappedUIDGID() + for _, mount := range netMounts { + if err := os.Chown(mount.Source, rootUID, rootGID); err != nil { + return nil, err + } + } + return append(mounts, netMounts...), nil +} + +// sortMounts sorts an array of mounts in lexicographic order. This ensure that +// when mounting, the mounts don't shadow other mounts. For example, if mounting +// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first. +func sortMounts(m []container.Mount) []container.Mount { + sort.Sort(mounts(m)) + return m +} + +// setBindModeIfNull is platform specific processing to ensure the +// shared mode is set to 'z' if it is null. This is called in the case +// of processing a named volume and not a typical bind. +func setBindModeIfNull(bind *volume.MountPoint) { + if bind.Mode == "" { + bind.Mode = "z" + } +} + +// migrateVolume links the contents of a volume created pre Docker 1.7 +// into the location expected by the local driver. +// It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data. +// It preserves the volume json configuration generated pre Docker 1.7 to be able to +// downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility. +func migrateVolume(id, vfs string) error { + l, err := volumedrivers.GetDriver(volume.DefaultDriverName) + if err != nil { + return err + } + + newDataPath := l.(*local.Root).DataPath(id) + fi, err := os.Stat(newDataPath) + if err != nil && !os.IsNotExist(err) { + return err + } + + if fi != nil && fi.IsDir() { + return nil + } + + return os.Symlink(vfs, newDataPath) +} + +// verifyVolumesInfo ports volumes configured for the containers pre docker 1.7. +// It reads the container configuration and creates valid mount points for the old volumes. +func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + // Inspect old structures only when we're upgrading from old versions + // to versions >= 1.7 and the MountPoints has not been populated with volumes data. + type volumes struct { + Volumes map[string]string + VolumesRW map[string]bool + } + cfgPath, err := container.ConfigPath() + if err != nil { + return err + } + f, err := os.Open(cfgPath) + if err != nil { + return errors.Wrap(err, "could not open container config") + } + defer f.Close() + var cv volumes + if err := json.NewDecoder(f).Decode(&cv); err != nil { + return errors.Wrap(err, "could not decode container config") + } + + if len(container.MountPoints) == 0 && len(cv.Volumes) > 0 { + for destination, hostPath := range cv.Volumes { + vfsPath := filepath.Join(daemon.root, "vfs", "dir") + rw := cv.VolumesRW != nil && cv.VolumesRW[destination] + + if strings.HasPrefix(hostPath, vfsPath) { + id := filepath.Base(hostPath) + v, err := daemon.volumes.CreateWithRef(id, volume.DefaultDriverName, container.ID, nil, nil) + if err != nil { + return err + } + if err := migrateVolume(id, hostPath); err != nil { + return err + } + container.AddMountPointWithVolume(destination, v, true) + } else { // Bind mount + m := volume.MountPoint{Source: hostPath, Destination: destination, RW: rw} + container.MountPoints[destination] = &m + } + } + return container.ToDisk() + } + return nil +} + +func (daemon *Daemon) mountVolumes(container *container.Container) error { + mounts, err := daemon.setupMounts(container) + if err != nil { + return err + } + + for _, m := range mounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + return err + } + + var stat os.FileInfo + stat, err = os.Stat(m.Source) + if err != nil { + return err + } + if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { + return err + } + + opts := "rbind,ro" + if m.Writable { + opts = "rbind,rw" + } + + if err := mount.Mount(m.Source, dest, bindMountType, opts); err != nil { + return err + } + + // mountVolumes() seems to be called for temporary mounts + // outside the container. Soon these will be unmounted with + // lazy unmount option and given we have mounted the rbind, + // all the submounts will propagate if these are shared. If + // daemon is running in host namespace and has / as shared + // then these unmounts will propagate and unmount original + // mount as well. So make all these mounts rprivate. + // Do not use propagation property of volume as that should + // apply only when mounting happen inside the container. + if err := mount.MakeRPrivate(dest); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/volumes_unix_test.go b/vendor/github.com/moby/moby/daemon/volumes_unix_test.go new file mode 100644 index 0000000..4be7719 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes_unix_test.go @@ -0,0 +1,259 @@ +// +build !windows + +package daemon + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" +) + +func TestBackportMountSpec(t *testing.T) { + d := Daemon{containers: container.NewMemoryStore()} + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + State: &container.State{}, + MountPoints: map[string]*volume.MountPoint{ + "/apple": {Destination: "/apple", Source: "/var/lib/docker/volumes/12345678", Name: "12345678", RW: true, CopyData: true}, // anonymous volume + "/banana": {Destination: "/banana", Source: "/var/lib/docker/volumes/data", Name: "data", RW: true, CopyData: true}, // named volume + "/cherry": {Destination: "/cherry", Source: "/var/lib/docker/volumes/data", Name: "data", CopyData: true}, // RO named volume + "/dates": {Destination: "/dates", Source: "/var/lib/docker/volumes/data", Name: "data"}, // named volume nocopy + "/elderberry": {Destination: "/elderberry", Source: "/var/lib/docker/volumes/data", Name: "data"}, // masks anon vol + "/fig": {Destination: "/fig", Source: "/data", RW: true}, // RW bind + "/guava": {Destination: "/guava", Source: "/data", RW: false, Propagation: "shared"}, // RO bind + propagation + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, // volumes-from + + // partially configured mountpoint due to #32613 + // specifically, `mp.Spec.Source` is not set + "/honeydew": { + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/honeydew", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, + }, + + // from hostconfig.Mounts + "/jambolan": { + Type: mounttypes.TypeVolume, + Destination: "/jambolan", + Source: "/var/lib/docker/volumes/data", + RW: true, + Name: "data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/jambolan", Source: "data"}, + }, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/banana", + "data:/cherry:ro", + "data:/dates:ro,nocopy", + "data:/elderberry:ro,nocopy", + "/data:/fig", + "/data:/guava:ro,shared", + "data:/honeydew:nocopy", + }, + VolumesFrom: []string{"1:ro"}, + Mounts: []mounttypes.Mount{ + {Type: mounttypes.TypeVolume, Target: "/jambolan"}, + }, + }, + Config: &containertypes.Config{Volumes: map[string]struct{}{ + "/apple": {}, + "/elderberry": {}, + }}, + }} + + d.containers.Add("1", &container.Container{ + CommonContainer: container.CommonContainer{ + State: &container.State{}, + ID: "1", + MountPoints: map[string]*volume.MountPoint{ + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/kumquat:ro", + }, + }, + }, + }) + + type expected struct { + mp *volume.MountPoint + comment string + } + + pretty := func(mp *volume.MountPoint) string { + b, err := json.MarshalIndent(mp, "\t", " ") + if err != nil { + return fmt.Sprintf("%#v", mp) + } + return string(b) + } + + for _, x := range []expected{ + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/apple", + RW: true, + Name: "12345678", + Source: "/var/lib/docker/volumes/12345678", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "", + Target: "/apple", + }, + }, + comment: "anonymous volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/banana", + RW: true, + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/banana", + }, + }, + comment: "named volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/cherry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/cherry", + ReadOnly: true, + }, + }, + comment: "read-only named volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/dates", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/dates", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "named volume with nocopy", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/elderberry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/elderberry", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "masks an anonymous volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/fig", + Source: "/data", + RW: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/fig", + }, + }, + comment: "bind mount with read/write", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/guava", + Source: "/data", + RW: false, + Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/guava", + ReadOnly: true, + BindOptions: &mounttypes.BindOptions{Propagation: "shared"}, + }, + }, + comment: "bind mount with read/write + shared propgation", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Source: "/var/lib/docker/volumes/data", + RW: true, + Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/honeydew", + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + { + mp: &(*c.MountPoints["/jambolan"]), // copy the mountpoint, expect no changes + comment: "volume defined in mounts API", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/kumquat", + Source: "/var/lib/docker/volumes/data", + RW: false, + Name: "data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/kumquat", + ReadOnly: true, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + } { + + mp := c.MountPoints[x.mp.Destination] + d.backportMountSpec(c) + + if !reflect.DeepEqual(mp.Spec, x.mp.Spec) { + t.Fatalf("%s\nexpected:\n\t%s\n\ngot:\n\t%s", x.comment, pretty(x.mp), pretty(mp)) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/volumes_windows.go b/vendor/github.com/moby/moby/daemon/volumes_windows.go new file mode 100644 index 0000000..bf7fc47 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes_windows.go @@ -0,0 +1,47 @@ +// +build windows + +package daemon + +import ( + "sort" + + "github.com/docker/docker/container" + "github.com/docker/docker/volume" +) + +// setupMounts configures the mount points for a container by appending each +// of the configured mounts on the container to the OCI mount structure +// which will ultimately be passed into the oci runtime during container creation. +// It also ensures each of the mounts are lexographically sorted. + +// BUGBUG TODO Windows containerd. This would be much better if it returned +// an array of runtime spec mounts, not container mounts. Then no need to +// do multiple transitions. + +func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { + var mnts []container.Mount + for _, mount := range c.MountPoints { // type is volume.MountPoint + if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil { + return nil, err + } + s, err := mount.Setup(c.MountLabel, 0, 0) + if err != nil { + return nil, err + } + + mnts = append(mnts, container.Mount{ + Source: s, + Destination: mount.Destination, + Writable: mount.RW, + }) + } + + sort.Sort(mounts(mnts)) + return mnts, nil +} + +// setBindModeIfNull is platform specific processing which is a no-op on +// Windows. +func setBindModeIfNull(bind *volume.MountPoint) { + return +} diff --git a/vendor/github.com/moby/moby/daemon/wait.go b/vendor/github.com/moby/moby/daemon/wait.go new file mode 100644 index 0000000..2dab22e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/wait.go @@ -0,0 +1,32 @@ +package daemon + +import ( + "time" + + "golang.org/x/net/context" +) + +// ContainerWait stops processing until the given container is +// stopped. If the container is not found, an error is returned. On a +// successful stop, the exit code of the container is returned. On a +// timeout, an error is returned. If you want to wait forever, supply +// a negative duration for the timeout. +func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return -1, err + } + + return container.WaitStop(timeout) +} + +// ContainerWaitWithContext returns a channel where exit code is sent +// when container stops. Channel can be cancelled with a context. +func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + return container.WaitWithContext(ctx) +} diff --git a/vendor/github.com/moby/moby/daemon/workdir.go b/vendor/github.com/moby/moby/daemon/workdir.go new file mode 100644 index 0000000..5bd0d0c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/workdir.go @@ -0,0 +1,21 @@ +package daemon + +// ContainerCreateWorkdir creates the working directory. This is solves the +// issue arising from https://github.com/docker/docker/issues/27545, +// which was initially fixed by https://github.com/docker/docker/pull/27884. But that fix +// was too expensive in terms of performance on Windows. Instead, +// https://github.com/docker/docker/pull/28514 introduces this new functionality +// where the builder calls into the backend here to create the working directory. +func (daemon *Daemon) ContainerCreateWorkdir(cID string) error { + container, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(container) + if err != nil { + return err + } + defer daemon.Unmount(container) + rootUID, rootGID := daemon.GetRemappedUIDGID() + return container.SetupWorkingDirectory(rootUID, rootGID) +} diff --git a/vendor/github.com/moby/moby/distribution/config.go b/vendor/github.com/moby/moby/distribution/config.go new file mode 100644 index 0000000..bfea8b0 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/config.go @@ -0,0 +1,241 @@ +package distribution + +import ( + "encoding/json" + "fmt" + "io" + "runtime" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +// Config stores configuration for communicating +// with a registry. +type Config struct { + // MetaHeaders stores HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // ImageStore manages images. + ImageStore ImageConfigStore + // ReferenceStore manages tags. This value is optional, when excluded + // content will not be tagged. + ReferenceStore reference.Store + // RequireSchema2 ensures that only schema2 manifests are used. + RequireSchema2 bool +} + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + Config + + // DownloadManager manages concurrent pulls. + DownloadManager RootFSDownloadManager + // Schema2Types is the valid schema2 configuration types allowed + // by the pull operation. + Schema2Types []string +} + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + Config + + // ConfigMediaType is the configuration media type for + // schema2 manifests. + ConfigMediaType string + // LayerStore manages layers. + LayerStore PushLayerProvider + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. + TrustKey libtrust.PrivateKey + // UploadManager dispatches uploads. + UploadManager *xfer.LayerUploadManager +} + +// ImageConfigStore handles storing and getting image configurations +// by digest. Allows getting an image configurations rootfs from the +// configuration. +type ImageConfigStore interface { + Put([]byte) (digest.Digest, error) + Get(digest.Digest) ([]byte, error) + RootFSFromConfig([]byte) (*image.RootFS, error) +} + +// PushLayerProvider provides layers to be pushed by ChainID. +type PushLayerProvider interface { + Get(layer.ChainID) (PushLayer, error) +} + +// PushLayer is a pushable layer with metadata about the layer +// and access to the content of the layer. +type PushLayer interface { + ChainID() layer.ChainID + DiffID() layer.DiffID + Parent() PushLayer + Open() (io.ReadCloser, error) + Size() (int64, error) + MediaType() string + Release() +} + +// RootFSDownloadManager handles downloading of the rootfs +type RootFSDownloadManager interface { + // Download downloads the layers into the given initial rootfs and + // returns the final rootfs. + // Given progress output to track download progress + // Returns function to release download resources + Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) +} + +type imageConfigStore struct { + image.Store +} + +// NewImageConfigStoreFromStore returns an ImageConfigStore backed +// by an image.Store for container images. +func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore { + return &imageConfigStore{ + Store: is, + } +} + +func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) { + id, err := s.Store.Create(c) + return digest.Digest(id), err +} + +func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) { + img, err := s.Store.Get(image.IDFromDigest(d)) + if err != nil { + return nil, err + } + return img.RawJSON(), nil +} + +func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + var unmarshalledConfig image.Image + if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { + return nil, err + } + + // fail immediately on windows + if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" { + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } + + return unmarshalledConfig.RootFS, nil +} + +type storeLayerProvider struct { + ls layer.Store +} + +// NewLayerProviderFromStore returns a layer provider backed by +// an instance of LayerStore. Only getting layers as gzipped +// tars is supported. +func NewLayerProviderFromStore(ls layer.Store) PushLayerProvider { + return &storeLayerProvider{ + ls: ls, + } +} + +func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) { + if lid == "" { + return &storeLayer{ + Layer: layer.EmptyLayer, + }, nil + } + l, err := p.ls.Get(lid) + if err != nil { + return nil, err + } + + sl := storeLayer{ + Layer: l, + ls: p.ls, + } + if d, ok := l.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + }, nil + } + + return &sl, nil +} + +type storeLayer struct { + layer.Layer + ls layer.Store +} + +func (l *storeLayer) Parent() PushLayer { + p := l.Layer.Parent() + if p == nil { + return nil + } + sl := storeLayer{ + Layer: p, + ls: l.ls, + } + if d, ok := p.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + } + } + + return &sl +} + +func (l *storeLayer) Open() (io.ReadCloser, error) { + return l.Layer.TarStream() +} + +func (l *storeLayer) Size() (int64, error) { + return l.Layer.DiffSize() +} + +func (l *storeLayer) MediaType() string { + // layer store always returns uncompressed tars + return schema2.MediaTypeUncompressedLayer +} + +func (l *storeLayer) Release() { + if l.ls != nil { + layer.ReleaseAndLog(l.ls, l.Layer) + } +} + +type describableStoreLayer struct { + storeLayer + describable distribution.Describable +} + +func (l *describableStoreLayer) Descriptor() distribution.Descriptor { + return l.describable.Descriptor() +} diff --git a/vendor/github.com/moby/moby/distribution/errors.go b/vendor/github.com/moby/moby/distribution/errors.go new file mode 100644 index 0000000..b8cf9fb --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/errors.go @@ -0,0 +1,159 @@ +package distribution + +import ( + "net/url" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/reference" + "github.com/pkg/errors" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.Cause().Error() +} + +func (f fallbackError) Cause() error { + return f.err +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. +func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// TranslatePullError is used to convert an error from a registry pull +// operation to an error representing the entire pull operation. Any error +// information which is not used by the returned error gets output to +// log at info level. +func TranslatePullError(err error, ref reference.Named) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + for _, extra := range v[1:] { + logrus.Infof("Ignoring extra error returned from registry: %v", extra) + } + return TranslatePullError(v[0], ref) + } + case errcode.Error: + var newErr error + switch v.Code { + case errcode.ErrorCodeDenied: + // ErrorCodeDenied is used when access to the repository was denied + newErr = errors.Errorf("repository %s not found: does not exist or no pull access", ref.Name()) + case v2.ErrorCodeManifestUnknown: + newErr = errors.Errorf("manifest for %s not found", ref.String()) + case v2.ErrorCodeNameUnknown: + newErr = errors.Errorf("repository %s not found", ref.Name()) + } + if newErr != nil { + logrus.Infof("Translating %q to %q", err, newErr) + return newErr + } + case xfer.DoNotRetry: + return TranslatePullError(v.Err, ref) + } + + return err +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. +func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + return retryOnError(v[0]) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: + return xfer.DoNotRetry{Err: err} + } + case *url.Error: + switch v.Err { + case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: + return xfer.DoNotRetry{Err: v.Err} + } + return retryOnError(v.Err) + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + case error: + if err == distribution.ErrBlobUnknown { + return xfer.DoNotRetry{Err: err} + } + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return err +} diff --git a/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/bad_manifest b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/bad_manifest new file mode 100644 index 0000000..a1f02a6 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/bad_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 2, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/extra_data_manifest b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/extra_data_manifest new file mode 100644 index 0000000..beec19a --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/extra_data_manifest @@ -0,0 +1,46 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "fsLayers": [ + { + "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/good_manifest b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/good_manifest new file mode 100644 index 0000000..b107de3 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/good_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/distribution/metadata/metadata.go b/vendor/github.com/moby/moby/distribution/metadata/metadata.go new file mode 100644 index 0000000..05ba4f8 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/metadata.go @@ -0,0 +1,75 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return ioutils.AtomicWriteFile(path, value, 0644) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. +func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/vendor/github.com/moby/moby/distribution/metadata/v1_id_service.go b/vendor/github.com/moby/moby/distribution/metadata/v1_id_service.go new file mode 100644 index 0000000..f262d4d --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/v1_id_service.go @@ -0,0 +1,51 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/pkg/errors" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if idserv.store == nil { + return "", errors.New("no v1IDService storage") + } + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. +func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if idserv.store == nil { + return nil + } + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/vendor/github.com/moby/moby/distribution/metadata/v1_id_service_test.go b/vendor/github.com/moby/moby/distribution/metadata/v1_id_service_test.go new file mode 100644 index 0000000..5568865 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/v1_id_service_test.go @@ -0,0 +1,83 @@ +package metadata + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/layer" +) + +func TestV1IDService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "v1-id-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + v1IDService := NewV1IDService(metadataStore) + + testVectors := []struct { + registry string + v1ID string + layerID layer.DiffID + }{ + { + registry: "registry1", + v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", + layerID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + }, + { + registry: "registry2", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + }, + { + registry: "registry1", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + }, + } + + // Set some associations + for _, vec := range testVectors { + err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + layerID, err := v1IDService.Get(vec.v1ID, vec.registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != vec.layerID { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test Get on a nonexistent entry + _, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != testVectors[1].layerID { + t.Fatal("Get returned incorrect layer ID") + } +} diff --git a/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service.go b/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service.go new file mode 100644 index 0000000..02d1b4a --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,241 @@ +package metadata + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. +type V2MetadataService interface { + GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) + GetDiffID(dgst digest.Digest) (layer.DiffID, error) + Add(diffID layer.DiffID, metadata V2Metadata) error + TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error + Remove(metadata V2Metadata) error +} + +// v2MetadataService implements V2MetadataService +type v2MetadataService struct { + store Store +} + +var _ V2MetadataService = &v2MetadataService{} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string + // HMAC hashes above attributes with recent authconfig digest used as a key in order to determine matching + // metadata entries accompanied by the same credentials without actually exposing them. + HMAC string +} + +// CheckV2MetadataHMAC return true if the given "meta" is tagged with a hmac hashed by the given "key". +func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool { + if len(meta.HMAC) == 0 || len(key) == 0 { + return len(meta.HMAC) == 0 && len(key) == 0 + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + expectedMac := mac.Sum(nil) + + storedMac, err := hex.DecodeString(meta.HMAC) + if err != nil { + return false + } + + return hmac.Equal(storedMac, expectedMac) +} + +// ComputeV2MetadataHMAC returns a hmac for the given "meta" hash by the given key. +func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string { + if len(key) == 0 || meta == nil { + return "" + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + return hex.EncodeToString(mac.Sum(nil)) +} + +// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata +// entries. +func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) { + if authConfig == nil { + return nil, nil + } + key := authConfigKeyInput{ + Username: authConfig.Username, + Password: authConfig.Password, + Auth: authConfig.Auth, + IdentityToken: authConfig.IdentityToken, + RegistryToken: authConfig.RegistryToken, + } + buf, err := json.Marshal(&key) + if err != nil { + return nil, err + } + return []byte(digest.FromBytes([]byte(buf))), nil +} + +// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for +// hmac key creation. +type authConfigKeyInput struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + IdentityToken string `json:"identitytoken,omitempty"` + RegistryToken string `json:"registrytoken,omitempty"` +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. +func NewV2MetadataService(store Store) V2MetadataService { + return &v2MetadataService{ + store: store, + } +} + +func (serv *v2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *v2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *v2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. +func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + if serv.store == nil { + return nil, errors.New("no metadata storage") + } + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + if serv.store == nil { + return layer.DiffID(""), errors.New("no metadata storage") + } + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an add becomes a no-op. + // TODO: implement in memory storage + return nil + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// TagAndAdd amends the given "meta" for hmac hashed by the given "hmacKey" and associates it with a layer +// DiffID. If too many metadata entries are present, the oldest one is dropped. +func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error { + meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta) + return serv.Add(diffID, meta) +} + +// Remove unassociates a metadata entry from a layer DiffID. +func (serv *v2MetadataService) Remove(metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an remove becomes a no-op. + // TODO: implement in memory storage + return nil + } + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service_test.go b/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service_test.go new file mode 100644 index 0000000..7b0ecb1 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service_test.go @@ -0,0 +1,115 @@ +package metadata + +import ( + "encoding/hex" + "io/ioutil" + "math/rand" + "os" + "reflect" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +func TestV2MetadataService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + V2MetadataService := NewV2MetadataService(metadataStore) + + tooManyBlobSums := make([]V2Metadata, 100) + for i := range tooManyBlobSums { + randDigest := randomDigest() + tooManyBlobSums[i] = V2Metadata{Digest: randDigest} + } + + testVectors := []struct { + diffID layer.DiffID + metadata []V2Metadata + }{ + { + diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + }, + }, + { + diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + {Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")}, + }, + }, + { + diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + metadata: tooManyBlobSums, + }, + } + + // Set some associations + for _, vec := range testVectors { + for _, blobsum := range vec.metadata { + err := V2MetadataService.Add(vec.diffID, blobsum) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + metadata, err := V2MetadataService.GetMetadata(vec.diffID) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + expectedMetadataEntries := len(vec.metadata) + if expectedMetadataEntries > 50 { + expectedMetadataEntries = 50 + } + if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test GetMetadata on a nonexistent entry + _, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Test GetDiffID on a nonexistent entry + _, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0]) + if err != nil { + t.Fatalf("error calling Add: %v", err) + } + diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest) + if err != nil { + t.Fatalf("error calling GetDiffID: %v", err) + } + if diffID != testVectors[1].diffID { + t.Fatal("GetDiffID returned incorrect diffID") + } +} + +func randomDigest() digest.Digest { + b := [32]byte{} + for i := 0; i < len(b); i++ { + b[i] = byte(rand.Intn(256)) + } + d := hex.EncodeToString(b[:]) + return digest.Digest("sha256:" + d) +} diff --git a/vendor/github.com/moby/moby/distribution/pull.go b/vendor/github.com/moby/moby/distribution/pull.go new file mode 100644 index 0000000..a0acfe5 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull.go @@ -0,0 +1,200 @@ +package distribution + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. + // + Pull(ctx context.Context, ref reference.Named) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + // makes sure name is not empty or `scratch` + if err := ValidateRepoName(repoInfo.Name()); err != nil { + return err + } + + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport + // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. + // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the ones from v2 endpoints not v1. + discardNoSupportErrors bool + + // confirmedV2 is set to true if a pull attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + for _, endpoint := range endpoints { + if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // append subsequent errors + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + // append subsequent errors + lastErr = err + } + logrus.Errorf("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return TranslatePullError(err, ref) + } + + imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) + } + + return TranslatePullError(lastErr, ref) +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// ValidateRepoName validates the name of a repository. +func ValidateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} + +func addDigestReference(store reference.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { + dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + return err + } + + if oldTagID, err := store.Get(dgstRef); err == nil { + if oldTagID != id { + // Updating digests not supported by reference store + logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) + } + return nil + } else if err != reference.ErrDoesNotExist { + return err + } + + return store.AddDigest(dgstRef, id, true) +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v1.go b/vendor/github.com/moby/moby/distribution/pull_v1.go new file mode 100644 index 0000000..f44ed4f --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v1.go @@ -0,0 +1,368 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) + + tagged, isTagged := ref.(reference.NamedTagged) + + repoData, err := p.session.GetRepositoryData(p.repoInfo) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + if isTagged { + return fmt.Errorf("Error: image %s:%s not found", p.repoInfo.RemoteName(), tagged.Tag()) + } + return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) + } + // Unexpected HTTP error + return err + } + + logrus.Debug("Retrieving the tag list") + var tagsList map[string]string + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. + lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. + for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return err + } + + if p.config.ReferenceStore != nil { + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v2.go b/vendor/github.com/moby/moby/distribution/pull_v2.go new file mode 100644 index 0000000..88807ed --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v2.go @@ -0,0 +1,878 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +var ( + errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + errRootFSInvalid = errors.New("invalid rootfs in image configuration") +) + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. +func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + logrus.Errorf("Error trying v2 registry: %v", err) + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? + layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier + src distribution.Descriptor +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + + layerDownload, err := ld.open(ctx) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debug("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. + _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier, err = digest.NewDigestVerifier(ld.digest) + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. + if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } else if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + if m, ok := manifest.(*schema2.DeserializedManifest); ok { + var allowedMediatype bool + for _, t := range p.config.Schema2Types { + if m.Manifest.Config.MediaType == t { + allowedMediatype = true + break + } + } + if !allowedMediatype { + configClass := mediaTypeClasses[m.Manifest.Config.MediaType] + if configClass == "" { + configClass = "unknown" + } + return false, fmt.Errorf("target is %s", configClass) + } + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name()) + + var ( + id digest.Digest + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + if p.config.RequireSchema2 { + return false, fmt.Errorf("invalid manifest: not schema2") + } + id, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + id, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + id, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + if p.config.ReferenceStore != nil { + oldTagID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagID == id { + return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) + } + } else if err != reference.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { + return false, err + } + } else { + if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return false, err + } + if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { + return false, err + } + } + } + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + if _, err := p.config.ImageStore.Get(target.Digest); err == nil { + // If the image already exists locally, no need to pull + // anything. + return target.Digest, manifestDigest, nil + } + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.Layers { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + src: d, + } + + descriptors = append(descriptors, layerDescriptor) + } + + configChan := make(chan []byte, 1) + errChan := make(chan error, 1) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2Config(ctx, target.Digest) + if err != nil { + errChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var ( + configJSON []byte // raw serialized image config + downloadedRootFS *image.RootFS // rootFS from registered layers + configRootFS *image.RootFS // rootFS from configuration + ) + + // https://github.com/docker/docker/issues/24766 - Err on the side of caution, + // explicitly blocking images intended for linux from the Windows daemon. On + // Windows, we do this before the attempt to download, effectively serialising + // the download slightly slowing it down. We have to do it this way, as + // chances are the download of layers itself would fail due to file names + // which aren't suitable for NTFS. At some point in the future, if a similar + // check to block Windows images being pulled on Linux is implemented, it + // may be necessary to perform the same type of serialisation. + if runtime.GOOS == "windows" { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan) + if err != nil { + return "", "", err + } + + if configRootFS == nil { + return "", "", errRootFSInvalid + } + } + + if p.config.DownloadManager != nil { + downloadRootFS := *image.NewRootFS() + rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) + if err != nil { + if configJSON != nil { + // Already received the config + return "", "", err + } + select { + case err = <-errChan: + return "", "", err + default: + cancel() + select { + case <-configChan: + case <-errChan: + } + return "", "", err + } + } + if release != nil { + defer release() + } + + downloadedRootFS = &rootFS + } + + if configJSON == nil { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan) + if err != nil { + return "", "", err + } + + if configRootFS == nil { + return "", "", errRootFSInvalid + } + } + + if downloadedRootFS != nil { + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. + if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + for i := range downloadedRootFS.DiffIDs { + if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { + return "", "", errRootFSMismatch + } + } + } + + imageID, err := p.config.ImageStore.Put(configJSON) + if err != nil { + return "", "", err + } + + return imageID, manifestDigest, nil +} + +func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) { + select { + case configJSON := <-configChan: + rootfs, err := s.RootFSFromConfig(configJSON) + if err != nil { + return nil, nil, err + } + return configJSON, rootfs, nil + case err := <-errChan: + return nil, nil, err + // Don't need a case for ctx.Done in the select because cancellation + // will trigger an error in p.pullSchema2ImageConfig. + } +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specifc manifests. +func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) { + manifestListDigest, err = schema2ManifestDigest(ref, mfstList) + if err != nil { + return "", "", err + } + + var manifestDigest digest.Digest + for _, manifestDescriptor := range mfstList.Manifests { + // TODO(aaronl): The manifest list spec supports optional + // "features" and "variant" fields. These are not yet used. + // Once they are, their values should be interpreted here. + if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + return "", "", errors.New("no supported platform found in manifest list") + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + id, _, err = p.pullSchema1(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + id, _, err = p.pullSchema2(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return id, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return "", err + } + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return nil, err + } + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. + return errors.New("invalid parent ID in the base layer of the image") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func createDownloadFile() (*os.File, error) { + return ioutil.TempFile("", "GetImageBlob") +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v2_test.go b/vendor/github.com/moby/moby/distribution/pull_v2_test.go new file mode 100644 index 0000000..b745642 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v2_test.go @@ -0,0 +1,183 @@ +package distribution + +import ( + "encoding/json" + "io/ioutil" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/reference" +) + +// TestFixManifestLayers checks that fixManifestLayers removes a duplicate +// layer, and that it makes no changes to the manifest when called a second +// time, after the duplicate is removed. +func TestFixManifestLayers(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + duplicateLayerManifestExpectedOutput := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") + } + + // Run fixManifestLayers again and confirm that it doesn't change the + // manifest (which no longer has duplicate layers). + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") + } +} + +// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails +// if the base layer configuration specifies a parent. +func TestFixManifestLayersBaseLayerParent(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "invalid parent ID in the base layer of the image") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails +// if an image configuration specifies a parent that doesn't directly follow +// that (deduplicated) image in the image history. +func TestFixManifestLayersBadParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestValidateManifest verifies the validateManifest function +func TestValidateManifest(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + expectedDigest, err := reference.ParseNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") + if err != nil { + t.Fatal("could not parse reference") + } + expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + + // Good manifest + + goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var goodSignedManifest schema1.SignedManifest + err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in good manifest") + } + + // "Extra data" manifest + + extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var extraDataSignedManifest schema1.SignedManifest + err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in extra data manifest") + } + + // Bad manifest + + badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var badSignedManifest schema1.SignedManifest + err = json.Unmarshal(badManifestBytes, &badSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest) + if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { + t.Fatal("expected validateManifest to fail with digest error") + } +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v2_unix.go b/vendor/github.com/moby/moby/distribution/pull_v2_unix.go new file mode 100644 index 0000000..45a7a0c --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v2_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v2_windows.go b/vendor/github.com/moby/moby/distribution/pull_v2_windows.go new file mode 100644 index 0000000..aefed86 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v2_windows.go @@ -0,0 +1,49 @@ +// +build windows + +package distribution + +import ( + "net/http" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/client/transport" +) + +var _ distribution.Describable = &v2LayerDescriptor{} + +func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { + if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { + return ld.src + } + return distribution.Descriptor{} +} + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + if len(ld.src.URLs) == 0 { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) + } + + var ( + err error + rsc distribution.ReadSeekCloser + ) + + // Find the first URL that results in a 200 result code. + for _, url := range ld.src.URLs { + logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) + rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) + _, err = rsc.Seek(0, os.SEEK_SET) + if err == nil { + break + } + logrus.Debugf("Download for %v failed: %v", ld.digest, err) + rsc.Close() + rsc = nil + } + return rsc, err +} diff --git a/vendor/github.com/moby/moby/distribution/push.go b/vendor/github.com/moby/moby/distribution/push.go new file mode 100644 index 0000000..d35bdb1 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/push.go @@ -0,0 +1,186 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push(ctx context.Context) error +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. +func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on ref. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow to interrupt current push when new push of same image is done. + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) + if len(associations) == 0 { + return fmt.Errorf("An image does not exist locally with the tag: %s", repoInfo.Name()) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Errorf("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/vendor/github.com/moby/moby/distribution/push_v1.go b/vendor/github.com/moby/moby/distribution/push_v1.go new file mode 100644 index 0000000..257ac18 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/push_v1.go @@ -0,0 +1,463 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return fallbackError{err: err} + } + if err := p.pushRepository(ctx); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + return nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. +type v1TopImage struct { + v1ImageCommon + imageID image.ID +} + +func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { + v1ID := imageID.Digest().Hex() + parentV1ID := "" + if parent != nil { + parentV1ID = parent.V1ID() + } + + config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) + if err != nil { + return nil, err + } + + return &v1TopImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: config, + layer: l, + }, + imageID: imageID, + }, nil +} + +// v1DependencyImage defines a dependency layer being pushed to a v1 registry. +type v1DependencyImage struct { + v1ImageCommon +} + +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { + v1ID := digest.Digest(l.ChainID()).Hex() + + config := "" + if parent != nil { + config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) + } else { + config = fmt.Sprintf(`{"id":"%s"}`, v1ID) + } + return &v1DependencyImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: []byte(config), + layer: l, + }, + }, nil +} + +// Retrieve the all the images to be uploaded in the correct order +func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) { + tagsByImage = make(map[image.ID][]string) + + // Ignore digest references + if _, isCanonical := p.ref.(reference.Canonical); isCanonical { + return + } + + tagged, isTagged := p.ref.(reference.NamedTagged) + if isTagged { + // Push a specific tag + var imgID image.ID + var dgst digest.Digest + dgst, err = p.config.ReferenceStore.Get(p.ref) + if err != nil { + return + } + imgID = image.IDFromDigest(dgst) + + imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) + if err != nil { + return + } + + tagsByImage[imgID] = []string{tagged.Tag()} + + return + } + + imagesSeen := make(map[digest.Digest]struct{}) + dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) + + associations := p.config.ReferenceStore.ReferencesByName(p.ref) + for _, association := range associations { + if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { + // Ignore digest references. + continue + } + + imgID := image.IDFromDigest(association.ID) + tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag()) + + if _, present := imagesSeen[association.ID]; present { + // Skip generating image list for already-seen image + continue + } + imagesSeen[association.ID] = struct{}{} + + imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers) + if err != nil { + return nil, nil, nil, err + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + logrus.Debugf("Image list: %v", imageList) + logrus.Debugf("Tags by image: %v", tagsByImage) + + return +} + +func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) { + ics, ok := p.config.ImageStore.(*imageConfigStore) + if !ok { + return nil, fmt.Errorf("only image store images supported for v1 push") + } + img, err := ics.Store.Get(imgID) + if err != nil { + return nil, err + } + + topLayerID := img.RootFS.ChainID() + + pl, err := p.config.LayerStore.Get(topLayerID) + *referencedLayers = append(*referencedLayers, pl) + if err != nil { + return nil, fmt.Errorf("failed to get top layer from image: %v", err) + } + + // V1 push is deprecated, only support existing layerstore layers + lsl, ok := pl.(*storeLayer) + if !ok { + return nil, fmt.Errorf("only layer store layers supported for v1 push") + } + l := lsl.Layer + + dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) + if err != nil { + return nil, err + } + + topImage, err := newV1TopImage(imgID, img, l, parent) + if err != nil { + return nil, err + } + + imageListForThisTag = append(dependencyImages, topImage) + + return +} + +func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { + if l == nil { + return nil, nil, nil + } + + imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) + + if dependenciesSeen != nil { + if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { + // This layer is already on the list, we can ignore it + // and all its parents. + return imageListForThisTag, dependencyImage, nil + } + } + + dependencyImage, err := newV1DependencyImage(l, parent) + if err != nil { + return nil, nil, err + } + imageListForThisTag = append(imageListForThisTag, dependencyImage) + + if dependenciesSeen != nil { + dependenciesSeen[l.ChainID()] = dependencyImage + } + + return imageListForThisTag, dependencyImage, nil +} + +// createImageIndex returns an index of an image's layer IDs and tags. +func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { + var imageIndex []*registry.ImgData + for _, img := range images { + v1ID := img.V1ID() + + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + if tags, hasTags := tags[topImage.imageID]; hasTags { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: tag, + }) + } + continue + } + } + + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: "", + }) + } + return imageIndex +} + +// lookupImageOnEndpoint checks the specified endpoint to see if an image exists +// and if it is absent then it sends the image id to the channel to be pushed. +func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { + defer wg.Done() + for image := range images { + v1ID := image.V1ID() + truncID := stringid.TruncateID(image.Layer().DiffID().String()) + if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { + logrus.Errorf("Error in LookupRemoteImage: %s", err) + imagesToPush <- v1ID + progress.Update(p.config.ProgressOutput, truncID, "Waiting") + } else { + progress.Update(p.config.ProgressOutput, truncID, "Already exists") + } + } +} + +func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { + workerCount := len(imageList) + // start a maximum of 5 workers to check if images exist on the specified endpoint. + if workerCount > 5 { + workerCount = 5 + } + var ( + wg = &sync.WaitGroup{} + imageData = make(chan v1Image, workerCount*2) + imagesToPush = make(chan string, workerCount*2) + pushes = make(chan map[string]struct{}, 1) + ) + for i := 0; i < workerCount; i++ { + wg.Add(1) + go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) + } + // start a go routine that consumes the images to push + go func() { + shouldPush := make(map[string]struct{}) + for id := range imagesToPush { + shouldPush[id] = struct{}{} + } + pushes <- shouldPush + }() + for _, v1Image := range imageList { + imageData <- v1Image + } + // close the channel to notify the workers that there will be no more images to check. + close(imageData) + wg.Wait() + close(imagesToPush) + // wait for all the images that require pushes to be collected into a consumable map. + shouldPush := <-pushes + // finish by pushing any images and tags to the endpoint. The order that the images are pushed + // is very important that is why we are still iterating over the ordered list of imageIDs. + for _, img := range imageList { + v1ID := img.V1ID() + if _, push := shouldPush[v1ID]; push { + if _, err := p.pushImage(ctx, img, endpoint); err != nil { + // FIXME: Continue on error? + return err + } + } + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + for _, tag := range tags[topImage.imageID] { + progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag) + if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil { + return err + } + } + } + } + return nil +} + +// pushRepository pushes layers that do not already exist on the registry. +func (p *v1Pusher) pushRepository(ctx context.Context) error { + imgList, tags, referencedLayers, err := p.getImageList() + defer func() { + for _, l := range referencedLayers { + l.Release() + } + }() + if err != nil { + return err + } + + imageIndex := createImageIndex(imgList, tags) + for _, data := range imageIndex { + logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil) + if err != nil { + return err + } + // push the repository to each of the endpoints only if it does not exist. + for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { + l := v1Image.Layer() + v1ID := v1Image.V1ID() + truncID := stringid.TruncateID(l.DiffID().String()) + + jsonRaw := v1Image.Config() + progress.Update(p.config.ProgressOutput, truncID, "Pushing") + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := ®istry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") + return "", nil + } + return "", err + } + + arch, err := l.TarStream() + if err != nil { + return "", err + } + defer arch.Close() + + // don't care if this fails; best effort + size, _ := l.DiffSize() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") + defer reader.Close() + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") + return imgData.Checksum, nil +} diff --git a/vendor/github.com/moby/moby/distribution/push_v2.go b/vendor/github.com/moby/moby/distribution/push_v2.go new file mode 100644 index 0000000..1f8c822 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/push_v2.go @@ -0,0 +1,697 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "runtime" + "sort" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" +) + +const ( + smallLayerMaximumSize = 100 * (1 << 10) // 100KB + middleLayerMaximumSize = 10 * (1 << 20) // 10MB +) + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest digest.Digest + Size int +} + +type v2Pusher struct { + v2MetadataService metadata.V2MetadataService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + repo distribution.Repository + + // pushState is state built by the Upload functions. + pushState pushState +} + +type pushState struct { + sync.Mutex + // remoteLayers is the set of layers known to exist on the remote side. + // This avoids redundant queries when pushing multiple tags that + // involve the same layers. It is also used to fill in digest and size + // information when building the manifest. + remoteLayers map[layer.DiffID]distribution.Descriptor + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Pusher) Push(ctx context.Context) (err error) { + p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) + + p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pushV2Repository(ctx); err != nil { + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.pushState.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { + if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { + imageID, err := p.config.ReferenceStore.Get(p.ref) + if err != nil { + return fmt.Errorf("tag does not exist: %s", p.ref.String()) + } + + return p.pushV2Tag(ctx, namedTagged, imageID) + } + + if !reference.IsNameOnly(p.ref) { + return errors.New("cannot push a digest reference") + } + + // Pull all tags + pushed := 0 + for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { + if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { + pushed++ + if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil { + return err + } + } + } + + if pushed == 0 { + return fmt.Errorf("no tags to push for %s", p.repoInfo.Name()) + } + + return nil +} + +func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error { + logrus.Debugf("Pushing repository: %s", ref.String()) + + imgConfig, err := p.config.ImageStore.Get(id) + if err != nil { + return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err) + } + + rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig) + if err != nil { + return fmt.Errorf("unable to get rootfs for image %s: %s", ref.String(), err) + } + + l, err := p.config.LayerStore.Get(rootfs.ChainID()) + if err != nil { + return fmt.Errorf("failed to get top layer from image: %v", err) + } + defer l.Release() + + hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig) + if err != nil { + return fmt.Errorf("failed to compute hmac key of auth config: %v", err) + } + + var descriptors []xfer.UploadDescriptor + + descriptorTemplate := v2PushDescriptor{ + v2MetadataService: p.v2MetadataService, + hmacKey: hmacKey, + repoInfo: p.repoInfo, + ref: p.ref, + repo: p.repo, + pushState: &p.pushState, + } + + // Loop bounds condition is to avoid pushing the base layer on Windows. + for i := 0; i < len(rootfs.DiffIDs); i++ { + descriptor := descriptorTemplate + descriptor.layer = l + descriptor.checkedDigests = make(map[digest.Digest]struct{}) + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { + logrus.Warnf("failed to upload schema2 manifest: %v", err) + return err + } + + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + + if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return err + } + + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. + for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer PushLayer + v2MetadataService metadata.V2MetadataService + hmacKey []byte + repoInfo reference.Named + ref reference.Named + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor + // a set of digests whose presence has been checked in a target repository + checkedDigests map[digest.Digest]struct{} +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.ref.FullName() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } + } + + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer) + + // Do we have any metadata associated with this layer's DiffID? + v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + // check for blob existence in the target repository if we have a mapping with it + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, false, 1, v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. + bs := pd.repo.Blobs(ctx) + + var layerUpload distribution.BlobWriter + + // Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload + candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata) + for _, mountCandidate := range candidates { + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) + createOpts := []distribution.BlobCreateOption{} + + if len(mountCandidate.SourceRepository) > 0 { + namedRef, err := reference.WithName(mountCandidate.SourceRepository) + if err != nil { + logrus.Errorf("failed to parse source repository reference %v: %v", namedRef.String(), err) + pd.v2MetadataService.Remove(mountCandidate) + continue + } + + // TODO (brianbland): We need to construct a reference where the Name is + // only the full remote name, so clean this up when distribution has a + // richer reference package + remoteRef, err := distreference.WithName(namedRef.RemoteName()) + if err != nil { + logrus.Errorf("failed to make remote reference out of %q: %v", namedRef.RemoteName(), namedRef.RemoteName()) + continue + } + + canonicalRef, err := distreference.WithDigest(distreference.TrimNamed(remoteRef), mountCandidate.Digest) + if err != nil { + logrus.Errorf("failed to make canonical reference: %v", err) + continue + } + + createOpts = append(createOpts, client.WithMountFrom(canonicalRef)) + } + + // send the layer + lu, err := bs.Create(ctx, createOpts...) + switch err := err.(type) { + case nil: + // noop + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + err.Descriptor.MediaType = schema2.MediaTypeLayer + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: err.Descriptor.Digest, + SourceRepository: pd.repoInfo.FullName(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + return err.Descriptor, nil + default: + logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) + } + + if len(mountCandidate.SourceRepository) > 0 && + (metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) || + len(mountCandidate.HMAC) == 0) { + cause := "blob mount failure" + if err != nil { + cause = fmt.Sprintf("an error: %v", err.Error()) + } + logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause) + pd.v2MetadataService.Remove(mountCandidate) + } + + if lu != nil { + // cancel previous upload + cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload) + layerUpload = lu + } + } + + if maxExistenceChecks-len(pd.checkedDigests) > 0 { + // do additional layer existence checks with other known digests if any + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + + // upload the blob + desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) + if err != nil { + return desc, err + } + + return desc, nil +} + +func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { + pd.remoteDescriptor = descriptor +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + return pd.remoteDescriptor +} + +func (pd *v2PushDescriptor) uploadUsingSession( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + layerUpload distribution.BlobWriter, +) (distribution.Descriptor, error) { + var reader io.ReadCloser + + contentReader, err := pd.layer.Open() + size, _ := pd.layer.Size() + + reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") + + switch m := pd.layer.MediaType(); m { + case schema2.MediaTypeUncompressedLayer: + compressedReader, compressionDone := compress(reader) + defer func(closer io.Closer) { + closer.Close() + <-compressionDone + }(reader) + reader = compressedReader + case schema2.MediaTypeLayer: + default: + reader.Close() + return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) + } + + digester := digest.Canonical.New() + tee := io.TeeReader(reader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + reader.Close() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: pushDigest, + SourceRepository: pd.repoInfo.FullName(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + desc := distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + + pd.pushState.Lock() + // If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol. + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + + return desc, nil +} + +// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata" +// slice. If it finds one that the registry knows about, it returns the known digest and "true". If +// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository +// (not just the target one). +func (pd *v2PushDescriptor) layerAlreadyExists( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + checkOtherRepositories bool, + maxExistenceCheckAttempts int, + v2Metadata []metadata.V2Metadata, +) (desc distribution.Descriptor, exists bool, err error) { + // filter the metadata + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.FullName() { + continue + } + candidates = append(candidates, meta) + } + // sort the candidates by similarity + sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates) + + digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata) + // an array of unique blob digests ordered from the best mount candidates to worst + layerDigests := []digest.Digest{} + for i := 0; i < len(candidates); i++ { + if len(layerDigests) >= maxExistenceCheckAttempts { + break + } + meta := &candidates[i] + if _, exists := digestToMetadata[meta.Digest]; exists { + // keep reference just to the first mapping (the best mount candidate) + continue + } + if _, exists := pd.checkedDigests[meta.Digest]; exists { + // existence of this digest has already been tested + continue + } + digestToMetadata[meta.Digest] = meta + layerDigests = append(layerDigests, meta.Digest) + } + +attempts: + for _, dgst := range layerDigests { + meta := digestToMetadata[dgst] + logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName()) + desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) + pd.checkedDigests[meta.Digest] = struct{}{} + switch err { + case nil: + if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.FullName() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { + // cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: desc.Digest, + SourceRepository: pd.repoInfo.FullName(), + }); err != nil { + return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err} + } + } + desc.MediaType = schema2.MediaTypeLayer + exists = true + break attempts + case distribution.ErrBlobUnknown: + if meta.SourceRepository == pd.repoInfo.FullName() { + // remove the mapping to the target repository + pd.v2MetadataService.Remove(*meta) + } + default: + logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName()) + } + } + + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + } + + return desc, exists, nil +} + +// getMaxMountAndExistenceCheckAttempts returns a maximum number of cross repository mount attempts from +// source repositories of target registry, maximum number of layer existence checks performed on the target +// repository and whether the check shall be done also with digests mapped to different repositories. The +// decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost +// of upload does not outweigh a latency. +func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) { + size, err := layer.Size() + switch { + // big blob + case size > middleLayerMaximumSize: + // 1st attempt to mount the blob few times + // 2nd few existence checks with digests associated to any repository + // then fallback to upload + return 4, 3, true + + // middle sized blobs; if we could not get the size, assume we deal with middle sized blob + case size > smallLayerMaximumSize, err != nil: + // 1st attempt to mount blobs of average size few times + // 2nd try at most 1 existence check if there's an existing mapping to the target repository + // then fallback to upload + return 3, 1, false + + // small blobs, do a minimum number of checks + default: + return 1, 1, false + } +} + +// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The +// array is sorted from youngest to oldest. If requireReigstryMatch is true, the resulting array will contain +// only metadata entries having registry part of SourceRepository matching the part of repoInfo. +func getRepositoryMountCandidates( + repoInfo reference.Named, + hmacKey []byte, + max int, + v2Metadata []metadata.V2Metadata, +) []metadata.V2Metadata { + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + sourceRepo, err := reference.ParseNamed(meta.SourceRepository) + if err != nil || repoInfo.Hostname() != sourceRepo.Hostname() { + continue + } + // target repository is not a viable candidate + if meta.SourceRepository == repoInfo.FullName() { + continue + } + candidates = append(candidates, meta) + } + + sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates) + if max >= 0 && len(candidates) > max { + // select the youngest metadata + candidates = candidates[:max] + } + + return candidates +} + +// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The +// candidate "a" is preferred over "b": +// +// 1. if it was hashed using the same AuthConfig as the one used to authenticate to target repository and the +// "b" was not +// 2. if a number of its repository path components exactly matching path components of target repository is higher +type byLikeness struct { + arr []metadata.V2Metadata + hmacKey []byte + pathComponents []string +} + +func (bla byLikeness) Less(i, j int) bool { + aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey) + bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey) + if aMacMatch != bMacMatch { + return aMacMatch + } + aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents) + bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents) + return aMatch > bMatch +} +func (bla byLikeness) Swap(i, j int) { + bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i] +} +func (bla byLikeness) Len() int { return len(bla.arr) } + +func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) { + // reverse the metadata array to shift the newest entries to the beginning + for i := 0; i < len(marr)/2; i++ { + marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i] + } + // keep equal entries ordered from the youngest to the oldest + sort.Stable(byLikeness{ + arr: marr, + hmacKey: hmacKey, + pathComponents: getPathComponents(repoInfo.FullName()), + }) +} + +// numOfMatchingPathComponents returns a number of path components in "pth" that exactly match "matchComponents". +func numOfMatchingPathComponents(pth string, matchComponents []string) int { + pthComponents := getPathComponents(pth) + i := 0 + for ; i < len(pthComponents) && i < len(matchComponents); i++ { + if matchComponents[i] != pthComponents[i] { + return i + } + } + return i +} + +func getPathComponents(path string) []string { + // make sure to add docker.io/ prefix to the path + named, err := reference.ParseNamed(path) + if err == nil { + path = named.FullName() + } + return strings.Split(path, "/") +} + +func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) { + if layerUpload != nil { + logrus.Debugf("cancelling upload of blob %s", dgst) + err := layerUpload.Cancel(ctx) + if err != nil { + logrus.Warnf("failed to cancel upload: %v", err) + } + } +} diff --git a/vendor/github.com/moby/moby/distribution/push_v2_test.go b/vendor/github.com/moby/moby/distribution/push_v2_test.go new file mode 100644 index 0000000..6a5216b --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/push_v2_test.go @@ -0,0 +1,579 @@ +package distribution + +import ( + "net/http" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" +) + +func TestGetRepositoryMountCandidates(t *testing.T) { + for _, tc := range []struct { + name string + hmacKey string + targetRepo string + maxCandidates int + metadata []metadata.V2Metadata + candidates []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item not matching", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("key", "dgst", "127.0.0.1/repo")}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item matching", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("hash", "1", "hello-world")}, + candidates: []metadata.V2Metadata{taggedMetadata("hash", "1", "hello-world")}, + }, + { + name: "allow missing SourceRepository", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + candidates: []metadata.V2Metadata{}, + }, + { + name: "handle docker.io", + targetRepo: "user/app", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("3"), SourceRepository: "user/bar"}, + {Digest: digest.Digest("2"), SourceRepository: "app"}, + }, + candidates: []metadata.V2Metadata{ + {Digest: digest.Digest("3"), SourceRepository: "user/bar"}, + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("2"), SourceRepository: "app"}, + }, + }, + { + name: "sort more items", + hmacKey: "abcd", + targetRepo: "127.0.0.1/foo/bar", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + taggedMetadata("hash", "1", "hello-world"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + taggedMetadata("abcd", "3", "busybox"), + taggedMetadata("hash", "4", "busybox"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "7", "127.0.0.1/foo/bar"), + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + // then by longest matching prefix + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + // sort the rest of the matching items in reversed order + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + }, + }, + { + name: "limit max candidates", + hmacKey: "abcd", + targetRepo: "user/app", + maxCandidates: 3, + metadata: []metadata.V2Metadata{ + taggedMetadata("abcd", "1", "user/app1"), + taggedMetadata("abcd", "2", "user/app/base"), + taggedMetadata("hash", "3", "user/app"), + taggedMetadata("abcd", "4", "127.0.0.1/user/app"), + taggedMetadata("hash", "5", "user/foo"), + taggedMetadata("hash", "6", "app/bar"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "2", "user/app/base"), + taggedMetadata("abcd", "1", "user/app1"), + // then by longest matching prefix + taggedMetadata("hash", "3", "user/app"), + }, + }, + } { + repoInfo, err := reference.ParseNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + candidates := getRepositoryMountCandidates(repoInfo, []byte(tc.hmacKey), tc.maxCandidates, tc.metadata) + if len(candidates) != len(tc.candidates) { + t.Errorf("[%s] got unexpected number of candidates: %d != %d", tc.name, len(candidates), len(tc.candidates)) + } + for i := 0; i < len(candidates) && i < len(tc.candidates); i++ { + if !reflect.DeepEqual(candidates[i], tc.candidates[i]) { + t.Errorf("[%s] candidate %d does not match expected: %#+v != %#+v", tc.name, i, candidates[i], tc.candidates[i]) + } + } + for i := len(candidates); i < len(tc.candidates); i++ { + t.Errorf("[%s] missing expected candidate at position %d (%#+v)", tc.name, i, tc.candidates[i]) + } + for i := len(tc.candidates); i < len(candidates); i++ { + t.Errorf("[%s] got unexpected candidate at position %d (%#+v)", tc.name, i, candidates[i]) + } + } +} + +func TestLayerAlreadyExists(t *testing.T) { + for _, tc := range []struct { + name string + metadata []metadata.V2Metadata + targetRepo string + hmacKey string + maxExistenceChecks int + checkOtherRepositories bool + remoteBlobs map[digest.Digest]distribution.Descriptor + remoteErrors map[digest.Digest]error + expectedDescriptor distribution.Descriptor + expectedExists bool + expectedError error + expectedRequests []string + expectedAdditions []metadata.V2Metadata + expectedRemovals []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxExistenceChecks: 3, + checkOtherRepositories: true, + }, + { + name: "single not existent metadata", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + expectedRequests: []string{"pear"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "access denied", + targetRepo: "busybox", + maxExistenceChecks: 1, + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + remoteErrors: map[digest.Digest]error{digest.Digest("apple"): distribution.ErrAccessDenied}, + expectedError: nil, + expectedRequests: []string{"apple"}, + }, + { + name: "not matching reposies", + targetRepo: "busybox", + maxExistenceChecks: 3, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + }, + { + name: "check other repositories", + targetRepo: "busybox", + maxExistenceChecks: 10, + checkOtherRepositories: true, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + expectedRequests: []string{"plum", "pear", "apple", "orange", "banana"}, + }, + { + name: "find existing blob", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + }, + { + name: "find existing blob with different hmac", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{SourceRepository: "docker.io/library/busybox", Digest: digest.Digest("apple"), HMAC: "dummyhmac"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "overwrite media types", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + hmacKey: "key", + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple"), MediaType: "custom-media-type"}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "apple", "docker.io/library/busybox")}, + }, + { + name: "find existing blob among many", + targetRepo: "127.0.0.1/myapp", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("someotherkey", "pear", "127.0.0.1/myapp"), + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + taggedMetadata("", "plum", "127.0.0.1/myapp"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "plum", "pear"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "pear", "127.0.0.1/myapp")}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + {Digest: digest.Digest("plum"), SourceRepository: "127.0.0.1/myapp"}, + }, + }, + { + name: "reach maximum existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedExists: false, + expectedRequests: []string{"banana", "plum", "apple"}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + }, + }, + { + name: "zero allowed existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 0, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + }, + { + name: "stat single digest just once", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + taggedMetadata("key1", "pear", "docker.io/library/busybox"), + taggedMetadata("key2", "apple", "docker.io/library/busybox"), + taggedMetadata("key3", "apple", "docker.io/library/busybox"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "pear"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{taggedMetadata("key3", "apple", "docker.io/library/busybox")}, + }, + { + name: "don't stop on first error", + targetRepo: "user/app", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("key", "banana", "docker.io/user/app"), + taggedMetadata("key", "orange", "docker.io/user/app"), + taggedMetadata("key", "plum", "docker.io/user/app"), + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrAccessDenied}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {}}, + expectedError: nil, + expectedRequests: []string{"plum", "orange", "banana"}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "plum", "docker.io/user/app"), + taggedMetadata("key", "banana", "docker.io/user/app"), + }, + }, + { + name: "remove outdated metadata", + targetRepo: "docker.io/user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrBlobUnknown}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("plum"): {}}, + expectedExists: false, + expectedRequests: []string{"orange"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}}, + }, + { + name: "missing SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + maxExistenceChecks: 3, + expectedExists: false, + expectedRequests: []string{"2", "3", "1"}, + }, + + { + name: "with and without SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("3")}, + }, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("1"): {Digest: digest.Digest("1")}}, + maxExistenceChecks: 3, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("1"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"2", "3", "1"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("1"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + }, + }, + } { + repoInfo, err := reference.ParseNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + repo := &mockRepo{ + t: t, + errors: tc.remoteErrors, + blobs: tc.remoteBlobs, + requests: []string{}, + } + ctx := context.Background() + ms := &mockV2MetadataService{} + pd := &v2PushDescriptor{ + hmacKey: []byte(tc.hmacKey), + repoInfo: repoInfo, + layer: &storeLayer{ + Layer: layer.EmptyLayer, + }, + repo: repo, + v2MetadataService: ms, + pushState: &pushState{remoteLayers: make(map[layer.DiffID]distribution.Descriptor)}, + checkedDigests: make(map[digest.Digest]struct{}), + } + + desc, exists, err := pd.layerAlreadyExists(ctx, &progressSink{t}, layer.EmptyLayer.DiffID(), tc.checkOtherRepositories, tc.maxExistenceChecks, tc.metadata) + + if !reflect.DeepEqual(desc, tc.expectedDescriptor) { + t.Errorf("[%s] got unexpected descriptor: %#+v != %#+v", tc.name, desc, tc.expectedDescriptor) + } + if exists != tc.expectedExists { + t.Errorf("[%s] got unexpected exists: %t != %t", tc.name, exists, tc.expectedExists) + } + if !reflect.DeepEqual(err, tc.expectedError) { + t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError) + } + + if len(repo.requests) != len(tc.expectedRequests) { + t.Errorf("[%s] got unexpected number of requests: %d != %d", tc.name, len(repo.requests), len(tc.expectedRequests)) + } + for i := 0; i < len(repo.requests) && i < len(tc.expectedRequests); i++ { + if repo.requests[i] != tc.expectedRequests[i] { + t.Errorf("[%s] request %d does not match expected: %q != %q", tc.name, i, repo.requests[i], tc.expectedRequests[i]) + } + } + for i := len(repo.requests); i < len(tc.expectedRequests); i++ { + t.Errorf("[%s] missing expected request at position %d (%q)", tc.name, i, tc.expectedRequests[i]) + } + for i := len(tc.expectedRequests); i < len(repo.requests); i++ { + t.Errorf("[%s] got unexpected request at position %d (%q)", tc.name, i, repo.requests[i]) + } + + if len(ms.added) != len(tc.expectedAdditions) { + t.Errorf("[%s] got unexpected number of additions: %d != %d", tc.name, len(ms.added), len(tc.expectedAdditions)) + } + for i := 0; i < len(ms.added) && i < len(tc.expectedAdditions); i++ { + if ms.added[i] != tc.expectedAdditions[i] { + t.Errorf("[%s] added metadata at %d does not match expected: %q != %q", tc.name, i, ms.added[i], tc.expectedAdditions[i]) + } + } + for i := len(ms.added); i < len(tc.expectedAdditions); i++ { + t.Errorf("[%s] missing expected addition at position %d (%q)", tc.name, i, tc.expectedAdditions[i]) + } + for i := len(tc.expectedAdditions); i < len(ms.added); i++ { + t.Errorf("[%s] unexpected metadata addition at position %d (%q)", tc.name, i, ms.added[i]) + } + + if len(ms.removed) != len(tc.expectedRemovals) { + t.Errorf("[%s] got unexpected number of removals: %d != %d", tc.name, len(ms.removed), len(tc.expectedRemovals)) + } + for i := 0; i < len(ms.removed) && i < len(tc.expectedRemovals); i++ { + if ms.removed[i] != tc.expectedRemovals[i] { + t.Errorf("[%s] removed metadata at %d does not match expected: %q != %q", tc.name, i, ms.removed[i], tc.expectedRemovals[i]) + } + } + for i := len(ms.removed); i < len(tc.expectedRemovals); i++ { + t.Errorf("[%s] missing expected removal at position %d (%q)", tc.name, i, tc.expectedRemovals[i]) + } + for i := len(tc.expectedRemovals); i < len(ms.removed); i++ { + t.Errorf("[%s] removed unexpected metadata at position %d (%q)", tc.name, i, ms.removed[i]) + } + } +} + +func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata { + meta := metadata.V2Metadata{ + Digest: digest.Digest(dgst), + SourceRepository: sourceRepo, + } + + meta.HMAC = metadata.ComputeV2MetadataHMAC([]byte(key), &meta) + return meta +} + +type mockRepo struct { + t *testing.T + errors map[digest.Digest]error + blobs map[digest.Digest]distribution.Descriptor + requests []string +} + +var _ distribution.Repository = &mockRepo{} + +func (m *mockRepo) Named() distreference.Named { + m.t.Fatalf("Named() not implemented") + return nil +} +func (m *mockRepo) Manifests(ctc context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + m.t.Fatalf("Manifests() not implemented") + return nil, nil +} +func (m *mockRepo) Tags(ctc context.Context) distribution.TagService { + m.t.Fatalf("Tags() not implemented") + return nil +} +func (m *mockRepo) Blobs(ctx context.Context) distribution.BlobStore { + return &mockBlobStore{ + repo: m, + } +} + +type mockBlobStore struct { + repo *mockRepo +} + +var _ distribution.BlobStore = &mockBlobStore{} + +func (m *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + m.repo.requests = append(m.repo.requests, dgst.String()) + if err, exists := m.repo.errors[dgst]; exists { + return distribution.Descriptor{}, err + } + if desc, exists := m.repo.blobs[dgst]; exists { + return desc, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} +func (m *mockBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + m.repo.t.Fatal("Get() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + m.repo.t.Fatal("Open() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + m.repo.t.Fatal("Put() not implemented") + return distribution.Descriptor{}, nil +} + +func (m *mockBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Create() not implemented") + return nil, nil +} +func (m *mockBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Resume() not implemented") + return nil, nil +} +func (m *mockBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + m.repo.t.Fatal("Delete() not implemented") + return nil +} +func (m *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + m.repo.t.Fatalf("ServeBlob() not implemented") + return nil +} + +type mockV2MetadataService struct { + added []metadata.V2Metadata + removed []metadata.V2Metadata +} + +var _ metadata.V2MetadataService = &mockV2MetadataService{} + +func (*mockV2MetadataService) GetMetadata(diffID layer.DiffID) ([]metadata.V2Metadata, error) { + return nil, nil +} +func (*mockV2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + return "", nil +} +func (m *mockV2MetadataService) Add(diffID layer.DiffID, metadata metadata.V2Metadata) error { + m.added = append(m.added, metadata) + return nil +} +func (m *mockV2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta metadata.V2Metadata) error { + meta.HMAC = metadata.ComputeV2MetadataHMAC(hmacKey, &meta) + m.Add(diffID, meta) + return nil +} +func (m *mockV2MetadataService) Remove(metadata metadata.V2Metadata) error { + m.removed = append(m.removed, metadata) + return nil +} + +type progressSink struct { + t *testing.T +} + +func (s *progressSink) WriteProgress(p progress.Progress) error { + s.t.Logf("progress update: %#+v", p) + return nil +} diff --git a/vendor/github.com/moby/moby/distribution/registry.go b/vendor/github.com/moby/moby/distribution/registry.go new file mode 100644 index 0000000..95e181d --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/registry.go @@ -0,0 +1,156 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// ImageTypes represents the schema2 config types for images +var ImageTypes = []string{ + schema2.MediaTypeImageConfig, + // Handle unexpected values from https://github.com/docker/distribution/issues/1621 + // (see also https://github.com/docker/docker/issues/22378, + // https://github.com/docker/docker/issues/30083) + "application/octet-stream", + "application/json", + "text/html", + // Treat defaulted values as images, newer types cannot be implied + "", +} + +// PluginTypes represents the schema2 config types for plugins +var PluginTypes = []string{ + schema2.MediaTypePluginConfig, +} + +var mediaTypeClasses map[string]string + +func init() { + // initialize media type classes with all know types for + // plugin + mediaTypeClasses = map[string]string{} + for _, t := range ImageTypes { + mediaTypeClasses[t] = "image" + } + for _, t := range PluginTypes { + mediaTypeClasses[t] = "plugin" + } +} + +// NewV2Repository returns a repository (v2 only). It creates an HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.FullName() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = repoInfo.RemoteName() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + + modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) + + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + scope := auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + Class: repoInfo.Class, + } + + creds := registry.NewStaticCredentialStore(authConfig) + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) + + repoNameRef, err := distreference.ParseNamed(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/vendor/github.com/moby/moby/distribution/registry_unit_test.go b/vendor/github.com/moby/moby/distribution/registry_unit_test.go new file mode 100644 index 0000000..406de34 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/registry_unit_test.go @@ -0,0 +1,136 @@ +package distribution + +import ( + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "golang.org/x/net/context" +) + +const secretRegistryToken = "mysecrettoken" + +type tokenPassThruHandler struct { + reached bool + gotToken bool + shouldSend401 func(url string) bool +} + +func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.reached = true + if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) { + logrus.Debug("Detected registry token in auth header") + h.gotToken = true + } + if h.shouldSend401 == nil || h.shouldSend401(r.RequestURI) { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + } +} + +func testTokenPassThru(t *testing.T, ts *httptest.Server) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + uri, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("could not parse url from test server: %v", err) + } + + endpoint := registry.APIEndpoint{ + Mirror: false, + URL: uri, + Version: 2, + Official: false, + TrimHostname: false, + TLSConfig: nil, + //VersionHeader: "verheader", + } + n, _ := reference.ParseNamed("testremotename") + repoInfo := ®istry.RepositoryInfo{ + Named: n, + Index: ®istrytypes.IndexInfo{ + Name: "testrepo", + Mirrors: nil, + Secure: false, + Official: false, + }, + Official: false, + } + imagePullConfig := &ImagePullConfig{ + Config: Config{ + MetaHeaders: http.Header{}, + AuthConfig: &types.AuthConfig{ + RegistryToken: secretRegistryToken, + }, + }, + Schema2Types: ImageTypes, + } + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + t.Fatal(err) + } + p := puller.(*v2Puller) + ctx := context.Background() + p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + t.Fatal(err) + } + + logrus.Debug("About to pull") + // We expect it to fail, since we haven't mock'd the full registry exchange in our handler above + tag, _ := reference.WithTag(n, "tag_goes_here") + _ = p.pullV2Repository(ctx, tag) +} + +func TestTokenPassThru(t *testing.T) { + handler := &tokenPassThruHandler{shouldSend401: func(url string) bool { return url == "/v2/" }} + ts := httptest.NewServer(handler) + defer ts.Close() + + testTokenPassThru(t, ts) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if !handler.gotToken { + t.Fatal("Failed to receive registry token") + } +} + +func TestTokenPassThruDifferentHost(t *testing.T) { + handler := new(tokenPassThruHandler) + ts := httptest.NewServer(handler) + defer ts.Close() + + tsredirect := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/v2/" { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + return + } + http.Redirect(w, r, ts.URL+r.URL.Path, http.StatusMovedPermanently) + })) + defer tsredirect.Close() + + testTokenPassThru(t, tsredirect) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if handler.gotToken { + t.Fatal("Redirect should not forward Authorization header to another host") + } +} diff --git a/vendor/github.com/moby/moby/distribution/utils/progress.go b/vendor/github.com/moby/moby/distribution/utils/progress.go new file mode 100644 index 0000000..ef8ecc8 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/utils/progress.go @@ -0,0 +1,44 @@ +package utils + +import ( + "io" + "net" + "os" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// WriteDistributionProgress is a helper for writing progress from chan to JSON +// stream with an optional cancel function. +func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { + progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) + operationCancelled := false + + for prog := range progressChan { + if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { + // don't log broken pipe errors as this is the normal case when a client aborts + if isBrokenPipe(err) { + logrus.Info("Pull session cancelled") + } else { + logrus.Errorf("error writing progress to client: %v", err) + } + cancelFunc() + operationCancelled = true + // Don't return, because we need to continue draining + // progressChan until it's closed to avoid a deadlock. + } + } +} + +func isBrokenPipe(e error) bool { + if netErr, ok := e.(*net.OpError); ok { + e = netErr.Err + if sysErr, ok := netErr.Err.(*os.SyscallError); ok { + e = sysErr.Err + } + } + return e == syscall.EPIPE +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/download.go b/vendor/github.com/moby/moby/distribution/xfer/download.go new file mode 100644 index 0000000..7545342 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/download.go @@ -0,0 +1,452 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStore layer.Store + tm TransferManager +} + +// SetConcurrency set the max concurrent downloads for each pull +func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { + ldm.tm.SetConcurrency(concurrency) +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager { + return &LayerDownloadManager{ + layerStore: layerStore, + tm: NewTransferManager(concurrencyLimit), + } +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. +func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer, or an error + // if it is unknown (for example, if it has not been downloaded + // before). + DiffID() (layer.DiffID, error) + // Download is called to perform the download. + Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) + // Close is called when the download manager is finished with this + // descriptor and will not call Download again or read from the reader + // that Download returned. + Close() +} + +// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an +// additional Registered method which gets called after a downloaded layer is +// registered. This allows the user of the download manager to know the DiffID +// of each registered layer. This method is called if a cast to +// DownloadDescriptorWithRegistered is successful. +type DownloadDescriptorWithRegistered interface { + DownloadDescriptor + Registered(diffID layer.DiffID) +} + +// Download is a blocking function which ensures the requested layers are +// present in the layer store. It uses the string returned by the Key method to +// deduplicate downloads. If a given layer is not already known to present in +// the layer store, and the key is not used by an in-progress download, the +// Download method is called to get the layer tar data. Layers are then +// registered in the appropriate order. The caller must call the returned +// release function once it is is done with the returned RootFS object. +func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + var ( + topLayer layer.Layer + topDownload *downloadTransfer + watcher *Watcher + missingLayer bool + transferKey = "" + downloadsByKey = make(map[string]*downloadTransfer) + ) + + rootFS := initialRootFS + for _, descriptor := range layers { + key := descriptor.Key() + transferKey += key + + if !missingLayer { + missingLayer = true + diffID, err := descriptor.DiffID() + if err == nil { + getRootFS := rootFS + getRootFS.Append(diffID) + l, err := ldm.layerStore.Get(getRootFS.ChainID()) + if err == nil { + // Layer already exists. + logrus.Debugf("Layer already exists: %s", descriptor.ID()) + progress.Update(progressOutput, descriptor.ID(), "Already exists") + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + topLayer = l + missingLayer = false + rootFS.Append(diffID) + continue + } + } + } + + // Does this layer have the same data as a previous layer in + // the stack? If so, avoid downloading it more than once. + var topDownloadUncasted Transfer + if existingDownload, ok := downloadsByKey[key]; ok { + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload) + defer topDownload.Transfer.Release(watcher) + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + continue + } + + // Layer is not known to exist - download and register it. + progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? + select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + var src distribution.Descriptor + if fs, ok := descriptor.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + } + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-sourceDownload.Done(): + } + + l, err = sourceDownload.result() + if err != nil { + d.err = err + return + } + + layerReader, err := l.TarStream() + if err != nil { + d.err = err + return + } + defer layerReader.Close() + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(layerReader, parentLayer) + } + if err != nil { + d.err = fmt.Errorf("failed to register layer: %v", err) + return + } + + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/download_test.go b/vendor/github.com/moby/moby/distribution/xfer/download_test.go new file mode 100644 index 0000000..bc20e1e --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/download_test.go @@ -0,0 +1,356 @@ +package xfer + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadConcurrency = 3 + +type mockLayer struct { + layerData bytes.Buffer + diffID layer.DiffID + chainID layer.ChainID + parent layer.Layer +} + +func (ml *mockLayer) TarStream() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil +} + +func (ml *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, fmt.Errorf("not implemented") +} + +func (ml *mockLayer) ChainID() layer.ChainID { + return ml.chainID +} + +func (ml *mockLayer) DiffID() layer.DiffID { + return ml.diffID +} + +func (ml *mockLayer) Parent() layer.Layer { + return ml.parent +} + +func (ml *mockLayer) Size() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +type mockLayerStore struct { + layers map[layer.ChainID]*mockLayer +} + +func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...) +} + +func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer { + layers := map[layer.ChainID]layer.Layer{} + + for k, v := range ls.layers { + layers[k] = v + } + + return layers +} + +func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) { + return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) +} + +func (ls *mockLayerStore) RegisterWithDescriptor(reader io.Reader, parentID layer.ChainID, _ distribution.Descriptor) (layer.Layer, error) { + var ( + parent layer.Layer + err error + ) + + if parentID != "" { + parent, err = ls.Get(parentID) + if err != nil { + return nil, err + } + } + + l := &mockLayer{parent: parent} + _, err = l.layerData.ReadFrom(reader) + if err != nil { + return nil, err + } + l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes())) + l.chainID = createChainIDFromParent(parentID, l.diffID) + + ls.layers[l.chainID] = l + return l, nil +} + +func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { + l, ok := ls.layers[chainID] + if !ok { + return nil, layer.ErrLayerDoesNotExist + } + return l, nil +} + +func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { + return []layer.Metadata{}, nil +} +func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, string, layer.MountInit, map[string]string) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) { + return nil, errors.New("not implemented") +} +func (ls *mockLayerStore) GetMountID(string) (string, error) { + return "", errors.New("not implemented") +} + +func (ls *mockLayerStore) Cleanup() error { + return nil +} + +func (ls *mockLayerStore) DriverStatus() [][2]string { + return [][2]string{} +} + +func (ls *mockLayerStore) DriverName() string { + return "mock" +} + +type mockDownloadDescriptor struct { + currentDownloads *int32 + id string + diffID layer.DiffID + registeredDiffID layer.DiffID + expectedDiffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. +func (d *mockDownloadDescriptor) Key() string { + return d.id +} + +// ID returns the ID for display purposes. +func (d *mockDownloadDescriptor) ID() string { + return d.id +} + +// DiffID should return the DiffID for this layer, or an error +// if it is unknown (for example, if it has not been downloaded +// before). +func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) { + if d.diffID != "" { + return d.diffID, nil + } + return "", errors.New("no diffID available") +} + +func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) { + d.registeredDiffID = diffID +} + +func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser { + // The mock implementation returns the ID repeated 5 times as a tar + // stream instead of actual tar data. The data is ignored except for + // computing IDs. + return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id))) +} + +// Download is called to perform the download. +func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + if d.currentDownloads != nil { + defer atomic.AddInt32(d.currentDownloads, -1) + + if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency { + return nil, 0, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming download. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return nil, 0, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10}) + } + } + + if d.simulateRetries != 0 { + d.simulateRetries-- + return nil, 0, errors.New("simulating retry") + } + + return d.mockTarStream(), 0, nil +} + +func (d *mockDownloadDescriptor) Close() { +} + +func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor { + return []DownloadDescriptor{ + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id1", + expectedDiffID: layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id3", + expectedDiffID: layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id4", + expectedDiffID: layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"), + simulateRetries: 1, + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id5", + expectedDiffID: layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"), + }, + } +} + +func TestSuccessfulDownload(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} + ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]progress.Progress) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p + } + close(progressDone) + }() + + var currentDownloads int32 + descriptors := downloadDescriptors(¤tDownloads) + + firstDescriptor := descriptors[0].(*mockDownloadDescriptor) + + // Pre-register the first layer to simulate an already-existing layer + l, err := layerStore.Register(firstDescriptor.mockTarStream(), "") + if err != nil { + t.Fatal(err) + } + firstDescriptor.diffID = l.DiffID() + + rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("download error: %v", err) + } + + releaseFunc() + + close(progressChan) + <-progressDone + + if len(rootFS.DiffIDs) != len(descriptors) { + t.Fatal("got wrong number of diffIDs in rootfs") + } + + for i, d := range descriptors { + descriptor := d.(*mockDownloadDescriptor) + + if descriptor.diffID != "" { + if receivedProgress[d.ID()].Action != "Already exists" { + t.Fatalf("did not get 'Already exists' message for %v", d.ID()) + } + } else if receivedProgress[d.ID()].Action != "Pull complete" { + t.Fatalf("did not get 'Pull complete' message for %v", d.ID()) + } + + if rootFS.DiffIDs[i] != descriptor.expectedDiffID { + t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i]) + } + + if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] { + t.Fatal("diffID mismatch between rootFS and Registered callback") + } + } +} + +func TestCancelledDownload(t *testing.T) { + ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := downloadDescriptors(nil) + _, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected download to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/transfer.go b/vendor/github.com/moby/moby/distribution/xfer/transfer.go new file mode 100644 index 0000000..14f1566 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/transfer.go @@ -0,0 +1,401 @@ +package xfer + +import ( + "runtime" + "sync" + + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +// DoNotRetry is an error wrapper indicating that the error cannot be resolved +// with a retry. +type DoNotRetry struct { + Err error +} + +// Error returns the stringified representation of the encapsulated error. +func (e DoNotRetry) Error() string { + return e.Err.Error() +} + +// Watcher is returned by Watch and can be passed to Release to stop watching. +type Watcher struct { + // signalChan is used to signal to the watcher goroutine that + // new progress information is available, or that the transfer + // has finished. + signalChan chan struct{} + // releaseChan signals to the watcher goroutine that the watcher + // should be detached. + releaseChan chan struct{} + // running remains open as long as the watcher is watching the + // transfer. It gets closed if the transfer finishes or the + // watcher is detached. + running chan struct{} +} + +// Transfer represents an in-progress transfer. +type Transfer interface { + Watch(progressOutput progress.Output) *Watcher + Release(*Watcher) + Context() context.Context + Close() + Done() <-chan struct{} + Released() <-chan struct{} + Broadcast(masterProgressChan <-chan progress.Progress) +} + +type transfer struct { + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // watchers keeps track of the goroutines monitoring progress output, + // indexed by the channels that release them. + watchers map[chan struct{}]*Watcher + + // lastProgress is the most recently received progress event. + lastProgress progress.Progress + // hasLastProgress is true when lastProgress has been set. + hasLastProgress bool + + // running remains open as long as the transfer is in progress. + running chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} + + // broadcastDone is true if the master progress channel has closed. + broadcastDone bool + // closed is true if Close has been called + closed bool + // broadcastSyncChan allows watchers to "ping" the broadcasting + // goroutine to wait for it for deplete its input channel. This ensures + // a detaching watcher won't miss an event that was sent before it + // started detaching. + broadcastSyncChan chan struct{} +} + +// NewTransfer creates a new transfer. +func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + released: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. +func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + t.watchers[w.releaseChan] = w + + if t.broadcastDone { + close(w.running) + return w + } + + go func() { + defer func() { + close(w.running) + }() + var ( + done bool + lastWritten progress.Progress + hasLastWritten bool + ) + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // Make sure we don't write the last progress item + // twice. + if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { + progressOutput.WriteProgress(lastProgress) + lastWritten = lastProgress + hasLastWritten = true + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything. + select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch; indicating that the watcher no longer wants +// to be notified about the progress of the transfer. All calls to Watch must +// be paired with later calls to Release so that the lifecycle of the transfer +// is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. + select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. + Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) + // SetConcurrency set the concurrencyLimit so that it is adjustable daemon reload + SetConcurrency(concurrency int) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager. +func NewTransferManager(concurrencyLimit int) TransferManager { + return &transferManager{ + concurrencyLimit: concurrencyLimit, + transfers: make(map[string]Transfer), + } +} + +// SetConcurrency set the concurrencyLimit +func (tm *transferManager) SetConcurrency(concurrency int) { + tm.mu.Lock() + tm.concurrencyLimit = concurrency + tm.mu.Unlock() +} + +// Transfer checks if a transfer matching the given key is in progress. If not, +// it starts one by calling xferFunc. The caller supplies a channel which +// receives progress output from the transfer. +func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { + tm.mu.Lock() + defer tm.mu.Unlock() + + for { + xfer, present := tm.transfers[key] + if !present { + break + } + // Transfer is already in progress. + watcher := xfer.Watch(progressOutput) + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. + xfer.Release(watcher) + tm.mu.Unlock() + // The goroutine that removes this transfer from the + // map is also waiting for xfer.Done(), so yield to it. + // This could be avoided by adding a Closed method + // to Transfer to allow explicitly waiting for it to be + // removed the map, but forcing a scheduling round in + // this very rare case seems better than bloating the + // interface definition. + runtime.Gosched() + <-xfer.Done() + tm.mu.Lock() + default: + return xfer, watcher + } + } + + start := make(chan struct{}) + inactive := make(chan struct{}) + + if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit { + close(start) + tm.activeTransfers++ + } else { + tm.waitingTransfers = append(tm.waitingTransfers, start) + } + + masterProgressChan := make(chan progress.Progress) + xfer := xferFunc(masterProgressChan, start, inactive) + watcher := xfer.Watch(progressOutput) + go xfer.Broadcast(masterProgressChan) + tm.transfers[key] = xfer + + // When the transfer is finished, remove from the map. + go func() { + for { + select { + case <-inactive: + tm.mu.Lock() + tm.inactivate(start) + tm.mu.Unlock() + inactive = nil + case <-xfer.Done(): + tm.mu.Lock() + if inactive != nil { + tm.inactivate(start) + } + delete(tm.transfers, key) + tm.mu.Unlock() + xfer.Close() + return + } + } + }() + + return xfer, watcher +} + +func (tm *transferManager) inactivate(start chan struct{}) { + // If the transfer was started, remove it from the activeTransfers + // count. + select { + case <-start: + // Start next transfer if any are waiting + if len(tm.waitingTransfers) != 0 { + close(tm.waitingTransfers[0]) + tm.waitingTransfers = tm.waitingTransfers[1:] + } else { + tm.activeTransfers-- + } + default: + } +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/transfer_test.go b/vendor/github.com/moby/moby/distribution/xfer/transfer_test.go new file mode 100644 index 0000000..6c50ce3 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/transfer_test.go @@ -0,0 +1,410 @@ +package xfer + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/pkg/progress" +) + +func TestTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + select { + case <-start: + default: + t.Fatalf("transfer function not started even though concurrency limit not reached") + } + + xfer := NewTransfer() + go func() { + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + val, present := receivedProgress[p.ID] + if present && p.Current <= val { + t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1) + } + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start a few transfers + ids := []string{"id1", "id2", "id3"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestConcurrencyLimit(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestInactiveJobs(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + testDone := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(inactive) + <-testDone + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + close(testDone) + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestWatchRelease(t *testing.T) { + ready := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type watcherInfo struct { + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(w watcherInfo) { + first := true + for range w.progressChan { + if first { + close(w.receivedFirstProgress) + } + first = false + } + close(w.progressDone) + } + + // Start a transfer + watchers := make([]watcherInfo, 5) + var xfer Transfer + watchers[0].progressChan = make(chan progress.Progress) + watchers[0].progressDone = make(chan struct{}) + watchers[0].receivedFirstProgress = make(chan struct{}) + xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan)) + go progressConsumer(watchers[0]) + + // Give it multiple watchers + for i := 1; i != len(watchers); i++ { + watchers[i].progressChan = make(chan progress.Progress) + watchers[i].progressDone = make(chan struct{}) + watchers[i].receivedFirstProgress = make(chan struct{}) + watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan)) + go progressConsumer(watchers[i]) + } + + // Now that the watchers are set up, allow the transfer goroutine to + // proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, w := range watchers { + <-w.receivedFirstProgress + } + + // Release one watcher every 5ms + for _, w := range watchers { + xfer.Release(w.watcher) + <-time.After(5 * time.Millisecond) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() + + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-xfer.Done() + + for _, w := range watchers { + close(w.progressChan) + <-w.progressDone + } +} + +func TestWatchFinishedTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + // Finish immediately + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + + // Start a transfer + watchers := make([]*Watcher, 3) + var xfer Transfer + xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) + + // Give it a watcher immediately + watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Wait for the transfer to complete + <-xfer.Done() + + // Set up another watcher + watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Release the watchers + for _, w := range watchers { + xfer.Release(w) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() +} + +func TestDuplicateTransfer(t *testing.T) { + ready := make(chan struct{}) + + var xferFuncCalls int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + atomic.AddInt32(&xferFuncCalls, 1) + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type transferInfo struct { + xfer Transfer + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(t transferInfo) { + first := true + for range t.progressChan { + if first { + close(t.receivedFirstProgress) + } + first = false + } + close(t.progressDone) + } + + // Try to start multiple transfers with the same ID + transfers := make([]transferInfo, 5) + for i := range transfers { + t := &transfers[i] + t.progressChan = make(chan progress.Progress) + t.progressDone = make(chan struct{}) + t.receivedFirstProgress = make(chan struct{}) + t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan)) + go progressConsumer(*t) + } + + // Allow the transfer goroutine to proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, t := range transfers { + <-t.receivedFirstProgress + } + + // Confirm that the transfer function was called exactly once. + if xferFuncCalls != 1 { + t.Fatal("transfer function wasn't called exactly once") + } + + // Release one watcher every 5ms + for _, t := range transfers { + t.xfer.Release(t.watcher) + <-time.After(5 * time.Millisecond) + } + + for _, t := range transfers { + // Now that all watchers have been released, Released() should + // return a closed channel. + <-t.xfer.Released() + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-t.xfer.Done() + } + + for _, t := range transfers { + close(t.progressChan) + <-t.progressDone + } +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/upload.go b/vendor/github.com/moby/moby/distribution/xfer/upload.go new file mode 100644 index 0000000..ad33983 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/upload.go @@ -0,0 +1,168 @@ +package xfer + +import ( + "errors" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadAttempts = 5 + +// LayerUploadManager provides task management and progress reporting for +// uploads. +type LayerUploadManager struct { + tm TransferManager +} + +// SetConcurrency set the max concurrent uploads for each push +func (lum *LayerUploadManager) SetConcurrency(concurrency int) { + lum.tm.SetConcurrency(concurrency) +} + +// NewLayerUploadManager returns a new LayerUploadManager. +func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager { + return &LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + } +} + +type uploadTransfer struct { + Transfer + + remoteDescriptor distribution.Descriptor + err error +} + +// An UploadDescriptor references a layer that may need to be uploaded. +type UploadDescriptor interface { + // Key returns the key used to deduplicate uploads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer. + DiffID() layer.DiffID + // Upload is called to perform the Upload. + Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) + // SetRemoteDescriptor provides the distribution.Descriptor that was + // returned by Upload. This descriptor is not to be confused with + // the UploadDescriptor interface, which is used for internally + // identifying layers that are being uploaded. + SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. +func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/upload_test.go b/vendor/github.com/moby/moby/distribution/xfer/upload_test.go new file mode 100644 index 0000000..16bd187 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/upload_test.go @@ -0,0 +1,134 @@ +package xfer + +import ( + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadConcurrency = 3 + +type mockUploadDescriptor struct { + currentUploads *int32 + diffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. +func (u *mockUploadDescriptor) Key() string { + return u.diffID.String() +} + +// ID returns the ID for display purposes. +func (u *mockUploadDescriptor) ID() string { + return u.diffID.String() +} + +// DiffID should return the DiffID for this layer. +func (u *mockUploadDescriptor) DiffID() layer.DiffID { + return u.diffID +} + +// SetRemoteDescriptor is not used in the mock. +func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) { +} + +// Upload is called to perform the upload. +func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + if u.currentUploads != nil { + defer atomic.AddInt32(u.currentUploads, -1) + + if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency { + return distribution.Descriptor{}, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming upload. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return distribution.Descriptor{}, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10}) + } + } + + if u.simulateRetries != 0 { + u.simulateRetries-- + return distribution.Descriptor{}, errors.New("simulating retry") + } + + return distribution.Descriptor{}, nil +} + +func uploadDescriptors(currentUploads *int32) []UploadDescriptor { + return []UploadDescriptor{ + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0}, + } +} + +func TestSuccessfulUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + var currentUploads int32 + descriptors := uploadDescriptors(¤tUploads) + + err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("upload error: %v", err) + } + + close(progressChan) + <-progressDone +} + +func TestCancelledUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := uploadDescriptors(nil) + err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected upload to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/vendor/github.com/moby/moby/dockerversion/useragent.go b/vendor/github.com/moby/moby/dockerversion/useragent.go new file mode 100644 index 0000000..d2a891c --- /dev/null +++ b/vendor/github.com/moby/moby/dockerversion/useragent.go @@ -0,0 +1,74 @@ +package dockerversion + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" + "golang.org/x/net/context" +) + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. +// In accordance with RFC 7231 (5.5.3) is of the form: +// [docker client's UA] UpstreamClient([upstream client's UA]) +func DockerUserAgent(ctx context.Context) string { + httpVersion := make([]useragent.VersionInfo, 0, 6) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) + } + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) + + dockerUA := useragent.AppendVersions("", httpVersion...) + upstreamUA := getUserAgentFromContext(ctx) + if len(upstreamUA) > 0 { + ret := insertUpstreamUserAgent(upstreamUA, dockerUA) + return ret + } + return dockerUA +} + +// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists +func getUserAgentFromContext(ctx context.Context) string { + var upstreamUA string + if ctx != nil { + var ki interface{} = ctx.Value(httputils.UAStringKey) + if ki != nil { + upstreamUA = ctx.Value(httputils.UAStringKey).(string) + } + } + return upstreamUA +} + +// escapeStr returns s with every rune in charsToEscape escaped by a backslash +func escapeStr(s string, charsToEscape string) string { + var ret string + for _, currRune := range s { + appended := false + for _, escapeableRune := range charsToEscape { + if currRune == escapeableRune { + ret += `\` + string(currRune) + appended = true + break + } + } + if !appended { + ret += string(currRune) + } + } + return ret +} + +// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent +// string of the form: +// $dockerUA UpstreamClient($upstreamUA) +func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { + charsToEscape := `();\` + upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) + return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) +} diff --git a/vendor/github.com/moby/moby/dockerversion/version_lib.go b/vendor/github.com/moby/moby/dockerversion/version_lib.go new file mode 100644 index 0000000..33f77d3 --- /dev/null +++ b/vendor/github.com/moby/moby/dockerversion/version_lib.go @@ -0,0 +1,16 @@ +// +build !autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. +const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" + ContainerdCommitID string = "library-import" + RuncCommitID string = "library-import" + InitCommitID string = "library-import" +) diff --git a/vendor/github.com/moby/moby/docs/README.md b/vendor/github.com/moby/moby/docs/README.md new file mode 100644 index 0000000..da93093 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/README.md @@ -0,0 +1,30 @@ +# The non-reference docs have been moved! + + + +The documentation for Docker Engine has been merged into +[the general documentation repo](https://github.com/docker/docker.github.io). + +See the [README](https://github.com/docker/docker.github.io/blob/master/README.md) +for instructions on contributing to and building the documentation. + +If you'd like to edit the current published version of the Engine docs, +do it in the master branch here: +https://github.com/docker/docker.github.io/tree/master/engine + +If you need to document the functionality of an upcoming Engine release, +use the `vnext-engine` branch: +https://github.com/docker/docker.github.io/tree/vnext-engine/engine + +The reference docs have been left in docker/docker (this repo), which remains +the place to edit them. + +The docs in the general repo are open-source and we appreciate +your feedback and pull requests! diff --git a/vendor/github.com/moby/moby/docs/api/v1.18.md b/vendor/github.com/moby/moby/docs/api/v1.18.md new file mode 100644 index 0000000..d7aab29 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.18.md @@ -0,0 +1,2158 @@ +--- +title: "Engine API v1.18" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.18/ +- /reference/api/docker_remote_api_v1.18/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.18/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.18/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **HostConfig** + - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, `none`, and `container:` + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpuShares": 0, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.18/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000, + "throttling_data" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.18/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.18/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.18/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.18/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.18/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.18/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.18/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +**Example request, with digest information**: + + GET /v1.18/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728 + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.18/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.18/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.18/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.18/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.18/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.18/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.18/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_automated": false, + "description": "" + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.18/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.18/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "Debug": 0, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": 1, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": 1, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": 0, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.18/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.18" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.18/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.18/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.18/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.18/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. + + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.18/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs": null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + +This might change in the future. + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/moby/moby/docs/api/v1.19.md b/vendor/github.com/moby/moby/docs/api/v1.19.md new file mode 100644 index 0000000..c5d6290 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.19.md @@ -0,0 +1,2240 @@ +--- +title: "Engine API v1.19" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.19/ +- /reference/api/docker_remote_api_v1.19/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.19/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.19/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **HostConfig** + - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, `none`, and `container:` + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `none`. + `syslog` available options are: `address`. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.19/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.19/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.19/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.19/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.19/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.19/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.19/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.19/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.19/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.19/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the + URI specifies a filename, the file's contents are placed into a file + called `Dockerfile`. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.19/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.19/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.19/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.19/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.19/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). This API +returns both `is_trusted` and `is_automated` images. Currently, they +are considered identical. In the future, the `is_trusted` property will +be deprecated and replaced by the `is_automated` property. + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.19/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.19/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.19/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.19/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.19" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.19/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.19/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.19/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.19/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.19/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.19/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.19/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs": null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/moby/moby/docs/api/v1.20.md b/vendor/github.com/moby/moby/docs/api/v1.20.md new file mode 100644 index 0000000..80868cc --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.20.md @@ -0,0 +1,2393 @@ +--- +title: "Engine API v1.20" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.20/ +- /reference/api/docker_remote_api_v1.20/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.20/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.20/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "MemorySwappiness": 60, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **HostConfig** + - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, `none`, and `container:` + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.20/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.20/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.20/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.20/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.20/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.20/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.20/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.20/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.20/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.20/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.20/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.20/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.20/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.20/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.20/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.20/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.20/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.20/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.20/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.20/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.20/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.20/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.20/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.20/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.20/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.20/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.20/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.20/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.20/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.20/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/moby/moby/docs/api/v1.21.md b/vendor/github.com/moby/moby/docs/api/v1.21.md new file mode 100644 index 0000000..f21c722 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.21.md @@ -0,0 +1,2971 @@ +--- +title: "Engine API v1.21" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.21/ +- /reference/api/docker_remote_api_v1.21/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.21/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.21/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "MemorySwappiness": 60, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "" + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Example request, with size information**: + + GET /v1.21/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.21/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.21/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.21/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.21/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.21/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.21/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.21/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.21/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.21/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.21/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.21/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.21/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.21/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.21/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.21/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.21/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.21/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.21/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.21/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.21/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.21/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.21/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.21/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.21/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.21/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.21/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"pull","id":"busybox:latest","time":1442421700,"timeNano":1442421700598988358} + {"status":"create","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716853979870} + {"status":"attach","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716894759198} + {"status":"start","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716983607193} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.21/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.21/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.21/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.21/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Status" : "running", + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "" + } + } + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.21/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.21/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.21/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.21/networks HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: `name=[network-names]` , `id=[network-ids]` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.21/networks/f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.21/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + } + ] + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` +- **Options** - Network specific options to be used by the drivers + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4" +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4" +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/moby/moby/docs/api/v1.22.md b/vendor/github.com/moby/moby/docs/api/v1.22.md new file mode 100644 index 0000000..6b3d6ec --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.22.md @@ -0,0 +1,3309 @@ +--- +title: "Engine API v1.22" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.22/ +- /reference/api/docker_remote_api_v1.22/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.22/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + } + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + } + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + } + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + } + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.22/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033" + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding + mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` + - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `splunk`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.22/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.22/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.22/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update resource configs of one or more containers. + +**Example request**: + + POST /v1.22/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800 + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.22/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.22/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.22/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.22/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.22/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.22/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.22/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.22/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.22/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.22/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.22/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.22/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.22/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.22/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. + +**Example request**: + + POST /v1.22/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com", + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.22/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.22/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.22/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.22/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.22/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.22/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.10.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.22", + "BuildTime": "2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.22/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.22/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.22/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.10.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.22/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.22/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.22/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.22/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.22/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.22/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `id=` Matches all or part of a network id. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.22/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.22/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **Options** - Network specific options to be used by the drivers + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/moby/moby/docs/api/v1.23.md b/vendor/github.com/moby/moby/docs/api/v1.23.md new file mode 100644 index 0000000..aaf4204 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.23.md @@ -0,0 +1,3426 @@ +--- +title: "Engine API v1.23" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.23/ +- /reference/api/docker_remote_api_v1.23/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.23/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "Exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `ancestor`=(`[:]`, `` or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.23/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PidsLimit": -1, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033" + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding + mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` + - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **UsernsMode** - Sets the usernamespace mode for the container when usernamespace remapping option is enabled. + supported values are: `host`. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.23/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.23/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.23/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. + +**Example request**: + + POST /v1.23/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + "RestartPolicy": { + "MaximumRetryCount": 4, + "Name": "on-failure" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.23/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.23/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.23/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.23/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.23/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.23/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.23/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.23/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.23/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.23/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.23/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.23/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.23/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.23/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. + +**Example request**: + + POST /v1.23/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com", + } + ``` + + - Identity token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.23/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.23/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.23/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Validate credentials for a registry and get identity token, +if available, for accessing the registry without password. + +**Example request**: + + POST /v1.23/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + + { + "Status": "Login Succeeded", + "IdentityToken": "9cbaf023786cd7..." + } + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.23/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "CgroupDriver": "cgroupfs", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelMemory": true, + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.23/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.11.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.23", + "BuildTime": "2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.23/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.23/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.23/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.11.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.23/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"} + ... + {"stream":"Loaded image: busybox:latest\n"} + +**Example response**: + +If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress +details are suppressed, and only a confirmation message is returned once the +action completes. + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"stream":"Loaded image: busybox:latest\n"} + +**Query parameters**: + +- **quiet** – Boolean value, suppress progress details during load. Defaults + to `0` / `false` if omitted. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.23/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.23/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.23/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. +- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}` + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /v1.23/volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis/_data", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.23/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 3.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.23/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `id=` Matches all or part of a network id. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.23/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal": false, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.23/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "EnableIPv6": true, + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **Internal** - Restrict external access to the network +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **EnableIPv6** - Enable IPv6 on the network +- **Options** - Network specific options to be used by the drivers +- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}` + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/moby/moby/docs/api/v1.24.md b/vendor/github.com/moby/moby/docs/api/v1.24.md new file mode 100644 index 0000000..197bee2 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.24.md @@ -0,0 +1,5321 @@ +--- +title: "Engine API v1.24" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.24/ +- /reference/api/docker_remote_api_v1.24/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Errors + +The Engine API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + { + "message": "page not found" + } + +The status codes that are returned for each endpoint are specified in the endpoint documentation below. + +## 3. Endpoints + +### 3.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.24/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "Exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `ancestor`=(`[:]`, `` or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + - `network`=(`` or ``) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.24/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuPercent": 80, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PidsLimit": -1, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Sysctls": { "net.ipv4.ip_forward": "1" }, + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "StorageOpt": {}, + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033", + "LinkLocalIPs":["169.254.34.68", "fe80::3468"] + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. This must be a valid RFC 1123 hostname. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding + mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuPercent** - An integer value containing the usable percentage of the available CPUs. (Windows daemon only) + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **IOMaximumBandwidth** - Maximum IO absolute rate in terms of IOps. + - **IOMaximumIOps** - Maximum IO absolute rate in terms of bytes per second. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` + - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **UsernsMode** - Sets the usernamespace mode for the container when usernamespace remapping option is enabled. + supported values are: `host`. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **Sysctls** - A list of kernel parameters (sysctls) to set in the container, specified as + `{ : }`, for example: + `{ "net.ipv4.ip_forward": "1" }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **StorageOpt**: Storage driver options per container. Options can be passed in the form + `{"size":"120G"}` + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuPercent": 80, + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "Sysctls": { + "net.ipv4.ip_forward": "1" + }, + "StorageOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.24/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **details** - 1/True/true or 0/False/flase, Show extra details provided to logs. Default `false`. +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.24/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.24/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. + +**Example request**: + + POST /v1.24/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + "RestartPolicy": { + "MaximumRetryCount": 4, + "Name": "on-failure" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.24/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.24/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.24/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.24/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.24/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.24/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.24/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 3.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.24/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.24/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `before`=(`[:]`, `` or ``) + - `since`=(`[:]`, `` or ``) +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.24/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.24/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.24/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.24/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.24/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. + +**Example request**: + + POST /v1.24/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com", + } + ``` + + - Identity token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.24/images/test/tag?repo=myrepo&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.24/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.24/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search +- **limit** – maximum returned search results +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `stars=` + - `is-automated=(true|false)` + - `is-official=(true|false)` + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 3.3 Misc + +#### Check auth configuration + +`POST /auth` + +Validate credentials for a registry and get identity token, +if available, for accessing the registry without password. + +**Example request**: + + POST /v1.24/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + + { + "Status": "Login Succeeded", + "IdentityToken": "9cbaf023786cd7..." + } + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.24/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "CgroupDriver": "cgroupfs", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelMemory": true, + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SecurityOptions": [ + "apparmor", + "seccomp", + "selinux" + ], + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.24/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.12.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.6.3", + "GitCommit": "deadbee", + "Arch": "amd64", + "ApiVersion": "1.24", + "BuildTime": "2016-06-14T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.24/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.24/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, load, pull, push, save, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +Docker daemon report the following event: + + reload + +**Example request**: + + GET /v1.24/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.12.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` or `daemon` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + - `daemon=`; -- daemon name or id to filter + +**Status codes**: + +- **200** – no error +- **400** - bad parameter +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.24/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.24/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.24/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"} + ... + {"stream":"Loaded image: busybox:latest\n"} + +**Example response**: + +If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress +details are suppressed, and only a confirmation message is returned once the +action completes. + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"stream":"Loaded image: busybox:latest\n"} + +**Query parameters**: + +- **quiet** – Boolean value, suppress progress details during load. Defaults + to `0` / `false` if omitted. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.24/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.24/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.24/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 3.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.24/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Labels": null, + "Scope": "local" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: + - `name=` Matches all or part of a volume name. + - `dangling=` When set to `true` (or `1`), returns all volumes that are "dangling" (not in use by a container). When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. + - `driver=` Matches all or part of a volume driver name. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.24/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + }, + "Driver": "custom" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "custom", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Status": { + "hello": "world" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + }, + "Scope": "local" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. +- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}` + +**JSON fields in response**: + +Refer to the [inspect a volume](#inspect-a-volume) section or details about the +JSON fields returned in the response. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /v1.24/volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "custom", + "Mountpoint": "/var/lib/docker/volumes/tardis/_data", + "Status": { + "hello": "world" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + }, + "Scope": "local" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +**JSON fields in response**: + +The following fields can be returned in the API response. Empty fields, or +fields that are not supported by the volume's driver may be omitted in the +response. + +- **Name** - Name of the volume. +- **Driver** - Name of the volume driver used by the volume. +- **Mountpoint** - Mount path of the volume on the host. +- **Status** - Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. + The `Status` field is optional, and is omitted if the volume driver does not + support this feature. +- **Labels** - Labels set on the volume, specified as a map: `{"key":"value","key2":"value2"}`. +- **Scope** - Scope describes the level at which the volume exists, can be one of + `global` for cluster-wide or `local` for machine level. The default is `local`. + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.24/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 3.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.24/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network id. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.24/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal": false, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.24/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "EnableIPv6": true, + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **403** - operation not supported for pre-defined networks +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **Internal** - Restrict external access to the network +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **EnableIPv6** - Enable IPv6 on the network +- **Options** - Network specific options to be used by the drivers +- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}` + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for swarm scoped networks +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for swarm scoped networks +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such network +- **500** - server error + +### 3.6 Plugins (experimental) + +#### List plugins + +`GET /plugins` + +Returns information about installed plugins. + +**Example request**: + + GET /v1.24/plugins HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } + } +] +``` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Install a plugin + +`POST /plugins/pull?name=` + +Pulls and installs a plugin. After the plugin is installed, it can be enabled +using the [`POST /plugins/(plugin name)/enable` endpoint](#enable-a-plugin). + +**Example request**: + +``` +POST /v1.24/plugins/pull?name=tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. When using +this endpoint to pull a plugin from the registry, the `X-Registry-Auth` header +can be used to include a base64-encoded AuthConfig object. Refer to the [create +an image](#create-an-image) section for more details. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 175 + +[ + { + "Name": "network", + "Description": "", + "Value": [ + "host" + ] + }, + { + "Name": "mount", + "Description": "", + "Value": [ + "/data" + ] + }, + { + "Name": "device", + "Description": "", + "Value": [ + "/dev/cpu_dma_latency" + ] + } +] +``` + +**Query parameters**: + +- **name** - Name of the plugin to pull. The name may include a tag or digest. + This parameter is required. + +**Status codes**: + +- **200** - no error +- **500** - error parsing reference / not a valid repository/tag: repository + name must have at least one component +- **500** - plugin already exists + +#### Inspect a plugin + +`GET /plugins/(plugin name)` + +Returns detailed information about an installed plugin. + +**Example request**: + +``` +GET /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": false, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed + +#### Enable a plugin + +`POST /plugins/(plugin name)/enable` + +Enables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/enable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is already enabled + +#### Disable a plugin + +`POST /plugins/(plugin name)/disable` + +Disables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/disable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is already disabled + +#### Remove a plugin + +`DELETE /plugins/(plugin name)` + +Removes a plugin + +**Example request**: + +``` +DELETE /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is active + + + +### 3.7 Nodes + +**Note**: Node operations require the engine to be part of a swarm. + +#### List nodes + + +`GET /nodes` + +List nodes + +**Example request**: + + GET /v1.24/nodes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "ID": "24ifsmvkjbyhk", + "Version": { + "Index": 8 + }, + "CreatedAt": "2016-06-07T20:31:11.853781916Z", + "UpdatedAt": "2016-06-07T20:31:11.999868824Z", + "Spec": { + "Name": "my-node", + "Role": "manager", + "Availability": "active" + "Labels": { + "foo": "bar" + } + }, + "Description": { + "Hostname": "bf3067039e47", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 4000000000, + "MemoryBytes": 8272408576 + }, + "Engine": { + "EngineVersion": "1.12.0", + "Labels": { + "foo": "bar", + } + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "bridge" + } + { + "Type": "Network", + "Name": "null" + } + { + "Type": "Network", + "Name": "overlay" + } + ] + } + }, + "Status": { + "State": "ready" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "172.17.0.2:2377"" + } + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + nodes list. Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `role=`(`manager`|`worker`)` + +**Status codes**: + +- **200** – no error +- **406** - node is not part of a swarm +- **500** – server error + +#### Inspect a node + + +`GET /nodes/(id or name)` + +Return low-level information on the node `id` + +**Example request**: + + GET /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ID": "24ifsmvkjbyhk", + "Version": { + "Index": 8 + }, + "CreatedAt": "2016-06-07T20:31:11.853781916Z", + "UpdatedAt": "2016-06-07T20:31:11.999868824Z", + "Spec": { + "Name": "my-node", + "Role": "manager", + "Availability": "active" + "Labels": { + "foo": "bar" + } + }, + "Description": { + "Hostname": "bf3067039e47", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 4000000000, + "MemoryBytes": 8272408576 + }, + "Engine": { + "EngineVersion": "1.12.0", + "Labels": { + "foo": "bar", + } + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "bridge" + } + { + "Type": "Network", + "Name": "null" + } + { + "Type": "Network", + "Name": "overlay" + } + ] + } + }, + "Status": { + "State": "ready" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "172.17.0.2:2377"" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such node +- **406** – node is not part of a swarm +- **500** – server error + +#### Remove a node + + +`DELETE /nodes/(id or name)` + +Remove a node from the swarm. + +**Example request**: + + DELETE /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **force** - 1/True/true or 0/False/false, Force remove a node from the swarm. + Default `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such node +- **406** – node is not part of a swarm +- **500** – server error + +#### Update a node + + +`POST /nodes/(id)/update` + +Update a node. + +The payload of the `POST` request is the new `NodeSpec` and +overrides the current `NodeSpec` for the specified node. + +If `Availability` or `Role` are omitted, this returns an +error. Any other field omitted resets the current value to either +an empty value or the default cluster-wide value. + +**Example Request** + + POST /v1.24/nodes/24ifsmvkjbyhk/update?version=8 HTTP/1.1 + Content-Type: application/json + + { + "Availability": "active", + "Name": "node-name", + "Role": "manager", + "Labels": { + "foo": "bar" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **version** – The version number of the node object being updated. This is + required to avoid conflicting writes. + +JSON Parameters: + +- **Annotations** – Optional medata to associate with the node. + - **Name** – User-defined name for the node. + - **Labels** – A map of labels to associate with the node (e.g., + `{"key":"value", "key2":"value2"}`). +- **Role** - Role of the node (worker/manager). +- **Availability** - Availability of the node (active/pause/drain). + + +**Status codes**: + +- **200** – no error +- **404** – no such node +- **406** – node is not part of a swarm +- **500** – server error + +### 3.8 Swarm + +#### Inspect swarm + + +`GET /swarm` + +Inspect swarm + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CreatedAt" : "2016-08-15T16:00:20.349727406Z", + "Spec" : { + "Dispatcher" : { + "HeartbeatPeriod" : 5000000000 + }, + "Orchestration" : { + "TaskHistoryRetentionLimit" : 10 + }, + "CAConfig" : { + "NodeCertExpiry" : 7776000000000000 + }, + "Raft" : { + "LogEntriesForSlowFollowers" : 500, + "HeartbeatTick" : 1, + "SnapshotInterval" : 10000, + "ElectionTick" : 3 + }, + "TaskDefaults" : {}, + "Name" : "default" + }, + "JoinTokens" : { + "Worker" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a", + "Manager" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + }, + "ID" : "70ilmkj2f6sp2137c753w2nmt", + "UpdatedAt" : "2016-08-15T16:32:09.623207604Z", + "Version" : { + "Index" : 51 + } + } + +**Status codes**: + +- **200** - no error +- **406** – node is not part of a swarm +- **500** - sever error + +#### Initialize a new swarm + + +`POST /swarm/init` + +Initialize a new swarm. The body of the HTTP response includes the node ID. + +**Example request**: + + POST /v1.24/swarm/init HTTP/1.1 + Content-Type: application/json + + { + "ListenAddr": "0.0.0.0:2377", + "AdvertiseAddr": "192.168.1.1:2377", + "ForceNewCluster": false, + "Spec": { + "Orchestration": {}, + "Raft": {}, + "Dispatcher": {}, + "CAConfig": {} + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 28 + Content-Type: application/json + Date: Thu, 01 Sep 2016 21:49:13 GMT + Server: Docker/1.12.0 (linux) + + "7v2t30z9blmxuhnyo6s4cpenp" + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **406** – node is already part of a swarm +- **500** - server error + +JSON Parameters: + +- **ListenAddr** – Listen address used for inter-manager communication, as well as determining + the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an + address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port + number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is + used. +- **AdvertiseAddr** – Externally reachable address advertised to other nodes. This can either be + an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port + number, like `eth0:4567`. If the port number is omitted, the port number from the listen + address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when + possible. +- **ForceNewCluster** – Force creation of a new swarm. +- **Spec** – Configuration settings for the new swarm. + - **Orchestration** – Configuration settings for the orchestration aspects of the swarm. + - **TaskHistoryRetentionLimit** – Maximum number of tasks history stored. + - **Raft** – Raft related configuration. + - **SnapshotInterval** – Number of logs entries between snapshot. + - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot. + - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow + followers after a snapshot is created. + - **HeartbeatTick** – Amount of ticks (in seconds) between each heartbeat. + - **ElectionTick** – Amount of ticks (in seconds) needed without a leader to trigger a new + election. + - **Dispatcher** – Configuration settings for the task dispatcher. + - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher. + - **CAConfig** – Certificate authority configuration. + - **NodeCertExpiry** – Automatic expiry for nodes certificates. + - **ExternalCA** - Configuration for forwarding signing requests to an external + certificate authority. + - **Protocol** - Protocol for communication with the external CA + (currently only "cfssl" is supported). + - **URL** - URL where certificate signing requests should be sent. + - **Options** - An object with key/value pairs that are interpreted + as protocol-specific options for the external CA driver. + +#### Join an existing swarm + +`POST /swarm/join` + +Join an existing swarm + +**Example request**: + + POST /v1.24/swarm/join HTTP/1.1 + Content-Type: application/json + + { + "ListenAddr": "0.0.0.0:2377", + "AdvertiseAddr": "192.168.1.1:2377", + "RemoteAddrs": ["node1:2377"], + "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **406** – node is already part of a swarm +- **500** - server error + +JSON Parameters: + +- **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to + manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). +- **AdvertiseAddr** – Externally reachable address advertised to other nodes. This can either be + an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port + number, like `eth0:4567`. If the port number is omitted, the port number from the listen + address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when + possible. +- **RemoteAddr** – Address of any manager node already participating in the swarm. +- **JoinToken** – Secret token for joining this swarm. + +#### Leave a swarm + + +`POST /swarm/leave` + +Leave a swarm + +**Example request**: + + POST /v1.24/swarm/leave HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **force** - Boolean (0/1, false/true). Force leave swarm, even if this is the last manager or that it will break the cluster. + +**Status codes**: + +- **200** – no error +- **406** – node is not part of a swarm +- **500** - server error + +#### Update a swarm + + +`POST /swarm/update` + +Update a swarm + +**Example request**: + + POST /v1.24/swarm/update HTTP/1.1 + + { + "Name": "default", + "Orchestration": { + "TaskHistoryRetentionLimit": 10 + }, + "Raft": { + "SnapshotInterval": 10000, + "LogEntriesForSlowFollowers": 500, + "HeartbeatTick": 1, + "ElectionTick": 3 + }, + "Dispatcher": { + "HeartbeatPeriod": 5000000000 + }, + "CAConfig": { + "NodeCertExpiry": 7776000000000000 + }, + "JoinTokens": { + "Worker": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx", + "Manager": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + } + } + + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **version** – The version number of the swarm object being updated. This is + required to avoid conflicting writes. +- **rotateWorkerToken** - Set to `true` (or `1`) to rotate the worker join token. +- **rotateManagerToken** - Set to `true` (or `1`) to rotate the manager join token. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **406** – node is not part of a swarm +- **500** - server error + +JSON Parameters: + +- **Orchestration** – Configuration settings for the orchestration aspects of the swarm. + - **TaskHistoryRetentionLimit** – Maximum number of tasks history stored. +- **Raft** – Raft related configuration. + - **SnapshotInterval** – Number of logs entries between snapshot. + - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot. + - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow + followers after a snapshot is created. + - **HeartbeatTick** – Amount of ticks (in seconds) between each heartbeat. + - **ElectionTick** – Amount of ticks (in seconds) needed without a leader to trigger a new + election. +- **Dispatcher** – Configuration settings for the task dispatcher. + - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher. +- **CAConfig** – CA configuration. + - **NodeCertExpiry** – Automatic expiry for nodes certificates. + - **ExternalCA** - Configuration for forwarding signing requests to an external + certificate authority. + - **Protocol** - Protocol for communication with the external CA + (currently only "cfssl" is supported). + - **URL** - URL where certificate signing requests should be sent. + - **Options** - An object with key/value pairs that are interpreted + as protocol-specific options for the external CA driver. +- **JoinTokens** - Tokens that can be used by other nodes to join the swarm. + - **Worker** - Token to use for joining as a worker. + - **Manager** - Token to use for joining as a manager. + +### 3.9 Services + +**Note**: Service operations require to first be part of a swarm. + +#### List services + + +`GET /services` + +List services + +**Example request**: + + GET /v1.24/services HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "ID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Version": { + "Index": 19 + }, + "CreatedAt": "2016-06-07T21:05:51.880065305Z", + "UpdatedAt": "2016-06-07T21:07:29.962229872Z", + "Spec": { + "Name": "hopeful_cori", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + } + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + } + }, + "Endpoint": { + "Spec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + }, + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ], + "VirtualIPs": [ + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.2/16" + }, + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.3/16" + } + ] + } + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + services list. Available filters: + - `id=` + - `label=` + - `name=` + +**Status codes**: + +- **200** – no error +- **406** – node is not part of a swarm +- **500** – server error + +#### Create a service + +`POST /services/create` + +Create a service. When using this endpoint to create a service using a private +repository from the registry, the `X-Registry-Auth` header must be used to +include a base64-encoded AuthConfig object. Refer to the [create an +image](#create-an-image) section for more details. + +**Example request**: + + POST /v1.24/services/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "web", + "TaskTemplate": { + "ContainerSpec": { + "Image": "nginx:alpine", + "Mounts": [ + { + "ReadOnly": true, + "Source": "web-data", + "Target": "/usr/share/nginx/html", + "Type": "volume", + "VolumeOptions": { + "DriverConfig": { + }, + "Labels": { + "com.example.something": "something-value" + } + } + } + ], + "User": "33" + }, + "Networks": [ + { + "Target": "overlay1" + } + ], + "LogDriver": { + "Name": "json-file", + "Options": { + "max-file": "3", + "max-size": "10M" + } + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + }, + "Resources": { + "Limits": { + "MemoryBytes": 104857600 + }, + "Reservations": { + } + }, + "RestartPolicy": { + "Condition": "on-failure", + "Delay": 10000000000, + "MaxAttempts": 10 + } + }, + "Mode": { + "Replicated": { + "Replicas": 4 + } + }, + "UpdateConfig": { + "Delay": 30000000000, + "Parallelism": 2, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Ports": [ + { + "Protocol": "tcp", + "PublishedPort": 8080, + "TargetPort": 80 + } + ] + }, + "Labels": { + "foo": "bar" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "ID":"ak7w3gjqoa3kuz8xcpnyy0pvl" + } + +**Status codes**: + +- **201** – no error +- **403** - network is not eligible for services +- **406** – node is not part of a swarm +- **409** – name conflicts with an existing object +- **500** - server error + +**JSON Parameters**: + +- **Name** – User-defined name for the service. +- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`). +- **TaskTemplate** – Specification of the tasks to start as part of the new service. + - **ContainerSpec** - Container settings for containers started as part of this task. + - **Image** – A string specifying the image name to use for the container. + - **Command** – The command to be run in the image. + - **Args** – Arguments to the command. + - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`. + - **Dir** – A string specifying the working directory for commands to run in. + - **User** – A string value specifying the user inside the container. + - **Labels** – A map of labels to associate with the service (e.g., + `{"key":"value", "key2":"value2"}`). + - **Mounts** – Specification for mounts to be added to containers + created as part of the service. + - **Target** – Container path. + - **Source** – Mount source (e.g. a volume name, a host path). + - **Type** – The mount type (`bind`, or `volume`). + - **ReadOnly** – A boolean indicating whether the mount should be read-only. + - **BindOptions** - Optional configuration for the `bind` type. + - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + - **VolumeOptions** – Optional configuration for the `volume` type. + - **NoCopy** – A boolean indicating if volume should be + populated with the data from the target. (Default false) + - **Labels** – User-defined name and labels for the volume. + - **DriverConfig** – Map of driver-specific options. + - **Name** - Name of the driver to use to create the volume. + - **Options** - key/value map of driver specific options. + - **StopGracePeriod** – Amount of time to wait for the container to terminate before + forcefully killing it. + - **LogDriver** - Log configuration for containers created as part of the + service. + - **Name** - Name of the logging driver to use (`json-file`, `syslog`, + `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`). + - **Options** - Driver-specific options. + - **Resources** – Resource requirements which apply to each individual container created as part + of the service. + - **Limits** – Define resources limits. + - **NanoCPUs** – CPU limit in units of 10-9 CPU shares. + - **MemoryBytes** – Memory limit in Bytes. + - **Reservation** – Define resources reservation. + - **NanoCPUs** – CPU reservation in units of 10-9 CPU shares. + - **MemoryBytes** – Memory reservation in Bytes. + - **RestartPolicy** – Specification for the restart policy which applies to containers created + as part of this service. + - **Condition** – Condition for restart (`none`, `on-failure`, or `any`). + - **Delay** – Delay between restart attempts. + - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value + is 0, which is ignored). + - **Window** – Windows is the time window used to evaluate the restart policy (default value is + 0, which is unbounded). + - **Placement** – Restrictions on where a service can run. + - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`. +- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`). +- **UpdateConfig** – Specification for the update strategy of the service. + - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited + parallelism). + - **Delay** – Amount of time between updates. + - **FailureAction** - Action to take if an updated task fails to run, or stops running during the + update. Values are `continue` and `pause`. +- **Networks** – Array of network names or IDs to attach the service to. +- **EndpointSpec** – Properties that can be configured to access and load balance a service. + - **Mode** – The mode of resolution to use for internal load balancing + between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided. + - **Ports** – List of exposed ports that this service is accessible on from + the outside, in the form of: + `{"Protocol": <"tcp"|"udp">, "PublishedPort": , "TargetPort": }`. + Ports can only be provided if `vip` resolution mode is used. + +**Request Headers**: + +- **Content-type** – Set to `"application/json"`. +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either + login information, or a token. Refer to the [create an image](#create-an-image) + section for more details. + + +#### Remove a service + + +`DELETE /services/(id or name)` + +Stop and remove the service `id` + +**Example request**: + + DELETE /v1.24/services/16253994b7c4 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Status codes**: + +- **200** – no error +- **404** – no such service +- **406** - node is not part of a swarm +- **500** – server error + +#### Inspect one or more services + + +`GET /services/(id or name)` + +Return information on the service `id`. + +**Example request**: + + GET /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha HTTP/1.1 + +**Example response**: + + { + "ID": "ak7w3gjqoa3kuz8xcpnyy0pvl", + "Version": { + "Index": 95 + }, + "CreatedAt": "2016-06-07T21:10:20.269723157Z", + "UpdatedAt": "2016-06-07T21:10:20.276301259Z", + "Spec": { + "Name": "redis", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + } + }, + "Endpoint": { + "Spec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + }, + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ], + "VirtualIPs": [ + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.4/16" + } + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such service +- **406** - node is not part of a swarm +- **500** – server error + +#### Update a service + +`POST /services/(id)/update` + +Update a service. When using this endpoint to create a service using a +private repository from the registry, the `X-Registry-Auth` header can be used +to update the authentication information for that is stored for the service. +The header contains a base64-encoded AuthConfig object. Refer to the [create an +image](#create-an-image) section for more details. + +**Example request**: + + POST /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha/update?version=23 HTTP/1.1 + Content-Type: application/json + + { + "Name": "top", + "TaskTemplate": { + "ContainerSpec": { + "Image": "busybox", + "Args": [ + "top" + ] + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1 + }, + "EndpointSpec": { + "Mode": "vip" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**JSON Parameters**: + +- **Name** – User-defined name for the service. Note that renaming services is not supported. +- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`). +- **TaskTemplate** – Specification of the tasks to start as part of the new service. + - **ContainerSpec** - Container settings for containers started as part of this task. + - **Image** – A string specifying the image name to use for the container. + - **Command** – The command to be run in the image. + - **Args** – Arguments to the command. + - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`. + - **Dir** – A string specifying the working directory for commands to run in. + - **User** – A string value specifying the user inside the container. + - **Labels** – A map of labels to associate with the service (e.g., + `{"key":"value", "key2":"value2"}`). + - **Mounts** – Specification for mounts to be added to containers created as part of the new + service. + - **Target** – Container path. + - **Source** – Mount source (e.g. a volume name, a host path). + - **Type** – The mount type (`bind`, or `volume`). + - **ReadOnly** – A boolean indicating whether the mount should be read-only. + - **BindOptions** - Optional configuration for the `bind` type + - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + - **VolumeOptions** – Optional configuration for the `volume` type. + - **NoCopy** – A boolean indicating if volume should be + populated with the data from the target. (Default false) + - **Labels** – User-defined name and labels for the volume. + - **DriverConfig** – Map of driver-specific options. + - **Name** - Name of the driver to use to create the volume + - **Options** - key/value map of driver specific options + - **StopGracePeriod** – Amount of time to wait for the container to terminate before + forcefully killing it. + - **Resources** – Resource requirements which apply to each individual container created as part + of the service. + - **Limits** – Define resources limits. + - **CPU** – CPU limit + - **Memory** – Memory limit + - **Reservation** – Define resources reservation. + - **CPU** – CPU reservation + - **Memory** – Memory reservation + - **RestartPolicy** – Specification for the restart policy which applies to containers created + as part of this service. + - **Condition** – Condition for restart (`none`, `on-failure`, or `any`). + - **Delay** – Delay between restart attempts. + - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value + is 0, which is ignored). + - **Window** – Windows is the time window used to evaluate the restart policy (default value is + 0, which is unbounded). + - **Placement** – Restrictions on where a service can run. + - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`. +- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`). +- **UpdateConfig** – Specification for the update strategy of the service. + - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited + parallelism). + - **Delay** – Amount of time between updates. +- **Networks** – Array of network names or IDs to attach the service to. +- **EndpointSpec** – Properties that can be configured to access and load balance a service. + - **Mode** – The mode of resolution to use for internal load balancing + between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided. + - **Ports** – List of exposed ports that this service is accessible on from + the outside, in the form of: + `{"Protocol": <"tcp"|"udp">, "PublishedPort": , "TargetPort": }`. + Ports can only be provided if `vip` resolution mode is used. + +**Query parameters**: + +- **version** – The version number of the service object being updated. This is + required to avoid conflicting writes. + +**Request Headers**: + +- **Content-type** – Set to `"application/json"`. +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either + login information, or a token. Refer to the [create an image](#create-an-image) + section for more details. + +**Status codes**: + +- **200** – no error +- **404** – no such service +- **406** - node is not part of a swarm +- **500** – server error + +### 3.10 Tasks + +**Note**: Task operations require the engine to be part of a swarm. + +#### List tasks + + +`GET /tasks` + +List tasks + +**Example request**: + + GET /v1.24/tasks HTTP/1.1 + +**Example response**: + + [ + { + "ID": "0kzzo1i0y4jz6027t0k7aezc7", + "Version": { + "Index": 71 + }, + "CreatedAt": "2016-06-07T21:07:31.171892745Z", + "UpdatedAt": "2016-06-07T21:07:31.376370513Z", + "Spec": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Slot": 1, + "NodeID": "60gvrl6tm78dmak4yl7srz94v", + "Status": { + "Timestamp": "2016-06-07T21:07:31.290032978Z", + "State": "running", + "Message": "started", + "ContainerStatus": { + "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035", + "PID": 677 + } + }, + "DesiredState": "running", + "NetworksAttachments": [ + { + "Network": { + "ID": "4qvuz4ko70xaltuqbt8956gd1", + "Version": { + "Index": 18 + }, + "CreatedAt": "2016-06-07T20:31:11.912919752Z", + "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + "Spec": { + "Name": "ingress", + "Labels": { + "com.docker.swarm.internal": "true" + }, + "DriverConfiguration": {}, + "IPAMOptions": { + "Driver": {}, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + } + }, + "IPAMOptions": { + "Driver": { + "Name": "default" + }, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "Addresses": [ + "10.255.0.10/16" + ] + } + ], + }, + { + "ID": "1yljwbmlr8er2waf8orvqpwms", + "Version": { + "Index": 30 + }, + "CreatedAt": "2016-06-07T21:07:30.019104782Z", + "UpdatedAt": "2016-06-07T21:07:30.231958098Z", + "Name": "hopeful_cori", + "Spec": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Slot": 1, + "NodeID": "60gvrl6tm78dmak4yl7srz94v", + "Status": { + "Timestamp": "2016-06-07T21:07:30.202183143Z", + "State": "shutdown", + "Message": "shutdown", + "ContainerStatus": { + "ContainerID": "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + } + }, + "DesiredState": "shutdown", + "NetworksAttachments": [ + { + "Network": { + "ID": "4qvuz4ko70xaltuqbt8956gd1", + "Version": { + "Index": 18 + }, + "CreatedAt": "2016-06-07T20:31:11.912919752Z", + "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + "Spec": { + "Name": "ingress", + "Labels": { + "com.docker.swarm.internal": "true" + }, + "DriverConfiguration": {}, + "IPAMOptions": { + "Driver": {}, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + } + }, + "IPAMOptions": { + "Driver": { + "Name": "default" + }, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "Addresses": [ + "10.255.0.5/16" + ] + } + ] + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + services list. Available filters: + - `id=` + - `name=` + - `service=` + - `node=` + - `label=key` or `label="key=value"` + - `desired-state=(running | shutdown | accepted)` + +**Status codes**: + +- **200** – no error +- **406** - node is not part of a swarm +- **500** – server error + +#### Inspect a task + + +`GET /tasks/(id)` + +Get details on the task `id` + +**Example request**: + + GET /v1.24/tasks/0kzzo1i0y4jz6027t0k7aezc7 HTTP/1.1 + +**Example response**: + + { + "ID": "0kzzo1i0y4jz6027t0k7aezc7", + "Version": { + "Index": 71 + }, + "CreatedAt": "2016-06-07T21:07:31.171892745Z", + "UpdatedAt": "2016-06-07T21:07:31.376370513Z", + "Spec": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Slot": 1, + "NodeID": "60gvrl6tm78dmak4yl7srz94v", + "Status": { + "Timestamp": "2016-06-07T21:07:31.290032978Z", + "State": "running", + "Message": "started", + "ContainerStatus": { + "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035", + "PID": 677 + } + }, + "DesiredState": "running", + "NetworksAttachments": [ + { + "Network": { + "ID": "4qvuz4ko70xaltuqbt8956gd1", + "Version": { + "Index": 18 + }, + "CreatedAt": "2016-06-07T20:31:11.912919752Z", + "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + "Spec": { + "Name": "ingress", + "Labels": { + "com.docker.swarm.internal": "true" + }, + "DriverConfiguration": {}, + "IPAMOptions": { + "Driver": {}, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + } + }, + "IPAMOptions": { + "Driver": { + "Name": "default" + }, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "Addresses": [ + "10.255.0.10/16" + ] + } + ] + } + +**Status codes**: + +- **200** – no error +- **404** – unknown task +- **406** - node is not part of a swarm +- **500** – server error + +## 4. Going further + +### 4.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 4.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 4.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/moby/moby/docs/api/version-history.md b/vendor/github.com/moby/moby/docs/api/version-history.md new file mode 100644 index 0000000..5f3a006 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/version-history.md @@ -0,0 +1,256 @@ +--- +title: "Engine API version history" +description: "Documentation of changes that have been made to Engine API." +keywords: "API, Docker, rcli, REST, documentation" +--- + + + +## v1.27 API changes + +[Docker Engine API v1.27](https://docs.docker.com/engine/api/v1.27/) documentation + +* `GET /containers/(id or name)/stats` now includes an `online_cpus` field in both `precpu_stats` and `cpu_stats`. If this field is `nil` then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. + +## v1.26 API changes + +[Docker Engine API v1.26](https://docs.docker.com/engine/api/v1.26/) documentation + +* `POST /plugins/(plugin name)/upgrade` upgrade a plugin. + +## v1.25 API changes + +[Docker Engine API v1.25](https://docs.docker.com/engine/api/v1.25/) documentation + +* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`. +* `GET /version` now returns `MinAPIVersion`. +* `POST /build` accepts `networkmode` parameter to specify network used during build. +* `GET /images/(name)/json` now returns `OsVersion` if populated +* `GET /info` now returns `Isolation`. +* `POST /containers/create` now takes `AutoRemove` in HostConfig, to enable auto-removal of the container on daemon side when the container's process exits. +* `GET /containers/json` and `GET /containers/(id or name)/json` now return `"removing"` as a value for the `State.Status` field if the container is being removed. Previously, "exited" was returned as status. +* `GET /containers/json` now accepts `removing` as a valid value for the `status` filter. +* `GET /containers/json` now supports filtering containers by `health` status. +* `DELETE /volumes/(name)` now accepts a `force` query parameter to force removal of volumes that were already removed out of band by the volume driver plugin. +* `POST /containers/create/` and `POST /containers/(name)/update` now validates restart policies. +* `POST /containers/create` now validates IPAMConfig in NetworkingConfig, and returns error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`). +* `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds`, `Volumes`, and `Tmpfs`. *note*: `Binds`, `Volumes`, and `Tmpfs` are still available and can be combined with `Mounts`. +* `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applied to all API versions. +* `POST /build` accepts `cachefrom` parameter to specify images used for build cache. +* `GET /networks/` endpoint now correctly returns a list of *all* networks, + instead of the default network if a trailing slash is provided, but no `name` + or `id`. +* `DELETE /containers/(name)` endpoint now returns an error of `removal of container name is already in progress` with status code of 400, when container name is in a state of removal in progress. +* `GET /containers/json` now supports a `is-task` filter to filter + containers that are tasks (part of a service in swarm mode). +* `POST /containers/create` now takes `StopTimeout` field. +* `POST /services/create` and `POST /services/(id or name)/update` now accept `Monitor` and `MaxFailureRatio` parameters, which control the response to failures during service updates. +* `POST /services/(id or name)/update` now accepts a `ForceUpdate` parameter inside the `TaskTemplate`, which causes the service to be updated even if there are no changes which would ordinarily trigger an update. +* `POST /services/create` and `POST /services/(id or name)/update` now return a `Warnings` array. +* `GET /networks/(name)` now returns field `Created` in response to show network created time. +* `POST /containers/(id or name)/exec` now accepts an `Env` field, which holds a list of environment variables to be set in the context of the command execution. +* `GET /volumes`, `GET /volumes/(name)`, and `POST /volumes/create` now return the `Options` field which holds the driver specific options to use for when creating the volume. +* `GET /exec/(id)/json` now returns `Pid`, which is the system pid for the exec'd process. +* `POST /containers/prune` prunes stopped containers. +* `POST /images/prune` prunes unused images. +* `POST /volumes/prune` prunes unused volumes. +* `POST /networks/prune` prunes unused networks. +* Every API response now includes a `Docker-Experimental` header specifying if experimental features are enabled (value can be `true` or `false`). +* Every API response now includes a `API-Version` header specifying the default API version of the server. +* The `hostConfig` option now accepts the fields `CpuRealtimePeriod` and `CpuRtRuntime` to allocate cpu runtime to rt tasks when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel. +* The `SecurityOptions` field within the `GET /info` response now includes `userns` if user namespaces are enabled in the daemon. +* `GET /nodes` and `GET /node/(id or name)` now return `Addr` as part of a node's `Status`, which is the address that that node connects to the manager from. +* The `HostConfig` field now includes `NanoCPUs` that represents CPU quota in units of 10-9 CPUs. +* `GET /info` now returns more structured information about security options. +* The `HostConfig` field now includes `CpuCount` that represents the number of CPUs available for execution by the container. Windows daemon only. +* `POST /services/create` and `POST /services/(id or name)/update` now accept the `TTY` parameter, which allocate a pseudo-TTY in container. +* `POST /services/create` and `POST /services/(id or name)/update` now accept the `DNSConfig` parameter, which specifies DNS related configurations in resolver configuration file (resolv.conf) through `Nameservers`, `Search`, and `Options`. +* `GET /networks/(id or name)` now includes IP and name of all peers nodes for swarm mode overlay networks. +* `GET /plugins` list plugins. +* `POST /plugins/pull?name=` pulls a plugin. +* `GET /plugins/(plugin name)` inspect a plugin. +* `POST /plugins/(plugin name)/set` configure a plugin. +* `POST /plugins/(plugin name)/enable` enable a plugin. +* `POST /plugins/(plugin name)/disable` disable a plugin. +* `POST /plugins/(plugin name)/push` push a plugin. +* `POST /plugins/create?name=(plugin name)` create a plugin. +* `DELETE /plugins/(plugin name)` delete a plugin. +* `POST /node/(id or name)/update` now accepts both `id` or `name` to identify the node to update. +* `GET /images/json` now support a `reference` filter. +* `GET /secrets` returns information on the secrets. +* `POST /secrets/create` creates a secret. +* `DELETE /secrets/{id}` removes the secret `id`. +* `GET /secrets/{id}` returns information on the secret `id`. +* `POST /secrets/{id}/update` updates the secret `id`. +* `POST /services/(id or name)/update` now accepts service name or prefix of service id as a parameter. + +## v1.24 API changes + +[Docker Engine API v1.24](v1.24.md) documentation + +* `POST /containers/create` now takes `StorageOpt` field. +* `GET /info` now returns `SecurityOptions` field, showing if `apparmor`, `seccomp`, or `selinux` is supported. +* `GET /info` no longer returns the `ExecutionDriver` property. This property was no longer used after integration + with ContainerD in Docker 1.11. +* `GET /networks` now supports filtering by `label` and `driver`. +* `GET /containers/json` now supports filtering containers by `network` name or id. +* `POST /containers/create` now takes `IOMaximumBandwidth` and `IOMaximumIOps` fields. Windows daemon only. +* `POST /containers/create` now returns an HTTP 400 "bad parameter" message + if no command is specified (instead of an HTTP 500 "server error") +* `GET /images/search` now takes a `filters` query parameter. +* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded. +* `GET /events` now supports filtering by daemon name or ID. +* `GET /events` now supports a `detach` event that is emitted on detaching from container process. +* `GET /events` now supports an `exec_detach ` event that is emitted on detaching from exec process. +* `GET /images/json` now supports filters `since` and `before`. +* `POST /containers/(id or name)/start` no longer accepts a `HostConfig`. +* `POST /images/(name)/tag` no longer has a `force` query parameter. +* `GET /images/search` now supports maximum returned search results `limit`. +* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version. +* API errors are now returned as JSON instead of plain text. +* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container. +* `POST /containers//exec` and `POST /exec//start` + no longer expects a "Container" field to be present. This property was not used + and is no longer sent by the docker client. +* `POST /containers/create/` now validates the hostname (should be a valid RFC 1123 hostname). +* `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:`, + to have the container join the PID namespace of an existing container. + +## v1.23 API changes + +[Docker Engine API v1.23](v1.23.md) documentation + +* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`. +* `GET /containers/json` returns the mount points for the container. +* `GET /networks/(name)` now returns an `Internal` field showing whether the network is internal or not. +* `GET /networks/(name)` now returns an `EnableIPv6` field showing whether the network has ipv6 enabled or not. +* `POST /containers/(name)/update` now supports updating container's restart policy. +* `POST /networks/create` now supports enabling ipv6 on the network by setting the `EnableIPv6` field (doing this with a label will no longer work). +* `GET /info` now returns `CgroupDriver` field showing what cgroup driver the daemon is using; `cgroupfs` or `systemd`. +* `GET /info` now returns `KernelMemory` field, showing if "kernel memory limit" is supported. +* `POST /containers/create` now takes `PidsLimit` field, if the kernel is >= 4.3 and the pids cgroup is supported. +* `GET /containers/(id or name)/stats` now returns `pids_stats`, if the kernel is >= 4.3 and the pids cgroup is supported. +* `POST /containers/create` now allows you to override usernamespaces remapping and use privileged options for the container. +* `POST /containers/create` now allows specifying `nocopy` for named volumes, which disables automatic copying from the container path to the volume. +* `POST /auth` now returns an `IdentityToken` when supported by a registry. +* `POST /containers/create` with both `Hostname` and `Domainname` fields specified will result in the container's hostname being set to `Hostname`, rather than `Hostname.Domainname`. +* `GET /volumes` now supports more filters, new added filters are `name` and `driver`. +* `GET /containers/(id or name)/logs` now accepts a `details` query parameter to stream the extra attributes that were provided to the containers `LogOpts`, such as environment variables and labels, with the logs. +* `POST /images/load` now returns progress information as a JSON stream, and has a `quiet` query parameter to suppress progress details. + +## v1.22 API changes + +[Docker Engine API v1.22](v1.22.md) documentation + +* `POST /container/(name)/update` updates the resources of a container. +* `GET /containers/json` supports filter `isolation` on Windows. +* `GET /containers/json` now returns the list of networks of containers. +* `GET /info` Now returns `Architecture` and `OSType` fields, providing information + about the host architecture and operating system type that the daemon runs on. +* `GET /networks/(name)` now returns a `Name` field for each container attached to the network. +* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it + consistent with other date/time values returned by the API. +* `AuthConfig` now supports a `registrytoken` for token based authentication +* `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory` +* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create` + will be cancelled if the HTTP connection making the API request is closed before + the push or pull completes. +* `POST /containers/create` now allows you to set a read/write rate limit for a + device (in bytes per second or IO per second). +* `GET /networks` now supports filtering by `name`, `id` and `type`. +* `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container. +* `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container. +* `GET /info` now includes the number of containers running, stopped, and paused. +* `POST /networks/create` now supports restricting external access to the network by setting the `Internal` field. +* `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from network +* `GET /containers/(id)/json` now returns the `NetworkID` of containers. +* `POST /networks/create` Now supports an options field in the IPAM config that provides options + for custom IPAM plugins. +* `GET /networks/{network-id}` Now returns IPAM config options for custom IPAM plugins if any + are available. +* `GET /networks/` now returns subnets info for user-defined networks. +* `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications + that are built on top of engine. + +## v1.21 API changes + +[Docker Engine API v1.21](v1.21.md) documentation + +* `GET /volumes` lists volumes from all volume drivers. +* `POST /volumes/create` to create a volume. +* `GET /volumes/(name)` get low-level information about a volume. +* `DELETE /volumes/(name)` remove a volume with the specified name. +* `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable. +* `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`. +* The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container. +* `GET /containers/(id)/stats` will return networking information respectively for each interface. +* The `HostConfig` option now includes the `DnsOptions` field to configure the container's DNS options. +* `POST /build` now optionally takes a serialized map of build-time variables. +* `GET /events` now includes a `timenano` field, in addition to the existing `time` field. +* `GET /events` now supports filtering by image and container labels. +* `GET /info` now lists engine version information and return the information of `CPUShares` and `Cpuset`. +* `GET /containers/json` will return `ImageID` of the image used by container. +* `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused. +* `POST /containers/create` now takes `KernelMemory` in HostConfig to specify kernel memory limit. +* `GET /containers/(name)/json` now accepts a `size` parameter. Setting this parameter to '1' returns container size information in the `SizeRw` and `SizeRootFs` fields. +* `GET /containers/(name)/json` now returns a `NetworkSettings.Networks` field, + detailing network settings per network. This field deprecates the + `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, + `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which + are still returned for backward-compatibility, but will be removed in a future version. +* `GET /exec/(id)/json` now returns a `NetworkSettings.Networks` field, + detailing networksettings per network. This field deprecates the + `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, + `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which + are still returned for backward-compatibility, but will be removed in a future version. +* The `HostConfig` option now includes the `OomScoreAdj` field for adjusting the + badness heuristic. This heuristic selects which processes the OOM killer kills + under out-of-memory conditions. + +## v1.20 API changes + +[Docker Engine API v1.20](v1.20.md) documentation + +* `GET /containers/(id)/archive` get an archive of filesystem content from a container. +* `PUT /containers/(id)/archive` upload an archive of content to be extracted to +an existing directory inside a container's filesystem. +* `POST /containers/(id)/copy` is deprecated in favor of the above `archive` +endpoint which can be used to download files and directories from a container. +* The `hostConfig` option now accepts the field `GroupAdd`, which specifies a +list of additional groups that the container process will run as. + +## v1.19 API changes + +[Docker Engine API v1.19](v1.19.md) documentation + +* When the daemon detects a version mismatch with the client, usually when +the client is newer than the daemon, an HTTP 400 is now returned instead +of a 404. +* `GET /containers/(id)/stats` now accepts `stream` bool to get only one set of stats and disconnect. +* `GET /containers/(id)/logs` now accepts a `since` timestamp parameter. +* `GET /info` The fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and +`SwapLimit` are now returned as boolean instead of as an int. In addition, the +end point now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and +`OomKillDisable`. +* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota` +* `POST /build` accepts `cpuperiod` and `cpuquota` options + +## v1.18 API changes + +[Docker Engine API v1.18](v1.18.md) documentation + +* `GET /version` now returns `Os`, `Arch` and `KernelVersion`. +* `POST /containers/create` and `POST /containers/(id)/start`allow you to set ulimit settings for use in the container. +* `GET /info` now returns `SystemTime`, `HttpProxy`,`HttpsProxy` and `NoProxy`. +* `GET /images/json` added a `RepoDigests` field to include image digest information. +* `POST /build` can now set resource constraints for all containers created for the build. +* `CgroupParent` can be passed in the host config to setup container cgroups under a specific cgroup. +* `POST /build` closing the HTTP request cancels the build +* `POST /containers/(id)/exec` includes `Warnings` field to response. diff --git a/vendor/github.com/moby/moby/docs/deprecated.md b/vendor/github.com/moby/moby/docs/deprecated.md new file mode 100644 index 0000000..3ac563b --- /dev/null +++ b/vendor/github.com/moby/moby/docs/deprecated.md @@ -0,0 +1,290 @@ +--- +aliases: ["/engine/misc/deprecated/"] +title: "Deprecated Engine Features" +description: "Deprecated Features." +keywords: "docker, documentation, about, technology, deprecate" +--- + + + +# Deprecated Engine Features + +The following list of features are deprecated in Engine. +To learn more about Docker Engine's deprecation policy, +see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy). + + +### Top-level network properties in NetworkSettings + +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v17.12** + +When inspecting a container, `NetworkSettings` contains top-level information +about the default ("bridge") network; + +`EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, +`IPPrefixLen`, `IPv6Gateway`, and `MacAddress`. + +These properties are deprecated in favor of per-network properties in +`NetworkSettings.Networks`. These properties were already "deprecated" in +docker 1.9, but kept around for backward compatibility. + +Refer to [#17538](https://github.com/docker/docker/pull/17538) for further +information. + +### `filter` param for `/images/json` endpoint +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v17.12** + +The `filter` param to filter the list of image by reference (name or name:tag) is now implemented as a regular filter, named `reference`. + +### `repository:shortid` image references +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v17.12** + +`repository:shortid` syntax for referencing images is very little used, collides with tag references can be confused with digest references. + +### `docker daemon` subcommand +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v17.12** + +The daemon is moved to a separate binary (`dockerd`), and should be used instead. + +### Duplicate keys with conflicting values in engine labels +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v17.12** + +Duplicate keys with conflicting values have been deprecated. A warning is displayed +in the output, and an error will be returned in the future. + +### `MAINTAINER` in Dockerfile +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +`MAINTAINER` was an early very limited form of `LABEL` which should be used instead. + +### API calls without a version +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v17.12** + +API versions should be supplied to all API calls to ensure compatibility with +future Engine versions. Instead of just requesting, for example, the URL +`/containers/json`, you must now request `/v1.25/containers/json`. + +### Backing filesystem without `d_type` support for overlay/overlay2 +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v17.12** + +The overlay and overlay2 storage driver does not work as expected if the backing +filesystem does not support `d_type`. For example, XFS does not support `d_type` +if it is formatted with the `ftype=0` option. + +Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for +further information. + +### Three arguments form in `docker import` +**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` is deprecated since November 2013. It's no more supported. + +### `-h` shorthand for `--help` + +**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +**Target For Removal In Release: v17.09** + +The shorthand (`-h`) is less common than `--help` on Linux and cannot be used +on all subcommands (due to it conflicting with, e.g. `-h` / `--hostname` on +`docker create`). For this reason, the `-h` shorthand was not printed in the +"usage" output of subcommands, nor documented, and is now marked "deprecated". + +### `-e` and `--email` flags on `docker login` +**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** + +**Target For Removal In Release: v17.06** + +The docker login command is removing the ability to automatically register for an account with the target registry if the given username doesn't exist. Due to this change, the email flag is no longer required, and will be deprecated. + +### Separator (`:`) of `--security-opt` flag on `docker run` +**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** + +**Target For Removal In Release: v17.06** + +The flag `--security-opt` doesn't use the colon separator(`:`) anymore to divide keys and values, it uses the equal symbol(`=`) for consistency with other similar flags, like `--storage-opt`. + +### `/containers/(id or name)/copy` endpoint + +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`. + +### Ambiguous event fields in API +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a more rich structure. +See the events API documentation for the new format. + +### `-f` flag on `docker tag` +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is not longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use. + +### HostConfig at API container start +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of +defining it at container creation (`POST /containers/create`). + +### `--before` and `--since` flags on `docker ps` + +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The `docker ps --before` and `docker ps --since` options are deprecated. +Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead. + +### `--automated` and `--stars` flags on `docker search` + +**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +**Target For Removal In Release: v17.09** + +The `docker search --automated` and `docker search --stars` options are deprecated. +Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead. + +### Driver Specific Log Tags +**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Log tags are now generated in a standard way across different logging drivers. +Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and +`fluentd-tag` have been deprecated in favor of the generic `tag` option. + +```bash +{% raw %} +docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" +{% endraw %} +``` + +### LXC built-in exec driver +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The built-in LXC execution driver, the lxc-conf flag, and API fields have been removed. + +### Old Command Line Options +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand: + + docker daemon -H ... + +The following single-dash (`-opt`) variant of certain command line options +are deprecated and replaced with double-dash options (`--opt`): + + docker attach -nostdin + docker attach -sig-proxy + docker build -no-cache + docker build -rm + docker commit -author + docker commit -run + docker events -since + docker history -notrunc + docker images -notrunc + docker inspect -format + docker ps -beforeId + docker ps -notrunc + docker ps -sinceId + docker rm -link + docker run -cidfile + docker run -dns + docker run -entrypoint + docker run -expose + docker run -link + docker run -lxc-conf + docker run -n + docker run -privileged + docker run -volumes-from + docker search -notrunc + docker search -stars + docker search -t + docker search -trusted + docker tag -force + +The following double-dash options are deprecated and have no replacement: + + docker run --cpuset + docker run --networking + docker ps --since-id + docker ps --before-id + docker search --trusted + +**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The single-dash (`-help`) was removed, in favor of the double-dash `--help` + + docker -help + docker [COMMAND] -help + +### `--run` flag on docker commit + +**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)** + +**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +The flag `--run` of the docker commit (and its short version `-run`) were deprecated in favor +of the `--changes` flag that allows to pass `Dockerfile` commands. + + +### Interacting with V1 registries + +**Disabled By Default In Release: v17.04** + +**Target For Removal In Release: v17.10** + +Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the +docker daemon from `pull`, `push`, and `login` operations against v1 +registries. Though enabled by default, this signals the intent to deprecate +the v1 protocol. + +Support for the v1 protocol to the public registry was removed in 1.13. Any +mirror configurations using v1 should be updated to use a +[v2 registry mirror](https://docs.docker.com/registry/recipes/mirror/). + +### Docker Content Trust ENV passphrase variables name change +**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, we're also changing the corresponding environment variables + +- DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE is now named DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE +- DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE is now named DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE diff --git a/vendor/github.com/moby/moby/docs/extend/EBS_volume.md b/vendor/github.com/moby/moby/docs/extend/EBS_volume.md new file mode 100644 index 0000000..8c64efa --- /dev/null +++ b/vendor/github.com/moby/moby/docs/extend/EBS_volume.md @@ -0,0 +1,164 @@ +--- +description: Volume plugin for Amazon EBS +keywords: "API, Usage, plugins, documentation, developer, amazon, ebs, rexray, volume" +title: Volume plugin for Amazon EBS +--- + + + +# A proof-of-concept Rexray plugin + +In this example, a simple Rexray plugin will be created for the purposes of using +it on an Amazon EC2 instance with EBS. It is not meant to be a complete Rexray plugin. + +The example source is available at [https://github.com/tiborvass/rexray-plugin](https://github.com/tiborvass/rexray-plugin). + +To learn more about Rexray: [https://github.com/codedellemc/rexray](https://github.com/codedellemc/rexray) + +## 1. Make a Docker image + +The following is the Dockerfile used to containerize rexray. + +```Dockerfile +FROM debian:jessie +RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates +RUN wget https://dl.bintray.com/emccode/rexray/stable/0.6.4/rexray-Linux-x86_64-0.6.4.tar.gz -O rexray.tar.gz && tar -xvzf rexray.tar.gz -C /usr/bin && rm rexray.tar.gz +RUN mkdir -p /run/docker/plugins /var/lib/libstorage/volumes +ENTRYPOINT ["rexray"] +CMD ["--help"] +``` + +To build it you can run `image=$(cat Dockerfile | docker build -q -)` and `$image` +will reference the containerized rexray image. + +## 2. Extract rootfs + +```sh +$ TMPDIR=/tmp/rexray # for the purpose of this example +$ # create container without running it, to extract the rootfs from image +$ docker create --name rexray "$image" +$ # save the rootfs to a tar archive +$ docker export -o $TMPDIR/rexray.tar rexray +$ # extract rootfs from tar archive to a rootfs folder +$ ( mkdir -p $TMPDIR/rootfs; cd $TMPDIR/rootfs; tar xf ../rexray.tar ) +``` + +## 3. Add plugin configuration + +We have to put the following JSON to `$TMPDIR/config.json`: + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A proof-of-concept EBS plugin (using rexray) for Docker", + "Documentation": "https://github.com/tiborvass/rexray-plugin", + "Entrypoint": [ + "/usr/bin/rexray", "service", "start", "-f" + ], + "Env": [ + { + "Description": "", + "Name": "REXRAY_SERVICE", + "Settable": [ + "value" + ], + "Value": "ebs" + }, + { + "Description": "", + "Name": "EBS_ACCESSKEY", + "Settable": [ + "value" + ], + "Value": "" + }, + { + "Description": "", + "Name": "EBS_SECRETKEY", + "Settable": [ + "value" + ], + "Value": "" + } + ], + "Interface": { + "Socket": "rexray.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "AllowAllDevices": true, + "Capabilities": ["CAP_SYS_ADMIN"], + "Devices": null + }, + "Mounts": [ + { + "Source": "/dev", + "Destination": "/dev", + "Type": "bind", + "Options": ["rbind"] + } + ], + "Network": { + "Type": "host" + }, + "PropagatedMount": "/var/lib/libstorage/volumes", + "User": {}, + "WorkDir": "" +} +``` + +Please note a couple of points: +- `PropagatedMount` is needed so that the docker daemon can see mounts done by the +rexray plugin from within the container, otherwise the docker daemon is not able +to mount a docker volume. +- The rexray plugin needs dynamic access to host devices. For that reason, we +have to give it access to all devices under `/dev` and set `AllowAllDevices` to +true for proper access. +- The user of this simple plugin can change only 3 settings: `REXRAY_SERVICE`, +`EBS_ACCESSKEY` and `EBS_SECRETKEY`. This is because of the reduced scope of this +plugin. Ideally other rexray parameters could also be set. + +## 4. Create plugin + +`docker plugin create tiborvass/rexray-plugin "$TMPDIR"` will create the plugin. + +```sh +$ docker plugin ls +ID NAME DESCRIPTION ENABLED +2475a4bd0ca5 tiborvass/rexray-plugin:latest A rexray volume plugin for Docker false +``` + +## 5. Test plugin + +```sh +$ docker plugin set tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY` +$ docker plugin enable tiborvass/rexray-plugin +$ docker volume create -d tiborvass/rexray-plugin my-ebs-volume +$ docker volume ls +DRIVER VOLUME NAME +tiborvass/rexray-plugin:latest my-ebs-volume +$ docker run --rm -v my-ebs-volume:/volume busybox sh -c 'echo bye > /volume/hi' +$ docker run --rm -v my-ebs-volume:/volume busybox cat /volume/hi +bye +``` + +## 6. Push plugin + +First, ensure you are logged in with `docker login`. Then you can run: +`docker plugin push tiborvass/rexray-plugin` to push it like a regular docker +image to a registry, to make it available for others to install via +`docker plugin install tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY`. diff --git a/vendor/github.com/moby/moby/docs/extend/config.md b/vendor/github.com/moby/moby/docs/extend/config.md new file mode 100644 index 0000000..096d2d0 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/extend/config.md @@ -0,0 +1,225 @@ +--- +title: "Plugin config" +description: "How develop and use a plugin with the managed plugin system" +keywords: "API, Usage, plugins, documentation, developer" +--- + + + + +# Plugin Config Version 1 of Plugin V2 + +This document outlines the format of the V0 plugin configuration. The plugin +config described herein was introduced in the Docker daemon in the [v1.12.0 +release](https://github.com/docker/docker/commit/f37117045c5398fd3dca8016ea8ca0cb47e7312b). + +Plugin configs describe the various constituents of a docker plugin. Plugin +configs can be serialized to JSON format with the following media types: + +Config Type | Media Type +------------- | ------------- +config | "application/vnd.docker.plugin.v1+json" + + +## *Config* Field Descriptions + +Config provides the base accessible fields for working with V0 plugin format + in the registry. + +- **`description`** *string* + + description of the plugin + +- **`documentation`** *string* + + link to the documentation about the plugin + +- **`interface`** *PluginInterface* + + interface implemented by the plugins, struct consisting of the following fields + + - **`types`** *string array* + + types indicate what interface(s) the plugin currently implements. + + currently supported: + + - **docker.volumedriver/1.0** + + - **docker.authz/1.0** + + - **`socket`** *string* + + socket is the name of the socket the engine should use to communicate with the plugins. + the socket will be created in `/run/docker/plugins`. + + +- **`entrypoint`** *string array* + + entrypoint of the plugin, see [`ENTRYPOINT`](../reference/builder.md#entrypoint) + +- **`workdir`** *string* + + workdir of the plugin, see [`WORKDIR`](../reference/builder.md#workdir) + +- **`network`** *PluginNetwork* + + network of the plugin, struct consisting of the following fields + + - **`type`** *string* + + network type. + + currently supported: + + - **bridge** + - **host** + - **none** + +- **`mounts`** *PluginMount array* + + mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts) + + - **`name`** *string* + + name of the mount. + + - **`description`** *string* + + description of the mount. + + - **`source`** *string* + + source of the mount. + + - **`destination`** *string* + + destination of the mount. + + - **`type`** *string* + + mount type. + + - **`options`** *string array* + + options of the mount. + +- **`propagatedMount`** *string* + + path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins. + This path will be bind-mounted outisde of the plugin rootfs so it's contents + are preserved on upgrade. + +- **`env`** *PluginEnv array* + + env of the plugin, struct consisting of the following fields + + - **`name`** *string* + + name of the env. + + - **`description`** *string* + + description of the env. + + - **`value`** *string* + + value of the env. + +- **`args`** *PluginArgs* + + args of the plugin, struct consisting of the following fields + + - **`name`** *string* + + name of the args. + + - **`description`** *string* + + description of the args. + + - **`value`** *string array* + + values of the args. + +- **`linux`** *PluginLinux* + + - **`capabilities`** *string array* + + capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security) + + - **`allowAllDevices`** *boolean* + + If `/dev` is bind mounted from the host, and allowAllDevices is set to true, the plugin will have `rwm` access to all devices on the host. + + - **`devices`** *PluginDevice array* + + device of the plugin, (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices) + + - **`name`** *string* + + name of the device. + + - **`description`** *string* + + description of the device. + + - **`path`** *string* + + path of the device. + +## Example Config + +*Example showing the 'tiborvass/sample-volume-plugin' plugin config.* + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A sample volume plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Entrypoint": [ + "/usr/bin/sample-volume-plugin", + "/data" + ], + "Env": [ + { + "Description": "", + "Name": "DEBUG", + "Settable": [ + "value" + ], + "Value": "0" + } + ], + "Interface": { + "Socket": "plugin.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "Capabilities": null, + "AllowAllDevices": false, + "Devices": null + }, + "Mounts": null, + "Network": { + "Type": "" + }, + "PropagatedMount": "/data", + "User": {}, + "Workdir": "" +} +``` diff --git a/vendor/github.com/moby/moby/docs/extend/images/authz_additional_info.png b/vendor/github.com/moby/moby/docs/extend/images/authz_additional_info.png new file mode 100644 index 0000000000000000000000000000000000000000..1a6a6d01d2048fcb975b7d2b025cdfabd4c4f5f3 GIT binary patch literal 45916 zcmd43bx@UE)He)x5RM?JfJle5lt_rQlp>N6($cM@(x9NyQUU^sbT>#NA)(SC4bm-w zNPg=w?&o>uo%!bb^P4&MJ;U}~wXeO`TEAH5*`3>RXU|Zc!N9;cs~|6?8mEj#}{hMznYINBu?&oe6G$A5M8Mr ztp2%?yOZk~NktGSO(E}t`um}Kj_Q>uvLRE#Vv_j#d5(dNzJ`Sk$N2q`=ttiPm#IZt z{P`34KQ{W{zd!!}_0JMA*zHq|{>*G_?Q1M*>9Dh#VM&LRcFg&rSx zqV8j@3jHsWhM6dpm8o)fUw9v_7<@n2sL#1iD7KdpOnpr=U%!@JyZC-ld^Fc=n`(Vf zM)_vzcntLu*&9zJ`cVqiE^F_shw^Vnd;OY=O}q7q&(Hq!q}M^-d*1oauSeN#GtfA# zk7;ij$t%$jUG;sZpjTY`IUxJC_u*EcN>L!KmzLE~zAaVT!R<_nr_a?GDsE&_@Fi0) zP6ZiKT(Jn3mgzU2sByQuS+|h%I9TZ5=Z4@6OjFa=YRMOo=jYgFK8Dl#&*oRJKRBG@ z_I8}AvEDoSIXO@!_2i5&pJ~T8x6MWzY6I`1vQ&e5FZPXG?Gj3b_pTduQ9oglMuoy& zCkj8zwnkoMpS!nhmFGdZ$xQRJ(B*-gYj@1vJ1tQaX2DAceSOW9(*3ISU{|5!cH?dv zt+%?PzS}hR?0d05y!T=k_|2RWuepBSI^6zp?fH3*`3A?ij%ezK^Ig0h-Pc%@lbn96 zj`T%{dwa>naB5Q#iXRr5w1rX=e8P0zWwDv48NsZ6LB_Q#pDb3(sGKAk`rdg(gp5-w zoc&h>qa4-vd*_GWI%B^?2tPE_xU{U2DtlIg`{84{qUgQ#VN{3O#X6@YRZ|m~C5;oz z&gl<<*Df2><)&uZ(j6Yn&FAaieo{^4_T0Wj_Aoi_AbG-jDJ?cpaXVJIGlomUYtLZ3 z(8xd4X{mSFTTob7doH8%1DPk$TFmvwE<2JshbOe6Zrwd;H_7GJ$Ev7d(LNOY{*f`% z`*=I~_U=>zZNAGodH(rc8>fqld3JaPHqKth<O{E%xZw@1v&Si_61$gmMUD=>Y9uD`AKSVZ);YO=~aOVtu2P{+n;3-nzPj= zq^!!r-LRq5JxbRW?`9|`52jQ4bsHROR#xT2suU(rD{t+Dw9#JN`J^&**M3L3o1ONk zTgD);yZhJ9VvqEIL@bVD@X41emc5x_p?8b#E6Dj`VXKRb5&m!w+Q%cmOmXgT&T7cZ zNWHl&&e>YFZ^qct{jSbU-k)lV* zTNPn%C^&YXL{>-d4t5*X^Y{~RrhF+$*>*De@*=JBezPuz6q>?|e%P3k1dY-sr|Py^FNW@{3|+gd_x@e} z!J}b1>N=H074qPY#Tp0x?nch=&9Fk3H65ID0(Di(d&jsWN6G;>XZP$z%RlkjjR`Rl z2ESk2{L!a;alv{ZdrqswEQGy8JxjIK{o!M+kd08K6j68qcEbTZ5%ouF_MvQQ=|Wb6 zxt59cJ#U}n$J5In&e=ansD5+-yLx2noWZ2qVUgodzFz3hN$<(b6*cASb9>)AxzE0_ ziw^U$cx$tX>V&N)a=aDhok?jyQ#tWYfs{G!OQP^><-vOO*^6h$FRu>QKCYN;4Y^2g zNK1cN3^p(y>!OFpj$HLFUMfR6wSn#Qi@bsIUVMLBJhk(|dIn>kJkO<;zhqI?Wc*4& zJhWzeT)+L3#)e|btZms-c6)1}?M~GB1E;#i2Nho@%;t|puZ_{+cV=axNydA|Y*z+y z78*j()BRa$trh76)FMKX-Gq*~q(>oWhX|JDhKbt8A$#>ly9x1pjRM=Jx;t>6o!2Fx z5;8pGf7565aesejr6Xyh{$v8P(U@SiElhC6`+Gaf{7nLHnyW<4B4bq#km9;|J0cNy}-r55^xow;zzj zB^tk6P=EYhjaTzm9BuqY?aw4NeIcI7;7>IWvyRQ6T#jTPRI$fAio%&1dy5V6QJO8T~s7rs(4oNw&7KK${cDNFc4aJ^?&w&$A} zyRzC(u>4)C=$OU%YA)UgN&aZl6f&wI>PDEp@kOIUS;FE6x^? z-S~TE@uMFv@6~x|6VQleex4pjn|8)%9PFvRce0GGcVzIP<|v9pD1^HXtjq1S1ybu8n#%bC+FPfSngyMl&03W^N?PvBi+_( z5*&97vHIivoQ^cO&)c5N(ogZfckzFrU^Z09wcc!@-GaCu@0uBURMexU-S{0-&HjyQ znqsTz_G8Pgs|z0^J_My62;2^MXq&M>p(Y1UV|1#mYa@@C+>{(TpmwMCq z*VVL^o*2&2thSTo{AB(%Yfp>US|?G3GddBmn40vpX~&~qbEGRlP`R-8G&d&9L4wXY zo=TDXwwWbtI-?AIzaUaK;g=cYSEh}ucNJZKPN24>hglm+{pZ@|Q)qMd6VK~9Y&Txh zxFdM9@ z&)XYnrlw-9@T%2^Jh$YqysOeWL}geGVU|=Hzw^FO8RDbcbMkHx822u;FTZf2CbH-i zGVAc*Z25%Qfl*F=#)y|*hC~HSRFUv-Ic_O|2z6cQEC2bl`Y4?$oP=%O4V(u^+{Lys z;2HC`w0C$WLo2ZTvz&H-n55=)nWX<|D|IH5*5HoYH9rlz;&`8a8YwXs+}%pQd&pRQ zAbPwv8_92ed#6>LN$BBxaK!BmNLw-+l)BqW1cYkTc9V5^6ZI)!MPccZ+oEC{LKW79oZM>U8E(-R)DA}A5 zIV7#!Y{DysP}?Z|9s(#k-jRgl<3)$#L-$&18|7p%+f|b`E02t3Nt!rp&N|7e(+?*d zS@J!$Dw;l(t=^$NDDKe}Zv5V!C|uIET04?<5Sh_DJx^&>h1L zIK8w?y%dESx5y$JrRU*tPJv>**U>kBLa{q)bVNdrhM~;VGxiEHj57X`bX_E?XaEb7 zrHgQm*88w0j=eV`Z`iN+((Oc6H?Hq3FUYaWy|!A&t-ikVI@)KyDf({b!7`rYaalX3Equj=e#2 zyz3Re>5s*p@5culoxvCQQO2n0c9vvy-EUjNCSmPwZ@jp_|6^%6dUe^alQ!6R(TBXG zanhva`S}hxspKJD2x4WU6xZWK-HX4AOVmNFs_xuP=!CCq6=6sw3z6|nFs-=$?#5)n z3xaksMm}7_zDh?ENnuEQC0A_kC$+UDFztMr8+V;Ne|^S?j3%yFssUFqS6^ zTLv&bd8wSDQPeSDcf`>B=s?lDqWnxib+rg=vg(@!U$m>fXf2t%DOMg`+8DH3TPqmP zntAd`Z&0bVZbBMM*{tT{t;fA4ujrZ}y=3cuOZ$>gq1EE6!69<5_R+waj!O*==1ts) z!J3Zw;UW_P&N;q?XP%rlTkv`c?Y(&8&+DEhq)B8qcqpuem3wO0uEjwPC_vi)r*fh42NZj=S&#j7%PinPJBl)_@ zTY1AlqP2Oeg&l1*Bq(kUr?ZcqVCsLqPoOoP`YPkyJBrxCbNb0`FP4dNAC&3V^RO?N zwdv~a8C#d-N%fG~Wg8}bZyg_#+o^u#B<1>(m*c15>#&u<4$oFA+7e@f%t~Ebd;W(8 zKQG^PnHDYogo}WRypKNR|HTv+B{qp?mqo3<}UUF>%+W3}v!+a0E z$^ZKHQn}wkCW7Hcl=MX^MWZhZ$=>-k{ktgy_O&}Pl)S$7vQN&OZ_e#E=-heL(}b=HqOsc@BV32x_V&wbFhQ`mPk z(E}?lLnGg1h1_>=^sY#CnFih1^?@EJAk=68m1rJlv9504ugKVTv9?!s*%wJhmCbUf z*4%5`c+lXD^&xOxgJnxq++b%9iqdGhhC<`^NapCmKFdNBxfhdwRXe?IUm8Lm{%u_bi0U2D8)xEB_HOGbkY`631^n91a87 z8ok*X92KA66`tuMj-!0&X{7yw)2#guL-S0kK~s$B8P;{3oo2j+!2$zrH*-7>olOS* zy&S{+(_(^?-Y3^vC=cmOpwG3 zQ3{p&Z33Z!>!3O7P!{k_2L4E6kJ%SS$X$XgRZlr15C_8>7F(Q@9Wu zL6VWDt@ih#gq0_iVr? zg7^pCZpg5WWZ(yS^2_&v(6b{$#&2KQSl{Q|aUPYzFEaW;kLJB&Cijm}GkgtG5kR^Sh!qLnC{QPZ2t{N-GUd=Eb)r^9}DN>495(<;>G?qAQ1$#TmS`Y z3FF^aMpAg&$FEt0KmGR$M5Yjz7xPRc(J51x|Cqf&I7Z97_1u30^p_+gNsLpvLjTc$ z4I-FlNLISy{QeElHN#tx!jBsH`=T+>S}gG1USClAyUsDt0deqVgM9g}{e5qAm@6n7 zHu<`fZ*Et8!H3A_dzTLp0 zXX!4rR2x{JdAwQ_KDV}>)B~GGkCkLYsn~!tMUxzU zID~`?)S3%Iz0%L7>-U<3~m&gnk-Nb>Afk*2&C{j z+PTZDD!#wSs9kCifmD7m-|yci*_6RBuz+1$P)Fo>{A=OCw~xfBC+>=~D;})t7l5`* z9(p`EWZ=yuXg3xPsb1uj$?-|!lhF$MPADYjXixS6_g6xXYYNGFy{GlT+s(Xa^FYHlhMcRn}pjL;ztUac{38b`5xJzDXF`2!QRwD2UHjU!02xKa=l!|beOYEu z;l(P8AE_%%-bxl5pA}aIj`ZD~H&T8^o=^8K`KS~(`r~xMZvEB~&9R(LIVoVa#&E+B zYX8V-AWmG{w5TevdW&vSNzX3oh;G`6^R08&FJM4T2==C%0EKXbOXxBE?NQonauV^O$xeayQs;++Y`Fi{4g(R_qA@UzcjR*bqcc_)IUZh+>ROBY*q zy=N~QF`*xWk5`HH&}Ga1%!sKIT3d&y2FwaV8|7m%irNpJn3lwcRpy4MG}n!Z?2QJw zq|i&y7iLz4uNk zKYMLPiqX9%K#+Fie=W0Kv6y*OU}hlL&9oY7i+biY^@00?t`8|=^9`sIYHsl7Pq$)7 zV$%C39OH4He-b=(V27l|5_2yK4>Q%hlsT9CZ8Wu(6LCXhyTEffbo{e+HbiZ;c6Kn!ckPb`v6 z?b0hD$G1>JOY{5)5mT-A@)UO4Oo@LaWAI^9$U}GYta_kyAb|qRxas$A?W)=8Y)hjR z1wejDrYj{}0tTsm>Zu*U@ehTE9_u)A7^#f7)Yn|pbwyW+d0L1pHW!jGRjMD%Df;qr zc`S6_-Qqdf9ksts_QI0+)?MTG*nXPcD&ON8K0O()cFifd@q*0x;lNk6A9{!O9*0|P zR#6vsTGiPT9rZTK)*qM|xXLm6eip{UXP|h8tM@TkT>Pmh6@-JrsHM~}vFu*G+D95` z)}!TlB4S>Ll_Klo)fFOM5CC3(*DEr9A~Kn!#xm;x#P2{=M$)4Wp}^P37J9t_kK^bC z1oHA2;OLNtZ@!`hM(>QwDT^`6g*xHWhz=)bdbI${9XuoIb?8>G_4vo)o0+_ORjMu4 zW0ggWKp1WT0uW9EO19{^x!vrY9}x(%KQ~%vB;5wwc`#qH_p!UP7jOivRr8+`avmal zk0Ze~&}CE>Xdf-o&(=hXSu&k5X6aue4`35}`Sin}ZiSujne*)5J||*1!Y8am(GCtP6p(s z>}^8PO&_r}huXW=1oJ0KGA^zs0(@v=7OEo_+=p@f(+$#3IQw$2?cv!axE;8SY$?PH876@*NJN5K7F;DadLuIT-8jTRVqJ85cKL~!M6tEJy z5Zpt8y7h_<^;M3C-IxFoBSf0r@ds8MfI-UC3y!BV)bfDUw>-+2Zqqdl!CQmz`b9xI zbv4wy_3BGA%IQ&{zchyp&z)s7#NE_DD$8Y#M?aXT$PAvtrG!11!xxR^i>5q1;?2*`qS_B0Z52 ziQ?2&-N??XQ<$1=frpO0toOhw2XbWg?8Al6?Xc|LH8V}z^-G(@rC655)u8n)z{E-{ z_=L%D5tGB0Xet5Ygyq&+C7#1i_ zE%Z190HY^tiuWess_j>GgE*q4&FK$M;~HWw4;@cO^?29cdR2dC`h`KA$1l_g3sGy_ zXZ)*%a~F>by1W=flsQ$qPw@5=GxDtUhKH_4Jg?s_CuVZU^`nEF8fKVRo8e>6O`+77 zSY3cIiGkYlsR1UNCRKVQ!9T;BN;$N2zH}^%0JHYn-N~-0jIG|A+*z)Ji*qBJ^u2^0 z<1xOw#L90+hH#T<8n;T%NFB&F(za3!vwXVJR6?jy?eXhDwD6(O7}x68u?(Y1*U6=Mo+~~*b7w_W|O1lEpwC6ek*li+0V@pI!Vptc6@7?XQcw8#f8bmLBA8oi# z#+bc#l`z9($cOu;gKDcf9hD&Qj~|INj(wK*u+zk7pL0!wUYJi+SkIA+K7TWikoNLM&35Q_vNnge)(uKj8>bq?aR)yioT0#K z5!A$`zV)fHZv1?X$z~qz@oBBMmduOZsvMRSf}$uM@sOn}xjYgVO*&2)qr9Xj%mi_Q zW9S#{Ur(c#$ z&R4JnkB^TIIzeze^m-OvC3^kJ#?q*d@a3N$q%jy!f(yJRt*pss4p%Y!Tx4Yo_;5Jc zJX#Y(F8=cZ%thbrXRE~*O!RENS3VKg-D;oO5Q_?ZI?wZX*Jg*({)hKngL4l>*8`I< z5i2V7=&Nk7qR9^hDO_2fwRYbK?a6X8AH?0mWa@6GZNBN>Whzo%vz;8*=nP&8}XQ$w6!Siw;ZP<`+ z*gE&&ty{?yC}tZu`mN3$qm7%FYI+21hEwdWQk1ue<41`n_GA5;$Eqfp;;c6NM7bk& z#w$mlFwg^s(EfUTSOj*k&9p9gVhX*)rcZ+vJL<x!Un#tu4m5t{jkG=h0UOt6(V=zo0CZPzLcJa8`e2Qjb(rs8m#xUU2q|WAJ z60PO=_BY*n%y?v+nO@{LsoX>iZiSMK6&5{GXV9UNQ?DBP`ifhyr5f+I)3We%3n)92{igaPb0467W&o`dy%5KLz}{`G!>m+ zpm!o}8jvf!p07LhuKr;CS|gu9|0z!r-CP%+J8XCJ6Rk;T0$*XE(0=--SF(hTT=Z*l z@jY?Xyol)Bo?v)=$GZ8RS0m^kCTY3b9sBW@ZZCGCp?&YhKC%Lt}pFRpOwu z-51QdCIu`gGhWSVqpvrWm6z@n`jMo{QW%|SAw0kA;XhJ*f|BMIwX_wGB1vNByZaj# z`pOeEuoCIFW7HEr^hqX{`tbDZr;I9PtlKD?Iq8}E7YP4=U*qQ(6UEPm_zZh6C&K(9 zNcN%((-@nBex1$d)!?PPNDMHTxUowL`7+%x+K^J_feg?m>RIsRexA|=4%<)dMPFaOdV_kiwr_QLEw^IynLR}Rp++2x?;(Z6%z!8sj! zf^$)S5qtAhK;y%9;yTKIc>qUAIOpZ~i0Hq7-FFxP;KM)?VI9t&wg{N(Gk-(8kY<6a3h)*2o6iSf3DLCE9s=5EDa3?tW2WD7!bMGnA_!VHxc@M(F zqCcaqZ4rhl*PPyEJ}9-R`B`SDP*QOEX801Ll8+cu#HG*Q|06n7iGenH62S7`00T19 zz0KM*{}CPRFp*i|oB!r9T;d&KNQxKg{EzvNzX8XbCrSE``3SlKb1hG5?*1Rqkp)xv zPO6FYzkvv3vMpyg!~P>W&ciY4@^1gbeDuO>Gbkm_{l|Q~V~~)HJcCvq4rxfc8T$>; zl?(ri-9J1?KsYQJT%-LC07UnS0RyV{Y;rV%xOZnkx6F!JK|#Sz94f9<}b z_L_fA)Q)`l(iCdEtd7tpq;2DMI~Wpi#0gGdy-iH=_mZ z_1QFY^KqR+;*b+bq zK*rMPw_j(^0KF;yVBs^@TxV=7zygY`p*&p{jcoM@QTOlizy8q{<8Gs!Euh*q z48W${3r`u-E-^#PYKvmuA~t$=AHYr|z4Yk|eD{OiDJSb^Yv!?o6D!)XFTEY8pm*R< z@;fBv5m5zNN?Dt70sI?tyvoC_-@hpcoB<$DgWxTRcQTAltHUa+if<*8OyNZ`iTFA_ zBcSWu;Z}VY1aw+Y)-cduBGiF1hTvPWT0W)5`f)OWsAujxxBk5~8d0}9q14wdIW!GS z@2-tnEuZdV0fae_F;I@!UE1bP-D;zA>nzc)eZ=Rweox@}_Wnu%hwkK~6)q5fUkK*} zt#TXG-P0+xpx2SO@$L?p)bD^rM@9dv?>2^aaItCUnPxw6V-y9QJ`tS|yGAw>u(5(- zaUG59)VV)?{j5J(lR4WbglpM*m~jpKJAIY;)T*^}Xcqn9_{hk&>AoPgaDu^!KW5*m zI{4OyniA7aGG;{~RT8uAuHPZ}7VNxRJe3EuSQPXPR*7iROSsM0C>#&P)-c+FL%Gk1 z!l|>`nVaOBx&{PNnqi}#D)_Fl4C?C; ztun94*Sif?adZjWp(@x+vQ(vHU_x*3=mltqSr6u_j*uktt zOe&1#`aAEFV?MC6I8!|nKgS%A(#3~FyD4u}B-NwH6Yqr?JNy1jl?T8dT@Ze7e{A3r z!TF|aD$a!gZr2ijCEu@8BG&^{D23Lnl1X;cB-;7(-k}Q%WFY|J#bC(E`!Rp~;i%-p z$m}>?lS_pT_X&YeH=N09@%%bE?gm=(BJbY-l_b%`d2qB|y@ARRq>xfhfhq_~ZpdvO zPo>y#R(8iL$}Qh~@iETB->d!vmnoP|A@=g?M`z!AYf-#FsggSEa4S&zoBxT_Sj^!( zeyGWrt;cNmOj98~89jt+W6xtN_3AuEbjfP|2n$3QnET-Jbt+w+yV8XEzTADo=e?th9G2+s|^GX1gdW8$?xColpkpTwm(f*a%J-OeaB#)8`2 zcuw8&TM$0XD2hPz7GQ8(`zkPd4`jdlz`6CND_s=!+%I)n?pHAToM?G3`KoPv6uX8Z z#Dq(bI${yx)VkVrBL-L(1>jxNJbr!WGik+lreWl$dN3E+lPY%;?jy~j=dG*=I4_vK zR@ieRu297n#IM01+YYo()MY&#voA@u!$n|mj5Ywbs`@d|iNyUM?j{*{?nL+J>E3-X z-^B)Q%a_kcS)PBGCbD;-eDWah&-!72iDmE;E`D+g*hx^q8C}PnV-f!%B9E&sUS@7S;{MS#8`;%pi)&EidcuO z50||#Qs-H{I##7tw>N_)Nu_K`_o?dP!aE4?y>Fy(%IXapv6hAkk`FgO5GsmF_(*)C zn}FE_nd1hSeO3j&K$2v;mh3izHYJ>4B;=|T9m!hx%=KqY+=o>z^Yme}Vx#{fNJUmr zr=HCC9s~2584(~6LcwbsV5t5UOg&cbrV%&Hgqg_wk3~KYtAhko@H*SBf@~z;KR3^A_D=xy^`i zf5PwOWXOWGw0a-k7M+A4D>oIO1J-Ls{#!2$OQ)gozWVuIa#vXTu1F(W7Iq9;E6Tj{T*mWVjC-@Ufm{ET$D(Xvh}Pe;HR_u@3Ue?$nlCO_8x zoXA;T>f8lSUG~zYhe>f>O&^~TXs)e-)D%o5tjbbxM7D-CT@Ju=>1$ce73;xUz;C6< zB(H^J_`dNj? zagBGIsSGoO^M2`wu1n+laRu}P`Ng!!xI9_!Evgyq%z#M}WCZmc@0eSD`s|w!%V2~c z#UY}R+B0OwCVUIEkpjT>=!eU|9Q0rLeE4fSIxO%r9wS|xj&J1_86@X7bW;v)YhoH0FX%P(u4lmK+rdp-Qj<-a&HTXY-CI1*IiJb(GiT(Q@0Dz$3bS zDgkCATZ}^3@Aam+`@(D2cfh@htp&H?I*>i3{RAZ?7dJXgjwY8Ba*8LCQ!=>x%2ABC zo1{RcFgI9H1OYk5j1nm;V=3MZSBS9+a{J(SQn61aQ zg0iXMJ`SOx(5I$ycnPW@&k;vl(xYWIjnkSFh;C)okE*+*wd6m=rW_nVr;N*e>l31y zeII-HB`a(sUFK_>6=QI`l%s#R1Jv_h2?PJn<1eZ`QPk3?lda#$BGmgujqKkuxW$ye(fBSx*=xX6z56JGRdS-_>u%$UcO`uDDKaGXbD6|u`+zE?@- zW=ZDd5b^t7SEXRx|30r8%2*>9338xE(IMRiPzki0dq@dsnP?Ioa;Dl-yPAin!Fs4D z{!$E;mJz?#N+z(a(2*d#bIRXH;sYm=`{)d@5{uCk)TEc#F}>48KtH$f$H}n-e?9{e zIqM@66BBJNj#}U%lWGAE6_2W5nqu*V?cMo(uCq2YoT_4^mPzh{M^o!bnXb zd?Zt3o{=EI7|azf97F%+szyMH=ulcU7FNt#-glyyEM`l-!h9LgLkc6y?YcsY3xiS| zC801Ffz%dky__tU(RZSnO-qVZVNit*2C)}8`Rd@C&37LcUxmRQc;CyzebLWQJY$D|gyL%KwmnJlavmQJWt)S{{JSfBxf3%W?2W2;#*qM1bDh5|OQP(<7)s4LijB-R8w**>n9vlgxW zXt@#qRVt(=c=zE0mESwS$w;!IW;QD~|B@7PLD!NMFcO?XW&<*tA#R{n7D)=dIR zr9dRuqi_EDK4;n*d_nfmd1X)mJkU(gs?nRP&A-(B#vkdE2`7|wy%S16anvY)idws@ z45pC(FJM<$*Z!&-M_YpD`kotm@`3_g4{#bxj|dHqOrA;rZXJj^ldkc@93*6;GFmJn zplGs1anY`rsYD=9#2y{|#FLQ$H{QS^zK0;srz^nt3(P%)CJCh137=X-9rhRtQjY=S zP}$kLGzP5q^?9_fzbKv7?L;B^ftH0TOXtV#=; zi4hsTIfGa*G&H*cQ8ow!Ci^78DBmE68B8`cTK~&`De6ujz)L(?txaD-bjND}>lnkR z7PW?R#IlOzcH`^-fx+APR31vOG^>H^-U@qzS5Pe?t_gPaOuCP>n1l-=&dY8cy+ZX? z1MFe$O?600SBV2V>&G}=6VwcSe$4$bKv{KV!8L*QJZeZ z%-b&}q$E);+Q5o!2pR;aKW??0yuZj2~{ z=k@2e%={}J)Qq_P`fjRGGe29vb=)fbnoaRAt;NlSrGf2Ybp_EHqb~(R_uCtIWt8Sk z0cb2o%tp-Q$gSBnj*|%R8f5kaLt<}-h-(UkY_N0c;z=JWwU2bAehQBihcqKlxN{ngWfFr)Q-rM^E)Dt4D z*L0`8Enjvk`MoM0-n}jKStTmKpM5*{iZ4@O5>tp%J2B_~M$)L8rYc+VKAvo)dCxV3 zwLa$t18N5>-8t~sDthv^ey&V7-Sjbhm(9o^r08^7RTD9Z3l<5TZvZ`p zcuc-tb*rtXuqsJ~QUcNWR62zqGc#rg#}XLa?jO`uMRB+kH}xrCAuuBgpC_&t0FqE-LNHq7`@2h~{*UApL6>4cO8cx>>ohtb0jiuz zG`}~J=5SB>Mn#>Qwuj)arQbMl6HI>&;ioHZjVQFregR^c~IHvSq#vE zggAfb<7|u>xa8!)LubMC`q=?HZE4UqkZs7zOvz=p73Hb8% zAM_^mg`NRL#M&??L&SWJDd`c`mwya>6j~yRQ>Rp3n|R}GfFD!G>yVbe|KuZxwMnah zQjwCJ$s^yS{F^w3eIyuyNY0R4bh;A%FD=&LHdnJw7aJH{_Zj+oO}aBE!~la-(GXRx zMeCQ_XlsMb0_h~7 z{tUWf0C)oh=u|$W!ItC-lGwnbfY%&K@UZ|oa;(~||0dsM60c`&-6@i)l@3PjV7-EL zcgJOQczQej^IiM#k>#vg@3g^127L`Tz^cOt{K89UmHG$)U*k2e4(hZBQkv*}D9RA6 z9Z3COtaDeQu$uYBpSouvEA5Rn!;zyEUS&tpz57eZI zPT!2-R3Y34kE1-|Dgw7$wvGMJ7%|W28DdP`@o4w|mA?(Gp*D#=yZ-%3GF~YSlBJDT z{$}a;lHe6jxMe$i&EKv~shcS<(~(@dS9c%}%c9Lb#WRDqn(xeVUq%=u+eoEjD&oLI zhk(nc^Dm^ovn778%HL?w`GUKSwes}VA5x$g(r}}7Zy^4KxA_`>83lMcH7^b~e>T}$ zAS^IKBa2EZIV;t!+j}#Lh2m7(8zV^_&AKWakkK=*VaduZ_h-#Usx5zzScwqHXcm%| zjHFoW()mrn$>SP!V5>1*b6r>WVx&lG==T#o2YfZP6csQ?9UcnQv?5OO;RI^p^LGI9 zw?}itA`AhLC+WM6!Cydv1SNt|wLt&+t81?Lxq8)_A)$uZ3*dlK9dWBRyWCb&V7sGv zDP8$F8JE@VOQs!Bjv`lS4Mk2j>L&RPTU}6Wi(o*ac#6BnxCsfSwSHJ`p8)w zd-HS!1;!s7?K_($R9j=8Imbh8x@xUUFC-d`P%9bnZyT@NhD^w;6o2ATQ18B-9{*md z^(*}VaK!k#>V(7DYkc@hKGcANBp6GcBb$%tEEa(B=-S6W*%vfL3wg6$E_43+=LPuw z`Pv393gvMXOqkqID#burXT!O>=Xbw7@&-Z($QmF<9{IpP@xyCClyd_ar+GW=Be~~OhouiP^F^on0jxExMvFhDxz-q{zM2Su~` z>ZVwv3>s|Ty}h|CzXNj|hO87%nP1?QfDf=3FN0Ajao>>g^bnZYGd&cNJiaUZn(`;? zzk;t>oNG|856$Up*mz`7V0Xgxvas9RobIi9Xg;U@+yoah(Vvucw^nak!`a@{xP^$y z%!osCx4}FeRxkU_pf}LTnK>Z6`3$J15Z)(qT*?OJGi}@ZKR39c>fJVdGQAhdOS`3rtJ>BA(}xI zyEC{7rH@ED|9Ra>cmVuRuvSX{!(~KVSocgxxxRD8A`1n`XHbuy(3c|ie4V7Oe>AT= z9R>ttObHW`{}5g`f&;mf=l|L2nScO46N!<+i~gHq!X+#q23aOLSRTAO7Vdn4zY~Wg*0a5mpSi-3{XSCPu_;?2F znVA(4zV;}#mumY!W8Hl4pg32*Rzzc@$l)#M-AF1gNC(UI5VB?q92uOqAo@H%0eQ)j zOc=x*$$G@12pTybfaRr`HSda3Ndljx`}7lX(bj2fk|2aX+UkwWmAggS@B!eK z@$ySZ4vqfuw0|YvXu&oJV2MZzAyl_Wt{zkg&hw(!T_4wlT|^JFsj&r{mI;XM7{Cok zhgt9*D4Q=q>yrmQ{vB*fgzdg?%_a9yrty88xkaBI+I!LxBkSLtgsAu|y6M=!_Vw*c zvH_^Ftk55R#}bHLM&OTQz%$Ioo4zezk92TAH5&tbb~|9vbE$$8Zqgv0b0Q%F8UWNL zJb&tC>6B%|4`4{>dmlM7{<>@6$qYC=8fN{BkW!+MieZ?>CE2G8SeOHKo{xV)Op3Zr zfqCKjqf)Sot3e)M2MkOelqr6+r2M_W`{*VT6@h@)&~_`vA*W{@g5DXVrM60F{iVbh zu{^n}fbkP~x(*cRRPSVky37o9-hg#VB%pWgc{VmqOE253&@h$XIfV>JYW!RIeTitr#kVOPt7@NDSJBy(4G!U+5E`I@^ zKo&7JE}3Wz4gpoB7?BvHdQx28AcCL^u0}9r>;|4^`zWD%RRzle{z=h6!e$K++x4!E zx&u4+{iTe7(la)XK)W!7xXA9cJ^u+1KWFy%p(&QiJlDNS2T7$io5?!8XVH&0YVRtI z8^vcCw>($ml(M0MWXt=&{0sjVQAQ9iqQRxf?ieJ5&qP#!1iAvQ&PO+%U^L^_EmfcX zAHYDW+=Z?cqb+Ysz9@|0?724r9Iq)y=$@FF@7s!$!YJ}Tn!5^@n z(v17PaZ&!Yh_k4vWuifuRdk!E(Ik-61O6_cbATOz!k1!;#qiwBsC|7j4vkv_n+VMm zt`6%oMXF#Sbt*j$A&vT53_lD8^iB;tdH)Zz(9#qkDfQ4OnQ4m0<`X6OT@$7A)WF zHWj~43U~sIbE3InP;SU_IK}^wByJ@eer=@ch+;DlbNbr$1LyzX2;y%0=5692lDV4x zI)NdJ&8fQ&8`Mdq;;)?i%74;BOYK0w$O>qCo)?G8l-bG`@67kYiQE+*v1hIdI{=y))L;HhedUl>9VKZ zC7TjT#VA&KXn=Q6e9FdA;?`&SxMY&epN`Jzp+G3E!xR7IB7;geql~uGNaCAtYFQ3PrL)jUeV`^xfSLZOE zviBWe#3dp|T;?1ag&jBqMOxIe9Kb9X&!PV>UEa^trSfzXnwCl^NarjCTD#gM??K!X z+(+QTpQ~C(QllNJso`}Dxwu#)zMGOn6bSp{3!>W>3x-+?TlKU0wjDrTn5~S-)nUSo zJBSqIjl?+7^h>MN!b}Ca0ew8rH)VsDq3tmvJXYx_z5agXvOpR3!d-9ws)GH5r(5!L z@yK>l+ULAxhIS)m>>?JM#)>g{&S@@JxlRb_ox0 zgB5;m`m^#=)^XY7{3FKgmnRGlyib05^O}@sNN+8>?X5MmJ@G}&yvDdY()<{&#EYJ# z5Twau_hk)AF~(LmRBj#8tdgq#0<0l7#-BP+`e?{$o|^C)EiqTlX{Sg=y+y1;R*TvW zxg9)vR~dTM_e1_~-+*j{>&t_ig{fqUqeub)IYJ);da zgx`7uz!V!zP4+R=rghCZ=IQjVL%^#f&!9|TD7(5ssfu-unZBjH3u+LOL>TmQ#n2QN zg2Bcu6Dy>sKJRXhg-YZ)uMYF7YM2WDsnJf5rp(m*LW>>-sKJA4 zcE4H|+l=rDyR60;jYHAVFZc^G;)R7D^3uQ#e*;X}3+*MZgde6mdP2TJ`ecyq3a5<; zJ+R+c-g^vPmUDoH5YGDIg2x)C-g|ZM&agrAR0RBV1_;S|s9i&u(`^W#9zuQ)NqqvO z$26$jA_c591^8wyzsbhZq6wgUiiOm5AsHx5dE^%Y)+#1rAOu;fRsJdDgNS^ch}i>M z_9P7fghCdgty;;i{uUue69L^i24HZT=4Woc2|odF9}ED!nJSFTMB$6jD-;DkkfM)p zI!I~p1dqJqrLtJocYle-%ri1AkkGSaPuHAB_4ro)Kh(W>IMjdpH_SAav5rwuqQTgg z$WmFyzGlr96{2s6P?jOvNTTdZDN9*eM4}``HKG*SktoKRJ(N_BV1fEka{0G!w;_A6|>^ePesiA=o}cnETh1WUiM zx!6w!fTSf7W}~v;&9meRze8{p*%`4pEM@bS{qHJ4AEwqqZC|MAy>r+JNzCX}f@w@n z!$DRZJS&QUu~*+5F+)XafL?HR&j;QgJ!-Sc^j`)q*3sDkBMBXjwbDco3vTm#d|+We zB0iukk$?i~Fsr1En%#B0u?GQ1G2Zw--}D&mAa|1~9JF45ELV)wYCo;E0$InWy|)L= zk~|xvaKD*ju}d)hyPMb_0-~6VK~7e)!Tr0Kc$*0l=7Q@zu=^WgS>_laK5d#jry3)2 zd~~6|eR>bN%9b0hca!!{(hD^}h!*zkjS0&;-CoRz}@JR($N#)Kly=wmjvo%PU`xNflI%Y#~nejihwyS zFzMIaN_`6VB1xTjWtx`wLVEH}vt0*x=1g^d4)HgzQ&yHE(%( ziKV>;A5+8W^+`|NhZ^l;A_Z+f)Q87n2)9pwM=#E5H5*!SOAk$Dok? z%IfQZX75_5XjZ=rt)D+?J!<8_SK*q-kyJj}`=H035G?Yt27KZwC`Q}sCx=Nh_TtHltiqKwBYOO>XOi(dbehjQ< zmxD1JkLVd&f6PX_KQ`iLN0V!$%VtzV*jZpOx3TdOAhzqu3d_2Irk? z*gBnOH|XW|zmEcVeUF48V3m1zUFn?B$n1G3s%0j#F@!myhbU7Kae?sh&V zN@)hrz%-PRw$B&Bi!7X6s|U*lzVK;f7Q zykO|LXR1bQ6>q2UKeS3_oY03D~KH z$S01x^$@&r?XB9u*JQXK5N?$XO8L@KQJA=c$dwR(sej$+rs5hlo?YI6&DX4u{5szaIM9C9O0fJ7o2I{SN)pOqP?SqwKSJe9VOy$Qw}Ko)UDXGRpk zH^7=(fZx_@?FVR>)a;NFxYW6-h_Dg$ThN+VBT2_VG<0#XZ_Wzri?9vg9ze-wJ|$k@ zwnI{PHCgIm_-C`BIc$MD8tV-JkzH~Qe8eW`h{21o_f~9pdOn00L+3wXn-5x}=MY<$ zElsT*e^Y#_#NwP4BKT00i;Tm;5R~V)Fp%^6=;VFg^@1BFV7i7%CZUSY;Y9CaHEN=J zhoyDL)SwcY!HTG$;UPd5)UqnZ_uJt zB7ghNDF(mMqtmn&o4JG7XUL29J*dK7irR*sobzA$bl+4}F2miWj_&N+tW{@rKwIGO zju>#{$=yRq*XzH&)HElfK_W33K$li%y0APIGb~76MN3~>jDoLh#!^JIuQLSph&MP3 zIDBaYpFr`Zoa+q_?;c*>N`2J&N_Ryb5GJG^eAc(J96G$O=-vK}9K2q_5yBIc+3J(2 z_$8{6`N@#|jH9v+^iEY47F}&TNpbW-HL#&dY!ZoH3JjaR_qmv3G!_o&yZT9Gb?rsn-b4g3f zwIC76q*z5TXUf$y2!|BPbqx;`T2JhvNW>8-4 zDfzRT8=nUQD+FdCQt(1B4%OvZ-9*2`-o_Nn@ssa=w*s}@k>C9_buYCof zLGaKsXCwgotj1P@RX{m9zRUY7!!Ni1LePOT_?wyJO@tf(DK_auI70%> znE1j;&&Ybe9`)JaJ>uuJkJqFh0W`)uts0P*l+C&o#Q_YZOcYXCaPo_%lOy>4m!iUr z|8GTwvJV^k(;y%cDZM44mzUnc!1)0np8FvX#R5%JSv>@^A;KGg$m3q4uM1;U+5gi3 zQF#d5U*j9!dVD(wvTM8B2X@NruD-Sky_8lQm*ovB(wUyz#WII{E2iuvoMwt`z%$Yu zu`qATLyHi63&-0FV})iu=P+~JGX}e11uDmn2O_^ux*#*gWZ$EGFbv2?|9W?^1k$vD zf!YYT;l81&W)XWa68JJuhbMt0im@UB13ClypL_2eHNstdk-M!GhCe6-={FH(*U8UJ;;`_7D*ij*2H#r?Bz=+MfYy} zL$W#o(ag!dMn&@A*h(jY&BxT{zy=cD0|T+z4&oew4zD*agvM!9`}gftdK}9eVQMoC zE;($md;tbH3Ol56_z#X^%YXDNyY`|8L~+zgrh_3rVZFFU9}0e&(x-nL@34SW?n_NZ zke-VaLP4Umfz8#c>R^o9gBt+^kPhCv^oFLbH{ej^LsMd_inkyC#xd-lpmh$zlT z%We6=5F##DuT%Z21=w@{+O>ddPp-xD;02%`#wX+ejW+hq=gc4gh#NlRolc?Q3Is>( z?WtGi>OYizP3&ARt{NZ~&x>D!YeHICXMeqJYm6<4X>%&{gS&4r4X`llY=SSJu)T3S z0F0SbY3d1uxt(Tae3q}s@{C3#pQw1{6lzaF?Ok5A(;uf5;SDs^FR}HV;5alXRh2Hiawp^cAWt zwZbbfq1Hh!X1z?#{LR?uRw z(K;l&Wy{;occlBj?JRkIyWWOidUW*8(ZaTl17U!cnbY^^LJDH8i}5kfWTr*CZ?0Ue zp`wPB_gP<7CvN=Ke(v6~px)b}xX2F6_{AzR6r-xw=6`&@OT*;zEVcsWS?R550==R~n zP;d;H;aVI+X2=aP!(aIdzc$@P6bsbqMF>Qc&})!G z0%(E*!@|NyXByp0k`I+`54t=?yuva7W#Hct1=`yOQIk%{9^6)RFt_6 z!HR!hVt@e=)Uve~o^lHooOVG{h&FX$ZjdgRdSUwU;=Oxo&OR&WzcG!v2w)4Xeq%yO z6EESsczeJF&7khbFbm9-D8jjh1U~U0xJ2Go?KimxlaJwXITka+z}tUQ4=cC))fZ+{ z)7OK(@cWcN_1`>0F)OAbFNhbu{?YU_fg>Yfn;Pd|#r<)9kTn`K8R1wGA1q>TtZF@| z*YF9|(uKrCbCugRe+n+4QkM$9t+;q6I{MN{^xEJLo(}`Z5!zFMR$1h?$iWPORMXRs zR-!)KIehNj9UEc5gin*cGI|~8qXeaFpiORAJP6=S{&h+1@Na2Am8*69w8;mF#yYc+ zztR?Et4nZ9#im}xTj}XYIlT=~;dJtoF@0=w_9sIqwuw+1K7So$OoxA;ltE0lsZY+oU5pOHp-`N} z_q{*-lx3Pi!k4v4n`)`cY=Q9*{?rm^{&_HR8SgzlrRGxOdwvtjJZo9y6wKX%Mqlee zLGyL{5yXut0rYhaC;-#{!e!*#6fH*CJmZ}=il=&VS}#L<=I-{|$B#609@ zLNN4a9Tk;*ZjR;7qU&UL*;v8|f4LTP6B9f=cE_l(B(+1g714E2xt-aK0yR_yaO|@t zB4G^P>L6b{2T>YDaovmfd5ey@d+43yoxUC&6J{~s)FFaK zL@@r;iJdu0dfrb?ZpI9PiZ#|l(z>P)brCr1#I2l7 zAc+leJ$*;4J2JU4OqKH!SI58Cyt*S?PB$d}jN5$#C~AZfoqTvhX_b{;#>Olc z#@uhn#QZTIT_2x}ig(mrgEq3a{}5U*kS$OMa1KFOSOFGb$DW5JMGa4Y@q2r{ z7fLnE{|+VaMiJg_!dHpo{T}ZW<8Lx1&a2eU0usnJV2*Z0_I^Lq)7Cd#dx_`U zkkh%@=m?8Z!@0{UD#p(Q(d>(KDW7oz+tcuOsr`J%Gy(_WUPX>9Ime%B-1GR!yE_qF z=Z0S7zTXqltlq_K%@esd31Q~82y28B(SCLFOfq)mIFVrpl5AZrlxD#g(%3WeYqRUe9Acs4r^IvB>3n! zFm&?V8Ur=w_}aPZR~_os6A4cmY}VJCxI?%)L`e%-|2**^KbF)L=c0qoT)6Tnf=Y&i z7w8*kTzqae{lT<+ZOk5JPS(9lZb|4ZDDW$Dul#%!jq*_gx=(IZicpUH6%-r@FZK>_ z$R?3_Tkn@^&9~dzf%I2x2uWXt&y%&-VvRC0@Lp#M3z0q6WefXq(9T}pMjNilWk`g| zme%*>rg#dnHtM0pPH^g&1NNAePigoB8KBsz?Kv`W^Puf3^UL`V=k34f=J&NabKmDzs*#<+-litkb{DXlBTSc~dnKFTKB!ome_B`dmkr(PxpWU9! zoZwc(nN8bp@#-be?AP$&58>=<_!eX7?)fm*l-Ii;Eo|@5H{rHpaUuk%cDL2i%zPY= zRug5LVBe+kE!Bac9VN%kQBKB^gQR7>^H)ng8B-_`!}h?#xf}|T&j?8r>95 zRsT~L(_&sA1b7MS{`IeCU=yO24Ys^qH7pV;cOXFfB9kxUONC#HLq5TPQI_sMjR^pZ zU^?;q${+J$RVPXfAkx^1PyT%lc76=S3CE9%V<4cbubbTflfJy`t&*WtrX_Z}*Z$ZB zqt)hzNPUegk=&u3ep#L7=AXf&-GqzhrWrLi4Oq;b}cGGQ=tM3p+I9W4=By7t2Bh3E&(ZWtM;_Ur<2F|!4p@5wDP@_ zL2eXbnWq)D)yx={NVyCQ4oZM7!44J)AqGP*UKA<`q?=y3wzaYV#%L5;THFRcyJv8l z2~W78GOY+-^Tc;y1zvj$%b;HKN<4P37)S{8_f)hf)E0Ava z31V$wiy9eVh;oi82pK|CAYNra0YC$5^K*&{&mdT0cPRuD&c)JH-ok<+mj~f@=0Ie1 z#~c=UDMH7U4JGwC46Y^68lbkx|H-k<@X;Ewqo+5uk3>9C#8? zllw56l=p;IYbCKCR0i4qywWY%dCLZy7AQwUty>=L8eqm?E+Tx2g7-g5rU7}?B!gCS zNdVa4<7fr?O>a;c`gB@>w6_GvZSy?Xo0AvYl(-T2=$KXkYaaNK|*^BU(S9bU|n@jA6^s(L_hAfocV=Hx((Q!D8VSx}&on zr>Dh!pw;~v8wMhbbjD9uZWno%*uOy!xZ1O)W%P6;;qrVYiWlCo)5F4)7>u(?T1x!^ zm+s7?b>}3wZj8p2)|sNr;UaziQ_s46 zHxHFd^L#n#Ax5U-8y*)e?dSf9MCXNHk3Oip1(#q7>LM>768377G#q1Tro>%X zlnVuFD;|e==xLTDX`(`p(r20K_-So?T3K?El250lyQ0wf#4ZSDUreEwUWgb20w!n% zZ|{L~3LgX?p`J+_hgdeS%O&6CdmxGi&jAk>UV5n^9uE&T1xW~j0{>6U`!tiIejyAoTVde9!uQY}G^z7|spJQnzq$mx!X9-EOH|D}x;j=VpGMu1GdOyaN|=$@)W` z`#C|Q1~=FpBi9e6j2B0O7e{{UA6-FKKDgB$qM$gnZvgs09(x?AYQD*lAEeM8V8 zAsM_2fbry#rsn2W$gOCsYd)BNg6S8mS9y4J^tC9uMfYn8{E^m`o6u@vNzkv`fs+); z$sr@j06gFdU~bAl$>ML$ATkR4lJ5n4W~Xx??XyUzrFa&8oii|9F^6vxTl8`<6Nqp(BF} zS{69y{Pi)2ZP3Zn;%WoVH;LMBQU1ZC4#()8;l%u#A<)`c27w^z%p=k8Fd?Ig zGEPa`(9IU2u)F%T(Y@cxkXz+Ni(x+UB?TPSKhrp z>S+(+K5uvmwD}X11*ZwDGvFN#X3RjK)y{+{{XN3muT0=bMVhu2{@CP|g=XD$9m>9M z8lH|K5i@$!ZCC??+RFYS4v!PNE}O3lzMG;+q3lbm{n4ry7d_Gfd^H&|~*52+U(d_|{lyG$}YiY4>#j!w+au)Voib0J2 z?5^_tnq44<=&L%CeG(=bWXZe?itV`$r+sE{8Qt*Im;%hKl9{>ZXs( z?%D>CQ5$rl>V3N^^>A2<(cH+3GM*EQb!g})zjq`ke^Ua?104XXlnAqO5$ zOOT)z4oEJ^u-WhT*y1)@%Nc+CAzDznUcx4LVSnnjklq&R4ZIhOf4in=uQ?5~A3b-< z1(V;6_)dIK65XY#CjuNFvM(`>Zs`TDqpCN276!>BwCSA8JsgB49(PCX1101nas++g zR2N?fiur|-edW72`?^JZF_rAlcE3S*9&{re^4vowgd(7>Ma|1;i6*$x%6Ygv==yI! z12R8*mDu)FZwF+z-y_&0JctRs3i{BN9zue~QdP5%rvGpZTfL>ABRP+|UJLKKEV<8( z@8Yq+r`*FE^dNY0%^Tcuh*<*51@LI@xzHK{fSz*Hx_fhmE=jcwB1+qM(47e`J4kIkFIBGZ3h*xHLs7Y&Z;@*feUdhxwhsRZ~c zy?ihXk3O-k^U!yMOh?veyJM>b&0eYw$82a|n5$`zp+3uh3{%zCV$XS!;2@$NALID- z-HFB%q#LL=QGw<{^M_7PfhWdst@uIJUbVO%tc({!u0_Mlxq?f=XT{welrgUx;M_z)k#NU)Hc#MYJ1)H|83c!DI=;sw;c{W3*T52s@SQ}VRH87o#Cv%s_IT- zvaZv=^&7-D$jpnN#PAcr&`0ZgvdJ;P2WxnHTIu~Egc*Gz(5ZXBpZ0oH@M&VXy%)zk z*M0b=8FZ-jbv~U8O}!aRV;LmNHbcyc_zVD9tzZ|c=lSsMOW#T~TL1&2H>F82KTTUO zY=RU(OCo?h>j7&5A+`6Wc$JM6=u0Bsg11Hz66UQ|p#t1xo7MFb>yEtf7?@Z|bW@j0 zd!Q5Qx_tEmjJecy$elrmnn@Gkl5}EMg?A%#19qTtG?QKPPjI>kDE{YuWhnr`U=e|(O_v6reEiqb>Y;ER>@|6Y0iPi-MXtx?S{@Szkr7RU z4(&Vyjzk)v&JCPdWi^Azk71ck;Gzr!iY|1g>O;dv4~hW2)@A4ofYHrZ93Nq26jzj5 z?9PASC;9-8=@_B_%B;a)a2bf}lzEU6oxb_IyX2M5Vq4%xs4&^YLlBLM3hqDs+t8<5 z!U0dPtD()@lvs&Hv3+3g+jS8+E`f#@o&dYTx_ygx)J7seVwD9=K`V5S-thbyX9gDm z`a{yZG0=%26*tPz%)SXN9RntJxSnNsLDg({8;PgcaD?1_XU#LX_YyME+zS2Z%d3y$`5X*l6bf#6%g^7(#bRqM~}e$G>PJqqN3D zXwwP&{KeQOkkLB~FgjXQL{$Jpz^>61qK#&p`#@T?>-_YjFYM~<36b;`?;a>k?j1*c zeD>6Z!M^3Q^^1?8>Y)%gil`xu96=RWC>k7>`|mNLdT;gIorM=i4%;E$5XyVr3`bTW{Ha6vPck z(REO{s5Nphd0V@fVjEmO*Il={WTlWJ1f9JA4TE2Nti9{IzZ24qdh&`Pf&IwI3gvHNC4i# za~SbA4U1jsj1Y~lC4Z`=2`KDtPv_-y)`iw}ztG(H+I8zBAhF@=?u*+rgLf=POcY9R zZ42|+Z#Uk)>33;QUMNhVXr^d`;W6SxeY^0*+jO&?1;rsCPeE^**n;Pk@>9iLxVWx*nF z*$)s&S?B4X#T9>h8QMizJ~hCBz4^CZEwQ?rEwNv`sbIatriW&%WD$ZRl&Z(qHNi&Z ze2OXF4W$WyRwN6hO~W%WoPQwh)w5ANKyw!s2=dJZU~G4%Ler!WS2Gd6egLaiKNIJk z!zK`wkOSbl1q6zqjjoSK+JT-wBqAy-&l5|a*#YeWOJOO?x#_>jT$>T*`7=9Mq{3GN}>P3G6$ z!Bkg#V5f#mW zQ!g(Jy|=VBoBJ^PYzCd{BEQLUaHi`UGVz{hOtUAEZcFP0DD&f-6Pf<`;9Pq5;9+O{ z!s;~S;c>&@?TJWUMO7g`-@R)i1*y0Lp3%(GGPy%YfPW!qL zzlN_CIps!lI8Mi3bi*Cgx%K0d-5dk~q8sBy%i-Cp1@&_8!cYnWOO^iDL&b?Sp!d+o zN)Ql`f*?bssj5yfmc%>1&U7-^X&W3}+rY9@%v~@ZL9!@A5u`eI%B4NH67Rg!+|3Y< z!8iPaFD4ZBvKC=Q_~j(wbNar~?{B9dfDx>yx&=~zz%iQ&^rQ%QalkpdWC3_NUtaua zwbIk$V;qo(Aot}EW8y;}$5v)IMBwQz23F@Y!W(Gcu~9#V1vY2|T?75r)3JA;BHIzY zTu<-eNQ`abcwi{B+Og2qP$4dtv)_=j_2BtcOcLU0tsf~ z=}^w&t~OM+*rD&YV~^@t^h5ds zHZcP@L%LZ%cN*!%fti%@fB5rCadJU->LAJ%gl2;QKtWpD5G2WFA=mhR?K^zmkKeMt z`{H`v8%MRgkUw}_+*zhLMy#E|4j|Ff0L^1q2N@Lg9N6B?z+Nx}*|wP|3yt#-fW;C3 z#K1~Jwb*!5ghZ2aA?Dc$~6n2 z?gLLB9`FHlXzT6*FZ~?$rIWw_BGMBGk?E zFr;d%+YZH|7z{X10mDcF=iv6KML_71Ae;G9j!xu#J^_Kx)o9U$F;EuC06qyRD=R;R z1~D1hr;QLX6+HB{=2iOGO%^BDOxMq73vhP?09PjKHb;I-w*?VpqwC^);6i69=xUy7 zeyJb4liWMmK3p5MIPUa*sc`q_wB+DwaRqcLP}BHnX`Jz<&kXvPt z?(6e=6*0LQ$*BGG?OeZe>;Y!Wem}U{O5EXsPJ*JUp`|Fe9%Wu3Z5gZqxE>#Vpe?8H zG=mrpbLqM8QNwn|Dn#K#PR{J?>DXe!;7jy@U9UA-KzA zbJov8CYK|3HaU+ZDS6!^39vu=4=#JRKkIS~AQct{~?k9`l zSdbhWxhF7gux4-p*{Wo|v;w=ZEA2C!Kv{Ib9Y;%uR%@yV(gI=q$+U0#<&XD#KI43J zqJ3D5eI8sh=7ypbSAgUhxbgef_RzlG?*126q24ZsgtSSH;|kpjx^TT1lQnz1vsy^RHBqNoxJ4wCs<($4uC=d>*r3I zS|7$rwaR?0Ptezu3Ff+KDt}KnI07yq(p`2D*m=eDN}_<>Q55cobz%*y=i*{QL{}RI zL)-0=W#8mZ`97YVEZn(z;Jd59k*Yosp#CR$2u{^>rv0XmtDFupfF9J11DO-o=*B!Q z6K;yZW<2>b!oq(^7op}t6Y(#wpUMP$01j`u{;&9plokQc{`{Y2$ZKR&4j91=;fyB} zT^LSv^ixoBe3J4rDmdfDX+dnGUmAy?eo8WhMFL*ECDzNL%8Jg z1elU7A|mP`)ZYQc?1EfoeWOyzDx2WI+W8eOe$WyGXSu+pk@IkX?$76hkGd1t`#(J_ z9`-P&Lr?7C<1+IC1PpoKCO_D72q?;@YRx4;tZnB%mzz_BGvng0Xgf=uZuDMAF%n1B z4!~p!5Mp`3k#{`_(sf!POVAt60-xXT8qr>tLoivs(m?Up#3i_v;-WXd(dUE!?6Iz@ z;*;_`d8J6h3obTO7;zj5(d-LJrJSC|^=S4r1B9OCCrprX$aS|DPchzo_lN>A5%*@m zC}su&2Ez7_j=hyY(msG!U7;hGzCoFWPP397KH4)uYhQ3hgE6MIcpvaa3qnFz&VJ#0 zAyXGXwqyp1uM()dErX~I-Mo_zVSbPjG@ivaF_VYV$BT-Ju%2uMgqa(TH!h$EvsENJ zP$Na|fB5ta$!F4`633@Il>WmJZwDv4C|YMr;Kkz?(CV=#Cn&eEm=TzltX#kxDG%?> z;r7wpHNG-H$2i^&K?DitgzW~r047*VvTCo@$8>_E)^lpPc;16dlQ*u|_eg|w{pE)^ zym(br6=v23-c9pWNO+ioSb-L3QMER90J@BpmeLxJf`14}Ls6s?0014zf;A0E`H1ZT ze=eeE<{*OK9J-Ve({wSB)?psF#}32sP~{l?rdvsztUR}(iLDY7EDGE?6NQXoF!T-I zZ>{37W*^W~a%DBp#uFLmtJmY%94oF@>~_Fb?6SnoA>`ALhdTP~5QnQirqm+Fb11q# zdZGW+%8goikAp^1mYN3)eJ^zuI2`gLDgMkP2^`Lv!$?qClDrs|Y#k1eiK8 zn%O%!bYSTRUF85beA?fU7Ubsgfx~OabSPs!HEh$PVOK?%F3B@iG#SPjsB3YX(yU=E zJMX#C(SuO#Qkun;>*5lz`esb84(kz>U}{3arw-w#L*myl*K56W9ca8?|EB+8JCW%P zz?zO?G?h;C2S%=qu{2=t%dcDs7rHeD44QM7x%i}oJ9sXRLh|k+#=TX+Bu1HZc4F(s zO+FaP(a|EKGbMoZz=zH6bIe&69c_kKIIt2%jn&2O;K0_HQbf8_cpEg;)C8oJe)FmI zDN81>!%AbfP$qaC`uJT_Z$Ds`Alnjb^5j8RO#8D+|*Ihce7U!q>-L zwxCz&jFz^0*QLatl7pP`)sf-==z;{F;5a>eV_w zU4;v6;IJ+Sa$LNd1+gI#E+9e0bXl$tv@_@r(G49CmH$Xu7sx{(h0{gIpf-mHhfNcJ zKp|9c>|*5i^Mkc;%*g2>zwVCU@vN7R{r?t-!%#6qCkr4A47mk6q<{&{0Hs`WI4re& z@;=J%{*HY>OnXDqh~&9wv**i!1)cw9dKiWsXo-M>?}2t#0#=>a{p4;;131nC;!8=8 zI>9jJ0L9gf0u+$C;;%6WC7vQ{LG1pt|Cm6iJ5iX)V!)<)lmBi&_+2n=aQEil@9ZbL_p(j}$j0{a!fiT|#KTtQBNTgRAt7&Z_$_`QL} z^J$<~eFoV7Fnq9R*$3Lyh0;}!`53B$lNd=uV>eh_%i@>}4Iwb4{U2bvuYUx8rCk4Q zk4Dyeps_})RB%ZkjWq-IxE@G#WbX+^yMod1fjj*lEHK6u!rw~b??OOaf7L^f{#@Jo z>B9#Q3*Ma?GXC?8OaOLdXh<{$Kd<1--s3C3e`K35?`8S@{{$DBCp-W#aRyTSfb|$T zZqUMx7L8*gNpm?!CItD>io26WkM@E5=?**fE9eQ(7WwR2X&^WVJEU*J_a=LURecQW zLG9YmWdlo_(7E{j7myJ{Z>;u6KKj$~lv~igix3jhp&|VgTF7m9ZsZR}D8+RQtKqR7 z+bB~@D~|BjH+;VbfrAtXWZe-qSF+Lg&Ejqp&8v$1i4DKK}k$9e>VKq`-%MWnAE2I|F8No!428W8F6 zR%rr_1|ZkU?&@+5mL9Mt1`P}hc+HP@+}}Q*+%LLM|2e|Ag03f^ss(?}lH7Ylg>Hj< zwG+fXqHRw%z=8}8d2LX_5;lP#VQo`9QD;q~Ou|Zx(;5SP=T4m7HHciciDj*lgS+n> zOhTpm@k(cM&I7#n99$DwId7WpEq-?dxbsu?6Td(I3Sc4@ytpUNR;=u}?v=x$XtdgB z!}fqnr{9saUVtQXm*sPCi6i7L_R?F_(*Q*6P|gu+m#(zP+NjJPb8`C|y?VI%GEsbl zp8?qyVRmc=TM7uVG1?uc_A|QeBY(t)l{{3{_=2m4gmZt_fT+JP_Pj7GiMC}fukCrN zlw9^t6RY&nfzM-1mVGDj4U-$O@46+w#dMsA&LV=ce|3|&*zgeG?WSY7%i4xB`unFdx!K!_1T!Qq#&7mKynUuvTvgP@2 z!P0)>m%FBBm6t2J)@Y39N$#}Y^Ma|Zl@bK#8wGrv(D0S$vT1&x?frQYGDm^e_RG zFcBlOOV*-i8FU>yE+gd*J1gDRO33IQa>g*835c%?F;iF^lU)|}m#XLAm56CuF*R#R zWMkWmTl}X?Ry9LpBCg9Zjj6~b3rmn?xOf=d_)Kcd4HlfuA=|e#ey%Ielo`YhdX;1v zJ85w^9AzDV#et$vk^xx#hK?Z;g|#}^lU^W2Bzuw64p=h@E7780`N*+3EkN`LBz^7l`R5N#PdX!R@X zii66%Q{%#m)wzrsfO?9T4-9%%Ybi|O2 z)Ai9Wz1mU*Xn_p0jL)FvbbJTUsB9kXX&>f%+5+S&O3!JKzTmxVL20pJ%MKG zMaw7>JOs_t=$l6%5bPX^&H$)^ku3sBCySt?cS@BvpRSH^6lx~m4&D1==HPL>S3@8VwVIX1+RjKH` zK+bC2`jLO({iEf}^|mvnexRj!xUziw9x!`^o6*ZZucAx^9uck4W%nZz%(^)8&_L0r zoBMRim8_SW%E6*gEwtRr3UX+f8lC|fHH-1n!FkyFw1~D)a;4LywbAPt*&RW-D zZ%_O6zP@nc+kH@EhTr^zbiE-en~13$9}b6`>)N0C9OZSJNSm>-Nb^URt-)@MJ&nsP z`;Mg6AY>cAC<~3<5x&o`-Bd}ELoHQQIQ03a+95=og1z}HkbW{wl5Q(#Km~Yz$2w}2 zTf%vm@r+P}h1$~#u*9qsmi5s-UAKpg9Ycvy1vX^~!3{Z+Xv=-*SIul3%FavK?3rhv z0rRm1piV_@u-m@rrHA$$Lz5y<`!FyCZ5u&BCh;PH6%E=cUF$zm$3YdO zTnahD5uiUtVmIV}(psRO*y`~Z8#Y>a3adLhl9hGrZQjLJ8F8jB>~;WsbZk@nZYt<$ zdc?fNpVe>*SbLU+>V*8pd#BI*gw=Z!RlB+5JZcGz3i#?@#?J?tkA$GS8$Sh5TjxGl z_IrHq%t|tpE150Zx|0icVF&Locixvf|M|YAta|qWl1d`YjIS0vK1XV`&~lca4v;>h zb3LUla4=p28odyo-*CD6Jb<`0pGeTES`A0R;sl@L0GSB)^i&@zuF(doOp2egGu%eq zy}++Ge}@d;Qa_N61^IHe>tFi7+EP)Hr2^{#_k(EC?D>TfEtry>@&LylQ;UYL(%~W4 zWv)lSyJI2uhn~v{r((kCb#PQ2Jk#Lqz zNa!$3#xJ)+SDU$~;*oCnXYk=w{386mnt8R4>r->^c)K!4Kd;{wLW>5xwZL_q;#a|m z)>5|EgEMFlD4>cu?tLq|P-C1{q#!py-CZT)aIV|?V2SDOE@)4@d0RSgey3AYoq9lH zRk}^6;!($i6F;N*&wu*8tifT{ps)1u;@X@>1MWt4Azs|HM!#coP&HY?Fn7Q7wSfM| zn7a>m^jEA4eU1G0z4)&>6puDsg*}ODSm~q`vvwIBpyU=Z*Yp0A8lY0FSQ}kq<3#yO zh6==IPUqNz=D1z9rqSUhZ2B2)7ps>v4LGv}T!o*sJ)b?zTLuS|to1AKjke>dVVhr> zsU*`C8jrQsuTsvlT+&b2S`M^C|&sF~Vo2C^aDi1dA^|r*2%Uhu~ zPUpH5f9*fsmZRJ~SNMsST^I?~56@s8l_u*dL}Tctn5GiJw}kTL!lmWKNG>j>uiVNsv`p%FSK5 zDgGZ(QHrWyDabpmp6DMmG&1@W+8AdEm_TR(?qkrsg3VSezk^l09~M4N-Uk>Sc0~&R zRPd$cp~r38miC<=Gz>bQQ*voV;G4R2Uxi=)z3IeVLw>wMda*QF_6#VZ>?7UB&1;WS zx3TP>en{|KlZIb|mMKu$c)b0s3YG)4Sl(Vp9X@)HeN^VruV${mjawdk%LqS^Tu_iK zC>Gi9KJxs>V@;U{zYq7n6WL^an;7J~cEF=8@pH*}msXDA}UD($H z>!kXRM4Q(fvYpr+uj4cy(j+eWkfm3h5*7L6Q-3YUlENe1Vih$d`hGPWnI0IUs|DTt zv?o&Q{U_V1<{CIq*?Zvw&J3SU*rpd7T@FLJb#K(79~wF9zh^jh*4807_kI1xLwhg_ z9`Pp3xA!O+#@4CkSZ#c-a>sAwZk0LVD{Q~GIw6_btMdGshZQzKAUULO*n+3bwQyU^?Tl%+|yzjMR``uVB_lhL=YYDM1-vofuQSS#;`km_r zGr!{XA8I{<&Jmw}9d7-6MBr|Ej2Iq9rNPT+-5FR=t{n`l<=?{})ikTcv~w4yzvpSMik@;EUO76(hx`uk(x^^oJudl%E-PU#*t+I-M zs$|YIQ5WXstL)$;6?#)8VCr!G`-}csmp9Iez))LhmHb&j;!bjHx}gwM>+pS<#F6T4 za}V=Bk}=VI?75d5xjxWw<$_X708YdU-|tdI&%pxdMvVA?aSB^THd+V22m!y*v+4zY?kG4)Q2#cv5613ZI!Z= zi5SjyYM0dJ-j?$@;ogx4T!qP@#l*8%YW&DIXsk<{DT7O)$wtbb2anQm#8JUVIKjDh zdXp7*=;xK$Psjf-IR0Vgp+X6yZ+%P`{(QbpbRcW_0R2#hwf210>BJM;n%{lBuqhsU zP_)#qDUB=sKHW$^?S@rCN8^kPKSXZe)kO$`r={+Q8Dm3b1I`aH>n`xo;FhOw+?6hn z-wq4g_e~w0Z2rXaF*m_@AE<|ZRZ)9HR+Ma(+3hS|=5tQ!>sL-9XZu%aiqiy5WQN79 zettnmi*v@ezk*ZmHJ(^Yc2{3Vcq#PphG@(>gKaquF|&RjKRmvXbn5K+&6suN$C~)D z6hC4ubk1S(Y3gmS8-6B-UZI<4OT4O^*(G)NxKs?yCeLZ6J$z9+hDd8{d2K>T*l@t1 ze+jwQx_Q!Lq73EJ%G^!I{8{76`V*I8M!CYJU_%o<6{0Tru6ao zfLZARwb|ea-H(=f(Q4sMAm}j=rZ}8*y~*D4GiV{WSF( zG{RCjBWFVBc}?36Xp*mG$-BIr$5*hOXTst#3RovfHGT)})hg%ujE`R;l4sbN>*p}` z_my`g^eJe!7q*&@`NyZ&Q)@d{I13)m>hfJ1vK9)Q_S{b3nu$K$=Ud;!+iJSJ6vEaL zb#}in=Fp7l*`b^>oPk?;cUlG4XgqE41vcvX0log@7nYsk?w6NuXWZ!2pXK=JT!BkJCkk$+sx8VZ! znu2Un)qi9wZaW`)nIJ)`6^0iy#JbC1C@r!gtubb76devNi3FJjOOph?ul#Nb9ZDN> zWno&iV^NVqHd*R3sTdFZ?;=x6D;oc4{g z&H6)sUHz-X=SjJ|7`swDN$h@l@?XyIFjA0&ti1WVJ0yV}25-JM$-ay%CaM|@eV^6yb}|Sc!~cVqyO6Q{^nwcAuFq4kT{?%e`fQ4u{3{2bv{Ja6$yP{|2sy5 zVwQ0D$L1S1sYYzMG~`JEodj=)nv5a<8mIZXA2OBY0ibcsi<3~t4iLnG@>%^G9My3rQ}2vgXj6;DwJ?hbi#iP!Kry(pDXOf4+|yv^0{9 zIQ>`kL}1qeNMe=TA>qGc(H9MZgI9>fNc|AjL3cfQ~JDl zEP(ab`+4e5nerZA`rhO3(}lLUr_8lAigk_Nh|vD13M00RV|mV{6d+lHlN4vbqA~~L z5=xI2gE{IPOc}0U=60)Z1$(R$04@&q7Ic9A7<7IwnhRMKjwlU6u(_#8S`+4-@n?^~ zomc8~vp|;fGv{`+L48UWA}j_#*SpyiELSDa&HB{c-5T5j`(g@zu?C00=m$zDap<^h zLoVr5@00TXeeLV0zyC+Tdn93WJ_q(yY2G&g+HFC*Do;r^E7oI^S2h3E82D6FP#>5A zmZbvdX?VmnG`efSZ`J+%|LW=5t#}G@+>|$LSDvMH*M402Y);0B6m&r6( zOGtyQXz7r0-PW|pamiM0p;;wLE-4c!!Z2j`*RGLDa?_=BZufNRuK9~3Rz2bW8 zC8z^O)}pW?*Kxh3KzMO?Z^%GZJ|F5WHdIPXu*Vsbg-86=etI#yr9ruu0U0`f-=7w2 zP`^wlo6<2Nmn}j7y9qe>UMOffC5TtdKql9{JERjBmbxpW-w)!G)fm8cI$#tUhPDkC zzRkPPanrn>Gvcwf|F#>Zx(&z`3|0{ zZqR#Qvb_>@RhpF|@jf$%XgG~-G=f3I6hKs!69b|aJ|fz74 z`NA{EWP%InF45)F=1ab5LSj zx{P#jvh=3LSNFFviTg1jV(}?stXeK&81e)*EHZ<20Jp}CMN~Dl^R@4OR9Ao|bIy5Y zNkFxr<%Z@#Qan#A>y7W8&?b7C4!x}+adaz$ERA9q8j#WQW%uDKeSKjjT7V5>4+8D~ z(thm1Yv7u*)74ERfLIu_Ej@`xdH_~TkvPE>+gKseg`^n?Uk*zyewgPdDeSTGw_clq zidYf_TYC%a0M8X`pK@Vz#bN||CvsOzG3ws#prLQD>r&CJwl<47{Oa-0RjukHVFG?t zEtq~k-W20mbutk=L-fEw2{37;x=lQpq>7s-GwFmh3Ax%SFVKIx>-V4c_Z~%@T12H(Ar-^1rWx0h24RG zuk%>N1VUWeAqNqf8i5RR(cVLQWJ-#FiO7(IDPec&W32xLYdE#~k0|LzoE%0w<4eBH z>*r@qwN)+Wh#yxR4!>USLZ`GO%Q9bxo2>#k70Y}-6j{bD=j&0|QkmwOL=b|oAh(8W zO-sKmA+&oFxpHaMrsz7TetoTE?%=e5B;mCRuWsI#saWwddqf<35JW`wB)bF*lR(kHd zr{0cl*g^LN2;55T)RwVGffVR7uf#H>u}~i$70ATCZR|+#k$UG|=!z4jFU`rAHG!KU zGraC@f4k2+i^7tKICDe;8DR}X?zI_XByEr9`N6c$nTIulE;th&8>Xr(YkE}AG6)Z? zoH%*qn#B||OTCwK2;Ogv(vVHN`nbX+LU8TjWTmFxt3%_TeM1=ve6c~kzqFLW-B*8# zcaAFwfkFgwXZGW1RGpju9QRbniV`ViW z0?9ptTbPE)Ya@x>JMn=10T_{?+Z$YzOu)Ap#6SCd6sG^1fo!<_ zB2Y%N?(aan&;GDAkVB>QAu3kpzc`no)wZ4u-yp~a*kLP|vkwTi+^?>=rngg%ACD3x zic~IdFfnn@*SwRp2m{T|7;#ZQB#Gi6jmC}x19J$y&;TPCm6ON`vz_hF5=B^DZHT%K z9O@66i*ixgCY?-$mL`V}L&ZTPrcaA(z=ROO7Q-|c+sM)0r8bgQ39r~*htA345(yYnR5`czI#;WUf zbz5I5a~FFHQKnr#|ErraZ()Ct Ol5(_nwX52}O8Ou06wbc@ literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/docs/extend/images/authz_allow.png b/vendor/github.com/moby/moby/docs/extend/images/authz_allow.png new file mode 100644 index 0000000000000000000000000000000000000000..f42108040bbbf9facb9fff0c320428323f061e10 GIT binary patch literal 33505 zcmdSB1yEIg6hBBjN;;%dQfW}S1ZgBh8UaD1rBg{Mr4a;_loDxCN>UIMkWxZGLZm?& zY4%+8xBK7!?9S}Y&d%)4ym98ryZ65PjdRW?&V8hn=q!g+ci8JxciMc^7!KkZ>@vJuSY|M72eJ0*RBf^rxl2wv9?-oiI(lUN6 zFTW}e7?N}zG8p>)$&@ceomwRb_4h-s%3R0l@IfsB4KM!BhYN$k&kUDM4g>j2Tfh;H zC||MCpF=t1ZlOD;t;3&{XH}UwaPy)c z8F$#SHy>}Q*9Dx^o2<#69lQG;Z+3qFK~_~|J8;3M>4C4MCgG9zOY@PkTYBQ(s%(fI zd{4!$4aQdJC#H>uqm_$~U)H-unBT8V;e7}Hxr~4PB?0TMq(oj5kvgBv7d;u021+rs z>~>!YVoC%_c}jH)HAA%CJk(o_6v27QB#zNAf1z_slYPL`xp4U0`{6HzTHRxB9foKK z=|qwivI5PV%p3jmP7nG5JGzTZYVLJ3>{VKIPQ==Lex}Dze6uL=aYXm&+YvM2L8+tF zs^!|6gN^&Ye|^fn`Y=x=;le9-ojNz`nk5t~IjJ?3o zd3weAucUs$)B9aCYpBgreWGMPTv9aX`@GP!&MjX@w()N3Hl3Zv{`_<2uV1gVhG4OK zE`LpARVFIZEsX6+6ISNb={#}WxEtZ>MzTBPz}PQ-v^!!xH)LR*EFX66Vy4Aqs}2J9 z>VO)|qLe8DGO-6as|I>ZH39bS|!itj*|`F#IXAkVzVyWC6cZ?&Caw%Y;ATj zsSmx5OzTOK-oHM|o^BGi9-cRKuXCHbu|}X3VtVv)>6}}T5L?W@{#If z(FotI^;#RVUKO?NXEw`t=e1@}fA+iiY|Tqb{VC6~;fKfP^&WgFxE}MSp5Vdyw<=|Z z^`YVv*M=MUY8}<$Yp<=lC%SwN=2999+QjF&Q@n3A_zb8_y|z|Q7IB~NDbq+3qFL}< z8NQzHyE|kMBj)tw@p!!pg7rBp&2*6+%F(#dTz!igB{D^8XQvk30Ps<&+?rWfmv zT>t*A=CRgOp0pL18l{FB!i?dGGC#VJ?%z{^&gou9drk&1v{#~qJS9#DB|XLb58tdL zC^K$8tJcnuFN3AIlvidk&Yg;kS2-d68-EYK0Lh3TvBFCwkJ5rDLp_!T>&ncV%ZQTs zZjJ4?N_nlOiq~;d=S;oxE-$v}%j}4oJX&vFH?F!Hv(gbmcS-9+A%dxAXX(qkjyR_E z!@Y6mTY(4N=F#rYM4j&?$=VIFvC1gN(w7Au&oY&1$)0@GD>c5tI+<>EHp&v1r)=!u zDB?7hx3$Bew9uR3&TClFvSxLf5V$j-OsILqVff~pJoVkr&*sLO{NFjWN0Nr~*5@XX z;5d7=v{Q$?+sk~j_3KguQ-Hv#5(7!V!5kGU z(c0n87cIJ>i+Zn;B*Ir(&1?O3UvVowrnux3LBwFXSn=YanD5pdDha%gR1D}0lfDZK zxjhn|%e74qIff?d+k#_o+p@{>*Uwj!S#SKE>CpX@w+xo@yLjw_YNv618wXtJ zADDLK7Gvy!BQgZ|1^&^GUD^auTsyILU1xfCI%63+hz()=MPo8O*a#+#W8!wlRa$hN ze5sQg-LQdaM|Af#?Pc3|*Tu=k`+Oc!8<@DahpwK*&no+!ot{XH?ZU{@u^OhkPq(0Z z_8eRNhAl+5v>VPZIM}%vqa(i6&gn2*QkE5X`c3S3HcqzwZeQlw){ev?>8)?Ctz+Jt zitWE<9X)*J8*qBCp!AKHN#?T83#$}u+yb*E$KU6f95Z0$7L2YCO(b2^joy=cey1mO z!1L_XpGqRHKBFi1fwSk8!Z4pQlN!xi3M%jJ|Gq~hfiWM?Z!VK8Dy0w6Q<#X?=ym&0 z#chQVhw{b(DKsiRQx|rvOw)X)siwvRBD(rE{MHd))VNve8+FU_6N~ZA(!_~MY4qL! z$vfvdObHutiCyOv?J^|2MG~(+7a`1$vXV8Wd+RVF^19=NT;n;$np2i^ad#zn0Gqcl zZ&KZd(cNc#h=PbxJ9h}?kEP=d#-=K*B0Sb7Z!u_QO4apbN}H%X<*{Sq*DZ+lSRK`Q zb+hQg_CjAulmAf{)9D6|c))S3nmarqt5XG?YU#RFob+9aN4^f9$=yHWg}DrI3wpkn zAP$Y86FoQV>NxgBb?t03E|B8Qr_awt=iaS1J%Qj#Gh{#6P*=I;gz0`7Cv*5x;*IBu zzFmKIu!TQfQ{~gPp2gK~RUyO}^V`BUy^eRjy->tAg6$JcFMi|m_co%Q6V~aynr5HIj|<1)Ci`vSV_RZO*tFRQxMrAXbU@*JNX{{_qMePtrDJ z#Way@z&_~?K_){`9xh#g?Pqk$xa>eNe)A?r`i={neHO9r+~yuH+c60>z@{sABJ5+0sLQS*uS(xNu;6 z=9PivGHQTP(yL^m)>T2;MHOFq3_EJPqJo^^ozr;r#ch@!=ehO2>TMpz#Pqdf$j+jyrz9lD+@x}N`vdhSbjvXV*gS($mmt7n z?oKl_GOc>czettzzQ?LQadDbPGeg4q+HJ)*HZ5vPa+vK5vRi&^usS7uw?B)1*Hl)< zdrzDYp^VNwQAx|B5k<-SYLMe5XAOb0SE8}GyQJ@(HE+@MT{Vr2<>6B9G$HF7B3A5; zdr!X>8)z+$l$ZYybsS~(YftaDH8Wt$Oml8N(cfDeFZ?wsr!1lnPOvO+?JPgVjLh^4 z)II}+A7Ln>ogA4Nls3m!^48ibYiq^k@Dw_`wlTGWY*b!0CJz3a<32x^sY#UBiyH5v z4^@+^)n_Z_XRq#2ZE0M-^O5Arix0flsJ~4sYn@SF;8kkOS4+N8xc058liL6JwqR5S zW5)A1ocat-Eyj!bC8_k%em7&L1?mTWX?lniXlB^o!E)UoxGO0Y=lg4Rn5%Y;bi;yF zm^;G@M#XLGr|R3IF7EP=6}Q`VSUer*yn19O=eICt;=dl!cA)A=9UDVF$q0Iec>jb{ zq+fHh_t_Qo^AY3IYAJlYB=@COW>(6ZMara|FD%>5<|srY6*K8x?UVkU-mp7tviy*U zu>duFrDZLDQTx3@yr{F;?oDM=a-WXSUn+b?udiMIp6%pXoLT4ieY4*>m*A&Sjy%?G ze1S!lRzi}Op!xkVM&~Km0L}*jy-g5E_nhAOJT~%8cVa zEyPu_oM>uf2&i@Xf3JU-C+TKOCor-}tutG%!ZlIv zsqSkLPt+HmDrEgQiAzVGh)KrLV^SbFZ=5>!h*$#nHh9644EIOCy zD+!cKh)mT)TlvjNG=SE6z~8 zd6tLytLtMjcgjdZr9IO~Fj4CpgNn7pR0k$6_5yzo|9z)GjPzV@Ih*SSt*eg#BQ(aw z{*KXkY{nL$*Y1+ih8cvVp2~DROQzAnj5wE){&)2v4)>rS?fXSs_irwB&y+bg&+U?< z8;I#QS%pa|X&TaFQE_GJhYJaq$C3uHVJIcKeE(r|U=c#K*OCb}0NuR@CAcG63>pLx zwqHd@{jj{?rjOFinuWz6-GN+ubRqO6?xMbOYl zI&#%gzII%3JSlZ=U}0L_}3y`&iFiyC%708ZdKcj z24t-P_X)YrYwYqJESf0blPRFBhjku}E0Fp~hlW@>E|7Mu2FH{*s+^lyQ88K>qC94PBvBWm+m+x+UExs zNYH^wOnQ4Z02fzoagDYsnY*wI-;8e0@oGvpJ493bw9N)a`EVf);d8sY)K5qhL)~{O z$F@pD|6q7I4H&h~Yr<8Df2XZdEl8fJn77aG$sd5gP^=eSbGMmty`@$hu1fFg-g!s~hjUoFIQbmU^(?UwWHX^d8TrMD`~D?OyIfx%}$H zeWAB5AHTB~0?wV;j+mK`&wNi)S~h0d^zOfi+2!=z`kBb0fPGQtMd&f0fh8=yuPD7?G6Eh8&-H6VG!IryE(U%CR4zmYV3DA;Lq+x7=Vo zB$nBiJB51mV{V+xtI(59robeK)5aTd!BGv6>0Ud26I%LG7}r;SJyXi4H&dFs&V6Ao zje`QGzv}l6U+4w%`I(HCsnb2tIq6^=>+Si^Ti?B>J`EKc=oM}NoJ@g<=>mMynAKyb zOeTjzY&lwPF1y<-27s1rM3~}igema2(zW!5-<80zH+H$#nr;@|7|c~B7QO#@M6CA! z2!*5?``zWCLvk&rv!q8L+O}18J3ki-MG@%PTGiNZ(=mwZo@9pFMVcmpW@O@ zm8QdG?zs+xDConN0XUNgB}4H2l-6&k>7hqaQBO`!j=D=tYQ7TZR5|pFrQ!9Y3Mv3Z zF9>D)avzXS-4^QE7ehNbhOfU@!vh&d&ir)}MRxEBBm zOfM5QP$DoParT$c%k)jjH@I>d3bO)?`ycHR0JCT-hw=MxdHVf+?c_1Pszd!Y%4mbGN+^T)5@xJ#^lOgAgs=L>X zV6TR>_hrfQBc5a1romcZ zlJe31w)^vm*7BVn2lu;H2IQ=Y3A=F{DR1=LKf*VBfHlyYCEMJ5r$g{qSmEWs)t0!W z;_{|fdxABm?c`dxr|6&2-J4b7)}d}ZS3oX6HBn+D{0*oEHDJgLfH9033y*Kniy*>K z@W7ppIOlXQ?%Yh{6pE#nx*SZkSX7vKV~t3sV8bNZjb|;)7Tp!uT{gP z;nL?VjgKAj?tQO!JKlG_f)(aK$Y)r>&|<3*fv|?jm-Ox&?{D6CW2>|Dt*S7I@0Kx# zW;$2kej5`?_WeA`$18T4O!(u}pIJQ?`>*;dTqB*f*L1}Fc}B}n4`uy1N_5K;k}}T7 zpUtPc{8=9?{m^yQUEbvi7vkW_Xl=Mnw*+Hes+J4JFpI^e3R}S+SqJEvv_P@j@rH_$ z)bEthuRCodSM(BPoIATvq=hy^eLM^}!N4sbLkTZ^K_-2~e54Vw8*V&tlfhQ~GF=Bl&HIsho`9;QWU#;|J zNM=Ud!n-vKEC1$*8?4A%6*R3?nwdxSB+j0WL$8uw;~XppNeq?UGWj42b*jYJ+`;`< z;;CnW@uTC;c?qgyBJW*K*jnkwGwstzrIl1&%6~jf)W#}t{E*bb8YgwZW&IhZk`eVL z5L0q7lb<`MuL$7OE|5$Q+Ej3$H-;P>?yhvnX?(WuFnDXPC;CvZUO8xmo~3dGS7~!z zyV6qeZIn;#LrcK+yU}6s zL0U?9;}x?-qZ05$H}5YGoSyaKey7xLYi+L?Y#k88!KywCgGF%2QsH6i{-T z*!U_Q0816}1gAF~6>6eSCF2E%IRq9z0lPhItW-#%tEEaki0Bn2_LpArDf!fLi0cKGa@n&@aZYG!W=499G?qbYi&siSU9mD?Dz#Vx`HOS_4 z%C;#p`LiKBW5dy}cLAT4%{crHVWICle~PoFjwV|`jc&CoX*x$;Iqmn}_M|-Uq}Xlg zDZjbdcg5fjN`|3%3VG$Eqmk2f^=9L_k{KH=g6*6^MIt^+0*CD(N_|1W7?g6BW7zn; zBs(`B@BLVGmy*<#kfE5Wtzp971sXD&awq(PR-iA+SowQvyTvCHIpxddO%EoC1baJB z@LN+?XvJKz5f-adBdohic)FAHch`>>9sc=vShe2--pk`)@cH!>F5P}BrG%T>_n}m~ z6?GDKzDHbxju9i(r2Wk~ec{}gnZxjpRt(stb@OQJL?Mj9VcWEVDaAG1H(zF#nMaG= zez@QndcTx(qn|5$AN6LOv6(!zZI58%=d@<{!S+ITs-V@)1+6$47Ry?8O&sSq%{@or z43Si4sXFrQrBrj}LLsug9Y|c%*K%TEea9KFdnCSz;GVpyH2N&ansT<)Gmdlew&wZY z54;GtC%Ls^<1lKl@unK@6B*v z>wG)yB*9;FHsTYq05SdLFjni)^hz3YI19@dJetdAt1e74VjiNe=qt$rLm#F!FrLy3 zVDe{1M~M6EjLkf4Q()hSi?T7aYNa=I_EG!Fne~9o#UXovn2J`v2_bV&g6Aq{DA#hGQFQ3cqgGs0lqk#UzE0=&^SHJ51l(HR zOV(~whS0b_p`L5?d=&95l}U=M3^ok=jJ2HdUb2Ci4)J>HV}H{rA2Z2-$R}?6wdFz? zBym_yw)cI`j!*jJZ8u5iT-w{5TsTohCj~S;EEPXgdxwbXz%f5SNT#hN%dBeQCsmb+w_$8FC zvZ;Bh1P10Y|BUx}{;#bf@&e7}Ij^%0LQ~{~q|liK73HSMeC;hCM-{oR_IQ1aXIU=8 zZ3vEU4M53%56J253%FKlBlt_ZbtfXTpr7(b8K;lu<-LYtU5WLGoXk1iB}+w&N9bRDTo z5ZMMk-g|`}Hfgq|=%5PnH?u0L0((&_@bJkY+eOw)M>?Q)YOd=JX#e9Ha5K@uWPI*Q z{YPy$Dg?tPdFb9M+=)2xvr>yTpYQ+5B#Xv!DZYef+X)j1R+qUV!t2?0<=n;AuSV#fLT>!vFCk zB5-cC-6dMJf4Y1td3HZeU$o6%4L?4}372gtU&PhbwYrFlFoqtzfuVcOO|eYwDTNS* zLIhE=_L=#%AFk1MyQ$JlEu$6Di%2$i_AM$JLW>k2kCXtx#a_NQG0MlG4orlTRq?m} zwegzng6q$FbYjP5_)&SWFXA{%^PmV7RTH zg@peb61hbfLWRC0@qfb~WR0u6$oqxja_B!U3Vwk+lV3qk!oOo4WWLi1vRVF{Z$FsN zeqrm_|K^JZ=BxWM7U8DiG-#A=Gw-p?BWSe#{JiN1SPR7I+Du)~t`zf_)(V(a`xKi^ zHTmm3DAq4!1J2$|7D|wW&{)-mO|WjEAMb;aX?{tsMuz8HaQEAVPq&x#OawU<25q*scI(^@%wuvnwHPk zyMYMp(Eqzc^jTrF45NO2Oi(yS$@5YM)asEcxysRCE?9Y!m1?sPxBu9kd>{RyX*$RJ=;V=w~n zgQXz_7Kqx>-kSCP#!SeA=@>b2i58P!D_pK|lT05yPP6;zpyGW3j`eG~x%u9&@5Q}# z-|OF+Hu&gBc&)ya5SK6y_{&kDfahbPE{xA#{#x|Jt+cJNOcp>op3&)~P{>9sov3Cq zPVgS3fPHV$#V3di!Q2b5`^F@yC&sFNrt$ZrZrK@PfT-$|;l{)!0KikG0!YIe$dk#} z9#QYWqR}Tc;3~(sh?_3yt%=BWCx^Sb3;f1!R34oHe) zDf#A0?W=x!s}}(cCIQEz0*DjqfYsu%d9gmPxe@Iq2)x^AoU4-HJJ_P7m+(*l%)~)* zW*bVxAc6ZqhW4is2VT5fEp8rxK}XMsWbpC9wmJaz38K3n<6E6%DI~2rWA*r4Y6laV zWrIiN&I7;U#3&nJ+M6!UYAE%x4H)7ie5QLl7+B{PZFj*~;9BFqO*FBEd%9JEm@k0a z@MW0v|9#7P1=MUVL>IL=pRfjiNgdc4MQ@D@H@NUwAQA-&kWEL*M7!jsU73HUp2WpM zG9!15vHFE>5E=`=Np*>7quSDFdf?f3slCerE1TA7R`(K!%^j|d@xA7m zh7WK}K;6nN!2I#!2iQpXI(R+D_*lOPI6HoOt@ub16`3MeYsD&(ta=sBoWUHG3>2()v#Un<9W=R&y`o6|)3Y>ZMdL&KSpw0!@o(_7d~_ zeUN;M75yWKnVy1wpm+(hcDphr2sh4j$Su zWUlpCx`E{Jo2ABjh(-r{;%4!dV4#fmFVnXwMBAreE783YDi(OSR2X81S#oGzX&GkB z8~5;Zs~tm;XIX(}kt>^?=Y5-qJVeQ#;H`N=%BuA8MCCYVR5?Vb8`4@ND);1U^CtTt{3+)2o)1YD_hsmm*F_>#8%qpf)a9ROVz>Exf(6F#I-its%2y zghTU9B^`#XE>h#|t2-t-$z%|Jr4koo!86J@cL`naWvG|idB}pUG##g|f!fyEa zGl?GtkP>7({q92Mk+ASrhE7g2%WM|QiIC_@;)Ef{x&y9}n{nQ``LDlee(ZW;P! zT~Iv7{Y>RKlDk?sAu4NKmXgDG!8m3G8j8lB|Gfvn-pfI7j7|_^K0(TCtL=~d%!>_z zmR0eNd}PQ-;PFD`s;G}hvOkU&mfBu~t??dC#}l@EOwHONen9QuqMw4I z*94_t1N-i72y6cmBY=KS>;)gGr-=Y2+*5xpWV2OCkvA#a{F9}RXL13mzqmF5Ioeejwbzv!a zORP*Y^HI2jBvl-V+}9$ZijdD{y#+AoV6(&7k||=Pp4=hP;y^AC1$ux<9*evk_B~Q=FlRMv_e#D7 zQ<}K@e00JI1o^MHhd^>DnF2kIgzi5YI(W_yD?@=snk9XfnClA)UgMiWcYC;Qz4J;2 zXIPOH4iSAk1S+*^$8UMS$O?h4sLO#U$N*73S6I!w54+S1Ola{jkoG(;mF0c!jaOf{A1MQy$DITaNxFbn z{4nSCNLDgNzV0fLU`3!9sPV=06ApYIiidE)zCRO`5#ji$mdruB$!j0iMG9b8)xi*u z+kh_7nkjw%4v;}x0AJMsyz+r2kPIx7dahDTo>`-xy(ZIznXc_6^LxU*i!o1`v7U9dTl1FppX3(w9qN#^$q>dp1vFv?i9Dq_F;vd+ zYE|FYk0Y#hE$<#;6Ou*0fRs?I)B%3NsLh?)nyCO2u!*rlV2_6F{DBz{}?6T?*{=(?Y6CrE0i-r~1*|~45yD4)g zCy@O|h;h*>_gST`dh~pyPnpZid!_st#?Hlkq>v`HnGB*EHxJrhjt6m}F7T|09@5Zt zycWgsze&h^U18c2{Dfg3)@ZN$oj@#2Cu7@gg<_)U)@Ytjf=qhA*~JdjdA^&Il}DVGscHJNl<<5KRg`pJtkbrrQ`?!w|gZurWiIb%~-%S-a&jnC+e7}4@N{5~8a&XXnA{6N%0lmgbGa;brtR?`OY z7R~Ltc`MBcB~Efe4&fk8kBNhr8H_0*850G26+& zRQq)k6C#EZVn;N-De8adG1r?V?q2XrVZ^f|l7Y|5gZ4OV{`2!AzCwDnI<^1l-h}OoKVSC~A=lXSff{!~TLXcYh`#bNG`ELc+ z>GYU)gHD9d07WLHb0N%`k*OJgd% zHDn;x(M;jXcmUbWK9o2`YNF22x?BM=*;iX@xBuWaEIJpN;5%iZ9O4Wu7JxVH&(~(m>oEj< zPtbGedK-G>TSS$-3@W82u zHc?Ir!!SEyXrBNANiPHv?*O{0a8iyS#W>`D*euRN$S;0_T6006M?Qe`$e=@|9|Eb1 z2C_`At@7}D3>5R04>cj?pUV6ZTvo00s)?hzT+}g0Mvgi-J$+W0nZa6$y|(jg*_GKu%Ccw94X#Z9(Bi0B#JU;TKsF%Db0np~c00 zSoV|TZFuy0ZtwQkK7uTQ`W8dHzZbE@yku8dfe^2s7T(Gy|w;FUialCeF^!i^` zC9Ou^^T=Bq0#hKyf+nctcee+|Aqe$e1J1tN(leQcn#L|1m8^0y;wE#Ug<9Z?ankN1 zYV^nc?b=n$P+_t`uHr;z)M7_}EM1NyuVO&9CXHdXS#x;WtGrX!V0&9WgPdCMJS&;_ zJe>GaMTiMb)Q0axJmcDqGQu!kwW37h)`Y24g>hCF!H;>(jUIUd$!gVJW(`uJ3p3mq zlP+2BF&}!Q^xt_5gHgN0dsPytM-cQ*0CRUcsEg-6JbDR^_3iM+{^TE6C%2&zAjZ6T z_pgdBhoO()Q8o2`BKXb72Nx5n3F>Si^?%0|=ir`t#vxuXjmSri8t`WuS~4ZS)}kG!3I1kAZPs)4rW+fW;+P$TxR5b zglx*#rnTX@BSd(Lt(`Ey1elCa_`LFDSUq-YNE-13)%zi|qta}TE zVCc60`jQ0-ia3L6h6|Ywwp7102R1WkKJ z9gKzhEg^?!*pqjAS!8%R|_l@Cr7_i9vp0`Ld!rNuuZQab$e>A442M< z);b6U+R&$ggTuo;n}^MA$KMuW;>x!G*Z()cv8k;6;0>8Gj<|m-5UL$=eIk-c7~7&P z=Ke_xrO$|z3i{=__BycSbUhEMbJY-S_&s^QYd7$4$IchzR_m4^bcns@m&V=}^MDuG z}*0iT@*tzpJo`VH01tbStO5$b2YdQBmg02w+O$lRvDt@yG zr>O=^Z}E_3sgCze= zM_!>MU?U-fVX@V=Nn<0qijlg`r}N^PHv9hySI5=7ai!V=AaQM)?z$|ba2seQa-PLi zZ2(rae}?9}6y^a~&Qf8t9=ro}o-0quPQU@eWe^BPprMAq?DLy{SBUra{B^yCdGbi! zmpC6}NN#A5^I>>=kBH*gW=F>%u?5?%afU3EC&9J~9|dh>?Z0wekqv|z^U7fplt?^g z4Teyy723!2VilY#(8?;K{P{UuTo#vXLL0|Zy202eh61XP1BI4)IIbm;hGt^~io zu$J6Nx(n}@aEpLfq=7Dp|5NnRBqh}yHBC=|UCntT8MMM#Tc z?gbM;k;NEV@u@+HVUB+HY<9C$r5tJ0p*FU}IWC;2jf$B5QWH_%wYMX8pcJrY>Z5P@ zA?4hfMR?k^X_Hmsh7prHilg>;N&Y%>n!1*8n1l`foGAE95o0a^qtvLp{s4s}+4S3= z%|jLcQ0~4~#!l|kgYB5ri8@`gz?0oP8_IC{hPwTkjDGjMhRpY98ActfEu{WZY|#*1 zEAY`v$Pw$qnq}GRC#?5PuB5O==l4s#pfSQDi@c8Evrk+=u}b-7bk+z9mccz(1}@}x zPh(jHpjV(~p|ef8yDyO1BJ`w#&N;p_upBRtO17PGgbWie^f{VyIlk!a`E@Z2{^nz$ zk^5q0dTqKa&h9()Y&}B}%Lix@S^-ajL^0?G{&TwFvZ6OKmxF2JHcyz*la@e;iV%fN75i$znEaVs$zJcDnZb%vPyiRU616bBVK)*X0A z3Pg*HKr#<=5sGU|mHp%P6Nvs<6n;`ENH!p2mP^n=BO#8+fJC?&X@sN`9`gRUcQMx3 zCHFl5VB~gZ<-?7{Qqjz6x`%d&?pZv)8AIGtsj4ub0SW1d6gT4I(S6*w6I}7O9}uCt z%Wl1618d0B5)$Q|r;e1gib#%W!kf;|JvMxZK3TH?T>&(Dr4nnhv9$xWd>9EPk5$e)E*p)0&2r@57@=53QDeeO?!B0Z@7fV|NVx(r( zyc*SATnqHC{O2&FWVX<&EbKR#{*jG`j0qHI%od833u~Uewbh3Be^O~M7YWj#<2coH z3;3f6HGXhz@qowobR$CcBNza_$13_o$TQjf;EDT_IU(#L^o>vTRY0mghLGl`tzi@A z4^eR*s4$tqb|c2W`PT~fjs1|?#^(7F=AHE1-$N47f1iLE_As~*vq_Dx{-yE_MJVc{ z2xk8)4Tyv3MDKlsBzv4!(ZO=A@yb}R{-4=b!OerBoR_E#L-4_WmYoK^6qYwANUon= zmcJy z4nh|?cPcyrTZcA77<3k?R$6{QW#BTzn}fnQRBHV%r4}prRG9()&kllYBA7;s%$u85 zzrPc0wURJDS^KPSZv7)2Y+U$eG|#2)dm(N$1RRGGGnFk9tYcFz_Sd>vc@i8oZS5Gu zA(kj06!YQU2#cLj%HDkOe_LodENHGUhUqQ&#H80+R4Cs2?=0#BA0ZiZYr?(n6Zl~6 zoE{$tc`RzPn<1V6j_c33THC>CSCtQG|Bs;zaL|x1*uZqRzv5HzB|*m3x=t$RLd9d4 zjoJa=3kdrB(nxvMrSU02=PM%P=Ul@}C%dGebqVQs6S8PQ3oZq6Lr=t~`wM-_s2>lI z!Jd}DAh7Z`?j(wN{j&22L@K;_Vsdd`59LSMs34L&- zBNiczbTOS${1)A{sH(I%2Dvo;?D+Rtn>fU;8IND(r{eDPwJKRP?;V z(5txw-Q(}=!>j?C!!bGiCN)mBG{}--dyHnf?vlI?M8@ZCtDA&WRpN;IZB5<-8WQYf z^7gdS#aP6MbGNcJwg_oGt-H4f=*~$XP zVOPhLpu70IlT&fC(2Np45IqN%k-oq2CSRO;zNkQ7jt+fy8vSqcPc}>bt7+x0p+OtX zaD8)-xI^gsF#PU%&y|V9_xW)Hvey{jI}uS=${sG}s@(+cW2JhT-tq`)fw#CMbIzj` zR{>b&Uuy=a*|i23yP10&1g#9onmR5klksybzxC$9?D(T@d8U`&pe!9BoCVjwfcKRG zTWA=6>3HrDR!q|}y2;yH;W?Dwrz0m{5Gqvtsq)EC;Cf;Oa#%lVi#c=y0|Cj*J*ni) z(6<%MBy09-s=1tU{&tzP|KZMHgRlD>FE!B6(^8R_=KGYP9rL-UvwqY}JpZ8OCX?yj z=$H_GItZX~k z(I=wt>X~c)Hfphw{{IQF#>x9F>4wc1hJg^?$4M)?S9F|k zhI6oW7a7JXN+qiS#;-xC}ooJ8~Jcp8R_oHWF(nE4W0_qW&d{U>9I&BcAYW z|6yHWGPwYgD%YVKO8v<&Xn3JdAYZ3@r2X}2LmiL?$6bUiKmKJB=5%|2(%Gh7LSwoEv6^&%exs3-r3b*piU*zhlV0kaOvRKKzx{ z;^oAkHfU2#@%`^O38@t((0DRYt8&@UJQ`w<`)jna?ehfKfM4zd0*mw==Oy)qX?cOT z$n}nS(orQ^)Bh6@UD?$723WV27{JhkILNo9@DNY(V=8{W^@inN4e7IR#I4 z9?-#v_x+;L>%3?NN&OAJT`+=lL8uuLAMFFl1)}B1$6Vm;5WBrq79EVYDhQZbw6dAk zF*b1>Cu#-F(lb~A1~Nm!7?uZsbYc?Y3j7Wdi628m2h zzs&R^xOd}0?mFns?1bS8LC6FQ$|s3IvKS=3g)!FzpEdh#7nGd1Y5wO0px`q#ber$F z2uWU_bq7e?BLx;uy-&!Pk)57=u=e1^s$U(S^9f+kA{=etkg$eo(krgDRdG z5$zv6AvhH~wC!@BFbYCNsfjlYw#$`^qmA{ z?q6Wcvax#qp$zk0f&v$mlw=MJ#G8|^t)nJAj}RjC7;N)Orq3?hwX1$%RQgbR6@!ww zg|rciHke~HF6fKmWB{*x6ztmv&;fIFq0NVE<0=|=XJvgRt2R>*HEdXmivS8uUWwaA zGvKm$IM6HyExZ?wmy<^U6_C%!o31Uv$Qhi6JYN`NUyLdSShf?PVme|NSn*v+MYXHp zXdvvT8!V=vFTyN%Y~g6Pem~0Ta0fa}KV!BPA>88Zh)N*X1W5y~SR2rMGiZqdCQrGt z#_)MRJ;a1{h$oYE0#8w7Y;u)aB&5h%Hw=PZixDG7zN#K%={!}PJI6-CjRe%e!4jKM zT40H-Q7KateBM z^P;SMk$5=`v2T)-mXSHQoiu1v7|#EHMtLFE1gci>;tr7idgY`aim&PBsjvV;{{&g6 z(C%iD?3KfC#*S2>G%^or4H#LKqT@@9s?SnL-vLexzSs$-GuRpgE zSac4MAd+C&cR@{o`e6>1b?D#qDKW#ZE*2fD0Q1;?1@_wNpGRgR!NU;b-Vt~E86Sd$ z--Q@@5f>Q}0MDJj#Kz>mlhEpqL3~xP|B{xHsLp4C3`gjJt607 zko^V&G6#x7J?*_I*e5AS9|7{d39wJNNQN_*dzKkizR<{!(96fMc|$CLFY&d+NcV1U zdXe|RpJY_c89%sp1y=`a`n@xFI>;W}!Mhq#0hm}G2^4RMSu$h;n-G8nKZOmtWNvf@ z9&al9&ZpV10lL^4wTc5@h`~?j0}1JEfi~f!c(7jFf;__suhOWJg2HgCQ1(Q%?pXq{ z1Qi5igC)VRpx^1aEF>3kZJ`DJyM4?SY}OYjXP^RKLlZgDD!puI zk+)JFE$5^d99$3QnqmrsGAIuW{W%R5V`@tn^~ zJg#kK%236I&^3BNE=TmOH*k(gsVtZNDRW5Mk!f?lEj&J@S$OY3GQ3_y6^ObyxVgTK zRo*06?9)JerLAM#f)AJ8S;&?2-sF{;4ob|+`%Dc#5LWQu&r#V!!;BDe~7>jS%CV8^sHR$ylQpr)kEK3G-f#I3pAit z7Z$MeYywYu<6UqPX=JIyy4>KQRpIugKEKBcfecHc%m+Q451fvQg$pcOTtQZ{zjL-+ zTz*LlUkc&4Q(%!Db`3}3j00ytcjWc07PvS-<(%MYy9L7YBPnq5dkJQEL5<25EZ5UJ za9`+Q5Fe0rbq%hLP(*Bfv&s4dJ0^L|Gu#{Xh1J}D zH<@%9uY-p!gINtIB1`>6!39Fs z_C*@}8qlvfscBZij7VI1oGdG8l{QoPJ9tfDH=Xa+h5eL&m2=@J%#1AhC?~MUFUQ~CgolF`neL8y3jZ^(1R~Q&H zJ>uHw0V6ySwwnu3<-EK|$@8@Rg=~q<{_%F-kd#P)WICgE;8&Uv2GCZ5w(;6a`mOF! z7Oz&yGh86niFj#ok8uWk3`r1RU(FgyMOdX`c4A1Dyzj(y0dJK3kjP*wgM`a;OLiGG zBy;{~y-64ASsbVrCYQ-IlngWC9w2x~f+KZ2T=WHK$6b;nkA*@ik?}kT!fCkR!f(ZZ z*+90~j-n&`@pzZG#(7Gg)%R!2`Qyc&;J^}8$y>t;i>oC!Q+C;cQ#s;$!py}YBEUBB zGI6y<*#AMh?a?sKB>GO?RAHjM^0`kq0(>H9^X)b z9QKOJv@EFB3(0aAEX>)8GJ-Xj;n_ZS|L9FJaDD$5GoUI(2O0fb+ajUmX&06)6|8tB zbRI^S0sg!;YX9b&5NefcD=5O7bh;7ae8vLpTsOu10kruHli zsaisyYN4P=Q|=*E$&k46L1x}K<2(oYeZ*5!_1*R@78E$V{kWGwrewYfzX?Suh>o|< zAy@hPbGR~0O377F-D145&x^qS)ws?Mc#uV+!}q9d@8}C2qk1fvP6rJ!5PXM1Fsc}L z0ULTV>dk14{ZN;)1S1=Q=~a+tNEGRIllser(wl4PBXCIus;oUbZex3L43hiEz?1#I zDiZ#ynEr~}a5Ll+EC2;(1p1{al+M>Y> z9?jRFLAoc6K?_eM!g)E(#(0CEw0(%A9X1d8G$2HW*N)-+KdRu^yaST-X4hR5(ATSQ z-%d@Fssc(5clT?rSuupG%d-uj-f`zRk5`wbOW)V$oB=Q`7Y#*sr{Tk@ z`eC+sc%>9#S~-N*%IFN{samZre12{KXFFJH8oa)vN&`K+zy>N4(Tin2p`L=b1|@*u zQw`if?C_o?q1*3K$z`|O!V}@WTgY2{kg_b_yUuj5IWQ1$Yap&7cs~a}APYSxGbA}c z*NPXNdY}tgn>9{)FO`5=K+c8e{r>=SgRdimYo`li0|R2c2L44g7y~O0df(jEC#5|n z417P5?{3~Lywu;v^pz%)Twnk_GZ_jY&Uy`Jde6yY4s>o36+0|eTTDE$47ozhjGqS! zS>DAQs8V>d);-spiQrU_0X6G(AgirkS%H7mp~C$MRu(r9ri#oO>vxui)b&dYS!7T5 zbEUR>gw-ZM!o7%3$)k;d@A?Zk(Zdoj@y^{}8Rlir1WM~ER5dE4#&45cMC}HzAr%`^ z#Fm&hn`KNv>!d2Y?^jg=RAXz>ku#tIg z>L~|WKLxh(TbVeA)@k$R7ci%UmGcsX%6tiZ(sEYz(dFs;236(`JPVMMDh?hVY->jV zM}Q}Vv2PK}I0tW9(VITMab7?#DB%Q%6tBl{S_$!6gEG=i6zz2!p}Swd^=;Gz@x&qUQc2R&y! zg&7ik?GGNN{4dpgbx>9NyEot_50XlVgtQ0>qJVTtsDN}y3W79%g0}m%;Xd40h(E4rL?|o01jPQJ z9HBRTe7Bo#q1o+6|LJeBPyb-4NOlaVU~$0E({7obO4A)<`oc-~UKfm_Ec60}z7q|E zEF@Nc;NeA9R@r z??quvcY{KgTmzft8jEpcC14E!a$dqnYt10)mPWOCY7~;;2!__k8W5*RE!=CGGzOq7 zukc>$mC2(p5{&-V-F5bnCuL=*f&>HTn~OSwFta^A9BIOkgTC8^qYY-!WB zhIssd>D;?leSu-gXfX7|y&7Dhh(mq~=sH>65;BTHaqMu>B4J+`&~Hl(!JPUq8pKRW zyc>~Q_7xxbY9g--84Q0OzOFJ{ zQ)PD-^bnjeIu4704pR68%C{rhNFPi#-A8`Enc0nW;V%3C%MHj=?xo^gr?< z(24M>vo?-k0+Y!(qT91Ma+wY9Oc6DRvS@7pZ!6>!RLm+5Jm|3+UB~2;7={-MJ%k~5 ze@EnT?Sq_8>`P9A^GyAb01=8(x~HGi`;$4@i9vzQo|O2I6_ATUSWDc>&+BFXIynf7 zwrLv)k+)5mp!F4;NDjYXjLDC>=XKn_bY?#Qa>!n*xiQY(zDqA}3{rRsYU-H^54LRS zgt%a^T3p65jlw4w*Tqym!ZB;o2nAJ?il|5`Z6;A-M}RBE<~i(=53A;amE6ogt1+JL5%45iJoOCL8haJFyvZ2o}3h3rr2M96K!yBw0LD?W9 zC4WU7B%V>BOM|{J;9(;?weVqXEMC9@WCLAPY+M%*5+*h?yWsMwTko*mCR5Z}GBA@C zH4{jv`Tza;2#=U1thHsd0y4CD*IXV8)ou`|vy!3zB46yH3%K-e-B3vGrEP_X`O|Ba zQ>cYRe@TR4zE}+JN_W&{{rxUU8}dyE&A16GP%O9Z`EnHywS9vx7qH}j3m4+Ud85N~ zfotKf7{^X%SG2lL$iewPMUoHC<2@1|D8Fq_MkmF5l?iE#UcBvt-~zYFVPG>^YzQBe z{98?wWC#JWEJC1Ko;Xa_+JC()HX%yJ0(W6}E?2ki(L!fR;3o-M=PdX&f2jZaiq7;O zl=H#Yw#ofq%`U##!mJCj@Lb5qbnXVGD4u zA5RTQ-xr(*L6Hgq=oZN#o6tU>`A+rr*W_2Br&mG80!nKPDfB}d7-@q~bZ>oO~?0666%~7@3SLX=CmL6V5*#s<%58O*QkA0vcLNJWe z){42m16Uq3;Ir8PZHNcSq{2XfQ32*T)QebgZx3Xh0Sk|8=mg-_NZs6 z2W5$cytEGhMsaqDu7*==K;YhU>T=0>rg&s`d5XAOaksS>3 zfRBTNmF^`7A%`U<`ySo8j9BGI#*_po`g!wxLLrNwyAwU~gf`|IP&o zpkIrm0`cBJ2Ff__d2wfet;>gbt^yX>S&!v0|Cr8EK9(L|K~dXlHi-xO8#S6KD7)_1 zPl)Au2c6sGingyeL$wQ9BR>0P*Ka!N;ClM+3&?IP?q5<=Q z`BgK%4JpaE=zN>oza^BHy$a=_bg#?wzg|I-P%XK)`*HM%mi;q@7-O zN0Y4&VpUH`hw`Y;JZN-AqkIbGOZ{1h-Pn9lBi{A3yvAd*q)*>)JaRHbeCS@H1X}ZS z60j~WIuzCXY@)lJEo!g~Sg00bc$VW!;J8+N4>o*bA;j(vm8ywiMtAp?d>^Bv8(_P6 zpy7)K<)IREXj9;biiQq>h$`p!IYR?LJ6Mr-4xY*_(MM$L6(f}D>?0(>ej-@BJpPRV zWP2!Uw{hpWgu!yPwDkJ2`LlxE_q0!mDYwVDYl+;=MhMPu0L*2B>uebq0gnZ!GIhJx zfJP-7rP&-Lv9cZ)xI`eT^)<7^q>-%iZuZkL}jWucKdh0|hlxWp>dhQah} z->8*-?XW(aVu2)un1s|t=n-x{ra*R}-$UITj7Rzf*DT5Z6pecz5f;D6nfUj$P86VY z9=6gC{(LUeO>`WYmhivF@qpI@)|7g(e|X1Jz{Zi%Dp~w}J-|}CGyCcjd+Bd+#4ZJz z{ym@IE7pSU*FLAt7M(_+5Vk1e(>iy|`@wurKi@d_5)SVdNI3S&8WdW@kj{xO^S}yW z@#r41fFlZ6^Cn&SbHqBB5C6aT|4f$&5#9>?COaVS4lJ;GkstS+>7?hgf5$dK0>i2X z;#|X?2F3ZU6Ao2->!W4O{d@Lf7+CoR1QVzLaIAm}_HKL(-tEVMOiGH-L3~Sb9f<)= z07!doiN?7NgVpn2bq#bq@!%v1Yx@@a-(pgSsyqe%HbgH&}2pkwa}SrgwwGaV}p=VKaJcnV2?oP#?-c#Ie{uWE&vm9}2bB~=IqZABq5m<^D1yW#9_#1XuT@z62uT%bME zN2|be3dhX{i;DI9|5ZDPJi;Ow=O%F=krPXnKH5*?YJH5 z7XeFx?U37r;S(6nUjvr-dVuoA36|SFtTz@_Q--PmZ^mOh^R>7Ree}l|7D6~R>3kwMwI`tZQ-~s zkgS?OAkm!L0Q375WNjDKx;4pTIU@dF7~7F_7yb#3PrJJDO4{#;}kdGab4IY8~l)&Z$ zL}GmuoNIG+i~<21=dXG3He{J^?MqI9xa@U3mCJ{^ ztq=#U>ApkiJzU};R9B8U>V;VRO z@I!t9BacZ51PRU$%k_vo@b}X=D!h<^0)mL_+eEe6zcIVz-!rfRlem8Zl?TrXL_LP8 zG+pLPkOAqktSVI|*ypu9fA{5^zx#5@`65-aEjy4sj^zXsxPI@Id($05)rSEV6+zc8 z)cFpgNMr$YFdu&FA}q2VVHI#sm}$Rt_0}_esUc+42bzh6tR&yFBZV)|BqXl z@&~@2w{Vus9ya})u^82xzbH+R-bmFb7cLc}Z+ZUW{kvTJ>T&UjRJSdUab+{}a6%na z(`C-W9h5HCxDa2K|IKN7)w9XHYGd7K$@}2bf$!+(>ejOFW9LrK&W$G<-kt89&Ykq> zB#sih%Yj@g@B{oaJ;HZzw8R6Nmyhj{dDtiQ{Z|bvEOPE18>8z6W*a5%HTh>UdQl)x zE?o6K<$wt4AA-HB%-{Gm^eTV5$yt|uar{sAsJAfM@AFn6*cn)FWCHBG z@GVel-iE-{>{{A@j9*_KRsq-leewjkyL3^m=UOb^~EIZcg zM&+?ennm{z%Ck?$dnaAcuf~KObHVJB%r~RpTookNv0haH5H(a664D+Ii1aIFj2{?q*Il8i^H|V`2T@<^=FiF1g+<4r(rk^Fg089hWw+VD z>FR3K`qRtDj)ZK?hf~6wb=cII>C7Vs~?tr(I|v4 zQ*2(^PSX-?JJf5E5#wmA)#cuLbZF^gO2g#N9?|>BEpUj^@e-)^;Af`lDjoj5eiUgL zq?U~(pHY7S{Th>`9AmhL#p%Nd2}j8;cQ+vln`rR}P>Eb?x=dky5RfwIWL1^8tjBfl>ctfW^KkXw4r%E}7jDzU3Z zAazp=ItI_z4xvIwY5|+u2$C!EcbH*Nl=v%^{-QnC*3-`v0PD$uGUe2FO|S(jS<9I; zjPRY=w1o+%2pbWSxf{YPEHY0^0EEfyvI3h`pO$U%(e<4VEK!zv>0gB2Qv(ar8h@Gv znzh(ZhMqiMovTKxYy`bUTiTZLq6RJ07;tdKW;oW5Dea;Nbg(Arg!4V0?fKveK^afI ze)uY~3cx-i#-JzM@ zHFq6D6^~Nx9vk_b1{v&t@;EGe*+6g>LkrOz<9^WyrSi5G!g+3=1$BDu}9o zmhw96*N^>AAequ!K^UdJebq&MpJCgRj^bNWlx14p%XRo_6%Ho;xLBjFE8{D-Q>V=n z12$h~U0+lEfjr<`vyefd0I?G#ADcl=!vg@2Hvc|=LG=Qgz@^*(T9K1?No_?CTgPR4 zW9b~2Itq{wbNf{LD4WIiA&jB-y5^l8(*f_-#wnBD8LM^SWz0~PIsh%oA z8LhlegHm~}?xeH!Kk0?gg{f4}#Tb|e^WG4%k3l@_4QLBvcnI6;)8m$5rfI$S$slvp z4AFTI&Y@x4TjI30{;BQh!R{@wpmW&MeUKwzZNDlQMF{+G?Uq}{QC&1u?tWMCRdYUj zSymMfNT=@ajC4)=O?{WUeN;!aZ@4_n(S5FQ+6r1JWwzS>Fahib6sa4*61!3m%i9W< z-l&csAFyd*J((n$@x9H-sKJ1yFYG09M74xJ?xMbMXY{~uIWhngaLLkK?;TbL3kk-D z&o(=-fphk@&`%XZZnkZv>W1H65~2wK9jx`rJwbI&{7S^ZMIoE|R}_?GAu$GmW} zy$5t}$q`Kt)wdDLMUdc#=b&`e^^?wX(14Ox`q|YDoqIpIQPj*pd6mQXYz6S-NWgJ) z`*F>#oX6U1db27k@5l5&$xD|o%jwcL_Q61-OkbByq%Ky7Q)V};H%G68-6yHqo$$4c z!k$j*8J2C9WlzQmM8hh=YK%LrSsDW}uU2UPT!@HlkFx7*k{DmGfccFFf#pbjIQ7K2 zGY0&gS742pf*9p!SZ@f$-`j;|Uwd1RKTebB{({K4FrDCV zNN$p*i%BU0dwOBsjIYWsk1b)iC$S{(ULm>kkPhs5Ww7S=k>U|y4wc@ewIDS z7)tmRK$*VbNyBCSx>;2tb~mXd9a4E0o`T?$MCOh&M5k0x*uo`mYHM%rcLQB?&nlR= zzHjlA4`s~2GvEFQHPq`K0QjfOL1i1T1~sKq{kj*OcX;*kT z`IXUD7+VTF(#as3X{NZ4!wV_|&CdE@(kPm0&>WsXU*|+V9E6>Rr^5?okiiMKJc{*i z<~~OW^g@EfwT$8Wddl6lHmKh*(Y1tdO@J_mq%b`vx>eIQhs&B zc_&)cy#2$6?p>xEXE@b8Jl-1GLsm_2^6_%*A0*dR^z|$*EdcLtj$tURK^(ybO#M# zk`#*J{REqs*w8l3GT`)GVtra2pTu2%F=@*9U`PK7(mIssHzNNFwWtId*l! z?#;sMul?r{+wg++i+7amiqKIVvoS}~HsY6DWSiD^U|+AfzQ|uu&C5#!hD(J^y8d#c z6J_r|?-XjhucN8PfZJZYu5aXi{StWoJ}O~d!QMBy7eOwXbBqZ@&1CI~GnWek2uV5$ z{p#-64? zJF8o6YCYF_b(?hkQ}t(6qWCFRByzTK`S`~9me>Ihhf~0`3@D!%_-Yo)j+xd7F|x(1 zHjoKLyv0q?CeXr@#8r-<33g9gifBs&76t(91$78C4|TJKz+la6MRCsJFa<_(K)FH z<6J=2UIbG}y-8r^9SK8_0$4lz7G3+{*`p;cjR<8>$naf-f z`vfTR`A>8(oAZ#2i{`&tBHTSX^j=`g_rz>>6{ID31h}uL29OLxCi?T$?)1f(>7<2j z_rAh~7Q`jj8%d;Y2r?@6Oqy?ksF#IQY%N-Jb1aa&J2F%_Bke$5`SJzFWUTpW_s+ z)1Atzw*>nBq&r;_2VOViuy3&loziWY69^EMt{hnxka*ok_z(3rS)7F1fe%SCRfH~h z;2yyhLeaYHt)$T%D<3Q+kT&%N|=nd$J4bhu>3H1 zS1s4Ap<3r>gUROxvf3f$9L=$gaN+yk>693JXn4Q9#T}pzq4#Gij;MHQ9d)i^49B*H z#*vwH{Hi1*(sVeK8*q1?b_mV54doi(1w4^zn^KQYH zG+(;xgYdW5zj1|v+vxolS8m&S0-mJWN+Nvvmj&yQ`#+`jpi-VKs9aHMr_-)@dh1&0 zxJT1sp{O!3Hm9qXD%umrdEm1+ttowJ_oJZzb1Areo*EXn4O0wW9_o7CJX>pO1gX3A zTdh1`885bT3H!B_GUpon&kw$nCw`8YdNc`t@X$3j!VxksEw$qj+)Sp8hJi-7u7^tc z+RV3KRZS$RNE2Vfq{H}rfpFqx!?Qr0HiACU98u$SKj*aLn`ca(KZuneWbFuHa4X_x zm>%n~4%Q_}RvK$;WEgNr2>gEZovawxSouETdF=D1*R8I@W(~chnD%nZsaf25M-GlE zx2b<{IBI~rn)SFH`Q9}DQN`;IX>a^4F6&=c5fIG~eH z(Misw@eu_qZgd2fVG%J99jO`waxO1aFn;TthVY2>vR>nT&dsq}jgxln5rPa|&X=)9 zKKzREG}Z`C`M!L;$%3DTelpM;myydR{zx0C=;C;ne@&uahthI$%vf~r+^nQD#mCDw zK9x0_g=j~+fNys=@xQS8j6>LN?VFj@xrq19p+ehJ&Fg}}b9=meqmi+{j*m5U z>4nkOE#XkM===0oPAdeQ6(=uuv`pe0=Xf|sLVI3TTW;(enVMRZ-{|^`t4E&vg)OJ> z<6H^*g?+~af_2t0XYje1eRlnslI`q#27R1lmfVK2PV_K#QAHij_7qZBTK{mXG*wpW zkImt{nJ;b|dhGs$lz{c)NpJD77G?kyq9xTRYxD2wWDo~iyj*Jjz~KB#Ck4Z{G{Sc- z@1OVWjaOmrw9p{_9mATZf;H{1BOgDu8GeJV=m#N5+j3>sgKvI$Dtu*1l5$V=)*FWq z=pmU;P>Vl%pU$q^_l;6WjQV}7nOXIm=Ly#jF&B!Er!c+lw$uCt2Gs7u>sF29{D0$Z ze^RDpL_MW7L+6J+uaztE%CkA(IbG)<5~p{$}ke(8BA}>YDAGe;CgO6*z4l zi}1hcJ*Q>hwfXZf+P?w2n}n)8&0XPqe;7|DNQdotc2$!v^KklRC6j=OdNYylZ`yAb zvA@CIb<|QlyDF8~Q_1326KNja@(zDRv6>tu@i#1OBrVjH{~$Dw4z-N(59DOFVh=XD zse#4jh*^Zjpn)y5HXhh1ZpinzGDewjCxh^U?H3K?)oU~R%-~f=Gb_7m6D5V4XeP&~ zVa)y4Gnz#KOzAT8@RSLp7TsU{DZ$tumW!+MsrbXDM`^QC7TiB@X0*Sq3Ak!u819 zWIQa2Bqy!5U_B5N8lzVC(N0yV}`& z2v`K{v_KuEHu9D3xaTR{5pM3t777B%TJUNFHs!8{T+ zI{pOzq)5m`Z_EKSQr}=ne?-BiqyS)WHjJi_b601hSPjBZj{-=!LZN_zVDfZ6fM+z$ zx$}~I+R*Y4NM|l>WMo%abetG2u7JRf+rOGdsS2l;&>jOFqp$&vQ?GM!vm>9{_T4Wg z^Ph{4U~!oQq?n)5GiCW4&dATJ=lP^tkkkcx=-2@I~YXqb9;bZoeAbhb(2<>*t>(m zjSQpWb9i>Zk5Ov3+zy*tY9XsAaE5(eWA2pzbw41vs&88Hi>;d&vLi5zJF zt3)Kld5uR!siHfW$%Mzes)KPR#eOk+<(W9u^W!?d(|nD>P;EA3B;~;1n#7(kF?2pk(8!JTcvZ4^ z6s%0j!q{G)JB22Rok0Vj$)Fkr7|8`V?cPew0>fdH3H4lKmw1N9?d6u=gr6GXMxU^z z&Hr

n2L|QNjuH0gY?``98dz!+rP4sz-~>n0h2UJ|yt@5G3@|T*xn29kD-R9|CCI9twHC4;H|uCd8~Rv$ z1i0gpqA{5ZL|$$I!iAh?9>Ym}j=j0pSuQTv4^>>h*3HW|X$1PVY;dwU0R*Q6?qQ6D zdxA}U_R*+2SIvxDYn4!7UZqVJ%iiTHaQnecp?sd#B-4k9_pvqzT46?A8mgI2vR>8V00Sy@-%Q0`L7Ra6 z((r}bZ1IWwl|DS#t(R%aLo=YMzMEson@yh2VC%I*7y1?M$%`p{$|4Mf<<@lJMiO$xC-czcN^$OG@a?3J(6b$buiqzR`N zdk7_ik1UJ>v>A_pU-wg@ZXTCDi@xnES-|_0k!>|O__lavyp=AG0Z>V+%sG2jyXC%R z@%l%&ts_b=x1`;KCv|V{LgiIBfde;k9AJRC8NqQ>`w;jl3jtVX5vsrP-BpNV+zh`c z3&mvnKg4F71ySRygix%BRf7K3OL#IB_t9Y5Npx}p03HqiG_->usp~%Bdy{*SJ(i`V zd|`5KqFt>J5;vc&)A;6A7sJen%RyQh+4l88JB=Y;09dSm#au*cY#HxanWfdI7uqVT z)v!czU%d}wdKlabCsrkIgUd)hA7Gv-GR zoj8S{4z4PdYf3tc4&3Ut`Xi;L!mjMtZojd6Md>lvr{j7vw~)9lQfNXsRd43;>{xPe zn0UWi{7|Pi_*@-;PnNxXeI0PR?EuAE!5%p{aWOIJEuSxa1fuzQ;4pX|uK`xtA#t+e z$tf%>95+Q-DUE|Im8u$`d{bubv(QiX^NkWeyU@NWe}%3y3@Ud2EtAND4#Cm(>G0qk z2b_6GoZA+uRV3+*xXU1_w=J;b5K?j;0?E|D^e85`olqdJ`*Rnr2Tgw;Y65~fr-k={zlyfOERWx(D8w-jvNrt5Zvf@F-*aRyT&H&wM4sOv8lGzq$i7_aN|&5s zMhmxh^_PglegVeNKCSZ$kn_3blSB-R>#&UVv`A-_l3PNn^<$l6MqKDYUGgZ`m7xQ` zOkqiN>E9w9`Pn7DWAl0^rnlUk^x15Txac;BmD0j0prYX^J83azNASWTaKV`5Srt5` zb74o4UCT3q9}BnQ&Rh{ap^7rAz9z+=u$5>3_f$}T34aRks0DIq;19X9bq`AmyVmsx-HtJ33JiH40FuiB z1<_X4y*Fp@surD_qlyljW##kOZj)w-lW9_cS-^rh0SNkI@t)w2L%m|Bu9IkL&dyXW t{`B9d77A&U7VZb{!qJ#((6j~HnNeK-%)Zb~CNyMLr_wfZ6Ib^eZ5npA9(WRnkv0@}@nx)!0 zf;6vg_@mLt5TeTB#2`4yzqU$=OGYp4&il*s(vl-A8W)XAhMxBQR#^XS*Y~7vrzzd4 z(rM_O@zCBorxj;c-pDVr1Wbgqs26`<2xQjoykxnG9fk7u&zDY|u_rRh~JfX_t#|MQt`Kc<G$S()49>g@OyT0UJI2b91^yU{%mO~_LA@D+atkfKeEk+0*|Ngl3r)sM6y?)go{-yGX8YgKBFP2-_ z2zgq|Qs%AtB|4S&di={1H6IH!_J04!E_5skC-eLKima-1 zjFnr*=Weu9K9y%478= z{WzJCo5a_gWBKico}t?o2MZ}rb96qK$ua2^3I!E#iMefviyqDqy)&pOqm7``EYOJI zG8VP|QI`9Ayh@pjtK#v9mRQ5=oAZ;rm>T6bq8^E6WS762tUR_@`AGPrjxp!Ft!n*z z)%aZP+)B`CLF1ggT6_AuSvaZx*Oxqe!Og~u)tRx}U{+c`@6#i9Ny*d3lP&qC#ax-_ zdsKa~%#jYBFs}5$cj}ZQE5bXD3*E`>H4PrSYKyrF$ph*I8mtv-&;P(q2;1&U6`W;N z$q+B9=dp~S3NdyMi4-F&WV2K4K&CERhyIG?1J ztOi@cei30-PQTR~{#2im?6nq!fZgvZj-O<&`5(WHuZ*2a#uT;mic) z$D3bhC>IYuMyyXYl`M>!_?^KUyxu2nvnMZ!m?>5}j_1C{+E8vw7iyOzNWCIuWgqVC z_+>Ire(O=x$Y$Q(nwI=f_jw_toDj@C*~x5^`K}IxGEcihZ^>{?ulFW9Cz??RaH7YiU^o z)!ny81Jf-5maD_>ZM;d8u~Y9j+%x&QWKryNZ~W*JzLWDmqt6k2D z`e%P$ZyhMSz9sttkCKV2>7;Corl8StcSW=R^e8vcNUfgJpqlpkTSfIJw2^lAJ97Gh zRHE>PMRe#s2fwQomqZ)Vrr9@S$_X?1JG}=AX+oMWG1spbqrZXuR^WBKzn-XVN~N>>#O9-C*BSn z&bFJ|KU{o6c}yehHZ7aTnMT3$srA9pcApqt7yd9W$L%x|mq*FJOLGFEF)bH6qVLD! zb{!7n%IjV4YxFu6E~SfB=1y>K^gIl6DluxXnfv(U2d{Df2A=TKhTQ~H-Q;9EDxtwU z`;!gNF+TEIc3wLsvXRB?YJMWyw9$(D;I6omlaS9@J+u4P+=?5_X-}}Q$n!moW7CH0 zGi>X}C6^T`&D+CxEcD7OUM`1x$-XH+xErf1x+(M`TxEAYTRM`0w?%ET+97)}QtV7+ zetoQ>%W1xogiE*abWeZwL?Qr#{L8OT#AKf}jp@z5%XxNj@Ap-o<*6!o9(jL_VI;wH z+t?6$OEhY&e0^-I+3yne%6$jjftEAnQ5WNHyH49Ne&!Na`Zf-57UaB$9+XkzpFKU} zif7Z5+qmm>RL-GOD*kM3f;C0psTL`5Z|kE$V~?GF>F}SWg9I+&hYa1?H%zHWvDA8A z^P_L83Q1JLeMxD#box@aoe_VA}MGLJBnvje{i<_=DefQb{x@S5^~?|Wb?~VwpLO}Uku|b zQu=9LQH7rE-+n8rB^hmT1ls2>m8ZU(QwmG#Jv)h94y-0?3tDpbaHH7pAr3v3GH@wd zg*tW_+NZ9KE@?k3ab3*-@%4}L62 zyh&ix@FEZR2D@7>Pbn>otjNS9Xqx||`3l!noBpgI($DK~wO)K1+Qw5(rD;jLzgEOH zmybQfFDG+{Y$`5zC2|?>;n02ji2qrWxo!=&ao1_J(#Lj2i{kbmrBlD#G8*x%2UQ`% z?ZP+DtdS8`4l5_eVYW=}Vb`zDc=iuUpHtJ*SUF)oe>EpwTxTS|rivNcy}OmB{Uw|- zL4mgG&Q*D*ngtJ&s2m~;ZNIId#iWv<f~O%-RCQ5eeQM~V&2@q8bUU1+j+v=u<~EE-7+j3klMt zanB4J&yH4Cm`=O-`tqd}c0z=Um8fMHgwwOmRX07PCnQ~jJP%3Go z`thjIVLF<=ewi&P&?9-92mf}$P5p<$S>{)$ahQy~A{sZ%$A#@j8=hCqcn>c!J;PqL zZ}vr9QDvIrH*3Q(mqRJ}Flas79`2!elVJLLp*Gjy?~i?hJ1^2~ZK*pDTioJU+wP-R zER99oHZpJ`B*jVnt5weNC~vc9?XC=|D`e<&d&<IV7TaE1K9|P_pjO0PE>Y?Uw)x zs^Xn=@1Fl%N~0@j+y|*V<}W6RxywJ|laph9FdOl@5t3!#fD>Qd)AJ-<`Llk^j-tav zwL?~Xq;5@nm0LAtv`O;*Qvdy*gBcl{d)$Uvp4$rDho#S=RPrE%cP_s1;S8~e>{49T zf7^>mO1$tOGs-l`EF=57Q=dql_H0Gq=eOU*&NlJ+F>&UfM$`^}A*%CS$_ySX$4H-- z^1n-@MMp?Cv@5)WFLsjgR&ts#Nz|)WZ6Tj8%XKqMIK~hzKPSTrN@9BHi0i7BI;Yr{ z#_fe7GnV%(L>JU}xWX5sTBPtDdEzrMpZd*(aeWs*)cJ$&LJpUGt>Z=u?oJz#GS=I1 z?$S@^-H>jwB1~0cj`2lKTmEF&+Uk`h6S0PH@F6QU^-Z!ukaF&!N>ZK>* zx|+f~*S_69y1p)G{Tu&*IsUr92AZb96Q{f^4YJwHRvVC#Ke?fpeM?arq${t-esrr%Wu!y^47DBD{RMa(^f&T<-W0omiB8r*#G6)mA6XIev&zhOw{Kv z+HN1Ep@Nncy7t= zJ}5leU1etIk~9pcZNagbYHYCRN>HrX-PB^Qo^`&e!@aKEenN}(!o)9u(@?Kr7pK^3 z&>D($qo+NgCC!g8=fdl0aktRLBy}(;g^8zld2;=%;-XCyo6}w)3aRa$xk+q16l}Xm zJB_I&@|L@sVCGo`|5hMJshsL0eWdU&5{VMT_BQjN7kQsKJq0!OH8X-4wuyt2!|m{9p8Ds>FA*}$A_zC*(vtVq zvg2O{;sxL9e7LvK?=LR#*uJ5q0k`lq@(`-Ohoq@rZpRHwmZ9}Sa5uf|P!r~KIz4jP zJ-E6+$}<(}sxuozY#+cEje=brjIxfqSHXYN`dEjT`5(|+8jq4)XZ!hUw~Q|W`PVFp zrZKSh!9wXDkZi(^%4V(az{~gV@sTL;WP%%`fNubfapN@D!Qe1urQY)4Oc#HIb6 zKH36o6_~CCQJ-bPV!n$5qv5f7iw@pGlmN zYN)5H^I~o#dEsaoJCRPMze{m-2xdCF)?HWd;%FJ<(O=(7|B(H=@L8`>;+-`FxY(sH zj{XWq)7R{>+`YIA4di94ELhH8Y!kX(fOWEOL>piNrB(*t(SOfV zPQ=8gcH6WY%#S|eK25vpI(SBA?5S$|M&$YMYQ4%6&-2s$GWYET6^kC>)2r9`ZQ^Xk z;oIb0fLY{UTqe(P+x)dv)pV+etj7*&V(RFUn?Lr67LFa)ET(^aPjPnU3dLk`XN}vY zS?WBn1mjBq3ujApqRvYy@rg~1ubiOpV-$3pi$ZRTh2FGP&EoZjp4E(S%9(nrF_y~5 zWpc|oukWEYou3`^J{je132x5J>38=zKXv_4q+L5Zai9Hw+7Ck+}vL3lzY^u z$-9A~sKl^NODSF0y4>p?>gZ;Z{s%LuuIJy7n96m1GX9qJfZI#!wYobSkTg4?D4Ux) zKdU;T67|%y#W(@< zMhQ%L@|hGWrrcKSO%TLHpS_R5hHc#%Wgc7fat_dcVZ1{ z9HsEq&JJert;Z|hUl(#_SqpP}F^g^T4P9!u>Z$SW`E39fl3h=}oUbvz@ig0!*tx%5 zV%e1-Cy~gZtLCw@G{aa@v;ONf0BtRIpW*lV%dfX)hmvxhk%IqV%2zBOHFZvh3u1x5P&lD2D*T3e!(7 z2C}4ZYh2gUx2_{xq7|7p)2)d*u3FAsQ{0Xqj^inVBtLCsZTKjW`WniT8re-1OA>#{m*Q_Nym>#X+ip`za_ zg*>S3Wsr!SX`z>=3f5^`?1(#2Rwl#e{Hz_1@KiRh;rYH?-LpS&g~X(|b_ei%&F{=d z*Bio=eI$(UjU!s?1^=I^DLj{!ho%G?<4CLlR<$rF&`rnQ{XRTbk#eE zNJoJ!Yf!?g^zkqxZlQei(d&3m7SJT(JUIUxNWD|G58~TZ8>Q3dL?d+Q+fBIgbwEuN z{JgrZQmA$FN2%%kREEB^yUey@<*NH1zQ!`k|0uJZt#9{4TT_-<5;#*Ubr~F?7Ic&x zt9n|pW=+obkl5fHur7Oz&0v0}GSSrYx8q02sI8gou&R34X6YkrH|}8J>}Lj3mkIUU zdEukRqRZksae^dsD#B~#R)z|3&pvcxZ>QX$=^~(ifMu|ewC;I!xMY=1+(hBGy3+;iDDwEfE*VWP0wTs7oP_LsEJ)F0xFoQY2qc)gk?bI) zx;U2GcEjRf*AK6j7D{Bs%iZzP}j`B6;Yoyd96>1D6dV_ zh(KW2GpM$2VZS?d>??Y3yxznfVUnkwacR?Nq%Ld+ws2k)-xWz$OePdI`-w?Y$-aea*(?Dk$ zgP&RFR%tOQr$K(v!)+d&)dhqr8R5jVLN9X$y$yu$w}GDrF^Wn6!9K^vFQmX;dzcfr zN2b%appkhGz2fDmeZ|TSgy#ZHoe>%P$HjUTa=JqHt|{Lkn!C;uAi3JOS0kR?c&9{@ zc#_XPeMjX&C;W9q59+@$S^XamIiJ0!uce?SPn*7Ze{0ufX?6pRZkoht@fGRKP?G-l zeV-g#1F<`Wf`cMxIP`UT?LX0nQ{C0#mQGCy%rm`ef*VOV@lz;MX@dnLW#U_(zh!jX zBI6$?g}1L~btS4pukHj`W&e>$I3c$utBP-5A{24!7A+*gZ5ZFwNKp)6=iF`7=Q zs6|*w$l+IDritm*gUWF22R?W^Jpo?V7=d{v-ch|g$c+~}8kk4*Ew5+0S4bH#x8j|@ zkFIw#%t&#pn^Ak$y2(85mOoQQ4nDOoiyH=yO6wAZmcYWFUvf#tlC7y*m^7YOH=M(S zdstPNuMihKY)n{{RTdZX&V9<8wC}Z;<~q2&d-OrDjrkE{{y2Z;DdV>cUe1!J%AHG6 zD67@D-6bjN<@?H)%K(oQ6ok2(`d|kX{TZ(+bl3LzLiAA?zsz&{+o|Nb^b^#Dt{DQq zJkDY_3IlaHYyICvH7XZ2;u+h<HcHo!>o%*%p zDAG>OPn*t}12FLufQHlM9ikTtYqaW1w{_>le}IWP!ENeJ*GaKvsYAS^Um9cVwW~SL zc~iSMHFQE61rP19Gmfi|Uzkjz>VzJ(h+AOdS4C`fl?~sPmHR4bPg}*QCESu)#qEh> z(zU)B5qO2Kqe!n;nZLBEBGA|yR9Ak7qJDR)ypw)ZXhni1j2y1-6F|$uyKP=q7^bf= zhP%JHasNffQh&ClGlPM6BenpaF!RgaP%E5SIeYz<*M3{U`s4dIEkion^QoK?+$5Pq zWzRpgEmEXYdY~maeJV#}J?@{hC%Z2}kAZ5Z=HwrYE~_t-`I~FOan}QsDNW-SQds1k z$HeJ1QmtKH6LIB*l>2DiKaQB(9$dw4GHF307qCmEh_Wuk$H9tJT%KF!|InDaTeGSA z>wR#NSi2`LhR2K>*ZoqLz~}&>wb2jsTJhY?{nXhym^GiG5!Z?CV7LlQGQ4oVFWgl7 zeBX}C#7!7AvSRwjL*X;TW0qos00Wxh#*^HVp>WD+{ih9#d{(`ekAf~OY?ZU~eS0+I z$vJXg86}ilpa+n4fuLT!5veRDRi<$=5jBtd{!iB-&Qlo;mg847PZ}cpSSF>zYpfB( z$={*W%`_rX7N2`c1ByV3~cFgDbmB`5m!wn>I?3xoz5HESR+n>rQ(N#_f8C z%TB%5WIy!;C8AE#5U#IE*^?RQy7Be3y(9{F8`givKrN&Z*qZF4FTp89X=L>3hzte` z3%r`Bt5KC6yEeOFMdKXl8I&yxM48`qQW-f^-0qv)pk2Bu2XijDL!XJ?QYIdCZDKMc z|8pu;5qCBEW){JA7yZ2rD`z8@~u72eSVh?JQ^S-FNW6v9RkE|H$mzm1iE~6;%L-$T!@!atMJo z9-QAc4}M1~Q}LL5_QL0~^mhn2$CT_|ZHy4V^R|O_`kFjN^>lh5I%;n4Bk2j}OdV5f zbB*x*yYYD~i*=<}iy=^@pS#$5S=KIO~Y#n}dteXV79fSQl z3=iQ?`$Bfi=_py|yO`J}E;HfA)tkShXiMcB)CaHy=9of64Q9^G?KWKnJ1kIA2W5%XXmk^#M-$omte z5x4)|Ck^yCUh2ZO$3dHqDBs?(KmTTDmZl9?zFRMV@w}D>LoZ8gEdH0e^bVy$wlC_L zwEcS97m?Bk;y2Ip4Y&B-1QN)+V6Gx^WQ|it<5HAyD6VDF8@)m!Y9RG%w(y}sv^4GL z9`0U!2mRF=!dYxg1@cEDjm6mM8Gf#)=~#C~wX(JqyJJqjyiU*NuzsxS9(Ms6uc2*_ zPV;DX-%lYjyB)PMQu1`N#|4wfelWE>Kv4A-5*&tHLvz!=My)C3doz9Ng;cnpjqoxW zx~(^nZktADuD_aWUHOczEg`Mn5+e#(*_jG1n!{$oU15m;6g^3|;MwR8OSzPjuT^D( z6Xo^pm;dVp5RgY=UcV?Jaf2@QlKL#~8(d-NfpI8{Irl>olP?f%lz1y(TIN)$5@1V^ zs=i+o@CR2|jUHZrSaCFLW^u+e`)Bnr|L_g>fPWCmaZ48bhmT-L1svP@V^i+$*HH{-NIYg(~pIzBbm3@ zf3;GCUE{gue@bI@x2`l`RAc^Nhc6{GEdNQ2JE0pU`lyU7QV^jLdpek}niIkSB*Uv~ zx|gtP&A(hZ+;2LkTC#6jk8JZMF<#=g8I1DSUC}in{qyIE?jh}!+RQfyzm}~ylcQuy z3{_9=mIP-8R)Vnm#YPPjB%xd}M|5YK8?T1jymy;!QvUgqI(pXS9i!vF)|l=zux95o zoc?$6np@z~sV43ahhMz>2`_*CJS!5pU}gefCGJ)`uuxpQtb>?E_qKX`9RS6+XC1VGsk?&pCwQGy5^1kLWep_wKryJOVkhx`x4Qg^gm@l*H zqjph*vT+Wiw1G-neSUEs-US@wcqMt@eqgYfc=l(E)%BLaz)=+(A0>AwSJ1Q-p-CNA z*o1h@4^_M4TnB-lneAw4X4x+uvdd(rAm_%{oq)2x)r!m20gz`NfYCsWla+3@eP$C! z!=7Q?vzsRADn}icJVZ1x0TPpqO%aqKl0ox$-++gSMhE??A3 z3}9?fYxHzSIwb%#OCiacNa3vzS|0|Te(*S4iOc=q1GWLw?Lg1P-Zb^$qm}?n%ejtd zw9jc8%b+F`vaRG~^*O?R6sKk6VZieXruL8lx_e&J4fv<}%0Hvm#JuYrDLw{(M1eZu$ zh1jESw2Z4zTC+geC}2fWxr-FsVwn^r@i|(xxN+%_5v~dATTH}OvG3u}U!Py$5m>4XjB){C zyfcWJh=?B04=;7yfFpVG3sa>2E~olE>&m4kq!MThS6@MF1_RKKI@c1Rhw+Or*BU5e znfHSkypDD{=dY068IHax9Bl4pBxJA*?9V+ZD$X&EV4#u{;YQ{nu%kp=tq3=%1Sf}l zy0CkW<9w%DiIE7nkU~Pw+C4V`H*1QMEz}fhy)P&ZGE2K<9n(A%v9vW*s9l)<hCI^vOPT}HI76rue!V84U;u|7N8M*W!eAm{F1+B-1D_c3+L5&S-Sf2%?! z{l=(yB)0$8tTaC2UEtrc(`O>~YF`vraa+OinqXBwkcCaE)-c9B0I$*XLo_`J z`qsNrp~Q9%T52I&2KvE@UZedOvP5g5_026$@M3&OE;k>W?>wJY9G!}!r$a7Eyo~?| z+-j!m<{2h<&+ZpTBtk|Ug7z>&H3O;`B0u9)e1;Q-TMEZrp|l|-eu?NVK@FEYWaYz- z61PCozrXjgcWy9zt9N&r75RCC=_Bn+%VQOVVUz-#MllAp z&a6g@%ySk_s@f` z&hIRLe{n4spGGTT6)5j8uW$Sifg~6f23nK74y@~=bgiTfVR$XQp0ixt9*x`mMN>AW()?{AG zm@KJRod7#bJ3c(>r;9fRf<*HSLJrv=eX(%W(OT0NIF!Xs%N0`Z@UReMd;_ltBa+e5s&0G)v{c#S z5za#(!Ym-I^7@>4)_`;8){{|{bhlsF{K0tWLT(!-F4sp48$2{Qjp|t-Q^f)M*$J93 zViCc1X~dw>Q#ajfPuKcqZH3;OyROAMD?{?$N6XB)Dw%Pa2M&|zBF`-#o|ORr>=KTo z5i6d1^8Iai4CW#|z6cgJLtgRS#J?+Phg?Y}>h`US`YY7J`9-G0xQzQhtlqG`;>$F< zOeLgY(twl_b#t1|jtV!micv3N$P{S2%YjUZWC8t#-V=4U*Si5xo(llPap~I)ua5OC zz%``RO+0TRgn9bvK?b!{72ElCe+&4}F7VX6!@#y?g= zW}ia`@|0>&txZ?uJB7CCB#~5j;npm=byt)IdH_pVQ zkc8lB9Gw)7`^L22rp*dl?TS^3{b5cAXb34BdR6${uJ`o>2)pw%NPt z&lBWd)_ESXcbi#fq4Z3J%MAgveHQWWP!e{Ar<3*V@1SfkL3ip~KnV7XypEKb6~d4) zdDZe6=st7@Y_LUX-O`G8q4R?f#t0$oS{hEei)+uxsEsCVCf_%)I1 z4Gv0*pwnANKj1?3d&pNOOjtkN#0z>F|N547RuP|$g~|cY%W6(dkmf$rDp28SV4#g= zY=HKmZuy=*r0D5XqrbAWnGez@diVM;-$IY;@6j8e%T#oK1ev2ibc0)5sXH=~T0~FC zWra8|wcia5ZHt?$3F=Tcv2?Odye$K5_1RDT`Cqa;VHt!9^eYsD zah7-jw+%8v7KYzJR`2XQI+%^z|#6<0$XgI&=tks5CQEjG=UBSl`V57BztY+mu)Yjik{WuKM}|o9KPrYLokK!H3ixlkg&=#Qm|w zC1#~=C?N0UsB3U9zs&sdShg7-^?kO;Ro2Gq-C&dX}@@?+>zr7t7eEpEy*`yrOro|$B;D=RgK_pwNbSmVN? z-$vw{h1?3Xe*al`KZL>mU;ut?{~}J& zO>!Pg!VqrwZGvGFVu3Pji+?dEAu_FtdL8ATLJm#@_2lqW3hMMus7B@hupJSbyB}yw7OR=pgt z%*tAe{Exngmsxf(f$ulY`ukh*MdUpMv55J{N2^2t0`h<&Ea58rJ*Uyd1%+CWpp#|X zIGs9D8tLc(A{VJ4%L0M%SI>J5(7XdmryOtt+xTW}_KoflJ1@lbAcw_;{oky_-mm#$Eg+I1aSfGz%C& zA=Mn=GOE{MT?5~NELZ^>w!+q5XbNg=WnSl_xh&^AR`34NXg&t<1PCVEyJ`<*(1sBv znyqrT&Zk2w?^f6h$(g*I4HmSHyKCNl*^i%^rKRD@D-Xb=F+wgj5v#s}pa|_=ZzwyMNpRJ5kJk>#A6@B-%5ihbNY4 zn+_F|p!viO@cb+z0kIHaoIO&^p)1Ht&CG`xrR-|JBXa{OnbCdg+@%-H>dH04(TNuj z)!{EhbzB|3nOl0-WhG}~7W6!=?THqxHaQIB)z>-9zF;%j-i$}*RaZS$o2jINSXdcA zw@vHELjNsCmWTvh^x1D6zt$ka_UpSzJm!O*JJ41VKlJ?k%uBb{`Q4L=YE^>fW|XyE z&`%4dKfvzo0z>r>PoZ!Xlpb*t|ADL{xBi8!6xN?!;2~?fLYFBqt&7WlN`6*Zk@a?U zoQ7&m{=dS})z1j-#nAs}F!qu72PH7FPuoa4AA%66sd4*?-uz1wAw)*zWfu3i0AHRW zdY9z^sY&SQID}GQ=yp1*G;MN|1!DkmM;@|&nG%II2aRej9z=}$j)kI`qr*YPr(pa< zq&K0|^wV`t*3YjP(0*&5kLZP9e96{43gS3-Ww`Zt%KP~J?}-}qN%zIHZ^Tz44NhyC zXHdU64gO}{^!=)CPLRP!g^TTqEI`vx7Czr~UYge7u~xlXX-%iZ&9)r!m={L@jlq}f zV>90RTn-6;`dur!{}$8fctK zuh&|GWr;vH?Df!|E&4n21L;QW5l1>8Y zqV$!oH+sI|ArB4VH;AbG*qq25LyL;oa94cbz>IR83p*6W_#q)NQ~F;w+ZlG?Jqi5= z+RshI6O=Y+04-x_d`?u=@|37nvg)_;82k%XiL2p`abErl%#yG7vu$TB4x zK)44MOtB3J@SYHqb^I}5$c?%3g0K(PnHDve-~brfIb;$I{^Ii=mi7}v_!9?c%ZbY& zfpe>!aHRfo?te*IcCd#}%PT@4js`}y+vyt;0YtX zsi(n>@(ueCnLS0kHU0tZLV;Y)(9@y}+$b}ca$_Jtb^)CICH!`KNZzLmgNZY0g(c|h;JGatPYm!~PBSN`Y**b<^ZD(-|z!KKpFP)#1IDPT>$q*78{9q?#B04KY$yLA0=7fX*$@Shf*y^ zhu`IVdhr?^x!Zlc5CE|mfzH(cUiZ`MR6xmdJt-qBInt5TompqdHa5s_a))mri?V4I zu>iNIy6AJdJAzht(jzcMd_CYE(tuKy4=MD98~Or}z=I!PmgNETSpuPALw)_}FMms~ zjcEzsZgR>TRqxDUJ9l60wUX)zExQ!|vg*0WRtLR*EqzNU_!cXbl|Bd%=|zqT2KnH} zJ0B!}{L3%N+(NA`oH3dNs`GX53x3?HB0&-k9TnSqrlhQDZJsi=un`RGsU{W{c7XGf z9WK(H|A!NL>0Vhhc5Gwlg0wvQO+C+OW#mTg8+);9=!VT~`KWYYNW6*~ggD zWDfcEHkoD7W8KWaOYy+jjOnb|RyTv;i2ZBqc}b5u@X7Og((OV^U2MvTDf9wNiy=pH z{}=8-3C>A=AMYTe3YLH$1cOdUq#7vXao390!EA;b$F|^GVo*c(#{1|UlBz(DHnkIk zfVIw#e)bcByet?6bLq z_$Y5W|K`oo(vS659tJEKNJ4Qwwto zsw$Ij$1kW&SIQ0dMz03r>@Gtzc7 zU5#=glcvx;(#kw<>}6*A54F?7PjF4#&^AD_orz%!Dr;C;Q!3E1toau`VFH@EgK~kq zA+9r!pgwSACUH-Y{~ME|9-+A*I9?OoTz+I{ID8=k(q;jml6Kx!bwjQ=t=}Ugfa`hR zRr^oUx&>zpu@l9)i0W5+khrqOMOWbB=n&-5{XNn%7l|n=8JX0pI9DJ1{k^OGa5R1G zA&c*CvfA&gOjZ)C!B>{XfW#5>3FvlIfSfAtV?;1J&N^nRUn_=V2U;x zLbeZ$`7HKQ@svDIYr}&Dkc2Byx^?fNI9k8#Gyushpn&8cbX$VEqhD8cI23VR3Fcx3 zPjXX+_8k_Ig-4o`-QjeWP^yDqwVWaOqtUAlv@_{4i?4J@SP1X|w=vQHY->oqvh&h7 zl79kJd(C~gKvNzZv9?c~f5ZNBIUZupj3k8_XYsGguFN2dQ3{G z*R1@$MA9U}7L@~Q?;O+>NamKD$1D)3s{#o}2ev+K_aA)2#GBmIAKo02L3}WXjfsOkq}UyqJ;#D&>G* zkAYN$-K!kW7OzpHqpI6gngM5KqZF{y(AtG@L4!8uUjjFtT^kvdk*?jZ2{%(T_kGVU#UDUlG99bkY49KyQHyv1N#zx4UvS3@Q?`^rw@f#e> za?sg^xP+oJSX#ucUCe=Ol|@R;$d37v)oLhC#a#uCK_XTXXI}!J{Eu|kJG_CE9&lDz z20~!FH8x+pe5tzKrNt}$jz+{|a5WB8WElYPnzs%BlW@UVHTr)+g%A(MYcJ9wHW-y`mlhOVrj*1Q#-_GyeO>m@du7W7~d#cG;<%{?`;4hK9wjZN6 zgm-&(mUMf?`a zBkq<7zKQWhY%NE?jTS%?HwK5<>U1X=KZv_MF{jcA9KV)WYT)YTBQlOhIbUde40=>& zg@Jwa8%3zKPkt7GTWgc?>P;MqU4Uh-%`2a7z9*v2zQ<-%U#(W|Ax69oNo)7*2rt;G z9k&-0&5dE}FsS9BhsoIo!4?EQ5^IL{vFp_kCiFiMq~yi=L zV?czlD!-H)0OfQx2KJpmnDP4*{$`T+sB9}s*7C!Fh!~`pr&fj16)A_Nrz&kZ zufqyc`mankcNeEO-DqDfR2G}RPsaFvg;A@dt#Vn_vL48J z2_pJ^Xi;W>PU31X?+;Y}0)OBNUm&teb&v7cJhX7I^})tvxXo@2{U7jr2e$r0Nk@zW zw0^MrmK1OUh(|u`LEGio`s7Vyh8+6>VGd|y9LOvvH$T|V+_MJT0g^U@(w7MyY!L-f zPafo93+Ts3E&gSfN1*fZVYu-Agp(Xck@=9h6BN*S7 zM?YkLP%f$8Y^UV)*m>0OF2O!|ZLC5bq5$UZ*H);8o>T2t`9(HoUyiGAT{nXEfdCG@ z@{T(sodrO&Ii4Ii=+-@Zul81Duyk$n_aFGe&>@snT9&PflhA_UzTU7uh`BKHHW-su z1-$1mjx#jSTA-Wgw-sa-ppDwO`QOYU(SP8%B5{m3mLV-jY>EgV?Fbi!hni%sV6R`o zV1&*h?GcQECQWy(T>-?=V{`@Bbu7TmsRfX*EC6L$=yLhGk^4N!XR1pX;$J72(b6v9 z=2baJ?{xktJRFi~$j1mk&qE?Sx@6TbL0)G9NpnK4K6}?D_}nXmjcI-l6(*Kdnht3d zYAJ#&mV?hKPaR4*%(L(*S_{wznAj8k4^m<-@l0(H<2aObvY_=*m}o$YXh+|a9B4*y zQh1isEK^7!2rBTn&))wc6q%mGWkKT(q!yP9>4ieP`I4W|yIBK_;*#NFI&fMHEC`A17+sf zg6pn}RZp2py@57+9AcIXE^Ybh>Gx`I`wf{Edd{?`^FL0Y;AjrzgbtbRWWFyQ@aU3q z;Cfv%luD~JN4>hf++N9zZ93AAw_?oeRp+|#SV1AobL%T}==Zzp_1PGFCKy3fPwq`+ zWL-%h?Sm4nG{v_tzFg~@{g2n`@Z5mGlU306y6c9HsvTzIG_e@%;!R%Cn|kXpS0@E1 zmWCl8yy90;NZ-KYiuagm{0P%^7t--y{p|&ho@FR=7J776;kgCs$3IVAa)CBjksph$k0<6bXi>yuM*qSiTw2VTToZ#vm} zWfp|m3X>sR{um&f1GLFT%bK1%g(u_0wYD_k ziynxxD!&iB#7|u0fMD-Q6;$u}(Th|k{V;^4T*W5(u#5uAp2O`qWD2^eS$npm%9%=Q z(teP^P5nw@mKNAzZ1U9G<1Kn&T8C!E6w@vjkB;0V% zsy3T7JNkRSSqFB+-I;Hv&y}|p`Ll;^|AlOTz-vgtKGDN#94h@|_B4#Cx{z0rsUNO75_z!(AG#is< zAG8T|wq#tgyb1jK`_(5`ck$3eq|-7=fql%FYw;AqV77k@s*;As{@Q!*BOxF8i{ay5 z!jN>Mzp;e3y*uVK{oIawTw}0B<(DAT5zmNZ7B4(H z%3_*J+^uh5F(M?&kbEx>$=HAXMJkxIPpJOjg^VE+9f#3>7hHNvx9K}_F zXn85B&Nbpw_s8hrjctGV$V{ebHb%T8jls+{wf+<_66c-Dn~xu$DKSn9F^0d$$a-eu z>RfD~@vYaEQtkm-ze01o90i`76jjg9vB_FhiSe%38psL=W!T3QA^G_M>nwI6u{dMz zZ}xe_`to5_>&a$gmuEeD;G}|+NARr##pSg@9MaZBg)k_|>F-tt9dns!4H|%EIT!jVV4kjnLC5QLDX4mYZv0WS9jd-#fywVd`tvnG zM$%y(dnDy{3wAMNZYJoNA^m-hq$a;Bi@Cf$_&aa&069;ceWB;8v#_L*4f&KP2rcsO z)N*s|XIi($9+zNqd<3Q6&5+-+6PV$g5R2|4Riv;5rkhw`|Jh$1K(AbT%~)9wI)q>? zB*gkpU6RT{2YoOqmja;Gxf77W;El_q;3%R6K_(Uoq)zCS3zN@y(g}nQc<-X1*p{co zr74^5PFAmVUFQT^BmvT5p+rcNr>WuI|Cz(oNY2OX^nkX9T)UsOThQjz37R6_D`zlX zAt`W(x3A(@RPX(->b^Uk%f9cMpFNVj_g>keNcP@QRz}F?M~F~<_Ff@Mg~$$tl#!KP zl$}k=3`N7J=X2;hulu~N`?~Juy6@NPc|Ff_{&$|8bo`FraeTk;&wG74li4bh%tvNR z<*8jwZ{S44m=qf-_29=dgrBH+1^1=^^fw8p=0Z5c0ss(cf~JBXmnDDURr0Mm`2TNxCYbt*J{h9)m0ld}8J0*s`VKxFZpJ=hB0qy7L+ z!ltDC$@*)%rGhw)8`BLJPP&h?!+0p6ANe?p5@|Fmha9fJIeu4uX89LE(D3E=6MU_v z+31@pZyLYN1cf#J1*hyH5y<}nr{LmH@T-to1L)$VuTk`TKLNa}sX+jn7(Zh3dVbRI zu%h=#=?jiQ8zI~0-izwsYt3XVM?jhJGatuVAQ`=W9DEr_)DbIg!zu!Lv{RuxYd>NP z+Y@2n6|>-#`Q;}W6u;&=*JsmjccYvD-8*67J{ z3;q~*tX5Qd)1numK6e98{qDu3m&96kPuG&tiz%9i0uy`-_-MhxdYC{Rd+OZA^o<|t z3ef*|5DO9AXTVM)SSVJ)VTw^#u-T3TZvlEv&^biD_Od;u?AzyUdw`;_+)xU}Sj*ha z$ET;)Y&nOO`5{6lwv)*};uH&Dsn!N!-bt-7#%Y7r1A^~M4 zRkligw_{W9*i+c~@LkX0TYflNbQfh<=nf^=MBCUYJ*k`Rt7lBzUTgJYEPbX7% z<4Nx~P&AAi1H7fp6jNxZZIFr{Y*MS~53Qsh8uAV(#5aVFJAnPbOZZg3TzT)4w4oe= z(Hr2P<)?8gl_r`FzfP`- zeaN;GUo_(6zrk=8S1=dX<-}xn*v6CS zcbDAEACj&UPrdHzf_#C5QQx;p6KaQ-XK{5}DNr|>K1M*;ti1S*L^gcVlw;@U-#sR1 zN2n$|)FlfnTIuT;d?S*~2h*6Nb(c7K=-bF3hF1HmZ%8~}*rwSErm_d9ytzj#U`u{%XlG=Q zBWt8~jw^uV&HGmFvS9J9#2CNya3z9Xi^hOzi?KkiRVHHcb9O}BM;Kb9X@>0m@ReXK_3PqWa!6vd*w1QN??kBB+|;>5-Z86GYt7*kk-Isx$IynPt&?BVL%n97bLC0o9gKU&@JdM_M_us+YS_ich!udyRdQH? z)FoAn?!1^>PkQKdVv#dc0n^-ibiG;-OxP3s+{nOZO6TkOY@O;ijPm3FwlFjE@|oYr zg)<4*nGR_Nyx$rtngnQvr!`V*TEF>jZ-noD)Wd(|d`me(L6$W;u=N&d-Q-1TDtAwH zB1{>S0J>Ot?<^bb78U?&Nxx6o;Pd&tR>ns_IFxrXq+EwJK^e?g4(2=+j#%6Oj!ChA zE}?7)ls!xV|FhMZ4{=D-Zo`hAaHZCfXcLC%3uKUt7!SJ50QVIB=vSYBWa*)c*aM9Z5# zu!3Af>Px`qno7x|-3`3~;2nV(Hd?u2)hm=d6)IEtCxGb5$G3F81Gh&D5x!^Qo;VHk zBCQP~n0$R0{*kqK?C6CkIbvH-M{LWbf62B09Lxmvy9a5xQiw$K@XE!XCxLz}?Nu~K zT1Ub8)D8abgmZc25TFr6sCGbif{w&$$JST}Ht#_-#9gTUv^C|WHh>-OebcuYs`tla z&d6h7fYrE_UiJ-?r{3qJ>7vw3jl_mYhVC$7r$G*iabo7VpN<$gMxtX5&(nd_f?eh_ zVze&hYdlV$j&S!EnTnB3ab%W!xspa~8q%c0pbm(c{eZ^DLa;)*vfvPq`lPVo1Awq? z0U*Ip>E0jGwuU8hG$!f7)31grV{aC7`eHp=DUHuS1SpWv2H{&(^>Eij;Ofq2E>^zy z=^EIqFb*vnjvno$73V8%LkWo{n{lQ*wM_XU7+p&ak!F*B68$lb13t_s(;X?+Fpa@h zhr+YOgM$jh!)`uo_yO}-52_k-!2E2!4uv%7km41LAI`ad?@MIP^mD(Pa51IWxbo!1 zTn^B$laju z1I&!(Ri%oNw;w-;dt-lcHoZ&Hrt;P@lgaRHe8g2h&kM^XEX`JRA55SfL;BZg!0KN- zYZW}FWF2XgV=Z~64pPO=R!~{>xAK~GvW4uVw!9ljoyls50tx&uyn&Go(<0u#dwU2t zjQ%^-qc?eU9)2}n3R>tO5&)*3QP>uZdO22dj?W$o=fczf;9*{-5j=(t2boOf4=l+3 z>GB$94XLL|TfAj+-KbnlG4v&=C;CSFV;R03?E01vs;@^jv6Fz5>j~@6YY`X^VkNWZ zL&-&PD1ZG`iesdBF(GWhTNvXbIKywYh%N%<uqiP-4X~aO2EoK)6ET39WxEpyozPY$Y!n0uhfHDWudl@;+ftQ*Y z&XUrzGd&cmaZFaC$H#HD?2c*<#geaIJ>)s^di?>+N(w5dYocbrk+)k+b1yz>B+|*1 zLNOAHpE+JgDHWt`Ej6ijmS%`>LtT%D)vZ95E=4SDx-W%!(LmZ=q=-|0HS5^{Pm{hj#Tu6z46MR6W`o zu$B@uvT|DRAhO|XM!(h@??4f z?!Upoc)GZZ{*m2UK$JJDn?|-rd=B0(qfK8WMgjQAiJ(RbznBY5#TlwH%q502TFQ){WX5 ze+hsM7m0ryxWDN)HQNakuqgw-8@OH`+qL7HgPqJJd)&B;-WPtZZjWzH+g%;QZyo+I}{tPwfUE^ObRsB z8Kp+g{};?c<{LM5{YWXI0nsslm8WoD^13FwxM$Oy!hYR8ML33t%ovN%!QUQ2R8 zvfOkw;?4nFU6Vgom2MFjmAV1Og92~dfZ5gOce)E9#k}6($tqPlkJ~mj*Vf0y!Piye zlhJ-LiEV1LDjzD`x$uao2XB?}SZ$pz0a|pm%Cs46KkyfL0$|DBf#iY&K0h=Q{ZnxW zT49kZ-VN<)oNY{M{tteL5?z}%O8oBIhmA=r$a@=osr;Fbfqvkka%Fz+GF%;IqAx?I zTVFD}hdCa+(O@GgzsRzZ_{`00ZG7}>ec84VA#=M`e@mY~3H^tE>UoE@dlXW$UB;>|f6a z=6M+!SyC*B1BqowiLOW^JFkSJF!d5iQDYmSH<3Pc?Rnc-s2|ErFFN0bVI&!qMfH#u z^6~ zIN~x;rEu)P@L$xRfMG&J3{sW~6zc)(s4aO^_?{I*>%wI&Vt< zqMW$vRU)$P<6mmB23R21c-tm|XL;b|28x=)<5>MzUJj(}J{8ueSU3|(j(xgug<^|- z*-Q7%e#Q^xM>j*WvG!xL@y8Z*T1+#2m4%nTqn3WW(F>*Z^L;Anrh(Gu0^%Z(e?gJ9 zW{5HJv+SMF^TUL}zhiEOl$s2^{dd!4?u{f0{kY=Q>31x-iTsgyZ?F3x8-}VLis^nsWY(Y^RKGGZd) zz*y>1V%_w7Z_mtG+xAf&ckYWB>I)3+AL|LX>uE5*W@r2qHB5cy=LwW`3ZglibT+Y7 z_+!O=l`{xA)hJM%VVsN^BK)RG*Nl`m^E5Y`v^QC}+3#WzX>}ibY={p0=s2DEN@U&h z(;2?NO}%GcAMONRar)FG>E>}(va9{>qS^Gu>xw=K^-v153$B@FgYOsbc?RTP zZE>d_88F4!NEZX>^^F6U=3vp6F{=9{l4nzYU&wKK&~}|PM7AJ6fh1H?>OY0xy&RD zxCj%NPo=%1-Kh8pCb?g+kVTIHjwL2pVNWv$7HCaux>^`u6DMH^jNP7G5gK*g(c05fQ+!)}VPGd|0WHPWnc`>WG7lpLugtI>;pJ$A_mAWA^{#E7vC8G8!@oS#RL5m*WImI4FSwlk?FM)dr$$b z22^neDB3GHS9qDHn}i`dk0&n}x<(LB3XyMs?+jdEJg|XraAeGs!%@Mj zs+t;+mzUQ~mvsuvMBRMp3~O+9v?DI4=z5H#&f-Xb0otO3SrvjcZ#Do>i%&@*_Xf?Y zFF`hPh$~c!4%ZwukCmW1;`ZuseeTI#SHV$g!KQk4_*bpx^le;UP$oOTc$ikuxYryn z4}>23#sCbMkIH8Q`0=w*3ue4uWKkR_M3@!lY>fskxOPcmaeZf-C3F;Pi zf!$XSA9!6Ra%wxXq^|#1x>fVp=CMO;z+$3IxuC19ZLaf3X+}(PGT$qVY4&=0OqZ@4 z_6qw+eXbr(PEJ*2Wt86*s8mc9o{%5Jb(hQXP-gK=NSL_5dgdB8bojd&fk)Q(tkO}t zt2eN7+`_gu!$Mzxi0eG|h-D%U@wm-g-}k_#$2!L`6j@sHs&~uNI5H zIh#Aux<3>0?INb9nBx$*T$E5bI8olX63bVk8X(l3RxtW?fIHjjJGO^EpssCDp$LPa z+ZDe51Y+@XUoD-@wn8{XyKj5Z32lI{>|8_ZZ0p$A1&F}C;Y2rVFkB5!2d&If}! z?U!XL+hA1QEIpUoTJdM==e}CL%Y&FsDFXuo%mIvJ1wLR~)#0IvvcK%Q=EO*@e{E4p z*@8%EJn$7oPB=RnFuRvXHf?k=wK8oROZY4}Z`B8?c=o@4Ck!^b78XLa;(Z9kpm-)* zRae0ZWWs#PCn60A7UqMj<(rIQD-|WY`fw~63!kfI4NP3MU&C&a>+%eCeBa+c$JWwv zT#`6K$o*bi+N2Hp;2Rr>%l-V<_cvtTK)Vq3$nRR$4uBp#5#iSfc6r^G%uWaZ6?(fE zYB9baew$T$ldq-*Z<-o_pqG%nR0SRo58vX9`Ow+jm5V=E%bW*;Hk&jq5Gs-nKdDQT zo)zIW2S$+`Z5D1ox<`aN1(T$1Gp%4?mU(VzC$5g(g5G5@N^fASh(o@-l1c2~6(u~x zKlA}uSdgLGWQPiCzcZiZlyJp$)v0rJ)5PwrIwdOJeBTHvPOGIdy}iAAE-}KF{R7rV zejAuwR!#{D#vx^|`U5ZqYPZQqNTju?C@a@&!31V#27OXBVpHW7wQ+6m=$AxlsHz%{ z!nE1 z#Nq`@oNk&I5b^2yvol|pMX|B5JD$SSCm}9w6y#)PMjr|1zvz&-y{i$3gyw2}lJ-}i zHVRD5&f1U|=>Hm)D4gqj_w~uii#~B_DJkqgatk#zHWuvFEiNt=K7IOhXKO137F?~i z*Jx(LRInVdGh^;@o|}EtHU^KNx`syCdoTrVcW_BbnUF{}5*S8YHSVT%@f%o0g>&&osLo57T|8k?6LbF6o^1K( zF0LOPp(~pAqlRMOu)_6&EMk%&{PQ-*gKiv9UaDj$6YjntYIbG~RypmR7<91(4ZL1S zp4HFsl`5VAy=JkZI9*W$%SDPh;(jk!80jjE`bo5bFW->25}Jv^eGv}+RAadk(>hkV zpy~DoMn(p;!AuNkEu$c-g`#X@n}0L(nEwj!E%U8UVmo{9{719A#p6Pjx--` z`aS2|x|7`N6;aca{*1oPYT#Todg^P+CXDY5AC>*|>C;*bwa#zC?g3LR3V*Bq%@gZI zotX;cCK>EOyL96QN@)LKD&9QyfZT9fuShSv;e}2@Cmco7C_ zYedQf53zHyu?S9R3_>|(M#PB-glPUHnXQozc2zg%K}NOLX|*jcsoI>K=*mYl7E}l| zm2{S!vG({zs6xe4IiY*#Zqq}<$n58gqc2(ZvrC@KovR-rbdl9l)I_4$w#V}Y+SVD% zJjXYLDc0lJ$J4>?>nw5V^1W*kBe8m?%2vL7K3vmK^mt($PP*OH3c-rLLui^TY5K9U;WNJ3;NZ;?FJmEiQ{-=A_cEGymq_|W!2x)BTBR~ zIgXDQn8*f`*osE0j+;oaPgDutSZ^}N2@^f_$##?4+=&|NGYU|-rQA^pH?Y5S@P7zCu zref8lM<<~Ko`k)PE0&LnwVueTZND6Q zLZXLbx0Rdkc#)qxs_I#*F!rWJjm+>%^1&^o(Ay>kgH@r!#678pYQL=gY@n`wwa<(q zpEN3jILK2;c8X6r)!5HoIP}$MN-hJ2?*JsmmmuV5Q%TgWhGVtW38-}@#Y%UW7wWyH zDPnEAP8uY^d#!T83g2xlMvZK>9<;+>CPY-sh2nb#+yt^bDE&oQd_Ng|%J6CO{R>FU zk~`Ec2vt{W>m^xt)=t_XHa9Bb0nyjpRC7*9#0?dl6lAEMaHxL5V^{iN56LiSpoUgXnw`X$HmzkU$q!APcdZ4YYm zm$Nj3NuugJLQY?8h(5>Ctd}L2-u}>zrTRXz{tYu8Aw9y^KL}pkKWn3tbn@io585kF zYV#S2q; z2Na=mvb(G0w%IhHzdj-6p?jtoMwuy$29-m`kuHvIf4~-@5RrkG(~Et@4`@he?u#q%)v~hl=5INUNCu{ zCj2AtuXuG)VFt+8V%|A7wtZ$DqtuP{*BJT7G)|7(qhrp>1iXVP~B-vrs~X zhnTnRfuZZAmqLZJj~)lvKQ7wKD-qgz(8PbrPxS?c0grx;sdYS-?OV$k{!{kFO}zub z?keoZfV)qlY3u1MuWJ-BUGk=L2Yp-r-uLgOlDuOoEyzZCca3B2IZr1UK^AGctIY_m zoOABQP~JAyv*+`_KYZVN*d>2Wzc%fpjY*Q@?-{cSAKRIvN{`0h`An-5&Ty_0PKem7 zoH$C~@uvT!oOXP^EVu68zho{SxrP7pUvZKX4Ivkd${olx*m6Y=-}HclPCj(RnasnJNa+ct zIIS~Svc(VK_TdUYIeNs{YdKS3=%Zj|6#t4e&8i2)wLFQ1O)M*dAJUMF9;5&&7}USU(__WlvsO^ zdI0K8K3HWrL7&fullJn#T(JPj5G^wi%esUG`7>wKfZgnX`k9*V;Dm_j4Z5G{JsI;k9hIotR0*q zcp2k^1Z;I~K(5^g&KO#V{E1=)vO{GXj^^TI2HOn}BvE*km6iFYm<>$7L6paEa_%nD zt_N}od2@5K30dRu-3tznSWFrHq@yRnoS_QJsVdB!nm5U5X?ZW%+Y6MPSfZ3|xD$Ky zI0vy0LacrQ!ZDXJvaqmd2;S<&)D6+4Zph5^A{G+FwF0xW@jVRTL1Kcv$l=4F&J6~^t#O1&#W9+U<5-L%olx;i` z62{FCo%#${PMs_dxVT7Gr!n<{=wNFuL_TllKH)Anv3&t$hrDJJ8t^bdj!k=@JNX(Y z{Fd=HRkoNM8822Ml`Hk&LOeL@7(Hl8McR}CBw8l8V2Tssy4Eb^Pn`Ofb50u3T25$ zUgk?1Q0r9T2UOu@QiY5NuE~VqmD>x)2Aqn2=3Tz7Xw$ASlRf8w@xPf{L%d%3ujR)@KbCwuKV$pg>qTSl@tSNE`xph;OdV5zu%( zJdNq7kU~UNbv$PRiT-L3BX>2&Ad>7h0QV`P_&#S%?9Ykzb`?oC#Y4|h7Srp(DTla%x@h_oP1tQCucwC+7T{Bf{y z!~^w6K|#R*UU^aI>f{pM#wUx7jg32T5y+9sGxlq@I50(=ItQi`yYi_Q4o{1VNqi+M zsP3@LP`ZJ4)*jbV8dsK=WcfIdkaY0sa5ko}1~4w<_7;CE+B?=JylwKs#InTc+^+Ok z5_AR{Ppr6-$$aObZ&n$>3$sYAT~fa0&wQ?U8~N_n!xRk-Q^p@Vk`TGbQqRLW9dZd~ z_tms~>rbO~NFbdqu z#C>g!u6o?j0A!WPS8%ww1@1bIp(4rAw78jr<-rg-$OlI?w?oEOTO6YW2Ik-sOcC{` z9(N2`S=)Pmc=txE>C{veqsGM@k0>oGm{Lo;7(PE#sNmZ~c2!Ts{^x zB6mdPq|#qQ))dh7em}CiM5sOtIIIW<}7H~s&cTNzPdDF7ZhEo1b34M z4bDD=iA$yvJ119Q(@7Z`1*1`qof{OrNdOCNamuz#{R~uTsUG62P}qP(DOlvgECyyJNo~(Ay0)hN+>j&hZ6W3U#qb7q^u$ zCh3x=A@U3*Uom7yh7?KX6R-mzTw_7ezpb7RWmRccrJ3K%jpJ5&^DL9U0BX~`&fVO{ zB%JOmB-he9?kraJ9^1Kl9t#VbPfJzF#8X={{H%Nz69@syHHsJ%CE3L|_)WI~VQATs z5j+8deTq|qV1u=0I}_t>*aLRvCD@qXLuPXMZtoh~%u+WAskk>LoQ_wA-Cp?}rGdSl z3}3-E>y-`XarpH9^KE!-)b+5g=mgZZuQ563JbTOjz*NUmzvB-uW{6vL{2H_-IDGV9 zj?alXwcdyBBqk&r)E76T%SZYS5qap-Q^7RoWnwZ}Sf&%R zWWDp9;V3n#^iR8Vn5G7s8%$`TBUq(bOW3l;WI2z2oK;v^MMVvqsmsGpp^oOEh3*cG zliyR;W6gg9+c|{T=da2c*`tmAfMoR8XHdTgjE9x7LPN4EvEUyqHGS1b%C=$u3#2zz AiU0rr literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/docs/extend/images/authz_connection_hijack.png b/vendor/github.com/moby/moby/docs/extend/images/authz_connection_hijack.png new file mode 100644 index 0000000000000000000000000000000000000000..f13a2987b28d70f81bda7096a54ec479f20b2690 GIT binary patch literal 38780 zcmdRWcQlrN{5QHQWZa>Wd6P|cLS$x-WJmTWTSjDrtVC8;86`53k(E6|W|FMz>~$kq z;dx*6UFY}5^T%_Z^PKaX<9p8c+}-zeeXh@Vzu&L%z8+mylP4ymBE-SLAy!n7y@7*+ zrw0GA;-7#|9MZp9Nu8EL$=dljUBrC8qOcZqwAp>oGp5>Y6`LFzYZtTam6~jq zl(O->Yv}U0x4S0Q8_JNEaN@t7u9aS2%^r1;MNd*kcE>vaFz z5`!y2a9wzpAnfBm*HrLpe6;=dC}A)=ihhkB)vv3_{WAtU3bpE!|9-z-9X{|4FysIC zNnSWjiBAaQ9qex>ynipe^-RF}iRHnK42757b01?{H@w+Mb#iW8>t~h;CXy?v@!A#| zE;Mf?No$QjeRCr6Kf|fhR%So)ZE^ao@hDl4(_ChP5&vrOXw|)at|5biD%SkSd0{MB3haB+ zu?K4qm?#;BPH3G{z`Q2VdZVK%dT%w4(xM~&;%j~3(skto?zsLpx&}HtpZ9wV4(NS% zOkdSl_2f85QNz5RfcZRj{IUXOfuO{3+z6X~jm2MKO~P^O!D_`Eiie6I%FhvxCU>(d zl$|lO9+v)9us$IL8z$#QX3X+XvGUuSr51<3b{*b+Dq^_vIU~C6=+MB(bLI@YUf~Oq zI`7?$jrCUgs?65z-dFN*bIWjPD!8EerNH~Oyl8BNWO6$H|*flr<}TKpI@tT-lk7%*fg_D^WJ#G!72ZQ zhC|$g_&mRCzG1~wyx1q9*nccoaHfEc7Ig9x8!xA zC@7Ur?a=r2^(cw(Q4WjYpvF~pgM|&Q@z0qiFnHWl1j6Bxb-o9NjOj+j z);(o;w>G|i?BD%X;X*xk+2Px~QXEH|^V@Ejx}Im3Iuu5Vt<~eWuZ2%B@`*f#Vnd303ey;0#-cUPNOQTi5y4&vQlKu2>sck;hC7b3B`MJy9J5J@w@I%jnI~@r;Yob1TCDB)r zc{-f$eU;xBbjsjsmPtcE{ss9bly2+gDG^kc-gT;PzbrqLky8BFayzs&*edw2^5}5? z;!^G18Kr3s$E~$*6~)dg#t#lGGf2M+%+VJLfZ~CI1p<%M^Shc4d z?RzOawF{Dwu@7ar-jJo$_o++Rch>d_tYCd%l|3GfdrT8ocy7Ml`wIKk@X`gFFWDM$ zqMSURS4PTb;3@FbyCSGBTm0Htm)j$~x>G+WHwF`;u^&pVUu+e@rTPhIXBvy>}WvM6J z>3f|sjr~43tQgCp*q^K2Uu5jNdo9)Hr+%sZ;k4uPAD0eahlDInG(229q!hdzWj9u> zFZYCoJ<-{72W#qo@7nXa74PlQdvPa~jkkY}ICfOJEoI0)BHN3a@yZ_5J9~I^@@Qw8 zG)dSw-Fttfe53Ic52a$3T8fR`$;#7h*6qeEscwE*d6pgV8w17)%-X%>A{|!P9bi&cq}6dT_zs3pLogx-*SUu0NOc zUmaGtuiTxaz5)kz`srmyqYEoRr_P0Y$MYDk=E5<31LuUh_Utd7+fq!0%3apN-`wuEdD{_mRDM(phX&uM^$Bh4wZ~^a{#+TcoNkF| z=k(XYTIdbcY>(N}1YbT?Jc{lK^dP1z<1u{yqRjS7Hbdx2Z*b)qZo@$~8G7~3Dwq%- z>z-8YYxVnY2CEHwK zULTSo3w*BCk@5o3z(h(HYNt=tjvw^qGRJ)~j~24hzORl}afs}lU$+{bun%}yH+lRR z>4TA+P5kfTU+k*;i4W3=jV}ApH@$jxeREsjOk2%&{c|27rErK99XC=?G|G^sif%Q9 z`Afdhd7EM0`efatK4neBFl6Eek%mc|oI(3j_TxN6Cz+J3#Q9f54yZ_J6VF=BzOY8A zc~J#Y1cwJ$)-Jm2ZdkrzBTDk5TFeev7wbvdE~RYdE8gMH>K(b6ZA$Mym}+&^ef672 zPv6uV)zou4N|AoP_V`vmiEe+LyY47(!Fc7%!0`<_8=r^ak@z~pRWug^nyCDPXQ zBv-@w4eHN!CPnnk^`u=c+iF?p%i6NKKIYTU3R_lAMCX)X-8PS;u*fSBnlC z`0H`oW9vPiJvnN0=5$_waqjLarSX}|O_Svbl1AHZ1q^USxl<*bC_D}0%$;31{24zs zur7I3^O0$I4=akCX#4A6k-3j{#GE1~4{DIcu(Y6)k8$wQx6oZ3Jau`8yH^i)rrY~o z){xiIwTvysz%e+3da}7gKq7X%1maK+KGi^L%pNT%`+=swxX*UV{lR&Dz94E&<9+kn z2_p2xIEJ;}6;!nk=T1s(plvBP0!-ORQt|Dvvt7v|nrlic!=(``8N=__YWKtQ1`3QF z?aYtic8Poae9Er*I(zb~R|gx;4eQED*#+NlS{*B4x`V!0UP zPB0RT?=V~~6<=>Aw;?w}1v7jWbz8&_R%TFh8ZD?h%gn*}o;-G-a=dZS zFa@fEoXH>e#ysU|TMFsF9)A_>XHFn2em`aFu{hqWzT08`bYPMrsTkIe?@E?JG-FF8 z?|mPZPRS~t{W3h8)QG?&8LPfxEH-~->ceC1PBrQI*xjJ>R`=z!3$6NhHR(Gu`j*BL zmmY3C#x@q^=@&09dim7d-^!)F>|!)XCw{-g`Jxp~ne9OL+k@F3aU9xrO?}wy{rO1* z59lBKnru%J$g=d|eT8kMfAB@y`S@owLu-d~WqcjzS$mzEH;VbFrix6hURz5nGU;p= zmG2pJ<(~Q>a0cIeGfz}p`3w=KR`!d*X@RN-N4}#JwXuG$@IOs|*Hi2WJ1&jW#7Sus za1y`iMCgGOhPRsJf?W7@eTM8k*o;I>1{W@x*$CMWGmTcdX%;UQ-rcznDVDBtIp3f> zKbl*M`u0ftMS~2P5R#ZnwmO%X9%<+Eg}*-bLPOp-81t>5c25awUk8c7X3M^L`GqP3 zkGlrfsAW&*x0#sxILYo3y?`z5d?S#8iBL!TZ_#pL(<~;cpl5f+t8GvYwI2 z7bJ6H^cQXuX4MIIW5gxHs^O%cXvCM%*?B}PHc@KoiE0)xedI5Bw&}~jrua{ML7OiM zSNE3%`klNNc-19C#i{K02nNX~?xG;q#iM8xO@7Sxwm5p0I*i_Y`!Z1P&GA!bDH|qd zjjQijT+quYB(LXKhYBmcN8-q^|G4tK!~{otujBf-WKI~|`^hwe?0IAaJB)=;uiW{X zmo!lCN|aM2=Iv5Cpzsj+@~?`*zB zZKIc^(4*AoL>s)qTeSeYAmjGk`>dibd=nWS318}b(G;%I$(RB4_aOO;){m*AJu&~F z3_mG9y5D8>-@rzpJ{2b@R-8VS`Cnx75I*q_K#K6c{!G*%G=7w9ApRGL6tYTA zTJ(fiEB5p*2*%^|Zmy~LKUYS-aN@u2^zdJ#wG1$6Xme8k-z)RtXYiIu zzt?yrN1UVmMh9Ox{B3$LG4)vkVs{VQb6k2Dm)XwfyEE+~)vxnXFXOo_eu+LoOyzNf z_LK7%Dek11*(tn+YWj=s<{0Qvjf}~)2R{w&4433Y=B~R+cQUJCdN=TbL;kZUvG6n2)2siv|2aUFM)g?Uf7i)_41UJc=<}cZOT!L0MM=l< z?;sH{nkxHV{onf=;a?Z_#BFN&&ma*<8;igFpZlMJr{yv6Nc?w@Vg#}&PJ4jHW_IV& z5}M!Th(41mp;EDz?7$K8q%p=t0KES4-Uy^C9*{hFP zQ#~3DV*DZv4(e>+AacxgCJ9{K^xByWiSW`MFVr>F1T!v-q5W1mQcO{dp`JmPI78C`ifRUv&~k7mC=;-Hzdt)7JD~JRE&`Am`y#!hUPBQ!l=*tKhco;27k;@Ct49EcIw0J? ztYzv9u&QT_j@{TtdxGm-OL}Wqa&PsP#1NqPq#Ab<{KLIp{7x_ zFFo(q1w`{~hF-+usreDJakJ5-;!YpSE>dg$+TC32Xaby+0oncCy|uBy0J7<~K_2t`ajnHaT@T~x02n@SBfK>B$%OO=R6 zRG$lVEuFlm9M2WQs!rEXcrpUT7>!e%gU}|v2YVlZh3J3COg7RW${Jn|@stYyoOUCD zDIstYFE{F0xym!@2m_Ajk9I;fqI@-Tj~TTez^RvMlcb?V-x+@Whdgvb~u9P0`Y+bKGqbN4x6mI(dnA zFPPuXfJ*=F-j6%wT)bnXy<2l>50st@*?$rC2H=z_7eR%mTVD2r_DVaT)VJ=Z9|t+_ zkNfVcrp;a@H?#+n<{mT;xV|K-X3i*>ik# z8aD46F(`-FA|(#rQ;9ZzB3yOzxT`T{9x2YU^KSYM;6E}CO2RW78Px=B-B&Pg zZ5PkFYY;8JaJ;uN%)GVOPyTBSH|y9*5oOA zgQixjt(@?G%uR-&a_4Kk;%jJgrghq!nHYW$~Rzs2a4C4 zvyUfbiI~k3iKp})B>lMW|KSmEb8lV&cT_G+J;?CQ`27O_D+O{~pB#u>E@Y2x9#w01 zSJS`EmR;x5rBp$wrA_DqWj85(ceL75;Drjr6KA(kh&*51E*g|+?YBnK>+U1f-2Phi z<`aWjh&j5gBc-<1G>(uVMSpe*^iZ}I7TNT3xK+zX(zUDuK7V80_gYop$#L5YgLV(K z3-r3J=Rdv7KRnn!N!$>6`ofn3*j|()Vn80Ie}1c1B%S#NjtM6>clTBksl@wR<+pno z@K@OaoyDrD`<3W7LPEA*Rr*iv+Q>YIl#%m90$YD;8Q@849Osa>lkKD8#~VGoh96 z;8flCu;5h)G>y#>WGbMA>e%=qw^9qWP7*b-7MhIihZg&r-VGkYGM@}idcfN}gIPt( z+}8!^-ca=l`{Ui>=fbZ}kg2?oDO^*r+s%yLHIJIjtr%TrQM#XL7W zj5(s`v6D@~^9|=Td#<{6%pcV`J*#t_>TrHSBZAuImlXu za@$18%m_(5hTa}yMCK}IP7xBNm z@Zp@XmW=RBM}ls##OYsQFZAqR)^2OaC(bR4+@e32L8V&W^v1(K?{|-8>@;f7i>mfI zBZj`I1m7=s;icw^<(C1EFijsZ$~8R$LjUQ=t9#kcgvrCjRyMHZ5jP8`l}qEK2Isn- z4=hqdeqck=Ii)7Om`7nao%qeJPkx*OeOUGLw$CP-0=Fc*&wuOLc}*l0%plU!-@->t zGX4X7Ie`7Ef8wF!$-BgTF_IV*`nf*?XQxhM&EuEcp)AdFrt>X!*xbgF5Dfd8k4we< zD(N7?MHhld{Kbjg-TIpnyj%{-kJTkf`zDX*&HTiOQ{_6f3`=fDBh1C4J1eyQZ1rS# z!+E~Bt;G_q!;)j`kBLL&rM<~y@3Vbv-z<;Vc^4t%Fj98WC>A4HdQxaikuFppPvbPr zlb51-_)i9j^AtBXdnBsd*j~RiyD9k9sPP8Q&g+#9-etdfrM!nz3n`5f8&`ey+JFrB zWmMQ@m`YcP{Yl)K96!Q@#>Wk17JDHVPWjTz7b_skOUoPH6FS5jv9~o~tQ+!U14q$w z?Rm%7k)8C1DgT#*TxxfW6nJx1eRxzgjCj2#J$Gjk z`puFPqeFTw`Z^MmmNn;H`~J4`w(t`i+aKc9tL{gSF@}X~%)(|xN}aB<^xG>Vy0jsQ zxVFu)o`lbv59_2uQDO$@wwoDV=EiStUOJ;Tx#Ep!J$C0pt=+@}@v}TF33lENyqX#h zSkTLA7jRWE9Aav>iGnX+S16=W0^C^O4*gs4$D6UwRTEDvUG&pF##aX-g2BNmZM8mH z#pC*v*VRSO@TSN%W z*~q}{6QnryJFSsHJd@5J!bO{RC(CJyb}+suYTPN|Cp=Vz8I{{^K4RnRFZL!w=usV{ zFKBo2CTRt-Fxg7+TnnBA+tv_2k ztrr=%Q*_Oaj2DDC@ZErg@@n;=<~o%T@xG2%i|E7-*QBREJ!Oj!SJa$b;bM<>vNDF` zjc!ooeKe6No6wgoB4tkt>a)vbK_t{dDqXWQqORHYqN%Pd{wCYTg_|xcrER%qtX`PC z*CbV595yc#s&>K-9-lrL>sI^7bM7FP@_s{wkd2sHLqmh%AEDDM0;k$W@AV01RV$f1 zOdiyevrXTgIOA!MPg6`Ew9Wnju1rxlItMK`E+q;FZe>n*N_Uc5Y0C&E=~AehoUY~y z10Z@$!}=ZjA0!HR37c@JaYu!>G52TU+lWU}iTdZlX2|ghE*0RP!zIjeingx!^VKI9 zzrjz|X+AnNG#U&#jlBq3pINx30W3dRrPF!xH|(uf1hAg`g*xxl->}>76)+znm0o%x|G;k6(=VLv-f%4B z{s&P%VF4cC|IUC+_oK(5YP&*3#3>K?$= zjQ8}f(3&wq?A>RAk7nn06p8@QwZoy#gai_O!*T(b@Rj|lDVEfu^f%Zc7O zPf#KNLJc>Rn-k;<#TE{~g{n`3^jfPw|p^VrtPp zOhWRjohI=LdlXFZs&hEe+kSqsM!7TH?}L){crtvp%vsd50$U|11xU z3M@}UN>lDX4glfDusp?k4@(*TrY@X#VR=s8BYE?W&_2Nh%hSC``Zf5UkoS4Z@SO4vv@EKi?58;M{{`bbdg}RZ_UrreEm^y+nSjZ09~83Hzn{AhLNL?#|6X zQCq5d?jq|!#Ys@MxnOIZ3+)Do(gqAbCNRpME8T4MAs}jj&4S}Uy{{93AAjBVz_&-=eC8@ zf9J=3tSW!At`msq%s0a z)@b{1<0KI!MkjCL8c-RzQ~p2DY|k1%K+ZI*xSPx6RO!9@PJ_o$Eb0Xe_jBEY2N&LY zHhnAC*{%lz!38yQ^z*#Xuf0vmj(y+frNf1sjDQ{kq0+;2%%` zMb&b$F-T6$N(U5iP3b#3*hw4VQvr@3PPe~#=#BYxxK;4rBk06WaUO1;v}!#zCm%t@ zoFeMR-%ey6Y8$IAgBdb4U_#d0lTi)5R-@-_r+l4L4x?8+2zg z-d=wh0Q!K;qcC8S43!eN-v)a-kr+Ki(5yjx%Q}z^_q)jPX1;%nU2Gs)>juosq|bj; za%K4E!S;9ua2O^}&$P8XcdX5ckr7O%2g-&O!BBLi09&(N)Wx@+ssNvL(V&Zvt`d|w zg-2w}PbmXF!LSmj$PPfwj$>7BHIiymNfy?aF>gI)R1N#3D(+kxiJ0p|$o(Dj!*84u zSMGdfDqpTY9zeED7=c7RvlKA&kd5N*Zp>)Pu+A>?Sy0$fOZZk(Z8Uy5zfn%EQn?&( z{8Vn&!;_@#Mdq!jCbi+!ZxyeCm$BEo(>3Y`##4A2(~-BO`!MfJ&~gw0<2L7d zsCkGcO?Bu{&A^M!$l*2J<6U8vE-VwG9kfqirEAOP41Ul1D`b$4NOOT1-?hdyb@tvZ zmgZQY&@(+AH29AY60T%I>L!u$0)JuT4s&wr(x;a)qF1(7*{NJpX1h~3HD9ZoCfc5) z3dQfDwwL3I8xrKaLO1(^U`WoJ!t4WY^}-z}3}YcCaF}NH?79^ArCGf#XsSxBux8fk zT6nLDq=8;Fzee_9uff+oSjKt-e)nZPfBeQKIzDytmT--ARmvyQonL^`?Jqe(D(!Dr z9J#^`fD%vHq(1!w)~jEnN{m62SFH5jnvr4xcM27EGa2DvFoPuBTw!W#Rs(s!Nl`KN zVP3&tN(PT2%nLNp0Wd$=49d-miNsP1l+T(vWl9K|Tf~ zpAKTHJTI+9Fg(QhHr=oR6T@SUb-dT3%uF52t*h$Vne3lf}QwUp>ZF#*b} zUQ>@)zjR(j1zdoo|H(&wdc_Y1ru4NX7b)uej)~5=`1ZF~va)MqHF5L)a|A|}U;&ck zx9-j}YesvJB9D##M81OH5|K+fv0ga=9g7N~B+k_*y8@_1eeNA;F@y6TV$YnRgQ79? z5}q?RZ8Dkd#qHvn#IF%aH#~$}E2W4`l5lH6t+PE%SZIRVup0vt{{AKb=af$g`dqBm z#n%a$`1EuBRU`)~+3VfbXANG#Geo>k!!n6W7ABQyNJ%GwZL@0@TNgOBRnP1NF8JmFX}y85bhh5YZ+AP+@e_EIR;e5eEwmFYNqobjSv zOc6s!a9!}4Y1TVMxDYPXfrOOjc`*Ki7vRfYKHXu$)au?iXG}<{4tC@OhNUXzweBP+*y*fD6E0uQ`J4kM~@%Jf}X?F7{aWmBw%v{bxbWZ^8Q+e;l0(nfFv+9lScPzSpYssM+P<`+XuNQr_ zQ_0~03t{!{?DKc73pYgEmlOR>#%q1rn_QrLo7RIe*y4Lz^k=Y$h=6Ud#CAZ4vQm)*K2U0l4rRv^V{Qa#(#jkn#ncxuN_t|rGqe;yLk6s%m8*O67hcgp39hjVnL~AE{H4rE zsVm+y72w{>fw*ANovaV2n|-=9@*K#&?puO{Gry(pW`G+I#eV78>_N)U^x9s%@!rYQ zbGfMXJHnE@*+uJ%xXeC<$>em5tDT2RA|C27joPC_Fa4VPrCfT2l2AvnL10e+yGwQ= zTj(=DT{)ZYSHT|%#pZcLU0nWFp;`NDLs^Q-(Q@LM%Uri2R79Ded)at_)$uc!IT1DP zX`BD`07{ukC{5cyLAU5lzgFf+C+e!uUksr=_o_mqSPAbB9raOnB>YB6a={i3n;DCY zE|a50xbTYo1nQVJ1*Yi)&m0ycG?fXs3#!Sf}Els7@DChpHnMYMC~~P$Ek$ zs}GLZ?>p<$@tzA0(YBWDPc>+wtRVwr0`TE?nN>-VO*xg33Oa=5O5>$ytWq>%5Cz+H zNsQQK$1%H-=hVpQo_H)tXiVUdiegN`saD0G<<`oM>ZKqGY4F+vU6ZP{lbW*Zoj~Y$ zd+-RxkCZ!hG(0@nkH!RlFBt`;lz3>t2cO0MV=QX|Y@Wqi9oyurYS|~NcfUiCR^YB7 z-)!HY`4kl93OFTNSA6;pU~z2{dy`p%X(u0jK6_gnv9+EDI;8g7`r7o#bwWrv|HPRFt}De250CQ@3?J_qdae5W={%y? z!k}6e7B8yfD>U;b5pM|^+g?2vF@k<;a;jk)D0cj*Z^#*pP_=vxC13!6XZAM9q?BTO zNkfyJ#gD9MTBJ&1T7vtreqo=^LWaeapJUq*)Uy=Lb~tQXpa}Z@mvzBApR-?zc_g7j zN`W;%jJEcvh$yw<&q}f-Tip*1;7kApSC`I55TIQng$sa3I8m*2O>n~m8SGy^ks*9m zoJgW6JuzaVbDh2q+Z3M*y;XT8sOb5E7Zox|-I?C^HBA%F*} zEhaGa-2lBO=T=o25uMl|f6eZTa(>?BkdvYTJsRa%X}&i-lQ^_)F!#R!#tRTAh zOpr7eh1Ji7>KTF_vW-B=<|xBh7S-SWZOeR3E_-u#hUzGEERwOv3NN7NHqM|;@3|2r zgC?E$jQEV=IyK*?i+1S`@nt)#QG7;v>9|EUCD_xU@%!xFDE+%Pz^}ueLm=>g4C5b) z@uR#%RPY%=xAjke!=R;2MT&*zz;>5n@WXij{!sQ9#GBjm z*UJ1+G%7ghh$2gp&+v!rID`Yek#b88$OxF>c-^B)9*kg~LQ0r^$#y{bx8h#uP9cz)oEArGIyvX@U}uLjB1C zp-v!t{0Qcq`Lv#uGG39A-xRx!SLzCj{~-9LD5wuQbz&qH2(F6-QCvO+H^*my;h`I? zG?>uJD7I9yobn+ZvQS!NO8N%u*a-rY{m!a0@FX5b%wCdYurrhx3p?T76Lly|s*v5F zBn=W3(Py9*NKdC8!Y6YQ!VLw_84UYBTL#%~NFl-mUy}|<3i^xv`v2}blAqCkmpXU& zt@Gj0*I}+d$TS{<|DD7{F)N?de`ldsolY%y8{@P8Q@i!~C0iC}XXp3=#%p1r1)FV5 zV7GA?$Wnc-1IB*_sDhs8x--#a8`V@U50`S1<+h8)NNRz6~C|b z?6M;_xJsTPF@4LnGM)jDHDG7qt0_CxQk!5x^7ogj(=6SNW>^9-;)@VhN)>4TN}Dfg|L zQ4&OqQ=qu~jL&rTo@tLW+LS#|OBKthz6nsODx8u}8gnpzn=bUNi9^vP!UqcBk+EVO+L=P>my@ z6-_Kw!j|Pfyd~nk)py+X>izLmTA4)r7Wr^0us}rsZmZXDOdjOSBZr7@ zP9W&;O|Pq#`igs=Bua{sVfGH3B#znz@J+Q}aRm#NewkfTo^hQ~>2Wh+nTJ4o-@u(h zJfBeB=eoFiIt-U6J_?8CHYKF4aAFAVFC>=n6YDk=XB|v1_GS0_xvVjg`e1p{1lb;9 z9oT!uk6B4Zc9p{yk`Y@YyLiKWBnYB#o3Ve02872}f9aC15Z~@J@_LBD#1FFvTmYrpb(3srZTVQ6lYrSZNV_!d9U?unK+7Ql~!; zmB(=FGhO_%<@{BmgK_4%5^wJQ5|>;5&cpuNh0I?#fo{V6##3FSBox0>jHf$_aU-aLRh}cQ!D@)9WOc35~Y8 zDQ5bX3tGgIpwfu-3URyOkI%qsKP*^2{-DW;$oR+-hx8L-SKHV*@@ia-5B6xRfx>_l zF-5Db(td^A6j%>CrAN3Pf8mXjZXZXB;s8OBTEopWrMZvy$nb@bVW~dd{iDmER3uM^ zp5xTvny~f61lzXt&IDW$3PPguWW$k{E_)MG~eCG-p)g=*2~zTQ4H z!mRlPG%)<#N^u>$)+rC>LQq2v4c zbJC3sA?5PL4>WPJx>7x#ym1yiO|Sh%m*#lJkV7)E?1^;E-WLiV|fR1s5; z8rc~gWOQQw5~dhKaGLu7_h|>cQWet>zSiy2_Utcrw`4O~(pyt7jFm7ewBoqF#_!35 zS>wX%WlIJ8zeHzY|Mxx8@nxTTOx;DDcH1v{_vd+-ChYJKs~!MBMd*qc<+zzHOMpZ? zc%W+_U(C33$GF2~a?15||GKo3us`I4Q(wQfL*G&LGxvqh*Ty`iBRpC!EWm;?ZJk|G z`aSECcqFU{(b3TH{92`xS$J%D>WuSW*MP~vLcQ}HH?ZojaUjqQB2(;TelfMbe$IqO z`@?Da2uKaDlI1g*rtr0!kwvwCet>&gA(kbAP)Dww-w5nOaZu8=E%v`T)*kZ6haA%N zec7JG%~9#ogsczP_=tbYd6Kf^J3yU%2kgHXl!Ha9LG+9P;4bf4m8`Q1)_u{pN>T-+ zL7JbU`DOkLH0yL5NMZ=ctU*vdID?~?1`0ZWg|-AzBIbfT$->vS&yluE7WGsvARXdh zPL!aX==cJd?0CQod}*m_tyZ&<6DZpod3t>QY< zz|IWqs5ipEIaJ>#j}k#0}m;Q`DY3!YE_JN5_(lG}x?_4h9W z*L$|h;C6i0ttt^yhZIl8;n^%UDj-`v+7)hnIeT1Bd!%QDB)l zSS6JxKuj>MbYHRQ=Fcr-PfpLhGDP`j1vv5E8kEOFRg`_sEP28WkzanJ+p!w2do!MU zV5>rssS-L9;U9GJ*pQ-b0ihcFw{yXF9N>c1Hz>2yNX-HPhS1l@qTUa8(eOXn{futV znbJxBJ;3*t&7r5;sRJ7|=pE<6+db{(E7R>9EzROqL7qJ|O!BaN0m=St?1v80Bc~;@ z=EbBZDN+vFb^gq4BECsBv5YxnYb|G0?4F6jM>{atXjP&45YaE~#(oG1Etz2mgGBKt zSQBHQ5*6{lzM#3{u3~RaUw|Zm=nL{0I&W_x9m*YW!s5yaWW1p$(jFR25lAo}`|4K8 zN#0}yjL~IdmcOci`Iib+V&pe;UC#FwcEvTldU#GiRHzzemIbk&+@RkXC#L-_?T$7?WVLUHI&+>S9qRGom&8izm-eTV(2Bnxt@B60EPduN&c zu0=y&W;(B(AF0eo%Z;@o{W;UbU8Xzm>D?Hvp$7*n;8VLH6W^wH(tTv+5ssJRisUV~ zn(#2W$rIvg8iP4@WPeUzBS8rW1wU7-Nrd11?FATp6VS}SHIWI*T#4j7=Jh<3cKps$ zvK8R-#9XH-Qi2T<3r2rVN2F`+_<&Rf=I}Pu3hzKKJd+&Z9euW8o-)0uY*fVfZ6PQ5 zx@u^zwAi`DO6XOSK}#!`&EJJw%^~Rxj@aB3wjOSEV>eZ>$D4(0C8zfTDk`-e;{H8n z{;1P_)&w#(XmYeLpnzxyB01-R$DJII8sO^!&@=JB+v6^G8p26W$pe?`-MQ|Iu(8hm znplB~EfY2$hcNX%noBs?#KvwUWwRfril0vg?LU{zX|=)d`VGKZ4&~4?#M?R)5IS@d zI)?dN?<8auR8Glz@Bdi10CoC?_ExAk{oLn=&rXj7X1o<{$%0N%3xxSw|AoO4PxO85B1>yKrzVQuF;PP5d7t0fPOU5gs_dL(lZj z&H$Fd>GVWi<-hh9CbSt|Xiqxx6C821N7Jou{>m3nt%gEGCvp>d{&z%YiV_Cs`Ty=a zhByMwzdU|}>?pru_!HYbAR!RK5MI9MPr-`;iv1d8{>>;R z*Pze~BaHlWLXjH554ik4ZB_q#O9!SWFB?nwdxyeTfWrrT_wN6r3;B`aK^+_qZ8E#7 zRcqgFKo77wE%f|XCYFzqIfnqQH$#XByMdx9cXJHNGel*g!{TV(%Qb=~S5?1iiozJpg3yce@)sc1vSEzBIvEu@`?h}BI zM4UEEU}AJ1+63}SG8BM(5We7J27f&!oL&n1R5Fj}+{U$BNZ5rg9R&#dPfvnL!n5p_ z_r`Zc_M0Wy22{&Ub^n2zZV2B|}%6By) zj(SV*OxVO5Ay5%%a$sD9f<4RW?yDABGJGCnRW_^_n9y;kbPd6muQ1O5?=hoAfQ#TY z0xmxadHcX82gk4PER~0IL~DigQm6s!;Xy3E-q1>g z2rKM2GD6GkJdlt3{j0D+NP>-@)=Vk1s}8g=mHU9 zx%4q_#%p3_!g{5rY%iN5uafYUcy3zNEd5LsvcC!K0b$NukkH6fo08?4-~|w;)%@tP z@Yf`D(oo6LnBi-&n!?3&$k|{963*z5D+QGvfMXSG`jbYSNuxK#4h4~e7-+zu`bmT} zJi_0378YrUt#SZU>@^IOZHrlU-kH;$Wd9lq(brJJ<&He?+LPif;f42UY&JsAV^Ao_ zuPnfWDg-1aF%zId01*c?MOQ>gs*u;`@pFAi1h+@xY<&v|F|rb{IP+`fWl<<;=@-yH zO`*=f&?OBI-U$75?8!g$g$QKNlFSeEgW$59V9gJsHlKmie`h0NIv2dhwa9MP+NgUX0(bJX@cA96fgU& zOMeT6|G9YLA7&BV$(3{t373HgqcHfX*aV?k(CWX*;m{Khq20}|kblN#xntS}&CA+h zDj-b3FsNl|^)cpJFeyDZ5F&9fk;*{X&3t`pfY)&BwC7Cw8MmZc&!LqhJ-;XZ;&Y4y z{`IE=|IunOR^po_=96!YDv6OMXCyI5_)|<4NPizNLEe?oH2po^ZoKvu68V2SwrpzQ zEi>!D?Mb3l5_zSFC*ydG$Gny7)2`f8gLY0fggAtk0NnKbwJz@ojYV-gP)3Jr+&w`0 z9FXR7f&$hw?;DUTSdk7kL(n2`xzRL*qV*8&6sV+jP+b|ezqv$;?JpP0i5$L;&{gd7 z2MwO>tO=f}3nVszKy>?8s{xi!X8cT0^L~q!&6HwyvBMW{Rcc|hpnvq$aL7whdx-Pz zEaBPErjsQK@9UaAhFHQuYd#Ac2-(oMM>bHam!m6~4rZ_cOv>>pp3cA6iBr#cD8esizq>b{ESll$AN7h$ypgYhOk12F#g-awz=qTyd!C``BV|Ps3oH&rO(q&enJ(`yE}W4500e*pjU3Qc9K@JjV(7nBDc}|pjk6tt-QU)!-R;8$5Mg!XpndCrtrSN}ca^ zNKnE3g=*=8C;Q&8c2BnRf}fr008J!R9I>+$_LSZR&f5mh1%Jg7-ck~sEaK7*8*=(g zcp{@mKquaD={NA+n;XzuYtS3mi8ipLi)kzZE`cz34C!%1b|)yc*(>KF7}R!vlx~ zNkK$(k#v-Mc3YZUGedy^kbX40I_DO&ZZJGJ*h(N16b_zPMtW?CXP9w9Fjy+h9ad-+ zEO#6~i_)$`oRLp*z+7NMX$P-n;=rLUQUkb&$ zi6v*0g)n|}W2~VXsDTSO^uhJsL2k12rj^*h4i{xo9>V;Pl2 z3109!za-Sq6e#JaQrF6ttekBsGzdV`67Y?Z3I>OkE(z?U?CM0_Qq?Auxd2i=BWYOa zwr6d=jLKIq8h=?952Z#qpQJLeA9!t@>M6&eo1bSz%IHCxda3!Ispm;Oh}vLmy~=O% zg{?f-?7hGju5TRZK-#b@jHJa`NU*0>G2Jugf1U2_BJiEz)21&@fu`x)V0KD;4;D z^p>2<5`dfh)5kCIlRfp9Uo(@2yk9!|V&zj};2RZiApN5xo0uuo2~e#aj4=p7>Q2oG zPQoY@lwV}H+K;njEgF;+foq&k(|r`${w@X?M^qr{*WTMo@VlxYZZLKb$jEd)BLU@c$D>noQ&uk7Vw4s90q%+(^ zqp{nqvy{Gbwn?nw#S3GKYn_(zZIk%c6OF$m0?MA=?c|?}`Ezl|Rg_~(=~n=Ihhx5& zoZuP<9+Y%!AciF|24QsjA1fWI<)yO+x-7IlO8jAC+(|IWV-QN*9RXj<{KgDl;;fIKt>NOqS zGjt`~DxPDF|7kf0TMwsPK>)p=fvD7{+;?kmOWmok+25);&tEr7Aa0N}8?3`b5+`H) z9MwiAM6`n*H3hFHYuj;xjb9vxQ^!#z|_4I0|0bD}h+FH7Kk=ge6 zW?|vWL%Ls}C>X(;G2e+59eg%3xp`(d!p8vb3Gslus!ID@#%Rx=6Ik^XRydps8n^1^ zeSWi9?x`3y*F{tH37w+JK3TUKU|CHMhxWa^X87y-2jVQ?G!O@4zSA#{0bQLFBp`|K zM8#zi$qpw45Az3yw(<5~hR@8wwg*`UE)e-L$%o=)kvdI%+dXUUY}Hu+`M%b2*b4TD zYwi-Ud3yDsSBDnt7NXvxfwe0%SWJzjdg)tmqwde5UG`W$)%vf-1ust*3SQDpmH?fFY!pO|ABVyN%j%oJ}zeScC65P8OOFVZKp&JO%9On?F4Tm@X zm6qY3O$*~0TWL)=mN8f>Ugy0I>JX{@EZemkJ8Z|%rh9RPzA#))8iIIhxo*a?Rte>w zsMkYteiUfFZ-2B8oSYH7O6KMXLhk|w46cmM(-YhOu(EVGT_^Ue@Sp8ElYHXw$-$2U zPR-fzvPj*s0o6Z7+0!Wi8rC=P!4EgLLr)-6C+|@C2;~)`m+Tc;WW(8$W8a>fndn&} zI)Gh~-tbkm!y8p?<%H|uUeyVUU|{WOCly^rZ8_l#=!Rp#QeJyZIYsqSy|87wWOhEveI8W;W>VvZDC~zN-_K*Y-CdH37JJj&sa}s~ zG#h=~0z^^Aw#(prJOO7etyVzUH8BIQu%2K^$(xzUZ*o>X(PQAwXXGP0`c4RC&S=m- zC>)sliY|dnJGlJs=|5J_o|C-ciiGj`U8dEhN*&v?cdA$o;s_`p+r&*SaTYySp211< zqtm8Px_={LnBmedtUc(Uto^vQS2S*cv+P|w!8PE_7f@U#ur#|6Tr4~~XBV*iS~#*C zqF1Z8YKzM#L$eEJ-vH#2p_{zgGwA*0ia@D`a-kRDb~S1oNvq{BgT^Isk2KNZbLsG-11dZj^xd_db)r$_|3(ZlHzTE!HsUzwcZw!26ExZh9qT?jCxsSRijP!h=sjf5>0FyzR_}O_=?u-1=^R%z-|zB7<2Ij2{`t$hO9m^F$ED=-%jE-$+62ISly*D^&*!*i%(C? z)-FcL?s#=?Px16zmu9_}Ymkp-gMF9b0Y1`WN?q$#O{vq zT~GL#G>IH39B55E%Uz_X+1n+!@%iLY$c0ps^USgj{2#`V5 zksRMr68o`>nX)Hvo;Hnr);KwVeXB~n31HSIz&{Pv8qm3dP$9xQ`BVEmfdK#sot=hVV9w71n>8xz`Cr7gg|HbiO^ zEyxMW0>1@|(ru!F+S#5H$~bE}Frp>FVT}$;G-^Z3zPDMj<4?8AZA5GrPKPPW$ZCg0 zTo}xAPO`m5_ciN9_NvK;A@zq{PTFgy5Dy@Sa{sTV1wH(7)?rOVd}gNdLE%L!EsH5G zh~uupnx%q?;l<7_o=fv=JC(})M2Z;i?kQQA;JnJUY}dNWbD7~ek&bqY)(9>fSO=3q zn+VJcXLVP!8$bE|Aj1l!>o>Nz0gL>x=kRHcVis*RVU%oHMNOiKe15MP*xr^B7Z$p@ za-1toO)XV}LmjQada5C+iB4G{*?E*Sha1a&1Hj;zihF@>LeuCE{x$pDqgOwe5S9;w zX&`-V1{lXodQ}@}^7;ovm~uE93Q--Wn|y5T-E7x-|2EQ(O=25|60bQ8cpXPR1Z4TwGAP$W{J4ecD(duP> z91%gqXFx1my{Ssm z<88_61n_hVMo^tnNTof!5EQ=9S6PI+OOH#U(jMc85YIHw(g4K$9=Ak zt}cA|5^~kOcS;7#I2aTw89y>6e2&m84lu9F_md)2-r6NvnX&vqY9Bqntx8(CDvpCE zLr9Y_@7cwZy@^f9J=NZa_&QfdCkc({`BPoS%XGIIYxLfCZK#?sq057xwDjWBAD$^$ zC{I+KOY_$kHtXcMcT=@|H}qo0gwq5j%tgFGn%yo6t%KA8zYo?Lts%nZE5m{lhltWD z>r=p(L++)!O5OC@EQ^Bl%}kO77SohR3fKqE2{t%>1udBqi>HlaZb1|e>Nt=sM%#W8%qGW5*EDL~rg^TyYhz*I&g3QLVTZ*&?nrUC zrmXfQp0n-`D+Le%H-mvPdg&{#ZL@j6>X$h_y@66>e|w+L8Hbk&qRZz+93k#>w2YD& zZm}zF_(m**0%#vgg?*cMV&}oXg-VDD9Sd8;-&{&8}~13Mg&D+lamIKjO!yn3sg(Db~S`2Hzjb zhh{b7|3uDoJF5X<$EO0wRHQo_VJoKsi))k8Q)HQkt^-MEPmLuc{pkOZm|2kh`Gol9 z%h9O4s?l3_L;<6EZn)@3yE*|NPkei8jGmGhN%g;?oh}LTd<>q&8u$}gc|s^MJvkPI zu)hgF=WT@AeAu(QeFrLg2|1F-^jPz4gw$j37_Y_e=$nfl9R45!>@gat2y>qznr?1v z%_U2Tk#-vN!QEg6!mN5FX~KVb8WXJcdU=LtI-Sc*;>)3azjI3NGjPYd+M$r8+*gPr z<06q2Hk^(elk{T{uP^dGy*4_iQ*jy!m|d41EbOu?4P;C&L|wb%vKtJUi4vy2{n>6| z;lAdaoWfnO(lReQ4;_xVw)bwY;j=4k3vC!LlUfJ$U-$iH+chJFFg4{8MZUr1UQP78 zfbnCHk)^4D87qri{vz@N>6;XGfqvATeV+b7p8vQ2<0vd0fhZG^VfwLw+Bp)arvUG! zVm

#p8!y=e%VSEz5A1!{}AOuQ0US)*&9cEQaDFTK4dXv*Ss2JHEZmqF*js$nh!g z7(PsTXuHQ67S_@q_Z*)uCN6R;d{j;IJ4VvHGM16B>XM=jEF?5eHM4mdr{asBpRXTD z-^GxJxM7R1ouXq#q|^xLGIbCUq(55SXt?xM33H$-@wfl^{X<6I_pCysXP#LpuhxhR zo^%;aF?uNv;)&ky(2Ff<{K}6w{cymn$bDnuNKiS@#<&?6elVBqC{bbw(29dI_9F@R zU|=cxcNhVVYnu2gxNRKb52_ym(j7}$7px`TQ^QsHCkM9oxDa?B7J<#xOKcVI+W9hU ze8-_dm?jN-7W^L4IjFt;pNggR^8T%+@utFqo(7y{VaOFsuU<>q{|-IOTka!)rPCoI zkt^(-YD4E3coMQ|@J;GGd{0n*! zxwy=q?<3)U7VsR^;NT)nPdpFb`Wx2=(GRHE83by29O-%2^d?LAeEi;;ncvg=M=d!{ zQ(5koaBcsL5*Z9)d1= z(}}O&GCvN~-p!C4*%;RdGE-*0RmlKfW!zfl%Uq|4>6hEauJY)E+hV^aE;e2EV2xJi zJ{{5w3ps2rj>EQ8i$>dp!De@!{aNJxI{Z@OrL8uoO&``;SD2tcH+@vtZA#ZMWV6vq=&r;om~9#2cM?*N|YfX zDc%r#TYK`?;9&FJ8_QG7Mwy<^0bNwCaWH)7Kdw#@*kEch!2kAnD2~KGvQ=R}sTIDu z8uE?3d*5_1`i;P37kXVu$zN>W^bmIKfTkXP#u(fgYjfsn&%)d@7AueEIz9QYigh^f z^f{PvOBDAo#Jyjixs*|dfp+EhYQh}Nq;BQ5ue~nvHVvo5M-!0CmHn{=F7z(6_9qk% z`|AW#-FWN&_$(au@|r+XR|oMgh!>omy%MMBsa_9KU_{730g!3FWtoprh>XqV0Wjb z+_-do-4VSP;rg9C^1CYL9#N9YjB*%&H$b48wNjVLxzAdRz7IIW_%CbWp$$m^p*USt zKh*t}YiR0@u$aF#zkao%LWmIO@vfFZQPknl?14^%J_3Ei;*MWhrhCgbq&qH&PNs83EiZkbbD&k2@2Mq`!nf$? zJPl=p{Y%9enL`+)m<)Jy{JAJ%Wn8~~iVEINZ74IU>pwhr*tBkI-QD#cc3GvEJhj^3 zUC?J~oZaU15h7N|?|NOD<6ZrNqPVyFss2mC0y%rZQ}2*7b4~Bm5lYcqRr&4j{gbpo z+czkRo#V!zZ(?o%gy|IH=q`4;{Y||{W+0+jRcd& z)BjldMpEhEUV`3sXzDW<*wqrB2z7N^NHRoz{~EkPat0(sB&Sz^m%}@3y2hN{yNMXY zKx0oSMV>{Qw*bUAM|UDuC1xhn+7rHoNT1Tgf0CjP4X|{nZb5~;Av`jc28i;K*lCx+ zv0NDb=Ga(@#x8D)&p6bO+N>nDA%?_v2E_xBUU>BJrOs4eS+n1SU*%Xb=_$`qcfj}n zE8S6|Nk#T08FFB?AcHKOUH{sqX(QRAgwT(Xl8A23VyaoWt zqeTKIZ!VwdV^11FfP)K<^gBTLUDn{{T-ER?!dbuGZUfkV zkvQW>=vf;+k*wiW71WZJIkq1%bP?a2Av|b{1;KU89V7mnM+s*pYm|?F4wH9&yE4fp z&))VZKoctVwq53~CvU3(PIE3@>I@0+n@3PlO2%8oNxp7hR8%1G;03X#XqrH-d1J*# z?_ax;pd1dens!t7>Z?z)ko@#=UHiSvq?jAqFAVO_JvnBa&~kiKC7QSI?hOmKSN9L5 z{h4OuYfPaw1*PSI8ZQ0p4tUa=kzW=jF9GHo|0zWieCV&d!9Rd%F^}x2ty@Nfl54%) zK;KgF98FcNOy!!fcW`K8SbV1p3T+X@<0vG;8PY{)CdkTLAN_pnMb-A^^2?0d_>PuI zU1W>R)|t-iJ7SU%@g~_&l%%p2XZ-pO+)la7y*eQ-o%zb;TQ~kZP3I(N+E*Ce-bSDK zNLvb75G-2!`v!dd=cM<2ZC|`7`59bVaID0^cu7a${C%Z9>1N*6}@Z^bc^$>cIFmHJ}_FfpH0dc+h*```U|`HCJ$eu;mC(9Wn~(-F@dP9la#bewn5O|XFu6qM>@Ky>DS7_F5)u;qCsInqBDpB;5y_OEYH3mVX-0Sr zdZ~-w-fFjlme0t>c+sbq=R|I|@_MtiDd_0y2ajfcv5ej+IMa?1-HMFXj@15r%r#&_bpC;s2G- zhB&ELl*ligFQ2$DLxXY zcuzaMCQUV1k<#?%*2&$g<5ON8fjQFr-E-idE^FYhoHx@io}e61R(TelD)R4^e6?}g z#|*U1{xk0X!kyUJJ*9LjX#R59NN^Kj)mr;E*85W8Oc4`*w1o)F2Sw#T8dk~w`xnF} zHiTVkU}+hBsP$gW<|k;MW_%kS9=2Eixv88RFm$dsmjBT83!(Umk{A2QZq2SnWqcdN z1HxlUzAUg3y>ek*-1R2<-jz_%2p2NWdHiEfdmtMojE7^W3L<6hfkH~?rhx3Vp?;G^ zw~wQYnha=^xJ8IB_(C2V^tVbx@1jgsz)@VM!j2U(kHD5_bf~*zC~VIFguK$qEgsS^ zG7XI|PKbE25Jn|h-%i0CjTmI9jZ}-0=yDar&9G%E=IG~0z`N%PibffJbV9C>cm#w) zbru{@GAykDf{a8*DlO8z5qw$EQ-sf|qe#nVjDUTTH2@*I7BKv6FmlsPh-?uLH?!BZ z%u;Y*Zex^*|0vxU-~iFX7sh9?IzN;v~?`7_Jc z5T*B7vTh>vD}#t#dYHt+@Mybxq>oGVD>c{Gc)c&ULhVXy)F)tnGoX}5CM>%Bn*xXZ%S=;q5C z8+H`I=iRZpxV~ifi4*rmw&tfxv1FkR_7qovUmDL!8?&rZlz8z3amZ?LMyrrCNcp$G zzB;9|TQ^)%EV2Mw|MqI1%O-T$VjX%(jI8uY(t!HuM9I9C8)!^T*5F)UnR+U*9<9iC zAh+3-_n|GCkLDLYl!&8!O`9D{P6%4}lDS6&hXTSK15%#>Np*0lFo<1l#@S&Wi?5$& z;j1o#;sMHgC&il|I-UonskHmPPDZLl@JsI`wJ&_z{GV}ISM3R94{De@kGVuN9;g%> zaBoC55>v>FhHhx(r=yK?PlQSO zHLk$3cE9@WPA*qYKky@-1G8DBA9t@k;(Yqcbg>0~EiE=Gn3SeqfWjg-m?ovKztcck zjN>&WH=Ska+I>K{ZmHys;GFp`i4$hputQshr^{JbwtS=27HnO0LT{EOw+E zP-SQ>=lc~s{^?lJn?H+VO;;R?dMc4X~uHZS6alTo6a#>JCv{y4{Mh)u(!I3GgOTEkS?>DsTIB;n} z)_Vo#4^eX*nJGLaF9>h<&(C{J9IH#W%QL!FLvQ^eL1R{lktosTy6A%_QFL^5C#Gf5 zUZR{2js&UIv#0KFc`^Oqoy*xDbbrrv_g&(ULW+prZat9kwM6H5;oprV-ix5bCxlwg~>ze4X) zs-(+p)r+s>SVZO-IX>=ANC_rg6YmMX9G-`hKL=z9@s`M0P7q!$XcUGZlRPw6{8-xt^0G?LsobyzoIsguZ^Jj75n?Ocx%Vi^b z6c6KQCL=+l(eYQC>`fvzj=VytYSlX4Q(x1Vx;5q&-!E4pQYrKPsgMz3E}TSV+ag)w zMAQYIKl{JNurJ&QaI!G_od;dO!LIUiPE0wVxu4NZfW952g~TGZWs&f)BXhC-=ei<* zzVa1m%eCW*qu=AukzL>dPQWoRjL7^*g+O8!w!_r+Pa}Xq2s`sqi#7Ecwe+Sw^^(+TtNdrv zKii1uQBFRgpX3~0ZvwTP45kxoxdpUKj6^9gjG6Va=;)1D?Yj>GV4TbB(0aFQfS-v0 z&OS18WCXy@bExzh87v9G4M+L0D#4EdX{6a-yGWbG?K)4TC1u?(Sj-zjk3X0lT>qD{#j<5%lo3DcpsIK7gfHg_xF9If?6(ggMnofwW@HW}_-mn#lqW-i7N@Aoui|@=cvq%wlWC5>`yZQM zj?0w7FKbHknWS?R!|z1zzaXugSwtKZkJZoM;fuxwey{8|$fY+!4PQ7b7RI&b$%19y zX!d_c(jryHTx3O*J183RI+Mij(@(i*oQL$)ErY!dd-*C19<^snB5Vs{xjL~1vQ`k03UusA{>r^ z9Zi`jn>QK=sIO(;@&*TfNnd(wEMK0&wG|hQLl`X9@;~79EeUKoK~3iCm$WO=@4sSl z?xLRWv~wK4jX{M^cnTk>?m5w22>oPqg z+zhOA`#XSsZNH%Wx=NZ7Y1Fleu2r%kCdT15dBb87puBv?*H@g;0K_r8{o{8z!5q>b zocnon9j7%%+e19UiZ?cbut_#Hfrm(f&Q+hUc!LX9_P2N_K1()y?v{gU;v9TYf2#%G z^CWxHacFSgL{Z%LPwTJ*aUr@|FL^lH!1cwyrsEzXTrZb^=~7cej*`s3pQK5*M~D1+ zDRZR-%xk!AQ`i4X<29A>E_=0QOWf`MD+n}D5PU!4@D|;wKLMs#0+)J-&QN~-4gZU~ z|8BDd9}#6NEd8JH@iYl;&%(F;)ji`(y^j0HXB4aWvoQGoqXdMTUk~M-=W7o;ZhSaI z0)M5t_(#9E4X#JZ=;`ZAianNXIQ;(678KYr7i~^$RTP%J*pJFjY<*XV3^vBCYi*n{ zx(V9Fch#dg9QlGcS9S`XdM_d(#w-yuJb&q89R_rSiUvD0;nc$-;mYCsYxF>VTZL`S zCR6Xi2x+G%DuV{J@E%%)kBQLkU>I7SM=yGI%Gh`M+%A#Zj2-;XKzs)mz|ufBc(3vX z(ELRhk2|agy9f5a2^c(Gm3?_8Vv&343iU6AMba5jS`v(??!5+dC)iHyy=yk-5;V8o zl3WPx<<>V%<}dGp7y{_UmmBA{*4*;P8DM3(cbrw7-%`pb9wqgam}CAcVOL<;Ob+q( z&3P4)H^W_`hab~l+-X%U(_-q}>o(DFe@TQ(-`1x49v|D6(t`NRhE=y0oa zY)OB28F`ejeWR`}jF>N_4T}A7tIWz_^LHvty9T$qPe5(HOcthP#;vaGpw8c^FXaXH z-qnLc|J!V^m1OVvmP!5h@l@n|AW{6kyP!v}TwC)U&#PeyrFoH;2jPa2nh`xwIWYdB zklJ|MJkXRLgXGQ7%d4>9BOqS4GYDHbCu)uosWLJv<6|LrjIMCIgVbiFq1)c%bL#!k zV31sN5tv11r@m+qJ<{rw;y(Hn_3)`_l0!3vTguQs=lAa-;;|z01sJnd4|pkcZ??K6 zjQvuvFJ};czFSCt5(-5~2|b8WHH1FPa*laFt9!b_Ma{70OJKN@MRYi&p;b!OO1DJH z6ZNmSgoGYTnOP9`;cUoGs?p(eL_<0sGkOT(1I<)HlP%m=hz1kB2AM@P=Df?8*>}*9 zU;<=XYWnpoTpFdGO^uxQG>nCkloZzuH?@OvvxIm}m1zq{vvxicT5beS>9K#%}ZBI^SO|~JwB#zOx9tg5E z#>HzzO%Tj5(dT!&`OS^=j#2}d^-Ct1Fda5{zmv4#h|=eV$LFiQMrcnHscIC$_aY+) zeagprFa2lzk(>E1>UKcEZJl(P@9|Ekx2HhJCGgzXi0fFN6>)LsZ+j@0pQz*RvWHss zh!2F!$qQG}jlXoRW!QTRiI3-`%w)zy4x}Nq2hjeDBelNmv}$7@ugy641jCZ49pRS& ztIrK7Zw>IHlwPFKvq(xxnzJp($lNhI#wt&MC2WVO?pf^k@OS%EoH&7Q3M+wOv7NXHWVD zDql?Sa!-SoW&7cLft@MJ*Z1}&y5l1eK2zH9rAwH9{KQ4PevOQ52Lp?sYU2-T>C>BK zNN$oLz;c>gc%sx1o-fW#Zm_Xkmb? zo_2>#{=R?Fg^N4qv}7gEFB};MS(09q1uJfm#29&wGX#nmPjS{U+aI+&w2}wffUInW zV-Tf%L&u4s4y|WFy23ufVK!as89g{RP85}fSfYty^}Dn#8>r3*D2a>2&%mnO0}9#3 zQ@@~eA$C(*KVrE%*7MU&Ki|!8|AOiu*oedvs_(QKY7GA=;`HE2yc|@oj;l+y2yHm9 z;xoSLC?%tgb$fX`6q!YP0Y=^1^!Z%b6$Ta|zMbP-v(3+DZolfh4%Wb`lJFyJL-Jal zUZ7Q*Q@t4y$e8UK1C*uHW$l0y&_*?c9QXvr53Da_?&m6T+_n8PWR`I!PbO%1% zBpJp~0P zP?5Ko9x7dZ)92>w8K@qdD(S{571zbJdO!OzZMe^8RKKF4In0t_1>b0%9O^`NZG%cY z&|G3*Yh0FT2Rde1M6%-_uro^g0ptGkAINq+YIb2NK*2`{>gvYOVo!G_0`e~cY{Z2m z``5STy5rvHA@#K)C+>Z`ui5Xzp@9b3OFe(vH4jAi;5E^wfg<^CG*rz;V!q>kEWg5u z>@d!6e$t@bTJ-q%m-G{#i=6e_$yBsUZJ3Yn3180*1#sMwvwd)g-*863XwghsEOl#_ z5&e3fCyXK0nz@hVcqoW>7JvVFcgOPdlhDj1!PQF-oL`n{?~^ECMFC3bA$JW&W5Sbj zX!`q`i=e7(DJ-!G)@A#72_ML(tk%4dpx zZ`gRrHFDXsBx9e>UhA!4%AGDt-muxEJ8dmyVmG428t$YH>|t}?QF;A0A=vj`QejKl z{@HhrGsUP=K2mcNSG{aBT4Fh&6+CP7gdnf&yuDB_frDxl6xmV~u|?u|Do9DH6>dhe zy=I^qyM=6#q8QyLZZS{4I;6i^cEhtzdhM|X5bv!W*n8pA)1R}7)a@a>ZhR``sX}y9 zA5hw+w^~H#P5ZF@zL1;FDjKJ1Cni{OD%z~dA&7?AqvJe=2u!tEH+(%Y9*05p0u`kO zi)W~Gft(UXA}hj#G;BMLM@Rd{G}|Z#a&9+j`s%B?=YdFL@eH3#+K}m$f^Q=(UBTWP zsNKx_ZTdg#)%Rtu#71j04wqin?)x;MW4*tM>w8yKGC@=*-E{h_e=z3)#$+*~3Qg@g zA%yxYX%43CCbZH@wV$U;tpyok)wOan2FvOe(w<-Qf>ut2!Db?u=Gn4wLHkX0yNlHi z#qJAakC>QI5E4zu-Y@pixs_TvdPSv}qj*&EXI=pJ$zy>q3ToTDlI0dq&G$de&*?yr zGt9TaU4G0(TTaUd^U8Ls4j+qyPoLKApo`&YpOY76>gl$x`z?R)s?CAq-x)=Vd*3`< zeeu1}0{LUl-L~?XxN-${-h=sj$r%wpx{uXVRr>aGcC_lt8HToaB4jO)sA>yUp%m26 zsU%JI@>;b@ za?Wi3Tzq=KN6}6g_9SNU#dX!YefIL4HFz6H(t_ZoLD!1*{eh?n#y2{;!k4O309oLtUKF zl;C>itVJf9^oL(JMg0f+RI(z@sFYMIlnqci6ntMF4ZBcW@^M|qSuVDT*gIzzDEi+} zZAuDLjmSBgp3^pb?&<9g<&XCcTHJT7WV>puurVu1sb$!FK(07f4uIA(DckdAblKE6 zYKl5Icmz4#e$(&f&URlp$Nq!<*BO8NP?-77R9;tLQ822R_^x!J$<_T$Ks&=_8%eR= zrK%zxv6m@a8)=I8WAjU$#7B1-xs@F7_gQ}D@C(|cbOz5rVU^!Sk9T_S#P~?J-S#>Z z)$xx)E#DT=UT;%)Z=z}Xrt>$ux@HlrXt+(9GO$+YC$5l2q< z-ea67ms2VhVw^FgHdxr_6ql7U`gi_ zzYPb81~6w~MZVR_^OYYrw=mHhX)U3j}izPk*X~ww&N_5|rl+mNp5iE!nnf(RiM{({(?J z7vm%Qa>kg_SDiz9O52tMW#nBID54JSFm~;zj8r^%-FtTWmp4B}t!nAApv!r5djWa% zoo9R_@aXZ7(t_aU<%Qu|aunrwL|)5*(`668`o1gbG z-7u3$w|LrjowKsPLoJ(ZbVr-t76v0LyW^XF==s+){aRqD8}8qEc7J`MmeU%O%<3}l z^X)sz0#-?%=A@T-!`j?w7yh6s^-0qyPL?HyHZfjk$>fq1d|fG5IOJcz=CClc)AX}* zi`dw2rqx`0@BIBT!$&py2Z6@*m~cv|i!^ha>lYsCc(VNZl277Gf3aH2^qr=A^_@j5 z@z3p-oS=L(*%M+{LBD)C^(56kaGm_rKJLpU3y;|~X}(ARd}#LUbnxqyd}L->Rx;^P z_m#EhK2q^XbjbWztmiv^tBhw;qT>_4ksqpzzBJZLzY49dNL_I9d!;tpb$$C$%q$Ze zs)z`elkW=UbsPFoc0|Sght?E>(z5cSf^q^3Sb_WMMG5X^(?ezK{sW85*xx&km)A=3 zSt8Dly(~S>dEb#qv$to$FR@qfL!PvFEB~83YIJTE)0sO=ZCZ8$OWLn`hHieosqa_J z&m{kw*}t6=O-FY;*|@PUWaey|J+=60;1~Kcc6?O~j2?$uZfo=?U(w__nX~EOm>o5F zW1jS`mzL5Ihb-uyINr?5%X3=0-^eJ;QB_Yh%J;*-{)cM&&6(x0a_M;A#CvWxcaCUk zXcJD@Nqb)$Mk>`PsOx}?f) zg z00;hWT~M;|D#sz`Miw!QH~`0l-iM{rP*_#9)w<%H(4h+cqow`dBRCgP&+%SRU@0@W z#j@|eA?q;ME>0^5Ik0cuThXAH4XeK)tGtmer^C(e7%(Uoc%vpJx%aOJ9#)?zSuP1KySf2M+x#s|7q3>>oQ;{ z!=_xn5yUNv?+ltIh2yFwsqV(~m3-$3qQKHxUHvRohG)FkA!OBKq zWeMfWb+5BJA>Ee0YhoBWpF92iD9vNzHyCJh~NyFilk zk-H6vVj7%{zj&b^PPDao>*G^=pRsR@iReb56GJ32qz8&JD+-*j=Kvm_>YRZlMP@ZB zAYu;i7$&`vXVCLChpVz*p6@Y;62G5gCYJvn8gln&lkr;DK9>$M^?l85_nF;gd`eKw zy$B8H~V|x4-*e4kxoS4J|xx(%OS#a ztEu<`dX1WgcS;|bm4FV&IeR!i?DJa=sqIK|JLoHlC@gC>`p}XtM#eaZOR{XllL4>V z1X@v}o9xws?{Isxb_q}r^62IyO)Ww}`|3Ah<|CNdx+Udxg7te?I6AuU~cGUdl@ z{&cTym~~u!jjl5)8No`3Cc{pDe_X|#+Kn~i0YVlml!FHTnk^x(2J=; zRpSi%lxp6Ef6~e{B#qRSR?8U3L(wabw(0!R-T# zHTr56Xd5o2J{pHOK>#@LmSO9GUe2Lke!LG+s|t`R5QL6zH46ptKqh62TgoxqjEGwP z<&z{i%pXN+g6`Oj<;(Z}eG>Ckjx<`nLc5Y6e@>xnZ%k2FUILS{_1dc?j;WqRIb)oJ zio8ygPHqCm3ReiJkZ6pvzkOX3M<&)e$qa(k;8e?Kv1uHhOV{o}JBXTA6q5!yjLPXDP6yn3>>L9CbEg=cnZeS zCh^GL`=S4wVpGL+8MJ*C1>>h(+Z^{tWf6qNkm2zAOWwiAMMc@fZ%eN1GdChL{3sA z!eHRV2lM*Y;Dotx{7^v0Qelf=yOI0f_g$@KF5{pLn~L#!WS1X9li(BiMXvZ4qaH7d z@+b~CRXq4Rc;u27neJxWu76kU!x7h2xLebQG3xA=yE9I%1TOh1TRrPhN!`nbEW4KQ znW_e+FKKwAveV-4E{N6y;YWP+Y7;<+0+sEcA?<~w73rUtW(}Bp;hg%VB{ecw@NIR8 z6yI`pGK0m(^-!g`=Z~@}xM~lF%wmVe3@_3j0ZLPgVlF!17jfnMO7#zXeaS9gzSe(K z$}||{q&{uOmX2;5pEsQO?$?XNdd07nHC#>rDFGJp zQ^!v*=&F0=rCVn?A+EHyz$R-L4O;;af8>d@rTjobG=h?B@F3(%{Me3)wjMpayy&BQ z_DE8y4v&kK>|SKjFcifkvGmoCwN8y@jwSj|FsZP8)fp0O;fV(}`Wau`gU&-a(;@yf z%>`vzJA&ccQJiX%cT4rG*ZBII*-{#qTfYo(|O!GSu@%Cl^ zC&kl05B0=8h3jEY^ae1}M8P0lzM;!BJA;Hys9(sRKlz+ z?&1Irb}PZ5p{lj*cKZvkGt!%mepc9c(FX`>5=w&F1E82iZ11@fe|ju{E#qBBCQBLd z^R>P3is;K@^e_UD4;rG)t>qWYXeWxz z9Cz7??Y}gGxVs%HY(dG6*#r9|&IKlIsx~!x*Wm2MXU8TvLKtkkXSbia+ZkweAa%*& z1;zYDczPbR z!LxJZOWhH>wIbGSB4Tvj{eaTnGzPN) z-PS&R0`H5u`vw~lBP^01sC|Q<&RqCE!(bO954%^?mbAvk)=5*+m~(^y9Z}eSVb&{~79<=wxrS^Z#E_0V5ay literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/docs/extend/images/authz_deny.png b/vendor/github.com/moby/moby/docs/extend/images/authz_deny.png new file mode 100644 index 0000000000000000000000000000000000000000..fa4a48584abb3db280b8226d18888cb0539de89d GIT binary patch literal 27099 zcmdSBbySsY*Dnf)5~8$(2q@hlA|NS9OM`Tygn}SQH&QM-Rir~Y1VN-Fln`m8B_*Xh z_Po{i+k1awpYz`y`w4>|{)(SmnBm_`vP$p%`-!N8Fx9lYaf;>N>#OjT;s1Lx zpD;Aij7t}k<^H~tPiU0qzn?H4W4u7EAvq-JEXGqk>Gi z80oWdry&CguGZU=9&|Q?1@>}#Eo4UJn`31bFJBtT>(^}eDrlWvy=|))!rnq0)XyxV z#^yfhxo7oI=l#R`) z?Q5OqQKswG{N}J(#MdY~ndle|sWyh7U|Z2E$(U+M4Nq8->25k+U*8!r>iF^gk--?F zax(is{sU)wh4z*t!Dr@;e&^*vC^rS(87+K!>m>T|*}`QjuL*%nbHTg1a*WjRn8Wo= zWjY>LLPL9)(!XCwHuYiDHb(!WgPf^pdgtp~3ew?JEU$PiaG%WQCb|V(zJ}?Y z%kfh!ynnvoSW?pGgpE&M@-1tQn&kaqg+nZS>R1}DeTg8;v8qzxgV~tF{&#|c2Qh#Feh32>)UpQAunIX$j7lRR*`Y)mte)jbLcG{N6trxoib-BB*ddA zZpyEYR5%GJ+i<eGv#=TI>R^!uR?-PY7+XV6U;q3cY^EtGN4WB4W4;CBQ9h_{XoH=eh ztFLn1%Jx`q#B%suyN5d{-It@x^#YfaD6`&m>*3y%!@;J7>)yEYd7IT=M#et;E$=1` z?yB0w1c`ZGG>Pf$zE3SMExNbhRo3>?6_-FIR)da!hNf6b>g@hoALX|;p#k}hj zsJlh>lG>gItLLgQ#3d$LzVKctZF3x6`SCvg@sBc{jcUim^v~N%104=GEp)4%rp-O% z3Ob=Oq3ww0P^aKEe~Kq&(N1#h8yS}J+;_q_HQ)ZB^_tZWqZ$XBb50+}Ix3ADF=EPR zHr9WB&VyS}+0UjB&(3N$Q8RLFLHaVa@H<;rmtK>6K5Eh@Ap6-XU;uLvjtx8giyf;%WV|V4p z3=s!GOO^eM^gyk<^YZnZmfvZ2PWC4ApDlb_SjK^*s)5XVXdyn+3}WE z(iIL}?Slv!u?dH6*imk9_XgT%ybJO)^B;a3{rm>&La&TVK+DSpWwDdYzU|D&a4Ny{ zy5miS1hFIk&2M*pmkSIO8R#v%3|fAa;+Z{Ippy}ZPh%U`Qlq^;J(WwvY@E7Nu@#U$ zQf?JfwD7AZqmWlZ?kL*(bSp(`e`z3}tp>X@Swt_6U8mQflC4#%P}j^VQdW>8+n}Lchp?sL2MlR508JOpRC}c z@J065#>wDF24P@VWQ#Mw>x2Ki54K z#z)_hdc1t&dqhX{>b+`}&aK+C_k41;OAsFCS~q>vboYm`W5_AS0hC-)k{knEQr67R z{=O_Xj~BY$eFza^WL*+!%xo(9al#*-E;20(R$dKc-^8SzQQko%gAt~(vC|rUn`EQZvXD#_9ETz zLZ$O>!)x=0JIh+b;>yu_lcA(+x;my|{p-z#uWmPvOuxGM$T3p4!g|q3pP1UlwsO+z z)5rcc*gV*S>$Jnu{uQiS36YVh- zG3~JM*4a8Cj_rXwlp#!;p8}@iKfcYtiT4x39;AE!sM|Jc`&ULpJG=@PoiysZO+;Fe zv)WO7{fY3$lMal}F_DC+n$};w3o&`@ENMP*wZ7ZH-5S%-Z~>n{Saz(%z=TnTaLUAj zj9Dhw^;fchebZ*3Z_uaVak#&WYpa_+^(}q==cuWS9p>6eS1*1}4WjWHW}x477lo6$ zxz#PEx5mBFK!XZNRg7W%T3CE^!Q*pZZP!@Gqua)QJt3bOroJ+J?KggD!#)iCBoV*y zeyFYLKwY}VQoQuZ=!g4PLz>&#^rX2-Sv3gAtz3y~NIKu%d5~({g~hmD_ChqQ9#DXK zwsHz*p`cn|j@o30Y$|~+ zDzAGaKRsWatr#+V77|W&ky)Pk9!lE;9oI&an?t88^YU`O5+_cz0exlN?sEr)k!1_0 zJnY`ujGr!k*c@ii`|xW*L2Z>!Im=#Q zs-gwDx1Wq;7A9;&Com{nxrXPt`|43L;kJ?cIF!A$XuKwqSj$5f=H<6Zb#r!N%C=_? zzMo%45=;g2&}-K+S*J*^_GAR{%Q;8zn13MUH6%^q5x;>ITlXnB_h5JPChuKwg< z*xJhCaGXEI$&Zs05iXTfpIBB+`RBhUI1*V~u2#GX_4iBWvx<(>I62%YeD=(gE6Dzt zkm)FTw(9j^swa;urjMchm3g0ec|uuNrVw&`w^1gox{FmW;_WQ}*FehzL%5Ouf-qmi zwL94|+Agw^8DvL=g=-mJ-tgj zLc@hV&u|f!qHo?(=J81h+oAE$o$qxP#qVoVevFTItLJ-vOzgbNJT?0UOifAH}tjQ$>^Zkpl^1>I*sRclu~zd%V`C)uJ8Iw zbh}p5=+?7ZQUe9o=-DVacZbk(g-!|Knk9k;8j$W~kJIhKGI? z^_O}R+k_5Zd|CSv%eNj6t1S2?@L&&$l|JZA(64b( zjhH9AY>^~jpB~GiF3o+&$Hkx6dV4jbXXeiDnv5gi!R7T{|L4th(sU8Ro52BP6T21n zy$|DA9;CMQFB$p9-+cHvXreAia2N+g&eK%Zzx?BU&UnY1$G0RwErG;UNhpt*dtMX@ zV$T9t#CEx+M^?Ytj45L-*UGl2?I#bdwB;~7H8h`K__XPH_b>}Pcw zMKJZ->L<;WH!kPet_o()u2s!kCbJQ1>lAS@KE#VpD?w{vK7WBIG1Kbhs>wLSjM{y93*9M|b*!-@mvc3Z3Q{hvqwMo5h68iSuK_GVR& zj~|#`!l#o$;cjGIE59l2n&=kLcyTFN-}ibf53QelOr_UyG8x|Ub&)B?`>$nxHob5X z+P~W)i7%3wqr)T5=XXnJ`*-Dpn}YF}KNmw$(P|~9kpr$~?YULG>=j>XI27k~ zziD4exo~(}i>=!&1`TJQ5$)cS!SFY?Y_j8Ge{17OU_>eS*r1|DpR)_%1fb9cZ<$a% zTN@LsEo?I1-tJR=qfRYHYmM`?nY`kDM`)CIFHXbv*1D9-ECcUUB>^7I^=Pv%3i2@G z1kj)t@dO&ZDUqU&T*55j3=>JyUhwy89tv9@P1g3$h!ig;h8i}am$*ashfiV$qZyrH zG_^GRWrICRKB52nqoiiyRpg0(m@DlFjtY;o`^EQOf1ivFL=+F3>>PX`AfQrr&{OtyoG#~qw_H8&yz8w;C*QI@f82?QG!({7#X!Mbj1EX z*$12cVCdF5+r<70CwWV3JPO`so2J>{q{>n*ht_6BamF<3_kvIBu<0xD59P(wPGXJ= zHIBbCrUhpIINDies`FxuIhOn^nx(dr6XqHB!n1+w`k^}=0ZQ~2$BFE&LcHnr&U&~x zcdpKJ;dPJF9VSKVL0?yQCIZy%MQO?j1-VH}#)i|Gv1)xY$A^+Mf7e7#4>KOf zPb)y2YWn}*2MOXsH-?P7;}(+LmD&@yxBx4~QwciAj#oQbOgD!(Y|Yxdl*X?2ft^Gy$L00sz)O8&4ZX)%fY{2+zuvHDkBtJ* zul3={X#Dh?X<)9*)XyNALC>F2GSoP_*M-M#a_D{NKcEux8ha$_wA5dEn6sFZ9PW>W zKWOE;G=!ZZ>gk5iSYasqyk@O<2_EAqES$*_HL_nHX%f7=Ml3geMX4g7_o&tBpbob!CPBl?IRJIVMv7!yhU{3EsY(v- zslB8@xz8uNuWh*fE(W?(WLV2ear2=gOLnc%=b6^9s}9r6I0uV4Dch4p2A^iTZcjRf ze11vJm>SJ|74D=Ke4qsmlV1enzQ*N7+`Ug(cS=k?J9?7~JWUn7Ov(3!<=y0((q5-ogh=+5 ztfJ!f9S&u5nF#8_K9&bXNkY%&QJ1F+jp!Pp85A@E@hCI^ZV#kM202pgJqpdK@%tIC z*3Ftz7Fc#BFh{|%W^yyG+VkkSE1+Zq)|ELfs^w}GzV29-2hKY?YF9tdmp9WEk?=VP z|I_}Pmq2agLAp8L=My_c(D17U`0wUYesO!tOR^S4_47HS-P)s!zkQiRJBURqo1xgg{nM_R4901+BFT2b5E$c zuT0k0?@USPQTzZ>>-g)tgmQ(&mq-aQ+2w)!oSfSG~dF3tTp@^oAbC+Lbt#!nhU4+_Z;DndM7@6X8@(w?mX}BjxNx7k2`HQUagss z|FG*dV`7z9{xB8?U#=Gse`pUgb-}1BF}bh!S5i*>VLk2+{M*bVS4mi@LqS>U9n!v9 zpqddlm0fmmu%CdsZEwO|dBSa(1f3!D!S~G}uS>E3YjFHMVSu0qircnH zW6qPFeprbH$5!s{`<5bYmV}!T-p7(OX^ynSjEdjeGLN?wk{cDe5%H7z(Dw=YAwg_A zkj!iEn{`c&c2{@shzwmiFXo$J;!^|N=qb7vO0qF^HIev`yrP^l;O1?dj zNAFwJ+gJRP-(~vgsV#W;i-^7DG7_y7eY&$W;1fDqNBtcHwYQfCDXtrQdOk3OhzSbS zTk~CgYpbJ`m1AAHZGFmOPd@(qbBIhnIsdl(c&9Ky8$H^78eh-52~!Qz$cBfp5JV!& zE0Z2uVb865GeZpBk3nixB=9_*e#zx1aHqq#{2uUD$LdRzd@8r?CdxNK`^tH4^wA+} zZm`Dn+3Id17LB&8z`_1ZM1?yPbB;ank%`b)RLtMnQQWk%JfsOHRui^ttWISc<+Ib{ zgB{(ZBcarzlSjRu4=LKQilmd)C_dN0wp$we_>8fN^5#R7QU>g~_%}y#pr|+{y8o_y zU{L4rV_ffGX$SN=~)Yvh^P2Eo<^ebEgHo zR4aRU818PT+9C8a^du)t6fHRU?@Vr*Hvd9XH{i~=9N!>xNxd9l_r{dpImr!-Gq>OZ zEcK*~<7cRSGM2~kV;R&YGh21uUC{}{Q12y|8-vAgT5`Ju7_$;b_kU<$?%~wT)96Wf z+D7sr3RHw8b<~aPiC6SWjD5Xq+BEVElD$t|MK(`1+Guu2(#E+4N2%#Nv@=c1Bcg;b zZ8|}f?R#P=9rOHQqfn40^Fs!dG5>{W(X!j8i55*L^ufE2x|2O$GUVX|%_Q=eZ8MY5 zsUDkP1`*!KcDW9BhN$3xLB?1_%=^@1yplW48q=u>6<1HR?D^eDDGe{7+MW9Gt;8@Qsq<<~&o!ReCg2UPVQO{sLClcnNEH=>o7EqvUA zeJiU^#AIr;ZQL{VxkaqVgn#icgw-~4JK(&~c&F0Hpu=3_xY#?sIUOo~wD-F*meSB` ze6))p?X0$c#OUz`Mlz_`5@#(hiL=jQ*sk(F{(drE7jD@bz>~I^(b~26%P>KE*3G;-&a;p@Zsv-B=+T7D7P+^fU1(Ir^m%&t)^rzDa#C`NL9!fd`O=#Yun8;l{5 zR9#rO#tjRciR0_DYt)yAO2@E7He@eVmZ+k%&5_gN0iviODV@+hv=1W+XsvkU+zo6u zx3XVUjzckazoei%iMdYOgne-^EwQ@Ul3+Ek$2fvoWZO|MOFpjr78l+nV;^JstWgY$ zfGWL|*PDq6*E=;-P9xCbNx47!^;|;5kDcc#;)W0Cv58`@tu)5`wf&!w zbzW|$-h0Va7YUossGjy!&n4=^VN;XlzJ+t+qYVLkA+GzB*U{>2GHveMz*nNX)Ta5E(u{SO zeNVY58>rd$dKjotT=mCgim!NEes?wO9(QUa_l>5BJkCs!q?k70bNr=JJ0IA+D}l1V zFw)7%E$F>kF;pJ&LzJ-nL&?IEZyTmB>^w{h2?NQV9{}OndDQT|du)hx^{43~m9#ooS9CdYF|yLV5$e*$uN`3H)IUxQZl?=>T7 z-|p+lD8`xMQf63_pQG^QI2V7FIZBfDu{26NkZaQT5}{pDVtb>s!5C^^i4M=4+rXbo z(__0w#*4Uhlq=i7xHca2Njo>it+bix`vk3+a;`D=KGJe8>3~r&@rdkF20k<4`)>hJ zSET!0Mqj9skX)w@F4=sM>bV||CwWydw`l}_nVnqKSQqEb@6|Wik^@(&xv5W0NM4c^ zfK{~=aj7Al^aOkIv{{l}@Mu%moUBKQ+gQTgY`%5$1g$0D{+@AA*M7-m=})_heKMaU zb$q!7pN^A0aQIEZo?&brnn1lZJ%f647@%U3aiJ@KVrGFf4}bzg#-FEfU|S;Zgm zjTNOr&@kl2g^BV+xQE|-c>42h?OCoyt{SUZD~c5WT%KXA8}u!?N{@2`o)J+;6SV#K z?!|^~iwv5e=+~0v%SPSE1EJCy8YAbx?W5qaJ!=yhvL#TkJrma9&D8`QW>tpI-Gcf_ zRzIu1|9LQl+c&mxW$$kFZhcjifM%VC>kE;jC8WBWuHd6L5fKr|E^T`8Y}cCy zzpu4Yi*aK;_~cUkC~26^5V0mk1bz<|zEPNzqDVSj*Y`>ET3x)_AmkI|z1B^FiIW+Q zs9rm*@6ji(c+E!1@I8pFu%Btw3SaAHWxA}?O~aN(pPVX8(v8{MAjoof)h!J@d*yG`u7_JWsR2g#+igB!%cNnB_pkl<*)ep&j+@MNcWK2E5GLZDv+}jb<#%R zJzT&eL>48AFnaLr1mL1-u(q1NH5(T31`jG5bs5zqP@!g^j^^89O~n?|CMn&I&a<^NR^rWCT-#XsPZr?+ z^@;!2AEeb0yE9O1B(@N{NbGHJsmNJJEm|4%%)XHF>Kyp}|D=Q+EM6*n)UHf3HwXO*Vu2b#|U7EB!jiC1oh za*F6kEjUJg_*dKy*}!Gr-;}ul{=P#94H+3zh4n!6`HNS^qMhjl`U`Z_`RyjCaPC=9 zAYqAy7Z9Ad7@ldMm?WUO`+KtZ3_vI&zVNz_Q|-ep%8fZYhZE=ug)SWfgWHmmXK z3VKFHd4#{xoryd`V$6KX zeyTcv`px&&#xX1gGv$aybX!KKv%NtId6#2~Pc6(`s9O<@7(^gHv%J0I91k71S3R23 z79g*aB5~*yD{*^jK?kWktwL7Mquqgxh0a$QadWUA=fG{Tmm?+v8M?kD?DC(g!-;8o zF~Kk3nIN~D8&E@o(EYJz-5>R_X3~KlMROVmz3qQ-kt|c(7lYTTm%i~`pB%T-%cvPB zwG5Jhc$=+P?S7?#O&7m5{;31}ZiHVuv|V$??hP_7-v^c>z+m;UJ4I}gr)#3ty`v{X zX6Icw$UwXR4dmVo*6$TWh#tzSh$;%I|G7Y8e=g7sc+2?BxsFc(ozV<0JDj*Vr9mml zq35Xx*{)@giZKF}V|x)^a1Ka1hS_yGP;9=d=OqrZt7Qh;54J(}<&*v>7x#VKjrfPq zuFRMEbKNxV7&o993_4G^$_PDQu~crKIT$R|%X(03h{z|bG7$m4WHIO?Ut=D-w^L7qh8nqr|NaGDv(c$dh2SJV?8HUeNQfd?$!SOS__pclUSw+4Jimi<-d{b8xx3 zHg5;soahZGC2Yr?3tar+b#f>Nno2LpQ3{6P3C$qmte8K{_}8TktQyKn4%i0 zTjmvCzIJxzEG-;0+R5vZfnPu2yeG3`m4_7>s_?^$c5u<*+Rz4Yu=LWzgHVQLp^*F*HI& z(R$uM1-J)lem1l1!+9ki`JT7f#ZU;<-oT$)SBm2;C})v>4(rZ~5hn+s8*ipHWH9}e z(P4=<2B+?FyBh!iDQb}Wzu3GF=CX$f(t#d|a5A(2e$C~Z_@j*+^e)$1%V3dC`Ct5# zi^G<|pyCNM%a?L}^^PQ?##cv#DEKK{IKl}ZcF{jcHu0yho(l;~>VJ)iKab{q_W|!V z#;h&;1>b_oEMX|0B(84=(yYXZ;CM2kzYf{#Rp7$$8hHpby!C6bS=Z}Z76286Rsi8T zQiA!j$5vGDyW&bE@Kk)k{&3sMJHs81ER;{=p_I&@ZGR6IAs67i*vs&W|H*6Nt$v_xy$24n zUyx`;92CKeUWV;@f`y8%cM&V*r<8b$4T?(Y1aoRs&5 z`ynsY50NzX%Iy1Z^5BCmEn}R=eFyc&Fa=!KNuiY(Wzk%vB!M)r0J7oQWq_QU2L@*b z?iKcT6JAG8oFE5LVb#Z|nJG)S11Fu8xENaZ63F(fJUpr(ENXWp^322wBaL`Vz-cM= z8{LBaY%6Tg3K^dS@b|Eq98gl zDICe^H8L08eSzyX3waptug3>l|LHQaAc$u{8lxBTRE@uSu{T?V9!=D^W&sf@1LbdK zZexGkc@igEt5h_Uuhh26VNPDefnC3<CpGp*zNbB(4BL+8pFasQY-s$t~g` z zo)>tnlX4l3g@!G*1K#)|c6OlL(dFf5bhDS z>y&Z{oNqx+?$dV@M*1|MmZfQD5H^-=VhNYC6Q{wDCaCV56~|1a(q`qOm$r* z>pbJtQ|F|lj1F`|g1+Kp=b31{IJE~0p#6c7XI4(u=S&y&3&0UlpvIqITKJiCG=V3? z2sf>Q`~^lm5;iN&m*BaZD_)|M$fC~ZX?t70k@*WvWA%0E$W}j6`w~%{IZLDZ?bvvA zYL(CuwTg?7-?$ zCws)-)IBG>J8Z@a$-G6_-{o3SJ2!maF#!5D?lYA-uW;YF@g2@!hKi}Zz5OPrkjsSh zv7I277w*V5FZX7>tbv#augCt=J#+n{()oS(Fdc||eIA!26470khKji$jPgN2K$%R* z)mZm=Px?Jxn?d$Mv=p}`77_UZ-HJS6=ukrVs4F~v#|H~3JG%7jX4jRhOwEJ&HZXix z`oI&@INd4sUKCTdcu86bRjM5h_x3MAt)))4XmLIQ*>j^AmhXZqg&N4G|I&@$9ONfW z!D!h}GSX~Bk5=BhS(6?au&^l5>K>}JtC`u-HW1Psq2mNZiUWc&2a))Cr>`7Tc1)xN zm4xt$K+d^A_cs8#$DBwC8rvBP_0NgB>?txB+}M6;#4iw=+3!B|wPC4OhF@^RS>`(u zlp!WewONyh4jp0z*bJ(cKjIgqB#Y_Qi*=o5XO`kp4?HdNW-Hz zm?<3>of{8t3{tQ_8H|#ImvtCn%~r5}5DM-eiu_86>er}*Uc_=$W(zFaxrqEp+ zNh%5Qr7?Qsd?w|?zC91y*A+Zz))aUER_tRnuoSHZ^5V9C^+=vI$qOXh9_eVc%L21L zsK^T%Z#aj#5~D8`jmP)p;8O71j=4Q}DQ*hkL^c#+T6#QVBd!j>@jcjDSf-J0wY#VG z2LNZm2hAq2nkVK)e>|1rfJc6n4$uXC4BQ`HbY3@092LR9crJCgnpJUVq%(zzzla`U zzN7kFAfZh~6zl*OSaqVIM@@va&C*J4&>yoY1^#lsl2Bn^4CDD6Hj@XM&!3*t$Ot?A zak?T`CML?E`8N->n3r&-D~~-9t?w{>gD0x=#~Pu54RhN$tjk{@fqJ8wnq3%fuL}_b z8H7h)WNk{IwQu-hQLht*PW(Yn0)R{!UhzNyLVyAZ02tJCtHl8Fp?okgkRXD<&DOtJ z{a{mMnPXK2lz*Q*1qJKeqgF8ck#498N&-*l)dbCdS&1b008^dh{(l1wp~y|MpDDBc z`($4PC?!#tA#VyehzAzIY^$#o_V>vM*73b*hWD3RGb10lC9ESaN<08l7e*BX}Y1ax5;18R%C?Phym9rczE4Amp*IkdUL*fU6>TFMcsl#8=pLQx@vS zjuC`<8KAQaI)OLTl2D(UDxwb+)M9p&t_1O+WXiZ!Ai8(P|u?x2vLDQu_Pvvf!uxt5rh>XJX!`5DsaU+aUSnOxy_Ig_jEJOAbHep7|OuRP-1HA z=a3F(dr(Aj{A=^7UCniH84^H-SAf=M0WcCtIDVW7zk|6bnMw~Vb0#)(P83wpHa^ul z@Zy$K7a^Ey{_A@h2E&!d8^|SkoM0t_$c3cLNkfQ>p?`ZTJw3!(3Dk5fp zEU{ch6Im`gW#;xDFSoWpQhO(sKAS(P?)D4IO;AY_MPX2&m`cb|)|lOV7UB9cBozLXq8}dbUvHw`b2L5q^uPo&mUopg-4#+aR?c4S=jTdatsG!v5m;3dBn@_ z(OGds$VCc!BMJiO!K&qJCNB3e&Va98-El6Wr>Fm=n1zPM_TyHIpAy!zb-69f_?l2HZwaYT0=v_TR3J^%F~5-6Pp`aqRI#R4jB&H z5Vi6plFjH$7fp$!T`Lg#Dn zJARpP!X8CJRaweODXE*Kk@S&5nCqo}vP7R8P?-+z3x3}7QP2H3K+WEDhP_`VPtQ!W z9)y-NbCTA~y@n|ex1WFm=%f9o{LH%D};%Mi^b3%nbpNiuFS_w-LRWA9DgTMW;DZb$x zzA!CIA^h(1hdU+4f4duPWN&*^Xw*sg-*&cwlbPv|X^MBW)_?)m0veu=xI9bSXfOy1 zf7)zQ92)zJ@hk}EY<{7O_%d8-mIdMZwTZ8F*`|v8cNzcmS9$_m3}otTwmoh+KJ(`X z_dQ#kIuAk884kx^|DI5R*&0FfTl;oaEVRSc9x*{`NRM6?Y5hAF7c7Ld@1e{Bg1_w> z!Dfs>)H8pVg!#4s0zca_)BedNDL88XxqhZ^#6h>x7v-e>pQ>;(e_6BBVh=a z%kyyC0Vcv^A!O*d|GVx;iTluFF)IPnOC=o|Yd>Av9|&yXH4}Eb+z1l$S?+slytZQk z63Gx|8KC(j&7|9Q$z%3y*A8=12(D$c2stfP zWjQWes4d&u$g}*u+mg81`3lrI@c^O*4d!f4WcXl+l*jsWY>ELyPO`bu8sy{It@e6{ zK&9`?5K;U(G6$KacmxCf6PFLEhW4+5?Sm*;FeAkT$0-gd>0-}~Y-6#bm78x-x5eB= zcb}Kt$kb$$@Ncq|A56D1NvYK>L>(f@f z*WAVqd06e%?G~hbFO&sL6-}l(p|;TSW0za?Nk3m3%ME*)D!|=N#bX+9`hjV1TPa19 z1LiN&^y|G0u1*^lazh*KO}}?hgc51VwA8T#B$J3zT`6I5?B_^$rByWXZi`^u)uq{m zmsOR*rFFr#)CzLa3qZfFAQ3er;jR$=uO4lnJiEYg-^c=Fr;H-H-J98=@5m$f_g%oI zt0#YFwXLCw+3HS0BrD`AeiT`}Ak~VZ7yv=&+Qo9vdpfV@!T6_+2>|dc>!G5;vPa!T zz!{4`tVxI1TkeVX!5o<;=oPC-Nw@{Xn{5K zavq3TtPpR@gGa5wNfx5+b=rJSbUGQ5QzkSQ!i-4mJD^|ZSofsA0ROz25(Yz*oaNKTc6O4#b=Mjhak7qjREIPRf;7w$1Nzr5=vpmp4F>=5A7l6fbUHK4zx>A%;Xhr4 z9o7HR;0Vt$Hb?!d`CWi^GCn%#-2VGyAB4c5P(ZDb-je?+eAXySm~}uF4vF9WWB;;wiu?D=;C}$F!dj zi6-;khoKw)I3iH`Q%%ij{t`u6^s8X+UByfO+giYJraSu6_n`F+L)RHGUXT6P`e42r z>+f1+|1ZsV)BWjS1t}LH$@~_Yrvl6ma{_aFxe{8s4Yz;=PzujI`kE~Vl+0Lvciu8m(^DtHCLyLc=CT(Rgt^INA8;>p6UnJ~uc03inj2r14XacGzp z{)$ALF7^OJ%7pNNo78_)>D|%yk0JmIsv}b~Na&Y@MNLY`d5s-HUf8_vA*1$6(hpFg-&`7 z-1B1CEM(_yg(b6wPW|}J4je@lPQ4ROB#)W&pX|wPeLPA&*>T80kDNfO$}?{CTLNuc z3uvAuoLDs=MXHltM}>zl*-N^)+Lsei4P(>tii#TGFls^HQ~^Gvg2cDMv=~|hm~r3Y zYjjI>9fSHC|4LzSesi}x=HebWdI7U>`gjVj?N|o{yk~B_#KfPo4|qtN`xr3%VFyhu z&k2vM`>WwXsa{C5bms_Kpa#^30Wd{kD|tB~5)ZSrYS+ZP^bMe#rWM?O&DUe)wso!> zMlIffyLau%5b$g@5EoP-;r9S^M{5xBm%weX2HwvKPtWr{J2mW>R+FG1t2>O0__ZBw zf-#XIs)`X9%Ckw?J1Tieo(GBdK$qeqeWr#&#{ZemqLCDhA2jKxC(1oiTAJqf$7`VH76Rls=!hfvcAX6VaTD5#&5~ z#~~^lZ^~zHfRTGvOpavfYa1@<4;AV?I^Pw?mT(9${2B*eF+*IW6o0TC&$At?GHKbH z4?0$%!6x|)cT_WuRnt)jR9p?vT`PMV<&aFB>2_-egI~l&JI^mN8g6X@zN2?VZde@B z6DVT9%_LR)^j5@OOIh$RX1CO=ZM*z>lWE}wfC9I}Z_Ty#Gl9nP1K8R8VGY3xAV42( z=H?_v!(ClssYj%F@TC>Gi)Fkwn;nY{I(NW-L0qNhT%NUdlcF760TonZO2z@0a&LCe zw3Ny$Iv&38xw^Dah=S7?ldGQ7jwDF?Z9sCFd+R(wsQA)tOCd$HHrX2HM$^B20_(@& z^w?G8lE+j7I*VaVmFDH7SZ39`M(%7c6)+7W#e<v=IB?C5RBx!ld$tX+}my+}I4AqcY(!B|tRvFmAC4TM4(_wAb}9Rn128(luO@ia(P z3BsaTl0laTvGLHo_J3mifN!B79rTJ$5(YJ%?vC>s3He|Hwj~U;@tQVc`5;Wt)6saSA&dhRyKVnUbK6@hg!7&=!>L^&4l2BoMb~Rh6OKwS z1_#}W)O8B{j_m!a)nsW|FnejEGX_ya5&vxUZ9eT-4VI;R-$e<>`*w6K@E2JwZ@j=( z+JrfKy-`v{Nn_ooFlyw+XEo;H$mru_SdcRDtXo8jR`I@Bz*vS1eg}sjQ#L*W)1}t1 z`xr^`VO`H7f_2^1Ls<;aYqU)aHo;#TaKAjh)z*fY?6E25{K)VQ0b&$Ry!mMI`NEUq z_n>y@)Vftn7%dUJ_(mUT3LAP2|shA~$n37lRY(jmrU-hqZyZHW;h0y!l$A#oi z46h)WGdV4d^$3HMO)K{2=O6{>sXLTUK{+}fU-x|3cwu<^y6<-)kc2D`cz5WDumiI! zh~7UeXTCvjj_samzwS?UQl+dwWxW!5TTE(?b%`ejzvdqsn1&JJi~i9gjGai5BgK6H z-<-p2onnzO1`E*bkZ2U+kL@3+1`nu*@@?THqVcb*3tm8lA)qH(4!Zd(GzyV&P}e0p z5^}*M8G{bLp~dKka}z@&++>iJ{W6Y}Afhd)60k*fJsv&qyjqy?uFW?|%+y0<1#0N}u5qNxNs)A!?a)q^VG`{yqm zyIuH?(L&d1Ol;xCa`&&=E*n*oh=cqAB3KvhK78``OJ_U)PHdwG{}JNFF-q^j0zQnq zsrU;U*Cjwb!J9WO2ae3^B%ver7#kQcR=;1JVg6 zil1YWLEmTH?+^Y%p`~mZQu6)d*$MSvge{Z*H+J2dqfC>q4CQ&r^Et5GIH(h{h&qkX z?8~RJn*X|kzu{nAMa(}3O7!gZ7GBg9HD&JtisxStv`Dd8!dQl9`rsN5&O3m_6@dIS=V-ZCqsHRzL zI4+OH1as28p*?y!`ZxCNe@;+)NtwW!ufv-C{K?P>f)8%Bf&(M>{$ZE!4gvIm)YL*SfxF@zLjk!YCmzwWO1hW|dJQqsGvBDJj0Br$J8<*sS<$3VYt z>nxhV+{gBx_5M8_O^M=)Y|?mD>4L3n{;%m4CVEI;rbR;;U8WIb2MpGGKm`svn<(lz9HAna&&)F5qF30fz$O#j zn|Fp7&sP`)<45yD?*^<5o$1!$qvT{zrjXuFtvdZErS5Qv4bjydLu%s~d{}vyk&2Va z>5G~uQq_i&Z7WPp}t)KRT@}nQ!04AMn-mWA4^tH}2zM-c`SSHWl(}(tC zkxzzL&AK$GN5U^E7M`H&K$gmRYclJ1TxmCHd%gpvRe3K+a&IGv%cg2Gs3jUOmq$8? z6E(D4TwiqQkxB~qH2HAHmicN9;CqP-L738=140r9p^qJ~YKH6Oi#99P_?wzy8m62@xQkEkty-vLwEWF)5mR(XhK(am6cY_tXT)% zs3~4<)(&-F>St-3=!D>mFzN)I6^zknTi*`f}h;%@*Y|}qi75q#Xme?XJx7reH zVUm*c$$%-6-!@GY?__QTYwimm(AseFkJ+T6&KO2&O*tIeL`1>2FnHd#27AJ6Ap3Vc zQq#gH9iu<00QwnDGs+Y*8jIS+r@iN_t}*hD^bmt-D$kStBA3dnEaiPK@~f%azKQB6 zTFJRz&S%ErfBK6c22U<|PrF7q&(M&L3G%D;m*SnQ(%;h@w(}#n|7gk0B)*ASC`+ph zDzm%+9i6}LDTBdNOfGSuolvYx>Tj>2^`-ZgO1fGiIqK@+`Pk=JvO|nPZEv8puDwBY z>LtO6C!wq@&32qu<{{H^$L)asQQddHQ~AgLm&|0oDcdm$;n<<z ztYh!&y;s>QTU!d*LK(^4gwONteO=$b;QPbp^MjwxIoEYN_v?PWo{xDLURdqz`QJXt zp2?&xJ{*U)b^d)@Ix&7J#r1sFx2rJh{1~rZ?@2SsV=W94gNDuZ_TzeHtdh>%myNm6 z!xbjH(9K19>kCiDJfxaM#j8YpLB9kz`%)PWksp8_6TqOI@6niS{)@_Eej{ zZcQnmwP{G(ZPBNw_4Y7(7V}TQ{|lc7uwsHN6#JxM^n_kJ4Ie;MDL@1xRP~c)ez)*8 zn*ny);j$D^wz!&b9ab^-mlDt>_YrsatJ|K)UVidtOY>N36E6ELh>mDL#5Rv;qYerv zVbJw~Gw-m+6(~Swz;mT$oVCOq4c4oT3oMm;C-7+gguhteACfG_j>Mz@+_G_Lq(vj8 zEvJAfR1pp!Nunjh9Perj_sc-PGv>hqqef>2#i3|wgf z*b)*XU95KC-3p~+;89oZbMh_lc*jfkglDJB`KlfFQIvK@ z%q+__?P5sCBq7J;=0d*!f;531Xl$vV;nh=XgYGry^6Nf-JS4sLtHNJ5BBUt|j5~s4 z$|U4xai5T~h|0FZ5~SGWgQ%{fmi5&cWejNNvAp`|rSba8wgvDLm03G=R+n*q1xN_| zi|GKz1bpTW(7dbq_fo+G2F*x`h}ul`(kv!`CZxXv<$pn5*gRJnzPBq#&r13YkZM>; zU)=cjNym$`EFY2|?QmwyuK-ueaeA`%IvoxAQ&Fp(qkjqu=_gPMUy=++dtUUg`u;I*$=w}!?d!YT5m+bu5?qx_K>ubudLPXe z=o5_$7Mh-^g7b9BMp^0YwJ=Uy{|NM&3Y@z;gXn^eb`}9GgFqj!+Y<=ruR8x85qb)` zPCjt=0WaA;A{-tVJdFbm$9K^&Ioo(EjZo=l3k8T-r-2`VOp>}vUbxc$sCfAZ&x!41 zLd3xG_gHP|6YVm4-~f#fw4E7`3`Lq!8_#@va6t2|HZ7wwoqnH%uv3=iQ)=I3o7vT= zRwEdfi!v_*;->AMhBF+k4`;f%pp&m&HN|}!tz*u|G`aVr4C=S82;%w2@`5v=9+SxW z(T`3nJ-B7>R0cWCTm zz@|tqjje&?>#al>>21Pco_UXGGYA-c+DUxgF4JR6{3Y->#bBuOf}IC3_ChqE8PSCh z6{!&vCwvOQ=KZg`yXynk$N}~rtf%|IG73v_h_)A{afE%tF!Q7k8tV;Rr9r5J& z+GW+~-mx{%KN$!L@`Pd=;9^WZW44Jqbgpm~+xy?k0nW2HM_?wB=P?KD%!7q=w-f~V z>%hr%JRDi9(<}uoZ`dCQf%}U`q1mjgURr6tO1Z~|mBWpAIC&fhn(@!LD=QxY+{jDs zez-%|@%Tcw_XCGNLhql^pDPRclp%d|QB~(3@;)KU-o<;|N;&&o#N-@`Y8whj_m;ty zYGgas2Uv(E2lJ~##J`@=^ZH&MKFWHXra+H&+mjl3^j!LuV3EHQ(Jq?sX4O2b{>$E7 zm^*_eReMx+50g*(XtHCdFW*Vl_7)ZAWm&C8!}@bEM_OH%mcc9a4xCmMW-L|HR7Q5F zrly3%{ zgkR!PJLS>`6*}vv*_(MFkHk{p>+OituLG^<>S3bvhP*t_5pE?uznem<;Qy}Df2*>7 zEqvheaF`d#a1ClCSRA~#ecwI4z6#eSau?pO?u*!UDNB<3@G;!21)llk*8KT7QiJzm zVSR$CkBlT*IS*bK3Ub`%T2_3e6tSb#-0eI*mv6l~(a1nFd>}<)ibd3oZv#>XTzwx~ zwi^n~52Ky>t_ui0}-pv>G!{?qOzlmV;%-cz6rV7hO3hZnQ z!xQsdy6QK`({ljQaLZLC*LCFh5O(}7hn(#CR`cql4$ZI`?=d81w2^$5e@1n&?52=g z=m2VrEVqM28oRhu2NbcbohcndJ8+nH>IvQvJJrjG{S*?onOvL&ED;|s<~czwBOQMu zsp)<5AJ2+i=wLo~txKU7beb@eze8BQKoGD79VP(-~6OPt;WJ zPr~?y%B-~$iy9`{m`$KQN;od(uxh&72XyX)*5wwRWWzWksU=Ex{LbEeLha(@U+wp4 zciVv=aX?pLw8%ufe@l1kk>J$4ND5{!qJGi_tGTLDbdV2u2=iyybgKW#K}xYyVf{_d z5oOt6;qERQ@Z4cPl4Y(0(#>6{Nio4VpN!d6+H$?}W{p4HY-+iCsg23-h z&@6uS`(La0KkVaH#NRUt%&xnvu{4RU2+z5`&{{obsF|bb= z3nc_bG&Rs~q$|8m~-etE+C*k176>Zd!=5li@NKu;qB$Yq<&QJgRTC!G9E$gtMU`~4t^|k47 zarNL5ephp;KV94x1q`G;cL!M+{ciCdQnek21{*l<{(Sldos*7f57_Pf5K(`0HjE^7 zwSzo$!yFu-h*IVr{1VTsY^I5~;>VPTfl|jC8Ug9~p7$g`7Y*&ihR6A$R{aO51fo5r zzMo_Bi0Ny^Q#Es8ZBGuNK8qjQ^psP_6H_H_58Z9pQz!z?*8%-r7w9@&$m7Ae75C(T zu;hq^W3N@dh;`Sr6$&6q6ho=80(vp*w{(C49s*2aQsS^n)wnj)DT5wFIB@g~GIT(W zCcBF05$vC6pSO+c{|(21qiaE~E50C_m3SR&p7)fH_U70Dkb!7n2us&g4vb3{Aq7hr z+PEQW&vxdh1gX&oi=C(wJQ~(3GceB&jRQ~3vh$qHC9l&>Fy!moxOn}u1xrvL`cHFC zGAg8vv+NzBQscM&wzLR7?AyB)107xCxVXMK;2GR9ZyUIrf*XM5v5$v@@JAj$K0(b^WDx*A6(#xvTZ!}PbI zsvzr@!Nb4-pAKb?&Z;19C-d5fB^wd@$LBAEd*X70W;3Ap(n({8<0wkxq{`6J70v!P z&5aT!#!zt7#imRLi{-?8OZlt*w}U(3fAx$04~|(l+CFKhNaOv;#QyHOY`yF6 zXiJjF@d7&ctP68U@Rt9Y@_{;eex^{1jD+PsDneOK5qu3~|Nngr&uIN23q>kpTm=}a z>Z-oPMJq@)YeAK~Nev>mJ%Iw_+Y%SWa^5NS(B1pk(}@@`{H*a{vyFxEP8E_@(nMLM z)A+GFDiW_<%>Pn6DF0|`KMUXx9hJ9261pWVa0!W4(z%K4Fst(ezxeXkH&C3BIX(4r zN#Pl=;Ud2g;3jSTiHXOkt_=T1T0+OE#vgUo4hrZiMgShdF8~nFcbJ9)_#JH$C%`ki zl_W7cJp@qzlAqdSg&Tg>M6(UM{s17n?fL`2 zQ`z?KuL+LZiESx_$iskC!!8WK7|qb(0NSp)7^PRth=7Z>isHy2!r7XCVJD{Ao zHA%3@c#VWD%Oo`U9Aj-qsU*p1IK{gx9p6P9h?rScNaDPFu+(3Stb?O0WX97 z0NXtQ(|aQf(vdIXokMeA@hgh@1I=C1$Kt8>Z3Z^UtMIm8(6O(LSpS~%%bvDi0Zr;w zbv54H68G*dg>JA%Ukf6}ARGV;U_#T;YO^jdrEamIp`mvab%4tr1kHO8$WmRny*glQ zuSHT^y727}=G+P&got|i;oe#$yr}^3Yah4fd&5AA7z&0U_gf3tOU!;uH2vbN9OBp9 zQfg9qPR?BN-maoG-{GAptf};2*jU{r5rH!oVlXDiPaTv1e*~AMj))pR!I=WlON}E> z)$#b_w_KSzfQR&9f$BpWeAPRI88LC+2T4X_FA(OyQUs)z8@^Hk9YIAJ$xvm!PZB7F z);klo-y`>ch*DS2kTUG)=_y7!*^gLdybKD1uvuilV=arQYwrPuxGIPmDe*&O-3oOWl!pL4ZQqngo|(9JU_>ruvq9PBv5}L2A8(1Y`oSQzeUmoK z3C_aAQ?Oi=3tN3CT9?ZN3JW2<4*_-5$j%~E%+bLeMyHiKG$cqsHqm(_pUykqfu$2n z^!4v#AbO@-`$*S2V!vns91Z-ZaOaIbrb%k^q8yCDcjKu#n!SkAf+lxxS`;B%jY%tZ z=$$n@4<&GfA09%kRf*F+3l) z*5)R(pK7!?9YMpH4j{Q92tt(GaNle?{Wi7JqOcdV!SVWCuZ3_Nd)Q~3jYaI;N3muR ze7Kj@nA<03d3rh{Bh(%lQ1V>M;}*v8$c7Q}Dg{0^#AIJst?&POG*MwYabr!KaV@Q| zVIhOH-}}?T#GaH(#)8Pyj|U}3M$K7@^K=Jk=t^pTvoJY(`*IIY&wmyc7A$;yGZpNi zWrLlP_%1Q+Ovm9NHYPT9Gxs=WChS{eW25AGmxm7@7GGUwlco+0Nlj%m7fvb47)@*_ zE-TBsYbU`}5RFps*{>d7>HO&&IyGe)laYa@ymE_p<;EoDF?$(~60NGMOB-~wZ@Ts< zFVnvN{d?^pR9?uAKgug5E{@}a)vK|o(NUF$&uDiZ_l?+$Gm{f7=_DNU81P*jn4T7B zF|Cxiaf8D+GAb(Qqg5`8hklt}sh>UH(9n>Don5%Y(*3)4v!)oAencj-k`60+W+o;U zJpVv7cKn3-sg&7{BO^UMn{-M_%7h69y)RCAK1k8WM`p-waR2dnZD3+TyS24t;q0u%#Kc6*G1)$|wG}t?MNCXgw>4_#)JTCV zJjSQar1T%lnSHi)?qQM7R(Yv3ipK$M1_2WsX;+v;r-JM1t~oh7D|lqfuH$viMrOop?yq{o*Jp5Kep4{aAv%Brru zx9=t6j6*&AzOcBcTvk~bUQlqw%}RDsQ}<6HWz@;+tcAZe-?f^~PKCp>PWWbGpPvlt z>+6>shKG|nucChaURi;CjfDL(E-H$2yNiUfkeOpr;&QfZMj!PB!bastQgKa9%}2}{ z%zR3Rjnn_?r|qwIiT8{b6H46q{d<;; ztu0$$FZnsH+i5;#W@ba?j%iqnJZ*=^W!GBWaI2NUj2h|5;qD*TJ9s1|Q>MyoOq?c5 zc0fm# zGZtGCpOC<_va*u*+(m(mj7+iUsfWj1Ch}lmQBlPee>uNRSJWpISBMwAGWQ#8h9yGD z;+&j-j3B0;o12@LHp#~150@4fv%L6~g-{|+Ed9!|$BNjL^mJWeVPTyc(VpE> z&CShA$8x?kN%8T~>1u5xK?+i2J<8T)jo$_bxh*X%OUUEz5e*pKuCbfum!4pcAs!`D zljrPAi?JqBA=#{VH^*^KQ@U;@DSqGM@QWZ%y+V5J-@#k&HFsUEr+1@=WH?xk>+9>o z-v4ATd?1_>a{1j468QZMLIQgHNLvi%J88@VEcV4USqa6*0U{zIB_!CUfu+^eNP}$D zmLC&d9q)}xHEX|rbL*8Ryph2~X{f7v+kX4nU+23(m(7a%Ha~CWY%D1$d0ttF4?fAo zM9$mJ#L3(|sev=DGg+qVnC)(ntHT>DDlJttG&0Jy7B-?UGr$o%->*6odqE~k&O%a& zBfWaQVu5ah32wSi{kCIsTiX>i|M1brdhNXgMzov0zwdOmH$*1Bj8l|jM^Wt)QMK>U z=EV-7fbai#n_)Jes=qyv$qFivHO`FbB)MrEGI@!-8@;UeK z-^Abla@yPbeBDE;bH$yD@5)%Hs$R)OGQ&hzP@H%qerHBX_QyUfE*32=FaI+-I=cNW z7>mVHV|IJB + +# Docker Engine managed plugin system + +* [Installing and using a plugin](index.md#installing-and-using-a-plugin) +* [Developing a plugin](index.md#developing-a-plugin) +* [Debugging plugins](index.md#debugging-plugins) + +Docker Engine's plugin system allows you to install, start, stop, and remove +plugins using Docker Engine. + +For information about the legacy plugin system available in Docker Engine 1.12 +and earlier, see [Understand legacy Docker Engine plugins](legacy_plugins.md). + +> **Note**: Docker Engine managed plugins are currently not supported +on Windows daemons. + +## Installing and using a plugin + +Plugins are distributed as Docker images and can be hosted on Docker Hub or on +a private registry. + +To install a plugin, use the `docker plugin install` command, which pulls the +plugin from Docker Hub or your private registry, prompts you to grant +permissions or capabilities if necessary, and enables the plugin. + +To check the status of installed plugins, use the `docker plugin ls` command. +Plugins that start successfully are listed as enabled in the output. + +After a plugin is installed, you can use it as an option for another Docker +operation, such as creating a volume. + +In the following example, you install the `sshfs` plugin, verify that it is +enabled, and use it to create a volume. + +> **Note**: This example is intended for instructional purposes only. Once the volume is created, your SSH password to the remote host will be exposed as plaintext when inspecting the volume. You should delete the volume as soon as you are done with the example. + +1. Install the `sshfs` plugin. + + ```bash + $ docker plugin install vieux/sshfs + + Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - capabilities: [CAP_SYS_ADMIN] + Do you grant the above permissions? [y/N] y + + vieux/sshfs + ``` + + The plugin requests 2 privileges: + + - It needs access to the `host` network. + - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run + the `mount` command. + +2. Check that the plugin is enabled in the output of `docker plugin ls`. + + ```bash + $ docker plugin ls + + ID NAME TAG DESCRIPTION ENABLED + 69553ca1d789 vieux/sshfs latest the `sshfs` plugin true + ``` + +3. Create a volume using the plugin. + This example mounts the `/remote` directory on host `1.2.3.4` into a + volume named `sshvolume`. + + This volume can now be mounted into containers. + + ```bash + $ docker volume create \ + -d vieux/sshfs \ + --name sshvolume \ + -o sshcmd=user@1.2.3.4:/remote \ + -o password=$(cat file_containing_password_for_remote_host) + + sshvolume + ``` +4. Verify that the volume was created successfully. + + ```bash + $ docker volume ls + + DRIVER NAME + vieux/sshfs sshvolume + ``` + +5. Start a container that uses the volume `sshvolume`. + + ```bash + $ docker run --rm -v sshvolume:/data busybox ls /data + + + ``` + +6. Remove the volume `sshvolume` + ```bash + docker volume rm sshvolume + + sshvolume + ``` +To disable a plugin, use the `docker plugin disable` command. To completely +remove it, use the `docker plugin remove` command. For other available +commands and options, see the +[command line reference](../reference/commandline/index.md). + +## Service creation using plugins + +In swarm mode, it is possible to create a service that allows for attaching +to networks or mounting volumes. Swarm schedules services based on plugin availability +on a node. In this example, a volume plugin is installed on a swarm worker and a volume +is created using the plugin. In the manager, a service is created with the relevant +mount options. It can be observed that the service is scheduled to run on the worker +node with the said volume plugin and volume. + +In the following example, node1 is the manager and node2 is the worker. + +1. Prepare manager. In node 1: + + ```bash + $ docker swarm init + Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager. + ``` + +2. Join swarm, install plugin and create volume on worker. In node 2: + + ```bash + $ docker swarm join \ + --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \ + 192.168.99.100:2377 + ``` + + ```bash + $ docker plugin install tiborvass/sample-volume-plugin + latest: Pulling from tiborvass/sample-volume-plugin + eb9c16fbdc53: Download complete + Digest: sha256:00b42de88f3a3e0342e7b35fa62394b0a9ceb54d37f4c50be5d3167899994639 + Status: Downloaded newer image for tiborvass/sample-volume-plugin:latest + Installed plugin tiborvass/sample-volume-plugin + ``` + + ```bash + $ docker volume create -d tiborvass/sample-volume-plugin --name pluginVol + ``` + +3. Create a service using the plugin and volume. In node1: + + ```bash + $ docker service create --name my-service --mount type=volume,volume-driver=tiborvass/sample-volume-plugin,source=pluginVol,destination=/tmp busybox top + + $ docker service ls + z1sj8bb8jnfn my-service replicated 1/1 busybox:latest + ``` + docker service ls shows service 1 instance of service running. + +4. Observe the task getting scheduled in node 2: + + ```bash + {% raw %} + $ docker ps --format '{{.ID}}\t {{.Status}} {{.Names}} {{.Command}}' + 83fc1e842599 Up 2 days my-service.1.9jn59qzn7nbc3m0zt1hij12xs "top" + {% endraw %} + ``` + +## Developing a plugin + +#### The rootfs directory +The `rootfs` directory represents the root filesystem of the plugin. In this +example, it was created from a Dockerfile: + +>**Note:** The `/run/docker/plugins` directory is mandatory inside of the +plugin's filesystem for docker to communicate with the plugin. + +```bash +$ git clone https://github.com/vieux/docker-volume-sshfs +$ cd docker-volume-sshfs +$ docker build -t rootfsimage . +$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created +$ sudo mkdir -p myplugin/rootfs +$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs +$ docker rm -vf "$id" +$ docker rmi rootfsimage +``` + +#### The config.json file + +The `config.json` file describes the plugin. See the [plugins config reference](config.md). + +Consider the following `config.json` file. + +```json +{ + "description": "sshFS plugin for Docker", + "documentation": "https://docs.docker.com/engine/extend/plugins/", + "entrypoint": ["/docker-volume-sshfs"], + "network": { + "type": "host" + }, + "interface" : { + "types": ["docker.volumedriver/1.0"], + "socket": "sshfs.sock" + }, + "linux": { + "capabilities": ["CAP_SYS_ADMIN"] + } +} +``` + +This plugin is a volume driver. It requires a `host` network and the +`CAP_SYS_ADMIN` capability. It depends upon the `/docker-volume-sshfs` +entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate +with Docker Engine. This plugin has no runtime parameters. + +#### Creating the plugin + +A new plugin can be created by running +`docker plugin create ./path/to/plugin/data` where the plugin +data contains a plugin configuration file `config.json` and a root filesystem +in subdirectory `rootfs`. + +After that the plugin `` will show up in `docker plugin ls`. +Plugins can be pushed to remote registries with +`docker plugin push `. + + +## Debugging plugins + +Stdout of a plugin is redirected to dockerd logs. Such entries have a +`plugin=` suffix. Here are a few examples of commands for pluginID +`f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62` and their +corresponding log entries in the docker daemon logs. + +```bash +$ docker plugin install tiborvass/sample-volume-plugins + +INFO[0036] Starting... Found 0 volumes on startup plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +``` + +```bash +$ docker volume create -d tiborvass/sample-volume-plugins samplevol + +INFO[0193] Create Called... Ensuring directory /data/samplevol exists on host... plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0193] open /var/lib/docker/plugin-data/local-persist.json: no such file or directory plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0193] Created volume samplevol with mountpoint /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0193] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +``` + +```bash +$ docker run -v samplevol:/tmp busybox sh + +INFO[0421] Get Called... Found samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0421] Mount Called... Mounted samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0421] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0421] Unmount Called... Unmounted samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +``` + +#### Using docker-runc to obtain logfiles and shell into the plugin. + +`docker-runc`, the default docker container runtime can be used for debugging +plugins. This is specifically useful to collect plugin logs if they are +redirected to a file. + +```bash +$ docker-runc list +ID PID STATUS BUNDLE CREATED +f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2679 running /run/docker/libcontainerd/f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2017-02-06T21:53:03.031537592Z +r +``` + +```bash +$ docker-runc exec f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 cat /var/log/plugin.log +``` + +If the plugin has a built-in shell, then exec into the plugin can be done as +follows: +```bash +$ docker-runc exec -t f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 sh +``` + +#### Using curl to debug plugin socket issues. + +To verify if the plugin API socket that the docker daemon communicates with +is responsive, use curl. In this example, we will make API calls from the +docker host to volume and network plugins using curl 7.47.0 to ensure that +the plugin is listening on the said socket. For a well functioning plugin, +these basic requests should work. Note that plugin sockets are available on the host under `/var/run/docker/plugins/` + + +```bash +curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List + +{"Mountpoint":"","Err":"","Volumes":[{"Name":"myvol1","Mountpoint":"/data/myvol1"},{"Name":"myvol2","Mountpoint":"/data/myvol2"}],"Volume":null} +``` + +```bash +curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities + +{"Scope":"local"} +``` +When using curl 7.5 and above, the URL should be of the form +`http://hostname/APICall`, where `hostname` is the valid hostname where the +plugin is installed and `APICall` is the call to the plugin API. + +For example, `http://localhost/VolumeDriver.List` diff --git a/vendor/github.com/moby/moby/docs/extend/legacy_plugins.md b/vendor/github.com/moby/moby/docs/extend/legacy_plugins.md new file mode 100644 index 0000000..901a40a --- /dev/null +++ b/vendor/github.com/moby/moby/docs/extend/legacy_plugins.md @@ -0,0 +1,100 @@ +--- +redirect_from: +- "/engine/extend/plugins/" +title: "Use Docker Engine plugins" +description: "How to add additional functionality to Docker with plugins extensions" +keywords: "Examples, Usage, plugins, docker, documentation, user guide" +--- + + + +This document describes the Docker Engine plugins generally available in Docker +Engine. To view information on plugins managed by Docker, +refer to [Docker Engine plugin system](index.md). + +You can extend the capabilities of the Docker Engine by loading third-party +plugins. This page explains the types of plugins and provides links to several +volume and network plugins for Docker. + +## Types of plugins + +Plugins extend Docker's functionality. They come in specific types. For +example, a [volume plugin](plugins_volume.md) might enable Docker +volumes to persist across multiple Docker hosts and a +[network plugin](plugins_network.md) might provide network plumbing. + +Currently Docker supports authorization, volume and network driver plugins. In the future it +will support additional plugin types. + +## Installing a plugin + +Follow the instructions in the plugin's documentation. + +## Finding a plugin + +The sections below provide an inexhaustive overview of available plugins. + + + +### Network plugins + +Plugin | Description +----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +[Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards. +[Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin is developed as part of the OpenStack Kuryr project and implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well. +[Weave Network Plugin](https://www.weave.works/docs/net/latest/introducing-weave/) | A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity. + +### Volume plugins + +Plugin | Description +----------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +[Azure File Storage plugin](https://github.com/Azure/azurefile-dockervolumedriver) | Lets you mount Microsoft [Azure File Storage](https://azure.microsoft.com/blog/azure-file-storage-now-generally-available/) shares to Docker containers as volumes using the SMB 3.0 protocol. [Learn more](https://azure.microsoft.com/blog/persistent-docker-volumes-with-azure-file-storage/). +[BeeGFS Volume Plugin](https://github.com/RedCoolBeans/docker-volume-beegfs) | An open source volume plugin to create persistent volumes in a BeeGFS parallel file system. +[Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) | A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS. +[Contiv Volume Plugin](https://github.com/contiv/volplugin) | An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption. It has support for Ceph and NFS. +[Convoy plugin](https://github.com/rancher/convoy) | A volume plugin for a variety of storage back-ends including device mapper and NFS. It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore. +[DigitalOcean Block Storage plugin](https://github.com/omallo/docker-volume-plugin-dostorage) | Integrates DigitalOcean's [block storage solution](https://www.digitalocean.com/products/storage/) into the Docker ecosystem by automatically attaching a given block storage volume to a DigitalOcean droplet and making the contents of the volume available to Docker containers running on that droplet. +[DRBD plugin](https://www.drbd.org/en/supported-projects/docker) | A volume plugin that provides highly available storage replicated by [DRBD](https://www.drbd.org). Data written to the docker volume is replicated in a cluster of DRBD nodes. +[Flocker plugin](https://clusterhq.com/docker-plugin/) | A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines. +[Fuxi Volume Plugin](https://github.com/openstack/fuxi) | A volume plugin that is developed as part of the OpenStack Kuryr project and implements the Docker volume plugin API by utilizing Cinder, the OpenStack block storage service. +[gce-docker plugin](https://github.com/mcuadros/gce-docker) | A volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks). +[GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) | A volume plugin that provides multi-host volumes management for Docker using GlusterFS. +[Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3. +[HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays. +[IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. +[Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository. +[Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`. +[NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future. +[Netshare plugin](https://github.com/ContainX/docker-volume-netshare) | A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems. +[Nimble Storage Volume Plugin](https://connect.nimblestorage.com/community/app-integration/docker)| A volume plug-in that integrates with Nimble Storage Unified Flash Fabric arrays. The plug-in abstracts array volume capabilities to the Docker administrator to allow self-provisioning of secure multi-tenant volumes and clones. +[OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. +[Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler. +[Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. +[REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. +[Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices. +[VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments. + +### Authorization plugins + + Plugin | Description +------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + [Twistlock AuthZ Broker](https://github.com/twistlock/authz) | A basic extendable authorization plugin that runs directly on the host or inside a container. This plugin allows you to define user policies that it evaluates during authorization. Basic authorization is provided if Docker daemon is started with the --tlsverify flag (username is extracted from the certificate common name). + +## Troubleshooting a plugin + +If you are having problems with Docker after loading a plugin, ask the authors +of the plugin for help. The Docker team may not be able to assist you. + +## Writing a plugin + +If you are interested in writing a plugin for Docker, or seeing how they work +under the hood, see the [docker plugins reference](plugin_api.md). diff --git a/vendor/github.com/moby/moby/docs/extend/plugin_api.md b/vendor/github.com/moby/moby/docs/extend/plugin_api.md new file mode 100644 index 0000000..693b77a --- /dev/null +++ b/vendor/github.com/moby/moby/docs/extend/plugin_api.md @@ -0,0 +1,196 @@ +--- +title: "Plugins API" +description: "How to write Docker plugins extensions " +keywords: "API, Usage, plugins, documentation, developer" +--- + + + +# Docker Plugin API + +Docker plugins are out-of-process extensions which add capabilities to the +Docker Engine. + +This document describes the Docker Engine plugin API. To view information on +plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md). + +This page is intended for people who want to develop their own Docker plugin. +If you just want to learn about or use Docker plugins, look +[here](legacy_plugins.md). + +## What plugins are + +A plugin is a process running on the same or a different host as the docker daemon, +which registers itself by placing a file on the same docker host in one of the plugin +directories described in [Plugin discovery](#plugin-discovery). + +Plugins have human-readable names, which are short, lowercase strings. For +example, `flocker` or `weave`. + +Plugins can run inside or outside containers. Currently running them outside +containers is recommended. + +## Plugin discovery + +Docker discovers plugins by looking for them in the plugin directory whenever a +user or container tries to use one by name. + +There are three types of files which can be put in the plugin directory. + +* `.sock` files are UNIX domain sockets. +* `.spec` files are text files containing a URL, such as `unix:///other.sock` or `tcp://localhost:8080`. +* `.json` files are text files containing a full json specification for the plugin. + +Plugins with UNIX domain socket files must run on the same docker host, whereas +plugins with spec or json files can run on a different host if a remote URL is specified. + +UNIX domain socket files must be located under `/run/docker/plugins`, whereas +spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`. + +The name of the file (excluding the extension) determines the plugin name. + +For example, the `flocker` plugin might create a UNIX socket at +`/run/docker/plugins/flocker.sock`. + +You can define each plugin into a separated subdirectory if you want to isolate definitions from each other. +For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only +mount `/run/docker/plugins/flocker` inside the `flocker` container. + +Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under +`/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as +soon as it finds the first plugin definition with the given name. + +### JSON specification + +This is the JSON format for a plugin: + +```json +{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "InsecureSkipVerify": false, + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +} +``` + +The `TLSConfig` field is optional and TLS will only be verified if this configuration is present. + +## Plugin lifecycle + +Plugins should be started before Docker, and stopped after Docker. For +example, when packaging a plugin for a platform which supports `systemd`, you +might use [`systemd` dependencies]( +http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to +manage startup and shutdown order. + +When upgrading a plugin, you should first stop the Docker daemon, upgrade the +plugin, then start Docker again. + +## Plugin activation + +When a plugin is first referred to -- either by a user referring to it by name +(e.g. `docker run --volume-driver=foo`) or a container already configured to +use a plugin being started -- Docker looks for the named plugin in the plugin +directory and activates it with a handshake. See Handshake API below. + +Plugins are *not* activated automatically at Docker daemon startup. Rather, +they are activated only lazily, or on-demand, when they are needed. + +## Systemd socket activation + +Plugins may also be socket activated by `systemd`. The official [Plugins helpers](https://github.com/docker/go-plugins-helpers) +natively supports socket activation. In order for a plugin to be socket activated it needs +a `service` file and a `socket` file. + +The `service` file (for example `/lib/systemd/system/your-plugin.service`): + +``` +[Unit] +Description=Your plugin +Before=docker.service +After=network.target your-plugin.socket +Requires=your-plugin.socket docker.service + +[Service] +ExecStart=/usr/lib/docker/your-plugin + +[Install] +WantedBy=multi-user.target +``` +The `socket` file (for example `/lib/systemd/system/your-plugin.socket`): + +``` +[Unit] +Description=Your plugin + +[Socket] +ListenStream=/run/docker/plugins/your-plugin.sock + +[Install] +WantedBy=sockets.target +``` + +This will allow plugins to be actually started when the Docker daemon connects to +the sockets they're listening on (for instance the first time the daemon uses them +or if one of the plugin goes down accidentally). + +## API design + +The Plugin API is RPC-style JSON over HTTP, much like webhooks. + +Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to +implement an HTTP server and bind this to the UNIX socket mentioned in the +"plugin discovery" section. + +All requests are HTTP `POST` requests. + +The API is versioned via an Accept header, which currently is always set to +`application/vnd.docker.plugins.v1+json`. + +## Handshake API + +Plugins are activated via the following "handshake" API call. + +### /Plugin.Activate + +**Request:** empty body + +**Response:** +``` +{ + "Implements": ["VolumeDriver"] +} +``` + +Responds with a list of Docker subsystems which this plugin implements. +After activation, the plugin will then be sent events from this subsystem. + +Possible values are: + +* [`authz`](plugins_authorization.md) +* [`NetworkDriver`](plugins_network.md) +* [`VolumeDriver`](plugins_volume.md) + + +## Plugin retries + +Attempts to call a method on a plugin are retried with an exponential backoff +for up to 30 seconds. This may help when packaging plugins as containers, since +it gives plugin containers a chance to start up before failing any user +containers which depend on them. + +## Plugins helpers + +To ease plugins development, we're providing an `sdk` for each kind of plugins +currently supported by Docker at [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers). diff --git a/vendor/github.com/moby/moby/docs/extend/plugins_authorization.md b/vendor/github.com/moby/moby/docs/extend/plugins_authorization.md new file mode 100644 index 0000000..ac1837f --- /dev/null +++ b/vendor/github.com/moby/moby/docs/extend/plugins_authorization.md @@ -0,0 +1,260 @@ +--- +title: "Access authorization plugin" +description: "How to create authorization plugins to manage access control to your Docker daemon." +keywords: "security, authorization, authentication, docker, documentation, plugin, extend" +redirect_from: +- "/engine/extend/authorization/" +--- + + + +# Create an authorization plugin + +This document describes the Docker Engine plugins generally available in Docker +Engine. To view information on plugins managed by Docker Engine, +refer to [Docker Engine plugin system](index.md). + +Docker's out-of-the-box authorization model is all or nothing. Any user with +permission to access the Docker daemon can run any Docker client command. The +same is true for callers using Docker's Engine API to contact the daemon. If you +require greater access control, you can create authorization plugins and add +them to your Docker daemon configuration. Using an authorization plugin, a +Docker administrator can configure granular access policies for managing access +to Docker daemon. + +Anyone with the appropriate skills can develop an authorization plugin. These +skills, at their most basic, are knowledge of Docker, understanding of REST, and +sound programming knowledge. This document describes the architecture, state, +and methods information available to an authorization plugin developer. + +## Basic principles + +Docker's [plugin infrastructure](plugin_api.md) enables +extending Docker by loading, removing and communicating with +third-party components using a generic API. The access authorization subsystem +was built using this mechanism. + +Using this subsystem, you don't need to rebuild the Docker daemon to add an +authorization plugin. You can add a plugin to an installed Docker daemon. You do +need to restart the Docker daemon to add a new plugin. + +An authorization plugin approves or denies requests to the Docker daemon based +on both the current authentication context and the command context. The +authentication context contains all user details and the authentication method. +The command context contains all the relevant request data. + +Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md). +Each plugin must reside within directories described under the +[Plugin discovery](plugin_api.md#plugin-discovery) section. + +**Note**: the abbreviations `AuthZ` and `AuthN` mean authorization and authentication +respectively. + +## Default user authorization mechanism + +If TLS is enabled in the [Docker daemon](https://docs.docker.com/engine/security/https/), the default user authorization flow extracts the user details from the certificate subject name. +That is, the `User` field is set to the client certificate subject common name, and the `AuthenticationMethod` field is set to `TLS`. + +## Basic architecture + +You are responsible for registering your plugin as part of the Docker daemon +startup. You can install multiple plugins and chain them together. This chain +can be ordered. Each request to the daemon passes in order through the chain. +Only when all the plugins grant access to the resource, is the access granted. + +When an HTTP request is made to the Docker daemon through the CLI or via the +Engine API, the authentication subsystem passes the request to the installed +authentication plugin(s). The request contains the user (caller) and command +context. The plugin is responsible for deciding whether to allow or deny the +request. + +The sequence diagrams below depict an allow and deny authorization flow: + +![Authorization Allow flow](images/authz_allow.png) + +![Authorization Deny flow](images/authz_deny.png) + +Each request sent to the plugin includes the authenticated user, the HTTP +headers, and the request/response body. Only the user name and the +authentication method used are passed to the plugin. Most importantly, no user +credentials or tokens are passed. Finally, not all request/response bodies +are sent to the authorization plugin. Only those request/response bodies where +the `Content-Type` is either `text/*` or `application/json` are sent. + +For commands that can potentially hijack the HTTP connection (`HTTP +Upgrade`), such as `exec`, the authorization plugin is only called for the +initial HTTP requests. Once the plugin approves the command, authorization is +not applied to the rest of the flow. Specifically, the streaming data is not +passed to the authorization plugins. For commands that return chunked HTTP +response, such as `logs` and `events`, only the HTTP request is sent to the +authorization plugins. + +During request/response processing, some authorization flows might +need to do additional queries to the Docker daemon. To complete such flows, +plugins can call the daemon API similar to a regular user. To enable these +additional queries, the plugin must provide the means for an administrator to +configure proper authentication and security policies. + +## Docker client flows + +To enable and configure the authorization plugin, the plugin developer must +support the Docker client interactions detailed in this section. + +### Setting up Docker daemon + +Enable the authorization plugin with a dedicated command line flag in the +`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID` +value. This value can be the plugin’s socket or a path to a specification file. +Authorization plugins can be loaded without restarting the daemon. Refer +to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information. + +```bash +$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +Docker's authorization subsystem supports multiple `--authorization-plugin` parameters. + +### Calling authorized command (allow) + +```bash +$ docker pull centos +... +f1b10cd84249: Pull complete +... +``` + +### Calling unauthorized command (deny) + +```bash +$ docker pull centos +... +docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed. +``` + +### Error from plugins + +```bash +$ docker pull centos +... +docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?. +``` + +## API schema and implementation + +In addition to Docker's standard plugin registration method, each plugin +should implement the following two methods: + +* `/AuthZPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request. + +* `/AuthZPlugin.AuthZRes` This authorize response method is called before the response is returned from Docker daemon to the client. + +#### /AuthZPlugin.AuthZReq + +**Request**: + +```json +{ + "User": "The user identification", + "UserAuthNMethod": "The authentication method used", + "RequestMethod": "The HTTP method", + "RequestURI": "The HTTP request URI", + "RequestBody": "Byte array containing the raw HTTP request body", + "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string " +} +``` + +**Response**: + +```json +{ + "Allow": "Determined whether the user is allowed or not", + "Msg": "The authorization message", + "Err": "The error message if things go wrong" +} +``` +#### /AuthZPlugin.AuthZRes + +**Request**: + +```json +{ + "User": "The user identification", + "UserAuthNMethod": "The authentication method used", + "RequestMethod": "The HTTP method", + "RequestURI": "The HTTP request URI", + "RequestBody": "Byte array containing the raw HTTP request body", + "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string", + "ResponseBody": "Byte array containing the raw HTTP response body", + "ResponseHeader": "Byte array containing the raw HTTP response header as a map[string][]string", + "ResponseStatusCode":"Response status code" +} +``` + +**Response**: + +```json +{ + "Allow": "Determined whether the user is allowed or not", + "Msg": "The authorization message", + "Err": "The error message if things go wrong" +} +``` + +### Request authorization + +Each plugin must support two request authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. + +#### Daemon -> Plugin + +Name | Type | Description +-----------------------|-------------------|------------------------------------------------------- +User | string | The user identification +Authentication method | string | The authentication method used +Request method | enum | The HTTP method (GET/DELETE/POST) +Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) +Request headers | map[string]string | Request headers as key value pairs (without the authorization header) +Request body | []byte | Raw request body + + +#### Plugin -> Daemon + +Name | Type | Description +--------|--------|---------------------------------------------------------------------------------- +Allow | bool | Boolean value indicating whether the request is allowed or denied +Msg | string | Authorization message (will be returned to the client in case the access is denied) +Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) + +### Response authorization + +The plugin must support two authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. + +#### Daemon -> Plugin + + +Name | Type | Description +----------------------- |------------------ |---------------------------------------------------- +User | string | The user identification +Authentication method | string | The authentication method used +Request method | string | The HTTP method (GET/DELETE/POST) +Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) +Request headers | map[string]string | Request headers as key value pairs (without the authorization header) +Request body | []byte | Raw request body +Response status code | int | Status code from the docker daemon +Response headers | map[string]string | Response headers as key value pairs +Response body | []byte | Raw docker daemon response body + + +#### Plugin -> Daemon + +Name | Type | Description +--------|--------|---------------------------------------------------------------------------------- +Allow | bool | Boolean value indicating whether the response is allowed or denied +Msg | string | Authorization message (will be returned to the client in case the access is denied) +Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) diff --git a/vendor/github.com/moby/moby/docs/extend/plugins_graphdriver.md b/vendor/github.com/moby/moby/docs/extend/plugins_graphdriver.md new file mode 100644 index 0000000..7ec2e47 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/extend/plugins_graphdriver.md @@ -0,0 +1,380 @@ +--- +title: "Graphdriver plugins" +description: "How to manage image and container filesystems with external plugins" +keywords: "Examples, Usage, storage, image, docker, data, graph, plugin, api" +advisory: experimental +--- + + + + +## Changelog + +### 1.13.0 + +- Support v2 plugins + +# Docker graph driver plugins + +Docker graph driver plugins enable admins to use an external/out-of-process +graph driver for use with Docker engine. This is an alternative to using the +built-in storage drivers, such as aufs/overlay/devicemapper/btrfs. + +You need to install and enable the plugin and then restart the Docker daemon +before using the plugin. See the following example for the correct ordering +of steps. + +``` +$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver + +$ pkill dockerd +$ dockerd --experimental -s cpuguy83/docker-overlay2-graphdriver-plugin +``` + +# Write a graph driver plugin + +See the [plugin documentation](/docs/extend/index.md) for detailed information +on the underlying plugin protocol. + + +## Graph Driver plugin protocol + +If a plugin registers itself as a `GraphDriver` when activated, then it is +expected to provide the rootfs for containers as well as image layer storage. + +### /GraphDriver.Init + +**Request**: +```json +{ + "Home": "/graph/home/path", + "Opts": [], + "UIDMaps": [], + "GIDMaps": [] +} +``` + +Initialize the graph driver plugin with a home directory and array of options. +These are passed through from the user, but the plugin is not required to parse +or honor them. + +The request also includes a list of UID and GID mappings, structed as follows: +```json +{ + "ContainerID": 0, + "HostID": 0, + "Size": 0 +} +``` + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + + +### /GraphDriver.Create + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142", + "MountLabel": "", + "StorageOpt": {} +} +``` + +Create a new, empty, read-only filesystem layer with the specified +`ID`, `Parent` and `MountLabel`. If `Parent` is an empty string, there is no +parent layer. `StorageOpt` is map of strings which indicate storage options. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.CreateReadWrite + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142", + "MountLabel": "", + "StorageOpt": {} +} +``` + +Similar to `/GraphDriver.Create` but creates a read-write filesystem layer. + +### /GraphDriver.Remove + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Remove the filesystem layer with this given `ID`. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Get + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "MountLabel": "" +} +``` + +Get the mountpoint for the layered filesystem referred to by the given `ID`. + +**Response**: +```json +{ + "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Err": "" +} +``` + +Respond with the absolute path to the mounted layered filesystem. +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Put + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Release the system resources for the specified `ID`, such as unmounting the +filesystem layer. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Exists + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Determine if a filesystem layer with the specified `ID` exists. + +**Response**: +```json +{ + "Exists": true +} +``` + +Respond with a boolean for whether or not the filesystem layer with the specified +`ID` exists. + +### /GraphDriver.Status + +**Request**: +```json +{} +``` + +Get low-level diagnostic information about the graph driver. + +**Response**: +```json +{ + "Status": [[]] +} +``` + +Respond with a 2-D array with key/value pairs for the underlying status +information. + + +### /GraphDriver.GetMetadata + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Get low-level diagnostic information about the layered filesystem with the +with the specified `ID` + +**Response**: +```json +{ + "Metadata": {}, + "Err": "" +} +``` + +Respond with a set of key/value pairs containing the low-level diagnostic +information about the layered filesystem. +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Cleanup + +**Request**: +```json +{} +``` + +Perform necessary tasks to release resources help by the plugin, such as +unmounting all the layered file systems. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + + +### /GraphDriver.Diff + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Get an archive of the changes between the filesystem layers specified by the `ID` +and `Parent`. `Parent` may be an empty string, in which case there is no parent. + +**Response**: +``` +{% raw %} +{{ TAR STREAM }} +{% endraw %} +``` + +### /GraphDriver.Changes + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Get a list of changes between the filesystem layers specified by the `ID` and +`Parent`. If `Parent` is an empty string, there is no parent. + +**Response**: +```json +{ + "Changes": [{}], + "Err": "" +} +``` + +Respond with a list of changes. The structure of a change is: +```json + "Path": "/some/path", + "Kind": 0, +``` + +Where the `Path` is the filesystem path within the layered filesystem that is +changed and `Kind` is an integer specifying the type of change that occurred: + +- 0 - Modified +- 1 - Added +- 2 - Deleted + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.ApplyDiff + +**Request**: +``` +{% raw %} +{{ TAR STREAM }} +{% endraw %} +``` + +Extract the changeset from the given diff into the layer with the specified `ID` +and `Parent` + +**Query Parameters**: + +- id (required)- the `ID` of the new filesystem layer to extract the diff to +- parent (required)- the `Parent` of the given `ID` + +**Response**: +```json +{ + "Size": 512366, + "Err": "" +} +``` + +Respond with the size of the new layer in bytes. +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.DiffSize + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Calculate the changes between the specified `ID` + +**Response**: +```json +{ + "Size": 512366, + "Err": "" +} +``` + +Respond with the size changes between the specified `ID` and `Parent` +Respond with a non-empty string error if an error occurred. diff --git a/vendor/github.com/moby/moby/docs/extend/plugins_network.md b/vendor/github.com/moby/moby/docs/extend/plugins_network.md new file mode 100644 index 0000000..a974862 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/extend/plugins_network.md @@ -0,0 +1,77 @@ +--- +title: "Docker network driver plugins" +description: "Network driver plugins." +keywords: "Examples, Usage, plugins, docker, documentation, user guide" +--- + + + +# Engine network driver plugins + +This document describes Docker Engine network driver plugins generally +available in Docker Engine. To view information on plugins +managed by Docker Engine, refer to [Docker Engine plugin system](index.md). + +Docker Engine network plugins enable Engine deployments to be extended to +support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN +or something completely different. Network driver plugins are supported via the +LibNetwork project. Each plugin is implemented as a "remote driver" for +LibNetwork, which shares plugin infrastructure with Engine. Effectively, network +driver plugins are activated in the same way as other plugins, and use the same +kind of protocol. + +## Network driver plugins and swarm mode + +Docker 1.12 adds support for cluster management and orchestration called +[swarm mode](https://docs.docker.com/engine/swarm/). Docker Engine running in swarm mode currently +only supports the built-in overlay driver for networking. Therefore existing +networking plugins will not work in swarm mode. + +When you run Docker Engine outside of swarm mode, all networking plugins that +worked in Docker 1.11 will continue to function normally. They do not require +any modification. + +## Using network driver plugins + +The means of installing and running a network driver plugin depend on the +particular plugin. So, be sure to install your plugin according to the +instructions obtained from the plugin developer. + +Once running however, network driver plugins are used just like the built-in +network drivers: by being mentioned as a driver in network-oriented Docker +commands. For example, + + $ docker network create --driver weave mynet + +Some network driver plugins are listed in [plugins](legacy_plugins.md) + +The `mynet` network is now owned by `weave`, so subsequent commands +referring to that network will be sent to the plugin, + + $ docker run --network=mynet busybox top + + +## Write a network plugin + +Network plugins implement the [Docker plugin +API](plugin_api.md) and the network plugin protocol + +## Network plugin protocol + +The network driver protocol, in addition to the plugin activation call, is +documented as part of libnetwork: +[https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md). + +# Related Information + +To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`. + +- [Docker networks feature overview](https://docs.docker.com/engine/userguide/networking/) +- The [LibNetwork](https://github.com/docker/libnetwork) project diff --git a/vendor/github.com/moby/moby/docs/extend/plugins_volume.md b/vendor/github.com/moby/moby/docs/extend/plugins_volume.md new file mode 100644 index 0000000..807ab5a --- /dev/null +++ b/vendor/github.com/moby/moby/docs/extend/plugins_volume.md @@ -0,0 +1,360 @@ +--- +title: "Volume plugins" +description: "How to manage data with external volume plugins" +keywords: "Examples, Usage, volume, docker, data, volumes, plugin, api" +--- + + + +# Write a volume plugin + +Docker Engine volume plugins enable Engine deployments to be integrated with +external storage systems such as Amazon EBS, and enable data volumes to persist +beyond the lifetime of a single Docker host. See the +[plugin documentation](legacy_plugins.md) for more information. + +## Changelog + +### 1.13.0 + +- If used as part of the v2 plugin architecture, mountpoints that are part of + paths returned by the plugin must be mounted under the directory specified by + `PropagatedMount` in the plugin configuration + ([#26398](https://github.com/docker/docker/pull/26398)) + +### 1.12.0 + +- Add `Status` field to `VolumeDriver.Get` response + ([#21006](https://github.com/docker/docker/pull/21006#)) +- Add `VolumeDriver.Capabilities` to get capabilities of the volume driver + ([#22077](https://github.com/docker/docker/pull/22077)) + +### 1.10.0 + +- Add `VolumeDriver.Get` which gets the details about the volume + ([#16534](https://github.com/docker/docker/pull/16534)) +- Add `VolumeDriver.List` which lists all volumes owned by the driver + ([#16534](https://github.com/docker/docker/pull/16534)) + +### 1.8.0 + +- Initial support for volume driver plugins + ([#14659](https://github.com/docker/docker/pull/14659)) + +## Command-line changes + +To give a container access to a volume, use the `--volume` and `--volume-driver` +flags on the `docker container run` command. The `--volume` (or `-v`) flag +accepts a volume name and path on the host, and the `--volume-driver` flag +accepts a driver type. + +```bash +$ docker volume create --driver=flocker volumename + +$ docker container run -it --volume volumename:/data busybox sh +``` + +### `--volume` + +The `--volume` (or `-v`) flag takes a value that is in the format +`:`. The two parts of the value are +separated by a colon (`:`) character. + +- The volume name is a human-readable name for the volume, and cannot begin with + a `/` character. It is referred to as `volume_name` in the rest of this topic. +- The `Mountpoint` is the path on the host (v1) or in the plugin (v2) where the + volume has been made available. + +### `volumedriver` + +Specifying a `volumedriver` in conjunction with a `volumename` allows you to +use plugins such as [Flocker](https://github.com/ScatterHQ/flocker) to manage +volumes external to a single host, such as those on EBS. + +## Create a VolumeDriver + +The container creation endpoint (`/containers/create`) accepts a `VolumeDriver` +field of type `string` allowing to specify the name of the driver. If not +specified, it defaults to `"local"` (the default driver for local volumes). + +## Volume plugin protocol + +If a plugin registers itself as a `VolumeDriver` when activated, it must +provide the Docker Daemon with writeable paths on the host filesystem. The Docker +daemon provides these paths to containers to consume. The Docker daemon makes +the volumes available by bind-mounting the provided paths into the containers. + +> **Note**: Volume plugins should *not* write data to the `/var/lib/docker/` +> directory, including `/var/lib/docker/volumes`. The `/var/lib/docker/` +> directory is reserved for Docker. + +### `/VolumeDriver.Create` + +**Request**: +```json +{ + "Name": "volume_name", + "Opts": {} +} +``` + +Instruct the plugin that the user wants to create a volume, given a user +specified volume name. The plugin does not need to actually manifest the +volume on the filesystem yet (until `Mount` is called). +`Opts` is a map of driver specific options passed through from the user request. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + +### `/VolumeDriver.Remove` + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Delete the specified volume from disk. This request is issued when a user +invokes `docker rm -v` to remove volumes associated with a container. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + +### `/VolumeDriver.Mount` + +**Request**: +```json +{ + "Name": "volume_name", + "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" +} +``` + +Docker requires the plugin to provide a volume, given a user specified volume +name. `Mount` is called once per container start. If the same `volume_name` is requested +more than once, the plugin may need to keep track of each new mount request and provision +at the first mount request and deprovision at the last corresponding unmount request. + +`ID` is a unique ID for the caller that is requesting the mount. + +**Response**: + +- **v1**: + + ```json + { + "Mountpoint": "/path/to/directory/on/host", + "Err": "" + } + ``` + +- **v2**: + + ```json + { + "Mountpoint": "/path/under/PropagatedMount", + "Err": "" + } + ``` + +`Mountpoint` is the path on the host (v1) or in the plugin (v2) where the volume +has been made available. + +`Err` is either empty or contains an error string. + +### `/VolumeDriver.Path` + +**Request**: + +```json +{ + "Name": "volume_name" +} +``` + +Request the path to the volume with the given `volume_name`. + +**Response**: + +- **v1**: + + ```json + { + "Mountpoin": "/path/to/directory/on/host", + "Err": "" + } + ``` + +- **v2**: + + ```json + { + "Mountpoint": "/path/under/PropagatedMount", + "Err": "" + } + ``` + +Respond with the path on the host (v1) or inside the plugin (v2) where the +volume has been made available, and/or a string error if an error occurred. + +`Mountpoint` is optional. However, the plugin may be queried again later if one +is not provided. + +### `/VolumeDriver.Unmount` + +**Request**: +```json +{ + "Name": "volume_name", + "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" +} +``` + +Docker is no longer using the named volume. `Unmount` is called once per +container stop. Plugin may deduce that it is safe to deprovision the volume at +this point. + +`ID` is a unique ID for the caller that is requesting the mount. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + + +### `/VolumeDriver.Get` + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Get info about `volume_name`. + + +**Response**: + +- **v1**: + + ```json + { + "Volume": { + "Name": "volume_name", + "Mountpoint": "/path/to/directory/on/host", + "Status": {} + }, + "Err": "" + } + ``` + +- **v2**: + + ```json + { + "Volume": { + "Name": "volume_name", + "Mountpoint": "/path/under/PropagatedMount", + "Status": {} + }, + "Err": "" + } + ``` + +Respond with a string error if an error occurred. `Mountpoint` and `Status` are +optional. + + +### /VolumeDriver.List + +**Request**: +```json +{} +``` + +Get the list of volumes registered with the plugin. + +**Response**: + +- **v1**: + + ```json + { + "Volumes": [ + { + "Name": "volume_name", + "Mountpoint": "/path/to/directory/on/host" + } + ], + "Err": "" + } + ``` + +- **v2**: + + ```json + { + "Volumes": [ + { + "Name": "volume_name", + "Mountpoint": "/path/under/PropagatedMount" + } + ], + "Err": "" + } + ``` + + +Respond with a string error if an error occurred. `Mountpoint` is optional. + +### /VolumeDriver.Capabilities + +**Request**: +```json +{} +``` + +Get the list of capabilities the driver supports. + +The driver is not required to implement `Capabilities`. If it is not +implemented, the default values are used. + +**Response**: +```json +{ + "Capabilities": { + "Scope": "global" + } +} +``` + +Supported scopes are `global` and `local`. Any other value in `Scope` will be +ignored, and `local` is used. `Scope` allows cluster managers to handle the +volume in different ways. For instance, a scope of `global`, signals to the +cluster manager that it only needs to create the volume once instead of on each +Docker host. More capabilities may be added in the future. diff --git a/vendor/github.com/moby/moby/docs/reference/builder.md b/vendor/github.com/moby/moby/docs/reference/builder.md new file mode 100644 index 0000000..814e7f7 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/builder.md @@ -0,0 +1,1763 @@ +--- +title: "Dockerfile reference" +description: "Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image." +keywords: "builder, docker, Dockerfile, automation, image creation" +redirect_from: +- /reference/builder/ +--- + + + +# Dockerfile reference + +Docker can build images automatically by reading the instructions from a +`Dockerfile`. A `Dockerfile` is a text document that contains all the commands a +user could call on the command line to assemble an image. Using `docker build` +users can create an automated build that executes several command-line +instructions in succession. + +This page describes the commands you can use in a `Dockerfile`. When you are +done reading this page, refer to the [`Dockerfile` Best +Practices](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/) for a tip-oriented guide. + +## Usage + +The [`docker build`](commandline/build.md) command builds an image from +a `Dockerfile` and a *context*. The build's context is the files at a specified +location `PATH` or `URL`. The `PATH` is a directory on your local filesystem. +The `URL` is a Git repository location. + +A context is processed recursively. So, a `PATH` includes any subdirectories and +the `URL` includes the repository and its submodules. A simple build command +that uses the current directory as context: + + $ docker build . + Sending build context to Docker daemon 6.51 MB + ... + +The build is run by the Docker daemon, not by the CLI. The first thing a build +process does is send the entire context (recursively) to the daemon. In most +cases, it's best to start with an empty directory as context and keep your +Dockerfile in that directory. Add only the files needed for building the +Dockerfile. + +>**Warning**: Do not use your root directory, `/`, as the `PATH` as it causes +>the build to transfer the entire contents of your hard drive to the Docker +>daemon. + +To use a file in the build context, the `Dockerfile` refers to the file specified +in an instruction, for example, a `COPY` instruction. To increase the build's +performance, exclude files and directories by adding a `.dockerignore` file to +the context directory. For information about how to [create a `.dockerignore` +file](#dockerignore-file) see the documentation on this page. + +Traditionally, the `Dockerfile` is called `Dockerfile` and located in the root +of the context. You use the `-f` flag with `docker build` to point to a Dockerfile +anywhere in your file system. + + $ docker build -f /path/to/a/Dockerfile . + +You can specify a repository and tag at which to save the new image if +the build succeeds: + + $ docker build -t shykes/myapp . + +To tag the image into multiple repositories after the build, +add multiple `-t` parameters when you run the `build` command: + + $ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest . + +Before the Docker daemon runs the instructions in the `Dockerfile`, it performs +a preliminary validation of the `Dockerfile` and returns an error if the syntax is incorrect: + + $ docker build -t test/myapp . + Sending build context to Docker daemon 2.048 kB + Error response from daemon: Unknown instruction: RUNCMD + +The Docker daemon runs the instructions in the `Dockerfile` one-by-one, +committing the result of each instruction +to a new image if necessary, before finally outputting the ID of your +new image. The Docker daemon will automatically clean up the context you +sent. + +Note that each instruction is run independently, and causes a new image +to be created - so `RUN cd /tmp` will not have any effect on the next +instructions. + +Whenever possible, Docker will re-use the intermediate images (cache), +to accelerate the `docker build` process significantly. This is indicated by +the `Using cache` message in the console output. +(For more information, see the [Build cache section](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#build-cache) in the +`Dockerfile` best practices guide): + + $ docker build -t svendowideit/ambassador . + Sending build context to Docker daemon 15.36 kB + Step 1/4 : FROM alpine:3.2 + ---> 31f630c65071 + Step 2/4 : MAINTAINER SvenDowideit@home.org.au + ---> Using cache + ---> 2a1c91448f5f + Step 3/4 : RUN apk update && apk add socat && rm -r /var/cache/ + ---> Using cache + ---> 21ed6e7fbb73 + Step 4/4 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh + ---> Using cache + ---> 7ea8aef582cc + Successfully built 7ea8aef582cc + +Build cache is only used from images that have a local parent chain. This means +that these images were created by previous builds or the whole chain of images +was loaded with `docker load`. If you wish to use build cache of a specific +image you can specify it with `--cache-from` option. Images specified with +`--cache-from` do not need to have a parent chain and may be pulled from other +registries. + +When you're done with your build, you're ready to look into [*Pushing a +repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). + +## Format + +Here is the format of the `Dockerfile`: + +```Dockerfile +# Comment +INSTRUCTION arguments +``` + +The instruction is not case-sensitive. However, convention is for them to +be UPPERCASE to distinguish them from arguments more easily. + + +Docker runs instructions in a `Dockerfile` in order. **The first +instruction must be \`FROM\`** in order to specify the [*Base +Image*](glossary.md#base-image) from which you are building. + +Docker treats lines that *begin* with `#` as a comment, unless the line is +a valid [parser directive](#parser-directives). A `#` marker anywhere +else in a line is treated as an argument. This allows statements like: + +```Dockerfile +# Comment +RUN echo 'we are running some # of cool things' +``` + +Line continuation characters are not supported in comments. + +## Parser directives + +Parser directives are optional, and affect the way in which subsequent lines +in a `Dockerfile` are handled. Parser directives do not add layers to the build, +and will not be shown as a build step. Parser directives are written as a +special type of comment in the form `# directive=value`. A single directive +may only be used once. + +Once a comment, empty line or builder instruction has been processed, Docker +no longer looks for parser directives. Instead it treats anything formatted +as a parser directive as a comment and does not attempt to validate if it might +be a parser directive. Therefore, all parser directives must be at the very +top of a `Dockerfile`. + +Parser directives are not case-sensitive. However, convention is for them to +be lowercase. Convention is also to include a blank line following any +parser directives. Line continuation characters are not supported in parser +directives. + +Due to these rules, the following examples are all invalid: + +Invalid due to line continuation: + +```Dockerfile +# direc \ +tive=value +``` + +Invalid due to appearing twice: + +```Dockerfile +# directive=value1 +# directive=value2 + +FROM ImageName +``` + +Treated as a comment due to appearing after a builder instruction: + +```Dockerfile +FROM ImageName +# directive=value +``` + +Treated as a comment due to appearing after a comment which is not a parser +directive: + +```Dockerfile +# About my dockerfile +FROM ImageName +# directive=value +``` + +The unknown directive is treated as a comment due to not being recognized. In +addition, the known directive is treated as a comment due to appearing after +a comment which is not a parser directive. + +```Dockerfile +# unknowndirective=value +# knowndirective=value +``` + +Non line-breaking whitespace is permitted in a parser directive. Hence, the +following lines are all treated identically: + +```Dockerfile +#directive=value +# directive =value +# directive= value +# directive = value +# dIrEcTiVe=value +``` + +The following parser directive is supported: + +* `escape` + +## escape + + # escape=\ (backslash) + +Or + + # escape=` (backtick) + +The `escape` directive sets the character used to escape characters in a +`Dockerfile`. If not specified, the default escape character is `\`. + +The escape character is used both to escape characters in a line, and to +escape a newline. This allows a `Dockerfile` instruction to +span multiple lines. Note that regardless of whether the `escape` parser +directive is included in a `Dockerfile`, *escaping is not performed in +a `RUN` command, except at the end of a line.* + +Setting the escape character to `` ` `` is especially useful on +`Windows`, where `\` is the directory path separator. `` ` `` is consistent +with [Windows PowerShell](https://technet.microsoft.com/en-us/library/hh847755.aspx). + +Consider the following example which would fail in a non-obvious way on +`Windows`. The second `\` at the end of the second line would be interpreted as an +escape for the newline, instead of a target of the escape from the first `\`. +Similarly, the `\` at the end of the third line would, assuming it was actually +handled as an instruction, cause it be treated as a line continuation. The result +of this dockerfile is that second and third lines are considered a single +instruction: + +```Dockerfile +FROM microsoft/nanoserver +COPY testfile.txt c:\\ +RUN dir c:\ +``` + +Results in: + + PS C:\John> docker build -t cmd . + Sending build context to Docker daemon 3.072 kB + Step 1/2 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/2 : COPY testfile.txt c:\RUN dir c: + GetFileAttributesEx c:RUN: The system cannot find the file specified. + PS C:\John> + +One solution to the above would be to use `/` as the target of both the `COPY` +instruction, and `dir`. However, this syntax is, at best, confusing as it is not +natural for paths on `Windows`, and at worst, error prone as not all commands on +`Windows` support `/` as the path separator. + +By adding the `escape` parser directive, the following `Dockerfile` succeeds as +expected with the use of natural platform semantics for file paths on `Windows`: + + # escape=` + + FROM microsoft/nanoserver + COPY testfile.txt c:\ + RUN dir c:\ + +Results in: + + PS C:\John> docker build -t succeeds --no-cache=true . + Sending build context to Docker daemon 3.072 kB + Step 1/3 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/3 : COPY testfile.txt c:\ + ---> 96655de338de + Removing intermediate container 4db9acbb1682 + Step 3/3 : RUN dir c:\ + ---> Running in a2c157f842f5 + Volume in drive C has no label. + Volume Serial Number is 7E6D-E0F7 + + Directory of c:\ + + 10/05/2016 05:04 PM 1,894 License.txt + 10/05/2016 02:22 PM

Program Files + 10/05/2016 02:14 PM Program Files (x86) + 10/28/2016 11:18 AM 62 testfile.txt + 10/28/2016 11:20 AM Users + 10/28/2016 11:20 AM Windows + 2 File(s) 1,956 bytes + 4 Dir(s) 21,259,096,064 bytes free + ---> 01c7f3bef04f + Removing intermediate container a2c157f842f5 + Successfully built 01c7f3bef04f + PS C:\John> + +## Environment replacement + +Environment variables (declared with [the `ENV` statement](#env)) can also be +used in certain instructions as variables to be interpreted by the +`Dockerfile`. Escapes are also handled for including variable-like syntax +into a statement literally. + +Environment variables are notated in the `Dockerfile` either with +`$variable_name` or `${variable_name}`. They are treated equivalently and the +brace syntax is typically used to address issues with variable names with no +whitespace, like `${foo}_bar`. + +The `${variable_name}` syntax also supports a few of the standard `bash` +modifiers as specified below: + +* `${variable:-word}` indicates that if `variable` is set then the result + will be that value. If `variable` is not set then `word` will be the result. +* `${variable:+word}` indicates that if `variable` is set then `word` will be + the result, otherwise the result is the empty string. + +In all cases, `word` can be any string, including additional environment +variables. + +Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, +for example, will translate to `$foo` and `${foo}` literals respectively. + +Example (parsed representation is displayed after the `#`): + + FROM busybox + ENV foo /bar + WORKDIR ${foo} # WORKDIR /bar + ADD . $foo # ADD . /bar + COPY \$foo /quux # COPY $foo /quux + +Environment variables are supported by the following list of instructions in +the `Dockerfile`: + +* `ADD` +* `COPY` +* `ENV` +* `EXPOSE` +* `LABEL` +* `USER` +* `WORKDIR` +* `VOLUME` +* `STOPSIGNAL` + +as well as: + +* `ONBUILD` (when combined with one of the supported instructions above) + +> **Note**: +> prior to 1.4, `ONBUILD` instructions did **NOT** support environment +> variable, even when combined with any of the instructions listed above. + +Environment variable substitution will use the same value for each variable +throughout the entire command. In other words, in this example: + + ENV abc=hello + ENV abc=bye def=$abc + ENV ghi=$abc + +will result in `def` having a value of `hello`, not `bye`. However, +`ghi` will have a value of `bye` because it is not part of the same command +that set `abc` to `bye`. + +## .dockerignore file + +Before the docker CLI sends the context to the docker daemon, it looks +for a file named `.dockerignore` in the root directory of the context. +If this file exists, the CLI modifies the context to exclude files and +directories that match patterns in it. This helps to avoid +unnecessarily sending large or sensitive files and directories to the +daemon and potentially adding them to images using `ADD` or `COPY`. + +The CLI interprets the `.dockerignore` file as a newline-separated +list of patterns similar to the file globs of Unix shells. For the +purposes of matching, the root of the context is considered to be both +the working and the root directory. For example, the patterns +`/foo/bar` and `foo/bar` both exclude a file or directory named `bar` +in the `foo` subdirectory of `PATH` or in the root of the git +repository located at `URL`. Neither excludes anything else. + +If a line in `.dockerignore` file starts with `#` in column 1, then this line is +considered as a comment and is ignored before interpreted by the CLI. + +Here is an example `.dockerignore` file: + +``` +# comment + */temp* + */*/temp* + temp? +``` + +This file causes the following build behavior: + +| Rule | Behavior | +|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `# comment` | Ignored. | +| `*/temp*` | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. | +| `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. | +| `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. + + +Matching is done using Go's +[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. A +preprocessing step removes leading and trailing whitespace and +eliminates `.` and `..` elements using Go's +[filepath.Clean](http://golang.org/pkg/path/filepath/#Clean). Lines +that are blank after preprocessing are ignored. + +Beyond Go's filepath.Match rules, Docker also supports a special +wildcard string `**` that matches any number of directories (including +zero). For example, `**/*.go` will exclude all files that end with `.go` +that are found in all directories, including the root of the build context. + +Lines starting with `!` (exclamation mark) can be used to make exceptions +to exclusions. The following is an example `.dockerignore` file that +uses this mechanism: + +``` + *.md + !README.md +``` + +All markdown files *except* `README.md` are excluded from the context. + +The placement of `!` exception rules influences the behavior: the last +line of the `.dockerignore` that matches a particular file determines +whether it is included or excluded. Consider the following example: + +``` + *.md + !README*.md + README-secret.md +``` + +No markdown files are included in the context except README files other than +`README-secret.md`. + +Now consider this example: + +``` + *.md + README-secret.md + !README*.md +``` + +All of the README files are included. The middle line has no effect because +`!README*.md` matches `README-secret.md` and comes last. + +You can even use the `.dockerignore` file to exclude the `Dockerfile` +and `.dockerignore` files. These files are still sent to the daemon +because it needs them to do its job. But the `ADD` and `COPY` commands +do not copy them to the image. + +Finally, you may want to specify which files to include in the +context, rather than which to exclude. To achieve this, specify `*` as +the first pattern, followed by one or more `!` exception patterns. + +**Note**: For historical reasons, the pattern `.` is ignored. + +## FROM + + FROM + +Or + + FROM : + +Or + + FROM @ + +The `FROM` instruction sets the [*Base Image*](glossary.md#base-image) +for subsequent instructions. As such, a valid `Dockerfile` must have `FROM` as +its first instruction. The image can be any valid image – it is especially easy +to start by **pulling an image** from the [*Public Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/). + +- `FROM` must be the first non-comment instruction in the `Dockerfile`. + +- `FROM` can appear multiple times within a single `Dockerfile` in order to create +multiple images. Simply make a note of the last image ID output by the commit +before each new `FROM` command. + +- The `tag` or `digest` values are optional. If you omit either of them, the builder +assumes a `latest` by default. The builder returns an error if it cannot match +the `tag` value. + +## RUN + +RUN has 2 forms: + +- `RUN ` (*shell* form, the command is run in a shell, which by +default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows) +- `RUN ["executable", "param1", "param2"]` (*exec* form) + +The `RUN` instruction will execute any commands in a new layer on top of the +current image and commit the results. The resulting committed image will be +used for the next step in the `Dockerfile`. + +Layering `RUN` instructions and generating commits conforms to the core +concepts of Docker where commits are cheap and containers can be created from +any point in an image's history, much like source control. + +The *exec* form makes it possible to avoid shell string munging, and to `RUN` +commands using a base image that does not contain the specified shell executable. + +The default shell for the *shell* form can be changed using the `SHELL` +command. + +In the *shell* form you can use a `\` (backslash) to continue a single +RUN instruction onto the next line. For example, consider these two lines: + +``` +RUN /bin/bash -c 'source $HOME/.bashrc; \ +echo $HOME' +``` +Together they are equivalent to this single line: + +``` +RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME' +``` + +> **Note**: +> To use a different shell, other than '/bin/sh', use the *exec* form +> passing in the desired shell. For example, +> `RUN ["/bin/bash", "-c", "echo hello"]` + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. +> +> **Note**: +> In the *JSON* form, it is necessary to escape backslashes. This is +> particularly relevant on Windows where the backslash is the path separator. +> The following line would otherwise be treated as *shell* form due to not +> being valid JSON, and fail in an unexpected way: +> `RUN ["c:\windows\system32\tasklist.exe"]` +> The correct syntax for this example is: +> `RUN ["c:\\windows\\system32\\tasklist.exe"]` + +The cache for `RUN` instructions isn't invalidated automatically during +the next build. The cache for an instruction like +`RUN apt-get dist-upgrade -y` will be reused during the next build. The +cache for `RUN` instructions can be invalidated by using the `--no-cache` +flag, for example `docker build --no-cache`. + +See the [`Dockerfile` Best Practices +guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. + +The cache for `RUN` instructions can be invalidated by `ADD` instructions. See +[below](#add) for details. + +### Known issues (RUN) + +- [Issue 783](https://github.com/docker/docker/issues/783) is about file + permissions problems that can occur when using the AUFS file system. You + might notice it during an attempt to `rm` a file, for example. + + For systems that have recent aufs version (i.e., `dirperm1` mount option can + be set), docker will attempt to fix the issue automatically by mounting + the layers with `dirperm1` option. More details on `dirperm1` option can be + found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs) + + If your system doesn't have support for `dirperm1`, the issue describes a workaround. + +## CMD + +The `CMD` instruction has three forms: + +- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form) +- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) +- `CMD command param1 param2` (*shell* form) + +There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD` +then only the last `CMD` will take effect. + +**The main purpose of a `CMD` is to provide defaults for an executing +container.** These defaults can include an executable, or they can omit +the executable, in which case you must specify an `ENTRYPOINT` +instruction as well. + +> **Note**: +> If `CMD` is used to provide default arguments for the `ENTRYPOINT` +> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified +> with the JSON array format. + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. + +When used in the shell or exec formats, the `CMD` instruction sets the command +to be executed when running the image. + +If you use the *shell* form of the `CMD`, then the `` will execute in +`/bin/sh -c`: + + FROM ubuntu + CMD echo "This is a test." | wc - + +If you want to **run your** `` **without a shell** then you must +express the command as a JSON array and give the full path to the executable. +**This array form is the preferred format of `CMD`.** Any additional parameters +must be individually expressed as strings in the array: + + FROM ubuntu + CMD ["/usr/bin/wc","--help"] + +If you would like your container to run the same executable every time, then +you should consider using `ENTRYPOINT` in combination with `CMD`. See +[*ENTRYPOINT*](#entrypoint). + +If the user specifies arguments to `docker run` then they will override the +default specified in `CMD`. + +> **Note**: +> Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits +> the result; `CMD` does not execute anything at build time, but specifies +> the intended command for the image. + +## LABEL + + LABEL = = = ... + +The `LABEL` instruction adds metadata to an image. A `LABEL` is a +key-value pair. To include spaces within a `LABEL` value, use quotes and +backslashes as you would in command-line parsing. A few usage examples: + + LABEL "com.example.vendor"="ACME Incorporated" + LABEL com.example.label-with-value="foo" + LABEL version="1.0" + LABEL description="This text illustrates \ + that label-values can span multiple lines." + +An image can have more than one label. To specify multiple labels, +Docker recommends combining labels into a single `LABEL` instruction where +possible. Each `LABEL` instruction produces a new layer which can result in an +inefficient image if you use many labels. This example results in a single image +layer. + + LABEL multi.label1="value1" multi.label2="value2" other="value3" + +The above can also be written as: + + LABEL multi.label1="value1" \ + multi.label2="value2" \ + other="value3" + +Labels are additive including `LABEL`s in `FROM` images. If Docker +encounters a label/key that already exists, the new value overrides any previous +labels with identical keys. + +To view an image's labels, use the `docker inspect` command. + + "Labels": { + "com.example.vendor": "ACME Incorporated" + "com.example.label-with-value": "foo", + "version": "1.0", + "description": "This text illustrates that label-values can span multiple lines.", + "multi.label1": "value1", + "multi.label2": "value2", + "other": "value3" + }, + +## MAINTAINER (deprecated) + + MAINTAINER + +The `MAINTAINER` instruction sets the *Author* field of the generated images. +The `LABEL` instruction is a much more flexible version of this and you should use +it instead, as it enables setting any metadata you require, and can be viewed +easily, for example with `docker inspect`. To set a label corresponding to the +`MAINTAINER` field you could use: + + LABEL maintainer "SvenDowideit@home.org.au" + +This will then be visible from `docker inspect` with the other labels. + +## EXPOSE + + EXPOSE [...] + +The `EXPOSE` instruction informs Docker that the container listens on the +specified network ports at runtime. `EXPOSE` does not make the ports of the +container accessible to the host. To do that, you must use either the `-p` flag +to publish a range of ports or the `-P` flag to publish all of the exposed +ports. You can expose one port number and publish it externally under another +number. + +To set up port redirection on the host system, see [using the -P +flag](run.md#expose-incoming-ports). The Docker network feature supports +creating networks without the need to expose ports within the network, for +detailed information see the [overview of this +feature](https://docs.docker.com/engine/userguide/networking/)). + +## ENV + + ENV + ENV = ... + +The `ENV` instruction sets the environment variable `` to the value +``. This value will be in the environment of all "descendant" +`Dockerfile` commands and can be [replaced inline](#environment-replacement) in +many as well. + +The `ENV` instruction has two forms. The first form, `ENV `, +will set a single variable to a value. The entire string after the first +space will be treated as the `` - including characters such as +spaces and quotes. + +The second form, `ENV = ...`, allows for multiple variables to +be set at one time. Notice that the second form uses the equals sign (=) +in the syntax, while the first form does not. Like command line parsing, +quotes and backslashes can be used to include spaces within values. + +For example: + + ENV myName="John Doe" myDog=Rex\ The\ Dog \ + myCat=fluffy + +and + + ENV myName John Doe + ENV myDog Rex The Dog + ENV myCat fluffy + +will yield the same net results in the final image, but the first form +is preferred because it produces a single cache layer. + +The environment variables set using `ENV` will persist when a container is run +from the resulting image. You can view the values using `docker inspect`, and +change them using `docker run --env =`. + +> **Note**: +> Environment persistence can cause unexpected side effects. For example, +> setting `ENV DEBIAN_FRONTEND noninteractive` may confuse apt-get +> users on a Debian-based image. To set a value for a single command, use +> `RUN = `. + +## ADD + +ADD has two forms: + +- `ADD ... ` +- `ADD ["",... ""]` (this form is required for paths containing +whitespace) + +The `ADD` instruction copies new files, directories or remote file URLs from `` +and adds them to the filesystem of the image at the path ``. + +Multiple `` resource may be specified but if they are files or +directories then they must be relative to the source directory that is +being built (the context of the build). + +Each `` may contain wildcards and matching will be done using Go's +[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example: + + ADD hom* /mydir/ # adds all files starting with "hom" + ADD hom?.txt /mydir/ # ? is replaced with any single character, e.g., "home.txt" + +The `` is an absolute path, or a path relative to `WORKDIR`, into which +the source will be copied inside the destination container. + + ADD test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ + ADD test /absoluteDir/ # adds "test" to /absoluteDir/ + +All new files and directories are created with a UID and GID of 0. + +In the case where `` is a remote file URL, the destination will +have permissions of 600. If the remote file being retrieved has an HTTP +`Last-Modified` header, the timestamp from that header will be used +to set the `mtime` on the destination file. However, like any other file +processed during an `ADD`, `mtime` will not be included in the determination +of whether or not the file has changed and the cache should be updated. + +> **Note**: +> If you build by passing a `Dockerfile` through STDIN (`docker +> build - < somefile`), there is no build context, so the `Dockerfile` +> can only contain a URL based `ADD` instruction. You can also pass a +> compressed archive through STDIN: (`docker build - < archive.tar.gz`), +> the `Dockerfile` at the root of the archive and the rest of the +> archive will be used as the context of the build. + +> **Note**: +> If your URL files are protected using authentication, you +> will need to use `RUN wget`, `RUN curl` or use another tool from +> within the container as the `ADD` instruction does not support +> authentication. + +> **Note**: +> The first encountered `ADD` instruction will invalidate the cache for all +> following instructions from the Dockerfile if the contents of `` have +> changed. This includes invalidating the cache for `RUN` instructions. +> See the [`Dockerfile` Best Practices +guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. + + +`ADD` obeys the following rules: + +- The `` path must be inside the *context* of the build; + you cannot `ADD ../something /something`, because the first step of a + `docker build` is to send the context directory (and subdirectories) to the + docker daemon. + +- If `` is a URL and `` does not end with a trailing slash, then a + file is downloaded from the URL and copied to ``. + +- If `` is a URL and `` does end with a trailing slash, then the + filename is inferred from the URL and the file is downloaded to + `/`. For instance, `ADD http://example.com/foobar /` would + create the file `/foobar`. The URL must have a nontrivial path so that an + appropriate filename can be discovered in this case (`http://example.com` + will not work). + +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. + +> **Note**: +> The directory itself is not copied, just its contents. + +- If `` is a *local* tar archive in a recognized compression format + (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources + from *remote* URLs are **not** decompressed. When a directory is copied or + unpacked, it has the same behavior as `tar -x`, the result is the union of: + + 1. Whatever existed at the destination path and + 2. The contents of the source tree, with conflicts resolved in favor + of "2." on a file-by-file basis. + + > **Note**: + > Whether a file is identified as a recognized compression format or not + > is done solely based on the contents of the file, not the name of the file. + > For example, if an empty file happens to end with `.tar.gz` this will not + > be recognized as a compressed file and **will not** generate any kind of + > decompression error message, rather the file will simply be copied to the + > destination. + +- If `` is any other kind of file, it is copied individually along with + its metadata. In this case, if `` ends with a trailing slash `/`, it + will be considered a directory and the contents of `` will be written + at `/base()`. + +- If multiple `` resources are specified, either directly or due to the + use of a wildcard, then `` must be a directory, and it must end with + a slash `/`. + +- If `` does not end with a trailing slash, it will be considered a + regular file and the contents of `` will be written at ``. + +- If `` doesn't exist, it is created along with all missing directories + in its path. + +## COPY + +COPY has two forms: + +- `COPY ... ` +- `COPY ["",... ""]` (this form is required for paths containing +whitespace) + +The `COPY` instruction copies new files or directories from `` +and adds them to the filesystem of the container at the path ``. + +Multiple `` resource may be specified but they must be relative +to the source directory that is being built (the context of the build). + +Each `` may contain wildcards and matching will be done using Go's +[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example: + + COPY hom* /mydir/ # adds all files starting with "hom" + COPY hom?.txt /mydir/ # ? is replaced with any single character, e.g., "home.txt" + +The `` is an absolute path, or a path relative to `WORKDIR`, into which +the source will be copied inside the destination container. + + COPY test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ + COPY test /absoluteDir/ # adds "test" to /absoluteDir/ + +All new files and directories are created with a UID and GID of 0. + +> **Note**: +> If you build using STDIN (`docker build - < somefile`), there is no +> build context, so `COPY` can't be used. + +`COPY` obeys the following rules: + +- The `` path must be inside the *context* of the build; + you cannot `COPY ../something /something`, because the first step of a + `docker build` is to send the context directory (and subdirectories) to the + docker daemon. + +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. + +> **Note**: +> The directory itself is not copied, just its contents. + +- If `` is any other kind of file, it is copied individually along with + its metadata. In this case, if `` ends with a trailing slash `/`, it + will be considered a directory and the contents of `` will be written + at `/base()`. + +- If multiple `` resources are specified, either directly or due to the + use of a wildcard, then `` must be a directory, and it must end with + a slash `/`. + +- If `` does not end with a trailing slash, it will be considered a + regular file and the contents of `` will be written at ``. + +- If `` doesn't exist, it is created along with all missing directories + in its path. + +## ENTRYPOINT + +ENTRYPOINT has two forms: + +- `ENTRYPOINT ["executable", "param1", "param2"]` + (*exec* form, preferred) +- `ENTRYPOINT command param1 param2` + (*shell* form) + +An `ENTRYPOINT` allows you to configure a container that will run as an executable. + +For example, the following will start nginx with its default content, listening +on port 80: + + docker run -i -t --rm -p 80:80 nginx + +Command line arguments to `docker run ` will be appended after all +elements in an *exec* form `ENTRYPOINT`, and will override all elements specified +using `CMD`. +This allows arguments to be passed to the entry point, i.e., `docker run -d` +will pass the `-d` argument to the entry point. +You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint` +flag. + +The *shell* form prevents any `CMD` or `run` command line arguments from being +used, but has the disadvantage that your `ENTRYPOINT` will be started as a +subcommand of `/bin/sh -c`, which does not pass signals. +This means that the executable will not be the container's `PID 1` - and +will _not_ receive Unix signals - so your executable will not receive a +`SIGTERM` from `docker stop `. + +Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect. + +### Exec form ENTRYPOINT example + +You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands +and arguments and then use either form of `CMD` to set additional defaults that +are more likely to be changed. + + FROM ubuntu + ENTRYPOINT ["top", "-b"] + CMD ["-c"] + +When you run the container, you can see that `top` is the only process: + + $ docker run -it --rm --name test top -H + top - 08:25:00 up 7:27, 0 users, load average: 0.00, 0.01, 0.05 + Threads: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + %Cpu(s): 0.1 us, 0.1 sy, 0.0 ni, 99.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st + KiB Mem: 2056668 total, 1616832 used, 439836 free, 99352 buffers + KiB Swap: 1441840 total, 0 used, 1441840 free. 1324440 cached Mem + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 19744 2336 2080 R 0.0 0.1 0:00.04 top + +To examine the result further, you can use `docker exec`: + + $ docker exec -it test ps aux + USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND + root 1 2.6 0.1 19752 2352 ? Ss+ 08:24 0:00 top -b -H + root 7 0.0 0.1 15572 2164 ? R+ 08:25 0:00 ps aux + +And you can gracefully request `top` to shut down using `docker stop test`. + +The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the +foreground (i.e., as `PID 1`): + +``` +FROM debian:stable +RUN apt-get update && apt-get install -y --force-yes apache2 +EXPOSE 80 443 +VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"] +ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] +``` + +If you need to write a starter script for a single executable, you can ensure that +the final executable receives the Unix signals by using `exec` and `gosu` +commands: + +```bash +#!/bin/bash +set -e + +if [ "$1" = 'postgres' ]; then + chown -R postgres "$PGDATA" + + if [ -z "$(ls -A "$PGDATA")" ]; then + gosu postgres initdb + fi + + exec gosu postgres "$@" +fi + +exec "$@" +``` + +Lastly, if you need to do some extra cleanup (or communicate with other containers) +on shutdown, or are co-ordinating more than one executable, you may need to ensure +that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then +does some more work: + +``` +#!/bin/sh +# Note: I've written this using sh so it works in the busybox container too + +# USE the trap if you need to also do manual cleanup after the service is stopped, +# or need to start multiple services in the one container +trap "echo TRAPed signal" HUP INT QUIT TERM + +# start service in background here +/usr/sbin/apachectl start + +echo "[hit enter key to exit] or run 'docker stop '" +read + +# stop service and clean up here +echo "stopping apache" +/usr/sbin/apachectl stop + +echo "exited $0" +``` + +If you run this image with `docker run -it --rm -p 80:80 --name test apache`, +you can then examine the container's processes with `docker exec`, or `docker top`, +and then ask the script to stop Apache: + +```bash +$ docker exec -it test ps aux +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.1 0.0 4448 692 ? Ss+ 00:42 0:00 /bin/sh /run.sh 123 cmd cmd2 +root 19 0.0 0.2 71304 4440 ? Ss 00:42 0:00 /usr/sbin/apache2 -k start +www-data 20 0.2 0.2 360468 6004 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +www-data 21 0.2 0.2 360468 6000 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +root 81 0.0 0.1 15572 2140 ? R+ 00:44 0:00 ps aux +$ docker top test +PID USER COMMAND +10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 +10054 root /usr/sbin/apache2 -k start +10055 33 /usr/sbin/apache2 -k start +10056 33 /usr/sbin/apache2 -k start +$ /usr/bin/time docker stop test +test +real 0m 0.27s +user 0m 0.03s +sys 0m 0.03s +``` + +> **Note:** you can override the `ENTRYPOINT` setting using `--entrypoint`, +> but this can only set the binary to *exec* (no `sh -c` will be used). + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. + +### Shell form ENTRYPOINT example + +You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`. +This form will use shell processing to substitute shell environment variables, +and will ignore any `CMD` or `docker run` command line arguments. +To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable +correctly, you need to remember to start it with `exec`: + + FROM ubuntu + ENTRYPOINT exec top -b + +When you run this image, you'll see the single `PID 1` process: + + $ docker run -it --rm --name test top + Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached + CPU: 5% usr 0% sys 0% nic 94% idle 0% io 0% irq 0% sirq + Load average: 0.08 0.03 0.05 2/98 6 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root R 3164 0% 0% top -b + +Which will exit cleanly on `docker stop`: + + $ /usr/bin/time docker stop test + test + real 0m 0.20s + user 0m 0.02s + sys 0m 0.04s + +If you forget to add `exec` to the beginning of your `ENTRYPOINT`: + + FROM ubuntu + ENTRYPOINT top -b + CMD --ignored-param1 + +You can then run it (giving it a name for the next step): + + $ docker run -it --name test top --ignored-param2 + Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached + CPU: 9% usr 2% sys 0% nic 88% idle 0% io 0% irq 0% sirq + Load average: 0.01 0.02 0.05 2/101 7 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root S 3168 0% 0% /bin/sh -c top -b cmd cmd2 + 7 1 root R 3164 0% 0% top -b + +You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`. + +If you then run `docker stop test`, the container will not exit cleanly - the +`stop` command will be forced to send a `SIGKILL` after the timeout: + + $ docker exec -it test ps aux + PID USER COMMAND + 1 root /bin/sh -c top -b cmd cmd2 + 7 root top -b + 8 root ps aux + $ /usr/bin/time docker stop test + test + real 0m 10.19s + user 0m 0.04s + sys 0m 0.03s + +### Understand how CMD and ENTRYPOINT interact + +Both `CMD` and `ENTRYPOINT` instructions define what command gets executed when running a container. +There are few rules that describe their co-operation. + +1. Dockerfile should specify at least one of `CMD` or `ENTRYPOINT` commands. + +2. `ENTRYPOINT` should be defined when using the container as an executable. + +3. `CMD` should be used as a way of defining default arguments for an `ENTRYPOINT` command +or for executing an ad-hoc command in a container. + +4. `CMD` will be overridden when running the container with alternative arguments. + +The table below shows what command is executed for different `ENTRYPOINT` / `CMD` combinations: + +| | No ENTRYPOINT | ENTRYPOINT exec_entry p1_entry | ENTRYPOINT ["exec_entry", "p1_entry"] | +|--------------------------------|----------------------------|--------------------------------|------------------------------------------------| +| **No CMD** | *error, not allowed* | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry | +| **CMD ["exec_cmd", "p1_cmd"]** | exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry exec_cmd p1_cmd | +| **CMD ["p1_cmd", "p2_cmd"]** | p1_cmd p2_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd | +| **CMD exec_cmd p1_cmd** | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd | + +## VOLUME + + VOLUME ["/data"] + +The `VOLUME` instruction creates a mount point with the specified name +and marks it as holding externally mounted volumes from native host or other +containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain +string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log +/var/db`. For more information/examples and mounting instructions via the +Docker client, refer to +[*Share Directories via Volumes*](https://docs.docker.com/engine/tutorials/dockervolumes/#/mount-a-host-directory-as-a-data-volume) +documentation. + +The `docker run` command initializes the newly created volume with any data +that exists at the specified location within the base image. For example, +consider the following Dockerfile snippet: + + FROM ubuntu + RUN mkdir /myvol + RUN echo "hello world" > /myvol/greeting + VOLUME /myvol + +This Dockerfile results in an image that causes `docker run`, to +create a new mount point at `/myvol` and copy the `greeting` file +into the newly created volume. + +### Notes about specifying volumes + +Keep the following things in mind about volumes in the `Dockerfile`. + +- **Volumes on Windows-based containers**: When using Windows-based containers, + the destination of a volume inside the container must be one of: + + - a non-existing or empty directory + - a drive other than `C:` + +- **Changing the volume from within the Dockerfile**: If any build steps change the + data within the volume after it has been declared, those changes will be discarded. + +- **JSON formatting**: The list is parsed as a JSON array. + You must enclose words with double quotes (`"`)rather than single quotes (`'`). + +- **The host directory is declared at container run-time**: The host directory + (the mountpoint) is, by its nature, host-dependent. This is to preserve image + portability. since a given host directory can't be guaranteed to be available + on all hosts.For this reason, you can't mount a host directory from + within the Dockerfile. The `VOLUME` instruction does not support specifying a `host-dir` + parameter. You must specify the mountpoint when you create or run the container. + +## USER + + USER daemon + +The `USER` instruction sets the user name or UID to use when running the image +and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the +`Dockerfile`. + +## WORKDIR + + WORKDIR /path/to/workdir + +The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`, +`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`. +If the `WORKDIR` doesn't exist, it will be created even if it's not used in any +subsequent `Dockerfile` instruction. + +It can be used multiple times in the one `Dockerfile`. If a relative path +is provided, it will be relative to the path of the previous `WORKDIR` +instruction. For example: + + WORKDIR /a + WORKDIR b + WORKDIR c + RUN pwd + +The output of the final `pwd` command in this `Dockerfile` would be +`/a/b/c`. + +The `WORKDIR` instruction can resolve environment variables previously set using +`ENV`. You can only use environment variables explicitly set in the `Dockerfile`. +For example: + + ENV DIRPATH /path + WORKDIR $DIRPATH/$DIRNAME + RUN pwd + +The output of the final `pwd` command in this `Dockerfile` would be +`/path/$DIRNAME` + +## ARG + + ARG [=] + +The `ARG` instruction defines a variable that users can pass at build-time to +the builder with the `docker build` command using the `--build-arg =` +flag. If a user specifies a build argument that was not +defined in the Dockerfile, the build outputs a warning. + +``` +[Warning] One or more build-args [foo] were not consumed. +``` + +The Dockerfile author can define a single variable by specifying `ARG` once or many +variables by specifying `ARG` more than once. For example, a valid Dockerfile: + +``` +FROM busybox +ARG user1 +ARG buildno +... +``` + +A Dockerfile author may optionally specify a default value for an `ARG` instruction: + +``` +FROM busybox +ARG user1=someuser +ARG buildno=1 +... +``` + +If an `ARG` value has a default and if there is no value passed at build-time, the +builder uses the default. + +An `ARG` variable definition comes into effect from the line on which it is +defined in the `Dockerfile` not from the argument's use on the command-line or +elsewhere. For example, consider this Dockerfile: + +``` +1 FROM busybox +2 USER ${user:-some_user} +3 ARG user +4 USER $user +... +``` +A user builds this file by calling: + +``` +$ docker build --build-arg user=what_user Dockerfile +``` + +The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the +subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is +defined and the `what_user` value was passed on the command line. Prior to its definition by an +`ARG` instruction, any use of a variable results in an empty string. + +> **Warning:** It is not recommended to use build-time variables for +> passing secrets like github keys, user credentials etc. Build-time variable +> values are visible to any user of the image with the `docker history` command. + +You can use an `ARG` or an `ENV` instruction to specify variables that are +available to the `RUN` instruction. Environment variables defined using the +`ENV` instruction always override an `ARG` instruction of the same name. Consider +this Dockerfile with an `ENV` and `ARG` instruction. + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER v1.0.0 +4 RUN echo $CONT_IMG_VER +``` +Then, assume this image is built with this command: + +``` +$ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile +``` + +In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting +passed by the user:`v2.0.1` This behavior is similar to a shell +script where a locally scoped variable overrides the variables passed as +arguments or inherited from environment, from its point of definition. + +Using the example above but a different `ENV` specification you can create more +useful interactions between `ARG` and `ENV` instructions: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} +4 RUN echo $CONT_IMG_VER +``` + +Unlike an `ARG` instruction, `ENV` values are always persisted in the built +image. Consider a docker build without the `--build-arg` flag: + +``` +$ docker build Dockerfile +``` + +Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but +its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. + +The variable expansion technique in this example allows you to pass arguments +from the command line and persist them in the final image by leveraging the +`ENV` instruction. Variable expansion is only supported for [a limited set of +Dockerfile instructions.](#environment-replacement) + +Docker has a set of predefined `ARG` variables that you can use without a +corresponding `ARG` instruction in the Dockerfile. + +* `HTTP_PROXY` +* `http_proxy` +* `HTTPS_PROXY` +* `https_proxy` +* `FTP_PROXY` +* `ftp_proxy` +* `NO_PROXY` +* `no_proxy` + +To use these, simply pass them on the command line using the flag: + +``` +--build-arg = +``` + +### Impact on build caching + +`ARG` variables are not persisted into the built image as `ENV` variables are. +However, `ARG` variables do impact the build cache in similar ways. If a +Dockerfile defines an `ARG` variable whose value is different from a previous +build, then a "cache miss" occurs upon its first usage, not its definition. In +particular, all `RUN` instructions following an `ARG` instruction use the `ARG` +variable implicitly (as an environment variable), thus can cause a cache miss. + +For example, consider these two Dockerfile: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 RUN echo $CONT_IMG_VER +``` + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 RUN echo hello +``` + +If you specify `--build-arg CONT_IMG_VER=` on the command line, in both +cases, the specification on line 2 does not cause a cache miss; line 3 does +cause a cache miss.`ARG CONT_IMG_VER` causes the RUN line to be identified +as the same as running `CONT_IMG_VER=` echo hello, so if the `` +changes, we get a cache miss. + +Consider another example under the same command line: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER $CONT_IMG_VER +4 RUN echo $CONT_IMG_VER +``` +In this example, the cache miss occurs on line 3. The miss happens because +the variable's value in the `ENV` references the `ARG` variable and that +variable is changed through the command line. In this example, the `ENV` +command causes the image to include the value. + +If an `ENV` instruction overrides an `ARG` instruction of the same name, like +this Dockerfile: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER hello +4 RUN echo $CONT_IMG_VER +``` + +Line 3 does not cause a cache miss because the value of `CONT_IMG_VER` is a +constant (`hello`). As a result, the environment variables and values used on +the `RUN` (line 4) doesn't change between builds. + +## ONBUILD + + ONBUILD [INSTRUCTION] + +The `ONBUILD` instruction adds to the image a *trigger* instruction to +be executed at a later time, when the image is used as the base for +another build. The trigger will be executed in the context of the +downstream build, as if it had been inserted immediately after the +`FROM` instruction in the downstream `Dockerfile`. + +Any build instruction can be registered as a trigger. + +This is useful if you are building an image which will be used as a base +to build other images, for example an application build environment or a +daemon which may be customized with user-specific configuration. + +For example, if your image is a reusable Python application builder, it +will require application source code to be added in a particular +directory, and it might require a build script to be called *after* +that. You can't just call `ADD` and `RUN` now, because you don't yet +have access to the application source code, and it will be different for +each application build. You could simply provide application developers +with a boilerplate `Dockerfile` to copy-paste into their application, but +that is inefficient, error-prone and difficult to update because it +mixes with application-specific code. + +The solution is to use `ONBUILD` to register advance instructions to +run later, during the next build stage. + +Here's how it works: + +1. When it encounters an `ONBUILD` instruction, the builder adds a + trigger to the metadata of the image being built. The instruction + does not otherwise affect the current build. +2. At the end of the build, a list of all triggers is stored in the + image manifest, under the key `OnBuild`. They can be inspected with + the `docker inspect` command. +3. Later the image may be used as a base for a new build, using the + `FROM` instruction. As part of processing the `FROM` instruction, + the downstream builder looks for `ONBUILD` triggers, and executes + them in the same order they were registered. If any of the triggers + fail, the `FROM` instruction is aborted which in turn causes the + build to fail. If all triggers succeed, the `FROM` instruction + completes and the build continues as usual. +4. Triggers are cleared from the final image after being executed. In + other words they are not inherited by "grand-children" builds. + +For example you might add something like this: + + [...] + ONBUILD ADD . /app/src + ONBUILD RUN /usr/local/bin/python-build --dir /app/src + [...] + +> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed. + +> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. + +## STOPSIGNAL + + STOPSIGNAL signal + +The `STOPSIGNAL` instruction sets the system call signal that will be sent to the container to exit. +This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, +or a signal name in the format SIGNAME, for instance SIGKILL. + +## HEALTHCHECK + +The `HEALTHCHECK` instruction has two forms: + +* `HEALTHCHECK [OPTIONS] CMD command` (check container health by running a command inside the container) +* `HEALTHCHECK NONE` (disable any healthcheck inherited from the base image) + +The `HEALTHCHECK` instruction tells Docker how to test a container to check that +it is still working. This can detect cases such as a web server that is stuck in +an infinite loop and unable to handle new connections, even though the server +process is still running. + +When a container has a healthcheck specified, it has a _health status_ in +addition to its normal status. This status is initially `starting`. Whenever a +health check passes, it becomes `healthy` (whatever state it was previously in). +After a certain number of consecutive failures, it becomes `unhealthy`. + +The options that can appear before `CMD` are: + +* `--interval=DURATION` (default: `30s`) +* `--timeout=DURATION` (default: `30s`) +* `--retries=N` (default: `3`) + +The health check will first run **interval** seconds after the container is +started, and then again **interval** seconds after each previous check completes. + +If a single run of the check takes longer than **timeout** seconds then the check +is considered to have failed. + +It takes **retries** consecutive failures of the health check for the container +to be considered `unhealthy`. + +There can only be one `HEALTHCHECK` instruction in a Dockerfile. If you list +more than one then only the last `HEALTHCHECK` will take effect. + +The command after the `CMD` keyword can be either a shell command (e.g. `HEALTHCHECK +CMD /bin/check-running`) or an _exec_ array (as with other Dockerfile commands; +see e.g. `ENTRYPOINT` for details). + +The command's exit status indicates the health status of the container. +The possible values are: + +- 0: success - the container is healthy and ready for use +- 1: unhealthy - the container is not working correctly +- 2: reserved - do not use this exit code + +For example, to check every five minutes or so that a web-server is able to +serve the site's main page within three seconds: + + HEALTHCHECK --interval=5m --timeout=3s \ + CMD curl -f http://localhost/ || exit 1 + +To help debug failing probes, any output text (UTF-8 encoded) that the command writes +on stdout or stderr will be stored in the health status and can be queried with +`docker inspect`. Such output should be kept short (only the first 4096 bytes +are stored currently). + +When the health status of a container changes, a `health_status` event is +generated with the new status. + +The `HEALTHCHECK` feature was added in Docker 1.12. + + +## SHELL + + SHELL ["executable", "parameters"] + +The `SHELL` instruction allows the default shell used for the *shell* form of +commands to be overridden. The default shell on Linux is `["/bin/sh", "-c"]`, and on +Windows is `["cmd", "/S", "/C"]`. The `SHELL` instruction *must* be written in JSON +form in a Dockerfile. + +The `SHELL` instruction is particularly useful on Windows where there are +two commonly used and quite different native shells: `cmd` and `powershell`, as +well as alternate shells available including `sh`. + +The `SHELL` instruction can appear multiple times. Each `SHELL` instruction overrides +all previous `SHELL` instructions, and affects all subsequent instructions. For example: + + FROM microsoft/windowsservercore + + # Executed as cmd /S /C echo default + RUN echo default + + # Executed as cmd /S /C powershell -command Write-Host default + RUN powershell -command Write-Host default + + # Executed as powershell -command Write-Host hello + SHELL ["powershell", "-command"] + RUN Write-Host hello + + # Executed as cmd /S /C echo hello + SHELL ["cmd", "/S"", "/C"] + RUN echo hello + +The following instructions can be affected by the `SHELL` instruction when the +*shell* form of them is used in a Dockerfile: `RUN`, `CMD` and `ENTRYPOINT`. + +The following example is a common pattern found on Windows which can be +streamlined by using the `SHELL` instruction: + + ... + RUN powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" + ... + +The command invoked by docker will be: + + cmd /S /C powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" + +This is inefficient for two reasons. First, there is an un-necessary cmd.exe command +processor (aka shell) being invoked. Second, each `RUN` instruction in the *shell* +form requires an extra `powershell -command` prefixing the command. + +To make this more efficient, one of two mechanisms can be employed. One is to +use the JSON form of the RUN command such as: + + ... + RUN ["powershell", "-command", "Execute-MyCmdlet", "-param1 \"c:\\foo.txt\""] + ... + +While the JSON form is unambiguous and does not use the un-necessary cmd.exe, +it does require more verbosity through double-quoting and escaping. The alternate +mechanism is to use the `SHELL` instruction and the *shell* form, +making a more natural syntax for Windows users, especially when combined with +the `escape` parser directive: + + # escape=` + + FROM microsoft/nanoserver + SHELL ["powershell","-command"] + RUN New-Item -ItemType Directory C:\Example + ADD Execute-MyCmdlet.ps1 c:\example\ + RUN c:\example\Execute-MyCmdlet -sample 'hello world' + +Resulting in: + + PS E:\docker\build\shell> docker build -t shell . + Sending build context to Docker daemon 4.096 kB + Step 1/5 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/5 : SHELL powershell -command + ---> Running in 6fcdb6855ae2 + ---> 6331462d4300 + Removing intermediate container 6fcdb6855ae2 + Step 3/5 : RUN New-Item -ItemType Directory C:\Example + ---> Running in d0eef8386e97 + + + Directory: C:\ + + + Mode LastWriteTime Length Name + ---- ------------- ------ ---- + d----- 10/28/2016 11:26 AM Example + + + ---> 3f2fbf1395d9 + Removing intermediate container d0eef8386e97 + Step 4/5 : ADD Execute-MyCmdlet.ps1 c:\example\ + ---> a955b2621c31 + Removing intermediate container b825593d39fc + Step 5/5 : RUN c:\example\Execute-MyCmdlet 'hello world' + ---> Running in be6d8e63fe75 + hello world + ---> 8e559e9bf424 + Removing intermediate container be6d8e63fe75 + Successfully built 8e559e9bf424 + PS E:\docker\build\shell> + +The `SHELL` instruction could also be used to modify the way in which +a shell operates. For example, using `SHELL cmd /S /C /V:ON|OFF` on Windows, delayed +environment variable expansion semantics could be modified. + +The `SHELL` instruction can also be used on Linux should an alternate shell be +required such as `zsh`, `csh`, `tcsh` and others. + +The `SHELL` feature was added in Docker 1.12. + +## Dockerfile examples + +Below you can see some examples of Dockerfile syntax. If you're interested in +something more realistic, take a look at the list of [Dockerization examples](https://docs.docker.com/engine/examples/). + +``` +# Nginx +# +# VERSION 0.0.1 + +FROM ubuntu +LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0" +RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server +``` + +``` +# Firefox over VNC +# +# VERSION 0.3 + +FROM ubuntu + +# Install vnc, xvfb in order to create a 'fake' display and firefox +RUN apt-get update && apt-get install -y x11vnc xvfb firefox +RUN mkdir ~/.vnc +# Setup a password +RUN x11vnc -storepasswd 1234 ~/.vnc/passwd +# Autostart firefox (might not be the best way, but it does the trick) +RUN bash -c 'echo "firefox" >> /.bashrc' + +EXPOSE 5900 +CMD ["x11vnc", "-forever", "-usepw", "-create"] +``` + +``` +# Multiple images example +# +# VERSION 0.1 + +FROM ubuntu +RUN echo foo > bar +# Will output something like ===> 907ad6c2736f + +FROM ubuntu +RUN echo moo > oink +# Will output something like ===> 695d7793cbe4 + +# You᾿ll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with +# /oink. +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/attach.md b/vendor/github.com/moby/moby/docs/reference/commandline/attach.md new file mode 100644 index 0000000..9f76a54 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/attach.md @@ -0,0 +1,152 @@ +--- +title: "attach" +description: "The attach command description and usage" +keywords: "attach, running, container" +--- + + + +# attach + +```markdown +Usage: docker attach [OPTIONS] CONTAINER + +Attach to a running container + +Options: + --detach-keys string Override the key sequence for detaching a container + --help Print usage + --no-stdin Do not attach STDIN + --sig-proxy Proxy all received signals to the process (default true) +``` + +## Description + +Use `docker attach` to attach to a running container using the container's ID +or name, either to view its ongoing output or to control it interactively. +You can attach to the same contained process multiple times simultaneously, +screen sharing style, or quickly view the progress of your detached process. + +To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the +container. If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` to +the container. You can detach from a container and leave it running using the + `CTRL-p CTRL-q` key sequence. + +> **Note:** +> A process running as PID 1 inside a container is treated specially by +> Linux: it ignores any signal with the default action. So, the process +> will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do +> so. + +It is forbidden to redirect the standard input of a `docker attach` command +while attaching to a tty-enabled container (i.e.: launched with `-t`). + +While a client is connected to container's stdio using `docker attach`, Docker +uses a ~1MB memory buffer to maximize the throughput of the application. If +this buffer is filled, the speed of the API connection will start to have an +effect on the process output writing speed. This is similar to other +applications like SSH. Because of this, it is not recommended to run +performance critical applications that generate a lot of output in the +foreground over a slow client connection. Instead, users should use the +`docker logs` command to get access to the logs. + +### Override the detach sequence + +If you want, you can configure an override the Docker key sequence for detach. +This is useful if the Docker default sequence conflicts with key sequence you +use for other applications. There are two ways to define your own detach key +sequence, as a per-container override or as a configuration property on your +entire configuration. + +To override the sequence for an individual container, use the +`--detach-keys=""` flag with the `docker attach` command. The format of +the `` is either a letter [a-Z], or the `ctrl-` combined with any of +the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key +sequences. To configure a different configuration default key sequence for all +containers, see [**Configuration file** section](cli.md#configuration-files). + +## Examples + +### Attach to and detach from a running container + +```bash +$ docker run -d --name topdemo ubuntu /usr/bin/top -b + +$ docker attach topdemo + +top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 +Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie +Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st +Mem: 373572k total, 355560k used, 18012k free, 27872k buffers +Swap: 786428k total, 0k used, 786428k free, 221740k cached + +PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + + + top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355780k used, 17792k free, 27880k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top +^C$ + +$ echo $? +0 +$ docker ps -a | grep topdemo + +7998ac8581f9 ubuntu:14.04 "/usr/bin/top -b" 38 seconds ago Exited (0) 21 seconds ago topdemo +``` + +### Get the exit code of the container's command + +And in this second example, you can see the exit code returned by the `bash` +process is returned by the `docker attach` command to its caller too: + +```bash + $ docker run --name test -d -it debian + + 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab + + $ docker attach test + + root@f38c87f2a42d:/# exit 13 + + exit + + $ echo $? + + 13 + + $ docker ps -a | grep test + + 275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/build.md b/vendor/github.com/moby/moby/docs/reference/commandline/build.md new file mode 100644 index 0000000..5de623a --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/build.md @@ -0,0 +1,450 @@ +--- +title: "build" +description: "The build command description and usage" +keywords: "build, docker, image" +--- + + + +# build + +```markdown +Usage: docker build [OPTIONS] PATH | URL | - + +Build an image from a Dockerfile + +Options: + --build-arg value Set build-time variables (default []) + --cache-from value Images to consider as cache sources (default []) + --cgroup-parent string Optional parent cgroup for the container + --compress Compress the build context using gzip + --cpu-period int Limit the CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit the CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --disable-content-trust Skip image verification (default true) + -f, --file string Name of the Dockerfile (Default is 'PATH/Dockerfile') + --force-rm Always remove intermediate containers + --help Print usage + --isolation string Container isolation technology + --label value Set metadata for an image (default []) + -m, --memory string Memory limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --network string Set the networking mode for the RUN instructions during build + 'bridge': use default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-cache Do not use cache when building the image + --pull Always attempt to pull a newer version of the image + -q, --quiet Suppress the build output and print image ID on success + --rm Remove intermediate containers after a successful build (default true) + --security-opt value Security Options (default []) + --shm-size string Size of /dev/shm, default value is 64MB. + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --squash Squash newly built layers into a single new layer (**Experimental Only**) + -t, --tag value Name and optionally a tag in the 'name:tag' format (default []) + --ulimit value Ulimit options (default []) +``` + +## Description + +Builds Docker images from a Dockerfile and a "context". A build's context is +the files located in the specified `PATH` or `URL`. The build process can refer +to any of the files in the context. For example, your build can use an +[*ADD*](../builder.md#add) instruction to reference a file in the +context. + +The `URL` parameter can refer to three kinds of resources: Git repositories, +pre-packaged tarball contexts and plain text files. + +### Git repositories + +When the `URL` parameter points to the location of a Git repository, the +repository acts as the build context. The system recursively clones the +repository and its submodules using a `git clone --depth 1 --recursive` +command. This command runs in a temporary directory on your local host. After +the command succeeds, the directory is sent to the Docker daemon as the +context. Local clones give you the ability to access private repositories using +local user credentials, VPN's, and so forth. + +Git URLs accept context configuration in their fragment section, separated by a +colon `:`. The first part represents the reference that Git will check out, +this can be either a branch, a tag, or a commit SHA. The second part represents +a subdirectory inside the repository that will be used as a build context. + +For example, run this command to use a directory called `docker` in the branch +`container`: + +```bash +$ docker build https://github.com/docker/rootfs.git#container:docker +``` + +The following table represents all the valid suffixes with their build +contexts: + +Build Syntax Suffix | Commit Used | Build Context Used +--------------------------------|-----------------------|------------------- +`myrepo.git` | `refs/heads/master` | `/` +`myrepo.git#mytag` | `refs/tags/mytag` | `/` +`myrepo.git#mybranch` | `refs/heads/mybranch` | `/` +`myrepo.git#abcdef` | `sha1 = abcdef` | `/` +`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder` +`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder` +`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder` +`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder` +`myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder` + + +### Tarball contexts + +If you pass an URL to a remote tarball, the URL itself is sent to the daemon: + +```bash +$ docker build http://server/context.tar.gz +``` + +The download operation will be performed on the host the Docker daemon is +running on, which is not necessarily the same host from which the build command +is being issued. The Docker daemon will fetch `context.tar.gz` and use it as the +build context. Tarball contexts must be tar archives conforming to the standard +`tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2', +'gzip' or 'identity' (no compression) formats. + +### Text files + +Instead of specifying a context, you can pass a single `Dockerfile` in the +`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`: + +```bash +$ docker build - < Dockerfile +``` + +With Powershell on Windows, you can run: + +```powershell +Get-Content Dockerfile | docker build - +``` + +If you use `STDIN` or specify a `URL` pointing to a plain text file, the system +places the contents into a file called `Dockerfile`, and any `-f`, `--file` +option is ignored. In this scenario, there is no context. + +By default the `docker build` command will look for a `Dockerfile` at the root +of the build context. The `-f`, `--file`, option lets you specify the path to +an alternative file to use instead. This is useful in cases where the same set +of files are used for multiple builds. The path must be to a file within the +build context. If a relative path is specified then it is interpreted as +relative to the root of the context. + +In most cases, it's best to put each Dockerfile in an empty directory. Then, +add to that directory only the files needed for building the Dockerfile. To +increase the build's performance, you can exclude files and directories by +adding a `.dockerignore` file to that directory as well. For information on +creating one, see the [.dockerignore file](../builder.md#dockerignore-file). + +If the Docker client loses connection to the daemon, the build is canceled. +This happens if you interrupt the Docker client with `CTRL-c` or if the Docker +client is killed for any reason. If the build initiated a pull which is still +running at the time the build is cancelled, the pull is cancelled as well. + +## Return code + +On a successful build, a return code of success `0` will be returned. When the +build fails, a non-zero failure code will be returned. + +There should be informational output of the reason for failure output to +`STDERR`: + +```bash +$ docker build -t fail . + +Sending build context to Docker daemon 2.048 kB +Sending build context to Docker daemon +Step 1/3 : FROM busybox + ---> 4986bf8c1536 +Step 2/3 : RUN exit 13 + ---> Running in e26670ec7a0a +INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13 +$ echo $? +1 +``` + +See also: + +[*Dockerfile Reference*](../builder.md). + +## Examples + +### Build with PATH + +```bash +$ docker build . + +Uploading context 10240 bytes +Step 1/3 : FROM busybox +Pulling repository busybox + ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ +Step 2/3 : RUN ls -lh / + ---> Running in 9c9e81692ae9 +total 24 +drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin +drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev +drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib +lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib +dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc +lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin +dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys +drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr + ---> b35f4035db3f +Step 3/3 : CMD echo Hello world + ---> Running in 02071fceb21b + ---> f52f38b7823e +Successfully built f52f38b7823e +Removing intermediate container 9c9e81692ae9 +Removing intermediate container 02071fceb21b +``` + +This example specifies that the `PATH` is `.`, and so all the files in the +local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies +where to find the files for the "context" of the build on the Docker daemon. +Remember that the daemon could be running on a remote machine and that no +parsing of the Dockerfile happens at the client side (where you're running +`docker build`). That means that *all* the files at `PATH` get sent, not just +the ones listed to [*ADD*](../builder.md#add) in the Dockerfile. + +The transfer of context from the local machine to the Docker daemon is what the +`docker` client means when you see the "Sending build context" message. + +If you wish to keep the intermediate containers after the build is complete, +you must use `--rm=false`. This does not affect the build cache. + +### Build with URL + +```bash +$ docker build github.com/creack/docker-firefox +``` + +This will clone the GitHub repository and use the cloned repository as context. +The Dockerfile at the root of the repository is used as Dockerfile. You can +specify an arbitrary Git repository by using the `git://` or `git@` scheme. + +```bash +$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz + +Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B +Step 1/3 : FROM busybox + ---> 8c2e06607696 +Step 2/3 : ADD ctx/container.cfg / + ---> e7829950cee3 +Removing intermediate container b35224abf821 +Step 3/3 : CMD /bin/ls + ---> Running in fbc63d321d73 + ---> 3286931702ad +Removing intermediate container fbc63d321d73 +Successfully built 377c409b35e4 +``` + +This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which +downloads and extracts the referenced tarball. The `-f ctx/Dockerfile` +parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that is used +to build the image. Any `ADD` commands in that `Dockerfile` that refer to local +paths must be relative to the root of the contents inside `ctx.tar.gz`. In the +example above, the tarball contains a directory `ctx/`, so the `ADD +ctx/container.cfg /` operation works as expected. + +### Build with - + +```bash +$ docker build - < Dockerfile +``` + +This will read a Dockerfile from `STDIN` without context. Due to the lack of a +context, no contents of any local directory will be sent to the Docker daemon. +Since there is no context, a Dockerfile `ADD` only works if it refers to a +remote URL. + +```bash +$ docker build - < context.tar.gz +``` + +This will build an image for a compressed context read from `STDIN`. Supported +formats are: bzip2, gzip and xz. + +### Use a .dockerignore file + +```bash +$ docker build . + +Uploading context 18.829 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +$ echo ".git" > .dockerignore +$ docker build . +Uploading context 6.76 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +``` + +This example shows the use of the `.dockerignore` file to exclude the `.git` +directory from the context. Its effect can be seen in the changed size of the +uploaded context. The builder reference contains detailed information on +[creating a .dockerignore file](../builder.md#dockerignore-file) + +### Tag an image (-t) + +```bash +$ docker build -t vieux/apache:2.0 . +``` + +This will build like the previous example, but it will then tag the resulting +image. The repository name will be `vieux/apache` and the tag will be `2.0`. +[Read more about valid tags](tag.md). + +You can apply multiple tags to an image. For example, you can apply the `latest` +tag to a newly built image and add another tag that references a specific +version. +For example, to tag an image both as `whenry/fedora-jboss:latest` and +`whenry/fedora-jboss:v2.1`, use the following: + +```bash +$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . +``` +### Specify a Dockerfile (-f) + +```bash +$ docker build -f Dockerfile.debug . +``` + +This will use a file called `Dockerfile.debug` for the build instructions +instead of `Dockerfile`. + +```bash +$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . +$ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . +``` + +The above commands will build the current build context (as specified by the +`.`) twice, once using a debug version of a `Dockerfile` and once using a +production version. + +```bash +$ cd /home/me/myapp/some/dir/really/deep +$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp +$ docker build -f ../../../../dockerfiles/debug /home/me/myapp +``` + +These two `docker build` commands do the exact same thing. They both use the +contents of the `debug` file instead of looking for a `Dockerfile` and will use +`/home/me/myapp` as the root of the build context. Note that `debug` is in the +directory structure of the build context, regardless of how you refer to it on +the command line. + +> **Note:** +> `docker build` will return a `no such file or directory` error if the +> file or directory does not exist in the uploaded context. This may +> happen if there is no context, or if you specify a file that is +> elsewhere on the Host system. The context is limited to the current +> directory (and its children) for security reasons, and to ensure +> repeatable builds on remote Docker hosts. This is also the reason why +> `ADD ../file` will not work. + +### Use a custom parent cgroup (--cgroup-parent) + +When `docker build` is run with the `--cgroup-parent` option the containers +used in the build will be run with the [corresponding `docker run` +flag](../run.md#specifying-custom-cgroups). + +### Set ulimits in container (--ulimit) + +Using the `--ulimit` option with `docker build` will cause each build step's +container to be started using those [`--ulimit` +flag values](./run.md#set-ulimits-in-container-ulimit). + +### Set build-time variables (--build-arg) + +You can use `ENV` instructions in a Dockerfile to define variable +values. These values persist in the built image. However, often +persistence is not what you want. Users want to specify variables differently +depending on which host they build an image on. + +A good example is `http_proxy` or source versions for pulling intermediate +files. The `ARG` instruction lets Dockerfile authors define values that users +can set at build-time using the `--build-arg` flag: + +```bash +$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 . +``` + +This flag allows you to pass the build-time variables that are +accessed like regular environment variables in the `RUN` instruction of the +Dockerfile. Also, these values don't persist in the intermediate or final images +like `ENV` values do. + +Using this flag will not alter the output you see when the `ARG` lines from the +Dockerfile are echoed during the build process. + +For detailed information on using `ARG` and `ENV` instructions, see the +[Dockerfile reference](../builder.md). + +### Optional security options (--security-opt) + +This flag is only supported on a daemon running on Windows, and only supports +the `credentialspec` option. The `credentialspec` must be in the format +`file://spec.txt` or `registry://keyname`. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. | + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + + +### Squash an image's layers (--squash) **Experimental Only** + +Once the image is built, squash the new layers into a new image with a single +new layer. Squashing does not destroy any existing image, rather it creates a new +image with the content of the squshed layers. This effectively makes it look +like all `Dockerfile` commands were created with a single layer. The build +cache is preserved with this method. + +**Note**: using this option means the new image will not be able to take +advantage of layer sharing with other images and may use significantly more +space. + +**Note**: using this option you may see significantly more space used due to +storing two copies of the image, one for the build cache with all the cache +layers in tact, and one for the squashed version. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/cli.md b/vendor/github.com/moby/moby/docs/reference/commandline/cli.md new file mode 100644 index 0000000..3b4577a --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/cli.md @@ -0,0 +1,264 @@ +--- +title: "Use the Docker command line" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +--- + + + +# docker + +To list available commands, either run `docker` with no parameters +or execute `docker help`: + +```bash +$ docker +Usage: docker [OPTIONS] COMMAND [ARG...] + docker [ --help | -v | --version ] + +A self-sufficient runtime for containers. + +Options: + --config string Location of client config files (default "/root/.docker") + -D, --debug Enable debug mode + --help Print usage + -H, --host value Daemon socket(s) to connect to (default []) + -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") + --tls Use TLS; implied by --tlsverify + --tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem") + --tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem") + --tlskey string Path to TLS key file (default "/root/.docker/key.pem") + --tlsverify Use TLS and verify the remote + -v, --version Print version information and quit + +Commands: + attach Attach to a running container + # […] +``` + +## Description + +Depending on your Docker system configuration, you may be required to preface +each `docker` command with `sudo`. To avoid having to use `sudo` with the +`docker` command, your system administrator can create a Unix group called +`docker` and add users to it. + +For more information about installing Docker or `sudo` configuration, refer to +the [installation](https://docs.docker.com/engine/installation/) instructions for your operating system. + +### Environment variables + +For easy reference, the following list of environment variables are supported +by the `docker` command line: + +* `DOCKER_API_VERSION` The API version to use (e.g. `1.19`) +* `DOCKER_CONFIG` The location of your client configuration files. +* `DOCKER_CERT_PATH` The location of your authentication keys. +* `DOCKER_DRIVER` The graph driver to use. +* `DOCKER_HOST` Daemon socket to connect to. +* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is + unsuitable for Docker. +* `DOCKER_RAMDISK` If set this will disable 'pivot_root'. +* `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote. +* `DOCKER_CONTENT_TRUST` When set Docker uses notary to sign and verify images. + Equates to `--disable-content-trust=false` for build, create, pull, push, run. +* `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults + to the same URL as the registry. +* `DOCKER_HIDE_LEGACY_COMMANDS` When set, Docker hides "legacy" top-level commands (such as `docker rm`, and + `docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are + printed. This may become the default in a future release, at which point this environment-variable is removed. +* `DOCKER_TMPDIR` Location for temporary Docker files. + +Because Docker is developed using Go, you can also use any environment +variables used by the Go runtime. In particular, you may find these useful: + +* `HTTP_PROXY` +* `HTTPS_PROXY` +* `NO_PROXY` + +These Go environment variables are case-insensitive. See the +[Go specification](http://golang.org/pkg/net/http/) for details on these +variables. + +### Configuration files + +By default, the Docker command line stores its configuration files in a +directory called `.docker` within your `$HOME` directory. However, you can +specify a different location via the `DOCKER_CONFIG` environment variable +or the `--config` command line option. If both are specified, then the +`--config` option overrides the `DOCKER_CONFIG` environment variable. +For example: + + docker --config ~/testconfigs/ ps + +Instructs Docker to use the configuration files in your `~/testconfigs/` +directory when running the `ps` command. + +Docker manages most of the files in the configuration directory +and you should not modify them. However, you *can modify* the +`config.json` file to control certain aspects of how the `docker` +command behaves. + +Currently, you can modify the `docker` command behavior using environment +variables or command-line options. You can also use options within +`config.json` to modify some of the same behavior. When using these +mechanisms, you must keep in mind the order of precedence among them. Command +line options override environment variables and environment variables override +properties you specify in a `config.json` file. + +The `config.json` file stores a JSON encoding of several properties: + +The property `HttpHeaders` specifies a set of headers to include in all messages +sent from the Docker client to the daemon. Docker does not try to interpret or +understand these header; it simply puts them into the messages. Docker does +not allow these headers to change any headers it sets for itself. + +The property `psFormat` specifies the default format for `docker ps` output. +When the `--format` flag is not provided with the `docker ps` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see the +[**Formatting** section in the `docker ps` documentation](ps.md) + +The property `imagesFormat` specifies the default format for `docker images` output. +When the `--format` flag is not provided with the `docker images` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see the [**Formatting** section in the `docker images` documentation](images.md) + +The property `serviceInspectFormat` specifies the default format for `docker +service inspect` output. When the `--format` flag is not provided with the +`docker service inspect` command, Docker's client uses this property. If this +property is not set, the client falls back to the default json format. For a +list of supported formatting directives, see the +[**Formatting** section in the `docker service inspect` documentation](service_inspect.md) + +The property `statsFormat` specifies the default format for `docker +stats` output. When the `--format` flag is not provided with the +`docker stats` command, Docker's client uses this property. If this +property is not set, the client falls back to the default table +format. For a list of supported formatting directives, see +[**Formatting** section in the `docker stats` documentation](stats.md) + +Once attached to a container, users detach from it and leave it running using +the using `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable +using the `detachKeys` property. Specify a `` value for the +property. The format of the `` is a comma-separated list of either +a letter [a-Z], or the `ctrl-` combined with any of the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +Your customization applies to all containers started in with your Docker client. +Users can override your custom or the default key sequence on a per-container +basis. To do this, the user specifies the `--detach-keys` flag with the `docker +attach`, `docker exec`, `docker run` or `docker start` command. + +Following is a sample `config.json` file: + +```json +{ + "HttpHeaders": { + "MyHeader": "MyValue" + }, + "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", + "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", + "statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", + "serviceInspectFormat": "pretty", + "detachKeys": "ctrl-e,e", + "credsStore": "secretservice", + "credHelpers": { + "awesomereg.example.org": "hip-star", + "unicorn.example.com": "vcbait" + } +} +``` + +### Notary + +If using your own notary server and a self-signed certificate or an internal +Certificate Authority, you need to place the certificate at +`tls//ca.crt` in your docker config directory. + +Alternatively you can trust the certificate globally by adding it to your system's +list of root Certificate Authorities. + +## Examples + +### Display help text + +To list the help on any command just execute the command, followed by the +`--help` option. + + $ docker run --help + + Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] + + Run a command in a new container + + Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + ... + +### Option types + +Single character command line options can be combined, so rather than +typing `docker run -i -t --name test busybox sh`, +you can write `docker run -it --name test busybox sh`. + +#### Boolean + +Boolean options take the form `-d=false`. The value you see in the help text is +the default value which is set if you do **not** specify that flag. If you +specify a Boolean flag without a value, this will set the flag to `true`, +irrespective of the default value. + +For example, running `docker run -d` will set the value to `true`, so your +container **will** run in "detached" mode, in the background. + +Options which default to `true` (e.g., `docker build --rm=true`) can only be +set to the non-default value by explicitly setting them to `false`: + +```bash +$ docker build --rm=false . +``` + +#### Multi + +You can specify options like `-a=[]` multiple times in a single command line, +for example in these commands: + +```bash +$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash + +$ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls +``` + +Sometimes, multiple options can call for a more complex value string as for +`-v`: + +```bash +$ docker run -v /host:/container example/mysql +``` + +> **Note**: Do not use the `-t` and `-a stderr` options together due to +> limitations in the `pty` implementation. All `stderr` in `pty` mode +> simply goes to `stdout`. + +#### Strings and Integers + +Options like `--name=""` expect a string, and they +can only be specified once. Options like `-c=0` +expect an integer, and they can only be specified once. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/commit.md b/vendor/github.com/moby/moby/docs/reference/commandline/commit.md new file mode 100644 index 0000000..438eaf4 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/commit.md @@ -0,0 +1,117 @@ +--- +title: "commit" +description: "The commit command description and usage" +keywords: "commit, file, changes" +--- + + + +# commit + +```markdown +Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] + +Create a new image from a container's changes + +Options: + -a, --author string Author (e.g., "John Hannibal Smith ") + -c, --change value Apply Dockerfile instruction to the created image (default []) + --help Print usage + -m, --message string Commit message + -p, --pause Pause container during commit (default true) +``` + +## Description + +It can be useful to commit a container's file changes or settings into a new +image. This allows you debug a container by running an interactive shell, or to +export a working dataset to another server. Generally, it is better to use +Dockerfiles to manage your images in a documented and maintainable way. +[Read more about valid image names and tags](tag.md). + +The commit operation will not include any data contained in +volumes mounted inside the container. + +By default, the container being committed and its processes will be paused +while the image is committed. This reduces the likelihood of encountering data +corruption during the process of creating the commit. If this behavior is +undesired, set the `--pause` option to false. + +The `--change` option will apply `Dockerfile` instructions to the image that is +created. Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Examples + +### Commit a container + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + +$ docker commit c3f279d17e0a svendowideit/testimage:version3 + +f5283438590d + +$ docker images + +REPOSITORY TAG ID CREATED SIZE +svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB +``` + +### Commit a container with new configurations + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + +$ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a + +[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] + +$ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3 + +f5283438590d + +$ docker inspect -f "{{ .Config.Env }}" f5283438590d + +[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] +``` + +### Commit a container with new `CMD` and `EXPOSE` instructions + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + +$ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 + +f5283438590d + +$ docker run -d svendowideit/testimage:version4 + +89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 + +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp distracted_fermat +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/container.md b/vendor/github.com/moby/moby/docs/reference/commandline/container.md new file mode 100644 index 0000000..5eefbf2 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/container.md @@ -0,0 +1,61 @@ + +--- +title: "container" +description: "The container command description and usage" +keywords: "container" +--- + + + +# container + +```markdown +Usage: docker container COMMAND + +Manage containers + +Options: + --help Print usage + +Commands: + attach Attach to a running container + commit Create a new image from a container's changes + cp Copy files/folders between a container and the local filesystem + create Create a new container + diff Inspect changes to files or directories on a container's filesystem + exec Run a command in a running container + export Export a container's filesystem as a tar archive + inspect Display detailed information on one or more containers + kill Kill one or more running containers + logs Fetch the logs of a container + ls List containers + pause Pause all processes within one or more containers + port List port mappings or a specific mapping for the container + prune Remove all stopped containers + rename Rename a container + restart Restart one or more containers + rm Remove one or more containers + run Run a command in a new container + start Start one or more stopped containers + stats Display a live stream of container(s) resource usage statistics + stop Stop one or more running containers + top Display the running processes of a container + unpause Unpause all processes within one or more containers + update Update configuration of one or more containers + wait Block until one or more containers stop, then print their exit codes + +Run 'docker container COMMAND --help' for more information on a command. + +``` + +## Description + +Manage containers. + diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/container_prune.md b/vendor/github.com/moby/moby/docs/reference/commandline/container_prune.md new file mode 100644 index 0000000..3b917b2 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/container_prune.md @@ -0,0 +1,53 @@ +--- +title: "container prune" +description: "Remove all stopped containers" +keywords: container, prune, delete, remove +--- + + + +# container prune + +```markdown +Usage: docker container prune [OPTIONS] + +Remove all stopped containers + +Options: + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Description + +Removes all stopped containers. + +## Examples + +### Prune containers + +```bash +$ docker container prune +WARNING! This will remove all stopped containers. +Are you sure you want to continue? [y/N] y +Deleted Containers: +4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063 +f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360 + +Total reclaimed space: 212 B +``` + +## Related commands + +* [system df](system_df.md) +* [volume prune](volume_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/cp.md b/vendor/github.com/moby/moby/docs/reference/commandline/cp.md new file mode 100644 index 0000000..5cbbee2 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/cp.md @@ -0,0 +1,115 @@ +--- +title: "cp" +description: "The cp command description and usage" +keywords: "copy, container, files, folders" +--- + + + +# cp + +```markdown +Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH + +Copy files/folders between a container and the local filesystem + +Use '-' as the source to read a tar archive from stdin +and extract it to a directory destination in a container. +Use '-' as the destination to stream a tar archive of a +container source to stdout. + +Options: + -L, --follow-link Always follow symbol link in SRC_PATH + --help Print usage +``` + +## Description + +The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. +You can copy from the container's file system to the local machine or the +reverse, from the local filesystem to the container. If `-` is specified for +either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from +`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. +The `SRC_PATH` or `DEST_PATH` can be a file or directory. + +The `docker cp` command assumes container paths are relative to the container's +`/` (root) directory. This means supplying the initial forward slash is optional; +The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and +`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can +be an absolute or relative value. The command interprets a local machine's +relative paths as relative to the current working directory where `docker cp` is +run. + +The `cp` command behaves like the Unix `cp -a` command in that directories are +copied recursively with permissions preserved if possible. Ownership is set to +the user and primary group at the destination. For example, files copied to a +container are created with `UID:GID` of the root user. Files copied to the local +machine are created with the `UID:GID` of the user which invoked the `docker cp` +command. If you specify the `-L` option, `docker cp` follows any symbolic link +in the `SRC_PATH`. `docker cp` does *not* create parent directories for +`DEST_PATH` if they do not exist. + +Assuming a path separator of `/`, a first argument of `SRC_PATH` and second +argument of `DEST_PATH`, the behavior is as follows: + +- `SRC_PATH` specifies a file + - `DEST_PATH` does not exist + - the file is saved to a file created at `DEST_PATH` + - `DEST_PATH` does not exist and ends with `/` + - Error condition: the destination directory must exist. + - `DEST_PATH` exists and is a file + - the destination is overwritten with the source file's contents + - `DEST_PATH` exists and is a directory + - the file is copied into this directory using the basename from + `SRC_PATH` +- `SRC_PATH` specifies a directory + - `DEST_PATH` does not exist + - `DEST_PATH` is created as a directory and the *contents* of the source + directory are copied into this directory + - `DEST_PATH` exists and is a file + - Error condition: cannot copy a directory to a file + - `DEST_PATH` exists and is a directory + - `SRC_PATH` does not end with `/.` (that is: _slash_ followed by _dot_) + - the source directory is copied into this directory + - `SRC_PATH` does end with `/.` (that is: _slash_ followed by _dot_) + - the *content* of the source directory is copied into this + directory + +The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above +rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not +the target, is copied by default. To copy the link target and not the link, specify +the `-L` option. + +A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can +also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local +machine, for example `file:name.txt`. If you use a `:` in a local machine path, +you must be explicit with a relative or absolute path, for example: + + `/path/to/file:name.txt` or `./file:name.txt` + +It is not possible to copy certain system files such as resources under +`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by +the user in the container. However, you can still copy such files by manually +running `tar` in `docker exec`. Both of the following examples do the same thing +in different ways (consider `SRC_PATH` and `DEST_PATH` are directories): + +```bash +$ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - +``` + +```bash +$ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - +``` + +Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. +The command extracts the content of the tar to the `DEST_PATH` in container's +filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as +the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/create.md b/vendor/github.com/moby/moby/docs/reference/commandline/create.md new file mode 100644 index 0000000..00de1dc --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/create.md @@ -0,0 +1,234 @@ +--- +title: "create" +description: "The create command description and usage" +keywords: "docker, create, container" +--- + + + +# create + +Creates a new container. + +```markdown +Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] + +Create a new container + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --blkio-weight-device value Block IO weight (relative device weight) (default []) + --cap-add value Add Linux capabilities (default []) + --cap-drop value Drop Linux capabilities (default []) + --cgroup-parent string Optional parent cgroup for the container + --cidfile string Write the container ID to the file + --cpu-count int The number of CPUs available for execution by the container. + Windows daemon only. On Windows Server containers, this is + approximated as a percentage of total CPU usage. + --cpu-percent int CPU percent (Windows only) + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpus NanoCPUs Number of CPUs (default 0.000) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --device value Add a host device to the container (default []) + --device-read-bps value Limit read rate (bytes per second) from a device (default []) + --device-read-iops value Limit read rate (IO per second) from a device (default []) + --device-write-bps value Limit write rate (bytes per second) to a device (default []) + --device-write-iops value Limit write rate (IO per second) to a device (default []) + --disable-content-trust Skip image verification (default true) + --dns value Set custom DNS servers (default []) + --dns-option value Set DNS options (default []) + --dns-search value Set custom DNS search domains (default []) + --entrypoint string Overwrite the default ENTRYPOINT of the image + -e, --env value Set environment variables (default []) + --env-file value Read in a file of environment variables (default []) + --expose value Expose a port or a range of ports (default []) + --group-add value Add additional groups to join (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) + --help Print usage + -h, --hostname string Container host name + --init Run an init inside the container that forwards signals and reaps processes + --init-path string Path to the docker-init binary + -i, --interactive Keep STDIN open even if not attached + --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) + --io-maxiops uint Maximum IOps limit for the system drive (Windows only) + --ip string Container IPv4 address (e.g. 172.30.100.104) + --ip6 string Container IPv6 address (e.g. 2001:db8::33) + --ipc string IPC namespace to use + --isolation string Container isolation technology + --kernel-memory string Kernel memory limit + -l, --label value Set meta data on a container (default []) + --label-file value Read in a line delimited file of labels (default []) + --link value Add link to another container (default []) + --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) + --log-driver string Logging driver for the container + --log-opt value Log driver options (default []) + --mac-address string Container MAC address (e.g. 92:d0:c6:0a:29:33) + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) + --name string Assign a name to the container + --network-alias value Add network-scoped alias for the container (default []) + --network string Connect a container to a network (default "default") + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-healthcheck Disable any container-specified HEALTHCHECK + --oom-kill-disable Disable OOM Killer + --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) + --pid string PID namespace to use + --pids-limit int Tune container pids limit (set -1 for unlimited), kernel >= 4.3 + --privileged Give extended privileges to this container + -p, --publish value Publish a container's port(s) to the host (default []) + -P, --publish-all Publish all exposed ports to random ports + --read-only Mount the container's root filesystem as read only + --restart string Restart policy to apply when a container exits (default "no") + Possible values are: no, on-failure[:max-retry], always, unless-stopped + --rm Automatically remove the container when it exits + --runtime string Runtime to use for this container + --security-opt value Security Options (default []) + --shm-size string Size of /dev/shm, default value is 64MB. + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --stop-signal string Signal to stop a container, SIGTERM by default (default "SIGTERM") + --stop-timeout=10 Timeout (in seconds) to stop a container + --storage-opt value Storage driver options for the container (default []) + --sysctl value Sysctl options (default map[]) + --tmpfs value Mount a tmpfs directory (default []) + -t, --tty Allocate a pseudo-TTY + --ulimit value Ulimit options (default []) + -u, --user string Username or UID (format: [:]) + --userns string User namespace to use + 'host': Use the Docker host user namespace + '': Use the Docker daemon user namespace specified by `--userns-remap` option. + --uts string UTS namespace to use + -v, --volume value Bind mount a volume (default []). The format + is `[host-src:]container-dest[:]`. + The comma-delimited `options` are [rw|ro], + [z|Z], [[r]shared|[r]slave|[r]private], and + [nocopy]. The 'host-src' is an absolute path + or a name value. + --volume-driver string Optional volume driver for the container + --volumes-from value Mount volumes from the specified container(s) (default []) + -w, --workdir string Working directory inside the container +``` +## Description + +The `docker create` command creates a writeable container layer over the +specified image and prepares it for running the specified command. The +container ID is then printed to `STDOUT`. This is similar to `docker run -d` +except the container is never started. You can then use the +`docker start ` command to start the container at any point. + +This is useful when you want to set up a container configuration ahead of time +so that it is ready to start when you need it. The initial status of the +new container is `created`. + +Please see the [run command](run.md) section and the [Docker run reference](../run.md) for more details. + +## Examples + +### Create and start a container + +```bash +$ docker create -t -i fedora bash + +6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 + +$ docker start -a -i 6d8af538ec5 + +bash-4.2# +``` + +### Initialize volumes + +As of v1.4.0 container volumes are initialized during the `docker create` phase +(i.e., `docker run` too). For example, this allows you to `create` the `data` +volume container, and then use it from another container: + +```bash +$ docker create -v /data --name data ubuntu + +240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 + +$ docker run --rm --volumes-from data ubuntu ls -la /data + +total 8 +drwxr-xr-x 2 root root 4096 Dec 5 04:10 . +drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. +``` + +Similarly, `create` a host directory bind mounted volume container, which can +then be used from the subsequent container: + +```bash +$ docker create -v /home/docker:/docker --name docker ubuntu + +9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 + +$ docker run --rm --volumes-from docker ubuntu ls -la /docker + +total 20 +drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . +drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. +-rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history +-rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc +-rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig +drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local +-rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile +drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh +drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker +``` + + +Set storage driver options per container. + +```bash +$ docker create -it --storage-opt size=120G fedora /bin/bash +``` + +This (size) will allow to set the container rootfs size to 120G at creation time. +This option is only available for the `devicemapper`, `btrfs`, `overlay2`, +`windowsfilter` and `zfs` graph drivers. +For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, +user cannot pass a size less than the Default BaseFS Size. +For the `overlay2` storage driver, the size option is only available if the +backing fs is `xfs` and mounted with the `pquota` mount option. +Under these conditions, user can pass any size less then the backing fs size. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the +daemon is running on Windows server, or `hyperv` if running on Windows client. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. | + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/deploy.md b/vendor/github.com/moby/moby/docs/reference/commandline/deploy.md new file mode 100644 index 0000000..a430a90 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/deploy.md @@ -0,0 +1,111 @@ +--- +title: "deploy" +description: "The deploy command description and usage" +keywords: "stack, deploy" +advisory: "experimental" +--- + + + +# deploy (experimental) + +An alias for `stack deploy`. + +```markdown +Usage: docker deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + --compose-file string Path to a Compose file + --help Print usage + --with-registry-auth Send registry authentication details to Swarm agents +``` + +## Description + +Create and update a stack from a `compose` or a `dab` file on the swarm. This command +has to be run targeting a manager node. + +## Examples + +### Compose file + +The `deploy` command supports compose file version `3.0` and above. + +```bash +$ docker stack deploy --compose-file docker-compose.yml vossibility + +Ignoring unsupported options: links + +Creating network vossibility_vossibility +Creating network vossibility_default +Creating service vossibility_nsqd +Creating service vossibility_logstash +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_ghollector +Creating service vossibility_lookupd +``` + +You can verify that the services were correctly created + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +### DAB file + +```bash +$ docker stack deploy --bundle-file vossibility-stack.dab vossibility + +Loading bundle from vossibility-stack.dab +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_logstash +Creating service vossibility_lookupd +Creating service vossibility_nsqd +Creating service vossibility_vossibility-collector +``` + +You can verify that the services were correctly created: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Related commands + +* [stack config](stack_config.md) +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/diff.md b/vendor/github.com/moby/moby/docs/reference/commandline/diff.md new file mode 100644 index 0000000..e6e12ce --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/diff.md @@ -0,0 +1,67 @@ +--- +title: "diff" +description: "The diff command description and usage" +keywords: "list, changed, files, container" +--- + + + +# diff + +```markdown +Usage: docker diff CONTAINER + +Inspect changes to files or directories on a container's filesystem + +Options: + --help Print usage +``` + +## Description + +List the changed files and directories in a container᾿s filesystem since the +container was created. Three different types of change are tracked: + +| Symbol | Description | +|--------|---------------------------------| +| `A` | A file or directory was added | +| `D` | A file or directory was deleted | +| `C` | A file or directory was changed | + +You can use the full or shortened container ID or the container name set using +`docker run --name` option. + +## Examples + +Inspect the changes to an `nginx` container: + +```bash +$ docker diff 1fdfd1f54c1b + +C /dev +C /dev/console +C /dev/core +C /dev/stdout +C /dev/fd +C /dev/ptmx +C /dev/stderr +C /dev/stdin +C /run +A /run/nginx.pid +C /var/lib/nginx/tmp +A /var/lib/nginx/tmp/client_body +A /var/lib/nginx/tmp/fastcgi +A /var/lib/nginx/tmp/proxy +A /var/lib/nginx/tmp/scgi +A /var/lib/nginx/tmp/uwsgi +C /var/log/nginx +A /var/log/nginx/access.log +A /var/log/nginx/error.log +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/dockerd.md b/vendor/github.com/moby/moby/docs/reference/commandline/dockerd.md new file mode 100644 index 0000000..f13cf0a --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/dockerd.md @@ -0,0 +1,1368 @@ +--- +title: "dockerd" +aliases: ["/engine/reference/commandline/daemon/"] +description: "The daemon command description and usage" +keywords: "container, daemon, runtime" +--- + + + +# daemon + +```markdown +Usage: dockerd COMMAND + +A self-sufficient runtime for containers. + +Options: + --add-runtime runtime Register an additional OCI compatible runtime (default []) + --api-cors-header string Set CORS headers in the Engine API + --authorization-plugin list Authorization plugins to load (default []) + --bip string Specify network bridge IP + -b, --bridge string Attach containers to a network bridge + --cgroup-parent string Set parent cgroup for all containers + --cluster-advertise string Address or interface name to advertise + --cluster-store string URL of the distributed storage backend + --cluster-store-opt map Set cluster store options (default map[]) + --config-file string Daemon configuration file (default "/etc/docker/daemon.json") + --containerd string Path to containerd socket + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + -D, --debug Enable debug mode + --default-gateway ip Container default gateway IPv4 address + --default-gateway-v6 ip Container default gateway IPv6 address + --default-runtime string Default OCI runtime for containers (default "runc") + --default-ulimit ulimit Default ulimits for containers (default []) + --disable-legacy-registry Disable contacting legacy registries + --dns list DNS server to use (default []) + --dns-opt list DNS options to use (default []) + --dns-search list DNS search domains to use (default []) + --exec-opt list Runtime execution options (default []) + --exec-root string Root directory for execution state files (default "/var/run/docker") + --experimental Enable experimental features + --fixed-cidr string IPv4 subnet for fixed IPs + --fixed-cidr-v6 string IPv6 subnet for fixed IPs + -g, --graph string Root of the Docker runtime (default "/var/lib/docker") + -G, --group string Group for the unix socket (default "docker") + --help Print usage + -H, --host list Daemon socket(s) to connect to (default []) + --icc Enable inter-container communication (default true) + --init Run an init in the container to forward signals and reap processes + --init-path string Path to the docker-init binary + --insecure-registry list Enable insecure registry communication (default []) + --ip ip Default IP when binding container ports (default 0.0.0.0) + --ip-forward Enable net.ipv4.ip_forward (default true) + --ip-masq Enable IP masquerading (default true) + --iptables Enable addition of iptables rules (default true) + --ipv6 Enable IPv6 networking + --label list Set key=value labels to the daemon (default []) + --live-restore Enable live restore of docker when containers are still running + --log-driver string Default driver for container logs (default "json-file") + -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") + --log-opt map Default log driver options for containers (default map[]) + --max-concurrent-downloads int Set the max concurrent downloads for each pull (default 3) + --max-concurrent-uploads int Set the max concurrent uploads for each push (default 5) + --metrics-addr string Set default address and port to serve the metrics api on + --mtu int Set the containers network MTU + --oom-score-adjust int Set the oom_score_adj for the daemon (default -500) + -p, --pidfile string Path to use for daemon PID file (default "/var/run/docker.pid") + --raw-logs Full timestamps without ANSI coloring + --registry-mirror list Preferred Docker registry mirror (default []) + --seccomp-profile string Path to seccomp profile + --selinux-enabled Enable selinux support + --shutdown-timeout int Set the default shutdown timeout (default 15) + -s, --storage-driver string Storage driver to use + --storage-opt list Storage driver options (default []) + --swarm-default-advertise-addr string Set default address or interface for swarm advertised address + --tls Use TLS; implied by --tlsverify + --tlscacert string Trust certs signed only by this CA (default "~/.docker/ca.pem") + --tlscert string Path to TLS certificate file (default "~/.docker/cert.pem") + --tlskey string Path to TLS key file (default ~/.docker/key.pem") + --tlsverify Use TLS and verify the remote + --userland-proxy Use userland proxy for loopback traffic (default true) + --userland-proxy-path string Path to the userland proxy binary + --userns-remap string User/Group setting for user namespaces + -v, --version Print version information and quit +``` + +Options with [] may be specified multiple times. + +## Description + +`dockerd` is the persistent process that manages containers. Docker +uses different binaries for the daemon and client. To run the daemon you +type `dockerd`. + +To run the daemon with debug output, use `dockerd -D` or add `debug: true` to +the `daemon.json` file. + +> **Note**: In Docker 1.13 and higher, enable experimental features by starting +> `dockerd` with the `--experimental` flag or adding `experimental: true` to the +> `daemon.json` file. In earlier Docker versions, a different build was required +> to enable experimental features. + +## Examples + +### Daemon socket option + +The Docker daemon can listen for [Docker Engine API](../api/) +requests via three different types of Socket: `unix`, `tcp`, and `fd`. + +By default, a `unix` domain socket (or IPC socket) is created at +`/var/run/docker.sock`, requiring either `root` permission, or `docker` group +membership. + +If you need to access the Docker daemon remotely, you need to enable the `tcp` +Socket. Beware that the default setup provides un-encrypted and +un-authenticated direct access to the Docker daemon - and should be secured +either using the [built in HTTPS encrypted socket](https://docs.docker.com/engine/security/https/), or by +putting a secure web proxy in front of it. You can listen on port `2375` on all +network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network +interface using its IP address: `-H tcp://192.168.59.103:2375`. It is +conventional to use port `2375` for un-encrypted, and port `2376` for encrypted +communication with the daemon. + +> **Note**: If you're using an HTTPS encrypted socket, keep in mind that only +> TLS1.0 and greater are supported. Protocols SSLv3 and under are not +> supported anymore for security reasons. + +On Systemd based systems, you can communicate with the daemon via +[Systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), +use `dockerd -H fd://`. Using `fd://` will work perfectly for most setups but +you can also specify individual sockets: `dockerd -H fd://3`. If the +specified socket activated files aren't found, then Docker will exit. You can +find examples of using Systemd socket activation with Docker and Systemd in the +[Docker source tree](https://github.com/docker/docker/tree/master/contrib/init/systemd/). + +You can configure the Docker daemon to listen to multiple sockets at the same +time using multiple `-H` options: + +```bash +# listen using the default unix socket, and on 2 specific IP addresses on this host. + +$ sudo dockerd -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2 +``` + +The Docker client will honor the `DOCKER_HOST` environment variable to set the +`-H` flag for the client. Use **one** of the following commands: + +```bash +$ docker -H tcp://0.0.0.0:2375 ps +``` + +```bash +$ export DOCKER_HOST="tcp://0.0.0.0:2375" + +$ docker ps +``` + +Setting the `DOCKER_TLS_VERIFY` environment variable to any value other than +the empty string is equivalent to setting the `--tlsverify` flag. The following +are equivalent: + +```bash +$ docker --tlsverify ps +# or +$ export DOCKER_TLS_VERIFY=1 +$ docker ps +``` + +The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` +environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes +precedence over `HTTP_PROXY`. + +#### Bind Docker to another host/port or a Unix socket + +> **Warning**: +> Changing the default `docker` daemon binding to a +> TCP port or Unix *docker* user group will increase your security risks +> by allowing non-root users to gain *root* access on the host. Make sure +> you control access to `docker`. If you are binding +> to a TCP port, anyone with access to that port has full Docker access; +> so it is not advisable on an open network. + +With `-H` it is possible to make the Docker daemon to listen on a +specific IP and port. By default, it will listen on +`unix:///var/run/docker.sock` to allow only local connections by the +*root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP +to give access to everybody, but that is **not recommended** because +then it is trivial for someone to gain root access to the host where the +daemon is running. + +Similarly, the Docker client can use `-H` to connect to a custom port. +The Docker client will default to connecting to `unix:///var/run/docker.sock` +on Linux, and `tcp://127.0.0.1:2376` on Windows. + +`-H` accepts host and port assignment in the following format: + + tcp://[host]:[port][path] or unix://path + +For example: + +- `tcp://` -> TCP connection to `127.0.0.1` on either port `2376` when TLS encryption + is on, or port `2375` when communication is in plain text. +- `tcp://host:2375` -> TCP connection on + host:2375 +- `tcp://host:2375/path` -> TCP connection on + host:2375 and prepend path to all requests +- `unix://path/to/socket` -> Unix socket located + at `path/to/socket` + +`-H`, when empty, will default to the same value as +when no `-H` was passed in. + +`-H` also accepts short form for TCP bindings: `host:` or `host:port` or `:port` + +Run Docker in daemon mode: + +```bash +$ sudo /dockerd -H 0.0.0.0:5555 & +``` + +Download an `ubuntu` image: + +```bash +$ docker -H :5555 pull ubuntu +``` + +You can use multiple `-H`, for example, if you want to listen on both +TCP and a Unix socket + +```bash +# Run docker in daemon mode +$ sudo /dockerd -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock & +# Download an ubuntu image, use default Unix socket +$ docker pull ubuntu +# OR use the TCP port +$ docker -H tcp://127.0.0.1:2375 pull ubuntu +``` + +### Daemon storage-driver + +The Docker daemon has support for several different image layer storage +drivers: `aufs`, `devicemapper`, `btrfs`, `zfs`, `overlay` and `overlay2`. + +The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that +is unlikely to be merged into the main kernel. These are also known to cause +some serious kernel crashes. However `aufs` allows containers to share +executable and shared library memory, so is a useful choice when running +thousands of containers with the same program or libraries. + +The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) +snapshots. For each devicemapper graph location – typically +`/var/lib/docker/devicemapper` – a thin pool is created based on two block +devices, one for data and one for metadata. By default, these block devices +are created automatically by using loopback mounts of automatically created +sparse files. Refer to [Storage driver options](#storage-driver-options) below +for a way how to customize this setup. +[~jpetazzo/Resizing Docker containers with the Device Mapper plugin](http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) +article explains how to tune your existing setup without the use of options. + +The `btrfs` driver is very fast for `docker build` - but like `devicemapper` +does not share executable memory between devices. Use +`dockerd -s btrfs -g /mnt/btrfs_partition`. + +The `zfs` driver is probably not as fast as `btrfs` but has a longer track record +on stability. Thanks to `Single Copy ARC` shared blocks between clones will be +cached only once. Use `dockerd -s zfs`. To select a different zfs filesystem +set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options). + +The `overlay` is a very fast union filesystem. It is now merged in the main +Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). `overlay` +also supports page cache sharing, this means multiple containers accessing +the same file can share a single page cache entry (or entries), it makes +`overlay` as efficient with memory as `aufs` driver. Call +`dockerd -s overlay` to use it. + +> **Note**: As promising as `overlay` is, the feature is still quite young and +> should not be used in production. Most notably, using `overlay` can cause +> excessive inode consumption (especially as the number of images grows), as +> well as > being incompatible with the use of RPMs. + +The `overlay2` uses the same fast union filesystem but takes advantage of +[additional features](https://lkml.org/lkml/2015/2/11/106) added in Linux +kernel 4.0 to avoid excessive inode consumption. Call `dockerd -s overlay2` +to use it. + +> **Note**: Both `overlay` and `overlay2` are currently unsupported on `btrfs` +> or any Copy on Write filesystem and should only be used over `ext4` partitions. + +### Options per storage driver + +Particular storage-driver can be configured with options specified with +`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm`, +options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. + +#### Devicemapper options + +##### `dm.thinpooldev` + +Specifies a custom block storage device to use for the thin pool. + +If using a block device for device mapper storage, it is best to use `lvm` +to create and manage the thin-pool volume. This volume is then handed to Docker +to exclusively create snapshot volumes needed for images and containers. + +Managing the thin-pool outside of Engine makes for the most feature-rich +method of having Docker utilize device mapper thin provisioning as the +backing storage for Docker containers. The highlights of the lvm-based +thin-pool management feature include: automatic or interactive thin-pool +resize support, dynamically changing thin-pool features, automatic thinp +metadata checking when lvm activates the thin-pool, etc. + +As a fallback if no thin pool is provided, loopback files are +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Engine daemon has a +`--storage-opt dm.thinpooldev` argument provided. + +###### Example: + +```bash +$ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool +``` + +##### `dm.basesize` + +Specifies the size to use when creating the base device, which limits the +size of images and containers. The default value is 10G. Note, thin devices +are inherently "sparse", so a 10G device which is mostly empty doesn't use +10 GB of space on the pool. However, the filesystem will use more space for +the empty case the larger the device is. + +The base device size can be increased at daemon restart which will allow +all future images and containers (based on those new images) to be of the +new base device size. + +###### Examples + +```bash +$ sudo dockerd --storage-opt dm.basesize=50G +``` + +This will increase the base device size to 50G. The Docker daemon will throw an +error if existing base device size is larger than 50G. A user can use +this option to expand the base device size however shrinking is not permitted. + +This value affects the system-wide "base" empty filesystem +that may already be initialized and inherited by pulled images. Typically, +a change to this value requires additional steps to take effect: + + ```bash +$ sudo service docker stop + +$ sudo rm -rf /var/lib/docker + +$ sudo service docker start +``` + + +##### `dm.loopdatasize` + +> **Note**: This option configures devicemapper loopback, which should not +> be used in production. + +Specifies the size to use when creating the loopback file for the +"data" device which is used for the thin pool. The default size is +100G. The file is sparse, so it will not initially take up this +much space. + +###### Example + +```bash +$ sudo dockerd --storage-opt dm.loopdatasize=200G +``` + +##### `dm.loopmetadatasize` + +> **Note**: This option configures devicemapper loopback, which should not +> be used in production. + +Specifies the size to use when creating the loopback file for the +"metadata" device which is used for the thin pool. The default size +is 2G. The file is sparse, so it will not initially take up +this much space. + +###### Example + +```bash +$ sudo dockerd --storage-opt dm.loopmetadatasize=4G +``` + +##### `dm.fs` + +Specifies the filesystem type to use for the base device. The supported +options are "ext4" and "xfs". The default is "xfs" + +###### Example + +```bash +$ sudo dockerd --storage-opt dm.fs=ext4 +``` + +##### `dm.mkfsarg` + +Specifies extra mkfs arguments to be used when creating the base device. + +###### Example + +```bash +$ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal" +``` + +##### `dm.mountopt` + +Specifies extra mount options used when mounting the thin devices. + +###### Example + +```bash +$ sudo dockerd --storage-opt dm.mountopt=nodiscard +``` + +##### `dm.datadev` + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for data for the thin pool. + +If using a block device for device mapper storage, ideally both `datadev` and +`metadatadev` should be specified to completely avoid using the loopback +device. + +###### Example + +```bash +$ sudo dockerd \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 +``` + +##### `dm.metadatadev` + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for metadata for the thin pool. + +For best performance the metadata should be on a different spindle than the +data, or even better on an SSD. + +If setting up a new metadata pool it is required to be valid. This can be +achieved by zeroing the first 4k to indicate empty metadata, like this: + +```bash +$ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 +``` + +###### Example + +```bash +$ sudo dockerd \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 +``` + +##### `dm.blocksize` + +Specifies a custom blocksize to use for the thin pool. The default +blocksize is 64K. + +###### Example + +```bash +$ sudo dockerd --storage-opt dm.blocksize=512K +``` + +##### `dm.blkdiscard` + +Enables or disables the use of `blkdiscard` when removing devicemapper +devices. This is enabled by default (only) if using loopback devices and is +required to resparsify the loopback file on image/container removal. + +Disabling this on loopback can lead to *much* faster container removal +times, but will make the space used in `/var/lib/docker` directory not be +returned to the system for other use when containers are removed. + +###### Examples + +```bash +$ sudo dockerd --storage-opt dm.blkdiscard=false +``` + +##### `dm.override_udev_sync_check` + +Overrides the `udev` synchronization checks between `devicemapper` and `udev`. +`udev` is the device manager for the Linux kernel. + +To view the `udev` sync support of a Docker daemon that is using the +`devicemapper` driver, run: + +```bash +$ docker info +[...] +Udev Sync Supported: true +[...] +``` + +When `udev` sync support is `true`, then `devicemapper` and udev can +coordinate the activation and deactivation of devices for containers. + +When `udev` sync support is `false`, a race condition occurs between +the`devicemapper` and `udev` during create and cleanup. The race condition +results in errors and failures. (For information on these failures, see +[docker#4036](https://github.com/docker/docker/issues/4036)) + +To allow the `docker` daemon to start, regardless of `udev` sync not being +supported, set `dm.override_udev_sync_check` to true: + +```bash +$ sudo dockerd --storage-opt dm.override_udev_sync_check=true +``` + +When this value is `true`, the `devicemapper` continues and simply warns +you the errors are happening. + +> **Note**: The ideal is to pursue a `docker` daemon and environment that does +> support synchronizing with `udev`. For further discussion on this +> topic, see [docker#4036](https://github.com/docker/docker/issues/4036). +> Otherwise, set this flag for migrating existing Docker daemons to +> a daemon with a supported environment. + +##### `dm.use_deferred_removal` + +Enables use of deferred device removal if `libdm` and the kernel driver +support the mechanism. + +Deferred device removal means that if device is busy when devices are +being removed/deactivated, then a deferred removal is scheduled on +device. And devices automatically go away when last user of the device +exits. + +For example, when a container exits, its associated thin device is removed. +If that device has leaked into some other mount namespace and can't be +removed, the container exit still succeeds and this option causes the +system to schedule the device for deferred removal. It does not wait in a +loop trying to remove a busy device. + +###### Example + +```bash +$ sudo dockerd --storage-opt dm.use_deferred_removal=true +``` + +##### `dm.use_deferred_deletion` + +Enables use of deferred device deletion for thin pool devices. By default, +thin pool device deletion is synchronous. Before a container is deleted, +the Docker daemon removes any associated devices. If the storage driver +can not remove a device, the container deletion fails and daemon returns. + +```none +Error deleting container: Error response from daemon: Cannot destroy container +``` + +To avoid this failure, enable both deferred device deletion and deferred +device removal on the daemon. + +```bash +$ sudo dockerd \ + --storage-opt dm.use_deferred_deletion=true \ + --storage-opt dm.use_deferred_removal=true +``` + +With these two options enabled, if a device is busy when the driver is +deleting a container, the driver marks the device as deleted. Later, when +the device isn't in use, the driver deletes it. + +In general it should be safe to enable this option by default. It will help +when unintentional leaking of mount point happens across multiple mount +namespaces. + +##### `dm.min_free_space` + +Specifies the min free space percent in a thin pool require for new device +creation to succeed. This check applies to both free data space as well +as free metadata space. Valid values are from 0% - 99%. Value 0% disables +free space checking logic. If user does not specify a value for this option, +the Engine uses a default value of 10%. + +Whenever a new a thin pool device is created (during `docker pull` or during +container creation), the Engine checks if the minimum free space is +available. If sufficient space is unavailable, then device creation fails +and any relevant `docker` operation fails. + +To recover from this error, you must create more free space in the thin pool +to recover from the error. You can create free space by deleting some images +and containers from the thin pool. You can also add more storage to the thin +pool. + +To add more space to a LVM (logical volume management) thin pool, just add +more storage to the volume group container thin pool; this should automatically +resolve any errors. If your configuration uses loop devices, then stop the +Engine daemon, grow the size of loop files and restart the daemon to resolve +the issue. + +###### Example + +```bash +$ sudo dockerd --storage-opt dm.min_free_space=10% +``` + +##### `dm.xfs_nospace_max_retries` + +Specifies the maximum number of retries XFS should attempt to complete +IO when ENOSPC (no space) error is returned by underlying storage device. + +By default XFS retries infinitely for IO to finish and this can result +in unkillable process. To change this behavior one can set +xfs_nospace_max_retries to say 0 and XFS will not retry IO after getting +ENOSPC and will shutdown filesystem. + +###### Example + +```bash +$ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0 +``` + +#### ZFS options + +##### `zfs.fsname` + +Set zfs filesystem under which docker will create its own datasets. +By default docker will pick up the zfs filesystem where docker graph +(`/var/lib/docker`) is located. + +###### Example + +```bash +$ sudo dockerd -s zfs --storage-opt zfs.fsname=zroot/docker +``` + +#### Btrfs options + +##### `btrfs.min_space` + +Specifies the minimum size to use when creating the subvolume which is used +for containers. If user uses disk quota for btrfs when creating or running +a container with **--storage-opt size** option, docker should ensure the +**size** cannot be smaller than **btrfs.min_space**. + +###### Example + +```bash +$ sudo dockerd -s btrfs --storage-opt btrfs.min_space=10G +``` + +#### Overlay2 options + +##### `overlay2.override_kernel_check` + +Overrides the Linux kernel version check allowing overlay2. Support for +specifying multiple lower directories needed by overlay2 was added to the +Linux kernel in 4.0.0. However, some older kernel versions may be patched +to add multiple lower directory support for OverlayFS. This option should +only be used after verifying this support exists in the kernel. Applying +this option on a kernel without this support will cause failures on mount. + +### Docker runtime execution options + +The Docker daemon relies on a +[OCI](https://github.com/opencontainers/runtime-spec) compliant runtime +(invoked via the `containerd` daemon) as its interface to the Linux +kernel `namespaces`, `cgroups`, and `SELinux`. + +By default, the Docker daemon automatically starts `containerd`. If you want to +control `containerd` startup, manually start `containerd` and pass the path to +the `containerd` socket using the `--containerd` flag. For example: + +```bash +$ sudo dockerd --containerd /var/run/dev/docker-containerd.sock +``` + +Runtimes can be registered with the daemon either via the +configuration file or using the `--add-runtime` command line argument. + +The following is an example adding 2 runtimes via the configuration: + +```json +{ + "default-runtime": "runc", + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + +This is the same example via the command line: + +```bash +$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement +``` + +> **Note**: Defining runtime arguments via the command line is not supported. + +#### Options for the runtime + +You can configure the runtime using options specified +with the `--exec-opt` flag. All the flag's options have the `native` prefix. A +single `native.cgroupdriver` option is available. + +The `native.cgroupdriver` option specifies the management of the container's +cgroups. You can specify only specify `cgroupfs` or `systemd`. If you specify +`systemd` and it is not available, the system errors out. If you omit the +`native.cgroupdriver` option,` cgroupfs` is used. + +This example sets the `cgroupdriver` to `systemd`: + +```bash +$ sudo dockerd --exec-opt native.cgroupdriver=systemd +``` + +Setting this option applies to all containers the daemon launches. + +Also Windows Container makes use of `--exec-opt` for special purpose. Docker user +can specify default container isolation technology with this, for example: + +```bash +$ sudo dockerd --exec-opt isolation=hyperv +``` + +Will make `hyperv` the default isolation technology on Windows. If no isolation +value is specified on daemon start, on Windows client, the default is +`hyperv`, and on Windows server, the default is `process`. + +#### Daemon DNS options + +To set the DNS server for all Docker containers, use: + +```bash +$ sudo dockerd --dns 8.8.8.8 +``` + + +To set the DNS search domain for all Docker containers, use: + +```bash +$ sudo dockerd --dns-search example.com +``` + +#### Insecure registries + +Docker considers a private registry either secure or insecure. In the rest of +this section, *registry* is used for *private registry*, and `myregistry:5000` +is a placeholder example for a private registry. + +A secure registry uses TLS and a copy of its CA certificate is placed on the +Docker host at `/etc/docker/certs.d/myregistry:5000/ca.crt`. An insecure +registry is either not using TLS (i.e., listening on plain text HTTP), or is +using TLS with a CA certificate not known by the Docker daemon. The latter can +happen when the certificate was not found under +`/etc/docker/certs.d/myregistry:5000/`, or if the certificate verification +failed (i.e., wrong CA). + +By default, Docker assumes all, but local (see local registries below), +registries are secure. Communicating with an insecure registry is not possible +if Docker assumes that registry is secure. In order to communicate with an +insecure registry, the Docker daemon requires `--insecure-registry` in one of +the following two forms: + +* `--insecure-registry myregistry:5000` tells the Docker daemon that + myregistry:5000 should be considered insecure. +* `--insecure-registry 10.1.0.0/16` tells the Docker daemon that all registries + whose domain resolve to an IP address is part of the subnet described by the + CIDR syntax, should be considered insecure. + +The flag can be used multiple times to allow multiple registries to be marked +as insecure. + +If an insecure registry is not marked as insecure, `docker pull`, +`docker push`, and `docker search` will result in an error message prompting +the user to either secure or pass the `--insecure-registry` flag to the Docker +daemon as described above. + +Local registries, whose IP address falls in the 127.0.0.0/8 range, are +automatically marked as insecure as of Docker 1.3.2. It is not recommended to +rely on this, as it may change in the future. + +Enabling `--insecure-registry`, i.e., allowing un-encrypted and/or untrusted +communication, can be useful when running a local registry. However, +because its use creates security vulnerabilities it should ONLY be enabled for +testing purposes. For increased security, users should add their CA to their +system's list of trusted CAs instead of enabling `--insecure-registry`. + +##### Legacy Registries + +Enabling `--disable-legacy-registry` forces a docker daemon to only interact with registries which support the V2 protocol. Specifically, the daemon will not attempt `push`, `pull` and `login` to v1 registries. The exception to this is `search` which can still be performed on v1 registries. + +#### Running a Docker daemon behind an HTTPS_PROXY + +When running inside a LAN that uses an `HTTPS` proxy, the Docker Hub +certificates will be replaced by the proxy's certificates. These certificates +need to be added to your Docker host's configuration: + +1. Install the `ca-certificates` package for your distribution +2. Ask your network admin for the proxy's CA certificate and append them to + `/etc/pki/tls/certs/ca-bundle.crt` +3. Then start your Docker daemon with `HTTPS_PROXY=http://username:password@proxy:port/ dockerd`. + The `username:` and `password@` are optional - and are only needed if your + proxy is set up to require authentication. + +This will only add the proxy and authentication to the Docker daemon's requests - +your `docker build`s and running containers will need extra configuration to +use the proxy + +#### Default `ulimit` settings + +`--default-ulimit` allows you to set the default `ulimit` options to use for +all containers. It takes the same options as `--ulimit` for `docker run`. If +these defaults are not set, `ulimit` settings will be inherited, if not set on +`docker run`, from the Docker daemon. Any `--ulimit` options passed to +`docker run` will overwrite these defaults. + +Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to +set the maximum number of processes available to a user, not to a container. For details +please check the [run](run.md) reference. + +#### Node discovery + +The `--cluster-advertise` option specifies the `host:port` or `interface:port` +combination that this particular daemon instance should use when advertising +itself to the cluster. The daemon is reached by remote hosts through this value. +If you specify an interface, make sure it includes the IP address of the actual +Docker host. For Engine installation created through `docker-machine`, the +interface is typically `eth1`. + +The daemon uses [libkv](https://github.com/docker/libkv/) to advertise +the node within the cluster. Some key-value backends support mutual +TLS. To configure the client TLS settings used by the daemon can be configured +using the `--cluster-store-opt` flag, specifying the paths to PEM encoded +files. For example: + +```bash +$ sudo dockerd \ + --cluster-advertise 192.168.1.2:2376 \ + --cluster-store etcd://192.168.1.2:2379 \ + --cluster-store-opt kv.cacertfile=/path/to/ca.pem \ + --cluster-store-opt kv.certfile=/path/to/cert.pem \ + --cluster-store-opt kv.keyfile=/path/to/key.pem +``` + +The currently supported cluster store options are: + +| Option | Description | +|-----------------------|-------------| +| `discovery.heartbeat` | Specifies the heartbeat timer in seconds which is used by the daemon as a `keepalive` mechanism to make sure discovery module treats the node as alive in the cluster. If not configured, the default value is 20 seconds. | +| `discovery.ttl` | Specifies the TTL (time-to-live) in seconds which is used by the discovery module to timeout a node if a valid heartbeat is not received within the configured ttl value. If not configured, the default value is 60 seconds. | +| `kv.cacertfile` | Specifies the path to a local file with PEM encoded CA certificates to trust. | +| `kv.certfile` | Specifies the path to a local file with a PEM encoded certificate. This certificate is used as the client cert for communication with the Key/Value store. | +| `kv.keyfile` | Specifies the path to a local file with a PEM encoded private key. This private key is used as the client key for communication with the Key/Value store. | +| `kv.path` | Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. | + +#### Access authorization + +Docker's access authorization can be extended by authorization plugins that your +organization can purchase or build themselves. You can install one or more +authorization plugins when you start the Docker `daemon` using the +`--authorization-plugin=PLUGIN_ID` option. + +```bash +$ sudo dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +The `PLUGIN_ID` value is either the plugin's name or a path to its specification +file. The plugin's implementation determines whether you can specify a name or +path. Consult with your Docker administrator to get information about the +plugins available to you. + +Once a plugin is installed, requests made to the `daemon` through the command +line or Docker's Engine API are allowed or denied by the plugin. If you have +multiple plugins installed, at least one must allow the request for it to +complete. + +For information about how to create an authorization plugin, see [authorization +plugin](../../extend/plugins_authorization.md) section in the Docker extend section of this documentation. + + +#### Daemon user namespace options + +The Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling +a process, and therefore a container, to have a unique range of user and +group IDs which are outside the traditional user and group range utilized by +the host system. Potentially the most important security improvement is that, +by default, container processes running as the `root` user will have expected +administrative privilege (with some restrictions) inside the container but will +effectively be mapped to an unprivileged `uid` on the host. + +When user namespace support is enabled, Docker creates a single daemon-wide mapping +for all containers running on the same engine instance. The mappings will +utilize the existing subordinate user and group ID feature available on all modern +Linux distributions. +The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and +[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be +read for the user, and optional group, specified to the `--userns-remap` +parameter. If you do not wish to specify your own user and/or group, you can +provide `default` as the value to this flag, and a user will be created on your behalf +and provided subordinate uid and gid ranges. This default user will be named +`dockremap`, and entries will be created for it in `/etc/passwd` and +`/etc/group` using your distro's standard user and group creation tools. + +> **Note**: The single mapping per-daemon restriction is in place for now +> because Docker shares image layers from its local cache across all +> containers running on the engine instance. Since file ownership must be +> the same for all containers sharing the same layer content, the decision +> was made to map the file ownership on `docker pull` to the daemon's user and +> group mappings so that there is no delay for running containers once the +> content is downloaded. This design preserves the same performance for `docker +> pull`, `docker push`, and container startup as users expect with +> user namespaces disabled. + +##### Start the daemon with user namespaces enabled + +To enable user namespace support, start the daemon with the +`--userns-remap` flag, which accepts values in the following formats: + + - uid + - uid:gid + - username + - username:groupname + +If numeric IDs are provided, translation back to valid user or group names +will occur so that the subordinate uid and gid information can be read, given +these resources are name-based, not id-based. If the numeric ID information +provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon +startup will fail with an error message. + +**Example: starting with default Docker user management:** + +```bash +$ sudo dockerd --userns-remap=default +``` + +When `default` is provided, Docker will create - or find the existing - user and group +named `dockremap`. If the user is created, and the Linux distribution has +appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated +with a contiguous 65536 length range of subordinate user and group IDs, starting +at an offset based on prior entries in those files. For example, Ubuntu will +create the following range, based on an existing user named `user1` already owning +the first 65536 range: + +```bash +$ cat /etc/subuid +user1:100000:65536 +dockremap:165536:65536 +``` + +If you have a preferred/self-managed user with subordinate ID mappings already +configured, you can provide that username or uid to the `--userns-remap` flag. +If you have a group that doesn't match the username, you may provide the `gid` +or group name as well; otherwise the username will be used as the group name +when querying the system for the subordinate group ID range. + +The output of `docker info` can be used to determine if the daemon is running +with user namespaces enabled or not. If the daemon is configured with user +namespaces, the Security Options entry in the response will list "userns" as +one of the enabled security features. + +##### Behavior differences when user namespaces are enabled + +When you start the Docker daemon with `--userns-remap`, Docker segregates the graph directory +where the images are stored by adding an extra directory with a name corresponding to the +remapped UID and GID. For example, if the remapped UID and GID begin with `165536`, all +images and containers running with that remap setting are located in `/var/lib/docker/165536.165536` +instead of `/var/lib/docker/`. + +In addition, the files and directories within the new directory, which correspond to +images and container layers, are also owned by the new UID and GID. To set the ownership +correctly, you need to re-pull the images and restart the containers after starting the +daemon with `--userns-remap`. + +##### Detailed information on `subuid`/`subgid` ranges + +Given potential advanced use of the subordinate ID ranges by power users, the +following paragraphs define how the Docker daemon currently uses the range entries +found within the subordinate range files. + +The simplest case is that only one contiguous range is defined for the +provided user or group. In this case, Docker will use that entire contiguous +range for the mapping of host uids and gids to the container process. This +means that the first ID in the range will be the remapped root user, and the +IDs above that initial ID will map host ID 1 through the end of the range. + +From the example `/etc/subuid` content shown above, the remapped root +user would be uid 165536. + +If the system administrator has set up multiple ranges for a single user or +group, the Docker daemon will read all the available ranges and use the +following algorithm to create the mapping ranges: + +1. The range segments found for the particular user will be sorted by *start ID* ascending. +2. Map segments will be created from each range in increasing value with a length matching the length of each segment. Therefore the range segment with the lowest numeric starting value will be equal to the remapped root, and continue up through host uid/gid equal to the range segment length. As an example, if the lowest segment starts at ID 1000 and has a length of 100, then a map of 1000 -> 0 (the remapped root) up through 1100 -> 100 will be created from this segment. If the next segment starts at ID 10000, then the next map will start with mapping 10000 -> 101 up to the length of this second segment. This will continue until no more segments are found in the subordinate files for this user. +3. If more than five range segments exist for a single user, only the first five will be utilized, matching the kernel's limitation of only five entries in `/proc/self/uid_map` and `proc/self/gid_map`. + +##### Disable user namespace for a container + +If you enable user namespaces on the daemon, all containers are started +with user namespaces enabled. In some situations you might want to disable +this feature for a container, for example, to start a privileged container (see +[user namespace known restrictions](#user-namespace-known-restrictions)). +To enable those advanced features for a specific container use `--userns=host` +in the `run/exec/create` command. +This option will completely disable user namespace mapping for the container's user. + +##### User namespace known restrictions + +The following standard Docker features are currently incompatible when +running a Docker daemon with user namespaces enabled: + + - sharing PID or NET namespaces with the host (`--pid=host` or `--net=host`) + - Using `--privileged` mode flag on `docker run` (unless also specifying `--userns=host`) + +In general, user namespaces are an advanced feature and will require +coordination with other capabilities. For example, if volumes are mounted from +the host, file ownership will have to be pre-arranged if the user or +administrator wishes the containers to have expected access to the volume +contents. Note that when using external volume or graph driver plugins, those +external software programs must be made aware of user and group mapping ranges +if they are to work seamlessly with user namespace support. + +Finally, while the `root` user inside a user namespaced container process has +many of the expected admin privileges that go along with being the superuser, the +Linux kernel has restrictions based on internal knowledge that this is a user namespaced +process. The most notable restriction that we are aware of at this time is the +inability to use `mknod`. Permission will be denied for device creation even as +container `root` inside a user namespace. + +### Miscellaneous options + +IP masquerading uses address translation to allow containers without a public +IP to talk to other machines on the Internet. This may interfere with some +network topologies and can be disabled with `--ip-masq=false`. + +Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and +for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be +set like this: + + DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 + # or + export DOCKER_TMPDIR=/mnt/disk2/tmp + /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 + +#### Default cgroup parent + +The `--cgroup-parent` option allows you to set the default cgroup parent +to use for containers. If this option is not set, it defaults to `/docker` for +fs cgroup driver and `system.slice` for systemd cgroup driver. + +If the cgroup has a leading forward slash (`/`), the cgroup is created +under the root cgroup, otherwise the cgroup is created under the daemon +cgroup. + +Assuming the daemon is running in cgroup `daemoncgroup`, +`--cgroup-parent=/foobar` creates a cgroup in +`/sys/fs/cgroup/memory/foobar`, whereas using `--cgroup-parent=foobar` +creates the cgroup in `/sys/fs/cgroup/memory/daemoncgroup/foobar` + +The systemd cgroup driver has different rules for `--cgroup-parent`. Systemd +represents hierarchy by slice and the name of the slice encodes the location in +the tree. So `--cgroup-parent` for systemd cgroups should be a slice name. A +name can consist of a dash-separated series of names, which describes the path +to the slice from the root slice. For example, `--cgroup-parent=user-a-b.slice` +means the memory cgroup for the container is created in +`/sys/fs/cgroup/memory/user.slice/user-a.slice/user-a-b.slice/docker-.scope`. + +This setting can also be set per container, using the `--cgroup-parent` +option on `docker create` and `docker run`, and takes precedence over +the `--cgroup-parent` option on the daemon. + +#### Daemon metrics + +The `--metrics-addr` option takes a tcp address to serve the metrics API. +This feature is still experimental, therefore, the daemon must be running in experimental +mode for this feature to work. + +To serve the metrics API on localhost:1337 you would specify `--metrics-addr 127.0.0.1:1337` +allowing you to make requests on the API at `127.0.0.1:1337/metrics` to receive metrics in the +[prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) format. + +If you are running a prometheus server you can add this address to your scrape configs +to have prometheus collect metrics on Docker. For more information +on prometheus you can view the website [here](https://prometheus.io/). + +```none +scrape_configs: + - job_name: 'docker' + static_configs: + - targets: ['127.0.0.1:1337'] +``` + +Please note that this feature is still marked as experimental as metrics and metric +names could change while this feature is still in experimental. Please provide +feedback on what you would like to see collected in the API. + +#### Daemon configuration file + +The `--config-file` option allows you to set any configuration option +for the daemon in a JSON format. This file uses the same flag names as keys, +except for flags that allow several entries, where it uses the plural +of the flag name, e.g., `labels` for the `label` flag. + +The options set in the configuration file must not conflict with options set +via flags. The docker daemon fails to start if an option is duplicated between +the file and the flags, regardless their value. We do this to avoid +silently ignore changes introduced in configuration reloads. +For example, the daemon fails to start if you set daemon labels +in the configuration file and also set daemon labels via the `--label` flag. +Options that are not present in the file are ignored when the daemon starts. + +##### On Linux + +The default location of the configuration file on Linux is +`/etc/docker/daemon.json`. The `--config-file` flag can be used to specify a + non-default location. + +This is a full example of the allowed configuration options on Linux: + +```json +{ + "authorization-plugins": [], + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "exec-root": "", + "experimental": false, + "storage-driver": "", + "storage-opts": [], + "labels": [], + "live-restore": true, + "log-driver": "", + "log-opts": {}, + "mtu": 0, + "pidfile": "", + "graph": "", + "cluster-store": "", + "cluster-store-opts": {}, + "cluster-advertise": "", + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "shutdown-timeout": 15, + "debug": true, + "hosts": [], + "log-level": "", + "tls": true, + "tlsverify": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "swarm-default-advertise-addr": "", + "api-cors-header": "", + "selinux-enabled": false, + "userns-remap": "", + "group": "", + "cgroup-parent": "", + "default-ulimits": {}, + "init": false, + "init-path": "/usr/libexec/docker-init", + "ipv6": false, + "iptables": false, + "ip-forward": false, + "ip-masq": false, + "userland-proxy": false, + "userland-proxy-path": "/usr/libexec/docker-proxy", + "ip": "0.0.0.0", + "bridge": "", + "bip": "", + "fixed-cidr": "", + "fixed-cidr-v6": "", + "default-gateway": "", + "default-gateway-v6": "", + "icc": false, + "raw-logs": false, + "registry-mirrors": [], + "seccomp-profile": "", + "insecure-registries": [], + "disable-legacy-registry": false, + "default-runtime": "runc", + "oom-score-adjust": -500, + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + +> **Note:** You cannot set options in `daemon.json` that have already been set on +> daemon startup as a flag. +> On systems that use `systemd` to start the Docker daemon, `-H` is already set, so +> you cannot use the `hosts` key in `daemon.json` to add listening addresses. +> See https://docs.docker.com/engine/admin/systemd/#custom-docker-daemon-options for how +> to accomplish this task with a systemd drop-in file. + +##### On Windows + +The default location of the configuration file on Windows is + `%programdata%\docker\config\daemon.json`. The `--config-file` flag can be + used to specify a non-default location. + +This is a full example of the allowed configuration options on Windows: + +```json +{ + "authorization-plugins": [], + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "experimental": false, + "storage-driver": "", + "storage-opts": [], + "labels": [], + "log-driver": "", + "mtu": 0, + "pidfile": "", + "graph": "", + "cluster-store": "", + "cluster-advertise": "", + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "shutdown-timeout": 15, + "debug": true, + "hosts": [], + "log-level": "", + "tlsverify": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "swarm-default-advertise-addr": "", + "group": "", + "default-ulimits": {}, + "bridge": "", + "fixed-cidr": "", + "raw-logs": false, + "registry-mirrors": [], + "insecure-registries": [], + "disable-legacy-registry": false +} +``` + +#### Configuration reload behavior + +Some options can be reconfigured when the daemon is running without requiring +to restart the process. We use the `SIGHUP` signal in Linux to reload, and a global event +in Windows with the key `Global\docker-daemon-config-$PID`. The options can +be modified in the configuration file but still will check for conflicts with +the provided flags. The daemon fails to reconfigure itself +if there are conflicts, but it won't stop execution. + +The list of currently supported options that can be reconfigured is this: + +- `debug`: it changes the daemon to debug mode when set to true. +- `cluster-store`: it reloads the discovery store with the new address. +- `cluster-store-opts`: it uses the new options to reload the discovery store. +- `cluster-advertise`: it modifies the address advertised after reloading. +- `labels`: it replaces the daemon labels with a new set of labels. +- `live-restore`: Enables [keeping containers alive during daemon downtime](https://docs.docker.com/engine/admin/live-restore/). +- `max-concurrent-downloads`: it updates the max concurrent downloads for each pull. +- `max-concurrent-uploads`: it updates the max concurrent uploads for each push. +- `default-runtime`: it updates the runtime to be used if not is + specified at container creation. It defaults to "default" which is + the runtime shipped with the official docker packages. +- `runtimes`: it updates the list of available OCI runtimes that can + be used to run containers +- `authorization-plugin`: specifies the authorization plugins to use. +- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in newly reloaded insecure resgitries, these existing ones will be removed from daemon's config. + +Updating and reloading the cluster configurations such as `--cluster-store`, +`--cluster-advertise` and `--cluster-store-opts` will take effect only if +these configurations were not previously configured. If `--cluster-store` +has been provided in flags and `cluster-advertise` not, `cluster-advertise` +can be added in the configuration file without accompanied by `--cluster-store`. +Configuration reload will log a warning message if it detects a change in +previously configured cluster configurations. + + +### Run multiple daemons + +> **Note:** Running multiple daemons on a single host is considered as "experimental". The user should be aware of +> unsolved problems. This solution may not work properly in some cases. Solutions are currently under development +> and will be delivered in the near future. + +This section describes how to run multiple Docker daemons on a single host. To +run multiple daemons, you must configure each daemon so that it does not +conflict with other daemons on the same host. You can set these options either +by providing them as flags, or by using a [daemon configuration file](#daemon-configuration-file). + +The following daemon options must be configured for each daemon: + +```none +-b, --bridge= Attach containers to a network bridge +--exec-root=/var/run/docker Root of the Docker execdriver +-g, --graph=/var/lib/docker Root of the Docker runtime +-p, --pidfile=/var/run/docker.pid Path to use for daemon PID file +-H, --host=[] Daemon socket(s) to connect to +--iptables=true Enable addition of iptables rules +--config-file=/etc/docker/daemon.json Daemon configuration file +--tlscacert="~/.docker/ca.pem" Trust certs signed only by this CA +--tlscert="~/.docker/cert.pem" Path to TLS certificate file +--tlskey="~/.docker/key.pem" Path to TLS key file +``` + +When your daemons use different values for these flags, you can run them on the same host without any problems. +It is very important to properly understand the meaning of those options and to use them correctly. + +- The `-b, --bridge=` flag is set to `docker0` as default bridge network. It is created automatically when you install Docker. +If you are not using the default, you must create and configure the bridge manually or just set it to 'none': `--bridge=none` +- `--exec-root` is the path where the container state is stored. The default value is `/var/run/docker`. Specify the path for +your running daemon here. +- `--graph` is the path where images are stored. The default value is `/var/lib/docker`. To avoid any conflict with other daemons +set this parameter separately for each daemon. +- `-p, --pidfile=/var/run/docker.pid` is the path where the process ID of the daemon is stored. Specify the path for your +pid file here. +- `--host=[]` specifies where the Docker daemon will listen for client connections. If unspecified, it defaults to `/var/run/docker.sock`. +- `--iptables=false` prevents the Docker daemon from adding iptables rules. If +multiple daemons manage iptables rules, they may overwrite rules set by another +daemon. Be aware that disabling this option requires you to manually add +iptables rules to expose container ports. If you prevent Docker from adding +iptables rules, Docker will also not add IP masquerading rules, even if you set +`--ip-masq` to `true`. Without IP masquerading rules, Docker containers will not be +able to connect to external hosts or the internet when using network other than +default bridge. +- `--config-file=/etc/docker/daemon.json` is the path where configuration file is stored. You can use it instead of +daemon flags. Specify the path for each daemon. +- `--tls*` Docker daemon supports `--tlsverify` mode that enforces encrypted and authenticated remote connections. +The `--tls*` options enable use of specific certificates for individual daemons. + +Example script for a separate “bootstrap” instance of the Docker daemon without network: + +```bash +$ sudo dockerd \ + -H unix:///var/run/docker-bootstrap.sock \ + -p /var/run/docker-bootstrap.pid \ + --iptables=false \ + --ip-masq=false \ + --bridge=none \ + --graph=/var/lib/docker-bootstrap \ + --exec-root=/var/run/docker-bootstrap +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/events.md b/vendor/github.com/moby/moby/docs/reference/commandline/events.md new file mode 100644 index 0000000..9bf0513 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/events.md @@ -0,0 +1,349 @@ +--- +title: "events" +description: "The events command description and usage" +keywords: "events, container, report" +--- + + + +# events + +```markdown +Usage: docker events [OPTIONS] + +Get real time events from the server + +Options: + -f, --filter value Filter output based on conditions provided (default []) + --format string Format the output using the given Go template + --help Print usage + --since string Show all events created since timestamp + --until string Stream events until this timestamp +``` + +## Description + +Use `docker events` to get real-time events from the server. These events differ +per Docker object type. + +### Object types + +#### Containers + +Docker containers report the following events: + +- `attach` +- `commit` +- `copy` +- `create` +- `destroy` +- `detach` +- `die` +- `exec_create` +- `exec_detach` +- `exec_start` +- `export` +- `health_status` +- `kill` +- `oom` +- `pause` +- `rename` +- `resize` +- `restart` +- `start` +- `stop` +- `top` +- `unpause` +- `update` + +#### Images + +Docker images report the following events: + +- `delete` +- `import` +- `load` +- `pull` +- `push` +- `save` +- `tag` +- `untag` + +#### Plugins + +Docker plugins report the following events: + +- `install` +- `enable` +- `disable` +- `remove` + +#### Volumes + +Docker volumes report the following events: + +- `create` +- `mount` +- `unmount` +- `destroy` + +#### Networks + +Docker networks report the following events: + +- `create` +- `connect` +- `disconnect` +- `destroy` + +#### Daemons + +Docker daemons report the following events: + +- `reload` + +### Limiting, filtering, and formatting the output + +#### Limit events by time + +The `--since` and `--until` parameters can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the client machine’s time. If you do not provide the `--since` option, +the command returns only new and/or live events. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +#### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If you would +like to use multiple filters, pass multiple flags (e.g., +`--filter "foo=bar" --filter "bif=baz"`) + +Using the same filter multiple times will be handled as a *OR*; for example +`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display +events for container 588a23dac085 *OR* container a8f7720b8c22 + +Using multiple filters will be handled as a *AND*; for example +`--filter container=588a23dac085 --filter event=start` will display events for +container container 588a23dac085 *AND* the event type is *start* + +The currently supported filters are: + +* container (`container=`) +* daemon (`daemon=`) +* event (`event=`) +* image (`image=`) +* label (`label=` or `label==`) +* network (`network=`) +* plugin (`plugin=`) +* type (`type=`) +* volume (`volume=`) + +#### Format + +If a format (`--format`) is specified, the given template will be executed +instead of the default +format. Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +If a format is set to `{{json .}}`, the events are streamed as valid JSON +Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . + +## Examples + +### Basic example + +You'll need two shells for this example. + +**Shell 1: Listening for events:** + +```bash +$ docker events +``` + +**Shell 2: Start and Stop containers:** + +```bash +$ docker create --name test alpine:latest top +$ docker start test +$ docker stop test +``` + +**Shell 1: (Again .. now showing events):** + +```none +2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) +``` + +To exit the `docker events` command, use `CTRL+C`. + +### Filter events by time + +You can filter the output by an absolute timestamp or relative time on the host +machine, using the following different time syntaxes: + +```bash +$ docker events --since 1483283804 +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --since '2017-01-05' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --since '2013-09-03T15:49:29' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --since '10m' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) +``` + +### Filter events by criteria + +The following commands show several different ways to filter the `docker event` +output. + +```bash +$ docker events --filter 'event=stop' + +2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain) + +$ docker events --filter 'image=alpine' + +2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner) +2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) +2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15) +2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9) +2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner) +2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner) + +$ docker events --filter 'container=test' + +2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --filter 'container=test' --filter 'container=d9cdb1525ea8' + +2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) +2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9) +2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test) +2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --filter 'container=test' --filter 'event=stop' + +2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test) + +$ docker events --filter 'type=volume' + +2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) +2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate) +2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local) +2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + +$ docker events --filter 'type=network' + +2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge) +2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge) + +$ docker events --filter 'container=container_1' --filter 'container=container_2' + +2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) +2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) +2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8) +2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + +$ docker events --filter 'type=volume' + +2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) +2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) +2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) +2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + +$ docker events --filter 'type=network' + +2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) +2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) +``` + +The `type=plugin` filter is experimental. + +```bash +$ docker events --filter 'type=plugin' + +2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) +2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) +``` + +### Format the output + +```bash +$ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' + +Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +``` + +#### Format as JSON + +```none + $ docker events --format '{{json .}}' + + {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. + {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. + {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/exec.md b/vendor/github.com/moby/moby/docs/reference/commandline/exec.md new file mode 100644 index 0000000..1ae46cf --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/exec.md @@ -0,0 +1,91 @@ +--- +title: "exec" +description: "The exec command description and usage" +keywords: "command, container, run, execute" +--- + + + +# exec + +```markdown +Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] + +Run a command in a running container + +Options: + -d, --detach Detached mode: run command in the background + --detach-keys Override the key sequence for detaching a container + -e, --env=[] Set environment variables + --help Print usage + -i, --interactive Keep STDIN open even if not attached + --privileged Give extended privileges to the command + -t, --tty Allocate a pseudo-TTY + -u, --user Username or UID (format: [:]) +``` + +## Description + +The `docker exec` command runs a new command in a running container. + +The command started using `docker exec` only runs while the container's primary +process (`PID 1`) is running, and it is not restarted if the container is +restarted. + +## Examples + +### Run `docker exec` on a running container + +First, start a container. + +```bash +$ docker run --name ubuntu_bash --rm -i -t ubuntu bash +``` + +This will create a container named `ubuntu_bash` and start a Bash session. + +Next, execute a command on the container. + +```bash +$ docker exec -d ubuntu_bash touch /tmp/execWorks +``` + +This will create a new file `/tmp/execWorks` inside the running container +`ubuntu_bash`, in the background. + +Next, execute an interactive `bash` shell on the container. + +```bash +$ docker exec -it ubuntu_bash bash +``` + +This will create a new Bash session in the container `ubuntu_bash`. + +### Try to run `docker exec` on a paused container + +If the container is paused, then the `docker exec` command will fail with an error: + +```bash +$ docker pause test + +test + +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +1ae3b36715d2 ubuntu:latest "bash" 17 seconds ago Up 16 seconds (Paused) test + +$ docker exec test ls + +FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec + +$ echo $? +1 +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/export.md b/vendor/github.com/moby/moby/docs/reference/commandline/export.md new file mode 100644 index 0000000..9de5097 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/export.md @@ -0,0 +1,48 @@ +--- +title: "export" +description: "The export command description and usage" +keywords: "export, file, system, container" +--- + + + +# export + +```markdown +Usage: docker export [OPTIONS] CONTAINER + +Export a container's filesystem as a tar archive + +Options: + --help Print usage + -o, --output string Write to a file, instead of STDOUT +``` + +## Description + +The `docker export` command does not export the contents of volumes associated +with the container. If a volume is mounted on top of an existing directory in +the container, `docker export` will export the contents of the *underlying* +directory, not the contents of the volume. + +Refer to [Backup, restore, or migrate data volumes](https://docs.docker.com/engine/tutorials/dockervolumes/#backup-restore-or-migrate-data-volumes) +in the user guide for examples on exporting data in a volume. + +## Examples + +Each of these commands has the same result. + +```bash +$ docker export red_panda > latest.tar +``` + +```bash +$ docker export --output="latest.tar" red_panda +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/history.md b/vendor/github.com/moby/moby/docs/reference/commandline/history.md new file mode 100644 index 0000000..b68cc86 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/history.md @@ -0,0 +1,56 @@ +--- +title: "history" +description: "The history command description and usage" +keywords: "docker, image, history" +--- + + + +# history + +```markdown +Usage: docker history [OPTIONS] IMAGE + +Show the history of an image + +Options: + --help Print usage + -H, --human Print sizes and dates in human readable format (default true) + --no-trunc Don't truncate output + -q, --quiet Only show numeric IDs +``` + + +## Examples + +To see how the `docker:latest` image was built: + +```bash +$ docker history docker + +IMAGE CREATED CREATED BY SIZE COMMENT +3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B +8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB +be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB +4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB +750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi + +# image + +```markdown +Usage: docker image COMMAND + +Manage images + +Options: + --help Print usage + +Commands: + build Build an image from a Dockerfile + history Show the history of an image + import Import the contents from a tarball to create a filesystem image + inspect Display detailed information on one or more images + load Load an image from a tar archive or STDIN + ls List images + prune Remove unused images + pull Pull an image or a repository from a registry + push Push an image or a repository to a registry + rm Remove one or more images + save Save one or more images to a tar archive (streamed to STDOUT by default) + tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE + +Run 'docker image COMMAND --help' for more information on a command. + +``` + +## Description + +Manage images. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/image_prune.md b/vendor/github.com/moby/moby/docs/reference/commandline/image_prune.md new file mode 100644 index 0000000..6e1fd26 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/image_prune.md @@ -0,0 +1,76 @@ +--- +title: "image prune" +description: "Remove all stopped images" +keywords: "image, prune, delete, remove" +--- + + + +# image prune + +```markdown +Usage: docker image prune [OPTIONS] + +Remove unused images + +Options: + -a, --all Remove all unused images, not just dangling ones + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Description + +Remove all dangling images. If `-a` is specified, will also remove all images not referenced by any container. + +## Examples + +Example output: + +```bash +$ docker image prune -a + +WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue? [y/N] y +Deleted Images: +untagged: alpine:latest +untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a +deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba +deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f +untagged: alpine:3.3 +untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423 +untagged: my-jq:latest +deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff +deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65 +deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7 +untagged: my-curl:latest +deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e +deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9 +deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e +deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec +deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06 +deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c +deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35 +deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809 +deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0 +deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac +deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b +deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1 + +Total reclaimed space: 16.43 MB +``` + +## Related commands + +* [system df](system_df.md) +* [container prune](container_prune.md) +* [volume prune](volume_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/images.md b/vendor/github.com/moby/moby/docs/reference/commandline/images.md new file mode 100644 index 0000000..9f7f555 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/images.md @@ -0,0 +1,342 @@ +--- +title: "images" +description: "The images command description and usage" +keywords: "list, docker, images" +--- + + + +# images + +```markdown +Usage: docker images [OPTIONS] [REPOSITORY[:TAG]] + +List images + +Options: + -a, --all Show all images (default hides intermediate images) + --digests Show digests + -f, --filter value Filter output based on conditions provided (default []) + - dangling=(true|false) + - label= or label== + - before=([:tag]||) + - since=([:tag]||) + - reference=(pattern of an image reference) + --format string Pretty-print images using a Go template + --help Print usage + --no-trunc Don't truncate output + -q, --quiet Only show numeric IDs +``` + +## Description + +The default `docker images` will show all top level +images, their repository and tags, and their size. + +Docker images have intermediate layers that increase reusability, +decrease disk usage, and speed up `docker build` by +allowing each step to be cached. These intermediate layers are not shown +by default. + +The `SIZE` is the cumulative space taken up by the image and all +its parent images. This is also the disk space used by the contents of the +Tar file created when you `docker save` an image. + +An image will be listed more than once if it has multiple repository names +or tags. This single image (identifiable by its matching `IMAGE ID`) +uses up the `SIZE` listed only once. + +## Examples + +### List the most recently created images + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE + 77af4d6b9913 19 hours ago 1.089 GB +committ latest b6fa739cedf5 19 hours ago 1.089 GB + 78a85c484f71 19 hours ago 1.089 GB +docker latest 30557a29d5ab 20 hours ago 1.089 GB + 5ed6274db6ce 24 hours ago 1.089 GB +postgres 9 746b819f315e 4 days ago 213.4 MB +postgres 9.3 746b819f315e 4 days ago 213.4 MB +postgres 9.3.5 746b819f315e 4 days ago 213.4 MB +postgres latest 746b819f315e 4 days ago 213.4 MB +``` + +### List images by name and tag + +The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument +that restricts the list to images that match the argument. If you specify +`REPOSITORY`but no `TAG`, the `docker images` command lists all images in the +given repository. + +For example, to list all images in the "java" repository, run this command : + +```bash +$ docker images java + +REPOSITORY TAG IMAGE ID CREATED SIZE +java 8 308e519aac60 6 days ago 824.5 MB +java 7 493d82594c15 3 months ago 656.3 MB +java latest 2711b1d6f3aa 5 months ago 603.9 MB +``` + +The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, +`docker images jav` does not match the image `java`. + +If both `REPOSITORY` and `TAG` are provided, only images matching that +repository and tag are listed. To find all local images in the "java" +repository with tag "8" you can use: + +```bash +$ docker images java:8 + +REPOSITORY TAG IMAGE ID CREATED SIZE +java 8 308e519aac60 6 days ago 824.5 MB +``` + +If nothing matches `REPOSITORY[:TAG]`, the list is empty. + +```bash +$ docker images java:0 + +REPOSITORY TAG IMAGE ID CREATED SIZE +``` + +### List the full length image IDs + +```bash +$ docker images --no-trunc + +REPOSITORY TAG IMAGE ID CREATED SIZE + sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB +committest latest sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB + sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB +docker latest sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB + sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB + sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB + sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB +tryout latest sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB + sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB +``` + +### List image digests + +Images that use the v2 or later format have a content-addressable identifier +called a `digest`. As long as the input used to generate the image is +unchanged, the digest value is predictable. To list image digest values, use +the `--digests` flag: + +```bash +$ docker images --digests +REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB +``` + +When pushing or pulling to a 2.0 registry, the `push` or `pull` command +output includes the image digest. You can `pull` using a digest value. You can +also reference by digest in `create`, `run`, and `rmi` commands, as well as the +`FROM` image reference in a Dockerfile. + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* dangling (boolean - true or false) +* label (`label=` or `label==`) +* before (`[:]`, `` or ``) - filter images created before given id or references +* since (`[:]`, `` or ``) - filter images created since given id or references +* reference (pattern of an image reference) - filter images whose reference matches the specified pattern + +#### Show untagged images (dangling) + +```bash +$ docker images --filter "dangling=true" + +REPOSITORY TAG IMAGE ID CREATED SIZE + 8abc22fbb042 4 weeks ago 0 B + 48e5f45168b9 4 weeks ago 2.489 MB + bf747efa0e2f 4 weeks ago 0 B + 980fe10e5736 12 weeks ago 101.4 MB + dea752e4e117 12 weeks ago 101.4 MB + 511136ea3c5a 8 months ago 0 B +``` + +This will display untagged images that are the leaves of the images tree (not +intermediary layers). These images occur when a new build of an image takes the +`repo:tag` away from the image ID, leaving it as `:` or untagged. +A warning will be issued if trying to remove an image when a container is presently +using it. By having this flag it allows for batch cleanup. + +You can use this in conjunction with `docker rmi ...`: + +```bash +$ docker rmi $(docker images -f "dangling=true" -q) + +8abc22fbb042 +48e5f45168b9 +bf747efa0e2f +980fe10e5736 +dea752e4e117 +511136ea3c5a +``` + +> **Note**: Docker warns you if any containers exist that are using these +> untagged images. + + +#### Show images with a given label + +The `label` filter matches images based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches images with the `com.example.version` label regardless of its value. + +```bash +$ docker images --filter "label=com.example.version" + +REPOSITORY TAG IMAGE ID CREATED SIZE +match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB +match-me-2 latest dea752e4e117 About a minute ago 188.3 MB +``` + +The following filter matches images with the `com.example.version` label with the `1.0` value. + +```bash +$ docker images --filter "label=com.example.version=1.0" + +REPOSITORY TAG IMAGE ID CREATED SIZE +match-me latest 511136ea3c5a About a minute ago 188.3 MB +``` + +In this example, with the `0.1` value, it returns an empty set because no matches were found. + +```bash +$ docker images --filter "label=com.example.version=0.1" +REPOSITORY TAG IMAGE ID CREATED SIZE +``` + +#### Filter images by time + +The `before` filter shows only images created before the image with +given id or reference. For example, having these images: + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +image1 latest eeae25ada2aa 4 minutes ago 188.3 MB +image2 latest dea752e4e117 9 minutes ago 188.3 MB +image3 latest 511136ea3c5a 25 minutes ago 188.3 MB +``` + +Filtering with `before` would give: + +```bash +$ docker images --filter "before=image1" + +REPOSITORY TAG IMAGE ID CREATED SIZE +image2 latest dea752e4e117 9 minutes ago 188.3 MB +image3 latest 511136ea3c5a 25 minutes ago 188.3 MB +``` + +Filtering with `since` would give: + +```bash +$ docker images --filter "since=image3" +REPOSITORY TAG IMAGE ID CREATED SIZE +image1 latest eeae25ada2aa 4 minutes ago 188.3 MB +image2 latest dea752e4e117 9 minutes ago 188.3 MB +``` + +#### Filter images by reference + +The `reference` filter shows only images whose reference matches +the specified pattern. + +```bash + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest e02e811dd08f 5 weeks ago 1.09 MB + busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB + busybox musl 733eb3059dce 5 weeks ago 1.21 MB + busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB +``` + +Filtering with `reference` would give: + +```bash + $ docker images --filter=reference='busy*:*libc' + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB + busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB +``` + +### Format the output + +The formatting option (`--format`) will pretty print container output +using a Go template. + +Valid placeholders for the Go template are listed below: + +| Placeholder | Description| +| ---- | ---- | +| `.ID` | Image ID | +| `.Repository` | Image repository | +| `.Tag` | Image tag | +| `.Digest` | Image digest | +| `.CreatedSince` | Elapsed time since the image was created | +| `.CreatedAt` | Time when the image was created | +| `.Size` | Image disk size | + +When using the `--format` option, the `image` command will either +output the data exactly as the template declares or, when using the +`table` directive, will include column headers as well. + +The following example uses a template without headers and outputs the +`ID` and `Repository` entries separated by a colon for all images: + +```bash +$ docker images --format "{{.ID}}: {{.Repository}}" + +77af4d6b9913: +b6fa739cedf5: committ +78a85c484f71: +30557a29d5ab: docker +5ed6274db6ce: +746b819f315e: postgres +746b819f315e: postgres +746b819f315e: postgres +746b819f315e: postgres +``` + +To list all images with their repository and tag in a table format you +can use: + +```bash +$ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" + +IMAGE ID REPOSITORY TAG +77af4d6b9913 +b6fa739cedf5 committ latest +78a85c484f71 +30557a29d5ab docker latest +5ed6274db6ce +746b819f315e postgres 9 +746b819f315e postgres 9.3 +746b819f315e postgres 9.3.5 +746b819f315e postgres latest +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/import.md b/vendor/github.com/moby/moby/docs/reference/commandline/import.md new file mode 100644 index 0000000..57edf65 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/import.md @@ -0,0 +1,89 @@ +--- +title: "import" +description: "The import command description and usage" +keywords: "import, file, system, container" +--- + + + +# import + +```markdown +Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] + +Import the contents from a tarball to create a filesystem image + +Options: + -c, --change value Apply Dockerfile instruction to the created image (default []) + --help Print usage + -m, --message string Set commit message for imported image +``` + +## Description + +You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The +`URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) +containing a filesystem or to an individual file on the Docker host. If you +specify an archive, Docker untars it in the container relative to the `/` +(root). If you specify an individual file, you must specify the full path within +the host. To import from a remote location, specify a `URI` that begins with the +`http://` or `https://` protocol. + +The `--change` option will apply `Dockerfile` instructions to the image +that is created. +Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Examples + +### Import from a remote location + +This will create a new untagged image. + +```bash +$ docker import http://example.com/exampleimage.tgz +``` + +### Import from a local file + +- Import to docker via pipe and `STDIN`. + + ```bash + $ cat exampleimage.tgz | docker import - exampleimagelocal:new + ``` + +- Import with a commit message. + + ```bash + $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new + ``` + +- Import to docker from a local archive. + + ```bash + $ docker import /path/to/exampleimage.tgz + ``` + +### Import from a local directory + +```bash +$ sudo tar -c . | docker import - exampleimagedir +``` + +### Import from a local directory with new configurations + +```bash +$ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir +``` + +Note the `sudo` in this example – you must preserve +the ownership of the files (especially root ownership) during the +archiving with tar. If you are not root (or the sudo command) when you +tar, then the ownerships might not get preserved. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/index.md b/vendor/github.com/moby/moby/docs/reference/commandline/index.md new file mode 100644 index 0000000..f38fc52 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/index.md @@ -0,0 +1,184 @@ +--- +title: "Docker commands" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +identifier: "smn_cli_guide" +--- + + + +# The Docker commands + +This section contains reference information on using Docker's command line +client. Each command has a reference page along with samples. If you are +unfamiliar with the command line, you should start by reading about how to [Use +the Docker command line](cli.md). + +You start the Docker daemon with the command line. How you start the daemon +affects your Docker containers. For that reason you should also make sure to +read the [`dockerd`](dockerd.md) reference page. + +## Commands by object + +### Docker management commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [dockerd](dockerd.md) | Launch the Docker daemon | +| [info](info.md) | Display system-wide information | +| [inspect](inspect.md)| Return low-level information on a container or image | +| [version](version.md) | Show the Docker version information | + + +### Image commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [build](build.md) | Build an image from a Dockerfile | +| [commit](commit.md) | Create a new image from a container's changes | +| [history](history.md) | Show the history of an image | +| [images](images.md) | List images | +| [import](import.md) | Import the contents from a tarball to create a filesystem image | +| [load](load.md) | Load an image from a tar archive or STDIN | +| [image prune](image_prune.md) | Remove unused images | +| [rmi](rmi.md) | Remove one or more images | +| [save](save.md) | Save images to a tar archive | +| [tag](tag.md) | Tag an image into a repository | + +### Container commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [attach](attach.md) | Attach to a running container | +| [container prune](container_prune.md) | Remove all stopped containers | +| [cp](cp.md) | Copy files/folders from a container to a HOSTDIR or to STDOUT | +| [create](create.md) | Create a new container | +| [diff](diff.md) | Inspect changes on a container's filesystem | +| [events](events.md) | Get real time events from the server | +| [exec](exec.md) | Run a command in a running container | +| [export](export.md) | Export a container's filesystem as a tar archive | +| [kill](kill.md) | Kill a running container | +| [logs](logs.md) | Fetch the logs of a container | +| [pause](pause.md) | Pause all processes within a container | +| [port](port.md) | List port mappings or a specific mapping for the container | +| [ps](ps.md) | List containers | +| [rename](rename.md) | Rename a container | +| [restart](restart.md) | Restart a running container | +| [rm](rm.md) | Remove one or more containers | +| [run](run.md) | Run a command in a new container | +| [start](start.md) | Start one or more stopped containers | +| [stats](stats.md) | Display a live stream of container(s) resource usage statistics | +| [stop](stop.md) | Stop a running container | +| [top](top.md) | Display the running processes of a container | +| [unpause](unpause.md) | Unpause all processes within a container | +| [update](update.md) | Update configuration of one or more containers | +| [wait](wait.md) | Block until a container stops, then print its exit code | + +### Hub and registry commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [login](login.md) | Register or log in to a Docker registry | +| [logout](logout.md) | Log out from a Docker registry | +| [pull](pull.md) | Pull an image or a repository from a Docker registry | +| [push](push.md) | Push an image or a repository to a Docker registry | +| [search](search.md) | Search the Docker Hub for images | + +### Network and connectivity commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [network connect](network_connect.md) | Connect a container to a network | +| [network create](network_create.md) | Create a new network | +| [network disconnect](network_disconnect.md) | Disconnect a container from a network | +| [network inspect](network_inspect.md) | Display information about a network | +| [network ls](network_ls.md) | Lists all the networks the Engine `daemon` knows about | +| [network prune](network_prune.md) | Remove all unused networks | +| [network rm](network_rm.md) | Removes one or more networks | + +### Shared data volume commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [volume create](volume_create.md) | Creates a new volume where containers can consume and store data | +| [volume inspect](volume_inspect.md) | Display information about a volume | +| [volume ls](volume_ls.md) | Lists all the volumes Docker knows about | +| [volume prune](volume_prune.md) | Remove all unused volumes | +| [volume rm](volume_rm.md) | Remove one or more volumes | + +### Swarm node commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager | +| [node inspect](node_inspect.md) | Inspect a node in the swarm | +| [node ls](node_ls.md) | List nodes in the swarm | +| [node promote](node_promote.md) | Promote a node that is pending a promotion to manager | +| [node ps](node_ps.md) | List tasks running on one or more nodes | +| [node rm](node_rm.md) | Remove one or more nodes from the swarm | +| [node update](node_update.md) | Update attributes for a node | + +### Swarm management commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [swarm init](swarm_init.md) | Initialize a swarm | +| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node | +| [swarm leave](swarm_leave.md) | Remove the current node from the swarm | +| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens | +| [swarm unlock](swarm_unlock.md) | Unlock swarm | +| [swarm unlock-key](swarm_unlock_key.md) | Manage the unlock key | +| [swarm update](swarm_update.md) | Update attributes of a swarm | + +### Swarm service commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [service create](service_create.md) | Create a new service | +| [service inspect](service_inspect.md) | Inspect a service | +| [service logs](service_logs.md) | Fetch the logs of a service | +| [service ls](service_ls.md) | List services in the swarm | +| [service ps](service_ps.md) | List the tasks of a service | +| [service rm](service_rm.md) | Remove a service from the swarm | +| [service scale](service_scale.md) | Set the number of replicas for the desired state of the service | +| [service update](service_update.md) | Update the attributes of a service | + +### Swarm secret commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [secret create](secret_create.md) | Create a secret from a file or STDIN as content | +| [secret inspect](service_inspect.md) | Inspect the specified secret | +| [secret ls](secret_ls.md) | List secrets in the swarm | +| [secret rm](secret_rm.md) | Remove the specified secrets from the swarm | + +### Swarm stack commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [stack deploy](stack_deploy.md) | Deploy a new stack or update an existing stack | +| [stack ls](stack_ls.md) | List stacks in the swarm | +| [stack ps](stack_ps.md) | List the tasks in the stack | +| [stack rm](stack_rm.md) | Remove the stack from the swarm | +| [stack services](stack_services.md) | List the services in the stack | + +### Plugin commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [plugin create](plugin_create.md) | Create a plugin from a rootfs and configuration | +| [plugin disable](plugin_disable.md) | Disable a plugin | +| [plugin enbale](plugin_enable.md) | Enable a plugin | +| [plugin inspect](plugin_inspect.md) | Display detailed information on a plugin | +| [plugin install](plugin_install.md) | Install a plugin | +| [plugin ls](plugin_ls.md) | List plugins | +| [plugin push](plugin_push.md) | Push a plugin to a registry | +| [plugin rm](plugin_rm.md) | Remove a plugin | +| [plugin set](plugin_set.md) | Change settings for a plugin | diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/info.md b/vendor/github.com/moby/moby/docs/reference/commandline/info.md new file mode 100644 index 0000000..798c3f0 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/info.md @@ -0,0 +1,244 @@ +--- +title: "info" +description: "The info command description and usage" +keywords: "display, docker, information" +--- + + + +# info + +```markdown +Usage: docker info [OPTIONS] + +Display system-wide information + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +This command displays system wide information regarding the Docker installation. +Information displayed includes the kernel version, number of containers and images. +The number of images shown is the number of unique images. The same image tagged +under different names is counted only once. + +If a format is specified, the given template will be executed instead of the +default format. Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +Depending on the storage driver in use, additional information can be shown, such +as pool name, data file, metadata file, data space used, total data space, metadata +space used, and total metadata space. + +The data file is where the images are stored and the metadata file is where the +meta data regarding those images are stored. When run for the first time Docker +allocates a certain amount of data space and meta data space from the space +available on the volume where `/var/lib/docker` is mounted. + +## Examples + +### Show output + +The example below shows the output for a daemon running on Red Hat Enterprise Linux, +using the `devicemapper` storage driver. As can be seen in the output, additional +information about the `devicemapper` storage driver is shown: + +```bash +$ docker info + +Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 +Images: 52 +Server Version: 1.10.3 +Storage Driver: devicemapper + Pool Name: docker-202:2-25583803-pool + Pool Blocksize: 65.54 kB + Base Device Size: 10.74 GB + Backing Filesystem: xfs + Data file: /dev/loop0 + Metadata file: /dev/loop1 + Data Space Used: 1.68 GB + Data Space Total: 107.4 GB + Data Space Available: 7.548 GB + Metadata Space Used: 2.322 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.145 GB + Udev Sync Supported: true + Deferred Removal Enabled: false + Deferred Deletion Enabled: false + Deferred Deleted Device Count: 0 + Data loop file: /var/lib/docker/devicemapper/devicemapper/data + Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.107-RHEL7 (2015-12-01) +Execution Driver: native-0.2 +Logging Driver: json-file +Plugins: + Volume: local + Network: null host bridge +Kernel Version: 3.10.0-327.el7.x86_64 +Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) +OSType: linux +Architecture: x86_64 +CPUs: 1 +Total Memory: 991.7 MiB +Name: ip-172-30-0-91.ec2.internal +ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S +Docker Root Dir: /var/lib/docker +Debug mode (client): false +Debug mode (server): false +Username: gordontheturtle +Registry: https://index.docker.io/v1/ +Insecure registries: + myinsecurehost:5000 + 127.0.0.0/8 +``` + +### Show debugging output + +Here is a sample output for a daemon running on Ubuntu, using the overlay2 +storage driver and a node that is part of a 2-node swarm: + +```bash +$ docker -D info + +Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 +Images: 52 +Server Version: 1.13.0 +Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Native Overlay Diff: false +Logging Driver: json-file +Cgroup Driver: cgroupfs +Plugins: + Volume: local + Network: bridge host macvlan null overlay +Swarm: active + NodeID: rdjq45w1op418waxlairloqbm + Is Manager: true + ClusterID: te8kdyw33n36fqiz74bfjeixd + Managers: 1 + Nodes: 2 + Orchestration: + Task History Retention Limit: 5 + Raft: + Snapshot Interval: 10000 + Number of Old Snapshots to Retain: 0 + Heartbeat Tick: 1 + Election Tick: 3 + Dispatcher: + Heartbeat Period: 5 seconds + CA Configuration: + Expiry Duration: 3 months + Node Address: 172.16.66.128 172.16.66.129 + Manager Addresses: + 172.16.66.128:2477 +Runtimes: runc +Default Runtime: runc +Init Binary: docker-init +containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 +runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 +init version: N/A (expected: v0.13.0) +Security Options: + apparmor + seccomp + Profile: default +Kernel Version: 4.4.0-31-generic +Operating System: Ubuntu 16.04.1 LTS +OSType: linux +Architecture: x86_64 +CPUs: 2 +Total Memory: 1.937 GiB +Name: ubuntu +ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 +Docker Root Dir: /var/lib/docker +Debug Mode (client): true +Debug Mode (server): true + File Descriptors: 30 + Goroutines: 123 + System Time: 2016-11-12T17:24:37.955404361-08:00 + EventsListeners: 0 +Http Proxy: http://test:test@proxy.example.com:8080 +Https Proxy: https://test:test@proxy.example.com:8080 +No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com +Registry: https://index.docker.io/v1/ +WARNING: No swap limit support +Labels: + storage=ssd + staging=true +Experimental: false +Insecure Registries: + 127.0.0.0/8 +Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ +Live Restore Enabled: false +``` + +The global `-D` option causes all `docker` commands to output debug information. + +### Format the output + +You can also specify the output format: + +```bash +$ docker info --format '{{json .}}' + +{"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} +``` + +### Run `docker info` on Windows + +Here is a sample output for a daemon running on Windows Server 2016: + +```none +E:\docker>docker info + +Containers: 1 + Running: 0 + Paused: 0 + Stopped: 1 +Images: 17 +Server Version: 1.13.0 +Storage Driver: windowsfilter + Windows: +Logging Driver: json-file +Plugins: + Volume: local + Network: nat null overlay +Swarm: inactive +Default Isolation: process +Kernel Version: 10.0 14393 (14393.206.amd64fre.rs1_release.160912-1937) +Operating System: Windows Server 2016 Datacenter +OSType: windows +Architecture: x86_64 +CPUs: 8 +Total Memory: 3.999 GiB +Name: WIN-V0V70C0LU5P +ID: NYMS:B5VK:UMSL:FVDZ:EWB5:FKVK:LPFL:FJMQ:H6FT:BZJ6:L2TD:XH62 +Docker Root Dir: C:\control +Debug Mode (client): false +Debug Mode (server): false +Registry: https://index.docker.io/v1/ +Insecure Registries: + 127.0.0.0/8 +Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ +Live Restore Enabled: false +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/inspect.md b/vendor/github.com/moby/moby/docs/reference/commandline/inspect.md new file mode 100644 index 0000000..9ac2e35 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/inspect.md @@ -0,0 +1,101 @@ +--- +title: "inspect" +description: "The inspect command description and usage" +keywords: "inspect, container, json" +--- + + + +# inspect + +```markdown +Usage: docker inspect [OPTIONS] NAME|ID [NAME|ID...] + +Return low-level information on Docker object(s) (e.g. container, image, volume, +network, node, service, or task) identified by name or ID + +Options: + -f, --format Format the output using the given Go template + --help Print usage + -s, --size Display total file sizes if the type is container + --type Return JSON for specified type +``` + +## Description + +By default, `docker inspect` will render all results in a JSON array. If the container and +image have the same name, this will return container JSON for unspecified type. +If a format is specified, the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +### Get an instance's IP address + +For the most part, you can pick out any field from the JSON in a fairly +straightforward manner. + +```bash +$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID +``` + +### Get an instance's MAC address + +```bash +$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID +``` + +### Get an instance's log path + +```bash +$ docker inspect --format='{{.LogPath}}' $INSTANCE_ID +``` + +### Get an instance's image name + +```bash +$ docker inspect --format='{{.Config.Image}}' $INSTANCE_ID +``` + +### List all port bindings + +You can loop over arrays and maps in the results to produce simple text +output: + +```bash +$ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID +``` + +### Find a specific port mapping + +The `.Field` syntax doesn't work when the field name begins with a +number, but the template language's `index` function does. The +`.NetworkSettings.Ports` section contains a map of the internal port +mappings to a list of external address/port objects. To grab just the +numeric public port, you use `index` to find the specific port map, and +then `index` 0 contains the first object inside of that. Then we ask for +the `HostPort` field to get the public address. + +```bash +$ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID +``` + +### Get a subsection in JSON format + +If you request a field which is itself a structure containing other +fields, by default you get a Go-style dump of the inner values. +Docker adds a template function, `json`, which can be applied to get +results in JSON format. + +```bash +$ docker inspect --format='{{json .Config}}' $INSTANCE_ID +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/kill.md b/vendor/github.com/moby/moby/docs/reference/commandline/kill.md new file mode 100644 index 0000000..97b15ad --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/kill.md @@ -0,0 +1,35 @@ +--- +title: "kill" +description: "The kill command description and usage" +keywords: "container, kill, signal" +--- + + + +# kill + +```markdown +Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] + +Kill one or more running containers + +Options: + --help Print usage + -s, --signal string Signal to send to the container (default "KILL") +``` + +## Description + +The main process inside the container will be sent `SIGKILL`, or any +signal specified with option `--signal`. + +> **Note**: `ENTRYPOINT` and `CMD` in the *shell* form run as a subcommand of +> `/bin/sh -c`, which does not pass signals. This means that the executable is +> not the container’s PID 1 and does not receive Unix signals. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/load.md b/vendor/github.com/moby/moby/docs/reference/commandline/load.md new file mode 100644 index 0000000..3ce6c19 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/load.md @@ -0,0 +1,62 @@ +--- +title: "load" +description: "The load command description and usage" +keywords: "stdin, tarred, repository" +--- + + + +# load + +```markdown +Usage: docker load [OPTIONS] + +Load an image from a tar archive or STDIN + +Options: + --help Print usage + -i, --input string Read from tar archive file, instead of STDIN. + The tarball may be compressed with gzip, bzip, or xz + -q, --quiet Suppress the load output but still outputs the imported images +``` +## Description + +`docker load` loads a tarred repository from a file or the standard input stream. +It restores both images and tags. + +## Examples + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE + +$ docker load < busybox.tar.gz + +Loaded image: busybox:latest +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +busybox latest 769b9341d937 7 weeks ago 2.489 MB + +$ docker load --input fedora.tar + +Loaded image: fedora:rawhide + +Loaded image: fedora:20 + +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +busybox latest 769b9341d937 7 weeks ago 2.489 MB +fedora rawhide 0d20aec6529d 7 weeks ago 387 MB +fedora 20 58394af37342 7 weeks ago 385.5 MB +fedora heisenbug 58394af37342 7 weeks ago 385.5 MB +fedora latest 58394af37342 7 weeks ago 385.5 MB +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/login.md b/vendor/github.com/moby/moby/docs/reference/commandline/login.md new file mode 100644 index 0000000..e5d1656 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/login.md @@ -0,0 +1,156 @@ +--- +title: "login" +description: "The login command description and usage" +keywords: "registry, login, image" +--- + + + +# login + +```markdown +Usage: docker login [OPTIONS] [SERVER] + +Log in to a Docker registry. +If no server is specified, the default is defined by the daemon. + +Options: + --help Print usage + -p, --password string Password + -u, --username string Username +``` + +## Description + +Login to a registry. + +### Login to a self-hosted registry + +If you want to login to a self-hosted registry you can specify this by +adding the server name. + +```bash +$ docker login localhost:8080 +``` + +### Privileged user requirement + +`docker login` requires user to use `sudo` or be `root`, except when: + +1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. +2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details. + +You can log into any public or private repository for which you have +credentials. When you log in, the command stores encoded credentials in +`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. + +### Credentials store + +The Docker Engine can keep user credentials in an external credentials store, +such as the native keychain of the operating system. Using an external store +is more secure than storing credentials in the Docker configuration file. + +To use a credentials store, you need an external helper program to interact +with a specific keychain or external store. Docker requires the helper +program to be in the client's host `$PATH`. + +This is the list of currently available credentials helpers and where +you can download them from: + +- D-Bus Secret Service: https://github.com/docker/docker-credential-helpers/releases +- Apple macOS keychain: https://github.com/docker/docker-credential-helpers/releases +- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases + +You need to specify the credentials store in `$HOME/.docker/config.json` +to tell the docker engine to use it: + +```json +{ + "credsStore": "osxkeychain" +} +``` + +If you are currently logged in, run `docker logout` to remove +the credentials from the file and run `docker login` again. + +### Credential helper protocol + +Credential helpers can be any program or script that follows a very simple protocol. +This protocol is heavily inspired by Git, but it differs in the information shared. + +The helpers always use the first argument in the command to identify the action. +There are only three possible values for that argument: `store`, `get`, and `erase`. + +The `store` command takes a JSON payload from the standard input. That payload carries +the server address, to identify the credential, the user name, and either a password +or an identity token. + +```json +{ + "ServerURL": "https://index.docker.io/v1", + "Username": "david", + "Secret": "passw0rd1" +} +``` + +If the secret being stored is an identity token, the Username should be set to +``. + +The `store` command can write error messages to `STDOUT` that the docker engine +will show if there was an issue. + +The `get` command takes a string payload from the standard input. That payload carries +the server address that the docker engine needs credentials for. This is +an example of that payload: `https://index.docker.io/v1`. + +The `get` command writes a JSON payload to `STDOUT`. Docker reads the user name +and password from this payload: + +```json +{ + "Username": "david", + "Secret": "passw0rd1" +} +``` + +The `erase` command takes a string payload from `STDIN`. That payload carries +the server address that the docker engine wants to remove credentials for. This is +an example of that payload: `https://index.docker.io/v1`. + +The `erase` command can write error messages to `STDOUT` that the docker engine +will show if there was an issue. + +### Credential helpers + +Credential helpers are similar to the credential store above, but act as the +designated programs to handle credentials for *specific registries*. The default +credential store (`credsStore` or the config file itself) will not be used for +operations concerning credentials of the specified registries. + +### Logging out + +If you are currently logged in, run `docker logout` to remove +the credentials from the default store. + +Credential helpers are specified in a similar way to `credsStore`, but +allow for multiple helpers to be configured at a time. Keys specify the +registry domain, and values specify the suffix of the program to use +(i.e. everything after `docker-credential-`). +For example: + +```json +{ + "credHelpers": { + "registry.example.com": "registryhelper", + "awesomereg.example.org": "hip-star", + "unicorn.example.io": "vcbait" + } +} +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/logout.md b/vendor/github.com/moby/moby/docs/reference/commandline/logout.md new file mode 100644 index 0000000..1e150eb --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/logout.md @@ -0,0 +1,32 @@ +--- +title: "logout" +description: "The logout command description and usage" +keywords: "logout, docker, registry" +--- + + + +# logout + +```markdown +Usage: docker logout [SERVER] + +Log out from a Docker registry. +If no server is specified, the default is defined by the daemon. + +Options: + --help Print usage +``` + +## Examples + +```bash +$ docker logout localhost:8080 +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/logs.md b/vendor/github.com/moby/moby/docs/reference/commandline/logs.md new file mode 100644 index 0000000..75f25f7 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/logs.md @@ -0,0 +1,68 @@ +--- +title: "logs" +description: "The logs command description and usage" +keywords: "logs, retrieve, docker" +--- + + + +# logs + +```markdown +Usage: docker logs [OPTIONS] CONTAINER + +Fetch the logs of a container + +Options: + --details Show extra details provided to logs + -f, --follow Follow log output + --help Print usage + --since string Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes) + --tail string Number of lines to show from the end of the logs (default "all") + -t, --timestamps Show timestamps +``` + +## Description + +The `docker logs` command batch-retrieves logs present at the time of execution. + +> **Note**: this command is only functional for containers that are started with +> the `json-file` or `journald` logging driver. + +For more information about selecting and configuring logging drivers, refer to +[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). + +The `docker logs --follow` command will continue streaming the new output from +the container's `STDOUT` and `STDERR`. + +Passing a negative number or a non-integer to `--tail` is invalid and the +value is set to `all` in that case. + +The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) +, for example `2014-09-16T06:17:46.000000000Z`, to each +log entry. To ensure that the timestamps are aligned the +nano-second part of the timestamp will be padded with zero when necessary. + +The `docker logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +container. + +The `--since` option shows only the container logs generated after +a given date. You can specify the date as an RFC 3339 date, a UNIX +timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date +format you may also use RFC3339Nano, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. You can combine the +`--since` option with either or both of the `--follow` or `--tail` options. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/network.md b/vendor/github.com/moby/moby/docs/reference/commandline/network.md new file mode 100644 index 0000000..b4ef36b --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/network.md @@ -0,0 +1,49 @@ +--- +title: "network" +description: "The network command description and usage" +keywords: "network" +--- + + + +# network + +```markdown +Usage: docker network COMMAND + +Manage networks + +Options: + --help Print usage + +Commands: + connect Connect a container to a network + create Create a network + disconnect Disconnect a container from a network + inspect Display detailed information on one or more networks + ls List networks + prune Remove all unused networks + rm Remove one or more networks + +Run 'docker network COMMAND --help' for more information on a command. +``` + +## Description + +Manage networks. You can use subcommand to create, list, inspect, remove, +connect and disconnect networks. + +## Related commands + +* [network create](network_create.md) +* [network inspect](network_inspect.md) +* [network list](network_list.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/network_connect.md b/vendor/github.com/moby/moby/docs/reference/commandline/network_connect.md new file mode 100644 index 0000000..7c5a586 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/network_connect.md @@ -0,0 +1,117 @@ +--- +title: "network connect" +description: "The network connect command description and usage" +keywords: "network, connect, user-defined" +--- + + + +# network connect + +```markdown +Usage: docker network connect [OPTIONS] NETWORK CONTAINER + +Connect a container to a network + +Options: + --alias value Add network-scoped alias for the container (default []) + --help Print usage + --ip string IP Address + --ip6 string IPv6 Address + --link value Add link to another container (default []) + --link-local-ip value Add a link-local address for the container (default []) +``` + +## Description + +Connects a container to a network. You can connect a container by name +or by ID. Once connected, the container can communicate with other containers in +the same network. + +## Examples + +### Connect a running container to a network + +```bash +$ docker network connect multi-host-network container1 +``` + +### Connect a container to a network when it starts + +You can also use the `docker run --network=` option to start a container and immediately connect it to a network. + +```bash +$ docker run -itd --network=multi-host-network busybox +``` + +### Specify the IP address a container will use on a given network + +You can specify the IP address you want to be assigned to the container's interface. + +```bash +$ docker network connect --ip 10.10.36.122 multi-host-network container2 +``` + +### Use the legacy `--link` option + +You can use `--link` option to link another container with a preferred alias + +```bash +$ docker network connect --link container1:c1 multi-host-network container2 +``` + +### Create a network alias for a container + +`--alias` option can be used to resolve the container by another name in the network +being connected to. + +```bash +$ docker network connect --alias db --alias mysql multi-host-network container2 +``` + +### Network implications of stopping, pausing, or restarting containers + +You can pause, restart, and stop containers that are connected to a network. +A container connects to its configured networks when it runs. + +If specified, the container's IP address(es) is reapplied when a stopped +container is restarted. If the IP address is no longer available, the container +fails to start. One way to guarantee that the IP address is available is +to specify an `--ip-range` when creating the network, and choose the static IP +address(es) from outside that range. This ensures that the IP address is not +given to another container while this container is not on the network. + +```bash +$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network +``` + +```bash +$ docker network connect --ip 172.20.128.2 multi-host-network container2 +``` + +To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. + +Once connected in network, containers can communicate using only another +container's IP address or name. For `overlay` networks or custom plugins that +support multi-host connectivity, containers connected to the same multi-host +network but launched from different Engines can also communicate in this way. + +You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. + +## Related commands + +* [network inspect](network_inspect.md) +* [network create](network_create.md) +* [network disconnect](network_disconnect.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) +* [Work with networks](https://docs.docker.com/engine/userguide/networking/work-with-networks/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/network_create.md b/vendor/github.com/moby/moby/docs/reference/commandline/network_create.md new file mode 100644 index 0000000..4540d53 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/network_create.md @@ -0,0 +1,206 @@ +--- +title: "network create" +description: "The network create command description and usage" +keywords: "network, create" +--- + + + +# network create + +```markdown +Usage: docker network create [OPTIONS] NETWORK + +Create a network + +Options: + --attachable Enable manual container attachment + --aux-address value Auxiliary IPv4 or IPv6 addresses used by Network + driver (default map[]) + -d, --driver string Driver to manage the Network (default "bridge") + --gateway value IPv4 or IPv6 Gateway for the master subnet (default []) + --help Print usage + --internal Restrict external access to the network + --ip-range value Allocate container ip from a sub-range (default []) + --ipam-driver string IP Address Management Driver (default "default") + --ipam-opt value Set IPAM driver specific options (default map[]) + --ipv6 Enable IPv6 networking + --label value Set metadata on a network (default []) + -o, --opt value Set driver specific options (default map[]) + --subnet value Subnet in CIDR format that represents a + network segment (default []) +``` + +## Description + +Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the +built-in network drivers. If you have installed a third party or your own custom +network driver you can specify that `DRIVER` here also. If you don't specify the +`--driver` option, the command automatically creates a `bridge` network for you. +When you install Docker Engine it creates a `bridge` network automatically. This +network corresponds to the `docker0` bridge that Engine has traditionally relied +on. When you launch a new container with `docker run` it automatically connects to +this bridge network. You cannot remove this default bridge network, but you can +create new ones using the `network create` command. + +```bash +$ docker network create -d bridge my-bridge-network +``` + +Bridge networks are isolated networks on a single Engine installation. If you +want to create a network that spans multiple Docker hosts each running an +Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay +networks require some pre-existing conditions before you can create one. These +conditions are: + +* Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores. +* A cluster of hosts with connectivity to the key-value store. +* A properly configured Engine `daemon` on each host in the cluster. + +The `dockerd` options that support the `overlay` network are: + +* `--cluster-store` +* `--cluster-store-opt` +* `--cluster-advertise` + +To read more about these options and how to configure them, see ["*Get started +with multi-host network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay). + +While not required, it is a good idea to install Docker Swarm to +manage the cluster that makes up your network. Swarm provides sophisticated +discovery and server management tools that can assist your implementation. + +Once you have prepared the `overlay` network prerequisites you simply choose a +Docker host in the cluster and issue the following to create the network: + +```bash +$ docker network create -d overlay my-multihost-network +``` + +Network names must be unique. The Docker daemon attempts to identify naming +conflicts but this is not guaranteed. It is the user's responsibility to avoid +name conflicts. + +## Examples + +### Connect containers + +When you start a container, use the `--network` flag to connect it to a network. +This example adds the `busybox` container to the `mynet` network: + +```bash +$ docker run -itd --network=mynet busybox +``` + +If you want to add a container to a network after the container is already +running, use the `docker network connect` subcommand. + +You can connect multiple containers to the same network. Once connected, the +containers can communicate using only another container's IP address or name. +For `overlay` networks or custom plugins that support multi-host connectivity, +containers connected to the same multi-host network but launched from different +Engines can also communicate in this way. + +You can disconnect a container from a network using the `docker network +disconnect` command. + +### Specify advanced options + +When you create a network, Engine creates a non-overlapping subnetwork for the +network by default. This subnetwork is not a subdivision of an existing +network. It is purely for ip-addressing purposes. You can override this default +and specify subnetwork values directly using the `--subnet` option. On a +`bridge` network you can only create a single subnet: + +```bash +$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0 +``` + +Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` +options. + +```bash +$ docker network create \ + --driver=bridge \ + --subnet=172.28.0.0/16 \ + --ip-range=172.28.5.0/24 \ + --gateway=172.28.5.254 \ + br0 +``` + +If you omit the `--gateway` flag the Engine selects one for you from inside a +preferred pool. For `overlay` networks and for network driver plugins that +support it you can create multiple subnetworks. + +```bash +$ docker network create -d overlay \ + --subnet=192.168.0.0/16 \ + --subnet=192.170.0.0/16 \ + --gateway=192.168.0.100 \ + --gateway=192.170.0.100 \ + --ip-range=192.168.1.0/24 \ + --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ + --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ + my-multihost-network +``` + +Be sure that your subnetworks do not overlap. If they do, the network create +fails and Engine returns an error. + +### Bridge driver options + +When creating a custom network, the default network driver (i.e. `bridge`) has +additional options that can be passed. The following are those options and the +equivalent docker daemon flags used for docker0 bridge: + +| Option | Equivalent | Description | +|--------------------------------------------------|-------------|-------------------------------------------------------| +| `com.docker.network.bridge.name` | - | bridge name to be used when creating the Linux bridge | +| `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | +| `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity | +| `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | +| `com.docker.network.driver.mtu` | `--mtu` | Set the containers network MTU | + +The following arguments can be passed to `docker network create` for any +network driver, again with their approximate equivalents to `docker daemon`. + +| Argument | Equivalent | Description | +|--------------|----------------|--------------------------------------------| +| `--gateway` | - | IPv4 or IPv6 Gateway for the master subnet | +| `--ip-range` | `--fixed-cidr` | Allocate IPs from a range | +| `--internal` | - | Restrict external access to the network | +| `--ipv6` | `--ipv6` | Enable IPv6 networking | +| `--subnet` | `--bip` | Subnet for network | + +For example, let's use `-o` or `--opt` options to specify an IP address binding +when publishing ports: + +```bash +$ docker network create \ + -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \ + simple-network +``` + +### Network internal mode + +By default, when you connect a container to an `overlay` network, Docker also +connects a bridge network to it to provide external connectivity. If you want +to create an externally isolated `overlay` network, you can specify the +`--internal` option. + +## Related commands + +* [network inspect](network_inspect.md) +* [network connect](network_connect.md) +* [network disconnect](network_disconnect.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/network_disconnect.md b/vendor/github.com/moby/moby/docs/reference/commandline/network_disconnect.md new file mode 100644 index 0000000..e855894 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/network_disconnect.md @@ -0,0 +1,48 @@ +--- +title: "network disconnect" +description: "The network disconnect command description and usage" +keywords: "network, disconnect, user-defined" +--- + + + +# network disconnect + +```markdown +Usage: docker network disconnect [OPTIONS] NETWORK CONTAINER + +Disconnect a container from a network + +Options: + -f, --force Force the container to disconnect from a network + --help Print usage +``` + +## Description + +Disconnects a container from a network. The container must be running to +disconnect it from the network. + +## Examples + +```bash + $ docker network disconnect multi-host-network container1 +``` + + +## Related commands + +* [network inspect](network_inspect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/network_inspect.md b/vendor/github.com/moby/moby/docs/reference/commandline/network_inspect.md new file mode 100644 index 0000000..2b4c423 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/network_inspect.md @@ -0,0 +1,212 @@ +--- +title: "network inspect" +description: "The network inspect command description and usage" +keywords: "network, inspect, user-defined" +--- + + + +# network inspect + +```markdown +Usage: docker network inspect [OPTIONS] NETWORK [NETWORK...] + +Display detailed information on one or more networks + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Returns information about one or more networks. By default, this command renders +all results in a JSON object. + +## Examples + +## Inspect the `bridge` network + +Connect two containers to the default `bridge` network: + +```bash +$ sudo docker run -itd --name=container1 busybox +f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 + +$ sudo docker run -itd --name=container2 busybox +bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 +``` + +The `network inspect` command shows the containers, by id, in its +results. For networks backed by multi-host network driver, such as Overlay, +this command also shows the container endpoints in other hosts in the +cluster. These endpoints are represented as "ep-{endpoint-id}" in the output. +However, for swarm-scoped networks, only the endpoints that are local to the +node are shown. + +You can specify an alternate format to execute a given +template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +```none +$ sudo docker network inspect bridge + +[ + { + "Name": "bridge", + "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", + "Created": "2016-10-19T04:33:30.360899459Z", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.42.1/16", + "Gateway": "172.17.42.1" + } + ] + }, + "Internal": false, + "Containers": { + "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { + "Name": "container2", + "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + }, + "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { + "Name": "container1", + "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", + "MacAddress": "02:42:ac:11:00:01", + "IPv4Address": "172.17.0.1/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": {} + } +] +``` + +### Inspect a user-defined network + +Create and inspect a user-defined network: + +```bash +$ docker network create simple-network + +69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a +``` + +```none +$ docker network inspect simple-network + +[ + { + "Name": "simple-network", + "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", + "Created": "2016-10-19T04:33:30.360899459Z", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.22.0.0/16", + "Gateway": "172.22.0.1" + } + ] + }, + "Containers": {}, + "Options": {}, + "Labels": {} + } +] +``` + +### Inspect the `ingress` network + +For swarm mode overlay networks `network inspect` also shows the IP address and node name +of the peers. Peers are the nodes in the swarm cluster which have at least one task attached +to the network. Node name is of the format `-`. + +```none +$ docker network inspect ingress + +[ + { + "Name": "ingress", + "Id": "j0izitrut30h975vk4m1u5kk3", + "Created": "2016-11-08T06:49:59.803387552Z", + "Scope": "swarm", + "Driver": "overlay", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + }, + "Internal": false, + "Attachable": false, + "Containers": { + "ingress-sbox": { + "Name": "ingress-endpoint", + "EndpointID": "40e002d27b7e5d75f60bc72199d8cae3344e1896abec5eddae9743755fe09115", + "MacAddress": "02:42:0a:ff:00:03", + "IPv4Address": "10.255.0.3/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + }, + "Labels": {}, + "Peers": [ + { + "Name": "net-1-1d22adfe4d5c", + "IP": "192.168.33.11" + }, + { + "Name": "net-2-d55d838b34af", + "IP": "192.168.33.12" + }, + { + "Name": "net-3-8473f8140bd9", + "IP": "192.168.33.13" + } + ] + } +] +``` + +## Related commands + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/network_ls.md b/vendor/github.com/moby/moby/docs/reference/commandline/network_ls.md new file mode 100644 index 0000000..68a1a2c --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/network_ls.md @@ -0,0 +1,224 @@ +--- +title: "network ls" +description: "The network ls command description and usage" +keywords: "network, list, user-defined" +--- + + + +# docker network ls + +```markdown +Usage: docker network ls [OPTIONS] + +List networks + +Aliases: + ls, list + +Options: + -f, --filter filter Provide filter values (e.g. 'driver=bridge') + --format string Pretty-print networks using a Go template + --help Print usage + --no-trunc Do not truncate the output + -q, --quiet Only display network IDs +``` + +## Description + +Lists all the networks the Engine `daemon` knows about. This includes the +networks that span across multiple hosts in a cluster. + +## Examples + +### List all networks + +```bash +$ sudo docker network ls +NETWORK ID NAME DRIVER SCOPE +7fca4eb8c647 bridge bridge local +9f904ee27bf5 none null local +cf03ee007fb4 host host local +78b03ee04fc4 multi-host overlay swarm +``` + +Use the `--no-trunc` option to display the full network id: + +```bash +$ docker network ls --no-trunc +NETWORK ID NAME DRIVER SCOPE +18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null local +c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host local +7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge local +95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge local +63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge local +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. + +The currently supported filters are: + +* driver +* id (network's id) +* label (`label=` or `label==`) +* name (network's name) +* type (`custom|builtin`) + +#### Driver + +The `driver` filter matches networks based on their driver. + +The following example matches networks with the `bridge` driver: + +```bash +$ docker network ls --filter driver=bridge +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +#### ID + +The `id` filter matches on all or part of a network's ID. + +The following filter matches all networks with an ID containing the +`63d1ff1f77b0...` string. + +```bash +$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 +NETWORK ID NAME DRIVER SCOPE +63d1ff1f77b0 dev bridge local +``` + +You can also filter for a substring in an ID as this shows: + +```bash +$ docker network ls --filter id=95e74588f40d +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local + +$ docker network ls --filter id=95e +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +``` + +#### Label + +The `label` filter matches networks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches networks with the `usage` label regardless of its value. + +```bash +$ docker network ls -f "label=usage" +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +The following filter matches networks with the `usage` label with the `prod` value. + +```bash +$ docker network ls -f "label=usage=prod" +NETWORK ID NAME DRIVER SCOPE +f6e212da9dfd test2 bridge local +``` + +#### Name + +The `name` filter matches on all or part of a network's name. + +The following filter matches all networks with a name containing the `foobar` string. + +```bash +$ docker network ls --filter name=foobar +NETWORK ID NAME DRIVER SCOPE +06e7eef0a170 foobar bridge local +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker network ls --filter name=foo +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +06e7eef0a170 foobar bridge local +``` + +#### Type + +The `type` filter supports two values; `builtin` displays predefined networks +(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. + +The following filter matches all user defined networks: + +```bash +$ docker network ls --filter type=custom +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +63d1ff1f77b0 dev bridge local +``` + +By having this flag it allows for batch cleanup. For example, use this filter +to delete all user defined networks: + +```bash +$ docker network rm `docker network ls --filter type=custom -q` +``` + +A warning will be issued when trying to remove a network that has containers +attached. + +### Formatting + +The formatting options (`--format`) pretty-prints networks output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------|------------------------------------------------------------------------------------------ +`.ID` | Network ID +`.Name` | Network name +`.Driver` | Network driver +`.Scope` | Network scope (local, global) +`.IPv6` | Whether IPv6 is enabled on the network or not. +`.Internal` | Whether the network is internal or not. +`.Labels` | All labels assigned to the network. +`.Label` | Value of a specific label for this network. For example `{{.Label "project.version"}}` + +When using the `--format` option, the `network ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID` and `Driver` entries separated by a colon for all networks: + +```bash +$ docker network ls --format "{{.ID}}: {{.Driver}}" +afaaab448eb2: bridge +d1584f8dc718: host +391df270dc66: null +``` + +## Related commands + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network inspect](network_inspect.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/network_prune.md b/vendor/github.com/moby/moby/docs/reference/commandline/network_prune.md new file mode 100644 index 0000000..e57741d --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/network_prune.md @@ -0,0 +1,49 @@ +--- +title: "network prune" +description: "Remove unused networks" +keywords: "network, prune, delete" +--- + +# network prune + +```markdown +Usage: docker network prune [OPTIONS] + +Remove all unused networks + +Options: + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Description + +Remove all unused networks. Unused networks are those which are not referenced +by any containers. + +## Examples + +```bash +$ docker network prune + +WARNING! This will remove all networks not used by at least one container. +Are you sure you want to continue? [y/N] y +Deleted Networks: +n1 +n2 +``` + +## Related commands + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network rm](network_rm.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [volume prune](volume_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/network_rm.md b/vendor/github.com/moby/moby/docs/reference/commandline/network_rm.md new file mode 100644 index 0000000..aab487a --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/network_rm.md @@ -0,0 +1,68 @@ +--- +title: "network rm" +description: "the network rm command description and usage" +keywords: "network, rm, user-defined" +--- + + + +# network rm + +```markdown +Usage: docker network rm NETWORK [NETWORK...] + +Remove one or more networks + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +## Description + +Removes one or more networks by name or identifier. To remove a network, +you must first disconnect any containers connected to it. + +## Examples + +### Remove a network + +To remove the network named 'my-network': + +```bash + $ docker network rm my-network +``` + +### Remove multiple networks + +To delete multiple networks in a single `docker network rm` command, provide +multiple network names or ids. The following example deletes a network with id +`3695c422697f` and a network named `my-network`: + +```bash + $ docker network rm 3695c422697f my-network +``` + +When you specify multiple networks, the command attempts to delete each in turn. +If the deletion of one network fails, the command continues to the next on the +list and tries to delete that. The command reports success or failure for each +deletion. + +## Related commands + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/node.md b/vendor/github.com/moby/moby/docs/reference/commandline/node.md new file mode 100644 index 0000000..3a7d4b3 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/node.md @@ -0,0 +1,42 @@ + +--- +title: "node" +description: "The node command description and usage" +keywords: "node" +--- + + + +# node + +```markdown +Usage: docker node COMMAND + +Manage Swarm nodes + +Options: + --help Print usage + +Commands: + demote Demote one or more nodes from manager in the swarm + inspect Display detailed information on one or more nodes + ls List nodes in the swarm + promote Promote one or more nodes to manager in the swarm + ps List tasks running on one or more nodes, defaults to current node + rm Remove one or more nodes from the swarm + update Update a node + +Run 'docker node COMMAND --help' for more information on a command. +``` + +## Description + +Manage nodes. + diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/node_demote.md b/vendor/github.com/moby/moby/docs/reference/commandline/node_demote.md new file mode 100644 index 0000000..e6e59d8 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/node_demote.md @@ -0,0 +1,47 @@ +--- +title: "node demote" +description: "The node demote command description and usage" +keywords: "node, demote" +--- + + + +# node demote + +```markdown +Usage: docker node demote NODE [NODE...] + +Demote one or more nodes from manager in the swarm + +Options: + --help Print usage + +``` + +## Description + +Demotes an existing manager so that it is no longer a manager. This command +targets a docker engine that is a manager in the swarm. + + +## Examples + +```bash +$ docker node demote +``` + +## Related commands + +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/node_inspect.md b/vendor/github.com/moby/moby/docs/reference/commandline/node_inspect.md new file mode 100644 index 0000000..b958e3d --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/node_inspect.md @@ -0,0 +1,147 @@ +--- +title: "node inspect" +description: "The node inspect command description and usage" +keywords: "node, inspect" +--- + + + +# node inspect + +```markdown +Usage: docker node inspect [OPTIONS] self|NODE [NODE...] + +Display detailed information on one or more nodes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format. +``` + +## Description + +Returns information about a node. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +## Examples + +### Inspect a node + +```none +$ docker node inspect swarm-manager + +[ +{ + "ID": "e216jshn25ckzbvmwlnh5jr3g", + "Version": { + "Index": 10 + }, + "CreatedAt": "2016-06-16T22:52:44.9910662Z", + "UpdatedAt": "2016-06-16T22:52:45.230878043Z", + "Spec": { + "Role": "manager", + "Availability": "active" + }, + "Description": { + "Hostname": "swarm-manager", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 1000000000, + "MemoryBytes": 1039843328 + }, + "Engine": { + "EngineVersion": "1.12.0", + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "overlay" + }, + { + "Type": "Network", + "Name": "null" + }, + { + "Type": "Network", + "Name": "host" + }, + { + "Type": "Network", + "Name": "bridge" + }, + { + "Type": "Network", + "Name": "overlay" + } + ] + } + }, + "Status": { + "State": "ready", + "Addr": "168.0.32.137" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "168.0.32.137:2377" + } +} +] +``` + +### Specify an output format + +```none +$ docker node inspect --format '{{ .ManagerStatus.Leader }}' self + +false + +$ docker node inspect --pretty self +ID: e216jshn25ckzbvmwlnh5jr3g +Hostname: swarm-manager +Joined at: 2016-06-16 22:52:44.9910662 +0000 utc +Status: + State: Ready + Availability: Active + Address: 172.17.0.2 +Manager Status: + Address: 172.17.0.2:2377 + Raft Status: Reachable + Leader: Yes +Platform: + Operating System: linux + Architecture: x86_64 +Resources: + CPUs: 4 + Memory: 7.704 GiB +Plugins: + Network: overlay, bridge, null, host, overlay + Volume: local +Engine Version: 1.12.0 +``` + +## Related commands + +* [node demote](node_demote.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/node_ls.md b/vendor/github.com/moby/moby/docs/reference/commandline/node_ls.md new file mode 100644 index 0000000..59db7e6 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/node_ls.md @@ -0,0 +1,134 @@ +--- +title: "node ls" +description: "The node ls command description and usage" +keywords: "node, list" +--- + + + +# node ls + +```markdown +Usage: docker node ls [OPTIONS] + +List nodes in the swarm + +Aliases: + ls, list + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + -q, --quiet Only display IDs +``` + +## Description + +Lists all the nodes that the Docker Swarm manager knows about. You can filter +using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section +for more information about available filter options. + +## Examples + +```bash +$ docker node ls + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [id](node_ls.md#id) +* [label](node_ls.md#label) +* [membership](node_ls.md#membership) +* [name](node_ls.md#name) +* [role](node_ls.md#role) + +#### id + +The `id` filter matches all or part of a node's id. + +```bash +$ docker node ls -f id=1 + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +``` + +#### label + +The `label` filter matches nodes based on engine labels and on the presence of a `label` alone or a `label` and a value. Node labels are currently not used for filtering. + +The following filter matches nodes with the `foo` label regardless of its value. + +```bash +$ docker node ls -f "label=foo" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +``` + +#### membership + +The `membership` filter matches nodes based on the presence of a `membership` and a value +`accepted` or `pending`. + +The following filter matches nodes with the `membership` of `accepted`. + +```bash +$ docker node ls -f "membership=accepted" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active +``` + +#### name + +The `name` filter matches on all or part of a node hostname. + +The following filter matches the nodes with a name equal to `swarm-master` string. + +```bash +$ docker node ls -f name=swarm-manager1 + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +#### role + +The `role` filter matches nodes based on the presence of a `role` and a value `worker` or `manager`. + +The following filter matches nodes with the `manager` role. + +```bash +$ docker node ls -f "role=manager" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/node_promote.md b/vendor/github.com/moby/moby/docs/reference/commandline/node_promote.md new file mode 100644 index 0000000..1ebbe95 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/node_promote.md @@ -0,0 +1,45 @@ +--- +title: "node promote" +description: "The node promote command description and usage" +keywords: "node, promote" +--- + + + +# node promote + +```markdown +Usage: docker node promote NODE [NODE...] + +Promote one or more nodes to manager in the swarm + +Options: + --help Print usage +``` + +## Description + +Promotes a node to manager. This command targets a docker engine that is a +manager in the swarm. + +## Examples + +```bash +$ docker node promote +``` + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/node_ps.md b/vendor/github.com/moby/moby/docs/reference/commandline/node_ps.md new file mode 100644 index 0000000..e2e1418 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/node_ps.md @@ -0,0 +1,115 @@ +--- +title: "node ps" +description: "The node ps command description and usage" +keywords: node, tasks, ps +aliases: ["/engine/reference/commandline/node_tasks/"] +--- + + + +# node ps + +```markdown +Usage: docker node ps [OPTIONS] [NODE...] + +List tasks running on one or more nodes, defaults to current node. + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output +``` + +## Description + +Lists all the tasks on a Node that Docker knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. + +## Examples + +```bash +$ docker node ps swarm-manager1 +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [name](#name) +* [id](#id) +* [label](#label) +* [desired-state](#desired-state) + +#### name + +The `name` filter matches on all or part of a task's name. + +The following filter matches all tasks with a name containing the `redis` string. + +```bash +$ docker node ps -f name=redis swarm-manager1 + +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds +``` + +#### id + +The `id` filter matches a task's id. + +```bash +$ docker node ps -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1 + +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds +``` + +#### label + +The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches tasks with the `usage` label regardless of its value. + +```bash +$ docker node ps -f "label=usage" + +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 10 minutes +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 9 minutes +``` + + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, and `accepted`. + + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/node_rm.md b/vendor/github.com/moby/moby/docs/reference/commandline/node_rm.md new file mode 100644 index 0000000..c2fdd4d --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/node_rm.md @@ -0,0 +1,80 @@ +--- +title: "node rm" +description: "The node rm command description and usage" +keywords: "node, remove" +--- + + + +# node rm + +```markdown +Usage: docker node rm [OPTIONS] NODE [NODE...] + +Remove one or more nodes from the swarm + +Aliases: + rm, remove + +Options: + -f, --force Force remove a node from the swarm + --help Print usage +``` + +## Description + +When run from a manager node, removes the specified nodes from a swarm. + + +## Examples + +### Remove a stopped node from the swarm + +```bash +$ docker node rm swarm-node-02 + +Node swarm-node-02 removed from swarm +``` +### Attempt to remove a running node from a swarm + +Removes the specified nodes from the swarm, but only if the nodes are in the +down state. If you attempt to remove an active node you will receive an error: + +```non +$ docker node rm swarm-node-03 + +Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not +down and can't be removed +``` + +### Forcibly remove an inaccessible node from a swarm + +If you lose access to a worker node or need to shut it down because it has been +compromised or is not behaving as expected, you can use the `--force` option. +This may cause transient errors or interruptions, depending on the type of task +being run on the node. + +```bash +$ docker node rm --force swarm-node-03 + +Node swarm-node-03 removed from swarm +``` + +A manager node must be demoted to a worker node (using `docker node demote`) +before you can remove it from the swarm. + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node update](node_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/node_update.md b/vendor/github.com/moby/moby/docs/reference/commandline/node_update.md new file mode 100644 index 0000000..ba824c9 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/node_update.md @@ -0,0 +1,77 @@ +--- +title: "node update" +description: "The node update command description and usage" +keywords: "resources, update, dynamically" +--- + + + +# update + +```markdown +Usage: docker node update [OPTIONS] NODE + +Update a node + +Options: + --availability string Availability of the node (active/pause/drain) + --help Print usage + --label-add value Add or update a node label (key=value) (default []) + --label-rm value Remove a node label if exists (default []) + --role string Role of the node (worker/manager) +``` + +## Description + +Update metadata about a node, such as its availability, labels, or roles. + +## Examples + +### Add label metadata to a node + +Add metadata to a swarm node using node labels. You can specify a node label as +a key with an empty value: + +``` bash +$ docker node update --label-add foo worker1 +``` + +To add multiple labels to a node, pass the `--label-add` flag for each label: + +```bash +$ docker node update --label-add foo --label-add bar worker1 +``` + +When you [create a service](service_create.md), +you can use node labels as a constraint. A constraint limits the nodes where the +scheduler deploys tasks for a service. + +For example, to add a `type` label to identify nodes where the scheduler should +deploy message queue service tasks: + +``` bash +$ docker node update --label-add type=queue worker1 +``` + +The labels you set for nodes using `docker node update` apply only to the node +entity within the swarm. Do not confuse them with the docker daemon labels for +[dockerd](https://docs.docker.com/engine/userguide/labels-custom-metadata/#daemon-labels). + +For more information about labels, refer to [apply custom +metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/pause.md b/vendor/github.com/moby/moby/docs/reference/commandline/pause.md new file mode 100644 index 0000000..5bb652b --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/pause.md @@ -0,0 +1,48 @@ +--- +title: "pause" +description: "The pause command description and usage" +keywords: "cgroups, container, suspend, SIGSTOP" +--- + + + +# pause + +```markdown +Usage: docker pause CONTAINER [CONTAINER...] + +Pause all processes within one or more containers + +Options: + --help Print usage +``` + +## Description + +The `docker pause` command suspends all processes in the specified containers. +On Linux, this uses the cgroups freezer. Traditionally, when suspending a process +the `SIGSTOP` signal is used, which is observable by the process being suspended. +With the cgroups freezer the process is unaware, and unable to capture, +that it is being suspended, and subsequently resumed. On Windows, only Hyper-V +containers can be paused. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) +for further details. + +## Examples + +```bash +$ docker pause my_container +``` + +## Related commands + +* [unpause](unpause.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin.md new file mode 100644 index 0000000..7508247 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin.md @@ -0,0 +1,44 @@ +--- +title: "plugin" +description: "The plugin command description and usage" +keywords: "plugin" +--- + + + +# plugin + +```markdown +Usage: docker plugin COMMAND + +Manage plugins + +Options: + --help Print usage + +Commands: + create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. + disable Disable a plugin + enable Enable a plugin + inspect Display detailed information on one or more plugins + install Install a plugin + ls List plugins + push Push a plugin to a registry + rm Remove one or more plugins + set Change settings for a plugin + upgrade Upgrade an existing plugin + +Run 'docker plugin COMMAND --help' for more information on a command. + +``` + +## Description + +Manage plugins. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_create.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_create.md new file mode 100644 index 0000000..6f17543 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_create.md @@ -0,0 +1,65 @@ +--- +title: "plugin create" +description: "the plugin create command description and usage" +keywords: "plugin, create" +--- + + + +# plugin create + +```markdown +Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR + +Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. + +Options: + --compress Compress the context using gzip + --help Print usage +``` + +## Description + +Creates a plugin. Before creating the plugin, prepare the plugin's root filesystem as well as +[the config.json](../../extend/config.md) + +## Examples + +The following example shows how to create a sample `plugin`. + +```bash +$ ls -ls /home/pluginDir + +4 -rw-r--r-- 1 root root 431 Nov 7 01:40 config.json +0 drwxr-xr-x 19 root root 420 Nov 7 01:40 rootfs + +$ docker plugin create plugin /home/pluginDir + +plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +672d8144ec02 plugin latest A sample plugin for Docker false +``` + +The plugin can subsequently be enabled for local use or pushed to the public registry. + +## Related commands + +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_disable.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_disable.md new file mode 100644 index 0000000..2ff8188 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_disable.md @@ -0,0 +1,69 @@ +--- +title: "plugin disable" +description: "the plugin disable command description and usage" +keywords: "plugin, disable" +--- + + + +# plugin disable + +```markdown +Usage: docker plugin disable [OPTIONS] PLUGIN + +Disable a plugin + +Options: + -f, --force Force the disable of an active plugin + --help Print usage +``` + +## Description + +Disables a plugin. The plugin must be installed before it can be disabled, +see [`docker plugin install`](plugin_install.md). Without the `-f` option, +a plugin that has references (eg, volumes, networks) cannot be disabled. + +## Examples + +The following example shows that the `sample-volume-plugin` plugin is installed +and enabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +To disable the plugin, use the following command: + +```bash +$ docker plugin disable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_enable.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_enable.md new file mode 100644 index 0000000..2098a11 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_enable.md @@ -0,0 +1,68 @@ +--- +title: "plugin enable" +description: "the plugin enable command description and usage" +keywords: "plugin, enable" +--- + + + +# plugin enable + +```markdown +Usage: docker plugin enable [OPTIONS] PLUGIN + +Enable a plugin + +Options: + --help Print usage + --timeout int HTTP client timeout (in seconds) +``` + +## Description + +Enables a plugin. The plugin must be installed before it can be enabled, +see [`docker plugin install`](plugin_install.md). + +## Examples + +The following example shows that the `sample-volume-plugin` plugin is installed, +but disabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +To enable the plugin, use the following command: + +```bash +$ docker plugin enable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_inspect.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_inspect.md new file mode 100644 index 0000000..c0e6573 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_inspect.md @@ -0,0 +1,167 @@ +--- +title: "plugin inspect" +description: "The plugin inspect command description and usage" +keywords: "plugin, inspect" +--- + + + +# plugin inspect + +```markdown +Usage: docker plugin inspect [OPTIONS] PLUGIN [PLUGIN...] + +Display detailed information on one or more plugins + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Returns information about a plugin. By default, this command renders all results +in a JSON array. + +## Examples + + +```none +$ docker plugin inspect tiborvass/sample-volume-plugin:latest + +{ + "Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21", + "Name": "tiborvass/sample-volume-plugin:latest", + "PluginReference": "tiborvas/sample-volume-plugin:latest", + "Enabled": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-sample-volume-plugin", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` + +(output formatted for readability) + +### Formatting the output + +```bash +$ docker plugin inspect -f '{{.Id}}' tiborvass/sample-volume-plugin:latest + +8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21 +``` + + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin disable](plugin_disable.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_install.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_install.md new file mode 100644 index 0000000..78d9a61 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_install.md @@ -0,0 +1,75 @@ +--- +title: "plugin install" +description: "the plugin install command description and usage" +keywords: "plugin, install" +--- + + + +# plugin install + +```markdown +Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...] + +Install a plugin + +Options: + --alias string Local name for plugin + --disable Do not enable the plugin on install + --disable-content-trust Skip image verification (default true) + --grant-all-permissions Grant all permissions necessary to run the plugin + --help Print usage +``` + +## Description + +Installs and enables a plugin. Docker looks first for the plugin on your Docker +host. If the plugin does not exist locally, then the plugin is pulled from +the registry. Note that the minimum required registry version to distribute +plugins is 2.3.0 + +## Examples + +The following example installs `vieus/sshfs` plugin and [sets](plugin_set.md) its +`DEBUG` environment variable to `1`. To install, `pull` the plugin from Docker +Hub and prompt the user to accept the list of privileges that the plugin needs, +set the plugin's parameters and enable the plugin. + +```bash +$ docker plugin install vieux/sshfs DEBUG=1 + +Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +vieux/sshfs +``` + +After the plugin is installed, it appears in the list of plugins: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 vieux/sshfs latest sshFS plugin for Docker true +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_ls.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_ls.md new file mode 100644 index 0000000..64661de --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_ls.md @@ -0,0 +1,55 @@ +--- +title: "plugin ls" +description: "The plugin ls command description and usage" +keywords: "plugin, list" +--- + + + +# plugin ls + +```markdown +Usage: docker plugin ls [OPTIONS] + +List plugins + +Aliases: + ls, list + +Options: + --help Print usage + --no-trunc Don't truncate output +``` + +## Description + +Lists all the plugins that are currently installed. You can install plugins +using the [`docker plugin install`](plugin_install.md) command. + +## Examples + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_push.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_push.md new file mode 100644 index 0000000..905fa82 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_push.md @@ -0,0 +1,55 @@ +--- +title: "plugin push" +description: "the plugin push command description and usage" +keywords: "plugin, push" +--- + + + +```markdown +Usage: docker plugin push [OPTIONS] PLUGIN[:TAG] + +Push a plugin to a registry + +Options: + --help Print usage +``` + +## Description + +After you have created a plugin using `docker plugin create` and the plugin is +ready for distribution, use `docker plugin push` to share your images to Docker +Hub or a self-hosted registry. + +Registry credentials are managed by [docker login](login.md). + +## Examples + +The following example shows how to push a sample `user/plugin`. + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d456 user/plugin latest A sample plugin for Docker false +$ docker plugin push user/plugin +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_rm.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_rm.md new file mode 100644 index 0000000..c820c86 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_rm.md @@ -0,0 +1,61 @@ +--- +title: "plugin rm" +description: "the plugin rm command description and usage" +keywords: "plugin, rm" +--- + + + +# plugin rm + +```markdown +Usage: docker plugin rm [OPTIONS] PLUGIN [PLUGIN...] + +Remove one or more plugins + +Aliases: + rm, remove + +Options: + -f, --force Force the removal of an active plugin + --help Print usage +``` + +## Description + +Removes a plugin. You cannot remove a plugin if it is enabled, you must disable +a plugin using the [`docker plugin disable`](plugin_disable.md) before removing +it (or use --force, use of force is not recommended, since it can affect +functioning of running containers using the plugin). + +## Examples + +The following example disables and removes the `sample-volume-plugin:latest` +plugin: + +```bash +$ docker plugin disable tiborvass/sample-volume-plugin +tiborvass/sample-volume-plugin + +$ docker plugin rm tiborvass/sample-volume-plugin:latest +tiborvass/sample-volume-plugin +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_set.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_set.md new file mode 100644 index 0000000..4b7ce61 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_set.md @@ -0,0 +1,114 @@ +--- +title: "plugin set" +description: "the plugin set command description and usage" +keywords: "plugin, set" +--- + + + +# plugin set + +```markdown +Usage: docker plugin set PLUGIN KEY=VALUE [KEY=VALUE...] + +Change settings for a plugin + +Options: + --help Print usage +``` + +## Description + +Change settings for a plugin. The plugin must be disabled. + +The settings currently supported are: + * env variables + * source of mounts + * path of devices + * args + +## Examples + +### Change an environment variable + +The following example change the env variable `DEBUG` on the +`sample-volume-plugin` plugin. + +```bash +$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin + +[DEBUG=0] + +$ docker plugin set tiborvass/sample-volume-plugin DEBUG=1 + +$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin +[DEBUG=1] +``` + +### Change the source of a mount + +The following example change the source of the `mymount` mount on +the `myplugin` plugin. + +```bash +$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin +/foo + +$ docker plugins set myplugin mymount.source=/bar + +$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin +/bar +``` + +> **Note**: Since only `source` is settable in `mymount`, +> `docker plugins set mymount=/bar myplugin` would work too. + +### Change a device path + +The following example change the path of the `mydevice` device on +the `myplugin` plugin. + +```bash +$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin +/dev/foo + +$ docker plugins set myplugin mydevice.path=/dev/bar + +$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin +/dev/bar +``` + +> **Note**: Since only `path` is settable in `mydevice`, +> `docker plugins set mydevice=/dev/bar myplugin` would work too. + +### Change the source of the arguments + +The following example change the source of the args on the `myplugin` plugin. + +```bash +$ docker plugin inspect -f '{{.Settings.Args}}' myplugin +["foo", "bar"] + +$ docker plugins set myplugin args="foo bar baz" + +$ docker plugin inspect -f '{{.Settings.Args}}' myplugin +["foo", "bar", "baz"] +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/plugin_upgrade.md b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_upgrade.md new file mode 100644 index 0000000..38191ff --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/plugin_upgrade.md @@ -0,0 +1,88 @@ +--- +title: "plugin upgrade" +description: "the plugin upgrade command description and usage" +keywords: "plugin, upgrade" +--- + + + +# plugin upgrade + +```markdown +Usage: docker plugin upgrade [OPTIONS] PLUGIN [REMOTE] + +Upgrade a plugin + +Options: + --disable-content-trust Skip image verification (default true) + --grant-all-permissions Grant all permissions necessary to run the plugin + --help Print usage + --skip-remote-check Do not check if specified remote plugin matches existing plugin image +``` + +## Description + +Upgrades an existing plugin to the specified remote plugin image. If no remote +is specified, Docker will re-pull the current image and use the updated version. +All existing references to the plugin will continue to work. +The plugin must be disabled before running the upgrade. + +## Examples + +The following example installs `vieus/sshfs` plugin, uses it to create and use +a volume, then upgrades the plugin. + +```bash +$ docker plugin install vieux/sshfs DEBUG=1 + +Plugin "vieux/sshfs:next" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +vieux/sshfs:next + +$ docker volume create -d vieux/sshfs:next -o sshcmd=root@1.2.3.4:/tmp/shared -o password=XXX sshvolume +sshvolume +$ docker run -it -v sshvolume:/data alpine sh -c "touch /data/hello" +$ docker plugin disable -f vieux/sshfs:next +viex/sshfs:next + +# Here docker volume ls doesn't show 'sshfsvolume', since the plugin is disabled +$ docker volume ls +DRIVER VOLUME NAME + +$ docker plugin upgrade vieux/sshfs:next vieux/sshfs:next +Plugin "vieux/sshfs:next" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +Upgrade plugin vieux/sshfs:next to vieux/sshfs:next +$ docker plugin enable vieux/sshfs:next +viex/sshfs:next +$ docker volume ls +DRIVER VOLUME NAME +viuex/sshfs:next sshvolume +$ docker run -it -v sshvolume:/data alpine sh -c "ls /data" +hello +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/port.md b/vendor/github.com/moby/moby/docs/reference/commandline/port.md new file mode 100644 index 0000000..c38763e --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/port.md @@ -0,0 +1,47 @@ +--- +title: "port" +description: "The port command description and usage" +keywords: "port, mapping, container" +--- + + + +# port + +```markdown +Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] + +List port mappings or a specific mapping for the container + +Options: + --help Print usage +``` + +## Examples + +### Show all mapped ports + +You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or +just a specific mapping: + +```bash +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test +$ docker port test +7890/tcp -> 0.0.0.0:4321 +9876/tcp -> 0.0.0.0:1234 +$ docker port test 7890/tcp +0.0.0.0:4321 +$ docker port test 7890/udp +2014/06/24 11:53:36 Error: No public port '7890/udp' published for test +$ docker port test 7890 +0.0.0.0:4321 +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/ps.md b/vendor/github.com/moby/moby/docs/reference/commandline/ps.md new file mode 100644 index 0000000..d0e956e --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/ps.md @@ -0,0 +1,390 @@ +--- +title: "ps" +description: "The ps command description and usage" +keywords: "container, running, list" +--- + + + +# ps + +```markdown +Usage: docker ps [OPTIONS] + +List containers + +Options: + -a, --all Show all containers (default shows just running) + -f, --filter value Filter output based on conditions provided (default []) + - ancestor=([:tag]||) + containers created from an image or a descendant. + - before=(|) + - exited= an exit code of + - health=(starting|healthy|unhealthy|none) + - id= a container's ID + - isolation=(`default`|`process`|`hyperv`) (Windows daemon only) + - is-task=(true|false) + - label= or label== + - name= a container's name + - network=(|) + - since=(|) + - status=(created|restarting|removing|running|paused|exited) + - volume=(|) + --format string Pretty-print containers using a Go template + --help Print usage + -n, --last int Show n last created containers (includes all states) (default -1) + -l, --latest Show the latest created container (includes all states) + --no-trunc Don't truncate output + -q, --quiet Only display numeric IDs + -s, --size Display total file sizes +``` + +## Examples + +### Prevent truncating output + +Running `docker ps --no-trunc` showing 2 linked containers. + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp +d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db +``` + +### Show both running and stopped containers + +The `docker ps` command only shows running containers by default. To see all +containers, use the `-a` (or `--all`) flag: + +```bash +$ docker ps -a +``` + +`docker ps` groups exposed ports into a single range if possible. E.g., a +container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in +the `PORTS` column. + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* id (container's id) +* label (`label=` or `label==`) +* name (container's name) +* exited (int - the code of exited containers. Only useful with `--all`) +* status (`created|restarting|running|removing|paused|exited|dead`) +* ancestor (`[:]`, `` or ``) - filters containers that were created from the given image or a descendant. +* before (container's id or name) - filters containers created before given id or name +* since (container's id or name) - filters containers created since given id or name +* isolation (`default|process|hyperv`) (Windows daemon only) +* volume (volume name or mount point) - filters containers that mount volumes. +* network (network id or name) - filters containers connected to the provided network +* health (starting|healthy|unhealthy|none) - filters containers based on healthcheck status + +#### label + +The `label` filter matches containers based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches containers with the `color` label regardless of its value. + +```bash +$ docker ps --filter "label=color" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +673394ef1d4c busybox "top" 47 seconds ago Up 45 seconds nostalgic_shockley +d85756f57265 busybox "top" 52 seconds ago Up 51 seconds high_albattani +``` + +The following filter matches containers with the `color` label with the `blue` value. + +```bash +$ docker ps --filter "label=color=blue" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +d85756f57265 busybox "top" About a minute ago Up About a minute high_albattani +``` + +#### name + +The `name` filter matches on all or part of a container's name. + +The following filter matches all containers with a name containing the `nostalgic_stallman` string. + +```bash +$ docker ps --filter "name=nostalgic_stallman" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9b6247364a03 busybox "top" 2 minutes ago Up 2 minutes nostalgic_stallman +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker ps --filter "name=nostalgic" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +715ebfcee040 busybox "top" 3 seconds ago Up 1 second i_am_nostalgic +9b6247364a03 busybox "top" 7 minutes ago Up 7 minutes nostalgic_stallman +673394ef1d4c busybox "top" 38 minutes ago Up 38 minutes nostalgic_shockley +``` + +#### exited + +The `exited` filter matches containers by exist status code. For example, to +filter for containers that have exited successfully: + +```bash +$ docker ps -a --filter 'exited=0' + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey +106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani +48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds +``` + +#### Filter by exit signal + +You can use a filter to locate containers that exited with status of `137` +meaning a `SIGKILL(9)` killed them. + +```none +$ docker ps -a --filter 'exited=137' + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +b3e1c0ed5bfe ubuntu:latest "sleep 1000" 12 seconds ago Exited (137) 5 seconds ago grave_kowalevski +a2eb5558d669 redis:latest "/entrypoint.sh redi 2 hours ago Exited (137) 2 hours ago sharp_lalande +``` + +Any of these events result in a `137` status: + +* the `init` process of the container is killed manually +* `docker kill` kills the container +* Docker daemon restarts which kills all running containers + +#### status + +The `status` filter matches containers by status. You can filter using +`created`, `restarting`, `running`, `removing`, `paused`, `exited` and `dead`. For example, +to filter for `running` containers: + +```bash +$ docker ps --filter status=running + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic +d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top +9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman +``` + +To filter for `paused` containers: + +```bash +$ docker ps --filter status=paused + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley +``` + +#### ancestor + +The `ancestor` filter matches containers based on its image or a descendant of +it. The filter supports the following image representation: + +- image +- image:tag +- image:tag@digest +- short-id +- full-id + +If you don't specify a `tag`, the `latest` tag is used. For example, to filter +for containers that use the latest `ubuntu` image: + +```bash +$ docker ps --filter ancestor=ubuntu + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace +5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet +82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose +bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath +``` + +Match containers based on the `ubuntu-c1` image which, in this case, is a child +of `ubuntu`: + +```bash +$ docker ps --filter ancestor=ubuntu-c1 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace +``` + +Match containers based on the `ubuntu` version `12.04.5` image: + +```bash +$ docker ps --filter ancestor=ubuntu:12.04.5 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose +``` + +The following matches containers based on the layer `d0e008c6cf02` or an image +that have this layer in its layer stack. + +```bash +$ docker ps --filter ancestor=d0e008c6cf02 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose +``` + +#### Create time + +##### before + +The `before` filter shows only containers created before the container with +given id or name. For example, having these containers created: + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9c3527ed70ce busybox "top" 14 seconds ago Up 15 seconds desperate_dubinsky +4aace5031105 busybox "top" 48 seconds ago Up 49 seconds focused_hamilton +6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat +``` + +Filtering with `before` would give: + +```bash +$ docker ps -f before=9c3527ed70ce + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4aace5031105 busybox "top" About a minute ago Up About a minute focused_hamilton +6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat +``` + +##### since + +The `since` filter shows only containers created since the container with given +id or name. For example, with the same containers as in `before` filter: + +```bash +$ docker ps -f since=6e63f6ff38b0 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9c3527ed70ce busybox "top" 10 minutes ago Up 10 minutes desperate_dubinsky +4aace5031105 busybox "top" 10 minutes ago Up 10 minutes focused_hamilton +``` + +#### volume + +The `volume` filter shows only containers that mount a specific volume or have +a volume mounted in a specific path: + +```bash +$ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" +CONTAINER ID MOUNTS +9c3527ed70ce remote-volume + +$ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" +CONTAINER ID MOUNTS +9c3527ed70ce remote-volume +``` + +#### network + +The `network` filter shows only containers that are connected to a network with +a given name or id. + +The following filter matches all containers that are connected to a network +with a name containing `net1`. + +```bash +$ docker run -d --net=net1 --name=test1 ubuntu top +$ docker run -d --net=net2 --name=test2 ubuntu top + +$ docker ps --filter network=net1 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 +``` + +The network filter matches on both the network's name and id. The following +example shows all containers that are attached to the `net1` network, using +the network id as a filter; + +```bash +$ docker network inspect --format "{{.ID}}" net1 + +8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + +$ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 +``` + +### Formatting + +The formatting option (`--format`) pretty-prints container output using a Go +template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +--------------|---------------------------------------------------------------------------------------------------- +`.ID` | Container ID +`.Image` | Image ID +`.Command` | Quoted command +`.CreatedAt` | Time when the container was created. +`.RunningFor` | Elapsed time since the container was started. +`.Ports` | Exposed ports. +`.Status` | Container status. +`.Size` | Container disk size. +`.Names` | Container names. +`.Labels` | All labels assigned to the container. +`.Label` | Value of a specific label for this container. For example `'{{.Label "com.docker.swarm.cpu"}}'` +`.Mounts` | Names of the volumes mounted in this container. +`.Networks` | Names of the networks attached to this container. + +When using the `--format` option, the `ps` command will either output the data +exactly as the template declares or, when using the `table` directive, includes +column headers as well. + +The following example uses a template without headers and outputs the `ID` and +`Command` entries separated by a colon for all running containers: + +```bash +$ docker ps --format "{{.ID}}: {{.Command}}" + +a87ecb4f327c: /bin/sh -c #(nop) MA +01946d9d34d8: /bin/sh -c #(nop) MA +c1d3b0166030: /bin/sh -c yum -y up +41d50ecd2f57: /bin/sh -c #(nop) MA +``` + +To list all running containers with their labels in a table format you can use: + +```bash +$ docker ps --format "table {{.ID}}\t{{.Labels}}" + +CONTAINER ID LABELS +a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd +01946d9d34d8 +c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 +41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/pull.md b/vendor/github.com/moby/moby/docs/reference/commandline/pull.md new file mode 100644 index 0000000..7bf3df8 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/pull.md @@ -0,0 +1,254 @@ +--- +title: "pull" +description: "The pull command description and usage" +keywords: "pull, image, hub, docker" +--- + + + +# pull + +```markdown +Usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST] + +Pull an image or a repository from a registry + +Options: + -a, --all-tags Download all tagged images in the repository + --disable-content-trust Skip image verification (default true) + --help Print usage +``` + +## Description + +Most of your images will be created on top of a base image from the +[Docker Hub](https://hub.docker.com) registry. + +[Docker Hub](https://hub.docker.com) contains many pre-built images that you +can `pull` and try without needing to define and configure your own. + +To download a particular image, or set of images (i.e., a repository), +use `docker pull`. + +### Proxy configuration + +If you are behind an HTTP proxy server, for example in corporate settings, +before open a connect to registry, you may need to configure the Docker +daemon's proxy settings, using the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` +environment variables. To set these environment variables on a host using +`systemd`, refer to the [control and configure Docker with systemd](https://docs.docker.com/engine/admin/systemd/#http-proxy) +for variables configuration. + +### Concurrent downloads + +By default the Docker daemon will pull three layers of an image at a time. +If you are on a low bandwidth connection this may cause timeout issues and you may want to lower +this via the `--max-concurrent-downloads` daemon option. See the +[daemon documentation](dockerd.md) for more details. + +## Examples + +### Pull an image from Docker Hub + +To download a particular image, or set of images (i.e., a repository), use +`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a +default. This command pulls the `debian:latest` image: + +```bash +$ docker pull debian + +Using default tag: latest +latest: Pulling from library/debian +fdd5d7827f33: Pull complete +a3ed95caeb02: Pull complete +Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa +Status: Downloaded newer image for debian:latest +``` + +Docker images can consist of multiple layers. In the example above, the image +consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`. + +Layers can be reused by images. For example, the `debian:jessie` image shares +both layers with `debian:latest`. Pulling the `debian:jessie` image therefore +only pulls its metadata, but not its layers, because all layers are already +present locally: + +```bash +$ docker pull debian:jessie + +jessie: Pulling from library/debian +fdd5d7827f33: Already exists +a3ed95caeb02: Already exists +Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e +Status: Downloaded newer image for debian:jessie +``` + +To see which images are present locally, use the [`docker images`](images.md) +command: + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +debian jessie f50f9524513f 5 days ago 125.1 MB +debian latest f50f9524513f 5 days ago 125.1 MB +``` + +Docker uses a content-addressable image store, and the image ID is a SHA256 +digest covering the image's configuration and layers. In the example above, +`debian:jessie` and `debian:latest` have the same image ID because they are +actually the *same* image tagged with different names. Because they are the +same image, their layers are stored only once and do not consume extra disk +space. + +For more information about images, layers, and the content-addressable store, +refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/). + + +### Pull an image by digest (immutable identifier) + +So far, you've pulled images by their name (and "tag"). Using names and tags is +a convenient way to work with images. When using tags, you can `docker pull` an +image again to make sure you have the most up-to-date version of that image. +For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu +14.04 image. + +In some cases you don't want images to be updated to newer versions, but prefer +to use a fixed version of an image. Docker enables you to pull an image by its +*digest*. When pulling an image by digest, you specify *exactly* which version +of an image to pull. Doing so, allows you to "pin" an image to that version, +and guarantee that the image you're using is always the same. + +To know the digest of an image, pull the image first. Let's pull the latest +`ubuntu:14.04` image from Docker Hub: + +```bash +$ docker pull ubuntu:14.04 + +14.04: Pulling from library/ubuntu +5a132a7e7af1: Pull complete +fd2731e4c50c: Pull complete +28a2f68d1120: Pull complete +a3ed95caeb02: Pull complete +Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +Status: Downloaded newer image for ubuntu:14.04 +``` + +Docker prints the digest of the image after the pull has finished. In the example +above, the digest of the image is: + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Docker also prints the digest of an image when *pushing* to a registry. This +may be useful if you want to pin to a version of the image you just pushed. + +A digest takes the place of the tag when pulling an image, for example, to +pull the above image by digest, run the following command: + +```bash +$ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu +5a132a7e7af1: Already exists +fd2731e4c50c: Already exists +28a2f68d1120: Already exists +a3ed95caeb02: Already exists +Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +``` + +Digest can also be used in the `FROM` of a Dockerfile, for example: + +```Dockerfile +FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +MAINTAINER some maintainer +``` + +> **Note**: Using this feature "pins" an image to a specific version in time. +> Docker will therefore not pull updated versions of an image, which may include +> security updates. If you want to pull an updated image, you need to change the +> digest accordingly. + + +### Pull from a different registry + +By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to +manually specify the path of a registry to pull from. For example, if you have +set up a local registry, you can specify its path to pull from it. A registry +path is similar to a URL, but does not contain a protocol specifier (`https://`). + +The following command pulls the `testing/test-image` image from a local registry +listening on port 5000 (`myregistry.local:5000`): + +```bash +$ docker pull myregistry.local:5000/testing/test-image +``` + +Registry credentials are managed by [docker login](login.md). + +Docker uses the `https://` protocol to communicate with a registry, unless the +registry is allowed to be accessed over an insecure connection. Refer to the +[insecure registries](dockerd.md#insecure-registries) section for more information. + + +### Pull a repository with multiple images + +By default, `docker pull` pulls a *single* image from the registry. A repository +can contain multiple images. To pull all images from a repository, provide the +`-a` (or `--all-tags`) option when using `docker pull`. + +This command pulls all images from the `fedora` repository: + +```bash +$ docker pull --all-tags fedora + +Pulling repository fedora +ad57ef8d78d7: Download complete +105182bb5e8b: Download complete +511136ea3c5a: Download complete +73bd853d2ea5: Download complete +.... + +Status: Downloaded newer image for fedora +``` + +After the pull has completed use the `docker images` command to see the +images that were pulled. The example below shows all the `fedora` images +that are present locally: + +```bash +$ docker images fedora + +REPOSITORY TAG IMAGE ID CREATED SIZE +fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB +fedora 20 105182bb5e8b 5 days ago 372.7 MB +fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB +fedora latest 105182bb5e8b 5 days ago 372.7 MB +``` + +### Cancel a pull + +Killing the `docker pull` process, for example by pressing `CTRL-c` while it is +running in a terminal, will terminate the pull operation. + +```bash +$ docker pull fedora + +Using default tag: latest +latest: Pulling from library/fedora +a3ed95caeb02: Pulling fs layer +236608c7b546: Pulling fs layer +^C +``` + +> **Note**: Technically, the Engine terminates a pull operation when the +> connection between the Docker Engine daemon and the Docker Engine client +> initiating the pull is lost. If the connection with the Engine daemon is +> lost for other reasons than a manual interaction, the pull is also aborted. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/push.md b/vendor/github.com/moby/moby/docs/reference/commandline/push.md new file mode 100644 index 0000000..fa63cfe --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/push.md @@ -0,0 +1,82 @@ +--- +title: "push" +description: "The push command description and usage" +keywords: "share, push, image" +--- + + + +# push + +```markdown +Usage: docker push [OPTIONS] NAME[:TAG] + +Push an image or a repository to a registry + +Options: + --disable-content-trust Skip image verification (default true) + --help Print usage +``` + +## Description + +Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) +registry or to a self-hosted one. + +Refer to the [`docker tag`](tag.md) reference for more information about valid +image and tag names. + +Killing the `docker push` process, for example by pressing `CTRL-c` while it is +running in a terminal, terminates the push operation. + +Progress bars are shown during docker push, which show the uncompressed size. The +actual amount of data that's pushed will be compressed before sending, so the uploaded + size will not be reflected by the progress bar. + +Registry credentials are managed by [docker login](login.md). + +### Concurrent uploads + +By default the Docker daemon will push five layers of an image at a time. +If you are on a low bandwidth connection this may cause timeout issues and you may want to lower +this via the `--max-concurrent-uploads` daemon option. See the +[daemon documentation](dockerd.md) for more details. + +## Examples + +### Push a new image to a registry + +First save the new image by finding the container ID (using [`docker ps`](ps.md)) +and then committing it to a new image name. Note that only `a-z0-9-_.` are +allowed when naming images: + +```bash +$ docker commit c16378f943fe rhel-httpd +``` + +Now, push the image to the registry using the image ID. In this example the +registry is on host named `registry-host` and listening on port `5000`. To do +this, tag the image with the host name or IP address, and the port of the +registry: + +```bash +$ docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd + +$ docker push registry-host:5000/myadmin/rhel-httpd +``` + +Check that this worked by running: + +```bash +$ docker images +``` + +You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` +listed. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/rename.md b/vendor/github.com/moby/moby/docs/reference/commandline/rename.md new file mode 100644 index 0000000..90268a2 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/rename.md @@ -0,0 +1,35 @@ +--- +title: "rename" +description: "The rename command description and usage" +keywords: "rename, docker, container" +--- + + + +# rename + +```markdown +Usage: docker rename CONTAINER NEW_NAME + +Rename a container + +Options: + --help Print usage +``` + +## Description + +The `docker rename` command renames a container. + +## Examples + +```bash +$ docker rename my_container my_new_container +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/restart.md b/vendor/github.com/moby/moby/docs/reference/commandline/restart.md new file mode 100644 index 0000000..a2796af --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/restart.md @@ -0,0 +1,32 @@ +--- +title: "restart" +description: "The restart command description and usage" +keywords: "restart, container, Docker" +--- + + + +# restart + +```markdown +Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] + +Restart one or more containers + +Options: + --help Print usage + -t, --time int Seconds to wait for stop before killing the container (default 10) +``` + +## Examples + +```bash +$ docker restart my_container +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/rm.md b/vendor/github.com/moby/moby/docs/reference/commandline/rm.md new file mode 100644 index 0000000..8ee5b28 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/rm.md @@ -0,0 +1,100 @@ +--- +title: "rm" +description: "The rm command description and usage" +keywords: "remove, Docker, container" +--- + + + +# rm + +```markdown +Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] + +Remove one or more containers + +Options: + -f, --force Force the removal of a running container (uses SIGKILL) + --help Print usage + -l, --link Remove the specified link + -v, --volumes Remove the volumes associated with the container +``` + +## Examples + +### Remove a container + +This will remove the container referenced under the link +`/redis`. + +```bash +$ docker rm /redis + +/redis +``` + +### Remove a link specified with `--link` on the default bridge network + +This will remove the underlying link between `/webapp` and the `/redis` +containers on the default bridge network, removing all network communication +between the two containers. This does not apply when `--link` is used with +user-specified networks. + +```bash +$ docker rm --link /webapp/redis + +/webapp/redis +``` + +### Force-remove a running container + +This command will force-remove a running container. + +```bash +$ docker rm --force redis + +redis +``` + +The main process inside the container referenced under the link `redis` will receive +`SIGKILL`, then the container will be removed. + +### Remove all stopped containers + +```bash +$ docker rm $(docker ps -a -q) +``` + +This command will delete all stopped containers. The command +`docker ps -a -q` will return all existing container IDs and pass them to +the `rm` command which will delete them. Any running containers will not be +deleted. + +### Remove a container and its volumes + +```bash +$ docker rm -v redis +redis +``` + +This command will remove the container and any volumes associated with it. +Note that if a volume was specified with a name, it will not be removed. + +### Remove a container and selectively remove volumes + +```bash +$ docker create -v awesome:/foo -v /bar --name hello redis +hello +$ docker rm -v hello +``` + +In this example, the volume for `/foo` will remain intact, but the volume for +`/bar` will be removed. The same behavior holds for volumes inherited with +`--volumes-from`. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/rmi.md b/vendor/github.com/moby/moby/docs/reference/commandline/rmi.md new file mode 100644 index 0000000..28e21d4 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/rmi.md @@ -0,0 +1,105 @@ +--- +title: "rmi" +description: "The rmi command description and usage" +keywords: "remove, image, Docker" +--- + + + +# rmi + +```markdown +Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] + +Remove one or more images + +Options: + -f, --force Force removal of the image + --help Print usage + --no-prune Do not delete untagged parents +``` + +## Examples + +You can remove an image using its short or long ID, its tag, or its digest. If +an image has one or more tag referencing it, you must remove all of them before +the image is removed. Digest references are removed automatically when an image +is removed by tag. + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) +test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) +test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + +$ docker rmi fd484f19954f + +Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force +2013/12/11 05:47:16 Error: failed to remove one or more images + +$ docker rmi test1 + +Untagged: test1:latest + +$ docker rmi test2 + +Untagged: test2:latest + + +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + +$ docker rmi test + +Untagged: test:latest +Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 +``` + +If you use the `-f` flag and specify the image's short or long ID, then this +command untags and removes all images that match the specified ID. + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) +test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) +test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + +$ docker rmi -f fd484f19954f + +Untagged: test1:latest +Untagged: test:latest +Untagged: test2:latest +Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 +``` + +An image pulled by digest has no tag associated with it: + +```bash +$ docker images --digests + +REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB +``` + +To remove an image using its digest: + +```bash +$ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 +Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 +Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/run.md b/vendor/github.com/moby/moby/docs/reference/commandline/run.md new file mode 100644 index 0000000..7452f7b --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/run.md @@ -0,0 +1,805 @@ +--- +title: "run" +description: "The run command description and usage" +keywords: "run, command, container" +--- + + + +# run + +```markdown +Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] + +Run a command in a new container + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --blkio-weight-device value Block IO weight (relative device weight) (default []) + --cap-add value Add Linux capabilities (default []) + --cap-drop value Drop Linux capabilities (default []) + --cgroup-parent string Optional parent cgroup for the container + --cidfile string Write the container ID to the file + --cpu-count int The number of CPUs available for execution by the container. + Windows daemon only. On Windows Server containers, this is + approximated as a percentage of total CPU usage. + --cpu-percent int Limit percentage of CPU available for execution + by the container. Windows daemon only. + The processor resource controls are mutually + exclusive, the order of precedence is CPUCount + first, then CPUShares, and CPUPercent last. + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpus NanoCPUs Number of CPUs (default 0.000) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + -d, --detach Run container in background and print container ID + --detach-keys string Override the key sequence for detaching a container + --device value Add a host device to the container (default []) + --device-read-bps value Limit read rate (bytes per second) from a device (default []) + --device-read-iops value Limit read rate (IO per second) from a device (default []) + --device-write-bps value Limit write rate (bytes per second) to a device (default []) + --device-write-iops value Limit write rate (IO per second) to a device (default []) + --disable-content-trust Skip image verification (default true) + --dns value Set custom DNS servers (default []) + --dns-option value Set DNS options (default []) + --dns-search value Set custom DNS search domains (default []) + --entrypoint string Overwrite the default ENTRYPOINT of the image + -e, --env value Set environment variables (default []) + --env-file value Read in a file of environment variables (default []) + --expose value Expose a port or a range of ports (default []) + --group-add value Add additional groups to join (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) + --help Print usage + -h, --hostname string Container host name + --init Run an init inside the container that forwards signals and reaps processes + --init-path string Path to the docker-init binary + -i, --interactive Keep STDIN open even if not attached + --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) + (Windows only). The format is ``. + Unit is optional and can be `b` (bytes per second), + `k` (kilobytes per second), `m` (megabytes per second), + or `g` (gigabytes per second). If you omit the unit, + the system uses bytes per second. + --io-maxbandwidth and --io-maxiops are mutually exclusive options. + --io-maxiops uint Maximum IOps limit for the system drive (Windows only) + --ip string Container IPv4 address (e.g. 172.30.100.104) + --ip6 string Container IPv6 address (e.g. 2001:db8::33) + --ipc string IPC namespace to use + --isolation string Container isolation technology + --kernel-memory string Kernel memory limit + -l, --label value Set meta data on a container (default []) + --label-file value Read in a line delimited file of labels (default []) + --link value Add link to another container (default []) + --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) + --log-driver string Logging driver for the container + --log-opt value Log driver options (default []) + --mac-address string Container MAC address (e.g. 92:d0:c6:0a:29:33) + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) + --name string Assign a name to the container + --network-alias value Add network-scoped alias for the container (default []) + --network string Connect a container to a network + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-healthcheck Disable any container-specified HEALTHCHECK + --oom-kill-disable Disable OOM Killer + --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) + --pid string PID namespace to use + --pids-limit int Tune container pids limit (set -1 for unlimited) + --privileged Give extended privileges to this container + -p, --publish value Publish a container's port(s) to the host (default []) + -P, --publish-all Publish all exposed ports to random ports + --read-only Mount the container's root filesystem as read only + --restart string Restart policy to apply when a container exits (default "no") + Possible values are : no, on-failure[:max-retry], always, unless-stopped + --rm Automatically remove the container when it exits + --runtime string Runtime to use for this container + --security-opt value Security Options (default []) + --shm-size string Size of /dev/shm, default value is 64MB. + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --sig-proxy Proxy received signals to the process (default true) + --stop-signal string Signal to stop a container, SIGTERM by default (default "SIGTERM") + --stop-timeout=10 Timeout (in seconds) to stop a container + --storage-opt value Storage driver options for the container (default []) + --sysctl value Sysctl options (default map[]) + --tmpfs value Mount a tmpfs directory (default []) + -t, --tty Allocate a pseudo-TTY + --ulimit value Ulimit options (default []) + -u, --user string Username or UID (format: [:]) + --userns string User namespace to use + 'host': Use the Docker host user namespace + '': Use the Docker daemon user namespace specified by `--userns-remap` option. + --uts string UTS namespace to use + -v, --volume value Bind mount a volume (default []). The format + is `[host-src:]container-dest[:]`. + The comma-delimited `options` are [rw|ro], + [z|Z], [[r]shared|[r]slave|[r]private], and + [nocopy]. The 'host-src' is an absolute path + or a name value. + --volume-driver string Optional volume driver for the container + --volumes-from value Mount volumes from the specified container(s) (default []) + -w, --workdir string Working directory inside the container +``` + +## Description + +The `docker run` command first `creates` a writeable container layer over the +specified image, and then `starts` it using the specified command. That is, +`docker run` is equivalent to the API `/containers/create` then +`/containers/(id)/start`. A stopped container can be restarted with all its +previous changes intact using `docker start`. See `docker ps -a` to view a list +of all containers. + +The `docker run` command can be used in combination with `docker commit` to +[*change the command that a container runs*](commit.md). There is additional detailed information about `docker run` in the [Docker run reference](../run.md). + +For information on connecting a container to a network, see the ["*Docker network overview*"](https://docs.docker.com/engine/userguide/networking/). + +## Examples + +### Assign name and allocate pseudo-TTY (--name, -it) + +```bash +$ docker run --name test -it debian + +root@d6c0fe130dba:/# exit 13 +$ echo $? +13 +$ docker ps -a | grep test +d6c0fe130dba debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test +``` + +This example runs a container named `test` using the `debian:latest` +image. The `-it` instructs Docker to allocate a pseudo-TTY connected to +the container's stdin; creating an interactive `bash` shell in the container. +In the example, the `bash` shell is quit by entering +`exit 13`. This exit code is passed on to the caller of +`docker run`, and is recorded in the `test` container's metadata. + +### Capture container ID (--cidfile) + +```bash +$ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" +``` + +This will create a container and print `test` to the console. The `cidfile` +flag makes Docker attempt to create a new file and write the container ID to it. +If the file exists already, Docker will return an error. Docker will close this +file when `docker run` exits. + +### Full container capabilities (--privileged) + +```bash +$ docker run -t -i --rm ubuntu bash +root@bc338942ef20:/# mount -t tmpfs none /mnt +mount: permission denied +``` + +This will *not* work, because by default, most potentially dangerous kernel +capabilities are dropped; including `cap_sys_admin` (which is required to mount +filesystems). However, the `--privileged` flag will allow it to run: + +```bash +$ docker run -t -i --privileged ubuntu bash +root@50e3f57e16e6:/# mount -t tmpfs none /mnt +root@50e3f57e16e6:/# df -h +Filesystem Size Used Avail Use% Mounted on +none 1.9G 0 1.9G 0% /mnt +``` + +The `--privileged` flag gives *all* capabilities to the container, and it also +lifts all the limitations enforced by the `device` cgroup controller. In other +words, the container can then do almost everything that the host can do. This +flag exists to allow special use-cases, like running Docker within Docker. + +### Set working directory (-w) + +```bash +$ docker run -w /path/to/dir/ -i -t ubuntu pwd +``` + +The `-w` lets the command being executed inside directory given, here +`/path/to/dir/`. If the path does not exist it is created inside the container. + +### Set storage driver options per container + +```bash +$ docker run -it --storage-opt size=120G fedora /bin/bash +``` + +This (size) will allow to set the container rootfs size to 120G at creation time. +This option is only available for the `devicemapper`, `btrfs`, `overlay2`, +`windowsfilter` and `zfs` graph drivers. +For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, +user cannot pass a size less than the Default BaseFS Size. +For the `overlay2` storage driver, the size option is only available if the +backing fs is `xfs` and mounted with the `pquota` mount option. +Under these conditions, user can pass any size less then the backing fs size. + +### Mount tmpfs (--tmpfs) + +```bash +$ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image +``` + +The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`, +`noexec`, `nosuid`, `size=65536k` options. + +### Mount volume (-v, --read-only) + +```bash +$ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd +``` + +The `-v` flag mounts the current working directory into the container. The `-w` +lets the command being executed inside the current working directory, by +changing into the directory to the value returned by `pwd`. So this +combination executes the command using the container, but inside the +current working directory. + +```bash +$ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash +``` + +When the host directory of a bind-mounted volume doesn't exist, Docker +will automatically create this directory on the host for you. In the +example above, Docker will create the `/doesnt/exist` +folder before starting your container. + +```bash +$ docker run --read-only -v /icanwrite busybox touch /icanwrite/here +``` + +Volumes can be used in combination with `--read-only` to control where +a container writes files. The `--read-only` flag mounts the container's root +filesystem as read only prohibiting writes to locations other than the +specified volumes for the container. + +```bash +$ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh +``` + +By bind-mounting the docker unix socket and statically linked docker +binary (refer to [get the linux binary]( +https://docs.docker.com/engine/installation/binaries/#/get-the-linux-binary)), +you give the container the full access to create and manipulate the host's +Docker daemon. + +On Windows, the paths must be specified using Windows-style semantics. + +```powershell +PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt +Contents of file + +PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt +Contents of file +``` + +The following examples will fail when using Windows-based containers, as the +destination of a volume or bind-mount inside the container must be one of: +a non-existing or empty directory; or a drive other than C:. Further, the source +of a bind mount must be a local directory, not a file. + +```powershell +net use z: \\remotemachine\share +docker run -v z:\foo:c:\dest ... +docker run -v \\uncpath\to\directory:c:\dest ... +docker run -v c:\foo\somefile.txt:c:\dest ... +docker run -v c:\foo:c: ... +docker run -v c:\foo:c:\existing-directory-with-contents ... +``` + +For in-depth information about volumes, refer to [manage data in containers](https://docs.docker.com/engine/tutorials/dockervolumes/) + +### Publish or expose port (-p, --expose) + +```bash +$ docker run -p 127.0.0.1:80:8080 ubuntu bash +``` + +This binds port `8080` of the container to port `80` on `127.0.0.1` of the host +machine. The [Docker User +Guide](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/) +explains in detail how to manipulate ports in Docker. + +```bash +$ docker run --expose 80 ubuntu bash +``` + +This exposes port `80` of the container without publishing the port to the host +system's interfaces. + +### Set environment variables (-e, --env, --env-file) + +```bash +$ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash +``` + +This sets simple (non-array) environmental variables in the container. For +illustration all three +flags are shown here. Where `-e`, `--env` take an environment variable and +value, or if no `=` is provided, then that variable's current value, set via +`export`, is passed through (i.e. `$MYVAR1` from the host is set to `$MYVAR1` +in the container). When no `=` is provided and that variable is not defined +in the client's environment then that variable will be removed from the +container's list of environment variables. All three flags, `-e`, `--env` and +`--env-file` can be repeated. + +Regardless of the order of these three flags, the `--env-file` are processed +first, and then `-e`, `--env` flags. This way, the `-e` or `--env` will +override variables as needed. + +```bash +$ cat ./env.list +TEST_FOO=BAR +$ docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO +TEST_FOO=This is a test +``` + +The `--env-file` flag takes a filename as an argument and expects each line +to be in the `VAR=VAL` format, mimicking the argument passed to `--env`. Comment +lines need only be prefixed with `#` + +An example of a file passed with `--env-file` + +```bash +$ cat ./env.list +TEST_FOO=BAR + +# this is a comment +TEST_APP_DEST_HOST=10.10.0.127 +TEST_APP_DEST_PORT=8888 +_TEST_BAR=FOO +TEST_APP_42=magic +helloWorld=true +123qwe=bar +org.spring.config=something + +# pass through this variable from the caller +TEST_PASSTHROUGH +$ TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +HOSTNAME=5198e0745561 +TEST_FOO=BAR +TEST_APP_DEST_HOST=10.10.0.127 +TEST_APP_DEST_PORT=8888 +_TEST_BAR=FOO +TEST_APP_42=magic +helloWorld=true +TEST_PASSTHROUGH=howdy +HOME=/root +123qwe=bar +org.spring.config=something + +$ docker run --env-file ./env.list busybox env +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +HOSTNAME=5198e0745561 +TEST_FOO=BAR +TEST_APP_DEST_HOST=10.10.0.127 +TEST_APP_DEST_PORT=8888 +_TEST_BAR=FOO +TEST_APP_42=magic +helloWorld=true +TEST_PASSTHROUGH= +HOME=/root +123qwe=bar +org.spring.config=something +``` + +### Set metadata on container (-l, --label, --label-file) + +A label is a `key=value` pair that applies metadata to a container. To label a container with two labels: + +```bash +$ docker run -l my-label --label com.example.foo=bar ubuntu bash +``` + +The `my-label` key doesn't specify a value so the label defaults to an empty +string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`). + +The `key=value` must be unique to avoid overwriting the label value. If you +specify labels with identical keys but different values, each subsequent value +overwrites the previous. Docker uses the last `key=value` you supply. + +Use the `--label-file` flag to load multiple labels from a file. Delimit each +label in the file with an EOL mark. The example below loads labels from a +labels file in the current directory: + +```bash +$ docker run --label-file ./labels ubuntu bash +``` + +The label-file format is similar to the format for loading environment +variables. (Unlike environment variables, labels are not visible to processes +running inside a container.) The following example illustrates a label-file +format: + +```none +com.example.label1="a label" + +# this is a comment +com.example.label2=another\ label +com.example.label3 +``` + +You can load multiple label-files by supplying multiple `--label-file` flags. + +For additional information on working with labels, see [*Labels - custom +metadata in Docker*](https://docs.docker.com/engine/userguide/labels-custom-metadata/) in the Docker User +Guide. + +### Connect a container to a network (--network) + +When you start a container use the `--network` flag to connect it to a network. +This adds the `busybox` container to the `my-net` network. + +```bash +$ docker run -itd --network=my-net busybox +``` + +You can also choose the IP addresses for the container with `--ip` and `--ip6` +flags when you start the container on a user-defined network. + +```bash +$ docker run -itd --network=my-net --ip=10.10.9.75 busybox +``` + +If you want to add a running container to a network use the `docker network connect` subcommand. + +You can connect multiple containers to the same network. Once connected, the +containers can communicate easily need only another container's IP address +or name. For `overlay` networks or custom plugins that support multi-host +connectivity, containers connected to the same multi-host network but launched +from different Engines can also communicate in this way. + +> **Note**: Service discovery is unavailable on the default bridge network. +> Containers can communicate via their IP addresses by default. To communicate +> by name, they must be linked. + +You can disconnect a container from a network using the `docker network +disconnect` command. + +### Mount volumes from container (--volumes-from) + +```bash +$ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd +``` + +The `--volumes-from` flag mounts all the defined volumes from the referenced +containers. Containers can be specified by repetitions of the `--volumes-from` +argument. The container ID may be optionally suffixed with `:ro` or `:rw` to +mount the volumes in read-only or read-write mode, respectively. By default, +the volumes are mounted in the same mode (read write or read only) as +the reference container. + +Labeling systems like SELinux require that proper labels are placed on volume +content mounted into a container. Without a label, the security system might +prevent the processes running inside the container from using the content. By +default, Docker does not change the labels set by the OS. + +To change the label in the container context, you can add either of two suffixes +`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file +objects on the shared volumes. The `z` option tells Docker that two containers +share the volume content. As a result, Docker labels the content with a shared +content label. Shared volume labels allow all containers to read/write content. +The `Z` option tells Docker to label the content with a private unshared label. +Only the current container can use a private volume. + +### Attach to STDIN/STDOUT/STDERR (-a) + +The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` +or `STDERR`. This makes it possible to manipulate the output and input as +needed. + +```bash +$ echo "test" | docker run -i -a stdin ubuntu cat - +``` + +This pipes data into a container and prints the container's ID by attaching +only to the container's `STDIN`. + +```bash +$ docker run -a stderr ubuntu echo test +``` + +This isn't going to print anything unless there's an error because we've +only attached to the `STDERR` of the container. The container's logs +still store what's been written to `STDERR` and `STDOUT`. + +```bash +$ cat somefile | docker run -i -a stdin mybuilder dobuild +``` + +This is how piping a file into a container could be done for a build. +The container's ID will be printed after the build is done and the build +logs could be retrieved using `docker logs`. This is +useful if you need to pipe a file or something else into a container and +retrieve the container's ID once the container has finished running. + +### Add host device to container (--device) + +```bash +$ docker run --device=/dev/sdc:/dev/xvdc \ + --device=/dev/sdd --device=/dev/zero:/dev/nulo \ + -i -t \ + ubuntu ls -l /dev/{xvdc,sdd,nulo} + +brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc +brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd +crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo +``` + +It is often necessary to directly expose devices to a container. The `--device` +option enables that. For example, a specific block storage device or loop +device or audio device can be added to an otherwise unprivileged container +(without the `--privileged` flag) and have the application directly access it. + +By default, the container will be able to `read`, `write` and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` +flag: + +```bash +$ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + +Command (m for help): q +$ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc +You will not be able to write the partition table. + +Command (m for help): q + +$ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc + +Command (m for help): q + +$ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc +fdisk: unable to open /dev/xvdc: Operation not permitted +``` + +> **Note**: `--device` cannot be safely used with ephemeral devices. Block devices +> that may be removed should not be added to untrusted containers with +> `--device`. + +### Restart policies (--restart) + +Use Docker's `--restart` to specify a container's *restart policy*. A restart +policy controls whether the Docker daemon restarts a container after exit. +Docker supports the following restart policies: + +| Policy | Result | +|-------------------|-----------------------------------------| +| `no` | Do not automatically restart the container when it exits. This is the default. | +| `failure` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts. | +| `always` | Always restart the container regardless of the exit status. When you specify always, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container. | + +```bash +$ docker run --restart=always redis +``` + +This will run the `redis` container with a restart policy of **always** +so that if the container exits, Docker will restart it. + +More detailed information on restart policies can be found in the +[Restart Policies (--restart)](../run.md#restart-policies-restart) +section of the Docker run reference page. + +### Add entries to container hosts file (--add-host) + +You can add other hosts into a container's `/etc/hosts` file by using one or +more `--add-host` flags. This example adds a static address for a host named +`docker`: + +```bash +$ docker run --add-host=docker:10.180.0.1 --rm -it debian + +root@f38c87f2a42d:/# ping docker +PING docker (10.180.0.1): 48 data bytes +56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms +56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms +^C--- docker ping statistics --- +2 packets transmitted, 2 packets received, 0% packet loss +round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms +``` + +Sometimes you need to connect to the Docker host from within your +container. To enable this, pass the Docker host's IP address to +the container using the `--add-host` flag. To find the host's address, +use the `ip addr show` command. + +The flags you pass to `ip addr show` depend on whether you are +using IPv4 or IPv6 networking in your containers. Use the following +flags for IPv4 address retrieval for a network device named `eth0`: + +```bash +$ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1` +$ docker run --add-host=docker:${HOSTIP} --rm -it debian +``` + +For IPv6 use the `-6` flag instead of the `-4` flag. For other network +devices, replace `eth0` with the correct device name (for example `docker0` +for the bridge device). + +### Set ulimits in container (--ulimit) + +Since setting `ulimit` settings in a container requires extra privileges not +available in the default container, you can set these using the `--ulimit` flag. +`--ulimit` is specified with a soft and hard limit as such: +`=[:]`, for example: + +```bash +$ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n" +1024 +``` + +> **Note**: If you do not provide a `hard limit`, the `soft limit` will be used +> for both values. If no `ulimits` are set, they will be inherited from +> the default `ulimits` set on the daemon. `as` option is disabled now. +> In other words, the following script is not supported: +> +> ```bash +> $ docker run -it --ulimit as=1024 fedora /bin/bash` +> ``` + +The values are sent to the appropriate `syscall` as they are set. +Docker doesn't perform any byte conversion. Take this into account when setting the values. + +#### For `nproc` usage + +Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to set the +maximum number of processes available to a user, not to a container. For example, start four +containers with `daemon` user: + +```bash +$ docker run -d -u daemon --ulimit nproc=3 busybox top + +$ docker run -d -u daemon --ulimit nproc=3 busybox top + +$ docker run -d -u daemon --ulimit nproc=3 busybox top + +$ docker run -d -u daemon --ulimit nproc=3 busybox top +``` + +The 4th container fails and reports "[8] System error: resource temporarily unavailable" error. +This fails because the caller set `nproc=3` resulting in the first three containers using up +the three processes quota set for the `daemon` user. + +### Stop container with signal (--stop-signal) + +The `--stop-signal` flag sets the system call signal that will be sent to the container to exit. +This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, +or a signal name in the format SIGNAME, for instance SIGKILL. + +### Optional security options (--security-opt) + +On Windows, this flag can be used to specify the `credentialspec` option. +The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`. + +### Stop container with timeout (--stop-timeout) + +The `--stop-timeout` flag sets the timeout (in seconds) that a pre-defined (see `--stop-signal`) system call +signal that will be sent to the container to exit. After timeout elapses the container will be killed with SIGKILL. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Microsoft Windows. The `--isolation ` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. These two commands are equivalent on Linux: + +``` +$ docker run -d busybox top +$ docker run -d --isolation default busybox top +``` + +On Microsoft Windows, can take any of these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. | + +On Windows, the default isolation for client is `hyperv`, and for server is +`process`. Therefore when running on Windows server without a `daemon` option +set, these two commands are equivalent: +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation process busybox top +``` + +If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, +if running on Windows server, any of these commands also result in `hyperv` isolation: + +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation hyperv busybox top +``` + +### Specify hard limits on memory available to containers (-m, --memory) + +These parameters always set an upper limit on the memory available to the container. On Linux, this +is set on the cgroup and applications in a container can query it at `/sys/fs/cgroup/memory/memory.limit_in_bytes`. + +On Windows, this will affect containers differently depending on what type of isolation is used. + +- With `process` isolation, Windows will report the full memory of the host system, not the limit to applications running inside the container + ```powershell + docker run -it -m 2GB --isolation=process microsoft/nanoserver powershell Get-ComputerInfo *memory* + + CsTotalPhysicalMemory : 17064509440 + CsPhyicallyInstalledMemory : 16777216 + OsTotalVisibleMemorySize : 16664560 + OsFreePhysicalMemory : 14646720 + OsTotalVirtualMemorySize : 19154928 + OsFreeVirtualMemory : 17197440 + OsInUseVirtualMemory : 1957488 + OsMaxProcessMemorySize : 137438953344 + ``` +- With `hyperv` isolation, Windows will create a utility VM that is big enough to hold the memory limit, plus the minimal OS needed to host the container. That size is reported as "Total Physical Memory." + ```powershell + docker run -it -m 2GB --isolation=hyperv microsoft/nanoserver powershell Get-ComputerInfo *memory* + + CsTotalPhysicalMemory : 2683355136 + CsPhyicallyInstalledMemory : + OsTotalVisibleMemorySize : 2620464 + OsFreePhysicalMemory : 2306552 + OsTotalVirtualMemorySize : 2620464 + OsFreeVirtualMemory : 2356692 + OsInUseVirtualMemory : 263772 + OsMaxProcessMemorySize : 137438953344 + ``` + + +### Configure namespaced kernel parameters (sysctls) at runtime + +The `--sysctl` sets namespaced kernel parameters (sysctls) in the +container. For example, to turn on IP forwarding in the containers +network namespace, run this command: + +```bash +$ docker run --sysctl net.ipv4.ip_forward=1 someimage +``` + +> **Note**: Not all sysctls are namespaced. Docker does not support changing sysctls +> inside of a container that also modify the host system. As the kernel +> evolves we expect to see more sysctls become namespaced. + +#### Currently supported sysctls + +- `IPC Namespace`: + + ```none + kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced + Sysctls beginning with fs.mqueue.* + ``` + + If you use the `--ipc=host` option these sysctls will not be allowed. + +- `Network Namespace`: + + Sysctls beginning with net.* + + If you use the `--network=host` option using these sysctls will not be allowed. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/save.md b/vendor/github.com/moby/moby/docs/reference/commandline/save.md new file mode 100644 index 0000000..cba7385 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/save.md @@ -0,0 +1,62 @@ +--- +title: "save" +description: "The save command description and usage" +keywords: "tarred, repository, backup" +--- + + + +# save + +```markdown +Usage: docker save [OPTIONS] IMAGE [IMAGE...] + +Save one or more images to a tar archive (streamed to STDOUT by default) + +Options: + --help Print usage + -o, --output string Write to a file, instead of STDOUT +``` + +## Description + +Produces a tarred repository to the standard output stream. +Contains all parent layers, and all tags + versions, or specified `repo:tag`, for +each argument provided. + +## Examples + +### Create a backup that can then be used with `docker load`. + +```bash +$ docker save busybox > busybox.tar + +$ ls -sh busybox.tar + +2.7M busybox.tar + +$ docker save --output busybox.tar busybox + +$ ls -sh busybox.tar + +2.7M busybox.tar + +$ docker save -o fedora-all.tar fedora + +$ docker save -o fedora-latest.tar fedora:latest +``` + +### Cherry-pick particular tags + +You can even cherry-pick particular tags of an image repository. + +```bash +$ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/search.md b/vendor/github.com/moby/moby/docs/reference/commandline/search.md new file mode 100644 index 0000000..f645c78 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/search.md @@ -0,0 +1,149 @@ +--- +title: "search" +description: "The search command description and usage" +keywords: "search, hub, images" +--- + + + +# search + +```markdown +Usage: docker search [OPTIONS] TERM + +Search the Docker Hub for images + +Options: + -f, --filter value Filter output based on conditions provided (default []) + - is-automated=(true|false) + - is-official=(true|false) + - stars= - image has at least 'number' stars + --help Print usage + --limit int Max number of search results (default 25) + --no-trunc Don't truncate output +``` + +## Description + +Search [Docker Hub](https://hub.docker.com) for images + +See [*Find Public Images on Docker Hub*](https://docs.docker.com/engine/tutorials/dockerrepos/#searching-for-images) for +more details on finding shared images from the command line. + +> **Note**: Search queries return a maximum of 25 results. + +## Examples + +### Search images by name + +This example displays images with a name containing 'busybox': + +```none +$ docker search busybox + +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +busybox Busybox base image. 316 [OK] +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] +odise/busybox-python 2 [OK] +azukiapp/busybox This image is meant to be used as the base... 2 [OK] +ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 [OK] +shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 1 [OK] +odise/busybox-curl 1 [OK] +ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 [OK] +peelsky/zulu-openjdk-busybox 1 [OK] +skomma/busybox-data Docker image suitable for data volume cont... 1 [OK] +elektritter/busybox-teamspeak Lightweight teamspeak3 container based on... 1 [OK] +socketplane/busybox 1 [OK] +oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 [OK] +ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 [OK] +nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 [OK] +openshift/busybox-http-app 0 [OK] +jllopis/busybox 0 [OK] +swyckoff/busybox 0 [OK] +powellquiring/busybox 0 [OK] +williamyeh/busybox-sh Docker image for BusyBox's sh 0 [OK] +simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 0 [OK] +fhisamoto/busybox-java Busybox java 0 [OK] +scottabernethy/busybox 0 [OK] +marclop/busybox-solr +``` + +### Display non-truncated description (--no-trunc) + +This example displays images with a name containing 'busybox', +at least 3 stars and the description isn't truncated in the output: + +```bash +$ docker search --stars=3 --no-trunc busybox +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +busybox Busybox base image. 325 [OK] +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors. 8 [OK] +``` + +### Limit search results (--limit) + +The flag `--limit` is the maximum number of results returned by a search. This value could +be in the range between 1 and 100. The default value of `--limit` is 25. + + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* stars (int - number of stars the image has) +* is-automated (true|false) - is the image automated or not +* is-official (true|false) - is the image official or not + + +#### stars + +This example displays images with a name containing 'busybox' and at +least 3 stars: + +```bash +$ docker search --filter stars=3 busybox + +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +busybox Busybox base image. 325 [OK] +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] +``` + + +#### is-automated + +This example displays images with a name containing 'busybox' +and are automated builds: + +```bash +$ docker search --filter is-automated busybox + +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] +``` + +#### is-official + +This example displays images with a name containing 'busybox', at least +3 stars and are official builds: + +```bash +$ docker search --filter "is-official=true" --filter "stars=3" busybox + +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/secret.md b/vendor/github.com/moby/moby/docs/reference/commandline/secret.md new file mode 100644 index 0000000..5073440 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/secret.md @@ -0,0 +1,45 @@ +--- +title: "secret" +description: "The secret command description and usage" +keywords: "secret" +--- + + + +# secret + +```markdown +Usage: docker secret COMMAND + +Manage Docker secrets + +Options: + --help Print usage + +Commands: + create Create a secret from a file or STDIN as content + inspect Display detailed information on one or more secrets + ls List secrets + rm Remove one or more secrets + +Run 'docker secret COMMAND --help' for more information on a command. + +``` + +## Description + +Manage secrets. + +## Related commands + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret list](secret_list.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/secret_create.md b/vendor/github.com/moby/moby/docs/reference/commandline/secret_create.md new file mode 100644 index 0000000..54612f6 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/secret_create.md @@ -0,0 +1,100 @@ +--- +title: "secret create" +description: "The secret create command description and usage" +keywords: ["secret, create"] +--- + + + +# secret create + +```Markdown +Usage: docker secret create [OPTIONS] SECRET file|- + +Create a secret from a file or STDIN as content + +Options: + --help Print usage + -l, --label list Secret labels (default []) +``` + +## Description + +Creates a secret using standard input or from a file for the secret content. You must run this command on a manager node. + +For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). + +## Examples + +### Create a secret + +```bash +$ echo | docker secret create my_secret - +mhv17xfe3gh6xc4rij5orpfds + +$ docker secret ls +ID NAME CREATED UPDATED SIZE +mhv17xfe3gh6xc4rij5orpfds my_secret 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC 1679 +``` + +### Create a secret with a file + +```bash +$ docker secret create my_secret ./secret.json + +mhv17xfe3gh6xc4rij5orpfds + +$ docker secret ls + +ID NAME CREATED UPDATED SIZE +mhv17xfe3gh6xc4rij5orpfds my_secret 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC 1679 +``` + +### Create a secret with labels + +```bash +$ docker secret create --label env=dev \ + --label rev=20161102 \ + my_secret ./secret.json + +jtn7g6aukl5ky7nr9gvwafoxh +``` + +```none +$ docker secret inspect my_secret + +[ + { + "ID": "jtn7g6aukl5ky7nr9gvwafoxh", + "Version": { + "Index": 541 + }, + "CreatedAt": "2016-11-03T20:54:12.924766548Z", + "UpdatedAt": "2016-11-03T20:54:12.924766548Z", + "Spec": { + "Name": "my_secret", + "Labels": { + "env": "dev", + "rev": "20161102" + }, + "Data": null + }, + "Digest": "sha256:4212a44b14e94154359569333d3fc6a80f6b9959dfdaff26412f4b2796b1f387", + "SecretSize": 1679 + } +] +``` + + +## Related commands + +* [secret inspect](secret_inspect.md) +* [secret ls](secret_ls.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/secret_inspect.md b/vendor/github.com/moby/moby/docs/reference/commandline/secret_inspect.md new file mode 100644 index 0000000..f047cbd --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/secret_inspect.md @@ -0,0 +1,90 @@ +--- +title: "secret inspect" +description: "The secret inspect command description and usage" +keywords: ["secret, inspect"] +--- + + + +# secret inspect + +```Markdown +Usage: docker secret inspect [OPTIONS] SECRET [SECRET...] + +Display detailed information on one or more secrets + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Inspects the specified secret. This command has to be run targeting a manager +node. + +By default, this renders all results in a JSON array. If a format is specified, +the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). + +## Examples + +### Inspect a secret by name or ID + +You can inspect a secret, either by its *name*, or *ID* + +For example, given the following secret: + +```bash +$ docker secret ls +ID NAME CREATED UPDATED +mhv17xfe3gh6xc4rij5orpfds secret.json 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC +``` + +```none +$ docker secret inspect secret.json + +[ + { + "ID": "mhv17xfe3gh6xc4rij5orpfds", + "Version": { + "Index": 1198 + }, + "CreatedAt": "2016-10-27T23:25:43.909181089Z", + "UpdatedAt": "2016-10-27T23:25:43.909181089Z", + "Spec": { + "Name": "secret.json" + } + } +] +``` + +### Formatting + +You can use the --format option to obtain specific information about a +secret. The following example command outputs the creation time of the +secret. + +```bash +$ docker secret inspect --format='{{.CreatedAt}}' mhv17xfe3gh6xc4rij5orpfds + +2016-10-27 23:25:43.909181089 +0000 UTC +``` + + +## Related commands + +* [secret create](secret_create.md) +* [secret ls](secret_ls.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/secret_ls.md b/vendor/github.com/moby/moby/docs/reference/commandline/secret_ls.md new file mode 100644 index 0000000..345728f --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/secret_ls.md @@ -0,0 +1,49 @@ +--- +title: "secret ls" +description: "The secret ls command description and usage" +keywords: ["secret, ls"] +--- + + + +# secret ls + +```Markdown +Usage: docker secret ls [OPTIONS] + +List secrets + +Aliases: + ls, list + +Options: + -q, --quiet Only display IDs +``` + +## Description + +Run this command on a manager node to list the secrets in the swarm. + +For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). + +## Examples + +```bash +$ docker secret ls + +ID NAME CREATED UPDATED +mhv17xfe3gh6xc4rij5orpfds secret.json 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC +``` + +## Related commands + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/secret_rm.md b/vendor/github.com/moby/moby/docs/reference/commandline/secret_rm.md new file mode 100644 index 0000000..1e10350 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/secret_rm.md @@ -0,0 +1,54 @@ +--- +title: "secret rm" +description: "The secret rm command description and usage" +keywords: ["secret, rm"] +--- + + + +# secret rm + +```Markdown +Usage: docker secret rm SECRET [SECRET...] + +Remove one or more secrets + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +## Description + +Removes the specified secrets from the swarm. This command has to be run +targeting a manager node. + +For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). + +## Examples + +This example removes a secret: + +```bash +$ docker secret rm secret.json +sapth4csdo5b6wz2p5uimh5xg +``` + +> **Warning**: Unlike `docker rm`, this command does not ask for confirmation +> before removing a secret. + + +## Related commands + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret ls](secret_ls.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/service.md b/vendor/github.com/moby/moby/docs/reference/commandline/service.md new file mode 100644 index 0000000..6256c9f --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/service.md @@ -0,0 +1,42 @@ +--- +title: "service" +description: "The service command description and usage" +keywords: "service" +--- + + + +# service + +```markdown +Usage: docker service COMMAND + +Manage services + +Options: + --help Print usage + +Commands: + create Create a new service + inspect Display detailed information on one or more services + logs Fetch the logs of a service + ls List services + ps List the tasks of a service + rm Remove one or more services + scale Scale one or multiple replicated services + update Update a service + +Run 'docker service COMMAND --help' for more information on a command. +``` + +## Description + +Manage services. + diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/service_create.md b/vendor/github.com/moby/moby/docs/reference/commandline/service_create.md new file mode 100644 index 0000000..75d39e0 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/service_create.md @@ -0,0 +1,748 @@ +--- +title: "service create" +description: "The service create command description and usage" +keywords: "service, create" +--- + + + +# service create + +```Markdown +Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...] + +Create a new service + +Options: + --constraint list Placement constraints (default []) + --container-label list Container labels (default []) + --dns list Set custom DNS servers (default []) + --dns-option list Set DNS options (default []) + --dns-search list Set custom DNS search domains (default []) + --endpoint-mode string Endpoint mode (vip or dnsrr) + -e, --env list Set environment variables (default []) + --env-file list Read in a file of environment variables (default []) + --group list Set one or more supplementary user groups for the container (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) + --help Print usage + --host list Set one or more custom host-to-IP mappings (host:ip) (default []) + --hostname string Container hostname + -l, --label list Service labels (default []) + --limit-cpu decimal Limit CPUs (default 0.000) + --limit-memory bytes Limit Memory (default 0 B) + --log-driver string Logging driver for service + --log-opt list Logging driver options (default []) + --mode string Service mode (replicated or global) (default "replicated") + --mount mount Attach a filesystem mount to the service + --name string Service name + --network list Network attachments (default []) + --no-healthcheck Disable any container-specified HEALTHCHECK + -p, --publish port Publish a port as a node port + --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs (default 0.000) + --reserve-memory bytes Reserve Memory (default 0 B) + --restart-condition string Restart when condition is met (none, on-failure, or any) + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --secret secret Specify secrets to expose to the service + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) + --update-failure-action string Action on update failure (pause|continue) (default "pause") + --update-max-failure-ratio float Failure rate to tolerate during an update + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s) + --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) + -u, --user string Username or UID (format: [:]) + --with-registry-auth Send registry authentication details to swarm agents + -w, --workdir string Working directory inside the container +``` + +## Description + +Creates a service as described by the specified parameters. You must run this +command on a manager node. + +## Examples + +### Create a service + +```bash +$ docker service create --name redis redis:3.0.6 + +dmu1ept4cxcfe8k8lhtux3ro3 + +$ docker service create --mode global --name redis2 redis:3.0.6 + +a8q9dasaafudfs8q8w32udass + +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +dmu1ept4cxcf redis replicated 1/1 redis:3.0.6 +a8q9dasaafud redis2 global 1/1 redis:3.0.6 +``` + +### Create a service with 5 replica tasks (--replicas) + +Use the `--replicas` flag to set the number of replica tasks for a replicated +service. The following command creates a `redis` service with `5` replica tasks: + +```bash +$ docker service create --name redis --replicas=5 redis:3.0.6 + +4cdgfyky7ozwh3htjfw0d12qv +``` + +The above command sets the *desired* number of tasks for the service. Even +though the command returns immediately, actual scaling of the service may take +some time. The `REPLICAS` column shows both the *actual* and *desired* number +of replica tasks for the service. + +In the following example the desired state is `5` replicas, but the current +number of `RUNNING` tasks is `3`: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +4cdgfyky7ozw redis replicated 3/5 redis:3.0.7 +``` + +Once all the tasks are created and `RUNNING`, the actual number of tasks is +equal to the desired number: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +4cdgfyky7ozw redis replicated 5/5 redis:3.0.7 +``` + +### Create a service with secrets + +Use the `--secret` flag to give a container access to a +[secret](secret_create.md). + +Create a service specifying a secret: + +```bash +$ docker service create --name redis --secret secret.json redis:3.0.6 + +4cdgfyky7ozwh3htjfw0d12qv +``` + +Create a service specifying the secret, target, user/group ID and mode: + +```bash +$ docker service create --name redis \ + --secret source=ssh-key,target=ssh \ + --secret source=app-key,target=app,uid=1000,gid=1001,mode=0400 \ + redis:3.0.6 + +4cdgfyky7ozwh3htjfw0d12qv +``` + +Secrets are located in `/run/secrets` in the container. If no target is +specified, the name of the secret will be used as the in memory file in the +container. If a target is specified, that will be the filename. In the +example above, two files will be created: `/run/secrets/ssh` and +`/run/secrets/app` for each of the secret targets specified. + +### Create a service with a rolling update policy + +```bash +$ docker service create \ + --replicas 10 \ + --name redis \ + --update-delay 10s \ + --update-parallelism 2 \ + redis:3.0.6 +``` + +When you run a [service update](service_update.md), the scheduler updates a +maximum of 2 tasks at a time, with `10s` between updates. For more information, +refer to the [rolling updates +tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/). + +### Set environment variables (-e, --env) + +This sets environmental variables for all tasks in a service. For example: + +```bash +$ docker service create --name redis_2 --replicas 5 --env MYVAR=foo redis:3.0.6 +``` + +### Create a service with specific hostname (--hostname) + +This option sets the docker service containers hostname to a specific string. +For example: + +```bash +$ docker service create --name redis --hostname myredis redis:3.0.6 +``` + +### Set metadata on a service (-l, --label) + +A label is a `key=value` pair that applies metadata to a service. To label a +service with two labels: + +```bash +$ docker service create \ + --name redis_2 \ + --label com.example.foo="bar" + --label bar=baz \ + redis:3.0.6 +``` + +For more information about labels, refer to [apply custom +metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). + +### Add bind-mounts or volumes + +Docker supports two different kinds of mounts, which allow containers to read to +or write from files or directories on other containers or the host operating +system. These types are _data volumes_ (often referred to simply as volumes) and +_bind-mounts_. + +Additionally, Docker supports `tmpfs` mounts. + +A **bind-mount** makes a file or directory on the host available to the +container it is mounted within. A bind-mount may be either read-only or +read-write. For example, a container might share its host's DNS information by +means of a bind-mount of the host's `/etc/resolv.conf` or a container might +write logs to its host's `/var/log/myContainerLogs` directory. If you use +bind-mounts and your host and containers have different notions of permissions, +access controls, or other such details, you will run into portability issues. + +A **named volume** is a mechanism for decoupling persistent data needed by your +container from the image used to create the container and from the host machine. +Named volumes are created and managed by Docker, and a named volume persists +even when no container is currently using it. Data in named volumes can be +shared between a container and the host machine, as well as between multiple +containers. Docker uses a _volume driver_ to create, manage, and mount volumes. +You can back up or restore volumes using Docker commands. + +A **tmpfs** mounts a tmpfs inside a container for volatile data. + +Consider a situation where your image starts a lightweight web server. You could +use that image as a base image, copy in your website's HTML files, and package +that into another image. Each time your website changed, you'd need to update +the new image and redeploy all of the containers serving your website. A better +solution is to store the website in a named volume which is attached to each of +your web server containers when they start. To update the website, you just +update the named volume. + +For more information about named volumes, see +[Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/). + +The following table describes options which apply to both bind-mounts and named +volumes in a service: + + + + + + + + + + + + + + + + + + + + + + + + + + + +
OptionRequiredDescription
types +

The type of mount, can be either volume, bind, or tmpfs. Defaults to volume if no type is specified. +

    +
  • volume: mounts a [managed volume](volume_create.md) into the container.
  • +
  • bind: bind-mounts a directory or file from the host into the container.
  • +
  • tmpfs: mount a tmpfs in the container
  • +

+
src or sourcefor type=bind only> +
    +
  • + type=volume: src is an optional way to specify the name of the volume (for example, src=my-volume). + If the named volume does not exist, it is automatically created. If no src is specified, the volume is + assigned a random name which is guaranteed to be unique on the host, but may not be unique cluster-wide. + A randomly-named volume has the same lifecycle as its container and is destroyed when the container + is destroyed (which is upon service update, or when scaling or re-balancing the service) +
  • +
  • + type=bind: src is required, and specifies an absolute path to the file or directory to bind-mount + (for example, src=/path/on/host/). An error is produced if the file or directory does not exist. +
  • +
  • + type=tmpfs: src is not supported. +
  • +
+

dst or destination or target

yes +

Mount path inside the container, for example /some/path/in/container/. + If the path does not exist in the container's filesystem, the Engine creates + a directory at the specified location before mounting the volume or bind-mount.

+

readonly or ro

+

The Engine mounts binds and volumes read-write unless readonly option + is given when mounting the bind or volume. +

    +
  • true or 1 or no value: Mounts the bind or volume read-only.
  • +
  • false or 0: Mounts the bind or volume read-write.
  • +

+
+ + +#### Bind Propagation + +Bind propagation refers to whether or not mounts created within a given +bind-mount or named volume can be propagated to replicas of that mount. Consider +a mount point `/mnt`, which is also mounted on `/tmp`. The propation settings +control whether a mount on `/tmp/a` would also be available on `/mnt/a`. Each +propagation setting has a recursive counterpoint. In the case of recursion, +consider that `/tmp/a` is also mounted as `/foo`. The propagation settings +control whether `/mnt/a` and/or `/tmp/a` would exist. + +The `bind-propagation` option defaults to `rprivate` for both bind-mounts and +volume mounts, and is only configurable for bind-mounts. In other words, named +volumes do not support bind propagation. + +- **`shared`**: Sub-mounts of the original mount are exposed to replica mounts, + and sub-mounts of replica mounts are also propagated to the + original mount. +- **`slave`**: similar to a shared mount, but only in one direction. If the + original mount exposes a sub-mount, the replica mount can see it. + However, if the replica mount exposes a sub-mount, the original + mount cannot see it. +- **`private`**: The mount is private. Sub-mounts within it are not exposed to + replica mounts, and sub-mounts of replica mounts are not + exposed to the original mount. +- **`rshared`**: The same as shared, but the propagation also extends to and from + mount points nested within any of the original or replica mount + points. +- **`rslave`**: The same as `slave`, but the propagation also extends to and from + mount points nested within any of the original or replica mount + points. +- **`rprivate`**: The default. The same as `private`, meaning that no mount points + anywhere within the original or replica mount points propagate + in either direction. + +For more information about bind propagation, see the +[Linux kernel documentation for shared subtree](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + +#### Options for Named Volumes + +The following options can only be used for named volumes (`type=volume`); + + + + + + + + + + + + + + + + + + + + + + + +
OptionDescription
volume-driver +

Name of the volume-driver plugin to use for the volume. Defaults to + "local", to use the local volume driver to create the volume if the + volume does not exist.

+
volume-label + One or more custom metadata ("labels") to apply to the volume upon + creation. For example, + `volume-label=mylabel=hello-world,my-other-label=hello-mars`. For more + information about labels, refer to + apply custom metadata. +
volume-nocopy + By default, if you attach an empty volume to a container, and files or + directories already existed at the mount-path in the container (dst), + the Engine copies those files and directories into the volume, allowing + the host to access them. Set `volume-nocopy` to disables copying files + from the container's filesystem to the volume and mount the empty volume. + + A value is optional: + +
    +
  • true or 1: Default if you do not provide a value. Disables copying.
  • +
  • false or 0: Enables copying.
  • +
+
volume-opt + Options specific to a given volume driver, which will be passed to the + driver when creating the volume. Options are provided as a comma-separated + list of key/value pairs, for example, + volume-opt=some-option=some-value,some-other-option=some-other-value. + For available options for a given driver, refer to that driver's + documentation. +
+ + +#### Options for tmpfs + +The following options can only be used for tmpfs mounts (`type=tmpfs`); + + + + + + + + + + + + + + + +
OptionDescription
tmpfs-sizeSize of the tmpfs mount in bytes. Unlimited by default in Linux.
tmpfs-modeFile mode of the tmpfs in octal. (e.g. "700" or "0700".) Defaults to "1777" in Linux.
+ + +#### Differences between "--mount" and "--volume" + +The `--mount` flag supports most options that are supported by the `-v` +or `--volume` flag for `docker run`, with some important exceptions: + +- The `--mount` flag allows you to specify a volume driver and volume driver + options *per volume*, without creating the volumes in advance. In contrast, + `docker run` allows you to specify a single volume driver which is shared + by all volumes, using the `--volume-driver` flag. + +- The `--mount` flag allows you to specify custom metadata ("labels") for a volume, + before the volume is created. + +- When you use `--mount` with `type=bind`, the host-path must refer to an *existing* + path on the host. The path will not be created for you and the service will fail + with an error if the path does not exist. + +- The `--mount` flag does not allow you to relabel a volume with `Z` or `z` flags, + which are used for `selinux` labeling. + +#### Create a service using a named volume + +The following example creates a service that uses a named volume: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round" \ + nginx:alpine +``` + +For each replica of the service, the engine requests a volume named "my-volume" +from the default ("local") volume driver where the task is deployed. If the +volume does not exist, the engine creates a new volume and applies the "color" +and "shape" labels. + +When the task is started, the volume is mounted on `/path/in/container/` inside +the container. + +Be aware that the default ("local") volume is a locally scoped volume driver. +This means that depending on where a task is deployed, either that task gets a +*new* volume named "my-volume", or shares the same "my-volume" with other tasks +of the same service. Multiple containers writing to a single shared volume can +cause data corruption if the software running inside the container is not +designed to handle concurrent processes writing to the same location. Also take +into account that containers can be re-scheduled by the Swarm orchestrator and +be deployed on a different node. + +#### Create a service that uses an anonymous volume + +The following command creates a service with three replicas with an anonymous +volume on `/path/in/container`: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,destination=/path/in/container \ + nginx:alpine +``` + +In this example, no name (`source`) is specified for the volume, so a new volume +is created for each task. This guarantees that each task gets its own volume, +and volumes are not shared between tasks. Anonymous volumes are removed after +the task using them is complete. + +#### Create a service that uses a bind-mounted host directory + +The following example bind-mounts a host directory at `/path/in/container` in +the containers backing the service: + +```bash +$ docker service create \ + --name my-service \ + --mount type=bind,source=/path/on/host,destination=/path/in/container \ + nginx:alpine +``` + +### Set service mode (--mode) + +The service mode determines whether this is a _replicated_ service or a _global_ +service. A replicated service runs as many tasks as specified, while a global +service runs on each active node in the swarm. + +The following command creates a global service: + +```bash +$ docker service create \ + --name redis_2 \ + --mode global \ + redis:3.0.6 +``` + +### Specify service constraints (--constraint) + +You can limit the set of nodes where a task can be scheduled by defining +constraint expressions. Multiple constraints find nodes that satisfy every +expression (AND match). Constraints can match node or Docker Engine labels as +follows: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
node attributematchesexample
node.idNode IDnode.id == 2ivku8v2gvtg4
node.hostnameNode hostnamenode.hostname != node-2
node.roleNode rolenode.role == manager
node.labelsuser defined node labelsnode.labels.security == high
engine.labelsDocker Engine's labelsengine.labels.operatingsystem == ubuntu 14.04
+ + +`engine.labels` apply to Docker Engine labels like operating system, +drivers, etc. Swarm administrators add `node.labels` for operational purposes by +using the [`docker node update`](node_update.md) command. + +For example, the following limits tasks for the redis service to nodes where the +node type label equals queue: + +```bash +$ docker service create \ + --name redis_2 \ + --constraint 'node.labels.type == queue' \ + redis:3.0.6 +``` + +### Attach a service to an existing network (--network) + +You can use overlay networks to connect one or more services within the swarm. + +First, create an overlay network on a manager node the docker network create +command: + +```bash +$ docker network create --driver overlay my-network + +etjpu59cykrptrgw0z0hk5snf +``` + +After you create an overlay network in swarm mode, all manager nodes have +access to the network. + +When you create a service and pass the --network flag to attach the service to +the overlay network: + +```bash +$ docker service create \ + --replicas 3 \ + --network my-network \ + --name my-web \ + nginx + +716thylsndqma81j6kkkb5aus +``` + +The swarm extends my-network to each node running the service. + +Containers on the same network can access each other using +[service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery). + +### Publish service ports externally to the swarm (-p, --publish) + +You can publish service ports to make them available externally to the swarm +using the `--publish` flag: + +```bash +$ docker service create --publish : nginx +``` + +For example: + +```bash +$ docker service create --name my_web --replicas 3 --publish 8080:80 nginx +``` + +When you publish a service port, the swarm routing mesh makes the service +accessible at the target port on every node regardless if there is a task for +the service running on the node. For more information refer to +[Use swarm mode routing mesh](https://docs.docker.com/engine/swarm/ingress/). + +### Publish a port for TCP only or UDP only + +By default, when you publish a port, it is a TCP port. You can +specifically publish a UDP port instead of or in addition to a TCP port. When +you publish both TCP and UDP ports, Docker 1.12.2 and earlier require you to +add the suffix `/tcp` for TCP ports. Otherwise it is optional. + +#### TCP only + +The following two commands are equivalent. + +```bash +$ docker service create --name dns-cache -p 53:53 dns-cache + +$ docker service create --name dns-cache -p 53:53/tcp dns-cache +``` + +#### TCP and UDP + +```bash +$ docker service create --name dns-cache -p 53:53/tcp -p 53:53/udp dns-cache +``` + +#### UDP only + +```bash +$ docker service create --name dns-cache -p 53:53/udp dns-cache +``` + +### Create services using templates + +You can use templates for some flags of `service create`, using the syntax +provided by the Go's [text/template](http://golang.org/pkg/text/template/) package. + +The supported flags are the following : + +- `--hostname` +- `--mount` +- `--env` + +Valid placeholders for the Go template are listed below: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PlaceholderDescription
.Service.IDService ID
.Service.NameService name
.Service.LabelsService labels
.Node.IDNode ID
.Task.IDTask ID
.Task.NameTask name
.Task.SlotTask slot
+ + +#### Template example + +In this example, we are going to set the template of the created containers based on the +service's name and the node's ID where it sits. + +```bash +$ docker service create --name hosttempl \ + --hostname="{{.Node.ID}}-{{.Service.Name}}"\ + busybox top + +va8ew30grofhjoychbr6iot8c + +$ docker service ps va8ew30grofhjoychbr6iot8c + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +wo41w8hg8qan hosttempl.1 busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 2e7a8a9c4da2 Running Running about a minute ago + +$ docker inspect --format="{{.Config.Hostname}}" hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj + +x3ti0erg11rjpg64m75kej2mz-hosttempl +``` + +## Related commands + +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) + + diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/service_inspect.md b/vendor/github.com/moby/moby/docs/reference/commandline/service_inspect.md new file mode 100644 index 0000000..b2088d2 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/service_inspect.md @@ -0,0 +1,167 @@ +--- +title: "service inspect" +description: "The service inspect command description and usage" +keywords: "service, inspect" +--- + + + +# service inspect + +```Markdown +Usage: docker service inspect [OPTIONS] SERVICE [SERVICE...] + +Display detailed information on one or more services + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format. +``` + +## Description + +Inspects the specified service. This command has to be run targeting a manager +node. + +By default, this renders all results in a JSON array. If a format is specified, +the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +### Inspect a service by name or ID + +You can inspect a service, either by its *name*, or *ID* + +For example, given the following service; + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 +``` + +Both `docker service inspect redis`, and `docker service inspect dmu1ept4cxcf` +produce the same result: + +```none +$ docker service inspect redis + +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + "CreatedAt": "2016-06-17T18:44:02.558012087Z", + "UpdatedAt": "2016-06-17T18:44:02.558012087Z", + "Spec": { + "Name": "redis", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis:3.0.6" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": {}, + "EndpointSpec": { + "Mode": "vip" + } + }, + "Endpoint": { + "Spec": {} + } + } +] +``` + +```bash +$ docker service inspect dmu1ept4cxcf + +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + ... + } +] +``` + +### Formatting + +You can print the inspect output in a human-readable format instead of the default +JSON output, by using the `--pretty` option: + +```bash +$ docker service inspect --pretty frontend + +ID: c8wgl7q4ndfd52ni6qftkvnnp +Name: frontend +Labels: + - org.example.projectname=demo-app +Service Mode: REPLICATED + Replicas: 5 +Placement: +UpdateConfig: + Parallelism: 0 +ContainerSpec: + Image: nginx:alpine +Resources: +Endpoint Mode: vip +Ports: + Name = + Protocol = tcp + TargetPort = 443 + PublishedPort = 4443 +``` + +You can also use `--format pretty` for the same effect. + + +#### Find the number of tasks running as part of a service + +The `--format` option can be used to obtain specific information about a +service. For example, the following command outputs the number of replicas +of the "redis" service. + +```bash +$ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis + +10 +``` + + +## Related commands + +* [service create](service_create.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/service_logs.md b/vendor/github.com/moby/moby/docs/reference/commandline/service_logs.md new file mode 100644 index 0000000..844a58e --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/service_logs.md @@ -0,0 +1,79 @@ +--- +title: "service logs (experimental)" +description: "The service logs command description and usage" +keywords: "service, logs" +advisory: "experimental" +--- + + + +# service logs + +```Markdown +Usage: docker service logs [OPTIONS] SERVICE + +Fetch the logs of a service + +Options: + --details Show extra details provided to logs + -f, --follow Follow log output + --help Print usage + --since string Show logs since timestamp + --tail string Number of lines to show from the end of the logs (default "all") + -t, --timestamps Show timestamps +``` + +## Description + +The `docker service logs` command batch-retrieves logs present at the time of execution. + +> **Note**: This command is only functional for services that are started with +> the `json-file` or `journald` logging driver. + +For more information about selecting and configuring logging drivers, refer to +[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). + +The `docker service logs --follow` command will continue streaming the new output from +the service's `STDOUT` and `STDERR`. + +Passing a negative number or a non-integer to `--tail` is invalid and the +value is set to `all` in that case. + +The `docker service logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) +, for example `2014-09-16T06:17:46.000000000Z`, to each +log entry. To ensure that the timestamps are aligned the +nano-second part of the timestamp will be padded with zero when necessary. + +The `docker service logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +service. + +The `--since` option shows only the service logs generated after +a given date. You can specify the date as an RFC 3339 date, a UNIX +timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date +format you may also use RFC3339Nano, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. You can combine the +`--since` option with either or both of the `--follow` or `--tail` options. + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/service_ls.md b/vendor/github.com/moby/moby/docs/reference/commandline/service_ls.md new file mode 100644 index 0000000..c6c2ace --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/service_ls.md @@ -0,0 +1,120 @@ +--- +title: "service ls" +description: "The service ls command description and usage" +keywords: "service, ls" +--- + + + +# service ls + +```Markdown +Usage: docker service ls [OPTIONS] + +List services + +Aliases: + ls, list + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + -q, --quiet Only display IDs +``` + +## Description + +This command when run targeting a manager, lists services are running in the +swarm. + +## Examples + +On a manager node: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +c8wgl7q4ndfd frontend replicated 5/5 nginx:alpine +dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 +iwe3278osahj mongo global 7/7 mongo:3.3 +``` + +The `REPLICAS` column shows both the *actual* and *desired* number of tasks for +the service. + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [id](service_ls.md#id) +* [label](service_ls.md#label) +* [name](service_ls.md#name) + +#### id + +The `id` filter matches all or part of a service's id. + +```bash +$ docker service ls -f "id=0bcjw" +ID NAME MODE REPLICAS IMAGE +0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 +``` + +#### label + +The `label` filter matches services based on the presence of a `label` alone or +a `label` and a value. + +The following filter matches all services with a `project` label regardless of +its value: + +```bash +$ docker service ls --filter label=project +ID NAME MODE REPLICAS IMAGE +01sl1rp6nj5u frontend2 replicated 1/1 nginx:alpine +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +The following filter matches only services with the `project` label with the +`project-a` value. + +```bash +$ docker service ls --filter label=project=project-a +ID NAME MODE REPLICAS IMAGE +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +#### name + +The `name` filter matches on all or part of a service's name. + +The following filter matches services with a name containing `redis`. + +```bash +$ docker service ls --filter name=redis +ID NAME MODE REPLICAS IMAGE +0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 +``` + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/service_ps.md b/vendor/github.com/moby/moby/docs/reference/commandline/service_ps.md new file mode 100644 index 0000000..4b0118f --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/service_ps.md @@ -0,0 +1,163 @@ +--- +title: "service ps" +description: "The service ps command description and usage" +keywords: "service, tasks, ps" +aliases: ["/engine/reference/commandline/service_tasks/"] +--- + + + +# service ps + +```Markdown +Usage: docker service ps [OPTIONS] SERVICE + +List the tasks of a service + +Options: + -f, --filter filter Filter output based on conditions provided + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output + -q, --quiet Only display task IDs +``` + +## Description + +Lists the tasks that are running as part of the specified services. This command +has to be run targeting a manager node. + +## Examples + +### List the tasks that are part of a service + +The following command shows all the tasks that are part of the `redis` service: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.5 manager1 Running Running 8 seconds +bk658fpbex0d redis.2 redis:3.0.5 worker2 Running Running 9 seconds +5ls5s5fldaqg redis.3 redis:3.0.5 worker1 Running Running 9 seconds +8ryt076polmc redis.4 redis:3.0.5 worker1 Running Running 9 seconds +1x0v8yomsncd redis.5 redis:3.0.5 manager1 Running Running 8 seconds +71v7je3el7rr redis.6 redis:3.0.5 worker2 Running Running 9 seconds +4l3zm9b7tfr7 redis.7 redis:3.0.5 worker2 Running Running 9 seconds +9tfpyixiy2i7 redis.8 redis:3.0.5 worker1 Running Running 9 seconds +3w1wu13yupln redis.9 redis:3.0.5 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.5 manager1 Running Running 8 seconds +``` + +In addition to _running_ tasks, the output also shows the task history. For +example, after updating the service to use the `redis:3.0.6` image, the output +may look like this: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxk redis.1 redis:3.0.6 manager1 Running Running 6 seconds ago +ky2re9oz86r9 \_ redis.1 redis:3.0.5 manager1 Shutdown Shutdown 8 seconds ago +3s46te2nzl4i redis.2 redis:3.0.6 worker2 Running Running less than a second ago +nvjljf7rmor4 \_ redis.2 redis:3.0.6 worker2 Shutdown Rejected 23 seconds ago "No such image: redis@sha256:6…" +vtiuz2fpc0yb \_ redis.2 redis:3.0.5 worker2 Shutdown Shutdown 1 second ago +jnarweeha8x4 redis.3 redis:3.0.6 worker1 Running Running 3 seconds ago +vs448yca2nz4 \_ redis.3 redis:3.0.5 worker1 Shutdown Shutdown 4 seconds ago +jf1i992619ir redis.4 redis:3.0.6 worker1 Running Running 10 seconds ago +blkttv7zs8ee \_ redis.4 redis:3.0.5 worker1 Shutdown Shutdown 11 seconds ago +``` + +The number of items in the task history is determined by the +`--task-history-limit` option that was set when initializing the swarm. You can +change the task history retention limit using the +[`docker swarm update`](swarm_update.md) command. + +When deploying a service, docker resolves the digest for the service's +image, and pins the service to that digest. The digest is not shown by +default, but is printed if `--no-trunc` is used. The `--no-trunc` option +also shows the non-truncated task ID, and error-messages, as can be seen below; + +```bash +$ docker service ps --no-trunc redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxksi9w2a704wkp7 redis.1 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 manager1 Running Running 5 minutes ago +ky2re9oz86r9556i2szb8a8af \_ redis.1 redis:3.0.5@sha256:f8829e00d95672c48c60f468329d6693c4bdd28d1f057e755f8ba8b40008682e worker2 Shutdown Shutdown 5 minutes ago +bk658fpbex0d57cqcwoe3jthu redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Running Running 5 seconds +nvjljf7rmor4htv7l8rwcx7i7 \_ redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Shutdown Rejected 5 minutes ago "No such image: redis@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842" +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. + +The currently supported filters are: + +* [id](#id) +* [name](#name) +* [node](#node) +* [desired-state](#desired-state) + + +#### id + +The `id` filter matches on all or a prefix of a task's ID. + +```bash +$ docker service ps -f "id=8" redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +8ryt076polmc redis.4 redis:3.0.6 worker1 Running Running 9 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + +#### name + +The `name` filter matches on task names. + +```bash +$ docker service ps -f "name=redis.1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +qihejybwf1x5 redis.1 redis:3.0.6 manager1 Running Running 8 seconds +``` + + +#### node + +The `node` filter matches on a node name or a node ID. + +```bash +$ docker service ps -f "node=manager1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.6 manager1 Running Running 8 seconds +1x0v8yomsncd redis.5 redis:3.0.6 manager1 Running Running 8 seconds +3w1wu13yupln redis.9 redis:3.0.6 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, and `accepted`. + + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service update](service_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/service_rm.md b/vendor/github.com/moby/moby/docs/reference/commandline/service_rm.md new file mode 100644 index 0000000..448f2c3 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/service_rm.md @@ -0,0 +1,60 @@ +--- +title: "service rm" +description: "The service rm command description and usage" +keywords: "service, rm" +--- + + + +# service rm + +```Markdown +Usage: docker service rm SERVICE [SERVICE...] + +Remove one or more services + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +## Description + +Removes the specified services from the swarm. This command has to be run +targeting a manager node. + +## Examples + +Remove the `redis` service: + +```bash +$ docker service rm redis + +redis + +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +``` + +> **Warning**: Unlike `docker rm`, this command does not ask for confirmation +> before removing a running service. + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/service_scale.md b/vendor/github.com/moby/moby/docs/reference/commandline/service_scale.md new file mode 100644 index 0000000..a3aef5f --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/service_scale.md @@ -0,0 +1,104 @@ +--- +title: "service scale" +description: "The service scale command description and usage" +keywords: "service, scale" +--- + + + +# service scale + +```markdown +Usage: docker service scale SERVICE=REPLICAS [SERVICE=REPLICAS...] + +Scale one or multiple replicated services + +Options: + --help Print usage +``` + +## Description + +The scale command enables you to scale one or more replicated services either up +or down to the desired number of replicas. This command cannot be applied on +services which are global mode. The command will return immediately, but the +actual scaling of the service may take some time. To stop all replicas of a +service while keeping the service active in the swarm you can set the scale to 0. + +## Examples + +### Scale a single service + +The following command scales the "frontend" service to 50 tasks. + +```bash +$ docker service scale frontend=50 + +frontend scaled to 50 +``` + +The following command tries to scale a global service to 10 tasks and returns an error. + +```bash +$ docker service create --mode global --name backend backend:latest + +b4g08uwuairexjub6ome6usqh + +$ docker service scale backend=10 + +backend: scale can only be used with replicated mode +``` + +Directly afterwards, run `docker service ls`, to see the actual number of +replicas. + +```bash +$ docker service ls --filter name=frontend + +ID NAME MODE REPLICAS IMAGE +3pr5mlvu3fh9 frontend replicated 15/50 nginx:alpine +``` + +You can also scale a service using the [`docker service update`](service_update.md) +command. The following commands are equivalent: + +```bash +$ docker service scale frontend=50 +$ docker service update --replicas=50 frontend +``` + +### Scale multiple services + +The `docker service scale` command allows you to set the desired number of +tasks for multiple services at once. The following example scales both the +backend and frontend services: + +```bash +$ docker service scale backend=3 frontend=5 + +backend scaled to 3 +frontend scaled to 5 + +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +3pr5mlvu3fh9 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/service_update.md b/vendor/github.com/moby/moby/docs/reference/commandline/service_update.md new file mode 100644 index 0000000..d73b9a1 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/service_update.md @@ -0,0 +1,228 @@ +--- +title: "service update" +description: "The service update command description and usage" +keywords: "service, update" +--- + + + +# service update + +```Markdown +Usage: docker service update [OPTIONS] SERVICE + +Update a service + +Options: + --args string Service command args + --constraint-add list Add or update a placement constraint (default []) + --constraint-rm list Remove a constraint (default []) + --container-label-add list Add or update a container label (default []) + --container-label-rm list Remove a container label by its key (default []) + --dns-add list Add or update a custom DNS server (default []) + --dns-option-add list Add or update a DNS option (default []) + --dns-option-rm list Remove a DNS option (default []) + --dns-rm list Remove a custom DNS server (default []) + --dns-search-add list Add or update a custom DNS search domain (default []) + --dns-search-rm list Remove a DNS search domain (default []) + --endpoint-mode string Endpoint mode (vip or dnsrr) + --env-add list Add or update an environment variable (default []) + --env-rm list Remove an environment variable (default []) + --force Force update even if no changes require it + --group-add list Add an additional supplementary user group to the container (default []) + --group-rm list Remove a previously added supplementary user group from the container (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) + --help Print usage + --host-add list Add or update a custom host-to-IP mapping (host:ip) (default []) + --host-rm list Remove a custom host-to-IP mapping (host:ip) (default []) + --hostname string Container hostname + --image string Service image tag + --label-add list Add or update a service label (default []) + --label-rm list Remove a label by its key (default []) + --limit-cpu decimal Limit CPUs (default 0.000) + --limit-memory bytes Limit Memory (default 0 B) + --log-driver string Logging driver for service + --log-opt list Logging driver options (default []) + --mount-add mount Add or update a mount on a service + --mount-rm list Remove a mount by its target path (default []) + --no-healthcheck Disable any container-specified HEALTHCHECK + --publish-add port Add or update a published port + --publish-rm port Remove a published port by its target port + --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs (default 0.000) + --reserve-memory bytes Reserve Memory (default 0 B) + --restart-condition string Restart when condition is met (none, on-failure, or any) + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --rollback Rollback to previous specification + --secret-add secret Add or update a secret on a service + --secret-rm list Remove a secret (default []) + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) + --update-failure-action string Action on update failure (pause|continue) (default "pause") + --update-max-failure-ratio float Failure rate to tolerate during an update + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s) + --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) + -u, --user string Username or UID (format: [:]) + --with-registry-auth Send registry authentication details to swarm agents + -w, --workdir string Working directory inside the container +``` + +## Description + +Updates a service as described by the specified parameters. This command has to be run targeting a manager node. +The parameters are the same as [`docker service create`](service_create.md). Please look at the description there +for further information. + +Normally, updating a service will only cause the service's tasks to be replaced with new ones if a change to the +service requires recreating the tasks for it to take effect. For example, only changing the +`--update-parallelism` setting will not recreate the tasks, because the individual tasks are not affected by this +setting. However, the `--force` flag will cause the tasks to be recreated anyway. This can be used to perform a +rolling restart without any changes to the service parameters. + +## Examples + +### Update a service + +```bash +$ docker service update --limit-cpu 2 redis +``` + +### Perform a rolling restart with no parameter changes + +```bash +$ docker service update --force --update-parallelism 1 --update-delay 30s redis +``` + +In this example, the `--force` flag causes the service's tasks to be shut down +and replaced with new ones even though none of the other parameters would +normally cause that to happen. The `--update-parallelism 1` setting ensures +that only one task is replaced at a time (this is the default behavior). The +`--update-delay 30s` setting introduces a 30 second delay between tasks, so +that the rolling restart happens gradually. + +### Add or remove mounts + +Use the `--mount-add` or `--mount-rm` options add or remove a service's bind-mounts +or volumes. + +The following example creates a service which mounts the `test-data` volume to +`/somewhere`. The next step updates the service to also mount the `other-volume` +volume to `/somewhere-else`volume, The last step unmounts the `/somewhere` mount +point, effectively removing the `test-data` volume. Each command returns the +service name. + +- The `--mount-add` flag takes the same parameters as the `--mount` flag on + `service create`. Refer to the [volumes and + bind-mounts](service_create.md#volumes-and-bind-mounts-mount) section in the + `service create` reference for details. + +- The `--mount-rm` flag takes the `target` path of the mount. + +```bash +$ docker service create \ + --name=myservice \ + --mount \ + type=volume,source=test-data,target=/somewhere \ + nginx:alpine \ + myservice + +myservice + +$ docker service update \ + --mount-add \ + type=volume,source=other-volume,target=/somewhere-else \ + myservice + +myservice + +$ docker service update --mount-rm /somewhere myservice + +myservice +``` + +### Rolling back to the previous version of a service + +Use the `--rollback` option to roll back to the previous version of the service. + +This will revert the service to the configuration that was in place before the most recent `docker service update` command. + +The following example updates the number of replicas for the service from 4 to 5, and then rolls back to the previous configuration. + +```bash +$ docker service update --replicas=5 web + +web + +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +80bvrzp6vxf3 web replicated 0/5 nginx:alpine + +``` +Roll back the `web` service... + +```bash +$ docker service update --rollback web + +web + +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +80bvrzp6vxf3 web replicated 0/4 nginx:alpine + +``` + +Other options can be combined with `--rollback` as well, for example, `--update-delay 0s` to execute the rollback without a delay between tasks: + +```bash +$ docker service update \ + --rollback \ + --update-delay 0s + web + +web + +``` + +### Add or remove secrets + +Use the `--secret-add` or `--secret-rm` options add or remove a service's +secrets. + +The following example adds a secret named `ssh-2` and removes `ssh-1`: + +```bash +$ docker service update \ + --secret-add source=ssh-2,target=ssh-2 \ + --secret-rm ssh-1 \ + myservice +``` + +### Update services using templates + +Some flags of `service update` support the use of templating. +See [`service create`](./service_create.md#templating) for the reference. + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service ps](service_ps.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/stack.md b/vendor/github.com/moby/moby/docs/reference/commandline/stack.md new file mode 100644 index 0000000..94e3e25 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/stack.md @@ -0,0 +1,39 @@ +--- +title: "stack" +description: "The stack command description and usage" +keywords: "stack" +--- + + + +# stack + +```markdown +Usage: docker stack COMMAND + +Manage Docker stacks + +Options: + --help Print usage + +Commands: + deploy Deploy a new stack or update an existing stack + ls List stacks + ps List the tasks in the stack + rm Remove the stack + services List the services in the stack + +Run 'docker stack COMMAND --help' for more information on a command. +``` + +## Description + +Manage stacks. + diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/stack_deploy.md b/vendor/github.com/moby/moby/docs/reference/commandline/stack_deploy.md new file mode 100644 index 0000000..39456b7 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/stack_deploy.md @@ -0,0 +1,106 @@ +--- +title: "stack deploy" +description: "The stack deploy command description and usage" +keywords: "stack, deploy, up" +--- + + + +# stack deploy + +```markdown +Usage: docker stack deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + -c, --compose-file string Path to a Compose file + --help Print usage + --with-registry-auth Send registry authentication details to Swarm agents +``` + +## Description + +Create and update a stack from a `compose` or a `dab` file on the swarm. This command +has to be run targeting a manager node. + +## Examples + +### Compose file + +The `deploy` command supports compose file version `3.0` and above." + +```bash +$ docker stack deploy --compose-file docker-compose.yml vossibility + +Ignoring unsupported options: links + +Creating network vossibility_vossibility +Creating network vossibility_default +Creating service vossibility_nsqd +Creating service vossibility_logstash +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_ghollector +Creating service vossibility_lookupd +``` + +You can verify that the services were correctly created + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +### DAB file + +```bash +$ docker stack deploy --bundle-file vossibility-stack.dab vossibility + +Loading bundle from vossibility-stack.dab +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_logstash +Creating service vossibility_lookupd +Creating service vossibility_nsqd +Creating service vossibility_vossibility-collector +``` + +You can verify that the services were correctly created: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Related commands + +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/stack_ls.md b/vendor/github.com/moby/moby/docs/reference/commandline/stack_ls.md new file mode 100644 index 0000000..567d947 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/stack_ls.md @@ -0,0 +1,51 @@ +--- +title: "stack ls" +description: "The stack ls command description and usage" +keywords: "stack, ls" +--- + + + +# stack ls + +```markdown +Usage: docker stack ls + +List stacks + +Aliases: + ls, list + +Options: + --help Print usage +``` + +## Description + +Lists the stacks. + +## Examples + +The following command shows all stacks and some additional information: + +```bash +$ docker stack ls + +ID SERVICES +vossibility-stack 6 +myapp 2 +``` + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/stack_ps.md b/vendor/github.com/moby/moby/docs/reference/commandline/stack_ps.md new file mode 100644 index 0000000..9f221d8 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/stack_ps.md @@ -0,0 +1,59 @@ +--- +title: "stack ps" +description: "The stack ps command description and usage" +keywords: "stack, ps" +--- + + + +# stack ps + +```markdown +Usage: docker stack ps [OPTIONS] STACK + +List the tasks in the stack + +Options: + -f, --filter filter Filter output based on conditions provided + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output +``` + +## Description + +Lists the tasks that are running as part of the specified stack. This +command has to be run targeting a manager node. + +## Examples + +```bash +$ docker stack ps +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. + +The currently supported filters are: + +* id +* name +* desired-state + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/stack_rm.md b/vendor/github.com/moby/moby/docs/reference/commandline/stack_rm.md new file mode 100644 index 0000000..d9d5043 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/stack_rm.md @@ -0,0 +1,40 @@ +--- +title: "stack rm" +description: "The stack rm command description and usage" +keywords: "stack, rm, remove, down" +--- + + + +# stack rm + +```markdown +Usage: docker stack rm STACK + +Remove the stack + +Aliases: + rm, remove, down + +Options: + --help Print usage +``` + +## Description + +Remove the stack from the swarm. This command has to be run targeting +a manager node. + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/stack_services.md b/vendor/github.com/moby/moby/docs/reference/commandline/stack_services.md new file mode 100644 index 0000000..13b9d98 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/stack_services.md @@ -0,0 +1,74 @@ +--- +title: "stack services" +description: "The stack services command description and usage" +keywords: "stack, services" +advisory: "experimental" +--- + + + +# stack services (experimental) + +```markdown +Usage: docker stack services [OPTIONS] STACK + +List the services in the stack + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + -q, --quiet Only display IDs +``` + +## Description + +Lists the services that are running as part of the specified stack. This +command has to be run targeting a manager node. + +## Examples + +The following command shows all services in the `myapp` stack: + +```bash +$ docker stack services myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. + +The following command shows both the `web` and `db` services: + +```bash +$ docker stack services --filter name=myapp_web --filter name=myapp_db myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +The currently supported filters are: + +* id / ID (`--filter id=7be5ei6sqeye`, or `--filter ID=7be5ei6sqeye`) +* name (`--filter name=myapp_web`) +* label (`--filter label=key=value`) + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/start.md b/vendor/github.com/moby/moby/docs/reference/commandline/start.md new file mode 100644 index 0000000..aa67228 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/start.md @@ -0,0 +1,34 @@ +--- +title: "start" +description: "The start command description and usage" +keywords: "Start, container, stopped" +--- + + + +# start + +```markdown +Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] + +Start one or more stopped containers + +Options: + -a, --attach Attach STDOUT/STDERR and forward signals + --detach-keys string Override the key sequence for detaching a container + --help Print usage + -i, --interactive Attach container's STDIN +``` + +## Examples + +```bash +$ docker start my_container +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/stats.md b/vendor/github.com/moby/moby/docs/reference/commandline/stats.md new file mode 100644 index 0000000..55d5a44 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/stats.md @@ -0,0 +1,127 @@ +--- +title: "stats" +description: "The stats command description and usage" +keywords: "container, resource, statistics" +--- + + + +# stats + +```markdown +Usage: docker stats [OPTIONS] [CONTAINER...] + +Display a live stream of container(s) resource usage statistics + +Options: + -a, --all Show all containers (default shows just running) + --format string Pretty-print images using a Go template + --help Print usage + --no-stream Disable streaming stats and only pull the first result +``` + +## Description + +The `docker stats` command returns a live data stream for running containers. To limit data to one or more specific containers, specify a list of container names or ids separated by a space. You can specify a stopped container but stopped containers do not return any data. + +If you want more detailed information about a container's resource usage, use the `/containers/(id)/stats` API endpoint. + +## Examples + +Running `docker stats` on all running containers against a Linux daemon. + +```bash +$ docker stats +CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O +1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB +9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B +d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B +``` + +Running `docker stats` on multiple containers by name and id against a Linux daemon. + +```bash +$ docker stats fervent_panini 5acfcb1b4fd1 +CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O +5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B +fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B +``` + +Running `docker stats` on all running containers against a Windows daemon. + +```powershell +PS E:\> docker stats +CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O +09d3bb5b1604 6.61% 38.21 MiB 17.1 kB / 7.73 kB 10.7 MB / 3.57 MB +9db7aa4d986d 9.19% 38.26 MiB 15.2 kB / 7.65 kB 10.6 MB / 3.3 MB +3f214c61ad1d 0.00% 28.64 MiB 64 kB / 6.84 kB 4.42 MB / 6.93 MB +``` + +Running `docker stats` on multiple containers by name and id against a Windows daemon. + +```powershell +PS E:\> docker ps -a +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +3f214c61ad1d nanoserver "cmd" 2 minutes ago Up 2 minutes big_minsky +9db7aa4d986d windowsservercore "cmd" 2 minutes ago Up 2 minutes mad_wilson +09d3bb5b1604 windowsservercore "cmd" 2 minutes ago Up 2 minutes affectionate_easley + +PS E:\> docker stats 3f214c61ad1d mad_wilson +CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O +3f214c61ad1d 0.00% 46.25 MiB 76.3 kB / 7.92 kB 10.3 MB / 14.7 MB +mad_wilson 9.59% 40.09 MiB 27.6 kB / 8.81 kB 17 MB / 20.1 MB +``` + +### Formatting + +The formatting option (`--format`) pretty prints container output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------ | -------------------------------------------- +`.Container` | Container name or ID (user input) +`.Name` | Container name +`.ID` | Container ID +`.CPUPerc` | CPU percentage +`.MemUsage` | Memory usage +`.NetIO` | Network IO +`.BlockIO` | Block IO +`.MemPerc` | Memory percentage (Not available on Windows) +`.PIDs` | Number of PIDs (Not available on Windows) + + +When using the `--format` option, the `stats` command either +outputs the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`Container` and `CPUPerc` entries separated by a colon for all images: + +```bash +$ docker stats --format "{{.Container}}: {{.CPUPerc}}" + +09d3bb5b1604: 6.61% +9db7aa4d986d: 9.19% +3f214c61ad1d: 0.00% +``` + +To list all containers statistics with their name, CPU percentage and memory +usage in a table format you can use: + +```bash +$ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" + +CONTAINER CPU % PRIV WORKING SET +1285939c1fd3 0.07% 796 KiB / 64 MiB +9c76f7834ae2 0.07% 2.746 MiB / 64 MiB +d1ea048f04e4 0.03% 4.583 MiB / 64 MiB +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/stop.md b/vendor/github.com/moby/moby/docs/reference/commandline/stop.md new file mode 100644 index 0000000..dc00b38 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/stop.md @@ -0,0 +1,37 @@ +--- +title: "stop" +description: "The stop command description and usage" +keywords: "stop, SIGKILL, SIGTERM" +--- + + + +# stop + +```markdown +Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] + +Stop one or more running containers + +Options: + --help Print usage + -t, --time int Seconds to wait for stop before killing it (default 10) +``` + +## Description + +The main process inside the container will receive `SIGTERM`, and after a grace +period, `SIGKILL`. + +## Examples + +```bash +$ docker stop my_container +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/swarm.md b/vendor/github.com/moby/moby/docs/reference/commandline/swarm.md new file mode 100644 index 0000000..395db69 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/swarm.md @@ -0,0 +1,40 @@ +--- +title: "swarm" +description: "The swarm command description and usage" +keywords: "swarm" +--- + + + +# swarm + +```markdown +Usage: docker swarm COMMAND + +Manage Swarm + +Options: + --help Print usage + +Commands: + init Initialize a swarm + join Join a swarm as a node and/or manager + join-token Manage join tokens + leave Leave the swarm + unlock Unlock swarm + unlock-key Manage the unlock key + update Update the swarm + +Run 'docker swarm COMMAND --help' for more information on a command. +``` + +## Description + +Manage the swarm. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/swarm_init.md b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_init.md new file mode 100644 index 0000000..8cba118 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_init.md @@ -0,0 +1,145 @@ +--- +title: "swarm init" +description: "The swarm init command description and usage" +keywords: "swarm, init" +--- + + + +# swarm init + +```markdown +Usage: docker swarm init [OPTIONS] + +Initialize a swarm + +Options: + --advertise-addr string Advertised address (format: [:port]) + --autolock Enable manager autolocking (requiring an unlock key to start a stopped manager) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + --force-new-cluster Force create a new cluster from current state + --help Print usage + --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) +``` + +## Description + +Initialize a swarm. The docker engine targeted by this command becomes a manager +in the newly created single-node swarm. + +## Examples + +```bash +$ docker swarm init --advertise-addr 192.168.99.121 +Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager. + +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ + 172.17.0.2:2377 + +To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. +``` + +`docker swarm init` generates two random tokens, a worker token and a manager token. When you join +a new node to the swarm, the node joins as a worker or manager node based upon the token you pass +to [swarm join](swarm_join.md). + +After you create the swarm, you can display or rotate the token using +[swarm join-token](swarm_join_token.md). + +### `--autolock` + +This flag enables automatic locking of managers with an encryption key. The +private keys and data stored by all managers will be protected by the +encryption key printed in the output, and will not be accessible without it. +Thus, it is very important to store this key in order to activate a manager +after it restarts. The key can be passed to `docker swarm unlock` to reactivate +the manager. Autolock can be disabled by running +`docker swarm update --autolock=false`. After disabling it, the encryption key +is no longer required to start the manager, and it will start up on its own +without user intervention. + +### `--cert-expiry` + +This flag sets the validity period for node certificates. + +### `--dispatcher-heartbeat` + +This flag sets the frequency with which nodes are told to use as a +period to report their health. + +### `--external-ca` + +This flag sets up the swarm to use an external CA to issue node certificates. The value takes +the form `protocol=X,url=Y`. The value for `protocol` specifies what protocol should be used +to send signing requests to the external CA. Currently, the only supported value is `cfssl`. +The URL specifies the endpoint where signing requests should be submitted. + +### `--force-new-cluster` + +This flag forces an existing node that was part of a quorum that was lost to restart as a single node Manager without losing its data. + +### `--listen-addr` + +The node listens for inbound swarm manager traffic on this address. The default is to listen on +0.0.0.0:2377. It is also possible to specify a network interface to listen on that interface's +address; for example `--listen-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address or interface +name, the default port 2377 will be used. + +### `--advertise-addr` + +This flag specifies the address that will be advertised to other members of the +swarm for API access and overlay networking. If unspecified, Docker will check +if the system has a single IP address, and use that IP address with the +listening port (see `--listen-addr`). If the system has multiple IP addresses, +`--advertise-addr` must be specified so that the correct address is chosen for +inter-manager communication and overlay networking. + +It is also possible to specify a network interface to advertise that interface's address; +for example `--advertise-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address or interface +name, the default port 2377 will be used. + +### `--task-history-limit` + +This flag sets up task history retention limit. + +### `--max-snapshots` + +This flag sets the number of old Raft snapshots to retain in addition to the +current Raft snapshots. By default, no old snapshots are retained. This option +may be used for debugging, or to store old snapshots of the swarm state for +disaster recovery purposes. + +### `--snapshot-interval` + +This flag specifies how many log entries to allow in between Raft snapshots. +Setting this to a higher number will trigger snapshots less frequently. +Snapshots compact the Raft log and allow for more efficient transfer of the +state to new managers. However, there is a performance cost to taking snapshots +frequently. + +## Related commands + +* [swarm join](swarm_join.md) +* [swarm leave](swarm_leave.md) +* [swarm update](swarm_update.md) +* [swarm join-token](swarm_join_token.md) +* [node rm](node_rm.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/swarm_join.md b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_join.md new file mode 100644 index 0000000..d0b6685 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_join.md @@ -0,0 +1,106 @@ +--- +title: "swarm join" +description: "The swarm join command description and usage" +keywords: "swarm, join" +--- + + + +# swarm join + +```markdown +Usage: docker swarm join [OPTIONS] HOST:PORT + +Join a swarm as a node and/or manager + +Options: + --advertise-addr string Advertised address (format: [:port]) + --help Print usage + --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) + --token string Token for entry into the swarm +``` + +## Description + +Join a node to a swarm. The node joins as a manager node or worker node based upon the token you +pass with the `--token` flag. If you pass a manager token, the node joins as a manager. If you +pass a worker token, the node joins as a worker. + +## Examples + +### Join a node to swarm as a manager + +The example below demonstrates joining a manager node using a manager token. + +```bash +$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 192.168.99.121:2377 +This node joined a swarm as a manager. +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +dkp8vy1dq1kxleu9g4u78tlag * manager2 Ready Active Reachable +dvfxp4zseq4s0rih1selh0d20 manager1 Ready Active Leader +``` + +A cluster should only have 3-7 managers at most, because a majority of managers must be available +for the cluster to function. Nodes that aren't meant to participate in this management quorum +should join as workers instead. Managers should be stable hosts that have static IP addresses. + +### Join a node to swarm as a worker + +The example below demonstrates joining a worker node using a worker token. + +```bash +$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 192.168.99.121:2377 +This node joined a swarm as a worker. +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active +dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable +dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader +``` + +### `--listen-addr value` + +If the node is a manager, it will listen for inbound swarm manager traffic on this +address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a +network interface to listen on that interface's address; for example `--listen-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address, or interface +name, the default port 2377 will be used. + +This flag is generally not necessary when joining an existing swarm. + +### `--advertise-addr value` + +This flag specifies the address that will be advertised to other members of the +swarm for API access. If unspecified, Docker will check if the system has a +single IP address, and use that IP address with the listening port (see +`--listen-addr`). If the system has multiple IP addresses, `--advertise-addr` +must be specified so that the correct address is chosen for inter-manager +communication and overlay networking. + +It is also possible to specify a network interface to advertise that interface's address; +for example `--advertise-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address, or interface +name, the default port 2377 will be used. + +This flag is generally not necessary when joining an existing swarm. + +### `--token string` + +Secret value required for nodes to join the swarm + + +## Related commands + +* [swarm init](swarm_init.md) +* [swarm leave](swarm_leave.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/swarm_join_token.md b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_join_token.md new file mode 100644 index 0000000..60f5622 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_join_token.md @@ -0,0 +1,109 @@ +--- +title: "swarm join-token" +description: "The swarm join-token command description and usage" +keywords: "swarm, join-token" +--- + + + +# swarm join-token + +```markdown +Usage: docker swarm join-token [OPTIONS] (worker|manager) + +Manage join tokens + +Options: + --help Print usage + -q, --quiet Only display token + --rotate Rotate join token +``` + +## Description + +Join tokens are secrets that allow a node to join the swarm. There are two +different join tokens available, one for the worker role and one for the manager +role. You pass the token using the `--token` flag when you run +[swarm join](swarm_join.md). Nodes use the join token only when they join the +swarm. + +## Examples + +You can view or rotate the join tokens using `swarm join-token`. + +As a convenience, you can pass `worker` or `manager` as an argument to +`join-token` to print the full `docker swarm join` command to join a new node to +the swarm: + +```bash +$ docker swarm join-token worker +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ + 172.17.0.2:2377 + +$ docker swarm join-token manager +To add a manager to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \ + 172.17.0.2:2377 +``` + +Use the `--rotate` flag to generate a new join token for the specified role: + +```bash +$ docker swarm join-token --rotate worker +Successfully rotated worker join token. + +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \ + 172.17.0.2:2377 +``` + +After using `--rotate`, only the new token will be valid for joining with the specified role. + +The `-q` (or `--quiet`) flag only prints the token: + +```bash +$ docker swarm join-token -q worker + +SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t +``` + +### `--rotate` + +Because tokens allow new nodes to join the swarm, you should keep them secret. +Be particularly careful with manager tokens since they allow new manager nodes +to join the swarm. A rogue manager has the potential to disrupt the operation of +your swarm. + +Rotate your swarm's join token if a token gets checked-in to version control, +stolen, or a node is compromised. You may also want to periodically rotate the +token to ensure any unknown token leaks do not allow a rogue node to join +the swarm. + +To rotate the join token and print the newly generated token, run +`docker swarm join-token --rotate` and pass the role: `manager` or `worker`. + +Rotating a join-token means that no new nodes will be able to join the swarm +using the old token. Rotation does not affect existing nodes in the swarm +because the join token is only used for authorizing new nodes joining the swarm. + +### `--quiet` + +Only print the token. Do not print a complete command for joining. + +## Related commands + +* [swarm join](swarm_join.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/swarm_leave.md b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_leave.md new file mode 100644 index 0000000..e534579 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_leave.md @@ -0,0 +1,68 @@ +--- +title: "swarm leave" +description: "The swarm leave command description and usage" +keywords: "swarm, leave" +--- + + + +# swarm leave + +```markdown +Usage: docker swarm leave [OPTIONS] + +Leave the swarm + +Options: + -f, --force Force this node to leave the swarm, ignoring warnings + --help Print usage +``` + +## Description + +When you run this command on a worker, that worker leaves the swarm. + +You can use the `--force` option on a manager to remove it from the swarm. +However, this does not reconfigure the swarm to ensure that there are enough +managers to maintain a quorum in the swarm. The safe way to remove a manager +from a swarm is to demote it to a worker and then direct it to leave the quorum +without using `--force`. Only use `--force` in situations where the swarm will +no longer be used after the manager leaves, such as in a single-node swarm. + +## Examples + +Consider the following swarm, as seen from the manager: + +```bash +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active +dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active +dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader +``` + +To remove `worker2`, issue the following command from `worker2` itself: + +```bash +$ docker swarm leave +Node left the default swarm. +``` + +The node will still appear in the node list, and marked as `down`. It no longer +affects swarm operation, but a long list of `down` nodes can clutter the node +list. To remove an inactive node from the list, use the [`node rm`](node_rm.md) +command. + +## Related commands + +* [node rm](node_rm.md) +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/swarm_unlock.md b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_unlock.md new file mode 100644 index 0000000..bd7ed14 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_unlock.md @@ -0,0 +1,44 @@ +--- +title: "swarm unlock" +description: "The swarm unlock command description and usage" +keywords: "swarm, unlock" +--- + + + +# swarm unlock + +```markdown +Usage: docker swarm unlock + +Unlock swarm + +Options: + --help Print usage +``` + +## Description + +Unlocks a locked manager using a user-supplied unlock key. This command must be +used to reactivate a manager after its Docker daemon restarts if the autolock +setting is turned on. The unlock key is printed at the time when autolock is +enabled, and is also available from the `docker swarm unlock-key` command. + +## Examples + +```bash +$ docker swarm unlock +Please enter unlock key: +``` + +## Related commands + +* [swarm init](swarm_init.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/swarm_unlock_key.md b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_unlock_key.md new file mode 100644 index 0000000..6dc3aae --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_unlock_key.md @@ -0,0 +1,88 @@ +--- +title: "swarm unlock-key" +description: "The swarm unlock-keycommand description and usage" +keywords: "swarm, unlock-key" +--- + + + +# swarm unlock-key + +```markdown +Usage: docker swarm unlock-key [OPTIONS] + +Manage the unlock key + +Options: + --help Print usage + -q, --quiet Only display token + --rotate Rotate unlock key +``` + +## Description + +An unlock key is a secret key needed to unlock a manager after its Docker daemon +restarts. These keys are only used when the autolock feature is enabled for the +swarm. + +You can view or rotate the unlock key using `swarm unlock-key`. To view the key, +run the `docker swarm unlock-key` command without any arguments: + +## Examples + +```bash +$ docker swarm unlock-key + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-fySn8TY4w5lKcWcJPIpKufejh9hxx5KYwx6XZigx3Q4 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. +``` + +Use the `--rotate` flag to rotate the unlock key to a new, randomly-generated +key: + +```bash +$ docker swarm unlock-key --rotate +Successfully rotated manager unlock key. + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. +``` + +The `-q` (or `--quiet`) flag only prints the key: + +```bash +$ docker swarm unlock-key -q +SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 +``` + +### `--rotate` + +This flag rotates the unlock key, replacing it with a new randomly-generated +key. The old unlock key will no longer be accepted. + +### `--quiet` + +Only print the unlock key, without instructions. + +## Related commands + +* [swarm unlock](swarm_unlock.md) +* [swarm init](swarm_init.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/swarm_update.md b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_update.md new file mode 100644 index 0000000..aed72d0 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/swarm_update.md @@ -0,0 +1,48 @@ +--- +title: "swarm update" +description: "The swarm update command description and usage" +keywords: "swarm, update" +--- + + + +# swarm update + +```markdown +Usage: docker swarm update [OPTIONS] + +Update the swarm + +Options: + --autolock Change manager autolocking setting (true|false) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + --help Print usage + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) +``` + +## Description + +Updates a swarm with new parameter values. This command must target a manager node. + +## Examples + +```bash +$ docker swarm update --cert-expiry 720h +``` + +## Related commands + +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm leave](swarm_leave.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/system.md b/vendor/github.com/moby/moby/docs/reference/commandline/system.md new file mode 100644 index 0000000..831575d --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/system.md @@ -0,0 +1,37 @@ +--- +title: "system" +description: "The system command description and usage" +keywords: "system" +--- + + + +# system + +```markdown +Usage: docker system COMMAND + +Manage Docker + +Options: + --help Print usage + +Commands: + df Show docker disk usage + events Get real time events from the server + info Display system-wide information + prune Remove unused data + +Run 'docker system COMMAND --help' for more information on a command. +``` + +## Description + +Manage docker. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/system_df.md b/vendor/github.com/moby/moby/docs/reference/commandline/system_df.md new file mode 100644 index 0000000..86cc989 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/system_df.md @@ -0,0 +1,94 @@ +--- +title: "system df" +description: "The system df command description and usage" +keywords: "system, data, usage, disk" +--- + + + +# system df + +```markdown +Usage: docker system df [OPTIONS] + +Show docker filesystem usage + +Options: + --help Print usage + -v, --verbose Show detailed information on space usage +``` + +## Description + +The `docker system df` command displays information regarding the +amount of disk space used by the docker daemon. + +## Examples + +By default the command will just show a summary of the data used: + +```bash +$ docker system df + +TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 5 2 16.43 MB 11.63 MB (70%) +Containers 2 0 212 B 212 B (100%) +Local Volumes 2 1 36 B 0 B (0%) +``` + +A more detailed view can be requested using the `-v, --verbose` flag: + +```bash +$ docker system df -v + +Images space usage: + +REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS +my-curl latest b2789dd875bf 6 minutes ago 11 MB 11 MB 5 B 0 +my-jq latest ae67841be6d0 6 minutes ago 9.623 MB 8.991 MB 632.1 kB 0 + a0971c4015c1 6 minutes ago 11 MB 11 MB 0 B 0 +alpine latest 4e38e38c8ce0 9 weeks ago 4.799 MB 0 B 4.799 MB 1 +alpine 3.3 47cf20d8c26c 9 weeks ago 4.797 MB 4.797 MB 0 B 1 + +Containers space usage: + +CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES +4a7f7eebae0f alpine:latest "sh" 1 0 B 16 minutes ago Exited (0) 5 minutes ago hopeful_yalow +f98f9c2aa1ea alpine:3.3 "sh" 1 212 B 16 minutes ago Exited (0) 48 seconds ago anon-vol + +Local Volumes space usage: + +NAME LINKS SIZE +07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e 2 36 B +my-named-vol 0 0 B +``` + +* `SHARED SIZE` is the amount of space that an image shares with another one (i.e. their common data) +* `UNIQUE SIZE` is the amount of space that is only used by a given image +* `SIZE` is the virtual size of the image, it is the sum of `SHARED SIZE` and `UNIQUE SIZE` + +> **Note**: Network information is not shown because it doesn't consume the disk +> space. + +## Performance + +The `system df` command can be very resource-intensive. It traverses the +filesystem of every image, container, and volume in the system. You should be +careful running this command in systems with lots of images, containers, or +volumes or in systems where some images, containers, or volumes have very large +filesystems with many files. You should also be careful not to run this command +in systems where performance is critical. + +## Related commands +* [system prune](system_prune.md) +* [container prune](container_prune.md) +* [volume prune](volume_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/system_prune.md b/vendor/github.com/moby/moby/docs/reference/commandline/system_prune.md new file mode 100644 index 0000000..4959b65 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/system_prune.md @@ -0,0 +1,82 @@ +--- +title: "system prune" +description: "Remove unused data" +keywords: "system, prune, delete, remove" +--- + + + +# system prune + +```markdown +Usage: docker system prune [OPTIONS] + +Delete unused data + +Options: + -a, --all Remove all unused data not just dangling ones + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Description + +Remove all unused containers, volumes, networks and images (both dangling and unreferenced). + +## Examples + +```bash +$ docker system prune -a + +WARNING! This will remove: + - all stopped containers + - all volumes not used by at least one container + - all networks not used by at least one container + - all images without at least one container associated to them +Are you sure you want to continue? [y/N] y +Deleted Containers: +0998aa37185a1a7036b0e12cf1ac1b6442dcfa30a5c9650a42ed5010046f195b +73958bfb884fa81fa4cc6baf61055667e940ea2357b4036acbbe25a60f442a4d + +Deleted Volumes: +named-vol + +Deleted Images: +untagged: my-curl:latest +deleted: sha256:7d88582121f2a29031d92017754d62a0d1a215c97e8f0106c586546e7404447d +deleted: sha256:dd14a93d83593d4024152f85d7c63f76aaa4e73e228377ba1d130ef5149f4d8b +untagged: alpine:3.3 +deleted: sha256:695f3d04125db3266d4ab7bbb3c6b23aa4293923e762aa2562c54f49a28f009f +untagged: alpine:latest +deleted: sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96 +deleted: sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f +deleted: sha256:71fa90c8f04769c9721459d5aa0936db640b92c8c91c9b589b54abd412d120ab +deleted: sha256:bb1c3357b3c30ece26e6604aea7d2ec0ace4166ff34c3616701279c22444c0f3 +untagged: my-jq:latest +deleted: sha256:6e66d724542af9bc4c4abf4a909791d7260b6d0110d8e220708b09e4ee1322e1 +deleted: sha256:07b3fa89d4b17009eb3988dfc592c7d30ab3ba52d2007832dffcf6d40e3eda7f +deleted: sha256:3a88a5c81eb5c283e72db2dbc6d65cbfd8e80b6c89bb6e714cfaaa0eed99c548 + +Total reclaimed space: 13.5 MB +``` + +## Related commands + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/tag.md b/vendor/github.com/moby/moby/docs/reference/commandline/tag.md new file mode 100644 index 0000000..5f9defd --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/tag.md @@ -0,0 +1,84 @@ +--- +title: "tag" +description: "The tag command description and usage" +keywords: "tag, name, image" +--- + + + +# tag + +```markdown +Usage: docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] + +Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE + +Options: + --help Print usage +``` + +## Description + +An image name is made up of slash-separated name components, optionally prefixed +by a registry hostname. The hostname must comply with standard DNS rules, but +may not contain underscores. If a hostname is present, it may optionally be +followed by a port number in the format `:8080`. If not present, the command +uses Docker's public registry located at `registry-1.docker.io` by default. Name +components may contain lowercase letters, digits and separators. A separator +is defined as a period, one or two underscores, or one or more dashes. A name +component may not start or end with a separator. + +A tag name must be valid ASCII and may contain lowercase and uppercase letters, +digits, underscores, periods and dashes. A tag name may not start with a +period or a dash and may contain a maximum of 128 characters. + +You can group your images together using names and tags, and then upload them +to [*Share Images via Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). + +## Examples + +### Tag an image referenced by ID + +To tag a local image with ID "0e5574283393" into the "fedora" repository with +"version1.0": + +```bash +$ docker tag 0e5574283393 fedora/httpd:version1.0 +``` + +### Tag an image referenced by Name + +To tag a local image with name "httpd" into the "fedora" repository with +"version1.0": + +```bash +$ docker tag httpd fedora/httpd:version1.0 +``` + +Note that since the tag name is not specified, the alias is created for an +existing local version `httpd:latest`. + +### Tag an image referenced by Name and Tag + +To tag a local image with name "httpd" and tag "test" into the "fedora" +repository with "version1.0.test": + +```bash +$ docker tag httpd:test fedora/httpd:version1.0.test +``` + +### Tag an image for a private repository + +To push an image to a private registry and not the central Docker +registry you must tag it with the registry hostname and port (if needed). + +```bash +$ docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/top.md b/vendor/github.com/moby/moby/docs/reference/commandline/top.md new file mode 100644 index 0000000..0a04828 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/top.md @@ -0,0 +1,25 @@ +--- +title: "top" +description: "The top command description and usage" +keywords: "container, running, processes" +--- + + + +# top + +```markdown +Usage: docker top CONTAINER [ps OPTIONS] + +Display the running processes of a container + +Options: + --help Print usage +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/unpause.md b/vendor/github.com/moby/moby/docs/reference/commandline/unpause.md new file mode 100644 index 0000000..8915a43 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/unpause.md @@ -0,0 +1,44 @@ +--- +title: "unpause" +description: "The unpause command description and usage" +keywords: "cgroups, suspend, container" +--- + + + +# unpause + +```markdown +Usage: docker unpause CONTAINER [CONTAINER...] + +Unpause all processes within one or more containers + +Options: + --help Print usage +``` + +## Description + +The `docker unpause` command un-suspends all processes in the specified containers. +On Linux, it does this using the cgroups freezer. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) +for further details. + +## Examples + +```bash +$ docker unpause my_container +``` + +## Related commands + +* [pause](pause.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/update.md b/vendor/github.com/moby/moby/docs/reference/commandline/update.md new file mode 100644 index 0000000..f41cf39 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/update.md @@ -0,0 +1,122 @@ +--- +title: "update" +description: "The update command description and usage" +keywords: "resources, update, dynamically" +--- + + + +## update + +```markdown +Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] + +Update configuration of one or more containers + +Options: + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --help Print usage + --kernel-memory string Kernel memory limit + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --restart string Restart policy to apply when a container exits +``` + +## Description + +The `docker update` command dynamically updates container configuration. +You can use this command to prevent containers from consuming too many +resources from their Docker host. With a single command, you can place +limits on a single container or on many. To specify more than one container, +provide space-separated list of container names or IDs. + +With the exception of the `--kernel-memory` option, you can specify these +options on a running or a stopped container. On kernel version older than +4.6, you can only update `--kernel-memory` on a stopped container or on +a running container with kernel memory initialized. + +## Examples + +The following sections illustrate ways to use this command. + +### Update a container's cpu-shares + +To limit a container's cpu-shares to 512, first identify the container +name or ID. You can use `docker ps` to find these values. You can also +use the ID returned from the `docker run` command. Then, do the following: + +```bash +$ docker update --cpu-shares 512 abebf7571666 +``` + +### Update a container with cpu-shares and memory + +To update multiple resource configurations for multiple containers: + +```bash +$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse +``` + +### Update a container's kernel memory constraints + +You can update a container's kernel memory limit using the `--kernel-memory` +option. On kernel version older than 4.6, this option can be updated on a +running container only if the container was started with `--kernel-memory`. +If the container was started *without* `--kernel-memory` you need to stop +the container before updating kernel memory. + +For example, if you started a container with this command: + +```bash +$ docker run -dit --name test --kernel-memory 50M ubuntu bash +``` + +You can update kernel memory while the container is running: + +```bash +$ docker update --kernel-memory 80M test +``` + +If you started a container *without* kernel memory initialized: + +```bash +$ docker run -dit --name test2 --memory 300M ubuntu bash +``` + +Update kernel memory of running container `test2` will fail. You need to stop +the container before updating the `--kernel-memory` setting. The next time you +start it, the container uses the new value. + +Kernel version newer than (include) 4.6 does not have this limitation, you +can use `--kernel-memory` the same way as other options. + +### Update a container's restart policy + +You can change a container's restart policy on a running container. The new +restart policy takes effect instantly after you run `docker update` on a +container. + +To update restart policy for one or more containers: + +```bash +$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse +``` + +Note that if the container is started with "--rm" flag, you cannot update the restart +policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the +container. diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/version.md b/vendor/github.com/moby/moby/docs/reference/commandline/version.md new file mode 100644 index 0000000..b15d13b --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/version.md @@ -0,0 +1,74 @@ +--- +title: "version" +description: "The version command description and usage" +keywords: "version, architecture, api" +--- + + + +# version + +```markdown +Usage: docker version [OPTIONS] + +Show the Docker version information + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +By default, this will render all version information in an easy to read +layout. If a format is specified, the given template will be executed instead. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +### Default output + +```bash +$ docker version + +Client: +Version: 1.8.0 +API version: 1.20 +Go version: go1.4.2 +Git commit: f5bae0a +Built: Tue Jun 23 17:56:00 UTC 2015 +OS/Arch: linux/amd64 + +Server: +Version: 1.8.0 +API version: 1.20 +Go version: go1.4.2 +Git commit: f5bae0a +Built: Tue Jun 23 17:56:00 UTC 2015 +OS/Arch: linux/amd64 +``` + +### Get the server version + +```bash +$ docker version --format '{{.Server.Version}}' + +1.8.0 +``` + +### Dump raw JSON data + +```bash +$ docker version --format '{{json .}}' + +{"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} +``` diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/volume.md b/vendor/github.com/moby/moby/docs/reference/commandline/volume.md new file mode 100644 index 0000000..d12d33d --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/volume.md @@ -0,0 +1,48 @@ +--- +title: "volume" +description: "The volume command description and usage" +keywords: "volume" +--- + + + +# volume + +```markdown +Usage: docker volume COMMAND + +Manage volumes + +Options: + --help Print usage + +Commands: + create Create a volume + inspect Display detailed information on one or more volumes + ls List volumes + prune Remove all unused volumes + rm Remove one or more volumes + +Run 'docker volume COMMAND --help' for more information on a command. +``` + +## Description + +Manage volumes. You can use subcommand to create, list, inspect, remove +volumes. + +## Related commands + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume list](volume_list.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/volume_create.md b/vendor/github.com/moby/moby/docs/reference/commandline/volume_create.md new file mode 100644 index 0000000..b1eed37 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/volume_create.md @@ -0,0 +1,125 @@ +--- +title: "volume create" +description: "The volume create command description and usage" +keywords: "volume, create" +--- + + + +# volume create + +```markdown +Usage: docker volume create [OPTIONS] [VOLUME] + +Create a volume + +Options: + -d, --driver string Specify volume driver name (default "local") + --help Print usage + --label value Set metadata for a volume (default []) + -o, --opt value Set driver specific options (default map[]) +``` + +## Description + +Creates a new volume that containers can consume and store data in. If a name is +not specified, Docker generates a random name. + +## Examples + +Create a volume and then configure the container to use it: + +```bash +$ docker volume create hello + +hello + +$ docker run -d -v hello:/world busybox ls /world +``` + +The mount is created inside the container's `/world` directory. Docker does not +support relative paths for mount points inside the container. + +Multiple containers can use the same volume in the same time period. This is +useful if two containers need access to shared data. For example, if one +container writes and the other reads the data. + +Volume names must be unique among drivers. This means you cannot use the same +volume name with two different drivers. If you attempt this `docker` returns an +error: + +```none +A volume named "hello" already exists with the "some-other" driver. Choose a different volume name. +``` + +If you specify a volume name already in use on the current driver, Docker +assumes you want to re-use the existing volume and does not return an error. + +### Driver-specific options + +Some volume drivers may take options to customize the volume creation. Use the +`-o` or `--opt` flags to pass driver options: + +```bash +$ docker volume create --driver fake \ + --opt tardis=blue \ + --opt timey=wimey \ + foo +``` + +These options are passed directly to the volume driver. Options for +different volume drivers may do different things (or nothing at all). + +The built-in `local` driver on Windows does not support any options. + +The built-in `local` driver on Linux accepts options similar to the linux +`mount` command. You can provide multiple options by passing the `--opt` flag +multiple times. Some `mount` options (such as the `o` option) can take a +comma-separated list of options. Complete list of available mount options can be +found [here](http://man7.org/linux/man-pages/man8/mount.8.html). + +For example, the following creates a `tmpfs` volume called `foo` with a size of +100 megabyte and `uid` of 1000. + +```bash +$ docker volume create --driver local \ + --opt type=tmpfs \ + --opt device=tmpfs \ + --opt o=size=100m,uid=1000 \ + foo +``` + +Another example that uses `btrfs`: + +```bash +$ docker volume create --driver local \ + --opt type=btrfs \ + --opt device=/dev/sda2 \ + foo +``` + +Another example that uses `nfs` to mount the `/path/to/dir` in `rw` mode from +`192.168.1.1`: + +```bash +$ docker volume create --driver local \ + --opt type=nfs \ + --opt o=addr=192.168.1.1,rw \ + --opt device=:/path/to/dir \ + foo +``` + +## Related commands + +* [volume inspect](volume_inspect.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/volume_inspect.md b/vendor/github.com/moby/moby/docs/reference/commandline/volume_inspect.md new file mode 100644 index 0000000..bbdc6bd --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/volume_inspect.md @@ -0,0 +1,61 @@ +--- +title: "volume inspect" +description: "The volume inspect command description and usage" +keywords: "volume, inspect" +--- + + + +# volume inspect + +```markdown +Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] + +Display detailed information on one or more volumes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Returns information about a volume. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +## Examples + +```bash +$ docker volume create +85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d +$ docker volume inspect 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d +[ + { + "Name": "85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data", + "Status": null + } +] + +$ docker volume inspect --format '{{ .Mountpoint }}' 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d +/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data +``` + +## Related commands + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/volume_ls.md b/vendor/github.com/moby/moby/docs/reference/commandline/volume_ls.md new file mode 100644 index 0000000..c35e662 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/volume_ls.md @@ -0,0 +1,199 @@ +--- +title: "volume ls" +description: "The volume ls command description and usage" +keywords: "volume, list" +--- + + + +# volume ls + +```markdown +Usage: docker volume ls [OPTIONS] + +List volumes + +Aliases: + ls, list + +Options: + -f, --filter value Provide filter values (e.g. 'dangling=true') (default []) + - dangling= a volume if referenced or not + - driver= a volume's driver name + - label= or label== + - name= a volume's name + --format string Pretty-print volumes using a Go template + --help Print usage + -q, --quiet Only display volume names +``` + +## Description + +List all the volumes known to Docker. You can filter using the `-f` or +`--filter` flag. Refer to the [filtering](#filtering) section for more +information about available filter options. + +## Examples + +### Create a volume +```bash +$ docker volume create rosemary + +rosemary + +$ docker volume create tyler + +tyler + +$ docker volume ls + +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* dangling (boolean - true or false, 0 or 1) +* driver (a volume driver's name) +* label (`label=` or `label==`) +* name (a volume's name) + +#### dangling + +The `dangling` filter matches on all volumes not referenced by any containers + +```bash +$ docker run -d -v tyler:/tmpwork busybox + +f86a7dd02898067079c99ceacd810149060a70528eff3754d0b0f1a93bd0af18 +$ docker volume ls -f dangling=true +DRIVER VOLUME NAME +local rosemary +``` + +#### driver + +The `driver` filter matches on all or part of a volume's driver name. + +The following filter matches all volumes with a driver name containing the `local` string. + +```bash +$ docker volume ls -f driver=local + +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +#### label + +The `label` filter matches volumes based on the presence of a `label` alone or +a `label` and a value. + +First, let's create some volumes to illustrate this; + +```bash +$ docker volume create the-doctor --label is-timelord=yes + +the-doctor +$ docker volume create daleks --label is-timelord=no + +daleks +``` + +The following example filter matches volumes with the `is-timelord` label +regardless of its value. + +```bash +$ docker volume ls --filter label=is-timelord + +DRIVER VOLUME NAME +local daleks +local the-doctor +``` + +As the above example demonstrates, both volumes with `is-timelord=yes`, and +`is-timelord=no` are returned. + +Filtering on both `key` *and* `value` of the label, produces the expected result: + +```bash +$ docker volume ls --filter label=is-timelord=yes + +DRIVER VOLUME NAME +local the-doctor +``` + +Specifying multiple label filter produces an "and" search; all conditions +should be met; + +```bash +$ docker volume ls --filter label=is-timelord=yes --filter label=is-timelord=no + +DRIVER VOLUME NAME +``` + +#### name + +The `name` filter matches on all or part of a volume's name. + +The following filter matches all volumes with a name containing the `rose` string. + +```bash +$ docker volume ls -f name=rose + +DRIVER VOLUME NAME +local rosemary +``` + +### Formatting + +The formatting options (`--format`) pretty-prints volumes output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +--------------|------------------------------------------------------------------------------------------ +`.Name` | Network name +`.Driver` | Network driver +`.Scope` | Network scope (local, global) +`.Mountpoint` | Whether the network is internal or not. +`.Labels` | All labels assigned to the volume. +`.Label` | Value of a specific label for this volume. For example `{{.Label "project.version"}}` + +When using the `--format` option, the `volume ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`Name` and `Driver` entries separated by a colon for all volumes: + +```bash +$ docker volume ls --format "{{.Name}}: {{.Driver}}" + +vol1: local +vol2: local +vol3: local +``` + +## Related commands + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/volume_prune.md b/vendor/github.com/moby/moby/docs/reference/commandline/volume_prune.md new file mode 100644 index 0000000..a9c4b70 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/volume_prune.md @@ -0,0 +1,57 @@ +--- +title: "volume prune" +description: "Remove unused volumes" +keywords: "volume, prune, delete" +--- + + + +# volume prune + +```markdown +Usage: docker volume prune [OPTIONS] + +Remove all unused volumes + +Options: + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Description + +Remove all unused volumes. Unused volumes are those which are not referenced by any containers + +## Examples + +```bash +$ docker volume prune + +WARNING! This will remove all volumes not used by at least one container. +Are you sure you want to continue? [y/N] y +Deleted Volumes: +07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e +my-named-vol + +Total reclaimed space: 36 B +``` + +## Related commands + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/volume_rm.md b/vendor/github.com/moby/moby/docs/reference/commandline/volume_rm.md new file mode 100644 index 0000000..a1f2d3b --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/volume_rm.md @@ -0,0 +1,48 @@ +--- +title: "volume rm" +description: "the volume rm command description and usage" +keywords: "volume, rm" +--- + + + +# volume rm + +```markdown +Usage: docker volume rm [OPTIONS] VOLUME [VOLUME...] + +Remove one or more volumes + +Aliases: + rm, remove + +Options: + -f, --force Force the removal of one or more volumes + --help Print usage +``` + +## Description + +Remove one or more volumes. You cannot remove a volume that is in use by a container. + +## Examples + +```bash + $ docker volume rm hello + hello +``` + +## Related commands + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume ls](volume_ls.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/moby/moby/docs/reference/commandline/wait.md b/vendor/github.com/moby/moby/docs/reference/commandline/wait.md new file mode 100644 index 0000000..ee8f9ab --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/commandline/wait.md @@ -0,0 +1,58 @@ +--- +title: "wait" +description: "The wait command description and usage" +keywords: "container, stop, wait" +--- + + + +# wait + +```markdown +Usage: docker wait CONTAINER [CONTAINER...] + +Block until one or more containers stop, then print their exit codes + +Options: + --help Print usage +``` + +> **Note**: `docker wait` returns `0` when run against a container which had +> already exited before the `docker wait` command was run. + +## Examples + +Start a container in the background. + +```bash +$ docker run -dit --name=my_container ubuntu bash +``` + +Run `docker wait`, which should block until the container exits. + +```bash +$ docker wait my_container +``` + +In another terminal, stop the first container. The `docker wait` command above +returns the exit code. + +```bash +$ docker stop my_container +``` + +This is the same `docker wait` command from above, but it now exits, returning +`0`. + +```bash +$ docker wait my_container + +0 +``` diff --git a/vendor/github.com/moby/moby/docs/reference/glossary.md b/vendor/github.com/moby/moby/docs/reference/glossary.md new file mode 100644 index 0000000..f829ad5 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/glossary.md @@ -0,0 +1,374 @@ +--- +title: "Docker Glossary" +description: "Glossary of terms used around Docker" +keywords: "glossary, docker, terms, definitions" +--- + + + +# Glossary + +A list of terms used around the Docker project. + +## aufs + +aufs (advanced multi layered unification filesystem) is a Linux [filesystem](#filesystem) that +Docker supports as a storage backend. It implements the +[union mount](http://en.wikipedia.org/wiki/Union_mount) for Linux file systems. + +## base image + +An image that has no parent is a **base image**. + +## boot2docker + +[boot2docker](http://boot2docker.io/) is a lightweight Linux distribution made +specifically to run Docker containers. The boot2docker management tool for Mac and Windows was deprecated and replaced by [`docker-machine`](#machine) which you can install with the Docker Toolbox. + +## bridge + +In terms of generic networking, a bridge is a Link Layer device which forwards +traffic between network segments. A bridge can be a hardware device or a +software device running within a host machine's kernel. + +In terms of Docker, a bridge network uses a software bridge which allows +containers connected to the same bridge network to communicate, while providing +isolation from containers which are not connected to that bridge network. +The Docker bridge driver automatically installs rules in the host machine so +that containers on different bridge networks cannot communicate directly with +each other. + +The default bridge network, which is also named `bridge`, behaves differently +from user-defined bridge networks. Containers connected to the default `bridge` +network can communicate with each other across the bridge by IP address but +cannot resolve each other's container name to an IP address unless they are +explicitly linked using the `--link` flag to `docker run`. + +For more information about Docker networking, see +[Understand container communication](https://docs.docker.com/engine/userguide/networking/default_network/container-communication/). + +## btrfs + +btrfs (B-tree file system) is a Linux [filesystem](#filesystem) that Docker +supports as a storage backend. It is a [copy-on-write](http://en.wikipedia.org/wiki/Copy-on-write) +filesystem. + +## build + +build is the process of building Docker images using a [Dockerfile](#dockerfile). +The build uses a Dockerfile and a "context". The context is the set of files in the +directory in which the image is built. + +## cgroups + +cgroups is a Linux kernel feature that limits, accounts for, and isolates +the resource usage (CPU, memory, disk I/O, network, etc.) of a collection +of processes. Docker relies on cgroups to control and isolate resource limits. + +*Also known as : control groups* + +## Compose + +[Compose](https://github.com/docker/compose) is a tool for defining and +running complex applications with Docker. With compose, you define a +multi-container application in a single file, then spin your +application up in a single command which does everything that needs to +be done to get it running. + +*Also known as : docker-compose, fig* + +## copy-on-write + +Docker uses a +[copy-on-write](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/#/the-copy-on-write-strategy) +technique and a [union file system](#union-file-system) for both images and +containers to optimize resources and speed performance. Multiple copies of an +entity share the same instance and each one makes only specific changes to its +unique layer. + +Multiple containers can share access to the same image, and make +container-specific changes on a writable layer which is deleted when +the container is removed. This speeds up container start times and performance. + +Images are essentially layers of filesystems typically predicated on a base +image under a writable layer, and built up with layers of differences from the +base image. This minimizes the footprint of the image and enables shared +development. + +For more about copy-on-write in the context of Docker, see [Understand images, +containers, and storage +drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/). + +## container + +A container is a runtime instance of a [docker image](#image). + +A Docker container consists of + +- A Docker image +- Execution environment +- A standard set of instructions + +The concept is borrowed from Shipping Containers, which define a standard to ship +goods globally. Docker defines a standard to ship software. + +## data volume + +A data volume is a specially-designated directory within one or more containers +that bypasses the Union File System. Data volumes are designed to persist data, +independent of the container's life cycle. Docker therefore never automatically +delete volumes when you remove a container, nor will it "garbage collect" +volumes that are no longer referenced by a container. + + +## Docker + +The term Docker can refer to + +- The Docker project as a whole, which is a platform for developers and sysadmins to +develop, ship, and run applications +- The docker daemon process running on the host which manages images and containers + + +## Docker for Mac + +[Docker for Mac](https://docs.docker.com/docker-for-mac/) is an easy-to-install, +lightweight Docker development environment designed specifically for the Mac. A +native Mac application, Docker for Mac uses the macOS Hypervisor framework, +networking, and filesystem. It's the best solution if you want to build, debug, +test, package, and ship Dockerized applications on a Mac. Docker for Mac +supersedes [Docker Toolbox](#toolbox) as state-of-the-art Docker on macOS. + + +## Docker for Windows + +[Docker for Windows](https://docs.docker.com/docker-for-windows/) is an +easy-to-install, lightweight Docker development environment designed +specifically for Windows 10 systems that support Microsoft Hyper-V +(Professional, Enterprise and Education). Docker for Windows uses Hyper-V for +virtualization, and runs as a native Windows app. It works with Windows Server +2016, and gives you the ability to set up and run Windows containers as well as +the standard Linux containers, with an option to switch between the two. Docker +for Windows is the best solution if you want to build, debug, test, package, and +ship Dockerized applications from Windows machines. Docker for Windows +supersedes [Docker Toolbox](#toolbox) as state-of-the-art Docker on Windows. + +## Docker Hub + +The [Docker Hub](https://hub.docker.com/) is a centralized resource for working with +Docker and its components. It provides the following services: + +- Docker image hosting +- User authentication +- Automated image builds and work-flow tools such as build triggers and web hooks +- Integration with GitHub and Bitbucket + + +## Dockerfile + +A Dockerfile is a text document that contains all the commands you would +normally execute manually in order to build a Docker image. Docker can +build images automatically by reading the instructions from a Dockerfile. + +## filesystem + +A file system is the method an operating system uses to name files +and assign them locations for efficient storage and retrieval. + +Examples : + +- Linux : ext4, aufs, btrfs, zfs +- Windows : NTFS +- macOS : HFS+ + +## image + +Docker images are the basis of [containers](#container). An Image is an +ordered collection of root filesystem changes and the corresponding +execution parameters for use within a container runtime. An image typically +contains a union of layered filesystems stacked on top of each other. An image +does not have state and it never changes. + +## libcontainer + +libcontainer provides a native Go implementation for creating containers with +namespaces, cgroups, capabilities, and filesystem access controls. It allows +you to manage the lifecycle of the container performing additional operations +after the container is created. + +## libnetwork + +libnetwork provides a native Go implementation for creating and managing container +network namespaces and other network resources. It manage the networking lifecycle +of the container performing additional operations after the container is created. + +## link + +links provide a legacy interface to connect Docker containers running on the +same host to each other without exposing the hosts' network ports. Use the +Docker networks feature instead. + +## Machine + +[Machine](https://github.com/docker/machine) is a Docker tool which +makes it really easy to create Docker hosts on your computer, on +cloud providers and inside your own data center. It creates servers, +installs Docker on them, then configures the Docker client to talk to them. + +*Also known as : docker-machine* + +## node + +A [node](https://docs.docker.com/engine/swarm/how-swarm-mode-works/nodes/) is a physical or virtual +machine running an instance of the Docker Engine in swarm mode. + +**Manager nodes** perform swarm management and orchestration duties. By default +manager nodes are also worker nodes. + +**Worker nodes** execute tasks. + +## overlay network driver + +Overlay network driver provides out of the box multi-host network connectivity +for docker containers in a cluster. + +## overlay storage driver + +OverlayFS is a [filesystem](#filesystem) service for Linux which implements a +[union mount](http://en.wikipedia.org/wiki/Union_mount) for other file systems. +It is supported by the Docker daemon as a storage driver. + +## registry + +A Registry is a hosted service containing [repositories](#repository) of [images](#image) +which responds to the Registry API. + +The default registry can be accessed using a browser at [Docker Hub](#docker-hub) +or using the `docker search` command. + +## repository + +A repository is a set of Docker images. A repository can be shared by pushing it +to a [registry](#registry) server. The different images in the repository can be +labeled using [tags](#tag). + +Here is an example of the shared [nginx repository](https://hub.docker.com/_/nginx/) +and its [tags](https://hub.docker.com/r/library/nginx/tags/) + + +## service + +A [service](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/) is the definition of how +you want to run your application containers in a swarm. At the most basic level +a service defines which container image to run in the swarm and which commands +to run in the container. For orchestration purposes, the service defines the +"desired state", meaning how many containers to run as tasks and constraints for +deploying the containers. + +Frequently a service is a microservice within the context of some larger +application. Examples of services might include an HTTP server, a database, or +any other type of executable program that you wish to run in a distributed +environment. + +## service discovery + +Swarm mode [service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery) is a DNS component +internal to the swarm that automatically assigns each service on an overlay +network in the swarm a VIP and DNS entry. Containers on the network share DNS +mappings for the service via gossip so any container on the network can access +the service via its service name. + +You don’t need to expose service-specific ports to make the service available to +other services on the same overlay network. The swarm’s internal load balancer +automatically distributes requests to the service VIP among the active tasks. + +## swarm + +A [swarm](https://docs.docker.com/engine/swarm/) is a cluster of one or more Docker Engines running in [swarm mode](#swarm-mode). + +## Docker Swarm + +Do not confuse [Docker Swarm](https://github.com/docker/swarm) with the [swarm mode](#swarm-mode) features in Docker Engine. + +Docker Swarm is the name of a standalone native clustering tool for Docker. +Docker Swarm pools together several Docker hosts and exposes them as a single +virtual Docker host. It serves the standard Docker API, so any tool that already +works with Docker can now transparently scale up to multiple hosts. + +*Also known as : docker-swarm* + +## swarm mode + +[Swarm mode](https://docs.docker.com/engine/swarm/) refers to cluster management and orchestration +features embedded in Docker Engine. When you initialize a new swarm (cluster) or +join nodes to a swarm, the Docker Engine runs in swarm mode. + +## tag + +A tag is a label applied to a Docker image in a [repository](#repository). +tags are how various images in a repository are distinguished from each other. + +*Note : This label is not related to the key=value labels set for docker daemon* + +## task + +A [task](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/#/tasks-and-scheduling) is the +atomic unit of scheduling within a swarm. A task carries a Docker container and +the commands to run inside the container. Manager nodes assign tasks to worker +nodes according to the number of replicas set in the service scale. + +The diagram below illustrates the relationship of services to tasks and +containers. + +![services diagram](https://docs.docker.com/engine/swarm/images/services-diagram.png) + +## Toolbox + +[Docker Toolbox](https://docs.docker.com/toolbox/overview/) is a legacy +installer for Mac and Windows users. It uses Oracle VirtualBox for +virtualization. + +For Macs running OS X El Capitan 10.11 and newer macOS releases, [Docker for +Mac](https://docs.docker.com/docker-for-mac/) is the better solution. + +For Windows 10 systems that support Microsoft Hyper-V (Professional, Enterprise +and Education), [Docker for +Windows](https://docs.docker.com/docker-for-windows/) is the better solution. + +## Union file system + +Union file systems implement a [union +mount](https://en.wikipedia.org/wiki/Union_mount) and operate by creating +layers. Docker uses union file systems in conjunction with +[copy-on-write](#copy-on-write) techniques to provide the building blocks for +containers, making them very lightweight and fast. + +For more on Docker and union file systems, see [Docker and AUFS in +practice](https://docs.docker.com/engine/userguide/storagedriver/aufs-driver/), +[Docker and Btrfs in +practice](https://docs.docker.com/engine/userguide/storagedriver/btrfs-driver/), +and [Docker and OverlayFS in +practice](https://docs.docker.com/engine/userguide/storagedriver/overlayfs-driver/) + +Example implementations of union file systems are +[UnionFS](https://en.wikipedia.org/wiki/UnionFS), +[AUFS](https://en.wikipedia.org/wiki/Aufs), and +[Btrfs](https://btrfs.wiki.kernel.org/index.php/Main_Page). + +## virtual machine + +A virtual machine is a program that emulates a complete computer and imitates dedicated hardware. +It shares physical hardware resources with other users but isolates the operating system. The +end user has the same experience on a Virtual Machine as they would have on dedicated hardware. + +Compared to containers, a virtual machine is heavier to run, provides more isolation, +gets its own set of resources and does minimal sharing. + +*Also known as : VM* diff --git a/vendor/github.com/moby/moby/docs/reference/index.md b/vendor/github.com/moby/moby/docs/reference/index.md new file mode 100644 index 0000000..f24c342 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/index.md @@ -0,0 +1,21 @@ +--- +title: "Engine reference" +description: "Docker Engine reference" +keywords: "Engine" +--- + + + +# Engine reference + +* [Dockerfile reference](builder.md) +* [Docker run reference](run.md) +* [Command line reference](commandline/index.md) +* [API Reference](https://docs.docker.com/engine/api/) diff --git a/vendor/github.com/moby/moby/docs/reference/run.md b/vendor/github.com/moby/moby/docs/reference/run.md new file mode 100644 index 0000000..d648ad7 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/reference/run.md @@ -0,0 +1,1562 @@ +--- +title: "Docker run reference" +description: "Configure containers at runtime" +keywords: "docker, run, configure, runtime" +--- + + + +# Docker run reference + +Docker runs processes in isolated containers. A container is a process +which runs on a host. The host may be local or remote. When an operator +executes `docker run`, the container process that runs is isolated in +that it has its own file system, its own networking, and its own +isolated process tree separate from the host. + +This page details how to use the `docker run` command to define the +container's resources at runtime. + +## General form + +The basic `docker run` command takes this form: + + $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] + +The `docker run` command must specify an [*IMAGE*](glossary.md#image) +to derive the container from. An image developer can define image +defaults related to: + + * detached or foreground running + * container identification + * network settings + * runtime constraints on CPU and memory + +With the `docker run [OPTIONS]` an operator can add to or override the +image defaults set by a developer. And, additionally, operators can +override nearly all the defaults set by the Docker runtime itself. The +operator's ability to override image and Docker runtime defaults is why +[*run*](commandline/run.md) has more options than any +other `docker` command. + +To learn how to interpret the types of `[OPTIONS]`, see [*Option +types*](commandline/cli.md#option-types). + +> **Note**: Depending on your Docker system configuration, you may be +> required to preface the `docker run` command with `sudo`. To avoid +> having to use `sudo` with the `docker` command, your system +> administrator can create a Unix group called `docker` and add users to +> it. For more information about this configuration, refer to the Docker +> installation documentation for your operating system. + + +## Operator exclusive options + +Only the operator (the person executing `docker run`) can set the +following options. + + - [Detached vs foreground](#detached-vs-foreground) + - [Detached (-d)](#detached--d) + - [Foreground](#foreground) + - [Container identification](#container-identification) + - [Name (--name)](#name---name) + - [PID equivalent](#pid-equivalent) + - [IPC settings (--ipc)](#ipc-settings---ipc) + - [Network settings](#network-settings) + - [Restart policies (--restart)](#restart-policies---restart) + - [Clean up (--rm)](#clean-up---rm) + - [Runtime constraints on resources](#runtime-constraints-on-resources) + - [Runtime privilege and Linux capabilities](#runtime-privilege-and-linux-capabilities) + +## Detached vs foreground + +When starting a Docker container, you must first decide if you want to +run the container in the background in a "detached" mode or in the +default foreground mode: + + -d=false: Detached mode: Run container in the background, print new container id + +### Detached (-d) + +To start a container in detached mode, you use `-d=true` or just `-d` option. By +design, containers started in detached mode exit when the root process used to +run the container exits. A container in detached mode cannot be automatically +removed when it stops, this means you cannot use the `--rm` option with `-d` option. + +Do not pass a `service x start` command to a detached container. For example, this +command attempts to start the `nginx` service. + + $ docker run -d -p 80:80 my_image service nginx start + +This succeeds in starting the `nginx` service inside the container. However, it +fails the detached container paradigm in that, the root process (`service nginx +start`) returns and the detached container stops as designed. As a result, the +`nginx` service is started but could not be used. Instead, to start a process +such as the `nginx` web server do the following: + + $ docker run -d -p 80:80 my_image nginx -g 'daemon off;' + +To do input/output with a detached container use network connections or shared +volumes. These are required because the container is no longer listening to the +command line where `docker run` was run. + +To reattach to a detached container, use `docker` +[*attach*](commandline/attach.md) command. + +### Foreground + +In foreground mode (the default when `-d` is not specified), `docker +run` can start the process in the container and attach the console to +the process's standard input, output, and standard error. It can even +pretend to be a TTY (this is what most command line executables expect) +and pass along signals. All of that is configurable: + + -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR` + -t : Allocate a pseudo-tty + --sig-proxy=true: Proxy all received signals to the process (non-TTY mode only) + -i : Keep STDIN open even if not attached + +If you do not specify `-a` then Docker will [attach to both stdout and stderr +]( https://github.com/docker/docker/blob/4118e0c9eebda2412a09ae66e90c34b85fae3275/runconfig/opts/parse.go#L267). +You can specify to which of the three standard streams (`STDIN`, `STDOUT`, +`STDERR`) you'd like to connect instead, as in: + + $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash + +For interactive processes (like a shell), you must use `-i -t` together in +order to allocate a tty for the container process. `-i -t` is often written `-it` +as you'll see in later examples. Specifying `-t` is forbidden when the client +standard output is redirected or piped, such as in: + + $ echo test | docker run -i busybox cat + +>**Note**: A process running as PID 1 inside a container is treated +>specially by Linux: it ignores any signal with the default action. +>So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is +>coded to do so. + +## Container identification + +### Name (--name) + +The operator can identify a container in three ways: + +| Identifier type | Example value | +| --------------------- | ------------------------------------------------------------------ | +| UUID long identifier | "f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778" | +| UUID short identifier | "f78375b1c487" | +| Name | "evil_ptolemy" | + +The UUID identifiers come from the Docker daemon. If you do not assign a +container name with the `--name` option, then the daemon generates a random +string name for you. Defining a `name` can be a handy way to add meaning to a +container. If you specify a `name`, you can use it when referencing the +container within a Docker network. This works for both background and foreground +Docker containers. + +> **Note**: Containers on the default bridge network must be linked to +> communicate by name. + +### PID equivalent + +Finally, to help with automation, you can have Docker write the +container ID out to a file of your choosing. This is similar to how some +programs might write out their process ID to a file (you've seen them as +PID files): + + --cidfile="": Write the container ID to the file + +### Image[:tag] + +While not strictly a means of identifying a container, you can specify a version of an +image you'd like to run the container with by adding `image[:tag]` to the command. For +example, `docker run ubuntu:14.04`. + +### Image[@digest] + +Images using the v2 or later image format have a content-addressable identifier +called a digest. As long as the input used to generate the image is unchanged, +the digest value is predictable and referenceable. + +The following example runs a container from the `alpine` image with the +`sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0` digest: + + $ docker run alpine@sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0 date + +## PID settings (--pid) + + --pid="" : Set the PID (Process) Namespace mode for the container, + 'container:': joins another container's PID namespace + 'host': use the host's PID namespace inside the container + +By default, all containers have the PID namespace enabled. + +PID namespace provides separation of processes. The PID Namespace removes the +view of the system processes, and allows process ids to be reused including +pid 1. + +In certain cases you want your container to share the host's process namespace, +basically allowing processes within the container to see all of the processes +on the system. For example, you could build a container with debugging tools +like `strace` or `gdb`, but want to use these tools when debugging processes +within the container. + +### Example: run htop inside a container + +Create this Dockerfile: + +``` +FROM alpine:latest +RUN apk add --update htop && rm -rf /var/cache/apk/* +CMD ["htop"] +``` + +Build the Dockerfile and tag the image as `myhtop`: + +```bash +$ docker build -t myhtop . +``` + +Use the following command to run `htop` inside a container: + +``` +$ docker run -it --rm --pid=host myhtop +``` + +Joining another container's pid namespace can be used for debugging that container. + +### Example + +Start a container running a redis server: + +```bash +$ docker run --name my-redis -d redis +``` + +Debug the redis container by running another container that has strace in it: + +```bash +$ docker run -it --pid=container:my-redis my_strace_docker_image bash +$ strace -p 1 +``` + +## UTS settings (--uts) + + --uts="" : Set the UTS namespace mode for the container, + 'host': use the host's UTS namespace inside the container + +The UTS namespace is for setting the hostname and the domain that is visible +to running processes in that namespace. By default, all containers, including +those with `--network=host`, have their own UTS namespace. The `host` setting will +result in the container using the same UTS namespace as the host. Note that +`--hostname` is invalid in `host` UTS mode. + +You may wish to share the UTS namespace with the host if you would like the +hostname of the container to change as the hostname of the host changes. A +more advanced use case would be changing the host's hostname from a container. + +## IPC settings (--ipc) + + --ipc="" : Set the IPC mode for the container, + 'container:': reuses another container's IPC namespace + 'host': use the host's IPC namespace inside the container + +By default, all containers have the IPC namespace enabled. + +IPC (POSIX/SysV IPC) namespace provides separation of named shared memory +segments, semaphores and message queues. + +Shared memory segments are used to accelerate inter-process communication at +memory speed, rather than through pipes or through the network stack. Shared +memory is commonly used by databases and custom-built (typically C/OpenMPI, +C++/using boost libraries) high performance applications for scientific +computing and financial services industries. If these types of applications +are broken into multiple containers, you might need to share the IPC mechanisms +of the containers. + +## Network settings + + --dns=[] : Set custom dns servers for the container + --network="bridge" : Connect a container to a network + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --network-alias=[] : Add network-scoped alias for the container + --add-host="" : Add a line to /etc/hosts (host:IP) + --mac-address="" : Sets the container's Ethernet device's MAC address + --ip="" : Sets the container's Ethernet device's IPv4 address + --ip6="" : Sets the container's Ethernet device's IPv6 address + --link-local-ip=[] : Sets one or more container's Ethernet device's link local IPv4/IPv6 addresses + +By default, all containers have networking enabled and they can make any +outgoing connections. The operator can completely disable networking +with `docker run --network none` which disables all incoming and outgoing +networking. In cases like this, you would perform I/O through files or +`STDIN` and `STDOUT` only. + +Publishing ports and linking to other containers only works with the default (bridge). The linking feature is a legacy feature. You should always prefer using Docker network drivers over linking. + +Your container will use the same DNS servers as the host by default, but +you can override this with `--dns`. + +By default, the MAC address is generated using the IP address allocated to the +container. You can set the container's MAC address explicitly by providing a +MAC address via the `--mac-address` parameter (format:`12:34:56:78:9a:bc`).Be +aware that Docker does not check if manually specified MAC addresses are unique. + +Supported networks : + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NetworkDescription
none + No networking in the container. +
bridge (default) + Connect the container to the bridge via veth interfaces. +
host + Use the host's network stack inside the container. +
container:<name|id> + Use the network stack of another container, specified via + its name or id. +
NETWORK + Connects the container to a user created network (using docker network create command) +
+ +#### Network: none + +With the network is `none` a container will not have +access to any external routes. The container will still have a +`loopback` interface enabled in the container but it does not have any +routes to external traffic. + +#### Network: bridge + +With the network set to `bridge` a container will use docker's +default networking setup. A bridge is setup on the host, commonly named +`docker0`, and a pair of `veth` interfaces will be created for the +container. One side of the `veth` pair will remain on the host attached +to the bridge while the other side of the pair will be placed inside the +container's namespaces in addition to the `loopback` interface. An IP +address will be allocated for containers on the bridge's network and +traffic will be routed though this bridge to the container. + +Containers can communicate via their IP addresses by default. To communicate by +name, they must be linked. + +#### Network: host + +With the network set to `host` a container will share the host's +network stack and all interfaces from the host will be available to the +container. The container's hostname will match the hostname on the host +system. Note that `--mac-address` is invalid in `host` netmode. Even in `host` +network mode a container has its own UTS namespace by default. As such +`--hostname` is allowed in `host` network mode and will only change the +hostname inside the container. +Similar to `--hostname`, the `--add-host`, `--dns`, `--dns-search`, and +`--dns-option` options can be used in `host` network mode. These options update +`/etc/hosts` or `/etc/resolv.conf` inside the container. No change are made to +`/etc/hosts` and `/etc/resolv.conf` on the host. + +Compared to the default `bridge` mode, the `host` mode gives *significantly* +better networking performance since it uses the host's native networking stack +whereas the bridge has to go through one level of virtualization through the +docker daemon. It is recommended to run containers in this mode when their +networking performance is critical, for example, a production Load Balancer +or a High Performance Web Server. + +> **Note**: `--network="host"` gives the container full access to local system +> services such as D-bus and is therefore considered insecure. + +#### Network: container + +With the network set to `container` a container will share the +network stack of another container. The other container's name must be +provided in the format of `--network container:`. Note that `--add-host` +`--hostname` `--dns` `--dns-search` `--dns-option` and `--mac-address` are +invalid in `container` netmode, and `--publish` `--publish-all` `--expose` are +also invalid in `container` netmode. + +Example running a Redis container with Redis binding to `localhost` then +running the `redis-cli` command and connecting to the Redis server over the +`localhost` interface. + + $ docker run -d --name redis example/redis --bind 127.0.0.1 + $ # use the redis container's network stack to access localhost + $ docker run --rm -it --network container:redis example/redis-cli -h 127.0.0.1 + +#### User-defined network + +You can create a network using a Docker network driver or an external network +driver plugin. You can connect multiple containers to the same network. Once +connected to a user-defined network, the containers can communicate easily using +only another container's IP address or name. + +For `overlay` networks or custom plugins that support multi-host connectivity, +containers connected to the same multi-host network but launched from different +Engines can also communicate in this way. + +The following example creates a network using the built-in `bridge` network +driver and running a container in the created network + +``` +$ docker network create -d bridge my-net +$ docker run --network=my-net -itd --name=container3 busybox +``` + +### Managing /etc/hosts + +Your container will have lines in `/etc/hosts` which define the hostname of the +container itself as well as `localhost` and a few other common things. The +`--add-host` flag can be used to add additional lines to `/etc/hosts`. + + $ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts + 172.17.0.22 09d03f76bf2c + fe00::0 ip6-localnet + ff00::0 ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + 86.75.30.9 db-static + +If a container is connected to the default bridge network and `linked` +with other containers, then the container's `/etc/hosts` file is updated +with the linked container's name. + +> **Note** Since Docker may live update the container’s `/etc/hosts` file, there +may be situations when processes inside the container can end up reading an +empty or incomplete `/etc/hosts` file. In most cases, retrying the read again +should fix the problem. + +## Restart policies (--restart) + +Using the `--restart` flag on Docker run you can specify a restart policy for +how a container should or should not be restarted on exit. + +When a restart policy is active on a container, it will be shown as either `Up` +or `Restarting` in [`docker ps`](commandline/ps.md). It can also be +useful to use [`docker events`](commandline/events.md) to see the +restart policy in effect. + +Docker supports the following restart policies: + + + + + + + + + + + + + + + + + + + + + + + + + + +
PolicyResult
no + Do not automatically restart the container when it exits. This is the + default. +
+ + on-failure[:max-retries] + + + Restart only if the container exits with a non-zero exit status. + Optionally, limit the number of restart retries the Docker + daemon attempts. +
always + Always restart the container regardless of the exit status. + When you specify always, the Docker daemon will try to restart + the container indefinitely. The container will also always start + on daemon startup, regardless of the current state of the container. +
unless-stopped + Always restart the container regardless of the exit status, but + do not start it on daemon startup if the container has been put + to a stopped state before. +
+ +An ever increasing delay (double the previous delay, starting at 100 +milliseconds) is added before each restart to prevent flooding the server. +This means the daemon will wait for 100 ms, then 200 ms, 400, 800, 1600, +and so on until either the `on-failure` limit is hit, or when you `docker stop` +or `docker rm -f` the container. + +If a container is successfully restarted (the container is started and runs +for at least 10 seconds), the delay is reset to its default value of 100 ms. + +You can specify the maximum amount of times Docker will try to restart the +container when using the **on-failure** policy. The default is that Docker +will try forever to restart the container. The number of (attempted) restarts +for a container can be obtained via [`docker inspect`](commandline/inspect.md). For example, to get the number of restarts +for container "my-container"; + + {% raw %} + $ docker inspect -f "{{ .RestartCount }}" my-container + # 2 + {% endraw %} + +Or, to get the last time the container was (re)started; + + {% raw %} + $ docker inspect -f "{{ .State.StartedAt }}" my-container + # 2015-03-04T23:47:07.691840179Z + {% endraw %} + + +Combining `--restart` (restart policy) with the `--rm` (clean up) flag results +in an error. On container restart, attached clients are disconnected. See the +examples on using the [`--rm` (clean up)](#clean-up-rm) flag later in this page. + +### Examples + + $ docker run --restart=always redis + +This will run the `redis` container with a restart policy of **always** +so that if the container exits, Docker will restart it. + + $ docker run --restart=on-failure:10 redis + +This will run the `redis` container with a restart policy of **on-failure** +and a maximum restart count of 10. If the `redis` container exits with a +non-zero exit status more than 10 times in a row Docker will abort trying to +restart the container. Providing a maximum restart limit is only valid for the +**on-failure** policy. + +## Exit Status + +The exit code from `docker run` gives information about why the container +failed to run or why it exited. When `docker run` exits with a non-zero code, +the exit codes follow the `chroot` standard, see below: + +**_125_** if the error is with Docker daemon **_itself_** + + $ docker run --foo busybox; echo $? + # flag provided but not defined: --foo + See 'docker run --help'. + 125 + +**_126_** if the **_contained command_** cannot be invoked + + $ docker run busybox /etc; echo $? + # docker: Error response from daemon: Container command '/etc' could not be invoked. + 126 + +**_127_** if the **_contained command_** cannot be found + + $ docker run busybox foo; echo $? + # docker: Error response from daemon: Container command 'foo' not found or does not exist. + 127 + +**_Exit code_** of **_contained command_** otherwise + + $ docker run busybox /bin/sh -c 'exit 3'; echo $? + # 3 + +## Clean up (--rm) + +By default a container's file system persists even after the container +exits. This makes debugging a lot easier (since you can inspect the +final state) and you retain all your data by default. But if you are +running short-term **foreground** processes, these container file +systems can really pile up. If instead you'd like Docker to +**automatically clean up the container and remove the file system when +the container exits**, you can add the `--rm` flag: + + --rm=false: Automatically remove the container when it exits (incompatible with -d) + +> **Note**: When you set the `--rm` flag, Docker also removes the volumes +associated with the container when the container is removed. This is similar +to running `docker rm -v my-container`. Only volumes that are specified without a +name are removed. For example, with +`docker run --rm -v /foo -v awesome:/bar busybox top`, the volume for `/foo` will be removed, +but the volume for `/bar` will not. Volumes inherited via `--volumes-from` will be removed +with the same logic -- if the original volume was specified with a name it will **not** be removed. + +## Security configuration + --security-opt="label=user:USER" : Set the label user for the container + --security-opt="label=role:ROLE" : Set the label role for the container + --security-opt="label=type:TYPE" : Set the label type for the container + --security-opt="label=level:LEVEL" : Set the label level for the container + --security-opt="label=disable" : Turn off label confinement for the container + --security-opt="apparmor=PROFILE" : Set the apparmor profile to be applied to the container + --security-opt="no-new-privileges" : Disable container processes from gaining new privileges + --security-opt="seccomp=unconfined" : Turn off seccomp confinement for the container + --security-opt="seccomp=profile.json": White listed syscalls seccomp Json file to be used as a seccomp filter + + +You can override the default labeling scheme for each container by specifying +the `--security-opt` flag. Specifying the level in the following command +allows you to share the same content between containers. + + $ docker run --security-opt label=level:s0:c100,c200 -it fedora bash + +> **Note**: Automatic translation of MLS labels is not currently supported. + +To disable the security labeling for this container versus running with the +`--privileged` flag, use the following command: + + $ docker run --security-opt label=disable -it fedora bash + +If you want a tighter security policy on the processes within a container, +you can specify an alternate type for the container. You could run a container +that is only allowed to listen on Apache ports by executing the following +command: + + $ docker run --security-opt label=type:svirt_apache_t -it centos bash + +> **Note**: You would have to write policy defining a `svirt_apache_t` type. + +If you want to prevent your container processes from gaining additional +privileges, you can execute the following command: + + $ docker run --security-opt no-new-privileges -it centos bash + +This means that commands that raise privileges such as `su` or `sudo` will no longer work. +It also causes any seccomp filters to be applied later, after privileges have been dropped +which may mean you can have a more restrictive set of filters. +For more details, see the [kernel documentation](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt). + +## Specify an init process + +You can use the `--init` flag to indicate that an init process should be used as +the PID 1 in the container. Specifying an init process ensures the usual +responsibilities of an init system, such as reaping zombie processes, are +performed inside the created container. + +The default init process used is the first `docker-init` executable found in the +system path of the Docker daemon process. This `docker-init` binary, included in +the default installation, is backed by [tini](https://github.com/krallin/tini). + +## Specify custom cgroups + +Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a +container in. This allows you to create and manage cgroups on their own. You can +define custom resources for those cgroups and put containers under a common +parent group. + +## Runtime constraints on resources + +The operator can also adjust the performance parameters of the +container: + +| Option | Description | +| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| `-m`, `--memory=""` | Memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | +| `--memory-swap=""` | Total memory limit (memory + swap, format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | +| `--memory-reservation=""` | Memory soft limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | +| `--kernel-memory=""` | Kernel memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | +| `-c`, `--cpu-shares=0` | CPU shares (relative weight) | +| `--cpus=0.000` | Number of CPUs. Number is a fractional number. 0.000 means no limit. | +| `--cpu-period=0` | Limit the CPU CFS (Completely Fair Scheduler) period | +| `--cpuset-cpus=""` | CPUs in which to allow execution (0-3, 0,1) | +| `--cpuset-mems=""` | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. | +| `--cpu-quota=0` | Limit the CPU CFS (Completely Fair Scheduler) quota | +| `--cpu-rt-period=0` | Limit the CPU real-time period. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | +| `--cpu-rt-runtime=0` | Limit the CPU real-time runtime. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | +| `--blkio-weight=0` | Block IO weight (relative weight) accepts a weight value between 10 and 1000. | +| `--blkio-weight-device=""` | Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`) | +| `--device-read-bps=""` | Limit read rate from a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | +| `--device-write-bps=""` | Limit write rate to a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | +| `--device-read-iops="" ` | Limit read rate (IO per second) from a device (format: `:`). Number is a positive integer. | +| `--device-write-iops="" ` | Limit write rate (IO per second) to a device (format: `:`). Number is a positive integer. | +| `--oom-kill-disable=false` | Whether to disable OOM Killer for the container or not. | +| `--oom-score-adj=0` | Tune container's OOM preferences (-1000 to 1000) | +| `--memory-swappiness=""` | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. | +| `--shm-size=""` | Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. | + +### User memory constraints + +We have four ways to set user memory usage: + + + + + + + + + + + + + + + + + + + + + + + + + + +
OptionResult
+ memory=inf, memory-swap=inf (default) + + There is no memory limit for the container. The container can use + as much memory as needed. +
memory=L<inf, memory-swap=inf + (specify memory and set memory-swap as -1) The container is + not allowed to use more than L bytes of memory, but can use as much swap + as is needed (if the host supports swap memory). +
memory=L<inf, memory-swap=2*L + (specify memory without memory-swap) The container is not allowed to + use more than L bytes of memory, swap plus memory usage is double + of that. +
+ memory=L<inf, memory-swap=S<inf, L<=S + + (specify both memory and memory-swap) The container is not allowed to + use more than L bytes of memory, swap plus memory usage is limited + by S. +
+ +Examples: + + $ docker run -it ubuntu:14.04 /bin/bash + +We set nothing about memory, this means the processes in the container can use +as much memory and swap memory as they need. + + $ docker run -it -m 300M --memory-swap -1 ubuntu:14.04 /bin/bash + +We set memory limit and disabled swap memory limit, this means the processes in +the container can use 300M memory and as much swap memory as they need (if the +host supports swap memory). + + $ docker run -it -m 300M ubuntu:14.04 /bin/bash + +We set memory limit only, this means the processes in the container can use +300M memory and 300M swap memory, by default, the total virtual memory size +(--memory-swap) will be set as double of memory, in this case, memory + swap +would be 2*300M, so processes can use 300M swap memory as well. + + $ docker run -it -m 300M --memory-swap 1G ubuntu:14.04 /bin/bash + +We set both memory and swap memory, so the processes in the container can use +300M memory and 700M swap memory. + +Memory reservation is a kind of memory soft limit that allows for greater +sharing of memory. Under normal circumstances, containers can use as much of +the memory as needed and are constrained only by the hard limits set with the +`-m`/`--memory` option. When memory reservation is set, Docker detects memory +contention or low memory and forces containers to restrict their consumption to +a reservation limit. + +Always set the memory reservation value below the hard limit, otherwise the hard +limit takes precedence. A reservation of 0 is the same as setting no +reservation. By default (without reservation set), memory reservation is the +same as the hard memory limit. + +Memory reservation is a soft-limit feature and does not guarantee the limit +won't be exceeded. Instead, the feature attempts to ensure that, when memory is +heavily contended for, memory is allocated based on the reservation hints/setup. + +The following example limits the memory (`-m`) to 500M and sets the memory +reservation to 200M. + +```bash +$ docker run -it -m 500M --memory-reservation 200M ubuntu:14.04 /bin/bash +``` + +Under this configuration, when the container consumes memory more than 200M and +less than 500M, the next system memory reclaim attempts to shrink container +memory below 200M. + +The following example set memory reservation to 1G without a hard memory limit. + +```bash +$ docker run -it --memory-reservation 1G ubuntu:14.04 /bin/bash +``` + +The container can use as much memory as it needs. The memory reservation setting +ensures the container doesn't consume too much memory for long time, because +every memory reclaim shrinks the container's consumption to the reservation. + +By default, kernel kills processes in a container if an out-of-memory (OOM) +error occurs. To change this behaviour, use the `--oom-kill-disable` option. +Only disable the OOM killer on containers where you have also set the +`-m/--memory` option. If the `-m` flag is not set, this can result in the host +running out of memory and require killing the host's system processes to free +memory. + +The following example limits the memory to 100M and disables the OOM killer for +this container: + + $ docker run -it -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash + +The following example, illustrates a dangerous way to use the flag: + + $ docker run -it --oom-kill-disable ubuntu:14.04 /bin/bash + +The container has unlimited memory which can cause the host to run out memory +and require killing system processes to free memory. The `--oom-score-adj` +parameter can be changed to select the priority of which containers will +be killed when the system is out of memory, with negative scores making them +less likely to be killed, and positive scores more likely. + +### Kernel memory constraints + +Kernel memory is fundamentally different than user memory as kernel memory can't +be swapped out. The inability to swap makes it possible for the container to +block system services by consuming too much kernel memory. Kernel memory includes: + + - stack pages + - slab pages + - sockets memory pressure + - tcp memory pressure + +You can setup kernel memory limit to constrain these kinds of memory. For example, +every process consumes some stack pages. By limiting kernel memory, you can +prevent new processes from being created when the kernel memory usage is too high. + +Kernel memory is never completely independent of user memory. Instead, you limit +kernel memory in the context of the user memory limit. Assume "U" is the user memory +limit and "K" the kernel limit. There are three possible ways to set limits: + + + + + + + + + + + + + + + + + + + + + + +
OptionResult
U != 0, K = inf (default) + This is the standard memory limitation mechanism already present before using + kernel memory. Kernel memory is completely ignored. +
U != 0, K < U + Kernel memory is a subset of the user memory. This setup is useful in + deployments where the total amount of memory per-cgroup is overcommitted. + Overcommitting kernel memory limits is definitely not recommended, since the + box can still run out of non-reclaimable memory. + In this case, you can configure K so that the sum of all groups is + never greater than the total memory. Then, freely set U at the expense of + the system's service quality. +
U != 0, K > U + Since kernel memory charges are also fed to the user counter and reclamation + is triggered for the container for both kinds of memory. This configuration + gives the admin a unified view of memory. It is also useful for people + who just want to track kernel memory usage. +
+ +Examples: + + $ docker run -it -m 500M --kernel-memory 50M ubuntu:14.04 /bin/bash + +We set memory and kernel memory, so the processes in the container can use +500M memory in total, in this 500M memory, it can be 50M kernel memory tops. + + $ docker run -it --kernel-memory 50M ubuntu:14.04 /bin/bash + +We set kernel memory without **-m**, so the processes in the container can +use as much memory as they want, but they can only use 50M kernel memory. + +### Swappiness constraint + +By default, a container's kernel can swap out a percentage of anonymous pages. +To set this percentage for a container, specify a `--memory-swappiness` value +between 0 and 100. A value of 0 turns off anonymous page swapping. A value of +100 sets all anonymous pages as swappable. By default, if you are not using +`--memory-swappiness`, memory swappiness value will be inherited from the parent. + +For example, you can set: + + $ docker run -it --memory-swappiness=0 ubuntu:14.04 /bin/bash + +Setting the `--memory-swappiness` option is helpful when you want to retain the +container's working set and to avoid swapping performance penalties. + +### CPU share constraint + +By default, all containers get the same proportion of CPU cycles. This proportion +can be modified by changing the container's CPU share weighting relative +to the weighting of all other running containers. + +To modify the proportion from the default of 1024, use the `-c` or `--cpu-shares` +flag to set the weighting to 2 or higher. If 0 is set, the system will ignore the +value and use the default of 1024. + +The proportion will only apply when CPU-intensive processes are running. +When tasks in one container are idle, other containers can use the +left-over CPU time. The actual amount of CPU time will vary depending on +the number of containers running on the system. + +For example, consider three containers, one has a cpu-share of 1024 and +two others have a cpu-share setting of 512. When processes in all three +containers attempt to use 100% of CPU, the first container would receive +50% of the total CPU time. If you add a fourth container with a cpu-share +of 1024, the first container only gets 33% of the CPU. The remaining containers +receive 16.5%, 16.5% and 33% of the CPU. + +On a multi-core system, the shares of CPU time are distributed over all CPU +cores. Even if a container is limited to less than 100% of CPU time, it can +use 100% of each individual CPU core. + +For example, consider a system with more than three cores. If you start one +container `{C0}` with `-c=512` running one process, and another container +`{C1}` with `-c=1024` running two processes, this can result in the following +division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +### CPU period constraint + +The default CPU CFS (Completely Fair Scheduler) period is 100ms. We can use +`--cpu-period` to set the period of CPUs to limit the container's CPU usage. +And usually `--cpu-period` should work with `--cpu-quota`. + +Examples: + + $ docker run -it --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash + +If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms. + +In addition to use `--cpu-period` and `--cpu-quota` for setting CPU period constraints, +it is possible to specify `--cpus` with a float number to achieve the same purpose. +For example, if there is 1 CPU, then `--cpus=0.5` will achieve the same result as +setting `--cpu-period=50000` and `--cpu-quota=25000` (50% CPU). + +The default value for `--cpus` is `0.000`, which means there is no limit. + +For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). + +### Cpuset constraint + +We can set cpus in which to allow execution for containers. + +Examples: + + $ docker run -it --cpuset-cpus="1,3" ubuntu:14.04 /bin/bash + +This means processes in container can be executed on cpu 1 and cpu 3. + + $ docker run -it --cpuset-cpus="0-2" ubuntu:14.04 /bin/bash + +This means processes in container can be executed on cpu 0, cpu 1 and cpu 2. + +We can set mems in which to allow execution for containers. Only effective +on NUMA systems. + +Examples: + + $ docker run -it --cpuset-mems="1,3" ubuntu:14.04 /bin/bash + +This example restricts the processes in the container to only use memory from +memory nodes 1 and 3. + + $ docker run -it --cpuset-mems="0-2" ubuntu:14.04 /bin/bash + +This example restricts the processes in the container to only use memory from +memory nodes 0, 1 and 2. + +### CPU quota constraint + +The `--cpu-quota` flag limits the container's CPU usage. The default 0 value +allows the container to take 100% of a CPU resource (1 CPU). The CFS (Completely Fair +Scheduler) handles resource allocation for executing processes and is default +Linux Scheduler used by the kernel. Set this value to 50000 to limit the container +to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary. +For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). + +### Block IO bandwidth (Blkio) constraint + +By default, all containers get the same proportion of block IO bandwidth +(blkio). This proportion is 500. To modify this proportion, change the +container's blkio weight relative to the weighting of all other running +containers using the `--blkio-weight` flag. + +> **Note:** The blkio weight setting is only available for direct IO. Buffered IO +> is not currently supported. + +The `--blkio-weight` flag can set the weighting to a value between 10 to 1000. +For example, the commands below create two containers with different blkio +weight: + + $ docker run -it --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash + $ docker run -it --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash + +If you do block IO in the two containers at the same time, by, for example: + + $ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct + +You'll find that the proportion of time is the same as the proportion of blkio +weights of the two containers. + +The `--blkio-weight-device="DEVICE_NAME:WEIGHT"` flag sets a specific device weight. +The `DEVICE_NAME:WEIGHT` is a string containing a colon-separated device name and weight. +For example, to set `/dev/sda` device weight to `200`: + + $ docker run -it \ + --blkio-weight-device "/dev/sda:200" \ + ubuntu + +If you specify both the `--blkio-weight` and `--blkio-weight-device`, Docker +uses the `--blkio-weight` as the default weight and uses `--blkio-weight-device` +to override this default with a new value on a specific device. +The following example uses a default weight of `300` and overrides this default +on `/dev/sda` setting that weight to `200`: + + $ docker run -it \ + --blkio-weight 300 \ + --blkio-weight-device "/dev/sda:200" \ + ubuntu + +The `--device-read-bps` flag limits the read rate (bytes per second) from a device. +For example, this command creates a container and limits the read rate to `1mb` +per second from `/dev/sda`: + + $ docker run -it --device-read-bps /dev/sda:1mb ubuntu + +The `--device-write-bps` flag limits the write rate (bytes per second)to a device. +For example, this command creates a container and limits the write rate to `1mb` +per second for `/dev/sda`: + + $ docker run -it --device-write-bps /dev/sda:1mb ubuntu + +Both flags take limits in the `:[unit]` format. Both read +and write rates must be a positive integer. You can specify the rate in `kb` +(kilobytes), `mb` (megabytes), or `gb` (gigabytes). + +The `--device-read-iops` flag limits read rate (IO per second) from a device. +For example, this command creates a container and limits the read rate to +`1000` IO per second from `/dev/sda`: + + $ docker run -ti --device-read-iops /dev/sda:1000 ubuntu + +The `--device-write-iops` flag limits write rate (IO per second) to a device. +For example, this command creates a container and limits the write rate to +`1000` IO per second to `/dev/sda`: + + $ docker run -ti --device-write-iops /dev/sda:1000 ubuntu + +Both flags take limits in the `:` format. Both read and +write rates must be a positive integer. + +## Additional groups + --group-add: Add additional groups to run as + +By default, the docker container process runs with the supplementary groups looked +up for the specified user. If one wants to add more to that list of groups, then +one can use this flag: + + $ docker run --rm --group-add audio --group-add nogroup --group-add 777 busybox id + uid=0(root) gid=0(root) groups=10(wheel),29(audio),99(nogroup),777 + +## Runtime privilege and Linux capabilities + + --cap-add: Add Linux capabilities + --cap-drop: Drop Linux capabilities + --privileged=false: Give extended privileges to this container + --device=[]: Allows you to run devices inside the container without the --privileged flag. + +By default, Docker containers are "unprivileged" and cannot, for +example, run a Docker daemon inside a Docker container. This is because +by default a container is not allowed to access any devices, but a +"privileged" container is given access to all devices (see +the documentation on [cgroups devices](https://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt)). + +When the operator executes `docker run --privileged`, Docker will enable +access to all devices on the host as well as set some configuration +in AppArmor or SELinux to allow the container nearly all the same access to the +host as processes running outside containers on the host. Additional +information about running with `--privileged` is available on the +[Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/). + +If you want to limit access to a specific device or devices you can use +the `--device` flag. It allows you to specify one or more devices that +will be accessible within the container. + + $ docker run --device=/dev/snd:/dev/snd ... + +By default, the container will be able to `read`, `write`, and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` flag: + + $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk /dev/xvdc + crash.... + + $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted + +In addition to `--privileged`, the operator can have fine grain control over the +capabilities using `--cap-add` and `--cap-drop`. By default, Docker has a default +list of capabilities that are kept. The following table lists the Linux capability +options which are allowed by default and can be dropped. + +| Capability Key | Capability Description | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| SETPCAP | Modify process capabilities. | +| MKNOD | Create special files using mknod(2). | +| AUDIT_WRITE | Write records to kernel auditing log. | +| CHOWN | Make arbitrary changes to file UIDs and GIDs (see chown(2)). | +| NET_RAW | Use RAW and PACKET sockets. | +| DAC_OVERRIDE | Bypass file read, write, and execute permission checks. | +| FOWNER | Bypass permission checks on operations that normally require the file system UID of the process to match the UID of the file. | +| FSETID | Don't clear set-user-ID and set-group-ID permission bits when a file is modified. | +| KILL | Bypass permission checks for sending signals. | +| SETGID | Make arbitrary manipulations of process GIDs and supplementary GID list. | +| SETUID | Make arbitrary manipulations of process UIDs. | +| NET_BIND_SERVICE | Bind a socket to internet domain privileged ports (port numbers less than 1024). | +| SYS_CHROOT | Use chroot(2), change root directory. | +| SETFCAP | Set file capabilities. | + +The next table shows the capabilities which are not granted by default and may be added. + +| Capability Key | Capability Description | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| SYS_MODULE | Load and unload kernel modules. | +| SYS_RAWIO | Perform I/O port operations (iopl(2) and ioperm(2)). | +| SYS_PACCT | Use acct(2), switch process accounting on or off. | +| SYS_ADMIN | Perform a range of system administration operations. | +| SYS_NICE | Raise process nice value (nice(2), setpriority(2)) and change the nice value for arbitrary processes. | +| SYS_RESOURCE | Override resource Limits. | +| SYS_TIME | Set system clock (settimeofday(2), stime(2), adjtimex(2)); set real-time (hardware) clock. | +| SYS_TTY_CONFIG | Use vhangup(2); employ various privileged ioctl(2) operations on virtual terminals. | +| AUDIT_CONTROL | Enable and disable kernel auditing; change auditing filter rules; retrieve auditing status and filtering rules. | +| MAC_OVERRIDE | Allow MAC configuration or state changes. Implemented for the Smack LSM. | +| MAC_ADMIN | Override Mandatory Access Control (MAC). Implemented for the Smack Linux Security Module (LSM). | +| NET_ADMIN | Perform various network-related operations. | +| SYSLOG | Perform privileged syslog(2) operations. | +| DAC_READ_SEARCH | Bypass file read permission checks and directory read and execute permission checks. | +| LINUX_IMMUTABLE | Set the FS_APPEND_FL and FS_IMMUTABLE_FL i-node flags. | +| NET_BROADCAST | Make socket broadcasts, and listen to multicasts. | +| IPC_LOCK | Lock memory (mlock(2), mlockall(2), mmap(2), shmctl(2)). | +| IPC_OWNER | Bypass permission checks for operations on System V IPC objects. | +| SYS_PTRACE | Trace arbitrary processes using ptrace(2). | +| SYS_BOOT | Use reboot(2) and kexec_load(2), reboot and load a new kernel for later execution. | +| LEASE | Establish leases on arbitrary files (see fcntl(2)). | +| WAKE_ALARM | Trigger something that will wake up the system. | +| BLOCK_SUSPEND | Employ features that can block system suspend. | + +Further reference information is available on the [capabilities(7) - Linux man page](http://man7.org/linux/man-pages/man7/capabilities.7.html) + +Both flags support the value `ALL`, so if the +operator wants to have all capabilities but `MKNOD` they could use: + + $ docker run --cap-add=ALL --cap-drop=MKNOD ... + +For interacting with the network stack, instead of using `--privileged` they +should use `--cap-add=NET_ADMIN` to modify the network interfaces. + + $ docker run -it --rm ubuntu:14.04 ip link add dummy0 type dummy + RTNETLINK answers: Operation not permitted + $ docker run -it --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy + +To mount a FUSE based filesystem, you need to combine both `--cap-add` and +`--device`: + + $ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fuse: failed to open /dev/fuse: Operation not permitted + $ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fusermount: mount failed: Operation not permitted + $ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs + # sshfs sven@10.10.10.20:/home/sven /mnt + The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established. + ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6. + Are you sure you want to continue connecting (yes/no)? yes + sven@10.10.10.20's password: + root@30aa0cfaf1b5:/# ls -la /mnt/src/docker + total 1516 + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:08 . + drwxrwxr-x 1 1000 1000 4096 Dec 4 11:46 .. + -rw-rw-r-- 1 1000 1000 16 Oct 8 00:09 .dockerignore + -rwxrwxr-x 1 1000 1000 464 Oct 8 00:09 .drone.yml + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:11 .git + -rw-rw-r-- 1 1000 1000 461 Dec 4 06:08 .gitignore + .... + +The default seccomp profile will adjust to the selected capabilities, in order to allow +use of facilities allowed by the capabilities, so you should not have to adjust this, +since Docker 1.12. In Docker 1.10 and 1.11 this did not happen and it may be necessary +to use a custom seccomp profile or use `--security-opt seccomp=unconfined` when adding +capabilities. + +## Logging drivers (--log-driver) + +The container can have a different logging driver than the Docker daemon. Use +the `--log-driver=VALUE` with the `docker run` command to configure the +container's logging driver. The following options are supported: + +| Driver | Description | +| ----------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `none` | Disables any logging for the container. `docker logs` won't be available with this driver. | +| `json-file` | Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver. | +| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. | +| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. | +| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint likeGraylog or Logstash. | +| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). | +| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs | +| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using Event Http Collector. | + +The `docker logs` command is available only for the `json-file` and `journald` +logging drivers. For detailed information on working with logging drivers, see +[Configure a logging driver](https://docs.docker.com/engine/admin/logging/overview/). + + +## Overriding Dockerfile image defaults + +When a developer builds an image from a [*Dockerfile*](builder.md) +or when she commits it, the developer can set a number of default parameters +that take effect when the image starts up as a container. + +Four of the Dockerfile commands cannot be overridden at runtime: `FROM`, +`MAINTAINER`, `RUN`, and `ADD`. Everything else has a corresponding override +in `docker run`. We'll go through what the developer might have set in each +Dockerfile instruction and how the operator can override that setting. + + - [CMD (Default Command or Options)](#cmd-default-command-or-options) + - [ENTRYPOINT (Default Command to Execute at Runtime)]( + #entrypoint-default-command-to-execute-at-runtime) + - [EXPOSE (Incoming Ports)](#expose-incoming-ports) + - [ENV (Environment Variables)](#env-environment-variables) + - [HEALTHCHECK](#healthcheck) + - [VOLUME (Shared Filesystems)](#volume-shared-filesystems) + - [USER](#user) + - [WORKDIR](#workdir) + +### CMD (default command or options) + +Recall the optional `COMMAND` in the Docker +commandline: + + $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] + +This command is optional because the person who created the `IMAGE` may +have already provided a default `COMMAND` using the Dockerfile `CMD` +instruction. As the operator (the person running a container from the +image), you can override that `CMD` instruction just by specifying a new +`COMMAND`. + +If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` +get appended as arguments to the `ENTRYPOINT`. + +### ENTRYPOINT (default command to execute at runtime) + + --entrypoint="": Overwrite the default entrypoint set by the image + +The `ENTRYPOINT` of an image is similar to a `COMMAND` because it +specifies what executable to run when the container starts, but it is +(purposely) more difficult to override. The `ENTRYPOINT` gives a +container its default nature or behavior, so that when you set an +`ENTRYPOINT` you can run the container *as if it were that binary*, +complete with default options, and you can pass in more options via the +`COMMAND`. But, sometimes an operator may want to run something else +inside the container, so you can override the default `ENTRYPOINT` at +runtime by using a string to specify the new `ENTRYPOINT`. Here is an +example of how to run a shell in a container that has been set up to +automatically run something else (like `/usr/bin/redis-server`): + + $ docker run -it --entrypoint /bin/bash example/redis + +or two examples of how to pass more parameters to that ENTRYPOINT: + + $ docker run -it --entrypoint /bin/bash example/redis -c ls -l + $ docker run -it --entrypoint /usr/bin/redis-cli example/redis --help + +You can reset a containers entrypoint by passing an empty string, for example: + + $ docker run -it --entrypoint="" mysql bash + +> **Note**: Passing `--entrypoint` will clear out any default command set on the +> image (i.e. any `CMD` instruction in the Dockerfile used to build it). + +### EXPOSE (incoming ports) + +The following `run` command options work with container networking: + + --expose=[]: Expose a port or a range of ports inside the container. + These are additional to those exposed by the `EXPOSE` instruction + -P : Publish all exposed ports to the host interfaces + -p=[] : Publish a container᾿s port or a range of ports to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + Both hostPort and containerPort can be specified as a + range of ports. When specifying ranges for both, the + number of container ports in the range must match the + number of host ports in the range, for example: + -p 1234-1236:1234-1236/tcp + + When specifying a range for hostPort only, the + containerPort must not be a range. In this case the + container port is published somewhere within the + specified hostPort range. (e.g., `-p 1234-1236:1234/tcp`) + + (use 'docker port' to see the actual mapping) + + --link="" : Add link to another container (:alias or ) + +With the exception of the `EXPOSE` directive, an image developer hasn't +got much control over networking. The `EXPOSE` instruction defines the +initial incoming ports that provide services. These ports are available +to processes inside the container. An operator can use the `--expose` +option to add to the exposed ports. + +To expose a container's internal port, an operator can start the +container with the `-P` or `-p` flag. The exposed port is accessible on +the host and the ports are available to any client that can reach the +host. + +The `-P` option publishes all the ports to the host interfaces. Docker +binds each exposed port to a random port on the host. The range of +ports are within an *ephemeral port range* defined by +`/proc/sys/net/ipv4/ip_local_port_range`. Use the `-p` flag to +explicitly map a single port or range of ports. + +The port number inside the container (where the service listens) does +not need to match the port number exposed on the outside of the +container (where clients connect). For example, inside the container an +HTTP service is listening on port 80 (and so the image developer +specifies `EXPOSE 80` in the Dockerfile). At runtime, the port might be +bound to 42800 on the host. To find the mapping between the host ports +and the exposed ports, use `docker port`. + +If the operator uses `--link` when starting a new client container in the +default bridge network, then the client container can access the exposed +port via a private networking interface. +If `--link` is used when starting a container in a user-defined network as +described in [*Docker network overview*](https://docs.docker.com/engine/userguide/networking/), +it will provide a named alias for the container being linked to. + +### ENV (environment variables) + +When a new container is created, Docker will set the following environment +variables automatically: + +| Variable | Value | +| -------- | ----- | +| `HOME` | Set based on the value of `USER` | +| `HOSTNAME` | The hostname associated with the container | +| `PATH` | Includes popular directories, such as `:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin` | +| `TERM` | `xterm` if the container is allocated a pseudo-TTY | + +Additionally, the operator can **set any environment variable** in the +container by using one or more `-e` flags, even overriding those mentioned +above, or already defined by the developer with a Dockerfile `ENV`: + + $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export + declare -x HOME="/" + declare -x HOSTNAME="85bc26a0e200" + declare -x OLDPWD + declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + declare -x PWD="/" + declare -x SHLVL="1" + declare -x deep="purple" + +Similarly the operator can set the **hostname** with `-h`. + +### HEALTHCHECK + +``` + --health-cmd Command to run to check health + --health-interval Time between running the check + --health-retries Consecutive failures needed to report unhealthy + --health-timeout Maximum time to allow one check to run + --no-healthcheck Disable any container-specified HEALTHCHECK +``` + +Example: + + {% raw %} + $ docker run --name=test -d \ + --health-cmd='stat /etc/passwd || exit 1' \ + --health-interval=2s \ + busybox sleep 1d + $ sleep 2; docker inspect --format='{{.State.Health.Status}}' test + healthy + $ docker exec test rm /etc/passwd + $ sleep 2; docker inspect --format='{{json .State.Health}}' test + { + "Status": "unhealthy", + "FailingStreak": 3, + "Log": [ + { + "Start": "2016-05-25T17:22:04.635478668Z", + "End": "2016-05-25T17:22:04.7272552Z", + "ExitCode": 0, + "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." + }, + { + "Start": "2016-05-25T17:22:06.732900633Z", + "End": "2016-05-25T17:22:06.822168935Z", + "ExitCode": 0, + "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." + }, + { + "Start": "2016-05-25T17:22:08.823956535Z", + "End": "2016-05-25T17:22:08.897359124Z", + "ExitCode": 1, + "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" + }, + { + "Start": "2016-05-25T17:22:10.898802931Z", + "End": "2016-05-25T17:22:10.969631866Z", + "ExitCode": 1, + "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" + }, + { + "Start": "2016-05-25T17:22:12.971033523Z", + "End": "2016-05-25T17:22:13.082015516Z", + "ExitCode": 1, + "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" + } + ] + } + {% endraw %} + +The health status is also displayed in the `docker ps` output. + +### TMPFS (mount tmpfs filesystems) + +```bash +--tmpfs=[]: Create a tmpfs mount with: container-dir[:], + where the options are identical to the Linux + 'mount -t tmpfs -o' command. +``` + +The example below mounts an empty tmpfs into the container with the `rw`, +`noexec`, `nosuid`, and `size=65536k` options. + + $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image + +### VOLUME (shared filesystems) + + -v, --volume=[host-src:]container-dest[:]: Bind mount a volume. + The comma-delimited `options` are [rw|ro], [z|Z], + [[r]shared|[r]slave|[r]private], and [nocopy]. + The 'host-src' is an absolute path or a name value. + + If neither 'rw' or 'ro' is specified then the volume is mounted in + read-write mode. + + The `nocopy` modes is used to disable automatic copying requested volume + path in the container to the volume storage location. + For named volumes, `copy` is the default mode. Copy modes are not supported + for bind-mounted volumes. + + --volumes-from="": Mount all volumes from the given container(s) + +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. The value of this setting may cause Docker to not +> see mount propagation changes made on the mount point. For example, if this value +> is `slave`, you may not be able to use the `shared` or `rshared` propagation on +> a volume. + +The volumes commands are complex enough to have their own documentation +in section [*Manage data in +containers*](https://docs.docker.com/engine/tutorials/dockervolumes/). A developer can define +one or more `VOLUME`'s associated with an image, but only the operator +can give access from one container to another (or from a container to a +volume mounted on the host). + +The `container-dest` must always be an absolute path such as `/src/docs`. +The `host-src` can either be an absolute path or a `name` value. If you +supply an absolute path for the `host-dir`, Docker bind-mounts to the path +you specify. If you supply a `name`, Docker creates a named volume by that `name`. + +A `name` value must start with an alphanumeric character, +followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen). +An absolute path starts with a `/` (forward slash). + +For example, you can specify either `/foo` or `foo` for a `host-src` value. +If you supply the `/foo` value, Docker creates a bind-mount. If you supply +the `foo` specification, Docker creates a named volume. + +### USER + +`root` (id = 0) is the default user within a container. The image developer can +create additional users. Those users are accessible by name. When passing a numeric +ID, the user does not have to exist in the container. + +The developer can set a default user to run the first process with the +Dockerfile `USER` instruction. When starting a container, the operator can override +the `USER` instruction by passing the `-u` option. + + -u="", --user="": Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user=[ user | user:group | uid | uid:gid | user:gid | uid:group ] + +> **Note:** if you pass a numeric uid, it must be in the range of 0-2147483647. + +### WORKDIR + +The default working directory for running binaries within a container is the +root directory (`/`), but the developer can set a different default with the +Dockerfile `WORKDIR` command. The operator can override this with: + + -w="": Working directory inside the container diff --git a/vendor/github.com/moby/moby/docs/static_files/contributors.png b/vendor/github.com/moby/moby/docs/static_files/contributors.png new file mode 100644 index 0000000000000000000000000000000000000000..63c0a0c09b58bce2e1ade867760a937612934202 GIT binary patch literal 23100 zcmb@OV{j(E_x888ZEbB^Tbp;=t!>=3ZQHint!;a2+qU)J@BCgqZ=W|wCYelTGH1?Q z`J8JKt|%{o0E-Lzo_Y4dhY>#tUQd@oOt3`hoOL|BKcJqN3Vxy$AhV?ax|X zwP7_xdmcs=u4=&I4-7>PVT51Et0V?6adF`Fly>RguBKa_?u?$lQ0phh>{!;bJF?oY zC)U*RkjI zsZ{9ymFQPP=Ffni?-C9P+OC9;+m(1`#sifR2o?cG#SMq<7^FFga@MBPTZc}}8{OtP z!CXnufoQzjhXU6pDu?FO?q?N~x)gG+s5_)nwFPTwBYb7~CibK72sE%1)SrJd^${!^ zEHw&Dtkf^i4vaU^uU^}gy=)4@H#0a?oWg*dF~t!rx~8uPt`)MO;*-1xScpDwk8Yd6 zV|oWAE&v<#MZbeL_k~-u+`8|Wmvs+s;S27t7f}( zmXlHcCaBX3IF<@+Er8_&TT#hEUm$9AWJhM+a2DG>$2af5AN|C9xfYFRFuLL?G1+^h z)!T!@8{JnMuGS?na}ca1a>7RB3N7|6Ju30;Ef@hw{DK01K4-F7z?2a1d z6xYB&;G|Ie<*=I5*<4$#T&O(X^Lx-a9hgg%5-@!cNcW>GOGntHUgUcesfkwoDU2}( zp^XvJc&>8m=-3u88Wgay@j*@{{4f45)4=9Jiy38dDa1naT<3nu?&eE&0d1Rn`W{JW z+U8LtqZPyYp%!)zg2B{3Lbkcu{ZEP<^T>cg)(%UgkghgLTC7~CpI^-|9klpVyAWH7 zHV%sv1WpJwWSw3XfkAq0r09b)a@xX9bKHO9xrn_r-#E&xkbhxq3~^b?MMfTMrD`?h zS1|01CL;=KT9+@O+wU!YZtx6RE#mF=ft=3IwPZU@AV(b-C>&!>dN-kzGwftdTF&rd zV2HwHnX$g0Btk=EJ0UuWB9?h;sVo?F6sphY7i^nLaqx;0#|-1_NIN znQ=;!ID#{lTECe0DwM(&vRwJD*)Gz$_3@es`+CpqM7Q%IYfT1VZ34@S>|}^$zHlkX zqF8IhE*z}@E5)`|)vYmMV8Fuuwl#g)M;dnrznCsx^)FDd&2BGw3%jIv9)+lZGfX0) zYXR{+(nD1^wvq>61Pi@=cYUqu1fPu({)DXZT}E*gsEJb65~g}4&2#wpn~EreB`Aiq zg7Pbi@B7eVz2$nQVzSKlXewM|_q0II6N#6^m%zv+)CQ#e{aek6h^j*oSezfg@@ES8MT#Kt;stNsM)7<|y0gZ}3|BfisAPKfpssv+PnM%1VQY)$rhkVcvBn)CNqo=Dw6JxNoo5 z+?S)IhN;I!PR9W}TV0%Cg%P|=Rwc*vuJcWXX}&i%Q7k8eY_YQNJh(%Oe)&`@<6og9 zh)NBcA`1moGJ-+ovIx@i7-yFFK0~yQnAM8Sr9^6XuoX!AbC?P$q2~6iiAfl=nAEpt ztl<$HhP=f;xkH2R(?;rl($kw#&`v9rb%-xrT#kQOtTDMAWxbBS4HQeav)Z{pn`{*f zNzch%el?n$o;f}o{1b;5TUzc)DubRG z_P6s%V$>GskUzg(#JC12ZBkjI>s2sl+{Py{-jh`1@wQ-7m7=K;DQA=*epw=LFrIgt zbYj&O@gA1R^a2EOt0z3ZbP);;o)nTj2r^hI#QW~6(^`bRt$iy zl()J$C_Q(fa_hBYvjNUJAg)_uN(5Yc^KwcJf^P4KI#lO(yt;Dt%5_C^SN+!I$*8R( z9M?!Cjh>#_`&aK}=msRvaedcGO2XFZhr>Gl8X2nHL0P#*@kV6UApxH+v_8;{N_M0Pl$r%yaDSqn6uqqIJBFa=5ojh-~A-^ zPmeoEHRJI_JUY^dNhY?vAL@v^rHqsPz1_@V2OKVYs(b?9_I#N5q3U0Chzi|5KU0n> zRmfWd!I0N3rM@IH1c^x0U4q4o!Kk0L38x(yTR!)X$*8xmmY-}#7eIVJZkfXzV88`x z!$Al&+Ttx~wK}v8dyHMT_ubr=!e}@<74}62y?&~OkaF>|qt=A81dXQ63s+)pM>@<@++StGdv_h{M{^*6(4wSA?L&`h(vdvy=jjQhB$s3~ob z3Y`hzC%|P0cDQR_1}PGC$1c0J>5p^z_2l~T?oQ@{K_oKU9b&I1Oyd<(gP?o$^i(oi z9e5d7eY~#v%EB)Gf-*+X+6%$qZ+Vc+9vrd(yTQ-(`=Y7~>G&eQq3jTPK z?+SNz?L*%+JQaLZ}9TlRVYrGKEHC-veo<>48F+QB1OX_b-y1 ziFZCun;tAP&7Y0IJMx=&mUN}<`z*`t?N2aY?vk(MCdW=@(=uNkl~T80k&p~)NiMJI z_V}mOu+X9?basLL{_S~lM#kyDMHEU41?1ZZYu1zXWaw)kUC>@9r_yDtAL81{(T__n zc9AR&U8ZH_zL$h_w_(L9u3!zbOuU;*atF*!Dy$yc+6jHptGZq~HnX z=%6U+11T1-&TdgZrFtxrFlCioFmWS8XF@Re;1RY?ajff%ePKRjy6$9|i_b(x*Us2^ z%v;$!7$sR!p8U~Ig?=`jzhrjarKGU>D6stp{3uUyIHUa{YK?X8$H#H%cXf^v!fhH` zd1;q$Ji0XkZSz3MoDGoRY4HrVPP`SD6+hx8B*&} z^G~lwg9($F&bIPYuHK!?;+*1Yn|Z8zXgMP~u^^s+)+BZTyV`i5c+jAnK$g?Dnzz)l z!Lp;k+7bS(+$`DHlDO2k6A{&JPK_#sC_IJ<_YZn9bCeXD@>Dj*oU~^Oipd|a)1j@@ zd9c!{Z-pq$UXCd0r8J=tRefM@qn=`aPczYp}J#$p75w76m2L2&@gGH$xJ5 zEeP7RUtVAh)Jbl6Yn%30+Y`!kdZ6Ddopj|r8m7J_48skVI%iLci*%x^Cd%eekU|aJ za?ga`snZG8h>&sMx#noLwhE~?*DOd#a%inn;Nbr3K6u?*D6_Fo4*LmTtgal6xQ1MT z1fvmX5TVixN1@_b0Lk-qCbFQ0`YXFK?N{fmK3&ch^rVcuNO^sxWLHOilBqsU-jY06u2G0xa-3$+X*yVJf zI~UKD!|JODG?=z?b#@%R)2H;3->;EB5UGT%k572IJ9H)t2rW?e7n=(GlVKcew~ zam>#q{bbEX(IZg%qh}Cl4};R|cEX^#^}ujEn3WWj-=VjnVqeB)6cZ?-M$xdb)@29m z_ufV#^-1gmtJ^Qw-f{Jt@Nxv>aC+H%~G^2VgIo?1)MnmtZ} zMI}6!Tkm&Pk-rA1eY&y+84IJ4bhZc&$fWF6YB3!}hD{c%cKVSh;!zxj<{8y5_Os6v6lFp|HsipZZq#_}(K94RQ%wcq;|?*27TE1D)R4-6exbOcEBS z%I}n{D!QE7d_+oy%BWK3SY(;`3NWvBI}f5(w<3~H%jY>PW<++|`1Y_EEYv@E>|FBc zEpBxzGto3zWkt7uWI}nDJG+c}F8AQ~ug--Ojkl2wf@lG&9pol}ZWNM?@aV(v^eFokThdqjox%^trPxl$6CuYbl8lGynbl3QZ z3|b^N@NV8<$P)TW_=*|bTF4X13gK%)i)@oNh%DHS4h4+3@JM!lRU%NkrgpAinJg%p z+qa_h@kz1s%DxKPs!HgY*4N}qhE0iueK!OWx3xwKQP`Z4B4pdmju_3Md8|mk`Cc%- zYa;n2#lX}yK|c7mT#8@1TY&NeK9!hbYV>XS@%g{uU4GEo&kVQS{-E-?Vm3JG^W~1w zC!8Ud-`K}PlWHPN#=i|nsxXIDk8;#r*=#3GY<<$9w%{2$LIH#tu`su(9oJAnqrfw4 z2B31CsqvQR1`u!wcRiVFFJY2QTH7mv&p0FCxRvc&4P`=p7FV06-O#rf*;`riR2Hen z1j%}T=s42*hZN=hN?|AJ&QSw(pTM;uW#)&wuva7y%?`2+T|(M%dx79#Xr6Tf5EQZneX5 z2P@~l%{-;BRK%8)Gnu9s`b?a}EZWeWB*{-=kM>CA7vs}9+IoD&2L|&IVri>A zKNu&oqw$OineM{b%a(ZL!=TMLe44hW;ClSgZtfDu-Ckdh>m0UO)UTQ@Xl*Q$Va9=y zYRJ(y&SXiWT>*wDSF&~!19j1SQE(K74up4it=ZE9J4HVybMXTlaII>#b~^2i^B{4k}n@Vpf23Wh$D#Ya#JXJ1Q_4S^<1( zeBvx#7Lt$S{5mY>4LYSNp-JyPj)f8Nl!!>RiM{r~r><3RNmkoOaBJhV5071mB<0&6 zMBRb=79(1t*sG{m`7#2S)9&iuYS}3(U$hq&`iCd6TSvwqW)3;&U7gCY ztZY44JH#hvo7gqS=3590X!A|b)PoxmJw%j>qRjFM0gpSCK0d`;XEr16jPQ51fx8Pg zHE3**?VgN+8{s;Q_Xwq`q**R6L}YvGV~&d%YEcmsHyvkY zEt;G0xi+_(@Rs;=y-^647=v$D^kx7XO*I#*?Q74^vu1pOSVT&eYe;;Oxm&_%3dS__ zs%gcJ3XFwo2WK^pr~YD5*6G0vbu;}Ccv>sPJ%&hyooMt)Hz$~RQ-kBPU~E=(0nT5- z*v}0Xo%~=(jxL#S-!KUQrk*}dD_O^@{9r2LkwH7E;&WcsY42>R@ErHV8^YC=r?d?y z>*o$s3jBT#2Z3Rk{rANUiw|L$fsL+2%4DGOYUPpKZ!Xq)d}v0d+p#`1{nQz1MVmSn zXbTQX-;S(NfK1E?Wl2z>fVl6y?utRE&}W| zt>AS>Z!PKGCJRwLEs}cqL*YqtkljsW9JNFS#gM8=6{@E6@B^8@6B!9?`;jdeM`S%w z7^M2f#)~)POPrKF@TOaEl^(IqRJ8k?(rQ(%;~Q(pZdR4HzkM*}5A8#O!n%~$3ozAv zSp+E~WuP?AMQIp{h5@4#!J%U9=W6_vmk|rQ*8a)Yg=UXWtl=)X*R))nDbE0{>1sC} zi3b}oQ_gy-p*=7y;~8;*Vsg&o!(K(Tg(5S?F_9x;(dWg+nze~%qYVG1ZQCQ>BVrWm zCgbrp@g}&O_i-eAkQAAU>aoZZ^LGi8+fz|Crw@SDpfkc0VWK?f3xG zVYb*wPj2A$79(=&ksP?KY>3_)MNsFbgoZp(U=hmM+Z-J>}%+69htY7X}bKOmOd1r0d^CYh6 zkBQB&5v1?nR$AD6KH2T(`_;hnHFoIV`AQ~JMXr4&^(BNg4G;8KIVsUIrMGCEUo`x6 zDBTiM=|!;Z|H;4kAs?mR1Cv}yX6P#{-a6G)-w7F}HTsHYE0zx6AZ z!wcPqXUs@YxXSGgBgcMApzWYs{(4;phk|>E#G{#kPjnc&jp8P5dRd#Io%cSDlzlng z{AhE_^Fcf0^47L~W|bhx1W`NADQjZ9_^#>JJqLWyI}W-hkq0E6s__<1CfV(B6ozE( zJLr?-k4))TjV@qH#IO}MH-XI7y14#Ng$jaCb!7|lCfvK)J|dCKLT$lWu_UI}E7S<=H?DX4tYo#>yf;}>4-2VQy4ce1x&>fn_cLn3eO&nO+_?;#i zw3i(aEh-z&FG}jNXpcJU;%eNMLpxwuqSNXSh&yA&BCXUfAl5O1-lUi4G)SO^1AvJ8{+@GSRJ8mE(5q3!94h^DUc zsr=a9_gw?$ad2Y1Y@1POPgkp0oBiG1WTnHx5?6|LX2{D)66f>f*Q3Vti0qrNAay}C zh%T%?SuzH>n4f<2JNj@Wjs3myLa?dyJSMgBp~-2zxiruG5HA*A9SyZ3o746ncVyUB zr9Z2*CPH_g{xc5N=Ya$YwsAI4P-77_@D(ND{awAZNzpRC! z(eUTAy7E#f%{`!3lImU=oT5I-2KlcfRe)kvr*)18JO{+_enyGJRD_@R}L2D0L**u?*Y(60`!x z*<2sS(;}Aop9kT~WKM610cm6BTlwk|U+)g{D~&%Fsa_7j)V9 zo?I!D6`cW;n~ebDKfE{sc?|SEs}5vlp#k}p-i);>Y43kCC8f86fgaowdl#(exa?0o zh;mM=3V@aE?LPTY5VR5d?CZ)Y&I{J5cCqeX&g?_gF^wg&Z)$WD;jO{K9ui@~Hdj^rxlpf z9Eu+P3O8*wP{yh~-`VZhB!;)-!?ezM{Pkr_BHBZ{w@-!UOb)n_)TB0L@!R$q4^>Yg zt;>bDoi{a{o7x`XuVpZjHDA2aLprmWWd^cfhex7inNuk%{;BKUkSPNRtEc zza-iT|4qj1;R2f=m{ty%pat4qAxH1<*`I~r`uXQf{F}ZnzBbL?E6KIDEzbw}X@C#N zQdK%vM`uA8FrT_Nivxp=Zrj&C^?O=byJN0h$xdf%M@MQoomvB}W=kX66ddBUe9fwD zG=Zxh2c-bo4V|&ZKIM9n_)`^Ty5q0Y{YN~TufSD@@_r;b!8MEl1C}aHRpBKE{wkSQ zIh=c10;>R|62RCSfNm0Bdl_HBR>{wi|HTtoJr1wc^QO{l4I2ukOO*7H?i;kX4S{t< zenI9Nbr@Ll1HLM7Ht4ReF#qL6cnM%m2(C;Tv5ykO#EznYgno$G%mvhivr%s5|O;3m_Q=d@j9rT zYC=+-J@Z!C9qD2FHh!8p1Mia)oXnOOdd*t6GCNqwGR4HrC_&H^oZRR-sZgWwTbj=O z!=0~J4*m{FBDybv(RiiULJ8M5fNf!2g8v?bZ9ywSvj^E(?KyeU8p$Pv#OBihHeBq) zWMM6C$^-$WkbF$@6WfF0YWOJteJ?Pwzx{^{VYtxXU5*=%0mBuQ8=O4~|H7anp)cZ$ z0uTYL;eR|;CVIz(@DOTo^{H01+nVI=hAs;DwN^=jbnIcgOo&L};!2Nk`s%D$iVPGF z-3Nh!pIPV&RqHb0$qkHq*0WJgKe|9$%vAx#?8&{GBxOOgn+v9$!DyXFq{9_l3}j&$ z&d&dVqCWKwqHlHG7)>D3`5}eCDySn zn1R>NqJ45w@Wb40!u`1rM)>4RKIuutkTs+!SVgUV+BCcRHQkN{FkVj|Q}5MC3Mh6w zHq@8pA&j~X?ks$7ZG;V$W1NkFy?v_E--yMa(qO^BJ-f7K&pn(Rd3$uAJfnvwb0_wR zLyuKILd9@=@GNM{qZd7Sk-u-9 z8hnG?8;2d;wu~z9aAgbkitRNa%9#yBeR4pCVMhb62fI@{!@pA3E5W?00%W?<-hTeb zXnt3)qF=5{c=xt{7abojDU8~F!9~(O9G!-mPYuQUT>M+?0c<7J0kkeW8M@=vQEew( z3Jk^Ooge`<)yUQmpGz!H$q;p`ONP!rxNL_Lq)f4PIn;yqOnsv6LK4(;LvT2{K9frj zqBc@E=fP>%qJ!Ts7{?Yn?EUC@MxFf87{&VU`;e1_+JXkqJ#_^!$Jz`(KuMcL7A4(R7Tr8loR1EJ1ZJcFSzl1rW%)Y)3T?Hh{Y+D`UZ25Hre|mZ)23 z=Wl5F(bAXYjh_;At^ar)a3P$^*^YQ8^b!ts(*uO9x=(M)8B;j6A{hr~G$o&V`S#Kobn}9sHW#ioMclp98a; z?N6i`F2enDfUyLe6XgzLH1mmH#vs;j-`sffR8soP>rSG!VK=v_O&gI zgI&!CbG+2p&(uogt>UHN#12YvdgE`X)nNc)o6V6+TIQMP3UKQ_JVt1>(J>iPqSw5o z?+EY5cELRrOysYfHWjLEr&{U_Et9 zBS;@k^qwEsaQFI1*C6-l@EV>-A%_bjq|Z;LYt~#;s@{mcg|Mx#-^9i-?KpktZel32 z(5-QS4&VA|)h3GOf{tlZf08is>8S8^?q&-79$^JC!wYKoe{aLtf*jup$hXJNT-thq z)(Dq%Ph&1*{?k|6?SDPU3g3AawK-cX`OAEK_h_JWx)3@`8E8TcN3;gtaqd>)I9R%=94AHMg1-R3lq_ItzhtX zs@2XwqrcU50Cld9()E{=98uN>?4*<3$=&Jt*=I#M{O7hq(}lm5;4h6OgNF|nI1l_6 zSOZEV!`>JL)jmp#pdugA2g`hftMd`HHl+U?F9Zo-Bgz|7Zx>ARx1!(~lYC(WLQmOR zz40Ifkwe8u8-W_PQj1`Y8+I%pQA%r z*ERO#JtRSRZ22YC1jO9e%2sC4vfi;DCsdJw%r=DyR^Cn<`&m`Fj3*l7`Td4y?-+Zs z6UgzNDDO6sj5vhRNUj5?(K9LWv)2Dk({U7ftADVd{InqJrfH}Qm@Mu(yiXBL8bbyw z?__zMDR%Js2D(_v{yv!2Se(Trd>3>SAXH9Jird_GG0ln#^TSz6Tr+5>(N( zG*L>oEKNiITf^bYBsXMoHrI>vMap2IWREM7f_v5aP7n-1SP}|c-T68l-{5b6oZQUb zf#rC0JH@iUu0yFsgt_gUYcWz{k;3Q_5t z>1{sQXG^SX|G3ENOGAg!obXnMX3JO`P61J)J7U5ziV0`Dt9m&jYaHDMJ)zX3JP%2_W^a*-9oD`JjAwe1U9A|N? zw4O7fOfeGURurS}SI&HQ6@p0Qie-4a*@)Vu-C`r>u5{_b4h<+M4v+{Q?{-OAgmi}~ zLkWK=o68b1DM-A};8tm0s9JlmoRkb`vccCm@+a`?|p2m6%3tek1)wAv zSa1w+Vpxe}Ss~B-lFPh#DiUB|{D@SNZW_^wTL~l*mbv>~>f>l!pxaULpKEw=)qg$Nk9XOZYa0G2t^x+V<4bVRsZwaH)l>g+ zt9F(fdeUuY&9`#q67O4N#lp&p4o0I`h;h~8nD%Y!VOXH11nFhZNF(N2!gf2Ki@*^O zc!w+}zr?24dgV{{jW4j06!46QsxD8>kiKy|T2coVzKR{z8^&;gdKBv?RI~VNRYNda zd)o=b)fVYba=_)__Zg#v!mI%ov5u1dFhn?K3pV+x7}eJWa{bmy%>Hz*yG125JBl{T z0y4rsW1Dfw12$(}+o64>v&Oimt))6>O{WYr!WT){=t?B(hyAnkc2uIh(QKd<&(RY> zNdREH$S)4bcXCL_llWl)#TbELpD;y}%(Hwz)JO`&y7M}*jp$IMR>^)-x~3y?#kQP^ z$c3J+)0Pssgf7!`$Mk$@d8#qM*4rFvy%N+&Q#yiMv#F4d3BQkf4N_@4!((sVyXLA8@w)$&@DoMIpDvLKu>k@E ze2;ItOR)VYr@!35`h2SzmE0*5wMysn z(fd+?#wH#rw-_Quw!}dztCp^KPa$TXl$x4!!8kdyIb`7Z2);G;*s!$k2hB1wRa_VD zLZO*yZb{h|?l-ZkHYx|6%csKFL<>S$eM`b+@*Jo~=DBkS9<~utwZeQN3_>#6ibDBh z(U32@O#U79W_03WUP`mW$8Esvj7=er+iRY;>&r?58Ko#pD>E$Xr?&r1Z|5L|iPvYrRTT=WVsg(!euCvC6uvh>(|SH7tMKxFw@ro?p4Rw4=%LsS5< zKo~laFV)s)e+gu{CqsplvDW8X*XChM5EU@t+mm^5Mbh>6UhEIpQ@A*wiD+`Q2-z|S zONfxkB1I4RrBaaeZI2lJ_M0eo*GU!qV(*VJ--mKto&X2g;VD%+EyP{L0Ndhm$N2)@ zI2@>&i&&htvvfk*6q63hz`!xspc-rkJaSlUroULnDQqaKjpBd8hZ zR&>)cMb4Y$Q`5jmi$wFrQsskEI_XfX4Xx1V<-l=$%PRg$K4YQfca@`wZ)KY>t#7zR z+qI!!jbr65+(blavJ|Iuo~_|TQP%kzq7aBXNAJ5cK&)j8siLY`$SR6)f!VbX*~3f7 zgeWP*E|Kk$?fP}XXAkqwHuDeL?c^1`pF;^{`8XPaB8pTIkc0U}??2l2Bms?ki);ul zl+iT($hNQ_uns6rS6qT_rQA|Kd&plh=u(JbTSX8?r4{BJ@R4^ZGQEt&TElM&IPxvc zkK3^PDYTe5Q}q)cqDlYxq$6|)$m+}`AT1@t19>p$o)BBro?_d&5We2LJ-m z3@9|Sp!rdJ9C~I0qG!sA;JC0Td3JRPAT4QnsQAxrb}m34BH@SEleq0|Q$%CIP!sU3 zP#yg1_u(w9Ltmqy4wK$@Uqw>)w<#I+xkOlR!}9dv#%P{}Jajf$ zkjF|z9$Y*37$OMF1;1AeSQL|b z{1V`=?F*gk=fXn$>uMDvErrzMnKEm?^LAahMRhGzZCQ8hwx4=sR`(T+vBZ?-iA{bD zooqvt>;Tiq+6$u+d4@ZInI{80A}?rz9l?SIH)2ny_-7Sb2N~U`OGzqP5EL2a$287qs^)Tk6Pign6p&L3r?iPUgx7>h)9v&@~SOPb{-r0MD2 zyi1~1dG5c=A9oFa%E9hJ70O*dh8>=<$eQZ(@rIlIk5gqD!2G7gPh(pDfc$s%E14!j zV=q|=9U8IYXHRl&Ag0+-iN5C8$E@NSq{tLxNJ_<*klrU z&%PYkh3%PX1`%*lkJcfa5`eA}6IbuGE-`j(HS+d7-4CME>zu-=Bf{J9nw-Xky*a6+ z-frjGlhPmUkSBxmT#Sok?0-t%QxZDoQM=9^OyybOEH4oiFT4OH}G0 zW|fi!1Wk&~2xn~VaLdqPMJ?(fEWE$L?kDvIsTWv#@5l^M_l13M1coUO8J@WrFF*~} ziWP<YSL&H4` zY;+h5Gz`J+{xMb=EQhZz_Nld(z-{WyIlD$S0l&)lKc*NqmOL7K1?5WYv8|>t9-$2= zJSyqL(K!0PS#loR(<@e?JoqmVW@TEi@ijcYp51_dv68EwV)x>l$dUP*?=p_#C(-dW zW+6n1D`{nI?efj${n?}ZpfoCE?WM0VN2rHoo zV)$feiB2O2&55bEm%i#rGIgC#SvLxp-(x z(3rn7h*CL3`eXe-UqPYb^2!CO5rTs1ngpv)ZC{2OtC{TZK2))j1}aZYcxM9MHEA%K znw;~j%t#WRT1`xK#63Kg5+qu&^^o(Lq=QnMqC@D3FwkIbT}m7BtA|k{l~^238-{nC z=D&w9>hz&h!ZS{Z$_X^B$r_F)%_V&?ieS&O5WDPzusU4FPymx4-j~B8MHCIcTDlmz zoE=L#@gS+g|I2{Yb0 z!!*+l7rs=i@5=Pb6HNiYR!zvcW`HybsPvPvBw~s*iF=);Qj+Z(r!?TV8&2LNKQj*v}$m(Blk*sx4?U4jJAG*7c;0tZh4+Nkv>KUs_#>M zynwKqsv`Tq+2zi*hGv)l(_+g0Kc<=GpV)*!aet}vAnFHugLstX!B10$$U^O)DOW1ntiSk|P0Jp4= zbq5lPxLxa^BWU*>Dc+GIETYL?}JzTo0TzpJGW^&JuRyKiPM=MQjNOf8|6KC3uevYbXv{B+CJ&+%ab}zG91B`AQ z@m-9XDp>rrXCw4OAZR))ggBjWiSLSdj`2ib=-tl@f8R})F>Dxr2;#wkaEDXEx%lmOH z5&H8~CAtDG>y?w&BRTH%0~Nc1^>O2MmDhKjhXPO07YMAE@;o~DQ2hk2FIIiE!00KF zR(yU@MZ(QtWlU)QWkn$Y0ZfN<$h1>>8fSnQRp?Bh`yKYCS<)gt@K3XM6Nom=&cS`@lrzBt3}9YIYsUE(7NkU)jgc#&y+gs)5*$D(>e#e;A$DbNj?Mc68a=3y{JMEPkYd(+8Zt;6>Uq zd_3>Bf9v^Qtd^V}{Y-MP`=!yB7RX-R-R`Cmx0Qg!dex|^!u;T%p=xr|Gp8}5{Ta+*?937 z;X{&jYf)|;yR*&}=#6`$?Fw3VuSCir4w_}pM9c1<68VFYJJml5rZAE zx}^{)x&s6Z0UhN@-Rk8i(9Te86PiarGUl>G?}sO@j4~}qx-`G=-tB$#mYbGV-jkue z{0|4qOsIj+#}y`c2odKwRfZ2oC$aVBPS)feYRr%j@1LPP<-V~m*ROxnXqtqF!Y&iO zlCKrt0TR|5+qfMR&&LQJt?6h8zf&EHX?9*P&Imdf-s>Z$w#%@GJr7V!8a0;Md8zsw z4|TIErPwz{FO;Hu%t;@f_qk_widh?WO1VD!yngw2yx8|RC_W!w3N5DNhn2@4h9>1o^SqS8rFL|ybgPDlmv$-+SFM& zFXM%s^oT-tGNSa0vC#wK_GL1`E0YdrhI)DPm%nlIZa-Ni-hKw*~C4QMvrB z4Xxs*%gvPiM=(<)qX^f=1Gd%^7aZTu%>*_zhHTIcS^&fNWqoXEtuO)i42qf61dG%FEM z5(B^EX(RN)*ElYUhn+=tlsP4JwX}U&$I}^FoDy%j&Cxe;%v>I=I)D7pls9=Bo1gH7 z1p%ppm7S^wKOy5Y?EM3kV6f=uYD<^9q`mE!lsK!@-?&b+ttmB?4nJ&zYLVLHqYCyI(&J5ZU6o;|9a7$ z&@|e$4lCH-fzy%~OFxh<=&P&FU)UEh5wZJyIO4E4wKl#xjb1tddULA0$%DeZjDSqnXc64h?IR+l<+AFuT@!0~16jU$1qnVNJ2l{~?cc=rf@m$ZR)Q z&W?54av*}TKOti+XOH)^SQ9h6A^CizHG#%VulD zqL|#9^9|Ih;!__MpH}{6d3T?j)*Jjf^m0xi-dXaUHxx`6_`5w{dTCdcI`s%GZEf1= zVU3MRKoFuFDti~Ezuywgx>m6c6`YoSMQCV;_KbukvKWPmsTLYNmCcQ0{lOG|bfnPP zqU*`wFg6J8?94pFmNR1T8wyax(N^;EVv?I%cN1mEGU8rR{=Bj~iX^S$HJ6Gs7ZbV=hF7N#Z_rv`I z=j`+BbM}7rTI(z_G^G}9L9;*Jzt1rru~U!RxDK_@Xi8E(;R$+#Omq&uWbT0`x)B)SDNFZobE(jGY{I3MbVf z3G29FiUEJuwGf84-wtMc>-;KJ(h;<3R2gJVl2Xt{Sp8A(9mr*R@0dtZFUZ2m*UIjj z?}Q)FAA3#md$;dtp~u8_P(7TJJEx|Q>mv!U9<;8hRBVyOK-ruqTM{|U`4dkG?j6U$ zk>#qNoM(d%I46G8F1RYSYNMu#bs!4!uR&m9PsPb8`hHM@{P|NcjJToy-v57 z;+sDM*dk_9-1d!HkDnZFoEMUPVM!oOsMlwiwh7wDT`iV zijIsZE9C85w0gZ2;uMCJqo_k9dc_vDf=ysEc0QA}ac5K4{b3`JKf(+DQuOa=LrS+e z%#i;!R8`z$JN^er)%Rs(6o%+Va{Z}HbKIskJ}u^RK|KSq208c(K(G(X&?Ya~{Pn=E z)qp=%k%(6W!Y_Bc;_lmPYg`9MY7>O(a^6J0|8o6Tb>ENSRe0Zr{>{@4ZMN^dRP6AP z)r0yvtM7tw3wI|>+*NnwdF`L%)m64L5SSR?1{VCW8@z1yWT{NVIp8)GmJVsVYFkS% z|HH2xi%-T?O=@rC0^&Y@gwlVUz3A$ zX~`Yu%gPuE`EoMfD02@jLmoFu(}l037@tv}YoZU$o^?Eur*@^$1_`8YJE^g|(H$5< zV%)+26)W1>0OpsXIWmwZRh00zB!NL$_DKzjxN+WO4R>xLIfzr7WA3lBo{S_|{Nh8> zo?oVuRnTWrV@Uw~c*zY!wyx7RL-SjO`?B2ju&L(LBVj_%^NbSSTHsvutI^&P0MQG2 zQljb0aC>4`d_gHB9S)1s7$eSVYXq@cXl&Ir>`Z9xo*;Csw+Tu5cf6$4rR*vvy|Ih? zdq;7+#{{RV2{A?&?-t`eHg6h9$=F(AKJO)!lcy zfJH3h5^qeo&=UPCFO3DIZo-;Z8%227>ML$sO$gmM*u^~Q!oJVi_3wrAReC}Bnl|)$ zX1umAhXIWp6O5v?l@xE0l91=1iYe7cQE!v8ecoQiEY3LQ>$W$O5(4Q>i@Ky5Ic`PI znx#QANYX#|++L$uv=q*GLTNiOI#Kj{!e^(1VbM~}HC;37r{39z-;d`k(a0M*GmU_? z49%Q4son3B$QS~n1(mX-@LgAJqRf>1Fq+TA$sf&5gi*=-U?j{;#02>f?-f>^Su1y> z>je&+`|_`*N$c3A<9@$mY*w=?->DJf83vBd`q7n3MjUsIj?q1M2aJ3Rj;nf>y^)qP zfcqC3Hy7qB_ANG)78E46T4-KYU|DrIn`qxEU2>*9d7;nJeDMeWV~nq65Bp$`$>5## ziJVgM1xtQxnxPb9f?AJu(_1PkYIpMJKV=fN@xqumf=|cqpb@8fL1G%^S=q^Eph#0f z_E(8mc2(p94+THv9GYtK->qC7;gM*|g#vf$(s?~Q>1VXjF=BzwToizePd@VJ2BXnd z9ilh0j-boqjOCw}nQ+Fxi)s>l#n5cIPK*If3LSR0IQ@~BDa#f7<2}VO?lPat3HeoQ zsZ7;Lc!gj172^aDhc!^Xq$E=wwdU0pKSb1MrZy+1;UHS){nx7LTmLd76UFAf0wi$1-K1I3gsV;TzFQe2CaZBl}}= zhK@#=6eAId{*&4KGmia@S0h~W&}Ygt2QfbKgR~kp?M6SqF|JokTcOHa7+U5i2Aiir`qDu869rQnf61M?>DJ^O_2p&hX^B%KX zVk>65tGT;wUg>IadPCWw~=kd0n2Pyw|DVoJ$5byVld5W5^R;Z^0wIDK+e*pe79w z5{Hq-^5>eO<1Wq?G%b3ppF|ZChA*g9@I(JFcZEi`6D&wi2ZS1^*AwE z45YB^7D*Mk=4w-0E8uG=3J-zm5!XUfCo=v{m21{Bl$qt|CWrEgyAuROdvMoVh07dL5ACs{Q02qj_n`z60qB|aqz1~#3v|tRFPJBqD7(l z3k&JyxAoe($X%8_3#-HpYmzrYY$or}pR?|O7yxsI?OBzUnTlSC%J6J^fijDc?PEcG z&d*#GHFY?TNaT-Mo^3FTYJt+UkpZJ1md}fTo>yso3m%)obrXnrE#)@Y7x z&l_KAwbdJX60TsN>R08OOCdU5%04+eQ3I(xEiOc^@O#*VUl>s`*12b#X>egL-t9m> zB-S({5^R5;h#PAzdm;LpPs)sI_Lv%k<`}pb=0C9{#kFF}f++3@P`nA~2z`Nit^y^> zwCZW{bhKfAb{WKGQ?y*|FgI9&MLzed-csc=+;7j$oH@8b#(~c+2gHDkztPsVJ8vJ} z9SC@SV1gM6N08W<{vn^Y)Vo=XFp8cG-MO(fJa|GS;i0>)F}edknqTxCIi~le&z` zQd8cF*5b=}v6?JYAT6MmJSMF7rhC$MF1b()YU5eg`3O#1Y@iyW-+n{ZT0=m^iQqij zIyw1jMCXdpmF*C|fZrv{EpGp`3{i8QF&&6oXx*AV?2p1En;hwdmxfu|7S<3Mu4yC7 z=IYc`4RazPpoKay#hHmUBWWatU*rAEqI|V>=T8!B^z7|RkdeKOu7ksNMN2a}Kc|C6 z)PQ8vez!Q-kcRD6;rGE=hus}7kwN5he1QOfkx6IvGW-^NM&EsE_R9O75A)RiNgJv$ z9XYYIp$kjdn@HQ0z^hF=Kl}0+KfXe{(AFVuW-j!3m6td*?B7N}F;2$z*`Xmkwr>Kf z4$=u64v0-I#=daLpagTMe)HM7B!>>WL zRZHz>*mNF=mF|%&YUR#EbSt8b!#mgDKoo%-w5mSJbQ&+#g!J_EnMudomZ%g|MrN7dfqjJdaV@VbqH=KJ1 z7b+O(6B4*H#^u{C1A<$IGR}ea5Bt9~O+f$s!dB}FDxz}Y&Tn)txOw%qGVaFv6(WC? zz9bYYa>~YTl53*t1N>AsDVm3z&Cf%uL*)UJ3fV;CQe~*jfNTBn4Phx3xCO_gz?^Of zmIuLuqoS>lU{!8d>kcOk5V~sDurFtf&hBbcM0Z8apPYGtyPP_dRc3h;B%YynF@RAz zTB>}f-`xhR#eNT7SP{HW^g;gQ2Y0^Y(0e8U-_cK4Rb`y3R|OSZ@p986A1O|g`UM0o z?jfe#c_A>u4b3()*LWHWW-*Ud^{n?#9~&^g*tm2Ek^n)vJDp5N@nF< z>2L?b4R@?vtbe@@Wrh)NyR!%3qGx#}0UBxx;>_qy{_~t)_Rpg0K9Jy?K~# zT3?=yiHVujuA}-*D5+k&7l@7o^S7QptbGk$X@`-S5i(cXEo;s|(GSbwM@knTDZWkr zZh$*Ui|pd#=O zE|@5YMVWRJYsFUH6vpSd%GmKf3c1v%-R&P__1&R2s@^F2CRpqRU@~G~v0H+MK3y6hTq3hWN>s2!-|q%| zU$!1=64}({#O2x&XY1~ti>rowsSFZd?$AkH^rt4|xBMNvg!`dT@*%RV*d6_biqQls zxvO~VZym74+@nY*Ng6(hCWjjzO)u~`5K2%lN>GoD!U}GA`{nY3H_X4U`+kPcYn>s& z?nmApS0bPl3XU*Xp?WOXYn74gACB$QMV8ZM3rCCS86I`EXdC zU9QycN3MwLTbXKF1GlRDAA^U-O+_^Fl`m9HEr>aVh^#TWYxan~t_?NtcN(zC9fQtz zFS{H}hlMDU9~#Sz?hH8$6nQlEq!UHPi_n*=*>rtn+0U_0=7iPkD%<^ti5*+xhS%Au zy&dZ%4Q;ML^p<(Lm7L@!a(OJnA_Ileo`F25&A~}wbrrET>#0__=XDjV89f8hKFV(d zVYbEsy#GwXmr|1khD!t<#_gVVhiqT}n{=#MtJ?|zYUrCze|>Jm4g_y@Z`atlT22}d zOrj9ONF{oY3etI2BmshHgN1CcpoWb15_YJx?-~H%PVV3Gjtt&`QGK;=5)i$d)yomt z8amgd{-W@)ZCiHK(2yjR7;Zn;kJY>`2!?off#A!TS<7Pnuc3Sz@M70oB5G?_RE?;a*~61G(Z;T@m*MdZH$snsAp4L{~4&3w<;PFiA7crzYB19cr z@;Y8_r+lcnl_lw}`D77jNudxik=@Nif=+DGslF_}ZE8F*J1zXj{b50k@ZkoQqxKM) zYFyfFuQ{J{i7aZIle?xoptdFl*cw<;{`HM6YTZ3;^GUa7;X}Vk3_hMf<2?xi@G=B< zIE}uu_#>bb%{N(GS^Ybl)@ek#Qg69!fFW58OfqCx)YI$2q=fPln?uhm5>V~El3be3 zF^{@nNS-hra8`^pQb487yra7;r5JG`%)xV@BnDVy1x$Z;G_JhI_gEI_G=GCkm1WUC zw4wXmS5X%bmU&OeSb<>^;7@^cQY#W`nF#I$e=AqN#y*yVH$E4n(v)X~^HM;^T?i11 zqdaje=HB2BP&+?O-fh>c< zc*UoO^hPodZ@zNStNQ*66Y?Y^HX`~`qHa5h7_^LhGSQP#@!F zO|Nh2GxrP5M@|}KsDn|rww{nCVaXpJ{?Mcq8Kbc1`jSnl<;6R37R8f%Dh%VMk(K%< zF9kh^yb+)9L@cVsFIU&4N7NQ1Az2)#LgD+_ z;F6z0MW8{6xN4(r513cm`pFEXYUT6~io>~nTTfx6B}6h%T-k~JdarMdSmb4OVHIx z?_;)Mc)dNOujjiEO12~_ARFtj)4IDEo%3}psBt97ooyYA_v`JKA;0(4FE87_9vMl0 z51PI#TJL*HH1oR#Y?INdDP<6h0m+f2H<4bXyh{e<`t%87Ng1mfZyj@Y6h59TRM1wu zbFjOaJt}a`WfmQ;eAdVxz*;+ce z7p)tTw3YS&(`0OJe~^A=p+~XuO{P8V{wYn4q>R*Q4D~)Q>e$U?-!gRVHAC-X)>N3T z*XZywk~H@k&&>YF^6|mq!`zF1knO@A^S*NA$x_{X<`Td13QKR1txaA_Kau}E-N;UF z}_Nd@KmPd)J@GAAJyW1HH|?$Muvj@6BexRYo5)cd~F`_-spS9-Q)8GVMN1)L*T=voe>uY0Uw~F(xk?{pI&ZfN1;Q6M*|@;*?k38mE43V=eHCWpfwx_C#r znAllw?Q*~E6z81p{_Bque~HKLaDi1Q*3|XZtP#Ea-Ut0w_7PG`VGJklvgd1dIu%@Ub#a?BJX7Q2*Nx2x$mJ1*^ja}|1R)=J4Vtiv8$kotL2sflbO>$xT?hsO zuY+Alx)(6`Z%|>0ptQfJN+lq2Q~l089zy4B)UX4LKEKP>ou21e(zDC+-0;~l@Bh{E j`2XOe{3aXi30VC`rN0&MDDi9u{X#`SL%vehEckx_MeJN* literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/docs/static_files/docker-logo-compressed.png b/vendor/github.com/moby/moby/docs/static_files/docker-logo-compressed.png new file mode 100644 index 0000000000000000000000000000000000000000..717d09d773cc46ff8297d06f66d9aa855453f35a GIT binary patch literal 4972 zcmai&cQD-D+s0R0Yj^ebT|uHmbh}CrD|!v0*I@N7h>`>myRteVy6C-&h-eWTEJBos z9-<^fCwij%@;vj-``7QC_sp5O=f2Ko?rY{;=dTm3uctu`WrKo1AZjg5RYMSn3;=;h zs3By3m9`raW`B#bzK*fl-zuwX{C{n-Y$9*-1}L@}R6Pc&oVoU4=l$%0kfT3mND56# zql9}1Uva-kRFRCXF3fhIFQ&XZ}oXTOFRof05QPgBSL6y@1 zl15u3N2zTTe@FcP3$ULSt)XykoVa5Zj-VooAmOmv9K8eRY%r?F2HGJ->^d)z>(fIs z36C3F!*19iQFc#}ioi8E9r%m^O#b1iB0vWrBehe!28TqDgd?xf{jbPSRx|Sc$2mT7 z&m4NttP7}oTSZqXK&JR-6bJ@`zvwcz;r&N4lZ@wj+)(lm!0-sOe} z(k90<@DUDRX8FZ{eyz|1*xIhv%ID3D&1ChI_x`>`Gg;e4bH{_FXe&)x)O!9J{%Jv8 z10pkYH+EE#h@}O2-tj)+FGjX?9&g?%(*vn1tBCYe=Zl)^89~AWtfi;p zbU;b51Uq2kF^Z?IQ(~B|4Iu3_9+o_vFy7H(O8)Wz`Gex&iU2ojQM<@HGj-r2_lrl0U+sN@`J7yX!f1reW_&hJt$I=ng)fsW zyM^%)1x-`I;ysWvqdO~<8gfwWjnv|tC z>F&Q@wW<xfkQ`kwLi(OUouG{MHZ^K0!I&e+06+Oh!1%0UD4#6)B&= z``nN0G{ydfi6juFE*nQ=srFgEaf7z#zakhZrMDm(byQq_VH7tWltTYlN=oY4G9P+p+<^<4=s4n*Xxg_SIz(Q}^0KKI9uw{k^j_ zMOFA(?j7s1Mr0qqa&JjW$S_+=iVjMEHu(J#Edz5Awt)IziHVMWC}rYDyUnB}>NI<# zKLF`P%aUNM?4SlC+A^yMmOR|c&=!nOH55Aa=uc5HY+X!?0tTkvG_gKm15Ox~Fdtf! z`fTZO5?g@IeWGkTG9~8yHTCN&Lk+vj7dl+F{@7>+SWE(&arXi;Zjn*I0Idx5(r zbR1%nDN3=hhhP{pPVCh2HZ${01N6PkQx$>Q(DEo>A%8Z~_LGqHX1M7Gjec|ij00W? z9s6Kw@f=_mO4WdgEnN*>o&7EeiP-CGIFqfDejyp1b3)v?6sQ@Gj-R`2r}f2WexHb_ z6ZU%agw8y_oqjXM+pQcNXZ89xhKiVi+NZ%!q>v(W#4~)Vp_WWSzPEvw4tg4jmXe!1 zEGE`fmefFeS(Q2mtHEesd0sEZ6^kHPJVgno!@w>h{rP(O<}eGR=yj5=&q36W?!CM* z`T?8NFbF4%Q1FzoiPn2^0xuW6^vgKCZXS>j=P?+byDBuSwi30O@hM`Io%)pfri+5Y zk%Z?h`B58R6PkE_gO=;U7vY~AF~T-80ixA6;^E|7e4im?11VycOmIh|iAjMM&)Qxl z$`YSoFv8BFNMJ67@T|c8x*Ad*k-(%4z-99%C#jV4MOPh6)@gBs{l#(bL#1_bLeuOJ zMadQ+Zrrzkl8n4jImhy}u&$}}svi0_5VZC!&wT8_=OsUE|1OgTR~ncer@z#3)YaYQ*?#WrxabQv(>BcRlJOplPc+`&byEqu zSe;z0F6W5sjW{-LbqNo*Y8cT}Bb@z~yI9DLEWN1oWe59z_@ipp zLstkhb7SjtG)K5p8T!^o*bGI%V$0FtNoMIlpau7F(lUkM%3oy@Jgwn}2mJpbVsv zYUL1a#7{fVYxYs?5QBa>1dF#(YQhTY<{rU_!+3F{U(;V?U7m}mCSv`f%>`KB;P!kz zg}Xt&!&;r5fbo;^e-?ODNKx7pBCMv5#Q=9?Z@%340qiWT_HhME+>aGnjB0hEx9mrZ zIf7LeNftg$1IABOR1cnJ&qr69m%R&KzW)Zx^)>eOoLqsYeKH1V{eGaYc$UzK%N(u$ z8MW|qaibkx?1xOd_cB&)9AYUp%jDLJF*q<0_OQFoT5UI4v*lLd)Ct+4ihEmPIr4mv zT3^spo(F~bh_%?WfMyxf;(r^`B|7|(5^YW+;rXpVt6mmzj@;~|;6wte-q-K0Yp_`( zJ;nRYI;I)n>4&$m?y~DBjcs??K>Dz|ef!&(xG(F?8Kqn13naL`$21$cW{lQy$eZwAn~26|FVhQ)&w*@?T;1__?~Q@@RdnRBxDK)W08rl#y?Q zHt6|Y@KA+1?$y3I0R|XEtQ~@(1K^*1>=HW?FL;Gfrn>2X2{)Pq4;z=^FGBy{4 zfjcO)iMEO5!G5H(2PbPs`79(Gg*|FPTSn##qwNj0e}D*6oWkc+NHkG`3@+qjcLh$g z6T8BcIDh5pr@KhK)%}u<*)k0r-o>NFVRBhT%@AQ;O!g*VVZ;OS{t5M+4pX8jTz!fP zUT2PV&V>2OR%#=s5Hrr=k$-fVU2zn>Jc>pmcCD1_#PBHiv8-}q9RPJ1Z_sK3Y# zA27HxB}mSrs&X~!2ZtFIUal{SOK+u$jHlZ&q4q)-v2%g|cEWybWn=rLZAxos;_5{; z%tB8~2Eq>yW6O^wzAD|gfDOFqkjc0CGjmeiY_{XOG^?&-Q6$HP^*dnBu3^{n^e50%;zmk zwc;)Dh{=LBWv_i3!meuG>@sfew~lS&oWM4(iks3~-(?eX81YZI(rwzn7q8e4y$L78 z`4+D#Dl)qaN59RF&-9O!cN;@5)2r>hJYeq;n$uAs@2Z01pYA*HDdD&aosaAzKvx8E z;z|5zUuN5_;k4*HQvgTq^b((I=_2NFM6PVep|fAkTQCl z&iA*FJ-;u>J+8DJ8E=4sJW@%+GeXvl%);76DFJM__y<;;3%*z(2_yT_d4Q#h(SXz0 z6TG{`sFOjtl5o`&3Ged>L{cVrz63hs^eEUZ6D*s7n5fFr7<#%-3rYTGcaKbhthnSZ zvMn0>usnD8z?m8QpU?lM#i}y)Z0=qUB#J$xgB`xiv%~V#F$kHg5_n%lgUE{WATED9 z5Z37T+_QJI?|5hYlFr(_elO;+!WHpPS#rxhmNpG5SS98N{B=1haE1SAlEl)d)k7SZm;xWjT-*RY{tG zN(H#)Vd*7mhLj(sb8{+v^nR8Rgdr@)pL~OMvWo+jlthKWH_7rkcSK^o7TpqSt26HQ zv9Lq=2#t$3r9$CiI%#aWbC8{!1MS>1 z5UZ`lXyO^rgtvE2oaic!Qw{AkN z6!GotTQ_1whY#DPT1GVMS4Yz^iWhhffQ4ra^;Tj+mN*O>C&wdZ3%T64jdoSXNnenO zea@%o;r3TW=&=scbNiVI5=Rkvf{pY+tp_o;Bx zO6NISp=xBP=3DE}=6hZD-a13GU(KY^;wTpDEg?TV(7+$NWG6Sfq@BBZplR|%&spuHx|0@}%*`RMavjdMgtNFi z?6c5OqGt41{y;bM{lvvxTU*vUA|^2TBJ}1zFE$41^u5>}a$N#EgK7jS@T2(%2G>^u zMG{EO*o?=r8ydTN9h#9jGMgK))f7{VAG*3k1F=q=N+F!wD9-r_E9JenHtxz0L8~e# z@$=-I0ZOD$dj8A3`-#!zl1viIELeCBWT;@>EhpCqK4;Vcq!0WX#j%{b>K6JJiZpFg zLP9l+3-z;xQ$p+im5E|cX$26GUzO;&G zuvt5i4v?-+;G|D!5DyOUcJ{ngAt%H4hV-q0pq0g`pm8KZle(5EB=8fd^L-()yOD^# zcq4HkpO)Razp>a{d64bm$vt2#!sCF0X;V_57i1&Q^k5mQoB|V z<(W__{wk;H!y4gdG!mfR-L1s1{wXPs7V}I*f~fHizL@>v=egE;{|Fp0oP2B3DGsrcsO zmPqMH9F@IC++&PuoY2+6lP~d~ZW&j^aaF|~zOE1X+;!6}6HgrY=xS-#(z$}T{tbV@ z>=beW^pmP-9yg&@(NeWi!rjRfO+zyM)>5|4tnk1d8+ez;yd#xM znmNhT$l%P&D%+dLk>Wa$Q!3^Mn5S5aa?m{%g`Ky@Cq~JA5=UvjEDE+oma&EF1bEdv z&`!H>_o}HyFr4)2gsQ>*``6L#I43*9KSz+?morzu{~h`t6(dL&;hE-9;`#3^Ej2yW IT4fCOKM4~1IsgCw literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/docs/yaml/Dockerfile b/vendor/github.com/moby/moby/docs/yaml/Dockerfile new file mode 100644 index 0000000..059b97a --- /dev/null +++ b/vendor/github.com/moby/moby/docs/yaml/Dockerfile @@ -0,0 +1,4 @@ +FROM scratch +COPY docs /docs +# CMD cannot be nil so we set it to empty string +CMD [""] diff --git a/vendor/github.com/moby/moby/docs/yaml/generate.go b/vendor/github.com/moby/moby/docs/yaml/generate.go new file mode 100644 index 0000000..ea5c00e --- /dev/null +++ b/vendor/github.com/moby/moby/docs/yaml/generate.go @@ -0,0 +1,86 @@ +package main + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/commands" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +const descriptionSourcePath = "docs/reference/commandline/" + +func generateCliYaml(opts *options) error { + stdin, stdout, stderr := term.StdStreams() + dockerCli := command.NewDockerCli(stdin, stdout, stderr) + cmd := &cobra.Command{Use: "docker"} + commands.AddCommands(cmd, dockerCli) + source := filepath.Join(opts.source, descriptionSourcePath) + if err := loadLongDescription(cmd, source); err != nil { + return err + } + + cmd.DisableAutoGenTag = true + return GenYamlTree(cmd, opts.target) +} + +func loadLongDescription(cmd *cobra.Command, path ...string) error { + for _, cmd := range cmd.Commands() { + if cmd.Name() == "" { + continue + } + fullpath := filepath.Join(path[0], strings.Join(append(path[1:], cmd.Name()), "_")+".md") + + if cmd.HasSubCommands() { + loadLongDescription(cmd, path[0], cmd.Name()) + } + + if _, err := os.Stat(fullpath); err != nil { + log.Printf("WARN: %s does not exist, skipping\n", fullpath) + continue + } + + content, err := ioutil.ReadFile(fullpath) + if err != nil { + return err + } + description, examples := parseMDContent(string(content)) + cmd.Long = description + cmd.Example = examples + } + return nil +} + +type options struct { + source string + target string +} + +func parseArgs() (*options, error) { + opts := &options{} + cwd, _ := os.Getwd() + flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError) + flags.StringVar(&opts.source, "root", cwd, "Path to project root") + flags.StringVar(&opts.target, "target", "/tmp", "Target path for generated yaml files") + err := flags.Parse(os.Args[1:]) + return opts, err +} + +func main() { + opts, err := parseArgs() + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + } + fmt.Printf("Project root: %s\n", opts.source) + fmt.Printf("Generating yaml files into %s\n", opts.target) + if err := generateCliYaml(opts); err != nil { + fmt.Fprintf(os.Stderr, "Failed to generate yaml files: %s\n", err.Error()) + } +} diff --git a/vendor/github.com/moby/moby/docs/yaml/yaml.go b/vendor/github.com/moby/moby/docs/yaml/yaml.go new file mode 100644 index 0000000..575f9be --- /dev/null +++ b/vendor/github.com/moby/moby/docs/yaml/yaml.go @@ -0,0 +1,212 @@ +package main + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "gopkg.in/yaml.v2" +) + +type cmdOption struct { + Option string + Shorthand string `yaml:",omitempty"` + DefaultValue string `yaml:"default_value,omitempty"` + Description string `yaml:",omitempty"` +} + +type cmdDoc struct { + Name string `yaml:"command"` + SeeAlso []string `yaml:"parent,omitempty"` + Version string `yaml:"engine_version,omitempty"` + Aliases string `yaml:",omitempty"` + Short string `yaml:",omitempty"` + Long string `yaml:",omitempty"` + Usage string `yaml:",omitempty"` + Pname string `yaml:",omitempty"` + Plink string `yaml:",omitempty"` + Cname []string `yaml:",omitempty"` + Clink []string `yaml:",omitempty"` + Options []cmdOption `yaml:",omitempty"` + InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"` + Example string `yaml:"examples,omitempty"` +} + +// GenYamlTree creates yaml structured ref files +func GenYamlTree(cmd *cobra.Command, dir string) error { + identity := func(s string) string { return s } + emptyStr := func(s string) string { return "" } + return GenYamlTreeCustom(cmd, dir, emptyStr, identity) +} + +// GenYamlTreeCustom creates yaml structured ref files +func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsHelpCommand() { + continue + } + if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil { + return err + } + } + + basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml" + filename := filepath.Join(dir, basename) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.WriteString(f, filePrepender(filename)); err != nil { + return err + } + if err := GenYamlCustom(cmd, f, linkHandler); err != nil { + return err + } + return nil +} + +// GenYamlCustom creates custom yaml output +func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { + cliDoc := cmdDoc{} + cliDoc.Name = cmd.CommandPath() + + // Check experimental: ok := cmd.Tags["experimental"] + + cliDoc.Aliases = strings.Join(cmd.Aliases, ", ") + cliDoc.Short = cmd.Short + cliDoc.Long = cmd.Long + if len(cliDoc.Long) == 0 { + cliDoc.Long = cliDoc.Short + } + + if cmd.Runnable() { + cliDoc.Usage = cmd.UseLine() + } + + if len(cmd.Example) > 0 { + cliDoc.Example = cmd.Example + } + + flags := cmd.NonInheritedFlags() + if flags.HasFlags() { + cliDoc.Options = genFlagResult(flags) + } + flags = cmd.InheritedFlags() + if flags.HasFlags() { + cliDoc.InheritedOptions = genFlagResult(flags) + } + + if hasSeeAlso(cmd) { + if cmd.HasParent() { + parent := cmd.Parent() + cliDoc.Pname = parent.CommandPath() + link := cliDoc.Pname + ".yaml" + cliDoc.Plink = strings.Replace(link, " ", "_", -1) + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + + for _, child := range children { + if !child.IsAvailableCommand() || child.IsHelpCommand() { + continue + } + currentChild := cliDoc.Name + " " + child.Name() + cliDoc.Cname = append(cliDoc.Cname, cliDoc.Name+" "+child.Name()) + link := currentChild + ".yaml" + cliDoc.Clink = append(cliDoc.Clink, strings.Replace(link, " ", "_", -1)) + } + } + + final, err := yaml.Marshal(&cliDoc) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + if _, err := fmt.Fprintln(w, string(final)); err != nil { + return err + } + return nil +} + +func genFlagResult(flags *pflag.FlagSet) []cmdOption { + var result []cmdOption + + flags.VisitAll(func(flag *pflag.Flag) { + // Todo, when we mark a shorthand is deprecated, but specify an empty message. + // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. + // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. + if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { + opt := cmdOption{ + Option: flag.Name, + Shorthand: flag.Shorthand, + DefaultValue: flag.DefValue, + Description: forceMultiLine(flag.Usage), + } + result = append(result, opt) + } else { + opt := cmdOption{ + Option: flag.Name, + DefaultValue: forceMultiLine(flag.DefValue), + Description: forceMultiLine(flag.Usage), + } + result = append(result, opt) + } + }) + + return result +} + +// Temporary workaround for yaml lib generating incorrect yaml with long strings +// that do not contain \n. +func forceMultiLine(s string) string { + if len(s) > 60 && !strings.Contains(s, "\n") { + s = s + "\n" + } + return s +} + +// Small duplication for cobra utils +func hasSeeAlso(cmd *cobra.Command) bool { + if cmd.HasParent() { + return true + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsHelpCommand() { + continue + } + return true + } + return false +} + +func parseMDContent(mdString string) (description string, examples string) { + parsedContent := strings.Split(mdString, "\n## ") + for _, s := range parsedContent { + if strings.Index(s, "Description") == 0 { + description = strings.Trim(s, "Description\n") + } + if strings.Index(s, "Examples") == 0 { + examples = strings.Trim(s, "Examples\n") + } + } + return +} + +type byName []*cobra.Command + +func (s byName) Len() int { return len(s) } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } diff --git a/vendor/github.com/moby/moby/experimental/README.md b/vendor/github.com/moby/moby/experimental/README.md new file mode 100644 index 0000000..3f0b467 --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/README.md @@ -0,0 +1,54 @@ +# Docker Experimental Features + +This page contains a list of features in the Docker engine which are +experimental. Experimental features are **not** ready for production. They are +provided for test and evaluation in your sandbox environments. + +The information below describes each feature and the GitHub pull requests and +issues associated with it. If necessary, links are provided to additional +documentation on an issue. As an active Docker user and community member, +please feel free to provide any feedback on these features you wish. + +## Use Docker experimental + +Experimental features are now included in the standard Docker binaries as of +version 1.13.0. +To enable experimental features, start the Docker daemon with the +`--experimental` flag or enable the daemon flag in the +`/etc/docker/daemon.json` configuration file: + +```json +{ + "experimental": true +} +``` + +You can check to see if experimental features are enabled on a running daemon +using the following command: + +```bash +$ docker version -f '{{.Server.Experimental}}' +true +``` + +## Current experimental features + +Docker service logs command to view logs for a Docker service. This is needed in Swarm mode. +Option to squash image layers to the base image after successful builds. +Checkpoint and restore support for Containers. +Metrics (Prometheus) output for basic container, image, and daemon operations. + + * The top-level [docker deploy](../../docs/reference/deploy.md) command. The + `docker stack deploy` command is **not** experimental. + * [`docker service logs` command](../docs/reference/commandline/service_logs.md) + * [`--squash` option to `docker build` command](../docs/reference/commandline/build.md##squash-an-images-layers---squash-experimental-only) + * [External graphdriver plugins](../docs/extend/plugins_graphdriver.md) + * [Ipvlan Network Drivers](vlan-networks.md) + * [Distributed Application Bundles](docker-stacks-and-bundles.md) + * [Checkpoint & Restore](checkpoint-restore.md) + +## How to comment on an experimental feature + +Each feature's documentation includes a list of proposal pull requests or PRs associated with the feature. If you want to comment on or suggest a change to a feature, please add it to the existing feature PR. + +Issues or problems with a feature? Inquire for help on the `#docker` IRC channel or on the [Docker Google group](https://groups.google.com/forum/#!forum/docker-user). diff --git a/vendor/github.com/moby/moby/experimental/checkpoint-restore.md b/vendor/github.com/moby/moby/experimental/checkpoint-restore.md new file mode 100644 index 0000000..7e609b6 --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/checkpoint-restore.md @@ -0,0 +1,88 @@ +# Docker Checkpoint & Restore + +Checkpoint & Restore is a new feature that allows you to freeze a running +container by checkpointing it, which turns its state into a collection of files +on disk. Later, the container can be restored from the point it was frozen. + +This is accomplished using a tool called [CRIU](http://criu.org), which is an +external dependency of this feature. A good overview of the history of +checkpoint and restore in Docker is available in this +[Kubernetes blog post](http://blog.kubernetes.io/2015/07/how-did-quake-demo-from-dockercon-work.html). + +## Installing CRIU + +If you use a Debian system, you can add the CRIU PPA and install with apt-get +[from the criu launchpad](https://launchpad.net/~criu/+archive/ubuntu/ppa). + +Alternatively, you can [build CRIU from source](http://criu.org/Installation). + +You need at least version 2.0 of CRIU to run checkpoint/restore in Docker. + +## Use cases for checkpoint & restore + +This feature is currently focused on single-host use cases for checkpoint and +restore. Here are a few: + +- Restarting the host machine without stopping/starting containers +- Speeding up the start time of slow start applications +- "Rewinding" processes to an earlier point in time +- "Forensic debugging" of running processes + +Another primary use case of checkpoint & restore outside of Docker is the live +migration of a server from one machine to another. This is possible with the +current implementation, but not currently a priority (and so the workflow is +not optimized for the task). + +## Using checkpoint & restore + +A new top level command `docker checkpoint` is introduced, with three subcommands: +- `create` (creates a new checkpoint) +- `ls` (lists existing checkpoints) +- `rm` (deletes an existing checkpoint) + +Additionally, a `--checkpoint` flag is added to the container start command. + +The options for checkpoint create: + + Usage: docker checkpoint create [OPTIONS] CONTAINER CHECKPOINT + + Create a checkpoint from a running container + + --leave-running=false Leave the container running after checkpoint + --checkpoint-dir Use a custom checkpoint storage directory + +And to restore a container: + + Usage: docker start --checkpoint CHECKPOINT_ID [OTHER OPTIONS] CONTAINER + + +A simple example of using checkpoint & restore on a container: + + $ docker run --security-opt=seccomp:unconfined --name cr -d busybox /bin/sh -c 'i=0; while true; do echo $i; i=$(expr $i + 1); sleep 1; done' + > abc0123 + + $ docker checkpoint create cr checkpoint1 + + # + $ docker start --checkpoint checkpoint1 cr + > abc0123 + +This process just logs an incrementing counter to stdout. If you `docker logs` +in between running/checkpoint/restoring you should see that the counter +increases while the process is running, stops while it's checkpointed, and +resumes from the point it left off once you restore. + +## Current limitation + +seccomp is only supported by CRIU in very up to date kernels. + +External terminal (i.e. `docker run -t ..`) is not supported at the moment. +If you try to create a checkpoint for a container with an external terminal, +it would fail: + + $ docker checkpoint create cr checkpoint1 + Error response from daemon: Cannot checkpoint container c1: rpc error: code = 2 desc = exit status 1: "criu failed: type NOTIFY errno 0\nlog file: /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log\n" + + $ cat /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log + Error (mount.c:740): mnt: 126:./dev/console doesn't have a proper root mount + diff --git a/vendor/github.com/moby/moby/experimental/docker-stacks-and-bundles.md b/vendor/github.com/moby/moby/experimental/docker-stacks-and-bundles.md new file mode 100644 index 0000000..6ed07e3 --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/docker-stacks-and-bundles.md @@ -0,0 +1,205 @@ +# Docker Stacks and Distributed Application Bundles + +## Overview + +Docker Stacks and Distributed Application Bundles are experimental features +introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of +swarm mode, and Nodes and Services in the Engine API. + +A Dockerfile can be built into an image, and containers can be created from +that image. Similarly, a docker-compose.yml can be built into a **distributed +application bundle**, and **stacks** can be created from that bundle. In that +sense, the bundle is a multi-services distributable image format. + +As of Docker 1.12 and Compose 1.8, the features are experimental. Neither +Docker Engine nor the Docker Registry support distribution of bundles. + +## Producing a bundle + +The easiest way to produce a bundle is to generate it using `docker-compose` +from an existing `docker-compose.yml`. Of course, that's just *one* possible way +to proceed, in the same way that `docker build` isn't the only way to produce a +Docker image. + +From `docker-compose`: + +```bash +$ docker-compose bundle +WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring +WARNING: Unsupported key 'links' in services.nsqd - ignoring +WARNING: Unsupported key 'volumes' in services.nsqd - ignoring +[...] +Wrote bundle to vossibility-stack.dab +``` + +## Creating a stack from a bundle + +A stack is created using the `docker deploy` command: + +```bash +$ docker deploy --help +Usage: docker deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + -c, --compose-file string Path to a Compose file + --help Print usage + --with-registry-auth Send registry authentication details to Swarm agents + +``` + +Let's deploy the stack created before: + +```bash +$ docker deploy --bundle-file vossibility-stack.dab vossibility-stack +Loading bundle from vossibility-stack.dab +Creating service vossibility-stack_elasticsearch +Creating service vossibility-stack_kibana +Creating service vossibility-stack_logstash +Creating service vossibility-stack_lookupd +Creating service vossibility-stack_nsqd +Creating service vossibility-stack_vossibility-collector +``` + +We can verify that services were correctly created: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility-stack_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility-stack_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility-stack_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility-stack_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility-stack_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility-stack_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Managing stacks + +Stacks are managed using the `docker stack` command: + +```bash +# docker stack --help + +Usage: docker stack COMMAND + +Manage Docker stacks + +Options: + --help Print usage + +Commands: + deploy Deploy a new stack or update an existing stack + ls List stacks + ps List the tasks in the stack + rm Remove the stack + services List the services in the stack + +Run 'docker stack COMMAND --help' for more information on a command. +``` + +## Bundle file format + +Distributed application bundles are described in a JSON format. When bundles +are persisted as files, the file extension is `.dab` (Docker 1.12RC2 tools use +`.dsb` for the file extension—this will be updated in the next release client). + +A bundle has two top-level fields: `version` and `services`. The version used +by Docker 1.12 and later tools is `0.1`. + +`services` in the bundle are the services that comprise the app. They +correspond to the new `Service` object introduced in the 1.12 Docker Engine API. + +A service has the following fields: + +
+
+ Image (required) string +
+
+ The image that the service will run. Docker images should be referenced + with full content hash to fully specify the deployment artifact for the + service. Example: + postgres@sha256:f76245b04ddbcebab5bb6c28e76947f49222c99fec4aadb0bb + 1c24821a 9e83ef +
+
+ Command []string +
+
+ Command to run in service containers. +
+
+ Args []string +
+
+ Arguments passed to the service containers. +
+
+ Env []string +
+
+ Environment variables. +
+
+ Labels map[string]string +
+
+ Labels used for setting meta data on services. +
+
+ Ports []Port +
+
+ Service ports (composed of Port (int) and + Protocol (string). A service description can + only specify the container port to be exposed. These ports can be + mapped on runtime hosts at the operator's discretion. +
+ +
+ WorkingDir string +
+
+ Working directory inside the service containers. +
+ +
+ User string +
+
+ Username or UID (format: <name|uid>[:<group|gid>]). +
+ +
+ Networks []string +
+
+ Networks that the service containers should be connected to. An entity + deploying a bundle should create networks as needed. +
+
+ +The following is an example of bundlefile with two services: + +```json +{ + "Version": "0.1", + "Services": { + "redis": { + "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", + "Networks": ["default"] + }, + "web": { + "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", + "Networks": ["default"], + "User": "web" + } + } +} +``` diff --git a/vendor/github.com/moby/moby/experimental/images/ipvlan-l3.gliffy b/vendor/github.com/moby/moby/experimental/images/ipvlan-l3.gliffy new file mode 100644 index 0000000..bf0512a --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/images/ipvlan-l3.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":447,"height":422,"nodeIndex":326,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":9,"y":10.461511948529278},"max":{"x":447,"y":421.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":12.0,"y":200.0,"rotation":0.0,"id":276,"width":434.00000000000006,"height":197.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":275.0,"y":8.93295288085936,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":14,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[82.0,295.5670471191406],[-4.628896294384617,211.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":285.0,"y":18.93295288085936,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":15,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":316,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-204.0,285.5670471191406],[-100.37110370561533,201.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":203.5,"rotation":0.0,"id":267,"width":116.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":28.93295288085936,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":290,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[217.5,167.06704711914062],[219.11774189711457,53.02855906766992]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":57.51435447730654,"y":10.461511948529278,"rotation":0.0,"id":246,"width":343.20677483961606,"height":143.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":55.19999694824217,"rotation":0.0,"id":262,"width":262.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Unless notified about the container networks, the physical network does not have a route to their subnets

Who has 10.16.20.0/24?

Who has 10.1.20.0/24?

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.0,"y":403.5,"rotation":0.0,"id":282,"width":442.0,"height":18.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers can be on different subnets and reach each other

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":252.5,"rotation":0.0,"id":288,"width":238.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Ipvlan L3 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":172.0,"rotation":0.0,"id":290,"width":207.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":3.568965517241383,"y":0.0,"rotation":0.0,"id":291,"width":199.86206896551747,"height":42.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Eth0

192.168.50.10/24

Parent interface acts as a Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":29.0,"y":358.1999969482422,"rotation":0.0,"id":304,"width":390.99999999999994,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

All containers can ping each other without a router if

they share the same parent interface (example eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":24.0,"y":276.0,"rotation":0.0,"id":320,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":48,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":316,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":317,"width":109.44000000000001,"height":43.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 

172.16.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":318,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":319,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":300.0,"y":276.0,"rotation":0.0,"id":321,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":49,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":272,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":273,"width":109.44000000000001,"height":44.0,"uid":null,"order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":310,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":312,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":85.93295288085938,"rotation":0.0,"id":322,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#434343","fillColor":"none","dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-191.0,222.06704711914062],[-80.9272967534639,222.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.0,"y":25.499999999999986,"rotation":0.0,"id":323,"width":135.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Physical Network

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":53}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#434343","strokeWidth":2,"dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"orthoMode":2}},"textStyles":{"global":{"face":"Arial","size":"13px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117032939,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/images/ipvlan-l3.png b/vendor/github.com/moby/moby/experimental/images/ipvlan-l3.png new file mode 100644 index 0000000000000000000000000000000000000000..3227a83ca1541ec68e06b0aa105e22fdf5ae9e6f GIT binary patch literal 18260 zcmaI6Wl$YmumyT>5AG1$-Q67ydXV7m8r+1pok>`#RfTA-*)bij0K-0C)TQ9yW|^N>XfP5?S3O{d6!$!B8y2koK&0EoKg z14FAa8wP53UX5(L=%bDVL&q0#^aP6T-8%nWjl|N@0|HYEm_x>-zYG8X+2Wz0n@ zzI=J7-uZdIPqutrz|x1Ai?3;VhTlje(*-0XDKBVe#9*5bMa}EV0b?Ns0MG@0Ui2`_ zyA_7pwKNt+WmVFd);j7*KV7ZMKpvgIJVpRB zk%B`-b-EM-07kw-z$Bs2hOsF>sATd?h@Xx{3=r`1M{l@jXOaH_z4MhL$3kWlz_!oll8RWh!Q%ROqdsGnb z(TIUiKA{RJOZ(q_NwQH7TH06B6ls}!%=IAY1^)X7pr@=O1Q$j*e|F|%#TA_w-Y;kC z)_`zJ#`9NCCW{OdMugwWB?QLTkvyi0WxiQ_#T0~ZQXY15j6v3s1PYv2ei$9RP7duU zqHT!9#~0bi?!lGe=?X6%oIH~_-Z`^`G@SyH#Q}0r7>^JBu!Keq-(wCBenOz|2O&fP z%g(=xOm2N&(JGPhAZ&+bB(oa!l?)rZAHYn!%X6=Q?12*Z=-5I<(n;+L6OzT~%aEvO zgv8_ogMJaz{p`T;w{DzN-7~1pVqIlvWlRHg>_wsK z+@e+a#w-GRB!9D%P4X!uI$n1QEJo%7!P9sat=Yi+8?`ar93aXXi7MWN=hA*yn@Ikk z4&`q8r987}hlINhTR;^mdxMx-+>@>TP$>)3LgwW~Dq;mQ#%#p8{3(LFu9AB9%YwlX zX^Jt4C~j&qDj?O8cx?i0hwDmDi>9@%F0a|XWrlKxLu-fY0YY#dF;{(Xz#B^()XVp~ zA+#qOWF3Pblb9U2Xb05_fp~})4vZ# zE$5YiKsXBY*+b>07f3O)+SciM@ArFu`gqLyfO499UTM3S_#0RA)ci8kaC()t{CdH{ zUsheO{k`w`&G72YZ?(N1(zlZBufdwTu4o5M#we0nv6WmAo=8Wu(MTcf4Y?&QG)0oU znn9uhM0uu(8;#a5mXez3LPqk*Z#>J&vJ<;CvtdznTbY7$yAnJkFbEtY;(+bo<^Jk( zA>&D(D;*-QXyd%8x~=Fqj7_gcZx1elO`pe&sgB2$F=5#aF7~q%`{`%x&~7&!Hn|YE;!;Q?f*HvOpP-Tm=35u|Ah?>^rxk9yuVL1k zkP^ZRJesL>xg0b!OlMvo;5~)DutD19qHL%dXWPo&|3$Fk!83qR$Kc!-p}nmF?BkD*xxZZfVH1jrnX&-rzd4 zB9>wgQ1hSXV;Ch*6{l5Ws3&5^Z!QG)EfD;%2R=$vYzyZBIc4%=<%&9Uj*)QAJ~z^( zFcF@R^!oic!@aLO^ zn?UzwRe>!I+wm%NXK&$s=!@y=yA~n?K4GTE9nLwYxBrB5XVLJwnD6h{Z+ixERd5+Q0=_af_Z{pPmwPMNNVo= z^rA8x<0ts<5;8ZH)PoX%ne?R%?CbFx1>|Ft)?W5U-c9Y~dz5En8}m-QISYB@`4mY4 zW9t(O60{?YD}w}=snv7$F%1kM6P(L@8S?sxGEPWj|6uQpuv}y$PXwxizk|Yb$fH2j)Be<57<|=?) z7=toefVrv94SujOJ#Sg}yqaIWaY&Y??*6k$VyYbK8$AKc3jb%HTNw$Jb39d2mjxM@ z=d=|(v4Gd_E2cLLyx|}PT6r4s!8}pN-omkWg$xi>=)Dcr#<%ZPVn6f(SlFy~AHLY}=vShNZvs(>Pq`tkJX`n-95d-;rIln`$j| z`F``ctP$Lp%wj`vVKkAJffxJ%mR%E**F(o4QG1_%i2C!cB_M0?~vd%-A{K#TYrfuY6jxF_V7z1G^W^5 zTkPJ+SUdJ+k0#}#=jb4>-k?DVlkLplLG^rRCC{vgKNjI2DZP{(l3auvN}-9e*HX3E zY|ZkKA5d6X?|Deh9z~GQ@vQf=zMECF6tFx~Cf96uRL2Dyr0O3vRLGz;L)IUN?B zsq%We9EPv;iZn;9Zw!hiW zLd0HaeQ06T&$dp7q5FmKY2{BO06lC!TZ_Ri!ge{a&lmz7vGuv+$C zbi2UsYwN#=!F=TImu$daixwP`59?9biqDER2Aw-gPW8$CFsdmt!!B>PT5w#|C)r}_ z2;_M;_b&-|Y=^zpH(i*;wxxzVY?Or(o&kELz~rcAsz}r4i~dI~HKcqu_#{jl=wi@s6Il?Ufv3kf$F~w?T zXS0y$z?7NWvchoB$7{tI#Ro0uTOhEjvHzIfn}xQ_e1hz7neMVY0$xKoF~@7A9Z6Uv zjoS!uqN>pUm?DoL+82J7uGh=@Y{gT@)8l)~nTHM}BDEce8f#*F&?`G>1XN}0*gkzG zYab%K;%OL+gr5|jq|5GB`|;AA>;DplY!-@Wne1udl)5|VjAOAF3mu&HBp4$r3eWWu zGbk%;Co*CYpfqG1SCJ$%SuWA1I_p-mXg+p95oO_zl%Vn>hFagYp4ls4X0;sVKXL|A z{9GC$LuGOv_`^xGS2dPKM`&)S#B4DZIYEPdjfYgGbCPpwzemhxK>YV6e{jy>o1q zL2V0HFsggj|2)H%xFm=oCa1Iz;*$uC=POKh$r;s;^A6XJ@FF^4mVHxM=l$BmB7biWn!)HR#8Sen(!Q7)C~Ladc&k82OJh>_J?`mN<))eYFWR$j6;7-lu|#i}e*ftl`m>F$2P~R$ zHbl2-&`;>f%_-Duu(o5oF#7o8cgir@y3)Tbnrqf)bYz%&RCY36ECGEAl%NaqW=Z(< znTQ{!!A7)0jX3!%h`PeC$J1#=EG>{YX zf7soO!SffH(yI~zflPj>oQCo@t-lf&W_Z92tQ&ZvF0L$8@B-K(#Ts_P^WjPyv0c~- z^`vN@8u~lK9P`oGLASHezt;*GG*p)HXp+nMY@%4K|HK%qF7io#l|7}S_i&*S7nhUsDyo3PDMlyrL`O?WgU7i!5kUL%_&l&#LbkI zec7`)NhjISeE>!LAyZl@GiVcMQl9)C{q3fOo4?KQB5WK?^6s`oooI@N)o8jP^LKL3hoE+ zf2>c?7s=!J#GA#Bh7D}hhj=I)B!AA^aDSP$wKz9iHAqj~_0J=vviS8#3ET+ThUMYT zu+U#iGfvpVV2rZ+^kxq3EvtV|=*j&3jf|iG5|z7@YE1rCg~eLD;>eYV0^CU4A3-FcXaU4u!CqR>5Hl zd6qua$)>X~Mo4r8?m$#l&>lrw+J|9rK_+EaV|k5i+CJw&@{6QIb9UcoXT-@`LqPX= z@2LgNU;N_jmXq4$R9!QMHq>S?1x%Y5foxeE*GN+gB0nym~A+YzVc9 zU;Eua>iH+B215L3o(BG*_|J$>^?I<$Ih^ZSZyAk`Z(hG^$j>pG>YAN9X-AWrU2TtR zy?oms@-**ya<3-)vVM0v^AHg&4w5HV!?=1k$Qyxe-u6<-NFj9hq`v|6kt#j{|E{9_ z5mekudG@#+N6?+Y0vt8yEfdyhoD&9CN=ba8bfU>rH|CGFbFw$K|t&85e9*)7!M`NgD<&I4y=aaT$oI`wC|^b5mEAEkDqraetCI@ z-+g*2Xo<5lR;PBwMnSn1%pzOnWL-(ZL&Iw(wL0CAAk_YhgLL`<(V!oE+=6=Cq$Q8} zEmZ3n?!2)Zfrt;EIlv{tLhb<7ooMUl(oJEccYX#@?z4vb9Fl(L`x=o-4NV0TQlv)f z_fz9`q{Lo`NhyWhg{gMIl)3%f&fl6e49qG)NS)IJFh~T3otpcK(}T#uIuQPS51t-! z3wF8b-KGC1rEQC79hVisWmDd*0kueu3I~dvL7>}j>@O;qcE^#3$%N4j3VJzuZ$n;s z%ZjphD|29+=ZGDyJL3a?L$ek3D}8+cDS|6VooLE%{DIgSYfX3A?K=ZFltvHJTz~oD zG`L-6cY|6tTYckE3K)(x;F%tbyne&%i3pus>yR1YWX^;>1bc-AB_ESPd00(M2c#!h zQ?E1L?8xmUSjDM5IPe2HM<726F*gMwG8Cpx(*808RW#Hdz0fRYWP<)+7;AL_d=b@d zR>CkiFt3y5YSHER+rM5oUi+zEBt$$EFf_3^V&p5e{{1kZ^1)paM(iYxRPCknIPGuq zMVh#Ym|YQu<8SVpX-77=!cuiq7Q&L(v~EhF{tGM%Au_474&ZX`qAZqOO%j#sI#6crI0s6gy-PogU(m^H|vAak_JK;TY^q>7aM_&(BRmQCRV)If8U`d z(#A2~)7(WLxgH8sWhf&*$6UF%8e1J?M4emU)3>jx+4)9mDI(yRdj8!bzoeR^@}&&W z8{XG7N90j6?hg{g$l=u2#ZJW6C8)4)hT4P<7vM62l55*lO4Xwf+gJql&_Zow2(yrW zWXZ@x%QKs>ISd}n{-$$%X!Jktd{Zh)2hlLlbZgOH?E zBTgBxF*YAQ6uK;4Z)c8hb-)H%|2@s$Bde$U`^#=GHBGz zCI0QztR>93kKu{T-Xq#iY*_%N3aL7i5p0W`x3?zBe7W@e>AZ0m@{ftO<%^T3<3sxZ zH}`TDzmjQywsB6-R0+MQf~477*(IqvA@DkQ4)|zsR|RG4lCU62or*MyO^jX->0X$1T2hUkIC#y*XSfYWY%=xE zjuW;-ifb}QEQVGzFmGhmF`TVaiKE%G)T&KHVj%ohxT-9Bt0&u^O3Ej*luP(^yms7S zOlW>GUo&d)6`7&g(NG7lSN;LQ>-1?J9heLiODulF<0$vH2Ko!`w9$nZ4=$#O|eAS^G53K{9f1SzpXVlAG{i-8;oMhzcstd1R;i$4q{(g5JYjl;B-E>j5?8o_3$B@{1 z9Ghdc!YH9HjFl@-p35w5K-!5-2onQ;j8y_zS@m$uJA?m??<0ZPQkp~gBl^A5TDG?j z>`m11Dl_rq8sJN^*ik~>_F590nK<58GJl8t6r#GxLD3Kdbgo!5<6A>rU08s!-5mi+rT-1Lfs+3qYIZz-) zcm|_FsVog%T{Z9rP?%EyZ;qYAx9mh~XwX8t`)^^#NczBLsoWkv+A@FW)r~QmOz4k0 zqM-`k>IFKf*~FtfJ;Ql0!<3hRd7HzRMp?qd)03tDRo<+@63DwPP$yWrD}@S$~xxJ1zN+vzE@*%OxZUWp`mZKMoZiq^yniF^$sROaAF ztYBOQ5|TB~pOHCO$@^%13XwMcMp>b&PRk%T_JK;tLaJX7o0i>|ywKDkMc*;ZM<*H1 zmVBrLEaRm?0N;gc37I}}%kW6f;#;<{_DAj0I_bHHJ0#=&$2mVEPn#^?q!Q9ckfgX9 zP%3_Ur~@ifD|Mi#To~j{kJSLz*YfnV$c}jA#Ds2TB?CfxduQiO?diuVsJbd6F_5O{ zDVbIEmL2cxqCxdAp_T89V^8_gWXgj6X;(j%%sQVdXDd?&rdbwHK!s?wP{P+3vB=1w zo)2c)9&56^${ZZAUh?YU;Mm5&lAI)zv+MfCz%>VL5WGl><^OgrzsQX45!U>7m4B^U zr;mCfa+_~EI%T@<@@IN{`6B!zT%%-mJ{<6qlSt|Q=U-kmpvrVjv-N5ZKy8YhoSH4$ zZZo+v&13fqqkV06z#%gZXRMnIWcgy^!92-1k!~z2Zm6HOT1^AhO~CP;WQ~^xiB94Ur#J9jM?w@ilch`WKCRsASC>?DCfOaa`ep&u zN&XkzuJDr#I~N-Qe77>Ob?0Qx9avkA#8@Jsw4X~&kr3Jd2+civvp;#?43FNl zS9uG$1$mH#!-K`EWU{f&`wc*dDzcrjB=ktkE^|dimZ$K`6UB50%H80{eo0tsqLmEN zVAhq+mblRz+x520I3^1vI(e)c40TY9{Y!ODmq}-~m+~^{C@~j3`yP`XT}Lv*LrGMR z=FOzdYrIH;P^(>kh7>Ks>3+XGUreeR?9=*R1cn=Kg5t(3cqwX4F0mG998NYSJYNK~ zn!zPCoRaecBNzCtRzy8K-^sZR}uw}W|XmlCUlpf=V}11|M3J|&SfE` z*&G!j*JH{+vfw2?oAdT9(|YaOUYERd`gXohy>oi}$?I4Twz9=K@=OENOvUu+jtfs7rmT}Pfc`gbFw0>?M_G$M0*YhDffOcykB zSUeyro(Z$65FVG3E@<;H@a!vt7qH2B6hVoJ__2H<=3lf?<;0P2Q*ExWQ=C%dZ$FmO z@~j^uP4GIH>?F~Uz0p1|7v5S)rSqIg4&-itkqf`#ZdGNsZ~rk*p$MfSgoo{Mejn_19oQuxL+X}`WXEcb*=12X`_`VjL9XMyel0D{7 zU2&clVO>RzUkHoSrL;#GAnAdNu~H*ZU%~H^n&XFZpJLmpos&nv5$Ir?`ak;-z@v=W z11=VBE>@YanO6!+`;2-t;7omez=@^Rx&iLv8u(W4zd;>=rqZ)O;q}7tY#RowJqfzsqj+eb#%PvYZE* z@vCHaR4TT%LW)K${^f^GA{f6|vdnLIFD6Q@{;SIb{2JW{?nTNM_9~O^jW8ezEo~v- zq$ORU$H?>Gswm;)uhT*wJFL0U5brTnl4;poes2RxUQ*};>2o+swK-xXFGWH7|Yvj5j+X2zTGhw!g@4>Jt#ouOz@UE zZA;qQ2x6`(ZJ{R_=RhB9!5SE2|TG=Nssc!4(zW++6latl&==sNAlf znZp)cO|xFX&E++jiwoWu$E-B!@v_Pc^!dLSr_a3mvSzs0>t+_cDuXP^349L&*oG+_ zJx1Onro!7kZz7MdE4pcFqtjs+?VtDvB}*y<;Wxstw>x%}K7Y(yR5rRB&@;SN1eOy` z+o8s4q6>d&4ocSLT$QtVXjXrSBYQyT*C}5g-k&x{R}g)m0u)?-)POVAN?Pj*F-w;N zAGttpdZni+MLW(hq>_C4CvWRXTQ{B4IQZ};t zfCj7(Xt~%y8XVYU)JZDS?#t7JK=@^t_oIjpOq7f@w z;`p`C26J;3jE#E&7UUdR$$7tB83jcabI@LoQb~`h2GVOxeloh*Uh70KJZOXIt#Lsd zI}m>ETK!y7A{nYF)bd7VjkTqz=nlPo${!xY;d%~37Vem@V5w|a(c-lkrfA7IudAY( zo*8q*XzqbdOG67Z-QdmrO+FgYE+6(~5IRj9&IpNF;%Vo8f@JzD^SxT`%g-Fs`gkTh zutYedybNXt1dEgw01vV(#leHm=fo;8&#vHoz$YtPHo;Uf%p_*V5Z9dyg$JREiCNB5 zA zEO^oEVHdHWj%rt9em85-_mr>9sYRDkP5+LAM=L>>TlOCy!8dviR3$H@IkA%SqrL#4 z8vDx+B?7`b^L4S?8>SCWA6Gx3&(Fw~*VjrS@w?Z_*P354tSs&07D(zM z8aU4Z5@MaWMHDC_iNzTO4t#|f zl=2Iiop^~f*_>Buw0WVxpstXzI*OxmW(1Z436J9twion_%fJg(1_d5q*Yc#3d{nld z-I9UUN0XOkID$A?sM%`HC3X@!Sw`y8w&nA;ak)zInSarJ&Vje8IBPZYs>AHd2f`B= zzgX`{Yd@gzyU-?Nau}u8%*BLSlm}EDH)0J!7l1p4dcINO*vb@s`eqsBqg?7DNEt7> z-|kGbxW~A6t1fU%xyD^5y;|cHmcJ%ip?&$)kYP}u0JYJ{KV(A`DJ{pE3yt4 zM4ilIClq;rW6|wQE#coE#<94s;WMDS2+A^Ie~|u6gfpy#8Q{Vui5|Gf;a5yW;(sb@ zwJ`7nYOpMf6&8Hp5E6euxzALT1kdd?LTJ7IWO2o1ehJ{q>;*00~! z3yo>y5#01Qe-M!v&xyCJZ9(CfyG=xyPtJ`|YShOw){!epHxtNc$4`?y@Urm_H>1`z z^z2KF;odTo(a=dPUlNK}8CKQ*#viT#BTUVyDD4ETA%}MNa(NY9qcCRg2CW;%FzU9b z`Qf}OTbjCfLZN+%u^tu{=G3;{sQ86gE@*LW9yz1n91stI+B7??%F7(KXjah_0%M70w0uw~z4S2K#R-)Ts+r|rV(f=UjY4RAu;`m}7^e3b&LuKoUR{77?`+abD z(a%KcmnY=K_NEL{o^^kw&M~4H--%=mLBL8$yGMPB^4VqzN2Z|$y*ll<(b=jN;Ru9M zmT1ZMGzg!^QcR_#J85JW}m%S{w4tpau3Bh+2cu1 zyLR~C>qvZd-@Jw`!eB+Z!73nlPyb?=J~3Y&1TDbY=@ndKmcoIgmLr?LjVeuJGP>qp zs`eGyyeE0mw<*W*jMK4zfQ(S3AU1)>G>D&MB!ZN&-1BGhLmA~6uRkr)AdrRPXLs1- zb8xym-7@HLbi!xlaHq6#B~DfhD$Cax~r!r@4H&H%%O0YP-No3YXxj;+naW{ zJmtSD9W(Q7GuA6dspe?Pn*J#0wR0Jh7Fj=v_sW1%MOypm+r?<;jdLDqFs({EGc!~! z9N5)K>F)8ez&r)?JrSA#@&a2l^f(pyK$rGuny#z%6zK9)ewBy_?DBrd(?)hZNt%J= zz^^4WD)MkHtAoc~V;BT2i7sg9zOCD6Ez+gvUZTPklb56&U6qU*L(2-}ulpIZ!XS^rYQ83bmQzLzP!)w)4f*^(&}n7_KG z03c8?QdQc;z!FqMf2)MxO_M4in9ZT-L(0j;QeeL})>bzB38xLHt%at5)dlaG7e+s( z>AXiz&-FY7f0c19!qsLZwaC(>iL)VU#OtIAJNmRAO#SSwpM>!E5l>=t6>ezI*ML}s zZGOF;(bK&!1R%=ZOpd@P7(jOLI{NDtx`ENh<`35g$@+)bZq_-uOAoyhN`5}WikFcT zhnc0;p#I2~1wrr913&d&GE}WVc8(Sw>wqUSu4gG0|C(Yn;@*TI;9F*{pW!DvA%#|E z8Cz9z+zhlfA_}_0@7SnCg6xF!+qB3n$ypPypaL;e|{zyvFx*a$Kb>1g5n-WDa`yB;gL<7 z6StQ}cPUw*DSsrE=m*qW(7p)iGRJU+(GMRLN!Nwj_g)Zh?JV$VJ}p03ACgN*oj7J5 zBYojC@=|y}3_cvQoErfwactWc6*g^*giL3!&b)#Q!XhyCVA4^qeSq$t4I)^sKJ1;1 zC4N@1Xd?e*ME_1d`*#l+b`wtCo&gJ(q8M^^E|P=MH4n84Fb*u5ed}b6=0U(;EDbp) z^P`+cR+Zf{k*0uiHnPgHni7>G$q=^^rXT+~H#JT{Q~qC{TlwCzWuQgW-8S4M*NTTEr zaT`Y=a!jhHTVn1OGkuXyR_IUWRF(nnuiNskz$zj!Bs7!M(?6Q0`a}>Y!4N}@9hg@Q zWPxXd#T}1_x1s4b?k*Y`mTD=H)T3V;MdCg1a;MACU;odjaHt{IfJK<$H}$H|D8k}3 zA-`O%5$BH+y@R*kl1ZLwvdS17mdTd)IfZ0;dIg{m7p*hu4bPiL?@a;_>aSnxDn?pr z5|IVfBMZz(Pn_o!Z10Wgm_8SLU6S~ww0m>ET0TG}Di|*Es|LI)lR^JPh%+88%5lX? zCL3k%mf}<#N1R!bUGCpgo}M;Vi@yjb6qH%dLW712_cHCbSK19Cu>eI+erErHh-O-H zG^XT_)Pj|m=zE=gEsZy>Kp6#==aSJ3aFyH%jm-~nl@tkt*#ljYXjmw=k@SR04BgTU zy&ZggKYC^cIr-hxZ$_A|^OyBi>GAd2ThZXS$6yAwshKJEcH~NWWYl$}*RZEF%p!4O z6k|tsWrOX#mFZn4DbHooDZkrb00sQ%{f~wox^)j(PRYv0yB&kX3Fk0)l2z=?#x*m~FXEGfqXoN?@|qa^2Ng$%=Rhy1@K2>B#_6Sf~uG zv?Wpzy38r~U4*S;-jD#v$nbg~)Ds z-V1jS38MSa$3U!PWOv>AG!KB-3!SVD2~@J&Hm$GJ606ezHEt5R!^dP*Sue2R5LlI2 z1-LyCs3tiKBP~1~b7XB;$r2_FqN=cl0N%;V9|aaY?B|!eYOM0-ZbpIf8n<-@TIxp9 z^7c-33NeU<43x4ZFdk1!;pIA|A}N-~J>BmGIIg$RP_gtk#`@VJN5h?>C0I{^b3N~1 zL}T7Y_*AY`dvMB{*^gQSaJ{(MhhL$y<$292A*UFozBZM3~(;sZPVTPX` z;ztmP1_t8Lp6k=iJq^y+;yra(H3?$I`wt!P&D$Rqaw9m~zrI7yN>tnfofCm)94zL< z{8-Whkl46_vzx--yXbF#KUFaL+AsF%slVT0W>T-qm6{TrH%TU1R#Dz#&sIqI^kc9q zw;ri=0GpQBmuLj!F)rno96#NBMH7K1mIV&TBdM=`PwA8(&&;y*v!;cQ2^g}>|7IK& z57obBP%K%COCx4G=d{oLt^#C||MTFtmskAJG=zLftXu-gq5R2!)-dyNzsq;AhpJ*K ztAu{Ph1kAg!;d3$9)mK%N>LoaJ6GW~tMF$LmgWBjvKBQoRUp>PGL|Yg#ahkBU^EbI zCN=B-|3LY1ADW^>m0j6H>nhiv@Dh%uM4VmO^uIyUY7_l5?Qaz;Qx~5a&@|NyX?l; zPWS{aMqby)>DCcpg25VM8%gPd{ z`i70!YNM^ddIOqTtT0OkHEjp@<;h0ZO4uM;keT|rp-APLsivz)9fl6|zTY{|<3`r2 zp2t(8iifGA!(nH(*QDE|&z9Gt;4Rep%U|4mZ@RU#qedH3g)#NR|G8wRR*hT!yYb=O zXDa#S{z1Os6_P4%Hl<$e#142Zt zoR|#2%+LVsMg}7bni>hQ{pP8=FrVA45RMiq8$yvnSL*tL1$I(QfS%M(yfP@iS1AWP z2lIKKPNAM=3+>*#(-?K6gtfuQnxk1THy7?{9n%Sd!kIXmuYZMOhTWt37B?yU#B7r? z=G^ACYZRx!PSFSS6NzhE# z@p9cd3|MUYhbKhOR>}1xDRPICg~677VMF>KM{yOwwG8jd7oE(Rq;LFL?168HD!j6u z)d2J5o;$D6xnA0_@ZFBP_J%DqW3f8XN|uy@{jn%Mm#`VpCJ@E4D1=!QIe1d?Yy(Xc z^!Zww#2ur07i<<=5|fSP_Bw?qoU{lGBg;fMyhdql2A=R4Z6;84N?>%8R+{`C(sL!izbn-3vm$vn0B+f&fsN`3e9f{dyAlRtYQ)7R&;f zo*r*1v&TM6$SXV;|8_Its9kt?6T)TK)NxUS7*1tdO>{+`#NYijV*`DvbsCLSv@;ES zH;(!Dz+Rc`LkCBnZ4O6IIJ}Ij(-iK0oG7sxy3yt99q~GzzL(svlWZs2^>>VrPdQPH zpw8bvR!qC&;aIhf*u`*>;K?rINRW6-(G<<#iDN{W6j1z+D--bGVOxfI!Jd?*GG@HGRC=q%kZm^;YZa#SvWfyxZylM?9sygkg0xe-)JN$i~3j4=>q5^3lW_ zeKCy?#k?>FCGd`ycvAk$(@B1A?dg~tl1$5R1!P?();c;9SYAsr{G;+cQTl-icPs#uf4>R)!KaO&u-5@<9+Qyv2 zgSmcVgp&RhF-Lx9Fr;BoSG%pcxS#E*M)5{$Z{6oiIA)0^`2@ahw9T$?)zep6<8@sN znFvBk8>g@0XERfv;dVO*95Bl7SXK=9|5Ku%R?vlcUbP+!ABR6X`x9tPsM28o9NPb&|y<*Vh|BxTP&W`MwosXwnC$;YCxBRwUUIQMF@s0~s zhMr9cE`1N-j~Lsn*x)W&h%1}OK~Y+-(!IdT4LCYMTgSTKNuLwG#SnV9B^0$o^I-&d zEB_4%Tdej+$XUsazv3nMegSFWg}`zr!-Z`38hLMM_dx=fr$EHXl+Cl~p&_DwluAB{H~tZkx<-hN;&4CCl5*aJQc2qi7WNiE_EbCV{|Mw%gm zU7(B4SKd`!FkvTDi?}!5)_zC(qda#I8p9I+@`^e~n(dK-F%=;)_Ij+JIAnVYBo#AE zNAEvV=szD{AkJY6Lx)#1qa`-2sL7jZM=%W?9RDQ>&TH~rjkqF_X4+h>?2S;6q0mfD z^sL&_!jNx%GMnaAQ=w%3-sXl8aH5+3HF9b5pfq z`C~s?RAg5YSprv)0CvY*>~1o@y6xaU!E9r=hAU!;bt7L&#uJloz7o;bTLQ@hOT*7iQmj=<}S* z8JH^4bM=LI55#zrwlHaIVwK?_g})Rp;Gqjc}(SeJz3?Xs4&({VfvzRl&J?ISrJ82m~9yD z>~4NbSo$qEyF5}p|!AiOZ%uKV{J9q|X2bSTwfkBzp zq@9pkm!7bCcfIRk{sSE0^%U1zxsv*Ssptc7=_tObJsm;QyfBLU=(xs`m9BukN|8_5 zDia)iupL;TlV(JjUiPiS7yqG^P9VU>Z)u3~nGf zJfmNSh;oc(4-Dc>)wXKuuyG=r%n$~OT>B|)nB1$>&&LFm%^LaVf6XBk55vOp;n-8V z>~ICg?cJg8EbAoN-fZZs=`1NYH3}ehhPQ!Ln*2A@%HG&rjQc`)(xNoq7iQoiaZ+Si zGzvHwmCjc<&A1_3Yk-f;*XT~KcR)C3pG75LYWD6m0WWRX51~AhcsY-c zMZP`DQqzpvRmFOpWF~^ljzWxa*@(`&NY$)-{hv)RI(nzPk-Mo451`P@ZA#P|fP&@t z)DqqQR_11mtPm=ESM~X`jTKE3+#*^Gp#Y^+09xTH7FK_C;2m19OdMvPe^Yl;wp5*t!ch-YkD&-ho1 zoI$$7#aIZ&^zJ6!%H*Mli>CcaOPqULQHg^k@gW?ykRae@*v7Pgex5B&-!puyro;E5 zY&Jon-MYCOT?u`(BMS?~j|v=NWq~uFdkSaO%D=@dMP3g^v}IxWy7g)Ic^Y6xu13EJ zQe!HY>%j;7gWQ(FocLV?%oPxptEmq0?EvU$c^&wU+C?xIm$A~h7kXzIycIepNR_u_ zgWyon(Q5ODq@muYEd-&I^w)hdKjn+uAzpj8DyF@x(J~4M9iIpkp0n6rl&Y8Yx*8SSX#4b&t6MtY-+vi9=BG05!BwEc7Z~- zwyE}!zIs(w>9rhNTYsThyfMG|@#yNaw|o)$E^=ru~97`Wc|aHmuv!iaz7SWsgge3Egn&R07)Tuf1jRb-a#<{^6)!Iko7oP}F* z5qbk6nqWmGVs}aGMvZ0X*-MRl*X$|8@IltFJ|Pi=u_WzBTutdTTz@j6Px<> zRukMQ8@8y;1k?ixEt2%P0fzVT^6{HB+G;v)8DLlftHq^IZ5iWC8_#_|>+)Xd>fMlc z;(Cqf=lgW3+Cz`tctK_Z>NE{H>Jp|pMqk|xi@gdByYsc&=n}0Z+(%R2(W`~Lt3#2* zdO)S$L#m*I&Vt&*3g+r9qUoeTgFTe{V@Mw!^`IMhn)SsvQR~5k?4`*B+$kH@C`@oT z^?*W)WPNUc;oVkIw{?*Th9yufNTJ#;w8;{Et#hP)hb|j48~$`KLQ{uLKA1PNdX#k< z(+KCy2%UF{`eT9G$Q8o_{c?l4O<%FsCHO0FZD;3$pxX@V^{hd(@nW=ppxbSrX-xTP zf;x2L;6X(&575D<-fqX{Yio&&E(^7;I6r*q*2vQ=0a1JgYZq|GDH~b@6C7|2D6~lU z+yKKnr(q)l%wK&q;8Lj836t@i+I{)(hYKStSUg|)ZJ|`qi&y<|X*}8{jd!DS15KBh zB^$@01$&gwg+zmiwP3$wtvXkvvkQ}nV$easpaxc=p}#dNmRc{}{P3w;qjt?w3{is9 z47hpG_4^#1EeuBTuswLsYekDWh|x0=UyQqzEP?oci}16k0UxbHl3h z?pe+V!|JPnl*A{(m@!7^EVqp0py&{)@-kH#;Zb?Z(@DN2vu}y6{$!TAk(gyT^Og#B z`wr8ag5jhwiW#8?s6&y>o^3Kmu@@a=7StDkwX+j?01LIg5PA627^7Ep5=@%tiBpZNqnCfvqrQ)xzWwjG|=Qqn)&cF$z9^S{JzWk zygtOc>)O{ulgSEIT!PtXW3x-|6&0188+vC6-}sqN!(1=wP-M3dZT;KB^3g#8)WF)= zE*kxj=p)0_dYh7mPmPhMIW}#g^0gBeSEUQMQ#P~+CM2BtIR}Ln4WAoUop;YhMi^FK z4WuLrW2V5#Q9LPo<;s`*X(l$q#a?lxckao*-sZ`F`1gxXUL*vn|I-_`hu2&{L?`X- z;)?wF-w_kJIuuFtP2CKb;{Y8rKn<*o&lvkFyJPAOG06{~8Y5q`n1QJ19_!f*I$PXn z8(P$60_wLN6k0TVZdi5RErEhF!?60Q#idZKjM*dF-HnZnJ^Kk!-{DockerHostUnlessnotifiedaboutthecontainernetworks,thephysicalnetworkdoesnothavearoutetotheirsubnetsWhohas10.16.20.0/24?Whohas10.1.20.0/24?ContainerscanbeondifferentsubnetsandreacheachotherIpvlanL3ModeEth0192.168.50.10/24ParentinterfaceactsasaRouterAllcontainerscanpingeachotherarouterifwithouttheysharetheparentinterface (sameexampleeth0)Container(s)Eth010.1.20.x/24Container(s)Eth0172.16.20.x/24PhysicalNetwork \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/images/ipvlan_l2_simple.gliffy b/vendor/github.com/moby/moby/experimental/images/ipvlan_l2_simple.gliffy new file mode 100644 index 0000000..41b0475 --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/images/ipvlan_l2_simple.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":323,"height":292,"nodeIndex":211,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":16,"y":21.51999694824218},"max":{"x":323,"y":291.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":241.0,"y":36.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":41,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":85.0,"y":50.0,"rotation":0.0,"id":150,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.1159999999999997,6.359996948242184],[85.55799999999999,6.359996948242184],[85.55799999999999,62.0],[84.0,62.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":22.803646598905374,"y":21.51999694824218,"rotation":0.0,"id":134,"width":64.31235340109463,"height":90.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":43,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":87.0,"y":24.199996948242188,"rotation":0.0,"id":187,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 192.168.1.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":147.0,"y":50.0,"rotation":0.0,"id":196,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":199,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-82.00001598011289,6.075000000000003],[94.0,6.075000000000003]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":220.0,"y":79.19999694824219,"rotation":0.0,"id":207,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router

192.168.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":27.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":129,"width":262.0,"height":124.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":33.0,"y":157.96785409109907,"rotation":0.0,"id":114,"width":150.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":16,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.9951060358893704,"rotation":0.0,"id":95,"width":62.0,"height":36.17618270799329,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":4,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.2300163132136848,"rotation":0.0,"id":96,"width":3.719999999999998,"height":29.7161500815659,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":13,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":99,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":99,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8599999999999994,-1.2920065252854727],[1.8599999999999994,31.0081566068514]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":3.2300163132136848,"rotation":0.0,"id":97,"width":1.2156862745098034,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.292006525285804],[-1.4193795664340882,31.008156606851536]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.5073409461663854,"rotation":0.0,"id":98,"width":1.239999999999999,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.4306688417619762],[2.0393795664339223,32.73083197389853]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9380097879282103,"rotation":0.0,"id":99,"width":62.0,"height":32.300163132136866,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":38.326264274062034,"rotation":0.0,"id":112,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":157.96785409109907,"rotation":0.0,"id":115,"width":150.0,"height":58.99999999999999,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":33,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.94518760195788,"rotation":0.0,"id":116,"width":62.0,"height":35.573246329526725,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.1761827079934557,"rotation":0.0,"id":117,"width":3.719999999999998,"height":29.220880913539798,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":30,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.2704730831974018],[1.8600000000000136,30.49135399673719]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":3.1761827079934557,"rotation":0.0,"id":118,"width":1.2156862745098034,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.2704730831977067],[-1.4193795664340882,30.491353996737335]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.482218597063612,"rotation":0.0,"id":119,"width":1.239999999999999,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.42349102773260977],[2.0393795664339223,32.185318107666895]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9057096247960732,"rotation":0.0,"id":120,"width":62.0,"height":31.76182707993458,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":36.36247960848299,"rotation":0.0,"id":121,"width":150.0,"height":30.183360522022674,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2

192.168.1.3/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":102.0,"y":130.1999969482422,"rotation":0.0,"id":130,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

pub_net (eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":93.0,"y":92.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":14.0,"y":114.19999694824219,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":235.5,"rotation":0.0,"id":184,"width":196.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker network create -d ipvlan \\

    --subnet=192.168.1.0/24 \\

    --gateway=192.168.1.1 \\

    -o parent=eth0 pub_net

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":45}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":6,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457584497063,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/images/ipvlan_l2_simple.png b/vendor/github.com/moby/moby/experimental/images/ipvlan_l2_simple.png new file mode 100644 index 0000000000000000000000000000000000000000..e489a446ddd255ce9360445f0f895acad31ae214 GIT binary patch literal 20145 zcmcF}Wm_BX)-MEt1b26e7k77e*Wy;ZSn&jRC|X>LyHhNz@S ze8i0cV6p4QB~YT4VUhO25)Fd;Y4*1#ZpvVXQxpL@g=es)ECJoB_ zyXF_<{ZZ+Qd~qMX23VTO0}Ci$EJ^VH|K+bA>SiF`)Q`u6!a+qr=091`>~XaCToMYq z%#PZ)GO53SJ}8O9N}r9aMcoYQQM|NynYTCxUG1+$oqx-fFj)QU^!q?7qhN(_vcsMC~IKT z)i*HpeMViZn9ole+tgKap0V#v_U+Fc?`5d5fT!ue0mC!l_V&Quf=75Qy*au+MC7l- zeCh8?un+IInI*_z|7wZv5M{;bNI>`**5&%Eq!%q41!ycdM7^OH0GJf z#?QYSMbkM*`&<<-gdC^9X2vA8C(zKOe-NE(1VgZ#JZJHk3m!Y(6RbSS8hww*G5t`t zSz6qUA{66ryN~oW1Yk&=qIz5u1wRcy@S@AmY|BSPjXc*BJL9{2X~VRI07TaD;6-ut zO0EEWC8%$xX;5lmLA}upP)&ZthyU*FAaF+2&rpG)o9xT_1osxTGeRG+9>^thtqc?{ zB|`lf3d;R$sPp%6>)a0*^ngtwg*NIi3KA{J4B||mL4-1puMng1JxKL(fVHc34DX*n z$GN^|;Y&9sgHA+;==1OdA9bj879%e!v#`T}*Y}NN%|r=!#L|g;Fs26;L82C|Cajit zRyHwNo;V|V;drzYKZ6;1f-QgFA_$;vTvnDT1l_U}#f@x47rwW^oEo)$P5ogQUi1B(|g0`n?TBSX{|gNu|)%Tz)QCSI{btITmE5X zNJYEcoVGIdrxZV+3Q!)X3`=`{dEO=?rBf>?%E4V>a6Ko+8Q(Ehs-3|UsW2|LwY z+ud(#h@OLgnDwq)Eh9VIH_-+@=cg?)vhTqZfZ&O$WOrJU|rz+4cMZ+(EHf1j|RUrYkwMbbK@;q_D?O0qC2uIm({e-I z8Prm-->~Qq|HP9=?zdH`wQB(L%j2k&?KJRsCxEXwunnL_6PvKP~^+s z<&JOFVhBJhSqz^b7&WD#NSrR&MVv3O8~oHrJbjrXzJwAg*4_ET`TVUoJ^ikQjL{Xe z-=sBrf4LS2v3wi0yuHo6hj_#qMRXfTAOI ze{9uYtqWbo{thk+^c)|}@Po(6y8YSRB+y{)=V7TaS&R&4*F8cy_%1@N^=(e*U(-$? z3?!%Y1~uN?g)IV5b{jGs9sc|2dHu1YQm9%BCMlWo<$WExAKkf?I+RS&iUIm{3O#EK zEu`=PP8HK%HGMLXD;bI+(KWnv)qXt%-ta;fv$DFD+$YNxdAN<|(kIJaV_WS+13kS|OHuZHDuqsV&rZti zUC4?bAHu=keS>z6VY;2p2m&XfvjA*E`;9KVeQxFmagN6l(U{T^Yo%hId!GuUa<5a( zKPX0X@MV%47lw9uad*W|r@m-`{|G#P9W26`izdRAr`AHA*v3j1r5OCgxr`LJ4MT|k zj4m`@Dz8*9m+owX19@VE>8dLS5daXSA%mqb#i2xcg3%&X(;4rn(=4juVbn(M`Ac!4 zW)IBabKS1NDvIV>*k8^Mm%!{8AWDL|=?UO6&BN0l?&sKhk3f!D>DvWlFBf*=xOF)* z2@cX7J^$vdnUS?XYe$2n7AXREUHE*A?#iK>f((Gr7@K)8Sa#|VW-K}qpd=1~C)knR z`5gjXOlRNJ|z{xF1@TC!z z3@>;Tnqyqf8cqhVntIkzbKfy$56 zU2=7{wMW01Y&g5H*3ZL$kUfu!2AeS&)CguhTh@1&?Bh!oabyfQL*4o2(_MPuQ{LV= zJPhY!>4H5oQ8E(!#5FgBH^4ubK>RqsF8^#&HvQwhRnft(#0Mv?9n|xKCgI1SFH602 z5uNdVl_7?Ly6rHeSt40E+YcE$OtSthv8%?maN&iQ9@Jo=T&gN!Tuh)STa6r4xSm(v zZwEiOaD42XeYh*Evi6&DpJfo4A7BL@0T{k;5#&_y&{jrgt$F@PXUQait4|7=NGqWr zZZb|OWde0>UmILF8dqDNsuvVWNPRXVCc0TaU;NbAh~}GnIeY5Pm@JOyWbvbu6|C7u z?lbg9IfZbu@3RA+h8ZhJ9mrw-S1aGe3|9e!it+SxhjfY>U-eqB(J*|H>+GfIU@R@oDe;$GFfr7B9f*qPUV;w0wJ!K2l4 zL+-mP_AThKY}3b--xlc+*k)zhw$WQgEU5uUKjyQ1Kd}8;K^)zRv3)kIxS;OP3b?tx zmQV6<2m^^#S5{VbbdZQOdVj0z=?Rka5sm=h@i_5k-)(<~3m55nG1GA}LcpxobsQ|> za=8E0Q%HQQ|2TNJTadKBAU#bP&g8J1SeO;zrDx*YJ)4dJgkt$qL?98@-V`fywjh{w zBwX&kEHmfuE3U7qa~8&n!Qh%Dw9+K)S+A)6o+yBMp@6rW_$A!C8>W#jL;TU+zPzjq zDNOQFuPizy=Hf_ENojn1{6dMDnK_}xcwu3|>HUU{)_VC3;;j8w%gGO1z?Y{$0l~&_ z#K1HFOsEf1jJpA7`&imyiA@azrS|;gHabV5SV#S}5guvgs~@}DeY>p~I%QtZvP@~z zi)m?8s8Ptx&5e*evkY{V;IT4Y0BDvKB3O|dP>Na#r>>iw`EKA9f-_l0%=^;e(eRZ) zyg?Ci*ApjlmrH&U^!c&w?r6>iQBg?awNMRPA-k6>6!2H^gFr}v6~yYXSp(yqS&SKLpp4OR~pXGlE_ zU`GQP#!S18EqK(-@A5PADxELAUL}%+Ly0!))v7)6QHks2X5SMl8<)Bszh0%V2Z z;BC0huqcb&C-Py48><`jO2tEQn5Ua}^p!TfrkKnOs_;ngt!<5jejKx-+QQapMFJ&1 zbNSZ2gRsD?gvy>Ql>J8@#)FoJed*f4y>@Q!s?b`DvIz*}Dkqsuv^8*I@9ZB2T= zAb$NWBEB-#D-oLX-%JMi{+Ddbu9&1GbyJlxx1 z$YD4LfWX=M*$k>D`@s>q};wwm3lwVnpv|ublV+h!=f&V0w?V~cLakqc_$8RwGp;IL=-RF=K z!NAqiqcbuLEf^&a{i{2vkeOq*Tsd*rP{OB)7@odm@@}%w+9)9I;;hmAplO7ENj{QW z;6DvW0!WT7QknJoaG~6=x;TPafUk#sm3y(?mwQpmWd+_C{^5nDJy*yzUjU%>2h)(~ z*Kh1S4EZoM90XwM3vL0|ou>rmGG?ET zgX|!bKwsE4S&$YCY^UATu&W*qP#s6+)a0KT9yMSfnmV}r-lwEFoDCP!Z$M-IYo~=U z8fGZ+!k>F{4*iN;KK`^LfH#Yw>cd~Zy$f7X`qwhvij1sXj7}3@wyyA|S`|&7K`b(& zxAVXZb;x}yc&3k7FG;=SFKn+b;GX#iHlR!Mb<(d(w-ltOf#99%8z26EXvGBb8t~te zygyMBv7J)+z54NBrDqA_kBxI6*606e^AGNCRoDNjuX_hk9FXNMe?+Fo!ue-Ph)X2E zh!^W$FZWnU@53-4&O08qkbfO{`_TCI@d20869L$?f+|MATFsrI{`cXjhzaymVS8D> zr9WQze(CSI$CrOrIYph}=KjS0&F}hc)Wd&F|4b;%_`M>l6DPhEn6E|+L zN@t|Jz;X@%ErOVseoV8I`e*Dn*5Q!p8 zJ}36X%CP>2Xpcsc(?w4UtS4ze{6Lp&_jA$X=p*BYb6AMOQU|(lEMwJj5W}%TiQWHT z)w6&M6iN6=UEIVDJhoVNQr4`|*Ono`Q|R9bo+#k=cDuuv<;9zyrvI#+tR3E0BVWvH z%dT_lGJg}S_qDA=@k76|Vxh1AlPrJ6k}s)%M!ohCM)Xxh$@ekabu{OR)!KRk!?|jt z8aRxF$41X5TPih=n1_GaY=6yYtgV{Uf&V+{4P*ZI+TU$9oO4#;nUeP3?Sxtmf3{{@ z&>^7e$7gI)CsI1ZXAGjWKuKk!@!D4h^kH{|9$q&HBt8hm4n3u<4s?ohe_=H6bc_aB zevAb4dM@=pylfrh(sNV)BeXtThE|Z)=32VDF7_BMbkHy=`fqU{W6ZM8`*BNW4!i_u zEk(LhK5d=t_F`;eH!$EVUjqLG4Cvm>)cKRw8CBuM!RG6)k>ks|p=<(zC;xdvu^_DW zFL{cSf!ffrn0taGV9##G73eaHR%qiSS!=UeO#fv0am}Et7aB{;bJ6UrA(8LLd(&{9 zK)aP7WLT`L#vLA7F>>d8D^XAW4`{)^YEOA+A;(%7$$dS zZQJwMI!iaVB6Xy>zC)JB<6`?HsA2Y~38QJ)3&WmTE#+anjCS2E;_+ImC^n_=1)h4ZpPV`CxkusY!ON})B=qUH=k+r!BXxp}C> zrkyKz1oukDrpyGd6%NBCxAI18pR-9}K5?$_P!6q`wGhf%0;7$cS2eK&6d-4e!Eu-A zk>n(XBmw7KLK$m~@|0Z5EVN^Hzv0Jhdcl2WQZ#HAuhuF)t(RQgpL}N_&+fb^^ceT??2-7<1H*5jSQ~#&KD4C5{$G= zOvfmmYmL*=Y>G$@dW&XRe%X(Tr?V(4`Qg12wS$S+vr-bXx7c*%JW?U`Jy=W)9*;w{7$2 zT;rd3&aqgp?X-P$=Hu2h=prcY9F27Z$kbDFP6{8=3Xgw6ZPwDYv?Wdp|3$FmOU|hc z7KdHdy7#w0^v-MFpaCDpf-4#S>Aq*ki#VP0-GQ74fxD-O6(_kQ?&m;UXrn(o zCUE&Xn;CJ*ik(a@iUuW8UHwf83FWmFLGDS{;@0r8HB$82C8)HuIFb3Hp*5;5Ik96T10b` zb2&BzJo;l%NOnCeh|qw5RW-jZLfMOLE5n~P)zv2@(LMaF94+WNVeJ-69Cu$Ev{;fh zsYjfCnowV@Ob`*1GM33*IJSTbjW6Pf04`SaNSqrmuIG4Qj%znm3R{))PhSWRQ_ZF4 zvll{b)YoSQg+jscrQn^()>581-UU_{sp&XlK+s=l*}3^(=7nqC6B}u)tmng`;*<+n z_-N~_&oYlR!J0pt>G7gloP0Z;;p4($=a=hAQkK7dh_zJ;EYX;jGTn#dOV`^;*Ad|o zlG4L_umIG2wCu;izsB%VF~;yLXM=qEBV%Pz`S z=g59bi!L;7w<5z}mclTkS%e4q@14v}eCr4WJK+i0akU^DT{@0)XP9(IxN<3T>7UNm zCav!keM_a&3b_KN>5;cu*@}Q*VCCpemqNjby1Ke0B_(~h`v(UCK`)(Bg#qv+rBIh! zmFEqphtk2#bDo~Y`r}zVRnNlpSdCIME|f!hm<~-+Jk@a}H#-|u%k1uSjWAY_wop9c zgbn}*FWp6kZ8kJC{O)m}`4!N>k`Nas@b)IX#`?{(7QI+rs~!@pry&1wNq15zuVllrgDT(P*7rGViL$jB*pruuz<_K z0PmisOiXnin)?MkQ=)JVKE8+zao#MM@pRgQ=es|zrKP7O&F*`utB)7M6bZ4hvGMU1 z(FOV}oI|8PyxwjBwPL@nqLr-N_2{~YF1F?To;5-*FMPZ^Q&Uz(MMceT_7<}(aCf^u zH+Plz<$29W`s20$=60K<*0H*2@)2gg)wILwWP3E(@Amj0(5)|a>tbtoRtqMI^oMRK zOgT;H#Zwpx0Xn(xV%JTR_u218+v(rk0V{8l%V*Vec%M+FG{cQB(|!JOOuvYTUU9eM%@NtStWi59a9L*_U#y(wTk= zVq(S8xvs7*LTCA;>xpSHQqoYj0k$`+Vaj#P4^BtE&{R(yp8e^eK*LA|nhn3*wK}hT zx6Y)q!Q@>;rx5k$=i-8s1*5O`aVaxwelRq(vjxou)@8&f3Us}LQ%+q?Hs^iGs4$aa zVB>L$+tF$m%J#zgFhvn?t>HZfU}U$iJrYbq&T)~}j~&>${OcbSB<4#r`52X|B>j%R znD-k@g32sVE93hq_#;OIMGZd{5$Q@5O>&HQ zXQFmLg(XiaKLu8j&pb1pW^Y^>oPnX>7>XNYI3T02dH0|NM?Sd-7WgsOxfo|BUP*a^ zBjOuV5ALp`!Ny5+`21R^*U7Ty@gfJe@JdUoAr#w};~Whx`IIiy8a>vR^vCOx(Bq$y z8m{=gS_9L$yY!RRCs9T*CoOt|PAw_iIks}Q+})fbPp%f`ZgY>^pZH7ng!fK- zx=Ycjz*gKXS8iBjCOwU*c#>3bDk1qFgH@_;KP9EZ35O;9(Pj%JXVL?GA(r9tVM0;R za5aZTmlt$vV!%hIB#j$3``A&QGfgwAPj;bS90*qxaa$h6{_rDg(4R>)v+QheLY_E+Ka_ z-6snacjq5DjabrSK6mQE+iU+BGsdcuSEa>H!iGIf&| zVpmg-+ohv(!SUjtG4T-JPa>kb;aXDTE?=FK*M56aZV48ALB@%Uex+TWH0*Niu~4oW zE9M!ll(ArCUrzjT4aU+Ke@$hKOVS9xSyollOVI&!)3Kp}@#rpo+={8+wHGCPvT7{f ziWU+YzG;?Z(hFv^GF$VL`>1wgMP}+kl^z$5IwQ9gd6lq4ElVh!3!=YW?uqWIR1P-e( zb)gatU4<;f>yivtJ8oxX$nXPGIl-Fnt)@fk_Qh*&>x?BCZmc6*e&L zvn(^Zw1BqSet$}g zt*LS4?z@3gcXqX;1IOalflV2z{VA@zV0q$huVbnVb3w-5V-+>SX7 zoSflMeMmetKwEl!)5L+0Uw9fFH*gq}`JeJlSUgB{;#lZV8^|0&-ME)EW5O55YC`yU zrT54aHU)3xU|Sx-BO>tpAk~+R|ZI!X-W= z^ms^J!H)EUq!h%=GYApE@S_IU>r7JN_&V#k?>iffLNkL3Qld5wx)zHgw}(@Hbm6M* zDt_}n**q&ANsPL^mDbEmOCf^9>OfZTN#QJkhD2pcVHQT{6y&#iQJ)~87{ozoMaFAV zDE$e*T#Vpt^WCi6TpJLG0->Nb(My1MopnZt3l@|4`X&sK`dK8OEm+Cp#2Uah`AMW- z{ltLZAOZo=dE5A!7b)fBj3N>(JhYbHH-_xEj zN6TjP7-^jmqS7?CGg%HVtNL^4P#VCnlWzn-^Lk=l$A6z=4j^45p4T~3+I`9+`Lv6&q~9~b_$*mQ(C}9|#gX4wiJwgPo*+dTayt5UI(TBJ$+Cs~|9X15|%tnViju(nQj?B<662bBTB=}y08IzTJz>OZ{ zC2({rJG8g2!VH6@1lRRL{kJaJyEa|%z5}q}ps~2gz`_rCF&+8P`=vpnj02dRf{-f* zAWVPv@u#MWFrGI$auVGqhsaj*Afsk&;mFCk6Nfp7bhmxgU7k?f&5-h(shpAJRBUPF z!n{VBk#T)rNc%mQ(xBrD7wNRNJNos{Prguh@{Q4h_1+$J<#t_hlkH{WBK+BnqYm&F ztEmeGwHO?Y~_7q$H%TR(T>v#Z!!=cDEr~*dAhej`#KPXbG+U7ce)1F0Wpk% z#MJL%Q--q<1%?f5iW6es)zSf}Wc1!wrul=8Lt7kgc+y$wF+9F*%lM?_pYL=&$l<@(Jjj(eKT3F9X?J*6ewX-py#=Jj=R zRoB3|4!1=*$7g3!bfmToCHH8EU|zR)vuYp-O^6A?sSp9)7cDfN(vaXFh(TAr=K>pG z{un#VO8zO%8-Y%j)7#Pg!%UYhIL=%3F)QhJ(Cw1#wz1)+86ED&F7?L0*Fa)iBbY#Q z0=mBEg7f!CVF*dp#GQ&%1IA*lR-(}sfmkjlC<2sCoiK8hmNQ|x;8Ee;{vbmNR!LF()|Q{2H#_Z2_Yr%IC!-mQeT5^&)h1kVxk$D14MjjG%5 zD|JkP)&V1EJ|Mu^csd)~N*cUel8v1quPe6$HF_Lg*%Sbm4(!10x@%zbM>?LKuxOPG za)`l>#Paa$?h;7oZOWg&(fO>i2tUhm)o;R&{!sb07Hc?m$@ikH|kfCWHU*tE$) z(Z1gw@{OZ>Ru@9Zu?3nHT*uP7qkbm|#ULDl)YJh&Q3;b>){z&-;?C6d^kMJm(ON?x zFB=^7+wl;15wcx2{hf36edl#1i#3G!WKuX}-{n&T*uGCz9@o%;WD+yrgmFGxHO$@O z+}}Z++mB!OP2kERCjPqZ>BABZxB;jrr<26pWzK7ZUR$fJ52_FOUhN&+QqGGrz)U|m z?AVyaV=+Ts$=HLWM0Zxh(JX<@Rf3-nGFnT6UZy4vrF@be8V=Q8GQ4dPbwp7Lkvth> zJm)5+G>giI&LJ^k^;6HsYFbHo^lYEcf2dV6(jGTbMyp3?YPafBW#b#Q!nW9*7Msi1 zd&Mt9L74E0uLO`yL8xZkwvhWSsulJVhA#Jm(^(Kzvgz@t=*8vzTrFvk^B+=$03WZj zRV^d>MBBTm2c}5s`2yU9Xh(=4T7frJ6VfLy$Ci+BtQdTU3 zd5l&taN}ZRG2w%1x92AITl1^?!BSp|fRA%u_!5E+EB|Lp%ni+E!0%0xi;}t+RGiD7 zzM`~SCP}>2?eKYF%NCiZ#2`R}{&q)wSIp{Asy>C$kZO}yMLs^^;#GzKbS7(k36!uf06Z#|e zx*tAU%1811d|yg?vlD08PR~m&Vb{?O`WG9`)JFNb>-`xHoJ$#1dh#AmT5pVvS0`e8 z>ITWWXd_kNEvsRCPku)X1vCz>><^yk41wTBFa|~A9uTMi zGU*kw3ArXIqqp_#5x){d+Yk1qFh0@VSj$}sC+k}c&tCnz(%Zyl+>H!4t&;l-v^QbC zuFL~^61eSn)YEj;n4T$AMMx<)t19B@yDtn=DE z%d$cD$+cFO%Ok5&N3t2i|DZg6WO(;4Z1R8zUarsw+Gl|DCZ^F!6Ku917=nRUrDaQC;((|5bk3#EuDg(C8dM8zi#N?!^DD*J zl8^t#L-=KQk)_=!qeF097**xENI4nB-Hrk&-+2EcV;|mD$>718qhD&d!sVi^%Z;`e zG->sKZb)9jCZ&4z41z82_sbDph9hBScaTc3ZkzSzw6`LH{Bs@7xCLDQO%m)8o1RsJ zb5OG+`3JFuX&5Ys8_5YA#4=lqhulntMDjTMpJWudYSN1Q&zLa5VGdCzO(VnNW zSaxrcLSi)I@azIDV_5_V5eU<Vxka##r?>5(^|}#>ROKt{QnCU z#DwD6gpTc4zz+kj!7D{9vADZx**ku@&1Dh^vau4`aBsh2fM1-u+iJnet#27HXt0=yjrZf_2bdD{3u0wb1 ztk`VS+>C=nKm@mGQN$~|q%BT9IpW2_ma-laOSD1cW5eAtMxF~NX`&b=y?39O5!V)yk*YUvpXP;-ot zp3XYt^|@o@^!JC(;w8$X7uU!_Z=qk~1gPZ_p6SY63< z-Z5cBjpCHm^%bGrd+%^>m%+Ey)TNbp+o(t3zs8x?f#xPncu37YNd~SIgFY{F$8W22 zn^@u9pNUw1c~;Dzdg*pQQ$76{{X5R@-U$9#HuGkriW#`A~*vshU#7Z zbX?V($VC4>nR04uLJ&LfcFwx^-=S1yC`I^&Pp_{2#NRj@w>_V9;u9fSNUga6heP+mrf=TB-R7JO=w;_ zzhdVV@Tl{l`|E@v3Cb6ExgFu)!Jyi|OIFapuhNF#6S5o(-b!p*-wyr3Tyio({~$l} zCz;NR(U(s$EppiT?MsvYrl-}?CGz-ZDaMC3b5zLNM5r=nx8v2iA;wK^*>I+KRKe() zWhSc+azN zyPPrUXx~AJsk34(T885e&2hgYseccc`*+-?2Fs#S0gLlUqdpWCf|boccW!7GQ1_U z1hr32NThsRGG|^}I9g3ZV`!FqLnZ&=MGDWXc*bqPGU>~RG@&tG{`9m5j5X}-$;xnR zmnNj+^|Q9sFA?5$3zKMy`n*BVzZ5X97##Ra_#bP$Y3}qPuP6F=Sq?)Vt-(CIC#KN5 z(x3Za=uloqyq%ZFXC%?{ssH{aK~3Y@`6R#R zc2?ovyuPvGP5Yu|rRO4gj#eo#R1@Hs^j3a%b`{^LD8YSpL5a1YQWfnB1zrI5^1aRgrX1`-0#0 z@?^eE6Xy1xNP27H6A|AoYiVf-R{@9*dAE)G7C(U(6IfqN9@gP6?&3Tx)8$M{_t#kc z_+1c1>W}{0E_+58F~{^6s>XFT*GEU1VgdYWvmGYU{#7ak&gr7u8~kCQpQaqsE#~pGnDU-zx%p2coe&^WxpC^{J8yg>iUcB z%7KQjsT?Bqdlr`&9xqW?QAb=F-~Fo>p?tUcAn#FZ%zHfHa75l%zM}?u=B`eQLBl``W8T#~K}l2*i);+X6tjk1od`E<Y=KV3#P-k7eqGT4vqwhBlo65 ztTaME%Y#Jh8FkdQJSCx#Hr{%JIXeBPR7JPYBK2Jl#-%6PY5G~ph}q1Wt?czkwS`X{ zyc6%}J&qC-5yAmc{s#k1o`37J4Xe|z&0j{^Wbsn}T(zBQ-+Ay@YOPaH42kKAZQ&~o zB;#$xe&4N1i)V@I25-gNut0?(nZyyoHsN*cK3%6vsLeQdDv^v`n3=ihh{qwe2X-y+ z|FzN;nV|12mLS?6gb7Nm;H%};AHZnUJYw6~Ak#~YsV!^wU)p9i(c;2-41MTa>HF@U zb|^cRS!3_pl~O{)sndA!Gs>b5d+xKweeuF;Mp*FzY9WX{^~bLnFzN&Eyo??MJ1CIW z)+_Lb4_dgOBTwXzsqGPs-Rn1hlBQSq#x(L?H}XiEh<^&NuH^7bVU_2$c%Lf=rdmp3Ika3ZNCpvIcWJ>I;p2pp4KJ*Y$bb5iMrlIp-YpUQ-XPY< zVsD;u#DWo`$DmhnSi+;|o5sRgY-d$gVu&8I6~imjCdd2~-T-1k59YaGA&iaHIi)8SWJp6&U?3zxQ1yobk<|N-nQ1@dcLG>1g#7Lk$$dV7a4riS zs>4Lzu_K-6tmmAPPXBEKlk0x^yd4W-isCyB$!GCO@vaYg+0aDT@fI)NvHW!XA7(0O zL!G1qYjm@LGk1*sHw$`UO3h00EEth@TUG(p5`PLNevikg7>4$TVq+^&&`#BYx^9O}}Dq{44aJB916HfW>L&MNk!s)l$m9J4V|-Yjd2Usct1c*rDSyF z8J#b$#-xF7lKcFs$@;E6d{7&ZWkLp|c*1^iFZew;q;1E8r39VGtvpq1!O;q59g-EM_5M5wJ&jy95n3e11Bjc+k%xM~pQm4C;D9XpYL z^zkQhvUO1=+S^a~@*MMXU(6^8QQ_}bWszEHHs2mlU?DB$hnTbZNvl_Y^{5tmf_kW7 z7>PD>P|{RpgiOT=f`4xi$L>O{N~$sp?L&)Phg>H%Xj+-b>4(QIgl*oh9FvxZmlx=I z?Uki9PNg&Gs%X*JS7fAzIUEVc6U@EnG7n{XI+SSSojGM>U>dA9yO$6a4*Qq)&8XB> z$9LT%ZlUkovhbG!Uf2Z$$|El>$JtN>f#bCMWUTop5^VCCKN_#;98u-Qzf(q>r4UR1 z)mHJyRTTa0HPtjfHZt6Qf|2~O>#xC)uqyq*lkW7O&$F*$B8HKnQsSnoiO(MJPDNmW zXXV$3TkYN3qkZ^lu&f#%#gN|Is@cV|tbrAx*TBWamFItnJT*?UKUf#?q0kh{_H=*W zMvb5p%Y}uYRt_-5e|rc5DAlW+XY=FgYW5nV z>+tU`KO&)l9xAHYuw(I(rp4zVZ}y6}XE1c$hJEF$M<2&Q_O+Oso6}t`2_5js+?WD; z2r)vtP-X%_iA*!^>!RCJD-O_vypBKbr0Nk$?P00TaXSz~|=(@*#;%gqky z)Kv8hX8FEZk=v#vYx1k_F;t1kd@(keVNT3UEMf2P#U zF6jI`E?gig`BmUU)wh~r5DN}_3Up*M3WuI>+1#B_7woHJL1}Jnk*es@^USNzTNT6h z=N*c+xTK_H1voS`R2vo<8Mz#Qjg74dq@_*6QQmr8>aZ{|@;n*4lisAb2Xuq!(0CZ_ z8L6>kG~}feh>PX5aj|@XUnFyL^RNvxjD?G7i1(r%X=}EP+FUn+7js!$&YIhfY64CL z+x_xRsL06Jpg37^hzPr8pZ)Bn0j;c*=7V>`l&+-kJR5&>hr>I(3VUj_csveGTCSez z+>Hf?&@i54Kzz=u6QP4Ri(O_@9zL!|>6zhQ!8OaJDk%W&G^rh0qe!RwFTGZb6|Z){|6HO0lno z_-RWgnH#>kQU5)Ji7Uw7c6qY3>+@5cQRFwJ2K0Dw!cyeL>YX(Nr5|aggseJoPnQuW zDDtcLf^<)D=Pg+$MNK0Xi^&eIAH{5QF@yBZM-9N{0YuD#P3x*TOs;~3nh6m6$*cK& z&9K`5=9>)tP3wb6L&t9E<%yNzpm3W|7i+tg0*{Z^^801*SFx1uj}8YFwj1h(xR|yq zON-+tRS6c7#x|2wb)VLnsd>Q@*h%W$Bn^|rEM_dJI@MY?Rurq&YY3?@QR>cztxq`qR!X0xEi4w z1ZH~?=0G?)ieKqxuZV*f+I}y}$6xV0Je30Pr)gnefX97=q&%OuV%=mp1MKOt=g&RO z2u>v>)?;mT%BC)(P;@fXhB)GgTsF(7&G@ieJ^uJETKAkNks!L=(bSMeNNPk>kb7-q zeae=F{aU2IYSGMu2pM6I_Z&kq&C!k?3|$J<*npK@v>=4TXEw4 zKNc2K^j-Zy!zaxrc~Aw{e9_h3<#nD-;w&7&qRXGLZTf3mxkdLSoNw1N`tL1@3c8#oY zg3t3_PY>%}g;8+#FC)t02H^-fe#REeOti3|FnBq;+rDZ{R?qhT1-F_jwfOlv2xTZ+-Q5bkc}5Rj=YJ(kfT^WMiLmwaxlXw1X~SQaIquj6l%# z$*_!JtuSpLwk-+y{UxD^G+S|wX#FH@m=|OA?iNC_?F#$Skc56=*8ShUUFGT6h_%h3 zDuW+X(^+hcD`-^2oBB zED1w>$or-H8^9bPN(hqe@J4%M4Nws{27yY|*jxlrE)++tg3=2+JbzZ2Ht-15TzK2( zEq@uGva_@>V@*r=f#R=^#tFA#S;+~JCLezy#kdhPvTm=@ZAard>^n4z6qqcAIr~g_ z#zlSVwb9|mvC)lKK8gH<45w_q2$fRdOU>ZG%?fwY1k)=DIMr@iid2LJV%i*cvHJxC zeK-iiNwsjoCc9#5IWhHip{oDWoEzKd$WoG{m^-&B@+J*vHuEXhEED z?|T86vw&w*or*ZAt83}swy@B_|5L@4heN@2@iB+lyphifqZgWQiDiWG7T8 z`#xoC*(Qx8WNT!X$Yh%g4GmdJV;}n(2H8S=qxX;R`|h9TxxahQIrp63xzBT+=WO=Q zvv&xkTVy$y2nDO7>hr(8kj?NYZD5}*E(}&iqqr>BZQ{l-<4a|ZLzRzN`e|QDzCgw6 zfaMiSn!lqWoAY!Rog+XkrjUS?%ah~rWqZN{^AZWtEs-h$=E$*ep^NvYb}u!eHgLjl z{IRL(gGx$XK1gG!i_vv3rw?jw(0%yz+=9LHr{&*^N*uX82v}xIFilM3EqE#Ex_W4P z42NiqPMkynkB`8mg*LM~Cd9L8X8-v#;clUNVMJsUFLrmG$>g@t-Q(RLtt>j0 zz!E`aE|&d;%2fEwG`Y7S27mk{%Ej!}8I$ADwDz)&CvepPARixJ0j*klw1HqLiaLz* zu$|mTqo7W7AJ})o+B`9L53V0O$*IzmZ68%Dbp~8kk>pQmK-R>Fo7=$|wW=)mcnNEX z>TF#uy8Y$~Vt(ohZd9CJ)b6o**~|?SuF4B4ul}C80!~_3Ujr2*?{(!fjSjobZm&5~ zJC1vPed|ZaULhXuN^`*yK0CYn=B#1w6>oKqS+kdSOOWb9dlLjuw&-)R@h#9y)D{gU zW0NLSpF6K|)Xljy0ay}!EaWCFnNqC{*3O+N1Jeq-gekxBj6F!KmdH&0^>9H?hFgTG z!;ODkzV#q=_^Q11$Kr{;nJ>lfyp7DnmQWJ=N#oqo2t;QslVI5Ji2?o}na>QCp@T#V zE69-}5-}tT9hPod_#wgw& zDk*mX;;kBl4Sl)0bO;`w!1Fxbnt752v~PYN%)Z7$>I2P~==T<39(;w5N9(v3WIkmW zd59mlO}#Lz&mZF5o_1Saa?i;A;~6E|(~rvlZV)Bw-t^v_H)E4eu@!t>dDZS+`FE+t z_OhJ-jR9Z>_-<A32giG6D81%G&( zRCAgWi$9i+g-%x_`00^VV7b7zsW-YV`Mf#*1REgaT+!4Ryu^By*bsYkwdcq|pf!iV z7am3P(0ku(1#X-A4OHIQ%8M^b*rV=_67D2WU1vD4y8B3Z;o;8#LIj7kG+5-x&K+O3 zH8!NdfWP}KlT;&Xo@HjyAn=hiLrmu_sm$A_6N5@p#^a@7n11aWHs$hT3#-@?FCB(x zj8Fn3swk#Zk7YotnXl<$P~Is{!cRoT1jX-4l}(eC$BWIS?URCyWfF|LZ0+a#fAJ;i6MTs z2Q&}r{8_BXkVcz<19BRgENIV zybHBb4)gL)Qb>;N-i`i#D{k_u!9|eNiNu5H{mf|@1K-jqn0m z_1xEkjfz@j{&L|h$U?(v-m&(p9$rsHu892_Yc2@g($tEoe`6EoQ80uiFcnrRsHFuy z`1ekO+)nk8FdibDWB0x45%^w@{$!WtkT?5q2BTnh$-mH!+%}X~bkijH$PS0M#oC{z z==v~Ms~TwhA(`apZZqTa^2DIJDLIAx$V$KB_v~)yQX5);1Eyok`VkvmFiBqhFRpy% zQ+4Z)?GGtpYFvk_8>Ug;x0HbJex+umjl!_4SRf0HzQDKBpa9(RJ*FuoYoE3(inCc# z+%Knz9vYVG|FBj$8gf7a-ZB?oA0gaoJD9=T1sXeV`z^%Z8i9Q9pKNqgU9p`Ja$QY+ z2kj`#6k|MRtzno5!Xe0|Y8Wd}EsBX|!aA1; zo+O^W9QW8a#8Il?W+-QS8wfADdFkCioB-boKcPGHGDjrlkY(!|=G#L47oLntB&ad8 zdJfh%-H!`F)_%Rya`|T9Acr2w}sk15+(1H z=E^P#LrFjL-*YYN|3Fhy>)7EA|UOxZ=LwLZUJ;8NyHp6N@5M+40)g1xpq)1b#0CaR41Yb znbX*_ismgYzu4FFOfY%r63M%p>N!iTh`6uuiW@4^gZJk#F1v$8X!JDx;5x(B1aw^B ziv!@%#~}@6r?BJH8Am7P9M|}3rsl54YzxNO@cn5>3Ln`d!;SwRHE21Tk^1x~Pgs*EnrW>_iT99xEcEPiiYM8oqJ9GBrt| zbUs@Lx-}L7tKmRYcontainer1192.168.1.2/24container2192.168.1.3/24pub_net (eth0)DockerHostdockernetworkcreate -dipvlan \--subnet=192.168.1.0/24 \--gateway=192.168.1.1 \-oparent=eth0pub_neteth0192.168.1.0/24NetworkRouter192.168.1.1/24 \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/images/macvlan-bridge-ipvlan-l2.gliffy b/vendor/github.com/moby/moby/experimental/images/macvlan-bridge-ipvlan-l2.gliffy new file mode 100644 index 0000000..eceec77 --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/images/macvlan-bridge-ipvlan-l2.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":541,"height":352,"nodeIndex":290,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":2,"y":6.5},"max":{"x":541,"y":334.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":2.0,"y":6.5,"rotation":0.0,"id":288,"width":541.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Macvlan Bridge Mode & Ipvlan L2 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":177.0,"rotation":0.0,"id":234,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":16.0,"y":240.0,"rotation":0.0,"id":225,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":235,"width":106.56,"height":45.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #1

eth0

172.16.1.10/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":138.0,"y":240.0,"rotation":0.0,"id":237,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":238,"width":106.56,"height":44.0,"uid":null,"order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #2

eth0 172.16.1.11/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":40.0,"y":-26.067047119140625,"rotation":0.0,"id":258,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":237,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":50.0,"y":-16.067047119140625,"rotation":0.0,"id":259,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":225,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":60.0,"y":-6.067047119140625,"rotation":0.0,"id":260,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":241,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[75.0,180.06704711914062],[215.32345076546227,90.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":184.5,"rotation":0.0,"id":261,"width":79.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":283.0,"y":177.0,"rotation":0.0,"id":276,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":291.0,"y":240.0,"rotation":0.0,"id":274,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":275,"width":106.56,"height":45.0,"uid":null,"order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #3

eth0

172.16.1.12/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":413.0,"y":240.0,"rotation":0.0,"id":272,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":273,"width":106.56,"height":44.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #4

eth0 172.16.1.13/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":315.0,"y":-26.067047119140625,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":18,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":325.0,"y":-16.067047119140625,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":19,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":274,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":278.0,"y":184.5,"rotation":0.0,"id":267,"width":79.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.0,"y":3.932952880859375,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":270,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[340.0,170.06704711914062],[205.32345076546227,80.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.32131882292583,"y":39.0019243141968,"rotation":0.0,"id":246,"width":216.0042638850729,"height":90.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":356.0,"y":150.0,"rotation":0.0,"id":270,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":1.8620689655172418,"y":0.0,"rotation":0.0,"id":271,"width":104.27586206896557,"height":42.0,"uid":null,"order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.253/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":81.0,"y":150.0,"rotation":0.0,"id":241,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":1.8620689655172415,"y":0.0,"rotation":0.0,"id":242,"width":104.27586206896555,"height":42.0,"uid":null,"order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.254/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":224.0,"y":64.19999694824219,"rotation":0.0,"id":262,"width":120.00000000000001,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Gateway

172.16.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":307.5,"rotation":0.0,"id":282,"width":541.0,"height":36.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers Attached Directly to Parent Interface. No Bridge Used (Docker0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":32}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#000000","strokeWidth":1,"orthoMode":2}},"textStyles":{"global":{"italic":true,"face":"Arial","size":"20px","color":"#000000","bold":false}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458124258706,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/images/macvlan-bridge-ipvlan-l2.png b/vendor/github.com/moby/moby/experimental/images/macvlan-bridge-ipvlan-l2.png new file mode 100644 index 0000000000000000000000000000000000000000..13aa4f212d9db346f307dfbe111fd657406bb943 GIT binary patch literal 14527 zcmZ8|1yEek(&pf9gS!px9wfL7?(XjHfe_r?o!}4%?(Xgc5AMNT154iS`)haSR-Kye z`MUe-K7G$Qb?Z)ql7bWpA^{=*06>uji>m+tkZb?|gb6&FFcuiA2}u9| zCuqei$M4-5nwr`%i3Np4vgR^5xmi?p#UH=*mV3XyA4*G0x3#x92gWff1cLwoYwwVe zXz=#-mVViE$?s==*J21!?}^c7DO)2a0KlX5gh}1OzpT$YG814}Y}0%tmN+Aryb)z3 zsT@@gAoG`$lpO2IFpREFRt13lD?iI9=-GG+2moqFC%hy9I;nj{27uocPT1D10075u zO@yRFl7errMnFEDh#5#PPuh?-x?z&dII=d^&poRym>Ga5A8o_}h!g_|nSE(VHt%mQ zPqw6czuO|90az5wa0^O8kUNShYVM8{SpWbzzUn>s&g!NPQdUl24Wmw9x|Dc7tCD5i z`~h`8htZ7%KLEg;77+f;xPQ6_tOjVPO#d7)KeD-x!mCj40sbB># zBGK_J+`o&7iG6fe=KSsDW0j8+4tQ zhH*-^spHNug`oyE=L)?pv{pax=*n0-F0Ov#MtGq;M)d`C_0|)mHCU--2WMp&h)U&P zcQv?6T)kMcBJ6c$fMY%x)hr|An5WbgL#Op*Pvno^B*6%cGQulpIp?=Ppr)&ez$$6D z*}MDmX)#sm4Bby&%JvaxP;wzHM8DFF^FHjHwaXuDtd)8S?sxf2d3U>RT&%lJ$$CKm zXh*C8HSbv=U%Mg`XBgyBD%Rl}U!)etqPnI`2p{fA6T+dIgA+kT$S=YKz6gfU{%qvb z7e`jpjWyGn%7K-x9`5=yt$bko+n7@#7IU)OAp}+uF5#D0@BlvI_$(!08p3V=l>$Z3 zlwFz4CPzXyYS1KhRVa3_pd_SxNF|)OT`e>lX_!!Pr15$;eD}>yYE?otTcpveS87|& ztkT$vjWL34w}3#Tgw7O^gpwUU2|(|iPy<+_ncLzhsmGAV&thM_)Q|PylCK^T0QjcK zo)J4y05vSxJZF_EhzlM^A^oT4R?8Dmr zwcQDogw_q@WZCF0mOTbPNpjBRXO@O@nrbl2Vm8jTFDVV77`yjlsDjtincO zM(x$`)?EVqN$)d!m{}ZTC)2AeEw+vI>c6fF??;Ax1Z*Js`7TrxV1fJvHlbX1Hp{=wWxaUkp%r;hrW+LO`2E=(bg%#N+RzoYBK2xv zWVjjX{L3{G28PiMMhX<0`Xnn1r`@t})K`Pv)@qv4v=R5^9oiqikp-o-UU|L*Ja?lK z^~t|nwjE>6J~DcyXdjyY;MzpznqdRM$Q^AYD;i&eSwO;!ow^Zq%U0@9iokAYuN1=0 zgjyk@V*PUkjr8X{l@-*_=Ov5&jCwY{A}H~ahq|QDLtcg)p;~3h3no~JY%&`*vC&7^ z>eEyEgAHMBKM!|f zS^386lj~YFezTObA2o}(5mchyM-Ly|toT6%F z7)_9n2@5gyTM>li#2#N+?(o9x8uvAM9D;G%>ym)K>fPrXn@mRJ#**k+lXkp*hum|c zq-1Zrm(C;xkw(JwzZP&ka;|2lw+qcoTme=rp;XgC*aTN#3s+cEC11-J3o1v9za4Xx~HF`t_^?MsU8C-VhCW<=qT-s%dTMDFKtR?Shvc)nrj6n%d3#Y_z&7J zrn*Ns22|<_(Wc?fCoX+yTbklb3JVSk{~bMgl-KmFC;*#;@%qQDYeOJCLe=MOv&Hw+ zO<(DH8U2Dk^4Basq;cIAD+E;=;f42~rYU3FLb!|ErqsXR$WU_m&VGuc)+^xQrq)=I)A(> z3eq>1S5EZYnb3qc&FEHe!-=MDnrX#aNvg?bf)@Gl85B?bP<6YHD_N=&cfdll0*f672)(R19dd$J@@N{w_z&pMbh5kL5@`Ex& zq&SW}KA<5D|7<)1kw7ZCF3_b&b?Vo#7D!g(=aw2`run#=*WBbN>sO)(Bc-=a0u}hBb$_t3wwW`DS%BKn1z*%{>8}Z zL^-J*dwD-znMzHcwswYp;Xpdcs)spLD?%v%r%!Wm!3~J%y9`ry3lbe=yhWi&noL(O{~pRZ zhF-BaAUy|z^c;yWqTyz(=2#Mc0q>nyb_n^oYH%fyF}#B$-F>b^teXvT)0d6kkIl7z zQ=IXU)dXF)oEt1lrWK3f;E~veRry0233TD7pUIQMkKk268myK!minxp;_1w?wPwNBgq62c=52H1bC>GzD#Q>{GypZ?aN*$R=IJf2|D>8XPCX4HG|$i585Vy$kj zUPg%fSbO5#DUq8|zNayBaW7VF15SxZv&K*w+A#k{w8(fy)U>_#_1o@Auj^W&Une}; z!^!7CPCY#K0Ror^@yBz@1Feq3mD}7}P^iKOFdu5vpq6F2Z#Bit=d8$Z`euV38G8rO zB&WfNKOFBuhr`6eZ{NF{ZqA;<;Qf)La@{W*9qQfT7^7`OhJW}>P`~cZ z@Aq?>q)ssj-FYN)y!}~!-kvXw;uyXs3MKs5u$hG1m-zLw?*(o`Qm(S*-J+QMkN4Jg z6;Hd1JBg|2SF7D)zDmEt{alPVl1Q!U=7!5sAuK9pu3&{O8%QegU0`;e=}Zw*tcuQK zl*FhPZvLrri8@W>1H{>pXNY6T@o_2e=8Q~!I2tu175a>nKu5raW~Tk-FR|1mS`}&asn<@r2JsIP(WHGM-CCu+3#EB7cL+5tY`ZAC|;K@PjgQ^CrJS zoSZig&6L68RPw{CZ-?UHBgAtxF`Dskz;p21CwWf^(x7qD@U#L(@t4Nxgj>hvbDw9k zt3Q+;dfN^3`G1g+vFLwE2QT@C-=ed<i+abt)eg^w7Ps@X*{}DCI9dEmxnBdP?5w=jgEL$wSKvEO?o+=B zAu-?zTrBEeJhd2}5X_u+Z^)DiK6uoSR3ioj=sln8YafL*F2>HC{uYyJ79u`EIP<&e z6`zxVV?9zZ_D562P*iO}qr8B)=!DgeJn`m3Mvj%H!uzVap&K>BWMMSDnb5@_=F9ig zXe8{!h78pn3x>z|{vI#MP{epUyBej~V@|07?X~qOiZGKRjrdTPIE=(W@XENha4pF4 z;5*zkK$WHn@-RQ|+-rFI2QL4H9+Yc!J%Z6{l-`Lz!__pVRA15BgkdZe+(Vbr^E)of zX0dK&SY)66l>%;Q$HT#tZ<{Bhb-`WMe7z+ziQeg_WX|EWqwZuk-F6Ip^JEv~#OK$n z&Of6syLejdEuP3#uXY$)Q@OQMCH&*2te)CT)M0!+LukO&^`p@=1stEB_#-plM$TQg zcY1eJTr<{l?u{B)rf8(+UpF2ee5yT3R+n#*g6dD!6;9um24)Q?LJ8Fn(6FBcZ}jow zhEs)_aHuQS@2l*sGA}zWHt!U0lEesoHXiTw+Sf8X6>tuHBJr}iexg64;e+~ag-l91 zY|rjCS*AZ@vBB+dsp^VPWSn$cc}9_%Jz%l<`5EOwNkTB`eE^SyJj{+e9{&6uDnZ~A z%D9hp#9&k1*!F1tdf`3>as2E$d3<3Yn<$uFObQLoyccw((!rC9 zYeaSDtk!>Zm_wn!lsf#f$`*oV?BsMVi}*D-9)_AODWr@>A&1j1%`d`A( zs0rEAdW(%IiXg0?EGB5N*?tcR*z;&JvOLELo7%Tf}W@Ybq7Kz%Rq z6nkUtzzR)UX1h`NaO7jeGrov$#v-IAnB0bRQIUX2`0{Gwog!&=c7_y;QuU!-7!Ma+P9i0)hk)ZK+qvNG=QPW?#5)*a1&e&L2}m$S6=*ltishAM!VI?EpdD zywb>>t45Z2STvPvvBq^@y>_qPLM>lG$1Q)z=~BH$?j={S3{dmK_k9>;)L%{{TUDPa zE45wz^Ur#>q$P>ggIiADPbU12Kd8c;L%!=RG6bg{4s+Kn^8Q?HEtswg1) zcd|>y>)-E_XP#=c8J%U6Mz3hgppg$K7FNBVL*7C3ZLPGG(L78oB3e=SoW~I)9C!J} z=->~^wYhKN1m7HTUJPfDs5-T|sAGBiV0lU==$dyfsppC(Yt4*mlB{^~$I2bqo?c2h z7Uy^>Ozjw)MUGvGG3>2KX)$l~kH`2CgG>>@pDPPgK_P+&UkAC=s8#6Mer(h0wH!Cr zXIs=*Xl|3i;LCGp)sL?*BhihRWcFsRo2k?jVq|mAS;N|TNUNI_&B=Ymnn&JH;jx-z z4rVsW6wwp8XtM~jh7F0H(XJ<4;N+m^U@ZC-cy04xxXx)x7%#ZTVPZMykUT$O^K1=U z`!h2KO}~dT=oL{PR?OA|zsK65?L!@!%(97#Pg)mX9?Ut9yhG&INaWTnLpqPl9JHc6 zi8_z$=O(}O=?J0(|Mf@*mvz!;a%!6_D>=UuUk}!`TVO^1AI3zVf64vdmz|x=oRnXu z9;%rlRn+Q1u=vrEN-3PU+yUK*(JI03^_FnCF=oz0J%ih_OeWAje;9v(zR1jM``$Ba z^JzWMh09%K_V*oG%mQuOdeLX#u-sW9^`r#mcpU;!xQ^1&e4cJZeLghq{aecTN%ZM2 zul|C=+zHK{r@seq7ySZB(zH!28AGO2ovnCCOz;LYBwS~;V8c;x%i0CI* zAIx_N?q-;{IEV-{?MRVi?C?G}Uk?*mh=?@tE?;65a`46D4_8Doa~rRC+Q_+WYG^dy2hDRGv7A*swCIzA1X;dlV0jQk+;#o~)8RkzCo)X2~5 zT9#uaH)daTwGqG_phR;`H%P-NoLRzGtf)bX(n^vzjgcOfm*bN1?o58bbj^z3>%oj4>6w27DAOwo z02PHPSmM5cwh+L3+s1800uZBzH=G>8aRz8_t)s>*Ql841o0)|z?FY^lIyX1f54?!) zSEJ0x?T$+%xe*ljKw>O&8jKsxwqyu`Q8O0@4_=9d+u5X@mOQk)pPc$!aUA`8mYX!) zfOz=0Pe7i``NPIA%L4GKntifOVp6#Ga{b?`&a?aP(PQ{VC=Jft50aYd65m)4CqNYH zFQO5WL}#Y!$2Y6}9Lk`W&49O5W}b+2IEJH(ORn}*XNA?3otDk+EOL&+T*7HBP8kTw z-pKN+-If1XXoE%!lgRl$@Ka4JIr5DokH0Qv$K4*JXSsNCYj>0v*|}_$nkG!{?Wc~{ zK3FA!{bjjS>H%h}{LbUu45)Q~FcN0!6nHgm4Iw#L(FB;$W#-{%?d5cv=Rz6?8jU=fJ2Qjy8M>%cca~=!gFPIR3H1L z_^kb54*k6U;_t?~&xMb3BF{Vy^(UC_Hu+A07ek~*CfaMEeSsqsf%?c{zi*}SM#iYX zuZ>ZStoiJKIJUXr!fGdZyCwhHAaTQ>)Xb!gZ(iZ`7WiuO%OV98e>)4(I^w*-HBT4t znmyx>Mp>FLg03DLCGD4RQf*>-#a6}N@5wOq93=F7qkT2#B7kI>yt>8# zB!@iOF+roqrTO2_M4HX;ay4qd>c+!+N;+#$9|8QI2dF6)F2~T*Q>qQ+6G+Wi5e$0g~XyMHq)YIIKnBwI!+uG~KC!al$ zks=8q28*I7c?28|K?bGtEl0&8EPigeE~7#&S`nNQ0NI>1hYu(TEN7K*g-vpg-lS>I z8Fzw^YL5vOjr0mMOtY8Oeh<%kvWP778^aj(!bsne5r&3^r~X~@7Z?1NVTvI}_q`rP zJs9~!`s?6@pNL_A6Wl%O@06BA6o=rbO~b%EjxmP}hCoqqR-|_ zr>cCM`X2a5baf8vb8~Gz+QCN>E2);k_?SB;s~qb@$rpC%z$rxBnRK(1_y9n#A zCz*VzEuc_Pbo|-eFjz5ZOh{_9V2=KMCPCnapii}m8g-Y^jJ7x}GxVx=?3b1o4E3I< znrH>vhCjpkE(~-a-Iu}K>D8Je3s{jxJwyDN{0Y%Sbtz;XB+MXesZtt|rJAd~C*6Ho z-NxKAMv0X0hy%-#Q}2DgA#u>N7wQZJ(s6j9-wDL+6ONCSH(B4sSRaB$I~?x1wts~) zkGDi;{s#ee3-nW3LK_f-LvX8}d*pL?6#8Gvu5-Z>gI*$sWRITZq+c%QL?tw5aP0X0 z!&BqKR}Q|Obd1fdhZ;duMAoFsV#^N!ekgs-vcabBzi##5iK zL}9GCcTG!`Slc4pn{RVmw`r_p@kTEcU}j&x@??KYs$Je!sot|G#jD!Qfbg0S?KA#k z$9Go1HR*HKnb_xU6BQ|!jRH2zv!;kcbtwYw$ ze$+TSB-*0A3vR_+zu4=qkTM6`5Cgc?Vu4jsKI-$#d0fxCO^MXy@cG1fxjuF4>m z6!S<({3=7jvd@pfmrfbgf@r*_Vwqke#ardA%Vy0VjD~J6DDnd{S1ULdnZ$*wLO+JZ>1-Ix#p1Jr2he#-T!q!N3^$ zUJe2fRJsZJR%AeN9$%uSZZbx`7^72XtZY(SWFi%XQyIEIKN%Qsx)xwbs7wUsL^Q1v zpi=nbP&y8NOudX>ROIJ!0ah(!D0s!ZSeMMS$qlLOCx=LL06gZbb0uRF1WW25G(}^> zp5Y2t`;BSP(%bntPe-X95^n^4`m!1}hsigEO&hgmK6 z&`Ro8_ubx4-3;UYhrLOaB`QTAP{ay-2@C1p*-g?;RoR#ByCwE8gs|GS7V_$ODB`DL z+|;!3FLDB2iIJC!Qjoy5fu2D>A&ko&wOPyem{;z}XHV_JB=i>-zEv)FJRZ-R-56uW z545gHmb)K_2oktDAcsALslbZZr-(B!J9t@+PBoSO@>7S13s?L&iWaLF*x!h7q_;4f zJIbR{d$}b|Lc$xpp5+b?w!*wwz0_dZhAt!v+sW`?JeV4MkczII8dJQwj`eE3>2p4O zpn{rLFnDQ6x3#Qj)$+Oo&EqZwOw?2A(0@ zcAhkIXw+!OGU$IZk1gW1{6=SmEzf|TPGB;b|D zF41;e6fMG{rgD#DzG@F!8KK=N(bAOvGdX;FN>2>FH9!az41IQb-um`tM077kN8F(Z zL^MAVhzx$C2D*U`d}xuVE@^?!1A9MKLNRuHM?q`s2*5%xx&SAilY5#F$Nv_@`IKmp zG(aC_f^4l8ULVp&BB3LNU|LKlzHLn*2a-J+0vd2Mg~+ASk#PR$K4I&OHx`1oN#faz z`*$VoB;y;}k0$bu9nuB~mcwGn1P*+M6cfLGr=ea+f=B+E4^1K=nuXV%Mr;cl_Wbiv z&d6YDw(po{zw6X55sMY#@iKd6lEf!Wn`;7kab*PJD0*o^4d8t3s+9=Hum8DTX3AhA z_06{v%|{N9N{FW#${m&MQoIVOvkV(51eXC#0~LM=5@7u^C6|fz2Jj`kAb(dX_#i27 zGEv_#;O3JaQdc>gh$j(vn7?HF`{f&^J)W!y7D6suwU{C8hfj3S(C<4pMw~-}gYQZ^ zURcu65fH=!M|V~;sq2(F2!jgGl?^q5$N z7RC!Nm?U>bV1RRqKu&_$8-rrXrZm|_MKv;~G$XEkc4xNA^ttN|8m?xZu!mjyVxc!} zeEZo2C;h>CP4_3hH>32dNgKl`5N)ha8C6OTv|X=mWKjUgajgaXYZ3~Q6Y2@f^Wgd? zo|&AZAJdrhG?%A-m_@j-8L$d?81^xCLAXzhm`b}v9Ruf)+QT|IyzF_=5?y z=N&=@r0yCij?ilEV0y>W(28Vxp_K_!2fGHC84)1GYU7z*pkc+#Wbs({D6E*B^RMaB zexKss0$w^QwC2&O)%_t`WD3vA^Z(eIPFu(1Q6;bEh-g1RPhxnF(2%5k#Q-qljI=o>a_qYk z<+tmM$tR|f`10+gfIs{}jaq$k*|Il21CKzP=t4;Ug1OtfC`aWe2brsB_3TbweLorr ztC)1^ju>mN!AVKCR33|V`0*$Cf#=e&n1;ik&?eY{Iib4IR*48kTaZU(rD;c4C5ZZZ zTZacIC&w&1!I3h+DER3s?c8J>|1zm)LEsn<9N=p+l9EWL{x$GL2vjN&Jtmfpg^_WC zznrtLo9+vK9RiSWAjF}fAOzhsdyo&*1-zlqvjD$l2mL_54}2p43AD%9T0<@W) z#UGCezvUT+2p;$x+Au|{RMutiUZ^LR*TQ|M2YOo;G1)^t(#LWOG0^E#41e8Q+dG>KHf{j z(&ex6wRRV1VDqAchNU>gyAzRq;Ye{F)Gx+?^uT9PZ~EyKpFABJVY?1~m7Y`FONk%Z zl~znQs~}Jk5XpLW=Abgc3Ayk`w#meW`fdtUx|CiqyMjK|F{!8{MlrSG0paNqRKi`_CEB$gv3l})i{pa#+EtuMxyrJoV?qH2gx z#Fd_6mkmxV8s*S_MZZD@s%PIo3qaXatLN-fb_CzRW_PQ}Bfs}J!A$m2zZhxTn)->L zsHp1|;{Iv~hBL$T_+mfdw%Pmi$=!fhu_{5$@dp_LS=MV9vSib zdk0r2#aFq9@9e|%yU2B;v;OK{vtfc6D(Q!X*&}JkT=|)Jx5g^YUpA<_0!==F_YvSC zhZ%NyH3ZW}@8L-_3ecQ+0ChqO>wO{tMrX}@Oc_WrLO{O~2hb@^f>c~K-oqS}7Uk$U z#EWGQ0p)gVzS0KVCqjoLM3#cwVLwe_&*K)I;XSmQjIjz3wE*OOr~?$4bIkI>JZk$b z6je`e(bCX7P|~cg2yDo@F8}K%dYBogh?({;m>I$d&Q?;i>DP!Ubtu}faaLPUh{@lo zO~FNJiQi&XH7p@CJ1F{6jB|fsew*aSCe+XbNO1tqd6+?Hd3f?cS<%`nO>aNJawh-x z6`ORk;Roh>l!i&FrC&=GDPUpi7&OdcwROM+!wFRzu`8Ibv4d}^hZxA;2rt>O640n% zlAQm8q+_d$nbvu=Hj4SWs)51*^Itv9hhE6`%CcgQ>a3(q|G$oD*g9eqNpmV|yLAv2 z*)E1Ec4w>YbYUZM2QIkwIRDLS$}*9b$_IQoNjK6{Bo&DWQ>0Ab)aUQY7r{{mM1LH$Oj%c$f3R z|39dgc~fFAC-6?OORoi4qN*Vxgc4=h zH5DTuM7ktaxzc)R>$M8qXJV?G1a+ zB0sjDRYae&;f{b-tR)B$3{U+^S`a#x424s2LzRp>Y+x*nj3w*qM=mb>`u}X+kJLyx z)QW~mNo)*JUdX@{$MIRqy0^FYUwdh6^eq1jx)NmbM&~ayj>5J79RX;+p5!qn4bQ6y z#r#Ju`&w_MKdygy`?s2L;XOu?mmp50&ezvidk6PGkk_7=KhAoF*CD6tJtsIb_IFm=?WH7%Ma=0B*ym!dx2?0>78N{O28Bpwzl(6Glg@L?$@6e1h{vf2(&> zfLS^tEIwlVOG7tY<8a%n{Z3KZF>@TXv7NlIMbljec{@?&Bo-$^DNWqOxjhpUS^H@@ z4~%>te!18=rad+@AYT96it|I^Ex+efi{^ZPFS7B~RG6aW)Ji7rr?f&oG=fO{n3qPZ zJv3x%eRMgpR*zLYI&>SK<=mr69(iSvYK8s$FUc& z6hi1M83a5IC^#T^TAP^6h=5HP4u0wx%(?|A6cNP(kA&fQ=a>_bZ&ju#hYMJ2MZ+Ta z=kQrrME4>eT(IOvyw(;>pJN)zo|}mYD5da8dZsG9 zhsxe`z)PX3Dk)wERe&Df1#Dvq>_(Qe9on_(!M%gOwL&UXyt$D<_+0*s zR0pSzf!inHI`x6m`Um5rWqYpFJ`m-s2()EC7#vw-NwO%%S zqfilAIDIR9t^nG%P|jIDc5=#|-76N`S`|er9~;XV5WDkWLq#b@8BEg1x9W!Sqs_5K zX=`iM*e0y_ic(O_&Eee(Dm;qKW)Q?Qixu)48y9AIwbq4^a70RI@f5HpOXRsEZzH~q zP4Qmj`1q(;u?U!6@IUY$zMOj>=02=i{E(8H&GJKWGG?H5adxAHJ(QUrgcta-;lTtM zRR~k6@>)#hIl~zkk}LO8-6Qg_uG^zszJi&r%8?N$HjaCN5lo3!SHyfQ70=+b# zOxL=>nj&y8X|N(~L?;`##Rn^iN-ptZJQS?c!>8lTGs_+X=dT^5P?n}n+2L#a zUs*jURphV4i|TREJ~Ms1k++gtIk9ajG5$^+t|HIsv>u^5gLUT|p9R>y*ryP_WYu`q zsp}F9+Tpr3HKN1g!RJMZ@nudS7n?38t-ZXAV)`Ri>Pt(nY$Y(+1mgc;$+fvjetR+W zMP_IqI=4Lavlg&LAiN2pvudu*{US2{=Bi6HmI46##PG(ApBA{FhNvPuzzr+Ye(5Bp z_L0@k`;Yg{el>`=KIhtMLE;{o1~K56jI1OovZXti9jb(_;x^TuT@~5;G*9l$MryM11#KG35vk zS8d*DH9pjk%@Q=rEmkhnIX!58nvVm>iBqK#W-5xaN}dn!AJJiCf~9axN;nYqhG=gP zc%}Fgm!~%_zRmj|_PVORyi;Ff7R6!haSbw9Q^TIq73@~(Y=O+?2yufyU?8Il$*X^P z%{^)ywABSH4}GUzm5wTcegzBYo^7LUWglP(O6Mz=DG+FkrqjN6t>xZ*H8HJ;Mbd>l z_-q&4!O&wW=v&8d0^`{Q{SDM<+^xkd{x~@yDC{3F@6tvTq#VWJ2$%LmG6ylERpe^D zxisET>(V3OLq)Qq!iv{wX`|bOLs~iEWOo8xGIWz#$AwT~IUmWw z)peI#IXY*+DdppYxvBrhG!re;d5F?{J^%nW60d_0Bh^mdN)&F7JSW31pqcCsH&8!& zhOZ=6#S6o7;VsAF(zRMX(268A_qxfBd|kM;ZXd1^_X{!Y%^Ms>Ih(>S=7in-eAb@# z^ELrCZ{4$hnY!yL34+;iqoXxkT4DUy1vzvxg?Phb`KE$j_QKC`c#~; zKCpMB@o`1e%Vvw^sZTg56izHBnX~4j{|6UAmAOm|U7#y=nIW@Legy1i7_SJatl08a zk$?1K_l5n*Hy%$Gl81IhM}+Drny|$ilDH)itON+p4vlUR_poRp$>~_kyk@FjDPF@j zICF)Ej7P?xD~sn>@;4fB6bq(qKpxFmLu)QHDAIrE2xXqYr8C=Y(n6K|jhZdlEK$8D zOuxtb+)##;ij4SwWJvpQF5c#X6X^EvXwn1#}Ki0-R-0D zC;$_4aqjQqjG$>p&mOzWK`xa1m|+sJSf4K|<(f3PWta)P%=9SbQwF;u2rq7 zAbx+Q{KKP}X*IO99wIEIE0cnMwgpk`cCS)A#t&-5n9^mOSK{^5kEPU+pwUorG+n8i zEDr*KIbiMJ*C4d5hy53%nNhC^W#}H)y#ZVR)M9ai2J8JDlfO1)6Pwa%Na=l>jPR~D zWN{*vLsbbD4dTsfEGmV3M#B#Le1IOYXL(x2!24ah0wi4l$dMH)e}51&`xQ%(_tfh& z=hfPOrYt*i3XBZ6$8E7dd2HSIomjl@)@s~qgc~P_p1m6jVG|?bj=5*%X#IL67V*O9 zVZsCxlu+AjnRg8nO%no#Y7K9lPK%Xmaq_I2LwBS-nZ{XNYIb$~7-}Zi$F3v0Xk&Za z8?p}5XBw;Fe@wwB7%CY=6C~Kzv%|p9#5S{9OQ9irqHWy`h{?pnMC{x_6Dn8lKCU)o z2^89UU)i2LnMV3tE_qNa)4O&Gf1c}K`=M)9HK?SCq!?=iR%cUcsuf2ijz`^(uDkDNi?s-lBIv@iG01iz zm7=VyiKc`~djzZ<)D!kSKu&JVd23;0IaEf88fYH$&<6rML5|+!-z4p>1w&UPVQ;|k zs@J0zQ=a9#Mg7q;>{{txEtygSrzContainer #1eth0172.16.1.10/24Container #2eth0172.16.1.11/24DockerHost #1Container #3eth0172.16.1.12/24Container #4eth0172.16.1.13/24DockerHost #2(Host)eth0172.16.1.253/24(IPOptional)(Host)eth0172.16.1.254/24(IPOptional)NetworkGateway172.16.1.1/24ContainersAttachedDirectlytoParentInterface.NoBridgeUsed (Docker0)MacvlanBridgeMode &IpvlanL2Mode \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/images/multi_tenant_8021q_vlans.gliffy b/vendor/github.com/moby/moby/experimental/images/multi_tenant_8021q_vlans.gliffy new file mode 100644 index 0000000..40eed17 --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/images/multi_tenant_8021q_vlans.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":389,"height":213,"nodeIndex":276,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":5,"y":6.6999969482421875},"max":{"x":389,"y":212.14285409109937}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":64.0,"y":36.0,"rotation":0.0,"id":216,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-12.0,33.0],[84.0,33.0],[84.0,86.0],[120.0,86.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":190.0,"y":32.0,"rotation":0.0,"id":254,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#f1c232","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-142.0,16.0],[54.0,16.0],[54.0,115.0],[87.0,115.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":133.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":226,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":15.147567221510933,"y":139.96785409109907,"rotation":0.0,"id":115,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":29,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":116,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":17,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":117,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":26,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887324033,-1.055138662316466],[1.3318647887324033,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":118,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":119,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":120,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":121,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1 - vlan10

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.0,"y":82.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":4.1999969482421875,"rotation":0.0,"id":187,"width":108.99999999999999,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 - 802.1q trunk

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":282.0,"y":8.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":32,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":55.0,"rotation":0.0,"id":210,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-8.0,11.0],[-8.0,34.0],[26.0,34.0],[26.0,57.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":12.805718530101615,"y":11.940280333547719,"rotation":0.0,"id":134,"width":59.31028146989837,"height":83.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":35,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":64.0,"y":73.19999694824219,"rotation":0.0,"id":211,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":52.19999694824219,"rotation":0.0,"id":212,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.386363636363733,"y":108.14285409109937,"rotation":0.0,"id":219,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":139.1475672215109,"y":139.96785409109907,"rotation":0.0,"id":227,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":55,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":228,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":43,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":229,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":232,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":232,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":230,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":231,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":232,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":233,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":54,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2 - vlan20

172.16.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":259.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":248,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":265.14756722151094,"y":139.96785409109907,"rotation":0.0,"id":241,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":73,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":242,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":243,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":70,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":246,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":244,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":245,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":246,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":247,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3 - vlan30

10.1.1.2/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":31.199996948242188,"rotation":0.0,"id":253,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.49612211422149,"y":17.874999999999943,"rotation":0.0,"id":266,"width":275.00609168449375,"height":15.70000000000006,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":75,"lockAspectRatio":false,"lockShape":false,"children":[{"x":68.50387788577851,"y":43.12500000000006,"rotation":0.0,"id":258,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-31.924999999999997],[197.00221379871527,-31.925000000000153]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.50387788577851,"y":38.55333333333314,"rotation":0.0,"id":262,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":40.7533333333331,"rotation":0.0,"id":261,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":42.88666666666643,"rotation":0.0,"id":260,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":73.50387788577851,"y":43.95333333333309,"rotation":0.0,"id":259,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ffe599","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":248.0,"y":51.19999694824219,"rotation":0.0,"id":207,"width":143.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router (gateway)

vlan10 - 192.168.1.1/24

vlan20 - 172.16.1.1/24

vlan30 - 10.1.1.1/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":88.19999694824219,"rotation":0.0,"id":272,"width":77.99999999999999,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":80}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#e06666","strokeWidth":2,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457586821719,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/images/multi_tenant_8021q_vlans.png b/vendor/github.com/moby/moby/experimental/images/multi_tenant_8021q_vlans.png new file mode 100644 index 0000000000000000000000000000000000000000..a38633cdbc23014364bfc611d650b2a17dc72ae0 GIT binary patch literal 17879 zcmYhg1yEbx7cE?9f#O>wA2N=k^5TU; zo{GGTp7-KW?!7UgA^9LY@*H}Cd+%K1(B{(ASh93{t=X3Dy466wQ~@&Pj=v%}BW_Tv zbkUAB&>_O7chr-9U4T!T_5s~WI_wo`Hxeo-?Y#yLmK@UoPhtsg``T`mL=N9B!-bsW zgx|Q&gpY0y13~m9EY?WzZE_R{@hs0lpRCMaMHVgp79Xn2Ku{Ma|No98>dQ?kYv>Nv zsWm8Z-;KC6HhR6Lr!VeNd|tr%VOY^m zkwVRr&<8Req~^;ok(4~qc8%?}ztG$_^;BLz`tzB2Oc({;nbWga)0iI=_!KOa4M-C_ ztTazF`Hr*|1=0#(;FjKu?@{<00=iad-;Dj0b?Kb28_=j{!D5U#_8Zd^0xL57Wr;kV zW7Yogi>A0O_xXi?B{b%JsyPNb<9T4@0^Ua9m{`f@Ovrp&86MOYtf-^It+zxU^_2T1@9mNroUAc=~sP<&MwscFUeQ~U<|7u8-^w-37_ z?-JEMYVoHK&{lO$cTS!yzsMU2rqz<^>|956zA~~i8KsrX0scsyr~V=kQBHaJzO|cU zTzmPi`1-h;toXlFvEO)Dhttdt&?@q>Kd)2Vquk7t1{Yd_oSwgUmj1whTCDfD33ck9 z8RJU_(H9rX_0l_CkUw=EALt!#nWmXQAB_t;pO30$=Q^sZZaP`|Lj;nj;8ja2!7oV8 z60r=%w|IV$d3b~n<=QD^cSzQ7Vpm-Q+IaizNe1w?1stu&`ijkMd>wcd20ZSXmE!1I zI8Z;Z4{&eE;orfX6?=uD-Zc^jXFe|_NhE{gSt~z1m@cv{#2Ot7Dt%%N{W=MM*IAB6 zO*h~mm)cFI9OyBP>x`Y6Vxo^NBYlu5a$oT%!`QXsCSLEni?ZE5>9IAgThC|y@b=K) zup@H_BIEF_%5Ob&C2gXbHMgR~wd38|>Y>X)pg^&S(#@Qp{`5}_L)TAjPNKc3{R_Im zSTx*!+wnGYZWnBY3_>`pJ=Dj!#FUC~K^{-G#oe3eQeR|JSmIRTf|KTmOp)u~kKwNy ztr$AvQVd!r-YN-dJzN1!UqQ7M4I_mGlQj}eH&TSJxxDcg!oN~+fq?KYq*czb)8De< z@bw6WQp{wZ*NveGzgX|D9yD6qy=^=-BQVoy8O4?}f=3qpAOvaoMw*+Mnf-ufYlTmy zHG6OWpo!J2Z1iIa~O*@BFdJ2=tR+=&$w0&?W~!n&Tou+fz?XbS5x z?)E3!mMk5G5K4sx%>E`#WAZ0sl3d^Q$J@mmV$t>0g4BG4_>bM64d8oB1YJ65SI|}XnodMBgWbaKzxJQI_?a(SC6hSaSC6PjS3Sys<~Kr(di<~axaTe) za%Hw;r*#TBwCP%3Uo~;Q*)Af+HL~`&f5kcw9!*fzA8$9hki&vnbf2gx1HBGmr|LM$ z`@(Q`VlT9|BD@v0Osq%F*-}u*{5BzxKyc2M9V4oaBegp$`47kKyy)DwQ~J0LHDgA< zWb{EidaHNwHT|vMa~t-2Gd1GWO{fiz5JM=PAwnfkh0g9Ip9Y|WhQKM@UhRN&zPk@N8r}r7J%{fLmcR1XAlPz_PNfc zN|&SLZ2SxtKn_ed>cZM?j3(z@Qm?d4)VR4vJw<`8@OM1x#Ov9;B4#0E z4CVXs=_V=k1>gaGm2iPJZer)vq&w7Rk~oroQIV1gDlmHf{dclgvQ{qtxXoV)uLOSo zX z`CSXah0|4bZk|}UxVTK^cZ*IjP{%t|mFVyBDa8>G@=YnA?} zgD-#oF`fg9bI3JQN;mGX!GD)EonafCDf15VB z)D+V|i~tOxT6z3HYWbn_=c%>V)#cXDDJOGk&}$O{_XS^I7lH zZEVp1(PsD0r`Rw_kove#YX1(JL{n_7*`@jA$u>0>>W$>ND>KqKq}Ht`T13M%;lh2>dyc6VNPYTEIH1cr}Y|Ey8a zfu>QWO3t&dD+t;#Tx*{x3V3SpLH znu1t~d;FK-b@fQC*lQb(x=k21XO9pwH19Lwo9W8IV#p? z)@G&1;m!p8%7LJ(odTd%R+6>fYmz}chCuq(_Lj@nv-j4-x4~*4?9!r|4BSn}f|#r} z757FHa_Ywi5}df-E^S<|xZt8dV3W^5h}cR~{!AaF(d@Kv{RC)ZGv(ZLcp z>sU1pq%7n3Sj+l@fW>QXStG|drHMNll$Zv(q8sCI+I7Mt_aIJ2_zUE0RA#$Sksm8* zLS{KD)hPk)JS<899bTtfq+w`e_-!V{Oi_1@LNp>r$i|;qZ}Bt(@>wcUfsK!QZ^9x9 z_e;2Q>_Fo&8N^H-8 zA7?b&6>{I6&U#vRSdcO|)ETV+wGWAZjIbLuHj?RRL+P;Y)3ybw&ihi0u9rr6AKMkd zba9PkWo4O&L+UclrXF?+$x&-~LB=;Ky?qNcSMUjAX|ZzJY{J+s)~5OZUeu}bcM`9>7?7d0=Zy6~Y5~94 zHqIZ;oY`_Syah{NYlV+)xxLbXQR{atN zIkMpwHgL#z4=J50!Dc?&wXv>)7$HMBt7CZu2jv^_Kczn7(1g|>M9{YLw$Z^$q18?$l*f4Wg2Z`&Fwa|Jt$vzDtSl(R}P5tzI;DyAEYm=!kN>vXVM zxLuUebn3b{#i>s;%+;=b+%mbjhkDqg&Ov{*dEWQmfOm%?U3FWGbGg9R6i{sE`9>gklS7l4J#-RNg@sMFv zFyxqk_j_}|yMn6^V6`9&O%fuBY+4vDglCVl03$43_eIrxG5UqsL_NYGr@W}du2sAA zJIPSItk%GGA%bI~tWpaAC_?AG>x01TfDfNEe%T!jTL)_W>F}(oo@^*Zr>gIt#{R1VDuD*$^ zS|^}(ASNZ!>3VV@mM#QKDN4F}YVQpcARb++%x$%xZut*r`?qmc*7{NQu*dPp?ZHdC zGK#U56sz=K8-&)azkSK-oU`JM1VP{p1EX(}lUClsuKt+=C$BH(#vn=zgRmJ9#-%GE7dn}3|Q3kb|bt{!lG9d=icF zdhScP!jO%;3j8_&aZ$v*7%YL#;q=;{czRtFkMoHc2TCv-1?mGp})8NX4wIBz#d+Vuzl-*M#%(@81e)fgR>XUs^{(gVPM6UW*HV=EHlZO`8neF-+F z(hW;?u>OfdAFfaBd=)8hlv6)C$zs8c*(;jvPyJ+{nks%9>~hZ{=w=++HL-^rG8)8P>9tK$gml8CJrcltW;q!$snXiI{NYh0I8LFyWkilaW; z@n(xXY**z11v*RJ#^7q5{uOjWhH>_QU#F&-eDbtO;{!f6^+>2y56M>*gcftib0;aqY@-M!r#9+Q~ykQjU9PcrZfBmixm0dA| zhBYEv-$N4$8B4%5A5C%1tN>`RDQ5z++>P}43Hn@7$|t>C^=!&3$le`FvShp=u8k#7 zN{MeDNG_)7ywzuBI{@ej5#4bW-sm=1J7#Xb2>~6(o2GPgJ9sxse2D6Aj52kP@q9j* z@+i(ceOU6>SH_J~3H_eldKBhBK9+Tm{KgRYTmH-MquX>Ta8?Xf#F{aE;)lhj z5!JBC9m4*o$!R`{J7JY@D{}dsTJ`%eJTtIh&n1BGbQ*d@tGTHdjH%$z>l+@Sj*-d` zymFLDSD)iZNZ@Bf+=#r`t+hilrAJDneHKuurwiYSXq=pOt$0OS#H6qBhAhQK{u_xbl*gJZZGdYFlkn>Ys&Qn( zYVG5UZw}feXdn&wr!XSdi8}R6Tw|RQ&9Ym6-uHE*0$l}4C*OkIBu??oL7hwdL)IoSBB*R8Npahnzhc4b!tgvLS2o2 zTBl>?8SUah1Zwob3#$ik~fqlk!&e$)ydh zHnA*+)`U5eU8uTZPe!Brw&=-=HbvV(@BX#&|0+KYgLt&BlwC_xy0skCzX znZLO9skp03F}sKHN+&)nN#%<2>#Mvd_*H#E+^iZv0DQ~xx=X$91zn~-pRjz7ygbO< z@;e~+QgB-%6w|9zCs>}dzzLO@ROrm{h(z|W}+I28RQh#bHKL>C$FP( zysKRD(zYLRc|*-ixJb>w_BK*rh{@oK(-X?@sUctdRn{kr-XU2h2^J)?hId$t!WyO> zkMc9?-*Rb*8+%?u0eP$@UB)8jSZ2YU-3L|$(|n7;y!wvKFW0i--cL^O>lgj7YbrWb z42H0h71M5^Gv$0#*&=2<>CG;7au&oY_jAvE{5hMchsalPrjTvNNmS+#DMY^ataq+u z3rh$K|HZ)8_jZGcj5@$k@_U8PMk%!8B&=#ZhPQySrAbD#o`r@v!d!yVh6f+hE*Q^d zI*5Hfp8Q*dpT~zCS+a+aeg}pQuvOqft9ANcf;zSFSI{u;Kd4vJZkfm_%vo&75ekrq z8C{%Xw`uX^w@f-Jh6Ney?6W-R=wQxrwrmlkqBft+F-p4~MF(Jw0uUxr|3$#GLfkXd z^SCZBT{q~*+D1`bAPO`LvG0K0Lh;%i+&qAj9I~?fSOA5Dwo+$2xa9|i+5vB8uRlCP zI-J}J#@zBkU2d*+PdctPS7)>Do0@R0rM`#1f@+2vJud)l$+T@PD*v9}e{SPgYKcW- zdM;)b+%>wc?Cyk|eY!m0Gu}DPsXxA(d$QOq;oitTot7=Gi^IO=6@UhT@?39)uPx-1J`Os4e9@z2rmQrg zpVTmE^QR4wVm04w=`l`?oI})yow?Z+nMs(KTZDBIVxrWM(TB-70AfEliWk0edWuNq z_uDOmr36-nH!}F9_7QLpa)<@`J!fhipPE#LgRRNjnI@;9douW>DUQ!fr5uM@a(H4G zK2~B?@qIt5Skh2=qiPEWWOmA@M9yMzDaXgasOsE%6N50@Iz>@nbiA%E{RMOnWo)$e z5~j|PX&ZuHp`C1p5(1^&=o=e`OTUHQB1m@wMWfeb9}O`1bzVGtEy32|Cgd=#b&f{Oy+ z6*O<2DB!v2`F=S2`Res_Pl3Zi|3a=|UUfKXeVj(wImSB)Iui?yL*B?2;5va-VS|iq z939WEAZONQ1po}Iz<7-Jdp0f$4-8xRD7?_AwqIY5<>L@CGj4tkx~4$NWc)%sPt&N~qvk=b27kYF#3R=f3R6 zjhe(b)T6Z)GfZq-;9;tOnuk{Lp&c%8)(+7g^HwInMm-Q+&W52CNV53x6^5E`lg_B7PDr0APkyIFh-025+IhOK zYF&SH_mRT$qg zLSFMAkW+789n3H3{Ef@PhS>F}E5GQ@`t6lu(_iaVfcU`p(Nentf zA)7yK1krNUjpj0rN!h4!U z-re*S;|zDfG~im$Vcird>%{i*a2RwKRao_B z>B|cbi7kId^Ck5$qXpDyd~)snpTS>Wb||4{PrjTCM%T%6?i}}#Zk;XD*SlI~FyJ-w zGo-~ChiAPO89IyC9CnpbE%gC?RBQ`~8rguj5h$5edOsCl;4b(M@?wze*wp(B21On@kQuRuP$T zcaphyj@XJnKaL+G6ptvpT1FCA@qE~5g&_1)^5Knek^z zK+{*rErU0tCTxq|A?-;pFu;j5Ftg^x<7WW`G0L{>LLYFBcqMn;;eh z1^`+c@er_h3gHM<(|+o-T9#*bNSnd+L zQcN&90+Bz!+c@fD0ARjHi=Jb#m)9RKg4+w&iQ!!)kgIhDg1em?gld-MPfY@D{wURG zGO4*N*h%?q^@QU#HX`bu*1$VSGAiRJ-T;6!$o>U^F!hhUR9QqYnry0u2Gv-`SHK|j5pYZb;ubgZ3QGOtNlN&%6+S$qx&Zl25mP znzw_lw_}Ifntm^_qAaj1&60F^f2p$oa{2>i*Hi0=dGq5wn(+25p4ZKS`M@&0gkY9q zeZL!S^N0lhz41uR$Ka(gx1jru8vpG@>qMC;NNm0F+I=2kgMUp$ew;w<(L#Dn$?4kT z-zn~Ex0OZs2JQv7|HhF0i#Rt7Fgh4L*BmS%d^;MRFLUu>`l*)nwkyn|PxnDuY zGpO}ul99p=s+#_DQ*_U0xa_;JpA|~y!=yR5YJeW#L7)!K#S$Ke*a{m*tlh$Kx*4J} zOOjz$Gn?T@4R~cVH&YKYTU$!5FjVGmcU(u@x#WPr`pK=`D(^U?Oi#*VInO7J6-;$| z=M8R^$6tnW=Ke5VdzPGloQtjD|J#ZA)~0~NvxAfmcih(CQCX5K_F|t2_{ibWB^~-G zI4%f_nIrD>g?+q$~yTO{oSoY&%qqirY5sjACfNXvo8zz

b*{Hq>az{aEwl^vuL-ETW!AsvC0U0rmZ$_<6OOxU4rH z6u*cCCrj%CbZzu?LCU+fGt1_K0=ic0aiH8Tv(d}mzpAB)%@%6-6`Lyn zd;=aEyurlO2oEI|CPfQU*p&b-f0^7f+aySv?;0IYmhE8@&azM@s(W+tZ9c5e4!{sX zfiaPRB-x>31b(-@*OX3SfPO={;5>XR!!5^M3})_?`eW3RYLFL4dn&gCRlddq!UZp3 zAO=>+?ALa2%k{;xrD-8Kl++6_#b$F=O!pn7J1$t#B@!$C5)BfdlU5HN(yJrQ%j!dq}rfM(a$o8fv zTp~yzd4j8p6OX6R;`GAwKclI1Fc=V2d+jG|XGUoRj0-i(!+O4aQ6eJQWJVRWoER@>i z1n>F0IMxw9Tw>{kK1i)xicm&1#D5gu{o>fIFIqAt;KD8L+_HrIBbg;wPdL84HblQ)C+~sF>=()-`U*es#QL*>k3l!ZIIv@K;?LQ#Ty`{4Un&{&BHwE!4CHY8s zb9=V6y1JT|m)G4blMWT&;|nz-WC)pfM6;rDh2LXBA-h@)ttSbpP#BK2EA@3v!))k_I4*8zu-(cSeUL5Bkr5o$S)*>_9hh!9tbP63Ei-@-I*%W`U5q{cz|E*9v&Qg5%&dt z8!AqBkj06l=i^H)WY~-BjUhDa&H65JxDC=MS2hEuZ%EK2A?7il=<>wG#%6A#1e`7} zo5c3NGd9MIpFdt|q97+9fkL^>J9E;~(sFV}If_$KQp(F&<9&f+WOeBp)6DcfK(mM& ze>?Fg>6w+b&jHPHL|?21Nq6=$)R_j15nrFPD&Eh{H+?w}3v;%(RKEj$S!Mmq@NT8ky{N_Eq*cDRbAP`cP^8&pSqLb}@=d)^ojf1TGW= zuDwy*%gkJ?v!NjzhLx@$bKNNM;+5*d$YJ{zRg=C4TI5VM%m4OPr0(G0(9qICd@hXL z6;g5pN&P^^55gwB`9sbgRWKHhJBqdJyXfNrVPqfmezre`PZKHj!qWT@RALZRSPem- z5^p>^3k%wi;p4v4Z=Si@6@b5-guorPcC*_oI4o%WW^tQ)ge-JewU zoM=ex0>Ge^j5r+xnPav+yv)pKieCIzX%!U}w>)7m)}-_ukaFHzgi{wn7|G+5d1`x{ zr0MDDe;+en1TZl%d6)d(P0|sYq|2Usd$A8zE|HZaJf;mTq1MY{WzY(4=jWC?_Z)ui z@>{sQ6=vuSQ-CWdD5!~KaGABg4jtowUSD0oe{hF#+VlZ6_-BZIr3cSO`wR8M!;{5P z&L`WkhATD9p3M{ij5184NkCuY_wR=axPKtim(fa;f|ateva@{@{tu8YWZ2ay=;maF z@I8Y{2+_`OGnrC+00CB2FlwY`K?fuLs|<{990C|AWwK^5>2yB&!-s3o^|_v60eW*D zIjtPRgt>3DR2cA-q_psKZU(LJb|M7&gXFocbeWR(9K=jXv^Lt>+AmBjhJYXLEZjwN z+nG?T)i-~9YWJt}VS|-;lcg9Ywm)@XEfNnH|K<+Qy|wV4V^-iM73T;2p2BISVy&?R z+1aH=A{%|~ffA*ZDM)@p;7A)n;Gi}8<&9~TNoV)C((aRbW?*bgQDa%hL7|zz%%BzP zuZ%##mOR&z{*e+*!tvud{eI$Xe0+SIGM?-7tqIz>evMhj2x3da=m*aUR&H)TZ=~T$ zd9w*K#Vqo7GbmxG5D`KfMypV%*cRFuFp1Th#7&c_utafC6s0V@hnSEh6l=z zyt|W)LV62R*F7WzlgwF660sK-1E$zj*x_n>RjMHUIn zT8DC98iDbqIji4HQ0M-xXPE&3r(Lq+XgHPm=VWNyYhSJuNN)-bXyYYA4KX}?A-WZN zb>rEFp@zUGz?aO1V9cg zUQ<9y0QWk|6g3&X#u-3#Q6+3YbxzWQ4Ypf`Hc&YieLT8y*|r}!5T0qtpq1Oc+OD4C ziyrZ5{q-B}`y>Zb62pg))goLh&hEpb72S9zFOnb6KRi8D{r<8C1+?27}e0s3aI$1Q*m6Z`| z$9mVvQ^)P17IbJq=TXIentF%@*UeafK^jUZM zY%xzfxFw$W;#g%ScC@RLJViv{{DLp_=ziV&AZ?_bDw%!_85R^+K&$c+cC`b7;gf^! zdkf+K++yzA!7U(VlTN^zS(Dh!w$apvnxHq?H_9PLvkrmBv6taxYGR@0E&bY$Jxn`^ z=%9sJCCM_P>1ycq+bfLYH{4p6!&4Tkb*@^gP_cSk(^iEz7E+LhakO54^8zbDujJO_*>BO_v$z zj4LH+Ds`DR|4;;TMfm!oSDjZ-&`LOgx+g|WEv+_0b z`B9yb=ef5Ik~F%XVxI9y;y5N!Nll=ggAuL{8xrbj76_0td5R|R>l;2Cfzn=3!d;Tg zH0#m>fYa6$rzGjzU3HJ%hU1Bn!%yO6*k-7#&+0A@tKgsc{ck9_A!uFh7yJE-kH^XH zthZn0>y9|4+em&Hl=vTnhm_+q(-x^$JEaDWqgv>riK>>G`411%0U!7QNTNZop)^vXVh`0e*yCO;tD%bx59ow#0oplpi`6f zY~gW~ldgiAr6oZJSpww3S40G^TrRSUENHG~msvj@=;JJ-b1j`ZSji?Ee zLTl2nAyhWMHZ3iUzksZ{!}?{2zVfqu5>Y~5@~9WF(^_a;b~E&z=QvCjw|5WNejE?; z{%reMvCHi`dto8d?R}82^=I3&WL#s(LB292YEg%0BERP|DVnt?s>gftiWB$s=Nbvd z=SUbyI<#ttj~x{S{u2T!Q2RFLG*^wk1i9**7zqh7h@5Gl>9xo#|6N~n$3bb;6VgFN zUzJ8^$nWgB*Ikyg?flq8)>h{1cvk?-rpolPY!0(GM!SOzXP)Y2)R$YiF3cd026Zgt zC$-2(vYvx-rJwev4#xy9N*bz{jirO3CfZV~CgeytlXU)Ef;V1Vt6J?s=q7 z=djU<1Y;vHrhH(A>JE~{)8;^F*eLe7O7b%gL#p|Ep;tpwO&~;P>{6;M#4_XfS--bT zEYZIdr}5mCRMMM&O}DlQ6Ni^>a7I$R%&KRyH7)Pr_G{OY?5)>8+e3}RjDg&&Hhxi12ACsFKf`f z7oYv-$yT{+T$4%p1=MQOd3No39a3SnDEYVOLS7{rc{wd@iSWE>-p@m$%r#o02ziO% zxSH&+4z!y~N?Q zoNX(k9_6Cxns2M!L>{r^m_I8Tcf zb}|n5Q3iXZAGJYRPf|ojKYn^}?zDp~9{^7qdg++dm&hou72(25PwkVg{v+vvH1|(V zZTY~N<{za>gyBEcYJXDDYhUqd5=9N`TIFpYkmjC}Wv0L1QF^n3MhFlZVwMZM8>jvG zc8j=_#^DgaDeevn1;A16ClmhZUAZw+BJO|7GvJ0_=j9@hPU0u26PbDX zQwosiGyh(|WsVhbh7B7VSM#^2+bq^7=g9m=hABYDPgecGAu$(OENNcD;k)pY6W93} z{o=)u5!DB6WD$7xgnuu7CS28A_MgzhT{@DDctf8{@&8fwUBv4B?w2@UQwv)(U58s+ z>L-e-FMaZbnb5scP*iU^!5_+F|`*or!y+O+GPRrl6W9d9hq!*i@F(^6WPi+`|1oe#XHM#V8H?3VEHJp-9MA2BWM&*oupsHc!G$ zIl7T+C&KU#W><18`npZ12wP^0?we&E%f*<#SDY9E*M>ei&)MOq5yfCnX@Y`f~9Nw$HcbW(91%)zevdoylqjmA@%P2At{~42)X&EiW?+*+-4v4Ycp!@dWvw zDgBqLm`+2=~Fdb&CFawyt!AknMLEA8XK%Aupv%7KBaJv3dUU~F zDnhkLRK0fs$mi3UOTj*Aqy7ON6&X^UKXq(73j#;3Um*(6a7w=Eb&`O@KbAb|FG5l5 z@1^u!`!0@ondd;Z;zY&k>0abQ{OZ~A%;vBnGk7d#l4-^k?+a$ESC=ES;2A|LIw+sG zov|H??43x6y?*(tHMoJ0&o49co4MN#Rz_Y6vX6e z-(5!8HguEgP@N24RWa6d`e#;0fe(FSLq%Bx^b6%W3e2L;B^-)861&F4|)pMSa)fA5d%70C71M6))P z@6i^+GmhDfM`)-_;NQtQtm#tp>BTTUxJM5ND88t#K~=eB4*1M?e{-tjjU*7!-i@fI z1OK@F9K%`!K$E_XgkzH6ncXhYx+T$6AO;}gM<%e&Bg)$WXxf1q8@o*ynQp zYOfb;D0%56T7))UhDk!_V{bZDXuC|Hatfm5`Sk|heaUkQ4+GhwK!Ady$P(q>(9pkb zXk=u>^>o|eHA?VUb+Q?^S-X9b&=9V~`K1fl4;eWigu`zNPn6y5&tIe;*}Ov(ut@TM zQG6T}*(ppat0YdXMG1(TCjEyY5rKi}HS#1o;&8mf4~Y}wxFj4D1pp`?RzP^Q*`u1^ z+7CH=y)dE^(W%v>rJTa3etdjF^$1sc`vS!2@rb?P}6)hsEi*@mn$*rsZ z*JfPQrK6anuKW4(vrA?lGMd2=a|$k6h-LJAAtTMlC75cSiZj&6P7!sI%c0aCFCn&_ zaw0M}v%sR}WI`hFLD>DRdboJN4l-IZz@+>2#LXSmU)?JFXaW!aQqmm<)w~WAnW$&8 zO06QprL)YG)PHgvYRjT^Cx2nrbs!rfMl1Wtp(E@8wGf-nc%bO#NzDYpej0D?x4Vl5 z>m)%{(S?aP5eP~xF_48ah}6gc{lcH2I-?jlkMGVt+md@3Yx}V)kSbG6R&v++l4&6SN}?GZ zRdZTx5P5hf?(j7jkpe_LZf8E*#8~F#!JL+s`~$ggf;}>kXtlZ;G5`Fg%3Njq=b<&l zU>uvr=7(hE?<{hSeL-8xBbhI9>Yytcel^R1t$?rf&cX~r`L^i=o=O^IOom_Dd{qUJ zail2X-MQHDJkJBL`vp2MOTG)`p?pgp>l(&y%MI_{_LSQNCk`w3YSyDK0DjbJ@_kjH z;~sf4NKJJ26B_xmzpX--4KG(+W{l;!bSMkmne469Ugk|SEIl8^SpJSU@$a*kq(bN30OYWo%wjL35S$m2|v-vCEV z9;fQiEDv%U61L=z%FBOS5sN7^W_wf@xm}`e6~Qx(V3xYgj5KlI_KNU67`+1Y1GMbU zJCM?69K(%tleG#o%!G9113xSRR8o@6+FDyr)PDtYxdf^ND7r|>3+m2t;eOlHAbwrs zbw8)tp{`?#N>Hy>Vw#I7(z02B0x^N^}%SwYLlWp@+i|) zatj!bQnjjCdTV?pg>rnxS44WT0OEQfm>^wgO2=?0i6_}Y_~e$TTEI%|SPD1@R>bq< zg2`!ar%7_C%Kl<)qLZQSw{jsf^Jc+~V#S-%V{$ng&!1vV3q-tD?feCew$g(`diU5! zW=HzyUb}&!#!sNpq;x+tBf*c`0eX-_G$KL-Wq=;r!YaE6?I+d zJP4v zYwZ20rc)i-xvV)D@&Z76bEK3vf9%rU&<65}(H%_@)zH<-)#%-2Dn595eqv$TAnBkM z^?WTC_!ofY$wQ{V_+6uP$VBR2BBM`Sg;4ED@K2Etf1mSu{D-HS(;C?LO{??a92!_V z^fEQ&{q6@vX}el^h4NG4kf=6kDcv)sPjqb5d5K=xaT0#lv=c#($wC>ad^E@~Mm@Gg z*#Dobs?9oLE`6UK;Mo!1-gn+^VhULP-@Na-+)QVz4a;9j(o@sY!kuLImJ|A44rgn` zIu*tSH};M!Wk)1otS&LGhX;^ipQFMHRsVIpn(Nv(l(f-B@=!QB7Ef`Ftz2%qq+RF( z?YerZIHz%hxh0M{&AUy=+ar(f#0%E`)axho@(%&|?>IU;FY$xcE`Odt=|Q8V#;qCF^rc~|!; z$03QC1fa!sN2;rnk3x_e@|#I(4czUYba3DZ(ZWR-w~yzV%!jnS zI=|~Z2r@9^K6cOV9SQpw{wF37N%lAT=kQO~$#=au6eB#p?KtpWTcCmd!iM34ElUM` zT%NWTfA6=o639Q!#xt%X%4G|l@4IemhMEa^QItX%L_qY$Ma<64;qt#U2v=WU9$EjA z9y*uPxeXDO#I3#AAZpSPWa_)nnPJ12t%$=BrW6EHBz%c#iB`PC#H;UpUmx|IqEa}R z7rrqO^$_=QQo2H5f_61(&4qg)TW+IrnFGPhZ{~x4OK4gEcN(vBVXo1T=ck8fCUp6g%z?@m#P*^>KMXZd7=E>JyreSU^X zm&dee)0)ofFnxNm&F9I#?R$;Wa-}&AZ>SGmSY7Xaa?bw=^DkUntS0V%@Z1ESqs_;8 zB%Kt!IXVxTp73@|usNa7{M#zezDcJxbX|GuADdvInV)}7{OoD<@}Bi=_79OW>$m?g z`@eA2+2Y#lhUZV79p>-|Dl~EXVb}it@|S0quN(C$Xw0z|zW3|pzC7=e*S+%Ht15O} z@PGbzli_EFo6LXr8F5J6UioSE_0JD~Z_+CHU^$2X`F*{(@yrtv}nS5&!vBR_42NAzV-A%y8Q(dUenH`-^_q-7HNxz5MeY)8FOa zr*FOXrR@Ixx_i}SR>BV~r~Y)K1J1J{=R9QKWpYm{_9*M9=UPf z#iNJjFFzo6UDuJ@{{7O=r)w`>J9m0ZzP{XlyB||7|J(VuaDhQ-V3hUo-W%6agW2V_ zWJdk46ezQe-oO8ud;g7vb(?Rxn7;qL{e#Wx+}mI`=qZ5}=ncW#H{L8@?Uj_5D>q|nKA5%jk{U=wX7;6)2FF{BBqTqj@vwYd~H*~w%fp*k;|sH>+@1|w*I$?a-V1P zd+48%X}y>+<)EMxODP*S$b{vqQy*sV>MiBYSk->?!91O=^h+_kcTZn8ykpo_c_!IT zX|G7RN!Ox^y1HxWv*+ip+-container1 -vlan10192.168.1.2/24eth0 -802.1qtrunkNetworkRouter (gateway)vlan10 -192.168.1.1/24vlan20172.16.1.1/24vlan3010.1.1.1/16eth0.10eth0.20container2 -vlan20172.16.1.2/24container3 -vlan3010.1.1.2/16eth0.30DockerHost \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.gliffy b/vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.gliffy new file mode 100644 index 0000000..4d9f276 --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":566,"height":581,"nodeIndex":500,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":-3,"y":-1.0100878848684474},"max":{"x":566,"y":581}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":-5.0,"y":-1.0100878848684474,"rotation":0.0,"id":499,"width":569.0,"height":582.0100878848684,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":103,"lockAspectRatio":false,"lockShape":false,"children":[{"x":374.0,"y":44.510087884868476,"rotation":0.0,"id":497,"width":145.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":101,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network & other

Docker Hosts

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":157.40277777777783,"y":108.18042331083174,"rotation":0.0,"id":492,"width":121.19444444444446,"height":256.03113588084784,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":99,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-126.13675213675185,"y":31.971494223140525,"rotation":180.0,"id":453,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.4915197649562,-156.36606993796556],[-121.49151976495622,-99.52846483047983],[-229.68596420939843,-99.52846483047591],[-229.68596420939843,-34.22088765589871]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.82598824786317,"y":137.23816896148608,"rotation":180.0,"id":454,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":55,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.05455395299924,191.93174068122784],[291.05455395299924,106.06051735724502],[186.27677617521402,106.06051735724502],[186.27677617521402,69.78655839914467]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":332.0100878848684,"rotation":0.0,"id":490,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":97,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":9.5,"rotation":0.0,"id":365,"width":141.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":98,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Parent: eth0.30

VLAN: 30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":342,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":96,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":332.0100878848684,"rotation":0.0,"id":489,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":92,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":10.5,"rotation":0.0,"id":367,"width":138.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":93,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.10

VLAN ID: 10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":340,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":91,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.40277777777794,"y":126.43727235088903,"rotation":0.0,"id":486,"width":121.19444444444446,"height":250.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":88,"lockAspectRatio":false,"lockShape":false,"children":[{"x":236.18596420940128,"y":158.89044937932732,"rotation":0.0,"id":449,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":53,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.49151976495682,-152.05853787273531],[-121.49151976495682,-81.64750068755309],[-229.68596420940125,-81.64750068755139],[-229.68596420940125,-33.27817949077674]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-179.77677617521388,"y":56.523633779319084,"rotation":0.0,"id":450,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":51,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.0545539529992,186.6444547140887],[291.0545539529992,117.79470574474337],[186.276776175214,117.79470574474337],[186.276776175214,67.8640963321146]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":447.0,"y":150.01008788486848,"rotation":0.0,"id":472,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":87,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":473,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":86,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":474,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":84,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":475,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":101.71008483311067,"rotation":0.0,"id":477,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.30.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":350.51767083236393,"y":87.47159983339776,"rotation":0.0,"id":478,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":79,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#cc0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":94.0,"y":155.01008788486848,"rotation":0.0,"id":463,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":78,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":464,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":77,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":465,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":75,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":466,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":73,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":80.0,"y":109.71008483311067,"rotation":0.0,"id":468,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.10.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.51767083236396,"y":95.47159983339776,"rotation":0.0,"id":469,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":70,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#38761d","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":341.0,"y":40.010087884868476,"rotation":0.0,"id":460,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":417,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":418,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":419,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":198.51767083236396,"y":41.471599833397754,"rotation":0.0,"id":459,"width":175.20345848455912,"height":79.73848499971291,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":17.482329167636067,"y":14.23848499971291,"rotation":0.0,"id":458,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":61,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.20.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":330,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ff9900","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":279.0,"y":129.01008788486848,"rotation":0.0,"id":440,"width":5.0,"height":227.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#ff9900","fillColor":"#ff9900","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[4.000000000000057,-25.08952732449731],[4.000000000000114,176.01117206537933]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":56.0,"y":503.0913886978766,"rotation":0.0,"id":386,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Frontend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":420.0100878848684,"rotation":0.0,"id":381,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":41,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":382,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":383,"width":98.00597014925374,"height":44.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.10.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":384,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":385,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":382.0,"y":420.0100878848684,"rotation":0.0,"id":376,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":31,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":377,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":378,"width":98.00597014925374,"height":44.0,"uid":null,"order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.30.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":379,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":380,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":214.0,"y":503.0100878848685,"rotation":0.0,"id":374,"width":135.0,"height":20.162601626016258,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Backend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":376.0,"y":502.0100878848684,"rotation":0.0,"id":373,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Credit Cards

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":627.0,"y":99.94304076572786,"rotation":0.0,"id":364,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":25,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":363,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":342,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-183.0,310.0670471191406],[-183.0,292.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":410.0100878848684,"rotation":0.0,"id":363,"width":144.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":218.0,"y":341.5100878848684,"rotation":0.0,"id":366,"width":132.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.20

VLAN ID: 20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":297.0,"y":89.94304076572786,"rotation":0.0,"id":356,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":343,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.0,320.0670471191406],[-13.0,302.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":222.0,"y":420.0100878848684,"rotation":0.0,"id":348,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":349,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":350,"width":98.00597014925374,"height":44.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":351,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":352,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":410.0100878848684,"rotation":0.0,"id":353,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":332.0100878848684,"rotation":0.0,"id":343,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":203.0,"y":307.5100878848684,"rotation":0.0,"id":333,"width":160.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 Interface

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":303.0,"y":240.51008788486845,"rotation":0.0,"id":323,"width":261.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

802.1Q Trunk - can be a single Ethernet link or Multiple Bonded Ethernet links

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.0,"y":291.0100878848684,"rotation":0.0,"id":290,"width":497.0,"height":80.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":543.5100878848684,"rotation":0.0,"id":282,"width":569.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host: Frontend, Backend & Credit Card App Tiers are Isolated but can still communicate inside parent interface or any other Docker hosts using the VLAN ID

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-33.0,"y":79.94304076572786,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":345,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":340,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[157.0,330.0670471191406],[157.0,312.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":410.0100878848684,"rotation":0.0,"id":345,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":323.0100878848684,"rotation":0.0,"id":276,"width":531.0,"height":259.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":19.609892022503004,"y":20.27621073737908,"rotation":355.62347411485274,"id":246,"width":540.0106597126834,"height":225.00000000000003,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":2,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":1.0,"y":99.94304076572786,"rotation":0.0,"id":394,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.5670471191406],[261.0,108.05111187584177]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.0,"y":90.94304076572786,"rotation":0.0,"id":481,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.56704711914062],[261.0,108.05111187584174]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":104}],"shapeStyles":{},"lineStyles":{"global":{"fill":"#999999","stroke":"#38761d","strokeWidth":3,"dashStyle":"1.0,1.0","orthoMode":2}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"14px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117295143,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.png b/vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.png new file mode 100644 index 0000000000000000000000000000000000000000..32d95f600e1d0f028e5a354584d7b3eac1639e35 GIT binary patch literal 38837 zcmV(?K-a&CP)`~Uy{|NsBV<^9ae%>V!XU&~tip+HNj^66}A$mRQ)nVITrYtHNc(eD3Esqa*> z`kbAd%gf76to6~-(lK|cWX@pB=>GHa@o3Xzg_^4Ws!jc*N5S0jld8K^uJzT`)$ek1 zjEs!W&(7fC;Fgw^`k+76^8b5_oPvUbLqS5dw6u?pkKFqIl9H1A{QJh>@4UUdh>3~o zZf_bI8wCUeRju;(_xAs-RR8+*@o{lbulell>t$t)3OtT%C;^muqGbEMhb*uuoa zthc;2H8U@1m?bADOrYYNL_*Bw_Ge{V*N8X=7YhCG-9?x^NsPS9$HfQ|DjO?231S5M zlL5N}0aL3>H+?Qtsp(QsPhQMaGY|=~Zwi2)wv~;4X@!Z;naVZ8!1Fgf;UAc;zXuVure|>tOWE~Z8pARn$o3h1ObAbNj!%u^* z_5J^CTT#Z>-^Nl<{-!W<+HSAO-u~F8IZIryPE6BPOvND_pf4@y_4@7P*??SStj416 zXk>tnp?7p^5>${_Fg7T04AS7|-0SlB;o#|%K{jY`-gbA9fN4Oe^_8!W{kmp&I5V+7}GvKNhH!eJfrZ;)8=# zpwqakn3Tfhgs;~cM<7#nn}SJ8E;MGj>e#%;Zg8JfSz4 zvp_CT_#;99000DZQchF9>dx@GGiCe$0F;4AL_t(|USeQi8bn|eFfcF!*t2>a$5|-4 zqi+-{DqATiEeu5j0zpGVL+6shegeP$D|}pn2ahA~{O#-}nH(wBHO4^h(YZg#^A+KN z|9(9^qXjyfdTZ6k`BKNgy1XfuI#}PWcKuvVT6W&stlrk;s=3{#zPlN!uhga2huH31 zr`8S1DKDn%eDlluV%M`h{dz*^`D*Y05VF|C*xY@XF0?m}R`!QiLc3?IW1l)P+Mkbp zJBq5g|83EGj)%%QDk1j+RInXOmzz<@tvi4Bp-w>P4brX&x2%xY?$pi^;{7l%Hg?BD zJ55Fi5fg+k(=;FYt{WK}yeAHAS9uWfbzKxoYd3T=I6sC4b7@Z&KojH&aBl%8i9-}- z?#F>Rt(WBXR7!D<-vZ}KCSI)MO^Bvvo}kYOP2n|+wT0-Tfup(p2GARf>`j9s%Aww&IJlPEu;7F=CZlaivP(gV@h;xvwx6ru^ zBi^&_U_fL%zq?{Zo@g+g&zw9$)^C)i6B$@iaQqn!=86J%h^FnBcY0rpJJ~^##7rU? z7Lv=t9CnGVe@zLhxET65jbUYDjyRLi6sJMZk+@VyPOp6Q@%}}Vp)&zXOA9BNYAhHk zPJdsifqsfY+%N|TiD4C?WD`w0`Wlr`!dH43t^GJfZmHyz6Tx(urgNIZIt%KOUYC`PTk=3+Npo2+fSsJMp3%#W~E`ov*Wx}10c*&ublvv zD(EoR5fj}jA^9-=5MTTQ=@$bRPJd^w>3lD^m1R$=xB^95O1JfLE$*m=hd7#awnFfI zD#V+HQuQK@#*E(m9aYYO(j{^!UfN2$gQhf?l2RVc(U;ow-jcc=gjz}}MQNDWSs4iZ z4QaYAlx(tTHvtlGHaP&VI~l3qsOa7lf(Ee)RZ5I*lP$iT6L-Wka{_sTCNl%-ybxkS z{t@#Gc&aLK(%N8{yDNnZLN0Vz8e5P4MVCgHiRyGxA)0AEbR!!|!|8N_D?jwJu5Tw3 zv9+~D;>yZq8tz6Ry2T}RC@Mi5{|t`x9rOAf6MZ*DzETR8mK$;(g!pI|I=C40hySKR ze=5<`MRq%#)fm!_O{IO!P!_lD_6gA)x8?gNGkhtFBlKPW4et}=M9$5q~;`woA^t;vy%i+#cf9f8d^Z zV~m=iGPKnzl@IPxpFdq)BZwWkg2s*;a^6+|hOh!Q3PzYLXneSv>ksi2fn>g0Y58;cav6&vtV5QuTJq=C zm1`|!Ie<(`5OwqQ=`#o6@>%<_hB!pK!*EQz0`XHN1fd_*ht{k7BmmmSpz(ib^o7go zvDk1~Mz#4W4|l7#>`DgLB;>vop=0fFoTu5Qd+vVz2o=M3%K1b(Ft6j^Pl02_e zl{{XD%7=@n;Xj!N;e0IiK{D(!jjy)Rf4VBDuw4Ei7W+k#G9GTKLCEPlP#(xND$UOIFwfcCXm6$eA6lTk6? zgGW%3QCeQ%P;l6+V8SQoKg7*AoY$P$a9HtgvB@6pmUVF)iW8rrDph5@wFx{db^Oj>2+;FE)9P6>}*CMg!%4=uCb!zbdotnVpiV-AAKL4T%m(quY4lYl}fqj z2MGl5-=OqBRY*~y>1&w@%HY6K^{g9{oQ;E)2eSb|VAO+d9(K znF`IM3iN7;?&#}(NjHAfHg&sFm`Ne$)6im&zB&c~;R)vU7ZJP{zPm)O(CJ(%G|@`_ zYbg-lD8(BKw=a@_!JTqz%I8m>5Tr`O>har@O6%@vI;zdK2%d;FL}KuoSH)^6Q{Cwz zL-j6;_Y`3)_Q3L3mo>wUjwvgG(9OdT(niiOZK0W{lV05nlJh~)DN5H%*s^Si;B15< zb9*}hfEJ^Zx!~<505ZJ{Z|IME+o*|tSZNK!C6T^fkj{e&tM03~0G+r(C@$o(xb?gN z9;Wc97`{T7&#BxS%6_vYVn(y4fATDF0B?+~=n+AAl-Crqe~{ z%h1errt?WDcztsr+A$OTr_5A5ajcMwusFR&?*O2!=%Ym@`UoH#U-HaG6TPjIaZHGa z6;kMYSdgCRjpDMY0pcm{q!q$N1cx$CSu|xRGV#0YBu`L>?AU16ZaLOnhAI#o7)=aj z*ny!Q*IH?{Ckx#8?cCfif43i(22Da1Q|Z9$bb3Bcw|wt+R8#?q{fS=7nJA%xwpdbZ}wzdF4J{#GU!eEC=lwfh8mJTOLdHt~K- zHWP0m{A=mbCxP~KAzShbll6&=hXSgXxtf`{H@Xc->AY(IzQ1oT#&b)((IJ3&iN;%? zd_HdHHy8Hd(uFeyA;%AoI%c{3ETEH72v_g-x*g{95SNuZ(jpk72qal`BHVKY+3UFC zr=8n{(Y2Av$e)UC@rkYcOL?J z*((j{Z@c{L^uaI)LAwnP0#sFp($DS>gN=~hjOmRhtX|cKIJgraL`@Jl%=Ew8A0gSJ zJI``ViTrjG(m&)|9KBg^^eu5Nd%Q8%*3er_`9Dg}J4VL69b2yeW96zs$Wj6Te3%db zR1_cn;f{QF5&$w9AqcqfTZ0GT&S1q<2m*p6Nw%u3d0sbLhVFSDfFmjMRE=Ip+6>1R zz3XMs2vEqFFoR8q#X}P6=_4#hK$;2JLx`_uhawioSFZnOgZI~uPEM^6v(71TRqp91 z-rE$frUB#MUv4hET3Mnv76RfL#iJ>@LjC^NCHhK$gH1yrl+|t@|I4+5aA_AI^O7Wq zkH7@#9Y{bGIx+WKVj4^YyiX4JTHfI>2zlqr5eSz-hubDJ7%MA|1fNoMidEZm1SRJ{ zPF$Pa`J=?;#``XQbuY2iPdKk6#Wn2vi(hCB^qlRbrC)8vCn4bMcr%@yprvU+qASE~ z8*GYZTlwzIJv$I{XH7!rN1g>7OV|ZZ8c;#-P`QDGryUAHM;aA8E==z?&jD6~X(}R2 zCLrJvCfS$FI5#Eio!x*5IU$8mR*P3xhg;C+P90&vj#+gY9?r{q?#g&vS;z?W)iMR!cGlMMK%z#b1VvDG{q_PqdDP0Ka#v|GIS{o}hO^0wJtA*LL?QxW_ssJ;I+wf z`Air1{-P!F#py`Xm=^(ZM(w!#Z^5U_#LgA==@nyFINs3sqCuwj7k+88Xw)l+k89MS zyhb6ahh8S9z_G}kt6x3f=>ve+ck>4!AX!ox2KU7d9;mP=vT~9uJS1sI@{4NgG6;Uz z>S{vAoM+B#B0voGfpBC14_dhrCSH6Od)fk0SHimvtF0&bZKx`|K`eq%SA1l* zm(nS;S@qBn;CQ4T;X%K{3h}DF(zGD+4gyA)A5}-JEG?d~>li9VJ`{5Nt!d;KdirW4FckZTRs>=Hm0@mtB#lrUBEul8{DW9ntGP zDs{q|a@M0l4vtBy0HC@D{ zf$-UEBUHqIbs(hnf{5Civf7${gufuC%paVxAtKV+2qejwkeYh=$D~fo2nZ}%tRBZD z559WEmwai?b<<&b;c)bE5xcb-fS|+S9(!R;s6)+xq}$L!yO49rMH~)!+U_{LHEMUK zFZLuIo=r{Tmcu@9Dd-4Q#I1Ix-ti;enAp{=X}9`$KI8J>s|OI22!-ir{3swvqW@C_ zI!My^H9-hN2=R|#@k<8Z69gl?k_6_+#dxrUv|f?+k;Ky3**J^e6tYYoCtP=)=hi$x&zZ z?mcCw=lh#6tNmuuI-)rX(4F3s8vU#~wVJ=kKl$oYc<9>hqYo3g#R{#3B!yA%_yK{H zqWP#%$W-9C1me}azw|ig^tcu2Pj)wSn_%|>06TpkQFW>>q2tm$lTl~%MCA7XCjJ4) zL=^DnQYP38;7vh)>tZB<~al}qRiJy>Jv z>X@LX&d+81F;87JQ4v4~Gj(-r$`cJIHDvshvoYzBd1O6XI$GoBJ$16wG>zT_O@=9t z#}W_oHD0)PyK}!mE9J)%ov0vEnS$1+B#spjwkC^*js(Xl@YjNqRa)Fel0sWaAXd7P z40=W3BM=ZwZV6$$_&gp)0*V3zA#|n0V<od@6h(0fX;{e?9Ujm;lYfGUv#SqC z01wv$6H?sx+}zqClDWC@agKtAz9kq{rA3Hv=OEd)PT|O076j&84h02{U-(-^%n1ZR zm2#xatavm4#trFC3{TMK2fW=6;`QE`Cuh00E)<5-q|ZZe+~E&#S8>6$rMq|kbzxy} zup=jfgM$kT|9W>~ZJZSEry=et6L*1f;lN)z>f|rA&c8`PJ21clz&0 zi~7!h(V&_3(s3h>l?P3&1f?;K1vMtQuGLFxHW&cqFjn zRAro|D=9|4aalIn;mCn^MG=fpf*JRCg+C^E~a7 z`LTNlOK*_Hg^rGerSVd{^7;R@2@J~JHS%!XmsQ3rO|Cdaejp$!MU5D)f3L(^2uV_? z@`mqpQt)lNH+i=@SqHvv^+c7%vw)Vzr@nqs8}}p{`Ay2wGP%)a3Re{%MMuJhQGBZ$%XcR;LO^~XMUw}0$QDtU2-j%mR-pT=zEm)+{O+UA`wuVhM zO@q0Gj=`ll@w^1WKAoB2_~KoJ+}hlnFqfjqcXiby_A2)*9|(v>FB6Dlc$8dUl2DmZ zN1fsO^E3ba@83N8=Gq_U=kE{e+A7Tq-0#k$k!h!;(5+_^{8Qg+x6E+~X-JnZ>e`Gn zgj)Jnsl-Uat+*M$mIEcFZCj(!4eMFJjmm1noG+UBxqr$y+UL7D>gKI=qkX%}aLDPq z;FykI`K{z=OLQh2pIiLfpdi>Th;qK;?p*mYYw$q zml=a*B8H;hgf6@X{ioSj2syquaXxz~?@p{Qu1_q8(43z;!a^{XY`gy1|D7M6RhpSxR|-OH(F~>)#l>-M0$sj^yG!FVO_SmhM3U$^wYUThFQO{tCXcV*9hA&ZiBKk~ zOyPY2fuK!*Rnc}pANArFvop5thcmP9V1(J(`}Swu4G-s4d2_`|gsV^Zy?2sU4Jii= z7Vib|8S$~;^5hQX{*z{6+%b#AJsRv-Sf8UmtT|o5rd1Sf4FNs4c))6tWjqvS0N+ct>0r z*A_a?uaAQqZb*)&M~7(ZA9Zv*BKKcCv{Fgsdjdik1wfD?g2knklzDd8D66jveKr%S z)5?sd+2QNm*JsNF$#NH3b^lrKGmmHHvl&ml%s4!YqXs;N~z zM-}1GA&_bpvbdf^8b|`Lj_=^I?T_IHXy@W!$NIh}7%@_!(2?&Ah^z<@{K(wDZtL!T z=oucKHJe$6q8QeU)Mfbo%rH2L5DbAEf2#0Y&%^Fs>mO$U?7#ErRk2aVEhMkR=lw_j z@to3RysV8{?C4m61?M7e_w8(f#X;F#b*k5r zKk|(3&__>C9qAUC0D_u;}crW0v1pNWW21LB?uw@ zLOah~h}}@za6L}Yhf5uJ^}ndK=>}^TF2J?fiU5J!V8_zFcTlC6K&NGnno26UYGQ(s zTrx~Ez1{O>2+G!FmZ#r~=TfsX-QD-eoUg#~JV!$)+G4~%!hQT9c+iI-Hs-3JVPd`e zE7So+9KMJ*8ZeKn-@11R5OOx6zR`-3usmvqdFoQ4iy1oMQxEvGWMmnDCW0$1wDWA5l*{xgACOWJO=7HbH((6Jx|5lVA@u}yxzR8lfqd9t_Y4L|Jt=h*`z z|L{O({h*<+z}S=XKOsa)=HtsNn)@Ok@j1y+-h-CpQSSED-(fk4`qZEvLEN78UhlZe z6&S?rh&LFDR%*m>O}GQ-WNJS^?gIFZ6-No+k(Vr(uVdLo56+`onEC)$r>x9AlMpEkES*F*`bpeM zpv>xptk5mNUhLZkz7&wHflFN6XWZF{w3sVW?o-Cb*yyHH-Ln^ z(9B3PivD2sO-Mjw2T>xJ1kOLpJ}riKxV?K8>?f$<#;3SphG1Aw3ly=@11AYD^fY^L z5)C;i+KbZg?fBZ=Vs3<7S)JY<(2tJ!mSa=KfiY(b^&iXI4WFc&Rin?hF5bq|QQ_y- z4Qqoxb1ndOFUr#f-fDds3fXL3LG_OyQ3wbG;|s{B)1XwKNDazxF#M<*0tuDHSf7nQ z%EYu`^RogxAYfLkcI#~b(a98gkUoXni>MLK?@sT2`9&ub3}O@?On&)iZn9xD0I)&q zD~^ZFch3}qBvG^dPY^2DFzm_ItVjn?8#8)8b)@Ik7XSeyeDJuszi0m+d72-Y&0=OFGggn7F=%-7upZOcV~tO>N%;`n}m zCN7WEYr8DaFQe)wox)VUSRt3ZuOg9HW>O#<|Z>64|W9s#{lRZS{S6jD}`npzG zfU5`qm^iZqbgpLTJ`T#*`t}!R5bbJ59#X6MtanDd-q?wfQ+RF#{Tld z8(SCvP7{jan7QTYkN^8?=4mH@!ML>qkXZqca+x4Jf}>yLXhB}6M76ii763v= z`asGNjnza3#!~)KgLBmHtZ-;OOi~fr)K%>W7yI=p2ILEY0FOig5_ti0^c{BvMvYz z-ni@LM!yw6$b2{U_zy3>3ny7Xx}vlt47e|!zxmd5qc3Oyysa1z^EIS~Sjq$PYaSd{ z#s>xG>R_%aovRORQ@x{SOl(>hoWc*~g}RGBv_RMjX&-VZKq#fuTvJh_QL<$2g!w*V zH;}z8T(ewm=VQalF#|HOAD6);>Z;kAx~c$|LAos@dHcDi(V>vO2gE!h z0bZ*J13KergegxoP%7$rBCCOzUSHWZmaIo)O3ifGOLNTt%I=QEr2X%+$8Cu77!9tsdbSD&jt*Xmd@ zf47F{5{v3cOchLwc)@!KsnT8VF1Kz$8VNisAdBabf|N*5W?XeDKw6jR1Q9>rCQpNb z#*}}^-yb<)+ZejpW&lX3-vY+j2{NB2T0vSfO~z#r0|KOnPn&OyIH%CAa;kYF3}9jt zZ1sNa-7Ns*Z$+#xqZL!CP8^DUR~hv=I5bTDnc4R4d8Rn^62gL<&!QL)6gMUL@w>tY z@o(=)p`(f&?Diuu$K?UxN;M?W#0)uW!HzDmOu77ko^0f-t(iu{*p}H@#CCLK=e7yxhF5qCH}(2M{H`)Y?aFd!iA&^t0d#Q>YefHY69)JCjz z7?8-=PSoQJUD|R~7=8v|%U3&zEXejUcD1CHvS7(#D+K$jw;Wif4kS5|D*=%e*a--T zYx`}TX_W3ksN?}an-jI@G^bR7U3W%nW-&qm$e2}lkP5)q7$8}>D(Jfnn6_}Rm1Kn+ zky8w8@ilflaA!c z{^0|d&;A#-xEo)?#M$3yX+FY3FwvODDytz(O}&UmWl4?;HG_78C4#HW>e82jMx`6< zR#4#02uRkNTarfacJ+$E81e5QHQrhUWZdB8&S{+9U85Tr7$em z=LNxOFHp*XYHwp0WAle+$!sy~(sBOoA}KjKsw^Ig((o;wAZbW`!ccv&G93s=e?w1V z3e|{v%uhRgH@#rj+WJ_JmL%k}e7@6c8Ddv((BNsOJ>+^G+H70#Zfheg+usF8U4P!H z3uKnJ2ikT*BUE}Lg;+=WadUbq)W=)L3RcSN@6wQd^wS|vIB;m8UUm#3E{n_S1{vB& zwBV9BmKEt#3Xm!m;HI1`fu7=}d&l4f>ClTtJbdR2N?vWa4PCV$^OjI~Ndi*d^ag`N z39J_oDw2+Lf~;T$enPrKD7h)yg8W@=`b9vb5~&)A@+w-gO0Nr@(i%;sFs}2CvkeudabsMCH#XP%yghe~sF-GH!RmL}1)Vf#G6SPpV9x|ghDo??fLMwC{ zbLyv)e$Eug%WIiA*r;Qnd7kGWIJ7`&QOv>Jv$CAk?X;=?51COnRRh3B9OuT;YPAvB zJ-gK%NUJf25M2E&AG-bjdN3vGaaMukja6@?icFh4Q_D#imHBzs=0Ibu>e`svQOo?t zfH#jDcu518`!Ug6M=QONIYiN?`QXJ3;#>9FexQ=D@JU`i7TkMI7Vc0WnKwUgsB~g) zqTotEaEyZUa8be(l)97Y(wn?G=Bvusevj;3%PCf)jCdkHZm$OGTW?H9ngakj8snpZ zKVd*Jj@W7-ZrXX9HWJZG+nmpM=0&gcBl6qPDjoTY3{xL2ep8O4HwpkUa(!HVMa1u%gcl%hzO;S%?Ao*L>%-K$eM%gPI%*9O zSj$T}0j{XCnzQ;{ElGO-Y#EqU!(UE^)SgRk{=n5%kNPv1`r)2hFVTPB)J>BnLJuhzaL$2kZdo0~7VZYSzk`sCe$Z}k*@f352DqIK% zu~WICG$&i)F`mt`WPYtHGacK6(!xXHlcewx!tB$S<22wR5pFqx`Hz90x1 zwSEL^Yfx5iG}WD^jrI6n=;{PX6hXvaF@ED_fOcIOo16T7f7C3~vTf*}z}xkT-}1(q zk-(OABe30YTH2(xKQkXQ{Vq8_dG0TS`SE{JvWL=9QD9+2Qo02t>nA>uMlE#+?t@en zVm@5269`CC83n;I``N>vdV7bZ&Qb%KhMP|}l2((o2f!E2&W)rY<48AoE*c!4<7&;{ z0+2Cy)5~`dAb{REJJd)e9LurEPhM+XBcW+$53lCbH+}!;w?)%=q_B{UUE(Rp{<;@#kGqUiI!4(Mk(`bjDILi<5 zbzwdf?*j1^hs+mcy9$C-^JV)k-+G@V3qT<^eee8sA$7YR4p4Os6_LL zRMQFB>HdrAV3bWNeSb*z^t*Y3Kj}!0(D4k=NA1<7W-A5uevd~8`=*)}Hdes@kqu$2yNf}N3=qr!0(N7@u!>8}&k zP$vQ;xw-4s4}@__pt=X_S`DEMM-Ko#?fugr;ZAi1AJ?w*C7M^%oX_BUz7?f1T34;T zg;O`?*^qWQ_=p09dbaaRi*xjzaUkLNOH9#!_^;d0YpBY7iI&15S(_j`q)``H6MY*5 zoCOR6f*QQ?(``D6Y{OJG$x;*xVw$iR`J(`3K5@dV5?Up)6rj-%Kwwv; z?Gy`DZ6~}O3keES-B3QZx2>G-zJ`h3k0~gF5pjPVFCR@zEUmAtElo^Jpw|=d9Un`X zW5InT#z+-ksFd^(E;G#Um3l$|qz3naFAqqrL`o|M@V}Y69?wRyI1b@~MW@@A0eX^z zlE_G%%#De)Bgx)e2wgX+UO9J{BhF^AdLfn^t~R%VJDlFpVJW2=iUu-V=(!vrAt_qe zN}zRE!2JXJUS=kn?R#&2bjI3GTZHK}olm~s&wJnZ{b8q8a;r8}IbRDiNq5rsJpjK7 z;Gq+GaM2$8cyP2g*T-bBV$W4TwZ59KsUS|@bjfVNTrPm0$|^K zW>%5oAWi}JW)0P>d1;;m%%Azo-XE%?l~82BUetd>qV5eqIMiGHpvr%NAH#ahiCJlm z6Oub7LhSC3U=x(S-UX#U>IkT=F3OpGQP=~c1=OHi4 zi+LVGZAl-E-|;+OeHhA#OAsRZMwq=?_&1$S{{e9|nsf-Q@If?Y*0BLQLf%z%E;}Lw z@M@)AKkKO}z^*+b9!XEE$q8uFn8#vLqnpT<**u42UzeNZRvHbP^{{9=( zriMr~FjE_%4hR7OAjK9jd$IzjL@KZx@JVm0OGbN5a40jkSB^k|A%O!`e_ZL%y!yK`Y3vk=>T2=$?flmB-c~?nPq5%tTM%yjfVwV5>g_FYY6>-s^Dm+jyJxNA5#cf z`0sJ`x#S=$A@^$a;c$3StJP4s`_rbvdwtU03Bl`1QL#K)jJ-LbT8JnXi(>mHT1MJN z%-HHVA|z}a7lvKF|1gb^a$|ux*#zW}!(8jm-bKBFh8V}1O;uB5mUG1h8UP{yUkN$t z_xl4tZZn5z2u4UIcS?Etd?b$TL#Tv+0B>Z|MFYSuLdwj8ott|Fjw4wK>S)Gl%_rYL zyHu}`6X-j)kToF;g^<~l$++xfrw+`*=SVz79eL!Qkn?J{{YC55 z?s~JSs!$`8DMA|R(Ddr_%31Bx;de+c=I3`3HNtZm5FK`HV)11!LmuHePYB+m1Fd?^ zSQZG_k35?BtYCt0{hbLZX39@d7n2%>Ivl!j)>6Zsl-VTYLu$WsZw`0i9ZzZFqwB`#1K`F&wqRV1As{&;1RxOC%s#-lxBvSP z3vf5Dp=OYfrCNw$oXCFSVt$AmF*2NfsY>eKEw*#qZjZW%x1I2 z>@8f<2w|n5;eK2YVmhsOJnW|MObJWXa{{)PL;@kQs;V0A-vbml!DkD(LN3etp7OGC zZ{<8oK$${F!aPxTH9v$)=*4WYh#YW(#7TxBKqOg+Rw|YLl6`xA-=bzGg8!m!d)Y7T zrDG!6q-Cn9&k4@m$Rl1dvWFre-bFO&=EZ|;63WE}PRPMQ2}=_G=$5_qMz`^Mb%ltB zI3cqHheg)y$fML^a5hmD38u5eq{7QbJH)}*m%4?#W$*e~BS*4$B3zM@tEK~mLQ(~d zgoKpT8I%;efjBTIkn$TEe4Mkva~o~6-{6>;J8*Cad<@KGPR<7t3|wFsv*Qo2c(M)e z7?=-m@2RR(wB_ryT4n6r^)GO>)uo2+pI*H`Rqwr<64E^YA-0k2-*IEk+-IlTu} z2{NKHqNl_e+uwcto3BTJYyZAMNON&^cHvnj#Cqt&s%PWX%6wgr>4YF7Otn;C^x^NiTkA^)(eL$ddv)Gr--;B{frJf;(Z9*jO1+?W3m zg0f(>`mWtdNVy1a$0EA_m4n=aA7YaaS6RF}_A$(S5hSiD%9Nyxxr{}R+n=z%A7Y6Q z)t@53Dgjd^#H7LrnR{iB((UrL87|Ou(8+j`2SMMEn;T|?P7LgYPKc9fLbms2F;S7N zC~L)sC`b2CX$rl-QnS}H7iRM4zPU4;qvm-l6*uo(0t787VR~Q zymqlKq9i1@M2LMq1W%njPVl`XQ8}KTjrQH7um7#i2-_p%k@_L_QPn|ea&G4nqAo5A zt4mgRbaeK%v`NUHEE9s;^qDgoWt^zcV3B*yTtdcM!Is(=5-!g%5GyeZ+*ALpz>;}T9YJ@u?y3tb^GV6Ep^Cd7%-RC}6sK%hqQ z#}miN-X0wvVe0z{jF@q9etdj{ajxj>`1p^N<9HWG$Ilu$@mNhfKIxsA=HAXRp28L` z7GKygaVl%-B+zsuZRt4H2_dbDjm6v=AKf7ma&dTwqw^L<%sn`P@fi>jq9qoAUOa{i zKmN;jvP&Z$uZdF~;eZ!$gl|}v5Jbfi zV^7ZqiB8U5M7I!f-f2R*?g$BacGz{HB&0(^`iTU)Wpqf$rXNDyy?>3>0HCc6Xl3~S zJ&f;xkWZkcKL7#OFk+4AcV7Xn`zi~4t+z~1yn^d6k{yCBS?Skq5DabXX=`-3eL|c@ zn{dKX?AGf`bTlXD{Yg0-8v+Etk;X7X6(RTC`8m8%!nX}!tkew+jCuDh=#))D$feT8 z2kIOTc_V*c=Mu(OJs|)fK3onVZ2vW^P zkk*Yi>&xAR5GPcX(9%Lg#EN-o2m8MVsD5}?C>dCz;mws&4Odb%z<8;22t1^i)AsoRH# zjeL7sjB^e=G+~>^f`VM&W|2#^;z(#7ndZLMC1f1gPn7|Y=B4w%$WER;J31d--B?o0 zh!~H~?c)dyV|5(od0f|mGRD$02?AWCOrIOvTotiz(%I}w$QXQ`228D6Zd=+Z9Z>Nd z34vl--=6eKmh7cNGICG)Y6)r~NkC^KQUC+E8VuM$y|IoA8}~0ECZ3#VN*I>upAA1) z>_~`mO|()+J%HKa*1;D+>fQvU6sZ6%ii8OugJ%{M7PNSNsu%!hCCFwWQn@(Nugyr zxIk%Auy|Q ziB{l(2E)S2ih_yUFFj2Pxs!j(%Vua(vflkWnF3~)m^*9F=q0iX7gN4hm60t4Ip$${ zbK>z;-o#7>Zj?MHDXm+-%(j+Y8{RAZyk@2!Uy$>;$ABEGNuNYEMN%++-7SM^0#s}JwMYxa3B|<=g+|1!Ux7gX-y1ovmbpM53PTM8BQc-uhg`gc2)uom zMPPPl!oGxTy-Il0*26DrO!6S1b-ebQCW@-6EIrRF%c_c^rdhVJ0Kp1|TUeUPu@Fr* zNkWVtasVLKBm_r%*qAQT9*l!`_9X-{M?wsEwX9u~4dxDzW+B=meOQ4C(F+jTD4X6b zkV$Ev<1u-;iyvaTBjEA2nSO{;Zeg7cEd!HShlw7)gcwr7TqH%$3UxK`6$7NkwZ&^@ zfh&@_jWWJY^mb<|qjF@DglzgD#>yhQC64l@6{%8=6ok31wD3HB3E7ZwuL@&|@f?tJ z=57T`gCbxQjphDU-z|oCjHG0O4zc2gXf`}QmvMBeqywvh+k<__aQlOh@v>RxwQvKZ zB~g;0kphw=!S`sL51xw*S^|rTsx*$WDOKQ9531RINaj_3jK(4xmeaJx!0peD5W|W7 zW@Y5><#A0}Dv@Y%sEG4+ZyA-wqZ*|@up+3#lIvf5!eMKIe#q7S9f>3Z6P%Y38XF0S zEi+a;U01p)(ZlEv%bOc=`>KM$ga5U2I|lbfp`CxwMIv8D-c)|TC}UaMtALM9^|ES~ zYmi_;85$yqOh=+xG%$`{ah&-JB4oTikULPwLlKx&gQWoyB;xh0j;67)2nxoweMQc_ z5?NTZ$bvM=b?S?DYt_|M#LH4Ex1S#&)`f~MYwgk)hbF*KS>1*h)r)#Rlk*I~&km0c zPh_{~Kkvf@4=N#_%2hIoqQ>`^;U1Du3peefhMJ--*N8Lppe+IMnn|}#Gzr23^)fc9Gp9Gi;3Uik`2F^>0RguzK z#(PFwJP{%}Y1-Hg05?1uhLE;1APHV{U3ZoS|M|m@KYsHZ{{IV@y!DWpQ1ja zNNAle{HWwxmb@oIluP61XgHfRN9t~ZvYrs?eun*!zkd(!zeGY9-PmLW3~msTQ1s>$ z6#|%OP&W-!kS9WZ+NG(QMIH+bl_8`*V;=pp;}g0eI;plsbNiYn>7!4Sba_ zD(X7U+jZzim6uH@xAjEGW;i3`=xMffEdcn`vmQaQ(twJAs}<_(=g*%f3@3;lbrW|%b z_T^Yav{?cuN07Zj;f!9HpQ}%%oNu zp*r?Iuy?X$$1?3Ux``z7p&j3V=Gu|BCkU~>XSi)JrNr-GK zTsg{wE-L^;T#_kfw?nZmX5m{?Ve6Ypp7tvTd2K7VpX2h3D0!wz+0^_uXx2vQmoKPy z=G!joqqpAr4RkKXj{!Ns)T!EgDcikP*?tLtT_0yWr%pL3lnvt2!0_C7~IVu^iTmYbJjK9*;jK#N91VXKn_87t<<)>29b2Tb0 zCUh~Dk@KTEHBHR{Kt1U4^|{OTaEWn5t6V%5cPI@hIY`BpZj4Zl%#?opP7Y94xGZ#h z*Ura}AAioZ04ZDx2A!qgTwQX4u*x_A0YTJ~oOA^MtztkBaMF)KknZ%b(grPVl&-mo z!TWWFk+J|FSbB1RnjW=eDxKi-+X2QEsLTNbq9!7KS4tRaLCDXf7#9Gynk0q`@x)q~ z@Qg;b*{H&eEG`LU_$qt`qe;ghjG^LNCRwhdp5od0Nn{l z`y&Soh_LWAOKq&TEJRq9Tzjon4GV!@Z7q2ShzTF7Kvhd2bvF2*&e%g331~#IP+0vN z(EK_TlQwrMuyf}sfL2jqAt=eR7oVi&JrskFY$_6R7GzRXXYVYpK7z)?giR7DISc^J zZI$~G2wQ2X-d<6FP+}}M=yLL;G>{Y;tliq=VvLwL~!>IRg-BFuI`t2)(^I49KRC zE-yX0opCnbg`xw?8!M_V%DYfLi%)75D6imS=^}hQdHQDKiqeRYtESYw{Iupd6Nn`M zIJrvjJdqBk#Lt9GHa`Ky#X#u$^imi#^O4WE`NVos=^`HT2@dMVzdA8o)=tzOahZwVFpbbC^j>GaYhps6 z=?F43;VV&EGXBWp7w8&5j$iO?W5ZhdIY1ETGP#&*Wyen*b1#`HfDk#W?3F*!s^A}O z70AwZ`;SFge6wR5RDAa!8wq3jh-11Ws`?@sjPSb6$`-^|kZm|? zbd)i%b>c4;c=Wl;_WEO5GAY8_hHodn9@v8nOYEc15 zMNLqC?ek0m0R5!@;`y)SH_p6Qg<7?c;YFo*uHaE;KZq>`m`kKs1=wL0$A_vyyb5En zg|jezt2n9rxuUI|BmEl@FEl340KG)@Psb9oOQ&?{(d=E1hb&hDq6_jjmEv||C{dW4 zo>vgm7ZMp?d>2jsXh)>Z!n07qn*DZ{1uq{U9SaA*doHV;p0|)+-YPkEVv1hLMji;V3qV+i$ySTBvw496q093R9c)8 zBK}Mj@*>Ux?1)KOoXj;m%W)^?A6=K@F?Re3lwzuuysA@aYI!xL0+&Dc<>hq;l)OSb z`$u_B_NF(~ZUcli_QQ7{S-`B5hY&1FumrG#K`jVF%K1p3dDpMF#XiV5RaIn0p22wBvtw5@=Zp(Zx%5sRIL5rA@7=XMper@B(;`mRU0<^S{ z;lY7{F!U*nt4(5Lb})biD1n@XjO2q|FAFD3S*3tkf)xXp17jn==7@EGAaECK5(9@3 zM`CNqfdLBw2!XK)2HKy3N*lKbrNK1+K;Nu3bridm?6BC7uYvE(?3>lFAC2D33aydx zP{aab>{#dQPXidlVgRb-D3Qan8>9v@k{{B|x$j2~gd~Q-xKbE;FCe1efI3wOLLmT9 z72wbT|JWHoM+Y36d|1)c*na)x%YXgiFb=JI1f+?+{74MoAgP-lHiV%7xF{qB-UG-$ z;+kOL2%WJ+xvU}bG0Mzi)SxNnh_N!sK{S<{6p$c~Au<)HF>*f;%t>1Gep5!qUedDn zOAi@h^Tv&|2Tn7cZhDBmx5%HR)9DuVY@}bL8yvCq;zhdYAbLTdcNo}gwpe)Lpt-Su z(*qg0q=f>6GYZo{1M=y12lP8QfED^* zJkznafH3g3iylDh2C0SMDQ_S++vH6Gk|r-Y2u<>P9zsKT3)(ZQ0o0v*8&Qog93d-Q zGd@1`k3NS!jX?-QLm(jL*YsSBG9s&x*BOCOPXTZc66BL60hy$NMnI;?%K)rGu7K33 zLxocZ0;mBaAV0N%kUX+OAon_fpaPQa075zh5(FZu5r!lfyrd1EOMnAoiju^p=>!A7LZRhaUvk3kY@3teIu)8Xr$b%e`E;bxQ70{&KzPAI5exPy>u(f@!1z`cXpz)LDkZ^%OjNa<%N+V#fXhKzJ z*E_tU0Xq&b60$##LweWwTh2QI-gSJ|{@~&v0U2}77Z6c?NJ=1kZyh_dqpRAWaI}8B zqf=C=-Gk78nCA)zqDmHDJIERDys(`7*akn62Q|Oc5pWbwu;)B}Q}}Kn=o~odO=@8u z1Q1d?2jIylG7u1yg>hKKw_;d_Kv2|a#KB7!)%N5=0fI^=>mf`I+Le937eM0Lr48Y? z$F9WScN}I%AEi%bc;9ck?Y!;Tk3>@6Xo3Jej*Ia~H30ASDeyXeo`%3UAH%f(ey-MgKO zKah{q6cM}fwLxtl5S*=p2>9WMWRY6tTdEtXDL4%S6$w2K5RiQ^IYcWb%CVk!VBWIl z$6;N{+)92l5%1+FIDCL<7+a$ zx+$N`0{4N`q;zfhvoVv&_2*?WYwOo*8L8iTAal!m(#~4)ATlG9>zOC)e|pKyls3A( zFb{6MgaU-0$7Qd7PpStX;N8|o@veMLpHj;rFLO6*JMv~;#v>g-ejcfAS*z8ZgMZK6 zS{kXzzj^vUP}Fi$Bb)B*aCN6t)k{l}@1qy=JCTuBnTvvT1#<3|y&nhVp$lOE>2Zc| zu_vV!)#akIm79&^Woq^DT{*h;4iJ68sIDg?ltZd|&ay0-tm#w2*vmaPa4&1wnS-j` zXCR2pjBQ!Ae17IYv{UWW2h@XIhQk2D>eJJcW%8~-Hl=jkYzOAw~H+d>*nH^IYdvV zJrR(h*kIhXEGue52uYorj@ipup_bA2am8IAAbsKukWHD~pOGm*)&a!ZI(R%+S{kn2 z?GJ$b`1l8XW5gpMdTKTDO3&+eMus<|3(&(p#PdAP!VniL6S^L?-1wk5&@+(Xl?kh~ z*iUj90XeIkA>vbMyHbvNTT=Gpp)^)!SsE+St$7|UI3VS4A*InGmyHR)9sAVq# z)Us2CmyJf_EAb6nAs;t9GFn8R^*&_z;_TI{mp}jP`mN|tZyUoEefIKafBEQ>Pd@s4 ze;7baP1DTYMlU!Afhafw68b2GO6U0I$|0aKMvfG!!c|q(sk_630BUDkfPXe2df?%- z8jY~Nq56$hGgd(u2Ey=S&ZSU?<3PuQ3?fA^&|Pp)+#IYvfC$dco%LxP+kbn3&NFX#hFE6nW!+5$#H}w;u2V{R6iwZvw>;Qm5UwaXJ*{71_t4d>T zGzTpJcw1F#7q*8_&|uyn*bU)8qDYLd`%#lX4#&kH5+(kGAWCWvv;HDo5Id>_1Q%__ zOvD;uixESR*OIBWgv`L6N!u!rt#j7>0~0t%hD`1Lg35@%h2@)>dYnogU71P#5=cBxRg(!xp0t%cYui8=$2Ujfh!%@O zAasK+hYv!G!M()@GHCKvgh5?Q10hokp_G_XC>Sb@#Z#f1HbV!00%xeb^%HpM6#7AW zzB|3)sG*JSC%-)YqX)$E^CJl%$G0pP_p(?ObkOfG6h z2#k{b^jpF7D~JDXgXunrAZZqI((q7!UI>iGv)hH?yckzfO`OHzVIEbCz&J~;!= zU2Pu2iV;H2*UK$nK>_9jt-!!`x$Z;K{)P~e-Sa6Ldf=9B=XNxtNYWp|x)3Tb zo1sr6MF*`_)qrv#{fk`4Yb=KYgQuL~dmnu*~2Hg&+31JvdCOD8YR*0gQJ}Z2vC18wq1tMd-oyizukzo!{ zLI%AXV(Sp%3jU(TagGxw?{Gw~+z?U8pcK-p2|?)Uz^YjZq7>D(hsU^#{lg3Hsv`hEs+^Z_M zzdHBabE@cs;w2VMQy3M`G)Ici7R|P)B+%ew6Cem#7lCN8WU*LG4pa>jJ{!7)4r16e zMc>Drs@Bq4$pMVu(n=OvI1y<@Xk!Dw5ReZaViaJv?f3n6Q0i<$R_3z65Z$GL#VLc>9_UBNyeRtQh(Rvj;^EA z3mEGF0LXM0XC-~H1B0nt2L!XMzT~(X=S3A;)n!dM3~qbc5JQDU?FFZ}_XQ127-F@Q zQRYX6==2$@x0bP~Dnlz2jh=%Bh;9YsU*8r;$lNfra(P*D3nLvjyQe2S&m8Hv%>l=Q_ata|h4b@&t6RTTq&{B^&KttO8_~^T5gW5$(YwQ z+jVo+MkQytmX*y~?7+5c@))ShN@p2kc7|CNbMsl2se~2*`McYNQpvK7N*A8;10K?D zZ#o^cXQp#JVuiNJJ7;Zn=%Fcb7!URR^#q2RY^fdaHVC1Q^-IhFkh6s2o+*oW6kXqB z1sy;XpTlsYhZKu7R_{5OQC@t>>cz5i1a*k{J#Fw-)fuYQl!l>YULO`%vB8{S*=gO5_ zSP9oEb7@2jDW9`W(pD25kj2W>gqNJy0J3dmY5wzINkE#F49ori5L(qcrfzN?mjHyf z5g_)CkDY?&w0Aw{Y|%N^6BI<0byhg;v%L}T?0WEm5nqacSTz3$eVy^m36QCW(8ZpL zpgxWvoChs6Ssga`=}TTT&RJua;zw0}YE(IH_)|lrjny)+4Ffi$_yI5YzURPwra+df z0m4b}RzM&>5r`YQ-4j{LHoI0%?lvoPuG{UpZXPfyYhlG*L$JU|1PKd6LaA%zD>V5T zb`gkt6OdnBi6JJcT79Qb-~z}#D?koS=S;`p2_Sa_5R2@H9{}P2*3w8US%^P~_WOM4 zcvlCE5kkpzF(f{YA%lTx^cr-v(=w+2TwHBfUD}C&6qmsxMnINe9|I9k20oT7UQi7~ z>kSN}z>)y+3oThPq-!-dLIEW2h8r6jSqVVoSs?DlMl*W>h;3v3U8ph~NFo~zk(IO@J(thXB-WyN?yEa0Vy>NQbpId0*czDQ(UU+nb|4J8pAF zPu%DsLj(0$_5a!F_dNnKFusFoG>TOx0s=&_R^%Z^MzwKTMo=$DKw3U8QYb%9#eF^; zIP4T2lwu7v{&VfJ-n$-1ZVuBYed&g@WDyW7TN0M6xm-5_5=wcoWPO>l?6ljIb52yj33LMzv2D}sS(sQtRPCyXUJ6?j=G0u70 zb6A14z33S2!@!P}fSG+poyR6;Fz9z?fXoDSQq=EwP;+QKDjMH9yy0^IX#q&0%jD{8w6zMoGwv}JI+oG8fzHm_~gYO+x9}b=%%tlu=;4LJZG$o2Z++ z%(XJI1rLFjbXhnK>bAuY<*`OFd)qoeD|9P^AqO&mV3d?00|sBeWF;0$ZIK`<`$14D zZF+qV$z3T3*7fIoPw{Y^`|bTcxd^@mL0ebWgT7Zo%2Df&bo^=)mcF;%C-Hd9Jz)+{v{1hB_zeySH#jb1Iaqn(o_ zR%`62&{8`pHA7WQEsHjesGtBH*?Tu@O8izuyVHhGUjH}R94Qiae8q^czA4Mr0!^sc5CKbz&C=SR`@0(Pl~~*8_Q%G6gbQTpSmwv`Kk4(&Lv3y=2nJ zFc}%?kqS*48=(J;K;HX>ii6IyO=!O?+~O$@wk= zdH557pdNlL5a~@bF-p?ZE=?>F$<^v3+20-r6=|bJC_>ehv3=T$ghVHq)*{DiR6-bk z6bL@1-VmczhnBCXdKE2G%hX`0!Y4kWv6@)lq6W4pkOi?!LPJ@LC|! zc($h#D?uiog2=^p8fJJ%<8|%rf$WzMw^2Am+8)iTVjg-5?D&LvpWt|TOgE&=y(o}%1CGI4L%hK6{;O7i9%ne+KF?BG``U9`^d>7 zr%~29ci7(UK4*-#m76@|S|H1)BpPw!J3egGMWE1@eWV$2RCUpzQW}qljd5j%JLu?f z#`zIU)APhG1Hnu_{N$U3`P=WZ9^8BVm|m8pSC=jVk>=bSO}VvgTVo*G%)&&c8M86n zmH^`BWZ4aE8+9*Cz8EH@yG-OCZjObr^tK+d=Yw|-sKjDYVn(86TA;0 zm1X9>y2QIahoFTW~dAmZiY|K6Hkoxkmp1-bkF%18f!ckdoPeLAybA(z1P zgT~*0AtKatDJQWo`4w9L0So|XR;U=6$>s+-87CkV8v31)vxfqEp<|ut7lFL-q`t^sJa3nZC!E7~IwL|k1@ije?mrY4`}fCJ zJOqMloaP;Acmza4+j8m#JfwjJnS5$#K=xQ`km7ZaD;oaN>2mD)CF}mDzj=zcBkylL zT75GA0`q&jS{8`>?$yfIAARt_C!aKDUlBoqC%bNC>>;x4<~K0YSr^IqED+Li;f5Ol z3A3&MVpG}oGLSExZGHNIkUx1dE52g#XmOS(*jX{`lF}2cOWjKKxU1nY=>CR1uYwb6zAMumQx;_b5*pJs&wds_n&CG79P# z#2NrHTm}?6)9k4PkdL2z@%Yn^zFt{*wYvJ`$@>p(?I9rY+ZT@>eFbSsGqcxqrN3F( z36L<9;0J`lW_Fr@%#DGNhDxwBi6LVk-K#+UvbBXvk$gd~Wlbfa6gqm)(E&uc10egT z?K!A#^$RFA9s(9y4ItufqLy5ZiTuU)YUPN z(Tu@3R;0KLC$gZF{G3_wIwccQ3stC)>FH|V zm{^%Seh455eV>hENC`l=!&%|%j^9y4^&X>{_mSl2v?gYl|oB*=6h6DY%X1=nGj`7cORAkJ# zYgnl z7zo2trAW0DkBO3|8Bqo{Z;`q%gr!qf!~>Ltr7K-Jbm)SF#D>HR@Bl2(g-75WcnZ$G z94F$afs$kBFU7XbiK^yHe2(>d%wom}BpktyAqprC0>V;p8wh`tTR}}9;Z2QQaF8vl zxf-uYf)yNHK-?1$5)f8Kt0(N8gDRsQtTO7PkQF(Hx`5dKi9pCv%?3udK4SfDHK>Fv z;wHnTX&T0q_&S){^%+=WF=V#>Rw9sT>!u!-)O{en{BR50o~RGRK0^q^^Q^lPMG=fO zCr~$s=ww^;0%7-idwq}*F)gmR$wprg#d?5rs(;8*aV%`M_JPQVa|i-a2&7wVw&sY} zAb=F}#6Z+QZ^+RAVf|#t`jz_~i~k(5HgCfFLxN>WK=i5f{*cM8_yUq%*c`BTqeojv z0fkMLf2R3(8VU$Y=Z~^LlErMEB9M_jTf-bljt9($FRKcHz4jrv3cF5^FIRd zc&KoCzDOcL9>KE62}m42JUh3Jgxu_F2hS|#5H-@peQjS63Ax3tyIEchnA9;9DSh(- zeuP9uQM9_+rdM3UL}GU2;g|Nm4Vm>1~y>j+F5KREP)hbv4j0_ z=wV@Dt9Bxs2uWdc+<_q6D-6DYwJ%_6ZR-n|!($h(n!xU_U;f4BgCq?R*uPy?Y4!S& zj)6H&4&?Tx=SWD$W~e1gb+1+N{q}ksmv^U3gy6cjN_X`3^l>qY`>*E)31MNjqfM9V z&*|!H43DetO^m>AV*~t<&b~qBMu?YnBgR~zC4^9z{<_A-7+aY*pUq<{6uE*;%&Iw%mqS_!0M{XjQO3Iab6mO`UW9Gin(82%viNy zAO^#e=<{w4u28|_BPyRw#_D5x81+L#^ME{l*fhU=XhS_k=*uUSA<6ncp{cP=xwOUS{XID0*xVWbv{e#=4MrLehV~Ljs9U zUV*4wf$+Yc7KrfhM}e08m_eR>WXr<%Kp;sHX0$-?r#d6?MO+0!17^#5yE%~Y{AVC? zUm*MpY3(-5kKr>!0)&m9AwS9kurg(5h{(kZ0XsvkBX+-O%MuV;LdWgsvm0s?`lsVPh?G3rC6t(_!5^`R3Ufk_8|vwE}x64Ip}nv(EgWzYsG z+5tJMLKA*Tcy5gE4>9G&v6=)%AukWYu>=JNFb(Oq=i#S%r7oaVO6xgw&kooOg@9iz zd}W&kMF7_#2aS!uDLiOW>xeZqg`E;d>3l7=J%*;-*R?>F;VQByUzbJEOs)jZ1$AX5 zEPdwNMUb8Dd?QsWSJ$ zJJZMocMfvHHhsc3w!_zDdDH_fqs_`ii3R{IXUy6*5XJGasWRQcA%U^zqo2X>C) zsEb_+#fx~*42BHm3LRV!QK59umeFIuWUw|vhd72V!6anPs2+baB~CKo_nKhyj1KOR}u+uP`0v`Hba8K7vA+E+5KFrEc)eQlhrTxZ6 zR#N}zyd3~yZ85|^gr8pce*;qS_GZf0Pf!JwW>~S3uNszq{ib2je#35_J@by z9GDG9m<-M;fjk$%Cl@>3CP12p>Ao!x1_3XZ3rz-GElQq?0Qr90d=yA=Wa%sKj}F7q zJihYFqAF&;mc-Ec^5*&K9;xYmf3hJEZzq+I@h}a@?Y6UGBXe{T#6W;h69ym*1Kohy z3;-s#kWbPX(^E^N3Z!&o`Fexc)vKuMGStCLR%G&o03Jqk1Jx!#ylF!T(c3#qqynUT zWbNJexSfTO>k^uhBN(ylVOFhVYX9SGpOn7wK_q??`#P?4w&03XisE*H*qa{a4pcJtn z8Uy1#0VKhTx z2w7K=>YXv&Q^zw466DoTzpB@Iez(0t8rSiDQPn03bg8z1_l*E3q$^f zuJtl72p9oTAs_@)nUER=2`CC2kkILpnpKWvGIBL+&kH8_BzL{)-vm#Mn8SuH)BgOsx9J8FSKnpb*HVkvZ>aS$m`Zx zUs}Bqy36cst+nCNiulf@`-iT-_zrIPFe3ymVf*MdJREx^=0jTDSlh&{wrpJl8kiZt zS`lkwDqXD17+YbA`t~26gOQmFcJ2wGam#oWh}^dQ(CThsQ5QzL z-zHBQ{{R!e|I0US^t-Pi7hkY5FNC;#x^=?!ihGFHRz%ZU$C}l_b#mPeMw{3+FO2G> z?yoH`txUL@h_`DXM7ELs^s}GLq%4S#gAkNj9#Vd7S#4CyY;tX6Aln9PGu^DUEXFpM z8rOwPkhHfiX(-lF`wX(y84-dK0}@T0Kkt=D9WqGvXb#v3Mbfejz@lacXV|`Z$z!IqJ zCkt_ZV+Ftx%PR{o)epg-P1h)$!GpEUbW)GpvMRS$RMTA{0F%Ff>#QHIhQI-PBm^SF z(c>bE+bOk7n`jm4u{LckJEnDCP04%twSgTITV4mMxK$IqD_%z5J3;_fC4Yt~`o{tno_9=|xPZQH?g-IACfLXBf|KJKa^}0eKR8(kv@M=U zZHyo0@IxhGAYz@FRm6g93^Orkor%n=>_(YY&vlQygAfh-D0G)MoRqTmsmJIIuZ>}G z&$DLGxyhqOWeLj0!I-?tSZKhB=}9^vd{;;&c=QA&h_ey0r%@Ie-EisZL`G#_MLG1W zk$F#&K8%bJ!3bR%qE(KR!T6pK#?x=ylzrD8cVg&->;nWEB0eIR_>@qL2{h!!?+DgW zSi;lrWIplG=dl&!6(J~qvDWEA9E>82e2fV3C?OR2jzAoYhOi+NBV}MMs_cloSA<9y zYyE7ls2^YyReKo4*ufluAO|DFcU&Z`#{p(~fZZ3uKK_rp?c#BSJX#IeZ@})@z`Ngf z4VL?d+l5FxirqZKdsT@1?vu~nIVEM?haG6gFgFjum?aqB7s5FE_6dBw!n}|J48IPx z-;glbfFI}|enklJKmO`{xG3x7QP%l)ayS-p+z8?bxuXg14lzXT@5W;xFP=Y#%jD*Z z`s-i>VB~+n2rTXkLE@L+K7-r*&DRj`xM75LFuH$)@6agrj)dHN{q>h{f4~b;*5yKo zD$BCn2&NJQkK)#DkayG$Mcs#=nOQ>}d4Ld-v_MO$M)lNX{ML@-bucGHAfW;FTgY0o z5pvY%;9z~&G_G3EN|!wtKWYadsH*ck`Uqx-kC?LNh1539b6>6Mj1yFq(T%})Mu<|P z{`EUYHRLE|9W}JFd7d*l)hk43sBL6@G9yF%37Gi3@<4s%0On`_s~iji3={7}A&|38q}?Vd-wb!0}!%sgZV z+r0|jGtnNlTVP6n=AI!lLS||Rz6T?BVPp?;Yj#V7PZjd0H3W0nV^Rf~o04&s`54v_ zHButd0Amluc|zcMrgBDb7>wkS2;p(ykxFF(UIkT|fcPVF2MAyTh{yv>!laRTLotyF zeuI$NQ5F(`AVlU29MCXe7+~Apgxd|+F^Ov8ydfCL9^)AyXO6O#c(svBx7`GX>7u6|jY|hei>38Q=$I%hfeGuXtIo&g*N_WO z13DH$kELQxwNY?h2*F{rKTopQqi*YCyhCe()F#LUqs~3}6 zjwl3}6QXVPo*4?W2H)4q^UGeRE!tP=+(ymUfF5W)#pAOs_bBTtn&tSIemaA;j$wgb|m^7D4p=tz$bO{@O+4zae4Wbc{Z{sAHbK z6oPmKLR`<}2M~S1LCD&0SsGDx>#m`1H9}rn)IrE_W$JEZ8=is?Hj=6^aEt~wRBDP? zvp?=qmSAWAAr(`aJ@1D1v?@j>9aG%o!&S+KvRp2vHwZbJ7|suS9~dDXDlO*qqF7Z$ z&$H!*uR0Heph_=_BrY14>H`~>izRf=9c}pBl=ablAzT#;TeXI(#mH6XYN|nQ1BR}T zfH`#RgKeP?BTMlmLQ+@Mp+@?xMQqvgxJ%)P>fHvIs$8YZA|LFi0k$H%?)+0z*5ghL zNz5<_rSw86h-f^*8XPtPlS(zTl9;ImF;+E=l(&>HLC&oqKe{6Xvj*cZmOfCaXnX)d zlMFzOA2!vN*@Q8`1i%=%WMYT`fJv+bj~n7Q0{(|WH3WLAU;_j&-5Im2X&g+91kVV0 z+*3&p6GOV2BjMdNcKeu;m+Poo5WJs0kq6AwH;0KK-odD&Y=Ou%x`!Q@TNpimV)(~{ zkS)NYcVN5TY+Y>+S(YS@f2L{P0tT+*V)Ga9HQ_LfqN!?qusrX(X*r5dtS$ zD|9goskbZyzBVK$mu@zSBX!Cs3ob`^io;7`45Be(^FrWq1VJu^RZ;qug}|km2_nS9 z4~-5=(GcY-n~xBev;H0s5>fw;k1R0td%A}*5!9TEjHxNzhG#gHk4F7+NdB$EZ13&ja^DP#;8x|Kqq z(Em`s%Tbk(_}V+{XCnkgJX=rC_wIC`E@px&4?zNMP@#nCjVNTpJkiL!<~T*A+aXZo zTDQ4383P$7Aae->nKA}R zGRJ5y&`bMQJD<)1$w-qVYjp@bvU@vJ#1q2>NE;;^vC>XK;F0s~vmi3)0rejf62LN+#(>R&cVij^XA*hZv8HL|%HM%_YMOKwh~8fk!^_ybUBvsMb!0K$ZU1B!sM$ zQDVZc>I7MB6)s?kqZ^bEQ$Hk$xxK~7KL~;j`)NL4-eeSHG02vtEL-rDrg!Cl29Y6Y z3$>=f#cmZtp3^u4?1T%1U#B4Gj=cX$2O)H{HjTTaOB!|iO0V`yHi$z|m>8K`K~`Jz z*?DAlUa7k9MXfX!Cj8Qef^2rNw^QU=2a#3xWDRo(g3I~)9akp-WWwk{8s-)x{GNJ; z7jO4I+;V?ZNQgt3P`5*{EaAboo5KLI`=rerg4hRPq|?tKWLhXihAoSz-!ve1?0w83 zSfgn4S`c3-gc9?uizOYyPLW?R%vnhGE%bam8jqg29fD^y={AZpZ?Ys83q78iY;EfS?M|BRfGh@G_UC1g zb??mX*$>Uef=cyUd9hOwi>+zg%0lFk>Mu#GP<7-%*>V85yq+h!)3?l=)M zUnEu4drySu%X^c*jvXBTIP!X7I)kV8>bcx*y|Ul$=j9or{J6B^l3}BoBh?N1 zt9^qm(qrJcEB#KFEHc=9ulg(xWDiAsoK|E$eo}pH_IQTB>@C?yk ze9&ac!W?gALf5vD^?Dgt@O%Ew=Kj>_ zO>74y+j#Hz!c+H`KF#cV5Td?V%ZixE7NgmS^KdhwG~BWUQijH6nvbY8W>vycdX1ph zqq9^x{>eEVpNXKHjnHqIUz#Dnqi0Ber3HcRSysQ>agi-SfShq(23J;p)s+TM7~J6K zr7IfkU=0Bwm%GV(+OyX*40&evc*y6wFxzpybGR_oI80V%uicPZg@hE()JY=_&xzFR z)QY1K-IDa=JU)qrxbDeVW(b19u_=Q_JUUOU^9DZ|BB?e*nlETh(lNBZ;3b&lm#nr6 zyj%rVNB7+BAn0EN?u95#fxFV&7lQ!|RtE;J?TWMs^uq~x5>iSn>tk*0Pi=lVv3gS{ z$-;Fq-__>trqg)7LFgNA?*mgcNlk%Wj~ zHj9R%go2QqE$Ei9Fkd7maWsSX7bB$G?_$EO1(r8JJsJ$IK7fe25Bp;fvYgt}HQj|LYoFf|;(X-udAOS-bH4{g;eWM_`x8=CbO^mhZDNg> zUW1TE68^w8`37Bn=U-00Pr8H#cB%uRhc6>QOVRGm)%qH}mElQSux#K+$qDs~Q8X{_z zHI7qgcNm)NhL3a3&s#K3>6ma3LboDC2!`?4ibph$sdX&~X`~3Updn!%^6SPIY6wsc zA)05k2ikX1wN*j{uG^NPz7i!1e*J^nx2)CSgcx;BIj3P}gcQ;}-`!Aj4BngeEZ-3> ze`kRZKDXBvME$^n2j`JaDZQ1L%ML?ZdZNY)G5TW^X zqjd(6Z;j(TV&aSl8dB0s9Z*Xi6+i8j%~3nhG_BojN_gRdrG7`F!ljv6*KP=*?s&k!OrWbXkZ`r@c2-+M7| z0HTow*;ft~rx-T3Z%cWWr5}|EP5YD~PygoF1y@o6Ewua29N5a8**Mogqg8ITpQ_4ax5aWzDqeeq4nORplYL5Ny2IGsriREA`R@A?7bn)*U!R^HfBQT2&7NOQ zga(uVsXNZA+Cu z*T<*LW3?V8zFG6qcP6EzNZoOkul{h_#5Gd?j)c|QzkT~vIrtxKcWA>f5JLeJnVsZP zOP20v3$s9FDd3lQh3*iAhX&sQ9U;SYfmTtXD1mMK;E_+hb?zm0wD|oPu>RF>-_(tOV={cR5ofFR&WGK(>9ql*aQ&^`|7krmK=~9 zzE}Jwn4&B+(lrB(`22bx$yqI=jt&87kcSmd$Y@N~mjiHg$=)uz!j18PPf7EDL4FL7 z?;xH-^6|!i#LM30#2`6+Bk}KmKteEw_@^w|CFKYJI6)D9UYZlwJYy%K&XXIijOzhz zuDcdc;6^~GvPZ7S(JpCprWz~1X=$!4u+*?BR&Lk@+3pI8i2Ceg9#Je(dpyIo6x7dyNFsWU2^M(!qFa4!F$RI8R4F81K5n#3W(33H$Gc0l@`T)6@VFgfUA5jFD05 zBa8^65s)0M^&&c_MTlb*!Hl=+Qw|Y8T~bxO8{)A%C}L+;h>ek?#p3(tA)v#H>5k<^ zn2fC8W>uk>AfPxDx0rtMb%c1-PFr0B6UPghW%U`8nPU9AUq?SFsel4+o$*Kvki>As zT$dwkJ|A#wJ^>F}cPvX)hjulh7{{KNF?Nk~F{VroHOt>IB3=Z77u64xO^Yf6jFPnw zYhH*8)ldWiKH_YEApi+9XvoN6?AYjr#X;^_ZYv}=3kY3&x<4FH4kOmKv@W#; zgVBQP%0xUg!xx*`2pg^~QvwYxJnizt6$rk2Isn>`T^ZSmfMr<`=u#)bjL68Y&11us zPr8WbIeOo7+vP4bfJea({^G6Qbg`_+*kF1X+3uVbB98u4orgP*i~5$E)BTKVa~Z|O zxb4npVcjlwF~mymyED2Ej%08I5Nx>wH>{&csFatY;%teBubvcMj z42_vIO7LE$RBu-RtNZ?Y+}r$-g7d_0!-y6dW@Mqf&xM{>GqUQ)t|^Qw1138Cr7nBt<`c&eqU zs^vqzn3rGdZ)JBw)Y2YP;JGzR>xCcJ^dWOIpd6j+-Rog`l+{k%lBBzwsaHRa(;nOpJM5>qO^kx&{z@%+d3;;QbF;HfWwmQjb5f!b4r^WwdO^Z|G9D zotCuhcN7=0roL;Gy5c=+&5AH`(%=}N>p@cod7hx=Ys)$~$__6obGP5`-#j3aPn**1 zZL@yg402|$>2>b;8c?4ZQZZ+ga?{ZYxvn8B04_rMdS%zYZ_75CN}Fc_cQM zgSO=VuE{f2^ilUIYUzWb#!NrphOrgy||^s6Hazo4yjHXFQO zQ`J*{Q>B!H3<=~$Hu`7=KQ4I}nq;M({4fv^B^)j7Lz2_EB_YH#Ow+1sJtocQJbOplT{AlSKTSMw!3Ob=ev4#e|xi0NF` zvmoeJ^y8hkJ^Z5l7YGPGJB?_wb5VYCygYmxiOrgIKnQX`*8*LDa9r(ULQ6Y*PSYP7 zr4{k}A0F!u3tC$0aKH7$bk@_Al;Pi-5Lrtiy%JUL*}y4 z*;?N@^zPrgkXG71bd8KmNYfjLn0}=P;$fG5Om_z2$yUJ%f8TZPblYXWwFe8>YV7j@ z;qkf^wF-N|-@49W^}jx}(zE%&F9+vUx40~vd=S(9brrAcAtH`CcyVF|6v1A`8|7CI zcJN(2XfN0D{DPiISC(#5LKv@^e%0tXOTDHSyeM_WhDz}lTHjswf-^n3tG#*4gG!*TFn)sLmbB==mQtw^)rZX{Ni%R{ij)U-IOdc~AEnor^t> zO0Q7*w0?XnJ$AIy5TQlWf1(AAZfkkyK^XCG8+g5^?K-8$PgbJu%MnKHmu*P4I77aB zf8#v_FY1ycJw^+fBn`th5IrN^Z%g}SNm>bODu+06S&GDl6tzl{WCh!LN2)a6Nmq?7 zMFrKTJX+Kun$z~Rt+X$e2awYR?O3Z6)$pzwZd&{1Rp}Y#(TwDIQWbsbE$Q7EwcLA@ z4nL}LIbY5<(|#*mY3oC+#}N+Cck;T<9=>&L1)h_mJZ+Y|@?|@n*HkC1YtBrXONm4EyG^)X(dz2moFPz*g471s}aJt z)R($f=ta$PK7Gj%yzoP1QPTAupOYlB`$?DYz{2FIW`0`@6$d;j<#QVlhH1J-nu`$zj4#AERtcrQE(WyQ*|C=Cshxz z;$qu%QYitu3SssX_7~!el5(k0Tva%bmhFg;jLqJ(9h)P>o>hn~7>Zz|d}Lqw*s)PY z;XvGW(g6mkdcCgjG_$&@$i_uQ$k@bCcxN`n!wvLs`;p?Zw|3*=%56gryv(i&)Rqy- ztX2dH{K&BR_G$<|6CT*HUBTrMLlqIjju1y&q4c;|&y@QY_4qhLzygt7>^!RQvW#%% zQANfa8S7#Ofh|NGT(C)FdsQKhLNX}@R-(NcQ5Zl?Kpznfrd+QG*eF(P*-6QWL+#l* zm|V5pjLQQ&!bU*CuFJCQI_}L+>X=dtxZ%cL*;tF=gX12;$$K>cF#>mrp~J*6MIaTA zvKAT`W>j(@VFba9&CELigpFxa3Wg|+1yI6AFvj#^n>cpIw3+b{CNzcx4URR)gqMgS zjwcEH*Z^+nfk@kCB;r^yu@LK{v9XC89Mj1x(8MSuD1nY)hz(;H7^~rtV+2U}M4dqc z5jG8;Ppp9L__}#S6BFg*zaF}|+DW|f2~IK$>KHr#8H2~d8j$>@>2sinm8hz60QGx_7jT8T@< zWKd#xN+ul+k+>uUIycOa>13x@GBQFt;>bK1OT`b|2$Ud@u>#4+EEvLcehKG~hU2sX zaUiGGSQppbM0wi$p+ChofsPOnfW8(rcG8p)y{2q1pi`15OlhV;t_3)8FakJ<0K(VZ zu;~&c$RxwZ=>-l5EC(VP0Z2oD4=fG^-pvC-SPmv%9V~}&l9d=}Rv?IT)tL41ND5)Q zf?^ep#}@%*=?vo-+kFm;h2s$6Nor5NY?$F#4hC`aWrTUu?(YKV)BM>2|AXElX-UBI%fzz(P&?7DWH+{zAk0^+=^q=csanP&hp zwqk25VJ4N55wcg~mj{7`#R1cXttgNQFl1u}PO!nIOE?60!dB_6{Pnj*7h!oUN*`FV z(=dGshA_^g>h!@bwqugQrtFf|a<^y`%qC5pi#oq3^%;{8&aegK3gDDDDdMon8O6FS z6JnGsp*&!27?YsF4?VHHcG;3%5eUy+DSL@qrRLgmYFZQ^spgy z%)yt|8@*D57t@ZE$9F&`LU>Vyk$gB`L4zdY_(1x^z&^N@a6l^#GBk7KrKj@`^SndMF2GD|415J;%g(X9f+ca~j=ESFPuWv`SV8N9C5Z0b!(@ z9Wc#~x&f5q2tNBE#SV~*PoL2n2LvzZQFB#E^y5zb-3LU7aj#xX56)m6^7EH19fG=Uh0S!ZG^EQ9U%U=#p3<<|dO$cjrxMiK z>b@R`KW*H%`1WvJg|*X$c6qmGMF=vaFE*b4zCzAA6%F2zoqX`gN<9M36Fm^iCF|>F;I~fLlr}g*nkU_M_PN-w%3f(45Iuqn z+MqZ6cj`ROSyX&c>x(bub_U|T1M%qD*`LYGkj`A5?L=Sg_&Dd)f0-4}m+gedN-{rcU8mvc7XRC+xSh?HccMd8{Jpo% zYFiMHLN{D|jZZ$DOJ247^>-Wuqgd8n-yQI1Vv5lLqH(g-uMVT(ft>%S96*g6yKnqzZG-kLnxIz2b*^wAi%5C z%)D2P5c+aYl@4%I`i@d@ea$Lod$iOmtYs$|5{U)#8NKNq!s^LzSUr3b7S#O%dJjqv z&(2n-=$*MYm05X~>_b;-^mT>3}DA@$=$t9FT9%J->Os^yg4a4`$40 z5i+DtYbD)pl`5FN-o?2cMe{L_&(;Rb@}Te;fBbATo(^zajcIDZ(ux7KG02cSS}3)3 z5T$dLklD1Ny@z56G9+BA&JJ#MWUWsh+=Hw_Tx%HuvhjP5oOdce2_H?RI>-%L+ACHh z`_R&sw~V8=7kTdI@5IgJ^24SUXGn*Z15vBgRJ`bgh|osXWOdoMwqlpeAO*H&-h2ZUgz$7-dKKcP z4z6_RQWOh&TMZR`DX9>Ug|$@iDGU#8^b_tn1SEciwr!sMVni|m64=KOpv$O?Eepx0 zz++hvsI15WZ@2;@P<#%F6|e)^29tLMf?URfO+AnWjAU1kc|j%^cpR2NMkcb$6Ov(W z44GtH0RRBOtYPLLwvLV1499FDLynL7g6)pLH5)^YjWFmS3GfIw%P20#MA;!h23H6% zn8;ujfGacW&|`B>U}wfZM`1w7n4oH~IRGQZ25_ARDQ6D?0yMZmBR8rBoO!;>>&k9p~*R9^rlY`hu8=1$p;28iH8sYDPUmM!e{(rC5pL;cFYDn z*1#TNMU)XE6R}7l8vEnO*iB-MFcHGV;$&imv;dDI<-lNT@UeyG1X$%)cCYFo!pe9``VxW|5_GAWXZHCAqhIxjC$9KOUX4J zOgdv?8k7+^k{byTOh$uap^4ei-*{;1o12J_`BOc(rXmPW&?L&&VNVzAI?Sm-)=p?{ z!u&tAvx!PvmAGytZl;!!YvOa-Pe_8OBIW*9EbMrTEGUr5^Hzux(A1|L7zLUFaV};$ zmMr*l2n?v@nKjw3*P-+n5Cc@D^DJU}z~t zJ1`0?4FT-iAr1f_2td)o#{F+>V$PyTF}xS4e~d(w-H&0(j3&$tSqIkk0iiCo-SqvCT7M#14aQe6999SUnert*{%Qp N002ovPDHLkV1f$s#LfT! literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.svg b/vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.svg new file mode 100644 index 0000000..96cd21d --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/images/vlans-deeper-look.svg @@ -0,0 +1 @@ +DockerHost:Frontend,Backend &CreditCardAppTiersareIsolatedbutcanstillcommunicateinsideinterfaceoranyotherDockerhostsusingtheparentVLANID802.1QTrunk -canbeasingleEthernetlinkorMultipleBondedEthernetlinksInterfaceeth0Container(s)Eth010.1.20.0/24Parent:eth0.20VLANID:20CreditCardsBackendContainer(s)Eth010.1.30.0/24Container(s)Eth010.1.10.0/24FrontendGateway10.1.20.1andothercontainersonthesameVLAN/subnetGateway10.1.10.1andothercontainersonthesameVLAN/subnetGateway10.1.30.1andothercontainersonthesameVLAN/subnet:Parenteth0.10VLANID:10Parent:eth0.30VLAN:30NetworkotherDockerHosts \ No newline at end of file diff --git a/vendor/github.com/moby/moby/experimental/vlan-networks.md b/vendor/github.com/moby/moby/experimental/vlan-networks.md new file mode 100644 index 0000000..8d368c2 --- /dev/null +++ b/vendor/github.com/moby/moby/experimental/vlan-networks.md @@ -0,0 +1,475 @@ +# Ipvlan Network Driver + +### Getting Started + +The Ipvlan driver is currently in experimental mode in order to incubate Docker users use cases and vet the implementation to ensure a hardened, production ready driver in a future release. Libnetwork now gives users total control over both IPv4 and IPv6 addressing. The VLAN driver builds on top of that in giving operators complete control of layer 2 VLAN tagging and even Ipvlan L3 routing for users interested in underlay network integration. For overlay deployments that abstract away physical constraints see the [multi-host overlay ](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) driver. + +Ipvlan is a new twist on the tried and true network virtualization technique. The Linux implementations are extremely lightweight because rather than using the traditional Linux bridge for isolation, they are simply associated to a Linux Ethernet interface or sub-interface to enforce separation between networks and connectivity to the physical network. + +Ipvlan offers a number of unique features and plenty of room for further innovations with the various modes. Two high level advantages of these approaches are, the positive performance implications of bypassing the Linux bridge and the simplicity of having fewer moving parts. Removing the bridge that traditionally resides in between the Docker host NIC and container interface leaves a simple setup consisting of container interfaces, attached directly to the Docker host interface. This result is easy access for external facing services as there is no need for port mappings in these scenarios. + +### Pre-Requisites + +- The examples on this page are all single host and setup using Docker experimental builds that can be installed with the following instructions: [Install Docker experimental](https://github.com/docker/docker/tree/master/experimental) + +- All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly. `-o parent` interfaces can also be left out of the `docker network create` all together and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples. + +- Kernel requirements: + + - To check your current kernel version, use `uname -r` to display your kernel version + - Ipvlan Linux kernel v4.2+ (support for earlier kernels exists but is buggy) + +### Ipvlan L2 Mode Example Usage + +An example of the ipvlan `L2` mode topology is shown in the following image. The driver is specified with `-d driver_name` option. In this case `-d ipvlan`. + +![Simple Ipvlan L2 Mode Example](images/ipvlan_l2_simple.png) + +The parent interface in the next example `-o parent=eth0` is configured as followed: + +``` +$ ip addr show eth0 +3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 +``` + +Use the network from the host's interface as the `--subnet` in the `docker network create`. The container will be attached to the same network as the host interface as set via the `-o parent=` option. + +Create the ipvlan network and run a container attaching to it: + +``` +# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) +$ docker network create -d ipvlan \ + --subnet=192.168.1.0/24 \ + --gateway=192.168.1.1 \ + -o ipvlan_mode=l2 \ + -o parent=eth0 db_net + +# Start a container on the db_net network +$ docker run --net=db_net -it --rm alpine /bin/sh + +# NOTE: the containers can NOT ping the underlying host interfaces as +# they are intentionally filtered by Linux for additional isolation. +``` + +The default mode for Ipvlan is `l2`. If `-o ipvlan_mode=` are left unspecified, the default mode will be used. Similarly, if the `--gateway` is left empty, the first usable address on the network will be set as the gateway. For example, if the subnet provided in the network create is `--subnet=192.168.1.0/24` then the gateway the container receives is `192.168.1.1`. + +To help understand how this mode interacts with other hosts, the following figure shows the same layer 2 segment between two Docker hosts that applies to and Ipvlan L2 mode. + +![Multiple Ipvlan Hosts](images/macvlan-bridge-ipvlan-l2.png) + +The following will create the exact same network as the network `db_net` created prior, with the driver defaults for `--gateway=192.168.1.1` and `-o ipvlan_mode=l2`. + +``` +# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) +$ docker network create -d ipvlan \ + --subnet=192.168.1.0/24 \ + -o parent=eth0 db_net_ipv + +# Start a container with an explicit name in daemon mode +$ docker run --net=db_net_ipv --name=ipv1 -itd alpine /bin/sh + +# Start a second container and ping using the container name +# to see the docker included name resolution functionality +$ docker run --net=db_net_ipv --name=ipv2 -it --rm alpine /bin/sh +$ ping -c 4 ipv1 + +# NOTE: the containers can NOT ping the underlying host interfaces as +# they are intentionally filtered by Linux for additional isolation. +``` + +The drivers also support the `--internal` flag that will completely isolate containers on a network from any communications external to that network. Since network isolation is tightly coupled to the network's parent interface the result of leaving the `-o parent=` option off of a `docker network create` is the exact same as the `--internal` option. If the parent interface is not specified or the `--internal` flag is used, a netlink type `dummy` parent interface is created for the user and used as the parent interface effectively isolating the network completely. + +The following two `docker network create` examples result in identical networks that you can attach container to: + +``` +# Empty '-o parent=' creates an isolated network +$ docker network create -d ipvlan \ + --subnet=192.168.10.0/24 isolated1 + +# Explicit '--internal' flag is the same: +$ docker network create -d ipvlan \ + --subnet=192.168.11.0/24 --internal isolated2 + +# Even the '--subnet=' can be left empty and the default +# IPAM subnet of 172.18.0.0/16 will be assigned +$ docker network create -d ipvlan isolated3 + +$ docker run --net=isolated1 --name=cid1 -it --rm alpine /bin/sh +$ docker run --net=isolated2 --name=cid2 -it --rm alpine /bin/sh +$ docker run --net=isolated3 --name=cid3 -it --rm alpine /bin/sh + +# To attach to any use `docker exec` and start a shell +$ docker exec -it cid1 /bin/sh +$ docker exec -it cid2 /bin/sh +$ docker exec -it cid3 /bin/sh +``` + +### Ipvlan 802.1q Trunk L2 Mode Example Usage + +Architecturally, Ipvlan L2 mode trunking is the same as Macvlan with regard to gateways and L2 path isolation. There are nuances that can be advantageous for CAM table pressure in ToR switches, one MAC per port and MAC exhaustion on a host's parent NIC to name a few. The 802.1q trunk scenario looks the same. Both modes adhere to tagging standards and have seamless integration with the physical network for underlay integration and hardware vendor plugin integrations. + +Hosts on the same VLAN are typically on the same subnet and almost always are grouped together based on their security policy. In most scenarios, a multi-tier application is tiered into different subnets because the security profile of each process requires some form of isolation. For example, hosting your credit card processing on the same virtual network as the frontend webserver would be a regulatory compliance issue, along with circumventing the long standing best practice of layered defense in depth architectures. VLANs or the equivocal VNI (Virtual Network Identifier) when using the Overlay driver, are the first step in isolating tenant traffic. + +![Docker VLANs in Depth](images/vlans-deeper-look.png) + +The Linux sub-interface tagged with a vlan can either already exist or will be created when you call a `docker network create`. `docker network rm` will delete the sub-interface. Parent interfaces such as `eth0` are not deleted, only sub-interfaces with a netlink parent index > 0. + +For the driver to add/delete the vlan sub-interfaces the format needs to be `interface_name.vlan_tag`. Other sub-interface naming can be used as the specified parent, but the link will not be deleted automatically when `docker network rm` is invoked. + +The option to use either existing parent vlan sub-interfaces or let Docker manage them enables the user to either completely manage the Linux interfaces and networking or let Docker create and delete the Vlan parent sub-interfaces (netlink `ip link`) with no effort from the user. + +For example: use `eth0.10` to denote a sub-interface of `eth0` tagged with the vlan id of `10`. The equivalent `ip link` command would be `ip link add link eth0 name eth0.10 type vlan id 10`. + +The example creates the vlan tagged networks and then start two containers to test connectivity between containers. Different Vlans cannot ping one another without a router routing between the two networks. The default namespace is not reachable per ipvlan design in order to isolate container namespaces from the underlying host. + +**Vlan ID 20** + +In the first network tagged and isolated by the Docker host, `eth0.20` is the parent interface tagged with vlan id `20` specified with `-o parent=eth0.20`. Other naming formats can be used, but the links need to be added and deleted manually using `ip link` or Linux configuration files. As long as the `-o parent` exists anything can be used if compliant with Linux netlink. + +``` +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged +$ docker network create -d ipvlan \ + --subnet=192.168.20.0/24 \ + --gateway=192.168.20.1 \ + -o parent=eth0.20 ipvlan20 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +$ docker run --net=ipvlan20 -it --name ivlan_test1 --rm alpine /bin/sh +$ docker run --net=ipvlan20 -it --name ivlan_test2 --rm alpine /bin/sh +``` + +**Vlan ID 30** + +In the second network, tagged and isolated by the Docker host, `eth0.30` is the parent interface tagged with vlan id `30` specified with `-o parent=eth0.30`. The `ipvlan_mode=` defaults to l2 mode `ipvlan_mode=l2`. It can also be explicitly set with the same result as shown in the next example. + +``` +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged. +$ docker network create -d ipvlan \ + --subnet=192.168.30.0/24 \ + --gateway=192.168.30.1 \ + -o parent=eth0.30 \ + -o ipvlan_mode=l2 ipvlan30 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +$ docker run --net=ipvlan30 -it --name ivlan_test3 --rm alpine /bin/sh +$ docker run --net=ipvlan30 -it --name ivlan_test4 --rm alpine /bin/sh +``` + +The gateway is set inside of the container as the default gateway. That gateway would typically be an external router on the network. + +``` +$$ ip route + default via 192.168.30.1 dev eth0 + 192.168.30.0/24 dev eth0 src 192.168.30.2 +``` + +Example: Multi-Subnet Ipvlan L2 Mode starting two containers on the same subnet and pinging one another. In order for the `192.168.114.0/24` to reach `192.168.116.0/24` it requires an external router in L2 mode. L3 mode can route between subnets that share a common `-o parent=`. + +Secondary addresses on network routers are common as an address space becomes exhausted to add another secondary to an L3 vlan interface or commonly referred to as a "switched virtual interface" (SVI). + +``` +$ docker network create -d ipvlan \ + --subnet=192.168.114.0/24 --subnet=192.168.116.0/24 \ + --gateway=192.168.114.254 --gateway=192.168.116.254 \ + -o parent=eth0.114 \ + -o ipvlan_mode=l2 ipvlan114 + +$ docker run --net=ipvlan114 --ip=192.168.114.10 -it --rm alpine /bin/sh +$ docker run --net=ipvlan114 --ip=192.168.114.11 -it --rm alpine /bin/sh +``` + +A key takeaway is, operators have the ability to map their physical network into their virtual network for integrating containers into their environment with no operational overhauls required. NetOps simply drops an 802.1q trunk into the Docker host. That virtual link would be the `-o parent=` passed in the network creation. For untagged (non-VLAN) links, it is as simple as `-o parent=eth0` or for 802.1q trunks with VLAN IDs each network gets mapped to the corresponding VLAN/Subnet from the network. + +An example being, NetOps provides VLAN ID and the associated subnets for VLANs being passed on the Ethernet link to the Docker host server. Those values are simply plugged into the `docker network create` commands when provisioning the Docker networks. These are persistent configurations that are applied every time the Docker engine starts which alleviates having to manage often complex configuration files. The network interfaces can also be managed manually by being pre-created and docker networking will never modify them, simply use them as parent interfaces. Example mappings from NetOps to Docker network commands are as follows: + +- VLAN: 10, Subnet: 172.16.80.0/24, Gateway: 172.16.80.1 + + - `--subnet=172.16.80.0/24 --gateway=172.16.80.1 -o parent=eth0.10` + +- VLAN: 20, IP subnet: 172.16.50.0/22, Gateway: 172.16.50.1 + + - `--subnet=172.16.50.0/22 --gateway=172.16.50.1 -o parent=eth0.20 ` + +- VLAN: 30, Subnet: 10.1.100.0/16, Gateway: 10.1.100.1 + + - `--subnet=10.1.100.0/16 --gateway=10.1.100.1 -o parent=eth0.30` + +### IPVlan L3 Mode Example + +IPVlan will require routes to be distributed to each endpoint. The driver only builds the Ipvlan L3 mode port and attaches the container to the interface. Route distribution throughout a cluster is beyond the initial implementation of this single host scoped driver. In L3 mode, the Docker host is very similar to a router starting new networks in the container. They are on networks that the upstream network will not know about without route distribution. For those curious how Ipvlan L3 will fit into container networking see the following examples. + +![Docker Ipvlan L2 Mode](images/ipvlan-l3.png) + +Ipvlan L3 mode drops all broadcast and multicast traffic. This reason alone makes Ipvlan L3 mode a prime candidate for those looking for massive scale and predictable network integrations. It is predictable and in turn will lead to greater uptimes because there is no bridging involved. Bridging loops have been responsible for high profile outages that can be hard to pinpoint depending on the size of the failure domain. This is due to the cascading nature of BPDUs (Bridge Port Data Units) that are flooded throughout a broadcast domain (VLAN) to find and block topology loops. Eliminating bridging domains, or at the least, keeping them isolated to a pair of ToRs (top of rack switches) will reduce hard to troubleshoot bridging instabilities. Ipvlan L2 modes is well suited for isolated VLANs only trunked into a pair of ToRs that can provide a loop-free non-blocking fabric. The next step further is to route at the edge via Ipvlan L3 mode that reduces a failure domain to a local host only. + +- L3 mode needs to be on a separate subnet as the default namespace since it requires a netlink route in the default namespace pointing to the Ipvlan parent interface. + +- The parent interface used in this example is `eth0` and it is on the subnet `192.168.1.0/24`. Notice the `docker network` is **not** on the same subnet as `eth0`. + +- Unlike ipvlan l2 modes, different subnets/networks can ping one another as long as they share the same parent interface `-o parent=`. + +``` +$$ ip a show eth0 +3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 00:50:56:39:45:2e brd ff:ff:ff:ff:ff:ff + inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 +``` + +- A traditional gateway doesn't mean much to an L3 mode Ipvlan interface since there is no broadcast traffic allowed. Because of that, the container default gateway simply point the the containers `eth0` device. See below for CLI output of `ip route` or `ip -6 route` from inside an L3 container for details. + +The mode ` -o ipvlan_mode=l3` must be explicitly specified since the default ipvlan mode is `l2`. + +The following example does not specify a parent interface. The network drivers will create a dummy type link for the user rather then rejecting the network creation and isolating containers from only communicating with one another. + +``` +# Create the Ipvlan L3 network +$ docker network create -d ipvlan \ + --subnet=192.168.214.0/24 \ + --subnet=10.1.214.0/24 \ + -o ipvlan_mode=l3 ipnet210 + +# Test 192.168.214.0/24 connectivity +$ docker run --net=ipnet210 --ip=192.168.214.10 -itd alpine /bin/sh +$ docker run --net=ipnet210 --ip=10.1.214.10 -itd alpine /bin/sh + +# Test L3 connectivity from 10.1.214.0/24 to 192.168.212.0/24 +$ docker run --net=ipnet210 --ip=192.168.214.9 -it --rm alpine ping -c 2 10.1.214.10 + +# Test L3 connectivity from 192.168.212.0/24 to 10.1.214.0/24 +$ docker run --net=ipnet210 --ip=10.1.214.9 -it --rm alpine ping -c 2 192.168.214.10 + +``` + +Notice there is no `--gateway=` option in the network create. The field is ignored if one is specified `l3` mode. Take a look at the container routing table from inside of the container: + +``` +# Inside an L3 mode container +$$ ip route + default dev eth0 + 192.168.214.0/24 dev eth0 src 192.168.214.10 +``` + +In order to ping the containers from a remote Docker host or the container be able to ping a remote host, the remote host or the physical network in between need to have a route pointing to the host IP address of the container's Docker host eth interface. More on this as we evolve the Ipvlan `L3` story. + +### Dual Stack IPv4 IPv6 Ipvlan L2 Mode + +- Not only does Libnetwork give you complete control over IPv4 addressing, but it also gives you total control over IPv6 addressing as well as feature parity between the two address families. + +- The next example will start with IPv6 only. Start two containers on the same VLAN `139` and ping one another. Since the IPv4 subnet is not specified, the default IPAM will provision a default IPv4 subnet. That subnet is isolated unless the upstream network is explicitly routing it on VLAN `139`. + +``` +# Create a v6 network +$ docker network create -d ipvlan \ + --subnet=2001:db8:abc2::/64 --gateway=2001:db8:abc2::22 \ + -o parent=eth0.139 v6ipvlan139 + +# Start a container on the network +$ docker run --net=v6ipvlan139 -it --rm alpine /bin/sh + +``` + +View the container eth0 interface and v6 routing table: + +``` +# Inside the IPv6 container +$$ ip a show eth0 +75: eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 172.18.0.2/16 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link + valid_lft forever preferred_lft forever + inet6 2001:db8:abc2::1/64 scope link nodad + valid_lft forever preferred_lft forever + +$$ ip -6 route +2001:db8:abc4::/64 dev eth0 proto kernel metric 256 +2001:db8:abc2::/64 dev eth0 proto kernel metric 256 +default via 2001:db8:abc2::22 dev eth0 metric 1024 +``` + +Start a second container and ping the first container's v6 address. + +``` +# Test L2 connectivity over IPv6 +$ docker run --net=v6ipvlan139 -it --rm alpine /bin/sh + +# Inside the second IPv6 container +$$ ip a show eth0 +75: eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 172.18.0.3/16 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link tentative dadfailed + valid_lft forever preferred_lft forever + inet6 2001:db8:abc2::2/64 scope link nodad + valid_lft forever preferred_lft forever + +$$ ping6 2001:db8:abc2::1 +PING 2001:db8:abc2::1 (2001:db8:abc2::1): 56 data bytes +64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=0 ttl=64 time=0.044 ms +64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=1 ttl=64 time=0.058 ms + +2 packets transmitted, 2 packets received, 0% packet loss +round-trip min/avg/max/stddev = 0.044/0.051/0.058/0.000 ms +``` + +The next example with setup a dual stack IPv4/IPv6 network with an example VLAN ID of `140`. + +Next create a network with two IPv4 subnets and one IPv6 subnets, all of which have explicit gateways: + +``` +$ docker network create -d ipvlan \ + --subnet=192.168.140.0/24 --subnet=192.168.142.0/24 \ + --gateway=192.168.140.1 --gateway=192.168.142.1 \ + --subnet=2001:db8:abc9::/64 --gateway=2001:db8:abc9::22 \ + -o parent=eth0.140 \ + -o ipvlan_mode=l2 ipvlan140 +``` + +Start a container and view eth0 and both v4 & v6 routing tables: + +``` +$ docker run --net=ipvlan140 --ip6=2001:db8:abc2::51 -it --rm alpine /bin/sh + +$ ip a show eth0 +78: eth0@if77: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 192.168.140.2/24 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link + valid_lft forever preferred_lft forever + inet6 2001:db8:abc9::1/64 scope link nodad + valid_lft forever preferred_lft forever + +$$ ip route +default via 192.168.140.1 dev eth0 +192.168.140.0/24 dev eth0 proto kernel scope link src 192.168.140.2 + +$$ ip -6 route +2001:db8:abc4::/64 dev eth0 proto kernel metric 256 +2001:db8:abc9::/64 dev eth0 proto kernel metric 256 +default via 2001:db8:abc9::22 dev eth0 metric 1024 +``` + +Start a second container with a specific `--ip4` address and ping the first host using IPv4 packets: + +``` +$ docker run --net=ipvlan140 --ip=192.168.140.10 -it --rm alpine /bin/sh +``` + +**Note**: Different subnets on the same parent interface in Ipvlan `L2` mode cannot ping one another. That requires a router to proxy-arp the requests with a secondary subnet. However, Ipvlan `L3` will route the unicast traffic between disparate subnets as long as they share the same `-o parent` parent link. + +### Dual Stack IPv4 IPv6 Ipvlan L3 Mode + +**Example:** IpVlan L3 Mode Dual Stack IPv4/IPv6, Multi-Subnet w/ 802.1q Vlan Tag:118 + +As in all of the examples, a tagged VLAN interface does not have to be used. The sub-interfaces can be swapped with `eth0`, `eth1`, `bond0` or any other valid interface on the host other then the `lo` loopback. + +The primary difference you will see is that L3 mode does not create a default route with a next-hop but rather sets a default route pointing to `dev eth` only since ARP/Broadcasts/Multicast are all filtered by Linux as per the design. Since the parent interface is essentially acting as a router, the parent interface IP and subnet needs to be different from the container networks. That is the opposite of bridge and L2 modes, which need to be on the same subnet (broadcast domain) in order to forward broadcast and multicast packets. + +``` +# Create an IPv6+IPv4 Dual Stack Ipvlan L3 network +# Gateways for both v4 and v6 are set to a dev e.g. 'default dev eth0' +$ docker network create -d ipvlan \ + --subnet=192.168.110.0/24 \ + --subnet=192.168.112.0/24 \ + --subnet=2001:db8:abc6::/64 \ + -o parent=eth0 \ + -o ipvlan_mode=l3 ipnet110 + + +# Start a few of containers on the network (ipnet110) +# in separate terminals and check connectivity +$ docker run --net=ipnet110 -it --rm alpine /bin/sh +# Start a second container specifying the v6 address +$ docker run --net=ipnet110 --ip6=2001:db8:abc6::10 -it --rm alpine /bin/sh +# Start a third specifying the IPv4 address +$ docker run --net=ipnet110 --ip=192.168.112.30 -it --rm alpine /bin/sh +# Start a 4th specifying both the IPv4 and IPv6 addresses +$ docker run --net=ipnet110 --ip6=2001:db8:abc6::50 --ip=192.168.112.50 -it --rm alpine /bin/sh +``` + +Interface and routing table outputs are as follows: + +``` +$$ ip a show eth0 +63: eth0@if59: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 192.168.112.2/24 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link + valid_lft forever preferred_lft forever + inet6 2001:db8:abc6::10/64 scope link nodad + valid_lft forever preferred_lft forever + +# Note the default route is simply the eth device because ARPs are filtered. +$$ ip route + default dev eth0 scope link + 192.168.112.0/24 dev eth0 proto kernel scope link src 192.168.112.2 + +$$ ip -6 route +2001:db8:abc4::/64 dev eth0 proto kernel metric 256 +2001:db8:abc6::/64 dev eth0 proto kernel metric 256 +default dev eth0 metric 1024 +``` + +*Note:* There may be a bug when specifying `--ip6=` addresses when you delete a container with a specified v6 address and then start a new container with the same v6 address it throws the following like the address isn't properly being released to the v6 pool. It will fail to unmount the container and be left dead. + +``` +docker: Error response from daemon: Address already in use. +``` + +### Manually Creating 802.1q Links + +**Vlan ID 40** + +If a user does not want the driver to create the vlan sub-interface it simply needs to exist prior to the `docker network create`. If you have sub-interface naming that is not `interface.vlan_id` it is honored in the `-o parent=` option again as long as the interface exists and is up. + +Links, when manually created, can be named anything as long as they exist when the network is created. Manually created links do not get deleted regardless of the name when the network is deleted with `docker network rm`. + +``` +# create a new sub-interface tied to dot1q vlan 40 +$ ip link add link eth0 name eth0.40 type vlan id 40 + +# enable the new sub-interface +$ ip link set eth0.40 up + +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged +$ docker network create -d ipvlan \ + --subnet=192.168.40.0/24 \ + --gateway=192.168.40.1 \ + -o parent=eth0.40 ipvlan40 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +$ docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh +$ docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh +``` + +**Example:** Vlan sub-interface manually created with any name: + +``` +# create a new sub interface tied to dot1q vlan 40 +$ ip link add link eth0 name foo type vlan id 40 + +# enable the new sub-interface +$ ip link set foo up + +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged +$ docker network create -d ipvlan \ + --subnet=192.168.40.0/24 --gateway=192.168.40.1 \ + -o parent=foo ipvlan40 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +$ docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh +$ docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh +``` + +Manually created links can be cleaned up with: + +``` +$ ip link del foo +``` + +As with all of the Libnetwork drivers, they can be mixed and matched, even as far as running 3rd party ecosystem drivers in parallel for maximum flexibility to the Docker user. diff --git a/vendor/github.com/moby/moby/hack/Jenkins/W2L/postbuild.sh b/vendor/github.com/moby/moby/hack/Jenkins/W2L/postbuild.sh new file mode 100644 index 0000000..662e2dc --- /dev/null +++ b/vendor/github.com/moby/moby/hack/Jenkins/W2L/postbuild.sh @@ -0,0 +1,35 @@ +set +x +set +e + +echo "" +echo "" +echo "---" +echo "Now starting POST-BUILD steps" +echo "---" +echo "" + +echo INFO: Pointing to $DOCKER_HOST + +if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then + echo INFO: Removing containers... + ! docker rm -vf $(docker ps -aq) +fi + +# Remove all images which don't have docker or debian in the name +if [ ! $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }' | wc -l) -eq 0 ]; then + echo INFO: Removing images... + ! docker rmi -f $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }') +fi + +# Kill off any instances of git, go and docker, just in case +! taskkill -F -IM git.exe -T >& /dev/null +! taskkill -F -IM go.exe -T >& /dev/null +! taskkill -F -IM docker.exe -T >& /dev/null + +# Remove everything +! cd /c/jenkins/gopath/src/github.com/docker/docker +! rm -rfd * >& /dev/null +! rm -rfd .* >& /dev/null + +echo INFO: Cleanup complete +exit 0 \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/Jenkins/W2L/setup.sh b/vendor/github.com/moby/moby/hack/Jenkins/W2L/setup.sh new file mode 100644 index 0000000..30e5884 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/Jenkins/W2L/setup.sh @@ -0,0 +1,309 @@ +# Jenkins CI script for Windows to Linux CI. +# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable. +set +xe +SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016" + +# TODO to make (even) more resilient: +# - Wait for daemon to be running before executing docker commands +# - Check if jq is installed +# - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version +# - Make sure we are not running as local system. Can't do until all Azure nodes are updated. +# - Error if docker versions are not equal. Can't do until all Azure nodes are updated +# - Error if go versions are not equal. Can't do until all Azure nodes are updated. +# - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64" +# - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind +# - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP +# - Consider cross builing the Windows binary and copy across. That's a bit of a heavy lift. Only reason +# for doing that is that it mirrors the actual release process for docker.exe which is cross-built. +# However, should absolutely not be a problem if built natively, so nit-picking. +# - Tidy up of images and containers. Either here, or in the teardown script. + +ec=0 +uniques=1 +echo INFO: Started at `date`. Script version $SCRIPT_VER + + +# !README! +# There are two daemons running on the remote Linux host: +# - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon +# from the sources matching the PR. +# - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted +# (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376). +# The windows integration tests are run against this inner daemon. + +# get the ip, inner and outer ports. +ip="${DOCKER_HOST#*://}" +port_outer="${ip#*:}" +# inner port is like outer port with last two digits inverted. +port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/') +ip="${ip%%:*}" + +echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner" + +# If TLS is enabled +if [ -n "$DOCKER_TLS_VERIFY" ]; then + protocol=https + if [ -z "$DOCKER_MACHINE_NAME" ]; then + ec=1 + echo "ERROR: DOCKER_MACHINE_NAME is undefined" + fi + certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME) + curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem" + run_extra_args="-v tlscerts:/etc/docker" + daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem" +else + protocol=http +fi + +# Save for use by make.sh and scripts it invokes +export MAIN_DOCKER_HOST="tcp://$ip:$port_inner" + +# Verify we can get the remote node to respond to _ping +if [ $ec -eq 0 ]; then + reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping` + if [ "$reply" != "OK" ]; then + ec=1 + echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node" + echo " at $ip:$port_outer when called with an http request for '_ping'. This implies that" + echo " either the daemon has crashed/is not running, or the Linux node is unavailable." + echo + echo " A regular ping to the remote Linux node is below. It should reply. If not, the" + echo " machine cannot be reached at all and may have crashed. If it does reply, it is" + echo " likely a case of the Linux daemon not running or having crashed, which requires" + echo " further investigation." + echo + echo " Try re-running this CI job, or ask on #docker-dev or #docker-maintainers" + echo " for someone to perform further diagnostics, or take this node out of rotation." + echo + ping $ip + else + echo "INFO: The Linux nodes outer daemon replied to a ping. Good!" + fi +fi + +# Get the version from the remote node. Note this may fail if jq is not installed. +# That's probably worth checking to make sure, just in case. +if [ $ec -eq 0 ]; then + remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'` + echo "INFO: Remote daemon is running docker version $remoteVersion" +fi + +# Compare versions. We should really fail if result is no 1. Output at end of script. +if [ $ec -eq 0 ]; then + uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l` +fi + +# Make sure we are in repo +if [ $ec -eq 0 ]; then + if [ ! -d hack ]; then + echo "ERROR: Are you sure this is being launched from a the root of docker repository?" + echo " If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker." + echo " Current directory is `pwd`" + ec=1 + fi +fi + +# Are we in split binary mode? +if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then + splitBinary=0 + echo "INFO: Running in single binary mode" +else + splitBinary=1 + echo "INFO: Running in split binary mode" +fi + + +# Get the commit has and verify we have something +if [ $ec -eq 0 ]; then + export COMMITHASH=$(git rev-parse --short HEAD) + echo INFO: Commmit hash is $COMMITHASH + if [ -z $COMMITHASH ]; then + echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?" + ec=1 + fi +fi + +# Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not +# in the right directory where the repo is cloned. We also redirect TEMP to not use the environment +# TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which +# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system... +if [ $ec -eq 0 ]; then + export TEMP=/c/CI/CI-$COMMITHASH + export TMP=$TEMP + /usr/bin/mkdir -p $TEMP # Make sure Linux mkdir for -p +fi + +# Tidy up time +if [ $ec -eq 0 ]; then + echo INFO: Deleting pre-existing containers and images... + + # Force remove all containers based on a previously built image with this commit + ! docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null + + # Force remove any container with this commithash as a name + ! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null + + # This SHOULD never happen, but just in case, also blow away any containers + # that might be around. + ! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then + echo WARN: There were some leftover containers. Cleaning them up. + ! docker rm -f $(docker ps -aq) + fi + + # Force remove the image if it exists + ! docker rmi -f "docker-$COMMITHASH" &>/dev/null +fi + +# Provide the docker version for debugging purposes. If these fail, game over. +# as the Linux box isn't responding for some reason. +if [ $ec -eq 0 ]; then + echo INFO: Docker version and info of the outer daemon on the Linux node + echo + docker version + ec=$? + if [ 0 -ne $ec ]; then + echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" + fi + echo +fi + +# Same as above, but docker info +if [ $ec -eq 0 ]; then + echo + docker info + ec=$? + if [ 0 -ne $ec ]; then + echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" + fi + echo +fi + +# build the daemon image +if [ $ec -eq 0 ]; then + echo "INFO: Running docker build on Linux host at $DOCKER_HOST" + if [ $splitBinary -eq 0 ]; then + set -x + docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" . + cat < +# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/ +# +# This script should be executed inside a docker container in privileged mode +# ('docker run --privileged', introduced in docker 0.6). + +# Usage: dind CMD [ARG...] + +# apparmor sucks and Docker needs to know that it's in a container (c) @tianon +export container=docker + +if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then + mount -t securityfs none /sys/kernel/security || { + echo >&2 'Could not mount /sys/kernel/security.' + echo >&2 'AppArmor detection and --privileged mode might break.' + } +fi + +# Mount /tmp (conditionally) +if ! mountpoint -q /tmp; then + mount -t tmpfs none /tmp +fi + +if [ $# -gt 0 ]; then + exec "$@" +fi + +echo >&2 'ERROR: No command specified.' +echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' diff --git a/vendor/github.com/moby/moby/hack/dockerfile/binaries-commits b/vendor/github.com/moby/moby/hack/dockerfile/binaries-commits new file mode 100755 index 0000000..8547b45 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/dockerfile/binaries-commits @@ -0,0 +1,11 @@ +#!/bin/sh + +TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a + +# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly +RUNC_COMMIT=54296cf40ad8143b62dbcaa1d90e520a2136ddfe +CONTAINERD_COMMIT=4ab9917febca54791c5f071a9d1f404867857fcc +TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574 +LIBNETWORK_COMMIT=0f534354b813003a754606689722fe253101bc4e +VNDR_COMMIT=f56bd4504b4fad07a357913687fb652ee54bb3b0 +BINDATA_COMMIT=a0ff2567cfb70903282db057e799fd826784d41d diff --git a/vendor/github.com/moby/moby/hack/dockerfile/install-binaries.sh b/vendor/github.com/moby/moby/hack/dockerfile/install-binaries.sh new file mode 100755 index 0000000..64f2b57 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/dockerfile/install-binaries.sh @@ -0,0 +1,123 @@ +#!/bin/sh +set -e +set -x + +. $(dirname "$0")/binaries-commits + +RM_GOPATH=0 + +TMP_GOPATH=${TMP_GOPATH:-""} + +if [ -z "$TMP_GOPATH" ]; then + export GOPATH="$(mktemp -d)" + RM_GOPATH=1 +else + export GOPATH="$TMP_GOPATH" +fi + +# Do not build with ambient capabilities support +RUNC_BUILDTAGS="${RUNC_BUILDTAGS:-"seccomp apparmor selinux"}" + +install_runc() { + echo "Install runc version $RUNC_COMMIT" + git clone https://github.com/docker/runc.git "$GOPATH/src/github.com/opencontainers/runc" + cd "$GOPATH/src/github.com/opencontainers/runc" + git checkout -q "$RUNC_COMMIT" + make BUILDTAGS="$RUNC_BUILDTAGS" $1 + cp runc /usr/local/bin/docker-runc +} + +install_containerd() { + echo "Install containerd version $CONTAINERD_COMMIT" + git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" + cd "$GOPATH/src/github.com/docker/containerd" + git checkout -q "$CONTAINERD_COMMIT" + make $1 + cp bin/containerd /usr/local/bin/docker-containerd + cp bin/containerd-shim /usr/local/bin/docker-containerd-shim + cp bin/ctr /usr/local/bin/docker-containerd-ctr +} + +install_proxy() { + echo "Install docker-proxy version $LIBNETWORK_COMMIT" + git clone https://github.com/docker/libnetwork.git "$GOPATH/src/github.com/docker/libnetwork" + cd "$GOPATH/src/github.com/docker/libnetwork" + git checkout -q "$LIBNETWORK_COMMIT" + go build -ldflags="$PROXY_LDFLAGS" -o /usr/local/bin/docker-proxy github.com/docker/libnetwork/cmd/proxy +} + +install_bindata() { + echo "Install go-bindata version $BINDATA_COMMIT" + git clone https://github.com/jteeuwen/go-bindata "$GOPATH/src/github.com/jteeuwen/go-bindata" + cd $GOPATH/src/github.com/jteeuwen/go-bindata + git checkout -q "$BINDATA_COMMIT" + go build -o /usr/local/bin/go-bindata github.com/jteeuwen/go-bindata/go-bindata +} + +for prog in "$@" +do + case $prog in + tomlv) + echo "Install tomlv version $TOMLV_COMMIT" + git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" + cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT" + go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv + ;; + + runc) + install_runc static + ;; + + runc-dynamic) + install_runc + ;; + + containerd) + install_containerd static + ;; + + containerd-dynamic) + install_containerd + ;; + + tini) + echo "Install tini version $TINI_COMMIT" + git clone https://github.com/krallin/tini.git "$GOPATH/tini" + cd "$GOPATH/tini" + git checkout -q "$TINI_COMMIT" + cmake . + make tini-static + cp tini-static /usr/local/bin/docker-init + ;; + + proxy) + export CGO_ENABLED=0 + install_proxy + ;; + + proxy-dynamic) + PROXY_LDFLAGS="-linkmode=external" install_proxy + ;; + + vndr) + echo "Install vndr version $VNDR_COMMIT" + git clone https://github.com/LK4D4/vndr.git "$GOPATH/src/github.com/LK4D4/vndr" + cd "$GOPATH/src/github.com/LK4D4/vndr" + git checkout -q "$VNDR_COMMIT" + go build -v -o /usr/local/bin/vndr . + ;; + + bindata) + install_bindata + ;; + + *) + echo echo "Usage: $0 [tomlv|runc|containerd|tini|proxy]" + exit 1 + + esac +done + +if [ $RM_GOPATH -eq 1 ]; then + rm -rf "$GOPATH" +fi diff --git a/vendor/github.com/moby/moby/hack/generate-authors.sh b/vendor/github.com/moby/moby/hack/generate-authors.sh new file mode 100755 index 0000000..e78a97f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/generate-authors.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." + +# see also ".mailmap" for how email addresses and names are deduplicated + +{ + cat <<-'EOH' + # This file lists all individuals having contributed content to the repository. + # For how it is generated, see `hack/generate-authors.sh`. + EOH + echo + git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf +} > AUTHORS diff --git a/vendor/github.com/moby/moby/hack/generate-swagger-api.sh b/vendor/github.com/moby/moby/hack/generate-swagger-api.sh new file mode 100755 index 0000000..a8e9f81 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/generate-swagger-api.sh @@ -0,0 +1,22 @@ +#!/bin/sh +set -eu + +swagger generate model -f api/swagger.yaml \ + -t api -m types --skip-validator -C api/swagger-gen.yaml \ + -n Volume \ + -n Port \ + -n ImageSummary \ + -n Plugin -n PluginDevice -n PluginMount -n PluginEnv -n PluginInterfaceType \ + -n ErrorResponse \ + -n IdResponse \ + -n ServiceUpdateResponse + +swagger generate operation -f api/swagger.yaml \ + -t api -a types -m types -C api/swagger-gen.yaml \ + -T api/templates --skip-responses --skip-parameters --skip-validator \ + -n VolumesList \ + -n VolumesCreate \ + -n ContainerCreate \ + -n ContainerUpdate \ + -n Authenticate \ + -n ContainerWait diff --git a/vendor/github.com/moby/moby/hack/install.sh b/vendor/github.com/moby/moby/hack/install.sh new file mode 100644 index 0000000..0d46bf7 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/install.sh @@ -0,0 +1,540 @@ +#!/bin/sh +set -e +# +# This script is meant for quick & easy install via: +# 'curl -sSL https://get.docker.com/ | sh' +# or: +# 'wget -qO- https://get.docker.com/ | sh' +# +# For test builds (ie. release candidates): +# 'curl -fsSL https://test.docker.com/ | sh' +# or: +# 'wget -qO- https://test.docker.com/ | sh' +# +# For experimental builds: +# 'curl -fsSL https://experimental.docker.com/ | sh' +# or: +# 'wget -qO- https://experimental.docker.com/ | sh' +# +# Docker Maintainers: +# To update this script on https://get.docker.com, +# use hack/release.sh during a normal release, +# or the following one-liner for script hotfixes: +# aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index +# + +url="https://get.docker.com/" +apt_url="https://apt.dockerproject.org" +yum_url="https://yum.dockerproject.org" + +docker_key="-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o +ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R +mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn +TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK +dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT +X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG +HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c +NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ +hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U +65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM +zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB +tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv +Y2tlci5jb20+iQIcBBABCgAGBQJWw7vdAAoJEFyzYeVS+w0QHysP/i37m4SyoOCV +cnybl18vzwBEcp4VCRbXvHvOXty1gccVIV8/aJqNKgBV97lY3vrpOyiIeB8ETQeg +srxFE7t/Gz0rsLObqfLEHdmn5iBJRkhLfCpzjeOnyB3Z0IJB6UogO/msQVYe5CXJ +l6uwr0AmoiCBLrVlDAktxVh9RWch0l0KZRX2FpHu8h+uM0/zySqIidlYfLa3y5oH +scU+nGU1i6ImwDTD3ysZC5jp9aVfvUmcESyAb4vvdcAHR+bXhA/RW8QHeeMFliWw +7Z2jYHyuHmDnWG2yUrnCqAJTrWV+OfKRIzzJFBs4e88ru5h2ZIXdRepw/+COYj34 +LyzxR2cxr2u/xvxwXCkSMe7F4KZAphD+1ws61FhnUMi/PERMYfTFuvPrCkq4gyBj +t3fFpZ2NR/fKW87QOeVcn1ivXl9id3MMs9KXJsg7QasT7mCsee2VIFsxrkFQ2jNp +D+JAERRn9Fj4ArHL5TbwkkFbZZvSi6fr5h2GbCAXIGhIXKnjjorPY/YDX6X8AaHO +W1zblWy/CFr6VFl963jrjJgag0G6tNtBZLrclZgWhOQpeZZ5Lbvz2ZA5CqRrfAVc +wPNW1fObFIRtqV6vuVluFOPCMAAnOnqR02w9t17iVQjO3oVN0mbQi9vjuExXh1Yo +ScVetiO6LSmlQfVEVRTqHLMgXyR/EMo7iQIcBBABCgAGBQJXSWBlAAoJEFyzYeVS ++w0QeH0QAI6btAfYwYPuAjfRUy9qlnPhZ+xt1rnwsUzsbmo8K3XTNh+l/R08nu0d +sczw30Q1wju28fh1N8ay223+69f0+yICaXqR18AbGgFGKX7vo0gfEVaxdItUN3eH +NydGFzmeOKbAlrxIMECnSTG/TkFVYO9Ntlv9vSN2BupmTagTRErxLZKnVsWRzp+X +elwlgU5BCZ6U6Ze8+bIc6F1bZstf17X8i6XNV/rOCLx2yP0hn1osoljoLPpW8nzk +wvqYsYbCA28lMt1aqe0UWvRCqR0zxlKn17NZQqjbxcajEMCajoQ01MshmO5GWePV +iv2abCZ/iaC5zKqVT3deMJHLq7lum6qhA41E9gJH9QoqT+qgadheeFfoC1QP7cke ++tXmYg2R39p3l5Hmm+JQbP4f9V5mpWExvHGCSbcatr35tnakIJZugq2ogzsm1djC +Sz9222RXl9OoFqsm1bNzA78+/cOt5N2cyhU0bM2T/zgh42YbDD+JDU/HSmxUIpU+ +wrGvZGM2FU/up0DRxOC4U1fL6HHlj8liNJWfEg3vhougOh66gGF9ik5j4eIlNoz6 +lst+gmvlZQ9/9hRDeoG+AbhZeIlQ4CCw+Y1j/+fUxIzKHPVK+aFJd+oJVNvbojJW +/SgDdSMtFwqOvXyYcHl30Ws0gZUeDyAmNGZeJ3kFklnApDmeKK+OiQIiBBABCgAM +BQJXe5zTBYMHhh+AAAoJEDG4FaMBBnSp7YMQAJqrXoBonZAq07B6qUaT3aBCgnY4 +JshbXmFb/XrrS75f7YJDPx2fJJdqrbYDIHHgOjzxvp3ngPpOpJzI5sYmkaugeoCO +/KHu/+39XqgTB7fguzapRfbvuWp+qzPcHSdb9opnagfzKAze3DQnnLiwCPlsyvGp +zC4KzXgV2ze/4raaOye1kK7O0cHyapmn/q/TR3S8YapyXq5VpLThwJAw1SRDu0Yx +eXIAQiIfaSxT79EktoioW2CSV8/djt+gBjXnKYJJA8P1zzX7GNt/Rc2YG0Ot4v6t +BW16xqFTg+n5JzbeK5cZ1jbIXXfCcaZJyiM2MzYGhSJ9+EV7JYF05OAIWE4SGTRj +XMquQ2oMLSwMCPQHm+FCD9PXQ0tHYx6tKT34wksdmoWsdejl/n3NS+178mG1WI/l +N079h3im2gRwOykMou/QWs3vGw/xDoOYHPV2gJ7To9BLVnVK/hROgdFLZFeyRScN +zwKm57HmYMFA74tX601OiHhk1ymP2UUc25oDWpLXlfcRULJJlo/KfZZF3pmKwIq3 +CilGayFUi1NNwuavG76EcAVtVFUVFFIITwkhkuRbBHIytzEHYosFgD5/acK0Pauq +JnwrwKv0nWq3aK7nKiALAD+iZvPNjFZau3/APqLEmvmRnAElmugcHsWREFxMMjMM +VgYFiYKUAJO8u46eiQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgID +AQIeAQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0 +CH+nAk40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj +9A4I1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlS +C4SluyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQe +bTGv0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4 +Aal8L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08 +GkzDYBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn +6oOR7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA +/Zxcjk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5 +HWXPHXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1s +FVELMXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1g +EJOQTvBR8Q== +=Yhur +-----END PGP PUBLIC KEY BLOCK----- +" + +mirror='' +while [ $# -gt 0 ]; do + case "$1" in + --mirror) + mirror="$2" + shift + ;; + *) + echo "Illegal option $1" + ;; + esac + shift $(( $# > 0 ? 1 : 0 )) +done + +case "$mirror" in + AzureChinaCloud) + apt_url="https://mirror.azure.cn/docker-engine/apt" + yum_url="https://mirror.azure.cn/docker-engine/yum" + ;; +esac + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +echo_docker_as_nonroot() { + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' + ) || true + fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output + cat <<-EOF + + If you would like to use Docker as a non-root user, you should now consider + adding your user to the "docker" group with something like: + + sudo usermod -aG docker $your_user + + Remember that you will have to log out and back in for this to take effect! + + WARNING: Adding a user to the "docker" group will grant the ability to run + containers which can be used to obtain root privileges on the + docker host. + Refer to https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface + for more information. + + EOF +} + +# Check if this is a forked Linux distro +check_forked() { + + # Check for lsb_release command existence, it usually exists in forked distros + if command_exists lsb_release; then + # Check if the `-u` option is supported + set +e + lsb_release -a -u > /dev/null 2>&1 + lsb_release_exit_code=$? + set -e + + # Check if the command has exited successfully, it means we're in a forked distro + if [ "$lsb_release_exit_code" = "0" ]; then + # Print info about current distro + cat <<-EOF + You're using '$lsb_dist' version '$dist_version'. + EOF + + # Get the upstream release info + lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]') + dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]') + + # Print info about upstream distro + cat <<-EOF + Upstream release is '$lsb_dist' version '$dist_version'. + EOF + else + if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then + # We're Debian and don't even know it! + lsb_dist=debian + dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" + case "$dist_version" in + 9) + dist_version="stretch" + ;; + 8|'Kali Linux 2') + dist_version="jessie" + ;; + 7) + dist_version="wheezy" + ;; + esac + fi + fi + fi +} + +semverParse() { + major="${1%%.*}" + minor="${1#$major.}" + minor="${minor%%.*}" + patch="${1#$major.$minor.}" + patch="${patch%%[-.]*}" +} + +do_install() { + architecture=$(uname -m) + case $architecture in + # officially supported + amd64|x86_64) + ;; + # unofficially supported with available repositories + armv6l|armv7l) + ;; + # unofficially supported without available repositories + aarch64|arm64|ppc64le|s390x) + cat 1>&2 <<-EOF + Error: This install script does not support $architecture, because no + $architecture package exists in Docker's repositories. + + Other install options include checking your distribution's package repository + for a version of Docker, or building Docker from source. + EOF + exit 1 + ;; + # not supported + *) + cat >&2 <<-EOF + Error: $architecture is not a recognized platform. + EOF + exit 1 + ;; + esac + + if command_exists docker; then + version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)" + MAJOR_W=1 + MINOR_W=10 + + semverParse $version + + shouldWarn=0 + if [ $major -lt $MAJOR_W ]; then + shouldWarn=1 + fi + + if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then + shouldWarn=1 + fi + + cat >&2 <<-'EOF' + Warning: the "docker" command appears to already exist on this system. + + If you already have Docker installed, this script can cause trouble, which is + why we're displaying this warning and provide the opportunity to cancel the + installation. + + If you installed the current Docker package using this script and are using it + EOF + + if [ $shouldWarn -eq 1 ]; then + cat >&2 <<-'EOF' + again to update Docker, we urge you to migrate your image store before upgrading + to v1.10+. + + You can find instructions for this here: + https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration + EOF + else + cat >&2 <<-'EOF' + again to update Docker, you can safely ignore this message. + EOF + fi + + cat >&2 <<-'EOF' + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. + EOF + exit 1 + fi + fi + + curl='' + if command_exists curl; then + curl='curl -sSL' + elif command_exists wget; then + curl='wget -qO-' + elif command_exists busybox && busybox --list-modules | grep -q wget; then + curl='busybox wget -qO-' + fi + + # check to see which repo they are trying to install from + if [ -z "$repo" ]; then + repo='main' + if [ "https://test.docker.com/" = "$url" ]; then + repo='testing' + elif [ "https://experimental.docker.com/" = "$url" ]; then + repo='experimental' + fi + fi + + # perform some very rudimentary platform detection + lsb_dist='' + dist_version='' + if command_exists lsb_release; then + lsb_dist="$(lsb_release -si)" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then + lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then + lsb_dist='debian' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then + lsb_dist='fedora' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then + lsb_dist='oracleserver' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then + lsb_dist='centos' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then + lsb_dist='redhat' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/photon-release ]; then + lsb_dist='photon' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" + fi + + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + + # Special case redhatenterpriseserver + if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then + # Set it to redhat, it will be changed to centos below anyways + lsb_dist='redhat' + fi + + case "$lsb_dist" in + + ubuntu) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then + dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" + fi + ;; + + debian|raspbian) + dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" + case "$dist_version" in + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + 7) + dist_version="wheezy" + ;; + esac + ;; + + oracleserver) + # need to switch lsb_dist to match yum repo URL + lsb_dist="oraclelinux" + dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" + ;; + + fedora|centos|redhat) + dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)" + ;; + + "vmware photon") + lsb_dist="photon" + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + ;; + + *) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + + esac + + # Check if this is a forked Linux distro + check_forked + + # Run setup for each distro accordingly + case "$lsb_dist" in + ubuntu|debian|raspbian) + export DEBIAN_FRONTEND=noninteractive + + did_apt_get_update= + apt_get_update() { + if [ -z "$did_apt_get_update" ]; then + ( set -x; $sh_c 'sleep 3; apt-get update' ) + did_apt_get_update=1 + fi + } + + if [ "$lsb_dist" != "raspbian" ]; then + # aufs is preferred over devicemapper; try to ensure the driver is available. + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then + kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" + + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true + + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' + echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' + ( set -x; sleep 10 ) + fi + else + echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' + echo >&2 ' package. We have no AUFS support. Consider installing the packages' + echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' + ( set -x; sleep 10 ) + fi + fi + fi + + # install apparmor utils if they're missing and apparmor is enabled in the kernel + # otherwise Docker will fail to start + if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + if command -v apparmor_parser >/dev/null 2>&1; then + echo 'apparmor is enabled in the kernel and apparmor utils were already installed' + else + echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..' + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) + fi + fi + + if [ ! -e /usr/lib/apt/methods/https ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) + fi + if [ -z "$curl" ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) + curl='curl -sSL' + fi + if ! command -v gpg > /dev/null; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' ) + fi + + # dirmngr is a separate package in ubuntu yakkety; see https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1634464 + if ! command -v dirmngr > /dev/null; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q dirmngr' ) + fi + + ( + set -x + echo "$docker_key" | $sh_c 'apt-key add -' + $sh_c "mkdir -p /etc/apt/sources.list.d" + $sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list" + $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine' + ) + echo_docker_as_nonroot + exit 0 + ;; + + fedora|centos|redhat|oraclelinux|photon) + if [ "${lsb_dist}" = "redhat" ]; then + # we use the centos repository for both redhat and centos releases + lsb_dist='centos' + fi + $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF + [docker-${repo}-repo] + name=Docker ${repo} Repository + baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version} + enabled=1 + gpgcheck=1 + gpgkey=${yum_url}/gpg + EOF + if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then + ( + set -x + $sh_c 'sleep 3; dnf -y -q install docker-engine' + ) + elif [ "$lsb_dist" = "photon" ]; then + ( + set -x + $sh_c 'sleep 3; tdnf -y install docker-engine' + ) + else + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-engine' + ) + fi + echo_docker_as_nonroot + exit 0 + ;; + esac + + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output + cat >&2 <<-'EOF' + + Either your platform is not easily detectable, is not supported by this + installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have + a package for Docker. Please visit the following URL for more detailed + installation instructions: + + https://docs.docker.com/engine/installation/ + + EOF + exit 1 +} + +# wrapped up in a function so that we have some protection against only getting +# half the file during "curl | sh" +do_install diff --git a/vendor/github.com/moby/moby/hack/make.ps1 b/vendor/github.com/moby/moby/hack/make.ps1 new file mode 100644 index 0000000..14b9603 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make.ps1 @@ -0,0 +1,408 @@ +<# +.NOTES + Author: @jhowardmsft + + Summary: Windows native build script. This is similar to functionality provided + by hack\make.sh, but uses native Windows PowerShell semantics. It does + not support the full set of options provided by the Linux counterpart. + For example: + + - You can't cross-build Linux docker binaries on Windows + - Hashes aren't generated on binaries + - 'Releasing' isn't supported. + - Integration tests. This is because they currently cannot run inside a container, + and require significant external setup. + + It does however provided the minimum necessary to support parts of local Windows + development and Windows to Windows CI. + + Usage Examples (run from repo root): + "hack\make.ps1 -Binary" to build the binaries + "hack\make.ps1 -Client" to build just the client 64-bit binary + "hack\make.ps1 -TestUnit" to run unit tests + "hack\make.ps1 -Binary -TestUnit" to build the binaries and run unit tests + "hack\make.ps1 -All" to run everything this script knows about + +.PARAMETER Client + Builds the client binaries. + +.PARAMETER Daemon + Builds the daemon binary. + +.PARAMETER Binary + Builds the client binaries and the daemon binary. A convenient shortcut to `make.ps1 -Client -Daemon`. + +.PARAMETER Race + Use -race in go build and go test. + +.PARAMETER Noisy + Use -v in go build. + +.PARAMETER ForceBuildAll + Use -a in go build. + +.PARAMETER NoOpt + Use -gcflags -N -l in go build to disable optimisation (can aide debugging). + +.PARAMETER CommitSuffix + Adds a custom string to be appended to the commit ID (spaces are stripped). + +.PARAMETER DCO + Runs the DCO (Developer Certificate Of Origin) test. + +.PARAMETER PkgImports + Runs the pkg\ directory imports test. + +.PARAMETER GoFormat + Runs the Go formatting test. + +.PARAMETER TestUnit + Runs unit tests. + +.PARAMETER All + Runs everything this script knows about. + + +TODO +- Unify the head commit +- Sort out the GITCOMMIT environment variable in the absense of a .git (longer term) +- Add golint and other checks (swagger maybe?) + +#> + + +param( + [Parameter(Mandatory=$False)][switch]$Client, + [Parameter(Mandatory=$False)][switch]$Daemon, + [Parameter(Mandatory=$False)][switch]$Binary, + [Parameter(Mandatory=$False)][switch]$Race, + [Parameter(Mandatory=$False)][switch]$Noisy, + [Parameter(Mandatory=$False)][switch]$ForceBuildAll, + [Parameter(Mandatory=$False)][switch]$NoOpt, + [Parameter(Mandatory=$False)][string]$CommitSuffix="", + [Parameter(Mandatory=$False)][switch]$DCO, + [Parameter(Mandatory=$False)][switch]$PkgImports, + [Parameter(Mandatory=$False)][switch]$GoFormat, + [Parameter(Mandatory=$False)][switch]$TestUnit, + [Parameter(Mandatory=$False)][switch]$All +) + +$ErrorActionPreference = "Stop" +$pushed=$False # To restore the directory if we have temporarily pushed to one. + +# Utility function to get the commit ID of the repository +Function Get-GitCommit() { + if (-not (Test-Path ".\.git")) { + # If we don't have a .git directory, but we do have the environment + # variable DOCKER_GITCOMMIT set, that can override it. + if ($env:DOCKER_GITCOMMIT.Length -eq 0) { + Throw ".git directory missing and DOCKER_GITCOMMIT environment variable not specified." + } + Write-Host "INFO: Git commit assumed from DOCKER_GITCOMMIT environment variable" + return $env:DOCKER_GITCOMMIT + } + $gitCommit=$(git rev-parse --short HEAD) + if ($(git status --porcelain --untracked-files=no).Length -ne 0) { + $gitCommit="$gitCommit-unsupported" + Write-Host "" + Write-Warning "This version is unsupported because there are uncommitted file(s)." + Write-Warning "Either commit these changes, or add them to .gitignore." + git status --porcelain --untracked-files=no | Write-Warning + Write-Host "" + } + return $gitCommit +} + +# Utility function to get get the current build version of docker +Function Get-DockerVersion() { + if (-not (Test-Path ".\VERSION")) { Throw "VERSION file not found. Is this running from the root of a docker repository?" } + return $(Get-Content ".\VERSION" -raw).ToString().Replace("`n","").Trim() +} + +# Utility function to determine if we are running in a container or not. +# In Windows, we get this through an environment variable set in `Dockerfile.Windows` +Function Check-InContainer() { + if ($env:FROM_DOCKERFILE.Length -eq 0) { + Write-Host "" + Write-Warning "Not running in a container. The result might be an incorrect build." + Write-Host "" + } +} + +# Utility function to get the commit for HEAD +Function Get-HeadCommit() { + $head = Invoke-Expression "git rev-parse --verify HEAD" + if ($LASTEXITCODE -ne 0) { Throw "Failed getting HEAD commit" } + + return $head +} + +# Utility function to get the commit for upstream +Function Get-UpstreamCommit() { + Invoke-Expression "git fetch -q https://github.com/docker/docker.git refs/heads/master" + if ($LASTEXITCODE -ne 0) { Throw "Failed fetching" } + + $upstream = Invoke-Expression "git rev-parse --verify FETCH_HEAD" + if ($LASTEXITCODE -ne 0) { Throw "Failed getting upstream commit" } + + return $upstream +} + +# Build a binary (client or daemon) +Function Execute-Build($type, $additionalBuildTags, $directory) { + # Generate the build flags + $buildTags = "autogen" + if ($Noisy) { $verboseParm=" -v" } + if ($Race) { Write-Warning "Using race detector"; $raceParm=" -race"} + if ($ForceBuildAll) { $allParm=" -a" } + if ($NoOpt) { $optParm=" -gcflags "+""""+"-N -l"+"""" } + if ($addtionalBuildTags -ne "") { $buildTags += $(" " + $additionalBuildTags) } + + # Do the go build in the appropriate directory + # Note -linkmode=internal is required to be able to debug on Windows. + # https://github.com/golang/go/issues/14319#issuecomment-189576638 + Write-Host "INFO: Building $type..." + Push-Location $root\cmd\$directory; $global:pushed=$True + $buildCommand = "go build" + ` + $raceParm + ` + $verboseParm + ` + $allParm + ` + $optParm + ` + " -tags """ + $buildTags + """" + ` + " -ldflags """ + "-linkmode=internal" + """" + ` + " -o $root\bundles\"+$directory+".exe" + Invoke-Expression $buildCommand + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile $type" } + Pop-Location; $global:pushed=$False +} + + +# Validates the DCO marker is present on each commit +Function Validate-DCO($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating Developer Certificate of Origin..." + # Username may only contain alphanumeric characters or dashes and cannot begin with a dash + $usernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + + $dcoPrefix="Signed-off-by:" + $dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($usernameRegex)\\))?$" + + $counts = Invoke-Expression "git diff --numstat $upstreamCommit...$headCommit" + if ($LASTEXITCODE -ne 0) { Throw "Failed git diff --numstat" } + + # Counts of adds and deletes after removing multiple white spaces. AWK anyone? :( + $adds=0; $dels=0; $($counts -replace '\s+', ' ') | %{ + $a=$_.Split(" "); + if ($a[0] -ne "-") { $adds+=[int]$a[0] } + if ($a[1] -ne "-") { $dels+=[int]$a[1] } + } + if (($adds -eq 0) -and ($dels -eq 0)) { + Write-Warning "DCO validation - nothing to validate!" + return + } + + $commits = Invoke-Expression "git log $upstreamCommit..$headCommit --format=format:%H%n" + if ($LASTEXITCODE -ne 0) { Throw "Failed git log --format" } + $commits = $($commits -split '\s+' -match '\S') + $badCommits=@() + $commits | %{ + # Skip commits with no content such as merge commits etc + if ($(git log -1 --format=format: --name-status $_).Length -gt 0) { + # Ignore exit code on next call - always process regardless + $commitMessage = Invoke-Expression "git log -1 --format=format:%B --name-status $_" + if (($commitMessage -match $dcoRegex).Length -eq 0) { $badCommits+=$_ } + } + } + if ($badCommits.Length -eq 0) { + Write-Host "Congratulations! All commits are properly signed with the DCO!" + } else { + $e = "`nThese commits do not have a proper '$dcoPrefix' marker:`n" + $badCommits | %{ $e+=" - $_`n"} + $e += "`nPlease amend each commit to include a properly formatted DCO marker.`n`n" + $e += "Visit the following URL for information about the Docker DCO:`n" + $e += "https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work`n" + Throw $e + } +} + +# Validates that .\pkg\... is safely isolated from internal code +Function Validate-PkgImports($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating pkg import isolation..." + + # Get a list of go source-code files which have changed under pkg\. Ignore exit code on next call - always process regardless + $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'pkg\*.go`'" + $badFiles=@(); $files | %{ + $file=$_ + # For the current changed file, get its list of dependencies, sorted and uniqued. + $imports = Invoke-Expression "go list -e -f `'{{ .Deps }}`' $file" + if ($LASTEXITCODE -ne 0) { Throw "Failed go list for dependencies on $file" } + $imports = $imports -Replace "\[" -Replace "\]", "" -Split(" ") | Sort-Object | Get-Unique + # Filter out what we are looking for + $imports = $imports -NotMatch "^github.com/docker/docker/pkg/" ` + -NotMatch "^github.com/docker/docker/vendor" ` + -Match "^github.com/docker/docker" ` + -Replace "`n", "" + $imports | % { $badFiles+="$file imports $_`n" } + } + if ($badFiles.Length -eq 0) { + Write-Host 'Congratulations! ".\pkg\*.go" is safely isolated from internal code.' + } else { + $e = "`nThese files import internal code: (either directly or indirectly)`n" + $badFiles | %{ $e+=" - $_"} + Throw $e + } +} + +# Validates that changed files are correctly go-formatted +Function Validate-GoFormat($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating go formatting on changed files..." + + # Verify gofmt is installed + if ($(Get-Command gofmt -ErrorAction SilentlyContinue) -eq $nil) { Throw "gofmt does not appear to be installed" } + + # Get a list of all go source-code files which have changed. Ignore exit code on next call - always process regardless + $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'" + $files = $files | Select-String -NotMatch "^vendor/" | Select-String -NotMatch "^cli/compose/schema/bindata.go" + $badFiles=@(); $files | %{ + # Deliberately ignore error on next line - treat as failed + $content=Invoke-Expression "git show $headCommit`:$_" + + # Next set of hoops are to ensure we have LF not CRLF semantics as otherwise gofmt on Windows will not succeed. + # Also note that gofmt on Windows does not appear to support stdin piping correctly. Hence go through a temporary file. + $content=$content -join "`n" + $content+="`n" + $outputFile=[System.IO.Path]::GetTempFileName() + if (Test-Path $outputFile) { Remove-Item $outputFile } + [System.IO.File]::WriteAllText($outputFile, $content, (New-Object System.Text.UTF8Encoding($False))) + $valid=Invoke-Expression "gofmt -s -l $outputFile" + Write-Host "Checking $outputFile" + if ($valid.Length -ne 0) { $badFiles+=$_ } + if (Test-Path $outputFile) { Remove-Item $outputFile } + } + if ($badFiles.Length -eq 0) { + Write-Host 'Congratulations! All Go source files are properly formatted.' + } else { + $e = "`nThese files are not properly gofmt`'d:`n" + $badFiles | %{ $e+=" - $_`n"} + $e+= "`nPlease reformat the above files using `"gofmt -s -w`" and commit the result." + Throw $e + } +} + +# Run the unit tests +Function Run-UnitTests() { + Write-Host "INFO: Running unit tests..." + $testPath="./..." + $goListCommand = "go list -e -f '{{if ne .Name """ + '\"github.com/docker/docker\"' + """}}{{.ImportPath}}{{end}}' $testPath" + $pkgList = $(Invoke-Expression $goListCommand) + if ($LASTEXITCODE -ne 0) { Throw "go list for unit tests failed" } + $pkgList = $pkgList | Select-String -Pattern "github.com/docker/docker" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/vendor" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/man" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/integration-cli" + $pkgList = $pkgList -replace "`r`n", " " + $goTestCommand = "go test" + $raceParm + " -cover -ldflags -w -tags """ + "autogen daemon" + """ -a """ + "-test.timeout=10m" + """ $pkgList" + Invoke-Expression $goTestCommand + if ($LASTEXITCODE -ne 0) { Throw "Unit tests failed" } +} + +# Start of main code. +Try { + Write-Host -ForegroundColor Cyan "INFO: make.ps1 starting at $(Get-Date)" + $root=$(pwd) + + # Handle the "-All" shortcut to turn on all things we can handle. + if ($All) { $Client=$True; $Daemon=$True; $DCO=$True; $PkgImports=$True; $GoFormat=$True; $TestUnit=$True } + + # Handle the "-Binary" shortcut to build both client and daemon. + if ($Binary) { $Client = $True; $Daemon = $True } + + # Make sure we have something to do + if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { Throw 'Nothing to do. Try adding "-All" for everything I can do' } + + # Verify git is installed + if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $nil) { Throw "Git does not appear to be installed" } + + # Verify go is installed + if ($(Get-Command go -ErrorAction SilentlyContinue) -eq $nil) { Throw "GoLang does not appear to be installed" } + + # Get the git commit. This will also verify if we are in a repo or not. Then add a custom string if supplied. + $gitCommit=Get-GitCommit + if ($CommitSuffix -ne "") { $gitCommit += "-"+$CommitSuffix -Replace ' ', '' } + + # Get the version of docker (eg 1.14.0-dev) + $dockerVersion=Get-DockerVersion + + # Give a warning if we are not running in a container and are building binaries or running unit tests. + # Not relevant for validation tests as these are fine to run outside of a container. + if ($Client -or $Daemon -or $TestUnit) { Check-InContainer } + + # Verify GOPATH is set + if ($env:GOPATH.Length -eq 0) { Throw "Missing GOPATH environment variable. See https://golang.org/doc/code.html#GOPATH" } + + # Run autogen if building binaries or running unit tests. + if ($Client -or $Daemon -or $TestUnit) { + Write-Host "INFO: Invoking autogen..." + Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion } + Catch [Exception] { Throw $_ } + } + + # DCO, Package import and Go formatting tests. + if ($DCO -or $PkgImports -or $GoFormat) { + # We need the head and upstream commits for these + $headCommit=Get-HeadCommit + $upstreamCommit=Get-UpstreamCommit + + # Run DCO validation + if ($DCO) { Validate-DCO $headCommit $upstreamCommit } + + # Run `gofmt` validation + if ($GoFormat) { Validate-GoFormat $headCommit $upstreamCommit } + + # Run pkg isolation validation + if ($PkgImports) { Validate-PkgImports $headCommit $upstreamCommit } + } + + # Build the binaries + if ($Client -or $Daemon) { + # Create the bundles directory if it doesn't exist + if (-not (Test-Path ".\bundles")) { New-Item ".\bundles" -ItemType Directory | Out-Null } + + # Perform the actual build + if ($Daemon) { Execute-Build "daemon" "daemon" "dockerd" } + if ($Client) { Execute-Build "client" "" "docker" } + } + + # Run unit tests + if ($TestUnit) { Run-UnitTests } + + # Gratuitous ASCII art. + if ($Daemon -or $Client) { + Write-Host + Write-Host -ForegroundColor Green " ________ ____ __." + Write-Host -ForegroundColor Green " \_____ \ `| `|/ _`|" + Write-Host -ForegroundColor Green " / `| \`| `<" + Write-Host -ForegroundColor Green " / `| \ `| \" + Write-Host -ForegroundColor Green " \_______ /____`|__ \" + Write-Host -ForegroundColor Green " \/ \/" + Write-Host + } +} +Catch [Exception] { + Write-Host -ForegroundColor Red ("`nERROR: make.ps1 failed:`n$_") + + # More gratuitous ASCII art. + Write-Host + Write-Host -ForegroundColor Red "___________ .__.__ .___" + Write-Host -ForegroundColor Red "\_ _____/____ `|__`| `| ____ __`| _/" + Write-Host -ForegroundColor Red " `| __) \__ \ `| `| `| _/ __ \ / __ `| " + Write-Host -ForegroundColor Red " `| \ / __ \`| `| `|_\ ___// /_/ `| " + Write-Host -ForegroundColor Red " \___ / (____ /__`|____/\___ `>____ `| " + Write-Host -ForegroundColor Red " \/ \/ \/ \/ " + Write-Host + + Throw $_ +} +Finally { + if ($global:pushed) { Pop-Location } + Write-Host -ForegroundColor Cyan "INFO: make.ps1 ended at $(Get-Date)" +} diff --git a/vendor/github.com/moby/moby/hack/make.sh b/vendor/github.com/moby/moby/hack/make.sh new file mode 100755 index 0000000..f0e482f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make.sh @@ -0,0 +1,304 @@ +#!/usr/bin/env bash +set -e + +# This script builds various binary artifacts from a checkout of the docker +# source code. +# +# Requirements: +# - The current directory should be a checkout of the docker source code +# (https://github.com/docker/docker). Whatever version is checked out +# will be built. +# - The VERSION file, at the root of the repository, should exist, and +# will be used as Docker binary version and package version. +# - The hash of the git commit will also be included in the Docker binary, +# with the suffix -unsupported if the repository isn't clean. +# - The script is intended to be run inside the docker container specified +# in the Dockerfile at the root of the source. In other words: +# DO NOT CALL THIS SCRIPT DIRECTLY. +# - The right way to call this script is to invoke "make" from +# your checkout of the Docker repository. +# the Makefile will do a "docker build -t docker ." and then +# "docker run hack/make.sh" in the resulting image. +# + +set -o pipefail + +export DOCKER_PKG='github.com/docker/docker' +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export MAKEDIR="$SCRIPTDIR/make" +export PKG_CONFIG=${PKG_CONFIG:-pkg-config} + +# We're a nice, sexy, little shell script, and people might try to run us; +# but really, they shouldn't. We want to be in a container! +inContainer="AssumeSoInitially" +if [ "$(go env GOHOSTOS)" = 'windows' ]; then + if [ -z "$FROM_DOCKERFILE" ]; then + unset inContainer + fi +else + if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then + unset inContainer + fi +fi + +if [ -z "$inContainer" ]; then + { + echo "# WARNING! I don't seem to be running in a Docker container." + echo "# The result of this command might be an incorrect build, and will not be" + echo "# officially supported." + echo "#" + echo "# Try this instead: make all" + echo "#" + } >&2 +fi + +echo + +# List of bundles to create when no argument is passed +DEFAULT_BUNDLES=( + binary-client + binary-daemon + dynbinary + + test-unit + test-integration-cli + test-docker-py + + cross + tgz +) + +VERSION=$(< ./VERSION) +! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') +if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then + GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + GITCOMMIT="$GITCOMMIT-unsupported" + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + echo "# GITCOMMIT = $GITCOMMIT" + echo "# The version you are building is listed as unsupported because" + echo "# there are some files in the git repository that are in an uncommitted state." + echo "# Commit these changes, or add to .gitignore to remove the -unsupported from the version." + echo "# Here is the current list:" + git status --porcelain --untracked-files=no + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + fi +elif [ "$DOCKER_GITCOMMIT" ]; then + GITCOMMIT="$DOCKER_GITCOMMIT" +else + echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' + echo >&2 ' Please either build with the .git directory accessible, or specify the' + echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' + echo >&2 ' future accountability in diagnosing build issues. Thanks!' + exit 1 +fi + +if [ "$AUTO_GOPATH" ]; then + rm -rf .gopath + mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" + ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" + export GOPATH="${PWD}/.gopath" + + if [ "$(go env GOOS)" = 'solaris' ]; then + # sys/unix is installed outside the standard library on solaris + # TODO need to allow for version change, need to get version from go + export GO_VERSION=${GO_VERSION:-"1.7.1"} + export GOPATH="${GOPATH}:/usr/lib/gocode/${GO_VERSION}" + fi +fi + +if [ ! "$GOPATH" ]; then + echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH' + echo >&2 ' alternatively, set AUTO_GOPATH=1' + exit 1 +fi + +DOCKER_BUILDTAGS+=" daemon" +if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald" +elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald journald_compat" +fi + +# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately +if \ + command -v gcc &> /dev/null \ + && ! gcc -E - -o /dev/null &> /dev/null <<<'#include ' \ +; then + DOCKER_BUILDTAGS+=' btrfs_noversion' +fi + +# test whether "libdevmapper.h" is new enough to support deferred remove +# functionality. +if \ + command -v gcc &> /dev/null \ + && ! ( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null -ldevmapper &> /dev/null ) \ +; then + DOCKER_BUILDTAGS+=' libdm_no_deferred_remove' +fi + +# Use these flags when compiling the tests and final binary + +IAMSTATIC='true' +source "$SCRIPTDIR/make/.go-autogen" +if [ -z "$DOCKER_DEBUG" ]; then + LDFLAGS='-w' +fi + +LDFLAGS_STATIC='' +EXTLDFLAGS_STATIC='-static' +# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build +# with options like -race. +ORIG_BUILDFLAGS=( -tags "autogen netgo static_build sqlite_omit_load_extension $DOCKER_BUILDTAGS" -installsuffix netgo ) +# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here + +# When $DOCKER_INCREMENTAL_BINARY is set in the environment, enable incremental +# builds by installing dependent packages to the GOPATH. +REBUILD_FLAG="-a" +if [ "$DOCKER_INCREMENTAL_BINARY" ]; then + REBUILD_FLAG="-i" +fi +ORIG_BUILDFLAGS+=( $REBUILD_FLAG ) + +BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) +# Test timeout. + +if [ "${DOCKER_ENGINE_GOARCH}" == "arm" ]; then + : ${TIMEOUT:=10m} +elif [ "${DOCKER_ENGINE_GOARCH}" == "windows" ]; then + : ${TIMEOUT:=8m} +else + : ${TIMEOUT:=5m} +fi + +LDFLAGS_STATIC_DOCKER=" + $LDFLAGS_STATIC + -extldflags \"$EXTLDFLAGS_STATIC\" +" + +if [ "$(uname -s)" = 'FreeBSD' ]; then + # Tell cgo the compiler is Clang, not GCC + # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 + export CC=clang + + # "-extld clang" is a workaround for + # https://code.google.com/p/go/issues/detail?id=6845 + LDFLAGS="$LDFLAGS -extld clang" +fi + +# If sqlite3.h doesn't exist under /usr/include, +# check /usr/local/include also just in case +# (e.g. FreeBSD Ports installs it under the directory) +if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then + export CGO_CFLAGS='-I/usr/local/include' + export CGO_LDFLAGS='-L/usr/local/lib' +fi + +HAVE_GO_TEST_COVER= +if \ + go help testflag | grep -- -cover > /dev/null \ + && go tool -n cover > /dev/null 2>&1 \ +; then + HAVE_GO_TEST_COVER=1 +fi + +# a helper to provide ".exe" when it's appropriate +binary_extension() { + if [ "$(go env GOOS)" = 'windows' ]; then + echo -n '.exe' + fi +} + +hash_files() { + while [ $# -gt 0 ]; do + f="$1" + shift + dir="$(dirname "$f")" + base="$(basename "$f")" + for hashAlgo in md5 sha256; do + if command -v "${hashAlgo}sum" &> /dev/null; then + ( + # subshell and cd so that we get output files like: + # $HASH docker-$VERSION + # instead of: + # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION + cd "$dir" + "${hashAlgo}sum" "$base" > "$base.$hashAlgo" + ) + fi + done + done +} + +bundle() { + local bundle="$1"; shift + echo "---> Making bundle: $(basename "$bundle") (in $DEST)" + source "$SCRIPTDIR/make/$bundle" "$@" +} + +copy_binaries() { + dir="$1" + # Add nested executables to bundle dir so we have complete set of + # them available, but only if the native OS/ARCH is the same as the + # OS/ARCH of the build target + if [ "$(go env GOOS)/$(go env GOARCH)" == "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + if [ -x /usr/local/bin/docker-runc ]; then + echo "Copying nested executables into $dir" + for file in containerd containerd-shim containerd-ctr runc init proxy; do + cp `which "docker-$file"` "$dir/" + if [ "$2" == "hash" ]; then + hash_files "$dir/docker-$file" + fi + done + fi + fi +} + +install_binary() { + file="$1" + target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/" + if [ "$(go env GOOS)" == "linux" ]; then + echo "Installing $(basename $file) to ${target}" + cp -L "$file" "$target" + else + echo "Install is only supported on linux" + return 1 + fi +} + +main() { + # We want this to fail if the bundles already exist and cannot be removed. + # This is to avoid mixing bundles from different versions of the code. + mkdir -p bundles + if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then + echo "bundles/$VERSION already exists. Removing." + rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 + echo + fi + + if [ "$(go env GOHOSTOS)" != 'windows' ]; then + # Windows and symlinks don't get along well + + rm -f bundles/latest + ln -s "$VERSION" bundles/latest + fi + + if [ $# -lt 1 ]; then + bundles=(${DEFAULT_BUNDLES[@]}) + else + bundles=($@) + fi + for bundle in ${bundles[@]}; do + export DEST="bundles/$VERSION/$(basename "$bundle")" + # Cygdrive paths don't play well with go build -o. + if [[ "$(uname -s)" == CYGWIN* ]]; then + export DEST="$(cygpath -mw "$DEST")" + fi + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + bundle "$bundle" + echo + done +} + +main "$@" diff --git a/vendor/github.com/moby/moby/hack/make/.binary b/vendor/github.com/moby/moby/hack/make/.binary new file mode 100644 index 0000000..f5c35c3 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.binary @@ -0,0 +1,48 @@ +#!/bin/bash +set -e + +BINARY_NAME="$BINARY_SHORT_NAME-$VERSION" +BINARY_EXTENSION="$(binary_extension)" +BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + +source "${MAKEDIR}/.go-autogen" + +( +export GOGC=${DOCKER_BUILD_GOGC:-1000} + +if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + # must be cross-compiling! + case "$(go env GOOS)/$(go env GOARCH)" in + windows/amd64) + export CC=x86_64-w64-mingw32-gcc + export CGO_ENABLED=1 + ;; + esac +fi + +if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then + if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then + export CGO_ENABLED=1 + export CC=o64-clang + export LDFLAGS='-linkmode external -s' + export LDFLAGS_STATIC_DOCKER='-extld='${CC} + else + export BUILDFLAGS=( "${BUILDFLAGS[@]/pkcs11 /}" ) # we cannot dlopen in pkcs11 in a static binary + fi +fi + +echo "Building: $DEST/$BINARY_FULLNAME" +go build \ + -o "$DEST/$BINARY_FULLNAME" \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC_DOCKER + " \ + $GO_PACKAGE +) + +echo "Created binary: $DEST/$BINARY_FULLNAME" +ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION" + +hash_files "$DEST/$BINARY_FULLNAME" diff --git a/vendor/github.com/moby/moby/hack/make/.binary-setup b/vendor/github.com/moby/moby/hack/make/.binary-setup new file mode 100644 index 0000000..b9f8ce2 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.binary-setup @@ -0,0 +1,10 @@ +#!/bin/bash + +DOCKER_CLIENT_BINARY_NAME='docker' +DOCKER_DAEMON_BINARY_NAME='dockerd' +DOCKER_RUNC_BINARY_NAME='docker-runc' +DOCKER_CONTAINERD_BINARY_NAME='docker-containerd' +DOCKER_CONTAINERD_CTR_BINARY_NAME='docker-containerd-ctr' +DOCKER_CONTAINERD_SHIM_BINARY_NAME='docker-containerd-shim' +DOCKER_PROXY_BINARY_NAME='docker-proxy' +DOCKER_INIT_BINARY_NAME='docker-init' diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/compat b/vendor/github.com/moby/moby/hack/make/.build-deb/compat new file mode 100644 index 0000000..ec63514 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/compat @@ -0,0 +1 @@ +9 diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/control b/vendor/github.com/moby/moby/hack/make/.build-deb/control new file mode 100644 index 0000000..0f54399 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/control @@ -0,0 +1,29 @@ +Source: docker-engine +Section: admin +Priority: optional +Maintainer: Docker +Standards-Version: 3.9.6 +Homepage: https://dockerproject.org +Vcs-Browser: https://github.com/docker/docker +Vcs-Git: git://github.com/docker/docker.git + +Package: docker-engine +Architecture: linux-any +Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends} +Recommends: aufs-tools, + ca-certificates, + cgroupfs-mount | cgroup-lite, + git, + xz-utils, + ${apparmor:Recommends} +Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs +Description: Docker: the open-source application container engine + Docker is an open source project to build, ship and run any application as a + lightweight container + . + Docker containers are both hardware-agnostic and platform-agnostic. This means + they can run anywhere, from your laptop to the largest EC2 compute instance and + everything in between - and they don't require you to use a particular + language, framework or packaging system. That makes them great building blocks + for deploying and scaling web apps, databases, and backend services without + depending on a particular stack or provider. diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.bash-completion b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.bash-completion new file mode 100644 index 0000000..6ea1119 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.bash-completion @@ -0,0 +1 @@ +contrib/completion/bash/docker diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.default b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.default new file mode 120000 index 0000000..4278533 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.default @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.init b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.init new file mode 120000 index 0000000..8cb89d3 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.init @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.upstart b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.upstart new file mode 120000 index 0000000..7e1b64a --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.upstart @@ -0,0 +1 @@ +../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.install b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.install new file mode 100644 index 0000000..dc6b25f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.install @@ -0,0 +1,12 @@ +#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/ +#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ +#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ +contrib/*-integration usr/share/docker-engine/contrib/ +contrib/check-config.sh usr/share/docker-engine/contrib/ +contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/ +contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ +contrib/init/systemd/docker.service lib/systemd/system/ +contrib/init/systemd/docker.socket lib/systemd/system/ +contrib/mk* usr/share/docker-engine/contrib/ +contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ +contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.manpages b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.manpages new file mode 100644 index 0000000..1aa6218 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.manpages @@ -0,0 +1 @@ +man/man*/* diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.postinst b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.postinst new file mode 100644 index 0000000..eeef6ca --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.postinst @@ -0,0 +1,20 @@ +#!/bin/sh +set -e + +case "$1" in + configure) + if [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi + fi + ;; + abort-*) + # How'd we get here?? + exit 1 + ;; + *) + ;; +esac + +#DEBHELPER# diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.udev b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.udev new file mode 120000 index 0000000..914a361 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.udev @@ -0,0 +1 @@ +../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docs b/vendor/github.com/moby/moby/hack/make/.build-deb/docs new file mode 100644 index 0000000..b43bf86 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docs @@ -0,0 +1 @@ +README.md diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/rules b/vendor/github.com/moby/moby/hack/make/.build-deb/rules new file mode 100755 index 0000000..6522103 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/rules @@ -0,0 +1,55 @@ +#!/usr/bin/make -f + +VERSION = $(shell cat VERSION) +SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1) +SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true ) + +override_dh_gencontrol: + # if we're on Ubuntu, we need to Recommends: apparmor + echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars + dh_gencontrol + +override_dh_auto_build: + ./hack/make.sh dynbinary + # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +override_dh_auto_test: + ./bundles/$(VERSION)/dynbinary-daemon/dockerd -v + ./bundles/$(VERSION)/dynbinary-client/docker -v + +override_dh_strip: + # Go has lots of problems with stripping, so just don't + +override_dh_auto_install: + mkdir -p debian/docker-engine/usr/bin + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-client/docker)" debian/docker-engine/usr/bin/docker + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd + cp -aT /usr/local/bin/docker-proxy debian/docker-engine/usr/bin/docker-proxy + cp -aT /usr/local/bin/docker-containerd debian/docker-engine/usr/bin/docker-containerd + cp -aT /usr/local/bin/docker-containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim + cp -aT /usr/local/bin/docker-containerd-ctr debian/docker-engine/usr/bin/docker-containerd-ctr + cp -aT /usr/local/bin/docker-runc debian/docker-engine/usr/bin/docker-runc + cp -aT /usr/local/bin/docker-init debian/docker-engine/usr/bin/docker-init + mkdir -p debian/docker-engine/usr/lib/docker + +override_dh_installinit: + # use "docker" as our service name, not "docker-engine" + dh_installinit --name=docker +ifeq (true, $(SYSTEMD_GT_227)) + $(warning "Setting TasksMax=infinity") + sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service +endif + +override_dh_installudev: + # match our existing priority + dh_installudev --priority=z80 + +override_dh_install: + dh_install + dh_apparmor --profile-name=docker-engine -pdocker-engine + +override_dh_shlibdeps: + dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info + +%: + dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd) diff --git a/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine-selinux.spec b/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine-selinux.spec new file mode 100644 index 0000000..ae597bd --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine-selinux.spec @@ -0,0 +1,96 @@ +# Some bits borrowed from the openstack-selinux package +Name: docker-engine-selinux +Version: %{_version} +Release: %{_release}%{?dist} +Summary: SELinux Policies for the open-source application container engine +BuildArch: noarch +Group: Tools/Docker + +License: GPLv2 +Source: %{name}.tar.gz + +URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +%global selinux_policyver 3.13.1-102 +%global selinuxtype targeted +%global moduletype services +%global modulenames docker + +Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils +BuildRequires: selinux-policy selinux-policy-devel + +# conflicting packages +Conflicts: docker-selinux + +# Usage: _format var format +# Expand 'modulenames' into various formats as needed +# Format must contain '$x' somewhere to do anything useful +%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done; + +# Relabel files +%global relabel_files() \ + /sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \ + +%description +SELinux policy modules for use with Docker + +%prep +%if 0%{?centos} <= 6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +make SHARE="%{_datadir}" TARGETS="%{modulenames}" + +%install + +# Install SELinux interfaces +%_format INTERFACES $x.if +install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} +install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} + +# Install policy modules +%_format MODULES $x.pp.bz2 +install -d %{buildroot}%{_datadir}/selinux/packages +install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages + +%post +# +# Install all modules in a single transaction +# +if [ $1 -eq 1 ]; then + %{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1 +fi +%_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2 +%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES +if %{_sbindir}/selinuxenabled ; then + %{_sbindir}/load_policy + %relabel_files + if [ $1 -eq 1 ]; then + restorecon -R %{_sharedstatedir}/docker + fi +fi + +%postun +if [ $1 -eq 0 ]; then + %{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || : + if %{_sbindir}/selinuxenabled ; then + %{_sbindir}/load_policy + %relabel_files + fi +fi + +%files +%doc LICENSE +%defattr(-,root,root,0755) +%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2 +%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if + +%changelog +* Tue Dec 1 2015 Jessica Frazelle 1.9.1-1 +- add licence to rpm +- add selinux-policy and docker-engine-selinux rpm diff --git a/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine.spec b/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine.spec new file mode 100644 index 0000000..d53e55b --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine.spec @@ -0,0 +1,254 @@ +Name: docker-engine +Version: %{_version} +Release: %{_release}%{?dist} +Summary: The open-source application container engine +Group: Tools/Docker + +License: ASL 2.0 +Source: %{name}.tar.gz + +URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +# is_systemd conditional +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 +%global is_systemd 1 +%endif + +# required packages for build +# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh) +# only require systemd on those systems +%if 0%{?is_systemd} +%if 0%{?suse_version} >= 1210 +BuildRequires: systemd-rpm-macros +%{?systemd_requires} +%else +%if 0%{?fedora} >= 25 +# Systemd 230 and up no longer have libsystemd-journal (see https://bugzilla.redhat.com/show_bug.cgi?id=1350301) +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +%else +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +BuildRequires: pkgconfig(libsystemd-journal) +%endif +%endif +%else +Requires(post): chkconfig +Requires(preun): chkconfig +# This is for /sbin/service +Requires(preun): initscripts +%endif + +# required packages on install +Requires: /bin/sh +Requires: iptables +%if !0%{?suse_version} +Requires: libcgroup +%else +Requires: libcgroup1 +%endif +Requires: tar +Requires: xz +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +# Resolves: rhbz#1165615 +Requires: device-mapper-libs >= 1.02.90-1 +%endif +%if 0%{?oraclelinux} >= 6 +# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper +Requires: kernel-uek >= 4.1 +Requires: device-mapper >= 1.02.90-2 +%endif + +# docker-selinux conditional +%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global with_selinux 1 +%endif + +# DWZ problem with multiple golang binary, see bug +# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12 +%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global _dwz_low_mem_die_limit 0 +%endif + +# start if with_selinux +%if 0%{?with_selinux} +# Version of SELinux we were using +%if 0%{?fedora} == 20 +%global selinux_policyver 3.12.1-197 +%endif # fedora 20 +%if 0%{?fedora} == 21 +%global selinux_policyver 3.13.1-105 +%endif # fedora 21 +%if 0%{?fedora} >= 22 +%global selinux_policyver 3.13.1-128 +%endif # fedora 22 +%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global selinux_policyver 3.13.1-23 +%endif # centos,oraclelinux 7 +%endif # with_selinux + +# RE: rhbz#1195804 - ensure min NVR for selinux-policy +%if 0%{?with_selinux} +Requires: selinux-policy >= %{selinux_policyver} +Requires(pre): %{name}-selinux >= %{version}-%{release} +%endif # with_selinux + +# conflicting packages +Conflicts: docker +Conflicts: docker-io +Conflicts: docker-engine-cs + +%description +Docker is an open source project to build, ship and run any application as a +lightweight container. + +Docker containers are both hardware-agnostic and platform-agnostic. This means +they can run anywhere, from your laptop to the largest EC2 compute instance and +everything in between - and they don't require you to use a particular +language, framework or packaging system. That makes them great building blocks +for deploying and scaling web apps, databases, and backend services without +depending on a particular stack or provider. + +%prep +%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +export DOCKER_GITCOMMIT=%{_gitcommit} +./hack/make.sh dynbinary +# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +%check +./bundles/%{_origversion}/dynbinary-client/docker -v +./bundles/%{_origversion}/dynbinary-daemon/dockerd -v + +%install +# install binary +install -d $RPM_BUILD_ROOT/%{_bindir} +install -p -m 755 bundles/%{_origversion}/dynbinary-client/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker +install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd + +# install proxy +install -p -m 755 /usr/local/bin/docker-proxy $RPM_BUILD_ROOT/%{_bindir}/docker-proxy + +# install containerd +install -p -m 755 /usr/local/bin/docker-containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd +install -p -m 755 /usr/local/bin/docker-containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim +install -p -m 755 /usr/local/bin/docker-containerd-ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr + +# install runc +install -p -m 755 /usr/local/bin/docker-runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc + +# install tini +install -p -m 755 /usr/local/bin/docker-init $RPM_BUILD_ROOT/%{_bindir}/docker-init + +# install udev rules +install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d +install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules + +# add init scripts +install -d $RPM_BUILD_ROOT/etc/sysconfig +install -d $RPM_BUILD_ROOT/%{_initddir} + + +%if 0%{?is_systemd} +install -d $RPM_BUILD_ROOT/%{_unitdir} +install -p -m 644 contrib/init/systemd/docker.service.rpm $RPM_BUILD_ROOT/%{_unitdir}/docker.service +%else +install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker +install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker +%endif +# add bash, zsh, and fish completions +install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions +install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions +install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d +install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker +install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker +install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish + +# install manpages +install -d %{buildroot}%{_mandir}/man1 +install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1 +install -d %{buildroot}%{_mandir}/man5 +install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5 +install -d %{buildroot}%{_mandir}/man8 +install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8 + +# add vimfiles +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax +install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt +install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim + +# add nano +install -d $RPM_BUILD_ROOT/usr/share/nano +install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc + +# list files owned by the package here +%files +%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md +/%{_bindir}/docker +/%{_bindir}/dockerd +/%{_bindir}/docker-containerd +/%{_bindir}/docker-containerd-shim +/%{_bindir}/docker-containerd-ctr +/%{_bindir}/docker-proxy +/%{_bindir}/docker-runc +/%{_bindir}/docker-init +/%{_sysconfdir}/udev/rules.d/80-docker.rules +%if 0%{?is_systemd} +/%{_unitdir}/docker.service +%else +%config(noreplace,missingok) /etc/sysconfig/docker +/%{_initddir}/docker +%endif +/usr/share/bash-completion/completions/docker +/usr/share/zsh/vendor-completions/_docker +/usr/share/fish/vendor_completions.d/docker.fish +%doc +/%{_mandir}/man1/* +/%{_mandir}/man5/* +/%{_mandir}/man8/* +/usr/share/vim/vimfiles/doc/dockerfile.txt +/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +/usr/share/vim/vimfiles/syntax/dockerfile.vim +/usr/share/nano/Dockerfile.nanorc + +%post +%if 0%{?is_systemd} +%systemd_post docker +%else +# This adds the proper /etc/rc*.d links for the script +/sbin/chkconfig --add docker +%endif +if ! getent group docker > /dev/null; then + groupadd --system docker +fi + +%preun +%if 0%{?is_systemd} +%systemd_preun docker +%else +if [ $1 -eq 0 ] ; then + /sbin/service docker stop >/dev/null 2>&1 + /sbin/chkconfig --del docker +fi +%endif + +%postun +%if 0%{?is_systemd} +%systemd_postun_with_restart docker +%else +if [ "$1" -ge "1" ] ; then + /sbin/service docker condrestart >/dev/null 2>&1 || : +fi +%endif + +%changelog diff --git a/vendor/github.com/moby/moby/hack/make/.detect-daemon-osarch b/vendor/github.com/moby/moby/hack/make/.detect-daemon-osarch new file mode 100644 index 0000000..7395539 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.detect-daemon-osarch @@ -0,0 +1,69 @@ +#!/bin/bash +set -e + +docker-version-osarch() { + local target="$1" # "Client" or "Server" + local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}" + if docker version -f "$fmtStr" 2>/dev/null; then + # if "docker version -f" works, let's just use that! + return + fi + docker version | awk ' + $1 ~ /^(Client|Server):$/ { section = 0 } + $1 == "'"$target"':" { section = 1; next } + section && $1 == "OS/Arch:" { print $2 } + + # old versions of Docker + $1 == "OS/Arch" && $2 == "('"${target,,}"'):" { print $3 } + ' +} + +# Retrieve OS/ARCH of docker daemon, e.g. linux/amd64 +export DOCKER_ENGINE_OSARCH="$(docker-version-osarch 'Server')" +export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}" +export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}" +DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:=amd64} + +# and the client, just in case +export DOCKER_CLIENT_OSARCH="$(docker-version-osarch 'Client')" +export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}" +export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}" +DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64} + +# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/ +PACKAGE_ARCH='amd64' +case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in + arm) + PACKAGE_ARCH='armhf' + ;; + arm64) + PACKAGE_ARCH='aarch64' + ;; + amd64|ppc64le|s390x) + PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" + ;; + *) + echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'" + ;; +esac +export PACKAGE_ARCH + +DOCKERFILE='Dockerfile' +TEST_IMAGE_NAMESPACE= +case "$PACKAGE_ARCH" in + amd64) + case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in + windows) + DOCKERFILE='Dockerfile.windows' + ;; + solaris) + DOCKERFILE='Dockerfile.solaris' + ;; + esac + ;; + *) + DOCKERFILE="Dockerfile.$PACKAGE_ARCH" + TEST_IMAGE_NAMESPACE="$PACKAGE_ARCH" + ;; +esac +export DOCKERFILE TEST_IMAGE_NAMESPACE diff --git a/vendor/github.com/moby/moby/hack/make/.ensure-emptyfs b/vendor/github.com/moby/moby/hack/make/.ensure-emptyfs new file mode 100644 index 0000000..e71a30a --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.ensure-emptyfs @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +if ! docker inspect emptyfs &> /dev/null; then + # let's build a "docker save" tarball for "emptyfs" + # see https://github.com/docker/docker/pull/5262 + # and also https://github.com/docker/docker/issues/4242 + dir="$DEST/emptyfs" + mkdir -p "$dir" + ( + cd "$dir" + echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories + mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + ( + cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ) + ( set -x; tar -cC "$dir" . | docker load ) + rm -rf "$dir" +fi diff --git a/vendor/github.com/moby/moby/hack/make/.go-autogen b/vendor/github.com/moby/moby/hack/make/.go-autogen new file mode 100644 index 0000000..4d26052 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.go-autogen @@ -0,0 +1,86 @@ +#!/bin/bash + +rm -rf autogen + +source hack/dockerfile/binaries-commits + +cat > dockerversion/version_autogen.go < dockerversion/version_autogen_unix.go < + +param( + [Parameter(Mandatory=$true)][string]$CommitString, + [Parameter(Mandatory=$true)][string]$DockerVersion +) + +$ErrorActionPreference = "Stop" + +# Utility function to get the build date/time in UTC +Function Get-BuildDateTime() { + return $(Get-Date).ToUniversalTime() +} + +try { + $buildDateTime=Get-BuildDateTime + + if (Test-Path ".\autogen") { + Remove-Item ".\autogen" -Recurse -Force | Out-Null + } + + $fileContents = ' +// +build autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. +const ( + GitCommit string = "'+$CommitString+'" + Version string = "'+$DockerVersion+'" + BuildTime string = "'+$buildDateTime+'" +) + +// AUTOGENERATED FILE; see hack\make\.go-autogen.ps1 +' + + # Write the file without BOM + $outputFile="$(pwd)\dockerversion\version_autogen.go" + if (Test-Path $outputFile) { Remove-Item $outputFile } + [System.IO.File]::WriteAllText($outputFile, $fileContents, (New-Object System.Text.UTF8Encoding($False))) + + New-Item -ItemType Directory -Path "autogen\winresources\tmp" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\docker" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\dockerd" | Out-Null + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\docker" + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\dockerd" + + # Generate a version in the form major,minor,patch,build + $versionQuad=$DockerVersion -replace "[^0-9.]*" -replace "\.", "," + + # Compile the messages + windmc hack\make\.resources-windows\event_messages.mc -h autogen\winresources\tmp -r autogen\winresources\tmp + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile event message resources" } + + # If you really want to understand this madness below, search the Internet for powershell variables after verbatim arguments... Needed to get double-quotes passed through to the compiler options. + # Generate the .syso files containing all the resources and manifest needed to compile the final docker binaries. Both 32 and 64-bit clients. + $env:_ag_dockerVersion=$DockerVersion + $env:_ag_gitCommit=$CommitString + + windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 64-bit resources" } + + windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_386.syso -F pe-i386 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 32-bit resources" } + + windres -i hack/make/.resources-windows/dockerd.rc -o autogen/winresources/dockerd/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile daemon resources" } +} +Catch [Exception] { + # Throw the error onto the caller to display errors. We don't expect this script to be called directly + Throw ".go-autogen.ps1 failed with error $_" +} +Finally { + Remove-Item .\autogen\winresources\tmp -Recurse -Force -ErrorAction SilentlyContinue | Out-Null + $env:_ag_dockerVersion="" + $env:_ag_gitCommit="" +} diff --git a/vendor/github.com/moby/moby/hack/make/.integration-daemon-setup b/vendor/github.com/moby/moby/hack/make/.integration-daemon-setup new file mode 100644 index 0000000..0efde71 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.integration-daemon-setup @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +bundle .detect-daemon-osarch +if [ $DOCKER_ENGINE_GOOS != "windows" ]; then + bundle .ensure-emptyfs +fi diff --git a/vendor/github.com/moby/moby/hack/make/.integration-daemon-start b/vendor/github.com/moby/moby/hack/make/.integration-daemon-start new file mode 100644 index 0000000..b96979b --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.integration-daemon-start @@ -0,0 +1,116 @@ +#!/bin/bash + +# see test-integration-cli for example usage of this script + +base="$ABS_DEST/.." +export PATH="$base/binary-client:$base/binary-daemon:$base/dynbinary-client:$base/dynbinary-daemon:$PATH" + +if ! command -v docker &> /dev/null; then + echo >&2 'error: binary-client or dynbinary-client must be run before .integration-daemon-start' + false +fi + +# This is a temporary hack for split-binary mode. It can be removed once +# https://github.com/docker/docker/pull/22134 is merged into docker master +if [ "$(go env GOOS)" = 'windows' ]; then + return +fi + +if [ -z "$DOCKER_TEST_HOST" ]; then + if docker version &> /dev/null; then + echo >&2 'skipping daemon start, since daemon appears to be already started' + return + fi +fi + +if ! command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before .integration-daemon-start' + false +fi + +# intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers +exec 41>&1 42>&2 + +export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +if [ "$DOCKER_EXPERIMENTAL" ]; then + echo >&2 '# DOCKER_EXPERIMENTAL is set: starting daemon with experimental features enabled! ' + extra_params="$extra_params --experimental" +fi + +if [ -z "$DOCKER_TEST_HOST" ]; then + # Start apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + # reset container variable so apparmor profile is applied to process + # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16 + export container="" + ( + set -x + /etc/init.d/apparmor start + ) + fi + + export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one + ( set -x; exec \ + dockerd --debug \ + --host "$DOCKER_HOST" \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --pidfile "$DEST/docker.pid" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params \ + &> "$DEST/docker.log" + ) & + # make sure that if the script exits unexpectedly, we stop this daemon we just started + trap 'bundle .integration-daemon-stop' EXIT +else + export DOCKER_HOST="$DOCKER_TEST_HOST" +fi + +# give it a little time to come up so it's "ready" +tries=60 +echo "INFO: Waiting for daemon to start..." +while ! docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + printf "\n" + if [ -z "$DOCKER_HOST" ]; then + echo >&2 "error: daemon failed to start" + echo >&2 " check $DEST/docker.log for details" + else + echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" + docker version >&2 || true + # Additional Windows CI debugging as this is a common error as of + # January 2016 + if [ "$(go env GOOS)" = 'windows' ]; then + echo >&2 "Container log below:" + echo >&2 "---" + # Important - use the docker on the CI host, not the one built locally + # which is currently in our path. + ! /c/bin/docker -H=$MAIN_DOCKER_HOST logs docker-$COMMITHASH + echo >&2 "---" + fi + fi + false + fi + printf "." + sleep 2 +done +printf "\n" diff --git a/vendor/github.com/moby/moby/hack/make/.integration-daemon-stop b/vendor/github.com/moby/moby/hack/make/.integration-daemon-stop new file mode 100644 index 0000000..03c1b14 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.integration-daemon-stop @@ -0,0 +1,27 @@ +#!/bin/bash + +if [ ! "$(go env GOOS)" = 'windows' ]; then + trap - EXIT # reset EXIT trap applied in .integration-daemon-start + + for pidFile in $(find "$DEST" -name docker.pid); do + pid=$(set -x; cat "$pidFile") + ( set -x; kill "$pid" ) + if ! wait "$pid"; then + echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" + fi + done + + if [ -z "$DOCKER_TEST_HOST" ]; then + # Stop apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + ( + set -x + /etc/init.d/apparmor stop + ) + fi + fi +else + # Note this script is not actionable on Windows to Linux CI. Instead the + # DIND daemon under test is torn down by the Jenkins tear-down script + echo "INFO: Not stopping daemon on Windows CI" +fi diff --git a/vendor/github.com/moby/moby/hack/make/.integration-test-helpers b/vendor/github.com/moby/moby/hack/make/.integration-test-helpers new file mode 100644 index 0000000..7b73b2f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.integration-test-helpers @@ -0,0 +1,79 @@ +#!/bin/bash + +: ${TEST_REPEAT:=0} + +bundle_test_integration_cli() { + TESTFLAGS="$TESTFLAGS -check.v -check.timeout=${TIMEOUT} -test.timeout=360m" + go_test_dir integration-cli $DOCKER_INTEGRATION_TESTS_VERIFIED +} + +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want +# to run certain tests on your local host, you should run with command: +# +# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli +# +go_test_dir() { + dir=$1 + precompiled=$2 + testbinary="$DEST/test.main" + testcover=() + testcoverprofile=() + ( + mkdir -p "$DEST/coverprofiles" + export DEST="$ABS_DEST" # in a subshell this is safe -- our integration-cli tests need DEST, and "cd" screws it up + if [ -z $precompiled ]; then + ensure_test_dir $1 $testbinary + fi + cd "$dir" + i=0 + while ((++i)); do + test_env "$testbinary" $TESTFLAGS + if [ $i -gt "$TEST_REPEAT" ]; then + break + fi + echo "Repeating test ($i)" + done + ) +} + +ensure_test_dir() { + ( + # make sure a test dir will compile + dir="$1" + out="$2" + echo Building test dir: "$dir" + set -xe + cd "$dir" + go test -c -o "$out" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" + ) +} + +test_env() { + ( + set -xe + # use "env -i" to tightly control the environment variables that bleed into the tests + env -i \ + DEST="$DEST" \ + DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \ + DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \ + DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \ + DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ + DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \ + DOCKER_HOST="$DOCKER_HOST" \ + DOCKER_REMAP_ROOT="$DOCKER_REMAP_ROOT" \ + DOCKER_REMOTE_DAEMON="$DOCKER_REMOTE_DAEMON" \ + DOCKERFILE="$DOCKERFILE" \ + GOPATH="$GOPATH" \ + GOTRACEBACK=all \ + HOME="$ABS_DEST/fake-HOME" \ + PATH="$PATH" \ + TEMP="$TEMP" \ + TEST_IMAGE_NAMESPACE="$TEST_IMAGE_NAMESPACE" \ + "$@" + ) +} diff --git a/vendor/github.com/moby/moby/hack/make/.resources-windows/common.rc b/vendor/github.com/moby/moby/hack/make/.resources-windows/common.rc new file mode 100644 index 0000000..000fb35 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.resources-windows/common.rc @@ -0,0 +1,38 @@ +// Application icon +1 ICON "docker.ico" + +// Windows executable manifest +1 24 /* RT_MANIFEST */ "docker.exe.manifest" + +// Version information +1 VERSIONINFO + +#ifdef DOCKER_VERSION_QUAD +FILEVERSION DOCKER_VERSION_QUAD +PRODUCTVERSION DOCKER_VERSION_QUAD +#endif + +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "000004B0" + BEGIN + VALUE "ProductName", DOCKER_NAME + +#ifdef DOCKER_VERSION + VALUE "FileVersion", DOCKER_VERSION + VALUE "ProductVersion", DOCKER_VERSION +#endif + +#ifdef DOCKER_COMMIT + VALUE "OriginalFileName", DOCKER_COMMIT +#endif + + END + END + + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0000, 0x04B0 + END +END diff --git a/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.exe.manifest b/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.exe.manifest new file mode 100644 index 0000000..674bc94 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.exe.manifest @@ -0,0 +1,18 @@ + + + Docker + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.ico b/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.ico new file mode 100644 index 0000000000000000000000000000000000000000..c6506ec8dbd8e295d98a084412e9d2c358a6ba39 GIT binary patch literal 370070 zcmeEP2Y405+TL?c0%F4sD!rrFr6_v!{$4AJ1?*S7-fQo@E1)91i}a!b(rX$m0Ro|S zl8_#Huc9K5%>TY~c9PA>Nlzfm^YzX_0Ys1`^UF*YMa1~pn zQjstq44g3rHUvNJ**NcYPxRuB0h?D14oKMWTR>{++JN8Fl)E5}8uGT~8vB{$p7Fiq z3GHSD%>rR6Jw0Ie?|buOFq?qadi(lEl=jLTyr^ZHo z7akkcdFke*UDm`d?HV1stb2UylFl*Mw>EBh&(-lO`z&8Q{gHVQeLchH_qMvv>*e}n zb`R@6u;-^>-%l(WU_HF#C+qHIL#^iPMp?BEgn-W;06W+I1Ne$J)^pInx>VeC~g3qScfHtL7TiJK;c zro@FsC2sibAoeS4E1a*Ay8wPnjH6}NVbS~}?= zHMiddDy*BO!n(Tvrj~*aP;+{E0bihiKd`-Zc6VzT(lGdjX$$&WgBJa0y}Pi#)e_s) zR*keSKCsZL)V-|m<$Jfyd z-o1A7ioUNV$IkjTe$BAx#ElbAr6kOUZO^qaPo*S8*tWem9d>>)HF4pIl*EV=DGA}g zd>)^cf%v&;h$Yguq%KQO*f=45`IJY~X7>zC3+`;Cg?6>lVDqPJoJ4xU#!1WtoC4B; z4BvpVYxOjiWzj9ySk~ zj(}}PoWwS5JlzJ$;2-ocJkEi*xoS)D61{KjocGk+{+Fnb&USy_!wp-`?DzfIHuk04 zFn)qQ!uShQrsyM#pD^(Q(w*=PD-kP9!Cc~th!YyYCtT(f|D>-i*VK-P5qe^taiEMB zoZLvh{4lV2c^`MurtuklJ?{+T@Ou+CP3WDpX;QL|-4nuL&#+y@>4@QJJ&(`1Pa2$OBeT0r57&rJH2VbBt z&#)aa!U*_-k1#*b+*iUO-7F8EIr#3EZDF7+G7!IZxM%mlR9EbhuDY*}jqF%Ee(i|& zF=mfTiJN^qIev~xO~lw-_wC`Zb@uDFAIQnJ^V*MbLTVD{03y|zu(#CgUgxNgA7Jma z`yP4r_xZK=ypA*ez{Cpl4SFmQYR@qsKKKLUhB(9rLos&v=c?h}@q&?aw-+oIVupaF zc=l-aNh%DK1qL>+=z}@@p`PtqmU-=dafj;@H;x~H`T0Z1@p}B8o{|`W{+?rY_VJFG zy(oFu{+8q@l^8ukEgAa{ZTF$wX!nfui@nb;Or7)t977Q1{B({P;0tyDzu?)yODjiM z=NdV~U%0p)0dtKqe`rXNv{W(>z2IXv*5T;*b?R%Dd-Jj$EfUs^o|YUNl*V`*^K`U% z_VLAM^TuD$_EVRu&8vs0@Ike`Iz?gm|(@QtaFN`QW>OC6^eoQHN)Mh3E?_c-8BELrU~mtO-zmr{uBK? z;`;@(dyMIev9C9F&lE8QZGY3^E^1Ei^HgXT+L7mU+jCK`XJP|0caY9G1NeeC_=3Jz zU({47uiaz3;6kk6VN6h=luT~6)G-jf@MAZg*Xg;v^+D|}k6$yiJK}uUI_y4i0sH!* zkLjJZ?whCW`t*Vqksn<1UWzdAZ-(S?-u6ou0xNe=DEUd*l2PwtLg>rz}_N=6wvi zcYB|~m7cMFPPSdp{bml4_h({)&F}+XE*s{3o;c=RtWCxmBg6?351c0bugI6#IPW#r z#<{O*+h055!J6@_hfT!#J=#6S^&H>x*?ck0>E%=p$NRAT_2FN5pTBp{*Yo^>+Fofp zUdIGx?hs>u!+1u}W9bm95%rPpAn=(1)*p! z>{uVRUqa)1UmsxGKF9lE-79(5@SeVBd!@lA=$L@<0DS@04;{fcU@-hZUA4nIM}T=m zJ}(eIP=dBl(W;JH(M|XHk^QYISku!xDK-et@vuHGF`Vml3cgOKFz0Z*k7s%DjmRM$?IRZa=vLwV-D+MR_y#<8=USW$e_%g@D?`3O`vJ6H`vHsr=HY$A>!=Ci z5DUNuNGwozv#1bf;$3?eY+c(v*73cIHGC>1X%YH+zF%8-+c$lDYGOESev#U>ZJEQ? zH>x8CqJd2sw0kc$==}$FuTU$-HAH`J!QKNiY}nXxp;L#SU>%F$6CwwDblfl>K4ETe zuRoyuXZQr<%YZ+~cy@^O1Ha?>;1BD6vCg6bW1H_BN-R(zthwMfnH)D;_YrZchjiex zJ*?%y8s2%F<43=*L0{ktQ)Z``eu8$-^R^~0LjS%@?ccpd+jgd{Z?bLO*nDQnzW?aq z6t#Kbt7_3t7W(x{#b^7DSRjl4@Z%FWj$n+yu|pY)5BS_rzjJ_RhU@W7!@H;voD;xz zjwBW+xOtT8Gg6b6>V5&=*y)rUhj;Xn7N5dAKfblW@9Ow|TPN#xc37Umclx|*{nKIh z@DUMd7{8~%=1(2kzu^@6_*3ZDZ7{y~#`u~2eWvXn-nU*IJ(Qq!Cr?*VBhFKchq_?< z0VQw$e*A(NPtZ?r-huvra|v9hYhr^^921zi0k$_Cw*N!iB=0&xt|i8^Lx}~-wN>Zu z<`UNas`Vf#W@cx`{P-3}T4MARmALUYm9SyVse}zi_;}2sz?;X?A<`L%~+?;;oV9ZJ6PaNL2@g&OA zv-$zV{)gh##?ZE>ukZI``&nWGlp9~bbjeVR6)?Xzzn|Od3+(3&rPLSjU86L{FPIlt ziM2w_s2Ruy;+tmT1M+7k<@%_^b-%b$;^#C-ik(?!^NN0TVxqpUyK&w-bvDd?vG#`9 z|Es-j)?>BS&TLz2-KfG&${phb&ej2 zYqEbwd@Y`-m(9|&F~A+ z^bPa_CyyRZJh?MryjncWea8BIU)%S$ZT!Jv_yRt22=9lu0N)QRrE!6oALO&b!+1vU zsyc4DKD^KSjG$aAx7=Kg4B+{YCmi3s59@3NOvRe=91kpFK|pVJrU zxBz1W_P1Pf!1=>c6c_0ELADKZgCot{ApC*M2^3Ms%i8If6UuR|s5*R$XHR(bb7Ci) zbLpmWRW9E&?ySq#{ZgsU+Mg@cBQ}hwTyxE+v#wY>x@xnHV=inIJE8jJ2f}X2_C4IZ zI2nAa6%aMN=_T6}C!L=d`9{0#vA;~&kuZMt^3fOkv3LmP{D%dkmo{6^t37&r!1)3^ zQ{ego&K;Jrn1Hc>nHyZOe3*4L<%IEoe)f}>%%r^RGk~=r+V(O2dmhh*r$Y7<5Eo8` zUQ9>dFdOqli-FbH7YiiV*ou_jv)hic6g)pqTsl1P;L_m%yMaVJqhE?~@hrr{(-#j3 zn6_kSz|>{KD*d)%MCHM&Mpk}r#fVDv7Y#YL>Z)IBT$&Qr<~*g+{l*)S&sd)2L#nh~ zHazgzs9^!Iw!QiB1Dq2)GxLIZ`2bUHd;!M?^D#bPzgvo)Cu$!6dG5t?!TYGyd_Ex0 z2+O{G=XM9?8xrXM?}Y62yb$}RoZ!?~lMf%@!n}Ouni`|;xu!!sr*O{SQNsfNTt2)~ z!m{B3-QXu)TsgAJvm1Xo{|dHw{mE$Xdet>g>{QgiH#)7X<20=f-|N4BxocrZh_8Gwc zwbS;s{%7k#JOhfRJ;Uz*2G|!kVb+nDHRj$qWPJO6;UIS!^qJ#wU!20elYYSc{>Sk{ zJ?VOF-!M-y?0n&hk(K)|9bT!?+EM3JC)-g&1Dj{qzis0S2U=<|p2aO0Q3XCI0Q2zJ z9*XVaTpv_|{zAtD@LOD;%ymZeyS5y1^uau)1i`fex*x!tz_;Wb-zj(JHa_Gr@+*4` z1b6k$^FYqMZGF-)U|zaK{hZU&rPs*$uBU}<7qstXUrx|hoB-$_OdaX8cf|gwONV

-m7;XQ|EM&#Ha#!_>j8Q`Nqt(JCSGRkd`~Meqs5yhbpyO{Q(dnwqfz z=L_fdIpcmn&k1mRg7M(6#7W*YgYsR0vS;HtwHV)-_r}yeS*jGCU4^y+I4OdZ-Gu6QZN$Sv! zFtuuY<1*(1cz=e|g+F=py9kE!{K%%5kI7gs1RpRhak6zDb&3BAIVYWx{bhpz^S`Xs zZp(wXf01qbjPY~#0lMEWaohLjL+B3>8+hmcBQe(B5i?lr*%PNaeeqxQ;J>a_58i!) zdf={W)rW6Cu96d1sr_4KV7=fu-g$viJ};>KnooRT`~c?)`A*Uqe0Hc~Ld*ruTRYn8 z15DphHnd$@>B|7(I{kbX&v~C_j0d~VSKQBIIcHT2F~2_>&wC%n^jHU=)=X`o{`wWN2gQ;$E~TBrB@-?i$?kDpgZ@V-FI-1ce_<^xX4`vN}p>(8bOvoFIB zV6CzC1N70Se+_}|8{h*LZW?13eqHpJ&`pJ&VBm@n_WbX$P?XMd<&xto&TX$1YaSwEu^qz0>|5d$1MqE!3+o zKBOLbpt)MUG+6D99e{ZX+JDA;Vwuky8Sr@l=U`8>574>-9}tpYzdM9ygEAjbM%;i| z%dzoS{k(6Ztvk8){XAwq#-F9ahV1@7cHWcf@WDj&+J7HVZSQNYo_O>&_3-`8)!+eN zt35kbsrg|e)%I;0Rm$>@^*cjlrt+^!<#j#D4M`;>bA zsk_wA!@pO@4#(l0p%j%E^`Up{kK<2EyGqLE<+on=frW!S`klkm6bqbUAA+?bUsCr& z?6o9i!44{78xi~KIsd4itb6&q-?o1~=OMbE&d@hb?e@PxPF2>gc{D>d#}T zYR@iwS785Ub#V7G6}RwZy%u;0*8w`tDerkBs|c*eK22kSQ_v-aHRR9GrjW`zL#5QY zOz-HmfAsx*ZJW>anIH9s-?%wr&-(JT+W;NgFCBG(T0ip+tOI;nZ3?+pE&sKecioSE zPoQ+a2T)S}$czc*nK3~QK0YVkUx59(AHn+qe_&nEom743m%KY%%6tIE`+9x`?{-$l zdY}Dve3)zB&+k{5{-mVzw5at)aXtWRf%uG%&jq+Pfb#>~U)tXVEb6`I=X}bK&kNZ{ zm*AK{`vB-wD*i8d9jd;0ucVZjL4|D&p7ZN@Kg`oUWY7EMX#4d2y6?xFQhs#o%pZe3 zp7w4~2G{zPB!~0?oWJF_$@DtUl9?aWu_XHbxjSZBfiyJptf|5pT`9V8_+}lGVXkJH z9oyyj%%A;4S?K#q(q_tI-6j^8U2?I&N%kAiv0hYtj0t2MPzrp&GW)x(DO0U;Ap02G z{>}Vfwz)sba8aoq_m`Epr1Z5jGZyf^Pf&nyel|Vvb8b5Njx>%nA_rRSsQOqZB=do1 z*azU7K3WENf4ePwGRJ|8{r$%O?EB}I;yM5F*e{=ndor=W9QgWTewTngfZr6|xpKI7 zE&y{+vOe$(`T)p7+dswt?}Sm_-K`UTe7E-RlwGNN-k;}lfYR5`o(Y>S=rwgLfH@vM ztxmJyk^?_%?1(I-fP5zT;7YTsP7O4Ef?x&%j}i zzx~{QkX0M=GTJ9|e$Ou;uZ06$YUv2B_Z3t$4k-5}_#Z3y{|{=m{S4Tzp5&xI+4pQ+Hr z-01+*YzRhq_`pE5U~tJ|A?nWyCEbRi)Spw^i&AGvpPN&go%`7a?{+0Mr*B|3{qUzV z`Pg?dxSLyr^sJ=E0k1rD?fLE7mYlv9(2i((v_aY;^OpcMfJ=cY#T5gfu6n==z#t$J zI0*2273sj=!0*6e;8-z%^&Pcw?g&bYpVE6u5&b{jc3+fR<4ZHKy$YQ7!%VPsR_^x>A&bzl89dU^z9 z1Z}_x?lXGKV|~vr?6#@*IOq$D4ZVEliuUoTJ;HSvT9ucHe_*-;Xx1J-o*!(0;b_X-B)cuRY4LoP+kZ zz2D2DX)o_T%QXwZGtYN&&AQ1qxnX0R$1zD)_Vr>f8d(ozz0Ap;L_si^PTmeUe0-V z2(zF2m`8dc@H`L#Fcu&`rvUmT`pn#L(mu|Y)4#Ak1z6SopRRPf2V=! z@Mfd*FJEh-K7RRX^~Q78sTUq^t{#2R>mR5;1a$!CLSI^GnK7RmcpS^A>pY)!|Ma7+ z)cgN!t~$SWqZ;_tb!u#vW-6%P)hc30Q?-0lW3_Iq7n{a6QZW;}SU;|@TK#Kde7n1e zS}?S!3hIBgn$Z0kHKNnCs?Vp_s}677q~3k8xqANbmik(}4)GAK`$!wF-^g#9o%ch% zr2dl!-UI8T-MsX4OZD{|H>rM~U$1`Ye65<%=W4ZZSW~s~m&R)Cn8qp^ZI4FVc`w`- zIUH@qJ&x^ijT-pn^{Ug`H>%hF-9pz(KgM=G%68&f+Q;R=tG_n#Jl0{_X*hWHu~zD> z7n-ZD(B7UO-=KzexK{nv^%^y^@6{@Na8nfpPFJCgY%AML&e?t=hZUn6>w60saFrV0 zt(hA9&2_5l2REpXU%FX6*S=MTT-)A{{(}C*mbtTyzIiXN&<(WtVe~Z*-G5s;V04Lk zeE^6776PHbRA3x1(awh|Usu3_(MjdU(x+R3(>do?Njg1Hy&V}1t^1bhPA1vCPxBMvMxv68SDsZpyI&y z68N@Z9j{jJ{^wfw%9e;N@ZB}|0e}5D_wn8sliYvL?OI1(c>Lyc^g*io2lckYcRY>z zydJ0nTmn?~XJ6P)TjX3>HK0Cl8$i3A03-n?0Hcqz3&LzWQ?q|9${zwUu7`XUd*rsi zwv^O9$MyAC!`}W7DeukrG;=Ssb>kB_ZWsYP3S7wcA87T8q}}lYEt$+>^FOTa)Vlg z<7q3@A!FkQ=G52wc8Ao5UfRGhJT5Qn!7&;*PoIx%+CS^0k8ob+5cbjEu~6cg2T>pG z$Z*1Tjzhov9^x1iXZUL$?T0>7l^f{)z zhBt2K#W%}M`zb4)%YCfR_y%5QPsrseVsKT}=aX9M-|buIab&xPyzw8~OSJX&_3EXk zZd5%#s;j2=x=5}0B`_W9&s8e0eTMZI^%ZGGJZ$tA@}kYtCbQ@=%bh=zbH46wpbxfU$etgSn4g$bSdu*u}I9 z+q|bN7~l0$U03vYS55T!sqFWGG^X$aO8W!&gw5kEl{mQyNM*=G`GI)x7e1{=?zubk5{WNUuT=W zHe_tf=xCdJZ&fco*;4nPTM<*R-=?0M`b}EYwwroPN}s}5W@m5%6^!xCo6o_9&=$xp z!%rFCM8EsN|C*~Qy_zY;BD8Pz&1@&zW$JWtvA2ZyHrLu0#&*vZTB`Wxt5THFK|QrD^tzk2ll68Zk!1|G~cYkLO= z+8&wu!00vYL+6d&GsV7(0LBIJfbj!Po9SjB$8%Xnsj++Keb{Z}z3Z4ADdz`yKlEu# zhXb`7$MJr0^kcAd9aA7}2<)NV1KR%WJhmwh!~oOIfv<1q)hp8}sK|FfSt4EtIz z^eXkvf12w#y}@5!r~6{so9;KzUli3=igG^nlWk^7J7Hf%pTM}}qnDcNc|?x8$s_yJ z9vCAh!KP^Uj48~yMQKBR&Nc01zpDM~tOkgyuho8v;}*tKoGWl9>;s4 zZELyU9`kdpk~(H@XhXyY_0+Z*Ra7cs$6vm$uI>ImPuvWIko`Ze#1DycQ>uXsf-2`Ox`}wK+Odo^xniznw zJ?~>LpkpSa#)swkY{GDh^jhEuK>pM1l=%$%-!lDt*ncDFmA7xn&>h;94fb0c&ra*z zOzYnP#I`KUIJTJkp2F6vZOm>nZD&tt12y1_>-Bs%eE{bTj?9CffPc_&?ipyev1R%J z`XJ7WzVJj#{j7uYhN&|eX3Tk*{=Kk#l+1Yz;AJ7(`6T4liB8Q&C*dDg7`*^3# z+q#ye|4D}aT})d;TBQEi*Cw#t65zZdZAX8b5NVLKF+XRKc`x>bBWh5aSl%(%Pjhc{pjznPwo zEG;%gKft*H&ebg)*;IY~=1poFVgknayMv35hmw&~1Ni=in3KIo$N8L3Vt;SzS#GI` z0Y0TJ7L>pxa!^+L#kh z95-&AacO>S->HL)0rcE8(jvtGPMd(8Xn%sVF7Pkgeu}Mr=e0^QkG7k6fIMS;f9(LL zrvJ~?{^=KX%&L}e`_D-a%EJEQo*ygDdoImw%j-Vr1E@osPoh5M17;rTW9qKaReyS1 zn0>Gv^E|Co&-dkaZ@34}aTMBb8BUSr8vj#PJ7!g{5cZ#!9!q)QVqH8RlYF0p^{eZ}Pa_hU|Q zcW@0n^Y`Qx2N+#k2i?@)3_hLqQQoiRu>V@fZ^?)Kr=t%S2nW=GQ#MW_CAggi4m^XfV?y2qTkJeT^P4< z-r4*7pU(gV6`6y*S>$hQp5wy?^a1n-Oif?m*C#Lz*b_n@5SUvWV4ejA+xG8*ZWil2 zKz?kcJRbu--N5z0KY&|-*1p&_JI5p>y$JARF@Z*JfxnS8Q3t{J0?R~7w8Uj27V|czQ=jJ0R3IqVp7Vt9LNBb^FJ8?Yc4d^GtClrLZ zd>#v3(lHs*e*y|fw^QZ|ij(#^TGpwdRja@jHUbJrvs2~^f>ZYKVF|O&(Yk|cyl-FU zR3X~vyvKCd|81lS_Fo6r`MZE^^SzIg_W3;x{b=9MMQm%Def}1tm$P1^B_2m_n|X-~ z&yEB4hc(C&!|a%K>Ft<17_ld`?s3}>&G5>mvx(Iup}1GJ4NfyaR(z)ipx0PU;_Fdc{hE(g{D z!+}cyp7%8Hke&Yzn-xE^N+sAsG_VRd57-Cv0BQq=fR})~0NT;@z^A|gpgJ%f;Ca=6 zL|_5yFP0nrp#jDaBQbx7v4I{7 z&(IuqD z{uu|{0UQEo=dS>YydWJ9gxL8N$oI4J)Ki|<0$~0{?vI;!7VUpCu$0?KcLN;&4{#88 z6le<2)@bLi0sHJ@M<5?%=VOpx%shXn|2_LU)scP#q}uz3AivtqCn4VvI1ixh(5LZu zw(nx#ZRUOKpY729;}$9NZGh_l+J8;pzrbGBk8~Wc#?CKAzCXb64v+26{ruq?V}Rp! z{x{@9?Yu94|3cZ-0R1l0wt(S%6!QCkOYCz`tNrtu=C=cL8nAQBBUICNw`102H(~rg z+nyi59AU$>9YK6wz_0!D88zQmBbax>>^F0U&N__mh{rjDfLl#_kk@kvW?RyNF#w(N z`t(fuzZ?7h51<~_1l|PFSu0ZZL$mGtD&)1EBc)B<1Keuowf)CuxBtDc4cb5b!eO92 za4kSvyBv5OIKcXmjs}+5dHRq@=J~@ljQz8Hp90%}K!EXxvHxV`85>*(u>XGwxB_5X z>DS(2-jDrr{7(PGHa!U30I*N52fP9p|3v#=&-Nf)4h#f1-r=#@FWPOT{r_#}=|6Mg z?|zhB2eALY7T~;rvH#J?mkRsmeekDaf;mk*C)PZ!{Q%w%cpc9W_tOu+KcsJ;$@d1c z`2eR)=a#3v6J{+7eS#TB@I2Yq`*8yGz{Mr99?Eev{4CH42?EiDJ|NBsO4M6*6`WRsB zztr^qzRI;L_zKVO;Wug>+BWmj`j{h(;dp>!0jxdA&BhB;M!RRsVB!JhX8}#wE~JbF zv_JA~r)>LY06Hf24{Gf3KW$#@-@mHlxEFJRdX6x4+PRlvZg81BAAos5d_x4^5%Tj) zpfI-YJjcWaW~}f#?EY;wkoHeMkYi(|APWOx|5@h$#Qw8rN8TG)TdMopq$y`#0^47o zY5SS$1yiP;Qz^5Z4LX>jpUv;P(XU;+yo9OUeh(C(!;m7GOREFy9`MI3T;&vg{H27yHkm z5Cz!4|4`6zKh_R?Y|sCnaN2%mz5%|+T3f$2Y}=@BKVDqhP9IQ-w}R`*d_XQT75f+a z&qY0QEW&)hwySM3E^f3pw2oN=knwInrtN306NaCFo#*>IVukey#s>^x<6-|21LP=0 ziU0jP|C9N@T=c~h!M^ocOneVylx^!-;(lNI=Qf@R;Cr-1dM=nBpJ3($c?!1n^&z|R2J z)p9Lf9N=&LZyj9kGl1)V&jLmOdQCo3uK(=}Tmo?I>@xt@3$v|UH>lST`-%Vi;aXgu z%r>wU`fJOO;iuhHYID=;7UIqWhDF!OfKzMr##51UExV4V zkVXQ1?EU6EzAwnUwtxHmpNw~RU>nH^BAv^#Q&&dl^9c+QB-J4hFQ%BV7mh zd;f>;#=Q?D*zM?t{3<&ikNoEV{Q~_1*VA&{KihX6@CNff_MeXHbhoe1Hu3!*u223Q z;QLAc22A_7{@=XU6p4H<`@CbwJJs=YcgMg8O;rqB@Oah(00kawnhSJ-*pPHb1R>(nx1{&fHNjSyZF^L`_3n=a_s-?smn^FsSuo^8{s zdbZBIC``2M|F(GycnjzN^ag&k!EXh82k?6Wj{`jREGHM(=KDZ0@9!kVPaZmD z?4SJ)ZHD@H5;zE?1IK~=0Ckae#$%X20{mg;SP%(HE2 z0R00~-q$YJ|IKWxz5ag-uERF)`Ue2EkKY7fo<4y0VcK^Td5&RtEZfQZ;<4QBz&iQE zHTnbOl-JCQzf(Adyz^L|LtZHZ=2roi*>#=~`{%cUc>OiMSZD1CtsUiWy`Q6>_t*Dx zAL4)X|G_nN9Lf5UCI{+wcww(adY9LnZ>L#&3a=x-oEd;_oef+KF!m&_0vb8;4UunZ z!{nP_n|X6AkLSB<7w}5S!RNUSEUeW4Xa+R0)2om+w)0JqHnH>E$L*^D9?RoQ{u<=# znAXE)F9xm!8Usy%tL^+%jy(6VjK?(d$!DCmX{uYV7vCFkWuSdcXZzSTXZx6Eo7h%o z`?!zWZ1;6Qb<1lxR`vKej z3o`dd``7UTzA4z1?f*Uerfj|p_N(9FLF)TWK;5TfKU01ykdS`=(AIzXe)Q`^OsLdVD89j}ejT_xCF#p{4Am(Z6tBzc`Q)d!c{59H_Nvs^>~? z>aLNQ_b?Bb@+|q9C?j}YZ9sqfij?hNQ1d*X&VGEe3#fs7EpNVN&8+zv*yb_19LHtk zxlf;mxW6*%jGZ3n!8M%MWSgCBMLV4B(KSrDB0?YvZ1o%v! z&la`<{Jsz88t1E++jLw3`+d*0Y++v%fRl9_T;E5`5;RzvGFN_R;~M zUpN3f0&otDHd76F4bb1$L^>QW=S3sG*xV27*4RJS415HnvJ5HL;OOW7NRxmL0N3R3 z`P}0`b%6GN0q_>{uz&siU)cYZxCYyypX1y4Hb}1p=&R@-{tN8kwUGV>thV#?8T|m> z7oRKW=l|Shd3pI^&cCSkkMTl1g_!>Za)Nk5`u_5w9DeQt`fu!Cum8jLUV!#r31F<< z9=Hym-EnQs>%ak4g>)2P&f~g+NOK9;t+D@F$bSlK0|J5JfVN4b$pD|tUkLCy{Zqgd z0PX*B;9chZ*neMKi+0R5JqX+YFfOPEyaMR&{~;X*thd{`9QlDj6@bTFIv!xwP;k9ood_+LS_cW;?~fqssOwL}^7L`JrfmLCRS2gd&OIyr39{=We@FF^Zm z2k?0zZLK=M{@fcF0cVEX@R$a9R~&;I-1TAZ_E zn;rn@pJ>x{fR_P(_RrY=N1!rLZtWlTs_lRK%*zTh|F56>Vea4uYWRWKjdWe*N89C2 zd>Mc(i2eJLQiiuNXRqV+t9^_P@rq|2)6@;hiU}r|>@mKIQrNKLOJBlZbNu6Zp9 zA+Q3ZV*lmS{{LQlhqhxp%Nt_bzwaC%w)M0AWyzDz((i)enW5Nz1+aPNEsFh@Py5IE zA05ZzJHY?6-veU*Z(;!S`>_4c+KF3dUR;OzVBQ;a>W1XYKLcX_<x*f6V&Y z5!h$Wi$UJs`#)UI`!TSE4MsW`Fzf&M{!d5XJb>RCb-w>|5w>$$|J#do*=^$cKV1LI zcY109{{zhXKV1L6#y)l_@_dhx>zd2$`#-!_=kMtJ8?+C=cL7H5y&k;Jvp99yd3AUU zo)gGizmu#An1?NJ{qF`~H9-5`1M~)(0DS-N4S+Uw3b+N}`?LE1zH2iP;5YB;0tvu; zL!q%j1v$BJNcfE|yd$V%0#hbw#bp5Y#Qy(HpgYhN z=nQlLy4iW|GyA!ZWju!YA8fF^7x2RFaPQhN*zK(Vzah@PpXdy9^U3oYA1vd3mh+q9 z&OEojh5gs0gwX$2#Wgwryq>dtCePHgmG{H8vW?6$jygciw7mQ@CfjUwf@gv|f@ z*^fNu6Zt=W_-#?<4+H%FWz4huIKcVG0|4g|dG0=d)m0x(pZyoD1;SzV?2xe?zpS!aylx z0JiG1f7-nMb~jSy+W^-A{J(#-ftLaOePX1(_WvXH(Qoir{ryj*V*jO3=F%i#z%T$C z^|gQY|D0bm_J2L{jP2_Hw12%m5h?9ofBzRL`+wU1Spbg}`!@tjDh!lj24JiH?B9j` zwEqVH_W${{|H}5UV*jOB=F&7_z%T%N75g`2NGc3eFb2f_E11CLc7*{417iOUG9)Vu zR4@j_{wtWk<#vSu2LodN4l*Pw3{)@%#QrOoz~y#@0S5zO{|+)FD-2XH2E_g=n84+B zg#iZxV*d^@Br6P5Fb4eCe?RQwI)1MIYYXuGAFlu7`#-M%`$-Yf-+&EvemU|#0et^Q z*8f&89g^EEy9~fyo$LSj{$F?7rcWZz^}p8uzXR2P=YZ{YIoJQ|?-L_k1eo`Rjv+7W zf6J~)NXsh_1F%*7jSi$&0(_r81>iS8*#9pDBJBKZr2MupkC_Z?1^7(>e*1&p2;h0s z0G_A6|A|!I|EoZ{B)3}j7=VqM{{J%MR{*r*T|hKI8{cl{cOu;a!~%POL}0U>k3-7; zH?R%haUYTir1JfrvZoT#;tI$BZ581E1>twV&Ic|As@gF53$V>&SkB{2-g(|7*nf5f zq)={I7!U@80bxKGD3c7J-{yP&j01`aJ*Fs=BuJx$0bxKG5C((+VL%uV2801&Ko}4P zgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801& zKo}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV z2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+ zVL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG z5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@8 z0bxKG5C((+VL%uV2801&Ko}4PgaKio%rT(kCkzM!!hkR!3*9?;BCD=sDr-) zQZvr?e}M&=<$gKJEVlgJL1y!C;2qf;;Ao!ZkD?5AcpX0tNacR>b7il8fhp5zhinIQ z%u-%ETX~k^>*gW1co-k^_o zP(@Y=W-rew!7SRT8<1CdRvSt6Hrn~OgYyRcl;4p@d2MI;0%y(Md=`QVQJ#fBKj&v5 zu%q{y&cAt;XCctf`B@0et=wN(Mnk|-cG+f>dhIlGGh0rRxWMX=tpGb}t5mO}JM2;Z z%TIZ`qvKWmmZR>DT878($f+-Z5%TMZZ{PEX5r+^Am?i8TE^Ybcq z3fSNId6suDcjqUi&~qzSu;@GdoR_)Fr##iF+|jztvUJKbZQ+{}u-~$3u-xPA!H-)P zI8E@tn9znnBN~M42pZ5JWE9S#OT5FJtrW_y2^ujbvz+~f!v?dLr)Kfe*~{s(I%HhH ze|csDvX`TNM?aXod;!Wc8|c40Q^&HGcf|RbI+DE{#d(%z>WKgIGYQC6?(LE?2tqmJ z|%Ltzr4#c$@N#yDW5Km zPQa_5-lMaZHwYNsz}g-#Ag6NZr@ntvo+*6q9;3g-Rqiw&*6CY+ZSRFL{uZ?Sj*K(C znXKohJFQb6;9K4ykMn(secG?Gi9c)oDw}wH%Dd!Ie;?m+`kx^9AA5g_Z+TYz@GV7e zMgMo$Ve{D!Df%ARNv@T^zvfz=d4K*7lX4^fvdK?g^3O|u{NJ_g6$XR>VL%uta||@W zFrlL!D`2=_)nN`lmp75W!hkR!3AWd(%ywLJ5BTdv3-<)dghad)QoC#)UrN?8A^Cpm6Tz?Lm*a+ERK zIk>jvne~!&R>&vrv#0*os|V_hy2wc`wGTD;>HP{EvcDyz9ID0!1#5eS|n|n{8e)7%sI)MgZC!K&q+&;pRH07 z!hH~-wr*Xc5;sj%ODDEbvwK!j@cAmNdv?t3VJQM-$LI94wgK~D`~Bzlww_zi&$@d3 z&-||inZIe@TJAYLtN@gWE@aYDY&rON_qNpK8QjLN8QM50dRk}L_2!iLxsdaGAVR?> zFii(eLjGRF&pV0zr?#XlO~Qb;U-%zwENl9= zmJ9m>7Rfjw-{ObA58%OzhFukyuwiVc^(Jku}X@Wp~45% zRUxqZ**yb_L+<|SpzNs!S#$xmzXAQj%gY8^XY*cW{eT#ur&lMqEkFMZB*jkCvX5Ev zLzATFsjE`r(YGf?(6@fuy)ODq`qW+U~>P?-}bk5tc*jM>{S8s0;K9 z^bgu6K;|jXgV&Z1v(DljQwI=BX2df7S*VzMVdq-*acf3AoD>^$%p1>Xd(Yah+j@`; z-7uhTq$2*?k`$qKZeOcbO?yzyMGPM?AW(((cWKP;=Th_fy42j>E;XlDMvR|TFN!9A zr|y{8(&)iv=)t3u?4+-BY#=eBL(RACjMp{+-~TqoaIpD>wD;pcW-O09om->S-kmGefjw*0!M$tL(ZeZf+oqq?;=z`R8WE_XhI_JM$uN&177z8P$iW`9 z@FzF)1iH}&v0+cI{Tp9V1Yclu!Ndw$4-g-OVw~8Jx6Zf#I#J<$!IoXII=;u4ZUE-; z=;PD$xDLLYv3mw!JkpE|OC<4L4xhxV>XUoo~G)4CzcC3QDJ?-`nhlE zdryp>-Xtk@*6_qlQ@SN?7}ptdb>AcQ`!-?2nC}xePV9_%n;u(}7xdk~Giv7k9SdV2 z=fs1%m;Zin_X>4r-&%EO?^^Zy(XA?B$%pBS23l!R!z<-2d)p8A>V@$cOQ0Kzhj`S2 zfo|;^=nJR|MT{LX^Z;_7jd8&h)C0r?y6hIqX`6P5=(B7?cV^@r+d1~v2) z1;h{30j&##ix*6ss37-U7%%=G?+aW6U=G1AAI|$O5BY`meMSznE`c3{&F<-n#uzPj zPEXIaIlVmlfP=^%UN|t|VAP1p$ADc+hF6YRGOW_ZrNb+&iW(j;3Vr<7kprxDv);FA zO?l9*?Puk%i!Xo=4@J3(9OOE+V1T6-{Aj7g+SZ{HIoN!D_fr>mEc=N0h$T(`P{=re zz91d_K^o$KHz?Pc-&+BT2bO;w2=3zT^C0`7Hg{(HY~m*Ecj<$3mW|WKEANJ#Pg^#k z(gRV$1H&Q*S!&&+`lnK({-+YdA4V*A7JRwO>lccs18k4+36by#9A7|&h4lw|oWS!j zhuD=m5Cm@U{Hfgf0oJkz5dFx>YjfzR;{kDp?)6O zMj;#d8so_+E@_2AvtsxF;gRbe6H)aJPlsf7bEFHnTBL0&q*a-#>FZ=yU4 z=nuR)(90S`9hmW5`5GGzdBfYUK@S!=ZJ!)5uFem?xgA5ZT#;E^1*H$GZ zty6y;T%wkYI9DwhYL5*HVc)r3*Iya+V9{Xjc%gtgz&T|4f$ymUIJaEP4`9wi-v{Qu zZnbS+&xzT3pkwMn_`in0yq{}Jj9Lp zfYU2`r%v(S7$b7bm`@#`ZE$RO0&()~nm5~im@jKT6US_sX$2zgi?`*^cwhICk?@nn zD}VOGoCD-D0HkM7_P%<+=MDMv1)3l5l@c|`x{NYE)8oQ+58dJ_3`Tx+f%g6L`?zl6 zy;HX-d)h*A^!rYZOKYBUQ1k(u)8M=TpEvRTLv0%{w&0VVWI3K!PNw|3zImzV?dO}h z9&g*KP;$sgcC5Q3f9U(Q@5g%Ccd=Gd0VkPa{Vn{oOD!AWR!fKH6Fe5s=h%5}=llG; zuH&q;gz9D6myL8|t^&`ZF=s%VuzB&9YOWUcwO*r)aO@e02cbJ2;BufIPy@IKI2Wi2 zT;K=Xe_s2xx19Ux)7MmG+h=wS96hUxM+J59oDAxU6qrA-64vPeKV?S*_Aej?dsA-D zA6Qw<^TB+atB>VzHn{DtPTzgb^CD1>vwr4zyt56y=lk1s*2(smw)nO?d=TzsKqVF2 z-OB~}mx-M!X!gWe&$YYhJa9|ynHq50?8{9iJmx}xGNdf8 z1Oh3a+sz?V9WK8RX6OQ;)Pzk3QH^J<_J7dJHMIpKsq>z4%0P_1`DGcmermaqi=7 zTV@=~$=XIWB{g0!5)J?pAJg>=XvrZoS4C>^$ zti#mDlx@)0LHXmj_M@B=s=c=BQIvCllpGPDuE zx#>K=-GjFrd$>*OzhF}toRU`?X5T4nlY7d9vT(}f2)20~%R+#Ufm?toxyjzQ49Bzo zsSk7lSU>AzJ4_px{spW6hC%M1KmKs5SH^U%wrhOXOV!9uSETp(q_+C%jb`fQXIiPY z58SHm`B!Vz_WoPJ)vbE_-n(1lzFVnp-n>bT`t~|Cd*Ib-+32Qf!*7jM?8HVYesUuf zH>r_|p3qpW`L&5!JOb-H`!`dgzq?NT@cxbJt>-C+TNLYg;GWjt_*RW}58Vpc+^Y9K zg7m%rwopHPeLc=a{iB+yji`S!>Sf&<$2V5Xam~;luU11kT&F&IskyG>UbL0xvMyaO z(l+h$)F(4m{vefQiB4qAhYTNLDt?SH5T@4xNTOHbdZ{`b_4>)YPnx@Y^gtth+y0?z?2 z0iObcfF-~mfKwJmHpj5P2hbEK@Y)le1F#>^@dxz+`C7mRz+5i|`8NQ)9ykm3j#J3f)!@ z-ghf><7PFfM>Dl)d?U3tw7xoqHn1%`mu)-3x*hFiU8a8Cn`w8_)GO84&eh<1Zq#z8 zU+ML69X0pI^G?P5=2ly$TXV?!!*^$YMuhv)TqC^;XI57S<}}puj zZ>R=*aXsw0vHE*aU6j?=Ta#7> z+A)pQpZ5N}IL@5ouYPB}ye?(-=Yo1FWuGzBpWIOE#BRu@F!l2sp38QoKwnq>(pde4y3Mr; zQ>XL(c>dndM(|%X)sC3~3jEPtG{9M!&EHz2dK`c>L*7OVeQDk>$LBXaQ+4ij+?&6)vOM#cEV@ab0)1#ey-+Dr&-SMS_fl5ivqWvX zIP?K+bOG!G8UYQ7?XxbsVQ0`4H}4F({3aVW?+vMa)#0#4^$vwLtbaHRuv2amM^Lse zq;^vrfAh9!)f#P`a#_7?Q>)eAHlOUCT zpuw)-D{k5`vwAb0XX<2qzIAS&UcCvgzdxjIW8GFq-K@`Chh=+$Yc}Jt_WharbfzuQ zzJKivt@H5iklLRj9kVyIZVYsVzQFcn4d@?ELI={leSypL0eW5-Tm}H-mv93ffcuDI z@9&F4AAIZBIK}HHR!y|(ubpJI0q(YOk0bvt|J&#(AvASr_X!`ct=G*_qjrVwT=#mEiUNP2czG1Q}3Fj@h!Fo-dyw0+XlU<4PhgsDR zN7nro=fwj{f#m?}WnE0UZygZaxxMAWdGmqQJRe})tkdL|A|DHMqI}|}RjC9!$Ngpl zWL?7pyAObz1JLJZlM~8Y?g_0k1wH_M0%F1`IwsU(C~&z3hyYdrD}hL0GSCHR59oIc zjE-QtRQdz(r0*MPU*Kcl9pEeAL*P9-{S4^`>nB+QRs5Z2Lk?qr4}cGW_w4jjd*25K z7S-zZdsKtxmyfk>+c?Gb6V83##-~U>a%_Ky^3e-NTD1=^uJ;wrCq4l_0^YarHPUx& z42_*0coxq81$fJ@hwc0vcpG4QS$_wD+#@Z}8V-z&+=y8M4$ZsHjWM$J9lL_AxE-;= zCin!EIJpx00_uR#1IC0-nEk(F|4iU6TL*L>!F*}@fgdCP{>Xm=P)6U`>6b{q0H_aN zLH>hO;=igu7UO|00LuJxfO6yZmrNo5{*eC*kbi5)cO=fGd`w*?rLKL6^05m>TD2g5 z%DJ;$2kT^A)P?VW&j9Wl89mig8Rv}yC}&gG*T}Qoybjy?EzprXBQ22powC^-Qp*F| z*XO59t6CjCVE3NT+L#aWPzN}cW<21FllDI9gwX}+f&OL*(rkXldCk+E2R}~vlRL_# zlbw=J+CFvQ3oZYI|D*l?X3OMbq(%pfj6Q?>``YrS{SU{vMmDU=)JePj4CP}Wf7(Cg zPgy(lihiIIKs_Lg{Kw+l5A3>FH}!z+W}a>BNRUgUg_b|hKN8Vg`vCL_%$ajEjs8urHvD z_53PQr|eDsZ+k!IkoEWNkm_srZ^7x=>yUpxmzJ|HxnMfV-XZ**$)5OoFKQ(>oXXyV2tN4Gi|7ZNi z{+RM-n$hd;WX#tG? z#{i7=DSOsSz18(0WqiT-r4#u?TB!d2;M^u&-yd3MTn_Tr`hdCAH)4LX{)Kp8BIKD2 z7&|vIXLYTTG59CzqQjAO=r?wt4&ORsu%G)9 zQ8v?Vb12#q1F&7rb~4X>oOAftKDNN}-w{+z`{jw9Ef;+MrmVi-%nj=21`4s@e|epQ zbFbpuUnPKZtp5NW1RetJ1UUDa@qC`gm$o1Hx)5jx)boS7*w+BKnC(v6{>a%X<-V%G z`Hqx%jr7#}t18ve-Jpu)n%K*A{**qJFREgn#`vDDb6KZ4@p=Hxzd)tlZ|Y(FRqgGn zD*4_E)Sf5w_ngp zUSsR@3;cdB!Fhe?0CnVy`+R4c3YySKriH9W_yt@ z(YOhHzLx*4keUrJ1~76rDRn^SF;8@C=d9{=*rr`US8{&U`5ZvY+LpO4<8h_+2Rm{B z?p$E{3HpfzzyyH(@I+t|Fxk#egr5&zGr@Z9ujNf!!EZ;R-<}9e044*I?Cr5g$Dw?M z+R)<4brbcR(`?i+-qd5~SvU8we*N49X>RQ_Wrfxe%<*XX?+LBd0I|N2yGbd3-S@*c zL{Of`>^XwcD%;{{7dY2^BE1wC4RB8P1HjA?GhXEO2ata^*#Gm8f9nGaYYl{)&0H>H zMa~(U?GI2sO0B!GHso*S8BJ`+`U%E%?*g2cW-M7~x#pJ#J{#e&=m+XS#wPC1CV%+; z|M57C6$%_<@Vv70GrS|c1mO6K@}jI5Z*%-*(k~$Y{*eC*w){sx-W`t)%7*D%((C(`1n0O{lA_Af?jM$o>E!I zBRlQm%aS|`+E&9mQrbWLKF3~Wj7ZsM%jX&r!^m)i0-egJ7buA#!GvS9lmwaauo$9u@n58P`$ zkX8u!@0;CF`*isBUbg(XF60FBka-VshGzj4LiW5@ANkY&(>`gFw0qjDNk51E_w(`p zoFAg^H~oN_+xy(d{=fIBi~i-b`+wdq>|4tg^T4;$-XZtDwd_OdO#Sss%k|fy+e*A2 zVmnI94{~tY|IgT_Z|0mEK{-(un0^fT_jJU6gCTd$;}Pr!7y~lT^#dP4{-f>qk1+(F z+tc^6Zi4Y7pGh-iEXcX!)5iOW$bafo4g4!+}s>whuzEy%gjB ze;!|PRXxaf5fBE<_5t_J!JO`FtSLDcb2=~JJgx}~23T*HBjrBMIrJqDNKdQn;aQ)) z9{PJX`i0Ya?K3TRxxQs#4CvzeK+Fe-?7jBKIX2E05*hPt&TZJg>(6CPT=4I1*tvnr zk>@^c>*pd!-vKrN3xUNBaE&SVtp@b-C!`{Wa$=Z}r5AVv!FmE* z7r=eLmXq6-+prmcOpW}1K)wUOu_EUN&0I0}ajuBqT%y>1Ino};)5xFp{w=_H0*(`z zQV%)-)CFP`Ao8ym@;7rtBL8wEZ{J&iJ?CWqB7a}9%Hg(;{2AwQ9Vlh*`~C;x0`dRl zOa~xSWB-irDR=hyjP>|l8P^F=A2>E7h62+6mm?j3JdON0&YKTR1*Y3zeg-fFm<3Gb zts@m3C?`4qS?W1kToVm?S#|i0r+v;ErV0HM_HrvO&+Zh$4dbd5M+}^g#wo(i95gqIu9bMiU z?y$ElOMkU1w$k?`T(QY2j^490OZ8sO?9rOX2+jsbE*Y>-u+0$(z z>6x|bT}oNk>kR+64x@>Yna6t+&N$@UPC*Ts+uj?fS*o z$6eNc&T>KEsooP#rgeDtP#VgPdu4d?>kjW8IMCq^+>4fB#`bY9#g1*=!i;USgcq>4 zJMd2QHtN^)rB>I+^ZMty(74pvEYAF8Yrm9c%;-bA^s?}*v9uXi$Y`&_Rq%#Lj7DWCG=KHDeRUdQimw%741=(cZtzSm33 zXuIPgk}Y`#n!wKQ1UdpPs}BC+cc=asR>15Y zR=}K|mS#__I{_;~?awyeo^zf+>V+f&OWeb<$1@CCR7=GUC#G8 z<2%buKIZi*J^$M`FP>83=Xkbl^WJyNq7I&<=*ib7#m;;ud2{gSq}W-DljFjowOeuy*7{xZjV!#U|)MI`*rSg!w8paiQ9hyhN=EdQr{p5vW4DS}Lr&e}Ip} zKsfICW7tANbFU#?wN8|(Zx$s*Pjl}&{Fl~sjJ0mtwHQ zihv;9LY1P%20P#X%)JW;SfkIp_p!g<|DM^I*_m_Bl-)ZsXGYe|v!KawHeBwsBhGEI zQUimioFQ{{R(jOU%yW^qKp}f%b$;G4>4^6yEYx0Lk?XZs_|GB>Tc?&0vM?ESUgr1F1?fZ9LqL?H zU_^R=4xUNB@Voa{T>kVE+XtRI<}xw)pw-Bfgr#FMPwyO6oFDz|)v|MQuT|!5xLTQ; z@P|k5>v66EbRcrMNq&y4N_I$tL{QR4%mxM5MF`59bV21_2XdVtzcSQ?k)#8}Q=!jn z@V=q2r*Q?LAEI>8Ke>e13;BU|=*L$J+3~JIPQoT3HEyHu$H5IkV63ySF=3PH3;3@& zx~nxy_P4A~+NKK~h-&#|J=CG357JHCCIyRHUt1^Y=zwG=J@9@4JS-tz@QzT%I6ZwG zAV1#bp!@gLJc&}c1J!3Ol%*ExH&N{x#G{@c+o@gcw__rkG5s4B?>UksZ4p?)7R0S1 zcYU{9FXA`fQx7CR^_dJjIm$X9`ZTYd^VB*~fs`aC3AMu-&EW%N&-zKO(xUJEFJ;{xC)xbpI8?K-UAKvAlHK_q=RjX3{)S#|AzSQPhM$Mj=Hi@{D-|FG<|K1qhJ?- z_ZPlR@BF?+!G7PWMv}XN9p2VBe&2#Nk8(fs5b-G;IZaM+&(j7yvK{GzqJLqL!~+0c zr0}(Z1bXYBlQ59viE?}DMn?W%qze4`EvLKwyO*E~b_ zgBjRe-&c`K?qP|M0!n|1tWLLB^v$d8G?@ zHvu{UZAcD)e+qS?5|&B<3>d3w}cD9B-jG>wv)W{f(;s*ws^7@#BlqzSVuCf?#v0BGOE{8f_|F(}3LMSBS7P z`?^VP)_tY1BL_*lSH2+S`dLWjb>dT8y3g~s;OUJUHAr$<+)p~aw>wX}Zd^*AryXU6 zvx8099(xmJHO)eba&5s<_9{wqeT0t0Zzj&u0{;XvU^{Re(5Eoa2C~ck2;==Is3B1E zjdkWXEyHvBg=^^^A}b2%3?9tb)#xtlYE&0i8g9x8LvWsLJrv=-B+QIeM47T{h@0bQ z&gm7zr?9&5>HbQ%8M_{%jz?)%;yHT0eq8yv(r~ltt5GI*E(hvWp=|DCcnU1tLs&<$ z_&c`N-n+6rmlJJRN%Cl9B=>#)Lm*u4Kb1DqyKx<4c#3V1c0TA$oZEY&FvR?JVX)bs zMIq)_ibKt>lxTqSJ+Bvsn3V%pz&D{3{9Z*pxLOu&URo!9InIOqlj3ROmQnnIV6*Gc zLj}bnsM9s8KwO*|Y>KmEImj)MUJ!>Bg<7y|@0L|*`$>MW1bqI3WWDGwh_>HbDBYJC zYHwZMNil0e@B7M4&&U_)Kz0+&@KlOOw?l@9*b(Q~nG^*Z8|C`9eIeafe`JQQ{xD#8 zx=-_wxdCnal|&i8TO8GSWN~EYVW6W+B8^|q^fefi=G}a>Chmv~AN>ysLOb>?iRwHY zaR|fU9#s@(^kSN~{yT`r(@pnnJ}T3<<&dJ_&YcT_I&}d|h~iAmo<$)h?*i7)#ZjE8 zDCPPZ+(8{B9n2!0!LJTc`NF*dxJxcM>`~|`FZH(u=n!BxumNz!*^@g`eYM9{B$)n; z^C&lA58{o$j`D;q^G5PztXuc59)Wc%nd=%5TbI0soXfy00sFqqN+eQX`jvnYQ>hHrCz)*jC1 zeH&->tpMn}-Z`f{g=OdbbiRcAi*Ozel(LH=|Z(uT0R$g!G*vM%JQ&oipxQr6m*^{&QVq1Om!vNo2od> zoVU@!kS?#_zB=oG9_c^=ztkTPWcPl!n``v&*!2K>@jeptp8(0v&lraMOVj*xsQl?X zYC3;{U=10kBWw!dk^CeZ$-e~hk3krnOGNU|gZ#FTe;mT*o;e`W`NgDH(gUU6D8B{4 zow>$4*aTEGVNVHxfho%3%t80UMg)WoB>>wq_iKhTGI zSJldI>Y$PT22s2h7En5<3tT_srVBcF?EX)kLhUb=2cXn|=L2^Kfa)x@-zBKW%kf_L z0p&amZGz68Sc1OcC+K16`GDr*(Jt%}2lYZAP@VsTasbr^&u!0@6o!~^I<@s9UC&-Zd*$2KVU%A!!y zZS;I;c=v~DKNR;#T;Nxhza`w805`xD*oFS=2lVq){<{HJzzu!ChIIdC@1y;%M_XS7 z*aI$z`!?|d`cU#yT$DG@8@hNA-v<2sPX5p8g3P)=bD$ZZk2CC5*OR&_(f*r5_GW-S z3lN! zUZC$F-v5C{~zdY0H50d{a-GBxjnRooI?Q8 z17RreyxdkEy=FB%@!zBYlr{B-crV_CXWoDRH|em6*Cq`#X`o31O&Vy@K$8ZVH1IeY zc=|gL{M8()G~0>fXf|H4Rc`@9+pE|hZQAI>nDW3bAGGmFIsRVjsBd}19MTHzWg1KO zHk!R&2Ftsg7~kUl#)H*+z#t)hPYKcmDz%DhWfN<|q-=@r2m+eTVhuF@nuzg;dpjkY z*@OO>uBJD#ve^l`*U<7fW8BrbBxCiBlwFS+tM8xcFYOc2510Vd>=R**o2IKt15Fxe z(m<02nl#X)fhG+!Y2b-yAmii?G5z>v@p#}k5%v>*`|(cL@Ns+2-0Z`uOGSAN`1Q0F zM6$!c+IViu`FF6r(thC^1u%p}q8^dF{IZ0WmM>ZD5Y&N+Zx?0G9UBDl(%kT%* zVm^(BkJ`;;r$_L)>+{K;WFzJQS#FVSU{-o0%gsK*PDU+cp=-5SFzn)B z4_MdENp_R%-(lFd%z-R?Eo(5^3CU4cuBYLSbMMXbIDgj1ChJ@{c#dFM=Llg{`FY20 zo;kR<3if=};p-Gq_|Ig|NOp}JRVWLx#~>Rel7rip#pPvLRfV}Buup2i;>aEYwn~Y0ZSkB$*e8N-wOu0d zF58@OIly-*%40m~oNR5LvnZ{1&F8eXa61uj=j*j@Ja^ZGkkYihdtDyDU0zrII{S@?~ z=G{T)jeKs9jc@Em5jIf@*ayMp3wA(puHucPUxbw4Z9{HtSUs5?-DAZ46JZZjWAg+% zBH8{c0XBhTA6Z8Z;-A|*L52v}*zt8Pu*G@gx>RRFy-k!{`l) zn}^(0u;Y@URh_U!2R1kY^K~1_+&3>{$2~20J!)XjMD|dCY_~+VjgheDqjU)*1KBu2 zu8`U=$>p#$Tp`l z(BjKcmYt0HjOpM!r`?3T);eU{Q%!b1kuEwc0Jc8dK8S3KG;V(f9TvP^1-m017UiPN zg6hPPZHXc@c0^=PM0QE!FWV=1*lDvp)>_PaxmK0Gy^6Wad2VcL)34fnG3ur6;D>BQ zh%d4g>Y%xASyvAD=>e|+UI2rw(UJH5^``amndbh#SQ)~ur+3waPX|a}jvgR6E_gxm zTH8|!cJ3|(0sGhVlr}7SL7FwmQi2^2>|q8-tLF5TJP{{kLwCv7p$F_<`be-3l0JEV zpfm+^&Dt z0Zs#>fVTDc=j{dlB;WMqKXlcn@H}5(Ilw9+JKs6GuRA-wyBlm(Okv|fAX^tR^*!uv zVDG_v9D1`v_a59&9gpIe!hXe+rF(T{dsg*fSw0rrhK2mEY0@C~pRuOwoTo`u#Kx94 zv%Ca$(O+$^aM)dsw?o)3jR9}35nrIZZy~pQk)P~;a<_^q`ziosK1F~VV+!ov|(NO8)-O|RTMXj)tz zVP5z78)WYS+&~$M{Ol7CZi{f4==R?M5+OVz7?~J6sO#WKj5czCBoOi286<`5WW&=H~i~POX-ZGe6##osjxPY>^2D8HV5l9|3&NdF9!TWUZYQPQ~^XoWquz<2IRx#1Z@Ls z1f0O98`k8{yn4uD3D%&lz*_uuC9y`|VNIshNe^))*5<8E_Et^Rgs-ej;NerjKi1-k z&PZbm@D1?+TBHBc@NV3VHMc>u?wDlA^=-Kodh`LWXMrQYCZGj93;Mt^pm=mqK0ptk z5AZ^=x56khQ2#q%{`o-t#h^nkM6{ihAO75e3*l`SgT8~c#1rAa09bN9u=x)Z;%*&A;h!=w1spAN(28H=ueH zz1Ni!x}q}R-_xMa0am~)$g()qSBu)i9Nr${O@2aqo&x{5Xb)uDM7AqqC@oMGo}qLa zlznMfSDrR>9YFk}PkNi4fejD$C-+~yC^N3@3} z)E-bi>!JTIC~vqM)jx&Fe4-tX1i1Y<_XByt zw}=9Ie`0l}K9BOfrVOYqE6#c=Md)8a>-H!dl-6-B0cHc%0Jm2HeIWmG`p<%bKdygJ z1HcJz0JZ>=G;;l|`ImJDeyJ>a1O0)Q0B-9H+MwT&pJ}2`Rs+yAm4S>Vy+2v?Hl^02 zfhG++V-2VmA*l~i!eLUHHCg`&N{h9S&rMq01zJw4xk9aa{ni#gG;W)w55D4?h zVpe)M_GeCP@W0V}sUXt)9_DFiPv$57{|ED^%AD+^nsg_lE4zNRpm|xrQG?JGf_8m* zpXA=Rzwc9zMW-EJrKjy+_L4IzFO*}y# z%DLZdcC_f?wKi@HWl;VSmBwBp3A_YjIBZE(^%t7V}ZYpAH*n^3S)28tt~y zJLbI#b9YX}a1qV}OsTlYZcChy1r-KfGTjkL@?c-=THIPP^{RZe~B4 zi^G1D7A$GI1q*lS&eqNA%NBgwp9MOp{S)20GS?-2*qm=(Wcyb4WN}-%vZKFRutVFr zvW<&+-dZ-pggGrVc7Nl|w_hDMYQSvF-6kTx;Uhn)-7k)Km^8+c&ruf!3a#^f6?vFj zyICG#QF1-TtmtZ#SxIH2X))%@ipwI*N(zE4ivPr%RAr=D3Fgy^%Hel8i2D^`?yVei zTGudl_Gd+uS=GgWmMqUlI3*o@o|o;wmxt^2=|B9gb^(6G&qoYYy#B^J;_OL1R7u

{U$3Iv~D8^4{;l+zqdvpkHAO zb~)yJ=3?A$KF0X0b3$7EfcYM4jJqxq82vEP&pFsC5; z`N{BGfidlG!6)rmWsPU}yg*)Hd)_}|ta<>=fnw}B1^G`w-mbuWfbR=-FCNIBk01I7 zBGSe_A>smz@h`~^HuxH2o>tJuY|JH5o;3Hu*P6hs1#p=$&dmEAP|aRXrduh^M;ZT= z(*nhqxM)Lj63BlxbiNe%FG}&zo&&uv1Akvr+MqS{3i)z3s0GcNV9zO2ij(c#OdGQE zx=8mAb0L2n;6s%8aOD3g$FmmmIAbZD%pi4n*G&`iA4=?V(YmhR zUv*<1hU*x5p#972F}^#I(yT}{yx0=nn=!yYM< z0PLIB`8#?JWBiTgfjZcz|Gn~hkiR@u3fdm%2Jktv2R-+{7EXEqXdLs2>i2(5Pk(25 zXmsJBE|{`0jFdg}x9CI%Eq%Yd_y zi^eHvSi%IT8K-!p-VqNzqYLMhh-c!KiCE+LNcBH6HAu`kAA6US^xp;Fth6TjzG>kb z8)rJSMH^sktgfsAd^g{7?W>dXTK?SSvP%8@OPTh$=cCRi?C4XBZ{`aj ztHng{uqbSuq8+8>I=`)Q=+Ge|zDYSh@y;Tr_9{Vncz192)R?a>oeCa#G0v?S*1KWN zqKhceH#7d17RNXU?@}5;D{J3}^Cop|rZk$|ddK3HU;C|AvhejnRfv;;z}pxc5L4+pN|+M{xqdmi{6&+4G48^mS=E_+ zJqlAjOv{6v&C8SbnwKBmVO|#I()~)Nw^{Mgon}(lhBjvhy!@(pmmZ^g5ijz^*jr*j zoX*=hK{~5%AL%;(a-`nE%MmRX-a2Z!FfUZK>QY3Dmlzw_tTeLSx;$U4GZ+1Y@8xG_ zPwvUTDGu!x+Me)K{xdVE<-*J$gGE`v20vv4=+8K^M|ky&moDEou{gBrt_uOr?>PCZ z@Mb}Nrs+W4r)0qRRW|&7GUmVlv%S#dMr=2;Vn88wB6Dz`d%HIt`@$JCN0`#sr zLEj~S;!XMPEdI^h2te4?+VHw@{||kfaxDM= literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.png b/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.png new file mode 100644 index 0000000000000000000000000000000000000000..88df0b66dfcf0b298de8cd296b793b9220c4a914 GIT binary patch literal 658195 zcmeI52VfLM-^S;9dgxUo6akTtgq{#WZz3H8tb`;KDWNHduM$KQ)K9@e6$LBOqzFor zPy}pLL3-~cKzhILKbJeW9LeSS?%qA4!|l#a{mo{dow~Dn_2}&HQ^kid=HInThdzwS zW^?PMCYOZm&Hknz@3CD5O<>HoEVnYY@QX@}`ScvozJ0G=!{aB$PZ%CQHmGa+_CaIE z#}6GbDvq(qCl~h}*01l43av8Ernc)gGvTvt@qNmB1@&q7K|;AXb?VpfF7@=x8cR=< z@4cgR$BxRX`?(x#+r|;dp=fdTNZKnCu9?MF3t&goSA+MH| z`BcqB&2LOL`G~IHd8)o_dRRVNtUp9 zex1H-iINTeD`vSW5tgtxbHN)j_WWlx%FC9_VL@f%W_Do1Te6i0o_)U~ z8yL#U4eho&oE;BlpqF@?FQg{8(u3nKPbb$~V~HeYI*$X74OuQdM06R8F3A~)3z?>Oywui$Z-0RtL7UT=Tbicghm7A=on*rwsm*B4Z7G_6VC z`4p;$C3DJCX;bnh?+;kLe}A)&HU_*Ld~>^6TXV>eOZit0tQgydu?rL5I+WiaWPO!{ zmgMrFMfnVXSz{s#;jVywfGY4z8%t(v-{QiDYGV>?u*clhQGZuU;Q+PinQE7i^?HsP`HgP&aacDs7- z{fq zYklNZalwqt8auiu%Y0Bf=#QD@de`hWr)ih4KJWJn>ffbWa;x$3@b_Nmw0Ta;eXP;j zx$kc69QN6?b-i2t>$jo(re)2mywmAx--EFe!xnyAVacL{ceJBuN_=5Jn-bl zQSJAJCSI?kX}o(>Xv00b+O2KT!28pfzt+}V>9=A}l{L?~XxyvY^LmHz|G6yScaGo!&d^*8Fr?x1Z~5E}gZz(vU`B?+xtI zpw_W6AD0a&^~TKhyPMZ8Gpl{0x|?RpyRQAV-{CTcI~^X=r`C-IKQ3z0rAn<&|2P>t za-Ampvj#y=?VE9V@#S5Y8(*$^xkmEyK8uI68TV~w-;4G7{Q1h$m9Lb#(&UOdZhoV- z{l8h*=ghLD{Yp3e__=ngmTv6xQ}6edm2L9s!tj5lSLoX9yH#I*ex%Cqio-KT{B(Wd z=F(?Af4s-Mg98r#b))n3dDj)Gll=VOEkAwl%whE^FRG-eG_3NZNG$LE@0Xul?%O}5|6lz+{_&;0 zZ?5jtzj5D7KYIT=cSZl@i+kPbTea`7A6qVaZ~3rg@%>tV`_`bmS?WQ%p4rv2{kOfB ztbO^?);&SKF_qU(Zct;S|B82Coz`@Bowfg*_}^>wx6IG0w&jCX%YR%N-oEkEjR&mn zlk(Zn)|yN6$7Oyov4{7ydOyrddF%bNAEk}?L6$`{i?;+`kv@}<_Di2Ds*oZk+!ir0rvVNSgoUZ!aZ$`Aqx|XJ6lTE&H)IL-P9Njn4idt+U@i^?JYB>QjFED@Fd^ zbXzEnOg%A2}VE^Y{T;FNc;#E%^+p=oQ52b(D)Z^10E51B7 zY}cCm8~@n&<1dxhTsf{jwR+>|jc-N|8&YxDFTbzemGtShPv6}9=Ktlj()ho3B49Im;pg$?BA$&nyKw&(Nv+@fJ$3GlkYNK4lsa&{@uiJr|9W!wh{b&u9qBx*;;>B< zrl-vOU{<%c|7tids^Oxld(Q0JcKPXvY2#kJFy(OC;zjEh?Yy(>)K5vxl13!GclxWp zn|$@;mxCHlIJj$H`}4i89BS?R)#srj&#io`XZxPhhHbc3DQ~p>m*@qt{#4j~&!RUyc4?*IRydXS_FKz`LzK8as1fl|y0knoc@&y8VQ69kM&j?QnL{ zu&|!tKLtPCw0_j`hOb1PY`(e4=GJ=}4juIO(SeHxb{*C2vG9iVS2r6o>bG;bcT(QF zxcH*)r8g?h583zP*w>ft-pQFM99A>rP_sibkGFrJ{^LE8yVUPK<&)cUwl;me_V10qYj9=LJ9U4Yz4aOYXU;5& z__D&n*MdejY8A09Z2Bj2W{+#Pyhis=Hx9cw?CQ|h{#qF~V_p95V?OvL7r3 zJ3aYI)%W86+`i!SXFvbAV)v%l149oFy>azy@R!dA%w0b3>b!t4{wGfU?0@drpYNol zcRUp`@1J_7woKpHe$zMGBY)i*_ro9i|7qE}-?#lz`(^iQmUQ^chUot?#~ezk`o`Au zCby5CdVXt0Ueq7Io$4>^-~H&&qj5Q1bJl0HUOj91jm)_@qhDOrWAwN?dliQjd74@Q z8~@w%@6l?9TlMSn@v*0Fbq?5D{kQzZcP9n=H}miRe_!mmSZi#D*Dv45@IIIExPQdS z*FJn>aSHBRxRX}b_R>Q2*gHL_E@r`RS`_H`+7=L$jsfia3chr7xt>xC1 z`(H@@bz<_mQ&-9k_;tX-pCVo!eRJZ?$ox-_^xV7Ui!BRRZ+c<;0}ANvpQ@-1>5An>%e! zv_JFS(bmuBpFV!~^wd`pCoM@ibh>kfddBnv`E$}f4s5Ubc)`biz3cz(tuI?dg@<2k zJwES7?how`#2%>gWySoj^Z#01u|mjOnMZH^xBt-mLrdo+&---y=>L6}cQf%;<)2RN z*}3QO-~O1~IP9g=XLIiCm~?tlwY%S(3+#FMe?e30X@_U6P3wE`hn!C&auIV@AeJ3>p$QY{ZyW zHSccOUNdM!Y^$0BnsyKEKDK?_@DW|6jF0O(rANP*DI;UTV{5jK@`;=rK@p6On;0E5 zdGx3;6Cx(Js;P@Nf{wX!NX;Ofmx&`=)ojZX3>w(IS5W)-@o_;-8#WD&32hV>)I7Xl zSktD>n>T6@)F?EpNl0i|NLb_Gu<(c`O(H@=gY>3mln)(6j*lH0(WgTveR6cxs^;*C z6URn`giM+=so|u?4dcfT3keGk4-W}#6w;_sF!>0c@cNjE(UXJ6OsG{5i6Nd2aT8+3 zj~F{~MEsZ_9&hxJ_*W;ks#%jK^uXxLHG1rWgvLzJvm=TjlcUFmgf$EeDJD?&?&gDy z9$jR}2@^ZMMmf=!tjGi>^m~15Tu7g|3GuIvkBRH}THKh4wTj9_Y|Mjr#=bg!l#Wkq zOi0|QxX~130%bm|$SjOCv_KvNFO-c3my0d^#1TUuR!*U?{7SLG;wFzMCK$g`5KMO~ z6x2{8-G347$Hzraj33`GK7LeG!Cm&S{Y1?tepdz6>mD65VhnFoJsBBd8t7q-Ka4M~ zL-fSBC<@y&IJ8-CXybliP3d-wXiCS;>A)zG0-q*vbf>#NHhN;TC9xFpYZ^~{?1-VS zTNKMfpM~S--aVr0mVQ}ie!KfElllvvZD zEt-T6iES1f8yhn;xJhX2kl^r`A+f>H;mu>CW1EFGj*X5sl%4`$ z(cPwd_VkS#SJ>5XVbloSGh}S^_z7|RPqS4`!{6qEht7j%3J;_E6OV|F;ZL8a7=8oC z#YTn{a($Ss2O;&rMm$=bmLVvJ-za$TX-pa(H^$uUUl5hfSMiZdh#xv}QuO$^w!`QS zvFvZNAl||OO%sR=;m_+)Bb-nb6QW>Vq0zHVrz+d{E=dra>Qu>_2A2#3*|9>oRKM zY!r&?afA+PUwUD@Kt!OQC-kC6w@TSp!|ERrK*HHiy}m;f<<3S8#-m|O)e6CmbK zfy+D}ldHgG0>u0&aGB?0auv8tfS5l8F7td$t^$_{5c8+NWuA}8Rp2rKV*V7k%=0n1 z3S1^Y%%1|6c|Imrfy)Gl`BUIB&&T8{aG3xxe+pdY`IuY3XxXkl0xe8n+K+K;4mw7%W zSAoj}i1}0CGSA24DsY(qF@FkN=J}Xh1uhdH=1+mkJRg&*z-0o&{3&pm=VNjexJ-bU zKQ)WX=izeDxG}T_bP_H0%;}f-4=of8is{m)J7ZIxpj}L+GnS>L-y4j*8p_zs!L-J7 zK4TT*KY980PR#rF?5-W!_M0qYTJj{*@_%7K00ck)1VF$_0&*+;!#fCo00@8p200hJ)YzPDbAOHd&00F^000JNY0w7=$0SJgq*boQ= zKmY_l00M%000ck)1VF$h0uT_Juptl#fB*=900ad000@8p2!Mc11Rx+bVM8Dg009sH z0SE~00T2KI5C8$22tYt=!iGQ~00JNY0uT_~10VnbAOHe35rBZ$gbjf}00ck)1Rx-| z2S5M>KmY`6A^-ug2^#`|00@8p2tYt^4}bs&fB*>CL;wO}6E*|_0T2KI5P*Q-9smIl z009uNi2ww|CTs`<0w4eaAOHcuJpckA00JOj69EW_P1q0!1V8`;KmY=QdjJGL00i8N zfGi<@s_RMw6>%MFqyz#W00M4B00QDx)rrD@00@A9>j*$VT*n$IfdB}AfLjrOfVfq4 zqA(x;0wCZz0uT__u|`TD00JQ3Rs_1jKc$krD`i00_7h0SJg& zRVNAq0w4eat|I^eaUE-<1Ogxc0&YbB0^(NHiNb&Y2!Md=2tYtw#~LYt00@A9TM>YO zxK(wcFdzT|AmBOz5D?d~MoJ(60wCa41Rx-8Rh=jd2!H?xxQ+k>#C5EZ5(t0*2)Gpi z2#8x%Ckg`sAOHfcBLD$$9c!cn0w4eaZbbkB;#Sp(!hiq>fPm`=KtNo_8YzJQ2!McF z5rBZWRdu2;AOHd&;5q^j5ZAFrO2rUJ&`vGp3YKtP-XARywb7vu^AKmY_pPXGcU`YJ*OK>!3mK%4|1 zAmXeSKtP-Xl-6WRCX-p?9ezPT^aQH!Od3L&3~+Hq zzq=4s=9Y`oL~H4(!v2uOs$Ta?f0F3xKrr3V3VF?1y7cmn9c zay(PS2m&Ag0vP%AOHd&U=sldh)viK2n0X?1V8`+f_nf2KmY_lz$O9^5Sy?e5D0((2!H?t1or?4 zfB*=9fK3D-AU0t`AP@in5C8!P2<`z8009sH0hKp+4DAOHdo5ZnVG00JNY z0yYtVfY^i$fj|HRKmY_FAh-uW00ck)1Z*Mz0kH`i0)YSsfB*v)C^@f`AkVRNt9&p7L3pY#Q0yWR+yAlI4FG7s+x{iL8d~Ewbrk{m7Ojn?d#_ zS#Ee7+*~1>OSTMIf3o+<-XrTpwjx<>!Y)LWIaqLre?dSv0@ghLi^?VcV}Sty5Ma!g zAaH9i{B6k~3I@0Gh6DYtcM*QL_!yf&I^h-u1l*1Q1jOyC)L{im&`xz&T%wO?CFQW3 zi*uRe;`ERl2sniR`mvmX6_J7f2!Me23E10@g^_ge*D-Pk0w4eaq9gzT5oNU?Qy>5W zARvAM5D@X#5poCuAOHfQB!H20QC5qSGqv(?BBjvU$*Gt|-<$PoxW?c}b7*_f@lS?x z7ya&_IkasGrM2?#c{(?pa#hHS6Hda21?KWP0W6|)EdjJZT+17&fdB}AfZGs23&d@z z(x!r3pioQ5@|A9r$Z|8S;D_-RLI~MSWcQH`BKtDg8)WB`!W+`uK>-rU8p87xd9%rKGn_16mA91ad9p3Yb|z~`lMdNb{tua1 zWHZSQCRo0$1?2cpW2tYur;|1R!00I&sK!fITAEolmqOvR_#{iXP zB#Fq}qOQZOWC9S7k{Q7V2!Mb@2~fAycomI%n9Dwi$OwptIYPx>gMbvz23J4;1SC#C zqfkt=qzF0@S$BDcC4S-OApr=;Ll!s!0T6H_0yOg~ipD++U-BxkWd3UP6dErXUZO{M zct8LG@&E-6KmY{XiGWfrpJGjU_`o_%^_@#od|O)+&XfH@Ks>qjQFah;V*+Z0!s-Qs ze5$XnQoV@=-wVy!a$`((6%7Jn7d`|A0T2))0cu6Gphbhm-zl~@n<}|nL+eKVvN(VT zdxU^^aOI=uAmFY96f$|7J!SJ&D63Xje`UEnad<*l2#6=tGRg=79!7vxn_JU;m7q0j zg=xykPo?fp&6!@N?l^x)04nm31&%-f1l)uGEdU9!wBu8>P>HQPJeZ<;*yJxb=PA_Y|Bz{>_fxOnl$Ft+^HV>)TMu#z zrDJZnsjc_(c)@9j|MsZ9Gl_>aq!~)*gX!E5-cNtdBPzrHA=8E2g6OvyKOs}w$WMU_ zW6zOWI-OK9IGFi0I)t2Qd9A$&V>jV%Sa~>+O5y4cAM|GZ8m=+WqX&C0I{wLU?xNov z7oy6vDU{aA!{_O|dLcLGpM(*MgU(OJGgGMk%Z(hROzyOoWB$avmzDW=QqO9aq)ynH z=YLUUMHg04RNw{zARt)+^iG`T%*^xGXxg!Hj5E`aO59FUX{iA4(0o2a<#u)5r*)qvSe||dp!^`< zW(2BkxORhPOI2`Yv9j~>QZ99?6yVG>ToE_Auv|eIX@CF-NQOYZR_pW@9u1`XO9qG& zGwSN5yAz9os6hY(K!7n4kSosSz$X;ZdJw}*J7*^++W1}F4=!2|F3H%+!+bjEaF?X@ zFp+yS<7f86Q<`#AFhw@V*v$pU%V-AF@dxRwJj|!H4t#L#nuGJiVu^85#9}O+moj#9 z!SQ@zas5F$D-XAz<1P=*U2|~IcvhTil5|Zf`AmD1Qi^hRYNR2}DUD;!PDjk~LqJ?x zJ6u4fx;9C6fzm4^Aap*MtgEN-9;7rM3J7CdKwQmY2c@}$tg+z&Qr+0W1;=YBjfVol z7#9#%vsg-L@_e|Rp%%yMLy;}{vpK1DY(;Mo(QJ6z|KW_U&tNgCzRobF7 zmY?)y%TGp<9YOYUvRlcXB%4X$lgVBn%j@NUk>&kk7s;lPzq@3Qko}2l0@=P~d6$OZ z2F(z`s{k1Q0T6IBft>vOQ!2TFFZ6Mm(K1H=7h#s4iHQ6w-_B?BO6NTFb zWP6bstiCSAl00w5q50U8JSpSGBBZhJFz zH!G8+=em5%+S+WNrT`194P=df2#7Vb;1>iyKr#fV8|y+wZZ4mxc~9M`Vw{{Bh8UfIxDWFBp#cc6}cf^2#A3M zen9{P+>XGV9@W-UE99ah3P$^+uxsgQe8)ydMCZ&n&Qs)Lotc0kZU~5h1b#sP1l*E9 zR(|dfM?CU*%Y(N-L~i)N%m8YA+;c@fAt0`xj5I(11Oy{Mt&nXrZt;gb8Mu>~A%#J6 zUc{j!9)H?XR%^mSK&+t!zaRhtk|aQ%5$r-!ZfzP+PR`C^_pX$MuXm(jDW>TCNF}U-cfZLOJEEF;`5D+2hBW(}>0YL~{f2K+}b!YujRN^->(phF+Zc&%S z;sRH_SJ3S|}J1o>3m`!r=&geHaZ#6MU3l$gYWb3zM* zQ-A>h5D+1O1g*nIhpFiyr>4jEq#y%iOjfASDb)1fD+Wbo_(#*{k^PX=qNauN+LAd)Z;5J^-MG6@18 zU=0Bhj*2R|d?^V>C%H^kly)%M?_IaxLo3V{yH}_@yUA`Pn@FO;zW^mUBeMoxG4&w1 zLJOARm4`zq?F0JsdDE0n81y2mA}T_%T4bKQDxQ?9MZol5BH*ff6|{D!W+`$>G5<8 z;rR<9?+@mNv*C;k{5U(Ft|3i2e87T-;bD0mcw9S(MH~8kV2H(eI;l?95WXn>hVZ;h z+@G4PH(6ePhAswE{`@-kXDEw-A33owVsVfk=*-H)W9j!}{h1-pw$!S#N?Hob(^|CP<01j#ag`&>8Cz=j-x6Q6ab9oF z(C;C#2gNd`Va@ZO^8JY6H#Lkf1o#8O*zku9x5mcsP%^mjhZ462!yk&KehlJc@MG?` zsPLxo@jn>jKN|jrWa`Ht5{9(6aGClsTr29=G&~mtZViTulxcjX{EAA`Ac}@~%>5P> z-ZV`vaK?uJ{h0bO|VlgZxW3)CHRr-WprUlw(5{^5C8#72$WrWDNG@k zk5bCyok%nuvqm%w@fUoPHq~13xJKcBHpGG7M+u+{>rp0f0Ra#YkN~wZno}#|1zPmi zhFTg=(4e^Sc$n2WGl)l)^_yHRrq&0aC;?Lp0SHJjVBib_AmCgABoq~CyqnLKicl~a zZ)MaW!6-xGVLC8vb4Co}VZ#oIeDIu)kHMHB;JE^_{A6X~(%cN2M=k}bH8p9bNBKuC zzySn6;86lHR+_d9QPZ5La%9WUGJrr5i?U<`6f(KDBZcD8#A&{{{=*(NhzE~uGqpa> z6%{Ys9un|e0b#5yVfgMLijt2s3MKPbna;E=IUIa|00_92fSuyOGyAb?b1co&9M)8n zVDZaJFwe*`2!KEd1RNk9$0&)-B_xB(M8J~;qyp{$5C8%9BH#e=;6P?m_Z1%lfhmE2 zCwF0)t{5%>6)uAS2#A@0jq5y2i*%bDzBP5kdC>$sSwJjWxmq+fxPkx(h>3u1dlFh< z&NoQ3)6ApR2j6oJ#sUJKEFi{vN?Cx4dqJz#LQwDq0xl!KH%8=pTk&5z4c~syP^PYl7`i9;m#;`c=x_l$eMm@!5K-}6{p#oH91VF%Y0;xIK zy5@)3KT6A^<34#4GUrW@qCOZS7(;p%@-M#qNS6PK)`&tt z_O4%bl?ZhwyNbZMsyV_x9cGoW@Ee#`#R<=a324q9WTn=9!erT=H=RFV}aHD5lOG=l2c5_1wkIklzdbyMAP{oUB4wm1m~1 z(rf;YrH6H9X-&E^t;*0yFXYXwPxt~4E*^#@=7HBvv08gpk$BKOlXRJ#iRjPLO7l-M z*lhUH^RqUGc2Gci&hb{p+hoU+HGQdnj@*7HD@r5yhtUaM4I*=tY;jFB-Um2|?Dr%j zyfy0>v;5>Nis&_KBdg{Y!05vu76;?qFus5P!o2@Yq^}ehJs{68%cS0~bf$Ri(aktiDhOxnCo^q+xua3K?Q^W@&WTivP~_Zz(s~n zwOc_j^DOXh-plfnQ^>;%^PV2PTFKj&`C9N{5apdK%=_?m=5_K9#vhi&8Quy>i5kfA z0?QZY#jV^G$+vYaFz=(gtRLf`2L=~h{zjhf6DUb8^7Q-6_n%GlLa~EhD6G4U^Syjo z=2NX$#*;1G`hH2HzZcz1D{Ap;Qt#@7qx6*%#nyK|eXQqpjZk)-1lEcc$(tk|f=u0| z=N@lm^d-xmL7~MK?BNC8trh8vo*2mr2tni@y za{3?Y=eNIKUrP^+%zEK0qhVW?Tc(O6=qQt}G4CUZI`NcI7mU3or*bWpRjV<}su{}i z{L9!IooK?}rGfG!nr+2}qso7~Sw;Q=W`)ouR|wLHNfN!7<#_oOSFY=Hfww-K(KTLN znv9-#{APWL>~ONxi}hIah27-bkDhrqioR@{t0V=4KyuOenyj@DjVbZ&tVFV0gnlNl z=jqpSv;1T?^8B;q0SbKb&k|OkR*23}pS|+dC8oY|lBqBLtNXQY4!?|LS8u>F>a}3m zK@BC_UBUbFxlpJtpCB$r8E=g_%j8t4%`)q@V41ZVF|Ec`Gr%n?4WEkab$UNdn%GAJ z;#Ry+SQh;QU%6$gvg~U0$<||BShT95UvM4-5a9v;W4@hwkEzZdVdc;4XQeM6Cl~H& zqoFUZ->+DM-K$!UCDU&jy=d?*FTEK`;<3nZ;>=%q*70W?f7bLN+m`HuOwh5l<}w9% zk)Cyl)&?xjzoZ0&U_MR-nMQVSaRsu&{S1XoB)f;~-(>$I(8a&%EkD_W{QO1MLc^QW z_gJa>H(15gJ2Xq^0zD*8(Ru~mMd^6M2Til9Jz4Nzp#J~7QWYGZhy!Be-E+!2mvutQ zCkUv>DsR($ZZaV5fTYYvL9Mv#s&!Zv2`}fCSGtmU|4zEfXH==_;ioxwm_*|wlR0L# zkmb3h2WM`%$5>AJ>MXZ>HL{Ph+_H~}_GN@m-Bw=yk7>^yW&y-YPHSPDVe;tjDY-%o zmKjj)TBYNAM#;4KhbW!1^o+1Wr3&Yio^MKWsZX{s*$A?&$krw6Xv1H=-=w@>v^Qc& z2nc~~Km{F5Hkz!fMgemvE#6(pciZXi8S2VY-nmLI8N`gHY74y9 zCzW!4mh2_H=`7t-VS!Zb45j4hUb8ySwRCw3a6J%?Xb=C8YzZ@?i z`xyzzVfwYnh*m&IG+T(z$;4k&I1}#1?-|{FLt{=_dePGI$s=<9lC3wy#d!KL=OO33_%FYI_=g9!B}!p6ltcQzd#MQDkmVDMteRCODghzUXh?;4T5ow1&hq)L@Ko9NKlTO zX33=j649d{UslfthFa@?c^@DQ2!H?xfPnZ3+@dV*Buk5sbfR)-&$^X%O(GC0`cr8pj^Ji*80}w@JU`UAOHep3D9ct{L~C##Dizd+h;Gt!)y)Nb=ww-=9x^T;&Z*NHrkv#n}o;egb)A(BuGG;MO{`Yv{Q)j2BNMmhmX%lHX`D#tCRJl z^gLTYcugynd?Ard2HjXTVM8FP6G+fbHD3fnzg4;bgC52?^Y#6k4G;gq*a^puCjb>u zD7V{+Rhx~$5L?+Hocj^r{Z@I&X-x39E0rpXX}mhRA7Sy82m+!|EU~p(bblx|45HYY z9>RfuSPAfHRt5c5)(TQ%2xIC#JUiw%;>`6p%?IVCO5CgwC+i$Y0EStYht_Y-WXv%2-4+aF>kpORXq|;c37-t^( z`0in&TH(79Hr#g@1F->GkwH}IUy5|M&@mrlAr_K?H>nZevki6AbHykg8ch;YtJ_MA zsk`JCU08;iRHb zy24DBqWHML4FY0-9M|k5y~$YhNiRcC-7o)RqisiFK)@XcXtQaC6(8#mW2=J$=IcFt z{bFgi#FOs8TdHM%fEd@DT5Z?9&wN^=DBskK%ZOtTFh_u|>)>1H@|_UHGQ+B<{QCMW zrL9Vq7Ig`3t|Nd!bYs?E{8f(KyL*#%fwOruT&7krxv!UTtR8pF;XPT}o7MK$Fn_gz zC1>Zc6ZbRONwP5FC7}Co2X94-SvWX8AKK??RI8@qB`KYp**l010Wq!XPj?<=8R_*+ zovk^S(-)^?9{9y6Idv=as$Pl>t5=>?^nP@8{)N_%*Q8)4GX_1h3rZpj}Q=N)Y9zj$t*j|-=3&FX^$)gwLBKJ3Sy1Q7KrA) zJ$u>4pEt1U*RJUhDwRq$>5VC@dQkNTNVi>0W)uIogqFwyBCb8)qM`fFtk}D&3|JJ3 zJQ_3)r*V(J3{Lp9pMbprfOiCUvM6V3+;o4OVx0PDO(87w`+V&(55(h}B}>_t zi@tau9z2RR?b;O*5AN2!N&xFxEs!6=xR8MEQ-SogC%!09^kW?cE^@h+`T4&9@h|`p z&o2a|sJc)nCe!yY?c3mrcMs4UdUrQ!T74f=r(gm9j_rwT*N&Z!6uE4%L&wm0<*Ij0bcldEF0SHJ@y!rQTy?uTv>XJ2X zy1_{|UmMc2ye|u+nO3|hykY&%MdIOITDkNgd4oB6l`-5>y$FI}P; z(07ZB>B9N*?7itT*oJlMb=_N^d_0@+0d{VlEahEf7&zKbz_9V5q{cdoi|yzC6@A!z zvau76#YI3VuIz{?H$$5zvAMf9u+&t$KTgBD4io`2z|5QDZf4483T~m~`^jcYcIo0p z#;w8JCjkkd*;%Ei^#bEq0=kwjpIs&87Z+dOpV=!dhd7oDVskVBw{L+YXs6oAOsN`d z_kak`f_{mwow*TZ@O!jUkQ-mWhjVx>$*8X{ZDC9%CC9$;CQH)t7(X;OTDO#3YJ(l7 z73Ax;m5pp=_uHFkv^aM>0k>BXyO8mwh_}yTyW-FX=C6dh=`W6mOhj>#647QmD*U!IjITSgb|RB}Oh5fw>#SN#DwJ$VMD|0zMzrbSJa-W~y#gphBTw9lLb4sAyfEd0OZ1PF61a_inmHVc@9< z0(^Q^dX|3YLkW(On-$Zw) zSxkxHEK&m45X;hvnUi>(Wo1^h)O$((_^yk5_eFOwK1w~{4Yv~xUUWWR7gMe*xCxx&1p?w6u0_ZC z`R=tWEwx9{7j1KOClTQ-k=VNB+4Hr^GL7QVdCO_k-SySU+wAL8w^=q?9wpx}d~lq% zIOq#cZntw7pV#i|_aTYLt0jYg4`&g8fH;e6QSr{-t6>=#Nh~MZvgt-e#ZiQt+*`wB zUg{#Qh`@=uN)$3hhFq!MDpx39q88X=IoTP1(xa`|Wq`gUBrPlYv)Ta~7C?(VcuRwS z5MTB}(`j9d^BPB;mn_xH3T3auHjrbmASirqGjs63Om|=ru&$P zAHCSSa@jg{CCO#-TxwlhlgpLcWD1>N+$*UxszrMpLy$K3RnL;tmQEjVw1WM!miNO>HVMURWfod?E? zs!WaMI#X-HNIb3=bs28bB!F31w$|H%ePJ}M_h))Ady%F2t6CM)<|}9(tDMg5ezKN0 zE~1;oN&ggdIdOHML}j@$nLJY_m*1n~n@lc0M8|6~UJfgIuUs3-?!EM2hup064f!&y zNC$0fD3l9>=r*h3Gpe!+^$NN$2LdGJ>l-MU1eHX zGPNR(()BYmE^);uj8hN2mmY}C&&f{6%ge2zmGdDI7^?_S>yLL+vRq|-0QpA8LE{$FSruPU_yz2qMrxz1K!DAA(Y&}-SPo$3T@2y-vOEb+A)e9tr9MMcLRX*8N!wEa&6&9uTzBk~)-UI8im zq!4*wkv-c9Y(IbXi5)jHMyX`7j+K0r)hl_cyvup3WZoneCG|;F^3|}&ioR_3jWpXc z;Lag+T~ss$m(Qk>@`pGaQAfVMTbN4Sg$Bz#R6QL5Q95y83`9D0^+-s?x{RQjtGGL2F}6Q)X%iASO@Uwvd}g zpK#HjZ<**s!=2Y{SX#;a6P~_)6R68-wxvPfPj~|Mwm={t4ywc7PMr(P%+vlxkLfVJ zO-DwaR!gg{^K)q%ql{c_9=Y-1(_Ff)n?FU{deH1yS-y-$(&^!@n@LNLBsrgL%d^C% zRe961pC3`HK~9UimEN?vPbFt^H3bONC}rjSePtT8>n1~u+k2jEzwW`U5S?i7mIhh= zp?6(PI_L(Hax@yR8_dhA4Ru)^mvS*YG4Bu%5!E`0h|i6Ty!{ou6?I*eE&d%2UoS7_ z3L&{k-=65S$-}y}4c$|Gl{i`&g>P%#s`d5zmiDuHvG4_)NriyD0)p>>IIyPMJy9c_ z#G}!rwA@n;jMi*ad3kv(IW3Ldp%0CxXJ%4+#(A&B6?`;oKn>Gb$7Zu9$%|TI%2$Tx z)2Ctc?xpI!Bnj~_sY;bPmH7vBf_Rv4@vu{LVL73~sU@O5m7H_ri2zj{Cq!>HO8%`4 zwMxajsB4S=I@+YuTX@vQlkW8$4@1`!AJgEi3@3`PqwB*sz69-5<5MRd^QMQ7?^gO$ zXLlMccgW{DotRrhT>=4n-#VcpB>=YdGAW7TU{coKp=C6ow#H#^IQ|h14Jirl<5DRd zH|uWG#UwWApKJDJPq^@0HE7C|ZrT)A4r+G5SU^CnPGR0Y18KlxqXoX5Sx)H+e=#Y9D;@q9$y z8gqkI7LJ* z98?`jWe)T4na+ksOm|R>h(?42ARr>Fl;W~9WYghZ9V>gUuIQ~4QCpyrvpHgg& z2LUBP00JV3YI>N-*iDD(gqGGEu3uWix8s$(IdLzOjoEdX(W*(^_Y7$fWWiUl%=Xuj zXYQYigT8x7cO&b?Lh?|gS^#gKGfb`K!f{ZlML@o-BLD%hj+ZEWPfa|m&SLTtovQi< zi6TRGW@7Exdu+}jfx5En%sYY;0gWb!c0e3S!m$iIJc|GXM8fqQv-$9%-qn0##X2rx z4chnrxz2vQh<)F!VJ5g=l`5Tt<8AtA$1K6~f(!^k00JTicE=?ev-wEJ@N(*nHT~6& zTd3i<>{%RBE{$1C{QVL;d^^M9Fz_f40hKC?gkuiPl6W0k=L%E@B1skkB9e-*CmSE_ zJ?eEmOM9eq72n!shYsvX#06oKv-8-f9T(Z<)NB`|h9s;epd{h&_FBjk%JEpwVKqIs z3;+RfqiTJ5)8WaTD|x?5AMKFLEG<*~H`8<2=$#kYtqiO;Gi4*#xk{bJyfmNDEVeh; z$W~Z(FIYv0FJTCX`0B*29L?HutV~uu+aFocySiNw2^jc7N;Vs}>ms|Cg=dw3)xZbG z@6sZ`59mW3?+Ka@BraM45D?K;jeQv#y7kDIh_afG9`{pOw?y2&WE~dzTyhqhu=^53 z#9>4&jEMJvdU>6uPXoS5!hu;;7Gm>MZx9eSuKBO_pHTmolDoNWMeo)ch4gy0AR;cj zIr(P+y}WkN0>Du;9ru_EQ$|t}CIA7EaOGK=^_lyQH_9z<8>A6%M6wHUS8Tvk8kR{xQG(`&_*M)xvs#YF`m$ ztHi9_&B|qy_grD8Xw^E50|@ZehQ@0jZL9nS8y24EfEW>lhZ2B*cwoi9z30DiK|acN zAM;guNu~H1x%q6y-`CilYj{bvsm}P=hQ{kMZK1q`wttyHp96#%+XNDUj0r$MjA?k# zF@3N~w*5xtq-x&sw`vEdy`@lezWvo#$8NLlPTbLLg(ZbN3!X=f<_-;rucj`lX*9NR zMezI}18zkC0^(NHX^&Y`PA&gG`i^dC+hTBSs9jf6*!%mhv$P!AZ5O660p8M3 ztM8I%Y@%(CXHXZ_zlA;E3*VFU92ri|n?=8~rju;^GLG|f> zU1i4|y%oG60V)0JL_>3px~G0-O64pX3_l~K>?5zzB_LG+NzhJ}E_6xcocg!|cHheU zxQ4ItxvDoK$&ocb0O=|UVntQu~_QC&z`lzrSrDxp%qA0CY zRYVlIbOk`+_1eS$SIa<*{3A^_hDt zPyeMU5oOQ%tjOj0G?;ON`lbFPyPWI_7SjYPVyq{L5Kbfj0dXQ+ca55h$*Gjwc9+s~ z{#VUg(X6(=+MD;qi@}_|pT%a=7pnffBeA{VRjN$tjyg|Mrhcc7|NcmOiEI%APvijv z%n^Wqn4^Ph77-vpDZBetR=iTC9az;{5%id^N+qz6v`~7RNSIg#c3v2kXXoeFc}HP(B|nb%?lC(0VffF zfH(;(qAg6||NcBt>&U(AI6sA~bvcc^R%LHRDVpJ>a>X}!lc*K)_3=Awl1ThP?tik_rdbY43lV{U2nj$yL|7@Fl%;Wr$Eu!7&FSZ( zkTvsD%IXBFWR**)rtnuOWHeyUln!XX+@vp6@ulp)6elFS)xZa@WwKoQ z%_1R4qu={%?W61<00JOj2LT9(9qI5JlQm;Ye9|S-E1Oy@g z0TBp1k_7<}00F5JfPhH729bXd009sXhyVmcAn-^Q1V8`;q)q?=BJ~!3mK51f)&?0wVPqME*el1VBI_ z0uT^^z#~}@009t?IspiX)N2s=2LTWO0f7iWKm-DhWI+G~KtSpQARtn&LF69DM7j00JNY0)h~LfCz#ei3(33K|59WOdtm!AbkQ*5$V?dd5|BF?0XxIq8}K){U& zKtSBMa#1u8009tiCIJYDGwC935C8!XaAN`x5I3$|6b%GG00f*#00QDnx`-PDKmY{X zm;eOCjVl*L0|5{K0cR3`fH;#b;syZ_00B2900D91%0!3m zz>Nt&K-{=;Q8W+$0T6H|0SJgQ=^}0r009tiV*(HmH?CY14Fo^{1e{3#0^&@%h#Lez z00i8a00hL1D;Grr0T2KIXA*#bIFl~o1_2NN0XHT90deEXMbSV21VF%<1Rx;Jq>H#g z00cn5jR`0T6Iw0uT^4u3Qui1V8`;oJjxz;!L`T z+j;^C+NstDLI@B50l^4BMFc~Sgh2oVKtS3AARyANKja<+KmY^;BLD#r3_TJC0T2KI zX%m2eNW1=!dk_Et5D<(21Vk|ONEie_00g8>00JWI`a|wP00cllFai(|!O$aN5C8!X zkTwAbh_veuxd#Cd00F@WKtKdTkAy)01VBLA1Rx;Nu0P}+1V8`;1S0?e5ez*N1_2NN z0cjI}fJnRkkb4jS0T2+300cxZ^hg*4KmY`!O#lKS?fOIRK>!3mKrjLj5W&zRVGsZT z5Rf(j2#B=n54i^c5C8$e2tYstLyv?(00cll+5{jV(yl+`9t1!D1Oy`h0TB#65(WVf z00C(efPhH5{*ZeR00D^-nB6gcvqZCAINMN>!aQ&W0w4eaAYdT@2#AHC;0**o00cmw zFaZciVIDXG0T2KI5U`K{1jIs6@CE`P00JOTm;eN%Fb|x800@8p2v|q}0%9R3cmn|t z009svOaKB>m 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + fi + + debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)" + debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" + debDate="$(date --rfc-2822)" + + # if go-md2man is available, pre-generate the man pages + make manpages + + builderDir="contrib/builder/deb/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-deb:$version" + if ! docker inspect "$image" &> /dev/null; then + ( + # Add the APT_MIRROR args only if the consuming Dockerfile uses it + # Otherwise this will cause the build to fail + if [ "$(grep 'ARG APT_MIRROR=' $dir/Dockerfile)" ] && [ "$BUILD_APT_MIRROR" ]; then + DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS $BUILD_APT_MIRROR" + fi + set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" + ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + WORKDIR /usr/src/docker + COPY . /usr/src/docker + ENV DOCKER_GITCOMMIT $GITCOMMIT + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \ + && ln -snf /usr/src/docker /go/src/github.com/docker/docker + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN cp -aL hack/make/.build-deb debian + RUN { echo '$debSource (${debVersion}-0~${version}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog + RUN dpkg-buildpackage -uc -us -I.git + EOF + tempImage="docker-temp/build-deb:$version" + ( set -x && docker build -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . ) + docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/moby/moby/hack/make/build-integration-test-binary b/vendor/github.com/moby/moby/hack/make/build-integration-test-binary new file mode 100644 index 0000000..2039be4 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/build-integration-test-binary @@ -0,0 +1,11 @@ +#!/bin/bash +set -e + +rm -rf "$DEST" +DEST="$DEST/../test-integration-cli" + +if [ -z $DOCKER_INTEGRATION_TESTS_VERIFIED ]; then + source ${MAKEDIR}/.integration-test-helpers + ensure_test_dir integration-cli "$DEST/test.main" + export DOCKER_INTEGRATION_TESTS_VERIFIED=1 +fi diff --git a/vendor/github.com/moby/moby/hack/make/build-rpm b/vendor/github.com/moby/moby/hack/make/build-rpm new file mode 100644 index 0000000..7fec059 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/build-rpm @@ -0,0 +1,148 @@ +#!/bin/bash +set -e + +# subshell so that we can export PATH and TZ without breaking other things +( + export TZ=UTC # make sure our "date" variables are UTC-based + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" + source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch" + + # TODO consider using frozen images for the dockercore/builder-rpm tags + + rpmName=docker-engine + rpmVersion="$VERSION" + rpmRelease=1 + + # rpmRelease versioning is as follows + # Docker 1.7.0: version=1.7.0, release=1 + # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1 + # Docker 1.7.0-cs1: version=1.7.0.cs1, release=1 + # Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1 + # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH + + # if we have a "-rc*" suffix, set appropriate release + if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then + rcVersion=${rpmVersion#*-rc} + rpmVersion=${rpmVersion%-rc*} + rpmRelease="0.${rcVersion}.rc${rcVersion}" + fi + + DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported" + fi + + # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better + if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + gitUnix="$(git log -1 --pretty='%at')" + gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" + gitCommit="$(git log -1 --pretty='%h')" + gitVersion="${gitDate}.git${gitCommit}" + # gitVersion is now something like '20150128.112847.17e840a' + rpmVersion="${rpmVersion%-dev}" + rpmRelease="0.0.$gitVersion" + fi + + # Replace any other dashes with periods + rpmVersion="${rpmVersion/-/.}" + + rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)" + rpmDate="$(date +'%a %b %d %Y')" + + # if go-md2man is available, pre-generate the man pages + make manpages + + # Convert the CHANGELOG.md file into RPM changelog format + VERSION_REGEX="^\W\W (.*) \((.*)\)$" + ENTRY_REGEX="^[-+*] (.*)$" + while read -r line || [[ -n "$line" ]]; do + if [ -z "$line" ]; then continue; fi + if [[ "$line" =~ $VERSION_REGEX ]]; then + echo >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + if [[ "$line" =~ $ENTRY_REGEX ]]; then + echo "- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + done < CHANGELOG.md + + builderDir="contrib/builder/rpm/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-rpm:$version" + if ! docker inspect "$image" &> /dev/null; then + ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + COPY . /usr/src/${rpmName} + WORKDIR /usr/src/${rpmName} + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN TMP_GOPATH="/go" ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN mkdir -p /root/rpmbuild/SOURCES \ + && echo '%_topdir /root/rpmbuild' > /root/.rpmmacros + WORKDIR /root/rpmbuild + RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS + WORKDIR /root/rpmbuild/SPECS + RUN tar --exclude .git -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName} + RUN tar --exclude .git -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd + RUN tar --exclude .git -r -C /go/src/github.com/docker/libnetwork/cmd -f /root/rpmbuild/SOURCES/${rpmName}.tar proxy + RUN tar --exclude .git -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc + RUN tar --exclude .git -r -C /go/ -f /root/rpmbuild/SOURCES/${rpmName}.tar tini + RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar + RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + --define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \ + ${rpmName}.spec + EOF + # selinux policy referencing systemd things won't work on non-systemd versions + # of centos or rhel, which we don't support anyways + if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then + selinuxDir="selinux" + if [ -d "./contrib/selinux-$version" ]; then + selinuxDir="selinux-${version}" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + ${rpmName}-selinux.spec + EOF + fi + tempImage="docker-temp/build-rpm:$version" + ( set -x && docker build -t "$tempImage" -f $DEST/$version/Dockerfile.build . ) + docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" +) 2>&1 | tee -a $DEST/test.log diff --git a/vendor/github.com/moby/moby/hack/make/clean-apt-repo b/vendor/github.com/moby/moby/hack/make/clean-apt-repo new file mode 100755 index 0000000..1c37d98 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/clean-apt-repo @@ -0,0 +1,43 @@ +#!/bin/bash +set -e + +# This script cleans the experimental pool for the apt repo. +# This is useful when there are a lot of old experimental debs and you only want to keep the most recent. +# + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo/pool/experimental +: ${DOCKER_ARCHIVE_DIR:=$DEST/archive} +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +latest_versions=$(dpkg-scanpackages "$APTDIR" /dev/null 2>/dev/null | awk -F ': ' '$1 == "Filename" { print $2 }') + +# get the latest version +latest_docker_engine_file=$(echo "$latest_versions" | grep docker-engine) +latest_docker_engine_version=$(basename ${latest_docker_engine_file%~*}) + +echo "latest docker-engine version: $latest_docker_engine_version" + +# remove all the files that are not that version in experimental +pool_dir=$(dirname "$latest_docker_engine_file") +old_pkgs=( $(ls "$pool_dir" | grep -v "^${latest_docker_engine_version}" | grep "${latest_docker_engine_version%%~git*}") ) + +echo "${old_pkgs[@]}" + +mkdir -p "$DOCKER_ARCHIVE_DIR" +for old_pkg in "${old_pkgs[@]}"; do + echo "moving ${pool_dir}/${old_pkg} to $DOCKER_ARCHIVE_DIR" + mv "${pool_dir}/${old_pkg}" "$DOCKER_ARCHIVE_DIR" +done + +echo +echo "$pool_dir now has contents:" +ls "$pool_dir" + +# now regenerate release files for experimental +export COMPONENT=experimental +source "${DIR}/update-apt-repo" + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/moby/moby/hack/make/clean-yum-repo b/vendor/github.com/moby/moby/hack/make/clean-yum-repo new file mode 100755 index 0000000..1cafbbd --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/clean-yum-repo @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +# This script cleans the experimental pool for the yum repo. +# This is useful when there are a lot of old experimental rpms and you only want to keep the most recent. +# + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo/experimental + +suites=( $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d) ) + +for suite in "${suites[@]}"; do + echo "cleanup in: $suite" + ( set -x; repomanage -k2 --old "$suite" | xargs rm -f ) +done + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/moby/moby/hack/make/cover b/vendor/github.com/moby/moby/hack/make/cover new file mode 100644 index 0000000..08e28e3 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/cover @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +bundle_cover() { + coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) + for p in "${coverprofiles[@]}"; do + echo + ( + set -x + go tool cover -func="$p" + ) + done +} + +bundle_cover 2>&1 | tee "$DEST/report.log" diff --git a/vendor/github.com/moby/moby/hack/make/cross b/vendor/github.com/moby/moby/hack/make/cross new file mode 100644 index 0000000..6d672b1 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/cross @@ -0,0 +1,46 @@ +#!/bin/bash +set -e + +# explicit list of os/arch combos that support being a daemon +declare -A daemonSupporting +daemonSupporting=( + [linux/amd64]=1 + [windows/amd64]=1 +) + +# if we have our linux/amd64 version compiled, let's symlink it in +if [ -x "$DEST/../binary-daemon/dockerd-$VERSION" ]; then + arch=$(go env GOHOSTARCH) + mkdir -p "$DEST/linux/${arch}" + ( + cd "$DEST/linux/${arch}" + ln -s ../../../binary-daemon/* ./ + ln -s ../../../binary-client/* ./ + ) + echo "Created symlinks:" "$DEST/linux/${arch}/"* +fi + +for platform in $DOCKER_CROSSPLATFORMS; do + ( + export KEEPDEST=1 + export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + export GOOS=${platform%/*} + export GOARCH=${platform##*/} + + if [ "$GOOS" != "solaris" ]; then + # TODO. Solaris cannot be cross build because of CGO calls. + if [ -z "${daemonSupporting[$platform]}" ]; then + # we just need a simple client for these platforms + export LDFLAGS_STATIC_DOCKER="" + # remove the "daemon" build tag from platforms that aren't supported + export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) + source "${MAKEDIR}/binary-client" + else + source "${MAKEDIR}/binary-client" + source "${MAKEDIR}/binary-daemon" + fi + fi + ) +done diff --git a/vendor/github.com/moby/moby/hack/make/dynbinary b/vendor/github.com/moby/moby/hack/make/dynbinary new file mode 100644 index 0000000..1a435dc --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/dynbinary @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +# This script exists as backwards compatibility for CI +( + DEST="${DEST}-client" + ABS_DEST="${ABS_DEST}-client" + . hack/make/dynbinary-client +) +( + + DEST="${DEST}-daemon" + ABS_DEST="${ABS_DEST}-daemon" + . hack/make/dynbinary-daemon +) diff --git a/vendor/github.com/moby/moby/hack/make/dynbinary-client b/vendor/github.com/moby/moby/hack/make/dynbinary-client new file mode 100644 index 0000000..e4b7741 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/dynbinary-client @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +( + export BINARY_SHORT_NAME='docker' + export GO_PACKAGE='github.com/docker/docker/cmd/docker' + export IAMSTATIC='false' + export LDFLAGS_STATIC_DOCKER='' + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here + source "${MAKEDIR}/.binary" +) diff --git a/vendor/github.com/moby/moby/hack/make/dynbinary-daemon b/vendor/github.com/moby/moby/hack/make/dynbinary-daemon new file mode 100644 index 0000000..090a916 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/dynbinary-daemon @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +( + export BINARY_SHORT_NAME='dockerd' + export GO_PACKAGE='github.com/docker/docker/cmd/dockerd' + export IAMSTATIC='false' + export LDFLAGS_STATIC_DOCKER='' + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here + source "${MAKEDIR}/.binary" +) diff --git a/vendor/github.com/moby/moby/hack/make/generate-index-listing b/vendor/github.com/moby/moby/hack/make/generate-index-listing new file mode 100755 index 0000000..ec44171 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/generate-index-listing @@ -0,0 +1,74 @@ +#!/bin/bash +set -e + +# This script generates index files for the directory structure +# of the apt and yum repos + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt +YUMDIR=$DOCKER_RELEASE_DIR/yum + +if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then + echo >&2 'release-rpm or release-deb must be run before generate-index-listing' + exit 1 +fi + +create_index() { + local directory=$1 + local original=$2 + local cleaned=${directory#$original} + + # the index file to create + local index_file="${directory}/index" + + # cd into dir & touch the index file + cd $directory + touch $index_file + + # print the html header + cat <<-EOF > "$index_file" + + + Index of ${cleaned}/ + +

Index of ${cleaned}/


+
../
+	EOF
+
+	# start of content output
+	(
+	# change IFS locally within subshell so the for loop saves line correctly to L var
+	IFS=$'\n';
+
+	# pretty sweet, will mimick the normal apache output. skipping "index" and hidden files
+	for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' ! -name '.*' -prune -printf "%f|@_@%Td-%Tb-%TY %Tk:%TM  @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,\1,g');
+	do
+		# file
+		F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");
+
+		# file with file size
+		F=$(du -bh $F | cut -f1);
+
+		# output with correct format
+		sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
+	done;
+	) >> $index_file;
+
+	# now output a list of all directories in this dir (maxdepth 1) other than '.' outputting in a sorted manner exactly like apache
+	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "%-43f@_@%Td-%Tb-%TY %Tk:%TM  -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/\1,g' >> $index_file
+
+	# print the footer html
+	echo "

" >> $index_file + +} + +get_dirs() { + local directory=$1 + + for d in `find ${directory} -type d`; do + create_index $d $directory + done +} + +get_dirs $APTDIR +get_dirs $YUMDIR diff --git a/vendor/github.com/moby/moby/hack/make/install-binary b/vendor/github.com/moby/moby/hack/make/install-binary new file mode 100644 index 0000000..82cbc79 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/install-binary @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +( + source "${MAKEDIR}/install-binary-client" +) + +( + source "${MAKEDIR}/install-binary-daemon" +) diff --git a/vendor/github.com/moby/moby/hack/make/install-binary-client b/vendor/github.com/moby/moby/hack/make/install-binary-client new file mode 100644 index 0000000..6c80452 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/install-binary-client @@ -0,0 +1,10 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +( + DEST="$(dirname $DEST)/binary-client" + source "${MAKEDIR}/.binary-setup" + install_binary "${DEST}/${DOCKER_CLIENT_BINARY_NAME}" +) diff --git a/vendor/github.com/moby/moby/hack/make/install-binary-daemon b/vendor/github.com/moby/moby/hack/make/install-binary-daemon new file mode 100644 index 0000000..08a2d69 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/install-binary-daemon @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +( + DEST="$(dirname $DEST)/binary-daemon" + source "${MAKEDIR}/.binary-setup" + install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}" +) diff --git a/vendor/github.com/moby/moby/hack/make/install-script b/vendor/github.com/moby/moby/hack/make/install-script new file mode 100644 index 0000000..feadac2 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/install-script @@ -0,0 +1,63 @@ +#!/bin/bash +set -e + +# This script modifies the install.sh script for domains and keys other than +# those used by the primary opensource releases. +# +# You can provide `url`, `yum_url`, `apt_url` and optionally `gpg_fingerprint` +# or `GPG_KEYID` as environment variables, or the defaults for open source are used. +# +# The lower-case variables are substituted into install.sh. +# +# gpg_fingerprint and GPG_KEYID are optional, defaulting to the opensource release +# key ("releasedocker"). Other GPG_KEYIDs will require you to mount a volume with +# the correct contents to /root/.gnupg. +# +# It outputs the modified `install.sh` file to $DOCKER_RELEASE_DIR (default: $DEST) +# +# Example usage: +# +# docker run \ +# --rm \ +# --privileged \ +# -e "GPG_KEYID=deadbeef" \ +# -e "GNUPGHOME=/root/.gnupg" \ +# -v $HOME/.gnupg:/root/.gnupg \ +# -v $(pwd):/go/src/github.com/docker/docker/bundles \ +# "$IMAGE_DOCKER" \ +# hack/make.sh install-script + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} + +DEFAULT_URL="https://get.docker.com/" +DEFAULT_APT_URL="https://apt.dockerproject.org" +DEFAULT_YUM_URL="https://yum.dockerproject.org" +DEFAULT_GPG_FINGERPRINT="58118E89F3A912897C070ADBF76221572C52609D" + +: ${url:=$DEFAULT_URL} +: ${apt_url:=$DEFAULT_APT_URL} +: ${yum_url:=$DEFAULT_YUM_URL} +if [[ "$GPG_KEYID" == "releasedocker" ]] ; then + : ${gpg_fingerprint:=$DEFAULT_GPG_FINGERPRINT} +fi + +DEST_FILE="$DOCKER_RELEASE_DIR/install.sh" + +bundle_install_script() { + mkdir -p "$DOCKER_RELEASE_DIR" + + if [[ -z "$gpg_fingerprint" ]] ; then + # NOTE: if no key matching key is in /root/.gnupg, this will fail + gpg_fingerprint=$(gpg --with-fingerprint -k "$GPG_KEYID" | grep "Key fingerprint" | awk -F "=" '{print $2};' | tr -d ' ') + fi + + cp hack/install.sh "$DEST_FILE" + sed -i.bak 's#^url=".*"$#url="'"$url"'"#' "$DEST_FILE" + sed -i.bak 's#^apt_url=".*"$#apt_url="'"$apt_url"'"#' "$DEST_FILE" + sed -i.bak 's#^yum_url=".*"$#yum_url="'"$yum_url"'"#' "$DEST_FILE" + sed -i.bak 's#^gpg_fingerprint=".*"$#gpg_fingerprint="'"$gpg_fingerprint"'"#' "$DEST_FILE" + rm "${DEST_FILE}.bak" +} + +bundle_install_script diff --git a/vendor/github.com/moby/moby/hack/make/release-deb b/vendor/github.com/moby/moby/hack/make/release-deb new file mode 100755 index 0000000..ed65fe2 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/release-deb @@ -0,0 +1,163 @@ +#!/bin/bash +set -e + +# This script creates the apt repos for the .deb files generated by hack/make/build-deb +# +# The following can then be used as apt sources: +# deb http://apt.dockerproject.org/repo $distro-$release $version +# +# For example: +# deb http://apt.dockerproject.org/repo ubuntu-trusty main +# deb http://apt.dockerproject.org/repo ubuntu-trusty testing +# deb http://apt.dockerproject.org/repo debian-wheezy experimental +# deb http://apt.dockerproject.org/repo debian-jessie main +# +# ... and so on and so forth for the builds created by hack/make/build-deb + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# setup the apt repo (if it does not exist) +mkdir -p "$APTDIR/conf" "$APTDIR/db" "$APTDIR/dists" + +# supported arches/sections +arches=( amd64 i386 armhf ) + +# Preserve existing components but don't add any non-existing ones +for component in main testing experimental ; do + exists=$(find "$APTDIR/dists" -mindepth 2 -maxdepth 2 -type d -name "$component" -print -quit) + if [ -n "$exists" ] ; then + components+=( $component ) + fi +done + +# set the component for the version being released +component="main" + +if [[ "$VERSION" == *-rc* ]]; then + component="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + component="experimental" +fi + +# Make sure our component is in the list of components +if [[ ! "${components[*]}" =~ $component ]] ; then + components+=( $component ) +fi + +# create apt-ftparchive file on every run. This is essential to avoid +# using stale versions of the config file that could cause unnecessary +# refreshing of bits for EOL-ed releases. +cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf" +Dir { + ArchiveDir "${APTDIR}"; + CacheDir "${APTDIR}/db"; +}; + +Default { + Packages::Compress ". gzip bzip2"; + Sources::Compress ". gzip bzip2"; + Contents::Compress ". gzip bzip2"; +}; + +TreeDefault { + BinCacheDB "packages-\$(SECTION)-\$(ARCH).db"; + Directory "pool/\$(SECTION)"; + Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages"; + SrcDirectory "pool/\$(SECTION)"; + Sources "\$(DIST)/\$(SECTION)/source/Sources"; + Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)"; + FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist"; +}; +EOF + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + suite="${version//debootstrap-}" + + cat <<-EOF + Tree "dists/${suite}" { + Sections "${components[*]}"; + Architectures "${arches[*]}"; + } + + EOF +done >> "$APTDIR/conf/apt-ftparchive.conf" + +cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf" +APT::FTPArchive::Release::Origin "Docker"; +APT::FTPArchive::Release::Components "${components[*]}"; +APT::FTPArchive::Release::Label "Docker APT Repository"; +APT::FTPArchive::Release::Architectures "${arches[*]}"; +EOF + +# release the debs +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + tempdir="$(mktemp -d /tmp/tmp-docker-release-deb.XXXXXXXX)" + DEBFILE=( "$dir/docker-engine"*.deb ) + + # add the deb for each component for the distro version into the + # pool (if it is not there already) + mkdir -p "$APTDIR/pool/$component/d/docker-engine/" + for deb in ${DEBFILE[@]}; do + d=$(basename "$deb") + # We do not want to generate a new deb if it has already been + # copied into the APTDIR + if [ ! -f "$APTDIR/pool/$component/d/docker-engine/$d" ]; then + cp "$deb" "$tempdir/" + # if we have a $GPG_PASSPHRASE we may as well + # dpkg-sign before copying the deb into the pool + if [ ! -z "$GPG_PASSPHRASE" ]; then + dpkg-sig -g "--no-tty --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE'" \ + -k "$GPG_KEYID" --sign builder "$tempdir/$d" + fi + mv "$tempdir/$d" "$APTDIR/pool/$component/d/docker-engine/" + fi + done + + rm -rf "$tempdir" + + # build the right directory structure, needed for apt-ftparchive + for arch in "${arches[@]}"; do + for c in "${components[@]}"; do + mkdir -p "$APTDIR/dists/$codename/$c/binary-$arch" + done + done + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename}*.deb -o \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" +done + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Components=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done +done diff --git a/vendor/github.com/moby/moby/hack/make/release-rpm b/vendor/github.com/moby/moby/hack/make/release-rpm new file mode 100755 index 0000000..d7e3ec4 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/release-rpm @@ -0,0 +1,71 @@ +#!/bin/bash +set -e + +# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm +# +# The following can then be used as a yum repo: +# http://yum.dockerproject.org/repo/$release/$distro/$distro-version +# +# For example: +# http://yum.dockerproject.org/repo/main/fedora/23 +# http://yum.dockerproject.org/repo/testing/centos/7 +# http://yum.dockerproject.org/repo/experimental/fedora/23 +# http://yum.dockerproject.org/repo/main/centos/7 +# +# ... and so on and so forth for the builds created by hack/make/build-rpm + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo +: ${GPG_KEYID:=releasedocker} + +# get the release +release="main" + +if [[ "$VERSION" == *-rc* ]]; then + release="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + release="experimental" +fi + +# Setup the yum repo +for dir in bundles/$VERSION/build-rpm/*/; do + version="$(basename "$dir")" + suite="${version##*-}" + distro="${version%-*}" + + REPO=$YUMDIR/$release/$distro + + # if the directory does not exist, initialize the yum repo + if [[ ! -d $REPO/$suite/Packages ]]; then + mkdir -p "$REPO/$suite/Packages" + + createrepo --pretty "$REPO/$suite" + fi + + # path to rpms + RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm ) + + # if we have a $GPG_PASSPHRASE we may as well + # sign the rpms before adding to repo + if [ ! -z $GPG_PASSPHRASE ]; then + # export our key to rpm import + gpg --armor --export "$GPG_KEYID" > /tmp/gpg + rpm --import /tmp/gpg + + # sign the rpms + echo "yes" | setsid rpm \ + --define "_gpg_name $GPG_KEYID" \ + --define "_signature gpg" \ + --define "__gpg_check_password_cmd /bin/true" \ + --define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \ + --resign "${RPMFILE[@]}" + fi + + # copy the rpms to the packages folder + cp "${RPMFILE[@]}" "$REPO/$suite/Packages" + + # update the repo + createrepo --pretty --update "$REPO/$suite" +done diff --git a/vendor/github.com/moby/moby/hack/make/run b/vendor/github.com/moby/moby/hack/make/run new file mode 100644 index 0000000..37cfd53 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/run @@ -0,0 +1,44 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +if ! command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before run' + false +fi + +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + + +listen_port=2375 +if [ -n "$DOCKER_PORT" ]; then + IFS=':' read -r -a ports <<< "$DOCKER_PORT" + listen_port="${ports[-1]}" +fi + +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +args="--debug \ + --host tcp://0.0.0.0:${listen_port} --host unix:///var/run/docker.sock \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params" + +echo dockerd $args +exec dockerd $args diff --git a/vendor/github.com/moby/moby/hack/make/sign-repos b/vendor/github.com/moby/moby/hack/make/sign-repos new file mode 100755 index 0000000..6ed1606 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/sign-repos @@ -0,0 +1,65 @@ +#!/bin/bash + +# This script signs the deliverables from release-deb and release-rpm +# with a designated GPG key. + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo + +if [ -z "$GPG_PASSPHRASE" ]; then + echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts' + exit 1 +fi + +if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then + echo >&2 'release-rpm or release-deb must be run before sign-repos' + exit 1 +fi + +sign_packages(){ + # sign apt repo metadata + if [ -d $APTDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg" + + # sign the repo metadata + for F in $(find $APTDIR -name Release); do + if test "$F" -nt "$F.gpg" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.gpg" "$F" + fi + inRelease="$(dirname "$F")/InRelease" + if test "$F" -nt "$inRelease" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --clearsign \ + --batch --yes \ + --output "$inRelease" "$F" + fi + done + fi + + # sign yum repo metadata + if [ -d $YUMDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg" + + # sign the repo metadata + for F in $(find $YUMDIR -name repomd.xml); do + if test "$F" -nt "$F.asc" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.asc" "$F" + fi + done + fi +} + +sign_packages diff --git a/vendor/github.com/moby/moby/hack/make/test-deb-install b/vendor/github.com/moby/moby/hack/make/test-deb-install new file mode 100755 index 0000000..aec5847 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-deb-install @@ -0,0 +1,71 @@ +#!/bin/bash +# This script is used for testing install.sh and that it works for +# each of component of our apt and yum repos +set -e + +: ${DEB_DIR:="$(pwd)/bundles/$(cat VERSION)/build-deb"} + +if [[ ! -d "${DEB_DIR}" ]]; then + echo "you must first run `make deb` or hack/make/build-deb" + exit 1 +fi + +test_deb_install(){ + # test for each Dockerfile in contrib/builder + + builderDir="contrib/builder/deb/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" + local dir=$(basename "$dir") + + if [[ ! -d "${DEB_DIR}/${dir}" ]]; then + echo "No deb found for ${dir}" + exit 1 + fi + + local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) + cat <<-EOF > "${script}" + #!/bin/bash + set -e + set -x + + apt-get update && apt-get install -y apparmor + + dpkg -i /root/debs/*.deb || true + + apt-get install -yf + + /etc/init.d/apparmor start + + # this will do everything _except_ load the profile into the kernel + ( + cd /etc/apparmor.d + /sbin/apparmor_parser --skip-kernel-load docker-engine + ) + EOF + + chmod +x "${script}" + + echo "testing deb install for ${from}" + docker run --rm -i --privileged \ + -v ${DEB_DIR}/${dir}:/root/debs \ + -v ${script}:/install.sh \ + ${from} /install.sh + + rm -f ${script} + done +} + +( + bundle .integration-daemon-start + test_deb_install + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/moby/moby/hack/make/test-docker-py b/vendor/github.com/moby/moby/hack/make/test-docker-py new file mode 100644 index 0000000..fcacc16 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-docker-py @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + dockerPy='/docker-py' + [ -d "$dockerPy" ] || { + dockerPy="$DEST/docker-py" + git clone https://github.com/docker/docker-py.git "$dockerPy" + } + + # exporting PYTHONPATH to import "docker" from our local docker-py + test_env PYTHONPATH="$dockerPy" py.test --junitxml="$DEST/results.xml" "$dockerPy/tests/integration" + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/moby/moby/hack/make/test-install-script b/vendor/github.com/moby/moby/hack/make/test-install-script new file mode 100755 index 0000000..4782cbe --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-install-script @@ -0,0 +1,31 @@ +#!/bin/bash +# This script is used for testing install.sh and that it works for +# each of component of our apt and yum repos +set -e + +test_install_script(){ + # these are equivalent to main, testing, experimental components + # in the repos, but its the url that will do the conversion + components=( experimental test get ) + + for component in "${components[@]}"; do + # change url to specific component for testing + local test_url=https://${component}.docker.com + local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) + sed "s,url='https://get.docker.com/',url='${test_url}/'," hack/install.sh > "${script}" + + chmod +x "${script}" + + # test for each Dockerfile in contrib/builder + for dir in contrib/builder/*/*/; do + local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" + + echo "running install.sh for ${component} with ${from}" + docker run --rm -i -v ${script}:/install.sh ${from} /install.sh + done + + rm -f ${script} + done +} + +test_install_script diff --git a/vendor/github.com/moby/moby/hack/make/test-integration-cli b/vendor/github.com/moby/moby/hack/make/test-integration-cli new file mode 100755 index 0000000..689a528 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-integration-cli @@ -0,0 +1,28 @@ +#!/bin/bash +set -e + +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + bundle .integration-daemon-setup + + bundle_test_integration_cli + + bundle .integration-daemon-stop + + if [ "$(go env GOOS)" != 'windows' ] + then + leftovers=$(ps -ax -o pid,cmd | awk '$2 == "docker-containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration-cli/ { print $1 }') + if [ -n "$leftovers" ] + then + ps aux + kill -9 $leftovers 2> /dev/null + echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!" + exit 1 + fi + fi + +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/moby/moby/hack/make/test-integration-shell b/vendor/github.com/moby/moby/hack/make/test-integration-shell new file mode 100644 index 0000000..86df965 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-integration-shell @@ -0,0 +1,7 @@ +#!/bin/bash + +bundle .integration-daemon-start +bundle .integration-daemon-setup + +export ABS_DEST +bash +e diff --git a/vendor/github.com/moby/moby/hack/make/test-old-apt-repo b/vendor/github.com/moby/moby/hack/make/test-old-apt-repo new file mode 100755 index 0000000..bb20128 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-old-apt-repo @@ -0,0 +1,29 @@ +#!/bin/bash +set -e + +versions=( 1.3.3 1.4.1 1.5.0 1.6.2 ) + +install() { + local version=$1 + local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX) + local dockerfile="${tmpdir}/Dockerfile" + cat <<-EOF > "$dockerfile" + FROM debian:jessie + ENV VERSION ${version} + RUN apt-get update && apt-get install -y \ + apt-transport-https \ + ca-certificates \ + --no-install-recommends + RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list + RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \ + --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + RUN apt-get update && apt-get install -y \ + lxc-docker-\${VERSION} + EOF + + docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile $tmpdir +} + +for v in "${versions[@]}"; do + install "$v" +done diff --git a/vendor/github.com/moby/moby/hack/make/test-unit b/vendor/github.com/moby/moby/hack/make/test-unit new file mode 100644 index 0000000..f263345 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-unit @@ -0,0 +1,55 @@ +#!/bin/bash +set -e + +# Run Docker's test suite, including sub-packages, and store their output as a bundle +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +bundle_test_unit() { + TESTFLAGS+=" -test.timeout=${TIMEOUT}" + INCBUILD="-i" + count=0 + for flag in "${BUILDFLAGS[@]}"; do + if [ "${flag}" == ${INCBUILD} ]; then + unset BUILDFLAGS[${count}] + break + fi + count=$[ ${count} + 1 ] + done + + date + if [ -z "$TESTDIRS" ]; then + TEST_PATH=./... + else + TEST_PATH=./${TESTDIRS} + fi + + if [ "$(go env GOHOSTOS)" = 'solaris' ]; then + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/daemon/graphdriver \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli) + else + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli) + fi + + go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list +} + +bundle_test_unit 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/moby/moby/hack/make/tgz b/vendor/github.com/moby/moby/hack/make/tgz new file mode 100644 index 0000000..3ccd93f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/tgz @@ -0,0 +1,92 @@ +#!/bin/bash + +CROSS="$DEST/../cross" + +set -e + +arch=$(go env GOHOSTARCH) +if [ ! -d "$CROSS/linux/${arch}" ]; then + echo >&2 'error: binary and cross must be run before tgz' + false +fi + +( +for d in "$CROSS/"*/*; do + export GOARCH="$(basename "$d")" + export GOOS="$(basename "$(dirname "$d")")" + + source "${MAKEDIR}/.binary-setup" + + BINARY_NAME="${DOCKER_CLIENT_BINARY_NAME}-$VERSION" + DAEMON_BINARY_NAME="${DOCKER_DAEMON_BINARY_NAME}-$VERSION" + PROXY_BINARY_NAME="${DOCKER_PROXY_BINARY_NAME}-$VERSION" + BINARY_EXTENSION="$(export GOOS && binary_extension)" + if [ "$GOOS" = 'windows' ]; then + # if windows use a zip, not tgz + BUNDLE_EXTENSION=".zip" + IS_TAR="false" + elif [ "$GOOS" == "solaris" ]; then + # Solaris bypasses cross due to CGO issues. + continue + else + BUNDLE_EXTENSION=".tgz" + IS_TAR="true" + fi + BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + DAEMON_BINARY_FULLNAME="$DAEMON_BINARY_NAME$BINARY_EXTENSION" + PROXY_BINARY_FULLNAME="$PROXY_BINARY_NAME$BINARY_EXTENSION" + mkdir -p "$DEST/$GOOS/$GOARCH" + TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME$BUNDLE_EXTENSION" + + # The staging directory for the files in the tgz + BUILD_PATH="$DEST/build" + + # The directory that is at the root of the tar file + TAR_BASE_DIRECTORY="docker" + + # $DEST/build/docker + TAR_PATH="$BUILD_PATH/$TAR_BASE_DIRECTORY" + + # Copy the correct docker binary + mkdir -p $TAR_PATH + cp -L "$d/$BINARY_FULLNAME" "$TAR_PATH/${DOCKER_CLIENT_BINARY_NAME}${BINARY_EXTENSION}" + if [ -f "$d/$DAEMON_BINARY_FULLNAME" ]; then + cp -L "$d/$DAEMON_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_DAEMON_BINARY_NAME}${BINARY_EXTENSION}" + fi + if [ -f "$d/$PROXY_BINARY_FULLNAME" ]; then + cp -L "$d/$PROXY_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_PROXY_BINARY_NAME}${BINARY_EXTENSION}" + fi + + # copy over all the extra binaries + copy_binaries $TAR_PATH + + # add completions + for s in bash fish zsh; do + mkdir -p $TAR_PATH/completion/$s + cp -L contrib/completion/$s/*docker* $TAR_PATH/completion/$s/ + done + + if [ "$IS_TAR" == "true" ]; then + echo "Creating tgz from $BUILD_PATH and naming it $TGZ" + tar --numeric-owner --owner 0 -C "$BUILD_PATH" -czf "$TGZ" $TAR_BASE_DIRECTORY + else + # ZIP needs to full absolute dir path, not the absolute path + ZIP=`pwd`"/$TGZ" + # keep track of where we are, for later. + pushd . + # go into the BUILD_PATH since zip does not have a -C equivalent. + cd $BUILD_PATH + echo "Creating zip from $BUILD_PATH and naming it $ZIP" + zip -q -r $ZIP $TAR_BASE_DIRECTORY + # go back to where we started + popd + fi + + hash_files "$TGZ" + + # cleanup after ourselves + rm -rf "$BUILD_PATH" + + echo "Created tgz: $TGZ" +done +) diff --git a/vendor/github.com/moby/moby/hack/make/ubuntu b/vendor/github.com/moby/moby/hack/make/ubuntu new file mode 100644 index 0000000..8de5d9c --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/ubuntu @@ -0,0 +1,190 @@ +#!/bin/bash + +PKGVERSION="${VERSION//-/'~'}" +# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + GIT_UNIX="$(git log -1 --pretty='%at')" + GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" + GIT_COMMIT="$(git log -1 --pretty='%h')" + GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" + # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' + PKGVERSION="$PKGVERSION~$GIT_VERSION" +fi + +# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false +# true + +# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + +PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" +PACKAGE_URL="https://www.docker.com/" +PACKAGE_MAINTAINER="support@docker.com" +PACKAGE_DESCRIPTION="Linux container runtime +Docker complements LXC with a high-level API which operates at the process +level. It runs unix processes with strong guarantees of isolation and +repeatability across servers. +Docker is a great building block for automating distributed systems: +large-scale web deployments, database clusters, continuous deployment systems, +private PaaS, service-oriented architectures, etc." +PACKAGE_LICENSE="Apache-2.0" + +# Build docker as an ubuntu package using FPM and REPREPRO (sue me). +# bundle_binary must be called first. +bundle_ubuntu() { + DIR="$ABS_DEST/build" + + # Include our udev rules + mkdir -p "$DIR/etc/udev/rules.d" + cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" + + # Include our init scripts + mkdir -p "$DIR/etc/init" + cp contrib/init/upstart/docker.conf "$DIR/etc/init/" + mkdir -p "$DIR/etc/init.d" + cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" + mkdir -p "$DIR/etc/default" + cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" + mkdir -p "$DIR/lib/systemd/system" + cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" + + # Include contributed completions + mkdir -p "$DIR/etc/bash_completion.d" + cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" + mkdir -p "$DIR/usr/share/zsh/vendor-completions" + cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" + mkdir -p "$DIR/etc/fish/completions" + cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" + + # Include man pages + make manpages + manRoot="$DIR/usr/share/man" + mkdir -p "$manRoot" + for manDir in man/man?; do + manBase="$(basename "$manDir")" # "man1" + for manFile in "$manDir"/*; do + manName="$(basename "$manFile")" # "docker-build.1" + mkdir -p "$manRoot/$manBase" + gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" + done + done + + # Copy the binary + # This will fail if the binary bundle hasn't been built + mkdir -p "$DIR/usr/bin" + cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker" + + # Generate postinst/prerm/postrm scripts + cat > "$DEST/postinst" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = 'configure' ] && [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi +fi + +if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then + # we only need to do this if upstart isn't in charge + update-rc.d docker defaults > /dev/null || true +fi +if [ -n "$2" ]; then + _dh_action=restart +else + _dh_action=start +fi +service docker $_dh_action 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/prerm" <<'EOF' +#!/bin/sh +set -e +set -u + +service docker stop 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/postrm" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = "purge" ] ; then + update-rc.d docker remove > /dev/null || true +fi + +# In case this system is running systemd, we make systemd reload the unit files +# to pick up changes. +if [ -d /run/systemd/system ] ; then + systemctl --system daemon-reload > /dev/null || true +fi + +#DEBHELPER# +EOF + # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way + chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + + ( + # switch directories so we create *.deb in the right folder + cd "$DEST" + + # create lxc-docker-VERSION package + fpm -s dir -C "$DIR" \ + --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ + --after-install "$ABS_DEST/postinst" \ + --before-remove "$ABS_DEST/prerm" \ + --after-remove "$ABS_DEST/postrm" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --prefix / \ + --depends iptables \ + --deb-recommends aufs-tools \ + --deb-recommends ca-certificates \ + --deb-recommends git \ + --deb-recommends xz-utils \ + --deb-recommends 'cgroupfs-mount | cgroup-lite' \ + --deb-suggests apparmor \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ + --conflicts lxc-docker-virtual-package \ + --provides lxc-docker \ + --provides lxc-docker-virtual-package \ + --replaces lxc-docker \ + --replaces lxc-docker-virtual-package \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ + --config-files /etc/init/docker.conf \ + --config-files /etc/init.d/docker \ + --config-files /etc/default/docker \ + --deb-compression gz \ + -t deb . + # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available + + # create empty lxc-docker wrapper package + fpm -s empty \ + --name lxc-docker --version "$PKGVERSION" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --depends lxc-docker-$VERSION \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --deb-compression gz \ + -t deb + ) + + # clean up after ourselves so we have a clean output directory + rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + rm -r "$DIR" +} + +bundle_ubuntu diff --git a/vendor/github.com/moby/moby/hack/make/update-apt-repo b/vendor/github.com/moby/moby/hack/make/update-apt-repo new file mode 100755 index 0000000..7354a2e --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/update-apt-repo @@ -0,0 +1,70 @@ +#!/bin/bash +set -e + +# This script updates the apt repo in $DOCKER_RELEASE_DIR/apt/repo. +# This script is a "fix all" for any sort of problems that might have occurred with +# the Release or Package files in the repo. +# It should only be used in the rare case of extreme emergencies to regenerate +# Release and Package files for the apt repo. +# +# NOTE: Always be sure to re-sign the repo with hack/make/sign-repos after running +# this script. + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# supported arches/sections +arches=( amd64 i386 ) + +# Preserve existing components but don't add any non-existing ones +for component in main testing experimental ; do + if ls "$APTDIR/dists/*/$component" >/dev/null 2>&1 ; then + components+=( $component ) + fi +done + +dists=( $(find "${APTDIR}/dists" -maxdepth 1 -mindepth 1 -type d) ) + +# override component if it is set +if [ "$COMPONENT" ]; then + components=( $COMPONENT ) +fi + +# release the debs +for version in "${dists[@]}"; do + for component in "${components[@]}"; do + codename="${version//debootstrap-}" + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" + done +done + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dist in "${dists[@]}"; do + version=$(basename "$dist") + for component in "${components[@]}"; do + codename="${version//debootstrap-}" + + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Component=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done + done +done diff --git a/vendor/github.com/moby/moby/hack/make/win b/vendor/github.com/moby/moby/hack/make/win new file mode 100644 index 0000000..f9f4111 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/win @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +# explicit list of os/arch combos that support being a daemon +declare -A daemonSupporting +daemonSupporting=( + [linux/amd64]=1 + [windows/amd64]=1 +) +platform="windows/amd64" +export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION +mkdir -p "$DEST" +ABS_DEST="$(cd "$DEST" && pwd -P)" +export GOOS=${platform%/*} +export GOARCH=${platform##*/} +if [ -z "${daemonSupporting[$platform]}" ]; then + export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms + export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported +fi +source "${MAKEDIR}/binary" diff --git a/vendor/github.com/moby/moby/hack/make/yaml-docs-generator b/vendor/github.com/moby/moby/hack/make/yaml-docs-generator new file mode 100644 index 0000000..8548dee --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/yaml-docs-generator @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -e + +[ -z "$KEEPDEST" ] && \ + rm -rf "$DEST" + +( + source "${MAKEDIR}/.binary-setup" + export BINARY_SHORT_NAME="yaml-docs-generator" + export GO_PACKAGE='github.com/docker/docker/docs/yaml' + source "${MAKEDIR}/.binary" +) diff --git a/vendor/github.com/moby/moby/hack/release.sh b/vendor/github.com/moby/moby/hack/release.sh new file mode 100755 index 0000000..4b02053 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/release.sh @@ -0,0 +1,325 @@ +#!/usr/bin/env bash +set -e + +# This script looks for bundles built by make.sh, and releases them on a +# public S3 bucket. +# +# Bundles should be available for the VERSION string passed as argument. +# +# The correct way to call this script is inside a container built by the +# official Dockerfile at the root of the Docker source code. The Dockerfile, +# make.sh and release.sh should all be from the same source code revision. + +set -o pipefail + +# Print a usage message and exit. +usage() { + cat >&2 <<'EOF' +To run, I need: +- to be in a container generated by the Dockerfile at the top of the Docker + repository; +- to be provided with the location of an S3 bucket and path, in + environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: ''); +- to be provided with AWS credentials for this S3 bucket, in environment + variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY; +- a generous amount of good will and nice manners. +The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" + +docker run -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e AWS_DEFAULT_REGION \ + -it --privileged \ + docker ./hack/release.sh +EOF + exit 1 +} + +[ "$AWS_S3_BUCKET" ] || usage +[ "$AWS_ACCESS_KEY_ID" ] || usage +[ "$AWS_SECRET_ACCESS_KEY" ] || usage +[ -d /go/src/github.com/docker/docker ] || usage +cd /go/src/github.com/docker/docker +[ -x hack/make.sh ] || usage + +export AWS_DEFAULT_REGION +: ${AWS_DEFAULT_REGION:=us-west-1} + +AWS_CLI=${AWS_CLI:-'aws'} + +RELEASE_BUNDLES=( + binary + cross + tgz +) + +if [ "$1" != '--release-regardless-of-test-failure' ]; then + RELEASE_BUNDLES=( + test-unit + "${RELEASE_BUNDLES[@]}" + test-integration-cli + ) +fi + +VERSION=$(< VERSION) +BUCKET=$AWS_S3_BUCKET +BUCKET_PATH=$BUCKET +[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH + +if command -v git &> /dev/null && git rev-parse &> /dev/null; then + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + echo "You cannot run the release script on a repo with uncommitted changes" + usage + fi +fi + +# These are the 2 keys we've used to sign the deb's +# release (get.docker.com) +# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" +# test (test.docker.com) +# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" + +setup_s3() { + echo "Setting up S3" + # Try creating the bucket. Ignore errors (it might already exist). + $AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true + # Check access to the bucket. + $AWS_CLI s3 ls "s3://$BUCKET" >/dev/null + # Make the bucket accessible through website endpoints. + $AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET" +} + +# write_to_s3 uploads the contents of standard input to the specified S3 url. +write_to_s3() { + DEST=$1 + F=`mktemp` + cat > "$F" + $AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST" + rm -f "$F" +} + +s3_url() { + case "$BUCKET" in + get.docker.com|test.docker.com|experimental.docker.com) + echo "https://$BUCKET_PATH" + ;; + *) + BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com" + if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then + echo "$BASE_URL/$AWS_S3_BUCKET_PATH" + else + echo "$BASE_URL" + fi + ;; + esac +} + +build_all() { + echo "Building release" + if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then + echo >&2 + echo >&2 'The build or tests appear to have failed.' + echo >&2 + echo >&2 'You, as the release maintainer, now have a couple options:' + echo >&2 '- delay release and fix issues' + echo >&2 '- delay release and fix issues' + echo >&2 '- did we mention how important this is? issues need fixing :)' + echo >&2 + echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' + echo >&2 ' really knows all the hairy problems at hand with the current release' + echo >&2 ' issues) may bypass this checking by running this script again with the' + echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' + echo >&2 ' running the test suite, and will only build the binaries and packages. Please' + echo >&2 ' avoid using this if at all possible.' + echo >&2 + echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' + echo >&2 ' should be used. If there are release issues, we should always err on the' + echo >&2 ' side of caution.' + echo >&2 + exit 1 + fi +} + +upload_release_build() { + src="$1" + dst="$2" + latest="$3" + + echo + echo "Uploading $src" + echo " to $dst" + echo + $AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst" + if [ "$latest" ]; then + echo + echo "Copying to $latest" + echo + $AWS_CLI s3 cp --acl public-read "$dst" "$latest" + fi + + # get hash files too (see hash_files() in hack/make.sh) + for hashAlgo in md5 sha256; do + if [ -e "$src.$hashAlgo" ]; then + echo + echo "Uploading $src.$hashAlgo" + echo " to $dst.$hashAlgo" + echo + $AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo" + if [ "$latest" ]; then + echo + echo "Copying to $latest.$hashAlgo" + echo + $AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo" + fi + fi + done +} + +release_build() { + echo "Releasing binaries" + GOOS=$1 + GOARCH=$2 + + binDir=bundles/$VERSION/cross/$GOOS/$GOARCH + tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH + binary=docker-$VERSION + zipExt=".tgz" + binaryExt="" + tgz=$binary$zipExt + + latestBase= + if [ -z "$NOLATEST" ]; then + latestBase=docker-latest + fi + + # we need to map our GOOS and GOARCH to uname values + # see https://en.wikipedia.org/wiki/Uname + # ie, GOOS=linux -> "uname -s"=Linux + + s3Os=$GOOS + case "$s3Os" in + darwin) + s3Os=Darwin + ;; + freebsd) + s3Os=FreeBSD + ;; + linux) + s3Os=Linux + ;; + solaris) + echo skipping solaris release + return 0 + ;; + windows) + # this is windows use the .zip and .exe extensions for the files. + s3Os=Windows + zipExt=".zip" + binaryExt=".exe" + tgz=$binary$zipExt + binary+=$binaryExt + ;; + *) + echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" + exit 1 + ;; + esac + + s3Arch=$GOARCH + case "$s3Arch" in + amd64) + s3Arch=x86_64 + ;; + 386) + s3Arch=i386 + ;; + arm) + s3Arch=armel + # someday, we might potentially support multiple GOARM values, in which case we might get armhf here too + ;; + *) + echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" + exit 1 + ;; + esac + + s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch" + # latest= + latestTgz= + if [ "$latestBase" ]; then + # commented out since we aren't uploading binaries right now. + # latest="$s3Dir/$latestBase$binaryExt" + # we don't include the $binaryExt because we don't want docker.exe.zip + latestTgz="$s3Dir/$latestBase$zipExt" + fi + + if [ ! -f "$tgzDir/$tgz" ]; then + echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" + exit 1 + fi + # disable binary uploads for now. Only providing tgz downloads + # upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" + upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" +} + +# Upload binaries and tgz files to S3 +release_binaries() { + [ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || { + echo >&2 './hack/make.sh must be run before release_binaries' + exit 1 + } + + for d in bundles/$VERSION/cross/*/*; do + GOARCH="$(basename "$d")" + GOOS="$(basename "$(dirname "$d")")" + release_build "$GOOS" "$GOARCH" + done + + # TODO create redirect from builds/*/i686 to builds/*/i386 + + cat <&2 + exit 1 +fi + +grep -e '^## ' "$changelogFile" | awk '{print$3}' | sort -c -r || exit 2 + +echo "Congratulations! Changelog $changelogFile dates are in descending order." diff --git a/vendor/github.com/moby/moby/hack/validate/changelog-well-formed b/vendor/github.com/moby/moby/hack/validate/changelog-well-formed new file mode 100755 index 0000000..a6d1fde --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/changelog-well-formed @@ -0,0 +1,25 @@ +#!/bin/bash + +changelogFile=${1:-CHANGELOG.md} + +if [ ! -r "$changelogFile" ]; then + echo "Unable to read file $changelogFile" >&2 + exit 1 +fi + +changelogWellFormed=1 + +# e.g. "## 1.12.3 (2016-10-26)" +VER_LINE_REGEX='^## [0-9]+\.[0-9]+\.[0-9]+(-ce)? \([0-9]+-[0-9]+-[0-9]+\)$' +while read -r line; do + if ! [[ "$line" =~ $VER_LINE_REGEX ]]; then + echo "Malformed changelog $changelogFile line \"$line\"" >&2 + changelogWellFormed=0 + fi +done < <(grep '^## ' $changelogFile) + +if [[ "$changelogWellFormed" == "1" ]]; then + echo "Congratulations! Changelog $changelogFile is well-formed." +else + exit 2 +fi diff --git a/vendor/github.com/moby/moby/hack/validate/compose-bindata b/vendor/github.com/moby/moby/hack/validate/compose-bindata new file mode 100755 index 0000000..f877282 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/compose-bindata @@ -0,0 +1,28 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'cli/compose/schema/data' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + go generate github.com/docker/docker/cli/compose/schema 2> /dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- cli/compose/schema 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of `go generate github.com/docker/docker/cli/compose/schema` differs' + echo + echo "$diffs" + echo + echo 'Please run `go generate github.com/docker/docker/cli/compose/schema`' + } >&2 + false + else + echo 'Congratulations! cli/compose/schema/bindata.go is up-to-date.' + fi +else + echo 'No cli/compose/schema/data changes in diff.' +fi diff --git a/vendor/github.com/moby/moby/hack/validate/dco b/vendor/github.com/moby/moby/hack/validate/dco new file mode 100755 index 0000000..754ce8f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/dco @@ -0,0 +1,55 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') +dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') +#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')" + +: ${adds:=0} +: ${dels:=0} + +# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" +githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + +# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work +dcoPrefix='Signed-off-by:' +dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" + +check_dco() { + grep -qE "$dcoRegex" +} + +if [ $adds -eq 0 -a $dels -eq 0 ]; then + echo '0 adds, 0 deletions; nothing to validate! :)' +else + commits=( $(validate_log --format='format:%H%n') ) + badCommits=() + for commit in "${commits[@]}"; do + if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then + # no content (ie, Merge commit, etc) + continue + fi + if ! git log -1 --format='format:%B' "$commit" | check_dco; then + badCommits+=( "$commit" ) + fi + done + if [ ${#badCommits[@]} -eq 0 ]; then + echo "Congratulations! All commits are properly signed with the DCO!" + else + { + echo "These commits do not have a proper '$dcoPrefix' marker:" + for commit in "${badCommits[@]}"; do + echo " - $commit" + done + echo + echo 'Please amend each commit to include a properly formatted DCO marker.' + echo + echo 'Visit the following URL for information about the Docker DCO:' + echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' + echo + } >&2 + false + fi +fi diff --git a/vendor/github.com/moby/moby/hack/validate/default b/vendor/github.com/moby/moby/hack/validate/default new file mode 100755 index 0000000..bc8a229 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/default @@ -0,0 +1,19 @@ +#!/bin/bash +# +# Run default validation, exclude vendor because it's slow + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +. $SCRIPTDIR/dco +. $SCRIPTDIR/default-seccomp +. $SCRIPTDIR/gofmt +. $SCRIPTDIR/lint +. $SCRIPTDIR/pkg-imports +. $SCRIPTDIR/swagger +. $SCRIPTDIR/swagger-gen +. $SCRIPTDIR/test-imports +. $SCRIPTDIR/toml +. $SCRIPTDIR/vet +. $SCRIPTDIR/changelog-well-formed +. $SCRIPTDIR/changelog-date-descending +. $SCRIPTDIR/compose-bindata diff --git a/vendor/github.com/moby/moby/hack/validate/default-seccomp b/vendor/github.com/moby/moby/hack/validate/default-seccomp new file mode 100755 index 0000000..8fe8435 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/default-seccomp @@ -0,0 +1,28 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'profiles/seccomp' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + # We run 'go generate' and see if we have a diff afterwards + go generate ./profiles/seccomp/ >/dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- profiles/seccomp 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of go generate ./profiles/seccomp/ differs' + echo + echo "$diffs" + echo + echo 'Please re-run go generate ./profiles/seccomp/' + echo + } >&2 + false + else + echo 'Congratulations! Seccomp profile generation is done correctly.' + fi +fi diff --git a/vendor/github.com/moby/moby/hack/validate/gofmt b/vendor/github.com/moby/moby/hack/validate/gofmt new file mode 100755 index 0000000..2040afa --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/gofmt @@ -0,0 +1,33 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | + grep -v '^vendor/' | + grep -v '^cli/compose/schema/bindata.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed is formatted + if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files are properly formatted.' +else + { + echo "These files are not properly gofmt'd:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/lint b/vendor/github.com/moby/moby/hack/validate/lint new file mode 100755 index 0000000..4ac0a33 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/lint @@ -0,0 +1,31 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '^cli/compose/schema/bindata.go' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + failedLint=$(golint "$f") + if [ "$failedLint" ]; then + errors+=( "$failedLint" ) + fi +done + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been linted.' +else + { + echo "Errors from golint:" + for err in "${errors[@]}"; do + echo "$err" + done + echo + echo 'Please fix the above errors. You can test via "golint" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/pkg-imports b/vendor/github.com/moby/moby/hack/validate/pkg-imports new file mode 100755 index 0000000..9e4ea74 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/pkg-imports @@ -0,0 +1,33 @@ +#!/bin/bash +set -e + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + IFS=$'\n' + badImports=( $(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -vE '^github.com/docker/docker/vendor' | grep -E '^github.com/docker/docker' || true) ) + unset IFS + + for import in "${badImports[@]}"; do + badFiles+=( "$f imports $import" ) + done +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! "./pkg/..." is safely isolated from internal code.' +else + { + echo 'These files import internal code: (either directly or indirectly)' + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/swagger b/vendor/github.com/moby/moby/hack/validate/swagger new file mode 100755 index 0000000..e754fb8 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/swagger @@ -0,0 +1,13 @@ +#!/bin/bash +set -e +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/swagger.yaml' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + yamllint -c ${SCRIPTDIR}/.swagger-yamllint api/swagger.yaml + swagger validate api/swagger.yaml +fi diff --git a/vendor/github.com/moby/moby/hack/validate/swagger-gen b/vendor/github.com/moby/moby/hack/validate/swagger-gen new file mode 100755 index 0000000..008abc7 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/swagger-gen @@ -0,0 +1,29 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swagger.yaml' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + ${SCRIPTDIR}/../generate-swagger-api.sh 2> /dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- api/types/ 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of hack/generate-swagger-api.sh differs' + echo + echo "$diffs" + echo + echo 'Please update api/swagger.yaml with any api changes, then ' + echo 'run `hack/generate-swagger-api.sh`.' + } >&2 + false + else + echo 'Congratulations! All api changes are done the right way.' + fi +else + echo 'No api/types/ or api/swagger.yaml changes in diff.' +fi diff --git a/vendor/github.com/moby/moby/hack/validate/test-imports b/vendor/github.com/moby/moby/hack/validate/test-imports new file mode 100755 index 0000000..373caa2 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/test-imports @@ -0,0 +1,38 @@ +#!/bin/bash +# Make sure we're not using gos' Testing package any more in integration-cli + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # skip check_test.go since it *does* use the testing package + if [ "$f" = "integration-cli/check_test.go" ]; then + continue + fi + + # we use "git show" here to validate that what's committed doesn't contain golang built-in testing + if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then + if [ "$(echo $f | grep '_test')" ]; then + # allow testing.T for non- _test files + badFiles+=( "$f" ) + fi + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! No testing.T found.' +else + { + echo "These files use the wrong testing infrastructure:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/toml b/vendor/github.com/moby/moby/hack/validate/toml new file mode 100755 index 0000000..a0cb158 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/toml @@ -0,0 +1,31 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed has valid toml syntax + if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All toml source files changed here have valid syntax.' +else + { + echo "These files are not valid toml:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files as valid toml' + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/vendor b/vendor/github.com/moby/moby/hack/validate/vendor new file mode 100755 index 0000000..0cb5aab --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/vendor @@ -0,0 +1,30 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'vendor.conf' 'vendor/' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + # We run vndr to and see if we have a diff afterwards + vndr + # Let see if the working directory is clean + diffs="$(git status --porcelain -- vendor 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of vndr differs' + echo + echo "$diffs" + echo + echo 'Please vendor your package with github.com/LK4D4/vndr.' + echo + } >&2 + false + else + echo 'Congratulations! All vendoring changes are done the right way.' + fi +else + echo 'No vendor changes in diff.' +fi diff --git a/vendor/github.com/moby/moby/hack/validate/vet b/vendor/github.com/moby/moby/hack/validate/vet new file mode 100755 index 0000000..6476048 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/vet @@ -0,0 +1,32 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + failedVet=$(go vet "$f") + if [ "$failedVet" ]; then + errors+=( "$failedVet" ) + fi +done + + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been vetted.' +else + { + echo "Errors from go vet:" + for err in "${errors[@]}"; do + echo " - $err" + done + echo + echo 'Please fix the above errors. You can test via "go vet" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/vendor.sh b/vendor/github.com/moby/moby/hack/vendor.sh new file mode 100755 index 0000000..9a4d038 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/vendor.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# This file is just wrapper around vndr (github.com/LK4D4/vndr) tool. +# For updating dependencies you should change `vendor.conf` file in root of the +# project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for +# vndr usage. + +set -e + +if ! hash vndr; then + echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH" + exit 1 +fi + +vndr "$@" diff --git a/vendor/github.com/moby/moby/hooks/post_build b/vendor/github.com/moby/moby/hooks/post_build new file mode 100755 index 0000000..5281707 --- /dev/null +++ b/vendor/github.com/moby/moby/hooks/post_build @@ -0,0 +1,19 @@ +#!/bin/bash + +if [ -n "${BUILD_DOCS}" ]; then + set -e + DOCS_IMAGE=${DOCS_IMAGE:-${IMAGE_NAME}-docs} + docker run \ + --entrypoint '' \ + --privileged \ + -e DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) \ + -v $(pwd)/docs/yaml/docs:/docs \ + "${IMAGE_NAME}" \ + sh -c 'hack/make.sh yaml-docs-generator && bundles/latest/yaml-docs-generator/yaml-docs-generator --target /docs' + + ( + cd docs/yaml + docker build -t ${DOCS_IMAGE} . + docker push ${DOCS_IMAGE} + ) +fi diff --git a/vendor/github.com/moby/moby/image/fs.go b/vendor/github.com/moby/moby/image/fs.go new file mode 100644 index 0000000..39cfbf5 --- /dev/null +++ b/vendor/github.com/moby/moby/image/fs.go @@ -0,0 +1,173 @@ +package image + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +// DigestWalkFunc is function called by StoreBackend.Walk +type DigestWalkFunc func(id digest.Digest) error + +// StoreBackend provides interface for image.Store persistence +type StoreBackend interface { + Walk(f DigestWalkFunc) error + Get(id digest.Digest) ([]byte, error) + Set(data []byte) (digest.Digest, error) + Delete(id digest.Digest) error + SetMetadata(id digest.Digest, key string, data []byte) error + GetMetadata(id digest.Digest, key string) ([]byte, error) + DeleteMetadata(id digest.Digest, key string) error +} + +// fs implements StoreBackend using the filesystem. +type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + return s, nil +} + +func (s *fs) contentFile(dgst digest.Digest) string { + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(dgst digest.Digest) string { + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f DigestWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(dgst); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given digest. +func (s *fs) Get(dgst digest.Digest) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(dgst) +} + +func (s *fs) get(dgst digest.Digest) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(dgst)) + if err != nil { + return nil, err + } + + // todo: maybe optional + if digest.FromBytes(content) != dgst { + return nil, fmt.Errorf("failed to verify: %v", dgst) + } + + return content, nil +} + +// Set stores content by checksum. +func (s *fs) Set(data []byte) (digest.Digest, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("Invalid empty data") + } + + dgst := digest.FromBytes(data) + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + return "", err + } + + return dgst, nil +} + +// Delete removes content and metadata files associated with the digest. +func (s *fs) Delete(dgst digest.Digest) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { + return err + } + if err := os.Remove(s.contentFile(dgst)); err != nil { + return err + } + return nil +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(dgst); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(dgst)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) +} + +// GetMetadata returns metadata for a given digest. +func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(dgst); err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) +} + +// DeleteMetadata removes the metadata associated with a digest. +func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) +} diff --git a/vendor/github.com/moby/moby/image/fs_test.go b/vendor/github.com/moby/moby/image/fs_test.go new file mode 100644 index 0000000..8d602d9 --- /dev/null +++ b/vendor/github.com/moby/moby/image/fs_test.go @@ -0,0 +1,384 @@ +package image + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestFSGetSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testGetSet(t, fs) +} + +func TestFSGetInvalidData(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id, err := fs.Set([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + + dgst := digest.Digest(id) + + if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600); err != nil { + t.Fatal(err) + } + + _, err = fs.Get(id) + if err == nil { + t.Fatal("Expected get to fail after data modification.") + } +} + +func TestFSInvalidSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id := digest.FromBytes([]byte("foobar")) + err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700) + if err != nil { + t.Fatal(err) + } + + _, err = fs.Set([]byte("foobar")) + if err == nil { + t.Fatal("Expecting error from invalid filesystem data.") + } +} + +func TestFSInvalidRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + tcases := []struct { + root, invalidFile string + }{ + {"root", "root"}, + {"root", "root/content"}, + {"root", "root/metadata"}, + } + + for _, tc := range tcases { + root := filepath.Join(tmpdir, tc.root) + filePath := filepath.Join(tmpdir, tc.invalidFile) + err := os.MkdirAll(filepath.Dir(filePath), 0700) + if err != nil { + t.Fatal(err) + } + f, err := os.Create(filePath) + if err != nil { + t.Fatal(err) + } + f.Close() + + _, err = NewFSStoreBackend(root) + if err == nil { + t.Fatalf("Expected error from root %q and invlid file %q", tc.root, tc.invalidFile) + } + + os.RemoveAll(root) + } + +} + +func testMetadataGetSet(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + tcases := []struct { + id digest.Digest + key string + value []byte + }{ + {id, "tkey", []byte("tval1")}, + {id, "tkey2", []byte("tval2")}, + {id2, "tkey", []byte("tval3")}, + } + + for _, tc := range tcases { + err = store.SetMetadata(tc.id, tc.key, tc.value) + if err != nil { + t.Fatal(err) + } + + actual, err := store.GetMetadata(tc.id, tc.key) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(actual, tc.value) != 0 { + t.Fatalf("Metadata expected %q, got %q", tc.value, actual) + } + } + + _, err = store.GetMetadata(id2, "tkey2") + if err == nil { + t.Fatal("Expected error for getting metadata for unknown key") + } + + id3 := digest.FromBytes([]byte("baz")) + err = store.SetMetadata(id3, "tkey", []byte("tval")) + if err == nil { + t.Fatal("Expected error for setting metadata for unknown ID.") + } + + _, err = store.GetMetadata(id3, "tkey") + if err == nil { + t.Fatal("Expected error for getting metadata for unknown ID.") + } +} + +func TestFSMetadataGetSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testMetadataGetSet(t, fs) +} + +func TestFSDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testDelete(t, fs) +} + +func TestFSWalker(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testWalker(t, fs) +} + +func TestFSInvalidWalker(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + fooID, err := fs.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, "sha256/foobar"), []byte("foobar"), 0600); err != nil { + t.Fatal(err) + } + + n := 0 + err = fs.Walk(func(id digest.Digest) error { + if id != fooID { + t.Fatalf("Invalid walker ID %q, expected %q", id, fooID) + } + n++ + return nil + }) + if err != nil { + t.Fatalf("Invalid data should not have caused walker error, got %v", err) + } + if n != 1 { + t.Fatalf("Expected 1 walk initialization, got %d", n) + } +} + +func testGetSet(t *testing.T, store StoreBackend) { + type tcase struct { + input []byte + expected digest.Digest + } + tcases := []tcase{ + {[]byte("foobar"), digest.Digest("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")}, + } + + randomInput := make([]byte, 8*1024) + _, err := rand.Read(randomInput) + if err != nil { + t.Fatal(err) + } + // skipping use of digest pkg because its used by the implementation + h := sha256.New() + _, err = h.Write(randomInput) + if err != nil { + t.Fatal(err) + } + tcases = append(tcases, tcase{ + input: randomInput, + expected: digest.Digest("sha256:" + hex.EncodeToString(h.Sum(nil))), + }) + + for _, tc := range tcases { + id, err := store.Set([]byte(tc.input)) + if err != nil { + t.Fatal(err) + } + if id != tc.expected { + t.Fatalf("Expected ID %q, got %q", tc.expected, id) + } + } + + for _, emptyData := range [][]byte{nil, {}} { + _, err := store.Set(emptyData) + if err == nil { + t.Fatal("Expected error for nil input.") + } + } + + for _, tc := range tcases { + data, err := store.Get(tc.expected) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(data, tc.input) != 0 { + t.Fatalf("Expected data %q, got %q", tc.input, data) + } + } + + for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { + _, err := store.Get(key) + if err == nil { + t.Fatalf("Expected error for ID %q.", key) + } + } + +} + +func testDelete(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + err = store.Delete(id) + if err != nil { + t.Fatal(err) + } + + _, err = store.Get(id) + if err == nil { + t.Fatalf("Expected getting deleted item %q to fail", id) + } + _, err = store.Get(id2) + if err != nil { + t.Fatal(err) + } + + err = store.Delete(id2) + if err != nil { + t.Fatal(err) + } + _, err = store.Get(id2) + if err == nil { + t.Fatalf("Expected getting deleted item %q to fail", id2) + } +} + +func testWalker(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + tcases := make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + tcases[id2] = struct{}{} + n := 0 + err = store.Walk(func(id digest.Digest) error { + delete(tcases, id) + n++ + return nil + }) + if err != nil { + t.Fatal(err) + } + + if n != 2 { + t.Fatalf("Expected 2 walk initializations, got %d", n) + } + if len(tcases) != 0 { + t.Fatalf("Expected empty unwalked set, got %+v", tcases) + } + + // stop on error + tcases = make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + err = store.Walk(func(id digest.Digest) error { + return errors.New("") + }) + if err == nil { + t.Fatalf("Exected error from walker.") + } +} diff --git a/vendor/github.com/moby/moby/image/image.go b/vendor/github.com/moby/moby/image/image.go new file mode 100644 index 0000000..29a990a --- /dev/null +++ b/vendor/github.com/moby/moby/image/image.go @@ -0,0 +1,150 @@ +package image + +import ( + "encoding/json" + "errors" + "io" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types/container" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return id.Digest().String() +} + +// Digest converts ID into a digest +func (id ID) Digest() digest.Digest { + return digest.Digest(id) +} + +// IDFromDigest creates an ID from a digest +func IDFromDigest(digest digest.Digest) ID { + return ID(digest) +} + +// V1Image stores the V1 image configuration. +type V1Image struct { + // ID a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent id of the image + Parent string `json:"parent,omitempty"` + // Comment user added comment + Comment string `json:"comment,omitempty"` + // Created timestamp when image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` + // DockerVersion specifies version on which image is built + DockerVersion string `json:"docker_version,omitempty"` + // Author of the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *container.Config `json:"config,omitempty"` + // Architecture is the hardware that the image is build and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID +} + +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON +} + +// ID returns the image's content-addressable ID. +func (img *Image) ID() ID { + return img.computedID +} + +// ImageID stringifies ID. +func (img *Image) ImageID() string { + return img.ID().String() +} + +// RunConfig returns the image's container config. +func (img *Image) RunConfig() *container.Config { + return img.Config +} + +// MarshalJSON serializes the image to JSON. It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. +func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) + if err != nil { + return nil, err + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(pass1, &c); err != nil { + return nil, err + } + return json.Marshal(c) +} + +// History stores build commands that were used to create an image +type History struct { + // Created timestamp for build point + Created time.Time `json:"created"` + // Author of the build point + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building image. + CreatedBy string `json:"created_by,omitempty"` + // Comment is custom message set by the user when creating the image. + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Exporter provides interface for exporting and importing images +type Exporter interface { + Load(io.ReadCloser, io.Writer, bool) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. +func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("Invalid image JSON, no RootFS key.") + } + + img.rawJSON = src + + return img, nil +} diff --git a/vendor/github.com/moby/moby/image/image_test.go b/vendor/github.com/moby/moby/image/image_test.go new file mode 100644 index 0000000..525023b --- /dev/null +++ b/vendor/github.com/moby/moby/image/image_test.go @@ -0,0 +1,59 @@ +package image + +import ( + "encoding/json" + "sort" + "strings" + "testing" +) + +const sampleImageJSON = `{ + "architecture": "amd64", + "os": "linux", + "config": {}, + "rootfs": { + "type": "layers", + "diff_ids": [] + } +}` + +func TestJSON(t *testing.T) { + img, err := NewFromJSON([]byte(sampleImageJSON)) + if err != nil { + t.Fatal(err) + } + rawJSON := img.RawJSON() + if string(rawJSON) != sampleImageJSON { + t.Fatalf("Raw JSON of config didn't match: expected %+v, got %v", sampleImageJSON, rawJSON) + } +} + +func TestInvalidJSON(t *testing.T) { + _, err := NewFromJSON([]byte("{}")) + if err == nil { + t.Fatal("Expected JSON parse error") + } +} + +func TestMarshalKeyOrder(t *testing.T) { + b, err := json.Marshal(&Image{ + V1Image: V1Image{ + Comment: "a", + Author: "b", + Architecture: "c", + }, + }) + if err != nil { + t.Fatal(err) + } + + expectedOrder := []string{"architecture", "author", "comment"} + var indexes []int + for _, k := range expectedOrder { + indexes = append(indexes, strings.Index(string(b), k)) + } + + if !sort.IntsAreSorted(indexes) { + t.Fatal("invalid key order in JSON: ", string(b)) + } +} diff --git a/vendor/github.com/moby/moby/image/rootfs.go b/vendor/github.com/moby/moby/image/rootfs.go new file mode 100644 index 0000000..7b24e3e --- /dev/null +++ b/vendor/github.com/moby/moby/image/rootfs.go @@ -0,0 +1,44 @@ +package image + +import ( + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/layer" +) + +// TypeLayers is used for RootFS.Type for filesystems organized into layers. +const TypeLayers = "layers" + +// typeLayersWithBase is an older format used by Windows up to v1.12. We +// explicitly handle this as an error case to ensure that a daemon which still +// has an older image like this on disk can still start, even though the +// image itself is not usable. See https://github.com/docker/docker/pull/25806. +const typeLayersWithBase = "layers+base" + +// RootFS describes images root filesystem +// This is currently a placeholder that only supports layers. In the future +// this can be made into an interface that supports different implementations. +type RootFS struct { + Type string `json:"type"` + DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` +} + +// NewRootFS returns empty RootFS struct +func NewRootFS() *RootFS { + return &RootFS{Type: TypeLayers} +} + +// Append appends a new diffID to rootfs +func (r *RootFS) Append(id layer.DiffID) { + r.DiffIDs = append(r.DiffIDs, id) +} + +// ChainID returns the ChainID for the top layer in RootFS. +func (r *RootFS) ChainID() layer.ChainID { + if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { + logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs) + return "" + } + return layer.CreateChainID(r.DiffIDs) +} diff --git a/vendor/github.com/moby/moby/image/spec/v1.1.md b/vendor/github.com/moby/moby/image/spec/v1.1.md new file mode 100644 index 0000000..6279da5 --- /dev/null +++ b/vendor/github.com/moby/moby/image/spec/v1.1.md @@ -0,0 +1,637 @@ +# Docker Image Specification v1.1.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +This version of the image specification was adopted starting in Docker 1.10. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Each layer is a set of filesystem + changes. Layers do not have configuration metadata such as environment + variables or default arguments - these are properties of the image as a + whole rather than any particular layer. +
+
+ Image JSON +
+
+ Each image has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. The JSON structure also references a cryptographic hash of + each layer used by the image, and provides history information for + those layers. This JSON is considered to be immutable, because changing + it would change the computed ImageID. Changing it means creating a new + derived image, instead of changing the existing image. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Layer DiffID +
+
+ Layers are referenced by cryptographic hashes of their serialized + representation. This is a SHA256 digest over the tar archive used to + transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Layers must be packed and unpacked reproducibly to avoid changing the + layer ID, for example by using tar-split to save the tar headers. Note + that the digest used as the layer ID is taken over an uncompressed + version of the tar. +
+
+ Layer ChainID +
+
+ For convenience, it is sometimes useful to refer to a stack of layers + with a single identifier. This is called a ChainID. For a + single layer (or the layer at the bottom of a stack), the + ChainID is equal to the layer's DiffID. + Otherwise the ChainID is given by the formula: + ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)). +
+
+ ImageID +
+
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is + represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Since the configuration JSON that gets hashed references hashes of each + layer in the image, this formulation of the ImageID makes images + content-addressable. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. Tag values are limited to the set of characters + [a-zA-Z0-9_.-], except they may not start with a . + or - character. Tags are limited to 127 characters. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. A repository name is made up of slash-separated + name components, optionally prefixed by a DNS hostname. The hostname + must comply with standard DNS rules, but may not contain + _ characters. If a hostname is present, it may optionally + be followed by a port number in the format :8080. + Name components may contain lowercase characters, digits, and + separators. A separator is defined as a period, one or two underscores, + or one or more dashes. A name component may not start or end with + a separator. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • +
  • amd64
  • +
  • arm
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • +
  • freebsd
  • +
  • linux
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • +
  • uid
  • +
  • user:group
  • +
  • uid:gid
  • +
  • uid:group
  • +
  • user:gid
  • +
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
    +
  • + "port/tcp" +
  • +
  • + "port/udp" +
  • +
  • + "port" +
  • +
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {},
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ rootfs struct +
+
+ The rootfs key references the layer content addresses used by the + image. This makes the image config hash depend on the filesystem hash. + rootfs has two subkeys: + +
    +
  • + type is usually set to layers. +
  • +
  • + diff_ids is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. +
  • +
+ + + Here is an example rootfs section: + +
"rootfs": {
+  "diff_ids": [
+    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+  ],
+  "type": "layers"
+}
+
+
+ history struct +
+
+ history is an array of objects describing the history of + each layer. The array is ordered from bottom-most layer to top-most + layer. The object has the following fields. + +
    +
  • + created: Creation time, expressed as a ISO-8601 formatted + combined date and time +
  • +
  • + author: The author of the build point +
  • +
  • + created_by: The command which created the layer +
  • +
  • + comment: A custom message set when creating the layer +
  • +
  • + empty_layer: This field is used to mark if the history + item created a filesystem diff. It is set to true if this history + item doesn't correspond to an actual layer in the rootfs section + (for example, a command like ENV which results in no change to the + filesystem). +
  • +
+ +Here is an example history section: + +
"history": [
+  {
+    "created": "2015-10-31T22:22:54.690851953Z",
+    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+  },
+  {
+    "created": "2015-10-31T22:22:55.613815829Z",
+    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+    "empty_layer": true
+  }
+]
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory. Here is the +initial empty directory structure for the a changeset using the +randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are +generated based on the content](#id_desc)). + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +To make changes to the filesystem of this container image, create a new +directory, such as `f60c56784b83`, and initialize it with a snapshot of the +parent image's root filesystem, so that the directory is identical to that +of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very +efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - image configuration JSON file + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. + +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/vendor/github.com/moby/moby/image/spec/v1.2.md b/vendor/github.com/moby/moby/image/spec/v1.2.md new file mode 100644 index 0000000..483ce1e --- /dev/null +++ b/vendor/github.com/moby/moby/image/spec/v1.2.md @@ -0,0 +1,696 @@ +# Docker Image Specification v1.2.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +This version of the image specification was adopted starting in Docker 1.12. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Each layer is a set of filesystem + changes. Layers do not have configuration metadata such as environment + variables or default arguments - these are properties of the image as a + whole rather than any particular layer. +
+
+ Image JSON +
+
+ Each image has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. The JSON structure also references a cryptographic hash of + each layer used by the image, and provides history information for + those layers. This JSON is considered to be immutable, because changing + it would change the computed ImageID. Changing it means creating a new + derived image, instead of changing the existing image. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Layer DiffID +
+
+ Layers are referenced by cryptographic hashes of their serialized + representation. This is a SHA256 digest over the tar archive used to + transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Layers must be packed and unpacked reproducibly to avoid changing the + layer ID, for example by using tar-split to save the tar headers. Note + that the digest used as the layer ID is taken over an uncompressed + version of the tar. +
+
+ Layer ChainID +
+
+ For convenience, it is sometimes useful to refer to a stack of layers + with a single identifier. This is called a ChainID. For a + single layer (or the layer at the bottom of a stack), the + ChainID is equal to the layer's DiffID. + Otherwise the ChainID is given by the formula: + ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)). +
+
+ ImageID +
+
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is + represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Since the configuration JSON that gets hashed references hashes of each + layer in the image, this formulation of the ImageID makes images + content-addressable. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. Tag values are limited to the set of characters + [a-zA-Z0-9_.-], except they may not start with a . + or - character. Tags are limited to 127 characters. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. A repository name is made up of slash-separated + name components, optionally prefixed by a DNS hostname. The hostname + must comply with standard DNS rules, but may not contain + _ characters. If a hostname is present, it may optionally + be followed by a port number in the format :8080. + Name components may contain lowercase characters, digits, and + separators. A separator is defined as a period, one or two underscores, + or one or more dashes. A name component may not start or end with + a separator. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • +
  • amd64
  • +
  • arm
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • +
  • freebsd
  • +
  • linux
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • +
  • uid
  • +
  • user:group
  • +
  • uid:gid
  • +
  • uid:group
  • +
  • user:gid
  • +
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
    +
  • + "port/tcp" +
  • +
  • + "port/udp" +
  • +
  • + "port" +
  • +
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Healthcheck struct +
+
+ A test to perform to determine whether the container is healthy. + Here is an example: +
{
+  "Test": [
+      "CMD-SHELL",
+      "/usr/bin/check-health localhost"
+  ],
+  "Interval": 30000000000,
+  "Timeout": 10000000000,
+  "Retries": 3
+}
+ The object has the following fields. +
+
+ Test array of strings +
+
+ The test to perform to check that the container is healthy. + The options are: +
    +
  • [] : inherit healthcheck from base image
  • +
  • ["NONE"] : disable healthcheck
  • +
  • ["CMD", arg1, arg2, ...] : exec arguments directly
  • +
  • ["CMD-SHELL", command] : run command with system's default shell
  • +
+ + The test command should exit with a status of 0 if the container is healthy, + or with 1 if it is unhealthy. +
+
+ Interval integer +
+
+ Number of nanoseconds to wait between probe attempts. +
+
+ Timeout integer +
+
+ Number of nanoseconds to wait before considering the check to have hung. +
+
+ Retries integer +
+
+ The number of consecutive failures needed to consider a container as unhealthy. +
+
+ + In each case, the field can be omitted to indicate that the + value should be inherited from the base layer. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {},
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ rootfs struct +
+
+ The rootfs key references the layer content addresses used by the + image. This makes the image config hash depend on the filesystem hash. + rootfs has two subkeys: + +
    +
  • + type is usually set to layers. +
  • +
  • + diff_ids is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. +
  • +
+ + + Here is an example rootfs section: + +
"rootfs": {
+  "diff_ids": [
+    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+  ],
+  "type": "layers"
+}
+
+
+ history struct +
+
+ history is an array of objects describing the history of + each layer. The array is ordered from bottom-most layer to top-most + layer. The object has the following fields. + +
    +
  • + created: Creation time, expressed as a ISO-8601 formatted + combined date and time +
  • +
  • + author: The author of the build point +
  • +
  • + created_by: The command which created the layer +
  • +
  • + comment: A custom message set when creating the layer +
  • +
  • + empty_layer: This field is used to mark if the history + item created a filesystem diff. It is set to true if this history + item doesn't correspond to an actual layer in the rootfs section + (for example, a command like ENV which results in no change to the + filesystem). +
  • +
+ +Here is an example history section: + +
"history": [
+  {
+    "created": "2015-10-31T22:22:54.690851953Z",
+    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+  },
+  {
+    "created": "2015-10-31T22:22:55.613815829Z",
+    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+    "empty_layer": true
+  }
+]
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory. Here is the +initial empty directory structure for the a changeset using the +randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are +generated based on the content](#id_desc)). + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +To make changes to the filesystem of this container image, create a new +directory, such as `f60c56784b83`, and initialize it with a snapshot of the +parent image's root filesystem, so that the directory is identical to that +of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very +efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - image configuration JSON file + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. + +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/vendor/github.com/moby/moby/image/spec/v1.md b/vendor/github.com/moby/moby/image/spec/v1.md new file mode 100644 index 0000000..fce3a06 --- /dev/null +++ b/vendor/github.com/moby/moby/image/spec/v1.md @@ -0,0 +1,573 @@ +# Docker Image Specification v1.0.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Image layer is a general + term which may be used to refer to one or both of the following: + +
    +
  1. The metadata for the layer, described in the JSON format.
  2. +
  3. The filesystem changes described by a layer.
  4. +
+ + To refer to the former you may use the term Layer JSON or + Layer Metadata. To refer to the latter you may use the term + Image Filesystem Changeset or Image Diff. +
+
+ Image JSON +
+
+ Each layer has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Image ID +
+
+ Each layer is given an ID upon its creation. It is + represented as a hexadecimal encoding of 256 bits, e.g., + a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Image IDs should be sufficiently random so as to be globally unique. + 32 bytes read from /dev/urandom is sufficient for all + practical purposes. Alternatively, an image ID may be derived as a + cryptographic hash of image contents as the result is considered + indistinguishable from random. The choice is left up to implementors. +
+
+ Image Parent +
+
+ Most layer metadata structs contain a parent field which + refers to the Image from which another directly descends. An image + contains a separate JSON metadata file and set of changes relative to + the filesystem of its parent image. Image Ancestor and + Image Descendant are also common terms. +
+
+ Image Checksum +
+
+ Layer metadata structs contain a cryptographic hash of the contents of + the layer's filesystem changeset. Though the set of changes exists as a + simple Tar archive, two archives with identical filenames and content + will have different SHA digests if the last-access or last-modified + times of any entries differ. For this reason, image checksums are + generated using the TarSum algorithm which produces a cryptographic + hash of file contents and selected headers only. Details of this + algorithm are described in the separate TarSum specification. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. An image name suffix (the name component after :) is + often referred to as a tag as well, though it strictly refers to the + full name of an image. Acceptable values for a tag suffix are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-Z0-9], punctuation + characters [._-], and MUST NOT contain a : + character. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. Acceptable values for repository name are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-Z0-9], and punctuation + characters [._-], however it MAY contain additional + / and : characters for organizational + purposes, with the last : character being interpreted + dividing the repository component of the name from the tag suffix + component. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9", + "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024", + "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b", + "created": "2014-10-13T21:19:18.674353812Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "Size": 271828, + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + } +} +``` + +### Image JSON Field Descriptions + +
+
+ id string +
+
+ Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies + the image. +
+
+ parent string +
+
+ ID of the parent image. If there is no parent image then this field + should be omitted. A collection of images may share many of the same + ancestor layers. This organizational structure is strictly a tree with + any one layer having either no parent or a single parent and zero or + more descendant layers. Cycles are not allowed and implementations + should be careful to avoid creating them or iterating through a cycle + indefinitely. +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • +
  • amd64
  • +
  • arm
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • +
  • freebsd
  • +
  • linux
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ checksum string +
+
+ Image Checksum of the filesystem changeset associated with the image + layer. +
+
+ Size integer +
+
+ The size in bytes of the filesystem changeset associated with the image + layer. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • +
  • uid
  • +
  • user:group
  • +
  • uid:gid
  • +
  • uid:group
  • +
  • user:gid
  • +
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
    +
  • + "port/tcp" +
  • +
  • + "port/udp" +
  • +
  • + "port" +
  • +
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {},
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory named with the +ID of the image being created. Here is the initial empty directory structure +for the changeset for an image with ID `c3167915dc9d` ([real IDs are much +longer](#id_desc), but this example use a truncated one here for brevity. +Implementations need not name the rootfs directory in this way but it may be +convenient for keeping record of a large number of image layers.): + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +The TarSum checksum for the archive file is then computed and placed in the +JSON metadata along with the execution parameters. + +To make changes to the filesystem of this container image, create a new +directory named with a new ID, such as `f60c56784b83`, and initialize it with +a snapshot of the parent image's root filesystem, so that the directory is +identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem +can make this very efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going to add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - all image layer JSON files + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c +│   ├── VERSION +│   ├── json +│   └── layer.tar +└── repositories +``` + +There are one or more directories named with the ID for each layer in a full +image. Each of these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The JSON metadata for an image layer + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +And the `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. + +## Loading an Image Filesystem Changeset + +Unpacking a bundle of image layer JSON files and their corresponding filesystem +changesets can be done using a series of steps: + +1. Follow the parent IDs of image layers to find the root ancestor (an image +with no parent ID specified). +2. For every image layer, in order from root ancestor and descending down, +extract the contents of that layer's filesystem changeset archive into a +directory which will be used as the root of a container filesystem. + + - Extract all contents of each archive. + - Walk the directory tree once more, removing any files with the prefix + `.wh.` and the corresponding file or directory named without this prefix. + + +## Implementations + +This specification is an admittedly imperfect description of an +imperfectly-understood problem. The Docker project is, in turn, an attempt to +implement this specification. Our goal and our execution toward it will evolve +over time, but our primary concern in this specification and in our +implementation is compatibility and interoperability. diff --git a/vendor/github.com/moby/moby/image/store.go b/vendor/github.com/moby/moby/image/store.go new file mode 100644 index 0000000..b61c456 --- /dev/null +++ b/vendor/github.com/moby/moby/image/store.go @@ -0,0 +1,295 @@ +package image + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// Store is an interface for creating and accessing images +type Store interface { + Create(config []byte) (ID, error) + Get(id ID) (*Image, error) + Delete(id ID) ([]layer.Metadata, error) + Search(partialID string) (ID, error) + SetParent(id ID, parent ID) error + GetParent(id ID) (ID, error) + Children(id ID) []ID + Map() map[ID]*Image + Heads() map[ID]*Image +} + +// LayerGetReleaser is a minimal interface for getting and releasing images. +type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.Mutex + ls LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digest.Set +} + +// NewImageStore returns new store object for given layer store +func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { + is := &store{ + ls: ls, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digest.NewSet(), + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(dgst digest.Digest) error { + img, err := is.Get(IDFromDigest(dgst)) + if err != nil { + logrus.Errorf("invalid image %v, %v", dgst, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + l, err = is.ls.Get(chainID) + if err != nil { + return err + } + } + if err := is.digestSet.Add(dgst); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[IDFromDigest(dgst)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. + rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := IDFromDigest(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + l, err = is.ls.Get(layerID) + if err != nil { + return "", err + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(imageID.Digest()); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +func (is *store) Search(term string) (ID, error) { + is.Lock() + defer is.Unlock() + + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digest.ErrDigestNotFound { + err = fmt.Errorf("No such image: %s", term) + } + return "", err + } + return IDFromDigest(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id.Digest()) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id.Digest(), "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(id.Digest()); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id.Digest()) + + if imageMeta.layer != nil { + return is.ls.Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := is.fs.GetMetadata(id.Digest(), "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? +} + +func (is *store) Children(id ID) []ID { + is.Lock() + defer is.Unlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.Lock() + defer is.Unlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} diff --git a/vendor/github.com/moby/moby/image/store_test.go b/vendor/github.com/moby/moby/image/store_test.go new file mode 100644 index 0000000..50f8aa8 --- /dev/null +++ b/vendor/github.com/moby/moby/image/store_test.go @@ -0,0 +1,300 @@ +package image + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +func TestRestore(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + _, err = fs.Set([]byte(`invalid`)) + if err != nil { + t.Fatal(err) + } + id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + err = fs.SetMetadata(id2, "parent", []byte(id1)) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + imgs := is.Map() + if actual, expected := len(imgs), 2; actual != expected { + t.Fatalf("invalid images length, expected 2, got %q", len(imgs)) + } + + img1, err := is.Get(ID(id1)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img1.computedID, ID(id1); actual != expected { + t.Fatalf("invalid image ID: expected %q, got %q", expected, actual) + } + + if actual, expected := img1.computedID.String(), string(id1); actual != expected { + t.Fatalf("invalid image ID string: expected %q, got %q", expected, actual) + } + + img2, err := is.Get(ID(id2)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img1.Comment, "abc"; actual != expected { + t.Fatalf("invalid comment for image1: expected %q, got %q", expected, actual) + } + + if actual, expected := img2.Comment, "def"; actual != expected { + t.Fatalf("invalid comment for image2: expected %q, got %q", expected, actual) + } + + p, err := is.GetParent(ID(id1)) + if err == nil { + t.Fatal("expected error for getting parent") + } + + p, err = is.GetParent(ID(id2)) + if err != nil { + t.Fatal(err) + } + if actual, expected := p, ID(id1); actual != expected { + t.Fatalf("invalid parent: expected %q, got %q", expected, actual) + } + + children := is.Children(ID(id1)) + if len(children) != 1 { + t.Fatalf("invalid children length: %q", len(children)) + } + if actual, expected := children[0], ID(id2); actual != expected { + t.Fatalf("invalid child for id1: expected %q, got %q", expected, actual) + } + + heads := is.Heads() + if actual, expected := len(heads), 1; actual != expected { + t.Fatalf("invalid images length: expected %q, got %q", expected, actual) + } + + sid1, err := is.Search(string(id1)[:10]) + if err != nil { + t.Fatal(err) + } + if actual, expected := sid1, ID(id1); actual != expected { + t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) + } + + sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) + if err != nil { + t.Fatal(err) + } + if actual, expected := sid1, ID(id1); actual != expected { + t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) + } + + invalidPattern := digest.Digest(id1).Hex()[1:6] + _, err = is.Search(invalidPattern) + if err == nil { + t.Fatalf("expected search for %q to fail", invalidPattern) + } + +} + +func TestAddDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"); actual != expected { + t.Fatalf("create ID mismatch: expected %q, got %q", expected, actual) + } + + img, err := is.Get(id1) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img.Comment, "abc"; actual != expected { + t.Fatalf("invalid comment in image: expected %q, got %q", expected, actual) + } + + id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + + err = is.SetParent(id2, id1) + if err != nil { + t.Fatal(err) + } + + pid1, err := is.GetParent(id2) + if err != nil { + t.Fatal(err) + } + if actual, expected := pid1, id1; actual != expected { + t.Fatalf("invalid parent for image: expected %q, got %q", expected, actual) + } + + _, err = is.Delete(id1) + if err != nil { + t.Fatal(err) + } + _, err = is.Get(id1) + if err == nil { + t.Fatalf("expected get for deleted image %q to fail", id1) + } + _, err = is.Get(id2) + if err != nil { + t.Fatal(err) + } + pid1, err = is.GetParent(id2) + if err == nil { + t.Fatalf("expected parent check for image %q to fail, got %q", id2, pid1) + } + +} + +func TestSearchAfterDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + id1, err := is.Search(string(id)[:15]) + if err != nil { + t.Fatal(err) + } + + if actual, expected := id1, id; expected != actual { + t.Fatalf("wrong id returned from search: expected %q, got %q", expected, actual) + } + + if _, err := is.Delete(id); err != nil { + t.Fatal(err) + } + + if _, err := is.Search(string(id)[:15]); err == nil { + t.Fatal("expected search after deletion to fail") + } +} + +func TestParentReset(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + if err := is.SetParent(id, id2); err != nil { + t.Fatal(err) + } + + ids := is.Children(id2) + if actual, expected := len(ids), 1; expected != actual { + t.Fatalf("wrong number of children: %d, got %d", expected, actual) + } + + if err := is.SetParent(id, id3); err != nil { + t.Fatal(err) + } + + ids = is.Children(id2) + if actual, expected := len(ids), 0; expected != actual { + t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) + } + + ids = is.Children(id3) + if actual, expected := len(ids), 1; expected != actual { + t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) + } + +} + +type mockLayerGetReleaser struct{} + +func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/moby/image/tarexport/load.go b/vendor/github.com/moby/moby/image/tarexport/load.go new file mode 100644 index 0000000..01edd91 --- /dev/null +++ b/vendor/github.com/moby/moby/image/tarexport/load.go @@ -0,0 +1,390 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/reference" +) + +func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + progressOutput progress.Output + ) + if !quiet { + progressOutput = sf.NewProgressOutput(outStream, false) + } + outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()} + + tmpDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { + return err + } + // read manifest, if no file then load in legacy mode + manifestPath, err := safePath(tmpDir, manifestFileName) + if err != nil { + return err + } + manifestFile, err := os.Open(manifestPath) + if err != nil { + if os.IsNotExist(err) { + return l.legacyLoad(tmpDir, outStream, progressOutput) + } + return err + } + defer manifestFile.Close() + + var manifest []manifestItem + if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { + return err + } + + var parentLinks []parentLink + var imageIDsStr string + var imageRefCount int + + for _, m := range manifest { + configPath, err := safePath(tmpDir, m.Config) + if err != nil { + return err + } + config, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + img, err := image.NewFromJSON(config) + if err != nil { + return err + } + var rootFS image.RootFS + rootFS = *img.RootFS + rootFS.DiffIDs = nil + + if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { + return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) + } + + for i, diffID := range img.RootFS.DiffIDs { + layerPath, err := safePath(tmpDir, m.Layers[i]) + if err != nil { + return err + } + r := rootFS + r.Append(diffID) + newLayer, err := l.ls.Get(r.ChainID()) + if err != nil { + newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), m.LayerSources[diffID], progressOutput) + if err != nil { + return err + } + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID) + + imageRefCount = 0 + for _, repoTag := range m.RepoTags { + named, err := reference.ParseNamed(repoTag) + if err != nil { + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", ref))) + imageRefCount++ + } + + parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) + l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load") + } + + for _, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + if err := l.setParentID(p.id, p.parentID); err != nil { + return err + } + } + } + + if imageRefCount == 0 { + outStream.Write([]byte(imageIDsStr)) + } + + return nil +} + +func (l *tarexporter) setParentID(id, parentID image.ID) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + parent, err := l.is.Get(parentID) + if err != nil { + return err + } + if !checkValidParent(img, parent) { + return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID()) + } + return l.is.SetParent(id, parentID) +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list. On Linux, this equates to a regular os.Open. + rawTar, err := system.OpenSequential(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + defer rawTar.Close() + + var r io.Reader + if progressOutput != nil { + fileInfo, err := rawTar.Stat() + if err != nil { + logrus.Debugf("Error statting file: %v", err) + return nil, err + } + + r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") + } else { + r = rawTar + } + + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return nil, err + } + defer inflatedLayerData.Close() + + if ds, ok := l.ls.(layer.DescribableStore); ok { + return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) + } + return l.ls.Register(inflatedLayerData, rootFS.ChainID()) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error { + if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.rs.AddTag(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + return err + } + defer repositoriesFile.Close() + + repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.WithName(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + } + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct{ Parent string } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, distribution.Descriptor{}, progressOutput) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} + +type parentLink struct { + id, parentID image.ID +} + +func validatedParentLinks(pl []parentLink) (ret []parentLink) { +mainloop: + for i, p := range pl { + ret = append(ret, p) + for _, p2 := range pl { + if p2.id == p.parentID && p2.id != p.id { + continue mainloop + } + } + ret[i].parentID = "" + } + return +} + +func checkValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 && len(parent.History) == 0 { + return true // having history is not mandatory + } + if len(img.History)-len(parent.History) != 1 { + return false + } + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/moby/moby/image/tarexport/save.go b/vendor/github.com/moby/moby/image/tarexport/save.go new file mode 100644 index 0000000..6e3a5bc --- /dev/null +++ b/vendor/github.com/moby/moby/image/tarexport/save.go @@ -0,0 +1,355 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/reference" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} + diffIDPaths map[layer.DiffID]string // cache every diffID blob to avoid duplicates +} + +func (l *tarexporter) Save(names []string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err + } + + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { + imgDescr := make(map[image.ID]*imageDescriptor) + + addAssoc := func(id image.ID, ref reference.Named) { + if _, ok := imgDescr[id]; !ok { + imgDescr[id] = &imageDescriptor{} + } + + if ref != nil { + var tagged reference.NamedTagged + if _, ok := ref.(reference.Canonical); ok { + return + } + var ok bool + if tagged, ok = ref.(reference.NamedTagged); !ok { + var err error + if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { + return + } + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + } + + for _, name := range names { + id, ref, err := reference.ParseIDOrReference(name) + if err != nil { + return nil, err + } + if id != "" { + _, err := l.is.Get(image.IDFromDigest(id)) + if err != nil { + return nil, err + } + addAssoc(image.IDFromDigest(id), nil) + continue + } + if ref.Name() == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + continue + } + if reference.IsNameOnly(ref) { + assocs := l.rs.ReferencesByName(ref) + for _, assoc := range assocs { + addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref) + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + } + continue + } + id, err = l.rs.Get(ref) + if err != nil { + return nil, err + } + addAssoc(image.IDFromDigest(id), ref) + + } + return imgDescr, nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + s.diffIDPaths = make(map[layer.DiffID]string) + + // get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + var parentLinks []parentLink + + for id, imageDescr := range s.images { + foreignSrcs, err := s.saveImage(id) + if err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + if _, ok := reposLegacy[ref.Name()]; !ok { + reposLegacy[ref.Name()] = make(map[string]string) + } + reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, ref.String()) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: id.Digest().Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + LayerSources: foreignSrcs, + }) + + parentID, _ := s.is.GetParent(id) + parentLinks = append(parentLinks, parentLink{id, parentID}) + s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save") + } + + for i, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + manifest[i].Parent = p.parentID + } + } + + if len(reposLegacy) > 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil { + rf.Close() + return err + } + + rf.Close() + + if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(f).Encode(manifest); err != nil { + f.Close() + return err + } + + f.Close() + + if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + if _, err := io.Copy(outStream, fs); err != nil { + return err + } + return nil +} + +func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { + img, err := s.is.Get(id) + if err != nil { + return nil, err + } + + if len(img.RootFS.DiffIDs) == 0 { + return nil, fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + var foreignSrcs map[layer.DiffID]distribution.Descriptor + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{ + Created: img.Created, + } + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return nil, err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created) + if err != nil { + return nil, err + } + layers = append(layers, v1Img.ID) + parent = v1ID + if src.Digest != "" { + if foreignSrcs == nil { + foreignSrcs = make(map[layer.DiffID]distribution.Descriptor) + } + foreignSrcs[img.RootFS.DiffIDs[i]] = src + } + } + + configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return nil, err + } + if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { + return nil, err + } + + s.images[id].layers = layers + return foreignSrcs, nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return distribution.Descriptor{}, nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != nil { + return distribution.Descriptor{}, err + } + + // todo: why is this version file here? + if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { + return distribution.Descriptor{}, err + } + + imageConfig, err := json.Marshal(legacyImg) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { + return distribution.Descriptor{}, err + } + + // serialize filesystem + layerPath := filepath.Join(outDir, legacyLayerFileName) + l, err := s.ls.Get(id) + if err != nil { + return distribution.Descriptor{}, err + } + defer layer.ReleaseAndLog(s.ls, l) + + if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists { + relPath, err := filepath.Rel(outDir, oldPath) + if err != nil { + return distribution.Descriptor{}, err + } + os.Symlink(relPath, layerPath) + } else { + // Use system.CreateSequential rather than os.Create. This ensures sequential + // file access on Windows to avoid eating into MM standby list. + // On Linux, this equates to a regular os.Create. + tarFile, err := system.CreateSequential(layerPath) + if err != nil { + return distribution.Descriptor{}, err + } + defer tarFile.Close() + + arch, err := l.TarStream() + if err != nil { + return distribution.Descriptor{}, err + } + defer arch.Close() + + if _, err := io.Copy(tarFile, arch); err != nil { + return distribution.Descriptor{}, err + } + + for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { + // todo: maybe save layer created timestamp? + if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { + return distribution.Descriptor{}, err + } + } + + s.diffIDPaths[l.DiffID()] = layerPath + } + s.savedLayers[legacyImg.ID] = struct{}{} + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + return src, nil +} diff --git a/vendor/github.com/moby/moby/image/tarexport/tarexport.go b/vendor/github.com/moby/moby/image/tarexport/tarexport.go new file mode 100644 index 0000000..c0be954 --- /dev/null +++ b/vendor/github.com/moby/moby/image/tarexport/tarexport.go @@ -0,0 +1,47 @@ +package tarexport + +import ( + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string + Parent image.ID `json:",omitempty"` + LayerSources map[layer.DiffID]distribution.Descriptor `json:",omitempty"` +} + +type tarexporter struct { + is image.Store + ls layer.Store + rs reference.Store + loggerImgEvent LogImageEvent +} + +// LogImageEvent defines interface for event generation related to image tar(load and save) operations +type LogImageEvent interface { + //LogImageEvent generates an event related to an image operation + LogImageEvent(imageID, refName, action string) +} + +// NewTarExporter returns new ImageExporter for tar packages +func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store, loggerImgEvent LogImageEvent) image.Exporter { + return &tarexporter{ + is: is, + ls: ls, + rs: rs, + loggerImgEvent: loggerImgEvent, + } +} diff --git a/vendor/github.com/moby/moby/image/v1/imagev1.go b/vendor/github.com/moby/moby/image/v1/imagev1.go new file mode 100644 index 0000000..d498ddb --- /dev/null +++ b/vendor/github.com/moby/moby/image/v1/imagev1.go @@ -0,0 +1,156 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" +) + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = "1.8.3" + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. +// Used for backwards compatibility with old clients. +func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON), nil +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. +func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + imageType := reflect.TypeOf(img).Elem() + for i := 0; i < imageType.NumField(); i++ { + f := imageType.Field(i) + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + // Parent is handled specially below. + if jsonName != "" && jsonName != "parent" { + delete(configAsMap, jsonName) + } + } + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} diff --git a/vendor/github.com/moby/moby/image/v1/imagev1_test.go b/vendor/github.com/moby/moby/image/v1/imagev1_test.go new file mode 100644 index 0000000..936c55e --- /dev/null +++ b/vendor/github.com/moby/moby/image/v1/imagev1_test.go @@ -0,0 +1,55 @@ +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/docker/docker/image" +) + +func TestMakeV1ConfigFromConfig(t *testing.T) { + img := &image.Image{ + V1Image: image.V1Image{ + ID: "v2id", + Parent: "v2parent", + OS: "os", + }, + OSVersion: "osversion", + RootFS: &image.RootFS{ + Type: "layers", + }, + } + v2js, err := json.Marshal(img) + if err != nil { + t.Fatal(err) + } + + // Convert the image back in order to get RawJSON() support. + img, err = image.NewFromJSON(v2js) + if err != nil { + t.Fatal(err) + } + + js, err := MakeV1ConfigFromConfig(img, "v1id", "v1parent", false) + if err != nil { + t.Fatal(err) + } + + newimg := &image.Image{} + err = json.Unmarshal(js, newimg) + if err != nil { + t.Fatal(err) + } + + if newimg.V1Image.ID != "v1id" || newimg.Parent != "v1parent" { + t.Error("ids should have changed", newimg.V1Image.ID, newimg.V1Image.Parent) + } + + if newimg.RootFS != nil { + t.Error("rootfs should have been removed") + } + + if newimg.V1Image.OS != "os" { + t.Error("os should have been preserved") + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/benchmark_test.go b/vendor/github.com/moby/moby/integration-cli/benchmark_test.go new file mode 100644 index 0000000..b87e131 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/benchmark_test.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "sync" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) { + maxConcurrency := runtime.GOMAXPROCS(0) + numIterations := c.N + outerGroup := &sync.WaitGroup{} + outerGroup.Add(maxConcurrency) + chErr := make(chan error, numIterations*2*maxConcurrency) + + for i := 0; i < maxConcurrency; i++ { + go func() { + defer outerGroup.Done() + innerGroup := &sync.WaitGroup{} + innerGroup.Add(2) + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + args := []string{"run", "-d", defaultSleepImage} + args = append(args, sleepCommandForDaemonPlatform()...) + out, _, err := dockerCmdWithError(args...) + if err != nil { + chErr <- fmt.Errorf(out) + return + } + + id := strings.TrimSpace(out) + tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id) + if err != nil { + chErr <- err + return + } + defer os.RemoveAll(tmpDir) + out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir) + if err != nil { + chErr <- fmt.Errorf(out) + return + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("start", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + // don't do an rm -f here since it can potentially ignore errors from the graphdriver + out, _, err = dockerCmdWithError("rm", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + out, _, err := dockerCmdWithError("ps") + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + innerGroup.Wait() + }() + } + + outerGroup.Wait() + close(chErr) + + for err := range chErr { + c.Assert(err, checker.IsNil) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/check_test.go b/vendor/github.com/moby/moby/integration-cli/check_test.go new file mode 100644 index 0000000..7084d6f --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/check_test.go @@ -0,0 +1,383 @@ +package main + +import ( + "fmt" + "net/http/httptest" + "os" + "path/filepath" + "sync" + "syscall" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/reexec" + "github.com/go-check/check" +) + +func Test(t *testing.T) { + reexec.Init() // This is required for external graphdriver tests + + if !isLocalDaemon { + fmt.Println("INFO: Testing against a remote daemon") + } else { + fmt.Println("INFO: Testing against a local daemon") + } + + if daemonPlatform == "linux" { + ensureFrozenImagesLinux(t) + } + check.TestingT(t) +} + +func init() { + check.Suite(&DockerSuite{}) +} + +type DockerSuite struct { +} + +func (s *DockerSuite) OnTimeout(c *check.C) { + if daemonPid > 0 && isLocalDaemon { + signalDaemonDump(daemonPid) + } +} + +func (s *DockerSuite) TearDownTest(c *check.C) { + unpauseAllContainers() + deleteAllContainers() + deleteAllImages() + deleteAllVolumes() + deleteAllNetworks() + deleteAllPlugins() +} + +func init() { + check.Suite(&DockerRegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistrySuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + s.reg = setupRegistry(c, false, "", "") + s.d = NewDaemon(c) +} + +func (s *DockerRegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerSchema1RegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerSchema1RegistrySuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64) + s.reg = setupRegistry(c, true, "", "") + s.d = NewDaemon(c) +} + +func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthHtpasswdSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthHtpasswdSuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + s.reg = setupRegistry(c, false, "htpasswd", "") + s.d = NewDaemon(c) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthTokenSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthTokenSuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + s.d = NewDaemon(c) +} + +func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, tokenURL string) { + if s == nil { + c.Fatal("registry suite isn't initialized") + } + s.reg = setupRegistry(c, false, "token", tokenURL) +} + +func init() { + check.Suite(&DockerDaemonSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerDaemonSuite struct { + ds *DockerSuite + d *Daemon +} + +func (s *DockerDaemonSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerDaemonSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d = NewDaemon(c) +} + +func (s *DockerDaemonSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux) + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { + filepath.Walk(daemonSockRoot, func(path string, fi os.FileInfo, err error) error { + if err != nil { + // ignore errors here + // not cleaning up sockets is not really an error + return nil + } + if fi.Mode() == os.ModeSocket { + syscall.Unlink(path) + } + return nil + }) + os.RemoveAll(daemonSockRoot) +} + +const defaultSwarmPort = 2477 + +func init() { + check.Suite(&DockerSwarmSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerSwarmSuite struct { + server *httptest.Server + ds *DockerSuite + daemons []*SwarmDaemon + daemonsLock sync.Mutex // protect access to daemons + portIndex int +} + +func (s *DockerSwarmSuite) OnTimeout(c *check.C) { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + d.DumpStackAndQuit() + } +} + +func (s *DockerSwarmSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux) +} + +func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *SwarmDaemon { + d := &SwarmDaemon{ + Daemon: NewDaemon(c), + port: defaultSwarmPort + s.portIndex, + } + d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port) + args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts + if experimentalDaemon { + args = append(args, "--experimental") + } + err := d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + + if joinSwarm == true { + if len(s.daemons) > 0 { + tokens := s.daemons[0].joinTokens(c) + token := tokens.Worker + if manager { + token = tokens.Manager + } + c.Assert(d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{s.daemons[0].listenAddr}, + JoinToken: token, + }), check.IsNil) + } else { + c.Assert(d.Init(swarm.InitRequest{}), check.IsNil) + } + } + + s.portIndex++ + s.daemonsLock.Lock() + s.daemons = append(s.daemons, d) + s.daemonsLock.Unlock() + + return d +} + +func (s *DockerSwarmSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux) + s.daemonsLock.Lock() + for _, d := range s.daemons { + d.Stop() + // raft state file is quite big (64MB) so remove it after every test + walDir := filepath.Join(d.root, "swarm/raft/wal") + if err := os.RemoveAll(walDir); err != nil { + c.Logf("error removing %v: %v", walDir, err) + } + + cleanupExecRoot(c, d.execRoot) + } + s.daemons = nil + s.daemonsLock.Unlock() + + s.portIndex = 0 + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerTrustSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerTrustSuite struct { + ds *DockerSuite + reg *testRegistryV2 + not *testNotary +} + +func (s *DockerTrustSuite) SetUpTest(c *check.C) { + testRequires(c, RegistryHosting, NotaryServerHosting) + s.reg = setupRegistry(c, false, "", "") + s.not = setupNotary(c) +} + +func (s *DockerTrustSuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.not != nil { + s.not.Close() + } + + // Remove trusted keys and metadata after test + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + s.ds.TearDownTest(c) +} + +func init() { + ds := &DockerSuite{} + check.Suite(&DockerTrustedSwarmSuite{ + trustSuite: DockerTrustSuite{ + ds: ds, + }, + swarmSuite: DockerSwarmSuite{ + ds: ds, + }, + }) +} + +type DockerTrustedSwarmSuite struct { + swarmSuite DockerSwarmSuite + trustSuite DockerTrustSuite + reg *testRegistryV2 + not *testNotary +} + +func (s *DockerTrustedSwarmSuite) SetUpTest(c *check.C) { + s.swarmSuite.SetUpTest(c) + s.trustSuite.SetUpTest(c) +} + +func (s *DockerTrustedSwarmSuite) TearDownTest(c *check.C) { + s.trustSuite.TearDownTest(c) + s.swarmSuite.TearDownTest(c) +} + +func (s *DockerTrustedSwarmSuite) OnTimeout(c *check.C) { + s.swarmSuite.OnTimeout(c) +} diff --git a/vendor/github.com/moby/moby/integration-cli/daemon.go b/vendor/github.com/moby/moby/integration-cli/daemon.go new file mode 100644 index 0000000..9fd3f1e --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon.go @@ -0,0 +1,608 @@ +package main + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" +) + +var daemonSockRoot = filepath.Join(os.TempDir(), "docker-integration") + +// Daemon represents a Docker daemon for the testing framework. +type Daemon struct { + GlobalFlags []string + + id string + c *check.C + logFile *os.File + folder string + root string + stdin io.WriteCloser + stdout, stderr io.ReadCloser + cmd *exec.Cmd + storageDriver string + wait chan error + userlandProxy bool + useDefaultHost bool + useDefaultTLSHost bool + execRoot string +} + +type clientConfig struct { + transport *http.Transport + scheme string + addr string +} + +// NewDaemon returns a Daemon instance to be used for testing. +// This will create a directory such as d123456789 in the folder specified by $DEST. +// The daemon will not automatically start. +func NewDaemon(c *check.C) *Daemon { + dest := os.Getenv("DEST") + c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable")) + + err := os.MkdirAll(daemonSockRoot, 0700) + c.Assert(err, checker.IsNil, check.Commentf("could not create daemon socket root")) + + id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) + dir := filepath.Join(dest, id) + daemonFolder, err := filepath.Abs(dir) + c.Assert(err, check.IsNil, check.Commentf("Could not make %q an absolute path", dir)) + daemonRoot := filepath.Join(daemonFolder, "root") + + c.Assert(os.MkdirAll(daemonRoot, 0755), check.IsNil, check.Commentf("Could not create daemon root %q", dir)) + + userlandProxy := true + if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { + if val, err := strconv.ParseBool(env); err != nil { + userlandProxy = val + } + } + + return &Daemon{ + id: id, + c: c, + folder: daemonFolder, + root: daemonRoot, + storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), + userlandProxy: userlandProxy, + execRoot: filepath.Join(os.TempDir(), "docker-execroot", id), + } +} + +// RootDir returns the root directory of the daemon. +func (d *Daemon) RootDir() string { + return d.root +} + +func (d *Daemon) getClientConfig() (*clientConfig, error) { + var ( + transport *http.Transport + scheme string + addr string + proto string + ) + if d.useDefaultTLSHost { + option := &tlsconfig.Options{ + CAFile: "fixtures/https/ca.pem", + CertFile: "fixtures/https/client-cert.pem", + KeyFile: "fixtures/https/client-key.pem", + } + tlsConfig, err := tlsconfig.Client(*option) + if err != nil { + return nil, err + } + transport = &http.Transport{ + TLSClientConfig: tlsConfig, + } + addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort) + scheme = "https" + proto = "tcp" + } else if d.useDefaultHost { + addr = opts.DefaultUnixSocket + proto = "unix" + scheme = "http" + transport = &http.Transport{} + } else { + addr = d.sockPath() + proto = "unix" + scheme = "http" + transport = &http.Transport{} + } + + d.c.Assert(sockets.ConfigureTransport(transport, proto, addr), check.IsNil) + + return &clientConfig{ + transport: transport, + scheme: scheme, + addr: addr, + }, nil +} + +// Start will start the daemon and return once it is ready to receive requests. +// You can specify additional daemon flags. +func (d *Daemon) Start(args ...string) error { + logFile, err := os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.folder)) + + return d.StartWithLogFile(logFile, args...) +} + +// StartWithLogFile will start the daemon and attach its streams to a given file. +func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { + dockerdBinary, err := exec.LookPath(dockerdBinary) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id)) + + args := append(d.GlobalFlags, + "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", + "--graph", d.root, + "--exec-root", d.execRoot, + "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), + fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), + ) + if experimentalDaemon { + args = append(args, "--experimental", "--init") + } + if !(d.useDefaultHost || d.useDefaultTLSHost) { + args = append(args, []string{"--host", d.sock()}...) + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + args = append(args, []string{"--userns-remap", root}...) + } + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + foundLog := false + foundSd := false + for _, a := range providedArgs { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { + foundLog = true + } + if strings.Contains(a, "--storage-driver") { + foundSd = true + } + } + if !foundLog { + args = append(args, "--debug") + } + if d.storageDriver != "" && !foundSd { + args = append(args, "--storage-driver", d.storageDriver) + } + + args = append(args, providedArgs...) + d.cmd = exec.Command(dockerdBinary, args...) + d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") + d.cmd.Stdout = out + d.cmd.Stderr = out + d.logFile = out + + if err := d.cmd.Start(); err != nil { + return fmt.Errorf("[%s] could not start daemon container: %v", d.id, err) + } + + wait := make(chan error) + + go func() { + wait <- d.cmd.Wait() + d.c.Logf("[%s] exiting daemon", d.id) + close(wait) + }() + + d.wait = wait + + tick := time.Tick(500 * time.Millisecond) + // make sure daemon is ready to receive requests + startTime := time.Now().Unix() + for { + d.c.Logf("[%s] waiting for daemon to start", d.id) + if time.Now().Unix()-startTime > 5 { + // After 5 seconds, give up + return fmt.Errorf("[%s] Daemon exited and never started", d.id) + } + select { + case <-time.After(2 * time.Second): + return fmt.Errorf("[%s] timeout: daemon does not respond", d.id) + case <-tick: + clientConfig, err := d.getClientConfig() + if err != nil { + return err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/_ping", nil) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not create new request", d.id)) + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + resp, err := client.Do(req) + if err != nil { + continue + } + if resp.StatusCode != http.StatusOK { + d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status) + } + d.c.Logf("[%s] daemon started", d.id) + d.root, err = d.queryRootDir() + if err != nil { + return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) + } + return nil + case <-d.wait: + return fmt.Errorf("[%s] Daemon exited during startup", d.id) + } + } +} + +// StartWithBusybox will first start the daemon with Daemon.Start() +// then save the busybox image from the main daemon and load it into this Daemon instance. +func (d *Daemon) StartWithBusybox(arg ...string) error { + if err := d.Start(arg...); err != nil { + return err + } + return d.LoadBusybox() +} + +// Kill will send a SIGKILL to the daemon +func (d *Daemon) Kill() error { + if d.cmd == nil || d.wait == nil { + return errors.New("daemon not started") + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + if err := d.cmd.Process.Kill(); err != nil { + d.c.Logf("Could not kill daemon: %v", err) + return err + } + + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { + return err + } + + return nil +} + +// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its +// stack to its log file and exit +// This is used primarily for gathering debug information on test timeout +func (d *Daemon) DumpStackAndQuit() { + if d.cmd == nil || d.cmd.Process == nil { + return + } + signalDaemonDump(d.cmd.Process.Pid) +} + +// Stop will send a SIGINT every second and wait for the daemon to stop. +// If it timeouts, a SIGKILL is sent. +// Stop will not delete the daemon directory. If a purged daemon is needed, +// instantiate a new one with NewDaemon. +func (d *Daemon) Stop() error { + if d.cmd == nil || d.wait == nil { + return errors.New("daemon not started") + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + i := 1 + tick := time.Tick(time.Second) + + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } +out1: + for { + select { + case err := <-d.wait: + return err + case <-time.After(20 * time.Second): + // time for stopping jobs and run onShutdown hooks + d.c.Logf("timeout: %v", d.id) + break out1 + } + } + +out2: + for { + select { + case err := <-d.wait: + return err + case <-tick: + i++ + if i > 5 { + d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i) + break out2 + } + d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } + } + } + + if err := d.cmd.Process.Kill(); err != nil { + d.c.Logf("Could not kill daemon: %v", err) + return err + } + + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { + return err + } + + return nil +} + +// Restart will restart the daemon by first stopping it and then starting it. +func (d *Daemon) Restart(arg ...string) error { + d.Stop() + // in the case of tests running a user namespace-enabled daemon, we have resolved + // d.root to be the actual final path of the graph dir after the "uid.gid" of + // remapped root is added--we need to subtract it from the path before calling + // start or else we will continue making subdirectories rather than truly restarting + // with the same location/root: + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + d.root = filepath.Dir(d.root) + } + return d.Start(arg...) +} + +// LoadBusybox will load the stored busybox into a newly started daemon +func (d *Daemon) LoadBusybox() error { + bb := filepath.Join(d.folder, "busybox.tar") + if _, err := os.Stat(bb); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) + } + // saving busybox image from main daemon + if out, err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil { + imagesOut, _ := exec.Command(dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput() + return fmt.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut))) + } + } + // loading busybox image to this daemon + if out, err := d.Cmd("load", "--input", bb); err != nil { + return fmt.Errorf("could not load busybox image: %s", out) + } + if err := os.Remove(bb); err != nil { + d.c.Logf("could not remove %s: %v", bb, err) + } + return nil +} + +func (d *Daemon) queryRootDir() (string, error) { + // update daemon root by asking /info endpoint (to support user + // namespaced daemon with root remapped uid.gid directory) + clientConfig, err := d.getClientConfig() + if err != nil { + return "", err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/info", nil) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/json") + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + + resp, err := client.Do(req) + if err != nil { + return "", err + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + return resp.Body.Close() + }) + + type Info struct { + DockerRootDir string + } + var b []byte + var i Info + b, err = readBody(body) + if err == nil && resp.StatusCode == http.StatusOK { + // read the docker root dir + if err = json.Unmarshal(b, &i); err == nil { + return i.DockerRootDir, nil + } + } + return "", err +} + +func (d *Daemon) sock() string { + return fmt.Sprintf("unix://" + d.sockPath()) +} + +func (d *Daemon) sockPath() string { + return filepath.Join(daemonSockRoot, d.id+".sock") +} + +func (d *Daemon) waitRun(contID string) error { + args := []string{"--host", d.sock()} + return waitInspectWithArgs(contID, "{{.State.Running}}", "true", 10*time.Second, args...) +} + +func (d *Daemon) getBaseDeviceSize(c *check.C) int64 { + infoCmdOutput, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "-H", d.sock(), "info"), + exec.Command("grep", "Base Device Size"), + ) + c.Assert(err, checker.IsNil) + basesizeSlice := strings.Split(infoCmdOutput, ":") + basesize := strings.Trim(basesizeSlice[1], " ") + basesize = strings.Trim(basesize, "\n")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + c.Assert(err, checker.IsNil) + basesizeBytes := int64(basesizeFloat) * (1024 * 1024 * 1024) + return basesizeBytes +} + +// Cmd will execute a docker CLI command against this Daemon. +// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version +func (d *Daemon) Cmd(args ...string) (string, error) { + b, err := d.command(args...).CombinedOutput() + return string(b), err +} + +func (d *Daemon) command(args ...string) *exec.Cmd { + return exec.Command(dockerBinary, d.prependHostArg(args)...) +} + +func (d *Daemon) prependHostArg(args []string) []string { + for _, arg := range args { + if arg == "--host" || arg == "-H" { + return args + } + } + return append([]string{"--host", d.sock()}, args...) +} + +// SockRequest executes a socket request on a daemon and returns statuscode and output. +func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return -1, nil, err + } + + res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + return -1, nil, err + } + b, err := readBody(body) + return res.StatusCode, b, err +} + +// SockRequestRaw executes a socket request on a daemon and returns an http +// response and a reader for the output data. +func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { + return sockRequestRawToDaemon(method, endpoint, data, ct, d.sock()) +} + +// LogFileName returns the path the the daemon's log file +func (d *Daemon) LogFileName() string { + return d.logFile.Name() +} + +func (d *Daemon) getIDByName(name string) (string, error) { + return d.inspectFieldWithError(name, "Id") +} + +func (d *Daemon) activeContainers() (ids []string) { + out, _ := d.Cmd("ps", "-q") + for _, id := range strings.Split(out, "\n") { + if id = strings.TrimSpace(id); id != "" { + ids = append(ids, id) + } + } + return +} + +func (d *Daemon) inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + out, err := d.Cmd("inspect", "-f", format, name) + if err != nil { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func (d *Daemon) inspectFieldWithError(name, field string) (string, error) { + return d.inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +func (d *Daemon) findContainerIP(id string) string { + out, err := d.Cmd("inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'"), id) + if err != nil { + d.c.Log(err) + } + return strings.Trim(out, " \r\n'") +} + +func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) { + buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...) + return runCommandWithOutput(buildCmd) +} + +func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + if len(strings.TrimSpace(out)) == 0 { + return 0, nil + } + return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out)) +} + +func (d *Daemon) reloadConfig() error { + if d.cmd == nil || d.cmd.Process == nil { + return fmt.Errorf("daemon is not running") + } + + errCh := make(chan error) + started := make(chan struct{}) + go func() { + _, body, err := sockRequestRawToDaemon("GET", "/events", nil, "", d.sock()) + close(started) + if err != nil { + errCh <- err + } + defer body.Close() + dec := json.NewDecoder(body) + for { + var e events.Message + if err := dec.Decode(&e); err != nil { + errCh <- err + return + } + if e.Type != events.DaemonEventType { + continue + } + if e.Action != "reload" { + continue + } + close(errCh) // notify that we are done + return + } + }() + + <-started + if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { + return fmt.Errorf("error signaling daemon reload: %v", err) + } + select { + case err := <-errCh: + if err != nil { + return fmt.Errorf("error waiting for daemon reload event: %v", err) + } + case <-time.After(30 * time.Second): + return fmt.Errorf("timeout waiting for daemon reload event") + } + return nil +} diff --git a/vendor/github.com/moby/moby/integration-cli/daemon_swarm.go b/vendor/github.com/moby/moby/integration-cli/daemon_swarm.go new file mode 100644 index 0000000..199bce0 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon_swarm.go @@ -0,0 +1,419 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// SwarmDaemon is a test daemon with helpers for participating in a swarm. +type SwarmDaemon struct { + *Daemon + swarm.Info + port int + listenAddr string +} + +// Init initializes a new swarm cluster. +func (d *SwarmDaemon) Init(req swarm.InitRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.listenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/init", req) + if status != http.StatusOK { + return fmt.Errorf("initializing swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("initializing swarm: %v", err) + } + info, err := d.info() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Join joins a daemon to an existing cluster. +func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.listenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/join", req) + if status != http.StatusOK { + return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("joining swarm: %v", err) + } + info, err := d.info() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Leave forces daemon to leave current cluster. +func (d *SwarmDaemon) Leave(force bool) error { + url := "/swarm/leave" + if force { + url += "?force=1" + } + status, out, err := d.SockRequest("POST", url, nil) + if status != http.StatusOK { + return fmt.Errorf("leaving swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + err = fmt.Errorf("leaving swarm: %v", err) + } + return err +} + +func (d *SwarmDaemon) info() (swarm.Info, error) { + var info struct { + Swarm swarm.Info + } + status, dt, err := d.SockRequest("GET", "/info", nil) + if status != http.StatusOK { + return info.Swarm, fmt.Errorf("get swarm info: invalid statuscode %v", status) + } + if err != nil { + return info.Swarm, fmt.Errorf("get swarm info: %v", err) + } + if err := json.Unmarshal(dt, &info); err != nil { + return info.Swarm, err + } + return info.Swarm, nil +} + +type serviceConstructor func(*swarm.Service) +type nodeConstructor func(*swarm.Node) +type specConstructor func(*swarm.Spec) + +func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string { + var service swarm.Service + for _, fn := range f { + fn(&service) + } + status, out, err := d.SockRequest("POST", "/services/create", service.Spec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.ServiceCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service { + var service swarm.Service + status, out, err := d.SockRequest("GET", "/services/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &service), checker.IsNil) + return &service +} + +func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filterArgs.Add("service", service) + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + return tasks +} + +func (d *SwarmDaemon) checkServiceTasksInState(service string, state swarm.TaskState, message string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.getServiceTasks(c, service) + var count int + for _, task := range tasks { + if task.Status.State == state { + if message == "" || strings.Contains(task.Status.Message, message) { + count++ + } + } + } + return count, nil + } +} + +func (d *SwarmDaemon) checkServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return d.checkServiceTasksInState(service, swarm.TaskStateRunning, "") +} + +func (d *SwarmDaemon) checkServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + service := d.getService(c, service) + return service.UpdateStatus.State, nil + } +} + +func (d *SwarmDaemon) checkServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.getServiceTasks(c, service) + return len(tasks), nil + } +} + +func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + + result := make(map[string]int) + for _, task := range tasks { + if task.Status.State == swarm.TaskStateRunning { + result[task.Spec.ContainerSpec.Image]++ + } + } + return result, nil +} + +func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { + nodes := d.listNodes(c) + var readyCount int + for _, node := range nodes { + if node.Status.State == swarm.NodeStateReady { + readyCount++ + } + } + return readyCount, nil +} + +func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task { + var task swarm.Task + + status, out, err := d.SockRequest("GET", "/tasks/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &task), checker.IsNil) + return task +} + +func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...serviceConstructor) { + for _, fn := range f { + fn(service) + } + url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) removeService(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/services/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node { + var node swarm.Node + status, out, err := d.SockRequest("GET", "/nodes/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &node), checker.IsNil) + c.Assert(node.ID, checker.Equals, id) + return &node +} + +func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) { + url := "/nodes/" + id + if force { + url += "?force=1" + } + + status, out, err := d.SockRequest("DELETE", url, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) { + for i := 0; ; i++ { + node := d.getNode(c, id) + for _, fn := range f { + fn(node) + } + url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) + status, out, err := d.SockRequest("POST", url, node.Spec) + if i < 10 && strings.Contains(string(out), "update out of sequence") { + time.Sleep(100 * time.Millisecond) + continue + } + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + return + } +} + +func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node { + status, out, err := d.SockRequest("GET", "/nodes", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + nodes := []swarm.Node{} + c.Assert(json.Unmarshal(out, &nodes), checker.IsNil) + return nodes +} + +func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service { + status, out, err := d.SockRequest("GET", "/services", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + services := []swarm.Service{} + c.Assert(json.Unmarshal(out, &services), checker.IsNil) + return services +} + +func (d *SwarmDaemon) createSecret(c *check.C, secretSpec swarm.SecretSpec) string { + status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.SecretCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +func (d *SwarmDaemon) listSecrets(c *check.C) []swarm.Secret { + status, out, err := d.SockRequest("GET", "/secrets", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + secrets := []swarm.Secret{} + c.Assert(json.Unmarshal(out, &secrets), checker.IsNil) + return secrets +} + +func (d *SwarmDaemon) getSecret(c *check.C, id string) *swarm.Secret { + var secret swarm.Secret + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &secret), checker.IsNil) + return &secret +} + +func (d *SwarmDaemon) deleteSecret(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + return sw +} + +func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) { + sw := d.getSwarm(c) + for _, fn := range f { + fn(&sw.Spec) + } + url := fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index) + status, out, err := d.SockRequest("POST", url, sw.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) rotateTokens(c *check.C) { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + + url := fmt.Sprintf("/swarm/update?version=%d&rotateWorkerToken=true&rotateManagerToken=true", sw.Version.Index) + status, out, err = d.SockRequest("POST", url, sw.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + return sw.JoinTokens +} + +func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.info() + c.Assert(err, checker.IsNil) + return info.LocalNodeState, nil +} + +func (d *SwarmDaemon) checkControlAvailable(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + return info.ControlAvailable, nil +} + +func (d *SwarmDaemon) checkLeader(c *check.C) (interface{}, check.CommentInterface) { + errList := check.Commentf("could not get node list") + status, out, err := d.SockRequest("GET", "/nodes", nil) + if err != nil { + return err, errList + } + if status != http.StatusOK { + return fmt.Errorf("expected http status OK, got: %d", status), errList + } + + var ls []swarm.Node + if err := json.Unmarshal(out, &ls); err != nil { + return err, errList + } + + for _, node := range ls { + if node.ManagerStatus != nil && node.ManagerStatus.Leader { + return nil, nil + } + } + return fmt.Errorf("no leader"), check.Commentf("could not find leader") +} + +func (d *SwarmDaemon) cmdRetryOutOfSequence(args ...string) (string, error) { + for i := 0; ; i++ { + out, err := d.Cmd(args...) + if err != nil { + if strings.Contains(out, "update out of sequence") { + if i < 10 { + continue + } + } + } + return out, err + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/daemon_swarm_hack.go b/vendor/github.com/moby/moby/integration-cli/daemon_swarm_hack.go new file mode 100644 index 0000000..0cea901 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon_swarm_hack.go @@ -0,0 +1,20 @@ +package main + +import "github.com/go-check/check" + +func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *SwarmDaemon { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + if d.NodeID == nodeID { + return d + } + } + c.Fatalf("could not find node with id: %s", nodeID) + return nil +} + +// nodeCmd executes a command on a given node via the normal docker socket +func (s *DockerSwarmSuite) nodeCmd(c *check.C, id string, args ...string) (string, error) { + return s.getDaemon(c, id).Cmd(args...) +} diff --git a/vendor/github.com/moby/moby/integration-cli/daemon_unix.go b/vendor/github.com/moby/moby/integration-cli/daemon_unix.go new file mode 100644 index 0000000..6ca7daf --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package main + +import ( + "os" + "path/filepath" + "syscall" + + "github.com/go-check/check" +) + +func cleanupExecRoot(c *check.C, execRoot string) { + // Cleanup network namespaces in the exec root of this + // daemon because this exec root is specific to this + // daemon instance and has no chance of getting + // cleaned up when a new daemon is instantiated with a + // new exec root. + netnsPath := filepath.Join(execRoot, "netns") + filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error { + if err := syscall.Unmount(path, syscall.MNT_FORCE); err != nil { + c.Logf("unmount of %s failed: %v", path, err) + } + os.Remove(path) + return nil + }) +} + +func signalDaemonDump(pid int) { + syscall.Kill(pid, syscall.SIGQUIT) +} + +func signalDaemonReload(pid int) error { + return syscall.Kill(pid, syscall.SIGHUP) +} diff --git a/vendor/github.com/moby/moby/integration-cli/daemon_windows.go b/vendor/github.com/moby/moby/integration-cli/daemon_windows.go new file mode 100644 index 0000000..885b703 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon_windows.go @@ -0,0 +1,53 @@ +package main + +import ( + "fmt" + "strconv" + "syscall" + "unsafe" + + "github.com/go-check/check" + "golang.org/x/sys/windows" +) + +func openEvent(desiredAccess uint32, inheritHandle bool, name string, proc *windows.LazyProc) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p2 uint32 + if inheritHandle { + _p2 = 1 + } + r0, _, e1 := proc.Call(uintptr(desiredAccess), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +func pulseEvent(handle syscall.Handle, proc *windows.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +func signalDaemonDump(pid int) { + modkernel32 := windows.NewLazySystemDLL("kernel32.dll") + procOpenEvent := modkernel32.NewProc("OpenEventW") + procPulseEvent := modkernel32.NewProc("PulseEvent") + + ev := "Global\\docker-daemon-" + strconv.Itoa(pid) + h2, _ := openEvent(0x0002, false, ev, procOpenEvent) + if h2 == 0 { + return + } + pulseEvent(h2, procPulseEvent) +} + +func signalDaemonReload(pid int) error { + return fmt.Errorf("daemon reload not supported") +} + +func cleanupExecRoot(c *check.C, execRoot string) { +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_attach_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_attach_test.go new file mode 100644 index 0000000..d43bf3a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_attach_test.go @@ -0,0 +1,210 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "io" + "net" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stdcopy" + "github.com/go-check/check" + "golang.org/x/net/websocket" +) + +func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") + + rwc, err := sockConn(time.Duration(10*time.Second), "") + c.Assert(err, checker.IsNil) + + cleanedContainerID := strings.TrimSpace(out) + config, err := websocket.NewConfig( + "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", + "http://localhost", + ) + c.Assert(err, checker.IsNil) + + ws, err := websocket.NewClient(config, rwc) + c.Assert(err, checker.IsNil) + defer ws.Close() + + expected := []byte("hello") + actual := make([]byte, len(expected)) + + outChan := make(chan error) + go func() { + _, err := io.ReadFull(ws, actual) + outChan <- err + close(outChan) + }() + + inChan := make(chan error) + go func() { + _, err := ws.Write(expected) + inChan <- err + close(inChan) + }() + + select { + case err := <-inChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout writing to ws") + } + + select { + case err := <-outChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout reading from ws") + } + + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("Websocket didn't return the expected data")) +} + +// regression gh14320 +func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { + req, client, err := newRequestClient("POST", "/containers/doesnotexist/attach", nil, "", "") + c.Assert(err, checker.IsNil) + + resp, err := client.Do(req) + // connection will shutdown, err should be "persistent connection closed" + c.Assert(err, checker.NotNil) // Server shutdown connection + + body, err := readBody(resp.Body) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) + expected := "No such container: doesnotexist\r\n" + c.Assert(string(body), checker.Equals, expected) +} + +func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) { + status, body, err := sockRequest("GET", "/containers/doesnotexist/attach/ws", nil) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(err, checker.IsNil) + expected := "No such container: doesnotexist" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +func (s *DockerSuite) TestPostContainersAttach(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectSuccess := func(conn net.Conn, br *bufio.Reader, stream string, tty bool) { + defer conn.Close() + expected := []byte("success") + _, err := conn.Write(expected) + c.Assert(err, checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + lenHeader := 0 + if !tty { + lenHeader = 8 + } + actual := make([]byte, len(expected)+lenHeader) + _, err = io.ReadFull(br, actual) + c.Assert(err, checker.IsNil) + if !tty { + fdMap := map[string]byte{ + "stdin": 0, + "stdout": 1, + "stderr": 2, + } + c.Assert(actual[0], checker.Equals, fdMap[stream]) + } + c.Assert(actual[lenHeader:], checker.DeepEquals, expected, check.Commentf("Attach didn't return the expected data from %s", stream)) + } + + expectTimeout := func(conn net.Conn, br *bufio.Reader, stream string) { + defer conn.Close() + _, err := conn.Write([]byte{'t'}) + c.Assert(err, checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + actual := make([]byte, 1) + _, err = io.ReadFull(br, actual) + opErr, ok := err.(*net.OpError) + c.Assert(ok, checker.Equals, true, check.Commentf("Error is expected to be *net.OpError, got %v", err)) + c.Assert(opErr.Timeout(), checker.Equals, true, check.Commentf("Read from %s is expected to timeout", stream)) + } + + // Create a container that only emits stdout. + cid, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + cid = strings.TrimSpace(cid) + // Attach to the container's stdout stream. + conn, br, err := sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + // Check if the data from stdout can be received. + expectSuccess(conn, br, "stdout", false) + // Attach to the container's stderr stream. + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + // Since the container only emits stdout, attaching to stderr should return nothing. + expectTimeout(conn, br, "stdout") + + // Test the similar functions of the stderr stream. + cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stderr", false) + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + expectTimeout(conn, br, "stderr") + + // Test with tty. + cid, _ = dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + // Attach to stdout only. + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stdout", true) + + // Attach without stdout stream. + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + // Nothing should be received because both the stdout and stderr of the container will be + // sent to the client as stdout when tty is enabled. + expectTimeout(conn, br, "stdout") + + // Test the client API + // Make sure we don't see "hello" if Logs is false + client, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + + cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "echo hello; cat") + cid = strings.TrimSpace(cid) + + attachOpts := types.ContainerAttachOptions{ + Stream: true, + Stdin: true, + Stdout: true, + } + + resp, err := client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + expectSuccess(resp.Conn, resp.Reader, "stdout", false) + + // Make sure we do see "hello" if Logs is true + attachOpts.Logs = true + resp, err = client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + + defer resp.Conn.Close() + resp.Conn.SetReadDeadline(time.Now().Add(time.Second)) + + _, err = resp.Conn.Write([]byte("success")) + c.Assert(err, checker.IsNil) + + actualStdout := new(bytes.Buffer) + actualStderr := new(bytes.Buffer) + stdcopy.StdCopy(actualStdout, actualStderr, resp.Reader) + c.Assert(actualStdout.Bytes(), checker.DeepEquals, []byte("hello\nsuccess"), check.Commentf("Attach didn't return the expected data from stdout")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_auth_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_auth_test.go new file mode 100644 index 0000000..bfcae31 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_auth_test.go @@ -0,0 +1,25 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Test case for #22244 +func (s *DockerSuite) TestAuthAPI(c *check.C) { + testRequires(c, Network) + config := types.AuthConfig{ + Username: "no-user", + Password: "no-password", + } + + expected := "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password" + status, body, err := sockRequest("POST", "/auth", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusUnauthorized) + msg := getErrorMessage(c, body) + c.Assert(msg, checker.Contains, expected, check.Commentf("Expected: %v, got: %v", expected, msg)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_build_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_build_test.go new file mode 100644 index 0000000..9b069a4 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_build_test.go @@ -0,0 +1,254 @@ +package main + +import ( + "archive/tar" + "bytes" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) { + testRequires(c, NotUserNamespace) + var testD string + if daemonPlatform == "windows" { + testD = `FROM busybox +COPY * /tmp/ +RUN find / -name ba* +RUN find /tmp/` + } else { + // -xdev is required because sysfs can cause EPERM + testD = `FROM busybox +COPY * /tmp/ +RUN find / -xdev -name ba* +RUN find /tmp/` + } + server, err := fakeStorage(map[string]string{"testD": testD}) + c.Assert(err, checker.IsNil) + defer server.Close() + + res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + // Make sure Dockerfile exists. + // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL + out := string(buf) + c.Assert(out, checker.Contains, "/tmp/Dockerfile") + c.Assert(out, checker.Not(checker.Contains), "baz") +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte("FROM busybox") + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, checker.IsNil) + + defer server.Close() + + res, b, err := sockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + b.Close() +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContextWithCustomDockerfile(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox +RUN echo 'wrong'`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + custom := []byte(`FROM busybox +RUN echo 'right' +`) + err = tw.WriteHeader(&tar.Header{ + Name: "custom", + Size: int64(len(custom)), + }) + + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(custom) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, checker.IsNil) + + defer server.Close() + url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" + res, body, err := sockRequestRaw("POST", url, nil, "application/tar") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + defer body.Close() + content, err := readBody(body) + c.Assert(err, checker.IsNil) + + // Build used the wrong dockerfile. + c.Assert(string(content), checker.Not(checker.Contains), "wrong") +} + +func (s *DockerSuite) TestBuildAPILowerDockerfile(c *check.C) { + git, err := newFakeGit("repo", map[string]string{ + "dockerfile": `FROM busybox +RUN echo from dockerfile`, + }, false) + c.Assert(err, checker.IsNil) + defer git.Close() + + res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from dockerfile") +} + +func (s *DockerSuite) TestBuildAPIBuildGitWithF(c *check.C) { + git, err := newFakeGit("repo", map[string]string{ + "baz": `FROM busybox +RUN echo from baz`, + "Dockerfile": `FROM busybox +RUN echo from Dockerfile`, + }, false) + c.Assert(err, checker.IsNil) + defer git.Close() + + // Make sure it tries to 'dockerfile' query param value + res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from baz") +} + +func (s *DockerSuite) TestBuildAPIDoubleDockerfile(c *check.C) { + testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows + git, err := newFakeGit("repo", map[string]string{ + "Dockerfile": `FROM busybox +RUN echo from Dockerfile`, + "dockerfile": `FROM busybox +RUN echo from dockerfile`, + }, false) + c.Assert(err, checker.IsNil) + defer git.Close() + + // Make sure it tries to 'dockerfile' query param value + res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from Dockerfile") +} + +func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) { + // Make sure that build context tars with entries of the form + // x/./y don't cause caching false positives. + + buildFromTarContext := func(fileContents []byte) string { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + COPY dir /dir/`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write Dockerfile in tar file content + c.Assert(err, checker.IsNil) + + err = tw.WriteHeader(&tar.Header{ + Name: "dir/./file", + Size: int64(len(fileContents)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(fileContents) + // failed to write file contents in tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := readBody(body) + c.Assert(err, checker.IsNil) + lines := strings.Split(string(out), "\n") + c.Assert(len(lines), checker.GreaterThan, 1) + c.Assert(lines[len(lines)-2], checker.Matches, ".*Successfully built [0-9a-f]{12}.*") + + re := regexp.MustCompile("Successfully built ([0-9a-f]{12})") + matches := re.FindStringSubmatch(lines[len(lines)-2]) + return matches[1] + } + + imageA := buildFromTarContext([]byte("abc")) + imageB := buildFromTarContext([]byte("def")) + + c.Assert(imageA, checker.Not(checker.Equals), imageB) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_containers_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_containers_test.go new file mode 100644 index 0000000..d046ec0 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_containers_test.go @@ -0,0 +1,1961 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/integration" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestContainerAPIGetAll(c *check.C) { + startCount, err := getContainerCount() + c.Assert(err, checker.IsNil, check.Commentf("Cannot query container count")) + + name := "getall" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var inspectJSON []struct { + Names []string + } + err = json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal response body")) + + c.Assert(inspectJSON, checker.HasLen, startCount+1) + + actual := inspectJSON[0].Names[0] + c.Assert(actual, checker.Equals, "/"+name) +} + +// regression test for empty json field being omitted #13691 +func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) { + dockerCmd(c, "run", "busybox", "true") + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + // empty Labels field triggered this bug, make sense to check for everything + // cause even Ports for instance can trigger this bug + // better safe than sorry.. + fields := []string{ + "Id", + "Names", + "Image", + "Command", + "Created", + "Ports", + "Labels", + "Status", + "NetworkSettings", + } + + // decoding into types.Container do not work since it eventually unmarshal + // and empty field to an empty go map, so we just check for a string + for _, f := range fields { + if !strings.Contains(string(body), f) { + c.Fatalf("Field %s is missing and it shouldn't", f) + } + } +} + +type containerPs struct { + Names []string + Ports []map[string]interface{} +} + +// regression test for non-empty fields from #13901 +func (s *DockerSuite) TestContainerAPIPsOmitFields(c *check.C) { + // Problematic for Windows porting due to networking not yet being passed back + testRequires(c, DaemonIsLinux) + name := "pstest" + port := 80 + runSleepingContainer(c, "--name", name, "--expose", strconv.Itoa(port)) + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var resp []containerPs + err = json.Unmarshal(body, &resp) + c.Assert(err, checker.IsNil) + + var foundContainer *containerPs + for _, container := range resp { + for _, testName := range container.Names { + if "/"+name == testName { + foundContainer = &container + break + } + } + } + + c.Assert(foundContainer.Ports, checker.HasLen, 1) + c.Assert(foundContainer.Ports[0]["PrivatePort"], checker.Equals, float64(port)) + _, ok := foundContainer.Ports[0]["PublicPort"] + c.Assert(ok, checker.Not(checker.Equals), true) + _, ok = foundContainer.Ports[0]["IP"] + c.Assert(ok, checker.Not(checker.Equals), true) +} + +func (s *DockerSuite) TestContainerAPIGetExport(c *check.C) { + // Not supported on Windows as Windows does not support docker export + testRequires(c, DaemonIsLinux) + name := "exportcontainer" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test") + + status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil && err == io.EOF { + break + } + if h.Name == "test" { + found = true + break + } + } + c.Assert(found, checker.True, check.Commentf("The created test file has not been found in the exported image")) +} + +func (s *DockerSuite) TestContainerAPIGetChanges(c *check.C) { + // Not supported on Windows as Windows does not support docker diff (/containers/name/changes) + testRequires(c, DaemonIsLinux) + name := "changescontainer" + dockerCmd(c, "run", "--name", name, "busybox", "rm", "/etc/passwd") + + status, body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + changes := []struct { + Kind int + Path string + }{} + c.Assert(json.Unmarshal(body, &changes), checker.IsNil, check.Commentf("unable to unmarshal response body")) + + // Check the changelog for removal of /etc/passwd + success := false + for _, elem := range changes { + if elem.Path == "/etc/passwd" && elem.Kind == 2 { + success = true + } + } + c.Assert(success, checker.True, check.Commentf("/etc/passwd has been removed but is not present in the diff")) +} + +func (s *DockerSuite) TestGetContainerStats(c *check.C) { + var ( + name = "statscontainer" + ) + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + var s *types.Stats + // decode only one object from the stream + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { + out, _ := runSleepingContainer(c) + id := strings.TrimSpace(out) + + buf := &integration.ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + _, body, err := sockRequestRaw("GET", "/containers/"+id+"/stats?stream=1", nil, "application/json") + c.Assert(err, checker.IsNil) + defer body.Close() + + chErr := make(chan error, 1) + go func() { + _, err = io.Copy(buf, body) + chErr <- err + }() + + b := make([]byte, 32) + // make sure we've got some stats + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + // Now remove without `-f` and make sure we are still pulling stats + _, _, err = dockerCmdWithError("rm", id) + c.Assert(err, checker.Not(checker.IsNil), check.Commentf("rm should have failed but didn't")) + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", id) + c.Assert(<-chErr, checker.IsNil) +} + +// regression test for gh13421 +// previous test was just checking one stat entry so it didn't fail (stats with +// stream false always return one stat) +func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + s := string(sr.body) + // count occurrences of "read" of types.Stats + if l := strings.Count(s, "read"); l < 2 { + c.Fatalf("Expected more than one stat streamed, got %d", l) + } + } +} + +func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats?stream=0", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + s := string(sr.body) + // count occurrences of `"read"` of types.Stats + c.Assert(strings.Count(s, `"read"`), checker.Equals, 1, check.Commentf("Expected only one stat streamed, got %d", strings.Count(s, `"read"`))) + } +} + +func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { + name := "statscontainer" + dockerCmd(c, "create", "--name", name, "busybox", "ps") + + type stats struct { + status int + err error + } + chResp := make(chan stats) + + // We expect an immediate response, but if it's not immediate, the test would hang, so put it in a goroutine + // below we'll check this on a timeout. + go func() { + resp, body, err := sockRequestRaw("GET", "/containers/"+name+"/stats", nil, "") + body.Close() + chResp <- stats{resp.StatusCode, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.status, checker.Equals, http.StatusOK) + case <-time.After(10 * time.Second): + c.Fatal("timeout waiting for stats response for stopped container") + } +} + +func (s *DockerSuite) TestContainerAPIPause(c *check.C) { + // Problematic on Windows as Windows does not support pause + testRequires(c, DaemonIsLinux) + defer unpauseAllContainers() + out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "30") + ContainerID := strings.TrimSpace(out) + + status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pausedContainers, err := getSliceOfPausedContainers() + c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) + + if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { + c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) + } + + status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pausedContainers, err = getSliceOfPausedContainers() + c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) + c.Assert(pausedContainers, checker.IsNil, check.Commentf("There should be no paused container.")) +} + +func (s *DockerSuite) TestContainerAPITop(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := sockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 11, check.Commentf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { + c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles) + } + c.Assert(top.Processes, checker.HasLen, 2, check.Commentf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes)) + c.Assert(top.Processes[0][10], checker.Equals, "/bin/sh -c top") + c.Assert(top.Processes[1][10], checker.Equals, "top") +} + +func (s *DockerSuite) TestContainerAPITopWindows(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := runSleepingContainer(c, "-d") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := sockRequest("GET", "/containers/"+id+"/top", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 4, check.Commentf("expected 4 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "Name" || top.Titles[3] != "Private Working Set" { + c.Fatalf("expected `Name` at `Titles[0]` and `Private Working Set` at Titles[3]: %v", top.Titles) + } + c.Assert(len(top.Processes), checker.GreaterOrEqualThan, 2, check.Commentf("expected at least 2 processes, found %d: %v", len(top.Processes), top.Processes)) + + foundProcess := false + expectedProcess := "busybox.exe" + for _, process := range top.Processes { + if process[0] == expectedProcess { + foundProcess = true + break + } + } + + c.Assert(foundProcess, checker.Equals, true, check.Commentf("expected to find %s: %v", expectedProcess, top.Processes)) +} + +func (s *DockerSuite) TestContainerAPICommit(c *check.C) { + cName := "testapicommit" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + name := "testcontainerapicommit" + status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *check.C) { + cName := "testapicommitwithconfig" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + config := map[string]interface{}{ + "Labels": map[string]string{"key1": "value1", "key2": "value2"}, + } + + name := "testcontainerapicommitwithconfig" + status, b, err := sockRequest("POST", "/commit?repo="+name+"&container="+cName, config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1") + c.Assert(label1, checker.Equals, "value1") + + label2 := inspectFieldMap(c, img.ID, "Config.Labels", "key2") + c.Assert(label2, checker.Equals, "value2") + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPIBadPort(c *check.C) { + // TODO Windows to Windows CI - Port this test + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "echo test"}, + "PortBindings": map[string]interface{}{ + "8080/tcp": []map[string]interface{}{ + { + "HostIP": "", + "HostPort": "aa80", + }, + }, + }, + } + + jsonData := bytes.NewBuffer(nil) + json.NewEncoder(jsonData).Encode(config) + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Equals, `invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", body)) +} + +func (s *DockerSuite) TestContainerAPICreate(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "touch /test && ls /test"}, + } + + status, b, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type createResp struct { + ID string + } + var container createResp + c.Assert(json.Unmarshal(b, &container), checker.IsNil) + + out, _ := dockerCmd(c, "start", "-a", container.ID) + c.Assert(strings.TrimSpace(out), checker.Equals, "/test") +} + +func (s *DockerSuite) TestContainerAPICreateEmptyConfig(c *check.C) { + config := map[string]interface{}{} + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + expected := "Config cannot be empty in order to create a container" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) +} + +func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) { + // Container creation must fail if client specified configurations for more than one network + config := map[string]interface{}{ + "Image": "busybox", + "NetworkingConfig": networktypes.NetworkingConfig{ + EndpointsConfig: map[string]*networktypes.EndpointSettings{ + "net1": {}, + "net2": {}, + "net3": {}, + }, + }, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + msg := getErrorMessage(c, body) + // network name order in error message is not deterministic + c.Assert(msg, checker.Contains, "Container cannot be connected to network endpoints") + c.Assert(msg, checker.Contains, "net1") + c.Assert(msg, checker.Contains, "net2") + c.Assert(msg, checker.Contains, "net3") +} + +func (s *DockerSuite) TestContainerAPICreateWithHostName(c *check.C) { + hostName := "test-host" + config := map[string]interface{}{ + "Image": "busybox", + "Hostname": hostName, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname")) +} + +func (s *DockerSuite) TestContainerAPICreateWithDomainName(c *check.C) { + domainName := "test-domain" + config := map[string]interface{}{ + "Image": "busybox", + "Domainname": domainName, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname")) +} + +func (s *DockerSuite) TestContainerAPICreateBridgeNetworkMode(c *check.C) { + // Windows does not support bridge + testRequires(c, DaemonIsLinux) + UtilCreateNetworkMode(c, "bridge") +} + +func (s *DockerSuite) TestContainerAPICreateOtherNetworkModes(c *check.C) { + // Windows does not support these network modes + testRequires(c, DaemonIsLinux, NotUserNamespace) + UtilCreateNetworkMode(c, "host") + UtilCreateNetworkMode(c, "container:web1") +} + +func UtilCreateNetworkMode(c *check.C, networkMode string) { + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"NetworkMode": networkMode}, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.HostConfig.NetworkMode, checker.Equals, containertypes.NetworkMode(networkMode), check.Commentf("Mismatched NetworkMode")) +} + +func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *check.C) { + // TODO Windows to Windows CI. The CpuShares part could be ported. + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "CpuShares": 512, + "CpusetCpus": "0", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + + out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares") + c.Assert(out, checker.Equals, "512") + + outCpuset := inspectField(c, containerJSON.ID, "HostConfig.CpusetCpus") + c.Assert(outCpuset, checker.Equals, "0") +} + +func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + } + + create := func(ct string) (*http.Response, io.ReadCloser, error) { + jsonData := bytes.NewBuffer(nil) + c.Assert(json.NewEncoder(jsonData).Encode(config), checker.IsNil) + return sockRequestRaw("POST", "/containers/create", jsonData, ct) + } + + // Try with no content-type + res, body, err := create("") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // Try with wrong content-type + res, body, err = create("application/xml") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // now application/json + res, body, err = create("application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + body.Close() +} + +//Issue 14230. daemon should return 500 for invalid port syntax +func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "NetworkMode": "default", + "PortBindings": { + "19039;1230": [ + {} + ] + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid port") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "something", + "MaximumRetryCount": 0 + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "always", + "MaximumRetryCount": 2 + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be used with restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": -2 + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be negative") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": 0 + } + } + }` + + res, _, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) +} + +// Issue 7941 - test to make sure a "null" in JSON is just ignored. +// W/o this fix a null in JSON would be parsed into a string var as "null" +func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) { + config := `{ + "Hostname":"", + "Domainname":"", + "Memory":0, + "MemorySwap":0, + "CpuShares":0, + "Cpuset":null, + "AttachStdin":true, + "AttachStdout":true, + "AttachStderr":true, + "ExposedPorts":{}, + "Tty":true, + "OpenStdin":true, + "StdinOnce":true, + "Env":[], + "Cmd":"ls", + "Image":"busybox", + "Volumes":{}, + "WorkingDir":"", + "Entrypoint":null, + "NetworkDisabled":false, + "OnBuild":null}` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + type createResp struct { + ID string + } + var container createResp + c.Assert(json.Unmarshal(b, &container), checker.IsNil) + out := inspectField(c, container.ID, "HostConfig.CpusetCpus") + c.Assert(out, checker.Equals, "") + + outMemory := inspectField(c, container.ID, "HostConfig.Memory") + c.Assert(outMemory, checker.Equals, "0") + outMemorySwap := inspectField(c, container.ID, "HostConfig.MemorySwap") + c.Assert(outMemorySwap, checker.Equals, "0") +} + +func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + config := `{ + "Image": "busybox", + "Cmd": "ls", + "OpenStdin": true, + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + b, err2 := readBody(body) + c.Assert(err2, checker.IsNil) + + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +func (s *DockerSuite) TestContainerAPIRename(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "TestContainerAPIRename", "-d", "busybox", "sh") + + containerID := strings.TrimSpace(out) + newName := "TestContainerAPIRenameNew" + statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + + name := inspectField(c, containerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) +} + +func (s *DockerSuite) TestContainerAPIKill(c *check.C) { + name := "test-api-kill" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := sockRequest("POST", "/containers/"+name+"/kill", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + state := inspectField(c, name, "State.Running") + c.Assert(state, checker.Equals, "false", check.Commentf("got wrong State from container %s: %q", name, state)) +} + +func (s *DockerSuite) TestContainerAPIRestart(c *check.C) { + name := "test-api-restart" + runSleepingContainer(c, "-di", "--name", name) + + status, _, err := sockRequest("POST", "/containers/"+name+"/restart?t=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c *check.C) { + name := "test-api-restart-no-timeout-param" + out, _ := runSleepingContainer(c, "-di", "--name", name) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := sockRequest("POST", "/containers/"+name+"/restart", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s *DockerSuite) TestContainerAPIStart(c *check.C) { + name := "testing-start" + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + // second call to start should give 304 + status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + c.Assert(err, checker.IsNil) + + // TODO(tibor): figure out why this doesn't work on windows + if isLocalDaemon { + c.Assert(status, checker.Equals, http.StatusNotModified) + } +} + +func (s *DockerSuite) TestContainerAPIStop(c *check.C) { + name := "test-api-stop" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + // second call to start should give 304 + status, _, err = sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotModified) +} + +func (s *DockerSuite) TestContainerAPIWait(c *check.C) { + name := "test-api-wait" + + sleepCmd := "/bin/sleep" + if daemonPlatform == "windows" { + sleepCmd = "sleep" + } + dockerCmd(c, "run", "--name", name, "busybox", sleepCmd, "2") + + status, body, err := sockRequest("POST", "/containers/"+name+"/wait", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + var waitres containertypes.ContainerWaitOKBody + c.Assert(json.Unmarshal(body, &waitres), checker.IsNil) + c.Assert(waitres.StatusCode, checker.Equals, int64(0)) +} + +func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *check.C) { + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + + status, _, err := sockRequest("POST", "/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPICopyPre124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + + status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if h.Name == "test.txt" { + found = true + break + } + } + c.Assert(found, checker.True) +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-empty" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "", + } + + status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Path cannot be empty\n") +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-not-found" + dockerCmd(c, "run", "--name", name, "busybox") + + postData := types.CopyConfig{ + Resource: "/notexist", + } + + status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Could not find the file /notexist in container "+name+"\n") +} + +func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + postData := types.CopyConfig{ + Resource: "/something", + } + + status, _, err := sockRequest("POST", "/v1.23/containers/notexists/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPIDelete(c *check.C) { + out, _ := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + dockerCmd(c, "stop", id) + + status, _, err := sockRequest("DELETE", "/containers/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *check.C) { + status, body, err := sockRequest("DELETE", "/containers/doesnotexist", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(getErrorMessage(c, body), checker.Matches, "No such container: doesnotexist") +} + +func (s *DockerSuite) TestContainerAPIDeleteForce(c *check.C) { + out, _ := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id+"?force=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "tlink1", "busybox", "top") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + out, _ = dockerCmd(c, "run", "--link", "tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top") + + id2 := strings.TrimSpace(out) + c.Assert(waitRun(id2), checker.IsNil) + + links := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers")) + + status, b, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent, check.Commentf(string(b))) + + linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(linksPostRm, checker.Equals, "null", check.Commentf("call to api deleteContainer links should have removed the specified links")) +} + +func (s *DockerSuite) TestContainerAPIDeleteConflict(c *check.C) { + out, _ := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) +} + +func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) { + testRequires(c, SameHostDaemon) + + vol := "/testvolume" + if daemonPlatform == "windows" { + vol = `c:\testvolume` + } + + out, _ := runSleepingContainer(c, "-v", vol) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + source, err := inspectMountSourceField(id, vol) + _, err = os.Stat(source) + c.Assert(err, checker.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + _, err = os.Stat(source) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("expected to get ErrNotExist error, got %v", err)) +} + +// Regression test for https://github.com/docker/docker/issues/6231 +func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *check.C) { + conn, err := sockConn(time.Duration(10*time.Second), "") + c.Assert(err, checker.IsNil) + client := httputil.NewClientConn(conn, nil) + defer client.Close() + + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + b, err := json.Marshal(config) + c.Assert(err, checker.IsNil) + + req, err := http.NewRequest("POST", "/containers/create", bytes.NewBuffer(b)) + c.Assert(err, checker.IsNil) + req.Header.Set("Content-Type", "application/json") + // This is a cheat to make the http request do chunked encoding + // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite + // https://golang.org/src/pkg/net/http/request.go?s=11980:12172 + req.ContentLength = -1 + + resp, err := client.Do(req) + c.Assert(err, checker.IsNil, check.Commentf("error creating container with chunked encoding")) + resp.Body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated) +} + +func (s *DockerSuite) TestContainerAPIPostContainerStop(c *check.C) { + out, _ := runSleepingContainer(c) + + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/stop", nil) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) +} + +// #14170 +func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd []string + }{"busybox", "echo", []string{"hello", "world"}} + _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Entrypoint []string + Cmd []string + }{"busybox", []string{"echo"}, []string{"hello", "world"}} + _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// #14170 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd string + }{"busybox", "echo", "hello world"} + _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Cmd []string + }{"busybox", []string{"echo", "hello", "world"}} + _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// regression #14318 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *check.C) { + // Windows doesn't support CapAdd/CapDrop + testRequires(c, DaemonIsLinux) + config := struct { + Image string + CapAdd string + CapDrop string + }{"busybox", "NET_ADMIN", "SYS_ADMIN"} + status, _, err := sockRequest("POST", "/containers/create?name=capaddtest0", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config2 := struct { + Image string + CapAdd []string + CapDrop []string + }{"busybox", []string{"NET_ADMIN", "SYS_ADMIN"}, []string{"SETGID"}} + status, _, err = sockRequest("POST", "/containers/create?name=capaddtest1", config2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// #14915 +func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only support 1.25 or later + config := struct { + Image string + }{"busybox"} + status, _, err := sockRequest("POST", "/v1.18/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// Ensure an error occurs when you have a container read-only rootfs but you +// extract an archive to a symlink in a writable volume which points to a +// directory outside of the volume. +func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) { + // Windows does not support read-only rootfs + // Requires local volume mount bind. + // --read-only + userns has remount issues + testRequires(c, SameHostDaemon, NotUserNamespace, DaemonIsLinux) + + testVol := getTestDir(c, "test-put-container-archive-err-symlink-in-volume-to-read-only-rootfs-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + cID := makeTestContainer(c, testContainerOptions{ + readOnly: true, + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + defer deleteContainer(cID) + + // Attempt to extract to a symlink in the volume which points to a + // directory outside the volume. This should cause an error because the + // rootfs is read-only. + query := make(url.Values, 1) + query.Set("path", "/vol2/symlinkToAbsDir") + urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode()) + + statusCode, body, err := sockRequest("PUT", urlPath, nil) + c.Assert(err, checker.IsNil) + + if !isCpCannotCopyReadOnly(fmt.Errorf(string(body))) { + c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body)) + } +} + +func (s *DockerSuite) TestContainerAPIGetContainersJSONEmpty(c *check.C) { + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(string(body), checker.Equals, "[]\n") +} + +func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) { + // Not supported on Windows + testRequires(c, DaemonIsLinux) + + c1 := struct { + Image string + CpusetCpus string + }{"busybox", "1-42,,"} + name := "wrong-cpuset-cpus" + status, body, err := sockRequest("POST", "/containers/create?name="+name, c1) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + expected := "Invalid value 1-42,, for cpuset cpus" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + + c2 := struct { + Image string + CpusetMems string + }{"busybox", "42-3,1--"} + name = "wrong-cpuset-mems" + status, body, err = sockRequest("POST", "/containers/create?name="+name, c2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + expected = "Invalid value 42-3,1-- for cpuset mems" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"ShmSize": -1}, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Contains, "SHM size can not be less than 0") +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + var defaultSHMSize int64 = 67108864 + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": "mount", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, defaultSHMSize) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegexp.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{}, + "Cmd": "mount", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(67108864)) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegexp.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": "mount", + "HostConfig": map[string]interface{}{"ShmSize": 1073741824}, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(1073741824)) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 1GB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *check.C) { + // Swappiness is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1)) +} + +// check validation is done daemon side and not only in cli +func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *check.C) { + // OomScoreAdj is not supported on Windows + testRequires(c, DaemonIsLinux) + + config := struct { + Image string + OomScoreAdj int + }{"busybox", 1001} + name := "oomscoreadj-over" + status, b, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + + expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]" + msg := getErrorMessage(c, b) + if !strings.Contains(msg, expected) { + c.Fatalf("Expected output to contain %q, got %q", expected, msg) + } + + config = struct { + Image string + OomScoreAdj int + }{"busybox", -1001} + name = "oomscoreadj-low" + status, b, err = sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]" + msg = getErrorMessage(c, b) + if !strings.Contains(msg, expected) { + c.Fatalf("Expected output to contain %q, got %q", expected, msg) + } +} + +// test case for #22210 where an empty container name caused panic. +func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *check.C) { + status, out, err := sockRequest("DELETE", "/containers/", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(string(out), checker.Contains, "No container name or ID supplied") +} + +func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) { + // Problematic on Windows as Windows does not support stats + testRequires(c, DaemonIsLinux) + + name := "testing-network-disabled" + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"top"}, + "NetworkDisabled": true, + } + + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + c.Assert(waitRun(name), check.IsNil) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + // decode only one object from the stream + var s *types.Stats + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) { + type m mounttypes.Mount + type hc struct{ Mounts []m } + type cfg struct { + Image string + HostConfig hc + } + type testCase struct { + config cfg + status int + msg string + } + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + notExistPath := prefix + slash + "notexist" + + cases := []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "notreal", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "mount type unknown", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "Source must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: notExistPath, + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "bind source path does not exist", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello", + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello2", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local"}}}}}}, + status: http.StatusCreated, + msg: "", + }, + } + + if SameHostDaemon.Condition() { + tmpDir, err := ioutils.TempDir("", "test-mounts-api") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + cases = append(cases, []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{}}}}}, + status: http.StatusBadRequest, + msg: "VolumeOptions must not be specified", + }, + }...) + } + + if DaemonIsLinux.Condition() { + cases = append(cases, []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello3", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local", + Options: map[string]string{"o": "size=1"}}}}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Target: destPath, + TmpfsOptions: &mounttypes.TmpfsOptions{ + SizeBytes: 4096 * 1024, + Mode: 0700, + }}}}}, + status: http.StatusCreated, + msg: "", + }, + + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Source: "/shouldnotbespecified", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "Source must not be specified", + }, + }...) + + } + + for i, x := range cases { + c.Logf("case %d", i) + status, b, err := sockRequest("POST", "/containers/create", x.config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, x.status, check.Commentf("%s\n%v", string(b), cases[i].config)) + if len(x.msg) > 0 { + c.Assert(string(b), checker.Contains, x.msg, check.Commentf("%v", cases[i].config)) + } + } +} + +func (s *DockerSuite) TestContainerAPICreateMountsBindRead(c *check.C) { + testRequires(c, NotUserNamespace, SameHostDaemon) + // also with data in the host side + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + tmpDir, err := ioutil.TempDir("", "test-mounts-api-bind") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("hello"), 666) + c.Assert(err, checker.IsNil) + + data := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "cat /foo/bar"}, + "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{{"Type": "bind", "Source": tmpDir, "Target": destPath}}}, + } + status, resp, err := sockRequest("POST", "/containers/create?name=test", data) + c.Assert(err, checker.IsNil, check.Commentf(string(resp))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + + out, _ := dockerCmd(c, "start", "-a", "test") + c.Assert(out, checker.Equals, "hello") +} + +// Test Mounts comes out as expected for the MountPoint +func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + + var ( + err error + testImg string + ) + if daemonPlatform != "windows" { + testImg, err = buildImage("test-mount-config", ` + FROM busybox + RUN mkdir `+destPath+` && touch `+destPath+slash+`bar + CMD cat `+destPath+slash+`bar + `, true) + } else { + testImg = "busybox" + } + c.Assert(err, checker.IsNil) + + type testCase struct { + cfg mounttypes.Mount + expected types.MountPoint + } + + cases := []testCase{ + // use literal strings here for `Type` instead of the defined constants in the volume package to keep this honest + // Validation of the actual `Mount` struct is done in another test is not needed here + {mounttypes.Mount{Type: "volume", Target: destPath}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath + slash}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath}}, + } + + if SameHostDaemon.Condition() { + // setup temp dir for testing binds + tmpDir1, err := ioutil.TempDir("", "test-mounts-api-1") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir1) + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir1}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}}, + }...) + + // for modes only supported on Linux + if DaemonIsLinux.Condition() { + tmpDir3, err := ioutils.TempDir("", "test-mounts-api-3") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir3) + + c.Assert(mount.Mount(tmpDir3, tmpDir3, "none", "bind,rw"), checker.IsNil) + c.Assert(mount.ForceMount("", tmpDir3, "none", "shared"), checker.IsNil) + + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"}}, + }...) + } + } + + if daemonPlatform != "windows" { // Windows does not support volume populate + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath}}, + }...) + } + + type wrapper struct { + containertypes.Config + HostConfig containertypes.HostConfig + } + type createResp struct { + ID string `json:"Id"` + } + for i, x := range cases { + c.Logf("case %d - config: %v", i, x.cfg) + status, data, err := sockRequest("POST", "/containers/create", wrapper{containertypes.Config{Image: testImg}, containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}}) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(data))) + + var resp createResp + err = json.Unmarshal(data, &resp) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + id := resp.ID + + var mps []types.MountPoint + err = json.NewDecoder(strings.NewReader(inspectFieldJSON(c, id, "Mounts"))).Decode(&mps) + c.Assert(err, checker.IsNil) + c.Assert(mps, checker.HasLen, 1) + c.Assert(mps[0].Destination, checker.Equals, x.expected.Destination) + + if len(x.expected.Source) > 0 { + c.Assert(mps[0].Source, checker.Equals, x.expected.Source) + } + if len(x.expected.Name) > 0 { + c.Assert(mps[0].Name, checker.Equals, x.expected.Name) + } + if len(x.expected.Driver) > 0 { + c.Assert(mps[0].Driver, checker.Equals, x.expected.Driver) + } + c.Assert(mps[0].RW, checker.Equals, x.expected.RW) + c.Assert(mps[0].Type, checker.Equals, x.expected.Type) + c.Assert(mps[0].Mode, checker.Equals, x.expected.Mode) + if len(x.expected.Propagation) > 0 { + c.Assert(mps[0].Propagation, checker.Equals, x.expected.Propagation) + } + + out, _, err := dockerCmdWithError("start", "-a", id) + if (x.cfg.Type != "volume" || (x.cfg.VolumeOptions != nil && x.cfg.VolumeOptions.NoCopy)) && daemonPlatform != "windows" { + c.Assert(err, checker.NotNil, check.Commentf("%s\n%v", out, mps[0])) + } else { + c.Assert(err, checker.IsNil, check.Commentf("%s\n%v", out, mps[0])) + } + + dockerCmd(c, "rm", "-fv", id) + if x.cfg.Type == "volume" && len(x.cfg.Source) > 0 { + // This should still exist even though we removed the container + dockerCmd(c, "volume", "inspect", mps[0].Name) + } else { + // This should be removed automatically when we removed the container + out, _, err := dockerCmdWithError("volume", "inspect", mps[0].Name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + } + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) { + testRequires(c, DaemonIsLinux) + type testCase struct { + cfg map[string]interface{} + expectedOptions []string + } + target := "/foo" + cases := []testCase{ + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, + }, + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target, + "TmpfsOptions": map[string]interface{}{ + "SizeBytes": 4096 * 1024, "Mode": 0700}}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k", "mode=700"}, + }, + } + + for i, x := range cases { + cName := fmt.Sprintf("test-tmpfs-%d", i) + data := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", + fmt.Sprintf("mount | grep 'tmpfs on %s'", target)}, + "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{x.cfg}}, + } + status, resp, err := sockRequest("POST", "/containers/create?name="+cName, data) + c.Assert(err, checker.IsNil, check.Commentf(string(resp))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + out, _ := dockerCmd(c, "start", "-a", cName) + for _, option := range x.expectedOptions { + c.Assert(out, checker.Contains, option) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_create_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_create_test.go new file mode 100644 index 0000000..41011c3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_create_test.go @@ -0,0 +1,84 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPICreateWithNotExistImage(c *check.C) { + name := "test" + config := map[string]interface{}{ + "Image": "test456:v1", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected := "No such image: test456:v1" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + config2 := map[string]interface{}{ + "Image": "test456", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err = sockRequest("POST", "/containers/create?name="+name, config2) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: test456:latest" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + + config3 := map[string]interface{}{ + "Image": "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", + } + + status, body, err = sockRequest("POST", "/containers/create?name="+name, config3) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + +} + +// Test for #25099 +func (s *DockerSuite) TestAPICreateEmptyEnv(c *check.C) { + name := "test1" + config := map[string]interface{}{ + "Image": "busybox", + "Env": []string{"", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + + status, body, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected := "invalid environment variable:" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = "test2" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=foo", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =foo" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_events_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_events_test.go new file mode 100644 index 0000000..3891c87 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_events_test.go @@ -0,0 +1,73 @@ +package main + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestEventsAPIEmptyOutput(c *check.C) { + type apiResp struct { + resp *http.Response + err error + } + chResp := make(chan *apiResp) + go func() { + resp, body, err := sockRequestRaw("GET", "/events", nil, "") + body.Close() + chResp <- &apiResp{resp, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.resp.StatusCode, checker.Equals, http.StatusOK) + case <-time.After(3 * time.Second): + c.Fatal("timeout waiting for events api to respond, should have responded immediately") + } +} + +func (s *DockerSuite) TestEventsAPIBackwardsCompatible(c *check.C) { + since := daemonTime(c).Unix() + ts := strconv.FormatInt(since, 10) + + out, _ := runSleepingContainer(c, "--name=foo", "-d") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + q := url.Values{} + q.Set("since", ts) + + _, body, err := sockRequestRaw("GET", "/events?"+q.Encode(), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + + dec := json.NewDecoder(body) + var containerCreateEvent *jsonmessage.JSONMessage + for { + var event jsonmessage.JSONMessage + if err := dec.Decode(&event); err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if event.Status == "create" && event.ID == containerID { + containerCreateEvent = &event + break + } + } + + c.Assert(containerCreateEvent, checker.Not(checker.IsNil)) + c.Assert(containerCreateEvent.Status, checker.Equals, "create") + c.Assert(containerCreateEvent.ID, checker.Equals, containerID) + c.Assert(containerCreateEvent.From, checker.Equals, "busybox") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_exec_resize_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_exec_resize_test.go new file mode 100644 index 0000000..cf4dded --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_exec_resize_test.go @@ -0,0 +1,103 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "sync" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExecResizeAPIHeightWidthNoInt(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) +} + +// Part of #14845 +func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) { + name := "exec_resize_test" + dockerCmd(c, "run", "-d", "-i", "-t", "--name", name, "--restart", "always", "busybox", "/bin/sh") + + testExecResize := func() error { + data := map[string]interface{}{ + "AttachStdin": true, + "Cmd": []string{"/bin/sh"}, + } + uri := fmt.Sprintf("/containers/%s/exec", name) + status, body, err := sockRequest("POST", uri, data) + if err != nil { + return err + } + if status != http.StatusCreated { + return fmt.Errorf("POST %s is expected to return %d, got %d", uri, http.StatusCreated, status) + } + + out := map[string]string{} + err = json.Unmarshal(body, &out) + if err != nil { + return fmt.Errorf("ExecCreate returned invalid json. Error: %q", err.Error()) + } + + execID := out["Id"] + if len(execID) < 1 { + return fmt.Errorf("ExecCreate got invalid execID") + } + + payload := bytes.NewBufferString(`{"Tty":true}`) + conn, _, err := sockRequestHijack("POST", fmt.Sprintf("/exec/%s/start", execID), payload, "application/json") + if err != nil { + return fmt.Errorf("Failed to start the exec: %q", err.Error()) + } + defer conn.Close() + + _, rc, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), nil, "text/plain") + // It's probably a panic of the daemon if io.ErrUnexpectedEOF is returned. + if err == io.ErrUnexpectedEOF { + return fmt.Errorf("The daemon might have crashed.") + } + + if err == nil { + rc.Close() + } + + // We only interested in the io.ErrUnexpectedEOF error, so we return nil otherwise. + return nil + } + + // The panic happens when daemon.ContainerExecStart is called but the + // container.Exec is not called. + // Because the panic is not 100% reproducible, we send the requests concurrently + // to increase the probability that the problem is triggered. + var ( + n = 10 + ch = make(chan error, n) + wg sync.WaitGroup + ) + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + defer wg.Done() + if err := testExecResize(); err != nil { + ch <- err + } + }() + } + + wg.Wait() + select { + case err := <-ch: + c.Fatal(err.Error()) + default: + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_exec_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_exec_test.go new file mode 100644 index 0000000..716e9ac --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_exec_test.go @@ -0,0 +1,198 @@ +// +build !test_no_exec + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Regression test for #9414 +func (s *DockerSuite) TestExecAPICreateNoCmd(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + comment := check.Commentf("Expected message when creating exec command with no Cmd specified") + c.Assert(getErrorMessage(c, body), checker.Contains, "No exec command specified", comment) +} + +func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(map[string]interface{}{"Cmd": nil}); err != nil { + c.Fatalf("Can not encode data to json %s", err) + } + + res, body, err := sockRequestRaw("POST", fmt.Sprintf("/containers/%s/exec", name), jsonData, "text/plain") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + + comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified") + c.Assert(getErrorMessage(c, b), checker.Contains, "Content-Type specified", comment) +} + +func (s *DockerSuite) TestExecAPICreateContainerPaused(c *check.C) { + // Not relevant on Windows as Windows containers cannot be paused + testRequires(c, DaemonIsLinux) + name := "exec_create_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + dockerCmd(c, "pause", name) + status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) + + comment := check.Commentf("Expected message when creating exec command with Container %s is paused", name) + c.Assert(getErrorMessage(c, body), checker.Contains, "Container "+name+" is paused, unpause the container before exec", comment) +} + +func (s *DockerSuite) TestExecAPIStart(c *check.C) { + testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvagable to Windows to Windows CI + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + + id := createExec(c, "test") + startExec(c, id, http.StatusOK) + + var execJSON struct{ PID int } + inspectExec(c, id, &execJSON) + c.Assert(execJSON.PID, checker.GreaterThan, 1) + + id = createExec(c, "test") + dockerCmd(c, "stop", "test") + + startExec(c, id, http.StatusNotFound) + + dockerCmd(c, "start", "test") + startExec(c, id, http.StatusNotFound) + + // make sure exec is created before pausing + id = createExec(c, "test") + dockerCmd(c, "pause", "test") + startExec(c, id, http.StatusConflict) + dockerCmd(c, "unpause", "test") + startExec(c, id, http.StatusOK) +} + +func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + + id := createExec(c, "test") + resp, _, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Server"), checker.Not(checker.Equals), "") +} + +func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + runSleepingContainer(c, "-d", "--name", "test") + id := createExec(c, "test") + + resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/v1.20/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "text/plain") + c.Assert(err, checker.IsNil) + + b, err := readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) +} + +// #19362 +func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) { + runSleepingContainer(c, "-d", "--name", "test") + execID := createExec(c, "test") + startExec(c, execID, http.StatusOK) + + timeout := time.After(60 * time.Second) + var execJSON struct{ Running bool } + for { + select { + case <-timeout: + c.Fatal("timeout waiting for exec to start") + default: + } + + inspectExec(c, execID, &execJSON) + if !execJSON.Running { + break + } + } + + startExec(c, execID, http.StatusConflict) +} + +// #20638 +func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) { + name := "foo" + runSleepingContainer(c, "-d", "-t", "--name", name) + data := map[string]interface{}{ + "cmd": []string{"true"}, + "AttachStdin": true, + } + _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), data) + c.Assert(err, checker.IsNil, check.Commentf(string(b))) + + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + + _, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + + b, err = readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + + resp, _, err := sockRequestRaw("GET", "/_ping", nil, "") + c.Assert(err, checker.IsNil) + if resp.StatusCode != http.StatusOK { + c.Fatal("daemon is down, it should alive") + } +} + +func createExec(c *check.C, name string) string { + _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) + c.Assert(err, checker.IsNil, check.Commentf(string(b))) + + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + return createResp.ID +} + +func startExec(c *check.C, id string, code int) { + resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + + b, err := readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, code, comment) +} + +func inspectExec(c *check.C, id string, out interface{}) { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/exec/%s/json", id), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + err = json.NewDecoder(body).Decode(out) + c.Assert(err, checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_images_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_images_test.go new file mode 100644 index 0000000..b7617ea --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_images_test.go @@ -0,0 +1,165 @@ +package main + +import ( + "encoding/json" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIImagesFilter(c *check.C) { + name := "utest:tag1" + name2 := "utest/docker:tag2" + name3 := "utest:5000/docker:tag3" + for _, n := range []string{name, name2, name3} { + dockerCmd(c, "tag", "busybox", n) + } + type image types.ImageSummary + getImages := func(filter string) []image { + v := url.Values{} + v.Set("filter", filter) + status, b, err := sockRequest("GET", "/images/json?"+v.Encode(), nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var images []image + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + + return images + } + + //incorrect number of matches returned + images := getImages("utest*/*") + c.Assert(images[0].RepoTags, checker.HasLen, 2) + + images = getImages("utest") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("utest*") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("*5000*/*") + c.Assert(images[0].RepoTags, checker.HasLen, 1) +} + +func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *check.C) { + // TODO Windows to Windows CI: Investigate further why this test fails. + testRequires(c, Network) + testRequires(c, DaemonIsLinux) + out, err := buildImage("saveandload", "FROM busybox\nENV FOO bar", false) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + dockerCmd(c, "rmi", id) + + res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar") + c.Assert(err, checker.IsNil) + defer loadBody.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + inspectOut := inspectField(c, id, "Id") + c.Assert(strings.TrimSpace(string(inspectOut)), checker.Equals, id, check.Commentf("load did not work properly")) +} + +func (s *DockerSuite) TestAPIImagesDelete(c *check.C) { + if daemonPlatform != "windows" { + testRequires(c, Network) + } + name := "test-api-images-delete" + out, err := buildImage(name, "FROM busybox\nENV FOO bar", false) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + dockerCmd(c, "tag", name, "test:tag1") + + status, _, err := sockRequest("DELETE", "/images/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) + + status, _, err = sockRequest("DELETE", "/images/test:noexist", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) //Status Codes:404 – no such image + + status, _, err = sockRequest("DELETE", "/images/test:tag1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIImagesHistory(c *check.C) { + if daemonPlatform != "windows" { + testRequires(c, Network) + } + name := "test-api-images-history" + out, err := buildImage(name, "FROM busybox\nENV FOO bar", false) + c.Assert(err, checker.IsNil) + + id := strings.TrimSpace(out) + + status, body, err := sockRequest("GET", "/images/"+id+"/history", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var historydata []types.ImageHistory + err = json.Unmarshal(body, &historydata) + c.Assert(err, checker.IsNil, check.Commentf("Error on unmarshal")) + + c.Assert(historydata, checker.Not(checker.HasLen), 0) + c.Assert(historydata[0].Tags[0], checker.Equals, "test-api-images-history:latest") +} + +// #14846 +func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *check.C) { + testRequires(c, Network) + + res, b, err := sockRequestRaw("GET", "/images/search?term=test", nil, "application/json") + c.Assert(err, check.IsNil) + b.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") +} + +// Test case for 30027: image size reported as -1 in v1.12 client against v1.13 daemon. +// This test checks to make sure both v1.12 and v1.13 client against v1.13 daemon get correct `Size` after the fix. +func (s *DockerSuite) TestAPIImagesSizeCompatibility(c *check.C) { + status, b, err := sockRequest("GET", "/images/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var images []types.ImageSummary + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + c.Assert(len(images), checker.Not(checker.Equals), 0) + for _, image := range images { + c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) + } + + type v124Image struct { + ID string `json:"Id"` + ParentID string `json:"ParentId"` + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + VirtualSize int64 + Labels map[string]string + } + status, b, err = sockRequest("GET", "/v1.24/images/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var v124Images []v124Image + err = json.Unmarshal(b, &v124Images) + c.Assert(err, checker.IsNil) + c.Assert(len(v124Images), checker.Not(checker.Equals), 0) + for _, image := range v124Images { + c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_info_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_info_test.go new file mode 100644 index 0000000..1556099 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_info_test.go @@ -0,0 +1,53 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoAPI(c *check.C) { + endpoint := "/info" + + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + // always shown fields + stringsToCheck := []string{ + "ID", + "Containers", + "ContainersRunning", + "ContainersPaused", + "ContainersStopped", + "Images", + "LoggingDriver", + "OperatingSystem", + "NCPU", + "OSType", + "Architecture", + "MemTotal", + "KernelVersion", + "Driver", + "ServerVersion", + "SecurityOptions"} + + out := string(body) + for _, linePrefix := range stringsToCheck { + c.Assert(out, checker.Contains, linePrefix) + } +} + +func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + endpoint := "/v1.20/info" + + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + out := string(body) + c.Assert(out, checker.Contains, "ExecutionDriver") + c.Assert(out, checker.Contains, "not supported") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_test.go new file mode 100644 index 0000000..546b224 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_test.go @@ -0,0 +1,183 @@ +package main + +import ( + "encoding/json" + "net/http" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + keysBase := []string{"Id", "State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", + "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "MountLabel", "ProcessLabel", "GraphDriver"} + + type acase struct { + version string + keys []string + } + + var cases []acase + + if daemonPlatform == "windows" { + cases = []acase{ + {"v1.25", append(keysBase, "Mounts")}, + } + + } else { + cases = []acase{ + {"v1.20", append(keysBase, "Mounts")}, + {"v1.19", append(keysBase, "Volumes", "VolumesRW")}, + } + } + + for _, cs := range cases { + body := getInspectBody(c, cs.version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", cs.version)) + + for _, key := range cs.keys { + _, ok := inspectJSON[key] + c.Check(ok, checker.True, check.Commentf("%s does not exist in response for version %s", key, cs.version)) + } + + //Issue #6830: type not properly converted to JSON/back + _, ok := inspectJSON["Path"].(bool) + c.Assert(ok, checker.False, check.Commentf("Path of `true` should not be converted to boolean `true` via JSON marshalling")) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriverLegacy(c *check.C) { + // No legacy implications for Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, check.Commentf("API version %s expected to include VolumeDriver in 'Config'", version)) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriver(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--volume-driver", "local", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + body := getInspectBody(c, "v1.25", cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version 1.25")) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.False, check.Commentf("API version 1.25 expected to not include VolumeDriver in 'Config'")) + + config, ok = inspectJSON["HostConfig"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'HostConfig'")) + cfg = config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, check.Commentf("API version 1.25 expected to include VolumeDriver in 'HostConfig'")) +} + +func (s *DockerSuite) TestInspectAPIImageResponse(c *check.C) { + dockerCmd(c, "tag", "busybox:latest", "busybox:mytag") + + endpoint := "/images/busybox/json" + status, body, err := sockRequest("GET", endpoint, nil) + + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var imageJSON types.ImageInspect + err = json.Unmarshal(body, &imageJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for latest version")) + c.Assert(imageJSON.RepoTags, checker.HasLen, 2) + + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:latest"), checker.Equals, true) + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:mytag"), checker.Equals, true) +} + +// #17131, #17139, #17173 +func (s *DockerSuite) TestInspectAPIEmptyFieldsInConfigPre121(c *check.C) { + // Not relevant on Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + for _, f := range []string{"MacAddress", "NetworkDisabled", "ExposedPorts"} { + _, ok := cfg[f] + c.Check(ok, checker.True, check.Commentf("API version %s expected to include %s in 'Config'", version, f)) + } + } +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings120(c *check.C) { + // Not relevant on Windows, and besides it doesn't have any bridge network settings + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + body := getInspectBody(c, "v1.20", containerID) + + var inspectJSON v1p20.ContainerJSON + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil) + + settings := inspectJSON.NetworkSettings + c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings121(c *check.C) { + // Windows doesn't have any bridge network settings + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + body := getInspectBody(c, "v1.21", containerID) + + var inspectJSON types.ContainerJSON + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil) + + settings := inspectJSON.NetworkSettings + c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) + c.Assert(settings.Networks["bridge"], checker.Not(checker.IsNil)) + c.Assert(settings.IPAddress, checker.Equals, settings.Networks["bridge"].IPAddress) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_unix_test.go new file mode 100644 index 0000000..f49a139 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_unix_test.go @@ -0,0 +1,35 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// #16665 +func (s *DockerSuite) TestInspectAPICpusetInConfigPre120(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, cgroupCpuset) + + name := "cpusetinconfig-pre120" + dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true") + + status, body, err := sockRequest("GET", fmt.Sprintf("/v1.19/containers/%s/json", name), nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var inspectJSON map[string]interface{} + err = json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal body for version 1.19")) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["Cpuset"] + c.Assert(ok, checker.True, check.Commentf("API version 1.19 expected to include Cpuset in 'Config'")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_logs_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_logs_test.go new file mode 100644 index 0000000..2e8ffa9 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_logs_test.go @@ -0,0 +1,87 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + type logOut struct { + out string + res *http.Response + err error + } + chLog := make(chan logOut) + + go func() { + res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id), nil, "") + if err != nil { + chLog <- logOut{"", nil, err} + return + } + defer body.Close() + out, err := bufio.NewReader(body).ReadString('\n') + if err != nil { + chLog <- logOut{"", nil, err} + return + } + chLog <- logOut{strings.TrimSpace(out), res, err} + }() + + select { + case l := <-chLog: + c.Assert(l.err, checker.IsNil) + c.Assert(l.res.StatusCode, checker.Equals, http.StatusOK) + if !strings.HasSuffix(l.out, "hello") { + c.Fatalf("expected log output to container 'hello', but it does not") + } + case <-time.After(20 * time.Second): + c.Fatal("timeout waiting for logs to exit") + } +} + +func (s *DockerSuite) TestLogsAPINoStdoutNorStderr(c *check.C) { + name := "logs_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(err, checker.IsNil) + + expected := "Bad parameters: you must choose at least one stream" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +// Regression test for #12704 +func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *check.C) { + name := "logs_test" + t0 := time.Now() + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10") + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") + t1 := time.Now() + c.Assert(err, checker.IsNil) + body.Close() + elapsed := t1.Sub(t0).Seconds() + if elapsed > 20.0 { + c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed) + } +} + +func (s *DockerSuite) TestLogsAPIContainerNotFound(c *check.C) { + name := "nonExistentContainer" + resp, _, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_network_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_network_test.go new file mode 100644 index 0000000..1cc66f0 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_network_test.go @@ -0,0 +1,353 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPINetworkGetDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + // By default docker daemon creates 3 networks. check if they are present + defaults := []string{"bridge", "host", "none"} + for _, nn := range defaults { + c.Assert(isNetworkAvailable(c, nn), checker.Equals, true) + } +} + +func (s *DockerSuite) TestAPINetworkCreateDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create a network + name := "testnetwork" + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + id := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // delete the network and make sure it is deleted + deleteNetwork(c, id, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkCreateCheckDuplicate(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testcheckduplicate" + configOnCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + configNotCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + // Creating a new network first + createNetwork(c, configOnCheck, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // Creating another network with same name and CheckDuplicate must fail + createNetwork(c, configOnCheck, false) + + // Creating another network with same name and not CheckDuplicate must succeed + createNetwork(c, configNotCheck, true) +} + +func (s *DockerSuite) TestAPINetworkFilter(c *check.C) { + testRequires(c, DaemonIsLinux) + nr := getNetworkResource(c, getNetworkIDByName(c, "bridge")) + c.Assert(nr.Name, checker.Equals, "bridge") +} + +func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Inspect default bridge network + nr := getNetworkResource(c, "bridge") + c.Assert(nr.Name, checker.Equals, "bridge") + + // run a container and attach it to the default bridge network + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + containerIP := findContainerIP(c, "test", "bridge") + + // inspect default bridge network again and make sure the container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + c.Assert(ip.String(), checker.Equals, containerIP) + + // IPAM configuration inspect + ipam := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "172.28.0.0/16", IPRange: "172.28.5.0/24", Gateway: "172.28.5.254"}}, + } + config := types.NetworkCreateRequest{ + Name: "br0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam, + Options: map[string]string{"foo": "bar", "opts": "dopts"}, + }, + } + id0 := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true) + + nr = getNetworkResource(c, id0) + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") + c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") + c.Assert(nr.Options["foo"], checker.Equals, "bar") + c.Assert(nr.Options["opts"], checker.Equals, "dopts") + + // delete the network and make sure it is deleted + deleteNetwork(c, id0, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkConnectDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create test network + name := "testnetwork" + config := types.NetworkCreateRequest{ + Name: name, + } + id := createNetwork(c, config, true) + nr := getNetworkResource(c, id) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(nr.ID, checker.Equals, id) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + + // connect the container to the test network + connectNetwork(c, nr.ID, containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + containerIP := findContainerIP(c, "test", "testnetwork") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + disconnectNetwork(c, nr.ID, containerID) + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // delete the network + deleteNetwork(c, nr.ID, true) +} + +func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + // test0 bridge network + ipam0 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.0.0/16", IPRange: "192.178.128.0/17", Gateway: "192.178.138.100"}}, + } + config0 := types.NetworkCreateRequest{ + Name: "test0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam0, + }, + } + id0 := createNetwork(c, config0, true) + c.Assert(isNetworkAvailable(c, "test0"), checker.Equals, true) + + ipam1 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.128.0/17", Gateway: "192.178.128.1"}}, + } + // test1 bridge network overlaps with test0 + config1 := types.NetworkCreateRequest{ + Name: "test1", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam1, + }, + } + createNetwork(c, config1, false) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, false) + + ipam2 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.169.0.0/16", Gateway: "192.169.100.100"}}, + } + // test2 bridge network does not overlap + config2 := types.NetworkCreateRequest{ + Name: "test2", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam2, + }, + } + createNetwork(c, config2, true) + c.Assert(isNetworkAvailable(c, "test2"), checker.Equals, true) + + // remove test0 and retry to create test1 + deleteNetwork(c, id0, true) + createNetwork(c, config1, true) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, true) + + // for networks w/o ipam specified, docker will choose proper non-overlapping subnets + createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, true) + c.Assert(isNetworkAvailable(c, "test3"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, true) + c.Assert(isNetworkAvailable(c, "test4"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test5"}, true) + c.Assert(isNetworkAvailable(c, "test5"), checker.Equals, true) + + for i := 1; i < 6; i++ { + deleteNetwork(c, fmt.Sprintf("test%d", i), true) + } +} + +func (s *DockerSuite) TestAPICreateDeletePredefinedNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + createDeletePredefinedNetwork(c, "bridge") + createDeletePredefinedNetwork(c, "none") + createDeletePredefinedNetwork(c, "host") +} + +func createDeletePredefinedNetwork(c *check.C, name string) { + // Create pre-defined network + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + shouldSucceed := false + createNetwork(c, config, shouldSucceed) + deleteNetwork(c, name, shouldSucceed) +} + +func isNetworkAvailable(c *check.C, name string) bool { + status, body, err := sockRequest("GET", "/networks", nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + nJSON := []types.NetworkResource{} + err = json.Unmarshal(body, &nJSON) + c.Assert(err, checker.IsNil) + + for _, n := range nJSON { + if n.Name == name { + return true + } + } + return false +} + +func getNetworkIDByName(c *check.C, name string) string { + var ( + v = url.Values{} + filterArgs = filters.NewArgs() + ) + filterArgs.Add("name", name) + filterJSON, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + v.Set("filters", filterJSON) + + status, body, err := sockRequest("GET", "/networks?"+v.Encode(), nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + nJSON := []types.NetworkResource{} + err = json.Unmarshal(body, &nJSON) + c.Assert(err, checker.IsNil) + c.Assert(len(nJSON), checker.Equals, 1) + + return nJSON[0].ID +} + +func getNetworkResource(c *check.C, id string) *types.NetworkResource { + _, obj, err := sockRequest("GET", "/networks/"+id, nil) + c.Assert(err, checker.IsNil) + + nr := types.NetworkResource{} + err = json.Unmarshal(obj, &nr) + c.Assert(err, checker.IsNil) + + return &nr +} + +func createNetwork(c *check.C, config types.NetworkCreateRequest, shouldSucceed bool) string { + status, resp, err := sockRequest("POST", "/networks/create", config) + if !shouldSucceed { + c.Assert(status, checker.Not(checker.Equals), http.StatusCreated) + return "" + } + + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var nr types.NetworkCreateResponse + err = json.Unmarshal(resp, &nr) + c.Assert(err, checker.IsNil) + + return nr.ID +} + +func connectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + status, _, err := sockRequest("POST", "/networks/"+nid+"/connect", config) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) +} + +func disconnectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + status, _, err := sockRequest("POST", "/networks/"+nid+"/disconnect", config) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) +} + +func deleteNetwork(c *check.C, id string, shouldSucceed bool) { + status, _, err := sockRequest("DELETE", "/networks/"+id, nil) + if !shouldSucceed { + c.Assert(status, checker.Not(checker.Equals), http.StatusOK) + return + } + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(err, checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_resize_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_resize_test.go new file mode 100644 index 0000000..daf1b05 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_resize_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestResizeAPIResponse(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIHeightWidthNoInt(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIResponseWhenContainerNotStarted(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + // make sure the exited container is not running + dockerCmd(c, "wait", cleanedContainerID) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, body, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) + + c.Assert(getErrorMessage(c, body), checker.Contains, "is not running", check.Commentf("resize should fail with message 'Container is not running'")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_service_update_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_service_update_test.go new file mode 100644 index 0000000..15a21e5 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_service_update_test.go @@ -0,0 +1,39 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func setPortConfig(portConfig []swarm.PortConfig) serviceConstructor { + return func(s *swarm.Service) { + if s.Spec.EndpointSpec == nil { + s.Spec.EndpointSpec = &swarm.EndpointSpec{} + } + s.Spec.EndpointSpec.Ports = portConfig + } +} + +func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service with a port mapping of 8080:8081. + portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} + serviceID := d.createService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. + updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} + remoteService := d.getService(c, serviceID) + d.updateService(c, remoteService, setPortConfig(updatedPortConfig)) + + // Inspect the service and verify port mapping. + updatedService := d.getService(c, serviceID) + c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) + c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].PublishedPort, check.Equals, uint32(8082)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_stats_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_stats_test.go new file mode 100644 index 0000000..23fbdbb --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_stats_test.go @@ -0,0 +1,310 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "os/exec" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ") + +func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;do echo 'Hello'; usleep 100000; done") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + + var cpuPercent = 0.0 + + if daemonPlatform != "windows" { + cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage) + systemDelta := float64(v.CPUStats.SystemUsage - v.PreCPUStats.SystemUsage) + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } else { + // Max number of 100ns intervals between the previous time read and now + possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals + possIntervals /= 100 // Convert to number of 100ns intervals + possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors + + // Intervals used + intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage + + // Percentage avoiding divide-by-zero + if possIntervals > 0 { + cpuPercent = float64(intervalsUsed) / float64(possIntervals) * 100.0 + } + } + + c.Assert(cpuPercent, check.Not(checker.Equals), 0.0, check.Commentf("docker stats with no-stream get cpu usage failed: was %v", cpuPercent)) +} + +func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo 1") + id := strings.TrimSpace(out) + + getGoRoutines := func() int { + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/info"), nil, "") + c.Assert(err, checker.IsNil) + info := types.Info{} + err = json.NewDecoder(body).Decode(&info) + c.Assert(err, checker.IsNil) + body.Close() + return info.NGoroutines + } + + // When the HTTP connection is closed, the number of goroutines should not increase. + routines := getGoRoutines() + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats", id), nil, "") + c.Assert(err, checker.IsNil) + body.Close() + + t := time.After(30 * time.Second) + for { + select { + case <-t: + c.Assert(getGoRoutines(), checker.LessOrEqualThan, routines) + return + default: + if n := getGoRoutines(); n <= routines { + return + } + time.Sleep(200 * time.Millisecond) + } + } +} + +func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := runSleepingContainer(c) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + // Retrieve the container address + net := "bridge" + if daemonPlatform == "windows" { + net = "nat" + } + contIP := findContainerIP(c, id, net) + numPings := 1 + + var preRxPackets uint64 + var preTxPackets uint64 + var postRxPackets uint64 + var postTxPackets uint64 + + // Get the container networking stats before and after pinging the container + nwStatsPre := getNetworkStats(c, id) + for _, v := range nwStatsPre { + preRxPackets += v.RxPackets + preTxPackets += v.TxPackets + } + + countParam := "-c" + if runtime.GOOS == "windows" { + countParam = "-n" // Ping count parameter is -n on Windows + } + pingout, err := exec.Command("ping", contIP, countParam, strconv.Itoa(numPings)).CombinedOutput() + if err != nil && runtime.GOOS == "linux" { + // If it fails then try a work-around, but just for linux. + // If this fails too then go back to the old error for reporting. + // + // The ping will sometimes fail due to an apparmor issue where it + // denies access to the libc.so.6 shared library - running it + // via /lib64/ld-linux-x86-64.so.2 seems to work around it. + pingout2, err2 := exec.Command("/lib64/ld-linux-x86-64.so.2", "/bin/ping", contIP, "-c", strconv.Itoa(numPings)).CombinedOutput() + if err2 == nil { + pingout = pingout2 + err = err2 + } + } + c.Assert(err, checker.IsNil) + pingouts := string(pingout[:]) + nwStatsPost := getNetworkStats(c, id) + for _, v := range nwStatsPost { + postRxPackets += v.RxPackets + postTxPackets += v.TxPackets + } + + // Verify the stats contain at least the expected number of packets + // On Linux, account for ARP. + expRxPkts := preRxPackets + uint64(numPings) + expTxPkts := preTxPackets + uint64(numPings) + if daemonPlatform != "windows" { + expRxPkts++ + expTxPkts++ + } + c.Assert(postTxPackets, checker.GreaterOrEqualThan, expTxPkts, + check.Commentf("Reported less TxPackets than expected. Expected >= %d. Found %d. %s", expTxPkts, postTxPackets, pingouts)) + c.Assert(postRxPackets, checker.GreaterOrEqualThan, expRxPkts, + check.Commentf("Reported less Txbytes than expected. Expected >= %d. Found %d. %s", expRxPkts, postRxPackets, pingouts)) +} + +func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) { + // Windows doesn't support API versions less than 1.25, so no point testing 1.17 .. 1.21 + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := runSleepingContainer(c) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + wg := sync.WaitGroup{} + + for i := 17; i <= 21; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + apiVersion := fmt.Sprintf("v1.%d", i) + statsJSONBlob := getVersionedStats(c, id, apiVersion) + if versions.LessThan(apiVersion, "v1.21") { + c.Assert(jsonBlobHasLTv121NetworkStats(statsJSONBlob), checker.Equals, true, + check.Commentf("Stats JSON blob from API %s %#v does not look like a =v1.21 API stats structure", apiVersion, statsJSONBlob)) + } + }(i) + } + wg.Wait() +} + +func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats { + var st *types.StatsJSON + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, checker.IsNil) + + err = json.NewDecoder(body).Decode(&st) + c.Assert(err, checker.IsNil) + body.Close() + + return st.Networks +} + +// getVersionedStats returns stats result for the +// container with id using an API call with version apiVersion. Since the +// stats result type differs between API versions, we simply return +// map[string]interface{}. +func getVersionedStats(c *check.C, id string, apiVersion string) map[string]interface{} { + stats := make(map[string]interface{}) + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + + err = json.NewDecoder(body).Decode(&stats) + c.Assert(err, checker.IsNil, check.Commentf("failed to decode stat: %s", err)) + + return stats +} + +func jsonBlobHasLTv121NetworkStats(blob map[string]interface{}) bool { + networkStatsIntfc, ok := blob["network"] + if !ok { + return false + } + networkStats, ok := networkStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, expectedKey := range expectedNetworkInterfaceStats { + if _, ok := networkStats[expectedKey]; !ok { + return false + } + } + return true +} + +func jsonBlobHasGTE121NetworkStats(blob map[string]interface{}) bool { + networksStatsIntfc, ok := blob["networks"] + if !ok { + return false + } + networksStats, ok := networksStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, networkInterfaceStatsIntfc := range networksStats { + networkInterfaceStats, ok := networkInterfaceStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, expectedKey := range expectedNetworkInterfaceStats { + if _, ok := networkInterfaceStats[expectedKey]; !ok { + return false + } + } + } + return true +} + +func (s *DockerSuite) TestAPIStatsContainerNotFound(c *check.C) { + testRequires(c, DaemonIsLinux) + + status, _, err := sockRequest("GET", "/containers/nonexistent/stats", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) + + status, _, err = sockRequest("GET", "/containers/nonexistent/stats?stream=0", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestAPIStatsNoStreamConnectedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + out1, _ := runSleepingContainer(c) + id1 := strings.TrimSpace(out1) + c.Assert(waitRun(id1), checker.IsNil) + + out2, _ := runSleepingContainer(c, "--net", "container:"+id1) + id2 := strings.TrimSpace(out2) + c.Assert(waitRun(id2), checker.IsNil) + + ch := make(chan error) + go func() { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id2), nil, "") + defer body.Close() + if err != nil { + ch <- err + } + if resp.StatusCode != http.StatusOK { + ch <- fmt.Errorf("Invalid StatusCode %v", resp.StatusCode) + } + if resp.Header.Get("Content-Type") != "application/json" { + ch <- fmt.Errorf("Invalid 'Content-Type' %v", resp.Header.Get("Content-Type")) + } + var v *types.Stats + if err := json.NewDecoder(body).Decode(&v); err != nil { + ch <- err + } + ch <- nil + }() + + select { + case err := <-ch: + c.Assert(err, checker.IsNil, check.Commentf("Error in stats Engine API: %v", err)) + case <-time.After(15 * time.Second): + c.Fatalf("Stats did not return after timeout") + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_stats_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_stats_unix_test.go new file mode 100644 index 0000000..0995ce3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_stats_unix_test.go @@ -0,0 +1,41 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIStatsContainerGetMemoryLimit(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport) + + resp, body, err := sockRequestRaw("GET", "/info", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + var info types.Info + err = json.NewDecoder(body).Decode(&info) + c.Assert(err, checker.IsNil) + body.Close() + + // don't set a memory limit, the memory limit should be system memory + conName := "foo" + dockerCmd(c, "run", "-d", "--name", conName, "busybox", "top") + c.Assert(waitRun(conName), checker.IsNil) + + resp, body, err = sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", conName), nil, "") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + c.Assert(fmt.Sprintf("%d", v.MemoryStats.Limit), checker.Equals, fmt.Sprintf("%d", info.MemTotal)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_test.go new file mode 100644 index 0000000..24fe049 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_test.go @@ -0,0 +1,1391 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +var defaultReconciliationTimeout = 30 * time.Second + +func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { + // todo: should find a better way to verify that components are running than /info + d1 := s.AddDaemon(c, true, true) + info, err := d1.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + d2 := s.AddDaemon(c, true, false) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Leaving cluster + c.Assert(d2.Leave(false), checker.IsNil) + + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Current state restoring after restarts + err = d1.Stop() + c.Assert(err, checker.IsNil) + err = d2.Stop() + c.Assert(err, checker.IsNil) + + err = d1.Start() + c.Assert(err, checker.IsNil) + err = d2.Start() + c.Assert(err, checker.IsNil) + + info, err = d1.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + + d2 := s.AddDaemon(c, false, false) + err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err := d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + workerToken := d1.joinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change tokens + d1.rotateTokens(c) + + err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + workerToken = d1.joinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change spec, don't change tokens + d1.updateSwarm(c, func(s *swarm.Spec) {}) + + err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, false, false) + splitToken := strings.Split(d1.joinTokens(c).Worker, "-") + splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" + replacementToken := strings.Join(splitToken, "-") + err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") +} + +func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + d2 := s.AddDaemon(c, true, false) + + info, err := d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleManager + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) + + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleWorker + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.False) + + // Demoting last node should fail + node := d1.getNode(c, d1.NodeID) + node.Spec.Role = swarm.NodeRoleWorker + url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) + status, out, err := d1.SockRequest("POST", url, node.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out))) + c.Assert(string(out), checker.Contains, "last manager of the swarm") + info, err = d1.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(info.ControlAvailable, checker.True) + + // Promote already demoted node + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleManager + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + services := d.listServices(c) + c.Assert(services, checker.NotNil) + c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.createService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + service := d.getService(c, id) + instances = 5 + d.updateService(c, service, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + d.removeService(c, service.ID) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + instances := 9 + id := d1.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.GreaterThan, 0) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + // reconciliation on d2 node down + c.Assert(d2.Stop(), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + // test downscaling + instances = 5 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + d1.createService(c, simpleTestService, setGlobalMode) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.Equals, 1) + + d4 := s.AddDaemon(c, true, false) + d5 := s.AddDaemon(c, true, false) + + waitAndAssert(c, defaultReconciliationTimeout, d4.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d5.checkActiveContainerCount, checker.Equals, 1) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:test" + + // create a different tag + for _, d := range daemons { + out, err := d.Cmd("tag", image1, image2) + c.Assert(err, checker.IsNil, check.Commentf(out)) + } + + // create service + instances := 5 + parallelism := 2 + id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].getService(c, id) + daemons[0].updateService(c, service, setImage(image2)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - parallelism, image2: parallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // 3nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances}) + + // Roll back to the previous version. This uses the CLI because + // rollback is a client-side operation. + out, err := daemons[0].Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - parallelism, image1: parallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - 2*parallelism, image1: 2 * parallelism}) + + // 3nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:badtag" + + // create service + instances := 5 + id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].getService(c, id) + daemons[0].updateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) + + // should update 2 tasks and then pause + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) + v, _ := daemons[0].checkServiceRunningTasks(id)(c) + c.Assert(v, checker.Equals, instances-2) + + // Roll back to the previous version. This uses the CLI because + // rollback is a client-side operation. + out, err := daemons[0].Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + + // create service + constraints := []string{"node.role==worker"} + instances := 3 + id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + // validate tasks are running on worker nodes + tasks := daemons[0].getServiceTasks(c, id) + for _, task := range tasks { + node := daemons[0].getNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker) + } + //remove service + daemons[0].removeService(c, id) + + // create service + constraints = []string{"node.role!=worker"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].getServiceTasks(c, id) + // validate tasks are running on manager nodes + for _, task := range tasks { + node := daemons[0].getNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager) + } + //remove service + daemons[0].removeService(c, id) + + // create service + constraints = []string{"node.role==nosuchrole"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + // validate tasks are not assigned to any node + tasks = daemons[0].getServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].listNodes(c) + c.Assert(len(nodes), checker.Equals, nodeCount) + + // add labels to nodes + daemons[0].updateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + for i := 1; i < nodeCount; i++ { + daemons[0].updateNode(c, nodes[i].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "low", + } + }) + } + + // create service + instances := 3 + constraints := []string{"node.labels.security==high"} + id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks := daemons[0].getServiceTasks(c, id) + // validate all tasks are running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[0].ID) + } + //remove service + daemons[0].removeService(c, id) + + // create service + constraints = []string{"node.labels.security!=high"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].getServiceTasks(c, id) + // validate all tasks are NOT running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID) + } + //remove service + daemons[0].removeService(c, id) + + constraints = []string{"node.labels.security==medium"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].getServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + //remove service + daemons[0].removeService(c, id) + + // multiple constraints + constraints = []string{ + "node.labels.security==high", + fmt.Sprintf("node.id==%s", nodes[1].ID), + } + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].getServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + // make nodes[1] fulfills the constraints + daemons[0].updateNode(c, nodes[1].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].getServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[1].ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, DaemonIsLinux) + + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept + + instances := 9 + d1.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + getContainers := func() map[string]*SwarmDaemon { + m := make(map[string]*SwarmDaemon) + for _, d := range []*SwarmDaemon{d1, d2, d3} { + for _, id := range d.activeContainers() { + m[id] = d + } + } + return m + } + + containers := getContainers() + c.Assert(containers, checker.HasLen, instances) + var toRemove string + for i := range containers { + toRemove = i + } + + _, err := containers[toRemove].Cmd("stop", toRemove) + c.Assert(err, checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + containers2 := getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } + + containers = containers2 + for i := range containers { + toRemove = i + } + + // try with killing process outside of docker + pidStr, err := containers[toRemove].Cmd("inspect", "-f", "{{.State.Pid}}", toRemove) + c.Assert(err, checker.IsNil) + pid, err := strconv.Atoi(strings.TrimSpace(pidStr)) + c.Assert(err, checker.IsNil) + c.Assert(syscall.Kill(pid, syscall.SIGKILL), checker.IsNil) + + time.Sleep(time.Second) // give some time to handle the signal + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + containers2 = getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) { + // add three managers, one of these is leader + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // start a service by hitting each of the 3 managers + d1.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test1" + }) + d2.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test2" + }) + d3.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test3" + }) + + // 3 services should be started now, because the requests were proxied to leader + // query each node and make sure it returns 3 services + for _, d := range []*SwarmDaemon{d1, d2, d3} { + services := d.listServices(c) + c.Assert(services, checker.HasLen, 3) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { + // Create 3 nodes + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // assert that the first node we made is the leader, and the other two are followers + c.Assert(d1.getNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) + c.Assert(d1.getNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) + c.Assert(d1.getNode(c, d3.NodeID).ManagerStatus.Leader, checker.False) + + d1.Stop() // stop the leader + + var ( + leader *SwarmDaemon // keep track of leader + followers []*SwarmDaemon // keep track of followers + ) + checkLeader := func(nodes ...*SwarmDaemon) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + // clear these out before each run + leader = nil + followers = nil + for _, d := range nodes { + if d.getNode(c, d.NodeID).ManagerStatus.Leader { + leader = d + } else { + followers = append(followers, d) + } + } + + if leader == nil { + return false, check.Commentf("no leader elected") + } + + return true, check.Commentf("elected %v", leader.id) + } + } + + // wait for an election to occur + waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True) + + // assert that we have a new leader + c.Assert(leader, checker.NotNil) + + // Keep track of the current leader, since we want that to be chosen. + stableleader := leader + + // add the d1, the initial leader, back + d1.Start() + + // TODO(stevvooe): may need to wait for rejoin here + + // wait for possible election + waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True) + // pick out the leader and the followers again + + // verify that we still only have 1 leader and 2 followers + c.Assert(leader, checker.NotNil) + c.Assert(followers, checker.HasLen, 2) + // and that after we added d1 back, the leader hasn't changed + c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID) +} + +func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + d1.createService(c, simpleTestService) + + c.Assert(d2.Stop(), checker.IsNil) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + + d1.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "top1" + }) + + c.Assert(d3.Stop(), checker.IsNil) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + + var service swarm.Service + simpleTestService(&service) + service.Spec.Name = "top2" + status, out, err := d1.SockRequest("POST", "/services/create", service.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded", string(out))) + + c.Assert(d2.Start(), checker.IsNil) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + + d1.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "top3" + }) +} + +func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + nodes := d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + +loop0: + for _, n := range nodes { + for _, d := range []*SwarmDaemon{d1, d2, d3} { + if n.ID == d.NodeID { + continue loop0 + } + } + c.Errorf("unknown nodeID %v", n.ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + nodes := d.listNodes(c) + + d.updateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + n := d.getNode(c, nodes[0].ID) + c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { + testRequires(c, Network) + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + _ = s.AddDaemon(c, true, false) + + nodes := d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + + // Getting the info so we can take the NodeID + d2Info, err := d2.info() + c.Assert(err, checker.IsNil) + + // forceful removal of d2 should work + d1.removeNode(c, d2Info.NodeID, true) + + nodes = d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) + + // Restart the node that was removed + err = d2.Restart() + c.Assert(err, checker.IsNil) + + // Give some time for the node to rejoin + time.Sleep(1 * time.Second) + + // Make sure the node didn't rejoin + nodes = d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + // start a service, expect balanced distribution + instances := 8 + id := d1.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) + + // set d2 back to active + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityActive + }) + + instances = 1 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + instances = 8 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + + // drained node first so we don't get any old containers + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + d2ContainerCount := len(d2.activeContainers()) + + // set d2 to paused, scale service up, only d1 gets new tasks + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + instances = 14 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances-d2ContainerCount) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, d2ContainerCount) + +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + d.createService(c, simpleTestService, setInstances(instances)) + + id, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(id) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances+1) + + c.Assert(d.Leave(false), checker.NotNil) + c.Assert(d.Leave(true), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + id2, err := d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) +} + +// #23629 +func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) { + testRequires(c, Network) + s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, false, false) + + id, err := d2.Cmd("run", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(id) + + err = d2.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "Timeout was reached") + + info, err := d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + c.Assert(d2.Leave(true), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) + + id2, err := d2.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) +} + +// #23705 +func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) { + testRequires(c, Network) + d := s.AddDaemon(c, false, false) + err := d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "Timeout was reached") + + waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + c.Assert(d.Stop(), checker.IsNil) + c.Assert(d.Start(), checker.IsNil) + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) { + d1 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.createService(c, simpleTestService, setInstances(instances)) + + d1.getService(c, id) + d1.Stop() + d1.Start() + d1.getService(c, id) + + d2 := s.AddDaemon(c, true, true) + d2.getService(c, id) + d2.Stop() + d2.Start() + d2.getService(c, id) + + d3 := s.AddDaemon(c, true, true) + d3.getService(c, id) + d3.Stop() + d3.Start() + d3.getService(c, id) + + d3.Kill() + time.Sleep(1 * time.Second) // time to handle signal + d3.Start() + d3.getService(c, id) +} + +func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + containers := d.activeContainers() + instances = 4 + d.updateService(c, d.getService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + containers2 := d.activeContainers() + +loop0: + for _, c1 := range containers { + for _, c2 := range containers2 { + if c1 == c2 { + continue loop0 + } + } + c.Errorf("container %v not found in new set %#v", c1, containers2) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) { + d := s.AddDaemon(c, false, false) + req := swarm.InitRequest{ + ListenAddr: "", + } + status, _, err := d.SockRequest("POST", "/swarm/init", req) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + + req2 := swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + RemoteAddrs: []string{""}, + } + status, _, err = d.SockRequest("POST", "/swarm/join", req2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) +} + +func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.createService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) + + c.Assert(d2.Stop(), checker.IsNil) + + c.Assert(d1.Init(swarm.InitRequest{ + ForceNewCluster: true, + Spec: swarm.Spec{}, + }), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + + d3 := s.AddDaemon(c, true, true) + info, err := d3.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + instances = 4 + d3.updateService(c, d3.getService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) +} + +func simpleTestService(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + } + s.Spec.Name = "top" +} + +func serviceForUpdate(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: 2, + Delay: 4 * time.Second, + FailureAction: swarm.UpdateFailureActionContinue, + }, + } + s.Spec.Name = "updatetest" +} + +func setInstances(replicas int) serviceConstructor { + ureplicas := uint64(replicas) + return func(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + } + } +} + +func setImage(image string) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.TaskTemplate.ContainerSpec.Image = image + } +} + +func setFailureAction(failureAction string) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.FailureAction = failureAction + } +} + +func setMaxFailureRatio(maxFailureRatio float32) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio + } +} + +func setParallelism(parallelism uint64) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.Parallelism = parallelism + } +} + +func setConstraints(constraints []string) serviceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.Placement == nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{} + } + s.Spec.TaskTemplate.Placement.Constraints = constraints + } +} + +func setGlobalMode(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Global: &swarm.GlobalService{}, + } +} + +func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) { + var totalMCount, totalWCount int + + for _, d := range cl { + var ( + info swarm.Info + err error + ) + + // check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error + checkInfo := func(c *check.C) (interface{}, check.CommentInterface) { + info, err = d.info() + return err, check.Commentf("cluster not ready in time") + } + waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil) + if !info.ControlAvailable { + totalWCount++ + continue + } + + var leaderFound bool + totalMCount++ + var mCount, wCount int + + for _, n := range d.listNodes(c) { + waitReady := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Status.State == swarm.NodeStateReady { + return true, nil + } + nn := d.getNode(c, n.ID) + n = *nn + return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True) + + waitActive := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Spec.Availability == swarm.NodeAvailabilityActive { + return true, nil + } + nn := d.getNode(c, n.ID) + n = *nn + return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True) + + if n.Spec.Role == swarm.NodeRoleManager { + c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID)) + if n.ManagerStatus.Leader { + leaderFound = true + } + mCount++ + } else { + c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID)) + wCount++ + } + } + c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID)) + c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID)) + c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID)) + } + c.Assert(totalMCount, checker.Equals, managerCount) + c.Assert(totalWCount, checker.Equals, workerCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { + mCount, wCount := 5, 1 + + var nodes []*SwarmDaemon + for i := 0; i < mCount; i++ { + manager := s.AddDaemon(c, true, true) + info, err := manager.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + nodes = append(nodes, manager) + } + + for i := 0; i < wCount; i++ { + worker := s.AddDaemon(c, true, false) + info, err := worker.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + nodes = append(nodes, worker) + } + + // stop whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *SwarmDaemon) { + defer wg.Done() + if err := daemon.Stop(); err != nil { + errs <- err + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + daemon.root = filepath.Dir(daemon.root) + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + c.Assert(err, check.IsNil) + } + } + + // start whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *SwarmDaemon) { + defer wg.Done() + if err := daemon.Start("--iptables=false"); err != nil { + errs <- err + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + c.Assert(err, check.IsNil) + } + } + + checkClusterHealth(c, nodes, mCount, wCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.createService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + service := d.getService(c, id) + instances = 5 + + setInstances(instances)(service) + url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + secrets := d.listSecrets(c) + c.Assert(secrets, checker.NotNil) + c.Assert(len(secrets), checker.Equals, 0, check.Commentf("secrets: %#v", secrets)) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secrets := d.listSecrets(c) + c.Assert(len(secrets), checker.Equals, 1, check.Commentf("secrets: %#v", secrets)) + name := secrets[0].Spec.Annotations.Name + c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name)) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) + + d.deleteSecret(c, secret.ID) + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) +} + +// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`, +// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`. +// This test makes sure the fixes correctly output scopes instead. +func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + var n1 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "bridge" + + status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n1), checker.IsNil) + + var n2 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, out, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n2), checker.IsNil) + + var r1 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n1.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r1), checker.IsNil) + + c.Assert(r1.Scope, checker.Equals, "local") + + var r2 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n2.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r2), checker.IsNil) + + c.Assert(r2.Scope, checker.Equals, "swarm") +} + +// Test case for 30178 +func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "lb") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + instances := 1 + d.createService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) { + s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{} + s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{ + {Target: "lb"}, + } + }) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + containers := d.activeContainers() + + out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_test.go new file mode 100644 index 0000000..3b38ba9 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_test.go @@ -0,0 +1,118 @@ +package main + +import ( + "fmt" + "net/http" + "net/http/httptest" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIOptionsRoute(c *check.C) { + status, _, err := sockRequest("OPTIONS", "/", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIGetEnabledCORS(c *check.C) { + res, body, err := sockRequestRaw("GET", "/version", nil, "") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + body.Close() + // TODO: @runcom incomplete tests, why old integration tests had this headers + // and here none of the headers below are in the response? + //c.Log(res.Header) + //c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*") + //c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") +} + +func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *check.C) { + if daemonPlatform != runtime.GOOS { + c.Skip("Daemon platform doesn't match test platform") + } + if api.MinVersion == api.DefaultVersion { + c.Skip("API MinVersion==DefaultVersion") + } + v := strings.Split(api.MinVersion, ".") + vMinInt, err := strconv.Atoi(v[1]) + c.Assert(err, checker.IsNil) + vMinInt-- + v[1] = strconv.Itoa(vMinInt) + version := strings.Join(v, ".") + + status, body, err := sockRequest("GET", "/v"+version+"/version", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + expected := fmt.Sprintf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", version, api.MinVersion) + c.Assert(strings.TrimSpace(string(body)), checker.Contains, expected) +} + +func (s *DockerSuite) TestAPIDockerAPIVersion(c *check.C) { + var svrVersion string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + url := r.URL.Path + svrVersion = url + })) + defer server.Close() + + // Test using the env var first + result := icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs("-H="+server.URL[7:], "version"), + Env: appendBaseEnv(false, "DOCKER_API_VERSION=xxx"), + }) + c.Assert(result, icmd.Matches, icmd.Expected{Out: "API version: xxx", ExitCode: 1}) + c.Assert(svrVersion, check.Equals, "/vxxx/version", check.Commentf("%s", result.Compare(icmd.Success))) +} + +func (s *DockerSuite) TestAPIErrorJSON(c *check.C) { + httpResp, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(`{}`), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) { + // Windows requires API 1.25 or later. This test is validating a behaviour which was present + // in v1.23, but changed in 1.24, hence not applicable on Windows. See apiVersionSupportsJSONErrors + testRequires(c, DaemonIsLinux) + httpResp, body, err := sockRequestRaw("POST", "/v1.23/containers/create", strings.NewReader(`{}`), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) { + // 404 is a different code path to normal errors, so test separately + httpResp, body, err := sockRequestRaw("GET", "/notfound", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "page not found") +} + +func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) { + httpResp, body, err := sockRequestRaw("GET", "/v1.23/notfound", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_update_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_update_unix_test.go new file mode 100644 index 0000000..dfe14ec --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_update_unix_test.go @@ -0,0 +1,35 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIUpdateContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "apiUpdateContainer" + hostConfig := map[string]interface{}{ + "Memory": 314572800, + "MemorySwap": 524288000, + } + dockerCmd(c, "run", "-d", "--name", name, "-m", "200M", "busybox", "top") + _, _, err := sockRequest("POST", "/containers/"+name+"/update", hostConfig) + c.Assert(err, check.IsNil) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + file = "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_version_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_version_test.go new file mode 100644 index 0000000..eb2de59 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_version_test.go @@ -0,0 +1,23 @@ +package main + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestGetVersion(c *check.C) { + status, body, err := sockRequest("GET", "/version", nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + var v types.Version + + c.Assert(json.Unmarshal(body, &v), checker.IsNil) + + c.Assert(v.Version, checker.Equals, dockerversion.Version, check.Commentf("Version mismatch")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_volumes_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_volumes_test.go new file mode 100644 index 0000000..d1d4400 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_volumes_test.go @@ -0,0 +1,89 @@ +package main + +import ( + "encoding/json" + "net/http" + "path/filepath" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumesAPIList(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "busybox") + + status, b, err := sockRequest("GET", "/volumes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) +} + +func (s *DockerSuite) TestVolumesAPICreate(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + status, b, err := sockRequest("POST", "/volumes/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + var vol types.Volume + err = json.Unmarshal(b, &vol) + c.Assert(err, checker.IsNil) + + c.Assert(filepath.Base(filepath.Dir(vol.Mountpoint)), checker.Equals, config.Name) +} + +func (s *DockerSuite) TestVolumesAPIRemove(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "--name=test", "busybox") + + status, b, err := sockRequest("GET", "/volumes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + v := volumes.Volumes[0] + status, _, err = sockRequest("DELETE", "/volumes/"+v.Name, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("Should not be able to remove a volume that is in use")) + + dockerCmd(c, "rm", "-f", "test") + status, data, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf(string(data))) + +} + +func (s *DockerSuite) TestVolumesAPIInspect(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + status, b, err := sockRequest("POST", "/volumes/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + status, b, err = sockRequest("GET", "/volumes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + var vol types.Volume + status, b, err = sockRequest("GET", "/volumes/"+config.Name, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + c.Assert(json.Unmarshal(b, &vol), checker.IsNil) + c.Assert(vol.Name, checker.Equals, config.Name) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_test.go new file mode 100644 index 0000000..2df4fdc --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_test.go @@ -0,0 +1,168 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +const attachWait = 5 * time.Second + +func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { + endGroup := &sync.WaitGroup{} + startGroup := &sync.WaitGroup{} + endGroup.Add(3) + startGroup.Add(3) + + err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") + c.Assert(err, check.IsNil) + + startDone := make(chan struct{}) + endDone := make(chan struct{}) + + go func() { + endGroup.Wait() + close(endDone) + }() + + go func() { + startGroup.Wait() + close(startDone) + }() + + for i := 0; i < 3; i++ { + go func() { + cmd := exec.Command(dockerBinary, "attach", "attacher") + + defer func() { + cmd.Wait() + endGroup.Done() + }() + + out, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer out.Close() + + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + buf := make([]byte, 1024) + + if _, err := out.Read(buf); err != nil && err != io.EOF { + c.Fatal(err) + } + + startGroup.Done() + + if !strings.Contains(string(buf), "hello") { + c.Fatalf("unexpected output %s expected hello\n", string(buf)) + } + }() + } + + select { + case <-startDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not initialize properly") + } + + dockerCmd(c, "kill", "attacher") + + select { + case <-endDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not finish properly") + } +} + +func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + done := make(chan error) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "attach", id) + if _, err := cmd.StdinPipe(); err != nil { + done <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + done <- fmt.Errorf("attach should have failed") + return + } else if !strings.Contains(out, expected) { + done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-done: + c.Assert(err, check.IsNil) + case <-time.After(attachWait): + c.Fatal("attach is running but should have failed") + } +} + +func (s *DockerSuite) TestAttachDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") + id := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "attach", id) + stdin, err := cmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Process.Kill() + + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + + c.Assert(stdin.Close(), check.IsNil) + + // Expect container to still be running after stdin is closed + running := inspectField(c, id, "State.Running") + c.Assert(running, check.Equals, "true") +} + +func (s *DockerSuite) TestAttachPausedContainer(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + runSleepingContainer(c, "-d", "--name=test") + dockerCmd(c, "pause", "test") + + result := dockerCmdWithResult("attach", "test") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 1", + ExitCode: 1, + Err: "You cannot attach to a paused container, unpause it first", + }) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_unix_test.go new file mode 100644 index 0000000..fb794cc --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_unix_test.go @@ -0,0 +1,237 @@ +// +build !windows + +package main + +import ( + "bufio" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #9860 Make sure attach ends when container ends (with no errors) +func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-dti", "busybox", "/bin/sh", "-c", `trap 'exit 0' SIGTERM; while true; do sleep 1; done`) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + + attachCmd := exec.Command(dockerBinary, "attach", id) + attachCmd.Stdin = tty + attachCmd.Stdout = tty + attachCmd.Stderr = tty + err = attachCmd.Start() + c.Assert(err, check.IsNil) + + errChan := make(chan error) + go func() { + time.Sleep(300 * time.Millisecond) + defer close(errChan) + // Container is waiting for us to signal it to stop + dockerCmd(c, "stop", id) + // And wait for the attach command to end + errChan <- attachCmd.Wait() + }() + + // Wait for the docker to end (should be done by the + // stop command in the go routine) + dockerCmd(c, "wait", id) + + select { + case err := <-errChan: + tty.Close() + out, _ := ioutil.ReadAll(pty) + c.Assert(err, check.IsNil, check.Commentf("out: %v", string(out))) + case <-time.After(attachWait): + c.Fatal("timed out without attach returning") + } + +} + +func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { + name := "detachtest" + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + errChan := make(chan error) + go func() { + errChan <- cmd.Run() + close(errChan) + }() + + c.Assert(waitRun(name), check.IsNil) + + cpty.Write([]byte{16}) + time.Sleep(100 * time.Millisecond) + cpty.Write([]byte{17}) + + select { + case err := <-errChan: + if err != nil { + buff := make([]byte, 200) + tty.Read(buff) + c.Fatalf("%s: %s", err, buff) + } + case <-time.After(5 * time.Second): + c.Fatal("timeout while detaching") + } + + cpty, tty, err = pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + + cmd = exec.Command(dockerBinary, "attach", name) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + err = cmd.Start() + c.Assert(err, checker.IsNil) + + bytes := make([]byte, 10) + var nBytes int + readErr := make(chan error, 1) + + go func() { + time.Sleep(500 * time.Millisecond) + cpty.Write([]byte("\n")) + time.Sleep(500 * time.Millisecond) + + nBytes, err = cpty.Read(bytes) + cpty.Close() + readErr <- err + }() + + select { + case err := <-readErr: + c.Assert(err, check.IsNil) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for attach read") + } + + err = cmd.Wait() + c.Assert(err, checker.IsNil) + + c.Assert(string(bytes[:nBytes]), checker.Contains, "/ #") + +} + +// TestAttachDetach checks that attach in tty mode can be detached using the long container ID +func (s *DockerSuite) TestAttachDetach(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + err = cmd.Start() + c.Assert(err, check.IsNil) + c.Assert(waitRun(id), check.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} + +// TestAttachDetachTruncatedID checks that attach in tty mode can be detached +func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := stringid.TruncateID(strings.TrimSpace(out)) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer stdout.Close() + err = cmd.Start() + c.Assert(err, checker.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_plugin_v2_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_plugin_v2_test.go new file mode 100644 index 0000000..8a669fb --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_plugin_v2_test.go @@ -0,0 +1,133 @@ +// +build !windows + +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +var ( + authzPluginName = "riyaz/authz-no-volume-plugin" + authzPluginTag = "latest" + authzPluginNameWithTag = authzPluginName + ":" + authzPluginTag + authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest" + nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin" +) + +func init() { + check.Suite(&DockerAuthzV2Suite{ + ds: &DockerSuite{}, + }) +} + +type DockerAuthzV2Suite struct { + ds *DockerSuite + d *Daemon +} + +func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + s.d = NewDaemon(c) + c.Assert(s.d.Start(), check.IsNil) +} + +func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + // start the daemon with the plugin and load busybox, --net=none build fails otherwise + // because it needs to pull busybox + c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil) + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // defer disabling the plugin + defer func() { + c.Assert(s.d.Restart(), check.IsNil) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + // Ensure docker run command and accompanying docker ps are successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, true) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + + // restart the daemon with the plugin + c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil) + + // defer disabling the plugin + defer func() { + c.Assert(s.d.Restart(), check.IsNil) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + out, err := s.d.Cmd("volume", "create") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + // The plugin will block the command before it can determine the volume does not exist + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "prune", "-f") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginBadManifestFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin with bad manifest + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginBadManifestName) + c.Assert(err, checker.IsNil) + + // start the daemon with the plugin, it will error + c.Assert(s.d.Restart("--authorization-plugin="+authzPluginBadManifestName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + c.Assert(s.d.Restart(), check.IsNil) +} + +func (s *DockerAuthzV2Suite) TestNonexistentAuthZPluginFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + // start the daemon with a non-existent authz plugin, it will error + c.Assert(s.d.Restart("--authorization-plugin="+nonexistentAuthzPluginName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + c.Assert(s.d.Restart(), check.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_unix_test.go new file mode 100644 index 0000000..a826249 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_unix_test.go @@ -0,0 +1,477 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + + "bufio" + "bytes" + "os/exec" + "strconv" + "time" + + "net" + "net/http/httputil" + "net/url" + + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/plugins" + "github.com/go-check/check" +) + +const ( + testAuthZPlugin = "authzplugin" + unauthorizedMessage = "User unauthorized authz plugin" + errorMessage = "something went wrong..." + containerListAPI = "/containers/json" +) + +var ( + alwaysAllowed = []string{"/_ping", "/info"} +) + +func init() { + check.Suite(&DockerAuthzSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerAuthzSuite struct { + server *httptest.Server + ds *DockerSuite + d *Daemon + ctrl *authorizationController +} + +type authorizationController struct { + reqRes authorization.Response // reqRes holds the plugin response to the initial client request + resRes authorization.Response // resRes holds the plugin response to the daemon response + psRequestCnt int // psRequestCnt counts the number of calls to list container request api + psResponseCnt int // psResponseCnt counts the number of calls to list containers response API + requestsURIs []string // requestsURIs stores all request URIs that are sent to the authorization controller + reqUser string + resUser string +} + +func (s *DockerAuthzSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) + s.ctrl = &authorizationController{} +} + +func (s *DockerAuthzSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) + s.ctrl = nil +} + +func (s *DockerAuthzSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}}) + c.Assert(err, check.IsNil) + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody) + assertAuthHeaders(c, authReq.RequestHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psRequestCnt++ + } + + s.ctrl.requestsURIs = append(s.ctrl.requestsURIs, authReq.RequestURI) + + reqRes := s.ctrl.reqRes + if isAllowed(authReq.RequestURI) { + reqRes = authorization.Response{Allow: true} + } + if reqRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(reqRes) + c.Assert(err, check.IsNil) + s.ctrl.reqUser = authReq.User + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody) + assertAuthHeaders(c, authReq.ResponseHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psResponseCnt++ + } + resRes := s.ctrl.resRes + if isAllowed(authReq.RequestURI) { + resRes = authorization.Response{Allow: true} + } + if resRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(resRes) + c.Assert(err, check.IsNil) + s.ctrl.resUser = authReq.User + w.Write(b) + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) + err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644) + c.Assert(err, checker.IsNil) +} + +// check for always allowed endpoints to not inhibit test framework functions +func isAllowed(reqURI string) bool { + for _, endpoint := range alwaysAllowed { + if strings.HasSuffix(reqURI, endpoint) { + return true + } + } + return false +} + +// assertAuthHeaders validates authentication headers are removed +func assertAuthHeaders(c *check.C, headers map[string]string) error { + for k := range headers { + if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") { + c.Errorf("Found authentication headers in request '%v'", headers) + } + } + return nil +} + +// assertBody asserts that body is removed for non text/json requests +func assertBody(c *check.C, requestURI string, headers map[string]string, body []byte) { + if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 { + //return fmt.Errorf("Body included for authentication endpoint %s", string(body)) + c.Errorf("Body included for authentication endpoint %s", string(body)) + } + + for k, v := range headers { + if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" { + return + } + } + if len(body) > 0 { + c.Errorf("Body included while it should not (Headers: '%v')", headers) + } +} + +func (s *DockerAuthzSuite) TearDownSuite(c *check.C) { + if s.server == nil { + return + } + + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) { + // start the daemon and load busybox, --net=none build fails otherwise + // cause it needs to pull busybox + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // Ensure command successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") + assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id)) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, true) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) { + + const testDaemonHTTPSAddr = "tcp://localhost:4271" + // start the daemon and load busybox, --net=none build fails otherwise + // cause it needs to pull busybox + if err := s.d.Start( + "--authorization-plugin="+testAuthZPlugin, + "--tlsverify", + "--tlscacert", + "fixtures/https/ca.pem", + "--tlscert", + "fixtures/https/server-cert.pem", + "--tlskey", + "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "-H", + testDaemonHTTPSAddr, + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } + + c.Assert(s.ctrl.reqUser, check.Equals, "client") + c.Assert(s.ctrl.resUser, check.Equals, "client") +} + +func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = false + s.ctrl.reqRes.Msg = unauthorizedMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 0) + + // Ensure unauthorized message appears in response + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) +} + +// TestAuthZPluginAPIDenyResponse validates that when authorization plugin deny the request, the status code is forbidden +func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = false + s.ctrl.resRes.Msg = unauthorizedMessage + + daemonURL, err := url.Parse(s.d.sock()) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + c.Assert(err, check.IsNil) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + c.Assert(err, check.IsNil) + resp, err := client.Do(req) + + c.Assert(err, check.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusForbidden) + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = false + s.ctrl.resRes.Msg = unauthorizedMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) + + // Ensure unauthorized message appears in response + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) +} + +// TestAuthZPluginAllowEventStream verifies event stream propagates correctly after request pass through by the authorization plugin +func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) { + testRequires(c, DaemonIsLinux) + + // start the daemon and load busybox to avoid pulling busybox from Docker Hub + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + startTime := strconv.FormatInt(daemonTime(c).Unix(), 10) + // Add another command to to enable event pipelining + eventsCmd := exec.Command(dockerBinary, "--host", s.d.sock(), "events", "--since", startTime) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + c.Assert(err, check.IsNil) + } + + observer := eventObserver{ + buffer: new(bytes.Buffer), + command: eventsCmd, + scanner: bufio.NewScanner(stdout), + startTime: startTime, + } + + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + // Create a container and wait for the creation events + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + containerID := strings.TrimSpace(out) + c.Assert(s.d.waitRun(containerID), checker.IsNil) + + events := map[string]chan bool{ + "create": make(chan bool, 1), + "start": make(chan bool, 1), + } + + matcher := matchEventLine(containerID, "container", events) + processor := processEventMatch(events) + go observer.Match(matcher, processor) + + // Ensure all events are received + for event, eventChannel := range events { + + select { + case <-time.After(30 * time.Second): + // Fail the test + observer.CheckEventError(c, containerID, event, matcher) + c.FailNow() + case <-eventChannel: + // Ignore, event received + } + } + + // Ensure both events and container endpoints are passed to the authorization plugin + assertURIRecorded(c, s.ctrl.requestsURIs, "/events") + assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") + assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", containerID)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Err = errorMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Err = errorMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) { + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) + + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + + out, err := s.d.Cmd("ps") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // assert plugin is only called once.. + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) { + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + tmp, err := ioutil.TempDir("", "test-authz-load-import") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + savedImagePath := filepath.Join(tmp, "save.tar") + + out, err := s.d.Cmd("save", "-o", savedImagePath, "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("load", "--input", savedImagePath) + c.Assert(err, check.IsNil, check.Commentf(out)) + + exportedImagePath := filepath.Join(tmp, "export.tar") + + out, err = s.d.Cmd("run", "-d", "--name", "testexport", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("export", "-o", exportedImagePath, "testexport") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("import", exportedImagePath) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) { + c.Assert(s.d.Start("--debug", "--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + daemonURL, err := url.Parse(s.d.sock()) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + c.Assert(err, check.IsNil) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + c.Assert(err, check.IsNil) + resp, err := client.Do(req) + + c.Assert(err, check.IsNil) + c.Assert(resp.Header["Content-Type"][0], checker.Equals, "application/json") +} + +// assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin +func assertURIRecorded(c *check.C, uris []string, uri string) { + var found bool + for _, u := range uris { + if strings.Contains(u, uri) { + found = true + break + } + } + if !found { + c.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ",")) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_build_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_build_test.go new file mode 100644 index 0000000..49f2875 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_build_test.go @@ -0,0 +1,7420 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "text/template" + "time" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { + name := "testbuildjsonemptyrun" + + _, err := buildImage( + name, + ` + FROM busybox + RUN [] + `, + true) + + if err != nil { + c.Fatal("error when dealing with a RUN statement with empty JSON array") + } + +} + +func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { + name := "testbuildshcmdjsonentrypoint" + + _, err := buildImage( + name, + ` + FROM busybox + ENTRYPOINT ["echo"] + CMD echo test + `, + true) + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", name) + + if daemonPlatform == "windows" { + if !strings.Contains(out, "cmd /S /C echo test") { + c.Fatalf("CMD did not contain cmd /S /C echo test : %q", out) + } + } else { + if strings.TrimSpace(out) != "/bin/sh -c echo test" { + c.Fatalf("CMD did not contain /bin/sh -c : %q", out) + } + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { + // Windows does not support FROM scratch or the USER command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM scratch + ENV user foo + USER ${user} + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.User") + + if res != `"foo"` { + c.Fatal("User foo from environment not in Config.User on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { + name := "testbuildenvironmentreplacement" + + var volumePath string + + if daemonPlatform == "windows" { + volumePath = "c:/quux" + } else { + volumePath = "/quux" + } + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + ENV volume `+volumePath+` + VOLUME ${volume} + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Volumes") + + var volumes map[string]interface{} + + if err := json.Unmarshal([]byte(res), &volumes); err != nil { + c.Fatal(err) + } + + if _, ok := volumes[volumePath]; !ok { + c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { + // Windows does not support FROM scratch or the EXPOSE command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM scratch + ENV port 80 + EXPOSE ${port} + ENV ports " 99 100 " + EXPOSE ${ports} + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.ExposedPorts") + + var exposedPorts map[string]interface{} + + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + exp := []int{80, 99, 100} + + for _, p := range exp { + tmp := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[tmp]; !ok { + c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p) + } + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM busybox + ENV MYWORKDIR /work + RUN mkdir ${MYWORKDIR} + WORKDIR ${MYWORKDIR} + `, true) + + if err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { + name := "testbuildenvironmentreplacement" + + ctx, err := fakeContext(` + FROM `+minimalBaseImage()+` + ENV baz foo + ENV quux bar + ENV dot . + ENV fee fff + ENV gee ggg + + ADD ${baz} ${dot} + COPY ${quux} ${dot} + ADD ${zzz:-${fee}} ${dot} + COPY ${zzz:-${gee}} ${dot} + `, + map[string]string{ + "foo": "test1", + "bar": "test2", + "fff": "test3", + "ggg": "test4", + }) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, + ` + FROM busybox + ENV foo zzz + ENV bar ${foo} + ENV abc1='$foo' + ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}" + RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo) + ENV abc2="\$foo" + RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo) + ENV abc3 '$foo' + RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) + ENV abc4 "\$foo" + RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) + `, true) + + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Env") + + envResult := []string{} + + if err = json.Unmarshal([]byte(res), &envResult); err != nil { + c.Fatal(err) + } + + found := false + envCount := 0 + + for _, env := range envResult { + parts := strings.SplitN(env, "=", 2) + if parts[0] == "bar" { + found = true + if parts[1] != "zzz" { + c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if parts[1] != "zzz" { + c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if parts[1] != "foo" { + c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + } + } + } + + if !found { + c.Fatal("Never found the `bar` env variable") + } + + if envCount != 4 { + c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) + } + +} + +func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) { + // The volume paths used in this test are invalid on Windows + testRequires(c, DaemonIsLinux) + name := "testbuildhandleescapes" + + _, err := buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME ${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + var result map[string]map[string]struct{} + + res := inspectFieldJSON(c, name, "Config.Volumes") + + if err = json.Unmarshal([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result["bar"]; !ok { + c.Fatalf("Could not find volume bar set from env foo in volumes table, got %q", result) + } + + deleteImages(name) + + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + res = inspectFieldJSON(c, name, "Config.Volumes") + + if err = json.Unmarshal([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result["${FOO}"]; !ok { + c.Fatalf("Could not find volume ${FOO} set from env foo in volumes table, got %q", result) + } + + deleteImages(name) + + // this test in particular provides *7* backslashes and expects 6 to come back. + // Like above, the first escape is swallowed and the rest are treated as + // literals, this one is just less obvious because of all the character noise. + + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \\\\\\\${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + res = inspectFieldJSON(c, name, "Config.Volumes") + + if err = json.Unmarshal([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result[`\\\${FOO}`]; !ok { + c.Fatalf(`Could not find volume \\\${FOO} set from env foo in volumes table, got %q`, result) + } + +} + +func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { + name := "testbuildonbuildlowercase" + name2 := "testbuildonbuildlowercase2" + + _, err := buildImage(name, + ` + FROM busybox + onbuild run echo quux + `, true) + + if err != nil { + c.Fatal(err) + } + + _, out, err := buildImageWithOut(name2, fmt.Sprintf(` + FROM %s + `, name), true) + + if err != nil { + c.Fatal(err) + } + + if !strings.Contains(out, "quux") { + c.Fatalf("Did not receive the expected echo text, got %s", out) + } + + if strings.Contains(out, "ONBUILD ONBUILD") { + c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) + } + +} + +func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvescapes" + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo \$ + `, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-t", name) + + if strings.TrimSpace(out) != "$" { + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvoverwrite" + + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo ${TEST} + `, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name) + + if strings.TrimSpace(out) != "bar" { + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD CMD ["hello world"] +ONBUILD ENTRYPOINT ["echo"] +ONBUILD RUN ["true"]`, + false) + + if err != nil { + c.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", name2) + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatalf("did not get echo output from onbuild. Got: %q", out) + } + +} + +func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD ENTRYPOINT ["echo"]`, + false) + + if err != nil { + c.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", name2) + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatal("got malformed output from onbuild", out) + } + +} + +func (s *DockerSuite) TestBuildCacheAdd(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet + name := "testbuildtwoimageswithadd" + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + if _, err := buildImage(name, + fmt.Sprintf(`FROM scratch + ADD %s/robots.txt /`, server.URL()), + true); err != nil { + c.Fatal(err) + } + if err != nil { + c.Fatal(err) + } + deleteImages(name) + _, out, err := buildImageWithOut(name, + fmt.Sprintf(`FROM scratch + ADD %s/index.html /`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + if strings.Contains(out, "Using cache") { + c.Fatal("2nd build used cache on ADD, it shouldn't") + } + +} + +func (s *DockerSuite) TestBuildLastModified(c *check.C) { + // Temporary fix for #30890. TODO @jhowardmsft figure out what + // has changed in the master busybox image. + testRequires(c, DaemonIsLinux) + + name := "testbuildlastmodified" + + server, err := fakeStorage(map[string]string{ + "file": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + var out, out2 string + + dFmt := `FROM busybox +ADD %s/file /` + + dockerfile := fmt.Sprintf(dFmt, server.URL()) + + if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + + // Build it again and make sure the mtime of the file didn't change. + // Wait a few seconds to make sure the time changed enough to notice + time.Sleep(2 * time.Second) + + if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + + if out != out2 { + c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2) + } + + // Now 'touch' the file and make sure the timestamp DID change this time + // Create a new fakeStorage instead of just using Add() to help windows + server, err = fakeStorage(map[string]string{ + "file": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + dockerfile = fmt.Sprintf(dFmt, server.URL()) + + if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + + if out == out2 { + c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2) + } + +} + +// Regression for https://github.com/docker/docker/pull/27805 +// Makes sure that we don't use the cache if the contents of +// a file in a subfolder of the context is modified and we re-build. +func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) { + name := "testbuildmodifyfileinfolder" + + ctx, err := fakeContext(`FROM busybox +RUN ["mkdir", "/test"] +ADD folder/file /test/changetarget`, + map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if err := ctx.Add("folder/file", "first"); err != nil { + c.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if err := ctx.Add("folder/file", "second"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("cache was used even though file contents in folder was changed") + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddimg" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Issue #3960: "ADD src ." hangs +func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { + name := "testaddsinglefiletoworkdir" + ctx, err := fakeContext(`FROM busybox +ADD test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + errChan := make(chan error) + go func() { + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddsinglefiletoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + name := "testcopymultiplefilestofile" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 %s/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +`, server.URL()), + map[string]string{ + "test_file1": "test1", + "test_file2": "test2", + "test_file3": "test3", + "test_file4": "test4", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// This test is mainly for user namespaces to verify that new directories +// are created as the remapped root uid/gid pair +func (s *DockerSuite) TestBuildAddToNewDestination(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddtonewdest" + ctx, err := fakeContext(`FROM busybox +ADD . /new_dir +RUN ls -l / +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test file", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// This test is mainly for user namespaces to verify that new directories +// are created as the remapped root uid/gid pair +func (s *DockerSuite) TestBuildCopyToNewParentDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopytonewdir" + ctx, err := fakeContext(`FROM busybox +COPY test_dir /new_dir +RUN ls -l /new_dir +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test file", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// This test is mainly for user namespaces to verify that new directories +// are created as the remapped root uid/gid pair +func (s *DockerSuite) TestBuildWorkdirIsContainerRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testworkdirownership" + if _, err := buildImage(name, `FROM busybox +WORKDIR /new_dir +RUN ls -l / +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { + testRequires(c, DaemonIsLinux) // Not currently passing on Windows + name := "testaddfilewithwhitespace" + ctx, err := fakeContext(`FROM busybox +RUN mkdir "/test dir" +RUN mkdir "/test_dir" +ADD [ "test file1", "/test_file1" ] +ADD [ "test_file2", "/test file2" ] +ADD [ "test file3", "/test file3" ] +ADD [ "test dir/test_file4", "/test_dir/test_file4" ] +ADD [ "test_dir/test_file5", "/test dir/test_file5" ] +ADD [ "test dir/test_file6", "/test dir/test_file6" ] +RUN [ $(cat "/test_file1") = 'test1' ] +RUN [ $(cat "/test file2") = 'test2' ] +RUN [ $(cat "/test file3") = 'test3' ] +RUN [ $(cat "/test_dir/test_file4") = 'test4' ] +RUN [ $(cat "/test dir/test_file5") = 'test5' ] +RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, + map[string]string{ + "test file1": "test1", + "test_file2": "test2", + "test file3": "test3", + "test dir/test_file4": "test4", + "test_dir/test_file5": "test5", + "test dir/test_file6": "test6", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) { + dockerfile := `FROM busybox +RUN mkdir "/test dir" +RUN mkdir "/test_dir" +COPY [ "test file1", "/test_file1" ] +COPY [ "test_file2", "/test file2" ] +COPY [ "test file3", "/test file3" ] +COPY [ "test dir/test_file4", "/test_dir/test_file4" ] +COPY [ "test_dir/test_file5", "/test dir/test_file5" ] +COPY [ "test dir/test_file6", "/test dir/test_file6" ] +RUN [ $(cat "/test_file1") = 'test1' ] +RUN [ $(cat "/test file2") = 'test2' ] +RUN [ $(cat "/test file3") = 'test3' ] +RUN [ $(cat "/test_dir/test_file4") = 'test4' ] +RUN [ $(cat "/test dir/test_file5") = 'test5' ] +RUN [ $(cat "/test dir/test_file6") = 'test6' ]` + + if daemonPlatform == "windows" { + dockerfile = `FROM ` + WindowsBaseImage + ` +RUN mkdir "C:/test dir" +RUN mkdir "C:/test_dir" +COPY [ "test file1", "/test_file1" ] +COPY [ "test_file2", "/test file2" ] +COPY [ "test file3", "/test file3" ] +COPY [ "test dir/test_file4", "/test_dir/test_file4" ] +COPY [ "test_dir/test_file5", "/test dir/test_file5" ] +COPY [ "test dir/test_file6", "/test dir/test_file6" ] +RUN find "test1" "C:/test_file1" +RUN find "test2" "C:/test file2" +RUN find "test3" "C:/test file3" +RUN find "test4" "C:/test_dir/test_file4" +RUN find "test5" "C:/test dir/test_file5" +RUN find "test6" "C:/test dir/test_file6"` + } + + name := "testcopyfilewithwhitespace" + ctx, err := fakeContext(dockerfile, + map[string]string{ + "test file1": "test1", + "test_file2": "test2", + "test file3": "test3", + "test dir/test_file4": "test4", + "test_dir/test_file5": "test5", + "test dir/test_file6": "test6", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { + name := "testcopywildcard" + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + COPY file*.txt /tmp/ + RUN ls /tmp/file1.txt /tmp/file2.txt + RUN [ "mkdir", "/tmp1" ] + COPY dir* /tmp1/ + RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file + RUN [ "mkdir", "/tmp2" ] + ADD dir/*dir %s/robots.txt /tmp2/ + RUN ls /tmp2/nest_nest_file /tmp2/robots.txt + `, server.URL()), + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test2", + "dir/nested_file": "nested file", + "dir/nested_dir/nest_nest_file": "2 times nested", + "dirt": "dirty", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + // Now make sure we use a cache the 2nd time + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { + name := "testcopywildcardinname" + ctx, err := fakeContext(`FROM busybox + COPY *.txt /tmp/ + RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] + `, map[string]string{"*.txt": "hi there"}) + + if err != nil { + // Normally we would do c.Fatal(err) here but given that + // the odds of this failing are so rare, it must be because + // the OS we're running the client on doesn't support * in + // filenames (like windows). So, instead of failing the test + // just let it pass. Then we don't need to explicitly + // say which OSs this works on or not. + return + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("should have built: %q", err) + } +} + +func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { + name := "testcopywildcardcache" + ctx, err := fakeContext(`FROM busybox + COPY file1.txt /tmp/`, + map[string]string{ + "file1.txt": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + // Now make sure we use a cache the 2nd time even with wild cards. + // Use the same context so the file is the same and the checksum will match + ctx.Add("Dockerfile", `FROM busybox + COPY file*.txt /tmp/`) + + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddsinglefiletononexistingdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testadddircontenttoroot" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testadddircontenttoexistingdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddwholedirtoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Testing #5941 +func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) { + name := "testaddetctoroot" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` +ADD . /`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Testing #9401 +func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddpreservesfilesspecialbits" + ctx, err := fakeContext(`FROM busybox +ADD suidbin /usr/bin/suidbin +RUN chmod 4755 /usr/bin/suidbin +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] +ADD ./data/ / +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, + map[string]string{ + "suidbin": "suidbin", + "/data/usr/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopysinglefiletoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Issue #3960: "ADD src ." hangs - adapted for COPY +func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { + name := "testcopysinglefiletoworkdir" + ctx, err := fakeContext(`FROM busybox +COPY test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + errChan := make(chan error) + go func() { + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopysinglefiletoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopysinglefiletononexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopydircontenttoroot" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopydircontenttoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopywholedirtoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) { + name := "testcopyetctoroot" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` +COPY . /`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { + testRequires(c, DaemonIsLinux) // Not currently working on Windows + + dockerfile := ` + FROM scratch + ADD links.tar / + ADD foo.txt /symlink/ + ` + targetFile := "foo.txt" + var ( + name = "test-link-absolute" + ) + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + var symlinkTarget string + if runtime.GOOS == "windows" { + var driveLetter string + if abs, err := filepath.Abs(tempDir); err != nil { + c.Fatal(err) + } else { + driveLetter = abs[:1] + } + tempDirWithoutDrive := tempDir[2:] + symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) + } else { + symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) + } + + tarPath := filepath.Join(ctx.Dir, "links.tar") + nonExistingFile := filepath.Join(tempDir, targetFile) + fooPath := filepath.Join(ctx.Dir, targetFile) + + tarOut, err := os.Create(tarPath) + if err != nil { + c.Fatal(err) + } + + tarWriter := tar.NewWriter(tarOut) + + header := &tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: symlinkTarget, + Mode: 0755, + Uid: 0, + Gid: 0, + } + + err = tarWriter.WriteHeader(header) + if err != nil { + c.Fatal(err) + } + + tarWriter.Close() + tarOut.Close() + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { + testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox + const ( + dockerfileTemplate = ` + FROM busybox + RUN ln -s /../../../../../../../../%s /x + VOLUME /x + ADD foo.txt /x/` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute-volume" + dockerfile = "" + ) + + tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) + nonExistingFile := filepath.Join(tempDir, targetFile) + + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + fooPath := filepath.Join(ctx.Dir, targetFile) + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +// Issue #5270 - ensure we throw a better error than "unexpected EOF" +// when we can't access files in the context. +func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { + testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows + + { + name := "testbuildinaccessiblefiles" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible files early during build in the cli client + pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") + + if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown file to root: %s", err) + } + if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "no permission to read from ") { + c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) + } + + if !strings.Contains(out, "Error checking context") { + c.Fatalf("output should've contained the string: Error checking context") + } + } + { + name := "testbuildinaccessibledirectory" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible directories early during build in the cli client + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "can't stat") { + c.Fatalf("output should've contained the string: can't access %s", out) + } + + if !strings.Contains(out, "Error checking context") { + c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out) + } + + } + { + name := "testlinksok" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + target := "../../../../../../../../../../../../../../../../../../../azA" + if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { + c.Fatal(err) + } + defer os.Remove(target) + // This is used to ensure we don't follow links when checking if everything in the context is accessible + // This test doesn't require that we run commands as an unprivileged user + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + } + { + name := "testbuildignoredinaccessible" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", + map[string]string{ + "directoryWeCantStat/bar": "foo", + ".dockerignore": "directoryWeCantStat", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + result := icmd.RunCmd(icmd.Cmd{ + Dir: ctx.Dir, + Command: []string{"su", "unprivilegeduser", "-c", + fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + }) + result.Assert(c, icmd.Expected{}) + } +} + +func (s *DockerSuite) TestBuildForceRm(c *check.C) { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + name := "testbuildforcerm" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + RUN true + RUN thiswillfail`, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".") + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("--force-rm shouldn't have left containers behind") + } + +} + +func (s *DockerSuite) TestBuildRm(c *check.C) { + name := "testbuildrm" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + ADD foo / + ADD foo /`, map[string]string{"foo": "bar"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("-rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("--rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore == containerCountAfter { + c.Fatalf("--rm=false should have left containers behind") + } + deleteImages(name) + + } + +} + +func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { + testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows + var ( + result map[string]map[string]struct{} + name = "testbuildvolumes" + emptyMap = make(map[string]struct{}) + expected = map[string]map[string]struct{}{ + "/test1": emptyMap, + "/test2": emptyMap, + "/test3": emptyMap, + "/test4": emptyMap, + "/test5": emptyMap, + "/test6": emptyMap, + "[/test7": emptyMap, + "/test8]": emptyMap, + } + ) + _, err := buildImage(name, + `FROM scratch + VOLUME /test1 + VOLUME /test2 + VOLUME /test3 /test4 + VOLUME ["/test5", "/test6"] + VOLUME [/test7 /test8] + `, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Volumes") + + err = json.Unmarshal([]byte(res), &result) + if err != nil { + c.Fatal(err) + } + + equal := reflect.DeepEqual(&result, &expected) + + if !equal { + c.Fatalf("Volumes %s, expected %s", result, expected) + } + +} + +func (s *DockerSuite) TestBuildMaintainer(c *check.C) { + name := "testbuildmaintainer" + + expected := "dockerio" + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Author") + if res != expected { + c.Fatalf("Maintainer %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildUser(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuilduser" + expected := "dockerio" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio + RUN [ $(whoami) = 'dockerio' ]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.User") + if res != expected { + c.Fatalf("User %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { + name := "testbuildrelativeworkdir" + + var ( + expected1 string + expected2 string + expected3 string + expected4 string + expectedFinal string + ) + + if daemonPlatform == "windows" { + expected1 = `C:/` + expected2 = `C:/test1` + expected3 = `C:/test2` + expected4 = `C:/test2/test3` + expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox + } else { + expected1 = `/` + expected2 = `/test1` + expected3 = `/test2` + expected4 = `/test2/test3` + expectedFinal = `/test2/test3` + } + + _, err := buildImage(name, + `FROM busybox + RUN sh -c "[ "$PWD" = "`+expected1+`" ]" + WORKDIR test1 + RUN sh -c "[ "$PWD" = "`+expected2+`" ]" + WORKDIR /test2 + RUN sh -c "[ "$PWD" = "`+expected3+`" ]" + WORKDIR test3 + RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.WorkingDir") + if res != expectedFinal { + c.Fatalf("Workdir %s, expected %s", res, expectedFinal) + } +} + +// #22181 Regression test. Single end-to-end test of using +// Windows semantics. Most path handling verifications are in unit tests +func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsworkdirprocessing" + _, err := buildImage(name, + `FROM busybox + WORKDIR C:\\foo + WORKDIR bar + RUN sh -c "[ "$PWD" = "C:/foo/bar" ]" + `, + true) + if err != nil { + c.Fatal(err) + } +} + +// #22181 Regression test. Most paths handling verifications are in unit test. +// One functional test for end-to-end +func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsaddcopypathprocessing" + // TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to + // support backslash such as .\\ being equivalent to ./ and c:\\ being + // equivalent to c:/. This is not currently (nor ever has been) supported + // by docker on the Windows platform. + dockerfile := ` + FROM busybox + # No trailing slash on COPY/ADD + # Results in dir being changed to a file + WORKDIR /wc1 + COPY wc1 c:/wc1 + WORKDIR /wc2 + ADD wc2 c:/wc2 + WORKDIR c:/ + RUN sh -c "[ $(cat c:/wc1/wc1) = 'hellowc1' ]" + RUN sh -c "[ $(cat c:/wc2/wc2) = 'worldwc2' ]" + + # Trailing slash on COPY/ADD, Windows-style path. + WORKDIR /wd1 + COPY wd1 c:/wd1/ + WORKDIR /wd2 + ADD wd2 c:/wd2/ + RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]" + RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]" + ` + ctx, err := fakeContext(dockerfile, map[string]string{ + "wc1": "hellowc1", + "wc2": "worldwc2", + "wd1": "hellowd1", + "wd2": "worldwd2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { + name := "testbuildworkdirwithenvvariables" + + var expected string + if daemonPlatform == "windows" { + expected = `C:\test1\test2` + } else { + expected = `/test1/test2` + } + + _, err := buildImage(name, + `FROM busybox + ENV DIRPATH /test1 + ENV SUBDIRNAME test2 + WORKDIR $DIRPATH + WORKDIR $SUBDIRNAME/$MISSING_VAR`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.WorkingDir") + if res != expected { + c.Fatalf("Workdir %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { + // cat /test1/test2/foo gets permission denied for the user + testRequires(c, NotUserNamespace) + + var expected string + if daemonPlatform == "windows" { + expected = `C:/test1/test2` + } else { + expected = `/test1/test2` + } + + name := "testbuildrelativecopy" + dockerfile := ` + FROM busybox + WORKDIR /test1 + WORKDIR test2 + RUN sh -c "[ "$PWD" = '` + expected + `' ]" + COPY foo ./ + RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]" + ADD foo ./bar/baz + RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]" + COPY foo ./bar/baz2 + RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]" + WORKDIR .. + COPY foo ./ + RUN sh -c "[ $(cat /test1/foo) = 'hello' ]" + COPY foo /test3/ + RUN sh -c "[ $(cat /test3/foo) = 'hello' ]" + WORKDIR /test4 + COPY . . + RUN sh -c "[ $(cat /test4/foo) = 'hello' ]" + WORKDIR /test5/test6 + COPY foo ../ + RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" + ` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildBlankName(c *check.C) { + name := "testbuildblankname" + _, _, stderr, err := buildImageWithStdoutStderr(name, + `FROM busybox + ENV =`, + true) + if err == nil { + c.Fatal("Build was supposed to fail but didn't") + } + if !strings.Contains(stderr, "ENV names can not be blank") { + c.Fatalf("Missing error message, got: %s", stderr) + } + + _, _, stderr, err = buildImageWithStdoutStderr(name, + `FROM busybox + LABEL =`, + true) + if err == nil { + c.Fatal("Build was supposed to fail but didn't") + } + if !strings.Contains(stderr, "LABEL names can not be blank") { + c.Fatalf("Missing error message, got: %s", stderr) + } + + _, _, stderr, err = buildImageWithStdoutStderr(name, + `FROM busybox + ARG =foo`, + true) + if err == nil { + c.Fatal("Build was supposed to fail but didn't") + } + if !strings.Contains(stderr, "ARG names can not be blank") { + c.Fatalf("Missing error message, got: %s", stderr) + } +} + +func (s *DockerSuite) TestBuildEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + name := "testbuildenv" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + _, err := buildImage(name, + `FROM busybox + ENV PATH /test:$PATH + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Env") + if res != expected { + c.Fatalf("Env %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildPATH(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + + defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + + fn := func(dockerfile string, exp string) { + _, err := buildImage("testbldpath", dockerfile, true) + c.Assert(err, check.IsNil) + + res := inspectField(c, "testbldpath", "Config.Env") + + if res != exp { + c.Fatalf("Env %q, expected %q for dockerfile:%q", res, exp, dockerfile) + } + } + + tests := []struct{ dockerfile, exp string }{ + {"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM scratch\nENV PATH=/test", "[PATH=/test]"}, + {"FROM busybox\nENV PATH=/test", "[PATH=/test]"}, + {"FROM scratch\nENV PATH=''", "[PATH=]"}, + {"FROM busybox\nENV PATH=''", "[PATH=]"}, + } + + for _, test := range tests { + fn(test.dockerfile, test.exp) + } +} + +func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + RUN /non/existing/command`, + true) + if err == nil { + c.Fatalf("expected build to fail, but it didn't") + } + entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildCmd(c *check.C) { + name := "testbuildcmd" + + expected := "[/bin/echo Hello World]" + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + CMD ["/bin/echo", "Hello World"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Cmd") + if res != expected { + c.Fatalf("Cmd %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExpose(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexpose" + expected := "map[2375/tcp:{}]" + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + // start building docker file with a large number of ports + portList := make([]string, 50) + line := make([]string, 100) + expectedPorts := make([]int, len(portList)*len(line)) + for i := 0; i < len(portList); i++ { + for j := 0; j < len(line); j++ { + p := i*len(line) + j + 1 + line[j] = strconv.Itoa(p) + expectedPorts[p-1] = p + } + if i == len(portList)-1 { + portList[i] = strings.Join(line, " ") + } else { + portList[i] = strings.Join(line, " ") + ` \` + } + } + + dockerfile := `FROM scratch + EXPOSE {{range .}} {{.}} + {{end}}` + tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) + buf := bytes.NewBuffer(nil) + tmpl.Execute(buf, portList) + + name := "testbuildexpose" + _, err := buildImage(name, buf.String(), true) + if err != nil { + c.Fatal(err) + } + + // check if all the ports are saved inside Config.ExposedPorts + res := inspectFieldJSON(c, name, "Config.ExposedPorts") + var exposedPorts map[string]interface{} + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + for _, p := range expectedPorts { + ep := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[ep]; !ok { + c.Errorf("Port(%s) is not exposed", ep) + } else { + delete(exposedPorts, ep) + } + } + if len(exposedPorts) != 0 { + c.Errorf("Unexpected extra exposed ports %v", exposedPorts) + } +} + +func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + buildID := func(name, exposed string) string { + _, err := buildImage(name, fmt.Sprintf(`FROM scratch + EXPOSE %s`, exposed), true) + if err != nil { + c.Fatal(err) + } + id := inspectField(c, name, "Id") + return id + } + + id1 := buildID("testbuildexpose1", "80 2375") + id2 := buildID("testbuildexpose2", "2375 80") + if id1 != id2 { + c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") + } +} + +func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexposeuppercaseproto" + expected := "map[5678/udp:{}]" + _, err := buildImage(name, + `FROM scratch + EXPOSE 5678/UDP`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { + name := "testbuildentrypointinheritance" + name2 := "testbuildentrypointinheritance2" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Entrypoint") + + expected := "[/bin/echo]" + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + _, err = buildImage(name2, + fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name), + true) + if err != nil { + c.Fatal(err) + } + res = inspectField(c, name2, "Config.Entrypoint") + + expected = "[]" + + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { + name := "testbuildentrypoint" + expected := "[]" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT []`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { + name := "testbuildentrypoint" + + expected := "[/bin/echo]" + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +// #6445 ensure ONBUILD triggers aren't committed to grandchildren +func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { + var ( + out2, out3 string + ) + { + name1 := "testonbuildtrigger1" + dockerfile1 := ` + FROM busybox + RUN echo "GRANDPARENT" + ONBUILD RUN echo "ONBUILD PARENT" + ` + ctx, err := fakeContext(dockerfile1, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out1, err) + } + } + { + name2 := "testonbuildtrigger2" + dockerfile2 := ` + FROM testonbuildtrigger1 + ` + ctx, err := fakeContext(dockerfile2, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out2, err) + } + } + { + name3 := "testonbuildtrigger3" + dockerfile3 := ` + FROM testonbuildtrigger2 + ` + ctx, err := fakeContext(dockerfile3, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out3, err) + } + + } + + // ONBUILD should be run in second build. + if !strings.Contains(out2, "ONBUILD PARENT") { + c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") + } + + // ONBUILD should *not* be run in third build. + if strings.Contains(out3, "ONBUILD PARENT") { + c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") + } + +} + +func (s *DockerSuite) TestBuildWithCache(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildwithcache" + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildWithoutCache(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildwithoutcache" + name2 := "testbuildwithoutcache2" + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + + id2, err := buildImage(name2, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { + name := "testbuildconditionalcache" + + dockerfile := ` + FROM busybox + ADD foo /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("Error building #1: %s", err) + } + + if err := ctx.Add("foo", "bye"); err != nil { + c.Fatalf("Error modifying foo: %s", err) + } + + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatalf("Error building #2: %s", err) + } + if id2 == id1 { + c.Fatal("Should not have used the cache") + } + + id3, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("Error building #3: %s", err) + } + if id3 != id2 { + c.Fatal("Should have used the cache") + } +} + +func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) { + // local files are not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddlocalfilewithcache" + name2 := "testbuildaddlocalfilewithcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) { + name := "testbuildaddmultiplelocalfilewithcache" + name2 := "testbuildaddmultiplelocalfilewithcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo Dockerfile /usr/lib/bla/ + RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) { + // local files are not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddlocalfilewithoutcache" + name2 := "testbuildaddlocalfilewithoutcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { + name := "testbuildcopydirbutnotfile" + name2 := "testbuildcopydirbutnotfile2" + + dockerfile := ` + FROM ` + minimalBaseImage() + ` + COPY dir /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "dir/foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + // Check that adding file with similar name doesn't mess with cache + if err := ctx.Add("dir_file", "hello2"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't") + } +} + +func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { + name := "testbuildaddcurrentdirwithcache" + name2 := name + "2" + name3 := name + "3" + name4 := name + "4" + dockerfile := ` + FROM ` + minimalBaseImage() + ` + MAINTAINER dockerio + ADD . /usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + // Check that adding file invalidate cache of "ADD ." + if err := ctx.Add("bar", "hello2"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file invalidate cache of "ADD ." + if err := ctx.Add("foo", "hello1"); err != nil { + c.Fatal(err) + } + id3, err := buildImageFromContext(name3, ctx, true) + if err != nil { + c.Fatal(err) + } + if id2 == id3 { + c.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file to same content with different mtime does not + // invalidate cache of "ADD ." + time.Sleep(1 * time.Second) // wait second because of mtime precision + if err := ctx.Add("foo", "hello1"); err != nil { + c.Fatal(err) + } + id4, err := buildImageFromContext(name4, ctx, true) + if err != nil { + c.Fatal(err) + } + if id3 != id4 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) { + name := "testbuildaddcurrentdirwithoutcache" + name2 := "testbuildaddcurrentdirwithoutcache2" + dockerfile := ` + FROM ` + minimalBaseImage() + ` + MAINTAINER dockerio + ADD . /usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) { + name := "testbuildaddremotefilewithcache" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + id1, err := buildImage(name, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) { + name := "testbuildaddremotefilewithoutcache" + name2 := "testbuildaddremotefilewithoutcache2" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + id1, err := buildImage(name, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name2, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { + name := "testbuildaddremotefilemtime" + name2 := name + "2" + name3 := name + "3" + + files := map[string]string{"baz": "hello"} + server, err := fakeStorage(files) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't - #1") + } + + // Now create a different server with same contents (causes different mtime) + // The cache should still be used + + // allow some time for clock to pass as mtime precision is only 1s + time.Sleep(2 * time.Second) + + server2, err := fakeStorage(files) + if err != nil { + c.Fatal(err) + } + defer server2.Close() + + ctx2, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) + if err != nil { + c.Fatal(err) + } + defer ctx2.Close() + id3, err := buildImageFromContext(name3, ctx2, true) + if err != nil { + c.Fatal(err) + } + if id1 != id3 { + c.Fatal("The cache should have been used but wasn't") + } +} + +func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) { + name := "testbuildaddlocalandremotefilewithcache" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func testContextTar(c *check.C, compression archive.Compression) { + ctx, err := fakeContext( + `FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`, + map[string]string{ + "foo": "bar", + }, + ) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + context, err := archive.Tar(ctx.Dir, compression) + if err != nil { + c.Fatalf("failed to build context tar: %v", err) + } + name := "contexttar" + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = context + + if out, _, err := runCommandWithOutput(buildCmd); err != nil { + c.Fatalf("build failed to complete: %v %v", out, err) + } +} + +func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { + testContextTar(c, archive.Gzip) +} + +func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { + testContextTar(c, archive.Uncompressed) +} + +func (s *DockerSuite) TestBuildNoContext(c *check.C) { + buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") + buildCmd.Stdin = strings.NewReader( + `FROM busybox + CMD ["echo", "ok"]`) + + if out, _, err := runCommandWithOutput(buildCmd); err != nil { + c.Fatalf("build failed to complete: %v %v", out, err) + } + + if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { + c.Fatalf("run produced invalid output: %q, expected %q", out, "ok") + } +} + +// TODO: TestCaching +func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) { + name := "testbuildaddlocalandremotefilewithoutcache" + name2 := "testbuildaddlocalandremotefilewithoutcache2" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalidated but hasn't.") + } +} + +func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildimg" + + _, err := buildImage(name, + `FROM busybox:latest + RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test + VOLUME /test`, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test") + + if expected := "drw-------"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + + if expected := "daemon daemon"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + +} + +// testing #1405 - config.Cmd does not get cleaned up if +// utilizing cache +func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { + name := "testbuildcmdcleanup" + if _, err := buildImage(name, + `FROM busybox + RUN echo "hello"`, + true); err != nil { + c.Fatal(err) + } + + ctx, err := fakeContext(`FROM busybox + RUN echo "hello" + ADD foo /foo + ENTRYPOINT ["/bin/echo"]`, + map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Cmd") + // Cmd must be cleaned up + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } +} + +func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { + name := "testbuildaddnotfound" + expected := "foo: no such file or directory" + + if daemonPlatform == "windows" { + expected = "foo: The system cannot find the file specified" + } + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + ADD foo /usr/local/bar`, + map[string]string{"bar": "hello"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + if !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error %v, must be about missing foo file or directory", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildInheritance(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildinheritance" + + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + c.Fatal(err) + } + ports1 := inspectField(c, name, "Config.ExposedPorts") + + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["/bin/echo"]`, name), + true) + if err != nil { + c.Fatal(err) + } + + res := inspectField(c, name, "Config.Entrypoint") + if expected := "[/bin/echo]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + ports2 := inspectField(c, name, "Config.ExposedPorts") + if ports1 != ports2 { + c.Fatalf("Ports must be same: %s != %s", ports1, ports2) + } +} + +func (s *DockerSuite) TestBuildFails(c *check.C) { + name := "testbuildfails" + _, err := buildImage(name, + `FROM busybox + RUN sh -c "exit 23"`, + true) + if err != nil { + if !strings.Contains(err.Error(), "returned a non-zero code: 23") { + c.Fatalf("Wrong error %v, must be about non-zero code 23", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildOnBuild(c *check.C) { + name := "testbuildonbuild" + _, err := buildImage(name, + `FROM busybox + ONBUILD RUN touch foobar`, + true) + if err != nil { + c.Fatal(err) + } + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + RUN [ -f foobar ]`, name), + true) + if err != nil { + c.Fatal(err) + } +} + +// gh #2446 +func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { + makeLink := `ln -s /foo /bar` + if daemonPlatform == "windows" { + makeLink = `mklink /D C:\bar C:\foo` + } + name := "testbuildaddtosymlinkdest" + ctx, err := fakeContext(`FROM busybox + RUN sh -c "mkdir /foo" + RUN `+makeLink+` + ADD foo /bar/ + RUN sh -c "[ -f /bar/foo ]" + RUN sh -c "[ -f /foo/foo ]"`, + map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { + name := "testbuildescapewhitespace" + + _, err := buildImage(name, ` + # ESCAPE=\ + FROM busybox + MAINTAINER "Docker \ +IO " + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectField(c, name, "Author") + + if res != "\"Docker IO \"" { + c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) + } + +} + +func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { + // Verify that strings that look like ints are still passed as strings + name := "testbuildstringing" + + _, err := buildImage(name, ` + FROM busybox + MAINTAINER 123 + `, true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "inspect", name) + + if !strings.Contains(out, "\"123\"") { + c.Fatalf("Output does not contain the int as a string:\n%s", out) + } + +} + +func (s *DockerSuite) TestBuildDockerignore(c *check.C) { + name := "testbuilddockerignore" + dockerfile := ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ ! -e /bla/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! -e /bla/.git ]]" + RUN sh -c "[[ ! -e v.cc ]]" + RUN sh -c "[[ ! -e src/v.cc ]]" + RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + "src/_vendor/v.cc": "package main", + "src/v.cc": "package main", + "v.cc": "package main", + "dir/foo": "", + ".gitignore": "", + "README.md": "readme", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +**/*.cc +dir`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { + name := "testbuilddockerignorecleanpaths" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "foo", + "foo2": "foo2", + "dir1/foo": "foo in dir1", + ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { + name := "testbuilddockerignoreexceptions" + dockerfile := ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ -e /bla/dir/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/dir/foo1 ]]" + RUN sh -c "[[ -f /bla/dir/e ]]" + RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! -e /bla/.git ]]" + RUN sh -c "[[ -e /bla/dir/a.cc ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + "dir/foo": "", + "dir/foo1": "", + "dir/dir/f1": "", + "dir/dir/foo": "", + "dir/e": "", + "dir/e-dir/foo": "", + ".gitignore": "", + "README.md": "readme", + "dir/a.cc": "hello", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +dir +!dir/e* +!dir/dir/foo +**/*.cc +!**/*.cc`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": "Dockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore Dockerfile correctly:%s", err) + } + + // now try it with ./Dockerfile + ctx.Add(".dockerignore", "./Dockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) + } + +} + +func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls /tmp/Dockerfile + RUN sh -c "! ls /tmp/MyDockerfile" + RUN ls /tmp/.dockerignore` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "Should not use me", + "MyDockerfile": dockerfile, + ".dockerignore": "MyDockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) + } + + // now try it with ./MyDockerfile + ctx.Add(".dockerignore", "./MyDockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) + } + +} + +func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { + name := "testbuilddockerignoredockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/.dockerignore" + RUN ls /tmp/Dockerfile` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": ".dockerignore\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore .dockerignore correctly:%s", err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { + var id1 string + var id2 string + + name := "testbuilddockerignoretouchdockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": "Dockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if id1, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 1") + } + + // Now make sure touching Dockerfile doesn't invalidate the cache + if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 2") + } + + // One more time but just 'touch' it instead of changing the content + if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 3") + } + +} + +func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { + name := "testbuilddockerignorewholedir" + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".gitignore": "", + ".dockerignore": ".*\n", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "."), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { + name := "testbuilddockerignorebadexclusion" + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".gitignore": "", + ".dockerignore": "!\n", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err == nil { + c.Fatalf("Build was supposed to fail but didn't") + } + + if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" { + c.Fatalf("Incorrect output, got:%q", err.Error()) + } +} + +func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.dockerignore ]]" + RUN sh -c "[[ ! -e /Dockerfile ]]" + RUN sh -c "[[ ! -e /file1 ]]" + RUN sh -c "[[ ! -e /dir ]]"` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "file1": "", + "dir/dfile1": "", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + // All of these should result in ignoring all files + for _, variant := range []string{"**", "**/", "**/**", "*"} { + ctx.Add(".dockerignore", variant) + _, err = buildImageFromContext("noname", ctx, true) + c.Assert(err, check.IsNil, check.Commentf("variant: %s", variant)) + } +} + +func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + #RUN sh -c "[[ -e /.dockerignore ]]" + RUN sh -c "[[ -e /Dockerfile ]] && \ + [[ ! -e /file0 ]] && \ + [[ ! -e /dir1/file0 ]] && \ + [[ ! -e /dir2/file0 ]] && \ + [[ ! -e /file1 ]] && \ + [[ ! -e /dir1/file1 ]] && \ + [[ ! -e /dir1/dir2/file1 ]] && \ + [[ ! -e /dir1/file2 ]] && \ + [[ -e /dir1/dir2/file2 ]] && \ + [[ ! -e /dir1/dir2/file4 ]] && \ + [[ ! -e /dir1/dir2/file5 ]] && \ + [[ ! -e /dir1/dir2/file6 ]] && \ + [[ ! -e /dir1/dir3/file7 ]] && \ + [[ ! -e /dir1/dir3/file8 ]] && \ + [[ -e /dir1/dir3 ]] && \ + [[ -e /dir1/dir4 ]] && \ + [[ ! -e 'dir1/dir5/fileAA' ]] && \ + [[ -e 'dir1/dir5/fileAB' ]] && \ + [[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing + + RUN echo all done!` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "file0": "", + "dir1/file0": "", + "dir1/dir2/file0": "", + + "file1": "", + "dir1/file1": "", + "dir1/dir2/file1": "", + + "dir1/file2": "", + "dir1/dir2/file2": "", // remains + + "dir1/dir2/file4": "", + "dir1/dir2/file5": "", + "dir1/dir2/file6": "", + "dir1/dir3/file7": "", + "dir1/dir3/file8": "", + "dir1/dir4/file9": "", + + "dir1/dir5/fileAA": "", + "dir1/dir5/fileAB": "", + "dir1/dir5/fileB": "", + + ".dockerignore": ` +**/file0 +**/*file1 +**/dir1/file2 +dir1/**/file4 +**/dir2/file5 +**/dir1/dir2/file6 +dir1/dir3/** +**/dir4/** +**/file?A +**/file\?B +**/dir5/file. +`, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext("noname", ctx, true) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestBuildLineBreak(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildlinebreak" + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildeolinline" + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass > /tmp/passwd' +RUN echo "foo \n bar"; echo "baz" +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcomments" + _, err := buildImage(name, + `FROM busybox +# This is an ordinary comment. +RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh +RUN [ ! -x /hello.sh ] +# comment with line break \ +RUN chmod +x /hello.sh +RUN [ -x /hello.sh ] +RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] +RUN [ "$(/hello.sh)" = "hello world" ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildusers" + _, err := buildImage(name, + `FROM busybox + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \ + echo 'dockerio:x:1001:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \ + # Add a "supplementary" group for our dockerio user + echo 'supplementary:x:1002:dockerio' >> /etc/group + +# ... and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] +USER 1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER dockerio:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +// FIXME(vdemeester) rename this test (and probably "merge" it with the one below TestBuildEnvUsage2) +func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildenvusage" + dockerfile := `FROM busybox +ENV HOME /root +ENV PATH $HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] +ENV FOO /foo/baz +ENV BAR /bar +ENV BAZ $BAR +ENV FOOPATH $PATH:$FOO +RUN [ "$BAR" = "$BAZ" ] +RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] +ENV FROM hello/docker/world +ENV TO /docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +ENV abc=def +ENV ghi=$abc +RUN [ "$ghi" = "def" ] +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +// FIXME(vdemeester) rename this test (and probably "merge" it with the one above TestBuildEnvUsage) +func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildenvusage2" + dockerfile := `FROM busybox +ENV abc=def def="hello world" +RUN [ "$abc,$def" = "def,hello world" ] +ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too" +RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ] +ENV abc=zzz FROM=hello/docker/world +ENV abc=zzz TO=/docker/world/hello +ADD $FROM $TO +RUN [ "$abc,$(cat $TO)" = "zzz,hello" ] +ENV abc 'yyy' +RUN [ $abc = 'yyy' ] +ENV abc= +RUN [ "$abc" = "" ] + +# use grep to make sure if the builder substitutes \$foo by mistake +# we don't get a false positive +ENV abc=\$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) +ENV abc \$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) + +ENV abc=\'foo\' abc2=\"foo\" +RUN [ "$abc,$abc2" = "'foo',\"foo\"" ] +ENV abc "foo" +RUN [ "$abc" = "foo" ] +ENV abc 'foo' +RUN [ "$abc" = 'foo' ] +ENV abc \'foo\' +RUN [ "$abc" = "'foo'" ] +ENV abc \"foo\" +RUN [ "$abc" = '"foo"' ] + +ENV abc=ABC +RUN [ "$abc" = "ABC" ] +ENV def1=${abc:-DEF} def2=${ccc:-DEF} +ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:} +RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ] +ENV mypath=${mypath:+$mypath:}/home +ENV mypath=${mypath:+$mypath:}/away +RUN [ "$mypath" = '/home:/away' ] + +ENV e1=bar +ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11 +RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] + +ENV ee1 bar +ENV ee2 $ee1 +ENV ee3 $ee11 +ENV ee4 \$ee1 +ENV ee5 \$ee11 +RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] + +ENV eee1="foo" eee2='foo' +ENV eee3 "foo" +ENV eee4 'foo' +RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] + +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddScript(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddscript" + dockerfile := ` +FROM busybox +ADD test /test +RUN ["chmod","+x","/test"] +RUN ["/test"] +RUN [ "$(cat /testfile)" = 'test!' ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "test": "#!/bin/sh\necho 'test!' > /testfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddTar(c *check.C) { + // /test/foo is not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddtar" + + ctx := func() *FakeContext { + dockerfile := ` +FROM busybox +ADD test.tar / +RUN cat /test/foo | grep Hi +ADD test.tar /test.tar +RUN cat /test.tar/test/foo | grep Hi +ADD test.tar /unlikely-to-exist +RUN cat /unlikely-to-exist/test/foo | grep Hi +ADD test.tar /unlikely-to-exist-trailing-slash/ +RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi +RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir +ADD test.tar /existing-directory +RUN cat /existing-directory/test/foo | grep Hi +ADD test.tar /existing-directory-trailing-slash/ +RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTar: %v", err) + } + +} + +func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { + name := "testbuildaddbrokentar" + + ctx := func() *FakeContext { + dockerfile := ` +FROM busybox +ADD test.tar /` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + // Corrupt the tar by removing one byte off the end + stat, err := testTar.Stat() + if err != nil { + c.Fatalf("failed to stat tar archive: %v", err) + } + if err := testTar.Truncate(stat.Size() - 1); err != nil { + c.Fatalf("failed to truncate tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err == nil { + c.Fatalf("build should have failed for TestBuildAddBrokenTar") + } +} + +func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { + name := "testbuildaddnontar" + + // Should not try to extract test.tar + ctx, err := fakeContext(` + FROM busybox + ADD test.tar / + RUN test -f /test.tar`, + map[string]string{"test.tar": "not_a_tar_file"}) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed for TestBuildAddNonTar") + } +} + +func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { + // /test/foo is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxz" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz / + RUN cat /test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "-k", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + +} + +func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxzgz" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz.gz / + RUN ls /test.tar.xz.gz` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "-k", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + gzipCompressCmd := exec.Command("gzip", "test.tar.xz") + gzipCompressCmd.Dir = tmpDir + out, _, err = runCommandWithOutput(gzipCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + +} + +func (s *DockerSuite) TestBuildFromGit(c *check.C) { + name := "testbuildfromgit" + git, err := newFakeGit("repo", map[string]string{ + "Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "first": "test git data", + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + _, err = buildImageFromPath(name, git.RepoURL, true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) { + name := "testbuildfromgit" + git, err := newFakeGit("repo", map[string]string{ + "docker/Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "docker/first": "test git data", + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + u := fmt.Sprintf("%s#master:docker", git.RepoURL) + _, err = buildImageFromPath(name, u, true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitwithF(c *check.C) { + name := "testbuildfromgitwithf" + git, err := newFakeGit("repo", map[string]string{ + "myApp/myDockerfile": `FROM busybox + RUN echo hi from Dockerfile`, + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "myApp/myDockerfile", git.RepoURL) + if err != nil { + c.Fatalf("Error on build. Out: %s\nErr: %v", out, err) + } + + if !strings.Contains(out, "hi from Dockerfile") { + c.Fatalf("Missing expected output, got:\n%s", out) + } +} + +func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { + name := "testbuildfromremotetarball" + + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + MAINTAINER docker`) + if err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write(dockerfile); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, check.IsNil) + + defer server.Close() + + _, err = buildImageFromPath(name, server.URL()+"/testT.tar", true) + c.Assert(err, check.IsNil) + + res := inspectField(c, name, "Author") + + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { + name := "testbuildcmdcleanuponentrypoint" + if _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + CMD ["test"] + ENTRYPOINT ["echo"]`, + true); err != nil { + c.Fatal(err) + } + if _, err := buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["cat"]`, name), + true); err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } + + res = inspectField(c, name, "Config.Entrypoint") + if expected := "[cat]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildClearCmd(c *check.C) { + name := "testbuildclearcmd" + _, err := buildImage(name, + `From `+minimalBaseImage()+` + ENTRYPOINT ["/bin/bash"] + CMD []`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected %s", res, "[]") + } +} + +func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. + testRequires(c, DaemonIsLinux) + + name := "testbuildemptycmd" + if _, err := buildImage(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true); err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "null" { + c.Fatalf("Cmd %s, expected %s", res, "null") + } +} + +func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { + name := "testbuildonbuildparent" + if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { + c.Fatal(err) + } + + _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) + if err != nil { + c.Fatal(err) + } + + if !strings.Contains(out, "# Executing 1 build trigger") { + c.Fatal("failed to find the build trigger output", out) + } +} + +func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { + name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) + _, out, err := buildImageWithOut(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true) + // if the error doesn't check for illegal tag name, or the image is built + // then this should fail + if !strings.Contains(out, "Error parsing reference") || strings.Contains(out, "Sending build context to Docker daemon") { + c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) + } +} + +func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { + name := "testbuildcmdshc" + if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Cmd") + + expected := `["/bin/sh","-c","echo cmd"]` + if daemonPlatform == "windows" { + expected = `["cmd","/S","/C","echo cmd"]` + } + + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { + // Test to make sure that when we strcat arrays we take into account + // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't + // look the same + name := "testbuildcmdspaces" + var id1 string + var id2 string + var err error + + if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id1 == id2 { + c.Fatal("Should not have resulted in the same CMD") + } + + // Now do the same with ENTRYPOINT + if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id1 == id2 { + c.Fatal("Should not have resulted in the same ENTRYPOINT") + } + +} + +func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { + name := "testbuildcmdjson" + if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Cmd") + + expected := `["echo","cmd"]` + + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { + + if _, err := buildImage("parent", ` + FROM busybox + ENTRYPOINT exit 130 + `, true); err != nil { + c.Fatal(err) + } + + if _, status, _ := dockerCmdWithError("run", "parent"); status != 130 { + c.Fatalf("expected exit code 130 but received %d", status) + } + + if _, err := buildImage("child", ` + FROM parent + ENTRYPOINT exit 5 + `, true); err != nil { + c.Fatal(err) + } + + if _, status, _ := dockerCmdWithError("run", "child"); status != 5 { + c.Fatalf("expected exit code 5 but received %d", status) + } + +} + +func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) { + var ( + name = "testbuildepinherit" + name2 = "testbuildepinherit2" + expected = `["/bin/sh","-c","echo quux"]` + ) + + if daemonPlatform == "windows" { + expected = `["cmd","/S","/C","echo quux"]` + } + + if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { + c.Fatal(err) + } + + if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name2, "Config.Entrypoint") + + if res != expected { + c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) + } + + out, _ := dockerCmd(c, "run", name2) + + expected = "quux" + + if strings.TrimSpace(out) != expected { + c.Fatalf("Expected output is %s, got %s", expected, out) + } + +} + +func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { + name := "testbuildentrypoint" + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT echo`, + true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--rm", name) +} + +func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildexoticshellinterpolation" + + _, err := buildImage(name, ` + FROM busybox + + ENV SOME_VAR a.b.c + + RUN [ "$SOME_VAR" = 'a.b.c' ] + RUN [ "${SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR%.*}" = 'a.b' ] + RUN [ "${SOME_VAR%%.*}" = 'a' ] + RUN [ "${SOME_VAR#*.}" = 'b.c' ] + RUN [ "${SOME_VAR##*.}" = 'c' ] + RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] + RUN [ "${#SOME_VAR}" = '5' ] + + RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] + RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] + RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] + `, false) + if err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { + // This testcase is supposed to generate an error because the + // JSON array we're passing in on the CMD uses single quotes instead + // of double quotes (per the JSON spec). This means we interpret it + // as a "string" instead of "JSON array" and pass it on to "sh -c" and + // it should barf on it. + name := "testbuildsinglequotefails" + + if _, err := buildImage(name, + `FROM busybox + CMD [ '/bin/sh', '-c', 'echo hi' ]`, + true); err != nil { + c.Fatal(err) + } + + if _, _, err := dockerCmdWithError("run", "--rm", name); err == nil { + c.Fatal("The image was not supposed to be able to run") + } + +} + +func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { + name := "testbuildverboseout" + expected := "\n123\n" + + if daemonPlatform == "windows" { + expected = "\n123\r\n" + } + + _, out, err := buildImageWithOut(name, + `FROM busybox +RUN echo 123`, + false) + + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, expected) { + c.Fatalf("Output should contain %q: %q", "123", out) + } + +} + +func (s *DockerSuite) TestBuildWithTabs(c *check.C) { + name := "testbuildwithtabs" + _, err := buildImage(name, + "FROM busybox\nRUN echo\tone\t\ttwo", true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` + expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + if daemonPlatform == "windows" { + expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]` + expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + } + if res != expected1 && res != expected2 { + c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) + } +} + +func (s *DockerSuite) TestBuildLabels(c *check.C) { + name := "testbuildlabel" + expected := `{"License":"GPL","Vendor":"Acme"}` + _, err := buildImage(name, + `FROM busybox + LABEL Vendor=Acme + LABEL License GPL`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { + name := "testbuildlabelcache" + + id1, err := buildImage(name, + `FROM busybox + LABEL Vendor=Acme`, false) + if err != nil { + c.Fatalf("Build 1 should have worked: %v", err) + } + + id2, err := buildImage(name, + `FROM busybox + LABEL Vendor=Acme`, true) + if err != nil || id1 != id2 { + c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL Vendor=Acme1`, true) + if err != nil || id1 == id2 { + c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL Vendor Acme`, true) // Note: " " and "=" should be same + if err != nil || id1 != id2 { + c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) + } + + // Now make sure the cache isn't used by mistake + id1, err = buildImage(name, + `FROM busybox + LABEL f1=b1 f2=b2`, false) + if err != nil { + c.Fatalf("Build 5 should have worked: %q", err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL f1="b1 f2=b2"`, true) + if err != nil || id1 == id2 { + c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) + } + +} + +func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { + // This test makes sure that -q works correctly when build is successful: + // stdout has only the image ID (long image ID) and stderr is empty. + var stdout, stderr string + var err error + outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$") + + tt := []struct { + Name string + BuildFunc func(string) + }{ + { + Name: "quiet_build_stdin_success", + BuildFunc: func(name string) { + _, stdout, stderr, err = buildImageWithStdoutStderr(name, "FROM busybox", true, "-q", "--force-rm", "--rm") + }, + }, + { + Name: "quiet_build_ctx_success", + BuildFunc: func(name string) { + ctx, err := fakeContext("FROM busybox", map[string]string{ + "quiet_build_success_fctx": "test", + }) + if err != nil { + c.Fatalf("Failed to create context: %s", err.Error()) + } + defer ctx.Close() + _, stdout, stderr, err = buildImageFromContextWithStdoutStderr(name, ctx, true, "-q", "--force-rm", "--rm") + }, + }, + { + Name: "quiet_build_git_success", + BuildFunc: func(name string) { + git, err := newFakeGit("repo", map[string]string{ + "Dockerfile": "FROM busybox", + }, true) + if err != nil { + c.Fatalf("Failed to create the git repo: %s", err.Error()) + } + defer git.Close() + _, stdout, stderr, err = buildImageFromGitWithStdoutStderr(name, git, true, "-q", "--force-rm", "--rm") + + }, + }, + } + + for _, te := range tt { + te.BuildFunc(te.Name) + if err != nil { + c.Fatalf("Test %s shouldn't fail, but got the following error: %s", te.Name, err.Error()) + } + if outRegexp.Find([]byte(stdout)) == nil { + c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, stdout) + } + + if stderr != "" { + c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, stderr) + } + } + +} + +func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + testRequires(c, Network) + testName := "quiet_build_not_exists_image" + buildCmd := "FROM busybox11" + _, _, qstderr, qerr := buildImageWithStdoutStderr(testName, buildCmd, false, "-q", "--force-rm", "--rm") + _, vstdout, vstderr, verr := buildImageWithStdoutStderr(testName, buildCmd, false, "--force-rm", "--rm") + if verr == nil || qerr == nil { + c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", testName)) + } + if qstderr != vstdout+vstderr { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, qstderr, vstdout+vstderr)) + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + tt := []struct { + TestName string + BuildCmds string + }{ + {"quiet_build_no_from_at_the_beginning", "RUN whoami"}, + {"quiet_build_unknown_instr", "FROMD busybox"}, + } + + for _, te := range tt { + _, _, qstderr, qerr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "-q", "--force-rm", "--rm") + _, vstdout, vstderr, verr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "--force-rm", "--rm") + if verr == nil || qerr == nil { + c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName)) + } + if qstderr != vstdout+vstderr { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr)) + } + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { + // This test ensures that when given a wrong URL, stderr in quiet mode and + // stderr in verbose mode are identical. + // TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout + URL := "http://something.invalid" + Name := "quiet_build_wrong_remote" + _, _, qstderr, qerr := buildImageWithStdoutStderr(Name, "", false, "-q", "--force-rm", "--rm", URL) + _, _, vstderr, verr := buildImageWithStdoutStderr(Name, "", false, "--force-rm", "--rm", URL) + if qerr == nil || verr == nil { + c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", Name)) + } + if qstderr != vstderr { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", Name, qstderr, vstderr)) + } +} + +func (s *DockerSuite) TestBuildStderr(c *check.C) { + // This test just makes sure that no non-error output goes + // to stderr + name := "testbuildstderr" + _, _, stderr, err := buildImageWithStdoutStderr(name, + "FROM busybox\nRUN echo one", true) + if err != nil { + c.Fatal(err) + } + + if runtime.GOOS == "windows" && + daemonPlatform != "windows" { + // Windows to non-Windows should have a security warning + if !strings.Contains(stderr, "SECURITY WARNING:") { + c.Fatalf("Stderr contains unexpected output: %q", stderr) + } + } else { + // Other platform combinations should have no stderr written too + if stderr != "" { + c.Fatalf("Stderr should have been empty, instead it's: %q", stderr) + } + } +} + +func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { + testRequires(c, UnixCli) // test uses chown: not available on windows + testRequires(c, DaemonIsLinux) + + name := "testbuildchownsinglefile" + + ctx, err := fakeContext(` +FROM busybox +COPY test / +RUN ls -l /test +RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] +`, map[string]string{ + "test": "test", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { + name := "testbuildsymlinkbreakout" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` + from busybox + add symlink.tar / + add inject /symlink/ + `), 0644); err != nil { + c.Fatal(err) + } + inject := filepath.Join(ctx, "inject") + if err := ioutil.WriteFile(inject, nil, 0644); err != nil { + c.Fatal(err) + } + f, err := os.Create(filepath.Join(ctx, "symlink.tar")) + if err != nil { + c.Fatal(err) + } + w := tar.NewWriter(f) + w.WriteHeader(&tar.Header{ + Name: "symlink2", + Typeflag: tar.TypeSymlink, + Linkname: "/../../../../../../../../../../../../../../", + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.WriteHeader(&tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: filepath.Join("symlink2", tmpdir), + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.Close() + f.Close() + if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil { + c.Fatal(err) + } + if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { + c.Fatal("symlink breakout - inject") + } else if !os.IsNotExist(err) { + c.Fatalf("unexpected error: %v", err) + } +} + +func (s *DockerSuite) TestBuildXZHost(c *check.C) { + // /usr/local/sbin/xz gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildxzhost" + + ctx, err := fakeContext(` +FROM busybox +ADD xz /usr/local/sbin/ +RUN chmod 755 /usr/local/sbin/xz +ADD test.xz / +RUN [ ! -e /injected ]`, + map[string]string{ + "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + + "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + + "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", + "xz": "#!/bin/sh\ntouch /injected", + }) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { + // /foo/file gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127 + var ( + name = "testbuildvolumescontent" + expected = "some text" + volName = "/foo" + ) + + if daemonPlatform == "windows" { + volName = "C:/foo" + } + + ctx, err := fakeContext(` +FROM busybox +COPY content /foo/file +VOLUME `+volName+` +CMD cat /foo/file`, + map[string]string{ + "content": expected, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, false); err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", name) + if out != expected { + c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) + } + +} + +// FIXME(vdemeester) part of this should be unit test, other part should be clearer +func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { + + ctx, err := fakeContext(`FROM busybox + RUN echo from Dockerfile`, + map[string]string{ + "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", + "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", + "files/dFile": "FROM busybox\nRUN echo from files/dFile", + "dFile": "FROM busybox\nRUN echo from dFile", + "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test1 should have used Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".") + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "from files/Dockerfile") { + c.Fatalf("test2 should have used files/Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "from files/dFile") { + c.Fatalf("test3 should have used files/dFile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "from dFile") { + c.Fatalf("test4 should have used dFile, output:%s", out) + } + + dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") + c.Assert(err, check.IsNil) + nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") + if _, err = os.Create(nonDockerfileFile); err != nil { + c.Fatal(err) + } + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") + + if err == nil { + c.Fatalf("test5 was supposed to fail to find passwd") + } + + if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) { + c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") + if err != nil { + c.Fatalf("test6 failed: %s", err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test6 should have used root Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") + if err != nil { + c.Fatalf("test7 failed: %s", err) + } + if !strings.Contains(out, "from files/Dockerfile") { + c.Fatalf("test7 should have used files Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") + if err == nil || !strings.Contains(out, "must be within the build context") { + c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) + } + + tmpDir := os.TempDir() + out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir) + if err != nil { + c.Fatalf("test9 - failed: %s", err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test9 should have used root Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") + if err != nil { + c.Fatalf("test10 should have worked: %s", err) + } + if !strings.Contains(out, "from files/dFile2") { + c.Fatalf("test10 should have used files/dFile2, output:%s", out) + } + +} + +func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + testRequires(c, DaemonIsLinux) + + ctx, err := fakeContext(`FROM busybox + RUN echo from dockerfile`, + map[string]string{ + "dockerfile": "FROM busybox\nRUN echo from dockerfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + testRequires(c, DaemonIsLinux) + + ctx, err := fakeContext(`FROM busybox +RUN echo from Dockerfile`, + map[string]string{ + "dockerfile": "FROM busybox\nRUN echo from dockerfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { + server, err := fakeStorage(map[string]string{"baz": `FROM busybox +RUN echo from baz +COPY * /tmp/ +RUN find /tmp/`}) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(`FROM busybox +RUN echo from Dockerfile`, + map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from baz") || + strings.Contains(out, "/tmp/baz") || + !strings.Contains(out, "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { + testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why + ctx, err := fakeContext(`FROM busybox +RUN echo "from Dockerfile"`, + map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-") + dockerCommand.Dir = ctx.Dir + dockerCommand.Stdin = strings.NewReader(`FROM busybox +RUN echo "from baz" +COPY * /tmp/ +RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`) + out, status, err := runCommandWithOutput(dockerCommand) + if err != nil || status != 0 { + c.Fatalf("Error building: %s", err) + } + + if !strings.Contains(out, "from baz") || + strings.Contains(out, "/tmp/baz") || + !strings.Contains(out, "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { + name := "testbuildfromofficial" + fromNames := []string{ + "busybox", + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + for idx, fromName := range fromNames { + imgName := fmt.Sprintf("%s%d", name, idx) + _, err := buildImage(imgName, "FROM "+fromName, true) + if err != nil { + c.Errorf("Build failed using FROM %s: %s", fromName, err) + } + deleteImages(imgName) + } +} + +func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { + testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) + testRequires(c, DaemonIsLinux) + + name := "testbuilddockerfileoutsidecontext" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { + c.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(wd) + if err := os.Chdir(ctx); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { + c.Fatal(err) + } + + for _, dockerfilePath := range []string{ + filepath.Join("..", "outsideDockerfile"), + filepath.Join(ctx, "dockerfile1"), + filepath.Join(ctx, "dockerfile2"), + } { + result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") + c.Assert(result, icmd.Matches, icmd.Expected{ + Err: "must be within the build context", + ExitCode: 1, + }) + deleteImages(name) + } + + os.Chdir(tmpdir) + + // Path to Dockerfile should be resolved relative to working directory, not relative to context. + // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail + out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) + if err == nil { + c.Fatalf("Expected error. Out: %s", out) + } +} + +func (s *DockerSuite) TestBuildSpaces(c *check.C) { + // Test to make sure that leading/trailing spaces on a command + // doesn't change the error msg we get + var ( + err1 error + err2 error + ) + + name := "testspaces" + ctx, err := fakeContext("FROM busybox\nCOPY\n", + map[string]string{ + "Dockerfile": "FROM busybox\nCOPY\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { + c.Fatal("Build 1 was supposed to fail, but didn't") + } + + ctx.Add("Dockerfile", "FROM busybox\nCOPY ") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 2 was supposed to fail, but didn't") + } + + removeLogTimestamps := func(s string) string { + return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) + } + + // Skip over the times + e1 := removeLogTimestamps(err1.Error()) + e2 := removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) + } + + ctx.Add("Dockerfile", "FROM busybox\n COPY") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 3 was supposed to fail, but didn't") + } + + // Skip over the times + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) + } + + ctx.Add("Dockerfile", "FROM busybox\n COPY ") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 4 was supposed to fail, but didn't") + } + + // Skip over the times + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) + } + +} + +func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) { + // Test to make sure that spaces in quotes aren't lost + name := "testspacesquotes" + + dockerfile := `FROM busybox +RUN echo " \ + foo "` + + _, out, err := buildImageWithOut(name, dockerfile, false) + if err != nil { + c.Fatal("Build failed:", err) + } + + expecting := "\n foo \n" + // Windows uses the builtin echo, which preserves quotes + if daemonPlatform == "windows" { + expecting = "\" foo \"" + } + if !strings.Contains(out, expecting) { + c.Fatalf("Bad output: %q expecting to contain %q", out, expecting) + } + +} + +// #4393 +func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) { + testRequires(c, DaemonIsLinux) // TODO Windows: This should error out + buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-") + buildCmd.Stdin = strings.NewReader(` + FROM busybox + RUN touch /foo + VOLUME /foo + `) + + out, _, err := runCommandWithOutput(buildCmd) + if err == nil || !strings.Contains(out, "file exists") { + c.Fatalf("expected build to fail when file exists in container at requested volume path") + } + +} + +func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { + // Test to make sure that all Dockerfile commands (except the ones listed + // in skipCmds) will generate an error if no args are provided. + // Note: INSERT is deprecated so we exclude it because of that. + skipCmds := map[string]struct{}{ + "CMD": {}, + "RUN": {}, + "ENTRYPOINT": {}, + "INSERT": {}, + } + + if daemonPlatform == "windows" { + skipCmds = map[string]struct{}{ + "CMD": {}, + "RUN": {}, + "ENTRYPOINT": {}, + "INSERT": {}, + "STOPSIGNAL": {}, + "ARG": {}, + "USER": {}, + "EXPOSE": {}, + } + } + + for cmd := range command.Commands { + cmd = strings.ToUpper(cmd) + if _, ok := skipCmds[cmd]; ok { + continue + } + + var dockerfile string + if cmd == "FROM" { + dockerfile = cmd + } else { + // Add FROM to make sure we don't complain about it missing + dockerfile = "FROM busybox\n" + cmd + } + + ctx, err := fakeContext(dockerfile, map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + var out string + if out, err = buildImageFromContext("args", ctx, true); err == nil { + c.Fatalf("%s was supposed to fail. Out:%s", cmd, out) + } + if !strings.Contains(err.Error(), cmd+" requires") { + c.Fatalf("%s returned the wrong type of error:%s", cmd, err) + } + } + +} + +func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) { + testRequires(c, DaemonIsLinux) + _, out, err := buildImageWithOut("sc", "FROM scratch", true) + if err == nil { + c.Fatalf("Build was supposed to fail") + } + if !strings.Contains(out, "No image was generated") { + c.Fatalf("Wrong error message: %v", out) + } +} + +func (s *DockerSuite) TestBuildDotDotFile(c *check.C) { + ctx, err := fakeContext("FROM busybox\n", + map[string]string{ + "..gitme": "", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext("sc", ctx, false); err != nil { + c.Fatalf("Build was supposed to work: %s", err) + } +} + +func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) { + testRequires(c, DaemonIsLinux) // No hello-world Windows image + name := "testbuildrunonejson" + + ctx, err := fakeContext(`FROM hello-world:frozen +RUN [ "/hello" ]`, map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".") + if err != nil { + c.Fatalf("failed to build the image: %s, %v", out, err) + } + + if !strings.Contains(out, "Hello from Docker") { + c.Fatalf("bad output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) { + name := "testbuildemptystringvolume" + + _, err := buildImage(name, ` + FROM busybox + ENV foo="" + VOLUME $foo + `, false) + if err == nil { + c.Fatal("Should have failed to build") + } + +} + +func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, DaemonIsLinux) + + cgroupParent := "test" + data, err := ioutil.ReadFile("/proc/self/cgroup") + if err != nil { + c.Fatalf("failed to read '/proc/self/cgroup - %v", err) + } + selfCgroupPaths := parseCgroupPaths(string(data)) + _, found := selfCgroupPaths["memory"] + if !found { + c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths) + } + cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-") + cmd.Stdin = strings.NewReader(` +FROM busybox +RUN cat /proc/self/cgroup +`) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), out) + c.Assert(err, check.IsNil) + if !m { + c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, out) + } +} + +func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) { + // Check to make sure our build output prints the Dockerfile cmd + // property - there was a bug that caused it to be duplicated on the + // Step X line + name := "testbuildnodupoutput" + + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN env`, false) + if err != nil { + c.Fatalf("Build should have worked: %q", err) + } + + exp := "\nStep 2/2 : RUN env\n" + if !strings.Contains(out, exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + } +} + +// GH15826 +func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) { + // Explicit check to ensure that build starts from step 1 rather than 0 + name := "testbuildstartsfromone" + + _, out, err := buildImageWithOut(name, ` + FROM busybox`, false) + if err != nil { + c.Fatalf("Build should have worked: %q", err) + } + + exp := "\nStep 1/1 : FROM busybox\n" + if !strings.Contains(out, exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + } +} + +func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) { + // Test to make sure the bad command is quoted with just "s and + // not as a Go []string + name := "testbuildbadrunerrmsg" + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN badEXE a1 \& a2 a3`, false) // tab between a2 and a3 + if err == nil { + c.Fatal("Should have failed to build") + } + shell := "/bin/sh -c" + exitCode := "127" + if daemonPlatform == "windows" { + shell = "cmd /S /C" + // architectural - Windows has to start the container to determine the exe is bad, Linux does not + exitCode = "1" + } + exp := `The command '` + shell + ` badEXE a1 \& a2 a3' returned a non-zero code: ` + exitCode + if !strings.Contains(out, exp) { + c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp) + } +} + +func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-build") + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, repoName) + + name := "testtrustedbuild" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err := runCommandWithOutput(buildCmd) + if err != nil { + c.Fatalf("Error running trusted build: %s\n%s", err, out) + } + + if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) { + c.Fatalf("Unexpected output on trusted build:\n%s", out) + } + + // We should also have a tag reference for the image. + if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 { + c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) + } + + // We should now be able to remove the tag reference. + if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 { + c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) + } +} + +func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL) + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, repoName) + + name := "testtrustedbuilduntrustedtag" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out) + } + + if !strings.Contains(out, "does not have trust data for") { + c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tempDir) + + // Make a real context directory in this temp directory with a simple + // Dockerfile. + realContextDirname := filepath.Join(tempDir, "context") + if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil { + c.Fatal(err) + } + + if err = ioutil.WriteFile( + filepath.Join(realContextDirname, "Dockerfile"), + []byte(` + FROM busybox + RUN echo hello world + `), + os.FileMode(0644), + ); err != nil { + c.Fatal(err) + } + + // Make a symlink to the real context directory. + contextSymlinkName := filepath.Join(tempDir, "context_link") + if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil { + c.Fatal(err) + } + + // Executing the build with the symlink as the specified context should + // *not* fail. + if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 { + c.Fatalf("build failed with exit status %d: %s", exitStatus, out) + } +} + +func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create the releases role + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the releases role + otherTag := fmt.Sprintf("%s:other", repoName) + dockerCmd(c, "tag", "busybox", otherTag) + + pushCmd := exec.Command(dockerBinary, "push", otherTag) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) + s.assertTargetInRoles(c, repoName, "other", "targets/releases") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + out, status := dockerCmd(c, "rmi", otherTag) + c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + + name := "testtrustedbuildreleasesrole" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err = runCommandWithOutput(buildCmd) + c.Assert(err, check.IsNil, check.Commentf("Trusted build failed: %s", out)) + c.Assert(out, checker.Contains, fmt.Sprintf("FROM %s@sha", repoName)) +} + +func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create a non-releases delegation role + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the other role + otherTag := fmt.Sprintf("%s:other", repoName) + dockerCmd(c, "tag", "busybox", otherTag) + + pushCmd := exec.Command(dockerBinary, "push", otherTag) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) + s.assertTargetInRoles(c, repoName, "other", "targets/other") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + out, status := dockerCmd(c, "rmi", otherTag) + c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + + name := "testtrustedbuildotherrole" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err = runCommandWithOutput(buildCmd) + c.Assert(err, check.NotNil, check.Commentf("Trusted build expected to fail: %s", out)) +} + +// Issue #15634: COPY fails when path starts with "null" +func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { + name := "testbuildnullstringinaddcopyvolume" + + volName := "nullvolume" + + if daemonPlatform == "windows" { + volName = `C:\\nullvolume` + } + + ctx, err := fakeContext(` + FROM busybox + + ADD null / + COPY nullfile / + VOLUME `+volName+` + `, + map[string]string{ + "null": "test1", + "nullfile": "test2", + }, + ) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestBuildStopSignal(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet + imgName := "test_build_stop_signal" + _, err := buildImage(imgName, + `FROM busybox + STOPSIGNAL SIGKILL`, + true) + c.Assert(err, check.IsNil) + res := inspectFieldJSON(c, imgName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, expected SIGKILL", res) + } + + containerName := "test-container-stop-signal" + dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top") + + res = inspectFieldJSON(c, containerName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, expected SIGKILL", res) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)} + var dockerfile string + if daemonPlatform == "windows" { + // Bugs in Windows busybox port - use the default base image and native cmd stuff + dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+` + ARG %s + RUN echo %%%s%% + CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey) + } else { + dockerfile = fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s + CMD echo $%s`, envKey, envKey, envKey) + + } + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) + } + + containerName := "bldargCont" + out, _ := dockerCmd(c, "run", "--name", containerName, imgName) + out = strings.Trim(out, " \r\n'") + if out != "" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envDef := "bar1" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s`, envKey, envDef) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) + } + + out, _ := dockerCmd(c, "history", "--no-trunc", imgName) + outputTabs := strings.Split(out, "\n")[1] + if !strings.Contains(outputTabs, envDef) { + c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s`, envKey, envKey) + + origImgID := "" + var err error + if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { + c.Fatal(err) + } + + imgNameCache := "bldargtestcachehit" + if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID != origImgID { + if err != nil { + c.Fatal(err) + } + c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + extraEnvKey := "foo1" + extraEnvVal := "bar1" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ARG %s + RUN echo $%s`, envKey, extraEnvKey, envKey) + + origImgID := "" + var err error + if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { + c.Fatal(err) + } + + imgNameCache := "bldargtestcachemiss" + args = append(args, "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal)) + if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { + if err != nil { + c.Fatal(err) + } + c.Fatalf("build used cache, expected a miss!") + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + newEnvVal := "bar1" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s`, envKey, envKey) + + origImgID := "" + var err error + if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { + c.Fatal(err) + } + + imgNameCache := "bldargtestcachemiss" + args = []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal), + } + if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { + if err != nil { + c.Fatal(err) + } + c.Fatalf("build used cache, expected a miss!") + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ENV %s %s + RUN echo $%s + CMD echo $%s + `, envKey, envKey, envValOveride, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ENV %s %s + ARG %s + RUN echo $%s + CMD echo $%s + `, envKey, envValOveride, envKey, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + + wdVar := "WDIR" + wdVal := "/tmp/" + addVar := "AFILE" + addVal := "addFile" + copyVar := "CFILE" + copyVal := "copyFile" + envVar := "foo" + envVal := "bar" + exposeVar := "EPORT" + exposeVal := "9999" + userVar := "USER" + userVal := "testUser" + volVar := "VOL" + volVal := "/testVol/" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), + "--build-arg", fmt.Sprintf("%s=%s", addVar, addVal), + "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), + "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), + "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), + "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), + "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), + } + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + ARG %s + WORKDIR ${%s} + ARG %s + ADD ${%s} testDir/ + ARG %s + COPY $%s testDir/ + ARG %s + ENV %s=${%s} + ARG %s + EXPOSE $%s + ARG %s + USER $%s + ARG %s + VOLUME ${%s}`, + wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, + envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar), + map[string]string{ + addVal: "some stuff", + copyVal: "some stuff", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(imgName, ctx, true, args...); err != nil { + c.Fatal(err) + } + + var resMap map[string]interface{} + var resArr []string + res := "" + res = inspectField(c, imgName, "Config.WorkingDir") + if res != filepath.ToSlash(filepath.Clean(wdVal)) { + c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res) + } + + inspectFieldAndMarshall(c, imgName, "Config.Env", &resArr) + + found := false + for _, v := range resArr { + if fmt.Sprintf("%s=%s", envVar, envVal) == v { + found = true + break + } + } + if !found { + c.Fatalf("Config.Env value mismatch. Expected to exist: %s=%s, got: %v", + envVar, envVal, resArr) + } + + inspectFieldAndMarshall(c, imgName, "Config.ExposedPorts", &resMap) + if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok { + c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap) + } + + res = inspectField(c, imgName, "Config.User") + if res != userVal { + c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res) + } + + inspectFieldAndMarshall(c, imgName, "Config.Volumes", &resMap) + if _, ok := resMap[volVal]; !ok { + c.Fatalf("Config.Volumes value mismatch. Expected volume: %s, got: %v", volVal, resMap) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + envKey := "foo" + envVal := "bar" + envKey1 := "foo1" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ENV %s %s + ENV %s ${%s} + RUN echo $%s + CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + ARG %s + CMD echo $%s`, envKey, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("able to access environment variable in output: %q expected to be missing", out) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support --build-arg + imgName := "bldargtest" + envKey := "HTTP_PROXY" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s + ENV %s $%s + RUN echo $%s + CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 1 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + + warnStr := "[Warning] One or more build-args" + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); !strings.Contains(out, warnStr) { + c.Fatalf("build completed without warning: %q %q", out, err) + } else if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + +} + +func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + args := []string{ + "build", + "--build-arg", fmt.Sprintf("FOO1=fromcmd"), + "--build-arg", fmt.Sprintf("FOO2="), + "--build-arg", fmt.Sprintf("FOO3"), // set in env + "--build-arg", fmt.Sprintf("FOO4"), // not set in env + "--build-arg", fmt.Sprintf("FOO5=fromcmd"), + // FOO6 is not set at all + "--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning + "--build-arg", fmt.Sprintf("FOO8="), // should produce a warning + "--build-arg", fmt.Sprintf("FOO9"), // should produce a warning + ".", + } + + dockerfile := fmt.Sprintf(`FROM busybox + ARG FOO1=fromfile + ARG FOO2=fromfile + ARG FOO3=fromfile + ARG FOO4=fromfile + ARG FOO5 + ARG FOO6 + RUN env + RUN [ "$FOO1" == "fromcmd" ] + RUN [ "$FOO2" == "" ] + RUN [ "$FOO3" == "fromenv" ] + RUN [ "$FOO4" == "fromfile" ] + RUN [ "$FOO5" == "fromcmd" ] + # The following should not exist at all in the env + RUN [ "$(env | grep FOO6)" == "" ] + RUN [ "$(env | grep FOO7)" == "" ] + RUN [ "$(env | grep FOO8)" == "" ] + RUN [ "$(env | grep FOO9)" == "" ] + `) + + ctx, err := fakeContext(dockerfile, nil) + c.Assert(err, check.IsNil) + defer ctx.Close() + + cmd := exec.Command(dockerBinary, args...) + cmd.Dir = ctx.Dir + cmd.Env = append(os.Environ(), + "FOO1=fromenv", + "FOO2=fromenv", + "FOO3=fromenv") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + // Now check to make sure we got a warning msg about unused build-args + i := strings.Index(out, "[Warning]") + if i < 0 { + c.Fatalf("Missing the build-arg warning in %q", out) + } + + out = out[i:] // "out" should contain just the warning message now + + // These were specified on a --build-arg but no ARG was in the Dockerfile + c.Assert(out, checker.Contains, "FOO7") + c.Assert(out, checker.Contains, "FOO8") + c.Assert(out, checker.Contains, "FOO9") +} + +func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + envKey3 := "foo3" + args := []string{} + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s="" + ARG %s='' + ARG %s="''" + ARG %s='""' + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3, + envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3, + envKey2, envKey3) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + args := []string{} + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s= + ARG %s="" + ARG %s='' + RUN [ "$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + args := []string{} + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN env`, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envKey) != 1 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", out) + } +} + +func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { + volName := "testname:/foo" + + if daemonPlatform == "windows" { + volName = "testname:C:\\foo" + } + dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops") + + dockerFile := `FROM busybox + VOLUME ` + volName + ` + RUN ls /foo/oops + ` + _, err := buildImage("test", dockerFile, false) + c.Assert(err, check.NotNil, check.Commentf("image build should have failed")) +} + +func (s *DockerSuite) TestBuildTagEvent(c *check.C) { + since := daemonUnixTime(c) + + dockerFile := `FROM busybox + RUN echo events + ` + _, err := buildImage("test", dockerFile, false) + c.Assert(err, check.IsNil) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image") + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, "test:latest", "image") + var foundTag bool + for _, a := range actions { + if a == "tag" { + foundTag = true + break + } + } + + c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out)) +} + +// #15780 +func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER test-15780 + ` + cmd := exec.Command(dockerBinary, "build", "-t", "tag1", "-t", "tag2:v2", + "-t", "tag1:latest", "-t", "tag1", "--no-cache", "-") + cmd.Stdin = strings.NewReader(dockerfile) + _, err := runCommand(cmd) + c.Assert(err, check.IsNil) + + id1, err := getIDByName("tag1") + c.Assert(err, check.IsNil) + id2, err := getIDByName("tag2:v2") + c.Assert(err, check.IsNil) + c.Assert(id1, check.Equals, id2) +} + +// #17290 +func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY . ./`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + // warm up cache + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + // add new file to context, should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) + c.Assert(err, checker.IsNil) + + _, out, err := buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Not(checker.Contains), "Using cache") + +} + +func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY asymlink target`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + id, err := buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target") + c.Assert(out, checker.Matches, "bar") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + id, out, err = buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Using cache") + + out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target") + c.Assert(out, checker.Matches, "baz") +} + +func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY asymlink /`, + map[string]string{ + "foo/abc": "bar", + "foo/def": "baz", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + id, err := buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") + c.Assert(out, checker.Matches, "barbaz") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) + c.Assert(err, checker.IsNil) + + id, out, err = buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Using cache") + + out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") + c.Assert(out, checker.Matches, "barbax") + +} + +// TestBuildSymlinkBasename tests that target file gets basename from symlink, +// not from the target file. +func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY asymlink /`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + id, err := buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink") + c.Assert(out, checker.Matches, "bar") + +} + +// #17827 +func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { + name := "testbuildrootsource" + ctx, err := fakeContext(` + FROM busybox + COPY / /data`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + // warm up cache + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + // change file, should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + _, out, err := buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Not(checker.Contains), "Using cache") +} + +// #19375 +func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { + cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git") + cmd.Env = append(cmd.Env, "PATH=") + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") + + cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git") + cmd.Env = append(cmd.Env, "PATH=") + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") +} + +// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir +func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildworkdirwindowspath" + + _, err := buildImage(name, ` + FROM `+WindowsBaseImage+` + RUN mkdir C:\\work + WORKDIR C:\\work + RUN if "%CD%" NEQ "C:\work" exit -1 + `, true) + + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildLabel(c *check.C) { + name := "testbuildlabel" + testLabel := "foo" + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo +`, false, "--label", testLabel) + + c.Assert(err, checker.IsNil) + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + } +} + +func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) { + name := "testbuildlabel" + + _, err := buildImage(name, "FROM busybox", false, "--label", "foo=bar") + + c.Assert(err, checker.IsNil) + + res, err := inspectImage(name, "json .Config.Labels") + c.Assert(err, checker.IsNil) + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + v, ok := labels["foo"] + if !ok { + c.Fatal("label `foo` not found in image") + } + c.Assert(v, checker.Equals, "bar") +} + +func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) { + name := "testbuildlabelcachecommit" + testLabel := "foo" + + if _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo + `, false); err != nil { + c.Fatal(err) + } + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo +`, true, "--label", testLabel) + + c.Assert(err, checker.IsNil) + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + } +} + +func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) { + name := "testbuildlabelmultiple" + testLabels := map[string]string{ + "foo": "bar", + "123": "456", + } + + labelArgs := []string{} + + for k, v := range testLabels { + labelArgs = append(labelArgs, "--label", k+"="+v) + } + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo +`, false, labelArgs...) + + if err != nil { + c.Fatal("error building image with labels", err) + } + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + for k, v := range testLabels { + if x, ok := labels[k]; !ok || x != v { + c.Fatalf("label %s=%s not found in image", k, v) + } + } +} + +func (s *DockerSuite) TestBuildLabelOverwrite(c *check.C) { + name := "testbuildlabeloverwrite" + testLabel := "foo" + testValue := "bar" + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL `+testLabel+`+ foo +`, false, []string{"--label", testLabel + "=" + testValue}...) + + if err != nil { + c.Fatal("error building image with labels", err) + } + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + v, ok := labels[testLabel] + if !ok { + c.Fatal("label not found in image") + } + + if v != testValue { + c.Fatal("label not overwritten") + } +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) { + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + baseImage := privateRegistryURL + "/baseimage" + + _, err := buildImage(baseImage, ` + FROM busybox + ENV env1 val1 + `, true) + + c.Assert(err, checker.IsNil) + + dockerCmd(c, "push", baseImage) + dockerCmd(c, "rmi", baseImage) + + _, err = buildImage(baseImage, fmt.Sprintf(` + FROM %s + ENV env2 val2 + `, baseImage), true) + + c.Assert(err, checker.IsNil) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + // make sure the image is pulled when building + dockerCmd(c, "rmi", repoName) + + buildCmd := exec.Command(dockerBinary, "--config", tmp, "build", "-") + buildCmd.Stdin = strings.NewReader(fmt.Sprintf("FROM %s", repoName)) + + out, _, err := runCommandWithOutput(buildCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +// Test cases in #22036 +func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { + // Command line option labels will always override + name := "scratchy" + expected := `{"bar":"from-flag","foo":"from-flag"}` + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`, + true, "--label", "foo=from-flag", "--label", "bar=from-flag") + c.Assert(err, check.IsNil) + + res := inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + name = "from" + expected = `{"foo":"from-dockerfile"}` + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + LABEL foo from-dockerfile`, + true) + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option label will override even via `FROM` + name = "new" + expected = `{"bar":"from-dockerfile2","foo":"new"}` + _, err = buildImage(name, + `FROM from + LABEL bar from-dockerfile2`, + true, "--label", "foo=new") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + name = "scratchy2" + expected = `{"bar":"","foo":""}` + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`, + true, "--label", "foo", "--label", "bar=") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + // This time is for inherited images + name = "new2" + expected = `{"bar":"","foo":""}` + _, err = buildImage(name, + `FROM from + LABEL bar from-dockerfile2`, + true, "--label", "foo=", "--label", "bar") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with only `FROM` + name = "scratchy" + expected = `{"bar":"from-flag","foo":"from-flag"}` + _, err = buildImage(name, + `FROM `+minimalBaseImage(), + true, "--label", "foo=from-flag", "--label", "bar=from-flag") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with env var + name = "scratchz" + expected = `{"bar":"$PATH"}` + _, err = buildImage(name, + `FROM `+minimalBaseImage(), + true, "--label", "bar=$PATH") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + +} + +// Test case for #22855 +func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) { + name := "test-delete-committed-file" + + _, err := buildImage(name, + `FROM busybox + RUN echo test > file + RUN test -e file + RUN rm file + RUN sh -c "! test -e file"`, false) + if err != nil { + c.Fatal(err) + } +} + +// #20083 +func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) { + // TODO Windows: Figure out why this test is flakey on TP5. If you add + // something like RUN sleep 5, or even RUN ls /tmp after the ADD line, + // it is more reliable, but that's not a good fix. + testRequires(c, DaemonIsLinux) + + name := "testbuilddockerignorecleanpaths" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(ls -la /tmp/#1)" + RUN sh -c "(! ls -la /tmp/#2)" + RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (ls /tmp/dir1/foo)"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "foo", + "foo2": "foo2", + "dir1/foo": "foo in dir1", + "#1": "# file 1", + "#2": "# file 2", + ".dockerignore": `# Visual C++ cache files +# because we have git ;-) +# The above comment is from #20083 +foo +#dir1/foo +foo2 +# The following is considered as comment as # is at the beginning +#1 +# The following is not considered as comment as # is not at the beginning + #2 +`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Test case for #23221 +func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) { + name := "test-with-utf8-bom" + dockerfile := []byte(`FROM busybox`) + bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...) + ctx, err := fakeContextFromNewTempDir() + c.Assert(err, check.IsNil) + defer ctx.Close() + err = ctx.addFile("Dockerfile", bomDockerfile) + c.Assert(err, check.IsNil) + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) +} + +// Test case for UTF-8 BOM in .dockerignore, related to #23221 +func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) { + name := "test-with-utf8-bom-dockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls -la /tmp + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + dockerignore := []byte("./Dockerfile\n") + bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...) + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + err = ctx.addFile(".dockerignore", bomDockerignore) + c.Assert(err, check.IsNil) + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +// #22489 Shell test to confirm config gets updated correctly +func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) { + name := "testbuildshellupdatesconfig" + + expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]` + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + SHELL ["foo", "-bar"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + if res != expected { + c.Fatalf("%s, expected %s", res, expected) + } + res = inspectFieldJSON(c, name, "ContainerConfig.Shell") + if res != `["foo","-bar"]` { + c.Fatalf(`%s, expected ["foo","-bar"]`, res) + } +} + +// #22489 Changing the shell multiple times and CMD after. +func (s *DockerSuite) TestBuildShellMultiple(c *check.C) { + name := "testbuildshellmultiple" + + _, out, _, err := buildImageWithStdoutStderr(name, + `FROM busybox + RUN echo defaultshell + SHELL ["echo"] + RUN echoshell + SHELL ["ls"] + RUN -l + CMD -l`, + true) + if err != nil { + c.Fatal(err) + } + + // Must contain 'defaultshell' twice + if len(strings.Split(out, "defaultshell")) != 3 { + c.Fatalf("defaultshell should have appeared twice in %s", out) + } + + // Must contain 'echoshell' twice + if len(strings.Split(out, "echoshell")) != 3 { + c.Fatalf("echoshell should have appeared twice in %s", out) + } + + // Must contain "total " (part of ls -l) + if !strings.Contains(out, "total ") { + c.Fatalf("%s should have contained 'total '", out) + } + + // A container started from the image uses the shell-form CMD. + // Last shell is ls. CMD is -l. So should contain 'total '. + outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489. Changed SHELL with ENTRYPOINT +func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) { + name := "testbuildshellentrypoint" + + _, err := buildImage(name, + `FROM busybox + SHELL ["ls"] + ENTRYPOINT -l`, + true) + if err != nil { + c.Fatal(err) + } + + // A container started from the image uses the shell-form ENTRYPOINT. + // Shell is ls. ENTRYPOINT is -l. So should contain 'total '. + outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489 Shell test to confirm shell is inherited in a subsequent build +func (s *DockerSuite) TestBuildShellInherited(c *check.C) { + name1 := "testbuildshellinherited1" + _, err := buildImage(name1, + `FROM busybox + SHELL ["ls"]`, + true) + if err != nil { + c.Fatal(err) + } + + name2 := "testbuildshellinherited2" + _, out, _, err := buildImageWithStdoutStderr(name2, + `FROM `+name1+` + RUN -l`, + true) + if err != nil { + c.Fatal(err) + } + + // ls -l has "total " followed by some number in it, ls without -l does not. + if !strings.Contains(out, "total ") { + c.Fatalf("Should have seen total in 'ls -l'.\n%s", out) + } +} + +// #22489 Shell test to confirm non-JSON doesn't work +func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) { + name := "testbuildshellnotjson" + + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + sHeLl exec -form`, // Casing explicit to ensure error is upper-cased. + true) + if err == nil { + c.Fatal("Image build should have failed") + } + if !strings.Contains(err.Error(), "SHELL requires the arguments to be in JSON form") { + c.Fatal("Error didn't indicate that arguments must be in JSON form") + } +} + +// #22489 Windows shell test to confirm native is powershell if executing a PS command +// This would error if the default shell were still cmd. +func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildshellpowershell" + _, out, err := buildImageWithOut(name, + `FROM `+minimalBaseImage()+` + SHELL ["powershell", "-command"] + RUN Write-Host John`, + true) + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "\nJohn\n") { + c.Fatalf("Line with 'John' not found in output %q", out) + } +} + +// Verify that escape is being correctly applied to words when escape directive is not \. +// Tests WORKDIR, ADD +func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildescapenotbackslashwordtesta" + _, out, err := buildImageWithOut(name, + `# escape= `+"`"+` + FROM `+minimalBaseImage()+` + WORKDIR c:\windows + RUN dir /w`, + true) + if err != nil { + c.Fatal(err) + } + if !strings.Contains(strings.ToLower(out), "[system32]") { + c.Fatalf("Line with '[windows]' not found in output %q", out) + } + + name = "testbuildescapenotbackslashwordtestb" + _, out, err = buildImageWithOut(name, + `# escape= `+"`"+` + FROM `+minimalBaseImage()+` + SHELL ["powershell.exe"] + WORKDIR c:\foo + ADD Dockerfile c:\foo\ + RUN dir Dockerfile`, + true) + if err != nil { + c.Fatal(err) + } + if !strings.Contains(strings.ToLower(out), "-a----") { + c.Fatalf("Line with '-a----' not found in output %q", out) + } + +} + +// #22868. Make sure shell-form CMD is marked as escaped in the config of the image +func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildcmdshellescaped" + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + CMD "ipconfig" + `, true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.ArgsEscaped") + if res != "true" { + c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res) + } + dockerCmd(c, "run", "--name", "inspectme", name) + dockerCmd(c, "wait", "inspectme") + res = inspectFieldJSON(c, name, "Config.Cmd") + + if res != `["cmd","/S","/C","\"ipconfig\""]` { + c.Fatalf("CMD was not escaped Config.Cmd: got %v", res) + } +} + +// Test case for #24912. +func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) { + name := "testbuildstepswithprogress" + + totalRun := 5 + _, out, err := buildImageWithOut(name, "FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun), true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun)) + for i := 2; i <= 1+totalRun; i++ { + c.Assert(out, checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun)) + } +} + +func (s *DockerSuite) TestBuildWithFailure(c *check.C) { + name := "testbuildwithfailure" + + // First test case can only detect `nobody` in runtime so all steps will show up + buildCmd := "FROM busybox\nRUN nobody" + _, stdout, _, err := buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm") + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(stdout, checker.Contains, "Step 2/2 : RUN nobody") + + // Second test case `FFOM` should have been detected before build runs so no steps + buildCmd = "FFOM nobody\nRUN nobody" + _, stdout, _, err = buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm") + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Not(checker.Contains), "Step 1/2 : FROM busybox") + c.Assert(stdout, checker.Not(checker.Contains), "Step 2/2 : RUN nobody") +} + +func (s *DockerSuite) TestBuildCacheFromEqualDiffIDsLength(c *check.C) { + dockerfile := ` + FROM busybox + RUN echo "test" + ENTRYPOINT ["sh"]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + id1, err := buildImageFromContext("build1", ctx, true) + c.Assert(err, checker.IsNil) + + // rebuild with cache-from + id2, out, err := buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 2) +} + +func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { + testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows + dockerfile := ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch bax` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + id1, err := buildImageFromContext("build1", ctx, true) + c.Assert(err, checker.IsNil) + + // rebuild with cache-from + id2, out, err := buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) + dockerCmd(c, "rmi", "build2") + + // no cache match with unknown source + id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=nosuchtag") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 0) + dockerCmd(c, "rmi", "build2") + + // clear parent images + tempDir, err := ioutil.TempDir("", "test-build-cache-from-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, "img.tar") + dockerCmd(c, "save", "-o", tempFile, "build1") + dockerCmd(c, "rmi", "build1") + dockerCmd(c, "load", "-i", tempFile) + parentID, _ := dockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1") + c.Assert(strings.TrimSpace(parentID), checker.Equals, "") + + // cache still applies without parents + id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) + history1, _ := dockerCmd(c, "history", "-q", "build2") + + // Retry, no new intermediate images + id3, out, err := buildImageFromContextWithOut("build3", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id3) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) + history2, _ := dockerCmd(c, "history", "-q", "build3") + + c.Assert(history1, checker.Equals, history2) + dockerCmd(c, "rmi", "build2") + dockerCmd(c, "rmi", "build3") + dockerCmd(c, "rmi", "build1") + dockerCmd(c, "load", "-i", tempFile) + + // Modify file, everything up to last command and layers are reused + dockerfile = ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch newfile` + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644) + c.Assert(err, checker.IsNil) + + id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 2) + + layers1Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1") + layers2Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2") + + var layers1 []string + var layers2 []string + c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil) + c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil) + + c.Assert(len(layers1), checker.Equals, len(layers2)) + for i := 0; i < len(layers1)-1; i++ { + c.Assert(layers1[i], checker.Equals, layers2[i]) + } + c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1]) +} + +func (s *DockerSuite) TestBuildNetNone(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "testbuildnetnone" + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN ping -c 1 8.8.8.8 + `, true, "--network=none") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unreachable") +} + +func (s *DockerSuite) TestBuildNetContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", "1234", "-e", "hostname") + + name := "testbuildnetcontainer" + out, err := buildImage(name, ` + FROM busybox + RUN nc localhost 1234 > /otherhost + `, true, "--network=container:"+strings.TrimSpace(id)) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost") + c.Assert(strings.TrimSpace(host), check.Equals, "foobar") +} + +func (s *DockerSuite) TestBuildSquashParent(c *check.C) { + testRequires(c, ExperimentalDaemon) + dockerFile := ` + FROM busybox + RUN echo hello > /hello + RUN echo world >> /hello + RUN echo hello > /remove_me + ENV HELLO world + RUN rm /remove_me + ` + // build and get the ID that we can use later for history comparison + origID, err := buildImage("test", dockerFile, false) + c.Assert(err, checker.IsNil) + + // build with squash + id, err := buildImage("test", dockerFile, true, "--squash") + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld") + + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! -f /remove_me ]") + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`) + + // make sure the ID produced is the ID of the tag we specified + inspectID, err := inspectImage("test", ".ID") + c.Assert(err, checker.IsNil) + c.Assert(inspectID, checker.Equals, id) + + origHistory, _ := dockerCmd(c, "history", origID) + testHistory, _ := dockerCmd(c, "history", "test") + + splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n") + splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n") + c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1) + + out, err = inspectImage(id, "len .RootFS.Layers") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "3") +} + +func (s *DockerSuite) TestBuildContChar(c *check.C) { + name := "testbuildcontchar" + + _, out, err := buildImageWithOut(name, + `FROM busybox\`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/1 : FROM busybox") + + _, out, err = buildImageWithOut(name, + `FROM busybox + RUN echo hi \`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi\n") + + _, out, err = buildImageWithOut(name, + `FROM busybox + RUN echo hi \\`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi \\\n") + + _, out, err = buildImageWithOut(name, + `FROM busybox + RUN echo hi \\\`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi \\\\\n") +} + +// TestBuildOpaqueDirectory tests that a build succeeds which +// creates opaque directories. +// See https://github.com/docker/docker/issues/25244 +func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerFile := ` + FROM busybox + RUN mkdir /dir1 && touch /dir1/f1 + RUN rm -rf /dir1 && mkdir /dir1 && touch /dir1/f2 + RUN touch /dir1/f3 + RUN [ -f /dir1/f2 ] + ` + + // Test that build succeeds, last command fails if opaque directory + // was not handled correctly + _, err := buildImage("testopaquedirectory", dockerFile, false) + c.Assert(err, checker.IsNil) +} + +// Windows test for USER in dockerfile +func (s *DockerSuite) TestBuildWindowsUser(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsuser" + _, out, err := buildImageWithOut(name, + `FROM `+WindowsBaseImage+` + RUN net user user /add + USER user + RUN set username + `, + true) + if err != nil { + c.Fatal(err) + } + c.Assert(strings.ToLower(out), checker.Contains, "username=user") +} + +// Verifies if COPY file . when WORKDIR is set to a non-existing directory, +// the directory is created and the file is copied into the directory, +// as opposed to the file being copied as a file with the name of the +// directory. Fix for 27545 (found on Windows, but regression good for Linux too). +// Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514. +func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) { + name := "testbuildcopyfiledotwithworkdir" + ctx, err := fakeContext(`FROM busybox +WORKDIR /foo +COPY file . +RUN ["cat", "/foo/file"] +`, + map[string]string{}) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if err := ctx.Add("file", "content"); err != nil { + c.Fatal(err) + } + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Case-insensitive environment variables on Windows +func (s *DockerSuite) TestBuildWindowsEnvCaseInsensitive(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsenvcaseinsensitive" + if _, err := buildImage(name, ` + FROM `+WindowsBaseImage+` + ENV FOO=bar foo=bar + `, true); err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Env") + if res != `["foo=bar"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped. + c.Fatalf("Case insensitive environment variables on Windows failed. Got %s", res) + } +} + +// Test case for 29667 +func (s *DockerSuite) TestBuildWorkdirImageCmd(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "testworkdirimagecmd" + dockerfile := ` +FROM busybox +WORKDIR /foo/bar +` + out, err := buildImage(image, dockerfile, true) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + + out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + c.Assert(strings.TrimSpace(out), checker.Equals, `["sh"]`) + + image = "testworkdirlabelimagecmd" + dockerfile = ` +FROM busybox +WORKDIR /foo/bar +LABEL a=b +` + out, err = buildImage(image, dockerfile, true) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + + out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + c.Assert(strings.TrimSpace(out), checker.Equals, `["sh"]`) +} + +// Test case for 28902/28090 +func (s *DockerSuite) TestBuildWorkdirCmd(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerFile := ` + FROM golang:1.7-alpine + WORKDIR / + ` + _, err := buildImage("testbuildworkdircmd", dockerFile, true) + c.Assert(err, checker.IsNil) + + _, out, err := buildImageWithOut("testbuildworkdircmd", dockerFile, true) + c.Assert(err, checker.IsNil) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 1) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_build_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_build_unix_test.go new file mode 100644 index 0000000..0205a92 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_build_unix_test.go @@ -0,0 +1,207 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/integration" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/go-units" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { + testRequires(c, cpuCfsQuota) + name := "testbuildresourceconstraints" + + ctx, err := fakeContext(` + FROM hello-world:frozen + RUN ["/hello"] + `, map[string]string{}) + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, ".") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "ps", "-lq") + cID := strings.TrimSpace(out) + + type hostConfig struct { + Memory int64 + MemorySwap int64 + CpusetCpus string + CpusetMems string + CPUShares int64 + CPUQuota int64 + Ulimits []*units.Ulimit + } + + cfg := inspectFieldJSON(c, cID, "HostConfig") + + var c1 hostConfig + err = json.Unmarshal([]byte(cfg), &c1) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c1.Memory, checker.Equals, int64(64*1024*1024), check.Commentf("resource constraints not set properly for Memory")) + c.Assert(c1.MemorySwap, checker.Equals, int64(-1), check.Commentf("resource constraints not set properly for MemorySwap")) + c.Assert(c1.CpusetCpus, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetCpus")) + c.Assert(c1.CpusetMems, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetMems")) + c.Assert(c1.CPUShares, checker.Equals, int64(100), check.Commentf("resource constraints not set properly for CPUShares")) + c.Assert(c1.CPUQuota, checker.Equals, int64(8000), check.Commentf("resource constraints not set properly for CPUQuota")) + c.Assert(c1.Ulimits[0].Name, checker.Equals, "nofile", check.Commentf("resource constraints not set properly for Ulimits")) + c.Assert(c1.Ulimits[0].Hard, checker.Equals, int64(42), check.Commentf("resource constraints not set properly for Ulimits")) + + // Make sure constraints aren't saved to image + dockerCmd(c, "run", "--name=test", name) + + cfg = inspectFieldJSON(c, "test", "HostConfig") + + var c2 hostConfig + err = json.Unmarshal([]byte(cfg), &c2) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c2.Memory, check.Not(checker.Equals), int64(64*1024*1024), check.Commentf("resource leaked from build for Memory")) + c.Assert(c2.MemorySwap, check.Not(checker.Equals), int64(-1), check.Commentf("resource leaked from build for MemorySwap")) + c.Assert(c2.CpusetCpus, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetCpus")) + c.Assert(c2.CpusetMems, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetMems")) + c.Assert(c2.CPUShares, check.Not(checker.Equals), int64(100), check.Commentf("resource leaked from build for CPUShares")) + c.Assert(c2.CPUQuota, check.Not(checker.Equals), int64(8000), check.Commentf("resource leaked from build for CPUQuota")) + c.Assert(c2.Ulimits, checker.IsNil, check.Commentf("resource leaked from build for Ulimits")) +} + +func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddown" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD foo /bar/ + RUN [ $(stat -c %U:%G "/bar") = 'root:root' ] + RUN [ $(stat -c %U:%G "/bar/foo") = 'root:root' ] + ` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testFile, err := os.Create(filepath.Join(tmpDir, "foo")) + if err != nil { + c.Fatalf("failed to create foo file: %v", err) + } + defer testFile.Close() + + chownCmd := exec.Command("chown", "daemon:daemon", "foo") + chownCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(chownCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddChangeOwnership: %v", err) + } +} + +// Test that an infinite sleep during a build is killed if the client disconnects. +// This test is fairly hairy because there are lots of ways to race. +// Strategy: +// * Monitor the output of docker events starting from before +// * Run a 1-year-long sleep from a docker build. +// * When docker events sees container start, close the "docker build" command +// * Wait for docker events to emit a dying event. +func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcancellation" + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + // (Note: one year, will never finish) + ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") + buildCmd.Dir = ctx.Dir + + stdoutBuild, err := buildCmd.StdoutPipe() + if err := buildCmd.Start(); err != nil { + c.Fatalf("failed to run build: %s", err) + } + + matchCID := regexp.MustCompile("Running in (.+)") + scanner := bufio.NewScanner(stdoutBuild) + + outputBuffer := new(bytes.Buffer) + var buildID string + for scanner.Scan() { + line := scanner.Text() + outputBuffer.WriteString(line) + outputBuffer.WriteString("\n") + if matches := matchCID.FindStringSubmatch(line); len(matches) > 0 { + buildID = matches[1] + break + } + } + + if buildID == "" { + c.Fatalf("Unable to find build container id in build output:\n%s", outputBuffer.String()) + } + + testActions := map[string]chan bool{ + "start": make(chan bool, 1), + "die": make(chan bool, 1), + } + + matcher := matchEventLine(buildID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + // Send a kill to the `docker build` command. + // Causes the underlying build to be cancelled due to socket close. + if err := buildCmd.Process.Kill(); err != nil { + c.Fatalf("error killing build command: %s", err) + } + + // Get the exit status of `docker build`, check it exited because killed. + if err := buildCmd.Wait(); err != nil && !integration.IsKilled(err) { + c.Fatalf("wait failed during build run: %T %s", err, err) + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_by_digest_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_by_digest_test.go new file mode 100644 index 0000000..c2d8546 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_by_digest_test.go @@ -0,0 +1,693 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +var ( + remoteRepoName = "dockercli/busybox-by-dgst" + repoName = fmt.Sprintf("%s/%s", privateRegistryURL, remoteRepoName) + pushDigestRegex = regexp.MustCompile("[\\S]+: digest: ([\\S]+) size: [0-9]+") + digestRegex = regexp.MustCompile("Digest: ([\\S]+)") +) + +func setupImage(c *check.C) (digest.Digest, error) { + return setupImageWithTag(c, "latest") +} + +func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { + containerName := "busyboxbydigest" + + dockerCmd(c, "run", "-e", "digest=1", "--name", containerName, "busybox") + + // tag the image to upload it to the private registry + repoAndTag := repoName + ":" + tag + out, _, err := dockerCmdWithError("commit", containerName, repoAndTag) + c.Assert(err, checker.IsNil, check.Commentf("image tagging failed: %s", out)) + + // delete the container as we don't need it any more + err = deleteContainer(containerName) + c.Assert(err, checker.IsNil) + + // push the image + out, _, err = dockerCmdWithError("push", repoAndTag) + c.Assert(err, checker.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) + + // delete our local repo that we previously tagged + rmiout, _, err := dockerCmdWithError("rmi", repoAndTag) + c.Assert(err, checker.IsNil, check.Commentf("error deleting images prior to real test: %s", rmiout)) + + matches := pushDigestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out)) + pushDigest := matches[1] + + return digest.Digest(pushDigest), nil +} + +func testPullByTagDisplaysDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the tag + out, _ := dockerCmd(c, "pull", repoName) + + // the pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pull digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func testPullByDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + out, _ := dockerCmd(c, "pull", imageReference) + + // the pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pull digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func testPullByDigestNoFallback(c *check.C) { + testRequires(c, DaemonIsLinux) + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName) + out, _, err := dockerCmdWithError("pull", imageReference) + c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) + c.Assert(out, checker.Contains, fmt.Sprintf("manifest for %s not found", imageReference), check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) +} + +func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "createByDigest" + dockerCmd(c, "create", "--name", containerName, imageReference) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "runByDigest" + out, _ := dockerCmd(c, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest") + + foundRegex := regexp.MustCompile("found=([^\n]+)") + matches := foundRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + c.Assert(matches[1], checker.Equals, "1", check.Commentf("Expected %q, got %q", "1", matches[1])) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // make sure inspect runs ok + inspectField(c, imageReference, "Id") + + // do the delete + err = deleteImages(imageReference) + c.Assert(err, checker.IsNil, check.Commentf("unexpected error deleting image")) + + // try to inspect again - it should error this time + _, err = inspectFieldWithError(imageReference, "Id") + //unexpected nil err trying to inspect what should be a non-existent image + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "No such object") +} + +func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // get the image id + imageID := inspectField(c, imageReference, "Id") + + // do the build + name := "buildbydigest" + _, err = buildImage(name, fmt.Sprintf( + `FROM %s + CMD ["/bin/echo", "Hello World"]`, imageReference), + true) + c.Assert(err, checker.IsNil) + + // get the build's image id + res := inspectField(c, name, "Config.Image") + // make sure they match + c.Assert(res, checker.Equals, imageID) +} + +func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // tag it + tag := "tagbydigest" + dockerCmd(c, "tag", imageReference, tag) + + expectedID := inspectField(c, imageReference, "Id") + + tagID := inspectField(c, tag, "Id") + c.Assert(tagID, checker.Equals, expectedID) +} + +func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "images") + c.Assert(out, checker.Not(checker.Contains), "DIGEST", check.Commentf("list output should not have contained DIGEST header")) +} + +func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) { + + // setup image1 + digest1, err := setupImageWithTag(c, "tag1") + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "tag2") + //error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag1 + dockerCmd(c, "pull", repoName+":tag1") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, AND repo, , digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, , digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag 2 + dockerCmd(c, "pull", repoName+":tag2") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + // make sure busybox has tag, but not digest + busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*\s`) + c.Assert(busyboxRe.MatchString(out), checker.True, check.Commentf("expected %q: %s", busyboxRe.String(), out)) +} + +func (s *DockerRegistrySuite) TestListDanglingImagesWithDigests(c *check.C) { + // setup image1 + digest1, err := setupImageWithTag(c, "dangle1") + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "dangle2") + //error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure repo shown, tag=, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle1 tag + dockerCmd(c, "pull", repoName+":dangle1") + + // list images + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, tag, AND repo, , digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*dangle1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, , digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle2 tag + dockerCmd(c, "pull", repoName+":dangle2") + + // list images, show tagged images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*dangle2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images, no longer dangling, should not match + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + c.Assert(reWithDigest2.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest2.String(), out)) +} + +func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, check.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "inspect", imageReference) + + var imageJSON []types.ImageInspect + err = json.Unmarshal([]byte(out), &imageJSON) + c.Assert(err, checker.IsNil) + c.Assert(imageJSON, checker.HasLen, 1) + c.Assert(imageJSON[0].RepoDigests, checker.HasLen, 1) + c.Assert(stringutils.InSlice(imageJSON[0].RepoDigests, imageReference), checker.Equals, true) +} + +func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // build an image from it + imageName1 := "images_ps_filter_test" + _, err = buildImage(imageName1, fmt.Sprintf( + `FROM %s + LABEL match me 1`, imageReference), true) + c.Assert(err, checker.IsNil) + + // run a container based on that + dockerCmd(c, "run", "--name=test1", imageReference, "echo", "hello") + expectedID, err := getIDByName("test1") + c.Assert(err, check.IsNil) + + // run a container based on the a descendant of that too + dockerCmd(c, "run", "--name=test2", imageName1, "echo", "hello") + expectedID1, err := getIDByName("test2") + c.Assert(err, check.IsNil) + + expectedIDs := []string{expectedID, expectedID1} + + // Invalid imageReference + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", fmt.Sprintf("--filter=ancestor=busybox@%s", digest)) + // Filter container for ancestor filter should be empty + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Valid imageReference + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference) + checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs) +} + +func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + // just in case... + + dockerCmd(c, "tag", imageReference, repoName+":sometag") + + imageID := inspectField(c, imageReference, "Id") + + dockerCmd(c, "rmi", imageID) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repoName + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted only repoTag2, because there's another tag + inspectField(c, repoTag, "Id") + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndMultiRepoTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + repo2 := fmt.Sprintf("%s/%s", repoName, "repo2") + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repo2 + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted repoTag and image reference, but left repoTag2 + inspectField(c, repoTag2, "Id") + _, err = inspectFieldWithError(imageReference, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image digest reference should have been removed")) + + _, err = inspectFieldWithError(repoTag, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image tag reference should have been removed")) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.Layers[0].Digest = digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.FSLayers[0] = schema1.FSLayer{ + BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), + } + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.Layers[0].Digest + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.FSLayers[0].BlobSum + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_commit_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_commit_test.go new file mode 100644 index 0000000..8008ae1 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_commit_test.go @@ -0,0 +1,157 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) { + out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + + cleanedImageID := strings.TrimSpace(out) + + dockerCmd(c, "inspect", cleanedImageID) +} + +func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", "-p=false", cleanedContainerID) + + cleanedImageID := strings.TrimSpace(out) + + dockerCmd(c, "inspect", cleanedImageID) +} + +//test commit a paused container should not unpause it after commit +func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + defer unpauseAllContainers() + out, _ := dockerCmd(c, "run", "-i", "-d", "busybox") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + + out = inspectField(c, cleanedContainerID, "State.Paused") + // commit should not unpause a paused container + c.Assert(out, checker.Contains, "true") +} + +func (s *DockerSuite) TestCommitNewFile(c *check.C) { + dockerCmd(c, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") + + imageID, _ := dockerCmd(c, "commit", "commiter") + imageID = strings.TrimSpace(imageID) + + out, _ := dockerCmd(c, "run", imageID, "cat", "/foo") + actual := strings.TrimSpace(out) + c.Assert(actual, checker.Equals, "koye") +} + +func (s *DockerSuite) TestCommitHardlink(c *check.C) { + testRequires(c, DaemonIsLinux) + firstOutput, _ := dockerCmd(c, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") + + chunks := strings.Split(strings.TrimSpace(firstOutput), " ") + inode := chunks[0] + chunks = strings.SplitAfterN(strings.TrimSpace(firstOutput), " ", 2) + c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])) + + imageID, _ := dockerCmd(c, "commit", "hardlinks", "hardlinks") + imageID = strings.TrimSpace(imageID) + + secondOutput, _ := dockerCmd(c, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") + + chunks = strings.Split(strings.TrimSpace(secondOutput), " ") + inode = chunks[0] + chunks = strings.SplitAfterN(strings.TrimSpace(secondOutput), " ", 2) + c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])) +} + +func (s *DockerSuite) TestCommitTTY(c *check.C) { + dockerCmd(c, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + imageID, _ := dockerCmd(c, "commit", "tty", "ttytest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", "ttytest", "/bin/ls") +} + +func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", "bind-commit", "bindtest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", "bindtest", "true") +} + +func (s *DockerSuite) TestCommitChange(c *check.C) { + dockerCmd(c, "run", "--name", "test", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "EXPOSE 8080", + "--change", "ENV DEBUG true", + "--change", "ENV test 1", + "--change", "ENV PATH /foo", + "--change", "LABEL foo bar", + "--change", "CMD [\"/bin/sh\"]", + "--change", "WORKDIR /opt", + "--change", "ENTRYPOINT [\"/bin/sh\"]", + "--change", "USER testuser", + "--change", "VOLUME /var/lib/docker", + "--change", "ONBUILD /usr/local/bin/python-build --dir /app/src", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalised on Windows + expected := map[string]string{ + "Config.ExposedPorts": "map[8080/tcp:{}]", + "Config.Env": "[DEBUG=true test=1 PATH=/foo]", + "Config.Labels": "map[foo:bar]", + "Config.Cmd": "[/bin/sh]", + "Config.WorkingDir": prefix + slash + "opt", + "Config.Entrypoint": "[/bin/sh]", + "Config.User": "testuser", + "Config.Volumes": "map[/var/lib/docker:{}]", + "Config.OnBuild": "[/usr/local/bin/python-build --dir /app/src]", + } + + for conf, value := range expected { + res := inspectField(c, imageID, conf) + if res != value { + c.Errorf("%s('%s'), expected %s", conf, res, value) + } + } +} + +func (s *DockerSuite) TestCommitChangeLabels(c *check.C) { + dockerCmd(c, "run", "--name", "test", "--label", "some=label", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "LABEL some=label2", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + c.Assert(inspectField(c, imageID, "Config.Labels"), checker.Equals, "map[some:label2]") + // check that container labels didn't change + c.Assert(inspectField(c, "test", "Config.Labels"), checker.Equals, "map[some:label]") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_config_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_test.go new file mode 100644 index 0000000..1d5e5ad --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_test.go @@ -0,0 +1,140 @@ +package main + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestConfigHTTPHeader(c *check.C) { + testRequires(c, UnixCli) // Can't set/unset HOME on windows right now + // We either need a level of Go that supports Unsetenv (for cases + // when HOME/USERPROFILE isn't set), or we need to be able to use + // os/user but user.Current() only works if we aren't statically compiling + + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + headers = r.Header + })) + defer server.Close() + + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") + out, _, _ := runCommandWithOutput(cmd) + + c.Assert(headers["User-Agent"], checker.NotNil, check.Commentf("Missing User-Agent")) + + c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", out)) + + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("Missing/bad header,out:%v", out)) + +} + +func (s *DockerSuite) TestConfigDir(c *check.C) { + cDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(cDir) + + // First make sure pointing to empty dir doesn't generate an error + dockerCmd(c, "--config", cDir, "ps") + + // Test with env var too + cmd := exec.Command(dockerBinary, "ps") + cmd.Env = appendBaseEnv(true, "DOCKER_CONFIG="+cDir) + out, _, err := runCommandWithOutput(cmd) + + c.Assert(err, checker.IsNil, check.Commentf("ps2 didn't work,out:%v", out)) + + // Start a server so we can check to see if the config file was + // loaded properly + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + })) + defer server.Close() + + // Create a dummy config file in our new config dir + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + tmpCfg := filepath.Join(cDir, "config.json") + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil, check.Commentf("Err creating file")) + + env := appendBaseEnv(false) + + cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") + cmd.Env = env + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps3 - Missing header,out:%v", out)) + + // Reset headers and try again using env var this time + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") + cmd.Env = append(env, "DOCKER_CONFIG="+cDir) + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps4 - Missing header,out:%v", out)) + + // Reset headers and make sure flag overrides the env var + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") + cmd.Env = append(env, "DOCKER_CONFIG=MissingDir") + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps5 - Missing header,out:%v", out)) + + // Reset headers and make sure flag overrides the env var. + // Almost same as previous but make sure the "MissingDir" isn't + // ignore - we don't want to default back to the env var. + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "--config", "MissingDir", "-H="+server.URL[7:], "ps") + cmd.Env = append(env, "DOCKER_CONFIG="+cDir) + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) + c.Assert(headers["Myheader"], checker.IsNil, check.Commentf("ps6 - Headers shouldn't be the expected value,out:%v", out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_from_container_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_from_container_test.go new file mode 100644 index 0000000..9ed7e8c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_from_container_test.go @@ -0,0 +1,488 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// docker cp CONTAINER:PATH LOCALPATH + +// Try all of the test cases from the archive package which implements the +// internals of `docker cp` and ensure that the behavior matches when actually +// copying to and from containers. + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists") + defer os.RemoveAll(tmpDir) + + err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir") + defer os.RemoveAll(tmpDir) + + err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Test for error when SRC is a valid file or directory, +// bu the DST parent directory does not exist. +func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := containerCpPath(containerID, "/file1") + dstPath := cpPath(tmpDir, "notExists", "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = containerCpPath(containerID, "/dir1") + + err = runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := containerCpPath(containerID, "/file1") + dstPath := cpPathTrailingSep(tmpDir, "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = containerCpPath(containerID, "/dir1") + + err = runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Check that copying from a container to a local symlink copies to the symlink +// target and does not overwrite the local symlink itself. +func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // First, copy a file from the container to a symlink to a file. This + // should overwrite the symlink target contents with the source contents. + srcPath := containerCpPath(containerID, "/file2") + dstPath := cpPath(tmpDir, "symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a directory. This + // should copy the file into the symlink target directory. + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a file that does + // not exist (a broken symlink). This should create the target file with + // the contents of the source file. + dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = containerCpPath(containerID, "/dir2") + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(tmpDir, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory that does not exist (a broken symlink). This should create + // the target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(tmpDir, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpFromCaseA(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-a") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpFromCaseB(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-b") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPathTrailingSep(tmpDir, "testDir") + + err := runDockerCp(c, srcPath, dstDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpFromCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "file2") + + // Ensure the local file starts with different content. + c.Assert(fileContentEquals(c, dstPath, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPath(tmpDir, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + // Ensure that dstPath doesn't exist. + _, err := os.Stat(dstPath) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("did not expect dstPath %q to exist", dstPath)) + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir1") + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func (s *DockerSuite) TestCpFromCaseE(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-e") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPath(containerID, "dir1") + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpFromCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstDir := cpPath(tmpDir, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + dstPath := filepath.Join(resultDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseH(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-h") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "dir1") + "." + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove resultDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpFromCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstDir := cpPath(tmpDir, "dir2") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_test.go new file mode 100644 index 0000000..4e5c39e --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_test.go @@ -0,0 +1,660 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Ensure that an all-local path case returns an error. +func (s *DockerSuite) TestCpLocalOnly(c *check.C) { + err := runDockerCp(c, "foo", "bar") + c.Assert(err, checker.NotNil) + + c.Assert(err.Error(), checker.Contains, "must specify at least one container source") +} + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func (s *DockerSuite) TestCpGarbagePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("../../../../../../../../../../../../", cpFullPath) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- garbage path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for garbage path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that relative paths are relative to the container's rootfs +func (s *DockerSuite) TestCpRelativePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + var relPath string + if path.IsAbs(cpFullPath) { + // normally this is `filepath.Rel("/", cpFullPath)` but we cannot + // get this unix-path manipulation on windows with filepath. + relPath = cpFullPath[1:] + } + c.Assert(path.IsAbs(cpFullPath), checker.True, check.Commentf("path %s was assumed to be an absolute path", cpFullPath)) + + dockerCmd(c, "cp", containerID+":"+relPath, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- relative path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for relative path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that absolute paths are relative to the container's rootfs +func (s *DockerSuite) TestCpAbsolutePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- absolute path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for absolute path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Test for #5619 +// Check that absolute symlinks are still relative to the container's rootfs +func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, "container_path") + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path") + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + // We should have copied a symlink *NOT* the file itself! + linkTarget, err := os.Readlink(tmpname) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpFullPath)) +} + +// Check that symlinks to a directory behave as expected when copying one from +// a container. +func (s *DockerSuite) TestCpFromSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPathParent+" /dir_link") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + testDir, err := ioutil.TempDir("", "test-cp-from-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This copy command should copy the symlink, not the target, into the + // temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + linkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpTestPathParent)) + + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link/", testDir) + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testDir, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". + stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) +} + +// Check that symlinks to a directory behave as expected when copying one to a +// container. +func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testVol) + + // Create a test container with a local volume. We will test by copying + // to the volume path in the container which we can then verify locally. + out, _ := dockerCmd(c, "create", "-v", testVol+":/testVol", "busybox") + + containerID := strings.TrimSpace(out) + + // Create a temp directory to hold a test file nested in a directory. + testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This file will be at "/testDir/some/path/test" and will be copied into + // the test volume later. + hostTestFilename := filepath.Join(testDir, cpFullPath) + c.Assert(os.MkdirAll(filepath.Dir(hostTestFilename), os.FileMode(0700)), checker.IsNil) + c.Assert(ioutil.WriteFile(hostTestFilename, []byte(cpHostContents), os.FileMode(0600)), checker.IsNil) + + // Now create another temp directory to hold a symlink to the + // "/testDir/some" directory. + linkDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(linkDir) + + // Then symlink "/linkDir/dir_link" to "/testdir/some". + linkTarget := filepath.Join(testDir, cpTestPathParent) + localLink := filepath.Join(linkDir, "dir_link") + c.Assert(os.Symlink(linkTarget, localLink), checker.IsNil) + + // Now copy that symlink into the test volume in the container. + dockerCmd(c, "cp", localLink, containerID+":/testVol") + + // This copy command should have copied the symlink *not* the target. + expectedPath := filepath.Join(testVol, "dir_link") + actualLinkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to read symlink at %q", expectedPath)) + + c.Assert(actualLinkTarget, checker.Equals, linkTarget) + + // Good, now remove that copied link for the next test. + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the test volume directory in the + // container. + dockerCmd(c, "cp", localLink+"/", containerID+":/testVol") + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testVol, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". + stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) + + // And this directory should contain the file copied from the host at the + // expected location: "/testVol/dir_link/path/test" + expectedFilepath := filepath.Join(testVol, "dir_link/path/test") + fileContents, err := ioutil.ReadFile(expectedFilepath) + c.Assert(err, checker.IsNil) + + c.Assert(string(fileContents), checker.Equals, cpHostContents) +} + +// Test for #5619 +// Check that symlinks which are part of the resource path are still relative to the container's rootfs +func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path", cpTestName) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- symlink path component can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for symlink path component + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that cp with unprivileged user doesn't return any error +func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, UnixCli) // uses chmod/su: not available on windows + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpdir) + + c.Assert(os.Chmod(tmpdir, 0777), checker.IsNil) + + result := icmd.RunCommand("su", "unprivilegeduser", "-c", + fmt.Sprintf("%s cp %s:%s %s", dockerBinary, containerID, cpTestName, tmpdir)) + result.Assert(c, icmd.Expected{}) +} + +func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) + + outDir, err := ioutil.TempDir("", "cp-test-special-files") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(outDir) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + // Copy actual /etc/resolv.conf + dockerCmd(c, "cp", containerID+":/etc/resolv.conf", outDir) + + expected, err := readContainerFile(containerID, "resolv.conf") + actual, err := ioutil.ReadFile(outDir + "/resolv.conf") + + // Expected copied file to be duplicate of the container resolvconf + c.Assert(bytes.Equal(actual, expected), checker.True) + + // Copy actual /etc/hosts + dockerCmd(c, "cp", containerID+":/etc/hosts", outDir) + + expected, err = readContainerFile(containerID, "hosts") + actual, err = ioutil.ReadFile(outDir + "/hosts") + + // Expected copied file to be duplicate of the container hosts + c.Assert(bytes.Equal(actual, expected), checker.True) + + // Copy actual /etc/resolv.conf + dockerCmd(c, "cp", containerID+":/etc/hostname", outDir) + + expected, err = readContainerFile(containerID, "hostname") + actual, err = ioutil.ReadFile(outDir + "/hostname") + + // Expected copied file to be duplicate of the container resolvconf + c.Assert(bytes.Equal(actual, expected), checker.True) +} + +func (s *DockerSuite) TestCpVolumePath(c *check.C) { + // stat /tmp/cp-test-volumepath851508420/test gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) + + tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(outDir) + _, err = os.Create(tmpDir + "/test") + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + // Copy actual volume path + dockerCmd(c, "cp", containerID+":/foo", outDir) + + stat, err := os.Stat(outDir + "/foo") + c.Assert(err, checker.IsNil) + // expected copied content to be dir + c.Assert(stat.IsDir(), checker.True) + stat, err = os.Stat(outDir + "/foo/bar") + c.Assert(err, checker.IsNil) + // Expected file `bar` to be a file + c.Assert(stat.IsDir(), checker.False) + + // Copy file nested in volume + dockerCmd(c, "cp", containerID+":/foo/bar", outDir) + + stat, err = os.Stat(outDir + "/bar") + c.Assert(err, checker.IsNil) + // Expected file `bar` to be a file + c.Assert(stat.IsDir(), checker.False) + + // Copy Bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz", outDir) + stat, err = os.Stat(outDir + "/baz") + c.Assert(err, checker.IsNil) + // Expected `baz` to be a dir + c.Assert(stat.IsDir(), checker.True) + + // Copy file nested in bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz/test", outDir) + fb, err := ioutil.ReadFile(outDir + "/baz/test") + c.Assert(err, checker.IsNil) + fb2, err := ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) + + // Copy bind-mounted file + dockerCmd(c, "cp", containerID+":/test", outDir) + fb, err = ioutil.ReadFile(outDir + "/test") + c.Assert(err, checker.IsNil) + fb2, err = ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) +} + +func (s *DockerSuite) TestCpToDot(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + cwd, err := os.Getwd() + c.Assert(err, checker.IsNil) + defer os.Chdir(cwd) + c.Assert(os.Chdir(tmpdir), checker.IsNil) + dockerCmd(c, "cp", containerID+":/test", ".") + content, err := ioutil.ReadFile("./test") + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCpToStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "cp", containerID+":/test", "-"), + exec.Command("tar", "-vtf", "-")) + + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, "test") + c.Assert(out, checker.Contains, "-rw") +} + +func (s *DockerSuite) TestCpNameHasColon(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + dockerCmd(c, "cp", containerID+":/te:s:t", tmpdir) + content, err := ioutil.ReadFile(tmpdir + "/te:s:t") + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCopyAndRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + expectedMsg := "hello" + out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", expectedMsg) + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpDir, err := ioutil.TempDir("", "test-docker-restart-after-copy-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dockerCmd(c, "cp", fmt.Sprintf("%s:/etc/group", containerID), tmpDir) + + out, _ = dockerCmd(c, "start", "-a", containerID) + + c.Assert(strings.TrimSpace(out), checker.Equals, expectedMsg) +} + +func (s *DockerSuite) TestCopyCreatedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "--name", "test_cp", "-v", "/test", "busybox") + + tmpDir, err := ioutil.TempDir("", "test") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + dockerCmd(c, "cp", "test_cp:/bin/sh", tmpDir) +} + +// test copy with option `-L`: following symbol link +// Check that symlinks to a file behave as expected when copying one from +// a container to host following symbol link +func (s *DockerSuite) TestCpSymlinkFromConToHostFollowSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" /dir_link") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + testDir, err := ioutil.TempDir("", "test-cp-symlink-container-to-host-follow-symlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(testDir) + + // This copy command should copy the symlink, not the target, into the + // temporary directory. + dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + + expected := []byte(cpContainerContents) + actual, err := ioutil.ReadFile(expectedPath) + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container symbol link target") + } + os.Remove(expectedPath) + + // now test copy symbol link to a non-existing file in host + expectedPath = filepath.Join(testDir, "somefile_host") + // expectedPath shouldn't exist, if exists, remove it + if _, err := os.Lstat(expectedPath); err == nil { + os.Remove(expectedPath) + } + + dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", expectedPath) + + actual, err = ioutil.ReadFile(expectedPath) + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container symbol link target") + } + defer os.Remove(expectedPath) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_test.go new file mode 100644 index 0000000..f981cb8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_test.go @@ -0,0 +1,599 @@ +package main + +import ( + "os" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// docker cp LOCALPATH CONTAINER:PATH + +// Try all of the test cases from the archive package which implements the +// internals of `docker cp` and ensure that the behavior matches when actually +// copying to and from containers. + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists") + defer os.RemoveAll(tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPathTrailingSep(tmpDir, "file1") + dstPath := containerCpPath(containerID, "testDir") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Test for error when SRC is a valid file or directory, +// bu the DST parent directory does not exist. +func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/notExists", "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = cpPath(tmpDir, "dir1") + + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when DST ends in a trailing path separator but exists as a +// file. Also test that we cannot overwrite an existing directory with a +// non-directory and cannot overwrite an existing +func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := cpPath(tmpDir, "dir1/file1-1") + dstPath := containerCpPathTrailingSep(containerID, "file1") + + // The client should encounter an error trying to stat the destination + // and then be unable to copy since the destination is asserted to be a + // directory but does not exist. + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = cpPath(tmpDir, "dir1") + + // The client should encounter an error trying to stat the destination and + // then decide to extract to the parent directory instead with a rebased + // name in the source archive, but this directory would overwrite the + // existing file with the same name. + err = runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) +} + +// Check that copying from a local path to a symlink in a container copies to +// the symlink target and does not overwrite the container symlink itself. +func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { + // stat /tmp/test-cp-to-symlink-destination-262430901/vol3 gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol := getTestDir(c, "test-cp-to-symlink-destination-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + + // First, copy a local file to a symlink to a file in the container. This + // should overwrite the symlink target contents with the source contents. + srcPath := cpPath(testVol, "file2") + dstPath := containerCpPath(containerID, "/vol2/symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a local file to a symlink to a directory in the container. + // This should copy the file into the symlink target directory. + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file to a symlink to a file that does not exist (a broken + // symlink) in the container. This should create the target file with the + // contents of the source file. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a directory in the + // container. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = cpPath(testVol, "/dir2") + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(testVol, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a local directory that does + // not exist (a broken symlink) in the container. This should create the + // target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(testVol, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpToCaseA(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + workDir: "/root", command: makeCatFileCommand("itWorks.txt"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-a") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpToCaseB(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("testDir/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-b") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPathTrailingSep(containerID, "testDir") + + err := runDockerCp(c, srcPath, dstDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpToCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("file2"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/file2") + + // Ensure the container's file starts with the original content. + c.Assert(containerStartOutputEquals(c, containerID, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPath(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func (s *DockerSuite) TestCpToCaseE(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-e") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpToCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("dir2/dir1/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "/root/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir2/dir1/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseH(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-h") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpToCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpToCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("/dir2/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/dir2/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// The `docker cp` command should also ensure that you cannot +// write to a container rootfs that is marked as read-only. +func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-rootfs") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + readOnly: true, workDir: "/root", + command: makeCatFileCommand("shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} + +// The `docker cp` command should also ensure that you +// cannot write to a volume that is mounted as read-only. +func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-volume") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(tmpDir), workDir: "/root", + command: makeCatFileCommand("/vol_ro/shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/vol_ro/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrVolumeReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_unix_test.go new file mode 100644 index 0000000..45d85ba --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_unix_test.go @@ -0,0 +1,39 @@ +// +build !windows + +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +// Check ownership is root, both in non-userns and userns enabled modes +func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + tmpVolDir := getTestDir(c, "test-cp-tmpvol") + containerID := makeTestContainer(c, + testContainerOptions{volumes: []string{fmt.Sprintf("%s:/tmpvol", tmpVolDir)}}) + + tmpDir := getTestDir(c, "test-cp-to-check-ownership") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/tmpvol", "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.IsNil) + + stat, err := system.Stat(filepath.Join(tmpVolDir, "file1")) + c.Assert(err, checker.IsNil) + uid, gid, err := getRootUIDGID() + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Copied file not owned by container root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Copied file not owned by container root GID")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_utils.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_utils.go new file mode 100644 index 0000000..0501c5d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_utils.go @@ -0,0 +1,303 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +type fileType uint32 + +const ( + ftRegular fileType = iota + ftDir + ftSymlink +) + +type fileData struct { + filetype fileType + path string + contents string +} + +func (fd fileData) creationCommand() string { + var command string + + switch fd.filetype { + case ftRegular: + // Don't overwrite the file if it already exists! + command = fmt.Sprintf("if [ ! -f %s ]; then echo %q > %s; fi", fd.path, fd.contents, fd.path) + case ftDir: + command = fmt.Sprintf("mkdir -p %s", fd.path) + case ftSymlink: + command = fmt.Sprintf("ln -fs %s %s", fd.contents, fd.path) + } + + return command +} + +func mkFilesCommand(fds []fileData) string { + commands := make([]string, len(fds)) + + for i, fd := range fds { + commands[i] = fd.creationCommand() + } + + return strings.Join(commands, " && ") +} + +var defaultFileData = []fileData{ + {ftRegular, "file1", "file1"}, + {ftRegular, "file2", "file2"}, + {ftRegular, "file3", "file3"}, + {ftRegular, "file4", "file4"}, + {ftRegular, "file5", "file5"}, + {ftRegular, "file6", "file6"}, + {ftRegular, "file7", "file7"}, + {ftDir, "dir1", ""}, + {ftRegular, "dir1/file1-1", "file1-1"}, + {ftRegular, "dir1/file1-2", "file1-2"}, + {ftDir, "dir2", ""}, + {ftRegular, "dir2/file2-1", "file2-1"}, + {ftRegular, "dir2/file2-2", "file2-2"}, + {ftDir, "dir3", ""}, + {ftRegular, "dir3/file3-1", "file3-1"}, + {ftRegular, "dir3/file3-2", "file3-2"}, + {ftDir, "dir4", ""}, + {ftRegular, "dir4/file3-1", "file4-1"}, + {ftRegular, "dir4/file3-2", "file4-2"}, + {ftDir, "dir5", ""}, + {ftSymlink, "symlinkToFile1", "file1"}, + {ftSymlink, "symlinkToDir1", "dir1"}, + {ftSymlink, "brokenSymlinkToFileX", "fileX"}, + {ftSymlink, "brokenSymlinkToDirX", "dirX"}, + {ftSymlink, "symlinkToAbsDir", "/root"}, +} + +func defaultMkContentCommand() string { + return mkFilesCommand(defaultFileData) +} + +func makeTestContentInDir(c *check.C, dir string) { + for _, fd := range defaultFileData { + path := filepath.Join(dir, filepath.FromSlash(fd.path)) + switch fd.filetype { + case ftRegular: + c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(0666)), checker.IsNil) + case ftDir: + c.Assert(os.Mkdir(path, os.FileMode(0777)), checker.IsNil) + case ftSymlink: + c.Assert(os.Symlink(fd.contents, path), checker.IsNil) + } + } +} + +type testContainerOptions struct { + addContent bool + readOnly bool + volumes []string + workDir string + command string +} + +func makeTestContainer(c *check.C, options testContainerOptions) (containerID string) { + if options.addContent { + mkContentCmd := defaultMkContentCommand() + if options.command == "" { + options.command = mkContentCmd + } else { + options.command = fmt.Sprintf("%s && %s", defaultMkContentCommand(), options.command) + } + } + + if options.command == "" { + options.command = "#(nop)" + } + + args := []string{"run", "-d"} + + for _, volume := range options.volumes { + args = append(args, "-v", volume) + } + + if options.workDir != "" { + args = append(args, "-w", options.workDir) + } + + if options.readOnly { + args = append(args, "--read-only") + } + + args = append(args, "busybox", "/bin/sh", "-c", options.command) + + out, _ := dockerCmd(c, args...) + + containerID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + + exitCode := strings.TrimSpace(out) + if exitCode != "0" { + out, _ = dockerCmd(c, "logs", containerID) + } + c.Assert(exitCode, checker.Equals, "0", check.Commentf("failed to make test container: %s", out)) + + return +} + +func makeCatFileCommand(path string) string { + return fmt.Sprintf("if [ -f %s ]; then cat %s; fi", path, path) +} + +func cpPath(pathElements ...string) string { + localizedPathElements := make([]string, len(pathElements)) + for i, path := range pathElements { + localizedPathElements[i] = filepath.FromSlash(path) + } + return strings.Join(localizedPathElements, string(filepath.Separator)) +} + +func cpPathTrailingSep(pathElements ...string) string { + return fmt.Sprintf("%s%c", cpPath(pathElements...), filepath.Separator) +} + +func containerCpPath(containerID string, pathElements ...string) string { + joined := strings.Join(pathElements, "/") + return fmt.Sprintf("%s:%s", containerID, joined) +} + +func containerCpPathTrailingSep(containerID string, pathElements ...string) string { + return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...)) +} + +func runDockerCp(c *check.C, src, dst string) (err error) { + c.Logf("running `docker cp %s %s`", src, dst) + + args := []string{"cp", src, dst} + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker cp` command: %s: %s", err, out) + } + + return +} + +func startContainerGetOutput(c *check.C, containerID string) (out string, err error) { + c.Logf("running `docker start -a %s`", containerID) + + args := []string{"start", "-a", containerID} + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker start` command: %s: %s", err, out) + } + + return +} + +func getTestDir(c *check.C, label string) (tmpDir string) { + var err error + + tmpDir, err = ioutil.TempDir("", label) + // unable to make temporary directory + c.Assert(err, checker.IsNil) + + return +} + +func isCpNotExist(err error) bool { + return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified") +} + +func isCpDirNotExist(err error) bool { + return strings.Contains(err.Error(), archive.ErrDirNotExists.Error()) +} + +func isCpNotDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect") +} + +func isCpCannotCopyDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error()) +} + +func isCpCannotCopyReadOnly(err error) bool { + return strings.Contains(err.Error(), "marked read-only") +} + +func isCannotOverwriteNonDirWithDir(err error) bool { + return strings.Contains(err.Error(), "cannot overwrite non-directory") +} + +func fileContentEquals(c *check.C, filename, contents string) (err error) { + c.Logf("checking that file %q contains %q\n", filename, contents) + + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return + } + + expectedBytes, err := ioutil.ReadAll(strings.NewReader(contents)) + if err != nil { + return + } + + if !bytes.Equal(fileBytes, expectedBytes) { + err = fmt.Errorf("file content not equal - expected %q, got %q", string(expectedBytes), string(fileBytes)) + } + + return +} + +func symlinkTargetEquals(c *check.C, symlink, expectedTarget string) (err error) { + c.Logf("checking that the symlink %q points to %q\n", symlink, expectedTarget) + + actualTarget, err := os.Readlink(symlink) + if err != nil { + return + } + + if actualTarget != expectedTarget { + err = fmt.Errorf("symlink target points to %q not %q", actualTarget, expectedTarget) + } + + return +} + +func containerStartOutputEquals(c *check.C, containerID, contents string) (err error) { + c.Logf("checking that container %q start output contains %q\n", containerID, contents) + + out, err := startContainerGetOutput(c, containerID) + if err != nil { + return + } + + if out != contents { + err = fmt.Errorf("output contents not equal - expected %q, got %q", contents, out) + } + + return +} + +func defaultVolumes(tmpDir string) []string { + if SameHostDaemon.Condition() { + return []string{ + "/vol1", + fmt.Sprintf("%s:/vol2", tmpDir), + fmt.Sprintf("%s:/vol3", filepath.Join(tmpDir, "vol3")), + fmt.Sprintf("%s:/vol_ro:ro", filepath.Join(tmpDir, "vol_ro")), + } + } + + // Can't bind-mount volumes with separate host daemon. + return []string{"/vol1", "/vol2", "/vol3", "/vol_ro:/vol_ro:ro"} +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_create_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_create_test.go new file mode 100644 index 0000000..515a340 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_create_test.go @@ -0,0 +1,513 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strings" + "time" + + "os/exec" + + "io/ioutil" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-connections/nat" + "github.com/go-check/check" +) + +// Make sure we can create a simple container with some args +func (s *DockerSuite) TestCreateArgs(c *check.C) { + // Intentionally clear entrypoint, as the Windows busybox image needs an entrypoint, which breaks this test + out, _ := dockerCmd(c, "create", "--entrypoint=", "busybox", "command", "arg1", "arg2", "arg with space", "-c", "flags") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + ID string + Created time.Time + Path string + Args []string + Image string + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(string(cont.Path), checker.Equals, "command", check.Commentf("Unexpected container path. Expected command, received: %s", cont.Path)) + + b := false + expected := []string{"arg1", "arg2", "arg with space", "-c", "flags"} + for i, arg := range expected { + if arg != cont.Args[i] { + b = true + break + } + } + if len(cont.Args) != len(expected) || b { + c.Fatalf("Unexpected args. Expected %v, received: %v", expected, cont.Args) + } + +} + +// Make sure we can grow the container's rootfs at creation time. +func (s *DockerSuite) TestCreateGrowRootfs(c *check.C) { + // Windows and Devicemapper support growing the rootfs + if daemonPlatform != "windows" { + testRequires(c, Devicemapper) + } + out, _ := dockerCmd(c, "create", "--storage-opt", "size=120G", "busybox") + + cleanedContainerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, cleanedContainerID, "HostConfig.StorageOpt") + c.Assert(inspectOut, checker.Equals, "map[size:120G]") +} + +// Make sure we cannot shrink the container's rootfs at creation time. +func (s *DockerSuite) TestCreateShrinkRootfs(c *check.C) { + testRequires(c, Devicemapper) + + // Ensure this fails because of the defaultBaseFsSize is 10G + out, _, err := dockerCmdWithError("create", "--storage-opt", "size=5G", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Container size cannot be smaller than") +} + +// Make sure we can set hostconfig options too +func (s *DockerSuite) TestCreateHostConfig(c *check.C) { + out, _ := dockerCmd(c, "create", "-P", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PublishAllPorts bool + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PublishAllPorts, check.NotNil, check.Commentf("Expected PublishAllPorts, got false")) +} + +func (s *DockerSuite) TestCreateWithPortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 4, check.Commentf("Expected 4 ports bindings, got %d", len(cont.HostConfig.PortBindings))) + + for k, v := range cont.HostConfig.PortBindings { + c.Assert(v, checker.HasLen, 1, check.Commentf("Expected 1 ports binding, for the port %s but found %s", k, v)) + c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) + + } + +} + +func (s *DockerSuite) TestCreateWithLargePortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 65535) + + for k, v := range cont.HostConfig.PortBindings { + c.Assert(v, checker.HasLen, 1) + c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) + } + +} + +// "test123" should be printed by docker create + start +func (s *DockerSuite) TestCreateEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "create", "busybox", "echo", "test123") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "start", "-ai", cleanedContainerID) + c.Assert(out, checker.Equals, "test123\n", check.Commentf("container should've printed 'test123', got %q", out)) + +} + +func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + name := "test_create_volume" + dockerCmd(c, "create", "--name", name, "-v", prefix+slash+"foo", "busybox") + + dir, err := inspectMountSourceField(name, prefix+slash+"foo") + c.Assert(err, check.IsNil, check.Commentf("Error getting volume host path: %q", err)) + + if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { + c.Fatalf("Volume was not created") + } + if err != nil { + c.Fatalf("Error statting volume host path: %q", err) + } + +} + +func (s *DockerSuite) TestCreateLabels(c *check.C) { + name := "test_create_labels" + expected := map[string]string{"k1": "v1", "k2": "v2"} + dockerCmd(c, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox") + + actual := make(map[string]string) + inspectFieldAndMarshall(c, name, "Config.Labels", &actual) + + if !reflect.DeepEqual(expected, actual) { + c.Fatalf("Expected %s got %s", expected, actual) + } +} + +func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) { + imageName := "testcreatebuildlabel" + _, err := buildImage(imageName, + `FROM busybox + LABEL k1=v1 k2=v2`, + true) + + c.Assert(err, check.IsNil) + + name := "test_create_labels_from_image" + expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"} + dockerCmd(c, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName) + + actual := make(map[string]string) + inspectFieldAndMarshall(c, name, "Config.Labels", &actual) + + if !reflect.DeepEqual(expected, actual) { + c.Fatalf("Expected %s got %s", expected, actual) + } +} + +func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) { + image := "busybox" + // Busybox on Windows does not implement hostname command + if daemonPlatform == "windows" { + image = WindowsBaseImage + } + out, _ := dockerCmd(c, "run", "-h", "web.0", image, "hostname") + c.Assert(strings.TrimSpace(out), checker.Equals, "web.0", check.Commentf("hostname not set, expected `web.0`, got: %s", out)) + +} + +func (s *DockerSuite) TestCreateRM(c *check.C) { + // Test to make sure we can 'rm' a new container that is in + // "Created" state, and has ever been run. Test "rm -f" too. + + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + dockerCmd(c, "rm", cID) + + // Now do it again so we can "rm -f" this time + out, _ = dockerCmd(c, "create", "busybox") + + cID = strings.TrimSpace(out) + dockerCmd(c, "rm", "-f", cID) +} + +func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { + // Uses Linux specific functionality (--ipc) + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "create", "busybox") + id := strings.TrimSpace(out) + + dockerCmd(c, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox") +} + +func (s *DockerSuite) TestCreateByImageID(c *check.C) { + imageName := "testcreatebyimageid" + imageID, err := buildImage(imageName, + `FROM busybox + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + truncatedImageID := stringid.TruncateID(imageID) + + dockerCmd(c, "create", imageID) + dockerCmd(c, "create", truncatedImageID) + dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID)) + + // Ensure this fails + out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "Error parsing reference"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } + + out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "Unable to find image"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } +} + +func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-create") + + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) + + // Try untrusted create to ensure we pushed the tag to the registry + createCmd = exec.Command(dockerBinary, "create", "--disable-content-trust=true", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create with --disable-content-trust:\n%s", out)) + +} + +func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) + withTagName := fmt.Sprintf("%s:latest", repoName) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", withTagName) + dockerCmd(c, "push", withTagName) + dockerCmd(c, "rmi", withTagName) + + // Try trusted create on untrusted tag + createCmd := exec.Command(dockerBinary, "create", withTagName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(string(out), checker.Contains, fmt.Sprintf("does not have trust data for %s", repoName), check.Commentf("Missing expected output on trusted create:\n%s", out)) + +} + +func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-create") + + // Try create + createCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated-create", "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestCreateWhenCertExpired(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-create-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try create + createCmd := exec.Command(dockerBinary, "create", "--disable-content-trust", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) + + }) +} + +func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilcreate-local-config-dir") + c.Assert(err, check.IsNil) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) + + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + c.Assert(err, check.IsNil) + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + // Now, try creating with the original client from this new trust server. This should fail because the new root is invalid. + createCmd = exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + if err == nil { + c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) + } + if !strings.Contains(out, "could not rotate trust to a new trusted root") { + c.Fatalf("Missing expected output on trusted create:\n%s", out) + } + +} + +func (s *DockerSuite) TestCreateStopSignal(c *check.C) { + name := "test_create_stop_signal" + dockerCmd(c, "create", "--name", name, "--stop-signal", "9", "busybox") + + res := inspectFieldJSON(c, name, "Config.StopSignal") + c.Assert(res, checker.Contains, "9") + +} + +func (s *DockerSuite) TestCreateWithWorkdir(c *check.C) { + name := "foo" + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + dir := prefix + slash + "home" + slash + "foo" + slash + "bar" + + dockerCmd(c, "create", "--name", name, "-w", dir, "busybox") + // Windows does not create the workdir until the container is started + if daemonPlatform == "windows" { + dockerCmd(c, "start", name) + } + dockerCmd(c, "cp", fmt.Sprintf("%s:%s", name, dir), prefix+slash+"tmp") +} + +func (s *DockerSuite) TestCreateWithInvalidLogOpts(c *check.C) { + name := "test-invalidate-log-opts" + out, _, err := dockerCmdWithError("create", "--name", name, "--log-opt", "invalid=true", "busybox") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown log opt") + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Not(checker.Contains), name) +} + +// #20972 +func (s *DockerSuite) TestCreate64ByteHexID(c *check.C) { + out := inspectField(c, "busybox", "Id") + imageID := strings.TrimPrefix(strings.TrimSpace(string(out)), "sha256:") + + dockerCmd(c, "create", imageID) +} + +// Test case for #23498 +func (s *DockerSuite) TestCreateUnsetEntrypoint(c *check.C) { + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an entrypoint" +exec "$@"`, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "create", "--entrypoint=", name, "echo", "foo") + id := strings.TrimSpace(out) + c.Assert(id, check.Not(check.Equals), "") + out, _ = dockerCmd(c, "start", "-a", id) + c.Assert(strings.TrimSpace(out), check.Equals, "foo") +} + +// #22471 +func (s *DockerSuite) TestCreateStopTimeout(c *check.C) { + name1 := "test_create_stop_timeout_1" + dockerCmd(c, "create", "--name", name1, "--stop-timeout", "15", "busybox") + + res := inspectFieldJSON(c, name1, "Config.StopTimeout") + c.Assert(res, checker.Contains, "15") + + name2 := "test_create_stop_timeout_2" + dockerCmd(c, "create", "--name", name2, "busybox") + + res = inspectFieldJSON(c, name2, "Config.StopTimeout") + c.Assert(res, checker.Contains, "null") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_plugins_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_plugins_test.go new file mode 100644 index 0000000..f91edc6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_plugins_test.go @@ -0,0 +1,317 @@ +// +build linux + +package main + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/mount" + "github.com/go-check/check" +) + +// TestDaemonRestartWithPluginEnabled tests state restore for an enabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "true") +} + +// TestDaemonRestartWithPluginDisabled tests state restore for a disabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName, "--disable"); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "false") +} + +// TestDaemonKillLiveRestoreWithPlugins SIGKILLs daemon started with --live-restore. +// Plugins should continue to run. +func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start("--live-restore"); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + if err := s.d.Restart("--live-restore"); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Kill(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + cmd := exec.Command("pgrep", "-f", pluginProcessName) + if out, ec, err := runCommandWithOutput(cmd); ec != 0 { + c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) + } +} + +// TestDaemonShutdownLiveRestoreWithPlugins SIGTERMs daemon started with --live-restore. +// Plugins should continue to run. +func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start("--live-restore"); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + if err := s.d.Restart("--live-restore"); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + cmd := exec.Command("pgrep", "-f", pluginProcessName) + if out, ec, err := runCommandWithOutput(cmd); ec != 0 { + c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) + } +} + +// TestDaemonShutdownWithPlugins shuts down running plugins. +func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network, SameHostDaemon) + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + for { + if err := syscall.Kill(s.d.cmd.Process.Pid, 0); err == syscall.ESRCH { + break + } + } + + cmd := exec.Command("pgrep", "-f", pluginProcessName) + if out, ec, err := runCommandWithOutput(cmd); ec != 1 { + c.Fatalf("Expected exit code '1', got %d err: %v output: %s ", ec, err, out) + } + + s.d.Start("--live-restore") + cmd = exec.Command("pgrep", "-f", pluginProcessName) + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +// TestVolumePlugin tests volume creation using a plugin. +func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) { + testRequires(c, IsAmd64, Network) + + volName := "plugin-volume" + destDir := "/tmp/data/" + destFile := "foo" + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + out, err := s.d.Cmd("plugin", "install", pName, "--grant-all-permissions") + if err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + pluginID, err := s.d.Cmd("plugin", "inspect", "-f", "{{.Id}}", pName) + pluginID = strings.TrimSpace(pluginID) + if err != nil { + c.Fatalf("Could not retrieve plugin ID: %v %s", err, pluginID) + } + mountpointPrefix := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs") + defer func() { + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + + exists, err := existsMountpointWithPrefix(mountpointPrefix) + c.Assert(err, checker.IsNil) + c.Assert(exists, checker.Equals, false) + + }() + + out, err = s.d.Cmd("volume", "create", "-d", pName, volName) + if err != nil { + c.Fatalf("Could not create volume: %v %s", err, out) + } + defer func() { + if out, err := s.d.Cmd("volume", "remove", volName); err != nil { + c.Fatalf("Could not remove volume: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("volume", "ls") + if err != nil { + c.Fatalf("Could not list volume: %v %s", err, out) + } + c.Assert(out, checker.Contains, volName) + c.Assert(out, checker.Contains, pName) + + mountPoint, err := s.d.Cmd("volume", "inspect", volName, "--format", "{{.Mountpoint}}") + if err != nil { + c.Fatalf("Could not inspect volume: %v %s", err, mountPoint) + } + mountPoint = strings.TrimSpace(mountPoint) + + out, err = s.d.Cmd("run", "--rm", "-v", volName+":"+destDir, "busybox", "touch", destDir+destFile) + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs", mountPoint, destFile) + _, err = os.Lstat(path) + c.Assert(err, checker.IsNil) + + exists, err := existsMountpointWithPrefix(mountpointPrefix) + c.Assert(err, checker.IsNil) + c.Assert(exists, checker.Equals, true) +} + +func (s *DockerDaemonSuite) TestGraphdriverPlugin(c *check.C) { + testRequires(c, Network, IsAmd64, DaemonIsLinux, overlay2Supported, ExperimentalDaemon) + + s.d.Start() + + // install the plugin + plugin := "cpuguy83/docker-overlay2-graphdriver-plugin" + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", plugin) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // restart the daemon with the plugin set as the storage driver + s.d.Restart("-s", plugin, "--storage-opt", "overlay2.override_kernel_check=1") + + // run a container + out, err = s.d.Cmd("run", "--rm", "busybox", "true") // this will pull busybox using the plugin + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) { + testRequires(c, DaemonIsLinux, Network, IsAmd64) + + s.d.Start("--live-restore=true") + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, err = s.d.Cmd("volume", "create", "--driver", pName, "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + s.d.Restart("--live-restore=true") + + out, err = s.d.Cmd("plugin", "disable", pName) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "in use") + + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("plugin", "disable", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("plugin", "rm", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) { + mounts, err := mount.GetMounts() + if err != nil { + return false, err + } + for _, mnt := range mounts { + if strings.HasPrefix(mnt.Mountpoint, mountpointPrefix) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_test.go new file mode 100644 index 0000000..b227dd6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_test.go @@ -0,0 +1,2988 @@ +// +build linux + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" + "github.com/docker/libnetwork/iptables" + "github.com/docker/libtrust" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// TestLegacyDaemonCommand test starting docker daemon using "deprecated" docker daemon +// command. Remove this test when we remove this. +func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *check.C) { + cmd := exec.Command(dockerBinary, "daemon", "--storage-driver=vfs", "--debug") + err := cmd.Start() + c.Assert(err, checker.IsNil, check.Commentf("could not start daemon using 'docker daemon'")) + + c.Assert(cmd.Process.Kill(), checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top1: err=%v\n%s", err, out) + } + // --restart=no by default + if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top2: err=%v\n%s", err, out) + } + + testRun := func(m map[string]bool, prefix string) { + var format string + for cont, shouldRun := range m { + out, err := s.d.Cmd("ps") + if err != nil { + c.Fatalf("Could not run ps: err=%v\n%q", err, out) + } + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + if shouldRun != strings.Contains(out, cont) { + c.Fatalf(format, prefix, cont) + } + } + } + + testRun(map[string]bool{"top1": true, "top2": true}, "") + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("run", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { + c.Fatal(err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { + c.Fatal(err, out) + } + + if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { + c.Fatal(err, out) + } + + out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1") + c.Assert(err, check.IsNil) + + if _, err := inspectMountPointJSON(out, "/foo"); err != nil { + c.Fatalf("Expected volume to exist: /foo, error: %v\n", err) + } +} + +// #11008 +func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top") + c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) + + out, err = s.d.Cmd("run", "-d", "--name", "top2", "--restart", "unless-stopped", "busybox:latest", "top") + c.Assert(err, check.IsNil, check.Commentf("run top2: %v", out)) + + testRun := func(m map[string]bool, prefix string) { + var format string + for name, shouldRun := range m { + out, err := s.d.Cmd("ps") + c.Assert(err, check.IsNil, check.Commentf("run ps: %v", out)) + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + c.Assert(strings.Contains(out, name), check.Equals, shouldRun, check.Commentf(format, prefix, name)) + } + } + + // both running + testRun(map[string]bool{"top1": true, "top2": true}, "") + + out, err = s.d.Cmd("stop", "top1") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("stop", "top2") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // both stopped + testRun(map[string]bool{"top1": false, "top2": false}, "") + + err = s.d.Restart() + c.Assert(err, check.IsNil) + + // restart=always running + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") + + out, err = s.d.Cmd("start", "top2") + c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out)) + + err = s.d.Restart() + c.Assert(err, check.IsNil) + + // both running + testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ") + +} + +func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "test1", "--restart", "on-failure:3", "busybox:latest", "false") + c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) + + // wait test1 to stop + hostArgs := []string{"--host", s.d.sock()} + err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...) + c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) + + // record last start time + out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + lastStartTime := out + + err = s.d.Restart() + c.Assert(err, check.IsNil) + + // test1 shouldn't restart at all + err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 0, hostArgs...) + c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) + + // make sure test1 isn't restarted when daemon restart + // if "StartAt" time updates, means test1 was once restarted. + out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Equals, lastStartTime, check.Commentf("test1 shouldn't start after daemon restarts")) +} + +func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { + if err := s.d.Start("--iptables=false"); err != nil { + c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) + } +} + +// Make sure we cannot shrink base device at daemon restart. +func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) { + testRequires(c, Devicemapper) + c.Assert(s.d.Start(), check.IsNil) + + oldBasesizeBytes := s.d.getBaseDeviceSize(c) + var newBasesizeBytes int64 = 1073741824 //1GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.IsNil, check.Commentf("daemon should not have started as new base device size is less than existing base device size: %v", err)) + } + c.Assert(s.d.Stop(), check.IsNil) +} + +// Make sure we can grow base device at daemon restart. +func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { + testRequires(c, Devicemapper) + c.Assert(s.d.Start(), check.IsNil) + + oldBasesizeBytes := s.d.getBaseDeviceSize(c) + + var newBasesizeBytes int64 = 53687091200 //50GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + c.Skip(fmt.Sprintf("New base device size (%v) must be greater than (%s)", units.HumanSize(float64(newBasesizeBytes)), units.HumanSize(float64(oldBasesizeBytes)))) + } + + err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err)) + + basesizeAfterRestart := s.d.getBaseDeviceSize(c) + newBasesize, err := convertBasesize(newBasesizeBytes) + c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err)) + c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("Basesize passed is not equal to Basesize set")) + c.Assert(s.d.Stop(), check.IsNil) +} + +// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and +// no longer has an IP associated, we should gracefully handle that case and associate +// an IP with it rather than fail daemon start +func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { + // rather than depending on brctl commands to verify docker0 is created and up + // let's start the daemon and stop it, and then make a modification to run the + // actual test + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // now we will remove the ip from docker0 and then try starting the daemon + ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") + stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) + if err != nil { + c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + } + + if err := s.d.Start(); err != nil { + warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" + c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + // make sure the container is not running + runningOut, err := s.d.Cmd("inspect", "--format={{.State.Running}}", "top") + if err != nil { + c.Fatalf("Could not inspect on container: %s, %v", out, err) + } + if strings.TrimSpace(runningOut) != "true" { + c.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) + } +} + +// TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true that the docker0 bridge +// has the fe80::1 address and that a container is assigned a link-local address +func (s *DockerDaemonSuite) TestDaemonIPv6Enabled(c *check.C) { + testRequires(c, IPv6) + + setupV6(c) + defer teardownV6(c) + + if err := s.d.StartWithBusybox("--ipv6"); err != nil { + c.Fatal(err) + } + + iface, err := net.InterfaceByName("docker0") + if err != nil { + c.Fatalf("Error getting docker0 interface: %v", err) + } + + addrs, err := iface.Addrs() + if err != nil { + c.Fatalf("Error getting addresses for docker0 interface: %v", err) + } + + var found bool + expected := "fe80::1/64" + + for i := range addrs { + if addrs[i].String() == expected { + found = true + break + } + } + + if !found { + c.Fatalf("Bridge does not have an IPv6 Address") + } + + if out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { + c.Fatalf("Could not run container: %s, %v", out, err) + } + + out, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.LinkLocalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip == nil { + c.Fatalf("Container should have a link-local IPv6 address") + } + + out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip != nil { + c.Fatalf("Container should not have a global IPv6 address: %v", out) + } +} + +// TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR +// that running containers are given a link-local and global IPv6 address +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *check.C) { + // IPv6 setup is messing with local bridge address. + testRequires(c, SameHostDaemon) + setupV6(c) + defer teardownV6(c) + + err := s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100") + c.Assert(err, checker.IsNil, check.Commentf("Could not start daemon with busybox: %v", err)) + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + out = strings.Trim(out, " \r\n'") + + c.Assert(err, checker.IsNil, check.Commentf(out)) + + ip := net.ParseIP(out) + c.Assert(ip, checker.NotNil, check.Commentf("Container should have a global IPv6 address")) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.IPv6Gateway}}", "ipv6test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:2::100", check.Commentf("Container should have a global IPv6 gateway")) +} + +// TestDaemonIPv6FixedCIDRAndMac checks that when the daemon is started with ipv6 fixed CIDR +// the running containers are given an IPv6 address derived from the MAC address and the ipv6 fixed CIDR +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *check.C) { + // IPv6 setup is messing with local bridge address. + testRequires(c, SameHostDaemon) + setupV6(c) + defer teardownV6(c) + + err := s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6=2001:db8:1::/64") + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox") + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + c.Assert(err, checker.IsNil) + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:1::aabb:ccdd:eeff") +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { + c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { + if err := s.d.Start("--log-level=debug"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { + // we creating new daemons to create new logFile + if err := s.d.Start("--log-level=fatal"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { + if err := s.d.Start("-D"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { + if err := s.d.Start("--debug"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { + if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { + listeningPorts := [][]string{ + {"0.0.0.0", "0.0.0.0", "5678"}, + {"127.0.0.1", "127.0.0.1", "1234"}, + {"localhost", "127.0.0.1", "1235"}, + } + + cmdArgs := make([]string, 0, len(listeningPorts)*2) + for _, hostDirective := range listeningPorts { + cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) + } + + if err := s.d.StartWithBusybox(cmdArgs...); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + for _, hostDirective := range listeningPorts { + output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") + if err == nil { + c.Fatalf("Container should not start, expected port already allocated error: %q", output) + } else if !strings.Contains(output, "port is already allocated") { + c.Fatalf("Expected port is already allocated error: %q", output) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { + // TODO: skip or update for Windows daemon + os.Remove("/etc/docker/key.json") + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + s.d.Stop() + + k, err := libtrust.LoadKeyFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error opening key file") + } + kid := k.KeyID() + // Test Key ID is a valid fingerprint (e.g. QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) + if len(kid) != 59 { + c.Fatalf("Bad key ID: %s", kid) + } +} + +func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) { + // TODO: skip or update for Windows daemon + os.Remove("/etc/docker/key.json") + k1, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + c.Fatalf("Error generating private key: %s", err) + } + if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { + c.Fatalf("Error creating .docker directory: %s", err) + } + if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { + c.Fatalf("Error saving private key: %s", err) + } + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + s.d.Stop() + + k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error opening key file") + } + if k1.KeyID() != k2.KeyID() { + c.Fatalf("Key not migrated") + } +} + +// GH#11320 - verify that the daemon exits on failure properly +// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means +// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required +func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { + //attempt to start daemon with incorrect flags (we know -b and --bip conflict) + if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { + //verify we got the right error + if !strings.Contains(err.Error(), "Daemon exited") { + c.Fatalf("Expected daemon not to start, got %v", err) + } + // look in the log and make sure we got the message that daemon is shutting down + runCmd := exec.Command("grep", "Error starting daemon", s.d.LogFileName()) + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err) + } + } else { + //if we didn't get an error and the daemon is running, this is a failure + c.Fatal("Conflicting options should cause the daemon to error out with a failure") + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { + d := s.d + err := d.Start("--bridge", "nosuchbridge") + c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail")) + defer d.Restart() + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = d.StartWithBusybox("--bridge", bridgeName) + c.Assert(err, check.IsNil) + + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", + ipTablesSearchString, out)) + + _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP := d.findContainerIP("ExtContainer") + ip := net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeNone(c *check.C) { + // start with bridge none + d := s.d + err := d.StartWithBusybox("--bridge", "none") + c.Assert(err, check.IsNil) + defer d.Restart() + + // verify docker0 iface is not there + out, _, err := runCommandWithOutput(exec.Command("ifconfig", "docker0")) + c.Assert(err, check.NotNil, check.Commentf("docker0 should not be present if daemon started with --bridge=none")) + c.Assert(strings.Contains(out, "Device not found"), check.Equals, true) + + // verify default "bridge" network is not there + out, err = d.Cmd("network", "inspect", "bridge") + c.Assert(err, check.NotNil, check.Commentf("\"bridge\" network should not be present if daemon started with --bridge=none")) + c.Assert(strings.Contains(out, "No such network"), check.Equals, true) +} + +func createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) { + args := []string{"link", "add", "name", ifName, "type", ifType} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + + ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up") + out, _, err = runCommandWithOutput(ifCfgCmd) + return out, err +} + +func deleteInterface(c *check.C, ifName string) { + ifCmd := exec.Command("ip", "link", "delete", ifName) + out, _, err := runCommandWithOutput(ifCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + flushCmd := exec.Command("iptables", "-t", "nat", "--flush") + out, _, err = runCommandWithOutput(flushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + flushCmd = exec.Command("iptables", "--flush") + out, _, err = runCommandWithOutput(flushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { + // TestDaemonBridgeIP Steps + // 1. Delete the existing docker0 Bridge + // 2. Set --bip daemon configuration and start the new Docker Daemon + // 3. Check if the bip config has taken effect using ifconfig and iptables commands + // 4. Launch a Container and make sure the IP-Address is in the expected subnet + // 5. Delete the docker0 Bridge + // 6. Restart the Docker Daemon (via deferred action) + // This Restart takes care of bringing docker0 interface back to auto-assigned IP + + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1/24" + ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + err := d.StartWithBusybox("--bip", bridgeIP) + c.Assert(err, check.IsNil) + defer d.Restart() + + ifconfigSearchString := ip.String() + ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge) + out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd) + c.Assert(err, check.IsNil) + + c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true, + check.Commentf("ifconfig output should have contained %q, but was %q", + ifconfigSearchString, out)) + + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", + ipTablesSearchString, out)) + + out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP := d.findContainerIP("test") + ip = net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) { + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + defer s.d.Restart() + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // now we will change the docker0's IP and then try starting the daemon + bridgeIP := "192.169.100.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + ipCmd := exec.Command("ifconfig", "docker0", bridgeIP) + stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) + if err != nil { + c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + } + + if err := s.d.Start("--bip", bridgeIP); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + //check if the iptables contains new bridgeIP MASQUERADE rule + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} + err = d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + for i := 0; i < 4; i++ { + cName := "Container" + strconv.Itoa(i) + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + if err != nil { + c.Assert(strings.Contains(out, "no available IPv4 addresses"), check.Equals, true, + check.Commentf("Could not run a Container : %s %s", err.Error(), out)) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "10.2.2.1/16" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = d.StartWithBusybox("--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24") + c.Assert(err, check.IsNil) + defer s.d.Restart() + + out, err = d.Cmd("run", "-d", "--name", "bb", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + defer d.Cmd("stop", "bb") + + out, err = d.Cmd("exec", "bb", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(out, checker.Equals, "10.2.2.0\n") + + out, err = d.Cmd("run", "--rm", "busybox", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Equals, "10.2.2.2\n") +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "172.27.42.1/16" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = d.StartWithBusybox("--bridge", bridgeName, "--fixed-cidr", bridgeIP) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + out, err = d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + cid1 := strings.TrimSpace(out) + defer d.Cmd("stop", cid1) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + + err := d.StartWithBusybox("--bip", bridgeIPNet) + c.Assert(err, check.IsNil) + defer d.Restart() + + expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", + bridgeIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + gatewayIP := "192.169.1.254" + + err := d.StartWithBusybox("--bip", bridgeIPNet, "--default-gateway", gatewayIP) + c.Assert(err, check.IsNil) + defer d.Restart() + + expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Explicit default gateway should be %s, but default route was '%s'", + gatewayIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + // Program a custom default gateway outside of the container subnet, daemon should accept it and start + err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") + c.Assert(err, check.IsNil) + + deleteInterface(c, defaultNetworkBridge) + s.d.Restart() +} + +func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + // Start daemon without docker0 bridge + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + discoveryBackend := "consul://consuladdr:consulport/some/path" + err := s.d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) + c.Assert(err, checker.IsNil) + + // Start daemon with docker0 bridge + result := icmd.RunCommand("ifconfig", defaultNetworkBridge) + c.Assert(result, icmd.Matches, icmd.Success) + + err = s.d.Restart(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) + c.Assert(err, checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { + d := s.d + + ipStr := "192.170.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + args := []string{"--ip", ip.String()} + err := d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.NotNil, + check.Commentf("Running a container must fail with an invalid --ip option")) + c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true) + + ifName := "dummy" + out, err = createInterface(c, "dummy", ifName, ipStr) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, ifName) + + _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.IsNil) + + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String()) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) +} + +func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { + testRequires(c, bridgeNfIptables) + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--icc=false"} + err = d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + + // Pinging another container must fail with --icc=false + pingContainers(c, d, true) + + ipStr := "192.171.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + ifName := "icc-dummy" + + createInterface(c, "dummy", ifName, ipStr) + + // But, Pinging external or a Host interface must succeed + pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String()) + runArgs := []string{"run", "--rm", "busybox", "sh", "-c", pingCmd} + _, err = d.Cmd(runArgs...) + c.Assert(err, check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--icc=false"} + err = d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + + out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) { + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = s.d.StartWithBusybox("--bridge", bridgeName, "--icc=false") + c.Assert(err, check.IsNil) + defer s.d.Restart() + + _, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") + c.Assert(err, check.IsNil) + + childIP := s.d.findContainerIP("child") + parentIP := s.d.findContainerIP("parent") + + sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} + destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} + if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) { + c.Fatal("Iptables rules not found") + } + + s.d.Cmd("rm", "--link", "parent/http") + if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) { + c.Fatal("Iptables rules should be removed when unlink") + } + + s.d.Cmd("kill", "child") + s.d.Cmd("kill", "parent") +} + +func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + + if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") + if err != nil { + c.Fatal(out, err) + } + + outArr := strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile := strings.TrimSpace(outArr[0]) + nproc := strings.TrimSpace(outArr[1]) + + if nofile != "42" { + c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + } + + // Now restart daemon with a new default + if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil { + c.Fatal(err) + } + + out, err = s.d.Cmd("start", "-a", "test") + if err != nil { + c.Fatal(err) + } + + outArr = strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile = strings.TrimSpace(outArr[0]) + nproc = strings.TrimSpace(outArr[1]) + + if nofile != "43" { + c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + } +} + +// #11315 +func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil { + c.Fatal(err, out) + } + + if out, err := s.d.Cmd("rename", "test", "test2"); err != nil { + c.Fatal(err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("start", "test2"); err != nil { + c.Fatal(err, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, check.IsNil, check.Commentf(out)) + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=none", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { + if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { + if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=json-file", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) { + c.Assert(s.d.StartWithBusybox("--log-driver=none"), checker.IsNil) + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("logs", "test") + c.Assert(err, check.NotNil, check.Commentf("Logs should fail with 'none' driver")) + expected := `"logs" command is supported only for "json-file" and "journald" logging drivers (got: none)` + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { + dir, err := ioutil.TempDir("", "socket-cleanup-test") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + sockPath := filepath.Join(dir, "docker.sock") + if err := s.d.Start("--host", "unix://"+sockPath); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(sockPath); err != nil { + c.Fatal("socket does not exist") + } + + if err := s.d.Stop(); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { + c.Fatal("unix socket is not cleaned up") + } +} + +func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { + type Config struct { + Crv string `json:"crv"` + D string `json:"d"` + Kid string `json:"kid"` + Kty string `json:"kty"` + X string `json:"x"` + Y string `json:"y"` + } + + os.Remove("/etc/docker/key.json") + if err := s.d.Start(); err != nil { + c.Fatalf("Failed to start daemon: %v", err) + } + + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + config := &Config{} + bytes, err := ioutil.ReadFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error reading key.json file: %s", err) + } + + // byte[] to Data-Struct + if err := json.Unmarshal(bytes, &config); err != nil { + c.Fatalf("Error Unmarshal: %s", err) + } + + //replace config.Kid with the fake value + config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4" + + // NEW Data-Struct to byte[] + newBytes, err := json.Marshal(&config) + if err != nil { + c.Fatalf("Error Marshal: %s", err) + } + + // write back + if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil { + c.Fatalf("Error ioutil.WriteFile: %s", err) + } + + defer os.Remove("/etc/docker/key.json") + + if err := s.d.Start(); err == nil { + c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) + } + + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + + if !strings.Contains(string(content), "Public Key ID does not match") { + c.Fatalf("Missing KeyID message from daemon logs: %s", string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat") + if err != nil { + c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out) + } + containerID := strings.TrimSpace(out) + + if out, err := s.d.Cmd("kill", containerID); err != nil { + c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + errchan := make(chan error) + go func() { + if out, err := s.d.Cmd("wait", containerID); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on a stopped (killed) container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +// TestHTTPSInfo connects via two-way authenticated HTTPS to the info endpoint +func (s *DockerDaemonSuite) TestHTTPSInfo(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "info", + } + out, err := s.d.Cmd(args...) + if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } +} + +// TestHTTPSRun connects via two-way authenticated HTTPS to the create, attach, start, and wait endpoints. +// https://github.com/docker/docker/issues/19280 +func (s *DockerDaemonSuite) TestHTTPSRun(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.StartWithBusybox("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "run", "busybox", "echo", "TLS response", + } + out, err := s.d.Cmd(args...) + if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } + + if !strings.Contains(out, "TLS response") { + c.Fatalf("expected output to include `TLS response`, got %v", out) + } +} + +// TestTLSVerify verifies that --tlsverify=false turns on tls +func (s *DockerDaemonSuite) TestTLSVerify(c *check.C) { + out, err := exec.Command(dockerdBinary, "--tlsverify=false").CombinedOutput() + if err == nil || !strings.Contains(string(out), "Could not load X509 key pair") { + c.Fatalf("Daemon should not have started due to missing certs: %v\n%s", err, string(out)) + } +} + +// TestHTTPSInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. +func (s *DockerDaemonSuite) TestHTTPSInfoRogueCert(c *check.C) { + const ( + errBadCertificate = "bad certificate" + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...) + if err == nil || !strings.Contains(out, errBadCertificate) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out) + } +} + +// TestHTTPSInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint +// which provides a rogue server certificate and checks that it fails with the expected error +func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) { + const ( + errCaUnknown = "x509: certificate signed by unknown authority" + testDaemonRogueHTTPSAddr = "tcp://localhost:4272" + ) + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem", + "--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonRogueHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...) + if err == nil || !strings.Contains(out, errCaUnknown) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out) + } +} + +func pingContainers(c *check.C, d *Daemon, expectFailure bool) { + var dargs []string + if d != nil { + dargs = []string{"--host", d.sock()} + } + + args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, args...) + + args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c") + pingCmd := "ping -c 1 %s -W 1" + args = append(args, fmt.Sprintf(pingCmd, "alias1")) + _, _, err := dockerCmdWithError(args...) + + if expectFailure { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + args = append(dargs, "rm", "-f", "container1") + dockerCmd(c, args...) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + socket := filepath.Join(s.d.folder, "docker.sock") + + out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(s.d.Restart(), check.IsNil) +} + +// os.Kill should kill daemon ungracefully, leaving behind container mounts. +// A subsequent daemon restart shoud clean up said mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + // container mounts should exist even after daemon has crashed. + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + + // kill the container + runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", id) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Failed to run ctr, ExitCode: %d, err: %v output: %s id: %s\n", ec, err, out, id) + } + + // restart daemon. + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // Now, container mounts should be gone. + mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +// os.Interrupt should perform a graceful daemon shutdown and hence cleanup mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + + // Send SIGINT and daemon should clean up + c.Assert(s.d.cmd.Process.Signal(os.Interrupt), check.IsNil) + // Wait for the daemon to stop. + c.Assert(<-s.d.wait, checker.IsNil) + + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil) + + out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled: %s", out)) + + out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in bridge mode when bridge network is disabled: %s", out)) + // the extra grep and awk clean up the output of `ip` to only list the number and name of + // interfaces, allowing for different versions of ip (e.g. inside and outside the container) to + // be used while still verifying that the interface list is the exact same + cmd := exec.Command("sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err != nil { + c.Fatal("Failed to get host network interface") + } + out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, check.Equals, fmt.Sprintf("%s", stdout), + check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + t.Fatal(err) + } + if out, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top"); err != nil { + t.Fatal(out, err) + } + + if err := s.d.Restart(); err != nil { + t.Fatal(err) + } + // Container 'test' should be removed without error + if out, err := s.d.Cmd("rm", "test"); err != nil { + t.Fatal(out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top") + if err != nil { + c.Fatal(out, err) + } + + // Get sandbox key via inspect + out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "netns") + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + fileName := strings.Trim(out, " \r\n'") + + if out, err := s.d.Cmd("stop", "netns"); err != nil { + c.Fatal(out, err) + } + + // Test if the file still exists + out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) + out = strings.TrimSpace(out) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, check.Equals, fileName, check.Commentf("Output: %s", out)) + + // Remove the container and restart the daemon + if out, err := s.d.Cmd("rm", "netns"); err != nil { + c.Fatal(out, err) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // Test again and see now the netns file does not exist + out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) + out = strings.TrimSpace(out) + c.Assert(err, check.Not(check.IsNil), check.Commentf("Output: %s", out)) +} + +// tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is ignored +func (s *DockerDaemonSuite) TestDaemonTLSVerifyIssue13964(c *check.C) { + host := "tcp://localhost:4271" + c.Assert(s.d.Start("-H", host), check.IsNil) + cmd := exec.Command(dockerBinary, "-H", host, "info") + cmd.Env = []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"} + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, check.Not(check.IsNil), check.Commentf("%s", out)) + c.Assert(strings.Contains(out, "error during connect"), check.Equals, true) + +} + +func setupV6(c *check.C) { + // Hack to get the right IPv6 address on docker0, which has already been created + result := icmd.RunCommand("ip", "addr", "add", "fe80::1/64", "dev", "docker0") + result.Assert(c, icmd.Expected{}) +} + +func teardownV6(c *check.C) { + result := icmd.RunCommand("ip", "addr", "del", "fe80::1/64", "dev", "docker0") + result.Assert(c, icmd.Expected{}) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") + c.Assert(err, check.IsNil) + id := strings.TrimSpace(out) + + _, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("wait", id) + c.Assert(err, check.IsNil) + + out, err = s.d.Cmd("ps", "-q") + c.Assert(err, check.IsNil) + c.Assert(out, check.Equals, "") + + c.Assert(s.d.Restart(), check.IsNil) + + out, err = s.d.Cmd("ps", "-q") + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, id[:12]) +} + +func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) { + if err := s.d.StartWithBusybox("--log-opt=max-size=1k"); err != nil { + c.Fatal(err) + } + name := "logtest" + out, err := s.d.Cmd("run", "-d", "--log-opt=max-file=5", "--name", name, "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err)) + + out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Config }}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, checker.Contains, "max-size:1k") + c.Assert(out, checker.Contains, "max-file:5") + + out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Type }}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "json-file") +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil { + c.Fatal(err, out) + } + if out, err := s.d.Cmd("pause", "test"); err != nil { + c.Fatal(err, out) + } + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + errchan := make(chan error) + go func() { + out, err := s.d.Cmd("start", "test") + if err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + name := strings.TrimSpace(out) + if name != "test" { + errchan <- fmt.Errorf("Paused container start error on docker daemon restart, expected 'test' but got '%s'", name) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on start a container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + + c.Assert(s.d.Restart(), check.IsNil) + + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, check.NotNil, check.Commentf("should not be able to remove in use volume after daemon restart")) + c.Assert(out, checker.Contains, "in use") +} + +func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) { + c.Assert(s.d.Start(), check.IsNil) + + _, err := s.d.Cmd("volume", "create", "test") + c.Assert(err, check.IsNil) + c.Assert(s.d.Restart(), check.IsNil) + + _, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonCorruptedLogDriverAddress(c *check.C) { + c.Assert(s.d.Start("--log-driver=syslog", "--log-opt", "syslog-address=corrupted:42"), check.NotNil) + expected := "Failed to set log opts: syslog-address should be in form proto://address" + runCmd := exec.Command("grep", expected, s.d.LogFileName()) + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { + c.Assert(s.d.Start("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil) + expected := "Failed to set log opts: invalid fluentd-address corrupted:c: " + runCmd := exec.Command("grep", expected, s.d.LogFileName()) + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { + s.d.useDefaultHost = true + defer func() { + s.d.useDefaultHost = false + }() + c.Assert(s.d.Start(), check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) { + s.d.useDefaultTLSHost = true + defer func() { + s.d.useDefaultTLSHost = false + }() + if err := s.d.Start( + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem"); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + // The client with --tlsverify should also use default host localhost:2376 + tmpHost := os.Getenv("DOCKER_HOST") + defer func() { + os.Setenv("DOCKER_HOST", tmpHost) + }() + + os.Setenv("DOCKER_HOST", "") + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } +} + +func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + bridgeIP := "192.169.1.1" + bridgeRange := bridgeIP + "/30" + + err := s.d.StartWithBusybox("--bip", bridgeRange) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + var cont int + for { + contName := fmt.Sprintf("container%d", cont) + _, err = s.d.Cmd("run", "--name", contName, "-d", "busybox", "/bin/sleep", "2") + if err != nil { + // pool exhausted + break + } + ip, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.IPAddress}}'", contName) + c.Assert(err, check.IsNil) + + c.Assert(ip, check.Not(check.Equals), bridgeIP) + cont++ + } +} + +// Test daemon for no space left on device error +func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, Network) + + testDir, err := ioutil.TempDir("", "no-space-left-on-device-test") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + c.Assert(mount.MakeRShared(testDir), checker.IsNil) + defer mount.Unmount(testDir) + + // create a 2MiB image and mount it as graph root + // Why in a container? Because `mount` sometimes behaves weirdly and often fails outright on this test in debian:jessie (which is what the test suite runs under if run from the Makefile) + dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=2 count=0") + out, _, err := runCommandWithOutput(exec.Command("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img"))) // `mkfs.ext4` is not in busybox + c.Assert(err, checker.IsNil, check.Commentf(out)) + + cmd := exec.Command("losetup", "-f", "--show", filepath.Join(testDir, "testfs.img")) + loout, err := cmd.CombinedOutput() + c.Assert(err, checker.IsNil) + loopname := strings.TrimSpace(string(loout)) + defer exec.Command("losetup", "-d", loopname).Run() + + dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", fmt.Sprintf("mkdir -p /test/test-mount && mount -t ext4 -no loop,rw %v /test/test-mount", loopname)) + defer mount.Unmount(filepath.Join(testDir, "test-mount")) + + err = s.d.Start("--graph", filepath.Join(testDir, "test-mount")) + defer s.d.Stop() + c.Assert(err, check.IsNil) + + // pull a repository large enough to fill the mount point + pullOut, err := s.d.Cmd("pull", "registry:2") + c.Assert(err, checker.NotNil, check.Commentf(pullOut)) + c.Assert(pullOut, checker.Contains, "no space left on device") +} + +// Test daemon restart with container links + auto restart +func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + parent1Args := []string{} + parent2Args := []string{} + wg := sync.WaitGroup{} + maxChildren := 10 + chErr := make(chan error, maxChildren) + + for i := 0; i < maxChildren; i++ { + wg.Add(1) + name := fmt.Sprintf("test%d", i) + + if i < maxChildren/2 { + parent1Args = append(parent1Args, []string{"--link", name}...) + } else { + parent2Args = append(parent2Args, []string{"--link", name}...) + } + + go func() { + _, err = s.d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top") + chErr <- err + wg.Done() + }() + } + + wg.Wait() + close(chErr) + for err := range chErr { + c.Assert(err, check.IsNil) + } + + parent1Args = append([]string{"run", "-d"}, parent1Args...) + parent1Args = append(parent1Args, []string{"--name=parent1", "--restart=always", "busybox", "top"}...) + parent2Args = append([]string{"run", "-d"}, parent2Args...) + parent2Args = append(parent2Args, []string{"--name=parent2", "--restart=always", "busybox", "top"}...) + + _, err = s.d.Cmd(parent1Args...) + c.Assert(err, check.IsNil) + _, err = s.d.Cmd(parent2Args...) + c.Assert(err, check.IsNil) + + err = s.d.Stop() + c.Assert(err, check.IsNil) + // clear the log file -- we don't need any of it but may for the next part + // can ignore the error here, this is just a cleanup + os.Truncate(s.d.LogFileName(), 0) + err = s.d.Start() + c.Assert(err, check.IsNil) + + for _, num := range []string{"1", "2"} { + out, err := s.d.Cmd("inspect", "-f", "{{ .State.Running }}", "parent"+num) + c.Assert(err, check.IsNil) + if strings.TrimSpace(out) != "true" { + log, _ := ioutil.ReadFile(s.d.LogFileName()) + c.Fatalf("parent container is not running\n%s", string(log)) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *check.C) { + testRequires(c, DaemonIsLinux) + + cgroupParent := "test" + name := "cgroup-test" + + err := s.d.StartWithBusybox("--cgroup-parent", cgroupParent) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup") + c.Assert(err, checker.IsNil) + cgroupPaths := parseCgroupPaths(string(out)) + c.Assert(len(cgroupPaths), checker.Not(checker.Equals), 0, check.Commentf("unexpected output - %q", string(out))) + out, err = s.d.Cmd("inspect", "-f", "{{.Id}}", name) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(string(out)) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + c.Assert(found, checker.True, check.Commentf("Cgroup path for container (%s) doesn't found in cgroups file: %s", expectedCgroup, cgroupPaths)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support links + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--name=test2", "--link", "test:abc", "busybox", "sh", "-c", "ping -c 1 -w 1 abc") + c.Assert(err, check.IsNil, check.Commentf(out)) + + c.Assert(s.d.Restart(), check.IsNil) + + // should fail since test is not running yet + out, err = s.d.Cmd("start", "test2") + c.Assert(err, check.NotNil, check.Commentf(out)) + + out, err = s.d.Cmd("start", "test") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(strings.Contains(out, "1 packets transmitted, 1 packets received"), check.Equals, true, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support links + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("create", "--name=test", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "--name=test2", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + test2ID := strings.TrimSpace(out) + + out, err = s.d.Cmd("run", "-d", "--name=test3", "--link", "test2:abc", "busybox", "top") + test3ID := strings.TrimSpace(out) + + c.Assert(s.d.Restart(), check.IsNil) + + out, err = s.d.Cmd("create", "--name=test", "busybox") + c.Assert(err, check.NotNil, check.Commentf("expected error trying to create container with duplicate name")) + // this one is no longer needed, removing simplifies the remainder of the test + out, err = s.d.Cmd("rm", "-f", "test") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("ps", "-a", "--no-trunc") + c.Assert(err, check.IsNil, check.Commentf(out)) + + lines := strings.Split(strings.TrimSpace(out), "\n")[1:] + + test2validated := false + test3validated := false + for _, line := range lines { + fields := strings.Fields(line) + names := fields[len(fields)-1] + switch fields[0] { + case test2ID: + c.Assert(names, check.Equals, "test2,test3/abc") + test2validated = true + case test3ID: + c.Assert(names, check.Equals, "test3") + test3validated = true + } + } + + c.Assert(test2validated, check.Equals, true) + c.Assert(test3validated, check.Equals, true) +} + +// TestDaemonRestartWithKilledRunningContainer requires live restore of running containers +func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check.C) { + // TODO(mlaventure): Not sure what would the exit code be on windows + testRequires(t, DaemonIsLinux) + if err := s.d.StartWithBusybox(); err != nil { + t.Fatal(err) + } + + cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") + defer s.d.Stop() + if err != nil { + t.Fatal(cid, err) + } + cid = strings.TrimSpace(cid) + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid) + t.Assert(err, check.IsNil) + pid = strings.TrimSpace(pid) + + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // kill the container + runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", cid) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + t.Fatalf("Failed to run ctr, ExitCode: %d, err: '%v' output: '%s' cid: '%s'\n", ec, err, out, cid) + } + + // Give time to containerd to process the command if we don't + // the exit event might be received after we do the inspect + pidCmd := exec.Command("kill", "-0", pid) + _, ec, _ := runCommandWithOutput(pidCmd) + for ec == 0 { + time.Sleep(1 * time.Second) + _, ec, _ = runCommandWithOutput(pidCmd) + } + + // restart the daemon + if err := s.d.Start(); err != nil { + t.Fatal(err) + } + + // Check that we've got the correct exit code + out, err := s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", cid) + t.Assert(err, check.IsNil) + + out = strings.TrimSpace(out) + if out != "143" { + t.Fatalf("Expected exit code '%s' got '%s' for container '%s'\n", "143", out, cid) + } + +} + +// os.Kill should kill daemon ungracefully, leaving behind live containers. +// The live containers should be known to the restarted daemon. Stopping +// them now, should remove the mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { + testRequires(c, DaemonIsLinux) + c.Assert(s.d.StartWithBusybox("--live-restore"), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + + c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + // container mounts should exist even after daemon has crashed. + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + + // restart daemon. + if err := s.d.Restart("--live-restore"); err != nil { + c.Fatal(err) + } + + // container should be running. + out, err = s.d.Cmd("inspect", "--format={{.State.Running}}", id) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + out = strings.TrimSpace(out) + if out != "true" { + c.Fatalf("Container %s expected to stay alive after daemon restart", id) + } + + // 'docker stop' should work. + out, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + // Now, container mounts should be gone. + mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +// TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers. +func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *check.C) { + // TODO(mlaventure): Not sure what would the exit code be on windows + testRequires(t, DaemonIsLinux) + if err := s.d.StartWithBusybox("--live-restore"); err != nil { + t.Fatal(err) + } + + cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") + defer s.d.Stop() + if err != nil { + t.Fatal(cid, err) + } + cid = strings.TrimSpace(cid) + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid) + t.Assert(err, check.IsNil) + + // pause the container + if _, err := s.d.Cmd("pause", cid); err != nil { + t.Fatal(cid, err) + } + + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // resume the container + result := icmd.RunCommand( + ctrBinary, + "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", + "containers", "resume", cid) + t.Assert(result, icmd.Matches, icmd.Success) + + // Give time to containerd to process the command if we don't + // the resume event might be received after we do the inspect + waitAndAssert(t, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) { + result := icmd.RunCommand("kill", "-0", strings.TrimSpace(pid)) + return result.ExitCode, nil + }, checker.Equals, 0) + + // restart the daemon + if err := s.d.Start("--live-restore"); err != nil { + t.Fatal(err) + } + + // Check that we've got the correct status + out, err := s.d.Cmd("inspect", "-f", "{{.State.Status}}", cid) + t.Assert(err, check.IsNil) + + out = strings.TrimSpace(out) + if out != "running" { + t.Fatalf("Expected exit code '%s' got '%s' for container '%s'\n", "running", out, cid) + } + if _, err := s.d.Cmd("kill", cid); err != nil { + t.Fatal(err) + } +} + +// TestRunLinksChanged checks that creating a new container with the same name does not update links +// this ensures that the old, pre gh#16032 functionality continues on +func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support links + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--name=test2", "--link=test:abc", "busybox", "sh", "-c", "ping -c 1 abc") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "1 packets transmitted, 1 packets received") + + out, err = s.d.Cmd("rm", "-f", "test") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "--name=test", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") + + err = s.d.Restart() + c.Assert(err, check.IsNil) + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") +} + +func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) { + testRequires(c, DaemonIsLinux, NotPpc64le) + + infoLog := "\x1b[34mINFO\x1b" + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + // Enable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=false") + s.d.Stop() + c.Assert(b.String(), checker.Contains, infoLog) + + b.Reset() + + // Disable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=true") + s.d.Stop() + c.Assert(b.String(), check.Not(checker.Contains), infoLog) +} + +func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) { + testRequires(c, DaemonIsLinux, NotPpc64le) + + debugLog := "\x1b[37mDEBU\x1b" + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + s.d.StartWithLogFile(tty, "--debug") + s.d.Stop() + c.Assert(b.String(), checker.Contains, debugLog) +} + +func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + daemonConfig := `{ "debug" : false }` + configFile, err := ioutil.TempFile("", "test-daemon-discovery-backend-config-reload-config") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file for config reload")) + configFilePath := configFile.Name() + defer func() { + configFile.Close() + os.RemoveAll(configFile.Name()) + }() + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + // --log-level needs to be set so that d.Start() doesn't add --debug causing + // a conflict with the config + err = s.d.Start("--config-file", configFilePath, "--log-level=info") + c.Assert(err, checker.IsNil) + + // daemon config file + daemonConfig = `{ + "cluster-store": "consul://consuladdr:consulport/some/path", + "cluster-advertise": "192.168.56.100:0", + "debug" : false + }` + + err = configFile.Truncate(0) + c.Assert(err, checker.IsNil) + _, err = configFile.Seek(0, os.SEEK_SET) + c.Assert(err, checker.IsNil) + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + err = s.d.reloadConfig() + c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: consul://consuladdr:consulport/some/path")) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: 192.168.56.100:0")) +} + +// Test for #21956 +func (s *DockerDaemonSuite) TestDaemonLogOptions(c *check.C) { + err := s.d.StartWithBusybox("--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514") + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("inspect", "--format='{{.HostConfig.LogConfig}}'", id) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "{json-file map[]}") +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrency(c *check.C) { + c.Assert(s.d.Start("--max-concurrent-uploads=6", "--max-concurrent-downloads=8"), check.IsNil) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-downloads" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 7, "max-concurrent-downloads" : 9 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` + content, _ = ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-uploads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 1, "max-concurrent-downloads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, _ = ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "labels":["foo=bar"] }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, _ = ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { + err := s.d.StartWithBusybox("-b=none", "--iptables=false") + c.Assert(err, check.IsNil) + s.d.c.Logf("dockerBinary %s", dockerBinary) + out, code, err := s.d.buildImageWithOut("busyboxs", + `FROM busybox + RUN cat /etc/hosts`, false) + comment := check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err) + c.Assert(err, check.IsNil, comment) + c.Assert(code, check.Equals, 0, comment) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.StartWithBusybox("--dns", "1.2.3.4") + c.Assert(err, checker.IsNil) + + expectedOutput := "nameserver 1.2.3.4" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSSearchInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.StartWithBusybox("--dns-search", "example.com") + c.Assert(err, checker.IsNil) + + expectedOutput := "search example.com" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSOptionsInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.StartWithBusybox("--dns-opt", "timeout:3") + c.Assert(err, checker.IsNil) + + expectedOutput := "options timeout:3" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { + conf, err := ioutil.TempFile("", "config-file-") + c.Assert(err, check.IsNil) + configName := conf.Name() + conf.Close() + defer os.Remove(configName) + + config := ` +{ + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + err = s.d.StartWithBusybox("--config-file", configName) + c.Assert(err, check.IsNil) + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Reset config to only have the default + config = ` +{ + "runtimes": { + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + // Run with default runtime + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + config = ` +{ + "runtimes": { + "runc": { + "path": "my-runc" + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`) + + // Check that we can select a default runtime + config = ` +{ + "default-runtime": "vm", + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { + err := s.d.StartWithBusybox("--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + c.Assert(err, check.IsNil) + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Start a daemon without any extra runtimes + s.d.Stop() + err = s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + // Run with default runtime + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + s.d.Stop() + err = s.d.Start("--add-runtime", "runc=my-runc") + c.Assert(err, check.NotNil) + + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`) + + // Check that we can select a default runtime + s.d.Stop() + err = s.d.StartWithBusybox("--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + c.Assert(err, check.IsNil) + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + // top1 will exist after daemon restarts + out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("run top1: %v", out)) + // top2 will be removed after daemon restarts + out, err = s.d.Cmd("run", "-d", "--rm", "--name", "top2", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("run top2: %v", out)) + + out, err = s.d.Cmd("ps") + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should be running")) + c.Assert(out, checker.Contains, "top2", check.Commentf("top2 should be running")) + + // now restart daemon gracefully + err = s.d.Restart() + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("ps", "-a") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should exist after daemon restarts")) + c.Assert(out, checker.Not(checker.Contains), "top2", check.Commentf("top2 should be removed after daemon restarts")) +} + +func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + containerName := "error-values" + // Make a container with both a non 0 exit code and an error message + out, err := s.d.Cmd("run", "--name", containerName, "busybox", "toto") + c.Assert(err, checker.NotNil) + + // Check that those values were saved on disk + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + + // now restart daemon + err = s.d.Restart() + c.Assert(err, checker.IsNil) + + // Check that those values are still around + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { + testRequires(c, SameHostDaemon) + d := s.d + err := d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + // hack to be able to side-load a container config + out, err := d.Cmd("create", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "--type=image", "--format={{.ID}}", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(d.Stop(), checker.IsNil) + <-d.wait + + imageID := strings.TrimSpace(out) + volumeID := stringid.GenerateNonCryptoID() + vfsPath := filepath.Join(d.root, "vfs", "dir", volumeID) + c.Assert(os.MkdirAll(vfsPath, 0755), checker.IsNil) + + config := []byte(` + { + "ID": "` + id + `", + "Name": "hello", + "Driver": "` + d.storageDriver + `", + "Image": "` + imageID + `", + "Config": {"Image": "busybox:latest"}, + "NetworkSettings": {}, + "Volumes": { + "/bar":"/foo", + "/foo": "` + vfsPath + `", + "/quux":"/quux" + }, + "VolumesRW": { + "/bar": true, + "/foo": true, + "/quux": false + } + } + `) + + configPath := filepath.Join(d.root, "containers", id, "config.v2.json") + err = ioutil.WriteFile(configPath, config, 600) + err = d.Start() + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("inspect", "--type=container", "--format={{ json .Mounts }}", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + type mount struct { + Name string + Source string + Destination string + Driver string + RW bool + } + + ls := []mount{} + err = json.NewDecoder(strings.NewReader(out)).Decode(&ls) + c.Assert(err, checker.IsNil) + + expected := []mount{ + {Source: "/foo", Destination: "/bar", RW: true}, + {Name: volumeID, Destination: "/foo", RW: true}, + {Source: "/quux", Destination: "/quux", RW: false}, + } + c.Assert(ls, checker.HasLen, len(expected)) + + for _, m := range ls { + var matched bool + for _, x := range expected { + if m.Source == x.Source && m.Destination == x.Destination && m.RW == x.RW || m.Name != x.Name { + matched = true + break + } + } + c.Assert(matched, checker.True, check.Commentf("did find match for %+v", m)) + } +} + +func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + dockerProxyPath, err := exec.LookPath("docker-proxy") + c.Assert(err, checker.IsNil) + tmpDir, err := ioutil.TempDir("", "test-docker-proxy") + c.Assert(err, checker.IsNil) + + newProxyPath := filepath.Join(tmpDir, "docker-proxy") + cmd := exec.Command("cp", dockerProxyPath, newProxyPath) + c.Assert(cmd.Run(), checker.IsNil) + + // custom one + c.Assert(s.d.StartWithBusybox("--userland-proxy-path", newProxyPath), checker.IsNil) + out, err := s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // try with the original one + c.Assert(s.d.Restart("--userland-proxy-path", dockerProxyPath), checker.IsNil) + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // not exist + c.Assert(s.d.Restart("--userland-proxy-path", "/does/not/exist"), checker.IsNil) + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "driver failed programming external connectivity on endpoint") + c.Assert(out, checker.Contains, "/does/not/exist: no such file or directory") +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *check.C) { + testRequires(c, SameHostDaemon) + + c.Assert(s.d.StartWithBusybox("--shutdown-timeout=3"), check.IsNil) + + _, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGINT) + + select { + case <-s.d.wait: + case <-time.After(5 * time.Second): + } + + expectedMessage := `level=debug msg="start clean shutdown of all containers with a 3 seconds timeout..."` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMessage) +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "shutdown-timeout" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "shutdown-timeout" : 5 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + select { + case <-s.d.wait: + case <-time.After(3 * time.Second): + } + + expectedMessage := `level=debug msg="Reset Shutdown Timeout: 5"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMessage) +} + +// Test case for 29342 +func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d.StartWithBusybox("--live-restore") + + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.waitRun("top") + + out1, err := s.d.Cmd("exec", "-u", "test", "top", "id") + // uid=100(test) gid=101(test) groups=101(test) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out1)) + + // restart daemon. + s.d.Restart("--live-restore") + + out2, err := s.d.Cmd("exec", "-u", "test", "top", "id") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out2)) + c.Assert(out1, check.Equals, out2, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2)) + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) +} + +func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux, overlayFSSupported, SameHostDaemon) + s.d.StartWithBusybox("--live-restore", "--storage-driver", "overlay") + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.waitRun("top") + + // restart daemon. + s.d.Restart("--live-restore", "--storage-driver", "overlay") + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + // test if the rootfs mountpoint still exist + mountpoint, err := s.d.inspectFilter("top", ".GraphDriver.Data.MergedDir") + c.Assert(err, check.IsNil) + f, err := os.Open("/proc/self/mountinfo") + c.Assert(err, check.IsNil) + defer f.Close() + sc := bufio.NewScanner(f) + for sc.Scan() { + line := sc.Text() + if strings.Contains(line, mountpoint) { + c.Fatalf("mountinfo should not include the mountpoint of stop container") + } + } + + out, err = s.d.Cmd("rm", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_diff_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_diff_test.go new file mode 100644 index 0000000..08cf6e1 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_diff_test.go @@ -0,0 +1,98 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// ensure that an added file shows up in docker diff +func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { + containerCmd := `mkdir /foo; echo xyzzy > /foo/bar` + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) + + // Wait for it to exit as cannot diff a running container on Windows, and + // it will take a few seconds to exit. Also there's no way in Windows to + // differentiate between an Add or a Modify, and all files are under + // a "Files/" prefix. + containerID := strings.TrimSpace(out) + lookingFor := "A /foo/bar" + if daemonPlatform == "windows" { + err := waitExited(containerID, 60*time.Second) + c.Assert(err, check.IsNil) + lookingFor = "C Files/foo/bar" + } + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + found := false + for _, line := range strings.Split(out, "\n") { + if strings.Contains(line, lookingFor) { + found = true + break + } + } + c.Assert(found, checker.True) +} + +// test to ensure GH #3840 doesn't occur any more +func (s *DockerSuite) TestDiffEnsureInitLayerFilesAreIgnored(c *check.C) { + testRequires(c, DaemonIsLinux) + // this is a list of files which shouldn't show up in `docker diff` + initLayerFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerenv"} + containerCount := 5 + + // we might not run into this problem from the first run, so start a few containers + for i := 0; i < containerCount; i++ { + containerCmd := `echo foo > /root/bar` + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + for _, filename := range initLayerFiles { + c.Assert(out, checker.Not(checker.Contains), filename) + } + } +} + +func (s *DockerSuite) TestDiffEnsureDefaultDevs(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "0") + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + expected := map[string]bool{ + "C /dev": true, + "A /dev/full": true, // busybox + "C /dev/ptmx": true, // libcontainer + "A /dev/mqueue": true, + "A /dev/kmsg": true, + "A /dev/fd": true, + "A /dev/ptmx": true, + "A /dev/null": true, + "A /dev/random": true, + "A /dev/stdout": true, + "A /dev/stderr": true, + "A /dev/tty1": true, + "A /dev/stdin": true, + "A /dev/tty": true, + "A /dev/urandom": true, + "A /dev/zero": true, + } + + for _, line := range strings.Split(out, "\n") { + c.Assert(line == "" || expected[line], checker.True, check.Commentf(line)) + } +} + +// https://github.com/docker/docker/pull/14381#discussion_r33859347 +func (s *DockerSuite) TestDiffEmptyArgClientError(c *check.C) { + out, _, err := dockerCmdWithError("diff", "") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "Container name cannot be empty") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_events_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_events_test.go new file mode 100644 index 0000000..1fbfc74 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_events_test.go @@ -0,0 +1,794 @@ +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "strings" + "time" + + eventtypes "github.com/docker/docker/api/types/events" + eventstestutils "github.com/docker/docker/daemon/events/testutils" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) { + name := "events-time-format-test" + + // Start stopwatch, generate an event + start := daemonTime(c) + time.Sleep(1100 * time.Millisecond) // so that first event occur in different second from since (just for the case) + dockerCmd(c, "run", "--rm", "--name", name, "busybox", "true") + time.Sleep(1100 * time.Millisecond) // so that until > since + end := daemonTime(c) + + // List of available time formats to --since + unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) } + rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) } + duration := func(t time.Time) string { return time.Now().Sub(t).String() } + + // --since=$start must contain only the 'untag' event + for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} { + since, until := f(start), f(end) + out, _ := dockerCmd(c, "events", "--since="+since, "--until="+until) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, name, "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) + } +} + +func (s *DockerSuite) TestEventsUntag(c *check.C) { + image := "busybox" + dockerCmd(c, "tag", image, "utest:tag1") + dockerCmd(c, "tag", image, "utest:tag2") + dockerCmd(c, "rmi", "utest:tag1") + dockerCmd(c, "rmi", "utest:tag2") + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "events", "--since=1"}, + Timeout: time.Millisecond * 2500, + }) + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + + events := strings.Split(result.Stdout(), "\n") + nEvents := len(events) + // The last element after the split above will be an empty string, so we + // get the two elements before the last, which are the untags we're + // looking for. + for _, v := range events[nEvents-3 : nEvents-1] { + c.Assert(v, checker.Contains, "untag", check.Commentf("event should be untag")) + } +} + +func (s *DockerSuite) TestEventsLimit(c *check.C) { + // Limit to 8 goroutines creating containers in order to prevent timeouts + // creating so many containers simultaneously on Windows + sem := make(chan bool, 8) + numContainers := 17 + errChan := make(chan error, numContainers) + + args := []string{"run", "--rm", "busybox", "true"} + for i := 0; i < numContainers; i++ { + sem <- true + go func() { + defer func() { <-sem }() + out, err := exec.Command(dockerBinary, args...).CombinedOutput() + if err != nil { + err = fmt.Errorf("%v: %s", err, string(out)) + } + errChan <- err + }() + } + + // Wait for all goroutines to finish + for i := 0; i < cap(sem); i++ { + sem <- true + } + close(errChan) + + for err := range errChan { + c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " "))) + } + + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + nEvents := len(events) - 1 + c.Assert(nEvents, checker.Equals, 64, check.Commentf("events should be limited to 64, but received %d", nEvents)) +} + +func (s *DockerSuite) TestEventsContainerEvents(c *check.C) { + dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") + + out, _ := dockerCmd(c, "events", "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, "container-events-test", "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsContainerEventsAttrSort(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") + + out, _ := dockerCmd(c, "events", "--filter", "container=container-events-test", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 3) //Missing expected event + matchedEvents := 0 + for _, event := range events { + matches := eventstestutils.ScanMap(event) + if matches["eventType"] == "container" && matches["action"] == "create" { + matchedEvents++ + c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted")) + } else if matches["eventType"] == "container" && matches["action"] == "start" { + matchedEvents++ + c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted")) + } + } + c.Assert(matchedEvents, checker.Equals, 2, check.Commentf("missing events for container container-events-test:\n%s", out)) +} + +func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) { + dockerCmd(c, "run", "--rm", "--name", "since-epoch-test", "busybox", "true") + timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano) + timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1) + out, _ := dockerCmd(c, "events", "--since", timeBeginning, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, "since-epoch-test", "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsImageTag(c *check.C) { + time.Sleep(1 * time.Second) // because API has seconds granularity + since := daemonUnixTime(c) + image := "testimageevents:tag" + dockerCmd(c, "tag", "busybox", image) + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1, check.Commentf("was expecting 1 event. out=%s", out)) + event := strings.TrimSpace(events[0]) + + matches := eventstestutils.ScanMap(event) + c.Assert(matchEventID(matches, image), checker.True, check.Commentf("matches: %v\nout:\n%s", matches, out)) + c.Assert(matches["action"], checker.Equals, "tag") +} + +func (s *DockerSuite) TestEventsImagePull(c *check.C) { + // TODO Windows: Enable this test once pull and reliable image names are available + testRequires(c, DaemonIsLinux) + since := daemonUnixTime(c) + testRequires(c, Network) + + dockerCmd(c, "pull", "hello-world") + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + event := strings.TrimSpace(events[len(events)-1]) + matches := eventstestutils.ScanMap(event) + c.Assert(matches["id"], checker.Equals, "hello-world:latest") + c.Assert(matches["action"], checker.Equals, "pull") + +} + +func (s *DockerSuite) TestEventsImageImport(c *check.C) { + // TODO Windows CI. This should be portable once export/import are + // more reliable (@swernli) + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + since := daemonUnixTime(c) + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "export", cleanedContainerID), + exec.Command(dockerBinary, "import", "-"), + ) + c.Assert(err, checker.IsNil, check.Commentf("import failed with output: %q", out)) + imageRef := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=import") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageRef, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "import", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsImageLoad(c *check.C) { + testRequires(c, DaemonIsLinux) + myImageName := "footest:v1" + dockerCmd(c, "tag", "busybox", myImageName) + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + longImageID := strings.TrimSpace(out) + c.Assert(longImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty")) + + dockerCmd(c, "save", "-o", "saveimg.tar", myImageName) + dockerCmd(c, "rmi", myImageName) + out, _ = dockerCmd(c, "images", "-q", myImageName) + noImageID := strings.TrimSpace(out) + c.Assert(noImageID, checker.Equals, "", check.Commentf("Should not have any image")) + dockerCmd(c, "load", "-i", "saveimg.tar") + + result := icmd.RunCommand("rm", "-rf", "saveimg.tar") + c.Assert(result, icmd.Matches, icmd.Success) + + out, _ = dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + imageID := strings.TrimSpace(out) + c.Assert(imageID, checker.Equals, longImageID, check.Commentf("Should have same image id as before")) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=load") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "load", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=save") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches = eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "save", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsPluginOps(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + since := daemonUnixTime(c) + + dockerCmd(c, "plugin", "install", pNameWithTag, "--grant-all-permissions") + dockerCmd(c, "plugin", "disable", pNameWithTag) + dockerCmd(c, "plugin", "remove", pNameWithTag) + + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 4) + + pluginEvents := eventActionsByIDAndType(c, events, pNameWithTag, "plugin") + c.Assert(pluginEvents, checker.HasLen, 4, check.Commentf("events: %v", events)) + + c.Assert(pluginEvents[0], checker.Equals, "pull", check.Commentf(out)) + c.Assert(pluginEvents[1], checker.Equals, "enable", check.Commentf(out)) + c.Assert(pluginEvents[2], checker.Equals, "disable", check.Commentf(out)) + c.Assert(pluginEvents[3], checker.Equals, "remove", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsFilters(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "busybox", "true") + dockerCmd(c, "run", "--rm", "busybox", "true") + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die") + parseEvents(c, out, "die") + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die", "--filter", "event=start") + parseEvents(c, out, "die|start") + + // make sure we at least got 2 start events + count := strings.Count(out, "start") + c.Assert(strings.Count(out, "start"), checker.GreaterOrEqualThan, 2, check.Commentf("should have had 2 start events but had %d, out: %s", count, out)) + +} + +func (s *DockerSuite) TestEventsFilterImageName(c *check.C) { + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "run", "--name", "container_1", "-d", "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--name", "container_2", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + name := "busybox" + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("image=%s", name)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + c.Assert(events, checker.Not(checker.HasLen), 0) //Expected events but found none for the image busybox:latest + count1 := 0 + count2 := 0 + + for _, e := range events { + if strings.Contains(e, container1) { + count1++ + } else if strings.Contains(e, container2) { + count2++ + } + } + c.Assert(count1, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count1, container1)) + c.Assert(count2, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count2, container2)) + +} + +func (s *DockerSuite) TestEventsFilterLabels(c *check.C) { + since := daemonUnixTime(c) + label := "io.docker.testing=foo" + + out, _ := dockerCmd(c, "run", "-d", "-l", label, "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 3) + + for _, e := range events { + c.Assert(e, checker.Contains, container1) + c.Assert(e, checker.Not(checker.Contains), container2) + } +} + +func (s *DockerSuite) TestEventsFilterImageLabels(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. + _, err := buildImage(name, fmt.Sprintf(` + FROM busybox:latest + LABEL %s`, label), true) + c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + + dockerCmd(c, "tag", name, "labelfiltertest:tag1") + dockerCmd(c, "tag", name, "labelfiltertest:tag2") + dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=image") + + events := strings.Split(strings.TrimSpace(out), "\n") + + // 2 events from the "docker tag" command, another one is from "docker build" + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + for _, e := range events { + c.Assert(e, checker.Contains, "labelfiltertest") + } +} + +func (s *DockerSuite) TestEventsFilterContainer(c *check.C) { + since := daemonUnixTime(c) + nameID := make(map[string]string) + + for _, name := range []string{"container_1", "container_2"} { + dockerCmd(c, "run", "--name", name, "busybox", "true") + id := inspectField(c, name, "Id") + nameID[name] = id + } + + until := daemonUnixTime(c) + + checkEvents := func(id string, events []string) error { + if len(events) != 4 { // create, attach, start, die + return fmt.Errorf("expected 4 events, got %v", events) + } + for _, event := range events { + matches := eventstestutils.ScanMap(event) + if !matchEventID(matches, id) { + return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, matches["id"]) + } + } + return nil + } + + for name, ID := range nameID { + // filter by names + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+name) + events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") + c.Assert(checkEvents(ID, events), checker.IsNil) + + // filter by ID's + out, _ = dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+ID) + events = strings.Split(strings.TrimSuffix(out, "\n"), "\n") + c.Assert(checkEvents(ID, events), checker.IsNil) + } +} + +func (s *DockerSuite) TestEventsCommit(c *check.C) { + // Problematic on Windows as cannot commit a running container + testRequires(c, DaemonIsLinux) + + out, _ := runSleepingContainer(c) + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "commit", "-m", "test", cID) + dockerCmd(c, "stop", cID) + c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "commit", check.Commentf("Missing 'commit' log event")) +} + +func (s *DockerSuite) TestEventsCopy(c *check.C) { + // Build a test image. + id, err := buildImage("cpimg", ` + FROM busybox + RUN echo HI > /file`, true) + c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + + // Create an empty test file. + tempFile, err := ioutil.TempFile("", "test-events-copy-") + c.Assert(err, checker.IsNil) + defer os.Remove(tempFile.Name()) + + c.Assert(tempFile.Close(), checker.IsNil) + + dockerCmd(c, "create", "--name=cptest", id) + + dockerCmd(c, "cp", "cptest:/file", tempFile.Name()) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "archive-path", check.Commentf("Missing 'archive-path' log event\n")) + + dockerCmd(c, "cp", tempFile.Name(), "cptest:/filecopy") + + until = daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "extract-to-dir", check.Commentf("Missing 'extract-to-dir' log event")) +} + +func (s *DockerSuite) TestEventsResize(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + endpoint := "/containers/" + cID + "/resize?h=80&w=24" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "resize", check.Commentf("Missing 'resize' log event")) +} + +func (s *DockerSuite) TestEventsAttach(c *check.C) { + // TODO Windows CI: Figure out why this test fails intermittently (TP5). + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + cmd := exec.Command(dockerBinary, "attach", cID) + stdin, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + // Make sure we're done attaching by writing/reading some stuff + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello'")) + + c.Assert(stdin.Close(), checker.IsNil) + + dockerCmd(c, "kill", cID) + c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "attach", check.Commentf("Missing 'attach' log event")) +} + +func (s *DockerSuite) TestEventsRename(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "oldName", "busybox", "true") + cID := strings.TrimSpace(out) + dockerCmd(c, "rename", "oldName", "newName") + + until := daemonUnixTime(c) + // filter by the container id because the name in the event will be the new name. + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until", until) + c.Assert(out, checker.Contains, "rename", check.Commentf("Missing 'rename' log event\n")) +} + +func (s *DockerSuite) TestEventsTop(c *check.C) { + // Problematic on Windows as Windows does not support top + testRequires(c, DaemonIsLinux) + + out, _ := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "top", cID) + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, " top", check.Commentf("Missing 'top' log event")) +} + +// #14316 +func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) { + // Problematic to port for Windows CI during TP5 timeframe until + // supporting push + testRequires(c, DaemonIsLinux) + testRequires(c, Network) + repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "commit", cID, repoName) + dockerCmd(c, "stop", cID) + dockerCmd(c, "push", repoName) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "image="+repoName, "-f", "event=push", "--until", until) + c.Assert(out, checker.Contains, repoName, check.Commentf("Missing 'push' log event for %s", repoName)) +} + +func (s *DockerSuite) TestEventsFilterType(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. + _, err := buildImage(name, fmt.Sprintf(` + FROM busybox:latest + LABEL %s`, label), true) + c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + + dockerCmd(c, "tag", name, "labelfiltertest:tag1") + dockerCmd(c, "tag", name, "labelfiltertest:tag2") + dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=image") + + events := strings.Split(strings.TrimSpace(out), "\n") + + // 2 events from the "docker tag" command, another one is from "docker build" + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + for _, e := range events { + c.Assert(e, checker.Contains, "labelfiltertest") + } + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=container") + events = strings.Split(strings.TrimSpace(out), "\n") + + // Events generated by the container that builds the image + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", "type=network") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 1, check.Commentf("Events == %s", events)) +} + +// #25798 +func (s *DockerSuite) TestEventsSpecialFiltersWithExecCreate(c *check.C) { + since := daemonUnixTime(c) + runSleepingContainer(c, "--name", "test-container", "-d") + waitRun("test-container") + + dockerCmd(c, "exec", "test-container", "echo", "hello-world") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", + "event='exec_create: echo hello-world'", + ) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 1, check.Commentf(out)) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", + "event=exec_create", + ) + c.Assert(len(events), checker.Equals, 1, check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsFilterImageInContainerAction(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") + waitRun("test-container") + + out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 1, check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsContainerRestart(c *check.C) { + dockerCmd(c, "run", "-d", "--name=testEvent", "--restart=on-failure:3", "busybox", "false") + + // wait until test2 is auto removed. + waitTime := 10 * time.Second + if daemonPlatform == "windows" { + // Windows takes longer... + waitTime = 90 * time.Second + } + + err := waitInspect("testEvent", "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTime) + c.Assert(err, checker.IsNil) + + var ( + createCount int + startCount int + dieCount int + ) + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container=testEvent") + events := strings.Split(strings.TrimSpace(out), "\n") + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 1) //Missing expected event + actions := eventActionsByIDAndType(c, events, "testEvent", "container") + + for _, a := range actions { + switch a { + case "create": + createCount++ + case "start": + startCount++ + case "die": + dieCount++ + } + } + c.Assert(createCount, checker.Equals, 1, check.Commentf("testEvent should be created 1 times: %v", actions)) + c.Assert(startCount, checker.Equals, 4, check.Commentf("testEvent should start 4 times: %v", actions)) + c.Assert(dieCount, checker.Equals, 4, check.Commentf("testEvent should die 4 times: %v", actions)) +} + +func (s *DockerSuite) TestEventsSinceInTheFuture(c *check.C) { + dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") + waitRun("test-container") + + since := daemonTime(c) + until := since.Add(time.Duration(-24) * time.Hour) + out, _, err := dockerCmdWithError("events", "--filter", "image=busybox", "--since", parseEventTime(since), "--until", parseEventTime(until)) + + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "cannot be after `until`") +} + +func (s *DockerSuite) TestEventsUntilInThePast(c *check.C) { + since := daemonUnixTime(c) + + dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") + waitRun("test-container") + + until := daemonUnixTime(c) + + dockerCmd(c, "run", "--name", "test-container2", "-d", "busybox", "true") + waitRun("test-container2") + + out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", until) + + c.Assert(out, checker.Not(checker.Contains), "test-container2") + c.Assert(out, checker.Contains, "test-container") +} + +func (s *DockerSuite) TestEventsFormat(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "busybox", "true") + dockerCmd(c, "run", "--rm", "busybox", "true") + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--format", "{{json .}}") + dec := json.NewDecoder(strings.NewReader(out)) + // make sure we got 2 start events + startCount := 0 + for { + var err error + var ev eventtypes.Message + if err = dec.Decode(&ev); err == io.EOF { + break + } + c.Assert(err, checker.IsNil) + if ev.Status == "start" { + startCount++ + } + } + + c.Assert(startCount, checker.Equals, 2, check.Commentf("should have had 2 start events but had %d, out: %s", startCount, out)) +} + +func (s *DockerSuite) TestEventsFormatBadFunc(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{badFuncString .}}") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1: function \"badFuncString\" not defined", + }) +} + +func (s *DockerSuite) TestEventsFormatBadField(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{.badFieldString}}") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1:2: executing \"\" at <.badFieldString>: can't evaluate field badFieldString in type *events.Message", + }) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_events_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_events_unix_test.go new file mode 100644 index 0000000..dc91667 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_events_unix_test.go @@ -0,0 +1,486 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "syscall" + "time" + "unicode" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #5979 +func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "busybox", "true") + + file, err := ioutil.TempFile("", "") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file")) + defer os.Remove(file.Name()) + + command := fmt.Sprintf("%s events --since=%s --until=%s > %s", dockerBinary, since, daemonUnixTime(c), file.Name()) + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Run(), checker.IsNil, check.Commentf("run err for command %q", command)) + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + for _, ch := range scanner.Text() { + c.Assert(unicode.IsControl(ch), checker.False, check.Commentf("found control character %v", []byte(string(ch)))) + } + } + c.Assert(scanner.Err(), checker.IsNil, check.Commentf("Scan err for command %q", command)) + +} + +func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport) + + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "--name", "oomFalse", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } + + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=oomFalse", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") + nEvents := len(events) + + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + c.Assert(parseEventAction(c, events[nEvents-5]), checker.Equals, "create") + c.Assert(parseEventAction(c, events[nEvents-4]), checker.Equals, "attach") + c.Assert(parseEventAction(c, events[nEvents-3]), checker.Equals, "start") + c.Assert(parseEventAction(c, events[nEvents-2]), checker.Equals, "oom") + c.Assert(parseEventAction(c, events[nEvents-1]), checker.Equals, "die") +} + +func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport) + + errChan := make(chan error) + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "--oom-kill-disable=true", "--name", "oomTrue", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + c.Assert(waitRun("oomTrue"), checker.IsNil) + defer dockerCmd(c, "kill", "oomTrue") + containerID := inspectField(c, "oomTrue", "Id") + + testActions := map[string]chan bool{ + "oom": make(chan bool), + } + + matcher := matchEventLine(containerID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(20 * time.Second): + observer.CheckEventError(c, containerID, "oom", matcher) + case <-testActions["oom"]: + // ignore, done + case errRun := <-errChan: + if errRun != nil { + c.Fatalf("%v", errRun) + } else { + c.Fatalf("container should be still running but it's not") + } + } + + status := inspectField(c, "oomTrue", "State.Status") + c.Assert(strings.TrimSpace(status), checker.Equals, "running", check.Commentf("container should be still running")) +} + +// #18453 +func (s *DockerSuite) TestEventsContainerFilterByName(c *check.C) { + testRequires(c, DaemonIsLinux) + cOut, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + c1 := strings.TrimSpace(cOut) + waitRun("foo") + cOut, _ = dockerCmd(c, "run", "--name=bar", "-d", "busybox", "top") + c2 := strings.TrimSpace(cOut) + waitRun("bar") + out, _ := dockerCmd(c, "events", "-f", "container=foo", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(out, checker.Contains, c1, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), c2, check.Commentf(out)) +} + +// #18453 +func (s *DockerSuite) TestEventsContainerFilterBeforeCreate(c *check.C) { + testRequires(c, DaemonIsLinux) + buf := &bytes.Buffer{} + cmd := exec.Command(dockerBinary, "events", "-f", "container=foo", "--since=0") + cmd.Stdout = buf + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Wait() + defer cmd.Process.Kill() + + // Sleep for a second to make sure we are testing the case where events are listened before container starts. + time.Sleep(time.Second) + id, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + cID := strings.TrimSpace(id) + for i := 0; ; i++ { + out := buf.String() + if strings.Contains(out, cID) { + break + } + if i > 30 { + c.Fatalf("Missing event of container (foo, %v), got %q", cID, out) + } + time.Sleep(500 * time.Millisecond) + } +} + +func (s *DockerSuite) TestVolumeEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/mount volume actions + dockerCmd(c, "volume", "create", "test-event-volume-local") + dockerCmd(c, "run", "--name", "test-volume-container", "--volume", "test-event-volume-local:/foo", "-d", "busybox", "true") + waitRun("test-volume-container") + + // Observe unmount/destroy volume actions + dockerCmd(c, "rm", "-f", "test-volume-container") + dockerCmd(c, "volume", "rm", "test-event-volume-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + volumeEvents := eventActionsByIDAndType(c, events, "test-event-volume-local", "volume") + c.Assert(volumeEvents, checker.HasLen, 4) + c.Assert(volumeEvents[0], checker.Equals, "create") + c.Assert(volumeEvents[1], checker.Equals, "mount") + c.Assert(volumeEvents[2], checker.Equals, "unmount") + c.Assert(volumeEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestNetworkEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local", "-d", "busybox", "true") + waitRun("test-network-container") + + // Observe disconnect/destroy network actions + dockerCmd(c, "rm", "-f", "test-network-container") + dockerCmd(c, "network", "rm", "test-event-network-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + netEvents := eventActionsByIDAndType(c, events, "test-event-network-local", "network") + c.Assert(netEvents, checker.HasLen, 4) + c.Assert(netEvents[0], checker.Equals, "create") + c.Assert(netEvents[1], checker.Equals, "connect") + c.Assert(netEvents[2], checker.Equals, "disconnect") + c.Assert(netEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestEventsContainerWithMultiNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local-1") + dockerCmd(c, "network", "create", "test-event-network-local-2") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local-1", "-td", "busybox", "sh") + waitRun("test-network-container") + dockerCmd(c, "network", "connect", "test-event-network-local-2", "test-network-container") + + since := daemonUnixTime(c) + + dockerCmd(c, "stop", "-t", "1", "test-network-container") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "-f", "type=network") + netEvents := strings.Split(strings.TrimSpace(out), "\n") + + // received two network disconnect events + c.Assert(len(netEvents), checker.Equals, 2) + c.Assert(netEvents[0], checker.Contains, "disconnect") + c.Assert(netEvents[1], checker.Contains, "disconnect") + + //both networks appeared in the network event output + c.Assert(out, checker.Contains, "test-event-network-local-1") + c.Assert(out, checker.Contains, "test-event-network-local-2") +} + +func (s *DockerSuite) TestEventsStreaming(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + out, _ := dockerCmd(c, "run", "-d", "busybox:latest", "true") + containerID := strings.TrimSpace(out) + + testActions := map[string]chan bool{ + "create": make(chan bool, 1), + "start": make(chan bool, 1), + "die": make(chan bool, 1), + "destroy": make(chan bool, 1), + } + + matcher := matchEventLine(containerID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "create", matcher) + case <-testActions["create"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } + + dockerCmd(c, "rm", containerID) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "destroy", matcher) + case <-testActions["destroy"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + name := "testimageevents" + imageID, err := buildImage(name, + `FROM scratch + MAINTAINER "docker"`, + true) + c.Assert(err, checker.IsNil) + c.Assert(deleteImages(name), checker.IsNil) + + testActions := map[string]chan bool{ + "untag": make(chan bool, 1), + "delete": make(chan bool, 1), + } + + matcher := matchEventLine(imageID, "image", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "untag", matcher) + case <-testActions["untag"]: + // ignore, done + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "delete", matcher) + case <-testActions["delete"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsFilterVolumeAndNetworkType(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-type") + dockerCmd(c, "volume", "create", "test-event-volume-type") + + out, _ := dockerCmd(c, "events", "--filter", "type=volume", "--filter", "type=network", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 2, check.Commentf(out)) + + networkActions := eventActionsByIDAndType(c, events, "test-event-network-type", "network") + volumeActions := eventActionsByIDAndType(c, events, "test-event-volume-type", "volume") + + c.Assert(volumeActions[0], checker.Equals, "create") + c.Assert(networkActions[0], checker.Equals, "create") +} + +func (s *DockerSuite) TestEventsFilterVolumeID(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "volume", "create", "test-event-volume-id") + out, _ := dockerCmd(c, "events", "--filter", "volume=test-event-volume-id", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-volume-id") + c.Assert(events[0], checker.Contains, "driver=local") +} + +func (s *DockerSuite) TestEventsFilterNetworkID(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-local") + out, _ := dockerCmd(c, "events", "--filter", "network=test-event-network-local", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-network-local") + c.Assert(events[0], checker.Contains, "type=bridge") +} + +func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{"max-concurrent-downloads":1,"labels":["bar=foo"], "shutdown-timeout": 10}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, default-runtime=runc, insecure-registries=[], labels=[\"bar=foo\"], live-restore=false, max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s, runtimes=runc:{docker-runc []}, shutdown-timeout=10)", daemonID, daemonName)) +} + +func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonID)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonName)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "daemon=foo") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=daemon") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=container") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_test.go new file mode 100644 index 0000000..cac76d9 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_test.go @@ -0,0 +1,601 @@ +// +build !test_no_exec + +package main + +import ( + "bufio" + "fmt" + "net/http" + "os" + "os/exec" + "reflect" + "runtime" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExec(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "cat", "/tmp/file") + out = strings.Trim(out, "\r\n") + c.Assert(out, checker.Equals, "test") + +} + +func (s *DockerSuite) TestExecInteractive(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + + execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") + stdin, err := execCmd.StdinPipe() + c.Assert(err, checker.IsNil) + stdout, err := execCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + + err = execCmd.Start() + c.Assert(err, checker.IsNil) + _, err = stdin.Write([]byte("cat /tmp/file\n")) + c.Assert(err, checker.IsNil) + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + c.Assert(err, checker.IsNil) + line = strings.TrimSpace(line) + c.Assert(line, checker.Equals, "test") + err = stdin.Close() + c.Assert(err, checker.IsNil) + errChan := make(chan error) + go func() { + errChan <- execCmd.Wait() + close(errChan) + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(1 * time.Second): + c.Fatal("docker exec failed to exit on stdin close") + } + +} + +func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { + out, _ := runSleepingContainer(c) + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + dockerCmd(c, "restart", cleanedContainerID) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + out, _ = dockerCmd(c, "exec", cleanedContainerID, "echo", "hello") + outStr := strings.TrimSpace(out) + c.Assert(outStr, checker.Equals, "hello") +} + +func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) { + // TODO Windows CI: Requires a little work to get this ported. + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) + + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("Could not run top: %s", out)) + + err = s.d.Restart() + c.Assert(err, checker.IsNil, check.Commentf("Could not restart daemon")) + + out, err = s.d.Cmd("start", "top") + c.Assert(err, checker.IsNil, check.Commentf("Could not start top after daemon restart: %s", out)) + + out, err = s.d.Cmd("exec", "top", "echo", "hello") + c.Assert(err, checker.IsNil, check.Commentf("Could not exec on container top: %s", out)) + + outStr := strings.TrimSpace(string(out)) + c.Assert(outStr, checker.Equals, "hello") +} + +// Regression test for #9155, #9044 +func (s *DockerSuite) TestExecEnv(c *check.C) { + // TODO Windows CI: This one is interesting and may just end up being a feature + // difference between Windows and Linux. On Windows, the environment is passed + // into the process that is launched, not into the machine environment. Hence + // a subsequent exec will not have LALA set/ + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-e", "LALA=value1", "-e", "LALA=value2", "-d", "--name", "testing") + c.Assert(waitRun("testing"), check.IsNil) + + out, _ := dockerCmd(c, "exec", "testing", "env") + c.Assert(out, checker.Not(checker.Contains), "LALA=value1") + c.Assert(out, checker.Contains, "LALA=value2") + c.Assert(out, checker.Contains, "HOME=/root") +} + +func (s *DockerSuite) TestExecSetEnv(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-e", "HOME=/root", "-d", "--name", "testing") + c.Assert(waitRun("testing"), check.IsNil) + + out, _ := dockerCmd(c, "exec", "-e", "HOME=/another", "-e", "ABC=xyz", "testing", "env") + c.Assert(out, checker.Not(checker.Contains), "HOME=/root") + c.Assert(out, checker.Contains, "HOME=/another") + c.Assert(out, checker.Contains, "ABC=xyz") +} + +func (s *DockerSuite) TestExecExitStatus(c *check.C) { + runSleepingContainer(c, "-d", "--name", "top") + + result := icmd.RunCommand(dockerBinary, "exec", "top", "sh", "-c", "exit 23") + c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 23, Error: "exit status 23"}) +} + +func (s *DockerSuite) TestExecPausedContainer(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + + out, _ := runSleepingContainer(c, "-d", "--name", "testing") + ContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", "testing") + out, _, err := dockerCmdWithError("exec", "-i", "-t", ContainerID, "echo", "hello") + c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new conmmand if it is paused")) + + expected := ContainerID + " is paused, unpause the container before exec" + c.Assert(out, checker.Contains, expected, check.Commentf("container should not exec new command if it is paused")) +} + +// regression test for #9476 +func (s *DockerSuite) TestExecTTYCloseStdin(c *check.C) { + // TODO Windows CI: This requires some work to port to Windows. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") + + cmd := exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") + stdinRw, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + + stdinRw.Write([]byte("test")) + stdinRw.Close() + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, _ = dockerCmd(c, "top", "exec_tty_stdin") + outArr := strings.Split(out, "\n") + c.Assert(len(outArr), checker.LessOrEqualThan, 3, check.Commentf("exec process left running")) + c.Assert(out, checker.Not(checker.Contains), "nsenter-exec") +} + +func (s *DockerSuite) TestExecTTYWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("exec should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("exec is running but should have failed") + } +} + +func (s *DockerSuite) TestExecParseError(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") + + // Test normal (non-detached) case first + cmd := exec.Command(dockerBinary, "exec", "top") + _, stderr, _, err := runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(stderr, checker.Contains, "See 'docker exec --help'") +} + +func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + err := exec.Command(dockerBinary, "exec", "testing", "top").Start() + c.Assert(err, checker.IsNil) + + type dstop struct { + out []byte + err error + } + + ch := make(chan dstop) + go func() { + out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() + ch <- dstop{out, err} + close(ch) + }() + select { + case <-time.After(3 * time.Second): + c.Fatal("Container stop timed out") + case s := <-ch: + c.Assert(s.err, check.IsNil) + } +} + +func (s *DockerSuite) TestExecCgroup(c *check.C) { + // Not applicable on Windows - using Linux specific functionality + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "testing", "cat", "/proc/1/cgroup") + containerCgroups := sort.StringSlice(strings.Split(out, "\n")) + + var wg sync.WaitGroup + var mu sync.Mutex + execCgroups := []sort.StringSlice{} + errChan := make(chan error) + // exec a few times concurrently to get consistent failure + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + out, _, err := dockerCmdWithError("exec", "testing", "cat", "/proc/self/cgroup") + if err != nil { + errChan <- err + return + } + cg := sort.StringSlice(strings.Split(out, "\n")) + + mu.Lock() + execCgroups = append(execCgroups, cg) + mu.Unlock() + wg.Done() + }() + } + wg.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, checker.IsNil) + } + + for _, cg := range execCgroups { + if !reflect.DeepEqual(cg, containerCgroups) { + fmt.Println("exec cgroups:") + for _, name := range cg { + fmt.Printf(" %s\n", name) + } + + fmt.Println("container cgroups:") + for _, name := range containerCgroups { + fmt.Printf(" %s\n", name) + } + c.Fatal("cgroups mismatched") + } + } +} + +func (s *DockerSuite) TestExecInspectID(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + id := strings.TrimSuffix(out, "\n") + + out = inspectField(c, id, "ExecIDs") + c.Assert(out, checker.Equals, "[]", check.Commentf("ExecIDs should be empty, got: %s", out)) + + // Start an exec, have it block waiting so we can do some checking + cmd := exec.Command(dockerBinary, "exec", id, "sh", "-c", + "while ! test -e /execid1; do sleep 1; done") + + err := cmd.Start() + c.Assert(err, checker.IsNil, check.Commentf("failed to start the exec cmd")) + + // Give the exec 10 chances/seconds to start then give up and stop the test + tries := 10 + for i := 0; i < tries; i++ { + // Since its still running we should see exec as part of the container + out = strings.TrimSpace(inspectField(c, id, "ExecIDs")) + + if out != "[]" && out != "" { + break + } + c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still empty after 10 second")) + time.Sleep(1 * time.Second) + } + + // Save execID for later + execID, err := inspectFilter(id, "index .ExecIDs 0") + c.Assert(err, checker.IsNil, check.Commentf("failed to get the exec id")) + + // End the exec by creating the missing file + err = exec.Command(dockerBinary, "exec", id, + "sh", "-c", "touch /execid1").Run() + + c.Assert(err, checker.IsNil, check.Commentf("failed to run the 2nd exec cmd")) + + // Wait for 1st exec to complete + cmd.Wait() + + // Give the exec 10 chances/seconds to stop then give up and stop the test + for i := 0; i < tries; i++ { + // Since its still running we should see exec as part of the container + out = strings.TrimSpace(inspectField(c, id, "ExecIDs")) + + if out == "[]" { + break + } + c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still not empty after 10 second")) + time.Sleep(1 * time.Second) + } + + // But we should still be able to query the execID + sc, body, err := sockRequest("GET", "/exec/"+execID+"/json", nil) + c.Assert(sc, checker.Equals, http.StatusOK, check.Commentf("received status != 200 OK: %d\n%s", sc, body)) + + // Now delete the container and then an 'inspect' on the exec should + // result in a 404 (not 'container not running') + out, ec := dockerCmd(c, "rm", "-f", id) + c.Assert(ec, checker.Equals, 0, check.Commentf("error removing container: %s", out)) + sc, body, err = sockRequest("GET", "/exec/"+execID+"/json", nil) + c.Assert(sc, checker.Equals, http.StatusNotFound, check.Commentf("received status != 404: %d\n%s", sc, body)) +} + +func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) { + // Problematic on Windows as Windows does not support links + testRequires(c, DaemonIsLinux) + var out string + out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + c.Assert(idA, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) + out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + c.Assert(idB, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) + + dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") +} + +func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) { + // Not applicable on Windows to Windows CI. + testRequires(c, SameHostDaemon, DaemonIsLinux) + for _, fn := range []string{"resolv.conf", "hosts"} { + deleteAllContainers() + + content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) + c.Assert(err, checker.IsNil) + + c.Assert(strings.TrimSpace(string(content)), checker.Equals, "success", check.Commentf("Content was not what was modified in the container", string(content))) + + out, _ := dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") + contID := strings.TrimSpace(out) + netFilePath := containerStorageFile(contID, fn) + + f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) + c.Assert(err, checker.IsNil) + + if _, err := f.Seek(0, 0); err != nil { + f.Close() + c.Fatal(err) + } + + if err := f.Truncate(0); err != nil { + f.Close() + c.Fatal(err) + } + + if _, err := f.Write([]byte("success2\n")); err != nil { + f.Close() + c.Fatal(err) + } + f.Close() + + res, _ := dockerCmd(c, "exec", contID, "cat", "/etc/"+fn) + c.Assert(res, checker.Equals, "success2\n") + } +} + +func (s *DockerSuite) TestExecWithUser(c *check.C) { + // TODO Windows CI: This may be fixable in the future once Windows + // supports users + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "-u", "1", "parent", "id") + c.Assert(out, checker.Contains, "uid=1(daemon) gid=1(daemon)") + + out, _ = dockerCmd(c, "exec", "-u", "root", "parent", "id") + c.Assert(out, checker.Contains, "uid=0(root) gid=0(root)", check.Commentf("exec with user by id expected daemon user got %s", out)) +} + +func (s *DockerSuite) TestExecWithPrivileged(c *check.C) { + // Not applicable on Windows + testRequires(c, DaemonIsLinux, NotUserNamespace) + // Start main loop which attempts mknod repeatedly + dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", "-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`) + + // Check exec mknod doesn't work + cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16") + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil, check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) + c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) + + // Check exec mknod does work with --privileged + cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`) + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil) + + actual := strings.TrimSpace(out) + c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", out)) + + // Check subsequent unprivileged exec cannot mknod + cmd = exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32") + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil, check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) + c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) + + // Confirm at no point was mknod allowed + logCmd := exec.Command(dockerBinary, "logs", "parent") + out, _, err = runCommandWithOutput(logCmd) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Success") + +} + +func (s *DockerSuite) TestExecWithImageUser(c *check.C) { + // Not applicable on Windows + testRequires(c, DaemonIsLinux) + name := "testbuilduser" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio`, + true) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top") + + out, _ := dockerCmd(c, "exec", "dockerioexec", "whoami") + c.Assert(out, checker.Contains, "dockerio", check.Commentf("exec with user by id expected dockerio user got %s", out)) +} + +func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) { + // Windows does not support read-only + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top") + dockerCmd(c, "exec", "parent", "true") +} + +func (s *DockerSuite) TestExecUlimits(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testexeculimits" + runSleepingContainer(c, "-d", "--ulimit", "nproc=21", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "sh", "-c", "ulimit -p") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "21") +} + +// #15750 +func (s *DockerSuite) TestExecStartFails(c *check.C) { + // TODO Windows CI. This test should be portable. Figure out why it fails + // currently. + testRequires(c, DaemonIsLinux) + name := "exec-15750" + runSleepingContainer(c, "-d", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "no-such-cmd") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "executable file not found") +} + +// Fix regression in https://github.com/docker/docker/pull/26461#issuecomment-250287297 +func (s *DockerSuite) TestExecWindowsPathNotWiped(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", minimalBaseImage(), "powershell", "start-sleep", "60") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "powershell", "write-host", "$env:PATH") + out = strings.ToLower(strings.Trim(out, "\r\n")) + c.Assert(out, checker.Contains, `windowspowershell\v1.0`) +} + +func (s *DockerSuite) TestExecEnvLinksHost(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-d", "--name", "foo") + runSleepingContainer(c, "-d", "--link", "foo:db", "--hostname", "myhost", "--name", "bar") + out, _ := dockerCmd(c, "exec", "bar", "env") + c.Assert(out, checker.Contains, "HOSTNAME=myhost") + c.Assert(out, checker.Contains, "DB_NAME=/bar/db") +} + +func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { + testRequires(c, DaemonIsWindows) + runSleepingContainer(c, "-d", "--name", "test") + exec := make(chan bool) + go func() { + dockerCmd(c, "exec", "test", "cmd", "/c", "start sleep 10") + exec <- true + }() + + for { + top := make(chan string) + var out string + go func() { + out, _ := dockerCmd(c, "top", "test") + top <- out + }() + + select { + case <-time.After(time.Second * 5): + c.Error("timed out waiting for top while exec is exiting") + case out = <-top: + break + } + + if strings.Count(out, "busybox.exe") == 2 && !strings.Contains(out, "cmd.exe") { + // The initial exec process (cmd.exe) has exited, and both sleeps are currently running + break + } + time.Sleep(1 * time.Second) + } + + inspect := make(chan bool) + go func() { + dockerCmd(c, "inspect", "test") + inspect <- true + }() + + select { + case <-time.After(time.Second * 5): + c.Error("timed out waiting for inspect while exec is exiting") + case <-inspect: + break + } + + // Ensure the background sleep is still running + out, _ := dockerCmd(c, "top", "test") + c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 2) + + // The exec should exit when the background sleep exits + select { + case <-time.After(time.Second * 15): + c.Error("timed out waiting for async exec to exit") + case <-exec: + // Ensure the background sleep has actually exited + out, _ := dockerCmd(c, "top", "test") + c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 1) + break + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_unix_test.go new file mode 100644 index 0000000..5f69119 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_unix_test.go @@ -0,0 +1,93 @@ +// +build !windows,!test_no_exec + +package main + +import ( + "bytes" + "io" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// regression test for #12546 +func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "exec", "-i", contID, "echo", "-n", "hello") + p, err := pty.Start(cmd) + c.Assert(err, checker.IsNil) + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + ch := make(chan error) + go func() { ch <- cmd.Wait() }() + + select { + case err := <-ch: + c.Assert(err, checker.IsNil) + output := b.String() + c.Assert(strings.TrimSpace(output), checker.Equals, "hello") + case <-time.After(5 * time.Second): + c.Fatal("timed out running docker exec") + } +} + +func (s *DockerSuite) TestExecTTY(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + dockerCmd(c, "run", "-d", "--name=test", "busybox", "sh", "-c", "echo hello > /foo && top") + + cmd := exec.Command(dockerBinary, "exec", "-it", "test", "sh") + p, err := pty.Start(cmd) + c.Assert(err, checker.IsNil) + defer p.Close() + + _, err = p.Write([]byte("cat /foo && exit\n")) + c.Assert(err, checker.IsNil) + + chErr := make(chan error) + go func() { + chErr <- cmd.Wait() + }() + select { + case err := <-chErr: + c.Assert(err, checker.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("timeout waiting for exec to exit") + } + + buf := make([]byte, 256) + read, err := p.Read(buf) + c.Assert(err, checker.IsNil) + c.Assert(bytes.Contains(buf, []byte("hello")), checker.Equals, true, check.Commentf(string(buf[:read]))) +} + +// Test the the TERM env var is set when -t is provided on exec +func (s *DockerSuite) TestExecWithTERM(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + out, _ := dockerCmd(c, "run", "-id", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + cmd := exec.Command(dockerBinary, "exec", "-t", contID, "sh", "-c", "if [ -z $TERM ]; then exit 1; else exit 0; fi") + if err := cmd.Run(); err != nil { + c.Assert(err, checker.IsNil) + } +} + +// Test that the TERM env var is not set on exec when -t is not provided, even if it was set +// on run +func (s *DockerSuite) TestExecWithNoTERM(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + cmd := exec.Command(dockerBinary, "exec", contID, "sh", "-c", "if [ -z $TERM ]; then exit 0; else exit 1; fi") + if err := cmd.Run(); err != nil { + c.Assert(err, checker.IsNil) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_experimental_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_experimental_test.go new file mode 100644 index 0000000..6a49cc8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_experimental_test.go @@ -0,0 +1,36 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExperimentalVersionTrue(c *check.C) { + testRequires(c, ExperimentalDaemon) + + out, _ := dockerCmd(c, "version") + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { + c.Assert(line, checker.Matches, "*true") + return + } + } + + c.Fatal(`"Experimental" not found in version output`) +} + +func (s *DockerSuite) TestExperimentalVersionFalse(c *check.C) { + testRequires(c, NotExperimentalDaemon) + + out, _ := dockerCmd(c, "version") + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { + c.Assert(line, checker.Matches, "*false") + return + } + } + + c.Fatal(`"Experimental" not found in version output`) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_export_import_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_export_import_test.go new file mode 100644 index 0000000..069dc08 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_export_import_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "os" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// export an image and try to import it into a new one +func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := "testexportcontainerandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + + out, _ := dockerCmd(c, "export", containerID) + + importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") + importCmd.Stdin = strings.NewReader(out) + out, _, err := runCommandWithOutput(importCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) + + cleanedImageID := strings.TrimSpace(out) + c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) +} + +// Used to test output flag in the export command +func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := "testexportcontainerwithoutputandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + dockerCmd(c, "export", "--output=testexp.tar", containerID) + defer os.Remove("testexp.tar") + + out, _, err := runCommandWithOutput(exec.Command("cat", "testexp.tar")) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") + importCmd.Stdin = strings.NewReader(out) + out, _, err = runCommandWithOutput(importCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) + + cleanedImageID := strings.TrimSpace(out) + c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_external_graphdriver_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_external_graphdriver_unix_test.go new file mode 100644 index 0000000..a794ca7 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_external_graphdriver_unix_test.go @@ -0,0 +1,405 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strings" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/plugins" + "github.com/go-check/check" +) + +func init() { + check.Suite(&DockerExternalGraphdriverSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerExternalGraphdriverSuite struct { + server *httptest.Server + jserver *httptest.Server + ds *DockerSuite + d *Daemon + ec map[string]*graphEventsCounter +} + +type graphEventsCounter struct { + activations int + creations int + removals int + gets int + puts int + stats int + cleanups int + exists int + init int + metadata int + diff int + applydiff int + changes int + diffsize int +} + +func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) +} + +func (s *DockerExternalGraphdriverSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerExternalGraphdriverSuite) SetUpSuite(c *check.C) { + s.ec = make(map[string]*graphEventsCounter) + s.setUpPluginViaSpecFile(c) + s.setUpPluginViaJSONFile(c) +} + +func (s *DockerExternalGraphdriverSuite) setUpPluginViaSpecFile(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + s.setUpPlugin(c, "test-external-graph-driver", "spec", mux, []byte(s.server.URL)) +} + +func (s *DockerExternalGraphdriverSuite) setUpPluginViaJSONFile(c *check.C) { + mux := http.NewServeMux() + s.jserver = httptest.NewServer(mux) + + p := plugins.NewLocalPlugin("json-external-graph-driver", s.jserver.URL) + b, err := json.Marshal(p) + c.Assert(err, check.IsNil) + + s.setUpPlugin(c, "json-external-graph-driver", "json", mux, b) +} + +func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ext string, mux *http.ServeMux, b []byte) { + type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + } + + type graphDriverResponse struct { + Err error `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + } + + respond := func(w http.ResponseWriter, data interface{}) { + w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json") + switch t := data.(type) { + case error: + fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, t.Error())) + case string: + fmt.Fprintln(w, t) + default: + json.NewEncoder(w).Encode(&data) + } + } + + decReq := func(b io.ReadCloser, out interface{}, w http.ResponseWriter) error { + defer b.Close() + if err := json.NewDecoder(b).Decode(&out); err != nil { + http.Error(w, fmt.Sprintf("error decoding json: %s", err.Error()), 500) + } + return nil + } + + base, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + vfsProto, err := vfs.Init(base, []string{}, nil, nil) + c.Assert(err, check.IsNil, check.Commentf("error initializing graph driver")) + driver := graphdriver.NewNaiveDiffDriver(vfsProto, nil, nil) + + s.ec[ext] = &graphEventsCounter{} + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].activations++ + respond(w, `{"Implements": ["GraphDriver"]}`) + }) + + mux.HandleFunc("/GraphDriver.Init", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].init++ + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.CreateReadWrite", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.CreateReadWrite(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.Create(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].removals++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Remove(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Get", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].gets++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + dir, err := driver.Get(req.ID, req.MountLabel) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Dir: dir}) + }) + + mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].puts++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Put(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Exists", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].exists++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + respond(w, &graphDriverResponse{Exists: driver.Exists(req.ID)}) + }) + + mux.HandleFunc("/GraphDriver.Status", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].stats++ + respond(w, &graphDriverResponse{Status: driver.Status()}) + }) + + mux.HandleFunc("/GraphDriver.Cleanup", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].cleanups++ + err := driver.Cleanup() + if err != nil { + respond(w, err) + return + } + respond(w, `{}`) + }) + + mux.HandleFunc("/GraphDriver.GetMetadata", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].metadata++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + data, err := driver.GetMetadata(req.ID) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Metadata: data}) + }) + + mux.HandleFunc("/GraphDriver.Diff", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].diff++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + diff, err := driver.Diff(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + io.Copy(w, diff) + }) + + mux.HandleFunc("/GraphDriver.Changes", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].changes++ + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + changes, err := driver.Changes(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Changes: changes}) + }) + + mux.HandleFunc("/GraphDriver.ApplyDiff", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].applydiff++ + diff := r.Body + defer r.Body.Close() + + id := r.URL.Query().Get("id") + parent := r.URL.Query().Get("parent") + + if id == "" { + http.Error(w, fmt.Sprintf("missing id"), 409) + } + + size, err := driver.ApplyDiff(id, parent, diff) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Size: size}) + }) + + mux.HandleFunc("/GraphDriver.DiffSize", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].diffsize++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + size, err := driver.DiffSize(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Size: size}) + }) + + err = os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, check.IsNil, check.Commentf("error creating /etc/docker/plugins")) + + specFile := "/etc/docker/plugins/" + name + "." + ext + err = ioutil.WriteFile(specFile, b, 0644) + c.Assert(err, check.IsNil, check.Commentf("error writing to %s", specFile)) +} + +func (s *DockerExternalGraphdriverSuite) TearDownSuite(c *check.C) { + s.server.Close() + s.jserver.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, check.IsNil, check.Commentf("error removing /etc/docker/plugins")) +} + +func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) { + testRequires(c, ExperimentalDaemon) + + s.testExternalGraphDriver("test-external-graph-driver", "spec", c) + s.testExternalGraphDriver("json-external-graph-driver", "json", c) +} + +func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ext string, c *check.C) { + if err := s.d.StartWithBusybox("-s", name); err != nil { + b, _ := ioutil.ReadFile(s.d.LogFileName()) + c.Assert(err, check.IsNil, check.Commentf("\n%s", string(b))) + } + + out, err := s.d.Cmd("run", "--name=graphtest", "busybox", "sh", "-c", "echo hello > /hello") + c.Assert(err, check.IsNil, check.Commentf(out)) + + err = s.d.Restart("-s", name) + + out, err = s.d.Cmd("inspect", "--format={{.GraphDriver.Name}}", "graphtest") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), check.Equals, name) + + out, err = s.d.Cmd("diff", "graphtest") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(strings.Contains(out, "A /hello"), check.Equals, true, check.Commentf("diff output: %s", out)) + + out, err = s.d.Cmd("rm", "-f", "graphtest") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("info") + c.Assert(err, check.IsNil, check.Commentf(out)) + + err = s.d.Stop() + c.Assert(err, check.IsNil) + + // Don't check s.ec.exists, because the daemon no longer calls the + // Exists function. + c.Assert(s.ec[ext].activations, check.Equals, 2) + c.Assert(s.ec[ext].init, check.Equals, 2) + c.Assert(s.ec[ext].creations >= 1, check.Equals, true) + c.Assert(s.ec[ext].removals >= 1, check.Equals, true) + c.Assert(s.ec[ext].gets >= 1, check.Equals, true) + c.Assert(s.ec[ext].puts >= 1, check.Equals, true) + c.Assert(s.ec[ext].stats, check.Equals, 5) + c.Assert(s.ec[ext].cleanups, check.Equals, 2) + c.Assert(s.ec[ext].applydiff >= 1, check.Equals, true) + c.Assert(s.ec[ext].changes, check.Equals, 1) + c.Assert(s.ec[ext].diffsize, check.Equals, 0) + c.Assert(s.ec[ext].diff, check.Equals, 0) + c.Assert(s.ec[ext].metadata, check.Equals, 1) +} + +func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriverPull(c *check.C) { + testRequires(c, Network, ExperimentalDaemon) + + c.Assert(s.d.Start(), check.IsNil) + + out, err := s.d.Cmd("pull", "busybox:latest") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_external_volume_driver_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_external_volume_driver_unix_test.go new file mode 100644 index 0000000..806d87e --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_external_volume_driver_unix_test.go @@ -0,0 +1,627 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +const volumePluginName = "test-external-volume-driver" + +func init() { + check.Suite(&DockerExternalVolumeSuite{ + ds: &DockerSuite{}, + }) +} + +type eventCounter struct { + activations int + creations int + removals int + mounts int + unmounts int + paths int + lists int + gets int + caps int +} + +type DockerExternalVolumeSuite struct { + ds *DockerSuite + d *Daemon + *volumePlugin +} + +func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) + s.ec = &eventCounter{} +} + +func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) { + s.volumePlugin = newVolumePlugin(c, volumePluginName) +} + +type volumePlugin struct { + ec *eventCounter + *httptest.Server + vols map[string]vol +} + +type vol struct { + Name string + Mountpoint string + Ninja bool // hack used to trigger a null volume return on `Get` + Status map[string]interface{} + Options map[string]string +} + +func (p *volumePlugin) Close() { + p.Server.Close() +} + +func newVolumePlugin(c *check.C, name string) *volumePlugin { + mux := http.NewServeMux() + s := &volumePlugin{Server: httptest.NewServer(mux), ec: &eventCounter{}, vols: make(map[string]vol)} + + type pluginRequest struct { + Name string + Opts map[string]string + ID string + } + + type pluginResp struct { + Mountpoint string `json:",omitempty"` + Err string `json:",omitempty"` + } + + read := func(b io.ReadCloser) (pluginRequest, error) { + defer b.Close() + var pr pluginRequest + if err := json.NewDecoder(b).Decode(&pr); err != nil { + return pr, err + } + return pr, nil + } + + send := func(w http.ResponseWriter, data interface{}) { + switch t := data.(type) { + case error: + http.Error(w, t.Error(), 500) + case string: + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, t) + default: + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + json.NewEncoder(w).Encode(&data) + } + } + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + s.ec.activations++ + send(w, `{"Implements": ["VolumeDriver"]}`) + }) + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec.creations++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + _, isNinja := pr.Opts["ninja"] + status := map[string]interface{}{"Hello": "world"} + s.vols[pr.Name] = vol{Name: pr.Name, Ninja: isNinja, Status: status, Options: pr.Opts} + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { + s.ec.lists++ + vols := make([]vol, 0, len(s.vols)) + for _, v := range s.vols { + if v.Ninja { + continue + } + vols = append(vols, v) + } + send(w, map[string][]vol{"Volumes": vols}) + }) + + mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { + s.ec.gets++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + v, exists := s.vols[pr.Name] + if !exists { + send(w, `{"Err": "no such volume"}`) + } + + if v.Ninja { + send(w, map[string]vol{}) + return + } + + v.Mountpoint = hostVolumePath(pr.Name) + send(w, map[string]vol{"Volume": v}) + return + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec.removals++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + v, ok := s.vols[pr.Name] + if !ok { + send(w, nil) + return + } + + if err := os.RemoveAll(hostVolumePath(v.Name)); err != nil { + send(w, &pluginResp{Err: err.Error()}) + return + } + delete(s.vols, v.Name) + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + s.ec.paths++ + + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + p := hostVolumePath(pr.Name) + send(w, &pluginResp{Mountpoint: p}) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + s.ec.mounts++ + + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + if v, exists := s.vols[pr.Name]; exists { + // Use this to simulate a mount failure + if _, exists := v.Options["invalidOption"]; exists { + send(w, fmt.Errorf("invalid argument")) + return + } + } + + p := hostVolumePath(pr.Name) + if err := os.MkdirAll(p, 0755); err != nil { + send(w, &pluginResp{Err: err.Error()}) + return + } + + if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.Server.URL), 0644); err != nil { + send(w, err) + return + } + + if err := ioutil.WriteFile(filepath.Join(p, "mountID"), []byte(pr.ID), 0644); err != nil { + send(w, err) + return + } + + send(w, &pluginResp{Mountpoint: p}) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + s.ec.unmounts++ + + _, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) { + s.ec.caps++ + + _, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + send(w, `{"Capabilities": { "Scope": "global" }}`) + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + err = ioutil.WriteFile("/etc/docker/plugins/"+name+".spec", []byte(s.Server.URL), 0644) + c.Assert(err, checker.IsNil) + return s +} + +func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) { + s.volumePlugin.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerExternalVolumeSuite) TestVolumeCLICreateOptionConflict(c *check.C) { + dockerCmd(c, "volume", "create", "test") + + out, _, err := dockerCmdWithError("volume", "create", "test", "--driver", volumePluginName) + c.Assert(err, check.NotNil, check.Commentf("volume create exception name already in use with another driver")) + c.Assert(out, checker.Contains, "A volume named test already exists") + + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Driver }}", "test") + _, _, err = dockerCmdWithError("volume", "create", "test", "--driver", strings.TrimSpace(out)) + c.Assert(err, check.IsNil) + + // make sure hidden --name option conflicts with positional arg name + out, _, err = dockerCmdWithError("volume", "create", "--name", "test2", "test2") + c.Assert(err, check.NotNil, check.Commentf("Conflicting options: either specify --name or provide positional arg, not both")) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, s.Server.URL) + + _, err = s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + + p := hostVolumePath("external-volume-test") + _, err = os.Lstat(p) + c.Assert(err, checker.NotNil) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("Expected volume path in host to not exist: %s, %v\n", p, err)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, s.Server.URL) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("rm", "-fv", "vol-test1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 2) + c.Assert(s.ec.unmounts, checker.Equals, 2) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("rm", "-fv", "vol-test1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func hostVolumePath(name string) string { + return fmt.Sprintf("/var/lib/docker/volumes/%s", name) +} + +// Make sure a request to use a down driver doesn't block other requests +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *check.C) { + specPath := "/etc/docker/plugins/down-driver.spec" + err := ioutil.WriteFile(specPath, []byte("tcp://127.0.0.7:9999"), 0644) + c.Assert(err, check.IsNil) + defer os.RemoveAll(specPath) + + chCmd1 := make(chan struct{}) + chCmd2 := make(chan error) + cmd1 := exec.Command(dockerBinary, "volume", "create", "-d", "down-driver") + cmd2 := exec.Command(dockerBinary, "volume", "create") + + c.Assert(cmd1.Start(), checker.IsNil) + defer cmd1.Process.Kill() + time.Sleep(100 * time.Millisecond) // ensure API has been called + c.Assert(cmd2.Start(), checker.IsNil) + + go func() { + cmd1.Wait() + close(chCmd1) + }() + go func() { + chCmd2 <- cmd2.Wait() + }() + + select { + case <-chCmd1: + cmd2.Process.Kill() + c.Fatalf("volume create with down driver finished unexpectedly") + case err := <-chCmd2: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + cmd2.Process.Kill() + c.Fatal("volume creates are blocked by previous create requests when previous driver is down") + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + specPath := "/etc/docker/plugins/test-external-volume-driver-retry.spec" + os.RemoveAll(specPath) + defer os.RemoveAll(specPath) + + errchan := make(chan error) + go func() { + if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver-retry", "busybox:latest"); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + go func() { + // wait for a retry to occur, then create spec to allow plugin to register + time.Sleep(2000 * time.Millisecond) + // no need to check for an error here since it will get picked up by the timeout later + ioutil.WriteFile(specPath, []byte(s.Server.URL), 0644) + }() + + select { + case err := <-errchan: + c.Assert(err, checker.IsNil) + case <-time.After(8 * time.Second): + c.Fatal("volume creates fail when plugin not immediately available") + } + + _, err = s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "foo") + dockerCmd(c, "run", "-d", "--name", "testing", "-v", "foo:/bar", "busybox", "top") + + var mounts []struct { + Name string + Driver string + } + out := inspectFieldJSON(c, "testing", "Mounts") + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Driver, checker.Equals, volumePluginName) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverList(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc3") + out, _ := dockerCmd(c, "volume", "ls") + ls := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(ls), check.Equals, 2, check.Commentf("\n%s", out)) + + vol := strings.Fields(ls[len(ls)-1]) + c.Assert(len(vol), check.Equals, 2, check.Commentf("%v", vol)) + c.Assert(vol[0], check.Equals, volumePluginName) + c.Assert(vol[1], check.Equals, "abc3") + + c.Assert(s.ec.lists, check.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) { + out, _, err := dockerCmdWithError("volume", "inspect", "dummy") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No such volume") + c.Assert(s.ec.gets, check.Equals, 1) + + dockerCmd(c, "volume", "create", "test", "-d", volumePluginName) + out, _ = dockerCmd(c, "volume", "inspect", "test") + + type vol struct { + Status map[string]string + } + var st []vol + + c.Assert(json.Unmarshal([]byte(out), &st), checker.IsNil) + c.Assert(st, checker.HasLen, 1) + c.Assert(st[0].Status, checker.HasLen, 1, check.Commentf("%v", st[0])) + c.Assert(st[0].Status["Hello"], checker.Equals, "world", check.Commentf("%v", st[0].Status)) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc1") + err := s.d.Restart() + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--name=test", "-v", "abc1:/foo", "busybox", "true") + var mounts []types.MountPoint + inspectFieldAndMarshall(c, "test", "Mounts", &mounts) + c.Assert(mounts, checker.HasLen, 1) + c.Assert(mounts[0].Driver, checker.Equals, volumePluginName) +} + +// Ensures that the daemon handles when the plugin responds to a `Get` request with a null volume and a null error. +// Prior the daemon would panic in this scenario. +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) { + c.Assert(s.d.Start(), checker.IsNil) + + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, "abc2", "--opt", "ninja=1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "abc2") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No such volume") +} + +// Ensure only cached paths are used in volume list to prevent N+1 calls to `VolumeDriver.Path` +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *check.C) { + c.Assert(s.d.Start(), checker.IsNil) + c.Assert(s.ec.paths, checker.Equals, 0) + + out, err := s.d.Cmd("volume", "create", "test", "--driver=test-external-volume-driver") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "inspect", "--format='{{.Mountpoint}}'", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + c.Assert(s.ec.paths, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} + +// Check that VolumeDriver.Capabilities gets called, and only called once +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *check.C) { + c.Assert(s.d.Start(), checker.IsNil) + c.Assert(s.ec.caps, checker.Equals, 0) + + for i := 0; i < 3; i++ { + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.caps, checker.Equals, 1) + out, err = s.d.Cmd("volume", "inspect", "--format={{.Scope}}", fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, volume.GlobalScope) + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *check.C) { + driverName := stringid.GenerateNonCryptoID() + p := newVolumePlugin(c, driverName) + defer p.Close() + + c.Assert(s.d.StartWithBusybox(), checker.IsNil) + + out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "volume named test already exists") + + // simulate out of band volume deletion on plugin level + delete(p.vols, "test") + + // test re-create with same driver + out, err = s.d.Cmd("volume", "create", "-d", driverName, "--opt", "foo=bar", "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var vs []types.Volume + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Driver, checker.Equals, driverName) + c.Assert(vs[0].Options, checker.NotNil) + c.Assert(vs[0].Options["foo"], checker.Equals, "bar") + c.Assert(vs[0].Driver, checker.Equals, driverName) + + // simulate out of band volume deletion on plugin level + delete(p.vols, "test") + + // test create with different driver + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + vs = nil + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Options, checker.HasLen, 0) + c.Assert(vs[0].Driver, checker.Equals, "local") +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *check.C) { + c.Assert(s.d.StartWithBusybox(), checker.IsNil) + s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount") + + out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) + out, _ = s.d.Cmd("run", "-w", "/foo", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_health_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_health_test.go new file mode 100644 index 0000000..6b7baeb --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_health_test.go @@ -0,0 +1,169 @@ +package main + +import ( + "encoding/json" + + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func waitForStatus(c *check.C, name string, prev string, expected string) { + prev = prev + "\n" + expected = expected + "\n" + for { + out, _ := dockerCmd(c, "inspect", "--format={{.State.Status}}", name) + if out == expected { + return + } + c.Check(out, checker.Equals, prev) + if out != prev { + return + } + time.Sleep(100 * time.Millisecond) + } +} + +func waitForHealthStatus(c *check.C, name string, prev string, expected string) { + prev = prev + "\n" + expected = expected + "\n" + for { + out, _ := dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + if out == expected { + return + } + c.Check(out, checker.Equals, prev) + if out != prev { + return + } + time.Sleep(100 * time.Millisecond) + } +} + +func getHealth(c *check.C, name string) *types.Health { + out, _ := dockerCmd(c, "inspect", "--format={{json .State.Health}}", name) + var health types.Health + err := json.Unmarshal([]byte(out), &health) + c.Check(err, checker.Equals, nil) + return &health +} + +func (s *DockerSuite) TestHealth(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + imageName := "testhealth" + _, err := buildImage(imageName, + `FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD cat /status`, + true) + + c.Check(err, check.IsNil) + + // No health status before starting + name := "test_health" + dockerCmd(c, "create", "--name", name, imageName) + out, _ := dockerCmd(c, "ps", "-a", "--format={{.Status}}") + c.Check(out, checker.Equals, "Created\n") + + // Inspect the options + out, _ = dockerCmd(c, "inspect", + "--format=timeout={{.Config.Healthcheck.Timeout}} "+ + "interval={{.Config.Healthcheck.Interval}} "+ + "retries={{.Config.Healthcheck.Retries}} "+ + "test={{.Config.Healthcheck.Test}}", name) + c.Check(out, checker.Equals, "timeout=30s interval=1s retries=0 test=[CMD-SHELL cat /status]\n") + + // Start + dockerCmd(c, "start", name) + waitForHealthStatus(c, name, "starting", "healthy") + + // Make it fail + dockerCmd(c, "exec", name, "rm", "/status") + waitForHealthStatus(c, name, "healthy", "unhealthy") + + // Inspect the status + out, _ = dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + c.Check(out, checker.Equals, "unhealthy\n") + + // Make it healthy again + dockerCmd(c, "exec", name, "touch", "/status") + waitForHealthStatus(c, name, "unhealthy", "healthy") + + // Remove container + dockerCmd(c, "rm", "-f", name) + + // Disable the check from the CLI + out, _ = dockerCmd(c, "create", "--name=noh", "--no-healthcheck", imageName) + out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", "noh") + c.Check(out, checker.Equals, "[NONE]\n") + dockerCmd(c, "rm", "noh") + + // Disable the check with a new build + _, err = buildImage("no_healthcheck", + `FROM testhealth + HEALTHCHECK NONE`, true) + c.Check(err, check.IsNil) + + out, _ = dockerCmd(c, "inspect", "--format={{.ContainerConfig.Healthcheck.Test}}", "no_healthcheck") + c.Check(out, checker.Equals, "[NONE]\n") + + // Enable the checks from the CLI + _, _ = dockerCmd(c, "run", "-d", "--name=fatal_healthcheck", + "--health-interval=0.5s", + "--health-retries=3", + "--health-cmd=cat /status", + "no_healthcheck") + waitForHealthStatus(c, "fatal_healthcheck", "starting", "healthy") + health := getHealth(c, "fatal_healthcheck") + c.Check(health.Status, checker.Equals, "healthy") + c.Check(health.FailingStreak, checker.Equals, 0) + last := health.Log[len(health.Log)-1] + c.Check(last.ExitCode, checker.Equals, 0) + c.Check(last.Output, checker.Equals, "OK\n") + + // Fail the check + dockerCmd(c, "exec", "fatal_healthcheck", "rm", "/status") + waitForHealthStatus(c, "fatal_healthcheck", "healthy", "unhealthy") + + failsStr, _ := dockerCmd(c, "inspect", "--format={{.State.Health.FailingStreak}}", "fatal_healthcheck") + fails, err := strconv.Atoi(strings.TrimSpace(failsStr)) + c.Check(err, check.IsNil) + c.Check(fails >= 3, checker.Equals, true) + dockerCmd(c, "rm", "-f", "fatal_healthcheck") + + // Check timeout + // Note: if the interval is too small, it seems that Docker spends all its time running health + // checks and never gets around to killing it. + _, _ = dockerCmd(c, "run", "-d", "--name=test", + "--health-interval=1s", "--health-cmd=sleep 5m", "--health-timeout=1ms", imageName) + waitForHealthStatus(c, "test", "starting", "unhealthy") + health = getHealth(c, "test") + last = health.Log[len(health.Log)-1] + c.Check(health.Status, checker.Equals, "unhealthy") + c.Check(last.ExitCode, checker.Equals, -1) + c.Check(last.Output, checker.Equals, "Health check exceeded timeout (1ms)") + dockerCmd(c, "rm", "-f", "test") + + // Check JSON-format + _, err = buildImage(imageName, + `FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD ["cat", "/my status"]`, + true) + c.Check(err, check.IsNil) + out, _ = dockerCmd(c, "inspect", + "--format={{.Config.Healthcheck.Test}}", imageName) + c.Check(out, checker.Equals, "[CMD cat /my status]\n") + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_help_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_help_test.go new file mode 100644 index 0000000..29b6553 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_help_test.go @@ -0,0 +1,321 @@ +package main + +import ( + "fmt" + "os/exec" + "runtime" + "strings" + "unicode" + + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestHelpTextVerify(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Make sure main help text fits within 80 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Test for HOME set to its default value and set to "/" on linux + // Yes on windows setting up an array and looping (right now) isn't + // necessary because we just have one value, but we'll need the + // array/loop on linux so we might as well set it up so that we can + // test any number of home dirs later on and all we need to do is + // modify the array - the rest of the testing infrastructure should work + homes := []string{homedir.Get()} + + // Non-Windows machines need to test for this special case of $HOME + if runtime.GOOS != "windows" { + homes = append(homes, "/") + } + + homeKey := homedir.Key() + baseEnvs := appendBaseEnv(true) + + // Remove HOME env var from list so we can add a new value later. + for i, env := range baseEnvs { + if strings.HasPrefix(env, homeKey+"=") { + baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) + break + } + } + + for _, home := range homes { + + // Dup baseEnvs and add our new HOME value + newEnvs := make([]string, len(baseEnvs)+1) + copy(newEnvs, baseEnvs) + newEnvs[len(newEnvs)-1] = homeKey + "=" + home + + scanForHome := runtime.GOOS != "windows" && home != "/" + + // Check main help text to make sure its not over 80 chars + helpCmd := exec.Command(dockerBinary, "help") + helpCmd.Env = newEnvs + out, _, err := runCommandWithOutput(helpCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + lines := strings.Split(out, "\n") + for _, line := range lines { + // All lines should not end with a space + c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Line should not end with a space")) + + if scanForHome && strings.Contains(line, `=`+home) { + c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) + } + if runtime.GOOS != "windows" { + i := strings.Index(line, homedir.GetShortcutString()) + if i >= 0 && i != len(line)-1 && line[i+1] != '/' { + c.Fatalf("Main help should not have used home shortcut:\n%s", line) + } + } + } + + // Make sure each cmd's help text fits within 90 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Pull the list of commands from the "Commands:" section of docker help + helpCmd = exec.Command(dockerBinary, "help") + helpCmd.Env = newEnvs + out, _, err = runCommandWithOutput(helpCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + i := strings.Index(out, "Commands:") + c.Assert(i, checker.GreaterOrEqualThan, 0, check.Commentf("Missing 'Commands:' in:\n%s", out)) + + cmds := []string{} + // Grab all chars starting at "Commands:" + helpOut := strings.Split(out[i:], "\n") + // Skip first line, it is just "Commands:" + helpOut = helpOut[1:] + + // Create the list of commands we want to test + cmdsToTest := []string{} + for _, cmd := range helpOut { + // Stop on blank line or non-idented line + if cmd == "" || !unicode.IsSpace(rune(cmd[0])) { + break + } + + // Grab just the first word of each line + cmd = strings.Split(strings.TrimSpace(cmd), " ")[0] + cmds = append(cmds, cmd) // Saving count for later + + cmdsToTest = append(cmdsToTest, cmd) + } + + // Add some 'two word' commands - would be nice to automatically + // calculate this list - somehow + cmdsToTest = append(cmdsToTest, "volume create") + cmdsToTest = append(cmdsToTest, "volume inspect") + cmdsToTest = append(cmdsToTest, "volume ls") + cmdsToTest = append(cmdsToTest, "volume rm") + cmdsToTest = append(cmdsToTest, "network connect") + cmdsToTest = append(cmdsToTest, "network create") + cmdsToTest = append(cmdsToTest, "network disconnect") + cmdsToTest = append(cmdsToTest, "network inspect") + cmdsToTest = append(cmdsToTest, "network ls") + cmdsToTest = append(cmdsToTest, "network rm") + + if experimentalDaemon { + cmdsToTest = append(cmdsToTest, "checkpoint create") + cmdsToTest = append(cmdsToTest, "checkpoint ls") + cmdsToTest = append(cmdsToTest, "checkpoint rm") + } + + // Divide the list of commands into go routines and run the func testcommand on the commands in parallel + // to save runtime of test + + errChan := make(chan error) + + for index := 0; index < len(cmdsToTest); index++ { + go func(index int) { + errChan <- testCommand(cmdsToTest[index], newEnvs, scanForHome, home) + }(index) + } + + for index := 0; index < len(cmdsToTest); index++ { + err := <-errChan + if err != nil { + c.Fatal(err) + } + } + } +} + +func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) { + // Test to make sure the exit code and output (stdout vs stderr) of + // various good and bad cases are what we expect + + // docker : stdout=all, stderr=empty, rc=0 + out, _, err := dockerCmdWithError() + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker'\n")) + + // docker help: stdout=all, stderr=empty, rc=0 + out, _, err = dockerCmdWithError("help") + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker help'\n")) + + // docker --help: stdout=all, stderr=empty, rc=0 + out, _, err = dockerCmdWithError("--help") + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker --help'\n")) + + // docker inspect busybox: stdout=all, stderr=empty, rc=0 + // Just making sure stderr is empty on valid cmd + out, _, err = dockerCmdWithError("inspect", "busybox") + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker inspect busyBox'\n")) + + // docker rm: stdout=empty, stderr=all, rc!=0 + // testing the min arg error msg + cmd := exec.Command(dockerBinary, "rm") + stdout, stderr, _, err := runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Equals, "") + // Should not contain full help text but should contain info about + // # of args and Usage line + c.Assert(stderr, checker.Contains, "requires at least 1 argument", check.Commentf("Missing # of args text from 'docker rm'\n")) + + // docker rm NoSuchContainer: stdout=empty, stderr=all, rc=0 + // testing to make sure no blank line on error + cmd = exec.Command(dockerBinary, "rm", "NoSuchContainer") + stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(len(stderr), checker.Not(checker.Equals), 0) + c.Assert(stdout, checker.Equals, "") + // Be really picky + c.Assert(stderr, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker rm'\n")) + + // docker BadCmd: stdout=empty, stderr=all, rc=0 + cmd = exec.Command(dockerBinary, "BadCmd") + stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Equals, "") + c.Assert(stderr, checker.Equals, "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'\n", check.Commentf("Unexcepted output for 'docker badCmd'\n")) +} + +func testCommand(cmd string, newEnvs []string, scanForHome bool, home string) error { + + args := strings.Split(cmd+" --help", " ") + + // Check the full usage text + helpCmd := exec.Command(dockerBinary, args...) + helpCmd.Env = newEnvs + out, stderr, _, err := runCommandWithStdoutStderr(helpCmd) + if len(stderr) != 0 { + return fmt.Errorf("Error on %q help. non-empty stderr:%q\n", cmd, stderr) + } + if strings.HasSuffix(out, "\n\n") { + return fmt.Errorf("Should not have blank line on %q\n", cmd) + } + if !strings.Contains(out, "--help") { + return fmt.Errorf("All commands should mention '--help'. Command '%v' did not.\n", cmd) + } + + if err != nil { + return fmt.Errorf(out) + } + + // Check each line for lots of stuff + lines := strings.Split(out, "\n") + for _, line := range lines { + i := strings.Index(line, "~") + if i >= 0 && i != len(line)-1 && line[i+1] != '/' { + return fmt.Errorf("Help for %q should not have used ~:\n%s", cmd, line) + } + + // If a line starts with 4 spaces then assume someone + // added a multi-line description for an option and we need + // to flag it + if strings.HasPrefix(line, " ") && + !strings.HasPrefix(strings.TrimLeft(line, " "), "--") { + return fmt.Errorf("Help for %q should not have a multi-line option", cmd) + } + + // Options should NOT end with a period + if strings.HasPrefix(line, " -") && strings.HasSuffix(line, ".") { + return fmt.Errorf("Help for %q should not end with a period: %s", cmd, line) + } + + // Options should NOT end with a space + if strings.HasSuffix(line, " ") { + return fmt.Errorf("Help for %q should not end with a space: %s", cmd, line) + } + + } + + // For each command make sure we generate an error + // if we give a bad arg + args = strings.Split(cmd+" --badArg", " ") + + out, _, err = dockerCmdWithError(args...) + if err == nil { + return fmt.Errorf(out) + } + + // Be really picky + if strings.HasSuffix(stderr, "\n\n") { + return fmt.Errorf("Should not have a blank line at the end of 'docker rm'\n") + } + + // Now make sure that each command will print a short-usage + // (not a full usage - meaning no opts section) if we + // are missing a required arg or pass in a bad arg + + // These commands will never print a short-usage so don't test + noShortUsage := map[string]string{ + "images": "", + "login": "", + "logout": "", + "network": "", + "stats": "", + "volume create": "", + } + + if _, ok := noShortUsage[cmd]; !ok { + // skipNoArgs are ones that we don't want to try w/o + // any args. Either because it'll hang the test or + // lead to incorrect test result (like false negative). + // Whatever the reason, skip trying to run w/o args and + // jump to trying with a bogus arg. + skipNoArgs := map[string]struct{}{ + "daemon": {}, + "events": {}, + "load": {}, + } + + var result *icmd.Result + if _, ok := skipNoArgs[cmd]; !ok { + result = dockerCmdWithResult(strings.Split(cmd, " ")...) + } + + // If its ok w/o any args then try again with an arg + if result == nil || result.ExitCode == 0 { + result = dockerCmdWithResult(strings.Split(cmd+" badArg", " ")...) + } + + if err := result.Compare(icmd.Expected{ + Out: icmd.None, + Err: "\nUsage:", + ExitCode: 1, + }); err != nil { + return err + } + + stderr := result.Stderr() + // Shouldn't have full usage + if strings.Contains(stderr, "--help=false") { + return fmt.Errorf("Should not have full usage on %q:%v", result.Cmd.Args, stderr) + } + if strings.HasSuffix(stderr, "\n\n") { + return fmt.Errorf("Should not have a blank line on %q\n%v", result.Cmd.Args, stderr) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_history_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_history_test.go new file mode 100644 index 0000000..9979080 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_history_test.go @@ -0,0 +1,121 @@ +package main + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// This is a heisen-test. Because the created timestamp of images and the behavior of +// sort is not predictable it doesn't always fail. +func (s *DockerSuite) TestBuildHistory(c *check.C) { + name := "testbuildhistory" + _, err := buildImage(name, `FROM `+minimalBaseImage()+` +LABEL label.A="A" +LABEL label.B="B" +LABEL label.C="C" +LABEL label.D="D" +LABEL label.E="E" +LABEL label.F="F" +LABEL label.G="G" +LABEL label.H="H" +LABEL label.I="I" +LABEL label.J="J" +LABEL label.K="K" +LABEL label.L="L" +LABEL label.M="M" +LABEL label.N="N" +LABEL label.O="O" +LABEL label.P="P" +LABEL label.Q="Q" +LABEL label.R="R" +LABEL label.S="S" +LABEL label.T="T" +LABEL label.U="U" +LABEL label.V="V" +LABEL label.W="W" +LABEL label.X="X" +LABEL label.Y="Y" +LABEL label.Z="Z"`, + true) + + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "history", "testbuildhistory") + actualValues := strings.Split(out, "\n")[1:27] + expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"} + + for i := 0; i < 26; i++ { + echoValue := fmt.Sprintf("LABEL label.%s=%s", expectedValues[i], expectedValues[i]) + actualValue := actualValues[i] + c.Assert(actualValue, checker.Contains, echoValue) + } + +} + +func (s *DockerSuite) TestHistoryExistentImage(c *check.C) { + dockerCmd(c, "history", "busybox") +} + +func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) { + _, _, err := dockerCmdWithError("history", "testHistoryNonExistentImage") + c.Assert(err, checker.NotNil, check.Commentf("history on a non-existent image should fail.")) +} + +func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) { + name := "testhistoryimagewithcomment" + + // make an image through docker commit [ -m messages ] + + dockerCmd(c, "run", "--name", name, "busybox", "true") + dockerCmd(c, "wait", name) + + comment := "This_is_a_comment" + dockerCmd(c, "commit", "-m="+comment, name, name) + + // test docker history to check comment messages + + out, _ := dockerCmd(c, "history", name) + outputTabs := strings.Fields(strings.Split(out, "\n")[1]) + actualValue := outputTabs[len(outputTabs)-1] + c.Assert(actualValue, checker.Contains, comment) +} + +func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) { + out, _ := dockerCmd(c, "history", "--human=false", "busybox") + lines := strings.Split(out, "\n") + sizeColumnRegex, _ := regexp.Compile("SIZE +") + indices := sizeColumnRegex.FindStringIndex(lines[0]) + startIndex := indices[0] + endIndex := indices[1] + for i := 1; i < len(lines)-1; i++ { + if endIndex > len(lines[i]) { + endIndex = len(lines[i]) + } + sizeString := lines[i][startIndex:endIndex] + + _, err := strconv.Atoi(strings.TrimSpace(sizeString)) + c.Assert(err, checker.IsNil, check.Commentf("The size '%s' was not an Integer", sizeString)) + } +} + +func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) { + out, _ := dockerCmd(c, "history", "--human=true", "busybox") + lines := strings.Split(out, "\n") + sizeColumnRegex, _ := regexp.Compile("SIZE +") + humanSizeRegexRaw := "\\d+.*B" // Matches human sizes like 10 MB, 3.2 KB, etc + indices := sizeColumnRegex.FindStringIndex(lines[0]) + startIndex := indices[0] + endIndex := indices[1] + for i := 1; i < len(lines)-1; i++ { + if endIndex > len(lines[i]) { + endIndex = len(lines[i]) + } + sizeString := lines[i][startIndex:endIndex] + c.Assert(strings.TrimSpace(sizeString), checker.Matches, humanSizeRegexRaw, check.Commentf("The size '%s' was not in human format", sizeString)) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_images_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_images_test.go new file mode 100644 index 0000000..3b678a2 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_images_test.go @@ -0,0 +1,364 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) { + imagesOut, _ := dockerCmd(c, "images") + c.Assert(imagesOut, checker.Contains, "busybox") +} + +func (s *DockerSuite) TestImagesEnsureImageWithTagIsListed(c *check.C) { + name := "imagewithtag" + dockerCmd(c, "tag", "busybox", name+":v1") + dockerCmd(c, "tag", "busybox", name+":v1v1") + dockerCmd(c, "tag", "busybox", name+":v2") + + imagesOut, _ := dockerCmd(c, "images", name+":v1") + c.Assert(imagesOut, checker.Contains, name) + c.Assert(imagesOut, checker.Contains, "v1") + c.Assert(imagesOut, checker.Not(checker.Contains), "v2") + c.Assert(imagesOut, checker.Not(checker.Contains), "v1v1") + + imagesOut, _ = dockerCmd(c, "images", name) + c.Assert(imagesOut, checker.Contains, name) + c.Assert(imagesOut, checker.Contains, "v1") + c.Assert(imagesOut, checker.Contains, "v1v1") + c.Assert(imagesOut, checker.Contains, "v2") +} + +func (s *DockerSuite) TestImagesEnsureImageWithBadTagIsNotListed(c *check.C) { + imagesOut, _ := dockerCmd(c, "images", "busybox:nonexistent") + c.Assert(imagesOut, checker.Not(checker.Contains), "busybox") +} + +func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) { + id1, err := buildImage("order:test_a", + `FROM busybox + MAINTAINER dockerio1`, true) + c.Assert(err, checker.IsNil) + time.Sleep(1 * time.Second) + id2, err := buildImage("order:test_c", + `FROM busybox + MAINTAINER dockerio2`, true) + c.Assert(err, checker.IsNil) + time.Sleep(1 * time.Second) + id3, err := buildImage("order:test_b", + `FROM busybox + MAINTAINER dockerio3`, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc") + imgs := strings.Split(out, "\n") + c.Assert(imgs[0], checker.Equals, id3, check.Commentf("First image must be %s, got %s", id3, imgs[0])) + c.Assert(imgs[1], checker.Equals, id2, check.Commentf("First image must be %s, got %s", id2, imgs[1])) + c.Assert(imgs[2], checker.Equals, id1, check.Commentf("First image must be %s, got %s", id1, imgs[2])) +} + +func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) { + out, _, err := dockerCmdWithError("images", "-f", "FOO=123") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestImagesFilterLabelMatch(c *check.C) { + imageName1 := "images_filter_test1" + imageName2 := "images_filter_test2" + imageName3 := "images_filter_test3" + image1ID, err := buildImage(imageName1, + `FROM busybox + LABEL match me`, true) + c.Assert(err, check.IsNil) + + image2ID, err := buildImage(imageName2, + `FROM busybox + LABEL match="me too"`, true) + c.Assert(err, check.IsNil) + + image3ID, err := buildImage(imageName3, + `FROM busybox + LABEL nomatch me`, true) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match") + out = strings.TrimSpace(out) + c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image1ID)) + c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image2ID)) + c.Assert(out, check.Not(check.Matches), fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image3ID)) + + out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match=me too") + out = strings.TrimSpace(out) + c.Assert(out, check.Equals, image2ID) +} + +// Regression : #15659 +func (s *DockerSuite) TestImagesFilterLabelWithCommit(c *check.C) { + // Create a container + dockerCmd(c, "run", "--name", "bar", "busybox", "/bin/sh") + // Commit with labels "using changes" + out, _ := dockerCmd(c, "commit", "-c", "LABEL foo.version=1.0.0-1", "-c", "LABEL foo.name=bar", "-c", "LABEL foo.author=starlord", "bar", "bar:1.0.0-1") + imageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=foo.version=1.0.0-1") + out = strings.TrimSpace(out) + c.Assert(out, check.Equals, imageID) +} + +func (s *DockerSuite) TestImagesFilterSinceAndBefore(c *check.C) { + imageID1, err := buildImage("image:1", `FROM `+minimalBaseImage()+` +LABEL number=1`, true) + c.Assert(err, checker.IsNil) + imageID2, err := buildImage("image:2", `FROM `+minimalBaseImage()+` +LABEL number=2`, true) + c.Assert(err, checker.IsNil) + imageID3, err := buildImage("image:3", `FROM `+minimalBaseImage()+` +LABEL number=3`, true) + c.Assert(err, checker.IsNil) + + expected := []string{imageID3, imageID2} + + out, _ := dockerCmd(c, "images", "-f", "since=image:1", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "since="+imageID1, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID3} + + out, _ = dockerCmd(c, "images", "-f", "since=image:2", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "since="+imageID2, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID2, imageID1} + + out, _ = dockerCmd(c, "images", "-f", "before=image:3", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "before="+imageID3, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID1} + + out, _ = dockerCmd(c, "images", "-f", "before=image:2", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "before="+imageID2, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) +} + +func assertImageList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + + if len(lines)-1 != len(expected) { + return false + } + + imageIDIndex := strings.Index(lines[0], "IMAGE ID") + for i := 0; i < len(expected); i++ { + imageID := lines[i+1][imageIDIndex : imageIDIndex+12] + found := false + for _, e := range expected { + if imageID == e[7:19] { + found = true + break + } + } + if !found { + return false + } + } + + return true +} + +func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) { + imageName := "images_filter_test" + buildImage(imageName, + `FROM busybox + RUN touch /test/foo + RUN touch /test/bar + RUN touch /test/baz`, true) + + filters := []string{ + "dangling=true", + "Dangling=true", + " dangling=true", + "dangling=true ", + "dangling = true", + } + + imageListings := make([][]string, 5, 5) + for idx, filter := range filters { + out, _ := dockerCmd(c, "images", "-q", "-f", filter) + listing := strings.Split(out, "\n") + sort.Strings(listing) + imageListings[idx] = listing + } + + for idx, listing := range imageListings { + if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) { + for idx, errListing := range imageListings { + fmt.Printf("out %d\n", idx) + for _, image := range errListing { + fmt.Print(image) + } + fmt.Print("") + } + c.Fatalf("All output must be the same") + } + } +} + +func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) { + testRequires(c, DaemonIsLinux) + // create container 1 + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + containerID1 := strings.TrimSpace(out) + + // tag as foobox + out, _ = dockerCmd(c, "commit", containerID1, "foobox") + imageID := stringid.TruncateID(strings.TrimSpace(out)) + + // overwrite the tag, making the previous image dangling + dockerCmd(c, "tag", "busybox", "foobox") + + out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true") + // Expect one dangling image + c.Assert(strings.Count(out, imageID), checker.Equals, 1) + + out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false") + //dangling=false would not include dangling images + c.Assert(out, checker.Not(checker.Contains), imageID) + + out, _ = dockerCmd(c, "images") + //docker images still include dangling images + c.Assert(out, checker.Contains, imageID) + +} + +func (s *DockerSuite) TestImagesWithIncorrectFilter(c *check.C) { + out, _, err := dockerCmdWithError("images", "-f", "dangling=invalid") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestImagesEnsureOnlyHeadsImagesShown(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER docker + ENV foo bar` + + head, out, err := buildImageWithOut("scratch-image", dockerfile, false) + c.Assert(err, check.IsNil) + + // this is just the output of docker build + // we're interested in getting the image id of the MAINTAINER instruction + // and that's located at output, line 5, from 7 to end + split := strings.Split(out, "\n") + intermediate := strings.TrimSpace(split[5][7:]) + + out, _ = dockerCmd(c, "images") + // images shouldn't show non-heads images + c.Assert(out, checker.Not(checker.Contains), intermediate) + // images should contain final built images + c.Assert(out, checker.Contains, stringid.TruncateID(head)) +} + +func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support FROM scratch + + dockerfile := ` + FROM scratch + MAINTAINER docker` + + id, _, err := buildImageWithOut("scratch-image", dockerfile, false) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "images") + // images should contain images built from scratch + c.Assert(out, checker.Contains, stringid.TruncateID(id)) +} + +// For W2W - equivalent to TestImagesEnsureImagesFromScratchShown but Windows +// doesn't support from scratch +func (s *DockerSuite) TestImagesEnsureImagesFromBusyboxShown(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER docker` + + id, _, err := buildImageWithOut("busybox-image", dockerfile, false) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "images") + // images should contain images built from busybox + c.Assert(out, checker.Contains, stringid.TruncateID(id)) +} + +// #18181 +func (s *DockerSuite) TestImagesFilterNameWithPort(c *check.C) { + tag := "a.b.c.d:5000/hello" + dockerCmd(c, "tag", "busybox", tag) + out, _ := dockerCmd(c, "images", tag) + c.Assert(out, checker.Contains, tag) + + out, _ = dockerCmd(c, "images", tag+":latest") + c.Assert(out, checker.Contains, tag) + + out, _ = dockerCmd(c, "images", tag+":no-such-tag") + c.Assert(out, checker.Not(checker.Contains), tag) +} + +func (s *DockerSuite) TestImagesFormat(c *check.C) { + // testRequires(c, DaemonIsLinux) + tag := "myimage" + dockerCmd(c, "tag", "busybox", tag+":v1") + dockerCmd(c, "tag", "busybox", tag+":v2") + + out, _ := dockerCmd(c, "images", "--format", "{{.Repository}}", tag) + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"myimage", "myimage"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +// ImagesDefaultFormatAndQuiet +func (s *DockerSuite) TestImagesFormatDefaultFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + + // create container 1 + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + containerID1 := strings.TrimSpace(out) + + // tag as foobox + out, _ = dockerCmd(c, "commit", containerID1, "myimage") + imageID := stringid.TruncateID(strings.TrimSpace(out)) + + config := `{ + "imagesFormat": "{{ .ID }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "--config", d, "images", "-q", "myimage") + c.Assert(out, checker.Equals, imageID+"\n", check.Commentf("Expected to print only the image id, got %v\n", out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_import_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_import_test.go new file mode 100644 index 0000000..57dc2a6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_import_test.go @@ -0,0 +1,150 @@ +package main + +import ( + "bufio" + "compress/gzip" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestImportDisplay(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "export", cleanedContainerID), + exec.Command(dockerBinary, "import", "-"), + ) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + + image := strings.TrimSpace(out) + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) +} + +func (s *DockerSuite) TestImportBadURL(c *check.C) { + out, _, err := dockerCmdWithError("import", "http://nourl/bad") + c.Assert(err, checker.NotNil, check.Commentf("import was supposed to fail but didn't")) + // Depending on your system you can get either of these errors + if !strings.Contains(out, "dial tcp") && + !strings.Contains(out, "ApplyLayer exit status 1 stdout: stderr: archive/tar: invalid tar header") && + !strings.Contains(out, "Error processing tar file") { + c.Fatalf("expected an error msg but didn't get one.\nErr: %v\nOut: %v", err, out) + } +} + +func (s *DockerSuite) TestImportFile(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + runCmd := exec.Command(dockerBinary, "export", "test-import") + runCmd.Stdout = bufio.NewWriter(temporaryFile) + + _, err = runCommand(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + + out, _ := dockerCmd(c, "import", temporaryFile.Name()) + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + image := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) +} + +func (s *DockerSuite) TestImportGzipped(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + runCmd := exec.Command(dockerBinary, "export", "test-import") + w := gzip.NewWriter(temporaryFile) + runCmd.Stdout = w + + _, err = runCommand(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + err = w.Close() + c.Assert(err, checker.IsNil, check.Commentf("failed to close gzip writer")) + temporaryFile.Close() + out, _ := dockerCmd(c, "import", temporaryFile.Name()) + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + image := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) +} + +func (s *DockerSuite) TestImportFileWithMessage(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + runCmd := exec.Command(dockerBinary, "export", "test-import") + runCmd.Stdout = bufio.NewWriter(temporaryFile) + + _, err = runCommand(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + + message := "Testing commit message" + out, _ := dockerCmd(c, "import", "-m", message, temporaryFile.Name()) + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + image := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "history", image) + split := strings.Split(out, "\n") + + c.Assert(split, checker.HasLen, 3, check.Commentf("expected 3 lines from image history")) + r := regexp.MustCompile("[\\s]{2,}") + split = r.Split(split[1], -1) + + c.Assert(message, checker.Equals, split[3], check.Commentf("didn't get expected value in commit message")) + + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing")) +} + +func (s *DockerSuite) TestImportFileNonExistentFile(c *check.C) { + _, _, err := dockerCmdWithError("import", "example.com/myImage.tar") + c.Assert(err, checker.NotNil, check.Commentf("import non-existing file must failed")) +} + +func (s *DockerSuite) TestImportWithQuotedChanges(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + result := icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs("export", "test-import"), + Stdout: bufio.NewWriter(temporaryFile), + }) + c.Assert(result, icmd.Matches, icmd.Success) + + result = dockerCmdWithResult("import", "-c", `ENTRYPOINT ["/bin/sh", "-c"]`, temporaryFile.Name()) + c.Assert(result, icmd.Matches, icmd.Success) + image := strings.TrimSpace(result.Stdout()) + + result = dockerCmdWithResult("run", "--rm", image, "true") + c.Assert(result, icmd.Matches, icmd.Expected{Out: icmd.None}) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_info_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_info_test.go new file mode 100644 index 0000000..62ce7e2 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_info_test.go @@ -0,0 +1,234 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// ensure docker info succeeds +func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "info") + + // always shown fields + stringsToCheck := []string{ + "ID:", + "Containers:", + " Running:", + " Paused:", + " Stopped:", + "Images:", + "OSType:", + "Architecture:", + "Logging Driver:", + "Operating System:", + "CPUs:", + "Total Memory:", + "Kernel Version:", + "Storage Driver:", + "Volume:", + "Network:", + "Live Restore Enabled:", + } + + if daemonPlatform == "linux" { + stringsToCheck = append(stringsToCheck, "Init Binary:", "Security Options:", "containerd version:", "runc version:", "init version:") + } + + if DaemonIsLinux.Condition() { + stringsToCheck = append(stringsToCheck, "Runtimes:", "Default Runtime: runc") + } + + if experimentalDaemon { + stringsToCheck = append(stringsToCheck, "Experimental: true") + } else { + stringsToCheck = append(stringsToCheck, "Experimental: false") + } + + for _, linePrefix := range stringsToCheck { + c.Assert(out, checker.Contains, linePrefix, check.Commentf("couldn't find string %v in output", linePrefix)) + } +} + +// TestInfoFormat tests `docker info --format` +func (s *DockerSuite) TestInfoFormat(c *check.C) { + out, status := dockerCmd(c, "info", "--format", "{{json .}}") + c.Assert(status, checker.Equals, 0) + var m map[string]interface{} + err := json.Unmarshal([]byte(out), &m) + c.Assert(err, checker.IsNil) + _, _, err = dockerCmdWithError("info", "--format", "{{.badString}}") + c.Assert(err, checker.NotNil) +} + +// TestInfoDiscoveryBackend verifies that a daemon run with `--cluster-advertise` and +// `--cluster-store` properly show the backend's endpoint in info output. +func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := NewDaemon(c) + discoveryBackend := "consul://consuladdr:consulport/some/path" + discoveryAdvertise := "1.1.1.1:2375" + err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise)) + c.Assert(err, checker.IsNil) + defer d.Stop() + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend)) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s\n", discoveryAdvertise)) +} + +// TestInfoDiscoveryInvalidAdvertise verifies that a daemon run with +// an invalid `--cluster-advertise` configuration +func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := NewDaemon(c) + discoveryBackend := "consul://consuladdr:consulport/some/path" + + // --cluster-advertise with an invalid string is an error + err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid") + c.Assert(err, checker.Not(checker.IsNil)) + + // --cluster-advertise without --cluster-store is also an error + err = d.Start("--cluster-advertise=1.1.1.1:2375") + c.Assert(err, checker.Not(checker.IsNil)) +} + +// TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise` +// configured with interface name properly show the advertise ip-address in info output. +func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { + testRequires(c, SameHostDaemon, Network, DaemonIsLinux) + + d := NewDaemon(c) + discoveryBackend := "consul://consuladdr:consulport/some/path" + discoveryAdvertise := "eth0" + + err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise)) + c.Assert(err, checker.IsNil) + defer d.Stop() + + iface, err := net.InterfaceByName(discoveryAdvertise) + c.Assert(err, checker.IsNil) + addrs, err := iface.Addrs() + c.Assert(err, checker.IsNil) + c.Assert(len(addrs), checker.GreaterThan, 0) + ip, _, err := net.ParseCIDR(addrs[0].String()) + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend)) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s:2375\n", ip.String())) +} + +func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "run", "-d", "busybox", "top") + out, _ := dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) +} + +func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) { + testRequires(c, IsPausable) + + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", cleanedContainerID) + + out, _ = dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) +} + +func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "stop", cleanedContainerID) + + out, _ = dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1)) +} + +func (s *DockerSuite) TestInfoDebug(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := NewDaemon(c) + err := d.Start("--debug") + c.Assert(err, checker.IsNil) + defer d.Stop() + + out, err := d.Cmd("--debug", "info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Debug Mode (client): true\n") + c.Assert(out, checker.Contains, "Debug Mode (server): true\n") + c.Assert(out, checker.Contains, "File Descriptors") + c.Assert(out, checker.Contains, "Goroutines") + c.Assert(out, checker.Contains, "System Time") + c.Assert(out, checker.Contains, "EventsListeners") + c.Assert(out, checker.Contains, "Docker Root Dir") +} + +func (s *DockerSuite) TestInsecureRegistries(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + registryCIDR := "192.168.1.0/24" + registryHost := "insecurehost.com:5000" + + d := NewDaemon(c) + err := d.Start("--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost) + c.Assert(err, checker.IsNil) + defer d.Stop() + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Insecure Registries:\n") + c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryHost)) + c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryCIDR)) +} + +func (s *DockerDaemonSuite) TestRegistryMirrors(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + registryMirror1 := "https://192.168.1.2" + registryMirror2 := "http://registry.mirror.com:5000" + + err := s.d.Start("--registry-mirror="+registryMirror1, "--registry-mirror="+registryMirror2) + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Registry Mirrors:\n") + c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror2)) +} + +// Test case for #24392 +func (s *DockerDaemonSuite) TestInfoLabels(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.Start("--label", `test.empty=`, "--label", `test.empty=`, "--label", `test.label="1"`, "--label", `test.label="2"`) + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "WARNING: labels with duplicate keys and conflicting values have been deprecated") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_info_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_info_unix_test.go new file mode 100644 index 0000000..b932306 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_info_unix_test.go @@ -0,0 +1,15 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoSecurityOptions(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, Apparmor, DaemonIsLinux) + + out, _ := dockerCmd(c, "info") + c.Assert(out, checker.Contains, "Security Options:\n apparmor\n seccomp\n Profile: default\n") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_inspect_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_inspect_test.go new file mode 100644 index 0000000..32ed28a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_inspect_test.go @@ -0,0 +1,466 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func checkValidGraphDriver(c *check.C, name string) { + if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" { + c.Fatalf("%v is not a valid graph driver name", name) + } +} + +func (s *DockerSuite) TestInspectImage(c *check.C) { + testRequires(c, DaemonIsLinux) + imageTest := "emptyfs" + // It is important that this ID remain stable. If a code change causes + // it to be different, this is equivalent to a cache bust when pulling + // a legacy-format manifest. If the check at the end of this function + // fails, fix the difference in the image serialization instead of + // updating this hash. + imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" + id := inspectField(c, imageTest, "Id") + + c.Assert(id, checker.Equals, imageTestID) +} + +func (s *DockerSuite) TestInspectInt64(c *check.C) { + dockerCmd(c, "run", "-d", "-m=300M", "--name", "inspectTest", "busybox", "true") + inspectOut := inspectField(c, "inspectTest", "HostConfig.Memory") + c.Assert(inspectOut, checker.Equals, "314572800") +} + +func (s *DockerSuite) TestInspectDefault(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch the container JSON. + //If the container JSON is not available, it will go for the image JSON. + + out, _ := dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + containerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, "busybox", "Id") + c.Assert(strings.TrimSpace(inspectOut), checker.Equals, containerID) +} + +func (s *DockerSuite) TestInspectStatus(c *check.C) { + if daemonPlatform != "windows" { + defer unpauseAllContainers() + } + out, _ := runSleepingContainer(c, "-d") + out = strings.TrimSpace(out) + + inspectOut := inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + + // Windows does not support pause/unpause on Windows Server Containers. + // (RS1 does for Hyper-V Containers, but production CI is not setup for that) + if daemonPlatform != "windows" { + dockerCmd(c, "pause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "paused") + + dockerCmd(c, "unpause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + } + + dockerCmd(c, "stop", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "exited") + +} + +func (s *DockerSuite) TestInspectTypeFlagContainer(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch container + //JSON State.Running field. If the field is true, it's a container. + runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format={{.State.Running}}" + out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") + c.Assert(out, checker.Equals, "true\n") // not a container JSON +} + +func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *check.C) { + //Run this test on an image named busybox. docker inspect will try to fetch container + //JSON. Since there is no container named busybox and --type=container, docker inspect will + //not try to get the image JSON. It will throw an error. + + dockerCmd(c, "run", "-d", "busybox", "true") + + _, _, err := dockerCmdWithError("inspect", "--type=container", "busybox") + // docker inspect should fail, as there is no container named busybox + c.Assert(err, checker.NotNil) +} + +func (s *DockerSuite) TestInspectTypeFlagWithImage(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch image + //JSON as --type=image. if there is no image with name busybox, docker inspect + //will throw an error. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, _ := dockerCmd(c, "inspect", "--type=image", "busybox") + c.Assert(out, checker.Not(checker.Contains), "State") // not an image JSON +} + +func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *check.C) { + //Both the container and image are named busybox. docker inspect will fail + //as --type=foobar is not a valid value for the flag. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox") + c.Assert(err, checker.NotNil, check.Commentf("%s", exitCode)) + c.Assert(exitCode, checker.Equals, 1, check.Commentf("%s", err)) + c.Assert(out, checker.Contains, "not a valid value for --type") +} + +func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) { + testRequires(c, DaemonIsLinux) + imageTest := "emptyfs" + out := inspectField(c, imageTest, "Size") + + size, err := strconv.Atoi(out) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect size of the image: %s, %v", out, err)) + + //now see if the size turns out to be the same + formatStr := fmt.Sprintf("--format={{eq .Size %d}}", size) + out, _ = dockerCmd(c, "inspect", formatStr, imageTest) + result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) + c.Assert(err, checker.IsNil) + c.Assert(result, checker.Equals, true) +} + +func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") + runCmd.Stdin = strings.NewReader("blahblah") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to run container: %v, output: %q", err, out)) + + id := strings.TrimSpace(out) + + out = inspectField(c, id, "State.ExitCode") + + exitCode, err := strconv.Atoi(out) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect exitcode of the container: %s, %v", out, err)) + + //now get the exit code to verify + formatStr := fmt.Sprintf("--format={{eq .State.ExitCode %d}}", exitCode) + out, _ = dockerCmd(c, "inspect", formatStr, id) + result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) + c.Assert(err, checker.IsNil) + c.Assert(result, checker.Equals, true) +} + +func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) { + testRequires(c, DaemonIsLinux, Devicemapper) + imageTest := "emptyfs" + name := inspectField(c, imageTest, "GraphDriver.Name") + + checkValidGraphDriver(c, name) + + deviceID := inspectField(c, imageTest, "GraphDriver.Data.DeviceId") + + _, err := strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, imageTest, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) { + testRequires(c, DaemonIsLinux, Devicemapper) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + out = strings.TrimSpace(out) + + name := inspectField(c, out, "GraphDriver.Name") + + checkValidGraphDriver(c, name) + + imageDeviceID := inspectField(c, "busybox", "GraphDriver.Data.DeviceId") + + deviceID := inspectField(c, out, "GraphDriver.Data.DeviceId") + + c.Assert(imageDeviceID, checker.Not(checker.Equals), deviceID) + + _, err := strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, out, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) { + modifier := ",z" + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if daemonPlatform == "windows" { + modifier = "" + // TODO Windows: Temporary check - remove once TP5 support is dropped + if windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + // Linux creates the host directory if it doesn't exist. Windows does not. + os.Mkdir(`c:\data`, os.ModeDir) + } + + dockerCmd(c, "run", "-d", "--name", "test", "-v", prefix+slash+"data:"+prefix+slash+"data:ro"+modifier, "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one mountpoint + c.Assert(mp, check.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "") + c.Assert(m.Driver, checker.Equals, "") + c.Assert(m.Source, checker.Equals, prefix+slash+"data") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + if daemonPlatform != "windows" { // Windows does not set mode + c.Assert(m.Mode, checker.Equals, "ro"+modifier) + } + c.Assert(m.RW, checker.Equals, false) +} + +func (s *DockerSuite) TestInspectNamedMountPoint(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "-d", "--name", "test", "-v", "data:"+prefix+slash+"data", "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one mountpoint + c.Assert(mp, checker.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "data") + c.Assert(m.Driver, checker.Equals, "local") + c.Assert(m.Source, checker.Not(checker.Equals), "") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + c.Assert(m.RW, checker.Equals, true) +} + +// #14947 +func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + id := strings.TrimSpace(out) + startedAt := inspectField(c, id, "State.StartedAt") + finishedAt := inspectField(c, id, "State.FinishedAt") + created := inspectField(c, id, "Created") + + _, err := time.Parse(time.RFC3339Nano, startedAt) + c.Assert(err, checker.IsNil) + _, err = time.Parse(time.RFC3339Nano, finishedAt) + c.Assert(err, checker.IsNil) + _, err = time.Parse(time.RFC3339Nano, created) + c.Assert(err, checker.IsNil) + + created = inspectField(c, "busybox", "Created") + + _, err = time.Parse(time.RFC3339Nano, created) + c.Assert(err, checker.IsNil) +} + +// #15633 +func (s *DockerSuite) TestInspectLogConfigNoType(c *check.C) { + dockerCmd(c, "create", "--name=test", "--log-opt", "max-file=42", "busybox") + var logConfig container.LogConfig + + out := inspectFieldJSON(c, "test", "HostConfig.LogConfig") + + err := json.NewDecoder(strings.NewReader(out)).Decode(&logConfig) + c.Assert(err, checker.IsNil, check.Commentf("%v", out)) + + c.Assert(logConfig.Type, checker.Equals, "json-file") + c.Assert(logConfig.Config["max-file"], checker.Equals, "42", check.Commentf("%v", logConfig)) +} + +func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *check.C) { + + //Both the container and image are named busybox. docker inspect will fetch container + //JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields. + + runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format={{.SizeRw}},{{.SizeRootFs}}" + out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") + c.Assert(strings.TrimSpace(out), check.Equals, ",", check.Commentf("Exepcted not to display size info: %s", out)) +} + +func (s *DockerSuite) TestInspectSizeFlagContainer(c *check.C) { + runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'" + out, _ := dockerCmd(c, "inspect", "-s", "--type=container", formatStr, "busybox") + sz := strings.Split(out, ",") + + c.Assert(strings.TrimSpace(sz[0]), check.Not(check.Equals), "") + c.Assert(strings.TrimSpace(sz[1]), check.Not(check.Equals), "") +} + +func (s *DockerSuite) TestInspectTemplateError(c *check.C) { + // Template parsing error for both the container and image. + + runSleepingContainer(c, "--name=container1", "-d") + + out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='Format container: {{.ThisDoesNotExist}}'", "container1") + c.Assert(err, check.Not(check.IsNil)) + c.Assert(out, checker.Contains, "Template parsing error") + + out, _, err = dockerCmdWithError("inspect", "--type=image", "--format='Format container: {{.ThisDoesNotExist}}'", "busybox") + c.Assert(err, check.Not(check.IsNil)) + c.Assert(out, checker.Contains, "Template parsing error") +} + +func (s *DockerSuite) TestInspectJSONFields(c *check.C) { + runSleepingContainer(c, "--name=busybox", "-d") + out, _, err := dockerCmdWithError("inspect", "--type=container", "--format={{.HostConfig.Dns}}", "busybox") + + c.Assert(err, check.IsNil) + c.Assert(out, checker.Equals, "[]\n") +} + +func (s *DockerSuite) TestInspectByPrefix(c *check.C) { + id := inspectField(c, "busybox", "Id") + c.Assert(id, checker.HasPrefix, "sha256:") + + id2 := inspectField(c, id[:12], "Id") + c.Assert(id, checker.Equals, id2) + + id3 := inspectField(c, strings.TrimPrefix(id, "sha256:")[:12], "Id") + c.Assert(id, checker.Equals, id3) +} + +func (s *DockerSuite) TestInspectStopWhenNotFound(c *check.C) { + runSleepingContainer(c, "--name=busybox", "-d") + runSleepingContainer(c, "--name=not-shown", "-d") + out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='{{.Name}}'", "busybox", "missing", "not-shown") + + c.Assert(err, checker.Not(check.IsNil)) + c.Assert(out, checker.Contains, "busybox") + c.Assert(out, checker.Not(checker.Contains), "not-shown") + c.Assert(out, checker.Contains, "Error: No such container: missing") +} + +func (s *DockerSuite) TestInspectHistory(c *check.C) { + dockerCmd(c, "run", "--name=testcont", "busybox", "echo", "hello") + dockerCmd(c, "commit", "-m", "test comment", "testcont", "testimg") + out, _, err := dockerCmdWithError("inspect", "--format='{{.Comment}}'", "testimg") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "test comment") +} + +func (s *DockerSuite) TestInspectContainerNetworkDefault(c *check.C) { + testRequires(c, DaemonIsLinux) + + contName := "test1" + dockerCmd(c, "run", "--name", contName, "-d", "busybox", "top") + netOut, _ := dockerCmd(c, "network", "inspect", "--format={{.ID}}", "bridge") + out := inspectField(c, contName, "NetworkSettings.Networks") + c.Assert(out, checker.Contains, "bridge") + out = inspectField(c, contName, "NetworkSettings.Networks.bridge.NetworkID") + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) +} + +func (s *DockerSuite) TestInspectContainerNetworkCustom(c *check.C) { + testRequires(c, DaemonIsLinux) + + netOut, _ := dockerCmd(c, "network", "create", "net1") + dockerCmd(c, "run", "--name=container1", "--net=net1", "-d", "busybox", "top") + out := inspectField(c, "container1", "NetworkSettings.Networks") + c.Assert(out, checker.Contains, "net1") + out = inspectField(c, "container1", "NetworkSettings.Networks.net1.NetworkID") + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) +} + +func (s *DockerSuite) TestInspectRootFS(c *check.C) { + out, _, err := dockerCmdWithError("inspect", "busybox") + c.Assert(err, check.IsNil) + + var imageJSON []types.ImageInspect + err = json.Unmarshal([]byte(out), &imageJSON) + c.Assert(err, checker.IsNil) + + c.Assert(len(imageJSON[0].RootFS.Layers), checker.GreaterOrEqualThan, 1) +} + +func (s *DockerSuite) TestInspectAmpersand(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "test" + out, _ := dockerCmd(c, "run", "--name", name, "--env", `TEST_ENV="soanni&rtr"`, "busybox", "env") + c.Assert(out, checker.Contains, `soanni&rtr`) + out, _ = dockerCmd(c, "inspect", name) + c.Assert(out, checker.Contains, `soanni&rtr`) +} + +func (s *DockerSuite) TestInspectPlugin(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + // Even without tag the inspect still work + out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +// Test case for 29185 +func (s *DockerSuite) TestInspectUnknownObject(c *check.C) { + // This test should work on both Windows and Linux + out, _, err := dockerCmdWithError("inspect", "foobar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Error: No such object: foobar") + c.Assert(err.Error(), checker.Contains, "Error: No such object: foobar") +} + +func (s *DockerSuite) TestInpectInvalidReference(c *check.C) { + // This test should work on both Windows and Linux + out, _, err := dockerCmdWithError("inspect", "FooBar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Error: No such object: FooBar") + c.Assert(err.Error(), checker.Contains, "Error: No such object: FooBar") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_kill_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_kill_test.go new file mode 100644 index 0000000..4316480 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_kill_test.go @@ -0,0 +1,134 @@ +package main + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestKillContainer(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + dockerCmd(c, "kill", cleanedContainerID) + c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) + +} + +func (s *DockerSuite) TestKillOffStoppedContainer(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "stop", cleanedContainerID) + c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + + _, _, err := dockerCmdWithError("kill", "-s", "30", cleanedContainerID) + c.Assert(err, check.Not(check.IsNil), check.Commentf("Container %s is not running", cleanedContainerID)) +} + +func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { + // TODO Windows: Windows does not yet support -u (Feb 2016). + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + dockerCmd(c, "kill", cleanedContainerID) + c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) + +} + +// regression test about correct signal parsing see #13665 +func (s *DockerSuite) TestKillWithSignal(c *check.C) { + // Cannot port to Windows - does not support signals in the same way Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + dockerCmd(c, "kill", "-s", "SIGWINCH", cid) + time.Sleep(250 * time.Millisecond) + + running := inspectField(c, cid, "State.Running") + + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after SIGWINCH")) +} + +func (s *DockerSuite) TestKillWithStopSignalWithSameSignalShouldDisableRestartPolicy(c *check.C) { + // Cannot port to Windows - does not support signals int the same way as Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--stop-signal=TERM", "--restart=always", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + // Let docker send a TERM signal to the container + // It will kill the process and disable the restart policy + dockerCmd(c, "kill", "-s", "TERM", cid) + c.Assert(waitExited(cid, 10*time.Second), check.IsNil) + + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), cid, check.Commentf("killed container is still running")) +} + +func (s *DockerSuite) TestKillWithStopSignalWithDifferentSignalShouldKeepRestartPolicy(c *check.C) { + // Cannot port to Windows - does not support signals int the same way as Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--stop-signal=CONT", "--restart=always", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + // Let docker send a TERM signal to the container + // It will kill the process, but not disable the restart policy + dockerCmd(c, "kill", "-s", "TERM", cid) + c.Assert(waitRestart(cid, 10*time.Second), check.IsNil) + + // Restart policy should still be in place, so it should be still running + c.Assert(waitRun(cid), check.IsNil) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + out, _, err := dockerCmdWithError("kill", "-s", "0", cid) + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid signal: 0", check.Commentf("Kill with an invalid signal didn't error out correctly")) + + running := inspectField(c, cid, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) + + out, _ = runSleepingContainer(c, "-d") + cid = strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + out, _, err = dockerCmdWithError("kill", "-s", "SIG42", cid) + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid signal: SIG42", check.Commentf("Kill with an invalid signal error out correctly")) + + running = inspectField(c, cid, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) + +} + +func (s *DockerSuite) TestKillStoppedContainerAPIPre120(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + runSleepingContainer(c, "--name", "docker-kill-test-api", "-d") + dockerCmd(c, "stop", "docker-kill-test-api") + + status, _, err := sockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_links_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_links_test.go new file mode 100644 index 0000000..a5872d9 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_links_test.go @@ -0,0 +1,240 @@ +package main + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/runconfig" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + _, exitCode, err := dockerCmdWithError("run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + + // run ping failed with error + c.Assert(exitCode, checker.Equals, 1, check.Commentf("error: %v", err)) +} + +// Test for appropriate error when calling --link with an invalid target container +func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--link", "bogus:alias", "busybox", "true") + + // an invalid container target should produce an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // an invalid container target should produce an error + c.Assert(out, checker.Contains, "Could not get container") +} + +func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + // Test with the three different ways of specifying the default network on Linux + testLinkPingOnNetwork(c, "") + testLinkPingOnNetwork(c, "default") + testLinkPingOnNetwork(c, "bridge") +} + +func testLinkPingOnNetwork(c *check.C, network string) { + var postArgs []string + if network != "" { + postArgs = append(postArgs, []string{"--net", network}...) + } + postArgs = append(postArgs, []string{"busybox", "top"}...) + runArgs1 := append([]string{"run", "-d", "--name", "container1", "--hostname", "fred"}, postArgs...) + runArgs2 := append([]string{"run", "-d", "--name", "container2", "--hostname", "wilma"}, postArgs...) + + // Run the two named containers + dockerCmd(c, runArgs1...) + dockerCmd(c, runArgs2...) + + postArgs = []string{} + if network != "" { + postArgs = append(postArgs, []string{"--net", network}...) + } + postArgs = append(postArgs, []string{"busybox", "sh", "-c"}...) + + // Format a run for a container which links to the other two + runArgs := append([]string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2"}, postArgs...) + pingCmd := "ping -c 1 %s -W 1 && ping -c 1 %s -W 1" + + // test ping by alias, ping by name, and ping by hostname + // 1. Ping by alias + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) + // 2. Ping by container name + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) + // 3. Ping by hostname + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) + + // Clean for next round + dockerCmd(c, "rm", "-f", "container1") + dockerCmd(c, "rm", "-f", "container2") +} + +func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + dockerCmd(c, "kill", idA) + dockerCmd(c, "kill", idB) + +} + +func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := convertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := convertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "--name=first", "busybox", "top") + dockerCmd(c, "create", "--name=second", "--link=first:first", "busybox", "top") + dockerCmd(c, "start", "first") + +} + +func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + + out, _ := dockerCmd(c, "run", "-itd", "--name", "one", "busybox", "top") + idOne := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top") + idTwo := strings.TrimSpace(out) + + c.Assert(waitRun(idTwo), checker.IsNil) + + contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts") + c.Assert(err, checker.IsNil, check.Commentf("contentOne: %s", string(contentOne))) + + contentTwo, err := readContainerFileWithExec(idTwo, "/etc/hosts") + c.Assert(err, checker.IsNil, check.Commentf("contentTwo: %s", string(contentTwo))) + // Host is not present in updated hosts file + c.Assert(string(contentTwo), checker.Contains, "onetwo") +} + +func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + dockerCmd(c, "run", "-d", "--name", "one", "busybox", "top") + out, _ := dockerCmd(c, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top") + id := strings.TrimSpace(string(out)) + + realIP := inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + content, err := readContainerFileWithExec(id, "/etc/hosts") + c.Assert(err, checker.IsNil) + + getIP := func(hosts []byte, hostname string) string { + re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) + matches := re.FindSubmatch(hosts) + c.Assert(matches, checker.NotNil, check.Commentf("Hostname %s have no matches in hosts", hostname)) + return string(matches[1]) + } + ip := getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) + + dockerCmd(c, "restart", "one") + realIP = inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + + content, err = readContainerFileWithExec(id, "/etc/hosts") + c.Assert(err, checker.IsNil, check.Commentf("content: %s", string(content))) + ip = getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) +} + +func (s *DockerSuite) TestLinksEnvs(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top") + out, _ := dockerCmd(c, "run", "--name=second", "--link=first:first", "busybox", "env") + c.Assert(out, checker.Contains, "FIRST_ENV_e1=\n") + c.Assert(out, checker.Contains, "FIRST_ENV_e2=v2") + c.Assert(out, checker.Contains, "FIRST_ENV_e3=v3=v3") +} + +func (s *DockerSuite) TestLinkShortDefinition(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "shortlinkdef", "busybox", "top") + + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), checker.IsNil) + + out, _ = dockerCmd(c, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top") + + cid2 := strings.TrimSpace(out) + c.Assert(waitRun(cid2), checker.IsNil) + + links := inspectFieldJSON(c, cid2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]") +} + +func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top") + out, _, err := dockerCmdWithError("run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true") + + // Running container linking to a container with --net host should have failed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // Running container linking to a container with --net host should have failed + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") + // /etc/hosts should be a regular file + c.Assert(out, checker.Matches, "^-.+\n") +} + +func (s *DockerSuite) TestLinksMultipleWithSameName(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name=upstream-a", "busybox", "top") + dockerCmd(c, "run", "-d", "--name=upstream-b", "busybox", "top") + dockerCmd(c, "run", "--link", "upstream-a:upstream", "--link", "upstream-b:upstream", "busybox", "sh", "-c", "ping -c 1 upstream") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_links_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_links_unix_test.go new file mode 100644 index 0000000..1af9279 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_links_unix_test.go @@ -0,0 +1,26 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) { + // In a _unix file as using Unix specific files, and must be on the + // same host as the daemon. + testRequires(c, SameHostDaemon, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hosts") + hosts, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + c.Skip("/etc/hosts does not exist, skip this test") + } + + c.Assert(out, checker.Equals, string(hosts), check.Commentf("container: %s\n\nhost:%s", out, hosts)) + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_login_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_login_test.go new file mode 100644 index 0000000..01de75d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_login_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "bytes" + "os/exec" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) { + cmd := exec.Command(dockerBinary, "login") + + // Send to stdin so the process does not get the TTY + cmd.Stdin = bytes.NewBufferString("buffer test string \n") + + // run the command and block until it's done + err := cmd.Run() + c.Assert(err, checker.NotNil) //"Expected non nil err when loginning in & TTY not available" +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistry(c *check.C) { + // wrong credentials + out, _, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", privateRegistryURL) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "401 Unauthorized") + + // now it's fine + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistryDeprecatedEmailFlag(c *check.C) { + // Test to make sure login still works with the deprecated -e and --email flags + // wrong credentials + out, _, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", "-e", s.reg.email, privateRegistryURL) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "401 Unauthorized") + + // now it's fine + // -e flag + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "-e", s.reg.email, privateRegistryURL) + // --email flag + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "--email", s.reg.email, privateRegistryURL) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_logout_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_logout_test.go new file mode 100644 index 0000000..a5f4b10 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_logout_test.go @@ -0,0 +1,100 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + c.Assert(string(b), checker.Contains, privateRegistryURL) + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + + b, err = ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), privateRegistryURL) + + // check I cannot pull anymore + out, _, err := dockerCmdWithError("--config", tmp, "pull", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found") +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithWrongHostnamesStored(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + cmd := exec.Command("docker-credential-shell-test", "store") + stdin := bytes.NewReader([]byte(fmt.Sprintf(`{"ServerURL": "https://%s", "Username": "%s", "Secret": "%s"}`, privateRegistryURL, s.reg.username, s.reg.password))) + cmd.Stdin = stdin + c.Assert(cmd.Run(), checker.IsNil) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := fmt.Sprintf(`{ "auths": {"https://%s": {}}, "credsStore": "shell-test" }`, privateRegistryURL) + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Contains, fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), checker.Contains, fmt.Sprintf("\"%s\": {}", privateRegistryURL)) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + + b, err = ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"%s\": {}", privateRegistryURL)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_bench_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_bench_test.go new file mode 100644 index 0000000..eeb008d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_bench_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/go-check/check" +) + +func (s *DockerSuite) BenchmarkLogsCLIRotateFollow(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--log-opt", "max-size=1b", "--log-opt", "max-file=10", "busybox", "sh", "-c", "while true; do usleep 50000; echo hello; done") + id := strings.TrimSpace(out) + ch := make(chan error, 1) + go func() { + ch <- nil + out, _, _ := dockerCmdWithError("logs", "-f", id) + // if this returns at all, it's an error + ch <- fmt.Errorf(out) + }() + + <-ch + select { + case <-time.After(30 * time.Second): + // ran for 30 seconds with no problem + return + case err := <-ch: + if err != nil { + c.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_test.go new file mode 100644 index 0000000..d2dcad1 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_test.go @@ -0,0 +1,328 @@ +package main + +import ( + "fmt" + "io" + "os/exec" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/jsonlog" + "github.com/go-check/check" +) + +// This used to work, it test a log of PageSize-1 (gh#4851) +func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) { + testLen := 32767 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", id) + + c.Assert(out, checker.HasLen, testLen+1) +} + +// Regression test: When going over the PageSize, it used to panic (gh#4851) +func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) { + testLen := 32768 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", id) + + c.Assert(out, checker.HasLen, testLen+1) +} + +// Regression test: When going much over the PageSize, it used to block (gh#4851) +func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) { + testLen := 33000 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", id) + + c.Assert(out, checker.HasLen, testLen+1) +} + +func (s *DockerSuite) TestLogsTimestamps(c *check.C) { + testLen := 100 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo = >> a.a; done; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", "-t", id) + + lines := strings.Split(out, "\n") + + c.Assert(lines, checker.HasLen, testLen+1) + + ts := regexp.MustCompile(`^.* `) + + for _, l := range lines { + if l != "" { + _, err := time.Parse(jsonlog.RFC3339NanoFixed+" ", ts.FindString(l)) + c.Assert(err, checker.IsNil, check.Commentf("Failed to parse timestamp from %v", l)) + // ensure we have padded 0's + c.Assert(l[29], checker.Equals, uint8('Z')) + } + } +} + +func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) { + msg := "stderr_log" + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) + + c.Assert(stdout, checker.Equals, "") + + stderr = strings.TrimSpace(stderr) + + c.Assert(stderr, checker.Equals, msg) +} + +func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { + // TODO Windows: Needs investigation why this fails. Obtained string includes + // a bunch of ANSI escape sequences before the "stderr_log" message. + testRequires(c, DaemonIsLinux) + msg := "stderr_log" + out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) + c.Assert(stderr, checker.Equals, "") + + stdout = strings.TrimSpace(stdout) + c.Assert(stdout, checker.Equals, msg) +} + +func (s *DockerSuite) TestLogsTail(c *check.C) { + testLen := 100 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", "--tail", "0", id) + lines := strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 1) + + out, _ = dockerCmd(c, "logs", "--tail", "5", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 6) + + out, _ = dockerCmd(c, "logs", "--tail", "99", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 100) + + out, _ = dockerCmd(c, "logs", "--tail", "all", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) + + out, _ = dockerCmd(c, "logs", "--tail", "-1", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) + + out, _, _ = dockerCmdWithStdoutStderr(c, "logs", "--tail", "random", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) +} + +func (s *DockerSuite) TestLogsFollowStopped(c *check.C) { + dockerCmd(c, "run", "--name=test", "busybox", "echo", "hello") + id, err := getIDByName("test") + c.Assert(err, check.IsNil) + + logsCmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(logsCmd.Start(), checker.IsNil) + + errChan := make(chan error) + go func() { + errChan <- logsCmd.Wait() + close(errChan) + }() + + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Following logs is hanged") + } +} + +func (s *DockerSuite) TestLogsSince(c *check.C) { + name := "testlogssince" + dockerCmd(c, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo log$i; done") + out, _ := dockerCmd(c, "logs", "-t", name) + + log2Line := strings.Split(strings.Split(out, "\n")[1], " ") + t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // the timestamp log2 is written + c.Assert(err, checker.IsNil) + since := t.Unix() + 1 // add 1s so log1 & log2 doesn't show up + out, _ = dockerCmd(c, "logs", "-t", fmt.Sprintf("--since=%v", since), name) + + // Skip 2 seconds + unexpected := []string{"log1", "log2"} + for _, v := range unexpected { + c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", since)) + } + + // Test to make sure a bad since format is caught by the client + out, _, _ = dockerCmdWithError("logs", "-t", "--since=2006-01-02T15:04:0Z", name) + c.Assert(out, checker.Contains, "cannot parse \"0Z\" as \"05\"", check.Commentf("bad since format passed to server")) + + // Test with default value specified and parameter omitted + expected := []string{"log1", "log2", "log3"} + for _, cmd := range []*exec.Cmd{ + exec.Command(dockerBinary, "logs", "-t", name), + exec.Command(dockerBinary, "logs", "-t", "--since=0", name), + } { + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to log container: %s", out)) + for _, v := range expected { + c.Assert(out, checker.Contains, v) + } + } +} + +func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) { + // TODO Windows TP5 - Figure out why this test is so flakey. Disabled for now. + testRequires(c, DaemonIsLinux) + name := "testlogssincefuturefollow" + out, _ := dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do echo log$i; sleep 1; done`) + + // Extract one timestamp from the log file to give us a starting point for + // our `--since` argument. Because the log producer runs in the background, + // we need to check repeatedly for some output to be produced. + var timestamp string + for i := 0; i != 100 && timestamp == ""; i++ { + if out, _ = dockerCmd(c, "logs", "-t", name); out == "" { + time.Sleep(time.Millisecond * 100) // Retry + } else { + timestamp = strings.Split(strings.Split(out, "\n")[0], " ")[0] + } + } + + c.Assert(timestamp, checker.Not(checker.Equals), "") + t, err := time.Parse(time.RFC3339Nano, timestamp) + c.Assert(err, check.IsNil) + + since := t.Unix() + 2 + out, _ = dockerCmd(c, "logs", "-t", "-f", fmt.Sprintf("--since=%v", since), name) + c.Assert(out, checker.Not(checker.HasLen), 0, check.Commentf("cannot read from empty log")) + lines := strings.Split(strings.TrimSpace(out), "\n") + for _, v := range lines { + ts, err := time.Parse(time.RFC3339Nano, strings.Split(v, " ")[0]) + c.Assert(err, checker.IsNil, check.Commentf("cannot parse timestamp output from log: '%v'", v)) + c.Assert(ts.Unix() >= since, checker.Equals, true, check.Commentf("earlier log found. since=%v logdate=%v", since, ts)) + } +} + +// Regression test for #8832 +func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { + // TODO Windows: Fix this test for TP5. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`) + + id := strings.TrimSpace(out) + + stopSlowRead := make(chan bool) + + go func() { + exec.Command(dockerBinary, "wait", id).Run() + stopSlowRead <- true + }() + + logCmd := exec.Command(dockerBinary, "logs", "-f", id) + stdout, err := logCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + c.Assert(logCmd.Start(), checker.IsNil) + + // First read slowly + bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) + c.Assert(err, checker.IsNil) + + // After the container has finished we can continue reading fast + bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) + c.Assert(err, checker.IsNil) + + actual := bytes1 + bytes2 + expected := 200000 + c.Assert(actual, checker.Equals, expected) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + r, w := io.Pipe() + cmd.Stdout = w + c.Assert(cmd.Start(), checker.IsNil) + + // Make sure pipe is written to + chErr := make(chan error) + go func() { + b := make([]byte, 1) + _, err := r.Read(b) + chErr <- err + }() + c.Assert(<-chErr, checker.IsNil) + c.Assert(cmd.Process.Kill(), checker.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(cmd.Start(), checker.IsNil) + time.Sleep(200 * time.Millisecond) + c.Assert(cmd.Process.Kill(), checker.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsCLIContainerNotFound(c *check.C) { + name := "testlogsnocontainer" + out, _, _ := dockerCmdWithError("logs", name) + message := fmt.Sprintf("Error: No such container: %s\n", name) + c.Assert(out, checker.Equals, message) +} + +func (s *DockerSuite) TestLogsWithDetails(c *check.C) { + dockerCmd(c, "run", "--name=test", "--label", "foo=bar", "-e", "baz=qux", "--log-opt", "labels=foo", "--log-opt", "env=baz", "busybox", "echo", "hello") + out, _ := dockerCmd(c, "logs", "--details", "--timestamps", "test") + + logFields := strings.Fields(strings.TrimSpace(out)) + c.Assert(len(logFields), checker.Equals, 3, check.Commentf(out)) + + details := strings.Split(logFields[1], ",") + c.Assert(details, checker.HasLen, 2) + c.Assert(details[0], checker.Equals, "baz=qux") + c.Assert(details[1], checker.Equals, "foo=bar") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_nat_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_nat_test.go new file mode 100644 index 0000000..7f4cc2c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_nat_test.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func startServerContainer(c *check.C, msg string, port int) string { + name := "server" + cmd := []string{ + "-d", + "-p", fmt.Sprintf("%d:%d", port, port), + "busybox", + "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port), + } + c.Assert(waitForContainer(name, cmd...), check.IsNil) + return name +} + +func getExternalAddress(c *check.C) net.IP { + iface, err := net.InterfaceByName("eth0") + if err != nil { + c.Skip(fmt.Sprintf("Test not running with `make test`. Interface eth0 not found: %v", err)) + } + + ifaceAddrs, err := iface.Addrs() + c.Assert(err, check.IsNil) + c.Assert(ifaceAddrs, checker.Not(checker.HasLen), 0) + + ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) + c.Assert(err, check.IsNil) + + return ifaceIP +} + +func getContainerLogs(c *check.C, containerID string) string { + out, _ := dockerCmd(c, "logs", containerID) + return strings.Trim(out, "\r\n") +} + +func getContainerStatus(c *check.C, containerID string) string { + out := inspectField(c, containerID, "State.Running") + return out +} + +func (s *DockerSuite) TestNetworkNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + msg := "it works" + startServerContainer(c, msg, 8080) + endpoint := getExternalAddress(c) + conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080)) + c.Assert(err, check.IsNil) + + data, err := ioutil.ReadAll(conn) + conn.Close() + c.Assert(err, check.IsNil) + + final := strings.TrimRight(string(data), "\n") + c.Assert(final, checker.Equals, msg) +} + +func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + var ( + msg = "hi yall" + ) + startServerContainer(c, msg, 8081) + conn, err := net.Dial("tcp", "localhost:8081") + c.Assert(err, check.IsNil) + + data, err := ioutil.ReadAll(conn) + conn.Close() + c.Assert(err, check.IsNil) + + final := strings.TrimRight(string(data), "\n") + c.Assert(final, checker.Equals, msg) +} + +func (s *DockerSuite) TestNetworkLoopbackNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + msg := "it works" + startServerContainer(c, msg, 8080) + endpoint := getExternalAddress(c) + out, _ := dockerCmd(c, "run", "-t", "--net=container:server", "busybox", + "sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())) + final := strings.TrimRight(string(out), "\n") + c.Assert(final, checker.Equals, msg) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_netmode_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_netmode_test.go new file mode 100644 index 0000000..4dfad93 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_netmode_test.go @@ -0,0 +1,94 @@ +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/runconfig" + "github.com/go-check/check" +) + +// GH14530. Validates combinations of --net= with other options + +// stringCheckPS is how the output of PS starts in order to validate that +// the command executed in a container did really run PS correctly. +const stringCheckPS = "PID USER" + +// DockerCmdWithFail executes a docker command that is supposed to fail and returns +// the output, the exit code. If the command returns a Nil error, it will fail and +// stop the tests. +func dockerCmdWithFail(c *check.C, args ...string) (string, int) { + out, status, err := dockerCmdWithError(args...) + c.Assert(err, check.NotNil, check.Commentf("%v", out)) + return out, status +} + +func (s *DockerSuite) TestNetHostnameWithNetHost(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) +} + +func (s *DockerSuite) TestNetHostname(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-h=name", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=bridge", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=none", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--net=container:other", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container", "busybox", "ps") + c.Assert(out, checker.Contains, "--net: invalid net mode: invalid container format container:") + + out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps") + c.Assert(out, checker.Contains, "network weird not found") +} + +func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictContainerNetworkHostAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmdWithFail(c, "run", "--net=host", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeNetHostAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmdWithFail(c, "run", "--net=host", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--dns=8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkAndDNS.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--add-host=name:8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHosts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-P", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-p", "8080", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--expose", "8000-9000", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkExposePorts.Error()) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_network_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_network_unix_test.go new file mode 100644 index 0000000..97f204a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_network_unix_test.go @@ -0,0 +1,1791 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork/driverapi" + remoteapi "github.com/docker/libnetwork/drivers/remote/api" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/docker/libnetwork/netlabel" + "github.com/go-check/check" + "github.com/vishvananda/netlink" +) + +const dummyNetworkDriver = "dummy-network-driver" +const dummyIPAMDriver = "dummy-ipam-driver" + +var remoteDriverNetworkRequest remoteapi.CreateNetworkRequest + +func init() { + check.Suite(&DockerNetworkSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerNetworkSuite struct { + server *httptest.Server + ds *DockerSuite + d *Daemon +} + +func (s *DockerNetworkSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) +} + +func (s *DockerNetworkSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteNetworkDrivers(c, mux, s.server.URL, dummyNetworkDriver, dummyIPAMDriver) +} + +func setupRemoteNetworkDrivers(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Scope":"local"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected address from the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressReleaseReq.Address != gw { + fmt.Fprintf(w, `{"Error":"unknown address"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) + + ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerNetworkSuite) TearDownSuite(c *check.C) { + if s.server == nil { + return + } + + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func assertNwIsAvailable(c *check.C, name string) { + if !isNwPresent(c, name) { + c.Fatalf("Network %s not found in network ls o/p", name) + } +} + +func assertNwNotAvailable(c *check.C, name string) { + if isNwPresent(c, name) { + c.Fatalf("Found network %s in network ls o/p", name) + } +} + +func isNwPresent(c *check.C, name string) bool { + out, _ := dockerCmd(c, "network", "ls") + lines := strings.Split(out, "\n") + for i := 1; i < len(lines)-1; i++ { + netFields := strings.Fields(lines[i]) + if netFields[1] == name { + return true + } + } + return false +} + +// assertNwList checks network list retrieved with ls command +// equals to expected network list +// note: out should be `network ls [option]` result +func assertNwList(c *check.C, out string, expectNws []string) { + lines := strings.Split(out, "\n") + var nwList []string + for _, line := range lines[1 : len(lines)-1] { + netFields := strings.Fields(line) + // wrap all network name in nwList + nwList = append(nwList, netFields[1]) + } + + // network ls should contains all expected networks + c.Assert(nwList, checker.DeepEquals, expectNws) +} + +func getNwResource(c *check.C, name string) *types.NetworkResource { + out, _ := dockerCmd(c, "network", "inspect", name) + nr := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &nr) + c.Assert(err, check.IsNil) + return &nr[0] +} + +func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) { + defaults := []string{"bridge", "host", "none"} + for _, nn := range defaults { + assertNwIsAvailable(c, nn) + } +} + +func (s *DockerSuite) TestNetworkLsFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "network", "ls", "--format", "{{.Name}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"bridge", "host", "none"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestNetworkLsFormatDefaultFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + + config := `{ + "networksFormat": "{{ .Name }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "--config", d, "network", "ls") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"bridge default", "host default", "none default"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be created again + out, _, err := dockerCmdWithError("network", "create", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateHostBind(c *check.C) { + dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") + assertNwIsAvailable(c, "testbind") + + out, _ := runSleepingContainer(c, "--net=testbind", "-p", "5000:5000") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + out, _ = dockerCmd(c, "ps") + c.Assert(out, checker.Contains, "192.168.10.1:5000->5000/tcp") +} + +func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be removed + out, _, err := dockerCmdWithError("network", "rm", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) { + testNet := "testnet1" + testLabel := "foo" + testValue := "bar" + out, _ := dockerCmd(c, "network", "create", "dev") + defer func() { + dockerCmd(c, "network", "rm", "dev") + dockerCmd(c, "network", "rm", testNet) + }() + networkID := strings.TrimSpace(out) + + // filter with partial ID + // only show 'dev' network + out, _ = dockerCmd(c, "network", "ls", "-f", "id="+networkID[0:5]) + assertNwList(c, out, []string{"dev"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "name=dge") + assertNwList(c, out, []string{"bridge"}) + + // only show built-in network (bridge, none, host) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "host", "none"}) + + // only show custom networks (dev) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom") + assertNwList(c, out, []string{"dev"}) + + // show all networks with filter + // it should be equivalent of ls without option + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "dev", "host", "none"}) + + out, _ = dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel+"="+testValue) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label=nonexistent") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=null") + assertNwList(c, out, []string{"none"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=host") + assertNwList(c, out, []string{"host"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=bridge") + assertNwList(c, out, []string{"bridge", "dev", testNet}) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateDelete(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateLabel(c *check.C) { + testNet := "testnetcreatelabel" + testLabel := "foo" + testValue := "bar" + + dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _, err := dockerCmdWithError("network", "inspect", "--format={{ .Labels."+testLabel+" }}", testNet) + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) + + dockerCmd(c, "network", "rm", testNet) + assertNwNotAvailable(c, testNet) +} + +func (s *DockerSuite) TestDockerNetworkDeleteNotExists(c *check.C) { + out, _, err := dockerCmdWithError("network", "rm", "test") + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) +} + +func (s *DockerSuite) TestDockerNetworkDeleteMultiple(c *check.C) { + dockerCmd(c, "network", "create", "testDelMulti0") + assertNwIsAvailable(c, "testDelMulti0") + dockerCmd(c, "network", "create", "testDelMulti1") + assertNwIsAvailable(c, "testDelMulti1") + dockerCmd(c, "network", "create", "testDelMulti2") + assertNwIsAvailable(c, "testDelMulti2") + out, _ := dockerCmd(c, "run", "-d", "--net", "testDelMulti2", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + // delete three networks at the same time, since testDelMulti2 + // contains active container, its deletion should fail. + out, _, err := dockerCmdWithError("network", "rm", "testDelMulti0", "testDelMulti1", "testDelMulti2") + // err should not be nil due to deleting testDelMulti2 failed. + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // testDelMulti2 should fail due to network has active endpoints + c.Assert(out, checker.Contains, "has active endpoints") + assertNwNotAvailable(c, "testDelMulti0") + assertNwNotAvailable(c, "testDelMulti1") + // testDelMulti2 can't be deleted, so it should exist + assertNwIsAvailable(c, "testDelMulti2") +} + +func (s *DockerSuite) TestDockerNetworkInspect(c *check.C) { + out, _ := dockerCmd(c, "network", "inspect", "host") + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Name }}", "host") + c.Assert(strings.TrimSpace(out), check.Equals, "host") +} + +func (s *DockerSuite) TestDockerNetworkInspectWithID(c *check.C) { + out, _ := dockerCmd(c, "network", "create", "test2") + networkID := strings.TrimSpace(out) + assertNwIsAvailable(c, "test2") + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Id }}", "test2") + c.Assert(strings.TrimSpace(out), check.Equals, networkID) + + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .ID }}", "test2") + c.Assert(strings.TrimSpace(out), check.Equals, networkID) +} + +func (s *DockerSuite) TestDockerInspectMultipleNetwork(c *check.C) { + result := dockerCmdWithResult("network", "inspect", "host", "none") + c.Assert(result, icmd.Matches, icmd.Success) + + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 2) + + // Should print an error, return an exitCode 1 *but* should print the host network + result = dockerCmdWithResult("network", "inspect", "host", "nonexistent") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "host", + }) + + networkResources = []types.NetworkResource{} + err = json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(networkResources, checker.HasLen, 1) + + // Should print an error and return an exitCode, nothing else + result = dockerCmdWithResult("network", "inspect", "nonexistent") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "[]", + }) +} + +func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) { + dockerCmd(c, "network", "create", "brNetForInspect") + assertNwIsAvailable(c, "brNetForInspect") + defer func() { + dockerCmd(c, "network", "rm", "brNetForInspect") + assertNwNotAvailable(c, "brNetForInspect") + }() + + out, _ := dockerCmd(c, "run", "-d", "--name", "testNetInspect1", "--net", "brNetForInspect", "busybox", "top") + c.Assert(waitRun("testNetInspect1"), check.IsNil) + containerID := strings.TrimSpace(out) + defer func() { + // we don't stop container by name, because we'll rename it later + dockerCmd(c, "stop", containerID) + }() + + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + container, ok := networkResources[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container.Name, checker.Equals, "testNetInspect1") + + // rename container and check docker inspect output update + newName := "HappyNewName" + dockerCmd(c, "rename", "testNetInspect1", newName) + + // check whether network inspect works properly + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + newNetRes := []types.NetworkResource{} + err = json.Unmarshal([]byte(out), &newNetRes) + c.Assert(err, check.IsNil) + c.Assert(newNetRes, checker.HasLen, 1) + container1, ok := newNetRes[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container1.Name, checker.Equals, newName) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnect(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + nr := getNwResource(c, "test") + + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + containerID := strings.TrimSpace(out) + + // connect the container to the test network + dockerCmd(c, "network", "connect", "test", containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], check.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, check.IsNil) + containerIP := findContainerIP(c, "test", "test") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + dockerCmd(c, "network", "disconnect", "test", containerID) + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run another container + out, _ = dockerCmd(c, "run", "-d", "--net", "test", "--name", "test2", "busybox", "top") + c.Assert(waitRun("test2"), check.IsNil) + containerID = strings.TrimSpace(out) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 1) + + // force disconnect the container to the test network + dockerCmd(c, "network", "disconnect", "-f", "test", containerID) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) { + // test0 bridge network + dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1") + assertNwIsAvailable(c, "test1") + + // test2 bridge network does not overlap + dockerCmd(c, "network", "create", "--subnet=192.169.0.0/16", "test2") + assertNwIsAvailable(c, "test2") + + // for networks w/o ipam specified, docker will choose proper non-overlapping subnets + dockerCmd(c, "network", "create", "test3") + assertNwIsAvailable(c, "test3") + dockerCmd(c, "network", "create", "test4") + assertNwIsAvailable(c, "test4") + dockerCmd(c, "network", "create", "test5") + assertNwIsAvailable(c, "test5") + + // test network with multiple subnets + // bridge network doesn't support multiple subnets. hence, use a dummy driver that supports + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6") + assertNwIsAvailable(c, "test6") + + // test network with multiple subnets with valid ipam combinations + // also check same subnet across networks when the driver supports it. + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, + "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", + "--gateway=192.168.0.100", "--gateway=192.170.0.100", + "--ip-range=192.168.1.0/24", + "--aux-address", "a=192.168.1.5", "--aux-address", "b=192.168.1.6", + "--aux-address", "c=192.170.1.5", "--aux-address", "d=192.170.1.6", + "test7") + assertNwIsAvailable(c, "test7") + + // cleanup + for i := 1; i < 8; i++ { + dockerCmd(c, "network", "rm", fmt.Sprintf("test%d", i)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) { + // Create a bridge network using custom ipam driver + dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "br0") + assertNwIsAvailable(c, "br0") + + // Verify expected network ipam fields are there + nr := getNetworkResource(c, "br0") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.IPAM.Driver, checker.Equals, dummyIPAMDriver) + + // remove network and exercise remote ipam driver + dockerCmd(c, "network", "rm", "br0") + assertNwNotAvailable(c, "br0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) { + // Create a bridge network using custom ipam driver and options + dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0") + assertNwIsAvailable(c, "br0") + + // Verify expected network ipam options + nr := getNetworkResource(c, "br0") + opts := nr.IPAM.Options + c.Assert(opts["opt1"], checker.Equals, "drv1") + c.Assert(opts["opt2"], checker.Equals, "drv2") +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) { + nr := getNetworkResource(c, "none") + c.Assert(nr.Driver, checker.Equals, "null") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 0) + + nr = getNetworkResource(c, "host") + c.Assert(nr.Driver, checker.Equals, "host") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 0) + + nr = getNetworkResource(c, "bridge") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) + c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomUnspecified(c *check.C) { + // if unspecified, network subnet will be selected from inside preferred pool + dockerCmd(c, "network", "create", "test01") + assertNwIsAvailable(c, "test01") + + nr := getNetworkResource(c, "test01") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) + c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) + + dockerCmd(c, "network", "rm", "test01") + assertNwNotAvailable(c, "test01") +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) { + dockerCmd(c, "network", "create", "--driver=bridge", "--ipv6", "--subnet=fd80:24e2:f998:72d6::/64", "--subnet=172.28.0.0/16", "--ip-range=172.28.5.0/24", "--gateway=172.28.5.254", "br0") + assertNwIsAvailable(c, "br0") + + nr := getNetworkResource(c, "br0") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, true) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 2) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") + c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") + c.Assert(nr.Internal, checker.False) + dockerCmd(c, "network", "rm", "br0") + assertNwNotAvailable(c, "test01") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMInvalidCombinations(c *check.C) { + // network with ip-range out of subnet range + _, _, err := dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--ip-range=192.170.0.0/16", "test") + c.Assert(err, check.NotNil) + + // network with multiple gateways for a single subnet + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test") + c.Assert(err, check.NotNil) + + // Multiple overlapping subnets in the same network must fail + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test") + c.Assert(err, check.NotNil) + + // overlapping subnets across networks must fail + // create a valid test0 network + dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test0") + assertNwIsAvailable(c, "test0") + // create an overlapping test1 network + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.128.0/17", "test1") + c.Assert(err, check.NotNil) + dockerCmd(c, "network", "rm", "test0") + assertNwNotAvailable(c, "test0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) { + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "-o", "opt1=drv1", "-o", "opt2=drv2", "testopt") + assertNwIsAvailable(c, "testopt") + gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData] + c.Assert(gopts, checker.NotNil) + opts, ok := gopts.(map[string]interface{}) + c.Assert(ok, checker.Equals, true) + c.Assert(opts["opt1"], checker.Equals, "drv1") + c.Assert(opts["opt2"], checker.Equals, "drv2") + dockerCmd(c, "network", "rm", "testopt") + assertNwNotAvailable(c, "testopt") + +} + +func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + var ( + npName = "tiborvass/test-docker-netplugin" + npTag = "latest" + npNameWithTag = npName + ":" + npTag + ) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npName) + c.Assert(out, checker.Contains, npTag) + c.Assert(out, checker.Contains, "true") + + dockerCmd(c, "network", "create", "-d", npNameWithTag, "v2net") + assertNwIsAvailable(c, "v2net") + dockerCmd(c, "network", "rm", "v2net") + assertNwNotAvailable(c, "v2net") + +} + +func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *check.C) { + testRequires(c, ExecSupport) + // On default bridge network built-in service discovery should not happen + hostsFile := "/etc/hosts" + bridgeName := "external-bridge" + bridgeIP := "192.169.255.254/24" + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = s.d.StartWithBusybox("--bridge", bridgeName) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + // run two containers and store first container's etc/hosts content + out, err = s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + cid1 := strings.TrimSpace(out) + defer s.d.Cmd("stop", cid1) + + hosts, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("run", "-d", "--name", "container2", "busybox", "top") + c.Assert(err, check.IsNil) + cid2 := strings.TrimSpace(out) + + // verify first container's etc/hosts file has not changed after spawning the second named container + hostsPost, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // stop container 2 and verify first container's etc/hosts has not changed + _, err = s.d.Cmd("stop", cid2) + c.Assert(err, check.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // but discovery is on when connecting to non default bridge network + network := "anotherbridge" + out, err = s.d.Cmd("network", "create", network) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer s.d.Cmd("network", "rm", network) + + out, err = s.d.Cmd("network", "connect", network, cid1) + c.Assert(err, check.IsNil, check.Commentf(out)) + + hosts, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second network connection", hostsFile)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { + testRequires(c, ExecSupport, NotArm) + hostsFile := "/etc/hosts" + cstmBridgeNw := "custom-bridge-nw" + cstmBridgeNw1 := "custom-bridge-nw1" + + dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw) + assertNwIsAvailable(c, cstmBridgeNw) + + // run two anonymous containers and store their etc/hosts content + out, _ := dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") + cid1 := strings.TrimSpace(out) + + hosts1, err := readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") + cid2 := strings.TrimSpace(out) + + hosts2, err := readContainerFileWithExec(cid2, hostsFile) + c.Assert(err, checker.IsNil) + + // verify first container etc/hosts file has not changed + hosts1post, err := readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on anonymous container creation", hostsFile)) + + // Connect the 2nd container to a new network and verify the + // first container /etc/hosts file still hasn't changed. + dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw1) + assertNwIsAvailable(c, cstmBridgeNw1) + + dockerCmd(c, "network", "connect", cstmBridgeNw1, cid2) + + hosts2, err = readContainerFileWithExec(cid2, hostsFile) + c.Assert(err, checker.IsNil) + + hosts1post, err = readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on container connect", hostsFile)) + + // start a named container + cName := "AnyName" + out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "--name", cName, "busybox", "top") + cid3 := strings.TrimSpace(out) + + // verify that container 1 and 2 can ping the named container + dockerCmd(c, "exec", cid1, "ping", "-c", "1", cName) + dockerCmd(c, "exec", cid2, "ping", "-c", "1", cName) + + // Stop named container and verify first two containers' etc/hosts file hasn't changed + dockerCmd(c, "stop", cid3) + hosts1post, err = readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on name container creation", hostsFile)) + + hosts2post, err := readContainerFileWithExec(cid2, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts2), checker.Equals, string(hosts2post), + check.Commentf("Unexpected %s change on name container creation", hostsFile)) + + // verify that container 1 and 2 can't ping the named container now + _, _, err = dockerCmdWithError("exec", cid1, "ping", "-c", "1", cName) + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", cid2, "ping", "-c", "1", cName) + c.Assert(err, check.NotNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkLinkOnDefaultNetworkOnly(c *check.C) { + // Legacy Link feature must work only on default network, and not across networks + cnt1 := "container1" + cnt2 := "container2" + network := "anotherbridge" + + // Run first container on default network + dockerCmd(c, "run", "-d", "--name", cnt1, "busybox", "top") + + // Create another network and run the second container on it + dockerCmd(c, "network", "create", network) + assertNwIsAvailable(c, network) + dockerCmd(c, "run", "-d", "--net", network, "--name", cnt2, "busybox", "top") + + // Try launching a container on default network, linking to the first container. Must succeed + dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt1, cnt1), "busybox", "top") + + // Try launching a container on default network, linking to the second container. Must fail + _, _, err := dockerCmdWithError("run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") + c.Assert(err, checker.NotNil) + + // Connect second container to default network. Now a container on default network can link to it + dockerCmd(c, "network", "connect", "bridge", cnt2) + dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") +} + +func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { + // Verify exposed ports are present in ps output when running a container on + // a network managed by a driver which does not provide the default gateway + // for the container + nwn := "ov" + ctn := "bb" + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, expose1, expose2, "busybox", "top") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dnd := "dnd" + did := "did" + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + s.d.StartWithBusybox() + _, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1") + c.Assert(err, checker.IsNil) + + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "foo", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) + + // Kill daemon and restart + if err = s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + + server.Close() + + startTime := time.Now().Unix() + if err = s.d.Restart(); err != nil { + c.Fatal(err) + } + lapse := time.Now().Unix() - startTime + if lapse > 60 { + // In normal scenarios, daemon restart takes ~1 second. + // Plugin retry mechanism can delay the daemon start. systemd may not like it. + // Avoid accessing plugins during daemon bootup + c.Logf("daemon restart took too long : %d seconds", lapse) + } + + // Restart the custom dummy plugin + mux = http.NewServeMux() + server = httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + // trying to reuse the same ip must succeed + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "bar", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { + // Verify endpoint MAC address is correctly populated in container's network settings + nwn := "ov" + ctn := "bb" + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, "busybox", "top") + + mac := inspectField(c, ctn, "NetworkSettings.Networks."+nwn+".MacAddress") + c.Assert(mac, checker.Equals, "a0:b1:c2:d3:e4:f5") +} + +func (s *DockerSuite) TestInspectAPIMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "mybridge1") + dockerCmd(c, "network", "create", "mybridge2") + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "network", "connect", "mybridge1", id) + dockerCmd(c, "network", "connect", "mybridge2", id) + + body := getInspectBody(c, "v1.20", id) + var inspect120 v1p20.ContainerJSON + err := json.Unmarshal(body, &inspect120) + c.Assert(err, checker.IsNil) + + versionedIP := inspect120.NetworkSettings.IPAddress + + body = getInspectBody(c, "v1.21", id) + var inspect121 types.ContainerJSON + err = json.Unmarshal(body, &inspect121) + c.Assert(err, checker.IsNil) + c.Assert(inspect121.NetworkSettings.Networks, checker.HasLen, 3) + + bridge := inspect121.NetworkSettings.Networks["bridge"] + c.Assert(bridge.IPAddress, checker.Equals, versionedIP) + c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) +} + +func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []string) { + // Run a container on the default network + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attach the container to other networks + for _, nw := range nws { + out, err = d.Cmd("network", "create", nw) + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("network", "connect", nw, cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + } +} + +func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) { + // Verify container is connected to all the networks + for _, nw := range nws { + out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Equals), "\n") + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) { + cName := "bb" + nwList := []string{"nw1", "nw2", "nw3"} + + s.d.StartWithBusybox() + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Reload daemon + s.d.Restart() + + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) { + cName := "cc" + nwList := []string{"nw1", "nw2", "nw3"} + + s.d.StartWithBusybox() + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Kill daemon and restart + if err := s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + s.d.Restart() + + // Restart container + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) { + out, _ := dockerCmd(c, "network", "create", "one") + containerOut, _, err := dockerCmdWithError("run", "-d", "--net", strings.TrimSpace(out), "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(containerOut)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + s.d.StartWithBusybox() + + // Run a few containers on host network + for i := 0; i < 10; i++ { + cName := fmt.Sprintf("hostc-%d", i) + out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // verfiy container has finished starting before killing daemon + err = s.d.waitRun(cName) + c.Assert(err, checker.IsNil) + } + + // Kill daemon ungracefully and restart + if err := s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // make sure all the containers are up and running + for i := 0; i < 10; i++ { + err := s.d.waitRun(fmt.Sprintf("hostc-%d", i)) + c.Assert(err, checker.IsNil) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectToHostFromOtherNetwork(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + dockerCmd(c, "network", "disconnect", "bridge", "container1") + out, _, err := dockerCmdWithError("network", "connect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromHost(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "container1", "--net=host", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + out, _, err := dockerCmdWithError("network", "disconnect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf("Should err out disconnect from host")) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithPortMapping(c *check.C) { + testRequires(c, NotArm) + dockerCmd(c, "network", "create", "test1") + dockerCmd(c, "run", "-d", "--name", "c1", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + dockerCmd(c, "network", "connect", "test1", "c1") +} + +func verifyPortMap(c *check.C, container, port, originalMapping string, mustBeEqual bool) { + chk := checker.Equals + if !mustBeEqual { + chk = checker.Not(checker.Equals) + } + currentMapping, _ := dockerCmd(c, "port", container, port) + c.Assert(currentMapping, chk, originalMapping) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectWithPortMapping(c *check.C) { + // Connect and disconnect a container with explicit and non-explicit + // host port mapping to/from networks which do cause and do not cause + // the container default gateway to change, and verify docker port cmd + // returns congruent information + testRequires(c, NotArm) + cnt := "c1" + dockerCmd(c, "network", "create", "aaa") + dockerCmd(c, "network", "create", "ccc") + + dockerCmd(c, "run", "-d", "--name", cnt, "-p", "9000:90", "-p", "70", "busybox", "top") + c.Assert(waitRun(cnt), check.IsNil) + curPortMap, _ := dockerCmd(c, "port", cnt, "70") + curExplPortMap, _ := dockerCmd(c, "port", cnt, "90") + + // Connect to a network which causes the container's default gw switch + dockerCmd(c, "network", "connect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Disconnect from a network which causes the container's default gw switch + dockerCmd(c, "network", "disconnect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Connect to a network which does not cause the container's default gw switch + dockerCmd(c, "network", "connect", "ccc", cnt) + verifyPortMap(c, cnt, "70", curPortMap, true) + verifyPortMap(c, cnt, "90", curExplPortMap, true) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithMac(c *check.C) { + macAddress := "02:42:ac:11:00:02" + dockerCmd(c, "network", "create", "mynetwork") + dockerCmd(c, "run", "--name=test", "-d", "--mac-address", macAddress, "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + mac1 := inspectField(c, "test", "NetworkSettings.Networks.bridge.MacAddress") + c.Assert(strings.TrimSpace(mac1), checker.Equals, macAddress) + dockerCmd(c, "network", "connect", "mynetwork", "test") + mac2 := inspectField(c, "test", "NetworkSettings.Networks.mynetwork.MacAddress") + c.Assert(strings.TrimSpace(mac2), checker.Not(checker.Equals), strings.TrimSpace(mac1)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCreatedContainer(c *check.C) { + dockerCmd(c, "create", "--name", "test", "busybox") + networks := inspectField(c, "test", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should return 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + dockerCmd(c, "network", "connect", "test", "foo") + dockerCmd(c, "restart", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should contain 'bridge' network")) + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "create", "--name=foo", "busybox", "top") + dockerCmd(c, "network", "connect", "test", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // start the container and test if we can ping it from another container in the same network + dockerCmd(c, "start", "foo") + c.Assert(waitRun("foo"), checker.IsNil) + ip := inspectField(c, "foo", "NetworkSettings.Networks.test.IPAddress") + ip = strings.TrimSpace(ip) + dockerCmd(c, "run", "--net=test", "busybox", "sh", "-c", fmt.Sprintf("ping -c 1 %s", ip)) + + dockerCmd(c, "stop", "foo") + + // Test disconnect + dockerCmd(c, "network", "disconnect", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectContainerNonexistingNetwork(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--net=test", "-d", "--name=foo", "busybox", "top") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // Stop container and remove network + dockerCmd(c, "stop", "foo") + dockerCmd(c, "network", "rm", "test") + + // Test disconnecting stopped container from nonexisting network + dockerCmd(c, "network", "disconnect", "-f", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) { + // create two networks + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "n0") + assertNwIsAvailable(c, "n0") + + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--ip-range=172.30.5.0/24", "--subnet=2001:db8:abcd::/64", "--ip-range=2001:db8:abcd::/80", "n1") + assertNwIsAvailable(c, "n1") + + // run a container on first network specifying the ip addresses + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + + // connect the container to the second network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n1", "c0") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Stop and restart the container + dockerCmd(c, "stop", "c0") + dockerCmd(c, "start", "c0") + + // verify requested addresses are applied and configs are still there + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Still it should fail to connect to the default network with a specified IP (whatever ip) + out, _, err := dockerCmdWithError("network", "connect", "--ip", "172.21.55.44", "bridge", "c0") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIPStoppedContainer(c *check.C) { + // create a container + dockerCmd(c, "create", "--name", "c0", "busybox", "top") + + // create a network + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--subnet=2001:db8:abcd::/64", "n0") + assertNwIsAvailable(c, "n0") + + // connect the container to the network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n0", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // start the container, verify config has not changed and ip addresses are assigned + dockerCmd(c, "start", "c0") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // stop the container and check ip config has not changed + dockerCmd(c, "stop", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") +} + +func (s *DockerNetworkSuite) TestDockerNetworkUnsupportedRequiredIP(c *check.C) { + // requested IP is not supported on predefined networks + for _, mode := range []string{"none", "host", "bridge", "default"} { + checkUnsupportedNetworkAndIP(c, mode) + } + + // requested IP is not supported on networks with no user defined subnets + dockerCmd(c, "network", "create", "n0") + assertNwIsAvailable(c, "n0") + + out, _, err := dockerCmdWithError("run", "-d", "--ip", "172.28.99.88", "--net", "n0", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) + + out, _, err = dockerCmdWithError("run", "-d", "--ip6", "2001:db8:1234::9988", "--net", "n0", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) + + dockerCmd(c, "network", "rm", "n0") + assertNwNotAvailable(c, "n0") +} + +func checkUnsupportedNetworkAndIP(c *check.C, nwMode string) { + out, _, err := dockerCmdWithError("run", "-d", "--net", nwMode, "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) +} + +func verifyIPAddressConfig(c *check.C, cName, nwname, ipv4, ipv6 string) { + if ipv4 != "" { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv4Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv4) + } + + if ipv6 != "" { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv6Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv6) + } +} + +func verifyIPAddresses(c *check.C, cName, nwname, ipv4, ipv6 string) { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAddress", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv4) + + out = inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.GlobalIPv6Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv6) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectLinkLocalIP(c *check.C) { + // create one test network + dockerCmd(c, "network", "create", "n0") + assertNwIsAvailable(c, "n0") + + // run a container with incorrect link-local address + _, _, err := dockerCmdWithError("run", "--link-local-ip", "169.253.5.5", "busybox", "top") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("run", "--link-local-ip", "2001:db8::89", "busybox", "top") + c.Assert(err, check.NotNil) + + // run two containers with link-local ip on the test network + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--link-local-ip", "169.254.7.7", "--link-local-ip", "fe80::254:77", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + dockerCmd(c, "run", "-d", "--name", "c1", "--net=n0", "--link-local-ip", "169.254.8.8", "--link-local-ip", "fe80::254:88", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + + // run a container on the default network and connect it to the test network specifying a link-local address + dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") + c.Assert(waitRun("c2"), check.IsNil) + dockerCmd(c, "network", "connect", "--link-local-ip", "169.254.9.9", "n0", "c2") + + // verify the three containers can ping each other via the link-local addresses + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) + + // Stop and restart the three containers + dockerCmd(c, "stop", "c0") + dockerCmd(c, "stop", "c1") + dockerCmd(c, "stop", "c2") + dockerCmd(c, "start", "c0") + dockerCmd(c, "start", "c1") + dockerCmd(c, "start", "c2") + + // verify the ping again + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectLink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "foo1") + dockerCmd(c, "network", "create", "-d", "bridge", "foo2") + + dockerCmd(c, "run", "-d", "--net=foo1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in a user-defined network with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=foo1", "--name=second", "--link=first:FirstInFoo1", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias FirstInFoo1 must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.IsNil) + + // connect first container to foo2 network + dockerCmd(c, "network", "connect", "foo2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "--link=first:FirstInFoo2", "foo2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) + + // disconnect first container from foo1 network + dockerCmd(c, "network", "disconnect", "foo1", "first") + + // link in foo1 network must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.NotNil) + + // link in foo2 network must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectDefault(c *check.C) { + netWorkName1 := "test1" + netWorkName2 := "test2" + containerName := "foo" + + dockerCmd(c, "network", "create", netWorkName1) + dockerCmd(c, "network", "create", netWorkName2) + dockerCmd(c, "create", "--name", containerName, "busybox", "top") + dockerCmd(c, "network", "connect", netWorkName1, containerName) + dockerCmd(c, "network", "connect", netWorkName2, containerName) + dockerCmd(c, "network", "disconnect", "bridge", containerName) + + dockerCmd(c, "start", containerName) + c.Assert(waitRun(containerName), checker.IsNil) + networks := inspectField(c, containerName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netWorkName1, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName1))) + c.Assert(networks, checker.Contains, netWorkName2, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName2))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") + containerID := strings.TrimSpace(out) + for _, net := range defaults { + res, _, err := dockerCmdWithError("network", "connect", "--alias", "alias"+net, net, containerID) + c.Assert(err, checker.NotNil) + c.Assert(res, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + dockerCmd(c, "network", "create", "-d", "bridge", "net2") + + cid, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping first container and its alias + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // connect first container to net2 network + dockerCmd(c, "network", "connect", "--alias=bar", "net2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "net2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + + // disconnect first container from net1 network + dockerCmd(c, "network", "disconnect", "net1", "first") + + // ping to net1 scoped alias "foo" must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.NotNil) + + // ping to net2 scoped alias "bar" must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + // ping to net2 scoped alias short-id must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // verify the alias option is rejected when running on predefined network + out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + + // verify the alias option is rejected when connecting to predefined network + out, _, err = dockerCmdWithError("network", "connect", "--alias=any", "bridge", "first") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") + c.Assert(waitRun("c1.net1"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") + c.Assert(waitRun("c2.net1"), check.IsNil) + + // ping first container by its unqualified name + _, _, err := dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1") + c.Assert(err, check.IsNil) + + // ping first container by its qualified name + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1") + c.Assert(err, check.IsNil) + + // ping with first qualified name masked by an additional domain. should fail + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1.google.com") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestEmbeddedDNSInvalidInput(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "nw1") + + // Sending garbage to embedded DNS shouldn't crash the daemon + dockerCmd(c, "run", "-i", "--net=nw1", "--name=c1", "debian:jessie", "bash", "-c", "echo InvalidQuery > /dev/udp/127.0.0.11/53") +} + +func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) { + dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top") + c.Assert(waitRun("bb"), check.IsNil) + + ns0 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") + + // A failing redundant network connect should not alter current container's endpoint settings + _, _, err := dockerCmdWithError("network", "connect", "bridge", "bb") + c.Assert(err, check.NotNil) + + ns1 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") + c.Assert(ns1, check.Equals, ns0) +} + +func (s *DockerSuite) TestDockerNetworkInternalMode(c *check.C) { + dockerCmd(c, "network", "create", "--driver=bridge", "--internal", "internal") + assertNwIsAvailable(c, "internal") + nr := getNetworkResource(c, "internal") + c.Assert(nr.Internal, checker.True) + + dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + out, _, err := dockerCmdWithError("exec", "first", "ping", "-W", "4", "-c", "1", "www.google.com") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "ping: bad address") + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +// Test for #21401 +func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *check.C) { + dockerCmd(c, "network", "create", "test@#$") + assertNwIsAvailable(c, "test@#$") + dockerCmd(c, "network", "rm", "test@#$") + assertNwNotAvailable(c, "test@#$") + + dockerCmd(c, "network", "create", "kiwl$%^") + assertNwIsAvailable(c, "kiwl$%^") + dockerCmd(c, "network", "rm", "kiwl$%^") + assertNwNotAvailable(c, "kiwl$%^") +} + +func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) { + testRequires(t, DaemonIsLinux) + if err := s.d.StartWithBusybox("--live-restore"); err != nil { + t.Fatal(err) + } + defer s.d.Stop() + oldCon := "old" + + _, err := s.d.Cmd("run", "-d", "--name", oldCon, "-p", "80:80", "busybox", "top") + if err != nil { + t.Fatal(err) + } + oldContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", oldCon) + if err != nil { + t.Fatal(err) + } + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // restart the daemon + if err := s.d.Start("--live-restore"); err != nil { + t.Fatal(err) + } + + // start a new container, the new container's ip should not be the same with + // old running container. + newCon := "new" + _, err = s.d.Cmd("run", "-d", "--name", newCon, "busybox", "top") + if err != nil { + t.Fatal(err) + } + newContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", newCon) + if err != nil { + t.Fatal(err) + } + if strings.Compare(strings.TrimSpace(oldContainerIP), strings.TrimSpace(newContainerIP)) == 0 { + t.Fatalf("new container ip should not equal to old running container ip") + } + + // start a new container, the new container should ping old running container + _, err = s.d.Cmd("run", "-t", "busybox", "ping", "-c", "1", oldContainerIP) + if err != nil { + t.Fatal(err) + } + + // start a new container, trying to publish port 80:80 should fail + out, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") + if err == nil || !strings.Contains(out, "Bind for 0.0.0.0:80 failed: port is already allocated") { + t.Fatalf("80 port is allocated to old running container, it should failed on allocating to new container") + } + + // kill old running container and try to allocate again + _, err = s.d.Cmd("kill", oldCon) + if err != nil { + t.Fatal(err) + } + id, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") + if err != nil { + t.Fatal(err) + } + + // Cleanup because these containers will not be shut down by daemon + out, err = s.d.Cmd("stop", newCon) + if err != nil { + t.Fatalf("err: %v %v", err, string(out)) + } + _, err = s.d.Cmd("stop", strings.TrimSpace(id)) + if err != nil { + t.Fatal(err) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkFlagAlias(c *check.C) { + dockerCmd(c, "network", "create", "user") + output, status := dockerCmd(c, "run", "--rm", "--network=user", "--network-alias=foo", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) + + output, status, _ = dockerCmdWithError("run", "--rm", "--net=user", "--network=user", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) + + output, status, _ = dockerCmdWithError("run", "--rm", "--network=user", "--net-alias=foo", "--network-alias=bar", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkValidateIP(c *check.C) { + _, _, err := dockerCmdWithError("network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "mynet") + c.Assert(err, check.IsNil) + assertNwIsAvailable(c, "mynet") + + _, _, err = dockerCmdWithError("run", "-d", "--name", "mynet0", "--net=mynet", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(err, check.IsNil) + c.Assert(waitRun("mynet0"), check.IsNil) + verifyIPAddressConfig(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988") + + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "mynet_ip", "--ip6", "2001:db8:1234::9999", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv4 address") + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "172.28.99.99", "--ip6", "mynet_ip6", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") + // This is a case of IPv4 address to `--ip6` + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "172.28.99.99", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") + // This is a special case of an IPv4-mapped IPv6 address + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "::ffff:172.28.99.99", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") +} + +// Test case for 26220 +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromBridge(c *check.C) { + out, _ := dockerCmd(c, "network", "inspect", "--format", "{{.Id}}", "bridge") + + network := strings.TrimSpace(out) + + name := "test" + dockerCmd(c, "create", "--name", name, "busybox", "top") + + _, _, err := dockerCmdWithError("network", "disconnect", network, name) + c.Assert(err, check.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_oom_killed_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_oom_killed_test.go new file mode 100644 index 0000000..bcf59f8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_oom_killed_test.go @@ -0,0 +1,30 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectOomKilledTrue(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) + + name := "testoomkilled" + _, exitCode, _ := dockerCmdWithError("run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + + c.Assert(exitCode, checker.Equals, 137, check.Commentf("OOM exit should be 137")) + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "true") +} + +func (s *DockerSuite) TestInspectOomKilledFalse(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) + + name := "testoomkilled" + dockerCmd(c, "run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "echo hello world") + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "false") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_pause_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_pause_test.go new file mode 100644 index 0000000..9217a69 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_pause_test.go @@ -0,0 +1,66 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPause(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + + name := "testeventpause" + runSleepingContainer(c, "-d", "--name", name) + + dockerCmd(c, "pause", name) + pausedContainers, err := getSliceOfPausedContainers() + c.Assert(err, checker.IsNil) + c.Assert(len(pausedContainers), checker.Equals, 1) + + dockerCmd(c, "unpause", name) + + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") +} + +func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + + containers := []string{ + "testpausewithmorecontainers1", + "testpausewithmorecontainers2", + } + for _, name := range containers { + runSleepingContainer(c, "-d", "--name", name) + } + dockerCmd(c, append([]string{"pause"}, containers...)...) + pausedContainers, err := getSliceOfPausedContainers() + c.Assert(err, checker.IsNil) + c.Assert(len(pausedContainers), checker.Equals, len(containers)) + + dockerCmd(c, append([]string{"unpause"}, containers...)...) + + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + + for _, name := range containers { + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") + } +} + +func (s *DockerSuite) TestPauseFailsOnWindowsServerContainers(c *check.C) { + testRequires(c, DaemonIsWindows, NotPausable) + runSleepingContainer(c, "-d", "--name=test") + out, _, _ := dockerCmdWithError("pause", "test") + c.Assert(out, checker.Contains, "cannot pause Windows Server Containers") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_test.go new file mode 100644 index 0000000..380357d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_test.go @@ -0,0 +1,393 @@ +package main + +import ( + "fmt" + "os/exec" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +var ( + pluginProcessName = "sample-volume-plugin" + pName = "tiborvass/sample-volume-plugin" + npName = "tiborvass/test-docker-netplugin" + pTag = "latest" + pNameWithTag = pName + ":" + pTag + npNameWithTag = npName + ":" + pTag +) + +func (s *DockerSuite) TestPluginBasicOps(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "true") + + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + id = strings.TrimSpace(id) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + + _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id)) + if !os.IsNotExist(err) { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestPluginForceRemove(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + out, _, err = dockerCmdWithError("plugin", "remove", "--force", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActive(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdWithError("volume", "create", "-d", pNameWithTag, "--name", "testvol1") + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(out, checker.Contains, "in use") + + _, _, err = dockerCmdWithError("volume", "rm", "testvol1") + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActiveNetwork(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("network", "create", "-d", npNameWithTag, "test") + c.Assert(err, checker.IsNil) + + nID := strings.TrimSpace(out) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is in use") + + _, _, err = dockerCmdWithError("network", "rm", nID) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npNameWithTag) +} + +func (s *DockerSuite) TestPluginInstallDisable(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "false") + + out, _, err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) +} + +func (s *DockerSuite) TestPluginInstallDisableVolumeLs(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + dockerCmd(c, "volume", "ls") +} + +func (s *DockerSuite) TestPluginSet(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=0]") + + dockerCmd(c, "plugin", "set", pName, "DEBUG=1") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (s *DockerSuite) TestPluginInstallArgs(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName, "DEBUG=1") + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64) + + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) + + out, _, err := dockerCmdWithError("plugin", "install", repoName) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "target is image") +} + +func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already disabled") + + _, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSuite) TestPluginCreate(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + name := "foo/bar-driver" + temp, err := ioutil.TempDir("", "foo") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(temp) + + data := `{"description": "foo plugin"}` + err = ioutil.WriteFile(filepath.Join(temp, "config.json"), []byte(data), 0644) + c.Assert(err, checker.IsNil) + + err = os.MkdirAll(filepath.Join(temp, "rootfs"), 0700) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "already exist") + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + // The output will consists of one HEADER line and one line of foo/bar-driver + c.Assert(len(strings.Split(strings.TrimSpace(out), "\n")), checker.Equals, 2) +} + +func (s *DockerSuite) TestPluginInspect(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "true") + + // Find the ID first + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + c.Assert(id, checker.Not(checker.Equals), "") + + // Long form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Short form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name with tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name without tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + + // After remove nothing should be found + _, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.NotNil) +} + +// Test case for https://github.com/docker/docker/pull/29186#discussion_r91277345 +func (s *DockerSuite) TestPluginInspectOnWindows(c *check.C) { + // This test should work on Windows only + testRequires(c, DaemonIsWindows) + + out, _, err := dockerCmdWithError("plugin", "inspect", "foobar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "plugins are not supported on this platform") + c.Assert(err.Error(), checker.Contains, "plugins are not supported on this platform") +} + +func (s *DockerTrustSuite) TestPluginTrustedInstall(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + trustedName := s.setupTrustedplugin(c, pNameWithTag, "trusted-plugin-install") + + installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", trustedName) + s.trustedCmd(installCmd) + out, _, err := runCommandWithOutput(installCmd) + + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "true") + + out, _, err = dockerCmdWithError("plugin", "disable", trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out, _, err = dockerCmdWithError("plugin", "enable", trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out, _, err = dockerCmdWithError("plugin", "rm", "-f", trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + // Try untrusted pull to ensure we pushed the tag to the registry + installCmd = exec.Command(dockerBinary, "plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName) + s.trustedCmd(installCmd) + out, _, err = runCommandWithOutput(installCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "true") + +} + +func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + pluginName := fmt.Sprintf("%v/dockercliuntrusted/plugintest:latest", privateRegistryURL) + // install locally and push to private registry + dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", pluginName, pNameWithTag) + dockerCmd(c, "plugin", "push", pluginName) + dockerCmd(c, "plugin", "rm", "-f", pluginName) + + // Try trusted install on untrusted plugin + installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", pluginName) + s.trustedCmd(installCmd) + out, _, err := runCommandWithOutput(installCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) +} + +func (s *DockerSuite) TestPluginUpgrade(c *check.C) { + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + plugin := "cpuguy83/docker-volume-driver-plugin-local:latest" + pluginV2 := "cpuguy83/docker-volume-driver-plugin-local:v2" + + dockerCmd(c, "plugin", "install", "--grant-all-permissions", plugin) + dockerCmd(c, "volume", "create", "--driver", plugin, "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "touch /apple/core") + + out, _, err := dockerCmdWithError("plugin", "upgrade", "--grant-all-permissions", plugin, pluginV2) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "disabled before upgrading") + + out, _ = dockerCmd(c, "plugin", "inspect", "--format={{.ID}}", plugin) + id := strings.TrimSpace(out) + + // make sure "v2" does not exists + _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id, "rootfs", "v2")) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf(out)) + + dockerCmd(c, "plugin", "disable", "-f", plugin) + dockerCmd(c, "plugin", "upgrade", "--grant-all-permissions", "--skip-remote-check", plugin, pluginV2) + + // make sure "v2" file exists + _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id, "rootfs", "v2")) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "plugin", "enable", plugin) + dockerCmd(c, "volume", "inspect", "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "ls -lh /apple/core") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_port_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_port_test.go new file mode 100644 index 0000000..80b00fe --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_port_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "fmt" + "net" + "regexp" + "sort" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPortList(c *check.C) { + testRequires(c, DaemonIsLinux) + // one port + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", firstID) + + err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", firstID) + + // three port + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", ID) + + // more and one port mapped to the same container port + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9999:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "80/tcp -> 0.0.0.0:9999", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + testRange := func() { + // host port ranges used + IDs := make([]string, 3) + for i := 0; i < 3; i++ { + out, _ = dockerCmd(c, "run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + IDs[i] = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", IDs[i]) + + err = assertPortList(c, out, []string{fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i)}) + // Port list is not correct + c.Assert(err, checker.IsNil) + } + + // test port range exhaustion + out, _, err = dockerCmdWithError("run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + // Exhausted port range did not return an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + for i := 0; i < 3; i++ { + dockerCmd(c, "rm", "-f", IDs[i]) + } + } + testRange() + // Verify we ran re-use port ranges after they are no longer in use. + testRange() + + // test invalid port ranges + for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} { + out, _, err = dockerCmdWithError("run", "-d", + "-p", invalidRange, + "busybox", "top") + // Port range should have returned an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + } + + // test host range:container range spec. + out, _ = dockerCmd(c, "run", "-d", + "-p", "9800-9803:80-83", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9800", + "81/tcp -> 0.0.0.0:9801", + "82/tcp -> 0.0.0.0:9802", + "83/tcp -> 0.0.0.0:9803"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + // test mixing protocols in same port range + out, _ = dockerCmd(c, "run", "-d", + "-p", "8000-8080:80", + "-p", "8000-8080:80/udp", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:8000", + "80/udp -> 0.0.0.0:8000"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) +} + +func assertPortList(c *check.C, out string, expected []string) error { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + if len(lines) != len(expected) { + return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) + } + sort.Strings(lines) + sort.Strings(expected) + + for i := 0; i < len(expected); i++ { + if lines[i] != expected[i] { + return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|") + } + } + + return nil +} + +func stopRemoveContainer(id string, c *check.C) { + dockerCmd(c, "rm", "-f", id) +} + +func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) { + testRequires(c, DaemonIsLinux) + // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) + + // Run the container forcing to publish the exposed ports + dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the exposed ports in the port bindings + expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1) + expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort1) in docker ps output + c.Assert(expBndRegx1.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort1: %s", out, unpPort1)) + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort2) in docker ps output + c.Assert(expBndRegx2.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort2: %s", out, unpPort2)) + + // Run the container specifying explicit port bindings for the exposed ports + offset := 10000 + pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1) + pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2) + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, expose1, expose2, "busybox", "sleep", "5") + id := strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1) + expBnd2 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with explicit port bindings and no exposed ports + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5") + id = strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with one unpublished exposed port and one explicit port binding + dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the specified unpublished port and port mapping + out, _ = dockerCmd(c, "ps", "-n=1") + // Missing unpublished exposed ports (unpPort1) in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) +} + +func (s *DockerSuite) TestPortHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", "9876") + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876") + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + _, exposedPort, err := net.SplitHostPort(out) + c.Assert(err, checker.IsNil, check.Commentf("out: %s", out)) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortBindingOnSandbox(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") + nr := getNetworkResource(c, "internal-net") + c.Assert(nr.Internal, checker.Equals, true) + + dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", + "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") + c.Assert(waitRun("c1"), check.IsNil) + + _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.NotNil, + check.Commentf("Port mapping on internal network is expected to fail")) + + // Connect container to another normal bridge network + dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") + dockerCmd(c, "network", "connect", "foo-net", "c1") + + _, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.IsNil, + check.Commentf("Port mapping on the new network is expected to succeed")) + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_proxy_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_proxy_test.go new file mode 100644 index 0000000..1cf569b --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_proxy_test.go @@ -0,0 +1,53 @@ +package main + +import ( + "net" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCLIProxyDisableProxyUnixSock(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. + + cmd := exec.Command(dockerBinary, "info") + cmd.Env = appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999") + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf("%v", out)) + +} + +// Can't use localhost here since go has a special case to not use proxy if connecting to localhost +// See https://golang.org/pkg/net/http/#ProxyFromEnvironment +func (s *DockerDaemonSuite) TestCLIProxyProxyTCPSock(c *check.C) { + testRequires(c, SameHostDaemon) + // get the IP to use to connect since we can't use localhost + addrs, err := net.InterfaceAddrs() + c.Assert(err, checker.IsNil) + var ip string + for _, addr := range addrs { + sAddr := addr.String() + if !strings.Contains(sAddr, "127.0.0.1") { + addrArr := strings.Split(sAddr, "/") + ip = addrArr[0] + break + } + } + + c.Assert(ip, checker.Not(checker.Equals), "") + + err = s.d.Start("-H", "tcp://"+ip+":2375") + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "info") + cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"} + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + // Test with no_proxy + cmd.Env = append(cmd.Env, "NO_PROXY="+ip) + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "info")) + c.Assert(err, checker.IsNil, check.Commentf("%v", out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_prune_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_prune_unix_test.go new file mode 100644 index 0000000..dabbc72 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_prune_unix_test.go @@ -0,0 +1,91 @@ +// +build !windows + +package main + +import ( + "strconv" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func pruneNetworkAndVerify(c *check.C, d *SwarmDaemon, kept, pruned []string) { + _, err := d.Cmd("network", "prune", "--force") + c.Assert(err, checker.IsNil) + out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + for _, s := range kept { + c.Assert(out, checker.Contains, s) + } + for _, s := range pruned { + c.Assert(out, checker.Not(checker.Contains), s) + } +} + +func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + _, err := d.Cmd("network", "create", "n1") // used by container (testprune) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n2") + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n3", "--driver", "overlay") // used by service (testprunesvc) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n4", "--driver", "overlay") + c.Assert(err, checker.IsNil) + + cName := "testprune" + _, err = d.Cmd("run", "-d", "--name", cName, "--net", "n1", "busybox", "top") + c.Assert(err, checker.IsNil) + + serviceName := "testprunesvc" + replicas := 1 + out, err := d.Cmd("service", "create", "--name", serviceName, + "--replicas", strconv.Itoa(replicas), + "--network", "n3", + "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, replicas+1) + + // prune and verify + pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"}) + + // remove containers, then prune and verify again + _, err = d.Cmd("rm", "-f", cName) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"}) +} + +func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { + c.Assert(s.d.StartWithBusybox(), checker.IsNil) + + out, _, err := s.d.buildImageWithOut("test", + `FROM busybox + LABEL foo=bar`, true, "-q") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force", "--all") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_ps_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_ps_test.go new file mode 100644 index 0000000..19ede90 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_ps_test.go @@ -0,0 +1,952 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPsListContainersBase(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + firstID := strings.TrimSpace(out) + + out, _ = runSleepingContainer(c, "-d") + secondID := strings.TrimSpace(out) + + // not long running + out, _ = dockerCmd(c, "run", "-d", "busybox", "true") + thirdID := strings.TrimSpace(out) + + out, _ = runSleepingContainer(c, "-d") + fourthID := strings.TrimSpace(out) + + // make sure the second is running + c.Assert(waitRun(secondID), checker.IsNil) + + // make sure third one is not running + dockerCmd(c, "wait", thirdID) + + // make sure the forth is running + c.Assert(waitRun(fourthID), checker.IsNil) + + // all + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out)) + + // running + out, _ = dockerCmd(c, "ps") + c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out)) + + // limit + out, _ = dockerCmd(c, "ps", "-n=2", "-a") + expected := []string{fourthID, thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-n=2") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter since + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a") + expected = []string{fourthID, thirdID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID) + expected = []string{fourthID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+thirdID) + expected = []string{fourthID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + // filter before + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a") + expected = []string{thirdID, secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID) + expected = []string{secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+thirdID) + expected = []string{secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + // filter since & before + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a") + expected = []string{thirdID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID) + expected = []string{secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out)) + + // filter since & limit + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a") + expected = []string{fourthID, thirdID} + + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter before & limit + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a") + expected = []string{thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter since & filter before & limit + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a") + expected = []string{thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + +} + +func assertContainerList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + + if len(lines)-1 != len(expected) { + return false + } + + containerIDIndex := strings.Index(lines[0], "CONTAINER ID") + for i := 0; i < len(expected); i++ { + foundID := lines[i+1][containerIDIndex : containerIDIndex+12] + if foundID != expected[i][:12] { + return false + } + } + + return true +} + +// FIXME(vdemeester) Move this into a unit test in daemon package +func (s *DockerSuite) TestPsListContainersInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("ps", "-f", "invalidFilter=test") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestPsListContainersSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "busybox") + + baseOut, _ := dockerCmd(c, "ps", "-s", "-n=1") + baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n") + baseSizeIndex := strings.Index(baseLines[0], "SIZE") + baseFoundsize := baseLines[1][baseSizeIndex:] + baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0]) + c.Assert(err, checker.IsNil) + + name := "test_size" + dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") + id, err := getIDByName(name) + c.Assert(err, checker.IsNil) + + runCmd := exec.Command(dockerBinary, "ps", "-s", "-n=1") + var out string + + wait := make(chan struct{}) + go func() { + out, _, err = runCommandWithOutput(runCmd) + close(wait) + }() + select { + case <-wait: + case <-time.After(3 * time.Second): + c.Fatalf("Calling \"docker ps -s\" timed out!") + } + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.Trim(out, "\n "), "\n") + c.Assert(lines, checker.HasLen, 2, check.Commentf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))) + sizeIndex := strings.Index(lines[0], "SIZE") + idIndex := strings.Index(lines[0], "CONTAINER ID") + foundID := lines[1][idIndex : idIndex+12] + c.Assert(foundID, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s", id[:12], foundID)) + expectedSize := fmt.Sprintf("%d B", (2 + baseBytes)) + foundSize := lines[1][sizeIndex:] + c.Assert(foundSize, checker.Contains, expectedSize, check.Commentf("Expected size %q, got %q", expectedSize, foundSize)) +} + +func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { + // start exited container + out, _ := dockerCmd(c, "run", "-d", "busybox") + firstID := strings.TrimSpace(out) + + // make sure the exited container is not running + dockerCmd(c, "wait", firstID) + + // start running container + out, _ = dockerCmd(c, "run", "-itd", "busybox") + secondID := strings.TrimSpace(out) + + // filter containers by exited + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID) + + out, _ = dockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, secondID) + + result := dockerCmdWithTimeout(time.Second*60, "ps", "-a", "-q", "--filter=status=rubbish") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Unrecognised filter value for status", + }) + + // Windows doesn't support pausing of containers + if daemonPlatform != "windows" { + // pause running container + out, _ = dockerCmd(c, "run", "-itd", "busybox") + pausedID := strings.TrimSpace(out) + dockerCmd(c, "pause", pausedID) + // make sure the container is unpaused to let the daemon stop it properly + defer func() { dockerCmd(c, "unpause", pausedID) }() + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, pausedID) + } +} + +func (s *DockerSuite) TestPsListContainersFilterHealth(c *check.C) { + // Test legacy no health check + out, _ := runSleepingContainer(c, "--name=none_legacy") + containerID := strings.TrimSpace(out) + + waitForContainer(containerID) + + out, _ = dockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for legacy none filter, output: %q", containerID, containerOut, out)) + + // Test no health check specified explicitly + out, _ = runSleepingContainer(c, "--name=none", "--no-healthcheck") + containerID = strings.TrimSpace(out) + + waitForContainer(containerID) + + out, _ = dockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for none filter, output: %q", containerID, containerOut, out)) + + // Test failing health check + out, _ = runSleepingContainer(c, "--name=failing_container", "--health-cmd=exit 1", "--health-interval=1s") + containerID = strings.TrimSpace(out) + + waitForHealthStatus(c, "failing_container", "starting", "unhealthy") + + out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=unhealthy") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for unhealthy filter, output: %q", containerID, containerOut, out)) + + // Check passing healthcheck + out, _ = runSleepingContainer(c, "--name=passing_container", "--health-cmd=exit 0", "--health-interval=1s") + containerID = strings.TrimSpace(out) + + waitForHealthStatus(c, "passing_container", "starting", "healthy") + + out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for healthy filter, output: %q", containerID, containerOut, out)) +} + +func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) { + // start container + out, _ := dockerCmd(c, "run", "-d", "busybox") + firstID := strings.TrimSpace(out) + + // start another container + runSleepingContainer(c) + + // filter containers by id + out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=id="+firstID) + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID[:12], check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)) +} + +func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { + // start container + dockerCmd(c, "run", "--name=a_name_to_match", "busybox") + id, err := getIDByName("a_name_to_match") + c.Assert(err, check.IsNil) + + // start another container + runSleepingContainer(c, "--name=b_name_to_match") + + // filter containers by name + out, _ := dockerCmd(c, "ps", "-a", "-q", "--filter=name=a_name_to_match") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s for exited filter, output: %q", id[:12], containerOut, out)) +} + +// Test for the ancestor filter for ps. +// There is also the same test but with image:tag@digest in docker_cli_by_digest_test.go +// +// What the test setups : +// - Create 2 image based on busybox using the same repository but different tags +// - Create an image based on the previous image (images_ps_filter_test2) +// - Run containers for each of those image (busybox, images_ps_filter_test1, images_ps_filter_test2) +// - Filter them out :P +func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { + // Build images + imageName1 := "images_ps_filter_test1" + imageID1, err := buildImage(imageName1, + `FROM busybox + LABEL match me 1`, true) + c.Assert(err, checker.IsNil) + + imageName1Tagged := "images_ps_filter_test1:tag" + imageID1Tagged, err := buildImage(imageName1Tagged, + `FROM busybox + LABEL match me 1 tagged`, true) + c.Assert(err, checker.IsNil) + + imageName2 := "images_ps_filter_test2" + imageID2, err := buildImage(imageName2, + fmt.Sprintf(`FROM %s + LABEL match me 2`, imageName1), true) + c.Assert(err, checker.IsNil) + + // start containers + dockerCmd(c, "run", "--name=first", "busybox", "echo", "hello") + firstID, err := getIDByName("first") + c.Assert(err, check.IsNil) + + // start another container + dockerCmd(c, "run", "--name=second", "busybox", "echo", "hello") + secondID, err := getIDByName("second") + c.Assert(err, check.IsNil) + + // start third container + dockerCmd(c, "run", "--name=third", imageName1, "echo", "hello") + thirdID, err := getIDByName("third") + c.Assert(err, check.IsNil) + + // start fourth container + dockerCmd(c, "run", "--name=fourth", imageName1Tagged, "echo", "hello") + fourthID, err := getIDByName("fourth") + c.Assert(err, check.IsNil) + + // start fifth container + dockerCmd(c, "run", "--name=fifth", imageName2, "echo", "hello") + fifthID, err := getIDByName("fifth") + c.Assert(err, check.IsNil) + + var filterTestSuite = []struct { + filterName string + expectedIDs []string + }{ + // non existent stuff + {"nonexistent", []string{}}, + {"nonexistent:tag", []string{}}, + // image + {"busybox", []string{firstID, secondID, thirdID, fourthID, fifthID}}, + {imageName1, []string{thirdID, fifthID}}, + {imageName2, []string{fifthID}}, + // image:tag + {fmt.Sprintf("%s:latest", imageName1), []string{thirdID, fifthID}}, + {imageName1Tagged, []string{fourthID}}, + // short-id + {stringid.TruncateID(imageID1), []string{thirdID, fifthID}}, + {stringid.TruncateID(imageID2), []string{fifthID}}, + // full-id + {imageID1, []string{thirdID, fifthID}}, + {imageID1Tagged, []string{fourthID}}, + {imageID2, []string{fifthID}}, + } + + var out string + for _, filter := range filterTestSuite { + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName) + checkPsAncestorFilterOutput(c, out, filter.filterName, filter.expectedIDs) + } + + // Multiple ancestor filter + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged) + checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) +} + +func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) { + actualIDs := []string{} + if out != "" { + actualIDs = strings.Split(out[:len(out)-1], "\n") + } + sort.Strings(actualIDs) + sort.Strings(expectedIDs) + + c.Assert(actualIDs, checker.HasLen, len(expectedIDs), check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v:%v, got %v:%v", filterName, len(expectedIDs), expectedIDs, len(actualIDs), actualIDs)) + if len(expectedIDs) > 0 { + same := true + for i := range expectedIDs { + if actualIDs[i] != expectedIDs[i] { + c.Logf("%s, %s", actualIDs[i], expectedIDs[i]) + same = false + break + } + } + c.Assert(same, checker.Equals, true, check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v, got %v", filterName, expectedIDs, actualIDs)) + } +} + +func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) { + // start container + dockerCmd(c, "run", "--name=first", "-l", "match=me", "-l", "second=tag", "busybox") + firstID, err := getIDByName("first") + c.Assert(err, check.IsNil) + + // start another container + dockerCmd(c, "run", "--name=second", "-l", "match=me too", "busybox") + secondID, err := getIDByName("second") + c.Assert(err, check.IsNil) + + // start third container + dockerCmd(c, "run", "--name=third", "-l", "nomatch=me", "busybox") + thirdID, err := getIDByName("third") + c.Assert(err, check.IsNil) + + // filter containers by exact match + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels, but expect not found because of AND behavior + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, "", check.Commentf("Expected nothing, got %s for exited filter, output: %q", containerOut, out)) + + // filter containers by exact key + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Contains, firstID) + c.Assert(containerOut, checker.Contains, secondID) + c.Assert(containerOut, checker.Not(checker.Contains), thirdID) +} + +func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { + runSleepingContainer(c, "--name=sleep") + + dockerCmd(c, "run", "--name", "zero1", "busybox", "true") + firstZero, err := getIDByName("zero1") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--name", "zero2", "busybox", "true") + secondZero, err := getIDByName("zero2") + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("run", "--name", "nonzero1", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) + + firstNonZero, err := getIDByName("nonzero1") + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("run", "--name", "nonzero2", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) + secondNonZero, err := getIDByName("nonzero2") + c.Assert(err, checker.IsNil) + + // filter containers by exited=0 + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") + ids := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers got %d: %s", len(ids), out)) + c.Assert(ids[0], checker.Equals, secondZero, check.Commentf("First in list should be %q, got %q", secondZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstZero, check.Commentf("Second in list should be %q, got %q", firstZero, ids[1])) + + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") + ids = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers got %d", len(ids))) + c.Assert(ids[0], checker.Equals, secondNonZero, check.Commentf("First in list should be %q, got %q", secondNonZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstNonZero, check.Commentf("Second in list should be %q, got %q", firstNonZero, ids[1])) + +} + +func (s *DockerSuite) TestPsRightTagName(c *check.C) { + // TODO Investigate further why this fails on Windows to Windows CI + testRequires(c, DaemonIsLinux) + tag := "asybox:shmatest" + dockerCmd(c, "tag", "busybox", tag) + + var id1 string + out, _ := runSleepingContainer(c) + id1 = strings.TrimSpace(string(out)) + + var id2 string + out, _ = runSleepingContainerInImage(c, tag) + id2 = strings.TrimSpace(string(out)) + + var imageID string + out = inspectField(c, "busybox", "Id") + imageID = strings.TrimSpace(string(out)) + + var id3 string + out, _ = runSleepingContainerInImage(c, imageID) + id3 = strings.TrimSpace(string(out)) + + out, _ = dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // skip header + lines = lines[1:] + c.Assert(lines, checker.HasLen, 3, check.Commentf("There should be 3 running container, got %d", len(lines))) + for _, line := range lines { + f := strings.Fields(line) + switch f[0] { + case id1: + c.Assert(f[1], checker.Equals, "busybox", check.Commentf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])) + case id2: + c.Assert(f[1], checker.Equals, tag, check.Commentf("Expected %s tag for id %s, got %s", tag, id2, f[1])) + case id3: + c.Assert(f[1], checker.Equals, imageID, check.Commentf("Expected %s imageID for id %s, got %s", tag, id3, f[1])) + default: + c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3) + } + } +} + +func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { + // Problematic on Windows as it doesn't support links as of Jan 2016 + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "--name=first") + runSleepingContainer(c, "--name=second", "--link=first:first") + + out, _ := dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // strip header + lines = lines[1:] + expected := []string{"second", "first,second/first"} + var names []string + for _, l := range lines { + fields := strings.Fields(l) + names = append(names, fields[len(fields)-1]) + } + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsGroupPortRange(c *check.C) { + // Problematic on Windows as it doesn't support port ranges as of Jan 2016 + testRequires(c, DaemonIsLinux) + portRange := "3850-3900" + dockerCmd(c, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top") + + out, _ := dockerCmd(c, "ps") + + c.Assert(string(out), checker.Contains, portRange, check.Commentf("docker ps output should have had the port range %q: %s", portRange, string(out))) + +} + +func (s *DockerSuite) TestPsWithSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "sizetest", "busybox", "top") + + out, _ := dockerCmd(c, "ps", "--size") + c.Assert(out, checker.Contains, "virtual", check.Commentf("docker ps with --size should show virtual size of container")) +} + +func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + shortCID := cID[:12] + + // Make sure it DOESN'T show up w/o a '-a' for normal 'ps' + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), shortCID, check.Commentf("Should have not seen '%s' in ps output:\n%s", shortCID, out)) + + // Make sure it DOES show up as 'Created' for 'ps -a' + out, _ = dockerCmd(c, "ps", "-a") + + hits := 0 + for _, line := range strings.Split(out, "\n") { + if !strings.Contains(line, shortCID) { + continue + } + hits++ + c.Assert(line, checker.Contains, "Created", check.Commentf("Missing 'Created' on '%s'", line)) + } + + c.Assert(hits, checker.Equals, 1, check.Commentf("Should have seen '%s' in ps -a output once:%d\n%s", shortCID, hits, out)) + + // filter containers by 'create' - note, no -a needed + out, _ = dockerCmd(c, "ps", "-q", "-f", "status=created") + containerOut := strings.TrimSpace(out) + c.Assert(cID, checker.HasPrefix, containerOut) +} + +func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { + // Problematic on Windows as it doesn't support link as of Jan 2016 + testRequires(c, DaemonIsLinux) + //create 2 containers and link them + dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") + dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") + + //use the new format capabilities to only list the names and --no-trunc to get all names + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"parent", "child,parent/linkedone"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with non-truncated names: %v, got: %v", expected, names)) + + //now list without turning off truncation and make sure we only get the non-link names + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + expected = []string{"parent", "child"} + var truncNames []string + truncNames = append(truncNames, lines...) + c.Assert(expected, checker.DeepEquals, truncNames, check.Commentf("Expected array with truncated names: %v, got: %v", expected, truncNames)) +} + +// Test for GitHub issue #21772 +func (s *DockerSuite) TestPsNamesMultipleTime(c *check.C) { + runSleepingContainer(c, "--name=test1") + runSleepingContainer(c, "--name=test2") + + //use the new format capabilities to list the names twice + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Names}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"test2 test2", "test1 test1"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with names displayed twice: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { + // make sure no-container "docker ps" still prints the header row + out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") + c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out)) + + // verify that "docker ps" with a container still prints the header row also + runSleepingContainer(c, "--name=test") + out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") + c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out)) +} + +func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { + config := `{ + "psFormat": "default {{ .ID }}" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := runSleepingContainer(c, "--name=test") + id := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "--config", d, "ps", "-q") + c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out)) +} + +// Test for GitHub issue #12595 +func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { + // TODO: Investigate why this fails on Windows to Windows CI further. + testRequires(c, DaemonIsLinux) + originalImageName := "busybox:TestPsImageIDAfterUpdate-original" + updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated" + + runCmd := exec.Command(dockerBinary, "tag", "busybox:latest", originalImageName) + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + + originalImageID, err := getIDByName(originalImageName) + c.Assert(err, checker.IsNil) + + runCmd = exec.Command(dockerBinary, append([]string{"run", "-d", originalImageName}, sleepCommandForDaemonPlatform()...)...) + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + containerID := strings.TrimSpace(out) + + linesOut, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() + c.Assert(err, checker.IsNil) + + lines := strings.Split(strings.TrimSpace(string(linesOut)), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageName) + } + + runCmd = exec.Command(dockerBinary, "commit", containerID, updatedImageName) + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + + runCmd = exec.Command(dockerBinary, "tag", updatedImageName, originalImageName) + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + + linesOut, err = exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() + c.Assert(err, checker.IsNil) + + lines = strings.Split(strings.TrimSpace(string(linesOut)), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageID) + } + +} + +func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name=foo", "-d", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + out, _ := dockerCmd(c, "ps") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := "0.0.0.0:5000->5000/tcp" + fields := strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Equals, expected, check.Commentf("Expected: %v, got: %v", expected, fields[len(fields)-2])) + + dockerCmd(c, "kill", "foo") + dockerCmd(c, "wait", "foo") + out, _ = dockerCmd(c, "ps", "-l") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + fields = strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Not(checker.Equals), expected, check.Commentf("Should not got %v", expected)) +} + +func (s *DockerSuite) TestPsShowMounts(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + mp := prefix + slash + "test" + + dockerCmd(c, "volume", "create", "ps-volume-test") + // volume mount containers + runSleepingContainer(c, "--name=volume-test-1", "--volume", "ps-volume-test:"+mp) + c.Assert(waitRun("volume-test-1"), checker.IsNil) + runSleepingContainer(c, "--name=volume-test-2", "--volume", mp) + c.Assert(waitRun("volume-test-2"), checker.IsNil) + // bind mount container + var bindMountSource string + var bindMountDestination string + if DaemonIsWindows.Condition() { + bindMountSource = "c:\\" + bindMountDestination = "c:\\t" + } else { + bindMountSource = "/tmp" + bindMountDestination = "/t" + } + runSleepingContainer(c, "--name=bind-mount-test", "-v", bindMountSource+":"+bindMountDestination) + c.Assert(waitRun("bind-mount-test"), checker.IsNil) + + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}") + + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 3) + + fields := strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + fields = strings.Fields(lines[1]) + c.Assert(fields, checker.HasLen, 2) + + annonymounsVolumeID := fields[1] + + fields = strings.Fields(lines[2]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by volume name + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test") + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // empty results filtering by unknown volume + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=this-volume-should-not-exist") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) + + // filter by mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 2) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, annonymounsVolumeID) + fields = strings.Fields(lines[1]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by bind mount source + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // filter by bind mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // empty results filtering by unknown mount point + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+prefix+slash+"this-path-was-never-mounted") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) +} + +func (s *DockerSuite) TestPsFormatSize(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c) + + out, _ := dockerCmd(c, "ps", "--format", "table {{.Size}}") + lines := strings.Split(out, "\n") + c.Assert(lines[1], checker.Not(checker.Equals), "0 B", check.Commentf("Should not display a size of 0 B")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "table {{.Size}}") + lines = strings.Split(out, "\n") + c.Assert(lines[0], checker.Equals, "SIZE", check.Commentf("Should only have one size column")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "raw") + lines = strings.Split(out, "\n") + c.Assert(lines[8], checker.HasPrefix, "size:", check.Commentf("Size should be appended on a newline")) +} + +func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { + // TODO default network on Windows is not called "bridge", and creating a + // custom network fails on Windows fails with "Error response from daemon: plugin not found") + testRequires(c, DaemonIsLinux) + + // create some containers + runSleepingContainer(c, "--net=bridge", "--name=onbridgenetwork") + runSleepingContainer(c, "--net=none", "--name=onnonenetwork") + + // Filter docker ps on non existing network + out, _ := dockerCmd(c, "ps", "--filter", "network=doesnotexist") + containerOut := strings.TrimSpace(string(out)) + lines := strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have no containers + c.Assert(lines, checker.HasLen, 0) + + // Filter docker ps on network bridge + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge") + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have only one container + c.Assert(lines, checker.HasLen, 1) + + // Making sure onbridgenetwork is on the output + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) + + // Filter docker ps on networks bridge and none + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge", "--filter", "network=none") + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + //ps output should have both the containers + c.Assert(lines, checker.HasLen, 2) + + // Making sure onbridgenetwork and onnonenetwork is on the output + c.Assert(containerOut, checker.Contains, "onnonenetwork", check.Commentf("Missing the container on none network\n")) + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on bridge network\n")) + + nwID, _ := dockerCmd(c, "network", "inspect", "--format", "{{.ID}}", "bridge") + + // Filter by network ID + out, _ = dockerCmd(c, "ps", "--filter", "network="+nwID) + containerOut = strings.TrimSpace(string(out)) + + c.Assert(containerOut, checker.Contains, "onbridgenetwork") +} + +func (s *DockerSuite) TestPsByOrder(c *check.C) { + name1 := "xyz-abc" + out, err := runSleepingContainer(c, "--name", name1) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + container1 := strings.TrimSpace(out) + + name2 := "xyz-123" + out, err = runSleepingContainer(c, "--name", name2) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + container2 := strings.TrimSpace(out) + + name3 := "789-abc" + out, err = runSleepingContainer(c, "--name", name3) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + name4 := "789-123" + out, err = runSleepingContainer(c, "--name", name4) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // Run multiple time should have the same result + out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) + + // Run multiple time should have the same result + out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) +} + +func (s *DockerSuite) TestPsFilterMissingArgErrorCode(c *check.C) { + _, errCode, _ := dockerCmdWithError("ps", "--filter") + c.Assert(errCode, checker.Equals, 125) +} + +// Test case for 30291 +func (s *DockerSuite) TestPsFormatTemplateWithArg(c *check.C) { + runSleepingContainer(c, "-d", "--name", "top", "--label", "some.label=label.foo-bar") + out, _ := dockerCmd(c, "ps", "--format", `{{.Names}} {{.Label "some.label"}}`) + c.Assert(strings.TrimSpace(out), checker.Equals, "top label.foo-bar") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_local_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_local_test.go new file mode 100644 index 0000000..cb14c2c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_local_test.go @@ -0,0 +1,492 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other +// tags for the same image) are not also pulled down. +// +// Ref: docker/docker#8141 +func testPullImageWithAliases(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh"} { + repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) + } + + // Tag and push the same image multiple times. + for _, repo := range repos { + dockerCmd(c, "tag", "busybox", repo) + dockerCmd(c, "push", repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Pull a single tag and verify it doesn't bring down all aliases. + dockerCmd(c, "pull", repos[0]) + dockerCmd(c, "inspect", repos[0]) + for _, repo := range repos[1:] { + _, _, err := dockerCmdWithError("inspect", repo) + c.Assert(err, checker.NotNil, check.Commentf("Image %v shouldn't have been pulled down", repo)) + } +} + +func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +// testConcurrentPullWholeRepo pulls the same repo concurrently. +func testConcurrentPullWholeRepo(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + _, err := buildImage(repo, fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo), true) + c.Assert(err, checker.IsNil) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Run multiple re-pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", "-a", repoName)) + results <- err + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +// testConcurrentFailingPull tries a concurrent pull that doesn't succeed. +func testConcurrentFailingPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + // Run multiple pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repoName+":asdfasdf")) + results <- err + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.NotNil, check.Commentf("expected pull to fail")) + } +} + +func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +// testConcurrentPullMultipleTags pulls multiple tags from the same repo +// concurrently. +func testConcurrentPullMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + _, err := buildImage(repo, fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo), true) + c.Assert(err, checker.IsNil) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Re-pull individual tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repo)) + results <- err + }(repo) + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +// testPullIDStability verifies that pushing an image and pulling it back +// preserves the image ID. +func testPullIDStability(c *check.C) { + derivedImage := privateRegistryURL + "/dockercli/id-stability" + baseImage := "busybox" + + _, err := buildImage(derivedImage, fmt.Sprintf(` + FROM %s + ENV derived true + ENV asdf true + RUN dd if=/dev/zero of=/file bs=1024 count=1024 + CMD echo %s + `, baseImage, derivedImage), true) + if err != nil { + c.Fatal(err) + } + + originalID, err := getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + dockerCmd(c, "push", derivedImage) + + // Pull + out, _ := dockerCmd(c, "pull", derivedImage) + if strings.Contains(out, "Pull complete") { + c.Fatalf("repull redownloaded a layer: %s", out) + } + + derivedIDAfterPull, err := getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + // Make sure the image runs correctly + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } + + // Confirm that repushing and repulling does not change the computed ID + dockerCmd(c, "push", derivedImage) + dockerCmd(c, "rmi", derivedImage) + dockerCmd(c, "pull", derivedImage) + + derivedIDAfterPull, err = getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + // Make sure the image still runs + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } +} + +func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +// #21213 +func testPullNoLayers(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL) + + _, err := buildImage(repoName, ` + FROM scratch + ENV foo bar`, + true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + dockerCmd(c, "pull", repoName) +} + +func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { + testRequires(c, NotArm) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Inject a manifest list into the registry + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, + }, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "bogus_arch", + OS: "bogus_os", + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: pushDigest, + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + }, + }, + }, + } + + manifestListJSON, err := json.MarshalIndent(manifestList, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list")) + + manifestListDigest := digest.FromBytes(manifestListJSON) + hexDigest := manifestListDigest.Hex() + + registryV2Path := filepath.Join(s.reg.dir, "docker", "registry", "v2") + + // Write manifest list to blob store + blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest) + err = os.MkdirAll(blobDir, 0755) + c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir")) + blobPath := filepath.Join(blobDir, "data") + err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list")) + + // Add to revision store + revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest) + err = os.Mkdir(revisionDir, 0755) + c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir")) + revisionPath := filepath.Join(revisionDir, "link") + err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing revision link")) + + // Update tag + tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link") + err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing tag link")) + + // Verify that the image can be pulled through the manifest list. + out, _ := dockerCmd(c, "pull", repoName) + + // The pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // Make sure the pushed and pull digests match + c.Assert(manifestListDigest.String(), checker.Equals, pullDigest) + + // Was the image actually created? + dockerCmd(c, "inspect", repoName) + + dockerCmd(c, "rmi", repoName) +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithScheme(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, "https://"+privateRegistryURL) + dockerCmd(c, "--config", tmp, "pull", repoName) + + // likewise push should work + repoName2 := fmt.Sprintf("%v/dockercli/busybox:nocreds", privateRegistryURL) + dockerCmd(c, "tag", repoName, repoName2) + dockerCmd(c, "--config", tmp, "push", repoName2) + + // logout should work w scheme also because it will be stripped + dockerCmd(c, "--config", tmp, "logout", "https://"+privateRegistryURL) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "pull", repoName) +} + +// TestRunImplicitPullWithNoTag should pull implicitly only the default tag (latest) +func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repo := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v:latest", repo) + repoTag2 := fmt.Sprintf("%v:t1", repo) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + dockerCmd(c, "tag", "busybox", repoTag2) + dockerCmd(c, "push", repo) + dockerCmd(c, "rmi", repoTag1) + dockerCmd(c, "rmi", repoTag2) + + out, _, err := dockerCmdWithError("run", repo) + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Unable to find image '%s:latest' locally", repo)) + + // There should be only one line for repo, the one with repo:latest + outImageCmd, _, err := dockerCmdWithError("images", repo) + splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_test.go new file mode 100644 index 0000000..a0118a8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_test.go @@ -0,0 +1,274 @@ +package main + +import ( + "fmt" + "regexp" + "strings" + "sync" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// TestPullFromCentralRegistry pulls an image from the central registry and verifies that the client +// prints all expected output. +func (s *DockerHubPullSuite) TestPullFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + out := s.Cmd(c, "pull", "hello-world") + defer deleteImages("hello-world") + + c.Assert(out, checker.Contains, "Using default tag: latest", check.Commentf("expected the 'latest' tag to be automatically assumed")) + c.Assert(out, checker.Contains, "Pulling from library/hello-world", check.Commentf("expected the 'library/' prefix to be automatically assumed")) + c.Assert(out, checker.Contains, "Downloaded newer image for hello-world:latest") + + matches := regexp.MustCompile(`Digest: (.+)\n`).FindAllStringSubmatch(out, -1) + c.Assert(len(matches), checker.Equals, 1, check.Commentf("expected exactly one image digest in the output")) + c.Assert(len(matches[0]), checker.Equals, 2, check.Commentf("unexpected number of submatches for the digest")) + _, err := digest.ParseDigest(matches[0][1]) + c.Check(err, checker.IsNil, check.Commentf("invalid digest %q in output", matches[0][1])) + + // We should have a single entry in images. + img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullNonExistingImage pulls non-existing images from the central registry, with different +// combinations of implicit tag and library prefix. +func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { + testRequires(c, DaemonIsLinux) + + type entry struct { + repo string + alias string + tag string + } + + entries := []entry{ + {"asdfasdf", "asdfasdf", "foobar"}, + {"asdfasdf", "library/asdfasdf", "foobar"}, + {"asdfasdf", "asdfasdf", ""}, + {"asdfasdf", "asdfasdf", "latest"}, + {"asdfasdf", "library/asdfasdf", ""}, + {"asdfasdf", "library/asdfasdf", "latest"}, + } + + // The option field indicates "-a" or not. + type record struct { + e entry + option string + out string + err error + } + + // Execute 'docker pull' in parallel, pass results (out, err) and + // necessary information ("-a" or not, and the image name) to channel. + var group sync.WaitGroup + recordChan := make(chan record, len(entries)*2) + for _, e := range entries { + group.Add(1) + go func(e entry) { + defer group.Done() + repoName := e.alias + if e.tag != "" { + repoName += ":" + e.tag + } + out, err := s.CmdWithError("pull", repoName) + recordChan <- record{e, "", out, err} + }(e) + if e.tag == "" { + // pull -a on a nonexistent registry should fall back as well + group.Add(1) + go func(e entry) { + defer group.Done() + out, err := s.CmdWithError("pull", "-a", e.alias) + recordChan <- record{e, "-a", out, err} + }(e) + } + } + + // Wait for completion + group.Wait() + close(recordChan) + + // Process the results (out, err). + for record := range recordChan { + if len(record.option) == 0 { + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found: does not exist or no pull access", record.e.repo), check.Commentf("expected image not found error messages")) + } else { + // pull -a on a nonexistent registry should fall back as well + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found", record.e.repo), check.Commentf("expected image not found error messages")) + c.Assert(record.out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) + } + } + +} + +// TestPullFromCentralRegistryImplicitRefParts pulls an image from the central registry and verifies +// that pulling the same image with different combinations of implicit elements of the the image +// reference (tag, repository, central registry url, ...) doesn't trigger a new pull nor leads to +// multiple images. +func (s *DockerHubPullSuite) TestPullFromCentralRegistryImplicitRefParts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Pull hello-world from v2 + pullFromV2 := func(ref string) (int, string) { + out := s.Cmd(c, "pull", "hello-world") + v1Retries := 0 + for strings.Contains(out, "this image was pulled from a legacy registry") { + // Some network errors may cause fallbacks to the v1 + // protocol, which would violate the test's assumption + // that it will get the same images. To make the test + // more robust against these network glitches, allow a + // few retries if we end up with a v1 pull. + + if v1Retries > 2 { + c.Fatalf("too many v1 fallback incidents when pulling %s", ref) + } + + s.Cmd(c, "rmi", ref) + out = s.Cmd(c, "pull", ref) + + v1Retries++ + } + + return v1Retries, out + } + + pullFromV2("hello-world") + defer deleteImages("hello-world") + + s.Cmd(c, "tag", "hello-world", "hello-world-backup") + + for _, ref := range []string{ + "hello-world", + "hello-world:latest", + "library/hello-world", + "library/hello-world:latest", + "docker.io/library/hello-world", + "index.docker.io/library/hello-world", + } { + var out string + for { + var v1Retries int + v1Retries, out = pullFromV2(ref) + + // Keep repeating the test case until we don't hit a v1 + // fallback case. We won't get the right "Image is up + // to date" message if the local image was replaced + // with one pulled from v1. + if v1Retries == 0 { + break + } + s.Cmd(c, "rmi", ref) + s.Cmd(c, "tag", "hello-world-backup", "hello-world") + } + c.Assert(out, checker.Contains, "Image is up to date for hello-world:latest") + } + + s.Cmd(c, "rmi", "hello-world-backup") + + // We should have a single entry in images. + img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullScratchNotAllowed verifies that pulling 'scratch' is rejected. +func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) { + testRequires(c, DaemonIsLinux) + out, err := s.CmdWithError("pull", "scratch") + c.Assert(err, checker.NotNil, check.Commentf("expected pull of scratch to fail")) + c.Assert(out, checker.Contains, "'scratch' is a reserved name") + c.Assert(out, checker.Not(checker.Contains), "Pulling repository scratch") +} + +// TestPullAllTagsFromCentralRegistry pulls using `all-tags` for a given image and verifies that it +// results in more images than a naked pull. +func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + s.Cmd(c, "pull", "busybox") + outImageCmd := s.Cmd(c, "images", "busybox") + splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) + + s.Cmd(c, "pull", "--all-tags=true", "busybox") + outImageAllTagCmd := s.Cmd(c, "images", "busybox") + linesCount := strings.Count(outImageAllTagCmd, "\n") + c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd)) + + // Verify that the line for 'busybox:latest' is left unchanged. + var latestLine string + for _, line := range strings.Split(outImageAllTagCmd, "\n") { + if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") { + latestLine = line + break + } + } + c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags")) + splitLatest := strings.Fields(latestLine) + splitCurrent := strings.Fields(splitOutImageCmd[1]) + + // Clear relative creation times, since these can easily change between + // two invocations of "docker images". Without this, the test can fail + // like this: + // ... obtained []string = []string{"busybox", "latest", "d9551b4026f0", "27", "minutes", "ago", "1.113", "MB"} + // ... expected []string = []string{"busybox", "latest", "d9551b4026f0", "26", "minutes", "ago", "1.113", "MB"} + splitLatest[3] = "" + splitLatest[4] = "" + splitLatest[5] = "" + splitCurrent[3] = "" + splitCurrent[4] = "" + splitCurrent[5] = "" + + c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags")) +} + +// TestPullClientDisconnect kills the client during a pull operation and verifies that the operation +// gets cancelled. +// +// Ref: docker/docker#15589 +func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "hello-world:latest" + + pullCmd := s.MakeCmd("pull", repoName) + stdout, err := pullCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + err = pullCmd.Start() + c.Assert(err, checker.IsNil) + + // Cancel as soon as we get some output. + buf := make([]byte, 10) + _, err = stdout.Read(buf) + c.Assert(err, checker.IsNil) + + err = pullCmd.Process.Kill() + c.Assert(err, checker.IsNil) + + time.Sleep(2 * time.Second) + _, err = s.CmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected")) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) { + // we don't care about the actual image, we just want to see image not found + // because that means v2 call returned 401 and we fell back to v1 which usually + // gives a 404 (in this case the test registry doesn't handle v1 at all) + out, _, err := dockerCmdWithError("pull", privateRegistryURL+"/busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image busybox:latest not found") +} + +// Regression test for https://github.com/docker/docker/issues/26429 +func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) { + testRequires(c, DaemonIsWindows, Network) + _, _, err := dockerCmdWithError("pull", "ubuntu") + c.Assert(err.Error(), checker.Contains, "cannot be used on this platform") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_trusted_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_trusted_test.go new file mode 100644 index 0000000..96a42d6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_trusted_test.go @@ -0,0 +1,365 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-pull") + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) + + dockerCmd(c, "rmi", repoName) + // Try untrusted pull to ensure we pushed the tag to the registry + pullCmd = exec.Command(dockerBinary, "pull", "--disable-content-trust=true", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) + +} + +func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-pull") + + // Try pull (run from isolated directory without trust information) + pullCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(string(out))) + + dockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + // Try trusted pull on untrusted tag + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-cert-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf(out)) + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", "--disable-content-trust", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) + }) +} + +func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) + dockerCmd(c, "rmi", repoName) + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + + c.Assert(err, check.IsNil, check.Commentf("Restarting notary server failed.")) + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) + + // Now, try pulling with the original client from this new trust server. This should fail because the new root is invalid. + pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + if err == nil { + c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) + } + if !strings.Contains(out, "could not rotate trust to a new trusted root") { + c.Fatalf("Missing expected output on trusted pull:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppull/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) + + dockerCmd(c, "rmi", repoName) + + // Snapshots last for three years. This should be expired + fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) + + runAtDifferentDate(fourYearsLater, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf("Missing expected error running trusted pull with expired snapshots")) + c.Assert(string(out), checker.Contains, "repository out-of-date", check.Commentf(out)) + }) +} + +func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-offline-pull") + + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "error contacting notary server", check.Commentf(out)) + // Do valid trusted pull to warm cache + pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) + + dockerCmd(c, "rmi", repoName) + + // Try pull again with invalid notary server, should use cache + pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") + // tag the image and upload it to the private registry + _, err := buildImage(repoName, ` + FROM busybox + CMD echo trustedpulldelete + `, true) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "rmi", repoName); status != 0 { + c.Fatalf("Error removing image %q\n%s", repoName, out) + } + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + imageID := inspectField(c, repoName, "Id") + + imageByDigest := repoName + "@" + pullDigest + byDigestID := inspectField(c, imageByDigest, "Id") + + c.Assert(byDigestID, checker.Equals, imageID) + + // rmi of tag should also remove the digest reference + dockerCmd(c, "rmi", repoName) + + _, err = inspectFieldWithError(imageByDigest, "Id") + c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasesdelegationpulling/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // Push with targets first, initializing the repo + dockerCmd(c, "tag", "busybox", targetName) + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.assertTargetInRoles(c, repoName, "latest", "targets") + + // Try pull, check we retrieve from targets role + pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "retrieving target for targets role") + + // Now we'll create the releases role, and try pushing and pulling + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // try a pull, check that we can still pull because we can still read the + // old tag in the targets role + pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "retrieving target for targets role") + + // try a pull -a, check that it succeeds because we can still pull from the + // targets role + pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Push, should sign with targets/releases + dockerCmd(c, "tag", "busybox", targetName) + pushCmd = exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases") + + // Try pull, check we retrieve from targets/releases role + pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(out, checker.Contains, "retrieving target for targets/releases role") + + // Create another delegation that we'll sign with + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[1].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[1].Private) + s.notaryPublish(c, repoName) + + dockerCmd(c, "tag", "busybox", targetName) + pushCmd = exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases", "targets/other") + + // Try pull, check we retrieve from targets/releases role + pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(out, checker.Contains, "retrieving target for targets/releases role") +} + +func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclipullotherdelegation/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // We'll create a repo first with a non-release delegation role, so that when we + // push we'll sign it into the delegation role + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // Push should write to the delegation role, not targets + dockerCmd(c, "tag", "busybox", targetName) + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.assertTargetInRoles(c, repoName, "latest", "targets/other") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull - we should fail, since pull will only pull from the targets/releases + // role or the targets role + pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No trust data for") + + // try a pull -a: we should fail since pull will only pull from the targets/releases + // role or the targets role + pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No trusted tags for") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_push_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_push_test.go new file mode 100644 index 0000000..f750c12 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_push_test.go @@ -0,0 +1,715 @@ +package main + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Pushing an image to a private registry. +func testPushBusyboxImage(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) +} + +func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +// pushing an image without a prefix should throw an error +func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { + out, _, err := dockerCmdWithError("push", "busybox") + c.Assert(err, check.NotNil, check.Commentf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out)) +} + +func testPushUntagged(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + expected := "An image does not exist locally with the tag" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func testPushBadTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) + expected := "does not exist" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { + testPushBadTag(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) { + testPushBadTag(c) +} + +func testPushMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) + repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + + dockerCmd(c, "tag", "busybox", repoTag2) + + dockerCmd(c, "push", repoName) + + // Ensure layer list is equivalent for repoTag1 and repoTag2 + out1, _ := dockerCmd(c, "pull", repoTag1) + + imageAlreadyExists := ": Image already exists" + var out1Lines []string + for _, outputLine := range strings.Split(out1, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + + out2, _ := dockerCmd(c, "pull", repoTag2) + + var out2Lines []string + for _, outputLine := range strings.Split(out2, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + c.Assert(out2Lines, checker.HasLen, len(out1Lines)) + + for i := range out1Lines { + c.Assert(out1Lines[i], checker.Equals, out2Lines[i]) + } +} + +func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func testPushEmptyLayer(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) + emptyTarball, err := ioutil.TempFile("", "empty_tarball") + c.Assert(err, check.IsNil, check.Commentf("Unable to create test file")) + + tw := tar.NewWriter(emptyTarball) + err = tw.Close() + c.Assert(err, check.IsNil, check.Commentf("Error creating empty tarball")) + + freader, err := os.Open(emptyTarball.Name()) + c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball")) + defer freader.Close() + + importCmd := exec.Command(dockerBinary, "import", "-", repoName) + importCmd.Stdin = freader + out, _, err := runCommandWithOutput(importCmd) + c.Assert(err, check.IsNil, check.Commentf("import failed: %q", out)) + + // Now verify we can push it + out, _, err = dockerCmdWithError("push", repoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) +} + +func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +// testConcurrentPush pushes multiple tags to the same repo +// concurrently. +func testConcurrentPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"push1", "push2", "push3"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + _, err := buildImage(repo, fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s +`, repo), true) + c.Assert(err, checker.IsNil) + repos = append(repos, repo) + } + + // Push tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "push", repo)) + results <- err + }(repo) + } + + for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent push failed with error: %v", err)) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Re-pull and run individual tags, to make sure pushes succeeded + for _, repo := range repos { + dockerCmd(c, "pull", repo) + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // ensure that layers were mounted from the first repo during push + c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Equals, digest2) + + // ensure that pushing again produces the same digest + out3, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + + digest3 := reference.DigestRegexp.FindString(out3) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest3, check.Equals, digest2) + + // ensure that we can pull and run the cross-repo-pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out4, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out4, check.Equals, "hello world") +} + +func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen + c.Assert(strings.Contains(out2, "Mounted from"), check.Equals, false) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Not(check.Equals), digest2) + + // ensure that we can pull and run the second pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out3, check.Equals, "hello world") +} + +func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // Try pull after push + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) + + // Assert that we rotated the snapshot key to the server by checking our local keystore + contents, err := ioutil.ReadDir(filepath.Join(cliconfig.ConfigDir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) + c.Assert(err, check.IsNil, check.Commentf("Unable to read local tuf key files")) + // Check that we only have 1 key (targets key) + c.Assert(contents, checker.HasLen, 1) +} + +func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmdWithPassphrases(pushCmd, "12345678", "12345678") + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // Try pull after push + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + // Using a name that doesn't resolve to an address makes this test faster + s.trustedCmdWithServer(pushCmd, "https://server.invalid:81/") + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("Missing error while running trusted push w/ no server")) + c.Assert(out, checker.Contains, "error contacting notary server", check.Commentf("Missing expected output on trusted push")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", "--disable-content-trust", repoName) + // Using a name that doesn't resolve to an address makes this test faster + s.trustedCmdWithServer(pushCmd, "https://server.invalid") + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push with no server and --disable-content-trust failed: %s\n%s", err, out)) + c.Assert(out, check.Not(checker.Contains), "Error establishing connection to notary repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // Try pull after push + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Do a trusted push + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // Do another trusted push + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + dockerCmd(c, "rmi", repoName) + + // Try pull to ensure the double push did not break our ability to pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted pull: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted pull with --disable-content-trust")) + +} + +func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + // Push with wrong passphrases + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321") + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with short targets passphrase: \n%s", out)) + c.Assert(out, checker.Contains, "could not find necessary signing keys", check.Commentf("Missing expected output on trusted push with short targets/snapsnot passphrase")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExpiredSnapshot(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredsnapshot/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // Snapshots last for three years. This should be expired + fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) + + runAtDifferentDate(fourYearsLater, func() { + // Push with wrong passphrases + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with expired snapshot: \n%s", out)) + c.Assert(out, checker.Contains, "repository out-of-date", check.Commentf("Missing expected output on trusted push with expired snapshot")) + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExpiredTimestamp(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // The timestamps expire in two weeks. Lets check three + threeWeeksLater := time.Now().Add(time.Hour * 24 * 21) + + // Should succeed because the server transparently re-signs one + runAtDifferentDate(threeWeeksLater, func() { + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with expired timestamp")) + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationinitfirst/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + // check to make sure that the target has been added to targets/releases and not targets + s.assertTargetInRoles(c, repoName, "latest", "targets/releases") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + + pullCmd := exec.Command(dockerBinary, "pull", targetName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclimanyroles/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public) + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public) + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + + s.notaryCreateDelegation(c, repoName, "targets/role1/subrole", s.not.keys[3].Public) + s.notaryImportKey(c, repoName, "targets/role1/subrole", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // check to make sure that the target has been added to targets/role1 and targets/role2, and + // not targets (because there are delegations) or targets/role3 (due to missing key) or + // targets/role1/subrole (due to it being a second level delegation) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role2") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + + // pull should fail because none of these are the releases role + pullCmd := exec.Command(dockerBinary, "pull", targetName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclirolesbykeysandpaths/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public, "l", "z") + s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public, "x", "y") + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public, "latest") + s.notaryCreateDelegation(c, repoName, "targets/role4", s.not.keys[3].Public, "latest") + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + s.notaryImportKey(c, repoName, "targets/role4", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // check to make sure that the target has been added to targets/role1 and targets/role4, and + // not targets (because there are delegations) or targets/role2 (due to path restrictions) or + // targets/role3 (due to missing key) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role4") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + + // pull should fail because none of these are the releases role + pullCmd := exec.Command(dockerBinary, "pull", targetName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushDoesntSignTargetsIfDelegationsExist(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationnotsignable/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + // do not import any delegations key + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("trusted push succeeded but should have failed:\n%s", out)) + c.Assert(out, checker.Contains, "no valid signing keys", + check.Commentf("Missing expected output on trusted push without keys")) + + s.assertTargetNotInRoles(c, repoName, "latest", "targets", "targets/role1") +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) { + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "Retrying") + c.Assert(out, checker.Contains, "no basic auth credentials") +} + +// This may be flaky but it's needed not to regress on unauthorized push, see #21054 +func (s *DockerSuite) TestPushToCentralRegistryUnauthorized(c *check.C) { + testRequires(c, Network) + repoName := "test/busybox" + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "Retrying") +} + +func getTestTokenService(status int, body string, retries int) *httptest.Server { + var mu sync.Mutex + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + if retries > 0 { + w.WriteHeader(http.StatusServiceUnavailable) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"errors":[{"code":"UNAVAILABLE","message":"cannot create token at this time"}]}`)) + retries-- + } else { + w.WriteHeader(status) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(body)) + } + mu.Unlock() + })) +} + +func (s *DockerRegistryAuthTokenSuite) TestPushTokenServiceUnauthResponse(c *check.C) { + ts := getTestTokenService(http.StatusUnauthorized, `{"errors": [{"Code":"UNAUTHORIZED", "message": "a message", "detail": null}]}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + c.Assert(out, checker.Contains, "unauthorized: a message") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnauthorized(c *check.C) { + ts := getTestTokenService(http.StatusUnauthorized, `{"error": "unauthorized"}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "unauthorized: authentication required") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseError(c *check.C) { + ts := getTestTokenService(http.StatusTooManyRequests, `{"errors": [{"code":"TOOMANYREQUESTS","message":"out of tokens"}]}`, 4) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Retrying") + c.Assert(out, checker.Not(checker.Contains), "Retrying in 15") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "toomanyrequests: out of tokens") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnparsable(c *check.C) { + ts := getTestTokenService(http.StatusForbidden, `no way`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], checker.Contains, "error parsing HTTP 403 response body: ") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseNoToken(c *check.C) { + ts := getTestTokenService(http.StatusOK, `{"something": "wrong"}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "authorization server did not include a token in the response") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_registry_user_agent_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_registry_user_agent_test.go new file mode 100644 index 0000000..fb9a66a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_registry_user_agent_test.go @@ -0,0 +1,120 @@ +package main + +import ( + "fmt" + "net/http" + "regexp" + + "github.com/go-check/check" +) + +// unescapeBackslashSemicolonParens unescapes \;() +func unescapeBackslashSemicolonParens(s string) string { + re := regexp.MustCompile(`\\;`) + ret := re.ReplaceAll([]byte(s), []byte(";")) + + re = regexp.MustCompile(`\\\(`) + ret = re.ReplaceAll([]byte(ret), []byte("(")) + + re = regexp.MustCompile(`\\\)`) + ret = re.ReplaceAll([]byte(ret), []byte(")")) + + re = regexp.MustCompile(`\\\\`) + ret = re.ReplaceAll([]byte(ret), []byte(`\`)) + + return string(ret) +} + +func regexpCheckUA(c *check.C, ua string) { + re := regexp.MustCompile("(?P.+) UpstreamClient(?P.+)") + substrArr := re.FindStringSubmatch(ua) + + c.Assert(substrArr, check.HasLen, 3, check.Commentf("Expected 'UpstreamClient()' with upstream client UA")) + dockerUA := substrArr[1] + upstreamUAEscaped := substrArr[2] + + // check dockerUA looks correct + reDockerUA := regexp.MustCompile("^docker/[0-9A-Za-z+]") + bMatchDockerUA := reDockerUA.MatchString(dockerUA) + c.Assert(bMatchDockerUA, check.Equals, true, check.Commentf("Docker Engine User-Agent malformed")) + + // check upstreamUA looks correct + // Expecting something like: Docker-Client/1.11.0-dev (linux) + upstreamUA := unescapeBackslashSemicolonParens(upstreamUAEscaped) + reUpstreamUA := regexp.MustCompile("^\\(Docker-Client/[0-9A-Za-z+]") + bMatchUpstreamUA := reUpstreamUA.MatchString(upstreamUA) + c.Assert(bMatchUpstreamUA, check.Equals, true, check.Commentf("(Upstream) Docker Client User-Agent malformed")) +} + +func registerUserAgentHandler(reg *testRegistry, result *string) { + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + var ua string + for k, v := range r.Header { + if k == "User-Agent" { + ua = v[0] + } + } + *result = ua + }) +} + +// TestUserAgentPassThrough verifies that when an image is pulled from +// a registry, the registry should see a User-Agent string of the form +// [docker engine UA] UptreamClientSTREAM-CLIENT([client UA]) +func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *check.C) { + var ( + buildUA string + pullUA string + pushUA string + loginUA string + ) + + buildReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(buildReg, &buildUA) + buildRepoName := fmt.Sprintf("%s/busybox", buildReg.hostport) + + pullReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(pullReg, &pullUA) + pullRepoName := fmt.Sprintf("%s/busybox", pullReg.hostport) + + pushReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(pushReg, &pushUA) + pushRepoName := fmt.Sprintf("%s/busybox", pushReg.hostport) + + loginReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(loginReg, &loginUA) + + err = s.d.Start( + "--insecure-registry", buildReg.hostport, + "--insecure-registry", pullReg.hostport, + "--insecure-registry", pushReg.hostport, + "--insecure-registry", loginReg.hostport, + "--disable-legacy-registry=true") + c.Assert(err, check.IsNil) + + dockerfileName, cleanup1, err := makefile(fmt.Sprintf("FROM %s", buildRepoName)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup1() + s.d.Cmd("build", "--file", dockerfileName, ".") + regexpCheckUA(c, buildUA) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", loginReg.hostport) + regexpCheckUA(c, loginUA) + + s.d.Cmd("pull", pullRepoName) + regexpCheckUA(c, pullUA) + + dockerfileName, cleanup2, err := makefile(`FROM scratch + ENV foo bar`) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup2() + s.d.Cmd("build", "-t", pushRepoName, "--file", dockerfileName, ".") + + s.d.Cmd("push", pushRepoName) + regexpCheckUA(c, pushUA) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_rename_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_rename_test.go new file mode 100644 index 0000000..373d614 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_rename_test.go @@ -0,0 +1,138 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + name := inspectField(c, cleanedContainerID, "Name") + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name = inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + +} + +func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateNonCryptoID() + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) +} + +func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { + out, _ := runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + + newName := "new_name" + ContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, ContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) + + out, _ = runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + newContainerID := strings.TrimSpace(out) + name = inspectField(c, newContainerID, "Name") + c.Assert(name, checker.Equals, "/first_name", check.Commentf("Failed to reuse container name")) +} + +func (s *DockerSuite) TestRenameCheckNames(c *check.C) { + dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, newName, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + + result := dockerCmdWithResult("inspect", "-f={{.Name}}", "--type=container", "first_name") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such container: first_name", + }) +} + +func (s *DockerSuite) TestRenameInvalidName(c *check.C) { + runSleepingContainer(c, "--name", "myname") + + out, _, err := dockerCmdWithError("rename", "myname", "new:invalid") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "Invalid container name", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "myname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "requires exactly 2 argument(s).", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "myname", "") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "", "newname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container with empty name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Contains, "myname", check.Commentf("Output of docker ps should have included 'myname': %s", out)) +} + +func (s *DockerSuite) TestRenameAnonymousContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "network", "create", "network1") + out, _ := dockerCmd(c, "create", "-it", "--net", "network1", "busybox", "top") + + anonymousContainerID := strings.TrimSpace(out) + + dockerCmd(c, "rename", anonymousContainerID, "container1") + dockerCmd(c, "start", "container1") + + count := "-c" + if daemonPlatform == "windows" { + count = "-n" + } + + _, _, err := dockerCmdWithError("run", "--net", "network1", "busybox", "ping", count, "1", "container1") + c.Assert(err, check.IsNil, check.Commentf("Embedded DNS lookup fails after renaming anonymous container: %v", err)) +} + +func (s *DockerSuite) TestRenameContainerWithSameName(c *check.C) { + out, _ := runSleepingContainer(c, "--name", "old") + ContainerID := strings.TrimSpace(out) + + out, _, err := dockerCmdWithError("rename", "old", "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", ContainerID, "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) +} + +// Test case for #23973 +func (s *DockerSuite) TestRenameContainerWithLinkedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + db1, _ := dockerCmd(c, "run", "--name", "db1", "-d", "busybox", "top") + dockerCmd(c, "run", "--name", "app1", "-d", "--link", "db1:/mysql", "busybox", "top") + dockerCmd(c, "rename", "app1", "app2") + out, _, err := dockerCmdWithError("inspect", "--format={{ .Id }}", "app2/mysql") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(db1)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_restart_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_restart_test.go new file mode 100644 index 0000000..7d58528 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_restart_test.go @@ -0,0 +1,278 @@ +package main + +import ( + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { + dockerCmd(c, "run", "--name=test", "busybox", "echo", "foobar") + cleanedContainerID, err := getIDByName("test") + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", cleanedContainerID) + + // Wait until the container has stopped + err = waitInspect(cleanedContainerID, "{{.State.Running}}", "false", 20*time.Second) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\nfoobar\n") +} + +func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") + + cleanedContainerID := strings.TrimSpace(out) + + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", "-t", "1", cleanedContainerID) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + c.Assert(out, checker.Equals, "foobar\nfoobar\n") +} + +// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. +func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + out, _ := runSleepingContainer(c, "-d", "-v", prefix+slash+"test") + + cleanedContainerID := strings.TrimSpace(out) + out, err := inspectFilter(cleanedContainerID, "len .Mounts") + c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) + out = strings.Trim(out, " \n\r") + c.Assert(out, checker.Equals, "1") + + source, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "restart", cleanedContainerID) + + out, err = inspectFilter(cleanedContainerID, "len .Mounts") + c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) + out = strings.Trim(out, " \n\r") + c.Assert(out, checker.Equals, "1") + + sourceAfterRestart, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") + c.Assert(err, checker.IsNil) + c.Assert(source, checker.Equals, sourceAfterRestart) +} + +func (s *DockerSuite) TestRestartPolicyNO(c *check.C) { + out, _ := dockerCmd(c, "create", "--restart=no", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + c.Assert(name, checker.Equals, "no") +} + +func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) { + out, _ := dockerCmd(c, "create", "--restart=always", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + c.Assert(name, checker.Equals, "always") + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + // MaximumRetryCount=0 if the restart policy is always + c.Assert(MaximumRetryCount, checker.Equals, "0") +} + +func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) { + out, _, err := dockerCmdWithError("create", "--restart=on-failure:-1", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "maximum retry count cannot be negative") + + out, _ = dockerCmd(c, "create", "--restart=on-failure:1", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "1") + + out, _ = dockerCmd(c, "create", "--restart=on-failure:0", "busybox") + + id = strings.TrimSpace(string(out)) + name = inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "0") + + out, _ = dockerCmd(c, "create", "--restart=on-failure", "busybox") + + id = strings.TrimSpace(string(out)) + name = inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "0") +} + +// a good container with --restart=on-failure:3 +// MaximumRetryCount!=0; RestartCount=0 +func (s *DockerSuite) TestRestartContainerwithGoodContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "true") + + id := strings.TrimSpace(string(out)) + err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 30*time.Second) + c.Assert(err, checker.IsNil) + + count := inspectField(c, id, "RestartCount") + c.Assert(count, checker.Equals, "0") + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(MaximumRetryCount, checker.Equals, "3") + +} + +func (s *DockerSuite) TestRestartContainerSuccess(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := runSleepingContainer(c, "-d", "--restart=always") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + pidStr := inspectField(c, id, "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartWithPolicyUserDefinedNetwork(c *check.C) { + // TODO Windows. This may be portable following HNS integration post TP5. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udNet") + + dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--restart=always", "--net=udNet", "--name=second", + "--link=first:foo", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Now kill the second container and let the restart policy kick in + pidStr := inspectField(c, "second", "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect("second", "{{.RestartCount}}", "1", 5*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect("second", "{{.State.Status}}", "running", 5*time.Second) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartPolicyAfterRestart(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := runSleepingContainer(c, "-d", "--restart=always") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "restart", id) + + c.Assert(waitRun(id), check.IsNil) + + pidStr := inspectField(c, id, "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) { + out1, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + out2, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false") + + id1 := strings.TrimSpace(string(out1)) + id2 := strings.TrimSpace(string(out2)) + waitTimeout := 15 * time.Second + if daemonPlatform == "windows" { + waitTimeout = 150 * time.Second + } + err := waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "restart", id1) + dockerCmd(c, "restart", id2) + + dockerCmd(c, "stop", id1) + dockerCmd(c, "stop", id2) + dockerCmd(c, "start", id1) + dockerCmd(c, "start", id2) +} + +func (s *DockerSuite) TestRestartAutoRemoveContainer(c *check.C) { + out, _ := runSleepingContainer(c, "--rm") + + id := strings.TrimSpace(string(out)) + dockerCmd(c, "restart", id) + err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "ps") + c.Assert(out, checker.Contains, id[:12], check.Commentf("container should be restarted instead of removed: %v", out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_rm_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_rm_test.go new file mode 100644 index 0000000..0186c56 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_rm_test.go @@ -0,0 +1,86 @@ +package main + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { + testRequires(c, SameHostDaemon) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + tempDir, err := ioutil.TempDir("", "test-rm-container-with-removed-volume-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerCmd(c, "run", "--name", "losemyvolumes", "-v", tempDir+":"+prefix+slash+"test", "busybox", "true") + + err = os.RemoveAll(tempDir) + c.Assert(err, check.IsNil) + + dockerCmd(c, "rm", "-v", "losemyvolumes") +} + +func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "--name", "foo", "-v", prefix+slash+"srv", "busybox", "true") + + dockerCmd(c, "rm", "-v", "foo") +} + +func (s *DockerSuite) TestRmContainerRunning(c *check.C) { + createRunningContainer(c, "foo") + + _, _, err := dockerCmdWithError("rm", "foo") + c.Assert(err, checker.NotNil, check.Commentf("Expected error, can't rm a running container")) +} + +func (s *DockerSuite) TestRmContainerForceRemoveRunning(c *check.C) { + createRunningContainer(c, "foo") + + // Stop then remove with -s + dockerCmd(c, "rm", "-f", "foo") +} + +func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { + dockerfile1 := `FROM busybox:latest + ENTRYPOINT ["true"]` + img := "test-container-orphaning" + dockerfile2 := `FROM busybox:latest + ENTRYPOINT ["true"] + MAINTAINER Integration Tests` + + // build first dockerfile + img1, err := buildImage(img, dockerfile1, true) + c.Assert(err, check.IsNil, check.Commentf("Could not build image %s", img)) + // run container on first image + dockerCmd(c, "run", img) + // rebuild dockerfile with a small addition at the end + _, err = buildImage(img, dockerfile2, true) + c.Assert(err, check.IsNil, check.Commentf("Could not rebuild image %s", img)) + // try to remove the image, should not error out. + out, _, err := dockerCmdWithError("rmi", img) + c.Assert(err, check.IsNil, check.Commentf("Expected to removing the image, but failed: %s", out)) + + // check if we deleted the first image + out, _ = dockerCmd(c, "images", "-q", "--no-trunc") + c.Assert(out, checker.Contains, img1, check.Commentf("Orphaned container (could not find %q in docker images): %s", img1, out)) + +} + +func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { + out, _, err := dockerCmdWithError("rm", "unknown") + c.Assert(err, checker.NotNil, check.Commentf("Expected error on rm unknown container, got none")) + c.Assert(out, checker.Contains, "No such container") +} + +func createRunningContainer(c *check.C, name string) { + runSleepingContainer(c, "-dt", "--name", name) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_rmi_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_rmi_test.go new file mode 100644 index 0000000..cb16d9d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_rmi_test.go @@ -0,0 +1,352 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { + errSubstr := "is using it" + + // create a container + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + // try to delete the image + out, _, err := dockerCmdWithError("rmi", "busybox") + // Container is using image, should not be able to rmi + c.Assert(err, checker.NotNil) + // Container is using image, error message should contain errSubstr + c.Assert(out, checker.Contains, errSubstr, check.Commentf("Container: %q", cleanedContainerID)) + + // make sure it didn't delete the busybox name + images, _ := dockerCmd(c, "images") + // The name 'busybox' should not have been removed from images + c.Assert(images, checker.Contains, "busybox") +} + +func (s *DockerSuite) TestRmiTag(c *check.C) { + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox", "utest:tag1") + dockerCmd(c, "tag", "busybox", "utest/docker:tag2") + dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+3, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest/docker:tag2") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+1, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } + dockerCmd(c, "rmi", "utest:tag1") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n"), check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } +} + +func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'") + + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if daemonPlatform == "windows" { + err := waitExited(containerID, 60*time.Second) + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "commit", containerID, "busybox-one") + + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") + dockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") + + imagesAfter, _ := dockerCmd(c, "images", "-a") + // tag busybox to create 2 more images with same imageID + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("docker images shows: %q\n", imagesAfter)) + + imgID := inspectField(c, "busybox-one:tag1", "Id") + + // run a container with the image + out, _ = runSleepingContainerInImage(c, "busybox-one") + + containerID = strings.TrimSpace(out) + + // first checkout without force it fails + out, _, err := dockerCmdWithError("rmi", imgID) + expected := fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)) + // rmi tagged in multiple repos should have failed without force + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expected) + + dockerCmd(c, "stop", containerID) + dockerCmd(c, "rmi", "-f", imgID) + + imagesAfter, _ = dockerCmd(c, "images", "-a") + // rmi -f failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12], check.Commentf("ImageID:%q; ImagesAfter: %q", imgID, imagesAfter)) +} + +func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'") + + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if daemonPlatform == "windows" { + err := waitExited(containerID, 60*time.Second) + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "commit", containerID, "busybox-test") + + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox-test", "utest:tag1") + dockerCmd(c, "tag", "busybox-test", "utest:tag2") + dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") + dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+4, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + imgID := inspectField(c, "busybox-test", "Id") + + // first checkout without force it fails + out, _, err := dockerCmdWithError("rmi", imgID) + // rmi tagged in multiple repos should have failed without force + c.Assert(err, checker.NotNil) + // rmi tagged in multiple repos should have failed without force + c.Assert(out, checker.Contains, "(must be forced) - image is referenced in multiple repositories", check.Commentf("out: %s; err: %v;", out, err)) + + dockerCmd(c, "rmi", "-f", imgID) + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + // rmi failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12]) + } +} + +// See https://github.com/docker/docker/issues/14116 +func (s *DockerSuite) TestRmiImageIDForceWithRunningContainersAndMultipleTags(c *check.C) { + dockerfile := "FROM busybox\nRUN echo test 14116\n" + imgID, err := buildImage("test-14116", dockerfile, false) + c.Assert(err, checker.IsNil) + + newTag := "newtag" + dockerCmd(c, "tag", imgID, newTag) + runSleepingContainerInImage(c, imgID) + + out, _, err := dockerCmdWithError("rmi", "-f", imgID) + // rmi -f should not delete image with running containers + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "(cannot be forced) - image is being used by running container") +} + +func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { + container := "test-delete-tag" + newtag := "busybox:newtag" + bb := "busybox:latest" + dockerCmd(c, "tag", bb, newtag) + + dockerCmd(c, "run", "--name", container, bb, "/bin/true") + + out, _ := dockerCmd(c, "rmi", newtag) + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1) +} + +func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { + image := "busybox-clone" + + cmd := exec.Command(dockerBinary, "build", "--no-cache", "-t", image, "-") + cmd.Stdin = strings.NewReader(`FROM busybox +MAINTAINER foo`) + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf("Could not build %s: %s", image, out)) + + dockerCmd(c, "run", "--name", "test-force-rmi", image, "/bin/true") + + dockerCmd(c, "rmi", "-f", image) +} + +func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) { + newRepo := "127.0.0.1:5000/busybox" + oldRepo := "busybox" + newTag := "busybox:test" + dockerCmd(c, "tag", oldRepo, newRepo) + + dockerCmd(c, "run", "--name", "test", oldRepo, "touch", "/abcd") + + dockerCmd(c, "commit", "test", newTag) + + out, _ := dockerCmd(c, "rmi", newTag) + c.Assert(out, checker.Contains, "Untagged: "+newTag) +} + +func (s *DockerSuite) TestRmiForceWithMultipleRepositories(c *check.C) { + imageName := "rmiimage" + tag1 := imageName + ":tag1" + tag2 := imageName + ":tag2" + + _, err := buildImage(tag1, + `FROM busybox + MAINTAINER "docker"`, + true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "tag", tag1, tag2) + + out, _ := dockerCmd(c, "rmi", "-f", tag2) + c.Assert(out, checker.Contains, "Untagged: "+tag2) + c.Assert(out, checker.Not(checker.Contains), "Untagged: "+tag1) + + // Check built image still exists + images, _ := dockerCmd(c, "images", "-a") + c.Assert(images, checker.Contains, imageName, check.Commentf("Built image missing %q; Images: %q", imageName, images)) +} + +func (s *DockerSuite) TestRmiBlank(c *check.C) { + out, _, err := dockerCmdWithError("rmi", " ") + // Should have failed to delete ' ' image + c.Assert(err, checker.NotNil) + // Wrong error message generated + c.Assert(out, checker.Not(checker.Contains), "no such id", check.Commentf("out: %s", out)) + // Expected error message not generated + c.Assert(out, checker.Contains, "image name cannot be blank", check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestRmiContainerImageNotFound(c *check.C) { + // Build 2 images for testing. + imageNames := []string{"test1", "test2"} + imageIds := make([]string, 2) + for i, name := range imageNames { + dockerfile := fmt.Sprintf("FROM busybox\nMAINTAINER %s\nRUN echo %s\n", name, name) + id, err := buildImage(name, dockerfile, false) + c.Assert(err, checker.IsNil) + imageIds[i] = id + } + + // Create a long-running container. + runSleepingContainerInImage(c, imageNames[0]) + + // Create a stopped container, and then force remove its image. + dockerCmd(c, "run", imageNames[1], "true") + dockerCmd(c, "rmi", "-f", imageIds[1]) + + // Try to remove the image of the running container and see if it fails as expected. + out, _, err := dockerCmdWithError("rmi", "-f", imageIds[0]) + // The image of the running container should not be removed. + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "image is being used by running container", check.Commentf("out: %s", out)) +} + +// #13422 +func (s *DockerSuite) TestRmiUntagHistoryLayer(c *check.C) { + image := "tmp1" + // Build an image for testing. + dockerfile := `FROM busybox +MAINTAINER foo +RUN echo 0 #layer0 +RUN echo 1 #layer1 +RUN echo 2 #layer2 +` + _, err := buildImage(image, dockerfile, false) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "history", "-q", image) + ids := strings.Split(out, "\n") + idToTag := ids[2] + + // Tag layer0 to "tmp2". + newTag := "tmp2" + dockerCmd(c, "tag", idToTag, newTag) + // Create a container based on "tmp1". + dockerCmd(c, "run", "-d", image, "true") + + // See if the "tmp2" can be untagged. + out, _ = dockerCmd(c, "rmi", newTag) + // Expected 1 untagged entry + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1, check.Commentf("out: %s", out)) + + // Now let's add the tag again and create a container based on it. + dockerCmd(c, "tag", idToTag, newTag) + out, _ = dockerCmd(c, "run", "-d", newTag, "true") + cid := strings.TrimSpace(out) + + // At this point we have 2 containers, one based on layer2 and another based on layer0. + // Try to untag "tmp2" without the -f flag. + out, _, err = dockerCmdWithError("rmi", newTag) + // should not be untagged without the -f flag + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, cid[:12]) + c.Assert(out, checker.Contains, "(must force)") + + // Add the -f flag and test again. + out, _ = dockerCmd(c, "rmi", "-f", newTag) + // should be allowed to untag with the -f flag + c.Assert(out, checker.Contains, fmt.Sprintf("Untagged: %s:latest", newTag)) +} + +func (*DockerSuite) TestRmiParentImageFail(c *check.C) { + _, err := buildImage("test", ` + FROM busybox + RUN echo hello`, false) + c.Assert(err, checker.IsNil) + + id := inspectField(c, "busybox", "ID") + out, _, err := dockerCmdWithError("rmi", id) + c.Assert(err, check.NotNil) + if !strings.Contains(out, "image has dependent child images") { + c.Fatalf("rmi should have failed because it's a parent image, got %s", out) + } +} + +func (s *DockerSuite) TestRmiWithParentInUse(c *check.C) { + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "create", imageID) + cID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID = strings.TrimSpace(out) + + dockerCmd(c, "rmi", imageID) +} + +// #18873 +func (s *DockerSuite) TestRmiByIDHardConflict(c *check.C) { + dockerCmd(c, "create", "busybox") + + imgID := inspectField(c, "busybox:latest", "Id") + + _, _, err := dockerCmdWithError("rmi", imgID[:12]) + c.Assert(err, checker.NotNil) + + // check that tag was not removed + imgID2 := inspectField(c, "busybox:latest", "Id") + c.Assert(imgID, checker.Equals, imgID2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_run_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_run_test.go new file mode 100644 index 0000000..f1b1fde --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_run_test.go @@ -0,0 +1,4713 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/types" + "github.com/go-check/check" + libcontainerUser "github.com/opencontainers/runc/libcontainer/user" +) + +// "test123" should be printed by docker run +func (s *DockerSuite) TestRunEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "echo", "test123") + if out != "test123\n" { + c.Fatalf("container should've printed 'test123', got '%s'", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") + if out != "test\n" { + c.Errorf("container should've printed 'test'") + } +} + +// docker run should not leak file descriptors. This test relies on Unix +// specific functionality and cannot run on Windows. +func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd") + + // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory + if out != "0 1 2 3\n" { + c.Errorf("container should've printed '0 1 2 3', not: %s", out) + } +} + +// it should be possible to lookup Google DNS +// this will fail when Internet access is unavailable +func (s *DockerSuite) TestRunLookupGoogleDNS(c *check.C) { + testRequires(c, Network, NotArm) + if daemonPlatform == "windows" { + // nslookup isn't present in Windows busybox. Is built-in. Further, + // nslookup isn't present in nanoserver. Hence just use PowerShell... + dockerCmd(c, "run", WindowsBaseImage, "powershell", "Resolve-DNSName", "google.com") + } else { + dockerCmd(c, "run", DefaultImage, "nslookup", "google.com") + } + +} + +// the exit code should be 0 +func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { + dockerCmd(c, "run", "busybox", "true") +} + +// the exit code should be 1 +func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { + _, exitCode, err := dockerCmdWithError("run", "busybox", "false") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 1) +} + +// it should be possible to pipe in data via stdin to a process running in a container +func (s *DockerSuite) TestRunStdinPipe(c *check.C) { + // TODO Windows: This needs some work to make compatible. + testRequires(c, DaemonIsLinux) + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") + runCmd.Stdin = strings.NewReader("blahblah") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + logsOut, _ := dockerCmd(c, "logs", out) + + containerLogs := strings.TrimSpace(logsOut) + if containerLogs != "blahblah" { + c.Errorf("logs didn't print the container's logs %s", containerLogs) + } + + dockerCmd(c, "rm", out) +} + +// the container's ID should be printed when starting a container in detached mode +func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + rmOut, _ := dockerCmd(c, "rm", out) + + rmOut = strings.TrimSpace(rmOut) + if rmOut != out { + c.Errorf("rm didn't print the container ID %s %s", out, rmOut) + } +} + +// the working directory should be set correctly +func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { + dir := "/root" + image := "busybox" + if daemonPlatform == "windows" { + dir = `C:/Windows` + } + + // First with -w + out, _ := dockerCmd(c, "run", "-w", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("-w failed to set working directory") + } + + // Then with --workdir + out, _ = dockerCmd(c, "run", "--workdir", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("--workdir failed to set working directory") + } +} + +// pinging Google's DNS resolver should fail when we disable the networking +func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { + count := "-c" + image := "busybox" + if daemonPlatform == "windows" { + count = "-n" + image = WindowsBaseImage + } + + // First using the long form --net + out, exitCode, err := dockerCmdWithError("run", "--net=none", image, "ping", count, "1", "8.8.8.8") + if err != nil && exitCode != 1 { + c.Fatal(out, err) + } + if exitCode != 1 { + c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } +} + +//test --link use container name to link target +func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox") + + ip := inspectField(c, "parent", "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("use a container name to link target failed") + } +} + +//test --link use container id to link target +func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. + testRequires(c, DaemonIsLinux) + cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox") + + cID = strings.TrimSpace(cID) + ip := inspectField(c, cID, "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("use a container id to link target failed") + } +} + +func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in user-defined network udlinkNet with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping to third and its alias must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.NotNil) + + // start third container now + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top") + c.Assert(waitRun("third"), check.IsNil) + + // ping to third and its alias must succeed now + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart second container + dockerCmd(c, "restart", "second") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRunWithNetAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + for _, net := range defaults { + out, _, err := dockerCmdWithError("run", "-d", "--net", net, "--net-alias", "alias_"+net, "busybox", "top") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + + cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Check if default short-id alias is added automatically + id := strings.TrimSpace(cid1) + aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Check if default short-id alias is added automatically + id = strings.TrimSpace(cid2) + aliases = inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + // ping to first and its network-scoped aliases + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its network-scoped aliases must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) +} + +// Issue 9677. +func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) { + out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown flag: --exec-opt") +} + +// Regression test for #4979 +func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { + + var ( + out string + exitCode int + ) + + // Create a file in a volume + if daemonPlatform == "windows" { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, WindowsBaseImage, "cmd", "/c", `echo hello > c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("1", out, exitCode) + } + + // Read the file from another container using --volumes-from to access the volume in the second container + if daemonPlatform == "windows" { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", WindowsBaseImage, "cmd", "/c", `type c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("2", out, exitCode) + } +} + +// Volume path is a symlink which also exists on the host, and the host side is a file not a dir +// But the volume call is just a normal volume, not a bind mount +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink" + + dir, err := ioutil.TempDir("", name) + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + // In the case of Windows to Windows CI, if the machine is setup so that + // the temp directory is not the C: drive, this test is invalid and will + // not work. + if daemonPlatform == "windows" && strings.ToLower(dir[:1]) != "c" { + c.Skip("Requires TEMP to point to C: drive") + } + + f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) + if err != nil { + c.Fatal(err) + } + f.Close() + + if daemonPlatform == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", WindowsBaseImage, dir, dir) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) + containerPath = "/test/test" + cmd = "true" + } + if _, err := buildImage(name, dockerFile, false); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +// Volume path is a symlink in the container +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink2" + + if daemonPlatform == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", WindowsBaseImage, name, name) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p /%s\nRUN ln -s /%s /test", name, name) + containerPath = "/test/test" + cmd = "true" + } + if _, err := buildImage(name, dockerFile, false); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform == "windows" && windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) { + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform == "windows" && windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + var ( + volumeDir string + fileInVol string + ) + if daemonPlatform == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + testRequires(c, DaemonIsLinux) + volumeDir = "/test" + fileInVol = `/test/file` + } + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + + if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +// Regression test for #1201 +func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { + var ( + volumeDir string + fileInVol string + ) + if daemonPlatform == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + volumeDir = "/test" + fileInVol = "/test/file" + } + + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol) + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", fileInVol); err == nil || !strings.Contains(out, `invalid mode: bar`) { + c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out) + } + + dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", fileInVol) +} + +func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + hostpath := randomTmpDirPath("test", daemonPlatform) + if err := os.MkdirAll(hostpath, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", hostpath, err) + } + defer os.RemoveAll(hostpath) + + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform == "windows" && windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + dockerCmd(c, "run", "--name", "parent", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this "rw" mode to be be ignored since the inherited volume is "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") + } + + dockerCmd(c, "run", "--name", "parent2", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this to be read-only since both are "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") + } +} + +// Test for GH#10618 +func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { + path1 := randomTmpDirPath("test1", daemonPlatform) + path2 := randomTmpDirPath("test2", daemonPlatform) + + someplace := ":/someplace" + if daemonPlatform == "windows" { + // Windows requires that the source directory exists before calling HCS + testRequires(c, SameHostDaemon) + someplace = `:c:\someplace` + if err := os.MkdirAll(path1, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", path1, err) + } + defer os.RemoveAll(path1) + if err := os.MkdirAll(path2, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", path1, err) + } + defer os.RemoveAll(path2) + } + mountstr1 := path1 + someplace + mountstr2 := path2 + someplace + + if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + + // Test for https://github.com/docker/docker/issues/22093 + volumename1 := "test1" + volumename2 := "test2" + volume1 := volumename1 + someplace + volume2 := volumename2 + someplace + if out, _, err := dockerCmdWithError("run", "-v", volume1, "-v", volume2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + // create failed should have create volume volumename1 or volumename2 + // we should remove volumename2 or volumename2 successfully + out, _ := dockerCmd(c, "volume", "ls") + if strings.Contains(out, volumename1) { + dockerCmd(c, "volume", "rm", volumename1) + } else { + dockerCmd(c, "volume", "rm", volumename2) + } +} + +// Test for #1351 +func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo") +} + +func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", "busybox", "touch", prefix+"/other/bar") + dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") +} + +// this tests verifies the ID format for the container +func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { + out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true") + if err != nil { + c.Fatal(err) + } + if exit != 0 { + c.Fatalf("expected exit code 0 received %d", exit) + } + + match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) + if err != nil { + c.Fatal(err) + } + if !match { + c.Fatalf("Invalid container ID: %s", out) + } +} + +// Test that creating a container with a volume doesn't crash. Regression test for #995. +func (s *DockerSuite) TestRunCreateVolume(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true") +} + +// Test that creating a volume with a symlink in its path works correctly. Test for #5152. +// Note that this bug happens only with symlinks with a target that starts with '/'. +func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { + // Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...) + testRequires(c, DaemonIsLinux) + image := "docker-test-createvolumewithsymlink" + + buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") + buildCmd.Stdin = strings.NewReader(`FROM busybox + RUN ln -s home /bar`) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + c.Fatalf("could not build '%s': %v", image, err) + } + + _, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } + + volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo") + c.Assert(err, checker.IsNil) + + _, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink") + if err != nil || exitCode != 0 { + c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) + } + + _, err = os.Stat(volPath) + if !os.IsNotExist(err) { + c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) + } +} + +// Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. +func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, DaemonIsLinux) + name := "docker-test-volumesfromsymlinkpath" + prefix := "" + dfContents := `FROM busybox + RUN ln -s home /foo + VOLUME ["/foo/bar"]` + + if daemonPlatform == "windows" { + prefix = `c:` + dfContents = `FROM ` + WindowsBaseImage + ` + RUN mkdir c:\home + RUN mklink /D c:\foo c:\home + VOLUME ["c:/foo/bar"] + ENTRYPOINT c:\windows\system32\cmd.exe` + } + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = strings.NewReader(dfContents) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) + } + + out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name) + if err != nil || exitCode != 0 { + c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out) + } + + _, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } +} + +func (s *DockerSuite) TestRunExitCode(c *check.C) { + var ( + exit int + err error + ) + + _, exit, err = dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72") + + if err == nil { + c.Fatal("should not have a non nil error") + } + if exit != 72 { + c.Fatalf("expected exit code 72 received %d", exit) + } +} + +func (s *DockerSuite) TestRunUserDefaults(c *check.C) { + expected := "uid=0(root) gid=0(root)" + if daemonPlatform == "windows" { + expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)" + } + out, _ := dockerCmd(c, "run", "busybox", "id") + if !strings.Contains(out, expected) { + c.Fatalf("expected '%s' got %s", expected, out) + } +} + +func (s *DockerSuite) TestRunUserByName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id") + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + c.Fatalf("expected root user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id") + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + c.Fatalf("expected daemon user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDBig(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux, NotArm) + out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id") + if err == nil { + c.Fatal("No error, but must be.", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id") + if err == nil { + c.Fatal("No error, but must be.", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDZero(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-u", "0", "busybox", "id") + if err != nil { + c.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { + c.Fatalf("expected daemon user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserNotFound(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + _, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id") + if err == nil { + c.Fatal("unknown user should cause container to fail") + } +} + +func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) { + sleepTime := "2" + group := sync.WaitGroup{} + group.Add(2) + + errChan := make(chan error, 2) + for i := 0; i < 2; i++ { + go func() { + defer group.Done() + _, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime) + errChan <- err + }() + } + + group.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestRunEnvironment(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test relies currently on unix functionality. + testRequires(c, DaemonIsLinux) + cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") + cmd.Env = append(os.Environ(), + "TRUE=false", + "TRICKY=tri\ncky\n", + ) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnv := strings.Split(strings.TrimSpace(out), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=testing", + "FALSE=true", + "TRUE=false", + "TRICKY=tri", + "cky", + "", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test relies currently on unix functionality. + testRequires(c, DaemonIsLinux) + + // Test to make sure that when we use -e on env vars that are + // not set in our local env that they're removed (if present) in + // the container + + cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env") + cmd.Env = appendBaseEnv(true) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnv := strings.Split(strings.TrimSpace(out), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test relies currently on unix functionality. + testRequires(c, DaemonIsLinux) + + // Test to make sure that when we use -e on env vars that are + // already in the env that we're overriding them + + cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env") + cmd.Env = appendBaseEnv(true, "HOSTNAME=bar") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnv := strings.Split(strings.TrimSpace(out), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root2", + "HOSTNAME=bar", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { + if daemonPlatform == "windows" { + // Windows busybox does not have ping. Use built in ping instead. + dockerCmd(c, "run", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) { + // TODO Windows: This is Linux specific as --link is not supported and + // this will be deprecated in favor of container networking model. + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "--name", "linked", "busybox", "true") + + _, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true") + if err == nil { + c.Fatal("Expected error") + } +} + +// #7851 hostname outside container shows FQDN, inside only shortname +// For testing purposes it is not required to set host's hostname directly +// and use "--net=host" (as the original issue submitter did), as the same +// codepath is executed with "docker run -h ". Both were manually +// tested, but this testcase takes the simpler path of using "run -h .." +func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) { + // TODO Windows: -h is not yet functional. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname") + if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { + c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunGroupAdd(c *check.C) { + // Not applicable for Windows as there is no concept of --group-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id") + + groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777" + if actual := strings.Trim(out, "\r\n"); actual != groupsList { + c.Fatalf("expected output %s received %s", groupsList, actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotArm) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 { + c.Fatal("sys should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 { + c.Fatalf("sys should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 { + c.Fatal("proc should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger"); code != 0 { + c.Fatalf("proc should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + // TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null") + deviceLineFields := strings.Fields(out) + deviceLineFields[6] = "" + deviceLineFields[7] = "" + deviceLineFields[8] = "" + expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} + + if !(reflect.DeepEqual(deviceLineFields, expected)) { + c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) + } +} + +func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") + if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { + c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) { + // Not applicable on Windows as it does not support chroot + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "busybox", "chroot", "/", "true") +} + +func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { + c.Fatalf("expected output /dev/nulo, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/zero" { + c.Fatalf("expected output /dev/zero, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + _, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero") + if err == nil { + c.Fatalf("run container with device mode ro should fail") + } +} + +func (s *DockerSuite) TestRunModeHostname(c *check.C) { + // Not applicable on Windows as Windows does not support -h + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") + + if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { + c.Fatalf("expected 'testhostname', but says: %q", actual) + } + + out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname") + + hostname, err := os.Hostname() + if err != nil { + c.Fatal(err) + } + if actual := strings.Trim(out, "\r\n"); actual != hostname { + c.Fatalf("expected %q, but says: %q", hostname, actual) + } +} + +func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { + out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd") + expected := "/\n" + if daemonPlatform == "windows" { + expected = "C:" + expected + } + if out != expected { + c.Fatalf("pwd returned %q (expected %s)", s, expected) + } +} + +func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { + if daemonPlatform == "windows" { + // Windows busybox will fail with Permission Denied on items such as pagefile.sys + dockerCmd(c, "run", "-v", `c:\:c:\host`, WindowsBaseImage, "cmd", "-c", "dir", `c:\host`) + } else { + dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") + } +} + +func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { + mount := "/:/" + targetDir := "/host" + if daemonPlatform == "windows" { + mount = `c:\:c\` + targetDir = "c:/host" // Forward slash as using busybox + } + out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir) + if err == nil { + c.Fatal(out, err) + } +} + +// Verify that a container gets default DNS when only localhost resolvers exist +func (s *DockerSuite) TestRunDNSDefaultOptions(c *check.C) { + // Not applicable on Windows as this is testing Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // preserve original resolv.conf for restoring after test + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + // defer restored original conf + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + c.Fatal(err) + } + }() + + // test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost + // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by + // GetNameservers(), leading to a replacement of nameservers with the default set + tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + + actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + // check that the actual defaults are appended to the commented out + // localhost resolver (which should be preserved) + // NOTE: if we ever change the defaults from google dns, this will break + expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" + if actual != expected { + c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual) + } +} + +func (s *DockerSuite) TestRunDNSOptions(c *check.C) { + // Not applicable on Windows as Windows does not support --dns*, or + // the Unix-specific functionality of resolv.conf. + testRequires(c, DaemonIsLinux) + out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf") + + // The client will get a warning on stderr when setting DNS to a localhost address; verify this: + if !strings.Contains(stderr, "Localhost DNS setting") { + c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) + } + + actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" { + c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual) + } + + out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf") + + actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) + if actual != "nameserver 127.0.0.1 options ndots:3" { + c.Fatalf("expected 'nameserver 127.0.0.1 options ndots:3', but says: %q", actual) + } +} + +func (s *DockerSuite) TestRunDNSRepeatOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf") + + actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + if actual != "search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" { + c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", actual) + } +} + +func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *check.C) { + // Not applicable on Windows as testing Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers := resolvconf.GetNameservers(origResolvConf, types.IP) + hostSearch := resolvconf.GetSearchDomains(origResolvConf) + + var out string + out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") + + if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" { + c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) + } + + actualSearch := resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) + } + } + + out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") + + actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP) + if len(actualNameservers) != len(hostNameservers) { + c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNameservers), len(actualNameservers)) + } + for i := range actualNameservers { + if actualNameservers[i] != hostNameservers[i] { + c.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNameservers[i]) + } + } + + if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { + c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0])) + } + + // test with file + tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + // put the old resolvconf back + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + c.Fatal(err) + } + }() + + resolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers = resolvconf.GetNameservers(resolvConf, types.IP) + hostSearch = resolvconf.GetSearchDomains(resolvConf) + + out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { + c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) + } + + actualSearch = resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) + } + } +} + +// Test to see if a non-root user can resolve a DNS name. Also +// check if the container resolv.conf file has at least 0644 perm. +func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) { + // Not applicable on Windows as Windows does not support --user + testRequires(c, SameHostDaemon, Network, DaemonIsLinux, NotArm) + + dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org") + + cID, err := getIDByName("testperm") + if err != nil { + c.Fatal(err) + } + + fmode := (os.FileMode)(0644) + finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf")) + if err != nil { + c.Fatal(err) + } + + if (finfo.Mode() & fmode) != fmode { + c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String()) + } +} + +// Test if container resolv.conf gets updated the next time it restarts +// if host /etc/resolv.conf has changed. This only applies if the container +// uses the host's /etc/resolv.conf and does not have any dns options provided. +func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { + // Not applicable on Windows as testing unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + c.Skip("Unstable test, to be re-activated once #19937 is resolved") + + tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n") + tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1") + + //take a copy of resolv.conf for restoring after test completes + resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + + // This test case is meant to test monitoring resolv.conf when it is + // a regular file not a bind mounc. So we unmount resolv.conf and replace + // it with a file containing the original settings. + mounted, err := mount.Mounted("/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + if mounted { + cmd := exec.Command("umount", "/etc/resolv.conf") + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + } + + //cleanup + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + }() + + //1. test that a restarting container gets an updated resolv.conf + dockerCmd(c, "run", "--name=first", "busybox", "true") + containerID1, err := getIDByName("first") + if err != nil { + c.Fatal(err) + } + + // replace resolv.conf with our temporary copy + bytesResolvConf := []byte(tmpResolvConf) + if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // start the container again to pickup changes + dockerCmd(c, "start", "first") + + // check for update in container + containerResolv, err := readContainerFile(containerID1, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) + } + + /* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } */ + //2. test that a restarting container does not receive resolv.conf updates + // if it modified the container copy of the starting point resolv.conf + dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf") + containerID2, err := getIDByName("second") + if err != nil { + c.Fatal(err) + } + + //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + + // start the container again + dockerCmd(c, "start", "second") + + // check for update in container + containerResolv, err = readContainerFile(containerID2, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + if bytes.Equal(containerResolv, resolvConfSystem) { + c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv)) + } + + //3. test that a running container's resolv.conf is not modified while running + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + runningContainerID := strings.TrimSpace(out) + + // replace resolv.conf + if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // check for update in container + containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + if bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv)) + } + + //4. test that a running container's resolv.conf is updated upon restart + // (the above container is still running..) + dockerCmd(c, "restart", runningContainerID) + + // check for update in container + containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv)) + } + + //5. test that additions of a localhost resolver are cleaned from + // host resolv.conf before updating container's resolv.conf copies + + // replace resolv.conf with a localhost-only nameserver copy + bytesResolvConf = []byte(tmpLocalhostResolvConf) + if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // start the container again to pickup changes + dockerCmd(c, "start", "first") + + // our first exited container ID should have been updated, but with default DNS + // after the cleanup of resolv.conf found only a localhost nameserver: + containerResolv, err = readContainerFile(containerID1, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" + if !bytes.Equal(containerResolv, []byte(expected)) { + c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) + } + + //6. Test that replacing (as opposed to modifying) resolv.conf triggers an update + // of containers' resolv.conf. + + // Restore the original resolv.conf + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + + // Run the container so it picks up the old settings + dockerCmd(c, "run", "--name=third", "busybox", "true") + containerID3, err := getIDByName("third") + if err != nil { + c.Fatal(err) + } + + // Create a modified resolv.conf.aside and override resolv.conf with it + bytesResolvConf = []byte(tmpResolvConf) + if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + + // start the container again to pickup changes + dockerCmd(c, "start", "third") + + // check for update in container + containerResolv, err = readContainerFile(containerID3, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv)) + } + + //cleanup, restore original resolv.conf happens in defer func() +} + +func (s *DockerSuite) TestRunAddHost(c *check.C) { + // Not applicable on Windows as it does not support --add-host + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts") + + actual := strings.Trim(out, "\r\n") + if actual != "86.75.30.9\textra" { + c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual) + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode +// but using --attach instead of -a to make sure we read the flag correctly +func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) { + cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true") + _, stderr, _, err := runCommandWithStdoutStderr(cmd) + if err == nil { + c.Fatal("Container should have exited with error code different than 0") + } else if !strings.Contains(stderr, "Conflicting options: -a and -d") { + c.Fatal("Should have been returned an error with conflicting options -a and -d") + } +} + +func (s *DockerSuite) TestRunState(c *check.C) { + // TODO Windows: This needs some rework as Windows busybox does not support top + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + if pid1 == "0" { + c.Fatal("Container state Pid 0") + } + + dockerCmd(c, "stop", id) + state = inspectField(c, id, "State.Running") + if state != "false" { + c.Fatal("Container state is 'running'") + } + pid2 := inspectField(c, id, "State.Pid") + if pid2 == pid1 { + c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) + } + + dockerCmd(c, "start", id) + state = inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid3 := inspectField(c, id, "State.Pid") + if pid3 == pid1 { + c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) + } +} + +// Test for #1737 +func (s *DockerSuite) TestRunCopyVolumeUIDGID(c *check.C) { + // Not applicable on Windows as it does not support uid or gid in this way + testRequires(c, DaemonIsLinux) + name := "testrunvolumesuidgid" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + RUN echo 'dockerio:x:1001:' >> /etc/group + RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that the uid and gid is copied from the image to the volume + out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") + out = strings.TrimSpace(out) + if out != "dockerio:dockerio" { + c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) + } +} + +// Test for #1582 +func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { + // TODO Windows, post TP5. Windows does not yet support volume functionality + // that copies from the image to the volume. + testRequires(c, DaemonIsLinux) + name := "testruncopyvolumecontent" + _, err := buildImage(name, + `FROM busybox + RUN mkdir -p /hello/local && echo hello > /hello/local/world`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that the content is copied from the image to the volume + out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello") + if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) { + c.Fatal("Container failed to transfer content to volume") + } +} + +func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { + name := "testrunmdcleanuponentrypoint" + if _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["echo"] + CMD ["testingpoint"]`, + true); err != nil { + c.Fatal(err) + } + + out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name) + if exit != 0 { + c.Fatalf("expected exit code 0 received %d, out: %q", exit, out) + } + out = strings.TrimSpace(out) + expected := "root" + if daemonPlatform == "windows" { + if strings.Contains(WindowsBaseImage, "windowsservercore") { + expected = `user manager\containeradministrator` + } else { + expected = `ContainerAdministrator` // nanoserver + } + } + if out != expected { + c.Fatalf("Expected output %s, got %q. %s", expected, out, WindowsBaseImage) + } +} + +// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected +func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { + existingFile := "/bin/cat" + expected := "not a directory" + if daemonPlatform == "windows" { + existingFile = `\windows\system32\ntdll.dll` + expected = `The directory name is invalid.` + } + + out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox") + if !(err != nil && exitCode == 125 && strings.Contains(out, expected)) { + c.Fatalf("Existing binary as a directory should error out with exitCode 125; we got: %s, exitCode: %d", out, exitCode) + } +} + +func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { + name := "testrunexitonstdinclose" + + meow := "/bin/cat" + delay := 60 + if daemonPlatform == "windows" { + meow = "cat" + } + runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow) + + stdin, err := runCmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + stdout, err := runCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := runCmd.Start(); err != nil { + c.Fatal(err) + } + if _, err := stdin.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + c.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "hello" { + c.Fatalf("Output should be 'hello', got '%q'", line) + } + if err := stdin.Close(); err != nil { + c.Fatal(err) + } + finish := make(chan error) + go func() { + finish <- runCmd.Wait() + close(finish) + }() + select { + case err := <-finish: + c.Assert(err, check.IsNil) + case <-time.After(time.Duration(delay) * time.Second): + c.Fatal("docker run failed to exit on stdin close") + } + state := inspectField(c, name, "State.Running") + + if state != "false" { + c.Fatal("Container must be stopped after stdin closing") + } +} + +// Test run -i --restart xxx doesn't hang +func (s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) { + name := "test-inter-restart" + + result := icmd.StartCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "--name", name, "--restart=always", "busybox", "sh"}, + Stdin: bytes.NewBufferString("exit 11"), + }) + c.Assert(result.Error, checker.IsNil) + defer func() { + dockerCmdWithResult("stop", name).Assert(c, icmd.Success) + }() + + result = icmd.WaitOnCmd(60*time.Second, result) + c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 11}) +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) { + // Cannot run on Windows as Windows does not support diff. + testRequires(c, DaemonIsLinux) + name := "writehosts" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/hosts should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func eqToBaseDiff(out string, c *check.C) bool { + name := "eqToBaseDiff" + stringutils.GenerateRandomAlphaOnlyString(32) + dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello") + cID, err := getIDByName(name) + c.Assert(err, check.IsNil) + + baseDiff, _ := dockerCmd(c, "diff", cID) + baseArr := strings.Split(baseDiff, "\n") + sort.Strings(baseArr) + outArr := strings.Split(out, "\n") + sort.Strings(outArr) + return sliceEq(baseArr, outArr) +} + +func sliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) { + // Cannot run on Windows as Windows does not support diff. + testRequires(c, DaemonIsLinux) + name := "writehostname" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/hostname should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) { + // Cannot run on Windows as Windows does not support diff. + testRequires(c, DaemonIsLinux) + name := "writeresolv" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/resolv.conf should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { + // Cannot run on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux) + name := "baddevice" + out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true") + + if err == nil { + c.Fatal("Run should fail with bad device") + } + expected := `"/etc": not a device node` + if !strings.Contains(out, expected) { + c.Fatalf("Output should contain %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunEntrypoint(c *check.C) { + name := "entrypoint" + + out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "echo", "busybox", "-n", "foobar") + expected := "foobar" + + if out != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunBindMounts(c *check.C) { + testRequires(c, SameHostDaemon) + if daemonPlatform == "linux" { + testRequires(c, DaemonIsLinux, NotUserNamespace) + } + + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir("", "docker-test-container") + if err != nil { + c.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + writeFile(path.Join(tmpDir, "touch-me"), "", c) + + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform != "windows" || windowsDaemonKV >= 14350 { + // Test reading from a read-only bind mount + out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp") + if !strings.Contains(out, "touch-me") { + c.Fatal("Container failed to read from bind mount") + } + } + + // test writing to bind mount + if daemonPlatform == "windows" { + dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla") + } else { + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") + } + + readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + + // test mounting to an illegal destination directory + _, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") + if err == nil { + c.Fatal("Container bind mounted illegal directory") + } + + // Windows does not (and likely never will) support mounting a single file + if daemonPlatform != "windows" { + // test mount a file + dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") + content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + expected := "yotta" + if content != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, content) + } + } +} + +// Ensure that CIDFile gets deleted if it's empty +// Perform this test by making `docker run` fail +func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. + testRequires(c, DaemonIsLinux) + + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpCidFile := path.Join(tmpDir, "cid") + + image := "emptyfs" + if daemonPlatform == "windows" { + // Windows can't support an emptyfs image. Just use the regular Windows image + image = WindowsBaseImage + } + out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image) + if err == nil { + c.Fatalf("Run without command must fail. out=%s", out) + } else if !strings.Contains(out, "No command specified") { + c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err) + } + + if _, err := os.Stat(tmpCidFile); err == nil { + c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) + } +} + +// #2098 - Docker cidFiles only contain short version of the containerId +//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test" +// TestRunCidFile tests that run --cidfile returns the longid +func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + c.Fatal(err) + } + tmpCidFile := path.Join(tmpDir, "cid") + defer os.RemoveAll(tmpDir) + + out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true") + + id := strings.TrimSpace(out) + buffer, err := ioutil.ReadFile(tmpCidFile) + if err != nil { + c.Fatal(err) + } + cid := string(buffer) + if len(cid) != 64 { + c.Fatalf("--cidfile should be a long id, not %q", id) + } + if cid != id { + c.Fatalf("cid must be equal to %s, got %s", id, cid) + } +} + +func (s *DockerSuite) TestRunSetMacAddress(c *check.C) { + mac := "12:34:56:78:9a:bc" + var out string + if daemonPlatform == "windows" { + out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'") + mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs + } else { + out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'") + } + + actualMac := strings.TrimSpace(out) + if actualMac != mac { + c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) + } +} + +func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) { + // TODO Windows. Network settings are not propagated back to inspect. + testRequires(c, DaemonIsLinux) + mac := "12:34:56:78:9a:bc" + out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top") + + id := strings.TrimSpace(out) + inspectedMac := inspectField(c, id, "NetworkSettings.Networks.bridge.MacAddress") + if inspectedMac != mac { + c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) + } +} + +// test docker run use an invalid mac address +func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) { + out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox") + //use an invalid mac address should with an error out + if err == nil || !strings.Contains(out, "is not a valid mac address") { + c.Fatalf("run with an invalid --mac-address should with error out") + } +} + +func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) { + // TODO Windows. Network settings are not propagated back to inspect. + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") + + id := strings.TrimSpace(out) + ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress") + iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), + "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT") + out, _, err := runCommandWithOutput(iptCmd) + if err != nil { + c.Fatal(err, out) + } + if err := deleteContainer(id); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") +} + +func (s *DockerSuite) TestRunPortInUse(c *check.C) { + // TODO Windows. The duplicate NAT message returned by Windows will be + // changing as is currently completely undecipherable. Does need modifying + // to run sh rather than top though as top isn't in Windows busybox. + testRequires(c, SameHostDaemon, DaemonIsLinux) + + port := "1234" + dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top") + + out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top") + if err == nil { + c.Fatalf("Binding on used port must fail") + } + if !strings.Contains(out, "port is already allocated") { + c.Fatalf("Out must be about \"port is already allocated\", got %s", out) + } +} + +// https://github.com/docker/docker/issues/12148 +func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) { + // TODO Windows. -P is not yet supported + testRequires(c, DaemonIsLinux) + // allocate a dynamic port to get the most recent + out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id, "80") + + strPort := strings.Split(strings.TrimSpace(out), ":")[1] + port, err := strconv.ParseInt(strPort, 10, 64) + if err != nil { + c.Fatalf("invalid port, got: %s, error: %s", strPort, err) + } + + // allocate a static port and a dynamic port together, with static port + // takes the next recent port in dynamic port range. + dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top") +} + +// Regression test for #7792 +func (s *DockerSuite) TestRunMountOrdering(c *check.C) { + // TODO Windows: Post TP5. Updated, but Windows does not support nested mounts currently. + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + // Create a temporary tmpfs mounc. + fooDir := filepath.Join(tmpDir, "foo") + if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { + c.Fatalf("failed to mkdir at %s - %s", fooDir, err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", + "-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir), + "busybox:latest", "sh", "-c", + "ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me") +} + +// Regression test for https://github.com/docker/docker/issues/8259 +func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) { + // Not applicable on Windows as Windows does not support volumes + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + linkPath := os.TempDir() + "/testlink2" + if err := os.Symlink(tmpDir, linkPath); err != nil { + c.Fatal(err) + } + defer os.RemoveAll(linkPath) + + // Create first container + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") + + // Create second container with same symlinked path + // This will fail if the referenced issue is hit with a "Volume exists" error + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") +} + +//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container +func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) { + // While Windows supports volumes, it does not support --add-host hence + // this test is not applicable on Windows. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf") + if !strings.Contains(out, "nameserver 127.0.0.1") { + c.Fatal("/etc volume mount hides /etc/resolv.conf") + } + + out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname") + if !strings.Contains(out, "test123") { + c.Fatal("/etc volume mount hides /etc/hostname") + } + + out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts") + out = strings.Replace(out, "\n", " ", -1) + if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") { + c.Fatal("/etc volume mount hides /etc/hosts") + } +} + +func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) { + // TODO Windows (Post RS1). Windows does not support volumes which + // are pre-populated such as is built in the dockerfile used in this test. + testRequires(c, DaemonIsLinux) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if _, err := buildImage("dataimage", + `FROM busybox + RUN ["mkdir", "-p", "/foo"] + RUN ["touch", "/foo/bar"]`, + true); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--name", "test", "-v", prefix+slash+"foo", "busybox") + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) + } + + tmpDir := randomTmpDirPath("docker_test_bind_mount_copy_data", daemonPlatform) + if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + } +} + +func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) { + // just run with unknown image + cmd := exec.Command(dockerBinary, "run", "asdfsg") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err == nil { + c.Fatal("Run with unknown image should fail") + } + if stdout.Len() != 0 { + c.Fatalf("Stdout contains output from pull: %s", stdout) + } +} + +func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if _, err := buildImage("run_volumes_clean_paths", + `FROM busybox + VOLUME `+prefix+`/foo/`, + true); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") + + out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/foo/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`) + c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { + c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/bar/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar") + c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { + c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out) + } +} + +// Regression test for #3631 +func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { + // TODO Windows: This should be able to run on Windows if can find an + // alternate to /dev/zero and /dev/stdout. + testRequires(c, DaemonIsLinux) + cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") + + stdout, err := cont.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := cont.Start(); err != nil { + c.Fatal(err) + } + n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) + if err != nil { + c.Fatal(err) + } + + expected := 2 * 1024 * 2000 + if n != expected { + c.Fatalf("Expected %d, got %d", expected, n) + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) { + // TODO Windows: -P is not currently supported. Also network + // settings are not propagated back. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + var ports nat.PortMap + if err := json.Unmarshal([]byte(portstr), &ports); err != nil { + c.Fatal(err) + } + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatalf("Port is not mapped for the port %s", port) + } + } +} + +func (s *DockerSuite) TestRunExposePort(c *check.C) { + out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox") + c.Assert(err, checker.NotNil, check.Commentf("--expose with an invalid port should error out")) + c.Assert(out, checker.Contains, "invalid range format for --expose") +} + +func (s *DockerSuite) TestRunModeIpcHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostIpc, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc != out { + c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc == out { + c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out) + } +} + +func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + + parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if parentContainerIpc != out { + c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out) + } + + catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test") + if catOutput != "test" { + c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput) + } + + // check that /dev/mqueue is actually of mqueue type + grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts") + if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") { + c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput) + } + + lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue") + lsOutput = strings.Trim(lsOutput, "\n") + if lsOutput != "toto" { + c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput) + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top") + if !strings.Contains(out, "abcd1234") || err == nil { + c.Fatalf("run IPC from a non exists container should with correct error out") + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "create", "busybox") + + id := strings.TrimSpace(out) + out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", id), "busybox") + if err == nil { + c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err) + } +} + +func (s *DockerSuite) TestRunModePIDContainer(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + + parentContainerPid, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--pid=container:%s", id), "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if parentContainerPid != out { + c.Fatalf("PID different with --pid=container:%s %s != %s\n", id, parentContainerPid, out) + } +} + +func (s *DockerSuite) TestRunModePIDContainerNotExists(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--pid", "container:abcd1234", "busybox", "top") + if !strings.Contains(out, "abcd1234") || err == nil { + c.Fatalf("run PID from a non exists container should with correct error out") + } +} + +func (s *DockerSuite) TestRunModePIDContainerNotRunning(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "create", "busybox") + + id := strings.TrimSpace(out) + out, _, err := dockerCmdWithError("run", fmt.Sprintf("--pid=container:%s", id), "busybox") + if err == nil { + c.Fatalf("Run container with pid mode container should fail with non running container: %s\n%s", out, err) + } +} + +func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") + defer os.Remove("/dev/mqueue/toto") + defer os.Remove("/dev/shm/test") + volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm") + c.Assert(err, checker.IsNil) + if volPath != "/dev/shm" { + c.Fatalf("volumePath should have been /dev/shm, was %s", volPath) + } + + out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test") + if out != "test" { + c.Fatalf("Output of /dev/shm/test expected test but found: %s", out) + } + + // Check that the mq was created + if _, err := os.Stat("/dev/mqueue/toto"); err != nil { + c.Fatalf("Failed to confirm '/dev/mqueue/toto' presence on host: %s", err.Error()) + } +} + +func (s *DockerSuite) TestContainerNetworkMode(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if parentContainerNet != out { + c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out) + } +} + +func (s *DockerSuite) TestRunModePIDHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostPid, err := os.Readlink("/proc/1/ns/pid") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid != out { + c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid == out { + c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out) + } +} + +func (s *DockerSuite) TestRunModeUTSHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + hostUTS, err := os.Readlink("/proc/1/ns/uts") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS != out { + c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS == out { + c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out) + } + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--uts=host", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictUTSHostname.Error()) +} + +func (s *DockerSuite) TestRunTLSVerify(c *check.C) { + // Remote daemons use TLS and this test is not applicable when TLS is required. + testRequires(c, SameHostDaemon) + if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 { + c.Fatalf("Should have worked: %v:\n%v", err, out) + } + + // Regardless of whether we specify true or false we need to + // test to make sure tls is turned on if --tlsverify is specified at all + result := dockerCmdWithResult("--tlsverify=false", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "error during connect"}) + + result = dockerCmdWithResult("--tlsverify=true", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "cert"}) +} + +func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) { + // TODO Windows. Once moved to libnetwork/CNM, this may be able to be + // re-instated. + testRequires(c, DaemonIsLinux) + // first find allocator current position + out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id) + + out = strings.TrimSpace(out) + if out == "" { + c.Fatal("docker port command output is empty") + } + out = strings.Split(out, ":")[1] + lastPort, err := strconv.Atoi(out) + if err != nil { + c.Fatal(err) + } + port := lastPort + 1 + l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + c.Fatal(err) + } + defer l.Close() + + out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id = strings.TrimSpace(out) + dockerCmd(c, "port", id) +} + +func (s *DockerSuite) TestRunTTYWithPipe(c *check.C) { + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("run should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("container is running but should have failed") + } +} + +func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) { + addr := "00:16:3E:08:00:50" + args := []string{"run", "--mac-address", addr} + expected := addr + + if daemonPlatform != "windows" { + args = append(args, "busybox", "ifconfig") + } else { + args = append(args, WindowsBaseImage, "ipconfig", "/all") + expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1) + } + + if out, _ := dockerCmd(c, args...); !strings.Contains(out, expected) { + c.Fatalf("Output should have contained %q: %s", expected, out) + } +} + +func (s *DockerSuite) TestRunNetHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet == out { + c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out) + } +} + +func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) { + // TODO Windows. As Windows networking evolves and converges towards + // CNM, this test may be possible to enable on Windows. + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") +} + +func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Container should have host network namespace") + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) { + // TODO Windows. This may be possible to enable in the future. However, + // Windows does not currently support --expose, or populate the network + // settings seen through inspect. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + + var ports nat.PortMap + err := json.Unmarshal([]byte(portstr), &ports) + c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal: %v", portstr)) + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatal("Port is not mapped for the port "+port, out) + } + } +} + +func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { + runSleepingContainer(c, "--name=testrunsetdefaultrestartpolicy") + out := inspectField(c, "testrunsetdefaultrestartpolicy", "HostConfig.RestartPolicy.Name") + if out != "no" { + c.Fatalf("Set default restart policy failed") + } +} + +func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + timeout := 10 * time.Second + if daemonPlatform == "windows" { + timeout = 120 * time.Second + } + + id := strings.TrimSpace(string(out)) + if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil { + c.Fatal(err) + } + + count := inspectField(c, id, "RestartCount") + if count != "3" { + c.Fatalf("Container was restarted %s times, expected %d", count, 3) + } + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + if MaximumRetryCount != "3" { + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") + } +} + +func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) { + dockerCmd(c, "run", "--rm", "busybox", "touch", "/file") +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + testPriv := true + // don't test privileged mode subtest if user namespaces enabled + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + testPriv = false + } + testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me") +} + +func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) { + // Not applicable on Windows due to use of Unix specific functionality, plus + // the use of --read-only which is not supported. + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + // Ensure we have not broken writing /dev/pts + out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount") + if status != 0 { + c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.") + } + expected := "type devpts (rw," + if !strings.Contains(string(out), expected) { + c.Fatalf("expected output to contain %s but contains %s", expected, out) + } +} + +func testReadOnlyFile(c *check.C, testPriv bool, filenames ...string) { + touch := "touch " + strings.Join(filenames, " ") + out, _, err := dockerCmdWithError("run", "--read-only", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } + + if !testPriv { + return + } + + out, _, err = dockerCmdWithError("run", "--read-only", "--privileged", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) { + // Not applicable on Windows which does not support --link + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts") + if !strings.Contains(string(out), "testlinked") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDNSFlag(c *check.C) { + // Not applicable on Windows which does not support either --read-only or --dns. + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf") + if !strings.Contains(string(out), "1.1.1.1") { + c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(string(out), "testreadonly") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used") + } +} + +func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + runSleepingContainer(c, "--name=voltest", "-v", prefix+"/foo") + runSleepingContainer(c, "--name=restarter", "--volumes-from", "voltest") + + // Remove the main volume container and restart the consuming container + dockerCmd(c, "rm", "-f", "voltest") + + // This should not fail since the volumes-from were already applied + dockerCmd(c, "restart", "restarter") +} + +// run container with --rm should remove container if exit code != 0 +func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { + name := "flowers" + out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists") + if err == nil { + c.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + c.Fatal(out, err) + } + + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { + name := "sparkles" + out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound") + if err == nil { + c.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + c.Fatal(out, err) + } + + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunPIDHostWithChildIsKillable(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, NotUserNamespace) + name := "ibuildthecloud" + dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi") + + c.Assert(waitRun(name), check.IsNil) + + errchan := make(chan error) + go func() { + if out, _, err := dockerCmdWithError("kill", name); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + select { + case err := <-errchan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Kill container timed out") + } +} + +func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) { + // TODO Windows. This may be possible to enable once Windows supports + // memory limits on containers + testRequires(c, DaemonIsLinux) + // this memory limit is 1 byte less than the min, which is 4MB + // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22 + out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox") + if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") { + c.Fatalf("expected run to fail when using too low a memory limit: %q", out) + } +} + +func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version") + if err == nil || code == 0 { + c.Fatal("standard container should not be able to write to /proc/asound") + } +} + +func (s *DockerSuite) TestRunReadProcTimer(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadProcLatency(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + // some kernels don't have this configured so skip the test if this file is not found + // on the host running the tests. + if _, err := os.Stat("/proc/latency_stats"); err != nil { + c.Skip("kernel doesn't have latency_stats configured") + return + } + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testReadPaths := []string{ + "/proc/latency_stats", + "/proc/timer_stats", + "/proc/kcore", + } + for i, filePath := range testReadPaths { + name := fmt.Sprintf("procsieve-%d", i) + shellCmd := fmt.Sprintf("exec 3<%s", filePath) + + out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if exitCode != 0 { + return + } + if err != nil { + c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestMountIntoProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true") + if err == nil || code == 0 { + c.Fatal("container should not be able to mount into /proc") + } +} + +func (s *DockerSuite) TestMountIntoSys(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + testRequires(c, NotUserNamespace) + dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true") +} + +func (s *DockerSuite) TestRunUnshareProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + // In this test goroutines are used to run test cases in parallel to prevent the test from taking a long time to run. + errChan := make(chan error) + + go func() { + name := "acidburn" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount") + if err == nil || + !(strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + go func() { + name := "cereal" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + /* Ensure still fails if running privileged with the default policy */ + go func() { + name := "crashoverride" + out, _, err := dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp=unconfined", "--security-opt", "apparmor=docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("privileged unshare with apparmor should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + for i := 0; i < 3; i++ { + err := <-errChan + if err != nil { + c.Fatal(err) + } + } +} + +func (s *DockerSuite) TestRunPublishPort(c *check.C) { + // TODO Windows: This may be possible once Windows moves to libnetwork and CNM + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top") + out, _ := dockerCmd(c, "port", "test") + out = strings.Trim(out, "\r\n") + if out != "" { + c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out) + } +} + +// Issue #10184. +func (s *DockerSuite) TestDevicePermissions(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + const permissions = "crw-rw-rw-" + out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse") + if status != 0 { + c.Fatalf("expected status 0, got %d", status) + } + if !strings.HasPrefix(out, permissions) { + c.Fatalf("output should begin with %q, got %q", permissions, out) + } +} + +func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +// https://github.com/docker/docker/pull/14498 +func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true") + + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform != "windows" || windowsDaemonKV >= 14350 { + dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true") + } + dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true") + + if daemonPlatform != "windows" { + mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if mRO.RW { + c.Fatalf("Expected RO volume was RW") + } + } + + mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if !mRW.RW { + c.Fatalf("Expected RW volume was RO") + } +} + +func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testWritePaths := []string{ + /* modprobe and core_pattern should both be denied by generic + * policy of denials for /proc/sys/kernel. These files have been + * picked to be checked as they are particularly sensitive to writes */ + "/proc/sys/kernel/modprobe", + "/proc/sys/kernel/core_pattern", + "/proc/sysrq-trigger", + "/proc/kcore", + } + for i, filePath := range testWritePaths { + name := fmt.Sprintf("writeprocsieve-%d", i) + + shellCmd := fmt.Sprintf("exec 3>%s", filePath) + out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if code != 0 { + return + } + if err != nil { + c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + expected := "test123" + + filename := createTmpFile(c, expected) + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i]) + if actual != expected { + c.Fatalf("expected %s be: %q, but was: %q", nwfiles[i], expected, actual) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + filename := createTmpFile(c, "test123") + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i]) + if err == nil || exitCode == 0 { + c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux, UserNamespaceROMount) + + filename := createTmpFile(c, "test123") + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + _, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i]) + if exitCode != 0 { + c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode) + } + } + + for i := range nwfiles { + _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i]) + if err == nil || exitCode == 0 { + c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode) + } + } +} + +func (s *DockerTrustSuite) TestTrustedRun(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := s.setupTrustedImage(c, "trusted-run") + + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s\n", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try untrusted run to ensure we pushed the tag to the registry + runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + // Try trusted run on untrusted tag + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + c.Fatalf("Error expected when running trusted run with:\n%s", out) + } + + if !strings.Contains(string(out), "does not have trust data for") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-run-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + c.Fatalf("Error running trusted run in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "could not validate the path to a trusted root") { + c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out) + } + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try run + runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on untrusted run in the distant future:\n%s", out) + } + }) +} + +func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + if err != nil { + c.Fatalf("Restarting notary server failed.") + } + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + // Now, try running with the original client from this new trust server. This should fail because the new root is invalid. + runCmd = exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + + if err == nil { + c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) + } + if !strings.Contains(out, "could not rotate trust to a new trusted root") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } +} + +func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + _, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux) + + // Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace + // itself, but pid>1 should not be able to trace pid1. + _, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net") + if exitCode == 0 { + c.Fatal("ptrace was not successfully restricted by AppArmor") + } +} + +func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor) + + _, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net") + if exitCode != 0 { + c.Fatal("ptrace of self failed.") + } +} + +func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace) + _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo") + if exitCode == 0 { + // If our test failed, attempt to repair the host system... + _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo") + if exitCode == 0 { + c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.") + } + } +} + +func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox", "sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$") +} + +// run create container failed should clean up the container +func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) { + // TODO Windows. This may be possible to enable once link is supported + testRequires(c, DaemonIsLinux) + name := "unique_name" + _, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox") + c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!")) + + containerID, err := inspectFieldWithError(name, "Id") + c.Assert(err, checker.NotNil, check.Commentf("Expected not to have this container: %s!", containerID)) + c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID)) +} + +func (s *DockerSuite) TestRunNamedVolume(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar") + + out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + + out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") +} + +func (s *DockerSuite) TestRunWithUlimits(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n") + ul := strings.TrimSpace(out) + if ul != "42" { + c.Fatalf("expected `ulimit -n` to be 42, got %s", ul) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "test" + name := "cgroup-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "/cgroup-parent/test" + name := "cgroup-test" + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. +func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST" + cleanCgroupParent := "SHOULD_NOT_EXIST" + name := "cgroup-invalid-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. + if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. +func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST" + cleanCgroupParent := "/SHOULD_NOT_EXIST" + name := "cgroup-absolute-invalid-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. + if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + + filename := "/sys/fs/cgroup/devices/test123" + out, _, err := dockerCmdWithError("run", "busybox", "touch", filename) + if err == nil { + c.Fatal("expected cgroup mount point to be read-only, touch file should fail") + } + expected := "Read-only file system" + if !strings.Contains(out, expected) { + c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) + } +} + +func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true") + if err == nil || !strings.Contains(out, "cannot join own network") { + c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out) + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithDNSMacHosts(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top") + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) { + c.Fatalf("run --net=container with --dns should error out") + } + + out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) { + c.Fatalf("run --net=container with --mac-address should error out") + } + + out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) { + c.Fatalf("run --net=container with --add-host should error out") + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -p should error out") + } + + out, _, err = dockerCmdWithError("run", "-P", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -P should error out") + } + + out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) { + c.Fatalf("run --net=container with --expose should error out") + } +} + +func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) { + // Not applicable on Windows which does not support --net=container or --link + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test", "-d", "busybox", "top") + dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top") + dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top") +} + +func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) { + // TODO Windows: This may be possible to convert. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") + + var ( + count = 0 + parts = strings.Split(out, "\n") + ) + + for _, l := range parts { + if l != "" { + count++ + } + } + + if count != 1 { + c.Fatalf("Wrong interface count in container %d", count) + } + + if !strings.HasPrefix(out, "1: lo") { + c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) + } +} + +// Issue #4681 +func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { + if daemonPlatform == "windows" { + dockerCmd(c, "run", "--net=none", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) { + // Windows does not support --net=container + testRequires(c, DaemonIsLinux, ExecSupport) + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top") + out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname") + out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") + + if out1 != out { + c.Fatal("containers with shared net namespace should have same hostname") + } +} + +func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) { + // TODO Windows: Network settings are not currently propagated. This may + // be resolved in the future with the move to libnetwork and CNM. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") + id := strings.TrimSpace(out) + res := inspectField(c, id, "NetworkSettings.Networks.none.IPAddress") + if res != "" { + c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) + } +} + +func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) { + // Not applicable as Windows does not support --net=host + testRequires(c, DaemonIsLinux, NotUserNamespace, NotUserNamespace) + dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top") + dockerCmd(c, "stop", "first") + dockerCmd(c, "stop", "second") +} + +func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork") + dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first") +} + +func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") + // Run and connect containers to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // Check connectivity between containers in testnetwork2 + dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") + // Connect containers to testnetwork2 + dockerCmd(c, "network", "connect", "testnetwork2", "first") + dockerCmd(c, "network", "connect", "testnetwork2", "second") + // Check connectivity between containers + dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") +} + +func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") + // Run 1 container in testnetwork1 and another in testnetwork2 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Check Isolation between containers : ping must fail + _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") + c.Assert(err, check.NotNil) + // Connect first container to testnetwork2 + dockerCmd(c, "network", "connect", "testnetwork2", "first") + // ping must succeed now + _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") + c.Assert(err, check.IsNil) + + // Disconnect first container from testnetwork2 + dockerCmd(c, "network", "disconnect", "testnetwork2", "first") + // ping must fail again + _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestNetworkRmWithActiveContainers(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + // Run and connect containers to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // Network delete with active containers must fail + _, _, err := dockerCmdWithError("network", "rm", "testnetwork1") + c.Assert(err, check.NotNil) + + dockerCmd(c, "stop", "first") + _, _, err = dockerCmdWithError("network", "rm", "testnetwork1") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") + + // Run and connect containers to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // Check connectivity between containers in testnetwork2 + dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") + // Connect containers to testnetwork2 + dockerCmd(c, "network", "connect", "testnetwork2", "first") + dockerCmd(c, "network", "connect", "testnetwork2", "second") + // Check connectivity between containers + dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") + + // Stop second container and test ping failures on both networks + dockerCmd(c, "stop", "second") + _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2") + c.Assert(err, check.NotNil) + + // Start second container and connectivity must be restored on both networks + dockerCmd(c, "start", "second") + dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") + dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") +} + +func (s *DockerSuite) TestContainerWithConflictingHostNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + // Run a container with --net=host + dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Create a network using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + + // Connecting to the user defined network must fail + _, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + // Run second container in first container's network namespace + dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Create a network using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + + // Connecting to the user defined network must fail + out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error()) +} + +func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Create a network using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + + // Connecting to the user defined network must fail + out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error()) + + // create a container connected to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Connect second container to none network. it must fail as well + _, _, err = dockerCmdWithError("network", "connect", "none", "second") + c.Assert(err, check.NotNil) +} + +// #11957 - stdin with no tty does not exit if stdin is not closed even though container exited +func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) { + cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true") + in, err := cmd.StdinPipe() + c.Assert(err, check.IsNil) + defer in.Close() + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + cmd.Stderr = stdout + c.Assert(cmd.Start(), check.IsNil) + + waitChan := make(chan error) + go func() { + waitChan <- cmd.Wait() + }() + + select { + case err := <-waitChan: + c.Assert(err, check.IsNil, check.Commentf(stdout.String())) + case <-time.After(30 * time.Second): + c.Fatal("timeout waiting for command to exit") + } +} + +func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) { + // TODO Windows: This needs validation (error out) in the daemon. + testRequires(c, DaemonIsLinux) + out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n" + if !(strings.Contains(out, expected) || exitCode == 125) { + c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) + } +} + +func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) { + // TODO Windows: This needs validation (error out) in the daemon. + testRequires(c, DaemonIsLinux) + out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n" + if !(strings.Contains(out, expected) || exitCode == 125) { + c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) + } +} + +// TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127' +func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) { + name := "testNonExecutableCmd" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "foo") + _, exit, _ := runCommandWithOutput(runCmd) + stateExitCode := findContainerExitCode(c, name) + if !(exit == 127 && strings.Contains(stateExitCode, "127")) { + c.Fatalf("Run non-executable command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) + } +} + +// TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127. +func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) { + name := "testNonExistingCmd" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/bin/foo") + _, exit, _ := runCommandWithOutput(runCmd) + stateExitCode := findContainerExitCode(c, name) + if !(exit == 127 && strings.Contains(stateExitCode, "127")) { + c.Fatalf("Run non-existing command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) + } +} + +// TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or +// 127 on Windows. The difference is that in Windows, the container must be started +// as that's when the check is made (and yes, by its design...) +func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) { + expected := 126 + if daemonPlatform == "windows" { + expected = 127 + } + name := "testCmdCannotBeInvoked" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/etc") + _, exit, _ := runCommandWithOutput(runCmd) + stateExitCode := findContainerExitCode(c, name) + if !(exit == expected && strings.Contains(stateExitCode, strconv.Itoa(expected))) { + c.Fatalf("Run cmd that cannot be invoked should have errored with code %d, but we got exit: %d, State.ExitCode: %s", expected, exit, stateExitCode) + } +} + +// TestRunNonExistingImage checks that 'docker run foo' exits with error msg 125 and contains 'Unable to find image' +func (s *DockerSuite) TestRunNonExistingImage(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "foo") + out, exit, err := runCommandWithOutput(runCmd) + if !(err != nil && exit == 125 && strings.Contains(out, "Unable to find image")) { + c.Fatalf("Run non-existing image should have errored with 'Unable to find image' code 125, but we got out: %s, exit: %d, err: %s", out, exit, err) + } +} + +// TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed +func (s *DockerSuite) TestDockerFails(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-foo", "busybox") + out, exit, err := runCommandWithOutput(runCmd) + if !(err != nil && exit == 125) { + c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exit, err) + } +} + +// TestRunInvalidReference invokes docker run with a bad reference. +func (s *DockerSuite) TestRunInvalidReference(c *check.C) { + out, exit, _ := dockerCmdWithError("run", "busybox@foo") + if exit == 0 { + c.Fatalf("expected non-zero exist code; received %d", exit) + } + + if !strings.Contains(out, "Error parsing reference") { + c.Fatalf(`Expected "Error parsing reference" in output; got: %s`, out) + } +} + +// Test fix for issue #17854 +func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) { + // Not applicable on Windows as it does not support Linux uid/gid ownership + testRequires(c, DaemonIsLinux) + name := "testetcfileownership" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + RUN echo 'dockerio:x:1001:' >> /etc/group + RUN chown dockerio:dockerio /etc`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that dockerio ownership of /etc is retained at runtime + out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc") + out = strings.TrimSpace(out) + if out != "dockerio:dockerio" { + c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out) + } +} + +func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) { + testRequires(c, DaemonIsLinux) + + expected := "642" + out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj") + oomScoreAdj := strings.TrimSpace(out) + if oomScoreAdj != "642" { + c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj) + } +} + +func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]." + if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } + out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true") + c.Assert(err, check.NotNil) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]." + if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } +} + +func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1") + + // Make sure a bind mount under a shared volume propagated to host. + if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted { + c.Fatalf("Bind mount under shared volume did not propagate to host") + } + + mount.Unmount(path.Join(tmpDir, "mnt1")) +} + +func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Prepare a source directory with file in it. We will bind mount this + // directory and see if file shows up. + tmpDir2, err := ioutil.TempDir("", "volume-source2") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top") + + // Bind mount tmpDir2/ onto tmpDir/mnt1. If mount propagates inside + // container then contents of tmpDir2/slave-testfile should become + // visible at "/volume-dest/mnt1/slave-testfile" + cmd = exec.Command("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile") + + mount.Unmount(path.Join(tmpDir, "mnt1")) + + if out != "Test" { + c.Fatalf("Bind mount under slave volume did not propagate to container") + } +} + +func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, exitCode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile") + c.Assert(exitCode, checker.Not(checker.Equals), 0) + c.Assert(out, checker.Contains, "invalid mount config") +} + +func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { + testRequires(c, DaemonIsLinux) + + testImg := "testvolumecopy" + _, err := buildImage(testImg, ` + FROM busybox + RUN mkdir -p /foo && echo hello > /foo/hello + `, true) + c.Assert(err, check.IsNil) + + dockerCmd(c, "run", "-v", "foo:/foo", testImg) + out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") +} + +func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") + + dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "rm", "-fv", "test") + dockerCmd(c, "volume", "inspect", "test") + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") + + // Remove the parent so there are not other references to the volumes + dockerCmd(c, "rm", "-f", "parent") + // now remove the child and ensure the named volume (and only the named volume) still exists + dockerCmd(c, "rm", "-fv", "child") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) { + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + runSleepingContainer(c, "--name=test", "-p", "8000:8000") + + // Wait until container is fully up and running + c.Assert(waitRun("test"), check.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true") + // We will need the following `inspect` to diagnose the issue if test fails (#21247) + out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test") + out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail") + c.Assert(err, checker.NotNil, check.Commentf("Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2)) + // check for windows error as well + // TODO Windows Post TP5. Fix the error message string + c.Assert(strings.Contains(string(out), "port is already allocated") || + strings.Contains(string(out), "were not connected because a duplicate name exists") || + strings.Contains(string(out), "HNS failed with error : Failed to create endpoint") || + strings.Contains(string(out), "HNS failed with error : The object already exists"), checker.Equals, true, check.Commentf("Output: %s", out)) + dockerCmd(c, "rm", "-f", "test") + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Test for one character directory name case (#20122) +func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-v", "/tmp/q:/foo", "busybox", "sh", "-c", "find /foo") + c.Assert(strings.TrimSpace(out), checker.Equals, "/foo") +} + +func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume + _, err := buildImage("volumecopy", + `FROM busybox + RUN mkdir /foo && echo hello > /foo/bar + CMD cat /foo/bar`, + true, + ) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "volume", "create", "test") + + // test with the nocopy flag + out, _, err := dockerCmdWithError("run", "-v", "test:/foo:nocopy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // test default behavior which is to copy for non-binds + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + // error out when the volume is already populated + out, _, err = dockerCmdWithError("run", "-v", "test:/foo:copy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // do not error out when copy isn't explicitly set even though it's already populated + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + // do not allow copy modes on volumes-from + dockerCmd(c, "run", "--name=test", "-v", "/foo", "busybox", "true") + out, _, err = dockerCmdWithError("run", "--volumes-from=test:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "--volumes-from=test:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // do not allow copy modes on binds + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunDNSInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "nameserver 127.0.0.1" + expectedWarning := "Localhost DNS setting" + out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + c.Assert(stderr, checker.Contains, expectedWarning, check.Commentf("Expected warning on stderr about localhost resolver, but got %q", stderr)) + + expectedOutput = "nameserver 1.2.3.4" + out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + expectedOutput = "search example.com" + out, _ = dockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + expectedOutput = "options timeout:3" + out, _ = dockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "1.2.3.4\textra" + out, _ := dockerCmd(c, "run", "--add-host=extra:1.2.3.4", "--net=host", "busybox", "cat", "/etc/hosts") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSuite) TestRunRmAndWait(c *check.C) { + dockerCmd(c, "run", "--name=test", "--rm", "-d", "busybox", "sh", "-c", "sleep 3;exit 2") + + out, code, err := dockerCmdWithError("wait", "test") + c.Assert(err, checker.IsNil, check.Commentf("out: %s; exit code: %d", out, code)) + c.Assert(out, checker.Equals, "2\n", check.Commentf("exit code: %d", code)) + c.Assert(code, checker.Equals, 0) +} + +// Test that auto-remove is performed by the daemon (API 1.25 and above) +func (s *DockerSuite) TestRunRm(c *check.C) { + name := "miss-me-when-im-gone" + dockerCmd(c, "run", "--name="+name, "--rm", "busybox") + + _, err := inspectFieldWithError(name, "name") + c.Assert(err, checker.Not(check.IsNil)) + c.Assert(err.Error(), checker.Contains, "No such object: "+name) +} + +// Test that auto-remove is performed by the client on API versions that do not support daemon-side api-remove (API < 1.25) +func (s *DockerSuite) TestRunRmPre125Api(c *check.C) { + name := "miss-me-when-im-gone" + result := icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs("run", "--name="+name, "--rm", "busybox"), + Env: appendBaseEnv(false, "DOCKER_API_VERSION=1.24"), + }) + c.Assert(result, icmd.Matches, icmd.Success) + + _, err := inspectFieldWithError(name, "name") + c.Assert(err, checker.Not(check.IsNil)) + c.Assert(err.Error(), checker.Contains, "No such object: "+name) +} + +// Test case for #23498 +func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an entrypoint" +exec "$@"`, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo") + c.Assert(strings.TrimSpace(out), check.Equals, "foo") + + // CMD will be reset as well (the same as setting a custom entrypoint) + _, _, err = dockerCmdWithError("run", "--entrypoint=", "-t", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "No command specified") +} + +func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { + c.Assert(s.d.StartWithBusybox("--debug", "--default-ulimit=nofile=65535"), checker.IsNil) + + name := "test-A" + _, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.waitRun(name), check.IsNil) + + out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=65535:65535]") + + name = "test-B" + _, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.waitRun(name), check.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=42:42]") +} + +func (s *DockerSuite) TestRunStoppedLoggingDriverNoLeak(c *check.C) { + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Failed to initialize logging driver", check.Commentf("error should be about logging driver, got output %s", out)) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Handles error conditions for --credentialspec. Validating E2E success cases +// requires additional infrastructure (AD for example) on CI servers. +func (s *DockerSuite) TestRunCredentialSpecFailures(c *check.C) { + testRequires(c, DaemonIsWindows) + attempts := []struct{ value, expectedError string }{ + {"rubbish", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"rubbish://", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"file://", "no value supplied for file:// credential spec security option"}, + {"registry://", "no value supplied for registry:// credential spec security option"}, + {`file://c:\blah.txt`, "path cannot be absolute"}, + {`file://doesnotexist.txt`, "The system cannot find the file specified"}, + } + for _, attempt := range attempts { + _, _, err := dockerCmdWithError("run", "--security-opt=credentialspec="+attempt.value, "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf("%s expected non-nil err", attempt.value)) + c.Assert(err.Error(), checker.Contains, attempt.expectedError, check.Commentf("%s expected %s got %s", attempt.value, attempt.expectedError, err)) + } +} + +// Windows specific test to validate credential specs with a well-formed spec. +// Note it won't actually do anything in CI configuration with the spec, but +// it should not fail to run a container. +func (s *DockerSuite) TestRunCredentialSpecWellFormed(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + validCS := readFile(`fixtures\credentialspecs\valid.json`, c) + writeFile(filepath.Join(dockerBasePath, `credentialspecs\valid.json`), validCS, c) + dockerCmd(c, "run", `--security-opt=credentialspec=file://valid.json`, "busybox", "true") +} + +// Windows specific test to ensure that a servicing app container is started +// if necessary once a container exits. It does this by forcing a no-op +// servicing event and verifying the event from Hyper-V-Compute +func (s *DockerSuite) TestRunServicingContainer(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", WindowsBaseImage, "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255") + containerID := strings.TrimSpace(out) + err := waitExited(containerID, 60*time.Second) + c.Assert(err, checker.IsNil) + + cmd := exec.Command("powershell", "echo", `(Get-WinEvent -ProviderName "Microsoft-Windows-Hyper-V-Compute" -FilterXPath 'Event[System[EventID=2010]]' -MaxEvents 1).Message`) + out2, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil) + c.Assert(out2, checker.Contains, `"Servicing":true`, check.Commentf("Servicing container does not appear to have been started: %s", out2)) + c.Assert(out2, checker.Contains, `Windows Container (Servicing)`, check.Commentf("Didn't find 'Windows Container (Servicing): %s", out2)) + c.Assert(out2, checker.Contains, containerID+"_servicing", check.Commentf("Didn't find '%s_servicing': %s", containerID+"_servicing", out2)) +} + +func (s *DockerSuite) TestRunDuplicateMount(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + tmpFile, err := ioutil.TempFile("", "touch-me") + c.Assert(err, checker.IsNil) + defer tmpFile.Close() + + data := "touch-me-foo-bar\n" + if _, err := tmpFile.Write([]byte(data)); err != nil { + c.Fatal(err) + } + + name := "test" + out, _ := dockerCmd(c, "run", "--name", name, "-v", "/tmp:/tmp", "-v", "/tmp:/tmp", "busybox", "sh", "-c", "cat "+tmpFile.Name()+" && ls /") + c.Assert(out, checker.Not(checker.Contains), "tmp:") + c.Assert(out, checker.Contains, data) + + out = inspectFieldJSON(c, name, "Config.Volumes") + c.Assert(out, checker.Contains, "null") +} + +func (s *DockerSuite) TestRunWindowsWithCPUCount(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") +} + +func (s *DockerSuite) TestRunWindowsWithCPUShares(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-shares=1000", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +func (s *DockerSuite) TestRunWindowsWithCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +func (s *DockerSuite) TestRunProcessIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsProcess) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunHypervIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsHyperv) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +// Test for #25099 +func (s *DockerSuite) TestRunEmptyEnv(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOutput := "invalid environment variable:" + + out, _, err := dockerCmdWithError("run", "-e", "", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=foo", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +// #28658 +func (s *DockerSuite) TestSlowStdinClosing(c *check.C) { + name := "testslowstdinclosing" + repeat := 3 // regression happened 50% of the time + for i := 0; i < repeat; i++ { + cmd := exec.Command(dockerBinary, "run", "--rm", "--name", name, "-i", "busybox", "cat") + cmd.Stdin = &delayedReader{} + done := make(chan error, 1) + go func() { + _, err := runCommand(cmd) + done <- err + }() + + select { + case <-time.After(15 * time.Second): + c.Fatal("running container timed out") // cleanup in teardown + case err := <-done: + c.Assert(err, checker.IsNil) + } + } +} + +type delayedReader struct{} + +func (s *delayedReader) Read([]byte) (int, error) { + time.Sleep(500 * time.Millisecond) + return 0, io.EOF +} + +// #28823 (originally #28639) +func (s *DockerSuite) TestRunMountReadOnlyDevShm(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + emptyDir, err := ioutil.TempDir("", "test-read-only-dev-shm") + c.Assert(err, check.IsNil) + defer os.RemoveAll(emptyDir) + out, _, err := dockerCmdWithError("run", "--rm", "--read-only", + "-v", fmt.Sprintf("%s:/dev/shm:ro", emptyDir), + "busybox", "touch", "/dev/shm/foo") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Read-only file system") +} + +// Test case for 29129 +func (s *DockerSuite) TestRunHostnameInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOutput := "foobar\nfoobar" + out, _ := dockerCmd(c, "run", "--net=host", "--hostname=foobar", "busybox", "sh", "-c", `echo $HOSTNAME && hostname`) + c.Assert(strings.TrimSpace(out), checker.Equals, expectedOutput) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_run_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_run_unix_test.go new file mode 100644 index 0000000..fcdd1dd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_run_unix_test.go @@ -0,0 +1,1604 @@ +// +build !windows + +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/sysinfo" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #6509 +func (s *DockerSuite) TestRunRedirectStdout(c *check.C) { + checkRedirect := func(command string) { + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), checker.IsNil) + ch := make(chan error) + go func() { + ch <- cmd.Wait() + close(ch) + }() + + select { + case <-time.After(10 * time.Second): + c.Fatal("command timeout") + case err := <-ch: + c.Assert(err, checker.IsNil, check.Commentf("wait err")) + } + } + + checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") + checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") +} + +// Test recursive bind mount works by default +func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { + // /tmp gets permission denied + testRequires(c, NotUserNamespace, SameHostDaemon) + tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a temporary tmpfs mount. + tmpfsDir := filepath.Join(tmpDir, "tmpfs") + c.Assert(os.MkdirAll(tmpfsDir, 0777), checker.IsNil, check.Commentf("failed to mkdir at %s", tmpfsDir)) + c.Assert(mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""), checker.IsNil, check.Commentf("failed to create a tmpfs mount at %s", tmpfsDir)) + + f, err := ioutil.TempFile(tmpfsDir, "touch-me") + c.Assert(err, checker.IsNil) + defer f.Close() + + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, filepath.Base(f.Name()), check.Commentf("Recursive bind mount test failed. Expected file not found")) +} + +func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + if _, err := os.Stat("/dev/snd"); err != nil { + c.Skip("Host does not have /dev/snd") + } + + out, _ := dockerCmd(c, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "timer", check.Commentf("expected output /dev/snd/timer")) + + out, _ = dockerCmd(c, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "seq", check.Commentf("expected output /dev/othersnd/seq")) +} + +// TestRunDetach checks attaching and detaching with the default escape sequence. +func (s *DockerSuite) TestRunAttachDetach(c *check.C) { + name := "attach-detach" + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", name) + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + cmd.Stdin = tty + c.Assert(cmd.Start(), checker.IsNil) + c.Assert(waitRun(name), check.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + + out, err := bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + out, _ = dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container="+name) + // attach and detach event should be monitored + c.Assert(out, checker.Contains, "attach") + c.Assert(out, checker.Contains, "detach") +} + +// TestRunDetach checks attaching and detaching with the escape sequence specified via flags. +func (s *DockerSuite) TestRunAttachDetachFromFlag(c *check.C) { + name := "attach-detach" + keyCtrlA := []byte{1} + keyA := []byte{97} + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +// TestRunDetach checks attaching and detaching with the escape sequence specified via flags. +func (s *DockerSuite) TestRunAttachDetachFromInvalidFlag(c *check.C) { + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "top") + c.Assert(waitRun(name), check.IsNil) + + // specify an invalid detach key, container will ignore it and use default + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-A,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + bufReader := bufio.NewReader(stdout) + out, err := bufReader.ReadString('\n') + if err != nil { + c.Fatal(err) + } + // it should print a warning to indicate the detach key flag is invalid + errStr := "Invalid escape keys (ctrl-A,a) provided" + c.Assert(strings.TrimSpace(out), checker.Equals, errStr) +} + +// TestRunDetach checks attaching and detaching with the escape sequence specified via config file. +func (s *DockerSuite) TestRunAttachDetachFromConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "detachKeys": "ctrl-a,a" + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + // Then do the work + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +// TestRunDetach checks attaching and detaching with the detach flags, making sure it overrides config file +func (s *DockerSuite) TestRunAttachDetachKeysOverrideConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "detachKeys": "ctrl-e,e" + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + // Then do the work + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +func (s *DockerSuite) TestRunAttachInvalidDetachKeySequencePreserved(c *check.C) { + name := "attach-detach" + keyA := []byte{97} + keyB := []byte{98} + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=a,b,c", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + // Invalid escape sequence aba, should print aba in output + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyB); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte("\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "aba" { + c.Fatalf("expected 'aba', got %q", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunWithCPUQuota(c *check.C) { + testRequires(c, cpuCfsQuota) + + file := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + out, _ := dockerCmd(c, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "8000") + + out = inspectField(c, "test", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "8000", check.Commentf("setting the CPU CFS quota failed")) +} + +func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + + file := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + out, _ := dockerCmd(c, "run", "--cpu-period", "50000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000") + + out, _ = dockerCmd(c, "run", "--cpu-period", "0", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "100000") + + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "50000", check.Commentf("setting the CPU CFS period failed")) +} + +func (s *DockerSuite) TestRunWithInvalidCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + out, _, err := dockerCmdWithError("run", "--cpu-period", "900", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "2000000", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "-3", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "--kernel-memory", "50M", "--name", "test1", "busybox", "cat", file) + c.Assert(strings.TrimSpace(stdout), checker.Equals, "52428800") + + out := inspectField(c, "test1", "HostConfig.KernelMemory") + c.Assert(out, check.Equals, "52428800") +} + +func (s *DockerSuite) TestRunWithInvalidKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + out, _, err := dockerCmdWithError("run", "--kernel-memory", "2M", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Minimum kernel memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--kernel-memory", "-16m", "--name", "test2", "busybox", "echo", "test") + c.Assert(err, check.NotNil) + expected = "invalid size" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithCPUShares(c *check.C) { + testRequires(c, cpuShare) + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "run", "--cpu-shares", "1000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoStdoutWithCPUSharesAndMemoryLimit(c *check.C) { + testRequires(c, cpuShare) + testRequires(c, memoryLimitSupport) + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--cpu-shares", "1000", "-m", "32m", "busybox", "echo", "test") + c.Assert(out, checker.Equals, "test\n", check.Commentf("container should've printed 'test'")) +} + +func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.cpus" + out, _ := dockerCmd(c, "run", "--cpuset-cpus", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetCpus") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.mems" + out, _ := dockerCmd(c, "run", "--cpuset-mems", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetMems") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithBlkioWeight(c *check.C) { + testRequires(c, blkioWeight) + + file := "/sys/fs/cgroup/blkio/blkio.weight" + out, _ := dockerCmd(c, "run", "--blkio-weight", "300", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "300") + + out = inspectField(c, "test", "HostConfig.BlkioWeight") + c.Assert(out, check.Equals, "300") +} + +func (s *DockerSuite) TestRunWithInvalidBlkioWeight(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight", "5", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "Range of blkio weight is from 10 to 1000" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioWeightDevice(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight-device", "/dev/sdX:100", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { + testRequires(c, memoryLimitSupport, swapMemorySupport) + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(600 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } +} + +func (s *DockerSuite) TestRunWithMemoryLimit(c *check.C) { + testRequires(c, memoryLimitSupport) + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(stdout), checker.Equals, "33554432") + + out := inspectField(c, "test", "HostConfig.Memory") + c.Assert(out, check.Equals, "33554432") +} + +// TestRunWithoutMemoryswapLimit sets memory limit and disables swap +// memory limit, this means the processes in the container can use +// 16M memory and as much swap memory as they need (if the host +// supports swap memory). +func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + dockerCmd(c, "run", "-m", "32m", "--memory-swap", "-1", "busybox", "true") +} + +func (s *DockerSuite) TestRunWithSwappiness(c *check.C) { + testRequires(c, memorySwappinessSupport) + file := "/sys/fs/cgroup/memory/memory.swappiness" + out, _ := dockerCmd(c, "run", "--memory-swappiness", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.MemorySwappiness") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithSwappinessInvalid(c *check.C) { + testRequires(c, memorySwappinessSupport) + out, _, err := dockerCmdWithError("run", "--memory-swappiness", "101", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Valid memory swappiness range is 0-100" + c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", out, expected)) + + out, _, err = dockerCmdWithError("run", "--memory-swappiness", "-10", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", out, expected)) +} + +func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) { + testRequires(c, memoryReservationSupport) + + file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes" + out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "209715200") + + out = inspectField(c, "test", "HostConfig.MemoryReservation") + c.Assert(out, check.Equals, "209715200") +} + +func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) { + testRequires(c, memoryLimitSupport) + testRequires(c, memoryReservationSupport) + out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", "800M", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Minimum memory limit can not be less than memory reservation limit" + c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation")) + + out, _, err = dockerCmdWithError("run", "--memory-reservation", "1k", "busybox", "true") + c.Assert(err, check.NotNil) + expected = "Minimum memory reservation allowed is 4MB" + c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation")) +} + +func (s *DockerSuite) TestStopContainerSignal(c *check.C) { + out, _ := dockerCmd(c, "run", "--stop-signal", "SIGUSR1", "-d", "busybox", "/bin/sh", "-c", `trap 'echo "exit trapped"; exit 0' USR1; while true; do sleep 1; done`) + containerID := strings.TrimSpace(out) + + c.Assert(waitRun(containerID), checker.IsNil) + + dockerCmd(c, "stop", containerID) + out, _ = dockerCmd(c, "logs", containerID) + + c.Assert(out, checker.Contains, "exit trapped", check.Commentf("Expected `exit trapped` in the log")) +} + +func (s *DockerSuite) TestRunSwapLessThanMemoryLimit(c *check.C) { + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + out, _, err := dockerCmdWithError("run", "-m", "16m", "--memory-swap", "15m", "busybox", "echo", "test") + expected := "Minimum memoryswap limit should be larger than memory limit" + c.Assert(err, check.NotNil) + + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCpusetCpusFlagValue(c *check.C) { + testRequires(c, cgroupCpuset, SameHostDaemon) + + sysInfo := sysinfo.New(true) + cpus, err := parsers.ParseUintList(sysInfo.Cpus) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(cpus)+1; i++ { + if !cpus[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-cpus", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested CPUs are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Cpus) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCpusetMemsFlagValue(c *check.C) { + testRequires(c, cgroupCpuset) + + sysInfo := sysinfo.New(true) + mems, err := parsers.ParseUintList(sysInfo.Mems) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(mems)+1; i++ { + if !mems[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-mems", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested memory nodes are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Mems) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCPUShares(c *check.C) { + testRequires(c, cpuShare, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cpu-shares", "1", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "The minimum allowed cpu-shares is 2" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "-1", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "shares: invalid argument" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "99999999", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "The maximum allowed cpu-shares is" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithDefaultShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm-default" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "67108864") +} + +func (s *DockerSuite) TestRunWithShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm" + out, _ := dockerCmd(c, "run", "--name", name, "--shm-size=1G", "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 1GB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "1073741824") +} + +func (s *DockerSuite) TestRunTmpfsMountsEnsureOrdered(c *check.C) { + tmpFile, err := ioutil.TempFile("", "test") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + out, _ := dockerCmd(c, "run", "--tmpfs", "/run", "-v", tmpFile.Name()+":/run/test", "busybox", "ls", "/run") + c.Assert(out, checker.Contains, "test") +} + +func (s *DockerSuite) TestRunTmpfsMounts(c *check.C) { + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support tmpfs mounts. + testRequires(c, DaemonIsLinux) + if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "busybox", "touch", "/run/somefile"); err != nil { + c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) + } + if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec", "busybox", "touch", "/run/somefile"); err != nil { + c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) + } + if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec,nosuid,rw,size=5k,mode=700", "busybox", "touch", "/run/somefile"); err != nil { + c.Fatalf("/run failed to mount on tmpfs with valid options %q %s", err, out) + } + if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run:foobar", "busybox", "touch", "/run/somefile"); err == nil { + c.Fatalf("/run mounted on tmpfs when it should have vailed within invalid mount option") + } + if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "-v", "/run:/run", "busybox", "touch", "/run/somefile"); err == nil { + c.Fatalf("Should have generated an error saying Duplicate mount points") + } +} + +func (s *DockerSuite) TestRunTmpfsMountsOverrideImageVolumes(c *check.C) { + name := "img-with-volumes" + _, err := buildImage( + name, + ` + FROM busybox + VOLUME /run + RUN touch /run/stuff + `, + true) + if err != nil { + c.Fatal(err) + } + out, _ := dockerCmd(c, "run", "--tmpfs", "/run", name, "ls", "/run") + c.Assert(out, checker.Not(checker.Contains), "stuff") +} + +// Test case for #22420 +func (s *DockerSuite) TestRunTmpfsMountsWithOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOptions := []string{"rw", "nosuid", "nodev", "noexec", "relatime"} + out, _ := dockerCmd(c, "run", "--tmpfs", "/tmp", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + c.Assert(out, checker.Not(checker.Contains), "size=") + + expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + c.Assert(out, checker.Not(checker.Contains), "size=") + + expectedOptions = []string{"rw", "nosuid", "nodev", "relatime", "size=8192k"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,exec,size=8192k", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + + expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,size=8192k,exec,size=4096k,noexec", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + + // We use debian:jessie as there is no findmnt in busybox. Also the output will be in the format of + // TARGET PROPAGATION + // /tmp shared + // so we only capture `shared` here. + expectedOptions = []string{"shared"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:shared", "debian:jessie", "findmnt", "-o", "TARGET,PROPAGATION", "/tmp") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } +} + +func (s *DockerSuite) TestRunSysctls(c *check.C) { + + testRequires(c, DaemonIsLinux) + var err error + + out, _ := dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=1", "--name", "test", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "1") + + out = inspectFieldJSON(c, "test", "HostConfig.Sysctls") + + sysctls := make(map[string]string) + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "1") + + out, _ = dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=0", "--name", "test1", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "0") + + out = inspectFieldJSON(c, "test1", "HostConfig.Sysctls") + + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "0") + + runCmd := exec.Command(dockerBinary, "run", "--sysctl", "kernel.foobar=1", "--name", "test2", "busybox", "cat", "/proc/sys/kernel/foobar") + out, _, _ = runCommandWithOutput(runCmd) + if !strings.Contains(out, "invalid argument") { + c.Fatalf("expected --sysctl to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyUnshare checks that 'docker run --security-opt seccomp=/tmp/profile.json debian:jessie unshare' exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyUnshare(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "unshare", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + if err != nil { + c.Fatal(err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + out, _, _ := runCommandWithOutput(runCmd) + if !strings.Contains(out, "Operation not permitted") { + c.Fatalf("expected unshare with seccomp profile denied to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyChmod checks that 'docker run --security-opt seccomp=/tmp/profile.json busybox chmod 400 /etc/hostname' exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyChmod(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name":"fchmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name": "fchmodat", + "action":"SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "400", "/etc/hostname") + out, _, _ := runCommandWithOutput(runCmd) + if !strings.Contains(out, "Operation not permitted") { + c.Fatalf("expected chmod with seccomp profile denied to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyUnshareUserns checks that 'docker run debian:jessie unshare --map-root-user --user sh -c whoami' with a specific profile to +// deny unshare of a userns exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) + // from sched.h + jsonData := fmt.Sprintf(`{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "unshare", + "action": "SCMP_ACT_ERRNO", + "args": [ + { + "index": 0, + "value": %d, + "op": "SCMP_CMP_EQ" + } + ] + } + ] +}`, uint64(0x10000000)) + tmpFile, err := ioutil.TempFile("", "profile.json") + if err != nil { + c.Fatal(err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") + out, _, _ := runCommandWithOutput(runCmd) + if !strings.Contains(out, "Operation not permitted") { + c.Fatalf("expected unshare userns with seccomp profile denied to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyUnusualSocketFamilies checks that rarely used socket families such as Appletalk are blocked by the default profile +func (s *DockerSuite) TestRunSeccompProfileDenyUnusualSocketFamilies(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + ensureSyscallTest(c) + + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "appletalk-test") + _, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatal("expected opening appletalk socket family to fail") + } +} + +// TestRunSeccompProfileDenyCloneUserns checks that 'docker run syscall-test' +// with a the default seccomp profile exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyCloneUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + ensureSyscallTest(c) + + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "userns-test", "id") + out, _, err := runCommandWithOutput(runCmd) + if err == nil || !strings.Contains(out, "clone failed: Operation not permitted") { + c.Fatalf("expected clone userns with default seccomp profile denied to fail, got %s: %v", out, err) + } +} + +// TestRunSeccompUnconfinedCloneUserns checks that +// 'docker run --security-opt seccomp=unconfined syscall-test' allows creating a userns. +func (s *DockerSuite) TestRunSeccompUnconfinedCloneUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace, unprivilegedUsernsClone) + ensureSyscallTest(c) + + // make sure running w privileged is ok + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "syscall-test", "userns-test", "id") + if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") { + c.Fatalf("expected clone userns with --security-opt seccomp=unconfined to succeed, got %s: %v", out, err) + } +} + +// TestRunSeccompAllowPrivCloneUserns checks that 'docker run --privileged syscall-test' +// allows creating a userns. +func (s *DockerSuite) TestRunSeccompAllowPrivCloneUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace) + ensureSyscallTest(c) + + // make sure running w privileged is ok + runCmd := exec.Command(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id") + if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") { + c.Fatalf("expected clone userns with --privileged to succeed, got %s: %v", out, err) + } +} + +// TestRunSeccompProfileAllow32Bit checks that 32 bit code can run on x86_64 +// with the default seccomp profile. +func (s *DockerSuite) TestRunSeccompProfileAllow32Bit(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, IsAmd64) + ensureSyscallTest(c) + + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "exit32-test", "id") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("expected to be able to run 32 bit code, got %s: %v", out, err) + } +} + +// TestRunSeccompAllowSetrlimit checks that 'docker run debian:jessie ulimit -v 1048510' succeeds. +func (s *DockerSuite) TestRunSeccompAllowSetrlimit(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + // ulimit uses setrlimit, so we want to make sure we don't break it + runCmd := exec.Command(dockerBinary, "run", "debian:jessie", "bash", "-c", "ulimit -v 1048510") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("expected ulimit with seccomp to succeed, got %s: %v", out, err) + } +} + +func (s *DockerSuite) TestRunSeccompDefaultProfileAcct(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) + ensureSyscallTest(c) + + out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("test 0: expected Operation not permitted, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("test 1: expected Operation not permitted, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "sys_pacct", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 2: expected No such file or directory, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 3: expected No such file or directory, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-drop", "ALL", "--cap-add", "sys_pacct", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 4: expected No such file or directory, got: %s", out) + } +} + +func (s *DockerSuite) TestRunSeccompDefaultProfileNS(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) + ensureSyscallTest(c) + + out, _, err := dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0") + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("test 0: expected Operation not permitted, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello1") + if err != nil || !strings.Contains(out, "hello1") { + c.Fatalf("test 1: expected hello1, got: %s, %v", out, err) + } + + out, _, err = dockerCmdWithError("run", "--cap-drop", "all", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello2") + if err != nil || !strings.Contains(out, "hello2") { + c.Fatalf("test 2: expected hello2, got: %s, %v", out, err) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello3") + if err != nil || !strings.Contains(out, "hello3") { + c.Fatalf("test 3: expected hello3, got: %s, %v", out, err) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 4: expected No such file or directory, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello4") + if err != nil || !strings.Contains(out, "hello4") { + c.Fatalf("test 5: expected hello4, got: %s, %v", out, err) + } +} + +// TestRunNoNewPrivSetuid checks that --security-opt=no-new-privileges prevents +// effective uid transtions on executing setuid binaries. +func (s *DockerSuite) TestRunNoNewPrivSetuid(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) + ensureNNPTest(c) + + // test that running a setuid binary results in no effective uid transition + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000", "nnp-test", "/usr/bin/nnp-test") + if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "EUID=1000") { + c.Fatalf("expected output to contain EUID=1000, got %s: %v", out, err) + } +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChown(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_CHOWN + runCmd := exec.Command(dockerBinary, "run", "busybox", "chown", "100", "/tmp") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_CHOWN + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chown", "100", "/tmp") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_CHOWN + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "chown", "busybox", "chown", "100", "/tmp") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_DAC_OVERRIDE + runCmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "echo test > /etc/passwd") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_DAC_OVERRIDE + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "sh", "-c", "echo test > /etc/passwd") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Permission denied") + // TODO test that root user can drop default capability CAP_DAC_OVERRIDE +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesFowner(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_FOWNER + runCmd := exec.Command(dockerBinary, "run", "busybox", "chmod", "777", "/etc/passwd") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_FOWNER + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chmod", "777", "/etc/passwd") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // TODO test that root user can drop default capability CAP_FOWNER +} + +// TODO CAP_KILL + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetuid(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SETUID + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "setuid-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_SETUID + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setuid-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_SETUID + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "setuid", "syscall-test", "setuid-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetgid(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SETGID + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "setgid-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_SETGID + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setgid-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_SETGID + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "setgid", "syscall-test", "setgid-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +// TODO CAP_SETPCAP + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_NET_BIND_SERVICE + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "socket-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_NET_BIND_SERVICE + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "socket-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Permission denied") + // test that root user can drop default capability CAP_NET_BIND_SERVICE + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "net_bind_service", "syscall-test", "socket-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Permission denied") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_NET_RAW + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "raw-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_NET_RAW + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "raw-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_NET_RAW + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "net_raw", "syscall-test", "raw-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChroot(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SYS_CHROOT + runCmd := exec.Command(dockerBinary, "run", "busybox", "chroot", "/", "/bin/true") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_SYS_CHROOT + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chroot", "/", "/bin/true") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_SYS_CHROOT + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "sys_chroot", "busybox", "chroot", "/", "/bin/true") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesMknod(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_MKNOD + runCmd := exec.Command(dockerBinary, "run", "busybox", "mknod", "/tmp/node", "b", "1", "2") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_MKNOD + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "mknod", "/tmp/node", "b", "1", "2") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_MKNOD + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "mknod", "busybox", "mknod", "/tmp/node", "b", "1", "2") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +// TODO CAP_AUDIT_WRITE +// TODO CAP_SETFCAP + +func (s *DockerSuite) TestRunApparmorProcDirectory(c *check.C) { + testRequires(c, SameHostDaemon, Apparmor) + + // running w seccomp unconfined tests the apparmor profile + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/cgroup") + if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", out, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/attr/current") + if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", out, err) + } +} + +// make sure the default profile can be successfully parsed (using unshare as it is +// something which we know is blocked in the default profile) +func (s *DockerSuite) TestRunSeccompWithDefaultProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + out, _, err := dockerCmdWithError("run", "--security-opt", "seccomp=../profiles/seccomp/default.json", "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "unshare: unshare failed: Operation not permitted") +} + +// TestRunDeviceSymlink checks run with device that follows symlink (#13840 and #22271) +func (s *DockerSuite) TestRunDeviceSymlink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm, SameHostDaemon) + if _, err := os.Stat("/dev/zero"); err != nil { + c.Skip("Host does not have /dev/zero") + } + + // Create a temporary directory to create symlink + tmpDir, err := ioutil.TempDir("", "docker_device_follow_symlink_tests") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a symbolic link to /dev/zero + symZero := filepath.Join(tmpDir, "zero") + err = os.Symlink("/dev/zero", symZero) + c.Assert(err, checker.IsNil) + + // Create a temporary file "temp" inside tmpDir, write some data to "tmpDir/temp", + // then create a symlink "tmpDir/file" to the temporary file "tmpDir/temp". + tmpFile := filepath.Join(tmpDir, "temp") + err = ioutil.WriteFile(tmpFile, []byte("temp"), 0666) + c.Assert(err, checker.IsNil) + symFile := filepath.Join(tmpDir, "file") + err = os.Symlink(tmpFile, symFile) + c.Assert(err, checker.IsNil) + + // Create a symbolic link to /dev/zero, this time with a relative path (#22271) + err = os.Symlink("zero", "/dev/symzero") + if err != nil { + c.Fatal("/dev/symzero creation failed") + } + // We need to remove this symbolic link here as it is created in /dev/, not temporary directory as above + defer os.Remove("/dev/symzero") + + // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 + out, _ := dockerCmd(c, "run", "--device", symZero+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23")) + + // symlink "tmpDir/file" to a file "tmpDir/temp" will result in an error as it is not a device. + out, _, err = dockerCmdWithError("run", "--device", symFile+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(err, check.NotNil) + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "not a device node", check.Commentf("expected output 'not a device node'")) + + // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 (this time check with relative path backed, see #22271) + out, _ = dockerCmd(c, "run", "--device", "/dev/symzero:/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23")) +} + +// TestRunPIDsLimit makes sure the pids cgroup is set with --pids-limit +func (s *DockerSuite) TestRunPIDsLimit(c *check.C) { + testRequires(c, pidsLimit) + + file := "/sys/fs/cgroup/pids/pids.max" + out, _ := dockerCmd(c, "run", "--name", "skittles", "--pids-limit", "4", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "4") + + out = inspectField(c, "skittles", "HostConfig.PidsLimit") + c.Assert(out, checker.Equals, "4", check.Commentf("setting the pids limit failed")) +} + +func (s *DockerSuite) TestRunPrivilegedAllowedDevices(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + file := "/sys/fs/cgroup/devices/devices.list" + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "cat", file) + c.Logf("out: %q", out) + c.Assert(strings.TrimSpace(out), checker.Equals, "a *:* rwm") +} + +func (s *DockerSuite) TestRunUserDeviceAllowed(c *check.C) { + testRequires(c, DaemonIsLinux) + + fi, err := os.Stat("/dev/snd/timer") + if err != nil { + c.Skip("Host does not have /dev/snd/timer") + } + stat, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + c.Skip("Could not stat /dev/snd/timer") + } + + file := "/sys/fs/cgroup/devices/devices.list" + out, _ := dockerCmd(c, "run", "--device", "/dev/snd/timer:w", "busybox", "cat", file) + c.Assert(out, checker.Contains, fmt.Sprintf("c %d:%d w", stat.Rdev/256, stat.Rdev%256)) +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "names": ["chmod", "fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "names": ["fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + jsonData := `{ + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + } + ], + "architectures": [ + "SCMP_ARCH_X32" + ], + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "names": ["chmod", "fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") +} + +func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + // 1) verify I can run containers with the Docker default shipped profile which allows chmod + _, err = s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + // 2) restart the daemon and add a custom seccomp profile in which we deny chmod + err = s.d.Restart("--seccomp-profile=" + tmpFile.Name()) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestRunWithNanoCPUs(c *check.C) { + testRequires(c, cpuCfsQuota, cpuCfsPeriod) + + file1 := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + file2 := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + out, _ := dockerCmd(c, "run", "--cpus", "0.5", "--name", "test", "busybox", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") + + out = inspectField(c, "test", "HostConfig.NanoCpus") + c.Assert(out, checker.Equals, "5e+08", check.Commentf("setting the Nano CPUs failed")) + out = inspectField(c, "test", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _, err := dockerCmdWithError("run", "--cpus", "0.5", "--cpu-quota", "50000", "--cpu-period", "100000", "busybox", "sh") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Conflicting options: Nano CPUs and CPU Period cannot both be set") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_test.go new file mode 100644 index 0000000..70139a5 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_test.go @@ -0,0 +1,383 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// save a repo using gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + out, _ := dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + repoTarball, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = strings.NewReader(repoTarball) + out, _, err = runCommandWithOutput(loadCmd) + c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +// save a repo using xz+gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-gz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = strings.NewReader(out) + out, _, err = runCommandWithOutput(loadCmd) + c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +func (s *DockerSuite) TestSaveSingleTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-single-tag-test" + dockerCmd(c, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedImageID := strings.TrimSpace(out) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)), + exec.Command("tar", "t"), + exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID))) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveCheckTimes(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "busybox:latest" + out, _ := dockerCmd(c, "inspect", repoName) + data := []struct { + ID string + Created time.Time + }{} + err := json.Unmarshal([]byte(out), &data) + c.Assert(err, checker.IsNil, check.Commentf("failed to marshal from %q: err %v", repoName, err)) + c.Assert(len(data), checker.Not(checker.Equals), 0, check.Commentf("failed to marshal the data from %q", repoName)) + tarTvTimeFormat := "2006-01-02 15:04" + out, _, err = runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("tar", "tv"), + exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex()))) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveImageId(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-image-id-test" + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedLongImageID := strings.TrimPrefix(strings.TrimSpace(out), "sha256:") + + out, _ = dockerCmd(c, "images", "-q", repoName) + cleanedShortImageID := strings.TrimSpace(out) + + // Make sure IDs are not empty + c.Assert(cleanedLongImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) + c.Assert(cleanedShortImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) + + saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID) + tarCmd := exec.Command("tar", "t") + + var err error + tarCmd.Stdin, err = saveCmd.StdoutPipe() + c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for tar: %v", err)) + grepCmd := exec.Command("grep", cleanedLongImageID) + grepCmd.Stdin, err = tarCmd.StdoutPipe() + c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for grep: %v", err)) + + c.Assert(tarCmd.Start(), checker.IsNil, check.Commentf("tar failed with error: %v", err)) + c.Assert(saveCmd.Start(), checker.IsNil, check.Commentf("docker save failed with error: %v", err)) + defer func() { + saveCmd.Wait() + tarCmd.Wait() + dockerCmd(c, "rmi", repoName) + }() + + out, _, err = runCommandWithOutput(grepCmd) + + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID: %s, %v", out, err)) +} + +// save a repo and try to load it using flags +func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-and-load-repo-flags" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + + deleteImages(repoName) + dockerCmd(c, "commit", name, repoName) + + before, _ := dockerCmd(c, "inspect", repoName) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + + after, _ := dockerCmd(c, "inspect", repoName) + c.Assert(before, checker.Equals, after, check.Commentf("inspect is not the same after a save / load")) +} + +func (s *DockerSuite) TestSaveWithNoExistImage(c *check.C) { + testRequires(c, DaemonIsLinux) + + imgName := "foobar-non-existing-image" + + out, _, err := dockerCmdWithError("save", "-o", "test-img.tar", imgName) + c.Assert(err, checker.NotNil, check.Commentf("save image should fail for non-existing image")) + c.Assert(out, checker.Contains, fmt.Sprintf("No such image: %s", imgName)) +} + +func (s *DockerSuite) TestSaveMultipleNames(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-multi-name-test" + + // Make one image + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName)) + + // Make two images + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), + exec.Command("tar", "xO", "repositories"), + exec.Command("grep", "-q", "-E", "(-one|-two)"), + ) + c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple repos: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { + testRequires(c, DaemonIsLinux) + makeImage := func(from string, tag string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "true") + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID, tag) + imageID := strings.TrimSpace(out) + return imageID + } + + repoName := "foobar-save-multi-images-test" + tagFoo := repoName + ":foo" + tagBar := repoName + ":bar" + + idFoo := makeImage("busybox:latest", tagFoo) + idBar := makeImage("busybox:latest", tagBar) + + deleteImages(repoName) + + // create the archive + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName, "busybox:latest"), + exec.Command("tar", "t")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err)) + + lines := strings.Split(strings.TrimSpace(out), "\n") + var actual []string + for _, l := range lines { + if regexp.MustCompile("^[a-f0-9]{64}\\.json$").Match([]byte(l)) { + actual = append(actual, strings.TrimSuffix(l, ".json")) + } + } + + // make the list of expected layers + out = inspectField(c, "busybox:latest", "Id") + expected := []string{strings.TrimSpace(out), idFoo, idBar} + + // prefixes are not in tar + for i := range expected { + expected[i] = digest.Digest(expected[i]).Hex() + } + + sort.Strings(actual) + sort.Strings(expected) + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contains the right layers: got %v, expected %v, output: %q", actual, expected, out)) +} + +// Issue #6722 #5892 ensure directories are included in changes +func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { + testRequires(c, DaemonIsLinux) + layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + + name := "save-directory-permissions" + tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary directory: %s", err)) + extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") + os.Mkdir(extractionDirectory, 0777) + + defer os.RemoveAll(tmpDir) + _, err = buildImage(name, + `FROM busybox + RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a + RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, + true) + c.Assert(err, checker.IsNil, check.Commentf("%v", err)) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command("tar", "-xf", "-", "-C", extractionDirectory), + ) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and extract image: %s", out)) + + dirs, err := ioutil.ReadDir(extractionDirectory) + c.Assert(err, checker.IsNil, check.Commentf("failed to get a listing of the layer directories: %s", err)) + + found := false + for _, entry := range dirs { + var entriesSansDev []string + if entry.IsDir() { + layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") + + f, err := os.Open(layerPath) + c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err)) + defer f.Close() + + entries, err := listTar(f) + for _, e := range entries { + if !strings.Contains(e, "dev/") { + entriesSansDev = append(entriesSansDev, e) + } + } + c.Assert(err, checker.IsNil, check.Commentf("encountered error while listing tar entries: %s", err)) + + if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { + found = true + break + } + } + } + + c.Assert(found, checker.Equals, true, check.Commentf("failed to find the layer with the right content listing")) + +} + +// Test loading a weird image where one of the layers is of zero size. +// The layer.tar file is actually zero bytes, no padding or anything else. +// See issue: 18170 +func (s *DockerSuite) TestLoadZeroSizeLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "load", "-i", "fixtures/load/emptyLayer.tar") +} + +func (s *DockerSuite) TestSaveLoadParents(c *check.C) { + testRequires(c, DaemonIsLinux) + + makeImage := func(from string, addfile string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "touch", addfile) + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + imageID := strings.TrimSpace(out) + + dockerCmd(c, "rm", "-f", cleanedContainerID) + return imageID + } + + idFoo := makeImage("busybox", "foo") + idBar := makeImage(idFoo, "bar") + + tmpDir, err := ioutil.TempDir("", "save-load-parents") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + c.Log("tmpdir", tmpDir) + + outfile := filepath.Join(tmpDir, "out.tar") + + dockerCmd(c, "save", "-o", outfile, idBar, idFoo) + dockerCmd(c, "rmi", idBar) + dockerCmd(c, "load", "-i", outfile) + + inspectOut := inspectField(c, idBar, "Parent") + c.Assert(inspectOut, checker.Equals, idFoo) + + inspectOut = inspectField(c, idFoo, "Parent") + c.Assert(inspectOut, checker.Equals, "") +} + +func (s *DockerSuite) TestSaveLoadNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "saveloadnotag" + + _, err := buildImage(name, "FROM busybox\nENV foo=bar", true) + c.Assert(err, checker.IsNil, check.Commentf("%v", err)) + + id := inspectField(c, name, "Id") + + // Test to make sure that save w/o name just shows imageID during load + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", id), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + + // Should not show 'name' but should show the image ID during the load + c.Assert(out, checker.Not(checker.Contains), "Loaded image: ") + c.Assert(out, checker.Contains, "Loaded image ID:") + c.Assert(out, checker.Contains, id) + + // Test to make sure that save by name shows that name during load + out, _, err = runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + c.Assert(out, checker.Contains, "Loaded image: "+name+":latest") + c.Assert(out, checker.Not(checker.Contains), "Loaded image ID:") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_unix_test.go new file mode 100644 index 0000000..22445e5 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_unix_test.go @@ -0,0 +1,109 @@ +// +build !windows + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// save a repo and try to load it using stdout +func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { + name := "test-save-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + before, _ := dockerCmd(c, "commit", name, repoName) + before = strings.TrimRight(before, "\n") + + tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar") + c.Assert(err, check.IsNil) + defer os.Remove(tmpFile.Name()) + + saveCmd := exec.Command(dockerBinary, "save", repoName) + saveCmd.Stdout = tmpFile + + _, err = runCommand(saveCmd) + c.Assert(err, check.IsNil) + + tmpFile, err = os.Open(tmpFile.Name()) + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = tmpFile + + out, _, err := runCommandWithOutput(loadCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + after := inspectField(c, repoName, "Id") + after = strings.TrimRight(after, "\n") + + c.Assert(after, check.Equals, before) //inspect is not the same after a save / load + + deleteImages(repoName) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + cmd := exec.Command(dockerBinary, "save", repoName) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), check.IsNil) + c.Assert(cmd.Wait(), check.NotNil) //did not break writing to a TTY + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + c.Assert(err, check.IsNil) //could not read tty output + c.Assert(string(buf[:n]), checker.Contains, "Cowardly refusing", check.Commentf("help output is not being yielded", out)) +} + +func (s *DockerSuite) TestSaveAndLoadWithProgressBar(c *check.C) { + name := "test-load" + _, err := buildImage(name, ` + FROM busybox + RUN touch aa + `, true) + c.Assert(err, check.IsNil) + + tmptar := name + ".tar" + dockerCmd(c, "save", "-o", tmptar, name) + defer os.Remove(tmptar) + + dockerCmd(c, "rmi", name) + dockerCmd(c, "tag", "busybox", name) + out, _ := dockerCmd(c, "load", "-i", tmptar) + expected := fmt.Sprintf("The image %s:latest already exists, renaming the old one with ID", name) + c.Assert(out, checker.Contains, expected) +} + +// fail because load didn't receive data from stdin +func (s *DockerSuite) TestLoadNoStdinFail(c *check.C) { + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + cmd := exec.CommandContext(ctx, dockerBinary, "load") + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Run(), check.NotNil) // docker-load should fail + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + c.Assert(err, check.IsNil) //could not read tty output + c.Assert(string(buf[:n]), checker.Contains, "requested load from stdin, but stdin is empty") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_search_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_search_test.go new file mode 100644 index 0000000..5a32f2a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_search_test.go @@ -0,0 +1,131 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// search for repos named "registry" on the central registry +func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + out, _ := dockerCmd(c, "search", "busybox") + c.Assert(out, checker.Contains, "Busybox base image.", check.Commentf("couldn't find any repository named (or containing) 'Busybox base image.'")) +} + +func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) { + out, _, err := dockerCmdWithError("search", "--filter", "stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "is-automated=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "is-official=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + // -s --stars deprecated since Docker 1.13 + out, _, err = dockerCmdWithError("search", "--stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning")) + + // -s --stars deprecated since Docker 1.13 + out, _, err = dockerCmdWithError("search", "-s=-1", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning")) +} + +func (s *DockerSuite) TestSearchCmdOptions(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + out, _ := dockerCmd(c, "search", "--help") + c.Assert(out, checker.Contains, "Usage:\tdocker search [OPTIONS] TERM") + + outSearchCmd, _ := dockerCmd(c, "search", "busybox") + outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox") + + c.Assert(len(outSearchCmd) > len(outSearchCmdNotrunc), check.Equals, false, check.Commentf("The no-trunc option can't take effect.")) + + outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. + outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n") + for i := range outSearchCmdautomatedSlice { + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) + } + + outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image. + outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n") + for i := range outSearchCmdNotOfficialSlice { + c.Assert(strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial)) + } + + outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image. + outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n") + c.Assert(outSearchCmdOfficialSlice, checker.HasLen, 3) // 1 header, 1 line, 1 carriage return + c.Assert(strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), check.Equals, true, check.Commentf("The busybox is an OFFICIAL image: %s", outSearchCmdNotOfficial)) + + outSearchCmdStars, _ := dockerCmd(c, "search", "--filter", "stars=2", "busybox") + c.Assert(strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars)) + + dockerCmd(c, "search", "--filter", "is-automated=true", "--filter", "stars=2", "--no-trunc=true", "busybox") + + // --automated deprecated since Docker 1.13 + outSearchCmdautomated1, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. + outSearchCmdautomatedSlice1 := strings.Split(outSearchCmdautomated1, "\n") + for i := range outSearchCmdautomatedSlice1 { + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice1[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) + } + + // -s --stars deprecated since Docker 1.13 + outSearchCmdStars1, _ := dockerCmd(c, "search", "--stars=2", "busybox") + c.Assert(strings.Count(outSearchCmdStars1, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars1)) + + // -s --stars deprecated since Docker 1.13 + dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox") +} + +// search for repos which start with "ubuntu-" on the central registry +func (s *DockerSuite) TestSearchOnCentralRegistryWithDash(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + dockerCmd(c, "search", "ubuntu-") +} + +// test case for #23055 +func (s *DockerSuite) TestSearchWithLimit(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + limit := 10 + out, _, err := dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice := strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 50 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice = strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 100 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice = strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 0 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.Not(checker.IsNil)) + + limit = 200 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.Not(checker.IsNil)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_create_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_create_test.go new file mode 100644 index 0000000..b79fdbe --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_create_test.go @@ -0,0 +1,131 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) +} + +func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + c.Assert(len(secret.Spec.Labels), checker.Equals, 2) + c.Assert(secret.Spec.Labels["key1"], checker.Equals, "value1") + c.Assert(secret.Spec.Labels["key2"], checker.Equals, "value2") +} + +// Test case for 28884 +func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: name, + }, + []byte("foo"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + fake := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: id, + }, + []byte("fake foo"), + }) + c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", fake)) + + out, err := d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, fake) + + out, err = d.Cmd("secret", "rm", id) + c.Assert(out, checker.Contains, id) + + // Fake one will remain + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on name prefix of the fake one + // (which is the same as the ID of foo one) should not work + // as search is only done based on: + // - Full ID + // - Full Name + // - Partial ID (prefix) + out, err = d.Cmd("secret", "rm", id[:5]) + c.Assert(out, checker.Not(checker.Contains), id) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on ID prefix of the fake one should succeed + out, err = d.Cmd("secret", "rm", fake[:5]) + c.Assert(out, checker.Contains, fake) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Not(checker.Contains), id) + c.Assert(out, checker.Not(checker.Contains), fake) +} + +func (s *DockerSwarmSuite) TestSecretCreateWithFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testFile, err := ioutil.TempFile("", "secretCreateTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(testFile.Name()) + + testData := "TESTINGDATA" + _, err = testFile.Write([]byte(testData)) + c.Assert(err, checker.IsNil, check.Commentf("failed to write to temporary file")) + + testName := "test_secret" + out, err := d.Cmd("secret", "create", testName, testFile.Name()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "", check.Commentf(out)) + + id := strings.TrimSpace(out) + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_inspect_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_inspect_test.go new file mode 100644 index 0000000..0985a2b --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_inspect_test.go @@ -0,0 +1,68 @@ +// +build !windows + +package main + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + + out, err := d.Cmd("secret", "inspect", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 1) +} + +func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { + d := s.AddDaemon(c, true, true) + + testNames := []string{ + "test0", + "test1", + } + for _, n := range testNames { + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: n, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, n) + + } + + args := []string{ + "secret", + "inspect", + } + args = append(args, testNames...) + out, err := d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_create_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_create_test.go new file mode 100644 index 0000000..9e8b1e9 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_create_test.go @@ -0,0 +1,175 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.getTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "foo") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeVolume) + c.Assert(mountConfig[0].VolumeOptions, checker.NotNil) + c.Assert(mountConfig[0].VolumeOptions.NoCopy, checker.True) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeVolume) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-secret" + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", testName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testName) + c.Assert(refs[0].File.UID, checker.Equals, "0") + c.Assert(refs[0].File.GID, checker.Equals, "0") +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-secret" + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + testTarget := "testing" + + out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", fmt.Sprintf("source=%s,target=%s", testName, testTarget), "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) +} + +func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f /dev/null") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.getTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mountConfig[0].TmpfsOptions, checker.NotNil) + c.Assert(mountConfig[0].TmpfsOptions.SizeBytes, checker.Equals, int64(1048576)) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mounts[0].Name, checker.Equals, "") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) + + out, err = s.nodeCmd(c, task.NodeID, "logs", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.HasPrefix, "tmpfs on /foo type tmpfs") + c.Assert(strings.TrimSpace(out), checker.Contains, "size=1024k") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_health_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_health_test.go new file mode 100644 index 0000000..30580f6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_health_test.go @@ -0,0 +1,191 @@ +// +build !windows + +package main + +import ( + "strconv" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/executor/container" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// start a service, and then make its task unhealthy during running +// finally, unhealthy task should be detected and killed +func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // build image with health-check + // note: use `daemon.buildImageWithOut` to build, do not use `buildImage` to build + imageName := "testhealth" + _, _, err := d.buildImageWithOut(imageName, + `FROM busybox + RUN touch /status + HEALTHCHECK --interval=1s --timeout=1s --retries=1\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceRun" + out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) + containerID := task.Status.ContainerStatus.ContainerID + + // wait for container to be healthy + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) + return strings.TrimSpace(out), nil + }, checker.Equals, "healthy") + + // make it fail + d.Cmd("exec", containerID, "rm", "/status") + // wait for container to be unhealthy + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) + return strings.TrimSpace(out), nil + }, checker.Equals, "unhealthy") + + // Task should be terminated + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateFailed) + + if !strings.Contains(task.Status.Err, container.ErrContainerUnhealthy.Error()) { + c.Fatal("unhealthy task exits because of other error") + } +} + +// start a service whose task is unhealthy at beginning +// its tasks should be blocked in starting stage, until health check is passed +func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // service started from this image won't pass health check + imageName := "testhealth" + _, _, err := d.buildImageWithOut(imageName, + `FROM busybox + HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceStart" + out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateStarting) + + containerID := task.Status.ContainerStatus.ContainerID + + // wait for health check to work + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID) + failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) + return failingStreak, nil + }, checker.GreaterThan, 0) + + // task should be blocked at starting status + task = d.getTask(c, task.ID) + c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) + + // make it healthy + d.Cmd("exec", containerID, "touch", "/status") + + // Task should be at running status + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) +} + +// start a service whose task is unhealthy at beginning +// its tasks should be blocked in starting stage, until health check is passed +func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // service started from this image won't pass health check + imageName := "testhealth" + _, _, err := d.buildImageWithOut(imageName, + `FROM busybox + HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceStart" + out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateStarting) + + containerID := task.Status.ContainerStatus.ContainerID + + // wait for health check to work + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID) + failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) + return failingStreak, nil + }, checker.GreaterThan, 0) + + // task should be blocked at starting status + task = d.getTask(c, task.ID) + c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) + + // make it healthy + d.Cmd("exec", containerID, "touch", "/status") + // Task should be at running status + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_logs_experimental_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_logs_experimental_test.go new file mode 100644 index 0000000..c221654 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_logs_experimental_test.go @@ -0,0 +1,96 @@ +// +build !windows + +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +type logMessage struct { + err error + data []byte +} + +func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) { + testRequires(c, ExperimentalDaemon) + + d := s.AddDaemon(c, true, true) + + // we have multiple services here for detecting the goroutine issue #28915 + services := map[string]string{ + "TestServiceLogs1": "hello1", + "TestServiceLogs2": "hello2", + } + + for name, message := range services { + out, err := d.Cmd("service", "create", "--name", name, "busybox", + "sh", "-c", fmt.Sprintf("echo %s; tail -f /dev/null", message)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + } + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, + d.checkActiveContainerCount, checker.Equals, len(services)) + + for name, message := range services { + out, err := d.Cmd("service", "logs", name) + c.Assert(err, checker.IsNil) + c.Logf("log for %q: %q", name, out) + c.Assert(out, checker.Contains, message) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) { + testRequires(c, ExperimentalDaemon) + + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsFollow" + + out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + args := []string{"service", "logs", "-f", name} + cmd := exec.Command(dockerBinary, d.prependHostArg(args)...) + r, w := io.Pipe() + cmd.Stdout = w + cmd.Stderr = w + c.Assert(cmd.Start(), checker.IsNil) + + // Make sure pipe is written to + ch := make(chan *logMessage) + done := make(chan struct{}) + go func() { + reader := bufio.NewReader(r) + for { + msg := &logMessage{} + msg.data, _, msg.err = reader.ReadLine() + select { + case ch <- msg: + case <-done: + return + } + } + }() + + for i := 0; i < 3; i++ { + msg := <-ch + c.Assert(msg.err, checker.IsNil) + c.Assert(string(msg.data), checker.Contains, "log test") + } + close(done) + + c.Assert(cmd.Process.Kill(), checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_scale_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_scale_test.go new file mode 100644 index 0000000..29cca23 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_scale_test.go @@ -0,0 +1,57 @@ +// +build !windows + +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceScale(c *check.C) { + d := s.AddDaemon(c, true, true) + + service1Name := "TestService1" + service1Args := append([]string{"service", "create", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // global mode + service2Name := "TestService2" + service2Args := append([]string{"service", "create", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // Create services + out, err := d.Cmd(service1Args...) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd(service2Args...) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "scale", "TestService1=2") + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "scale", "TestService1=foobar") + c.Assert(err, checker.NotNil) + + str := fmt.Sprintf("%s: invalid replicas value %s", service1Name, "foobar") + if !strings.Contains(out, str) { + c.Errorf("got: %s, expected has sub string: %s", out, str) + } + + out, err = d.Cmd("service", "scale", "TestService1=-1") + c.Assert(err, checker.NotNil) + + str = fmt.Sprintf("%s: invalid replicas value %s", service1Name, "-1") + if !strings.Contains(out, str) { + c.Errorf("got: %s, expected has sub string: %s", out, str) + } + + // TestService2 is a global mode + out, err = d.Cmd("service", "scale", "TestService2=2") + c.Assert(err, checker.NotNil) + + str = fmt.Sprintf("%s: scale can only be used with replicated mode\n", service2Name) + if out != str { + c.Errorf("got: %s, expected: %s", out, str) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_update_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_update_test.go new file mode 100644 index 0000000..837370c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_update_test.go @@ -0,0 +1,130 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "TestServiceUpdatePort" + serviceArgs := append([]string{"service", "create", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // Create a service with a port mapping of 8080:8081. + out, err := d.Cmd(serviceArgs...) + c.Assert(err, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. + _, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName) + c.Assert(err, checker.IsNil) + + // Inspect the service and verify port mapping + expected := []swarm.PortConfig{ + { + Protocol: "tcp", + PublishedPort: 8082, + TargetPort: 8083, + PublishMode: "ingress", + }, + } + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.EndpointSpec.Ports }}", serviceName) + c.Assert(err, checker.IsNil) + + var portConfig []swarm.PortConfig + if err := json.Unmarshal([]byte(out), &portConfig); err != nil { + c.Fatalf("invalid JSON in inspect result: %v (%s)", err, out) + } + c.Assert(portConfig, checker.DeepEquals, expected) +} + +func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--name=test", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service := d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 0) + + // add label to empty set + out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") + + // add label to non-empty set + out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 2) + c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar") + + out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo2"], checker.Equals, "") + + out, err = d.Cmd("service", "update", "test", "--label-rm", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 0) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "") + + // now make sure we can add again + out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") +} + +func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { + d := s.AddDaemon(c, true, true) + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + testTarget := "testing" + serviceName := "test" + + out, err := d.Cmd("service", "create", "--name", serviceName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // add secret + out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) + + // remove + out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 0) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_sni_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_sni_test.go new file mode 100644 index 0000000..fb896d5 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_sni_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/http/httptest" + "net/url" + "os/exec" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestClientSetsTLSServerName(c *check.C) { + c.Skip("Flakey test") + // there may be more than one hit to the server for each registry request + serverNameReceived := []string{} + var serverName string + + virtualHostServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverNameReceived = append(serverNameReceived, r.TLS.ServerName) + })) + defer virtualHostServer.Close() + // discard TLS handshake errors written by default to os.Stderr + virtualHostServer.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + + u, err := url.Parse(virtualHostServer.URL) + c.Assert(err, check.IsNil) + hostPort := u.Host + serverName = strings.Split(hostPort, ":")[0] + + repoName := fmt.Sprintf("%v/dockercli/image:latest", hostPort) + cmd := exec.Command(dockerBinary, "pull", repoName) + cmd.Run() + + // check that the fake server was hit at least once + c.Assert(len(serverNameReceived) > 0, check.Equals, true) + // check that for each hit the right server name was received + for _, item := range serverNameReceived { + c.Check(item, check.Equals, serverName) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_stack_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_stack_test.go new file mode 100644 index 0000000..fd9b154 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_stack_test.go @@ -0,0 +1,186 @@ +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "sort" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestStackRemoveUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "remove", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackPSUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "ps", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackServicesUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "services", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackDeployComposeFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testStackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/default.yaml", + testStackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("stack", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n"+"testdeploy 2\n") + + out, err = d.Cmd("stack", "rm", testStackName) + c.Assert(err, checker.IsNil) + out, err = d.Cmd("stack", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n") +} + +func (s *DockerSwarmSuite) TestStackDeployWithSecretsTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("secret", "create", "outside", "fixtures/secrets/default") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + testStackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/secrets.yaml", + testStackName, + } + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", "testdeploy_web") + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 3) + + sort.Sort(sortSecrets(refs)) + c.Assert(refs[0].SecretName, checker.Equals, "outside") + c.Assert(refs[1].SecretName, checker.Equals, "testdeploy_special") + c.Assert(refs[1].File.Name, checker.Equals, "special") + c.Assert(refs[2].SecretName, checker.Equals, "testdeploy_super") + c.Assert(refs[2].File.Name, checker.Equals, "foo.txt") + c.Assert(refs[2].File.Mode, checker.Equals, os.FileMode(0400)) + + // Deploy again to ensure there are no errors when secret hasn't changed + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestStackRemove(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/remove.yaml", + stackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("stack", "ps", stackName) + c.Assert(err, checker.IsNil) + c.Assert(strings.Split(strings.TrimSpace(out), "\n"), checker.HasLen, 2) + + out, err = d.Cmd("stack", "rm", stackName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Removing service testdeploy_web") + c.Assert(out, checker.Contains, "Removing network testdeploy_default") + c.Assert(out, checker.Contains, "Removing secret testdeploy_special") +} + +type sortSecrets []swarm.SecretReference + +func (s sortSecrets) Len() int { return len(s) } +func (s sortSecrets) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s sortSecrets) Less(i, j int) bool { return s[i].SecretName < s[j].SecretName } + +// testDAB is the DAB JSON used for testing. +// TODO: Use template/text and substitute "Image" with the result of +// `docker inspect --format '{{index .RepoDigests 0}}' busybox:latest` +const testDAB = `{ + "Version": "0.1", + "Services": { + "srv1": { + "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", + "Command": ["top"] + }, + "srv2": { + "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", + "Command": ["tail"], + "Args": ["-f", "/dev/null"] + } + } +}` + +func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) { + testRequires(c, ExperimentalDaemon) + // setup + testStackName := "test" + testDABFileName := testStackName + ".dab" + defer os.RemoveAll(testDABFileName) + err := ioutil.WriteFile(testDABFileName, []byte(testDAB), 0444) + c.Assert(err, checker.IsNil) + d := s.AddDaemon(c, true, true) + // deploy + stackArgs := []string{ + "stack", "deploy", + "--bundle-file", testDABFileName, + testStackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Loading bundle from test.dab\n") + c.Assert(out, checker.Contains, "Creating service test_srv1\n") + c.Assert(out, checker.Contains, "Creating service test_srv2\n") + // ls + stackArgs = []string{"stack", "ls"} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n"+"test 2\n") + // rm + stackArgs = []string{"stack", "rm", testStackName} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Removing service test_srv1\n") + c.Assert(out, checker.Contains, "Removing service test_srv2\n") + // ls (empty) + stackArgs = []string{"stack", "ls"} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_start_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_start_test.go new file mode 100644 index 0000000..b1cea35 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_start_test.go @@ -0,0 +1,199 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Regression test for https://github.com/docker/docker/issues/7843 +func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) { + // Windows does not support link + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test", "busybox") + + // Expect this to fail because the above container is stopped, this is what we want + out, _, err := dockerCmdWithError("run", "--name", "test2", "--link", "test:test", "busybox") + // err shouldn't be nil because container test2 try to link to stopped container + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + ch := make(chan error) + go func() { + // Attempt to start attached to the container that won't start + // This should return an error immediately since the container can't be started + if out, _, err := dockerCmdWithError("start", "-a", "test2"); err == nil { + ch <- fmt.Errorf("Expected error but got none:\n%s", out) + } + close(ch) + }() + + select { + case err := <-ch: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatalf("Attach did not exit properly") + } +} + +// gh#8555: Exit code should be passed through when using start -a +func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") + out = strings.TrimSpace(out) + + // make sure the container has exited before trying the "start -a" + dockerCmd(c, "wait", out) + + startOut, exitCode, err := dockerCmdWithError("start", "-a", out) + // start command should fail + c.Assert(err, checker.NotNil, check.Commentf("startOut: %s", startOut)) + // start -a did not respond with proper exit code + c.Assert(exitCode, checker.Equals, 1, check.Commentf("startOut: %s", startOut)) + +} + +func (s *DockerSuite) TestStartAttachSilent(c *check.C) { + name := "teststartattachcorrectexitcode" + dockerCmd(c, "run", "--name", name, "busybox", "echo", "test") + + // make sure the container has exited before trying the "start -a" + dockerCmd(c, "wait", name) + + startOut, _ := dockerCmd(c, "start", "-a", name) + // start -a produced unexpected output + c.Assert(startOut, checker.Equals, "test\n") +} + +func (s *DockerSuite) TestStartRecordError(c *check.C) { + // TODO Windows CI: Requires further porting work. Should be possible. + testRequires(c, DaemonIsLinux) + // when container runs successfully, we should not have state.Error + dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") + stateErr := inspectField(c, "test", "State.Error") + // Expected to not have state error + c.Assert(stateErr, checker.Equals, "") + + // Expect this to fail and records error because of ports conflict + out, _, err := dockerCmdWithError("run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top") + // err shouldn't be nil because docker run will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + stateErr = inspectField(c, "test2", "State.Error") + c.Assert(stateErr, checker.Contains, "port is already allocated") + + // Expect the conflict to be resolved when we stop the initial container + dockerCmd(c, "stop", "test") + dockerCmd(c, "start", "test2") + stateErr = inspectField(c, "test2", "State.Error") + // Expected to not have state error but got one + c.Assert(stateErr, checker.Equals, "") +} + +func (s *DockerSuite) TestStartPausedContainer(c *check.C) { + // Windows does not support pausing containers + testRequires(c, IsPausable) + defer unpauseAllContainers() + + runSleepingContainer(c, "-d", "--name", "testing") + + dockerCmd(c, "pause", "testing") + + out, _, err := dockerCmdWithError("start", "testing") + // an error should have been shown that you cannot start paused container + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // an error should have been shown that you cannot start paused container + c.Assert(out, checker.Contains, "Cannot start a paused container, try unpause instead.") +} + +func (s *DockerSuite) TestStartMultipleContainers(c *check.C) { + // Windows does not support --link + testRequires(c, DaemonIsLinux) + // run a container named 'parent' and create two container link to `parent` + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + for _, container := range []string{"child_first", "child_second"} { + dockerCmd(c, "create", "--name", container, "--link", "parent:parent", "busybox", "top") + } + + // stop 'parent' container + dockerCmd(c, "stop", "parent") + + out := inspectField(c, "parent", "State.Running") + // Container should be stopped + c.Assert(out, checker.Equals, "false") + + // start all the three containers, container `child_first` start first which should be failed + // container 'parent' start second and then start container 'child_second' + expOut := "Cannot link to a non running container" + expErr := "failed to start containers: [child_first]" + out, _, err := dockerCmdWithError("start", "child_first", "parent", "child_second") + // err shouldn't be nil because start will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // output does not correspond to what was expected + if !(strings.Contains(out, expOut) || strings.Contains(err.Error(), expErr)) { + c.Fatalf("Expected out: %v with err: %v but got out: %v with err: %v", expOut, expErr, out, err) + } + + for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} { + out := inspectField(c, container, "State.Running") + // Container running state wrong + c.Assert(out, checker.Equals, expected) + } +} + +func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { + // run multiple containers to test + for _, container := range []string{"test1", "test2", "test3"} { + runSleepingContainer(c, "--name", container) + } + + // stop all the containers + for _, container := range []string{"test1", "test2", "test3"} { + dockerCmd(c, "stop", container) + } + + // test start and attach multiple containers at once, expected error + for _, option := range []string{"-a", "-i", "-ai"} { + out, _, err := dockerCmdWithError("start", option, "test1", "test2", "test3") + // err shouldn't be nil because start will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // output does not correspond to what was expected + c.Assert(out, checker.Contains, "You cannot start and attach multiple containers at once.") + } + + // confirm the state of all the containers be stopped + for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} { + out := inspectField(c, container, "State.Running") + // Container running state wrong + c.Assert(out, checker.Equals, expected) + } +} + +// Test case for #23716 +func (s *DockerSuite) TestStartAttachWithRename(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "-t", "--name", "before", "busybox") + go func() { + c.Assert(waitRun("before"), checker.IsNil) + dockerCmd(c, "rename", "before", "after") + dockerCmd(c, "stop", "--time=2", "after") + }() + _, stderr, _, _ := runCommandWithStdoutStderr(exec.Command(dockerBinary, "start", "-a", "before")) + c.Assert(stderr, checker.Not(checker.Contains), "No such container") +} + +func (s *DockerSuite) TestStartReturnCorrectExitCode(c *check.C) { + dockerCmd(c, "create", "--restart=on-failure:2", "--name", "withRestart", "busybox", "sh", "-c", "exit 11") + dockerCmd(c, "create", "--rm", "--name", "withRm", "busybox", "sh", "-c", "exit 12") + + _, exitCode, err := dockerCmdWithError("start", "-a", "withRestart") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 11) + _, exitCode, err = dockerCmdWithError("start", "-a", "withRm") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 12) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_stats_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_stats_test.go new file mode 100644 index 0000000..804074d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_stats_test.go @@ -0,0 +1,178 @@ +package main + +import ( + "bufio" + "os/exec" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestStatsNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id) + type output struct { + out []byte + err error + } + + ch := make(chan output) + go func() { + out, err := statsCmd.Output() + ch <- output{out, err} + }() + + select { + case outerr := <-ch: + c.Assert(outerr.err, checker.IsNil, check.Commentf("Error running stats: %v", outerr.err)) + c.Assert(string(outerr.out), checker.Contains, id) //running container wasn't present in output + case <-time.After(3 * time.Second): + statsCmd.Process.Kill() + c.Fatalf("stats did not return immediately when not streaming") + } +} + +func (s *DockerSuite) TestStatsContainerNotFound(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _, err := dockerCmdWithError("stats", "notfound") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats, got %q instead", out)) + + out, _, err = dockerCmdWithError("stats", "--no-stream", "notfound") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats with --no-stream, got %q instead", out)) +} + +func (s *DockerSuite) TestStatsAllRunningNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id3 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id3), check.IsNil) + dockerCmd(c, "stop", id3) + + out, _ = dockerCmd(c, "stats", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + if strings.Contains(out, id3) { + c.Fatalf("Did not expect %s in stats, got %s", id3, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + // outLines[2] is id1's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) + // check stat result of id1 contains real data + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) +} + +func (s *DockerSuite) TestStatsAllNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + dockerCmd(c, "stop", id1) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + + out, _ = dockerCmd(c, "stats", "--all", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result of %s is empty: %s", id2, out)) + // check stat result of id1 contains all zero + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.IsNil, check.Commentf("stat result of %s should be empty : %s", id1, out)) +} + +func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + id := make(chan string) + addedChan := make(chan struct{}) + + runSleepingContainer(c, "-d") + statsCmd := exec.Command(dockerBinary, "stats") + stdout, err := statsCmd.StdoutPipe() + c.Assert(err, check.IsNil) + c.Assert(statsCmd.Start(), check.IsNil) + defer statsCmd.Process.Kill() + + go func() { + containerID := <-id + matchID := regexp.MustCompile(containerID) + + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + switch { + case matchID.MatchString(scanner.Text()): + close(addedChan) + return + } + } + }() + + out, _ := runSleepingContainer(c, "-d") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + id <- strings.TrimSpace(out)[:12] + + select { + case <-time.After(30 * time.Second): + c.Fatal("failed to observe new container created added to stats") + case <-addedChan: + // ignore, done + } +} + +func (s *DockerSuite) TestStatsFormatAll(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "run", "-d", "--name=RunningOne", "busybox", "top") + c.Assert(waitRun("RunningOne"), check.IsNil) + dockerCmd(c, "run", "-d", "--name=ExitedOne", "busybox", "top") + dockerCmd(c, "stop", "ExitedOne") + c.Assert(waitExited("ExitedOne", 5*time.Second), check.IsNil) + + out, _ := dockerCmd(c, "stats", "--no-stream", "--format", "{{.Name}}") + c.Assert(out, checker.Contains, "RunningOne") + c.Assert(out, checker.Not(checker.Contains), "ExitedOne") + + out, _ = dockerCmd(c, "stats", "--all", "--no-stream", "--format", "{{.Name}}") + c.Assert(out, checker.Contains, "RunningOne") + c.Assert(out, checker.Contains, "ExitedOne") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_stop_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_stop_test.go new file mode 100644 index 0000000..103d013 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_stop_test.go @@ -0,0 +1,17 @@ +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestStopContainerWithRestartPolicyAlways(c *check.C) { + dockerCmd(c, "run", "--name", "verifyRestart1", "-d", "--restart=always", "busybox", "false") + dockerCmd(c, "run", "--name", "verifyRestart2", "-d", "--restart=always", "busybox", "false") + + c.Assert(waitRun("verifyRestart1"), checker.IsNil) + c.Assert(waitRun("verifyRestart2"), checker.IsNil) + + dockerCmd(c, "stop", "verifyRestart1") + dockerCmd(c, "stop", "verifyRestart2") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_test.go new file mode 100644 index 0000000..818b3ac --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_test.go @@ -0,0 +1,1278 @@ +// +build !windows + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/go-check/check" + "github.com/vishvananda/netlink" +) + +func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + getSpec := func() swarm.Spec { + sw := d.getSwarm(c) + return sw.Spec + } + + out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + + // setting anything under 30m for cert-expiry is not allowed + out, err = d.Cmd("swarm", "update", "--cert-expiry", "15m") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "minimum certificate expiry time") + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) +} + +func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { + d := s.AddDaemon(c, false, false) + + getSpec := func() swarm.Spec { + sw := d.getSwarm(c) + return sw.Spec + } + + out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + + c.Assert(d.Leave(true), checker.IsNil) + time.Sleep(500 * time.Millisecond) // https://github.com/docker/swarmkit/issues/1421 + out, err = d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 90*24*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 5*time.Second) +} + +func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *check.C) { + testRequires(c, IPv6) + d1 := s.AddDaemon(c, false, false) + out, err := d1.Cmd("swarm", "init", "--listen-addr", "::1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + d2 := s.AddDaemon(c, false, false) + out, err = d2.Cmd("swarm", "join", "::1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + out, err = d2.Cmd("info") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, "Swarm: active") +} + +func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *check.C) { + d := s.AddDaemon(c, false, false) + out, err := d.Cmd("swarm", "init", "--advertise-addr", "0.0.0.0") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "advertise address must be a non-zero IP address") +} + +func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { + // init swarm mode and stop a daemon + d := s.AddDaemon(c, true, true) + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d.Stop(), checker.IsNil) + + // start a daemon with --cluster-store and --cluster-advertise + err = d.Start("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375") + c.Assert(err, checker.NotNil) + content, _ := ioutil.ReadFile(d.logFile.Name()) + c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + + // start a daemon with --live-restore + err = d.Start("--live-restore") + c.Assert(err, checker.NotNil) + content, _ = ioutil.ReadFile(d.logFile.Name()) + c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode") + // restart for teardown + c.Assert(d.Start(), checker.IsNil) +} + +// Test case for #24090 +func (s *DockerSwarmSuite) TestSwarmNodeListHostname(c *check.C) { + d := s.AddDaemon(c, true, true) + + // The first line should contain "HOSTNAME" + out, err := d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(strings.Split(out, "\n")[0], checker.Contains, "HOSTNAME") +} + +func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + containers := d.activeContainers() + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid")) +} + +// Test case for #24270 +func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name1 := "redis-cluster-md5" + name2 := "redis-cluster" + name3 := "other-cluster" + out, err := d.Cmd("service", "create", "--name", name1, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--name", name2, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--name", name3, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + filter1 := "name=redis-cluster-md5" + filter2 := "name=redis-cluster" + + // We search checker.Contains with `name+" "` to prevent prefix only. + out, err = d.Cmd("service", "ls", "--filter", filter1) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Not(checker.Contains), name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls", "--filter", filter2) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Contains, name3+" ") +} + +func (s *DockerSwarmSuite) TestSwarmNodeListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("node", "inspect", "--format", "{{ .Description.Hostname }}", "self") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + name := strings.TrimSpace(out) + + filter := "name=" + name[:4] + + out, err = d.Cmd("node", "ls", "--filter", filter) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("node", "ls", "--filter", "name=none") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 3) + + filter := "name=redis-cluster" + + out, err = d.Cmd("node", "ps", "--filter", filter, "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("node", "ps", "--filter", "name=none", "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") +} + +// Test case for #25375 +func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { + d := s.AddDaemon(c, true, true) + + testCases := []struct { + name string + publishAdd []string + ports string + }{ + { + name: "simple-syntax", + publishAdd: []string{ + "80:80", + "80:80", + "80:80", + "80:20", + }, + ports: "[{ tcp 80 80 ingress}]", + }, + { + name: "complex-syntax", + publishAdd: []string{ + "target=90,published=90,protocol=tcp,mode=ingress", + "target=90,published=90,protocol=tcp,mode=ingress", + "target=90,published=90,protocol=tcp,mode=ingress", + "target=30,published=90,protocol=tcp,mode=ingress", + }, + ports: "[{ tcp 90 90 ingress}]", + }, + } + + for _, tc := range testCases { + out, err := d.Cmd("service", "create", "--name", tc.name, "--label", "x=y", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[0], tc.name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[1], tc.name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[2], "--publish-add", tc.publishAdd[3], tc.name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", tc.name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, tc.ports) + } +} + +func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + container := strings.TrimSpace(out) + + out, err = d.Cmd("exec", container, "id") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777") +} + +func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("run", "-id", "--restart=always", "--net=foo", "--name=test", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + d.Restart() + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} + +func (s *DockerSwarmSuite) TestSwarmContainerEndpointOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=first", "--net-alias=first-alias", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=second", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // ping first container and its alias + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil, check.Commentf(out)) + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first-alias") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "testnet") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + networkID := strings.TrimSpace(out) + + out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top") + c.Assert(err, checker.IsNil) + cID := strings.TrimSpace(out) + d.waitRun(cID) + + _, err = d.Cmd("rm", "-f", cID) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("network", "rm", "testnet") + c.Assert(err, checker.IsNil) + + checkNetwork := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("network", "ls") + c.Assert(err, checker.IsNil) + return out, nil + } + + waitAndAssert(c, 3*time.Second, checkNetwork, checker.Not(checker.Contains), "testnet") +} + +func (s *DockerSwarmSuite) TestOverlayAttachable(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // validate attachable + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") + + // validate containers can attache to this overlay network + out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // redo validation, there was a bug that the value of attachable changes after + // containers attach to the network + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create an attachable swarm network + nwName := "attovl" + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", nwName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Connect a container to the network + out, err = d.Cmd("run", "-d", "--network", nwName, "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Leave the swarm + err = d.Leave(true) + c.Assert(err, checker.IsNil) + + // Check the container is disconnected + out, err = d.Cmd("inspect", "c1", "--format", "{{.NetworkSettings.Networks."+nwName+"}}") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Check the network is gone + out, err = d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), nwName) +} + +func (s *DockerSwarmSuite) TestOverlayAttachableReleaseResourcesOnFailure(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create attachable network + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "--subnet", "10.10.9.0/24", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attach a container with specific IP + out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "--ip", "10.10.9.33", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attempt to attach another contianer with same IP, must fail + _, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c2", "--ip", "10.10.9.33", "busybox", "top") + c.Assert(err, checker.NotNil) + + // Remove first container + out, err = d.Cmd("rm", "-f", "c1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Verify the network can be removed, no phantom network attachment task left over + out, err = d.Cmd("network", "rm", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmRemoveInternalNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "ingress" + out, err := d.Cmd("network", "rm", name) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, name) + c.Assert(strings.TrimSpace(out), checker.Contains, "is a pre-defined network and cannot be removed") +} + +// Test case for #24108, also the case from: +// https://github.com/docker/docker/pull/24620#issuecomment-233715656 +func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + filter := "name=redis-cluster" + + checkNumTasks := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + return len(strings.Split(out, "\n")) - 2, nil // includes header and nl in last line + } + + // wait until all tasks have been created + waitAndAssert(c, defaultReconciliationTimeout, checkNumTasks, checker.Equals, 3) + + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name="+name+".1", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + name = "redis-cluster-sha1" + out, err = d.Cmd("service", "create", "--name", name, "--mode=global", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + waitAndAssert(c, defaultReconciliationTimeout, checkNumTasks, checker.Equals, 1) + + filter = "name=redis-cluster" + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name="+name, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a bare container + out, err := d.Cmd("run", "-d", "--name=bare-container", "busybox", "top") + c.Assert(err, checker.IsNil) + bareID := strings.TrimSpace(out)[:12] + // Create a service + name := "busybox-top" + out, err = d.Cmd("service", "create", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceRunningTasks(name), checker.Equals, 1) + + // Filter non-tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false") + c.Assert(err, checker.IsNil) + psOut := strings.TrimSpace(out) + c.Assert(psOut, checker.Equals, bareID, check.Commentf("Expected id %s, got %s for is-task label, output %q", bareID, psOut, out)) + + // Filter tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=true") + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.Trim(out, "\n "), "\n") + c.Assert(lines, checker.HasLen, 1) + c.Assert(lines[0], checker.Not(checker.Equals), bareID, check.Commentf("Expected not %s, but got it for is-task label, output %q", bareID, out)) +} + +const globalNetworkPlugin = "global-network-plugin" +const globalIPAMPlugin = "global-ipam-plugin" + +func setupRemoteGlobalNetworkPlugin(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Scope":"global"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.AllocateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.FreeNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected address from the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressReleaseReq.Address != gw { + fmt.Fprintf(w, `{"Error":"unknown address"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) + + ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteGlobalNetworkPlugin(c, mux, s.server.URL, globalNetworkPlugin, globalIPAMPlugin) + defer func() { + s.server.Close() + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) + }() + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", globalNetworkPlugin, "foo") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "not supported in swarm mode") +} + +// Test case for #24712 +func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + path := filepath.Join(d.folder, "env.txt") + err := ioutil.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0644) + c.Assert(err, checker.IsNil) + + name := "worker" + out, err := d.Cmd("service", "create", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // The complete env is [VAR1=A VAR2=A VAR1=B VAR1=C VAR2= VAR2] and duplicates will be removed => [VAR1=C VAR2] + out, err = d.Cmd("inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.Env }}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[VAR1=C VAR2]") +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + + ttyCheck := "if [ -t 0 ]; then echo TTY > /status && top; else echo none > /status && top; fi" + + // Without --tty + expectedOutput := "none" + out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + // Remove service + out, err = d.Cmd("service", "rm", name) + c.Assert(err, checker.IsNil) + // Make sure container has been destroyed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + + // With --tty + expectedOutput = "TTY" + out, err = d.Cmd("service", "create", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "false") + + _, err = d.Cmd("service", "update", "--tty", name) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. + expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out, err = d.Cmd("exec", id, "cat", "/etc/resolv.conf") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + _, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name) + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.DNSConfig }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "{[1.2.3.4] [example.com] [timeout:3]}") +} + +func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { + d := s.AddDaemon(c, false, false) + + outs, err := d.Cmd("swarm", "init", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + c.Assert(d.Restart(), checker.IsNil) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + cmd := d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString("wrong-secret-key") + out, err := cmd.CombinedOutput() + c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) + c.Assert(string(out), checker.Contains, "invalid key") + + cmd = d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + out, err = cmd.CombinedOutput() + c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + + outs, err = d.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + // Wait for autolock to be turned off + time.Sleep(time.Second) + + c.Assert(d.Restart(), checker.IsNil) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") +} + +func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) { + d := s.AddDaemon(c, false, false) + + outs, err := d.Cmd("swarm", "init", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(d.Restart("--swarm-default-advertise-addr=lo"), checker.IsNil) + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + outs, err = d.Cmd("swarm", "leave", "--force") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + outs, err = d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { + d := s.AddDaemon(c, true, true) + + outs, err := d.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // Rotate multiple times + for i := 0; i != 3; i++ { + outs, err = d.Cmd("swarm", "unlock-key", "-q", "--rotate") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + // Strip \n + newUnlockKey := outs[:len(outs)-1] + c.Assert(newUnlockKey, checker.Not(checker.Equals), "") + c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) + + c.Assert(d.Restart(), checker.IsNil) + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd := d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + out, err := cmd.CombinedOutput() + + if err == nil { + // On occasion, the daemon may not have finished + // rotating the KEK before restarting. The test is + // intentionally written to explore this behavior. + // When this happens, unlocking with the old key will + // succeed. If we wait for the rotation to happen and + // restart again, the new key should be required this + // time. + + time.Sleep(3 * time.Second) + + c.Assert(d.Restart(), checker.IsNil) + + cmd = d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + out, err = cmd.CombinedOutput() + } + c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) + c.Assert(string(out), checker.Contains, "invalid key") + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd = d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(newUnlockKey) + out, err = cmd.CombinedOutput() + c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + + unlockKey = newUnlockKey + } +} + +func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. + expectedOutput := "1.2.3.4\texample.com" + out, err = d.Cmd("exec", id, "cat", "/etc/hosts") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + // Manager Addresses will always show Node 1's address + expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.port) + + out, err := d1.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d2.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d3.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +func (s *DockerSwarmSuite) TestSwarmServiceInspectPretty(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--name", name, "--limit-cpu=0.5", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + expectedOutput := ` +Resources: + Limits: + CPU: 0.5` + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--ipam-opt", "foo=bar", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") + + out, err = d.Cmd("service", "create", "--network=foo", "--name", "top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") +} + +// TODO: migrate to a unit test +// This test could be migrated to unit test and save costly integration test, +// once PR #29143 is merged. +func (s *DockerSwarmSuite) TestSwarmUpdateWithoutArgs(c *check.C) { + d := s.AddDaemon(c, true, true) + + expectedOutput := ` +Usage: docker swarm update [OPTIONS] + +Update the swarm + +Options:` + + out, err := d.Cmd("swarm", "update") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "trusted" + serviceCmd := d.command("-D", "service", "create", "--name", name, repoName, "top") + s.trustSuite.trustedCmd(serviceCmd) + out, _, err := runCommandWithOutput(serviceCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "resolved image tag to", check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) + + // Try trusted service create on an untrusted tag. + + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + name = "untrusted" + serviceCmd = d.command("service", "create", "--name", name, repoName, "top") + s.trustSuite.trustedCmd(serviceCmd) + out, _, err = runCommandWithOutput(serviceCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceUpdate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "myservice" + + // Create a service without content trust + _, err := d.Cmd("service", "create", "--name", name, repoName, "top") + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Daemon won't insert the digest because this is disabled by + // DOCKER_SERVICE_PREFER_OFFLINE_IMAGE. + c.Assert(out, check.Not(checker.Contains), repoName+"@", check.Commentf(out)) + + serviceCmd := d.command("-D", "service", "update", "--image", repoName, name) + s.trustSuite.trustedCmd(serviceCmd) + out, _, err = runCommandWithOutput(serviceCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "resolved image tag to", check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) + + // Try trusted service update on an untrusted tag. + + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + serviceCmd = d.command("service", "update", "--image", repoName, name) + s.trustSuite.trustedCmd(serviceCmd) + out, _, err = runCommandWithOutput(serviceCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) +} + +// Test case for issue #27866, which did not allow NW name that is the prefix of a swarm NW ID. +// e.g. if the ingress ID starts with "n1", it was impossible to create a NW named "n1". +func (s *DockerSwarmSuite) TestSwarmNetworkCreateIssue27866(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("network", "inspect", "-f", "{{.Id}}", "ingress") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + ingressID := strings.TrimSpace(out) + c.Assert(ingressID, checker.Not(checker.Equals), "") + + // create a network of which name is the prefix of the ID of an overlay network + // (ingressID in this case) + newNetName := ingressID[0:2] + out, err = d.Cmd("network", "create", "--driver", "overlay", newNetName) + // In #27866, it was failing because of "network with name %s already exists" + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "rm", newNetName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) +} + +// Test case for https://github.com/docker/docker/pull/27938#issuecomment-265768303 +// This test creates two networks with the same name sequentially, with various drivers. +// Since the operations in this test are done sequentially, the 2nd call should fail with +// "network with name FOO already exists". +// Note that it is to ok have multiple networks with the same name if the operations are done +// in parallel. (#18864) +func (s *DockerSwarmSuite) TestSwarmNetworkCreateDup(c *check.C) { + d := s.AddDaemon(c, true, true) + drivers := []string{"bridge", "overlay"} + for i, driver1 := range drivers { + nwName := fmt.Sprintf("network-test-%d", i) + for _, driver2 := range drivers { + c.Logf("Creating a network named %q with %q, then %q", + nwName, driver1, driver2) + out, err := d.Cmd("network", "create", "--driver", driver1, nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "create", "--driver", driver2, nwName) + c.Assert(out, checker.Contains, + fmt.Sprintf("network with name %s already exists", nwName)) + c.Assert(err, checker.NotNil) + c.Logf("As expected, the attempt to network %q with %q failed: %s", + nwName, driver2, out) + out, err = d.Cmd("network", "rm", nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_unix_test.go new file mode 100644 index 0000000..d9e56ce --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_unix_test.go @@ -0,0 +1,52 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Make sure task stays pending before plugin is available + waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceTasksInState("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals, 1) + + plugin := newVolumePlugin(c, "customvolumedriver") + defer plugin.Close() + + // create a dummy volume to trigger lazy loading of the plugin + out, err = d.Cmd("volume", "create", "-d", "customvolumedriver", "hello") + + // TODO(aaronl): It will take about 15 seconds for swarm to realize the + // plugin was loaded. Switching the test over to plugin v2 would avoid + // this long delay. + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + containerID := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "-f", "{{json .Mounts}}", containerID) + c.Assert(err, checker.IsNil) + + var mounts []struct { + Name string + Driver string + } + + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "my-volume") + c.Assert(mounts[0].Driver, checker.Equals, "customvolumedriver") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_tag_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_tag_test.go new file mode 100644 index 0000000..b7d2b1d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_tag_test.go @@ -0,0 +1,225 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +// tagging a named image in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + dockerCmd(c, "tag", "busybox:latest", "testfoobarbaz") +} + +// tagging an image by ID in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { + imageID := inspectField(c, "busybox", "Id") + dockerCmd(c, "tag", imageID, "testfoobarbaz") +} + +// ensure we don't allow the use of invalid repository names; these tag operations should fail +func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { + invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"} + + for _, repo := range invalidRepos { + out, _, err := dockerCmdWithError("tag", "busybox", repo) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repo, out)) + } +} + +// ensure we don't allow the use of invalid tags; these tag operations should fail +func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { + longTag := stringutils.GenerateRandomAlphaOnlyString(121) + + invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} + + for _, repotag := range invalidTags { + out, _, err := dockerCmdWithError("tag", "busybox", repotag) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repotag, out)) + } +} + +// ensure we allow the use of valid tags +func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"} + + for _, repo := range validRepos { + _, _, err := dockerCmdWithError("tag", "busybox:latest", repo) + if err != nil { + c.Errorf("tag busybox %v should have worked: %s", repo, err) + continue + } + deleteImages(repo) + } +} + +// tag an image with an existed tag name without -f option should work +func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + dockerCmd(c, "tag", "busybox:latest", "busybox:test") +} + +func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + // test repository name begin with '-' + out, _, err := dockerCmdWithError("tag", "busybox:latest", "-busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) + + // test namespace name begin with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-test/busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) + + // test index name begin with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-index:5000/busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) +} + +// ensure tagging using official names works +// ensure all tags result in the same name +func (s *DockerSuite) TestTagOfficialNames(c *check.C) { + names := []string{ + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + + for _, name := range names { + out, exitCode, err := dockerCmdWithError("tag", "busybox:latest", name+":latest") + if err != nil || exitCode != 0 { + c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) + continue + } + + // ensure we don't have multiple tag names. + out, _, err = dockerCmdWithError("images") + if err != nil { + c.Errorf("listing images failed with errors: %v, %s", err, out) + } else if strings.Contains(out, name) { + c.Errorf("images should not have listed '%s'", name) + deleteImages(name + ":latest") + } + } + + for _, name := range names { + _, exitCode, err := dockerCmdWithError("tag", name+":latest", "fooo/bar:latest") + if err != nil || exitCode != 0 { + c.Errorf("tag %v fooo/bar should have worked: %s", name, err) + continue + } + deleteImages("fooo/bar:latest") + } +} + +// ensure tags can not match digests +func (s *DockerSuite) TestTagMatchesDigest(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + digest := "busybox@sha256:abcdef76720241213f5303bda7704ec4c2ef75613173910a56fb1b6e20251507" + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "busybox:latest", digest) + if err == nil { + c.Fatal("digest tag a name should have failed") + } + // check that no new image matches the digest + _, _, err = dockerCmdWithError("inspect", digest) + if err == nil { + c.Fatal("inspecting by digest should have failed") + } +} + +func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "busybox:latest", "sha256:sometag") + if err == nil { + c.Fatal("tagging with image named \"sha256\" should have failed") + } +} + +// ensure tags cannot create ambiguity with image ids +func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) { + //testRequires(c, DaemonIsLinux) + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + imageID, err := buildImage("notbusybox:latest", + `FROM busybox + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + truncatedImageID := stringid.TruncateID(imageID) + truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID) + + id := inspectField(c, truncatedTag, "Id") + + // Ensure inspect by image id returns image for image id + c.Assert(id, checker.Equals, imageID) + c.Logf("Built image: %s", imageID) + + // test setting tag fails + _, _, err = dockerCmdWithError("tag", "busybox:latest", truncatedTag) + if err != nil { + c.Fatalf("Error tagging with an image id: %s", err) + } + + id = inspectField(c, truncatedTag, "Id") + + // Ensure id is imageID and not busybox:latest + c.Assert(id, checker.Not(checker.Equals), imageID) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_top_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_top_test.go new file mode 100644 index 0000000..caae290 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_top_test.go @@ -0,0 +1,73 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + var expected icmd.Expected + switch daemonPlatform { + case "windows": + expected = icmd.Expected{ExitCode: 1, Err: "Windows does not support arguments to top"} + default: + expected = icmd.Expected{Out: "PID"} + } + result := dockerCmdWithResult("top", cleanedContainerID, "-o", "pid") + c.Assert(result, icmd.Matches, expected) +} + +func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + // Windows will list the name of the launched executable which in this case is busybox.exe, without the parameters. + // Linux will display the command executed in the container + var lookingFor string + if daemonPlatform == "windows" { + lookingFor = "busybox.exe" + } else { + lookingFor = "top" + } + + c.Assert(out1, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the first time", lookingFor)) + c.Assert(out2, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the second time", lookingFor)) +} + +// TestTopWindowsCoreProcesses validates that there are lines for the critical +// processes which are found in a Windows container. Note Windows is architecturally +// very different to Linux in this regard. +func (s *DockerSuite) TestTopWindowsCoreProcesses(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + out1, _ := dockerCmd(c, "top", cleanedContainerID) + lookingFor := []string{"smss.exe", "csrss.exe", "wininit.exe", "services.exe", "lsass.exe", "CExecSvc.exe"} + for i, s := range lookingFor { + c.Assert(out1, checker.Contains, s, check.Commentf("top should've listed `%s` in the process list, but failed. Test case %d", s, i)) + } +} + +func (s *DockerSuite) TestTopPrivileged(c *check.C) { + // Windows does not support --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "-i", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + c.Assert(out1, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the first time")) + c.Assert(out2, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the second time")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_update_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_update_test.go new file mode 100644 index 0000000..0b31bb4 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_update_test.go @@ -0,0 +1,41 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false") + timeout := 60 * time.Second + if daemonPlatform == "windows" { + timeout = 180 * time.Second + } + + id := strings.TrimSpace(string(out)) + + // update restart policy to on-failure:5 + dockerCmd(c, "update", "--restart=on-failure:5", id) + + err := waitExited(id, timeout) + c.Assert(err, checker.IsNil) + + count := inspectField(c, id, "RestartCount") + c.Assert(count, checker.Equals, "5") + + maximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(maximumRetryCount, checker.Equals, "5") +} + +func (s *DockerSuite) TestUpdateRestartWithAutoRemoveFlag(c *check.C) { + out, _ := runSleepingContainer(c, "--rm") + id := strings.TrimSpace(out) + + // update restart policy for an AutoRemove container + out, _, err := dockerCmdWithError("update", "--restart=always", id) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Restart policy cannot be updated because AutoRemove is enabled for the container") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_update_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_update_unix_test.go new file mode 100644 index 0000000..580ff02 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_update_unix_test.go @@ -0,0 +1,283 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "github.com/kr/pty" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestUpdateRunningContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdateRunningContainerWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + dockerCmd(c, "restart", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdateStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + dockerCmd(c, "run", "--name", name, "-m", "300M", "busybox", "cat", file) + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + out, _ := dockerCmd(c, "start", "-a", name) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdatePausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, cpuShare) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--cpu-shares", "1000", "busybox", "top") + dockerCmd(c, "pause", name) + dockerCmd(c, "update", "--cpu-shares", "500", name) + + c.Assert(inspectField(c, name, "HostConfig.CPUShares"), checker.Equals, "500") + + dockerCmd(c, "unpause", name) + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "500") +} + +func (s *DockerSuite) TestUpdateWithUntouchedFields(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuShare) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "--cpu-shares", "800", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + // Update memory and not touch cpus, `cpuset.cpus` should still have the old value + out := inspectField(c, name, "HostConfig.CPUShares") + c.Assert(out, check.Equals, "800") + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "800") +} + +func (s *DockerSuite) TestUpdateContainerInvalidValue(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + out, _, err := dockerCmdWithError("update", "-m", "2M", name) + c.Assert(err, check.NotNil) + expected := "Minimum memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestUpdateContainerWithoutFlags(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + _, _, err := dockerCmdWithError("update", name) + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestUpdateKernelMemory(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--kernel-memory", "50M", "busybox", "top") + dockerCmd(c, "update", "--kernel-memory", "100M", name) + + c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "104857600") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "104857600") +} + +func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + isNewKernel := kernel.CheckKernelVersion(4, 6, 0) + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") + _, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name) + // Update kernel memory to a running container without kernel memory initialized + // is not allowed before kernel version 4.6. + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "pause", name) + _, _, err = dockerCmdWithError("update", "--kernel-memory", "200M", name) + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + dockerCmd(c, "unpause", name) + + dockerCmd(c, "stop", name) + dockerCmd(c, "update", "--kernel-memory", "300M", name) + dockerCmd(c, "start", name) + + c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "314572800") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") +} + +func (s *DockerSuite) TestUpdateSwapMemoryOnly(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateInvalidSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") + _, _, err := dockerCmdWithError("update", "--memory-swap", "200M", name) + // Update invalid swap memory should fail. + // This will pass docker config validation, but failed at kernel validation + c.Assert(err, check.NotNil) + + // Update invalid swap memory with failure should not change HostConfig + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateStats(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuCfsQuota) + name := "foo" + dockerCmd(c, "run", "-d", "-ti", "--name", name, "-m", "500m", "busybox") + + c.Assert(waitRun(name), checker.IsNil) + + getMemLimit := func(id string) uint64 { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + + return v.MemoryStats.Limit + } + preMemLimit := getMemLimit(name) + + dockerCmd(c, "update", "--cpu-quota", "2000", name) + + curMemLimit := getMemLimit(name) + + c.Assert(preMemLimit, checker.Equals, curMemLimit) + +} + +func (s *DockerSuite) TestUpdateMemoryWithSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "busybox", "top") + out, _, err := dockerCmdWithError("update", "--memory", "800M", name) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Memory limit should be smaller than already set memoryswap limit") + + dockerCmd(c, "update", "--memory", "800M", "--memory-swap", "1000M", name) +} + +func (s *DockerSuite) TestUpdateNotAffectMonitorRestartPolicy(c *check.C) { + testRequires(c, DaemonIsLinux, cpuShare) + + out, _ := dockerCmd(c, "run", "-tid", "--restart=always", "busybox", "sh") + id := strings.TrimSpace(string(out)) + dockerCmd(c, "update", "--cpu-shares", "512", id) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + _, err = cpty.Write([]byte("exit\n")) + c.Assert(err, checker.IsNil) + + c.Assert(cmd.Wait(), checker.IsNil) + + // container should restart again and keep running + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(id), checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_userns_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_userns_test.go new file mode 100644 index 0000000..acf7423 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_userns_test.go @@ -0,0 +1,98 @@ +// +build !windows + +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +// user namespaces test: run daemon with remapped root setting +// 1. validate uid/gid maps are set properly +// 2. verify that files created are owned by remapped root +func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, UserNamespaceInKernel) + + c.Assert(s.d.StartWithBusybox("--userns-remap", "default"), checker.IsNil) + + tmpDir, err := ioutil.TempDir("", "userns") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Set a non-existent path + tmpDirNotExists := path.Join(os.TempDir(), "userns"+stringid.GenerateRandomID()) + defer os.RemoveAll(tmpDirNotExists) + + // we need to find the uid and gid of the remapped root from the daemon's root dir info + uidgid := strings.Split(filepath.Base(s.d.root), ".") + c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.root))) + uid, err := strconv.Atoi(uidgid[0]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) + gid, err := strconv.Atoi(uidgid[1]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse gid")) + + // writable by the remapped root UID/GID pair + c.Assert(os.Chown(tmpDir, uid, gid), checker.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "userns", "-v", tmpDir+":/goofy", "-v", tmpDirNotExists+":/donald", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user := s.findUser(c, "userns") + c.Assert(uidgid[0], checker.Equals, user) + + // check that the created directory is owned by remapped uid:gid + statNotExists, err := system.Stat(tmpDirNotExists) + c.Assert(err, checker.IsNil) + c.Assert(statNotExists.UID(), checker.Equals, uint32(uid), check.Commentf("Created directory not owned by remapped root UID")) + c.Assert(statNotExists.GID(), checker.Equals, uint32(gid), check.Commentf("Created directory not owned by remapped root GID")) + + pid, err := s.d.Cmd("inspect", "--format={{.State.Pid}}", "userns") + c.Assert(err, checker.IsNil, check.Commentf("Could not inspect running container: out: %q", pid)) + // check the uid and gid maps for the PID to ensure root is remapped + // (cmd = cat /proc//uid_map | grep -E '0\s+9999\s+1') + out, rc1, err := runCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/uid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", uid))) + c.Assert(rc1, checker.Equals, 0, check.Commentf("Didn't match uid_map: output: %s", out)) + + out, rc2, err := runCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/gid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", gid))) + c.Assert(rc2, checker.Equals, 0, check.Commentf("Didn't match gid_map: output: %s", out)) + + // check that the touched file is owned by remapped uid:gid + stat, err := system.Stat(filepath.Join(tmpDir, "testfile")) + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Touched file not owned by remapped root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Touched file not owned by remapped root GID")) + + // use host usernamespace + out, err = s.d.Cmd("run", "-d", "--name", "userns_skip", "--userns", "host", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user = s.findUser(c, "userns_skip") + // userns are skipped, user is root + c.Assert(user, checker.Equals, "root") +} + +// findUser finds the uid or name of the user of the first process that runs in a container +func (s *DockerDaemonSuite) findUser(c *check.C, container string) string { + out, err := s.d.Cmd("top", container) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + rows := strings.Split(out, "\n") + if len(rows) < 2 { + // No process rows founds + c.FailNow() + } + return strings.Fields(rows[1])[0] +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_v2_only_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_v2_only_test.go new file mode 100644 index 0000000..889936a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_v2_only_test.go @@ -0,0 +1,125 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/go-check/check" +) + +func makefile(contents string) (string, func(), error) { + cleanup := func() { + + } + + f, err := ioutil.TempFile(".", "tmp") + if err != nil { + return "", cleanup, err + } + err = ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm) + if err != nil { + return "", cleanup, err + } + + cleanup = func() { + err := os.Remove(f.Name()) + if err != nil { + fmt.Println("Error removing tmpfile") + } + } + return f.Name(), cleanup, nil + +} + +// TestV2Only ensures that a daemon in v2-only mode does not +// attempt to contact any v1 registry endpoints. +func (s *DockerRegistrySuite) TestV2Only(c *check.C) { + reg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + }) + + reg.registerHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) { + c.Fatal("V1 registry contacted") + }) + + repoName := fmt.Sprintf("%s/busybox", reg.hostport) + + err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=true") + c.Assert(err, check.IsNil) + + dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup() + + s.d.Cmd("build", "--file", dockerfileName, ".") + + s.d.Cmd("run", repoName) + s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.hostport) + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + s.d.Cmd("pull", repoName) +} + +// TestV1 starts a daemon in 'normal' mode +// and ensure v1 endpoints are hit for the following operations: +// login, push, pull, build & run +func (s *DockerRegistrySuite) TestV1(c *check.C) { + reg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + + v2Pings := 0 + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + v2Pings++ + // V2 ping 404 causes fallback to v1 + w.WriteHeader(404) + }) + + v1Pings := 0 + reg.registerHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) { + v1Pings++ + }) + + v1Logins := 0 + reg.registerHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) { + v1Logins++ + }) + + v1Repo := 0 + reg.registerHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + reg.registerHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=false") + c.Assert(err, check.IsNil) + + dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup() + + s.d.Cmd("build", "--file", dockerfileName, ".") + c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build")) + + repoName := fmt.Sprintf("%s/busybox", reg.hostport) + s.d.Cmd("run", repoName) + c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run")) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.hostport) + c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt")) + + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + + c.Assert(v1Repo, check.Equals, 2) + + s.d.Cmd("pull", repoName) + c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_version_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_version_test.go new file mode 100644 index 0000000..7672beb --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_version_test.go @@ -0,0 +1,58 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// ensure docker version works +func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "version") + stringsToCheck := map[string]int{ + "Client:": 1, + "Server:": 1, + " Version:": 2, + " API version:": 2, + " Go version:": 2, + " Git commit:": 2, + " OS/Arch:": 2, + " Built:": 2, + } + + for k, v := range stringsToCheck { + c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match excepted", k, out)) + } +} + +// ensure the Windows daemon return the correct platform string +func (s *DockerSuite) TestVersionPlatform_w(c *check.C) { + testRequires(c, DaemonIsWindows) + testVersionPlatform(c, "windows/amd64") +} + +// ensure the Linux daemon return the correct platform string +func (s *DockerSuite) TestVersionPlatform_l(c *check.C) { + testRequires(c, DaemonIsLinux) + testVersionPlatform(c, "linux") +} + +func testVersionPlatform(c *check.C, platform string) { + out, _ := dockerCmd(c, "version") + expected := "OS/Arch: " + platform + + split := strings.Split(out, "\n") + c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split))) + + // Verify the second 'OS/Arch' matches the platform. Experimental has + // more lines of output than 'regular' + bFound := false + for i := 14; i < len(split); i++ { + if strings.Contains(split[i], expected) { + bFound = true + break + } + } + c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_volume_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_volume_test.go new file mode 100644 index 0000000..4c0fc78 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_volume_test.go @@ -0,0 +1,627 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumeCLICreate(c *check.C) { + dockerCmd(c, "volume", "create") + + _, err := runCommand(exec.Command(dockerBinary, "volume", "create", "-d", "nosuchdriver")) + c.Assert(err, check.Not(check.IsNil)) + + // test using hidden --name option + out, _ := dockerCmd(c, "volume", "create", "--name=test") + name := strings.TrimSpace(out) + c.Assert(name, check.Equals, "test") + + out, _ = dockerCmd(c, "volume", "create", "test2") + name = strings.TrimSpace(out) + c.Assert(name, check.Equals, "test2") +} + +func (s *DockerSuite) TestVolumeCLIInspect(c *check.C) { + c.Assert( + exec.Command(dockerBinary, "volume", "inspect", "doesntexist").Run(), + check.Not(check.IsNil), + check.Commentf("volume inspect should error on non-existent volume"), + ) + + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", name) + c.Assert(strings.TrimSpace(out), check.Equals, name) + + dockerCmd(c, "volume", "create", "test") + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", "test") + c.Assert(strings.TrimSpace(out), check.Equals, "test") +} + +func (s *DockerSuite) TestVolumeCLIInspectMulti(c *check.C) { + dockerCmd(c, "volume", "create", "test1") + dockerCmd(c, "volume", "create", "test2") + dockerCmd(c, "volume", "create", "not-shown") + + result := dockerCmdWithResult("volume", "inspect", "--format={{ .Name }}", "test1", "test2", "doesntexist", "not-shown") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such volume: doesntexist", + }) + + out := result.Stdout() + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 2, check.Commentf("\n%s", out)) + + c.Assert(out, checker.Contains, "test1") + c.Assert(out, checker.Contains, "test2") + c.Assert(out, checker.Not(checker.Contains), "not-shown") +} + +func (s *DockerSuite) TestVolumeCLILs(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "volume", "create", "aaa") + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "volume", "create", "soo") + dockerCmd(c, "run", "-v", "soo:"+prefix+"/foo", "busybox", "ls", "/") + + out, _ := dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + + assertVolList(c, out, []string{"aaa", "soo", "test"}) +} + +func (s *DockerSuite) TestVolumeLsFormat(c *check.C) { + dockerCmd(c, "volume", "create", "aaa") + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", "soo") + + out, _ := dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"aaa", "soo", "test"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) { + dockerCmd(c, "volume", "create", "aaa") + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", "soo") + + config := `{ + "volumesFormat": "{{ .Name }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "--config", d, "volume", "ls") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"aaa default", "soo default", "test default"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +// assertVolList checks volume retrieved with ls command +// equals to expected volume list +// note: out should be `volume ls [option]` result +func assertVolList(c *check.C, out string, expectVols []string) { + lines := strings.Split(out, "\n") + var volList []string + for _, line := range lines[1 : len(lines)-1] { + volFields := strings.Fields(line) + // wrap all volume name in volList + volList = append(volList, volFields[1]) + } + + // volume ls should contains all expected volumes + c.Assert(volList, checker.DeepEquals, expectVols) +} + +func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "volume", "create", "testnotinuse1") + dockerCmd(c, "volume", "create", "testisinuse1") + dockerCmd(c, "volume", "create", "testisinuse2") + + // Make sure both "created" (but not started), and started + // containers are included in reference counting + dockerCmd(c, "run", "--name", "volume-test1", "-v", "testisinuse1:"+prefix+"/foo", "busybox", "true") + dockerCmd(c, "create", "--name", "volume-test2", "-v", "testisinuse2:"+prefix+"/foo", "busybox", "true") + + out, _ := dockerCmd(c, "volume", "ls") + + // No filter, all volumes should show + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false") + + // Explicitly disabling dangling + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true") + + // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) + c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=1") + // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output, dangling also accept 1 + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) + c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=0") + // dangling=0 is same as dangling=false case + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "name=testisin") + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("execpeted volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=invalidDriver") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=local") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=loc") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + +} + +func (s *DockerSuite) TestVolumeCLILsErrorWithInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", "FOO=123") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestVolumeCLILsWithIncorrectFilterValue(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", "dangling=invalid") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestVolumeCLIRm(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + out, _ := dockerCmd(c, "volume", "create") + id := strings.TrimSpace(out) + + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "rm", id) + dockerCmd(c, "volume", "rm", "test") + + out, _ = dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + volumeID := "testing" + dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar") + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "volume", "rm", "testing")) + c.Assert( + err, + check.Not(check.IsNil), + check.Commentf("Should not be able to remove volume that is in use by a container\n%s", out)) + + out, _ = dockerCmd(c, "run", "--volumes-from=test", "--name=test2", "busybox", "sh", "-c", "cat /foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + dockerCmd(c, "rm", "-fv", "test2") + dockerCmd(c, "volume", "inspect", volumeID) + dockerCmd(c, "rm", "-f", "test") + + out, _ = dockerCmd(c, "run", "--name=test2", "-v", volumeID+":"+prefix+"/foo", "busybox", "sh", "-c", "cat /foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello", check.Commentf("volume data was removed")) + dockerCmd(c, "rm", "test2") + + dockerCmd(c, "volume", "rm", volumeID) + c.Assert( + exec.Command("volume", "rm", "doesntexist").Run(), + check.Not(check.IsNil), + check.Commentf("volume rm should fail with non-existent volume"), + ) +} + +func (s *DockerSuite) TestVolumeCLINoArgs(c *check.C) { + out, _ := dockerCmd(c, "volume") + // no args should produce the cmd usage output + usage := "Usage: docker volume COMMAND" + c.Assert(out, checker.Contains, usage) + + // invalid arg should error and show the command usage on stderr + _, stderr, _, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "somearg")) + c.Assert(err, check.NotNil, check.Commentf(stderr)) + c.Assert(stderr, checker.Contains, usage) + + // invalid flag should error and show the flag error and cmd usage + _, stderr, _, err = runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "--no-such-flag")) + c.Assert(err, check.NotNil, check.Commentf(stderr)) + c.Assert(stderr, checker.Contains, usage) + c.Assert(stderr, checker.Contains, "unknown flag: --no-such-flag") +} + +func (s *DockerSuite) TestVolumeCLIInspectTmplError(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + + out, exitCode, err := dockerCmdWithError("volume", "inspect", "--format='{{ .FooBar }}'", name) + c.Assert(err, checker.NotNil, check.Commentf("Output: %s", out)) + c.Assert(exitCode, checker.Equals, 1, check.Commentf("Output: %s", out)) + c.Assert(out, checker.Contains, "Template parsing error") +} + +func (s *DockerSuite) TestVolumeCLICreateWithOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "volume", "create", "-d", "local", "test", "--opt=type=tmpfs", "--opt=device=tmpfs", "--opt=o=size=1m,uid=1000") + out, _ := dockerCmd(c, "run", "-v", "test:/foo", "busybox", "mount") + + mounts := strings.Split(out, "\n") + var found bool + for _, m := range mounts { + if strings.Contains(m, "/foo") { + found = true + info := strings.Fields(m) + // tmpfs on type tmpfs (rw,relatime,size=1024k,uid=1000) + c.Assert(info[0], checker.Equals, "tmpfs") + c.Assert(info[2], checker.Equals, "/foo") + c.Assert(info[4], checker.Equals, "tmpfs") + c.Assert(info[5], checker.Contains, "uid=1000") + c.Assert(info[5], checker.Contains, "size=1024k") + } + } + c.Assert(found, checker.Equals, true) +} + +func (s *DockerSuite) TestVolumeCLICreateLabel(c *check.C) { + testVol := "testvolcreatelabel" + testLabel := "foo" + testValue := "bar" + + out, _, err := dockerCmdWithError("volume", "create", "--label", testLabel+"="+testValue, testVol) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+testLabel+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) +} + +func (s *DockerSuite) TestVolumeCLICreateLabelMultiple(c *check.C) { + testVol := "testvolcreatelabel" + + testLabels := map[string]string{ + "foo": "bar", + "baz": "foo", + } + + args := []string{ + "volume", + "create", + testVol, + } + + for k, v := range testLabels { + args = append(args, "--label", k+"="+v) + } + + out, _, err := dockerCmdWithError(args...) + c.Assert(err, check.IsNil) + + for k, v := range testLabels { + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+k+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, v) + } +} + +func (s *DockerSuite) TestVolumeCLILsFilterLabels(c *check.C) { + testVol1 := "testvolcreatelabel-1" + out, _, err := dockerCmdWithError("volume", "create", "--label", "foo=bar1", testVol1) + c.Assert(err, check.IsNil) + + testVol2 := "testvolcreatelabel-2" + out, _, err = dockerCmdWithError("volume", "create", "--label", "foo=bar2", testVol2) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo") + + // filter with label=key + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, checker.Contains, "testvolcreatelabel-2\n", check.Commentf("expected volume 'testvolcreatelabel-2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=bar1") + + // filter with label=key=value + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, check.Not(checker.Contains), "testvolcreatelabel-2\n", check.Commentf("expected volume 'testvolcreatelabel-2 in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=non-exist") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=non-exist") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForceUsage(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + id := strings.TrimSpace(out) + + dockerCmd(c, "volume", "rm", "-f", id) + dockerCmd(c, "volume", "rm", "--force", "nonexist") + + out, _ = dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForce(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + name := "test" + out, _ := dockerCmd(c, "volume", "create", name) + id := strings.TrimSpace(out) + c.Assert(id, checker.Equals, name) + + out, _ = dockerCmd(c, "volume", "inspect", "--format", "{{.Mountpoint}}", name) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + // Mountpoint is in the form of "/var/lib/docker/volumes/.../_data", removing `/_data` + path := strings.TrimSuffix(strings.TrimSpace(out), "/_data") + out, _, err := runCommandWithOutput(exec.Command("rm", "-rf", path)) + c.Assert(err, check.IsNil) + + dockerCmd(c, "volume", "rm", "-f", name) + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Not(checker.Contains), name) + dockerCmd(c, "volume", "create", name) + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) +} + +// TestVolumeCLIRmForceInUse verifies that repeated `docker volume rm -f` calls does not remove a volume +// if it is in use. Test case for https://github.com/docker/docker/issues/31446 +func (s *DockerSuite) TestVolumeCLIRmForceInUse(c *check.C) { + name := "testvolume" + out, _ := dockerCmd(c, "volume", "create", name) + id := strings.TrimSpace(out) + c.Assert(id, checker.Equals, name) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + out, e := dockerCmd(c, "create", "-v", "testvolume:"+prefix+slash+"foo", "busybox") + cid := strings.TrimSpace(out) + + _, _, err := dockerCmdWithError("volume", "rm", "-f", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "volume is in use") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) + + // The original issue did not _remove_ the volume from the list + // the first time. But a second call to `volume rm` removed it. + // Calling `volume rm` a second time to confirm it's not removed + // when calling twice. + _, _, err = dockerCmdWithError("volume", "rm", "-f", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "volume is in use") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) + + // Verify removing the volume after the container is removed works + _, e = dockerCmd(c, "rm", cid) + c.Assert(e, check.Equals, 0) + + _, e = dockerCmd(c, "volume", "rm", "-f", name) + c.Assert(e, check.Equals, 0) + + out, e = dockerCmd(c, "volume", "ls") + c.Assert(e, check.Equals, 0) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSuite) TestVolumeCliInspectWithVolumeOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Without options + name := "test1" + dockerCmd(c, "volume", "create", "-d", "local", name) + out, _ := dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, "map[]") + + // With options + name = "test2" + k1, v1 := "type", "tmpfs" + k2, v2 := "device", "tmpfs" + k3, v3 := "o", "size=1m,uid=1000" + dockerCmd(c, "volume", "create", "-d", "local", name, "--opt", fmt.Sprintf("%s=%s", k1, v1), "--opt", fmt.Sprintf("%s=%s", k2, v2), "--opt", fmt.Sprintf("%s=%s", k3, v3)) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k1, v1)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k2, v2)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k3, v3)) +} + +// Test case (1) for 21845: duplicate targets for --volumes-from +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFrom(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "vimage" + _, err := buildImage( + image, + ` + FROM busybox + VOLUME ["/tmp/data"] + `, + true) + c.Assert(err, check.IsNil) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volume should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + out, _, err = dockerCmdWithError("run", "--name=app", "--volumes-from=data1", "--volumes-from=data2", "-d", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf("Out: %s", out)) + + // Only the second volume will be referenced, this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Equals, data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volume should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} + +// Test case (2) for 21845: duplicate targets for --volumes-from and -v (bind) +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFromAndBind(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "vimage" + _, err := buildImage(image, + ` + FROM busybox + VOLUME ["/tmp/data"] + `, + true) + c.Assert(err, check.IsNil) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volume should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + out, _, err = dockerCmdWithError("run", "--name=app", "--volumes-from=data1", "--volumes-from=data2", "-v", "/tmp/data:/tmp/data", "-d", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf("Out: %s", out)) + + // No volume will be referenced (mount is /tmp/data), this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volume should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} + +// Test case (3) for 21845: duplicate targets for --volumes-from and `Mounts` (API only) +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFromAndMounts(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "vimage" + _, err := buildImage(image, + ` + FROM busybox + VOLUME ["/tmp/data"] + `, + true) + c.Assert(err, check.IsNil) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volume should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + // Mounts is available in API + status, body, err := sockRequest("POST", "/containers/create?name=app", map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"top"}, + "HostConfig": map[string]interface{}{ + "VolumesFrom": []string{ + "data1", + "data2", + }, + "Mounts": []map[string]interface{}{ + { + "Type": "bind", + "Source": "/tmp/data", + "Target": "/tmp/data", + }, + }}, + }) + + c.Assert(err, checker.IsNil, check.Commentf(string(body))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(body))) + + // No volume will be referenced (mount is /tmp/data), this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volume should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_wait_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_wait_test.go new file mode 100644 index 0000000..961aef5 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_wait_test.go @@ -0,0 +1,97 @@ +package main + +import ( + "bytes" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// non-blocking wait with 0 exit code +func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "true") + containerID := strings.TrimSpace(out) + + err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) + c.Assert(err, checker.IsNil) //Container should have stopped by now + + out, _ = dockerCmd(c, "wait", containerID) + c.Assert(strings.TrimSpace(out), checker.Equals, "0", check.Commentf("failed to set up container, %v", out)) + +} + +// blocking wait with 0 exit code +func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) { + // Windows busybox does not support trap in this way, not sleep with sub-second + // granularity. It will always exit 0x40010004. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do usleep 10; done") + containerID := strings.TrimSpace(out) + + c.Assert(waitRun(containerID), checker.IsNil) + + chWait := make(chan string) + go func() { + chWait <- "" + out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID)) + chWait <- out + }() + + <-chWait // make sure the goroutine is started + time.Sleep(100 * time.Millisecond) + dockerCmd(c, "stop", containerID) + + select { + case status := <-chWait: + c.Assert(strings.TrimSpace(status), checker.Equals, "0", check.Commentf("expected exit 0, got %s", status)) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for `docker wait` to exit") + } + +} + +// non-blocking wait with random exit code +func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "exit 99") + containerID := strings.TrimSpace(out) + + err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) + c.Assert(err, checker.IsNil) //Container should have stopped by now + out, _ = dockerCmd(c, "wait", containerID) + c.Assert(strings.TrimSpace(out), checker.Equals, "99", check.Commentf("failed to set up container, %v", out)) + +} + +// blocking wait with random exit code +func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) { + // Cannot run on Windows as trap in Windows busybox does not support trap in this way. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do usleep 10; done") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + chWait := make(chan error) + waitCmd := exec.Command(dockerBinary, "wait", containerID) + waitCmdOut := bytes.NewBuffer(nil) + waitCmd.Stdout = waitCmdOut + c.Assert(waitCmd.Start(), checker.IsNil) + go func() { + chWait <- waitCmd.Wait() + }() + + dockerCmd(c, "stop", containerID) + + select { + case err := <-chWait: + c.Assert(err, checker.IsNil, check.Commentf(waitCmdOut.String())) + status, err := waitCmdOut.ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(status), checker.Equals, "99", check.Commentf("expected exit 99, got %s", status)) + case <-time.After(2 * time.Second): + waitCmd.Process.Kill() + c.Fatal("timeout waiting for `docker wait` to exit") + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_test.go b/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_test.go new file mode 100644 index 0000000..7bc287e --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_test.go @@ -0,0 +1,227 @@ +// This file will be removed when we completely drop support for +// passing HostConfig to container start API. + +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func formatV123StartAPIURL(url string) string { + return "/v1.23" + url +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartHostConfig(c *check.C) { + name := "test-deprecated-api-124" + dockerCmd(c, "create", "--name", name, "busybox") + config := map[string]interface{}{ + "Binds": []string{"/aa:/bb"}, + } + status, body, err := sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(string(body), checker.Contains, "was deprecated since v1.10") +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumeBinds(c *check.C) { + // TODO Windows CI: Investigate further why this fails on Windows to Windows CI. + testRequires(c, DaemonIsLinux) + path := "/foo" + if daemonPlatform == "windows" { + path = `c:\foo` + } + name := "testing" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{path: {}}, + } + + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath := randomTmpDirPath("test", daemonPlatform) + config = map[string]interface{}{ + "Binds": []string{bindPath + ":" + path}, + } + status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, path) + c.Assert(err, checker.IsNil) + c.Assert(pth, checker.Equals, bindPath, check.Commentf("expected volume host path to be %s, got %s", bindPath, pth)) +} + +// Test for GH#10618 +func (s *DockerSuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + name := "testdups" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath1 := randomTmpDirPath("test1", daemonPlatform) + bindPath2 := randomTmpDirPath("test2", daemonPlatform) + + config = map[string]interface{}{ + "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, + } + status, body, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Contains, "Duplicate mount point", check.Commentf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)) +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumesFrom(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + volName := "voltst" + volPath := "/tmp" + + dockerCmd(c, "run", "--name", volName, "-v", volPath, "busybox") + + name := "TestContainerAPIStartVolumesFrom" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{volPath: {}}, + } + + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config = map[string]interface{}{ + "VolumesFrom": []string{volName}, + } + status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, volPath) + c.Assert(err, checker.IsNil) + pth2, err := inspectMountSourceField(volName, volPath) + c.Assert(err, checker.IsNil) + c.Assert(pth, checker.Equals, pth2, check.Commentf("expected volume host path to be %s, got %s", pth, pth2)) +} + +// #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume +func (s *DockerSuite) TestDeprecatedPostContainerBindNormalVolume(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "-v", "/foo", "--name=one", "busybox") + + fooDir, err := inspectMountSourceField("one", "/foo") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox") + + bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/two/start"), bindSpec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + fooDir2, err := inspectMountSourceField("two", "/foo") + c.Assert(err, checker.IsNil) + c.Assert(fooDir2, checker.Equals, fooDir, check.Commentf("expected volume path to be %s, got: %s", fooDir, fooDir2)) +} + +func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + + containerID := strings.TrimSpace(out) + + config := `{ + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + b, err2 := readBody(body) + c.Assert(err2, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithoutLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support supplying a hostconfig on start. + // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, append([]string{"create", "--name", name, "busybox"}, sleepCommandForDaemonPlatform()...)...) + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support supplying a hostconfig on start. + // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, "run", "--name", "foo", "-d", "busybox", "top") + dockerCmd(c, "create", "--name", name, "--link", "foo:bar", "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfigIdLinked(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top") + id := strings.TrimSpace(out) + dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +func (s *DockerSuite) TestDeprecatedStartWithNilDNS(c *check.C) { + // TODO Windows: Add once DNS is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + containerID := strings.TrimSpace(out) + + config := `{"HostConfig": {"Dns": null}}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() + + dns := inspectFieldJSON(c, containerID, "HostConfig.Dns") + c.Assert(dns, checker.Equals, "[]") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_unix_test.go new file mode 100644 index 0000000..94ef9b1 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_unix_test.go @@ -0,0 +1,30 @@ +// +build !windows + +package main + +import ( + "fmt" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// #19100 This is a deprecated feature test, it should be removed in Docker 1.12 +func (s *DockerNetworkSuite) TestDeprecatedDockerNetworkStartAPIWithHostconfig(c *check.C) { + netName := "test" + conName := "foo" + dockerCmd(c, "network", "create", netName) + dockerCmd(c, "create", "--name", conName, "busybox", "top") + + config := map[string]interface{}{ + "HostConfig": map[string]interface{}{ + "NetworkMode": netName, + }, + } + _, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+conName+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(conName), checker.IsNil) + networks := inspectField(c, conName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netName, check.Commentf(fmt.Sprintf("Should contain '%s' network", netName))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_experimental_network_test.go b/vendor/github.com/moby/moby/integration-cli/docker_experimental_network_test.go new file mode 100644 index 0000000..85dec31 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_experimental_network_test.go @@ -0,0 +1,594 @@ +// +build !windows + +package main + +import ( + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/go-check/check" +) + +var ( + MacvlanKernelSupport = testRequirement{ + func() bool { + const macvlanKernelVer = 3 // minimum macvlan kernel support + const macvlanMajorVer = 9 // minimum macvlan major kernel support + kv, err := kernel.GetKernelVersion() + if err != nil { + return false + } + // ensure Kernel version is >= v3.9 for macvlan support + if kv.Kernel < macvlanKernelVer || (kv.Kernel == macvlanKernelVer && kv.Major < macvlanMajorVer) { + return false + } + return true + }, + "kernel version failed to meet the minimum macvlan kernel requirement of 3.9", + } + IpvlanKernelSupport = testRequirement{ + func() bool { + const ipvlanKernelVer = 4 // minimum ipvlan kernel support + const ipvlanMajorVer = 2 // minimum ipvlan major kernel support + kv, err := kernel.GetKernelVersion() + if err != nil { + return false + } + // ensure Kernel version is >= v4.2 for ipvlan support + if kv.Kernel < ipvlanKernelVer || (kv.Kernel == ipvlanKernelVer && kv.Major < ipvlanMajorVer) { + return false + } + return true + }, + "kernel version failed to meet the minimum ipvlan kernel requirement of 4.0.0", + } +) + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanPersistance(c *check.C) { + // verify the driver automatically provisions the 802.1q link (dm-dummy0.60) + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.60", "dm-persist") + assertNwIsAvailable(c, "dm-persist") + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + // verify network is recreated from persistence + assertNwIsAvailable(c, "dm-persist") + // cleanup the master interface that also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanPersistance(c *check.C) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.70) + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'di' notation represent 'docker ipvlan' + master := "di-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.70", "di-persist") + assertNwIsAvailable(c, "di-persist") + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + // verify network is recreated from persistence + assertNwIsAvailable(c, "di-persist") + // cleanup the master interface that also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanSubIntCreate(c *check.C) { + // verify the driver automatically provisions the 802.1q link (dm-dummy0.50) + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.50", "dm-subinterface") + assertNwIsAvailable(c, "dm-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanSubIntCreate(c *check.C) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.50) + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.60", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = createVlanInterface(c, master, "dm-dummy0.40", "40") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-subinterface") + assertNwIsAvailable(c, "dm-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err = dockerCmdWithError("network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, check.NotNil) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = createVlanInterface(c, master, "di-dummy0.30", "30") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err = dockerCmdWithError("network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, check.NotNil) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { + // create a dual stack multi-subnet Macvlan bridge mode network and validate connectivity between four containers, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.100.0/24", "--subnet=172.28.102.0/24", "--gateway=172.28.102.254", + "--subnet=2001:db8:abc2::/64", "--subnet=2001:db8:abc4::/64", "--gateway=2001:db8:abc4::254", "dualstackbridge") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackbridge") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "--ip", "172.28.100.20", "--ip6", "2001:db8:abc2::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=second", "--ip", "172.28.100.21", "--ip6", "2001:db8:abc2::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + c.Skip("Temporarily skipping while invesitigating sporadic v6 CI issues") + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=third", "--ip", "172.28.102.20", "--ip6", "2001:db8:abc4::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=fourth", "--ip", "172.28.102.21", "--ip6", "2001:db8:abc4::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.100.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc2::1") + + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.102.254") + // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc4::254") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) { + // create a dual stack multi-subnet Ipvlan L2 network and validate connectivity within the subnets, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.200.0/24", "--subnet=172.28.202.0/24", "--gateway=172.28.202.254", + "--subnet=2001:db8:abc8::/64", "--subnet=2001:db8:abc6::/64", "--gateway=2001:db8:abc6::254", "dualstackl2") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackl2") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.200.0/24 and 2001:db8:abc8::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=first", "--ip", "172.28.200.20", "--ip6", "2001:db8:abc8::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "--ip", "172.28.200.21", "--ip6", "2001:db8:abc8::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.202.0/24 and 2001:db8:abc6::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=third", "--ip", "172.28.202.20", "--ip6", "2001:db8:abc6::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=fourth", "--ip", "172.28.202.21", "--ip6", "2001:db8:abc6::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.200.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc8::1") + + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.202.254") + // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc6::254") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) { + // create a dual stack multi-subnet Ipvlan L3 network and validate connectivity between all four containers per L3 mode + testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, IPv6, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.10.0/24", "--subnet=172.28.12.0/24", "--gateway=172.28.12.254", + "--subnet=2001:db8:abc9::/64", "--subnet=2001:db8:abc7::/64", "--gateway=2001:db8:abc7::254", "-o", "ipvlan_mode=l3", "dualstackl3") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackl3") + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.10.0/24 and 2001:db8:abc9::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=first", "--ip", "172.28.10.20", "--ip6", "2001:db8:abc9::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=second", "--ip", "172.28.10.21", "--ip6", "2001:db8:abc9::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.12.0/24 and 2001:db8:abc7::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "--ip", "172.28.12.20", "--ip6", "2001:db8:abc7::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=fourth", "--ip", "172.28.12.21", "--ip6", "2001:db8:abc7::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // Verify connectivity across disparate subnets which is unique to L3 mode only + _, _, err = dockerCmdWithError("exec", "third", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "third", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure no next hop is assigned in L3 mode + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "") + // Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled + ip6gw := inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) { + // Ensure the default gateways, next-hops and default dev devices are properly set + testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.130.0/24", + "--subnet=2001:db8:abca::/64", "--gateway=2001:db8:abca::254", "-o", "macvlan_mode=bridge", "dualstackbridge") + assertNwIsAvailable(c, "dualstackbridge") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "busybox", "top") + // Validate macvlan bridge mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err := dockerCmdWithError("exec", "first", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.130.1 dev eth0") + // Validate macvlan bridge mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "first", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abca::254 dev eth0") + + // Verify ipvlan l2 mode sets the proper default gateway routes via netlink + // for either an explicitly set route by the user or inferred via default IPAM + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.140.0/24", "--gateway=172.28.140.254", + "--subnet=2001:db8:abcb::/64", "-o", "ipvlan_mode=l2", "dualstackl2") + assertNwIsAvailable(c, "dualstackl2") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "busybox", "top") + // Validate ipvlan l2 mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err = dockerCmdWithError("exec", "second", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.140.254 dev eth0") + // Validate ipvlan l2 mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "second", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abcb::1 dev eth0") + + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.160.0/24", "--gateway=172.28.160.254", + "--subnet=2001:db8:abcd::/64", "--gateway=2001:db8:abcd::254", "-o", "ipvlan_mode=l3", "dualstackl3") + assertNwIsAvailable(c, "dualstackl3") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "busybox", "top") + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") + // Validate ipvlan l3 mode sets the v6 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeNilParent(c *check.C) { + // macvlan bridge mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "dm-nil-parent") + assertNwIsAvailable(c, "dm-nil-parent") + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeInternalMode(c *check.C) { + // macvlan bridge mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--internal", "dm-internal") + assertNwIsAvailable(c, "dm-internal") + nr := getNetworkResource(c, "dm-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2NilParent(c *check.C) { + // ipvlan l2 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "di-nil-parent") + assertNwIsAvailable(c, "di-nil-parent") + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2InternalMode(c *check.C) { + // ipvlan l2 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--internal", "di-internal") + assertNwIsAvailable(c, "di-internal") + nr := getNetworkResource(c, "di-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=di-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=di-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3NilParent(c *check.C) { + // ipvlan l3 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "di-nil-parent-l3") + assertNwIsAvailable(c, "di-nil-parent-l3") + + // start two containers on separate subnets + dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-nil-parent-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-nil-parent-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3InternalMode(c *check.C) { + // ipvlan l3 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "--internal", "di-internal-l3") + assertNwIsAvailable(c, "di-internal-l3") + nr := getNetworkResource(c, "di-internal-l3") + c.Assert(nr.Internal, checker.True) + + // start two containers on separate subnets + dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-internal-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-internal-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanExistingParent(c *check.C) { + // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-parent-exists" + out, err := createMasterDummy(c, "dm-dummy0") + //out, err := createVlanInterface(c, "dm-parent", "dm-slave", "macvlan", "bridge") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0", netName) + assertNwIsAvailable(c, netName) + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined link + out, err = linkExists(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) + deleteInterface(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanSubinterface(c *check.C) { + // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-subinterface" + out, err := createMasterDummy(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = createVlanInterface(c, "dm-dummy0", "dm-dummy0.20", "20") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.20", netName) + assertNwIsAvailable(c, netName) + + // start containers on 802.1q tagged '-o parent' sub-interface + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // verify containers can communicate + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + + // remove the containers + dockerCmd(c, "rm", "-f", "first") + dockerCmd(c, "rm", "-f", "second") + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined sub-interface + out, err = linkExists(c, "dm-dummy0.20") + c.Assert(err, check.IsNil, check.Commentf(out)) + // delete the parent interface which also collects the slave + deleteInterface(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func createMasterDummy(c *check.C, master string) (string, error) { + // ip link add type dummy + args := []string{"link", "add", master, "type", "dummy"} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + // ip link set dummy_name up + args = []string{"link", "set", master, "up"} + ipLinkCmd = exec.Command("ip", args...) + out, _, err = runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + return out, err +} + +func createVlanInterface(c *check.C, master, slave, id string) (string, error) { + // ip link add link name . type vlan id + args := []string{"link", "add", "link", master, "name", slave, "type", "vlan", "id", id} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + // ip link set up + args = []string{"link", "set", slave, "up"} + ipLinkCmd = exec.Command("ip", args...) + out, _, err = runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + return out, err +} + +func linkExists(c *check.C, master string) (string, error) { + // verify the specified link exists, ip link show + args := []string{"link", "show", master} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + return out, err +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_hub_pull_suite_test.go b/vendor/github.com/moby/moby/integration-cli/docker_hub_pull_suite_test.go new file mode 100644 index 0000000..df52cae --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_hub_pull_suite_test.go @@ -0,0 +1,90 @@ +package main + +import ( + "os/exec" + "runtime" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func init() { + // FIXME. Temporarily turning this off for Windows as GH16039 was breaking + // Windows to Linux CI @icecrime + if runtime.GOOS != "windows" { + check.Suite(newDockerHubPullSuite()) + } +} + +// DockerHubPullSuite provides an isolated daemon that doesn't have all the +// images that are baked into our 'global' test environment daemon (e.g., +// busybox, httpserver, ...). +// +// We use it for push/pull tests where we want to start fresh, and measure the +// relative impact of each individual operation. As part of this suite, all +// images are removed after each test. +type DockerHubPullSuite struct { + d *Daemon + ds *DockerSuite +} + +// newDockerHubPullSuite returns a new instance of a DockerHubPullSuite. +func newDockerHubPullSuite() *DockerHubPullSuite { + return &DockerHubPullSuite{ + ds: &DockerSuite{}, + } +} + +// SetUpSuite starts the suite daemon. +func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d = NewDaemon(c) + err := s.d.Start() + c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err)) +} + +// TearDownSuite stops the suite daemon. +func (s *DockerHubPullSuite) TearDownSuite(c *check.C) { + if s.d != nil { + err := s.d.Stop() + c.Assert(err, checker.IsNil, check.Commentf("stopping push/pull test daemon: %v", err)) + } +} + +// SetUpTest declares that all tests of this suite require network. +func (s *DockerHubPullSuite) SetUpTest(c *check.C) { + testRequires(c, Network) +} + +// TearDownTest removes all images from the suite daemon. +func (s *DockerHubPullSuite) TearDownTest(c *check.C) { + out := s.Cmd(c, "images", "-aq") + images := strings.Split(out, "\n") + images = append([]string{"rmi", "-f"}, images...) + s.d.Cmd(images...) + s.ds.TearDownTest(c) +} + +// Cmd executes a command against the suite daemon and returns the combined +// output. The function fails the test when the command returns an error. +func (s *DockerHubPullSuite) Cmd(c *check.C, name string, arg ...string) string { + out, err := s.CmdWithError(name, arg...) + c.Assert(err, checker.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(arg, " "), out, err)) + return out +} + +// CmdWithError executes a command against the suite daemon and returns the +// combined output as well as any error. +func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, error) { + c := s.MakeCmd(name, arg...) + b, err := c.CombinedOutput() + return string(b), err +} + +// MakeCmd returns an exec.Cmd command to run against the suite daemon. +func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { + args := []string{"--host", s.d.sock(), name} + args = append(args, arg...) + return exec.Command(dockerBinary, args...) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_test_vars.go b/vendor/github.com/moby/moby/integration-cli/docker_test_vars.go new file mode 100644 index 0000000..3559bfd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_test_vars.go @@ -0,0 +1,165 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/reexec" +) + +var ( + // the docker client binary to use + dockerBinary = "docker" + // the docker daemon binary to use + dockerdBinary = "dockerd" + + // path to containerd's ctr binary + ctrBinary = "docker-containerd-ctr" + + // the private registry image to use for tests involving the registry + registryImageName = "registry" + + // the private registry to use for tests + privateRegistryURL = "127.0.0.1:5000" + + // TODO Windows CI. These are incorrect and need fixing into + // platform specific pieces. + runtimePath = "/var/run/docker" + + workingDirectory string + + // isLocalDaemon is true if the daemon under test is on the same + // host as the CLI. + isLocalDaemon bool + + // daemonPlatform is held globally so that tests can make intelligent + // decisions on how to configure themselves according to the platform + // of the daemon. This is initialized in docker_utils by sending + // a version call to the daemon and examining the response header. + daemonPlatform string + + // windowsDaemonKV is used on Windows to distinguish between different + // versions. This is necessary to enable certain tests based on whether + // the platform supports it. For example, Windows Server 2016 TP3 did + // not support volumes, but TP4 did. + windowsDaemonKV int + + // daemonDefaultImage is the name of the default image to use when running + // tests. This is platform dependent. + daemonDefaultImage string + + // For a local daemon on Linux, these values will be used for testing + // user namespace support as the standard graph path(s) will be + // appended with the root remapped uid.gid prefix + dockerBasePath string + volumesConfigPath string + containerStoragePath string + + // experimentalDaemon tell whether the main daemon has + // experimental features enabled or not + experimentalDaemon bool + + // daemonStorageDriver is held globally so that tests can know the storage + // driver of the daemon. This is initialized in docker_utils by sending + // a version call to the daemon and examining the response header. + daemonStorageDriver string + + // WindowsBaseImage is the name of the base image for Windows testing + // Environment variable WINDOWS_BASE_IMAGE can override this + WindowsBaseImage = "microsoft/windowsservercore" + + // isolation is the isolation mode of the daemon under test + isolation container.Isolation + + // daemonPid is the pid of the main test daemon + daemonPid int + + daemonKernelVersion string +) + +const ( + // DefaultImage is the name of the base image for the majority of tests that + // are run across suites + DefaultImage = "busybox" +) + +func init() { + reexec.Init() + if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { + dockerBinary = dockerBin + } + var err error + dockerBinary, err = exec.LookPath(dockerBinary) + if err != nil { + fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)\n", err) + os.Exit(1) + } + if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { + registryImageName = registryImage + } + if registry := os.Getenv("REGISTRY_URL"); registry != "" { + privateRegistryURL = registry + } + workingDirectory, _ = os.Getwd() + + // Deterministically working out the environment in which CI is running + // to evaluate whether the daemon is local or remote is not possible through + // a build tag. + // + // For example Windows to Linux CI under Jenkins tests the 64-bit + // Windows binary build with the daemon build tag, but calls a remote + // Linux daemon. + // + // We can't just say if Windows then assume the daemon is local as at + // some point, we will be testing the Windows CLI against a Windows daemon. + // + // Similarly, it will be perfectly valid to also run CLI tests from + // a Linux CLI (built with the daemon tag) against a Windows daemon. + if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 { + isLocalDaemon = false + } else { + isLocalDaemon = true + } + + // TODO Windows CI. This are incorrect and need fixing into + // platform specific pieces. + // This is only used for a tests with local daemon true (Linux-only today) + // default is "/var/lib/docker", but we'll try and ask the + // /info endpoint for the specific root dir + dockerBasePath = "/var/lib/docker" + type Info struct { + DockerRootDir string + ExperimentalBuild bool + KernelVersion string + } + var i Info + status, b, err := sockRequest("GET", "/info", nil) + if err == nil && status == 200 { + if err = json.Unmarshal(b, &i); err == nil { + dockerBasePath = i.DockerRootDir + experimentalDaemon = i.ExperimentalBuild + daemonKernelVersion = i.KernelVersion + } + } + volumesConfigPath = dockerBasePath + "/volumes" + containerStoragePath = dockerBasePath + "/containers" + + if len(os.Getenv("WINDOWS_BASE_IMAGE")) > 0 { + WindowsBaseImage = os.Getenv("WINDOWS_BASE_IMAGE") + fmt.Println("INFO: Windows Base image is ", WindowsBaseImage) + } + + dest := os.Getenv("DEST") + b, err = ioutil.ReadFile(filepath.Join(dest, "docker.pid")) + if err == nil { + if p, err := strconv.ParseInt(string(b), 10, 32); err == nil { + daemonPid = int(p) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_utils.go b/vendor/github.com/moby/moby/integration-cli/docker_utils.go new file mode 100644 index 0000000..749e4b3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_utils.go @@ -0,0 +1,1607 @@ +package main + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/httputils" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/go-connections/tlsconfig" + units "github.com/docker/go-units" + "github.com/go-check/check" +) + +func init() { + cmd := exec.Command(dockerBinary, "images", "-f", "dangling=false", "--format", "{{.Repository}}:{{.Tag}}") + cmd.Env = appendBaseEnv(true) + out, err := cmd.CombinedOutput() + if err != nil { + panic(fmt.Errorf("err=%v\nout=%s\n", err, out)) + } + images := strings.Split(strings.TrimSpace(string(out)), "\n") + for _, img := range images { + protectedImages[img] = struct{}{} + } + + res, body, err := sockRequestRaw("GET", "/info", nil, "application/json") + if err != nil { + panic(fmt.Errorf("Init failed to get /info: %v", err)) + } + defer body.Close() + if res.StatusCode != http.StatusOK { + panic(fmt.Errorf("Init failed to get /info. Res=%v", res)) + } + + svrHeader, _ := httputils.ParseServerHeader(res.Header.Get("Server")) + daemonPlatform = svrHeader.OS + if daemonPlatform != "linux" && daemonPlatform != "windows" { + panic("Cannot run tests against platform: " + daemonPlatform) + } + + // Now we know the daemon platform, can set paths used by tests. + var info types.Info + err = json.NewDecoder(body).Decode(&info) + if err != nil { + panic(fmt.Errorf("Init failed to unmarshal docker info: %v", err)) + } + + daemonStorageDriver = info.Driver + dockerBasePath = info.DockerRootDir + volumesConfigPath = filepath.Join(dockerBasePath, "volumes") + containerStoragePath = filepath.Join(dockerBasePath, "containers") + // Make sure in context of daemon, not the local platform. Note we can't + // use filepath.FromSlash or ToSlash here as they are a no-op on Unix. + if daemonPlatform == "windows" { + volumesConfigPath = strings.Replace(volumesConfigPath, `/`, `\`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `/`, `\`, -1) + // On Windows, extract out the version as we need to make selective + // decisions during integration testing as and when features are implemented. + // eg in "10.0 10550 (10550.1000.amd64fre.branch.date-time)" we want 10550 + windowsDaemonKV, _ = strconv.Atoi(strings.Split(info.KernelVersion, " ")[1]) + } else { + volumesConfigPath = strings.Replace(volumesConfigPath, `\`, `/`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `\`, `/`, -1) + } + isolation = info.Isolation +} + +func convertBasesize(basesizeBytes int64) (int64, error) { + basesize := units.HumanSize(float64(basesizeBytes)) + basesize = strings.Trim(basesize, " ")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + if err != nil { + return 0, err + } + return int64(basesizeFloat) * 1024 * 1024 * 1024, nil +} + +func daemonHost() string { + daemonURLStr := "unix://" + opts.DefaultUnixSocket + if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { + daemonURLStr = daemonHostVar + } + return daemonURLStr +} + +func getTLSConfig() (*tls.Config, error) { + dockerCertPath := os.Getenv("DOCKER_CERT_PATH") + + if dockerCertPath == "" { + return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") + } + + option := &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + } + tlsConfig, err := tlsconfig.Client(*option) + if err != nil { + return nil, err + } + + return tlsConfig, nil +} + +func sockConn(timeout time.Duration, daemon string) (net.Conn, error) { + if daemon == "" { + daemon = daemonHost() + } + daemonURL, err := url.Parse(daemon) + if err != nil { + return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) + } + + var c net.Conn + switch daemonURL.Scheme { + case "npipe": + return npipeDial(daemonURL.Path, timeout) + case "unix": + return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) + case "tcp": + if os.Getenv("DOCKER_TLS_VERIFY") != "" { + // Setup the socket TLS configuration. + tlsConfig, err := getTLSConfig() + if err != nil { + return nil, err + } + dialer := &net.Dialer{Timeout: timeout} + return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) + } + return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) + default: + return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) + } +} + +func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return -1, nil, err + } + + res, body, err := sockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + return -1, nil, err + } + b, err := readBody(body) + return res.StatusCode, b, err +} + +func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { + return sockRequestRawToDaemon(method, endpoint, data, ct, "") +} + +func sockRequestRawToDaemon(method, endpoint string, data io.Reader, ct, daemon string) (*http.Response, io.ReadCloser, error) { + req, client, err := newRequestClient(method, endpoint, data, ct, daemon) + if err != nil { + return nil, nil, err + } + + resp, err := client.Do(req) + if err != nil { + client.Close() + return nil, nil, err + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + defer resp.Body.Close() + return client.Close() + }) + + return resp, body, nil +} + +func sockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.Conn, *bufio.Reader, error) { + req, client, err := newRequestClient(method, endpoint, data, ct, "") + if err != nil { + return nil, nil, err + } + + client.Do(req) + conn, br := client.Hijack() + return conn, br, nil +} + +func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string) (*http.Request, *httputil.ClientConn, error) { + c, err := sockConn(time.Duration(10*time.Second), daemon) + if err != nil { + return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) + } + + client := httputil.NewClientConn(c, nil) + + req, err := http.NewRequest(method, endpoint, data) + if err != nil { + client.Close() + return nil, nil, fmt.Errorf("could not create new request: %v", err) + } + + if ct != "" { + req.Header.Set("Content-Type", ct) + } + return req, client, nil +} + +func readBody(b io.ReadCloser) ([]byte, error) { + defer b.Close() + return ioutil.ReadAll(b) +} + +func deleteContainer(container ...string) error { + result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, container...)...) + return result.Compare(icmd.Success) +} + +func getAllContainers() (string, error) { + getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of containers: %v\n", out) + } + + return out, err +} + +func deleteAllContainers() error { + containers, err := getAllContainers() + if err != nil { + fmt.Println(containers) + return err + } + if containers == "" { + return nil + } + + err = deleteContainer(strings.Split(strings.TrimSpace(containers), "\n")...) + if err != nil { + fmt.Println(err.Error()) + } + return err +} + +func deleteAllNetworks() error { + networks, err := getAllNetworks() + if err != nil { + return err + } + var errors []string + for _, n := range networks { + if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { + continue + } + if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { + // nat is a pre-defined network on Windows and cannot be removed + continue + } + status, b, err := sockRequest("DELETE", "/networks/"+n.Name, nil) + if err != nil { + errors = append(errors, err.Error()) + continue + } + if status != http.StatusNoContent { + errors = append(errors, fmt.Sprintf("error deleting network %s: %s", n.Name, string(b))) + } + } + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +func getAllNetworks() ([]types.NetworkResource, error) { + var networks []types.NetworkResource + _, b, err := sockRequest("GET", "/networks", nil) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &networks); err != nil { + return nil, err + } + return networks, nil +} + +func deleteAllPlugins() error { + plugins, err := getAllPlugins() + if err != nil { + return err + } + var errors []string + for _, p := range plugins { + status, b, err := sockRequest("DELETE", "/plugins/"+p.Name+"?force=1", nil) + if err != nil { + errors = append(errors, err.Error()) + continue + } + if status != http.StatusNoContent { + errors = append(errors, fmt.Sprintf("error deleting plugin %s: %s", p.Name, string(b))) + } + } + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +func getAllPlugins() (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + _, b, err := sockRequest("GET", "/plugins", nil) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &plugins); err != nil { + return nil, err + } + return plugins, nil +} + +func deleteAllVolumes() error { + volumes, err := getAllVolumes() + if err != nil { + return err + } + var errors []string + for _, v := range volumes { + status, b, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) + if err != nil { + errors = append(errors, err.Error()) + continue + } + if status != http.StatusNoContent { + errors = append(errors, fmt.Sprintf("error deleting volume %s: %s", v.Name, string(b))) + } + } + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +func getAllVolumes() ([]*types.Volume, error) { + var volumes volumetypes.VolumesListOKBody + _, b, err := sockRequest("GET", "/volumes", nil) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &volumes); err != nil { + return nil, err + } + return volumes.Volumes, nil +} + +var protectedImages = map[string]struct{}{} + +func deleteAllImages() error { + cmd := exec.Command(dockerBinary, "images") + cmd.Env = appendBaseEnv(true) + out, err := cmd.CombinedOutput() + if err != nil { + return err + } + lines := strings.Split(string(out), "\n")[1:] + var imgs []string + for _, l := range lines { + if l == "" { + continue + } + fields := strings.Fields(l) + imgTag := fields[0] + ":" + fields[1] + if _, ok := protectedImages[imgTag]; !ok { + if fields[0] == "" { + imgs = append(imgs, fields[2]) + continue + } + imgs = append(imgs, imgTag) + } + } + if len(imgs) == 0 { + return nil + } + args := append([]string{"rmi", "-f"}, imgs...) + if err := exec.Command(dockerBinary, args...).Run(); err != nil { + return err + } + return nil +} + +func getPausedContainers() (string, error) { + getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of paused containers: %v\n", out) + } + + return out, err +} + +func getSliceOfPausedContainers() ([]string, error) { + out, err := getPausedContainers() + if err == nil { + if len(out) == 0 { + return nil, err + } + slice := strings.Split(strings.TrimSpace(out), "\n") + return slice, err + } + return []string{out}, err +} + +func unpauseContainer(container string) error { + return icmd.RunCommand(dockerBinary, "unpause", container).Error +} + +func unpauseAllContainers() error { + containers, err := getPausedContainers() + if err != nil { + fmt.Println(containers) + return err + } + + containers = strings.Replace(containers, "\n", " ", -1) + containers = strings.Trim(containers, " ") + containerList := strings.Split(containers, " ") + + for _, value := range containerList { + if err = unpauseContainer(value); err != nil { + return err + } + } + + return nil +} + +func deleteImages(images ...string) error { + args := []string{dockerBinary, "rmi", "-f"} + return icmd.RunCmd(icmd.Cmd{Command: append(args, images...)}).Error +} + +func imageExists(image string) error { + return icmd.RunCommand(dockerBinary, "inspect", image).Error +} + +func pullImageIfNotExist(image string) error { + if err := imageExists(image); err != nil { + pullCmd := exec.Command(dockerBinary, "pull", image) + _, exitCode, err := runCommandWithOutput(pullCmd) + + if err != nil || exitCode != 0 { + return fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) + } + } + return nil +} + +func dockerCmdWithError(args ...string) (string, int, error) { + if err := validateArgs(args...); err != nil { + return "", 0, err + } + result := icmd.RunCommand(dockerBinary, args...) + if result.Error != nil { + return result.Combined(), result.ExitCode, result.Compare(icmd.Success) + } + return result.Combined(), result.ExitCode, result.Error +} + +func dockerCmdWithStdoutStderr(c *check.C, args ...string) (string, string, int) { + if err := validateArgs(args...); err != nil { + c.Fatalf(err.Error()) + } + + result := icmd.RunCommand(dockerBinary, args...) + // TODO: why is c ever nil? + if c != nil { + c.Assert(result, icmd.Matches, icmd.Success) + } + return result.Stdout(), result.Stderr(), result.ExitCode +} + +func dockerCmd(c *check.C, args ...string) (string, int) { + if err := validateArgs(args...); err != nil { + c.Fatalf(err.Error()) + } + result := icmd.RunCommand(dockerBinary, args...) + c.Assert(result, icmd.Matches, icmd.Success) + return result.Combined(), result.ExitCode +} + +func dockerCmdWithResult(args ...string) *icmd.Result { + return icmd.RunCommand(dockerBinary, args...) +} + +func binaryWithArgs(args ...string) []string { + return append([]string{dockerBinary}, args...) +} + +// execute a docker command with a timeout +func dockerCmdWithTimeout(timeout time.Duration, args ...string) *icmd.Result { + if err := validateArgs(args...); err != nil { + return &icmd.Result{Error: err} + } + return icmd.RunCmd(icmd.Cmd{Command: binaryWithArgs(args...), Timeout: timeout}) +} + +// execute a docker command in a directory +func dockerCmdInDir(c *check.C, path string, args ...string) (string, int, error) { + if err := validateArgs(args...); err != nil { + c.Fatalf(err.Error()) + } + result := icmd.RunCmd(icmd.Cmd{Command: binaryWithArgs(args...), Dir: path}) + return result.Combined(), result.ExitCode, result.Error +} + +// execute a docker command in a directory with a timeout +func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) *icmd.Result { + if err := validateArgs(args...); err != nil { + return &icmd.Result{Error: err} + } + return icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs(args...), + Timeout: timeout, + Dir: path, + }) +} + +// validateArgs is a checker to ensure tests are not running commands which are +// not supported on platforms. Specifically on Windows this is 'busybox top'. +func validateArgs(args ...string) error { + if daemonPlatform != "windows" { + return nil + } + foundBusybox := -1 + for key, value := range args { + if strings.ToLower(value) == "busybox" { + foundBusybox = key + } + if (foundBusybox != -1) && (key == foundBusybox+1) && (strings.ToLower(value) == "top") { + return errors.New("cannot use 'busybox top' in tests on Windows. Use runSleepingContainer()") + } + } + return nil +} + +// find the State.ExitCode in container metadata +func findContainerExitCode(c *check.C, name string, vargs ...string) string { + args := append(vargs, "inspect", "--format='{{ .State.ExitCode }} {{ .State.Error }}'", name) + cmd := exec.Command(dockerBinary, args...) + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + return out +} + +func findContainerIP(c *check.C, id string, network string) string { + out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", network), id) + return strings.Trim(out, " \r\n'") +} + +func getContainerCount() (int, error) { + const containers = "Containers:" + + cmd := exec.Command(dockerBinary, "info") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return 0, err + } + + lines := strings.Split(out, "\n") + for _, line := range lines { + if strings.Contains(line, containers) { + output := strings.TrimSpace(line) + output = strings.TrimLeft(output, containers) + output = strings.Trim(output, " ") + containerCount, err := strconv.Atoi(output) + if err != nil { + return 0, err + } + return containerCount, nil + } + } + return 0, fmt.Errorf("couldn't find the Container count in the output") +} + +// FakeContext creates directories that can be used as a build context +type FakeContext struct { + Dir string +} + +// Add a file at a path, creating directories where necessary +func (f *FakeContext) Add(file, content string) error { + return f.addFile(file, []byte(content)) +} + +func (f *FakeContext) addFile(file string, content []byte) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + dirpath := filepath.Dir(fp) + if dirpath != "." { + if err := os.MkdirAll(dirpath, 0755); err != nil { + return err + } + } + return ioutil.WriteFile(fp, content, 0644) + +} + +// Delete a file at a path +func (f *FakeContext) Delete(file string) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + return os.RemoveAll(fp) +} + +// Close deletes the context +func (f *FakeContext) Close() error { + return os.RemoveAll(f.Dir) +} + +func fakeContextFromNewTempDir() (*FakeContext, error) { + tmp, err := ioutil.TempDir("", "fake-context") + if err != nil { + return nil, err + } + if err := os.Chmod(tmp, 0755); err != nil { + return nil, err + } + return fakeContextFromDir(tmp), nil +} + +func fakeContextFromDir(dir string) *FakeContext { + return &FakeContext{dir} +} + +func fakeContextWithFiles(files map[string]string) (*FakeContext, error) { + ctx, err := fakeContextFromNewTempDir() + if err != nil { + return nil, err + } + for file, content := range files { + if err := ctx.Add(file, content); err != nil { + ctx.Close() + return nil, err + } + } + return ctx, nil +} + +func fakeContextAddDockerfile(ctx *FakeContext, dockerfile string) error { + if err := ctx.Add("Dockerfile", dockerfile); err != nil { + ctx.Close() + return err + } + return nil +} + +func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil { + return nil, err + } + return ctx, nil +} + +// FakeStorage is a static file server. It might be running locally or remotely +// on test host. +type FakeStorage interface { + Close() error + URL() string + CtxDir() string +} + +func fakeBinaryStorage(archives map[string]*bytes.Buffer) (FakeStorage, error) { + ctx, err := fakeContextFromNewTempDir() + if err != nil { + return nil, err + } + for name, content := range archives { + if err := ctx.addFile(name, content.Bytes()); err != nil { + return nil, err + } + } + return fakeStorageWithContext(ctx) +} + +// fakeStorage returns either a local or remote (at daemon machine) file server +func fakeStorage(files map[string]string) (FakeStorage, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + return fakeStorageWithContext(ctx) +} + +// fakeStorageWithContext returns either a local or remote (at daemon machine) file server +func fakeStorageWithContext(ctx *FakeContext) (FakeStorage, error) { + if isLocalDaemon { + return newLocalFakeStorage(ctx) + } + return newRemoteFileServer(ctx) +} + +// localFileStorage is a file storage on the running machine +type localFileStorage struct { + *FakeContext + *httptest.Server +} + +func (s *localFileStorage) URL() string { + return s.Server.URL +} + +func (s *localFileStorage) CtxDir() string { + return s.FakeContext.Dir +} + +func (s *localFileStorage) Close() error { + defer s.Server.Close() + return s.FakeContext.Close() +} + +func newLocalFakeStorage(ctx *FakeContext) (*localFileStorage, error) { + handler := http.FileServer(http.Dir(ctx.Dir)) + server := httptest.NewServer(handler) + return &localFileStorage{ + FakeContext: ctx, + Server: server, + }, nil +} + +// remoteFileServer is a containerized static file server started on the remote +// testing machine to be used in URL-accepting docker build functionality. +type remoteFileServer struct { + host string // hostname/port web server is listening to on docker host e.g. 0.0.0.0:43712 + container string + image string + ctx *FakeContext +} + +func (f *remoteFileServer) URL() string { + u := url.URL{ + Scheme: "http", + Host: f.host} + return u.String() +} + +func (f *remoteFileServer) CtxDir() string { + return f.ctx.Dir +} + +func (f *remoteFileServer) Close() error { + defer func() { + if f.ctx != nil { + f.ctx.Close() + } + if f.image != "" { + deleteImages(f.image) + } + }() + if f.container == "" { + return nil + } + return deleteContainer(f.container) +} + +func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) { + var ( + image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + ) + + if err := ensureHTTPServerImage(); err != nil { + return nil, err + } + + // Build the image + if err := fakeContextAddDockerfile(ctx, `FROM httpserver +COPY . /static`); err != nil { + return nil, fmt.Errorf("Cannot add Dockerfile to context: %v", err) + } + if _, err := buildImageFromContext(image, ctx, false); err != nil { + return nil, fmt.Errorf("failed building file storage container image: %v", err) + } + + // Start the container + runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--name", container, image) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + return nil, fmt.Errorf("failed to start file storage container. ec=%v\nout=%s\nerr=%v", ec, out, err) + } + + // Find out the system assigned port + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "port", container, "80/tcp")) + if err != nil { + return nil, fmt.Errorf("failed to find container port: err=%v\nout=%s", err, out) + } + + fileserverHostPort := strings.Trim(out, "\n") + _, port, err := net.SplitHostPort(fileserverHostPort) + if err != nil { + return nil, fmt.Errorf("unable to parse file server host:port: %v", err) + } + + dockerHostURL, err := url.Parse(daemonHost()) + if err != nil { + return nil, fmt.Errorf("unable to parse daemon host URL: %v", err) + } + + host, _, err := net.SplitHostPort(dockerHostURL.Host) + if err != nil { + return nil, fmt.Errorf("unable to parse docker daemon host:port: %v", err) + } + + return &remoteFileServer{ + container: container, + image: image, + host: fmt.Sprintf("%s:%s", host, port), + ctx: ctx}, nil +} + +func inspectFieldAndMarshall(c *check.C, name, field string, output interface{}) { + str := inspectFieldJSON(c, name, field) + err := json.Unmarshal([]byte(str), output) + if c != nil { + c.Assert(err, check.IsNil, check.Commentf("failed to unmarshal: %v", err)) + } +} + +func inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) + out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func inspectFieldWithError(name, field string) (string, error) { + return inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +func inspectField(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf(".%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +func inspectFieldJSON(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("json .%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +func inspectFieldMap(c *check.C, name, path, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +func inspectMountSourceField(name, destination string) (string, error) { + m, err := inspectMountPoint(name, destination) + if err != nil { + return "", err + } + return m.Source, nil +} + +func inspectMountPoint(name, destination string) (types.MountPoint, error) { + out, err := inspectFilter(name, "json .Mounts") + if err != nil { + return types.MountPoint{}, err + } + + return inspectMountPointJSON(out, destination) +} + +var errMountNotFound = errors.New("mount point not found") + +func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { + var mp []types.MountPoint + if err := json.Unmarshal([]byte(j), &mp); err != nil { + return types.MountPoint{}, err + } + + var m *types.MountPoint + for _, c := range mp { + if c.Destination == destination { + m = &c + break + } + } + + if m == nil { + return types.MountPoint{}, errMountNotFound + } + + return *m, nil +} + +func inspectImage(name, filter string) (string, error) { + args := []string{"inspect", "--type", "image"} + if filter != "" { + format := fmt.Sprintf("{{%s}}", filter) + args = append(args, "-f", format) + } + args = append(args, name) + inspectCmd := exec.Command(dockerBinary, args...) + out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func getIDByName(name string) (string, error) { + return inspectFieldWithError(name, "Id") +} + +// getContainerState returns the exit code of the container +// and true if it's running +// the exit code should be ignored if it's running +func getContainerState(c *check.C, id string) (int, bool, error) { + var ( + exitStatus int + running bool + ) + out, exitCode := dockerCmd(c, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) + if exitCode != 0 { + return 0, false, fmt.Errorf("%q doesn't exist: %s", id, out) + } + + out = strings.Trim(out, "\n") + splitOutput := strings.Split(out, " ") + if len(splitOutput) != 2 { + return 0, false, fmt.Errorf("failed to get container state: output is broken") + } + if splitOutput[0] == "true" { + running = true + } + if n, err := strconv.Atoi(splitOutput[1]); err == nil { + exitStatus = n + } else { + return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer") + } + + return exitStatus, running, nil +} + +func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd { + return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...) +} + +func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { + args := []string{} + if host != "" { + args = append(args, "--host", host) + } + args = append(args, "build", "-t", name) + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd +} + +func buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, error) { + buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", out, fmt.Errorf("failed to build the image: %s", out) + } + id, err := getIDByName(name) + if err != nil { + return "", out, err + } + return id, out, nil +} + +func buildImageWithStdoutStderr(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, string, error) { + buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImage(name, dockerfile string, useCache bool, buildFlags ...string) (string, error) { + id, _, err := buildImageWithOut(name, dockerfile, useCache, buildFlags...) + return id, err +} + +func buildImageFromContext(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, error) { + id, _, err := buildImageFromContextWithOut(name, ctx, useCache, buildFlags...) + if err != nil { + return "", err + } + return id, nil +} + +func buildImageFromContextWithOut(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, ".") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Dir = ctx.Dir + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", "", fmt.Errorf("failed to build the image: %s", out) + } + id, err := getIDByName(name) + if err != nil { + return "", "", err + } + return id, out, nil +} + +func buildImageFromContextWithStdoutStderr(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, ".") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Dir = ctx.Dir + + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImageFromGitWithStdoutStderr(name string, ctx *fakeGit, useCache bool, buildFlags ...string) (string, string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, ctx.RepoURL) + buildCmd := exec.Command(dockerBinary, args...) + + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImageFromPath(name, path string, useCache bool, buildFlags ...string) (string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, path) + buildCmd := exec.Command(dockerBinary, args...) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to build the image: %s", out) + } + return getIDByName(name) +} + +type gitServer interface { + URL() string + Close() error +} + +type localGitServer struct { + *httptest.Server +} + +func (r *localGitServer) Close() error { + r.Server.Close() + return nil +} + +func (r *localGitServer) URL() string { + return r.Server.URL +} + +type fakeGit struct { + root string + server gitServer + RepoURL string +} + +func (g *fakeGit) Close() { + g.server.Close() + os.RemoveAll(g.root) +} + +func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (*fakeGit, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + defer ctx.Close() + curdir, err := os.Getwd() + if err != nil { + return nil, err + } + defer os.Chdir(curdir) + + if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output) + } + err = os.Chdir(ctx.Dir) + if err != nil { + return nil, err + } + if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to set 'user.name': %s (%s)", err, output) + } + if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to set 'user.email': %s (%s)", err, output) + } + if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output) + } + if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output) + } + + root, err := ioutil.TempDir("", "docker-test-git-repo") + if err != nil { + return nil, err + } + repoPath := filepath.Join(root, name+".git") + if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { + os.RemoveAll(root) + return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output) + } + err = os.Chdir(repoPath) + if err != nil { + os.RemoveAll(root) + return nil, err + } + if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { + os.RemoveAll(root) + return nil, fmt.Errorf("error trying to git update-server-info: %s (%s)", err, output) + } + err = os.Chdir(curdir) + if err != nil { + os.RemoveAll(root) + return nil, err + } + + var server gitServer + if !enforceLocalServer { + // use fakeStorage server, which might be local or remote (at test daemon) + server, err = fakeStorageWithContext(fakeContextFromDir(root)) + if err != nil { + return nil, fmt.Errorf("cannot start fake storage: %v", err) + } + } else { + // always start a local http server on CLI test machine + httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) + server = &localGitServer{httpServer} + } + return &fakeGit{ + root: root, + server: server, + RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), + }, nil +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Fail the test when error occurs. +func writeFile(dst, content string, c *check.C) { + // Create subdirectories if necessary + c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil) + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + c.Assert(err, check.IsNil) + defer f.Close() + // Write content (truncate if it exists) + _, err = io.Copy(f, strings.NewReader(content)) + c.Assert(err, check.IsNil) +} + +// Return the contents of file at path `src`. +// Fail the test when error occurs. +func readFile(src string, c *check.C) (content string) { + data, err := ioutil.ReadFile(src) + c.Assert(err, check.IsNil) + + return string(data) +} + +func containerStorageFile(containerID, basename string) string { + return filepath.Join(containerStoragePath, containerID, basename) +} + +// docker commands that use this function must be run with the '-d' switch. +func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return nil, fmt.Errorf("%v: %q", err, out) + } + + contID := strings.TrimSpace(out) + + if err := waitRun(contID); err != nil { + return nil, fmt.Errorf("%v: %q", contID, err) + } + + return readContainerFile(contID, filename) +} + +func readContainerFile(containerID, filename string) ([]byte, error) { + f, err := os.Open(containerStorageFile(containerID, filename)) + if err != nil { + return nil, err + } + defer f.Close() + + content, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return content, nil +} + +func readContainerFileWithExec(containerID, filename string) ([]byte, error) { + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "exec", containerID, "cat", filename)) + return []byte(out), err +} + +// daemonTime provides the current time on the daemon host +func daemonTime(c *check.C) time.Time { + if isLocalDaemon { + return time.Now() + } + + status, body, err := sockRequest("GET", "/info", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + type infoJSON struct { + SystemTime string + } + var info infoJSON + err = json.Unmarshal(body, &info) + c.Assert(err, check.IsNil, check.Commentf("unable to unmarshal GET /info response")) + + dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) + c.Assert(err, check.IsNil, check.Commentf("invalid time format in GET /info response")) + return dt +} + +// daemonUnixTime returns the current time on the daemon host with nanoseconds precision. +// It return the time formatted how the client sends timestamps to the server. +func daemonUnixTime(c *check.C) string { + return parseEventTime(daemonTime(c)) +} + +func parseEventTime(t time.Time) string { + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())) +} + +func setupRegistry(c *check.C, schema1 bool, auth, tokenURL string) *testRegistryV2 { + reg, err := newTestRegistryV2(c, schema1, auth, tokenURL) + c.Assert(err, check.IsNil) + + // Wait for registry to be ready to serve requests. + for i := 0; i != 50; i++ { + if err = reg.Ping(); err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + + c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for test registry to become available: %v", err)) + return reg +} + +func setupNotary(c *check.C) *testNotary { + ts, err := newTestNotary(c) + c.Assert(err, check.IsNil) + + return ts +} + +// appendBaseEnv appends the minimum set of environment variables to exec the +// docker cli binary for testing with correct configuration to the given env +// list. +func appendBaseEnv(isTLS bool, env ...string) []string { + preserveList := []string{ + // preserve remote test host + "DOCKER_HOST", + + // windows: requires preserving SystemRoot, otherwise dial tcp fails + // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." + "SystemRoot", + + // testing help text requires the $PATH to dockerd is set + "PATH", + } + if isTLS { + preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH") + } + + for _, key := range preserveList { + if val := os.Getenv(key); val != "" { + env = append(env, fmt.Sprintf("%s=%s", key, val)) + } + } + return env +} + +func createTmpFile(c *check.C, content string) string { + f, err := ioutil.TempFile("", "testfile") + c.Assert(err, check.IsNil) + + filename := f.Name() + + err = ioutil.WriteFile(filename, []byte(content), 0644) + c.Assert(err, check.IsNil) + + return filename +} + +func buildImageWithOutInDamon(socket string, name, dockerfile string, useCache bool) (string, error) { + args := []string{"--host", socket} + buildCmd := buildImageCmdArgs(args, name, dockerfile, useCache) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return out, fmt.Errorf("failed to build the image: %s, error: %v", out, err) + } + return out, nil +} + +func buildImageCmdArgs(args []string, name, dockerfile string, useCache bool) *exec.Cmd { + args = append(args, []string{"-D", "build", "-t", name}...) + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd + +} + +func waitForContainer(contID string, args ...string) error { + args = append([]string{dockerBinary, "run", "--name", contID}, args...) + result := icmd.RunCmd(icmd.Cmd{Command: args}) + if result.Error != nil { + return result.Error + } + return waitRun(contID) +} + +// waitRestart will wait for the specified container to restart once +func waitRestart(contID string, duration time.Duration) error { + return waitInspect(contID, "{{.RestartCount}}", "1", duration) +} + +// waitRun will wait for the specified container to be running, maximum 5 seconds. +func waitRun(contID string) error { + return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second) +} + +// waitExited will wait for the specified container to state exit, subject +// to a maximum time limit in seconds supplied by the caller +func waitExited(contID string, duration time.Duration) error { + return waitInspect(contID, "{{.State.Status}}", "exited", duration) +} + +// waitInspect will wait for the specified container to have the specified string +// in the inspect output. It will wait until the specified timeout (in seconds) +// is reached. +func waitInspect(name, expr, expected string, timeout time.Duration) error { + return waitInspectWithArgs(name, expr, expected, timeout) +} + +func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { + after := time.After(timeout) + + args := append(arg, "inspect", "-f", expr, name) + for { + result := icmd.RunCommand(dockerBinary, args...) + if result.Error != nil { + if !strings.Contains(result.Stderr(), "No such") { + return fmt.Errorf("error executing docker inspect: %v\n%s", + result.Stderr(), result.Stdout()) + } + select { + case <-after: + return result.Error + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + + out := strings.TrimSpace(result.Stdout()) + if out == expected { + break + } + + select { + case <-after: + return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) + default: + } + + time.Sleep(100 * time.Millisecond) + } + return nil +} + +func getInspectBody(c *check.C, version, id string) []byte { + endpoint := fmt.Sprintf("/%s/containers/%s/json", version, id) + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + return body +} + +// Run a long running idle task in a background container using the +// system-specific default image and command. +func runSleepingContainer(c *check.C, extraArgs ...string) (string, int) { + return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) +} + +// Run a long running idle task in a background container using the specified +// image and the system-specific command. +func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) (string, int) { + args := []string{"run", "-d"} + args = append(args, extraArgs...) + args = append(args, image) + args = append(args, sleepCommandForDaemonPlatform()...) + return dockerCmd(c, args...) +} + +func getRootUIDGID() (int, int, error) { + uidgid := strings.Split(filepath.Base(dockerBasePath), ".") + if len(uidgid) == 1 { + //user namespace remapping is not turned on; return 0 + return 0, 0, nil + } + uid, err := strconv.Atoi(uidgid[0]) + if err != nil { + return 0, 0, err + } + gid, err := strconv.Atoi(uidgid[1]) + if err != nil { + return 0, 0, err + } + return uid, gid, nil +} + +// minimalBaseImage returns the name of the minimal base image for the current +// daemon platform. +func minimalBaseImage() string { + if daemonPlatform == "windows" { + return WindowsBaseImage + } + return "scratch" +} + +func getGoroutineNumber() (int, error) { + i := struct { + NGoroutines int + }{} + status, b, err := sockRequest("GET", "/info", nil) + if err != nil { + return 0, err + } + if status != http.StatusOK { + return 0, fmt.Errorf("http status code: %d", status) + } + if err := json.Unmarshal(b, &i); err != nil { + return 0, err + } + return i.NGoroutines, nil +} + +func waitForGoroutines(expected int) error { + t := time.After(30 * time.Second) + for { + select { + case <-t: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n > expected { + return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n) + } + default: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n <= expected { + return nil + } + time.Sleep(200 * time.Millisecond) + } + } +} + +// getErrorMessage returns the error message from an error API response +func getErrorMessage(c *check.C, body []byte) string { + var resp types.ErrorResponse + c.Assert(json.Unmarshal(body, &resp), check.IsNil) + return strings.TrimSpace(resp.Message) +} + +func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) { + after := time.After(timeout) + for { + v, comment := f(c) + assert, _ := checker.Check(append([]interface{}{v}, args...), checker.Info().Params) + select { + case <-after: + assert = true + default: + } + if assert { + if comment != nil { + args = append(args, comment) + } + c.Assert(v, checker, args...) + return + } + time.Sleep(100 * time.Millisecond) + } +} + +type checkF func(*check.C) (interface{}, check.CommentInterface) +type reducer func(...interface{}) interface{} + +func reducedCheck(r reducer, funcs ...checkF) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + var values []interface{} + var comments []string + for _, f := range funcs { + v, comment := f(c) + values = append(values, v) + if comment != nil { + comments = append(comments, comment.CheckCommentString()) + } + } + return r(values...), check.Commentf("%v", strings.Join(comments, ", ")) + } +} + +func sumAsIntegers(vals ...interface{}) interface{} { + var s int + for _, v := range vals { + s += v.(int) + } + return s +} diff --git a/vendor/github.com/moby/moby/integration-cli/events_utils.go b/vendor/github.com/moby/moby/integration-cli/events_utils.go new file mode 100644 index 0000000..ba24179 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/events_utils.go @@ -0,0 +1,206 @@ +package main + +import ( + "bufio" + "bytes" + "io" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + eventstestutils "github.com/docker/docker/daemon/events/testutils" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// eventMatcher is a function that tries to match an event input. +// It returns true if the event matches and a map with +// a set of key/value to identify the match. +type eventMatcher func(text string) (map[string]string, bool) + +// eventMatchProcessor is a function to handle an event match. +// It receives a map of key/value with the information extracted in a match. +type eventMatchProcessor func(matches map[string]string) + +// eventObserver runs an events commands and observes its output. +type eventObserver struct { + buffer *bytes.Buffer + command *exec.Cmd + scanner *bufio.Scanner + startTime string + disconnectionError error +} + +// newEventObserver creates the observer and initializes the command +// without running it. Users must call `eventObserver.Start` to start the command. +func newEventObserver(c *check.C, args ...string) (*eventObserver, error) { + since := daemonTime(c).Unix() + return newEventObserverWithBacklog(c, since, args...) +} + +// newEventObserverWithBacklog creates a new observer changing the start time of the backlog to return. +func newEventObserverWithBacklog(c *check.C, since int64, args ...string) (*eventObserver, error) { + startTime := strconv.FormatInt(since, 10) + cmdArgs := []string{"events", "--since", startTime} + if len(args) > 0 { + cmdArgs = append(cmdArgs, args...) + } + eventsCmd := exec.Command(dockerBinary, cmdArgs...) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + return nil, err + } + + return &eventObserver{ + buffer: new(bytes.Buffer), + command: eventsCmd, + scanner: bufio.NewScanner(stdout), + startTime: startTime, + }, nil +} + +// Start starts the events command. +func (e *eventObserver) Start() error { + return e.command.Start() +} + +// Stop stops the events command. +func (e *eventObserver) Stop() { + e.command.Process.Kill() + e.command.Process.Release() +} + +// Match tries to match the events output with a given matcher. +func (e *eventObserver) Match(match eventMatcher, process eventMatchProcessor) { + for e.scanner.Scan() { + text := e.scanner.Text() + e.buffer.WriteString(text) + e.buffer.WriteString("\n") + + if matches, ok := match(text); ok { + process(matches) + } + } + + err := e.scanner.Err() + if err == nil { + err = io.EOF + } + + logrus.Debugf("EventObserver scanner loop finished: %v", err) + e.disconnectionError = err +} + +func (e *eventObserver) CheckEventError(c *check.C, id, event string, match eventMatcher) { + var foundEvent bool + scannerOut := e.buffer.String() + + if e.disconnectionError != nil { + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", e.startTime, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + for _, e := range events { + if _, ok := match(e); ok { + foundEvent = true + break + } + } + scannerOut = out + } + if !foundEvent { + c.Fatalf("failed to observe event `%s` for %s. Disconnection error: %v\nout:\n%v", event, id, e.disconnectionError, scannerOut) + } +} + +// matchEventLine matches a text with the event regular expression. +// It returns the matches and true if the regular expression matches with the given id and event type. +// It returns an empty map and false if there is no match. +func matchEventLine(id, eventType string, actions map[string]chan bool) eventMatcher { + return func(text string) (map[string]string, bool) { + matches := eventstestutils.ScanMap(text) + if len(matches) == 0 { + return matches, false + } + + if matchIDAndEventType(matches, id, eventType) { + if _, ok := actions[matches["action"]]; ok { + return matches, true + } + } + return matches, false + } +} + +// processEventMatch closes an action channel when an event line matches the expected action. +func processEventMatch(actions map[string]chan bool) eventMatchProcessor { + return func(matches map[string]string) { + if ch, ok := actions[matches["action"]]; ok { + ch <- true + } + } +} + +// parseEventAction parses an event text and returns the action. +// It fails if the text is not in the event format. +func parseEventAction(c *check.C, text string) string { + matches := eventstestutils.ScanMap(text) + return matches["action"] +} + +// eventActionsByIDAndType returns the actions for a given id and type. +// It fails if the text is not in the event format. +func eventActionsByIDAndType(c *check.C, events []string, id, eventType string) []string { + var filtered []string + for _, event := range events { + matches := eventstestutils.ScanMap(event) + c.Assert(matches, checker.Not(checker.IsNil)) + if matchIDAndEventType(matches, id, eventType) { + filtered = append(filtered, matches["action"]) + } + } + return filtered +} + +// matchIDAndEventType returns true if an event matches a given id and type. +// It also resolves names in the event attributes if the id doesn't match. +func matchIDAndEventType(matches map[string]string, id, eventType string) bool { + return matchEventID(matches, id) && matches["eventType"] == eventType +} + +func matchEventID(matches map[string]string, id string) bool { + matchID := matches["id"] == id || strings.HasPrefix(matches["id"], id) + if !matchID && matches["attributes"] != "" { + // try matching a name in the attributes + attributes := map[string]string{} + for _, a := range strings.Split(matches["attributes"], ", ") { + kv := strings.Split(a, "=") + attributes[kv[0]] = kv[1] + } + matchID = attributes["name"] == id + } + return matchID +} + +func parseEvents(c *check.C, out, match string) { + events := strings.Split(strings.TrimSpace(out), "\n") + for _, event := range events { + matches := eventstestutils.ScanMap(event) + matched, err := regexp.MatchString(match, matches["action"]) + c.Assert(err, checker.IsNil) + c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"])) + } +} + +func parseEventsWithID(c *check.C, out, match, id string) { + events := strings.Split(strings.TrimSpace(out), "\n") + for _, event := range events { + matches := eventstestutils.ScanMap(event) + c.Assert(matchEventID(matches, id), checker.True) + + matched, err := regexp.MatchString(match, matches["action"]) + c.Assert(err, checker.IsNil) + c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"])) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures.go b/vendor/github.com/moby/moby/integration-cli/fixtures.go new file mode 100644 index 0000000..e99b738 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures.go @@ -0,0 +1,69 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" +) + +var ensureHTTPServerOnce sync.Once + +func ensureHTTPServerImage() error { + var doIt bool + ensureHTTPServerOnce.Do(func() { + doIt = true + }) + + if !doIt { + return nil + } + + protectedImages["httpserver:latest"] = struct{}{} + + tmp, err := ioutil.TempDir("", "docker-http-server-test") + if err != nil { + return fmt.Errorf("could not build http server: %v", err) + } + defer os.RemoveAll(tmp) + + goos := daemonPlatform + if goos == "" { + goos = "linux" + } + goarch := os.Getenv("DOCKER_ENGINE_GOARCH") + if goarch == "" { + goarch = "amd64" + } + + goCmd, lookErr := exec.LookPath("go") + if lookErr != nil { + return fmt.Errorf("could not build http server: %v", lookErr) + } + + cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") + cmd.Env = append(os.Environ(), []string{ + "CGO_ENABLED=0", + "GOOS=" + goos, + "GOARCH=" + goarch, + }...) + var out []byte + if out, err = cmd.CombinedOutput(); err != nil { + return fmt.Errorf("could not build http server: %s", string(out)) + } + + cpCmd, lookErr := exec.LookPath("cp") + if lookErr != nil { + return fmt.Errorf("could not build http server: %v", lookErr) + } + if out, err = exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { + return fmt.Errorf("could not build http server: %v", string(out)) + } + + if out, err = exec.Command(dockerBinary, "build", "-q", "-t", "httpserver", tmp).CombinedOutput(); err != nil { + return fmt.Errorf("could not build http server: %v", string(out)) + } + return nil +} diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/auth/docker-credential-shell-test b/vendor/github.com/moby/moby/integration-cli/fixtures/auth/docker-credential-shell-test new file mode 100755 index 0000000..a7be56b --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/auth/docker-credential-shell-test @@ -0,0 +1,55 @@ +#!/bin/bash + +set -e + +listFile=shell_test_list.json + +case $1 in + "store") + in=$( $TEMP/$serverHash + # add the server to the list file + if [[ ! -f $TEMP/$listFile ]]; then + echo "{ \"${server}\": \"${username}\" }" > $TEMP/$listFile + else + list=$(<$TEMP/$listFile) + echo "$list" | jq ". + {\"${server}\": \"${username}\"}" > $TEMP/$listFile + fi + ;; + "get") + in=$( $TEMP/$listFile + ;; + "list") + if [[ ! -f $TEMP/$listFile ]]; then + echo "{}" + else + payload=$(<$TEMP/$listFile) + echo "$payload" + fi + ;; + *) + echo "unknown credential option" + exit 1 + ;; +esac diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/credentialspecs/valid.json b/vendor/github.com/moby/moby/integration-cli/fixtures/credentialspecs/valid.json new file mode 100644 index 0000000..28913e4 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/credentialspecs/valid.json @@ -0,0 +1,25 @@ +{ + "CmsPlugins": [ + "ActiveDirectory" + ], + "DomainJoinConfig": { + "Sid": "S-1-5-21-4288985-3632099173-1864715694", + "MachineAccountName": "MusicStoreAcct", + "Guid": "3705d4c3-0b80-42a9-ad97-ebc1801c74b9", + "DnsTreeName": "hyperv.local", + "DnsName": "hyperv.local", + "NetBiosName": "hyperv" + }, + "ActiveDirectoryConfig": { + "GroupManagedServiceAccounts": [ + { + "Name": "MusicStoreAcct", + "Scope": "hyperv.local" + }, + { + "Name": "MusicStoreAcct", + "Scope": "hyperv" + } + ] + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/default.yaml b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/default.yaml new file mode 100644 index 0000000..f30c04f --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/default.yaml @@ -0,0 +1,9 @@ + +version: "3" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + db: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: "tail -f /dev/null" diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/remove.yaml b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/remove.yaml new file mode 100644 index 0000000..4ec8cac --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/remove.yaml @@ -0,0 +1,11 @@ + +version: "3.1" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + secrets: + - special +secrets: + special: + file: fixtures/secrets/default diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/secrets.yaml b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/secrets.yaml new file mode 100644 index 0000000..6ac92cd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/secrets.yaml @@ -0,0 +1,20 @@ + +version: "3.1" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + secrets: + - special + - source: super + target: foo.txt + mode: 0400 + - star +secrets: + special: + file: fixtures/secrets/default + super: + file: fixtures/secrets/default + star: + external: + name: outside diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/https/ca.pem b/vendor/github.com/moby/moby/integration-cli/fixtures/https/ca.pem new file mode 100644 index 0000000..6825d6d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/https/ca.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI +Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls +QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx +CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv +MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD +VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW +EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn +0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp +AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 +sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV +HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi +zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE +ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt +Zxtf5lL6KSO9Y+EFwM+rju6hm5hW +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-cert.pem b/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-cert.pem new file mode 100644 index 0000000..c05ed47 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 14:17:54 2013 GMT + Not After : Dec 2 14:17:54 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: + 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: + f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: + b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: + 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: + 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: + aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: + 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: + 7e:4e:78:7d:0a:9e:8f:42:43 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: + 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: + 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: + af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: + 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: + f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: + 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: + 4a:c4 +-----BEGIN CERTIFICATE----- +MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp +ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 +LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 +peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB +Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 +cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ +YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV +HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN +AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ +kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 +aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-key.pem b/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-key.pem new file mode 100644 index 0000000..b5c15f8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU +9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw +gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ +93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh +xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 +FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN +OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC +4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU +SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe +iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy +v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl +qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw +qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 +ksDFuNxAzbhl +-----END PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-rogue-cert.pem b/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-rogue-cert.pem new file mode 100644 index 0000000..21ae4bd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-rogue-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 24 17:54:59 2014 GMT + Not After : Feb 22 17:54:59 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7: + e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5: + 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d: + bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6: + b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f: + f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e: + e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87: + 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d: + 1d:7b:6c:7b:be:89:6b:88:8b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85 + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1: + 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c: + fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83: + be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92: + cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3: + 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd: + 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76: + b3:f9 +-----BEGIN CERTIFICATE----- +MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx +ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t +YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM +R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66 +aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL +lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB +hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW +BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x +I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw +EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL +EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l +MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD +VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB +AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9 +RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3 +C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5 +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-rogue-key.pem b/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-rogue-key.pem new file mode 100644 index 0000000..53c122a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/https/client-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce +aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W +tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf +bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ +nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW ++IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej +VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd +vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1 +6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F +MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa +8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg +OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ +SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5 +nrOdMf15T6QF7Q== +-----END PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-cert.pem b/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-cert.pem new file mode 100644 index 0000000..08abfd1 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 4 (0x4) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 15:01:20 2013 GMT + Not After : Dec 2 15:01:20 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: + e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: + 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: + 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: + e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: + 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: + 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: + c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: + a8:05:32:1e:f9:95:09:14:75 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: + ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: + 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: + df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: + c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: + 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: + 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: + 15:42 +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER +MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h +aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b +LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 +cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch +M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG ++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl +cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw +gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ +BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw +EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL +zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn +mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX +dDBV9m4gmmweCbQMFUI= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-key.pem b/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-key.pem new file mode 100644 index 0000000..c269320 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx +0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y +4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ +lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ +wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ +wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS +IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 +4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP +WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq ++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv +HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj ++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc +BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW +5nCwDu5ZTP+khltg +-----END PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-rogue-cert.pem b/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-rogue-cert.pem new file mode 100644 index 0000000..28feba6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-rogue-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 28 18:49:31 2014 GMT + Not After : Feb 26 18:49:31 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75: + 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9: + 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84: + 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a: + 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82: + aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb: + d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f: + 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a: + 9e:02:5c:be:65:98:a4:b4:b5 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30: + 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85: + 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65: + 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73: + 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3: + 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c: + ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06: + c7:9f +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv +c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu +ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I +dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc +qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW +VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg +hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO +lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe +MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj +bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw +Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO +AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3 +CQxdfIYk3ZLVsxQGx58= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-rogue-key.pem b/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-rogue-key.pem new file mode 100644 index 0000000..10f7c65 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/https/server-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG +j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq +FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C +ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR +8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8 +6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl +1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD +37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO +moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl +3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w +ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs +wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj +iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+ +Z1hrIq8xYl2LOQ== +-----END PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/load/emptyLayer.tar b/vendor/github.com/moby/moby/integration-cli/fixtures/load/emptyLayer.tar new file mode 100644 index 0000000000000000000000000000000000000000..beabb569ac1e7fea69848f79ffd77fb17b2487b0 GIT binary patch literal 30720 zcmeI4ZExeo5y$c* zovW*2`my-BS#-6VC#(TWylUoH;FtY>E>gSBdD(R?T)*vNRTtCjW{dQq*rhc~JiEJz zsl`J#iI?4n-S>-|RPWcb+PkO&p2Vj7>-wi+S}$j_7sXd|yt?(*H*av1 z+nj$j?T=M`^{U#O?d!U0Kio7`Pi=kdMg6yBHQQD7cjwb=bNHthMQp;4h~=l$E~*AE zuwWB8>5;iMTvuHR-Lmal{@LpHT4MUleZcZ73oou6S9)q>^d-k>oX_}Qcxyo^p;T#I z4yBjEs}MP78A@rGh|&6(B$v4q&IKEaPbcYiRLK(|Fm&Lbue#U-=g_}>K?m>u=o9SE z|2Wfh|Nm{EC(M}~AUk1=_?|0jAs(MS-F>HlGq1Fv!G%u}F zxq~RdI2NK#f|*#Nn~(V{vFLOO8UqEuV@wWH0lWbP;+XE!kN?B|zvfDCV+{KLI33XJ zG420+{r|!CKbik8jnddl(C`@lF&t?A{}{IbApid{FhAKg29N*Y{y*OI0q1{v<6mif z{ttd8A4C0_$Nx6||IluLzUGZ<*Y1BAOzlPXfBaM3-28d; z^3|(Ru4ZY9j6K(!m62PF7=SBvvY>6u9_MkwI>$54a7OCT$T3-m=lJE( zmUnm}hhi;&u@=Bq+_JlF`T)-5bsBwzUH;iw#FV;!sB`|AkTbLnLJ;j*XJ0q(@q+&r#AY7K_tidj`Je@64Y zt^SAcHvH+*phv{y_9nO&aXZ27@H1gNA7uuCn|I7u4AJ4h5%>!(#?o4@01sM9=?Y<}U-wbn}~IT=+q) zpnU-y`(0?Hj|bqk-Cup*+8>Y5g-CYfCvx)B0vO)01+SpM1Tko z0U|&IhyW2F0z`la5CI}U1c(3;cp3zTzMXo^%Y*&eXctdy*>?6A8Sq zp|r}Cz7&k%LWj}|qk_p@3pT0=6BEu*_dHZBTzvXOYO{wlC)G^^hyW2F0z`la5CI}U k1c(3;AOb{y2oM1xKm>>Y5g-CYfCvx)B0vO)z%wTBe{YUtx&QzG literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/load/frozen.go b/vendor/github.com/moby/moby/integration-cli/fixtures/load/frozen.go new file mode 100644 index 0000000..13cd393 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/load/frozen.go @@ -0,0 +1,182 @@ +package load + +import ( + "bufio" + "bytes" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var frozenImgDir = "/docker-frozen-images" + +// FrozenImagesLinux loads the frozen image set for the integration suite +// If the images are not available locally it will download them +// TODO: This loads whatever is in the frozen image dir, regardless of what +// images were passed in. If the images need to be downloaded, then it will respect +// the passed in images +func FrozenImagesLinux(dockerBinary string, images ...string) error { + imgNS := os.Getenv("TEST_IMAGE_NAMESPACE") + var loadImages []struct{ srcName, destName string } + for _, img := range images { + if err := exec.Command(dockerBinary, "inspect", "--type=image", img).Run(); err != nil { + srcName := img + // hello-world:latest gets re-tagged as hello-world:frozen + // there are some tests that use hello-world:latest specifically so it pulls + // the image and hello-world:frozen is used for when we just want a super + // small image + if img == "hello-world:frozen" { + srcName = "hello-world:latest" + } + if imgNS != "" { + srcName = imgNS + "/" + srcName + } + loadImages = append(loadImages, struct{ srcName, destName string }{ + srcName: srcName, + destName: img, + }) + } + } + if len(loadImages) == 0 { + // everything is loaded, we're done + return nil + } + + fi, err := os.Stat(frozenImgDir) + if err != nil || !fi.IsDir() { + srcImages := make([]string, 0, len(loadImages)) + for _, img := range loadImages { + srcImages = append(srcImages, img.srcName) + } + if err := pullImages(dockerBinary, srcImages); err != nil { + return errors.Wrap(err, "error pulling image list") + } + } else { + if err := loadFrozenImages(dockerBinary); err != nil { + return err + } + } + + for _, img := range loadImages { + if img.srcName != img.destName { + if out, err := exec.Command(dockerBinary, "tag", img.srcName, img.destName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + if out, err := exec.Command(dockerBinary, "rmi", img.srcName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + } + } + return nil +} + +func loadFrozenImages(dockerBinary string) error { + tar, err := exec.LookPath("tar") + if err != nil { + return errors.Wrap(err, "could not find tar binary") + } + tarCmd := exec.Command(tar, "-cC", frozenImgDir, ".") + out, err := tarCmd.StdoutPipe() + if err != nil { + return errors.Wrap(err, "error getting stdout pipe for tar command") + } + + errBuf := bytes.NewBuffer(nil) + tarCmd.Stderr = errBuf + tarCmd.Start() + defer tarCmd.Wait() + + cmd := exec.Command(dockerBinary, "load") + cmd.Stdin = out + if out, err := cmd.CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + return nil +} + +func pullImages(dockerBinary string, images []string) error { + cwd, err := os.Getwd() + if err != nil { + return errors.Wrap(err, "error getting path to dockerfile") + } + dockerfile := os.Getenv("DOCKERFILE") + if dockerfile == "" { + dockerfile = "Dockerfile" + } + dockerfilePath := filepath.Join(filepath.Dir(filepath.Clean(cwd)), dockerfile) + pullRefs, err := readFrozenImageList(dockerfilePath, images) + if err != nil { + return errors.Wrap(err, "error reading frozen image list") + } + + var wg sync.WaitGroup + chErr := make(chan error, len(images)) + for tag, ref := range pullRefs { + wg.Add(1) + go func(tag, ref string) { + defer wg.Done() + if out, err := exec.Command(dockerBinary, "pull", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "tag", ref, tag).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "rmi", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + }(tag, ref) + } + wg.Wait() + close(chErr) + return <-chErr +} + +func readFrozenImageList(dockerfilePath string, images []string) (map[string]string, error) { + f, err := os.Open(dockerfilePath) + if err != nil { + return nil, errors.Wrap(err, "error reading dockerfile") + } + defer f.Close() + ls := make(map[string]string) + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + if len(line) < 3 { + continue + } + if !(line[0] == "RUN" && line[1] == "./contrib/download-frozen-image-v2.sh") { + continue + } + + frozenImgDir = line[2] + if line[2] == frozenImgDir { + frozenImgDir = filepath.Join(os.Getenv("DEST"), "frozen-images") + } + + for scanner.Scan() { + img := strings.TrimSpace(scanner.Text()) + img = strings.TrimSuffix(img, "\\") + img = strings.TrimSpace(img) + split := strings.Split(img, "@") + if len(split) < 2 { + break + } + + for _, i := range images { + if split[0] == i { + ls[i] = img + break + } + } + } + } + return ls, nil +} diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.crt b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.crt new file mode 100644 index 0000000..2218f23 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAP2EcMN2UXPcMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvgewhaYs +Ke5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqIOdxWjYITgJuHrTwB4ZhBqWS7 +tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbWK9PPhGGkeR01c/Q932m92Hsn +fCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4ylPRxs0RrE/rP+bEGssKQSbeCZ +wazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdvBqrRdWnkOZClhlLgEQ5nK2yV +B6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW8oKHlBBl6pRxHIKzNN4VFbeB +vvYvrogrDrC/owIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUFoHfukRa6qGk1ncON64Z +ASKlZdkwDQYJKoZIhvcNAQELBQADggEBAEq9Adpd03CPmpbRtTAJGAkjjLFr60sV +2r+/l/m9R31ZCN9ymM9nxToQ8zfMdeAh/nnPcErziil2gDVqXueCNDkRj09tmDIE +Q1Oc92uyNZNgcECow77cKZCTZSTku+qsJrYaykH5vSnia8ltcKj8inJedIcpBR+p +608HEQvF0Eg5eaLPJwH48BCb0Gqdri1dJgrNnqptz7MDr8M+u7tHVulbAd3YxLlq +JH1W2bkVUx6esbn/MUE5HL5iTuOYREEINvBSmLdmmFkampmCnCB/bDEyJeL9bAkt +ZPIi0UNSnqFKLSP1Vf8AGLXt6iO7+1OGvtsDXEEYdXVOMsSXZtUuT7A= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.key b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.key new file mode 100644 index 0000000..cb37efc --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvgewhaYsKe5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqI +OdxWjYITgJuHrTwB4ZhBqWS7tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbW +K9PPhGGkeR01c/Q932m92HsnfCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4yl +PRxs0RrE/rP+bEGssKQSbeCZwazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdv +BqrRdWnkOZClhlLgEQ5nK2yVB6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW +8oKHlBBl6pRxHIKzNN4VFbeBvvYvrogrDrC/owIDAQABAoIBAB/o8KZwsgfUhqh7 +WoViSCwQb0e0z7hoFwhpUl4uXPTGf1v6HEgDDPG0PwwgkdbwNaypQZVtWevj4NTQ +R326jjdjH1xbfQa2PZpz722L3jDqJR6plEtFxRoIv3KrCffPsrgabIu2mnnJJpDB +ixtW5cq0sT4ov2i4H0i85CWWwbSY/G/MHsvCuK9PhoCj9uToVqrf1KrAESE5q4fh +mPSYUL99KVnj7SZkUz+79rc8sLLPVks3szZACMlm1n05ZTj/d6Nd2ZZUO45DllIj +1XJghfWmnChrB/P/KYXgQ3Y9BofIAw1ra2y3wOZeqRFNsbmojcGldfdtN/iQzhEj +uk4ThokCgYEA9FTmv36N8qSPWuqX/KzkixDQ8WrDGohcB54kK98Wx4ijXx3i38SY +tFjO8YUS9GVo1+UgmRjZbzVX7xeum6+TdBBwOjNOxEQ4tzwiQBWDdGpli8BccdJ2 +OOIVxSslWhiUWfpYloXVetrR88iHbT882g795pbonDaJdXSLnij4UW8CgYEAxxrr +QFpsmOEZvI/yPSOGdG7A1RIsCeH+cEOf4cKghs7+aCtAHlIweztNOrqirl3oKI1r +I0zQl46WsaW8S/y99v9lmmnZbWwqLa4vIu0NWs0zaZdzKZw3xljMhgp4Ge69hHa2 +utCtAxcX+7q/yLlHoTiYwKdxX54iLkheCB8csw0CgYEAleEG820kkjXUIodJ2JwO +Tihwo8dEC6CeI6YktizRgnEVFqH0rCOjMO5Rc+KX8AfNOrK5PnD54LguSuKSH7qi +j04OKgWTSd43lF90+y63RtCFnibQDpp2HwrBJAQFk7EEP/XMJfnPLN/SbuMSADgM +kg8kPTFRW5Iw3DYz9z9WpE0CgYAkn6/8Q2XMbUOFqti9JEa8Lg8sYk5VdwuNbPMA +3QMYKQUk9ieyLB4c3Nik3+XCuyVUKEc31A5egmz3umu7cn8i6vGuiJ/k/8t2YZ7s +Bry5Ihu95Yzab5DW3Eiqs0xKQN79ebS9AluAwQO5Wy2h52rknfuDHIm/M+BHsSoS +xl5KFQKBgQCokCsYuX1z2GojHw369/R2aX3ovCGuHqy4k7fWxUrpHTHvth2+qNPr +84qLJ9rLWoZE5sUiZ5YdwCgW877EdfkT+v4aaBX79ixso5VdqgJ/PdnoNntah/Vq +njQiW1skn6/P5V/eyimN2n0VsyBr/zMDEtYTRP/Tb1zi/njFLQkZEA== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.crt b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.crt new file mode 100644 index 0000000..bec0847 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAIq8naKlYAQfMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyY2EWYTW +5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aHfoOe8wGKg3Ohz7UCBdD5Mob/ +L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3AaawEUOw2rwwMDEjLnDDTSZM +z8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY8ioRbROCL2PGgqywWq2fThav +c70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFHVARXiUv/ILHk7ImYnSGJUcuk +JTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJDSiRP72nkg/cE4BqMl9FrMwK +9iS8xa9yMDLUvwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUvQzzFmh3Sv3HcdExY3wx +/1u6JLAwDQYJKoZIhvcNAQELBQADggEBAJcmDme2Xj/HPUPwaN/EyCmjhY73EiHO +x6Pm16tscg5JGn5A+u3CZ1DmxUYl8Hp6MaW/sWzdtL0oKJg76pynadCWh5EacFR8 +u+2GV/IcN9mSX6JQzvrqbjSqo5/FehqBD+W5h3euwwApWA3STAadYeyEfmdOA3SQ +W1vzrA1y7i8qgTqeJ7UX1sEAXlIhBK2zPYaMB+en+ZOiPyNxJYj6IDdGdD2paC9L +6H9wKC+GAUTSdCWp89HP7ETSXEGr94AXkrwU+qNsiN+OyK8ke0EMngEPh5IQoplw +/7zEZCth3oKxvR1/4S5LmTVaHI2ZlbU4q9bnY72G4tw8YQr2gcBGo4w= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.key b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.key new file mode 100644 index 0000000..5ccabe9 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAyY2EWYTW5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aH +foOe8wGKg3Ohz7UCBdD5Mob/L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3 +AaawEUOw2rwwMDEjLnDDTSZMz8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY +8ioRbROCL2PGgqywWq2fThavc70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFH +VARXiUv/ILHk7ImYnSGJUcukJTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJ +DSiRP72nkg/cE4BqMl9FrMwK9iS8xa9yMDLUvwIDAQABAoIBAHmffvzx7ydESWwa +zcfdu26BkptiTvjjfJrqEd4wSewxWGPKqJqMXE8xX99A2KTZClZuKuH1mmnecQQY +iRXGrK9ewFMuHYGeKEiLlPlqR8ohXhyGLVm+t0JDwaXMp5t9G0i73O5iLTm5fNGd +FGxa9YnVW20Q8MqNczbVGH1D1zInhxzzOyFzBd4bBBJ8PdrUdyLpd7+RxY2ghnbT +p9ZANR2vk5zmDLJgZx72n/u+miJWuhY6p0v3Vq4z/HHgdhf+K6vpDdzTcYlA0rO4 +c/c+RKED3ZadGUD5QoLsmEN0e3FVSMPN1kt4ZRTqWfH8f2X4mLz33aBryTjktP6+ +1rX6ThECgYEA74wc1Tq23B5R0/GaMm1AK3Ko2zzTD8wK7NSCElh2dls02B+GzrEB +aE3A2GMQSuzb+EA0zkipwANBaqs3ZemH5G1pu4hstQsXCMd4jAJn0TmTXlplXBCf +PSc8ZUU6XcJENRr9Q7O9/TGlgahX+z0ndxYx/CMCsSu7XsMg4IZsbAcCgYEA12Vb +wKOVG15GGp7pMshr+2rQfVimARUP4gf3JnQmenktI4PfdnMW3a4L3DEHfLhIerwT +6lRp/NpxSADmuT4h1UO1l2lc+gmTVPw0Vbl6VwHpgS5Kfu4ZyM6n3S66f/dE4nu7 +hQF9yZz7vn5Agghak4p6a1wC1gdMzR1tvxFzk4kCgYByBMTskWfcWeok8Yitm+bB +R3Ar+kWT7VD97SCETusD5uG+RTNLSmEbHnc+B9kHcLo67YS0800pAeOvPBPARGnU +RmffRU5I1iB+o0MzkSmNItSMQoagTaEd4IEUyuC/I+qHRHNsOC+kRm86ycAm67LP +MhdUpe1wGxqyPjp15EXTHQKBgDKzFu+3EWfJvvKRKQ7dAh3BvKVkcl6a2Iw5l8Ej +YdM+JpPPfI/i8yTmzL/dgoem0Nii4IUtrWzo9fUe0TAVId2S/HFRSaNJEbbVTnRH +HjbQqmfPv5U08jjD+9siHp/0UfCFc1QRT8xe+RqTmReCY9+KntoaZEiAm2FEZgqt +TukRAoGAf7QqbTP5/UH1KSkX89F5qy/6GS3pw6TLj9Ufm/l/NO8Um8gag6YhEKWR +7HpkpCqjfWj8Av8ESR9cqddPGrbdqXFm9z7dCjlAd5T3Q3h/h+v+JzLQWbsI6WOb +SsOSWNyE006ZZdIiFwO6GfxpLI24sVtYKgyob6Q71oxSqfnrnT0= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.crt b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.crt new file mode 100644 index 0000000..f434b45 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAKHt/jxiWqMtMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqfbJk2Dk +C9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJzetsclsV/95nBhinIGcSmPQA +l318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCeS86SOyLNTpMD9gsF0S8nR1RN +h0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5PhyrMZgNip4IrG46umCkFlrw +zMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKorIJQbPtHVYdr4UxYnNmk6fbU +biEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj9fZ7Viw0t5IKXZPsxMhwknUT +9vmPzIJO6NiniwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUdTXRP1EzxQ+UDZSoheVo +Mobud1cwDQYJKoZIhvcNAQELBQADggEBADV9asTWWdbmpkeRuKyi0xGho39ONK88 +xxkFlco766BVgemo/rGQj3oPuw6M6SzHFoJ6JUPjmLiAQDIGEU/2/b6LcOuLjP+4 +YejCcDTY3lSW/HMNoAmzr2foo/LngNGfe/qhVFUqV7GjFT9+XzFFBfIZ1cQiL2ed +kc8rgQxFPwWXFCSwaENWeFnMDugkd+7xanoAHq8GsJpg5fTruDTmJkUqC2RNiMLn +WM7QaqW7+lmUnMnc1IBoz0hFhgoiadWM/1RQxx51zTVw6Au1koIm4ZXu5a+/WyC8 +K1+HyUbc0AVaDaRBpRSOR9aHRwLGh6WQ4aUZQNyJroc999qfYrDEEV8= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.key b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.key new file mode 100644 index 0000000..a61d18c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAqfbJk2DkC9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJ +zetsclsV/95nBhinIGcSmPQAl318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCe +S86SOyLNTpMD9gsF0S8nR1RNh0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5 +PhyrMZgNip4IrG46umCkFlrwzMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKo +rIJQbPtHVYdr4UxYnNmk6fbUbiEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj +9fZ7Viw0t5IKXZPsxMhwknUT9vmPzIJO6NiniwIDAQABAoIBAQCAr/ed3A2umO7T +FDYZik3nXBiiiW4t7r+nGGgZ3/kNgY1lnuHlROxehXLZwbX1mrLnyML/BjhwezV9 +7ZNVPd6laVPpNj6DyxtWHRZ5yARlm1Al39E7CpQTrF0QsiWcpGnqIa62xjDRTpnq +askV/Q5qggyvqmE9FnFCQpEiAjlhvp7F0kVHVJm9s3MK3zSyR0UTZ3cpYus2Jr2z +OotHgAMHq5Hgb3dvxOeE2xRMeYAVDujbkNzXm2SddAtiRdLhWDh7JIr3zXhp0HyN +4rLOyhlgz00oIGeDt/C0q3fRmghr3iZOG+7m2sUx0FD1Ru1dI9v2A+jYmIVNW6+x +YJk5PzxJAoGBANDj7AGdcHSci/LDBPoTTUiz3uucAd27/IJma/iy8mdbVfOAb0Fy +PRSPvoozlpZyOxg2J4eH/o4QxQR4lVKtnLKZLNHK2tg3LarwyBX1LiI3vVlB+DT1 +AmV8i5bJAckDhqFeEH5qdWZFi03oZsSXWEqX5iMYCrdK5lTZggcrFZeHAoGBANBL +fkk3knAdcVfTYpmHx18GBi2AsCWTd20KD49YBdbVy0Y2Jaa1EJAmGWpTUKdYx40R +H5CuGgcAviXQz3bugdTU1I3tAclBtpJNU7JkhuE+Epz0CM/6WERJrE0YxcGQA5ui +6fOguFyiXD1/85jrDBOKy74aoS7lYz9r/a6eqmjdAoGBAJpm/nmrIAZx+Ff2ouUe +A1Ar9Ch/Zjm5zEmu3zwzOU4AiyWz14iuoktifNq2iyalRNz+mnVpplToPFizsNwu +C9dPtXtU0DJlhtIFrD/evLz6KnGhe4/ZUm4lgyBvb2xfuNHqL5Lhqelwmil6EQxb +Oh3Y7XkfOjyFln89TwlxZUJdAoGAJRMa4kta7EvBTeGZLjyltvsqhFTghX+vBSCC +ToBbYbbiHJgssXSPAylU4sD7nR3HPwuqM6VZip+OOMrm8oNXZpuPTce+xqTEq1vK +JvmPrG3RAFDLdMFZjqYSXhKnuGE60yv3Ol8EEbDwfB3XLQPBPYU56Jdy0xcPSE2f +dMJXEJ0CgYEAisZw0nXw6lFeYecu642EGuU0wv1O9i21p7eho9QwOcsoTl4Q9l+M +M8iBv+qTHO+D19l4JbkGvy2H2diKoYduUFACcuiFYs8fjrT+4Z6DyOQAQGAf6Ylw +BFbU15k6KbA9v4mZDfd1tY9x62L/XO55ZxYG+J+q0e26tEThgD8cEog= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.crt b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.crt new file mode 100644 index 0000000..c8cbe46 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJANae++ZkUEWMMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqULAjgba +Y2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4ltkQj1iO4zBTs0Ft9EzXFc5ZBh +pTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3HZpVOlEMI3npRfBGNIBllUaRN +PWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ImhSo3aipJUHHcp9Z9NgvpNC +3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw+YTrWZq3qVnnqUouHO//c9PG +Ry3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih58i/OBKe81eD9NuZDP2KrjTxI +5xkXKhj6DV2NnQIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUDt95hiqbQvi0KcvZGAUu +VisnztQwDQYJKoZIhvcNAQELBQADggEBAGi7qHai7MWbfeu6SlXhzIP3AIMa8TMi +lp/+mvPUFPswIVqYJ71MAN8uA7CTH3z50a2vYupGeOEtZqVJeRf+xgOEpwycncxp +Qz6wc6TWPVIoT5q1Hqxw1RD2MyKL+Y+QBDYwFxFkthpDMlX48I9frcqoJUWFxBF2 +lnRr/cE7BbPE3sMbXV3wGPlH7+eUf+CgzXJo2HB6THzagyEgNrDiz/0rCQa1ipFd +mNU3D/U6BFGmJNxhvSOtXX9escg8yjr05YwwzokHS2K4jE0ZuJPBd50C/Rvo3Mf4 +0h7/2Q95e7d42zPe9WYPu2F8KTWsf4r+6ddhKrKhYzXIcTAfHIOiO+U= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.key b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.key new file mode 100644 index 0000000..f473cc4 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAqULAjgbaY2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4lt +kQj1iO4zBTs0Ft9EzXFc5ZBhpTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3H +ZpVOlEMI3npRfBGNIBllUaRNPWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ +ImhSo3aipJUHHcp9Z9NgvpNC3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw ++YTrWZq3qVnnqUouHO//c9PGRy3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih5 +8i/OBKe81eD9NuZDP2KrjTxI5xkXKhj6DV2NnQIDAQABAoIBAGK0ZKnuYSiXux60 +5MvK4pOCsa/nY3mOcgVHhW4IzpRgJdIrcFOlz9ncXrBsSAIWjX7o3u2Ydvjs4DOW +t8d6frB3QiDInYcRVDjLCD6otWV97Bk9Ua0G4N4hAWkMF7ysV4oihS1JDSoAdo39 +qOdki6s9yeyHZGKwk2oHLlowU5TxQMBA8DHmxqBII1HTm+8xRz45bcEqRXydYSUn +P1JuSU9jFqdylxU+Nrq6ehslMQ3y7qNWQyiLGxu6EmR+vgrzSU0s3iAOqCHthaOS +VBBXPL3DNEYUS+0QGnGrACuJhanOMBfdiO6Orelx6ZzWZm38PNGv0yBt0WCM+8/A +TtQNGkECgYEA1LqR6AH9XikUQ0+rM4526BgVuYqtjw21h4Lj9alaA+YTQntBBJOv +iAcUpnJiV4T8jzAMLeqpK8R/rbxRnK5S9jOV2gr+puk4L6tH46cgahBUESDigDp8 +6vK8ur6ubBcXNPh3AT6rsPj+Ph2EU3raqiYdouvCdga/OCYZb+jr6UkCgYEAy7Cr +l8WssI/8/ORcQ4MFJFNyfz/Y2beNXyLd1PX0H+wRSiGcKzeUuTHNtzFFpMbrK/nx +ZOPCT2ROdHsBHzp1L+WquCb0fyMVSiYiXBU+VCFDbUU5tBr3ycTc7VwuFPENOiha +IdlWgew/aW110FQHIaqe9g+htRe+mXe++faZtbUCgYB/MSJmNzJX53XvHSZ/CBJ+ +iVAMBSfq3caJRLCqRNzGcf1YBbwFUYxlZ95n+wJj0+byckcF+UW3HqE8rtmZNf3y +qTtTCLnj8JQgpGeybU4LPMIXD7N9+fqQvBwuCC7gABpnGJyHCQK9KNNTLnDdPRqb +G3ki3ZYC3dvdZaJV8E2FyQKBgQCMa5Mf4kqWvezueo+QizZ0QILibqWUEhIH0AWV +1qkhiKCytlDvCjYhJdBnxjP40Jk3i+t6XfmKud/MNTAk0ywOhQoYQeKz8v+uSnPN +f2ekn/nXzq1lGGJSWsDjcXTjQvqXaVIZm7cjgjaE+80IfaUc9H75qvUT3vaq3f5u +XC7DMQKBgQDMAzCCpWlEPbZoFMl6F49+7jG0/TiqM/WRUSQnNtufPMbrR9Je4QM1 +L1UCANCPaHFOncKYer15NfIV1ctt5MZKImevDsUaQO8CUlO+dzd5H8KvHw9E29gA +B22v8k3jIjsYeRL+UJ/sBnWHgxdAe/NEM+TdlP2oP9D1gTifutPqAg== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/gen.sh b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/gen.sh new file mode 100755 index 0000000..8d6381c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/gen.sh @@ -0,0 +1,18 @@ +for selfsigned in delgkey1 delgkey2 delgkey3 delgkey4; do + subj='/C=US/ST=CA/L=SanFrancisco/O=Docker/CN=delegation' + + openssl genrsa -out "${selfsigned}.key" 2048 + openssl req -new -key "${selfsigned}.key" -out "${selfsigned}.csr" -sha256 -subj "${subj}" + cat > "${selfsigned}.cnf" < 1 && buf[0] == 'Y' + }, + "Test requires apparmor is enabled.", + } + RegistryHosting = testRequirement{ + func() bool { + // for now registry binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // registry binary is in PATH. + _, err := exec.LookPath(v2binary) + return err == nil + }, + fmt.Sprintf("Test requires an environment that can host %s in the same host", v2binary), + } + NotaryHosting = testRequirement{ + func() bool { + // for now notary binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // notary-server binary is in PATH. + _, err := exec.LookPath(notaryServerBinary) + return err == nil + }, + fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary), + } + NotaryServerHosting = testRequirement{ + func() bool { + // for now notary-server binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // notary-server binary is in PATH. + _, err := exec.LookPath(notaryServerBinary) + return err == nil + }, + fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary), + } + NotOverlay = testRequirement{ + func() bool { + return !strings.HasPrefix(daemonStorageDriver, "overlay") + }, + "Test requires underlying root filesystem not be backed by overlay.", + } + + Devicemapper = testRequirement{ + func() bool { + return strings.HasPrefix(daemonStorageDriver, "devicemapper") + }, + "Test requires underlying root filesystem to be backed by devicemapper.", + } + + IPv6 = testRequirement{ + func() bool { + cmd := exec.Command("test", "-f", "/proc/net/if_inet6") + + if err := cmd.Run(); err != nil { + return true + } + return false + }, + "Test requires support for IPv6", + } + UserNamespaceROMount = testRequirement{ + func() bool { + // quick case--userns not enabled in this test run + if os.Getenv("DOCKER_REMAP_ROOT") == "" { + return true + } + if _, _, err := dockerCmdWithError("run", "--rm", "--read-only", "busybox", "date"); err != nil { + return false + } + return true + }, + "Test cannot be run if user namespaces enabled but readonly mounts fail on this kernel.", + } + UserNamespaceInKernel = testRequirement{ + func() bool { + if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) { + /* + * This kernel-provided file only exists if user namespaces are + * supported + */ + return false + } + + // We need extra check on redhat based distributions + if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil { + defer f.Close() + b := make([]byte, 1) + _, _ = f.Read(b) + if string(b) == "N" { + return false + } + return true + } + + return true + }, + "Kernel must have user namespaces configured and enabled.", + } + NotUserNamespace = testRequirement{ + func() bool { + root := os.Getenv("DOCKER_REMAP_ROOT") + if root != "" { + return false + } + return true + }, + "Test cannot be run when remapping root", + } + IsPausable = testRequirement{ + func() bool { + if daemonPlatform == "windows" { + return isolation == "hyperv" + } + return true + }, + "Test requires containers are pausable.", + } + NotPausable = testRequirement{ + func() bool { + if daemonPlatform == "windows" { + return isolation == "process" + } + return false + }, + "Test requires containers are not pausable.", + } + IsolationIsHyperv = testRequirement{ + func() bool { + return daemonPlatform == "windows" && isolation == "hyperv" + }, + "Test requires a Windows daemon running default isolation mode of hyperv.", + } + IsolationIsProcess = testRequirement{ + func() bool { + return daemonPlatform == "windows" && isolation == "process" + }, + "Test requires a Windows daemon running default isolation mode of process.", + } +) + +// testRequires checks if the environment satisfies the requirements +// for the test to run or skips the tests. +func testRequires(c *check.C, requirements ...testRequirement) { + for _, r := range requirements { + if !r.Condition() { + c.Skip(r.SkipMessage) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/requirements_unix.go b/vendor/github.com/moby/moby/integration-cli/requirements_unix.go new file mode 100644 index 0000000..ef017d8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/requirements_unix.go @@ -0,0 +1,159 @@ +// +build !windows + +package main + +import ( + "bytes" + "io/ioutil" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" +) + +var ( + // SysInfo stores information about which features a kernel supports. + SysInfo *sysinfo.SysInfo + cpuCfsPeriod = testRequirement{ + func() bool { + return SysInfo.CPUCfsPeriod + }, + "Test requires an environment that supports cgroup cfs period.", + } + cpuCfsQuota = testRequirement{ + func() bool { + return SysInfo.CPUCfsQuota + }, + "Test requires an environment that supports cgroup cfs quota.", + } + cpuShare = testRequirement{ + func() bool { + return SysInfo.CPUShares + }, + "Test requires an environment that supports cgroup cpu shares.", + } + oomControl = testRequirement{ + func() bool { + return SysInfo.OomKillDisable + }, + "Test requires Oom control enabled.", + } + pidsLimit = testRequirement{ + func() bool { + return SysInfo.PidsLimit + }, + "Test requires pids limit enabled.", + } + kernelMemorySupport = testRequirement{ + func() bool { + return SysInfo.KernelMemory + }, + "Test requires an environment that supports cgroup kernel memory.", + } + memoryLimitSupport = testRequirement{ + func() bool { + return SysInfo.MemoryLimit + }, + "Test requires an environment that supports cgroup memory limit.", + } + memoryReservationSupport = testRequirement{ + func() bool { + return SysInfo.MemoryReservation + }, + "Test requires an environment that supports cgroup memory reservation.", + } + swapMemorySupport = testRequirement{ + func() bool { + return SysInfo.SwapLimit + }, + "Test requires an environment that supports cgroup swap memory limit.", + } + memorySwappinessSupport = testRequirement{ + func() bool { + return SysInfo.MemorySwappiness + }, + "Test requires an environment that supports cgroup memory swappiness.", + } + blkioWeight = testRequirement{ + func() bool { + return SysInfo.BlkioWeight + }, + "Test requires an environment that supports blkio weight.", + } + cgroupCpuset = testRequirement{ + func() bool { + return SysInfo.Cpuset + }, + "Test requires an environment that supports cgroup cpuset.", + } + seccompEnabled = testRequirement{ + func() bool { + return supportsSeccomp && SysInfo.Seccomp + }, + "Test requires that seccomp support be enabled in the daemon.", + } + bridgeNfIptables = testRequirement{ + func() bool { + return !SysInfo.BridgeNFCallIPTablesDisabled + }, + "Test requires that bridge-nf-call-iptables support be enabled in the daemon.", + } + bridgeNfIP6tables = testRequirement{ + func() bool { + return !SysInfo.BridgeNFCallIP6TablesDisabled + }, + "Test requires that bridge-nf-call-ip6tables support be enabled in the daemon.", + } + unprivilegedUsernsClone = testRequirement{ + func() bool { + content, err := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone") + if err == nil && strings.Contains(string(content), "0") { + return false + } + return true + }, + "Test cannot be run with 'sysctl kernel.unprivileged_userns_clone' = 0", + } + ambientCapabilities = testRequirement{ + func() bool { + content, err := ioutil.ReadFile("/proc/self/status") + if err == nil && strings.Contains(string(content), "CapAmb:") { + return true + } + return false + }, + "Test cannot be run without a kernel (4.3+) supporting ambient capabilities", + } + overlayFSSupported = testRequirement{ + func() bool { + cmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "cat /proc/filesystems") + out, err := cmd.CombinedOutput() + if err != nil { + return false + } + return bytes.Contains(out, []byte("overlay\n")) + }, + "Test cannot be run without suppport for overlayfs", + } + overlay2Supported = testRequirement{ + func() bool { + if !overlayFSSupported.Condition() { + return false + } + + daemonV, err := kernel.ParseRelease(daemonKernelVersion) + if err != nil { + return false + } + requiredV := kernel.VersionInfo{Kernel: 4} + return kernel.CompareKernelVersion(*daemonV, requiredV) > -1 + + }, + "Test cannot be run without overlay2 support (kernel 4.0+)", + } +) + +func init() { + SysInfo = sysinfo.New(true) +} diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars.go b/vendor/github.com/moby/moby/integration-cli/test_vars.go new file mode 100644 index 0000000..97bcddd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars.go @@ -0,0 +1,11 @@ +package main + +// sleepCommandForDaemonPlatform is a helper function that determines what +// the command is for a sleeping container based on the daemon platform. +// The Windows busybox image does not have a `top` command. +func sleepCommandForDaemonPlatform() []string { + if daemonPlatform == "windows" { + return []string{"sleep", "240"} + } + return []string{"top"} +} diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_exec.go b/vendor/github.com/moby/moby/integration-cli/test_vars_exec.go new file mode 100644 index 0000000..7633b34 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_exec.go @@ -0,0 +1,8 @@ +// +build !test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = true +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_noexec.go b/vendor/github.com/moby/moby/integration-cli/test_vars_noexec.go new file mode 100644 index 0000000..0845090 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_noexec.go @@ -0,0 +1,8 @@ +// +build test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = false +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_noseccomp.go b/vendor/github.com/moby/moby/integration-cli/test_vars_noseccomp.go new file mode 100644 index 0000000..2f47ab0 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_noseccomp.go @@ -0,0 +1,8 @@ +// +build !seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = false +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_seccomp.go b/vendor/github.com/moby/moby/integration-cli/test_vars_seccomp.go new file mode 100644 index 0000000..00cf697 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_seccomp.go @@ -0,0 +1,8 @@ +// +build seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = true +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_unix.go b/vendor/github.com/moby/moby/integration-cli/test_vars_unix.go new file mode 100644 index 0000000..f9ecc01 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = true + + expectedFileChmod = "-rw-r--r--" + + // On Unix variants, the busybox image comes with the `top` command which + // runs indefinitely while still being interruptible by a signal. + defaultSleepImage = "busybox" +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_windows.go b/vendor/github.com/moby/moby/integration-cli/test_vars_windows.go new file mode 100644 index 0000000..bfc9a5a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_windows.go @@ -0,0 +1,15 @@ +// +build windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = false + + // this is the expected file permission set on windows: gh#11395 + expectedFileChmod = "-rwxr-xr-x" + + // On Windows, the busybox image doesn't have the `top` command, so we rely + // on `sleep` with a high duration. + defaultSleepImage = "busybox" +) diff --git a/vendor/github.com/moby/moby/integration-cli/trust_server.go b/vendor/github.com/moby/moby/integration-cli/trust_server.go new file mode 100644 index 0000000..1887631 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/trust_server.go @@ -0,0 +1,344 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" +) + +var notaryBinary = "notary" +var notaryServerBinary = "notary-server" + +type keyPair struct { + Public string + Private string +} + +type testNotary struct { + cmd *exec.Cmd + dir string + keys []keyPair +} + +const notaryHost = "localhost:4443" +const notaryURL = "https://" + notaryHost + +func newTestNotary(c *check.C) (*testNotary, error) { + // generate server config + template := `{ + "server": { + "http_addr": "%s", + "tls_key_file": "%s", + "tls_cert_file": "%s" + }, + "trust_service": { + "type": "local", + "hostname": "", + "port": "", + "key_algorithm": "ed25519" + }, + "logging": { + "level": "debug" + }, + "storage": { + "backend": "memory" + } +}` + tmp, err := ioutil.TempDir("", "notary-test-") + if err != nil { + return nil, err + } + confPath := filepath.Join(tmp, "config.json") + config, err := os.Create(confPath) + if err != nil { + return nil, err + } + defer config.Close() + + workingDir, err := os.Getwd() + if err != nil { + return nil, err + } + if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // generate client config + clientConfPath := filepath.Join(tmp, "client-config.json") + clientConfig, err := os.Create(clientConfPath) + if err != nil { + return nil, err + } + defer clientConfig.Close() + + template = `{ + "trust_dir" : "%s", + "remote_server": { + "url": "%s", + "skipTLSVerify": true + } +}` + if _, err = fmt.Fprintf(clientConfig, template, filepath.Join(cliconfig.ConfigDir(), "trust"), notaryURL); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // load key fixture filenames + var keys []keyPair + for i := 1; i < 5; i++ { + keys = append(keys, keyPair{ + Public: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.crt", i)), + Private: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.key", i)), + }) + } + + // run notary-server + cmd := exec.Command(notaryServerBinary, "-config", confPath) + if err := cmd.Start(); err != nil { + os.RemoveAll(tmp) + if os.IsNotExist(err) { + c.Skip(err.Error()) + } + return nil, err + } + + testNotary := &testNotary{ + cmd: cmd, + dir: tmp, + keys: keys, + } + + // Wait for notary to be ready to serve requests. + for i := 1; i <= 20; i++ { + if err = testNotary.Ping(); err == nil { + break + } + time.Sleep(10 * time.Millisecond * time.Duration(i*i)) + } + + if err != nil { + c.Fatalf("Timeout waiting for test notary to become available: %s", err) + } + + return testNotary, nil +} + +func (t *testNotary) Ping() error { + tlsConfig := tlsconfig.ClientDefault() + tlsConfig.InsecureSkipVerify = true + client := http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + }, + } + resp, err := client.Get(fmt.Sprintf("%s/v2/", notaryURL)) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (t *testNotary) Close() { + t.cmd.Process.Kill() + os.RemoveAll(t.dir) +} + +func (s *DockerTrustSuite) trustedCmd(cmd *exec.Cmd) { + pwd := "12345678" + trustCmdEnv(cmd, notaryURL, pwd, pwd) +} + +func (s *DockerTrustSuite) trustedCmdWithServer(cmd *exec.Cmd, server string) { + pwd := "12345678" + trustCmdEnv(cmd, server, pwd, pwd) +} + +func (s *DockerTrustSuite) trustedCmdWithPassphrases(cmd *exec.Cmd, rootPwd, repositoryPwd string) { + trustCmdEnv(cmd, notaryURL, rootPwd, repositoryPwd) +} + +func trustCmdEnv(cmd *exec.Cmd, server, rootPwd, repositoryPwd string) { + env := []string{ + "DOCKER_CONTENT_TRUST=1", + fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), + fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd), + fmt.Sprintf("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE=%s", repositoryPwd), + } + cmd.Env = append(os.Environ(), env...) +} + +func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "rmi", repoName); status != 0 { + c.Fatalf("Error removing image %q\n%s", repoName, out) + } + + return repoName +} + +func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // tag the image and upload it to the private registry + dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", repoName, source) + + pushCmd := exec.Command(dockerBinary, "plugin", "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + if err != nil { + c.Fatalf("Error running trusted plugin push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "plugin", "rm", "-f", repoName); status != 0 { + c.Fatalf("Error removing plugin %q\n%s", repoName, out) + } + + return repoName +} + +func notaryClientEnv(cmd *exec.Cmd) { + pwd := "12345678" + env := []string{ + fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_DELEGATION_PASSPHRASE=%s", pwd), + } + cmd.Env = append(os.Environ(), env...) +} + +func (s *DockerTrustSuite) notaryInitRepo(c *check.C, repoName string) { + initCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "init", repoName) + notaryClientEnv(initCmd) + out, _, err := runCommandWithOutput(initCmd) + if err != nil { + c.Fatalf("Error initializing notary repository: %s\n", out) + } +} + +func (s *DockerTrustSuite) notaryCreateDelegation(c *check.C, repoName, role string, pubKey string, paths ...string) { + pathsArg := "--all-paths" + if len(paths) > 0 { + pathsArg = "--paths=" + strings.Join(paths, ",") + } + + delgCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), + "delegation", "add", repoName, role, pubKey, pathsArg) + notaryClientEnv(delgCmd) + out, _, err := runCommandWithOutput(delgCmd) + if err != nil { + c.Fatalf("Error adding %s role to notary repository: %s\n", role, out) + } +} + +func (s *DockerTrustSuite) notaryPublish(c *check.C, repoName string) { + pubCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "publish", repoName) + notaryClientEnv(pubCmd) + out, _, err := runCommandWithOutput(pubCmd) + if err != nil { + c.Fatalf("Error publishing notary repository: %s\n", out) + } +} + +func (s *DockerTrustSuite) notaryImportKey(c *check.C, repoName, role string, privKey string) { + impCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "key", + "import", privKey, "-g", repoName, "-r", role) + notaryClientEnv(impCmd) + out, _, err := runCommandWithOutput(impCmd) + if err != nil { + c.Fatalf("Error importing key to notary repository: %s\n", out) + } +} + +func (s *DockerTrustSuite) notaryListTargetsInRole(c *check.C, repoName, role string) map[string]string { + listCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "list", + repoName, "-r", role) + notaryClientEnv(listCmd) + out, _, err := runCommandWithOutput(listCmd) + if err != nil { + c.Fatalf("Error listing targets in notary repository: %s\n", out) + } + + // should look something like: + // NAME DIGEST SIZE (BYTES) ROLE + // ------------------------------------------------------------------------------------------------------ + // latest 24a36bbc059b1345b7e8be0df20f1b23caa3602e85d42fff7ecd9d0bd255de56 1377 targets + + targets := make(map[string]string) + + // no target + lines := strings.Split(strings.TrimSpace(out), "\n") + if len(lines) == 1 && strings.Contains(out, "No targets present in this repository.") { + return targets + } + + // otherwise, there is at least one target + c.Assert(len(lines), checker.GreaterOrEqualThan, 3) + + for _, line := range lines[2:] { + tokens := strings.Fields(line) + c.Assert(tokens, checker.HasLen, 4) + targets[tokens[0]] = tokens[3] + } + + return targets +} + +func (s *DockerTrustSuite) assertTargetInRoles(c *check.C, repoName, target string, roles ...string) { + // check all the roles + for _, role := range roles { + targets := s.notaryListTargetsInRole(c, repoName, role) + roleName, ok := targets[target] + c.Assert(ok, checker.True) + c.Assert(roleName, checker.Equals, role) + } +} + +func (s *DockerTrustSuite) assertTargetNotInRoles(c *check.C, repoName, target string, roles ...string) { + targets := s.notaryListTargetsInRole(c, repoName, "targets") + + roleName, ok := targets[target] + if ok { + for _, role := range roles { + c.Assert(roleName, checker.Not(checker.Equals), role) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/utils.go b/vendor/github.com/moby/moby/integration-cli/utils.go new file mode 100644 index 0000000..87d48e4 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/utils.go @@ -0,0 +1,79 @@ +package main + +import ( + "io" + "os" + "os/exec" + "time" + + "github.com/docker/docker/pkg/integration" + "github.com/docker/docker/pkg/integration/cmd" +) + +func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { + if daemonPlatform == "windows" { + return "c:", `\` + } + return "", "/" +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +func runCommandWithOutput(execCmd *exec.Cmd) (string, int, error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.Combined(), result.ExitCode, result.Error +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +func runCommandWithStdoutStderr(execCmd *exec.Cmd) (string, string, int, error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.Stdout(), result.Stderr(), result.ExitCode, result.Error +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +func runCommand(execCmd *exec.Cmd) (exitCode int, err error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.ExitCode, result.Error +} + +// Temporary shim for migrating commands to the new function +func transformCmd(execCmd *exec.Cmd) cmd.Cmd { + return cmd.Cmd{ + Command: execCmd.Args, + Env: execCmd.Env, + Dir: execCmd.Dir, + Stdin: execCmd.Stdin, + Stdout: execCmd.Stdout, + } +} + +func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { + return integration.RunCommandPipelineWithOutput(cmds...) +} + +func convertSliceOfStringsToMap(input []string) map[string]struct{} { + return integration.ConvertSliceOfStringsToMap(input) +} + +func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { + return integration.CompareDirectoryEntries(e1, e2) +} + +func listTar(f io.Reader) ([]string, error) { + return integration.ListTar(f) +} + +func randomTmpDirPath(s string, platform string) string { + return integration.RandomTmpDirPath(s, platform) +} + +func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { + return integration.ConsumeWithSpeed(reader, chunkSize, interval, stop) +} + +func parseCgroupPaths(procCgroupData string) map[string]string { + return integration.ParseCgroupPaths(procCgroupData) +} + +func runAtDifferentDate(date time.Time, block func()) { + integration.RunAtDifferentDate(date, block) +} diff --git a/vendor/github.com/moby/moby/keys/launchpad-ppa-zfs.asc b/vendor/github.com/moby/moby/keys/launchpad-ppa-zfs.asc new file mode 100644 index 0000000..1c5b4de --- /dev/null +++ b/vendor/github.com/moby/moby/keys/launchpad-ppa-zfs.asc @@ -0,0 +1,13 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mI0ETjjRQwEEAN1t7LdXiXEDucAXemaXZphLeDSmUE2gHxj/b+Gqt1wRaCMAE1NU +rLOqTDNq8XPi4ZSp8Rr8R8jVupmKlt446ESGOadUO0AAjFyYe+YwZ65uYa69536k +T+PhcFepWm8YgJL1skn0u+qpHzMJLvLB6iyAP8fP5C19wjiY8TtpSEtLABEBAAG0 +JkxhdW5jaHBhZCBQUEEgZm9yIE5hdGl2ZSBaRlMgZm9yIExpbnV4iLgEEwECACIF +Ak440UMCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEBGWuoH2sPxh32cD +/2uniH9nyAKYI3/6X29pmRXcsuf1J+ZYqEnUIWT41ZBvNJHkbMiSgNC0lUvW4miq +LgHZrft2X3D1fUP6djnueTnFG/Rs/uVRCMU32YjmxW92nZc6StfNt35LT7CUd9xV +/6e3h5klln/xUsimOm9BcHglUXF7n8U39qw9JGV2sheo +=qkiU +-----END PGP PUBLIC KEY BLOCK----- diff --git a/vendor/github.com/moby/moby/layer/empty.go b/vendor/github.com/moby/moby/layer/empty.go new file mode 100644 index 0000000..3b6ffc8 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/empty.go @@ -0,0 +1,56 @@ +package layer + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. +var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { + if p == "" { + return el.TarStream() + } + return nil, fmt.Errorf("can't get parent tar stream of an empty layer") +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} diff --git a/vendor/github.com/moby/moby/layer/empty_test.go b/vendor/github.com/moby/moby/layer/empty_test.go new file mode 100644 index 0000000..c22da76 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/empty_test.go @@ -0,0 +1,46 @@ +package layer + +import ( + "io" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestEmptyLayer(t *testing.T) { + if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) { + t.Fatal("wrong ID for empty layer") + } + + if EmptyLayer.DiffID() != DigestSHA256EmptyTar { + t.Fatal("wrong DiffID for empty layer") + } + + if EmptyLayer.Parent() != nil { + t.Fatal("expected no parent for empty layer") + } + + if size, err := EmptyLayer.Size(); err != nil || size != 0 { + t.Fatal("expected zero size for empty layer") + } + + if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 { + t.Fatal("expected zero diffsize for empty layer") + } + + tarStream, err := EmptyLayer.TarStream() + if err != nil { + t.Fatalf("error streaming tar for empty layer: %v", err) + } + + digester := digest.Canonical.New() + _, err = io.Copy(digester.Hash(), tarStream) + + if err != nil { + t.Fatalf("error hashing empty tar layer: %v", err) + } + + if digester.Digest() != digest.Digest(DigestSHA256EmptyTar) { + t.Fatal("empty layer tar stream hashes to wrong value") + } +} diff --git a/vendor/github.com/moby/moby/layer/filestore.go b/vendor/github.com/moby/moby/layer/filestore.go new file mode 100644 index 0000000..42b4555 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/filestore.go @@ -0,0 +1,354 @@ +package layer + +import ( + "compress/gzip" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + ws *ioutils.AtomicWriteSet +} + +// NewFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. +func NewFSMetadataStore(root string) (MetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + ws, err := ioutils.NewAtomicWriteSet(tmpDir) + if err != nil { + return nil, err + } + + return &fileMetadataTransaction{ + store: fms, + ws: ws, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return fm.ws.WriteFile("size", []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { + jsonRef, err := json.Marshal(ref) + if err != nil { + return err + } + return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + + return fm.ws.Commit(finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return fm.ws.Cancel() +} + +func (fm *fileMetadataTransaction) String() string { + return fm.ws.String() +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) + if err != nil { + if os.IsNotExist(err) { + // only return empty descriptor to represent what is stored + return distribution.Descriptor{}, nil + } + return distribution.Descriptor{}, err + } + + var ref distribution.Descriptor + err = json.Unmarshal(content, &ref) + if err != nil { + return distribution.Descriptor{}, err + } + return ref, err +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids = append(ids, ChainID(dgst)) + } + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/vendor/github.com/moby/moby/layer/filestore_test.go b/vendor/github.com/moby/moby/layer/filestore_test.go new file mode 100644 index 0000000..55e3b28 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/filestore_test.go @@ -0,0 +1,104 @@ +package layer + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "syscall" + "testing" + + "github.com/docker/distribution/digest" +) + +func randomLayerID(seed int64) ChainID { + r := rand.New(rand.NewSource(seed)) + + return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))) +} + +func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { + td, err := ioutil.TempDir("", "layers-") + if err != nil { + t.Fatal(err) + } + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + + return fms.(*fileMetadataStore), td, func() { + if err := os.RemoveAll(td); err != nil { + t.Logf("Failed to cleanup %q: %s", td, err) + } + } +} + +func assertNotDirectoryError(t *testing.T, err error) { + perr, ok := err.(*os.PathError) + if !ok { + t.Fatalf("Unexpected error %#v, expected path error", err) + } + + if perr.Err != syscall.ENOTDIR { + t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR) + } +} + +func TestCommitFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if err := tx.SetSize(0); err != nil { + t.Fatal(err) + } + + err = tx.Commit(randomLayerID(5)) + if err == nil { + t.Fatalf("Expected error committing with invalid layer parent directory") + } + assertNotDirectoryError(t, err) +} + +func TestStartTransactionFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + _, err := fms.StartTransaction() + if err == nil { + t.Fatalf("Expected error starting transaction with invalid layer parent directory") + } + assertNotDirectoryError(t, err) + + if err := os.Remove(filepath.Join(td, "tmp")); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if expected := filepath.Join(td, "tmp"); strings.HasPrefix(expected, tx.String()) { + t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected) + } + + if err := tx.Cancel(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/layer/layer.go b/vendor/github.com/moby/moby/layer/layer.go new file mode 100644 index 0000000..ec1d434 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer.go @@ -0,0 +1,275 @@ +// Package layer is package for managing read-only +// and read-write mounts on the union file system +// driver. Read-only mounts are referenced using a +// content hash and are protected from mutation in +// the exposed interface. The tar format is used +// to create read-only layers and export both +// read-only and writable layers. The exported +// tar data for a read-only layer should match +// the tar used to create the layer. +package layer + +import ( + "errors" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/archive" +) + +var ( + // ErrLayerDoesNotExist is used when an operation is + // attempted on a layer which does not exist. + ErrLayerDoesNotExist = errors.New("layer does not exist") + + // ErrLayerNotRetained is used when a release is + // attempted on a layer which is not retained. + ErrLayerNotRetained = errors.New("layer not retained") + + // ErrMountDoesNotExist is used when an operation is + // attempted on a mount layer which does not exist. + ErrMountDoesNotExist = errors.New("mount does not exist") + + // ErrMountNameConflict is used when a mount is attempted + // to be created but there is already a mount with the name + // used for creation. + ErrMountNameConflict = errors.New("mount already exists with name") + + // ErrActiveMount is used when an operation on a + // mount is attempted but the layer is still + // mounted and the operation cannot be performed. + ErrActiveMount = errors.New("mount still active") + + // ErrNotMounted is used when requesting an active + // mount but the layer is not mounted. + ErrNotMounted = errors.New("not mounted") + + // ErrMaxDepthExceeded is used when a layer is attempted + // to be created which would result in a layer depth + // greater than the 125 max. + ErrMaxDepthExceeded = errors.New("max depth exceeded") + + // ErrNotSupported is used when the action is not supported + // on the current platform + ErrNotSupported = errors.New("not support on this platform") +) + +// ChainID is the content-addressable ID of a layer. +type ChainID digest.Digest + +// String returns a string rendition of a layer ID +func (id ChainID) String() string { + return string(id) +} + +// DiffID is the hash of an individual layer tar. +type DiffID digest.Digest + +// String returns a string rendition of a layer DiffID +func (diffID DiffID) String() string { + return string(diffID) +} + +// TarStreamer represents an object which may +// have its contents exported as a tar stream. +type TarStreamer interface { + // TarStream returns a tar archive stream + // for the contents of a layer. + TarStream() (io.ReadCloser, error) +} + +// Layer represents a read-only layer +type Layer interface { + TarStreamer + + // TarStreamFrom returns a tar archive stream for all the layer chain with + // arbitrary depth. + TarStreamFrom(ChainID) (io.ReadCloser, error) + + // ChainID returns the content hash of the entire layer chain. The hash + // chain is made up of DiffID of top layer and all of its parents. + ChainID() ChainID + + // DiffID returns the content hash of the layer + // tar stream used to create this layer. + DiffID() DiffID + + // Parent returns the next layer in the layer chain. + Parent() Layer + + // Size returns the size of the entire layer chain. The size + // is calculated from the total size of all files in the layers. + Size() (int64, error) + + // DiffSize returns the size difference of the top layer + // from parent layer. + DiffSize() (int64, error) + + // Metadata returns the low level storage metadata associated + // with layer. + Metadata() (map[string]string, error) +} + +// RWLayer represents a layer which is +// read and writable +type RWLayer interface { + TarStreamer + + // Name of mounted layer + Name() string + + // Parent returns the layer which the writable + // layer was created from. + Parent() Layer + + // Mount mounts the RWLayer and returns the filesystem path + // the to the writable layer. + Mount(mountLabel string) (string, error) + + // Unmount unmounts the RWLayer. This should be called + // for every mount. If there are multiple mount calls + // this operation will only decrement the internal mount counter. + Unmount() error + + // Size represents the size of the writable layer + // as calculated by the total size of the files + // changed in the mutable layer. + Size() (int64, error) + + // Changes returns the set of changes for the mutable layer + // from the base layer. + Changes() ([]archive.Change, error) + + // Metadata returns the low level metadata for the mutable layer + Metadata() (map[string]string, error) +} + +// Metadata holds information about a +// read-only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root string) error + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID) (Layer, error) + Get(ChainID) (Layer, error) + Map() map[ChainID]Layer + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + GetMountID(id string) (string, error) + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// DescribableStore represents a layer store capable of storing +// descriptors for layers. +type DescribableStore interface { + RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) +} + +// MetadataTransaction represents functions for setting layer metadata +// with a single transaction. +type MetadataTransaction interface { + SetSize(int64) error + SetParent(parent ChainID) error + SetDiffID(DiffID) error + SetCacheID(string) error + SetDescriptor(distribution.Descriptor) error + TarSplitWriter(compressInput bool) (io.WriteCloser, error) + + Commit(ChainID) error + Cancel() error + String() string +} + +// MetadataStore represents a backend for persisting +// metadata about layers and providing the metadata +// for restoring a Store. +type MetadataStore interface { + // StartTransaction starts an update for new metadata + // which will be used to represent an ID on commit. + StartTransaction() (MetadataTransaction, error) + + GetSize(ChainID) (int64, error) + GetParent(ChainID) (ChainID, error) + GetDiffID(ChainID) (DiffID, error) + GetCacheID(ChainID) (string, error) + GetDescriptor(ChainID) (distribution.Descriptor, error) + TarSplitReader(ChainID) (io.ReadCloser, error) + + SetMountID(string, string) error + SetInitID(string, string) error + SetMountParent(string, ChainID) error + + GetMountID(string) (string, error) + GetInitID(string) (string, error) + GetMountParent(string) (ChainID, error) + + // List returns the full list of referenced + // read-only and read-write layers + List() ([]ChainID, []string, error) + + Remove(ChainID) error + RemoveMount(string) error +} + +// CreateChainID returns ID for a layerDigest slice +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) +} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, uses this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/vendor/github.com/moby/moby/layer/layer_store.go b/vendor/github.com/moby/moby/layer/layer_store.go new file mode 100644 index 0000000..1a1ff9f --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer_store.go @@ -0,0 +1,684 @@ +package layer + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. +const maxLayerDepth = 125 + +type layerStore struct { + store MetadataStore + driver graphdriver.Driver + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + StorePath string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + PluginGetter plugingetter.PluginGetter + ExperimentalEnabled bool +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ + Root: options.StorePath, + DriverOptions: options.GraphDriverOptions, + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + ExperimentalEnabled: options.ExperimentalEnabled, + }) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Using graph driver %s", driver) + + fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) + if err != nil { + return nil, err + } + + return NewStoreFromGraphDriver(fms, driver) +} + +// NewStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. +func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { + ls := &layerStore{ + store: store, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + } + + ids, mounts, err := store.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + descriptor, err := ls.store.GetDescriptor(layer) + if err != nil { + return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + } + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.New() + tr := io.TeeReader(ts, digester.Hash()) + + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) + if err != nil { + return err + } + + // Discard trailing data but ensure metadata is picked up to reconstruct stream + io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) +} + +func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) Map() map[ChainID]Layer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layers := map[ChainID]Layer{} + + for k, v := range ls.layerMap { + layers[k] = v + } + + return layers +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + if err != nil { + return nil, err + } + m.initID = pid + } + + createOpts := &graphdriver.CreateOpts{ + StorageOpt: storageOpt, + } + + if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { + return nil, err + } + + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + delete(ls.mounts, m.Name()) + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. + initID := fmt.Sprintf("%s-init", graphID) + + createOpts := &graphdriver.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: storageOpt, + } + + if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(graphID) + if err != nil { + return err + } + defer fileGetCloser.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + logrus.Debugf("Assembling tar data for %s", graphID) + return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +type fileGetPutter struct { + storage.FileGetter + driver graphdriver.Driver + id string +} + +func (w *fileGetPutter) Close() error { + return w.driver.Put(w.id) +} + +func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return nil, err + } + return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil +} diff --git a/vendor/github.com/moby/moby/layer/layer_store_windows.go b/vendor/github.com/moby/moby/layer/layer_store_windows.go new file mode 100644 index 0000000..1276a91 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer_store_windows.go @@ -0,0 +1,11 @@ +package layer + +import ( + "io" + + "github.com/docker/distribution" +) + +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, descriptor) +} diff --git a/vendor/github.com/moby/moby/layer/layer_test.go b/vendor/github.com/moby/moby/layer/layer_test.go new file mode 100644 index 0000000..10712df --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer_test.go @@ -0,0 +1,771 @@ +package layer + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" +) + +func init() { + graphdriver.ApplyUncompressedLayer = archive.UnpackLayer + vfs.CopyWithTar = archive.CopyWithTar +} + +func newVFSGraphDriver(td string) (graphdriver.Driver, error) { + uidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getuid(), + Size: 1, + }, + } + gidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getgid(), + Size: 1, + }, + } + + options := graphdriver.Options{Root: td, UIDMaps: uidMap, GIDMaps: gidMap} + return graphdriver.GetDriver("vfs", nil, options) +} + +func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) { + td, err := ioutil.TempDir("", "graph-") + if err != nil { + t.Fatal(err) + } + + driver, err := newVFSGraphDriver(td) + if err != nil { + t.Fatal(err) + } + + return driver, func() { + os.RemoveAll(td) + } +} + +func newTestStore(t *testing.T) (Store, string, func()) { + td, err := ioutil.TempDir("", "layerstore-") + if err != nil { + t.Fatal(err) + } + + graph, graphcleanup := newTestGraphDriver(t) + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph) + if err != nil { + t.Fatal(err) + } + + return ls, td, func() { + graphcleanup() + os.RemoveAll(td) + } +} + +type layerInit func(root string) error + +func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { + containerID := stringid.GenerateRandomID() + mount, err := ls.CreateRWLayer(containerID, parent, "", nil, nil) + if err != nil { + return nil, err + } + + path, err := mount.Mount("") + if err != nil { + return nil, err + } + + if err := layerFunc(path); err != nil { + return nil, err + } + + ts, err := mount.TarStream() + if err != nil { + return nil, err + } + defer ts.Close() + + layer, err := ls.Register(ts, parent) + if err != nil { + return nil, err + } + + if err := mount.Unmount(); err != nil { + return nil, err + } + + if _, err := ls.ReleaseRWLayer(mount); err != nil { + return nil, err + } + + return layer, nil +} + +type FileApplier interface { + ApplyFile(root string) error +} + +type testFile struct { + name string + content []byte + permission os.FileMode +} + +func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { + return &testFile{ + name: name, + content: content, + permission: perm, + } +} + +func (tf *testFile) ApplyFile(root string) error { + fullPath := filepath.Join(root, tf.name) + if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { + return err + } + // Check if already exists + if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { + if err := os.Chmod(fullPath, tf.permission); err != nil { + return err + } + } + if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { + return err + } + return nil +} + +func initWithFiles(files ...FileApplier) layerInit { + return func(root string) error { + for _, f := range files { + if err := f.ApplyFile(root); err != nil { + return err + } + } + return nil + } +} + +func getCachedLayer(l Layer) *roLayer { + if rl, ok := l.(*referencedCacheLayer); ok { + return rl.roLayer + } + return l.(*roLayer) +} + +func getMountLayer(l RWLayer) *mountedLayer { + return l.(*referencedRWLayer).mountedLayer +} + +func createMetadata(layers ...Layer) []Metadata { + metadata := make([]Metadata, len(layers)) + for i := range layers { + size, err := layers[i].Size() + if err != nil { + panic(err) + } + + metadata[i].ChainID = layers[i].ChainID() + metadata[i].DiffID = layers[i].DiffID() + metadata[i].Size = size + metadata[i].DiffSize = getCachedLayer(layers[i]).size + } + + return metadata +} + +func assertMetadata(t *testing.T, metadata, expectedMetadata []Metadata) { + if len(metadata) != len(expectedMetadata) { + t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata)) + } + + for i := range metadata { + if metadata[i] != expectedMetadata[i] { + t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i]) + } + } + if t.Failed() { + t.FailNow() + } +} + +func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) { + layerCount := len(ls.(*layerStore).layerMap) + expectedMetadata := createMetadata(removed...) + metadata, err := ls.Release(layer) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, expectedMetadata) + + if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } +} + +func cacheID(l Layer) string { + return getCachedLayer(l).cacheID +} + +func assertLayerEqual(t *testing.T, l1, l2 Layer) { + if l1.ChainID() != l2.ChainID() { + t.Fatalf("Mismatched ID: %s vs %s", l1.ChainID(), l2.ChainID()) + } + if l1.DiffID() != l2.DiffID() { + t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) + } + + size1, err := l1.Size() + if err != nil { + t.Fatal(err) + } + + size2, err := l2.Size() + if err != nil { + t.Fatal(err) + } + + if size1 != size2 { + t.Fatalf("Mismatched size: %d vs %d", size1, size2) + } + + if cacheID(l1) != cacheID(l2) { + t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2)) + } + + p1 := l1.Parent() + p2 := l2.Parent() + if p1 != nil && p2 != nil { + assertLayerEqual(t, p1, p2) + } else if p1 != nil || p2 != nil { + t.Fatalf("Mismatched parents: %v vs %v", p1, p2) + } +} + +func TestMountAndRegister(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + size, _ := layer.Size() + t.Logf("Layer size: %d", size) + + mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), "", nil, nil) + if err != nil { + t.Fatal(err) + } + + path2, err := mount2.Mount("") + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + + if expected := "some test data"; string(b) != expected { + t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b)) + } + + if err := mount2.Unmount(); err != nil { + t.Fatal(err) + } + + if _, err := ls.ReleaseRWLayer(mount2); err != nil { + t.Fatal(err) + } +} + +func TestLayerRelease(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + t.Logf("Layer1: %s", layer1.ChainID()) + t.Logf("Layer2: %s", layer2.ChainID()) + t.Logf("Layer3a: %s", layer3a.ChainID()) + t.Logf("Layer3b: %s", layer3b.ChainID()) + + if expected := 4; len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } + + releaseAndCheckDeleted(t, ls, layer3b, layer3b) + releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1) +} + +func TestStoreRestore(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), "", nil, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { + t.Fatal(err) + } + + if err := m.Unmount(); err != nil { + t.Fatal(err) + } + + ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver) + if err != nil { + t.Fatal(err) + } + + layer3b, err := ls2.Get(layer3.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertLayerEqual(t, layer3b, layer3) + + // Create again with same name, should return error + if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), "", nil, nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + m2, err := ls2.GetRWLayer("some-mount_name") + if err != nil { + t.Fatal(err) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + if err := m2.Unmount(); err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + if expected := "nothing here"; string(b) != expected { + t.Fatalf("Unexpected content %q, expected %q", string(b), expected) + } + + if err := m2.Unmount(); err != nil { + t.Fatal(err) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1) +} + +func TestTarStreamStability(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), + } + addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) + files2 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664), + newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644), + } + + tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) + if err != nil { + t.Fatal(err) + } + + layer1, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + // hack layer to add file + p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") + if err != nil { + t.Fatal(err) + } + + if err := addedFile.ApplyFile(p); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil { + t.Fatal(err) + } + + layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + id1 := layer1.ChainID() + t.Logf("Layer 1: %s", layer1.ChainID()) + t.Logf("Layer 2: %s", layer2.ChainID()) + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar2, layer2) + + layer1b, err := ls.Get(id1) + if err != nil { + t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap) + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar1, layer1b) + + if _, err := ls.Release(layer1b); err != nil { + t.Fatal(err) + } +} + +func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { + expectedDigest := digest.FromBytes(expected) + + if digest.Digest(layer.DiffID()) != expectedDigest { + t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected) + } + + ts, err := layer.TarStream() + if err != nil { + t.Fatal(err) + } + defer ts.Close() + + actual, err := ioutil.ReadAll(ts) + if err != nil { + t.Fatal(err) + } + + if len(actual) != len(expected) { + logByteDiff(t, actual, expected) + t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) + } + + actualDigest := digest.FromBytes(actual) + + if actualDigest != expectedDigest { + logByteDiff(t, actual, expected) + t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) + } +} + +const maxByteLog = 4 * 1024 + +func logByteDiff(t *testing.T, actual, expected []byte) { + d1, d2 := byteDiff(actual, expected) + if len(d1) == 0 && len(d2) == 0 { + return + } + + prefix := len(actual) - len(d1) + if len(d1) > maxByteLog || len(d2) > maxByteLog { + t.Logf("Byte diff after %d matching bytes", prefix) + } else { + t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2) + } +} + +// byteDiff returns the differing bytes after the matching prefix +func byteDiff(b1, b2 []byte) ([]byte, []byte) { + i := 0 + for i < len(b1) && i < len(b2) { + if b1[i] != b2[i] { + break + } + i++ + } + + return b1[i:], b2[i:] +} + +func tarFromFiles(files ...FileApplier) ([]byte, error) { + td, err := ioutil.TempDir("", "tar-") + if err != nil { + return nil, err + } + defer os.RemoveAll(td) + + for _, f := range files { + if err := f.ApplyFile(td); err != nil { + return nil, err + } + } + + r, err := archive.Tar(td, archive.Uncompressed) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, r); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// assertReferences asserts that all the references are to the same +// image and represent the full set of references to that image. +func assertReferences(t *testing.T, references ...Layer) { + if len(references) == 0 { + return + } + base := references[0].(*referencedCacheLayer).roLayer + seenReferences := map[Layer]struct{}{ + references[0]: {}, + } + for i := 1; i < len(references); i++ { + other := references[i].(*referencedCacheLayer).roLayer + if base != other { + t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID()) + } + if _, ok := base.references[references[i]]; !ok { + t.Fatalf("Reference not part of reference list: %v", references[i]) + } + if _, ok := seenReferences[references[i]]; ok { + t.Fatalf("Duplicated reference %v", references[i]) + } + } + if rc := len(base.references); rc != len(references) { + t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references)) + } +} + +func TestRegisterExistingLayer(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layerFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), + } + + li := initWithFiles(baseFiles...) + layer1, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + tar1, err := tarFromFiles(layerFiles...) + if err != nil { + t.Fatal(err) + } + + layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer2a, layer2b) +} + +func TestTarStreamVerification(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, tmpdir, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0644), + } + files2 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0600), // different perm + } + + tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) + if err != nil { + t.Fatal(err) + } + + layer1, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + layer2, err := ls.Register(bytes.NewReader(tar2), "") + if err != nil { + t.Fatal(err) + } + id1 := digest.Digest(layer1.ChainID()) + id2 := digest.Digest(layer2.ChainID()) + + // Replace tar data files + src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz")) + if err != nil { + t.Fatal(err) + } + defer src.Close() + + dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz")) + if err != nil { + t.Fatal(err) + } + defer dst.Close() + + if _, err := io.Copy(dst, src); err != nil { + t.Fatal(err) + } + + src.Sync() + dst.Sync() + + ts, err := layer2.TarStream() + if err != nil { + t.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, ts) + if err == nil { + t.Fatal("expected data verification to fail") + } + if !strings.Contains(err.Error(), "could not verify layer data") { + t.Fatalf("wrong error returned from tarstream: %q", err) + } +} diff --git a/vendor/github.com/moby/moby/layer/layer_unix.go b/vendor/github.com/moby/moby/layer/layer_unix.go new file mode 100644 index 0000000..776b78a --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin openbsd solaris + +package layer + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name string) string { + return stringid.GenerateRandomID() +} diff --git a/vendor/github.com/moby/moby/layer/layer_unix_test.go b/vendor/github.com/moby/moby/layer/layer_unix_test.go new file mode 100644 index 0000000..9aa1afd --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer_unix_test.go @@ -0,0 +1,71 @@ +// +build !windows + +package layer + +import "testing" + +func graphDiffSize(ls Store, l Layer) (int64, error) { + cl := getCachedLayer(l) + var parent string + if cl.parent != nil { + parent = cl.parent.cacheID + } + return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) +} + +// Unix as Windows graph driver does not support Changes which is indirectly +// invoked by calling DiffSize on the driver +func TestLayerSize(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Added contents") + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644))) + if err != nil { + t.Fatal(err) + } + + layer1DiffSize, err := graphDiffSize(ls, layer1) + if err != nil { + t.Fatal(err) + } + + if int(layer1DiffSize) != len(content1) { + t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1)) + } + + layer1Size, err := layer1.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1); int(layer1Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer1Size, expected) + } + + layer2DiffSize, err := graphDiffSize(ls, layer2) + if err != nil { + t.Fatal(err) + } + + if int(layer2DiffSize) != len(content2) { + t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2)) + } + + layer2Size, err := layer2.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1) + len(content2); int(layer2Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected) + } + +} diff --git a/vendor/github.com/moby/moby/layer/layer_windows.go b/vendor/github.com/moby/moby/layer/layer_windows.go new file mode 100644 index 0000000..e20311a --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer_windows.go @@ -0,0 +1,98 @@ +package layer + +import ( + "errors" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" +) + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path, nil +} + +func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { + var err error // this is used for cleanup in existingLayer case + diffID := digest.FromBytes([]byte(graphID)) + + // Create new roLayer + layer := &roLayer{ + cacheID: graphID, + diffID: DiffID(diffID), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + size: size, + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + defer func() { + if err != nil { + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + layer.chainID = createChainIDFromParent("", layer.diffID) + + if !ls.driver.Exists(layer.cacheID) { + return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) + } + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} + +func (ls *layerStore) GraphDriver() graphdriver.Driver { + return ls.driver +} diff --git a/vendor/github.com/moby/moby/layer/migration.go b/vendor/github.com/moby/moby/layer/migration.go new file mode 100644 index 0000000..b45c310 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/migration.go @@ -0,0 +1,256 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. +func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + if m.parent.chainID != parent { + return errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return errors.New("mount already exists") + } + + return nil + } + + if !ls.driver.Exists(graphID) { + return fmt.Errorf("graph ID does not exist: %q", graphID) + } + + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = initID + } + + if err = ls.saveMount(m); err != nil { + return err + } + + return nil +} + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.New() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = DiffID(dgst.Digest()) + err = os.RemoveAll(newTarDataPath) + if err != nil { + return + } + err = os.Link(oldTarDataPath, newTarDataPath) + + return +} + +func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { + rawarchive, err := ls.driver.Diff(id, parent) + if err != nil { + return + } + defer rawarchive.Close() + + f, err := os.Create(newTarDataPath) + if err != nil { + return + } + defer f.Close() + mfz := gzip.NewWriter(f) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + packerCounter := &packSizeCounter{metaPacker, &size} + + archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) + if err != nil { + return + } + dgst, err := digest.FromReader(archive) + if err != nil { + return + } + diffID = DiffID(dgst) + return +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). + var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/vendor/github.com/moby/moby/layer/migration_test.go b/vendor/github.com/moby/moby/layer/migration_test.go new file mode 100644 index 0000000..07b4b68 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/migration_test.go @@ -0,0 +1,435 @@ +package layer + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +func writeTarSplitFile(name string, tarContent []byte) error { + f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + fz := gzip.NewWriter(f) + + metaPacker := storage.NewJSONPacker(fz) + defer fz.Close() + + rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) + if err != nil { + return err + } + + if _, err := io.Copy(ioutil.Discard, rdr); err != nil { + return err + } + + return nil +} + +func TestLayerMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + tar1, err := tarFromFiles(layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(layer2Files...) + if err != nil { + t.Fatal(err) + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + + graphID1 := stringid.GenerateRandomID() + if err := graph.Create(graphID1, "", nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID1, "", bytes.NewReader(tar1)); err != nil { + t.Fatal(err) + } + + tf1 := filepath.Join(td, "tar1.json.gz") + if err := writeTarSplitFile(tf1, tar1); err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", tf1, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + if err != nil { + t.Fatal(err) + } + + graphID2 := stringid.GenerateRandomID() + if err := graph.Create(graphID2, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID2, graphID1, bytes.NewReader(tar2)); err != nil { + t.Fatal(err) + } + + tf2 := filepath.Join(td, "tar2.json.gz") + if err := writeTarSplitFile(tf2, tar2); err != nil { + t.Fatal(err) + } + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, tf2, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, tf2, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, files ...FileApplier) ([]byte, error) { + t, err := tarFromFiles(files...) + if err != nil { + return nil, err + } + + if err := graph.Create(graphID, parentID, nil); err != nil { + return nil, err + } + if _, err := graph.ApplyDiff(graphID, parentID, bytes.NewReader(t)); err != nil { + return nil, err + } + + ar, err := graph.Diff(graphID, parentID) + if err != nil { + return nil, err + } + defer ar.Close() + + return ioutil.ReadAll(ar) +} + +func TestLayerMigrationNoTarsplit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + graphID1 := stringid.GenerateRandomID() + graphID2 := stringid.GenerateRandomID() + + tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...) + if err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + if err != nil { + t.Fatal(err) + } + + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func TestMountMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing (obvious - paths... needs porting) + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + initFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte{}, 0644), + newTestFile("/etc/resolv.conf", []byte{}, 0644), + } + mountFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644), + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644), + } + + initTar, err := tarFromFiles(initFiles...) + if err != nil { + t.Fatal(err) + } + + mountTar, err := tarFromFiles(mountFiles...) + if err != nil { + t.Fatal(err) + } + + graph := ls.(*layerStore).driver + + layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) + if err != nil { + t.Fatal(err) + } + + graphID1 := layer1.(*referencedCacheLayer).cacheID + + containerID := stringid.GenerateRandomID() + containerInit := fmt.Sprintf("%s-init", containerID) + + if err := graph.Create(containerInit, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerInit, graphID1, bytes.NewReader(initTar)); err != nil { + t.Fatal(err) + } + + if err := graph.Create(containerID, containerInit, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerID, containerInit, bytes.NewReader(mountTar)); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil { + t.Fatal(err) + } + + rwLayer1, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if _, err := rwLayer1.Mount(""); err != nil { + t.Fatal(err) + } + + changes, err := rwLayer1.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 5; len(changes) != expected { + t.Logf("Changes %#v", changes) + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/etc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/etc/hosts", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/root", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/root/.bashrc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[4], archive.Change{ + Path: "/root/testfile1.txt", + Kind: archive.ChangeAdd, + }) + + if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil, nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + rwLayer2, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) { + t.Fatal("Expected same layer from get with same name as from migrate") + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if metadata, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata) + } + + if err := rwLayer1.Unmount(); err != nil { + t.Fatal(err) + } + + if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil { + t.Fatal(err) + } + + if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + metadata, err := ls.ReleaseRWLayer(rwLayer2) + if err != nil { + t.Fatal(err) + } + if len(metadata) == 0 { + t.Fatal("Expected base layer to be deleted when deleting mount") + } + + assertMetadata(t, metadata, createMetadata(layer1)) +} diff --git a/vendor/github.com/moby/moby/layer/mount_test.go b/vendor/github.com/moby/moby/layer/mount_test.go new file mode 100644 index 0000000..7a8637e --- /dev/null +++ b/vendor/github.com/moby/moby/layer/mount_test.go @@ -0,0 +1,230 @@ +package layer + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +func TestMountInit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) + initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefile) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), "", mountInit, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + if expected := "init data!"; string(b) != expected { + t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) + } + + if fi.Mode().Perm() != 0777 { + t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) + } +} + +func TestMountSize(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Mutable contents") + contentInit := []byte("why am I excluded from the size ☹") + + li := initWithFiles(newTestFile("file1", content1, 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return newTestFile("file-init", contentInit, 0777).ApplyFile(root) + } + + m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), "", mountInit, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { + t.Fatal(err) + } + + mountSize, err := m.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content2); int(mountSize) != expected { + t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected) + } +} + +func TestMountChanges(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefiles := []FileApplier{ + newTestFile("testfile1.txt", []byte("base data!"), 0644), + newTestFile("testfile2.txt", []byte("base data!"), 0644), + newTestFile("testfile3.txt", []byte("base data!"), 0644), + } + initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefiles...) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), "", mountInit, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { + t.Fatal(err) + } + + if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { + t.Fatal(err) + } + + changes, err := m.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 4; len(changes) != expected { + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/testfile1.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/testfile2.txt", + Kind: archive.ChangeDelete, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/testfile3.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/testfile4.txt", + Kind: archive.ChangeAdd, + }) +} + +func assertChange(t *testing.T, actual, expected archive.Change) { + if actual.Path != expected.Path { + t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path) + } + if actual.Kind != expected.Kind { + t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind) + } +} + +func sortChanges(changes []archive.Change) { + cs := &changeSorter{ + changes: changes, + } + sort.Sort(cs) +} + +type changeSorter struct { + changes []archive.Change +} + +func (cs *changeSorter) Len() int { + return len(cs.changes) +} + +func (cs *changeSorter) Swap(i, j int) { + cs.changes[i], cs.changes[j] = cs.changes[j], cs.changes[i] +} + +func (cs *changeSorter) Less(i, j int) bool { + return cs.changes[i].Path < cs.changes[j].Path +} diff --git a/vendor/github.com/moby/moby/layer/mounted_layer.go b/vendor/github.com/moby/moby/layer/mounted_layer.go new file mode 100644 index 0000000..a5cfcfa --- /dev/null +++ b/vendor/github.com/moby/moby/layer/mounted_layer.go @@ -0,0 +1,99 @@ +package layer + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. + return nil +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.references[ref] = ref + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + return len(ml.references) > 0 +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + if _, ok := ml.references[ref]; !ok { + return ErrLayerNotRetained + } + delete(ml.references, ref) + return nil +} + +func (ml *mountedLayer) retakeReference(r RWLayer) { + if ref, ok := r.(*referencedRWLayer); ok { + ml.references[ref] = ref + } +} + +type referencedRWLayer struct { + *mountedLayer +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { + return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) +} + +// Unmount decrements the activity count and unmounts the underlying layer +// Callers should only call `Unmount` once per call to `Mount`, even on error. +func (rl *referencedRWLayer) Unmount() error { + return rl.layerStore.driver.Put(rl.mountedLayer.mountID) +} diff --git a/vendor/github.com/moby/moby/layer/ro_layer.go b/vendor/github.com/moby/moby/layer/ro_layer.go new file mode 100644 index 0000000..7c8d233 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/ro_layer.go @@ -0,0 +1,192 @@ +package layer + +import ( + "fmt" + "io" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" +) + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + descriptor distribution.Descriptor + + referenceCount int + references map[Layer]struct{} +} + +// TarStream for roLayer guarentees that the data that is produced is the exact +// data that the layer was registered with. +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + r, err := rl.layerStore.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return rc, nil +} + +// TarStreamFrom does not make any guarentees to the correctness of the produced +// data. As such it should not be used when the layer content must be verified +// to be an exact match to the registered layer. +func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { + var parentCacheID string + for pl := rl.parent; pl != nil; pl = pl.parent { + if pl.chainID == parent { + parentCacheID = pl.cacheID + break + } + } + + if parent != ChainID("") && parentCacheID == "" { + return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) + } + return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx MetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + // Do not store empty descriptors + if layer.descriptor.Digest != "" { + if err := tx.SetDescriptor(layer.descriptor); err != nil { + return err + } + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + + return nil +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: verifier, + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) + } + } + return +} +func (vrc *verifiedReadCloser) Close() error { + return vrc.rc.Close() +} diff --git a/vendor/github.com/moby/moby/layer/ro_layer_windows.go b/vendor/github.com/moby/moby/layer/ro_layer_windows.go new file mode 100644 index 0000000..32bd718 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/ro_layer_windows.go @@ -0,0 +1,9 @@ +package layer + +import "github.com/docker/distribution" + +var _ distribution.Describable = &roLayer{} + +func (rl *roLayer) Descriptor() distribution.Descriptor { + return rl.descriptor +} diff --git a/vendor/github.com/moby/moby/libcontainerd/client.go b/vendor/github.com/moby/moby/libcontainerd/client.go new file mode 100644 index 0000000..c14c1c5 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/client.go @@ -0,0 +1,46 @@ +package libcontainerd + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" +) + +// clientCommon contains the platform agnostic fields used in the client structure +type clientCommon struct { + backend Backend + containers map[string]*container + locker *locker.Locker + mapMutex sync.RWMutex // protects read/write oprations from containers map +} + +func (clnt *client) lock(containerID string) { + clnt.locker.Lock(containerID) +} + +func (clnt *client) unlock(containerID string) { + clnt.locker.Unlock(containerID) +} + +// must hold a lock for cont.containerID +func (clnt *client) appendContainer(cont *container) { + clnt.mapMutex.Lock() + clnt.containers[cont.containerID] = cont + clnt.mapMutex.Unlock() +} +func (clnt *client) deleteContainer(containerID string) { + clnt.mapMutex.Lock() + delete(clnt.containers, containerID) + clnt.mapMutex.Unlock() +} + +func (clnt *client) getContainer(containerID string) (*container, error) { + clnt.mapMutex.RLock() + container, ok := clnt.containers[containerID] + defer clnt.mapMutex.RUnlock() + if !ok { + return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error + } + return container, nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/client_linux.go b/vendor/github.com/moby/moby/libcontainerd/client_linux.go new file mode 100644 index 0000000..190f981 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/client_linux.go @@ -0,0 +1,605 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +type client struct { + clientCommon + + // Platform specific properties below here. + remote *remote + q queue + exitNotifiers map[string]*exitNotifier + liveRestore bool +} + +// GetServerVersion returns the connected server version information +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) + if err != nil { + return nil, err + } + + sv := &ServerVersion{ + GetServerVersionResponse: *resp, + } + + return sv, nil +} + +// AddProcess is the handler for adding a process to an already running +// container. It's called through docker exec. It returns the system pid of the +// exec'd process. +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (pid int, err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return -1, err + } + + spec, err := container.spec() + if err != nil { + return -1, err + } + sp := spec.Process + sp.Args = specp.Args + sp.Terminal = specp.Terminal + if len(specp.Env) > 0 { + sp.Env = specp.Env + } + if specp.Cwd != nil { + sp.Cwd = *specp.Cwd + } + if specp.User != nil { + sp.User = specs.User{ + UID: specp.User.UID, + GID: specp.User.GID, + AdditionalGids: specp.User.AdditionalGids, + } + } + if specp.Capabilities != nil { + sp.Capabilities = specp.Capabilities + } + + p := container.newProcess(processFriendlyName) + + r := &containerd.AddProcessRequest{ + Args: sp.Args, + Cwd: sp.Cwd, + Terminal: sp.Terminal, + Id: containerID, + Env: sp.Env, + User: &containerd.User{ + Uid: sp.User.UID, + Gid: sp.User.GID, + AdditionalGids: sp.User.AdditionalGids, + }, + Pid: processFriendlyName, + Stdin: p.fifo(syscall.Stdin), + Stdout: p.fifo(syscall.Stdout), + Stderr: p.fifo(syscall.Stderr), + Capabilities: sp.Capabilities, + ApparmorProfile: sp.ApparmorProfile, + SelinuxLabel: sp.SelinuxLabel, + NoNewPrivileges: sp.NoNewPrivileges, + Rlimits: convertRlimits(sp.Rlimits), + } + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := p.openFifos(fifoCtx, sp.Terminal) + if err != nil { + return -1, err + } + + resp, err := clnt.remote.apiClient.AddProcess(ctx, r) + if err != nil { + p.closeFifos(iopipe) + return -1, err + } + + var stdinOnce sync.Once + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + if err2 := p.sendCloseStdin(); err == nil { + err = err2 + } + }) + return err + }) + + container.processes[processFriendlyName] = p + + if err := attachStdio(*iopipe); err != nil { + p.closeFifos(iopipe) + return -1, err + } + + return int(resp.SystemPid), nil +} + +func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: pid, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: containerID, + Pid: processFriendlyName, + Width: uint32(width), + Height: uint32(height), + }) + return err +} + +func (clnt *client) Pause(containerID string) error { + return clnt.setState(containerID, StatePause) +} + +func (clnt *client) setState(containerID, state string) error { + clnt.lock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + clnt.unlock(containerID) + return err + } + if container.systemPid == 0 { + clnt.unlock(containerID) + return fmt.Errorf("No active process for container %s", containerID) + } + st := "running" + if state == StatePause { + st = "paused" + } + chstate := make(chan struct{}) + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Status: st, + }) + if err != nil { + clnt.unlock(containerID) + return err + } + container.pauseMonitor.append(state, chstate) + clnt.unlock(containerID) + <-chstate + return nil +} + +func (clnt *client) Resume(containerID string) error { + return clnt.setState(containerID, StateResume) +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) + if err != nil { + return nil, err + } + return (*Stats)(resp), nil +} + +// Take care of the old 1.11.0 behavior in case the version upgrade +// happened without a clean daemon shutdown +func (clnt *client) cleanupOldRootfs(containerID string) { + // Unmount and delete the bundle folder + if mts, err := mount.GetMounts(); err == nil { + for _, mts := range mts { + if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { + if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil { + os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) + } + break + } + } + } +} + +func (clnt *client) setExited(containerID string, exitCode uint32) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + err := clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: exitCode, + }}) + + clnt.cleanupOldRootfs(containerID) + + return err +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + cont, err := clnt.getContainerdContainer(containerID) + if err != nil { + return nil, err + } + pids := make([]int, len(cont.Pids)) + for i, p := range cont.Pids { + pids[i] = int(p) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is a no-op on Linux. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { + resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) + if err != nil { + return nil, err + } + for _, cont := range resp.Containers { + if cont.Id == containerID { + return cont, nil + } + } + return nil, fmt.Errorf("invalid state response") +} + +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + if container.systemPid == 0 { + return fmt.Errorf("No active process for container %s", containerID) + } + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Resources: (*containerd.UpdateResource)(&resources), + }) + if err != nil { + return err + } + return nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + w, ok := clnt.exitNotifiers[containerID] + defer clnt.mapMutex.Unlock() + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +func (clnt *client) restore(cont *containerd.Container, lastEvent *containerd.Event, attachStdio StdioCallback, options ...CreateOption) (err error) { + clnt.lock(cont.Id) + defer clnt.unlock(cont.Id) + + logrus.Debugf("libcontainerd: restore container %s state %s", cont.Id, cont.Status) + + containerID := cont.Id + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("container %s is already active", containerID) + } + + defer func() { + if err != nil { + clnt.deleteContainer(cont.Id) + } + }() + + container := clnt.newContainer(cont.BundlePath, options...) + container.systemPid = systemPid(cont) + + var terminal bool + for _, p := range cont.Processes { + if p.Pid == InitFriendlyName { + terminal = p.Terminal + } + } + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := container.openFifos(fifoCtx, terminal) + if err != nil { + return err + } + var stdinOnce sync.Once + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + }) + return err + }) + + if err := attachStdio(*iopipe); err != nil { + container.closeFifos(iopipe) + return err + } + + clnt.appendContainer(container) + + err = clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateRestore, + Pid: container.systemPid, + }}) + + if err != nil { + container.closeFifos(iopipe) + return err + } + + if lastEvent != nil { + // This should only be a pause or resume event + if lastEvent.Type == StatePause || lastEvent.Type == StateResume { + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: lastEvent.Type, + Pid: container.systemPid, + }}) + } + + logrus.Warnf("libcontainerd: unexpected backlog event: %#v", lastEvent) + } + + return nil +} + +func (clnt *client) getContainerLastEventSinceTime(id string, tsp *timestamp.Timestamp) (*containerd.Event, error) { + er := &containerd.EventsRequest{ + Timestamp: tsp, + StoredOnly: true, + Id: id, + } + events, err := clnt.remote.apiClient.Events(context.Background(), er) + if err != nil { + logrus.Errorf("libcontainerd: failed to get container events stream for %s: %q", er.Id, err) + return nil, err + } + + var ev *containerd.Event + for { + e, err := events.Recv() + if err != nil { + if err.Error() == "EOF" { + break + } + logrus.Errorf("libcontainerd: failed to get container event for %s: %q", id, err) + return nil, err + } + ev = e + logrus.Debugf("libcontainerd: received past event %#v", ev) + } + + return ev, nil +} + +func (clnt *client) getContainerLastEvent(id string) (*containerd.Event, error) { + ev, err := clnt.getContainerLastEventSinceTime(id, clnt.remote.restoreFromTimestamp) + if err == nil && ev == nil { + // If ev is nil and the container is running in containerd, + // we already consumed all the event of the + // container, included the "exit" one. + // Thus, we request all events containerd has in memory for + // this container in order to get the last one (which should + // be an exit event) + logrus.Warnf("libcontainerd: client is out of sync, restore was called on a fully synced container (%s).", id) + // Request all events since beginning of time + t := time.Unix(0, 0) + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: getLastEventSinceTime() failed to convert timestamp: %q", err) + return nil, err + } + + return clnt.getContainerLastEventSinceTime(id, tsp) + } + + return ev, err +} + +func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error { + // Synchronize with live events + clnt.remote.Lock() + defer clnt.remote.Unlock() + // Check that containerd still knows this container. + // + // In the unlikely event that Restore for this container process + // the its past event before the main loop, the event will be + // processed twice. However, this is not an issue as all those + // events will do is change the state of the container to be + // exactly the same. + cont, err := clnt.getContainerdContainer(containerID) + // Get its last event + ev, eerr := clnt.getContainerLastEvent(containerID) + if err != nil || cont.Status == "Stopped" { + if err != nil { + logrus.Warnf("libcontainerd: failed to retrieve container %s state: %v", containerID, err) + } + if ev != nil && (ev.Pid != InitFriendlyName || ev.Type != StateExit) { + // Wait a while for the exit event + timeout := time.NewTimer(10 * time.Second) + tick := time.NewTicker(100 * time.Millisecond) + stop: + for { + select { + case <-timeout.C: + break stop + case <-tick.C: + ev, eerr = clnt.getContainerLastEvent(containerID) + if eerr != nil { + break stop + } + if ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { + break stop + } + } + } + timeout.Stop() + tick.Stop() + } + + // get the exit status for this container, if we don't have + // one, indicate an error + ec := uint32(255) + if eerr == nil && ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { + ec = ev.Status + } + clnt.setExited(containerID, ec) + + return nil + } + + // container is still alive + if clnt.liveRestore { + if err := clnt.restore(cont, ev, attachStdio, options...); err != nil { + logrus.Errorf("libcontainerd: error restoring %s: %v", containerID, err) + } + return nil + } + + // Kill the container if liveRestore == false + w := clnt.getOrCreateExitNotifier(containerID) + clnt.lock(cont.Id) + container := clnt.newContainer(cont.BundlePath) + container.systemPid = systemPid(cont) + clnt.appendContainer(container) + clnt.unlock(cont.Id) + + container.discardFifos() + + if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil { + logrus.Errorf("libcontainerd: error sending sigterm to %v: %v", containerID, err) + } + // Let the main loop handle the exit event + clnt.remote.Unlock() + select { + case <-time.After(10 * time.Second): + if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil { + logrus.Errorf("libcontainerd: error sending sigkill to %v: %v", containerID, err) + } + select { + case <-time.After(2 * time.Second): + case <-w.wait(): + // relock because of the defer + clnt.remote.Lock() + return nil + } + case <-w.wait(): + // relock because of the defer + clnt.remote.Lock() + return nil + } + // relock because of the defer + clnt.remote.Lock() + + clnt.deleteContainer(containerID) + + return clnt.setExited(containerID, uint32(255)) +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + + _, err := clnt.remote.apiClient.CreateCheckpoint(context.Background(), &containerd.CreateCheckpointRequest{ + Id: containerID, + Checkpoint: &containerd.Checkpoint{ + Name: checkpointID, + Exit: exit, + Tcp: true, + UnixSockets: true, + Shell: false, + EmptyNS: []string{"network"}, + }, + CheckpointDir: checkpointDir, + }) + return err +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + + _, err := clnt.remote.apiClient.DeleteCheckpoint(context.Background(), &containerd.DeleteCheckpointRequest{ + Id: containerID, + Name: checkpointID, + CheckpointDir: checkpointDir, + }) + return err +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return nil, err + } + + resp, err := clnt.remote.apiClient.ListCheckpoint(context.Background(), &containerd.ListCheckpointRequest{ + Id: containerID, + CheckpointDir: checkpointDir, + }) + if err != nil { + return nil, err + } + return (*Checkpoints)(resp), nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/client_solaris.go b/vendor/github.com/moby/moby/libcontainerd/client_solaris.go new file mode 100644 index 0000000..cb93997 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/client_solaris.go @@ -0,0 +1,101 @@ +package libcontainerd + +import "golang.org/x/net/context" + +type client struct { + clientCommon + + // Platform specific properties below here. + remote *remote + q queue + exitNotifiers map[string]*exitNotifier + liveRestore bool +} + +// GetServerVersion returns the connected server version information +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) + if err != nil { + return nil, err + } + + sv := &ServerVersion{ + GetServerVersionResponse: *resp, + } + + return sv, nil +} + +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (int, error) { + return -1, nil +} + +func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { + return nil +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + return nil +} + +func (clnt *client) Pause(containerID string) error { + return nil +} + +func (clnt *client) Resume(containerID string) error { + return nil +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + return nil, nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + defer clnt.mapMutex.Unlock() + w, ok := clnt.exitNotifiers[containerID] + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +// Restore is the handler for restoring a container +func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error { + return nil +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + return nil, nil +} + +// Summary returns a summary of the processes running in a container. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +// UpdateResources updates resources for a running container. +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + // Updating resource isn't supported on Solaris + // but we should return nil for enabling updating container + return nil +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + return nil +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + return nil +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/client_unix.go b/vendor/github.com/moby/moby/libcontainerd/client_unix.go new file mode 100644 index 0000000..21e8fea --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/client_unix.go @@ -0,0 +1,142 @@ +// +build linux solaris + +package libcontainerd + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/idtools" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { + root, err := filepath.Abs(clnt.remote.stateDir) + if err != nil { + return "", err + } + if uid == 0 && gid == 0 { + return root, nil + } + p := string(filepath.Separator) + for _, d := range strings.Split(root, string(filepath.Separator))[1:] { + p = filepath.Join(p, d) + fi, err := os.Stat(p) + if err != nil && !os.IsNotExist(err) { + return "", err + } + if os.IsNotExist(err) || fi.Mode()&1 == 0 { + p = fmt.Sprintf("%s.%d.%d", p, uid, gid) + if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + } + } + return p, nil +} + +func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) (err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("Container %s is already active", containerID) + } + + uid, gid, err := getRootIDs(specs.Spec(spec)) + if err != nil { + return err + } + dir, err := clnt.prepareBundleDir(uid, gid) + if err != nil { + return err + } + + container := clnt.newContainer(filepath.Join(dir, containerID), options...) + if err := container.clean(); err != nil { + return err + } + + defer func() { + if err != nil { + container.clean() + clnt.deleteContainer(containerID) + } + }() + + if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + + f, err := os.Create(filepath.Join(container.dir, configFilename)) + if err != nil { + return err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(spec); err != nil { + return err + } + + return container.start(checkpoint, checkpointDir, attachStdio) +} + +func (clnt *client) Signal(containerID string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: InitFriendlyName, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) newContainer(dir string, options ...CreateOption) *container { + container := &container{ + containerCommon: containerCommon{ + process: process{ + dir: dir, + processCommon: processCommon{ + containerID: filepath.Base(dir), + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + } + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: newContainer(): %v", err) + } + } + return container +} + +type exitNotifier struct { + id string + client *client + c chan struct{} + once sync.Once +} + +func (en *exitNotifier) close() { + en.once.Do(func() { + close(en.c) + en.client.mapMutex.Lock() + if en == en.client.exitNotifiers[en.id] { + delete(en.client.exitNotifiers, en.id) + } + en.client.mapMutex.Unlock() + }) +} +func (en *exitNotifier) wait() <-chan struct{} { + return en.c +} diff --git a/vendor/github.com/moby/moby/libcontainerd/client_windows.go b/vendor/github.com/moby/moby/libcontainerd/client_windows.go new file mode 100644 index 0000000..3d5d570 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/client_windows.go @@ -0,0 +1,645 @@ +package libcontainerd + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "golang.org/x/net/context" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/sysinfo" + "github.com/opencontainers/runtime-spec/specs-go" +) + +type client struct { + clientCommon + + // Platform specific properties below here (none presently on Windows) +} + +// Win32 error codes that are used for various workarounds +// These really should be ALL_CAPS to match golangs syscall library and standard +// Win32 error conventions, but golint insists on CamelCase. +const ( + CoEClassstring = syscall.Errno(0x800401F3) // Invalid class string + ErrorNoNetwork = syscall.Errno(1222) // The network is not present or not started + ErrorBadPathname = syscall.Errno(161) // The specified path is invalid + ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object +) + +// defaultOwner is a tag passed to HCS to allow it to differentiate between +// container creator management stacks. We hard code "docker" in the case +// of docker. +const defaultOwner = "docker" + +// Create is the entrypoint to create a container from a spec, and if successfully +// created, start it too. Table below shows the fields required for HCS JSON calling parameters, +// where if not populated, is omitted. +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | | Isolation=Process | Isolation=Hyper-V | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | VolumePath | \\?\\Volume{GUIDa} | | +// | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) | +// | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | +// | SandboxPath | | %root%\windowsfilter | +// | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// +// Isolation=Process example: +// +// { +// "SystemType": "Container", +// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Owner": "docker", +// "IsDummy": false, +// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", +// "IgnoreFlushesDuringBoot": true, +// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "5e0055c814a6", +// "MappedDirectories": [], +// "HvPartition": false, +// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], +// "Servicing": false +//} +// +// Isolation=Hyper-V example: +// +//{ +// "SystemType": "Container", +// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", +// "Owner": "docker", +// "IsDummy": false, +// "IgnoreFlushesDuringBoot": true, +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "475c2c58933b", +// "MappedDirectories": [], +// "SandboxPath": "C:\\\\control\\\\windowsfilter", +// "HvPartition": true, +// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], +// "HvRuntime": { +// "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" +// }, +// "Servicing": false +//} +func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + logrus.Debugln("libcontainerd: client.Create() with spec", spec) + + configuration := &hcsshim.ContainerConfig{ + SystemType: "Container", + Name: containerID, + Owner: defaultOwner, + IgnoreFlushesDuringBoot: false, + HostName: spec.Hostname, + HvPartition: false, + } + + if spec.Windows.Resources != nil { + if spec.Windows.Resources.CPU != nil { + if spec.Windows.Resources.CPU.Count != nil { + // This check is being done here rather than in adaptContainerSettings + // because we don't want to update the HostConfig in case this container + // is moved to a host with more CPUs than this one. + cpuCount := *spec.Windows.Resources.CPU.Count + hostCPUCount := uint64(sysinfo.NumCPU()) + if cpuCount > hostCPUCount { + logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount) + cpuCount = hostCPUCount + } + configuration.ProcessorCount = uint32(cpuCount) + } + if spec.Windows.Resources.CPU.Shares != nil { + configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) + } + if spec.Windows.Resources.CPU.Percent != nil { + configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000 + } + } + if spec.Windows.Resources.Memory != nil { + if spec.Windows.Resources.Memory.Limit != nil { + configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024 + } + } + if spec.Windows.Resources.Storage != nil { + if spec.Windows.Resources.Storage.Bps != nil { + configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps + } + if spec.Windows.Resources.Storage.Iops != nil { + configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops + } + } + } + + var layerOpt *LayerOption + for _, option := range options { + if s, ok := option.(*ServicingOption); ok { + configuration.Servicing = s.IsServicing + continue + } + if f, ok := option.(*FlushOption); ok { + configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot + continue + } + if h, ok := option.(*HyperVIsolationOption); ok { + configuration.HvPartition = h.IsHyperV + configuration.SandboxPath = h.SandboxPath + continue + } + if l, ok := option.(*LayerOption); ok { + layerOpt = l + } + if n, ok := option.(*NetworkEndpointsOption); ok { + configuration.EndpointList = n.Endpoints + configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery + continue + } + if c, ok := option.(*CredentialsOption); ok { + configuration.Credentials = c.Credentials + continue + } + } + + // We must have a layer option with at least one path + if layerOpt == nil || layerOpt.LayerPaths == nil { + return fmt.Errorf("no layer option or paths were supplied to the runtime") + } + + if configuration.HvPartition { + // Find the upper-most utility VM image, since the utility VM does not + // use layering in RS1. + // TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable. + var uvmImagePath string + for _, path := range layerOpt.LayerPaths { + fullPath := filepath.Join(path, "UtilityVM") + _, err := os.Stat(fullPath) + if err == nil { + uvmImagePath = fullPath + break + } + if !os.IsNotExist(err) { + return err + } + } + if uvmImagePath == "" { + return errors.New("utility VM image could not be found") + } + configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} + } else { + configuration.VolumePath = spec.Root.Path + } + + configuration.LayerFolderPath = layerOpt.LayerFolderPath + + for _, layerPath := range layerOpt.LayerPaths { + _, filename := filepath.Split(layerPath) + g, err := hcsshim.NameToGuid(filename) + if err != nil { + return err + } + configuration.Layers = append(configuration.Layers, hcsshim.Layer{ + ID: g.ToString(), + Path: layerPath, + }) + } + + // Add the mounts (volumes, bind mounts etc) to the structure + mds := make([]hcsshim.MappedDir, len(spec.Mounts)) + for i, mount := range spec.Mounts { + mds[i] = hcsshim.MappedDir{ + HostPath: mount.Source, + ContainerPath: mount.Destination, + ReadOnly: false, + } + for _, o := range mount.Options { + if strings.ToLower(o) == "ro" { + mds[i].ReadOnly = true + } + } + } + configuration.MappedDirectories = mds + + hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) + if err != nil { + return err + } + + // Construct a container object for calling start on it. + container := &container{ + containerCommon: containerCommon{ + process: process{ + processCommon: processCommon{ + containerID: containerID, + client: clnt, + friendlyName: InitFriendlyName, + }, + commandLine: strings.Join(spec.Process.Args, " "), + }, + processes: make(map[string]*process), + }, + ociSpec: spec, + hcsContainer: hcsContainer, + } + + container.options = options + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: %v", err) + } + } + + // Call start, and if it fails, delete the container from our + // internal structure, start will keep HCS in sync by deleting the + // container there. + logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID) + if err := container.start(attachStdio); err != nil { + clnt.deleteContainer(containerID) + return err + } + + logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID) + return nil + +} + +// AddProcess is the handler for adding a process to an already running +// container. It's called through docker exec. It returns the system pid of the +// exec'd process. +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return -1, err + } + // Note we always tell HCS to + // create stdout as it's required regardless of '-i' or '-t' options, so that + // docker can always grab the output through logs. We also tell HCS to always + // create stdin, even if it's not used - it will be closed shortly. Stderr + // is only created if it we're not -t. + createProcessParms := hcsshim.ProcessConfig{ + EmulateConsole: procToAdd.Terminal, + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: !procToAdd.Terminal, + } + createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width) + + // Take working directory from the process to add if it is defined, + // otherwise take from the first process. + if procToAdd.Cwd != "" { + createProcessParms.WorkingDirectory = procToAdd.Cwd + } else { + createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd + } + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) + createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") + createProcessParms.User = procToAdd.User.Username + + logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine) + + // Start the command running in the container. + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err) + return -1, err + } + + pid := newProcess.Pid() + + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err) + return -1, err + } + + iopipe := &IOPipe{Terminal: procToAdd.Terminal} + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + proc := &process{ + processCommon: processCommon{ + containerID: containerID, + friendlyName: processFriendlyName, + client: clnt, + systemPid: uint32(pid), + }, + commandLine: createProcessParms.CommandLine, + hcsProcess: newProcess, + } + + // Add the process to the container's list of processes + container.processes[processFriendlyName] = proc + + // Tell the engine to attach streams back to the client + if err := attachStdio(*iopipe); err != nil { + return -1, err + } + + // Spin up a go routine waiting for exit to handle cleanup + go container.waitExit(proc, false) + + return pid, nil +} + +// Signal handles `docker stop` on Windows. While Linux has support for +// the full range of signals, signals aren't really implemented on Windows. +// We fake supporting regular stop and -9 to force kill. +func (clnt *client) Signal(containerID string, sig int) error { + var ( + cont *container + err error + ) + + // Get the container as we need it to get the container handle. + clnt.lock(containerID) + defer clnt.unlock(containerID) + if cont, err = clnt.getContainer(containerID); err != nil { + return err + } + + cont.manualStopRequested = true + + logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid) + + if syscall.Signal(sig) == syscall.SIGKILL { + // Terminate the compute system + if err := cont.hcsContainer.Terminate(); err != nil { + if !hcsshim.IsPending(err) { + logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err) + } + } + } else { + // Terminate Process + if err := cont.hcsProcess.Kill(); err != nil && !hcsshim.IsAlreadyStopped(err) { + // ignore errors + logrus.Warnf("libcontainerd: failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err) + } + } + + return nil +} + +// While Linux has support for the full range of signals, signals aren't really implemented on Windows. +// We try to terminate the specified process whatever signal is requested. +func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + return p.hcsProcess.Kill() + } + } + + return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID) +} + +// Resize handles a CLI event to resize an interactive docker run or docker exec +// window. +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + h, w := uint16(height), uint16(width) + + if processFriendlyName == InitFriendlyName { + logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid) + return cont.process.hcsProcess.ResizeConsole(w, h) + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid) + return p.hcsProcess.ResizeConsole(w, h) + } + } + + return fmt.Errorf("Resize could not find containerID %s to resize", containerID) + +} + +// Pause handles pause requests for containers +func (clnt *client) Pause(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + clnt.unlock(containerID) + } + }() + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, option := range container.options { + if h, ok := option.(*HyperVIsolationOption); ok { + if !h.IsHyperV { + return errors.New("cannot pause Windows Server Containers") + } + break + } + } + + err = container.hcsContainer.Pause() + if err != nil { + return err + } + + // Unlock container before calling back into the daemon + unlockContainer = false + clnt.unlock(containerID) + + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StatePause, + }}) +} + +// Resume handles resume requests for containers +func (clnt *client) Resume(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + clnt.unlock(containerID) + } + }() + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + // This should never happen, since Windows Server Containers cannot be paused + for _, option := range container.options { + if h, ok := option.(*HyperVIsolationOption); ok { + if !h.IsHyperV { + return errors.New("cannot resume Windows Server Containers") + } + break + } + } + + err = container.hcsContainer.Resume() + if err != nil { + return err + } + + // Unlock container before calling back into the daemon + unlockContainer = false + clnt.unlock(containerID) + + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateResume, + }}) +} + +// Stats handles stats requests for containers +func (clnt *client) Stats(containerID string) (*Stats, error) { + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return nil, err + } + s, err := container.hcsContainer.Statistics() + if err != nil { + return nil, err + } + st := Stats(s) + return &st, nil +} + +// Restore is the handler for restoring a container +func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error { + logrus.Debugf("libcontainerd: Restore(%s)", containerID) + + // TODO Windows: On RS1, a re-attach isn't possible. + // However, there is a scenario in which there is an issue. + // Consider a background container. The daemon dies unexpectedly. + // HCS will still have the compute service alive and running. + // For consistence, we call in to shoot it regardless if HCS knows about it + // We explicitly just log a warning if the terminate fails. + // Then we tell the backend the container exited. + if hc, err := hcsshim.OpenContainer(containerID); err == nil { + if err := hc.Terminate(); err != nil { + if !hcsshim.IsPending(err) { + logrus.Warnf("libcontainerd: failed to terminate %s on restore - %q", containerID, err) + } + } + } + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: 1 << 31, + }}) +} + +// GetPidsForContainer returns a list of process IDs running in a container. +// Although implemented, this is not used in Windows. +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + var pids []int + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return nil, err + } + + // Add the first process + pids = append(pids, int(cont.containerCommon.systemPid)) + // And add all the exec'd processes + for _, p := range cont.processes { + pids = append(pids, int(p.processCommon.systemPid)) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is present in Windows to support docker top. In linux, the +// engine shells out to ps to get process information. On Windows, as +// the containers could be Hyper-V containers, they would not be +// visible on the container host. However, libcontainerd does have +// that information. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return nil, err + } + p, err := container.hcsContainer.ProcessList() + if err != nil { + return nil, err + } + pl := make([]Summary, len(p)) + for i := range p { + pl[i] = Summary(p[i]) + } + return pl, nil +} + +// UpdateResources updates resources for a running container. +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + // Updating resource isn't supported on Windows + // but we should return nil for enabling updating container + return nil +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + return errors.New("Windows: Containers do not support checkpoints") +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + return errors.New("Windows: Containers do not support checkpoints") +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + return nil, errors.New("Windows: Containers do not support checkpoints") +} + +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + return &ServerVersion{}, nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/container.go b/vendor/github.com/moby/moby/libcontainerd/container.go new file mode 100644 index 0000000..b403213 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/container.go @@ -0,0 +1,13 @@ +package libcontainerd + +const ( + // InitFriendlyName is the name given in the lookup map of processes + // for the first process started in a container. + InitFriendlyName = "init" + configFilename = "config.json" +) + +type containerCommon struct { + process + processes map[string]*process +} diff --git a/vendor/github.com/moby/moby/libcontainerd/container_unix.go b/vendor/github.com/moby/moby/libcontainerd/container_unix.go new file mode 100644 index 0000000..61bab14 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/container_unix.go @@ -0,0 +1,250 @@ +// +build linux solaris + +package libcontainerd + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/ioutils" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/tonistiigi/fifo" + "golang.org/x/net/context" +) + +type container struct { + containerCommon + + // Platform specific fields are below here. + pauseMonitor + oom bool + runtime string + runtimeArgs []string +} + +type runtime struct { + path string + args []string +} + +// WithRuntime sets the runtime to be used for the created container +func WithRuntime(path string, args []string) CreateOption { + return runtime{path, args} +} + +func (rt runtime) Apply(p interface{}) error { + if pr, ok := p.(*container); ok { + pr.runtime = rt.path + pr.runtimeArgs = rt.args + } + return nil +} + +func (ctr *container) clean() error { + if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" { + return nil + } + if _, err := os.Lstat(ctr.dir); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + if err := os.RemoveAll(ctr.dir); err != nil { + return err + } + return nil +} + +// cleanProcess removes the fifos used by an additional process. +// Caller needs to lock container ID before calling this method. +func (ctr *container) cleanProcess(id string) { + if p, ok := ctr.processes[id]; ok { + for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { + if err := os.Remove(p.fifo(i)); err != nil && !os.IsNotExist(err) { + logrus.Warnf("libcontainerd: failed to remove %v for process %v: %v", p.fifo(i), id, err) + } + } + } + delete(ctr.processes, id) +} + +func (ctr *container) spec() (*specs.Spec, error) { + var spec specs.Spec + dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +func (ctr *container) start(checkpoint string, checkpointDir string, attachStdio StdioCallback) (err error) { + spec, err := ctr.spec() + if err != nil { + return nil + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ready := make(chan struct{}) + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := ctr.openFifos(fifoCtx, spec.Process.Terminal) + if err != nil { + return err + } + + var stdinOnce sync.Once + + // we need to delay stdin closure after container start or else "stdin close" + // event will be rejected by containerd. + // stdin closure happens in attachStdio + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + go func() { + select { + case <-ready: + case <-ctx.Done(): + } + select { + case <-ready: + if err := ctr.sendCloseStdin(); err != nil { + logrus.Warnf("failed to close stdin: %+v", err) + } + default: + } + }() + }) + return err + }) + + r := &containerd.CreateContainerRequest{ + Id: ctr.containerID, + BundlePath: ctr.dir, + Stdin: ctr.fifo(syscall.Stdin), + Stdout: ctr.fifo(syscall.Stdout), + Stderr: ctr.fifo(syscall.Stderr), + Checkpoint: checkpoint, + CheckpointDir: checkpointDir, + // check to see if we are running in ramdisk to disable pivot root + NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", + Runtime: ctr.runtime, + RuntimeArgs: ctr.runtimeArgs, + } + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + ctr.closeFifos(iopipe) + return err + } + + resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) + if err != nil { + ctr.closeFifos(iopipe) + return err + } + ctr.systemPid = systemPid(resp.Container) + close(ready) + + return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateStart, + Pid: ctr.systemPid, + }}) +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + dir: ctr.dir, + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +func (ctr *container) handleEvent(e *containerd.Event) error { + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + switch e.Type { + case StateExit, StatePause, StateResume, StateOOM: + st := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: e.Type, + ExitCode: e.Status, + }, + OOMKilled: e.Type == StateExit && ctr.oom, + } + if e.Type == StateOOM { + ctr.oom = true + } + if e.Type == StateExit && e.Pid != InitFriendlyName { + st.ProcessID = e.Pid + st.State = StateExitProcess + } + + // Remove process from list if we have exited + switch st.State { + case StateExit: + ctr.clean() + ctr.client.deleteContainer(e.Id) + case StateExitProcess: + ctr.cleanProcess(st.ProcessID) + } + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Errorf("libcontainerd: backend.StateChanged(): %v", err) + } + if e.Type == StatePause || e.Type == StateResume { + ctr.pauseMonitor.handle(e.Type) + } + if e.Type == StateExit { + if en := ctr.client.getExitNotifier(e.Id); en != nil { + en.close() + } + } + }) + + default: + logrus.Debugf("libcontainerd: event unhandled: %+v", e) + } + return nil +} + +// discardFifos attempts to fully read the container fifos to unblock processes +// that may be blocked on the writer side. +func (ctr *container) discardFifos() { + ctx, _ := context.WithTimeout(context.Background(), 3*time.Second) + for _, i := range []int{syscall.Stdout, syscall.Stderr} { + f, err := fifo.OpenFifo(ctx, ctr.fifo(i), syscall.O_RDONLY|syscall.O_NONBLOCK, 0) + if err != nil { + logrus.Warnf("error opening fifo %v for discarding: %+v", f, err) + continue + } + go func() { + io.Copy(ioutil.Discard, f) + }() + } +} diff --git a/vendor/github.com/moby/moby/libcontainerd/container_windows.go b/vendor/github.com/moby/moby/libcontainerd/container_windows.go new file mode 100644 index 0000000..9b19650 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/container_windows.go @@ -0,0 +1,311 @@ +package libcontainerd + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "syscall" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runtime-spec/specs-go" +) + +type container struct { + containerCommon + + // Platform specific fields are below here. There are none presently on Windows. + options []CreateOption + + // The ociSpec is required, as client.Create() needs a spec, + // but can be called from the RestartManager context which does not + // otherwise have access to the Spec + ociSpec specs.Spec + + manualStopRequested bool + hcsContainer hcsshim.Container +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +// start starts a created container. +// Caller needs to lock container ID before calling this method. +func (ctr *container) start(attachStdio StdioCallback) error { + var err error + isServicing := false + + for _, option := range ctr.options { + if s, ok := option.(*ServicingOption); ok && s.IsServicing { + isServicing = true + } + } + + // Start the container. If this is a servicing container, this call will block + // until the container is done with the servicing execution. + logrus.Debugln("libcontainerd: starting container ", ctr.containerID) + if err = ctr.hcsContainer.Start(); err != nil { + logrus.Errorf("libcontainerd: failed to start container: %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed Start. %s", err) + } else { + logrus.Debugln("libcontainerd: cleaned up after failed Start by calling Terminate") + } + return err + } + + // Note we always tell HCS to + // create stdout as it's required regardless of '-i' or '-t' options, so that + // docker can always grab the output through logs. We also tell HCS to always + // create stdin, even if it's not used - it will be closed shortly. Stderr + // is only created if it we're not -t. + createProcessParms := &hcsshim.ProcessConfig{ + EmulateConsole: ctr.ociSpec.Process.Terminal, + WorkingDirectory: ctr.ociSpec.Process.Cwd, + CreateStdInPipe: !isServicing, + CreateStdOutPipe: !isServicing, + CreateStdErrPipe: !ctr.ociSpec.Process.Terminal && !isServicing, + } + createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width) + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) + createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") + createProcessParms.User = ctr.ociSpec.Process.User.Username + + // Start the command running in the container. + newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: CreateProcess() failed %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed CreateProcess. %s", err) + } else { + logrus.Debugln("libcontainerd: cleaned up after failed CreateProcess by calling Terminate") + } + return err + } + + pid := newProcess.Pid() + + // Save the hcs Process and PID + ctr.process.friendlyName = InitFriendlyName + ctr.process.hcsProcess = newProcess + + // If this is a servicing container, wait on the process synchronously here and + // if it succeeds, wait for it cleanly shutdown and merge into the parent container. + if isServicing { + exitCode := ctr.waitProcessExitCode(&ctr.process) + + if exitCode != 0 { + if err := ctr.terminate(); err != nil { + logrus.Warnf("libcontainerd: terminating servicing container %s failed: %s", ctr.containerID, err) + } + return fmt.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.containerID, exitCode) + } + + return ctr.hcsContainer.WaitTimeout(time.Minute * 5) + } + + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: failed to get stdio pipes: %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed Stdio. %s", err) + } + return err + } + + iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal} + + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + // Save the PID + logrus.Debugf("libcontainerd: process started - PID %d", pid) + ctr.systemPid = uint32(pid) + + // Spin up a go routine waiting for exit to handle cleanup + go ctr.waitExit(&ctr.process, true) + + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + // OK to return the error here, as waitExit will handle tear-down in HCS + return err + } + + // Tell the docker engine that the container has started. + si := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateStart, + Pid: ctr.systemPid, // Not sure this is needed? Double-check monitor.go in daemon BUGBUG @jhowardmsft + }} + logrus.Debugf("libcontainerd: start() completed OK, %+v", si) + return ctr.client.backend.StateChanged(ctr.containerID, si) + +} + +// waitProcessExitCode will wait for the given process to exit and return its error code. +func (ctr *container) waitProcessExitCode(process *process) int { + // Block indefinitely for the process to exit. + err := process.hcsProcess.Wait() + if err != nil { + if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { + logrus.Warnf("libcontainerd: Wait() failed (container may have been killed): %s", err) + } + // Fall through here, do not return. This ensures we attempt to continue the + // shutdown in HCS and tell the docker engine that the process/container + // has exited to avoid a container being dropped on the floor. + } + + exitCode, err := process.hcsProcess.ExitCode() + if err != nil { + if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { + logrus.Warnf("libcontainerd: unable to get exit code from container %s", ctr.containerID) + } + // Since we got an error retrieving the exit code, make sure that the code we return + // doesn't incorrectly indicate success. + exitCode = -1 + + // Fall through here, do not return. This ensures we attempt to continue the + // shutdown in HCS and tell the docker engine that the process/container + // has exited to avoid a container being dropped on the floor. + } + + return exitCode +} + +// waitExit runs as a goroutine waiting for the process to exit. It's +// equivalent to (in the linux containerd world) where events come in for +// state change notifications from containerd. +func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) error { + logrus.Debugln("libcontainerd: waitExit() on pid", process.systemPid) + + exitCode := ctr.waitProcessExitCode(process) + // Lock the container while shutting down + ctr.client.lock(ctr.containerID) + + // Assume the container has exited + si := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: uint32(exitCode), + Pid: process.systemPid, + ProcessID: process.friendlyName, + }, + UpdatePending: false, + } + + // But it could have been an exec'd process which exited + if !isFirstProcessToStart { + si.State = StateExitProcess + ctr.cleanProcess(process.friendlyName) + } else { + updatePending, err := ctr.hcsContainer.HasPendingUpdates() + if err != nil { + logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err) + } else { + si.UpdatePending = updatePending + } + + logrus.Debugf("libcontainerd: shutting down container %s", ctr.containerID) + if err := ctr.shutdown(); err != nil { + logrus.Debugf("libcontainerd: failed to shutdown container %s", ctr.containerID) + } else { + logrus.Debugf("libcontainerd: completed shutting down container %s", ctr.containerID) + } + if err := ctr.hcsContainer.Close(); err != nil { + logrus.Error(err) + } + + // Remove process from list if we have exited + if si.State == StateExit { + ctr.client.deleteContainer(ctr.containerID) + } + } + + if err := process.hcsProcess.Close(); err != nil { + logrus.Errorf("libcontainerd: hcsProcess.Close(): %v", err) + } + + // Unlock here before we call back into the daemon to update state + ctr.client.unlock(ctr.containerID) + + // Call into the backend to notify it of the state change. + logrus.Debugf("libcontainerd: waitExit() calling backend.StateChanged %+v", si) + if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { + logrus.Error(err) + } + + logrus.Debugf("libcontainerd: waitExit() completed OK, %+v", si) + + return nil +} + +// cleanProcess removes process from the map. +// Caller needs to lock container ID before calling this method. +func (ctr *container) cleanProcess(id string) { + delete(ctr.processes, id) +} + +// shutdown shuts down the container in HCS +// Caller needs to lock container ID before calling this method. +func (ctr *container) shutdown() error { + const shutdownTimeout = time.Minute * 5 + err := ctr.hcsContainer.Shutdown() + if hcsshim.IsPending(err) { + // Explicit timeout to avoid a (remote) possibility that shutdown hangs indefinitely. + err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error shutting down container %s %v calling terminate", ctr.containerID, err) + if err := ctr.terminate(); err != nil { + return err + } + return err + } + + return nil +} + +// terminate terminates the container in HCS +// Caller needs to lock container ID before calling this method. +func (ctr *container) terminate() error { + const terminateTimeout = time.Minute * 5 + err := ctr.hcsContainer.Terminate() + + if hcsshim.IsPending(err) { + err = ctr.hcsContainer.WaitTimeout(terminateTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error terminating container %s %v", ctr.containerID, err) + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/oom_linux.go b/vendor/github.com/moby/moby/libcontainerd/oom_linux.go new file mode 100644 index 0000000..e126b7a --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/oom_linux.go @@ -0,0 +1,31 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/system" +) + +func setOOMScore(pid, score int) error { + oomScoreAdjPath := fmt.Sprintf("/proc/%d/oom_score_adj", pid) + f, err := os.OpenFile(oomScoreAdjPath, os.O_WRONLY, 0) + if err != nil { + return err + } + stringScore := strconv.Itoa(score) + _, err = f.WriteString(stringScore) + f.Close() + if os.IsPermission(err) { + // Setting oom_score_adj does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. + if !system.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to %s", stringScore, oomScoreAdjPath) + } + return nil + } + return err +} diff --git a/vendor/github.com/moby/moby/libcontainerd/oom_solaris.go b/vendor/github.com/moby/moby/libcontainerd/oom_solaris.go new file mode 100644 index 0000000..2ebe5e8 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/oom_solaris.go @@ -0,0 +1,5 @@ +package libcontainerd + +func setOOMScore(pid, score int) error { + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/pausemonitor_unix.go b/vendor/github.com/moby/moby/libcontainerd/pausemonitor_unix.go new file mode 100644 index 0000000..4f3766d --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/pausemonitor_unix.go @@ -0,0 +1,42 @@ +// +build !windows + +package libcontainerd + +import ( + "sync" +) + +// pauseMonitor is helper to get notifications from pause state changes. +type pauseMonitor struct { + sync.Mutex + waiters map[string][]chan struct{} +} + +func (m *pauseMonitor) handle(t string) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + return + } + q, ok := m.waiters[t] + if !ok { + return + } + if len(q) > 0 { + close(q[0]) + m.waiters[t] = q[1:] + } +} + +func (m *pauseMonitor) append(t string, waiter chan struct{}) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + m.waiters = make(map[string][]chan struct{}) + } + _, ok := m.waiters[t] + if !ok { + m.waiters[t] = make([]chan struct{}, 0) + } + m.waiters[t] = append(m.waiters[t], waiter) +} diff --git a/vendor/github.com/moby/moby/libcontainerd/process.go b/vendor/github.com/moby/moby/libcontainerd/process.go new file mode 100644 index 0000000..57562c8 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/process.go @@ -0,0 +1,18 @@ +package libcontainerd + +// processCommon are the platform common fields as part of the process structure +// which keeps the state for the main container process, as well as any exec +// processes. +type processCommon struct { + client *client + + // containerID is the Container ID + containerID string + + // friendlyName is an identifier for the process (or `InitFriendlyName` + // for the first process) + friendlyName string + + // systemPid is the PID of the main container process + systemPid uint32 +} diff --git a/vendor/github.com/moby/moby/libcontainerd/process_unix.go b/vendor/github.com/moby/moby/libcontainerd/process_unix.go new file mode 100644 index 0000000..506fca6 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/process_unix.go @@ -0,0 +1,107 @@ +// +build linux solaris + +package libcontainerd + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + goruntime "runtime" + "strings" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/tonistiigi/fifo" + "golang.org/x/net/context" + "golang.org/x/sys/unix" +) + +var fdNames = map[int]string{ + unix.Stdin: "stdin", + unix.Stdout: "stdout", + unix.Stderr: "stderr", +} + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon + + // Platform specific fields are below here. + dir string +} + +func (p *process) openFifos(ctx context.Context, terminal bool) (pipe *IOPipe, err error) { + if err := os.MkdirAll(p.dir, 0700); err != nil { + return nil, err + } + + io := &IOPipe{} + + io.Stdin, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdin), unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + io.Stdin.Close() + } + }() + + io.Stdout, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdout), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + io.Stdout.Close() + } + }() + + if goruntime.GOOS == "solaris" || !terminal { + // For Solaris terminal handling is done exclusively by the runtime therefore we make no distinction + // in the processing for terminal and !terminal cases. + io.Stderr, err = fifo.OpenFifo(ctx, p.fifo(unix.Stderr), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + io.Stderr.Close() + } + }() + } else { + io.Stderr = ioutil.NopCloser(emptyReader{}) + } + + return io, nil +} + +func (p *process) sendCloseStdin() error { + _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: p.containerID, + Pid: p.friendlyName, + CloseStdin: true, + }) + if err != nil && (strings.Contains(err.Error(), "container not found") || strings.Contains(err.Error(), "process not found")) { + return nil + } + return err +} + +func (p *process) closeFifos(io *IOPipe) { + io.Stdin.Close() + io.Stdout.Close() + io.Stderr.Close() +} + +type emptyReader struct{} + +func (r emptyReader) Read(b []byte) (int, error) { + return 0, io.EOF +} + +func (p *process) fifo(index int) string { + return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) +} diff --git a/vendor/github.com/moby/moby/libcontainerd/process_windows.go b/vendor/github.com/moby/moby/libcontainerd/process_windows.go new file mode 100644 index 0000000..57ecc94 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/process_windows.go @@ -0,0 +1,51 @@ +package libcontainerd + +import ( + "io" + "sync" + + "github.com/Microsoft/hcsshim" + "github.com/docker/docker/pkg/ioutils" +) + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon + + // Platform specific fields are below here. + + // commandLine is to support returning summary information for docker top + commandLine string + hcsProcess hcsshim.Process +} + +type autoClosingReader struct { + io.ReadCloser + sync.Once +} + +func (r *autoClosingReader) Read(b []byte) (n int, err error) { + n, err = r.ReadCloser.Read(b) + if err == io.EOF { + r.Once.Do(func() { r.ReadCloser.Close() }) + } + return +} + +func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(pipe, func() error { + if err := pipe.Close(); err != nil { + return err + } + + err := process.CloseStdin() + if err != nil && !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyClosed(err) { + // This error will occur if the compute system is currently shutting down + if perr, ok := err.(*hcsshim.ProcessError); ok && perr.Err != hcsshim.ErrVmcomputeOperationInvalidState { + return err + } + } + + return nil + }) +} diff --git a/vendor/github.com/moby/moby/libcontainerd/queue_unix.go b/vendor/github.com/moby/moby/libcontainerd/queue_unix.go new file mode 100644 index 0000000..b848b98 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/queue_unix.go @@ -0,0 +1,31 @@ +// +build linux solaris + +package libcontainerd + +import "sync" + +type queue struct { + sync.Mutex + fns map[string]chan struct{} +} + +func (q *queue) append(id string, f func()) { + q.Lock() + defer q.Unlock() + + if q.fns == nil { + q.fns = make(map[string]chan struct{}) + } + + done := make(chan struct{}) + + fn, ok := q.fns[id] + q.fns[id] = done + go func() { + if ok { + <-fn + } + f() + close(done) + }() +} diff --git a/vendor/github.com/moby/moby/libcontainerd/remote.go b/vendor/github.com/moby/moby/libcontainerd/remote.go new file mode 100644 index 0000000..9031e3a --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/remote.go @@ -0,0 +1,20 @@ +package libcontainerd + +// Remote on Linux defines the accesspoint to the containerd grpc API. +// Remote on Windows is largely an unimplemented interface as there is +// no remote containerd. +type Remote interface { + // Client returns a new Client instance connected with given Backend. + Client(Backend) (Client, error) + // Cleanup stops containerd if it was started by libcontainerd. + // Note this is not used on Windows as there is no remote containerd. + Cleanup() + // UpdateOptions allows various remote options to be updated at runtime. + UpdateOptions(...RemoteOption) error +} + +// RemoteOption allows to configure parameters of remotes. +// This is unused on Windows. +type RemoteOption interface { + Apply(Remote) error +} diff --git a/vendor/github.com/moby/moby/libcontainerd/remote_unix.go b/vendor/github.com/moby/moby/libcontainerd/remote_unix.go new file mode 100644 index 0000000..64a2864 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/remote_unix.go @@ -0,0 +1,544 @@ +// +build linux solaris + +package libcontainerd + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "path/filepath" + goruntime "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/locker" + sysinfo "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/transport" +) + +const ( + maxConnectionRetryCount = 3 + containerdHealthCheckTimeout = 3 * time.Second + containerdShutdownTimeout = 15 * time.Second + containerdBinary = "docker-containerd" + containerdPidFilename = "docker-containerd.pid" + containerdSockFilename = "docker-containerd.sock" + containerdStateDir = "containerd" + eventTimestampFilename = "event.ts" +) + +type remote struct { + sync.RWMutex + apiClient containerd.APIClient + daemonPid int + stateDir string + rpcAddr string + startDaemon bool + closeManually bool + debugLog bool + rpcConn *grpc.ClientConn + clients []*client + eventTsPath string + runtime string + runtimeArgs []string + daemonWaitCh chan struct{} + liveRestore bool + oomScore int + restoreFromTimestamp *timestamp.Timestamp +} + +// New creates a fresh instance of libcontainerd remote. +func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err) + } + }() + r := &remote{ + stateDir: stateDir, + daemonPid: -1, + eventTsPath: filepath.Join(stateDir, eventTimestampFilename), + } + for _, option := range options { + if err := option.Apply(r); err != nil { + return nil, err + } + } + + if err := sysinfo.MkdirAll(stateDir, 0700); err != nil { + return nil, err + } + + if r.rpcAddr == "" { + r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) + } + + if r.startDaemon { + if err := r.runContainerdDaemon(); err != nil { + return nil, err + } + } + + // don't output the grpc reconnect logging + grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) + dialOpts := append([]grpc.DialOption{grpc.WithInsecure()}, + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + }), + ) + conn, err := grpc.Dial(r.rpcAddr, dialOpts...) + if err != nil { + return nil, fmt.Errorf("error connecting to containerd: %v", err) + } + + r.rpcConn = conn + r.apiClient = containerd.NewAPIClient(conn) + + // Get the timestamp to restore from + t := r.getLastEventTimestamp() + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) + } + r.restoreFromTimestamp = tsp + + go r.handleConnectionChange() + + if err := r.startEventsMonitor(); err != nil { + return nil, err + } + + return r, nil +} + +func (r *remote) UpdateOptions(options ...RemoteOption) error { + for _, option := range options { + if err := option.Apply(r); err != nil { + return err + } + } + return nil +} + +func (r *remote) handleConnectionChange() { + var transientFailureCount = 0 + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + healthClient := grpc_health_v1.NewHealthClient(r.rpcConn) + + for { + <-ticker.C + ctx, cancel := context.WithTimeout(context.Background(), containerdHealthCheckTimeout) + _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) + cancel() + if err == nil { + continue + } + + logrus.Debugf("libcontainerd: containerd health check returned error: %v", err) + + if r.daemonPid != -1 { + if strings.Contains(err.Error(), "is closing") { + // Well, we asked for it to stop, just return + return + } + // all other errors are transient + // Reset state to be notified of next failure + transientFailureCount++ + if transientFailureCount >= maxConnectionRetryCount { + transientFailureCount = 0 + if utils.IsProcessAlive(r.daemonPid) { + utils.KillProcess(r.daemonPid) + } + <-r.daemonWaitCh + if err := r.runContainerdDaemon(); err != nil { //FIXME: Handle error + logrus.Errorf("libcontainerd: error restarting containerd: %v", err) + } + continue + } + } + } +} + +func (r *remote) Cleanup() { + if r.daemonPid == -1 { + return + } + r.closeManually = true + r.rpcConn.Close() + // Ask the daemon to quit + syscall.Kill(r.daemonPid, syscall.SIGTERM) + + // Wait up to 15secs for it to stop + for i := time.Duration(0); i < containerdShutdownTimeout; i += time.Second { + if !utils.IsProcessAlive(r.daemonPid) { + break + } + time.Sleep(time.Second) + } + + if utils.IsProcessAlive(r.daemonPid) { + logrus.Warnf("libcontainerd: containerd (%d) didn't stop within 15 secs, killing it\n", r.daemonPid) + syscall.Kill(r.daemonPid, syscall.SIGKILL) + } + + // cleanup some files + os.Remove(filepath.Join(r.stateDir, containerdPidFilename)) + os.Remove(filepath.Join(r.stateDir, containerdSockFilename)) +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + remote: r, + exitNotifiers: make(map[string]*exitNotifier), + liveRestore: r.liveRestore, + } + + r.Lock() + r.clients = append(r.clients, c) + r.Unlock() + return c, nil +} + +func (r *remote) updateEventTimestamp(t time.Time) { + f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) + if err != nil { + logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) + return + } + defer f.Close() + + b, err := t.MarshalText() + if err != nil { + logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) + return + } + + n, err := f.Write(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) + f.Truncate(0) + return + } +} + +func (r *remote) getLastEventTimestamp() time.Time { + t := time.Now() + + fi, err := os.Stat(r.eventTsPath) + if os.IsNotExist(err) || fi.Size() == 0 { + return t + } + + f, err := os.Open(r.eventTsPath) + if err != nil { + logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err) + return t + } + defer f.Close() + + b := make([]byte, fi.Size()) + n, err := f.Read(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err) + return t + } + + t.UnmarshalText(b) + + return t +} + +func (r *remote) startEventsMonitor() error { + // First, get past events + t := r.getLastEventTimestamp() + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) + } + er := &containerd.EventsRequest{ + Timestamp: tsp, + } + events, err := r.apiClient.Events(context.Background(), er, grpc.FailFast(false)) + if err != nil { + return err + } + go r.handleEventStream(events) + return nil +} + +func (r *remote) handleEventStream(events containerd.API_EventsClient) { + for { + e, err := events.Recv() + if err != nil { + if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && + r.closeManually { + // ignore error if grpc remote connection is closed manually + return + } + logrus.Errorf("libcontainerd: failed to receive event from containerd: %v", err) + go r.startEventsMonitor() + return + } + + logrus.Debugf("libcontainerd: received containerd event: %#v", e) + + var container *container + var c *client + r.RLock() + for _, c = range r.clients { + container, err = c.getContainer(e.Id) + if err == nil { + break + } + } + r.RUnlock() + if container == nil { + logrus.Warnf("libcontainerd: unknown container %s", e.Id) + continue + } + + if err := container.handleEvent(e); err != nil { + logrus.Errorf("libcontainerd: error processing state change for %s: %v", e.Id, err) + } + + tsp, err := ptypes.Timestamp(e.Timestamp) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert event timestamp: %q", err) + continue + } + + r.updateEventTimestamp(tsp) + } +} + +func (r *remote) runContainerdDaemon() error { + pidFilename := filepath.Join(r.stateDir, containerdPidFilename) + f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return err + } + defer f.Close() + + // File exist, check if the daemon is alive + b := make([]byte, 8) + n, err := f.Read(b) + if err != nil && err != io.EOF { + return err + } + + if n > 0 { + pid, err := strconv.ParseUint(string(b[:n]), 10, 64) + if err != nil { + return err + } + if utils.IsProcessAlive(int(pid)) { + logrus.Infof("libcontainerd: previous instance of containerd still alive (%d)", pid) + r.daemonPid = int(pid) + return nil + } + } + + // rewind the file + _, err = f.Seek(0, os.SEEK_SET) + if err != nil { + return err + } + + // Truncate it + err = f.Truncate(0) + if err != nil { + return err + } + + // Start a new instance + args := []string{ + "-l", fmt.Sprintf("unix://%s", r.rpcAddr), + "--metrics-interval=0", + "--start-timeout", "2m", + "--state-dir", filepath.Join(r.stateDir, containerdStateDir), + } + if goruntime.GOOS == "solaris" { + args = append(args, "--shim", "containerd-shim", "--runtime", "runc") + } else { + args = append(args, "--shim", "docker-containerd-shim") + if r.runtime != "" { + args = append(args, "--runtime") + args = append(args, r.runtime) + } + } + if r.debugLog { + args = append(args, "--debug") + } + if len(r.runtimeArgs) > 0 { + for _, v := range r.runtimeArgs { + args = append(args, "--runtime-args") + args = append(args, v) + } + logrus.Debugf("libcontainerd: runContainerdDaemon: runtimeArgs: %s", args) + } + + cmd := exec.Command(containerdBinary, args...) + // redirect containerd logs to docker logs + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.SysProcAttr = setSysProcAttr(true) + cmd.Env = nil + // clear the NOTIFY_SOCKET from the env when starting containerd + for _, e := range os.Environ() { + if !strings.HasPrefix(e, "NOTIFY_SOCKET") { + cmd.Env = append(cmd.Env, e) + } + } + if err := cmd.Start(); err != nil { + return err + } + logrus.Infof("libcontainerd: new containerd process, pid: %d", cmd.Process.Pid) + if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil { + utils.KillProcess(cmd.Process.Pid) + return err + } + if _, err := f.WriteString(fmt.Sprintf("%d", cmd.Process.Pid)); err != nil { + utils.KillProcess(cmd.Process.Pid) + return err + } + + r.daemonWaitCh = make(chan struct{}) + go func() { + cmd.Wait() + close(r.daemonWaitCh) + }() // Reap our child when needed + r.daemonPid = cmd.Process.Pid + return nil +} + +// WithRemoteAddr sets the external containerd socket to connect to. +func WithRemoteAddr(addr string) RemoteOption { + return rpcAddr(addr) +} + +type rpcAddr string + +func (a rpcAddr) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.rpcAddr = string(a) + return nil + } + return fmt.Errorf("WithRemoteAddr option not supported for this remote") +} + +// WithRuntimePath sets the path of the runtime to be used as the +// default by containerd +func WithRuntimePath(rt string) RemoteOption { + return runtimePath(rt) +} + +type runtimePath string + +func (rt runtimePath) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtime = string(rt) + return nil + } + return fmt.Errorf("WithRuntime option not supported for this remote") +} + +// WithRuntimeArgs sets the list of runtime args passed to containerd +func WithRuntimeArgs(args []string) RemoteOption { + return runtimeArgs(args) +} + +type runtimeArgs []string + +func (rt runtimeArgs) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtimeArgs = rt + return nil + } + return fmt.Errorf("WithRuntimeArgs option not supported for this remote") +} + +// WithStartDaemon defines if libcontainerd should also run containerd daemon. +func WithStartDaemon(start bool) RemoteOption { + return startDaemon(start) +} + +type startDaemon bool + +func (s startDaemon) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.startDaemon = bool(s) + return nil + } + return fmt.Errorf("WithStartDaemon option not supported for this remote") +} + +// WithDebugLog defines if containerd debug logs will be enabled for daemon. +func WithDebugLog(debug bool) RemoteOption { + return debugLog(debug) +} + +type debugLog bool + +func (d debugLog) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.debugLog = bool(d) + return nil + } + return fmt.Errorf("WithDebugLog option not supported for this remote") +} + +// WithLiveRestore defines if containers are stopped on shutdown or restored. +func WithLiveRestore(v bool) RemoteOption { + return liveRestore(v) +} + +type liveRestore bool + +func (l liveRestore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.liveRestore = bool(l) + for _, c := range remote.clients { + c.liveRestore = bool(l) + } + return nil + } + return fmt.Errorf("WithLiveRestore option not supported for this remote") +} + +// WithOOMScore defines the oom_score_adj to set for the containerd process. +func WithOOMScore(score int) RemoteOption { + return oomScore(score) +} + +type oomScore int + +func (o oomScore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.oomScore = int(o) + return nil + } + return fmt.Errorf("WithOOMScore option not supported for this remote") +} diff --git a/vendor/github.com/moby/moby/libcontainerd/remote_windows.go b/vendor/github.com/moby/moby/libcontainerd/remote_windows.go new file mode 100644 index 0000000..74c1044 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/remote_windows.go @@ -0,0 +1,36 @@ +package libcontainerd + +import "github.com/docker/docker/pkg/locker" + +type remote struct { +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + } + return c, nil +} + +// Cleanup is a no-op on Windows. It is here to implement the interface. +func (r *remote) Cleanup() { +} + +func (r *remote) UpdateOptions(opts ...RemoteOption) error { + return nil +} + +// New creates a fresh instance of libcontainerd remote. On Windows, +// this is not used as there is no remote containerd process. +func New(_ string, _ ...RemoteOption) (Remote, error) { + return &remote{}, nil +} + +// WithLiveRestore is a noop on windows. +func WithLiveRestore(v bool) RemoteOption { + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/types.go b/vendor/github.com/moby/moby/libcontainerd/types.go new file mode 100644 index 0000000..3d981e3 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/types.go @@ -0,0 +1,75 @@ +package libcontainerd + +import ( + "io" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +// State constants used in state change reporting. +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateRestore = "restore" + StateExitProcess = "exit-process" + StateOOM = "oom" // fake state +) + +// CommonStateInfo contains the state info common to all platforms. +type CommonStateInfo struct { // FIXME: event? + State string + Pid uint32 + ExitCode uint32 + ProcessID string +} + +// Backend defines callbacks that the client of the library needs to implement. +type Backend interface { + StateChanged(containerID string, state StateInfo) error +} + +// Client provides access to containerd features. +type Client interface { + GetServerVersion(ctx context.Context) (*ServerVersion, error) + Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error + Signal(containerID string, sig int) error + SignalProcess(containerID string, processFriendlyName string, sig int) error + AddProcess(ctx context.Context, containerID, processFriendlyName string, process Process, attachStdio StdioCallback) (int, error) + Resize(containerID, processFriendlyName string, width, height int) error + Pause(containerID string) error + Resume(containerID string) error + Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error + Stats(containerID string) (*Stats, error) + GetPidsForContainer(containerID string) ([]int, error) + Summary(containerID string) ([]Summary, error) + UpdateResources(containerID string, resources Resources) error + CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error + DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error + ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) +} + +// CreateOption allows to configure parameters of container creation. +type CreateOption interface { + Apply(interface{}) error +} + +// StdioCallback is called to connect a container or process stdio. +type StdioCallback func(IOPipe) error + +// IOPipe contains the stdio streams. +type IOPipe struct { + Stdin io.WriteCloser + Stdout io.ReadCloser + Stderr io.ReadCloser + Terminal bool // Whether stderr is connected on Windows +} + +// ServerVersion contains version information as retrieved from the +// server +type ServerVersion struct { + containerd.GetServerVersionResponse +} diff --git a/vendor/github.com/moby/moby/libcontainerd/types_linux.go b/vendor/github.com/moby/moby/libcontainerd/types_linux.go new file mode 100644 index 0000000..cc2a17a --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/types_linux.go @@ -0,0 +1,49 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // User specifies user information for the process. + User *specs.User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd *string `json:"cwd"` + // Capabilities are linux capabilities that are kept for the container. + Capabilities []string `json:"capabilities,omitempty"` + // Rlimits specifies rlimit options to apply to the process. + Rlimits []specs.Rlimit `json:"rlimits,omitempty"` + // ApparmorProfile specifies the apparmor profile for the container. + ApparmorProfile *string `json:"apparmorProfile,omitempty"` + // SelinuxLabel specifies the selinux context that the container process is run as. + SelinuxLabel *string `json:"selinuxLabel,omitempty"` +} + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo + OOMKilled bool +} + +// Stats contains a stats properties from containerd. +type Stats containerd.StatsResponse + +// Summary contains a container summary from containerd +type Summary struct{} + +// Resources defines updatable container resource values. +type Resources containerd.UpdateResource + +// Checkpoints contains the details of a checkpoint +type Checkpoints containerd.ListCheckpointResponse diff --git a/vendor/github.com/moby/moby/libcontainerd/types_solaris.go b/vendor/github.com/moby/moby/libcontainerd/types_solaris.go new file mode 100644 index 0000000..dbafef6 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/types_solaris.go @@ -0,0 +1,43 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // User specifies user information for the process. + User *specs.User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd *string `json:"cwd"` + // Capabilities are linux capabilities that are kept for the container. + Capabilities []string `json:"capabilities,omitempty"` +} + +// Stats contains a stats properties from containerd. +type Stats struct{} + +// Summary contains a container summary from containerd +type Summary struct{} + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo + OOMKilled bool +} + +// Resources defines updatable container resource values. +type Resources struct{} + +// Checkpoints contains the details of a checkpoint +type Checkpoints containerd.ListCheckpointResponse diff --git a/vendor/github.com/moby/moby/libcontainerd/types_windows.go b/vendor/github.com/moby/moby/libcontainerd/types_windows.go new file mode 100644 index 0000000..24a9a96 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/types_windows.go @@ -0,0 +1,79 @@ +package libcontainerd + +import ( + "github.com/Microsoft/hcsshim" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Process contains information to start a specific application inside the container. +type Process specs.Process + +// Summary contains a ProcessList item from HCS to support `top` +type Summary hcsshim.ProcessListItem + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo + + UpdatePending bool // Indicates that there are some update operations pending that should be completed by a servicing container. +} + +// Stats contains statics from HCS +type Stats hcsshim.Statistics + +// Resources defines updatable container resource values. +type Resources struct{} + +// ServicingOption is a CreateOption with a no-op application that signifies +// the container needs to be used for a Windows servicing operation. +type ServicingOption struct { + IsServicing bool +} + +// FlushOption is a CreateOption that signifies if the container should be +// started with flushes ignored until boot has completed. This is an optimisation +// for first boot of a container. +type FlushOption struct { + IgnoreFlushesDuringBoot bool +} + +// HyperVIsolationOption is a CreateOption that indicates whether the runtime +// should start the container as a Hyper-V container, and if so, the sandbox path. +type HyperVIsolationOption struct { + IsHyperV bool + SandboxPath string `json:",omitempty"` +} + +// LayerOption is a CreateOption that indicates to the runtime the layer folder +// and layer paths for a container. +type LayerOption struct { + // LayerFolder is the path to the current layer folder. Empty for Hyper-V containers. + LayerFolderPath string `json:",omitempty"` + // Layer paths of the parent layers + LayerPaths []string +} + +// NetworkEndpointsOption is a CreateOption that provides the runtime list +// of network endpoints to which a container should be attached during its creation. +type NetworkEndpointsOption struct { + Endpoints []string + AllowUnqualifiedDNSQuery bool +} + +// CredentialsOption is a CreateOption that indicates the credentials from +// a credential spec to be used to the runtime +type CredentialsOption struct { + Credentials string +} + +// Checkpoint holds the details of a checkpoint (not supported in windows) +type Checkpoint struct { + Name string +} + +// Checkpoints contains the details of a checkpoint +type Checkpoints struct { + Checkpoints []*Checkpoint +} diff --git a/vendor/github.com/moby/moby/libcontainerd/utils_linux.go b/vendor/github.com/moby/moby/libcontainerd/utils_linux.go new file mode 100644 index 0000000..78828bc --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/utils_linux.go @@ -0,0 +1,62 @@ +package libcontainerd + +import ( + "syscall" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func hostIDFromMap(id uint32, mp []specs.IDMapping) int { + for _, m := range mp { + if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { + return int(m.HostID + id - m.ContainerID) + } + } + return 0 +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { + for _, r := range sr { + cr = append(cr, &containerd.Rlimit{ + Type: r.Type, + Hard: r.Hard, + Soft: r.Soft, + }) + } + return +} + +// setPDeathSig sets the parent death signal to SIGKILL +func setSysProcAttr(sid bool) *syscall.SysProcAttr { + return &syscall.SysProcAttr{ + Setsid: sid, + Pdeathsig: syscall.SIGKILL, + } +} diff --git a/vendor/github.com/moby/moby/libcontainerd/utils_solaris.go b/vendor/github.com/moby/moby/libcontainerd/utils_solaris.go new file mode 100644 index 0000000..49632b4 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/utils_solaris.go @@ -0,0 +1,27 @@ +package libcontainerd + +import ( + "syscall" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + return 0, 0, nil +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +// setPDeathSig sets the parent death signal to SIGKILL +func setSysProcAttr(sid bool) *syscall.SysProcAttr { + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/utils_windows.go b/vendor/github.com/moby/moby/libcontainerd/utils_windows.go new file mode 100644 index 0000000..41ac40d --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/utils_windows.go @@ -0,0 +1,46 @@ +package libcontainerd + +import "strings" + +// setupEnvironmentVariables converts a string array of environment variables +// into a map as required by the HCS. Source array is in format [v1=k1] [v2=k2] etc. +func setupEnvironmentVariables(a []string) map[string]string { + r := make(map[string]string) + for _, s := range a { + arr := strings.SplitN(s, "=", 2) + if len(arr) == 2 { + r[arr[0]] = arr[1] + } + } + return r +} + +// Apply for a servicing option is a no-op. +func (s *ServicingOption) Apply(interface{}) error { + return nil +} + +// Apply for the flush option is a no-op. +func (f *FlushOption) Apply(interface{}) error { + return nil +} + +// Apply for the hypervisolation option is a no-op. +func (h *HyperVIsolationOption) Apply(interface{}) error { + return nil +} + +// Apply for the layer option is a no-op. +func (h *LayerOption) Apply(interface{}) error { + return nil +} + +// Apply for the network endpoints option is a no-op. +func (s *NetworkEndpointsOption) Apply(interface{}) error { + return nil +} + +// Apply for the credentials option is a no-op. +func (s *CredentialsOption) Apply(interface{}) error { + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/utils_windows_test.go b/vendor/github.com/moby/moby/libcontainerd/utils_windows_test.go new file mode 100644 index 0000000..f3679bf --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/utils_windows_test.go @@ -0,0 +1,13 @@ +package libcontainerd + +import ( + "testing" +) + +func TestEnvironmentParsing(t *testing.T) { + env := []string{"foo=bar", "car=hat", "a=b=c"} + result := setupEnvironmentVariables(env) + if len(result) != 3 || result["foo"] != "bar" || result["car"] != "hat" || result["a"] != "b=c" { + t.Fatalf("Expected map[foo:bar car:hat a:b=c], got %v", result) + } +} diff --git a/vendor/github.com/moby/moby/man/Dockerfile b/vendor/github.com/moby/moby/man/Dockerfile new file mode 100644 index 0000000..80e97ff --- /dev/null +++ b/vendor/github.com/moby/moby/man/Dockerfile @@ -0,0 +1,24 @@ +FROM golang:1.7.5-alpine + +RUN apk add -U git bash curl gcc musl-dev make + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/moby/moby/man/Dockerfile.5.md b/vendor/github.com/moby/moby/man/Dockerfile.5.md new file mode 100644 index 0000000..5191b19 --- /dev/null +++ b/vendor/github.com/moby/moby/man/Dockerfile.5.md @@ -0,0 +1,474 @@ +% DOCKERFILE(5) Docker User Manuals +% Zac Dover +% May 2014 +# NAME + +Dockerfile - automate the steps of creating a Docker image + +# INTRODUCTION + +The **Dockerfile** is a configuration file that automates the steps of creating +a Docker image. It is similar to a Makefile. Docker reads instructions from the +**Dockerfile** to automate the steps otherwise performed manually to create an +image. To build an image, create a file called **Dockerfile**. + +The **Dockerfile** describes the steps taken to assemble the image. When the +**Dockerfile** has been created, call the `docker build` command, using the +path of directory that contains **Dockerfile** as the argument. + +# SYNOPSIS + +INSTRUCTION arguments + +For example: + + FROM image + +# DESCRIPTION + +A Dockerfile is a file that automates the steps of creating a Docker image. +A Dockerfile is similar to a Makefile. + +# USAGE + + docker build . + + -- Runs the steps and commits them, building a final image. + The path to the source repository defines where to find the context of the + build. The build is run by the Docker daemon, not the CLI. The whole + context must be transferred to the daemon. The Docker CLI reports + `"Sending build context to Docker daemon"` when the context is sent to the + daemon. + + ``` + docker build -t repository/tag . + ``` + + -- specifies a repository and tag at which to save the new image if the build + succeeds. The Docker daemon runs the steps one-by-one, committing the result + to a new image if necessary, before finally outputting the ID of the new + image. The Docker daemon automatically cleans up the context it is given. + + Docker re-uses intermediate images whenever possible. This significantly + accelerates the *docker build* process. + +# FORMAT + + `FROM image` + + `FROM image:tag` + + `FROM image@digest` + + -- The **FROM** instruction sets the base image for subsequent instructions. A + valid Dockerfile must have **FROM** as its first instruction. The image can be any + valid image. It is easy to start by pulling an image from the public + repositories. + + -- **FROM** must be the first non-comment instruction in Dockerfile. + + -- **FROM** may appear multiple times within a single Dockerfile in order to create + multiple images. Make a note of the last image ID output by the commit before + each new **FROM** command. + + -- If no tag is given to the **FROM** instruction, Docker applies the + `latest` tag. If the used tag does not exist, an error is returned. + + -- If no digest is given to the **FROM** instruction, Docker applies the + `latest` tag. If the used tag does not exist, an error is returned. + +**MAINTAINER** + -- **MAINTAINER** sets the Author field for the generated images. + Useful for providing users with an email or url for support. + +**RUN** + -- **RUN** has two forms: + + ``` + # the command is run in a shell - /bin/sh -c + RUN + + # Executable form + RUN ["executable", "param1", "param2"] + ``` + + + -- The **RUN** instruction executes any commands in a new layer on top of the current + image and commits the results. The committed image is used for the next step in + Dockerfile. + + -- Layering **RUN** instructions and generating commits conforms to the core + concepts of Docker where commits are cheap and containers can be created from + any point in the history of an image. This is similar to source control. The + exec form makes it possible to avoid shell string munging. The exec form makes + it possible to **RUN** commands using a base image that does not contain `/bin/sh`. + + Note that the exec form is parsed as a JSON array, which means that you must + use double-quotes (") around words not single-quotes ('). + +**CMD** + -- **CMD** has three forms: + + ``` + # Executable form + CMD ["executable", "param1", "param2"]` + + # Provide default arguments to ENTRYPOINT + CMD ["param1", "param2"]` + + # the command is run in a shell - /bin/sh -c + CMD command param1 param2 + ``` + + -- There should be only one **CMD** in a Dockerfile. If more than one **CMD** is listed, only + the last **CMD** takes effect. + The main purpose of a **CMD** is to provide defaults for an executing container. + These defaults may include an executable, or they can omit the executable. If + they omit the executable, an **ENTRYPOINT** must be specified. + When used in the shell or exec formats, the **CMD** instruction sets the command to + be executed when running the image. + If you use the shell form of the **CMD**, the `` executes in `/bin/sh -c`: + + Note that the exec form is parsed as a JSON array, which means that you must + use double-quotes (") around words not single-quotes ('). + + ``` + FROM ubuntu + CMD echo "This is a test." | wc - + ``` + + -- If you run **command** without a shell, then you must express the command as a + JSON array and give the full path to the executable. This array form is the + preferred form of **CMD**. All additional parameters must be individually expressed + as strings in the array: + + ``` + FROM ubuntu + CMD ["/usr/bin/wc","--help"] + ``` + + -- To make the container run the same executable every time, use **ENTRYPOINT** in + combination with **CMD**. + If the user specifies arguments to `docker run`, the specified commands + override the default in **CMD**. + Do not confuse **RUN** with **CMD**. **RUN** runs a command and commits the result. + **CMD** executes nothing at build time, but specifies the intended command for + the image. + +**LABEL** + -- `LABEL = [= ...]`or + ``` + LABEL [ ] + LABEL [ ] + ... + ``` + The **LABEL** instruction adds metadata to an image. A **LABEL** is a + key-value pair. To specify a **LABEL** without a value, simply use an empty + string. To include spaces within a **LABEL** value, use quotes and + backslashes as you would in command-line parsing. + + ``` + LABEL com.example.vendor="ACME Incorporated" + LABEL com.example.vendor "ACME Incorporated" + LABEL com.example.vendor.is-beta "" + LABEL com.example.vendor.is-beta= + LABEL com.example.vendor.is-beta="" + ``` + + An image can have more than one label. To specify multiple labels, separate + each key-value pair by a space. + + Labels are additive including `LABEL`s in `FROM` images. As the system + encounters and then applies a new label, new `key`s override any previous + labels with identical keys. + + To display an image's labels, use the `docker inspect` command. + +**EXPOSE** + -- `EXPOSE [...]` + The **EXPOSE** instruction informs Docker that the container listens on the + specified network ports at runtime. Docker uses this information to + interconnect containers using links and to set up port redirection on the host + system. + +**ENV** + -- `ENV ` + The **ENV** instruction sets the environment variable to + the value ``. This value is passed to all future + **RUN**, **ENTRYPOINT**, and **CMD** instructions. This is + functionally equivalent to prefixing the command with `=`. The + environment variables that are set with **ENV** persist when a container is run + from the resulting image. Use `docker inspect` to inspect these values, and + change them using `docker run --env =`. + + Note that setting "`ENV DEBIAN_FRONTEND noninteractive`" may cause + unintended consequences, because it will persist when the container is run + interactively, as with the following command: `docker run -t -i image bash` + +**ADD** + -- **ADD** has two forms: + + ``` + ADD + + # Required for paths with whitespace + ADD ["",... ""] + ``` + + The **ADD** instruction copies new files, directories + or remote file URLs to the filesystem of the container at path ``. + Multiple `` resources may be specified but if they are files or directories + then they must be relative to the source directory that is being built + (the context of the build). The `` is the absolute path, or path relative + to **WORKDIR**, into which the source is copied inside the target container. + If the `` argument is a local file in a recognized compression format + (tar, gzip, bzip2, etc) then it is unpacked at the specified `` in the + container's filesystem. Note that only local compressed files will be unpacked, + i.e., the URL download and archive unpacking features cannot be used together. + All new directories are created with mode 0755 and with the uid and gid of **0**. + +**COPY** + -- **COPY** has two forms: + + ``` + COPY + + # Required for paths with whitespace + COPY ["",... ""] + ``` + + The **COPY** instruction copies new files from `` and + adds them to the filesystem of the container at path . The `` must be + the path to a file or directory relative to the source directory that is + being built (the context of the build) or a remote file URL. The `` is an + absolute path, or a path relative to **WORKDIR**, into which the source will + be copied inside the target container. If you **COPY** an archive file it will + land in the container exactly as it appears in the build context without any + attempt to unpack it. All new files and directories are created with mode **0755** + and with the uid and gid of **0**. + +**ENTRYPOINT** + -- **ENTRYPOINT** has two forms: + + ``` + # executable form + ENTRYPOINT ["executable", "param1", "param2"]` + + # run command in a shell - /bin/sh -c + ENTRYPOINT command param1 param2 + ``` + + -- An **ENTRYPOINT** helps you configure a + container that can be run as an executable. When you specify an **ENTRYPOINT**, + the whole container runs as if it was only that executable. The **ENTRYPOINT** + instruction adds an entry command that is not overwritten when arguments are + passed to docker run. This is different from the behavior of **CMD**. This allows + arguments to be passed to the entrypoint, for instance `docker run -d` + passes the -d argument to the **ENTRYPOINT**. Specify parameters either in the + **ENTRYPOINT** JSON array (as in the preferred exec form above), or by using a **CMD** + statement. Parameters in the **ENTRYPOINT** are not overwritten by the docker run + arguments. Parameters specified via **CMD** are overwritten by docker run + arguments. Specify a plain string for the **ENTRYPOINT**, and it will execute in + `/bin/sh -c`, like a **CMD** instruction: + + ``` + FROM ubuntu + ENTRYPOINT wc -l - + ``` + + This means that the Dockerfile's image always takes stdin as input (that's + what "-" means), and prints the number of lines (that's what "-l" means). To + make this optional but default, use a **CMD**: + + ``` + FROM ubuntu + CMD ["-l", "-"] + ENTRYPOINT ["/usr/bin/wc"] + ``` + +**VOLUME** + -- `VOLUME ["/data"]` + The **VOLUME** instruction creates a mount point with the specified name and marks + it as holding externally-mounted volumes from the native host or from other + containers. + +**USER** + -- `USER daemon` + Sets the username or UID used for running subsequent commands. + + The **USER** instruction can optionally be used to set the group or GID. The + followings examples are all valid: + USER [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Until the **USER** instruction is set, instructions will be run as root. The USER + instruction can be used any number of times in a Dockerfile, and will only affect + subsequent commands. + +**WORKDIR** + -- `WORKDIR /path/to/workdir` + The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**, + **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can + be used multiple times in a single Dockerfile. Relative paths are defined + relative to the path of the previous **WORKDIR** instruction. For example: + + ``` + WORKDIR /a + WORKDIR b + WORKDIR c + RUN pwd + ``` + + In the above example, the output of the **pwd** command is **a/b/c**. + +**ARG** + -- ARG [=] + + The `ARG` instruction defines a variable that users can pass at build-time to + the builder with the `docker build` command using the `--build-arg + =` flag. If a user specifies a build argument that was not + defined in the Dockerfile, the build outputs a warning. + + ``` + [Warning] One or more build-args [foo] were not consumed + ``` + + The Dockerfile author can define a single variable by specifying `ARG` once or many + variables by specifying `ARG` more than once. For example, a valid Dockerfile: + + ``` + FROM busybox + ARG user1 + ARG buildno + ... + ``` + + A Dockerfile author may optionally specify a default value for an `ARG` instruction: + + ``` + FROM busybox + ARG user1=someuser + ARG buildno=1 + ... + ``` + + If an `ARG` value has a default and if there is no value passed at build-time, the + builder uses the default. + + An `ARG` variable definition comes into effect from the line on which it is + defined in the `Dockerfile` not from the argument's use on the command-line or + elsewhere. For example, consider this Dockerfile: + + ``` + 1 FROM busybox + 2 USER ${user:-some_user} + 3 ARG user + 4 USER $user + ... + ``` + A user builds this file by calling: + + ``` + $ docker build --build-arg user=what_user Dockerfile + ``` + + The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the + subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is + defined and the `what_user` value was passed on the command line. Prior to its definition by an + `ARG` instruction, any use of a variable results in an empty string. + + > **Warning:** It is not recommended to use build-time variables for + > passing secrets like github keys, user credentials etc. Build-time variable + > values are visible to any user of the image with the `docker history` command. + + You can use an `ARG` or an `ENV` instruction to specify variables that are + available to the `RUN` instruction. Environment variables defined using the + `ENV` instruction always override an `ARG` instruction of the same name. Consider + this Dockerfile with an `ENV` and `ARG` instruction. + + ``` + 1 FROM ubuntu + 2 ARG CONT_IMG_VER + 3 ENV CONT_IMG_VER v1.0.0 + 4 RUN echo $CONT_IMG_VER + ``` + Then, assume this image is built with this command: + + ``` + $ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile + ``` + + In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting + passed by the user:`v2.0.1` This behavior is similar to a shell + script where a locally scoped variable overrides the variables passed as + arguments or inherited from environment, from its point of definition. + + Using the example above but a different `ENV` specification you can create more + useful interactions between `ARG` and `ENV` instructions: + + ``` + 1 FROM ubuntu + 2 ARG CONT_IMG_VER + 3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} + 4 RUN echo $CONT_IMG_VER + ``` + + Unlike an `ARG` instruction, `ENV` values are always persisted in the built + image. Consider a docker build without the --build-arg flag: + + ``` + $ docker build Dockerfile + ``` + + Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but + its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. + + The variable expansion technique in this example allows you to pass arguments + from the command line and persist them in the final image by leveraging the + `ENV` instruction. Variable expansion is only supported for [a limited set of + Dockerfile instructions.](#environment-replacement) + + Docker has a set of predefined `ARG` variables that you can use without a + corresponding `ARG` instruction in the Dockerfile. + + * `HTTP_PROXY` + * `http_proxy` + * `HTTPS_PROXY` + * `https_proxy` + * `FTP_PROXY` + * `ftp_proxy` + * `NO_PROXY` + * `no_proxy` + + To use these, simply pass them on the command line using the `--build-arg + =` flag. + +**ONBUILD** + -- `ONBUILD [INSTRUCTION]` + The **ONBUILD** instruction adds a trigger instruction to an image. The + trigger is executed at a later time, when the image is used as the base for + another build. Docker executes the trigger in the context of the downstream + build, as if the trigger existed immediately after the **FROM** instruction in + the downstream Dockerfile. + + You can register any build instruction as a trigger. A trigger is useful if + you are defining an image to use as a base for building other images. For + example, if you are defining an application build environment or a daemon that + is customized with a user-specific configuration. + + Consider an image intended as a reusable python application builder. It must + add application source code to a particular directory, and might need a build + script called after that. You can't just call **ADD** and **RUN** now, because + you don't yet have access to the application source code, and it is different + for each application build. + + -- Providing application developers with a boilerplate Dockerfile to copy-paste + into their application is inefficient, error-prone, and + difficult to update because it mixes with application-specific code. + The solution is to use **ONBUILD** to register instructions in advance, to + run later, during the next build stage. + +# HISTORY +*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. +*Feb 2015, updated by Brian Goff (cpuguy83@gmail.com) for readability +*Sept 2015, updated by Sally O'Malley (somalley@redhat.com) +*Oct 2016, updated by Addam Hardy (addam.hardy@gmail.com) diff --git a/vendor/github.com/moby/moby/man/Dockerfile.aarch64 b/vendor/github.com/moby/moby/man/Dockerfile.aarch64 new file mode 100644 index 0000000..e788eb1 --- /dev/null +++ b/vendor/github.com/moby/moby/man/Dockerfile.aarch64 @@ -0,0 +1,25 @@ +FROM aarch64/ubuntu:xenial + +RUN apt-get update && apt-get install -y git golang-go + +RUN mkdir -p /go/src /go/bin /go/pkg +ENV GOPATH=/go +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/moby/moby/man/Dockerfile.armhf b/vendor/github.com/moby/moby/man/Dockerfile.armhf new file mode 100644 index 0000000..e7ea495 --- /dev/null +++ b/vendor/github.com/moby/moby/man/Dockerfile.armhf @@ -0,0 +1,43 @@ +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y \ + git \ + bash \ + curl \ + gcc \ + make + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/moby/moby/man/Dockerfile.ppc64le b/vendor/github.com/moby/moby/man/Dockerfile.ppc64le new file mode 100644 index 0000000..fc96ca7 --- /dev/null +++ b/vendor/github.com/moby/moby/man/Dockerfile.ppc64le @@ -0,0 +1,35 @@ +FROM ppc64le/ubuntu:xenial + +RUN apt-get update && apt-get install -y \ + curl \ + gcc \ + git \ + make \ + tar + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH=/go + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/moby/moby/man/Dockerfile.s390x b/vendor/github.com/moby/moby/man/Dockerfile.s390x new file mode 100644 index 0000000..d4bcf1d --- /dev/null +++ b/vendor/github.com/moby/moby/man/Dockerfile.s390x @@ -0,0 +1,35 @@ +FROM s390x/ubuntu:xenial + +RUN apt-get update && apt-get install -y \ + curl \ + gcc \ + git \ + make \ + tar + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH=/go + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/moby/moby/man/README.md b/vendor/github.com/moby/moby/man/README.md new file mode 100644 index 0000000..82dac65 --- /dev/null +++ b/vendor/github.com/moby/moby/man/README.md @@ -0,0 +1,15 @@ +Docker Documentation +==================== + +This directory contains scripts for generating the man pages. Many of the man +pages are generated directly from the `spf13/cobra` `Command` definition. Some +legacy pages are still generated from the markdown files in this directory. +Do *not* edit the man pages in the man1 directory. Instead, update the +Cobra command or amend the Markdown files for legacy pages. + + +## Generate the man pages + +From within the project root directory run: + + make manpages diff --git a/vendor/github.com/moby/moby/man/docker-attach.1.md b/vendor/github.com/moby/moby/man/docker-attach.1.md new file mode 100644 index 0000000..c39d1c92 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-attach.1.md @@ -0,0 +1,99 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-attach - Attach to a running container + +# SYNOPSIS +**docker attach** +[**--detach-keys**[=*[]*]] +[**--help**] +[**--no-stdin**] +[**--sig-proxy**[=*true*]] +CONTAINER + +# DESCRIPTION +The **docker attach** command allows you to attach to a running container using +the container's ID or name, either to view its ongoing output or to control it +interactively. You can attach to the same contained process multiple times +simultaneously, screen sharing style, or quickly view the progress of your +detached process. + +To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the +container. You can detach from the container (and leave it running) using a +configurable key sequence. The default sequence is `CTRL-p CTRL-q`. You +configure the key sequence using the **--detach-keys** option or a configuration +file. See **config-json(5)** for documentation on using a configuration file. + +It is forbidden to redirect the standard input of a `docker attach` command while +attaching to a tty-enabled container (i.e.: launched with `-t`). + +# OPTIONS +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**--help** + Print usage statement + +**--no-stdin**=*true*|*false* + Do not attach STDIN. The default is *false*. + +**--sig-proxy**=*true*|*false* + Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. + +# Override the detach sequence + +If you want, you can configure an override the Docker key sequence for detach. +This is useful if the Docker default sequence conflicts with key sequence you +use for other applications. There are two ways to define your own detach key +sequence, as a per-container override or as a configuration property on your +entire configuration. + +To override the sequence for an individual container, use the +`--detach-keys=""` flag with the `docker attach` command. The format of +the `` is either a letter [a-Z], or the `ctrl-` combined with any of +the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key +sequences. To configure a different configuration default key sequence for all +containers, see **docker(1)**. + +# EXAMPLES + +## Attaching to a container + +In this example the top command is run inside a container, from an image called +fedora, in detached mode. The ID from the container is passed into the **docker +attach** command: + + # ID=$(sudo docker run -d fedora /usr/bin/top -b) + # sudo docker attach $ID + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-build.1.md b/vendor/github.com/moby/moby/man/docker-build.1.md new file mode 100644 index 0000000..4beee88 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-build.1.md @@ -0,0 +1,340 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-build - Build an image from a Dockerfile + +# SYNOPSIS +**docker build** +[**--build-arg**[=*[]*]] +[**--cpu-shares**[=*0*]] +[**--cgroup-parent**[=*CGROUP-PARENT*]] +[**--help**] +[**-f**|**--file**[=*PATH/Dockerfile*]] +[**-squash**] *Experimental* +[**--force-rm**] +[**--isolation**[=*default*]] +[**--label**[=*[]*]] +[**--no-cache**] +[**--pull**] +[**--compress**] +[**-q**|**--quiet**] +[**--rm**[=*true*]] +[**-t**|**--tag**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--memory-swap**[=*LIMIT*]] +[**--network**[=*"default"*]] +[**--shm-size**[=*SHM-SIZE*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--ulimit**[=*[]*]] +PATH | URL | - + +# DESCRIPTION +This will read the Dockerfile from the directory specified in **PATH**. +It also sends any other files and directories found in the current +directory to the Docker daemon. The contents of this directory would +be used by **ADD** commands found within the Dockerfile. + +Warning, this will send a lot of data to the Docker daemon depending +on the contents of the current directory. The build is run by the Docker +daemon, not by the CLI, so the whole context must be transferred to the daemon. +The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to +the daemon. + +When the URL to a tarball archive or to a single Dockerfile is given, no context is sent from +the client to the Docker daemon. In this case, the Dockerfile at the root of the archive and +the rest of the archive will get used as the context of the build. When a Git repository is +set as the **URL**, the repository is cloned locally and then sent as the context. + +# OPTIONS +**-f**, **--file**=*PATH/Dockerfile* + Path to the Dockerfile to use. If the path is a relative path and you are + building from a local directory, then the path must be relative to that + directory. If you are building from a remote URL pointing to either a + tarball or a Git repository, then the path must be relative to the root of + the remote context. In all cases, the file must be within the build context. + The default is *Dockerfile*. + +**--squash**=*true*|*false* + **Experimental Only** + Once the image is built, squash the new layers into a new image with a single + new layer. Squashing does not destroy any existing image, rather it creates a new + image with the content of the squshed layers. This effectively makes it look + like all `Dockerfile` commands were created with a single layer. The build + cache is preserved with this method. + + **Note**: using this option means the new image will not be able to take + advantage of layer sharing with other images and may use significantly more + space. + + **Note**: using this option you may see significantly more space used due to + storing two copies of the image, one for the build cache with all the cache + layers in tact, and one for the squashed version. + +**--build-arg**=*variable* + name and value of a **buildarg**. + + For example, if you want to pass a value for `http_proxy`, use + `--build-arg=http_proxy="http://some.proxy.url"` + + Users pass these values at build-time. Docker uses the `buildargs` as the + environment context for command(s) run via the Dockerfile's `RUN` instruction + or for variable expansion in other Dockerfile instructions. This is not meant + for passing secret values. [Read more about the buildargs instruction](https://docs.docker.com/engine/reference/builder/#arg) + +**--force-rm**=*true*|*false* + Always remove intermediate containers, even after unsuccessful builds. The default is *false*. + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. + +**--label**=*label* + Set metadata for an image + +**--no-cache**=*true*|*false* + Do not use cache when building the image. The default is *false*. + +**--help** + Print usage statement + +**--pull**=*true*|*false* + Always attempt to pull a newer version of the image. The default is *false*. + +**--compress**=*true*|*false* + Compress the build context using gzip. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Suppress the build output and print image ID on success. The default is *false*. + +**--rm**=*true*|*false* + Remove intermediate containers after a successful build. The default is *true*. + +**-t**, **--tag**="" + Repository names (and optionally with tags) to be applied to the resulting + image in case of success. Refer to **docker-tag(1)** for more information + about valid tag names. + +**-m**, **--memory**=*MEMORY* + Memory limit + +**--memory-swap**=*LIMIT* + A limit value equal to memory plus swap. Must be used with the **-m** +(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** +(**--memory**) value. + + The format of `LIMIT` is `[]`. Unit can be `b` (bytes), +`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a +unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. + +**--network**=*bridge* + Set the networking mode for the RUN instructions during build. Supported standard + values are: `bridge`, `host`, `none` and `container:`. Any other value + is taken as a custom network's name or ID which this container should connect to. + +**--shm-size**=*SHM-SIZE* + Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. + If you omit the size entirely, the system uses `64m`. + +**--cpu-shares**=*0* + CPU shares (relative weight). + + By default, all containers get the same proportion of CPU cycles. + CPU shares is a 'relative weight', relative to the default setting of 1024. + This default value is defined here: + ``` + cat /sys/fs/cgroup/cpu/cpu.shares + 1024 + ``` + You can change this proportion by adjusting the container's CPU share + weighting relative to the weighting of all other running containers. + + To modify the proportion from the default of 1024, use the **--cpu-shares** + flag to set the weighting to 2 or higher. + + Container CPU share Flag + {C0} 60% of CPU --cpu-shares=614 (614 is 60% of 1024) + {C1} 40% of CPU --cpu-shares=410 (410 is 40% of 1024) + + The proportion is only applied when CPU-intensive processes are running. + When tasks in one container are idle, the other containers can use the + left-over CPU time. The actual amount of CPU time used varies depending on + the number of containers running on the system. + + For example, consider three containers, where one has **--cpu-shares=1024** and + two others have **--cpu-shares=512**. When processes in all three + containers attempt to use 100% of CPU, the first container would receive + 50% of the total CPU time. If you add a fourth container with **--cpu-shares=1024**, + the first container only gets 33% of the CPU. The remaining containers + receive 16.5%, 16.5% and 33% of the CPU. + + + Container CPU share Flag CPU time + {C0} 100% --cpu-shares=1024 33% + {C1} 50% --cpu-shares=512 16.5% + {C2} 50% --cpu-shares=512 16.5% + {C4} 100% --cpu-shares=1024 33% + + + On a multi-core system, the shares of CPU time are distributed across the CPU + cores. Even if a container is limited to less than 100% of CPU time, it can + use 100% of each individual CPU core. + + For example, consider a system with more than three cores. If you start one + container **{C0}** with **--cpu-shares=512** running one process, and another container + **{C1}** with **--cpu-shares=1024** running two processes, this can result in the following + division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +**--cpu-period**=*0* + Limit the CPU CFS (Completely Fair Scheduler) period. + + Limit the container's CPU usage. This flag causes the kernel to restrict the + container's CPU usage to the period you specify. + +**--cpu-quota**=*0* + Limit the CPU CFS (Completely Fair Scheduler) quota. + + By default, containers run with the full CPU resource. This flag causes the +kernel to restrict the container's CPU usage to the quota you specify. + +**--cpuset-cpus**=*CPUSET-CPUS* + CPUs in which to allow execution (0-3, 0,1). + +**--cpuset-mems**=*CPUSET-MEMS* + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on + NUMA systems. + + For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +to ensure the processes in your Docker container only use memory from the first +two memory nodes. + +**--cgroup-parent**=*CGROUP-PARENT* + Path to `cgroups` under which the container's `cgroup` are created. + + If the path is not absolute, the path is considered relative to the `cgroups` path of the init process. +Cgroups are created if they do not already exist. + +**--ulimit**=[] + Ulimit options + + For more information about `ulimit` see [Setting ulimits in a +container](https://docs.docker.com/engine/reference/commandline/run/#set-ulimits-in-container---ulimit) + +# EXAMPLES + +## Building an image using a Dockerfile located inside the current directory + +Docker images can be built using the build command and a Dockerfile: + + docker build . + +During the build process Docker creates intermediate images. In order to +keep them, you must explicitly set `--rm=false`. + + docker build --rm=false . + +A good practice is to make a sub-directory with a related name and create +the Dockerfile in that directory. For example, a directory called mongo may +contain a Dockerfile to create a Docker MongoDB image. Likewise, another +directory called httpd may be used to store Dockerfiles for Apache web +server images. + +It is also a good practice to add the files required for the image to the +sub-directory. These files will then be specified with the `COPY` or `ADD` +instructions in the `Dockerfile`. + +Note: If you include a tar file (a good practice), then Docker will +automatically extract the contents of the tar file specified within the `ADD` +instruction into the specified target. + +## Building an image and naming that image + +A good practice is to give a name to the image you are building. Note that +only a-z0-9-_. should be used for consistency. There are no hard rules here but it is best to give the names consideration. + +The **-t**/**--tag** flag is used to rename an image. Here are some examples: + +Though it is not a good practice, image names can be arbitrary: + + docker build -t myimage . + +A better approach is to provide a fully qualified and meaningful repository, +name, and tag (where the tag in this context means the qualifier after +the ":"). In this example we build a JBoss image for the Fedora repository +and give it the version 1.0: + + docker build -t fedora/jboss:1.0 . + +The next example is for the "whenry" user repository and uses Fedora and +JBoss and gives it the version 2.1 : + + docker build -t whenry/fedora-jboss:v2.1 . + +If you do not provide a version tag then Docker will assign `latest`: + + docker build -t whenry/fedora-jboss . + +When you list the images, the image above will have the tag `latest`. + +You can apply multiple tags to an image. For example, you can apply the `latest` +tag to a newly built image and add another tag that references a specific +version. +For example, to tag an image both as `whenry/fedora-jboss:latest` and +`whenry/fedora-jboss:v2.1`, use the following: + + docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . + +So renaming an image is arbitrary but consideration should be given to +a useful convention that makes sense for consumers and should also take +into account Docker community conventions. + + +## Building an image using a URL + +This will clone the specified GitHub repository from the URL and use it +as context. The Dockerfile at the root of the repository is used as +Dockerfile. This only works if the GitHub repository is a dedicated +repository. + + docker build github.com/scollier/purpletest + +Note: You can set an arbitrary Git repository via the `git://` scheme. + +## Building an image using a URL to a tarball'ed context + +This will send the URL itself to the Docker daemon. The daemon will fetch the +tarball archive, decompress it and use its contents as the build context. The +Dockerfile at the root of the archive and the rest of the archive will get used +as the context of the build. If you pass an **-f PATH/Dockerfile** option as well, +the system will look for that file inside the contents of the tarball. + + docker build -f dev/Dockerfile https://10.10.10.1/docker/context.tar.gz + +Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression). + +## Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + +* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. +* `process`: Namespace isolation only. +* `hyperv`: Hyper-V hypervisor partition-based isolation. + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + +# HISTORY +March 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/moby/moby/man/docker-commit.1.md b/vendor/github.com/moby/moby/man/docker-commit.1.md new file mode 100644 index 0000000..d8a4cf8 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-commit.1.md @@ -0,0 +1,71 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-commit - Create a new image from a container's changes + +# SYNOPSIS +**docker commit** +[**-a**|**--author**[=*AUTHOR*]] +[**-c**|**--change**[=\[*DOCKERFILE INSTRUCTIONS*\]]] +[**--help**] +[**-m**|**--message**[=*MESSAGE*]] +[**-p**|**--pause**[=*true*]] +CONTAINER [REPOSITORY[:TAG]] + +# DESCRIPTION +Create a new image from an existing container specified by name or +container ID. The new image will contain the contents of the +container filesystem, *excluding* any data volumes. Refer to **docker-tag(1)** +for more information about valid image and tag names. + +While the `docker commit` command is a convenient way of extending an +existing image, you should prefer the use of a Dockerfile and `docker +build` for generating images that you intend to share with other +people. + +# OPTIONS +**-a**, **--author**="" + Author (e.g., "John Hannibal Smith ") + +**-c** , **--change**=[] + Apply specified Dockerfile instructions while committing the image + Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +**--help** + Print usage statement + +**-m**, **--message**="" + Commit message + +**-p**, **--pause**=*true*|*false* + Pause container during commit. The default is *true*. + +# EXAMPLES + +## Creating a new image from an existing container +An existing Fedora based container has had Apache installed while running +in interactive mode with the bash shell. Apache is also running. To +create a new image run `docker ps` to find the container's ID and then run: + + # docker commit -m="Added Apache to Fedora base image" \ + -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 + +Note that only a-z0-9-_. are allowed when naming images from an +existing container. + +## Apply specified Dockerfile instructions while committing the image +If an existing container was created without the DEBUG environment +variable set to "true", you can create a new image based on that +container by first getting the container's ID with `docker ps` and +then running: + + # docker commit -c="ENV DEBUG true" 98bd7fc99854 debug-image + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and in +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +Oct 2014, updated by Daniel, Dao Quang Minh +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/moby/moby/man/docker-config-json.5.md b/vendor/github.com/moby/moby/man/docker-config-json.5.md new file mode 100644 index 0000000..49987f0 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-config-json.5.md @@ -0,0 +1,72 @@ +% CONFIG.JSON(5) Docker User Manuals +% Docker Community +% JANUARY 2016 +# NAME +HOME/.docker/config.json - Default Docker configuration file + +# INTRODUCTION + +By default, the Docker command line stores its configuration files in a +directory called `.docker` within your `$HOME` directory. Docker manages most of +the files in the configuration directory and you should not modify them. +However, you *can modify* the `config.json` file to control certain aspects of +how the `docker` command behaves. + +Currently, you can modify the `docker` command behavior using environment +variables or command-line options. You can also use options within +`config.json` to modify some of the same behavior. When using these +mechanisms, you must keep in mind the order of precedence among them. Command +line options override environment variables and environment variables override +properties you specify in a `config.json` file. + +The `config.json` file stores a JSON encoding of several properties: + +* The `HttpHeaders` property specifies a set of headers to include in all messages +sent from the Docker client to the daemon. Docker does not try to interpret or +understand these header; it simply puts them into the messages. Docker does not +allow these headers to change any headers it sets for itself. + +* The `psFormat` property specifies the default format for `docker ps` output. +When the `--format` flag is not provided with the `docker ps` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see **docker-ps(1)**. + +* The `detachKeys` property specifies the default key sequence which +detaches the container. When the `--detach-keys` flag is not provide +with the `docker attach`, `docker exec`, `docker run` or `docker +start`, Docker's client uses this property. If this property is not +set, the client falls back to the default sequence `ctrl-p,ctrl-q`. + + +* The `imagesFormat` property specifies the default format for `docker images` +output. When the `--format` flag is not provided with the `docker images` +command, Docker's client uses this property. If this property is not set, the +client falls back to the default table format. For a list of supported +formatting directives, see **docker-images(1)**. + +You can specify a different location for the configuration files via the +`DOCKER_CONFIG` environment variable or the `--config` command line option. If +both are specified, then the `--config` option overrides the `DOCKER_CONFIG` +environment variable: + + docker --config ~/testconfigs/ ps + +This command instructs Docker to use the configuration files in the +`~/testconfigs/` directory when running the `ps` command. + +## Examples + +Following is a sample `config.json` file: + + { + "HttpHeaders": { + "MyHeader": "MyValue" + }, + "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", + "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", + "detachKeys": "ctrl-e,e" + } + +# HISTORY +January 2016, created by Moxiegirl diff --git a/vendor/github.com/moby/moby/man/docker-cp.1.md b/vendor/github.com/moby/moby/man/docker-cp.1.md new file mode 100644 index 0000000..41d3c34 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-cp.1.md @@ -0,0 +1,175 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-cp - Copy files/folders between a container and the local filesystem. + +# SYNOPSIS +**docker cp** +[**--help**] +CONTAINER:SRC_PATH DEST_PATH|- + +**docker cp** +[**--help**] +SRC_PATH|- CONTAINER:DEST_PATH + +# DESCRIPTION + +The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. +You can copy from the container's file system to the local machine or the +reverse, from the local filesystem to the container. If `-` is specified for +either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from +`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. +The `SRC_PATH` or `DEST_PATH` can be a file or directory. + +The `docker cp` command assumes container paths are relative to the container's +`/` (root) directory. This means supplying the initial forward slash is optional; +The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and +`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can +be an absolute or relative value. The command interprets a local machine's +relative paths as relative to the current working directory where `docker cp` is +run. + +The `cp` command behaves like the Unix `cp -a` command in that directories are +copied recursively with permissions preserved if possible. Ownership is set to +the user and primary group at the destination. For example, files copied to a +container are created with `UID:GID` of the root user. Files copied to the local +machine are created with the `UID:GID` of the user which invoked the `docker cp` +command. If you specify the `-L` option, `docker cp` follows any symbolic link +in the `SRC_PATH`. `docker cp` does *not* create parent directories for +`DEST_PATH` if they do not exist. + +Assuming a path separator of `/`, a first argument of `SRC_PATH` and second +argument of `DEST_PATH`, the behavior is as follows: + +- `SRC_PATH` specifies a file + - `DEST_PATH` does not exist + - the file is saved to a file created at `DEST_PATH` + - `DEST_PATH` does not exist and ends with `/` + - Error condition: the destination directory must exist. + - `DEST_PATH` exists and is a file + - the destination is overwritten with the source file's contents + - `DEST_PATH` exists and is a directory + - the file is copied into this directory using the basename from + `SRC_PATH` +- `SRC_PATH` specifies a directory + - `DEST_PATH` does not exist + - `DEST_PATH` is created as a directory and the *contents* of the source + directory are copied into this directory + - `DEST_PATH` exists and is a file + - Error condition: cannot copy a directory to a file + - `DEST_PATH` exists and is a directory + - `SRC_PATH` does not end with `/.` (that is: _slash_ followed by _dot_) + - the source directory is copied into this directory + - `SRC_PATH` does end with `/.` (that is: _slash_ followed by _dot_) + - the *content* of the source directory is copied into this + directory + +The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above +rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not +the target, is copied by default. To copy the link target and not the link, +specify the `-L` option. + +A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can +also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local +machine, for example `file:name.txt`. If you use a `:` in a local machine path, +you must be explicit with a relative or absolute path, for example: + + `/path/to/file:name.txt` or `./file:name.txt` + +It is not possible to copy certain system files such as resources under +`/proc`, `/sys`, `/dev`, tmpfs, and mounts created by the user in the container. +However, you can still copy such files by manually running `tar` in `docker exec`. +For example (consider `SRC_PATH` and `DEST_PATH` are directories): + + $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - + +or + + $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - + + +Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. +The command extracts the content of the tar to the `DEST_PATH` in container's +filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as +the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. + +# OPTIONS +**-L**, **--follow-link**=*true*|*false* + Follow symbol link in SRC_PATH + +**--help** + Print usage statement + +# EXAMPLES + +Suppose a container has finished producing some output as a file it saves +to somewhere in its filesystem. This could be the output of a build job or +some other computation. You can copy these outputs from the container to a +location on your local host. + +If you want to copy the `/tmp/foo` directory from a container to the +existing `/tmp` directory on your host. If you run `docker cp` in your `~` +(home) directory on the local host: + + $ docker cp compassionate_darwin:tmp/foo /tmp + +Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit +the leading slash in the command. If you execute this command from your home +directory: + + $ docker cp compassionate_darwin:tmp/foo tmp + +If `~/tmp` does not exist, Docker will create it and copy the contents of +`/tmp/foo` from the container into this new directory. If `~/tmp` already +exists as a directory, then Docker will copy the contents of `/tmp/foo` from +the container into a directory at `~/tmp/foo`. + +When copying a single file to an existing `LOCALPATH`, the `docker cp` command +will either overwrite the contents of `LOCALPATH` if it is a file or place it +into `LOCALPATH` if it is a directory, overwriting an existing file of the same +name if one exists. For example, this command: + + $ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /test + +If `/test` does not exist on the local machine, it will be created as a file +with the contents of `/tmp/foo/myfile.txt` from the container. If `/test` +exists as a file, it will be overwritten. Lastly, if `/test` exists as a +directory, the file will be copied to `/test/myfile.txt`. + +Next, suppose you want to copy a file or folder into a container. For example, +this could be a configuration file or some other input to a long running +computation that you would like to place into a created container before it +starts. This is useful because it does not require the configuration file or +other input to exist in the container image. + +If you have a file, `config.yml`, in the current directory on your local host +and wish to copy it to an existing directory at `/etc/my-app.d` in a container, +this command can be used: + + $ docker cp config.yml myappcontainer:/etc/my-app.d + +If you have several files in a local directory `/config` which you need to copy +to a directory `/etc/my-app.d` in a container: + + $ docker cp /config/. myappcontainer:/etc/my-app.d + +The above command will copy the contents of the local `/config` directory into +the directory `/etc/my-app.d` in the container. + +Finally, if you want to copy a symbolic link into a container, you typically +want to copy the linked target and not the link itself. To copy the target, use +the `-L` option, for example: + + $ ln -s /tmp/somefile /tmp/somefile.ln + $ docker cp -L /tmp/somefile.ln myappcontainer:/tmp/ + +This command copies content of the local `/tmp/somefile` into the file +`/tmp/somefile.ln` in the container. Without `-L` option, the `/tmp/somefile.ln` +preserves its symbolic link but not its content. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +May 2015, updated by Josh Hawn diff --git a/vendor/github.com/moby/moby/man/docker-create.1.md b/vendor/github.com/moby/moby/man/docker-create.1.md new file mode 100644 index 0000000..3f8a076 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-create.1.md @@ -0,0 +1,553 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-create - Create a new container + +# SYNOPSIS +**docker create** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**--blkio-weight-device**[=*[]*]] +[**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cgroup-parent**[=*CGROUP-PATH*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpu-count**[=*0*]] +[**--cpu-percent**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpu-rt-period**[=*0*]] +[**--cpu-rt-runtime**[=*0*]] +[**--cpus**[=*0.0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--device**[=*[]*]] +[**--device-read-bps**[=*[]*]] +[**--device-read-iops**[=*[]*]] +[**--device-write-bps**[=*[]*]] +[**--device-write-iops**[=*[]*]] +[**--dns**[=*[]*]] +[**--dns-search**[=*[]*]] +[**--dns-option**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**--group-add**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**--help**] +[**-i**|**--interactive**] +[**--ip**[=*IPv4-ADDRESS*]] +[**--ip6**[=*IPv6-ADDRESS*]] +[**--ipc**[=*IPC*]] +[**--isolation**[=*default*]] +[**--kernel-memory**[=*KERNEL-MEMORY*]] +[**-l**|**--label**[=*[]*]] +[**--label-file**[=*[]*]] +[**--link**[=*[]*]] +[**--link-local-ip**[=*[]*]] +[**--log-driver**[=*[]*]] +[**--log-opt**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] +[**--memory-reservation**[=*MEMORY-RESERVATION*]] +[**--memory-swap**[=*LIMIT*]] +[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] +[**--name**[=*NAME*]] +[**--network-alias**[=*[]*]] +[**--network**[=*"bridge"*]] +[**--oom-kill-disable**] +[**--oom-score-adj**[=*0*]] +[**-P**|**--publish-all**] +[**-p**|**--publish**[=*[]*]] +[**--pid**[=*[PID]*]] +[**--userns**[=*[]*]] +[**--pids-limit**[=*PIDS_LIMIT*]] +[**--privileged**] +[**--read-only**] +[**--restart**[=*RESTART*]] +[**--rm**] +[**--security-opt**[=*[]*]] +[**--storage-opt**[=*[]*]] +[**--stop-signal**[=*SIGNAL*]] +[**--stop-timeout**[=*TIMEOUT*]] +[**--shm-size**[=*[]*]] +[**--sysctl**[=*[]*]] +[**-t**|**--tty**] +[**--tmpfs**[=*[CONTAINER-DIR[:]*]] +[**-u**|**--user**[=*USER*]] +[**--ulimit**[=*[]*]] +[**--uts**[=*[]*]] +[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] +[**--volume-driver**[=*DRIVER*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] +IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Creates a writeable container layer over the specified image and prepares it for +running the specified command. The container ID is then printed to STDOUT. This +is similar to **docker run -d** except the container is never started. You can +then use the **docker start ** command to start the container at +any point. + +The initial status of the container created with **docker create** is 'created'. + +# OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. + +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + +**--blkio-weight**=*0* + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**--blkio-weight-device**=[] + Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). + +**--cpu-shares**=*0* + CPU shares (relative weight) + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cgroup-parent**="" + Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + +**--cidfile**="" + Write the container ID to the file + +**--cpu-count**=*0* + Limit the number of CPUs available for execution by the container. + + On Windows Server containers, this is approximated as a percentage of total CPU usage. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-percent**=*0* + Limit the percentage of CPU available for execution by a container running on a Windows daemon. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-period**=*0* + Limit the CPU CFS (Completely Fair Scheduler) period + + Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. + +**--cpuset-cpus**="" + CPUs in which to allow execution (0-3, 0,1) + +**--cpuset-mems**="" + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + + If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +then processes in your Docker container will only use memory from the first +two memory nodes. + +**--cpu-quota**=*0* + Limit the CPU CFS (Completely Fair Scheduler) quota + +**--cpu-rt-period**=0 + Limit the CPU real-time period in microseconds + + Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. + +**--cpu-rt-runtime**=0 + Limit the CPU real-time runtime in microseconds + + Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: + Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. + + The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. + +**--cpus**=0.0 + Number of CPUs. The default is *0.0*. + +**--device**=[] + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) + +**--device-read-bps**=[] + Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb) + +**--device-read-iops**=[] + Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000) + +**--device-write-bps**=[] + Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb) + +**--device-write-iops**=[] + Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000) + +**--dns**=[] + Set custom DNS servers + +**--dns-option**=[] + Set custom DNS options + +**--dns-search**=[] + Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) + +**-e**, **--env**=[] + Set environment variables + +**--entrypoint**="" + Overwrite the default ENTRYPOINT of the image + +**--env-file**=[] + Read in a line-delimited file of environment variables + +**--expose**=[] + Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host + +**--group-add**=[] + Add additional groups to run as + +**-h**, **--hostname**="" + Container host name + +**--help** + Print usage statement + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + +**--ip**="" + Sets the container's interface IPv4 address (e.g. 172.23.0.9) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ip6**="" + Sets the container's interface IPv6 address (e.g. 2001:db8::1b99) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. Note +that the default on Windows server is `process`, and the default on Windows client +is `hyperv`. Linux only supports `default`. + +**--kernel-memory**="" + Kernel memory limit (format: `[]`, where unit = b, k, m or g) + + Constrains the kernel memory available to a container. If a limit of 0 +is specified (not using `--kernel-memory`), the container's kernel memory +is not limited. If you specify a limit, it may be rounded up to a multiple +of the operating system's page size and the value can be very large, +millions of trillions. + +**-l**, **--label**=[] + Adds metadata to a container (e.g., --label=com.example.key=value) + +**--label-file**=[] + Read labels from a file. Delimit each label with an EOL. + +**--link**=[] + Add link to another container in the form of :alias or just + in which case the alias will match the name. + +**--link-local-ip**=[] + Add one or more link-local IPv4/IPv6 addresses to the container's interface + +**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" + Logging driver for the container. Default is defined by daemon `--log-driver` flag. + **Warning**: the `docker logs` command works only for the `json-file` and + `journald` logging drivers. + +**--log-opt**=[] + Logging driver specific options. + +**-m**, **--memory**="" + Memory limit (format: [], where unit = b, k, m or g) + + Allows you to constrain the memory available to a container. If the host +supports swap memory, then the **-m** memory setting can be larger than physical +RAM. If a limit of 0 is specified (not using **-m**), the container's memory is +not limited. The actual limit may be rounded up to a multiple of the operating +system's page size (the value would be very large, that's millions of trillions). + +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + +**--memory-reservation**="" + Memory soft limit (format: [], where unit = b, k, m or g) + + After setting memory reservation, when the system detects memory contention +or low memory, containers are forced to restrict their consumption to their +reservation. So you should always set the value below **--memory**, otherwise the +hard limit will take precedence. By default, memory reservation will be the same +as memory limit. + +**--memory-swap**="LIMIT" + A limit value equal to memory plus swap. Must be used with the **-m** +(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** +(**--memory**) value. + + The format of `LIMIT` is `[]`. Unit can be `b` (bytes), +`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a +unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. + +**--memory-swappiness**="" + Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + +**--name**="" + Assign a name to the container + +**--network**="*bridge*" + Set the Network mode for the container + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. + '|': connect to a user-defined network + +**--network-alias**=[] + Add network-scoped alias for the container + +**--oom-kill-disable**=*true*|*false* + Whether to disable OOM Killer for the container or not. + +**--oom-score-adj**="" + Tune the host's OOM preferences for containers (accepts -1000 to 1000) + +**-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to random ports on the host interfaces. The default is *false*. + +**-p**, **--publish**=[] + Publish a container's port, or a range of ports, to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + Both hostPort and containerPort can be specified as a range of ports. + When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`) + (use 'docker port' to see the actual mapping) + +**--pid**="" + Set the PID mode for the container + Default is to create a private PID namespace for the container + 'container:': join another container's PID namespace + 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. + +**--userns**="" + Set the usernamespace mode for the container when `userns-remap` option is enabled. + **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`). + +**--pids-limit**="" + Tune the container's pids limit. Set `-1` to have unlimited pids for the container. + +**--privileged**=*true*|*false* + Give extended privileges to this container. The default is *false*. + +**--read-only**=*true*|*false* + Mount the container's root filesystem as read only. + +**--restart**="*no*" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). + +**--rm**=*true*|*false* + Automatically remove the container when it exits. The default is *false*. + +**--shm-size**="" + Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. + If you omit the size entirely, the system uses `64m`. + +**--security-opt**=[] + Security Options + + "label:user:USER" : Set the label user for the container + "label:role:ROLE" : Set the label role for the container + "label:type:TYPE" : Set the label type for the container + "label:level:LEVEL" : Set the label level for the container + "label:disable" : Turn off label confinement for the container + "no-new-privileges" : Disable container processes from gaining additional privileges + "seccomp:unconfined" : Turn off seccomp confinement for the container + "seccomp:profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter + +**--storage-opt**=[] + Storage driver options per container + + $ docker create -it --storage-opt size=120G fedora /bin/bash + + This (size) will allow to set the container rootfs size to 120G at creation time. + This option is only available for the `devicemapper`, `btrfs`, `overlay2` and `zfs` graph drivers. + For the `devicemapper`, `btrfs` and `zfs` storage drivers, user cannot pass a size less than the Default BaseFS Size. + For the `overlay2` storage driver, the size option is only available if the backing fs is `xfs` and mounted with the `pquota` mount option. + Under these conditions, user can pass any size less then the backing fs size. + +**--stop-signal**=*SIGTERM* + Signal to stop a container. Default is SIGTERM. + +**--stop-timeout**=*10* + Timeout (in seconds) to stop a container. Default is 10. + +**--sysctl**=SYSCTL + Configure namespaced kernel parameters at runtime + + IPC Namespace - current sysctls allowed: + + kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced + Sysctls beginning with fs.mqueue.* + + Note: if you use --ipc=host using these sysctls will not be allowed. + + Network Namespace - current sysctls allowed: + Sysctls beginning with net.* + + Note: if you use --network=host using these sysctls will not be allowed. + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + +**--tmpfs**=[] Create a tmpfs mount + + Mount a temporary filesystem (`tmpfs`) mount into a container, for example: + + $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image + + This command mounts a `tmpfs` at `/tmp` within the container. The supported mount +options are the same as the Linux default `mount` flags. If you do not specify +any options, the systems uses the following options: +`rw,noexec,nosuid,nodev,size=65536k`. + +**-u**, **--user**="" + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument root user will be used in the container by default. + +**--ulimit**=[] + Ulimit options + +**--uts**=*host* + Set the UTS mode for the container + **host**: use the host's UTS namespace inside the container. + Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. + +**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] + Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker + bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker + container. If 'HOST-DIR' is omitted, Docker automatically creates the new + volume on the host. The `OPTIONS` are a comma delimited list and can be: + + * [rw|ro] + * [z|Z] + * [`[r]shared`|`[r]slave`|`[r]private`] + +The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` +can be an absolute path or a `name` value. A `name` value must start with an +alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or +`-` (hyphen). An absolute path starts with a `/` (forward slash). + +If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the +path you specify. If you supply a `name`, Docker creates a named volume by that +`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` +value. If you supply the `/foo` value, Docker creates a bind-mount. If you +supply the `foo` specification, Docker creates a named volume. + +You can specify multiple **-v** options to mount one or more mounts to a +container. To use these same mounts in other containers, specify the +**--volumes-from** option also. + +You can add `:ro` or `:rw` suffix to a volume to mount it read-only or +read-write mode, respectively. By default, the volumes are mounted read-write. +See examples. + +Labeling systems like SELinux require that proper labels are placed on volume +content mounted into a container. Without a label, the security system might +prevent the processes running inside the container from using the content. By +default, Docker does not change the labels set by the OS. + +To change a label in the container context, you can add either of two suffixes +`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file +objects on the shared volumes. The `z` option tells Docker that two containers +share the volume content. As a result, Docker labels the content with a shared +content label. Shared volume labels allow all containers to read/write content. +The `Z` option tells Docker to label the content with a private unshared label. +Only the current container can use a private volume. + +By default bind mounted volumes are `private`. That means any mounts done +inside container will not be visible on host and vice-a-versa. One can change +this behavior by specifying a volume mount propagation property. Making a +volume `shared` mounts done under that volume inside container will be +visible on host and vice-a-versa. Making a volume `slave` enables only one +way mount propagation and that is mounts done on host under that volume +will be visible inside container but not the other way around. + +To control mount propagation property of volume one can use `:[r]shared`, +`:[r]slave` or `:[r]private` propagation flag. Propagation property can +be specified only for bind mounted volumes and not for internal volumes or +named volumes. For mount propagation to work source mount point (mount point +where source dir is mounted on) has to have right propagation properties. For +shared volumes, source mount point has to be shared. And for slave volumes, +source mount has to be either shared or slave. + +Use `df ` to figure out the source mount and then use +`findmnt -o TARGET,PROPAGATION ` to figure out propagation +properties of source mount. If `findmnt` utility is not available, then one +can look at mount entry for source mount point in `/proc/self/mountinfo`. Look +at `optional fields` and see if any propagaion properties are specified. +`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if +nothing is there that means mount is `private`. + +To change propagation properties of a mount point use `mount` command. For +example, if one wants to bind mount source directory `/foo` one can do +`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This +will convert /foo into a `shared` mount point. Alternatively one can directly +change propagation properties of source mount. Say `/` is source mount for +`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. + +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. The value of this setting may cause Docker to not +> see mount propagation changes made on the mount point. For example, if this value +> is `slave`, you may not be able to use the `shared` or `rshared` propagation on +> a volume. + + +To disable automatic copying of data from the container path to the volume, use +the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. + +**--volume-driver**="" + Container's volume driver. This driver creates volumes specified either from + a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. + See **docker-volume-create(1)** for full details. + +**--volumes-from**=[] + Mount volumes from the specified container(s) + +**-w**, **--workdir**="" + Working directory inside the container + +# EXAMPLES + +## Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + +* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. +* `process`: Namespace isolation only. +* `hyperv`: Hyper-V hypervisor partition-based isolation. + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + +# HISTORY +August 2014, updated by Sven Dowideit +September 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-diff.1.md b/vendor/github.com/moby/moby/man/docker-diff.1.md new file mode 100644 index 0000000..3342ad1 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-diff.1.md @@ -0,0 +1,61 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-diff - Inspect changes to files or directories on a container's filesystem + +# SYNOPSIS +**docker diff** +[**--help**] +CONTAINER + +# DESCRIPTION +List the changed files and directories in a container᾿s filesystem since the +container was created. Three different types of change are tracked: + +| Symbol | Description | +|--------|---------------------------------| +| `A` | A file or directory was added | +| `D` | A file or directory was deleted | +| `C` | A file or directory was changed | + +You can use the full or shortened container ID or the container name set using +**docker run --name** option. + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + +Inspect the changes to an `nginx` container: + +```bash +$ docker diff 1fdfd1f54c1b + +C /dev +C /dev/console +C /dev/core +C /dev/stdout +C /dev/fd +C /dev/ptmx +C /dev/stderr +C /dev/stdin +C /run +A /run/nginx.pid +C /var/lib/nginx/tmp +A /var/lib/nginx/tmp/client_body +A /var/lib/nginx/tmp/fastcgi +A /var/lib/nginx/tmp/proxy +A /var/lib/nginx/tmp/scgi +A /var/lib/nginx/tmp/uwsgi +C /var/log/nginx +A /var/log/nginx/access.log +A /var/log/nginx/error.log +``` + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-events.1.md b/vendor/github.com/moby/moby/man/docker-events.1.md new file mode 100644 index 0000000..51b0427 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-events.1.md @@ -0,0 +1,180 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-events - Get real time events from the server + +# SYNOPSIS +**docker events** +[**--help**] +[**-f**|**--filter**[=*[]*]] +[**--since**[=*SINCE*]] +[**--until**[=*UNTIL*]] +[**--format**[=*FORMAT*]] + + +# DESCRIPTION +Get event information from the Docker daemon. Information can include historical +information and real-time information. + +Docker containers will report the following events: + + attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, load, pull, push, save, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--filter**=[] + Filter output based on these conditions + - container (`container=`) + - event (`event=`) + - image (`image=`) + - plugin (experimental) (`plugin=`) + - label (`label=` or `label==`) + - type (`type=`) + - volume (`volume=`) + - network (`network=`) + - daemon (`daemon=`) + +**--since**="" + Show all events created since timestamp + +**--until**="" + Stream events until this timestamp + +**--format**="" + Format the output using the given Go template + +The `--since` and `--until` parameters can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the client machine's time. If you do not provide the `--since` option, +the command returns only new and/or live events. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +# EXAMPLES + +## Listening for Docker events + +After running docker events a container 786d698004576 is started and stopped +(The container name has been shortened in the output below): + + # docker events + 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) start + 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) die + 2015-01-28T20:21:32.000000000-08:00 59211849bc10: (from whenry/testimage:latest) stop + +## Listening for events since a given date +Again the output container IDs have been shortened for the purposes of this document: + + # docker events --since '2015-01-28' + 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create + 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start + 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create + 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start + 2015-01-28T20:25:40.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die + 2015-01-28T20:25:42.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop + 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start + 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die + 2015-01-28T20:25:46.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop + +The following example outputs all events that were generated in the last 3 minutes, +relative to the current time on the client machine: + + # docker events --since '3m' + 2015-05-12T11:51:30.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2015-05-12T15:52:12.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2015-05-12T15:53:45.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2015-05-12T15:54:03.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + +If you do not provide the --since option, the command returns only new and/or +live events. + +## Format + +If a format (`--format`) is specified, the given template will be executed +instead of the default format. Go's **text/template** package describes all the +details of the format. + + # docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' + Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + +If a format is set to `{{json .}}`, the events are streamed as valid JSON +Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . + + # docker events --format '{{json .}}' + {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. + {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. + {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + +## Filters + + $ docker events --filter 'event=stop' + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'image=ubuntu-1:14.04' + 2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + + $ docker events --filter 'container=7805c1d35632' + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' + 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'event=stop' + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'type=volume' + 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) + 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) + 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) + 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + + $ docker events --filter 'type=network' + 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) + 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) + + $ docker events --filter 'type=plugin' (experimental) + 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +June 2015, updated by Brian Goff +October 2015, updated by Mike Brown diff --git a/vendor/github.com/moby/moby/man/docker-exec.1.md b/vendor/github.com/moby/moby/man/docker-exec.1.md new file mode 100644 index 0000000..fe9c279 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-exec.1.md @@ -0,0 +1,71 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-exec - Run a command in a running container + +# SYNOPSIS +**docker exec** +[**-d**|**--detach**] +[**--detach-keys**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--help**] +[**-i**|**--interactive**] +[**--privileged**] +[**-t**|**--tty**] +[**-u**|**--user**[=*USER*]] +CONTAINER COMMAND [ARG...] + +# DESCRIPTION + +Run a process in a running container. + +The command started using `docker exec` will only run while the container's primary +process (`PID 1`) is running, and will not be restarted if the container is restarted. + +If the container is paused, then the `docker exec` command will wait until the +container is unpaused, and then run + +# OPTIONS +**-d**, **--detach**=*true*|*false* + Detached mode: run command in the background. The default is *false*. + +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**-e**, **--env**=[] + Set environment variables + + This option allows you to specify arbitrary environment variables that are +available for the command to be executed. + +**--help** + Print usage statement + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + +**--privileged**=*true*|*false* + Give the process extended [Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) +when running in a container. The default is *false*. + + Without this flag, the process run by `docker exec` in a running container has +the same capabilities as the container, which may be limited. Set +`--privileged` to give all capabilities to the process. + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + +**-u**, **--user**="" + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument the command will be run as root in the container. + +The **-t** option is incompatible with a redirection of the docker client +standard input. + +# HISTORY +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-export.1.md b/vendor/github.com/moby/moby/man/docker-export.1.md new file mode 100644 index 0000000..3d59e47 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-export.1.md @@ -0,0 +1,46 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-export - Export the contents of a container's filesystem as a tar archive + +# SYNOPSIS +**docker export** +[**--help**] +[**-o**|**--output**[=*""*]] +CONTAINER + +# DESCRIPTION +Export the contents of a container's filesystem using the full or shortened +container ID or container name. The output is exported to STDOUT and can be +redirected to a tar file. + +Stream to a file instead of STDOUT by using **-o**. + +# OPTIONS +**--help** + Print usage statement + +**-o**, **--output**="" + Write to a file, instead of STDOUT + +# EXAMPLES +Export the contents of the container called angry_bell to a tar file +called angry_bell.tar: + + # docker export angry_bell > angry_bell.tar + # docker export --output=angry_bell-latest.tar angry_bell + # ls -sh angry_bell.tar + 321M angry_bell.tar + # ls -sh angry_bell-latest.tar + 321M angry_bell-latest.tar + +# See also +**docker-import(1)** to create an empty filesystem image +and import the contents of the tarball into it, then optionally tag it. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +January 2015, updated by Joseph Kern (josephakern at gmail dot com) diff --git a/vendor/github.com/moby/moby/man/docker-history.1.md b/vendor/github.com/moby/moby/man/docker-history.1.md new file mode 100644 index 0000000..91edefe --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-history.1.md @@ -0,0 +1,52 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-history - Show the history of an image + +# SYNOPSIS +**docker history** +[**--help**] +[**-H**|**--human**[=*true*]] +[**--no-trunc**] +[**-q**|**--quiet**] +IMAGE + +# DESCRIPTION + +Show the history of when and how an image was created. + +# OPTIONS +**--help** + Print usage statement + +**-H**, **--human**=*true*|*false* + Print sizes and dates in human readable format. The default is *true*. + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only show numeric IDs. The default is *false*. + +# EXAMPLES + $ docker history fedora + IMAGE CREATED CREATED BY SIZE COMMENT + 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB + 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B + 511136ea3c5a 10 months ago 0 B Imported from - + +## Display comments in the image history +The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history. + + $ sudo docker history docker:scm + IMAGE CREATED CREATED BY SIZE COMMENT + 2ac9d1098bf1 3 months ago /bin/bash 241.4 MB Added Apache to Fedora base image + 88b42ffd1f7c 5 months ago /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7 373.7 MB + c69cab00d6ef 5 months ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B + 511136ea3c5a 19 months ago 0 B Imported from - + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-images.1.md b/vendor/github.com/moby/moby/man/docker-images.1.md new file mode 100644 index 0000000..521cbea --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-images.1.md @@ -0,0 +1,154 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-images - List images + +# SYNOPSIS +**docker images** +[**--help**] +[**-a**|**--all**] +[**--digests**] +[**-f**|**--filter**[=*[]*]] +[**--format**=*"TEMPLATE"*] +[**--no-trunc**] +[**-q**|**--quiet**] +[REPOSITORY[:TAG]] + +# DESCRIPTION +This command lists the images stored in the local Docker repository. + +By default, intermediate images, used during builds, are not listed. Some of the +output, e.g., image ID, is truncated, for space reasons. However the truncated +image ID, and often the first few characters, are enough to be used in other +Docker commands that use the image ID. The output includes repository, tag, image +ID, date created and the virtual size. + +The title REPOSITORY for the first title may seem confusing. It is essentially +the image name. However, because you can tag a specific image, and multiple tags +(image instances) can be associated with a single name, the name is really a +repository for all tagged images of the same name. For example consider an image +called fedora. It may be tagged with 18, 19, or 20, etc. to manage different +versions. + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all images (by default filter out the intermediate image layers). The default is *false*. + +**--digests**=*true*|*false* + Show image digests. The default is *false*. + +**-f**, **--filter**=[] + Filters the output based on these conditions: + - dangling=(true|false) - find unused images + - label= or label== + - before=([:tag]||) + - since=([:tag]||) + - reference=(pattern of an image reference) + +**--format**="*TEMPLATE*" + Pretty-print images using a Go template. + Valid placeholders: + .ID - Image ID + .Repository - Image repository + .Tag - Image tag + .Digest - Image digest + .CreatedSince - Elapsed time since the image was created + .CreatedAt - Time when the image was created + .Size - Image disk size + +**--help** + Print usage statement + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only show numeric IDs. The default is *false*. + +# EXAMPLES + +## Listing the images + +To list the images in a local repository (not the registry) run: + + docker images + +The list will contain the image repository name, a tag for the image, and an +image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, +IMAGE ID, CREATED, and SIZE. + +The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument +that restricts the list to images that match the argument. If you specify +`REPOSITORY`but no `TAG`, the `docker images` command lists all images in the +given repository. + + docker images java + +The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, +`docker images jav` does not match the image `java`. + +If both `REPOSITORY` and `TAG` are provided, only images matching that +repository and tag are listed. To find all local images in the "java" +repository with tag "8" you can use: + + docker images java:8 + +To get a verbose list of images which contains all the intermediate images +used in builds use **-a**: + + docker images -a + +Previously, the docker images command supported the --tree and --dot arguments, +which displayed different visualizations of the image data. Docker core removed +this functionality in the 1.7 version. If you liked this functionality, you can +still find it in the third-party dockviz tool: https://github.com/justone/dockviz. + +## Listing images in a desired format + +When using the --format option, the image command will either output the data +exactly as the template declares or, when using the `table` directive, will +include column headers as well. You can use special characters like `\t` for +inserting tab spacing between columns. + +The following example uses a template without headers and outputs the ID and +Repository entries separated by a colon for all images: + + docker images --format "{{.ID}}: {{.Repository}}" + 77af4d6b9913: + b6fa739cedf5: committ + 78a85c484bad: ipbabble + 30557a29d5ab: docker + 5ed6274db6ce: + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + +To list all images with their repository and tag in a table format you can use: + + docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" + IMAGE ID REPOSITORY TAG + 77af4d6b9913 + b6fa739cedf5 committ latest + 78a85c484bad ipbabble + 30557a29d5ab docker latest + 5ed6274db6ce + 746b819f315e postgres 9 + 746b819f315e postgres 9.3 + 746b819f315e postgres 9.3.5 + 746b819f315e postgres latest + +Valid template placeholders are listed above. + +## Listing only the shortened image IDs + +Listing just the shortened image IDs. This can be useful for some automated +tools. + + docker images -q + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-import.1.md b/vendor/github.com/moby/moby/man/docker-import.1.md new file mode 100644 index 0000000..43d65ef --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-import.1.md @@ -0,0 +1,72 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. + +# SYNOPSIS +**docker import** +[**-c**|**--change**[=*[]*]] +[**-m**|**--message**[=*MESSAGE*]] +[**--help**] +file|URL|**-**[REPOSITORY[:TAG]] + +# OPTIONS +**-c**, **--change**=[] + Apply specified Dockerfile instructions while importing the image + Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +**--help** + Print usage statement + +**-m**, **--message**="" + Set commit message for imported image + +# DESCRIPTION +Create a new filesystem image from the contents of a tarball (`.tar`, +`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it. + + +# EXAMPLES + +## Import from a remote location + + # docker import http://example.com/exampleimage.tgz example/imagerepo + +## Import from a local file + +Import to docker via pipe and stdin: + + # cat exampleimage.tgz | docker import - example/imagelocal + +Import with a commit message. + + # cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new + +Import to a Docker image from a local file. + + # docker import /path/to/exampleimage.tgz + + +## Import from a local file and tag + +Import to docker via pipe and stdin: + + # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0 + +## Import from a local directory + + # tar -c . | docker import - exampleimagedir + +## Apply specified Dockerfile instructions while importing the image +This example sets the docker image ENV variable DEBUG to true by default. + + # tar -c . | docker import -c="ENV DEBUG true" - exampleimagedir + +# See also +**docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-info.1.md b/vendor/github.com/moby/moby/man/docker-info.1.md new file mode 100644 index 0000000..bb7a8fb --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-info.1.md @@ -0,0 +1,187 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-info - Display system-wide information + +# SYNOPSIS +**docker info** +[**--help**] +[**-f**|**--format**[=*FORMAT*]] + +# DESCRIPTION +This command displays system wide information regarding the Docker installation. +Information displayed includes the kernel version, number of containers and images. +The number of images shown is the number of unique images. The same image tagged +under different names is counted only once. + +If a format is specified, the given template will be executed instead of the +default format. Go's **text/template** package +describes all the details of the format. + +Depending on the storage driver in use, additional information can be shown, such +as pool name, data file, metadata file, data space used, total data space, metadata +space used, and total metadata space. + +The data file is where the images are stored and the metadata file is where the +meta data regarding those images are stored. When run for the first time Docker +allocates a certain amount of data space and meta data space from the space +available on the volume where `/var/lib/docker` is mounted. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--format**="" + Format the output using the given Go template + +# EXAMPLES + +## Display Docker system information + +Here is a sample output for a daemon running on Ubuntu, using the overlay2 +storage driver: + + $ docker -D info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Images: 52 + Server Version: 1.13.0 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Native Overlay Diff: false + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host macvlan null overlay + Swarm: active + NodeID: rdjq45w1op418waxlairloqbm + Is Manager: true + ClusterID: te8kdyw33n36fqiz74bfjeixd + Managers: 1 + Nodes: 2 + Orchestration: + Task History Retention Limit: 5 + Raft: + Snapshot Interval: 10000 + Number of Old Snapshots to Retain: 0 + Heartbeat Tick: 1 + Election Tick: 3 + Dispatcher: + Heartbeat Period: 5 seconds + CA Configuration: + Expiry Duration: 3 months + Node Address: 172.16.66.128 172.16.66.129 + Manager Addresses: + 172.16.66.128:2477 + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 + runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 + init version: N/A (expected: v0.13.0) + Security Options: + apparmor + seccomp + Profile: default + Kernel Version: 4.4.0-31-generic + Operating System: Ubuntu 16.04.1 LTS + OSType: linux + Architecture: x86_64 + CPUs: 2 + Total Memory: 1.937 GiB + Name: ubuntu + ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 + Docker Root Dir: /var/lib/docker + Debug Mode (client): true + Debug Mode (server): true + File Descriptors: 30 + Goroutines: 123 + System Time: 2016-11-12T17:24:37.955404361-08:00 + EventsListeners: 0 + Http Proxy: http://test:test@proxy.example.com:8080 + Https Proxy: https://test:test@proxy.example.com:8080 + No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com + Registry: https://index.docker.io/v1/ + WARNING: No swap limit support + Labels: + storage=ssd + staging=true + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ + Live Restore Enabled: false + + + +The global `-D` option tells all `docker` commands to output debug information. + +The example below shows the output for a daemon running on Red Hat Enterprise Linux, +using the devicemapper storage driver. As can be seen in the output, additional +information about the devicemapper storage driver is shown: + + $ docker info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Untagged Images: 52 + Server Version: 1.10.3 + Storage Driver: devicemapper + Pool Name: docker-202:2-25583803-pool + Pool Blocksize: 65.54 kB + Base Device Size: 10.74 GB + Backing Filesystem: xfs + Data file: /dev/loop0 + Metadata file: /dev/loop1 + Data Space Used: 1.68 GB + Data Space Total: 107.4 GB + Data Space Available: 7.548 GB + Metadata Space Used: 2.322 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.145 GB + Udev Sync Supported: true + Deferred Removal Enabled: false + Deferred Deletion Enabled: false + Deferred Deleted Device Count: 0 + Data loop file: /var/lib/docker/devicemapper/devicemapper/data + Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.107-RHEL7 (2015-12-01) + Execution Driver: native-0.2 + Logging Driver: json-file + Plugins: + Volume: local + Network: null host bridge + Kernel Version: 3.10.0-327.el7.x86_64 + Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) + OSType: linux + Architecture: x86_64 + CPUs: 1 + Total Memory: 991.7 MiB + Name: ip-172-30-0-91.ec2.internal + ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S + Docker Root Dir: /var/lib/docker + Debug mode (client): false + Debug mode (server): false + Username: gordontheturtle + Registry: https://index.docker.io/v1/ + Insecure registries: + myinsecurehost:5000 + 127.0.0.0/8 + +You can also specify the output format: + + $ docker info --format '{{json .}}' + {"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-inspect.1.md b/vendor/github.com/moby/moby/man/docker-inspect.1.md new file mode 100644 index 0000000..21d7ba6 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-inspect.1.md @@ -0,0 +1,323 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-inspect - Return low-level information on docker objects + +# SYNOPSIS +**docker inspect** +[**--help**] +[**-f**|**--format**[=*FORMAT*]] +[**-s**|**--size**] +[**--type**=*container*|*image*|*network*|*node*|*service*|*task*|*volume*] +NAME|ID [NAME|ID...] + +# DESCRIPTION + +This displays the low-level information on Docker object(s) (e.g. container, +image, volume,network, node, service, or task) identified by name or ID. By default, +this will render all results in a JSON array. If the container and image have +the same name, this will return container JSON for unspecified type. If a format +is specified, the given template will be executed for each result. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--format**="" + Format the output using the given Go template + +**-s**, **--size** + Display total file sizes if the type is container + +**--type**=*container*|*image*|*network*|*node*|*service*|*task*|*volume* + Return JSON for specified type, permissible values are "image", "container", + "network", "node", "service", "task", and "volume" + +# EXAMPLES + +Get information about an image when image name conflicts with the container name, +e.g. both image and container are named rhel7: + + $ docker inspect --type=image rhel7 + [ + { + "Id": "fe01a428b9d9de35d29531e9994157978e8c48fa693e1bf1d221dffbbb67b170", + "Parent": "10acc31def5d6f249b548e01e8ffbaccfd61af0240c17315a7ad393d022c5ca2", + .... + } + ] + +## Getting information on a container + +To get information on a container use its ID or instance name: + + $ docker inspect d2cc496561d6 + [{ + "Id": "d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", + "Created": "2015-06-08T16:18:02.505155285Z", + "Path": "bash", + "Args": [], + "State": { + "Running": false, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 0, + "ExitCode": 0, + "Error": "", + "StartedAt": "2015-06-08T16:18:03.643865954Z", + "FinishedAt": "2015-06-08T16:57:06.448552862Z" + }, + "Image": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "6b4851d1903e16dd6a567bd526553a86664361f31036eaaa2f8454d6f4611f6f", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": {}, + "SandboxKey": "/var/run/docker/netns/6b4851d1903e", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "MacAddress": "02:42:ac:12:00:02", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + + }, + "ResolvConfPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hostname", + "HostsPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hosts", + "LogPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47-json.log", + "Name": "/adoring_wozniak", + "RestartCount": 0, + "Driver": "devicemapper", + "MountLabel": "", + "ProcessLabel": "", + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + "Propagation": "" + } + ], + "AppArmorProfile": "", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "CpuPeriod": 0, + "CpusetCpus": "", + "CpusetMems": "", + "CpuQuota": 0, + "BlkioWeight": 0, + "OomKillDisable": false, + "Privileged": false, + "PortBindings": {}, + "Links": null, + "PublishAllPorts": false, + "Dns": null, + "DnsSearch": null, + "DnsOptions": null, + "ExtraHosts": null, + "VolumesFrom": null, + "Devices": [], + "NetworkMode": "bridge", + "IpcMode": "", + "PidMode": "", + "UTSMode": "", + "CapAdd": null, + "CapDrop": null, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "SecurityOpt": null, + "ReadonlyRootfs": false, + "Ulimits": null, + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + "CgroupParent": "" + }, + "GraphDriver": { + "Name": "devicemapper", + "Data": { + "DeviceId": "5", + "DeviceName": "docker-253:1-2763198-d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", + "DeviceSize": "171798691840" + } + }, + "Config": { + "Hostname": "d2cc496561d6", + "Domainname": "", + "User": "", + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "ExposedPorts": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": true, + "Env": null, + "Cmd": [ + "bash" + ], + "Image": "fedora", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "Cpuset": "", + "StopSignal": "SIGTERM" + } + } + ] +## Getting the IP address of a container instance + +To get the IP address of a container use: + + $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' d2cc496561d6 + 172.17.0.2 + +## Listing all port bindings + +One can loop over arrays and maps in the results to produce simple text +output: + + $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ + {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' d2cc496561d6 + 80/tcp -> 80 + +You can get more information about how to write a Go template from: +https://golang.org/pkg/text/template/. + +## Getting size information on a container + + $ docker inspect -s d2cc496561d6 + [ + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + ] + +## Getting information on an image + +Use an image's ID or name (e.g., repository/name[:tag]) to get information +about the image: + + $ docker inspect ded7cd95e059 + [{ + "Id": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "Parent": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Comment": "", + "Created": "2015-05-27T16:58:22.937503085Z", + "Container": "76cf7f67d83a7a047454b33007d03e32a8f474ad332c3a03c94537edd22b312b", + "ContainerConfig": { + "Hostname": "76cf7f67d83a", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ADD file:4be46382bcf2b095fcb9fe8334206b584eff60bb3fad8178cbd97697fcb2ea83 in /" + ], + "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {} + }, + "DockerVersion": "1.6.0", + "Author": "Lokesh Mandvekar \u003clsm5@fedoraproject.org\u003e", + "Config": { + "Hostname": "76cf7f67d83a", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": null, + "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {} + }, + "Architecture": "amd64", + "Os": "linux", + "Size": 186507296, + "VirtualSize": 186507296, + "GraphDriver": { + "Name": "devicemapper", + "Data": { + "DeviceId": "3", + "DeviceName": "docker-253:1-2763198-ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "DeviceSize": "171798691840" + } + } + } + ] + +# HISTORY +April 2014, originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Qiang Huang +October 2015, updated by Sally O'Malley diff --git a/vendor/github.com/moby/moby/man/docker-kill.1.md b/vendor/github.com/moby/moby/man/docker-kill.1.md new file mode 100644 index 0000000..36cbdb9 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-kill.1.md @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-kill - Kill a running container using SIGKILL or a specified signal + +# SYNOPSIS +**docker kill** +[**--help**] +[**-s**|**--signal**[=*"KILL"*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The main process inside each container specified will be sent SIGKILL, + or any signal specified with option --signal. + +# OPTIONS +**--help** + Print usage statement + +**-s**, **--signal**="*KILL*" + Signal to send to the container + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) + based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-load.1.md b/vendor/github.com/moby/moby/man/docker-load.1.md new file mode 100644 index 0000000..b165173 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-load.1.md @@ -0,0 +1,56 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-load - Load an image from a tar archive or STDIN + +# SYNOPSIS +**docker load** +[**--help**] +[**-i**|**--input**[=*INPUT*]] +[**-q**|**--quiet**] + +# DESCRIPTION + +Loads a tarred repository from a file or the standard input stream. +Restores both images and tags. Write image names or IDs imported it +standard output stream. + +# OPTIONS +**--help** + Print usage statement + +**-i**, **--input**="" + Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz. + +**-q**, **--quiet** + Suppress the load progress bar but still outputs the imported images. + +# EXAMPLES + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + $ docker load --input fedora.tar + # […] + Loaded image: fedora:rawhide + # […] + Loaded image: fedora:20 + # […] + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + fedora rawhide 0d20aec6529d 7 weeks ago 387 MB + fedora 20 58394af37342 7 weeks ago 385.5 MB + fedora heisenbug 58394af37342 7 weeks ago 385.5 MB + fedora latest 58394af37342 7 weeks ago 385.5 MB + +# See also +**docker-save(1)** to save one or more images to a tar archive (streamed to STDOUT by default). + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2015 update by Mary Anthony +June 2016 update by Vincent Demeester diff --git a/vendor/github.com/moby/moby/man/docker-login.1.md b/vendor/github.com/moby/moby/man/docker-login.1.md new file mode 100644 index 0000000..2eee40e --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-login.1.md @@ -0,0 +1,53 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-login - Log in to a Docker registry. + +# SYNOPSIS +**docker login** +[**--help**] +[**-p**|**--password**[=*PASSWORD*]] +[**-u**|**--username**[=*USERNAME*]] +[SERVER] + +# DESCRIPTION +Log in to a Docker Registry located on the specified +`SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you +do not specify a `SERVER`, the command uses Docker's public registry located at +`https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub. + +`docker login` requires user to use `sudo` or be `root`, except when: + +1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. +2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/engine/security/security/#/docker-daemon-attack-surface) for details. + +You can log into any public or private repository for which you have +credentials. When you log in, the command stores encoded credentials in +`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. + +# OPTIONS +**--help** + Print usage statement + +**-p**, **--password**="" + Password + +**-u**, **--username**="" + Username + +# EXAMPLES + +## Login to a registry on your localhost + + # docker login localhost:8080 + +# See also +**docker-logout(1)** to log out from a Docker registry. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +November 2015, updated by Sally O'Malley diff --git a/vendor/github.com/moby/moby/man/docker-logout.1.md b/vendor/github.com/moby/moby/man/docker-logout.1.md new file mode 100644 index 0000000..a8a4b7c --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-logout.1.md @@ -0,0 +1,32 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logout - Log out from a Docker registry. + +# SYNOPSIS +**docker logout** +[SERVER] + +# DESCRIPTION +Log out of a Docker Registry located on the specified `SERVER`. You can +specify a URL or a `hostname` for the `SERVER` value. If you do not specify a +`SERVER`, the command attempts to log you out of Docker's public registry +located at `https://registry-1.docker.io/` by default. + +# OPTIONS +There are no available options. + +# EXAMPLES + +## Log out from a registry on your localhost + + # docker logout localhost:8080 + +# See also +**docker-login(1)** to log in to a Docker registry server. + +# HISTORY +June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) +July 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 diff --git a/vendor/github.com/moby/moby/man/docker-logs.1.md b/vendor/github.com/moby/moby/man/docker-logs.1.md new file mode 100644 index 0000000..e70f796 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-logs.1.md @@ -0,0 +1,71 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logs - Fetch the logs of a container + +# SYNOPSIS +**docker logs** +[**-f**|**--follow**] +[**--help**] +[**--since**[=*SINCE*]] +[**-t**|**--timestamps**] +[**--tail**[=*"all"*]] +CONTAINER + +# DESCRIPTION +The **docker logs** command batch-retrieves whatever logs are present for +a container at the time of execution. This does not guarantee execution +order when combined with a docker run (i.e., your run may not have generated +any logs at the time you execute docker logs). + +The **docker logs --follow** command combines commands **docker logs** and +**docker attach**. It will first return all logs from the beginning and +then continue streaming new output from the container's stdout and stderr. + +**Warning**: This command works only for the **json-file** or **journald** +logging drivers. + +# OPTIONS +**--help** + Print usage statement + +**--details**=*true*|*false* + Show extra details provided to logs + +**-f**, **--follow**=*true*|*false* + Follow log output. The default is *false*. + +**--since**="" + Show logs since timestamp + +**-t**, **--timestamps**=*true*|*false* + Show timestamps. The default is *false*. + +**--tail**="*all*" + Output the specified number of lines at the end of logs (defaults to all logs) + +The `--since` option can be Unix timestamps, date formatted timestamps, or Go +duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine's +time. Supported formats for date formatted time stamps include RFC3339Nano, +RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, +`2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be +used if you do not provide either a `Z` or a `+-00:00` timezone offset at the +end of the timestamp. When providing Unix timestamps enter +seconds[.nanoseconds], where seconds is the number of seconds that have elapsed +since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix +epoch or Unix time), and the optional .nanoseconds field is a fraction of a +second no more than nine digits long. You can combine the `--since` option with +either or both of the `--follow` or `--tail` options. + +The `docker logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +container. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +April 2015, updated by Ahmet Alp Balkan +October 2015, updated by Mike Brown diff --git a/vendor/github.com/moby/moby/man/docker-network-connect.1.md b/vendor/github.com/moby/moby/man/docker-network-connect.1.md new file mode 100644 index 0000000..096ec77 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-network-connect.1.md @@ -0,0 +1,66 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-connect - connect a container to a network + +# SYNOPSIS +**docker network connect** +[**--help**] +NETWORK CONTAINER + +# DESCRIPTION + +Connects a container to a network. You can connect a container by name +or by ID. Once connected, the container can communicate with other containers in +the same network. + +```bash +$ docker network connect multi-host-network container1 +``` + +You can also use the `docker run --network=` option to start a container and immediately connect it to a network. + +```bash +$ docker run -itd --network=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::8822 busybox +``` +You can pause, restart, and stop containers that are connected to a network. +A container connects to its configured networks when it runs. + +If specified, the container's IP address(es) is reapplied when a stopped +container is restarted. If the IP address is no longer available, the container +fails to start. One way to guarantee that the IP address is available is +to specify an `--ip-range` when creating the network, and choose the static IP +address(es) from outside that range. This ensures that the IP address is not +given to another container while this container is not on the network. + +```bash +$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network +``` + +```bash +$ docker network connect --ip 172.20.128.2 multi-host-network container2 +``` + +To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. + +Once connected in network, containers can communicate using only another +container's IP address or name. For `overlay` networks or custom plugins that +support multi-host connectivity, containers connected to the same multi-host +network but launched from different Engines can also communicate in this way. + +You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. + + +# OPTIONS +**NETWORK** + Specify network name + +**CONTAINER** + Specify container name + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/moby/moby/man/docker-network-create.1.md b/vendor/github.com/moby/moby/man/docker-network-create.1.md new file mode 100644 index 0000000..44ce8e1 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-network-create.1.md @@ -0,0 +1,187 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-create - create a new network + +# SYNOPSIS +**docker network create** +[**--attachable**] +[**--aux-address**=*map[]*] +[**-d**|**--driver**=*DRIVER*] +[**--gateway**=*[]*] +[**--help**] +[**--internal**] +[**--ip-range**=*[]*] +[**--ipam-driver**=*default*] +[**--ipam-opt**=*map[]*] +[**--ipv6**] +[**--label**[=*[]*]] +[**-o**|**--opt**=*map[]*] +[**--subnet**=*[]*] +NETWORK-NAME + +# DESCRIPTION + +Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the +built-in network drivers. If you have installed a third party or your own custom +network driver you can specify that `DRIVER` here also. If you don't specify the +`--driver` option, the command automatically creates a `bridge` network for you. +When you install Docker Engine it creates a `bridge` network automatically. This +network corresponds to the `docker0` bridge that Engine has traditionally relied +on. When launch a new container with `docker run` it automatically connects to +this bridge network. You cannot remove this default bridge network but you can +create new ones using the `network create` command. + +```bash +$ docker network create -d bridge my-bridge-network +``` + +Bridge networks are isolated networks on a single Engine installation. If you +want to create a network that spans multiple Docker hosts each running an +Engine, you must create an `overlay` network. Unlike `bridge` networks overlay +networks require some pre-existing conditions before you can create one. These +conditions are: + +* Access to a key-value store. Engine supports Consul, Etcd, and Zookeeper (Distributed store) key-value stores. +* A cluster of hosts with connectivity to the key-value store. +* A properly configured Engine `daemon` on each host in the cluster. + +The `dockerd` options that support the `overlay` network are: + +* `--cluster-store` +* `--cluster-store-opt` +* `--cluster-advertise` + +To read more about these options and how to configure them, see ["*Get started +with multi-host +network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay/). + +It is also a good idea, though not required, that you install Docker Swarm on to +manage the cluster that makes up your network. Swarm provides sophisticated +discovery and server management that can assist your implementation. + +Once you have prepared the `overlay` network prerequisites you simply choose a +Docker host in the cluster and issue the following to create the network: + +```bash +$ docker network create -d overlay my-multihost-network +``` + +Network names must be unique. The Docker daemon attempts to identify naming +conflicts but this is not guaranteed. It is the user's responsibility to avoid +name conflicts. + +## Connect containers + +When you start a container use the `--network` flag to connect it to a network. +This adds the `busybox` container to the `mynet` network. + +```bash +$ docker run -itd --network=mynet busybox +``` + +If you want to add a container to a network after the container is already +running use the `docker network connect` subcommand. + +You can connect multiple containers to the same network. Once connected, the +containers can communicate using only another container's IP address or name. +For `overlay` networks or custom plugins that support multi-host connectivity, +containers connected to the same multi-host network but launched from different +Engines can also communicate in this way. + +You can disconnect a container from a network using the `docker network +disconnect` command. + +## Specifying advanced options + +When you create a network, Engine creates a non-overlapping subnetwork for the +network by default. This subnetwork is not a subdivision of an existing network. +It is purely for ip-addressing purposes. You can override this default and +specify subnetwork values directly using the `--subnet` option. On a +`bridge` network you can only create a single subnet: + +```bash +$ docker network create -d bridge --subnet=192.168.0.0/16 br0 +``` + +Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` +options. + +```bash +$ docker network create \ + --driver=bridge \ + --subnet=172.28.0.0/16 \ + --ip-range=172.28.5.0/24 \ + --gateway=172.28.5.254 \ + br0 +``` + +If you omit the `--gateway` flag the Engine selects one for you from inside a +preferred pool. For `overlay` networks and for network driver plugins that +support it you can create multiple subnetworks. + +```bash +$ docker network create -d overlay \ + --subnet=192.168.0.0/16 \ + --subnet=192.170.0.0/16 \ + --gateway=192.168.0.100 \ + --gateway=192.170.0.100 \ + --ip-range=192.168.1.0/24 \ + --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ + --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ + my-multihost-network +``` + +Be sure that your subnetworks do not overlap. If they do, the network create +fails and Engine returns an error. + +### Network internal mode + +By default, when you connect a container to an `overlay` network, Docker also +connects a bridge network to it to provide external connectivity. If you want +to create an externally isolated `overlay` network, you can specify the +`--internal` option. + +# OPTIONS +**--attachable** + Enable manual container attachment + +**--aux-address**=map[] + Auxiliary IPv4 or IPv6 addresses used by network driver + +**-d**, **--driver**=*DRIVER* + Driver to manage the Network bridge or overlay. The default is bridge. + +**--gateway**=[] + IPv4 or IPv6 Gateway for the master subnet + +**--help** + Print usage + +**--internal** + Restrict external access to the network + +**--ip-range**=[] + Allocate container ip from a sub-range + +**--ipam-driver**=*default* + IP Address Management Driver + +**--ipam-opt**=map[] + Set custom IPAM driver options + +**--ipv6** + Enable IPv6 networking + +**--label**=*label* + Set metadata for a network + +**-o**, **--opt**=map[] + Set custom driver options + +**--subnet**=[] + Subnet in CIDR format that represents a network segment + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/moby/moby/man/docker-network-disconnect.1.md b/vendor/github.com/moby/moby/man/docker-network-disconnect.1.md new file mode 100644 index 0000000..09bcac5 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-network-disconnect.1.md @@ -0,0 +1,36 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-disconnect - disconnect a container from a network + +# SYNOPSIS +**docker network disconnect** +[**--help**] +[**--force**] +NETWORK CONTAINER + +# DESCRIPTION + +Disconnects a container from a network. + +```bash + $ docker network disconnect multi-host-network container1 +``` + + +# OPTIONS +**NETWORK** + Specify network name + +**CONTAINER** + Specify container name + +**--force** + Force the container to disconnect from a network + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/moby/moby/man/docker-network-inspect.1.md b/vendor/github.com/moby/moby/man/docker-network-inspect.1.md new file mode 100644 index 0000000..f27c98c --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-network-inspect.1.md @@ -0,0 +1,112 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-inspect - inspect a network + +# SYNOPSIS +**docker network inspect** +[**-f**|**--format**[=*FORMAT*]] +[**--help**] +NETWORK [NETWORK...] + +# DESCRIPTION + +Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: + +```bash +$ sudo docker run -itd --name=container1 busybox +f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 + +$ sudo docker run -itd --name=container2 busybox +bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 +``` + +The `network inspect` command shows the containers, by id, in its +results. You can specify an alternate format to execute a given +template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +```bash +$ sudo docker network inspect bridge +[ + { + "Name": "bridge", + "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.42.1/16", + "Gateway": "172.17.42.1" + } + ] + }, + "Internal": false, + "Containers": { + "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { + "Name": "container2", + "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + }, + "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { + "Name": "container1", + "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", + "MacAddress": "02:42:ac:11:00:01", + "IPv4Address": "172.17.0.1/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + } +] +``` + +Returns the information about the user-defined network: + +```bash +$ docker network create simple-network +69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a +$ docker network inspect simple-network +[ + { + "Name": "simple-network", + "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.22.0.0/16", + "Gateway": "172.22.0.1" + } + ] + }, + "Containers": {}, + "Options": {} + } +] +``` + +# OPTIONS +**-f**, **--format**="" + Format the output using the given Go template. + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/moby/moby/man/docker-network-ls.1.md b/vendor/github.com/moby/moby/man/docker-network-ls.1.md new file mode 100644 index 0000000..f319e66 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-network-ls.1.md @@ -0,0 +1,188 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-ls - list networks + +# SYNOPSIS +**docker network ls** +[**-f**|**--filter**[=*[]*]] +[**--format**=*"TEMPLATE"*] +[**--no-trunc**[=*true*|*false*]] +[**-q**|**--quiet**[=*true*|*false*]] +[**--help**] + +# DESCRIPTION + +Lists all the networks the Engine `daemon` knows about. This includes the +networks that span across multiple hosts in a cluster, for example: + +```bash + $ docker network ls + NETWORK ID NAME DRIVER SCOPE + 7fca4eb8c647 bridge bridge local + 9f904ee27bf5 none null local + cf03ee007fb4 host host local + 78b03ee04fc4 multi-host overlay swarm +``` + +Use the `--no-trunc` option to display the full network id: + +```bash +$ docker network ls --no-trunc +NETWORK ID NAME DRIVER +18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null +c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host +7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge +95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge +63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. + +The currently supported filters are: + +* driver +* id (network's id) +* label (`label=` or `label==`) +* name (network's name) +* type (custom|builtin) + +#### Driver + +The `driver` filter matches networks based on their driver. + +The following example matches networks with the `bridge` driver: + +```bash +$ docker network ls --filter driver=bridge +NETWORK ID NAME DRIVER +db9db329f835 test1 bridge +f6e212da9dfd test2 bridge +``` + +#### ID + +The `id` filter matches on all or part of a network's ID. + +The following filter matches all networks with an ID containing the +`63d1ff1f77b0...` string. + +```bash +$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 +NETWORK ID NAME DRIVER +63d1ff1f77b0 dev bridge +``` + +You can also filter for a substring in an ID as this shows: + +```bash +$ docker network ls --filter id=95e74588f40d +NETWORK ID NAME DRIVER +95e74588f40d foo bridge + +$ docker network ls --filter id=95e +NETWORK ID NAME DRIVER +95e74588f40d foo bridge +``` + +#### Label + +The `label` filter matches networks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches networks with the `usage` label regardless of its value. + +```bash +$ docker network ls -f "label=usage" +NETWORK ID NAME DRIVER +db9db329f835 test1 bridge +f6e212da9dfd test2 bridge +``` + +The following filter matches networks with the `usage` label with the `prod` value. + +```bash +$ docker network ls -f "label=usage=prod" +NETWORK ID NAME DRIVER +f6e212da9dfd test2 bridge +``` + +#### Name + +The `name` filter matches on all or part of a network's name. + +The following filter matches all networks with a name containing the `foobar` string. + +```bash +$ docker network ls --filter name=foobar +NETWORK ID NAME DRIVER +06e7eef0a170 foobar bridge +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker network ls --filter name=foo +NETWORK ID NAME DRIVER +95e74588f40d foo bridge +06e7eef0a170 foobar bridge +``` + +#### Type + +The `type` filter supports two values; `builtin` displays predefined networks +(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. + +The following filter matches all user defined networks: + +```bash +$ docker network ls --filter type=custom +NETWORK ID NAME DRIVER +95e74588f40d foo bridge +63d1ff1f77b0 dev bridge +``` + +By having this flag it allows for batch cleanup. For example, use this filter +to delete all user defined networks: + +```bash +$ docker network rm `docker network ls --filter type=custom -q` +``` + +A warning will be issued when trying to remove a network that has containers +attached. + +# OPTIONS + +**-f**, **--filter**=*[]* + filter output based on conditions provided. + +**--format**="*TEMPLATE*" + Pretty-print networks using a Go template. + Valid placeholders: + .ID - Network ID + .Name - Network name + .Driver - Network driver + .Scope - Network scope (local, global) + .IPv6 - Whether IPv6 is enabled on the network or not + .Internal - Whether the network is internal or not + .Labels - All labels assigned to the network + .Label - Value of a specific label for this network. For example `{{.Label "project.version"}}` + +**--no-trunc**=*true*|*false* + Do not truncate the output + +**-q**, **--quiet**=*true*|*false* + Only display network IDs + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/moby/moby/man/docker-network-rm.1.md b/vendor/github.com/moby/moby/man/docker-network-rm.1.md new file mode 100644 index 0000000..c094a15 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-network-rm.1.md @@ -0,0 +1,43 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-rm - remove one or more networks + +# SYNOPSIS +**docker network rm** +[**--help**] +NETWORK [NETWORK...] + +# DESCRIPTION + +Removes one or more networks by name or identifier. To remove a network, +you must first disconnect any containers connected to it. +To remove the network named 'my-network': + +```bash + $ docker network rm my-network +``` + +To delete multiple networks in a single `docker network rm` command, provide +multiple network names or ids. The following example deletes a network with id +`3695c422697f` and a network named `my-network`: + +```bash + $ docker network rm 3695c422697f my-network +``` + +When you specify multiple networks, the command attempts to delete each in turn. +If the deletion of one network fails, the command continues to the next on the +list and tries to delete that. The command reports success or failure for each +deletion. + +# OPTIONS +**NETWORK** + Specify network name or id + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/moby/moby/man/docker-pause.1.md b/vendor/github.com/moby/moby/man/docker-pause.1.md new file mode 100644 index 0000000..11eef53 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-pause.1.md @@ -0,0 +1,32 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pause - Pause all processes within one or more containers + +# SYNOPSIS +**docker pause** +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The `docker pause` command suspends all processes in the specified containers. +On Linux, this uses the cgroups freezer. Traditionally, when suspending a process +the `SIGSTOP` signal is used, which is observable by the process being suspended. +With the cgroups freezer the process is unaware, and unable to capture, +that it is being suspended, and subsequently resumed. On Windows, only Hyper-V +containers can be paused. + +See the [cgroups freezer documentation] +(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for +further details. + +# OPTIONS +**--help** + Print usage statement + +# See also +**docker-unpause(1)** to unpause all processes within one or more containers. + +# HISTORY +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-port.1.md b/vendor/github.com/moby/moby/man/docker-port.1.md new file mode 100644 index 0000000..83e9cf9 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-port.1.md @@ -0,0 +1,47 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-port - List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT + +# SYNOPSIS +**docker port** +[**--help**] +CONTAINER [PRIVATE_PORT[/PROTO]] + +# DESCRIPTION +List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + + # docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test + +## Find out all the ports mapped + + # docker port test + 7890/tcp -> 0.0.0.0:4321 + 9876/tcp -> 0.0.0.0:1234 + +## Find out a specific mapping + + # docker port test 7890/tcp + 0.0.0.0:4321 + + # docker port test 7890 + 0.0.0.0:4321 + +## An example showing error for non-existent mapping + + # docker port test 7890/udp + 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-ps.1.md b/vendor/github.com/moby/moby/man/docker-ps.1.md new file mode 100644 index 0000000..d9aa39f --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-ps.1.md @@ -0,0 +1,145 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% FEBRUARY 2015 +# NAME +docker-ps - List containers + +# SYNOPSIS +**docker ps** +[**-a**|**--all**] +[**-f**|**--filter**[=*[]*]] +[**--format**=*"TEMPLATE"*] +[**--help**] +[**-l**|**--latest**] +[**-n**[=*-1*]] +[**--no-trunc**] +[**-q**|**--quiet**] +[**-s**|**--size**] + +# DESCRIPTION + +List the containers in the local repository. By default this shows only +the running containers. + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all containers. Only running containers are shown by default. The default is *false*. + +**-f**, **--filter**=[] + Filter output based on these conditions: + - exited= an exit code of + - label= or label== + - status=(created|restarting|running|paused|exited|dead) + - name= a container's name + - id= a container's ID + - is-task=(true|false) - containers that are a task (part of a service managed by swarm) + - before=(|) + - since=(|) + - ancestor=([:tag]||) - containers created from an image or a descendant. + - volume=(|) + - network=(|) - containers connected to the provided network + - health=(starting|healthy|unhealthy|none) - filters containers based on healthcheck status + +**--format**="*TEMPLATE*" + Pretty-print containers using a Go template. + Valid placeholders: + .ID - Container ID + .Image - Image ID + .Command - Quoted command + .CreatedAt - Time when the container was created. + .RunningFor - Elapsed time since the container was started. + .Ports - Exposed ports. + .Status - Container status. + .Size - Container disk size. + .Names - Container names. + .Labels - All labels assigned to the container. + .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}` + .Mounts - Names of the volumes mounted in this container. + +**--help** + Print usage statement + +**-l**, **--latest**=*true*|*false* + Show only the latest created container (includes all states). The default is *false*. + +**-n**=*-1* + Show n last created containers (includes all states). + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only display numeric IDs. The default is *false*. + +**-s**, **--size**=*true*|*false* + Display total file sizes. The default is *false*. + +# EXAMPLES +# Display all containers, including non-running + + # docker ps -a + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a87ecb4f327c fedora:20 /bin/sh -c #(nop) MA 20 minutes ago Exit 0 desperate_brattain + 01946d9d34d8 vpavlin/rhel7:latest /bin/sh -c #(nop) MA 33 minutes ago Exit 0 thirsty_bell + c1d3b0166030 acffc0358b9e /bin/sh -c yum -y up 2 weeks ago Exit 1 determined_torvalds + 41d50ecd2f57 fedora:20 /bin/sh -c #(nop) MA 2 weeks ago Exit 0 drunk_pike + +# Display only IDs of all containers, including non-running + + # docker ps -a -q + a87ecb4f327c + 01946d9d34d8 + c1d3b0166030 + 41d50ecd2f57 + +# Display only IDs of all containers that have the name `determined_torvalds` + + # docker ps -a -q --filter=name=determined_torvalds + c1d3b0166030 + +# Display containers with their commands + + # docker ps --format "{{.ID}}: {{.Command}}" + a87ecb4f327c: /bin/sh -c #(nop) MA + 01946d9d34d8: /bin/sh -c #(nop) MA + c1d3b0166030: /bin/sh -c yum -y up + 41d50ecd2f57: /bin/sh -c #(nop) MA + +# Display containers with their labels in a table + + # docker ps --format "table {{.ID}}\t{{.Labels}}" + CONTAINER ID LABELS + a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd + 01946d9d34d8 + c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 + 41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd + +# Display containers with their node label in a table + + # docker ps --format 'table {{.ID}}\t{{(.Label "com.docker.swarm.node")}}' + CONTAINER ID NODE + a87ecb4f327c ubuntu + 01946d9d34d8 + c1d3b0166030 debian + 41d50ecd2f57 fedora + +# Display containers with `remote-volume` mounted + + $ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" + CONTAINER ID MOUNTS + 9c3527ed70ce remote-volume + +# Display containers with a volume mounted in `/data` + + $ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" + CONTAINER ID MOUNTS + 9c3527ed70ce remote-volume + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit +February 2015, updated by André Martins +October 2016, updated by Josh Horwitz diff --git a/vendor/github.com/moby/moby/man/docker-pull.1.md b/vendor/github.com/moby/moby/man/docker-pull.1.md new file mode 100644 index 0000000..c61d005 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-pull.1.md @@ -0,0 +1,220 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pull - Pull an image or a repository from a registry + +# SYNOPSIS +**docker pull** +[**-a**|**--all-tags**] +[**--help**] +NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] + +# DESCRIPTION + +This command pulls down an image or a repository from a registry. If +there is more than one image for a repository (e.g., fedora) then all +images for that repository name can be pulled down including any tags +(see the option **-a** or **--all-tags**). + +If you do not specify a `REGISTRY_HOST`, the command uses Docker's public +registry located at `registry-1.docker.io` by default. + +# OPTIONS +**-a**, **--all-tags**=*true*|*false* + Download all tagged images in the repository. The default is *false*. + +**--help** + Print usage statement + +# EXAMPLES + +### Pull an image from Docker Hub + +To download a particular image, or set of images (i.e., a repository), use +`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a +default. This command pulls the `debian:latest` image: + + $ docker pull debian + + Using default tag: latest + latest: Pulling from library/debian + fdd5d7827f33: Pull complete + a3ed95caeb02: Pull complete + Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa + Status: Downloaded newer image for debian:latest + +Docker images can consist of multiple layers. In the example above, the image +consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`. + +Layers can be reused by images. For example, the `debian:jessie` image shares +both layers with `debian:latest`. Pulling the `debian:jessie` image therefore +only pulls its metadata, but not its layers, because all layers are already +present locally: + + $ docker pull debian:jessie + + jessie: Pulling from library/debian + fdd5d7827f33: Already exists + a3ed95caeb02: Already exists + Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e + Status: Downloaded newer image for debian:jessie + +To see which images are present locally, use the **docker-images(1)** +command: + + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + debian jessie f50f9524513f 5 days ago 125.1 MB + debian latest f50f9524513f 5 days ago 125.1 MB + +Docker uses a content-addressable image store, and the image ID is a SHA256 +digest covering the image's configuration and layers. In the example above, +`debian:jessie` and `debian:latest` have the same image ID because they are +actually the *same* image tagged with different names. Because they are the +same image, their layers are stored only once and do not consume extra disk +space. + +For more information about images, layers, and the content-addressable store, +refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/) +in the online documentation. + + +## Pull an image by digest (immutable identifier) + +So far, you've pulled images by their name (and "tag"). Using names and tags is +a convenient way to work with images. When using tags, you can `docker pull` an +image again to make sure you have the most up-to-date version of that image. +For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu +14.04 image. + +In some cases you don't want images to be updated to newer versions, but prefer +to use a fixed version of an image. Docker enables you to pull an image by its +*digest*. When pulling an image by digest, you specify *exactly* which version +of an image to pull. Doing so, allows you to "pin" an image to that version, +and guarantee that the image you're using is always the same. + +To know the digest of an image, pull the image first. Let's pull the latest +`ubuntu:14.04` image from Docker Hub: + + $ docker pull ubuntu:14.04 + + 14.04: Pulling from library/ubuntu + 5a132a7e7af1: Pull complete + fd2731e4c50c: Pull complete + 28a2f68d1120: Pull complete + a3ed95caeb02: Pull complete + Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + Status: Downloaded newer image for ubuntu:14.04 + +Docker prints the digest of the image after the pull has finished. In the example +above, the digest of the image is: + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Docker also prints the digest of an image when *pushing* to a registry. This +may be useful if you want to pin to a version of the image you just pushed. + +A digest takes the place of the tag when pulling an image, for example, to +pull the above image by digest, run the following command: + + $ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu + 5a132a7e7af1: Already exists + fd2731e4c50c: Already exists + 28a2f68d1120: Already exists + a3ed95caeb02: Already exists + Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Digest can also be used in the `FROM` of a Dockerfile, for example: + + FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + MAINTAINER some maintainer + +> **Note**: Using this feature "pins" an image to a specific version in time. +> Docker will therefore not pull updated versions of an image, which may include +> security updates. If you want to pull an updated image, you need to change the +> digest accordingly. + +## Pulling from a different registry + +By default, `docker pull` pulls images from Docker Hub. It is also possible to +manually specify the path of a registry to pull from. For example, if you have +set up a local registry, you can specify its path to pull from it. A registry +path is similar to a URL, but does not contain a protocol specifier (`https://`). + +The following command pulls the `testing/test-image` image from a local registry +listening on port 5000 (`myregistry.local:5000`): + + $ docker pull myregistry.local:5000/testing/test-image + +Registry credentials are managed by **docker-login(1)**. + +Docker uses the `https://` protocol to communicate with a registry, unless the +registry is allowed to be accessed over an insecure connection. Refer to the +[insecure registries](https://docs.docker.com/engine/reference/commandline/daemon/#insecure-registries) +section in the online documentation for more information. + + +## Pull a repository with multiple images + +By default, `docker pull` pulls a *single* image from the registry. A repository +can contain multiple images. To pull all images from a repository, provide the +`-a` (or `--all-tags`) option when using `docker pull`. + +This command pulls all images from the `fedora` repository: + + $ docker pull --all-tags fedora + + Pulling repository fedora + ad57ef8d78d7: Download complete + 105182bb5e8b: Download complete + 511136ea3c5a: Download complete + 73bd853d2ea5: Download complete + .... + + Status: Downloaded newer image for fedora + +After the pull has completed use the `docker images` command to see the +images that were pulled. The example below shows all the `fedora` images +that are present locally: + + $ docker images fedora + + REPOSITORY TAG IMAGE ID CREATED SIZE + fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB + fedora 20 105182bb5e8b 5 days ago 372.7 MB + fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB + fedora latest 105182bb5e8b 5 days ago 372.7 MB + + +## Canceling a pull + +Killing the `docker pull` process, for example by pressing `CTRL-c` while it is +running in a terminal, will terminate the pull operation. + + $ docker pull fedora + + Using default tag: latest + latest: Pulling from library/fedora + a3ed95caeb02: Pulling fs layer + 236608c7b546: Pulling fs layer + ^C + +> **Note**: Technically, the Engine terminates a pull operation when the +> connection between the Docker Engine daemon and the Docker Engine client +> initiating the pull is lost. If the connection with the Engine daemon is +> lost for other reasons than a manual interaction, the pull is also aborted. + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit +April 2015, updated by John Willis +April 2015, updated by Mary Anthony for v2 +September 2015, updated by Sally O'Malley diff --git a/vendor/github.com/moby/moby/man/docker-push.1.md b/vendor/github.com/moby/moby/man/docker-push.1.md new file mode 100644 index 0000000..847e66d --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-push.1.md @@ -0,0 +1,63 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-push - Push an image or a repository to a registry + +# SYNOPSIS +**docker push** +[**--help**] +NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] + +# DESCRIPTION + +Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) +registry or to a self-hosted one. + +Refer to **docker-tag(1)** for more information about valid image and tag names. + +Killing the **docker push** process, for example by pressing **CTRL-c** while it +is running in a terminal, terminates the push operation. + +Registry credentials are managed by **docker-login(1)**. + + +# OPTIONS + +**--disable-content-trust** + Skip image verification (default true) + +**--help** + Print usage statement + +# EXAMPLES + +## Pushing a new image to a registry + +First save the new image by finding the container ID (using **docker ps**) +and then committing it to a new image name. Note that only a-z0-9-_. are +allowed when naming images: + + # docker commit c16378f943fe rhel-httpd + +Now, push the image to the registry using the image ID. In this example the +registry is on host named `registry-host` and listening on port `5000`. To do +this, tag the image with the host name or IP address, and the port of the +registry: + + # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd + # docker push registry-host:5000/myadmin/rhel-httpd + +Check that this worked by running: + + # docker images + +You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` +listed. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/moby/moby/man/docker-rename.1.md b/vendor/github.com/moby/moby/man/docker-rename.1.md new file mode 100644 index 0000000..eaeea5c --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-rename.1.md @@ -0,0 +1,15 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCTOBER 2014 +# NAME +docker-rename - Rename a container + +# SYNOPSIS +**docker rename** +CONTAINER NEW_NAME + +# OPTIONS +There are no available options. + +# DESCRIPTION +Rename a container. Container may be running, paused or stopped. diff --git a/vendor/github.com/moby/moby/man/docker-restart.1.md b/vendor/github.com/moby/moby/man/docker-restart.1.md new file mode 100644 index 0000000..271c4ee --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-restart.1.md @@ -0,0 +1,26 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-restart - Restart one or more containers + +# SYNOPSIS +**docker restart** +[**--help**] +[**-t**|**--time**[=*10*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION +Restart each container listed. + +# OPTIONS +**--help** + Print usage statement + +**-t**, **--time**=*10* + Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-rm.1.md b/vendor/github.com/moby/moby/man/docker-rm.1.md new file mode 100644 index 0000000..2105288 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-rm.1.md @@ -0,0 +1,72 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-rm - Remove one or more containers + +# SYNOPSIS +**docker rm** +[**-f**|**--force**] +[**-l**|**--link**] +[**-v**|**--volumes**] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +**docker rm** will remove one or more containers from the host node. The +container name or ID can be used. This does not remove images. You cannot +remove a running container unless you use the **-f** option. To see all +containers on a host use the **docker ps -a** command. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--force**=*true*|*false* + Force the removal of a running container (uses SIGKILL). The default is *false*. + +**-l**, **--link**=*true*|*false* + Remove the specified link and not the underlying container. The default is *false*. + +**-v**, **--volumes**=*true*|*false* + Remove the volumes associated with the container. The default is *false*. + +# EXAMPLES + +## Removing a container using its ID + +To remove a container using its ID, find either from a **docker ps -a** +command, or use the ID returned from the **docker run** command, or retrieve +it from a file used to store it using the **docker run --cidfile**: + + docker rm abebf7571666 + +## Removing a container using the container name + +The name of the container can be found using the **docker ps -a** +command. The use that name as follows: + + docker rm hopeful_morse + +## Removing a container and all associated volumes + + $ docker rm -v redis + redis + +This command will remove the container and any volumes associated with it. +Note that if a volume was specified with a name, it will not be removed. + + $ docker create -v awesome:/foo -v /bar --name hello redis + hello + $ docker rm -v hello + +In this example, the volume for `/foo` will remain in tact, but the volume for +`/bar` will be removed. The same behavior holds for volumes inherited with +`--volumes-from`. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-rmi.1.md b/vendor/github.com/moby/moby/man/docker-rmi.1.md new file mode 100644 index 0000000..35bf8aa --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-rmi.1.md @@ -0,0 +1,42 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-rmi - Remove one or more images + +# SYNOPSIS +**docker rmi** +[**-f**|**--force**] +[**--help**] +[**--no-prune**] +IMAGE [IMAGE...] + +# DESCRIPTION + +Removes one or more images from the host node. This does not remove images from +a registry. You cannot remove an image of a running container unless you use the +**-f** option. To see all images on a host use the **docker images** command. + +# OPTIONS +**-f**, **--force**=*true*|*false* + Force removal of the image. The default is *false*. + +**--help** + Print usage statement + +**--no-prune**=*true*|*false* + Do not delete untagged parents. The default is *false*. + +# EXAMPLES + +## Removing an image + +Here is an example of removing an image: + + docker rmi fedora/httpd + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 diff --git a/vendor/github.com/moby/moby/man/docker-run.1.md b/vendor/github.com/moby/moby/man/docker-run.1.md new file mode 100644 index 0000000..8c1018a --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-run.1.md @@ -0,0 +1,1055 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-run - Run a command in a new container + +# SYNOPSIS +**docker run** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**--blkio-weight-device**[=*[]*]] +[**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cgroup-parent**[=*CGROUP-PATH*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpu-count**[=*0*]] +[**--cpu-percent**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpu-rt-period**[=*0*]] +[**--cpu-rt-runtime**[=*0*]] +[**--cpus**[=*0.0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**-d**|**--detach**] +[**--detach-keys**[=*[]*]] +[**--device**[=*[]*]] +[**--device-read-bps**[=*[]*]] +[**--device-read-iops**[=*[]*]] +[**--device-write-bps**[=*[]*]] +[**--device-write-iops**[=*[]*]] +[**--dns**[=*[]*]] +[**--dns-option**[=*[]*]] +[**--dns-search**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**--group-add**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**--help**] +[**--init**] +[**--init-path**[=*[]*]] +[**-i**|**--interactive**] +[**--ip**[=*IPv4-ADDRESS*]] +[**--ip6**[=*IPv6-ADDRESS*]] +[**--ipc**[=*IPC*]] +[**--isolation**[=*default*]] +[**--kernel-memory**[=*KERNEL-MEMORY*]] +[**-l**|**--label**[=*[]*]] +[**--label-file**[=*[]*]] +[**--link**[=*[]*]] +[**--link-local-ip**[=*[]*]] +[**--log-driver**[=*[]*]] +[**--log-opt**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] +[**--memory-reservation**[=*MEMORY-RESERVATION*]] +[**--memory-swap**[=*LIMIT*]] +[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] +[**--name**[=*NAME*]] +[**--network-alias**[=*[]*]] +[**--network**[=*"bridge"*]] +[**--oom-kill-disable**] +[**--oom-score-adj**[=*0*]] +[**-P**|**--publish-all**] +[**-p**|**--publish**[=*[]*]] +[**--pid**[=*[PID]*]] +[**--userns**[=*[]*]] +[**--pids-limit**[=*PIDS_LIMIT*]] +[**--privileged**] +[**--read-only**] +[**--restart**[=*RESTART*]] +[**--rm**] +[**--security-opt**[=*[]*]] +[**--storage-opt**[=*[]*]] +[**--stop-signal**[=*SIGNAL*]] +[**--stop-timeout**[=*TIMEOUT*]] +[**--shm-size**[=*[]*]] +[**--sig-proxy**[=*true*]] +[**--sysctl**[=*[]*]] +[**-t**|**--tty**] +[**--tmpfs**[=*[CONTAINER-DIR[:]*]] +[**-u**|**--user**[=*USER*]] +[**--ulimit**[=*[]*]] +[**--uts**[=*[]*]] +[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] +[**--volume-driver**[=*DRIVER*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] +IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Run a process in a new container. **docker run** starts a process with its own +file system, its own networking, and its own isolated process tree. The IMAGE +which starts the process may define defaults related to the process that will be +run in the container, the networking to expose, and more, but **docker run** +gives final control to the operator or administrator who starts the container +from the image. For that reason **docker run** has more options than any other +Docker command. + +If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and +all image dependencies, from the repository in the same way running **docker +pull** IMAGE, before it starts the container from that image. + +# OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. + + In foreground mode (the default when **-d** +is not specified), **docker run** can start the process in the container +and attach the console to the process's standard input, output, and standard +error. It can even pretend to be a TTY (this is what most commandline +executables expect) and pass along signals. The **-a** option can be set for +each of stdin, stdout, and stderr. + +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + + Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** +option can be set multiple times. + +**--blkio-weight**=*0* + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**--blkio-weight-device**=[] + Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). + +**--cpu-shares**=*0* + CPU shares (relative weight) + + By default, all containers get the same proportion of CPU cycles. This proportion +can be modified by changing the container's CPU share weighting relative +to the weighting of all other running containers. + +To modify the proportion from the default of 1024, use the **--cpu-shares** +flag to set the weighting to 2 or higher. + +The proportion will only apply when CPU-intensive processes are running. +When tasks in one container are idle, other containers can use the +left-over CPU time. The actual amount of CPU time will vary depending on +the number of containers running on the system. + +For example, consider three containers, one has a cpu-share of 1024 and +two others have a cpu-share setting of 512. When processes in all three +containers attempt to use 100% of CPU, the first container would receive +50% of the total CPU time. If you add a fourth container with a cpu-share +of 1024, the first container only gets 33% of the CPU. The remaining containers +receive 16.5%, 16.5% and 33% of the CPU. + +On a multi-core system, the shares of CPU time are distributed over all CPU +cores. Even if a container is limited to less than 100% of CPU time, it can +use 100% of each individual CPU core. + +For example, consider a system with more than three cores. If you start one +container **{C0}** with **-c=512** running one process, and another container +**{C1}** with **-c=1024** running two processes, this can result in the following +division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cgroup-parent**="" + Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + +**--cidfile**="" + Write the container ID to the file + +**--cpu-count**=*0* + Limit the number of CPUs available for execution by the container. + + On Windows Server containers, this is approximated as a percentage of total CPU usage. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-percent**=*0* + Limit the percentage of CPU available for execution by a container running on a Windows daemon. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-period**=*0* + Limit the CPU CFS (Completely Fair Scheduler) period + + Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. + +**--cpuset-cpus**="" + CPUs in which to allow execution (0-3, 0,1) + +**--cpuset-mems**="" + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + + If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +then processes in your Docker container will only use memory from the first +two memory nodes. + +**--cpu-quota**=*0* + Limit the CPU CFS (Completely Fair Scheduler) quota + + Limit the container's CPU usage. By default, containers run with the full +CPU resource. This flag tell the kernel to restrict the container's CPU usage +to the quota you specify. + +**--cpu-rt-period**=0 + Limit the CPU real-time period in microseconds + + Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. + +**--cpu-rt-runtime**=0 + Limit the CPU real-time runtime in microseconds + + Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: + Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. + + The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. + +**--cpus**=0.0 + Number of CPUs. The default is *0.0* which means no limit. + +**-d**, **--detach**=*true*|*false* + Detached mode: run the container in the background and print the new container ID. The default is *false*. + + At any time you can run **docker ps** in +the other shell to view a list of the running containers. You can reattach to a +detached container with **docker attach**. If you choose to run a container in +the detached mode, then you cannot use the **-rm** option. + + When attached in the tty mode, you can detach from the container (and leave it +running) using a configurable key sequence. The default sequence is `CTRL-p CTRL-q`. +You configure the key sequence using the **--detach-keys** option or a configuration file. +See **config-json(5)** for documentation on using a configuration file. + +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**--device**=[] + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) + +**--device-read-bps**=[] + Limit read rate from a device (e.g. --device-read-bps=/dev/sda:1mb) + +**--device-read-iops**=[] + Limit read rate from a device (e.g. --device-read-iops=/dev/sda:1000) + +**--device-write-bps**=[] + Limit write rate to a device (e.g. --device-write-bps=/dev/sda:1mb) + +**--device-write-iops**=[] + Limit write rate to a device (e.g. --device-write-iops=/dev/sda:1000) + +**--dns-search**=[] + Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) + +**--dns-option**=[] + Set custom DNS options + +**--dns**=[] + Set custom DNS servers + + This option can be used to override the DNS +configuration passed to the container. Typically this is necessary when the +host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this +is the case the **--dns** flags is necessary for every run. + +**-e**, **--env**=[] + Set environment variables + + This option allows you to specify arbitrary +environment variables that are available for the process that will be launched +inside of the container. + +**--entrypoint**="" + Overwrite the default ENTRYPOINT of the image + + This option allows you to overwrite the default entrypoint of the image that +is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND +because it specifies what executable to run when the container starts, but it is +(purposely) more difficult to override. The ENTRYPOINT gives a container its +default nature or behavior, so that when you set an ENTRYPOINT you can run the +container as if it were that binary, complete with default options, and you can +pass in more options via the COMMAND. But, sometimes an operator may want to run +something else inside the container, so you can override the default ENTRYPOINT +at runtime by using a **--entrypoint** and a string to specify the new +ENTRYPOINT. + +**--env-file**=[] + Read in a line delimited file of environment variables + +**--expose**=[] + Expose a port, or a range of ports (e.g. --expose=3300-3310) informs Docker +that the container listens on the specified network ports at runtime. Docker +uses this information to interconnect containers using links and to set up port +redirection on the host system. + +**--group-add**=[] + Add additional groups to run as + +**-h**, **--hostname**="" + Container host name + + Sets the container host name that is available inside the container. + +**--help** + Print usage statement + +**--init** + Run an init inside the container that forwards signals and reaps processes + +**--init-path**="" + Path to the docker-init binary + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + + When set to true, keep stdin open even if not attached. The default is false. + +**--ip**="" + Sets the container's interface IPv4 address (e.g. 172.23.0.9) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ip6**="" + Sets the container's interface IPv6 address (e.g. 2001:db8::1b99) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. Note +that the default on Windows server is `process`, and the default on Windows client +is `hyperv`. Linux only supports `default`. + +**-l**, **--label**=[] + Set metadata on the container (e.g., --label com.example.key=value) + +**--kernel-memory**="" + Kernel memory limit (format: `[]`, where unit = b, k, m or g) + + Constrains the kernel memory available to a container. If a limit of 0 +is specified (not using `--kernel-memory`), the container's kernel memory +is not limited. If you specify a limit, it may be rounded up to a multiple +of the operating system's page size and the value can be very large, +millions of trillions. + +**--label-file**=[] + Read in a line delimited file of labels + +**--link**=[] + Add link to another container in the form of :alias or just +in which case the alias will match the name + + If the operator +uses **--link** when starting the new client container, then the client +container can access the exposed port via a private networking interface. Docker +will set some environment variables in the client container to help indicate +which interface and port to use. + +**--link-local-ip**=[] + Add one or more link-local IPv4/IPv6 addresses to the container's interface + +**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" + Logging driver for the container. Default is defined by daemon `--log-driver` flag. + **Warning**: the `docker logs` command works only for the `json-file` and + `journald` logging drivers. + +**--log-opt**=[] + Logging driver specific options. + +**-m**, **--memory**="" + Memory limit (format: [], where unit = b, k, m or g) + + Allows you to constrain the memory available to a container. If the host +supports swap memory, then the **-m** memory setting can be larger than physical +RAM. If a limit of 0 is specified (not using **-m**), the container's memory is +not limited. The actual limit may be rounded up to a multiple of the operating +system's page size (the value would be very large, that's millions of trillions). + +**--memory-reservation**="" + Memory soft limit (format: [], where unit = b, k, m or g) + + After setting memory reservation, when the system detects memory contention +or low memory, containers are forced to restrict their consumption to their +reservation. So you should always set the value below **--memory**, otherwise the +hard limit will take precedence. By default, memory reservation will be the same +as memory limit. + +**--memory-swap**="LIMIT" + A limit value equal to memory plus swap. Must be used with the **-m** +(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** +(**--memory**) value. By default, the swap `LIMIT` will be set to double +the value of --memory. + + The format of `LIMIT` is `[]`. Unit can be `b` (bytes), +`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a +unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. + +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + + Remember that the MAC address in an Ethernet network must be unique. +The IPv6 link-local address will be based on the device's MAC address +according to RFC4862. + +**--name**="" + Assign a name to the container + + The operator can identify a container in three ways: + UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) + UUID short identifier (“f78375b1c487”) + Name (“jonah”) + + The UUID identifiers come from the Docker daemon, and if a name is not assigned +to the container with **--name** then the daemon will also generate a random +string name. The name is useful when defining links (see **--link**) (or any +other place you need to identify a container). This works for both background +and foreground Docker containers. + +**--network**="*bridge*" + Set the Network mode for the container + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. + '|': connect to a user-defined network + +**--network-alias**=[] + Add network-scoped alias for the container + +**--oom-kill-disable**=*true*|*false* + Whether to disable OOM Killer for the container or not. + +**--oom-score-adj**="" + Tune the host's OOM preferences for containers (accepts -1000 to 1000) + +**-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to random ports on the host interfaces. The default is *false*. + + When set to true publish all exposed ports to the host interfaces. The +default is false. If the operator uses -P (or -p) then Docker will make the +exposed port accessible on the host and the ports will be available to any +client that can reach the host. When using -P, Docker will bind any exposed +port to a random port on the host within an *ephemeral port range* defined by +`/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host +ports and the exposed ports, use `docker port`. + +**-p**, **--publish**=[] + Publish a container's port, or range of ports, to the host. + + Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort` +Both hostPort and containerPort can be specified as a range of ports. +When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. +(e.g., `docker run -p 1234-1236:1222-1224 --name thisWorks -t busybox` +but not `docker run -p 1230-1236:1230-1240 --name RangeContainerPortsBiggerThanRangeHostPorts -t busybox`) +With ip: `docker run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage` +Use `docker port` to see the actual mapping: `docker port CONTAINER $CONTAINERPORT` + +**--pid**="" + Set the PID mode for the container + Default is to create a private PID namespace for the container + 'container:': join another container's PID namespace + 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. + +**--userns**="" + Set the usernamespace mode for the container when `userns-remap` option is enabled. + **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`). + +**--pids-limit**="" + Tune the container's pids limit. Set `-1` to have unlimited pids for the container. + +**--uts**=*host* + Set the UTS mode for the container + **host**: use the host's UTS namespace inside the container. + Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. + +**--privileged**=*true*|*false* + Give extended privileges to this container. The default is *false*. + + By default, Docker containers are +“unprivileged” (=false) and cannot, for example, run a Docker daemon inside the +Docker container. This is because by default a container is not allowed to +access any devices. A “privileged” container is given access to all devices. + + When the operator executes **docker run --privileged**, Docker will enable access +to all devices on the host as well as set some configuration in AppArmor to +allow the container nearly all the same access to the host as processes running +outside of a container on the host. + +**--read-only**=*true*|*false* + Mount the container's root filesystem as read only. + + By default a container will have its root filesystem writable allowing processes +to write files anywhere. By specifying the `--read-only` flag the container will have +its root filesystem mounted as read only prohibiting any writes. + +**--restart**="*no*" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). + +**--rm**=*true*|*false* + Automatically remove the container when it exits. The default is *false*. + `--rm` flag can work together with `-d`, and auto-removal will be done on daemon side. Note that it's +incompatible with any restart policy other than `none`. + +**--security-opt**=[] + Security Options + + "label=user:USER" : Set the label user for the container + "label=role:ROLE" : Set the label role for the container + "label=type:TYPE" : Set the label type for the container + "label=level:LEVEL" : Set the label level for the container + "label=disable" : Turn off label confinement for the container + "no-new-privileges" : Disable container processes from gaining additional privileges + + "seccomp=unconfined" : Turn off seccomp confinement for the container + "seccomp=profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter + + "apparmor=unconfined" : Turn off apparmor confinement for the container + "apparmor=your-profile" : Set the apparmor confinement profile for the container + +**--storage-opt**=[] + Storage driver options per container + + $ docker run -it --storage-opt size=120G fedora /bin/bash + + This (size) will allow to set the container rootfs size to 120G at creation time. + This option is only available for the `devicemapper`, `btrfs`, `overlay2` and `zfs` graph drivers. + For the `devicemapper`, `btrfs` and `zfs` storage drivers, user cannot pass a size less than the Default BaseFS Size. + For the `overlay2` storage driver, the size option is only available if the backing fs is `xfs` and mounted with the `pquota` mount option. + Under these conditions, user can pass any size less then the backing fs size. + +**--stop-signal**=*SIGTERM* + Signal to stop a container. Default is SIGTERM. + +**--stop-timeout**=*10* + Timeout (in seconds) to stop a container. Default is 10. + +**--shm-size**="" + Size of `/dev/shm`. The format is ``. + `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes). + If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. + +**--sysctl**=SYSCTL + Configure namespaced kernel parameters at runtime + + IPC Namespace - current sysctls allowed: + + kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced + Sysctls beginning with fs.mqueue.* + + If you use the `--ipc=host` option these sysctls will not be allowed. + + Network Namespace - current sysctls allowed: + Sysctls beginning with net.* + + If you use the `--network=host` option these sysctls will not be allowed. + +**--sig-proxy**=*true*|*false* + Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. + +**--memory-swappiness**="" + Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + + When set to true Docker can allocate a pseudo-tty and attach to the standard +input of any container. This can be used, for example, to run a throwaway +interactive shell. The default is false. + +The **-t** option is incompatible with a redirection of the docker client +standard input. + +**--tmpfs**=[] Create a tmpfs mount + + Mount a temporary filesystem (`tmpfs`) mount into a container, for example: + + $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image + + This command mounts a `tmpfs` at `/tmp` within the container. The supported mount +options are the same as the Linux default `mount` flags. If you do not specify +any options, the systems uses the following options: +`rw,noexec,nosuid,nodev,size=65536k`. + +**-u**, **--user**="" + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument the command will be run as root in the container. + +**--ulimit**=[] + Ulimit options + +**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] + Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker + bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker + container. If 'HOST-DIR' is omitted, Docker automatically creates the new + volume on the host. The `OPTIONS` are a comma delimited list and can be: + + * [rw|ro] + * [z|Z] + * [`[r]shared`|`[r]slave`|`[r]private`] + * [nocopy] + +The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` +can be an absolute path or a `name` value. A `name` value must start with an +alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or +`-` (hyphen). An absolute path starts with a `/` (forward slash). + +If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the +path you specify. If you supply a `name`, Docker creates a named volume by that +`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` +value. If you supply the `/foo` value, Docker creates a bind-mount. If you +supply the `foo` specification, Docker creates a named volume. + +You can specify multiple **-v** options to mount one or more mounts to a +container. To use these same mounts in other containers, specify the +**--volumes-from** option also. + +You can add `:ro` or `:rw` suffix to a volume to mount it read-only or +read-write mode, respectively. By default, the volumes are mounted read-write. +See examples. + +Labeling systems like SELinux require that proper labels are placed on volume +content mounted into a container. Without a label, the security system might +prevent the processes running inside the container from using the content. By +default, Docker does not change the labels set by the OS. + +To change a label in the container context, you can add either of two suffixes +`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file +objects on the shared volumes. The `z` option tells Docker that two containers +share the volume content. As a result, Docker labels the content with a shared +content label. Shared volume labels allow all containers to read/write content. +The `Z` option tells Docker to label the content with a private unshared label. +Only the current container can use a private volume. + +By default bind mounted volumes are `private`. That means any mounts done +inside container will not be visible on host and vice-a-versa. One can change +this behavior by specifying a volume mount propagation property. Making a +volume `shared` mounts done under that volume inside container will be +visible on host and vice-a-versa. Making a volume `slave` enables only one +way mount propagation and that is mounts done on host under that volume +will be visible inside container but not the other way around. + +To control mount propagation property of volume one can use `:[r]shared`, +`:[r]slave` or `:[r]private` propagation flag. Propagation property can +be specified only for bind mounted volumes and not for internal volumes or +named volumes. For mount propagation to work source mount point (mount point +where source dir is mounted on) has to have right propagation properties. For +shared volumes, source mount point has to be shared. And for slave volumes, +source mount has to be either shared or slave. + +Use `df ` to figure out the source mount and then use +`findmnt -o TARGET,PROPAGATION ` to figure out propagation +properties of source mount. If `findmnt` utility is not available, then one +can look at mount entry for source mount point in `/proc/self/mountinfo`. Look +at `optional fields` and see if any propagaion properties are specified. +`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if +nothing is there that means mount is `private`. + +To change propagation properties of a mount point use `mount` command. For +example, if one wants to bind mount source directory `/foo` one can do +`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This +will convert /foo into a `shared` mount point. Alternatively one can directly +change propagation properties of source mount. Say `/` is source mount for +`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. + +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. The value of this setting may cause Docker to not +> see mount propagation changes made on the mount point. For example, if this value +> is `slave`, you may not be able to use the `shared` or `rshared` propagation on +> a volume. + +To disable automatic copying of data from the container path to the volume, use +the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. + +**--volume-driver**="" + Container's volume driver. This driver creates volumes specified either from + a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. + See **docker-volume-create(1)** for full details. + +**--volumes-from**=[] + Mount volumes from the specified container(s) + + Mounts already mounted volumes from a source container onto another + container. You must supply the source's container-id. To share + a volume, use the **--volumes-from** option when running + the target container. You can share volumes even if the source container + is not running. + + By default, Docker mounts the volumes in the same mode (read-write or + read-only) as it is mounted in the source container. Optionally, you + can change this by suffixing the container-id with either the `:ro` or + `:rw ` keyword. + + If the location of the volume from the source container overlaps with + data residing on a target container, then the volume hides + that data on the target. + +**-w**, **--workdir**="" + Working directory inside the container + + The default working directory for +running binaries within a container is the root directory (/). The developer can +set a different default with the Dockerfile WORKDIR instruction. The operator +can override the working directory by using the **-w** option. + +# Exit Status + +The exit code from `docker run` gives information about why the container +failed to run or why it exited. When `docker run` exits with a non-zero code, +the exit codes follow the `chroot` standard, see below: + +**_125_** if the error is with Docker daemon **_itself_** + + $ docker run --foo busybox; echo $? + # flag provided but not defined: --foo + See 'docker run --help'. + 125 + +**_126_** if the **_contained command_** cannot be invoked + + $ docker run busybox /etc; echo $? + # exec: "/etc": permission denied + docker: Error response from daemon: Contained command could not be invoked + 126 + +**_127_** if the **_contained command_** cannot be found + + $ docker run busybox foo; echo $? + # exec: "foo": executable file not found in $PATH + docker: Error response from daemon: Contained command not found or does not exist + 127 + +**_Exit code_** of **_contained command_** otherwise + + $ docker run busybox /bin/sh -c 'exit 3' + # 3 + +# EXAMPLES + +## Running container in read-only mode + +During container image development, containers often need to write to the image +content. Installing packages into /usr, for example. In production, +applications seldom need to write to the image. Container applications write +to volumes if they need to write to file systems at all. Applications can be +made more secure by running them in read-only mode using the --read-only switch. +This protects the containers image from modification. Read only containers may +still need to write temporary data. The best way to handle this is to mount +tmpfs directories on /run and /tmp. + + # docker run --read-only --tmpfs /run --tmpfs /tmp -i -t fedora /bin/bash + +## Exposing log messages from the container to the host's log + +If you want messages that are logged in your container to show up in the host's +syslog/journal then you should bind mount the /dev/log directory as follows. + + # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash + +From inside the container you can test this by sending a message to the log. + + (bash)# logger "Hello from my container" + +Then exit and check the journal. + + # exit + + # journalctl -b | grep Hello + +This should list the message sent to logger. + +## Attaching to one or more from STDIN, STDOUT, STDERR + +If you do not specify -a then Docker will attach everything (stdin,stdout,stderr) +. You can specify to which of the three standard streams (stdin, stdout, stderr) +you'd like to connect instead, as in: + + # docker run -a stdin -a stdout -i -t fedora /bin/bash + +## Sharing IPC between containers + +Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html + +Testing `--ipc=host` mode: + +Host shows a shared memory segment with 7 pids attached, happens to be from httpd: + +``` + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` + +Now run a regular container, and it correctly does NOT see the shared memory segment from the host: + +``` + $ docker run -it shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd: + + ``` + $ docker run -it --ipc=host shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` +Testing `--ipc=container:CONTAINERID` mode: + +Start a container with a program to create a shared memory segment: +``` + $ docker run -it shm bash + $ sudo shm/shm_server & + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` +Create a 2nd container correctly shows no shared memory segment from 1st container: +``` + $ docker run shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Create a 3rd container using the new --ipc=container:CONTAINERID option, now it shows the shared memory segment from the first: + +``` + $ docker run -it --ipc=container:ed735b2264ac shm ipcs -m + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` + +## Linking Containers + +> **Note**: This section describes linking between containers on the +> default (bridge) network, also known as "legacy links". Using `--link` +> on user-defined networks uses the DNS-based discovery, which does not add +> entries to `/etc/hosts`, and does not set environment variables for +> discovery. + +The link feature allows multiple containers to communicate with each other. For +example, a container whose Dockerfile has exposed port 80 can be run and named +as follows: + + # docker run --name=link-test -d -i -t fedora/httpd + +A second container, in this case called linker, can communicate with the httpd +container, named link-test, by running with the **--link=:** + + # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash + +Now the container linker is linked to container link-test with the alias lt. +Running the **env** command in the linker container shows environment variables + with the LT (alias) context (**LT_**) + + # env + HOSTNAME=668231cb0978 + TERM=xterm + LT_PORT_80_TCP=tcp://172.17.0.3:80 + LT_PORT_80_TCP_PORT=80 + LT_PORT_80_TCP_PROTO=tcp + LT_PORT=tcp://172.17.0.3:80 + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + PWD=/ + LT_NAME=/linker/lt + SHLVL=1 + HOME=/ + LT_PORT_80_TCP_ADDR=172.17.0.3 + _=/usr/bin/env + +When linking two containers Docker will use the exposed ports of the container +to create a secure tunnel for the parent to access. + +If a container is connected to the default bridge network and `linked` +with other containers, then the container's `/etc/hosts` file is updated +with the linked container's name. + +> **Note** Since Docker may live update the container's `/etc/hosts` file, there +may be situations when processes inside the container can end up reading an +empty or incomplete `/etc/hosts` file. In most cases, retrying the read again +should fix the problem. + + +## Mapping Ports for External Usage + +The exposed port of an application can be mapped to a host port using the **-p** +flag. For example, an httpd port 80 can be mapped to the host port 8080 using the +following: + + # docker run -p 8080:80 -d -i -t fedora/httpd + +## Creating and Mounting a Data Volume Container + +Many applications require the sharing of persistent data across several +containers. Docker allows you to create a Data Volume Container that other +containers can mount from. For example, create a named container that contains +directories /var/volume1 and /tmp/volume2. The image will need to contain these +directories so a couple of RUN mkdir instructions might be required for you +fedora-data image: + + # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true + # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash + +Multiple --volumes-from parameters will bring together multiple data volumes from +multiple containers. And it's possible to mount the volumes that came from the +DATA container in yet another container via the fedora-container1 intermediary +container, allowing to abstract the actual data source from users of that data: + + # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash + +## Mounting External Volumes + +To mount a host directory as a container volume, specify the absolute path to +the directory and the absolute path for the container directory separated by a +colon: + + # docker run -v /var/db:/data1 -i -t fedora bash + +When using SELinux, be aware that the host has no knowledge of container SELinux +policy. Therefore, in the above example, if SELinux policy is enforced, the +`/var/db` directory is not writable to the container. A "Permission Denied" +message will occur and an avc: message in the host's syslog. + + +To work around this, at time of writing this man page, the following command +needs to be run in order for the proper SELinux policy type label to be attached +to the host directory: + + # chcon -Rt svirt_sandbox_file_t /var/db + + +Now, writing to the /data1 volume in the container will be allowed and the +changes will also be reflected on the host in /var/db. + +## Using alternative security labeling + +You can override the default labeling scheme for each container by specifying +the `--security-opt` flag. For example, you can specify the MCS/MLS level, a +requirement for MLS systems. Specifying the level in the following command +allows you to share the same content between containers. + + # docker run --security-opt label=level:s0:c100,c200 -i -t fedora bash + +An MLS example might be: + + # docker run --security-opt label=level:TopSecret -i -t rhel7 bash + +To disable the security labeling for this container versus running with the +`--permissive` flag, use the following command: + + # docker run --security-opt label=disable -i -t fedora bash + +If you want a tighter security policy on the processes within a container, +you can specify an alternate type for the container. You could run a container +that is only allowed to listen on Apache ports by executing the following +command: + + # docker run --security-opt label=type:svirt_apache_t -i -t centos bash + +Note: + +You would have to write policy defining a `svirt_apache_t` type. + +## Setting device weight + +If you want to set `/dev/sda` device weight to `200`, you can specify the device +weight by `--blkio-weight-device` flag. Use the following command: + + # docker run -it --blkio-weight-device "/dev/sda:200" ubuntu + +## Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Microsoft Windows. The `--isolation ` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. These two commands are equivalent on Linux: + +``` +$ docker run -d busybox top +$ docker run -d --isolation default busybox top +``` + +On Microsoft Windows, can take any of these values: + +* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. +* `process`: Namespace isolation only. +* `hyperv`: Hyper-V hypervisor partition-based isolation. + +In practice, when running on Microsoft Windows without a `daemon` option set, these two commands are equivalent: + +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation process busybox top +``` + +If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, any of these commands also result in `hyperv` isolation: + +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation hyperv busybox top +``` + +## Setting Namespaced Kernel Parameters (Sysctls) + +The `--sysctl` sets namespaced kernel parameters (sysctls) in the +container. For example, to turn on IP forwarding in the containers +network namespace, run this command: + + $ docker run --sysctl net.ipv4.ip_forward=1 someimage + +Note: + +Not all sysctls are namespaced. Docker does not support changing sysctls +inside of a container that also modify the host system. As the kernel +evolves we expect to see more sysctls become namespaced. + +See the definition of the `--sysctl` option above for the current list of +supported sysctls. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +November 2015, updated by Sally O'Malley diff --git a/vendor/github.com/moby/moby/man/docker-save.1.md b/vendor/github.com/moby/moby/man/docker-save.1.md new file mode 100644 index 0000000..1d1de8a --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-save.1.md @@ -0,0 +1,45 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-save - Save one or more images to a tar archive (streamed to STDOUT by default) + +# SYNOPSIS +**docker save** +[**--help**] +[**-o**|**--output**[=*OUTPUT*]] +IMAGE [IMAGE...] + +# DESCRIPTION +Produces a tarred repository to the standard output stream. Contains all +parent layers, and all tags + versions, or specified repo:tag. + +Stream to a file instead of STDOUT by using **-o**. + +# OPTIONS +**--help** + Print usage statement + +**-o**, **--output**="" + Write to a file, instead of STDOUT + +# EXAMPLES + +Save all fedora repository images to a fedora-all.tar and save the latest +fedora image to a fedora-latest.tar: + + $ docker save fedora > fedora-all.tar + $ docker save --output=fedora-latest.tar fedora:latest + $ ls -sh fedora-all.tar + 721M fedora-all.tar + $ ls -sh fedora-latest.tar + 367M fedora-latest.tar + +# See also +**docker-load(1)** to load an image from a tar archive on STDIN. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-search.1.md b/vendor/github.com/moby/moby/man/docker-search.1.md new file mode 100644 index 0000000..ad8bbc7 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-search.1.md @@ -0,0 +1,70 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-search - Search the Docker Hub for images + +# SYNOPSIS +**docker search** +[**-f**|**--filter**[=*[]*]] +[**--help**] +[**--limit**[=*LIMIT*]] +[**--no-trunc**] +TERM + +# DESCRIPTION + +Search Docker Hub for images that match the specified `TERM`. The table +of images returned displays the name, description (truncated by default), number +of stars awarded, whether the image is official, and whether it is automated. + +*Note* - Search queries will only return up to 25 results + +# OPTIONS + +**-f**, **--filter**=[] + Filter output based on these conditions: + - stars= + - is-automated=(true|false) + - is-official=(true|false) + +**--help** + Print usage statement + +**--limit**=*LIMIT* + Maximum returned search results. The default is 25. + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +# EXAMPLES + +## Search Docker Hub for ranked images + +Search a registry for the term 'fedora' and only display those images +ranked 3 or higher: + + $ docker search --filter=stars=3 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + mattdm/fedora A basic Fedora image corresponding roughly... 50 + fedora (Semi) Official Fedora base image. 38 + mattdm/fedora-small A small Fedora image on which to build. Co... 8 + goldmann/wildfly A WildFly application server running on a ... 3 [OK] + +## Search Docker Hub for automated images + +Search Docker Hub for the term 'fedora' and only display automated images +ranked 1 or higher: + + $ docker search --filter=is-automated=true --filter=stars=1 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + goldmann/wildfly A WildFly application server running on a ... 3 [OK] + tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +April 2016, updated by Vincent Demeester + diff --git a/vendor/github.com/moby/moby/man/docker-start.1.md b/vendor/github.com/moby/moby/man/docker-start.1.md new file mode 100644 index 0000000..c00b0a1 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-start.1.md @@ -0,0 +1,39 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-start - Start one or more containers + +# SYNOPSIS +**docker start** +[**-a**|**--attach**] +[**--detach-keys**[=*[]*]] +[**--help**] +[**-i**|**--interactive**] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +Start one or more containers. + +# OPTIONS +**-a**, **--attach**=*true*|*false* + Attach container's STDOUT and STDERR and forward all signals to the + process. The default is *false*. + +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**--help** + Print usage statement + +**-i**, **--interactive**=*true*|*false* + Attach container's STDIN. The default is *false*. + +# See also +**docker-stop(1)** to stop a container. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-stats.1.md b/vendor/github.com/moby/moby/man/docker-stats.1.md new file mode 100644 index 0000000..0f022cd --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-stats.1.md @@ -0,0 +1,57 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-stats - Display a live stream of one or more containers' resource usage statistics + +# SYNOPSIS +**docker stats** +[**-a**|**--all**] +[**--help**] +[**--no-stream**] +[**--format[="*TEMPLATE*"]**] +[CONTAINER...] + +# DESCRIPTION + +Display a live stream of one or more containers' resource usage statistics + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all containers. Only running containers are shown by default. The default is *false*. + +**--help** + Print usage statement + +**--no-stream**=*true*|*false* + Disable streaming stats and only pull the first result, default setting is false. + +**--format**="*TEMPLATE*" + Pretty-print containers statistics using a Go template. + Valid placeholders: + .Container - Container name or ID. + .Name - Container name. + .ID - Container ID. + .CPUPerc - CPU percentage. + .MemUsage - Memory usage. + .NetIO - Network IO. + .BlockIO - Block IO. + .MemPerc - Memory percentage (Not available on Windows). + .PIDs - Number of PIDs (Not available on Windows). + +# EXAMPLES + +Running `docker stats` on all running containers + + $ docker stats + CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O + 1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB + 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B + d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B + +Running `docker stats` on multiple containers by name and id. + + $ docker stats fervent_panini 5acfcb1b4fd1 + CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O + 5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B + fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B diff --git a/vendor/github.com/moby/moby/man/docker-stop.1.md b/vendor/github.com/moby/moby/man/docker-stop.1.md new file mode 100644 index 0000000..fa377c9 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-stop.1.md @@ -0,0 +1,30 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-stop - Stop a container by sending SIGTERM and then SIGKILL after a grace period + +# SYNOPSIS +**docker stop** +[**--help**] +[**-t**|**--time**[=*10*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION +Stop a container (Send SIGTERM, and then SIGKILL after + grace period) + +# OPTIONS +**--help** + Print usage statement + +**-t**, **--time**=*10* + Number of seconds to wait for the container to stop before killing it. Default is 10 seconds. + +#See also +**docker-start(1)** to restart a stopped container. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-tag.1.md b/vendor/github.com/moby/moby/man/docker-tag.1.md new file mode 100644 index 0000000..2fb5bd3 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-tag.1.md @@ -0,0 +1,77 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-tag - Create a tag `TARGET_IMAGE` that refers to `SOURCE_IMAGE` + +# SYNOPSIS +**docker tag** +[**--help**] +SOURCE_NAME[:TAG] TARGET_NAME[:TAG] + +# DESCRIPTION +Assigns a new alias to an image in a registry. An alias refers to the +entire image name including the optional `TAG` after the ':'. + +# "OPTIONS" +**--help** + Print usage statement. + +**NAME** + The image name which is made up of slash-separated name components, + optionally prefixed by a registry hostname. The hostname must comply with + standard DNS rules, but may not contain underscores. If a hostname is + present, it may optionally be followed by a port number in the format + `:8080`. If not present, the command uses Docker's public registry located at + `registry-1.docker.io` by default. Name components may contain lowercase + letters, digits and separators. A separator is defined as a period, one or + two underscores, or one or more dashes. A name component may not start or end + with a separator. + +**TAG** + The tag assigned to the image to version and distinguish images with the same + name. The tag name must be valid ASCII and may contain lowercase and + uppercase letters, digits, underscores, periods and hyphens. A tag name + may not start with a period or a hyphen and may contain a maximum of 128 + characters. + +# EXAMPLES + +## Tagging an image referenced by ID + +To tag a local image with ID "0e5574283393" into the "fedora" repository with +"version1.0": + + docker tag 0e5574283393 fedora/httpd:version1.0 + +## Tagging an image referenced by Name + +To tag a local image with name "httpd" into the "fedora" repository with +"version1.0": + + docker tag httpd fedora/httpd:version1.0 + +Note that since the tag name is not specified, the alias is created for an +existing local version `httpd:latest`. + +## Tagging an image referenced by Name and Tag + +To tag a local image with name "httpd" and tag "test" into the "fedora" +repository with "version1.0.test": + + docker tag httpd:test fedora/httpd:version1.0.test + +## Tagging an image for a private repository + +To push an image to a private registry and not the central Docker +registry you must tag it with the registry hostname and port (if needed). + + docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/moby/moby/man/docker-top.1.md b/vendor/github.com/moby/moby/man/docker-top.1.md new file mode 100644 index 0000000..a666f7c --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-top.1.md @@ -0,0 +1,36 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-top - Display the running processes of a container + +# SYNOPSIS +**docker top** +[**--help**] +CONTAINER [ps OPTIONS] + +# DESCRIPTION + +Display the running process of the container. ps-OPTION can be any of the options you would pass to a Linux ps command. + +All displayed information is from host's point of view. + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + +Run **docker top** with the ps option of -x: + + $ docker top 8601afda2b -x + PID TTY STAT TIME COMMAND + 16623 ? Ss 0:00 sleep 99999 + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +June 2015, updated by Ma Shimiao +December 2015, updated by Pavel Pospisil diff --git a/vendor/github.com/moby/moby/man/docker-unpause.1.md b/vendor/github.com/moby/moby/man/docker-unpause.1.md new file mode 100644 index 0000000..e6fd3c4 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-unpause.1.md @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-unpause - Unpause all processes within one or more containers + +# SYNOPSIS +**docker unpause** +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The `docker unpause` command un-suspends all processes in the specified containers. +On Linux, it does this using the cgroups freezer. + +See the [cgroups freezer documentation] +(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for +further details. + +# OPTIONS +**--help** + Print usage statement + +# See also +**docker-pause(1)** to pause all processes within one or more containers. + +# HISTORY +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker-update.1.md b/vendor/github.com/moby/moby/man/docker-update.1.md new file mode 100644 index 0000000..85f3dd0 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-update.1.md @@ -0,0 +1,171 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-update - Update configuration of one or more containers + +# SYNOPSIS +**docker update** +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**--cpu-shares**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpu-rt-period**[=*0*]] +[**--cpu-rt-runtime**[=*0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--help**] +[**--kernel-memory**[=*KERNEL-MEMORY*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--memory-reservation**[=*MEMORY-RESERVATION*]] +[**--memory-swap**[=*MEMORY-SWAP*]] +[**--restart**[=*""*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The **docker update** command dynamically updates container configuration. +You can use this command to prevent containers from consuming too many +resources from their Docker host. With a single command, you can place +limits on a single container or on many. To specify more than one container, +provide space-separated list of container names or IDs. + +With the exception of the **--kernel-memory** option, you can specify these +options on a running or a stopped container. On kernel version older than +4.6, You can only update **--kernel-memory** on a stopped container or on +a running container with kernel memory initialized. + +# OPTIONS + +**--blkio-weight**=0 + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**--cpu-shares**=0 + CPU shares (relative weight) + +**--cpu-period**=0 + Limit the CPU CFS (Completely Fair Scheduler) period + + Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. + +**--cpu-quota**=0 + Limit the CPU CFS (Completely Fair Scheduler) quota + +**--cpu-rt-period**=0 + Limit the CPU real-time period in microseconds + + Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. + +**--cpu-rt-runtime**=0 + Limit the CPU real-time runtime in microseconds + + Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: + Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. + + The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. + +**--cpuset-cpus**="" + CPUs in which to allow execution (0-3, 0,1) + +**--cpuset-mems**="" + Memory nodes(MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + +**--help** + Print usage statement + +**--kernel-memory**="" + Kernel memory limit (format: `[]`, where unit = b, k, m or g) + + Note that on kernel version older than 4.6, you can not update kernel memory on + a running container if the container is started without kernel memory initialized, + in this case, it can only be updated after it's stopped. The new setting takes + effect when the container is started. + +**-m**, **--memory**="" + Memory limit (format: , where unit = b, k, m or g) + + Note that the memory should be smaller than the already set swap memory limit. + If you want update a memory limit bigger than the already set swap memory limit, + you should update swap memory limit at the same time. If you don't set swap memory + limit on docker create/run but only memory limit, the swap memory is double + the memory limit. + +**--memory-reservation**="" + Memory soft limit (format: [], where unit = b, k, m or g) + +**--memory-swap**="" + Total memory limit (memory + swap) + +**--restart**="" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). + +# EXAMPLES + +The following sections illustrate ways to use this command. + +### Update a container's cpu-shares + +To limit a container's cpu-shares to 512, first identify the container +name or ID. You can use **docker ps** to find these values. You can also +use the ID returned from the **docker run** command. Then, do the following: + +```bash +$ docker update --cpu-shares 512 abebf7571666 +``` + +### Update a container with cpu-shares and memory + +To update multiple resource configurations for multiple containers: + +```bash +$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse +``` + +### Update a container's kernel memory constraints + +You can update a container's kernel memory limit using the **--kernel-memory** +option. On kernel version older than 4.6, this option can be updated on a +running container only if the container was started with **--kernel-memory**. +If the container was started *without* **--kernel-memory** you need to stop +the container before updating kernel memory. + +For example, if you started a container with this command: + +```bash +$ docker run -dit --name test --kernel-memory 50M ubuntu bash +``` + +You can update kernel memory while the container is running: + +```bash +$ docker update --kernel-memory 80M test +``` + +If you started a container *without* kernel memory initialized: + +```bash +$ docker run -dit --name test2 --memory 300M ubuntu bash +``` + +Update kernel memory of running container `test2` will fail. You need to stop +the container before updating the **--kernel-memory** setting. The next time you +start it, the container uses the new value. + +Kernel version newer than (include) 4.6 does not have this limitation, you +can use `--kernel-memory` the same way as other options. + +### Update a container's restart policy + +You can change a container's restart policy on a running container. The new +restart policy takes effect instantly after you run `docker update` on a +container. + +To update restart policy for one or more containers: + +```bash +$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse +``` + +Note that if the container is started with "--rm" flag, you cannot update the restart +policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the +container. diff --git a/vendor/github.com/moby/moby/man/docker-version.1.md b/vendor/github.com/moby/moby/man/docker-version.1.md new file mode 100644 index 0000000..1838f82 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-version.1.md @@ -0,0 +1,62 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2015 +# NAME +docker-version - Show the Docker version information. + +# SYNOPSIS +**docker version** +[**--help**] +[**-f**|**--format**[=*FORMAT*]] + +# DESCRIPTION +This command displays version information for both the Docker client and +daemon. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--format**="" + Format the output using the given Go template. + +# EXAMPLES + +## Display Docker version information + +The default output: + + $ docker version + Client: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + + Server: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + +Get server version: + + $ docker version --format '{{.Server.Version}}' + 1.8.0 + +Dump raw data: + +To view all available fields, you can use the format `{{json .}}`. + + $ docker version --format '{{json .}}' + {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} + + +# HISTORY +June 2014, updated by Sven Dowideit +June 2015, updated by John Howard +June 2015, updated by Patrick Hemmer diff --git a/vendor/github.com/moby/moby/man/docker-wait.1.md b/vendor/github.com/moby/moby/man/docker-wait.1.md new file mode 100644 index 0000000..6788009 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker-wait.1.md @@ -0,0 +1,30 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-wait - Block until one or more containers stop, then print their exit codes + +# SYNOPSIS +**docker wait** +[**--help**] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +Block until one or more containers stop, then print their exit codes. + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + + $ docker run -d fedora sleep 99 + 079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622 + $ docker wait 079b83f558a2bc + 0 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/moby/moby/man/docker.1.md b/vendor/github.com/moby/moby/man/docker.1.md new file mode 100644 index 0000000..2a96184 --- /dev/null +++ b/vendor/github.com/moby/moby/man/docker.1.md @@ -0,0 +1,237 @@ +% DOCKER(1) Docker User Manuals +% William Henry +% APRIL 2014 +# NAME +docker \- Docker image and container command line interface + +# SYNOPSIS +**docker** [OPTIONS] COMMAND [ARG...] + +**docker** daemon [--help|...] + +**docker** [--help|-v|--version] + +# DESCRIPTION +is a client for interacting with the daemon (see **dockerd(8)**) through the CLI. + +The Docker CLI has over 30 commands. The commands are listed below and each has +its own man page which explain usage and arguments. + +To see the man page for a command run **man docker **. + +# OPTIONS +**--help** + Print usage statement + +**--config**="" + Specifies the location of the Docker client configuration files. The default is '~/.docker'. + +**-D**, **--debug**=*true*|*false* + Enable debug mode. Default is false. + +**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host]:[port][path] to bind or +unix://[/path/to/socket] to use. + The socket(s) to bind to in daemon mode specified using one or more + tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd. + If the tcp port is not specified, then it will default to either `2375` when + `--tls` is off, or `2376` when `--tls` is on, or `--tlsverify` is specified. + +**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" + Set the logging level. Default is `info`. + +**--tls**=*true*|*false* + Use TLS; implied by --tlsverify. Default is false. + +**--tlscacert**=*~/.docker/ca.pem* + Trust certs signed only by this CA. + +**--tlscert**=*~/.docker/cert.pem* + Path to TLS certificate file. + +**--tlskey**=*~/.docker/key.pem* + Path to TLS key file. + +**--tlsverify**=*true*|*false* + Use TLS and verify the remote (daemon: verify client, client: verify daemon). + Default is false. + +**-v**, **--version**=*true*|*false* + Print version information and quit. Default is false. + +# COMMANDS +**attach** + Attach to a running container + See **docker-attach(1)** for full documentation on the **attach** command. + +**build** + Build an image from a Dockerfile + See **docker-build(1)** for full documentation on the **build** command. + +**commit** + Create a new image from a container's changes + See **docker-commit(1)** for full documentation on the **commit** command. + +**cp** + Copy files/folders between a container and the local filesystem + See **docker-cp(1)** for full documentation on the **cp** command. + +**create** + Create a new container + See **docker-create(1)** for full documentation on the **create** command. + +**diff** + Inspect changes on a container's filesystem + See **docker-diff(1)** for full documentation on the **diff** command. + +**events** + Get real time events from the server + See **docker-events(1)** for full documentation on the **events** command. + +**exec** + Run a command in a running container + See **docker-exec(1)** for full documentation on the **exec** command. + +**export** + Stream the contents of a container as a tar archive + See **docker-export(1)** for full documentation on the **export** command. + +**history** + Show the history of an image + See **docker-history(1)** for full documentation on the **history** command. + +**images** + List images + See **docker-images(1)** for full documentation on the **images** command. + +**import** + Create a new filesystem image from the contents of a tarball + See **docker-import(1)** for full documentation on the **import** command. + +**info** + Display system-wide information + See **docker-info(1)** for full documentation on the **info** command. + +**inspect** + Return low-level information on a container or image + See **docker-inspect(1)** for full documentation on the **inspect** command. + +**kill** + Kill a running container (which includes the wrapper process and everything +inside it) + See **docker-kill(1)** for full documentation on the **kill** command. + +**load** + Load an image from a tar archive + See **docker-load(1)** for full documentation on the **load** command. + +**login** + Log in to a Docker Registry + See **docker-login(1)** for full documentation on the **login** command. + +**logout** + Log the user out of a Docker Registry + See **docker-logout(1)** for full documentation on the **logout** command. + +**logs** + Fetch the logs of a container + See **docker-logs(1)** for full documentation on the **logs** command. + +**pause** + Pause all processes within a container + See **docker-pause(1)** for full documentation on the **pause** command. + +**port** + Lookup the public-facing port which is NAT-ed to PRIVATE_PORT + See **docker-port(1)** for full documentation on the **port** command. + +**ps** + List containers + See **docker-ps(1)** for full documentation on the **ps** command. + +**pull** + Pull an image or a repository from a Docker Registry + See **docker-pull(1)** for full documentation on the **pull** command. + +**push** + Push an image or a repository to a Docker Registry + See **docker-push(1)** for full documentation on the **push** command. + +**rename** + Rename a container. + See **docker-rename(1)** for full documentation on the **rename** command. + +**restart** + Restart one or more containers + See **docker-restart(1)** for full documentation on the **restart** command. + +**rm** + Remove one or more containers + See **docker-rm(1)** for full documentation on the **rm** command. + +**rmi** + Remove one or more images + See **docker-rmi(1)** for full documentation on the **rmi** command. + +**run** + Run a command in a new container + See **docker-run(1)** for full documentation on the **run** command. + +**save** + Save an image to a tar archive + See **docker-save(1)** for full documentation on the **save** command. + +**search** + Search for an image in the Docker index + See **docker-search(1)** for full documentation on the **search** command. + +**start** + Start a container + See **docker-start(1)** for full documentation on the **start** command. + +**stats** + Display a live stream of one or more containers' resource usage statistics + See **docker-stats(1)** for full documentation on the **stats** command. + +**stop** + Stop a container + See **docker-stop(1)** for full documentation on the **stop** command. + +**tag** + Tag an image into a repository + See **docker-tag(1)** for full documentation on the **tag** command. + +**top** + Lookup the running processes of a container + See **docker-top(1)** for full documentation on the **top** command. + +**unpause** + Unpause all processes within a container + See **docker-unpause(1)** for full documentation on the **unpause** command. + +**version** + Show the Docker version information + See **docker-version(1)** for full documentation on the **version** command. + +**wait** + Block until a container stops, then print its exit code + See **docker-wait(1)** for full documentation on the **wait** command. + + +# RUNTIME EXECUTION OPTIONS + +Use the **--exec-opt** flags to specify options to the execution driver. +The following options are available: + +#### native.cgroupdriver +Specifies the management of the container's `cgroups`. You can specify `cgroupfs` +or `systemd`. If you specify `systemd` and it is not available, the system errors +out. + +#### Client +For specific client examples please see the man page for the specific Docker +command. For example: + + man docker-run + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. diff --git a/vendor/github.com/moby/moby/man/dockerd.8.md b/vendor/github.com/moby/moby/man/dockerd.8.md new file mode 100644 index 0000000..761dc6b --- /dev/null +++ b/vendor/github.com/moby/moby/man/dockerd.8.md @@ -0,0 +1,710 @@ +% DOCKER(8) Docker User Manuals +% Shishir Mahajan +% SEPTEMBER 2015 +# NAME +dockerd - Enable daemon mode + +# SYNOPSIS +**dockerd** +[**--add-runtime**[=*[]*]] +[**--api-cors-header**=[=*API-CORS-HEADER*]] +[**--authorization-plugin**[=*[]*]] +[**-b**|**--bridge**[=*BRIDGE*]] +[**--bip**[=*BIP*]] +[**--cgroup-parent**[=*[]*]] +[**--cluster-store**[=*[]*]] +[**--cluster-advertise**[=*[]*]] +[**--cluster-store-opt**[=*map[]*]] +[**--config-file**[=*/etc/docker/daemon.json*]] +[**--containerd**[=*SOCKET-PATH*]] +[**-D**|**--debug**] +[**--default-gateway**[=*DEFAULT-GATEWAY*]] +[**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]] +[**--default-runtime**[=*runc*]] +[**--default-ulimit**[=*[]*]] +[**--disable-legacy-registry**] +[**--dns**[=*[]*]] +[**--dns-opt**[=*[]*]] +[**--dns-search**[=*[]*]] +[**--exec-opt**[=*[]*]] +[**--exec-root**[=*/var/run/docker*]] +[**--experimental**[=*false*]] +[**--fixed-cidr**[=*FIXED-CIDR*]] +[**--fixed-cidr-v6**[=*FIXED-CIDR-V6*]] +[**-G**|**--group**[=*docker*]] +[**-g**|**--graph**[=*/var/lib/docker*]] +[**-H**|**--host**[=*[]*]] +[**--help**] +[**--icc**[=*true*]] +[**--init**[=*false*]] +[**--init-path**[=*""*]] +[**--insecure-registry**[=*[]*]] +[**--ip**[=*0.0.0.0*]] +[**--ip-forward**[=*true*]] +[**--ip-masq**[=*true*]] +[**--iptables**[=*true*]] +[**--ipv6**] +[**--isolation**[=*default*]] +[**-l**|**--log-level**[=*info*]] +[**--label**[=*[]*]] +[**--live-restore**[=*false*]] +[**--log-driver**[=*json-file*]] +[**--log-opt**[=*map[]*]] +[**--mtu**[=*0*]] +[**--max-concurrent-downloads**[=*3*]] +[**--max-concurrent-uploads**[=*5*]] +[**-p**|**--pidfile**[=*/var/run/docker.pid*]] +[**--raw-logs**] +[**--registry-mirror**[=*[]*]] +[**-s**|**--storage-driver**[=*STORAGE-DRIVER*]] +[**--seccomp-profile**[=*SECCOMP-PROFILE-PATH*]] +[**--selinux-enabled**] +[**--shutdown-timeout**[=*15*]] +[**--storage-opt**[=*[]*]] +[**--swarm-default-advertise-addr**[=*IP|INTERFACE*]] +[**--tls**] +[**--tlscacert**[=*~/.docker/ca.pem*]] +[**--tlscert**[=*~/.docker/cert.pem*]] +[**--tlskey**[=*~/.docker/key.pem*]] +[**--tlsverify**] +[**--userland-proxy**[=*true*]] +[**--userland-proxy-path**[=*""*]] +[**--userns-remap**[=*default*]] + +# DESCRIPTION +**dockerd** is used for starting the Docker daemon (i.e., to command the daemon +to manage images, containers etc). So **dockerd** is a server, as a daemon. + +To run the Docker daemon you can specify **dockerd**. +You can check the daemon options using **dockerd --help**. +Daemon options should be specified after the **dockerd** keyword in the +following format. + +**dockerd [OPTIONS]** + +# OPTIONS + +**--add-runtime**=[] + Runtimes can be registered with the daemon either via the +configuration file or using the `--add-runtime` command line argument. + + The following is an example adding 2 runtimes via the configuration: + +```json +{ + "default-runtime": "runc", + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + + This is the same example via the command line: + +```bash +$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement +``` + + **Note**: defining runtime arguments via the command line is not supported. + +**--api-cors-header**="" + Set CORS headers in the Engine API. Default is cors disabled. Give urls like + "http://foo, http://bar, ...". Give "*" to allow all. + +**--authorization-plugin**="" + Set authorization plugins to load + +**-b**, **--bridge**="" + Attach containers to a pre\-existing network bridge; use 'none' to disable + container networking + +**--bip**="" + Use the provided CIDR notation address for the dynamically created bridge + (docker0); Mutually exclusive of \-b + +**--cgroup-parent**="" + Set parent cgroup for all containers. Default is "/docker" for fs cgroup + driver and "system.slice" for systemd cgroup driver. + +**--cluster-store**="" + URL of the distributed storage backend + +**--cluster-advertise**="" + Specifies the 'host:port' or `interface:port` combination that this + particular daemon instance should use when advertising itself to the cluster. + The daemon is reached through this value. + +**--cluster-store-opt**="" + Specifies options for the Key/Value store. + +**--config-file**="/etc/docker/daemon.json" + Specifies the JSON file path to load the configuration from. + +**--containerd**="" + Path to containerd socket. + +**-D**, **--debug**=*true*|*false* + Enable debug mode. Default is false. + +**--default-gateway**="" + IPv4 address of the container default gateway; this address must be part of + the bridge subnet (which is defined by \-b or \--bip) + +**--default-gateway-v6**="" + IPv6 address of the container default gateway + +**--default-runtime**="runc" + Set default runtime if there're more than one specified by `--add-runtime`. + +**--default-ulimit**=[] + Default ulimits for containers. + +**--disable-legacy-registry**=*true*|*false* + Disable contacting legacy registries + +**--dns**="" + Force Docker to use specific DNS servers + +**--dns-opt**="" + DNS options to use. + +**--dns-search**=[] + DNS search domains to use. + +**--exec-opt**=[] + Set runtime execution options. See RUNTIME EXECUTION OPTIONS. + +**--exec-root**="" + Path to use as the root of the Docker execution state files. Default is + `/var/run/docker`. + +**--experimental**="" + Enable the daemon experimental features. + +**--fixed-cidr**="" + IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in + the bridge subnet (which is defined by \-b or \-\-bip). + +**--fixed-cidr-v6**="" + IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64) + +**-G**, **--group**="" + Group to assign the unix socket specified by -H when running in daemon mode. + use '' (the empty string) to disable setting of a group. Default is `docker`. + +**-g**, **--graph**="" + Path to use as the root of the Docker runtime. Default is `/var/lib/docker`. + +**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host:port] to bind or +unix://[/path/to/socket] to use. + The socket(s) to bind to in daemon mode specified using one or more + tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. + +**--help** + Print usage statement + +**--icc**=*true*|*false* + Allow unrestricted inter\-container and Docker daemon host communication. If + disabled, containers can still be linked together using the **--link** option + (see **docker-run(1)**). Default is true. + +**--init** + Run an init process inside containers for signal forwarding and process + reaping. + +**--init-path** + Path to the docker-init binary. + +**--insecure-registry**=[] + Enable insecure registry communication, i.e., enable un-encrypted and/or + untrusted communication. + + List of insecure registries can contain an element with CIDR notation to + specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS + with certificates from unknown CAs. + + Enabling `--insecure-registry` is useful when running a local registry. + However, because its use creates security vulnerabilities it should ONLY be + enabled for testing purposes. For increased security, users should add their + CA to their system's list of trusted CAs instead of using + `--insecure-registry`. + +**--ip**="" + Default IP address to use when binding container ports. Default is `0.0.0.0`. + +**--ip-forward**=*true*|*false* + Enables IP forwarding on the Docker host. The default is `true`. This flag + interacts with the IP forwarding setting on your host system's kernel. If + your system has IP forwarding disabled, this setting enables it. If your + system has IP forwarding enabled, setting this flag to `--ip-forward=false` + has no effect. + + This setting will also enable IPv6 forwarding if you have both + `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject + Router Advertisements and interfere with the host's existing IPv6 + configuration. For more information, please consult the documentation about + "Advanced Networking - IPv6". + +**--ip-masq**=*true*|*false* + Enable IP masquerading for bridge's IP range. Default is true. + +**--iptables**=*true*|*false* + Enable Docker's addition of iptables rules. Default is true. + +**--ipv6**=*true*|*false* + Enable IPv6 support. Default is false. Docker will create an IPv6-enabled + bridge with address fe80::1 which will allow you to create IPv6-enabled + containers. Use together with `--fixed-cidr-v6` to provide globally routable + IPv6 addresses. IPv6 forwarding will be enabled if not used with + `--ip-forward=false`. This may collide with your host's current IPv6 + settings. For more information please consult the documentation about + "Advanced Networking - IPv6". + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. + Note that the default on Windows server is `process`, and the default on + Windows client is `hyperv`. Linux only supports `default`. + +**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" + Set the logging level. Default is `info`. + +**--label**="[]" + Set key=value labels to the daemon (displayed in `docker info`) + +**--live-restore**=*false* + Enable live restore of running containers when the daemon starts so that they + are not restarted. This option is applicable only for docker daemon running + on Linux host. + +**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" + Default driver for container logs. Default is `json-file`. + **Warning**: `docker logs` command works only for `json-file` logging driver. + +**--log-opt**=[] + Logging driver specific options. + +**--mtu**=*0* + Set the containers network mtu. Default is `0`. + +**--max-concurrent-downloads**=*3* + Set the max concurrent downloads for each pull. Default is `3`. + +**--max-concurrent-uploads**=*5* + Set the max concurrent uploads for each push. Default is `5`. + +**-p**, **--pidfile**="" + Path to use for daemon PID file. Default is `/var/run/docker.pid` + +**--raw-logs** + Output daemon logs in full timestamp format without ANSI coloring. If this + flag is not set, the daemon outputs condensed, colorized logs if a terminal + is detected, or full ("raw") output otherwise. + +**--registry-mirror**=*://* + Prepend a registry mirror to be used for image pulls. May be specified + multiple times. + +**-s**, **--storage-driver**="" + Force the Docker runtime to use a specific storage driver. + +**--seccomp-profile**="" + Path to seccomp profile. + +**--selinux-enabled**=*true*|*false* + Enable selinux support. Default is false. + +**--shutdown-timeout**=*15* + Set the shutdown timeout value in seconds. Default is `15`. + +**--storage-opt**=[] + Set storage driver options. See STORAGE DRIVER OPTIONS. + +**--swarm-default-advertise-addr**=*IP|INTERFACE* + Set default address or interface for swarm to advertise as its + externally-reachable address to other cluster members. This can be a + hostname, an IP address, or an interface such as `eth0`. A port cannot be + specified with this option. + +**--tls**=*true*|*false* + Use TLS; implied by --tlsverify. Default is false. + +**--tlscacert**=*~/.docker/ca.pem* + Trust certs signed only by this CA. + +**--tlscert**=*~/.docker/cert.pem* + Path to TLS certificate file. + +**--tlskey**=*~/.docker/key.pem* + Path to TLS key file. + +**--tlsverify**=*true*|*false* + Use TLS and verify the remote (daemon: verify client, client: verify daemon). + Default is false. + +**--userland-proxy**=*true*|*false* + Rely on a userland proxy implementation for inter-container and + outside-to-container loopback communications. Default is true. + +**--userland-proxy-path**="" + Path to the userland proxy binary. + +**--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid* + Enable user namespaces for containers on the daemon. Specifying "default" + will cause a new user and group to be created to handle UID and GID range + remapping for the user namespace mappings used for contained processes. + Specifying a user (or uid) and optionally a group (or gid) will cause the + daemon to lookup the user and group's subordinate ID ranges for use as the + user namespace mappings for contained processes. + +# STORAGE DRIVER OPTIONS + +Docker uses storage backends (known as "graphdrivers" in the Docker +internals) to create writable containers from images. Many of these +backends use operating system level technologies and can be +configured. + +Specify options to the storage backend with **--storage-opt** flags. The +backends that currently take options are *devicemapper*, *zfs* and *btrfs*. +Options for *devicemapper* are prefixed with *dm*, options for *zfs* +start with *zfs* and options for *btrfs* start with *btrfs*. + +Specifically for devicemapper, the default is a "loopback" model which +requires no pre-configuration, but is extremely inefficient. Do not +use it in production. + +To make the best use of Docker with the devicemapper backend, you must +have a recent version of LVM. Use `lvm` to create a thin pool; for +more information see `man lvmthin`. Then, use `--storage-opt +dm.thinpooldev` to tell the Docker engine to use that pool for +allocating images and container snapshots. + +## Devicemapper options + +#### dm.thinpooldev + +Specifies a custom block storage device to use for the thin pool. + +If using a block device for device mapper storage, it is best to use `lvm` +to create and manage the thin-pool volume. This volume is then handed to Docker +to exclusively create snapshot volumes needed for images and containers. + +Managing the thin-pool outside of Engine makes for the most feature-rich +method of having Docker utilize device mapper thin provisioning as the +backing storage for Docker containers. The highlights of the lvm-based +thin-pool management feature include: automatic or interactive thin-pool +resize support, dynamically changing thin-pool features, automatic thinp +metadata checking when lvm activates the thin-pool, etc. + +As a fallback if no thin pool is provided, loopback files are +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Engine daemon has a +`--storage-opt dm.thinpooldev` argument provided. + +Example use: + + $ dockerd \ + --storage-opt dm.thinpooldev=/dev/mapper/thin-pool + +#### dm.basesize + +Specifies the size to use when creating the base device, which limits +the size of images and containers. The default value is 10G. Note, +thin devices are inherently "sparse", so a 10G device which is mostly +empty doesn't use 10 GB of space on the pool. However, the filesystem +will use more space for base images the larger the device +is. + +The base device size can be increased at daemon restart which will allow +all future images and containers (based on those new images) to be of the +new base device size. + +Example use: `dockerd --storage-opt dm.basesize=50G` + +This will increase the base device size to 50G. The Docker daemon will throw an +error if existing base device size is larger than 50G. A user can use +this option to expand the base device size however shrinking is not permitted. + +This value affects the system-wide "base" empty filesystem that may already +be initialized and inherited by pulled images. Typically, a change to this +value requires additional steps to take effect: + + $ sudo service docker stop + $ sudo rm -rf /var/lib/docker + $ sudo service docker start + +Example use: `dockerd --storage-opt dm.basesize=20G` + +#### dm.fs + +Specifies the filesystem type to use for the base device. The +supported options are `ext4` and `xfs`. The default is `ext4`. + +Example use: `dockerd --storage-opt dm.fs=xfs` + +#### dm.mkfsarg + +Specifies extra mkfs arguments to be used when creating the base device. + +Example use: `dockerd --storage-opt "dm.mkfsarg=-O ^has_journal"` + +#### dm.mountopt + +Specifies extra mount options used when mounting the thin devices. + +Example use: `dockerd --storage-opt dm.mountopt=nodiscard` + +#### dm.use_deferred_removal + +Enables use of deferred device removal if `libdm` and the kernel driver +support the mechanism. + +Deferred device removal means that if device is busy when devices are +being removed/deactivated, then a deferred removal is scheduled on +device. And devices automatically go away when last user of the device +exits. + +For example, when a container exits, its associated thin device is removed. If +that device has leaked into some other mount namespace and can't be removed, +the container exit still succeeds and this option causes the system to schedule +the device for deferred removal. It does not wait in a loop trying to remove a +busy device. + +Example use: `dockerd --storage-opt dm.use_deferred_removal=true` + +#### dm.use_deferred_deletion + +Enables use of deferred device deletion for thin pool devices. By default, +thin pool device deletion is synchronous. Before a container is deleted, the +Docker daemon removes any associated devices. If the storage driver can not +remove a device, the container deletion fails and daemon returns. + +`Error deleting container: Error response from daemon: Cannot destroy container` + +To avoid this failure, enable both deferred device deletion and deferred +device removal on the daemon. + +`dockerd --storage-opt dm.use_deferred_deletion=true --storage-opt dm.use_deferred_removal=true` + +With these two options enabled, if a device is busy when the driver is +deleting a container, the driver marks the device as deleted. Later, when the +device isn't in use, the driver deletes it. + +In general it should be safe to enable this option by default. It will help +when unintentional leaking of mount point happens across multiple mount +namespaces. + +#### dm.loopdatasize + +**Note**: This option configures devicemapper loopback, which should not be +used in production. + +Specifies the size to use when creating the loopback file for the "data" device +which is used for the thin pool. The default size is 100G. The file is sparse, +so it will not initially take up this much space. + +Example use: `dockerd --storage-opt dm.loopdatasize=200G` + +#### dm.loopmetadatasize + +**Note**: This option configures devicemapper loopback, which should not be +used in production. + +Specifies the size to use when creating the loopback file for the "metadata" +device which is used for the thin pool. The default size is 2G. The file is +sparse, so it will not initially take up this much space. + +Example use: `dockerd --storage-opt dm.loopmetadatasize=4G` + +#### dm.datadev + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for data for a Docker-managed thin pool. +It is better to use `dm.thinpooldev` - see the documentation for it above for +discussion of the advantages. + +#### dm.metadatadev + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for metadata for a Docker-managed thin +pool. See `dm.datadev` for why this is deprecated. + +#### dm.blocksize + +Specifies a custom blocksize to use for the thin pool. The default +blocksize is 64K. + +Example use: `dockerd --storage-opt dm.blocksize=512K` + +#### dm.blkdiscard + +Enables or disables the use of `blkdiscard` when removing devicemapper devices. +This is disabled by default due to the additional latency, but as a special +case with loopback devices it will be enabled, in order to re-sparsify the +loopback file on image/container removal. + +Disabling this on loopback can lead to *much* faster container removal times, +but it also prevents the space used in `/var/lib/docker` directory from being +returned to the system for other use when containers are removed. + +Example use: `dockerd --storage-opt dm.blkdiscard=false` + +#### dm.override_udev_sync_check + +By default, the devicemapper backend attempts to synchronize with the `udev` +device manager for the Linux kernel. This option allows disabling that +synchronization, to continue even though the configuration may be buggy. + +To view the `udev` sync support of a Docker daemon that is using the +`devicemapper` driver, run: + + $ docker info + [...] + Udev Sync Supported: true + [...] + +When `udev` sync support is `true`, then `devicemapper` and `udev` can +coordinate the activation and deactivation of devices for containers. + +When `udev` sync support is `false`, a race condition occurs between the +`devicemapper` and `udev` during create and cleanup. The race condition results +in errors and failures. (For information on these failures, see +[docker#4036](https://github.com/docker/docker/issues/4036)) + +To allow the `docker` daemon to start, regardless of whether `udev` sync is +`false`, set `dm.override_udev_sync_check` to true: + + $ dockerd --storage-opt dm.override_udev_sync_check=true + +When this value is `true`, the driver continues and simply warns you the errors +are happening. + +**Note**: The ideal is to pursue a `docker` daemon and environment that does +support synchronizing with `udev`. For further discussion on this topic, see +[docker#4036](https://github.com/docker/docker/issues/4036). +Otherwise, set this flag for migrating existing Docker daemons to a daemon with +a supported environment. + +#### dm.min_free_space + +Specifies the min free space percent in a thin pool require for new device +creation to succeed. This check applies to both free data space as well +as free metadata space. Valid values are from 0% - 99%. Value 0% disables +free space checking logic. If user does not specify a value for this option, +the Engine uses a default value of 10%. + +Whenever a new a thin pool device is created (during `docker pull` or during +container creation), the Engine checks if the minimum free space is available. +If the space is unavailable, then device creation fails and any relevant +`docker` operation fails. + +To recover from this error, you must create more free space in the thin pool to +recover from the error. You can create free space by deleting some images and +containers from tge thin pool. You can also add more storage to the thin pool. + +To add more space to an LVM (logical volume management) thin pool, just add +more storage to the group container thin pool; this should automatically +resolve any errors. If your configuration uses loop devices, then stop the +Engine daemon, grow the size of loop files and restart the daemon to resolve +the issue. + +Example use:: `dockerd --storage-opt dm.min_free_space=10%` + +#### dm.xfs_nospace_max_retries + +Specifies the maximum number of retries XFS should attempt to complete IO when +ENOSPC (no space) error is returned by underlying storage device. + +By default XFS retries infinitely for IO to finish and this can result in +unkillable process. To change this behavior one can set xfs_nospace_max_retries +to say 0 and XFS will not retry IO after getting ENOSPC and will shutdown +filesystem. + +Example use: + + $ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0 + + +## ZFS options + +#### zfs.fsname + +Set zfs filesystem under which docker will create its own datasets. By default +docker will pick up the zfs filesystem where docker graph (`/var/lib/docker`) +is located. + +Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker` + +## Btrfs options + +#### btrfs.min_space + +Specifies the mininum size to use when creating the subvolume which is used for +containers. If user uses disk quota for btrfs when creating or running a +container with **--storage-opt size** option, docker should ensure the **size** +cannot be smaller than **btrfs.min_space**. + +Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G` + +# CLUSTER STORE OPTIONS + +The daemon uses libkv to advertise the node within the cluster. Some Key/Value +backends support mutual TLS, and the client TLS settings used by the daemon can +be configured using the **--cluster-store-opt** flag, specifying the paths to +PEM encoded files. + +#### kv.cacertfile + +Specifies the path to a local file with PEM encoded CA certificates to trust + +#### kv.certfile + +Specifies the path to a local file with a PEM encoded certificate. This +certificate is used as the client cert for communication with the Key/Value +store. + +#### kv.keyfile + +Specifies the path to a local file with a PEM encoded private key. This +private key is used as the client key for communication with the Key/Value +store. + +# Access authorization + +Docker's access authorization can be extended by authorization plugins that +your organization can purchase or build themselves. You can install one or more +authorization plugins when you start the Docker `daemon` using the +`--authorization-plugin=PLUGIN_ID` option. + +```bash +dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +The `PLUGIN_ID` value is either the plugin's name or a path to its +specification file. The plugin's implementation determines whether you can +specify a name or path. Consult with your Docker administrator to get +information about the plugins available to you. + +Once a plugin is installed, requests made to the `daemon` through the command +line or Docker's Engine API are allowed or denied by the plugin. If you have +multiple plugins installed, at least one must allow the request for it to +complete. + +For information about how to create an authorization plugin, see [authorization +plugin](https://docs.docker.com/engine/extend/authorization/) section in the +Docker extend section of this documentation. + + +# HISTORY +Sept 2015, Originally compiled by Shishir Mahajan +based on docker.com source material and internal work. diff --git a/vendor/github.com/moby/moby/man/generate.go b/vendor/github.com/moby/moby/man/generate.go new file mode 100644 index 0000000..f21614d --- /dev/null +++ b/vendor/github.com/moby/moby/man/generate.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/commands" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func generateManPages(path string) error { + header := &doc.GenManHeader{ + Title: "DOCKER", + Section: "1", + Source: "Docker Community", + } + + stdin, stdout, stderr := term.StdStreams() + dockerCli := command.NewDockerCli(stdin, stdout, stderr) + cmd := &cobra.Command{Use: "docker"} + commands.AddCommands(cmd, dockerCli) + + cmd.DisableAutoGenTag = true + return doc.GenManTreeFromOpts(cmd, doc.GenManTreeOptions{ + Header: header, + Path: path, + CommandSeparator: "-", + }) +} + +func main() { + path := "/tmp" + if len(os.Args) > 1 { + path = os.Args[1] + } + fmt.Printf("Generating man pages into %s\n", path) + if err := generateManPages(path); err != nil { + fmt.Fprintf(os.Stderr, "Failed to generate man pages: %s\n", err.Error()) + } +} diff --git a/vendor/github.com/moby/moby/man/generate.sh b/vendor/github.com/moby/moby/man/generate.sh new file mode 100755 index 0000000..e4126ba --- /dev/null +++ b/vendor/github.com/moby/moby/man/generate.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# +# Generate man pages for docker/docker +# + +set -eu + +mkdir -p ./man/man1 + +# Generate man pages from cobra commands +go build -o /tmp/gen-manpages ./man +/tmp/gen-manpages ./man/man1 + +# Generate legacy pages from markdown +./man/md2man-all.sh -q diff --git a/vendor/github.com/moby/moby/man/glide.lock b/vendor/github.com/moby/moby/man/glide.lock new file mode 100644 index 0000000..5ec765a --- /dev/null +++ b/vendor/github.com/moby/moby/man/glide.lock @@ -0,0 +1,52 @@ +hash: ead3ea293a6143fe41069ebec814bf197d8c43a92cc7666b1f7e21a419b46feb +updated: 2016-06-20T21:53:35.420817456Z +imports: +- name: github.com/BurntSushi/toml + version: f0aeabca5a127c4078abb8c8d64298b147264b55 +- name: github.com/cpuguy83/go-md2man + version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa + subpackages: + - md2man +- name: github.com/fsnotify/fsnotify + version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8 +- name: github.com/hashicorp/hcl + version: da486364306ed66c218be9b7953e19173447c18b + subpackages: + - hcl/ast + - hcl/parser + - hcl/token + - json/parser + - hcl/scanner + - hcl/strconv + - json/scanner + - json/token +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/magiconair/properties + version: c265cfa48dda6474e208715ca93e987829f572f8 +- name: github.com/mitchellh/mapstructure + version: d2dd0262208475919e1a362f675cfc0e7c10e905 +- name: github.com/russross/blackfriday + version: 1d6b8e9301e720b08a8938b8c25c018285885438 +- name: github.com/shurcooL/sanitized_anchor_name + version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 +- name: github.com/spf13/cast + version: 27b586b42e29bec072fe7379259cc719e1289da6 +- name: github.com/spf13/jwalterweatherman + version: 33c24e77fb80341fe7130ee7c594256ff08ccc46 +- name: github.com/spf13/pflag + version: dabebe21bf790f782ea4c7bbd2efc430de182afd +- name: github.com/spf13/viper + version: c1ccc378a054ea8d4e38d8c67f6938d4760b53dd +- name: golang.org/x/sys + version: 62bee037599929a6e9146f29d10dd5208c43507d + subpackages: + - unix +- name: gopkg.in/yaml.v2 + version: a83829b6f1293c91addabc89d0571c246397bbf4 +- name: github.com/spf13/cobra + repo: https://github.com/dnephin/cobra + subpackages: + - doc + version: v1.3 +devImports: [] diff --git a/vendor/github.com/moby/moby/man/glide.yaml b/vendor/github.com/moby/moby/man/glide.yaml new file mode 100644 index 0000000..e99b267 --- /dev/null +++ b/vendor/github.com/moby/moby/man/glide.yaml @@ -0,0 +1,12 @@ +package: github.com/docker/docker/man +import: +- package: github.com/cpuguy83/go-md2man + subpackages: + - md2man +- package: github.com/inconshreveable/mousetrap +- package: github.com/spf13/pflag +- package: github.com/spf13/viper +- package: github.com/spf13/cobra + repo: https://github.com/dnephin/cobra + subpackages: + - doc diff --git a/vendor/github.com/moby/moby/man/md2man-all.sh b/vendor/github.com/moby/moby/man/md2man-all.sh new file mode 100755 index 0000000..97c65c9 --- /dev/null +++ b/vendor/github.com/moby/moby/man/md2man-all.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +# get into this script's directory +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +[ "$1" = '-q' ] || { + set -x + pwd +} + +for FILE in *.md; do + base="$(basename "$FILE")" + name="${base%.md}" + num="${name##*.}" + if [ -z "$num" -o "$name" = "$num" ]; then + # skip files that aren't of the format xxxx.N.md (like README.md) + continue + fi + mkdir -p "./man${num}" + go-md2man -in "$FILE" -out "./man${num}/${name}" +done diff --git a/vendor/github.com/moby/moby/migrate/v1/migratev1.go b/vendor/github.com/moby/moby/migrate/v1/migratev1.go new file mode 100644 index 0000000..bc42dd2 --- /dev/null +++ b/vendor/github.com/moby/moby/migrate/v1/migratev1.go @@ -0,0 +1,504 @@ +package v1 + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "sync" + "time" + + "encoding/json" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + imagev1 "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/reference" +) + +type graphIDRegistrar interface { + RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type graphIDMounter interface { + CreateRWLayerByGraphID(string, string, layer.ChainID) error +} + +type checksumCalculator interface { + ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error) +} + +const ( + graphDirName = "graph" + tarDataFileName = "tar-data.json.gz" + migrationFileName = ".migration-v1-images.json" + migrationTagsFileName = ".migration-v1-tags" + migrationDiffIDFileName = ".migration-diffid" + migrationSizeFileName = ".migration-size" + migrationTarDataFileName = ".migration-tardata" + containersDirName = "containers" + configFileNameLegacy = "config.json" + configFileName = "config.v2.json" + repositoriesFilePrefixLegacy = "repositories-" +) + +var ( + errUnsupported = errors.New("migration is not supported") +) + +// Migrate takes an old graph directory and transforms the metadata into the +// new format. +func Migrate(root, driverName string, ls layer.Store, is image.Store, rs reference.Store, ms metadata.Store) error { + graphDir := filepath.Join(root, graphDirName) + if _, err := os.Lstat(graphDir); os.IsNotExist(err) { + return nil + } + + mappings, err := restoreMappings(root) + if err != nil { + return err + } + + if cc, ok := ls.(checksumCalculator); ok { + CalculateLayerChecksums(root, cc, mappings) + } + + if registrar, ok := ls.(graphIDRegistrar); !ok { + return errUnsupported + } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { + return err + } + + err = saveMappings(root, mappings) + if err != nil { + return err + } + + if mounter, ok := ls.(graphIDMounter); !ok { + return errUnsupported + } else if err := migrateContainers(root, mounter, is, mappings); err != nil { + return err + } + + if err := migrateRefs(root, driverName, rs, mappings); err != nil { + return err + } + + return nil +} + +// CalculateLayerChecksums walks an old graph directory and calculates checksums +// for each layer. These checksums are later used for migration. +func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) { + graphDir := filepath.Join(root, graphDirName) + // spawn some extra workers also for maximum performance because the process is bounded by both cpu and io + workers := runtime.NumCPU() * 3 + workQueue := make(chan string, workers) + + wg := sync.WaitGroup{} + + for i := 0; i < workers; i++ { + wg.Add(1) + go func() { + for id := range workQueue { + start := time.Now() + if err := calculateLayerChecksum(graphDir, id, ls); err != nil { + logrus.Errorf("could not calculate checksum for %q, %q", id, err) + } + elapsed := time.Since(start) + logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds()) + } + wg.Done() + }() + } + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + logrus.Errorf("could not read directory %q", graphDir) + return + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, ok := mappings[v1ID]; ok { // support old migrations without helper files + continue + } + workQueue <- v1ID + } + close(workQueue) + wg.Wait() +} + +func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error { + diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName) + if _, err := os.Lstat(diffIDFile); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + + parent, err := getParent(filepath.Join(graphDir, id)) + if err != nil { + return err + } + + diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName)) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil { + return err + } + + if err := ioutils.AtomicWriteFile(filepath.Join(graphDir, id, migrationDiffIDFileName), []byte(diffID), 0600); err != nil { + return err + } + + logrus.Infof("calculated checksum for layer %s: %s", id, diffID) + return nil +} + +func restoreMappings(root string) (map[string]image.ID, error) { + mappings := make(map[string]image.ID) + + mfile := filepath.Join(root, migrationFileName) + f, err := os.Open(mfile) + if err != nil && !os.IsNotExist(err) { + return nil, err + } else if err == nil { + err := json.NewDecoder(f).Decode(&mappings) + if err != nil { + f.Close() + return nil, err + } + f.Close() + } + + return mappings, nil +} + +func saveMappings(root string, mappings map[string]image.ID) error { + mfile := filepath.Join(root, migrationFileName) + f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(mappings); err != nil { + return err + } + return nil +} + +func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { + graphDir := filepath.Join(root, graphDirName) + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + return err + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, exists := mappings[v1ID]; exists { + continue + } + if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil { + continue + } + } + + return nil +} + +func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error { + containersDir := filepath.Join(root, containersDirName) + dir, err := ioutil.ReadDir(containersDir) + if err != nil { + return err + } + for _, v := range dir { + id := v.Name() + + if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil { + continue + } + + containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy)) + if err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(containerJSON, &c); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + imageStrJSON, ok := c["Image"] + if !ok { + return fmt.Errorf("invalid container configuration for %v", id) + } + + var image string + if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + imageID, ok := imageMappings[image] + if !ok { + logrus.Errorf("image not migrated %v", imageID) // non-fatal error + continue + } + + c["Image"] = rawJSON(imageID) + + containerJSON, err = json.Marshal(c) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil { + return err + } + + img, err := is.Get(imageID) + if err != nil { + return err + } + + if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + logrus.Infof("migrated container %s to point to %s", id, imageID) + + } + return nil +} + +type refAdder interface { + AddTag(ref reference.Named, id digest.Digest, force bool) error + AddDigest(ref reference.Canonical, id digest.Digest, force bool) error +} + +func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error { + migrationFile := filepath.Join(root, migrationTagsFileName) + if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) { + return err + } + + type repositories struct { + Repositories map[string]map[string]string + } + + var repos repositories + + f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&repos); err != nil { + return err + } + + for name, repo := range repos.Repositories { + for tag, id := range repo { + if strongID, exists := mappings[id]; exists { + ref, err := reference.WithName(name) + if err != nil { + logrus.Errorf("migrate tags: invalid name %q, %q", name, err) + continue + } + if dgst, err := digest.ParseDigest(tag); err == nil { + canonical, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) + continue + } + if err := rs.AddDigest(canonical, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate digest %q for %q, err: %q", ref.String(), strongID, err) + } + } else { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) + continue + } + if err := rs.AddTag(tagRef, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate tag %q for %q, err: %q", ref.String(), strongID, err) + } + } + logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) + } + } + } + + mf, err := os.Create(migrationFile) + if err != nil { + return err + } + mf.Close() + + return nil +} + +func getParent(confDir string) (string, error) { + jsonFile := filepath.Join(confDir, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return "", err + } + var parent struct { + Parent string + ParentID digest.Digest `json:"parent_id"` + } + if err := json.Unmarshal(imageJSON, &parent); err != nil { + return "", err + } + if parent.Parent == "" && parent.ParentID != "" { // v1.9 + parent.Parent = parent.ParentID.Hex() + } + // compatibilityID for parent + parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent")) + if err == nil && len(parentCompatibilityID) > 0 { + parent.Parent = string(parentCompatibilityID) + } + return parent.Parent, nil +} + +func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { + defer func() { + if err != nil { + logrus.Errorf("migration failed for %v, err: %v", id, err) + } + }() + + parent, err := getParent(filepath.Join(root, graphDirName, id)) + if err != nil { + return err + } + + var parentID image.ID + if parent != "" { + var exists bool + if parentID, exists = mappings[parent]; !exists { + if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil { + // todo: fail or allow broken chains? + return err + } + parentID = mappings[parent] + } + } + + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName)) + if err != nil { + return err + } + diffID, err := digest.ParseDigest(string(diffIDData)) + if err != nil { + return err + } + + sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName)) + if err != nil { + return err + } + size, err := strconv.ParseInt(string(sizeStr), 10, 64) + if err != nil { + return err + } + + layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size) + if err != nil { + return err + } + logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) + + jsonFile := filepath.Join(root, graphDirName, id, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return err + } + + h, err := imagev1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + rootFS.Append(layer.DiffID()) + + config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + strongID, err := is.Create(config) + if err != nil { + return err + } + logrus.Infof("migrated image %s to %s", id, strongID) + + if parentID != "" { + if err := is.SetParent(strongID, parentID); err != nil { + return err + } + } + + checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) + if err == nil { // best effort + dgst, err := digest.ParseDigest(string(checksum)) + if err == nil { + V2MetadataService := metadata.NewV2MetadataService(ms) + V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) + } + } + _, err = ls.Release(layer) + if err != nil { + return err + } + + mappings[id] = strongID + return +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/moby/moby/migrate/v1/migratev1_test.go b/vendor/github.com/moby/moby/migrate/v1/migratev1_test.go new file mode 100644 index 0000000..be82fdc --- /dev/null +++ b/vendor/github.com/moby/moby/migrate/v1/migratev1_test.go @@ -0,0 +1,438 @@ +package v1 + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +func TestMigrateRefs(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-tags") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600) + + ta := &mockTagAdder{} + err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ + "5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + "abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + }) + if err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", + } + + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } + + // second migration is no-op + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600) + err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + }) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } +} + +func TestMigrateContainers(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-containers") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = addContainer(tmpdir, `{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + // container with invalid image + err = addContainer(tmpdir, `{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"e780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"4c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + ls := &mockMounter{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, ls) + if err != nil { + t.Fatal(err) + } + + imgID, err := is.Create([]byte(`{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["sh"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","history":[{"created":"2015-10-31T22:22:54.690851953Z","created_by":"/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"},{"created":"2015-10-31T22:22:55.613815829Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)) + if err != nil { + t.Fatal(err) + } + + err = migrateContainers(tmpdir, ls, is, map[string]image.ID{ + "2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093": imgID, + }) + if err != nil { + t.Fatal(err) + } + + expected := []mountInfo{{ + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "sha256:c3191d32a37d7159b2e30830937d2e30268ad6c375a773a8994911a3aba9b93f", + }} + if !reflect.DeepEqual(expected, ls.mounts) { + t.Fatalf("invalid mounts: expected %q, got %q", expected, ls.mounts) + } + + if actual, expected := ls.count, 0; actual != expected { + t.Fatalf("invalid active mounts: expected %d, got %d", expected, actual) + } + + config2, err := ioutil.ReadFile(filepath.Join(tmpdir, "containers", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "config.v2.json")) + if err != nil { + t.Fatal(err) + } + var config struct{ Image string } + err = json.Unmarshal(config2, &config) + if err != nil { + t.Fatal(err) + } + + if actual, expected := config.Image, string(imgID); actual != expected { + t.Fatalf("invalid image pointer in migrated config: expected %q, got %q", expected, actual) + } + +} + +func TestMigrateImages(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-images") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // busybox from 1.9 + id1, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "") + if err != nil { + t.Fatal(err) + } + + id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "") + if err != nil { + t.Fatal(err) + } + + ls := &mockRegistrar{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, ls) + if err != nil { + t.Fatal(err) + } + + ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution")) + if err != nil { + t.Fatal(err) + } + mappings := make(map[string]image.ID) + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected := map[string]image.ID{ + id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"), + id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"), + } + + if !reflect.DeepEqual(mappings, expected) { + t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings) + } + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + ls.count = 0 + + // next images are busybox from 1.8.2 + _, err = addImage(tmpdir, `{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + if err != nil { + t.Fatal(err) + } + + _, err = addImage(tmpdir, `{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57") + if err != nil { + t.Fatal(err) + } + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395") + expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07") + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + + v2MetadataService := metadata.NewV2MetadataService(ms) + receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID()) + if err != nil { + t.Fatal(err) + } + + expectedMetadata := []metadata.V2Metadata{ + {Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")}, + {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if !reflect.DeepEqual(expectedMetadata, receivedMetadata) { + t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata) + } + +} + +func TestMigrateUnsupported(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = os.MkdirAll(filepath.Join(tmpdir, "graph"), 0700) + if err != nil { + t.Fatal(err) + } + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != errUnsupported { + t.Fatalf("expected unsupported error, got %q", err) + } +} + +func TestMigrateEmptyDir(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func addImage(dest, jsonConfig, parent, checksum string) (string, error) { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return "", err + } + if config.ID == "" { + b := make([]byte, 32) + rand.Read(b) + config.ID = hex.EncodeToString(b) + } + contDir := filepath.Join(dest, "graph", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil { + return "", err + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-diffid"), []byte(layer.EmptyLayer.DiffID()), 0600); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-size"), []byte("0"), 0600); err != nil { + return "", err + } + if parent != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil { + return "", err + } + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + return config.ID, nil +} + +func addContainer(dest, jsonConfig string) error { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return err + } + contDir := filepath.Join(dest, "containers", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { + return err + } + return nil +} + +type mockTagAdder struct { + refs map[string]string +} + +func (t *mockTagAdder) AddTag(ref reference.Named, id digest.Digest, force bool) error { + if t.refs == nil { + t.refs = make(map[string]string) + } + t.refs[ref.String()] = id.String() + return nil +} +func (t *mockTagAdder) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return t.AddTag(ref, id, force) +} + +type mockRegistrar struct { + layers map[layer.ChainID]*mockLayer + count int +} + +func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, diffID layer.DiffID, tarDataFile string, size int64) (layer.Layer, error) { + r.count++ + l := &mockLayer{} + if parent != "" { + p, exists := r.layers[parent] + if !exists { + return nil, fmt.Errorf("invalid parent %q", parent) + } + l.parent = p + l.diffIDs = append(l.diffIDs, p.diffIDs...) + } + l.diffIDs = append(l.diffIDs, diffID) + if r.layers == nil { + r.layers = make(map[layer.ChainID]*mockLayer) + } + r.layers[l.ChainID()] = l + return l, nil +} +func (r *mockRegistrar) Release(l layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} +func (r *mockRegistrar) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +type mountInfo struct { + name, graphID, parent string +} +type mockMounter struct { + mounts []mountInfo + count int +} + +func (r *mockMounter) CreateRWLayerByGraphID(name string, graphID string, parent layer.ChainID) error { + r.mounts = append(r.mounts, mountInfo{name, graphID, string(parent)}) + return nil +} +func (r *mockMounter) Unmount(string) error { + r.count-- + return nil +} +func (r *mockMounter) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (r *mockMounter) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} + +type mockLayer struct { + diffIDs []layer.DiffID + parent *mockLayer +} + +func (l *mockLayer) TarStream() (io.ReadCloser, error) { + return nil, nil +} +func (l *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, nil +} + +func (l *mockLayer) ChainID() layer.ChainID { + return layer.CreateChainID(l.diffIDs) +} + +func (l *mockLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *mockLayer) Parent() layer.Layer { + if l.parent == nil { + return nil + } + return l.parent +} + +func (l *mockLayer) Size() (int64, error) { + return 0, nil +} + +func (l *mockLayer) DiffSize() (int64, error) { + return 0, nil +} + +func (l *mockLayer) Metadata() (map[string]string, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/moby/oci/defaults_linux.go b/vendor/github.com/moby/moby/oci/defaults_linux.go new file mode 100644 index 0000000..8b3ce72 --- /dev/null +++ b/vendor/github.com/moby/moby/oci/defaults_linux.go @@ -0,0 +1,168 @@ +package oci + +import ( + "os" + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +func sPtr(s string) *string { return &s } +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +// DefaultSpec returns default oci spec used by docker. +func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + s.Process.Capabilities = []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } + + s.Linux = &specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/sys/firmware", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.Namespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind-mount or symlink of the container's ptmx. + // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + Devices: []specs.Device{}, + Resources: &specs.Resources{ + Devices: []specs.DeviceCgroup{ + { + Allow: false, + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(5), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(3), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(9), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(8), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(0), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(1), + Access: sPtr("rwm"), + }, + { + Allow: false, + Type: sPtr("c"), + Major: iPtr(10), + Minor: iPtr(229), + Access: sPtr("rwm"), + }, + }, + }, + } + + return s +} diff --git a/vendor/github.com/moby/moby/oci/defaults_solaris.go b/vendor/github.com/moby/moby/oci/defaults_solaris.go new file mode 100644 index 0000000..85c8b68 --- /dev/null +++ b/vendor/github.com/moby/moby/oci/defaults_solaris.go @@ -0,0 +1,20 @@ +package oci + +import ( + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultSpec returns default oci spec used by docker. +func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: "0.6.0", + Platform: specs.Platform{ + OS: "SunOS", + Arch: runtime.GOARCH, + }, + } + s.Solaris = &specs.Solaris{} + return s +} diff --git a/vendor/github.com/moby/moby/oci/defaults_windows.go b/vendor/github.com/moby/moby/oci/defaults_windows.go new file mode 100644 index 0000000..ab51904 --- /dev/null +++ b/vendor/github.com/moby/moby/oci/defaults_windows.go @@ -0,0 +1,19 @@ +package oci + +import ( + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultSpec returns default spec used by docker. +func DefaultSpec() specs.Spec { + return specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + Windows: &specs.Windows{}, + } +} diff --git a/vendor/github.com/moby/moby/oci/devices_linux.go b/vendor/github.com/moby/moby/oci/devices_linux.go new file mode 100644 index 0000000..2840d25 --- /dev/null +++ b/vendor/github.com/moby/moby/oci/devices_linux.go @@ -0,0 +1,86 @@ +package oci + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +func Device(d *configs.Device) specs.Device { + return specs.Device{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func deviceCgroup(d *configs.Device) specs.DeviceCgroup { + t := string(d.Type) + return specs.DeviceCgroup{ + Allow: true, + Type: &t, + Major: &d.Major, + Minor: &d.Minor, + Access: &d.Permissions, + } +} + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { + resolvedPathOnHost := pathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = pathInContainer + return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) + devs = append(devs, Device(childDevice)) + devPermissions = append(devPermissions, deviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) +} diff --git a/vendor/github.com/moby/moby/oci/devices_unsupported.go b/vendor/github.com/moby/moby/oci/devices_unsupported.go new file mode 100644 index 0000000..6252cab --- /dev/null +++ b/vendor/github.com/moby/moby/oci/devices_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package oci + +import ( + "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +// Not implemented +func Device(d *configs.Device) specs.Device { return specs.Device{} } + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +// Not implemented +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { + return nil, nil, errors.New("oci/devices: unsupported platform") +} diff --git a/vendor/github.com/moby/moby/oci/namespaces.go b/vendor/github.com/moby/moby/oci/namespaces.go new file mode 100644 index 0000000..4902482 --- /dev/null +++ b/vendor/github.com/moby/moby/oci/namespaces.go @@ -0,0 +1,16 @@ +package oci + +import specs "github.com/opencontainers/runtime-spec/specs-go" + +// RemoveNamespace removes the `nsType` namespace from OCI spec `s` +func RemoveNamespace(s *specs.Spec, nsType specs.NamespaceType) { + idx := -1 + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + idx = i + } + } + if idx >= 0 { + s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) + } +} diff --git a/vendor/github.com/moby/moby/opts/hosts.go b/vendor/github.com/moby/moby/opts/hosts.go new file mode 100644 index 0000000..266df1e --- /dev/null +++ b/vendor/github.com/moby/moby/opts/hosts.go @@ -0,0 +1,151 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) + // DefaultNamedPipe defines the default named pipe used by docker on Windows + DefaultNamedPipe = `//./pipe/docker_engine` +) + +// ValidateHost validates that the specified string is a valid host and returns it. +func ValidateHost(val string) (string, error) { + host := strings.TrimSpace(val) + // The empty string means default and is not handled by parseDockerDaemonHost + if host != "" { + _, err := parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for tls + return val, nil +} + +// ParseHost and set defaults for a Daemon host string +func ParseHost(defaultToTLS bool, val string) (string, error) { + host := strings.TrimSpace(val) + if host == "" { + if defaultToTLS { + host = DefaultTLSHost + } else { + host = DefaultHost + } + } else { + var err error + host, err = parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + return host, nil +} + +// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. +func parseDockerDaemonHost(addr string) (string, error) { + addrParts := strings.SplitN(addr, "://", 2) + if len(addrParts) == 1 && addrParts[0] != "" { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], DefaultTCPHost) + case "unix": + return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) + case "npipe": + return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseSimpleProtoAddr parses and validates that the specified address is a valid +// socket address for simple protocols like unix and npipe. It returns a formatted +// socket address, either using the address parsed from addr, or the contents of +// defaultAddr if addr is a blank string. +func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, proto+"://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("%s://%s", proto, addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. +// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // try port addition once + host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) + } + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} diff --git a/vendor/github.com/moby/moby/opts/hosts_test.go b/vendor/github.com/moby/moby/opts/hosts_test.go new file mode 100644 index 0000000..a5bec30 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/hosts_test.go @@ -0,0 +1,148 @@ +package opts + +import ( + "fmt" + "testing" +) + +func TestParseHost(t *testing.T) { + invalid := []string{ + "something with spaces", + "://", + "unknown://", + "tcp://:port", + "tcp://invalid:port", + } + + valid := map[string]string{ + "": DefaultHost, + " ": DefaultHost, + " ": DefaultHost, + "fd://": "fd://", + "fd://something": "fd://something", + "tcp://host:": fmt.Sprintf("tcp://host:%d", DefaultHTTPPort), + "tcp://": DefaultTCPHost, + "tcp://:2375": fmt.Sprintf("tcp://%s:2375", DefaultHTTPHost), + "tcp://:2376": fmt.Sprintf("tcp://%s:2376", DefaultHTTPHost), + "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", + "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", + "tcp://192.168:8080": "tcp://192.168:8080", + "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P + " tcp://:7777/path ": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "tcp://docker.com:2375": "tcp://docker.com:2375", + "unix://": "unix://" + DefaultUnixSocket, + "unix://path/to/socket": "unix://path/to/socket", + "npipe://": "npipe://" + DefaultNamedPipe, + "npipe:////./pipe/foo": "npipe:////./pipe/foo", + } + + for _, value := range invalid { + if _, err := ParseHost(false, value); err == nil { + t.Errorf("Expected an error for %v, got [nil]", value) + } + } + + for value, expected := range valid { + if actual, err := ParseHost(false, value); err != nil || actual != expected { + t.Errorf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) + } + } +} + +func TestParseDockerDaemonHost(t *testing.T) { + invalids := map[string]string{ + + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock", + " tcp://:7777/path ": "Invalid bind address format: tcp://:7777/path ", + "": "Invalid bind address format: ", + } + valids := map[string]string{ + "0.0.0.1:": "tcp://0.0.0.1:2375", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + "[::1]:": "tcp://[::1]:2375", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + ":6666": fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost), + ":6666/path": fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost), + "tcp://": DefaultTCPHost, + "tcp://:7777": fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost), + "tcp://:7777/path": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix://" + DefaultUnixSocket, + "fd://": "fd://", + "fd://something": "fd://something", + "localhost:": "tcp://localhost:2375", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := parseDockerDaemonHost(validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseTCP(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + ) + invalids := map[string]string{ + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "": defaultHTTPHost, + "tcp://": defaultHTTPHost, + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "[::1]:": "tcp://[::1]:2376", + "[::1]:5555": "tcp://[::1]:5555", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", + "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + "localhost:": "tcp://localhost:2376", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := parseSimpleProtoAddr("unix", "tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if _, err := parseSimpleProtoAddr("unix", "unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if v, err := parseSimpleProtoAddr("unix", "", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { + t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") + } +} diff --git a/vendor/github.com/moby/moby/opts/hosts_unix.go b/vendor/github.com/moby/moby/opts/hosts_unix.go new file mode 100644 index 0000000..611407a --- /dev/null +++ b/vendor/github.com/moby/moby/opts/hosts_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package opts + +import "fmt" + +// DefaultHost constant defines the default host string used by docker on other hosts than Windows +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/moby/moby/opts/hosts_windows.go b/vendor/github.com/moby/moby/opts/hosts_windows.go new file mode 100644 index 0000000..7c239e0 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/hosts_windows.go @@ -0,0 +1,6 @@ +// +build windows + +package opts + +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/moby/moby/opts/ip.go b/vendor/github.com/moby/moby/opts/ip.go new file mode 100644 index 0000000..fb03b50 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/ip.go @@ -0,0 +1,47 @@ +package opts + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parseable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. +func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} + +// Type returns the type of the option +func (o *IPOpt) Type() string { + return "ip" +} diff --git a/vendor/github.com/moby/moby/opts/ip_test.go b/vendor/github.com/moby/moby/opts/ip_test.go new file mode 100644 index 0000000..1027d84 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/ip_test.go @@ -0,0 +1,54 @@ +package opts + +import ( + "net" + "testing" +) + +func TestIpOptString(t *testing.T) { + addresses := []string{"", "0.0.0.0"} + var ip net.IP + + for _, address := range addresses { + stringAddress := NewIPOpt(&ip, address).String() + if stringAddress != address { + t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) + } + } +} + +func TestNewIpOptInvalidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "Not an ip" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "127.0.0.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestNewIpOptValidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "192.168.1.1" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "192.168.1.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestIpOptSetInvalidVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + ipOpt := &IPOpt{IP: &ip} + + invalidIP := "invalid ip" + expectedError := "invalid ip is not an ip address" + err := ipOpt.Set(invalidIP) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) + } +} diff --git a/vendor/github.com/moby/moby/opts/mount.go b/vendor/github.com/moby/moby/opts/mount.go new file mode 100644 index 0000000..ce6383d --- /dev/null +++ b/vendor/github.com/moby/moby/opts/mount.go @@ -0,0 +1,171 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "os" + "strconv" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/go-units" +) + +// MountOpt is a Value type for parsing mounts +type MountOpt struct { + values []mounttypes.Mount +} + +// Set a new mount value +func (m *MountOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + mount := mounttypes.Mount{} + + volumeOptions := func() *mounttypes.VolumeOptions { + if mount.VolumeOptions == nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + Labels: make(map[string]string), + } + } + if mount.VolumeOptions.DriverConfig == nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} + } + return mount.VolumeOptions + } + + bindOptions := func() *mounttypes.BindOptions { + if mount.BindOptions == nil { + mount.BindOptions = new(mounttypes.BindOptions) + } + return mount.BindOptions + } + + tmpfsOptions := func() *mounttypes.TmpfsOptions { + if mount.TmpfsOptions == nil { + mount.TmpfsOptions = new(mounttypes.TmpfsOptions) + } + return mount.TmpfsOptions + } + + setValueOnMap := func(target map[string]string, value string) { + parts := strings.SplitN(value, "=", 2) + if len(parts) == 1 { + target[value] = "" + } else { + target[parts[0]] = parts[1] + } + } + + mount.Type = mounttypes.TypeVolume // default to volume mounts + // Set writable as the default + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) == 1 { + switch key { + case "readonly", "ro": + mount.ReadOnly = true + continue + case "volume-nocopy": + volumeOptions().NoCopy = true + continue + } + } + + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "type": + mount.Type = mounttypes.Type(strings.ToLower(value)) + case "source", "src": + mount.Source = value + case "target", "dst", "destination": + mount.Target = value + case "readonly", "ro": + mount.ReadOnly, err = strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + case "bind-propagation": + bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value)) + case "volume-nocopy": + volumeOptions().NoCopy, err = strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for populate: %s", value) + } + case "volume-label": + setValueOnMap(volumeOptions().Labels, value) + case "volume-driver": + volumeOptions().DriverConfig.Name = value + case "volume-opt": + if volumeOptions().DriverConfig.Options == nil { + volumeOptions().DriverConfig.Options = make(map[string]string) + } + setValueOnMap(volumeOptions().DriverConfig.Options, value) + case "tmpfs-size": + sizeBytes, err := units.RAMInBytes(value) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + tmpfsOptions().SizeBytes = sizeBytes + case "tmpfs-mode": + ui64, err := strconv.ParseUint(value, 8, 32) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + tmpfsOptions().Mode = os.FileMode(ui64) + default: + return fmt.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + if mount.Type == "" { + return fmt.Errorf("type is required") + } + + if mount.Target == "" { + return fmt.Errorf("target is required") + } + + if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { + return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type) + } + if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { + return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) + } + if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { + return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) + } + + m.values = append(m.values, mount) + return nil +} + +// Type returns the type of this option +func (m *MountOpt) Type() string { + return "mount" +} + +// String returns a string repr of this option +func (m *MountOpt) String() string { + mounts := []string{} + for _, mount := range m.values { + repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) + mounts = append(mounts, repr) + } + return strings.Join(mounts, ", ") +} + +// Value returns the mounts +func (m *MountOpt) Value() []mounttypes.Mount { + return m.values +} diff --git a/vendor/github.com/moby/moby/opts/mount_test.go b/vendor/github.com/moby/moby/opts/mount_test.go new file mode 100644 index 0000000..59606c3 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/mount_test.go @@ -0,0 +1,184 @@ +package opts + +import ( + "os" + "testing" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestMountOptString(t *testing.T) { + mount := MountOpt{ + values: []mounttypes.Mount{ + { + Type: mounttypes.TypeBind, + Source: "/home/path", + Target: "/target", + }, + { + Type: mounttypes.TypeVolume, + Source: "foo", + Target: "/target/foo", + }, + }, + } + expected := "bind /home/path /target, volume foo /target/foo" + assert.Equal(t, mount.String(), expected) +} + +func TestMountOptSetBindNoErrorBind(t *testing.T) { + for _, testcase := range []string{ + // tests several aliases that should have same result. + "type=bind,target=/target,source=/source", + "type=bind,src=/source,dst=/target", + "type=bind,source=/source,dst=/target", + "type=bind,src=/source,target=/target", + } { + var mount MountOpt + + assert.NilError(t, mount.Set(testcase)) + + mounts := mount.Value() + assert.Equal(t, len(mounts), 1) + assert.Equal(t, mounts[0], mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/source", + Target: "/target", + }) + } +} + +func TestMountOptSetVolumeNoError(t *testing.T) { + for _, testcase := range []string{ + // tests several aliases that should have same result. + "type=volume,target=/target,source=/source", + "type=volume,src=/source,dst=/target", + "type=volume,source=/source,dst=/target", + "type=volume,src=/source,target=/target", + } { + var mount MountOpt + + assert.NilError(t, mount.Set(testcase)) + + mounts := mount.Value() + assert.Equal(t, len(mounts), 1) + assert.Equal(t, mounts[0], mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "/source", + Target: "/target", + }) + } +} + +// TestMountOptDefaultType ensures that a mount without the type defaults to a +// volume mount. +func TestMountOptDefaultType(t *testing.T) { + var mount MountOpt + assert.NilError(t, mount.Set("target=/target,source=/foo")) + assert.Equal(t, mount.values[0].Type, mounttypes.TypeVolume) +} + +func TestMountOptSetErrorNoTarget(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,source=/foo"), "target is required") +} + +func TestMountOptSetErrorInvalidKey(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,bogus=foo"), "unexpected key 'bogus'") +} + +func TestMountOptSetErrorInvalidField(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,bogus"), "invalid field 'bogus'") +} + +func TestMountOptSetErrorInvalidReadOnly(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,readonly=no"), "invalid value for readonly: no") + assert.Error(t, mount.Set("type=volume,readonly=invalid"), "invalid value for readonly: invalid") +} + +func TestMountOptDefaultEnableReadOnly(t *testing.T) { + var m MountOpt + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo")) + assert.Equal(t, m.values[0].ReadOnly, false) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly")) + assert.Equal(t, m.values[0].ReadOnly, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=1")) + assert.Equal(t, m.values[0].ReadOnly, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=true")) + assert.Equal(t, m.values[0].ReadOnly, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=0")) + assert.Equal(t, m.values[0].ReadOnly, false) +} + +func TestMountOptVolumeNoCopy(t *testing.T) { + var m MountOpt + assert.NilError(t, m.Set("type=volume,target=/foo,volume-nocopy")) + assert.Equal(t, m.values[0].Source, "") + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo")) + assert.Equal(t, m.values[0].VolumeOptions == nil, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=true")) + assert.Equal(t, m.values[0].VolumeOptions != nil, true) + assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy")) + assert.Equal(t, m.values[0].VolumeOptions != nil, true) + assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=1")) + assert.Equal(t, m.values[0].VolumeOptions != nil, true) + assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) +} + +func TestMountOptTypeConflict(t *testing.T) { + var m MountOpt + assert.Error(t, m.Set("type=bind,target=/foo,source=/foo,volume-nocopy=true"), "cannot mix") + assert.Error(t, m.Set("type=volume,target=/foo,source=/foo,bind-propagation=rprivate"), "cannot mix") +} + +func TestMountOptSetTmpfsNoError(t *testing.T) { + for _, testcase := range []string{ + // tests several aliases that should have same result. + "type=tmpfs,target=/target,tmpfs-size=1m,tmpfs-mode=0700", + "type=tmpfs,target=/target,tmpfs-size=1MB,tmpfs-mode=700", + } { + var mount MountOpt + + assert.NilError(t, mount.Set(testcase)) + + mounts := mount.Value() + assert.Equal(t, len(mounts), 1) + assert.DeepEqual(t, mounts[0], mounttypes.Mount{ + Type: mounttypes.TypeTmpfs, + Target: "/target", + TmpfsOptions: &mounttypes.TmpfsOptions{ + SizeBytes: 1024 * 1024, // not 1000 * 1000 + Mode: os.FileMode(0700), + }, + }) + } +} + +func TestMountOptSetTmpfsError(t *testing.T) { + var m MountOpt + assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-size=foo"), "invalid value for tmpfs-size") + assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-mode=foo"), "invalid value for tmpfs-mode") + assert.Error(t, m.Set("type=tmpfs"), "target is required") +} diff --git a/vendor/github.com/moby/moby/opts/opts.go b/vendor/github.com/moby/moby/opts/opts.go new file mode 100644 index 0000000..ae85153 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/opts.go @@ -0,0 +1,360 @@ +package opts + +import ( + "fmt" + "math/big" + "net" + "regexp" + "strings" + + "github.com/docker/docker/api/types/filters" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +// NewListOpts creates a new ListOpts with the specified validator. +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *NewListOptsRef(&values, validator) +} + +// NewListOptsRef creates a new ListOpts with the specified values and validator. +func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + return fmt.Sprintf("%v", []string((*opts.values))) +} + +// Set validates if needed the input value and adds it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + (*opts.values) = append((*opts.values), value) + return nil +} + +// Delete removes the specified element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values of slice. +func (opts *ListOpts) GetAll() []string { + return (*opts.values) +} + +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + +// Get checks the existence of the specified key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len((*opts.values)) +} + +// Type returns a string name for this Option type +func (opts *ListOpts) Type() string { + return "list" +} + +// NamedOption is an interface that list and map options +// with names implement. +type NamedOption interface { + Name() string +} + +// NamedListOpts is a ListOpts with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedListOpts struct { + name string + ListOpts +} + +var _ NamedOption = &NamedListOpts{} + +// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. +func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { + return &NamedListOpts{ + name: name, + ListOpts: *NewListOptsRef(values, validator), + } +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *NamedListOpts) Name() string { + return o.name +} + +// MapOpts holds a map of values and a validation function. +type MapOpts struct { + values map[string]string + validator ValidatorFctType +} + +// Set validates if needed the input value and add it to the +// internal map, by splitting on '='. +func (opts *MapOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + vals := strings.SplitN(value, "=", 2) + if len(vals) == 1 { + (opts.values)[vals[0]] = "" + } else { + (opts.values)[vals[0]] = vals[1] + } + return nil +} + +// GetAll returns the values of MapOpts as a map. +func (opts *MapOpts) GetAll() map[string]string { + return opts.values +} + +func (opts *MapOpts) String() string { + return fmt.Sprintf("%v", map[string]string((opts.values))) +} + +// Type returns a string name for this Option type +func (opts *MapOpts) Type() string { + return "map" +} + +// NewMapOpts creates a new MapOpts with the specified map of values and a validator. +func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } + return &MapOpts{ + values: values, + validator: validator, + } +} + +// NamedMapOpts is a MapOpts struct with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedMapOpts struct { + name string + MapOpts +} + +var _ NamedOption = &NamedMapOpts{} + +// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. +func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { + return &NamedMapOpts{ + name: name, + MapOpts: *NewMapOpts(values, validator), + } +} + +// Name returns the name of the NamedMapOpts in the configuration. +func (o *NamedMapOpts) Name() string { + return o.name +} + +// ValidatorFctType defines a validator function that returns a validated string and/or an error. +type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error +type ValidatorFctListType func(val string) ([]string, error) + +// ValidateIPAddress validates an Ip address. +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateDNSSearch validates domain for resolvconf search configuration. +// A zero length domain is represented by a dot (.). +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." { + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateLabel validates that the specified string is a valid label, and returns it. +// Labels are in the form on key=value. +func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} + +// ValidateSysctl validates a sysctl and returns it. +func ValidateSysctl(val string) (string, error) { + validSysctlMap := map[string]bool{ + "kernel.msgmax": true, + "kernel.msgmnb": true, + "kernel.msgmni": true, + "kernel.sem": true, + "kernel.shmall": true, + "kernel.shmmax": true, + "kernel.shmmni": true, + "kernel.shm_rmid_forced": true, + } + validSysctlPrefixes := []string{ + "net.", + "fs.mqueue.", + } + arr := strings.Split(val, "=") + if len(arr) < 2 { + return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) + } + if validSysctlMap[arr[0]] { + return val, nil + } + + for _, vp := range validSysctlPrefixes { + if strings.HasPrefix(arr[0], vp) { + return val, nil + } + } + return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) +} + +// FilterOpt is a flag type for validating filters +type FilterOpt struct { + filter filters.Args +} + +// NewFilterOpt returns a new FilterOpt +func NewFilterOpt() FilterOpt { + return FilterOpt{filter: filters.NewArgs()} +} + +func (o *FilterOpt) String() string { + repr, err := filters.ToParam(o.filter) + if err != nil { + return "invalid filters" + } + return repr +} + +// Set sets the value of the opt by parsing the command line value +func (o *FilterOpt) Set(value string) error { + var err error + o.filter, err = filters.ParseFlag(value, o.filter) + return err +} + +// Type returns the option type +func (o *FilterOpt) Type() string { + return "filter" +} + +// Value returns the value of this option +func (o *FilterOpt) Value() filters.Args { + return o.filter +} + +// NanoCPUs is a type for fixed point fractional number. +type NanoCPUs int64 + +// String returns the string format of the number +func (c *NanoCPUs) String() string { + return big.NewRat(c.Value(), 1e9).FloatString(3) +} + +// Set sets the value of the NanoCPU by passing a string +func (c *NanoCPUs) Set(value string) error { + cpus, err := ParseCPUs(value) + *c = NanoCPUs(cpus) + return err +} + +// Type returns the type +func (c *NanoCPUs) Type() string { + return "decimal" +} + +// Value returns the value in int64 +func (c *NanoCPUs) Value() int64 { + return int64(*c) +} + +// ParseCPUs takes a string ratio and returns an integer value of nano cpus +func ParseCPUs(value string) (int64, error) { + cpu, ok := new(big.Rat).SetString(value) + if !ok { + return 0, fmt.Errorf("failed to parse %v as a rational number", value) + } + nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) + if !nano.IsInt() { + return 0, fmt.Errorf("value is too precise") + } + return nano.Num().Int64(), nil +} diff --git a/vendor/github.com/moby/moby/opts/opts_test.go b/vendor/github.com/moby/moby/opts/opts_test.go new file mode 100644 index 0000000..9f41e47 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/opts_test.go @@ -0,0 +1,232 @@ +package opts + +import ( + "fmt" + "strings" + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewMapOpts(tmpMap, logOptsValidator) + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + + o.Set("max-file=2") + if len(tmpMap) != 2 { + t.Errorf("map length %d != 2", len(tmpMap)) + } + + if tmpMap["max-file"] != "2" { + t.Errorf("max-file = %s != 2", tmpMap["max-file"]) + } + + if tmpMap["max-size"] != "1" { + t.Errorf("max-size = %s != 1", tmpMap["max-size"]) + } + if o.Set("dummy-val=3") == nil { + t.Errorf("validator is not being called") + } +} + +func TestListOptsWithoutValidator(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + o.Set("bar") + if o.Len() != 2 { + t.Errorf("%d != 2", o.Len()) + } + o.Set("bar") + if o.Len() != 3 { + t.Errorf("%d != 3", o.Len()) + } + if !o.Get("bar") { + t.Error("o.Get(\"bar\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("foo") + if o.String() != "[bar bar]" { + t.Errorf("%s != [bar bar]", o.String()) + } + listOpts := o.GetAll() + if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { + t.Errorf("Expected [[bar bar]], got [%v]", listOpts) + } + mapListOpts := o.GetMap() + if len(mapListOpts) != 1 { + t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) + } + +} + +func TestListOptsWithValidator(t *testing.T) { + // Re-using logOptsvalidator (used by MapOpts) + o := NewListOpts(logOptsValidator) + o.Set("foo") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("foo=bar") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("max-file=2") + if o.Len() != 1 { + t.Errorf("%d != 1", o.Len()) + } + if !o.Get("max-file=2") { + t.Error("o.Get(\"max-file=2\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("max-file=2") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } +} + +func TestValidateDNSSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, + } + + for _, domain := range valid { + if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} + +func TestValidateLabel(t *testing.T) { + if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { + t.Fatalf("Expected an error [bad attribute format: label], go %v", err) + } + if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { + t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) + } + // Validate it's working with more than one = + if actual, err := ValidateLabel("key1=value1=value2"); err != nil { + t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) + } + // Validate it's working with one more + if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { + t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) + } +} + +func logOptsValidator(val string) (string, error) { + allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} + vals := strings.Split(val, "=") + if allowedKeys[vals[0]] != "" { + return val, nil + } + return "", fmt.Errorf("invalid key %s", vals[0]) +} + +func TestNamedListOpts(t *testing.T) { + var v []string + o := NewNamedListOptsRef("foo-name", &v, nil) + + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + if o.Name() != "foo-name" { + t.Errorf("%s != foo-name", o.Name()) + } + if len(v) != 1 { + t.Errorf("expected foo to be in the values, got %v", v) + } +} + +func TestNamedMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewNamedMapOpts("max-name", tmpMap, nil) + + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + if o.Name() != "max-name" { + t.Errorf("%s != max-name", o.Name()) + } + if _, exist := tmpMap["max-size"]; !exist { + t.Errorf("expected map-size to be in the values, got %v", tmpMap) + } +} diff --git a/vendor/github.com/moby/moby/opts/opts_unix.go b/vendor/github.com/moby/moby/opts/opts_unix.go new file mode 100644 index 0000000..f1ce844 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/opts_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package opts + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/moby/moby/opts/opts_windows.go b/vendor/github.com/moby/moby/opts/opts_windows.go new file mode 100644 index 0000000..ebe40c9 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/opts_windows.go @@ -0,0 +1,56 @@ +package opts + +// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. +// @jhowardmsft, @swernli. +// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP5. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... +// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/moby/moby/opts/port.go b/vendor/github.com/moby/moby/opts/port.go new file mode 100644 index 0000000..020a5d1 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/port.go @@ -0,0 +1,146 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +const ( + portOptTargetPort = "target" + portOptPublishedPort = "published" + portOptProtocol = "protocol" + portOptMode = "mode" +) + +// PortOpt represents a port config in swarm mode. +type PortOpt struct { + ports []swarm.PortConfig +} + +// Set a new port value +func (p *PortOpt) Set(value string) error { + longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) + if err != nil { + return err + } + if longSyntax { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + pConfig := swarm.PortConfig{} + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid field %s", field) + } + + key := strings.ToLower(parts[0]) + value := strings.ToLower(parts[1]) + + switch key { + case portOptProtocol: + if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) { + return fmt.Errorf("invalid protocol value %s", value) + } + + pConfig.Protocol = swarm.PortConfigProtocol(value) + case portOptMode: + if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) { + return fmt.Errorf("invalid publish mode value %s", value) + } + + pConfig.PublishMode = swarm.PortConfigPublishMode(value) + case portOptTargetPort: + tPort, err := strconv.ParseUint(value, 10, 16) + if err != nil { + return err + } + + pConfig.TargetPort = uint32(tPort) + case portOptPublishedPort: + pPort, err := strconv.ParseUint(value, 10, 16) + if err != nil { + return err + } + + pConfig.PublishedPort = uint32(pPort) + default: + return fmt.Errorf("invalid field key %s", key) + } + } + + if pConfig.TargetPort == 0 { + return fmt.Errorf("missing mandatory field %q", portOptTargetPort) + } + + if pConfig.PublishMode == "" { + pConfig.PublishMode = swarm.PortConfigPublishModeIngress + } + + if pConfig.Protocol == "" { + pConfig.Protocol = swarm.PortConfigProtocolTCP + } + + p.ports = append(p.ports, pConfig) + } else { + // short syntax + portConfigs := []swarm.PortConfig{} + // We can ignore errors because the format was already validated by ValidatePort + ports, portBindings, _ := nat.ParsePortSpecs([]string{value}) + + for port := range ports { + portConfigs = append(portConfigs, ConvertPortToPortConfig(port, portBindings)...) + } + p.ports = append(p.ports, portConfigs...) + } + return nil +} + +// Type returns the type of this option +func (p *PortOpt) Type() string { + return "port" +} + +// String returns a string repr of this option +func (p *PortOpt) String() string { + ports := []string{} + for _, port := range p.ports { + repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) + ports = append(ports, repr) + } + return strings.Join(ports, ", ") +} + +// Value returns the ports +func (p *PortOpt) Value() []swarm.PortConfig { + return p.ports +} + +// ConvertPortToPortConfig converts ports to the swarm type +func ConvertPortToPortConfig( + port nat.Port, + portBindings map[nat.Port][]nat.PortBinding, +) []swarm.PortConfig { + ports := []swarm.PortConfig{} + + for _, binding := range portBindings[port] { + hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16) + ports = append(ports, swarm.PortConfig{ + //TODO Name: ? + Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), + TargetPort: uint32(port.Int()), + PublishedPort: uint32(hostPort), + PublishMode: swarm.PortConfigPublishModeIngress, + }) + } + return ports +} diff --git a/vendor/github.com/moby/moby/opts/port_test.go b/vendor/github.com/moby/moby/opts/port_test.go new file mode 100644 index 0000000..67bcf8f --- /dev/null +++ b/vendor/github.com/moby/moby/opts/port_test.go @@ -0,0 +1,259 @@ +package opts + +import ( + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestPortOptValidSimpleSyntax(t *testing.T) { + testCases := []struct { + value string + expected []swarm.PortConfig + }{ + { + value: "80", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80:8080", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "8080:80/tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 8080, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80:8080/udp", + expected: []swarm.PortConfig{ + { + Protocol: "udp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80-81:8080-8081/tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + { + Protocol: "tcp", + TargetPort: 8081, + PublishedPort: 81, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80-82:8080-8082/udp", + expected: []swarm.PortConfig{ + { + Protocol: "udp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + { + Protocol: "udp", + TargetPort: 8081, + PublishedPort: 81, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + { + Protocol: "udp", + TargetPort: 8082, + PublishedPort: 82, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + } + for _, tc := range testCases { + var port PortOpt + assert.NilError(t, port.Set(tc.value)) + assert.Equal(t, len(port.Value()), len(tc.expected)) + for _, expectedPortConfig := range tc.expected { + assertContains(t, port.Value(), expectedPortConfig) + } + } +} + +func TestPortOptValidComplexSyntax(t *testing.T) { + testCases := []struct { + value string + expected []swarm.PortConfig + }{ + { + value: "target=80", + expected: []swarm.PortConfig{ + { + TargetPort: 80, + Protocol: "tcp", + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "target=80,protocol=tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "target=80,published=8080,protocol=tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 8080, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "published=80,target=8080,protocol=tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "target=80,published=8080,protocol=tcp,mode=host", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 8080, + PublishMode: "host", + }, + }, + }, + { + value: "target=80,published=8080,mode=host", + expected: []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + PublishMode: "host", + Protocol: "tcp", + }, + }, + }, + { + value: "target=80,published=8080,mode=ingress", + expected: []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + PublishMode: "ingress", + Protocol: "tcp", + }, + }, + }, + } + for _, tc := range testCases { + var port PortOpt + assert.NilError(t, port.Set(tc.value)) + assert.Equal(t, len(port.Value()), len(tc.expected)) + for _, expectedPortConfig := range tc.expected { + assertContains(t, port.Value(), expectedPortConfig) + } + } +} + +func TestPortOptInvalidComplexSyntax(t *testing.T) { + testCases := []struct { + value string + expectedError string + }{ + { + value: "invalid,target=80", + expectedError: "invalid field", + }, + { + value: "invalid=field", + expectedError: "invalid field", + }, + { + value: "protocol=invalid", + expectedError: "invalid protocol value", + }, + { + value: "target=invalid", + expectedError: "invalid syntax", + }, + { + value: "published=invalid", + expectedError: "invalid syntax", + }, + { + value: "mode=invalid", + expectedError: "invalid publish mode value", + }, + { + value: "published=8080,protocol=tcp,mode=ingress", + expectedError: "missing mandatory field", + }, + { + value: `target=80,protocol="tcp,mode=ingress"`, + expectedError: "non-quoted-field", + }, + { + value: `target=80,"protocol=tcp,mode=ingress"`, + expectedError: "invalid protocol value", + }, + } + for _, tc := range testCases { + var port PortOpt + assert.Error(t, port.Set(tc.value), tc.expectedError) + } +} + +func assertContains(t *testing.T, portConfigs []swarm.PortConfig, expected swarm.PortConfig) { + var contains = false + for _, portConfig := range portConfigs { + if portConfig == expected { + contains = true + break + } + } + if !contains { + t.Errorf("expected %v to contain %v, did not", portConfigs, expected) + } +} diff --git a/vendor/github.com/moby/moby/opts/quotedstring.go b/vendor/github.com/moby/moby/opts/quotedstring.go new file mode 100644 index 0000000..fb1e537 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/quotedstring.go @@ -0,0 +1,37 @@ +package opts + +// QuotedString is a string that may have extra quotes around the value. The +// quotes are stripped from the value. +type QuotedString struct { + value *string +} + +// Set sets a new value +func (s *QuotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +// Type returns the type of the value +func (s *QuotedString) Type() string { + return "string" +} + +func (s *QuotedString) String() string { + return string(*s.value) +} + +func trimQuotes(value string) string { + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} + +// NewQuotedString returns a new quoted string option +func NewQuotedString(value *string) *QuotedString { + return &QuotedString{value: value} +} diff --git a/vendor/github.com/moby/moby/opts/quotedstring_test.go b/vendor/github.com/moby/moby/opts/quotedstring_test.go new file mode 100644 index 0000000..0ebf04b --- /dev/null +++ b/vendor/github.com/moby/moby/opts/quotedstring_test.go @@ -0,0 +1,28 @@ +package opts + +import ( + "github.com/docker/docker/pkg/testutil/assert" + "testing" +) + +func TestQuotedStringSetWithQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NilError(t, qs.Set("\"something\"")) + assert.Equal(t, qs.String(), "something") + assert.Equal(t, value, "something") +} + +func TestQuotedStringSetWithMismatchedQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NilError(t, qs.Set("\"something'")) + assert.Equal(t, qs.String(), "\"something'") +} + +func TestQuotedStringSetWithNoQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NilError(t, qs.Set("something")) + assert.Equal(t, qs.String(), "something") +} diff --git a/vendor/github.com/moby/moby/opts/secret.go b/vendor/github.com/moby/moby/opts/secret.go new file mode 100644 index 0000000..1fefcf8 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/secret.go @@ -0,0 +1,107 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/api/types" +) + +// SecretOpt is a Value type for parsing secrets +type SecretOpt struct { + values []*types.SecretRequestOption +} + +// Set a new secret value +func (o *SecretOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + options := &types.SecretRequestOption{ + Source: "", + Target: "", + UID: "0", + GID: "0", + Mode: 0444, + } + + // support a simple syntax of --secret foo + if len(fields) == 1 { + options.Source = fields[0] + options.Target = fields[0] + o.values = append(o.values, options) + return nil + } + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "source", "src": + options.Source = value + case "target": + tDir, _ := filepath.Split(value) + if tDir != "" { + return fmt.Errorf("target must not be a path") + } + options.Target = value + case "uid": + options.UID = value + case "gid": + options.GID = value + case "mode": + m, err := strconv.ParseUint(value, 0, 32) + if err != nil { + return fmt.Errorf("invalid mode specified: %v", err) + } + + options.Mode = os.FileMode(m) + default: + if len(fields) == 1 && value == "" { + + } else { + return fmt.Errorf("invalid field in secret request: %s", key) + } + } + } + + if options.Source == "" { + return fmt.Errorf("source is required") + } + + o.values = append(o.values, options) + return nil +} + +// Type returns the type of this option +func (o *SecretOpt) Type() string { + return "secret" +} + +// String returns a string repr of this option +func (o *SecretOpt) String() string { + secrets := []string{} + for _, secret := range o.values { + repr := fmt.Sprintf("%s -> %s", secret.Source, secret.Target) + secrets = append(secrets, repr) + } + return strings.Join(secrets, ", ") +} + +// Value returns the secret requests +func (o *SecretOpt) Value() []*types.SecretRequestOption { + return o.values +} diff --git a/vendor/github.com/moby/moby/opts/secret_test.go b/vendor/github.com/moby/moby/opts/secret_test.go new file mode 100644 index 0000000..d978c86 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/secret_test.go @@ -0,0 +1,79 @@ +package opts + +import ( + "os" + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestSecretOptionsSimple(t *testing.T) { + var opt SecretOpt + + testCase := "app-secret" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "app-secret") + assert.Equal(t, req.Target, "app-secret") + assert.Equal(t, req.UID, "0") + assert.Equal(t, req.GID, "0") +} + +func TestSecretOptionsSourceTarget(t *testing.T) { + var opt SecretOpt + + testCase := "source=foo,target=testing" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") + assert.Equal(t, req.Target, "testing") +} + +func TestSecretOptionsShorthand(t *testing.T) { + var opt SecretOpt + + testCase := "src=foo,target=testing" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") +} + +func TestSecretOptionsCustomUidGid(t *testing.T) { + var opt SecretOpt + + testCase := "source=foo,target=testing,uid=1000,gid=1001" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") + assert.Equal(t, req.Target, "testing") + assert.Equal(t, req.UID, "1000") + assert.Equal(t, req.GID, "1001") +} + +func TestSecretOptionsCustomMode(t *testing.T) { + var opt SecretOpt + + testCase := "source=foo,target=testing,uid=1000,gid=1001,mode=0444" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") + assert.Equal(t, req.Target, "testing") + assert.Equal(t, req.UID, "1000") + assert.Equal(t, req.GID, "1001") + assert.Equal(t, req.Mode, os.FileMode(0444)) +} diff --git a/vendor/github.com/moby/moby/pkg/README.md b/vendor/github.com/moby/moby/pkg/README.md new file mode 100644 index 0000000..c4b78a8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/README.md @@ -0,0 +1,11 @@ +pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. + +Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. +If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the +Docker organization, to facilitate re-use by other projects. However that is not the priority. + +The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core +Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good +place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/vendor/github.com/moby/moby/pkg/aaparser/aaparser.go b/vendor/github.com/moby/moby/pkg/aaparser/aaparser.go new file mode 100644 index 0000000..ffcc564 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/aaparser/aaparser.go @@ -0,0 +1,91 @@ +// Package aaparser is a convenience package interacting with `apparmor_parser`. +package aaparser + +import ( + "fmt" + "os/exec" + "strconv" + "strings" +) + +const ( + binary = "apparmor_parser" +) + +// GetVersion returns the major and minor version of apparmor_parser. +func GetVersion() (int, error) { + output, err := cmd("", "--version") + if err != nil { + return -1, err + } + + return parseVersion(output) +} + +// LoadProfile runs `apparmor_parser -r` on a specified apparmor profile to +// replace the profile. +func LoadProfile(profilePath string) error { + _, err := cmd("", "-r", profilePath) + if err != nil { + return err + } + return nil +} + +// cmd runs `apparmor_parser` with the passed arguments. +func cmd(dir string, arg ...string) (string, error) { + c := exec.Command(binary, arg...) + c.Dir = dir + + output, err := c.CombinedOutput() + if err != nil { + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err) + } + + return string(output), nil +} + +// parseVersion takes the output from `apparmor_parser --version` and returns +// a representation of the {major, minor, patch} version as a single number of +// the form MMmmPPP {major, minor, patch}. +func parseVersion(output string) (int, error) { + // output is in the form of the following: + // AppArmor parser version 2.9.1 + // Copyright (C) 1999-2008 Novell Inc. + // Copyright 2009-2012 Canonical Ltd. + + lines := strings.SplitN(output, "\n", 2) + words := strings.Split(lines[0], " ") + version := words[len(words)-1] + + // split by major minor version + v := strings.Split(version, ".") + if len(v) == 0 || len(v) > 3 { + return -1, fmt.Errorf("parsing version failed for output: `%s`", output) + } + + // Default the versions to 0. + var majorVersion, minorVersion, patchLevel int + + majorVersion, err := strconv.Atoi(v[0]) + if err != nil { + return -1, err + } + + if len(v) > 1 { + minorVersion, err = strconv.Atoi(v[1]) + if err != nil { + return -1, err + } + } + if len(v) > 2 { + patchLevel, err = strconv.Atoi(v[2]) + if err != nil { + return -1, err + } + } + + // major*10^5 + minor*10^3 + patch*10^0 + numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel + return numericVersion, nil +} diff --git a/vendor/github.com/moby/moby/pkg/aaparser/aaparser_test.go b/vendor/github.com/moby/moby/pkg/aaparser/aaparser_test.go new file mode 100644 index 0000000..69bc8d2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/aaparser/aaparser_test.go @@ -0,0 +1,73 @@ +package aaparser + +import ( + "testing" +) + +type versionExpected struct { + output string + version int +} + +func TestParseVersion(t *testing.T) { + versions := []versionExpected{ + { + output: `AppArmor parser version 2.10 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 210000, + }, + { + output: `AppArmor parser version 2.8 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 208000, + }, + { + output: `AppArmor parser version 2.20 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 220000, + }, + { + output: `AppArmor parser version 2.05 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 205000, + }, + { + output: `AppArmor parser version 2.9.95 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 209095, + }, + { + output: `AppArmor parser version 3.14.159 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 314159, + }, + } + + for _, v := range versions { + version, err := parseVersion(v.output) + if err != nil { + t.Fatalf("expected error to be nil for %#v, got: %v", v, err) + } + if version != v.version { + t.Fatalf("expected version to be %d, was %d, for: %#v\n", v.version, version, v) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/README.md b/vendor/github.com/moby/moby/pkg/archive/README.md new file mode 100644 index 0000000..7307d96 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/moby/moby/pkg/archive/archive.go b/vendor/github.com/moby/moby/pkg/archive/archive.go new file mode 100644 index 0000000..3261c4f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive.go @@ -0,0 +1,1175 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + // TarChownOptions wraps the chown options UID and GID. + TarChownOptions struct { + UID, GID int + } + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *TarChownOptions + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. Also, to facilitate the passing of + // specific id mappings for untar, an archiver can be created with maps + // which will then be passed to Untar operations + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error +) + +var ( + // ErrNotImplemented is the error message of function not implemented. + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} +) + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.Command(args[0], args[1:]...), archive) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, chdone, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { + <-chdone + return readBufWrapper.Close() + }), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresseses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { + uid, gid, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + if err != nil { + return err + } + xGID, err := idtools.ToContainer(gid, ta.GIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + hdr.Gid = xGID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), + } + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (eg !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !exceptions { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range patterns { + if pat[0] != '!' { + continue + } + pat = pat[1:] + string(filepath.Separator) + if strings.HasPrefix(pat, dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. + if hdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if hdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + + // only perform mapping if the file being copied isn't already owned by the + // uid or gid of the remapped root in the container + if remappedRootUID != hdr.Uid { + xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if remappedRootGID != hdr.Gid { + xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// Destination handling is in an operating specific manner depending +// where the daemon is running. If `dst` ends with a trailing slash +// the final destination path will be `dst/base(src)` (Linux) or +// `dst\base(src)` (Windows). +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { + chdone := make(chan struct{}) + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(chdone) + }() + + return pipeR, chdone, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_linux.go b/vendor/github.com/moby/moby/pkg/archive/archive_linux.go new file mode 100644 index 0000000..6b2a31f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_linux.go @@ -0,0 +1,95 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_linux_test.go b/vendor/github.com/moby/moby/pkg/archive/archive_linux_test.go new file mode 100644 index 0000000..d5f046e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_linux_test.go @@ -0,0 +1,187 @@ +package archive + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" + + "github.com/docker/docker/pkg/system" +) + +// setupOverlayTestDir creates files in a directory with overlay whiteouts +// Tree layout +// . +// ├── d1 # opaque, 0700 +// │   └── f1 # empty file, 0600 +// ├── d2 # opaque, 0750 +// │   └── f1 # empty file, 0660 +// └── d3 # 0700 +// └── f1 # whiteout, 0644 +func setupOverlayTestDir(t *testing.T, src string) { + // Create opaque directory containing single file and permission 0700 + if err := os.Mkdir(filepath.Join(src, "d1"), 0700); err != nil { + t.Fatal(err) + } + + if err := system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0600); err != nil { + t.Fatal(err) + } + + // Create another opaque directory containing single file but with permission 0750 + if err := os.Mkdir(filepath.Join(src, "d2"), 0750); err != nil { + t.Fatal(err) + } + + if err := system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660); err != nil { + t.Fatal(err) + } + + // Create regular directory with deleted file + if err := os.Mkdir(filepath.Join(src, "d3"), 0700); err != nil { + t.Fatal(err) + } + + if err := system.Mknod(filepath.Join(src, "d3", "f1"), syscall.S_IFCHR, 0); err != nil { + t.Fatal(err) + } +} + +func checkOpaqueness(t *testing.T, path string, opaque string) { + xattrOpaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + t.Fatal(err) + } + if string(xattrOpaque) != opaque { + t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque) + } + +} + +func checkOverlayWhiteout(t *testing.T, path string) { + stat, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + t.Fatalf("Unexpected type: %t, expected *syscall.Stat_t", stat.Sys()) + } + if statT.Rdev != 0 { + t.Fatalf("Non-zero device number for whiteout") + } +} + +func checkFileMode(t *testing.T, path string, perm os.FileMode) { + stat, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + if stat.Mode() != perm { + t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm) + } +} + +func TestOverlayTarUntar(t *testing.T) { + oldmask, err := system.Umask(0) + if err != nil { + t.Fatal(err) + } + defer system.Umask(oldmask) + + src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + + setupOverlayTestDir(t, src) + + dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + + options := &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: OverlayWhiteoutFormat, + } + archive, err := TarWithOptions(src, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, options); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", "f1"), os.ModeCharDevice|os.ModeDevice) + + checkOpaqueness(t, filepath.Join(dst, "d1"), "y") + checkOpaqueness(t, filepath.Join(dst, "d2"), "y") + checkOpaqueness(t, filepath.Join(dst, "d3"), "") + checkOverlayWhiteout(t, filepath.Join(dst, "d3", "f1")) +} + +func TestOverlayTarAUFSUntar(t *testing.T) { + oldmask, err := system.Umask(0) + if err != nil { + t.Fatal(err) + } + defer system.Umask(oldmask) + + src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + + setupOverlayTestDir(t, src) + + dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + + archive, err := TarWithOptions(src, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: OverlayWhiteoutFormat, + }) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: AUFSWhiteoutFormat, + }); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0700) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2", WhiteoutOpaqueDir), 0750) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", WhiteoutPrefix+"f1"), 0600) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_other.go b/vendor/github.com/moby/moby/pkg/archive/archive_other.go new file mode 100644 index 0000000..54acbf2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_test.go b/vendor/github.com/moby/moby/pkg/archive/archive_test.go new file mode 100644 index 0000000..b883be3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_test.go @@ -0,0 +1,1162 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +var tmp string + +func init() { + tmp = "/tmp/" + if runtime.GOOS == "windows" { + tmp = os.Getenv("TEMP") + `\` + } +} + +func TestIsArchiveNilHeader(t *testing.T) { + out := IsArchive(nil) + if out { + t.Fatalf("isArchive should return false as nil is not a valid archive header") + } +} + +func TestIsArchiveInvalidHeader(t *testing.T) { + header := []byte{0x00, 0x01, 0x02} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is not a valid archive header", header) + } +} + +func TestIsArchiveBzip2(t *testing.T) { + header := []byte{0x42, 0x5A, 0x68} + out := IsArchive(header) + if !out { + t.Fatalf("isArchive should return true as %s is a bz2 header", header) + } +} + +func TestIsArchive7zip(t *testing.T) { + header := []byte{0x50, 0x4b, 0x03, 0x04} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) + } +} + +func TestIsArchivePathDir(t *testing.T) { + cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archivedir") { + t.Fatalf("Incorrectly recognised directory as an archive") + } +} + +func TestIsArchivePathInvalidFile(t *testing.T) { + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archive") { + t.Fatalf("Incorrectly recognised invalid tar path as archive") + } + if IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") + } +} + +func TestIsArchivePathTar(t *testing.T) { + var whichTar string + if runtime.GOOS == "solaris" { + whichTar = "gtar" + } else { + whichTar = "tar" + } + cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) + cmd := exec.Command("sh", "-c", cmdStr) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if !IsArchivePath(tmp + "/archive") { + t.Fatalf("Did not recognise valid tar path as archive") + } + if !IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Did not recognise valid compressed tar path as archive") + } +} + +func testDecompressStream(t *testing.T, ext, compressCommand string) { + cmd := exec.Command("sh", "-c", + fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to create an archive file for test : %s.", output) + } + filename := "archive." + ext + archive, err := os.Open(tmp + filename) + if err != nil { + t.Fatalf("Failed to open file %s: %v", filename, err) + } + defer archive.Close() + + r, err := DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress %s: %v", filename, err) + } + if _, err = ioutil.ReadAll(r); err != nil { + t.Fatalf("Failed to read the decompressed stream: %v ", err) + } + if err = r.Close(); err != nil { + t.Fatalf("Failed to close the decompressed stream: %v ", err) + } +} + +func TestDecompressStreamGzip(t *testing.T) { + testDecompressStream(t, "gz", "gzip -f") +} + +func TestDecompressStreamBzip2(t *testing.T) { + testDecompressStream(t, "bz2", "bzip2 -f") +} + +func TestDecompressStreamXz(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Xz not present in msys2") + } + testDecompressStream(t, "xz", "xz -f") +} + +func TestCompressStreamXzUnsuported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamBzip2Unsupported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamInvalid(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, -1) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestExtensionInvalid(t *testing.T) { + compression := Compression(-1) + output := compression.Extension() + if output != "" { + t.Fatalf("The extension of an invalid compression should be an empty string.") + } +} + +func TestExtensionUncompressed(t *testing.T) { + compression := Uncompressed + output := compression.Extension() + if output != "tar" { + t.Fatalf("The extension of an uncompressed archive should be 'tar'.") + } +} +func TestExtensionBzip2(t *testing.T) { + compression := Bzip2 + output := compression.Extension() + if output != "tar.bz2" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") + } +} +func TestExtensionGzip(t *testing.T) { + compression := Gzip + output := compression.Extension() + if output != "tar.gz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") + } +} +func TestExtensionXz(t *testing.T) { + compression := Xz + output := compression.Extension() + if output != "tar.xz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") + } +} + +func TestCmdStreamLargeStderr(t *testing.T) { + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + errCh := make(chan error) + go func() { + _, err := io.Copy(ioutil.Discard, out) + errCh <- err + }() + select { + case err := <-errCh: + if err != nil { + t.Fatalf("Command should not have failed (err=%.100s...)", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("Command did not complete in 5 seconds; probable deadlock") + } +} + +func TestCmdStreamBad(t *testing.T) { + // TODO Windows: Figure out why this is failing in CI but not locally + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows CI machines") + } + badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") + out, _, err := cmdStream(badCmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + if output, err := ioutil.ReadAll(out); err == nil { + t.Fatalf("Command should have failed") + } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { + t.Fatalf("Wrong error value (%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestCmdStreamGood(t *testing.T) { + cmd := exec.Command("sh", "-c", "echo hello; exit 0") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatal(err) + } + if output, err := ioutil.ReadAll(out); err != nil { + t.Fatalf("Command should not have failed (err=%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestUntarPathWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + invalidDestFolder := filepath.Join(tempFolder, "invalidDest") + // Create a src file + srcFile := filepath.Join(tempFolder, "src") + tarFile := filepath.Join(tempFolder, "src.tar") + os.Create(srcFile) + os.Create(invalidDestFolder) // being a file (not dir) should cause an error + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + err = UntarPath(tarFile, invalidDestFolder) + if err == nil { + t.Fatalf("UntarPath with invalid destination path should throw an error.") + } +} + +func TestUntarPathWithInvalidSrc(t *testing.T) { + dest, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer os.RemoveAll(dest) + err = UntarPath("/invalid/path", dest) + if err == nil { + t.Fatalf("UntarPath with invalid src path should throw an error.") + } +} + +func TestUntarPath(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(filepath.Join(tmpFolder, "src")) + + destFolder := filepath.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath shouldn't throw an error, %s.", err) + } + expectedFile := filepath.Join(destFolder, srcFileU) + _, err = os.Stat(expectedFile) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +// Do the same test as above but with the destination as file, it should fail +func TestUntarPathWithDestinationFile(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(filepath.Join(tmpFolder, "src")) + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFile := filepath.Join(tmpFolder, "dest") + _, err = os.Create(destFile) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFile) + if err == nil { + t.Fatalf("UntarPath should throw an error if the destination if a file") + } +} + +// Do the same test as above but with the destination folder already exists +// and the destination file is a directory +// It's working, see https://github.com/docker/docker/issues/10040 +func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(srcFile) + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := filepath.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination folder") + } + // Let's create a folder that will has the same path as the extracted file (from tar) + destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) + err = os.MkdirAll(destSrcFileAsFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") + } +} + +func TestCopyWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + destFolder := filepath.Join(tempFolder, "dest") + invalidSrc := filepath.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(invalidSrc, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + srcFolder := filepath.Join(tempFolder, "src") + inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(srcFolder, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } +} + +// Test CopyWithTar with a file as src +func TestCopyWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, filepath.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content + if err != nil { + t.Fatalf("Destination file should be the same as the source.") + } +} + +// Test CopyWithTar with a folder as src +func TestCopyWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + src := filepath.Join(folder, filepath.Join("src", "folder")) + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content (the file inside) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestCopyFileWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + destFolder := filepath.Join(tempFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + invalidFile := filepath.Join(tempFolder, "doesnotexists") + err = CopyFileWithTar(invalidFile, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + defer os.RemoveAll(tempFolder) + srcFile := filepath.Join(tempFolder, "src") + inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") + _, err = os.Create(srcFile) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(srcFile, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } + // FIXME Test the src file and content +} + +func TestCopyFileWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + src := filepath.Join(folder, "srcfolder") + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(src, dest) + if err == nil { + t.Fatalf("CopyFileWithTar should throw an error with a folder.") + } +} + +func TestCopyFileWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, filepath.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest+"/") + if err != nil { + t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestTarFiles(t *testing.T) { + // TODO Windows: Figure out how to port this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = TarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) + } + + tmp, err := ioutil.TempDir("", "docker-test-untar") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmp) + if err := Untar(wrap, tmp, nil); err != nil { + return nil, err + } + if _, err := os.Stat(tmp); err != nil { + return nil, err + } + + return ChangesDirs(origin, tmp) +} + +func TestTarUntar(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + } +} + +func TestTarWithOptions(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.TempDir(origin, "folder"); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + + cases := []struct { + opts *TarOptions + numChanges int + }{ + {&TarOptions{IncludeFiles: []string{"1"}}, 2}, + {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, + {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, + } + for _, testCase := range cases { + changes, err := tarUntar(t, origin, testCase.opts) + if err != nil { + t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, false) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func BenchmarkTarUntarWithLinks(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, true) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func TestUntarInvalidFilenames(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarHardlinkToSymlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_unix.go b/vendor/github.com/moby/moby/pkg/archive/archive_unix.go new file mode 100644 index 0000000..7083f2f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_unix.go @@ -0,0 +1,118 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + inode = uint64(s.Ino) + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + } + return int(s.Uid), int(s.Gid), nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_unix_test.go b/vendor/github.com/moby/moby/pkg/archive/archive_unix_test.go new file mode 100644 index 0000000..4eeafdd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_unix_test.go @@ -0,0 +1,249 @@ +// +build !windows + +package archive + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "syscall" + "testing" + + "github.com/docker/docker/pkg/system" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct{ in, expected string }{ + {"foo", "foo"}, + {"foo/bar", "foo/bar"}, + {"foo/dir/", "foo/dir/"}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {"foo/bar", false, "foo/bar"}, + {"foo/bar", true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0000}, + {0777, 0777}, + {0644, 0644}, + {0755, 0755}, + {0444, 0444}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) + } + } +} + +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")); err != nil { + t.Fatal(err) + } + + var i1, i2 uint64 + if i1, err = getNlink(filepath.Join(origin, "1")); err != nil { + t.Fatal(err) + } + // sanity check that we can hardlink + if i1 != 2 { + t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + if i1, err = getInode(filepath.Join(dest, "1")); err != nil { + t.Fatal(err) + } + if i2, err = getInode(filepath.Join(dest, "2")); err != nil { + t.Fatal(err) + } + + if i1 != i2 { + t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + } +} + +func getNlink(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + // We need this conversion on ARM64 + return uint64(statT.Nlink), nil +} + +func getInode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Ino, nil +} + +func TestTarWithBlockCharFifo(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(origin, dest) + if err != nil { + t.Fatal(err) + } + if len(changes) > 0 { + t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) + } +} + +// TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows +func TestTarUntarWithXattr(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip() + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability") + if capability == nil && capability[0] != 0x00 { + t.Fatalf("Untar should have kept the 'security.capability' xattr.") + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_windows.go b/vendor/github.com/moby/moby/pkg/archive/archive_windows.go new file mode 100644 index 0000000..5c3a1be --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_windows.go @@ -0,0 +1,70 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + // no notion of file ownership mapping yet on Windows + return 0, 0, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_windows_test.go b/vendor/github.com/moby/moby/pkg/archive/archive_windows_test.go new file mode 100644 index 0000000..0c6733d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_windows_test.go @@ -0,0 +1,91 @@ +// +build windows + +package archive + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestCopyFileWithInvalidDest(t *testing.T) { + // TODO Windows: This is currently failing. Not sure what has + // recently changed in CopyWithTar as used to pass. Further investigation + // is required. + t.Skip("Currently fails") + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := "c:dest" + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, "src", "src") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err == nil { + t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") + } +} + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct { + in, expected string + shouldFail bool + }{ + {"foo", "foo", false}, + {"foo/bar", "___", true}, // unix-styled windows path must fail + {`foo\bar`, "foo/bar", false}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if v.shouldFail && err == nil { + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) + } else if !v.shouldFail && out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {`foo\bar`, false, "foo/bar"}, + {`foo\bar`, true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0111}, + {0777, 0755}, + {0644, 0755}, + {0755, 0755}, + {0444, 0555}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes.go b/vendor/github.com/moby/moby/pkg/archive/changes.go new file mode 100644 index 0000000..c07d55c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes.go @@ -0,0 +1,446 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_linux.go b/vendor/github.com/moby/moby/pkg/archive/changes_linux.go new file mode 100644 index 0000000..fc5a9df --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_linux.go @@ -0,0 +1,312 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_other.go b/vendor/github.com/moby/moby/pkg/archive/changes_other.go new file mode 100644 index 0000000..da70ed3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_posix_test.go b/vendor/github.com/moby/moby/pkg/archive/changes_posix_test.go new file mode 100644 index 0000000..095102e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_posix_test.go @@ -0,0 +1,132 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "runtime" + "sort" + "testing" +) + +func TestHardLinkOrder(t *testing.T) { + //TODO Should run for Solaris + if runtime.GOOS == "solaris" { + t.Skip("gcp failures on Solaris") + } + names := []string{"file1.txt", "file2.txt", "file3.txt"} + msg := []byte("Hey y'all") + + // Create dir + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + for _, name := range names { + func() { + fh, err := os.Create(path.Join(src, name)) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if _, err = fh.Write(msg); err != nil { + t.Fatal(err) + } + }() + } + // Create dest, with changes that includes hardlinks + dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(dest) // we just want the name, at first + if err := copyDir(src, dest); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + for _, name := range names { + for i := 0; i < 5; i++ { + if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { + t.Fatal(err) + } + } + } + + // get changes + changes, err := ChangesDirs(dest, src) + if err != nil { + t.Fatal(err) + } + + // sort + sort.Sort(changesByPath(changes)) + + // ExportChanges + ar, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrs, err := walkHeaders(ar) + if err != nil { + t.Fatal(err) + } + + // reverse sort + sort.Sort(sort.Reverse(changesByPath(changes))) + // ExportChanges + arRev, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrsRev, err := walkHeaders(arRev) + if err != nil { + t.Fatal(err) + } + + // line up the two sets + sort.Sort(tarHeaders(hdrs)) + sort.Sort(tarHeaders(hdrsRev)) + + // compare Size and LinkName + for i := range hdrs { + if hdrs[i].Name != hdrsRev[i].Name { + t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) + } + if hdrs[i].Size != hdrsRev[i].Size { + t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) + } + if hdrs[i].Typeflag != hdrsRev[i].Typeflag { + t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) + } + if hdrs[i].Linkname != hdrsRev[i].Linkname { + t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) + } + } + +} + +type tarHeaders []tar.Header + +func (th tarHeaders) Len() int { return len(th) } +func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } +func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } + +func walkHeaders(r io.Reader) ([]tar.Header, error) { + t := tar.NewReader(r) + headers := []tar.Header{} + for { + hdr, err := t.Next() + if err != nil { + if err == io.EOF { + break + } + return headers, err + } + headers = append(headers, *hdr) + } + return headers, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_test.go b/vendor/github.com/moby/moby/pkg/archive/changes_test.go new file mode 100644 index 0000000..eae1d02 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_test.go @@ -0,0 +1,572 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "runtime" + "sort" + "testing" + "time" + + "github.com/docker/docker/pkg/system" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if runtime.GOOS == "solaris" { + cmd = exec.Command("gcp", "-a", src, dst) + } + + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + {Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + {Regular, "dir4/file3-2", "file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + {Symlink, "symlink3", root + "/file1", 0666}, + {Symlink, "symlink4", root + "/symlink3", 0666}, + {Symlink, "dirSymlink", root + "/dir1", 0740}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, atime for all files and dirs + if err := system.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +func TestChangeString(t *testing.T) { + modifiyChange := Change{"change", ChangeModify} + toString := modifiyChange.String() + if toString != "C change" { + t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + } + addChange := Change{"change", ChangeAdd} + toString = addChange.String() + if toString != "A change" { + t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) + } + deleteChange := Change{"change", ChangeDelete} + toString = deleteChange.String() + if toString != "D change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func TestChangesWithNoChanges(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + // Mock the readonly layer + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Mock the RW layer + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + + // Create a folder in RW layer + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, []byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1", ChangeModify}, + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) +} + +// See https://github.com/docker/docker/pull/13590 +func TestChangesWithChangesGH13590(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + defer os.RemoveAll(baseLayer) + + dir3 := path.Join(baseLayer, "dir1/dir2/dir3") + os.MkdirAll(dir3, 07400) + + file := path.Join(dir3, "file.txt") + ioutil.WriteFile(file, []byte("hello"), 0666) + + layer, err := ioutil.TempDir("", "docker-changes-test2.") + defer os.RemoveAll(layer) + + // Test creating a new file + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) + file = path.Join(layer, "dir1/dir2/dir3/file1.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err := Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1/dir2/dir3", ChangeModify}, + {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) + + // Now test changing a file + layer, err = ioutil.TempDir("", "docker-changes-test3.") + defer os.RemoveAll(layer) + + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + file = path.Join(layer, "dir1/dir2/dir3/file.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err = Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges = []Change{ + {"/dir1/dir2/dir3/file.txt", ChangeModify}, + } + checkChanges(expectedChanges, changes, t) +} + +// Create a directory, copy it, make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failure on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failures on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(changesByPath(changes)) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failures on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if _, err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} + +func TestChangesSizeWithHardlinks(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(destDir) + + creationSize, err := prepareUntarSourceDirectory(100, destDir, true) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + t.Fatal(err) + } + + got := ChangesSize(destDir, changes) + if got != int64(creationSize) { + t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) + } +} + +func TestChangesSizeWithNoChanges(t *testing.T) { + size := ChangesSize("/tmp", nil) + if size != 0 { + t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) + } +} + +func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { + changes := []Change{ + {Path: "deletedPath", Kind: ChangeDelete}, + } + size := ChangesSize("/tmp", changes) + if size != 0 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func TestChangesSize(t *testing.T) { + parentPath, err := ioutil.TempDir("", "docker-changes-test") + defer os.RemoveAll(parentPath) + addition := path.Join(parentPath, "addition") + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + modification := path.Join(parentPath, "modification") + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + changes := []Change{ + {Path: "addition", Kind: ChangeAdd}, + {Path: "modification", Kind: ChangeModify}, + } + size := ChangesSize(parentPath, changes) + if size != 6 { + t.Fatalf("Expected 6 bytes of changes, got %d", size) + } +} + +func checkChanges(expectedChanges, changes []Change, t *testing.T) { + sort.Sort(changesByPath(expectedChanges)) + sort.Sort(changesByPath(changes)) + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_unix.go b/vendor/github.com/moby/moby/pkg/archive/changes_unix.go new file mode 100644 index 0000000..3778b73 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package archive + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return uint64(fi.Sys().(*syscall.Stat_t).Ino) +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_windows.go b/vendor/github.com/moby/moby/pkg/archive/changes_windows.go new file mode 100644 index 0000000..af94243 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_windows.go @@ -0,0 +1,30 @@ +package archive + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + + // Don't look at size for dirs, its not a good measure of change + if oldStat.ModTime() != newStat.ModTime() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/moby/moby/pkg/archive/copy.go b/vendor/github.com/moby/moby/pkg/archive/copy.go new file mode 100644 index 0000000..0614c67 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/copy.go @@ -0,0 +1,458 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. + dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. + rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/moby/moby/pkg/archive/copy_unix.go b/vendor/github.com/moby/moby/pkg/archive/copy_unix.go new file mode 100644 index 0000000..e305b5e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/copy_unix_test.go b/vendor/github.com/moby/moby/pkg/archive/copy_unix_test.go new file mode 100644 index 0000000..ecbfc17 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/copy_unix_test.go @@ -0,0 +1,978 @@ +// +build !windows + +// TODO Windows: Some of these tests may be salvagable and portable to Windows. + +package archive + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +func removeAllPaths(paths ...string) { + for _, path := range paths { + os.RemoveAll(path) + } +} + +func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { + var err error + + if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + return +} + +func isNotDir(err error) bool { + return strings.Contains(err.Error(), "not a directory") +} + +func joinTrailingSep(pathElements ...string) string { + joined := filepath.Join(pathElements...) + + return fmt.Sprintf("%s%c", joined, filepath.Separator) +} + +func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { + t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) + + fileA, err := os.Open(filenameA) + if err != nil { + return + } + defer fileA.Close() + + fileB, err := os.Open(filenameB) + if err != nil { + return + } + defer fileB.Close() + + hasher := sha256.New() + + if _, err = io.Copy(hasher, fileA); err != nil { + return + } + + hashA := hasher.Sum(nil) + hasher.Reset() + + if _, err = io.Copy(hasher, fileB); err != nil { + return + } + + hashB := hasher.Sum(nil) + + if !bytes.Equal(hashA, hashB) { + err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) + } + + return +} + +func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { + t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) + + var changes []Change + + if changes, err = ChangesDirs(newDir, oldDir); err != nil { + return + } + + if len(changes) != 0 { + err = fmt.Errorf("expected no changes between directories, but got: %v", changes) + } + + return +} + +func logDirContents(t *testing.T, dirPath string) { + logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Errorf("stat error for path %q: %s", path, err) + return nil + } + + if info.IsDir() { + path = joinTrailingSep(path) + } + + t.Logf("\t%s", path) + + return nil + }) + + t.Logf("logging directory contents: %q", dirPath) + + if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { + t.Fatal(err) + } +} + +func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q (not follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, false) +} + +func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q (follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, true) +} + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func TestCopyErrSrcNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func TestCopyErrSrcNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func TestCopyErrDstParentNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + // Try with a file source. + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a file whose parent does not exist. + if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a directory whose parent does not exist. + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func TestCopyErrDstNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + // Try with a file source. + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func TestCopyCaseA(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "itWorks.txt") + + var err error + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + symlinkPath1 := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func TestCopyCaseB(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := joinTrailingSep(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + + if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseC(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // Ensure they start out different. + if err = fileContentsEqual(t, srcPath, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// C. Symbol link following version: +// SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseCFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + symlinkPathBad := filepath.Join(tmpDirA, "symlink1") + symlinkPath := filepath.Join(tmpDirA, "symlink3") + linkTarget := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // first to test broken link + if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + // test symbol link -> symbol link -> target + // Ensure they start out different. + if err = fileContentsEqual(t, linkTarget, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseD(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. Symbol link following version: +// SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseDFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "symlink4") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseE(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// E. Symbol link following version: +// SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseEFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func TestCopyCaseF(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + symSrcDir := filepath.Join(tmpDirA, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now test with symbol link + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseG(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// G. Symbol link version: +// SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseGFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dirSymlink") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseH(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// H. Symbol link following version: +// SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseHFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func TestCopyCaseI(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + symSrcDir := filepath.Join(tmpDirB, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now try with symbol link of dir + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJ(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// J. Symbol link following version: +// SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/copy_windows.go b/vendor/github.com/moby/moby/pkg/archive/copy_windows.go new file mode 100644 index 0000000..2b775b4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/diff.go b/vendor/github.com/moby/moby/pkg/archive/diff.go new file mode 100644 index 0000000..9e1a58c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/diff.go @@ -0,0 +1,279 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return 0, err + } + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + if options == nil { + options = &TarOptions{} + } + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. + if srcHdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) + if err != nil { + return 0, err + } + srcHdr.Uid = xUID + } + if srcHdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) + if err != nil { + return 0, err + } + srcHdr.Gid = xGID + } + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/diff_test.go b/vendor/github.com/moby/moby/pkg/archive/diff_test.go new file mode 100644 index 0000000..8167941 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/diff_test.go @@ -0,0 +1,386 @@ +package archive + +import ( + "archive/tar" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/docker/docker/pkg/ioutils" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeSymLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerWhiteouts(t *testing.T) { + // TODO Windows: Figure out why this test fails + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + + wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") + if err != nil { + return + } + defer os.RemoveAll(wd) + + base := []string{ + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "baz", + "foo/", + "foo/.abc", + "foo/.bcd/", + "foo/.bcd/a", + "foo/cde/", + "foo/cde/def", + "foo/cde/efg", + "foo/fgh", + "foobar", + } + + type tcase struct { + change, expected []string + } + + tcases := []tcase{ + { + base, + base, + }, + { + []string{ + ".bay", + ".wh.baz", + "foo/", + "foo/.bce", + "foo/.wh..wh..opq", + "foo/cde/", + "foo/cde/efg", + }, + []string{ + ".bay", + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.bce", + "foo/cde/", + "foo/cde/efg", + "foobar", + }, + }, + { + []string{ + ".bay", + ".wh..baz", + ".wh.foobar", + "foo/", + "foo/.abc", + "foo/.wh.cde", + "bar/", + }, + []string{ + ".bay", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.abc", + "foo/.bce", + }, + }, + { + []string{ + ".abc", + ".wh..wh..opq", + "foobar", + }, + []string{ + ".abc", + "foobar", + }, + }, + } + + for i, tc := range tcases { + l, err := makeTestLayer(tc.change) + if err != nil { + t.Fatal(err) + } + + _, err = UnpackLayer(wd, l, nil) + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + + paths, err := readDirContents(wd) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, paths) { + t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) + } + } + +} + +func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { + tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + for _, p := range paths { + if p[len(p)-1] == filepath.Separator { + if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { + return + } + } else { + if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { + return + } + } + } + archive, err := Tar(tmpDir, Uncompressed) + if err != nil { + return + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + os.RemoveAll(tmpDir) + return err + }), nil +} + +func readDirContents(root string) ([]string, error) { + var files []string + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == root { + return nil + } + rel, err := filepath.Rel(root, path) + if err != nil { + return err + } + if info.IsDir() { + rel = rel + "/" + } + files = append(files, rel) + return nil + }) + if err != nil { + return nil, err + } + return files, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/example_changes.go b/vendor/github.com/moby/moby/pkg/archive/example_changes.go new file mode 100644 index 0000000..cedd46a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/testdata/broken.tar b/vendor/github.com/moby/moby/pkg/archive/testdata/broken.tar new file mode 100644 index 0000000000000000000000000000000000000000..8f10ea6b87d3eb4fed572349dfe87695603b10a5 GIT binary patch literal 13824 zcmeHN>rxv>7UtLfn5Q@^l8gXrG&7O_li)0oQBai)6v9rjo&-ixOPXbFo(r;aaqT1Q zi|o_vJM6y3ey8Um2^?(fm~vH66==Hq^tqqYr_U$~f~3CkaX-4=)VFkfMbAE0zj=1W zFdGeXOK)!KtrgwSO|!8=t&huAhCPiFI|54|O6#g{AByje_D5`gZ4lbN_tD%y+P?+6 zW}mCyJbT6dM$<6v?SB_8uxS5j5M6u>C%C=+&BoS!{NIK7SFYLLXgq9fL;u??&1{)C_QVb?f0pB4xfD_C1pX2f z=LE&>$4O)llEszRik&8tAi~^>9~IXb2tQsXkop&XF!hz8gWXO)O@R9>nS~7H1w&*U zWf1ryXPidjED|qMClc|F!YuB;N}eT-8}IBqwJ!w!F&$m$r;a;(N7!YIEb7h<=ej}& zT~f;Cd!ZOC&mX2n zv4)UvkOa{z8}jxVC6bTq+3^R;Sok8c6EQsN&k9^`&h(Hc32JVwt-Hrj<{`vG3V< zCk?#){6BW>!9@+(L2u}{Jos}CZh!u_HaA;$dH(--^ZzaF-*=tS5&i^O)@Me!3BwBQ`@=VE zIl)Fp0MG z@%2K`G+^8HA?T&;xGZB%_q<@Vt&(_!w-gfXxk@mb9|fb)1BuBGk_ptuvx%G~pq0Kb zb&?6Szj_3#ClOiI_3vu1e+mOX z9k`Og2B5RmN7LGZ)c;3%E%Ip__9KKUf&G&zD9jkJNr-{ibNby{ds> zUrSU_0z^Wf<)}gE{Jb22kgArW_I#nO79{eFvL6rZP*4oJ7H%7}fn5i&1ZT@5hDK4~ z(U`5S#`Fws86Z{2P=gP6usiI=mKaOr@4W|(?6Ye5$Oayf(LUxEb zaN*HO8gZBg{sZJ1)pg4>36^kmC*dQ2;oE@^#)cw_*aI^!cM=y1Rqga(?Ey`Mja44@ zco?Vs7`J_y5ir%m6vXp*y&Gb{4lfBvR0R>wjxNBA^zHAzdc;~eK6(s=AB|{$OM8p} zp9LwiIkAyG5Q$+F3`7h$CPJbL(j-h1h61!ZViYo4dBXOg@lop12w4VYz!&$vL+Po-n0lE6B8Y;6$Ar89(FQ zU43m0VVC)g+}A0GY(H3=vGXH;5|6sFnZk+NN-WF&+)64KnDBNmlR?P<{j247c6ZGs zY`hF!K4&Hi(0r~#=6sH0f#>;~|6uT_GuPArovwt~PT&t2-pNh;x9aMe7i;!lK!(<$ z?d`g5*7a@bJ?(y(Y4ln98)|Cinp8V=gdKs-N$TT&k8N344C6y&*H}a~{9Pg&%cB8( zs3gwCMEH-=;aI?u+)#>TQj}R!`jyO-QsK*KZS|lK9+9#7oV0B(la+@sRbyfJf~*mY z#+u;OA2B@66aq^nOW6`=t5qYdRV{oFkE8T+GhJI-*NldTtcr!I|PQf({z2i zZs;`}x~m6ks)bXh@+($$(s>pJ`5X6~16{UfoJC(mW1b(MtJcpN$ZBT3r1B`&Cx9{-iF=!{A}z(ob033DW~d!*9$cfm zVNC%z6l$8Qz0LiPv&`A!8a*yd3zi-in+*e-!2$MiQNyE>1xX!65{vsnGKkf9!|0+OGBAb= z5*&U!Rl91sZq^%6Di#9<<87G)rv;99!{p6oE&}gq)LXeeJT)kYlsjz{ehkbMY(O`q zGvc6vviAh-6>EFt+I|*)$Z&%o;(ob2LAmI= zd);1Ux&vAHF3sW+ZYtInM5`7V!gWe@@A3}gzBN4OzKHcFXhsnBZ62vkM}c;c8?C16|}T)I>F_`E4y<`7O_Uv z_IIGuK3}j6k8x0(NE^)|N^6ztuoF5wcqyCPP4-b>1H5)kQM(q_kYzo37tjs2w1@@5 z)pou5q*BNKlggS#-4TOxF*--bZwQgZIP>8>Wh4R6qJg1trGj7P+M9C-U$bgV0-Bbc zM}8SyaI1`5o3Hn=gK~dij~yq2v7>PXETRIqq!En36W>+P9az*N;)5;FK054lzkPPH zcY4hR*Orc{l5us$Y*nZ!(@__9wdDn6|B~BL+;v!B^Cr(N`)UtH54-56s#rGO&e@Q}~KNYPdQ94MZxA|gP9PSIqe@Ff$9bNNvws)xH zUYfZ#^MIJly?f4ly_CL`QQoB~o&>3jKAlL=*#tHX$;*%#;^sVnJHGU0={L0dh$?du z$V*u|2o=sbG6HQV;$?~-5Xh?Gjf~m#{@1wY+1@T!Us<#xZ;2Rn{Y@!B=|jZ;TY#GL zQet9G=4h_z5?#7$NWf6BJyZ3f$1aFp02S_lpyVtB;|niLX54VbZP`xU1YMSiGnf#! zBhWBJBLfCg3eCtIG~av^x3Yo4twnBx#0a&E>6G9&~+z{;Wn%CtG>DYD1(pjqYiYL oJsf9Rk?Q4-IWqA2mih3}{ZBUT=3UD@m3s}`Yv5i3pOOat4?XSI`2YX_ literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/pkg/archive/time_linux.go b/vendor/github.com/moby/moby/pkg/archive/time_linux.go new file mode 100644 index 0000000..3448569 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/time_unsupported.go b/vendor/github.com/moby/moby/pkg/archive/time_unsupported.go new file mode 100644 index 0000000..e85aac0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/utils_test.go b/vendor/github.com/moby/moby/pkg/archive/utils_test.go new file mode 100644 index 0000000..01b9e92 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + _, err := ApplyLayer(dest, r) + return err + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. +func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. + return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/whiteouts.go b/vendor/github.com/moby/moby/pkg/archive/whiteouts.go new file mode 100644 index 0000000..d20478a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/moby/moby/pkg/archive/wrap.go b/vendor/github.com/moby/moby/pkg/archive/wrap.go new file mode 100644 index 0000000..b39d12c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/moby/moby/pkg/archive/wrap_test.go b/vendor/github.com/moby/moby/pkg/archive/wrap_test.go new file mode 100644 index 0000000..46ab366 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/wrap_test.go @@ -0,0 +1,98 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" + "testing" +) + +func TestGenerateEmptyFile(t *testing.T) { + archive, err := Generate("emptyFile") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"emptyFile", ""}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} + +func TestGenerateWithContent(t *testing.T) { + archive, err := Generate("file", "content") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"file", "content"}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/api.go b/vendor/github.com/moby/moby/pkg/authorization/api.go new file mode 100644 index 0000000..05c75f1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/api.go @@ -0,0 +1,88 @@ +package authorization + +import ( + "crypto/x509" + "encoding/json" + "encoding/pem" +) + +const ( + // AuthZApiRequest is the url for daemon request authorization + AuthZApiRequest = "AuthZPlugin.AuthZReq" + + // AuthZApiResponse is the url for daemon response authorization + AuthZApiResponse = "AuthZPlugin.AuthZRes" + + // AuthZApiImplements is the name of the interface all AuthZ plugins implement + AuthZApiImplements = "authz" +) + +// PeerCertificate is a wrapper around x509.Certificate which provides a sane +// enconding/decoding to/from PEM format and JSON. +type PeerCertificate x509.Certificate + +// MarshalJSON returns the JSON encoded pem bytes of a PeerCertificate. +func (pc *PeerCertificate) MarshalJSON() ([]byte, error) { + b := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: pc.Raw}) + return json.Marshal(b) +} + +// UnmarshalJSON populates a new PeerCertificate struct from JSON data. +func (pc *PeerCertificate) UnmarshalJSON(b []byte) error { + var buf []byte + if err := json.Unmarshal(b, &buf); err != nil { + return err + } + derBytes, _ := pem.Decode(buf) + c, err := x509.ParseCertificate(derBytes.Bytes) + if err != nil { + return err + } + *pc = PeerCertificate(*c) + return nil +} + +// Request holds data required for authZ plugins +type Request struct { + // User holds the user extracted by AuthN mechanism + User string `json:"User,omitempty"` + + // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) + UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` + + // RequestMethod holds the HTTP method (GET/POST/PUT) + RequestMethod string `json:"RequestMethod,omitempty"` + + // RequestUri holds the full HTTP uri (e.g., /v1.21/version) + RequestURI string `json:"RequestUri,omitempty"` + + // RequestBody stores the raw request body sent to the docker daemon + RequestBody []byte `json:"RequestBody,omitempty"` + + // RequestHeaders stores the raw request headers sent to the docker daemon + RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` + + // RequestPeerCertificates stores the request's TLS peer certificates in PEM format + RequestPeerCertificates []*PeerCertificate `json:"RequestPeerCertificates,omitempty"` + + // ResponseStatusCode stores the status code returned from docker daemon + ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` + + // ResponseBody stores the raw response body sent from docker daemon + ResponseBody []byte `json:"ResponseBody,omitempty"` + + // ResponseHeaders stores the response headers sent to the docker daemon + ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` +} + +// Response represents authZ plugin response +type Response struct { + // Allow indicating whether the user is allowed or not + Allow bool `json:"Allow"` + + // Msg stores the authorization message + Msg string `json:"Msg,omitempty"` + + // Err stores a message in case there's an error + Err string `json:"Err,omitempty"` +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/authz.go b/vendor/github.com/moby/moby/pkg/authorization/authz.go new file mode 100644 index 0000000..dc9a9ae --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/authz.go @@ -0,0 +1,186 @@ +package authorization + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" +) + +const maxBodySize = 1048576 // 1MB + +// NewCtx creates new authZ context, it is used to store authorization information related to a specific docker +// REST http session +// A context provides two method: +// Authenticate Request: +// Call authZ plugins with current REST request and AuthN response +// Request contains full HTTP packet sent to the docker daemon +// https://docs.docker.com/engine/reference/api/ +// +// Authenticate Response: +// Call authZ plugins with full info about current REST request, REST response and AuthN response +// The response from this method may contains content that overrides the daemon response +// This allows authZ plugins to filter privileged content +// +// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results +// For response manipulation, the response from each plugin is piped between plugins. Plugin execution order +// is determined according to daemon parameters +func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { + return &Ctx{ + plugins: authZPlugins, + user: user, + userAuthNMethod: userAuthNMethod, + requestMethod: requestMethod, + requestURI: requestURI, + } +} + +// Ctx stores a single request-response interaction context +type Ctx struct { + user string + userAuthNMethod string + requestMethod string + requestURI string + plugins []Plugin + // authReq stores the cached request object for the current transaction + authReq *Request +} + +// AuthZRequest authorized the request to the docker daemon using authZ plugins +func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { + var body []byte + if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize { + var err error + body, r.Body, err = drainBody(r.Body) + if err != nil { + return err + } + } + + var h bytes.Buffer + if err := r.Header.Write(&h); err != nil { + return err + } + + ctx.authReq = &Request{ + User: ctx.user, + UserAuthNMethod: ctx.userAuthNMethod, + RequestMethod: ctx.requestMethod, + RequestURI: ctx.requestURI, + RequestBody: body, + RequestHeaders: headers(r.Header), + } + + if r.TLS != nil { + for _, c := range r.TLS.PeerCertificates { + pc := PeerCertificate(*c) + ctx.authReq.RequestPeerCertificates = append(ctx.authReq.RequestPeerCertificates, &pc) + } + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZRequest(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return newAuthorizationError(plugin.Name(), authRes.Msg) + } + } + + return nil +} + +// AuthZResponse authorized and manipulates the response from docker daemon using authZ plugins +func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { + ctx.authReq.ResponseStatusCode = rm.StatusCode() + ctx.authReq.ResponseHeaders = headers(rm.Header()) + + if sendBody(ctx.requestURI, rm.Header()) { + ctx.authReq.ResponseBody = rm.RawBody() + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZResponse(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return newAuthorizationError(plugin.Name(), authRes.Msg) + } + } + + rm.FlushAll() + + return nil +} + +// drainBody dump the body (if its length is less than 1MB) without modifying the request state +func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { + bufReader := bufio.NewReaderSize(body, maxBodySize) + newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + data, err := bufReader.Peek(maxBodySize) + // Body size exceeds max body size + if err == nil { + logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize) + return nil, newBody, nil + } + // Body size is less than maximum size + if err == io.EOF { + return data, newBody, nil + } + // Unknown error + return nil, newBody, err +} + +// sendBody returns true when request/response body should be sent to AuthZPlugin +func sendBody(url string, header http.Header) bool { + // Skip body for auth endpoint + if strings.HasSuffix(url, "/auth") { + return false + } + + // body is sent only for text or json messages + return header.Get("Content-Type") == "application/json" +} + +// headers returns flatten version of the http headers excluding authorization +func headers(header http.Header) map[string]string { + v := make(map[string]string, 0) + for k, values := range header { + // Skip authorization headers + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { + continue + } + for _, val := range values { + v[k] = val + } + } + return v +} + +// authorizationError represents an authorization deny error +type authorizationError struct { + error +} + +// HTTPErrorStatusCode returns the authorization error status code (forbidden) +func (e authorizationError) HTTPErrorStatusCode() int { + return http.StatusForbidden +} + +func newAuthorizationError(plugin, msg string) authorizationError { + return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)} +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/authz_unix_test.go b/vendor/github.com/moby/moby/pkg/authorization/authz_unix_test.go new file mode 100644 index 0000000..a787f3c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/authz_unix_test.go @@ -0,0 +1,282 @@ +// +build !windows + +// TODO Windows: This uses a Unix socket for testing. This might be possible +// to port to Windows using a named pipe instead. + +package authorization + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-connections/tlsconfig" + "github.com/gorilla/mux" +) + +const ( + pluginAddress = "authz-test-plugin.sock" +) + +func TestAuthZRequestPluginError(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Err: "an error", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZRequestPlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Allow: true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZResponsePlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestURI: "someting.com/auth", + RequestBody: []byte("sample body"), + } + server.replayResponse = Response{ + Allow: true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZResponse(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestResponseModifier(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + m.FlushAll() + if r.Header().Get("h1") != "v1" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("body")) { + t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusInternalServerError { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +func TestDrainBody(t *testing.T) { + tests := []struct { + length int // length is the message length send to drainBody + expectedBodyLength int // expectedBodyLength is the expected body length after drainBody is called + }{ + {10, 10}, // Small message size + {maxBodySize - 1, maxBodySize - 1}, // Max message size + {maxBodySize * 2, 0}, // Large message size (skip copying body) + + } + + for _, test := range tests { + msg := strings.Repeat("a", test.length) + body, closer, err := drainBody(ioutil.NopCloser(bytes.NewReader([]byte(msg)))) + if err != nil { + t.Fatal(err) + } + if len(body) != test.expectedBodyLength { + t.Fatalf("Body must be copied, actual length: '%d'", len(body)) + } + if closer == nil { + t.Fatal("Closer must not be nil") + } + modified, err := ioutil.ReadAll(closer) + if err != nil { + t.Fatalf("Error must not be nil: '%v'", err) + } + if len(modified) != len(msg) { + t.Fatalf("Result should not be truncated. Original length: '%d', new length: '%d'", len(msg), len(modified)) + } + } +} + +func TestResponseModifierOverride(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + overrideHeader := make(http.Header) + overrideHeader.Add("h1", "v2") + overrideHeaderBytes, err := json.Marshal(overrideHeader) + if err != nil { + t.Fatalf("override header failed %v", err) + } + + m.OverrideHeader(overrideHeaderBytes) + m.OverrideBody([]byte("override body")) + m.OverrideStatusCode(http.StatusNotFound) + m.FlushAll() + if r.Header().Get("h1") != "v2" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("override body")) { + t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusNotFound { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +// createTestPlugin creates a new sample authorization plugin +func createTestPlugin(t *testing.T) *authorizationPlugin { + pwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + client, err := plugins.NewClient("unix:///"+path.Join(pwd, pluginAddress), &tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatalf("Failed to create client %v", err) + } + + return &authorizationPlugin{name: "plugin", plugin: client} +} + +// AuthZPluginTestServer is a simple server that implements the authZ plugin interface +type authZPluginTestServer struct { + listener net.Listener + t *testing.T + // request stores the request sent from the daemon to the plugin + recordedRequest Request + // response stores the response sent from the plugin to the daemon + replayResponse Response + server *httptest.Server +} + +// start starts the test server that implements the plugin +func (t *authZPluginTestServer) start() { + r := mux.NewRouter() + l, err := net.Listen("unix", pluginAddress) + if err != nil { + t.t.Fatal(err) + } + t.listener = l + r.HandleFunc("/Plugin.Activate", t.activate) + r.HandleFunc("/"+AuthZApiRequest, t.auth) + r.HandleFunc("/"+AuthZApiResponse, t.auth) + t.server = &httptest.Server{ + Listener: l, + Config: &http.Server{ + Handler: r, + Addr: pluginAddress, + }, + } + t.server.Start() +} + +// stop stops the test server that implements the plugin +func (t *authZPluginTestServer) stop() { + t.server.Close() + os.Remove(pluginAddress) + if t.listener != nil { + t.listener.Close() + } +} + +// auth is a used to record/replay the authentication api messages +func (t *authZPluginTestServer) auth(w http.ResponseWriter, r *http.Request) { + t.recordedRequest = Request{} + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.t.Fatal(err) + } + r.Body.Close() + json.Unmarshal(body, &t.recordedRequest) + b, err := json.Marshal(t.replayResponse) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} + +func (t *authZPluginTestServer) activate(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{AuthZApiImplements}}) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/middleware.go b/vendor/github.com/moby/moby/pkg/authorization/middleware.go new file mode 100644 index 0000000..52890dd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/middleware.go @@ -0,0 +1,84 @@ +package authorization + +import ( + "net/http" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugingetter" + "golang.org/x/net/context" +) + +// Middleware uses a list of plugins to +// handle authorization in the API requests. +type Middleware struct { + mu sync.Mutex + plugins []Plugin +} + +// NewMiddleware creates a new Middleware +// with a slice of plugins names. +func NewMiddleware(names []string, pg plugingetter.PluginGetter) *Middleware { + SetPluginGetter(pg) + return &Middleware{ + plugins: newPlugins(names), + } +} + +// SetPlugins sets the plugin used for authorization +func (m *Middleware) SetPlugins(names []string) { + m.mu.Lock() + m.plugins = newPlugins(names) + m.mu.Unlock() +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + + m.mu.Lock() + plugins := m.plugins + m.mu.Unlock() + if len(plugins) == 0 { + return handler(ctx, w, r, vars) + } + + user := "" + userAuthNMethod := "" + + // Default authorization using existing TLS connection credentials + // FIXME: Non trivial authorization mechanisms (such as advanced certificate validations, kerberos support + // and ldap) will be extracted using AuthN feature, which is tracked under: + // https://github.com/docker/docker/pull/20883 + if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { + user = r.TLS.PeerCertificates[0].Subject.CommonName + userAuthNMethod = "TLS" + } + + authCtx := NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI) + + if err := authCtx.AuthZRequest(w, r); err != nil { + logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + rw := NewResponseModifier(w) + + var errD error + + if errD = handler(ctx, rw, r, vars); errD != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, errD) + } + + if err := authCtx.AuthZResponse(rw, r); errD == nil && err != nil { + logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + if errD != nil { + return errD + } + + return nil + } +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/plugin.go b/vendor/github.com/moby/moby/pkg/authorization/plugin.go new file mode 100644 index 0000000..4b1c71b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/plugin.go @@ -0,0 +1,112 @@ +package authorization + +import ( + "sync" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +// Plugin allows third party plugins to authorize requests and responses +// in the context of docker API +type Plugin interface { + // Name returns the registered plugin name + Name() string + + // AuthZRequest authorizes the request from the client to the daemon + AuthZRequest(*Request) (*Response, error) + + // AuthZResponse authorizes the response from the daemon to the client + AuthZResponse(*Request) (*Response, error) +} + +// newPlugins constructs and initializes the authorization plugins based on plugin names +func newPlugins(names []string) []Plugin { + plugins := []Plugin{} + pluginsMap := make(map[string]struct{}) + for _, name := range names { + if _, ok := pluginsMap[name]; ok { + continue + } + pluginsMap[name] = struct{}{} + plugins = append(plugins, newAuthorizationPlugin(name)) + } + return plugins +} + +var getter plugingetter.PluginGetter + +// SetPluginGetter sets the plugingetter +func SetPluginGetter(pg plugingetter.PluginGetter) { + getter = pg +} + +// GetPluginGetter gets the plugingetter +func GetPluginGetter() plugingetter.PluginGetter { + return getter +} + +// authorizationPlugin is an internal adapter to docker plugin system +type authorizationPlugin struct { + plugin *plugins.Client + name string + once sync.Once +} + +func newAuthorizationPlugin(name string) Plugin { + return &authorizationPlugin{name: name} +} + +func (a *authorizationPlugin) Name() string { + return a.name +} + +func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiRequest, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiResponse, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +// initPlugin initializes the authorization plugin if needed +func (a *authorizationPlugin) initPlugin() error { + // Lazy loading of plugins + var err error + a.once.Do(func() { + if a.plugin == nil { + var plugin plugingetter.CompatPlugin + var e error + + if pg := GetPluginGetter(); pg != nil { + plugin, e = pg.Get(a.name, AuthZApiImplements, plugingetter.LOOKUP) + } else { + plugin, e = plugins.Get(a.name, AuthZApiImplements) + } + if e != nil { + err = e + return + } + a.plugin = plugin.Client() + } + }) + return err +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/response.go b/vendor/github.com/moby/moby/pkg/authorization/response.go new file mode 100644 index 0000000..129bf2f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/response.go @@ -0,0 +1,203 @@ +package authorization + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "net" + "net/http" + + "github.com/Sirupsen/logrus" +) + +// ResponseModifier allows authorization plugins to read and modify the content of the http.response +type ResponseModifier interface { + http.ResponseWriter + http.Flusher + http.CloseNotifier + + // RawBody returns the current http content + RawBody() []byte + + // RawHeaders returns the current content of the http headers + RawHeaders() ([]byte, error) + + // StatusCode returns the current status code + StatusCode() int + + // OverrideBody replaces the body of the HTTP reply + OverrideBody(b []byte) + + // OverrideHeader replaces the headers of the HTTP reply + OverrideHeader(b []byte) error + + // OverrideStatusCode replaces the status code of the HTTP reply + OverrideStatusCode(statusCode int) + + // FlushAll flushes all data to the HTTP response + FlushAll() error + + // Hijacked indicates the response has been hijacked by the Docker daemon + Hijacked() bool +} + +// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content +func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { + return &responseModifier{rw: rw, header: make(http.Header)} +} + +// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore +// the http request/response from docker daemon +type responseModifier struct { + // The original response writer + rw http.ResponseWriter + // body holds the response body + body []byte + // header holds the response header + header http.Header + // statusCode holds the response status code + statusCode int + // hijacked indicates the request has been hijacked + hijacked bool +} + +func (rm *responseModifier) Hijacked() bool { + return rm.hijacked +} + +// WriterHeader stores the http status code +func (rm *responseModifier) WriteHeader(s int) { + + // Use original request if hijacked + if rm.hijacked { + rm.rw.WriteHeader(s) + return + } + + rm.statusCode = s +} + +// Header returns the internal http header +func (rm *responseModifier) Header() http.Header { + + // Use original header if hijacked + if rm.hijacked { + return rm.rw.Header() + } + + return rm.header +} + +// StatusCode returns the http status code +func (rm *responseModifier) StatusCode() int { + return rm.statusCode +} + +// OverrideBody replaces the body of the HTTP response +func (rm *responseModifier) OverrideBody(b []byte) { + rm.body = b +} + +// OverrideStatusCode replaces the status code of the HTTP response +func (rm *responseModifier) OverrideStatusCode(statusCode int) { + rm.statusCode = statusCode +} + +// OverrideHeader replaces the headers of the HTTP response +func (rm *responseModifier) OverrideHeader(b []byte) error { + header := http.Header{} + if err := json.Unmarshal(b, &header); err != nil { + return err + } + rm.header = header + return nil +} + +// Write stores the byte array inside content +func (rm *responseModifier) Write(b []byte) (int, error) { + + if rm.hijacked { + return rm.rw.Write(b) + } + + rm.body = append(rm.body, b...) + return len(b), nil +} + +// Body returns the response body +func (rm *responseModifier) RawBody() []byte { + return rm.body +} + +func (rm *responseModifier) RawHeaders() ([]byte, error) { + var b bytes.Buffer + if err := rm.header.Write(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Hijack returns the internal connection of the wrapped http.ResponseWriter +func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { + + rm.hijacked = true + rm.FlushAll() + + hijacker, ok := rm.rw.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} + +// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter +func (rm *responseModifier) CloseNotify() <-chan bool { + closeNotifier, ok := rm.rw.(http.CloseNotifier) + if !ok { + logrus.Error("Internal response writer doesn't support the CloseNotifier interface") + return nil + } + return closeNotifier.CloseNotify() +} + +// Flush uses the internal flush API of the wrapped http.ResponseWriter +func (rm *responseModifier) Flush() { + flusher, ok := rm.rw.(http.Flusher) + if !ok { + logrus.Error("Internal response writer doesn't support the Flusher interface") + return + } + + rm.FlushAll() + flusher.Flush() +} + +// FlushAll flushes all data to the HTTP response +func (rm *responseModifier) FlushAll() error { + // Copy the header + for k, vv := range rm.header { + for _, v := range vv { + rm.rw.Header().Add(k, v) + } + } + + // Copy the status code + // Also WriteHeader needs to be done after all the headers + // have been copied (above). + if rm.statusCode > 0 { + rm.rw.WriteHeader(rm.statusCode) + } + + var err error + if len(rm.body) > 0 { + // Write body + _, err = rm.rw.Write(rm.body) + } + + // Clean previous data + rm.body = nil + rm.statusCode = 0 + rm.header = http.Header{} + return err +} diff --git a/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered.go b/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered.go new file mode 100644 index 0000000..784d65d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered.go @@ -0,0 +1,49 @@ +package broadcaster + +import ( + "io" + "sync" +) + +// Unbuffered accumulates multiple io.WriteCloser by stream. +type Unbuffered struct { + mu sync.Mutex + writers []io.WriteCloser +} + +// Add adds new io.WriteCloser. +func (w *Unbuffered) Add(writer io.WriteCloser) { + w.mu.Lock() + w.writers = append(w.writers, writer) + w.mu.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call. +func (w *Unbuffered) Write(p []byte) (n int, err error) { + w.mu.Lock() + var evict []int + for i, sw := range w.writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + evict = append(evict, i) + } + } + for n, i := range evict { + w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) + } + w.mu.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers. Last non-eol-terminated part of data +// will be saved. +func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered_test.go b/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered_test.go new file mode 100644 index 0000000..9f8e72b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered_test.go @@ -0,0 +1,162 @@ +package broadcaster + +import ( + "bytes" + "errors" + "strings" + + "testing" +) + +type dummyWriter struct { + buffer bytes.Buffer + failOnWrite bool +} + +func (dw *dummyWriter) Write(p []byte) (n int, err error) { + if dw.failOnWrite { + return 0, errors.New("Fake fail") + } + return dw.buffer.Write(p) +} + +func (dw *dummyWriter) String() string { + return dw.buffer.String() +} + +func (dw *dummyWriter) Close() error { + return nil +} + +func TestUnbuffered(t *testing.T) { + writer := new(Unbuffered) + + // Test 1: Both bufferA and bufferB should contain "foo" + bufferA := &dummyWriter{} + writer.Add(bufferA) + bufferB := &dummyWriter{} + writer.Add(bufferB) + writer.Write([]byte("foo")) + + if bufferA.String() != "foo" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foo" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + // Test2: bufferA and bufferB should contain "foobar", + // while bufferC should only contain "bar" + bufferC := &dummyWriter{} + writer.Add(bufferC) + writer.Write([]byte("bar")) + + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + if bufferC.String() != "bar" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test3: Test eviction on failure + bufferA.failOnWrite = true + writer.Write([]byte("fail")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfail" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + // Even though we reset the flag, no more writes should go in there + bufferA.failOnWrite = false + writer.Write([]byte("test")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfailtest" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test4: Test eviction on multiple simultaneous failures + bufferB.failOnWrite = true + bufferC.failOnWrite = true + bufferD := &dummyWriter{} + writer.Add(bufferD) + writer.Write([]byte("yo")) + writer.Write([]byte("ink")) + if strings.Contains(bufferB.String(), "yoink") { + t.Errorf("bufferB received write. contents: %q", bufferB) + } + if strings.Contains(bufferC.String(), "yoink") { + t.Errorf("bufferC received write. contents: %q", bufferC) + } + if g, w := bufferD.String(), "yoink"; g != w { + t.Errorf("bufferD = %q, want %q", g, w) + } + + writer.Clean() +} + +type devNullCloser int + +func (d devNullCloser) Close() error { + return nil +} + +func (d devNullCloser) Write(buf []byte) (int, error) { + return len(buf), nil +} + +// This test checks for races. It is only useful when run with the race detector. +func TestRaceUnbuffered(t *testing.T) { + writer := new(Unbuffered) + c := make(chan bool) + go func() { + writer.Add(devNullCloser(0)) + c <- true + }() + writer.Write([]byte("hello")) + <-c +} + +func BenchmarkUnbuffered(b *testing.B) { + writer := new(Unbuffered) + setUpWriter := func() { + for i := 0; i < 100; i++ { + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + } + } + testLine := "Line that thinks that it is log line from docker" + var buf bytes.Buffer + for i := 0; i < 100; i++ { + buf.Write([]byte(testLine + "\n")) + } + // line without eol + buf.Write([]byte(testLine)) + testText := buf.Bytes() + b.SetBytes(int64(5 * len(testText))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + setUpWriter() + b.StartTimer() + + for j := 0; j < 5; j++ { + if _, err := writer.Write(testText); err != nil { + b.Fatal(err) + } + } + + b.StopTimer() + writer.Clean() + b.StartTimer() + } +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/archive.go b/vendor/github.com/moby/moby/pkg/chrootarchive/archive.go new file mode 100644 index 0000000..a7814f5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/archive.go @@ -0,0 +1,97 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +var chrootArchiver = &archive.Archiver{Untar: Untar} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/archive_test.go b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_test.go new file mode 100644 index 0000000..d2d7e62 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_test.go @@ -0,0 +1,394 @@ +package chrootarchive + +import ( + "bytes" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" +) + +func init() { + reexec.Init() +} + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} + +// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of +// local images) +func TestChrootUntarWithHugeExcludesList(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + options := &archive.TarOptions{} + //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow + //on most systems when passed via environment or command line arguments + excludes := make([]string, 65534, 65534) + for i := 0; i < 65534; i++ { + excludes[i] = strings.Repeat(string(i), 64) + } + options.ExcludePatterns = excludes + if err := Untar(stream, dest, options); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarEmptyArchive(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := Untar(nil, tmpdir, nil); err == nil { + t.Fatal("expected error on empty archive") + } +} + +func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeSymLinks { + if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func getHash(filename string) (uint32, error) { + stream, err := ioutil.ReadFile(filename) + if err != nil { + return 0, err + } + hash := crc32.NewIEEE() + hash.Write(stream) + return hash.Sum32(), nil +} + +func compareDirectories(src string, dest string) error { + changes, err := archive.ChangesDirs(dest, src) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("Unexpected differences after untar: %v", changes) + } + return nil +} + +func compareFiles(src string, dest string) error { + srcHash, err := getHash(src) + if err != nil { + return err + } + destHash, err := getHash(dest) + if err != nil { + return err + } + if srcHash != destHash { + return fmt.Errorf("%s is different from %s", src, dest) + } + return nil +} + +func TestChrootTarUntarWithSymlink(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := TarUntar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyWithTar(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("Failing on Windows and Solaris") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyWithTar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyFileWithTar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyFileWithTar(src, dest); err == nil { + t.Fatal("Expected error on copying directory") + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyFileWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarPath(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + // Untar a directory + if err := UntarPath(src, dest); err == nil { + t.Fatal("Expected error on untaring a directory") + } + + // Untar a tar file + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(stream) + tarfile := filepath.Join(tmpdir, "src.tar") + if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil { + t.Fatal(err) + } + if err := UntarPath(tarfile, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +type slowEmptyTarReader struct { + size int + offset int + chunkSize int +} + +// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") +func (s *slowEmptyTarReader) Read(p []byte) (int, error) { + time.Sleep(100 * time.Millisecond) + count := s.chunkSize + if len(p) < s.chunkSize { + count = len(p) + } + for i := 0; i < count; i++ { + p[i] = 0 + } + s.offset += count + if s.offset > s.size { + return count, io.EOF + } + return count, nil +} + +func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := Untar(stream, dest, nil); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyDotDotFile(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/archive_unix.go b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_unix.go new file mode 100644 index 0000000..f2325ab --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/archive_windows.go b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_windows.go new file mode 100644 index 0000000..0a500ed --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 0000000..f9d7fed --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,108 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. +// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := syscall.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 0000000..16354bf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive + +import "syscall" + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/diff.go b/vendor/github.com/moby/moby/pkg/chrootarchive/diff.go new file mode 100644 index 0000000..49acad7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/diff_unix.go b/vendor/github.com/moby/moby/pkg/chrootarchive/diff_unix.go new file mode 100644 index 0000000..eb0aacc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. +func applyLayer() { + + var ( + tmpDir = "" + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/diff_windows.go b/vendor/github.com/moby/moby/pkg/chrootarchive/diff_windows.go new file mode 100644 index 0000000..9dd9988 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) + } + + return s, nil +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/init_unix.go b/vendor/github.com/moby/moby/pkg/chrootarchive/init_unix.go new file mode 100644 index 0000000..4f637f1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/init_windows.go b/vendor/github.com/moby/moby/pkg/chrootarchive/init_windows.go new file mode 100644 index 0000000..fa17c9b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper.go new file mode 100644 index 0000000..94b5530 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper.go @@ -0,0 +1,828 @@ +// +build linux + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// DevmapperLogger defines methods for logging with devicemapper. +type DevmapperLogger interface { + DMLog(level int, file string, line int, dmError int, message string) +} + +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + deviceInfo + deviceDeps + deviceRename + deviceVersion + deviceStatus + deviceTable + deviceWaitevent + deviceList + deviceClear + deviceMknodes + deviceListVersions + deviceTargetMsg + deviceSetGeometry +) + +const ( + addNodeOnResume AddNodeType = iota + addNodeOnCreate +) + +// List of errors returned when using devicemapper. +var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") + ErrBusy = errors.New("Device is Busy") + ErrDeviceIDExists = errors.New("Device Id Exists") + ErrEnxio = errors.New("No such device or address") +) + +var ( + dmSawBusy bool + dmSawExist bool + dmSawEnxio bool // No Such Device or Address +) + +type ( + // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl + // command to execute. + Task struct { + unmanaged *cdmTask + } + // Deps represents dependents (layer) of a device. + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } + // Info represents information about a device. + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + DeferredRemove int + } + // TaskType represents a type of task + TaskType int + // AddNodeType represents a type of node to be added + AddNodeType int +) + +// DeviceIDExists returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. Replacing it with more robust implementation +// is desirable. +func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. +func UdevWait(cookie *uint) error { + if res := DmUdevWait(*cookie); res != 1 { + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) + return ErrUdevWait + } + return nil +} + +// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger + +// LogInit initializes the logger for the device mapper library. +func LogInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +// SetDevDir sets the dev folder for the device mapper library (usually /dev). +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + logrus.Debug("devicemapper: Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +// GetLibraryVersion returns the device mapper library version. +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// UdevSyncSupported returns whether device-mapper is able to sync with udev +// +// This is essential otherwise race conditions can arise where both udev and +// device-mapper attempt to create and destroy devices. +func UdevSyncSupported() bool { + return DmUdevGetSyncSupport() != 0 +} + +// UdevSetSyncSupport allows setting whether the udev sync should be enabled. +// The return bool indicates the state of whether the sync is enabled. +func UdevSetSyncSupport(enable bool) bool { + if enable { + DmUdevSetSyncSupport(1) + } else { + DmUdevSetSyncSupport(0) + } + + return UdevSyncSupported() +} + +// CookieSupported returns whether the version of device-mapper supports the +// use of cookie's in the tasks. +// This is largely a lower level call that other functions use. +func CookieSupported() bool { + return DmCookieSupported() != 0 +} + +// RemoveDevice is a useful helper for cleaning up a device. +func RemoveDevice(name string) error { + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + defer UdevWait(&cookie) + + dmSawBusy = false // reset before the task is run + if err = task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) + } + + return nil +} + +// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. +func RemoveDeviceDeferred(name string) error { + logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) + defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { + return ErrTaskDeferredRemove + } + + // set a task cookie and disable library fallback, or else libdevmapper will + // disable udev dm rules and delete the symlink under /dev/mapper by itself, + // even if the removal is deferred by the kernel. + var cookie uint + var flags uint16 + flags = DmUdevDisableLibraryFallback + if err := task.setCookie(&cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + + // libdevmapper and udev relies on System V semaphore for synchronization, + // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. + // So these two function call must come in pairs, otherwise semaphores will + // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` + // will be reached, which will eventually make all follwing calls to 'task.SetCookie' + // fail. + // this call will not wait for the deferred removal's final executing, since no + // udev event will be generated, and the semaphore's value will not be incremented + // by udev, what UdevWait is just cleaning up the semaphore. + defer UdevWait(&cookie) + + if err = task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) + } + + return nil +} + +// CancelDeferredRemove cancels a deferred remove for a device. +func CancelDeferredRemove(deviceName string) error { + task, err := TaskCreateNamed(deviceTargetMsg, deviceName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + dmSawEnxio = false + if err := task.run(); err != nil { + // A device might be being deleted already + if dmSawBusy { + return ErrBusy + } else if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) + + } + return nil +} + +// GetBlockDeviceSize returns the size of a block device identified by the specified file. +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +// BlockDeviceDiscard runs discard for the given path. +// This is used as a workaround for the kernel not discarding block so +// on the thin pool when we remove a thinp device, so we do it +// manually +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. + syscall.Sync() + + return nil +} + +// CreatePool is the programmatic example of "dmsetup create". +// It creates a device with the specified poolName, data and metadata file and block size. +func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + var cookie uint + var flags uint16 + flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.setCookie(&cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) + } + + return nil +} + +// ReloadPool is the programmatic example of "dmsetup reload". +// It reloads the table with the specified poolName, data and metadata file and block size. +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. +func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. +func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. +func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. +func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err) + } + + return nil +} + +// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, +func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + + if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { + if doSuspend { + if err2 := ResumeDevice(baseName); err2 != nil { + return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2) + } + } + return err + } + + if doSuspend { + if err := ResumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_log.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_log.go new file mode 100644 index 0000000..8477e36 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_log.go @@ -0,0 +1,35 @@ +// +build linux + +package devicemapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +// DevmapperLogCallback exports the devmapper log callback for cgo. +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + + if strings.Contains(msg, "No such device or address") { + dmSawEnxio = true + } + } + + if dmLogger != nil { + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) + } +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper.go new file mode 100644 index 0000000..91fbc85 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper.go @@ -0,0 +1,251 @@ +// +build linux + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include +#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +type ( + cdmTask C.struct_dm_task +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD +) + +// Devicemapper cookie flags. +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + hdr := reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), + Len: int(Cdeps.count), + Cap: int(Cdeps.count), + } + devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range devices { + deps.Device = append(deps.Device, uint64(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *cdmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) + return nextp +} + +func dmUdevSetSyncSupportFct(syncWithUdev int) { + (C.dm_udev_set_sync_support(C.int(syncWithUdev))) +} + +func dmUdevGetSyncSupportFct() int { + return int(C.dm_udev_get_sync_support()) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_deferred_remove.go new file mode 100644 index 0000000..dc361ea --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -0,0 +1,34 @@ +// +build linux,!libdm_no_deferred_remove + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include +*/ +import "C" + +// LibraryDeferredRemovalSupport is supported when statically linked. +const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 0000000..8249ccf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalSupport is not supported when statically linked. +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. + return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/ioctl.go b/vendor/github.com/moby/moby/pkg/devicemapper/ioctl.go new file mode 100644 index 0000000..581b57e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/ioctl.go @@ -0,0 +1,27 @@ +// +build linux + +package devicemapper + +import ( + "syscall" + "unsafe" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/log.go b/vendor/github.com/moby/moby/pkg/devicemapper/log.go new file mode 100644 index 0000000..cee5e54 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/vendor/github.com/moby/moby/pkg/directory/directory.go b/vendor/github.com/moby/moby/pkg/directory/directory.go new file mode 100644 index 0000000..1715ef4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/directory/directory.go @@ -0,0 +1,26 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + + infos, err := ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/directory/directory_test.go b/vendor/github.com/moby/moby/pkg/directory/directory_test.go new file mode 100644 index 0000000..2b7a465 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/directory/directory_test.go @@ -0,0 +1,192 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "testing" +) + +// Size of an empty directory should be 0 +func TestSizeEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("empty directory has size: %d", size) + } +} + +// Size of a directory with one empty file should be 0 +func TestSizeEmptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + var size int64 + if size, _ = Size(file.Name()); size != 0 { + t.Fatalf("directory with one file has size: %d", size) + } +} + +// Size of a directory with one 5-byte file should be 5 +func TestSizeNonemptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{97, 98, 99, 100, 101} + file.Write(d) + + var size int64 + if size, _ = Size(file.Name()); size != 5 { + t.Fatalf("directory with one 5-byte file has size: %d", size) + } +} + +// Size of a directory with one empty directory should be 0 +func TestSizeNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("directory with one empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 empty directory +func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{100, 111, 99, 107, 101, 114} + file.Write(d) + + var size int64 + if size, _ = Size(dir); size != 6 { + t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 non-empty directory +func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { + var dir, dirNested string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + data := []byte{100, 111, 99, 107, 101, 114} + file.Write(data) + + var nestedFile *os.File + if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil { + t.Fatalf("failed to create file in nested directory: %s", err) + } + + nestedData := []byte{100, 111, 99, 107, 101, 114} + nestedFile.Write(nestedData) + + var size int64 + if size, _ = Size(dir); size != 12 { + t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) + } +} + +// Test migration of directory to a subdir underneath itself +func TestMoveToSubdir(t *testing.T) { + var outerDir, subDir string + var err error + + if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil { + t.Fatalf("failed to create directory: %v", err) + } + + if subDir, err = ioutil.TempDir(outerDir, "testSub"); err != nil { + t.Fatalf("failed to create subdirectory: %v", err) + } + + // write 4 temp files in the outer dir to get moved + filesList := []string{"a", "b", "c", "d"} + for _, fName := range filesList { + if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil { + t.Fatalf("couldn't create temp file %q: %v", fName, err) + } else { + file.WriteString(fName) + file.Close() + } + } + + if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil { + t.Fatalf("Error during migration of content to subdirectory: %v", err) + } + // validate that the files were moved to the subdirectory + infos, err := ioutil.ReadDir(subDir) + if err != nil { + t.Fatal(err) + } + if len(infos) != 4 { + t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos)) + } + var results []string + for _, info := range infos { + results = append(results, info.Name()) + } + sort.Sort(sort.StringSlice(results)) + if !reflect.DeepEqual(filesList, results) { + t.Fatalf("Results after migration do not equal list of files: expected: %v, got: %v", filesList, results) + } +} + +// Test a non-existing directory +func TestSizeNonExistingDirectory(t *testing.T) { + if _, err := Size("/thisdirectoryshouldnotexist/TestSizeNonExistingDirectory"); err == nil { + t.Fatalf("error is expected") + } +} diff --git a/vendor/github.com/moby/moby/pkg/directory/directory_unix.go b/vendor/github.com/moby/moby/pkg/directory/directory_unix.go new file mode 100644 index 0000000..397251b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/directory/directory_unix.go @@ -0,0 +1,48 @@ +// +build linux freebsd solaris + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. + data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/moby/moby/pkg/directory/directory_windows.go b/vendor/github.com/moby/moby/pkg/directory/directory_windows.go new file mode 100644 index 0000000..6fb0917 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/directory/directory_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package directory + +import ( + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/README.md b/vendor/github.com/moby/moby/pkg/discovery/README.md new file mode 100644 index 0000000..39777c2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/README.md @@ -0,0 +1,41 @@ +--- +page_title: Docker discovery +page_description: discovery +page_keywords: docker, clustering, discovery +--- + +# Discovery + +Docker comes with multiple Discovery backends. + +## Backends + +### Using etcd + +Point your Docker Engine instances to a common etcd instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ docker daemon -H= --cluster-advertise= --cluster-store etcd://,/ +``` + +### Using consul + +Point your Docker Engine instances to a common Consul instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ docker daemon -H= --cluster-advertise= --cluster-store consul:/// +``` + +### Using zookeeper + +Point your Docker Engine instances to a common Zookeeper instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ docker daemon -H= --cluster-advertise= --cluster-store zk://,/ +``` diff --git a/vendor/github.com/moby/moby/pkg/discovery/backends.go b/vendor/github.com/moby/moby/pkg/discovery/backends.go new file mode 100644 index 0000000..2eab550 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/backends.go @@ -0,0 +1,107 @@ +package discovery + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/Sirupsen/logrus" +) + +var ( + // Backends is a global map of discovery backends indexed by their + // associated scheme. + backends = make(map[string]Backend) +) + +// Register makes a discovery backend available by the provided scheme. +// If Register is called twice with the same scheme an error is returned. +func Register(scheme string, d Backend) error { + if _, exists := backends[scheme]; exists { + return fmt.Errorf("scheme already registered %s", scheme) + } + logrus.WithField("name", scheme).Debugf("Registering discovery service") + backends[scheme] = d + return nil +} + +func parse(rawurl string) (string, string) { + parts := strings.SplitN(rawurl, "://", 2) + + // nodes:port,node2:port => nodes://node1:port,node2:port + if len(parts) == 1 { + return "nodes", parts[0] + } + return parts[0], parts[1] +} + +// ParseAdvertise parses the --cluster-advertise daemon config which accepts +// : or : +func ParseAdvertise(advertise string) (string, error) { + var ( + iface *net.Interface + addrs []net.Addr + err error + ) + + addr, port, err := net.SplitHostPort(advertise) + + if err != nil { + return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err) + } + + ip := net.ParseIP(addr) + // If it is a valid ip-address, use it as is + if ip != nil { + return advertise, nil + } + + // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise + ifaceName := addr + iface, err = net.InterfaceByName(ifaceName) + if err != nil { + return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err) + } + + addrs, err = iface.Addrs() + if err != nil { + return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err) + } + + if len(addrs) == 0 { + return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise) + } + + addr = "" + for _, a := range addrs { + ip, _, err := net.ParseCIDR(a.String()) + if err != nil { + return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err) + } + if ip.To4() == nil || ip.IsLoopback() { + continue + } + addr = ip.String() + break + } + if addr == "" { + return "", fmt.Errorf("could not find a valid ip-address in interface %s", advertise) + } + + addr = net.JoinHostPort(addr, port) + return addr, nil +} + +// New returns a new Discovery given a URL, heartbeat and ttl settings. +// Returns an error if the URL scheme is not supported. +func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) { + scheme, uri := parse(rawurl) + if backend, exists := backends[scheme]; exists { + logrus.WithFields(logrus.Fields{"name": scheme, "uri": uri}).Debugf("Initializing discovery service") + err := backend.Initialize(uri, heartbeat, ttl, clusterOpts) + return backend, err + } + + return nil, ErrNotSupported +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/discovery.go b/vendor/github.com/moby/moby/pkg/discovery/discovery.go new file mode 100644 index 0000000..ca7f587 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/discovery.go @@ -0,0 +1,35 @@ +package discovery + +import ( + "errors" + "time" +) + +var ( + // ErrNotSupported is returned when a discovery service is not supported. + ErrNotSupported = errors.New("discovery service not supported") + + // ErrNotImplemented is returned when discovery feature is not implemented + // by discovery backend. + ErrNotImplemented = errors.New("not implemented in this discovery service") +) + +// Watcher provides watching over a cluster for nodes joining and leaving. +type Watcher interface { + // Watch the discovery for entry changes. + // Returns a channel that will receive changes or an error. + // Providing a non-nil stopCh can be used to stop watching. + Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) +} + +// Backend is implemented by discovery backends which manage cluster entries. +type Backend interface { + // Watcher must be provided by every backend. + Watcher + + // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. + Initialize(string, time.Duration, time.Duration, map[string]string) error + + // Register to the discovery. + Register(string) error +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/discovery_test.go b/vendor/github.com/moby/moby/pkg/discovery/discovery_test.go new file mode 100644 index 0000000..6084f3e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/discovery_test.go @@ -0,0 +1,137 @@ +package discovery + +import ( + "testing" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestNewEntry(c *check.C) { + entry, err := NewEntry("127.0.0.1:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "127.0.0.1:2375") + + entry, err = NewEntry("[2001:db8:0:f101::2]:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "[2001:db8:0:f101::2]:2375") + + _, err = NewEntry("127.0.0.1") + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestParse(c *check.C) { + scheme, uri := parse("127.0.0.1:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("localhost:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("scheme://127.0.0.1:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("scheme://localhost:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "") +} + +func (s *DiscoverySuite) TestCreateEntries(c *check.C) { + entries, err := CreateEntries(nil) + c.Assert(entries, check.DeepEquals, Entries{}) + c.Assert(err, check.IsNil) + + entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""}) + c.Assert(err, check.IsNil) + expected := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "2001:db8:0:f101::2", Port: "2375"}, + } + c.Assert(entries.Equals(expected), check.Equals, true) + + _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestContainsEntry(c *check.C) { + entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) + c.Assert(err, check.IsNil) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false) +} + +func (s *DiscoverySuite) TestEntriesEquality(c *check.C) { + entries := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + } + + // Same + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + }), check. + Equals, true) + + // Different size + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "127.0.0.3", Port: "2375"}, + }), check. + Equals, false) + + // Different content + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.42", Port: "2375"}, + }), check. + Equals, false) + +} + +func (s *DiscoverySuite) TestEntriesDiff(c *check.C) { + entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} + entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} + entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} + entries := Entries{entry1, entry2} + + // No diff + added, removed := entries.Diff(Entries{entry2, entry1}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 0) + + // Add + added, removed = entries.Diff(Entries{entry2, entry3, entry1}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 0) + + // Remove + added, removed = entries.Diff(Entries{entry2}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry1), check.Equals, true) + + // Add and remove + added, removed = entries.Diff(Entries{entry1, entry3}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry2), check.Equals, true) +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/entry.go b/vendor/github.com/moby/moby/pkg/discovery/entry.go new file mode 100644 index 0000000..ce23bbf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/entry.go @@ -0,0 +1,94 @@ +package discovery + +import "net" + +// NewEntry creates a new entry. +func NewEntry(url string) (*Entry, error) { + host, port, err := net.SplitHostPort(url) + if err != nil { + return nil, err + } + return &Entry{host, port}, nil +} + +// An Entry represents a host. +type Entry struct { + Host string + Port string +} + +// Equals returns true if cmp contains the same data. +func (e *Entry) Equals(cmp *Entry) bool { + return e.Host == cmp.Host && e.Port == cmp.Port +} + +// String returns the string form of an entry. +func (e *Entry) String() string { + return net.JoinHostPort(e.Host, e.Port) +} + +// Entries is a list of *Entry with some helpers. +type Entries []*Entry + +// Equals returns true if cmp contains the same data. +func (e Entries) Equals(cmp Entries) bool { + // Check if the file has really changed. + if len(e) != len(cmp) { + return false + } + for i := range e { + if !e[i].Equals(cmp[i]) { + return false + } + } + return true +} + +// Contains returns true if the Entries contain a given Entry. +func (e Entries) Contains(entry *Entry) bool { + for _, curr := range e { + if curr.Equals(entry) { + return true + } + } + return false +} + +// Diff compares two entries and returns the added and removed entries. +func (e Entries) Diff(cmp Entries) (Entries, Entries) { + added := Entries{} + for _, entry := range cmp { + if !e.Contains(entry) { + added = append(added, entry) + } + } + + removed := Entries{} + for _, entry := range e { + if !cmp.Contains(entry) { + removed = append(removed, entry) + } + } + + return added, removed +} + +// CreateEntries returns an array of entries based on the given addresses. +func CreateEntries(addrs []string) (Entries, error) { + entries := Entries{} + if addrs == nil { + return entries, nil + } + + for _, addr := range addrs { + if len(addr) == 0 { + continue + } + entry, err := NewEntry(addr) + if err != nil { + return nil, err + } + entries = append(entries, entry) + } + return entries, nil +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/file/file.go b/vendor/github.com/moby/moby/pkg/discovery/file/file.go new file mode 100644 index 0000000..2b8e27b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/file/file.go @@ -0,0 +1,107 @@ +package file + +import ( + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + heartbeat time.Duration + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("file", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error { + s.path = path + s.heartbeat = heartbeat + return nil +} + +func parseFileContent(content []byte) []string { + var result []string + for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { + line = strings.TrimSpace(line) + // Ignoring line starts with # + if strings.HasPrefix(line, "#") { + continue + } + // Inlined # comment also ignored. + if strings.Contains(line, "#") { + line = line[0:strings.Index(line, "#")] + // Trim additional spaces caused by above stripping. + line = strings.TrimSpace(line) + } + result = append(result, discovery.Generate(line)...) + } + return result +} + +func (s *Discovery) fetch() (discovery.Entries, error) { + fileContent, err := ioutil.ReadFile(s.path) + if err != nil { + return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) + } + return discovery.CreateEntries(parseFileContent(fileContent)) +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + currentEntries, err := s.fetch() + if err != nil { + errCh <- err + } else { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + newEntries, err := s.fetch() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. + if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/file/file_test.go b/vendor/github.com/moby/moby/pkg/discovery/file/file_test.go new file mode 100644 index 0000000..667f00b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/file/file_test.go @@ -0,0 +1,114 @@ +package file + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("/path/to/file", 1000, 0, nil) + c.Assert(d.path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestNew(c *check.C) { + d, err := discovery.New("file:///path/to/file", 0, 0, nil) + c.Assert(err, check.IsNil) + c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestContent(c *check.C) { + data := ` +1.1.1.[1:2]:1111 +2.2.2.[2:4]:2222 +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 5) + c.Assert(ips[0], check.Equals, "1.1.1.1:1111") + c.Assert(ips[1], check.Equals, "1.1.1.2:1111") + c.Assert(ips[2], check.Equals, "2.2.2.2:2222") + c.Assert(ips[3], check.Equals, "2.2.2.3:2222") + c.Assert(ips[4], check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + discovery := &Discovery{path: "/path/to/file"} + c.Assert(discovery.Register("0.0.0.0"), check.NotNil) +} + +func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) { + data := ` +### test ### +1.1.1.1:1111 # inline comment +# 2.2.2.2:2222 + ### empty line with comment + 3.3.3.3:3333 +### test ### +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 2) + c.Assert("1.1.1.1:1111", check.Equals, ips[0]) + c.Assert("3.3.3.3:3333", check.Equals, ips[1]) +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + data := ` +1.1.1.1:1111 +2.2.2.2:2222 +` + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + // Create a temporary file and remove it. + tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test") + c.Assert(err, check.IsNil) + c.Assert(tmp.Close(), check.IsNil) + c.Assert(os.Remove(tmp.Name()), check.IsNil) + + // Set up file discovery. + d := &Discovery{} + d.Initialize(tmp.Name(), 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // Make sure it fires errors since the file doesn't exist. + c.Assert(<-errCh, check.NotNil) + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Write the file and make sure we get the expected value back. + c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry and look it up. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) + c.Assert(err, check.IsNil) + c.Assert(f, check.NotNil) + _, err = f.WriteString("\n3.3.3.3:3333\n") + c.Assert(err, check.IsNil) + f.Close() + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/generator.go b/vendor/github.com/moby/moby/pkg/discovery/generator.go new file mode 100644 index 0000000..d222982 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/generator.go @@ -0,0 +1,35 @@ +package discovery + +import ( + "fmt" + "regexp" + "strconv" +) + +// Generate takes care of IP generation +func Generate(pattern string) []string { + re, _ := regexp.Compile(`\[(.+):(.+)\]`) + submatch := re.FindStringSubmatch(pattern) + if submatch == nil { + return []string{pattern} + } + + from, err := strconv.Atoi(submatch[1]) + if err != nil { + return []string{pattern} + } + to, err := strconv.Atoi(submatch[2]) + if err != nil { + return []string{pattern} + } + + template := re.ReplaceAllString(pattern, "%d") + + var result []string + for val := from; val <= to; val++ { + entry := fmt.Sprintf(template, val) + result = append(result, entry) + } + + return result +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/generator_test.go b/vendor/github.com/moby/moby/pkg/discovery/generator_test.go new file mode 100644 index 0000000..6281c46 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/generator_test.go @@ -0,0 +1,53 @@ +package discovery + +import ( + "github.com/go-check/check" +) + +func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) { + ips := Generate("127.0.0.1") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1") +} + +func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) { + ips := Generate("127.0.0.1:8080") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1:8080") +} + +func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) { + ips := Generate("127.0.0.[1]") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.[1]") +} + +func (s *DiscoverySuite) TestGeneratorWithPort(c *check.C) { + ips := Generate("127.0.0.[1:11]:2375") + c.Assert(len(ips), check.Equals, 11) + c.Assert(ips[0], check.Equals, "127.0.0.1:2375") + c.Assert(ips[1], check.Equals, "127.0.0.2:2375") + c.Assert(ips[2], check.Equals, "127.0.0.3:2375") + c.Assert(ips[3], check.Equals, "127.0.0.4:2375") + c.Assert(ips[4], check.Equals, "127.0.0.5:2375") + c.Assert(ips[5], check.Equals, "127.0.0.6:2375") + c.Assert(ips[6], check.Equals, "127.0.0.7:2375") + c.Assert(ips[7], check.Equals, "127.0.0.8:2375") + c.Assert(ips[8], check.Equals, "127.0.0.9:2375") + c.Assert(ips[9], check.Equals, "127.0.0.10:2375") + c.Assert(ips[10], check.Equals, "127.0.0.11:2375") +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) { + malformedInput := "127.0.0.[x:11]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) { + malformedInput := "127.0.0.[1:x]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/kv/kv.go b/vendor/github.com/moby/moby/pkg/discovery/kv/kv.go new file mode 100644 index 0000000..77eee7d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/kv/kv.go @@ -0,0 +1,192 @@ +package kv + +import ( + "fmt" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libkv/store/consul" + "github.com/docker/libkv/store/etcd" + "github.com/docker/libkv/store/zookeeper" +) + +const ( + defaultDiscoveryPath = "docker/nodes" +) + +// Discovery is exported +type Discovery struct { + backend store.Backend + store store.Store + heartbeat time.Duration + ttl time.Duration + prefix string + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + // Register to libkv + zookeeper.Register() + consul.Register() + etcd.Register() + + // Register to internal discovery service + discovery.Register("zk", &Discovery{backend: store.ZK}) + discovery.Register("consul", &Discovery{backend: store.CONSUL}) + discovery.Register("etcd", &Discovery{backend: store.ETCD}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { + var ( + parts = strings.SplitN(uris, "/", 2) + addrs = strings.Split(parts[0], ",") + err error + ) + + // A custom prefix to the path can be optionally used. + if len(parts) == 2 { + s.prefix = parts[1] + } + + s.heartbeat = heartbeat + s.ttl = ttl + + // Use a custom path if specified in discovery options + dpath := defaultDiscoveryPath + if clusterOpts["kv.path"] != "" { + dpath = clusterOpts["kv.path"] + } + + s.path = path.Join(s.prefix, dpath) + + var config *store.Config + if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { + logrus.Info("Initializing discovery with TLS") + tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ + CAFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }) + if err != nil { + return err + } + config = &store.Config{ + // Set ClientTLS to trigger https (bug in libkv/etcd) + ClientTLS: &store.ClientTLSConfig{ + CACertFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }, + // The actual TLS config that will be used + TLS: tlsConfig, + } + } else { + logrus.Info("Initializing discovery without TLS") + } + + // Creates a new store, will ignore options given + // if not supported by the chosen store + s.store, err = libkv.NewStore(s.backend, addrs, config) + return err +} + +// Watch the store until either there's a store error or we receive a stop request. +// Returns false if we shouldn't attempt watching the store anymore (stop request received). +func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { + for { + select { + case pairs := <-watchCh: + if pairs == nil { + return true + } + + logrus.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) + + // Convert `KVPair` into `discovery.Entry`. + addrs := make([]string, len(pairs)) + for _, pair := range pairs { + addrs = append(addrs, string(pair.Value)) + } + + entries, err := discovery.CreateEntries(addrs) + if err != nil { + errCh <- err + } else { + discoveryCh <- entries + } + case <-stopCh: + // We were requested to stop watching. + return false + } + } +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + + go func() { + defer close(ch) + defer close(errCh) + + // Forever: Create a store watch, watch until we get an error and then try again. + // Will only stop if we receive a stopCh request. + for { + // Create the path to watch if it does not exist yet + exists, err := s.store.Exists(s.path) + if err != nil { + errCh <- err + } + if !exists { + if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { + errCh <- err + } + } + + // Set up a watch. + watchCh, err := s.store.WatchTree(s.path, stopCh) + if err != nil { + errCh <- err + } else { + if !s.watchOnce(stopCh, watchCh, ch, errCh) { + return + } + } + + // If we get here it means the store watch channel was closed. This + // is unexpected so let's retry later. + errCh <- fmt.Errorf("Unexpected watch error") + time.Sleep(s.heartbeat) + } + }() + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + opts := &store.WriteOptions{TTL: s.ttl} + return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) +} + +// Store returns the underlying store used by KV discovery. +func (s *Discovery) Store() store.Store { + return s.store +} + +// Prefix returns the store prefix +func (s *Discovery) Prefix() string { + return s.prefix +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/kv/kv_test.go b/vendor/github.com/moby/moby/pkg/discovery/kv/kv_test.go new file mode 100644 index 0000000..dab3939 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/kv/kv_test.go @@ -0,0 +1,324 @@ +package kv + +import ( + "errors" + "io/ioutil" + "os" + "path" + "testing" + "time" + + "github.com/docker/docker/pkg/discovery" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (ds *DiscoverySuite) TestInitialize(c *check.C) { + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1"}, + } + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1", 0, 0, nil) + d.store = storeMock + + s := d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1") + c.Assert(d.path, check.Equals, defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 3) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234") + c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234") + + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) +} + +// Extremely limited mock store so we can test initialization +type Mock struct { + // Endpoints passed to InitializeMock + Endpoints []string + + // Options passed to InitializeMock + Options *store.Config +} + +func NewMock(endpoints []string, options *store.Config) (store.Store, error) { + s := &Mock{} + s.Endpoints = endpoints + s.Options = options + return s, nil +} +func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { + return errors.New("Put not supported") +} +func (s *Mock) Get(key string) (*store.KVPair, error) { + return nil, errors.New("Get not supported") +} +func (s *Mock) Delete(key string) error { + return errors.New("Delete not supported") +} + +// Exists mock +func (s *Mock) Exists(key string) (bool, error) { + return false, errors.New("Exists not supported") +} + +// Watch mock +func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, errors.New("Watch not supported") +} + +// WatchTree mock +func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + return nil, errors.New("WatchTree not supported") +} + +// NewLock mock +func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, errors.New("NewLock not supported") +} + +// List mock +func (s *Mock) List(prefix string) ([]*store.KVPair, error) { + return nil, errors.New("List not supported") +} + +// DeleteTree mock +func (s *Mock) DeleteTree(prefix string) error { + return errors.New("DeleteTree not supported") +} + +// AtomicPut mock +func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { + return false, nil, errors.New("AtomicPut not supported") +} + +// AtomicDelete mock +func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return false, errors.New("AtomicDelete not supported") +} + +// Close mock +func (s *Mock) Close() { + return +} + +func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { + cert := `-----BEGIN CERTIFICATE----- +MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT +B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD +VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC +O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds ++J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q +V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb +UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55 +Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT +V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/ +BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j +BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz +7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI +xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M +ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY +8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn +t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX +FpTxDmJHEV4bzUzh +-----END CERTIFICATE----- +` + key := `-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4 ++zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR +SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr +pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe +rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj +xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj +i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx +qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO +1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5 +5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony +MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0 +ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP +L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N +XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT +Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B +LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU +t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+ +QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV +xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj +xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc +qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa +V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV +PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk +dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL +BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I= +-----END RSA PRIVATE KEY----- +` + certFile, err := ioutil.TempFile("", "cert") + c.Assert(err, check.IsNil) + defer os.Remove(certFile.Name()) + certFile.Write([]byte(cert)) + certFile.Close() + keyFile, err := ioutil.TempFile("", "key") + c.Assert(err, check.IsNil) + defer os.Remove(keyFile.Name()) + keyFile.Write([]byte(key)) + keyFile.Close() + + libkv.AddStore("mock", NewMock) + d := &Discovery{backend: "mock"} + err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{ + "kv.cacertfile": certFile.Name(), + "kv.certfile": certFile.Name(), + "kv.keyfile": keyFile.Name(), + }) + c.Assert(err, check.IsNil) + s := d.store.(*Mock) + c.Assert(s.Options.TLS, check.NotNil) + c.Assert(s.Options.TLS.RootCAs, check.NotNil) + c.Assert(s.Options.TLS.Certificates, check.HasLen, 1) +} + +func (ds *DiscoverySuite) TestWatch(c *check.C) { + mockCh := make(chan []*store.KVPair) + + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + mockKVChan: mockCh, + } + + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + kvs := []*store.KVPair{ + {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, + {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, + } + + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // It should fire an error since the first WatchTree call failed. + c.Assert(<-errCh, check.ErrorMatches, "test error") + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Push the entries into the store channel and make sure discovery emits. + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + close(mockCh) + // Give it enough time to call WatchTree. + time.Sleep(3 * time.Second) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} + +// FakeStore implements store.Store methods. It mocks all store +// function in a simple, naive way. +type FakeStore struct { + Endpoints []string + Options *store.Config + mockKVChan <-chan []*store.KVPair + + watchTreeCallCount int +} + +func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error { + return nil +} + +func (s *FakeStore) Get(key string) (*store.KVPair, error) { + return nil, nil +} + +func (s *FakeStore) Delete(key string) error { + return nil +} + +func (s *FakeStore) Exists(key string) (bool, error) { + return true, nil +} + +func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, nil +} + +// WatchTree will fail the first time, and return the mockKVchan afterwards. +// This is the behavior we need for testing.. If we need 'moar', should update this. +func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + if s.watchTreeCallCount == 0 { + s.watchTreeCallCount = 1 + return nil, errors.New("test error") + } + // First calls error + return s.mockKVChan, nil +} + +func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, nil +} + +func (s *FakeStore) List(directory string) ([]*store.KVPair, error) { + return []*store.KVPair{}, nil +} + +func (s *FakeStore) DeleteTree(directory string) error { + return nil +} + +func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + return true, nil, nil +} + +func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return true, nil +} + +func (s *FakeStore) Close() { +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/memory/memory.go b/vendor/github.com/moby/moby/pkg/discovery/memory/memory.go new file mode 100644 index 0000000..ba8b1f5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/memory/memory.go @@ -0,0 +1,93 @@ +package memory + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery implements a discovery backend that keeps +// data in memory. +type Discovery struct { + heartbeat time.Duration + values []string + mu sync.Mutex +} + +func init() { + Init() +} + +// Init registers the memory backend on demand. +func Init() { + discovery.Register("memory", &Discovery{}) +} + +// Initialize sets the heartbeat for the memory backend. +func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error { + s.heartbeat = heartbeat + s.values = make([]string, 0) + return nil +} + +// Watch sends periodic discovery updates to a channel. +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + var currentEntries discovery.Entries + var err error + + s.mu.Lock() + if len(s.values) > 0 { + currentEntries, err = discovery.CreateEntries(s.values) + } + s.mu.Unlock() + + if err != nil { + errCh <- err + } else if currentEntries != nil { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + s.mu.Lock() + newEntries, err := discovery.CreateEntries(s.values) + s.mu.Unlock() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. + if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register adds a new address to the discovery. +func (s *Discovery) Register(addr string) error { + s.mu.Lock() + s.values = append(s.values, addr) + s.mu.Unlock() + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/memory/memory_test.go b/vendor/github.com/moby/moby/pkg/discovery/memory/memory_test.go new file mode 100644 index 0000000..c2da0a0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/memory/memory_test.go @@ -0,0 +1,48 @@ +package memory + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type discoverySuite struct{} + +var _ = check.Suite(&discoverySuite{}) + +func (s *discoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("foo", 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + } + + c.Assert(d.Register("1.1.1.1:1111"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + expected = discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + c.Assert(d.Register("2.2.2.2:2222"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes.go b/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes.go new file mode 100644 index 0000000..c0e3c07 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes.go @@ -0,0 +1,54 @@ +package nodes + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + entries discovery.Entries +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("nodes", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { + for _, input := range strings.Split(uris, ",") { + for _, ip := range discovery.Generate(input) { + entry, err := discovery.NewEntry(ip) + if err != nil { + return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) + } + s.entries = append(s.entries, entry) + } + } + + return nil +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + go func() { + defer close(ch) + ch <- s.entries + <-stopCh + }() + return ch, nil +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes_test.go b/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes_test.go new file mode 100644 index 0000000..e26568c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes_test.go @@ -0,0 +1,51 @@ +package nodes + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 2) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222") +} + +func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 5) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111") + c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222") + c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222") + c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + ch, _ := d.Watch(nil) + c.Assert(expected.Equals(<-ch), check.Equals, true) +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + d := &Discovery{} + c.Assert(d.Register("0.0.0.0"), check.NotNil) +} diff --git a/vendor/github.com/moby/moby/pkg/filenotify/filenotify.go b/vendor/github.com/moby/moby/pkg/filenotify/filenotify.go new file mode 100644 index 0000000..7a81cbd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. +package filenotify + +import "github.com/fsnotify/fsnotify" + +// FileWatcher is an interface for implementing file notification watchers +type FileWatcher interface { + Events() <-chan fsnotify.Event + Errors() <-chan error + Add(name string) error + Remove(name string) error + Close() error +} + +// New tries to use an fs-event watcher, and falls back to the poller if there is an error +func New() (FileWatcher, error) { + if watcher, err := NewEventWatcher(); err == nil { + return watcher, nil + } + return NewPollingWatcher(), nil +} + +// NewPollingWatcher returns a poll-based file watcher +func NewPollingWatcher() FileWatcher { + return &filePoller{ + events: make(chan fsnotify.Event), + errors: make(chan error), + } +} + +// NewEventWatcher returns an fs-event based file watcher +func NewEventWatcher() (FileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + return &fsNotifyWatcher{watcher}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/filenotify/fsnotify.go b/vendor/github.com/moby/moby/pkg/filenotify/fsnotify.go new file mode 100644 index 0000000..5d08a99 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/filenotify/fsnotify.go @@ -0,0 +1,18 @@ +package filenotify + +import "github.com/fsnotify/fsnotify" + +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifer interface +type fsNotifyWatcher struct { + *fsnotify.Watcher +} + +// Events returns the fsnotify event channel receiver +func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { + return w.Watcher.Events +} + +// Errors returns the fsnotify error channel receiver +func (w *fsNotifyWatcher) Errors() <-chan error { + return w.Watcher.Errors +} diff --git a/vendor/github.com/moby/moby/pkg/filenotify/poller.go b/vendor/github.com/moby/moby/pkg/filenotify/poller.go new file mode 100644 index 0000000..dc5ccd0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/filenotify/poller.go @@ -0,0 +1,204 @@ +package filenotify + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/fsnotify/fsnotify" +) + +var ( + // errPollerClosed is returned when the poller is closed + errPollerClosed = errors.New("poller is closed") + // errNoSuchPoller is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("poller does not exist") +) + +// watchWaitTime is the time to wait between file poll loops +const watchWaitTime = 200 * time.Millisecond + +// filePoller is used to poll files for changes, especially in cases where fsnotify +// can't be run (e.g. when inotify handles are exhausted) +// filePoller satisfies the FileWatcher interface +type filePoller struct { + // watches is the list of files currently being polled, close the associated channel to stop the watch + watches map[string]chan struct{} + // events is the channel to listen to for watch events + events chan fsnotify.Event + // errors is the channel to listen to for watch errors + errors chan error + // mu locks the poller for modification + mu sync.Mutex + // closed is used to specify when the poller has already closed + closed bool +} + +// Add adds a filename to the list of watches +// once added the file is polled for changes in a separate goroutine +func (w *filePoller) Add(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed == true { + return errPollerClosed + } + + f, err := os.Open(name) + if err != nil { + return err + } + fi, err := os.Stat(name) + if err != nil { + return err + } + + if w.watches == nil { + w.watches = make(map[string]chan struct{}) + } + if _, exists := w.watches[name]; exists { + return fmt.Errorf("watch exists") + } + chClose := make(chan struct{}) + w.watches[name] = chClose + + go w.watch(f, fi, chClose) + return nil +} + +// Remove stops and removes watch with the specified name +func (w *filePoller) Remove(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.remove(name) +} + +func (w *filePoller) remove(name string) error { + if w.closed == true { + return errPollerClosed + } + + chClose, exists := w.watches[name] + if !exists { + return errNoSuchWatch + } + close(chClose) + delete(w.watches, name) + return nil +} + +// Events returns the event channel +// This is used for notifications on events about watched files +func (w *filePoller) Events() <-chan fsnotify.Event { + return w.events +} + +// Errors returns the errors channel +// This is used for notifications about errors on watched files +func (w *filePoller) Errors() <-chan error { + return w.errors +} + +// Close closes the poller +// All watches are stopped, removed, and the poller cannot be added to +func (w *filePoller) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil + } + + w.closed = true + for name := range w.watches { + w.remove(name) + delete(w.watches, name) + } + return nil +} + +// sendEvent publishes the specified event to the events channel +func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { + select { + case w.events <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// sendErr publishes the specified error to the errors channel +func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { + select { + case w.errors <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// watch is responsible for polling the specified file for changes +// upon finding changes to a file or errors, sendEvent/sendErr is called +func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { + defer f.Close() + for { + time.Sleep(watchWaitTime) + select { + case <-chClose: + logrus.Debugf("watch for %s closed", f.Name()) + return + default: + } + + fi, err := os.Stat(f.Name()) + if err != nil { + // if we got an error here and lastFi is not set, we can presume that nothing has changed + // This should be safe since before `watch()` is called, a stat is performed, there is any error `watch` is not called + if lastFi == nil { + continue + } + // If it doesn't exist at this point, it must have been removed + // no need to send the error here since this is a valid operation + if os.IsNotExist(err) { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { + return + } + lastFi = nil + continue + } + // at this point, send the error + if err := w.sendErr(err, chClose); err != nil { + return + } + continue + } + + if lastFi == nil { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.Mode() != lastFi.Mode() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/filenotify/poller_test.go b/vendor/github.com/moby/moby/pkg/filenotify/poller_test.go new file mode 100644 index 0000000..b4c7825 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/filenotify/poller_test.go @@ -0,0 +1,119 @@ +package filenotify + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "testing" + "time" + + "github.com/fsnotify/fsnotify" +) + +func TestPollerAddRemove(t *testing.T) { + w := NewPollingWatcher() + + if err := w.Add("no-such-file"); err == nil { + t.Fatal("should have gotten error when adding a non-existent file") + } + if err := w.Remove("no-such-file"); err == nil { + t.Fatal("should have gotten error when removing non-existent watch") + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + if err := w.Remove(f.Name()); err != nil { + t.Fatal(err) + } +} + +func TestPollerEvent(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("No chmod on Windows") + } + w := NewPollingWatcher() + + f, err := ioutil.TempFile("", "test-poller") + if err != nil { + t.Fatal("error creating temp file") + } + defer os.RemoveAll(f.Name()) + f.Close() + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + select { + case <-w.Events(): + t.Fatal("got event before anything happened") + case <-w.Errors(): + t.Fatal("got error before anything happened") + default: + } + + if err := ioutil.WriteFile(f.Name(), []byte("hello"), 644); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Write); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(f.Name(), 600); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Chmod); err != nil { + t.Fatal(err) + } + + if err := os.Remove(f.Name()); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Remove); err != nil { + t.Fatal(err) + } +} + +func TestPollerClose(t *testing.T) { + w := NewPollingWatcher() + if err := w.Close(); err != nil { + t.Fatal(err) + } + // test double-close + if err := w.Close(); err != nil { + t.Fatal(err) + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + if err := w.Add(f.Name()); err == nil { + t.Fatal("should have gotten error adding watch for closed watcher") + } +} + +func assertEvent(w FileWatcher, eType fsnotify.Op) error { + var err error + select { + case e := <-w.Events(): + if e.Op != eType { + err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e) + } + case e := <-w.Errors(): + err = fmt.Errorf("got unexpected error waiting for events %v: %v", eType, e) + case <-time.After(watchWaitTime * 3): + err = fmt.Errorf("timeout waiting for event %v", eType) + } + return err +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils.go new file mode 100644 index 0000000..c63ae75 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils.go @@ -0,0 +1,283 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/Sirupsen/logrus" +) + +// exclusion returns true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' +} + +// empty returns true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on its own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. + pattern = strings.TrimSpace(pattern) + if empty(pattern) { + continue + } + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doesn't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := regexpMatch(pattern, file) + if err != nil { + return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), + strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// regexpMatch tries to match the logic of filepath.Match but +// does so using regexp logic. We do this so that we can expand the +// wildcard set to include other things, like "**" to mean any number +// of directories. This means that we should be backwards compatible +// with filepath.Match(). We'll end up supporting more stuff, due to +// the fact that we're using regexp, but that's ok - it does no harm. +// +// As per the comment in golangs filepath.Match, on Windows, escaping +// is disabled. Instead, '\\' is treated as path separator. +func regexpMatch(pattern, path string) (bool, error) { + regStr := "^" + + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(pattern, path); err != nil { + return false, err + } + + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" + } + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + res, err := regexp.MatchString(regStr, path) + + // Map regexp's error to filepath's so no one knows we're not using filepath + if err != nil { + err = filepath.ErrBadPattern + } + + return res, err +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 0000000..ccd648f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 0000000..0f2cb7a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_test.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_test.go new file mode 100644 index 0000000..6df1be8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_test.go @@ -0,0 +1,585 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "testing" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. +func TestExclusion(t *testing.T) { + exclusion := exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Test lots of variants of patterns & strings +func TestMatches(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + tests := []struct { + pattern string + text string + pass bool + }{ + {"**", "file", true}, + {"**", "file/", true}, + {"**/", "file", true}, // weird one + {"**/", "file/", true}, + {"**", "/", true}, + {"**/", "/", true}, + {"**", "dir/file", true}, + {"**/", "dir/file", false}, + {"**", "dir/file/", true}, + {"**/", "dir/file/", true}, + {"**/**", "dir/file", true}, + {"**/**", "dir/file/", true}, + {"dir/**", "dir/file", true}, + {"dir/**", "dir/file/", true}, + {"dir/**", "dir/dir2/file", true}, + {"dir/**", "dir/dir2/file/", true}, + {"**/dir2/*", "dir/dir2/file", true}, + {"**/dir2/*", "dir/dir2/file/", false}, + {"**/dir2/**", "dir/dir2/dir3/file", true}, + {"**/dir2/**", "dir/dir2/dir3/file/", true}, + {"**file", "file", true}, + {"**file", "dir/file", true}, + {"**/file", "dir/file", true}, + {"**file", "dir/dir/file", true}, + {"**/file", "dir/dir/file", true}, + {"**/file*", "dir/dir/file", true}, + {"**/file*", "dir/dir/file.txt", true}, + {"**/file*txt", "dir/dir/file.txt", true}, + {"**/file*.txt", "dir/dir/file.txt", true}, + {"**/file*.txt*", "dir/dir/file.txt", true}, + {"**/**/*.txt", "dir/dir/file.txt", true}, + {"**/**/*.txt2", "dir/dir/file.txt", false}, + {"**/*.txt", "file.txt", true}, + {"**/**/*.txt", "file.txt", true}, + {"a**/*.txt", "a/file.txt", true}, + {"a**/*.txt", "a/dir/file.txt", true}, + {"a**/*.txt", "a/dir/dir/file.txt", true}, + {"a/*.txt", "a/dir/file.txt", false}, + {"a/*.txt", "a/file.txt", true}, + {"a/*.txt**", "a/file.txt", true}, + {"a[b-d]e", "ae", false}, + {"a[b-d]e", "ace", true}, + {"a[b-d]e", "aae", false}, + {"a[^b-d]e", "aze", true}, + {".*", ".foo", true}, + {".*", "foo", false}, + {"abc.def", "abcdef", false}, + {"abc.def", "abc.def", true}, + {"abc.def", "abcZdef", false}, + {"abc?def", "abcZdef", true}, + {"abc?def", "abcdef", false}, + {"a\\*b", "a*b", true}, + {"a\\", "a", false}, + {"a\\", "a\\", false}, + {"a\\\\", "a\\", true}, + {"**/foo/bar", "foo/bar", true}, + {"**/foo/bar", "dir/foo/bar", true}, + {"**/foo/bar", "dir/dir2/foo/bar", true}, + {"abc/**", "abc", false}, + {"abc/**", "abc/def", true}, + {"abc/**", "abc/def/ghi", true}, + } + + for _, test := range tests { + res, _ := regexpMatch(test.pattern, test.text) + if res != test.pass { + t.Fatalf("Failed: %v - res:%v", test, res) + } + } +} + +// An empty string should return true from Empty. +func TestEmpty(t *testing.T) { + empty := empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if err != nil { + t.Fatalf("Should have create a folder, got %v", err) + } + + if !fileinfo.IsDir() { + t.Fatalf("Should have been a dir, seems it's not") + } +} + +func TestCreateIfNotExistsFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + fileToCreate := filepath.Join(tempFolder, "file/to/create") + + if err := CreateIfNotExists(fileToCreate, false); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(fileToCreate) + if err != nil { + t.Fatalf("Should have create a file, got %v", err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, seems it's not") + } +} + +// These matchTests are stolen from go's filepath Match tests. +type matchTest struct { + pattern, s string + match bool + err error +} + +var matchTests = []matchTest{ + {"abc", "abc", true, nil}, + {"*", "abc", true, nil}, + {"*c", "abc", true, nil}, + {"a*", "a", true, nil}, + {"a*", "abc", true, nil}, + {"a*", "ab/c", false, nil}, + {"a*/b", "abc/b", true, nil}, + {"a*/b", "a/c/b", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil}, + {"a*b?c*x", "abxbbxdbxebxczzx", true, nil}, + {"a*b?c*x", "abxbbxdbxebxczzy", false, nil}, + {"ab[c]", "abc", true, nil}, + {"ab[b-d]", "abc", true, nil}, + {"ab[e-g]", "abc", false, nil}, + {"ab[^c]", "abc", false, nil}, + {"ab[^b-d]", "abc", false, nil}, + {"ab[^e-g]", "abc", true, nil}, + {"a\\*b", "a*b", true, nil}, + {"a\\*b", "ab", false, nil}, + {"a?b", "a☺b", true, nil}, + {"a[^a]b", "a☺b", true, nil}, + {"a???b", "a☺b", false, nil}, + {"a[^a][^a][^a]b", "a☺b", false, nil}, + {"[a-ζ]*", "α", true, nil}, + {"*[a-ζ]", "A", false, nil}, + {"a?b", "a/b", false, nil}, + {"a*b", "a/b", false, nil}, + {"[\\]a]", "]", true, nil}, + {"[\\-]", "-", true, nil}, + {"[x\\-]", "x", true, nil}, + {"[x\\-]", "-", true, nil}, + {"[x\\-]", "z", false, nil}, + {"[\\-x]", "x", true, nil}, + {"[\\-x]", "-", true, nil}, + {"[\\-x]", "a", false, nil}, + {"[]a]", "]", false, filepath.ErrBadPattern}, + {"[-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "x", false, filepath.ErrBadPattern}, + {"[x-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "z", false, filepath.ErrBadPattern}, + {"[-x]", "x", false, filepath.ErrBadPattern}, + {"[-x]", "-", false, filepath.ErrBadPattern}, + {"[-x]", "a", false, filepath.ErrBadPattern}, + {"\\", "a", false, filepath.ErrBadPattern}, + {"[a-b-c]", "a", false, filepath.ErrBadPattern}, + {"[", "a", false, filepath.ErrBadPattern}, + {"[^", "a", false, filepath.ErrBadPattern}, + {"[^bc", "a", false, filepath.ErrBadPattern}, + {"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO its wrong + {"a[", "ab", false, filepath.ErrBadPattern}, + {"*x", "xxx", true, nil}, +} + +func errp(e error) string { + if e == nil { + return "" + } + return e.Error() +} + +// TestMatch test's our version of filepath.Match, called regexpMatch. +func TestMatch(t *testing.T) { + for _, tt := range matchTests { + pattern := tt.pattern + s := tt.s + if runtime.GOOS == "windows" { + if strings.Index(pattern, "\\") >= 0 { + // no escape allowed on windows. + continue + } + pattern = filepath.Clean(pattern) + s = filepath.Clean(s) + } + ok, err := regexpMatch(pattern, s) + if ok != tt.match || err != tt.err { + t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_unix.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_unix.go new file mode 100644 index 0000000..d5c3abf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_windows.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_windows.go new file mode 100644 index 0000000..5ec21ca --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux.go b/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux.go new file mode 100644 index 0000000..9fd054e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,89 @@ +// +build linux + +package fsutils + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + if err = dummyFile.Close(); err != nil { + return name, err + } + return name, nil +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *syscall.Dirent) bool { + visited++ + if ent.Type == syscall.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := syscall.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux_test.go b/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux_test.go new file mode 100644 index 0000000..4a64823 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux_test.go @@ -0,0 +1,91 @@ +// +build linux + +package fsutils + +import ( + "io/ioutil" + "os" + "os/exec" + "syscall" + "testing" +) + +func testSupportsDType(t *testing.T, expected bool, mkfsCommand string, mkfsArg ...string) { + // check whether mkfs is installed + if _, err := exec.LookPath(mkfsCommand); err != nil { + t.Skipf("%s not installed: %v", mkfsCommand, err) + } + + // create a sparse image + imageSize := int64(32 * 1024 * 1024) + imageFile, err := ioutil.TempFile("", "fsutils-image") + if err != nil { + t.Fatal(err) + } + imageFileName := imageFile.Name() + defer os.Remove(imageFileName) + if _, err = imageFile.Seek(imageSize-1, 0); err != nil { + t.Fatal(err) + } + if _, err = imageFile.Write([]byte{0}); err != nil { + t.Fatal(err) + } + if err = imageFile.Close(); err != nil { + t.Fatal(err) + } + + // create a mountpoint + mountpoint, err := ioutil.TempDir("", "fsutils-mountpoint") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(mountpoint) + + // format the image + args := append(mkfsArg, imageFileName) + t.Logf("Executing `%s %v`", mkfsCommand, args) + out, err := exec.Command(mkfsCommand, args...).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Fatal(err) + } + + // loopback-mount the image. + // for ease of setting up loopback device, we use os/exec rather than syscall.Mount + out, err = exec.Command("mount", "-o", "loop", imageFileName, mountpoint).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Skip("skipping the test because mount failed") + } + defer func() { + if err := syscall.Unmount(mountpoint, 0); err != nil { + t.Fatal(err) + } + }() + + // check whether it supports d_type + result, err := SupportsDType(mountpoint) + if err != nil { + t.Fatal(err) + } + t.Logf("Supports d_type: %v", result) + if result != expected { + t.Fatalf("expected %v, got %v", expected, result) + } +} + +func TestSupportsDTypeWithFType0XFS(t *testing.T) { + testSupportsDType(t, false, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=0") +} + +func TestSupportsDTypeWithFType1XFS(t *testing.T) { + testSupportsDType(t, true, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=1") +} + +func TestSupportsDTypeWithExt4(t *testing.T) { + testSupportsDType(t, true, "mkfs.ext4") +} diff --git a/vendor/github.com/moby/moby/pkg/gitutils/gitutils.go b/vendor/github.com/moby/moby/pkg/gitutils/gitutils.go new file mode 100644 index 0000000..ded091f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/gitutils/gitutils.go @@ -0,0 +1,100 @@ +package gitutils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" +) + +// Clone clones a repository into a newly created directory which +// will be under "docker-build-git" +func Clone(remoteURL string) (string, error) { + if !urlutil.IsGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + u, err := url.Parse(remoteURL) + if err != nil { + return "", err + } + + fragment := u.Fragment + clone := cloneArgs(u, root) + + if output, err := git(clone...); err != nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + + return checkoutGit(fragment, root) +} + +func cloneArgs(remoteURL *url.URL, root string) []string { + args := []string{"clone", "--recursive"} + shallow := len(remoteURL.Fragment) == 0 + + if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { + res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) + if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + shallow = false + } + } + + if shallow { + args = append(args, "--depth", "1") + } + + if remoteURL.Fragment != "" { + remoteURL.Fragment = "" + } + + return append(args, remoteURL.String(), root) +} + +func checkoutGit(fragment, root string) (string, error) { + refAndDir := strings.SplitN(fragment, ":", 2) + + if len(refAndDir[0]) != 0 { + if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) + if err != nil { + return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} diff --git a/vendor/github.com/moby/moby/pkg/gitutils/gitutils_test.go b/vendor/github.com/moby/moby/pkg/gitutils/gitutils_test.go new file mode 100644 index 0000000..d197058 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/gitutils/gitutils_test.go @@ -0,0 +1,220 @@ +package gitutils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" +) + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsGit(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsStripFragment(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker#test") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func gitGetConfig(name string) string { + b, err := git([]string{"config", "--get", name}...) + if err != nil { + // since we are interested in empty or non empty string, + // we can safely ignore the err here. + return "" + } + return strings.TrimSpace(string(b)) +} + +func TestCheckoutGit(t *testing.T) { + root, err := ioutil.TempDir("", "docker-build-git-checkout") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + autocrlf := gitGetConfig("core.autocrlf") + if !(autocrlf == "true" || autocrlf == "false" || + autocrlf == "input" || autocrlf == "") { + t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) + } + eol := "\n" + if autocrlf == "true" { + eol = "\r\n" + } + + gitDir := filepath.Join(root, "repo") + _, err = git("init", gitDir) + if err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { + t.Fatal(err) + } + + subDir := filepath.Join(gitDir, "subdir") + if err = os.Mkdir(subDir, 0755); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if runtime.GOOS != "windows" { + if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { + t.Fatal(err) + } + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { + t.Fatal(err) + } + + type singleCase struct { + frag string + exp string + fail bool + } + + cases := []singleCase{ + {"", "FROM scratch", false}, + {"master", "FROM scratch", false}, + {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {":nosubdir", "", true}, // missing directory error + {":Dockerfile", "", true}, // not a directory error + {"master:nosubdir", "", true}, + {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {"master:../subdir", "", true}, + {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, + } + + if runtime.GOOS != "windows" { + // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. Sample output below + // git --work-tree .\repo --git-dir .\repo\.git add -A + // error: readlink("absolutelink"): Function not implemented + // error: unable to index file absolutelink + // fatal: adding files failed + cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + } + + for _, c := range cases { + r, err := checkoutGit(c.frag, gitDir) + + fail := err != nil + if fail != c.fail { + t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) + } + if c.fail { + continue + } + + b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) + if err != nil { + t.Fatal(err) + } + + if string(b) != c.exp { + t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/graphdb/conn_sqlite3_linux.go b/vendor/github.com/moby/moby/pkg/graphdb/conn_sqlite3_linux.go new file mode 100644 index 0000000..8e61ff3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/graphdb/conn_sqlite3_linux.go @@ -0,0 +1,19 @@ +// +build cgo + +package graphdb + +import ( + "database/sql" + + _ "github.com/mattn/go-sqlite3" // registers sqlite +) + +// NewSqliteConn opens a connection to a sqlite +// database. +func NewSqliteConn(root string) (*Database, error) { + conn, err := sql.Open("sqlite3", root) + if err != nil { + return nil, err + } + return NewDatabase(conn) +} diff --git a/vendor/github.com/moby/moby/pkg/graphdb/graphdb_linux.go b/vendor/github.com/moby/moby/pkg/graphdb/graphdb_linux.go new file mode 100644 index 0000000..eca433f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/graphdb/graphdb_linux.go @@ -0,0 +1,551 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "path" + "strings" + "sync" +) + +const ( + createEntityTable = ` + CREATE TABLE IF NOT EXISTS entity ( + id text NOT NULL PRIMARY KEY + );` + + createEdgeTable = ` + CREATE TABLE IF NOT EXISTS edge ( + "entity_id" text NOT NULL, + "parent_id" text NULL, + "name" text NOT NULL, + CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), + CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") + ); + ` + + createEdgeIndices = ` + CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); + ` +) + +// Entity with a unique id. +type Entity struct { + id string +} + +// An Edge connects two entities together. +type Edge struct { + EntityID string + Name string + ParentID string +} + +// Entities stores the list of entities. +type Entities map[string]*Entity + +// Edges stores the relationships between entities. +type Edges []*Edge + +// WalkFunc is a function invoked to process an individual entity. +type WalkFunc func(fullPath string, entity *Entity) error + +// Database is a graph database for storing entities and their relationships. +type Database struct { + conn *sql.DB + mux sync.RWMutex +} + +// IsNonUniqueNameError processes the error to check if it's caused by +// a constraint violation. +// This is necessary because the error isn't the same across various +// sqlite versions. +func IsNonUniqueNameError(err error) bool { + str := err.Error() + // sqlite 3.7.17-1ubuntu1 returns: + // Set failure: Abort due to constraint violation: columns parent_id, name are not unique + if strings.HasSuffix(str, "name are not unique") { + return true + } + // sqlite-3.8.3-1.fc20 returns: + // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name + if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { + return true + } + // sqlite-3.6.20-1.el6 returns: + // Set failure: Abort due to constraint violation: constraint failed + if strings.HasSuffix(str, "constraint failed") { + return true + } + return false +} + +// NewDatabase creates a new graph database initialized with a root entity. +func NewDatabase(conn *sql.DB) (*Database, error) { + if conn == nil { + return nil, fmt.Errorf("Database connection cannot be nil") + } + db := &Database{conn: conn} + + // Create root entities + tx, err := conn.Begin() + if err != nil { + return nil, err + } + + if _, err := tx.Exec(createEntityTable); err != nil { + return nil, err + } + if _, err := tx.Exec(createEdgeTable); err != nil { + return nil, err + } + if _, err := tx.Exec(createEdgeIndices); err != nil { + return nil, err + } + + if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { + tx.Rollback() + return nil, err + } + + if err := tx.Commit(); err != nil { + return nil, err + } + + return db, nil +} + +// Close the underlying connection to the database. +func (db *Database) Close() error { + return db.conn.Close() +} + +// Set the entity id for a given path. +func (db *Database) Set(fullPath, id string) (*Entity, error) { + db.mux.Lock() + defer db.mux.Unlock() + + tx, err := db.conn.Begin() + if err != nil { + return nil, err + } + + var entityID string + if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { + if err == sql.ErrNoRows { + if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { + tx.Rollback() + return nil, err + } + } else { + tx.Rollback() + return nil, err + } + } + e := &Entity{id} + + parentPath, name := splitPath(fullPath) + if err := db.setEdge(parentPath, name, e, tx); err != nil { + tx.Rollback() + return nil, err + } + + if err := tx.Commit(); err != nil { + return nil, err + } + return e, nil +} + +// Exists returns true if a name already exists in the database. +func (db *Database) Exists(name string) bool { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return false + } + return e != nil +} + +func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error { + parent, err := db.get(parentPath) + if err != nil { + return err + } + if parent.id == e.id { + return fmt.Errorf("Cannot set self as child") + } + + if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { + return err + } + return nil +} + +// RootEntity returns the root "/" entity for the database. +func (db *Database) RootEntity() *Entity { + return &Entity{ + id: "0", + } +} + +// Get returns the entity for a given path. +func (db *Database) Get(name string) *Entity { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil + } + return e +} + +func (db *Database) get(name string) (*Entity, error) { + e := db.RootEntity() + // We always know the root name so return it if + // it is requested + if name == "/" { + return e, nil + } + + parts := split(name) + for i := 1; i < len(parts); i++ { + p := parts[i] + if p == "" { + continue + } + + next := db.child(e, p) + if next == nil { + return nil, fmt.Errorf("Cannot find child for %s", name) + } + e = next + } + return e, nil + +} + +// List all entities by from the name. +// The key will be the full path of the entity. +func (db *Database) List(name string, depth int) Entities { + db.mux.RLock() + defer db.mux.RUnlock() + + out := Entities{} + e, err := db.get(name) + if err != nil { + return out + } + + children, err := db.children(e, name, depth, nil) + if err != nil { + return out + } + + for _, c := range children { + out[c.FullPath] = c.Entity + } + return out +} + +// Walk through the child graph of an entity, calling walkFunc for each child entity. +// It is safe for walkFunc to call graph functions. +func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { + children, err := db.Children(name, depth) + if err != nil { + return err + } + + // Note: the database lock must not be held while calling walkFunc + for _, c := range children { + if err := walkFunc(c.FullPath, c.Entity); err != nil { + return err + } + } + return nil +} + +// Children returns the children of the specified entity. +func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil, err + } + + return db.children(e, name, depth, nil) +} + +// Parents returns the parents of a specified entity. +func (db *Database) Parents(name string) ([]string, error) { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil, err + } + return db.parents(e) +} + +// Refs returns the reference count for a specified id. +func (db *Database) Refs(id string) int { + db.mux.RLock() + defer db.mux.RUnlock() + + var count int + if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { + return 0 + } + return count +} + +// RefPaths returns all the id's path references. +func (db *Database) RefPaths(id string) Edges { + db.mux.RLock() + defer db.mux.RUnlock() + + refs := Edges{} + + rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) + if err != nil { + return refs + } + defer rows.Close() + + for rows.Next() { + var name string + var parentID string + if err := rows.Scan(&name, &parentID); err != nil { + return refs + } + refs = append(refs, &Edge{ + EntityID: id, + Name: name, + ParentID: parentID, + }) + } + return refs +} + +// Delete the reference to an entity at a given path. +func (db *Database) Delete(name string) error { + db.mux.Lock() + defer db.mux.Unlock() + + if name == "/" { + return fmt.Errorf("Cannot delete root entity") + } + + parentPath, n := splitPath(name) + parent, err := db.get(parentPath) + if err != nil { + return err + } + + if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ? AND name = ?;", parent.id, n); err != nil { + return err + } + return nil +} + +// Purge removes the entity with the specified id +// Walk the graph to make sure all references to the entity +// are removed and return the number of references removed +func (db *Database) Purge(id string) (int, error) { + db.mux.Lock() + defer db.mux.Unlock() + + tx, err := db.conn.Begin() + if err != nil { + return -1, err + } + + // Delete all edges + rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id) + if err != nil { + tx.Rollback() + return -1, err + } + changes, err := rows.RowsAffected() + if err != nil { + return -1, err + } + + // Clear who's using this id as parent + refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id) + if err != nil { + tx.Rollback() + return -1, err + } + refsCount, err := refs.RowsAffected() + if err != nil { + return -1, err + } + + // Delete entity + if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { + tx.Rollback() + return -1, err + } + + if err := tx.Commit(); err != nil { + return -1, err + } + + return int(changes + refsCount), nil +} + +// Rename an edge for a given path +func (db *Database) Rename(currentName, newName string) error { + db.mux.Lock() + defer db.mux.Unlock() + + parentPath, name := splitPath(currentName) + newParentPath, newEdgeName := splitPath(newName) + + if parentPath != newParentPath { + return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) + } + + parent, err := db.get(parentPath) + if err != nil { + return err + } + + rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) + if err != nil { + return err + } + i, err := rows.RowsAffected() + if err != nil { + return err + } + if i == 0 { + return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) + } + return nil +} + +// WalkMeta stores the walk metadata. +type WalkMeta struct { + Parent *Entity + Entity *Entity + FullPath string + Edge *Edge +} + +func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { + if e == nil { + return entities, nil + } + + rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var entityID, entityName string + if err := rows.Scan(&entityID, &entityName); err != nil { + return nil, err + } + child := &Entity{entityID} + edge := &Edge{ + ParentID: e.id, + Name: entityName, + EntityID: child.id, + } + + meta := WalkMeta{ + Parent: e, + Entity: child, + FullPath: path.Join(name, edge.Name), + Edge: edge, + } + + entities = append(entities, meta) + + if depth != 0 { + nDepth := depth + if depth != -1 { + nDepth-- + } + entities, err = db.children(child, meta.FullPath, nDepth, entities) + if err != nil { + return nil, err + } + } + } + + return entities, nil +} + +func (db *Database) parents(e *Entity) (parents []string, err error) { + if e == nil { + return parents, nil + } + + rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var parentID string + if err := rows.Scan(&parentID); err != nil { + return nil, err + } + parents = append(parents, parentID) + } + + return parents, nil +} + +// Return the entity based on the parent path and name. +func (db *Database) child(parent *Entity, name string) *Entity { + var id string + if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil { + return nil + } + return &Entity{id} +} + +// ID returns the id used to reference this entity. +func (e *Entity) ID() string { + return e.id +} + +// Paths returns the paths sorted by depth. +func (e Entities) Paths() []string { + out := make([]string, len(e)) + var i int + for k := range e { + out[i] = k + i++ + } + sortByDepth(out) + + return out +} diff --git a/vendor/github.com/moby/moby/pkg/graphdb/graphdb_linux_test.go b/vendor/github.com/moby/moby/pkg/graphdb/graphdb_linux_test.go new file mode 100644 index 0000000..f0fb074 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/graphdb/graphdb_linux_test.go @@ -0,0 +1,721 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "os" + "path" + "runtime" + "strconv" + "testing" + + _ "github.com/mattn/go-sqlite3" +) + +func newTestDb(t *testing.T) (*Database, string) { + p := path.Join(os.TempDir(), "sqlite.db") + conn, err := sql.Open("sqlite3", p) + db, err := NewDatabase(conn) + if err != nil { + t.Fatal(err) + } + return db, p +} + +func destroyTestDb(dbPath string) { + os.Remove(dbPath) +} + +func TestNewDatabase(t *testing.T) { + db, dbpath := newTestDb(t) + if db == nil { + t.Fatal("Database should not be nil") + } + db.Close() + defer destroyTestDb(dbpath) +} + +func TestCreateRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + root := db.RootEntity() + if root == nil { + t.Fatal("Root entity should not be nil") + } +} + +func TestGetRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + e := db.Get("/") + if e == nil { + t.Fatal("Entity should not be nil") + } + if e.ID() != "0" { + t.Fatalf("Entity id should be 0, got %s", e.ID()) + } +} + +func TestSetEntityWithDifferentName(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/test", "1") + if _, err := db.Set("/other", "1"); err != nil { + t.Fatal(err) + } +} + +func TestSetDuplicateEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/foo", "42"); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/foo", "43"); err == nil { + t.Fatalf("Creating an entry with a duplicate path did not cause an error") + } +} + +func TestCreateChild(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/db", "1") + if err != nil { + t.Fatal(err) + } + if child == nil { + t.Fatal("Child should not be nil") + } + if child.ID() != "1" { + t.Fail() + } +} + +func TestParents(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + + for i := 6; i < 11; i++ { + a := strconv.Itoa(i) + p := strconv.Itoa(i - 5) + + key := fmt.Sprintf("/%s/%s", p, a) + + if _, err := db.Set(key, a); err != nil { + t.Fatal(err) + } + + parents, err := db.Parents(key) + if err != nil { + t.Fatal(err) + } + + if len(parents) != 1 { + t.Fatalf("Expected 1 entry for %s got %d", key, len(parents)) + } + + if parents[0] != p { + t.Fatalf("ID %s received, %s expected", parents[0], p) + } + } +} + +func TestChildren(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + str := "/" + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + + str = "/" + for i := 10; i < 30; i++ { // 20 entities + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + entries, err := db.Children("/", 5) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 11 { + t.Fatalf("Expect 11 entries for / got %d", len(entries)) + } + + entries, err = db.Children("/", 20) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 25 { + t.Fatalf("Expect 25 entries for / got %d", len(entries)) + } +} + +func TestListAllRootChildren(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + entries := db.List("/", -1) + if len(entries) != 5 { + t.Fatalf("Expect 5 entries for / got %d", len(entries)) + } +} + +func TestListAllSubChildren(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + entries := db.List("/webapp", 1) + if len(entries) != 3 { + t.Fatalf("Expect 3 entries for / got %d", len(entries)) + } + + entries = db.List("/webapp", 0) + if len(entries) != 2 { + t.Fatalf("Expect 2 entries for / got %d", len(entries)) + } +} + +func TestAddSelfAsChild(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/test", "1") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/test/other", child.ID()); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestAddChildToNonExistentRoot(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/myapp", "1"); err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestWalkAll(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/db/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Walk("/", func(p string, e *Entity) error { + t.Logf("Path: %s Entity: %s", p, e.ID()) + return nil + }, -1); err != nil { + t.Fatal(err) + } +} + +func TestGetEntityByPath(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + entity := db.Get("/webapp/db/logs") + if entity == nil { + t.Fatal("Entity should not be nil") + } + if entity.ID() != "4" { + t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) + } +} + +func TestEnitiesPaths(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + out := db.List("/", -1) + for _, p := range out.Paths() { + t.Log(p) + } +} + +func TestDeleteRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if err := db.Delete("/"); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestDeleteEntity(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Delete("/webapp/sentry"); err != nil { + t.Fatal(err) + } + entity := db.Get("/webapp/sentry") + if entity != nil { + t.Fatal("Entity /webapp/sentry should be nil") + } +} + +func TestCountRefs(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + if db.Refs("2") != 2 { + t.Fatal("Expect reference count to be 2") + } +} + +func TestPurgeId(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if c := db.Refs("1"); c != 1 { + t.Fatalf("Expect reference count to be 1, got %d", c) + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + count, err := db.Purge("2") + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Fatalf("Expected 2 references to be removed, got %d", count) + } +} + +// Regression test https://github.com/docker/docker/issues/12334 +func TestPurgeIdRefPaths(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + db.Set("/db", "2") + + db.Set("/db/webapp", "1") + + if c := db.Refs("1"); c != 2 { + t.Fatalf("Expected 2 reference for webapp, got %d", c) + } + if c := db.Refs("2"); c != 1 { + t.Fatalf("Expected 1 reference for db, got %d", c) + } + + if rp := db.RefPaths("2"); len(rp) != 1 { + t.Fatalf("Expected 1 reference path for db, got %d", len(rp)) + } + + count, err := db.Purge("2") + if err != nil { + t.Fatal(err) + } + + if count != 2 { + t.Fatalf("Expected 2 rows to be removed, got %d", count) + } + + if c := db.Refs("2"); c != 0 { + t.Fatalf("Expected 0 reference for db, got %d", c) + } + if c := db.Refs("1"); c != 1 { + t.Fatalf("Expected 1 reference for webapp, got %d", c) + } +} + +func TestRename(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + if db.Get("/webapp/db") == nil { + t.Fatal("Cannot find entity at path /webapp/db") + } + + if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil { + t.Fatal(err) + } + if db.Get("/webapp/db") != nil { + t.Fatal("Entity should not exist at /webapp/db") + } + if db.Get("/webapp/newdb") == nil { + t.Fatal("Cannot find entity at path /webapp/newdb") + } + +} + +func TestCreateMultipleNames(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/db", "1") + if _, err := db.Set("/myapp", "1"); err != nil { + t.Fatal(err) + } + + db.Walk("/", func(p string, e *Entity) error { + t.Logf("%s\n", p) + return nil + }, -1) +} + +func TestRefPaths(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + refs := db.RefPaths("2") + if len(refs) != 2 { + t.Fatalf("Expected reference count to be 2, got %d", len(refs)) + } +} + +func TestExistsTrue(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/testing", "1") + + if !db.Exists("/testing") { + t.Fatalf("/tesing should exist") + } +} + +func TestExistsFalse(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/toerhe", "1") + + if db.Exists("/testing") { + t.Fatalf("/tesing should not exist") + } + +} + +func TestGetNameWithTrailingSlash(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/todo", "1") + + e := db.Get("/todo/") + if e == nil { + t.Fatalf("Entity should not be nil") + } +} + +func TestConcurrentWrites(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + errs := make(chan error, 2) + + save := func(name string, id string) { + if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil { + errs <- err + } + errs <- nil + } + purge := func(id string) { + if _, err := db.Purge(id); err != nil { + errs <- err + } + errs <- nil + } + + save("/1", "1") + + go purge("1") + go save("/2", "2") + + any := false + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + any = true + t.Log(err) + } + } + if any { + t.Fail() + } +} diff --git a/vendor/github.com/moby/moby/pkg/graphdb/sort_linux.go b/vendor/github.com/moby/moby/pkg/graphdb/sort_linux.go new file mode 100644 index 0000000..c07df07 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/graphdb/sort_linux.go @@ -0,0 +1,27 @@ +package graphdb + +import "sort" + +type pathSorter struct { + paths []string + by func(i, j string) bool +} + +func sortByDepth(paths []string) { + s := &pathSorter{paths, func(i, j string) bool { + return PathDepth(i) > PathDepth(j) + }} + sort.Sort(s) +} + +func (s *pathSorter) Len() int { + return len(s.paths) +} + +func (s *pathSorter) Swap(i, j int) { + s.paths[i], s.paths[j] = s.paths[j], s.paths[i] +} + +func (s *pathSorter) Less(i, j int) bool { + return s.by(s.paths[i], s.paths[j]) +} diff --git a/vendor/github.com/moby/moby/pkg/graphdb/sort_linux_test.go b/vendor/github.com/moby/moby/pkg/graphdb/sort_linux_test.go new file mode 100644 index 0000000..ddf2266 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/graphdb/sort_linux_test.go @@ -0,0 +1,29 @@ +package graphdb + +import ( + "testing" +) + +func TestSort(t *testing.T) { + paths := []string{ + "/", + "/myreallylongname", + "/app/db", + } + + sortByDepth(paths) + + if len(paths) != 3 { + t.Fatalf("Expected 3 parts got %d", len(paths)) + } + + if paths[0] != "/app/db" { + t.Fatalf("Expected /app/db got %s", paths[0]) + } + if paths[1] != "/myreallylongname" { + t.Fatalf("Expected /myreallylongname got %s", paths[1]) + } + if paths[2] != "/" { + t.Fatalf("Expected / got %s", paths[2]) + } +} diff --git a/vendor/github.com/moby/moby/pkg/graphdb/unsupported.go b/vendor/github.com/moby/moby/pkg/graphdb/unsupported.go new file mode 100644 index 0000000..2b8ba71 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/graphdb/unsupported.go @@ -0,0 +1,3 @@ +// +build !cgo !linux + +package graphdb diff --git a/vendor/github.com/moby/moby/pkg/graphdb/utils_linux.go b/vendor/github.com/moby/moby/pkg/graphdb/utils_linux.go new file mode 100644 index 0000000..9edd79c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/graphdb/utils_linux.go @@ -0,0 +1,32 @@ +package graphdb + +import ( + "path" + "strings" +) + +// Split p on / +func split(p string) []string { + return strings.Split(p, "/") +} + +// PathDepth returns the depth or number of / in a given path +func PathDepth(p string) int { + parts := split(p) + if len(parts) == 2 && parts[1] == "" { + return 1 + } + return len(parts) +} + +func splitPath(p string) (parent, name string) { + if p[0] != '/' { + p = "/" + p + } + parent, name = path.Split(p) + l := len(parent) + if parent[l-1] == '/' { + parent = parent[:l-1] + } + return +} diff --git a/vendor/github.com/moby/moby/pkg/homedir/homedir.go b/vendor/github.com/moby/moby/pkg/homedir/homedir.go new file mode 100644 index 0000000..8154e83 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/vendor/github.com/moby/moby/pkg/homedir/homedir_test.go b/vendor/github.com/moby/moby/pkg/homedir/homedir_test.go new file mode 100644 index 0000000..7a95cb2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/homedir/homedir_test.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "path/filepath" + "testing" +) + +func TestGet(t *testing.T) { + home := Get() + if home == "" { + t.Fatal("returned home directory is empty") + } + + if !filepath.IsAbs(home) { + t.Fatalf("returned path is not absolute: %s", home) + } +} + +func TestGetShortcutString(t *testing.T) { + shortcut := GetShortcutString() + if shortcut == "" { + t.Fatal("returned shortcut string is empty") + } +} diff --git a/vendor/github.com/moby/moby/pkg/httputils/httputils.go b/vendor/github.com/moby/moby/pkg/httputils/httputils.go new file mode 100644 index 0000000..d7dc438 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/httputils/httputils.go @@ -0,0 +1,56 @@ +package httputils + +import ( + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/jsonmessage" +) + +var ( + headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) + errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") +) + +// Download requests a given URL and returns an io.Reader. +func Download(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) + } + return resp, nil +} + +// NewHTTPRequestError returns a JSON response error. +func NewHTTPRequestError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} + +// ServerHeader contains the server information. +type ServerHeader struct { + App string // docker + Ver string // 1.8.0-dev + OS string // windows or linux +} + +// ParseServerHeader extracts pieces from an HTTP server header +// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). +func ParseServerHeader(hdr string) (*ServerHeader, error) { + matches := headerRegexp.FindStringSubmatch(hdr) + if len(matches) != 4 { + return nil, errInvalidHeader + } + return &ServerHeader{ + App: strings.TrimSpace(matches[1]), + Ver: strings.TrimSpace(matches[2]), + OS: strings.TrimSpace(matches[3]), + }, nil +} diff --git a/vendor/github.com/moby/moby/pkg/httputils/httputils_test.go b/vendor/github.com/moby/moby/pkg/httputils/httputils_test.go new file mode 100644 index 0000000..d35d082 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/httputils/httputils_test.go @@ -0,0 +1,115 @@ +package httputils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestDownload(t *testing.T) { + expected := "Hello, docker !" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, expected) + })) + defer ts.Close() + response, err := Download(ts.URL) + if err != nil { + t.Fatal(err) + } + + actual, err := ioutil.ReadAll(response.Body) + response.Body.Close() + + if err != nil || string(actual) != expected { + t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) + } +} + +func TestDownload400Errors(t *testing.T) { + expectedError := "Got HTTP status code >= 400: 403 Forbidden" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, "something failed (forbidden)", http.StatusForbidden) + })) + defer ts.Close() + // Expected status code = 403 + if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { + t.Fatalf("Expected the the error %q, got %v", expectedError, err) + } +} + +func TestDownloadOtherErrors(t *testing.T) { + if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { + t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) + } +} + +func TestNewHTTPRequestError(t *testing.T) { + errorMessage := "Some error message" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, errorMessage, http.StatusForbidden) + })) + defer ts.Close() + httpResponse, err := http.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { + t.Fatalf("Expected err to be %q, got %v", errorMessage, err) + } +} + +func TestParseServerHeader(t *testing.T) { + inputs := map[string][]string{ + "bad header": {"error"}, + "(bad header)": {"error"}, + "(without/spaces)": {"error"}, + "(header/with spaces)": {"error"}, + "foo/bar (baz)": {"foo", "bar", "baz"}, + "foo/bar": {"error"}, + "foo": {"error"}, + "foo/bar (baz space)": {"foo", "bar", "baz space"}, + " f f / b b ( b s ) ": {"f f", "b b", "b s"}, + "foo/bar (baz) ignore": {"foo", "bar", "baz"}, + "foo/bar ()": {"error"}, + "foo/bar()": {"error"}, + "foo/bar(baz)": {"foo", "bar", "baz"}, + "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, + "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, + "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, + } + + for header, values := range inputs { + serverHeader, err := ParseServerHeader(header) + if err != nil { + if err != errInvalidHeader { + t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) + } + if values[0] == "error" { + continue + } + t.Fatalf("Header %q failed to parse when it shouldn't have", header) + } + if values[0] == "error" { + t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) + } + + if serverHeader.App != values[0] { + t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) + } + + if serverHeader.Ver != values[1] { + t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) + } + + if serverHeader.OS != values[2] { + t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) + } + + } + +} diff --git a/vendor/github.com/moby/moby/pkg/httputils/mimetype.go b/vendor/github.com/moby/moby/pkg/httputils/mimetype.go new file mode 100644 index 0000000..d5cf34e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/httputils/mimetype.go @@ -0,0 +1,30 @@ +package httputils + +import ( + "mime" + "net/http" +) + +// MimeTypes stores the MIME content type. +var MimeTypes = struct { + TextPlain string + Tar string + OctetStream string +}{"text/plain", "application/tar", "application/octet-stream"} + +// DetectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is guaranteed not be nil, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters. +func DetectContentType(c []byte) (string, map[string]string, error) { + + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + + return contentType, args, nil +} diff --git a/vendor/github.com/moby/moby/pkg/httputils/mimetype_test.go b/vendor/github.com/moby/moby/pkg/httputils/mimetype_test.go new file mode 100644 index 0000000..9de433e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/httputils/mimetype_test.go @@ -0,0 +1,13 @@ +package httputils + +import ( + "testing" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { + t.Errorf("TestDetectContentType failed") + } +} diff --git a/vendor/github.com/moby/moby/pkg/httputils/resumablerequestreader.go b/vendor/github.com/moby/moby/pkg/httputils/resumablerequestreader.go new file mode 100644 index 0000000..bebc860 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,95 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +// ResumableRequestReaderWithInitialResponse makes it possible to resume +// reading the body of an already initiated request. +func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/vendor/github.com/moby/moby/pkg/httputils/resumablerequestreader_test.go b/vendor/github.com/moby/moby/pkg/httputils/resumablerequestreader_test.go new file mode 100644 index 0000000..5a2906d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/httputils/resumablerequestreader_test.go @@ -0,0 +1,307 @@ +package httputils + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResumableRequestHeaderSimpleErrors(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, world !") + })) + defer ts.Close() + + client := &http.Client{} + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedError := "client and request can't be nil\n" + resreq := &resumableRequestReader{} + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + + resreq = &resumableRequestReader{ + client: client, + request: req, + totalSize: -1, + } + expectedError = "failed to auto detect content length" + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + +} + +// Not too much failures, bails out after some wait +func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 2, + } + read, err := resreq.Read([]byte{}) + if err != nil || read != 0 { + t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) + } +} + +// Too much failures, returns the error +func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 1, + } + defer resreq.Close() + + expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` + read, err := resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError || read != 0 { + t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) + } +} + +type errorReaderCloser struct{} + +func (errorReaderCloser) Close() error { return nil } + +func (errorReaderCloser) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("An error occurred") +} + +// If an unknown error is encountered, return 0, nil and log it +func TestResumableRequestReaderWithReadError(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "500 Internal Server", + StatusCode: 500, + ContentLength: 0, + Close: true, + Body: errorReaderCloser{}, + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + read, err := resreq.Read(buf) + if err != nil { + t.Fatal(err) + } + + if read != 0 { + t.Fatalf("Expected to have read nothing, but read %v", read) + } +} + +func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "416 Requested Range Not Satisfiable", + StatusCode: 416, + ContentLength: 0, + Close: true, + Body: ioutil.NopCloser(strings.NewReader("")), + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + _, err = resreq.Read(buf) + if err == nil || err != io.EOF { + t.Fatalf("Expected an io.EOF error, got %v", err) + } +} + +func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Range") == "" { + t.Fatalf("Expected a Range HTTP header, got nothing") + } + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + resreq := &resumableRequestReader{ + client: client, + request: req, + lastRange: 1, + } + defer resreq.Close() + + buf := make([]byte, 2) + _, err = resreq.Read(buf) + if err == nil || err.Error() != "the server doesn't support byte ranges" { + t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err) + } +} + +func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + + resreq := ResumableRequestReader(client, req, retries, 0) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReader(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + resreq := ResumableRequestReader(client, req, retries, imgSize) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReaderWithInitialResponse(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + res, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + + resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/idtools.go b/vendor/github.com/moby/moby/pkg/idtools/idtools.go new file mode 100644 index 0000000..6bca466 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/idtools.go @@ -0,0 +1,197 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAllNewAs creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + + if uidMap != nil { + xUID, err := ToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + uid = xUID + } + if gidMap != nil { + xGID, err := ToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + gid = xGID + } + return uid, gid, nil +} + +// ToContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func ToContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// ToHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func ToHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// CreateIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, nil, err + } + if len(subuidRanges) == 0 { + return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return createIDMap(subuidRanges), createIDMap(subgidRanges), nil +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/idtools_unix.go b/vendor/github.com/moby/moby/pkg/idtools/idtools_unix.go new file mode 100644 index 0000000..f9eb31c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/idtools_unix.go @@ -0,0 +1,207 @@ +// +build !windows + +package idtools + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + if err := os.Chown(path, ownerUID, ownerGID); err != nil { + return err + } + // short-circuit--we were called with an existing directory and chown was requested + return nil + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, uid, gid int) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(uid), + statInfo.GID() == uint32(gid), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/idtools_unix_test.go b/vendor/github.com/moby/moby/pkg/idtools/idtools_unix_test.go new file mode 100644 index 0000000..540d307 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/idtools_unix_test.go @@ -0,0 +1,271 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +type node struct { + uid int + gid int +} + +func TestMkdirAllAs(t *testing.T) { + dirName, err := ioutil.TempDir("", "mkdirall") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should be chowned, but nothing else + if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAllNewAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdirnew") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should NOT be chowned + if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdir") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + } + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should just chown to the requested uid/gid + if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // create a subdir under a dir which doesn't exist--should fail + if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { + t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") + } + + // create a subdir under an existing dir; should only change the ownership of the new subdir + if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr/bin"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func buildTree(base string, tree map[string]node) error { + for path, node := range tree { + fullPath := filepath.Join(base, path) + if err := os.MkdirAll(fullPath, 0755); err != nil { + return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) + } + if err := os.Chown(fullPath, node.uid, node.gid); err != nil { + return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) + } + } + return nil +} + +func readTree(base, root string) (map[string]node, error) { + tree := make(map[string]node) + + dirInfos, err := ioutil.ReadDir(base) + if err != nil { + return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) + } + + for _, info := range dirInfos { + s := &syscall.Stat_t{} + if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { + return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) + } + tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} + if info.IsDir() { + // read the subdirectory + subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) + if err != nil { + return nil, err + } + for path, nodeinfo := range subtree { + tree[path] = nodeinfo + } + } + } + return tree, nil +} + +func compareTrees(left, right map[string]node) error { + if len(left) != len(right) { + return fmt.Errorf("Trees aren't the same size") + } + for path, nodeLeft := range left { + if nodeRight, ok := right[path]; ok { + if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { + // mismatch + return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, + nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) + } + continue + } + return fmt.Errorf("right tree didn't contain path %q", path) + } + return nil +} + +func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "parsesubid") + if err != nil { + t.Fatal(err) + } + fnamePath := filepath.Join(tmpDir, "testsubuid") + fcontent := `tss:100000:65536 +# empty default subuid/subgid file + +dockremap:231072:65536` + if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil { + t.Fatal(err) + } + ranges, err := parseSubidFile(fnamePath, "dockremap") + if err != nil { + t.Fatal(err) + } + if len(ranges) != 1 { + t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges)) + } + if ranges[0].Start != 231072 { + t.Fatalf("wanted 231072, got %d instead", ranges[0].Start) + } + if ranges[0].Length != 65536 { + t.Fatalf("wanted 65536, got %d instead", ranges[0].Length) + } +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/idtools_windows.go b/vendor/github.com/moby/moby/pkg/idtools/idtools_windows.go new file mode 100644 index 0000000..49f67e7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, uid, gid int) bool { + return true +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 0000000..9da7975 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 0000000..d98b354 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/utils_unix.go b/vendor/github.com/moby/moby/pkg/idtools/utils_unix.go new file mode 100644 index 0000000..9703ecb --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/moby/moby/pkg/integration/checker/checker.go b/vendor/github.com/moby/moby/pkg/integration/checker/checker.go new file mode 100644 index 0000000..d1b703a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/integration/checker/checker.go @@ -0,0 +1,46 @@ +// Package checker provides Docker specific implementations of the go-check.Checker interface. +package checker + +import ( + "github.com/go-check/check" + "github.com/vdemeester/shakers" +) + +// As a commodity, we bring all check.Checker variables into the current namespace to avoid having +// to think about check.X versus checker.X. +var ( + DeepEquals = check.DeepEquals + ErrorMatches = check.ErrorMatches + FitsTypeOf = check.FitsTypeOf + HasLen = check.HasLen + Implements = check.Implements + IsNil = check.IsNil + Matches = check.Matches + Not = check.Not + NotNil = check.NotNil + PanicMatches = check.PanicMatches + Panics = check.Panics + + Contains = shakers.Contains + ContainsAny = shakers.ContainsAny + Count = shakers.Count + Equals = shakers.Equals + EqualFold = shakers.EqualFold + False = shakers.False + GreaterOrEqualThan = shakers.GreaterOrEqualThan + GreaterThan = shakers.GreaterThan + HasPrefix = shakers.HasPrefix + HasSuffix = shakers.HasSuffix + Index = shakers.Index + IndexAny = shakers.IndexAny + IsAfter = shakers.IsAfter + IsBefore = shakers.IsBefore + IsBetween = shakers.IsBetween + IsLower = shakers.IsLower + IsUpper = shakers.IsUpper + LessOrEqualThan = shakers.LessOrEqualThan + LessThan = shakers.LessThan + TimeEquals = shakers.TimeEquals + True = shakers.True + TimeIgnore = shakers.TimeIgnore +) diff --git a/vendor/github.com/moby/moby/pkg/integration/cmd/command.go b/vendor/github.com/moby/moby/pkg/integration/cmd/command.go new file mode 100644 index 0000000..76d04e8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/integration/cmd/command.go @@ -0,0 +1,294 @@ +package cmd + +import ( + "bytes" + "fmt" + "io" + "os/exec" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +type testingT interface { + Fatalf(string, ...interface{}) +} + +const ( + // None is a token to inform Result.Assert that the output should be empty + None string = "" +) + +type lockedBuffer struct { + m sync.RWMutex + buf bytes.Buffer +} + +func (buf *lockedBuffer) Write(b []byte) (int, error) { + buf.m.Lock() + defer buf.m.Unlock() + return buf.buf.Write(b) +} + +func (buf *lockedBuffer) String() string { + buf.m.RLock() + defer buf.m.RUnlock() + return buf.buf.String() +} + +// Result stores the result of running a command +type Result struct { + Cmd *exec.Cmd + ExitCode int + Error error + // Timeout is true if the command was killed because it ran for too long + Timeout bool + outBuffer *lockedBuffer + errBuffer *lockedBuffer +} + +// Assert compares the Result against the Expected struct, and fails the test if +// any of the expcetations are not met. +func (r *Result) Assert(t testingT, exp Expected) { + err := r.Compare(exp) + if err == nil { + return + } + + _, file, line, _ := runtime.Caller(1) + t.Fatalf("at %s:%d\n%s", filepath.Base(file), line, err.Error()) +} + +// Compare returns an formatted error with the command, stdout, stderr, exit +// code, and any failed expectations +func (r *Result) Compare(exp Expected) error { + errors := []string{} + add := func(format string, args ...interface{}) { + errors = append(errors, fmt.Sprintf(format, args...)) + } + + if exp.ExitCode != r.ExitCode { + add("ExitCode was %d expected %d", r.ExitCode, exp.ExitCode) + } + if exp.Timeout != r.Timeout { + if exp.Timeout { + add("Expected command to timeout") + } else { + add("Expected command to finish, but it hit the timeout") + } + } + if !matchOutput(exp.Out, r.Stdout()) { + add("Expected stdout to contain %q", exp.Out) + } + if !matchOutput(exp.Err, r.Stderr()) { + add("Expected stderr to contain %q", exp.Err) + } + switch { + // If a non-zero exit code is expected there is going to be an error. + // Don't require an error message as well as an exit code because the + // error message is going to be "exit status which is not useful + case exp.Error == "" && exp.ExitCode != 0: + case exp.Error == "" && r.Error != nil: + add("Expected no error") + case exp.Error != "" && r.Error == nil: + add("Expected error to contain %q, but there was no error", exp.Error) + case exp.Error != "" && !strings.Contains(r.Error.Error(), exp.Error): + add("Expected error to contain %q", exp.Error) + } + + if len(errors) == 0 { + return nil + } + return fmt.Errorf("%s\nFailures:\n%s\n", r, strings.Join(errors, "\n")) +} + +func matchOutput(expected string, actual string) bool { + switch expected { + case None: + return actual == "" + default: + return strings.Contains(actual, expected) + } +} + +func (r *Result) String() string { + var timeout string + if r.Timeout { + timeout = " (timeout)" + } + + return fmt.Sprintf(` +Command: %s +ExitCode: %d%s, Error: %s +Stdout: %v +Stderr: %v +`, + strings.Join(r.Cmd.Args, " "), + r.ExitCode, + timeout, + r.Error, + r.Stdout(), + r.Stderr()) +} + +// Expected is the expected output from a Command. This struct is compared to a +// Result struct by Result.Assert(). +type Expected struct { + ExitCode int + Timeout bool + Error string + Out string + Err string +} + +// Success is the default expected result +var Success = Expected{} + +// Stdout returns the stdout of the process as a string +func (r *Result) Stdout() string { + return r.outBuffer.String() +} + +// Stderr returns the stderr of the process as a string +func (r *Result) Stderr() string { + return r.errBuffer.String() +} + +// Combined returns the stdout and stderr combined into a single string +func (r *Result) Combined() string { + return r.outBuffer.String() + r.errBuffer.String() +} + +// SetExitError sets Error and ExitCode based on Error +func (r *Result) SetExitError(err error) { + if err == nil { + return + } + r.Error = err + r.ExitCode = system.ProcessExitCode(err) +} + +type matches struct{} + +// Info returns the CheckerInfo +func (m *matches) Info() *check.CheckerInfo { + return &check.CheckerInfo{ + Name: "CommandMatches", + Params: []string{"result", "expected"}, + } +} + +// Check compares a result against the expected +func (m *matches) Check(params []interface{}, names []string) (bool, string) { + result, ok := params[0].(*Result) + if !ok { + return false, fmt.Sprintf("result must be a *Result, not %T", params[0]) + } + expected, ok := params[1].(Expected) + if !ok { + return false, fmt.Sprintf("expected must be an Expected, not %T", params[1]) + } + + err := result.Compare(expected) + if err == nil { + return true, "" + } + return false, err.Error() +} + +// Matches is a gocheck.Checker for comparing a Result against an Expected +var Matches = &matches{} + +// Cmd contains the arguments and options for a process to run as part of a test +// suite. +type Cmd struct { + Command []string + Timeout time.Duration + Stdin io.Reader + Stdout io.Writer + Dir string + Env []string +} + +// RunCmd runs a command and returns a Result +func RunCmd(cmd Cmd) *Result { + result := StartCmd(cmd) + if result.Error != nil { + return result + } + return WaitOnCmd(cmd.Timeout, result) +} + +// RunCommand parses a command line and runs it, returning a result +func RunCommand(command string, args ...string) *Result { + return RunCmd(Cmd{Command: append([]string{command}, args...)}) +} + +// StartCmd starts a command, but doesn't wait for it to finish +func StartCmd(cmd Cmd) *Result { + result := buildCmd(cmd) + if result.Error != nil { + return result + } + result.SetExitError(result.Cmd.Start()) + return result +} + +func buildCmd(cmd Cmd) *Result { + var execCmd *exec.Cmd + switch len(cmd.Command) { + case 1: + execCmd = exec.Command(cmd.Command[0]) + default: + execCmd = exec.Command(cmd.Command[0], cmd.Command[1:]...) + } + outBuffer := new(lockedBuffer) + errBuffer := new(lockedBuffer) + + execCmd.Stdin = cmd.Stdin + execCmd.Dir = cmd.Dir + execCmd.Env = cmd.Env + if cmd.Stdout != nil { + execCmd.Stdout = io.MultiWriter(outBuffer, cmd.Stdout) + } else { + execCmd.Stdout = outBuffer + } + execCmd.Stderr = errBuffer + return &Result{ + Cmd: execCmd, + outBuffer: outBuffer, + errBuffer: errBuffer, + } +} + +// WaitOnCmd waits for a command to complete. If timeout is non-nil then +// only wait until the timeout. +func WaitOnCmd(timeout time.Duration, result *Result) *Result { + if timeout == time.Duration(0) { + result.SetExitError(result.Cmd.Wait()) + return result + } + + done := make(chan error, 1) + // Wait for command to exit in a goroutine + go func() { + done <- result.Cmd.Wait() + }() + + select { + case <-time.After(timeout): + killErr := result.Cmd.Process.Kill() + if killErr != nil { + fmt.Printf("failed to kill (pid=%d): %v\n", result.Cmd.Process.Pid, killErr) + } + result.Timeout = true + case err := <-done: + result.SetExitError(err) + } + return result +} diff --git a/vendor/github.com/moby/moby/pkg/integration/cmd/command_test.go b/vendor/github.com/moby/moby/pkg/integration/cmd/command_test.go new file mode 100644 index 0000000..df23442 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/integration/cmd/command_test.go @@ -0,0 +1,118 @@ +package cmd + +import ( + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestRunCommand(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + var cmd string + if runtime.GOOS == "solaris" { + cmd = "gls" + } else { + cmd = "ls" + } + result := RunCommand(cmd) + result.Assert(t, Expected{}) + + result = RunCommand("doesnotexists") + expectedError := `exec: "doesnotexists": executable file not found` + result.Assert(t, Expected{ExitCode: 127, Error: expectedError}) + + result = RunCommand(cmd, "-z") + result.Assert(t, Expected{ + ExitCode: 2, + Error: "exit status 2", + Err: "invalid option", + }) + assert.Contains(t, result.Combined(), "invalid option") +} + +func TestRunCommandWithCombined(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + result := RunCommand("ls", "-a") + result.Assert(t, Expected{}) + + assert.Contains(t, result.Combined(), "..") + assert.Contains(t, result.Stdout(), "..") +} + +func TestRunCommandWithTimeoutFinished(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + result := RunCmd(Cmd{ + Command: []string{"ls", "-a"}, + Timeout: 50 * time.Millisecond, + }) + result.Assert(t, Expected{Out: ".."}) +} + +func TestRunCommandWithTimeoutKilled(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + command := []string{"sh", "-c", "while true ; do echo 1 ; sleep .5 ; done"} + result := RunCmd(Cmd{Command: command, Timeout: 1250 * time.Millisecond}) + result.Assert(t, Expected{Timeout: true}) + + ones := strings.Split(result.Stdout(), "\n") + assert.Equal(t, len(ones), 4) +} + +func TestRunCommandWithErrors(t *testing.T) { + result := RunCommand("/foobar") + result.Assert(t, Expected{Error: "foobar", ExitCode: 127}) +} + +func TestRunCommandWithStdoutStderr(t *testing.T) { + result := RunCommand("echo", "hello", "world") + result.Assert(t, Expected{Out: "hello world\n", Err: None}) +} + +func TestRunCommandWithStdoutStderrError(t *testing.T) { + result := RunCommand("doesnotexists") + + expected := `exec: "doesnotexists": executable file not found` + result.Assert(t, Expected{Out: None, Err: None, ExitCode: 127, Error: expected}) + + switch runtime.GOOS { + case "windows": + expected = "ls: unknown option" + case "solaris": + expected = "gls: invalid option" + default: + expected = "ls: invalid option" + } + + var cmd string + if runtime.GOOS == "solaris" { + cmd = "gls" + } else { + cmd = "ls" + } + result = RunCommand(cmd, "-z") + result.Assert(t, Expected{ + Out: None, + Err: expected, + ExitCode: 2, + Error: "exit status 2", + }) +} diff --git a/vendor/github.com/moby/moby/pkg/integration/utils.go b/vendor/github.com/moby/moby/pkg/integration/utils.go new file mode 100644 index 0000000..f2089c4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/integration/utils.go @@ -0,0 +1,227 @@ +package integration + +import ( + "archive/tar" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "syscall" + "time" + + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/system" +) + +// IsKilled process the specified error and returns whether the process was killed or not. +func IsKilled(err error) bool { + if exitErr, ok := err.(*exec.ExitError); ok { + status, ok := exitErr.Sys().(syscall.WaitStatus) + if !ok { + return false + } + // status.ExitStatus() is required on Windows because it does not + // implement Signal() nor Signaled(). Just check it had a bad exit + // status could mean it was killed (and in tests we do kill) + return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0 + } + return false +} + +func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { + exitCode = 0 + out, err := cmd.CombinedOutput() + exitCode = system.ProcessExitCode(err) + output = string(out) + return +} + +// RunCommandPipelineWithOutput runs the array of commands with the output +// of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do). +// It returns the final output, the exitCode different from 0 and the error +// if something bad happened. +func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { + if len(cmds) < 2 { + return "", 0, errors.New("pipeline does not have multiple cmds") + } + + // connect stdin of each cmd to stdout pipe of previous cmd + for i, cmd := range cmds { + if i > 0 { + prevCmd := cmds[i-1] + cmd.Stdin, err = prevCmd.StdoutPipe() + + if err != nil { + return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) + } + } + } + + // start all cmds except the last + for _, cmd := range cmds[:len(cmds)-1] { + if err = cmd.Start(); err != nil { + return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) + } + } + + defer func() { + var pipeErrMsgs []string + // wait all cmds except the last to release their resources + for _, cmd := range cmds[:len(cmds)-1] { + if pipeErr := cmd.Wait(); pipeErr != nil { + pipeErrMsgs = append(pipeErrMsgs, fmt.Sprintf("command %s failed with error: %v", cmd.Path, pipeErr)) + } + } + if len(pipeErrMsgs) > 0 && err == nil { + err = fmt.Errorf("pipelineError from Wait: %v", strings.Join(pipeErrMsgs, ", ")) + } + }() + + // wait on last cmd + return runCommandWithOutput(cmds[len(cmds)-1]) +} + +// ConvertSliceOfStringsToMap converts a slices of string in a map +// with the strings as key and an empty string as values. +func ConvertSliceOfStringsToMap(input []string) map[string]struct{} { + output := make(map[string]struct{}) + for _, v := range input { + output[v] = struct{}{} + } + return output +} + +// CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory) +// and returns an error if different. +func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { + var ( + e1Entries = make(map[string]struct{}) + e2Entries = make(map[string]struct{}) + ) + for _, e := range e1 { + e1Entries[e.Name()] = struct{}{} + } + for _, e := range e2 { + e2Entries[e.Name()] = struct{}{} + } + if !reflect.DeepEqual(e1Entries, e2Entries) { + return fmt.Errorf("entries differ") + } + return nil +} + +// ListTar lists the entries of a tar. +func ListTar(f io.Reader) ([]string, error) { + tr := tar.NewReader(f) + var entries []string + + for { + th, err := tr.Next() + if err == io.EOF { + // end of tar archive + return entries, nil + } + if err != nil { + return entries, err + } + entries = append(entries, th.Name) + } +} + +// RandomTmpDirPath provides a temporary path with rand string appended. +// does not create or checks if it exists. +func RandomTmpDirPath(s string, platform string) string { + tmp := "/tmp" + if platform == "windows" { + tmp = os.Getenv("TEMP") + } + path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10))) + if platform == "windows" { + return filepath.FromSlash(path) // Using \ + } + return filepath.ToSlash(path) // Using / +} + +// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping +// for interval duration. Returns total read bytes. Send true to the +// stop channel to return before reading to EOF on the reader. +func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { + buffer := make([]byte, chunkSize) + for { + var readBytes int + readBytes, err = reader.Read(buffer) + n += readBytes + if err != nil { + if err == io.EOF { + err = nil + } + return + } + select { + case <-stop: + return + case <-time.After(interval): + } + } +} + +// ParseCgroupPaths parses 'procCgroupData', which is output of '/proc//cgroup', and returns +// a map which cgroup name as key and path as value. +func ParseCgroupPaths(procCgroupData string) map[string]string { + cgroupPaths := map[string]string{} + for _, line := range strings.Split(procCgroupData, "\n") { + parts := strings.Split(line, ":") + if len(parts) != 3 { + continue + } + cgroupPaths[parts[1]] = parts[2] + } + return cgroupPaths +} + +// ChannelBuffer holds a chan of byte array that can be populate in a goroutine. +type ChannelBuffer struct { + C chan []byte +} + +// Write implements Writer. +func (c *ChannelBuffer) Write(b []byte) (int, error) { + c.C <- b + return len(b), nil +} + +// Close closes the go channel. +func (c *ChannelBuffer) Close() error { + close(c.C) + return nil +} + +// ReadTimeout reads the content of the channel in the specified byte array with +// the specified duration as timeout. +func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { + select { + case b := <-c.C: + return copy(p[0:], b), nil + case <-time.After(n): + return -1, fmt.Errorf("timeout reading from channel") + } +} + +// RunAtDifferentDate runs the specified function with the given time. +// It changes the date of the system, which can led to weird behaviors. +func RunAtDifferentDate(date time.Time, block func()) { + // Layout for date. MMDDhhmmYYYY + const timeLayout = "010203042006" + // Ensure we bring time back to now + now := time.Now().Format(timeLayout) + defer icmd.RunCommand("date", now) + + icmd.RunCommand("date", date.Format(timeLayout)) + block() + return +} diff --git a/vendor/github.com/moby/moby/pkg/integration/utils_test.go b/vendor/github.com/moby/moby/pkg/integration/utils_test.go new file mode 100644 index 0000000..0b2ef4a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/integration/utils_test.go @@ -0,0 +1,363 @@ +package integration + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +func TestIsKilledFalseWithNonKilledProcess(t *testing.T) { + var lsCmd *exec.Cmd + if runtime.GOOS != "windows" { + lsCmd = exec.Command("ls") + } else { + lsCmd = exec.Command("cmd", "/c", "dir") + } + + err := lsCmd.Run() + if IsKilled(err) { + t.Fatalf("Expected the ls command to not be killed, was.") + } +} + +func TestIsKilledTrueWithKilledProcess(t *testing.T) { + var longCmd *exec.Cmd + if runtime.GOOS != "windows" { + longCmd = exec.Command("top") + } else { + longCmd = exec.Command("powershell", "while ($true) { sleep 1 }") + } + + // Start a command + err := longCmd.Start() + if err != nil { + t.Fatal(err) + } + // Capture the error when *dying* + done := make(chan error, 1) + go func() { + done <- longCmd.Wait() + }() + // Then kill it + longCmd.Process.Kill() + // Get the error + err = <-done + if !IsKilled(err) { + t.Fatalf("Expected the command to be killed, was not.") + } +} + +func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) { + _, _, err := RunCommandPipelineWithOutput(exec.Command("ls")) + expectedError := "pipeline does not have multiple cmds" + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with %s, got err:%s", expectedError, err) + } +} + +func TestRunCommandPipelineWithOutputErrors(t *testing.T) { + p := "$PATH" + if runtime.GOOS == "windows" { + p = "%PATH%" + } + cmd1 := exec.Command("ls") + cmd1.Stdout = os.Stdout + cmd2 := exec.Command("anything really") + _, _, err := RunCommandPipelineWithOutput(cmd1, cmd2) + if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" { + t.Fatalf("Expected an error, got %v", err) + } + + cmdWithError := exec.Command("doesnotexists") + cmdCat := exec.Command("cat") + _, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat) + if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in `+p { + t.Fatalf("Expected an error, got %v", err) + } +} + +func TestRunCommandPipelineWithOutput(t *testing.T) { + //TODO: Should run on Solaris + if runtime.GOOS == "solaris" { + t.Skip() + } + cmds := []*exec.Cmd{ + // Print 2 characters + exec.Command("echo", "-n", "11"), + // Count the number or char from stdin (previous command) + exec.Command("wc", "-m"), + } + out, exitCode, err := RunCommandPipelineWithOutput(cmds...) + expectedOutput := "2\n" + if out != expectedOutput || exitCode != 0 || err != nil { + t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err) + } +} + +func TestConvertSliceOfStringsToMap(t *testing.T) { + input := []string{"a", "b"} + actual := ConvertSliceOfStringsToMap(input) + for _, key := range input { + if _, ok := actual[key]; !ok { + t.Fatalf("Expected output to contains key %s, did not: %v", key, actual) + } + } +} + +func TestCompareDirectoryEntries(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + file1 := filepath.Join(tmpFolder, "file1") + file2 := filepath.Join(tmpFolder, "file2") + os.Create(file1) + os.Create(file2) + + fi1, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi1bis, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi2, err := os.Stat(file2) + if err != nil { + t.Fatal(err) + } + + cases := []struct { + e1 []os.FileInfo + e2 []os.FileInfo + shouldError bool + }{ + // Empty directories + { + []os.FileInfo{}, + []os.FileInfo{}, + false, + }, + // Same FileInfos + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1}, + false, + }, + // Different FileInfos but same names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1bis}, + false, + }, + // Different FileInfos, different names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi2}, + true, + }, + } + for _, elt := range cases { + err := CompareDirectoryEntries(elt.e1, elt.e2) + if elt.shouldError && err == nil { + t.Fatalf("Should have return an error, did not with %v and %v", elt.e1, elt.e2) + } + if !elt.shouldError && err != nil { + t.Fatalf("Should have not returned an error, but did : %v with %v and %v", err, elt.e1, elt.e2) + } + } +} + +// FIXME make an "unhappy path" test for ListTar without "panicking" :-) +func TestListTar(t *testing.T) { + // TODO Windows: Figure out why this fails. Should be portable. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows - needs further investigation") + } + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + // Let's create a Tar file + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + reader, err := os.Open(tarFile) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + entries, err := ListTar(reader) + if err != nil { + t.Fatal(err) + } + if len(entries) != 1 && entries[0] != "src" { + t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries) + } +} + +func TestRandomTmpDirPath(t *testing.T) { + path := RandomTmpDirPath("something", runtime.GOOS) + + prefix := "/tmp/something" + if runtime.GOOS == "windows" { + prefix = os.Getenv("TEMP") + `\something` + } + expectedSize := len(prefix) + 11 + + if !strings.HasPrefix(path, prefix) { + t.Fatalf("Expected generated path to have '%s' as prefix, got %s'", prefix, path) + } + if len(path) != expectedSize { + t.Fatalf("Expected generated path to be %d, got %d", expectedSize, len(path)) + } +} + +func TestConsumeWithSpeed(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 1*time.Second, nil) + if err != nil { + t.Fatal(err) + } + + if bytes1 != 10 { + t.Fatalf("Expected to have read 10 bytes, got %d", bytes1) + } + +} + +func TestConsumeWithSpeedWithStop(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + stopIt := make(chan bool) + + go func() { + time.Sleep(1 * time.Millisecond) + stopIt <- true + }() + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt) + if err != nil { + t.Fatal(err) + } + + if bytes1 != 2 { + t.Fatalf("Expected to have read 2 bytes, got %d", bytes1) + } + +} + +func TestParseCgroupPathsEmpty(t *testing.T) { + cgroupMap := ParseCgroupPaths("") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("\n") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("something:else\nagain:here") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } +} + +func TestParseCgroupPaths(t *testing.T) { + cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b") + if len(cgroupMap) != 2 { + t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap) + } + if value, ok := cgroupMap["memory"]; !ok || value != "/a" { + t.Fatalf("Expected cgroupMap to contains an entry for 'memory' with value '/a', got %v", cgroupMap) + } + if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" { + t.Fatalf("Expected cgroupMap to contains an entry for 'cpuset' with value '/b', got %v", cgroupMap) + } +} + +func TestChannelBufferTimeout(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + done := make(chan struct{}, 1) + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + done <- struct{}{} + }() + + // Wait long enough + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 50*time.Millisecond) + if err == nil && err.Error() != "timeout reading from channel" { + t.Fatalf("Expected an error, got %s", err) + } + <-done +} + +func TestChannelBuffer(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + }() + + // Wait long enough + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 200*time.Millisecond) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("Expected '%s', got '%s'", expected, string(b)) + } +} + +// FIXME doesn't work +// func TestRunAtDifferentDate(t *testing.T) { +// var date string + +// // Layout for date. MMDDhhmmYYYY +// const timeLayout = "20060102" +// expectedDate := "20100201" +// theDate, err := time.Parse(timeLayout, expectedDate) +// if err != nil { +// t.Fatal(err) +// } + +// RunAtDifferentDate(theDate, func() { +// cmd := exec.Command("date", "+%Y%M%d") +// out, err := cmd.Output() +// if err != nil { +// t.Fatal(err) +// } +// date = string(out) +// }) +// } diff --git a/vendor/github.com/moby/moby/pkg/ioutils/buffer.go b/vendor/github.com/moby/moby/pkg/ioutils/buffer.go new file mode 100644 index 0000000..3d737b3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/buffer_test.go b/vendor/github.com/moby/moby/pkg/ioutils/buffer_test.go new file mode 100644 index 0000000..41098fa --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/buffer_test.go @@ -0,0 +1,75 @@ +package ioutils + +import ( + "bytes" + "testing" +) + +func TestFixedBufferWrite(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 64)} + n, err := buf.Write([]byte("hello")) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes written, got %d", n) + } + + if string(buf.buf[:5]) != "hello" { + t.Fatalf("expected \"hello\", got %q", string(buf.buf[:5])) + } + + n, err = buf.Write(bytes.Repeat([]byte{1}, 64)) + if err != errBufferFull { + t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64]) + } +} + +func TestFixedBufferRead(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 64)} + if _, err := buf.Write([]byte("hello world")); err != nil { + t.Fatal(err) + } + + b := make([]byte, 5) + n, err := buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes read, got %d - %s", n, buf.String()) + } + + if string(b) != "hello" { + t.Fatalf("expected \"hello\", got %q", string(b)) + } + + n, err = buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes read, got %d", n) + } + + if string(b) != " worl" { + t.Fatalf("expected \" worl\", got %s", string(b)) + } + + b = b[:1] + n, err = buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 1 { + t.Fatalf("expected 1 byte read, got %d - %s", n, buf.String()) + } + + if string(b) != "d" { + t.Fatalf("expected \"d\", got %s", string(b)) + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/bytespipe.go b/vendor/github.com/moby/moby/pkg/ioutils/bytespipe.go new file mode 100644 index 0000000..72a04f3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package ioutils + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/bytespipe_test.go b/vendor/github.com/moby/moby/pkg/ioutils/bytespipe_test.go new file mode 100644 index 0000000..300fb5f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/bytespipe_test.go @@ -0,0 +1,159 @@ +package ioutils + +import ( + "crypto/sha1" + "encoding/hex" + "math/rand" + "testing" + "time" +) + +func TestBytesPipeRead(t *testing.T) { + buf := NewBytesPipe() + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + rd := make([]byte, 4) + n, err := buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "1234" { + t.Fatalf("Read %s, but must be %s", rd, "1234") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "5678" { + t.Fatalf("Read %s, but must be %s", rd, "5679") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 2 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) + } + if string(rd[:n]) != "90" { + t.Fatalf("Read %s, but must be %s", rd, "90") + } +} + +func TestBytesPipeWrite(t *testing.T) { + buf := NewBytesPipe() + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + if buf.buf[0].String() != "1234567890" { + t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890") + } +} + +// Write and read in different speeds/chunk sizes and check valid data is read. +func TestBytesPipeWriteRandomChunks(t *testing.T) { + cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ + {100, 10, 1}, + {1000, 10, 5}, + {1000, 100, 0}, + {1000, 5, 6}, + {10000, 50, 25}, + } + + testMessage := []byte("this is a random string for testing") + // random slice sizes to read and write + writeChunks := []int{25, 35, 15, 20} + readChunks := []int{5, 45, 20, 25} + + for _, c := range cases { + // first pass: write directly to hash + hash := sha1.New() + for i := 0; i < c.iterations*c.writesPerLoop; i++ { + if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { + t.Fatal(err) + } + } + expected := hex.EncodeToString(hash.Sum(nil)) + + // write/read through buffer + buf := NewBytesPipe() + hash.Reset() + + done := make(chan struct{}) + + go func() { + // random delay before read starts + <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) + for i := 0; ; i++ { + p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) + n, _ := buf.Read(p) + if n == 0 { + break + } + hash.Write(p[:n]) + } + + close(done) + }() + + for i := 0; i < c.iterations; i++ { + for w := 0; w < c.writesPerLoop; w++ { + buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) + } + } + buf.Close() + <-done + + actual := hex.EncodeToString(hash.Sum(nil)) + + if expected != actual { + t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual) + } + + } +} + +func BenchmarkBytesPipeWrite(b *testing.B) { + testData := []byte("pretty short line, because why not?") + for i := 0; i < b.N; i++ { + readBuf := make([]byte, 1024) + buf := NewBytesPipe() + go func() { + var err error + for err == nil { + _, err = buf.Read(readBuf) + } + }() + for j := 0; j < 1000; j++ { + buf.Write(testData) + } + buf.Close() + } +} + +func BenchmarkBytesPipeRead(b *testing.B) { + rd := make([]byte, 512) + for i := 0; i < b.N; i++ { + b.StopTimer() + buf := NewBytesPipe() + for j := 0; j < 500; j++ { + buf.Write(make([]byte, 1024)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + if n, _ := buf.Read(rd); n != 512 { + b.Fatalf("Wrong number of bytes: %d", n) + } + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/fmt.go b/vendor/github.com/moby/moby/pkg/ioutils/fmt.go new file mode 100644 index 0000000..0b04b0b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/fmt.go @@ -0,0 +1,22 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} + +// FprintfIfTrue prints the boolean value if it's true +func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { + if ok { + return fmt.Fprintf(w, format, ok) + } + return 0, nil +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/fmt_test.go b/vendor/github.com/moby/moby/pkg/ioutils/fmt_test.go new file mode 100644 index 0000000..8968863 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/fmt_test.go @@ -0,0 +1,17 @@ +package ioutils + +import "testing" + +func TestFprintfIfNotEmpty(t *testing.T) { + wc := NewWriteCounter(&NopWriter{}) + n, _ := FprintfIfNotEmpty(wc, "foo%s", "") + + if wc.Count != 0 || n != 0 { + t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) + } + + n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") + if wc.Count != 6 || n != 6 { + t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n) + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/fswriters.go b/vendor/github.com/moby/moby/pkg/ioutils/fswriters.go new file mode 100644 index 0000000..a56c462 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/fswriters_test.go b/vendor/github.com/moby/moby/pkg/ioutils/fswriters_test.go new file mode 100644 index 0000000..c4d1419 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/fswriters_test.go @@ -0,0 +1,132 @@ +package ioutils + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" +) + +var ( + testMode os.FileMode = 0640 +) + +func init() { + // Windows does not support full Linux file mode + if runtime.GOOS == "windows" { + testMode = 0666 + } +} + +func TestAtomicWriteToFile(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writers-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + expected := []byte("barbaz") + if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if bytes.Compare(actual, expected) != 0 { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } +} + +func TestAtomicWriteSetCommit(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + targetDir := filepath.Join(tmpDir, "target") + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } + + if err := ws.Commit(targetDir); err != nil { + t.Fatalf("Error committing file: %s", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if bytes.Compare(actual, expected) != 0 { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } + +} + +func TestAtomicWriteSetCancel(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if err := ws.Cancel(); err != nil { + t.Fatalf("Error committing file: %s", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(tmpDir, "target", "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } else if !os.IsNotExist(err) { + t.Fatalf("Unexpected error reading file: %s", err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/multireader.go b/vendor/github.com/moby/moby/pkg/ioutils/multireader.go new file mode 100644 index 0000000..d7b9748 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/multireader.go @@ -0,0 +1,223 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + + var offsetTo int64 + + for _, rdr := range r.readers { + size, err := getReadSeekerSize(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo+size > offset { + return rdr, offset - offsetTo, nil + } + if rdr == r.readers[len(r.readers)-1] { + return rdr, offsetTo + offset, nil + } + offsetTo += size + } + + return nil, 0, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bLen := int64(len(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bLen) + if err != nil && err != io.EOF { + return -1, err + } + bLen -= readBytes + + if bLen == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. +func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/multireader_test.go b/vendor/github.com/moby/moby/pkg/ioutils/multireader_test.go new file mode 100644 index 0000000..65309a9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/multireader_test.go @@ -0,0 +1,211 @@ +package ioutils + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) != expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} + +func TestMultiReadSeekerCurAfterSet(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + mid := int64(s1.Len() + s2.Len()/2) + + size, err := mr.Seek(mid, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if size != mid { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid) + } + + size, err = mr.Seek(3, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+3 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+3) + } + size, err = mr.Seek(5, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+8 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+8) + } + + size, err = mr.Seek(10, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+18 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+18) + } +} + +func TestMultiReadSeekerSmallReads(t *testing.T) { + readers := []io.ReadSeeker{} + for i := 0; i < 10; i++ { + integer := make([]byte, 4, 4) + binary.BigEndian.PutUint32(integer, uint32(i)) + readers = append(readers, bytes.NewReader(integer)) + } + + reader := MultiReadSeeker(readers...) + for i := 0; i < 10; i++ { + var integer uint32 + if err := binary.Read(reader, binary.BigEndian, &integer); err != nil { + t.Fatalf("Read from NewMultiReadSeeker failed: %v", err) + } + if uint32(i) != integer { + t.Fatalf("Read wrong value from NewMultiReadSeeker: %d != %d", i, integer) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/readers.go b/vendor/github.com/moby/moby/pkg/ioutils/readers.go new file mode 100644 index 0000000..63f3c07 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/readers.go @@ -0,0 +1,154 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/readers_test.go b/vendor/github.com/moby/moby/pkg/ioutils/readers_test.go new file mode 100644 index 0000000..9abc105 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/readers_test.go @@ -0,0 +1,94 @@ +package ioutils + +import ( + "fmt" + "io/ioutil" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +// Implement io.Reader +type errorReader struct{} + +func (r *errorReader) Read(p []byte) (int, error) { + return 0, fmt.Errorf("Error reader always fail.") +} + +func TestReadCloserWrapperClose(t *testing.T) { + reader := strings.NewReader("A string reader") + wrapper := NewReadCloserWrapper(reader, func() error { + return fmt.Errorf("This will be called when closing") + }) + err := wrapper.Close() + if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { + t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") + } +} + +func TestReaderErrWrapperReadOnError(t *testing.T) { + called := false + reader := &errorReader{} + wrapper := NewReaderErrWrapper(reader, func() { + called = true + }) + _, err := wrapper.Read([]byte{}) + if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { + t.Fatalf("readErrWrapper should returned an error") + } + if !called { + t.Fatalf("readErrWrapper should have call the anonymous function on failure") + } +} + +func TestReaderErrWrapperRead(t *testing.T) { + reader := strings.NewReader("a string reader.") + wrapper := NewReaderErrWrapper(reader, func() { + t.Fatalf("readErrWrapper should not have called the anonymous function") + }) + // Read 20 byte (should be ok with the string above) + num, err := wrapper.Read(make([]byte, 20)) + if err != nil { + t.Fatal(err) + } + if num != 16 { + t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) + } +} + +func TestHashData(t *testing.T) { + reader := strings.NewReader("hash-me") + actual, err := HashData(reader) + if err != nil { + t.Fatal(err) + } + expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" + if actual != expected { + t.Fatalf("Expecting %s, got %s", expected, actual) + } +} + +type perpetualReader struct{} + +func (p *perpetualReader) Read(buf []byte) (n int, err error) { + for i := 0; i != len(buf); i++ { + buf[i] = 'a' + } + return len(buf), nil +} + +func TestCancelReadCloser(t *testing.T) { + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) + for { + var buf [128]byte + _, err := cancelReadCloser.Read(buf[:]) + if err == context.DeadlineExceeded { + break + } else if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/temp_unix.go b/vendor/github.com/moby/moby/pkg/ioutils/temp_unix.go new file mode 100644 index 0000000..1539ad2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/temp_windows.go b/vendor/github.com/moby/moby/pkg/ioutils/temp_windows.go new file mode 100644 index 0000000..c258e5f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/temp_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package ioutils + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/writeflusher.go b/vendor/github.com/moby/moby/pkg/ioutils/writeflusher.go new file mode 100644 index 0000000..52a4901 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/writers.go b/vendor/github.com/moby/moby/pkg/ioutils/writers.go new file mode 100644 index 0000000..ccc7f9c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/writers_test.go b/vendor/github.com/moby/moby/pkg/ioutils/writers_test.go new file mode 100644 index 0000000..564b1cd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/writers_test.go @@ -0,0 +1,65 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestWriteCloserWrapperClose(t *testing.T) { + called := false + writer := bytes.NewBuffer([]byte{}) + wrapper := NewWriteCloserWrapper(writer, func() error { + called = true + return nil + }) + if err := wrapper.Close(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("writeCloserWrapper should have call the anonymous function.") + } +} + +func TestNopWriteCloser(t *testing.T) { + writer := bytes.NewBuffer([]byte{}) + wrapper := NopWriteCloser(writer) + if err := wrapper.Close(); err != nil { + t.Fatal("NopWriteCloser always return nil on Close.") + } + +} + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog.go new file mode 100644 index 0000000..4734c31 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog.go @@ -0,0 +1,42 @@ +package jsonlog + +import ( + "encoding/json" + "fmt" + "time" +) + +// JSONLog represents a log message, typically a single entry from a given log stream. +// JSONLogs can be easily serialized to and from JSON and support custom formatting. +type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` + // Attrs is the list of extra attributes provided by the user + Attrs map[string]string `json:"attrs,omitempty"` +} + +// Format returns the log formatted according to format +// If format is nil, returns the log message +// If format is json, returns the log marshaled in json format +// By default, returns the log with the log time formatted according to format. +func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil +} + +// Reset resets the log to nil. +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling.go new file mode 100644 index 0000000..83ce684 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling.go @@ -0,0 +1,178 @@ +// This code was initially generated by ffjson +// This code was generated via the following steps: +// $ go get -u github.com/pquerna/ffjson +// $ make BIND_DIR=. shell +// $ ffjson pkg/jsonlog/jsonlog.go +// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go +// +// It has been modified to improve the performance of time marshalling to JSON +// and to clean it up. +// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = FastTimeMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } +// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// if len(mj.Log) != 0 { +// - if first == true { +// - first = false +// - } else { +// - buf.WriteString(`,`) +// - } +// + first = false +// buf.WriteString(`"log":`) +// ffjsonWriteJSONString(buf, mj.Log) +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" +) + +// MarshalJSON marshals the JSONLog. +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + if err := mj.MarshalJSONBuf(&buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. +func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = FastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling_test.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling_test.go new file mode 100644 index 0000000..3edb271 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling_test.go @@ -0,0 +1,34 @@ +package jsonlog + +import ( + "regexp" + "testing" +) + +func TestJSONLogMarshalJSON(t *testing.T) { + logs := map[*JSONLog]string{ + &JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, + &JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, + &JSONLog{}: `^{\"time\":\".{20,}\"}$`, + // These ones are a little weird + &JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, + } + for jsonLog, expression := range logs { + data, err := jsonLog.MarshalJSON() + if err != nil { + t.Fatal(err) + } + res := string(data) + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes.go new file mode 100644 index 0000000..df522c0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes.go @@ -0,0 +1,122 @@ +package jsonlog + +import ( + "bytes" + "encoding/json" + "unicode/utf8" +) + +// JSONLogs is based on JSONLog. +// It allows marshalling JSONLog from Log as []byte +// and an already marshalled Created timestamp. +type JSONLogs struct { + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created string `json:"time"` + + // json-encoded bytes + RawAttrs json.RawMessage `json:"attrs,omitempty"` +} + +// MarshalJSONBuf is based on the same method from JSONLog +// It has been modified to take into account the necessary changes. +func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { + var first = true + + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONBytesAsString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if len(mj.RawAttrs) > 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"attrs":`) + buf.Write(mj.RawAttrs) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + buf.WriteString(mj.Created) + buf.WriteString(`}`) + return nil +} + +// This is based on ffjsonWriteJSONBytesAsString. It has been changed +// to accept a string passed as a slice of bytes. +func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes_test.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes_test.go new file mode 100644 index 0000000..6d6ad21 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes_test.go @@ -0,0 +1,39 @@ +package jsonlog + +import ( + "bytes" + "regexp" + "testing" +) + +func TestJSONLogsMarshalJSONBuf(t *testing.T) { + logs := map[*JSONLogs]string{ + &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, + &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, + &JSONLogs{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, + &JSONLogs{Created: "time"}: `^{\"time\":time}$`, + &JSONLogs{}: `^{\"time\":}$`, + // These ones are a little weird + &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, + &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, + &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, + // with raw attributes + &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, + } + for jsonLog, expression := range logs { + var buf bytes.Buffer + if err := jsonLog.MarshalJSONBuf(&buf); err != nil { + t.Fatal(err) + } + res := buf.String() + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling.go b/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling.go new file mode 100644 index 0000000..2117338 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling.go @@ -0,0 +1,27 @@ +// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. +package jsonlog + +import ( + "errors" + "time" +) + +const ( + // RFC3339NanoFixed is our own version of RFC339Nano because we want one + // that pads the nano seconds part with zeros to ensure + // the timestamps are aligned in the logs. + RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + // JSONFormat is the format used by FastMarshalJSON + JSONFormat = `"` + time.RFC3339Nano + `"` +) + +// FastTimeMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. +func FastTimeMarshalJSON(t time.Time) (string, error) { + if y := t.Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. + return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") + } + return t.Format(JSONFormat), nil +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling_test.go b/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling_test.go new file mode 100644 index 0000000..02d0302 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling_test.go @@ -0,0 +1,47 @@ +package jsonlog + +import ( + "testing" + "time" +) + +// Testing to ensure 'year' fields is between 0 and 9999 +func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) { + aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) + json, err := FastTimeMarshalJSON(aTime) + if err == nil { + t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) + } + anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) + json, err = FastTimeMarshalJSON(anotherTime) + if err == nil { + t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) + } + +} + +func TestFastTimeMarshalJSON(t *testing.T) { + aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) + json, err := FastTimeMarshalJSON(aTime) + if err != nil { + t.Fatal(err) + } + expected := "\"2015-05-29T11:01:02.000000003Z\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } + + location, err := time.LoadLocation("Europe/Paris") + if err != nil { + t.Fatal(err) + } + aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) + json, err = FastTimeMarshalJSON(aTime) + if err != nil { + t.Fatal(err) + } + expected = "\"2015-05-29T11:01:02.000000003+02:00\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } +} diff --git a/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 0000000..5481433 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,225 @@ +package jsonmessage + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/term" + "github.com/docker/go-units" +) + +// JSONError wraps a concrete Code and Message, `Code` is +// is an integer error code, `Message` is the error message. +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, +// Start is the initial value for the operation. Current is the current status and +// value of the progress made towards Total. Total is the end value describing when +// we made 100% progress for an operation. +type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` +} + +func (p *JSONProgress) String() string { + var ( + width = 200 + pbBox string + numbersBox string + timeLeftBox string + ) + + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + width = int(ws.Width) + } + + if p.Current <= 0 && p.Total <= 0 { + return "" + } + current := units.HumanSize(float64(p.Current)) + if p.Total <= 0 { + return fmt.Sprintf("%8v", current) + } + total := units.HumanSize(float64(p.Total)) + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +// Display displays the JSONMessage to `out`. `isTerminal` describes if `out` +// is a terminal. If this is the case, it will erase the entire current line +// when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("Authentication is required.") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + // [2K = erase entire current line + fmt.Fprintf(out, "%c[2K\r", 27) + endl = "\r" + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]int) + ) + for { + diff := 0 + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm.Aux) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = len(ids) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + } + diff = len(ids) - line + if isTerminal && diff > 0 { + fmt.Fprintf(out, "%c[%dA", 27, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). + ids = make(map[string]int) + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal && diff > 0 { + fmt.Fprintf(out, "%c[%dB", 27, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage_test.go new file mode 100644 index 0000000..c6c5b0e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage_test.go @@ -0,0 +1,245 @@ +package jsonmessage + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/term" +) + +func TestError(t *testing.T) { + je := JSONError{404, "Not found"} + if je.Error() != "Not found" { + t.Fatalf("Expected 'Not found' got '%s'", je.Error()) + } +} + +func TestProgress(t *testing.T) { + termsz, err := term.GetWinsize(0) + if err != nil { + // we can safely ignore the err here + termsz = nil + } + jp := JSONProgress{} + if jp.String() != "" { + t.Fatalf("Expected empty string, got '%s'", jp.String()) + } + + expected := " 1 B" + jp2 := JSONProgress{Current: 1} + if jp2.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp2.String()) + } + + expectedStart := "[==========> ] 20 B/100 B" + if termsz != nil && termsz.Width <= 110 { + expectedStart = " 20 B/100 B" + } + jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} + // Just look at the start of the string + // (the remaining time is really hard to test -_-) + if jp3.String()[:len(expectedStart)] != expectedStart { + t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) + } + + expected = "[=========================> ] 50 B/100 B" + if termsz != nil && termsz.Width <= 110 { + expected = " 50 B/100 B" + } + jp4 := JSONProgress{Current: 50, Total: 100} + if jp4.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp4.String()) + } + + // this number can't be negative gh#7136 + expected = "[==================================================>] 50 B" + if termsz != nil && termsz.Width <= 110 { + expected = " 50 B" + } + jp5 := JSONProgress{Current: 50, Total: 40} + if jp5.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp5.String()) + } +} + +func TestJSONMessageDisplay(t *testing.T) { + now := time.Now() + messages := map[JSONMessage][]string{ + // Empty + JSONMessage{}: {"\n", "\n"}, + // Status + JSONMessage{ + Status: "status", + }: { + "status\n", + "status\n", + }, + // General + JSONMessage{ + Time: now.Unix(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with nano precision time + JSONMessage{ + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with both times Nano is preferred + JSONMessage{ + Time: now.Unix(), + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // Stream over status + JSONMessage{ + Status: "status", + Stream: "stream", + }: { + "stream", + "stream", + }, + // With progress message + JSONMessage{ + Status: "status", + ProgressMessage: "progressMessage", + }: { + "status progressMessage", + "status progressMessage", + }, + // With progress, stream empty + JSONMessage{ + Status: "status", + Stream: "", + Progress: &JSONProgress{Current: 1}, + }: { + "", + fmt.Sprintf("%c[2K\rstatus 1 B\r", 27), + }, + } + + // The tests :) + for jsonMessage, expectedMessages := range messages { + // Without terminal + data := bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, false); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String()) + } + // With terminal + data = bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, true); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) + } + } +} + +// Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code. +func TestJSONMessageDisplayWithJSONError(t *testing.T) { + data := bytes.NewBuffer([]byte{}) + jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} + + err := jsonMessage.Display(data, true) + if err == nil || err.Error() != "Can't find it" { + t.Fatalf("Expected a JSONError 404, got [%v]", err) + } + + jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} + err = jsonMessage.Display(data, true) + if err == nil || err.Error() != "Authentication is required." { + t.Fatalf("Expected an error [Authentication is required.], got [%v]", err) + } +} + +func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { + var ( + inFd uintptr + ) + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader("This is not a 'valid' JSON []") + inFd, _ = term.GetFdInfo(reader) + + if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err == nil && err.Error()[:17] != "invalid character" { + t.Fatalf("Should have thrown an error (invalid character in ..), got [%v]", err) + } +} + +func TestDisplayJSONMessagesStream(t *testing.T) { + var ( + inFd uintptr + ) + + messages := map[string][]string{ + // empty string + "": { + "", + ""}, + // Without progress & ID + "{ \"status\": \"status\" }": { + "status\n", + "status\n", + }, + // Without progress, with ID + "{ \"id\": \"ID\",\"status\": \"status\" }": { + "ID: status\n", + fmt.Sprintf("ID: status\n"), + }, + // With progress + "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": { + "ID: status ProgressMessage", + fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 1, 27, 1), + }, + // With progressDetail + "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { + "", // progressbar is disabled in non-terminal + fmt.Sprintf("\n%c[%dA%c[2K\rID: status 1 B\r%c[%dB", 27, 1, 27, 27, 1), + }, + } + for jsonMessage, expectedMessages := range messages { + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader(jsonMessage) + inFd, _ = term.GetFdInfo(reader) + + // Without terminal + if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String()) + } + + // With terminal + data = bytes.NewBuffer([]byte{}) + reader = strings.NewReader(jsonMessage) + if err := DisplayJSONMessagesStream(reader, data, inFd, true, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("Expected an [%v], got [%v]", expectedMessages[1], data.String()) + } + } + +} diff --git a/vendor/github.com/moby/moby/pkg/listeners/listeners_solaris.go b/vendor/github.com/moby/moby/pkg/listeners/listeners_solaris.go new file mode 100644 index 0000000..ff833e3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/listeners/listeners_solaris.go @@ -0,0 +1,31 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + + return +} diff --git a/vendor/github.com/moby/moby/pkg/listeners/listeners_unix.go b/vendor/github.com/moby/moby/pkg/listeners/listeners_unix.go new file mode 100644 index 0000000..1bcae7a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/listeners/listeners_unix.go @@ -0,0 +1,94 @@ +// +build !windows,!solaris + +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/activation" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +// TODO: Clean up the fact that socketGroup and tlsConfig aren't always used. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { + ls := []net.Listener{} + + switch proto { + case "fd": + fds, err := listenFD(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, fds...) + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("invalid protocol format: %q", proto) + } + + return ls, nil +} + +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. +func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { + var ( + err error + listeners []net.Listener + ) + // socket activation + if tlsConfig != nil { + listeners, err = activation.TLSListeners(false, tlsConfig) + } else { + listeners, err = activation.Listeners(false) + } + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("too few socket activated files passed in by systemd") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + // TODO: We shouldn't log inside a library. Remove this or error out. + logrus.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) + } + } + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/listeners/listeners_windows.go b/vendor/github.com/moby/moby/pkg/listeners/listeners_windows.go new file mode 100644 index 0000000..5b5a470 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/listeners/listeners_windows.go @@ -0,0 +1,54 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { + ls := []net.Listener{} + + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + + case "npipe": + // allow Administrators and SYSTEM, plus whatever additional users or groups were specified + sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)" + if socketGroup != "" { + for _, g := range strings.Split(socketGroup, ",") { + sid, err := winio.LookupSidByName(g) + if err != nil { + return nil, err + } + sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid) + } + } + c := winio.PipeConfig{ + SecurityDescriptor: sddl, + MessageMode: true, // Use message mode so that CloseWrite() is supported + InputBufferSize: 65536, // Use 64KB buffers to improve performance + OutputBufferSize: 65536, + } + l, err := winio.ListenPipe(addr, &c) + if err != nil { + return nil, err + } + ls = append(ls, l) + + default: + return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe") + } + + return ls, nil +} diff --git a/vendor/github.com/moby/moby/pkg/locker/README.md b/vendor/github.com/moby/moby/pkg/locker/README.md new file mode 100644 index 0000000..9817498 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. + + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + s.mu.Lock() + i.data[name] = data + s.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state), this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modfying it at the same time. +Since name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/vendor/github.com/moby/moby/pkg/locker/locker.go b/vendor/github.com/moby/moby/pkg/locker/locker.go new file mode 100644 index 0000000..0b22ddf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/locker/locker_test.go b/vendor/github.com/moby/moby/pkg/locker/locker_test.go new file mode 100644 index 0000000..5a297dd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/locker/locker_test.go @@ -0,0 +1,124 @@ +package locker + +import ( + "sync" + "testing" + "time" +) + +func TestLockCounter(t *testing.T) { + l := &lockCtr{} + l.inc() + + if l.waiters != 1 { + t.Fatal("counter inc failed") + } + + l.dec() + if l.waiters != 0 { + t.Fatal("counter dec failed") + } +} + +func TestLockerLock(t *testing.T) { + l := New() + l.Lock("test") + ctr := l.locks["test"] + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters) + } + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + chWaiting := make(chan struct{}) + go func() { + for range time.Tick(1 * time.Millisecond) { + if ctr.count() == 1 { + close(chWaiting) + break + } + } + }() + + select { + case <-chWaiting: + case <-time.After(3 * time.Second): + t.Fatal("timed out waiting for lock waiters to be incremented") + } + + select { + case <-chDone: + t.Fatal("lock should not have returned while it was still held") + default: + } + + if err := l.Unlock("test"); err != nil { + t.Fatal(err) + } + + select { + case <-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should have completed") + } + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got: %d", ctr.count()) + } +} + +func TestLockerUnlock(t *testing.T) { + l := New() + + l.Lock("test") + l.Unlock("test") + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should not be blocked") + } +} + +func TestLockerConcurrency(t *testing.T) { + l := New() + + var wg sync.WaitGroup + for i := 0; i <= 10000; i++ { + wg.Add(1) + go func() { + l.Lock("test") + // if there is a concurrency issue, will very likely panic here + l.Unlock("test") + wg.Done() + }() + } + + chDone := make(chan struct{}) + go func() { + wg.Wait() + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for locks to complete") + } + + // Since everything has unlocked this should not exist anymore + if ctr, exists := l.locks["test"]; exists { + t.Fatalf("lock should not exist: %v", ctr) + } +} diff --git a/vendor/github.com/moby/moby/pkg/longpath/longpath.go b/vendor/github.com/moby/moby/pkg/longpath/longpath.go new file mode 100644 index 0000000..9b15bff --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/moby/moby/pkg/longpath/longpath_test.go b/vendor/github.com/moby/moby/pkg/longpath/longpath_test.go new file mode 100644 index 0000000..01865ef --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/longpath/longpath_test.go @@ -0,0 +1,22 @@ +package longpath + +import ( + "strings" + "testing" +) + +func TestStandardLongPath(t *testing.T) { + c := `C:\simple\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\C:\simple\path`) { + t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC) + } +} + +func TestUNCLongPath(t *testing.T) { + c := `\\server\share\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { + t.Errorf("Wrong UNC long path returned. Original = %s ; Long = %s", c, longC) + } +} diff --git a/vendor/github.com/moby/moby/pkg/loopback/attach_loopback.go b/vendor/github.com/moby/moby/pkg/loopback/attach_loopback.go new file mode 100644 index 0000000..971f45e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/loopback/attach_loopback.go @@ -0,0 +1,137 @@ +// +build linux + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. + startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + logrus.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &loopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + logrus.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + logrus.Error("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/vendor/github.com/moby/moby/pkg/loopback/ioctl.go b/vendor/github.com/moby/moby/pkg/loopback/ioctl.go new file mode 100644 index 0000000..0714eb5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/loopback/ioctl.go @@ -0,0 +1,53 @@ +// +build linux + +package loopback + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { + loopInfo := &loopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/loopback/loop_wrapper.go b/vendor/github.com/moby/moby/pkg/loopback/loop_wrapper.go new file mode 100644 index 0000000..e1100ce --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/loopback/loop_wrapper.go @@ -0,0 +1,52 @@ +// +build linux + +package loopback + +/* +#include // FIXME: present only for defines, maybe we can remove it? + +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/vendor/github.com/moby/moby/pkg/loopback/loopback.go b/vendor/github.com/moby/moby/pkg/loopback/loopback.go new file mode 100644 index 0000000..bc04792 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/loopback/loopback.go @@ -0,0 +1,63 @@ +// +build linux + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("Error loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. +func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/mount/flags.go b/vendor/github.com/moby/moby/pkg/mount/flags.go new file mode 100644 index 0000000..607dbed --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := parseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/moby/moby/pkg/mount/flags_freebsd.go b/vendor/github.com/moby/moby/pkg/mount/flags_freebsd.go new file mode 100644 index 0000000..f166cb2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +// +build freebsd,cgo + +package mount + +/* +#include +*/ +import "C" + +const ( + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. + NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/moby/moby/pkg/mount/flags_linux.go b/vendor/github.com/moby/moby/pkg/mount/flags_linux.go new file mode 100644 index 0000000..dc696dc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/flags_linux.go @@ -0,0 +1,85 @@ +package mount + +import ( + "syscall" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = syscall.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = syscall.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = syscall.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = syscall.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = syscall.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = syscall.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = syscall.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = syscall.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = syscall.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = syscall.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = syscall.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = syscall.MS_BIND | syscall.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = syscall.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = syscall.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = syscall.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = syscall.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = syscall.MS_SHARED | syscall.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = syscall.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = syscall.MS_STRICTATIME +) diff --git a/vendor/github.com/moby/moby/pkg/mount/flags_unsupported.go b/vendor/github.com/moby/moby/pkg/mount/flags_unsupported.go new file mode 100644 index 0000000..5564f7b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/flags_unsupported.go @@ -0,0 +1,30 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 +) diff --git a/vendor/github.com/moby/moby/pkg/mount/mount.go b/vendor/github.com/moby/moby/pkg/mount/mount.go new file mode 100644 index 0000000..66ac4bf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mount.go @@ -0,0 +1,74 @@ +package mount + +import ( + "time" +) + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + if err := mount(device, target, mType, uintptr(flag), data); err != nil { + return err + } + return nil +} + +// Unmount will unmount the target filesystem, so long as it is mounted. +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return ForceUnmount(target) +} + +// ForceUnmount will force an unmount of the target filesystem, regardless if +// it is mounted or not. +func ForceUnmount(target string) (err error) { + // Simple retry logic for unmount + for i := 0; i < 10; i++ { + if err = unmount(target, 0); err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mount_unix_test.go b/vendor/github.com/moby/moby/pkg/mount/mount_unix_test.go new file mode 100644 index 0000000..253aff3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mount_unix_test.go @@ -0,0 +1,162 @@ +// +build !windows,!solaris + +package mount + +import ( + "os" + "path" + "testing" +) + +func TestMountOptionsParsing(t *testing.T) { + options := "noatime,ro,size=10k" + + flag, data := parseOptions(options) + + if data != "size=10k" { + t.Fatalf("Expected size=10 got %s", data) + } + + expectedFlag := NOATIME | RDONLY + + if flag != expectedFlag { + t.Fatalf("Expected %d got %d", expectedFlag, flag) + } +} + +func TestMounted(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + mounted, err := Mounted(targetDir) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatalf("Expected %s to be mounted", targetDir) + } + if _, err := os.Stat(targetDir); err != nil { + t.Fatal(err) + } +} + +func TestMountReadonly(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) + if err == nil { + t.Fatal("Should not be able to open a ro file as rw") + } +} + +func TestGetMounts(t *testing.T) { + mounts, err := GetMounts() + if err != nil { + t.Fatal(err) + } + + root := false + for _, entry := range mounts { + if entry.Mountpoint == "/" { + root = true + } + } + + if !root { + t.Fatal("/ should be mounted at least") + } +} + +func TestMergeTmpfsOptions(t *testing.T) { + options := []string{"noatime", "ro", "size=10k", "defaults", "atime", "defaults", "rw", "rprivate", "size=1024k", "slave"} + expected := []string{"atime", "rw", "size=1024k", "slave"} + merged, err := MergeTmpfsOptions(options) + if err != nil { + t.Fatal(err) + } + if len(expected) != len(merged) { + t.Fatalf("Expected %s got %s", expected, merged) + } + for index := range merged { + if merged[index] != expected[index] { + t.Fatalf("Expected %s for the %dth option, got %s", expected, index, merged) + } + } + + options = []string{"noatime", "ro", "size=10k", "atime", "rw", "rprivate", "size=1024k", "slave", "size"} + _, err = MergeTmpfsOptions(options) + if err == nil { + t.Fatal("Expected error got nil") + } +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mounter_freebsd.go b/vendor/github.com/moby/moby/pkg/mount/mounter_freebsd.go new file mode 100644 index 0000000..bb870e6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mounter_freebsd.go @@ -0,0 +1,59 @@ +package mount + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mounter_linux.go b/vendor/github.com/moby/moby/pkg/mount/mounter_linux.go new file mode 100644 index 0000000..dd4280c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mounter_linux.go @@ -0,0 +1,21 @@ +package mount + +import ( + "syscall" +) + +func mount(device, target, mType string, flag uintptr, data string) error { + if err := syscall.Mount(device, target, mType, flag, data); err != nil { + return err + } + + // If we have a bind mount or remount, remount... + if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { + return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mounter_solaris.go b/vendor/github.com/moby/moby/pkg/mount/mounter_solaris.go new file mode 100644 index 0000000..c684aa8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mounter_solaris.go @@ -0,0 +1,33 @@ +// +build solaris,cgo + +package mount + +import ( + "golang.org/x/sys/unix" + "unsafe" +) + +// #include +// #include +// #include +// int Mount(const char *spec, const char *dir, int mflag, +// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { +// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); +// } +import "C" + +func mount(device, target, mType string, flag uintptr, data string) error { + spec := C.CString(device) + dir := C.CString(target) + fstype := C.CString(mType) + _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) + C.free(unsafe.Pointer(spec)) + C.free(unsafe.Pointer(dir)) + C.free(unsafe.Pointer(fstype)) + return err +} + +func unmount(target string, flag int) error { + err := unix.Unmount(target, flag) + return err +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mounter_unsupported.go b/vendor/github.com/moby/moby/pkg/mount/mounter_unsupported.go new file mode 100644 index 0000000..a2a3bb4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo.go new file mode 100644 index 0000000..e3fc353 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo.go @@ -0,0 +1,40 @@ +package mount + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. + VfsOpts string +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 0000000..4f32edc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,41 @@ +package mount + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts. +func parseMountTable() ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux.go new file mode 100644 index 0000000..be69fee --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux.go @@ -0,0 +1,95 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux_test.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux_test.go new file mode 100644 index 0000000..bd100e1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux_test.go @@ -0,0 +1,476 @@ +// +build linux + +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := Info{ + ID: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: "shared:5", + Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got %#v", mi, infos[0]) + } +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_solaris.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_solaris.go new file mode 100644 index 0000000..ad9ab57 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include +#include +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret == 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp) + } + + C.fclose(mnttab) + return out, nil +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 0000000..7fbcf19 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_windows.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_windows.go new file mode 100644 index 0000000..dab8a37 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 0000000..8ceec84 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,69 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux_test.go new file mode 100644 index 0000000..c183794 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux_test.go @@ -0,0 +1,331 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "syscall" + "testing" +) + +// nothing is propagated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared mount, +// then child mounts propagate to the source +func TestSubtreeShared(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath = path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is available in the source directory + if _, err := os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable", sourceDir) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} diff --git a/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_solaris.go new file mode 100644 index 0000000..09f6b03 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_solaris.go @@ -0,0 +1,58 @@ +// +build solaris + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + // TODO: Solaris does not support bind mounts. + // Evaluate lofs and also look at the relevant + // mount flags to be supported. + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/namesgenerator/cmd/names-generator/main.go b/vendor/github.com/moby/moby/pkg/namesgenerator/cmd/names-generator/main.go new file mode 100644 index 0000000..18a939b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/namesgenerator/cmd/names-generator/main.go @@ -0,0 +1,11 @@ +package main + +import ( + "fmt" + + "github.com/docker/docker/pkg/namesgenerator" +) + +func main() { + fmt.Println(namesgenerator.GetRandomName(0)) +} diff --git a/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator.go b/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator.go new file mode 100644 index 0000000..cfb8157 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator.go @@ -0,0 +1,590 @@ +package namesgenerator + +import ( + "fmt" + + "github.com/docker/docker/pkg/random" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "affectionate", + "agitated", + "amazing", + "angry", + "awesome", + "blissful", + "boring", + "brave", + "clever", + "cocky", + "compassionate", + "competent", + "condescending", + "confident", + "cranky", + "dazzling", + "determined", + "distracted", + "dreamy", + "eager", + "ecstatic", + "elastic", + "elated", + "elegant", + "eloquent", + "epic", + "fervent", + "festive", + "flamboyant", + "focused", + "friendly", + "frosty", + "gallant", + "gifted", + "goofy", + "gracious", + "happy", + "hardcore", + "heuristic", + "hopeful", + "hungry", + "infallible", + "inspiring", + "jolly", + "jovial", + "keen", + "kickass", + "kind", + "laughing", + "loving", + "lucid", + "mystifying", + "modest", + "musing", + "naughty", + "nervous", + "nifty", + "nostalgic", + "objective", + "optimistic", + "peaceful", + "pedantic", + "pensive", + "practical", + "priceless", + "quirky", + "quizzical", + "relaxed", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "silly", + "sleepy", + "stoic", + "stupefied", + "suspicious", + "tender", + "thirsty", + "trusting", + "unruffled", + "upbeat", + "vibrant", + "vigilant", + "wizardly", + "wonderful", + "xenodochial", + "youthful", + "zealous", + "zen", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi + "agnesi", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver + "beaver", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) + "shannon", + + // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke + "clarke", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // This entry reflects a husband and wife team who worked together: + // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran + // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran + "curran", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison + "edison", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt + "haibt", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport + "lamport", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + //Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin + "lewin", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform + "nightingale", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) + "montalcini", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles + "wiles", + + // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { + rnd := random.Rand +begin: + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + } + return name +} diff --git a/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator_test.go b/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator_test.go new file mode 100644 index 0000000..d1a9497 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator_test.go @@ -0,0 +1,27 @@ +package namesgenerator + +import ( + "strings" + "testing" +) + +func TestNameFormat(t *testing.T) { + name := GetRandomName(0) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name contains numbers!") + } +} + +func TestNameRetries(t *testing.T) { + name := GetRandomName(1) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if !strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name doesn't contain a number") + } + +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel.go new file mode 100644 index 0000000..7738fc7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel.go @@ -0,0 +1,74 @@ +// +build !windows + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 it a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 0000000..71f205b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 0000000..744d5e1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,45 @@ +// +build linux freebsd solaris + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + + "github.com/Sirupsen/logrus" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. +func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix_test.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix_test.go new file mode 100644 index 0000000..dc8c0e3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix_test.go @@ -0,0 +1,96 @@ +// +build !windows + +package kernel + +import ( + "fmt" + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { + var ( + a *VersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(*a, *b); r != result { + t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +// TestParseRelease tests the ParseRelease() function +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1) + // Errors + invalids := []string{ + "3", + "a", + "a.a", + "a.a.a-a", + } + for _, invalid := range invalids { + expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid) + if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage { + + } + } +} + +func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) + } +} + +// TestCompareKernelVersion tests the CompareKernelVersion() function +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 5}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 0, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 7, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 7, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + -1) +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 0000000..80fab8f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,69 @@ +// +build windows + +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 0000000..bb9b326 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is passthrough for syscall.Utsname in order to make it portable with +// other platforms where it is not available. +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 0000000..49370bd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 0000000..1da3f23 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!solaris + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 0000000..e04a349 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,77 @@ +// Package operatingsystem provides helper function to get the operating system +// name for different platforms. +package operatingsystem + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/mattn/go-shellwords" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" + + // used by stateless systems like Clear Linux + altOsRelease = "/usr/lib/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + osReleaseFile, err := os.Open(etcOsRelease) + if err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) + } + osReleaseFile, err = os.Open(altOsRelease) + if err != nil { + return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) + } + } + defer osReleaseFile.Close() + + var prettyName string + scanner := bufio.NewScanner(osReleaseFile) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PRETTY_NAME=") { + data := strings.SplitN(line, "=", 2) + prettyNames, err := shellwords.Parse(data[1]) + if err != nil { + return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) + } + if len(prettyNames) != 1 { + return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) + } + prettyName = prettyNames[0] + } + } + if prettyName != "" { + return prettyName, nil + } + // If not set, defaults to PRETTY_NAME="Linux" + // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html + return "Linux", nil +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_solaris.go new file mode 100644 index 0000000..d08ad14 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package operatingsystem + +/* +#include +*/ +import "C" + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var etcOsRelease = "/etc/release" + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("\n")); i >= 0 { + b = bytes.Trim(b[:i], " ") + return string(b), nil + } + return "", errors.New("release not found") +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + if C.getzoneid() != 0 { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix.go new file mode 100644 index 0000000..bc91c3c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix.go @@ -0,0 +1,25 @@ +// +build freebsd darwin + +package operatingsystem + +import ( + "errors" + "os/exec" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + cmd := exec.Command("uname", "-s") + osName, err := cmd.Output() + if err != nil { + return "", err + } + return string(osName), nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD and Darwin, always returns false. +func IsContainerized() (bool, error) { + // TODO: Implement jail detection for freeBSD + return false, errors.New("Cannot detect if we are in container") +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix_test.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix_test.go new file mode 100644 index 0000000..e7120c6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix_test.go @@ -0,0 +1,247 @@ +// +build linux freebsd + +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var backup = etcOsRelease + + invalids := []struct { + content string + errorExpected string + }{ + { + `PRETTY_NAME=Source Mage GNU/Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux", + }, + { + `PRETTY_NAME="Ubuntu Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME=Ubuntu' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS", + }, + } + + valids := []struct { + content string + expected string + }{ + { + `NAME="Ubuntu" +PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`, + "Gentoo/Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Ubuntu 14.04 LTS", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME='Ubuntu 14.04 LTS'`, + "Ubuntu 14.04 LTS", + }, + { + `PRETTY_NAME=Source +NAME="Source Mage"`, + "Source", + }, + { + `PRETTY_NAME=Source +PRETTY_NAME="Source Mage"`, + "Source Mage", + }, + } + + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + + defer func() { + os.Remove(etcOsRelease) + etcOsRelease = backup + }() + + for _, elt := range invalids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err == nil || err.Error() != elt.errorExpected { + t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err) + } + } + + for _, elt := range valids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != elt.expected { + t.Fatalf("Expected %q, got %q (err: %v)", elt.expected, s, err) + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + nonContainerizedProc1Cgroupsystemd226 = []byte(`9:memory:/init.scope +8:net_cls,net_prio:/ +7:cpuset:/ +6:freezer:/ +5:devices:/init.scope +4:blkio:/init.scope +3:cpu,cpuacct:/init.scope +2:perf_event:/ +1:name=systemd:/init.scope +`) + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroupsystemd226, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized for systemd /init.scope cgroup layout") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} + +func TestOsReleaseFallback(t *testing.T) { + var backup = etcOsRelease + var altBackup = altOsRelease + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + altOsRelease = filepath.Join(dir, "altOsRelease") + + defer func() { + os.Remove(dir) + etcOsRelease = backup + altOsRelease = altBackup + }() + content := `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +` + if err := ioutil.WriteFile(altOsRelease, []byte(content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != "Gentoo/Linux" { + t.Fatalf("Expected %q, got %q (err: %v)", "Gentoo/Linux", s, err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_windows.go new file mode 100644 index 0000000..3c86b6a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -0,0 +1,49 @@ +package operatingsystem + +import ( + "syscall" + "unsafe" +) + +// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c +// for a similar sample + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + + var h syscall.Handle + + // Default return value + ret := "Unknown Operating System" + + if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return ret, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err := syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("ProductName"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return ret, err + } + ret = syscall.UTF16ToString(buf[:]) + + return ret, nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on Windows, always returns false. +func IsContainerized() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/parsers.go b/vendor/github.com/moby/moby/pkg/parsers/parsers.go new file mode 100644 index 0000000..acc8971 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. +// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/parsers_test.go b/vendor/github.com/moby/moby/pkg/parsers/parsers_test.go new file mode 100644 index 0000000..7f19e90 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/parsers_test.go @@ -0,0 +1,70 @@ +package parsers + +import ( + "reflect" + "testing" +) + +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": {"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + +func TestParseUintList(t *testing.T) { + valids := map[string]map[int]bool{ + "": {}, + "7": {7: true}, + "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true}, + "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true}, + "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true}, + "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true}, + "03,1-3": {1: true, 2: true, 3: true}, + "3,2,1": {1: true, 2: true, 3: true}, + "0-2,3,1": {0: true, 1: true, 2: true, 3: true}, + } + for k, v := range valids { + out, err := ParseUintList(k) + if err != nil { + t.Fatalf("Expected not to fail, got %v", err) + } + if !reflect.DeepEqual(out, v) { + t.Fatalf("Expected %v, got %v", v, out) + } + } + + invalids := []string{ + "this", + "1--", + "1-10,,10", + "10-1", + "-1", + "-1,0", + } + for _, v := range invalids { + if out, err := ParseUintList(v); err == nil { + t.Fatalf("Expected failure with %s but got %v", v, out) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile.go new file mode 100644 index 0000000..d832fea --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile.go @@ -0,0 +1,56 @@ +// Package pidfile provides structure and helper functions to create and remove +// PID file. A PID file is usually a file used to store the process ID of a +// running process. +package pidfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// PIDFile is a file used to store the process ID of a running process. +type PIDFile struct { + path string +} + +func checkPIDFileAlreadyExists(path string) error { + if pidByte, err := ioutil.ReadFile(path); err == nil { + pidString := strings.TrimSpace(string(pidByte)) + if pid, err := strconv.Atoi(pidString); err == nil { + if processExists(pid) { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) + } + } + } + return nil +} + +// New creates a PIDfile using the specified path. +func New(path string) (*PIDFile, error) { + if err := checkPIDFileAlreadyExists(path); err != nil { + return nil, err + } + // Note MkdirAll returns nil if a directory already exists + if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &PIDFile{path: path}, nil +} + +// Remove removes the PIDFile. +func (file PIDFile) Remove() error { + if err := os.Remove(file.path); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile_darwin.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_darwin.go new file mode 100644 index 0000000..5c1cd7a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_darwin.go @@ -0,0 +1,18 @@ +// +build darwin + +package pidfile + +import ( + "syscall" +) + +func processExists(pid int) bool { + // OS X does not have a proc filesystem. + // Use kill -0 pid to judge if the process exists. + err := syscall.Kill(pid, 0) + if err != nil { + return false + } + + return true +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile_test.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_test.go new file mode 100644 index 0000000..73e8af7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_test.go @@ -0,0 +1,38 @@ +package pidfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestNewAndRemove(t *testing.T) { + dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile") + if err != nil { + t.Fatal("Could not create test directory") + } + + path := filepath.Join(dir, "testfile") + file, err := New(path) + if err != nil { + t.Fatal("Could not create test file", err) + } + + _, err = New(path) + if err == nil { + t.Fatal("Test file creation not blocked") + } + + if err := file.Remove(); err != nil { + t.Fatal("Could not delete created test file") + } +} + +func TestRemoveInvalidPath(t *testing.T) { + file := PIDFile{path: filepath.Join("foo", "bar")} + + if err := file.Remove(); err == nil { + t.Fatal("Non-existing file doesn't give an error on delete") + } +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile_unix.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_unix.go new file mode 100644 index 0000000..1bf5221 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!darwin + +package pidfile + +import ( + "os" + "path/filepath" + "strconv" +) + +func processExists(pid int) bool { + if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { + return true + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile_windows.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_windows.go new file mode 100644 index 0000000..ae489c6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_windows.go @@ -0,0 +1,23 @@ +package pidfile + +import "syscall" + +const ( + processQueryLimitedInformation = 0x1000 + + stillActive = 259 +) + +func processExists(pid int) bool { + h, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) + if err != nil { + return false + } + var c uint32 + err = syscall.GetExitCodeProcess(h, &c) + syscall.Close(h) + if err != nil { + return c == stillActive + } + return true +} diff --git a/vendor/github.com/moby/moby/pkg/platform/architecture_linux.go b/vendor/github.com/moby/moby/pkg/platform/architecture_linux.go new file mode 100644 index 0000000..2cdc2c5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/architecture_linux.go @@ -0,0 +1,16 @@ +// Package platform provides helper function to get the runtime architecture +// for different platforms. +package platform + +import ( + "syscall" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + utsname := &syscall.Utsname{} + if err := syscall.Uname(utsname); err != nil { + return "", err + } + return charsToString(utsname.Machine), nil +} diff --git a/vendor/github.com/moby/moby/pkg/platform/architecture_unix.go b/vendor/github.com/moby/moby/pkg/platform/architecture_unix.go new file mode 100644 index 0000000..45bbcf1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/architecture_unix.go @@ -0,0 +1,20 @@ +// +build freebsd solaris darwin + +// Package platform provides helper function to get the runtime architecture +// for different platforms. +package platform + +import ( + "os/exec" + "strings" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, i86pc, sun4v, ...) +func runtimeArchitecture() (string, error) { + cmd := exec.Command("/usr/bin/uname", "-m") + machine, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(machine)), nil +} diff --git a/vendor/github.com/moby/moby/pkg/platform/architecture_windows.go b/vendor/github.com/moby/moby/pkg/platform/architecture_windows.go new file mode 100644 index 0000000..c5f684d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/architecture_windows.go @@ -0,0 +1,60 @@ +package platform + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") +) + +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx +type systeminfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +// Constants +const ( + ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64 + ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64 + ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL + ProcessorArchitectureArm = 5 // PROCESSOR_ARCHITECTURE_ARM +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + switch sysinfo.wProcessorArchitecture { + case ProcessorArchitecture64, ProcessorArchitectureIA64: + return "x86_64", nil + case ProcessorArchitecture32: + return "i686", nil + case ProcessorArchitectureArm: + return "arm", nil + default: + return "", fmt.Errorf("Unknown processor architecture") + } +} + +// NumProcs returns the number of processors on the system +func NumProcs() uint32 { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + return sysinfo.dwNumberOfProcessors +} diff --git a/vendor/github.com/moby/moby/pkg/platform/platform.go b/vendor/github.com/moby/moby/pkg/platform/platform.go new file mode 100644 index 0000000..e4b0312 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/platform.go @@ -0,0 +1,23 @@ +package platform + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +var ( + // Architecture holds the runtime architecture of the process. + Architecture string + // OSType holds the runtime operating system type (Linux, …) of the process. + OSType string +) + +func init() { + var err error + Architecture, err = runtimeArchitecture() + if err != nil { + logrus.Errorf("Could not read system architecture info: %v", err) + } + OSType = runtime.GOOS +} diff --git a/vendor/github.com/moby/moby/pkg/platform/utsname_int8.go b/vendor/github.com/moby/moby/pkg/platform/utsname_int8.go new file mode 100644 index 0000000..5dcbadf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/utsname_int8.go @@ -0,0 +1,18 @@ +// +build linux,386 linux,amd64 linux,arm64 +// see golang's sources src/syscall/ztypes_linux_*.go that use int8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of signed int8 +func charsToString(ca [65]int8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = uint8(ca[lens]) + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/moby/moby/pkg/platform/utsname_uint8.go b/vendor/github.com/moby/moby/pkg/platform/utsname_uint8.go new file mode 100644 index 0000000..c9875cf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/utsname_uint8.go @@ -0,0 +1,18 @@ +// +build linux,arm linux,ppc64 linux,ppc64le s390x +// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of unsigned uint8 +func charsToString(ca [65]uint8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = ca[lens] + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/moby/moby/pkg/plugingetter/getter.go b/vendor/github.com/moby/moby/pkg/plugingetter/getter.go new file mode 100644 index 0000000..dde5f66 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugingetter/getter.go @@ -0,0 +1,35 @@ +package plugingetter + +import "github.com/docker/docker/pkg/plugins" + +const ( + // LOOKUP doesn't update RefCount + LOOKUP = 0 + // ACQUIRE increments RefCount + ACQUIRE = 1 + // RELEASE decrements RefCount + RELEASE = -1 +) + +// CompatPlugin is a abstraction to handle both v2(new) and v1(legacy) plugins. +type CompatPlugin interface { + Client() *plugins.Client + Name() string + BasePath() string + IsV1() bool +} + +// CountedPlugin is a plugin which is reference counted. +type CountedPlugin interface { + Acquire() + Release() + CompatPlugin +} + +// PluginGetter is the interface implemented by Store +type PluginGetter interface { + Get(name, capability string, mode int) (CompatPlugin, error) + GetAllByCap(capability string) ([]CompatPlugin, error) + GetAllManagedPluginsByCap(capability string) []CompatPlugin + Handle(capability string, callback func(string, *plugins.Client)) +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/client.go b/vendor/github.com/moby/moby/pkg/plugins/client.go new file mode 100644 index 0000000..e8e730e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/client.go @@ -0,0 +1,205 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeoutInSecs int) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeoutInSecs), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. +func newClientWithTransport(tr transport.Transport, timeoutInSecs int) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: time.Duration(timeoutInSecs) * time.Second, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. +func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + var retries int + start := time.Now() + + for { + resp, err := c.http.Do(req) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... + return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return resp.Body, nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/client_test.go b/vendor/github.com/moby/moby/pkg/plugins/client_test.go new file mode 100644 index 0000000..9faad86 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/client_test.go @@ -0,0 +1,134 @@ +package plugins + +import ( + "io" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + "time" + + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + mux *http.ServeMux + server *httptest.Server +) + +func setupRemotePluginServer() string { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + return server.URL +} + +func teardownRemotePluginServer() { + if server != nil { + server.Close() + } +} + +func TestFailedConnection(t *testing.T) { + c, _ := NewClient("tcp://127.0.0.1:1", &tlsconfig.Options{InsecureSkipVerify: true}) + _, err := c.callWithRetry("Service.Method", nil, false) + if err == nil { + t.Fatal("Unexpected successful connection") + } +} + +func TestEchoInputOutput(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(output, m) { + t.Fatalf("Expected %v, was %v\n", m, output) + } + err = c.Call("Test.Echo", nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestBackoff(t *testing.T) { + cases := []struct { + retries int + expTimeOff time.Duration + }{ + {0, time.Duration(1)}, + {1, time.Duration(2)}, + {2, time.Duration(4)}, + {4, time.Duration(16)}, + {6, time.Duration(30)}, + {10, time.Duration(30)}, + } + + for _, c := range cases { + s := c.expTimeOff * time.Second + if d := backoff(c.retries); d != s { + t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d) + } + } +} + +func TestAbortRetry(t *testing.T) { + cases := []struct { + timeOff time.Duration + expAbort bool + }{ + {time.Duration(1), false}, + {time.Duration(2), false}, + {time.Duration(10), false}, + {time.Duration(30), true}, + {time.Duration(40), true}, + } + + for _, c := range cases { + s := c.timeOff * time.Second + if a := abort(time.Now(), s); a != c.expAbort { + t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a) + } + } +} + +func TestClientScheme(t *testing.T) { + cases := map[string]string{ + "tcp://127.0.0.1:8080": "http", + "unix:///usr/local/plugins/foo": "http", + "http://127.0.0.1:8080": "http", + "https://127.0.0.1:8080": "https", + } + + for addr, scheme := range cases { + u, err := url.Parse(addr) + if err != nil { + t.Fatal(err) + } + s := httpScheme(u) + + if s != scheme { + t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, s) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery.go b/vendor/github.com/moby/moby/pkg/plugins/discovery.go new file mode 100644 index 0000000..e99581c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery.go @@ -0,0 +1,131 @@ +package plugins + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). +type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return nil + } + + if fi.Mode()&os.ModeSocket != 0 { + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + } + return nil + }); err != nil { + return nil, err + } + + for _, path := range specsPaths { + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi.IsDir() { + return nil + } + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + return nil + }); err != nil { + return nil, err + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) + } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, ErrNotFound +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery_test.go b/vendor/github.com/moby/moby/pkg/plugins/discovery_test.go new file mode 100644 index 0000000..03f9d00 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery_test.go @@ -0,0 +1,152 @@ +package plugins + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func Setup(t *testing.T) (string, func()) { + tmpdir, err := ioutil.TempDir("", "docker-test") + if err != nil { + t.Fatal(err) + } + backup := socketsPath + socketsPath = tmpdir + specsPaths = []string{tmpdir} + + return tmpdir, func() { + socketsPath = backup + os.RemoveAll(tmpdir) + } +} + +func TestFileSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []struct { + path string + name string + addr string + fail bool + }{ + // TODO Windows: Factor out the unix:// variants. + {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(c.name) + if c.fail && err == nil { + continue + } + + if err != nil { + t.Fatal(err) + } + + if p.name != c.name { + t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.name) + } + + if p.Addr != c.addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) + } + + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + } +} + +func TestFileJSONSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" { + t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile) + } + + if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" { + t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile) + } + + if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" { + t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile) + } +} + +func TestFileJSONSpecPluginWithoutTLSConfig(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin" +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig != nil { + t.Fatalf("Expected plugin TLSConfig nil, got %v\n", plugin.TLSConfig) + } +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery_unix.go b/vendor/github.com/moby/moby/pkg/plugins/discovery_unix.go new file mode 100644 index 0000000..693a47e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery_unix_test.go b/vendor/github.com/moby/moby/pkg/plugins/discovery_unix_test.go new file mode 100644 index 0000000..3e2d506 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery_unix_test.go @@ -0,0 +1,61 @@ +// +build !windows + +package plugins + +import ( + "fmt" + "net" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestLocalSocket(t *testing.T) { + // TODO Windows: Enable a similar version for Windows named pipes + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []string{ + filepath.Join(tmpdir, "echo.sock"), + filepath.Join(tmpdir, "echo", "echo.sock"), + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil { + t.Fatal(err) + } + + l, err := net.Listen("unix", c) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + + pp, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(p, pp) { + t.Fatalf("Expected %v, was %v\n", p, pp) + } + + if p.name != "echo" { + t.Fatalf("Expected plugin `echo`, got %s\n", p.name) + } + + addr := fmt.Sprintf("unix://%s", c) + if p.Addr != addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr) + } + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + l.Close() + } +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery_windows.go b/vendor/github.com/moby/moby/pkg/plugins/discovery_windows.go new file mode 100644 index 0000000..d7c1fe4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/vendor/github.com/moby/moby/pkg/plugins/errors.go b/vendor/github.com/moby/moby/pkg/plugins/errors.go new file mode 100644 index 0000000..7988471 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/plugin_test.go b/vendor/github.com/moby/moby/pkg/plugins/plugin_test.go new file mode 100644 index 0000000..b19c0d5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/plugin_test.go @@ -0,0 +1,44 @@ +package plugins + +import ( + "errors" + "path/filepath" + "runtime" + "sync" + "testing" + "time" +) + +// regression test for deadlock in handlers +func TestPluginAddHandler(t *testing.T) { + // make a plugin which is pre-activated + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.Manifest = &Manifest{Implements: []string{"bananas"}} + storage.plugins["qwerty"] = p + + testActive(t, p) + Handle("bananas", func(_ string, _ *Client) {}) + testActive(t, p) +} + +func TestPluginWaitBadPlugin(t *testing.T) { + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.activateErr = errors.New("some junk happened") + testActive(t, p) +} + +func testActive(t *testing.T, p *Plugin) { + done := make(chan struct{}) + go func() { + p.waitActive() + close(done) + }() + + select { + case <-time.After(100 * time.Millisecond): + _, f, l, _ := runtime.Caller(1) + t.Fatalf("%s:%d: deadlock in waitActive", filepath.Base(f), l) + case <-done: + } + +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/README.md b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/README.md new file mode 100644 index 0000000..0418a3e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/README.md @@ -0,0 +1,58 @@ +Plugin RPC Generator +==================== + +Generates go code from a Go interface definition for proxying between the plugin +API and the subsystem being extended. + +## Usage + +Given an interface definition: + +```go +type volumeDriver interface { + Create(name string, opts opts) (err error) + Remove(name string) (err error) + Path(name string) (mountpoint string, err error) + Mount(name string) (mountpoint string, err error) + Unmount(name string) (err error) +} +``` + +**Note**: All function options and return values must be named in the definition. + +Run the generator: + +```bash +$ pluginrpc-gen --type volumeDriver --name VolumeDriver -i volumes/drivers/extpoint.go -o volumes/drivers/proxy.go +``` + +Where: +- `--type` is the name of the interface to use +- `--name` is the subsystem that the plugin "Implements" +- `-i` is the input file containing the interface definition +- `-o` is the output file where the the generated code should go + +**Note**: The generated code will use the same package name as the one defined in the input file + +Optionally, you can skip functions on the interface that should not be +implemented in the generated proxy code by passing in the function name to `--skip`. +This flag can be specified multiple times. + +You can also add build tags that should be prepended to the generated code by +supplying `--tag`. This flag can be specified multiple times. + +## Known issues + +## go-generate + +You can also use this with go-generate, which is pretty awesome. +To do so, place the code at the top of the file which contains the interface +definition (i.e., the input file): + +```go +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver +``` + +Then cd to the package dir and run `go generate` + +**Note**: the `pluginrpc-gen` binary must be within your `$PATH` diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/foo.go new file mode 100644 index 0000000..5695dcc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/foo.go @@ -0,0 +1,89 @@ +package foo + +import ( + "fmt" + + aliasedio "io" + + "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" +) + +var ( + errFakeImport = fmt.Errorf("just to import fmt for imports tests") +) + +type wobble struct { + Some string + Val string + Inception *wobble +} + +// Fooer is an empty interface used for tests. +type Fooer interface{} + +// Fooer2 is an interface used for tests. +type Fooer2 interface { + Foo() +} + +// Fooer3 is an interface used for tests. +type Fooer3 interface { + Foo() + Bar(a string) + Baz(a string) (err error) + Qux(a, b string) (val string, err error) + Wobble() (w *wobble) + Wiggle() (w wobble) + WiggleWobble(a []*wobble, b []wobble, c map[string]*wobble, d map[*wobble]wobble, e map[string][]wobble, f []*otherfixture.Spaceship) (g map[*wobble]wobble, h [][]*wobble, i otherfixture.Spaceship, j *otherfixture.Spaceship, k map[*otherfixture.Spaceship]otherfixture.Spaceship, l []otherfixture.Spaceship) +} + +// Fooer4 is an interface used for tests. +type Fooer4 interface { + Foo() error +} + +// Bar is an interface used for tests. +type Bar interface { + Boo(a string, b string) (s string, err error) +} + +// Fooer5 is an interface used for tests. +type Fooer5 interface { + Foo() + Bar +} + +// Fooer6 is an interface used for tests. +type Fooer6 interface { + Foo(a otherfixture.Spaceship) +} + +// Fooer7 is an interface used for tests. +type Fooer7 interface { + Foo(a *otherfixture.Spaceship) +} + +// Fooer8 is an interface used for tests. +type Fooer8 interface { + Foo(a map[string]otherfixture.Spaceship) +} + +// Fooer9 is an interface used for tests. +type Fooer9 interface { + Foo(a map[string]*otherfixture.Spaceship) +} + +// Fooer10 is an interface used for tests. +type Fooer10 interface { + Foo(a []otherfixture.Spaceship) +} + +// Fooer11 is an interface used for tests. +type Fooer11 interface { + Foo(a []*otherfixture.Spaceship) +} + +// Fooer12 is an interface used for tests. +type Fooer12 interface { + Foo(a aliasedio.Reader) +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go new file mode 100644 index 0000000..1937d17 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go @@ -0,0 +1,4 @@ +package otherfixture + +// Spaceship is a fixture for tests +type Spaceship struct{} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/main.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/main.go new file mode 100644 index 0000000..e77a7d4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "unicode" + "unicode/utf8" +) + +type stringSet struct { + values map[string]struct{} +} + +func (s stringSet) String() string { + return "" +} + +func (s stringSet) Set(value string) error { + s.values[value] = struct{}{} + return nil +} +func (s stringSet) GetValues() map[string]struct{} { + return s.values +} + +var ( + typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") + rpcName = flag.String("name", *typeName, "RPC name, set if different from type") + inputFile = flag.String("i", "", "input file path") + outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") + + skipFuncs map[string]struct{} + flSkipFuncs = stringSet{make(map[string]struct{})} + + flBuildTags = stringSet{make(map[string]struct{})} +) + +func errorOut(msg string, err error) { + if err == nil { + return + } + fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err) + os.Exit(1) +} + +func checkFlags() error { + if *outputFile == "" { + return fmt.Errorf("missing required flag `-o`") + } + if *inputFile == "" { + return fmt.Errorf("missing required flag `-i`") + } + return nil +} + +func main() { + flag.Var(flSkipFuncs, "skip", "skip parsing for function") + flag.Var(flBuildTags, "tag", "build tags to add to generated files") + flag.Parse() + skipFuncs = flSkipFuncs.GetValues() + + errorOut("error", checkFlags()) + + pkg, err := Parse(*inputFile, *typeName) + errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err) + + var analysis = struct { + InterfaceType string + RPCName string + BuildTags map[string]struct{} + *ParsedPkg + }{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg} + var buf bytes.Buffer + + errorOut("parser error", generatedTempl.Execute(&buf, analysis)) + src, err := format.Source(buf.Bytes()) + errorOut("error formatting generated source:\n"+buf.String(), err) + errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644)) +} + +func toLower(s string) string { + if s == "" { + return "" + } + r, n := utf8.DecodeRuneInString(s) + return string(unicode.ToLower(r)) + s[n:] +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser.go new file mode 100644 index 0000000..6c547e1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser.go @@ -0,0 +1,263 @@ +package main + +import ( + "errors" + "fmt" + "go/ast" + "go/parser" + "go/token" + "path" + "reflect" + "strings" +) + +var errBadReturn = errors.New("found return arg with no name: all args must be named") + +type errUnexpectedType struct { + expected string + actual interface{} +} + +func (e errUnexpectedType) Error() string { + return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual)) +} + +// ParsedPkg holds information about a package that has been parsed, +// its name and the list of functions. +type ParsedPkg struct { + Name string + Functions []function + Imports []importSpec +} + +type function struct { + Name string + Args []arg + Returns []arg + Doc string +} + +type arg struct { + Name string + ArgType string + PackageSelector string +} + +func (a *arg) String() string { + return a.Name + " " + a.ArgType +} + +type importSpec struct { + Name string + Path string +} + +func (s *importSpec) String() string { + var ss string + if len(s.Name) != 0 { + ss += s.Name + } + ss += s.Path + return ss +} + +// Parse parses the given file for an interface definition with the given name. +func Parse(filePath string, objName string) (*ParsedPkg, error) { + fs := token.NewFileSet() + pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors) + if err != nil { + return nil, err + } + p := &ParsedPkg{} + p.Name = pkg.Name.Name + obj, exists := pkg.Scope.Objects[objName] + if !exists { + return nil, fmt.Errorf("could not find object %s in %s", objName, filePath) + } + if obj.Kind != ast.Typ { + return nil, fmt.Errorf("exected type, got %s", obj.Kind) + } + spec, ok := obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type} + } + + p.Functions, err = parseInterface(iface) + if err != nil { + return nil, err + } + + // figure out what imports will be needed + imports := make(map[string]importSpec) + for _, f := range p.Functions { + args := append(f.Args, f.Returns...) + for _, arg := range args { + if len(arg.PackageSelector) == 0 { + continue + } + + for _, i := range pkg.Imports { + if i.Name != nil { + if i.Name.Name != arg.PackageSelector { + continue + } + imports[i.Path.Value] = importSpec{Name: arg.PackageSelector, Path: i.Path.Value} + break + } + + _, name := path.Split(i.Path.Value) + splitName := strings.Split(name, "-") + if len(splitName) > 1 { + name = splitName[len(splitName)-1] + } + // import paths have quotes already added in, so need to remove them for name comparison + name = strings.TrimPrefix(name, `"`) + name = strings.TrimSuffix(name, `"`) + if name == arg.PackageSelector { + imports[i.Path.Value] = importSpec{Path: i.Path.Value} + break + } + } + } + } + + for _, spec := range imports { + p.Imports = append(p.Imports, spec) + } + + return p, nil +} + +func parseInterface(iface *ast.InterfaceType) ([]function, error) { + var functions []function + for _, field := range iface.Methods.List { + switch f := field.Type.(type) { + case *ast.FuncType: + method, err := parseFunc(field) + if err != nil { + return nil, err + } + if method == nil { + continue + } + functions = append(functions, *method) + case *ast.Ident: + spec, ok := f.Obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", spec.Type} + } + funcs, err := parseInterface(iface) + if err != nil { + fmt.Println(err) + continue + } + functions = append(functions, funcs...) + default: + return nil, errUnexpectedType{"*astFuncType or *ast.Ident", f} + } + } + return functions, nil +} + +func parseFunc(field *ast.Field) (*function, error) { + f := field.Type.(*ast.FuncType) + method := &function{Name: field.Names[0].Name} + if _, exists := skipFuncs[method.Name]; exists { + fmt.Println("skipping:", method.Name) + return nil, nil + } + if f.Params != nil { + args, err := parseArgs(f.Params.List) + if err != nil { + return nil, err + } + method.Args = args + } + if f.Results != nil { + returns, err := parseArgs(f.Results.List) + if err != nil { + return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err) + } + method.Returns = returns + } + return method, nil +} + +func parseArgs(fields []*ast.Field) ([]arg, error) { + var args []arg + for _, f := range fields { + if len(f.Names) == 0 { + return nil, errBadReturn + } + for _, name := range f.Names { + p, err := parseExpr(f.Type) + if err != nil { + return nil, err + } + args = append(args, arg{name.Name, p.value, p.pkg}) + } + } + return args, nil +} + +type parsedExpr struct { + value string + pkg string +} + +func parseExpr(e ast.Expr) (parsedExpr, error) { + var parsed parsedExpr + switch i := e.(type) { + case *ast.Ident: + parsed.value += i.Name + case *ast.StarExpr: + p, err := parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.value += "*" + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.SelectorExpr: + p, err := parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.pkg = p.value + parsed.value += p.value + "." + parsed.value += i.Sel.Name + case *ast.MapType: + parsed.value += "map[" + p, err := parseExpr(i.Key) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.value += "]" + p, err = parseExpr(i.Value) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.ArrayType: + parsed.value += "[]" + p, err := parseExpr(i.Elt) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + default: + return parsed, errUnexpectedType{"*ast.Ident or *ast.StarExpr", i} + } + return parsed, nil +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser_test.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser_test.go new file mode 100644 index 0000000..a1b1ac9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser_test.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "testing" +) + +const testFixture = "fixtures/foo.go" + +func TestParseEmptyInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 0, len(pkg.Functions)) +} + +func TestParseNonInterfaceType(t *testing.T) { + _, err := Parse(testFixture, "wobble") + if _, ok := err.(errUnexpectedType); !ok { + t.Fatal("expected type error when parsing non-interface type") + } +} + +func TestParseWithOneFunction(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer2") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 1, len(pkg.Functions)) + assertName(t, "Foo", pkg.Functions[0].Name) + assertNum(t, 0, len(pkg.Functions[0].Args)) + assertNum(t, 0, len(pkg.Functions[0].Returns)) +} + +func TestParseWithMultipleFuncs(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer3") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 7, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Bar", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + f = pkg.Functions[2] + assertName(t, "Baz", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[3] + assertName(t, "Qux", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", f.Args[0].Name) + assertName(t, "string", f.Args[0].ArgType) + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "val", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[4] + assertName(t, "Wobble", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "*wobble", arg.ArgType) + + f = pkg.Functions[5] + assertName(t, "Wiggle", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "wobble", arg.ArgType) + + f = pkg.Functions[6] + assertName(t, "WiggleWobble", f.Name) + assertNum(t, 6, len(f.Args)) + assertNum(t, 6, len(f.Returns)) + expectedArgs := [][]string{ + {"a", "[]*wobble"}, + {"b", "[]wobble"}, + {"c", "map[string]*wobble"}, + {"d", "map[*wobble]wobble"}, + {"e", "map[string][]wobble"}, + {"f", "[]*otherfixture.Spaceship"}, + } + for i, arg := range f.Args { + assertName(t, expectedArgs[i][0], arg.Name) + assertName(t, expectedArgs[i][1], arg.ArgType) + } + expectedReturns := [][]string{ + {"g", "map[*wobble]wobble"}, + {"h", "[][]*wobble"}, + {"i", "otherfixture.Spaceship"}, + {"j", "*otherfixture.Spaceship"}, + {"k", "map[*otherfixture.Spaceship]otherfixture.Spaceship"}, + {"l", "[]otherfixture.Spaceship"}, + } + for i, ret := range f.Returns { + assertName(t, expectedReturns[i][0], ret.Name) + assertName(t, expectedReturns[i][1], ret.ArgType) + } +} + +func TestParseWithUnamedReturn(t *testing.T) { + _, err := Parse(testFixture, "Fooer4") + if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { + t.Fatalf("expected ErrBadReturn, got %v", err) + } +} + +func TestEmbeddedInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer5") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 2, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Boo", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[0] + assertName(t, "s", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) +} + +func TestParsedImports(t *testing.T) { + cases := []string{"Fooer6", "Fooer7", "Fooer8", "Fooer9", "Fooer10", "Fooer11"} + for _, testCase := range cases { + pkg, err := Parse(testFixture, testCase) + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + importPath := strings.Split(pkg.Imports[0].Path, "/") + assertName(t, "otherfixture\"", importPath[len(importPath)-1]) + assertName(t, "", pkg.Imports[0].Name) + } +} + +func TestAliasedImports(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer12") + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + assertName(t, "aliasedio", pkg.Imports[0].Name) +} + +func assertName(t *testing.T, expected, actual string) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual)) + } +} + +func assertNum(t *testing.T, expected, actual int) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual)) + } +} + +func fatalOut(t *testing.T, msg string) { + _, file, ln, _ := runtime.Caller(2) + t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg) +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/template.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/template.go new file mode 100644 index 0000000..50ed929 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/template.go @@ -0,0 +1,118 @@ +package main + +import ( + "strings" + "text/template" +) + +func printArgs(args []arg) string { + var argStr []string + for _, arg := range args { + argStr = append(argStr, arg.String()) + } + return strings.Join(argStr, ", ") +} + +func buildImports(specs []importSpec) string { + if len(specs) == 0 { + return `import "errors"` + } + imports := "import(\n" + imports += "\t\"errors\"\n" + for _, i := range specs { + imports += "\t" + i.String() + "\n" + } + imports += ")" + return imports +} + +func marshalType(t string) string { + switch t { + case "error": + // convert error types to plain strings to ensure the values are encoded/decoded properly + return "string" + default: + return t + } +} + +func isErr(t string) bool { + switch t { + case "error": + return true + default: + return false + } +} + +// Need to use this helper due to issues with go-vet +func buildTag(s string) string { + return "+build " + s +} + +var templFuncs = template.FuncMap{ + "printArgs": printArgs, + "marshalType": marshalType, + "isErr": isErr, + "lower": strings.ToLower, + "title": title, + "tag": buildTag, + "imports": buildImports, +} + +func title(s string) string { + if strings.ToLower(s) == "id" { + return "ID" + } + return strings.Title(s) +} + +var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(` +// generated code - DO NOT EDIT +{{ range $k, $v := .BuildTags }} + // {{ tag $k }} {{ end }} + +package {{ .Name }} + +{{ imports .Imports }} + +type client interface{ + Call(string, interface{}, interface{}) error +} + +type {{ .InterfaceType }}Proxy struct { + client +} + +{{ range .Functions }} + type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{ + {{ range .Args }} + {{ title .Name }} {{ .ArgType }} {{ end }} + } + + type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{ + {{ range .Returns }} + {{ title .Name }} {{ marshalType .ArgType }} {{ end }} + } + + func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) { + var( + req {{ $.InterfaceType }}Proxy{{ .Name }}Request + ret {{ $.InterfaceType }}Proxy{{ .Name }}Response + ) + {{ range .Args }} + req.{{ title .Name }} = {{ lower .Name }} {{ end }} + if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil { + return + } + {{ range $r := .Returns }} + {{ if isErr .ArgType }} + if ret.{{ title .Name }} != "" { + {{ lower .Name }} = errors.New(ret.{{ title .Name }}) + } {{ end }} + {{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }} + + return + } +{{ end }} +`)) diff --git a/vendor/github.com/moby/moby/pkg/plugins/plugins.go b/vendor/github.com/moby/moby/pkg/plugins/plugins.go new file mode 100644 index 0000000..c0059cb --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/plugins.go @@ -0,0 +1,329 @@ +// Package plugins provides structures and helper functions to manage Docker +// plugins. +// +// Docker discovers plugins by looking for them in the plugin directory whenever +// a user or container tries to use one by name. UNIX domain socket files must +// be located under /run/docker/plugins, whereas spec files can be located +// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled +// by the Registry interface, which lets you list all plugins or get a plugin by +// its name if it exists. +// +// The plugins need to implement an HTTP server and bind this to the UNIX socket +// or the address specified in the spec files. +// A handshake is send at /Plugin.Activate, and plugins are expected to return +// a Manifest with a list of of Docker subsystems which this plugin implements. +// +// In order to use a plugins, you can use the ``Get`` with the name of the +// plugin and the subsystem it implements. +// +// plugin, err := plugins.Get("example", "VolumeDriver") +// if err != nil { +// return fmt.Errorf("Error looking up volume plugin example: %v", err) +// } +package plugins + +import ( + "errors" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrNotImplements is returned if the plugin does not implement the requested driver. + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +type extpointHandlers struct { + sync.RWMutex + extpointHandlers map[string][]func(string, *Client) +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} +) + +// Manifest lists what a plugin implements. +type Manifest struct { + // List of subsystem the plugin implements. + Implements []string +} + +// Plugin is the definition of a docker plugin. +type Plugin struct { + // Name of the plugin + name string + // Address of the plugin + Addr string + // TLS configuration of the plugin + TLSConfig *tlsconfig.Options + // Client attached to the plugin + client *Client + // Manifest of the plugin (see above) + Manifest *Manifest `json:"-"` + + // wait for activation to finish + activateWait *sync.Cond + // error produced by activation + activateErr error + // keeps track of callback handlers run against this plugin + handlersRun bool +} + +// Name returns the name of the plugin. +func (p *Plugin) Name() string { + return p.name +} + +// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. +func (p *Plugin) Client() *Client { + return p.client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return true +} + +// NewLocalPlugin creates a new local plugin. +func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + + if p.activated() { + p.runHandlers() + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + + p.runHandlers() + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +// runHandlers runs the registered handlers for the implemented plugin types +// This should only be run after activation, and while the activation lock is held. +func (p *Plugin) runHandlers() { + if !p.activated() { + return + } + + handlers.RLock() + if !p.handlersRun { + for _, iface := range p.Manifest.Implements { + hdlrs, handled := handlers.extpointHandlers[iface] + if !handled { + continue + } + for _, handler := range hdlrs { + handler(p.name, p.client) + } + } + p.handlersRun = true + } + handlers.RUnlock() + +} + +// activated returns if the plugin has already been activated. +// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. +func Handle(iface string, fn func(string, *Client)) { + handlers.Lock() + hdlrs, ok := handlers.extpointHandlers[iface] + if !ok { + hdlrs = []func(string, *Client){} + } + + hdlrs = append(hdlrs, fn) + handlers.extpointHandlers[iface] = hdlrs + + storage.Lock() + for _, p := range storage.plugins { + p.activateWait.L.Lock() + if p.activated() && p.implements(iface) { + p.handlersRun = false + } + p.activateWait.L.Unlock() + } + storage.Unlock() + + handlers.Unlock() +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/plugins_linux.go b/vendor/github.com/moby/moby/pkg/plugins/plugins_linux.go new file mode 100644 index 0000000..9c5a0b5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/plugins_linux.go @@ -0,0 +1,7 @@ +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For v1 plugins, this always returns the host's root directory. +func (p *Plugin) BasePath() string { + return "/" +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/plugins_windows.go b/vendor/github.com/moby/moby/pkg/plugins/plugins_windows.go new file mode 100644 index 0000000..3c8d8fe --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/plugins_windows.go @@ -0,0 +1,8 @@ +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Windows v1 plugins, this returns an empty string, since the plugin is already aware +// of the absolute path of the mount. +func (p *Plugin) BasePath() string { + return "" +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/transport/http.go b/vendor/github.com/moby/moby/pkg/plugins/transport/http.go new file mode 100644 index 0000000..5be146a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. +func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. +func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/transport/transport.go b/vendor/github.com/moby/moby/pkg/plugins/transport/transport.go new file mode 100644 index 0000000..d7f1e21 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. +func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/vendor/github.com/moby/moby/pkg/pools/pools.go b/vendor/github.com/moby/moby/pkg/pools/pools.go new file mode 100644 index 0000000..5c5aead --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pools/pools.go @@ -0,0 +1,116 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/moby/moby/pkg/pools/pools_test.go b/vendor/github.com/moby/moby/pkg/pools/pools_test.go new file mode 100644 index 0000000..1661b78 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pools/pools_test.go @@ -0,0 +1,161 @@ +package pools + +import ( + "bufio" + "bytes" + "io" + "strings" + "testing" +) + +func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + reader := BufioReader32KPool.Get(nil) + if reader == nil { + t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") + } +} + +func TestBufioReaderPoolPutAndGet(t *testing.T) { + sr := bufio.NewReader(strings.NewReader("foobar")) + reader := BufioReader32KPool.Get(sr) + if reader == nil { + t.Fatalf("BufioReaderPool should not return a nil reader.") + } + // verify the first 3 byte + buf1 := make([]byte, 3) + _, err := reader.Read(buf1) + if err != nil { + t.Fatal(err) + } + if actual := string(buf1); actual != "foo" { + t.Fatalf("The first letter should have been 'foo' but was %v", actual) + } + BufioReader32KPool.Put(reader) + // Try to read the next 3 bytes + _, err = sr.Read(make([]byte, 3)) + if err == nil || err != io.EOF { + t.Fatalf("The buffer should have been empty, issue an EOF error.") + } +} + +type simpleReaderCloser struct { + io.Reader + closed bool +} + +func (r *simpleReaderCloser) Close() error { + r.closed = true + return nil +} + +func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { + br := bufio.NewReader(strings.NewReader("")) + sr := &simpleReaderCloser{ + Reader: strings.NewReader("foobar"), + closed: false, + } + reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) + if reader == nil { + t.Fatalf("NewReadCloserWrapper should not return a nil reader.") + } + // Verify the content of reader + buf := make([]byte, 3) + _, err := reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "foo" { + t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) + } + reader.Close() + // Read 3 more bytes "bar" + _, err = reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "bar" { + t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) + } + if !sr.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} + +func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + writer := BufioWriter32KPool.Get(nil) + if writer == nil { + t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") + } +} + +func TestBufioWriterPoolPutAndGet(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + writer := BufioWriter32KPool.Get(bw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + // Make sure we Flush all the way ? + writer.Flush() + bw.Flush() + if len(buf.Bytes()) != 6 { + t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) + } + // Reset the buffer + buf.Reset() + BufioWriter32KPool.Put(writer) + // Try to write something + if _, err = writer.Write([]byte("barfoo")); err != nil { + t.Fatal(err) + } + // If we now try to flush it, it should panic (the writer is nil) + // recover it + defer func() { + if r := recover(); r == nil { + t.Fatal("Trying to flush the writter should have 'paniced', did not.") + } + }() + writer.Flush() +} + +type simpleWriterCloser struct { + io.Writer + closed bool +} + +func (r *simpleWriterCloser) Close() error { + r.closed = true + return nil +} + +func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + sw := &simpleWriterCloser{ + Writer: new(bytes.Buffer), + closed: false, + } + bw.Flush() + writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + writer.Close() + if !sw.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} diff --git a/vendor/github.com/moby/moby/pkg/progress/progress.go b/vendor/github.com/moby/moby/pkg/progress/progress.go new file mode 100644 index 0000000..fcf3117 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/progress/progress.go @@ -0,0 +1,84 @@ +package progress + +import ( + "fmt" +) + +// Progress represents the progress of a transfer. +type Progress struct { + ID string + + // Progress contains a Message or... + Message string + + // ...progress of an action + Action string + Current int64 + Total int64 + + // Aux contains extra information not presented to the user, such as + // digests for push signing. + Aux interface{} + + LastUpdate bool +} + +// Output is an interface for writing progress information. It's +// like a writer for progress, but we don't call it Writer because +// that would be confusing next to ProgressReader (also, because it +// doesn't implement the io.Writer interface). +type Output interface { + WriteProgress(Progress) error +} + +type chanOutput chan<- Progress + +func (out chanOutput) WriteProgress(p Progress) error { + out <- p + return nil +} + +// ChanOutput returns an Output that writes progress updates to the +// supplied channel. +func ChanOutput(progressChan chan<- Progress) Output { + return chanOutput(progressChan) +} + +type discardOutput struct{} + +func (discardOutput) WriteProgress(Progress) error { + return nil +} + +// DiscardOutput returns an Output that discards progress +func DiscardOutput() Output { + return discardOutput{} +} + +// Update is a convenience function to write a progress update to the channel. +func Update(out Output, id, action string) { + out.WriteProgress(Progress{ID: id, Action: action}) +} + +// Updatef is a convenience function to write a printf-formatted progress update +// to the channel. +func Updatef(out Output, id, format string, a ...interface{}) { + Update(out, id, fmt.Sprintf(format, a...)) +} + +// Message is a convenience function to write a progress message to the channel. +func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. +func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/vendor/github.com/moby/moby/pkg/progress/progressreader.go b/vendor/github.com/moby/moby/pkg/progress/progressreader.go new file mode 100644 index 0000000..6b3927e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/progress/progressreader.go @@ -0,0 +1,66 @@ +package progress + +import ( + "io" + "time" + + "golang.org/x/time/rate" +) + +// Reader is a Reader with progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string + rateLimiter *rate.Limiter +} + +// NewProgressReader creates a new ProgressReader. +func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) //512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. +func (p *Reader) Close() error { + if p.current < p.size { + // print a full progress bar when closing prematurely + p.current = p.size + p.updateProgress(false) + } + return p.in.Close() +} + +func (p *Reader) updateProgress(last bool) { + if last || p.current == p.size || p.rateLimiter.Allow() { + p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) + } +} diff --git a/vendor/github.com/moby/moby/pkg/progress/progressreader_test.go b/vendor/github.com/moby/moby/pkg/progress/progressreader_test.go new file mode 100644 index 0000000..b14d401 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/progress/progressreader_test.go @@ -0,0 +1,75 @@ +package progress + +import ( + "bytes" + "io" + "io/ioutil" + "testing" +) + +func TestOutputOnPrematureClose(t *testing.T) { + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + progressChan := make(chan Progress, 10) + + pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") + + part := make([]byte, 4, 4) + _, err := io.ReadFull(pr, part) + if err != nil { + pr.Close() + t.Fatal(err) + } + +drainLoop: + for { + select { + case <-progressChan: + default: + break drainLoop + } + } + + pr.Close() + + select { + case <-progressChan: + default: + t.Fatalf("Expected some output when closing prematurely") + } +} + +func TestCompleteSilently(t *testing.T) { + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + progressChan := make(chan Progress, 10) + + pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") + + out, err := ioutil.ReadAll(pr) + if err != nil { + pr.Close() + t.Fatal(err) + } + if string(out) != "TESTING" { + pr.Close() + t.Fatalf("Unexpected output %q from reader", string(out)) + } + +drainLoop: + for { + select { + case <-progressChan: + default: + break drainLoop + } + } + + pr.Close() + + select { + case <-progressChan: + t.Fatalf("Should have closed silently when read is complete") + default: + } +} diff --git a/vendor/github.com/moby/moby/pkg/promise/promise.go b/vendor/github.com/moby/moby/pkg/promise/promise.go new file mode 100644 index 0000000..dd52b90 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. +func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/vendor/github.com/moby/moby/pkg/pubsub/publisher.go b/vendor/github.com/moby/moby/pkg/pubsub/publisher.go new file mode 100644 index 0000000..0936461 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pubsub/publisher.go @@ -0,0 +1,111 @@ +package pubsub + +import ( + "sync" + "time" +) + +var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} + +// NewPublisher creates a new pub/sub publisher to broadcast messages. +// The duration is used as the send timeout as to not block the publisher publishing +// messages to other clients if one client is slow or unresponsive. +// The buffer is used when creating new channels for subscribers. +func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { + return &Publisher{ + buffer: buffer, + timeout: publishTimeout, + subscribers: make(map[subscriber]topicFunc), + } +} + +type subscriber chan interface{} +type topicFunc func(v interface{}) bool + +// Publisher is basic pub/sub structure. Allows to send events and subscribe +// to them. Can be safely used from multiple goroutines. +type Publisher struct { + m sync.RWMutex + buffer int + timeout time.Duration + subscribers map[subscriber]topicFunc +} + +// Len returns the number of subscribers for the publisher +func (p *Publisher) Len() int { + p.m.RLock() + i := len(p.subscribers) + p.m.RUnlock() + return i +} + +// Subscribe adds a new subscriber to the publisher returning the channel. +func (p *Publisher) Subscribe() chan interface{} { + return p.SubscribeTopic(nil) +} + +// SubscribeTopic adds a new subscriber that filters messages sent by a topic. +func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { + ch := make(chan interface{}, p.buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + +// Evict removes the specified subscriber from receiving any more messages. +func (p *Publisher) Evict(sub chan interface{}) { + p.m.Lock() + delete(p.subscribers, sub) + close(sub) + p.m.Unlock() +} + +// Publish sends the data in v to all subscribers currently registered with the publisher. +func (p *Publisher) Publish(v interface{}) { + p.m.RLock() + if len(p.subscribers) == 0 { + p.m.RUnlock() + return + } + + wg := wgPool.Get().(*sync.WaitGroup) + for sub, topic := range p.subscribers { + wg.Add(1) + go p.sendTopic(sub, topic, v, wg) + } + wg.Wait() + wgPool.Put(wg) + p.m.RUnlock() +} + +// Close closes the channels to all subscribers registered with the publisher. +func (p *Publisher) Close() { + p.m.Lock() + for sub := range p.subscribers { + delete(p.subscribers, sub) + close(sub) + } + p.m.Unlock() +} + +func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { + defer wg.Done() + if topic != nil && !topic(v) { + return + } + + // send under a select as to not block if the receiver is unavailable + if p.timeout > 0 { + select { + case sub <- v: + case <-time.After(p.timeout): + } + return + } + + select { + case sub <- v: + default: + } +} diff --git a/vendor/github.com/moby/moby/pkg/pubsub/publisher_test.go b/vendor/github.com/moby/moby/pkg/pubsub/publisher_test.go new file mode 100644 index 0000000..d6b0a1d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pubsub/publisher_test.go @@ -0,0 +1,142 @@ +package pubsub + +import ( + "fmt" + "testing" + "time" +) + +func TestSendToOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + c := p.Subscribe() + + p.Publish("hi") + + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestSendToMultipleSubs(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) + + p.Publish("hi") + + for _, c := range subs { + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } + } +} + +func TestEvictOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + s1 := p.Subscribe() + s2 := p.Subscribe() + + p.Evict(s1) + p.Publish("hi") + if _, ok := <-s1; ok { + t.Fatal("expected s1 to not receive the published message") + } + + msg := <-s2 + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestClosePublisher(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) + p.Close() + + for _, c := range subs { + if _, ok := <-c; ok { + t.Fatal("expected all subscriber channels to be closed") + } + } +} + +const sampleText = "test" + +type testSubscriber struct { + dataCh chan interface{} + ch chan error +} + +func (s *testSubscriber) Wait() error { + return <-s.ch +} + +func newTestSubscriber(p *Publisher) *testSubscriber { + ts := &testSubscriber{ + dataCh: p.Subscribe(), + ch: make(chan error), + } + go func() { + for data := range ts.dataCh { + s, ok := data.(string) + if !ok { + ts.ch <- fmt.Errorf("Unexpected type %T", data) + break + } + if s != sampleText { + ts.ch <- fmt.Errorf("Unexpected text %s", s) + break + } + } + close(ts.ch) + }() + return ts +} + +// for testing with -race +func TestPubSubRace(t *testing.T) { + p := NewPublisher(0, 1024) + var subs [](*testSubscriber) + for j := 0; j < 50; j++ { + subs = append(subs, newTestSubscriber(p)) + } + for j := 0; j < 1000; j++ { + p.Publish(sampleText) + } + time.AfterFunc(1*time.Second, func() { + for _, s := range subs { + p.Evict(s.dataCh) + } + }) + for _, s := range subs { + s.Wait() + } +} + +func BenchmarkPubSub(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + p := NewPublisher(0, 1024) + var subs [](*testSubscriber) + for j := 0; j < 50; j++ { + subs = append(subs, newTestSubscriber(p)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + p.Publish(sampleText) + } + time.AfterFunc(1*time.Second, func() { + for _, s := range subs { + p.Evict(s.dataCh) + } + }) + for _, s := range subs { + if err := s.Wait(); err != nil { + b.Fatal(err) + } + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/random/random.go b/vendor/github.com/moby/moby/pkg/random/random.go new file mode 100644 index 0000000..70de4d1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/random/random.go @@ -0,0 +1,71 @@ +package random + +import ( + cryptorand "crypto/rand" + "io" + "math" + "math/big" + "math/rand" + "sync" + "time" +) + +// Rand is a global *rand.Rand instance, which initialized with NewSource() source. +var Rand = rand.New(NewSource()) + +// Reader is a global, shared instance of a pseudorandom bytes generator. +// It doesn't consume entropy. +var Reader io.Reader = &reader{rnd: Rand} + +// copypaste from standard math/rand +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// NewSource returns math/rand.Source safe for concurrent use and initialized +// with current unix-nano timestamp +func NewSource() rand.Source { + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + return &lockedSource{ + src: rand.NewSource(seed), + } +} + +type reader struct { + rnd *rand.Rand +} + +func (r *reader) Read(b []byte) (int, error) { + i := 0 + for { + val := r.rnd.Int63() + for val > 0 { + b[i] = byte(val) + i++ + if i == len(b) { + return i, nil + } + val >>= 8 + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/random/random_test.go b/vendor/github.com/moby/moby/pkg/random/random_test.go new file mode 100644 index 0000000..cf405f7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/random/random_test.go @@ -0,0 +1,22 @@ +package random + +import ( + "math/rand" + "sync" + "testing" +) + +// for go test -v -race +func TestConcurrency(t *testing.T) { + rnd := rand.New(NewSource()) + var wg sync.WaitGroup + + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + rnd.Int63() + wg.Done() + }() + } + wg.Wait() +} diff --git a/vendor/github.com/moby/moby/pkg/reexec/README.md b/vendor/github.com/moby/moby/pkg/reexec/README.md new file mode 100644 index 0000000..45592ce --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/reexec/README.md @@ -0,0 +1,5 @@ +## reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/moby/moby/pkg/reexec/command_linux.go b/vendor/github.com/moby/moby/pkg/reexec/command_linux.go new file mode 100644 index 0000000..34ae2a9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which has Path as current binary. Also it setting +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/vendor/github.com/moby/moby/pkg/reexec/command_unix.go b/vendor/github.com/moby/moby/pkg/reexec/command_unix.go new file mode 100644 index 0000000..778a720 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd solaris darwin + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/moby/moby/pkg/reexec/command_unsupported.go b/vendor/github.com/moby/moby/pkg/reexec/command_unsupported.go new file mode 100644 index 0000000..76edd82 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/reexec/command_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!freebsd,!solaris,!darwin + +package reexec + +import ( + "os/exec" +) + +// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/reexec/command_windows.go b/vendor/github.com/moby/moby/pkg/reexec/command_windows.go new file mode 100644 index 0000000..ca871c4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/reexec/command_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/moby/moby/pkg/reexec/reexec.go b/vendor/github.com/moby/moby/pkg/reexec/reexec.go new file mode 100644 index 0000000..c56671d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/moby/moby/pkg/registrar/registrar.go b/vendor/github.com/moby/moby/pkg/registrar/registrar.go new file mode 100644 index 0000000..1e75ee9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/registrar/registrar.go @@ -0,0 +1,127 @@ +// Package registrar provides name registration. It reserves a name to a given key. +package registrar + +import ( + "errors" + "sync" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") + // ErrNoSuchKey is returned when trying to find the names for a key which is not known + ErrNoSuchKey = errors.New("provided key does not exist") +) + +// Registrar stores indexes a list of keys and their registered names as well as indexes names and the key that they are registered to +// Names must be unique. +// Registrar is safe for concurrent access. +type Registrar struct { + idx map[string][]string + names map[string]string + mu sync.Mutex +} + +// NewRegistrar creates a new Registrar with the an empty index +func NewRegistrar() *Registrar { + return &Registrar{ + idx: make(map[string][]string), + names: make(map[string]string), + } +} + +// Reserve registers a key to a name +// Reserve is idempotent +// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` +// A name reservation is globally unique +func (r *Registrar) Reserve(name, key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if k, exists := r.names[name]; exists { + if k != key { + return ErrNameReserved + } + return nil + } + + r.idx[key] = append(r.idx[key], name) + r.names[name] = key + return nil +} + +// Release releases the reserved name +// Once released, a name can be reserved again +func (r *Registrar) Release(name string) { + r.mu.Lock() + defer r.mu.Unlock() + + key, exists := r.names[name] + if !exists { + return + } + + for i, n := range r.idx[key] { + if n != name { + continue + } + r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) + break + } + + delete(r.names, name) + + if len(r.idx[key]) == 0 { + delete(r.idx, key) + } +} + +// Delete removes all reservations for the passed in key. +// All names reserved to this key are released. +func (r *Registrar) Delete(key string) { + r.mu.Lock() + for _, name := range r.idx[key] { + delete(r.names, name) + } + delete(r.idx, key) + r.mu.Unlock() +} + +// GetNames lists all the reserved names for the given key +func (r *Registrar) GetNames(key string) ([]string, error) { + r.mu.Lock() + defer r.mu.Unlock() + + names, exists := r.idx[key] + if !exists { + return nil, ErrNoSuchKey + } + return names, nil +} + +// Get returns the key that the passed in name is reserved to +func (r *Registrar) Get(name string) (string, error) { + r.mu.Lock() + key, exists := r.names[name] + r.mu.Unlock() + + if !exists { + return "", ErrNameNotReserved + } + return key, nil +} + +// GetAll returns all registered names +func (r *Registrar) GetAll() map[string][]string { + out := make(map[string][]string) + + r.mu.Lock() + // copy index into out + for id, names := range r.idx { + out[id] = names + } + r.mu.Unlock() + return out +} diff --git a/vendor/github.com/moby/moby/pkg/registrar/registrar_test.go b/vendor/github.com/moby/moby/pkg/registrar/registrar_test.go new file mode 100644 index 0000000..0c1ef31 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/registrar/registrar_test.go @@ -0,0 +1,119 @@ +package registrar + +import ( + "reflect" + "testing" +) + +func TestReserve(t *testing.T) { + r := NewRegistrar() + + obj := "test1" + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } + + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } + + obj2 := "test2" + err := r.Reserve("test", obj2) + if err == nil { + t.Fatalf("expected error when reserving an already reserved name to another object") + } + if err != ErrNameReserved { + t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name") + } +} + +func TestRelease(t *testing.T) { + r := NewRegistrar() + obj := "testing" + + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } + r.Release("test") + r.Release("test") // Ensure there is no panic here + + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } +} + +func TestGetNames(t *testing.T) { + r := NewRegistrar() + obj := "testing" + names := []string{"test1", "test2"} + + for _, name := range names { + if err := r.Reserve(name, obj); err != nil { + t.Fatal(err) + } + } + r.Reserve("test3", "other") + + names2, err := r.GetNames(obj) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(names, names2) { + t.Fatalf("Exepected: %v, Got: %v", names, names2) + } +} + +func TestDelete(t *testing.T) { + r := NewRegistrar() + obj := "testing" + names := []string{"test1", "test2"} + for _, name := range names { + if err := r.Reserve(name, obj); err != nil { + t.Fatal(err) + } + } + + r.Reserve("test3", "other") + r.Delete(obj) + + _, err := r.GetNames(obj) + if err == nil { + t.Fatal("expected error getting names for deleted key") + } + + if err != ErrNoSuchKey { + t.Fatal("expected `ErrNoSuchKey`") + } +} + +func TestGet(t *testing.T) { + r := NewRegistrar() + obj := "testing" + name := "test" + + _, err := r.Get(name) + if err == nil { + t.Fatal("expected error when key does not exist") + } + if err != ErrNameNotReserved { + t.Fatal(err) + } + + if err := r.Reserve(name, obj); err != nil { + t.Fatal(err) + } + + if _, err = r.Get(name); err != nil { + t.Fatal(err) + } + + r.Delete(obj) + _, err = r.Get(name) + if err == nil { + t.Fatal("expected error when key does not exist") + } + if err != ErrNameNotReserved { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/signal/README.md b/vendor/github.com/moby/moby/pkg/signal/README.md new file mode 100644 index 0000000..2b237a5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/vendor/github.com/moby/moby/pkg/signal/signal.go b/vendor/github.com/moby/moby/pkg/signal/signal.go new file mode 100644 index 0000000..68bb77c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal.go @@ -0,0 +1,54 @@ +// Package signal provides helper functions for dealing with signals across +// various operating systems. +package signal + +import ( + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" +) + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := []os.Signal{} + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return signal, nil +} + +// ValidSignalForPlatform returns true if a signal is valid on the platform +func ValidSignalForPlatform(sig syscall.Signal) bool { + for _, v := range SignalMap { + if v == sig { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_darwin.go b/vendor/github.com/moby/moby/pkg/signal/signal_darwin.go new file mode 100644 index 0000000..946de87 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_darwin.go @@ -0,0 +1,41 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Darwin signals. +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUG": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_freebsd.go b/vendor/github.com/moby/moby/pkg/signal/signal_freebsd.go new file mode 100644 index 0000000..6b9569b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_freebsd.go @@ -0,0 +1,43 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of FreeBSD signals. +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUF": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "LWP": syscall.SIGLWP, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "THR": syscall.SIGTHR, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_linux.go b/vendor/github.com/moby/moby/pkg/signal/signal_linux.go new file mode 100644 index 0000000..d418cbe --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_linux.go @@ -0,0 +1,80 @@ +package signal + +import ( + "syscall" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 +) + +// SignalMap is a map of Linux signals. +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUS": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CLD": syscall.SIGCLD, + "CONT": syscall.SIGCONT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "POLL": syscall.SIGPOLL, + "PROF": syscall.SIGPROF, + "PWR": syscall.SIGPWR, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STKFLT": syscall.SIGSTKFLT, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "UNUSED": syscall.SIGUNUSED, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_solaris.go b/vendor/github.com/moby/moby/pkg/signal/signal_solaris.go new file mode 100644 index 0000000..89576b9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_solaris.go @@ -0,0 +1,42 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Solaris signals. +// SIGINFO and SIGTHR not defined for Solaris +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUF": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "LWP": syscall.SIGLWP, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_unix.go b/vendor/github.com/moby/moby/pkg/signal/signal_unix.go new file mode 100644 index 0000000..5d058fd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_unix.go @@ -0,0 +1,21 @@ +// +build !windows + +package signal + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) + +const ( + // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. + SIGCHLD = syscall.SIGCHLD + // SIGWINCH is a signal sent to a process when its controlling terminal changes its size + SIGWINCH = syscall.SIGWINCH + // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading + SIGPIPE = syscall.SIGPIPE + // DefaultStopSignal is the syscall signal used to stop a container in unix systems. + DefaultStopSignal = "SIGTERM" +) diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_unsupported.go b/vendor/github.com/moby/moby/pkg/signal/signal_unsupported.go new file mode 100644 index 0000000..c592d37 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package signal + +import ( + "syscall" +) + +// SignalMap is an empty map of signals for unsupported platform. +var SignalMap = map[string]syscall.Signal{} diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_windows.go b/vendor/github.com/moby/moby/pkg/signal/signal_windows.go new file mode 100644 index 0000000..440f270 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package signal + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) +const ( + SIGCHLD = syscall.Signal(0xff) + SIGWINCH = syscall.Signal(0xff) + SIGPIPE = syscall.Signal(0xff) + // DefaultStopSignal is the syscall signal used to stop a container in windows systems. + DefaultStopSignal = "15" +) + +// SignalMap is a map of "supported" signals. As per the comment in GOLang's +// ztypes_windows.go: "More invented values for signals". Windows doesn't +// really support signals in any way, shape or form that Unix does. +// +// We have these so that docker kill can be used to gracefully (TERM) and +// forcibly (KILL) terminate a container on Windows. +var SignalMap = map[string]syscall.Signal{ + "KILL": syscall.SIGKILL, + "TERM": syscall.SIGTERM, +} diff --git a/vendor/github.com/moby/moby/pkg/signal/trap.go b/vendor/github.com/moby/moby/pkg/signal/trap.go new file mode 100644 index 0000000..638a1ab --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/trap.go @@ -0,0 +1,103 @@ +package signal + +import ( + "fmt" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is +// skipped and the process is terminated immediately (allows force quit of stuck daemon) +// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. +// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while +// the docker daemon is not restarted and also running under systemd. +// Fixes https://github.com/docker/docker/issues/19728 +// +func Trap(cleanup func()) { + c := make(chan os.Signal, 1) + // we will handle INT, TERM, QUIT, SIGPIPE here + signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} + gosignal.Notify(c, signals...) + go func() { + interruptCount := uint32(0) + for sig := range c { + if sig == syscall.SIGPIPE { + continue + } + + go func(sig os.Signal) { + logrus.Infof("Processing signal '%v'", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + if atomic.LoadUint32(&interruptCount) < 3 { + // Initiate the cleanup only once + if atomic.AddUint32(&interruptCount, 1) == 1 { + // Call the provided cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + // 3 SIGTERM/INT signals received; force exit without cleanup + logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + } + case syscall.SIGQUIT: + DumpStacks("") + logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") + } + //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} + +const stacksLogNameTemplate = "goroutine-stacks-%s.log" + +// DumpStacks appends the runtime stack into file in dir and returns full path +// to that file. +func DumpStacks(dir string) (string, error) { + var ( + buf []byte + stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + var f *os.File + if dir != "" { + path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) + var err error + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + return "", errors.Wrap(err, "failed to open file to write the goroutine stacks") + } + defer f.Close() + defer f.Sync() + } else { + f = os.Stderr + } + if _, err := f.Write(buf); err != nil { + return "", errors.Wrap(err, "failed to write goroutine stacks") + } + return f.Name(), nil +} diff --git a/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy.go b/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy.go new file mode 100644 index 0000000..be20765 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy.go @@ -0,0 +1,174 @@ +package stdcopy + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +// StdType is the type of standard stream +// a writer can multiplex to. +type StdType byte + +const ( + // Stdin represents standard input stream type. + Stdin StdType = iota + // Stdout represents standard output stream type. + Stdout + // Stderr represents standard error steam type. + Stderr + + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 +) + +var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} + +// stdWriter is wrapper of io.Writer with extra customized info. +type stdWriter struct { + io.Writer + prefix byte +} + +// Write sends the buffer to the underneath writer. +// It inserts the prefix header before the buffer, +// so stdcopy.StdCopy knows where to multiplex the output. +// It makes stdWriter to implement io.Writer. +func (w *stdWriter) Write(p []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + if p == nil { + return 0, nil + } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) + buf := bufPool.Get().(*bytes.Buffer) + buf.Write(header[:]) + buf.Write(p) + + n, err = w.Writer.Write(buf.Bytes()) + n -= stdWriterPrefixLen + if n < 0 { + n = 0 + } + + buf.Reset() + bufPool.Put(buf) + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), + } +} + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // Check the first byte to know where to write + switch StdType(buf[stdWriterFdIndex]) { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) + if ew != nil { + return 0, ew + } + // If the frame has not been fully written: error + if nw != frameSize { + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+stdWriterPrefixLen:]) + // Move the index + nr -= frameSize + stdWriterPrefixLen + } +} diff --git a/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy_test.go new file mode 100644 index 0000000..3137a75 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy_test.go @@ -0,0 +1,260 @@ +package stdcopy + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "strings" + "testing" +) + +func TestNewStdWriter(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + if writer == nil { + t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") + } +} + +func TestWriteWithUnitializedStdWriter(t *testing.T) { + writer := stdWriter{ + Writer: nil, + prefix: byte(Stdout), + } + n, err := writer.Write([]byte("Something here")) + if n != 0 || err == nil { + t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") + } +} + +func TestWriteWithNilBytes(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + n, err := writer.Write(nil) + if err != nil { + t.Fatalf("Shouldn't have fail when given no data") + } + if n > 0 { + t.Fatalf("Write should have written 0 byte, but has written %d", n) + } +} + +func TestWrite(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test StdWrite.Write") + n, err := writer.Write(data) + if err != nil { + t.Fatalf("Error while writing with StdWrite") + } + if n != len(data) { + t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n) + } +} + +type errWriter struct { + n int + err error +} + +func (f *errWriter) Write(buf []byte) (int, error) { + return f.n, f.err +} + +func TestWriteWithWriterError(t *testing.T) { + expectedError := errors.New("expected") + expectedReturnedBytes := 10 + writer := NewStdWriter(&errWriter{ + n: stdWriterPrefixLen + expectedReturnedBytes, + err: expectedError}, Stdout) + data := []byte("This won't get written, sigh") + n, err := writer.Write(data) + if err != expectedError { + t.Fatalf("Didn't get expected error.") + } + if n != expectedReturnedBytes { + t.Fatalf("Didn't get expected written bytes %d, got %d.", + expectedReturnedBytes, n) + } +} + +func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { + writer := NewStdWriter(&errWriter{n: -1}, Stdout) + data := []byte("This won't get written, sigh") + actual, _ := writer.Write(data) + if actual != 0 { + t.Fatalf("Expected returned written bytes equal to 0, got %d", actual) + } +} + +func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { + buffer = new(bytes.Buffer) + dstOut := NewStdWriter(buffer, Stdout) + _, err = dstOut.Write(stdOutBytes) + if err != nil { + return + } + dstErr := NewStdWriter(buffer, Stderr) + _, err = dstErr.Write(stdErrBytes) + return +} + +func TestStdCopyWriteAndRead(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) + if err != nil { + t.Fatal(err) + } + expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) + if written != int64(expectedTotalWritten) { + t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) + } +} + +type customReader struct { + n int + err error + totalCalls int + correctCalls int + src *bytes.Buffer +} + +func (f *customReader) Read(buf []byte) (int, error) { + f.totalCalls++ + if f.totalCalls <= f.correctCalls { + return f.src.Read(buf) + } + return f.n, f.err +} + +func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { + expectedError := errors.New("error") + reader := &customReader{ + err: expectedError} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { + expectedError := errors.New("error") + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: expectedError, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyDetectsCorruptedFrame(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: io.EOF, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != startingBufLen { + t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written) + } + if err != nil { + t.Fatal("Didn't get nil error") + } +} + +func TestStdCopyWithInvalidInputHeader(t *testing.T) { + dstOut := NewStdWriter(ioutil.Discard, Stdout) + dstErr := NewStdWriter(ioutil.Discard, Stderr) + src := strings.NewReader("Invalid input") + _, err := StdCopy(dstOut, dstErr, src) + if err == nil { + t.Fatal("StdCopy with invalid input header should fail.") + } +} + +func TestStdCopyWithCorruptedPrefix(t *testing.T) { + data := []byte{0x01, 0x02, 0x03} + src := bytes.NewReader(data) + written, err := StdCopy(nil, nil, src) + if err != nil { + t.Fatalf("StdCopy should not return an error with corrupted prefix.") + } + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } +} + +func TestStdCopyReturnsWriteErrors(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + expectedError := errors.New("expected") + + dstOut := &errWriter{err: expectedError} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error, got %v", err) + } +} + +func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + dstOut := &errWriter{n: startingBufLen - 10} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written) + } + if err != io.ErrShortWrite { + t.Fatalf("Didn't get expected io.ErrShortWrite error") + } +} + +func BenchmarkWrite(b *testing.B) { + w := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test line for testing stdwriter performance\n") + data = bytes.Repeat(data, 100) + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := w.Write(data); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter.go b/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter.go new file mode 100644 index 0000000..ce6ea79 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter.go @@ -0,0 +1,172 @@ +// Package streamformatter provides helper functions to format a stream. +package streamformatter + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" +) + +// StreamFormatter formats a stream, optionally using JSON. +type StreamFormatter struct { + json bool +} + +// NewStreamFormatter returns a simple StreamFormatter +func NewStreamFormatter() *StreamFormatter { + return &StreamFormatter{} +} + +// NewJSONStreamFormatter returns a StreamFormatter configured to stream json +func NewJSONStreamFormatter() *StreamFormatter { + return &StreamFormatter{true} +} + +const streamNewline = "\r\n" + +var streamNewlineBytes = []byte(streamNewline) + +// FormatStream formats the specified stream. +func (sf *StreamFormatter) FormatStream(str string) []byte { + if sf.json { + b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + "\r") +} + +// FormatStatus formats the specified objects according to the specified format (and id). +func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { + str := fmt.Sprintf(format, a...) + if sf.json { + b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + streamNewline) +} + +// FormatError formats the specified error. +func (sf *StreamFormatter) FormatError(err error) []byte { + if sf.json { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} + } + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return append(b, streamNewlineBytes...) + } + return []byte("{\"error\":\"format error\"}" + streamNewline) + } + return []byte("Error: " + err.Error() + streamNewline) +} + +// FormatProgress formats the progress information for a specified action. +func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} + } + if sf.json { + var auxJSON *json.RawMessage + if aux != nil { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return nil + } + auxJSON = new(json.RawMessage) + *auxJSON = auxJSONBytes + } + b, err := json.Marshal(&jsonmessage.JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + Aux: auxJSON, + }) + if err != nil { + return nil + } + return append(b, streamNewlineBytes...) + } + endl := "\r" + if progress.String() == "" { + endl += "\n" + } + return []byte(action + " " + progress.String() + endl) +} + +// NewProgressOutput returns a progress.Output object that can be passed to +// progress.NewProgressReader. +func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { + return &progressOutput{ + sf: sf, + out: out, + newLines: newLines, + } +} + +type progressOutput struct { + sf *StreamFormatter + out io.Writer + newLines bool +} + +// WriteProgress formats progress information from a ProgressReader. +func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.FormatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total} + formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.FormatStatus("", "")) + return err + } + + return nil +} + +// StdoutFormatter is a streamFormatter that writes to the standard output. +type StdoutFormatter struct { + io.Writer + *StreamFormatter +} + +func (sf *StdoutFormatter) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +// StderrFormatter is a streamFormatter that writes to the standard error. +type StderrFormatter struct { + io.Writer + *StreamFormatter +} + +func (sf *StderrFormatter) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} diff --git a/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter_test.go b/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter_test.go new file mode 100644 index 0000000..93ec90f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter_test.go @@ -0,0 +1,108 @@ +package streamformatter + +import ( + "encoding/json" + "errors" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/jsonmessage" +) + +func TestFormatStream(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatStream("stream") + if string(res) != "stream"+"\r" { + t.Fatalf("%q", res) + } +} + +func TestFormatJSONStatus(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != "a1\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatSimpleError(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != "Error: Error for formatter\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatStream(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatStream("stream") + if string(res) != `{"stream":"stream"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatStatus(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatSimpleError(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatJSONError(t *testing.T) { + sf := NewJSONStreamFormatter() + err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} + res := sf.FormatError(err) + if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatProgress(t *testing.T) { + sf := NewJSONStreamFormatter() + progress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.FormatProgress("id", "action", progress, nil) + msg := &jsonmessage.JSONMessage{} + if err := json.Unmarshal(res, msg); err != nil { + t.Fatal(err) + } + if msg.ID != "id" { + t.Fatalf("ID must be 'id', got: %s", msg.ID) + } + if msg.Status != "action" { + t.Fatalf("Status must be 'action', got: %s", msg.Status) + } + + // The progress will always be in the format of: + // [=========================> ] 15 B/30 B 404933h7m11s + // The last entry '404933h7m11s' is the timeLeftBox. + // However, the timeLeftBox field may change as progress.String() depends on time.Now(). + // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. + + // Compare the progress strings before the timeLeftBox + expectedProgress := "[=========================> ] 15 B/30 B" + // if terminal column is <= 110, expectedProgressShort is expected. + expectedProgressShort := " 15 B/30 B" + if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) || + strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) { + t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s", + expectedProgress, expectedProgressShort, msg.ProgressMessage) + } + + if !reflect.DeepEqual(msg.Progress, progress) { + t.Fatal("Original progress not equals progress from FormatProgress") + } +} diff --git a/vendor/github.com/moby/moby/pkg/stringid/README.md b/vendor/github.com/moby/moby/pkg/stringid/README.md new file mode 100644 index 0000000..37a5098 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/moby/moby/pkg/stringid/stringid.go b/vendor/github.com/moby/moby/pkg/stringid/stringid.go new file mode 100644 index 0000000..fa35d8b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringid/stringid.go @@ -0,0 +1,69 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + "crypto/rand" + "encoding/hex" + "io" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/pkg/random" +) + +const shortLen = 12 + +var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + if len(id) > shortLen { + id = id[:shortLen] + } + return id +} + +func generateID(crypto bool) string { + b := make([]byte, 32) + r := random.Reader + if crypto { + r = rand.Reader + } + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a unique id. +func GenerateRandomID() string { + return generateID(true) +} + +// GenerateNonCryptoID generates unique id without using cryptographically +// secure sources of random. +// It helps you to save entropy. +func GenerateNonCryptoID() string { + return generateID(false) +} diff --git a/vendor/github.com/moby/moby/pkg/stringid/stringid_test.go b/vendor/github.com/moby/moby/pkg/stringid/stringid_test.go new file mode 100644 index 0000000..8ff6b43 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringid/stringid_test.go @@ -0,0 +1,72 @@ +package stringid + +import ( + "strings" + "testing" +) + +func TestGenerateRandomID(t *testing.T) { + id := GenerateRandomID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestGenerateNonCryptoID(t *testing.T) { + id := GenerateNonCryptoID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestShortenId(t *testing.T) { + id := "90435eec5c4e124e741ef731e118be2fc799a68aba0466ec17717f24ce2ae6a2" + truncID := TruncateID(id) + if truncID != "90435eec5c4e" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenSha256Id(t *testing.T) { + id := "sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba" + truncID := TruncateID(id) + if truncID != "4e38e38c8ce0" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdEmpty(t *testing.T) { + id := "" + truncID := TruncateID(id) + if len(truncID) > len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdInvalid(t *testing.T) { + id := "1234" + truncID := TruncateID(id) + if len(truncID) != len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestIsShortIDNonHex(t *testing.T) { + id := "some non-hex value" + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} + +func TestIsShortIDNotCorrectSize(t *testing.T) { + id := strings.Repeat("a", shortLen+1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } + id = strings.Repeat("a", shortLen-1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} diff --git a/vendor/github.com/moby/moby/pkg/stringutils/README.md b/vendor/github.com/moby/moby/pkg/stringutils/README.md new file mode 100644 index 0000000..b3e4545 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/moby/moby/pkg/stringutils/stringutils.go b/vendor/github.com/moby/moby/pkg/stringutils/stringutils.go new file mode 100644 index 0000000..8e1c812 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringutils/stringutils.go @@ -0,0 +1,101 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" + + "github.com/docker/docker/pkg/random" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[random.Rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). +// For maxlen of 3 and lower, no ellipsis is appended. +func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and a open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/moby/moby/pkg/stringutils/stringutils_test.go b/vendor/github.com/moby/moby/pkg/stringutils/stringutils_test.go new file mode 100644 index 0000000..8af2bdc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringutils/stringutils_test.go @@ -0,0 +1,121 @@ +package stringutils + +import "testing" + +func testLengthHelper(generator func(int) string, t *testing.T) { + expectedLength := 20 + s := generator(expectedLength) + if len(s) != expectedLength { + t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength) + } +} + +func testUniquenessHelper(generator func(int) string, t *testing.T) { + repeats := 25 + set := make(map[string]struct{}, repeats) + for i := 0; i < repeats; i = i + 1 { + str := generator(64) + if len(str) != 64 { + t.Fatalf("Id returned is incorrect: %s", str) + } + if _, ok := set[str]; ok { + t.Fatalf("Random number is repeated") + } + set[str] = struct{}{} + } +} + +func isASCII(s string) bool { + for _, c := range s { + if c > 127 { + return false + } + } + return true +} + +func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { + testLengthHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAsciiStringLength(t *testing.T) { + testLengthHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { + str := GenerateRandomASCIIString(64) + if !isASCII(str) { + t.Fatalf("%s contained non-ascii characters", str) + } +} + +func TestEllipsis(t *testing.T) { + str := "t🐳ststring" + newstr := Ellipsis(str, 3) + if newstr != "t🐳s" { + t.Fatalf("Expected t🐳s, got %s", newstr) + } + newstr = Ellipsis(str, 8) + if newstr != "t🐳sts..." { + t.Fatalf("Expected tests..., got %s", newstr) + } + newstr = Ellipsis(str, 20) + if newstr != "t🐳ststring" { + t.Fatalf("Expected t🐳ststring, got %s", newstr) + } +} + +func TestTruncate(t *testing.T) { + str := "t🐳ststring" + newstr := Truncate(str, 4) + if newstr != "t🐳st" { + t.Fatalf("Expected t🐳st, got %s", newstr) + } + newstr = Truncate(str, 20) + if newstr != "t🐳ststring" { + t.Fatalf("Expected t🐳ststring, got %s", newstr) + } +} + +func TestInSlice(t *testing.T) { + slice := []string{"t🐳st", "in", "slice"} + + test := InSlice(slice, "t🐳st") + if !test { + t.Fatalf("Expected string t🐳st to be in slice") + } + test = InSlice(slice, "SLICE") + if !test { + t.Fatalf("Expected string SLICE to be in slice") + } + test = InSlice(slice, "notinslice") + if test { + t.Fatalf("Expected string notinslice not to be in slice") + } +} + +func TestShellQuoteArgumentsEmpty(t *testing.T) { + actual := ShellQuoteArguments([]string{}) + expected := "" + if actual != expected { + t.Fatalf("Expected an empty string") + } +} + +func TestShellQuoteArguments(t *testing.T) { + simpleString := "simpleString" + complexString := "This is a 'more' complex $tring with some special char *" + actual := ShellQuoteArguments([]string{simpleString, complexString}) + expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'" + if actual != expected { + t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual) + } +} diff --git a/vendor/github.com/moby/moby/pkg/symlink/LICENSE.APACHE b/vendor/github.com/moby/moby/pkg/symlink/LICENSE.APACHE new file mode 100644 index 0000000..34c4ea7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/moby/pkg/symlink/LICENSE.BSD b/vendor/github.com/moby/moby/pkg/symlink/LICENSE.BSD new file mode 100644 index 0000000..9b4f4a2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/moby/moby/pkg/symlink/README.md b/vendor/github.com/moby/moby/pkg/symlink/README.md new file mode 100644 index 0000000..8dba54f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/README.md @@ -0,0 +1,6 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, +as well as a Windows long-path aware version of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/vendor/github.com/moby/moby/pkg/symlink/fs.go b/vendor/github.com/moby/moby/pkg/symlink/fs.go new file mode 100644 index 0000000..f6bc223 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/fs.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an +// absolute path. This function handles paths in a platform-agnostic manner. +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(filepath.FromSlash(path)) + if err != nil { + return "", err + } + root, err = filepath.Abs(filepath.FromSlash(root)) + if err != nil { + return "", err + } + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. +// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. +// Trying to break out from `root` does not constitute an error. +// +// Example: +// If /foo/bar -> /outside, +// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" +// +// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks +// are created and not to create subsequently, additional symlinks that could potentially make a +// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") +// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should +// no longer be considered safely contained in "/foo". +func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it, we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if isDriveOrRoot(cleanP) { + // never Lstat "/" itself, or drive letters on Windows + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if system.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} + +// EvalSymlinks returns the path name after the evaluation of any symbolic +// links. +// If path is relative the result will be relative to the current directory, +// unless one of the components is an absolute symbolic link. +// This version has been updated to support long paths prepended with `\\?\`. +func EvalSymlinks(path string) (string, error) { + return evalSymlinks(path) +} diff --git a/vendor/github.com/moby/moby/pkg/symlink/fs_unix.go b/vendor/github.com/moby/moby/pkg/symlink/fs_unix.go new file mode 100644 index 0000000..2270827 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/fs_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package symlink + +import ( + "path/filepath" +) + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} + +func isDriveOrRoot(p string) bool { + return p == string(filepath.Separator) +} diff --git a/vendor/github.com/moby/moby/pkg/symlink/fs_unix_test.go b/vendor/github.com/moby/moby/pkg/symlink/fs_unix_test.go new file mode 100644 index 0000000..7085c0b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/fs_unix_test.go @@ -0,0 +1,407 @@ +// +build !windows + +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + +package symlink + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// TODO Windows: This needs some serious work to port to Windows. For now, +// turning off testing in this package. + +type dirOrLink struct { + path string + target string +} + +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} + +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("expected an error") + } +} + +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChain(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink g (pointed at by symlink h) take out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/b/h", target: "../g"}, + {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutPath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkToRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSlashDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatalf("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/symlink/fs_windows.go b/vendor/github.com/moby/moby/pkg/symlink/fs_windows.go new file mode 100644 index 0000000..241e531 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/fs_windows.go @@ -0,0 +1,169 @@ +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/longpath" +) + +func toShort(path string) (string, error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetShortPathName says we can reuse buffer + n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { + return "", err + } + } + return syscall.UTF16ToString(b), nil +} + +func toLong(path string) (string, error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + b = b[:n] + return syscall.UTF16ToString(b), nil +} + +func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + + p, err := toShort(path) + if err != nil { + return "", err + } + p, err = toLong(p) + if err != nil { + return "", err + } + // syscall.GetLongPathName does not change the case of the drive letter, + // but the result of EvalSymlinks must be unique, so we have + // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). + // Make drive letter upper case. + if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { + p = string(p[0]+'A'-'a') + p[1:] + } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { + p = p[:3] + string(p[4]+'A'-'a') + p[5:] + } + return filepath.Clean(p), nil +} + +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { + const maxIter = 255 + originalPath := path + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("EvalSymlinks: too many links in " + originalPath) + } + + // A path beginning with `\\?\` represents the root, so automatically + // skip that part and begin processing the next segment. + if strings.HasPrefix(path, longpath.Prefix) { + b.WriteString(longpath.Prefix) + path = path[4:] + continue + } + + // find next path component, p + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + if b.Len() == 0 { + // must be absolute path + b.WriteRune(filepath.Separator) + } + continue + } + + // If this is the first segment after the long path prefix, accept the + // current segment as a volume root or UNC share and move on to the next. + if b.String() == longpath.Prefix { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + fi, err := os.Lstat(b.String() + p) + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { + b.WriteRune(filepath.Separator) + } + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(b.String() + p) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + return filepath.Clean(b.String()), nil +} + +func isDriveOrRoot(p string) bool { + if p == string(filepath.Separator) { + return true + } + + length := len(p) + if length >= 2 { + if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/README.md b/vendor/github.com/moby/moby/pkg/sysinfo/README.md new file mode 100644 index 0000000..c1530ce --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/numcpu.go b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu.go new file mode 100644 index 0000000..aeb1a3a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu.go @@ -0,0 +1,12 @@ +// +build !linux,!windows + +package sysinfo + +import ( + "runtime" +) + +// NumCPU returns the number of CPUs +func NumCPU() int { + return runtime.NumCPU() +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_linux.go new file mode 100644 index 0000000..5eacd35 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_linux.go @@ -0,0 +1,43 @@ +// +build linux + +package sysinfo + +import ( + "runtime" + "syscall" + "unsafe" +) + +// numCPU queries the system for the count of threads available +// for use to this process. +// +// Issues two syscalls. +// Returns 0 on errors. Use |runtime.NumCPU| in that case. +func numCPU() int { + // Gets the affinity mask for a process: The very one invoking this function. + pid, _, _ := syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0) + + var mask [1024 / 64]uintptr + _, _, err := syscall.RawSyscall(syscall.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) + if err != 0 { + return 0 + } + + // For every available thread a bit is set in the mask. + ncpu := 0 + for _, e := range mask { + if e == 0 { + continue + } + ncpu += int(popcnt(uint64(e))) + } + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_windows.go new file mode 100644 index 0000000..1d89dd5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package sysinfo + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + kernel32 = windows.NewLazySystemDLL("kernel32.dll") + getCurrentProcess = kernel32.NewProc("GetCurrentProcess") + getProcessAffinityMask = kernel32.NewProc("GetProcessAffinityMask") +) + +func numCPU() int { + // Gets the affinity mask for a process + var mask, sysmask uintptr + currentProcess, _, _ := getCurrentProcess.Call() + ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) + if ret == 0 { + return 0 + } + // For every available thread a bit is set in the mask. + ncpu := int(popcnt(uint64(mask))) + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo.go new file mode 100644 index 0000000..f046de4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo.go @@ -0,0 +1,144 @@ +package sysinfo + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. +type SysInfo struct { + // Whether the kernel supports AppArmor or not + AppArmor bool + // Whether the kernel supports Seccomp or not + Seccomp bool + + cgroupMemInfo + cgroupCPUInfo + cgroupBlkioInfo + cgroupCpusetInfo + cgroupPids + + // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work + IPv4ForwardingDisabled bool + + // Whether bridge-nf-call-iptables is supported or not + BridgeNFCallIPTablesDisabled bool + + // Whether bridge-nf-call-ip6tables is supported or not + BridgeNFCallIP6TablesDisabled bool + + // Whether the cgroup has the mountpoint of "devices" or not + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + // Whether memory limit is supported or not + MemoryLimit bool + + // Whether swap limit is supported or not + SwapLimit bool + + // Whether soft limit is supported or not + MemoryReservation bool + + // Whether OOM killer disable is supported or not + OomKillDisable bool + + // Whether memory swappiness is supported or not + MemorySwappiness bool + + // Whether kernel memory limit is supported or not + KernelMemory bool +} + +type cgroupCPUInfo struct { + // Whether CPU shares is supported or not + CPUShares bool + + // Whether CPU CFS(Completely Fair Scheduler) period is supported or not + CPUCfsPeriod bool + + // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not + CPUCfsQuota bool + + // Whether CPU real-time period is supported or not + CPURealtimePeriod bool + + // Whether CPU real-time runtime is supported or not + CPURealtimeRuntime bool +} + +type cgroupBlkioInfo struct { + // Whether Block IO weight is supported or not + BlkioWeight bool + + // Whether Block IO weight_device is supported or not + BlkioWeightDevice bool + + // Whether Block IO read limit in bytes per second is supported or not + BlkioReadBpsDevice bool + + // Whether Block IO write limit in bytes per second is supported or not + BlkioWriteBpsDevice bool + + // Whether Block IO read limit in IO per second is supported or not + BlkioReadIOpsDevice bool + + // Whether Block IO write limit in IO per second is supported or not + BlkioWriteIOpsDevice bool +} + +type cgroupCpusetInfo struct { + // Whether Cpuset is supported or not + Cpuset bool + + // Available Cpuset's cpus + Cpus string + + // Available Cpuset's memory nodes + Mems string +} + +type cgroupPids struct { + // Whether Pids Limit is supported or not + PidsLimit bool +} + +// IsCpusetCpusAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.cpus set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Cpus) +} + +// IsCpusetMemsAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.mems set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} + +// Returns bit count of 1, used by NumCPU +func popcnt(x uint64) (n byte) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return byte(x >> 56) +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 0000000..7ad84a8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,259 @@ +package sysinfo + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +const ( + // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. + SeccompModeFilter = uintptr(2) +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts(false) + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. +func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warn("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warn("Your kernel does not support oom control") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warn("Your kernel does not support memory swappiness") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warn("Your kernel does not support kernel memory limit") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. +func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + + cpuRealtimePeriod := cgroupEnabled(mountPoint, "cpu.rt_period_us") + if !quiet && !cpuRealtimePeriod { + logrus.Warn("Your kernel does not support cgroup rt period") + } + + cpuRealtimeRuntime := cgroupEnabled(mountPoint, "cpu.rt_runtime_us") + if !quiet && !cpuRealtimeRuntime { + logrus.Warn("Your kernel does not support cgroup rt runtime") + } + + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + CPURealtimePeriod: cpuRealtimePeriod, + CPURealtimeRuntime: cpuRealtimeRuntime, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. +func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warn("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. +func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. +func checkCgroupPids(quiet bool) cgroupPids { + _, err := cgroups.FindCgroupMountpoint("pids") + if err != nil { + if !quiet { + logrus.Warn(err) + } + return cgroupPids{} + } + + return cgroupPids{ + PidsLimit: true, + } +} + +func cgroupEnabled(mountPoint, name string) bool { + _, err := os.Stat(path.Join(mountPoint, name)) + return err == nil +} + +func readProcBool(path string) bool { + val, err := ioutil.ReadFile(path) + if err != nil { + return false + } + return strings.TrimSpace(string(val)) == "1" +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux_test.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux_test.go new file mode 100644 index 0000000..fae0fdf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux_test.go @@ -0,0 +1,58 @@ +package sysinfo + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" +) + +func TestReadProcBool(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + procFile := filepath.Join(tmpDir, "read-proc-bool") + if err := ioutil.WriteFile(procFile, []byte("1"), 644); err != nil { + t.Fatal(err) + } + + if !readProcBool(procFile) { + t.Fatal("expected proc bool to be true, got false") + } + + if err := ioutil.WriteFile(procFile, []byte("0"), 644); err != nil { + t.Fatal(err) + } + if readProcBool(procFile) { + t.Fatal("expected proc bool to be false, got false") + } + + if readProcBool(path.Join(tmpDir, "no-exist")) { + t.Fatal("should be false for non-existent entry") + } + +} + +func TestCgroupEnabled(t *testing.T) { + cgroupDir, err := ioutil.TempDir("", "cgroup-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(cgroupDir) + + if cgroupEnabled(cgroupDir, "test") { + t.Fatal("cgroupEnabled should be false") + } + + if err := ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 644); err != nil { + t.Fatal(err) + } + + if !cgroupEnabled(cgroupDir, "test") { + t.Fatal("cgroupEnabled should be true") + } +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_solaris.go new file mode 100644 index 0000000..c858d57 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_solaris.go @@ -0,0 +1,121 @@ +// +build solaris,cgo + +package sysinfo + +import ( + "bytes" + "os/exec" + "strconv" + "strings" +) + +/* +#cgo LDFLAGS: -llgrp +#include +#include +#include +int getLgrpCount() { + lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE; + uint_t nlgrps; + + if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) { + return -1; + } + nlgrps = lgrp_nlgrps(lgrpcookie); + return nlgrps; +} +*/ +import "C" + +// IsCPUSharesAvailable returns whether CPUShares setting is supported. +// We need FSS to be set as default scheduling class to support CPU Shares +func IsCPUSharesAvailable() bool { + cmd := exec.Command("/usr/sbin/dispadmin", "-d") + outBuf := new(bytes.Buffer) + errBuf := new(bytes.Buffer) + cmd.Stderr = errBuf + cmd.Stdout = outBuf + + if err := cmd.Run(); err != nil { + return false + } + return (strings.Contains(outBuf.String(), "FSS")) +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. +//NOTE Solaris: If we change the below capabilities be sure +// to update verifyPlatformContainerSettings() in daemon_solaris.go +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + sysInfo.cgroupMemInfo = setCgroupMem(quiet) + sysInfo.cgroupCPUInfo = setCgroupCPU(quiet) + sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet) + sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet) + + sysInfo.IPv4ForwardingDisabled = false + + sysInfo.AppArmor = false + + return sysInfo +} + +// setCgroupMem reads the memory information for Solaris. +func setCgroupMem(quiet bool) cgroupMemInfo { + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: true, + MemoryReservation: false, + OomKillDisable: false, + MemorySwappiness: false, + KernelMemory: false, + } +} + +// setCgroupCPU reads the cpu information for Solaris. +func setCgroupCPU(quiet bool) cgroupCPUInfo { + + return cgroupCPUInfo{ + CPUShares: true, + CPUCfsPeriod: false, + CPUCfsQuota: true, + CPURealtimePeriod: false, + CPURealtimeRuntime: false, + } +} + +// blkio switches are not supported in Solaris. +func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { + + return cgroupBlkioInfo{ + BlkioWeight: false, + BlkioWeightDevice: false, + } +} + +// setCgroupCPUsetInfo reads the cpuset information for Solaris. +func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo { + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: getCPUCount(), + Mems: getLgrpCount(), + } +} + +func getCPUCount() string { + ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) + if ncpus <= 0 { + return "" + } + return strconv.FormatInt(int64(ncpus), 16) +} + +func getLgrpCount() string { + nlgrps := C.getLgrpCount() + if nlgrps <= 0 { + return "" + } + return strconv.FormatInt(int64(nlgrps), 16) +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_test.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_test.go new file mode 100644 index 0000000..b61fbcf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_test.go @@ -0,0 +1,26 @@ +package sysinfo + +import "testing" + +func TestIsCpusetListAvailable(t *testing.T) { + cases := []struct { + provided string + available string + res bool + err bool + }{ + {"1", "0-4", true, false}, + {"01,3", "0-4", true, false}, + {"", "0-7", true, false}, + {"1--42", "0-7", false, true}, + {"1-42", "00-1,8,,9", false, true}, + {"1,41-42", "43,45", false, false}, + {"0-3", "", false, false}, + } + for _, c := range cases { + r, err := isCpusetListAvailable(c.provided, c.available) + if (c.err && err == nil) && r != c.res { + t.Fatalf("Expected pair: %v, %v for %s, %s. Got %v, %v instead", c.res, c.err, c.provided, c.available, (c.err && err == nil), r) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_unix.go new file mode 100644 index 0000000..45f3ef1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_unix.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris,!windows + +package sysinfo + +// New returns an empty SysInfo for non linux nor solaris for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_windows.go new file mode 100644 index 0000000..4e6255b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package sysinfo + +// New returns an empty SysInfo for windows for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes.go b/vendor/github.com/moby/moby/pkg/system/chtimes.go new file mode 100644 index 0000000..7637f12 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes.go @@ -0,0 +1,52 @@ +package system + +import ( + "os" + "syscall" + "time" + "unsafe" +) + +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_test.go b/vendor/github.com/moby/moby/pkg/system/chtimes_test.go new file mode 100644 index 0000000..5c87df3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_test.go @@ -0,0 +1,94 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" +) + +// prepareTempFile creates a temporary file in a temporary directory. +func prepareTempFile(t *testing.T) (string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + return file, dir +} + +// TestChtimes tests Chtimes on a tempfile. Test only mTime, because aTime is OS dependent +func TestChtimes(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second)) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_unix.go b/vendor/github.com/moby/moby/pkg/system/chtimes_unix.go new file mode 100644 index 0000000..09d58bc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_unix_test.go b/vendor/github.com/moby/moby/pkg/system/chtimes_unix_test.go new file mode 100644 index 0000000..6ec9a71 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_unix_test.go @@ -0,0 +1,91 @@ +// +build !windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesLinux tests Chtimes access time on a tempfile on Linux +func TestChtimesLinux(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat := f.Sys().(*syscall.Stat_t) + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_windows.go b/vendor/github.com/moby/moby/pkg/system/chtimes_windows.go new file mode 100644 index 0000000..2945868 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_windows.go @@ -0,0 +1,27 @@ +// +build windows + +package system + +import ( + "syscall" + "time" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) + pathp, e := syscall.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := syscall.CreateFile(pathp, + syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, + syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer syscall.Close(h) + c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) + return syscall.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_windows_test.go b/vendor/github.com/moby/moby/pkg/system/chtimes_windows_test.go new file mode 100644 index 0000000..72d8a10 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_windows_test.go @@ -0,0 +1,86 @@ +// +build windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesWindows tests Chtimes access time on a tempfile on Windows +func TestChtimesWindows(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/errors.go b/vendor/github.com/moby/moby/pkg/system/errors.go new file mode 100644 index 0000000..2883189 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/github.com/moby/moby/pkg/system/events_windows.go b/vendor/github.com/moby/moby/pkg/system/events_windows.go new file mode 100644 index 0000000..3ec6d22 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/events_windows.go @@ -0,0 +1,85 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *windows.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/vendor/github.com/moby/moby/pkg/system/exitcode.go b/vendor/github.com/moby/moby/pkg/system/exitcode.go new file mode 100644 index 0000000..60f0514 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/exitcode.go @@ -0,0 +1,33 @@ +package system + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode process the specified error and returns the exit status code +// if the error was of type exec.ExitError, returns nothing otherwise. +func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/vendor/github.com/moby/moby/pkg/system/filesys.go b/vendor/github.com/moby/moby/pkg/system/filesys.go new file mode 100644 index 0000000..810c794 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/filesys.go @@ -0,0 +1,54 @@ +// +build !windows + +package system + +import ( + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// ACL'd for Builtin Administrators and Local System. +func MkdirAllWithACL(path string, perm os.FileMode) error { + return MkdirAll(path, perm) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os package. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} diff --git a/vendor/github.com/moby/moby/pkg/system/filesys_windows.go b/vendor/github.com/moby/moby/pkg/system/filesys_windows.go new file mode 100644 index 0000000..6094f01 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/filesys_windows.go @@ -0,0 +1,236 @@ +// +build windows + +package system + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "syscall" + "unsafe" + + winio "github.com/Microsoft/go-winio" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// ACL'd for Builtin Administrators and Local System. +func MkdirAllWithACL(path string, perm os.FileMode) error { + return mkdirall(path, true) +} + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, _ os.FileMode) error { + return mkdirall(path, false) +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, adminAndLocalSystem bool) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = mkdirall(path[0:j-1], false) + if err != nil { + return err + } + } + + // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. + if adminAndLocalSystem { + err = mkdirWithACL(path) + } else { + err = os.Mkdir(path, 0) + } + + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// mkdirWithACL creates a new directory. If there is an error, it will be of +// type *PathError. . +// +// This is a modified and combined version of os.Mkdir and syscall.Mkdir +// in golang to cater for creating a directory am ACL permitting full +// access, with inheritance, to any subfolder/file for Built-in Administrators +// and Local System. +func mkdirWithACL(name string) error { + sa := syscall.SecurityAttributes{Length: 0} + sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + sd, err := winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + + namep, err := syscall.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := syscall.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} + +// The origin of the functions below here are the golang OS and syscall packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := syscallOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func syscallOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := syscallOpenSequential(name, flag|syscall.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} diff --git a/vendor/github.com/moby/moby/pkg/system/lstat.go b/vendor/github.com/moby/moby/pkg/system/lstat.go new file mode 100644 index 0000000..bd23c4d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/lstat.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/moby/moby/pkg/system/lstat_unix_test.go b/vendor/github.com/moby/moby/pkg/system/lstat_unix_test.go new file mode 100644 index 0000000..062cf53 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/lstat_unix_test.go @@ -0,0 +1,30 @@ +// +build linux freebsd + +package system + +import ( + "os" + "testing" +) + +// TestLstat tests Lstat for existing and non existing files +func TestLstat(t *testing.T) { + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/lstat_windows.go b/vendor/github.com/moby/moby/pkg/system/lstat_windows.go new file mode 100644 index 0000000..49e87eb --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/lstat_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package system + +import ( + "os" +) + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +// Note the Linux version uses fromStatT to do the copy back, +// but that not strictly necessary when already in an OS specific module. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return &StatT{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo.go b/vendor/github.com/moby/moby/pkg/system/meminfo.go new file mode 100644 index 0000000..3b6e947 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_linux.go b/vendor/github.com/moby/moby/pkg/system/meminfo_linux.go new file mode 100644 index 0000000..385f1d5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_solaris.go b/vendor/github.com/moby/moby/pkg/system/meminfo_solaris.go new file mode 100644 index 0000000..7f4f84f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/meminfo_solaris.go @@ -0,0 +1,128 @@ +// +build solaris,cgo + +package system + +import ( + "fmt" + "unsafe" +) + +// #cgo LDFLAGS: -lkstat +// #include +// #include +// #include +// #include +// #include +// #include +// struct swaptable *allocSwaptable(int num) { +// struct swaptable *st; +// struct swapent *swapent; +// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); +// swapent = st->swt_ent; +// for (int i = 0; i < num; i++,swapent++) { +// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); +// } +// st->swt_n = num; +// return st; +//} +// void freeSwaptable (struct swaptable *st) { +// struct swapent *swapent = st->swt_ent; +// for (int i = 0; i < st->swt_n; i++,swapent++) { +// free(swapent->ste_path); +// } +// free(st); +// } +// swapent_t getSwapEnt(swapent_t *ent, int i) { +// return ent[i]; +// } +// int64_t getPpKernel() { +// int64_t pp_kernel = 0; +// kstat_ctl_t *ksc; +// kstat_t *ks; +// kstat_named_t *knp; +// kid_t kid; +// +// if ((ksc = kstat_open()) == NULL) { +// return -1; +// } +// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { +// return -1; +// } +// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || +// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { +// return -1; +// } +// switch (knp->data_type) { +// case KSTAT_DATA_UINT64: +// pp_kernel = knp->value.ui64; +// break; +// case KSTAT_DATA_UINT32: +// pp_kernel = knp->value.ui32; +// break; +// } +// pp_kernel *= sysconf(_SC_PAGESIZE); +// return (pp_kernel > 0 ? pp_kernel : -1); +// } +import "C" + +// Get the system memory info using sysconf same as prtconf +func getTotalMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_PHYS_PAGES) + return int64(pagesize * npages) +} + +func getFreeMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_AVPHYS_PAGES) + return int64(pagesize * npages) +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("error getting system memory info %v\n", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_unix_test.go b/vendor/github.com/moby/moby/pkg/system/meminfo_unix_test.go new file mode 100644 index 0000000..44f5562 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/meminfo_unix_test.go @@ -0,0 +1,40 @@ +// +build linux freebsd + +package system + +import ( + "strings" + "testing" + + "github.com/docker/go-units" +) + +// TestMemInfo tests parseMemInfo with a static meminfo string +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_unsupported.go b/vendor/github.com/moby/moby/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000..3ce019d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows,!solaris + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_windows.go b/vendor/github.com/moby/moby/pkg/system/meminfo_windows.go new file mode 100644 index 0000000..883944a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/mknod.go b/vendor/github.com/moby/moby/pkg/system/mknod.go new file mode 100644 index 0000000..7395818 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/moby/moby/pkg/system/mknod_windows.go b/vendor/github.com/moby/moby/pkg/system/mknod_windows.go new file mode 100644 index 0000000..2e863c0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/moby/moby/pkg/system/path_unix.go b/vendor/github.com/moby/moby/pkg/system/path_unix.go new file mode 100644 index 0000000..c607c4d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/path_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/path_windows.go b/vendor/github.com/moby/moby/pkg/system/path_windows.go new file mode 100644 index 0000000..cbfe2c1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/path_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. Docker has no context of what the default path should be. +const DefaultPathEnv = "" + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be contatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/path_windows_test.go b/vendor/github.com/moby/moby/pkg/system/path_windows_test.go new file mode 100644 index 0000000..eccb26a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/path_windows_test.go @@ -0,0 +1,78 @@ +// +build windows + +package system + +import "testing" + +// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter +func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { + // Fails if not C drive. + path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`) + if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") { + t.Fatalf("Expected error for d:") + } + + // Single character is unchanged + if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil { + t.Fatalf("Single character should pass") + } + if path != "z" { + t.Fatalf("Single character should be unchanged") + } + + // Two characters without colon is unchanged + if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil { + t.Fatalf("2 characters without colon should pass") + } + if path != "AB" { + t.Fatalf("2 characters without colon should be unchanged") + } + + // Abs path without drive letter + if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil { + t.Fatalf("abs path no drive letter should pass") + } + if path != `\l` { + t.Fatalf("abs path without drive letter should be unchanged") + } + + // Abs path without drive letter, linux style + if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil { + t.Fatalf("abs path no drive letter linux style should pass") + } + if path != `\l` { + t.Fatalf("abs path without drive letter linux failed %s", path) + } + + // Drive-colon should be stripped + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil { + t.Fatalf("An absolute path should pass") + } + if path != `\` { + t.Fatalf(`An absolute path should have been shortened to \ %s`, path) + } + + // Verify with a linux-style path + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil { + t.Fatalf("An absolute path should pass") + } + if path != `\` { + t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path) + } + + // Failure on c: + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil { + t.Fatalf("c: should fail") + } + if err.Error() != `No relative path specified in "c:"` { + t.Fatalf(path, err) + } + + // Failure on d: + if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil { + t.Fatalf("c: should fail") + } + if err.Error() != `No relative path specified in "d:"` { + t.Fatalf(path, err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat.go b/vendor/github.com/moby/moby/pkg/system/stat.go new file mode 100644 index 0000000..087034c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat.go @@ -0,0 +1,53 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// GetLastModification returns file's last modification time. +func (s StatT) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_darwin.go b/vendor/github.com/moby/moby/pkg/system/stat_darwin.go new file mode 100644 index 0000000..f0742f5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_darwin.go @@ -0,0 +1,32 @@ +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// FromStatT loads a system.StatT from a syscall.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_freebsd.go b/vendor/github.com/moby/moby/pkg/system/stat_freebsd.go new file mode 100644 index 0000000..d0fb6f1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_freebsd.go @@ -0,0 +1,27 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_linux.go b/vendor/github.com/moby/moby/pkg/system/stat_linux.go new file mode 100644 index 0000000..8b1eded --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_linux.go @@ -0,0 +1,33 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT exists only on linux, and loads a system.StatT from a +// syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_openbsd.go b/vendor/github.com/moby/moby/pkg/system/stat_openbsd.go new file mode 100644 index 0000000..3c3b71f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_openbsd.go @@ -0,0 +1,15 @@ +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_solaris.go b/vendor/github.com/moby/moby/pkg/system/stat_solaris.go new file mode 100644 index 0000000..0216985 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_solaris.go @@ -0,0 +1,34 @@ +// +build solaris + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} + +// FromStatT loads a system.StatT from a syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_unix_test.go b/vendor/github.com/moby/moby/pkg/system/stat_unix_test.go new file mode 100644 index 0000000..dee8d30 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_unix_test.go @@ -0,0 +1,39 @@ +// +build linux freebsd + +package system + +import ( + "os" + "syscall" + "testing" +) + +// TestFromStatT tests fromStatT for a tempfile +func TestFromStatT(t *testing.T) { + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + t.Fatal("got invalid mode") + } + if stat.Uid != s.UID() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.GID() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_unsupported.go b/vendor/github.com/moby/moby/pkg/system/stat_unsupported.go new file mode 100644 index 0000000..5d85f52 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_unsupported.go @@ -0,0 +1,17 @@ +// +build !linux,!windows,!freebsd,!solaris,!openbsd,!darwin + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_windows.go b/vendor/github.com/moby/moby/pkg/system/stat_windows.go new file mode 100644 index 0000000..39490c6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like name, permission, size, etc about a file. +type StatT struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +// Name returns file's name. +func (s StatT) Name() string { + return s.name +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return s.mode +} + +// ModTime returns file's last modification time. +func (s StatT) ModTime() time.Time { + return s.modTime +} + +// IsDir returns whether file is actually a directory. +func (s StatT) IsDir() bool { + return s.isDir +} diff --git a/vendor/github.com/moby/moby/pkg/system/syscall_unix.go b/vendor/github.com/moby/moby/pkg/system/syscall_unix.go new file mode 100644 index 0000000..3ae9128 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system + +import "syscall" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return syscall.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. +func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/syscall_windows.go b/vendor/github.com/moby/moby/pkg/system/syscall_windows.go new file mode 100644 index 0000000..1f31187 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/syscall_windows.go @@ -0,0 +1,105 @@ +package system + +import ( + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +var ( + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = syscall.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := syscall.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := syscall.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(syscall.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/syscall_windows_test.go b/vendor/github.com/moby/moby/pkg/system/syscall_windows_test.go new file mode 100644 index 0000000..4886b2b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/syscall_windows_test.go @@ -0,0 +1,9 @@ +package system + +import "testing" + +func TestHasWin32KSupport(t *testing.T) { + s := HasWin32KSupport() // make sure this doesn't panic + + t.Logf("win32k: %v", s) // will be different on different platforms -- informative only +} diff --git a/vendor/github.com/moby/moby/pkg/system/umask.go b/vendor/github.com/moby/moby/pkg/system/umask.go new file mode 100644 index 0000000..3d0146b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/umask_windows.go b/vendor/github.com/moby/moby/pkg/system/umask_windows.go new file mode 100644 index 0000000..13f1de1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/moby/moby/pkg/system/utimes_freebsd.go b/vendor/github.com/moby/moby/pkg/system/utimes_freebsd.go new file mode 100644 index 0000000..e2eac3b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/utimes_freebsd.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/utimes_linux.go b/vendor/github.com/moby/moby/pkg/system/utimes_linux.go new file mode 100644 index 0000000..fc8a1ab --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/utimes_linux.go @@ -0,0 +1,26 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + atFdCwd := -100 + atSymLinkNoFollow := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/utimes_unix_test.go b/vendor/github.com/moby/moby/pkg/system/utimes_unix_test.go new file mode 100644 index 0000000..a73ed11 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/utimes_unix_test.go @@ -0,0 +1,68 @@ +// +build linux freebsd + +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +// prepareFiles creates files for testing in the temp directory +func prepareFiles(t *testing.T) (string, string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink, dir +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + before, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + ts := []syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}} + if err := LUtimesNano(symlink, ts); err != nil { + t.Fatal(err) + } + + symlinkInfo, err := os.Lstat(symlink) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { + t.Fatal("The modification time of the symlink should be different") + } + + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() != fileInfo.ModTime().Unix() { + t.Fatal("The modification time of the file should be same") + } + + if err := LUtimesNano(invalid, ts); err == nil { + t.Fatal("Doesn't return an error on a non-existing file") + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/utimes_unsupported.go b/vendor/github.com/moby/moby/pkg/system/utimes_unsupported.go new file mode 100644 index 0000000..1397145 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd + +package system + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/moby/moby/pkg/system/xattrs_linux.go b/vendor/github.com/moby/moby/pkg/system/xattrs_linux.go new file mode 100644 index 0000000..d2e2c05 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/xattrs_linux.go @@ -0,0 +1,63 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It will returns a nil slice and nil error if the xattr is not set. +func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + dest := make([]byte, 128) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { + return nil, nil + } + if errno == syscall.ERANGE { + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + } + if errno != 0 { + return nil, errno + } + + return dest[:sz], nil +} + +var _zero uintptr + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/xattrs_unsupported.go b/vendor/github.com/moby/moby/pkg/system/xattrs_unsupported.go new file mode 100644 index 0000000..0114f22 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/moby/moby/pkg/tailfile/tailfile.go b/vendor/github.com/moby/moby/pkg/tailfile/tailfile.go new file mode 100644 index 0000000..09eb393 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tailfile/tailfile.go @@ -0,0 +1,66 @@ +// Package tailfile provides helper functions to read the nth lines of any +// ReadSeeker. +package tailfile + +import ( + "bytes" + "errors" + "io" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n") + +// ErrNonPositiveLinesNumber is an error returned if the lines number was negative. +var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") + +//TailFile returns last n lines of reader f (could be a fil). +func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(left, os.SEEK_SET); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/vendor/github.com/moby/moby/pkg/tailfile/tailfile_test.go b/vendor/github.com/moby/moby/pkg/tailfile/tailfile_test.go new file mode 100644 index 0000000..31217c0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tailfile/tailfile_test.go @@ -0,0 +1,148 @@ +package tailfile + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestTailFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +third line +fourth line +fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last first line +next first line +next second line +next third line +next fourth line +next fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last second line +last third line +last fourth line +last fifth line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"last fourth line", "last fifth line"} + res, err := TailFile(f, 2) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailFileManyLines(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"first line", "second line"} + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailEmptyFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + if len(res) != 0 { + t.Fatal("Must be empty slice from empty file") + } +} + +func TestTailNegativeN(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } + if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } +} + +func BenchmarkTail(b *testing.B) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + b.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + for i := 0; i < 10000; i++ { + if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := TailFile(f, 1000); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/builder_context.go b/vendor/github.com/moby/moby/pkg/tarsum/builder_context.go new file mode 100644 index 0000000..b42983e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/builder_context.go @@ -0,0 +1,21 @@ +package tarsum + +// BuilderContext is an interface extending TarSum by adding the Remove method. +// In general there was concern about adding this method to TarSum itself +// so instead it is being added just to "BuilderContext" which will then +// only be used during the .dockerignore file processing +// - see builder/evaluator.go +type BuilderContext interface { + TarSum + Remove(string) +} + +func (bc *tarSum) Remove(filename string) { + for i, fis := range bc.sums { + if fis.Name() == filename { + bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) + // Note, we don't just return because there could be + // more than one with this name + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/builder_context_test.go b/vendor/github.com/moby/moby/pkg/tarsum/builder_context_test.go new file mode 100644 index 0000000..f54bf3a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/builder_context_test.go @@ -0,0 +1,67 @@ +package tarsum + +import ( + "io" + "io/ioutil" + "os" + "testing" +) + +// Try to remove tarsum (in the BuilderContext) that do not exists, won't change a thing +func TestTarSumRemoveNonExistent(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) + + ts.(BuilderContext).Remove("") + ts.(BuilderContext).Remove("Anything") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums()) + } +} + +// Remove a tarsum (in the BuilderContext) +func TestTarSumRemove(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) - 1 + + ts.(BuilderContext).Remove("etc/sudoers") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, len(ts.GetSums())) + } +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums.go b/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums.go new file mode 100644 index 0000000..5abf5e7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums.go @@ -0,0 +1,126 @@ +package tarsum + +import "sort" + +// FileInfoSumInterface provides an interface for accessing file checksum +// information within a tar file. This info is accessed through interface +// so the actual name and sum cannot be melded with. +type FileInfoSumInterface interface { + // File name + Name() string + // Checksum of this particular file and its headers + Sum() string + // Position of file in the tar + Pos() int64 +} + +type fileInfoSum struct { + name string + sum string + pos int64 +} + +func (fis fileInfoSum) Name() string { + return fis.name +} +func (fis fileInfoSum) Sum() string { + return fis.sum +} +func (fis fileInfoSum) Pos() int64 { + return fis.pos +} + +// FileInfoSums provides a list of FileInfoSumInterfaces. +type FileInfoSums []FileInfoSumInterface + +// GetFile returns the first FileInfoSumInterface with a matching name. +func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + for i := range fis { + if fis[i].Name() == name { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names. +func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. + for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +// Len returns the size of the FileInfoSums. +func (fis FileInfoSums) Len() int { return len(fis) } + +// Swap swaps two FileInfoSum values if a FileInfoSums list. +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +// SortByPos sorts FileInfoSums content by position. +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +// SortByNames sorts FileInfoSums content by name. +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +// SortBySums sorts FileInfoSums content by sums. +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. +// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums_test.go b/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 0000000..bb700d8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,62 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) + } + + fis := newFileInfoSums() + expected := "0abcdef1234567890" + fis.SortBySums() + got := fis[0].Sum() + if got != expected { + t.Errorf("Expected %q, got %q", expected, got) + } + + fis = newFileInfoSums() + expected = "dup1" + fis.SortByNames() + gotFis := fis[0] + if gotFis.Name() != expected { + t.Errorf("Expected %q, got %q", expected, gotFis.Name()) + } + // since a duplicate is first, ensure it is ordered first by position too + if gotFis.Pos() != 4 { + t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) + } + + fis = newFileInfoSums() + fis.SortByPos() + if fis[0].Pos() != 0 { + t.Errorf("sorted fileInfoSums by Pos should order them by position.") + } + + fis = newFileInfoSums() + expected = "deadbeef1" + gotFileInfoSum := fis.GetFile("dup1") + if gotFileInfoSum.Sum() != expected { + t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) + } + if fis.GetFile("noPresent") != nil { + t.Errorf("Should have return nil if name not found.") + } + +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/tarsum.go b/vendor/github.com/moby/moby/pkg/tarsum/tarsum.go new file mode 100644 index 0000000..154788d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/tarsum.go @@ -0,0 +1,295 @@ +// Package tarsum provides algorithms to perform checksum calculation on +// filesystem layers. +// +// The transportation of filesystems, regarding Docker, is done with tar(1) +// archives. There are a variety of tar serialization formats [2], and a key +// concern here is ensuring a repeatable checksum given a set of inputs from a +// generic tar archive. Types of transportation include distribution to and from a +// registry endpoint, saving and loading through commands or Docker daemon APIs, +// transferring the build context from client to Docker daemon, and committing the +// filesystem of a container to become an image. +// +// As tar archives are used for transit, but not preserved in many situations, the +// focus of the algorithm is to ensure the integrity of the preserved filesystem, +// while maintaining a deterministic accountability. This includes neither +// constraining the ordering or manipulation of the files during the creation or +// unpacking of the archive, nor include additional metadata state about the file +// system attributes. +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "path" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than +// the DefaultTHash. +func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. +func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed time +// checksums of a tar archive. +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation. +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output gzip compressed. + tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// THash provides a hash.Hash type generator and its name. +type THash interface { + Hash() hash.Hash + Name() string +} + +// NewTHash is a convenience method for creating a THash. +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. + standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// DefaultTHash is default TarSum hashing algorithm - "sha256". +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = path.Clean(currentHeader.Name) + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/tarsum_spec.md b/vendor/github.com/moby/moby/pkg/tarsum/tarsum_spec.md new file mode 100644 index 0000000..89b2e49 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,230 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithms used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on filesystem layers, the need for this method over existing +methods, and the versioning of this calculation. + +## Warning + +This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. + +This is _not_ a cryptographic attestation, and should not be considered secure. + +## Introduction + +The transportation of filesystems, regarding Docker, is done with tar(1) +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved filesystem, +while maintaining a deterministic accountability. This includes neither +constraining the ordering or manipulation of the files during the creation or +unpacking of the archive, nor include additional metadata state about the file +system attributes. + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for filesystems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + +## Concept + +The checksum mechanism must ensure the integrity and assurance of the +filesystem payload. + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* Associated hashing cipher - used to checksum each file payload and attribute + information. +* Checksum list - each file of the filesystem archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* Version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. +* Archive being calculated - the tar archive having its checksum calculated + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal +form. + +There are two delimiters used: +* '+' separates TarSum version from hashing cipher +* ':' separates calculation mechanics from expected hash + +Example: + +``` + "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" + | | \ | + | | \ | + |_version_|_cipher__|__ | + | \ | + |_calculation_mechanics_|______________________expected_sum_______________________| +``` + +## Versioning + +Versioning was introduced [0] to accommodate differences in calculation needed, +and ability to maintain reverse compatibility. + +The general algorithm will be describe further in the 'Calculation'. + +### Version0 + +This is the initial version of TarSum. + +Its element in the TarSum checksum string is `tarsum`. + +### Version1 + +Its element in the TarSum checksum is `tarsum.v1`. + +The notable changes in this version: +* Exclusion of file `mtime` from the file information headers, in each file + checksum calculation +* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax + tar file info headers) keys and values in each file checksum calculation + +### VersionDev + +*Do not use unless validating refinements to the checksum algorithm* + +Its element in the TarSum checksum is `tarsum.dev`. + +This is a floating place holder for a next version and grounds for testing +changes. The methods used for calculation are subject to change without notice, +and this version is for testing and not for production use. + +## Ciphers + +The official default and standard hashing cipher used in the calculation mechanic +is `sha256`. This refers to SHA256 hash algorithm as defined in FIPS 180-4. + +Though the TarSum algorithm itself is not exclusively bound to the single +hashing cipher `sha256`, support for alternate hashing ciphers was later added +[1]. Use cases for alternate cipher could include future-proofing TarSum +checksum format and using faster cipher hashes for tar filesystem checksums. + +## Calculation + +### Requirement + +As mentioned earlier, the calculation is such that it takes into consideration +the lifecycle of the tar archive. In that the tar archive is not an immutable, +permanent artifact. Otherwise options like relying on a known hashing cipher +checksum of the archive itself would be reliable enough. The tar archive of the +filesystem is used as a transportation medium for Docker images, and the +archive is discarded once its contents are extracted. Therefore, for consistent +validation items such as order of files in the tar archive and time stamps are +subject to change once an image is received. + +### Process + +The method is typically iterative due to reading tar info headers from the +archive stream, though this is not a strict requirement. + +#### Files + +Each file in the tar archive have their contents (headers and body) checksummed +individually using the designated associated hashing cipher. The ordered +headers of the file are written to the checksum calculation first, and then the +payload of the file body. + +The resulting checksum of the file is appended to the list of file sums. The +sum is encoded as a string of the hexadecimal digest. Additionally, the file +name and position in the archive is kept as reference for special ordering. + +#### Headers + +The following headers are read, in this +order ( and the corresponding representation of its value): +* 'name' - string +* 'mode' - string of the base10 integer +* 'uid' - string of the integer +* 'gid' - string of the integer +* 'size' - string of the integer +* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC +* 'typeflag' - string of the char +* 'linkname' - string +* 'uname' - string +* 'gname' - string +* 'devmajor' - string of the integer +* 'devminor' - string of the integer + +For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax +headers) included after the above list. These xattrs key/values are first +sorted by the keys. + +#### Header Format + +The ordered headers are written to the hash in the format of + + "{.key}{.value}" + +with no newline. + +#### Body + +After the order headers of the file have been added to the checksum for the +file, the body of the file is written to the hash. + +#### List of file sums + +The list of file sums is sorted by the string of the hexadecimal digest. + +If there are two files in the tar with matching paths, the order of occurrence +for that path is reflected for the sums of the corresponding file header and +body. + +#### Final Checksum + +Begin with a fresh or initial state of the associated hash cipher. If there is +additional payload to include in the TarSum calculation for the archive, it is +written first. Then each checksum from the ordered list of file sums is written +to the hash. + +The resulting digest is formatted per the Elements of TarSum checksum, +including the TarSum version, the associated hash cipher and the hexadecimal +encoded checksum digest. + +## Security Considerations + +The initial version of TarSum has undergone one update that could invalidate +handcrafted tar archives. The tar archive format supports appending of files +with same names as prior files in the archive. The latter file will clobber the +prior file of the same path. Due to this the algorithm now accounts for files +with matching paths, and orders the list of file sums accordingly [3]. + +## Footnotes + +* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 +* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e +* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 +* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 + +## Acknowledgments + +Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the +TarSum calculation. + diff --git a/vendor/github.com/moby/moby/pkg/tarsum/tarsum_test.go b/vendor/github.com/moby/moby/pkg/tarsum/tarsum_test.go new file mode 100644 index 0000000..86df0e2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/tarsum_test.go @@ -0,0 +1,664 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. ensuring is has different hash than above + filename: "testdata/collision/collision-1.tar", + tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + { + // this tar has newer of collider-0.tar, ensuring is has different hash + filename: "testdata/collision/collision-2.tar", + tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + { + // this tar has newer of collider-1.tar, ensuring is has different hash + filename: "testdata/collision/collision-3.tar", + tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", + hash: md5THash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", + hash: sha1Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", + hash: sha224Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", + hash: sha384Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", + hash: sha512Hash, + }, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + defer tarW.Close() + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func emptyTarSum(gzip bool) (TarSum, error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. + go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// Test errors on NewTarsumForLabel +func TestNewTarSumForLabelInvalid(t *testing.T) { + reader := strings.NewReader("") + + if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + + if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } +} + +func TestNewTarSumForLabel(t *testing.T) { + + layer := testLayers[0] + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + label := strings.Split(layer.tarsum, ":")[0] + ts, err := NewTarSumForLabel(reader, false, label) + if err != nil { + t.Fatal(err) + } + + // Make sure it actually worked by reading a little bit of it + nbByteToRead := 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + } +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test without ever actually writing anything. + if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +// Test all the build-in read size : buf8K, buf16K, buf32K and more +func TestTarSumsReadSize(t *testing.T) { + // Test always on the same layer (that is big enough) + layer := testLayers[0] + + for i := 0; i < 5; i++ { + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, layer.version) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + nbByteToRead := (i + 1) * 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + continue + } + } +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! + ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + defer jfh.Close() + + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + var expectedHashName string + if layer.hash != nil { + expectedHashName = layer.hash.Name() + } else { + expectedHashName = DefaultTHash.Name() + } + if expectedHashName != ts.Hash().Name() { + t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) + } + } +} + +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", + Version0, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. + break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 0000000..48e2af3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000000000000000000000000000000000..dfd5c204aea77673f13fdd2f81cb4af1c155c00c GIT binary patch literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-2.tar b/vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 0000000000000000000000000000000000000000..7b5c04a9644808851fcccab5c3c240bf342abd93 GIT binary patch literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/pkg/tarsum/versioning.go b/vendor/github.com/moby/moby/pkg/tarsum/versioning.go new file mode 100644 index 0000000..2882286 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/versioning.go @@ -0,0 +1,150 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. +type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/versioning_test.go b/vendor/github.com/moby/moby/pkg/tarsum/versioning_test.go new file mode 100644 index 0000000..88e0a57 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/versioning_test.go @@ -0,0 +1,98 @@ +package tarsum + +import ( + "testing" +) + +func TestVersionLabelForChecksum(t *testing.T) { + version := VersionLabelForChecksum("tarsum+sha256:deadbeef") + if version != "tarsum" { + t.Fatalf("Version should have been 'tarsum', was %v", version) + } + version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") + if version != "tarsum.v1" { + t.Fatalf("Version should have been 'tarsum.v1', was %v", version) + } + version = VersionLabelForChecksum("something+somethingelse") + if version != "something" { + t.Fatalf("Version should have been 'something', was %v", version) + } + version = VersionLabelForChecksum("invalidChecksum") + if version != "" { + t.Fatalf("Version should have been empty, was %v", version) + } +} + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.v1" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct { + Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} + +func TestGetVersions(t *testing.T) { + expected := []Version{ + Version0, + Version1, + VersionDev, + } + versions := GetVersions() + if len(versions) != len(expected) { + t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) + } + if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { + t.Fatalf("Expected [%v], got [%v]", expected, versions) + } +} + +func containsVersion(versions []Version, version Version) bool { + for _, v := range versions { + if v == version { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/writercloser.go b/vendor/github.com/moby/moby/pkg/tarsum/writercloser.go new file mode 100644 index 0000000..9727ecd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/ascii.go b/vendor/github.com/moby/moby/pkg/term/ascii.go new file mode 100644 index 0000000..f5262bc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/ascii.go @@ -0,0 +1,66 @@ +package term + +import ( + "fmt" + "strings" +) + +// ASCII list the possible supported ASCII key sequence +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. +func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, byte(key[0])) + } + } + return codes, nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/ascii_test.go b/vendor/github.com/moby/moby/pkg/term/ascii_test.go new file mode 100644 index 0000000..4a1e7f3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/ascii_test.go @@ -0,0 +1,43 @@ +package term + +import "testing" + +func TestToBytes(t *testing.T) { + codes, err := ToBytes("ctrl-a,a") + if err != nil { + t.Fatal(err) + } + if len(codes) != 2 { + t.Fatalf("Expected 2 codes, got %d", len(codes)) + } + if codes[0] != 1 || codes[1] != 97 { + t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1]) + } + + codes, err = ToBytes("shift-z") + if err == nil { + t.Fatalf("Expected error, got none") + } + + codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") + if err != nil { + t.Fatal(err) + } + if len(codes) != 4 { + t.Fatalf("Expected 4 codes, got %d", len(codes)) + } + if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 { + t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3]) + } + + codes, err = ToBytes("DEL,+") + if err != nil { + t.Fatal(err) + } + if len(codes) != 2 { + t.Fatalf("Expected 2 codes, got %d", len(codes)) + } + if codes[0] != 127 || codes[1] != 43 { + t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1]) + } +} diff --git a/vendor/github.com/moby/moby/pkg/term/tc_linux_cgo.go b/vendor/github.com/moby/moby/pkg/term/tc_linux_cgo.go new file mode 100644 index 0000000..59dac5b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/tc_linux_cgo.go @@ -0,0 +1,50 @@ +// +build linux,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +// Termios is the Unix API for terminal I/O. +// It is passthrough for syscall.Termios in order to make it portable with +// other platforms where it is not available or handled differently. +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/vendor/github.com/moby/moby/pkg/term/tc_other.go b/vendor/github.com/moby/moby/pkg/term/tc_other.go new file mode 100644 index 0000000..750d7c3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/tc_other.go @@ -0,0 +1,20 @@ +// +build !windows +// +build !linux !cgo +// +build !solaris !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/vendor/github.com/moby/moby/pkg/term/tc_solaris_cgo.go b/vendor/github.com/moby/moby/pkg/term/tc_solaris_cgo.go new file mode 100644 index 0000000..c9139d0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/tc_solaris_cgo.go @@ -0,0 +1,63 @@ +// +build solaris,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +// Termios is the Unix API for terminal I/O. +// It is passthrough for syscall.Termios in order to make it portable with +// other platforms where it is not available or handled differently. +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + /* + VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned + Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It + needs to be explicitly set to 1. + */ + newState.Cc[C.VMIN] = 1 + newState.Cc[C.VTIME] = 0 + + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/vendor/github.com/moby/moby/pkg/term/term.go b/vendor/github.com/moby/moby/pkg/term/term.go new file mode 100644 index 0000000..fe59faa --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/term.go @@ -0,0 +1,123 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + "syscall" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stedrr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= syscall.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + go func() { + for range sigchan { + // quit cleanly and the new terminal item is on a new line + fmt.Println() + signal.Stop(sigchan) + close(sigchan) + RestoreTerminal(fd, state) + os.Exit(1) + } + }() +} diff --git a/vendor/github.com/moby/moby/pkg/term/term_solaris.go b/vendor/github.com/moby/moby/pkg/term/term_solaris.go new file mode 100644 index 0000000..112debb --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/term_solaris.go @@ -0,0 +1,41 @@ +// +build solaris + +package term + +import ( + "syscall" + "unsafe" +) + +/* +#include +#include +#include + +// Small wrapper to get rid of variadic args of ioctl() +int my_ioctl(int fd, int cmd, struct winsize *ws) { + return ioctl(fd, cmd, ws); +} +*/ +import "C" + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + // Skip retval = 0 + if ret == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + // Skip retval = 0 + if ret == 0 { + return nil + } + return err +} diff --git a/vendor/github.com/moby/moby/pkg/term/term_unix.go b/vendor/github.com/moby/moby/pkg/term/term_unix.go new file mode 100644 index 0000000..ddf87a0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/term_unix.go @@ -0,0 +1,29 @@ +// +build !solaris,!windows + +package term + +import ( + "syscall" + "unsafe" +) + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) + // Skipp errno = 0 + if err == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) + // Skipp errno = 0 + if err == 0 { + return nil + } + return err +} diff --git a/vendor/github.com/moby/moby/pkg/term/term_windows.go b/vendor/github.com/moby/moby/pkg/term/term_windows.go new file mode 100644 index 0000000..a91f07e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/term_windows.go @@ -0,0 +1,233 @@ +// +build windows + +package term + +import ( + "io" + "os" + "os/signal" + "syscall" + + "github.com/Azure/go-ansiterm/winterm" + "github.com/docker/docker/pkg/term/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 +} + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + enableVirtualTerminalInput = 0x0200 + enableVirtualTerminalProcessing = 0x0004 + disableNewlineAutoReturn = 0x0008 +) + +// vtInputSupported is true if enableVirtualTerminalInput is supported by the console +var vtInputSupported bool + +// StdStreams returns the standard streams (stdin, stdout, stedrr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. This might + // fail, in which case we will fall back to terminal emulation. + var emulateStdin, emulateStdout, emulateStderr bool + fd := os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate that enableVirtualTerminalInput is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + winterm.SetConsoleMode(fd, mode) + } + + fd = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStdout = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + fd = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStderr = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { + // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. + emulateStdin = true + emulateStdout = false + emulateStderr = false + } + + if emulateStdin { + stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windows.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return windows.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winterm.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e + } + + return &State{mode: mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since disableNewlineAutoReturn might not be supported on this + // version of Windows. + winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. +func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= enableVirtualTerminalInput + } + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/vendor/github.com/moby/moby/pkg/term/termios_darwin.go b/vendor/github.com/moby/moby/pkg/term/termios_darwin.go new file mode 100644 index 0000000..480db90 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/termios_darwin.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]byte + Ispeed uint64 + Ospeed uint64 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/termios_freebsd.go b/vendor/github.com/moby/moby/pkg/term/termios_freebsd.go new file mode 100644 index 0000000..ed843ad --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/termios_freebsd.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/termios_linux.go b/vendor/github.com/moby/moby/pkg/term/termios_linux.go new file mode 100644 index 0000000..22921b6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/termios_linux.go @@ -0,0 +1,47 @@ +// +build !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TCGETS + setTermios = syscall.TCSETS +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + return &oldState, nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/termios_openbsd.go b/vendor/github.com/moby/moby/pkg/term/termios_openbsd.go new file mode 100644 index 0000000..ed843ad --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/termios_openbsd.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/windows/ansi_reader.go b/vendor/github.com/moby/moby/pkg/term/windows/ansi_reader.go new file mode 100644 index 0000000..cb0b883 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/windows/ansi_reader.go @@ -0,0 +1,263 @@ +// +build windows + +package windows + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + initLogger() + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. +func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar.fd, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + logger.Debug("No input events detected") + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. + recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. +func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { + var buffer bytes.Buffer + for _, event := range events { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { + buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) + } + } + + return buffer.Bytes() +} + +// keyToString maps the given input event record to the corresponding string. +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { + if keyEvent.UnicodeChar == 0 { + return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) + } + + _, alt, control := getControlKeys(keyEvent.ControlKeyState) + if control { + // TODO(azlinux): Implement following control sequences + // -D Signals the end of input from the keyboard; also exits current shell. + // -H Deletes the first character to the left of the cursor. Also called the ERASE key. + // -Q Restarts printing after it has been stopped with -s. + // -S Suspends printing on the screen (does not stop the program). + // -U Deletes all characters on the current line. Also called the KILL key. + // -E Quits current command and creates a core + + } + + // +Key generates ESC N Key + if !control && alt { + return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + } + + return string(keyEvent.UnicodeChar) +} + +// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. +func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState uint32) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. +func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/vendor/github.com/moby/moby/pkg/term/windows/ansi_writer.go b/vendor/github.com/moby/moby/pkg/term/windows/ansi_writer.go new file mode 100644 index 0000000..a3ce569 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/windows/ansi_writer.go @@ -0,0 +1,64 @@ +// +build windows + +package windows + +import ( + "io" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. +type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + initLogger() + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. +func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) + return aw.parser.Parse(p) +} diff --git a/vendor/github.com/moby/moby/pkg/term/windows/console.go b/vendor/github.com/moby/moby/pkg/term/windows/console.go new file mode 100644 index 0000000..ca5c3b2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/windows/console.go @@ -0,0 +1,35 @@ +// +build windows + +package windows + +import ( + "os" + + "github.com/Azure/go-ansiterm/winterm" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = IsConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +func IsConsole(fd uintptr) bool { + _, e := winterm.GetConsoleMode(fd) + return e == nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/windows/windows.go b/vendor/github.com/moby/moby/pkg/term/windows/windows.go new file mode 100644 index 0000000..ce4cb59 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/windows/windows.go @@ -0,0 +1,33 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windows + +import ( + "io/ioutil" + "os" + "sync" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger +var initOnce sync.Once + +func initLogger() { + initOnce.Do(func() { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + }) +} diff --git a/vendor/github.com/moby/moby/pkg/term/windows/windows_test.go b/vendor/github.com/moby/moby/pkg/term/windows/windows_test.go new file mode 100644 index 0000000..52aeab5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/windows/windows_test.go @@ -0,0 +1,3 @@ +// This file is necessary to pass the Docker tests. + +package windows diff --git a/vendor/github.com/moby/moby/pkg/testutil/assert/assert.go b/vendor/github.com/moby/moby/pkg/testutil/assert/assert.go new file mode 100644 index 0000000..6da8518 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/testutil/assert/assert.go @@ -0,0 +1,97 @@ +// Package assert contains functions for making assertions in unit tests +package assert + +import ( + "fmt" + "path/filepath" + "reflect" + "runtime" + "strings" + + "github.com/davecgh/go-spew/spew" +) + +// TestingT is an interface which defines the methods of testing.T that are +// required by this package +type TestingT interface { + Fatalf(string, ...interface{}) +} + +// Equal compare the actual value to the expected value and fails the test if +// they are not equal. +func Equal(t TestingT, actual, expected interface{}) { + if expected != actual { + fatal(t, "Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual) + } +} + +//EqualStringSlice compares two slices and fails the test if they do not contain +// the same items. +func EqualStringSlice(t TestingT, actual, expected []string) { + if len(actual) != len(expected) { + fatal(t, "Expected (length %d): %q\nActual (length %d): %q", + len(expected), expected, len(actual), actual) + } + for i, item := range actual { + if item != expected[i] { + fatal(t, "Slices differ at element %d, expected %q got %q", + i, expected[i], item) + } + } +} + +// NilError asserts that the error is nil, otherwise it fails the test. +func NilError(t TestingT, err error) { + if err != nil { + fatal(t, "Expected no error, got: %s", err.Error()) + } +} + +// DeepEqual compare the actual value to the expected value and fails the test if +// they are not "deeply equal". +func DeepEqual(t TestingT, actual, expected interface{}) { + if !reflect.DeepEqual(actual, expected) { + fatal(t, "Expected (%T):\n%v\n\ngot (%T):\n%s\n", + expected, spew.Sdump(expected), actual, spew.Sdump(actual)) + } +} + +// Error asserts that error is not nil, and contains the expected text, +// otherwise it fails the test. +func Error(t TestingT, err error, contains string) { + if err == nil { + fatal(t, "Expected an error, but error was nil") + } + + if !strings.Contains(err.Error(), contains) { + fatal(t, "Expected error to contain '%s', got '%s'", contains, err.Error()) + } +} + +// Contains asserts that the string contains a substring, otherwise it fails the +// test. +func Contains(t TestingT, actual, contains string) { + if !strings.Contains(actual, contains) { + fatal(t, "Expected '%s' to contain '%s'", actual, contains) + } +} + +// NotNil fails the test if the object is nil +func NotNil(t TestingT, obj interface{}) { + if obj == nil { + fatal(t, "Expected non-nil value.") + } +} + +func fatal(t TestingT, format string, args ...interface{}) { + t.Fatalf(errorSource()+format, args...) +} + +// See testing.decorate() +func errorSource() string { + _, filename, line, ok := runtime.Caller(3) + if !ok { + return "" + } + return fmt.Sprintf("%s:%d: ", filepath.Base(filename), line) +} diff --git a/vendor/github.com/moby/moby/pkg/testutil/pkg.go b/vendor/github.com/moby/moby/pkg/testutil/pkg.go new file mode 100644 index 0000000..110b2e6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/testutil/pkg.go @@ -0,0 +1 @@ +package testutil diff --git a/vendor/github.com/moby/moby/pkg/testutil/tempfile/tempfile.go b/vendor/github.com/moby/moby/pkg/testutil/tempfile/tempfile.go new file mode 100644 index 0000000..0e09d99 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/testutil/tempfile/tempfile.go @@ -0,0 +1,36 @@ +package tempfile + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/testutil/assert" +) + +// TempFile is a temporary file that can be used with unit tests. TempFile +// reduces the boilerplate setup required in each test case by handling +// setup errors. +type TempFile struct { + File *os.File +} + +// NewTempFile returns a new temp file with contents +func NewTempFile(t assert.TestingT, prefix string, content string) *TempFile { + file, err := ioutil.TempFile("", prefix+"-") + assert.NilError(t, err) + + _, err = file.Write([]byte(content)) + assert.NilError(t, err) + file.Close() + return &TempFile{File: file} +} + +// Name returns the filename +func (f *TempFile) Name() string { + return f.File.Name() +} + +// Remove removes the file +func (f *TempFile) Remove() { + os.Remove(f.Name()) +} diff --git a/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone.go new file mode 100644 index 0000000..e4dec3a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone.go @@ -0,0 +1,11 @@ +// +build go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return c.Clone() +} diff --git a/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go16.go b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go16.go new file mode 100644 index 0000000..0b81665 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go16.go @@ -0,0 +1,31 @@ +// +build go1.6,!go1.7 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.6 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} diff --git a/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go17.go new file mode 100644 index 0000000..0d5b448 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go17.go @@ -0,0 +1,33 @@ +// +build go1.7,!go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/moby/moby/pkg/truncindex/truncindex.go b/vendor/github.com/moby/moby/pkg/truncindex/truncindex.go new file mode 100644 index 0000000..02610b8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/truncindex/truncindex.go @@ -0,0 +1,137 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + + // Change patricia max prefix per node length, + // because our len(ID) always 64 + trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), + } + for _, id := range ids { + idx.addID(id) + } + return +} + +func (idx *TruncIndex) addID(id string) error { + if strings.Contains(id, " ") { + return ErrIllegalChar + } + if id == "" { + return ErrEmptyPrefix + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("failed to insert id: %s", id) + } + return nil +} + +// Add adds a new ID to the TruncIndex. +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + if err := idx.addID(id); err != nil { + return err + } + return nil +} + +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("no such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("no such id: '%s'", id) + } + return nil +} + +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. +func (idx *TruncIndex) Get(s string) (string, error) { + if s == "" { + return "", ErrEmptyPrefix + } + var ( + id string + ) + subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { + if id != "" { + // we haven't found the ID if there are two or more IDs + id = "" + return ErrAmbiguousPrefix{prefix: string(prefix)} + } + id = string(prefix) + return nil + } + + idx.RLock() + defer idx.RUnlock() + if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { + return "", err + } + if id != "" { + return id, nil + } + return "", ErrNotExist +} + +// Iterate iterates over all stored IDs, and passes each of them to the given handler. +func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/moby/moby/pkg/truncindex/truncindex_test.go b/vendor/github.com/moby/moby/pkg/truncindex/truncindex_test.go new file mode 100644 index 0000000..8197baf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/truncindex/truncindex_test.go @@ -0,0 +1,429 @@ +package truncindex + +import ( + "math/rand" + "testing" + + "github.com/docker/docker/pkg/stringid" +) + +// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. +func TestTruncIndex(t *testing.T) { + ids := []string{} + index := NewTruncIndex(ids) + // Get on an empty index + if _, err := index.Get("foobar"); err == nil { + t.Fatal("Get on an empty index should return an error") + } + + // Spaces should be illegal in an id + if err := index.Add("I have a space"); err == nil { + t.Fatalf("Adding an id with ' ' should return an error") + } + + id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" + // Add an id + if err := index.Add(id); err != nil { + t.Fatal(err) + } + + // Add an empty id (should fail) + if err := index.Add(""); err == nil { + t.Fatalf("Adding an empty id should return an error") + } + + // Get a non-existing id + assertIndexGet(t, index, "abracadabra", "", true) + // Get an empty id + assertIndexGet(t, index, "", "", true) + // Get the exact id + assertIndexGet(t, index, id, id, false) + // The first letter should match + assertIndexGet(t, index, id[:1], id, false) + // The first half should match + assertIndexGet(t, index, id[:len(id)/2], id, false) + // The second half should NOT match + assertIndexGet(t, index, id[len(id)/2:], "", true) + + id2 := id[:6] + "blabla" + // Add an id + if err := index.Add(id2); err != nil { + t.Fatal(err) + } + // Both exact IDs should work + assertIndexGet(t, index, id, id, false) + assertIndexGet(t, index, id2, id2, false) + + // 6 characters or less should conflict + assertIndexGet(t, index, id[:6], "", true) + assertIndexGet(t, index, id[:4], "", true) + assertIndexGet(t, index, id[:1], "", true) + + // An ambiguous id prefix should return an error + if _, err := index.Get(id[:4]); err == nil { + t.Fatal("An ambiguous id prefix should return an error") + } + + // 7 characters should NOT conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id2[:7], id2, false) + + // Deleting a non-existing id should return an error + if err := index.Delete("non-existing"); err == nil { + t.Fatalf("Deleting a non-existing id should return an error") + } + + // Deleting an empty id should return an error + if err := index.Delete(""); err == nil { + t.Fatal("Deleting an empty id should return an error") + } + + // Deleting id2 should remove conflicts + if err := index.Delete(id2); err != nil { + t.Fatal(err) + } + // id2 should no longer work + assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) + + assertIndexIterate(t) +} + +func assertIndexIterate(t *testing.T) { + ids := []string{ + "19b36c2c326ccc11e726eee6ee78a0baf166ef96", + "28b36c2c326ccc11e726eee6ee78a0baf166ef96", + "37b36c2c326ccc11e726eee6ee78a0baf166ef96", + "46b36c2c326ccc11e726eee6ee78a0baf166ef96", + } + + index := NewTruncIndex(ids) + + index.Iterate(func(targetId string) { + for _, id := range ids { + if targetId == id { + return + } + } + + t.Fatalf("An unknown ID '%s'", targetId) + }) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func BenchmarkTruncIndexAdd100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexDelete100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexNew100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexAddGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/urlutil/urlutil.go b/vendor/github.com/moby/moby/pkg/urlutil/urlutil.go new file mode 100644 index 0000000..4415287 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/urlutil/urlutil.go @@ -0,0 +1,50 @@ +// Package urlutil provides helper function to check urls kind. +// It supports http urls, git urls and transport url (tcp://, …) +package urlutil + +import ( + "regexp" + "strings" +) + +var ( + validPrefixes = map[string][]string{ + "url": {"http://", "https://"}, + "git": {"git://", "github.com/", "git@"}, + "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, + } + urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +) + +// IsURL returns true if the provided str is an HTTP(S) URL. +func IsURL(str string) bool { + return checkURL(str, "url") +} + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func IsGitTransport(str string) bool { + return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. +func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/urlutil/urlutil_test.go b/vendor/github.com/moby/moby/pkg/urlutil/urlutil_test.go new file mode 100644 index 0000000..75eb464 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/urlutil/urlutil_test.go @@ -0,0 +1,70 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } + invalidGitUrls = []string{ + "http://github.com/docker/docker.git:#branch", + } + transportUrls = []string{ + "tcp://example.com", + "tcp+tls://example.com", + "udp://example.com", + "unix:///example", + "unixgram:///example", + } +) + +func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { + if IsGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range invalidGitUrls { + if IsGitURL(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsTransport(t *testing.T) { + for _, url := range transportUrls { + if IsTransportURL(url) == false { + t.Fatalf("%q should be detected as valid Transport url", url) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/useragent/README.md b/vendor/github.com/moby/moby/pkg/useragent/README.md new file mode 100644 index 0000000..d9cb367 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/vendor/github.com/moby/moby/pkg/useragent/useragent.go b/vendor/github.com/moby/moby/pkg/useragent/useragent.go new file mode 100644 index 0000000..1137db5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the string base. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is get from the name field, while +// version is get from the version field. Several pieces of version information +// will be concatenated and separated by space. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". +func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/vendor/github.com/moby/moby/pkg/useragent/useragent_test.go b/vendor/github.com/moby/moby/pkg/useragent/useragent_test.go new file mode 100644 index 0000000..0ad7243 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/useragent/useragent_test.go @@ -0,0 +1,31 @@ +package useragent + +import "testing" + +func TestVersionInfo(t *testing.T) { + vi := VersionInfo{"foo", "bar"} + if !vi.isValid() { + t.Fatalf("VersionInfo should be valid") + } + vi = VersionInfo{"", "bar"} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } + vi = VersionInfo{"foo", ""} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } +} + +func TestAppendVersions(t *testing.T) { + vis := []VersionInfo{ + {"foo", "1.0"}, + {"bar", "0.1"}, + {"pi", "3.1.4"}, + } + v := AppendVersions("base", vis...) + expect := "base foo/1.0 bar/0.1 pi/3.1.4" + if v != expect { + t.Fatalf("expected %q, got %q", expect, v) + } +} diff --git a/vendor/github.com/moby/moby/plugin/backend_linux.go b/vendor/github.com/moby/moby/plugin/backend_linux.go new file mode 100644 index 0000000..a5a3f9b --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/backend_linux.go @@ -0,0 +1,790 @@ +// +build linux + +package plugin + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/reference" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Disable deactivates a plugin. This means resources (volumes, networks) cant use them. +func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if !config.ForceDisable && p.GetRefCount() > 0 { + return fmt.Errorf("plugin %s is in use", p.Name()) + } + + if err := pm.disable(p, c); err != nil { + return err + } + pm.config.LogPluginEvent(p.GetID(), refOrID, "disable") + return nil +} + +// Enable activates a plugin, which implies that they are ready to be used by containers. +func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + + c := &controller{timeoutInSecs: config.Timeout} + if err := pm.enable(p, c, false); err != nil { + return err + } + pm.config.LogPluginEvent(p.GetID(), refOrID, "enable") + return nil +} + +// Inspect examines a plugin config +func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return nil, err + } + + return &p.PluginObj, nil +} + +func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error { + if outStream != nil { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + config.ProgressOutput = progress.ChanOutput(progressChan) + } else { + config.ProgressOutput = progress.DiscardOutput() + } + return distribution.Pull(ctx, ref, config) +} + +type tempConfigStore struct { + config []byte + configDigest digest.Digest +} + +func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) { + dgst := digest.FromBytes(c) + + s.config = c + s.configDigest = dgst + + return dgst, nil +} + +func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { + if d != s.configDigest { + return nil, digest.ErrDigestNotFound + } + return s.config, nil +} + +func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + return configToRootFS(c) +} + +func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) { + var privileges types.PluginPrivileges + if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" { + privileges = append(privileges, types.PluginPrivilege{ + Name: "network", + Description: "permissions to access a network", + Value: []string{c.Network.Type}, + }) + } + for _, mount := range c.Mounts { + if mount.Source != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "mount", + Description: "host path to mount", + Value: []string{*mount.Source}, + }) + } + } + for _, device := range c.Linux.Devices { + if device.Path != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "device", + Description: "host device to access", + Value: []string{*device.Path}, + }) + } + } + if c.Linux.AllowAllDevices { + privileges = append(privileges, types.PluginPrivilege{ + Name: "allow-all-devices", + Description: "allow 'rwm' access to all devices", + Value: []string{"true"}, + }) + } + if len(c.Linux.Capabilities) > 0 { + privileges = append(privileges, types.PluginPrivilege{ + Name: "capabilities", + Description: "list of additional capabilities required", + Value: c.Linux.Capabilities, + }) + } + + return privileges, nil +} + +// Privileges pulls a plugin config and computes the privileges required to install it. +func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { + // create image store instance + cs := &tempConfigStore{} + + // DownloadManager not defined because only pulling configuration. + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: func(string, string, string) {}, + ImageStore: cs, + }, + Schema2Types: distribution.PluginTypes, + } + + if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil { + return nil, err + } + + if cs.config == nil { + return nil, errors.New("no configuration pulled") + } + var config types.PluginConfig + if err := json.Unmarshal(cs.config, &config); err != nil { + return nil, err + } + + return computePrivileges(config) +} + +// Upgrade upgrades a plugin +func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return errors.Wrap(err, "plugin must be installed before upgrading") + } + + if p.IsEnabled() { + return fmt.Errorf("plugin must be disabled before upgrading") + } + + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + // revalidate because Pull is public + nameref, err := reference.ParseNamed(name) + if err != nil { + return errors.Wrapf(err, "failed to parse %q", name) + } + name = reference.WithDefaultTag(nameref).String() + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + defer os.RemoveAll(tmpRootFSDir) + + dm := &downloadManager{ + tmpDir: tmpRootFSDir, + blobStore: pm.blobStore, + } + + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: dm, + }, + DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead + Schema2Types: distribution.PluginTypes, + } + + err = pm.pull(ctx, ref, pluginPullConfig, outStream) + if err != nil { + go pm.GC() + return err + } + + if err := pm.upgradePlugin(p, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil { + return err + } + p.PluginObj.PluginReference = ref.String() + return nil +} + +// Pull pulls a plugin, check if the correct privileges are provided and install the plugin. +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + // revalidate because Pull is public + nameref, err := reference.ParseNamed(name) + if err != nil { + return errors.Wrapf(err, "failed to parse %q", name) + } + name = reference.WithDefaultTag(nameref).String() + + if err := pm.config.Store.validateName(name); err != nil { + return err + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + defer os.RemoveAll(tmpRootFSDir) + + dm := &downloadManager{ + tmpDir: tmpRootFSDir, + blobStore: pm.blobStore, + } + + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: dm, + }, + DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead + Schema2Types: distribution.PluginTypes, + } + + err = pm.pull(ctx, ref, pluginPullConfig, outStream) + if err != nil { + go pm.GC() + return err + } + + p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges) + if err != nil { + return err + } + p.PluginObj.PluginReference = ref.String() + + return nil +} + +// List displays the list of plugins and associated metadata. +func (pm *Manager) List() ([]types.Plugin, error) { + plugins := pm.config.Store.GetAll() + out := make([]types.Plugin, 0, len(plugins)) + for _, p := range plugins { + out = append(out, p.PluginObj) + } + return out, nil +} + +// Push pushes a plugin to the store. +func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return err + } + + ref, err := reference.ParseNamed(p.Name()) + if err != nil { + return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name()) + } + + var po progress.Output + if outStream != nil { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + po = progress.ChanOutput(progressChan) + } else { + po = progress.DiscardOutput() + } + + // TODO: replace these with manager + is := &pluginConfigStore{ + pm: pm, + plugin: p, + } + ls := &pluginLayerProvider{ + pm: pm, + plugin: p, + } + rs := &pluginReference{ + name: ref, + pluginID: p.Config, + } + + uploadManager := xfer.NewLayerUploadManager(3) + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + ProgressOutput: po, + RegistryService: pm.config.RegistryService, + ReferenceStore: rs, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: is, + RequireSchema2: true, + }, + ConfigMediaType: schema2.MediaTypePluginConfig, + LayerStore: ls, + UploadManager: uploadManager, + } + + return distribution.Push(ctx, ref, imagePushConfig) +} + +type pluginReference struct { + name reference.Named + pluginID digest.Digest +} + +func (r *pluginReference) References(id digest.Digest) []reference.Named { + if r.pluginID != id { + return nil + } + return []reference.Named{r.name} +} + +func (r *pluginReference) ReferencesByName(ref reference.Named) []reference.Association { + return []reference.Association{ + { + Ref: r.name, + ID: r.pluginID, + }, + } +} + +func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) { + if r.name.String() != ref.String() { + return digest.Digest(""), reference.ErrDoesNotExist + } + return r.pluginID, nil +} + +func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) Delete(ref reference.Named) (bool, error) { + // Read only, ignore + return false, nil +} + +type pluginConfigStore struct { + pm *Manager + plugin *v2.Plugin +} + +func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) { + return digest.Digest(""), errors.New("cannot store config on push") +} + +func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) { + if s.plugin.Config != d { + return nil, errors.New("plugin not found") + } + rwc, err := s.pm.blobStore.Get(d) + if err != nil { + return nil, err + } + defer rwc.Close() + return ioutil.ReadAll(rwc) +} + +func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + return configToRootFS(c) +} + +type pluginLayerProvider struct { + pm *Manager + plugin *v2.Plugin +} + +func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) { + rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs) + var i int + for i = 1; i <= len(rootFS.DiffIDs); i++ { + if layer.CreateChainID(rootFS.DiffIDs[:i]) == id { + break + } + } + if i > len(rootFS.DiffIDs) { + return nil, errors.New("layer not found") + } + return &pluginLayer{ + pm: p.pm, + diffIDs: rootFS.DiffIDs[:i], + blobs: p.plugin.Blobsums[:i], + }, nil +} + +type pluginLayer struct { + pm *Manager + diffIDs []layer.DiffID + blobs []digest.Digest +} + +func (l *pluginLayer) ChainID() layer.ChainID { + return layer.CreateChainID(l.diffIDs) +} + +func (l *pluginLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *pluginLayer) Parent() distribution.PushLayer { + if len(l.diffIDs) == 1 { + return nil + } + return &pluginLayer{ + pm: l.pm, + diffIDs: l.diffIDs[:len(l.diffIDs)-1], + blobs: l.blobs[:len(l.diffIDs)-1], + } +} + +func (l *pluginLayer) Open() (io.ReadCloser, error) { + return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1]) +} + +func (l *pluginLayer) Size() (int64, error) { + return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1]) +} + +func (l *pluginLayer) MediaType() string { + return schema2.MediaTypeLayer +} + +func (l *pluginLayer) Release() { + // Nothing needs to be release, no references held +} + +// Remove deletes plugin's root directory. +func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { + p, err := pm.config.Store.GetV2Plugin(name) + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if err != nil { + return err + } + + if !config.ForceRemove { + if p.GetRefCount() > 0 { + return fmt.Errorf("plugin %s is in use", p.Name()) + } + if p.IsEnabled() { + return fmt.Errorf("plugin %s is enabled", p.Name()) + } + } + + if p.IsEnabled() { + if err := pm.disable(p, c); err != nil { + logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err) + } + } + + defer func() { + go pm.GC() + }() + + id := p.GetID() + pm.config.Store.Remove(p) + pluginDir := filepath.Join(pm.config.Root, id) + if err := recursiveUnmount(pm.config.Root); err != nil { + logrus.WithField("dir", pm.config.Root).WithField("id", id).Warn(err) + } + if err := os.RemoveAll(pluginDir); err != nil { + logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err) + } + pm.config.LogPluginEvent(id, name, "remove") + return nil +} + +func getMounts(root string) ([]string, error) { + infos, err := mount.GetMounts() + if err != nil { + return nil, errors.Wrap(err, "failed to read mount table") + } + + var mounts []string + for _, m := range infos { + if strings.HasPrefix(m.Mountpoint, root) { + mounts = append(mounts, m.Mountpoint) + } + } + + return mounts, nil +} + +func recursiveUnmount(root string) error { + mounts, err := getMounts(root) + if err != nil { + return err + } + + // sort in reverse-lexicographic order so the root mount will always be last + sort.Sort(sort.Reverse(sort.StringSlice(mounts))) + + for i, m := range mounts { + if err := mount.Unmount(m); err != nil { + if i == len(mounts)-1 { + return errors.Wrapf(err, "error performing recursive unmount on %s", root) + } + logrus.WithError(err).WithField("mountpoint", m).Warn("could not unmount") + } + } + + return nil +} + +// Set sets plugin args +func (pm *Manager) Set(name string, args []string) error { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return err + } + if err := p.Set(args); err != nil { + return err + } + return pm.save(p) +} + +// CreateFromContext creates a plugin from the given pluginDir which contains +// both the rootfs and the config.json and a repoName with optional tag. +func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) { + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + ref, err := reference.ParseNamed(options.RepoName) + if err != nil { + return errors.Wrapf(err, "failed to parse reference %v", options.RepoName) + } + if _, ok := ref.(reference.Canonical); ok { + return errors.Errorf("canonical references are not permitted") + } + taggedRef := reference.WithDefaultTag(ref) + name := taggedRef.String() + + if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin() + return err + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + defer os.RemoveAll(tmpRootFSDir) + if err != nil { + return errors.Wrap(err, "failed to create temp directory") + } + var configJSON []byte + rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON) + + rootFSBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer rootFSBlob.Close() + gzw := gzip.NewWriter(rootFSBlob) + layerDigester := digest.Canonical.New() + rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash())) + + if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil { + return err + } + if err := rootFS.Close(); err != nil { + return err + } + + if configJSON == nil { + return errors.New("config not found") + } + + if err := gzw.Close(); err != nil { + return errors.Wrap(err, "error closing gzip writer") + } + + var config types.PluginConfig + if err := json.Unmarshal(configJSON, &config); err != nil { + return errors.Wrap(err, "failed to parse config") + } + + if err := pm.validateConfig(config); err != nil { + return err + } + + pm.mu.Lock() + defer pm.mu.Unlock() + + rootFSBlobsum, err := rootFSBlob.Commit() + if err != nil { + return err + } + defer func() { + if err != nil { + go pm.GC() + } + }() + + config.Rootfs = &types.PluginConfigRootfs{ + Type: "layers", + DiffIds: []string{layerDigester.Digest().String()}, + } + + configBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer configBlob.Close() + if err := json.NewEncoder(configBlob).Encode(config); err != nil { + return errors.Wrap(err, "error encoding json config") + } + configBlobsum, err := configBlob.Commit() + if err != nil { + return err + } + + p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil) + if err != nil { + return err + } + p.PluginObj.PluginReference = taggedRef.String() + + pm.config.LogPluginEvent(p.PluginObj.ID, name, "create") + + return nil +} + +func (pm *Manager) validateConfig(config types.PluginConfig) error { + return nil // TODO: +} + +func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser { + pr, pw := io.Pipe() + go func() { + tarReader := tar.NewReader(in) + tarWriter := tar.NewWriter(pw) + defer in.Close() + + hasRootFS := false + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + if !hasRootFS { + pw.CloseWithError(errors.Wrap(err, "no rootfs found")) + return + } + // Signals end of archive. + tarWriter.Close() + pw.Close() + return + } + if err != nil { + pw.CloseWithError(errors.Wrap(err, "failed to read from tar")) + return + } + + content := io.Reader(tarReader) + name := path.Clean(hdr.Name) + if path.IsAbs(name) { + name = name[1:] + } + if name == configFileName { + dt, err := ioutil.ReadAll(content) + if err != nil { + pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName)) + return + } + *config = dt + } + if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName { + hdr.Name = path.Clean(path.Join(parts[1:]...)) + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") { + hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:] + } + if err := tarWriter.WriteHeader(hdr); err != nil { + pw.CloseWithError(errors.Wrap(err, "error writing tar header")) + return + } + if _, err := pools.Copy(tarWriter, content); err != nil { + pw.CloseWithError(errors.Wrap(err, "error copying tar data")) + return + } + hasRootFS = true + } else { + io.Copy(ioutil.Discard, content) + } + } + }() + return pr +} diff --git a/vendor/github.com/moby/moby/plugin/backend_unsupported.go b/vendor/github.com/moby/moby/plugin/backend_unsupported.go new file mode 100644 index 0000000..66e6dab --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/backend_unsupported.go @@ -0,0 +1,71 @@ +// +build !linux + +package plugin + +import ( + "errors" + "io" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +var errNotSupported = errors.New("plugins are not supported on this platform") + +// Disable deactivates a plugin, which implies that they cannot be used by containers. +func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error { + return errNotSupported +} + +// Enable activates a plugin, which implies that they are ready to be used by containers. +func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error { + return errNotSupported +} + +// Inspect examines a plugin config +func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { + return nil, errNotSupported +} + +// Privileges pulls a plugin config and computes the privileges required to install it. +func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { + return nil, errNotSupported +} + +// Pull pulls a plugin, check if the correct privileges are provided and install the plugin. +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer) error { + return errNotSupported +} + +// Upgrade pulls a plugin, check if the correct privileges are provided and install the plugin. +func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error { + return errNotSupported +} + +// List displays the list of plugins and associated metadata. +func (pm *Manager) List() ([]types.Plugin, error) { + return nil, errNotSupported +} + +// Push pushes a plugin to the store. +func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error { + return errNotSupported +} + +// Remove deletes plugin's root directory. +func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { + return errNotSupported +} + +// Set sets plugin args +func (pm *Manager) Set(name string, args []string) error { + return errNotSupported +} + +// CreateFromContext creates a plugin from the given pluginDir which contains +// both the rootfs and the config.json and a repoName with optional tag. +func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error { + return errNotSupported +} diff --git a/vendor/github.com/moby/moby/plugin/blobstore.go b/vendor/github.com/moby/moby/plugin/blobstore.go new file mode 100644 index 0000000..dc9e598 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/blobstore.go @@ -0,0 +1,181 @@ +package plugin + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/progress" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type blobstore interface { + New() (WriteCommitCloser, error) + Get(dgst digest.Digest) (io.ReadCloser, error) + Size(dgst digest.Digest) (int64, error) +} + +type basicBlobStore struct { + path string +} + +func newBasicBlobStore(p string) (*basicBlobStore, error) { + tmpdir := filepath.Join(p, "tmp") + if err := os.MkdirAll(tmpdir, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", p) + } + return &basicBlobStore{path: p}, nil +} + +func (b *basicBlobStore) New() (WriteCommitCloser, error) { + f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion") + if err != nil { + return nil, errors.Wrap(err, "failed to create temp file") + } + return newInsertion(f), nil +} + +func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) { + return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) +} + +func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) { + stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) + if err != nil { + return 0, err + } + return stat.Size(), nil +} + +func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) { + for _, alg := range []string{string(digest.Canonical)} { + items, err := ioutil.ReadDir(filepath.Join(b.path, alg)) + if err != nil { + continue + } + for _, fi := range items { + if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists { + p := filepath.Join(b.path, alg, fi.Name()) + err := os.RemoveAll(p) + logrus.Debugf("cleaned up blob %v: %v", p, err) + } + } + } + +} + +// WriteCommitCloser defines object that can be committed to blobstore. +type WriteCommitCloser interface { + io.WriteCloser + Commit() (digest.Digest, error) +} + +type insertion struct { + io.Writer + f *os.File + digester digest.Digester + closed bool +} + +func newInsertion(tempFile *os.File) *insertion { + digester := digest.Canonical.New() + return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())} +} + +func (i *insertion) Commit() (digest.Digest, error) { + p := i.f.Name() + d := filepath.Join(filepath.Join(p, "../../")) + i.f.Sync() + defer os.RemoveAll(p) + if err := i.f.Close(); err != nil { + return "", err + } + i.closed = true + dgst := i.digester.Digest() + if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %v", d) + } + if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil { + return "", errors.Wrapf(err, "failed to rename %v", p) + } + return dgst, nil +} + +func (i *insertion) Close() error { + if i.closed { + return nil + } + defer os.RemoveAll(i.f.Name()) + return i.f.Close() +} + +type downloadManager struct { + blobStore blobstore + tmpDir string + blobs []digest.Digest + configDigest digest.Digest +} + +func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + for _, l := range layers { + b, err := dm.blobStore.New() + if err != nil { + return initialRootFS, nil, err + } + defer b.Close() + rc, _, err := l.Download(ctx, progressOutput) + if err != nil { + return initialRootFS, nil, errors.Wrap(err, "failed to download") + } + defer rc.Close() + r := io.TeeReader(rc, b) + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return initialRootFS, nil, err + } + digester := digest.Canonical.New() + if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { + return initialRootFS, nil, err + } + initialRootFS.Append(layer.DiffID(digester.Digest())) + d, err := b.Commit() + if err != nil { + return initialRootFS, nil, err + } + dm.blobs = append(dm.blobs, d) + } + return initialRootFS, nil, nil +} + +func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { + b, err := dm.blobStore.New() + if err != nil { + return "", err + } + defer b.Close() + n, err := b.Write(dt) + if err != nil { + return "", err + } + if n != len(dt) { + return "", io.ErrShortWrite + } + d, err := b.Commit() + dm.configDigest = d + return d, err +} + +func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { + return nil, digest.ErrDigestNotFound +} +func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) { + return configToRootFS(c) +} diff --git a/vendor/github.com/moby/moby/plugin/defs.go b/vendor/github.com/moby/moby/plugin/defs.go new file mode 100644 index 0000000..927f639 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/defs.go @@ -0,0 +1,26 @@ +package plugin + +import ( + "sync" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" +) + +// Store manages the plugin inventory in memory and on-disk +type Store struct { + sync.RWMutex + plugins map[string]*v2.Plugin + /* handlers are necessary for transition path of legacy plugins + * to the new model. Legacy plugins use Handle() for registering an + * activation callback.*/ + handlers map[string][]func(string, *plugins.Client) +} + +// NewStore creates a Store. +func NewStore(libRoot string) *Store { + return &Store{ + plugins: make(map[string]*v2.Plugin), + handlers: make(map[string][]func(string, *plugins.Client)), + } +} diff --git a/vendor/github.com/moby/moby/plugin/manager.go b/vendor/github.com/moby/moby/plugin/manager.go new file mode 100644 index 0000000..f260aa6 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/manager.go @@ -0,0 +1,347 @@ +package plugin + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +const configFileName = "config.json" +const rootFSFileName = "rootfs" + +var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func (pm *Manager) restorePlugin(p *v2.Plugin) error { + if p.IsEnabled() { + return pm.restore(p) + } + return nil +} + +type eventLogger func(id, name, action string) + +// ManagerConfig defines configuration needed to start new manager. +type ManagerConfig struct { + Store *Store // remove + Executor libcontainerd.Remote + RegistryService registry.Service + LiveRestoreEnabled bool // TODO: remove + LogPluginEvent eventLogger + Root string + ExecRoot string +} + +// Manager controls the plugin subsystem. +type Manager struct { + config ManagerConfig + mu sync.RWMutex // protects cMap + muGC sync.RWMutex // protects blobstore deletions + cMap map[*v2.Plugin]*controller + containerdClient libcontainerd.Client + blobStore *basicBlobStore +} + +// controller represents the manager's control on a plugin. +type controller struct { + restart bool + exitChan chan bool + timeoutInSecs int +} + +// pluginRegistryService ensures that all resolved repositories +// are of the plugin class. +type pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { + repoInfo, err = s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return +} + +// NewManager returns a new plugin manager. +func NewManager(config ManagerConfig) (*Manager, error) { + if config.RegistryService != nil { + config.RegistryService = pluginRegistryService{config.RegistryService} + } + manager := &Manager{ + config: config, + } + if err := os.MkdirAll(manager.config.Root, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root) + } + if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot) + } + if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir()) + } + var err error + manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct + if err != nil { + return nil, errors.Wrap(err, "failed to create containerd client") + } + manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs")) + if err != nil { + return nil, err + } + + manager.cMap = make(map[*v2.Plugin]*controller) + if err := manager.reload(); err != nil { + return nil, errors.Wrap(err, "failed to restore plugins") + } + return manager, nil +} + +func (pm *Manager) tmpDir() string { + return filepath.Join(pm.config.Root, "tmp") +} + +// StateChanged updates plugin internals using libcontainerd events. +func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { + logrus.Debugf("plugin state changed %s %#v", id, e) + + switch e.State { + case libcontainerd.StateExit: + p, err := pm.config.Store.GetV2Plugin(id) + if err != nil { + return err + } + + pm.mu.RLock() + c := pm.cMap[p] + + if c.exitChan != nil { + close(c.exitChan) + } + restart := c.restart + pm.mu.RUnlock() + + os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)) + + if p.PropagatedMount != "" { + if err := mount.Unmount(p.PropagatedMount); err != nil { + logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) + } + propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + if err := mount.Unmount(propRoot); err != nil { + logrus.Warn("Could not unmount %s: %v", propRoot, err) + } + } + + if restart { + pm.enable(p, c, true) + } + } + + return nil +} + +func (pm *Manager) reload() error { // todo: restore + dir, err := ioutil.ReadDir(pm.config.Root) + if err != nil { + return errors.Wrapf(err, "failed to read %v", pm.config.Root) + } + plugins := make(map[string]*v2.Plugin) + for _, v := range dir { + if validFullID.MatchString(v.Name()) { + p, err := pm.loadPlugin(v.Name()) + if err != nil { + return err + } + plugins[p.GetID()] = p + } + } + + pm.config.Store.SetAll(plugins) + + var wg sync.WaitGroup + wg.Add(len(plugins)) + for _, p := range plugins { + c := &controller{} // todo: remove this + pm.cMap[p] = c + go func(p *v2.Plugin) { + defer wg.Done() + if err := pm.restorePlugin(p); err != nil { + logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err) + return + } + + if p.Rootfs != "" { + p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") + } + + // We should only enable rootfs propagation for certain plugin types that need it. + for _, typ := range p.PluginObj.Config.Interface.Types { + if (typ.Capability == "volumedriver" || typ.Capability == "graphdriver") && typ.Prefix == "docker" && strings.HasPrefix(typ.Version, "1.") { + if p.PluginObj.Config.PropagatedMount != "" { + propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + + // check if we need to migrate an older propagated mount from before + // these mounts were stored outside the plugin rootfs + if _, err := os.Stat(propRoot); os.IsNotExist(err) { + if _, err := os.Stat(p.PropagatedMount); err == nil { + // make sure nothing is mounted here + // don't care about errors + mount.Unmount(p.PropagatedMount) + if err := os.Rename(p.PropagatedMount, propRoot); err != nil { + logrus.WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage") + } + if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { + logrus.WithError(err).WithField("dir", p.PropagatedMount).Error("error migrating propagated mount storage") + } + } + } + + if err := os.MkdirAll(propRoot, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + } + // TODO: sanitize PropagatedMount and prevent breakout + p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", p.PropagatedMount, err) + return + } + } + } + } + + pm.save(p) + requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled() + + if requiresManualRestore { + // if liveRestore is not enabled, the plugin will be stopped now so we should enable it + if err := pm.enable(p, c, true); err != nil { + logrus.Errorf("failed to enable plugin '%s': %s", p.Name(), err) + } + } + }(p) + } + wg.Wait() + return nil +} + +func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) { + p := filepath.Join(pm.config.Root, id, configFileName) + dt, err := ioutil.ReadFile(p) + if err != nil { + return nil, errors.Wrapf(err, "error reading %v", p) + } + var plugin v2.Plugin + if err := json.Unmarshal(dt, &plugin); err != nil { + return nil, errors.Wrapf(err, "error decoding %v", p) + } + return &plugin, nil +} + +func (pm *Manager) save(p *v2.Plugin) error { + pluginJSON, err := json.Marshal(p) + if err != nil { + return errors.Wrap(err, "failed to marshal plugin json") + } + if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil { + return errors.Wrap(err, "failed to write atomically plugin json") + } + return nil +} + +// GC cleans up unrefrenced blobs. This is recommended to run in a goroutine +func (pm *Manager) GC() { + pm.muGC.Lock() + defer pm.muGC.Unlock() + + whitelist := make(map[digest.Digest]struct{}) + for _, p := range pm.config.Store.GetAll() { + whitelist[p.Config] = struct{}{} + for _, b := range p.Blobsums { + whitelist[b] = struct{}{} + } + } + + pm.blobStore.gc(whitelist) +} + +type logHook struct{ id string } + +func (logHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (l logHook) Fire(entry *logrus.Entry) error { + entry.Data = logrus.Fields{"plugin": l.id} + return nil +} + +func attachToLog(id string) func(libcontainerd.IOPipe) error { + return func(iop libcontainerd.IOPipe) error { + iop.Stdin.Close() + + logger := logrus.New() + logger.Hooks.Add(logHook{id}) + // TODO: cache writer per id + w := logger.Writer() + go func() { + io.Copy(w, iop.Stdout) + }() + go func() { + // TODO: update logrus and use logger.WriterLevel + io.Copy(w, iop.Stderr) + }() + return nil + } +} + +func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error { + // todo: make a better function that doesn't check order + if !reflect.DeepEqual(privileges, requiredPrivileges) { + return errors.New("incorrect privileges") + } + return nil +} + +func configToRootFS(c []byte) (*image.RootFS, error) { + var pluginConfig types.PluginConfig + if err := json.Unmarshal(c, &pluginConfig); err != nil { + return nil, err + } + // validation for empty rootfs is in distribution code + if pluginConfig.Rootfs == nil { + return nil, nil + } + + return rootFSFromPlugin(pluginConfig.Rootfs), nil +} + +func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS { + rootFS := image.RootFS{ + Type: pluginfs.Type, + DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)), + } + for i := range pluginfs.DiffIds { + rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i]) + } + + return &rootFS +} diff --git a/vendor/github.com/moby/moby/plugin/manager_linux.go b/vendor/github.com/moby/moby/plugin/manager_linux.go new file mode 100644 index 0000000..7e734b7 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/manager_linux.go @@ -0,0 +1,292 @@ +// +build linux + +package plugin + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") + if p.IsEnabled() && !force { + return fmt.Errorf("plugin %s is already enabled", p.Name()) + } + spec, err := p.InitSpec(pm.config.ExecRoot) + if err != nil { + return err + } + + c.restart = true + c.exitChan = make(chan bool) + + pm.mu.Lock() + pm.cMap[p] = c + pm.mu.Unlock() + + var propRoot string + if p.PropagatedMount != "" { + propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + + if err := os.MkdirAll(propRoot, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + } + + if err := mount.MakeRShared(propRoot); err != nil { + return errors.Wrap(err, "error setting up propagated mount dir") + } + + if err := mount.Mount(propRoot, p.PropagatedMount, "none", "rbind"); err != nil { + return errors.Wrap(err, "error creating mount for propagated mount") + } + } + + if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), 0, 0); err != nil { + return errors.WithStack(err) + } + + if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil { + if p.PropagatedMount != "" { + if err := mount.Unmount(p.PropagatedMount); err != nil { + logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) + } + if err := mount.Unmount(propRoot); err != nil { + logrus.Warnf("Could not unmount %s: %v", propRoot, err) + } + } + return errors.WithStack(err) + } + + return pm.pluginPostStart(p, c) +} + +func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { + client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()), nil, c.timeoutInSecs) + if err != nil { + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + return errors.WithStack(err) + } + + p.SetPClient(client) + pm.config.Store.SetState(p, true) + pm.config.Store.CallHandler(p) + + return pm.save(p) +} + +func (pm *Manager) restore(p *v2.Plugin) error { + if err := pm.containerdClient.Restore(p.GetID(), attachToLog(p.GetID())); err != nil { + return err + } + + if pm.config.LiveRestoreEnabled { + c := &controller{} + if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 { + // plugin is not running, so follow normal startup procedure + return pm.enable(p, c, true) + } + + c.exitChan = make(chan bool) + c.restart = true + pm.mu.Lock() + pm.cMap[p] = c + pm.mu.Unlock() + return pm.pluginPostStart(p, c) + } + + return nil +} + +func shutdownPlugin(p *v2.Plugin, c *controller, containerdClient libcontainerd.Client) { + pluginID := p.GetID() + + err := containerdClient.Signal(pluginID, int(syscall.SIGTERM)) + if err != nil { + logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err) + } else { + select { + case <-c.exitChan: + logrus.Debug("Clean shutdown of plugin") + case <-time.After(time.Second * 10): + logrus.Debug("Force shutdown plugin") + if err := containerdClient.Signal(pluginID, int(syscall.SIGKILL)); err != nil { + logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err) + } + } + } +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + if !p.IsEnabled() { + return fmt.Errorf("plugin %s is already disabled", p.Name()) + } + + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + pm.config.Store.SetState(p, false) + return pm.save(p) +} + +// Shutdown stops all plugins and called during daemon shutdown. +func (pm *Manager) Shutdown() { + plugins := pm.config.Store.GetAll() + for _, p := range plugins { + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if pm.config.LiveRestoreEnabled && p.IsEnabled() { + logrus.Debug("Plugin active when liveRestore is set, skipping shutdown") + continue + } + if pm.containerdClient != nil && p.IsEnabled() { + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + } + } +} + +func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobsums []digest.Digest, tmpRootFSDir string, privileges *types.PluginPrivileges) (err error) { + config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) + if err != nil { + return err + } + + pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) + orig := filepath.Join(pdir, "rootfs") + + // Make sure nothing is mounted + // This could happen if the plugin was disabled with `-f` with active mounts. + // If there is anything in `orig` is still mounted, this should error out. + if err := recursiveUnmount(orig); err != nil { + return err + } + + backup := orig + "-old" + if err := os.Rename(orig, backup); err != nil { + return errors.Wrap(err, "error backing up plugin data before upgrade") + } + + defer func() { + if err != nil { + if rmErr := os.RemoveAll(orig); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up after failed upgrade") + return + } + + if err := os.Rename(backup, orig); err != nil { + err = errors.Wrap(err, "error restoring old plugin root on upgrade failure") + } + if rmErr := os.RemoveAll(tmpRootFSDir); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir) + } + } else { + if rmErr := os.RemoveAll(backup); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up old plugin root after successful upgrade") + } + + p.Config = configDigest + p.Blobsums = blobsums + } + }() + + if err := os.Rename(tmpRootFSDir, orig); err != nil { + return errors.Wrap(err, "error upgrading") + } + + p.PluginObj.Config = config + err = pm.save(p) + return errors.Wrap(err, "error saving upgraded plugin config") +} + +func (pm *Manager) setupNewPlugin(configDigest digest.Digest, blobsums []digest.Digest, privileges *types.PluginPrivileges) (types.PluginConfig, error) { + configRC, err := pm.blobStore.Get(configDigest) + if err != nil { + return types.PluginConfig{}, err + } + defer configRC.Close() + + var config types.PluginConfig + dec := json.NewDecoder(configRC) + if err := dec.Decode(&config); err != nil { + return types.PluginConfig{}, errors.Wrapf(err, "failed to parse config") + } + if dec.More() { + return types.PluginConfig{}, errors.New("invalid config json") + } + + requiredPrivileges, err := computePrivileges(config) + if err != nil { + return types.PluginConfig{}, err + } + if privileges != nil { + if err := validatePrivileges(requiredPrivileges, *privileges); err != nil { + return types.PluginConfig{}, err + } + } + + return config, nil +} + +// createPlugin creates a new plugin. take lock before calling. +func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges) (p *v2.Plugin, err error) { + if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. remove store + return nil, err + } + + config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) + if err != nil { + return nil, err + } + + p = &v2.Plugin{ + PluginObj: types.Plugin{ + Name: name, + ID: stringid.GenerateRandomID(), + Config: config, + }, + Config: configDigest, + Blobsums: blobsums, + } + p.InitEmptySettings() + + pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) + if err := os.MkdirAll(pdir, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", pdir) + } + + defer func() { + if err != nil { + os.RemoveAll(pdir) + } + }() + + if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil { + return nil, errors.Wrap(err, "failed to rename rootfs") + } + + if err := pm.save(p); err != nil { + return nil, err + } + + pm.config.Store.Add(p) // todo: remove + + return p, nil +} diff --git a/vendor/github.com/moby/moby/plugin/manager_solaris.go b/vendor/github.com/moby/moby/plugin/manager_solaris.go new file mode 100644 index 0000000..72ccae7 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/manager_solaris.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) restore(p *v2.Plugin) error { + return fmt.Errorf("Not implemented") +} + +// Shutdown plugins +func (pm *Manager) Shutdown() { +} diff --git a/vendor/github.com/moby/moby/plugin/manager_windows.go b/vendor/github.com/moby/moby/plugin/manager_windows.go new file mode 100644 index 0000000..4469a67 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/manager_windows.go @@ -0,0 +1,30 @@ +// +build windows + +package plugin + +import ( + "fmt" + + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) restore(p *v2.Plugin) error { + return fmt.Errorf("Not implemented") +} + +// Shutdown plugins +func (pm *Manager) Shutdown() { +} diff --git a/vendor/github.com/moby/moby/plugin/store.go b/vendor/github.com/moby/moby/plugin/store.go new file mode 100644 index 0000000..b7a96a9 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/store.go @@ -0,0 +1,263 @@ +package plugin + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/reference" + "github.com/pkg/errors" +) + +/* allowV1PluginsFallback determines daemon's support for V1 plugins. + * When the time comes to remove support for V1 plugins, flipping + * this bool is all that will be needed. + */ +const allowV1PluginsFallback bool = true + +/* defaultAPIVersion is the version of the plugin API for volume, network, + IPAM and authz. This is a very stable API. When we update this API, then + pluginType should include a version. eg "networkdriver/2.0". +*/ +const defaultAPIVersion string = "1.0" + +// ErrNotFound indicates that a plugin was not found locally. +type ErrNotFound string + +func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) } + +// ErrAmbiguous indicates that a plugin was not found locally. +type ErrAmbiguous string + +func (name ErrAmbiguous) Error() string { + return fmt.Sprintf("multiple plugins found for %q", string(name)) +} + +// GetV2Plugin retreives a plugin by name, id or partial ID. +func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { + ps.RLock() + defer ps.RUnlock() + + id, err := ps.resolvePluginID(refOrID) + if err != nil { + return nil, err + } + + p, idOk := ps.plugins[id] + if !idOk { + return nil, errors.WithStack(ErrNotFound(id)) + } + + return p, nil +} + +// validateName returns error if name is already reserved. always call with lock and full name +func (ps *Store) validateName(name string) error { + for _, p := range ps.plugins { + if p.Name() == name { + return errors.Errorf("plugin %q already exists", name) + } + } + return nil +} + +// GetAll retreives all plugins. +func (ps *Store) GetAll() map[string]*v2.Plugin { + ps.RLock() + defer ps.RUnlock() + return ps.plugins +} + +// SetAll initialized plugins during daemon restore. +func (ps *Store) SetAll(plugins map[string]*v2.Plugin) { + ps.Lock() + defer ps.Unlock() + ps.plugins = plugins +} + +func (ps *Store) getAllByCap(capability string) []plugingetter.CompatPlugin { + ps.RLock() + defer ps.RUnlock() + + result := make([]plugingetter.CompatPlugin, 0, 1) + for _, p := range ps.plugins { + if p.IsEnabled() { + if _, err := p.FilterByCap(capability); err == nil { + result = append(result, p) + } + } + } + return result +} + +// SetState sets the active state of the plugin and updates plugindb. +func (ps *Store) SetState(p *v2.Plugin, state bool) { + ps.Lock() + defer ps.Unlock() + + p.PluginObj.Enabled = state +} + +// Add adds a plugin to memory and plugindb. +// An error will be returned if there is a collision. +func (ps *Store) Add(p *v2.Plugin) error { + ps.Lock() + defer ps.Unlock() + + if v, exist := ps.plugins[p.GetID()]; exist { + return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name()) + } + ps.plugins[p.GetID()] = p + return nil +} + +// Remove removes a plugin from memory and plugindb. +func (ps *Store) Remove(p *v2.Plugin) { + ps.Lock() + delete(ps.plugins, p.GetID()) + ps.Unlock() +} + +// Get returns an enabled plugin matching the given name and capability. +func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { + var ( + p *v2.Plugin + err error + ) + + // Lookup using new model. + if ps != nil { + p, err = ps.GetV2Plugin(name) + if err == nil { + p.AddRefCount(mode) + if p.IsEnabled() { + return p.FilterByCap(capability) + } + // Plugin was found but it is disabled, so we should not fall back to legacy plugins + // but we should error out right away + return nil, ErrNotFound(name) + } + if _, ok := errors.Cause(err).(ErrNotFound); !ok { + return nil, err + } + } + + // Lookup using legacy model. + if allowV1PluginsFallback { + p, err := plugins.Get(name, capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + return p, nil + } + + return nil, err +} + +// GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability. +func (ps *Store) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { + return ps.getAllByCap(capability) +} + +// GetAllByCap returns a list of enabled plugins matching the given capability. +func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { + result := make([]plugingetter.CompatPlugin, 0, 1) + + /* Daemon start always calls plugin.Init thereby initializing a store. + * So store on experimental builds can never be nil, even while + * handling legacy plugins. However, there are legacy plugin unit + * tests where the volume subsystem directly talks with the plugin, + * bypassing the daemon. For such tests, this check is necessary. + */ + if ps != nil { + ps.RLock() + result = ps.getAllByCap(capability) + ps.RUnlock() + } + + // Lookup with legacy model + if allowV1PluginsFallback { + pl, err := plugins.GetAll(capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + for _, p := range pl { + result = append(result, p) + } + } + return result, nil +} + +// Handle sets a callback for a given capability. It is only used by network +// and ipam drivers during plugin registration. The callback registers the +// driver with the subsystem (network, ipam). +func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) { + pluginType := fmt.Sprintf("docker.%s/%s", strings.ToLower(capability), defaultAPIVersion) + + // Register callback with new plugin model. + ps.Lock() + handlers, ok := ps.handlers[pluginType] + if !ok { + handlers = []func(string, *plugins.Client){} + } + handlers = append(handlers, callback) + ps.handlers[pluginType] = handlers + ps.Unlock() + + // Register callback with legacy plugin model. + if allowV1PluginsFallback { + plugins.Handle(capability, callback) + } +} + +// CallHandler calls the registered callback. It is invoked during plugin enable. +func (ps *Store) CallHandler(p *v2.Plugin) { + for _, typ := range p.GetTypes() { + for _, handler := range ps.handlers[typ.String()] { + handler(p.Name(), p.Client()) + } + } +} + +func (ps *Store) resolvePluginID(idOrName string) (string, error) { + ps.RLock() // todo: fix + defer ps.RUnlock() + + if validFullID.MatchString(idOrName) { + return idOrName, nil + } + + ref, err := reference.ParseNamed(idOrName) + if err != nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + if _, ok := ref.(reference.Canonical); ok { + logrus.Warnf("canonical references cannot be resolved: %v", ref.String()) + return "", errors.WithStack(ErrNotFound(idOrName)) + } + + fullRef := reference.WithDefaultTag(ref) + + for _, p := range ps.plugins { + if p.PluginObj.Name == fullRef.String() { + return p.PluginObj.ID, nil + } + } + + var found *v2.Plugin + for id, p := range ps.plugins { // this can be optimized + if strings.HasPrefix(id, idOrName) { + if found != nil { + return "", errors.WithStack(ErrAmbiguous(idOrName)) + } + found = p + } + } + if found == nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + return found.PluginObj.ID, nil +} diff --git a/vendor/github.com/moby/moby/plugin/store_test.go b/vendor/github.com/moby/moby/plugin/store_test.go new file mode 100644 index 0000000..6b1f6a9 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/store_test.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/plugin/v2" +) + +func TestFilterByCapNeg(t *testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} + i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("foobar") + if err == nil { + t.Fatalf("expected inadequate error, got %v", err) + } +} + +func TestFilterByCapPos(t *testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + + iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} + i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("volumedriver") + if err != nil { + t.Fatalf("expected no error, got %v", err) + } +} diff --git a/vendor/github.com/moby/moby/plugin/v2/plugin.go b/vendor/github.com/moby/moby/plugin/v2/plugin.go new file mode 100644 index 0000000..93b489a --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/v2/plugin.go @@ -0,0 +1,244 @@ +package v2 + +import ( + "fmt" + "strings" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +// Plugin represents an individual plugin. +type Plugin struct { + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + PropagatedMount string // TODO: make private + Rootfs string // TODO: make private + + Config digest.Digest + Blobsums []digest.Digest +} + +const defaultPluginRuntimeDestination = "/run/docker/plugins" + +// ErrInadequateCapability indicates that the plugin did not have the requested capability. +type ErrInadequateCapability struct { + cap string +} + +func (e ErrInadequateCapability) Error() string { + return fmt.Sprintf("plugin does not provide %q capability", e.cap) +} + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Plugin objects this returns the host path of the plugin container's rootfs. +func (p *Plugin) BasePath() string { + return p.Rootfs +} + +// Client returns the plugin client. +func (p *Plugin) Client() *plugins.Client { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.pClient +} + +// SetPClient set the plugin client. +func (p *Plugin) SetPClient(client *plugins.Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.pClient = client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return false +} + +// Name returns the plugin name. +func (p *Plugin) Name() string { + return p.PluginObj.Name +} + +// FilterByCap query the plugin for a given capability. +func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { + capability = strings.ToLower(capability) + for _, typ := range p.PluginObj.Config.Interface.Types { + if typ.Capability == capability && typ.Prefix == "docker" { + return p, nil + } + } + return nil, ErrInadequateCapability{capability} +} + +// InitEmptySettings initializes empty settings for a plugin. +func (p *Plugin) InitEmptySettings() { + p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) + copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) + p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) + copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) + p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) + for _, env := range p.PluginObj.Config.Env { + if env.Value != nil { + p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) + } + } + p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) + copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) +} + +// Set is used to pass arguments to the plugin. +func (p *Plugin) Set(args []string) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.PluginObj.Enabled { + return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") + } + + sets, err := newSettables(args) + if err != nil { + return err + } + + // TODO(vieux): lots of code duplication here, needs to be refactored. + +next: + for _, s := range sets { + // range over all the envs in the config + for _, env := range p.PluginObj.Config.Env { + // found the env in the config + if env.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + // is it, so lets update the settings in memory + updateSettingsEnv(&p.PluginObj.Settings.Env, &s) + continue next + } + } + + // range over all the mounts in the config + for _, mount := range p.PluginObj.Config.Mounts { + // found the mount in the config + if mount.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + *mount.Source = s.value + continue next + } + } + + // range over all the devices in the config + for _, device := range p.PluginObj.Config.Linux.Devices { + // found the device in the config + if device.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + *device.Path = s.value + continue next + } + } + + // found the name in the config + if p.PluginObj.Config.Args.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + p.PluginObj.Settings.Args = strings.Split(s.value, " ") + continue next + } + + return fmt.Errorf("setting %q not found in the plugin configuration", s.name) + } + + return nil +} + +// IsEnabled returns the active state of the plugin. +func (p *Plugin) IsEnabled() bool { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Enabled +} + +// GetID returns the plugin's ID. +func (p *Plugin) GetID() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.ID +} + +// GetSocket returns the plugin socket. +func (p *Plugin) GetSocket() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Socket +} + +// GetTypes returns the interface types of a plugin. +func (p *Plugin) GetTypes() []types.PluginInterfaceType { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Types +} + +// GetRefCount returns the reference count. +func (p *Plugin) GetRefCount() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.refCount +} + +// AddRefCount adds to reference count. +func (p *Plugin) AddRefCount(count int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.refCount += count +} + +// Acquire increments the plugin's reference count +// This should be followed up by `Release()` when the plugin is no longer in use. +func (p *Plugin) Acquire() { + p.AddRefCount(plugingetter.ACQUIRE) +} + +// Release decrements the plugin's reference count +// This should only be called when the plugin is no longer in use, e.g. with +// via `Acquire()` or getter.Get("name", "type", plugingetter.ACQUIRE) +func (p *Plugin) Release() { + p.AddRefCount(plugingetter.RELEASE) +} diff --git a/vendor/github.com/moby/moby/plugin/v2/plugin_linux.go b/vendor/github.com/moby/moby/plugin/v2/plugin_linux.go new file mode 100644 index 0000000..e980e7f --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/v2/plugin_linux.go @@ -0,0 +1,121 @@ +// +build linux + +package v2 + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + s := oci.DefaultSpec() + s.Root = specs.Root{ + Path: p.Rootfs, + Readonly: false, // TODO: all plugins should be readonly? settable in config? + } + + userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) + for _, m := range p.PluginObj.Settings.Mounts { + userMounts[m.Destination] = struct{}{} + } + + execRoot = filepath.Join(execRoot, p.PluginObj.ID) + if err := os.MkdirAll(execRoot, 0700); err != nil { + return nil, errors.WithStack(err) + } + + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ + Source: &execRoot, + Destination: defaultPluginRuntimeDestination, + Type: "bind", + Options: []string{"rbind", "rshared"}, + }) + + if p.PluginObj.Config.Network.Type != "" { + // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) + if p.PluginObj.Config.Network.Type == "host" { + oci.RemoveNamespace(&s, specs.NamespaceType("network")) + } + etcHosts := "/etc/hosts" + resolvConf := "/etc/resolv.conf" + mounts = append(mounts, + types.PluginMount{ + Source: &etcHosts, + Destination: etcHosts, + Type: "bind", + Options: []string{"rbind", "ro"}, + }, + types.PluginMount{ + Source: &resolvConf, + Destination: resolvConf, + Type: "bind", + Options: []string{"rbind", "ro"}, + }) + } + + for _, mnt := range mounts { + m := specs.Mount{ + Destination: mnt.Destination, + Type: mnt.Type, + Options: mnt.Options, + } + if mnt.Source == nil { + return nil, errors.New("mount source is not specified") + } + m.Source = *mnt.Source + s.Mounts = append(s.Mounts, m) + } + + for i, m := range s.Mounts { + if strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) + } + } + } + + if p.PluginObj.Config.PropagatedMount != "" { + p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + s.Linux.RootfsPropagation = "rshared" + } + + if p.PluginObj.Config.Linux.AllowAllDevices { + rwm := "rwm" + s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}} + } + for _, dev := range p.PluginObj.Settings.Devices { + path := *dev.Path + d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") + if err != nil { + return nil, errors.WithStack(err) + } + s.Linux.Devices = append(s.Linux.Devices, d...) + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) + } + + envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) + envs[0] = "PATH=" + system.DefaultPathEnv + envs = append(envs, p.PluginObj.Settings.Env...) + + args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) + cwd := p.PluginObj.Config.WorkDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Terminal = false + s.Process.Args = args + s.Process.Cwd = cwd + s.Process.Env = envs + + s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...) + + return &s, nil +} diff --git a/vendor/github.com/moby/moby/plugin/v2/plugin_unsupported.go b/vendor/github.com/moby/moby/plugin/v2/plugin_unsupported.go new file mode 100644 index 0000000..e60fb83 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/v2/plugin_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux + +package v2 + +import ( + "errors" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + return nil, errors.New("not supported") +} diff --git a/vendor/github.com/moby/moby/plugin/v2/settable.go b/vendor/github.com/moby/moby/plugin/v2/settable.go new file mode 100644 index 0000000..79c6bef --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/v2/settable.go @@ -0,0 +1,102 @@ +package v2 + +import ( + "errors" + "fmt" + "strings" +) + +type settable struct { + name string + field string + value string +} + +var ( + allowedSettableFieldsEnv = []string{"value"} + allowedSettableFieldsArgs = []string{"value"} + allowedSettableFieldsDevices = []string{"path"} + allowedSettableFieldsMounts = []string{"source"} + + errMultipleFields = errors.New("multiple fields are settable, one must be specified") + errInvalidFormat = errors.New("invalid format, must be [.][=]") +) + +func newSettables(args []string) ([]settable, error) { + sets := make([]settable, 0, len(args)) + for _, arg := range args { + set, err := newSettable(arg) + if err != nil { + return nil, err + } + sets = append(sets, set) + } + return sets, nil +} + +func newSettable(arg string) (settable, error) { + var set settable + if i := strings.Index(arg, "="); i == 0 { + return set, errInvalidFormat + } else if i < 0 { + set.name = arg + } else { + set.name = arg[:i] + set.value = arg[i+1:] + } + + if i := strings.LastIndex(set.name, "."); i > 0 { + set.field = set.name[i+1:] + set.name = arg[:i] + } + + return set, nil +} + +// prettyName return name.field if there is a field, otherwise name. +func (set *settable) prettyName() string { + if set.field != "" { + return fmt.Sprintf("%s.%s", set.name, set.field) + } + return set.name +} + +func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { + if set.field == "" { + if len(settable) == 1 { + // if field is not specified and there only one settable, default to it. + set.field = settable[0] + } else if len(settable) > 1 { + return false, errMultipleFields + } + } + + isAllowed := false + for _, allowedSettableField := range allowedSettableFields { + if set.field == allowedSettableField { + isAllowed = true + break + } + } + + if isAllowed { + for _, settableField := range settable { + if set.field == settableField { + return true, nil + } + } + } + + return false, nil +} + +func updateSettingsEnv(env *[]string, set *settable) { + for i, e := range *env { + if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { + (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) + return + } + } + + *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) +} diff --git a/vendor/github.com/moby/moby/plugin/v2/settable_test.go b/vendor/github.com/moby/moby/plugin/v2/settable_test.go new file mode 100644 index 0000000..7183f3a --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/v2/settable_test.go @@ -0,0 +1,91 @@ +package v2 + +import ( + "reflect" + "testing" +) + +func TestNewSettable(t *testing.T) { + contexts := []struct { + arg string + name string + field string + value string + err error + }{ + {"name=value", "name", "", "value", nil}, + {"name", "name", "", "", nil}, + {"name.field=value", "name", "field", "value", nil}, + {"name.field", "name", "field", "", nil}, + {"=value", "", "", "", errInvalidFormat}, + {"=", "", "", "", errInvalidFormat}, + } + + for _, c := range contexts { + s, err := newSettable(c.arg) + if err != c.err { + t.Fatalf("expected error to be %v, got %v", c.err, err) + } + + if s.name != c.name { + t.Fatalf("expected name to be %q, got %q", c.name, s.name) + } + + if s.field != c.field { + t.Fatalf("expected field to be %q, got %q", c.field, s.field) + } + + if s.value != c.value { + t.Fatalf("expected value to be %q, got %q", c.value, s.value) + } + + } +} + +func TestIsSettable(t *testing.T) { + contexts := []struct { + allowedSettableFields []string + set settable + settable []string + result bool + err error + }{ + {allowedSettableFieldsEnv, settable{}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"value"}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"foo"}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value1", "value2"}, false, errMultipleFields}, + } + + for _, c := range contexts { + if res, err := c.set.isSettable(c.allowedSettableFields, c.settable); res != c.result { + t.Fatalf("expected result to be %t, got %t", c.result, res) + } else if err != c.err { + t.Fatalf("expected error to be %v, got %v", c.err, err) + } + } +} + +func TestUpdateSettinsEnv(t *testing.T) { + contexts := []struct { + env []string + set settable + newEnv []string + }{ + {[]string{}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"FOO=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0", "BAR=1"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1", "BAR=1"}}, + } + + for _, c := range contexts { + updateSettingsEnv(&c.env, &c.set) + + if !reflect.DeepEqual(c.env, c.newEnv) { + t.Fatalf("expected env to be %q, got %q", c.newEnv, c.env) + } + } +} diff --git a/vendor/github.com/moby/moby/poule.yml b/vendor/github.com/moby/moby/poule.yml new file mode 100644 index 0000000..61aab45 --- /dev/null +++ b/vendor/github.com/moby/moby/poule.yml @@ -0,0 +1,88 @@ +# Add a "status/0-triage" to every newly opened pull request. +- triggers: + pull_request: [ opened ] + operations: + - type: label + settings: { + patterns: { + status/0-triage: [ ".*" ], + } + } + +# For every newly created or modified issue, assign label based on matching regexp using the `label` +# operation, as well as an Engine-specific version label using `version-label`. +- triggers: + issues: [ edited, opened, reopened ] + operations: + - type: label + settings: { + patterns: { + area/builder: [ "dockerfile", "docker build" ], + area/distribution: [ "docker login", "docker logout", "docker pull", "docker push", "docker search" ], + area/plugins: [ "docker plugin" ], + area/networking: [ "docker network", "ipvs", "vxlan" ], + area/runtime: [ "oci runtime error" ], + area/security/trust: [ "docker_content_trust" ], + area/swarm: [ "docker node", "docker service", "docker swarm" ], + platform/desktop: [ "docker for mac", "docker for windows" ], + platform/freebsd: [ "freebsd" ], + platform/windows: [ "nanoserver", "windowsservercore", "windows server" ], + } + } + - type: version-label + +# When a pull request is closed, attach it to the currently active milestone. +- triggers: + pull_request: [ closed ] + operations: + - type: version-milestone + +# Labeling a PR with `rebuild/` triggers a rebuild job for the associated +# configuration. The label is automatically removed after the rebuild is initiated. There's no such +# thing as "templating" in this configuration, so we need one operation for each type of +# configuration that can be triggered. +- triggers: + pull_request: [ labeled ] + operations: + - type: rebuild + settings: { + # When configurations are empty, the `rebuild` operation rebuilds all the currently + # known statuses for that pull request. + configurations: [], + label: "rebuild/*", + } + - type: rebuild + settings: { + configurations: [ arm ], + label: "rebuild/arm", + } + - type: rebuild + settings: { + configurations: [ experimental ], + label: "rebuild/experimental", + } + - type: rebuild + settings: { + configurations: [ janky ], + label: "rebuild/janky", + } + - type: rebuild + settings: { + configurations: [ userns ], + label: "rebuild/userns", + } + - type: rebuild + settings: { + configurations: [ vendor ], + label: "rebuild/vendor", + } + - type: rebuild + settings: { + configurations: [ win2lin ], + label: "rebuild/win2lin", + } + - type: rebuild + settings: { + configurations: [ windowsRS1 ], + label: "rebuild/windowsRS1", + } diff --git a/vendor/github.com/moby/moby/profiles/apparmor/apparmor.go b/vendor/github.com/moby/moby/profiles/apparmor/apparmor.go new file mode 100644 index 0000000..5132ebe --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/apparmor/apparmor.go @@ -0,0 +1,122 @@ +// +build linux + +package apparmor + +import ( + "bufio" + "io" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/docker/docker/pkg/aaparser" + "github.com/docker/docker/utils/templates" +) + +var ( + // profileDirectory is the file store for apparmor profiles and macros. + profileDirectory = "/etc/apparmor.d" +) + +// profileData holds information about the given profile for generation. +type profileData struct { + // Name is profile name. + Name string + // Imports defines the apparmor functions to import, before defining the profile. + Imports []string + // InnerImports defines the apparmor functions to import in the profile. + InnerImports []string + // Version is the {major, minor, patch} version of apparmor_parser as a single number. + Version int +} + +// generateDefault creates an apparmor profile from ProfileData. +func (p *profileData) generateDefault(out io.Writer) error { + compiled, err := templates.NewParse("apparmor_profile", baseTemplate) + if err != nil { + return err + } + + if macroExists("tunables/global") { + p.Imports = append(p.Imports, "#include ") + } else { + p.Imports = append(p.Imports, "@{PROC}=/proc/") + } + + if macroExists("abstractions/base") { + p.InnerImports = append(p.InnerImports, "#include ") + } + + ver, err := aaparser.GetVersion() + if err != nil { + return err + } + p.Version = ver + + if err := compiled.Execute(out, p); err != nil { + return err + } + return nil +} + +// macrosExists checks if the passed macro exists. +func macroExists(m string) bool { + _, err := os.Stat(path.Join(profileDirectory, m)) + return err == nil +} + +// InstallDefault generates a default profile in a temp directory determined by +// os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'. +func InstallDefault(name string) error { + p := profileData{ + Name: name, + } + + // Install to a temporary directory. + f, err := ioutil.TempFile("", name) + if err != nil { + return err + } + profilePath := f.Name() + + defer f.Close() + defer os.Remove(profilePath) + + if err := p.generateDefault(f); err != nil { + f.Close() + return err + } + + if err := aaparser.LoadProfile(profilePath); err != nil { + return err + } + + return nil +} + +// IsLoaded checks if a profile with the given name has been loaded into the +// kernel. +func IsLoaded(name string) (bool, error) { + file, err := os.Open("/sys/kernel/security/apparmor/profiles") + if err != nil { + return false, err + } + defer file.Close() + + r := bufio.NewReader(file) + for { + p, err := r.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + return false, err + } + if strings.HasPrefix(p, name+" ") { + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/moby/moby/profiles/apparmor/template.go b/vendor/github.com/moby/moby/profiles/apparmor/template.go new file mode 100644 index 0000000..c5ea458 --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/apparmor/template.go @@ -0,0 +1,46 @@ +// +build linux + +package apparmor + +// baseTemplate defines the default apparmor profile for containers. +const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) + # deny write to files not in /proc//** or /proc/sys/** + deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, + deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) + deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/kcore rwklx, + + deny mount, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/** rwklx, + deny /sys/kernel/security/** rwklx, + +{{if ge .Version 208095}} + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer={{.Name}}, +{{end}} +} +` diff --git a/vendor/github.com/moby/moby/profiles/seccomp/default.json b/vendor/github.com/moby/moby/profiles/seccomp/default.json new file mode 100755 index 0000000..006592f --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/default.json @@ -0,0 +1,929 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + }, + { + "architecture": "SCMP_ARCH_AARCH64", + "subArchitectures": [ + "SCMP_ARCH_ARM" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64" + ] + }, + { + "architecture": "SCMP_ARCH_S390X", + "subArchitectures": [ + "SCMP_ARCH_S390" + ] + } + ], + "syscalls": [ + { + "names": [ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 1, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 10, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 16, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 17, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socketcall" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 1, + "valueTwo": 0, + "op": "SCMP_CMP_GT" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socketcall" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 1, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + }, + { + "index": 1, + "value": 1, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socketcall" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 1, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + }, + { + "index": 1, + "value": 2, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socketcall" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 1, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + }, + { + "index": 1, + "value": 10, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socketcall" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 1, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + }, + { + "index": 1, + "value": 16, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "socketcall" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 1, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + }, + { + "index": 1, + "value": 17, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "sync_file_range2" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "ppc64le" + ] + }, + "excludes": {} + }, + { + "names": [ + "arm_fadvise64_64", + "arm_sync_file_range", + "sync_file_range2", + "breakpoint", + "cacheflush", + "set_tls" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "arm", + "arm64" + ] + }, + "excludes": {} + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32" + ] + }, + "excludes": {} + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32", + "x86" + ] + }, + "excludes": {} + }, + { + "names": [ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": {} + }, + { + "names": [ + "open_by_handle_at" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_DAC_READ_SEARCH" + ] + }, + "excludes": {} + }, + { + "names": [ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + }, + "excludes": {} + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ], + "arches": [ + "s390", + "s390x" + ] + } + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 1, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "s390 parameter ordering for clone is different", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + } + }, + { + "names": [ + "reboot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_BOOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "chroot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_CHROOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "delete_module", + "init_module", + "finit_module", + "query_module" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_MODULE" + ] + }, + "excludes": {} + }, + { + "names": [ + "acct" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PACCT" + ] + }, + "excludes": {} + }, + { + "names": [ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PTRACE" + ] + }, + "excludes": {} + }, + { + "names": [ + "iopl", + "ioperm" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_RAWIO" + ] + }, + "excludes": {} + }, + { + "names": [ + "settimeofday", + "stime", + "adjtimex", + "clock_settime" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TIME" + ] + }, + "excludes": {} + }, + { + "names": [ + "vhangup" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TTY_CONFIG" + ] + }, + "excludes": {} + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/profiles/seccomp/fixtures/example.json b/vendor/github.com/moby/moby/profiles/seccomp/fixtures/example.json new file mode 100755 index 0000000..674ca50 --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/fixtures/example.json @@ -0,0 +1,27 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "name": "open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "close", + "action": "SCMP_ACT_ALLOW", + "args": [] + } + ] +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/generate.go b/vendor/github.com/moby/moby/profiles/seccomp/generate.go new file mode 100644 index 0000000..32f22bb --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/generate.go @@ -0,0 +1,32 @@ +// +build ignore + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/profiles/seccomp" +) + +// saves the default seccomp profile as a json file so people can use it as a +// base for their own custom profiles +func main() { + wd, err := os.Getwd() + if err != nil { + panic(err) + } + f := filepath.Join(wd, "default.json") + + // write the default profile to the file + b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t") + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile(f, b, 0644); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/seccomp.go b/vendor/github.com/moby/moby/profiles/seccomp/seccomp.go new file mode 100644 index 0000000..a54ef50 --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/seccomp.go @@ -0,0 +1,150 @@ +// +build linux + +package seccomp + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringutils" + "github.com/opencontainers/runtime-spec/specs-go" + libseccomp "github.com/seccomp/libseccomp-golang" +) + +//go:generate go run -tags 'seccomp' generate.go + +// GetDefaultProfile returns the default seccomp profile. +func GetDefaultProfile(rs *specs.Spec) (*specs.Seccomp, error) { + return setupSeccomp(DefaultProfile(), rs) +} + +// LoadProfile takes a file path and decodes the seccomp profile. +func LoadProfile(body string, rs *specs.Spec) (*specs.Seccomp, error) { + var config types.Seccomp + if err := json.Unmarshal([]byte(body), &config); err != nil { + return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) + } + return setupSeccomp(&config, rs) +} + +var nativeToSeccomp = map[string]types.Arch{ + "amd64": types.ArchX86_64, + "arm64": types.ArchAARCH64, + "mips64": types.ArchMIPS64, + "mips64n32": types.ArchMIPS64N32, + "mipsel64": types.ArchMIPSEL64, + "mipsel64n32": types.ArchMIPSEL64N32, + "s390x": types.ArchS390X, +} + +func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.Seccomp, error) { + if config == nil { + return nil, nil + } + + // No default action specified, no syscalls listed, assume seccomp disabled + if config.DefaultAction == "" && len(config.Syscalls) == 0 { + return nil, nil + } + + newConfig := &specs.Seccomp{} + + var arch string + var native, err = libseccomp.GetNativeArch() + if err == nil { + arch = native.String() + } + + if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { + return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + } + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) != 0 { + for _, a := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) + } + } + + if len(config.ArchMap) != 0 { + for _, a := range config.ArchMap { + seccompArch, ok := nativeToSeccomp[arch] + if ok { + if a.Arch == seccompArch { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch)) + for _, sa := range a.SubArches { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa)) + } + break + } + } + } + } + + newConfig.DefaultAction = specs.Action(config.DefaultAction) + +Loop: + // Loop through all syscall blocks and convert them to libcontainer format after filtering them + for _, call := range config.Syscalls { + if len(call.Excludes.Arches) > 0 { + if stringutils.InSlice(call.Excludes.Arches, arch) { + continue Loop + } + } + if len(call.Excludes.Caps) > 0 { + for _, c := range call.Excludes.Caps { + if stringutils.InSlice(rs.Process.Capabilities, c) { + continue Loop + } + } + } + if len(call.Includes.Arches) > 0 { + if !stringutils.InSlice(call.Includes.Arches, arch) { + continue Loop + } + } + if len(call.Includes.Caps) > 0 { + for _, c := range call.Includes.Caps { + if !stringutils.InSlice(rs.Process.Capabilities, c) { + continue Loop + } + } + } + + if call.Name != "" && len(call.Names) != 0 { + return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") + } + + if call.Name != "" { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args)) + } + + for _, n := range call.Names { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args)) + } + } + + return newConfig, nil +} + +func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.Syscall { + newCall := specs.Syscall{ + Name: name, + Action: specs.Action(action), + } + + // Loop through all the arguments of the syscall and convert them + for _, arg := range args { + newArg := specs.Arg{ + Index: arg.Index, + Value: arg.Value, + ValueTwo: arg.ValueTwo, + Op: specs.Operator(arg.Op), + } + + newCall.Args = append(newCall.Args, newArg) + } + return newCall +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/seccomp_default.go b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_default.go new file mode 100644 index 0000000..e3943f8 --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_default.go @@ -0,0 +1,761 @@ +// +build linux,seccomp + +package seccomp + +import ( + "syscall" + + "github.com/docker/docker/api/types" +) + +func arches() []types.Architecture { + return []types.Architecture{ + { + Arch: types.ArchX86_64, + SubArches: []types.Arch{types.ArchX86, types.ArchX32}, + }, + { + Arch: types.ArchAARCH64, + SubArches: []types.Arch{types.ArchARM}, + }, + { + Arch: types.ArchMIPS64, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64N32}, + }, + { + Arch: types.ArchMIPS64N32, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64}, + }, + { + Arch: types.ArchMIPSEL64, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64N32}, + }, + { + Arch: types.ArchMIPSEL64N32, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64}, + }, + { + Arch: types.ArchS390X, + SubArches: []types.Arch{types.ArchS390}, + }, + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. +func DefaultProfile() *types.Seccomp { + syscalls := []*types.Syscall{ + { + Names: []string{ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0008, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0xffffffff, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"socket"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.AF_UNIX, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"socket"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.AF_INET, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"socket"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.AF_INET6, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"socket"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.AF_NETLINK, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"socket"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.AF_PACKET, + Op: types.OpEqualTo, + }, + }, + }, + // socketcall(1, ...) is equivalent to socket(...) on some architectures eg i386 + { + Names: []string{"socketcall"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 1, + Op: types.OpGreaterThan, + }, + }, + }, + { + Names: []string{"socketcall"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 1, + Op: types.OpEqualTo, + }, + { + Index: 1, + Value: syscall.AF_UNIX, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"socketcall"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 1, + Op: types.OpEqualTo, + }, + { + Index: 1, + Value: syscall.AF_INET, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"socketcall"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 1, + Op: types.OpEqualTo, + }, + { + Index: 1, + Value: syscall.AF_INET6, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"socketcall"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 1, + Op: types.OpEqualTo, + }, + { + Index: 1, + Value: syscall.AF_NETLINK, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"socketcall"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 1, + Op: types.OpEqualTo, + }, + { + Index: 1, + Value: syscall.AF_PACKET, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{ + "sync_file_range2", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"ppc64le"}, + }, + }, + { + Names: []string{ + "arm_fadvise64_64", + "arm_sync_file_range", + "sync_file_range2", + "breakpoint", + "cacheflush", + "set_tls", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"arm", "arm64"}, + }, + }, + { + Names: []string{ + "arch_prctl", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32"}, + }, + }, + { + Names: []string{ + "modify_ldt", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32", "x86"}, + }, + }, + { + Names: []string{ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "open_by_handle_at", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_DAC_READ_SEARCH"}, + }, + }, + { + Names: []string{ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 1, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Comment: "s390 parameter ordering for clone is different", + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "reboot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_BOOT"}, + }, + }, + { + Names: []string{ + "chroot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_CHROOT"}, + }, + }, + { + Names: []string{ + "delete_module", + "init_module", + "finit_module", + "query_module", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_MODULE"}, + }, + }, + { + Names: []string{ + "acct", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_PACCT"}, + }, + }, + { + Names: []string{ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_PTRACE"}, + }, + }, + { + Names: []string{ + "iopl", + "ioperm", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_RAWIO"}, + }, + }, + { + Names: []string{ + "settimeofday", + "stime", + "adjtimex", + "clock_settime", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TIME"}, + }, + }, + { + Names: []string{ + "vhangup", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TTY_CONFIG"}, + }, + }, + } + + return &types.Seccomp{ + DefaultAction: types.ActErrno, + ArchMap: arches(), + Syscalls: syscalls, + } +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/seccomp_test.go b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_test.go new file mode 100644 index 0000000..1346921 --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_test.go @@ -0,0 +1,32 @@ +// +build linux + +package seccomp + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/oci" +) + +func TestLoadProfile(t *testing.T) { + f, err := ioutil.ReadFile("fixtures/example.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} + +func TestLoadDefaultProfile(t *testing.T) { + f, err := ioutil.ReadFile("default.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_unsupported.go new file mode 100644 index 0000000..f84b20b --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_unsupported.go @@ -0,0 +1,13 @@ +// +build linux,!seccomp + +package seccomp + +import ( + "github.com/docker/docker/api/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultProfile returns a nil pointer on unsupported systems. +func DefaultProfile(rs *specs.Spec) *types.Seccomp { + return nil +} diff --git a/vendor/github.com/moby/moby/project/ARM.md b/vendor/github.com/moby/moby/project/ARM.md new file mode 100644 index 0000000..c4d21bf --- /dev/null +++ b/vendor/github.com/moby/moby/project/ARM.md @@ -0,0 +1,45 @@ +# ARM support + +The ARM support should be considered experimental. It will be extended step by step in the coming weeks. + +Building a Docker Development Image works in the same fashion as for Intel platform (x86-64). +Currently we have initial support for 32bit ARMv7 devices. + +To work with the Docker Development Image you have to clone the Docker/Docker repo on a supported device. +It needs to have a Docker Engine installed to build the Docker Development Image. + +From the root of the Docker/Docker repo one can use make to execute the following make targets: +- make validate +- make binary +- make build +- make deb +- make bundles +- make default +- make shell +- make test-unit +- make test-integration-cli +- make + +The Makefile does include logic to determine on which OS and architecture the Docker Development Image is built. +Based on OS and architecture it chooses the correct Dockerfile. +For the ARM 32bit architecture it uses `Dockerfile.armhf`. + +So for example in order to build a Docker binary one has to +1. clone the Docker/Docker repository on an ARM device `git clone git@github.com:docker/docker.git` +2. change into the checked out repository with `cd docker` +3. execute `make binary` to create a Docker Engine binary for ARM + +## Kernel modules +A few libnetwork integration tests require that the kernel be +configured with "dummy" network interface and has the module +loaded. However, the dummy module may be not loaded automatically. + +To load the kernel module permanently, run these commands as `root`. + + modprobe dummy + echo "dummy" >> /etc/modules + +On some systems you also have to sync your kernel modules. + + oc-sync-kernel-modules + depmod diff --git a/vendor/github.com/moby/moby/project/BRANCHES-AND-TAGS.md b/vendor/github.com/moby/moby/project/BRANCHES-AND-TAGS.md new file mode 100644 index 0000000..1c6f232 --- /dev/null +++ b/vendor/github.com/moby/moby/project/BRANCHES-AND-TAGS.md @@ -0,0 +1,35 @@ +Branches and tags +================= + +Note: details of the release process for the Engine are documented in the +[RELEASE-CHECKLIST](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). + +# Branches + +The docker/docker repository should normally have only three living branches at all time, including +the regular `master` branch: + +## `docs` branch + +The `docs` branch supports documentation updates between product releases. This branch allow us to +decouple documentation releases from product releases. + +## `release` branch + +The `release` branch contains the last _released_ version of the code for the project. + +The `release` branch is only updated at each public release of the project. The mechanism for this +is that the release is materialized by a pull request against the `release` branch which lives for +the duration of the code freeze period. When this pull request is merged, the `release` branch gets +updated, and its new state is tagged accordingly. + +# Tags + +Any public release of a compiled binary, with the logical exception of nightly builds, should have +a corresponding tag in the repository. + +The general format of a tag is `vX.Y.Z[-suffix[N]]`: + +- All of `X`, `Y`, `Z` must be specified (example: `v1.0.0`) +- First release candidate for version `1.8.0` should be tagged `v1.8.0-rc1` +- Second alpha release of a product should be tagged `v1.0.0-alpha1` diff --git a/vendor/github.com/moby/moby/project/CONTRIBUTORS.md b/vendor/github.com/moby/moby/project/CONTRIBUTORS.md new file mode 120000 index 0000000..44fcc63 --- /dev/null +++ b/vendor/github.com/moby/moby/project/CONTRIBUTORS.md @@ -0,0 +1 @@ +../CONTRIBUTING.md \ No newline at end of file diff --git a/vendor/github.com/moby/moby/project/GOVERNANCE.md b/vendor/github.com/moby/moby/project/GOVERNANCE.md new file mode 100644 index 0000000..6ae7baf --- /dev/null +++ b/vendor/github.com/moby/moby/project/GOVERNANCE.md @@ -0,0 +1,17 @@ +# Docker Governance Advisory Board Meetings + +In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public. +All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. + +The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at +[Google Docs Folder](https://goo.gl/Alfj8r) + +These include: + +* First Meeting Notes +* DGAB Charter +* Presentation 1: Introductory Presentation, including State of The Project +* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal +* Presentation 3: Long Term Roadmap/Statement of Direction + + diff --git a/vendor/github.com/moby/moby/project/IRC-ADMINISTRATION.md b/vendor/github.com/moby/moby/project/IRC-ADMINISTRATION.md new file mode 100644 index 0000000..824a14b --- /dev/null +++ b/vendor/github.com/moby/moby/project/IRC-ADMINISTRATION.md @@ -0,0 +1,37 @@ +# Freenode IRC Administration Guidelines and Tips + +This is not meant to be a general "Here's how to IRC" document, so if you're +looking for that, check Google instead. ♥ + +If you've been charged with helping maintain one of Docker's now many IRC +channels, this might turn out to be useful. If there's information that you +wish you'd known about how a particular channel is organized, you should add +deets here! :) + +## `ChanServ` + +Most channel maintenance happens by talking to Freenode's `ChanServ` bot. For +example, `/msg ChanServ ACCESS LIST` will show you a list of everyone +with "access" privileges for a particular channel. + +A similar command is used to give someone a particular access level. For +example, to add a new maintainer to the `#docker-maintainers` access list so +that they can contribute to the discussions (after they've been merged +appropriately in a `MAINTAINERS` file, of course), one would use `/msg ChanServ +ACCESS #docker-maintainers ADD maintainer`. + +To setup a new channel with a similar `maintainer` access template, use a +command like `/msg ChanServ TEMPLATE maintainer +AV` (`+A` for letting +them view the `ACCESS LIST`, `+V` for auto-voice; see `/msg ChanServ HELP FLAGS` +for more details). + +## Troubleshooting + +The most common cause of not-getting-auto-`+v` woes is people not being +`IDENTIFY`ed with `NickServ` (or their current nickname not being `GROUP`ed with +their main nickname) -- often manifested by `ChanServ` responding to an `ACCESS +ADD` request with something like `xyz is not registered.`. + +This is easily fixed by doing `/msg NickServ IDENTIFY OldNick SecretPassword` +followed by `/msg NickServ GROUP` to group the two nicknames together. See +`/msg NickServ HELP GROUP` for more information. diff --git a/vendor/github.com/moby/moby/project/ISSUE-TRIAGE.md b/vendor/github.com/moby/moby/project/ISSUE-TRIAGE.md new file mode 100644 index 0000000..95cb2f1 --- /dev/null +++ b/vendor/github.com/moby/moby/project/ISSUE-TRIAGE.md @@ -0,0 +1,132 @@ +Triaging of issues +------------------ + +Triage provides an important way to contribute to an open source project. Triage helps ensure issues resolve quickly by: + +- Describing the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +- Giving a contributor the information they need before they commit to resolving an issue. +- Lowering the issue count by preventing duplicate issues. +- Streamlining the development process by preventing duplicate discussions. + +If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. + +### 1. Ensure the issue contains basic information + +Before triaging an issue very far, make sure that the issue's author provided the standard issue information. This will help you make an educated recommendation on how this to categorize the issue. Standard information that *must* be included in most issues are things such as: + +- the output of `docker version` +- the output of `docker info` +- the output of `uname -a` +- a reproducible case if this is a bug, Dockerfiles FTW +- host distribution and version ( ubuntu 14.04, RHEL, fedora 23 ) +- page URL if this is a docs issue or the name of a man page + +Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. + +If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. + +If the author does not respond requested information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be +reopened when the necessary information is provided. + +### 2. Classify the Issue + +An issue can have multiple of the following labels. Typically, a properly classified issues should +have: + +- One label identifying its kind (`kind/*`). +- One or multiple labels identifying the functional areas of interest (`area/*`). +- Where applicable, one label categorizing its difficulty (`exp/*`). + +#### Issue kind + +| Kind | Description | +|------------------|---------------------------------------------------------------------------------------------------------------------------------| +| kind/bug | Bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate. | +| kind/enhancement | Enhancement are not bugs or new features but can drastically improve usability or performance of a project component. | +| kind/feature | Functionality or other elements that the project does not currently support. Features are new and shiny. | +| kind/question | Contains a user or contributor question requiring a response. | + +#### Functional area + +| Area | +|---------------------------| +| area/api | +| area/builder | +| area/bundles | +| area/cli | +| area/daemon | +| area/distribution | +| area/docs | +| area/kernel | +| area/logging | +| area/networking | +| area/plugins | +| area/project | +| area/runtime | +| area/security | +| area/security/apparmor | +| area/security/seccomp | +| area/security/selinux | +| area/security/trust | +| area/storage | +| area/storage/aufs | +| area/storage/btrfs | +| area/storage/devicemapper | +| area/storage/overlay | +| area/storage/zfs | +| area/swarm | +| area/testing | +| area/volumes | + +#### Platform + +| Platform | +|---------------------------| +| platform/arm | +| platform/darwin | +| platform/ibm-power | +| platform/ibm-z | +| platform/windows | + +#### Experience level + +Experience level is a way for a contributor to find an issue based on their +skill set. Experience types are applied to the issue or pull request using +labels. + +| Level | Experience level guideline | +|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| exp/beginner | New to Docker, and possibly Golang, and is looking to help while learning the basics. | +| exp/intermediate | Comfortable with golang and understands the core concepts of Docker and looking to dive deeper into the project. | +| exp/expert | Proficient with Docker and Golang and has been following, and active in, the community to understand the rationale behind design decisions and where the project is headed. | + +As the table states, these labels are meant as guidelines. You might have +written a whole plugin for Docker in a personal project and never contributed to +Docker. With that kind of experience, you could take on an exp/expert level task. + +#### Triage status + +To communicate the triage status with other collaborators, you can apply status +labels to issues. These labels prevent duplicating effort. + +| Status | Description | +|-------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| status/confirmed | You triaged the issue, and were able to reproduce the issue. Always leave a comment how you reproduced, so that the person working on resolving the issue has a way to set up a test-case. +| status/accepted | Apply to enhancements / feature requests that we think are good to have. Adding this label helps contributors find things to work on. +| status/more-info-needed | Apply this to issues that are missing information (e.g. no `docker version` or `docker info` output, or no steps to reproduce), or require feedback from the reporter. If the issue is not updated after a week, it can generally be closed. +| status/needs-attention | Apply this label if an issue (or PR) needs more eyes. + +### 3. Prioritizing issue + +When, and only when, an issue is attached to a specific milestone, the issue can be labeled with the +following labels to indicate their degree of priority (from more urgent to less urgent). + +| Priority | Description | +|-------------|-----------------------------------------------------------------------------------------------------------------------------------| +| priority/P0 | Urgent: Security, critical bugs, blocking issues. P0 basically means drop everything you are doing until this issue is addressed. | +| priority/P1 | Important: P1 issues are a top priority and a must-have for the next release. | +| priority/P2 | Normal priority: default priority applied. | +| priority/P3 | Best effort: those are nice to have / minor issues. | + +And that's it. That should be all the information required for a new or existing contributor to come in an resolve an issue. diff --git a/vendor/github.com/moby/moby/project/PACKAGE-REPO-MAINTENANCE.md b/vendor/github.com/moby/moby/project/PACKAGE-REPO-MAINTENANCE.md new file mode 100644 index 0000000..3763f87 --- /dev/null +++ b/vendor/github.com/moby/moby/project/PACKAGE-REPO-MAINTENANCE.md @@ -0,0 +1,74 @@ +# Apt & Yum Repository Maintenance +## A maintainer's guide to managing Docker's package repos + +### How to clean up old experimental debs and rpms + +We release debs and rpms for experimental nightly, so these can build up. +To remove old experimental debs and rpms, and _ONLY_ keep the latest, follow the +steps below. + +1. Checkout docker master + +2. Run clean scripts + +```bash +docker build --rm --force-rm -t docker-dev:master . +docker run --rm -it --privileged \ + -v /path/to/your/repos/dir:/volumes/repos \ + -v $HOME/.gnupg:/root/.gnupg \ + -e GPG_PASSPHRASE \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + docker-dev:master hack/make.sh clean-apt-repo clean-yum-repo generate-index-listing sign-repos +``` + +3. Upload the changed repos to `s3` (if you host on s3) + +4. Purge the cache, PURGE the cache, PURGE THE CACHE! + +### How to get out of a sticky situation + +Sh\*t happens. We know. Below are steps to get out of any "hash-sum mismatch" or +"gpg sig error" or the likes error that might happen to the apt repo. + +**NOTE:** These are apt repo specific, have had no experimence with anything similar +happening to the yum repo in the past so you can rest easy. + +For each step listed below, move on to the next if the previous didn't work. +Otherwise CELEBRATE! + +1. Purge the cache. + +2. Did you remember to sign the debs after releasing? + +Re-sign the repo with your gpg key: + +```bash +docker build --rm --force-rm -t docker-dev:master . +docker run --rm -it --privileged \ + -v /path/to/your/repos/dir:/volumes/repos \ + -v $HOME/.gnupg:/root/.gnupg \ + -e GPG_PASSPHRASE \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + docker-dev:master hack/make.sh sign-repos +``` + +Upload the changed repo to `s3` (if that is where you host) + +PURGE THE CACHE. + +3. Run Jess' magical, save all, only in case of extreme emergencies, "you are +going to have to break this glass to get it" script. + +```bash +docker build --rm --force-rm -t docker-dev:master . +docker run --rm -it --privileged \ + -v /path/to/your/repos/dir:/volumes/repos \ + -v $HOME/.gnupg:/root/.gnupg \ + -e GPG_PASSPHRASE \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + docker-dev:master hack/make.sh update-apt-repo generate-index-listing sign-repos +``` + +4. Upload the changed repo to `s3` (if that is where you host) + +PURGE THE CACHE. diff --git a/vendor/github.com/moby/moby/project/PACKAGERS.md b/vendor/github.com/moby/moby/project/PACKAGERS.md new file mode 100644 index 0000000..46ea8e7 --- /dev/null +++ b/vendor/github.com/moby/moby/project/PACKAGERS.md @@ -0,0 +1,307 @@ +# Dear Packager, + +If you are looking to make Docker available on your favorite software +distribution, this document is for you. It summarizes the requirements for +building and running the Docker client and the Docker daemon. + +## Getting Started + +We want to help you package Docker successfully. Before doing any packaging, a +good first step is to introduce yourself on the [docker-dev mailing +list](https://groups.google.com/d/forum/docker-dev), explain what you're trying +to achieve, and tell us how we can help. Don't worry, we don't bite! There might +even be someone already working on packaging for the same distro! + +You can also join the IRC channel - #docker and #docker-dev on Freenode are both +active and friendly. + +We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our +"Packagers Relations", since he's always working to make sure our packagers have +a good, healthy upstream to work with (both in our communication and in our +build scripts). If you're having any kind of trouble, feel free to ping him +directly. He also likes to keep track of what distributions we have packagers +for, so feel free to reach out to him even just to say "Hi!" + +## Package Name + +If possible, your package should be called "docker". If that name is already +taken, a second choice is "docker-engine". Another possible choice is "docker.io". + +## Official Build vs Distro Build + +The Docker project maintains its own build and release toolchain. It is pretty +neat and entirely based on Docker (surprise!). This toolchain is the canonical +way to build Docker. We encourage you to give it a try, and if the circumstances +allow you to use it, we recommend that you do. + +You might not be able to use the official build toolchain - usually because your +distribution has a toolchain and packaging policy of its own. We get it! Your +house, your rules. The rest of this document should give you the information you +need to package Docker your way, without denaturing it in the process. + +## Build Dependencies + +To build Docker, you will need the following: + +* A recent version of Git and Mercurial +* Go version 1.6 or later +* A clean checkout of the source added to a valid [Go + workspace](https://golang.org/doc/code.html#Workspaces) under the path + *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, + explained in more detail below) + +To build the Docker daemon, you will additionally need: + +* An amd64/x86_64 machine running Linux +* SQLite version 3.7.9 or later +* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version + 2.02.89 or later +* btrfs-progs version 3.16.1 or later (unless using an older version is + absolutely necessary, in which case 3.8 is the minimum) +* libseccomp version 2.2.1 or later (for build tag seccomp) + +Be sure to also check out Docker's Dockerfile for the most up-to-date list of +these build-time dependencies. + +### Go Dependencies + +All Go dependencies are vendored under "./vendor". They are used by the official +build, so the source of truth for the current version of each dependency is +whatever is in "./vendor". + +To use the vendored dependencies, simply make sure the path to "./vendor" is +included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). + +If you would rather (or must, due to distro policy) package these dependencies +yourself, take a look at "vendor.conf" for an easy-to-parse list of the +exact version for each. + +NOTE: if you're not able to package the exact version (to the exact commit) of a +given dependency, please get in touch so we can remediate! Who knows what +discrepancies can be caused by even the slightest deviation. We promise to do +our best to make everybody happy. + +## Stripping Binaries + +Please, please, please do not strip any compiled binaries. This is really +important. + +In our own testing, stripping the resulting binaries sometimes results in a +binary that appears to work, but more often causes random panics, segfaults, and +other issues. Even if the binary appears to work, please don't strip. + +See the following quotes from Dave Cheney, which explain this position better +from the upstream Golang perspective. + +### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) + +> Super super important: Do not strip go binaries or archives. It isn't tested, +> often breaks, and doesn't work. + +### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) + +> To quote myself: "Please do not strip Go binaries, it is not supported, not +> tested, is often broken, and doesn't do what you want" +> +> To unpack that a bit +> +> * not supported, as in, we don't support it, and recommend against it when +> asked +> * not tested, we don't test stripped binaries as part of the build CI process +> * is often broken, stripping a go binary will produce anywhere from no, to +> subtle, to outright execution failure, see above + +### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) + +> To clarify my previous statements. +> +> * I do not disagree with the debian policy, it is there for a good reason +> * Having said that, it stripping Go binaries doesn't work, and nobody is +> looking at making it work, so there is that. +> +> Thanks for patching the build formula. + +## Building Docker + +Please use our build script ("./hack/make.sh") for all your compilation of +Docker. If there's something you need that it isn't doing, or something it could +be doing to make your life as a packager easier, please get in touch with Tianon +and help us rectify the situation. Chances are good that other packagers have +probably run into the same problems and a fix might already be in the works, but +none of us will know for sure unless you harass Tianon about it. :) + +All the commands listed within this section should be run with the Docker source +checkout as the current working directory. + +### `AUTO_GOPATH` + +If you'd rather not be bothered with the hassles that setting up `GOPATH` +appropriately can be, and prefer to just get a "build that works", you should +add something similar to this to whatever script or process you're using to +build Docker: + +```bash +export AUTO_GOPATH=1 +``` + +This will cause the build scripts to set up a reasonable `GOPATH` that +automatically and properly includes both docker/docker from the local +directory, and the local "./vendor" directory as necessary. + +### `DOCKER_BUILDTAGS` + +If you're building a binary that may need to be used on platforms that include +AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: +```bash +export DOCKER_BUILDTAGS='apparmor' +``` + +If you're building a binary that may need to be used on platforms that include +SELinux, you will need to use the `selinux` build tag: +```bash +export DOCKER_BUILDTAGS='selinux' +``` + +If you're building a binary that may need to be used on platforms that include +seccomp, you will need to use the `seccomp` build tag: +```bash +export DOCKER_BUILDTAGS='seccomp' +``` + +There are build tags for disabling graphdrivers as well. By default, support +for all graphdrivers are built in. + +To disable btrfs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' +``` + +To disable devicemapper: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' +``` + +To disable aufs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' +``` + +NOTE: if you need to set more than one build tag, space separate them: +```bash +export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' +``` + +### Static Daemon + +If it is feasible within the constraints of your distribution, you should +seriously consider packaging Docker as a single static binary. A good comparison +is Busybox, which is often packaged statically as a feature to enable mass +portability. Because of the unique way Docker operates, being similarly static +is a "feature". + +To build a static Docker daemon binary, run the following command (first +ensuring that all the necessary libraries are available in static form for +linking - see the "Build Dependencies" section above, and the relevant lines +within Docker's own Dockerfile that set up our official build environment): + +```bash +./hack/make.sh binary +``` + +This will create a static binary under +"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of +the file "./VERSION". This binary is usually installed somewhere like +"/usr/bin/docker". + +### Dynamic Daemon / Client-only Binary + +If you are only interested in a Docker client binary, you can build using: + +```bash +./hack/make.sh binary-client +``` + +If you need to (due to distro policy, distro library availability, or for other +reasons) create a dynamically compiled daemon binary, or if you are only +interested in creating a client binary for Docker, use something similar to the +following: + +```bash +./hack/make.sh dynbinary-client +``` + +This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for +client-only builds is the important file to grab and install as appropriate. + +## System Dependencies + +### Runtime Dependencies + +To function properly, the Docker daemon needs the following software to be +installed and available at runtime: + +* iptables version 1.4 or later +* procps (or similar provider of a "ps" executable) +* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs) +* xfsprogs (in use: mkfs.xfs) +* XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) + +Additionally, the Docker client needs the following software to be installed and +available at runtime: + +* Git version 1.7 or later + +### Kernel Requirements + +The Docker daemon has very specific kernel requirements. Most pre-packaged +kernels already include the necessary options enabled. If you are building your +own kernel, you will either need to discover the options necessary via trial and +error, or check out the [Gentoo +ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), +in which a list is maintained (and if there are any issues or discrepancies in +that list, please contact Tianon so they can be rectified). + +Note that in client mode, there are no specific kernel requirements, and that +the client will even run on alternative platforms such as Mac OS X / Darwin. + +### Optional Dependencies + +Some of Docker's features are activated by using optional command-line flags or +by having support for them in the kernel or userspace. A few examples include: + +* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at + least the "auplink" utility from aufs-tools) +* BTRFS graph driver (requires BTRFS support enabled in the kernel) +* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module) +* Libseccomp to allow running seccomp profiles with containers + +## Daemon Init Script + +Docker expects to run as a daemon at machine startup. Your package will need to +include a script for your distro's process supervisor of choice. Be sure to +check out the "contrib/init" folder in case a suitable init script already +exists (and if one does not, contact Tianon about whether it might be +appropriate for your distro's init script to live there too!). + +In general, Docker should be run as root, similar to the following: + +```bash +docker daemon +``` + +Generally, a `DOCKER_OPTS` variable of some kind is available for adding more +flags (such as changing the graph driver to use BTRFS, switching the location of +"/var/lib/docker", etc). + +## Communicate + +As a final note, please do feel free to reach out to Tianon at any time for +pretty much anything. He really does love hearing from our packagers and wants +to make sure we're not being a "hostile upstream". As should be a given, we +appreciate the work our packagers do to make sure we have broad distribution! diff --git a/vendor/github.com/moby/moby/project/PATCH-RELEASES.md b/vendor/github.com/moby/moby/project/PATCH-RELEASES.md new file mode 100644 index 0000000..548db9a --- /dev/null +++ b/vendor/github.com/moby/moby/project/PATCH-RELEASES.md @@ -0,0 +1,68 @@ +# Docker patch (bugfix) release process + +Patch releases (the 'Z' in vX.Y.Z) are intended to fix major issues in a +release. Docker open source projects follow these procedures when creating a +patch release; + +After each release (both "major" (vX.Y.0) and "patch" releases (vX.Y.Z)), a +patch release milestone (vX.Y.Z + 1) is created. + +The creation of a patch release milestone is no obligation to actually +*create* a patch release. The purpose of these milestones is to collect +issues and pull requests that can *justify* a patch release; + +- Any maintainer is allowed to add issues and PR's to the milestone, when + doing so, preferably leave a comment on the issue or PR explaining *why* + you think it should be considered for inclusion in a patch release. +- Issues introduced in version vX.Y.0 get added to milestone X.Y.Z+1 +- Only *regressions* should be added. Issues *discovered* in version vX.Y.0, + but already present in version vX.Y-1.Z should not be added, unless + critical. +- Patch releases can *only* contain bug-fixes. New features should + *never* be added to a patch release. + +The release captain of the "major" (X.Y.0) release, is also responsible for +patch releases. The release captain, together with another maintainer, will +review issues and PRs on the milestone, and assigns `priority/`labels. These +review sessions take place on a weekly basis, more frequent if needed: + +- A P0 priority is assigned to critical issues. A maintainer *must* be + assigned to these issues. Maintainers should strive to fix a P0 within a week. +- A P1 priority is assigned to major issues, but not critical. A maintainer + *must* be assigned to these issues. +- P2 and P3 priorities are assigned to other issues. A maintainer can be + assigned. +- Non-critical issues and PR's can be removed from the milestone. Minor + changes, such as typo-fixes or omissions in the documentation can be + considered for inclusion in a patch release. + +## Deciding if a patch release should be done + +- Only a P0 can justify to proceed with the patch release. +- P1, P2, and P3 issues/PR's should not influence the decision, and + should be moved to the X.Y.Z+1 milestone, or removed from the + milestone. + +> **Note**: If the next "major" release is imminent, the release captain +> can decide to cancel a patch release, and include the patches in the +> upcoming major release. + +> **Note**: Security releases are also "patch releases", but follow +> a different procedure. Security releases are developed in a private +> repository, released and tested under embargo before they become +> publicly available. + +## Deciding on the content of a patch release + +When the criteria for moving forward with a patch release are met, the release +manager will decide on the exact content of the release. + +- Fixes to all P0 issues *must* be included in the release. +- Fixes to *some* P1, P2, and P3 issues *may* be included as part of the patch + release depending on the severity of the issue and the risk associated with + the patch. + +Any code delivered as part of a patch release should make life easier for a +significant amount of users with zero chance of degrading anybody's experience. +A good rule of thumb for that is to limit cherry-picking to small patches, which +fix well-understood issues, and which come with verifiable tests. diff --git a/vendor/github.com/moby/moby/project/PRINCIPLES.md b/vendor/github.com/moby/moby/project/PRINCIPLES.md new file mode 100644 index 0000000..53f0301 --- /dev/null +++ b/vendor/github.com/moby/moby/project/PRINCIPLES.md @@ -0,0 +1,19 @@ +# Docker principles + +In the design and development of Docker we try to follow these principles: + +(Work in progress) + +* Don't try to replace every tool. Instead, be an ingredient to improve them. +* Less code is better. +* Fewer components are better. Do you really need to add one more class? +* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. +* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. +* When hesitating between 2 options, choose the one that is easier to reverse. +* No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later. +* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. +* The less moving parts in a container, the better. +* Don't merge it unless you document it. +* Don't document it unless you can keep it up-to-date. +* Don't merge it unless you test it! +* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff --git a/vendor/github.com/moby/moby/project/README.md b/vendor/github.com/moby/moby/project/README.md new file mode 100644 index 0000000..3ed68cf --- /dev/null +++ b/vendor/github.com/moby/moby/project/README.md @@ -0,0 +1,24 @@ +# Hacking on Docker + +The `project/` directory holds information and tools for everyone involved in the process of creating and +distributing Docker, specifically: + +## Guides + +If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTORS.md](../CONTRIBUTING.md). + +If you're a *maintainer* or aspiring maintainer, you should read [MAINTAINERS](../MAINTAINERS). + +If you're a *packager* or aspiring packager, you should read [PACKAGERS.md](./PACKAGERS.md). + +If you're a maintainer in charge of a *release*, you should read [RELEASE-CHECKLIST.md](./RELEASE-CHECKLIST.md). + +## Roadmap + +A high-level roadmap is available at [ROADMAP.md](../ROADMAP.md). + + +## Build tools + +[hack/make.sh](../hack/make.sh) is the primary build tool for docker. It is used for compiling the official binary, +running the test suite, and pushing releases. diff --git a/vendor/github.com/moby/moby/project/RELEASE-CHECKLIST.md b/vendor/github.com/moby/moby/project/RELEASE-CHECKLIST.md new file mode 100644 index 0000000..84848ca --- /dev/null +++ b/vendor/github.com/moby/moby/project/RELEASE-CHECKLIST.md @@ -0,0 +1,518 @@ +# Release Checklist +## A maintainer's guide to releasing Docker + +So you're in charge of a Docker release? Cool. Here's what to do. + +If your experience deviates from this document, please document the changes +to keep it up-to-date. + +It is important to note that this document assumes that the git remote in your +repository that corresponds to "https://github.com/docker/docker" is named +"origin". If yours is not (for example, if you've chosen to name it "upstream" +or something similar instead), be sure to adjust the listed snippets for your +local environment accordingly. If you are not sure what your upstream remote is +named, use a command like `git remote -v` to find out. + +If you don't have an upstream remote, you can add one easily using something +like: + +```bash +export GITHUBUSER="YOUR_GITHUB_USER" +git remote add origin https://github.com/docker/docker.git +git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git +``` + +### 1. Pull from master and create a release branch + +All releases version numbers will be of the form: vX.Y.Z where X is the major +version number, Y is the minor version number and Z is the patch release version number. + +#### Major releases + +The release branch name is just vX.Y because it's going to be the basis for all .Z releases. + +```bash +export BASE=vX.Y +export VERSION=vX.Y.Z +git fetch origin +git checkout --track origin/master +git checkout -b release/$BASE +``` + +This new branch is going to be the base for the release. We need to push it to origin so we +can track the cherry-picked changes and the version bump: + +```bash +git push origin release/$BASE +``` + +When you have the major release branch in origin, we need to create the bump fork branch +that we'll push to our fork: + +```bash +git checkout -b bump_$VERSION +``` + +#### Patch releases + +If we have the release branch in origin, we can create the forked bump branch from it directly: + +```bash +export VERSION=vX.Y.Z +export PATCH=vX.Y.Z+1 +git fetch origin +git checkout --track origin/release/$BASE +git checkout -b bump_$PATCH +``` + +We cherry-pick only the commits we want into the bump branch: + +```bash +# get the commits ids we want to cherry-pick +git log +# cherry-pick the commits starting from the oldest one, without including merge commits +git cherry-pick -s -x +git cherry-pick -s -x +... +``` + +### 2. Update the VERSION files and API version on master + +We don't want to stop contributions to master just because we are releasing. +So, after the release branch is up, we bump the VERSION and API version to mark +the start of the "next" release. + +#### 2.1 Update the VERSION files + +Update the content of the `VERSION` file to be the next minor (incrementing Y) +and add the `-dev` suffix. For example, after the release branch for 1.5.0 is +created, the `VERSION` file gets updated to `1.6.0-dev` (as in "1.6.0 in the +making"). + +#### 2.2 Update API version on master + +We don't want API changes to go to the now frozen API version. Create a new +entry in `docs/reference/api/` by copying the latest and bumping the version +number (in both the file's name and content), and submit this in a PR against +master. + +### 3. Update CHANGELOG.md + +You can run this command for reference with git 2.0: + +```bash +git fetch --tags +LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If you don't have git 2.0 but have a sort command that supports `-V`: +```bash +git fetch --tags +LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. +```markdown +#### Notable features since +* New docker command to do something useful +* Engine API change (deprecating old version) +* Performance improvements in some usecases +* ... +``` + +For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. +Each change should be listed under a category heading formatted as `#### CATEGORY`. + +`CATEGORY` should describe which part of the project is affected. + Valid categories are: + * Builder + * Documentation + * Hack + * Packaging + * Engine API + * Runtime + * Other (please use this category sparingly) + +Each change should be formatted as `BULLET DESCRIPTION`, given: + +* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or + upgrade, respectively. + +* DESCRIPTION: a concise description of the change that is relevant to the + end-user, using the present tense. Changes should be described in terms + of how they affect the user, for example "Add new feature X which allows Y", + "Fix bug which caused X", "Increase performance of Y". + +EXAMPLES: + +```markdown +## 0.3.6 (1995-12-25) + +#### Builder + ++ 'docker build -t FOO .' applies the tag FOO to the newly built image + +#### Engine API + +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version +``` + +If you need a list of contributors between the last major release and the +current bump branch, use something like: +```bash +git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf +``` +Obviously, you'll need to adjust version numbers as necessary. If you just need +a count, add a simple `| wc -l`. + +### 4. Change the contents of the VERSION file + +Before the big thing, you'll want to make successive release candidates and get +people to test. The release candidate number `N` should be part of the version: + +```bash +export RC_VERSION=${VERSION}-rcN +echo ${RC_VERSION#v} > VERSION +``` + +### 5. Test the docs + +Make sure that your tree includes documentation for any modified or +new features, syntax or semantic changes. + +To test locally: + +```bash +make docs +``` + +To make a shared test at https://beta-docs.docker.io: + +(You will need the `awsconfig` file added to the `docs/` dir) + +```bash +make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release +``` + +### 6. Commit and create a pull request to the "release" branch + +```bash +git add VERSION CHANGELOG.md +git commit -m "Bump version to $VERSION" +git push $GITHUBUSER bump_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:release/$BASE...$GITHUBUSER:bump_$VERSION?expand=1" +``` + +That last command will give you the proper link to visit to ensure that you +open the PR against the "release" branch instead of accidentally against +"master" (like so many brave souls before you already have). + +### 7. Create a PR to update the AUTHORS file for the release + +Update the AUTHORS file, by running the `hack/generate-authors.sh` on the +release branch. To prevent duplicate entries, you may need to update the +`.mailmap` file accordingly. + +### 8. Build release candidate rpms and debs + +**NOTE**: It will be a lot faster if you pass a different graphdriver with +`DOCKER_GRAPHDRIVER` than `vfs`. + +```bash +docker build -t docker . +docker run \ + --rm -t --privileged \ + -e DOCKER_GRAPHDRIVER=aufs \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + docker \ + hack/make.sh binary build-deb build-rpm +``` + +### 9. Publish release candidate rpms and debs + +With the rpms and debs you built from the last step you can release them on the +same server, or ideally, move them to a dedicated release box via scp into +another docker/docker directory in bundles. This next step assumes you have +a checkout of the docker source code at the same commit you used to build, with +the artifacts from the last step in `bundles`. + +**NOTE:** If you put a space before the command your `.bash_history` will not +save it. (for the `GPG_PASSPHRASE`). + +```bash +docker build -t docker . +docker run --rm -it --privileged \ + -v /volumes/repos:/volumes/repos \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + -v $HOME/.gnupg:/root/.gnupg \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + -e GPG_PASSPHRASE \ + -e KEEPBUNDLE=1 \ + docker \ + hack/make.sh release-deb release-rpm sign-repos generate-index-listing +``` + +### 10. Upload the changed repos to wherever you host + +For example, above we bind mounted `/volumes/repos` as the storage for +`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with +a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with +a s3 bucket for the yum repo. + +### 11. Publish release candidate binaries + +To run this you will need access to the release credentials. Get them from the +Core maintainers. + +```bash +docker build -t docker . + +# static binaries are still pushed to s3 +docker run \ + -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e AWS_DEFAULT_REGION \ + -i -t --privileged \ + docker \ + hack/release.sh +``` + +It will run the test suite, build the binaries and upload to the specified bucket, +so this is a good time to verify that you're running against **test**.docker.com. + +### 12. Purge the cache! + +After the binaries are uploaded to test.docker.com and the packages are on +apt.dockerproject.org and yum.dockerproject.org, make sure +they get tested in both Ubuntu and Debian for any obvious installation +issues or runtime issues. + +If everything looks good, it's time to create a git tag for this candidate: + +```bash +git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION +git push origin $RC_VERSION +``` + +Announcing on multiple medias is the best way to get some help testing! An easy +way to get some useful links for sharing: + +```bash +echo "Ubuntu/Debian: curl -sSL https://test.docker.com/ | sh" +echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" +echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}" +echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz" +echo "Windows 64bit client binary: https://test.docker.com/builds/Windows/x86_64/docker-${VERSION#v}.exe" +echo "Windows 32bit client binary: https://test.docker.com/builds/Windows/i386/docker-${VERSION#v}.exe" +``` + +We recommend announcing the release candidate on: + +- IRC on #docker, #docker-dev, #docker-maintainers +- In a comment on the pull request to notify subscribed people on GitHub +- The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group +- The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group +- Any social media that can bring some attention to the release candidate + +### 13. Iterate on successive release candidates + +Spend several days along with the community explicitly investing time and +resources to try and break Docker in every possible way, documenting any +findings pertinent to the release. This time should be spent testing and +finding ways in which the release might have caused various features or upgrade +environments to have issues, not coding. During this time, the release is in +code freeze, and any additional code changes will be pushed out to the next +release. + +It should include various levels of breaking Docker, beyond just using Docker +by the book. + +Any issues found may still remain issues for this release, but they should be +documented and give appropriate warnings. + +During this phase, the `bump_$VERSION` branch will keep evolving as you will +produce new release candidates. The frequency of new candidates is up to the +release manager: use your best judgement taking into account the severity of +reported issues, testers availability, and time to scheduled release date. + +Each time you'll want to produce a new release candidate, you will start by +adding commits to the branch, usually by cherry-picking from master: + +```bash +git cherry-pick -s -x -m0 +``` + +You want your "bump commit" (the one that updates the CHANGELOG and VERSION +files) to remain on top, so you'll have to `git rebase -i` to bring it back up. + +Now that your bump commit is back on top, you will need to update the CHANGELOG +file (if appropriate for this particular release candidate), and update the +VERSION file to increment the RC number: + +```bash +export RC_VERSION=$VERSION-rcN +echo $RC_VERSION > VERSION +``` + +You can now amend your last commit and update the bump branch: + +```bash +git commit --amend +git push -f $GITHUBUSER bump_$VERSION +``` + +Repeat step 6 to tag the code, publish new binaries, announce availability, and +get help testing. + +### 14. Finalize the bump branch + +When you're happy with the quality of a release candidate, you can move on and +create the real thing. + +You will first have to amend the "bump commit" to drop the release candidate +suffix in the VERSION file: + +```bash +echo $VERSION > VERSION +git add VERSION +git commit --amend +``` + +You will then repeat step 6 to publish the binaries to test + +### 15. Get 2 other maintainers to validate the pull request + +### 16. Build final rpms and debs + +```bash +docker build -t docker . +docker run \ + --rm -t --privileged \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + docker \ + hack/make.sh binary build-deb build-rpm +``` + +### 17. Publish final rpms and debs + +With the rpms and debs you built from the last step you can release them on the +same server, or ideally, move them to a dedicated release box via scp into +another docker/docker directory in bundles. This next step assumes you have +a checkout of the docker source code at the same commit you used to build, with +the artifacts from the last step in `bundles`. + +**NOTE:** If you put a space before the command your `.bash_history` will not +save it. (for the `GPG_PASSPHRASE`). + +```bash +docker build -t docker . +docker run --rm -it --privileged \ + -v /volumes/repos:/volumes/repos \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + -v $HOME/.gnupg:/root/.gnupg \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + -e GPG_PASSPHRASE \ + -e KEEPBUNDLE=1 \ + docker \ + hack/make.sh release-deb release-rpm sign-repos generate-index-listing +``` + +### 18. Upload the changed repos to wherever you host + +For example, above we bind mounted `/volumes/repos` as the storage for +`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with +a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with +a s3 bucket for the yum repo. + +### 19. Publish final binaries + +Once they're tested and reasonably believed to be working, run against +get.docker.com: + +```bash +docker build -t docker . +# static binaries are still pushed to s3 +docker run \ + -e AWS_S3_BUCKET=get.docker.com \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e AWS_DEFAULT_REGION \ + -i -t --privileged \ + docker \ + hack/release.sh +``` + +### 20. Purge the cache! + +### 21. Apply tag and create release + +It's very important that we don't make the tag until after the official +release is uploaded to get.docker.com! + +```bash +git tag -a $VERSION -m $VERSION bump_$VERSION +git push origin $VERSION +``` + +Once the tag is pushed, go to GitHub and create a [new release](https://github.com/docker/docker/releases/new). +If the tag is for an RC make sure you check `This is a pre-release` at the bottom of the form. + +Select the tag that you just pushed as the version and paste the changelog in the description of the release. +You can see examples in this two links: + +https://github.com/docker/docker/releases/tag/v1.8.0 +https://github.com/docker/docker/releases/tag/v1.8.0-rc3 + +### 22. Go to github to merge the `bump_$VERSION` branch into release + +Don't forget to push that pretty blue button to delete the leftover +branch afterwards! + +### 23. Update the docs branch + +You will need to point the docs branch to the newly created release tag: + +```bash +git checkout origin/docs +git reset --hard origin/$VERSION +git push -f origin docs +``` + +The docs will appear on https://docs.docker.com/ (though there may be cached +versions, so its worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/). +For more information about documentation releases, see `docs/README.md`. + +Note that the new docs will not appear live on the site until the cache (a complex, +distributed CDN system) is flushed. The `make docs-release` command will do this +_if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run +and you can check its progress with the CDN Cloudfront Chrome addon. + +### 24. Create a new pull request to merge your bump commit back into master + +```bash +git checkout master +git fetch +git reset --hard origin/master +git cherry-pick -s -x $VERSION +git push $GITHUBUSER merge_release_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1" +``` + +Again, get two maintainers to validate, then merge, then push that pretty +blue button to delete your branch. + +### 25. Rejoice and Evangelize! + +Congratulations! You're done. + +Go forth and announce the glad tidings of the new release in `#docker`, +`#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev), +the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce), +and on Twitter! diff --git a/vendor/github.com/moby/moby/project/RELEASE-PROCESS.md b/vendor/github.com/moby/moby/project/RELEASE-PROCESS.md new file mode 100644 index 0000000..d764e9d --- /dev/null +++ b/vendor/github.com/moby/moby/project/RELEASE-PROCESS.md @@ -0,0 +1,78 @@ +# Docker Release Process + +This document describes how the Docker project is released. The Docker project +release process targets the Engine, Compose, Kitematic, Machine, Swarm, +Distribution, Notary and their underlying dependencies (libnetwork, libkv, +etc...). + +Step-by-step technical details of the process are described in +[RELEASE-CHECKLIST.md](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). + +## Release cycle + +The Docker project follows a **time-based release cycle** and ships every nine +weeks. A release cycle starts the same day the previous release cycle ends. + +The first six weeks of the cycle are dedicated to development and review. During +this phase, new features and bugfixes submitted to any of the projects are +**eligible** to be shipped as part of the next release. No changeset submitted +during this period is however guaranteed to be merged for the current release +cycle. + +## The freeze period + +Six weeks after the beginning of the cycle, the codebase is officially frozen +and the codebase reaches a state close to the final release. A Release Candidate +(RC) gets created at the same time. The freeze period is used to find bugs and +get feedback on the state of the RC before the release. + +During this freeze period, while the `master` branch will continue its normal +development cycle, no new features are accepted into the RC. As bugs are fixed +in `master` the release owner will selectively 'cherry-pick' critical ones to +be included into the RC. As the RC changes, new ones are made available for the +community to test and review. + +This period lasts for three weeks. + +## How to maximize chances of being merged before the freeze date? + +First of all, there is never a guarantee that a specific changeset is going to +be merged. However there are different actions to follow to maximize the chances +for a changeset to be merged: + +- The team gives priority to review the PRs aligned with the Roadmap (usually +defined by a ROADMAP.md file at the root of the repository). +- The earlier a PR is opened, the more time the maintainers have to review. For +example, if a PR is opened the day before the freeze date, it’s very unlikely +that it will be merged for the release. +- Constant communication with the maintainers (mailing-list, IRC, Github issues, +etc.) allows to get early feedback on the design before getting into the +implementation, which usually reduces the time needed to discuss a changeset. +- If the code is commented, fully tested and by extension follows every single +rules defined by the [CONTRIBUTING guide]( +https://github.com/docker/docker/blob/master/CONTRIBUTING.md), this will help +the maintainers by speeding up the review. + +## The release + +At the end of the freeze (nine weeks after the start of the cycle), all the +projects are released together. + +``` + Codebase Release +Start of is frozen (end of the +the Cycle (7th week) 9th week) ++---------------------------------------+---------------------+ +| | | +| Development phase | Freeze phase | +| | | ++---------------------------------------+---------------------+ + 6 weeks 3 weeks +<---------------------------------------><--------------------> +``` + +## Exceptions + +If a critical issue is found at the end of the freeze period and more time is +needed to address it, the release will be pushed back. When a release gets +pushed back, the next release cycle gets delayed as well. diff --git a/vendor/github.com/moby/moby/project/REVIEWING.md b/vendor/github.com/moby/moby/project/REVIEWING.md new file mode 100644 index 0000000..51ef4c5 --- /dev/null +++ b/vendor/github.com/moby/moby/project/REVIEWING.md @@ -0,0 +1,246 @@ +# Pull request reviewing process + +## Labels + +Labels are carefully picked to optimize for: + + - Readability: maintainers must immediately know the state of a PR + - Filtering simplicity: different labels represent many different aspects of + the reviewing work, and can even be targeted at different maintainers groups. + +A pull request should only be attributed labels documented in this section: other labels that may +exist on the repository should apply to issues. + +### DCO labels + + * `dco/no`: automatically set by a bot when one of the commits lacks proper signature + +### Status labels + + * `status/0-triage` + * `status/1-design-review` + * `status/2-code-review` + * `status/3-docs-review` + * `status/4-ready-to-merge` + +Special status labels: + + * `status/failing-ci`: indicates that the PR in its current state fails the test suite + * `status/needs-attention`: calls for a collective discussion during a review session + +### Impact labels (apply to merged pull requests) + + * `impact/api` + * `impact/changelog` + * `impact/cli` + * `impact/deprecation` + * `impact/distribution` + * `impact/dockerfile` + +### Process labels (apply to merged pull requests) + +Process labels are to assist in preparing (patch) releases. These labels should only be used for pull requests. + +Label | Use for +------------------------------- | ------------------------------------------------------------------------- +`process/cherry-pick` | PRs that should be cherry-picked in the bump/release branch. These pull-requests must also be assigned to a milestone. +`process/cherry-picked` | PRs that have been cherry-picked. This label is helpful to find PR's that have been added to release-candidates, and to update the change log +`process/docs-cherry-pick` | PRs that should be cherry-picked in the docs branch. Only apply this label for changes that apply to the *current* release, and generic documentation fixes, such as Markdown and spelling fixes. +`process/docs-cherry-picked` | PRs that have been cherry-picked in the docs branch +`process/merge-to-master` | PRs that are opened directly on the bump/release branch, but also need to be merged back to "master" +`process/merged-to-master` | PRs that have been merged back to "master" + + +## Workflow + +An opened pull request can be in 1 of 5 distinct states, for each of which there is a corresponding +label that needs to be applied. + +### Triage - `status/0-triage` + +Maintainers are expected to triage new incoming pull requests by removing the `status/0-triage` +label and adding the correct labels (e.g. `status/1-design-review`) before any other interaction +with the PR. The starting label may potentially skip some steps depending on the kind of pull +request: use your best judgement. + +Maintainers should perform an initial, high-level, overview of the pull request before moving it to +the next appropriate stage: + + - Has DCO + - Contains sufficient justification (e.g., usecases) for the proposed change + - References the Github issue it fixes (if any) in the commit or the first Github comment + +Possible transitions from this state: + + * Close: e.g., unresponsive contributor without DCO + * `status/1-design-review`: general case + * `status/2-code-review`: e.g. trivial bugfix + * `status/3-docs-review`: non-proposal documentation-only change + +### Design review - `status/1-design-review` + +Maintainers are expected to comment on the design of the pull request. Review of documentation is +expected only in the context of design validation, not for stylistic changes. + +Ideally, documentation should reflect the expected behavior of the code. No code review should +take place in this step. + +There are no strict rules on the way a design is validated: we usually aim for a consensus, +although a single maintainer approval is often sufficient for obviously reasonable changes. In +general, strong disagreement expressed by any of the maintainers should not be taken lightly. + +Once design is approved, a maintainer should make sure to remove this label and add the next one. + +Possible transitions from this state: + + * Close: design rejected + * `status/2-code-review`: general case + * `status/3-docs-review`: proposals with only documentation changes + +### Code review - `status/2-code-review` + +Maintainers are expected to review the code and ensure that it is good quality and in accordance +with the documentation in the PR. + +New testcases are expected to be added. Ideally, those testcases should fail when the new code is +absent, and pass when present. The testcases should strive to test as many variants, code paths, as +possible to ensure maximum coverage. + +Changes to code must be reviewed and approved (LGTM'd) by a minimum of two code maintainers. When +the author of a PR is a maintainer, he still needs the approval of two other maintainers. + +Once code is approved according to the rules of the subsystem, a maintainer should make sure to +remove this label and add the next one. If documentation is absent but expected, maintainers should +ask for documentation and move to status `status/3-docs-review` for docs maintainer to follow. + +Possible transitions from this state: + + * Close + * `status/1-design-review`: new design concerns are raised + * `status/3-docs-review`: general case + * `status/4-ready-to-merge`: change not impacting documentation + +### Docs review - `status/3-docs-review` + +Maintainers are expected to review the documentation in its bigger context, ensuring consistency, +completeness, validity, and breadth of coverage across all existing and new documentation. + +They should ask for any editorial change that makes the documentation more consistent and easier to +understand. + +The docker/docker repository only contains _reference documentation_, all +"narrative" documentation is kept in a [unified documentation +repository](https://github.com/docker/docker.github.io). Reviewers must +therefore verify which parts of the documentation need to be updated. Any +contribution that may require changing the narrative should get the +`impact/documentation` label: this is the signal for documentation maintainers +that a change will likely need to happen on the unified documentation +repository. When in doubt, it’s better to add the label and leave it to +documentation maintainers to decide whether it’s ok to skip. In all cases, +leave a comment to explain what documentation changes you think might be needed. + +- If the pull request does not impact the documentation at all, the docs review + step is skipped, and the pull request is ready to merge. +- If the changes in + the pull request require changes to the reference documentation (either + command-line reference, or API reference), those changes must be included as + part of the pull request and will be reviewed now. Keep in mind that the + narrative documentation may contain output examples of commands, so may need + to be updated as well, in which case the `impact/documentation` label must + be applied. +- If the PR has the `impact/documentation` label, merging is delayed until a + documentation maintainer acknowledges that a corresponding documentation PR + (or issue) is opened on the documentation repository. Once a documentation + maintainer acknowledges the change, she/he will move the PR to `status/4-merge` + for a code maintainer to push the green button. + +Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs +sub-project maintainers. If the docs change originates with a docs maintainer, only one additional +LGTM is required (since we assume a docs maintainer approves of their own PR). + +Once documentation is approved, a maintainer should make sure to remove this label and +add the next one. + +Possible transitions from this state: + + * Close + * `status/1-design-review`: new design concerns are raised + * `status/2-code-review`: requires more code changes + * `status/4-ready-to-merge`: general case + +### Merge - `status/4-ready-to-merge` + +Maintainers are expected to merge this pull request as soon as possible. They can ask for a rebase +or carry the pull request themselves. + +Possible transitions from this state: + + * Merge: general case + * Close: carry PR + +After merging a pull request, the maintainer should consider applying one or multiple impact labels +to ease future classification: + + * `impact/api` signifies the patch impacted the Engine API + * `impact/changelog` signifies the change is significant enough to make it in the changelog + * `impact/cli` signifies the patch impacted a CLI command + * `impact/dockerfile` signifies the patch impacted the Dockerfile syntax + * `impact/deprecation` signifies the patch participates in deprecating an existing feature + +### Close + +If a pull request is closed it is expected that sufficient justification will be provided. In +particular, if there are alternative ways of achieving the same net result then those needs to be +spelled out. If the pull request is trying to solve a use case that is not one that we (as a +community) want to support then a justification for why should be provided. + +The number of maintainers it takes to decide and close a PR is deliberately left unspecified. We +assume that the group of maintainers is bound by mutual trust and respect, and that opposition from +any single maintainer should be taken into consideration. Similarly, we expect maintainers to +justify their reasoning and to accept debating. + +## Escalation process + +Despite the previously described reviewing process, some PR might not show any progress for various +reasons: + + - No strong opinion for or against the proposed patch + - Debates about the proper way to solve the problem at hand + - Lack of consensus + - ... + +All these will eventually lead to stalled PR, where no apparent progress is made across several +weeks, or even months. + +Maintainers should use their best judgement and apply the `status/needs-attention` label. It must +be used sparingly, as each PR with such label will be discussed by a group of maintainers during a +review session. The goal of that session is to agree on one of the following outcomes for the PR: + + * Close, explaining the rationale for not pursuing further + * Continue, either by pushing the PR further in the workflow, or by deciding to carry the patch + (ideally, a maintainer should be immediately assigned to make sure that the PR keeps continued + attention) + * Escalate to Solomon by formulating a few specific questions on which his answers will allow + maintainers to decide. + +## Milestones + +Typically, every merged pull request get shipped naturally with the next release cut from the +`master` branch (either the next minor or major version, as indicated by the +[`VERSION`](https://github.com/docker/docker/blob/master/VERSION) file at the root of the +repository). However, the time-based nature of the release process provides no guarantee that a +given pull request will get merged in time. In other words, all open pull requests are implicitly +considered part of the next minor or major release milestone, and this won't be materialized on +GitHub. + +A merged pull request must be attached to the milestone corresponding to the release in which it +will be shipped: this is both useful for tracking, and to help the release manager with the +changelog generation. + +An open pull request may exceptionally get attached to a milestone to express a particular intent to +get it merged in time for that release. This may for example be the case for an important feature to +be included in a minor release, or a critical bugfix to be included in a patch release. + +Finally, and as documented by the [`PATCH-RELEASES.md`](PATCH-RELEASES.md) process, the existence of +a milestone is not a guarantee that a release will happen, as some milestones will be created purely +for the purpose of bookkeeping diff --git a/vendor/github.com/moby/moby/project/TOOLS.md b/vendor/github.com/moby/moby/project/TOOLS.md new file mode 100644 index 0000000..26303c3 --- /dev/null +++ b/vendor/github.com/moby/moby/project/TOOLS.md @@ -0,0 +1,63 @@ +# Tools + +This page describes the tools we use and infrastructure that is in place for +the Docker project. + +### CI + +The Docker project uses [Jenkins](https://jenkins.dockerproject.org/) as our +continuous integration server. Each Pull Request to Docker is tested by running the +equivalent of `make all`. We chose Jenkins because we can host it ourselves and +we run Docker in Docker to test. + +#### Leeroy + +Leeroy is a Go application which integrates Jenkins with +GitHub pull requests. Leeroy uses +[GitHub hooks](https://developer.github.com/v3/repos/hooks/) +to listen for pull request notifications and starts jobs on your Jenkins +server. Using the Jenkins +[notification plugin][https://wiki.jenkins-ci.org/display/JENKINS/Notification+Plugin], +Leeroy updates the pull request using GitHub's +[status API](https://developer.github.com/v3/repos/statuses/) +with pending, success, failure, or error statuses. + +The leeroy repository is maintained at +[github.com/docker/leeroy](https://github.com/docker/leeroy). + +#### GordonTheTurtle IRC Bot + +The GordonTheTurtle IRC Bot lives in the +[#docker-maintainers](https://botbot.me/freenode/docker-maintainers/) channel +on Freenode. He is built in Go and is based off the project at +[github.com/fabioxgn/go-bot](https://github.com/fabioxgn/go-bot). + +His main command is `!rebuild`, which rebuilds a given Pull Request for a repository. +This command works by integrating with Leroy. He has a few other commands too, such +as `!gif` or `!godoc`, but we are always looking for more fun commands to add. + +The gordon-bot repository is maintained at +[github.com/docker/gordon-bot](https://github.com/docker/gordon-bot) + +### NSQ + +We use [NSQ](https://github.com/bitly/nsq) for various aspects of the project +infrastructure. + +#### Hooks + +The hooks project, +[github.com/crosbymichael/hooks](https://github.com/crosbymichael/hooks), +is a small Go application that manages web hooks from github, hub.docker.com, or +other third party services. + +It can be used for listening to github webhooks & pushing them to a queue, +archiving hooks to rethinkdb for processing, and broadcasting hooks to various +jobs. + +#### Docker Master Binaries + +One of the things queued from the Hooks are the building of the Master +Binaries. This happens on every push to the master branch of Docker. The +repository for this is maintained at +[github.com/docker/docker-bb](https://github.com/docker/docker-bb). diff --git a/vendor/github.com/moby/moby/reference/reference.go b/vendor/github.com/moby/moby/reference/reference.go new file mode 100644 index 0000000..996fc50 --- /dev/null +++ b/vendor/github.com/moby/moby/reference/reference.go @@ -0,0 +1,216 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/image/v1" +) + +const ( + // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified + DefaultTag = "latest" + // DefaultHostname is the default built-in hostname + DefaultHostname = "docker.io" + // LegacyDefaultHostname is automatically converted to DefaultHostname + LegacyDefaultHostname = "index.docker.io" + // DefaultRepoPrefix is the prefix used for default repositories in default host + DefaultRepoPrefix = "library/" +) + +// Named is an object with a full name +type Named interface { + // Name returns normalized repository name, like "ubuntu". + Name() string + // String returns full reference, like "ubuntu@sha256:abcdef..." + String() string + // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" + FullName() string + // Hostname returns hostname for the reference, like "docker.io" + Hostname() string + // RemoteName returns the repository component of the full name, like "library/ubuntu" + RemoteName() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +func ParseNamed(s string) (Named, error) { + named, err := distreference.ParseNamed(s) + if err != nil { + return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag: %s", s, err) + } + r, err := WithName(named.Name()) + if err != nil { + return nil, err + } + if canonical, isCanonical := named.(distreference.Canonical); isCanonical { + return WithDigest(r, canonical.Digest()) + } + if tagged, isTagged := named.(distreference.NamedTagged); isTagged { + return WithTag(r, tagged.Tag()) + } + return r, nil +} + +// TrimNamed removes any tag or digest from the named reference +func TrimNamed(ref Named) Named { + return &namedRef{distreference.TrimNamed(ref)} +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + name, err := normalize(name) + if err != nil { + return nil, err + } + if err := validateName(name); err != nil { + return nil, err + } + r, err := distreference.WithName(name) + if err != nil { + return nil, err + } + return &namedRef{r}, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + r, err := distreference.WithTag(name, tag) + if err != nil { + return nil, err + } + return &taggedRef{namedRef{r}}, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + r, err := distreference.WithDigest(name, digest) + if err != nil { + return nil, err + } + return &canonicalRef{namedRef{r}}, nil +} + +type namedRef struct { + distreference.Named +} +type taggedRef struct { + namedRef +} +type canonicalRef struct { + namedRef +} + +func (r *namedRef) FullName() string { + hostname, remoteName := splitHostname(r.Name()) + return hostname + "/" + remoteName +} +func (r *namedRef) Hostname() string { + hostname, _ := splitHostname(r.Name()) + return hostname +} +func (r *namedRef) RemoteName() string { + _, remoteName := splitHostname(r.Name()) + return remoteName +} +func (r *taggedRef) Tag() string { + return r.namedRef.Named.(distreference.NamedTagged).Tag() +} +func (r *canonicalRef) Digest() digest.Digest { + return r.namedRef.Named.(distreference.Canonical).Digest() +} + +// WithDefaultTag adds a default tag to a reference if it only has a repo name. +func WithDefaultTag(ref Named) Named { + if IsNameOnly(ref) { + ref, _ = WithTag(ref, DefaultTag) + } + return ref +} + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// ParseIDOrReference parses string for an image ID or a reference. ID can be +// without a default prefix. +func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) { + if err := v1.ValidateID(idOrRef); err == nil { + idOrRef = "sha256:" + idOrRef + } + if dgst, err := digest.ParseDigest(idOrRef); err == nil { + return dgst, nil, nil + } + ref, err := ParseNamed(idOrRef) + return "", ref, err +} + +// splitHostname splits a repository name to hostname and remotename string. +// If no valid hostname is found, the default hostname is used. Repository name +// needs to be already validated before. +func splitHostname(name string) (hostname, remoteName string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + hostname, remoteName = DefaultHostname, name + } else { + hostname, remoteName = name[:i], name[i+1:] + } + if hostname == LegacyDefaultHostname { + hostname = DefaultHostname + } + if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { + remoteName = DefaultRepoPrefix + remoteName + } + return +} + +// normalize returns a repository name in its normalized form, meaning it +// will not contain default hostname nor library/ prefix for official images. +func normalize(name string) (string, error) { + host, remoteName := splitHostname(name) + if strings.ToLower(remoteName) != remoteName { + return "", errors.New("invalid reference format: repository name must be lowercase") + } + if host == DefaultHostname { + if strings.HasPrefix(remoteName, DefaultRepoPrefix) { + return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil + } + return remoteName, nil + } + return name, nil +} + +func validateName(name string) error { + if err := v1.ValidateID(name); err == nil { + return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) + } + return nil +} diff --git a/vendor/github.com/moby/moby/reference/reference_test.go b/vendor/github.com/moby/moby/reference/reference_test.go new file mode 100644 index 0000000..ff35ba3 --- /dev/null +++ b/vendor/github.com/moby/moby/reference/reference_test.go @@ -0,0 +1,275 @@ +package reference + +import ( + "testing" + + "github.com/docker/distribution/digest" +) + +func TestValidateReferenceName(t *testing.T) { + validRepoNames := []string{ + "docker/docker", + "library/debian", + "debian", + "docker.io/docker/docker", + "docker.io/library/debian", + "docker.io/debian", + "index.docker.io/docker/docker", + "index.docker.io/library/debian", + "index.docker.io/debian", + "127.0.0.1:5000/docker/docker", + "127.0.0.1:5000/library/debian", + "127.0.0.1:5000/debian", + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + } + invalidRepoNames := []string{ + "https://github.com/docker/docker", + "docker/Docker", + "-docker", + "-docker/docker", + "-docker.io/docker/docker", + "docker///docker", + "docker.io/docker/Docker", + "docker.io/docker///docker", + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + } + + for _, name := range invalidRepoNames { + _, err := ParseNamed(name) + if err == nil { + t.Fatalf("Expected invalid repo name for %q", name) + } + } + + for _, name := range validRepoNames { + _, err := ParseNamed(name) + if err != nil { + t.Fatalf("Error parsing repo name %s, got: %q", name, err) + } + } +} + +func TestValidateRemoteName(t *testing.T) { + validRepositoryNames := []string{ + // Sanity check. + "docker/docker", + + // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + + // Allow embedded hyphens. + "docker-rules/docker", + + // Allow multiple hyphens as well. + "docker---rules/docker", + + //Username doc and image name docker being tested. + "doc/docker", + + // single character names are now allowed. + "d/docker", + "jess/t", + + // Consecutive underscores. + "dock__er/docker", + } + for _, repositoryName := range validRepositoryNames { + _, err := ParseNamed(repositoryName) + if err != nil { + t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) + } + } + + invalidRepositoryNames := []string{ + // Disallow capital letters. + "docker/Docker", + + // Only allow one slash. + "docker///docker", + + // Disallow 64-character hexadecimal. + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + + // Disallow leading and trailing hyphens in namespace. + "-docker/docker", + "docker-/docker", + "-docker-/docker", + + // Don't allow underscores everywhere (as opposed to hyphens). + "____/____", + + "_docker/_docker", + + // Disallow consecutive periods. + "dock..er/docker", + "dock_.er/docker", + "dock-.er/docker", + + // No repository. + "docker/", + + //namespace too long + "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", + } + for _, repositoryName := range invalidRepositoryNames { + if _, err := ParseNamed(repositoryName); err == nil { + t.Errorf("Repository name should be invalid: %v", repositoryName) + } + } +} + +func TestParseRepositoryInfo(t *testing.T) { + type tcase struct { + RemoteName, NormalizedName, FullName, AmbiguousName, Hostname string + } + + tcases := []tcase{ + { + RemoteName: "fooo/bar", + NormalizedName: "fooo/bar", + FullName: "docker.io/fooo/bar", + AmbiguousName: "index.docker.io/fooo/bar", + Hostname: "docker.io", + }, + { + RemoteName: "library/ubuntu", + NormalizedName: "ubuntu", + FullName: "docker.io/library/ubuntu", + AmbiguousName: "library/ubuntu", + Hostname: "docker.io", + }, + { + RemoteName: "nonlibrary/ubuntu", + NormalizedName: "nonlibrary/ubuntu", + FullName: "docker.io/nonlibrary/ubuntu", + AmbiguousName: "", + Hostname: "docker.io", + }, + { + RemoteName: "other/library", + NormalizedName: "other/library", + FullName: "docker.io/other/library", + AmbiguousName: "", + Hostname: "docker.io", + }, + { + RemoteName: "private/moonbase", + NormalizedName: "127.0.0.1:8000/private/moonbase", + FullName: "127.0.0.1:8000/private/moonbase", + AmbiguousName: "", + Hostname: "127.0.0.1:8000", + }, + { + RemoteName: "privatebase", + NormalizedName: "127.0.0.1:8000/privatebase", + FullName: "127.0.0.1:8000/privatebase", + AmbiguousName: "", + Hostname: "127.0.0.1:8000", + }, + { + RemoteName: "private/moonbase", + NormalizedName: "example.com/private/moonbase", + FullName: "example.com/private/moonbase", + AmbiguousName: "", + Hostname: "example.com", + }, + { + RemoteName: "privatebase", + NormalizedName: "example.com/privatebase", + FullName: "example.com/privatebase", + AmbiguousName: "", + Hostname: "example.com", + }, + { + RemoteName: "private/moonbase", + NormalizedName: "example.com:8000/private/moonbase", + FullName: "example.com:8000/private/moonbase", + AmbiguousName: "", + Hostname: "example.com:8000", + }, + { + RemoteName: "privatebasee", + NormalizedName: "example.com:8000/privatebasee", + FullName: "example.com:8000/privatebasee", + AmbiguousName: "", + Hostname: "example.com:8000", + }, + { + RemoteName: "library/ubuntu-12.04-base", + NormalizedName: "ubuntu-12.04-base", + FullName: "docker.io/library/ubuntu-12.04-base", + AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", + Hostname: "docker.io", + }, + } + + for _, tcase := range tcases { + refStrings := []string{tcase.NormalizedName, tcase.FullName} + if tcase.AmbiguousName != "" { + refStrings = append(refStrings, tcase.AmbiguousName) + } + + var refs []Named + for _, r := range refStrings { + named, err := ParseNamed(r) + if err != nil { + t.Fatal(err) + } + refs = append(refs, named) + named, err = WithName(r) + if err != nil { + t.Fatal(err) + } + refs = append(refs, named) + } + + for _, r := range refs { + if expected, actual := tcase.NormalizedName, r.Name(); expected != actual { + t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.FullName, r.FullName(); expected != actual { + t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.Hostname, r.Hostname(); expected != actual { + t.Fatalf("Invalid hostname for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.RemoteName, r.RemoteName(); expected != actual { + t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) + } + + } + } +} + +func TestParseReferenceWithTagAndDigest(t *testing.T) { + ref, err := ParseNamed("busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa") + if err != nil { + t.Fatal(err) + } + if _, isTagged := ref.(NamedTagged); isTagged { + t.Fatalf("Reference from %q should not support tag", ref) + } + if _, isCanonical := ref.(Canonical); !isCanonical { + t.Fatalf("Reference from %q should not support digest", ref) + } + if expected, actual := "busybox@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa", ref.String(); actual != expected { + t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) + } +} + +func TestInvalidReferenceComponents(t *testing.T) { + if _, err := WithName("-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid name") + } + ref, err := WithName("busybox") + if err != nil { + t.Fatal(err) + } + if _, err := WithTag(ref, "-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid tag") + } + if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { + t.Fatal("Expected WithName to detect invalid digest") + } +} diff --git a/vendor/github.com/moby/moby/reference/store.go b/vendor/github.com/moby/moby/reference/store.go new file mode 100644 index 0000000..71ca236 --- /dev/null +++ b/vendor/github.com/moby/moby/reference/store.go @@ -0,0 +1,286 @@ +package reference + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ErrDoesNotExist is returned if a reference is not found in the + // store. + ErrDoesNotExist = errors.New("reference does not exist") +) + +// An Association is a tuple associating a reference with an image ID. +type Association struct { + Ref Named + ID digest.Digest +} + +// Store provides the set of methods which can operate on a tag store. +type Store interface { + References(id digest.Digest) []Named + ReferencesByName(ref Named) []Association + AddTag(ref Named, id digest.Digest, force bool) error + AddDigest(ref Canonical, id digest.Digest, force bool) error + Delete(ref Named) (bool, error) + Get(ref Named) (digest.Digest, error) +} + +type store struct { + mu sync.RWMutex + // jsonPath is the path to the file where the serialized tag data is + // stored. + jsonPath string + // Repositories is a map of repositories, indexed by name. + Repositories map[string]repository + // referencesByIDCache is a cache of references indexed by ID, to speed + // up References. + referencesByIDCache map[digest.Digest]map[string]Named +} + +// Repository maps tags to digests. The key is a stringified Reference, +// including the repository name. +type repository map[string]digest.Digest + +type lexicalRefs []Named + +func (a lexicalRefs) Len() int { return len(a) } +func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } + +type lexicalAssociations []Association + +func (a lexicalAssociations) Len() int { return len(a) } +func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } + +// NewReferenceStore creates a new reference store, tied to a file path where +// the set of references are serialized in JSON format. +func NewReferenceStore(jsonPath string) (Store, error) { + abspath, err := filepath.Abs(jsonPath) + if err != nil { + return nil, err + } + + store := &store{ + jsonPath: abspath, + Repositories: make(map[string]repository), + referencesByIDCache: make(map[digest.Digest]map[string]Named), + } + // Load the json file if it exists, otherwise create it. + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +// AddTag adds a tag reference to the store. If force is set to true, existing +// references can be overwritten. This only works for tags, not digests. +func (store *store) AddTag(ref Named, id digest.Digest, force bool) error { + if _, isCanonical := ref.(Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + return store.addReference(WithDefaultTag(ref), id, force) +} + +// AddDigest adds a digest reference to the store. +func (store *store) AddDigest(ref Canonical, id digest.Digest, force bool) error { + return store.addReference(ref, id, force) +} + +func (store *store) addReference(ref Named, id digest.Digest, force bool) error { + if ref.Name() == string(digest.Canonical) { + return errors.New("refusing to create an ambiguous tag using digest algorithm as name") + } + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + repository = make(map[string]digest.Digest) + store.Repositories[ref.Name()] = repository + } + + refStr := ref.String() + oldID, exists := repository[refStr] + + if exists { + // force only works for tags + if digested, isDigest := ref.(Canonical); isDigest { + return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + } + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. +func (store *store) Delete(ref Named) (bool, error) { + ref = WithDefaultTag(ref) + + store.mu.Lock() + defer store.mu.Unlock() + + repoName := ref.Name() + + repository, exists := store.Repositories[repoName] + if !exists { + return false, ErrDoesNotExist + } + + refStr := ref.String() + if id, exists := repository[refStr]; exists { + delete(repository, refStr) + if len(repository) == 0 { + delete(store.Repositories, repoName) + } + if store.referencesByIDCache[id] != nil { + delete(store.referencesByIDCache[id], refStr) + if len(store.referencesByIDCache[id]) == 0 { + delete(store.referencesByIDCache, id) + } + } + return true, store.save() + } + + return false, ErrDoesNotExist +} + +// Get retrieves an item from the store by reference +func (store *store) Get(ref Named) (digest.Digest, error) { + ref = WithDefaultTag(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + return "", ErrDoesNotExist + } + + id, exists := repository[ref.String()] + if !exists { + return "", ErrDoesNotExist + } + + return id, nil +} + +// References returns a slice of references to the given ID. The slice +// will be nil if there are no references to this ID. +func (store *store) References(id digest.Digest) []Named { + store.mu.RLock() + defer store.mu.RUnlock() + + // Convert the internal map to an array for two reasons: + // 1) We must not return a mutable + // 2) It would be ugly to expose the extraneous map keys to callers. + + var references []Named + for _, ref := range store.referencesByIDCache[id] { + references = append(references, ref) + } + + sort.Sort(lexicalRefs(references)) + + return references +} + +// ReferencesByName returns the references for a given repository name. +// If there are no references known for this repository name, +// ReferencesByName returns nil. +func (store *store) ReferencesByName(ref Named) []Association { + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists { + return nil + } + + var associations []Association + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + return nil + } + associations = append(associations, + Association{ + Ref: ref, + ID: refID, + }) + } + + sort.Sort(lexicalAssociations(associations)) + + return associations +} + +func (store *store) save() error { + // Store the json + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) +} + +func (store *store) reload() error { + f, err := os.Open(store.jsonPath) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + + for _, repository := range store.Repositories { + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + continue + } + if store.referencesByIDCache[refID] == nil { + store.referencesByIDCache[refID] = make(map[string]Named) + } + store.referencesByIDCache[refID][refStr] = ref + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/reference/store_test.go b/vendor/github.com/moby/moby/reference/store_test.go new file mode 100644 index 0000000..dd1d253 --- /dev/null +++ b/vendor/github.com/moby/moby/reference/store_test.go @@ -0,0 +1,356 @@ +package reference + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/docker/distribution/digest" +) + +var ( + saveLoadTestCases = map[string]digest.Digest{ + "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6", + "registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793", + "registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b", + "registry:5000/foobar:master": "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc", + "jess/hollywood:latest": "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe", + "registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c", + "busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + } + + marshalledSaveLoadTestCases = []byte(`{"Repositories":{"busybox":{"busybox:latest":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"},"jess/hollywood":{"jess/hollywood:latest":"sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe"},"registry":{"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6":"sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c"},"registry:5000/foobar":{"registry:5000/foobar:HEAD":"sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6","registry:5000/foobar:alternate":"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793","registry:5000/foobar:latest":"sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b","registry:5000/foobar:master":"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc"}}}`) +) + +func TestLoad(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + defer os.RemoveAll(jsonFile.Name()) + + // Write canned json to the temp file + _, err = jsonFile.Write(marshalledSaveLoadTestCases) + if err != nil { + t.Fatalf("error writing to temp file: %v", err) + } + jsonFile.Close() + + store, err := NewReferenceStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, expectedID := range saveLoadTestCases { + ref, err := ParseNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + id, err := store.Get(ref) + if err != nil { + t.Fatalf("could not find reference %s: %v", refStr, err) + } + if id != expectedID { + t.Fatalf("expected %s - got %s", expectedID, id) + } + } +} + +func TestSave(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, id := range saveLoadTestCases { + ref, err := ParseNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + if canonical, ok := ref.(Canonical); ok { + err = store.AddDigest(canonical, id, false) + if err != nil { + t.Fatalf("could not add digest reference %s: %v", refStr, err) + } + } else { + err = store.AddTag(ref, id, false) + if err != nil { + t.Fatalf("could not add reference %s: %v", refStr, err) + } + } + } + + jsonBytes, err := ioutil.ReadFile(jsonFile.Name()) + if err != nil { + t.Fatalf("could not read json file: %v", err) + } + + if !bytes.Equal(jsonBytes, marshalledSaveLoadTestCases) { + t.Fatalf("save output did not match expectations\nexpected:\n%s\ngot:\n%s", marshalledSaveLoadTestCases, jsonBytes) + } +} + +func TestAddDeleteGet(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + testImageID1 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c") + testImageID2 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d") + testImageID3 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") + + // Try adding a reference with no tag or digest + nameOnly, err := WithName("username/repo") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(nameOnly, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Add a few references + ref1, err := ParseNamed("username/repo1:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref1, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref2, err := ParseNamed("username/repo1:old") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref2, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref3, err := ParseNamed("username/repo1:alias") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref3, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref4, err := ParseNamed("username/repo2:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref4, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref5, err := ParseNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddDigest(ref5.(Canonical), testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Attempt to overwrite with force == false + if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") { + t.Fatalf("did not get expected error on overwrite attempt - got %v", err) + } + // Repeat to overwrite with force == true + if err = store.AddTag(ref4, testImageID3, true); err != nil { + t.Fatalf("failed to force tag overwrite: %v", err) + } + + // Check references so far + id, err := store.Get(nameOnly) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref1) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref2) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID2 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String()) + } + + id, err = store.Get(ref3) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref4) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID3 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) + } + + id, err = store.Get(ref5) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID2 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) + } + + // Get should return ErrDoesNotExist for a nonexistent repo + nonExistRepo, err := ParseNamed("username/nonexistrepo:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + + // Get should return ErrDoesNotExist for a nonexistent tag + nonExistTag, err := ParseNamed("username/repo1:nonexist") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if _, err = store.Get(nonExistTag); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + + // Check References + refs := store.References(testImageID1) + if len(refs) != 3 { + t.Fatal("unexpected number of references") + } + // Looking for the references in this order verifies that they are + // returned lexically sorted. + if refs[0].String() != ref3.String() { + t.Fatalf("unexpected reference: %v", refs[0].String()) + } + if refs[1].String() != ref1.String() { + t.Fatalf("unexpected reference: %v", refs[1].String()) + } + if refs[2].String() != nameOnly.String()+":latest" { + t.Fatalf("unexpected reference: %v", refs[2].String()) + } + + // Check ReferencesByName + repoName, err := WithName("username/repo1") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + associations := store.ReferencesByName(repoName) + if len(associations) != 3 { + t.Fatal("unexpected number of associations") + } + // Looking for the associations in this order verifies that they are + // returned lexically sorted. + if associations[0].Ref.String() != ref3.String() { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[0].ID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[1].Ref.String() != ref1.String() { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[1].ID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[2].Ref.String() != ref2.String() { + t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + if associations[2].ID != testImageID2 { + t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + + // Delete should return ErrDoesNotExist for a nonexistent repo + if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete should return ErrDoesNotExist for a nonexistent tag + if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete a few references + if deleted, err := store.Delete(ref1); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref1); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(ref5); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref5); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(nameOnly); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } +} + +func TestInvalidTags(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "tag-store-test") + defer os.RemoveAll(tmpDir) + + store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") + + // sha256 as repo name + ref, err := ParseNamed("sha256:abc") + if err != nil { + t.Fatal(err) + } + err = store.AddTag(ref, id, true) + if err == nil { + t.Fatalf("expected setting tag %q to fail", ref) + } + + // setting digest as a tag + ref, err = ParseNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") + if err != nil { + t.Fatal(err) + } + err = store.AddTag(ref, id, true) + if err == nil { + t.Fatalf("expected setting digest %q to fail", ref) + } + +} diff --git a/vendor/github.com/moby/moby/registry/auth.go b/vendor/github.com/moby/moby/registry/auth.go new file mode 100644 index 0000000..8cadd51 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/auth.go @@ -0,0 +1,303 @@ +package registry + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +const ( + // AuthClientID is used the ClientID used for the token server + AuthClientID = "docker" +) + +// loginV1 tries to register/login to the v1 registry server. +func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { + registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil) + if err != nil { + return "", "", err + } + + serverAddress := registryEndpoint.String() + + logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) + + if serverAddress == "" { + return "", "", fmt.Errorf("Server Error: Server Address not set.") + } + + loginAgainstOfficialIndex := serverAddress == IndexServer + + req, err := http.NewRequest("GET", serverAddress+"users/", nil) + if err != nil { + return "", "", err + } + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := registryEndpoint.client.Do(req) + if err != nil { + // fallback when request could not be completed + return "", "", fallbackError{ + err: err, + } + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", "", err + } + if resp.StatusCode == http.StatusOK { + return "Login Succeeded", "", nil + } else if resp.StatusCode == http.StatusUnauthorized { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com") + } + return "", "", fmt.Errorf("Wrong login/password, please try again") + } else if resp.StatusCode == http.StatusForbidden { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.") + } + // *TODO: Use registry configuration to determine what this says, if anything? + return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) + } else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326 + logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) + return "", "", fmt.Errorf("Internal Server Error") + } + return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, + resp.StatusCode, resp.Header) +} + +type loginCredentialStore struct { + authConfig *types.AuthConfig +} + +func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { + return lcs.authConfig.Username, lcs.authConfig.Password +} + +func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { + return lcs.authConfig.IdentityToken +} + +func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { + lcs.authConfig.IdentityToken = token +} + +type staticCredentialStore struct { + auth *types.AuthConfig +} + +// NewStaticCredentialStore returns a credential store +// which always returns the same credential values. +func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { + return staticCredentialStore{ + auth: auth, + } +} + +func (scs staticCredentialStore) Basic(*url.URL) (string, string) { + if scs.auth == nil { + return "", "" + } + return scs.auth.Username, scs.auth.Password +} + +func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { + if scs.auth == nil { + return "" + } + return scs.auth.IdentityToken +} + +func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +type fallbackError struct { + err error +} + +func (err fallbackError) Error() string { + return err.err.Error() +} + +// loginV2 tries to login to the v2 registry server. The given registry +// endpoint will be pinged to get authorization challenges. These challenges +// will be used to authenticate against the registry to validate credentials. +func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { + logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") + + modifiers := DockerHeaders(userAgent, nil) + authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) + + credentialAuthConfig := *authConfig + creds := loginCredentialStore{ + authConfig: &credentialAuthConfig, + } + + loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) + if err != nil { + return "", "", err + } + + endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + + resp, err := loginClient.Do(req) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + // TODO(dmcgowan): Attempt to further interpret result, status code and error code string + err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + + return "Login Succeeded", credentialAuthConfig.IdentityToken, nil + +} + +func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { + challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return nil, foundV2, err + } + + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + OfflineAccess: true, + ClientID: AuthClientID, + Scopes: scopes, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + tr := transport.NewTransport(authTransport, modifiers...) + + return &http.Client{ + Transport: tr, + Timeout: 15 * time.Second, + }, foundV2, nil + +} + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} + +// ResolveAuthConfig matches an auth configuration to a server address or a URL +func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := GetAuthConfigKey(index) + // First try the happy case + if c, found := authConfigs[configKey]; found || index.Official { + return c + } + + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for registry, ac := range authConfigs { + if configKey == ConvertToHostname(registry) { + return ac + } + } + + // When all else fails, return an empty auth config + return types.AuthConfig{} +} + +// PingResponseError is used when the response from a ping +// was received but invalid. +type PingResponseError struct { + Err error +} + +func (err PingResponseError) Error() string { + return err.Err.Error() +} + +// PingV2Registry attempts to ping a v2 registry and on success return a +// challenge manager for the supported authentication types and +// whether v2 was confirmed by the response. If a response is received but +// cannot be interpreted a PingResponseError will be returned. +func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { + var ( + foundV2 = false + v2Version = auth.APIVersion{ + Type: "registry", + Version: "2.0", + } + ) + + pingClient := &http.Client{ + Transport: transport, + Timeout: 15 * time.Second, + } + endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, false, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, false, err + } + defer resp.Body.Close() + + versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) + for _, pingVersion := range versions { + if pingVersion == v2Version { + // The version header indicates we're definitely + // talking to a v2 registry. So don't allow future + // fallbacks to the v1 protocol. + + foundV2 = true + break + } + } + + challengeManager := challenge.NewSimpleManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, foundV2, PingResponseError{ + Err: err, + } + } + + return challengeManager, foundV2, nil +} diff --git a/vendor/github.com/moby/moby/registry/auth_test.go b/vendor/github.com/moby/moby/registry/auth_test.go new file mode 100644 index 0000000..9ab71aa --- /dev/null +++ b/vendor/github.com/moby/moby/registry/auth_test.go @@ -0,0 +1,124 @@ +// +build !solaris + +// TODO: Support Solaris + +package registry + +import ( + "testing" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +func buildAuthConfigs() map[string]types.AuthConfig { + authConfigs := map[string]types.AuthConfig{} + + for _, registry := range []string{"testIndex", IndexServer} { + authConfigs[registry] = types.AuthConfig{ + Username: "docker-user", + Password: "docker-pass", + } + } + + return authConfigs +} + +func TestSameAuthDataPostSave(t *testing.T) { + authConfigs := buildAuthConfigs() + authConfig := authConfigs["testIndex"] + if authConfig.Username != "docker-user" { + t.Fail() + } + if authConfig.Password != "docker-pass" { + t.Fail() + } + if authConfig.Auth != "" { + t.Fail() + } +} + +func TestResolveAuthConfigIndexServer(t *testing.T) { + authConfigs := buildAuthConfigs() + indexConfig := authConfigs[IndexServer] + + officialIndex := ®istrytypes.IndexInfo{ + Official: true, + } + privateIndex := ®istrytypes.IndexInfo{ + Official: false, + } + + resolved := ResolveAuthConfig(authConfigs, officialIndex) + assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") + + resolved = ResolveAuthConfig(authConfigs, privateIndex) + assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer") +} + +func TestResolveAuthConfigFullURL(t *testing.T) { + authConfigs := buildAuthConfigs() + + registryAuth := types.AuthConfig{ + Username: "foo-user", + Password: "foo-pass", + } + localAuth := types.AuthConfig{ + Username: "bar-user", + Password: "bar-pass", + } + officialAuth := types.AuthConfig{ + Username: "baz-user", + Password: "baz-pass", + } + authConfigs[IndexServer] = officialAuth + + expectedAuths := map[string]types.AuthConfig{ + "registry.example.com": registryAuth, + "localhost:8000": localAuth, + "registry.com": localAuth, + } + + validRegistries := map[string][]string{ + "registry.example.com": { + "https://registry.example.com/v1/", + "http://registry.example.com/v1/", + "registry.example.com", + "registry.example.com/v1/", + }, + "localhost:8000": { + "https://localhost:8000/v1/", + "http://localhost:8000/v1/", + "localhost:8000", + "localhost:8000/v1/", + }, + "registry.com": { + "https://registry.com/v1/", + "http://registry.com/v1/", + "registry.com", + "registry.com/v1/", + }, + } + + for configKey, registries := range validRegistries { + configured, ok := expectedAuths[configKey] + if !ok { + t.Fail() + } + index := ®istrytypes.IndexInfo{ + Name: configKey, + } + for _, registry := range registries { + authConfigs[registry] = configured + resolved := ResolveAuthConfig(authConfigs, index) + if resolved.Username != configured.Username || resolved.Password != configured.Password { + t.Errorf("%s -> %v != %v\n", registry, resolved, configured) + } + delete(authConfigs, registry) + resolved = ResolveAuthConfig(authConfigs, index) + if resolved.Username == configured.Username || resolved.Password == configured.Password { + t.Errorf("%s -> %v == %v\n", registry, resolved, configured) + } + } + } +} diff --git a/vendor/github.com/moby/moby/registry/config.go b/vendor/github.com/moby/moby/registry/config.go new file mode 100644 index 0000000..9a4f6a9 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/config.go @@ -0,0 +1,305 @@ +package registry + +import ( + "errors" + "fmt" + "net" + "net/url" + "strings" + + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/opts" + "github.com/docker/docker/reference" + "github.com/spf13/pflag" +) + +// ServiceOptions holds command line options. +type ServiceOptions struct { + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` + + // V2Only controls access to legacy registries. If it is set to true via the + // command line flag the daemon will not attempt to contact v1 legacy registries + V2Only bool `json:"disable-legacy-registry,omitempty"` +} + +// serviceConfig holds daemon configuration for the registry service. +type serviceConfig struct { + registrytypes.ServiceConfig + V2Only bool +} + +var ( + // DefaultNamespace is the default namespace + DefaultNamespace = "docker.io" + // DefaultRegistryVersionHeader is the name of the default HTTP header + // that carries Registry version info + DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" + + // IndexHostname is the index hostname + IndexHostname = "index.docker.io" + // IndexServer is used for user auth and image search + IndexServer = "https://" + IndexHostname + "/v1/" + // IndexName is the name of the index + IndexName = "docker.io" + + // NotaryServer is the endpoint serving the Notary trust server + NotaryServer = "https://notary.docker.io" + + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = &url.URL{ + Scheme: "https", + Host: "registry-1.docker.io", + } +) + +var ( + // ErrInvalidRepositoryName is an error returned if the repository name did + // not have the correct form + ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + + emptyServiceConfig = newServiceConfig(ServiceOptions{}) +) + +// for mocking in unit tests +var lookupIP = net.LookupIP + +// InstallCliFlags adds command-line options to the top-level flag parser for +// the current process. +func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) { + mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) + insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) + + flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") + flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") + + options.installCliPlatformFlags(flags) +} + +// newServiceConfig returns a new instance of ServiceConfig +func newServiceConfig(options ServiceOptions) *serviceConfig { + config := &serviceConfig{ + ServiceConfig: registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), + // Hack: Bypass setting the mirrors to IndexConfigs since they are going away + // and Mirrors are only for the official registry anyways. + Mirrors: options.Mirrors, + }, + V2Only: options.V2Only, + } + + config.LoadInsecureRegistries(options.InsecureRegistries) + + return config +} + +// LoadInsecureRegistries loads insecure registries to config +func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { + // Localhost is by default considered as an insecure registry + // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). + // + // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change + // daemon flags on boot2docker? + registries = append(registries, "127.0.0.0/8") + + // Store original InsecureRegistryCIDRs and IndexConfigs + // Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info. + originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs + originalIndexInfos := config.ServiceConfig.IndexConfigs + + config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) + config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0) + +skip: + for _, r := range registries { + // validate insecure registry + if _, err := ValidateIndexName(r); err != nil { + // before returning err, roll back to original data + config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs + config.ServiceConfig.IndexConfigs = originalIndexInfos + return err + } + // Check if CIDR was passed to --insecure-registry + _, ipnet, err := net.ParseCIDR(r) + if err == nil { + // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. + data := (*registrytypes.NetIPNet)(ipnet) + for _, value := range config.InsecureRegistryCIDRs { + if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { + continue skip + } + } + // ipnet is not found, add it in config.InsecureRegistryCIDRs + config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) + + } else { + // Assume `host:port` if not CIDR. + config.IndexConfigs[r] = ®istrytypes.IndexInfo{ + Name: r, + Mirrors: make([]string, 0), + Secure: false, + Official: false, + } + } + } + + // Configure public registry. + config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ + Name: IndexName, + Mirrors: config.Mirrors, + Secure: true, + Official: true, + } + + return nil +} + +// isSecureIndex returns false if the provided indexName is part of the list of insecure registries +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered +// insecure. +// +// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained +// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element +// of insecureRegistries. +func isSecureIndex(config *serviceConfig, indexName string) bool { + // Check for configured index, first. This is needed in case isSecureIndex + // is called from anything besides newIndexInfo, in order to honor per-index configurations. + if index, ok := config.IndexConfigs[indexName]; ok { + return index.Secure + } + + host, _, err := net.SplitHostPort(indexName) + if err != nil { + // assume indexName is of the form `host` without the port and go on. + host = indexName + } + + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip != nil { + addrs = []net.IP{ip} + } + + // if ip == nil, then `host` is neither an IP nor it could be looked up, + // either because the index is unreachable, or because the index is behind an HTTP proxy. + // So, len(addrs) == 0 and we're not aborting. + } + + // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. + for _, addr := range addrs { + for _, ipnet := range config.InsecureRegistryCIDRs { + // check if the addr falls in the subnet + if (*net.IPNet)(ipnet).Contains(addr) { + return false + } + } + } + + return true +} + +// ValidateMirror validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", fmt.Errorf("%s is not a valid URI", val) + } + + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) + } + + if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") + } + + return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil +} + +// ValidateIndexName validates an index name. +func ValidateIndexName(val string) (string, error) { + if val == reference.LegacyDefaultHostname { + val = reference.DefaultHostname + } + if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { + return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) + } + return val, nil +} + +func validateNoScheme(reposName string) error { + if strings.Contains(reposName, "://") { + // It cannot contain a scheme! + return ErrInvalidRepositoryName + } + return nil +} + +// newIndexInfo returns IndexInfo configuration from indexName +func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { + var err error + indexName, err = ValidateIndexName(indexName) + if err != nil { + return nil, err + } + + // Return any configured index info, first. + if index, ok := config.IndexConfigs[indexName]; ok { + return index, nil + } + + // Construct a non-configured index info. + index := ®istrytypes.IndexInfo{ + Name: indexName, + Mirrors: make([]string, 0), + Official: false, + } + index.Secure = isSecureIndex(config, indexName) + return index, nil +} + +// GetAuthConfigKey special-cases using the full index address of the official +// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. +func GetAuthConfigKey(index *registrytypes.IndexInfo) string { + if index.Official { + return IndexServer + } + return index.Name +} + +// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { + index, err := newIndexInfo(config, name.Hostname()) + if err != nil { + return nil, err + } + official := !strings.ContainsRune(name.Name(), '/') + return &RepositoryInfo{ + Named: name, + Index: index, + Official: official, + }, nil +} + +// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but +// lacks registry configuration. +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(emptyServiceConfig, reposName) +} + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) + if err != nil { + return nil, err + } + return indexInfo, nil +} diff --git a/vendor/github.com/moby/moby/registry/config_test.go b/vendor/github.com/moby/moby/registry/config_test.go new file mode 100644 index 0000000..25578a7 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/config_test.go @@ -0,0 +1,49 @@ +package registry + +import ( + "testing" +) + +func TestValidateMirror(t *testing.T) { + valid := []string{ + "http://mirror-1.com", + "https://mirror-1.com", + "http://localhost", + "https://localhost", + "http://localhost:5000", + "https://localhost:5000", + "http://127.0.0.1", + "https://127.0.0.1", + "http://127.0.0.1:5000", + "https://127.0.0.1:5000", + } + + invalid := []string{ + "!invalid!://%as%", + "ftp://mirror-1.com", + "http://mirror-1.com/", + "http://mirror-1.com/?q=foo", + "http://mirror-1.com/v1/", + "http://mirror-1.com/v1/?q=foo", + "http://mirror-1.com/v1/?q=foo#frag", + "http://mirror-1.com?q=foo", + "https://mirror-1.com#frag", + "https://mirror-1.com/", + "https://mirror-1.com/#frag", + "https://mirror-1.com/v1/", + "https://mirror-1.com/v1/#", + "https://mirror-1.com?q", + } + + for _, address := range valid { + if ret, err := ValidateMirror(address); err != nil || ret == "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } + + for _, address := range invalid { + if ret, err := ValidateMirror(address); err == nil || ret != "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } +} diff --git a/vendor/github.com/moby/moby/registry/config_unix.go b/vendor/github.com/moby/moby/registry/config_unix.go new file mode 100644 index 0000000..d692e8e --- /dev/null +++ b/vendor/github.com/moby/moby/registry/config_unix.go @@ -0,0 +1,25 @@ +// +build !windows + +package registry + +import ( + "github.com/spf13/pflag" +) + +var ( + // CertsDir is the directory where certificates are stored + CertsDir = "/etc/docker/certs.d" +) + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:/index.docker.io/v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return s +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + flags.BoolVar(&options.V2Only, "disable-legacy-registry", false, "Disable contacting legacy registries") +} diff --git a/vendor/github.com/moby/moby/registry/config_windows.go b/vendor/github.com/moby/moby/registry/config_windows.go new file mode 100644 index 0000000..d1b313d --- /dev/null +++ b/vendor/github.com/moby/moby/registry/config_windows.go @@ -0,0 +1,25 @@ +package registry + +import ( + "os" + "path/filepath" + "strings" + + "github.com/spf13/pflag" +) + +// CertsDir is the directory where certificates are stored +var CertsDir = os.Getenv("programdata") + `\docker\certs.d` + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:\index.docker.io\v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return filepath.FromSlash(strings.Replace(s, ":", "", -1)) +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + // No Windows specific flags. +} diff --git a/vendor/github.com/moby/moby/registry/endpoint_test.go b/vendor/github.com/moby/moby/registry/endpoint_test.go new file mode 100644 index 0000000..8451d3f --- /dev/null +++ b/vendor/github.com/moby/moby/registry/endpoint_test.go @@ -0,0 +1,78 @@ +package registry + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func TestEndpointParse(t *testing.T) { + testData := []struct { + str string + expected string + }{ + {IndexServer, IndexServer}, + {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v1/"}, + {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000/nonversion/", "http://0.0.0.0:5000/nonversion/v1/"}, + {"http://0.0.0.0:5000/v0/", "http://0.0.0.0:5000/v0/v1/"}, + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td.str, nil, "", nil) + if err != nil { + t.Errorf("%q: %s", td.str, err) + } + if e == nil { + t.Logf("something's fishy, endpoint for %q is nil", td.str) + continue + } + if e.String() != td.expected { + t.Errorf("expected %q, got %q", td.expected, e.String()) + } + } +} + +func TestEndpointParseInvalid(t *testing.T) { + testData := []string{ + "http://0.0.0.0:5000/v2/", + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td, nil, "", nil) + if err == nil { + t.Errorf("expected error parsing %q: parsed as %q", td, e) + } + } +} + +// Ensure that a registry endpoint that responds with a 401 only is determined +// to be a valid v1 registry endpoint +func TestValidateEndpoint(t *testing.T) { + requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) + w.WriteHeader(http.StatusUnauthorized) + }) + + // Make a test server which should validate as a v1 server. + testServer := httptest.NewServer(requireBasicAuthHandler) + defer testServer.Close() + + testServerURL, err := url.Parse(testServer.URL) + if err != nil { + t.Fatal(err) + } + + testEndpoint := V1Endpoint{ + URL: testServerURL, + client: HTTPClient(NewTransport(nil)), + } + + if err = validateEndpoint(&testEndpoint); err != nil { + t.Fatal(err) + } + + if testEndpoint.URL.Scheme != "http" { + t.Fatalf("expecting to validate endpoint as http, got url %s", testEndpoint.String()) + } +} diff --git a/vendor/github.com/moby/moby/registry/endpoint_v1.go b/vendor/github.com/moby/moby/registry/endpoint_v1.go new file mode 100644 index 0000000..6bcf8c9 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/endpoint_v1.go @@ -0,0 +1,198 @@ +package registry + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + registrytypes "github.com/docker/docker/api/types/registry" +) + +// V1Endpoint stores basic information about a V1 registry endpoint. +type V1Endpoint struct { + client *http.Client + URL *url.URL + IsSecure bool +} + +// NewV1Endpoint parses the given address to return a registry endpoint. +func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + + endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + if err := validateEndpoint(endpoint); err != nil { + return nil, err + } + + return endpoint, nil +} + +func validateEndpoint(endpoint *V1Endpoint) error { + logrus.Debugf("pinging registry endpoint %s", endpoint) + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + + var err2 error + if _, err2 = endpoint.Ping(); err2 == nil { + return nil + } + + return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) + } + + return nil +} + +func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + endpoint := &V1Endpoint{ + IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), + URL: new(url.URL), + } + + *endpoint.URL = address + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := NewTransport(tlsConfig) + endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) + return endpoint, nil +} + +// trimV1Address trims the version off the address and returns the +// trimmed address or an error if there is a non-V1 version. +func trimV1Address(address string) (string, error) { + var ( + chunks []string + apiVersionStr string + ) + + if strings.HasSuffix(address, "/") { + address = address[:len(address)-1] + } + + chunks = strings.Split(address, "/") + apiVersionStr = chunks[len(chunks)-1] + if apiVersionStr == "v1" { + return strings.Join(chunks[:len(chunks)-1], "/"), nil + } + + for k, v := range apiVersions { + if k != APIVersion1 && apiVersionStr == v { + return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) + } + } + + return address, nil +} + +func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + + address, err := trimV1Address(address) + if err != nil { + return nil, err + } + + uri, err := url.Parse(address) + if err != nil { + return nil, err + } + + endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// Get the formatted URL for the root of this registry Endpoint +func (e *V1Endpoint) String() string { + return e.URL.String() + "/v1/" +} + +// Path returns a formatted string for the URL +// of this endpoint with the given path appended. +func (e *V1Endpoint) Path(path string) string { + return e.URL.String() + "/v1/" + path +} + +// Ping returns a PingResult which indicates whether the registry is standalone or not. +func (e *V1Endpoint) Ping() (PingResult, error) { + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fallback to http in case of error) + return PingResult{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.Path("_ping"), nil) + if err != nil { + return PingResult{Standalone: false}, err + } + + resp, err := e.client.Do(req) + if err != nil { + return PingResult{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := PingResult{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + logrus.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + logrus.Debugf("PingResult.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + logrus.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". + if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + logrus.Debugf("PingResult.Standalone: %t", info.Standalone) + return info, nil +} diff --git a/vendor/github.com/moby/moby/registry/registry.go b/vendor/github.com/moby/moby/registry/registry.go new file mode 100644 index 0000000..17fa97c --- /dev/null +++ b/vendor/github.com/moby/moby/registry/registry.go @@ -0,0 +1,191 @@ +// Package registry contains client primitives to interact with a remote Docker registry. +package registry + +import ( + "crypto/tls" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrAlreadyExists is an error returned if an image being pushed + // already exists on the remote side + ErrAlreadyExists = errors.New("Image already exists") +) + +func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { + // PreferredServerCipherSuites should have no effect + tlsConfig := tlsconfig.ServerDefault() + + tlsConfig.InsecureSkipVerify = !isSecure + + if isSecure && CertsDir != "" { + hostDir := filepath.Join(CertsDir, cleanPath(hostname)) + logrus.Debugf("hostDir: %s", hostDir) + if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { + return nil, err + } + } + + return tlsConfig, nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// ReadCertsDirectory reads the directory for TLS certificates +// including roots and certificate pairs and updates the +// provided TLS configuration. +func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { + fs, err := ioutil.ReadDir(directory) + if err != nil && !os.IsNotExist(err) { + return err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if tlsConfig.RootCAs == nil { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return fmt.Errorf("unable to get system cert pool: %v", err) + } + tlsConfig.RootCAs = systemPool + } + logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) + if err != nil { + return err + } + tlsConfig.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, keyName) { + return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) + if err != nil { + return err + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, certName) { + return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) + } + } + } + + return nil +} + +// DockerHeaders returns request modifiers with a User-Agent and metaHeaders +func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{} + if userAgent != "" { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ + "User-Agent": []string{userAgent}, + })) + } + if metaHeaders != nil { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) + } + return modifiers +} + +// HTTPClient returns an HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func HTTPClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} + +// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the +// default TLS configuration. +func NewTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + tlsConfig = tlsconfig.ServerDefault() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + return base +} diff --git a/vendor/github.com/moby/moby/registry/registry_mock_test.go b/vendor/github.com/moby/moby/registry/registry_mock_test.go new file mode 100644 index 0000000..21fc1fd --- /dev/null +++ b/vendor/github.com/moby/moby/registry/registry_mock_test.go @@ -0,0 +1,478 @@ +// +build !solaris + +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + "time" + + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" + "github.com/gorilla/mux" + + "github.com/Sirupsen/logrus" +) + +var ( + testHTTPServer *httptest.Server + testHTTPSServer *httptest.Server + testLayers = map[string]map[string]string{ + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { + "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", + "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, + 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, + 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, + 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, + 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, + 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, + 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, + 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, + 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, + }), + }, + "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { + "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", + "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, + 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, + 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, + 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, + 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, + 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, + 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, + 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, + 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, + }), + }, + } + testRepositories = map[string]map[string]string{ + "foo42/bar": { + "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + }, + } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + "other.com": {net.ParseIP("43.43.43.43")}, + } +) + +func init() { + r := mux.NewRouter() + + // /v1/ + r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") + r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") + r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") + r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") + r.HandleFunc("/v1/search", handlerSearch).Methods("GET") + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return addrs, nil + } + for _, addr := range addrs { + if addr.String() == host { + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: no such host") + } +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func makeURL(req string) string { + return testHTTPServer.URL + req +} + +func makeHTTPSURL(req string) string { + return testHTTPSServer.URL + req +} + +func makeIndex(req string) *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: makeURL(req), + } + return index +} + +func makeHTTPSIndex(req string) *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: makeHTTPSURL(req), + } + return index +} + +func makePublicIndex() *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: IndexServer, + Secure: true, + Official: true, + } + return index +} + +func makeServiceConfig(mirrors []string, insecureRegistries []string) *serviceConfig { + options := ServiceOptions{ + Mirrors: mirrors, + InsecureRegistries: insecureRegistries, + } + + return newServiceConfig(options) +} + +func writeHeaders(w http.ResponseWriter) { + h := w.Header() + h.Add("Server", "docker-tests/mock") + h.Add("Expires", "-1") + h.Add("Content-Type", "application/json") + h.Add("Pragma", "no-cache") + h.Add("Cache-Control", "no-cache") + h.Add("X-Docker-Registry-Version", "0.0.0") + h.Add("X-Docker-Registry-Config", "mock") +} + +func writeResponse(w http.ResponseWriter, message interface{}, code int) { + writeHeaders(w) + w.WriteHeader(code) + body, err := json.Marshal(message) + if err != nil { + io.WriteString(w, err.Error()) + return + } + w.Write(body) +} + +func readJSON(r *http.Request, dest interface{}) error { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + return json.Unmarshal(body, dest) +} + +func apiError(w http.ResponseWriter, message string, code int) { + body := map[string]string{ + "error": message, + } + writeResponse(w, body, code) +} + +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a == b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v != %v", a, b) + } + t.Fatal(message) +} + +func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a != b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v == %v", a, b) + } + t.Fatal(message) +} + +// Similar to assertEqual, but does not stop test +func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a == b { + return + } + message := fmt.Sprintf("%v != %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +// Similar to assertNotEqual, but does not stop test +func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a != b { + return + } + message := fmt.Sprintf("%v == %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +func requiresAuth(w http.ResponseWriter, r *http.Request) bool { + writeCookie := func() { + value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) + cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} + http.SetCookie(w, cookie) + //FIXME(sam): this should be sent only on Index routes + value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) + w.Header().Add("X-Docker-Token", value) + } + if len(r.Cookies()) > 0 { + writeCookie() + return true + } + if len(r.Header.Get("Authorization")) > 0 { + writeCookie() + return true + } + w.Header().Add("WWW-Authenticate", "token") + apiError(w, "Wrong auth", 401) + return false +} + +func handlerGetPing(w http.ResponseWriter, r *http.Request) { + writeResponse(w, true, 200) +} + +func handlerGetImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + layer, exists := testLayers[vars["image_id"]] + if !exists { + http.NotFound(w, r) + return + } + writeHeaders(w) + layerSize := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) + io.WriteString(w, layer[vars["action"]]) +} + +func handlerPutImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + imageID := vars["image_id"] + action := vars["action"] + layer, exists := testLayers[imageID] + if !exists { + if action != "json" { + http.NotFound(w, r) + return + } + layer = make(map[string]string) + testLayers[imageID] = layer + } + if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { + if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { + apiError(w, "Wrong checksum", 400) + return + } + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + apiError(w, fmt.Sprintf("Error: %s", err), 500) + return + } + layer[action] = string(body) + writeResponse(w, true, 200) +} + +func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + repositoryName, err := reference.WithName(mux.Vars(r)["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tags, exists := testRepositories[repositoryName.String()] + if !exists { + apiError(w, "Repository not found", 404) + return + } + if r.Method == "DELETE" { + delete(testRepositories, repositoryName.String()) + writeResponse(w, true, 200) + return + } + writeResponse(w, tags, 200) +} + +func handlerGetTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName.String()] + if !exists { + apiError(w, "Repository not found", 404) + return + } + tag, exists := tags[tagName] + if !exists { + apiError(w, "Tag not found", 404) + return + } + writeResponse(w, tag, 200) +} + +func handlerPutTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName.String()] + if !exists { + tags = make(map[string]string) + testRepositories[repositoryName.String()] = tags + } + tagValue := "" + readJSON(r, tagValue) + tags[tagName] = tagValue + writeResponse(w, true, 200) +} + +func handlerUsers(w http.ResponseWriter, r *http.Request) { + code := 200 + if r.Method == "POST" { + code = 201 + } else if r.Method == "PUT" { + code = 204 + } + writeResponse(w, "", code) +} + +func handlerImages(w http.ResponseWriter, r *http.Request) { + u, _ := url.Parse(testHTTPServer.URL) + w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) + w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) + if r.Method == "PUT" { + if strings.HasSuffix(r.URL.Path, "images") { + writeResponse(w, "", 204) + return + } + writeResponse(w, "", 200) + return + } + if r.Method == "DELETE" { + writeResponse(w, "", 204) + return + } + images := []map[string]string{} + for imageID, layer := range testLayers { + image := make(map[string]string) + image["id"] = imageID + image["checksum"] = layer["checksum_tarsum"] + image["Tag"] = "latest" + images = append(images, image) + } + writeResponse(w, images, 200) +} + +func handlerAuth(w http.ResponseWriter, r *http.Request) { + writeResponse(w, "OK", 200) +} + +func handlerSearch(w http.ResponseWriter, r *http.Request) { + result := ®istrytypes.SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []registrytypes.SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) +} + +func TestPing(t *testing.T) { + res, err := http.Get(makeURL("/v1/_ping")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, res.StatusCode, 200, "") + assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", + "This is not a Mocked Registry") +} + +/* Uncomment this to test Mocked Registry locally with curl + * WARNING: Don't push on the repos uncommented, it'll block the tests + * +func TestWait(t *testing.T) { + logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) + c := make(chan int) + <-c +} + +//*/ diff --git a/vendor/github.com/moby/moby/registry/registry_test.go b/vendor/github.com/moby/moby/registry/registry_test.go new file mode 100644 index 0000000..786dfbe --- /dev/null +++ b/vendor/github.com/moby/moby/registry/registry_test.go @@ -0,0 +1,875 @@ +// +build !solaris + +package registry + +import ( + "fmt" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "testing" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" +) + +var ( + token = []string{"fake-token"} +) + +const ( + imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + REPO = "foo42/bar" +) + +func spawnTestRegistrySession(t *testing.T) *Session { + authConfig := &types.AuthConfig{} + endpoint, err := NewV1Endpoint(makeIndex("/v1/"), "", nil) + if err != nil { + t.Fatal(err) + } + userAgent := "docker test client" + var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} + tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...) + client := HTTPClient(tr) + r, err := NewSession(client, authConfig, endpoint) + if err != nil { + t.Fatal(err) + } + // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` + // header while authenticating, in order to retrieve a token that can be later used to + // perform authenticated actions. + // + // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, + // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. + // + // Because we know that the client's transport is an `*authTransport` we simply cast it, + // in order to set the internal cached token to the fake token, and thus send that fake token + // upon every subsequent requests. + r.client.Transport.(*authTransport).token = token + return r +} + +func TestPingRegistryEndpoint(t *testing.T) { + testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) { + ep, err := NewV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + regInfo, err := ep.Ping() + if err != nil { + t.Fatal(err) + } + + assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) + } + + testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makePublicIndex(), false, "Expected standalone to be false for public index") +} + +func TestEndpoint(t *testing.T) { + // Simple wrapper to fail test if err != nil + expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint { + endpoint, err := NewV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + return endpoint + } + + assertInsecureIndex := func(index *registrytypes.IndexInfo) { + index.Secure = true + _, err := NewV1Endpoint(index, "", nil) + assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") + assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") + index.Secure = false + } + + assertSecureIndex := func(index *registrytypes.IndexInfo) { + index.Secure = true + _, err := NewV1Endpoint(index, "", nil) + assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") + assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") + index.Secure = false + } + + index := ®istrytypes.IndexInfo{} + index.Name = makeURL("/v1/") + endpoint := expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + assertInsecureIndex(index) + + index.Name = makeURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + assertInsecureIndex(index) + + httpURL := makeURL("") + index.Name = strings.SplitN(httpURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") + assertInsecureIndex(index) + + index.Name = makeHTTPSURL("/v1/") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + assertSecureIndex(index) + + index.Name = makeHTTPSURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + assertSecureIndex(index) + + httpsURL := makeHTTPSURL("") + index.Name = strings.SplitN(httpsURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") + assertSecureIndex(index) + + badEndpoints := []string{ + "http://127.0.0.1/v1/", + "https://127.0.0.1/v1/", + "http://127.0.0.1", + "https://127.0.0.1", + "127.0.0.1", + } + for _, address := range badEndpoints { + index.Name = address + _, err := NewV1Endpoint(index, "", nil) + checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") + } +} + +func TestGetRemoteHistory(t *testing.T) { + r := spawnTestRegistrySession(t) + hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(hist), 2, "Expected 2 images in history") + assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry") + assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "Unexpected second ancestry") +} + +func TestLookupRemoteImage(t *testing.T) { + r := spawnTestRegistrySession(t) + err := r.LookupRemoteImage(imageID, makeURL("/v1/")) + assertEqual(t, err, nil, "Expected error of remote lookup to nil") + if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil { + t.Fatal("Expected error of remote lookup to not nil") + } +} + +func TestGetRemoteImageJSON(t *testing.T) { + r := spawnTestRegistrySession(t) + json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, size, int64(154), "Expected size 154") + if len(json) == 0 { + t.Fatal("Expected non-empty json") + } + + _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/")) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteImageLayer(t *testing.T) { + r := spawnTestRegistrySession(t) + data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0) + if err != nil { + t.Fatal(err) + } + if data == nil { + t.Fatal("Expected non-nil data result") + } + + _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteTag(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test") + if err != nil { + t.Fatal(err) + } + assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) + + bazRef, err := reference.ParseNamed("foo42/baz") + if err != nil { + t.Fatal(err) + } + _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo") + if err != ErrRepoNotFound { + t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") + } +} + +func TestGetRemoteTags(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(tags), 2, "Expected two tags") + assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) + assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) + + bazRef, err := reference.ParseNamed("foo42/baz") + if err != nil { + t.Fatal(err) + } + _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef) + if err != ErrRepoNotFound { + t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo") + } +} + +func TestGetRepositoryData(t *testing.T) { + r := spawnTestRegistrySession(t) + parsedURL, err := url.Parse(makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + host := "http://" + parsedURL.Host + "/v1/" + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + data, err := r.GetRepositoryData(repoRef) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") + assertEqual(t, len(data.Endpoints), 2, + fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) + assertEqual(t, data.Endpoints[0], host, + fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) + assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/", + fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) + +} + +func TestPushImageJSONRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := &ImgData{ + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + } + + err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageLayerRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + layer := strings.NewReader("") + _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{}) + if err != nil { + t.Fatal(err) + } +} + +func TestParseRepositoryInfo(t *testing.T) { + type staticRepositoryInfo struct { + Index *registrytypes.IndexInfo + RemoteName string + CanonicalName string + LocalName string + Official bool + } + + expectedRepoInfos := map[string]staticRepositoryInfo{ + "fooo/bar": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "fooo/bar", + LocalName: "fooo/bar", + CanonicalName: "docker.io/fooo/bar", + Official: false, + }, + "library/ubuntu": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "nonlibrary/ubuntu": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "nonlibrary/ubuntu", + LocalName: "nonlibrary/ubuntu", + CanonicalName: "docker.io/nonlibrary/ubuntu", + Official: false, + }, + "ubuntu": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "other/library": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "other/library", + LocalName: "other/library", + CanonicalName: "docker.io/other/library", + Official: false, + }, + "127.0.0.1:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "127.0.0.1:8000/private/moonbase", + CanonicalName: "127.0.0.1:8000/private/moonbase", + Official: false, + }, + "127.0.0.1:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "127.0.0.1:8000/privatebase", + CanonicalName: "127.0.0.1:8000/privatebase", + Official: false, + }, + "localhost:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost:8000/private/moonbase", + CanonicalName: "localhost:8000/private/moonbase", + Official: false, + }, + "localhost:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost:8000/privatebase", + CanonicalName: "localhost:8000/privatebase", + Official: false, + }, + "example.com/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com/private/moonbase", + CanonicalName: "example.com/private/moonbase", + Official: false, + }, + "example.com/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com/privatebase", + CanonicalName: "example.com/privatebase", + Official: false, + }, + "example.com:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com:8000/private/moonbase", + CanonicalName: "example.com:8000/private/moonbase", + Official: false, + }, + "example.com:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com:8000/privatebase", + CanonicalName: "example.com:8000/privatebase", + Official: false, + }, + "localhost/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost/private/moonbase", + CanonicalName: "localhost/private/moonbase", + Official: false, + }, + "localhost/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost/privatebase", + CanonicalName: "localhost/privatebase", + Official: false, + }, + IndexName + "/public/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "index." + IndexName + "/public/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + IndexName + "/ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + "index." + IndexName + "/ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + } + + for reposName, expectedRepoInfo := range expectedRepoInfos { + named, err := reference.WithName(reposName) + if err != nil { + t.Error(err) + } + + repoInfo, err := ParseRepositoryInfo(named) + if err != nil { + t.Error(err) + } else { + checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) + checkEqual(t, repoInfo.RemoteName(), expectedRepoInfo.RemoteName, reposName) + checkEqual(t, repoInfo.Name(), expectedRepoInfo.LocalName, reposName) + checkEqual(t, repoInfo.FullName(), expectedRepoInfo.CanonicalName, reposName) + checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) + checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) + } + } +} + +func TestNewIndexInfo(t *testing.T) { + testIndexInfo := func(config *serviceConfig, expectedIndexInfos map[string]*registrytypes.IndexInfo) { + for indexName, expectedIndexInfo := range expectedIndexInfos { + index, err := newIndexInfo(config, indexName) + if err != nil { + t.Fatal(err) + } else { + checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") + checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") + checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") + checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") + } + } + } + + config := newServiceConfig(ServiceOptions{}) + noMirrors := []string{} + expectedIndexInfos := map[string]*registrytypes.IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "index." + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} + config = makeServiceConfig(publicMirrors, []string{"example.com"}) + + expectedIndexInfos = map[string]*registrytypes.IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "index." + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) + expectedIndexInfos = map[string]*registrytypes.IndexInfo{ + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) +} + +func TestMirrorEndpointLookup(t *testing.T) { + containsMirror := func(endpoints []APIEndpoint) bool { + for _, pe := range endpoints { + if pe.URL.Host == "my.mirror" { + return true + } + } + return false + } + s := DefaultService{config: makeServiceConfig([]string{"my.mirror"}, nil)} + + imageName, err := reference.WithName(IndexName + "/test/image") + if err != nil { + t.Error(err) + } + pushAPIEndpoints, err := s.LookupPushEndpoints(imageName.Hostname()) + if err != nil { + t.Fatal(err) + } + if containsMirror(pushAPIEndpoints) { + t.Fatal("Push endpoint should not contain mirror") + } + + pullAPIEndpoints, err := s.LookupPullEndpoints(imageName.Hostname()) + if err != nil { + t.Fatal(err) + } + if !containsMirror(pullAPIEndpoints) { + t.Fatal("Pull endpoint should contain mirror") + } +} + +func TestPushRegistryTag(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageJSONIndex(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := []*ImgData{ + { + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + }, + { + ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + }, + } + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } + repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()}) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } +} + +func TestSearchRepositories(t *testing.T) { + r := spawnTestRegistrySession(t) + results, err := r.SearchRepositories("fakequery", 25) + if err != nil { + t.Fatal(err) + } + if results == nil { + t.Fatal("Expected non-nil SearchResults object") + } + assertEqual(t, results.NumResults, 1, "Expected 1 search results") + assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") + assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") +} + +func TestTrustedLocation(t *testing.T) { + for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == true { + t.Fatalf("'%s' shouldn't be detected as a trusted location", url) + } + } + + for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == false { + t.Fatalf("'%s' should be detected as a trusted location", url) + } + } +} + +func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { + for _, urls := range [][]string{ + {"http://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "http://bar.docker.com"}, + {"https://foo.docker.io", "https://example.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 1 { + t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "" { + t.Fatal("'Authorization' should be empty") + } + } + + for _, urls := range [][]string{ + {"https://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "https://bar.docker.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 2 { + t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "super_secret" { + t.Fatal("'Authorization' should be 'super_secret'") + } + } +} + +func TestIsSecureIndex(t *testing.T) { + tests := []struct { + addr string + insecureRegistries []string + expected bool + }{ + {IndexName, nil, true}, + {"example.com", []string{}, true}, + {"example.com", []string{"example.com"}, false}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, true}, + {"example.com", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, false}, + {"example.com", []string{"42.42.0.0/16"}, false}, + {"example.com:5000", []string{"42.42.42.42/8"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, + {"invalid.domain.com", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, + } + for _, tt := range tests { + config := makeServiceConfig(nil, tt.insecureRegistries) + if sec := isSecureIndex(config, tt.addr); sec != tt.expected { + t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + } + } +} + +type debugTransport struct { + http.RoundTripper + log func(...interface{}) +} + +func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) { + dump, err := httputil.DumpRequestOut(req, false) + if err != nil { + tr.log("could not dump request") + } + tr.log(string(dump)) + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + dump, err = httputil.DumpResponse(resp, false) + if err != nil { + tr.log("could not dump response") + } + tr.log(string(dump)) + return resp, err +} diff --git a/vendor/github.com/moby/moby/registry/service.go b/vendor/github.com/moby/moby/registry/service.go new file mode 100644 index 0000000..596a9c7 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/service.go @@ -0,0 +1,304 @@ +package registry + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" +) + +const ( + // DefaultSearchLimit is the default value for maximum number of returned search results. + DefaultSearchLimit = 25 +) + +// Service is the interface defining what a registry service should implement. +type Service interface { + Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) + LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) + LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) + ResolveRepository(name reference.Named) (*RepositoryInfo, error) + Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) + ServiceConfig() *registrytypes.ServiceConfig + TLSConfig(hostname string) (*tls.Config, error) + LoadInsecureRegistries([]string) error +} + +// DefaultService is a registry service. It tracks configuration data such as a list +// of mirrors. +type DefaultService struct { + config *serviceConfig + mu sync.Mutex +} + +// NewService returns a new instance of DefaultService ready to be +// installed into an engine. +func NewService(options ServiceOptions) *DefaultService { + return &DefaultService{ + config: newServiceConfig(options), + } +} + +// ServiceConfig returns the public registry service configuration. +func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { + s.mu.Lock() + defer s.mu.Unlock() + + servConfig := registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), + IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), + Mirrors: make([]string, 0), + } + + // construct a new ServiceConfig which will not retrieve s.Config directly, + // and look up items in s.config with mu locked + servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) + + for key, value := range s.config.ServiceConfig.IndexConfigs { + servConfig.IndexConfigs[key] = value + } + + servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) + + return &servConfig +} + +// LoadInsecureRegistries loads insecure registries for Service +func (s *DefaultService) LoadInsecureRegistries(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadInsecureRegistries(registries) +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was successful. +// It can be used to verify the validity of a client's credentials. +func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { + // TODO Use ctx when searching for repositories + serverAddress := authConfig.ServerAddress + if serverAddress == "" { + serverAddress = IndexServer + } + if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { + serverAddress = "https://" + serverAddress + } + u, err := url.Parse(serverAddress) + if err != nil { + return "", "", fmt.Errorf("unable to parse server address: %v", err) + } + + endpoints, err := s.LookupPushEndpoints(u.Host) + if err != nil { + return "", "", err + } + + for _, endpoint := range endpoints { + login := loginV2 + if endpoint.Version == APIVersion1 { + login = loginV1 + } + + status, token, err = login(authConfig, endpoint, userAgent) + if err == nil { + return + } + if fErr, ok := err.(fallbackError); ok { + err = fErr.err + logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err) + continue + } + return "", "", err + } + + return "", "", err +} + +// splitReposSearchTerm breaks a search term into an index name and remote name +func splitReposSearchTerm(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + var indexName, remoteName string + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Index repos (ex: samalba/hipache or ubuntu) + // 'docker.io' + indexName = IndexName + remoteName = reposName + } else { + indexName = nameParts[0] + remoteName = nameParts[1] + } + return indexName, remoteName +} + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. +func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { + // TODO Use ctx when searching for repositories + if err := validateNoScheme(term); err != nil { + return nil, err + } + + indexName, remoteName := splitReposSearchTerm(term) + + // Search is a long-running operation, just lock s.config to avoid block others. + s.mu.Lock() + index, err := newIndexInfo(s.config, indexName) + s.mu.Unlock() + + if err != nil { + return nil, err + } + + // *TODO: Search multiple indexes. + endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) + if err != nil { + return nil, err + } + + var client *http.Client + if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { + creds := NewStaticCredentialStore(authConfig) + scopes := []auth.Scope{ + auth.RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + } + + modifiers := DockerHeaders(userAgent, nil) + v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) + if err != nil { + if fErr, ok := err.(fallbackError); ok { + logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) + } else { + return nil, err + } + } else if foundV2 { + // Copy non transport http client features + v2Client.Timeout = endpoint.client.Timeout + v2Client.CheckRedirect = endpoint.client.CheckRedirect + v2Client.Jar = endpoint.client.Jar + + logrus.Debugf("using v2 client for search to %s", endpoint.URL) + client = v2Client + } + } + + if client == nil { + client = endpoint.client + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + } + + r := newSession(client, authConfig, endpoint) + + if index.Official { + localName := remoteName + if strings.HasPrefix(localName, "library/") { + // If pull "library/foo", it's stored locally under "foo" + localName = strings.SplitN(localName, "/", 2)[1] + } + + return r.SearchRepositories(localName, limit) + } + return r.SearchRepositories(remoteName, limit) +} + +// ResolveRepository splits a repository name into its components +// and configuration of the associated registry. +func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + s.mu.Lock() + defer s.mu.Unlock() + return newRepositoryInfo(s.config, name) +} + +// APIEndpoint represents a remote API endpoint +type APIEndpoint struct { + Mirror bool + URL *url.URL + Version APIVersion + Official bool + TrimHostname bool + TLSConfig *tls.Config +} + +// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint +func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) +} + +// TLSConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { + s.mu.Lock() + defer s.mu.Unlock() + + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +// tlsConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { + return s.tlsConfig(mirrorURL.Host) +} + +// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. +// It gives preference to v2 endpoints over v1, mirrors over the actual +// registry, and HTTPS over plain HTTP. +func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + return s.lookupEndpoints(hostname) +} + +// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. +// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. +// Mirrors are not included. +func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + allEndpoints, err := s.lookupEndpoints(hostname) + if err == nil { + for _, endpoint := range allEndpoints { + if !endpoint.Mirror { + endpoints = append(endpoints, endpoint) + } + } + } + return endpoints, err +} + +func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + endpoints, err = s.lookupV2Endpoints(hostname) + if err != nil { + return nil, err + } + + if s.config.V2Only { + return endpoints, nil + } + + legacyEndpoints, err := s.lookupV1Endpoints(hostname) + if err != nil { + return nil, err + } + endpoints = append(endpoints, legacyEndpoints...) + + return endpoints, nil +} diff --git a/vendor/github.com/moby/moby/registry/service_v1.go b/vendor/github.com/moby/moby/registry/service_v1.go new file mode 100644 index 0000000..1d251ae --- /dev/null +++ b/vendor/github.com/moby/moby/registry/service_v1.go @@ -0,0 +1,40 @@ +package registry + +import "net/url" + +func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname { + return []APIEndpoint{}, nil + } + + tlsConfig, err := s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ // or this + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + return endpoints, nil +} diff --git a/vendor/github.com/moby/moby/registry/service_v1_test.go b/vendor/github.com/moby/moby/registry/service_v1_test.go new file mode 100644 index 0000000..bd15dff --- /dev/null +++ b/vendor/github.com/moby/moby/registry/service_v1_test.go @@ -0,0 +1,23 @@ +package registry + +import "testing" + +func TestLookupV1Endpoints(t *testing.T) { + s := NewService(ServiceOptions{}) + + cases := []struct { + hostname string + expectedLen int + }{ + {"example.com", 1}, + {DefaultNamespace, 0}, + {DefaultV2Registry.Host, 0}, + {IndexHostname, 0}, + } + + for _, c := range cases { + if ret, err := s.lookupV1Endpoints(c.hostname); err != nil || len(ret) != c.expectedLen { + t.Errorf("lookupV1Endpoints(`"+c.hostname+"`) returned %+v and %+v", ret, err) + } + } +} diff --git a/vendor/github.com/moby/moby/registry/service_v2.go b/vendor/github.com/moby/moby/registry/service_v2.go new file mode 100644 index 0000000..228d745 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/service_v2.go @@ -0,0 +1,78 @@ +package registry + +import ( + "net/url" + "strings" + + "github.com/docker/go-connections/tlsconfig" +) + +func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + tlsConfig := tlsconfig.ServerDefault() + if hostname == DefaultNamespace || hostname == IndexHostname { + // v2 mirrors + for _, mirror := range s.config.Mirrors { + if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { + mirror = "https://" + mirror + } + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, err + } + mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) + if err != nil { + return nil, err + } + endpoints = append(endpoints, APIEndpoint{ + URL: mirrorURL, + // guess mirrors are v2 + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + // v2 registry + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + + return endpoints, nil + } + + tlsConfig, err = s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion2, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion2, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + + return endpoints, nil +} diff --git a/vendor/github.com/moby/moby/registry/session.go b/vendor/github.com/moby/moby/registry/session.go new file mode 100644 index 0000000..72e286a --- /dev/null +++ b/vendor/github.com/moby/moby/registry/session.go @@ -0,0 +1,783 @@ +package registry + +import ( + "bytes" + "crypto/sha256" + "errors" + "sync" + // this is required for some certificates + _ "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/reference" +) + +var ( + // ErrRepoNotFound is returned if the repository didn't exist on the + // remote side + ErrRepoNotFound = errors.New("Repository not found") +) + +// A Session is used to communicate with a V1 registry +type Session struct { + indexEndpoint *V1Endpoint + client *http.Client + // TODO(tiborvass): remove authConfig + authConfig *types.AuthConfig + id string +} + +type authTransport struct { + http.RoundTripper + *types.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. +// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. +// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. +func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes an HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. + if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + delete(tr.modReq, orig) + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.Ping() + if err != nil { + return err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. + client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return errors.New("cookiejar.New is not supposed to return an error") + } + client.Jar = jar + + return nil +} + +func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { + return &Session{ + authConfig: authConfig, + client: client, + indexEndpoint: endpoint, + id: stringid.GenerateRandomID(), + } +} + +// NewSession creates a new session +// TODO(tiborvass): remove authConfig param once registry client v2 is vendored +func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + + return newSession(client, authConfig, endpoint), nil +} + +// ID returns this registry session's ID. +func (r *Session) ID() string { + return r.id +} + +// GetRemoteHistory retrieves the history of a given image from the registry. +// It returns a list of the parent's JSON files (including the requested image). +func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + var history []string + if err := json.NewDecoder(res.Body).Decode(&history); err != nil { + return nil, fmt.Errorf("Error while reading the http response: %v", err) + } + + logrus.Debugf("Ancestry: %v", history) + return history, nil +} + +// LookupRemoteImage checks if an image exists in the registry +func (r *Session) LookupRemoteImage(imgID, registry string) error { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + return nil +} + +// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. +func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := int64(-1) + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.ParseInt(hdr, 10, 64) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +// GetRemoteImageLayer retrieves an image layer from the registry +func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { + var ( + statusCode = 0 + res *http.Response + err error + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := http.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + statusCode = 0 + res, err = r.client.Do(req) + if err != nil { + logrus.Debugf("Error contacting registry %s: %v", registry, err) + // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + logrus.Debug("server supports resume") + return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + } + logrus.Debug("server doesn't support resume") + return res.Body, nil +} + +// GetRemoteTag retrieves the tag named in the askedTag argument from the given +// repository. It queries each of the registries supplied in the registries +// argument, and returns data from the first one that answers the query +// successfully. +func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { + repository := repositoryRef.RemoteName() + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) + res, err := r.client.Get(endpoint) + if err != nil { + return "", err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return "", ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + var tagID string + if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { + return "", err + } + return tagID, nil + } + return "", fmt.Errorf("Could not reach any registry endpoint") +} + +// GetRemoteTags retrieves all tags from the given repository. It queries each +// of the registries supplied in the registries argument, and returns data from +// the first one that answers the query successfully. It returns a map with +// tag names as the keys and image IDs as the values. +func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { + repository := repositoryRef.RemoteName() + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) + res, err := r.client.Get(endpoint) + if err != nil { + return nil, err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return nil, ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + result := make(map[string]string) + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { + return nil, err + } + return result, nil + } + return nil, fmt.Errorf("Could not reach any registry endpoint") +} + +func buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedURL, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedURL.Scheme + // The registry's URL scheme has to match the Index' + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + +// GetRepositoryData returns lists of images and endpoints for the repository +func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), name.RemoteName()) + + logrus.Debugf("[registry] Calling GET %s", repositoryTarget) + + req, err := http.NewRequest("GET", repositoryTarget, nil) + if err != nil { + return nil, err + } + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + // check if the error is because of i/o timeout + // and return a non-obtuse error message for users + // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" + // was a top search on the docker user forum + if isTimeout(err) { + return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) + } + return nil, fmt.Errorf("Error while pulling image: %v", err) + } + defer res.Body.Close() + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + // TODO: Right now we're ignoring checksums in the response body. + // In the future, we need to use them to check image validity. + if res.StatusCode == 404 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } else if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + remoteChecksums := []*ImgData{} + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData, len(remoteChecksums)) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + }, nil +} + +// PushImageChecksumRegistry uploads checksums for an image +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { + u := registry + "images/" + imgData.ID + "/checksum" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, nil) + if err != nil { + return err + } + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %v", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.client.Jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) + } + return nil +} + +// PushImageJSONRegistry pushes JSON metadata for a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { + + u := registry + "images/" + imgData.ID + "/json" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + } + return nil +} + +// PushImageLayerRegistry sends the checksum of an image layer to the registry +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + u := registry + "images/" + imgID + "/layer" + + logrus.Debugf("[registry] Calling PUT %s", u) + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := http.NewRequest("PUT", u, checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + res, err := r.client.Do(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %v", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// PushRegistryTag pushes a tag on the registry. +// Remote has the format '/ +func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { + // "jsonify" the string + revision = "\"" + revision + "\"" + path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag) + + req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + req.ContentLength = int64(len(revision)) + res, err := r.client.Do(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 && res.StatusCode != 201 { + return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res) + } + return nil +} + +// PushImageJSONIndex uploads an image list to the repository +func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { + cleanImgList := []*ImgData{} + if validate { + for _, elem := range imgList { + if elem.Checksum != "" { + cleanImgList = append(cleanImgList, elem) + } + } + } else { + cleanImgList = imgList + } + + imgListJSON, err := json.Marshal(cleanImgList) + if err != nil { + return nil, err + } + var suffix string + if validate { + suffix = "images" + } + u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), remote.RemoteName(), suffix) + logrus.Debugf("[registry] PUT %s", u) + logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) + headers := map[string][]string{ + "Content-type": {"application/json"}, + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + "X-Docker-Token": {"true"}, + } + if validate { + headers["X-Docker-Endpoints"] = regs + } + + // Redirect if necessary + var res *http.Response + for { + if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { + return nil, err + } + if !shouldRedirect(res) { + break + } + res.Body.Close() + u = res.Header.Get("Location") + logrus.Debugf("Redirected to %s", u) + } + defer res.Body.Close() + + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + + var tokens, endpoints []string + if !validate { + if res.StatusCode != 200 && res.StatusCode != 201 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + } + tokens = res.Header["X-Docker-Token"] + logrus.Debugf("Auth token: %v", tokens) + + if res.Header.Get("X-Docker-Endpoints") == "" { + return nil, fmt.Errorf("Index response didn't contain any endpoints") + } + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) + if err != nil { + return nil, err + } + } else { + if res.StatusCode != 204 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + } + } + + return &RepositoryData{ + Endpoints: endpoints, + }, nil +} + +func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { + req, err := http.NewRequest("PUT", u, bytes.NewReader(body)) + if err != nil { + return nil, err + } + req.ContentLength = int64(len(body)) + for k, v := range headers { + req.Header[k] = v + } + response, err := r.client.Do(req) + if err != nil { + return nil, err + } + return response, nil +} + +func shouldRedirect(response *http.Response) bool { + return response.StatusCode >= 300 && response.StatusCode < 400 +} + +// SearchRepositories performs a search against the remote repository +func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { + if limit < 1 || limit > 100 { + return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit) + } + logrus.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + // Have the AuthTransport send authentication, when logged in. + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + } + result := new(registrytypes.SearchResults) + return result, json.NewDecoder(res.Body).Decode(result) +} + +// GetAuthConfig returns the authentication settings for a session +// TODO(tiborvass): remove this once registry client v2 is vendored +func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { + password := "" + if withPasswd { + password = r.authConfig.Password + } + return &types.AuthConfig{ + Username: r.authConfig.Username, + Password: password, + } +} + +func isTimeout(err error) bool { + type timeout interface { + Timeout() bool + } + e := err + switch urlErr := err.(type) { + case *url.Error: + e = urlErr.Err + } + t, ok := e.(timeout) + return ok && t.Timeout() +} diff --git a/vendor/github.com/moby/moby/registry/types.go b/vendor/github.com/moby/moby/registry/types.go new file mode 100644 index 0000000..49c123a --- /dev/null +++ b/vendor/github.com/moby/moby/registry/types.go @@ -0,0 +1,73 @@ +package registry + +import ( + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" +) + +// RepositoryData tracks the image list, list of endpoints, and list of tokens +// for a repository +type RepositoryData struct { + // ImgList is a list of images in the repository + ImgList map[string]*ImgData + // Endpoints is a list of endpoints returned in X-Docker-Endpoints + Endpoints []string + // Tokens is currently unused (remove it?) + Tokens []string +} + +// ImgData is used to transfer image checksums to and from the registry +type ImgData struct { + // ID is an opaque string that identifies the image + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +// PingResult contains the information returned when pinging a registry. It +// indicates the registry's version and whether the registry claims to be a +// standalone registry. +type PingResult struct { + // Version is the registry version supplied by the registry in an HTTP + // header + Version string `json:"version"` + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// APIVersion is an integral representation of an API version (presently +// either 1 or 2) +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +// API Version identifiers. +const ( + _ = iota + APIVersion1 APIVersion = iota + APIVersion2 +) + +var apiVersions = map[APIVersion]string{ + APIVersion1: "v1", + APIVersion2: "v2", +} + +// RepositoryInfo describes a repository +type RepositoryInfo struct { + reference.Named + // Index points to registry information + Index *registrytypes.IndexInfo + // Official indicates whether the repository is considered official. + // If the registry is official, and the normalized name does not + // contain a '/' (e.g. "foo"), then it is considered an official repo. + Official bool + // Class represents the class of the repository, such as "plugin" + // or "image". + Class string +} diff --git a/vendor/github.com/moby/moby/restartmanager/restartmanager.go b/vendor/github.com/moby/moby/restartmanager/restartmanager.go new file mode 100644 index 0000000..570fc93 --- /dev/null +++ b/vendor/github.com/moby/moby/restartmanager/restartmanager.go @@ -0,0 +1,128 @@ +package restartmanager + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/docker/api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. +var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + restartCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartmanager based on a policy. +func New(policy container.RestartPolicy, restartCount int) RestartManager { + return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})} +} + +func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { + rm.Lock() + rm.policy = policy + rm.Unlock() +} + +func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { + if rm.policy.IsNone() { + return false, nil, nil + } + rm.Lock() + unlockOnExit := true + defer func() { + if unlockOnExit { + rm.Unlock() + } + }() + + if rm.canceled { + return false, nil, ErrRestartCanceled + } + + if rm.active { + return false, nil, fmt.Errorf("invalid call on active restartmanager") + } + // if the container ran for more than 10s, regardless of status and policy reset the + // the timeout back to the default. + if executionDuration.Seconds() >= 10 { + rm.timeout = 0 + } + if rm.timeout == 0 { + rm.timeout = defaultTimeout + } else { + rm.timeout *= backoffMultiplier + } + + var restart bool + switch { + case rm.policy.IsAlways(): + restart = true + case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped: + restart = true + case rm.policy.IsOnFailure(): + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max { + restart = exitCode != 0 + } + } + + if !restart { + rm.active = false + return false, nil, nil + } + + rm.restartCount++ + + unlockOnExit = false + rm.active = true + rm.Unlock() + + ch := make(chan error) + go func() { + select { + case <-rm.cancel: + ch <- ErrRestartCanceled + close(ch) + case <-time.After(rm.timeout): + rm.Lock() + close(ch) + rm.active = false + rm.Unlock() + } + }() + + return true, ch, nil +} + +func (rm *restartManager) Cancel() error { + rm.Do(func() { + rm.Lock() + rm.canceled = true + close(rm.cancel) + rm.Unlock() + }) + return nil +} diff --git a/vendor/github.com/moby/moby/restartmanager/restartmanager_test.go b/vendor/github.com/moby/moby/restartmanager/restartmanager_test.go new file mode 100644 index 0000000..20eced5 --- /dev/null +++ b/vendor/github.com/moby/moby/restartmanager/restartmanager_test.go @@ -0,0 +1,34 @@ +package restartmanager + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types/container" +) + +func TestRestartManagerTimeout(t *testing.T) { + rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) + should, _, err := rm.ShouldRestart(0, false, 1*time.Second) + if err != nil { + t.Fatal(err) + } + if !should { + t.Fatal("container should be restarted") + } + if rm.timeout != 100*time.Millisecond { + t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) + } +} + +func TestRestartManagerTimeoutReset(t *testing.T) { + rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) + rm.timeout = 5 * time.Second + _, _, err := rm.ShouldRestart(0, false, 10*time.Second) + if err != nil { + t.Fatal(err) + } + if rm.timeout != 100*time.Millisecond { + t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) + } +} diff --git a/vendor/github.com/moby/moby/runconfig/compare.go b/vendor/github.com/moby/moby/runconfig/compare.go new file mode 100644 index 0000000..708922f --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/compare.go @@ -0,0 +1,61 @@ +package runconfig + +import "github.com/docker/docker/api/types/container" + +// Compare two Config struct. Do not compare the "Image" nor "Hostname" fields +// If OpenStdin is set, then it differs +func Compare(a, b *container.Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty { + return false + } + + if len(a.Cmd) != len(b.Cmd) || + len(a.Env) != len(b.Env) || + len(a.Labels) != len(b.Labels) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for k, v := range a.Labels { + if v != b.Labels[k] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/vendor/github.com/moby/moby/runconfig/compare_test.go b/vendor/github.com/moby/moby/runconfig/compare_test.go new file mode 100644 index 0000000..6370d7a --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/compare_test.go @@ -0,0 +1,126 @@ +package runconfig + +import ( + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestCompare(t *testing.T) { + ports1 := make(nat.PortSet) + ports1[newPortNoError("tcp", "1111")] = struct{}{} + ports1[newPortNoError("tcp", "2222")] = struct{}{} + ports2 := make(nat.PortSet) + ports2[newPortNoError("tcp", "3333")] = struct{}{} + ports2[newPortNoError("tcp", "4444")] = struct{}{} + ports3 := make(nat.PortSet) + ports3[newPortNoError("tcp", "1111")] = struct{}{} + ports3[newPortNoError("tcp", "2222")] = struct{}{} + ports3[newPortNoError("tcp", "5555")] = struct{}{} + volumes1 := make(map[string]struct{}) + volumes1["/test1"] = struct{}{} + volumes2 := make(map[string]struct{}) + volumes2["/test2"] = struct{}{} + volumes3 := make(map[string]struct{}) + volumes3["/test1"] = struct{}{} + volumes3["/test3"] = struct{}{} + envs1 := []string{"ENV1=value1", "ENV2=value2"} + envs2 := []string{"ENV1=value1", "ENV3=value3"} + entrypoint1 := strslice.StrSlice{"/bin/sh", "-c"} + entrypoint2 := strslice.StrSlice{"/bin/sh", "-d"} + entrypoint3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} + cmd1 := strslice.StrSlice{"/bin/sh", "-c"} + cmd2 := strslice.StrSlice{"/bin/sh", "-d"} + cmd3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} + labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} + labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} + labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} + + sameConfigs := map[*container.Config]*container.Config{ + // Empty config + &container.Config{}: {}, + // Does not compare hostname, domainname & image + &container.Config{ + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user", + }: { + Hostname: "host2", + Domainname: "domain2", + Image: "image2", + User: "user", + }, + // only OpenStdin + &container.Config{OpenStdin: false}: {OpenStdin: false}, + // only env + &container.Config{Env: envs1}: {Env: envs1}, + // only cmd + &container.Config{Cmd: cmd1}: {Cmd: cmd1}, + // only labels + &container.Config{Labels: labels1}: {Labels: labels1}, + // only exposedPorts + &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, + // only entrypoints + &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, + // only volumes + &container.Config{Volumes: volumes1}: {Volumes: volumes1}, + } + differentConfigs := map[*container.Config]*container.Config{ + nil: nil, + &container.Config{ + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user1", + }: { + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user2", + }, + // only OpenStdin + &container.Config{OpenStdin: false}: {OpenStdin: true}, + &container.Config{OpenStdin: true}: {OpenStdin: false}, + // only env + &container.Config{Env: envs1}: {Env: envs2}, + // only cmd + &container.Config{Cmd: cmd1}: {Cmd: cmd2}, + // not the same number of parts + &container.Config{Cmd: cmd1}: {Cmd: cmd3}, + // only labels + &container.Config{Labels: labels1}: {Labels: labels2}, + // not the same number of labels + &container.Config{Labels: labels1}: {Labels: labels3}, + // only exposedPorts + &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, + // not the same number of ports + &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, + // only entrypoints + &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, + // not the same number of parts + &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, + // only volumes + &container.Config{Volumes: volumes1}: {Volumes: volumes2}, + // not the same number of labels + &container.Config{Volumes: volumes1}: {Volumes: volumes3}, + } + for config1, config2 := range sameConfigs { + if !Compare(config1, config2) { + t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) + } + } + for config1, config2 := range differentConfigs { + if Compare(config1, config2) { + t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) + } + } +} diff --git a/vendor/github.com/moby/moby/runconfig/config.go b/vendor/github.com/moby/moby/runconfig/config.go new file mode 100644 index 0000000..508681c --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/config.go @@ -0,0 +1,97 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/volume" +) + +// ContainerDecoder implements httputils.ContainerDecoder +// calling DecodeContainerConfig. +type ContainerDecoder struct{} + +// DecodeConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + return DecodeContainerConfig(src) +} + +// DecodeHostConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + return DecodeHostConfig(src) +} + +// DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper +// struct and returns both a Config and a HostConfig struct +// Be aware this function is not checking whether the resulted structs are nil, +// it's your business to do so +func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) + if err := decoder.Decode(&w); err != nil { + return nil, nil, nil, err + } + + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. + if w.Config != nil && hc != nil { + + // Initialize the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + + // Now validate all the volumes and binds + if err := validateMountSettings(w.Config, hc); err != nil { + return nil, nil, nil, err + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. + if err := ValidateNetMode(w.Config, hc); err != nil { + return nil, nil, nil, err + } + + // Validate isolation + if err := ValidateIsolation(hc); err != nil { + return nil, nil, nil, err + } + + // Validate QoS + if err := ValidateQoS(hc); err != nil { + return nil, nil, nil, err + } + + // Validate Resources + if err := ValidateResources(hc, sysinfo.New(true)); err != nil { + return nil, nil, nil, err + } + return w.Config, hc, w.NetworkingConfig, nil +} + +// validateMountSettings validates each of the volumes and bind settings +// passed by the caller to ensure they are valid. +func validateMountSettings(c *container.Config, hc *container.HostConfig) error { + // it is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len (c.Volumes) > 0 || len (hc.Tmpfs) > 0 ) + + // Ensure all volumes and binds are valid. + for spec := range c.Volumes { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid volume spec %q: %v", spec, err) + } + } + for _, spec := range hc.Binds { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid bind mount spec %q: %v", spec, err) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/runconfig/config_test.go b/vendor/github.com/moby/moby/runconfig/config_test.go new file mode 100644 index 0000000..f1f9de5 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/config_test.go @@ -0,0 +1,139 @@ +package runconfig + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" +) + +type f struct { + file string + entrypoint strslice.StrSlice +} + +func TestDecodeContainerConfig(t *testing.T) { + + var ( + fixtures []f + image string + ) + + //TODO: Should run for Solaris + if runtime.GOOS == "solaris" { + t.Skip() + } + + if runtime.GOOS != "windows" { + image = "ubuntu" + fixtures = []f{ + {"fixtures/unix/container_config_1_14.json", strslice.StrSlice{}}, + {"fixtures/unix/container_config_1_17.json", strslice.StrSlice{"bash"}}, + {"fixtures/unix/container_config_1_19.json", strslice.StrSlice{"bash"}}, + } + } else { + image = "windows" + fixtures = []f{ + {"fixtures/windows/container_config_1_19.json", strslice.StrSlice{"cmd"}}, + } + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, h, _, err := DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Image != image { + t.Fatalf("Expected %s image, found %s\n", image, c.Image) + } + + if len(c.Entrypoint) != len(f.entrypoint) { + t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) + } + + if h != nil && h.Memory != 1000 { + t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) + } + } +} + +// TestDecodeContainerConfigIsolation validates isolation passed +// to the daemon in the hostConfig structure. Note this is platform specific +// as to what level of container isolation is supported. +func TestDecodeContainerConfigIsolation(t *testing.T) { + + // An invalid isolation level + if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { + t.Fatal(err) + } + } + + // Blank isolation (== default) + if _, _, _, err := callDecodeContainerConfigIsolation(""); err != nil { + t.Fatal("Blank isolation should have succeeded") + } + + // Default isolation + if _, _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { + t.Fatal("default isolation should have succeeded") + } + + // Process isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + t.Fatal("process isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "process"`) { + t.Fatal(err) + } + } + } + + // Hyper-V Containers isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + t.Fatal("hyperv isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { + t.Fatal(err) + } + } + } +} + +// callDecodeContainerConfigIsolation is a utility function to call +// DecodeContainerConfig for validating isolation +func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var ( + b []byte + err error + ) + w := ContainerConfigWrapper{ + Config: &container.Config{}, + HostConfig: &container.HostConfig{ + NetworkMode: "none", + Isolation: container.Isolation(isolation)}, + } + if b, err = json.Marshal(w); err != nil { + return nil, nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) + } + return DecodeContainerConfig(bytes.NewReader(b)) +} diff --git a/vendor/github.com/moby/moby/runconfig/config_unix.go b/vendor/github.com/moby/moby/runconfig/config_unix.go new file mode 100644 index 0000000..4ccfc73 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. +// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. + hc = SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/vendor/github.com/moby/moby/runconfig/config_windows.go b/vendor/github.com/moby/moby/runconfig/config_windows.go new file mode 100644 index 0000000..f2361b5 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/config_windows.go @@ -0,0 +1,19 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + HostConfig *container.HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} + +// getHostConfig gets the HostConfig of the Config. +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + return w.HostConfig +} diff --git a/vendor/github.com/moby/moby/runconfig/errors.go b/vendor/github.com/moby/moby/runconfig/errors.go new file mode 100644 index 0000000..bb72c16 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/errors.go @@ -0,0 +1,46 @@ +package runconfig + +import ( + "fmt" + + "github.com/docker/docker/api/errors" +) + +var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") + // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links + ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. + ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. This would result in undefined behavior") + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname = fmt.Errorf("Conflicting options: hostname and the UTS mode") +) + +func conflictError(err error) error { + return errors.NewRequestConflictError(err) +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_14.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_14.json new file mode 100644 index 0000000..b08334c --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_14.json @@ -0,0 +1,30 @@ +{ + "Hostname":"", + "Domainname": "", + "User":"", + "Memory": 1000, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "bash" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_17.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_17.json new file mode 100644 index 0000000..0d78087 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_17.json @@ -0,0 +1,50 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpt": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_19.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_19.json new file mode 100644 index 0000000..de49cf3 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_14.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_14.json new file mode 100644 index 0000000..c72ac91 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_14.json @@ -0,0 +1,18 @@ +{ + "Binds": ["/tmp:/tmp"], + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_19.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_19.json new file mode 100644 index 0000000..5ca8aa7 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_19.json @@ -0,0 +1,30 @@ +{ + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/windows/container_config_1_19.json b/vendor/github.com/moby/moby/runconfig/fixtures/windows/container_config_1_19.json new file mode 100644 index 0000000..724320c --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/windows/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "cmd", + "Image": "windows", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "c:/windows": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["c:/windows:d:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "default", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig.go b/vendor/github.com/moby/moby/runconfig/hostconfig.go new file mode 100644 index 0000000..2b81d02 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig.go @@ -0,0 +1,35 @@ +package runconfig + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. +func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } + return hc +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig_solaris.go b/vendor/github.com/moby/moby/runconfig/hostconfig_solaris.go new file mode 100644 index 0000000..83ad32e --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig_solaris.go @@ -0,0 +1,41 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return false +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + return nil +} + +// ValidateIsolation performs platform specific validation of the +// isolation level in the hostconfig structure. +// This setting is currently discarded for Solaris so this is a no-op. +func ValidateIsolation(hc *container.HostConfig) error { + return nil +} + +// ValidateQoS performs platform specific validation of the QoS settings +func ValidateQoS(hc *container.HostConfig) error { + return nil +} + +// ValidateResources performs platform specific validation of the resource settings +func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + return nil +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig_test.go b/vendor/github.com/moby/moby/runconfig/hostconfig_test.go new file mode 100644 index 0000000..a6a2b34 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig_test.go @@ -0,0 +1,283 @@ +// +build !windows + +package runconfig + +import ( + "bytes" + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// TODO Windows: This will need addressing for a Windows daemon. +func TestNetworkModeTest(t *testing.T) { + networkModes := map[container.NetworkMode][]bool{ + // private, bridge, host, container, none, default + "": {true, false, false, false, false, false}, + "something:weird": {true, false, false, false, false, false}, + "bridge": {true, true, false, false, false, false}, + DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, + "host": {false, false, true, false, false, false}, + "container:name": {false, false, false, true, false, false}, + "none": {true, false, false, false, true, false}, + "default": {true, false, false, false, false, true}, + } + networkModeNames := map[container.NetworkMode]string{ + "": "", + "something:weird": "something:weird", + "bridge": "bridge", + DefaultDaemonNetworkMode(): "bridge", + "host": "host", + "container:name": "container", + "none": "none", + "default": "default", + } + for networkMode, state := range networkModes { + if networkMode.IsPrivate() != state[0] { + t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) + } + if networkMode.IsBridge() != state[1] { + t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) + } + if networkMode.IsHost() != state[2] { + t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) + } + if networkMode.IsContainer() != state[3] { + t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) + } + if networkMode.IsNone() != state[4] { + t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) + } + if networkMode.IsDefault() != state[5] { + t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) + } + if networkMode.NetworkName() != networkModeNames[networkMode] { + t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) + } + } +} + +func TestIpcModeTest(t *testing.T) { + ipcModes := map[container.IpcMode][]bool{ + // private, host, container, valid + "": {true, false, false, true}, + "something:weird": {true, false, false, false}, + ":weird": {true, false, false, true}, + "host": {false, true, false, true}, + "container:name": {false, false, true, true}, + "container:name:something": {false, false, true, false}, + "container:": {false, false, true, false}, + } + for ipcMode, state := range ipcModes { + if ipcMode.IsPrivate() != state[0] { + t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) + } + if ipcMode.IsHost() != state[1] { + t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) + } + if ipcMode.IsContainer() != state[2] { + t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) + } + if ipcMode.Valid() != state[3] { + t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) + } + } + containerIpcModes := map[container.IpcMode]string{ + "": "", + "something": "", + "something:weird": "weird", + "container": "", + "container:": "", + "container:name": "name", + "container:name1:name2": "name1:name2", + } + for ipcMode, container := range containerIpcModes { + if ipcMode.Container() != container { + t.Fatalf("Expected %v for %v but was %v", container, ipcMode, ipcMode.Container()) + } + } +} + +func TestUTSModeTest(t *testing.T) { + utsModes := map[container.UTSMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for utsMode, state := range utsModes { + if utsMode.IsPrivate() != state[0] { + t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) + } + if utsMode.IsHost() != state[1] { + t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) + } + if utsMode.Valid() != state[2] { + t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) + } + } +} + +func TestUsernsModeTest(t *testing.T) { + usrensMode := map[container.UsernsMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for usernsMode, state := range usrensMode { + if usernsMode.IsPrivate() != state[0] { + t.Fatalf("UsernsMode.IsPrivate for %v should have been %v but was %v", usernsMode, state[0], usernsMode.IsPrivate()) + } + if usernsMode.IsHost() != state[1] { + t.Fatalf("UsernsMode.IsHost for %v should have been %v but was %v", usernsMode, state[1], usernsMode.IsHost()) + } + if usernsMode.Valid() != state[2] { + t.Fatalf("UsernsMode.Valid for %v should have been %v but was %v", usernsMode, state[2], usernsMode.Valid()) + } + } +} + +func TestPidModeTest(t *testing.T) { + pidModes := map[container.PidMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for pidMode, state := range pidModes { + if pidMode.IsPrivate() != state[0] { + t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) + } + if pidMode.IsHost() != state[1] { + t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) + } + if pidMode.Valid() != state[2] { + t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) + } + } +} + +func TestRestartPolicy(t *testing.T) { + restartPolicies := map[container.RestartPolicy][]bool{ + // none, always, failure + container.RestartPolicy{}: {true, false, false}, + container.RestartPolicy{"something", 0}: {false, false, false}, + container.RestartPolicy{"no", 0}: {true, false, false}, + container.RestartPolicy{"always", 0}: {false, true, false}, + container.RestartPolicy{"on-failure", 0}: {false, false, true}, + } + for restartPolicy, state := range restartPolicies { + if restartPolicy.IsNone() != state[0] { + t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) + } + if restartPolicy.IsAlways() != state[1] { + t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) + } + if restartPolicy.IsOnFailure() != state[2] { + t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) + } + } +} +func TestDecodeHostConfig(t *testing.T) { + fixtures := []struct { + file string + }{ + {"fixtures/unix/container_hostconfig_1_14.json"}, + {"fixtures/unix/container_hostconfig_1_19.json"}, + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, err := DecodeHostConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Privileged != false { + t.Fatalf("Expected privileged false, found %v\n", c.Privileged) + } + + if l := len(c.Binds); l != 1 { + t.Fatalf("Expected 1 bind, found %d\n", l) + } + + if len(c.CapAdd) != 1 && c.CapAdd[0] != "NET_ADMIN" { + t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) + } + + if len(c.CapDrop) != 1 && c.CapDrop[0] != "NET_ADMIN" { + t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) + } + } +} + +func TestValidateResources(t *testing.T) { + type resourceTest struct { + ConfigCPURealtimePeriod int64 + ConfigCPURealtimeRuntime int64 + SysInfoCPURealtimePeriod bool + SysInfoCPURealtimeRuntime bool + ErrorExpected bool + FailureMsg string + } + + tests := []resourceTest{ + { + ConfigCPURealtimePeriod: 1000, + ConfigCPURealtimeRuntime: 1000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: false, + FailureMsg: "Expected valid configuration", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: false, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-period is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 10000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is greater than cpu-rt-period", + }, + } + + for _, rt := range tests { + var hc container.HostConfig + hc.Resources.CPURealtimePeriod = rt.ConfigCPURealtimePeriod + hc.Resources.CPURealtimeRuntime = rt.ConfigCPURealtimeRuntime + + var si sysinfo.SysInfo + si.CPURealtimePeriod = rt.SysInfoCPURealtimePeriod + si.CPURealtimeRuntime = rt.SysInfoCPURealtimeRuntime + + if err := ValidateResources(&hc, &si); (err != nil) != rt.ErrorExpected { + t.Fatal(rt.FailureMsg, err) + } + } +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig_unix.go b/vendor/github.com/moby/moby/runconfig/hostconfig_unix.go new file mode 100644 index 0000000..6e2b7f5 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig_unix.go @@ -0,0 +1,129 @@ +// +build !windows,!solaris + +package runconfig + +import ( + "fmt" + "runtime" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() || network == "ingress" +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return fmt.Errorf("--net: invalid net mode: invalid container format container:") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} + +// ValidateIsolation performs platform specific validation of +// isolation in the hostconfig structure. Linux only supports "default" +// which is LXC container isolation +func ValidateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} + +// ValidateQoS performs platform specific validation of the QoS settings +func ValidateQoS(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.IOMaximumBandwidth != 0 { + return fmt.Errorf("invalid QoS settings: %s does not support --io-maxbandwidth", runtime.GOOS) + } + + if hc.IOMaximumIOps != 0 { + return fmt.Errorf("invalid QoS settings: %s does not support --io-maxiops", runtime.GOOS) + } + return nil +} + +// ValidateResources performs platform specific validation of the resource settings +// cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice +func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { + return fmt.Errorf("invalid --cpu-rt-period: Your kernel does not support cgroup rt period") + } + + if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { + return fmt.Errorf("invalid --cpu-rt-runtime: Your kernel does not support cgroup rt runtime") + } + + if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { + return fmt.Errorf("invalid --cpu-rt-runtime: rt runtime cannot be higher than rt period") + } + return nil +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig_windows.go b/vendor/github.com/moby/moby/runconfig/hostconfig_windows.go new file mode 100644 index 0000000..91bd6dc --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig_windows.go @@ -0,0 +1,68 @@ +package runconfig + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("nat") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return !container.NetworkMode(network).IsUserDefined() +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if len(parts) > 1 { + return fmt.Errorf("invalid --net: %s", hc.NetworkMode) + } + return nil +} + +// ValidateIsolation performs platform specific validation of the +// isolation in the hostconfig structure. Windows supports 'default' (or +// blank), 'process', or 'hyperv'. +func ValidateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + } + return nil +} + +// ValidateQoS performs platform specific validation of the Qos settings +func ValidateQoS(hc *container.HostConfig) error { + return nil +} + +// ValidateResources performs platform specific validation of the resource settings +func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod != 0 { + return fmt.Errorf("invalid --cpu-rt-period: Windows does not support this feature") + } + if hc.Resources.CPURealtimeRuntime != 0 { + return fmt.Errorf("invalid --cpu-rt-runtime: Windows does not support this feature") + } + return nil +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/envfile.go b/vendor/github.com/moby/moby/runconfig/opts/envfile.go new file mode 100644 index 0000000..f723799 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/envfile.go @@ -0,0 +1,81 @@ +package opts + +import ( + "bufio" + "bytes" + "fmt" + "os" + "strings" + "unicode" + "unicode/utf8" +) + +// ParseEnvFile reads a file with environment variables enumerated by lines +// +// ``Environment variable names used by the utilities in the Shell and +// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase +// letters, digits, and the '_' (underscore) from the characters defined in +// Portable Character Set and do not begin with a digit. *But*, other +// characters may be permitted by an implementation; applications shall +// tolerate the presence of such names.'' +// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html +// +// As of #16585, it's up to application inside docker to validate or not +// environment variables, that's why we just strip leading whitespace and +// nothing more. +func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + currentLine := 0 + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + if !utf8.Valid(scannedBytes) { + return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes) + } + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + // trim the line from all leading whitespace first + line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) + currentLine++ + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + data := strings.SplitN(line, "=", 2) + + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + if strings.ContainsAny(variable, whiteSpaces) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} + } + + if len(data) > 1 { + + // pass the value through, no trimming + lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) + } else { + // if only a pass-through variable is given, clean it up. + lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) + } + } + } + return lines, scanner.Err() +} + +var whiteSpaces = " \t" + +// ErrBadEnvVariable typed error for bad environment variable +type ErrBadEnvVariable struct { + msg string +} + +func (e ErrBadEnvVariable) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/envfile_test.go b/vendor/github.com/moby/moby/runconfig/opts/envfile_test.go new file mode 100644 index 0000000..5dd7078 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/envfile_test.go @@ -0,0 +1,142 @@ +package opts + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +func tmpFileWithContent(content string, t *testing.T) string { + tmpFile, err := ioutil.TempFile("", "envfile-test") + if err != nil { + t.Fatal(err) + } + defer tmpFile.Close() + + tmpFile.WriteString(content) + return tmpFile.Name() +} + +// Test ParseEnvFile for a file with a few well formatted lines +func TestParseEnvFileGoodFile(t *testing.T) { + content := `foo=bar + baz=quux +# comment + +_foobar=foobaz +with.dots=working +and_underscore=working too +` + // Adding a newline + a line with pure whitespace. + // This is being done like this instead of the block above + // because it's common for editors to trim trailing whitespace + // from lines, which becomes annoying since that's the + // exact thing we need to test. + content += "\n \t " + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + expectedLines := []string{ + "foo=bar", + "baz=quux", + "_foobar=foobaz", + "with.dots=working", + "and_underscore=working too", + } + + if !reflect.DeepEqual(lines, expectedLines) { + t.Fatal("lines not equal to expected_lines") + } +} + +// Test ParseEnvFile for an empty file +func TestParseEnvFileEmptyFile(t *testing.T) { + tmpFile := tmpFileWithContent("", t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + if len(lines) != 0 { + t.Fatal("lines not empty; expected empty") + } +} + +// Test ParseEnvFile for a non existent file +func TestParseEnvFileNonExistentFile(t *testing.T) { + _, err := ParseEnvFile("foo_bar_baz") + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("Expected a PathError, got [%v]", err) + } +} + +// Test ParseEnvFile for a badly formatted file +func TestParseEnvFileBadlyFormattedFile(t *testing.T) { + content := `foo=bar + f =quux +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatalf("Expected an ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} + +// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize +func TestParseEnvFileLineTooLongFile(t *testing.T) { + content := strings.Repeat("a", bufio.MaxScanTokenSize+42) + content = fmt.Sprint("foo=", content) + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } +} + +// ParseEnvFile with a random file, pass through +func TestParseEnvFileRandomFile(t *testing.T) { + content := `first line +another invalid line` + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + + if err == nil { + t.Fatalf("Expected an ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected an ErrBadEnvvariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/fixtures/utf16.env b/vendor/github.com/moby/moby/runconfig/opts/fixtures/utf16.env new file mode 100755 index 0000000000000000000000000000000000000000..3a73358fffbc0d5d3d4df985ccf2f4a1a29cdb2a GIT binary patch literal 54 ucmezW&yB$!2yGdh7#tab7 1 { + return val, nil + } + if !doesEnvExist(val) { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if runtime.GOOS == "windows" { + // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent. + if strings.EqualFold(parts[0], name) { + return true + } + } + if parts[0] == name { + return true + } + } + return false +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := fopts.ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} + +// ValidateMACAddress validates a MAC address. +func ValidateMACAddress(val string) (string, error) { + _, err := net.ParseMAC(strings.TrimSpace(val)) + if err != nil { + return "", err + } + return val, nil +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/opts_test.go b/vendor/github.com/moby/moby/runconfig/opts/opts_test.go new file mode 100644 index 0000000..43f8730 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/opts_test.go @@ -0,0 +1,113 @@ +package opts + +import ( + "fmt" + "os" + "runtime" + "strings" + "testing" +) + +func TestValidateAttach(t *testing.T) { + valid := []string{ + "stdin", + "stdout", + "stderr", + "STDIN", + "STDOUT", + "STDERR", + } + if _, err := ValidateAttach("invalid"); err == nil { + t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") + } + + for _, attach := range valid { + value, err := ValidateAttach(attach) + if err != nil { + t.Fatal(err) + } + if value != strings.ToLower(attach) { + t.Fatalf("Expected [%v], got [%v]", attach, value) + } + } +} + +func TestValidateEnv(t *testing.T) { + valids := map[string]string{ + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + "asd!qwe": "asd!qwe", + "1asd": "1asd", + "123": "123", + "some space": "some space", + " some space before": " some space before", + "some space after ": "some space after ", + } + // Environment variables are case in-sensitive on Windows + if runtime.GOOS == "windows" { + valids["PaTh"] = fmt.Sprintf("PaTh=%v", os.Getenv("PATH")) + } + for value, expected := range valids { + actual, err := ValidateEnv(value) + if err != nil { + t.Fatal(err) + } + if actual != expected { + t.Fatalf("Expected [%v], got [%v]", expected, actual) + } + } +} + +func TestValidateExtraHosts(t *testing.T) { + valid := []string{ + `myhost:192.168.0.1`, + `thathost:10.0.2.1`, + `anipv6host:2003:ab34:e::1`, + `ipv6local:::1`, + } + + invalid := map[string]string{ + `myhost:192.notanipaddress.1`: `invalid IP`, + `thathost-nosemicolon10.0.0.1`: `bad format`, + `anipv6host:::::1`: `invalid IP`, + `ipv6local:::0::`: `invalid IP`, + } + + for _, extrahost := range valid { + if _, err := ValidateExtraHost(extrahost); err != nil { + t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) + } + } + + for extraHost, expectedError := range invalid { + if _, err := ValidateExtraHost(extraHost); err == nil { + t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) + } + } + } +} + +func TestValidateMACAddress(t *testing.T) { + if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) + } + + if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") + } + + if _, err := ValidateMACAddress(`random invalid string`); err == nil { + t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") + } +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/parse.go b/vendor/github.com/moby/moby/runconfig/opts/parse.go new file mode 100644 index 0000000..3a0da79 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/parse.go @@ -0,0 +1,999 @@ +package opts + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "path" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +// ContainerOptions is a data object with all the options for creating a container +type ContainerOptions struct { + attach opts.ListOpts + volumes opts.ListOpts + tmpfs opts.ListOpts + blkioWeightDevice WeightdeviceOpt + deviceReadBps ThrottledeviceOpt + deviceWriteBps ThrottledeviceOpt + links opts.ListOpts + aliases opts.ListOpts + linkLocalIPs opts.ListOpts + deviceReadIOps ThrottledeviceOpt + deviceWriteIOps ThrottledeviceOpt + env opts.ListOpts + labels opts.ListOpts + devices opts.ListOpts + ulimits *UlimitOpt + sysctls *opts.MapOpts + publish opts.ListOpts + expose opts.ListOpts + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOptions opts.ListOpts + extraHosts opts.ListOpts + volumesFrom opts.ListOpts + envFile opts.ListOpts + capAdd opts.ListOpts + capDrop opts.ListOpts + groupAdd opts.ListOpts + securityOpt opts.ListOpts + storageOpt opts.ListOpts + labelsFile opts.ListOpts + loggingOpts opts.ListOpts + privileged bool + pidMode string + utsMode string + usernsMode string + publishAll bool + stdin bool + tty bool + oomKillDisable bool + oomScoreAdj int + containerIDFile string + entrypoint string + hostname string + memoryString string + memoryReservation string + memorySwap string + kernelMemory string + user string + workingDir string + cpuCount int64 + cpuShares int64 + cpuPercent int64 + cpuPeriod int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpuQuota int64 + cpus opts.NanoCPUs + cpusetCpus string + cpusetMems string + blkioWeight uint16 + ioMaxBandwidth string + ioMaxIOps uint64 + swappiness int64 + netMode string + macAddress string + ipv4Address string + ipv6Address string + ipcMode string + pidsLimit int64 + restartPolicy string + readonlyRootfs bool + loggingDriver string + cgroupParent string + volumeDriver string + stopSignal string + stopTimeout int + isolation string + shmSize string + noHealthcheck bool + healthCmd string + healthInterval time.Duration + healthTimeout time.Duration + healthRetries int + runtime string + autoRemove bool + init bool + initPath string + credentialSpec string + + Image string + Args []string +} + +// AddFlags adds all command line flags that will be used by Parse to the FlagSet +func AddFlags(flags *pflag.FlagSet) *ContainerOptions { + copts := &ContainerOptions{ + aliases: opts.NewListOpts(nil), + attach: opts.NewListOpts(ValidateAttach), + blkioWeightDevice: NewWeightdeviceOpt(ValidateWeightDevice), + capAdd: opts.NewListOpts(nil), + capDrop: opts.NewListOpts(nil), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOptions: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + deviceReadBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), + deviceReadIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), + deviceWriteBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), + deviceWriteIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), + devices: opts.NewListOpts(ValidateDevice), + env: opts.NewListOpts(ValidateEnv), + envFile: opts.NewListOpts(nil), + expose: opts.NewListOpts(nil), + extraHosts: opts.NewListOpts(ValidateExtraHost), + groupAdd: opts.NewListOpts(nil), + labels: opts.NewListOpts(ValidateEnv), + labelsFile: opts.NewListOpts(nil), + linkLocalIPs: opts.NewListOpts(nil), + links: opts.NewListOpts(ValidateLink), + loggingOpts: opts.NewListOpts(nil), + publish: opts.NewListOpts(nil), + securityOpt: opts.NewListOpts(nil), + storageOpt: opts.NewListOpts(nil), + sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), + tmpfs: opts.NewListOpts(nil), + ulimits: NewUlimitOpt(nil), + volumes: opts.NewListOpts(nil), + volumesFrom: opts.NewListOpts(nil), + } + + // General purpose flags + flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") + flags.Var(&copts.devices, "device", "Add a host device to the container") + flags.VarP(&copts.env, "env", "e", "Set environment variables") + flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") + flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") + flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") + flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") + flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") + flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") + flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") + flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") + flags.StringVar(&copts.stopSignal, "stop-signal", signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) + flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container") + flags.SetAnnotation("stop-timeout", "version", []string{"1.25"}) + flags.Var(copts.sysctls, "sysctl", "Sysctl options") + flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.Var(copts.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: [:])") + flags.StringVarP(&copts.workingDir, "workdir", "w", "", "Working directory inside the container") + flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits") + + // Security + flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities") + flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities") + flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container") + flags.Var(&copts.securityOpt, "security-opt", "Security Options") + flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use") + flags.StringVar(&copts.credentialSpec, "credentialspec", "", "Credential spec for managed service account (Windows only)") + + // Network and port publishing flag + flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") + flags.Var(&copts.dns, "dns", "Set custom DNS servers") + // We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way. + // This is to be consistent with service create/update + flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options") + flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options") + flags.MarkHidden("dns-opt") + flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains") + flags.Var(&copts.expose, "expose", "Expose a port or a range of ports") + flags.StringVar(&copts.ipv4Address, "ip", "", "Container IPv4 address (e.g. 172.30.100.104)") + flags.StringVar(&copts.ipv6Address, "ip6", "", "Container IPv6 address (e.g. 2001:db8::33)") + flags.Var(&copts.links, "link", "Add link to another container") + flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") + flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") + flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host") + flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") + // We allow for both "--net" and "--network", although the latter is the recommended way. + flags.StringVar(&copts.netMode, "net", "default", "Connect a container to a network") + flags.StringVar(&copts.netMode, "network", "default", "Connect a container to a network") + flags.MarkHidden("net") + // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way. + flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") + flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") + flags.MarkHidden("net-alias") + + // Logging and storage + flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") + flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") + flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") + flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") + flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") + flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") + flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") + + // Health-checking + flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") + flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ns|us|ms|s|m|h) (default 0s)") + flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") + flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s)") + flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK") + + // Resource management + flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") + flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file") + flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") + flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") + flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") + flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") + flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Var(&copts.cpus, "cpus", "Number of CPUs") + flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") + flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") + flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") + flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") + flags.StringVar(&copts.ioMaxBandwidth, "io-maxbandwidth", "", "Maximum IO bandwidth limit for the system drive (Windows only)") + flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") + flags.StringVar(&copts.kernelMemory, "kernel-memory", "", "Kernel memory limit") + flags.StringVarP(&copts.memoryString, "memory", "m", "", "Memory limit") + flags.StringVar(&copts.memoryReservation, "memory-reservation", "", "Memory soft limit") + flags.StringVar(&copts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") + flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") + flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") + flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") + + // Low-level execution (cgroups, namespaces, ...) + flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&copts.ipcMode, "ipc", "", "IPC namespace to use") + flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") + flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") + flags.StringVar(&copts.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") + flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") + flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") + + flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") + flags.StringVar(&copts.initPath, "init-path", "", "Path to the docker-init binary") + return copts +} + +// Parse parses the args for the specified command and generates a Config, +// a HostConfig and returns them with the specified command. +// If the specified args are not valid, it will return an error. +func Parse(flags *pflag.FlagSet, copts *ContainerOptions) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var ( + attachStdin = copts.attach.Get("stdin") + attachStdout = copts.attach.Get("stdout") + attachStderr = copts.attach.Get("stderr") + ) + + // Validate the input mac address + if copts.macAddress != "" { + if _, err := ValidateMACAddress(copts.macAddress); err != nil { + return nil, nil, nil, fmt.Errorf("%s is not a valid mac address", copts.macAddress) + } + } + if copts.stdin { + attachStdin = true + } + // If -a is not set, attach to stdout and stderr + if copts.attach.Len() == 0 { + attachStdout = true + attachStderr = true + } + + var err error + + var memory int64 + if copts.memoryString != "" { + memory, err = units.RAMInBytes(copts.memoryString) + if err != nil { + return nil, nil, nil, err + } + } + + var memoryReservation int64 + if copts.memoryReservation != "" { + memoryReservation, err = units.RAMInBytes(copts.memoryReservation) + if err != nil { + return nil, nil, nil, err + } + } + + var memorySwap int64 + if copts.memorySwap != "" { + if copts.memorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(copts.memorySwap) + if err != nil { + return nil, nil, nil, err + } + } + } + + var kernelMemory int64 + if copts.kernelMemory != "" { + kernelMemory, err = units.RAMInBytes(copts.kernelMemory) + if err != nil { + return nil, nil, nil, err + } + } + + swappiness := copts.swappiness + if swappiness != -1 && (swappiness < 0 || swappiness > 100) { + return nil, nil, nil, fmt.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness) + } + + var shmSize int64 + if copts.shmSize != "" { + shmSize, err = units.RAMInBytes(copts.shmSize) + if err != nil { + return nil, nil, nil, err + } + } + + // TODO FIXME units.RAMInBytes should have a uint64 version + var maxIOBandwidth int64 + if copts.ioMaxBandwidth != "" { + maxIOBandwidth, err = units.RAMInBytes(copts.ioMaxBandwidth) + if err != nil { + return nil, nil, nil, err + } + if maxIOBandwidth < 0 { + return nil, nil, nil, fmt.Errorf("invalid value: %s. Maximum IO Bandwidth must be positive", copts.ioMaxBandwidth) + } + } + + var binds []string + volumes := copts.volumes.GetMap() + // add any bind targets to the list of container volumes + for bind := range copts.volumes.GetMap() { + if arr := volumeSplitN(bind, 2); len(arr) > 1 { + // after creating the bind mount we want to delete it from the copts.volumes values because + // we do not want bind mounts being committed to image configs + binds = append(binds, bind) + // We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if + // there are duplicates entries. + delete(volumes, bind) + } + } + + // Can't evaluate options passed into --tmpfs until we actually mount + tmpfs := make(map[string]string) + for _, t := range copts.tmpfs.GetAll() { + if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { + tmpfs[arr[0]] = arr[1] + } else { + tmpfs[arr[0]] = "" + } + } + + var ( + runCmd strslice.StrSlice + entrypoint strslice.StrSlice + ) + + if len(copts.Args) > 0 { + runCmd = strslice.StrSlice(copts.Args) + } + + if copts.entrypoint != "" { + entrypoint = strslice.StrSlice{copts.entrypoint} + } else if flags.Changed("entrypoint") { + // if `--entrypoint=` is parsed then Entrypoint is reset + entrypoint = []string{""} + } + + ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + // Merge in exposed ports to the map of published ports + for _, e := range copts.expose.GetAll() { + if strings.Contains(e, ":") { + return nil, nil, nil, fmt.Errorf("invalid port format for --expose: %s", e) + } + //support two formats for expose, original format /[] or /[] + proto, port := nat.SplitProtoPort(e) + //parse the start and end port and create a sequence of ports to expose + //if expose a port, the start and end port are the same + start, end, err := nat.ParsePortRange(port) + if err != nil { + return nil, nil, nil, fmt.Errorf("invalid range format for --expose: %s, error: %s", e, err) + } + for i := start; i <= end; i++ { + p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) + if err != nil { + return nil, nil, nil, err + } + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + } + + // parse device mappings + deviceMappings := []container.DeviceMapping{} + for _, device := range copts.devices.GetAll() { + deviceMapping, err := ParseDevice(device) + if err != nil { + return nil, nil, nil, err + } + deviceMappings = append(deviceMappings, deviceMapping) + } + + // collect all the environment variables for the container + envVariables, err := ReadKVStrings(copts.envFile.GetAll(), copts.env.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + // collect all the labels for the container + labels, err := ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + ipcMode := container.IpcMode(copts.ipcMode) + if !ipcMode.Valid() { + return nil, nil, nil, fmt.Errorf("--ipc: invalid IPC mode") + } + + pidMode := container.PidMode(copts.pidMode) + if !pidMode.Valid() { + return nil, nil, nil, fmt.Errorf("--pid: invalid PID mode") + } + + utsMode := container.UTSMode(copts.utsMode) + if !utsMode.Valid() { + return nil, nil, nil, fmt.Errorf("--uts: invalid UTS mode") + } + + usernsMode := container.UsernsMode(copts.usernsMode) + if !usernsMode.Valid() { + return nil, nil, nil, fmt.Errorf("--userns: invalid USER mode") + } + + restartPolicy, err := ParseRestartPolicy(copts.restartPolicy) + if err != nil { + return nil, nil, nil, err + } + + loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + // Healthcheck + var healthConfig *container.HealthConfig + haveHealthSettings := copts.healthCmd != "" || + copts.healthInterval != 0 || + copts.healthTimeout != 0 || + copts.healthRetries != 0 + if copts.noHealthcheck { + if haveHealthSettings { + return nil, nil, nil, fmt.Errorf("--no-healthcheck conflicts with --health-* options") + } + test := strslice.StrSlice{"NONE"} + healthConfig = &container.HealthConfig{Test: test} + } else if haveHealthSettings { + var probe strslice.StrSlice + if copts.healthCmd != "" { + args := []string{"CMD-SHELL", copts.healthCmd} + probe = strslice.StrSlice(args) + } + if copts.healthInterval < 0 { + return nil, nil, nil, fmt.Errorf("--health-interval cannot be negative") + } + if copts.healthTimeout < 0 { + return nil, nil, nil, fmt.Errorf("--health-timeout cannot be negative") + } + + healthConfig = &container.HealthConfig{ + Test: probe, + Interval: copts.healthInterval, + Timeout: copts.healthTimeout, + Retries: copts.healthRetries, + } + } + + resources := container.Resources{ + CgroupParent: copts.cgroupParent, + Memory: memory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + MemorySwappiness: &copts.swappiness, + KernelMemory: kernelMemory, + OomKillDisable: &copts.oomKillDisable, + NanoCPUs: copts.cpus.Value(), + CPUCount: copts.cpuCount, + CPUPercent: copts.cpuPercent, + CPUShares: copts.cpuShares, + CPUPeriod: copts.cpuPeriod, + CpusetCpus: copts.cpusetCpus, + CpusetMems: copts.cpusetMems, + CPUQuota: copts.cpuQuota, + CPURealtimePeriod: copts.cpuRealtimePeriod, + CPURealtimeRuntime: copts.cpuRealtimeRuntime, + PidsLimit: copts.pidsLimit, + BlkioWeight: copts.blkioWeight, + BlkioWeightDevice: copts.blkioWeightDevice.GetList(), + BlkioDeviceReadBps: copts.deviceReadBps.GetList(), + BlkioDeviceWriteBps: copts.deviceWriteBps.GetList(), + BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(), + BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(), + IOMaximumIOps: copts.ioMaxIOps, + IOMaximumBandwidth: uint64(maxIOBandwidth), + Ulimits: copts.ulimits.GetList(), + Devices: deviceMappings, + } + + config := &container.Config{ + Hostname: copts.hostname, + ExposedPorts: ports, + User: copts.user, + Tty: copts.tty, + // TODO: deprecated, it comes from -n, --networking + // it's still needed internally to set the network to disabled + // if e.g. bridge is none in daemon opts, and in inspect + NetworkDisabled: false, + OpenStdin: copts.stdin, + AttachStdin: attachStdin, + AttachStdout: attachStdout, + AttachStderr: attachStderr, + Env: envVariables, + Cmd: runCmd, + Image: copts.Image, + Volumes: volumes, + MacAddress: copts.macAddress, + Entrypoint: entrypoint, + WorkingDir: copts.workingDir, + Labels: ConvertKVStringsToMap(labels), + Healthcheck: healthConfig, + } + if flags.Changed("stop-signal") { + config.StopSignal = copts.stopSignal + } + if flags.Changed("stop-timeout") { + config.StopTimeout = &copts.stopTimeout + } + + hostConfig := &container.HostConfig{ + Binds: binds, + ContainerIDFile: copts.containerIDFile, + OomScoreAdj: copts.oomScoreAdj, + AutoRemove: copts.autoRemove, + Privileged: copts.privileged, + PortBindings: portBindings, + Links: copts.links.GetAll(), + PublishAllPorts: copts.publishAll, + // Make sure the dns fields are never nil. + // New containers don't ever have those fields nil, + // but pre created containers can still have those nil values. + // See https://github.com/docker/docker/pull/17779 + // for a more detailed explanation on why we don't want that. + DNS: copts.dns.GetAllOrEmpty(), + DNSSearch: copts.dnsSearch.GetAllOrEmpty(), + DNSOptions: copts.dnsOptions.GetAllOrEmpty(), + ExtraHosts: copts.extraHosts.GetAll(), + VolumesFrom: copts.volumesFrom.GetAll(), + NetworkMode: container.NetworkMode(copts.netMode), + IpcMode: ipcMode, + PidMode: pidMode, + UTSMode: utsMode, + UsernsMode: usernsMode, + CapAdd: strslice.StrSlice(copts.capAdd.GetAll()), + CapDrop: strslice.StrSlice(copts.capDrop.GetAll()), + GroupAdd: copts.groupAdd.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: securityOpts, + StorageOpt: storageOpts, + ReadonlyRootfs: copts.readonlyRootfs, + LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts}, + VolumeDriver: copts.volumeDriver, + Isolation: container.Isolation(copts.isolation), + ShmSize: shmSize, + Resources: resources, + Tmpfs: tmpfs, + Sysctls: copts.sysctls.GetAll(), + Runtime: copts.runtime, + } + + if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() { + return nil, nil, nil, fmt.Errorf("Conflicting options: --restart and --rm") + } + + // only set this value if the user provided the flag, else it should default to nil + if flags.Changed("init") { + hostConfig.Init = &copts.init + } + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + + networkingConfig := &networktypes.NetworkingConfig{ + EndpointsConfig: make(map[string]*networktypes.EndpointSettings), + } + + if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 { + epConfig := &networktypes.EndpointSettings{} + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + + epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: copts.ipv4Address, + IPv6Address: copts.ipv6Address, + } + + if copts.linkLocalIPs.Len() > 0 { + epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len()) + copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll()) + } + } + + if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Links = make([]string, len(hostConfig.Links)) + copy(epConfig.Links, hostConfig.Links) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + if copts.aliases.Len() > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Aliases = make([]string, copts.aliases.Len()) + copy(epConfig.Aliases, copts.aliases.GetAll()) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + return config, hostConfig, networkingConfig, nil +} + +// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys +// present in the file with additional pairs specified in the override parameter +func ReadKVStrings(files []string, override []string) ([]string, error) { + envVariables := []string{} + for _, ef := range files { + parsedVars, err := ParseEnvFile(ef) + if err != nil { + return nil, err + } + envVariables = append(envVariables, parsedVars...) + } + // parse the '-e' and '--env' after, to allow override + envVariables = append(envVariables, override...) + + return envVariables, nil +} + +// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = "" + } else { + result[kv[0]] = kv[1] + } + } + + return result +} + +// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"} +// but set unset keys to nil - meaning the ones with no "=" in them. +// We use this in cases where we need to distinguish between +// FOO= and FOO +// where the latter case just means FOO was mentioned but not given a value +func ConvertKVStringsToMapWithNil(values []string) map[string]*string { + result := make(map[string]*string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = nil + } else { + result[kv[0]] = &kv[1] + } + } + + return result +} + +func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { + loggingOptsMap := ConvertKVStringsToMap(loggingOpts) + if loggingDriver == "none" && len(loggingOpts) > 0 { + return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver) + } + return loggingOptsMap, nil +} + +// takes a local seccomp daemon, reads the file contents for sending to the daemon +func parseSecurityOpts(securityOpts []string) ([]string, error) { + for key, opt := range securityOpts { + con := strings.SplitN(opt, "=", 2) + if len(con) == 1 && con[0] != "no-new-privileges" { + if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + } else { + return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt) + } + } + if con[0] == "seccomp" && con[1] != "unconfined" { + f, err := ioutil.ReadFile(con[1]) + if err != nil { + return securityOpts, fmt.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) + } + b := bytes.NewBuffer(nil) + if err := json.Compact(b, f); err != nil { + return securityOpts, fmt.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) + } + securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) + } + } + + return securityOpts, nil +} + +// parses storage options per container into a map +func parseStorageOpts(storageOpts []string) (map[string]string, error) { + m := make(map[string]string) + for _, option := range storageOpts { + if strings.Contains(option, "=") { + opt := strings.SplitN(option, "=", 2) + m[opt[0]] = opt[1] + } else { + return nil, fmt.Errorf("invalid storage option") + } + } + return m, nil +} + +// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { + p := container.RestartPolicy{} + + if policy == "" { + return p, nil + } + + parts := strings.Split(policy, ":") + + if len(parts) > 2 { + return p, fmt.Errorf("invalid restart policy format") + } + if len(parts) == 2 { + count, err := strconv.Atoi(parts[1]) + if err != nil { + return p, fmt.Errorf("maximum retry count must be an integer") + } + + p.MaximumRetryCount = count + } + + p.Name = parts[0] + + return p, nil +} + +// ParseDevice parses a device mapping string to a container.DeviceMapping struct +func ParseDevice(device string) (container.DeviceMapping, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + if ValidDeviceMode(arr[1]) { + permissions = arr[1] + } else { + dst = arr[1] + } + fallthrough + case 1: + src = arr[0] + default: + return container.DeviceMapping{}, fmt.Errorf("invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + + deviceMapping := container.DeviceMapping{ + PathOnHost: src, + PathInContainer: dst, + CgroupPermissions: permissions, + } + return deviceMapping, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get a HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} + +// ValidateLink validates that the specified string has a valid link format (containerName:alias). +func ValidateLink(val string) (string, error) { + if _, _, err := ParseLink(val); err != nil { + return val, err + } + return val, nil +} + +// ValidDeviceMode checks if the mode for device is valid or not. +// Valid mode is a composition of r (read), w (write), and m (mknod). +func ValidDeviceMode(mode string) bool { + var legalDeviceMode = map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} + +// ValidateDevice validates a path for devices +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:mode] +// It also validates the device mode. +func ValidateDevice(val string) (string, error) { + return validatePath(val, ValidDeviceMode) +} + +func validatePath(val string, validator func(string) bool) (string, error) { + var containerPath string + var mode string + + if strings.Count(val, ":") > 2 { + return val, fmt.Errorf("bad format for path: %s", val) + } + + split := strings.SplitN(val, ":", 3) + if split[0] == "" { + return val, fmt.Errorf("bad format for path: %s", val) + } + switch len(split) { + case 1: + containerPath = split[0] + val = path.Clean(containerPath) + case 2: + if isValid := validator(split[1]); isValid { + containerPath = split[0] + mode = split[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = split[1] + val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) + } + case 3: + containerPath = split[1] + mode = split[2] + if isValid := validator(split[2]); !isValid { + return val, fmt.Errorf("bad mode specified: %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) + } + + if !path.IsAbs(containerPath) { + return val, fmt.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon. +// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). +// In Windows driver letter appears in two situations: +// a. `^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option) +// b. A string in the format like `\\?\C:\Windows\...` (UNC). +// Therefore, a driver letter can only follow either a `:` or `\\` +// This allows to correctly split strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo`. +func volumeSplitN(raw string, n int) []string { + var array []string + if len(raw) == 0 || raw[0] == ':' { + // invalid + return nil + } + // numberOfParts counts the number of parts separated by a separator colon + numberOfParts := 0 + // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. + left := 0 + // right represents the right-most cursor in raw incremented with the loop. Note this + // starts at index 1 as index 0 is already handle above as a special case. + for right := 1; right < len(raw); right++ { + // stop parsing if reached maximum number of parts + if n >= 0 && numberOfParts >= n { + break + } + if raw[right] != ':' { + continue + } + potentialDriveLetter := raw[right-1] + if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { + if right > 1 { + beforePotentialDriveLetter := raw[right-2] + // Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`) + if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' { + // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. + array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. + } + // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. + } else { + // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. + array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + } + // need to take care of the last part + if left < len(raw) { + if n >= 0 && numberOfParts >= n { + // if the maximum number of parts is reached, just append the rest to the last part + // left-1 is at the last `:` that needs to be included since not considered a separator. + array[n-1] += raw[left-1:] + } else { + array = append(array, raw[left:]) + } + } + return array +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/parse_test.go b/vendor/github.com/moby/moby/runconfig/opts/parse_test.go new file mode 100644 index 0000000..98894bb --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/parse_test.go @@ -0,0 +1,902 @@ +package opts + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/spf13/pflag" +) + +func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + flags := pflag.NewFlagSet("run", pflag.ContinueOnError) + flags.SetOutput(ioutil.Discard) + flags.Usage = nil + copts := AddFlags(flags) + if err := flags.Parse(args); err != nil { + return nil, nil, nil, err + } + return Parse(flags, copts) +} + +func parse(t *testing.T, args string) (*container.Config, *container.HostConfig, error) { + config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) + return config, hostConfig, err +} + +func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) { + config, hostConfig, err := parse(t, args) + if err != nil { + t.Fatal(err) + } + return config, hostConfig +} + +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + + if _, _, err := parse(t, "-a"); err == nil { + t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid"); err == nil { + t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdin -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-d --rm"); err == nil { + t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") + } +} + +func TestParseRunVolumes(t *testing.T) { + + // A single volume + arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) + } + + // Two volumes + arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) + } + + // A single bind-mount + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) + } + + // Two bind-mounts. + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Two bind-mounts, first read-only, second read-write. + // TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4 + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Similar to previous test but with alternate modes which are only supported by Linux + if runtime.GOOS != "windows" { + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + } + + // One bind mount and one volume + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) + } + + // Root to non-c: drive letter (Windows specific) + if runtime.GOOS == "windows" { + arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { + t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0]) + } + } + +} + +// This tests the cases for binds which are generated through +// DecodeContainerConfig rather than Parse() +func TestDecodeContainerConfigVolumes(t *testing.T) { + + // Root to root + bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // No destination path + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // // No destination path or mode + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // A whole lot of nothing + bindsOrVols = []string{`:`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // A whole lot of nothing with no mode + bindsOrVols = []string{`::`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Too much including an invalid mode + wTmp := os.Getenv("TEMP") + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Windows specific error tests + if runtime.GOOS == "windows" { + // Volume which does not include a drive letter + bindsOrVols = []string{`\tmp`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Root to C-Drive + bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Container path that does not include a drive letter + bindsOrVols = []string{`c:\windows:\somewhere`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + } + + // Linux-specific error tests + if runtime.GOOS != "windows" { + // Just root + bindsOrVols = []string{`/`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // A single volume that looks like a bind mount passed in Volumes. + // This should be handled as a bind mount, not a volume. + vols := []string{`/foo:/bar`} + if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { + t.Fatal("Volume /foo:/bar should have succeeded as a volume name") + } else if hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes[vols[0]]; !exists { + t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes) + } + + } +} + +// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes +// to call DecodeContainerConfig. It effectively does what a client would +// do when calling the daemon by constructing a JSON stream of a +// ContainerConfigWrapper which is populated by the set of volume specs +// passed into it. It returns a config and a hostconfig which can be +// validated to ensure DecodeContainerConfig has manipulated the structures +// correctly. +func callDecodeContainerConfig(volumes []string, binds []string) (*container.Config, *container.HostConfig, error) { + var ( + b []byte + err error + c *container.Config + h *container.HostConfig + ) + w := runconfig.ContainerConfigWrapper{ + Config: &container.Config{ + Volumes: map[string]struct{}{}, + }, + HostConfig: &container.HostConfig{ + NetworkMode: "none", + Binds: binds, + }, + } + for _, v := range volumes { + w.Config.Volumes[v] = struct{}{} + } + if b, err = json.Marshal(w); err != nil { + return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) + } + c, h, _, err = runconfig.DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + return nil, nil, fmt.Errorf("Error parsing %s: %v", string(b), err) + } + if c == nil || h == nil { + return nil, nil, fmt.Errorf("Empty config or hostconfig") + } + + return c, h, err +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return fmt.Errorf("strings don't match") +} + +// setupPlatformVolume takes two arrays of volume specs - a Unix style +// spec and a Windows style spec. Depending on the platform being unit tested, +// it returns one of them, along with a volume string that would be passed +// on the docker CLI (eg -v /bar -v /foo). +func setupPlatformVolume(u []string, w []string) ([]string, string) { + var a []string + if runtime.GOOS == "windows" { + a = w + } else { + a = u + } + s := "" + for _, v := range a { + s = s + "-v " + v + " " + } + return a, s +} + +// Simple parse with MacAddress validation +func TestParseWithMacAddress(t *testing.T) { + invalidMacAddress := "--mac-address=invalidMacAddress" + validMacAddress := "--mac-address=92:d0:c6:0a:29:33" + if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { + t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) + } + if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { + t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) + } +} + +func TestParseWithMemory(t *testing.T) { + invalidMemory := "--memory=invalid" + validMemory := "--memory=1G" + if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err != nil && err.Error() != "invalid size: 'invalid'" { + t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err) + } + if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 { + t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory) + } +} + +func TestParseWithMemorySwap(t *testing.T) { + invalidMemory := "--memory-swap=invalid" + validMemory := "--memory-swap=1G" + anotherValidMemory := "--memory-swap=-1" + if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" { + t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err) + } + if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 { + t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap) + } + if _, hostconfig := mustParse(t, anotherValidMemory); hostconfig.MemorySwap != -1 { + t.Fatalf("Expected the config to have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap) + } +} + +func TestParseHostname(t *testing.T) { + validHostnames := map[string]string{ + "hostname": "hostname", + "host-name": "host-name", + "hostname123": "hostname123", + "123hostname": "123hostname", + "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", + } + hostnameWithDomain := "--hostname=hostname.domainname" + hostnameWithDomainTld := "--hostname=hostname.domainname.tld" + for hostname, expectedHostname := range validHostnames { + if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { + t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) + } + } + if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" && config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got '%v'", config.Hostname) + } + if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" && config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got '%v'", config.Hostname) + } +} + +func TestParseWithExpose(t *testing.T) { + invalids := map[string]string{ + ":": "invalid port format for --expose: :", + "8080:9090": "invalid port format for --expose: 8080:9090", + "/tcp": "invalid range format for --expose: /tcp, error: Empty string specified for ports.", + "/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.", + "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, + } + valids := map[string][]nat.Port{ + "8080/tcp": {"8080/tcp"}, + "8080/udp": {"8080/udp"}, + "8080/ncp": {"8080/ncp"}, + "8080-8080/udp": {"8080/udp"}, + "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, + } + for expose, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) + } + } + for expose, exposedPorts := range valids { + config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != len(exposedPorts) { + t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) + } + for _, port := range exposedPorts { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) + } + } + } + // Merge with actual published port + config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != 2 { + t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) + } + ports := []nat.Port{"80/tcp", "81/tcp"} + for _, port := range ports { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) + } + } +} + +func TestParseDevice(t *testing.T) { + valids := map[string]container.DeviceMapping{ + "/dev/snd": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rwm", + }, + "/dev/snd:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rw", + }, + "/dev/snd:/something": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rwm", + }, + "/dev/snd:/something:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rw", + }, + } + for device, deviceMapping := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(hostconfig.Devices) != 1 { + t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) + } + if hostconfig.Devices[0] != deviceMapping { + t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) + } + } + +} + +func TestParseModes(t *testing.T) { + // ipc ko + if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { + t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) + } + // ipc ok + _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.IpcMode.Valid() { + t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode) + } + // pid ko + if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { + t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) + } + // pid ok + _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.PidMode.Valid() { + t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) + } + // uts ko + if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { + t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) + } + // uts ok + _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.UTSMode.Valid() { + t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) + } + // shm-size ko + if _, _, _, err = parseRun([]string{"--shm-size=a128m", "img", "cmd"}); err == nil || err.Error() != "invalid size: 'a128m'" { + t.Fatalf("Expected an error with message 'invalid size: a128m', got %v", err) + } + // shm-size ok + _, hostconfig, _, err = parseRun([]string{"--shm-size=128m", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.ShmSize != 134217728 { + t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) + } +} + +func TestParseRestartPolicy(t *testing.T) { + invalids := map[string]string{ + "always:2:3": "invalid restart policy format", + "on-failure:invalid": "maximum retry count must be an integer", + } + valids := map[string]container.RestartPolicy{ + "": {}, + "always": { + Name: "always", + MaximumRetryCount: 0, + }, + "on-failure:1": { + Name: "on-failure", + MaximumRetryCount: 1, + }, + } + for restart, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) + } + } + for restart, expected := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.RestartPolicy != expected { + t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) + } + } +} + +func TestParseRestartPolicyAutoRemove(t *testing.T) { + expected := "Conflicting options: --restart and --rm" + _, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"}) + if err == nil || err.Error() != expected { + t.Fatalf("Expected error %v, but got none", expected) + } +} + +func TestParseHealth(t *testing.T) { + checkOk := func(args ...string) *container.HealthConfig { + config, _, _, err := parseRun(args) + if err != nil { + t.Fatalf("%#v: %v", args, err) + } + return config.Healthcheck + } + checkError := func(expected string, args ...string) { + config, _, _, err := parseRun(args) + if err == nil { + t.Fatalf("Expected error, but got %#v", config) + } + if err.Error() != expected { + t.Fatalf("Expected %#v, got %#v", expected, err) + } + } + health := checkOk("--no-healthcheck", "img", "cmd") + if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { + t.Fatalf("--no-healthcheck failed: %#v", health) + } + + health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") + if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { + t.Fatalf("--health-cmd: got %#v", health.Test) + } + if health.Timeout != 0 { + t.Fatalf("--health-cmd: timeout = %f", health.Timeout) + } + + checkError("--no-healthcheck conflicts with --health-* options", + "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") + + health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "img", "cmd") + if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond { + t.Fatalf("--health-*: got %#v", health) + } +} + +func TestParseLoggingOpts(t *testing.T) { + // logging opts ko + if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { + t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) + } + // logging opts ok + _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { + t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) + } +} + +func TestParseEnvfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." + } + // env ko + if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // env ok + config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { + t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) + } + config, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { + t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) + } +} + +func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) { + // UTF8 with BOM + config, _, _, err := parseRun([]string{"--env-file=fixtures/utf8.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + env := []string{"FOO=BAR", "HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"} + if len(config.Env) != len(env) { + t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env) + } + for i, v := range env { + if config.Env[i] != v { + t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i])) + } + } + + // UTF16 with BOM + e := "contains invalid utf8 bytes at line" + if _, _, _, err := parseRun([]string{"--env-file=fixtures/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // UTF16BE with BOM + if _, _, _, err := parseRun([]string{"--env-file=fixtures/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } +} + +func TestParseLabelfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." + } + // label ko + if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // label ok + config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { + t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) + } + config, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { + t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) + } +} + +func TestParseEntryPoint(t *testing.T) { + config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) + if err != nil { + t.Fatal(err) + } + if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { + t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) + } +} + +func TestValidateLink(t *testing.T) { + valid := []string{ + "name", + "dcdfbe62ecd0:alias", + "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", + "angry_torvalds:linus", + } + invalid := map[string]string{ + "": "empty string specified for links", + "too:much:of:it": "bad format for links: too:much:of:it", + } + + for _, link := range valid { + if _, err := ValidateLink(link); err != nil { + t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) + } + } + + for link, expectedError := range invalid { + if _, err := ValidateLink(link); err == nil { + t.Fatalf("ValidateLink(`%q`) should have failed validation", link) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) + } + } + } +} + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} + +func TestValidateDevice(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:r", + "/hostPath:/containerPath:rw", + "/hostPath:/containerPath:mrw", + } + invalid := map[string]string{ + "": "bad format for path: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for path: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for path: :test", + ":/test": "bad format for path: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for path: :test:", + "::": "bad format for path: ::", + ":::": "bad format for path: :::", + "/tmp:::": "bad format for path: /tmp:::", + ":/tmp::": "bad format for path: :/tmp::", + "path:ro": "ro is not an absolute path", + "path:rr": "rr is not an absolute path", + "a:/b:ro": "bad mode specified: ro", + "a:/b:rr": "bad mode specified: rr", + } + + for _, path := range valid { + if _, err := ValidateDevice(path); err != nil { + t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ValidateDevice(path); err == nil { + t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} + +func TestVolumeSplitN(t *testing.T) { + for _, x := range []struct { + input string + n int + expected []string + }{ + {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, + {`:C:\foo:d:`, -1, nil}, + {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, + {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, + {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, + + {`d:\`, -1, []string{`d:\`}}, + {`d:`, -1, []string{`d:`}}, + {`d:\path`, -1, []string{`d:\path`}}, + {`d:\path with space`, -1, []string{`d:\path with space`}}, + {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, + {`c:\:d:\`, -1, []string{`c:\`, `d:\`}}, + {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, + {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, + {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, + {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, + {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, + {`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, + {`name:D:`, -1, []string{`name`, `D:`}}, + {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, + {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, + {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, + {`c:\Windows`, -1, []string{`c:\Windows`}}, + {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, + + {``, -1, nil}, + {`.`, -1, []string{`.`}}, + {`..\`, -1, []string{`..\`}}, + {`c:\:..\`, -1, []string{`c:\`, `..\`}}, + {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, + + // Cover directories with one-character name + {`/tmp/x/y:/foo/x/y`, -1, []string{`/tmp/x/y`, `/foo/x/y`}}, + } { + res := volumeSplitN(x.input, x.n) + if len(res) < len(x.expected) { + t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) + } + for i, e := range res { + if e != x.expected[i] { + t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) + } + } + } +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/runtime.go b/vendor/github.com/moby/moby/runconfig/opts/runtime.go new file mode 100644 index 0000000..4361b3c --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/runtime.go @@ -0,0 +1,79 @@ +package opts + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" +) + +// RuntimeOpt defines a map of Runtimes +type RuntimeOpt struct { + name string + stockRuntimeName string + values *map[string]types.Runtime +} + +// NewNamedRuntimeOpt creates a new RuntimeOpt +func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { + if ref == nil { + ref = &map[string]types.Runtime{} + } + return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *RuntimeOpt) Name() string { + return o.name +} + +// Set validates and updates the list of Runtimes +func (o *RuntimeOpt) Set(val string) error { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.TrimSpace(parts[0]) + parts[1] = strings.TrimSpace(parts[1]) + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.ToLower(parts[0]) + if parts[0] == o.stockRuntimeName { + return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) + } + + if _, ok := (*o.values)[parts[0]]; ok { + return fmt.Errorf("runtime '%s' was already defined", parts[0]) + } + + (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} + + return nil +} + +// String returns Runtime values as a string. +func (o *RuntimeOpt) String() string { + var out []string + for k := range *o.values { + out = append(out, k) + } + + return fmt.Sprintf("%v", out) +} + +// GetMap returns a map of Runtimes (name: path) +func (o *RuntimeOpt) GetMap() map[string]types.Runtime { + if o.values != nil { + return *o.values + } + + return map[string]types.Runtime{} +} + +// Type returns the type of the option +func (o *RuntimeOpt) Type() string { + return "runtime" +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/throttledevice.go b/vendor/github.com/moby/moby/runconfig/opts/throttledevice.go new file mode 100644 index 0000000..5024324 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/throttledevice.go @@ -0,0 +1,111 @@ +package opts + +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/go-units" +) + +// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. +type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) + +// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. +func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := units.RAMInBytes(split[1]) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + } + if rate < 0 { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + } + + return &blkiodev.ThrottleDevice{ + Path: split[0], + Rate: uint64(rate), + }, nil +} + +// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. +func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := strconv.ParseUint(split[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) + } + if rate < 0 { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) + } + + return &blkiodev.ThrottleDevice{ + Path: split[0], + Rate: uint64(rate), + }, nil +} + +// ThrottledeviceOpt defines a map of ThrottleDevices +type ThrottledeviceOpt struct { + values []*blkiodev.ThrottleDevice + validator ValidatorThrottleFctType +} + +// NewThrottledeviceOpt creates a new ThrottledeviceOpt +func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { + values := []*blkiodev.ThrottleDevice{} + return ThrottledeviceOpt{ + values: values, + validator: validator, + } +} + +// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt +func (opt *ThrottledeviceOpt) Set(val string) error { + var value *blkiodev.ThrottleDevice + if opt.validator != nil { + v, err := opt.validator(val) + if err != nil { + return err + } + value = v + } + (opt.values) = append((opt.values), value) + return nil +} + +// String returns ThrottledeviceOpt values as a string. +func (opt *ThrottledeviceOpt) String() string { + var out []string + for _, v := range opt.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to ThrottleDevices. +func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { + var throttledevice []*blkiodev.ThrottleDevice + throttledevice = append(throttledevice, opt.values...) + + return throttledevice +} + +// Type returns the option type +func (opt *ThrottledeviceOpt) Type() string { + return "throttled-device" +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/ulimit.go b/vendor/github.com/moby/moby/runconfig/opts/ulimit.go new file mode 100644 index 0000000..5adfe30 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/ulimit.go @@ -0,0 +1,57 @@ +package opts + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UlimitOpt defines a map of Ulimits +type UlimitOpt struct { + values *map[string]*units.Ulimit +} + +// NewUlimitOpt creates a new UlimitOpt +func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &UlimitOpt{ref} +} + +// Set validates a Ulimit and sets its name as a key in UlimitOpt +func (o *UlimitOpt) Set(val string) error { + l, err := units.ParseUlimit(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +// String returns Ulimit values as a string. +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to Ulimits. +func (o *UlimitOpt) GetList() []*units.Ulimit { + var ulimits []*units.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} + +// Type returns the option type +func (o *UlimitOpt) Type() string { + return "ulimit" +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/ulimit_test.go b/vendor/github.com/moby/moby/runconfig/opts/ulimit_test.go new file mode 100644 index 0000000..0aa3fac --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/ulimit_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "testing" + + "github.com/docker/go-units" +) + +func TestUlimitOpt(t *testing.T) { + ulimitMap := map[string]*units.Ulimit{ + "nofile": {"nofile", 1024, 512}, + } + + ulimitOpt := NewUlimitOpt(&ulimitMap) + + expected := "[nofile=512:1024]" + if ulimitOpt.String() != expected { + t.Fatalf("Expected %v, got %v", expected, ulimitOpt) + } + + // Valid ulimit append to opts + if err := ulimitOpt.Set("core=1024:1024"); err != nil { + t.Fatal(err) + } + + // Invalid ulimit type returns an error and do not append to opts + if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { + t.Fatalf("Expected error on invalid ulimit type") + } + expected = "[nofile=512:1024 core=1024:1024]" + expected2 := "[core=1024:1024 nofile=512:1024]" + result := ulimitOpt.String() + if result != expected && result != expected2 { + t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) + } + + // And test GetList + ulimits := ulimitOpt.GetList() + if len(ulimits) != 2 { + t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) + } +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/weightdevice.go b/vendor/github.com/moby/moby/runconfig/opts/weightdevice.go new file mode 100644 index 0000000..2a5da6d --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/weightdevice.go @@ -0,0 +1,89 @@ +package opts + +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/api/types/blkiodev" +) + +// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. +type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) + +// ValidateWeightDevice validates that the specified string has a valid device-weight format. +func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + weight, err := strconv.ParseUint(split[1], 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + if weight > 0 && (weight < 10 || weight > 1000) { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + + return &blkiodev.WeightDevice{ + Path: split[0], + Weight: uint16(weight), + }, nil +} + +// WeightdeviceOpt defines a map of WeightDevices +type WeightdeviceOpt struct { + values []*blkiodev.WeightDevice + validator ValidatorWeightFctType +} + +// NewWeightdeviceOpt creates a new WeightdeviceOpt +func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { + values := []*blkiodev.WeightDevice{} + return WeightdeviceOpt{ + values: values, + validator: validator, + } +} + +// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt +func (opt *WeightdeviceOpt) Set(val string) error { + var value *blkiodev.WeightDevice + if opt.validator != nil { + v, err := opt.validator(val) + if err != nil { + return err + } + value = v + } + (opt.values) = append((opt.values), value) + return nil +} + +// String returns WeightdeviceOpt values as a string. +func (opt *WeightdeviceOpt) String() string { + var out []string + for _, v := range opt.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to WeightDevices. +func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { + var weightdevice []*blkiodev.WeightDevice + for _, v := range opt.values { + weightdevice = append(weightdevice, v) + } + + return weightdevice +} + +// Type returns the option type +func (opt *WeightdeviceOpt) Type() string { + return "weighted-device" +} diff --git a/vendor/github.com/moby/moby/utils/debug.go b/vendor/github.com/moby/moby/utils/debug.go new file mode 100644 index 0000000..d203891 --- /dev/null +++ b/vendor/github.com/moby/moby/utils/debug.go @@ -0,0 +1,26 @@ +package utils + +import ( + "os" + + "github.com/Sirupsen/logrus" +) + +// EnableDebug sets the DEBUG env var to true +// and makes the logger to log at debug level. +func EnableDebug() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// DisableDebug sets the DEBUG env var to false +// and makes the logger to log at info level. +func DisableDebug() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsDebugEnabled checks whether the debug flag is set or not. +func IsDebugEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/vendor/github.com/moby/moby/utils/debug_test.go b/vendor/github.com/moby/moby/utils/debug_test.go new file mode 100644 index 0000000..6f9c4df --- /dev/null +++ b/vendor/github.com/moby/moby/utils/debug_test.go @@ -0,0 +1,43 @@ +package utils + +import ( + "os" + "testing" + + "github.com/Sirupsen/logrus" +) + +func TestEnableDebug(t *testing.T) { + defer func() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) + }() + EnableDebug() + if os.Getenv("DEBUG") != "1" { + t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.DebugLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.DebugLevel, logrus.GetLevel()) + } +} + +func TestDisableDebug(t *testing.T) { + DisableDebug() + if os.Getenv("DEBUG") != "" { + t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.InfoLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.InfoLevel, logrus.GetLevel()) + } +} + +func TestDebugEnabled(t *testing.T) { + EnableDebug() + if !IsDebugEnabled() { + t.Fatal("expected debug enabled, got false") + } + DisableDebug() + if IsDebugEnabled() { + t.Fatal("expected debug disabled, got true") + } +} diff --git a/vendor/github.com/moby/moby/utils/names.go b/vendor/github.com/moby/moby/utils/names.go new file mode 100644 index 0000000..6320628 --- /dev/null +++ b/vendor/github.com/moby/moby/utils/names.go @@ -0,0 +1,9 @@ +package utils + +import "regexp" + +// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. +const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + +// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. +var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/vendor/github.com/moby/moby/utils/process_unix.go b/vendor/github.com/moby/moby/utils/process_unix.go new file mode 100644 index 0000000..fc0b1c8 --- /dev/null +++ b/vendor/github.com/moby/moby/utils/process_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd solaris + +package utils + +import ( + "syscall" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := syscall.Kill(pid, syscall.Signal(0)) + if err == nil || err == syscall.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + syscall.Kill(pid, syscall.SIGKILL) +} diff --git a/vendor/github.com/moby/moby/utils/process_windows.go b/vendor/github.com/moby/moby/utils/process_windows.go new file mode 100644 index 0000000..03cb855 --- /dev/null +++ b/vendor/github.com/moby/moby/utils/process_windows.go @@ -0,0 +1,20 @@ +package utils + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + // TODO Windows containerd. Not sure this is needed + // p, err := os.FindProcess(pid) + // if err == nil { + // return true + // } + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + // TODO Windows containerd. Not sure this is needed + // p, err := os.FindProcess(pid) + // if err == nil { + // p.Kill() + // } +} diff --git a/vendor/github.com/moby/moby/utils/templates/templates.go b/vendor/github.com/moby/moby/utils/templates/templates.go new file mode 100644 index 0000000..91c376f --- /dev/null +++ b/vendor/github.com/moby/moby/utils/templates/templates.go @@ -0,0 +1,42 @@ +package templates + +import ( + "encoding/json" + "strings" + "text/template" +) + +// basicFunctions are the set of initial +// functions provided to every template. +var basicFunctions = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, + "split": strings.Split, + "join": strings.Join, + "title": strings.Title, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "pad": padWithSpace, +} + +// Parse creates a new annonymous template with the basic functions +// and parses the given format. +func Parse(format string) (*template.Template, error) { + return NewParse("", format) +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. +func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} + +// padWithSpace adds whitespace to the input if the input is non-empty +func padWithSpace(source string, prefix, suffix int) string { + if source == "" { + return source + } + return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) +} diff --git a/vendor/github.com/moby/moby/utils/templates/templates_test.go b/vendor/github.com/moby/moby/utils/templates/templates_test.go new file mode 100644 index 0000000..dd42901 --- /dev/null +++ b/vendor/github.com/moby/moby/utils/templates/templates_test.go @@ -0,0 +1,38 @@ +package templates + +import ( + "bytes" + "testing" +) + +func TestParseStringFunctions(t *testing.T) { + tm, err := Parse(`{{join (split . ":") "/"}}`) + if err != nil { + t.Fatal(err) + } + + var b bytes.Buffer + if err := tm.Execute(&b, "text:with:colon"); err != nil { + t.Fatal(err) + } + want := "text/with/colon" + if b.String() != want { + t.Fatalf("expected %s, got %s", want, b.String()) + } +} + +func TestNewParse(t *testing.T) { + tm, err := NewParse("foo", "this is a {{ . }}") + if err != nil { + t.Fatal(err) + } + + var b bytes.Buffer + if err := tm.Execute(&b, "string"); err != nil { + t.Fatal(err) + } + want := "this is a string" + if b.String() != want { + t.Fatalf("expected %s, got %s", want, b.String()) + } +} diff --git a/vendor/github.com/moby/moby/utils/utils.go b/vendor/github.com/moby/moby/utils/utils.go new file mode 100644 index 0000000..d3dd00a --- /dev/null +++ b/vendor/github.com/moby/moby/utils/utils.go @@ -0,0 +1,87 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +var globalTestID string + +// TestDirectory creates a new temporary directory and returns its path. +// The contents of directory at path `templateDir` is copied into the +// new directory. +func TestDirectory(templateDir string) (dir string, err error) { + if globalTestID == "" { + globalTestID = stringid.GenerateNonCryptoID()[:4] + } + prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = archive.CopyWithTar(templateDir, dir); err != nil { + return + } + } + return +} + +// GetCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func GetCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. + pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) + i-- + } + } + + return defaults +} diff --git a/vendor/github.com/moby/moby/utils/utils_test.go b/vendor/github.com/moby/moby/utils/utils_test.go new file mode 100644 index 0000000..ab3911e --- /dev/null +++ b/vendor/github.com/moby/moby/utils/utils_test.go @@ -0,0 +1,21 @@ +package utils + +import "testing" + +func TestReplaceAndAppendEnvVars(t *testing.T) { + var ( + d = []string{"HOME=/"} + o = []string{"HOME=/root", "TERM=xterm"} + ) + + env := ReplaceOrAppendEnvValues(d, o) + if len(env) != 2 { + t.Fatalf("expected len of 2 got %d", len(env)) + } + if env[0] != "HOME=/root" { + t.Fatalf("expected HOME=/root got '%s'", env[0]) + } + if env[1] != "TERM=xterm" { + t.Fatalf("expected TERM=xterm got '%s'", env[1]) + } +} diff --git a/vendor/github.com/moby/moby/vendor.conf b/vendor/github.com/moby/moby/vendor.conf new file mode 100644 index 0000000..399b6e6 --- /dev/null +++ b/vendor/github.com/moby/moby/vendor.conf @@ -0,0 +1,140 @@ +# the following lines are in sorted order, FYI +github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 +github.com/Microsoft/hcsshim v0.5.12 +github.com/Microsoft/go-winio v0.3.8 +github.com/Sirupsen/logrus v0.11.0 +github.com/davecgh/go-spew 6d212800a42e8ab5c146b8ace3490ee17e5225f9 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git +github.com/gorilla/context v1.1 +github.com/gorilla/mux v1.1 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.0 +github.com/mattn/go-sqlite3 v1.1.0 +github.com/tchap/go-patricia v2.2.6 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +# forked golang.org/x/net package includes a patch for lazy loading trace templates +golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git +golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 +github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97 +github.com/docker/go-connections ecb4cb2dd420ada7df7f2593d6c25441f65f69f2 + +github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 +github.com/imdario/mergo 0.2.1 + +#get libnetwork packages +github.com/docker/libnetwork 57248dc8d53e7b6cf3124db8f22bf8bbf5d363cb +github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 +github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 +github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e +github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 +github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +github.com/vishvananda/netlink 482f7a52b758233521878cb6c5904b6bd63f3457 +github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707 +github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 +github.com/hashicorp/consul v0.5.2 +github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 + +# get graph and distribution packages +github.com/docker/distribution 28602af35aceda2f8d571bad7ca37a54cf0250bc +github.com/vbatts/tar-split v0.10.1 + +# get go-zfs packages +github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa +github.com/pborman/uuid v1.0 + +# get desired notary commit, might also need to be updated in Dockerfile +github.com/docker/notary v0.4.2 + +google.golang.org/grpc v1.0.2 +github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f +github.com/docker/go v1.5.1-1-1-gbaf439e +github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c + +# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly +github.com/opencontainers/runc 54296cf40ad8143b62dbcaa1d90e520a2136ddfe https://github.com/docker/runc.git # libcontainer +github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 +# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) +github.com/coreos/go-systemd v4 +github.com/godbus/dbus v4.0.0 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a + +# gelf logging driver deps +github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 + +github.com/fluent/fluent-logger-golang v1.2.1 +# fluent-logger-golang deps +github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa +github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c + +# fsnotify +github.com/fsnotify/fsnotify v1.2.11 + +# awslogs deps +github.com/aws/aws-sdk-go v1.4.22 +github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 + +# logentries +github.com/bsphere/le_go d3308aafe090956bc89a65f0769f58251a1b4f03 + +# gcplogs deps +golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be +google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 +google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 + +# native credentials +github.com/docker/docker-credential-helpers f72c04f1d8e71959a6d103f808c50ccbad79b9fd + +# containerd +github.com/docker/containerd 4ab9917febca54791c5f071a9d1f404867857fcc +github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 + +# cluster +github.com/docker/swarmkit 2591ac368b6a8ae1c6f8438874e62f8e4778450c +github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 +github.com/gogo/protobuf v0.3 +github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a +github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e +golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47 +github.com/hashicorp/go-memdb 608dda3b1410a73eaf3ac8b517c9ae7ebab6aa87 +github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 +github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 +github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 +github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 +github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 +github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 +github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 +bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675 +github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 + +# cli +github.com/spf13/cobra v1.5 https://github.com/dnephin/cobra.git +github.com/spf13/pflag dabebe21bf790f782ea4c7bbd2efc430de182afd +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff + +# metrics +github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 + +# composefile +github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 +github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a +github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 +github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d +gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 diff --git a/vendor/github.com/moby/moby/volume/drivers/adapter.go b/vendor/github.com/moby/moby/volume/drivers/adapter.go new file mode 100644 index 0000000..62ef7df --- /dev/null +++ b/vendor/github.com/moby/moby/volume/drivers/adapter.go @@ -0,0 +1,177 @@ +package volumedrivers + +import ( + "errors" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/volume" +) + +var ( + errNoSuchVolume = errors.New("no such volume") +) + +type volumeDriverAdapter struct { + name string + baseHostPath string + capabilities *volume.Capability + proxy *volumeDriverProxy +} + +func (a *volumeDriverAdapter) Name() string { + return a.name +} + +func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := a.proxy.Create(name, opts); err != nil { + return nil, err + } + return &volumeAdapter{ + proxy: a.proxy, + name: name, + driverName: a.name, + baseHostPath: a.baseHostPath, + }, nil +} + +func (a *volumeDriverAdapter) Remove(v volume.Volume) error { + return a.proxy.Remove(v.Name()) +} + +func hostPath(baseHostPath, path string) string { + if baseHostPath != "" { + path = filepath.Join(baseHostPath, path) + } + return path +} + +func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { + ls, err := a.proxy.List() + if err != nil { + return nil, err + } + + var out []volume.Volume + for _, vp := range ls { + out = append(out, &volumeAdapter{ + proxy: a.proxy, + name: vp.Name, + baseHostPath: a.baseHostPath, + driverName: a.name, + eMount: hostPath(a.baseHostPath, vp.Mountpoint), + }) + } + return out, nil +} + +func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) { + v, err := a.proxy.Get(name) + if err != nil { + return nil, err + } + + // plugin may have returned no volume and no error + if v == nil { + return nil, errNoSuchVolume + } + + return &volumeAdapter{ + proxy: a.proxy, + name: v.Name, + driverName: a.Name(), + eMount: v.Mountpoint, + status: v.Status, + baseHostPath: a.baseHostPath, + }, nil +} + +func (a *volumeDriverAdapter) Scope() string { + cap := a.getCapabilities() + return cap.Scope +} + +func (a *volumeDriverAdapter) getCapabilities() volume.Capability { + if a.capabilities != nil { + return *a.capabilities + } + cap, err := a.proxy.Capabilities() + if err != nil { + // `GetCapabilities` is a not a required endpoint. + // On error assume it's a local-only driver + logrus.Warnf("Volume driver %s returned an error while trying to query its capabilities, using default capabilties: %v", a.name, err) + return volume.Capability{Scope: volume.LocalScope} + } + + // don't spam the warn log below just because the plugin didn't provide a scope + if len(cap.Scope) == 0 { + cap.Scope = volume.LocalScope + } + + cap.Scope = strings.ToLower(cap.Scope) + if cap.Scope != volume.LocalScope && cap.Scope != volume.GlobalScope { + logrus.Warnf("Volume driver %q returned an invalid scope: %q", a.Name(), cap.Scope) + cap.Scope = volume.LocalScope + } + + a.capabilities = &cap + return cap +} + +type volumeAdapter struct { + proxy *volumeDriverProxy + name string + baseHostPath string + driverName string + eMount string // ephemeral host volume path + status map[string]interface{} +} + +type proxyVolume struct { + Name string + Mountpoint string + Status map[string]interface{} +} + +func (a *volumeAdapter) Name() string { + return a.name +} + +func (a *volumeAdapter) DriverName() string { + return a.driverName +} + +func (a *volumeAdapter) Path() string { + if len(a.eMount) == 0 { + mountpoint, _ := a.proxy.Path(a.name) + a.eMount = hostPath(a.baseHostPath, mountpoint) + } + return a.eMount +} + +func (a *volumeAdapter) CachedPath() string { + return a.eMount +} + +func (a *volumeAdapter) Mount(id string) (string, error) { + mountpoint, err := a.proxy.Mount(a.name, id) + a.eMount = hostPath(a.baseHostPath, mountpoint) + return a.eMount, err +} + +func (a *volumeAdapter) Unmount(id string) error { + err := a.proxy.Unmount(a.name, id) + if err == nil { + a.eMount = "" + } + return err +} + +func (a *volumeAdapter) Status() map[string]interface{} { + out := make(map[string]interface{}, len(a.status)) + for k, v := range a.status { + out[k] = v + } + return out +} diff --git a/vendor/github.com/moby/moby/volume/drivers/extpoint.go b/vendor/github.com/moby/moby/volume/drivers/extpoint.go new file mode 100644 index 0000000..576dee8 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/drivers/extpoint.go @@ -0,0 +1,215 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver + +package volumedrivers + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" + getter "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/volume" +) + +// currently created by hand. generation tool would generate this like: +// $ extpoint-gen Driver > volume/extpoint.go + +var drivers = &driverExtpoint{ + extensions: make(map[string]volume.Driver), + driverLock: &locker.Locker{}, +} + +const extName = "VolumeDriver" + +// NewVolumeDriver returns a driver has the given name mapped on the given client. +func NewVolumeDriver(name string, baseHostPath string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name: name, baseHostPath: baseHostPath, proxy: proxy} +} + +// volumeDriver defines the available functions that volume plugins must implement. +// This interface is only defined to generate the proxy objects. +// It's not intended to be public or reused. +type volumeDriver interface { + // Create a volume with the given name + Create(name string, opts map[string]string) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name, id string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name, id string) (err error) + // List lists all the volumes known to the driver + List() (volumes []*proxyVolume, err error) + // Get retrieves the volume with the requested name + Get(name string) (volume *proxyVolume, err error) + // Capabilities gets the list of capabilities of the driver + Capabilities() (capabilities volume.Capability, err error) +} + +type driverExtpoint struct { + extensions map[string]volume.Driver + sync.Mutex + driverLock *locker.Locker + plugingetter getter.PluginGetter +} + +// RegisterPluginGetter sets the plugingetter +func RegisterPluginGetter(plugingetter getter.PluginGetter) { + drivers.plugingetter = plugingetter +} + +// Register associates the given driver to the given name, checking if +// the name is already associated +func Register(extension volume.Driver, name string) bool { + if name == "" { + return false + } + + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if exists { + return false + } + + if err := validateDriver(extension); err != nil { + return false + } + + drivers.extensions[name] = extension + + return true +} + +// Unregister dissociates the name from its driver, if the association exists. +func Unregister(name string) bool { + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if !exists { + return false + } + delete(drivers.extensions, name) + return true +} + +// lookup returns the driver associated with the given name. If a +// driver with the given name has not been registered it checks if +// there is a VolumeDriver plugin available with the given name. +func lookup(name string, mode int) (volume.Driver, error) { + drivers.driverLock.Lock(name) + defer drivers.driverLock.Unlock(name) + + drivers.Lock() + ext, ok := drivers.extensions[name] + drivers.Unlock() + if ok { + return ext, nil + } + if drivers.plugingetter != nil { + p, err := drivers.plugingetter.Get(name, extName, mode) + if err != nil { + return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + } + + d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client()) + if err := validateDriver(d); err != nil { + return nil, err + } + + if p.IsV1() { + drivers.Lock() + drivers.extensions[name] = d + drivers.Unlock() + } + return d, nil + } + return nil, fmt.Errorf("Error looking up volume plugin %s", name) +} + +func validateDriver(vd volume.Driver) error { + scope := vd.Scope() + if scope != volume.LocalScope && scope != volume.GlobalScope { + return fmt.Errorf("Driver %q provided an invalid capability scope: %s", vd.Name(), scope) + } + return nil +} + +// GetDriver returns a volume driver by its name. +// If the driver is empty, it looks for the local driver. +func GetDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.LOOKUP) +} + +// CreateDriver returns a volume driver by its name and increments RefCount. +// If the driver is empty, it looks for the local driver. +func CreateDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.ACQUIRE) +} + +// RemoveDriver returns a volume driver by its name and decrements RefCount.. +// If the driver is empty, it looks for the local driver. +func RemoveDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.RELEASE) +} + +// GetDriverList returns list of volume drivers registered. +// If no driver is registered, empty string list will be returned. +func GetDriverList() []string { + var driverList []string + drivers.Lock() + for driverName := range drivers.extensions { + driverList = append(driverList, driverName) + } + drivers.Unlock() + return driverList +} + +// GetAllDrivers lists all the registered drivers +func GetAllDrivers() ([]volume.Driver, error) { + var plugins []getter.CompatPlugin + if drivers.plugingetter != nil { + var err error + plugins, err = drivers.plugingetter.GetAllByCap(extName) + if err != nil { + return nil, fmt.Errorf("error listing plugins: %v", err) + } + } + var ds []volume.Driver + + drivers.Lock() + defer drivers.Unlock() + + for _, d := range drivers.extensions { + ds = append(ds, d) + } + + for _, p := range plugins { + name := p.Name() + ext, ok := drivers.extensions[name] + if ok { + continue + } + + ext = NewVolumeDriver(name, p.BasePath(), p.Client()) + if p.IsV1() { + drivers.extensions[name] = ext + } + ds = append(ds, ext) + } + return ds, nil +} diff --git a/vendor/github.com/moby/moby/volume/drivers/extpoint_test.go b/vendor/github.com/moby/moby/volume/drivers/extpoint_test.go new file mode 100644 index 0000000..428b075 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/drivers/extpoint_test.go @@ -0,0 +1,23 @@ +package volumedrivers + +import ( + "testing" + + volumetestutils "github.com/docker/docker/volume/testutils" +) + +func TestGetDriver(t *testing.T) { + _, err := GetDriver("missing") + if err == nil { + t.Fatal("Expected error, was nil") + } + Register(volumetestutils.NewFakeDriver("fake"), "fake") + + d, err := GetDriver("fake") + if err != nil { + t.Fatal(err) + } + if d.Name() != "fake" { + t.Fatalf("Expected fake driver, got %s\n", d.Name()) + } +} diff --git a/vendor/github.com/moby/moby/volume/drivers/proxy.go b/vendor/github.com/moby/moby/volume/drivers/proxy.go new file mode 100644 index 0000000..b23db62 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/drivers/proxy.go @@ -0,0 +1,242 @@ +// generated code - DO NOT EDIT + +package volumedrivers + +import ( + "errors" + + "github.com/docker/docker/volume" +) + +type client interface { + Call(string, interface{}, interface{}) error +} + +type volumeDriverProxy struct { + client +} + +type volumeDriverProxyCreateRequest struct { + Name string + Opts map[string]string +} + +type volumeDriverProxyCreateResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Create(name string, opts map[string]string) (err error) { + var ( + req volumeDriverProxyCreateRequest + ret volumeDriverProxyCreateResponse + ) + + req.Name = name + req.Opts = opts + if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyRemoveRequest struct { + Name string +} + +type volumeDriverProxyRemoveResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Remove(name string) (err error) { + var ( + req volumeDriverProxyRemoveRequest + ret volumeDriverProxyRemoveResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyPathRequest struct { + Name string +} + +type volumeDriverProxyPathResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyPathRequest + ret volumeDriverProxyPathResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyMountRequest struct { + Name string + ID string +} + +type volumeDriverProxyMountResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Mount(name string, id string) (mountpoint string, err error) { + var ( + req volumeDriverProxyMountRequest + ret volumeDriverProxyMountResponse + ) + + req.Name = name + req.ID = id + if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyUnmountRequest struct { + Name string + ID string +} + +type volumeDriverProxyUnmountResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Unmount(name string, id string) (err error) { + var ( + req volumeDriverProxyUnmountRequest + ret volumeDriverProxyUnmountResponse + ) + + req.Name = name + req.ID = id + if err = pp.Call("VolumeDriver.Unmount", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyListRequest struct { +} + +type volumeDriverProxyListResponse struct { + Volumes []*proxyVolume + Err string +} + +func (pp *volumeDriverProxy) List() (volumes []*proxyVolume, err error) { + var ( + req volumeDriverProxyListRequest + ret volumeDriverProxyListResponse + ) + + if err = pp.Call("VolumeDriver.List", req, &ret); err != nil { + return + } + + volumes = ret.Volumes + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyGetRequest struct { + Name string +} + +type volumeDriverProxyGetResponse struct { + Volume *proxyVolume + Err string +} + +func (pp *volumeDriverProxy) Get(name string) (volume *proxyVolume, err error) { + var ( + req volumeDriverProxyGetRequest + ret volumeDriverProxyGetResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Get", req, &ret); err != nil { + return + } + + volume = ret.Volume + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyCapabilitiesRequest struct { +} + +type volumeDriverProxyCapabilitiesResponse struct { + Capabilities volume.Capability + Err string +} + +func (pp *volumeDriverProxy) Capabilities() (capabilities volume.Capability, err error) { + var ( + req volumeDriverProxyCapabilitiesRequest + ret volumeDriverProxyCapabilitiesResponse + ) + + if err = pp.Call("VolumeDriver.Capabilities", req, &ret); err != nil { + return + } + + capabilities = ret.Capabilities + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} diff --git a/vendor/github.com/moby/moby/volume/drivers/proxy_test.go b/vendor/github.com/moby/moby/volume/drivers/proxy_test.go new file mode 100644 index 0000000..b78c46a --- /dev/null +++ b/vendor/github.com/moby/moby/volume/drivers/proxy_test.go @@ -0,0 +1,132 @@ +package volumedrivers + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-connections/tlsconfig" +) + +func TestVolumeRequestError(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot create volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot remove volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot mount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot unmount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Unknown volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot list volumes"}`) + }) + + mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot get volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + http.Error(w, "error", 500) + }) + + u, _ := url.Parse(server.URL) + client, err := plugins.NewClient("tcp://"+u.Host, &tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatal(err) + } + + driver := volumeDriverProxy{client} + + if err = driver.Create("volume", nil); err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot create volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Mount("volume", "123") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot mount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Unmount("volume", "123") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot unmount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Remove("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot remove volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Path("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Unknown volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.List() + if err == nil { + t.Fatal("Expected error, was nil") + } + if !strings.Contains(err.Error(), "Cannot list volumes") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Get("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + if !strings.Contains(err.Error(), "Cannot get volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Capabilities() + if err == nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/volume/local/local.go b/vendor/github.com/moby/moby/volume/local/local.go new file mode 100644 index 0000000..56fccb0 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/local.go @@ -0,0 +1,389 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" +) + +// VolumeDataPathName is the name of the directory where the volume data is stored. +// It uses a very distinctive name to avoid collisions migrating data between +// Docker versions. +const ( + VolumeDataPathName = "_data" + volumesPathName = "volumes" +) + +var ( + // ErrNotFound is the typed error returned when the requested volume name can't be found + ErrNotFound = fmt.Errorf("volume not found") + // volumeNameRegex ensures the name assigned for the volume is valid. + // This name is used to create the bind directory, so we need to avoid characters that + // would make the path to escape the root directory. + volumeNameRegex = utils.RestrictedNamePattern +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +type activeMount struct { + count uint64 + mounted bool +} + +// New instantiates a new Root instance with the provided scope. Scope +// is the base path that the Root instance uses to store its +// volumes. The base path is created here if it does not exist. +func New(scope string, rootUID, rootGID int) (*Root, error) { + rootDirectory := filepath.Join(scope, volumesPathName) + + if err := idtools.MkdirAllAs(rootDirectory, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + r := &Root{ + scope: scope, + path: rootDirectory, + volumes: make(map[string]*localVolume), + rootUID: rootUID, + rootGID: rootGID, + } + + dirs, err := ioutil.ReadDir(rootDirectory) + if err != nil { + return nil, err + } + + mountInfos, err := mount.GetMounts() + if err != nil { + logrus.Debugf("error looking up mounts for local volume cleanup: %v", err) + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + + name := filepath.Base(d.Name()) + v := &localVolume{ + driverName: r.Name(), + name: name, + path: r.DataPath(name), + } + r.volumes[name] = v + optsFilePath := filepath.Join(rootDirectory, name, "opts.json") + if b, err := ioutil.ReadFile(optsFilePath); err == nil { + opts := optsConfig{} + if err := json.Unmarshal(b, &opts); err != nil { + return nil, errors.Wrapf(err, "error while unmarshaling volume options for volume: %s", name) + } + // Make sure this isn't an empty optsConfig. + // This could be empty due to buggy behavior in older versions of Docker. + if !reflect.DeepEqual(opts, optsConfig{}) { + v.opts = &opts + } + + // unmount anything that may still be mounted (for example, from an unclean shutdown) + for _, info := range mountInfos { + if info.Mountpoint == v.path { + mount.Unmount(v.path) + break + } + } + } + } + + return r, nil +} + +// Root implements the Driver interface for the volume package and +// manages the creation/removal of volumes. It uses only standard vfs +// commands to create/remove dirs within its provided scope. +type Root struct { + m sync.Mutex + scope string + path string + volumes map[string]*localVolume + rootUID int + rootGID int +} + +// List lists all the volumes +func (r *Root) List() ([]volume.Volume, error) { + var ls []volume.Volume + r.m.Lock() + for _, v := range r.volumes { + ls = append(ls, v) + } + r.m.Unlock() + return ls, nil +} + +// DataPath returns the constructed path of this volume. +func (r *Root) DataPath(volumeName string) string { + return filepath.Join(r.path, volumeName, VolumeDataPathName) +} + +// Name returns the name of Root, defined in the volume package in the DefaultDriverName constant. +func (r *Root) Name() string { + return volume.DefaultDriverName +} + +// Create creates a new volume.Volume with the provided name, creating +// the underlying directory tree required for this volume in the +// process. +func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := r.validateName(name); err != nil { + return nil, err + } + + r.m.Lock() + defer r.m.Unlock() + + v, exists := r.volumes[name] + if exists { + return v, nil + } + + path := r.DataPath(name) + if err := idtools.MkdirAllAs(path, 0755, r.rootUID, r.rootGID); err != nil { + if os.IsExist(err) { + return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) + } + return nil, errors.Wrapf(err, "error while creating volume path '%s'", path) + } + + var err error + defer func() { + if err != nil { + os.RemoveAll(filepath.Dir(path)) + } + }() + + v = &localVolume{ + driverName: r.Name(), + name: name, + path: path, + } + + if len(opts) != 0 { + if err = setOpts(v, opts); err != nil { + return nil, err + } + var b []byte + b, err = json.Marshal(v.opts) + if err != nil { + return nil, err + } + if err = ioutil.WriteFile(filepath.Join(filepath.Dir(path), "opts.json"), b, 600); err != nil { + return nil, errors.Wrap(err, "error while persisting volume options") + } + } + + r.volumes[name] = v + return v, nil +} + +// Remove removes the specified volume and all underlying data. If the +// given volume does not belong to this driver and an error is +// returned. The volume is reference counted, if all references are +// not released then the volume is not removed. +func (r *Root) Remove(v volume.Volume) error { + r.m.Lock() + defer r.m.Unlock() + + lv, ok := v.(*localVolume) + if !ok { + return fmt.Errorf("unknown volume type %T", v) + } + + if lv.active.count > 0 { + return fmt.Errorf("volume has active mounts") + } + + if err := lv.unmount(); err != nil { + return err + } + + realPath, err := filepath.EvalSymlinks(lv.path) + if err != nil { + if !os.IsNotExist(err) { + return err + } + realPath = filepath.Dir(lv.path) + } + + if !r.scopedPath(realPath) { + return fmt.Errorf("Unable to remove a directory of out the Docker root %s: %s", r.scope, realPath) + } + + if err := removePath(realPath); err != nil { + return err + } + + delete(r.volumes, lv.name) + return removePath(filepath.Dir(lv.path)) +} + +func removePath(path string) error { + if err := os.RemoveAll(path); err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "error removing volume path '%s'", path) + } + return nil +} + +// Get looks up the volume for the given name and returns it if found +func (r *Root) Get(name string) (volume.Volume, error) { + r.m.Lock() + v, exists := r.volumes[name] + r.m.Unlock() + if !exists { + return nil, ErrNotFound + } + return v, nil +} + +// Scope returns the local volume scope +func (r *Root) Scope() string { + return volume.LocalScope +} + +func (r *Root) validateName(name string) error { + if len(name) == 1 { + return validationError{fmt.Errorf("volume name is too short, names should be at least two alphanumeric characters")} + } + if !volumeNameRegex.MatchString(name) { + return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed. If you intented to pass a host directory, use absolute path", name, utils.RestrictedNameChars)} + } + return nil +} + +// localVolume implements the Volume interface from the volume package and +// represents the volumes created by Root. +type localVolume struct { + m sync.Mutex + // unique name of the volume + name string + // path is the path on the host where the data lives + path string + // driverName is the name of the driver that created the volume. + driverName string + // opts is the parsed list of options used to create the volume + opts *optsConfig + // active refcounts the active mounts + active activeMount +} + +// Name returns the name of the given Volume. +func (v *localVolume) Name() string { + return v.name +} + +// DriverName returns the driver that created the given Volume. +func (v *localVolume) DriverName() string { + return v.driverName +} + +// Path returns the data location. +func (v *localVolume) Path() string { + return v.path +} + +// Mount implements the localVolume interface, returning the data location. +// If there are any provided mount options, the resources will be mounted at this point +func (v *localVolume) Mount(id string) (string, error) { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + if !v.active.mounted { + if err := v.mount(); err != nil { + return "", err + } + v.active.mounted = true + } + v.active.count++ + } + return v.path, nil +} + +// Unmount dereferences the id, and if it is the last reference will unmount any resources +// that were previously mounted. +func (v *localVolume) Unmount(id string) error { + v.m.Lock() + defer v.m.Unlock() + + // Always decrement the count, even if the unmount fails + // Essentially docker doesn't care if this fails, it will send an error, but + // ultimately there's nothing that can be done. If we don't decrement the count + // this volume can never be removed until a daemon restart occurs. + if v.opts != nil { + v.active.count-- + } + + if v.active.count > 0 { + return nil + } + + return v.unmount() +} + +func (v *localVolume) unmount() error { + if v.opts != nil { + if err := unmount(v.path); err != nil { + if mounted, mErr := mount.Mounted(v.path); mounted || mErr != nil { + return errors.Wrapf(err, "error while unmounting volume path '%s'", v.path) + } + } + v.active.mounted = false + } + return nil +} + +func validateOpts(opts map[string]string) error { + for opt := range opts { + if !validOpts[opt] { + return validationError{fmt.Errorf("invalid option key: %q", opt)} + } + } + return nil +} + +func (v *localVolume) Status() map[string]interface{} { + return nil +} + +// getAddress finds out address/hostname from options +func getAddress(opts string) string { + optsList := strings.Split(opts, ",") + for i := 0; i < len(optsList); i++ { + if strings.HasPrefix(optsList[i], "addr=") { + addr := (strings.SplitN(optsList[i], "=", 2)[1]) + return addr + } + } + return "" +} diff --git a/vendor/github.com/moby/moby/volume/local/local_test.go b/vendor/github.com/moby/moby/volume/local/local_test.go new file mode 100644 index 0000000..f5a519b --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/local_test.go @@ -0,0 +1,344 @@ +package local + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/mount" +) + +func TestGetAddress(t *testing.T) { + cases := map[string]string{ + "addr=11.11.11.1": "11.11.11.1", + " ": "", + "addr=": "", + "addr=2001:db8::68": "2001:db8::68", + } + for name, success := range cases { + v := getAddress(name) + if v != success { + t.Errorf("Test case failed for %s actual: %s expected : %s", name, v, success) + } + } + +} + +func TestRemove(t *testing.T) { + // TODO Windows: Investigate why this test fails on Windows under CI + // but passes locally. + if runtime.GOOS == "windows" { + t.Skip("Test failing on Windows CI") + } + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + vol, err = r.Create("testing2", nil) + if err != nil { + t.Fatal(err) + } + if err := os.RemoveAll(vol.Path()); err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(vol.Path()); err != nil && !os.IsNotExist(err) { + t.Fatal("volume dir not removed") + } + + if l, _ := r.List(); len(l) != 0 { + t.Fatal("expected there to be no volumes") + } +} + +func TestInitializeWithVolumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + v, err := r.Get(vol.Name()) + if err != nil { + t.Fatal(err) + } + + if v.Path() != vol.Path() { + t.Fatal("expected to re-initialize root with existing volumes") + } +} + +func TestCreate(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + cases := map[string]bool{ + "name": true, + "name-with-dash": true, + "name_with_underscore": true, + "name/with/slash": false, + "name/with/../../slash": false, + "./name": false, + "../name": false, + "./": false, + "../": false, + "~": false, + ".": false, + "..": false, + "...": false, + } + + for name, success := range cases { + v, err := r.Create(name, nil) + if success { + if err != nil { + t.Fatal(err) + } + if v.Name() != name { + t.Fatalf("Expected volume with name %s, got %s", name, v.Name()) + } + } else { + if err == nil { + t.Fatalf("Expected error creating volume with name %s, got nil", name) + } + } + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } +} + +func TestValidateName(t *testing.T) { + r := &Root{} + names := map[string]bool{ + "x": false, + "/testvol": false, + "thing.d": true, + "hello-world": true, + "./hello": false, + ".hello": false, + } + + for vol, expected := range names { + err := r.validateName(vol) + if expected && err != nil { + t.Fatalf("expected %s to be valid got %v", vol, err) + } + if !expected && err == nil { + t.Fatalf("expected %s to be invalid", vol) + } + } +} + +func TestCreateWithOpts(t *testing.T) { + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip() + } + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test", map[string]string{"invalidopt": "notsupported"}); err == nil { + t.Fatal("expected invalid opt to cause error") + } + + vol, err := r.Create("test", map[string]string{"device": "tmpfs", "type": "tmpfs", "o": "size=1m,uid=1000"}) + if err != nil { + t.Fatal(err) + } + v := vol.(*localVolume) + + dir, err := v.Mount("1234") + if err != nil { + t.Fatal(err) + } + defer func() { + if err := v.Unmount("1234"); err != nil { + t.Fatal(err) + } + }() + + mountInfos, err := mount.GetMounts() + if err != nil { + t.Fatal(err) + } + + var found bool + for _, info := range mountInfos { + if info.Mountpoint == dir { + found = true + if info.Fstype != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Fstype) + } + if info.Source != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Source) + } + if !strings.Contains(info.VfsOpts, "uid=1000") { + t.Fatalf("expected mount info to have uid=1000: %q", info.VfsOpts) + } + if !strings.Contains(info.VfsOpts, "size=1024k") { + t.Fatalf("expected mount info to have size=1024k: %q", info.VfsOpts) + } + break + } + } + + if !found { + t.Fatal("mount not found") + } + + if v.active.count != 1 { + t.Fatalf("Expected active mount count to be 1, got %d", v.active.count) + } + + // test double mount + if _, err := v.Mount("1234"); err != nil { + t.Fatal(err) + } + if v.active.count != 2 { + t.Fatalf("Expected active mount count to be 2, got %d", v.active.count) + } + + if err := v.Unmount("1234"); err != nil { + t.Fatal(err) + } + if v.active.count != 1 { + t.Fatalf("Expected active mount count to be 1, got %d", v.active.count) + } + + mounted, err := mount.Mounted(v.path) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatal("expected mount to still be active") + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + v2, exists := r.volumes["test"] + if !exists { + t.Fatal("missing volume on restart") + } + + if !reflect.DeepEqual(v.opts, v2.opts) { + t.Fatal("missing volume options on restart") + } +} + +func TestRealodNoOpts(t *testing.T) { + rootDir, err := ioutil.TempDir("", "volume-test-reload-no-opts") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test1", nil); err != nil { + t.Fatal(err) + } + if _, err := r.Create("test2", nil); err != nil { + t.Fatal(err) + } + // make sure a file with `null` (.e.g. empty opts map from older daemon) is ok + if err := ioutil.WriteFile(filepath.Join(rootDir, "test2"), []byte("null"), 600); err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test3", nil); err != nil { + t.Fatal(err) + } + // make sure an empty opts file doesn't break us too + if err := ioutil.WriteFile(filepath.Join(rootDir, "test3"), nil, 600); err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test4", map[string]string{}); err != nil { + t.Fatal(err) + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + for _, name := range []string{"test1", "test2", "test3", "test4"} { + v, err := r.Get(name) + if err != nil { + t.Fatal(err) + } + lv, ok := v.(*localVolume) + if !ok { + t.Fatalf("expected *localVolume got: %v", reflect.TypeOf(v)) + } + if lv.opts != nil { + t.Fatalf("expected opts to be nil, got: %v", lv.opts) + } + if _, err := lv.Mount("1234"); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/volume/local/local_unix.go b/vendor/github.com/moby/moby/volume/local/local_unix.go new file mode 100644 index 0000000..fb08862 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/local_unix.go @@ -0,0 +1,87 @@ +// +build linux freebsd solaris + +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "fmt" + "net" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "github.com/docker/docker/pkg/mount" +) + +var ( + oldVfsDir = filepath.Join("vfs", "dir") + + validOpts = map[string]bool{ + "type": true, // specify the filesystem type for mount, e.g. nfs + "o": true, // generic mount options + "device": true, // device to mount from + } +) + +type optsConfig struct { + MountType string + MountOpts string + MountDevice string +} + +func (o *optsConfig) String() string { + return fmt.Sprintf("type='%s' device='%s' o='%s'", o.MountType, o.MountDevice, o.MountOpts) +} + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + // Volumes path for Docker version >= 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + + // Volumes path for Docker version < 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { + return true + } + + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) == 0 { + return nil + } + if err := validateOpts(opts); err != nil { + return err + } + + v.opts = &optsConfig{ + MountType: opts["type"], + MountOpts: opts["o"], + MountDevice: opts["device"], + } + return nil +} + +func (v *localVolume) mount() error { + if v.opts.MountDevice == "" { + return fmt.Errorf("missing device in volume options") + } + mountOpts := v.opts.MountOpts + if v.opts.MountType == "nfs" { + if addrValue := getAddress(v.opts.MountOpts); addrValue != "" && net.ParseIP(addrValue).To4() == nil { + ipAddr, err := net.ResolveIPAddr("ip", addrValue) + if err != nil { + return errors.Wrapf(err, "error resolving passed in nfs address") + } + mountOpts = strings.Replace(mountOpts, "addr="+addrValue, "addr="+ipAddr.String(), 1) + } + } + err := mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, mountOpts) + return errors.Wrapf(err, "error while mounting volume with options: %s", v.opts) +} diff --git a/vendor/github.com/moby/moby/volume/local/local_windows.go b/vendor/github.com/moby/moby/volume/local/local_windows.go new file mode 100644 index 0000000..1bdb368 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/local_windows.go @@ -0,0 +1,34 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "fmt" + "path/filepath" + "strings" +) + +type optsConfig struct{} + +var validOpts map[string]bool + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) > 0 { + return fmt.Errorf("options are not supported on this platform") + } + return nil +} + +func (v *localVolume) mount() error { + return nil +} diff --git a/vendor/github.com/moby/moby/volume/local/unmount_linux.go b/vendor/github.com/moby/moby/volume/local/unmount_linux.go new file mode 100644 index 0000000..e825e4d --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/unmount_linux.go @@ -0,0 +1,7 @@ +package local + +import "golang.org/x/sys/unix" + +func unmount(path string) error { + return unix.Unmount(path, unix.MNT_DETACH) +} diff --git a/vendor/github.com/moby/moby/volume/local/unmount_unix.go b/vendor/github.com/moby/moby/volume/local/unmount_unix.go new file mode 100644 index 0000000..c1c2c16 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/unmount_unix.go @@ -0,0 +1,9 @@ +// +build !linux,!windows + +package local + +import "golang.org/x/sys/unix" + +func unmount(path string) error { + return unix.Unmount(path, 0) +} diff --git a/vendor/github.com/moby/moby/volume/local/unmount_windows.go b/vendor/github.com/moby/moby/volume/local/unmount_windows.go new file mode 100644 index 0000000..42b38fa --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/unmount_windows.go @@ -0,0 +1,5 @@ +package local + +func unmount(_ string) error { + return nil +} diff --git a/vendor/github.com/moby/moby/volume/store/db.go b/vendor/github.com/moby/moby/volume/store/db.go new file mode 100644 index 0000000..c5fd164 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/db.go @@ -0,0 +1,88 @@ +package store + +import ( + "encoding/json" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/pkg/errors" +) + +var volumeBucketName = []byte("volumes") + +type volumeMetadata struct { + Name string + Driver string + Labels map[string]string + Options map[string]string +} + +func (s *VolumeStore) setMeta(name string, meta volumeMetadata) error { + return s.db.Update(func(tx *bolt.Tx) error { + return setMeta(tx, name, meta) + }) +} + +func setMeta(tx *bolt.Tx, name string, meta volumeMetadata) error { + metaJSON, err := json.Marshal(meta) + if err != nil { + return err + } + b := tx.Bucket(volumeBucketName) + return errors.Wrap(b.Put([]byte(name), metaJSON), "error setting volume metadata") +} + +func (s *VolumeStore) getMeta(name string) (volumeMetadata, error) { + var meta volumeMetadata + err := s.db.View(func(tx *bolt.Tx) error { + return getMeta(tx, name, &meta) + }) + return meta, err +} + +func getMeta(tx *bolt.Tx, name string, meta *volumeMetadata) error { + b := tx.Bucket(volumeBucketName) + val := b.Get([]byte(name)) + if string(val) == "" { + return nil + } + if err := json.Unmarshal(val, meta); err != nil { + return errors.Wrap(err, "error unmarshaling volume metadata") + } + return nil +} + +func (s *VolumeStore) removeMeta(name string) error { + return s.db.Update(func(tx *bolt.Tx) error { + return removeMeta(tx, name) + }) +} + +func removeMeta(tx *bolt.Tx, name string) error { + b := tx.Bucket(volumeBucketName) + return errors.Wrap(b.Delete([]byte(name)), "error removing volume metadata") +} + +// listMeta is used during restore to get the list of volume metadata +// from the on-disk database. +// Any errors that occur are only logged. +func listMeta(tx *bolt.Tx) []volumeMetadata { + var ls []volumeMetadata + b := tx.Bucket(volumeBucketName) + b.ForEach(func(k, v []byte) error { + if len(v) == 0 { + // don't try to unmarshal an empty value + return nil + } + + var m volumeMetadata + if err := json.Unmarshal(v, &m); err != nil { + // Just log the error + logrus.Errorf("Error while reading volume metadata for volume %q: %v", string(k), err) + return nil + } + ls = append(ls, m) + return nil + }) + return ls +} diff --git a/vendor/github.com/moby/moby/volume/store/errors.go b/vendor/github.com/moby/moby/volume/store/errors.go new file mode 100644 index 0000000..980175f --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/errors.go @@ -0,0 +1,76 @@ +package store + +import ( + "strings" + + "github.com/pkg/errors" +) + +var ( + // errVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container + errVolumeInUse = errors.New("volume is in use") + // errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store + errNoSuchVolume = errors.New("no such volume") + // errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform + errInvalidName = errors.New("volume name is not valid on this platform") + // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver + errNameConflict = errors.New("volume name must be unique") +) + +// OpErr is the error type returned by functions in the store package. It describes +// the operation, volume name, and error. +type OpErr struct { + // Err is the error that occurred during the operation. + Err error + // Op is the operation which caused the error, such as "create", or "list". + Op string + // Name is the name of the resource being requested for this op, typically the volume name or the driver name. + Name string + // Refs is the list of references associated with the resource. + Refs []string +} + +// Error satisfies the built-in error interface type. +func (e *OpErr) Error() string { + if e == nil { + return "" + } + s := e.Op + if e.Name != "" { + s = s + " " + e.Name + } + + s = s + ": " + e.Err.Error() + if len(e.Refs) > 0 { + s = s + " - " + "[" + strings.Join(e.Refs, ", ") + "]" + } + return s +} + +// IsInUse returns a boolean indicating whether the error indicates that a +// volume is in use +func IsInUse(err error) bool { + return isErr(err, errVolumeInUse) +} + +// IsNotExist returns a boolean indicating whether the error indicates that the volume does not exist +func IsNotExist(err error) bool { + return isErr(err, errNoSuchVolume) +} + +// IsNameConflict returns a boolean indicating whether the error indicates that a +// volume name is already taken +func IsNameConflict(err error) bool { + return isErr(err, errNameConflict) +} + +func isErr(err error, expected error) bool { + err = errors.Cause(err) + switch pe := err.(type) { + case nil: + return false + case *OpErr: + err = errors.Cause(pe.Err) + } + return err == expected +} diff --git a/vendor/github.com/moby/moby/volume/store/restore.go b/vendor/github.com/moby/moby/volume/store/restore.go new file mode 100644 index 0000000..c0c5b51 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/restore.go @@ -0,0 +1,83 @@ +package store + +import ( + "sync" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +// restore is called when a new volume store is created. +// It's primary purpose is to ensure that all drivers' refcounts are set based +// on known volumes after a restart. +// This only attempts to track volumes that are actually stored in the on-disk db. +// It does not probe the available drivers to find anything that may have been added +// out of band. +func (s *VolumeStore) restore() { + var ls []volumeMetadata + s.db.View(func(tx *bolt.Tx) error { + ls = listMeta(tx) + return nil + }) + + chRemove := make(chan *volumeMetadata, len(ls)) + var wg sync.WaitGroup + for _, meta := range ls { + wg.Add(1) + // this is potentially a very slow operation, so do it in a goroutine + go func(meta volumeMetadata) { + defer wg.Done() + + var v volume.Volume + var err error + if meta.Driver != "" { + v, err = lookupVolume(meta.Driver, meta.Name) + if err != nil && err != errNoSuchVolume { + logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", meta.Name).Warn("Error restoring volume") + return + } + if v == nil { + // doesn't exist in the driver, remove it from the db + chRemove <- &meta + return + } + } else { + v, err = s.getVolume(meta.Name) + if err != nil { + if err == errNoSuchVolume { + chRemove <- &meta + } + return + } + + meta.Driver = v.DriverName() + if err := s.setMeta(v.Name(), meta); err != nil { + logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", v.Name()).Warn("Error updating volume metadata on restore") + } + } + + // increment driver refcount + volumedrivers.CreateDriver(meta.Driver) + + // cache the volume + s.globalLock.Lock() + s.options[v.Name()] = meta.Options + s.labels[v.Name()] = meta.Labels + s.names[v.Name()] = v + s.globalLock.Unlock() + }(meta) + } + + wg.Wait() + close(chRemove) + s.db.Update(func(tx *bolt.Tx) error { + for meta := range chRemove { + if err := removeMeta(tx, meta.Name); err != nil { + logrus.WithField("volume", meta.Name).Warnf("Error removing stale entry from volume db: %v", err) + } + } + return nil + }) +} diff --git a/vendor/github.com/moby/moby/volume/store/store.go b/vendor/github.com/moby/moby/volume/store/store.go new file mode 100644 index 0000000..02c858a --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/store.go @@ -0,0 +1,653 @@ +package store + +import ( + "net" + "os" + "path/filepath" + "sync" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +const ( + volumeDataDir = "volumes" +) + +type volumeWrapper struct { + volume.Volume + labels map[string]string + scope string + options map[string]string +} + +func (v volumeWrapper) Options() map[string]string { + options := map[string]string{} + for key, value := range v.options { + options[key] = value + } + return options +} + +func (v volumeWrapper) Labels() map[string]string { + return v.labels +} + +func (v volumeWrapper) Scope() string { + return v.scope +} + +func (v volumeWrapper) CachedPath() string { + if vv, ok := v.Volume.(interface { + CachedPath() string + }); ok { + return vv.CachedPath() + } + return v.Volume.Path() +} + +// New initializes a VolumeStore to keep +// reference counting of volumes in the system. +func New(rootPath string) (*VolumeStore, error) { + vs := &VolumeStore{ + locks: &locker.Locker{}, + names: make(map[string]volume.Volume), + refs: make(map[string][]string), + labels: make(map[string]map[string]string), + options: make(map[string]map[string]string), + } + + if rootPath != "" { + // initialize metadata store + volPath := filepath.Join(rootPath, volumeDataDir) + if err := os.MkdirAll(volPath, 750); err != nil { + return nil, err + } + + dbPath := filepath.Join(volPath, "metadata.db") + + var err error + vs.db, err = bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, errors.Wrap(err, "error while opening volume store metadata database") + } + + // initialize volumes bucket + if err := vs.db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists(volumeBucketName); err != nil { + return errors.Wrap(err, "error while setting up volume store metadata database") + } + return nil + }); err != nil { + return nil, err + } + } + + vs.restore() + + return vs, nil +} + +func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) { + s.globalLock.RLock() + v, exists := s.names[name] + s.globalLock.RUnlock() + return v, exists +} + +func (s *VolumeStore) setNamed(v volume.Volume, ref string) { + s.globalLock.Lock() + s.names[v.Name()] = v + if len(ref) > 0 { + s.refs[v.Name()] = append(s.refs[v.Name()], ref) + } + s.globalLock.Unlock() +} + +// getRefs gets the list of refs for a given name +// Callers of this function are expected to hold the name lock. +func (s *VolumeStore) getRefs(name string) []string { + s.globalLock.RLock() + refs := s.refs[name] + s.globalLock.RUnlock() + return refs +} + +// Purge allows the cleanup of internal data on docker in case +// the internal data is out of sync with volumes driver plugins. +func (s *VolumeStore) Purge(name string) { + s.globalLock.Lock() + v, exists := s.names[name] + if exists { + if _, err := volumedrivers.RemoveDriver(v.DriverName()); err != nil { + logrus.Error("Error dereferencing volume driver: %v", err) + } + } + if err := s.removeMeta(name); err != nil { + logrus.Errorf("Error removing volume metadata for volume %q: %v", name, err) + } + delete(s.names, name) + delete(s.refs, name) + delete(s.labels, name) + delete(s.options, name) + s.globalLock.Unlock() +} + +// VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts +type VolumeStore struct { + // locks ensures that only one action is being performed on a particular volume at a time without locking the entire store + // since actions on volumes can be quite slow, this ensures the store is free to handle requests for other volumes. + locks *locker.Locker + // globalLock is used to protect access to mutable structures used by the store object + globalLock sync.RWMutex + // names stores the volume name -> volume relationship. + // This is used for making lookups faster so we don't have to probe all drivers + names map[string]volume.Volume + // refs stores the volume name and the list of things referencing it + refs map[string][]string + // labels stores volume labels for each volume + labels map[string]map[string]string + // options stores volume options for each volume + options map[string]map[string]string + db *bolt.DB +} + +// List proxies to all registered volume drivers to get the full list of volumes +// If a driver returns a volume that has name which conflicts with another volume from a different driver, +// the first volume is chosen and the conflicting volume is dropped. +func (s *VolumeStore) List() ([]volume.Volume, []string, error) { + vols, warnings, err := s.list() + if err != nil { + return nil, nil, &OpErr{Err: err, Op: "list"} + } + var out []volume.Volume + + for _, v := range vols { + name := normaliseVolumeName(v.Name()) + + s.locks.Lock(name) + storedV, exists := s.getNamed(name) + // Note: it's not safe to populate the cache here because the volume may have been + // deleted before we acquire a lock on its name + if exists && storedV.DriverName() != v.DriverName() { + logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) + s.locks.Unlock(v.Name()) + continue + } + + out = append(out, v) + s.locks.Unlock(v.Name()) + } + return out, warnings, nil +} + +// list goes through each volume driver and asks for its list of volumes. +func (s *VolumeStore) list() ([]volume.Volume, []string, error) { + var ( + ls []volume.Volume + warnings []string + ) + + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, nil, err + } + + type vols struct { + vols []volume.Volume + err error + driverName string + } + chVols := make(chan vols, len(drivers)) + + for _, vd := range drivers { + go func(d volume.Driver) { + vs, err := d.List() + if err != nil { + chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} + return + } + for i, v := range vs { + s.globalLock.RLock() + vs[i] = volumeWrapper{v, s.labels[v.Name()], d.Scope(), s.options[v.Name()]} + s.globalLock.RUnlock() + } + + chVols <- vols{vols: vs} + }(vd) + } + + badDrivers := make(map[string]struct{}) + for i := 0; i < len(drivers); i++ { + vs := <-chVols + + if vs.err != nil { + warnings = append(warnings, vs.err.Error()) + badDrivers[vs.driverName] = struct{}{} + logrus.Warn(vs.err) + } + ls = append(ls, vs.vols...) + } + + if len(badDrivers) > 0 { + s.globalLock.RLock() + for _, v := range s.names { + if _, exists := badDrivers[v.DriverName()]; exists { + ls = append(ls, v) + } + } + s.globalLock.RUnlock() + } + return ls, warnings, nil +} + +// CreateWithRef creates a volume with the given name and driver and stores the ref +// This ensures there's no race between creating a volume and then storing a reference. +func (s *VolumeStore) CreateWithRef(name, driverName, ref string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + + s.setNamed(v, ref) + return v, nil +} + +// Create creates a volume with the given name and driver. +// This is just like CreateWithRef() except we don't store a reference while holding the lock. +func (s *VolumeStore) Create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + return s.CreateWithRef(name, driverName, "", opts, labels) +} + +// checkConflict checks the local cache for name collisions with the passed in name, +// for existing volumes with the same name but in a different driver. +// This is used by `Create` as a best effort to prevent name collisions for volumes. +// If a matching volume is found that is not a conflict that is returned so the caller +// does not need to perform an additional lookup. +// When no matching volume is found, both returns will be nil +// +// Note: This does not probe all the drivers for name collisions because v1 plugins +// are very slow, particularly if the plugin is down, and cause other issues, +// particularly around locking the store. +// TODO(cpuguy83): With v2 plugins this shouldn't be a problem. Could also potentially +// use a connect timeout for this kind of check to ensure we aren't blocking for a +// long time. +func (s *VolumeStore) checkConflict(name, driverName string) (volume.Volume, error) { + // check the local cache + v, _ := s.getNamed(name) + if v == nil { + return nil, nil + } + + vDriverName := v.DriverName() + var conflict bool + if driverName != "" { + // Retrieve canonical driver name to avoid inconsistencies (for example + // "plugin" vs. "plugin:latest") + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, err + } + + if vDriverName != vd.Name() { + conflict = true + } + } + + // let's check if the found volume ref + // is stale by checking with the driver if it still exists + exists, err := volumeExists(v) + if err != nil { + return nil, errors.Wrapf(errNameConflict, "found reference to volume '%s' in driver '%s', but got an error while checking the driver: %v", name, vDriverName, err) + } + + if exists { + if conflict { + return nil, errors.Wrapf(errNameConflict, "driver '%s' already has volume '%s'", vDriverName, name) + } + return v, nil + } + + if len(s.getRefs(v.Name())) > 0 { + // Containers are referencing this volume but it doesn't seem to exist anywhere. + // Return a conflict error here, the user can fix this with `docker volume rm -f` + return nil, errors.Wrapf(errNameConflict, "found references to volume '%s' in driver '%s' but the volume was not found in the driver -- you may need to remove containers referencing this volume or force remove the volume to re-create it", name, vDriverName) + } + + // doesn't exist, so purge it from the cache + s.Purge(name) + return nil, nil +} + +// volumeExists returns if the volume is still present in the driver. +// An error is returned if there was an issue communicating with the driver. +func volumeExists(v volume.Volume) (bool, error) { + exists, err := lookupVolume(v.DriverName(), v.Name()) + if err != nil { + return false, err + } + return exists != nil, nil +} + +// create asks the given driver to create a volume with the name/opts. +// If a volume with the name is already known, it will ask the stored driver for the volume. +// If the passed in driver name does not match the driver name which is stored +// for the given volume name, an error is returned after checking if the reference is stale. +// If the reference is stale, it will be purged and this create can continue. +// It is expected that callers of this function hold any necessary locks. +func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + // Validate the name in a platform-specific manner + valid, err := volume.IsVolumeNameValid(name) + if err != nil { + return nil, err + } + if !valid { + return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"} + } + + v, err := s.checkConflict(name, driverName) + if err != nil { + return nil, err + } + + if v != nil { + return v, nil + } + + // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name + if driverName == "" { + v, _ := s.getVolume(name) + if v != nil { + return v, nil + } + } + + vd, err := volumedrivers.CreateDriver(driverName) + + if err != nil { + return nil, &OpErr{Op: "create", Name: name, Err: err} + } + + logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) + + if v, _ := vd.Get(name); v != nil { + return v, nil + } + v, err = vd.Create(name, opts) + if err != nil { + return nil, err + } + s.globalLock.Lock() + s.labels[name] = labels + s.options[name] = opts + s.globalLock.Unlock() + + metadata := volumeMetadata{ + Name: name, + Driver: vd.Name(), + Labels: labels, + Options: opts, + } + + if err := s.setMeta(name, metadata); err != nil { + return nil, err + } + return volumeWrapper{v, labels, vd.Scope(), opts}, nil +} + +// GetWithRef gets a volume with the given name from the passed in driver and stores the ref +// This is just like Get(), but we store the reference while holding the lock. +// This makes sure there are no races between checking for the existence of a volume and adding a reference for it +func (s *VolumeStore) GetWithRef(name, driverName, ref string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + v, err := vd.Get(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + s.setNamed(v, ref) + + s.globalLock.RLock() + defer s.globalLock.RUnlock() + return volumeWrapper{v, s.labels[name], vd.Scope(), s.options[name]}, nil +} + +// Get looks if a volume with the given name exists and returns it if so +func (s *VolumeStore) Get(name string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.getVolume(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + s.setNamed(v, "") + return v, nil +} + +// getVolume requests the volume, if the driver info is stored it just accesses that driver, +// if the driver is unknown it probes all drivers until it finds the first volume with that name. +// it is expected that callers of this function hold any necessary locks +func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { + var meta volumeMetadata + meta, err := s.getMeta(name) + if err != nil { + return nil, err + } + + driverName := meta.Driver + if driverName == "" { + s.globalLock.RLock() + v, exists := s.names[name] + s.globalLock.RUnlock() + if exists { + meta.Driver = v.DriverName() + if err := s.setMeta(name, meta); err != nil { + return nil, err + } + } + } + + if meta.Driver != "" { + vol, err := lookupVolume(meta.Driver, name) + if err != nil { + return nil, err + } + if vol == nil { + s.Purge(name) + return nil, errNoSuchVolume + } + + var scope string + vd, err := volumedrivers.GetDriver(meta.Driver) + if err == nil { + scope = vd.Scope() + } + return volumeWrapper{vol, meta.Labels, scope, meta.Options}, nil + } + + logrus.Debugf("Probing all drivers for volume with name: %s", name) + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, err + } + + for _, d := range drivers { + v, err := d.Get(name) + if err != nil || v == nil { + continue + } + meta.Driver = v.DriverName() + if err := s.setMeta(name, meta); err != nil { + return nil, err + } + return volumeWrapper{v, meta.Labels, d.Scope(), meta.Options}, nil + } + return nil, errNoSuchVolume +} + +// lookupVolume gets the specified volume from the specified driver. +// This will only return errors related to communications with the driver. +// If the driver returns an error that is not communication related the +// error is logged but not returned. +// If the volume is not found it will return `nil, nil`` +func lookupVolume(driverName, volumeName string) (volume.Volume, error) { + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) + } + v, err := vd.Get(volumeName) + if err != nil { + err = errors.Cause(err) + if _, ok := err.(net.Error); ok { + if v != nil { + volumeName = v.Name() + driverName = v.DriverName() + } + return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) + } + + // At this point, the error could be anything from the driver, such as "no such volume" + // Let's not check an error here, and instead check if the driver returned a volume + logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Warnf("Error while looking up volume") + } + return v, nil +} + +// Remove removes the requested volume. A volume is not removed if it has any refs +func (s *VolumeStore) Remove(v volume.Volume) error { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + refs := s.getRefs(name) + if len(refs) > 0 { + return &OpErr{Err: errVolumeInUse, Name: v.Name(), Op: "remove", Refs: refs} + } + + vd, err := volumedrivers.GetDriver(v.DriverName()) + if err != nil { + return &OpErr{Err: err, Name: v.DriverName(), Op: "remove"} + } + + logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) + vol := unwrapVolume(v) + if err := vd.Remove(vol); err != nil { + return &OpErr{Err: err, Name: name, Op: "remove"} + } + + s.Purge(name) + return nil +} + +// Dereference removes the specified reference to the volume +func (s *VolumeStore) Dereference(v volume.Volume, ref string) { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + s.globalLock.Lock() + defer s.globalLock.Unlock() + var refs []string + + for _, r := range s.refs[v.Name()] { + if r != ref { + refs = append(refs, r) + } + } + s.refs[v.Name()] = refs +} + +// Refs gets the current list of refs for the given volume +func (s *VolumeStore) Refs(v volume.Volume) []string { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + refs := s.getRefs(v.Name()) + refsOut := make([]string, len(refs)) + copy(refsOut, refs) + return refsOut +} + +// FilterByDriver returns the available volumes filtered by driver name +func (s *VolumeStore) FilterByDriver(name string) ([]volume.Volume, error) { + vd, err := volumedrivers.GetDriver(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + ls, err := vd.List() + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + for i, v := range ls { + options := map[string]string{} + s.globalLock.RLock() + for key, value := range s.options[v.Name()] { + options[key] = value + } + ls[i] = volumeWrapper{v, s.labels[v.Name()], vd.Scope(), options} + s.globalLock.RUnlock() + } + return ls, nil +} + +// FilterByUsed returns the available volumes filtered by if they are in use or not. +// `used=true` returns only volumes that are being used, while `used=false` returns +// only volumes that are not being used. +func (s *VolumeStore) FilterByUsed(vols []volume.Volume, used bool) []volume.Volume { + return s.filter(vols, func(v volume.Volume) bool { + s.locks.Lock(v.Name()) + l := len(s.getRefs(v.Name())) + s.locks.Unlock(v.Name()) + if (used && l > 0) || (!used && l == 0) { + return true + } + return false + }) +} + +// filterFunc defines a function to allow filter volumes in the store +type filterFunc func(vol volume.Volume) bool + +// filter returns the available volumes filtered by a filterFunc function +func (s *VolumeStore) filter(vols []volume.Volume, f filterFunc) []volume.Volume { + var ls []volume.Volume + for _, v := range vols { + if f(v) { + ls = append(ls, v) + } + } + return ls +} + +func unwrapVolume(v volume.Volume) volume.Volume { + if vol, ok := v.(volumeWrapper); ok { + return vol.Volume + } + + return v +} + +// Shutdown releases all resources used by the volume store +// It does not make any changes to volumes, drivers, etc. +func (s *VolumeStore) Shutdown() error { + return s.db.Close() +} diff --git a/vendor/github.com/moby/moby/volume/store/store_test.go b/vendor/github.com/moby/moby/volume/store/store_test.go new file mode 100644 index 0000000..b52f720 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/store_test.go @@ -0,0 +1,234 @@ +package store + +import ( + "errors" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/volume/drivers" + volumetestutils "github.com/docker/docker/volume/testutils" +) + +func TestCreate(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + defer volumedrivers.Unregister("fake") + dir, err := ioutil.TempDir("", "test-create") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + v, err := s.Create("fake1", "fake", nil, nil) + if err != nil { + t.Fatal(err) + } + if v.Name() != "fake1" { + t.Fatalf("Expected fake1 volume, got %v", v) + } + if l, _, _ := s.List(); len(l) != 1 { + t.Fatalf("Expected 1 volume in the store, got %v: %v", len(l), l) + } + + if _, err := s.Create("none", "none", nil, nil); err == nil { + t.Fatalf("Expected unknown driver error, got nil") + } + + _, err = s.Create("fakeerror", "fake", map[string]string{"error": "create error"}, nil) + expected := &OpErr{Op: "create", Name: "fakeerror", Err: errors.New("create error")} + if err != nil && err.Error() != expected.Error() { + t.Fatalf("Expected create fakeError: create error, got %v", err) + } +} + +func TestRemove(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("noop") + dir, err := ioutil.TempDir("", "test-remove") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + // doing string compare here since this error comes directly from the driver + expected := "no such volume" + if err := s.Remove(volumetestutils.NoopVolume{}); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Expected error %q, got %v", expected, err) + } + + v, err := s.CreateWithRef("fake1", "fake", "fake", nil, nil) + if err != nil { + t.Fatal(err) + } + + if err := s.Remove(v); !IsInUse(err) { + t.Fatalf("Expected ErrVolumeInUse error, got %v", err) + } + s.Dereference(v, "fake") + if err := s.Remove(v); err != nil { + t.Fatal(err) + } + if l, _, _ := s.List(); len(l) != 0 { + t.Fatalf("Expected 0 volumes in the store, got %v, %v", len(l), l) + } +} + +func TestList(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("fake2"), "fake2") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("fake2") + dir, err := ioutil.TempDir("", "test-list") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + if _, err := s.Create("test", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("test2", "fake2", nil, nil); err != nil { + t.Fatal(err) + } + + ls, _, err := s.List() + if err != nil { + t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } + if err := s.Shutdown(); err != nil { + t.Fatal(err) + } + + // and again with a new store + s, err = New(dir) + if err != nil { + t.Fatal(err) + } + ls, _, err = s.List() + if err != nil { + t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } +} + +func TestFilterByDriver(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("noop") + dir, err := ioutil.TempDir("", "test-filter-driver") + if err != nil { + t.Fatal(err) + } + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + if _, err := s.Create("fake1", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake2", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake3", "noop", nil, nil); err != nil { + t.Fatal(err) + } + + if l, _ := s.FilterByDriver("fake"); len(l) != 2 { + t.Fatalf("Expected 2 volumes, got %v, %v", len(l), l) + } + + if l, _ := s.FilterByDriver("noop"); len(l) != 1 { + t.Fatalf("Expected 1 volume, got %v, %v", len(l), l) + } +} + +func TestFilterByUsed(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + dir, err := ioutil.TempDir("", "test-filter-used") + if err != nil { + t.Fatal(err) + } + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + if _, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake2", "fake", nil, nil); err != nil { + t.Fatal(err) + } + + vols, _, err := s.List() + if err != nil { + t.Fatal(err) + } + + dangling := s.FilterByUsed(vols, false) + if len(dangling) != 1 { + t.Fatalf("expected 1 danging volume, got %v", len(dangling)) + } + if dangling[0].Name() != "fake2" { + t.Fatalf("expected danging volume fake2, got %s", dangling[0].Name()) + } + + used := s.FilterByUsed(vols, true) + if len(used) != 1 { + t.Fatalf("expected 1 used volume, got %v", len(used)) + } + if used[0].Name() != "fake1" { + t.Fatalf("expected used volume fake1, got %s", used[0].Name()) + } +} + +func TestDerefMultipleOfSameRef(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + dir, err := ioutil.TempDir("", "test-same-deref") + if err != nil { + t.Fatal(err) + } + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + v, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil) + if err != nil { + t.Fatal(err) + } + + if _, err := s.GetWithRef("fake1", "fake", "volReference"); err != nil { + t.Fatal(err) + } + + s.Dereference(v, "volReference") + if err := s.Remove(v); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/volume/store/store_unix.go b/vendor/github.com/moby/moby/volume/store/store_unix.go new file mode 100644 index 0000000..8ebc1f2 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/store_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd solaris + +package store + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. This is a no-op on Unix-like platforms +func normaliseVolumeName(name string) string { + return name +} diff --git a/vendor/github.com/moby/moby/volume/store/store_windows.go b/vendor/github.com/moby/moby/volume/store/store_windows.go new file mode 100644 index 0000000..8601cdd --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/store_windows.go @@ -0,0 +1,12 @@ +package store + +import "strings" + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. On Windows, as NTFS is case insensitive, under +// c:\ProgramData\Docker\Volumes\, the folders John and john would be synonymous. +// Hence we can't allow the volume "John" and "john" to be created as separate +// volumes. +func normaliseVolumeName(name string) string { + return strings.ToLower(name) +} diff --git a/vendor/github.com/moby/moby/volume/testutils/testutils.go b/vendor/github.com/moby/moby/volume/testutils/testutils.go new file mode 100644 index 0000000..2dbac02 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/testutils/testutils.go @@ -0,0 +1,116 @@ +package testutils + +import ( + "fmt" + + "github.com/docker/docker/volume" +) + +// NoopVolume is a volume that doesn't perform any operation +type NoopVolume struct{} + +// Name is the name of the volume +func (NoopVolume) Name() string { return "noop" } + +// DriverName is the name of the driver +func (NoopVolume) DriverName() string { return "noop" } + +// Path is the filesystem path to the volume +func (NoopVolume) Path() string { return "noop" } + +// Mount mounts the volume in the container +func (NoopVolume) Mount(_ string) (string, error) { return "noop", nil } + +// Unmount unmounts the volume from the container +func (NoopVolume) Unmount(_ string) error { return nil } + +// Status proivdes low-level details about the volume +func (NoopVolume) Status() map[string]interface{} { return nil } + +// FakeVolume is a fake volume with a random name +type FakeVolume struct { + name string + driverName string +} + +// NewFakeVolume creates a new fake volume for testing +func NewFakeVolume(name string, driverName string) volume.Volume { + return FakeVolume{name: name, driverName: driverName} +} + +// Name is the name of the volume +func (f FakeVolume) Name() string { return f.name } + +// DriverName is the name of the driver +func (f FakeVolume) DriverName() string { return f.driverName } + +// Path is the filesystem path to the volume +func (FakeVolume) Path() string { return "fake" } + +// Mount mounts the volume in the container +func (FakeVolume) Mount(_ string) (string, error) { return "fake", nil } + +// Unmount unmounts the volume from the container +func (FakeVolume) Unmount(_ string) error { return nil } + +// Status proivdes low-level details about the volume +func (FakeVolume) Status() map[string]interface{} { return nil } + +// FakeDriver is a driver that generates fake volumes +type FakeDriver struct { + name string + vols map[string]volume.Volume +} + +// NewFakeDriver creates a new FakeDriver with the specified name +func NewFakeDriver(name string) volume.Driver { + return &FakeDriver{ + name: name, + vols: make(map[string]volume.Volume), + } +} + +// Name is the name of the driver +func (d *FakeDriver) Name() string { return d.name } + +// Create initializes a fake volume. +// It returns an error if the options include an "error" key with a message +func (d *FakeDriver) Create(name string, opts map[string]string) (volume.Volume, error) { + if opts != nil && opts["error"] != "" { + return nil, fmt.Errorf(opts["error"]) + } + v := NewFakeVolume(name, d.name) + d.vols[name] = v + return v, nil +} + +// Remove deletes a volume. +func (d *FakeDriver) Remove(v volume.Volume) error { + if _, exists := d.vols[v.Name()]; !exists { + return fmt.Errorf("no such volume") + } + delete(d.vols, v.Name()) + return nil +} + +// List lists the volumes +func (d *FakeDriver) List() ([]volume.Volume, error) { + var vols []volume.Volume + for _, v := range d.vols { + vols = append(vols, v) + } + return vols, nil +} + +// Get gets the volume +func (d *FakeDriver) Get(name string) (volume.Volume, error) { + if v, exists := d.vols[name]; exists { + return v, nil + } + return nil, fmt.Errorf("no such volume") +} + +// Scope returns the local scope +func (*FakeDriver) Scope() string { + return "local" +} diff --git a/vendor/github.com/moby/moby/volume/validate.go b/vendor/github.com/moby/moby/volume/validate.go new file mode 100644 index 0000000..27a8c5d --- /dev/null +++ b/vendor/github.com/moby/moby/volume/validate.go @@ -0,0 +1,125 @@ +package volume + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/api/types/mount" +) + +var errBindNotExist = errors.New("bind source path does not exist") + +type validateOpts struct { + skipBindSourceCheck bool + skipAbsolutePathCheck bool +} + +func validateMountConfig(mnt *mount.Mount, options ...func(*validateOpts)) error { + opts := validateOpts{} + for _, o := range options { + o(&opts) + } + + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := validateNotRoot(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + if !opts.skipAbsolutePathCheck { + if err := validateAbsolute(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 && len(propagationModes) > 0 { + if _, ok := propagationModes[opts.Propagation]; !ok { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := validateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + // Do not allow binding to non-existent path + if !opts.skipBindSourceCheck { + fi, err := os.Stat(mnt.Source) + if err != nil { + if !os.IsNotExist(err) { + return &errMountConfig{mnt, err} + } + return &errMountConfig{mnt, errBindNotExist} + } + if err := validateStat(fi); err != nil { + return &errMountConfig{mnt, err} + } + } + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + + if len(mnt.Source) != 0 { + if valid, err := IsVolumeNameValid(mnt.Source); !valid { + if err == nil { + err = errors.New("invalid volume name") + } + return &errMountConfig{mnt, err} + } + } + case mount.TypeTmpfs: + if len(mnt.Source) != 0 { + return &errMountConfig{mnt, errExtraField("Source")} + } + if _, err := ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { + return &errMountConfig{mnt, err} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} + +type errMountConfig struct { + mount *mount.Mount + err error +} + +func (e *errMountConfig) Error() string { + return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) +} + +func errExtraField(name string) error { + return fmt.Errorf("field %s must not be specified", name) +} +func errMissingField(name string) error { + return fmt.Errorf("field %s must not be empty", name) +} + +func validateAbsolute(p string) error { + p = convertSlash(p) + if filepath.IsAbs(p) { + return nil + } + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) +} diff --git a/vendor/github.com/moby/moby/volume/validate_test.go b/vendor/github.com/moby/moby/volume/validate_test.go new file mode 100644 index 0000000..8732500 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/validate_test.go @@ -0,0 +1,43 @@ +package volume + +import ( + "errors" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestValidateMount(t *testing.T) { + testDir, err := ioutil.TempDir("", "test-validate-mount") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []struct { + input mount.Mount + expected error + }{ + {mount.Mount{Type: mount.TypeVolume}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath, Source: "hello"}, nil}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, nil}, + {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath}, errMissingField("Source")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath, Source: testSourcePath, VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, + {mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, nil}, + {mount.Mount{Type: "invalid", Target: testDestinationPath}, errors.New("mount type unknown")}, + } + for i, x := range cases { + err := validateMountConfig(&x.input) + if err == nil && x.expected == nil { + continue + } + if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) { + t.Fatalf("expected %q, got %q, case: %d", x.expected, err, i) + } + } +} diff --git a/vendor/github.com/moby/moby/volume/validate_test_unix.go b/vendor/github.com/moby/moby/volume/validate_test_unix.go new file mode 100644 index 0000000..dd1de2f --- /dev/null +++ b/vendor/github.com/moby/moby/volume/validate_test_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package volume + +var ( + testDestinationPath = "/foo" + testSourcePath = "/foo" +) diff --git a/vendor/github.com/moby/moby/volume/validate_test_windows.go b/vendor/github.com/moby/moby/volume/validate_test_windows.go new file mode 100644 index 0000000..d5f86ac --- /dev/null +++ b/vendor/github.com/moby/moby/volume/validate_test_windows.go @@ -0,0 +1,6 @@ +package volume + +var ( + testDestinationPath = `c:\foo` + testSourcePath = `c:\foo` +) diff --git a/vendor/github.com/moby/moby/volume/volume.go b/vendor/github.com/moby/moby/volume/volume.go new file mode 100644 index 0000000..a941d8f --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume.go @@ -0,0 +1,333 @@ +package volume + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/pkg/errors" +) + +// DefaultDriverName is the driver name used for the driver +// implemented in the local package. +const DefaultDriverName = "local" + +// Scopes define if a volume has is cluster-wide (global) or local only. +// Scopes are returned by the volume driver when it is queried for capabilities and then set on a volume +const ( + LocalScope = "local" + GlobalScope = "global" +) + +// Driver is for creating and removing volumes. +type Driver interface { + // Name returns the name of the volume driver. + Name() string + // Create makes a new volume with the given id. + Create(name string, opts map[string]string) (Volume, error) + // Remove deletes the volume. + Remove(vol Volume) (err error) + // List lists all the volumes the driver has + List() ([]Volume, error) + // Get retrieves the volume with the requested name + Get(name string) (Volume, error) + // Scope returns the scope of the driver (e.g. `global` or `local`). + // Scope determines how the driver is handled at a cluster level + Scope() string +} + +// Capability defines a set of capabilities that a driver is able to handle. +type Capability struct { + // Scope is the scope of the driver, `global` or `local` + // A `global` scope indicates that the driver manages volumes across the cluster + // A `local` scope indicates that the driver only manages volumes resources local to the host + // Scope is declared by the driver + Scope string +} + +// Volume is a place to store data. It is backed by a specific driver, and can be mounted. +type Volume interface { + // Name returns the name of the volume + Name() string + // DriverName returns the name of the driver which owns this volume. + DriverName() string + // Path returns the absolute path to the volume. + Path() string + // Mount mounts the volume and returns the absolute path to + // where it can be consumed. + Mount(id string) (string, error) + // Unmount unmounts the volume when it is no longer in use. + Unmount(id string) error + // Status returns low-level status information about a volume + Status() map[string]interface{} +} + +// DetailedVolume wraps a Volume with user-defined labels, options, and cluster scope (e.g., `local` or `global`) +type DetailedVolume interface { + Labels() map[string]string + Options() map[string]string + Scope() string + Volume +} + +// MountPoint is the intersection point between a volume and a container. It +// specifies which volume is to be used and where inside a container it should +// be mounted. +type MountPoint struct { + // Source is the source path of the mount. + // E.g. `mount --bind /foo /bar`, `/foo` is the `Source`. + Source string + // Destination is the path relative to the container root (`/`) to the mount point + // It is where the `Source` is mounted to + Destination string + // RW is set to true when the mountpoint should be mounted as read-write + RW bool + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name + Name string + // Driver is the volume driver used to create the volume (if it is a volume) + Driver string + // Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount + Type mounttypes.Type `json:",omitempty"` + // Volume is the volume providing data to this mountpoint. + // This is nil unless `Type` is set to `TypeVolume` + Volume Volume `json:"-"` + + // Mode is the comma separated list of options supplied by the user when creating + // the bind/volume mount. + // Note Mode is not used on Windows + Mode string `json:"Relabel,omitempty"` // Originally field was `Relabel`" + + // Propagation describes how the mounts are propagated from the host into the + // mount point, and vice-versa. + // See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // Note Propagation is not used on Windows + Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string + + // Specifies if data should be copied from the container before the first mount + // Use a pointer here so we can tell if the user set this value explicitly + // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated + CopyData bool `json:"-"` + // ID is the opaque ID used to pass to the volume driver. + // This should be set by calls to `Mount` and unset by calls to `Unmount` + ID string `json:",omitempty"` + + // Sepc is a copy of the API request that created this mount. + Spec mounttypes.Mount +} + +// Setup sets up a mount point by either mounting the volume if it is +// configured, or creating the source directory if supplied. +func (m *MountPoint) Setup(mountLabel string, rootUID, rootGID int) (path string, err error) { + defer func() { + if err == nil { + if label.RelabelNeeded(m.Mode) { + if err = label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode)); err != nil { + path = "" + err = errors.Wrapf(err, "error setting label on mount source '%s'", m.Source) + return + } + } + } + return + }() + + if m.Volume != nil { + id := m.ID + if id == "" { + id = stringid.GenerateNonCryptoID() + } + path, err := m.Volume.Mount(id) + if err != nil { + return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) + } + m.ID = id + return path, nil + } + if len(m.Source) == 0 { + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") + } + // system.MkdirAll() produces an error if m.Source exists and is a file (not a directory), + if m.Type == mounttypes.TypeBind { + // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) + // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it + if err := idtools.MkdirAllNewAs(m.Source, 0755, rootUID, rootGID); err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err != syscall.ENOTDIR { + return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) + } + } + } + } + return m.Source, nil +} + +// Path returns the path of a volume in a mount point. +func (m *MountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + return m.Source +} + +// ParseVolumesFrom ensures that the supplied volumes-from is valid. +func ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !ValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. + if HasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +// ParseMountRaw parses a raw volume spec (e.g. `-v /foo:/bar:shared`) into a +// structured spec. Once the raw spec is parsed it relies on `ParseMountSpec` to +// validate the spec and create a MountPoint +func ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + arr, err := splitRawSpec(convertSlash(raw)) + if err != nil { + return nil, err + } + + var spec mounttypes.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if ValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. eg /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = arr[0] + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = arr[0] + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + + if !ValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + if filepath.IsAbs(spec.Source) { + spec.Type = mounttypes.TypeBind + } else { + spec.Type = mounttypes.TypeVolume + } + + spec.ReadOnly = !ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mounttypes.TypeVolume { + spec.VolumeOptions = &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mounttypes.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + if HasPropagation(mode) { + spec.BindOptions = &mounttypes.BindOptions{ + Propagation: GetPropagation(mode), + } + } + + mp, err := ParseMountSpec(spec, platformRawValidationOpts...) + if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} + +// ParseMountSpec reads a mount config, validates it, and configures a mountpoint from it. +func ParseMountSpec(cfg mounttypes.Mount, options ...func(*validateOpts)) (*MountPoint, error) { + if err := validateMountConfig(&cfg, options...); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: clean(convertSlash(cfg.Target)), + Type: cfg.Type, + Spec: cfg, + } + + switch cfg.Type { + case mounttypes.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = DefaultCopyMode + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mounttypes.TypeBind: + mp.Source = clean(convertSlash(cfg.Source)) + if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } else { + // If user did not specify a propagation mode, get + // default propagation mode. + mp.Propagation = DefaultPropagationMode + } + case mounttypes.TypeTmpfs: + // NOP + } + return mp, nil +} + +func errInvalidMode(mode string) error { + return fmt.Errorf("invalid mode: %v", mode) +} + +func errInvalidSpec(spec string) error { + return fmt.Errorf("invalid volume specification: '%s'", spec) +} diff --git a/vendor/github.com/moby/moby/volume/volume_copy.go b/vendor/github.com/moby/moby/volume/volume_copy.go new file mode 100644 index 0000000..77f06a0 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_copy.go @@ -0,0 +1,23 @@ +package volume + +import "strings" + +// {=isEnabled} +var copyModes = map[string]bool{ + "nocopy": false, +} + +func copyModeExists(mode string) bool { + _, exists := copyModes[mode] + return exists +} + +// GetCopyMode gets the copy mode from the mode string for mounts +func getCopyMode(mode string) (bool, bool) { + for _, o := range strings.Split(mode, ",") { + if isEnabled, exists := copyModes[o]; exists { + return isEnabled, true + } + } + return DefaultCopyMode, false +} diff --git a/vendor/github.com/moby/moby/volume/volume_copy_unix.go b/vendor/github.com/moby/moby/volume/volume_copy_unix.go new file mode 100644 index 0000000..ad66e17 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_copy_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package volume + +const ( + // DefaultCopyMode is the copy mode used by default for normal/named volumes + DefaultCopyMode = true +) diff --git a/vendor/github.com/moby/moby/volume/volume_copy_windows.go b/vendor/github.com/moby/moby/volume/volume_copy_windows.go new file mode 100644 index 0000000..798638c --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_copy_windows.go @@ -0,0 +1,6 @@ +package volume + +const ( + // DefaultCopyMode is the copy mode used by default for normal/named volumes + DefaultCopyMode = false +) diff --git a/vendor/github.com/moby/moby/volume/volume_linux.go b/vendor/github.com/moby/moby/volume/volume_linux.go new file mode 100644 index 0000000..d4b4d80 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_linux.go @@ -0,0 +1,56 @@ +// +build linux + +package volume + +import ( + "fmt" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string +// for mount(2). +func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { + var rawOpts []string + if readOnly { + rawOpts = append(rawOpts, "ro") + } + + if opt != nil && opt.Mode != 0 { + rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) + } + + if opt != nil && opt.SizeBytes != 0 { + // calculate suffix here, making this linux specific, but that is + // okay, since API is that way anyways. + + // we do this by finding the suffix that divides evenly into the + // value, returing the value itself, with no suffix, if it fails. + // + // For the most part, we don't enforce any semantic to this values. + // The operating system will usually align this and enforce minimum + // and maximums. + var ( + size = opt.SizeBytes + suffix string + ) + for _, r := range []struct { + suffix string + divisor int64 + }{ + {"g", 1 << 30}, + {"m", 1 << 20}, + {"k", 1 << 10}, + } { + if size%r.divisor == 0 { + size = size / r.divisor + suffix = r.suffix + break + } + } + + rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) + } + return strings.Join(rawOpts, ","), nil +} diff --git a/vendor/github.com/moby/moby/volume/volume_linux_test.go b/vendor/github.com/moby/moby/volume/volume_linux_test.go new file mode 100644 index 0000000..40ce552 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_linux_test.go @@ -0,0 +1,51 @@ +// +build linux + +package volume + +import ( + "strings" + "testing" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +func TestConvertTmpfsOptions(t *testing.T) { + type testCase struct { + opt mounttypes.TmpfsOptions + readOnly bool + expectedSubstrings []string + unexpectedSubstrings []string + } + cases := []testCase{ + { + opt: mounttypes.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700}, + readOnly: false, + expectedSubstrings: []string{"size=1m", "mode=700"}, + unexpectedSubstrings: []string{"ro"}, + }, + { + opt: mounttypes.TmpfsOptions{}, + readOnly: true, + expectedSubstrings: []string{"ro"}, + unexpectedSubstrings: []string{}, + }, + } + for _, c := range cases { + data, err := ConvertTmpfsOptions(&c.opt, c.readOnly) + if err != nil { + t.Fatalf("could not convert %+v (readOnly: %v) to string: %v", + c.opt, c.readOnly, err) + } + t.Logf("data=%q", data) + for _, s := range c.expectedSubstrings { + if !strings.Contains(data, s) { + t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c) + } + } + for _, s := range c.unexpectedSubstrings { + if strings.Contains(data, s) { + t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c) + } + } + } +} diff --git a/vendor/github.com/moby/moby/volume/volume_propagation_linux.go b/vendor/github.com/moby/moby/volume/volume_propagation_linux.go new file mode 100644 index 0000000..1de57ab --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_propagation_linux.go @@ -0,0 +1,47 @@ +// +build linux + +package volume + +import ( + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +// DefaultPropagationMode defines what propagation mode should be used by +// default if user has not specified one explicitly. +// propagation modes +const DefaultPropagationMode = mounttypes.PropagationRPrivate + +var propagationModes = map[mounttypes.Propagation]bool{ + mounttypes.PropagationPrivate: true, + mounttypes.PropagationRPrivate: true, + mounttypes.PropagationSlave: true, + mounttypes.PropagationRSlave: true, + mounttypes.PropagationShared: true, + mounttypes.PropagationRShared: true, +} + +// GetPropagation extracts and returns the mount propagation mode. If there +// are no specifications, then by default it is "private". +func GetPropagation(mode string) mounttypes.Propagation { + for _, o := range strings.Split(mode, ",") { + prop := mounttypes.Propagation(o) + if propagationModes[prop] { + return prop + } + } + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. Returns true if a valid propagation mode specifier is +// present, false otherwise. +func HasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if propagationModes[mounttypes.Propagation(o)] { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/volume/volume_propagation_linux_test.go b/vendor/github.com/moby/moby/volume/volume_propagation_linux_test.go new file mode 100644 index 0000000..46d0265 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_propagation_linux_test.go @@ -0,0 +1,65 @@ +// +build linux + +package volume + +import ( + "strings" + "testing" +) + +func TestParseMountRawPropagation(t *testing.T) { + var ( + valid []string + invalid map[string]string + ) + + valid = []string{ + "/hostPath:/containerPath:shared", + "/hostPath:/containerPath:rshared", + "/hostPath:/containerPath:slave", + "/hostPath:/containerPath:rslave", + "/hostPath:/containerPath:private", + "/hostPath:/containerPath:rprivate", + "/hostPath:/containerPath:ro,shared", + "/hostPath:/containerPath:ro,slave", + "/hostPath:/containerPath:ro,private", + "/hostPath:/containerPath:ro,z,shared", + "/hostPath:/containerPath:ro,Z,slave", + "/hostPath:/containerPath:Z,ro,slave", + "/hostPath:/containerPath:slave,Z,ro", + "/hostPath:/containerPath:Z,slave,ro", + "/hostPath:/containerPath:slave,ro,Z", + "/hostPath:/containerPath:rslave,ro,Z", + "/hostPath:/containerPath:ro,rshared,Z", + "/hostPath:/containerPath:ro,Z,rprivate", + } + invalid = map[string]string{ + "/path:/path:ro,rshared,rslave": `invalid mode`, + "/path:/path:ro,z,rshared,rslave": `invalid mode`, + "/path:shared": "invalid volume specification", + "/path:slave": "invalid volume specification", + "/path:private": "invalid volume specification", + "name:/absolute-path:shared": "invalid volume specification", + "name:/absolute-path:rshared": "invalid volume specification", + "name:/absolute-path:slave": "invalid volume specification", + "name:/absolute-path:rslave": "invalid volume specification", + "name:/absolute-path:private": "invalid volume specification", + "name:/absolute-path:rprivate": "invalid volume specification", + } + + for _, path := range valid { + if _, err := ParseMountRaw(path, "local"); err != nil { + t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ParseMountRaw(path, "local"); err == nil { + t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err %v", path, err) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } +} diff --git a/vendor/github.com/moby/moby/volume/volume_propagation_unsupported.go b/vendor/github.com/moby/moby/volume/volume_propagation_unsupported.go new file mode 100644 index 0000000..7311ffc --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_propagation_unsupported.go @@ -0,0 +1,24 @@ +// +build !linux + +package volume + +import mounttypes "github.com/docker/docker/api/types/mount" + +// DefaultPropagationMode is used only in linux. In other cases it returns +// empty string. +const DefaultPropagationMode mounttypes.Propagation = "" + +// propagation modes not supported on this platform. +var propagationModes = map[mounttypes.Propagation]bool{} + +// GetPropagation is not supported. Return empty string. +func GetPropagation(mode string) mounttypes.Propagation { + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. Returns true if a valid propagation mode specifier is +// present, false otherwise. +func HasPropagation(mode string) bool { + return false +} diff --git a/vendor/github.com/moby/moby/volume/volume_test.go b/vendor/github.com/moby/moby/volume/volume_test.go new file mode 100644 index 0000000..18018f4 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_test.go @@ -0,0 +1,269 @@ +package volume + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestParseMountRaw(t *testing.T) { + var ( + valid []string + invalid map[string]string + ) + + if runtime.GOOS == "windows" { + valid = []string{ + `d:\`, + `d:`, + `d:\path`, + `d:\path with space`, + // TODO Windows post TP5 - readonly support `d:\pathandmode:ro`, + `c:\:d:\`, + `c:\windows\:d:`, + `c:\windows:d:\s p a c e`, + `c:\windows:d:\s p a c e:RW`, + `c:\program files:d:\s p a c e i n h o s t d i r`, + `0123456789name:d:`, + `MiXeDcAsEnAmE:d:`, + `name:D:`, + `name:D::rW`, + `name:D::RW`, + // TODO Windows post TP5 - readonly support `name:D::RO`, + `c:/:d:/forward/slashes/are/good/too`, + // TODO Windows post TP5 - readonly support `c:/:d:/including with/spaces:ro`, + `c:\Windows`, // With capital + `c:\Program Files (x86)`, // With capitals and brackets + } + invalid = map[string]string{ + ``: "invalid volume specification: ", + `.`: "invalid volume specification: ", + `..\`: "invalid volume specification: ", + `c:\:..\`: "invalid volume specification: ", + `c:\:d:\:xyzzy`: "invalid volume specification: ", + `c:`: "cannot be `c:`", + `c:\`: "cannot be `c:`", + `c:\notexist:d:`: `source path does not exist`, + `c:\windows\system32\ntdll.dll:d:`: `source path must be a directory`, + `name<:d:`: `invalid volume specification`, + `name>:d:`: `invalid volume specification`, + `name::d:`: `invalid volume specification`, + `name":d:`: `invalid volume specification`, + `name\:d:`: `invalid volume specification`, + `name*:d:`: `invalid volume specification`, + `name|:d:`: `invalid volume specification`, + `name?:d:`: `invalid volume specification`, + `name/:d:`: `invalid volume specification`, + `d:\pathandmode:rw`: `invalid volume specification`, + `con:d:`: `cannot be a reserved word for Windows filenames`, + `PRN:d:`: `cannot be a reserved word for Windows filenames`, + `aUx:d:`: `cannot be a reserved word for Windows filenames`, + `nul:d:`: `cannot be a reserved word for Windows filenames`, + `com1:d:`: `cannot be a reserved word for Windows filenames`, + `com2:d:`: `cannot be a reserved word for Windows filenames`, + `com3:d:`: `cannot be a reserved word for Windows filenames`, + `com4:d:`: `cannot be a reserved word for Windows filenames`, + `com5:d:`: `cannot be a reserved word for Windows filenames`, + `com6:d:`: `cannot be a reserved word for Windows filenames`, + `com7:d:`: `cannot be a reserved word for Windows filenames`, + `com8:d:`: `cannot be a reserved word for Windows filenames`, + `com9:d:`: `cannot be a reserved word for Windows filenames`, + `lpt1:d:`: `cannot be a reserved word for Windows filenames`, + `lpt2:d:`: `cannot be a reserved word for Windows filenames`, + `lpt3:d:`: `cannot be a reserved word for Windows filenames`, + `lpt4:d:`: `cannot be a reserved word for Windows filenames`, + `lpt5:d:`: `cannot be a reserved word for Windows filenames`, + `lpt6:d:`: `cannot be a reserved word for Windows filenames`, + `lpt7:d:`: `cannot be a reserved word for Windows filenames`, + `lpt8:d:`: `cannot be a reserved word for Windows filenames`, + `lpt9:d:`: `cannot be a reserved word for Windows filenames`, + `c:\windows\system32\ntdll.dll`: `Only directories can be mapped on this platform`, + } + + } else { + valid = []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/rw:/ro", + } + invalid = map[string]string{ + "": "invalid volume specification", + "./": "mount path must be absolute", + "../": "mount path must be absolute", + "/:../": "mount path must be absolute", + "/:path": "mount path must be absolute", + ":": "invalid volume specification", + "/tmp:": "invalid volume specification", + ":test": "invalid volume specification", + ":/test": "invalid volume specification", + "tmp:": "invalid volume specification", + ":test:": "invalid volume specification", + "::": "invalid volume specification", + ":::": "invalid volume specification", + "/tmp:::": "invalid volume specification", + ":/tmp::": "invalid volume specification", + "/path:rw": "invalid volume specification", + "/path:ro": "invalid volume specification", + "/rw:rw": "invalid volume specification", + "path:ro": "invalid volume specification", + "/path:/path:sw": `invalid mode`, + "/path:/path:rwz": `invalid mode`, + } + } + + for _, path := range valid { + if _, err := ParseMountRaw(path, "local"); err != nil { + t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if mp, err := ParseMountRaw(path, "local"); err == nil { + t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } +} + +// testParseMountRaw is a structure used by TestParseMountRawSplit for +// specifying test cases for the ParseMountRaw() function. +type testParseMountRaw struct { + bind string + driver string + expDest string + expSource string + expName string + expDriver string + expRW bool + fail bool +} + +func TestParseMountRawSplit(t *testing.T) { + var cases []testParseMountRaw + if runtime.GOOS == "windows" { + cases = []testParseMountRaw{ + {`c:\:d:`, "local", `d:`, `c:\`, ``, "", true, false}, + {`c:\:d:\`, "local", `d:\`, `c:\`, ``, "", true, false}, + // TODO Windows post TP5 - Add readonly support {`c:\:d:\:ro`, "local", `d:\`, `c:\`, ``, "", false, false}, + {`c:\:d:\:rw`, "local", `d:\`, `c:\`, ``, "", true, false}, + {`c:\:d:\:foo`, "local", `d:\`, `c:\`, ``, "", false, true}, + {`name:d::rw`, "local", `d:`, ``, `name`, "local", true, false}, + {`name:d:`, "local", `d:`, ``, `name`, "local", true, false}, + // TODO Windows post TP5 - Add readonly support {`name:d::ro`, "local", `d:`, ``, `name`, "local", false, false}, + {`name:c:`, "", ``, ``, ``, "", true, true}, + {`driver/name:c:`, "", ``, ``, ``, "", true, true}, + } + } else { + cases = []testParseMountRaw{ + {"/tmp:/tmp1", "", "/tmp1", "/tmp", "", "", true, false}, + {"/tmp:/tmp2:ro", "", "/tmp2", "/tmp", "", "", false, false}, + {"/tmp:/tmp3:rw", "", "/tmp3", "/tmp", "", "", true, false}, + {"/tmp:/tmp4:foo", "", "", "", "", "", false, true}, + {"name:/named1", "", "/named1", "", "name", "", true, false}, + {"name:/named2", "external", "/named2", "", "name", "external", true, false}, + {"name:/named3:ro", "local", "/named3", "", "name", "local", false, false}, + {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "", true, false}, + {"/tmp:tmp", "", "", "", "", "", true, true}, + } + } + + for i, c := range cases { + t.Logf("case %d", i) + m, err := ParseMountRaw(c.bind, c.driver) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) + } + continue + } + + if m == nil || err != nil { + t.Fatalf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error()) + continue + } + + if m.Destination != c.expDest { + t.Fatalf("Expected destination '%s, was %s', for spec '%s'", c.expDest, m.Destination, c.bind) + } + + if m.Source != c.expSource { + t.Fatalf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind) + } + + if m.Name != c.expName { + t.Fatalf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind) + } + + if m.Driver != c.expDriver { + t.Fatalf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind) + } + + if m.RW != c.expRW { + t.Fatalf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind) + } + } +} + +func TestParseMountSpec(t *testing.T) { + type c struct { + input mount.Mount + expected MountPoint + } + testDir, err := ioutil.TempDir("", "test-mount-config") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []c{ + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, + } + + for i, c := range cases { + t.Logf("case %d", i) + mp, err := ParseMountSpec(c.input) + if err != nil { + t.Fatal(err) + } + + if c.expected.Type != mp.Type { + t.Fatalf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type) + } + if c.expected.Destination != mp.Destination { + t.Fatalf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination) + } + if c.expected.Source != mp.Source { + t.Fatalf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source) + } + if c.expected.RW != mp.RW { + t.Fatalf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW) + } + if c.expected.Propagation != mp.Propagation { + t.Fatalf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation) + } + if c.expected.Driver != mp.Driver { + t.Fatalf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver) + } + if c.expected.CopyData != mp.CopyData { + t.Fatalf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData) + } + } +} diff --git a/vendor/github.com/moby/moby/volume/volume_unix.go b/vendor/github.com/moby/moby/volume/volume_unix.go new file mode 100644 index 0000000..0256ebb --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_unix.go @@ -0,0 +1,138 @@ +// +build linux freebsd darwin solaris + +package volume + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +var platformRawValidationOpts = []func(o *validateOpts){ + // need to make sure to not error out if the bind source does not exist on unix + // this is supported for historical reasons, the path will be automatically + // created later. + func(o *validateOpts) { o.skipBindSourceCheck = true }, +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var labelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Only bind mounts and local volumes can be used in old versions of Docker. +func (m *MountPoint) BackwardsCompatible() bool { + return len(m.Source) > 0 || m.Driver == DefaultDriverName +} + +// HasResource checks whether the given absolute path for a container is in +// this mount point. If the relative path starts with `../` then the resource +// is outside of this mount point, but we can't simply check for this prefix +// because it misses `..` which is also outside of the mount, so check both. +func (m *MountPoint) HasResource(absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func IsVolumeNameValid(name string) (bool, error) { + return true, nil +} + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. +func ValidMountMode(mode string) bool { + if mode == "" { + return true + } + + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case labelModes[o]: + labelModeCount++ + case propagationModes[mounttypes.Propagation(o)]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. + if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 { + return false + } + return true +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +// If there are no specifications w.r.t read write mode, then by default +// it returns true. +func ReadWrite(mode string) bool { + if !ValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + return true +} + +func validateNotRoot(p string) error { + p = filepath.Clean(convertSlash(p)) + if p == "/" { + return fmt.Errorf("invalid specification: destination can't be '/'") + } + return nil +} + +func validateCopyMode(mode bool) error { + return nil +} + +func convertSlash(p string) string { + return filepath.ToSlash(p) +} + +func splitRawSpec(raw string) ([]string, error) { + if strings.Count(raw, ":") > 2 { + return nil, errInvalidSpec(raw) + } + + arr := strings.SplitN(raw, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(raw) + } + return arr, nil +} + +func clean(p string) string { + return filepath.Clean(p) +} + +func validateStat(fi os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/moby/moby/volume/volume_unsupported.go b/vendor/github.com/moby/moby/volume/volume_unsupported.go new file mode 100644 index 0000000..ff9d6af --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package volume + +import ( + "fmt" + "runtime" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string +// for mount(2). +func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { + return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) +} diff --git a/vendor/github.com/moby/moby/volume/volume_windows.go b/vendor/github.com/moby/moby/volume/volume_windows.go new file mode 100644 index 0000000..22f6fc7 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_windows.go @@ -0,0 +1,201 @@ +package volume + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" +) + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, +} + +// read-only modes +var roModes = map[string]bool{ + "ro": true, +} + +var platformRawValidationOpts = []func(*validateOpts){ + // filepath.IsAbs is weird on Windows: + // `c:` is not considered an absolute path + // `c:\` is considered an absolute path + // In any case, the regex matching below ensures absolute paths + // TODO: consider this a bug with filepath.IsAbs (?) + func(o *validateOpts) { o.skipAbsolutePathCheck = true }, +} + +const ( + // Spec should be in the format [source:]destination[:mode] + // + // Examples: c:\foo bar:d:rw + // c:\foo:d:\bar + // myname:d: + // d:\ + // + // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See + // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to + // test is https://regex-golang.appspot.com/assets/html/index.html + // + // Useful link for referencing named capturing groups: + // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex + // + // There are three match groups: source, destination and mode. + // + + // RXHostDir is the first option of a source + RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*` + // RXName is the second option of a source + RXName = `[^\\/:*?"<>|\r\n]+` + // RXReservedNames are reserved names not possible on Windows + RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` + + // RXSource is the combined possibilities for a source + RXSource = `((?P((` + RXHostDir + `)|(` + RXName + `))):)?` + + // Source. Can be either a host directory, a name, or omitted: + // HostDir: + // - Essentially using the folder solution from + // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html + // but adding case insensitivity. + // - Must be an absolute path such as c:\path + // - Can include spaces such as `c:\program files` + // - And then followed by a colon which is not in the capture group + // - And can be optional + // Name: + // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) + // - And then followed by a colon which is not in the capture group + // - And can be optional + + // RXDestination is the regex expression for the mount destination + RXDestination = `(?P([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?))` + // Destination (aka container path): + // - Variation on hostdir but can be a drive followed by colon as well + // - If a path, must be absolute. Can include spaces + // - Drive cannot be c: (explicitly checked in code, not RegEx) + + // RXMode is the regex expression for the mode of the mount + // Mode (optional): + // - Hopefully self explanatory in comparison to above regex's. + // - Colon is not in the capture group + RXMode = `(:(?P(?i)ro|rw))?` +) + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Windows volumes are never backwards compatible. +func (m *MountPoint) BackwardsCompatible() bool { + return false +} + +func splitRawSpec(raw string) ([]string, error) { + specExp := regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`) + match := specExp.FindStringSubmatch(strings.ToLower(raw)) + + // Must have something back + if len(match) == 0 { + return nil, errInvalidSpec(raw) + } + + var split []string + matchgroups := make(map[string]string) + // Pull out the sub expressions from the named capture groups + for i, name := range specExp.SubexpNames() { + matchgroups[name] = strings.ToLower(match[i]) + } + if source, exists := matchgroups["source"]; exists { + if source != "" { + split = append(split, source) + } + } + if destination, exists := matchgroups["destination"]; exists { + if destination != "" { + split = append(split, destination) + } + } + if mode, exists := matchgroups["mode"]; exists { + if mode != "" { + split = append(split, mode) + } + } + // Fix #26329. If the destination appears to be a file, and the source is null, + // it may be because we've fallen through the possible naming regex and hit a + // situation where the user intention was to map a file into a container through + // a local volume, but this is not supported by the platform. + if matchgroups["source"] == "" && matchgroups["destination"] != "" { + validName, err := IsVolumeNameValid(matchgroups["destination"]) + if err != nil { + return nil, err + } + if !validName { + if fi, err := os.Stat(matchgroups["destination"]); err == nil { + if !fi.IsDir() { + return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"]) + } + } + } + } + return split, nil +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func IsVolumeNameValid(name string) (bool, error) { + nameExp := regexp.MustCompile(`^` + RXName + `$`) + if !nameExp.MatchString(name) { + return false, nil + } + nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`) + if nameExp.MatchString(name) { + return false, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) + } + return true, nil +} + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. +func ValidMountMode(mode string) bool { + if mode == "" { + return true + } + return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)] +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +func ReadWrite(mode string) bool { + return rwModes[strings.ToLower(mode)] || mode == "" +} + +func validateNotRoot(p string) error { + p = strings.ToLower(convertSlash(p)) + if p == "c:" || p == `c:\` { + return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) + } + return nil +} + +func validateCopyMode(mode bool) error { + if mode { + return fmt.Errorf("Windows does not support copying image path content") + } + return nil +} + +func convertSlash(p string) string { + return filepath.FromSlash(p) +} + +func clean(p string) string { + if match, _ := regexp.MatchString("^[a-z]:$", p); match { + return p + } + return filepath.Clean(p) +} + +func validateStat(fi os.FileInfo) error { + if !fi.IsDir() { + return fmt.Errorf("source path must be a directory") + } + return nil +} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/pkg/errors/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml new file mode 100644 index 0000000..588ceca --- /dev/null +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -0,0 +1,11 @@ +language: go +go_import_path: github.com/pkg/errors +go: + - 1.4.3 + - 1.5.4 + - 1.6.2 + - 1.7.1 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 0000000..835ba3e --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 0000000..273db3c --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors provides simple error handling primitives. + +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. + +## Licence + +BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 0000000..a932ead --- /dev/null +++ b/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... + +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/vendor/github.com/pkg/errors/bench_test.go b/vendor/github.com/pkg/errors/bench_test.go new file mode 100644 index 0000000..0416a3c --- /dev/null +++ b/vendor/github.com/pkg/errors/bench_test.go @@ -0,0 +1,59 @@ +// +build go1.7 + +package errors + +import ( + "fmt" + "testing" + + stderrors "errors" +) + +func noErrors(at, depth int) error { + if at >= depth { + return stderrors.New("no error") + } + return noErrors(at+1, depth) +} +func yesErrors(at, depth int) error { + if at >= depth { + return New("ye error") + } + return yesErrors(at+1, depth) +} + +func BenchmarkErrors(b *testing.B) { + var toperr error + type run struct { + stack int + std bool + } + runs := []run{ + {10, false}, + {10, true}, + {100, false}, + {100, true}, + {1000, false}, + {1000, true}, + } + for _, r := range runs { + part := "pkg/errors" + if r.std { + part = "errors" + } + name := fmt.Sprintf("%s-stack-%d", part, r.stack) + b.Run(name, func(b *testing.B) { + var err error + f := yesErrors + if r.std { + f = noErrors + } + b.ReportAllocs() + for i := 0; i < b.N; i++ { + err = f(0, r.stack) + } + b.StopTimer() + toperr = err + }) + } +} diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 0000000..842ee80 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/errors_test.go b/vendor/github.com/pkg/errors/errors_test.go new file mode 100644 index 0000000..1d8c635 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors_test.go @@ -0,0 +1,226 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "reflect" + "testing" +) + +func TestNew(t *testing.T) { + tests := []struct { + err string + want error + }{ + {"", fmt.Errorf("")}, + {"foo", fmt.Errorf("foo")}, + {"foo", New("foo")}, + {"string with format specifiers: %v", errors.New("string with format specifiers: %v")}, + } + + for _, tt := range tests { + got := New(tt.err) + if got.Error() != tt.want.Error() { + t.Errorf("New.Error(): got: %q, want %q", got, tt.want) + } + } +} + +func TestWrapNil(t *testing.T) { + got := Wrap(nil, "no error") + if got != nil { + t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrap(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := Wrap(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +type nilError struct{} + +func (nilError) Error() string { return "nil error" } + +func TestCause(t *testing.T) { + x := New("error") + tests := []struct { + err error + want error + }{{ + // nil error is nil + err: nil, + want: nil, + }, { + // explicit nil error is nil + err: (error)(nil), + want: nil, + }, { + // typed nil is nil + err: (*nilError)(nil), + want: (*nilError)(nil), + }, { + // uncaused error is unaffected + err: io.EOF, + want: io.EOF, + }, { + // caused error returns cause + err: Wrap(io.EOF, "ignored"), + want: io.EOF, + }, { + err: x, // return from errors.New + want: x, + }, { + WithMessage(nil, "whoops"), + nil, + }, { + WithMessage(io.EOF, "whoops"), + io.EOF, + }, { + WithStack(nil), + nil, + }, { + WithStack(io.EOF), + io.EOF, + }} + + for i, tt := range tests { + got := Cause(tt.err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) + } + } +} + +func TestWrapfNil(t *testing.T) { + got := Wrapf(nil, "no error") + if got != nil { + t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrapf(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, + {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, + } + + for _, tt := range tests { + got := Wrapf(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +func TestErrorf(t *testing.T) { + tests := []struct { + err error + want string + }{ + {Errorf("read error without format specifiers"), "read error without format specifiers"}, + {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"}, + } + + for _, tt := range tests { + got := tt.err.Error() + if got != tt.want { + t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) + } + } +} + +func TestWithStackNil(t *testing.T) { + got := WithStack(nil) + if got != nil { + t.Errorf("WithStack(nil): got %#v, expected nil", got) + } +} + +func TestWithStack(t *testing.T) { + tests := []struct { + err error + want string + }{ + {io.EOF, "EOF"}, + {WithStack(io.EOF), "EOF"}, + } + + for _, tt := range tests { + got := WithStack(tt.err).Error() + if got != tt.want { + t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want) + } + } +} + +func TestWithMessageNil(t *testing.T) { + got := WithMessage(nil, "no error") + if got != nil { + t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWithMessage(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := WithMessage(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) + } + } + +} + +// errors.New, etc values are not expected to be compared by value +// but the change in errors#27 made them incomparable. Assert that +// various kinds of errors have a functional equality operator, even +// if the result of that equality is always false. +func TestErrorEquality(t *testing.T) { + vals := []error{ + nil, + io.EOF, + errors.New("EOF"), + New("EOF"), + Errorf("EOF"), + Wrap(io.EOF, "EOF"), + Wrapf(io.EOF, "EOF%d", 2), + WithMessage(nil, "whoops"), + WithMessage(io.EOF, "whoops"), + WithStack(io.EOF), + WithStack(nil), + } + + for i := range vals { + for j := range vals { + _ = vals[i] == vals[j] // mustn't panic + } + } +} diff --git a/vendor/github.com/pkg/errors/example_test.go b/vendor/github.com/pkg/errors/example_test.go new file mode 100644 index 0000000..c1fc13e --- /dev/null +++ b/vendor/github.com/pkg/errors/example_test.go @@ -0,0 +1,205 @@ +package errors_test + +import ( + "fmt" + + "github.com/pkg/errors" +) + +func ExampleNew() { + err := errors.New("whoops") + fmt.Println(err) + + // Output: whoops +} + +func ExampleNew_printf() { + err := errors.New("whoops") + fmt.Printf("%+v", err) + + // Example output: + // whoops + // github.com/pkg/errors_test.ExampleNew_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:17 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func ExampleWithMessage() { + cause := errors.New("whoops") + err := errors.WithMessage(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func ExampleWithStack() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Println(err) + + // Output: whoops +} + +func ExampleWithStack_printf() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Printf("%+v", err) + + // Example Output: + // whoops + // github.com/pkg/errors_test.ExampleWithStack_printf + // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55 + // testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 + // github.com/pkg/errors_test.ExampleWithStack_printf + // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56 + // testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 +} + +func ExampleWrap() { + cause := errors.New("whoops") + err := errors.Wrap(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func fn() error { + e1 := errors.New("error") + e2 := errors.Wrap(e1, "inner") + e3 := errors.Wrap(e2, "middle") + return errors.Wrap(e3, "outer") +} + +func ExampleCause() { + err := fn() + fmt.Println(err) + fmt.Println(errors.Cause(err)) + + // Output: outer: middle: inner: error + // error +} + +func ExampleWrap_extended() { + err := fn() + fmt.Printf("%+v\n", err) + + // Example output: + // error + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.ExampleCause_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:63 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:104 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer +} + +func ExampleWrapf() { + cause := errors.New("whoops") + err := errors.Wrapf(cause, "oh noes #%d", 2) + fmt.Println(err) + + // Output: oh noes #2: whoops +} + +func ExampleErrorf_extended() { + err := errors.Errorf("whoops: %s", "foo") + fmt.Printf("%+v", err) + + // Example output: + // whoops: foo + // github.com/pkg/errors_test.ExampleErrorf + // /home/dfc/src/github.com/pkg/errors/example_test.go:101 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:102 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func Example_stackTrace() { + type stackTracer interface { + StackTrace() errors.StackTrace + } + + err, ok := errors.Cause(fn()).(stackTracer) + if !ok { + panic("oops, err does not implement stackTracer") + } + + st := err.StackTrace() + fmt.Printf("%+v", st[0:2]) // top two frames + + // Example output: + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.Example_stackTrace + // /home/dfc/src/github.com/pkg/errors/example_test.go:127 +} + +func ExampleCause_printf() { + err := errors.Wrap(func() error { + return func() error { + return errors.Errorf("hello %s", fmt.Sprintf("world")) + }() + }(), "failed") + + fmt.Printf("%v", err) + + // Output: failed: hello world +} diff --git a/vendor/github.com/pkg/errors/format_test.go b/vendor/github.com/pkg/errors/format_test.go new file mode 100644 index 0000000..15fd7d8 --- /dev/null +++ b/vendor/github.com/pkg/errors/format_test.go @@ -0,0 +1,535 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "regexp" + "strings" + "testing" +) + +func TestFormatNew(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + New("error"), + "%s", + "error", + }, { + New("error"), + "%v", + "error", + }, { + New("error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatNew\n" + + "\t.+/github.com/pkg/errors/format_test.go:26", + }, { + New("error"), + "%q", + `"error"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatErrorf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Errorf("%s", "error"), + "%s", + "error", + }, { + Errorf("%s", "error"), + "%v", + "error", + }, { + Errorf("%s", "error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatErrorf\n" + + "\t.+/github.com/pkg/errors/format_test.go:56", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrap(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrap(New("error"), "error2"), + "%s", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%v", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:82", + }, { + Wrap(io.EOF, "error"), + "%s", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%v", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%+v", + "EOF\n" + + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:96", + }, { + Wrap(Wrap(io.EOF, "error1"), "error2"), + "%+v", + "EOF\n" + + "error1\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:103\n", + }, { + Wrap(New("error with space"), "context"), + "%q", + `"context: error with space"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrapf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrapf(io.EOF, "error%d", 2), + "%s", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%v", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%+v", + "EOF\n" + + "error2\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:134", + }, { + Wrapf(New("error"), "error%d", 2), + "%s", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%v", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:149", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWithStack(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithStack(io.EOF), + "%s", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%v", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%+v", + []string{"EOF", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:175"}, + }, { + WithStack(New("error")), + "%s", + []string{"error"}, + }, { + WithStack(New("error")), + "%v", + []string{"error"}, + }, { + WithStack(New("error")), + "%+v", + []string{"error", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189"}, + }, { + WithStack(WithStack(io.EOF)), + "%+v", + []string{"EOF", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197"}, + }, { + WithStack(WithStack(Wrapf(io.EOF, "message"))), + "%+v", + []string{"EOF", + "message", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205"}, + }, { + WithStack(Errorf("error%d", 1)), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:216", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:216"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatWithMessage(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithMessage(New("error"), "error2"), + "%s", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%v", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%+v", + []string{ + "error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:244", + "error2"}, + }, { + WithMessage(io.EOF, "addition1"), + "%s", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%v", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%+v", + []string{"EOF", "addition1"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%v", + []string{"addition2: addition1: EOF"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%+v", + []string{"EOF", "addition1", "addition2"}, + }, { + Wrap(WithMessage(io.EOF, "error1"), "error2"), + "%+v", + []string{"EOF", "error1", "error2", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:272"}, + }, { + WithMessage(Errorf("error%d", 1), "error2"), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:278", + "error2"}, + }, { + WithMessage(WithStack(io.EOF), "error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:285", + "error"}, + }, { + WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "inside-error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "outside-error"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatGeneric(t *testing.T) { + starts := []struct { + err error + want []string + }{ + {New("new-error"), []string{ + "new-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:315"}, + }, {Errorf("errorf-error"), []string{ + "errorf-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:319"}, + }, {errors.New("errors-new-error"), []string{ + "errors-new-error"}, + }, + } + + wrappers := []wrapper{ + { + func(err error) error { return WithMessage(err, "with-message") }, + []string{"with-message"}, + }, { + func(err error) error { return WithStack(err) }, + []string{ + "github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" + + ".+/github.com/pkg/errors/format_test.go:333", + }, + }, { + func(err error) error { return Wrap(err, "wrap-error") }, + []string{ + "wrap-error", + "github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" + + ".+/github.com/pkg/errors/format_test.go:339", + }, + }, { + func(err error) error { return Wrapf(err, "wrapf-error%d", 1) }, + []string{ + "wrapf-error1", + "github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" + + ".+/github.com/pkg/errors/format_test.go:346", + }, + }, + } + + for s := range starts { + err := starts[s].err + want := starts[s].want + testFormatCompleteCompare(t, s, err, "%+v", want, false) + testGenericRecursive(t, err, want, wrappers, 3) + } +} + +func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) { + got := fmt.Sprintf(format, arg) + gotLines := strings.SplitN(got, "\n", -1) + wantLines := strings.SplitN(want, "\n", -1) + + if len(wantLines) > len(gotLines) { + t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want) + return + } + + for i, w := range wantLines { + match, err := regexp.MatchString(w, gotLines[i]) + if err != nil { + t.Fatal(err) + } + if !match { + t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want) + } + } +} + +var stackLineR = regexp.MustCompile(`\.`) + +// parseBlocks parses input into a slice, where: +// - incase entry contains a newline, its a stacktrace +// - incase entry contains no newline, its a solo line. +// +// Detecting stack boundaries only works incase the WithStack-calls are +// to be found on the same line, thats why it is optionally here. +// +// Example use: +// +// for _, e := range blocks { +// if strings.ContainsAny(e, "\n") { +// // Match as stack +// } else { +// // Match as line +// } +// } +// +func parseBlocks(input string, detectStackboundaries bool) ([]string, error) { + var blocks []string + + stack := "" + wasStack := false + lines := map[string]bool{} // already found lines + + for _, l := range strings.Split(input, "\n") { + isStackLine := stackLineR.MatchString(l) + + switch { + case !isStackLine && wasStack: + blocks = append(blocks, stack, l) + stack = "" + lines = map[string]bool{} + case isStackLine: + if wasStack { + // Detecting two stacks after another, possible cause lines match in + // our tests due to WithStack(WithStack(io.EOF)) on same line. + if detectStackboundaries { + if lines[l] { + if len(stack) == 0 { + return nil, errors.New("len of block must not be zero here") + } + + blocks = append(blocks, stack) + stack = l + lines = map[string]bool{l: true} + continue + } + } + + stack = stack + "\n" + l + } else { + stack = l + } + lines[l] = true + case !isStackLine && !wasStack: + blocks = append(blocks, l) + default: + return nil, errors.New("must not happen") + } + + wasStack = isStackLine + } + + // Use up stack + if stack != "" { + blocks = append(blocks, stack) + } + return blocks, nil +} + +func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) { + gotStr := fmt.Sprintf(format, arg) + + got, err := parseBlocks(gotStr, detectStackBoundaries) + if err != nil { + t.Fatal(err) + } + + if len(got) != len(want) { + t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q", + n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr) + } + + for i := range got { + if strings.ContainsAny(want[i], "\n") { + // Match as stack + match, err := regexp.MatchString(want[i], got[i]) + if err != nil { + t.Fatal(err) + } + if !match { + t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n", + n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want)) + } + } else { + // Match as message + if got[i] != want[i] { + t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i]) + } + } + } +} + +type wrapper struct { + wrap func(err error) error + want []string +} + +func prettyBlocks(blocks []string, prefix ...string) string { + var out []string + + for _, b := range blocks { + out = append(out, fmt.Sprintf("%v", b)) + } + + return " " + strings.Join(out, "\n ") +} + +func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) { + if len(beforeWant) == 0 { + panic("beforeWant must not be empty") + } + for _, w := range list { + if len(w.want) == 0 { + panic("want must not be empty") + } + + err := w.wrap(beforeErr) + + // Copy required cause append(beforeWant, ..) modified beforeWant subtly. + beforeCopy := make([]string, len(beforeWant)) + copy(beforeCopy, beforeWant) + + beforeWant := beforeCopy + last := len(beforeWant) - 1 + var want []string + + // Merge two stacks behind each other. + if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") { + want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...) + } else { + want = append(beforeWant, w.want...) + } + + testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false) + if maxDepth > 0 { + testGenericRecursive(t, err, want, list, maxDepth-1) + } + } +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 0000000..6b1f289 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,178 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + fmt.Fprintf(s, "\n%+v", f) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%v", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +func trimGOPATH(name, file string) string { + // Here we want to get the source file path relative to the compile time + // GOPATH. As of Go 1.6.x there is no direct way to know the compiled + // GOPATH at runtime, but we can infer the number of path segments in the + // GOPATH. We note that fn.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired output. We count separators from the end of the file + // path until it finds two more than in the function name and then move + // one character forward to preserve the initial path segment without a + // leading separator. + const sep = "/" + goal := strings.Count(name, sep) + 2 + i := len(file) + for n := 0; n < goal; n++ { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + // not enough separators found, set i so that the slice expression + // below leaves file unmodified + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + file = file[i+len(sep):] + return file +} diff --git a/vendor/github.com/pkg/errors/stack_test.go b/vendor/github.com/pkg/errors/stack_test.go new file mode 100644 index 0000000..510c27a --- /dev/null +++ b/vendor/github.com/pkg/errors/stack_test.go @@ -0,0 +1,292 @@ +package errors + +import ( + "fmt" + "runtime" + "testing" +) + +var initpc, _, _, _ = runtime.Caller(0) + +func TestFrameLine(t *testing.T) { + var tests = []struct { + Frame + want int + }{{ + Frame(initpc), + 9, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) + }(), + 20, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(1) + return Frame(pc) + }(), + 28, + }, { + Frame(0), // invalid PC + 0, + }} + + for _, tt := range tests { + got := tt.Frame.line() + want := tt.want + if want != got { + t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got) + } + } +} + +type X struct{} + +func (x X) val() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func (x *X) ptr() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func TestFrameFormat(t *testing.T) { + var tests = []struct { + Frame + format string + want string + }{{ + Frame(initpc), + "%s", + "stack_test.go", + }, { + Frame(initpc), + "%+s", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go", + }, { + Frame(0), + "%s", + "unknown", + }, { + Frame(0), + "%+s", + "unknown", + }, { + Frame(initpc), + "%d", + "9", + }, { + Frame(0), + "%d", + "0", + }, { + Frame(initpc), + "%n", + "init", + }, { + func() Frame { + var x X + return x.ptr() + }(), + "%n", + `\(\*X\).ptr`, + }, { + func() Frame { + var x X + return x.val() + }(), + "%n", + "X.val", + }, { + Frame(0), + "%n", + "", + }, { + Frame(initpc), + "%v", + "stack_test.go:9", + }, { + Frame(initpc), + "%+v", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go:9", + }, { + Frame(0), + "%v", + "unknown:0", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.Frame, tt.format, tt.want) + } +} + +func TestFuncname(t *testing.T) { + tests := []struct { + name, want string + }{ + {"", ""}, + {"runtime.main", "main"}, + {"github.com/pkg/errors.funcname", "funcname"}, + {"funcname", "funcname"}, + {"io.copyBuffer", "copyBuffer"}, + {"main.(*R).Write", "(*R).Write"}, + } + + for _, tt := range tests { + got := funcname(tt.name) + want := tt.want + if got != want { + t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got) + } + } +} + +func TestTrimGOPATH(t *testing.T) { + var tests = []struct { + Frame + want string + }{{ + Frame(initpc), + "github.com/pkg/errors/stack_test.go", + }} + + for i, tt := range tests { + pc := tt.Frame.pc() + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + got := trimGOPATH(fn.Name(), file) + testFormatRegexp(t, i, got, "%s", tt.want) + } +} + +func TestStackTrace(t *testing.T) { + tests := []struct { + err error + want []string + }{{ + New("ooh"), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:172", + }, + }, { + Wrap(New("ooh"), "ahh"), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New + }, + }, { + Cause(Wrap(New("ooh"), "ahh")), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New + }, + }, { + func() error { return New("ooh") }(), []string{ + `github.com/pkg/errors.(func·009|TestStackTrace.func1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller + }, + }, { + Cause(func() error { + return func() error { + return Errorf("hello %s", fmt.Sprintf("world")) + }() + }()), []string{ + `github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf + `github.com/pkg/errors.(func·011|TestStackTrace.func2)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller + }, + }} + for i, tt := range tests { + x, ok := tt.err.(interface { + StackTrace() StackTrace + }) + if !ok { + t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err) + continue + } + st := x.StackTrace() + for j, want := range tt.want { + testFormatRegexp(t, i, st[j], "%+v", want) + } + } +} + +func stackTrace() StackTrace { + const depth = 8 + var pcs [depth]uintptr + n := runtime.Callers(1, pcs[:]) + var st stack = pcs[0:n] + return st.StackTrace() +} + +func TestStackTraceFormat(t *testing.T) { + tests := []struct { + StackTrace + format string + want string + }{{ + nil, + "%s", + `\[\]`, + }, { + nil, + "%v", + `\[\]`, + }, { + nil, + "%+v", + "", + }, { + nil, + "%#v", + `\[\]errors.Frame\(nil\)`, + }, { + make(StackTrace, 0), + "%s", + `\[\]`, + }, { + make(StackTrace, 0), + "%v", + `\[\]`, + }, { + make(StackTrace, 0), + "%+v", + "", + }, { + make(StackTrace, 0), + "%#v", + `\[\]errors.Frame{}`, + }, { + stackTrace()[:2], + "%s", + `\[stack_test.go stack_test.go\]`, + }, { + stackTrace()[:2], + "%v", + `\[stack_test.go:225 stack_test.go:272\]`, + }, { + stackTrace()[:2], + "%+v", + "\n" + + "github.com/pkg/errors.stackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:225\n" + + "github.com/pkg/errors.TestStackTraceFormat\n" + + "\t.+/github.com/pkg/errors/stack_test.go:276", + }, { + stackTrace()[:2], + "%#v", + `\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want) + } +} diff --git a/vendor/github.com/pmezard/go-difflib/.travis.yml b/vendor/github.com/pmezard/go-difflib/.travis.yml new file mode 100644 index 0000000..90c9c6f --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.5 + - tip + diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 0000000..c67dad6 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/README.md b/vendor/github.com/pmezard/go-difflib/README.md new file mode 100644 index 0000000..e87f307 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/README.md @@ -0,0 +1,50 @@ +go-difflib +========== + +[![Build Status](https://travis-ci.org/pmezard/go-difflib.png?branch=master)](https://travis-ci.org/pmezard/go-difflib) +[![GoDoc](https://godoc.org/github.com/pmezard/go-difflib/difflib?status.svg)](https://godoc.org/github.com/pmezard/go-difflib/difflib) + +Go-difflib is a partial port of python 3 difflib package. Its main goal +was to make unified and context diff available in pure Go, mostly for +testing purposes. + +The following class and functions (and related tests) have be ported: + +* `SequenceMatcher` +* `unified_diff()` +* `context_diff()` + +## Installation + +```bash +$ go get github.com/pmezard/go-difflib/difflib +``` + +### Quick Start + +Diffs are configured with Unified (or ContextDiff) structures, and can +be output to an io.Writer or returned as a string. + +```Go +diff := UnifiedDiff{ + A: difflib.SplitLines("foo\nbar\n"), + B: difflib.SplitLines("foo\nbaz\n"), + FromFile: "Original", + ToFile: "Current", + Context: 3, +} +text, _ := GetUnifiedDiffString(diff) +fmt.Printf(text) +``` + +would output: + +``` +--- Original ++++ Current +@@ -1,3 +1,3 @@ + foo +-bar ++baz +``` + diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 0000000..003e99f --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,772 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "

" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. +func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). +func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). +func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. +func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return string(w.Bytes()), err +} + +// Convert range to the "ed" format. +func formatRangeContext(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + if length <= 1 { + return fmt.Sprintf("%d", beginning) + } + return fmt.Sprintf("%d,%d", beginning, beginning+length-1) +} + +type ContextDiff UnifiedDiff + +// Compare two sequences of lines; generate the delta as a context diff. +// +// Context diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with *** or ---) are +// created with a trailing newline. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The context diff format normally has a header for filenames and +// modification times. Any or all of these may be specified using +// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +// If not specified, the strings default to blanks. +func WriteContextDiff(writer io.Writer, diff ContextDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + var diffErr error + wf := func(format string, args ...interface{}) { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + if diffErr == nil && err != nil { + diffErr = err + } + } + ws := func(s string) { + _, err := buf.WriteString(s) + if diffErr == nil && err != nil { + diffErr = err + } + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + prefix := map[byte]string{ + 'i': "+ ", + 'd': "- ", + 'r': "! ", + 'e': " ", + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } + } + + first, last := g[0], g[len(g)-1] + ws("***************" + diff.Eol) + + range1 := formatRangeContext(first.I1, last.I2) + wf("*** %s ****%s", range1, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'd' { + for _, cc := range g { + if cc.Tag == 'i' { + continue + } + for _, line := range diff.A[cc.I1:cc.I2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + + range2 := formatRangeContext(first.J1, last.J2) + wf("--- %s ----%s", range2, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'i' { + for _, cc := range g { + if cc.Tag == 'd' { + continue + } + for _, line := range diff.B[cc.J1:cc.J2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + } + return diffErr +} + +// Like WriteContextDiff but returns the diff a string. +func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go new file mode 100644 index 0000000..d725119 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go @@ -0,0 +1,426 @@ +package difflib + +import ( + "bytes" + "fmt" + "math" + "reflect" + "strings" + "testing" +) + +func assertAlmostEqual(t *testing.T, a, b float64, places int) { + if math.Abs(a-b) > math.Pow10(-places) { + t.Errorf("%.7f != %.7f", a, b) + } +} + +func assertEqual(t *testing.T, a, b interface{}) { + if !reflect.DeepEqual(a, b) { + t.Errorf("%v != %v", a, b) + } +} + +func splitChars(s string) []string { + chars := make([]string, 0, len(s)) + // Assume ASCII inputs + for i := 0; i != len(s); i++ { + chars = append(chars, string(s[i])) + } + return chars +} + +func TestSequenceMatcherRatio(t *testing.T) { + s := NewMatcher(splitChars("abcd"), splitChars("bcde")) + assertEqual(t, s.Ratio(), 0.75) + assertEqual(t, s.QuickRatio(), 0.75) + assertEqual(t, s.RealQuickRatio(), 1.0) +} + +func TestGetOptCodes(t *testing.T) { + a := "qabxcd" + b := "abycdf" + s := NewMatcher(splitChars(a), splitChars(b)) + w := &bytes.Buffer{} + for _, op := range s.GetOpCodes() { + fmt.Fprintf(w, "%s a[%d:%d], (%s) b[%d:%d] (%s)\n", string(op.Tag), + op.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2]) + } + result := string(w.Bytes()) + expected := `d a[0:1], (q) b[0:0] () +e a[1:3], (ab) b[0:2] (ab) +r a[3:4], (x) b[2:3] (y) +e a[4:6], (cd) b[3:5] (cd) +i a[6:6], () b[5:6] (f) +` + if expected != result { + t.Errorf("unexpected op codes: \n%s", result) + } +} + +func TestGroupedOpCodes(t *testing.T) { + a := []string{} + for i := 0; i != 39; i++ { + a = append(a, fmt.Sprintf("%02d", i)) + } + b := []string{} + b = append(b, a[:8]...) + b = append(b, " i") + b = append(b, a[8:19]...) + b = append(b, " x") + b = append(b, a[20:22]...) + b = append(b, a[27:34]...) + b = append(b, " y") + b = append(b, a[35:]...) + s := NewMatcher(a, b) + w := &bytes.Buffer{} + for _, g := range s.GetGroupedOpCodes(-1) { + fmt.Fprintf(w, "group\n") + for _, op := range g { + fmt.Fprintf(w, " %s, %d, %d, %d, %d\n", string(op.Tag), + op.I1, op.I2, op.J1, op.J2) + } + } + result := string(w.Bytes()) + expected := `group + e, 5, 8, 5, 8 + i, 8, 8, 8, 9 + e, 8, 11, 9, 12 +group + e, 16, 19, 17, 20 + r, 19, 20, 20, 21 + e, 20, 22, 21, 23 + d, 22, 27, 23, 23 + e, 27, 30, 23, 26 +group + e, 31, 34, 27, 30 + r, 34, 35, 30, 31 + e, 35, 38, 31, 34 +` + if expected != result { + t.Errorf("unexpected op codes: \n%s", result) + } +} + +func ExampleGetUnifiedDiffCode() { + a := `one +two +three +four +fmt.Printf("%s,%T",a,b)` + b := `zero +one +three +four` + diff := UnifiedDiff{ + A: SplitLines(a), + B: SplitLines(b), + FromFile: "Original", + FromDate: "2005-01-26 23:30:50", + ToFile: "Current", + ToDate: "2010-04-02 10:20:52", + Context: 3, + } + result, _ := GetUnifiedDiffString(diff) + fmt.Println(strings.Replace(result, "\t", " ", -1)) + // Output: + // --- Original 2005-01-26 23:30:50 + // +++ Current 2010-04-02 10:20:52 + // @@ -1,5 +1,4 @@ + // +zero + // one + // -two + // three + // four + // -fmt.Printf("%s,%T",a,b) +} + +func ExampleGetContextDiffCode() { + a := `one +two +three +four +fmt.Printf("%s,%T",a,b)` + b := `zero +one +tree +four` + diff := ContextDiff{ + A: SplitLines(a), + B: SplitLines(b), + FromFile: "Original", + ToFile: "Current", + Context: 3, + Eol: "\n", + } + result, _ := GetContextDiffString(diff) + fmt.Print(strings.Replace(result, "\t", " ", -1)) + // Output: + // *** Original + // --- Current + // *************** + // *** 1,5 **** + // one + // ! two + // ! three + // four + // - fmt.Printf("%s,%T",a,b) + // --- 1,4 ---- + // + zero + // one + // ! tree + // four +} + +func ExampleGetContextDiffString() { + a := `one +two +three +four` + b := `zero +one +tree +four` + diff := ContextDiff{ + A: SplitLines(a), + B: SplitLines(b), + FromFile: "Original", + ToFile: "Current", + Context: 3, + Eol: "\n", + } + result, _ := GetContextDiffString(diff) + fmt.Printf(strings.Replace(result, "\t", " ", -1)) + // Output: + // *** Original + // --- Current + // *************** + // *** 1,4 **** + // one + // ! two + // ! three + // four + // --- 1,4 ---- + // + zero + // one + // ! tree + // four +} + +func rep(s string, count int) string { + return strings.Repeat(s, count) +} + +func TestWithAsciiOneInsert(t *testing.T) { + sm := NewMatcher(splitChars(rep("b", 100)), + splitChars("a"+rep("b", 100))) + assertAlmostEqual(t, sm.Ratio(), 0.995, 3) + assertEqual(t, sm.GetOpCodes(), + []OpCode{{'i', 0, 0, 0, 1}, {'e', 0, 100, 1, 101}}) + assertEqual(t, len(sm.bPopular), 0) + + sm = NewMatcher(splitChars(rep("b", 100)), + splitChars(rep("b", 50)+"a"+rep("b", 50))) + assertAlmostEqual(t, sm.Ratio(), 0.995, 3) + assertEqual(t, sm.GetOpCodes(), + []OpCode{{'e', 0, 50, 0, 50}, {'i', 50, 50, 50, 51}, {'e', 50, 100, 51, 101}}) + assertEqual(t, len(sm.bPopular), 0) +} + +func TestWithAsciiOnDelete(t *testing.T) { + sm := NewMatcher(splitChars(rep("a", 40)+"c"+rep("b", 40)), + splitChars(rep("a", 40)+rep("b", 40))) + assertAlmostEqual(t, sm.Ratio(), 0.994, 3) + assertEqual(t, sm.GetOpCodes(), + []OpCode{{'e', 0, 40, 0, 40}, {'d', 40, 41, 40, 40}, {'e', 41, 81, 40, 80}}) +} + +func TestWithAsciiBJunk(t *testing.T) { + isJunk := func(s string) bool { + return s == " " + } + sm := NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), + splitChars(rep("a", 44)+rep("b", 40)), true, isJunk) + assertEqual(t, sm.bJunk, map[string]struct{}{}) + + sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), + splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk) + assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}}) + + isJunk = func(s string) bool { + return s == " " || s == "b" + } + sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), + splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk) + assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}, "b": struct{}{}}) +} + +func TestSFBugsRatioForNullSeqn(t *testing.T) { + sm := NewMatcher(nil, nil) + assertEqual(t, sm.Ratio(), 1.0) + assertEqual(t, sm.QuickRatio(), 1.0) + assertEqual(t, sm.RealQuickRatio(), 1.0) +} + +func TestSFBugsComparingEmptyLists(t *testing.T) { + groups := NewMatcher(nil, nil).GetGroupedOpCodes(-1) + assertEqual(t, len(groups), 0) + diff := UnifiedDiff{ + FromFile: "Original", + ToFile: "Current", + Context: 3, + } + result, err := GetUnifiedDiffString(diff) + assertEqual(t, err, nil) + assertEqual(t, result, "") +} + +func TestOutputFormatRangeFormatUnified(t *testing.T) { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + // + // Each field shall be of the form: + // %1d", if the range contains exactly one line, + // and: + // "%1d,%1d", , otherwise. + // If a range is empty, its beginning line number shall be the number of + // the line just before the range, or 0 if the empty range starts the file. + fm := formatRangeUnified + assertEqual(t, fm(3, 3), "3,0") + assertEqual(t, fm(3, 4), "4") + assertEqual(t, fm(3, 5), "4,2") + assertEqual(t, fm(3, 6), "4,3") + assertEqual(t, fm(0, 0), "0,0") +} + +func TestOutputFormatRangeFormatContext(t *testing.T) { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + // + // The range of lines in file1 shall be written in the following format + // if the range contains two or more lines: + // "*** %d,%d ****\n", , + // and the following format otherwise: + // "*** %d ****\n", + // The ending line number of an empty range shall be the number of the preceding line, + // or 0 if the range is at the start of the file. + // + // Next, the range of lines in file2 shall be written in the following format + // if the range contains two or more lines: + // "--- %d,%d ----\n", , + // and the following format otherwise: + // "--- %d ----\n", + fm := formatRangeContext + assertEqual(t, fm(3, 3), "3") + assertEqual(t, fm(3, 4), "4") + assertEqual(t, fm(3, 5), "4,5") + assertEqual(t, fm(3, 6), "4,6") + assertEqual(t, fm(0, 0), "0") +} + +func TestOutputFormatTabDelimiter(t *testing.T) { + diff := UnifiedDiff{ + A: splitChars("one"), + B: splitChars("two"), + FromFile: "Original", + FromDate: "2005-01-26 23:30:50", + ToFile: "Current", + ToDate: "2010-04-12 10:20:52", + Eol: "\n", + } + ud, err := GetUnifiedDiffString(diff) + assertEqual(t, err, nil) + assertEqual(t, SplitLines(ud)[:2], []string{ + "--- Original\t2005-01-26 23:30:50\n", + "+++ Current\t2010-04-12 10:20:52\n", + }) + cd, err := GetContextDiffString(ContextDiff(diff)) + assertEqual(t, err, nil) + assertEqual(t, SplitLines(cd)[:2], []string{ + "*** Original\t2005-01-26 23:30:50\n", + "--- Current\t2010-04-12 10:20:52\n", + }) +} + +func TestOutputFormatNoTrailingTabOnEmptyFiledate(t *testing.T) { + diff := UnifiedDiff{ + A: splitChars("one"), + B: splitChars("two"), + FromFile: "Original", + ToFile: "Current", + Eol: "\n", + } + ud, err := GetUnifiedDiffString(diff) + assertEqual(t, err, nil) + assertEqual(t, SplitLines(ud)[:2], []string{"--- Original\n", "+++ Current\n"}) + + cd, err := GetContextDiffString(ContextDiff(diff)) + assertEqual(t, err, nil) + assertEqual(t, SplitLines(cd)[:2], []string{"*** Original\n", "--- Current\n"}) +} + +func TestOmitFilenames(t *testing.T) { + diff := UnifiedDiff{ + A: SplitLines("o\nn\ne\n"), + B: SplitLines("t\nw\no\n"), + Eol: "\n", + } + ud, err := GetUnifiedDiffString(diff) + assertEqual(t, err, nil) + assertEqual(t, SplitLines(ud), []string{ + "@@ -0,0 +1,2 @@\n", + "+t\n", + "+w\n", + "@@ -2,2 +3,0 @@\n", + "-n\n", + "-e\n", + "\n", + }) + + cd, err := GetContextDiffString(ContextDiff(diff)) + assertEqual(t, err, nil) + assertEqual(t, SplitLines(cd), []string{ + "***************\n", + "*** 0 ****\n", + "--- 1,2 ----\n", + "+ t\n", + "+ w\n", + "***************\n", + "*** 2,3 ****\n", + "- n\n", + "- e\n", + "--- 3 ----\n", + "\n", + }) +} + +func TestSplitLines(t *testing.T) { + allTests := []struct { + input string + want []string + }{ + {"foo", []string{"foo\n"}}, + {"foo\nbar", []string{"foo\n", "bar\n"}}, + {"foo\nbar\n", []string{"foo\n", "bar\n", "\n"}}, + } + for _, test := range allTests { + assertEqual(t, SplitLines(test.input), test.want) + } +} + +func benchmarkSplitLines(b *testing.B, count int) { + str := strings.Repeat("foo\n", count) + + b.ResetTimer() + + n := 0 + for i := 0; i < b.N; i++ { + n += len(SplitLines(str)) + } +} + +func BenchmarkSplitLines100(b *testing.B) { + benchmarkSplitLines(b, 100) +} + +func BenchmarkSplitLines10000(b *testing.B) { + benchmarkSplitLines(b, 10000) +} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore new file mode 100644 index 0000000..66be63a --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml new file mode 100644 index 0000000..a23296a --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - tip +env: + - GOMAXPROCS=4 GORACE=halt_on_error=1 +install: + - go get github.com/stretchr/testify/assert + - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2 + - go get golang.org/x/sys/unix + - go get golang.org/x/sys/windows +script: + - go test -race -v ./... diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md new file mode 100644 index 0000000..1bd1deb --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,123 @@ +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE new file mode 100644 index 0000000..f090cb4 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md new file mode 100644 index 0000000..f77819b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -0,0 +1,511 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). + +**Are you interested in assisting in maintaining Logrus?** Currently I have a +lot of obligations, and I am unable to provide Logrus with the maintainership it +needs. If you'd like to help, please reach out to me at `simon at author's +username dot com`. + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | +| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | +| [Application Insights](https://github.com/jjcollinge/logrus-appinsights) | Hook for logging to [Application Insights](https://azure.microsoft.com/en-us/services/application-insights/) +| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage| +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | +| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| +| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka | +| [Kafka REST Proxy](https://github.com/Nordstrom/logrus-kafka-rest-proxy) | Hook for logging to [Kafka REST Proxy](https://docs.confluent.io/current/kafka-rest/docs) | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) | +| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | +| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. | +| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) | +| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t*testing.T){ + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go new file mode 100644 index 0000000..8af9063 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -0,0 +1,64 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke +// all handlers. The handlers will also be invoked when any Fatal log entry is +// made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} diff --git a/vendor/github.com/sirupsen/logrus/alt_exit_test.go b/vendor/github.com/sirupsen/logrus/alt_exit_test.go new file mode 100644 index 0000000..a08b1a8 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/alt_exit_test.go @@ -0,0 +1,83 @@ +package logrus + +import ( + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "testing" + "time" +) + +func TestRegister(t *testing.T) { + current := len(handlers) + RegisterExitHandler(func() {}) + if len(handlers) != current+1 { + t.Fatalf("expected %d handlers, got %d", current+1, len(handlers)) + } +} + +func TestHandler(t *testing.T) { + tempDir, err := ioutil.TempDir("", "test_handler") + if err != nil { + log.Fatalf("can't create temp dir. %q", err) + } + defer os.RemoveAll(tempDir) + + gofile := filepath.Join(tempDir, "gofile.go") + if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { + t.Fatalf("can't create go file. %q", err) + } + + outfile := filepath.Join(tempDir, "outfile.out") + arg := time.Now().UTC().String() + err = exec.Command("go", "run", gofile, outfile, arg).Run() + if err == nil { + t.Fatalf("completed normally, should have failed") + } + + data, err := ioutil.ReadFile(outfile) + if err != nil { + t.Fatalf("can't read output file %s. %q", outfile, err) + } + + if string(data) != arg { + t.Fatalf("bad data. Expected %q, got %q", data, arg) + } +} + +var testprog = []byte(` +// Test program for atexit, gets output file and data as arguments and writes +// data to output file in atexit handler. +package main + +import ( + "github.com/sirupsen/logrus" + "flag" + "fmt" + "io/ioutil" +) + +var outfile = "" +var data = "" + +func handler() { + ioutil.WriteFile(outfile, []byte(data), 0666) +} + +func badHandler() { + n := 0 + fmt.Println(1/n) +} + +func main() { + flag.Parse() + outfile = flag.Arg(0) + data = flag.Arg(1) + + logrus.RegisterExitHandler(handler) + logrus.RegisterExitHandler(badHandler) + logrus.Fatal("Bye bye") +} +`) diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml new file mode 100644 index 0000000..96c2ce1 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go new file mode 100644 index 0000000..da67aba --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 0000000..778f4c9 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,288 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "sync" + "time" +) + +var bufferPool *sync.Pool + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), an Buffer may be set to entry + Buffer *bytes.Buffer +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + entry.fireHooks() + + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + + entry.write() + + entry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, &entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + serialized, err := entry.Logger.Formatter.Format(entry) + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + } else { + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/vendor/github.com/sirupsen/logrus/entry_test.go b/vendor/github.com/sirupsen/logrus/entry_test.go new file mode 100644 index 0000000..a81e2b3 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry_test.go @@ -0,0 +1,115 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEntryWithError(t *testing.T) { + + assert := assert.New(t) + + defer func() { + ErrorKey = "error" + }() + + err := fmt.Errorf("kaboom at layer %d", 4711) + + assert.Equal(err, WithError(err).Data["error"]) + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + + assert.Equal(err, entry.WithError(err).Data["error"]) + + ErrorKey = "err" + + assert.Equal(err, entry.WithError(err).Data["err"]) + +} + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} + +const ( + badMessage = "this is going to panic" + panicMessage = "this is broken" +) + +type panickyHook struct{} + +func (p *panickyHook) Levels() []Level { + return []Level{InfoLevel} +} + +func (p *panickyHook) Fire(entry *Entry) error { + if entry.Message == badMessage { + panic(panicMessage) + } + + return nil +} + +func TestEntryHooksPanic(t *testing.T) { + logger := New() + logger.Out = &bytes.Buffer{} + logger.Level = InfoLevel + logger.Hooks.Add(&panickyHook{}) + + defer func() { + p := recover() + assert.NotNil(t, p) + assert.Equal(t, panicMessage, p) + + entry := NewEntry(logger) + entry.Info("another message") + }() + + entry := NewEntry(logger) + entry.Info(badMessage) +} diff --git a/vendor/github.com/sirupsen/logrus/example_basic_test.go b/vendor/github.com/sirupsen/logrus/example_basic_test.go new file mode 100644 index 0000000..a2acf55 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/example_basic_test.go @@ -0,0 +1,69 @@ +package logrus_test + +import ( + "github.com/sirupsen/logrus" + "os" +) + +func Example_basic() { + var log = logrus.New() + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) //default + log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output + log.Level = logrus.DebugLevel + log.Out = os.Stdout + + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + defer func() { + err := recover() + if err != nil { + entry := err.(*logrus.Entry) + log.WithFields(logrus.Fields{ + "omg": true, + "err_animal": entry.Data["animal"], + "err_size": entry.Data["size"], + "err_level": entry.Level, + "err_message": entry.Message, + "number": 100, + }).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code + } + }() + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "number": 8, + }).Debug("Started observing beach") + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "temperature": -4, + }).Debug("Temperature changes") + + log.WithFields(logrus.Fields{ + "animal": "orca", + "size": 9009, + }).Panic("It's over 9000!") + + // Output: + // level=debug msg="Started observing beach" animal=walrus number=8 + // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 + // level=warning msg="The group's number increased tremendously!" number=122 omg=true + // level=debug msg="Temperature changes" temperature=-4 + // level=panic msg="It's over 9000!" animal=orca size=9009 + // level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" err_size=9009 number=100 omg=true +} diff --git a/vendor/github.com/sirupsen/logrus/example_hook_test.go b/vendor/github.com/sirupsen/logrus/example_hook_test.go new file mode 100644 index 0000000..d4ddffc --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/example_hook_test.go @@ -0,0 +1,35 @@ +package logrus_test + +import ( + "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" + "os" +) + +func Example_hook() { + var log = logrus.New() + log.Formatter = new(logrus.TextFormatter) // default + log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output + log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) + log.Out = os.Stdout + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Error("The ice breaks!") + + // Output: + // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 + // level=warning msg="The group's number increased tremendously!" number=122 omg=true + // level=error msg="The ice breaks!" number=100 omg=true +} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 0000000..013183e --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.level() +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 0000000..b183ff5 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,45 @@ +package logrus + +import "time" + +const defaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + if t, ok := data["time"]; ok { + data["fields.time"] = t + } + + if m, ok := data["msg"]; ok { + data["fields.msg"] = m + } + + if l, ok := data["level"]; ok { + data["fields.level"] = l + } +} diff --git a/vendor/github.com/sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 0000000..d948158 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,101 @@ +package logrus + +import ( + "fmt" + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +var errorFields = Fields{ + "foo": fmt.Errorf("bar"), + "baz": fmt.Errorf("qux"), +} + +func BenchmarkErrorTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + logger := New() + + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + Logger: logger, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/vendor/github.com/sirupsen/logrus/hook_test.go b/vendor/github.com/sirupsen/logrus/hook_test.go new file mode 100644 index 0000000..4fea751 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hook_test.go @@ -0,0 +1,144 @@ +package logrus + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +func TestAddHookRace(t *testing.T) { + var wg sync.WaitGroup + wg.Add(2) + hook := new(ErrorHook) + LogAndAssertJSON(t, func(log *Logger) { + go func() { + defer wg.Done() + log.AddHook(hook) + }() + go func() { + defer wg.Done() + log.Error("test") + }() + wg.Wait() + }, func(fields Fields) { + // the line may have been logged + // before the hook was added, so we can't + // actually assert on the hook + }) +} diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go new file mode 100644 index 0000000..3f151cd --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md new file mode 100644 index 0000000..1bbc0f7 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md @@ -0,0 +1,39 @@ +# Syslog Hooks for Logrus :walrus: + +## Usage + +```go +import ( + "log/syslog" + "github.com/sirupsen/logrus" + lSyslog "github.com/sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` + +If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following. + +```go +import ( + "log/syslog" + "github.com/sirupsen/logrus" + lSyslog "github.com/sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 0000000..329ce0d --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,55 @@ +// +build !windows,!nacl,!plan9 + +package syslog + +import ( + "fmt" + "log/syslog" + "os" + + "github.com/sirupsen/logrus" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Level { + case logrus.PanicLevel: + return hook.Writer.Crit(line) + case logrus.FatalLevel: + return hook.Writer.Crit(line) + case logrus.ErrorLevel: + return hook.Writer.Err(line) + case logrus.WarnLevel: + return hook.Writer.Warning(line) + case logrus.InfoLevel: + return hook.Writer.Info(line) + case logrus.DebugLevel: + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return logrus.AllLevels +} diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go new file mode 100644 index 0000000..5ec3a44 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go @@ -0,0 +1,27 @@ +package syslog + +import ( + "log/syslog" + "testing" + + "github.com/sirupsen/logrus" +) + +func TestLocalhostAddAndPrint(t *testing.T) { + log := logrus.New() + hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err != nil { + t.Errorf("Unable to connect to local syslog.") + } + + log.Hooks.Add(hook) + + for _, level := range hook.Levels() { + if len(log.Hooks[level]) != 1 { + t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) + } + } + + log.Info("Congratulations!") +} diff --git a/vendor/github.com/sirupsen/logrus/hooks/test/test.go b/vendor/github.com/sirupsen/logrus/hooks/test/test.go new file mode 100644 index 0000000..62c4845 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks/test/test.go @@ -0,0 +1,95 @@ +// The Test package is used for testing logrus. It is here for backwards +// compatibility from when logrus' organization was upper-case. Please use +// lower-case logrus and the `null` package instead of this one. +package test + +import ( + "io/ioutil" + "sync" + + "github.com/sirupsen/logrus" +) + +// Hook is a hook designed for dealing with logs in test scenarios. +type Hook struct { + // Entries is an array of all entries that have been received by this hook. + // For safe access, use the AllEntries() method, rather than reading this + // value directly. + Entries []*logrus.Entry + mu sync.RWMutex +} + +// NewGlobal installs a test hook for the global logger. +func NewGlobal() *Hook { + + hook := new(Hook) + logrus.AddHook(hook) + + return hook + +} + +// NewLocal installs a test hook for a given local logger. +func NewLocal(logger *logrus.Logger) *Hook { + + hook := new(Hook) + logger.Hooks.Add(hook) + + return hook + +} + +// NewNullLogger creates a discarding logger and installs the test hook. +func NewNullLogger() (*logrus.Logger, *Hook) { + + logger := logrus.New() + logger.Out = ioutil.Discard + + return logger, NewLocal(logger) + +} + +func (t *Hook) Fire(e *logrus.Entry) error { + t.mu.Lock() + defer t.mu.Unlock() + t.Entries = append(t.Entries, e) + return nil +} + +func (t *Hook) Levels() []logrus.Level { + return logrus.AllLevels +} + +// LastEntry returns the last entry that was logged or nil. +func (t *Hook) LastEntry() *logrus.Entry { + t.mu.RLock() + defer t.mu.RUnlock() + i := len(t.Entries) - 1 + if i < 0 { + return nil + } + // Make a copy, for safety + e := *t.Entries[i] + return &e +} + +// AllEntries returns all entries that were logged. +func (t *Hook) AllEntries() []*logrus.Entry { + t.mu.RLock() + defer t.mu.RUnlock() + // Make a copy so the returned value won't race with future log requests + entries := make([]*logrus.Entry, len(t.Entries)) + for i, entry := range t.Entries { + // Make a copy, for safety + e := *entry + entries[i] = &e + } + return entries +} + +// Reset removes all Entries from this test hook. +func (t *Hook) Reset() { + t.mu.Lock() + defer t.mu.Unlock() + t.Entries = make([]*logrus.Entry, 0) +} diff --git a/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go new file mode 100644 index 0000000..dea768e --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go @@ -0,0 +1,61 @@ +package test + +import ( + "sync" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestAllHooks(t *testing.T) { + assert := assert.New(t) + + logger, hook := NewNullLogger() + assert.Nil(hook.LastEntry()) + assert.Equal(0, len(hook.Entries)) + + logger.Error("Hello error") + assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal("Hello error", hook.LastEntry().Message) + assert.Equal(1, len(hook.Entries)) + + logger.Warn("Hello warning") + assert.Equal(logrus.WarnLevel, hook.LastEntry().Level) + assert.Equal("Hello warning", hook.LastEntry().Message) + assert.Equal(2, len(hook.Entries)) + + hook.Reset() + assert.Nil(hook.LastEntry()) + assert.Equal(0, len(hook.Entries)) + + hook = NewGlobal() + + logrus.Error("Hello error") + assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal("Hello error", hook.LastEntry().Message) + assert.Equal(1, len(hook.Entries)) +} + +func TestLoggingWithHooksRace(t *testing.T) { + assert := assert.New(t) + logger, hook := NewNullLogger() + + var wg sync.WaitGroup + wg.Add(100) + + for i := 0; i < 100; i++ { + go func() { + logger.Info("info") + wg.Done() + }() + } + + assert.Equal(logrus.InfoLevel, hook.LastEntry().Level) + assert.Equal("info", hook.LastEntry().Message) + + wg.Wait() + + entries := hook.AllEntries() + assert.Equal(100, len(entries)) +} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000..fb01c1b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,79 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +// Default key names for the default fields +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // }, + // } + FieldMap FieldMap +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter_test.go b/vendor/github.com/sirupsen/logrus/json_formatter_test.go new file mode 100644 index 0000000..51093a7 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,199 @@ +package logrus + +import ( + "encoding/json" + "errors" + "strings" + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" { + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} + +func TestJSONMessageKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyMsg: "message", + }, + } + + b, err := formatter.Format(&Entry{Message: "oh hai"}) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) { + t.Fatal("Expected JSON to format message key") + } +} + +func TestJSONLevelKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyLevel: "somelevel", + }, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, "somelevel") { + t.Fatal("Expected JSON to format level key") + } +} + +func TestJSONTimeKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyTime: "timeywimey", + }, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, "timeywimey") { + t.Fatal("Expected JSON to format time key") + } +} + +func TestJSONDisableTimestamp(t *testing.T) { + formatter := &JSONFormatter{ + DisableTimestamp: true, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if strings.Contains(s, FieldKeyTime) { + t.Error("Did not prevent timestamp", s) + } +} + +func TestJSONEnableTimestamp(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, FieldKeyTime) { + t.Error("Timestamp not present", s) + } +} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go new file mode 100644 index 0000000..fdaf8a6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -0,0 +1,323 @@ +package logrus + +import ( + "io" + "os" + "sync" + "sync/atomic" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool +} + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infof(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} diff --git a/vendor/github.com/sirupsen/logrus/logger_bench_test.go b/vendor/github.com/sirupsen/logrus/logger_bench_test.go new file mode 100644 index 0000000..dd23a35 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logger_bench_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "os" + "testing" +) + +// smallFields is a small size data set for benchmarking +var loggerFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +func BenchmarkDummyLogger(b *testing.B) { + nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666) + if err != nil { + b.Fatalf("%v", err) + } + defer nullf.Close() + doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkDummyLoggerNoLock(b *testing.B) { + nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + b.Fatalf("%v", err) + } + defer nullf.Close() + doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields) +} + +func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) { + logger := Logger{ + Out: out, + Level: InfoLevel, + Formatter: formatter, + } + entry := logger.WithFields(fields) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + entry.Info("aaa") + } + }) +} + +func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) { + logger := Logger{ + Out: out, + Level: InfoLevel, + Formatter: formatter, + } + logger.SetNoLock() + entry := logger.WithFields(fields) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + entry.Info("aaa") + } + }) +} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go new file mode 100644 index 0000000..dd38999 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. +type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/logrus_test.go b/vendor/github.com/sirupsen/logrus/logrus_test.go new file mode 100644 index 0000000..78cbc28 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logrus_test.go @@ -0,0 +1,386 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("PANIC") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("FATAL") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("ERROR") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARN") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARNING") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("INFO") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("DEBUG") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} + +func TestLoggingRace(t *testing.T) { + logger := New() + + var wg sync.WaitGroup + wg.Add(100) + + for i := 0; i < 100; i++ { + go func() { + logger.Info("info") + wg.Done() + }() + } + wg.Wait() +} + +// Compile test +func TestLogrusInterface(t *testing.T) { + var buffer bytes.Buffer + fn := func(l FieldLogger) { + b := l.WithField("key", "value") + b.Debug("Test") + } + // test logger + logger := New() + logger.Out = &buffer + fn(logger) + + // test Entry + e := logger.WithField("another", "value") + fn(e) +} + +// Implements io.Writer using channels for synchronization, so we can wait on +// the Entry.Writer goroutine to write in a non-racey way. This does assume that +// there is a single call to Logger.Out for each message. +type channelWriter chan []byte + +func (cw channelWriter) Write(p []byte) (int, error) { + cw <- p + return len(p), nil +} + +func TestEntryWriter(t *testing.T) { + cw := channelWriter(make(chan []byte, 1)) + log := New() + log.Out = cw + log.Formatter = new(JSONFormatter) + log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n")) + + bs := <-cw + var fields Fields + err := json.Unmarshal(bs, &fields) + assert.Nil(t, err) + assert.Equal(t, fields["foo"], "bar") + assert.Equal(t, fields["level"], "warning") +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go new file mode 100644 index 0000000..4880d13 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine,!gopherjs + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +type Termios unix.Termios diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 0000000..3de08e8 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine gopherjs + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 0000000..067047a --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,19 @@ +// +build !appengine,!gopherjs + +package logrus + +import ( + "io" + "os" + + "golang.org/x/crypto/ssh/terminal" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return terminal.IsTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go new file mode 100644 index 0000000..f29a009 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gopherjs + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +type Termios unix.Termios diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000..61b21ca --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,178 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 36 + gray = 37 +) + +var ( + baseTimestamp time.Time +) + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var b *bytes.Buffer + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + prefixFieldClashes(entry.Data) + + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter_test.go b/vendor/github.com/sirupsen/logrus/text_formatter_test.go new file mode 100644 index 0000000..d93b931 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,141 @@ +package logrus + +import ( + "bytes" + "errors" + "fmt" + "strings" + "testing" + "time" +) + +func TestFormatting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + testCases := []struct { + value string + expected string + }{ + {`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"}, + } + + for _, tc := range testCases { + b, _ := tf.Format(WithField("test", tc.value)) + + if string(b) != tc.expected { + t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) + } + } +} + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := bytes.Contains(b[idx+5:], []byte("\"")) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "") + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(false, "/foobar") + checkQuoting(false, "foo_bar") + checkQuoting(false, "foo@bar") + checkQuoting(false, "foobar^") + checkQuoting(false, "+/-_^@f.oobar") + checkQuoting(true, "foobar$") + checkQuoting(true, "&foobar") + checkQuoting(true, "x y") + checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) + + // Test for quoting empty fields. + tf.QuoteEmptyFields = true + checkQuoting(true, "") + checkQuoting(false, "abcd") + checkQuoting(true, errors.New("invalid argument")) +} + +func TestEscaping(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + testCases := []struct { + value string + expected string + }{ + {`ba"r`, `ba\"r`}, + {`ba'r`, `ba'r`}, + } + + for _, tc := range testCases { + b, _ := tf.Format(WithField("test", tc.value)) + if !bytes.Contains(b, []byte(tc.expected)) { + t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) + } + } +} + +func TestEscaping_Interface(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + ts := time.Now() + + testCases := []struct { + value interface{} + expected string + }{ + {ts, fmt.Sprintf("\"%s\"", ts.String())}, + {errors.New("error: something went wrong"), "\"error: something went wrong\""}, + } + + for _, tc := range testCases { + b, _ := tf.Format(WithField("test", tc.value)) + if !bytes.Contains(b, []byte(tc.expected)) { + t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) + } + } +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")] + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +func TestDisableTimestampWithColoredOutput(t *testing.T) { + tf := &TextFormatter{DisableTimestamp: true, ForceColors: true} + + b, _ := tf.Format(WithField("test", "test")) + if strings.Contains(string(b), "[0000]") { + t.Error("timestamp not expected when DisableTimestamp is true") + } +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 0000000..7bdebed --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,62 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/stretchr/testify/.gitignore b/vendor/github.com/stretchr/testify/.gitignore new file mode 100644 index 0000000..5aacdb7 --- /dev/null +++ b/vendor/github.com/stretchr/testify/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.DS_Store diff --git a/vendor/github.com/stretchr/testify/.travis.gofmt.sh b/vendor/github.com/stretchr/testify/.travis.gofmt.sh new file mode 100755 index 0000000..bfffdca --- /dev/null +++ b/vendor/github.com/stretchr/testify/.travis.gofmt.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -n "$(gofmt -l .)" ]; then + echo "Go code is not formatted:" + gofmt -d . + exit 1 +fi diff --git a/vendor/github.com/stretchr/testify/.travis.gogenerate.sh b/vendor/github.com/stretchr/testify/.travis.gogenerate.sh new file mode 100755 index 0000000..161b449 --- /dev/null +++ b/vendor/github.com/stretchr/testify/.travis.gogenerate.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [[ "$TRAVIS_GO_VERSION" =~ ^1\.[45](\..*)?$ ]]; then + exit 0 +fi + +go get github.com/ernesto-jimenez/gogen/imports +go generate ./... +if [ -n "$(git diff)" ]; then + echo "Go generate had not been run" + git diff + exit 1 +fi diff --git a/vendor/github.com/stretchr/testify/.travis.govet.sh b/vendor/github.com/stretchr/testify/.travis.govet.sh new file mode 100755 index 0000000..f8fbba7 --- /dev/null +++ b/vendor/github.com/stretchr/testify/.travis.govet.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +cd "$(dirname $0)" +DIRS=". assert require mock _codegen" +set -e +for subdir in $DIRS; do + pushd $subdir + go vet + popd +done diff --git a/vendor/github.com/stretchr/testify/.travis.yml b/vendor/github.com/stretchr/testify/.travis.yml new file mode 100644 index 0000000..6e51e63 --- /dev/null +++ b/vendor/github.com/stretchr/testify/.travis.yml @@ -0,0 +1,15 @@ +language: go + +sudo: false + +go: + - 1.7 + - 1.8 + - 1.9 + - tip + +script: + - ./.travis.gogenerate.sh + - ./.travis.gofmt.sh + - ./.travis.govet.sh + - go test -v -race $(go list ./... | grep -v vendor) diff --git a/vendor/github.com/stretchr/testify/Gopkg.lock b/vendor/github.com/stretchr/testify/Gopkg.lock new file mode 100644 index 0000000..294cda0 --- /dev/null +++ b/vendor/github.com/stretchr/testify/Gopkg.lock @@ -0,0 +1,27 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/objx" + packages = ["."] + revision = "facf9a85c22f48d2f52f2380e4efce1768749a89" + version = "v0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "448ddae4702c6aded2555faafd390c537789bb1c483f70b0431e6634f73f2090" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/stretchr/testify/Gopkg.toml b/vendor/github.com/stretchr/testify/Gopkg.toml new file mode 100644 index 0000000..a16374c --- /dev/null +++ b/vendor/github.com/stretchr/testify/Gopkg.toml @@ -0,0 +1,16 @@ +[prune] + unused-packages = true + non-go = true + go-tests = true + +[[constraint]] + name = "github.com/davecgh/go-spew" + version = "~1.1.0" + +[[constraint]] + name = "github.com/pmezard/go-difflib" + version = "~1.0.0" + +[[constraint]] + name = "github.com/stretchr/objx" + version = "~0.1.0" diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE new file mode 100644 index 0000000..473b670 --- /dev/null +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/README.md b/vendor/github.com/stretchr/testify/README.md new file mode 100644 index 0000000..d3b942b --- /dev/null +++ b/vendor/github.com/stretchr/testify/README.md @@ -0,0 +1,301 @@ +Testify - Thou Shalt Write Tests +================================ + +[![Build Status](https://travis-ci.org/stretchr/testify.svg)](https://travis-ci.org/stretchr/testify) [![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/testify)](https://goreportcard.com/report/github.com/stretchr/testify) [![GoDoc](https://godoc.org/github.com/stretchr/testify?status.svg)](https://godoc.org/github.com/stretchr/testify) + +Go code (golang) set of packages that provide many tools for testifying that your code will behave as you intend. + +Features include: + + * [Easy assertions](#assert-package) + * [Mocking](#mock-package) + * [Testing suite interfaces and functions](#suite-package) + +Get started: + + * Install testify with [one line of code](#installation), or [update it with another](#staying-up-to-date) + * For an introduction to writing test code in Go, see http://golang.org/doc/code.html#Testing + * Check out the API Documentation http://godoc.org/github.com/stretchr/testify + * To make your testing life easier, check out our other project, [gorc](http://github.com/stretchr/gorc) + * A little about [Test-Driven Development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development) + + + +[`assert`](http://godoc.org/github.com/stretchr/testify/assert "API documentation") package +------------------------------------------------------------------------------------------- + +The `assert` package provides some helpful methods that allow you to write better test code in Go. + + * Prints friendly, easy to read failure descriptions + * Allows for very readable code + * Optionally annotate each assertion with a message + +See it in action: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/assert" +) + +func TestSomething(t *testing.T) { + + // assert equality + assert.Equal(t, 123, 123, "they should be equal") + + // assert inequality + assert.NotEqual(t, 123, 456, "they should not be equal") + + // assert for nil (good for errors) + assert.Nil(t, object) + + // assert for not nil (good when you expect something) + if assert.NotNil(t, object) { + + // now we know that object isn't nil, we are safe to make + // further assertions without causing any errors + assert.Equal(t, "Something", object.Value) + + } + +} +``` + + * Every assert func takes the `testing.T` object as the first argument. This is how it writes the errors out through the normal `go test` capabilities. + * Every assert func returns a bool indicating whether the assertion was successful or not, this is useful for if you want to go on making further assertions under certain conditions. + +if you assert many times, use the below: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/assert" +) + +func TestSomething(t *testing.T) { + assert := assert.New(t) + + // assert equality + assert.Equal(123, 123, "they should be equal") + + // assert inequality + assert.NotEqual(123, 456, "they should not be equal") + + // assert for nil (good for errors) + assert.Nil(object) + + // assert for not nil (good when you expect something) + if assert.NotNil(object) { + + // now we know that object isn't nil, we are safe to make + // further assertions without causing any errors + assert.Equal("Something", object.Value) + } +} +``` + +[`require`](http://godoc.org/github.com/stretchr/testify/require "API documentation") package +--------------------------------------------------------------------------------------------- + +The `require` package provides same global functions as the `assert` package, but instead of returning a boolean result they terminate current test. + +See [t.FailNow](http://golang.org/pkg/testing/#T.FailNow) for details. + +[`mock`](http://godoc.org/github.com/stretchr/testify/mock "API documentation") package +---------------------------------------------------------------------------------------- + +The `mock` package provides a mechanism for easily writing mock objects that can be used in place of real objects when writing test code. + +An example test function that tests a piece of code that relies on an external object `testObj`, can setup expectations (testify) and assert that they indeed happened: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/mock" +) + +/* + Test objects +*/ + +// MyMockedObject is a mocked object that implements an interface +// that describes an object that the code I am testing relies on. +type MyMockedObject struct{ + mock.Mock +} + +// DoSomething is a method on MyMockedObject that implements some interface +// and just records the activity, and returns what the Mock object tells it to. +// +// In the real object, this method would do something useful, but since this +// is a mocked object - we're just going to stub it out. +// +// NOTE: This method is not being tested here, code that uses this object is. +func (m *MyMockedObject) DoSomething(number int) (bool, error) { + + args := m.Called(number) + return args.Bool(0), args.Error(1) + +} + +/* + Actual test functions +*/ + +// TestSomething is an example of how to use our test object to +// make assertions about some target code we are testing. +func TestSomething(t *testing.T) { + + // create an instance of our test object + testObj := new(MyMockedObject) + + // setup expectations + testObj.On("DoSomething", 123).Return(true, nil) + + // call the code we are testing + targetFuncThatDoesSomethingWithObj(testObj) + + // assert that the expectations were met + testObj.AssertExpectations(t) + +} +``` + +For more information on how to write mock code, check out the [API documentation for the `mock` package](http://godoc.org/github.com/stretchr/testify/mock). + +You can use the [mockery tool](http://github.com/vektra/mockery) to autogenerate the mock code against an interface as well, making using mocks much quicker. + +[`suite`](http://godoc.org/github.com/stretchr/testify/suite "API documentation") package +----------------------------------------------------------------------------------------- + +The `suite` package provides functionality that you might be used to from more common object oriented languages. With it, you can build a testing suite as a struct, build setup/teardown methods and testing methods on your struct, and run them with 'go test' as per normal. + +An example suite is shown below: + +```go +// Basic imports +import ( + "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +// Define the suite, and absorb the built-in basic suite +// functionality from testify - including a T() method which +// returns the current testing context +type ExampleTestSuite struct { + suite.Suite + VariableThatShouldStartAtFive int +} + +// Make sure that VariableThatShouldStartAtFive is set to five +// before each test +func (suite *ExampleTestSuite) SetupTest() { + suite.VariableThatShouldStartAtFive = 5 +} + +// All methods that begin with "Test" are run as tests within a +// suite. +func (suite *ExampleTestSuite) TestExample() { + assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestExampleTestSuite(t *testing.T) { + suite.Run(t, new(ExampleTestSuite)) +} +``` + +For a more complete example, using all of the functionality provided by the suite package, look at our [example testing suite](https://github.com/stretchr/testify/blob/master/suite/suite_test.go) + +For more information on writing suites, check out the [API documentation for the `suite` package](http://godoc.org/github.com/stretchr/testify/suite). + +`Suite` object has assertion methods: + +```go +// Basic imports +import ( + "testing" + "github.com/stretchr/testify/suite" +) + +// Define the suite, and absorb the built-in basic suite +// functionality from testify - including assertion methods. +type ExampleTestSuite struct { + suite.Suite + VariableThatShouldStartAtFive int +} + +// Make sure that VariableThatShouldStartAtFive is set to five +// before each test +func (suite *ExampleTestSuite) SetupTest() { + suite.VariableThatShouldStartAtFive = 5 +} + +// All methods that begin with "Test" are run as tests within a +// suite. +func (suite *ExampleTestSuite) TestExample() { + suite.Equal(suite.VariableThatShouldStartAtFive, 5) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestExampleTestSuite(t *testing.T) { + suite.Run(t, new(ExampleTestSuite)) +} +``` + +------ + +Installation +============ + +To install Testify, use `go get`: + + go get github.com/stretchr/testify + +This will then make the following packages available to you: + + github.com/stretchr/testify/assert + github.com/stretchr/testify/mock + github.com/stretchr/testify/http + +Import the `testify/assert` package into your code using this template: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/assert" +) + +func TestSomething(t *testing.T) { + + assert.True(t, true, "True is true!") + +} +``` + +------ + +Staying up to date +================== + +To update Testify to the latest version, use `go get -u github.com/stretchr/testify`. + +------ + +Contributing +============ + +Please feel free to submit issues, fork the repository and send pull requests! + +When submitting an issue, we ask that you please include a complete test function that demonstrates the issue. Extra credit for those using Testify to write the test code that demonstrates it. diff --git a/vendor/github.com/stretchr/testify/_codegen/main.go b/vendor/github.com/stretchr/testify/_codegen/main.go new file mode 100644 index 0000000..2e5e812 --- /dev/null +++ b/vendor/github.com/stretchr/testify/_codegen/main.go @@ -0,0 +1,316 @@ +// This program reads all assertion functions from the assert package and +// automatically generates the corresponding requires and forwarded assertions + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/build" + "go/doc" + "go/format" + "go/importer" + "go/parser" + "go/token" + "go/types" + "io" + "io/ioutil" + "log" + "os" + "path" + "regexp" + "strings" + "text/template" + + "github.com/ernesto-jimenez/gogen/imports" +) + +var ( + pkg = flag.String("assert-path", "github.com/stretchr/testify/assert", "Path to the assert package") + includeF = flag.Bool("include-format-funcs", false, "include format functions such as Errorf and Equalf") + outputPkg = flag.String("output-package", "", "package for the resulting code") + tmplFile = flag.String("template", "", "What file to load the function template from") + out = flag.String("out", "", "What file to write the source code to") +) + +func main() { + flag.Parse() + + scope, docs, err := parsePackageSource(*pkg) + if err != nil { + log.Fatal(err) + } + + importer, funcs, err := analyzeCode(scope, docs) + if err != nil { + log.Fatal(err) + } + + if err := generateCode(importer, funcs); err != nil { + log.Fatal(err) + } +} + +func generateCode(importer imports.Importer, funcs []testFunc) error { + buff := bytes.NewBuffer(nil) + + tmplHead, tmplFunc, err := parseTemplates() + if err != nil { + return err + } + + // Generate header + if err := tmplHead.Execute(buff, struct { + Name string + Imports map[string]string + }{ + *outputPkg, + importer.Imports(), + }); err != nil { + return err + } + + // Generate funcs + for _, fn := range funcs { + buff.Write([]byte("\n\n")) + if err := tmplFunc.Execute(buff, &fn); err != nil { + return err + } + } + + code, err := format.Source(buff.Bytes()) + if err != nil { + return err + } + + // Write file + output, err := outputFile() + if err != nil { + return err + } + defer output.Close() + _, err = io.Copy(output, bytes.NewReader(code)) + return err +} + +func parseTemplates() (*template.Template, *template.Template, error) { + tmplHead, err := template.New("header").Parse(headerTemplate) + if err != nil { + return nil, nil, err + } + if *tmplFile != "" { + f, err := ioutil.ReadFile(*tmplFile) + if err != nil { + return nil, nil, err + } + funcTemplate = string(f) + } + tmpl, err := template.New("function").Parse(funcTemplate) + if err != nil { + return nil, nil, err + } + return tmplHead, tmpl, nil +} + +func outputFile() (*os.File, error) { + filename := *out + if filename == "-" || (filename == "" && *tmplFile == "") { + return os.Stdout, nil + } + if filename == "" { + filename = strings.TrimSuffix(strings.TrimSuffix(*tmplFile, ".tmpl"), ".go") + ".go" + } + return os.Create(filename) +} + +// analyzeCode takes the types scope and the docs and returns the import +// information and information about all the assertion functions. +func analyzeCode(scope *types.Scope, docs *doc.Package) (imports.Importer, []testFunc, error) { + testingT := scope.Lookup("TestingT").Type().Underlying().(*types.Interface) + + importer := imports.New(*outputPkg) + var funcs []testFunc + // Go through all the top level functions + for _, fdocs := range docs.Funcs { + // Find the function + obj := scope.Lookup(fdocs.Name) + + fn, ok := obj.(*types.Func) + if !ok { + continue + } + // Check function signature has at least two arguments + sig := fn.Type().(*types.Signature) + if sig.Params().Len() < 2 { + continue + } + // Check first argument is of type testingT + first, ok := sig.Params().At(0).Type().(*types.Named) + if !ok { + continue + } + firstType, ok := first.Underlying().(*types.Interface) + if !ok { + continue + } + if !types.Implements(firstType, testingT) { + continue + } + + // Skip functions ending with f + if strings.HasSuffix(fdocs.Name, "f") && !*includeF { + continue + } + + funcs = append(funcs, testFunc{*outputPkg, fdocs, fn}) + importer.AddImportsFrom(sig.Params()) + } + return importer, funcs, nil +} + +// parsePackageSource returns the types scope and the package documentation from the package +func parsePackageSource(pkg string) (*types.Scope, *doc.Package, error) { + pd, err := build.Import(pkg, ".", 0) + if err != nil { + return nil, nil, err + } + + fset := token.NewFileSet() + files := make(map[string]*ast.File) + fileList := make([]*ast.File, len(pd.GoFiles)) + for i, fname := range pd.GoFiles { + src, err := ioutil.ReadFile(path.Join(pd.SrcRoot, pd.ImportPath, fname)) + if err != nil { + return nil, nil, err + } + f, err := parser.ParseFile(fset, fname, src, parser.ParseComments|parser.AllErrors) + if err != nil { + return nil, nil, err + } + files[fname] = f + fileList[i] = f + } + + cfg := types.Config{ + Importer: importer.Default(), + } + info := types.Info{ + Defs: make(map[*ast.Ident]types.Object), + } + tp, err := cfg.Check(pkg, fset, fileList, &info) + if err != nil { + return nil, nil, err + } + + scope := tp.Scope() + + ap, _ := ast.NewPackage(fset, files, nil, nil) + docs := doc.New(ap, pkg, 0) + + return scope, docs, nil +} + +type testFunc struct { + CurrentPkg string + DocInfo *doc.Func + TypeInfo *types.Func +} + +func (f *testFunc) Qualifier(p *types.Package) string { + if p == nil || p.Name() == f.CurrentPkg { + return "" + } + return p.Name() +} + +func (f *testFunc) Params() string { + sig := f.TypeInfo.Type().(*types.Signature) + params := sig.Params() + p := "" + comma := "" + to := params.Len() + var i int + + if sig.Variadic() { + to-- + } + for i = 1; i < to; i++ { + param := params.At(i) + p += fmt.Sprintf("%s%s %s", comma, param.Name(), types.TypeString(param.Type(), f.Qualifier)) + comma = ", " + } + if sig.Variadic() { + param := params.At(params.Len() - 1) + p += fmt.Sprintf("%s%s ...%s", comma, param.Name(), types.TypeString(param.Type().(*types.Slice).Elem(), f.Qualifier)) + } + return p +} + +func (f *testFunc) ForwardedParams() string { + sig := f.TypeInfo.Type().(*types.Signature) + params := sig.Params() + p := "" + comma := "" + to := params.Len() + var i int + + if sig.Variadic() { + to-- + } + for i = 1; i < to; i++ { + param := params.At(i) + p += fmt.Sprintf("%s%s", comma, param.Name()) + comma = ", " + } + if sig.Variadic() { + param := params.At(params.Len() - 1) + p += fmt.Sprintf("%s%s...", comma, param.Name()) + } + return p +} + +func (f *testFunc) ParamsFormat() string { + return strings.Replace(f.Params(), "msgAndArgs", "msg string, args", 1) +} + +func (f *testFunc) ForwardedParamsFormat() string { + return strings.Replace(f.ForwardedParams(), "msgAndArgs", "append([]interface{}{msg}, args...)", 1) +} + +func (f *testFunc) Comment() string { + return "// " + strings.Replace(strings.TrimSpace(f.DocInfo.Doc), "\n", "\n// ", -1) +} + +func (f *testFunc) CommentFormat() string { + search := fmt.Sprintf("%s", f.DocInfo.Name) + replace := fmt.Sprintf("%sf", f.DocInfo.Name) + comment := strings.Replace(f.Comment(), search, replace, -1) + exp := regexp.MustCompile(replace + `\(((\(\)|[^)])+)\)`) + return exp.ReplaceAllString(comment, replace+`($1, "error message %s", "formatted")`) +} + +func (f *testFunc) CommentWithoutT(receiver string) string { + search := fmt.Sprintf("assert.%s(t, ", f.DocInfo.Name) + replace := fmt.Sprintf("%s.%s(", receiver, f.DocInfo.Name) + return strings.Replace(f.Comment(), search, replace, -1) +} + +var headerTemplate = `/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package {{.Name}} + +import ( +{{range $path, $name := .Imports}} + {{$name}} "{{$path}}"{{end}} +) +` + +var funcTemplate = `{{.Comment}} +func (fwd *AssertionsForwarder) {{.DocInfo.Name}}({{.Params}}) bool { + return assert.{{.DocInfo.Name}}({{.ForwardedParams}}) +}` diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go new file mode 100644 index 0000000..ae06a54 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -0,0 +1,349 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { + return Condition(t, comp, append([]interface{}{msg}, args...)...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return Contains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + return DirExists(t, path, append([]interface{}{msg}, args...)...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return Empty(t, object, append([]interface{}{msg}, args...)...) +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + return Error(t, err, append([]interface{}{msg}, args...)...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + return False(t, value, append([]interface{}{msg}, args...)...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + return FileExists(t, path, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + return Len(t, object, length, append([]interface{}{msg}, args...)...) +} + +// Nilf asserts that the specified object is nil. +// +// assert.Nilf(t, err, "error message %s", "formatted") +func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return Nil(t, object, append([]interface{}{msg}, args...)...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + return NoError(t, err, append([]interface{}{msg}, args...)...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return NotEmpty(t, object, append([]interface{}{msg}, args...)...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return NotNil(t, object, append([]interface{}{msg}, args...)...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + return NotPanics(t, f, append([]interface{}{msg}, args...)...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + return NotZero(t, i, append([]interface{}{msg}, args...)...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + return Panics(t, f, append([]interface{}{msg}, args...)...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return Subset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// Truef asserts that the specified value is true. +// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + return True(t, value, append([]interface{}{msg}, args...)...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + return Zero(t, i, append([]interface{}{msg}, args...)...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl new file mode 100644 index 0000000..c5cc66f --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentFormat}} +func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { + return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 0000000..ffa5428 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,686 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + return Condition(a.t, comp, msgAndArgs...) +} + +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { + return Conditionf(a.t, comp, msg, args...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return Contains(a.t, s, contains, msgAndArgs...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return Containsf(a.t, s, contains, msg, args...) +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { + return DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { + return DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + return ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + return ElementsMatchf(a.t, listA, listB, msg, args...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + return Empty(a.t, object, msgAndArgs...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + return Emptyf(a.t, object, msg, args...) +} + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + return EqualError(a.t, theError, errString, msgAndArgs...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + return EqualErrorf(a.t, theError, errString, msg, args...) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123)) +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return EqualValues(a.t, expected, actual, msgAndArgs...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return EqualValuesf(a.t, expected, actual, msg, args...) +} + +// Equalf asserts that two objects are equal. +// +// a.Equalf(123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Equalf(a.t, expected, actual, msg, args...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + return Error(a.t, err, msgAndArgs...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + return Errorf(a.t, err, msg, args...) +} + +// Exactly asserts that two objects are equal in value and type. +// +// a.Exactly(int32(123), int64(123)) +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Exactly(a.t, expected, actual, msgAndArgs...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Exactlyf(a.t, expected, actual, msg, args...) +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + return Fail(a.t, failureMessage, msgAndArgs...) +} + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + return FailNow(a.t, failureMessage, msgAndArgs...) +} + +// FailNowf fails test +func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { + return FailNowf(a.t, failureMessage, msg, args...) +} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { + return Failf(a.t, failureMessage, msg, args...) +} + +// False asserts that the specified value is false. +// +// a.False(myBool) +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + return False(a.t, value, msgAndArgs...) +} + +// Falsef asserts that the specified value is false. +// +// a.Falsef(myBool, "error message %s", "formatted") +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + return Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { + return FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { + return FileExistsf(a.t, path, msg, args...) +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + return HTTPError(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPErrorf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + return Implementsf(a.t, interfaceObject, object, msg, args...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaf(a.t, expected, actual, delta, msg, args...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return IsType(a.t, expectedType, object, msgAndArgs...) +} + +// IsTypef asserts that the specified objects are of the same type. +func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + return IsTypef(a.t, expectedType, object, msg, args...) +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + return JSONEqf(a.t, expected, actual, msg, args...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3) +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + return Len(a.t, object, length, msgAndArgs...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + return Lenf(a.t, object, length, msg, args...) +} + +// Nil asserts that the specified object is nil. +// +// a.Nil(err) +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + return Nil(a.t, object, msgAndArgs...) +} + +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + return Nilf(a.t, object, msg, args...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + return NoError(a.t, err, msgAndArgs...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + return NoErrorf(a.t, err, msg, args...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return NotContainsf(a.t, s, contains, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + return NotEmpty(a.t, object, msgAndArgs...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { + return NotEmptyf(a.t, object, msg, args...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return NotEqualf(a.t, expected, actual, msg, args...) +} + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err) +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + return NotNil(a.t, object, msgAndArgs...) +} + +// NotNilf asserts that the specified object is not nil. +// +// a.NotNilf(err, "error message %s", "formatted") +func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { + return NotNilf(a.t, object, msg, args...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ RemainCalm() }) +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return NotPanics(a.t, f, msgAndArgs...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + return NotPanicsf(a.t, f, msg, args...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return NotRegexp(a.t, rx, str, msgAndArgs...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return NotRegexpf(a.t, rx, str, msg, args...) +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + return NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return NotSubsetf(a.t, list, subset, msg, args...) +} + +// NotZero asserts that i is not the zero value for its type. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + return NotZero(a.t, i, msgAndArgs...) +} + +// NotZerof asserts that i is not the zero value for its type. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { + return NotZerof(a.t, i, msg, args...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ GoCrazy() }) +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return Panics(a.t, f, msgAndArgs...) +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + return PanicsWithValue(a.t, expected, f, msgAndArgs...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + return PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + return Panicsf(a.t, f, msg, args...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return Regexp(a.t, rx, str, msgAndArgs...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return Regexpf(a.t, rx, str, msg, args...) +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + return Subset(a.t, list, subset, msgAndArgs...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return Subsetf(a.t, list, subset, msg, args...) +} + +// True asserts that the specified value is true. +// +// a.True(myBool) +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + return True(a.t, value, msgAndArgs...) +} + +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + return Truef(a.t, value, msg, args...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + return WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// Zero asserts that i is the zero value for its type. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + return Zero(a.t, i, msgAndArgs...) +} + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { + return Zerof(a.t, i, msg, args...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl new file mode 100644 index 0000000..99f9acf --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { + return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 0000000..47bda77 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,1256 @@ +package assert + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + + if expected == nil || actual == nil { + return expected == actual + } + if exp, ok := expected.([]byte); ok { + act, ok := actual.([]byte) + if !ok { + return false + } else if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) + } + return reflect.DeepEqual(expected, actual) + +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. +func ObjectsAreEqualValues(expected, actual interface{}) bool { + if ObjectsAreEqual(expected, actual) { + return true + } + + actualType := reflect.TypeOf(actual) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + } + + return false +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occurred in calling code.*/ + +// CallerInfo returns an array of strings containing the file and line number +// of each stack frame leading from the current test to the assert call that +// failed. +func CallerInfo() []string { + + pc := uintptr(0) + file := "" + line := 0 + ok := false + name := "" + + callers := []string{} + for i := 0; ; i++ { + pc, file, line, ok = runtime.Caller(i) + if !ok { + // The breaks below failed to terminate the loop, and we ran off the + // end of the call stack. + break + } + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } + + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + if len(parts) > 1 { + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + segments := strings.Split(name, ".") + name = segments[len(segments)-1] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + } + + return callers +} + +// Stolen from the `go test` tool. +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +// getWhitespaceString returns a string that is long enough to overwrite the default +// output from the go testing framework. +func getWhitespaceString() string { + + _, file, line, ok := runtime.Caller(1) + if !ok { + return "" + } + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + + return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) + +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +// Aligns the provided message so that all lines after the first line start at the same location as the first line. +// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). +// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// basis on which the alignment occurs). +func indentMessageLines(message string, longestLabelLen int) string { + outBuf := new(bytes.Buffer) + + for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + // no need to align first line because it starts at the correct location (after the label) + if i != 0 { + // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab + outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") + } + outBuf.WriteString(scanner.Text()) + } + + return outBuf.String() +} + +type failNower interface { + FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + Fail(t, failureMessage, msgAndArgs...) + + // We cannot extend TestingT with FailNow() and + // maintain backwards compatibility, so we fallback + // to panicking when FailNow is not available in + // TestingT. + // See issue #263 + + if t, ok := t.(failNower); ok { + t.FailNow() + } else { + panic("test failed and t is missing `FailNow()`") + } + return false +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + content := []labeledContent{ + {"Error Trace", strings.Join(CallerInfo(), "\n\r\t\t\t")}, + {"Error", failureMessage}, + } + + // Add test name if the Go version supports it + if n, ok := t.(interface { + Name() string + }); ok { + content = append(content, labeledContent{"Test", n.Name()}) + } + + message := messageFromMsgAndArgs(msgAndArgs...) + if len(message) > 0 { + content = append(content, labeledContent{"Messages", message}) + } + + t.Errorf("%s", "\r"+getWhitespaceString()+labeledOutput(content...)) + + return false +} + +type labeledContent struct { + label string + content string +} + +// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: +// +// \r\t{{label}}:{{align_spaces}}\t{{content}}\n +// +// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. +// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this +// alignment is achieved, "\t{{content}}\n" is added for the output. +// +// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. +func labeledOutput(content ...labeledContent) string { + longestLabel := 0 + for _, v := range content { + if len(v.label) > longestLabel { + longestLabel = len(v.label) + } + } + var output string + for _, v := range content { + output += "\r\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" + } + return output +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) + } + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) + } + + return true +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", + expected, actual, err), msgAndArgs...) + } + + if !ObjectsAreEqual(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// formatUnequalValues takes two values of arbitrary types and returns string +// representations appropriate to be presented to the user. +// +// If the values are not of like type, the returned strings will be prefixed +// with the type name, and the value will be enclosed in parenthesis similar +// to a type conversion in the Go grammar. +func formatUnequalValues(expected, actual interface{}) (e string, a string) { + if reflect.TypeOf(expected) != reflect.TypeOf(actual) { + return fmt.Sprintf("%T(%#v)", expected, expected), + fmt.Sprintf("%T(%#v)", actual, actual) + } + + return fmt.Sprintf("%#v", expected), + fmt.Sprintf("%#v", actual) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123)) +func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqualValues(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// Exactly asserts that two objects are equal in value and type. +// +// assert.Exactly(t, int32(123), int64(123)) +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...) + } + + return Equal(t, expected, actual, msgAndArgs...) + +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err) +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if !isNil(object) { + return true + } + return Fail(t, "Expected value not to be nil.", msgAndArgs...) +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err) +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) +} + +// isEmpty gets whether the specified object is considered empty or not. +func isEmpty(object interface{}) bool { + + // get nil case out of the way + if object == nil { + return true + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + // collection types are empty when they have no element + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // pointers are empty if nil or if the value they point to is empty + case reflect.Ptr: + if objValue.IsNil() { + return true + } + deref := objValue.Elem().Interface() + return isEmpty(deref) + // for all other types, compare against the zero value + default: + zero := reflect.Zero(objValue.Type()) + return reflect.DeepEqual(object, zero.Interface()) + } +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := !isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// getLen try to get length of object. +// return (false, 0) if impossible. +func getLen(x interface{}) (ok bool, length int) { + v := reflect.ValueOf(x) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + return true, v.Len() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + ok, l := getLen(object) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + } + + if l != length { + return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + } + return true +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool) +func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != true { + return Fail(t, "Should be true", msgAndArgs...) + } + + return true + +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool) +func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != false { + return Fail(t, "Should be false", msgAndArgs...) + } + + return true + +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", + expected, actual, err), msgAndArgs...) + } + + if ObjectsAreEqual(expected, actual) { + return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) + } + + return true + +} + +// containsElement try loop over the list check if the list includes the element. +// return (false, false) if impossible. +// return (true, false) if element was not found. +// return (true, true) if element was found. +func includeElement(list interface{}, element interface{}) (ok, found bool) { + + listValue := reflect.ValueOf(list) + elementValue := reflect.ValueOf(element) + defer func() { + if e := recover(); e != nil { + ok = false + found = false + } + }() + + if reflect.TypeOf(list).Kind() == reflect.String { + return true, strings.Contains(listValue.String(), elementValue.String()) + } + + if reflect.TypeOf(list).Kind() == reflect.Map { + mapKeys := listValue.MapKeys() + for i := 0; i < len(mapKeys); i++ { + if ObjectsAreEqual(mapKeys[i].Interface(), element) { + return true, true + } + } + return true, false + } + + for i := 0; i < listValue.Len(); i++ { + if ObjectsAreEqual(listValue.Index(i).Interface(), element) { + return true, true + } + } + return true, false + +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") +func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if subset == nil { + return true // we consider nil to be equal to the nil set + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + } + } + + return true +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if subset == nil { + return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if isEmpty(listA) && isEmpty(listB) { + return true + } + + aKind := reflect.TypeOf(listA).Kind() + bKind := reflect.TypeOf(listB).Kind() + + if aKind != reflect.Array && aKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) + } + + if bKind != reflect.Array && bKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) + } + + aValue := reflect.ValueOf(listA) + bValue := reflect.ValueOf(listB) + + aLen := aValue.Len() + bLen := bValue.Len() + + if aLen != bLen { + return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) + } + + // Mark indexes in bValue that we already used + visited := make([]bool, bLen) + for i := 0; i < aLen; i++ { + element := aValue.Index(i).Interface() + found := false + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + if ObjectsAreEqual(bValue.Index(j).Interface(), element) { + visited[j] = true + found = true + break + } + } + if !found { + return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) + } + } + + return true +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ GoCrazy() }) +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + funcDidPanic, panicValue := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + if panicValue != expected { + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", f, expected, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ RemainCalm() }) +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + case time.Duration: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) + if !result { + return result + } + } + + return true +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Map || + reflect.TypeOf(expected).Kind() != reflect.Map { + return Fail(t, "Arguments must be maps", msgAndArgs...) + } + + expectedMap := reflect.ValueOf(expected) + actualMap := reflect.ValueOf(actual) + + if expectedMap.Len() != actualMap.Len() { + return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) + } + + for _, k := range expectedMap.MapKeys() { + ev := expectedMap.MapIndex(k) + av := actualMap.MapIndex(k) + + if !ev.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) + } + + if !av.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) + } + + if !InDelta( + t, + ev.Interface(), + av.Interface(), + delta, + msgAndArgs..., + ) { + return false + } + } + + return true +} + +func calcRelativeError(expected, actual interface{}) (float64, error) { + af, aok := toFloat(expected) + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + } + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") + } + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) + } + + return math.Abs(af-bf) / math.Abs(af), nil +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) + } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) + } + + return true +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if err != nil { + return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) + } + + return true +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + + if err == nil { + return Fail(t, "An error is expected but got nil.", msgAndArgs...) + } + + return true +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if !Error(t, theError, msgAndArgs...) { + return false + } + expected := errString + actual := theError.Error() + // don't need to use deep equals here, we know they are both strings + if expected != actual { + return Fail(t, fmt.Sprintf("Error message not equal:\n"+ + "expected: %q\n"+ + "actual : %q", expected, actual), msgAndArgs...) + } + return true +} + +// matchRegexp return true if a specified regexp matches a string. +func matchRegexp(rx interface{}, str interface{}) bool { + + var r *regexp.Regexp + if rr, ok := rx.(*regexp.Regexp); ok { + r = rr + } else { + r = regexp.MustCompile(fmt.Sprint(rx)) + } + + return (r.FindStringIndex(fmt.Sprint(str)) != nil) + +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + + match := matchRegexp(rx, str) + + if !match { + Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) + } + + return match +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) + } + + return !match + +} + +// Zero asserts that i is the zero value for its type. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// NotZero asserts that i is not the zero value for its type. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) + } + return true +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if !info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) + } + return true +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + var expectedJSONAsInterface, actualJSONAsInterface interface{} + + if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} + +// validateEqualArgs checks whether provided arguments can be safely used in the +// Equal/NotEqual functions. +func validateEqualArgs(expected, actual interface{}) error { + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + +func isFunction(arg interface{}) bool { + if arg == nil { + return false + } + return reflect.TypeOf(arg).Kind() == reflect.Func +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions_test.go b/vendor/github.com/stretchr/testify/assert/assertions_test.go new file mode 100644 index 0000000..6757bd1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions_test.go @@ -0,0 +1,1581 @@ +package assert + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "testing" + "time" +) + +var ( + i interface{} + zeros = []interface{}{ + false, + byte(0), + complex64(0), + complex128(0), + float32(0), + float64(0), + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + rune(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + uintptr(0), + "", + [0]interface{}{}, + []interface{}(nil), + struct{ x int }{}, + (*interface{})(nil), + (func())(nil), + nil, + interface{}(nil), + map[interface{}]interface{}(nil), + (chan interface{})(nil), + (<-chan interface{})(nil), + (chan<- interface{})(nil), + } + nonZeros = []interface{}{ + true, + byte(1), + complex64(1), + complex128(1), + float32(1), + float64(1), + int(1), + int8(1), + int16(1), + int32(1), + int64(1), + rune(1), + uint(1), + uint8(1), + uint16(1), + uint32(1), + uint64(1), + uintptr(1), + "s", + [1]interface{}{1}, + []interface{}{}, + struct{ x int }{1}, + (*interface{})(&i), + (func())(func() {}), + interface{}(1), + map[interface{}]interface{}{}, + (chan interface{})(make(chan interface{})), + (<-chan interface{})(make(chan interface{})), + (chan<- interface{})(make(chan interface{})), + } +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +func TestObjectsAreEqual(t *testing.T) { + + if !ObjectsAreEqual("Hello World", "Hello World") { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123, 123) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123.5, 123.5) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(nil, nil) { + t.Error("objectsAreEqual should return true") + } + if ObjectsAreEqual(map[int]int{5: 10}, map[int]int{10: 20}) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual('x', "x") { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual("x", 'x') { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(0, 0.1) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(0.1, 0) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(uint32(10), int32(10)) { + t.Error("objectsAreEqual should return false") + } + if !ObjectsAreEqualValues(uint32(10), int32(10)) { + t.Error("ObjectsAreEqualValues should return true") + } + if ObjectsAreEqualValues(0, nil) { + t.Fail() + } + if ObjectsAreEqualValues(nil, 0) { + t.Fail() + } + +} + +func TestImplements(t *testing.T) { + + mockT := new(testing.T) + + if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } + if Implements(mockT, (*AssertionTesterInterface)(nil), nil) { + t.Error("Implements method should return false: nil does not implement AssertionTesterInterface") + } + +} + +func TestIsType(t *testing.T) { + + mockT := new(testing.T) + + if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqual(t *testing.T) { + + mockT := new(testing.T) + + if !Equal(mockT, "Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !Equal(mockT, 123, 123) { + t.Error("Equal should return true") + } + if !Equal(mockT, 123.5, 123.5) { + t.Error("Equal should return true") + } + if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) { + t.Error("Equal should return true") + } + if !Equal(mockT, nil, nil) { + t.Error("Equal should return true") + } + if !Equal(mockT, int32(123), int32(123)) { + t.Error("Equal should return true") + } + if !Equal(mockT, uint64(123), uint64(123)) { + t.Error("Equal should return true") + } + if !Equal(mockT, &struct{}{}, &struct{}{}) { + t.Error("Equal should return true (pointer equality is based on equality of underlying value)") + } + var m map[string]interface{} + if Equal(mockT, m["bar"], "something") { + t.Error("Equal should return false") + } +} + +// bufferT implements TestingT. Its implementation of Errorf writes the output that would be produced by +// testing.T.Errorf to an internal bytes.Buffer. +type bufferT struct { + buf bytes.Buffer +} + +func (t *bufferT) Errorf(format string, args ...interface{}) { + // implementation of decorate is copied from testing.T + decorate := func(s string) string { + _, file, line, ok := runtime.Caller(3) // decorate + log + public function. + if ok { + // Truncate file name at last file name separator. + if index := strings.LastIndex(file, "/"); index >= 0 { + file = file[index+1:] + } else if index = strings.LastIndex(file, "\\"); index >= 0 { + file = file[index+1:] + } + } else { + file = "???" + line = 1 + } + buf := new(bytes.Buffer) + // Every line is indented at least one tab. + buf.WriteByte('\t') + fmt.Fprintf(buf, "%s:%d: ", file, line) + lines := strings.Split(s, "\n") + if l := len(lines); l > 1 && lines[l-1] == "" { + lines = lines[:l-1] + } + for i, line := range lines { + if i > 0 { + // Second and subsequent lines are indented an extra tab. + buf.WriteString("\n\t\t") + } + buf.WriteString(line) + } + buf.WriteByte('\n') + return buf.String() + } + t.buf.WriteString(decorate(fmt.Sprintf(format, args...))) +} + +func TestEqualFormatting(t *testing.T) { + for i, currCase := range []struct { + equalWant string + equalGot string + msgAndArgs []interface{} + want string + }{ + {equalWant: "want", equalGot: "got", want: "\tassertions.go:[0-9]+: \r \r\tError Trace:\t\n\t\t\r\tError: \tNot equal: \n\t\t\r\t \texpected: \"want\"\n\t\t\r\t \tactual : \"got\"\n"}, + {equalWant: "want", equalGot: "got", msgAndArgs: []interface{}{"hello, %v!", "world"}, want: "\tassertions.go:[0-9]+: \r \r\tError Trace:\t\n\t\t\r\tError: \tNot equal: \n\t\t\r\t \texpected: \"want\"\n\t\t\r\t \tactual : \"got\"\n\t\t\r\tMessages: \thello, world!\n"}, + } { + mockT := &bufferT{} + Equal(mockT, currCase.equalWant, currCase.equalGot, currCase.msgAndArgs...) + Regexp(t, regexp.MustCompile(currCase.want), mockT.buf.String(), "Case %d", i) + } +} + +func TestFormatUnequalValues(t *testing.T) { + expected, actual := formatUnequalValues("foo", "bar") + Equal(t, `"foo"`, expected, "value should not include type") + Equal(t, `"bar"`, actual, "value should not include type") + + expected, actual = formatUnequalValues(123, 123) + Equal(t, `123`, expected, "value should not include type") + Equal(t, `123`, actual, "value should not include type") + + expected, actual = formatUnequalValues(int64(123), int32(123)) + Equal(t, `int64(123)`, expected, "value should include type") + Equal(t, `int32(123)`, actual, "value should include type") + + expected, actual = formatUnequalValues(int64(123), nil) + Equal(t, `int64(123)`, expected, "value should include type") + Equal(t, `()`, actual, "value should include type") + + type testStructType struct { + Val string + } + + expected, actual = formatUnequalValues(&testStructType{Val: "test"}, &testStructType{Val: "test"}) + Equal(t, `&assert.testStructType{Val:"test"}`, expected, "value should not include type annotation") + Equal(t, `&assert.testStructType{Val:"test"}`, actual, "value should not include type annotation") +} + +func TestNotNil(t *testing.T) { + + mockT := new(testing.T) + + if !NotNil(mockT, new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if NotNil(mockT, nil) { + t.Error("NotNil should return false: object is nil") + } + if NotNil(mockT, (*struct{})(nil)) { + t.Error("NotNil should return false: object is (*struct{})(nil)") + } + +} + +func TestNil(t *testing.T) { + + mockT := new(testing.T) + + if !Nil(mockT, nil) { + t.Error("Nil should return true: object is nil") + } + if !Nil(mockT, (*struct{})(nil)) { + t.Error("Nil should return true: object is (*struct{})(nil)") + } + if Nil(mockT, new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrue(t *testing.T) { + + mockT := new(testing.T) + + if !True(mockT, true) { + t.Error("True should return true") + } + if True(mockT, false) { + t.Error("True should return false") + } + +} + +func TestFalse(t *testing.T) { + + mockT := new(testing.T) + + if !False(mockT, false) { + t.Error("False should return true") + } + if False(mockT, true) { + t.Error("False should return false") + } + +} + +func TestExactly(t *testing.T) { + + mockT := new(testing.T) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if Exactly(mockT, a, b) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, d) { + t.Error("Exactly should return false") + } + if !Exactly(mockT, a, c) { + t.Error("Exactly should return true") + } + + if Exactly(mockT, nil, a) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqual(t *testing.T) { + + mockT := new(testing.T) + + if !NotEqual(mockT, "Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123, 1234) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } + funcA := func() int { return 23 } + funcB := func() int { return 42 } + if NotEqual(mockT, funcA, funcB) { + t.Error("NotEqual should return false") + } + + if NotEqual(mockT, "Hello World", "Hello World") { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, 123, 123) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, 123.5, 123.5) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, []byte("Hello World"), []byte("Hello World")) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, &struct{}{}, &struct{}{}) { + t.Error("NotEqual should return false") + } +} + +type A struct { + Name, Value string +} + +func TestContains(t *testing.T) { + + mockT := new(testing.T) + list := []string{"Foo", "Bar"} + complexList := []*A{ + {"b", "c"}, + {"d", "e"}, + {"g", "h"}, + {"j", "k"}, + } + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + if !Contains(mockT, "Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if Contains(mockT, "Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !Contains(mockT, list, "Bar") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Bar\"") + } + if Contains(mockT, list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + if !Contains(mockT, complexList, &A{"g", "h"}) { + t.Error("Contains should return true: complexList contains {\"g\", \"h\"}") + } + if Contains(mockT, complexList, &A{"g", "e"}) { + t.Error("Contains should return false: complexList contains {\"g\", \"e\"}") + } + if Contains(mockT, complexList, &A{"g", "e"}) { + t.Error("Contains should return false: complexList contains {\"g\", \"e\"}") + } + if !Contains(mockT, simpleMap, "Foo") { + t.Error("Contains should return true: \"{\"Foo\": \"Bar\"}\" contains \"Foo\"") + } + if Contains(mockT, simpleMap, "Bar") { + t.Error("Contains should return false: \"{\"Foo\": \"Bar\"}\" does not contains \"Bar\"") + } +} + +func TestNotContains(t *testing.T) { + + mockT := new(testing.T) + list := []string{"Foo", "Bar"} + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + if !NotContains(mockT, "Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if NotContains(mockT, "Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !NotContains(mockT, list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if NotContains(mockT, list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + if NotContains(mockT, simpleMap, "Foo") { + t.Error("Contains should return true: \"{\"Foo\": \"Bar\"}\" contains \"Foo\"") + } + if !NotContains(mockT, simpleMap, "Bar") { + t.Error("Contains should return false: \"{\"Foo\": \"Bar\"}\" does not contains \"Bar\"") + } +} + +func TestSubset(t *testing.T) { + mockT := new(testing.T) + + if !Subset(mockT, []int{1, 2, 3}, nil) { + t.Error("Subset should return true: given subset is nil") + } + if !Subset(mockT, []int{1, 2, 3}, []int{}) { + t.Error("Subset should return true: any set contains the nil set") + } + if !Subset(mockT, []int{1, 2, 3}, []int{1, 2}) { + t.Error("Subset should return true: [1, 2, 3] contains [1, 2]") + } + if !Subset(mockT, []int{1, 2, 3}, []int{1, 2, 3}) { + t.Error("Subset should return true: [1, 2, 3] contains [1, 2, 3]") + } + if !Subset(mockT, []string{"hello", "world"}, []string{"hello"}) { + t.Error("Subset should return true: [\"hello\", \"world\"] contains [\"hello\"]") + } + + if Subset(mockT, []string{"hello", "world"}, []string{"hello", "testify"}) { + t.Error("Subset should return false: [\"hello\", \"world\"] does not contain [\"hello\", \"testify\"]") + } + if Subset(mockT, []int{1, 2, 3}, []int{4, 5}) { + t.Error("Subset should return false: [1, 2, 3] does not contain [4, 5]") + } + if Subset(mockT, []int{1, 2, 3}, []int{1, 5}) { + t.Error("Subset should return false: [1, 2, 3] does not contain [1, 5]") + } +} + +func TestNotSubset(t *testing.T) { + mockT := new(testing.T) + + if NotSubset(mockT, []int{1, 2, 3}, nil) { + t.Error("NotSubset should return false: given subset is nil") + } + if NotSubset(mockT, []int{1, 2, 3}, []int{}) { + t.Error("NotSubset should return false: any set contains the nil set") + } + if NotSubset(mockT, []int{1, 2, 3}, []int{1, 2}) { + t.Error("NotSubset should return false: [1, 2, 3] contains [1, 2]") + } + if NotSubset(mockT, []int{1, 2, 3}, []int{1, 2, 3}) { + t.Error("NotSubset should return false: [1, 2, 3] contains [1, 2, 3]") + } + if NotSubset(mockT, []string{"hello", "world"}, []string{"hello"}) { + t.Error("NotSubset should return false: [\"hello\", \"world\"] contains [\"hello\"]") + } + + if !NotSubset(mockT, []string{"hello", "world"}, []string{"hello", "testify"}) { + t.Error("NotSubset should return true: [\"hello\", \"world\"] does not contain [\"hello\", \"testify\"]") + } + if !NotSubset(mockT, []int{1, 2, 3}, []int{4, 5}) { + t.Error("NotSubset should return true: [1, 2, 3] does not contain [4, 5]") + } + if !NotSubset(mockT, []int{1, 2, 3}, []int{1, 5}) { + t.Error("NotSubset should return true: [1, 2, 3] does not contain [1, 5]") + } +} + +func TestNotSubsetNil(t *testing.T) { + mockT := new(testing.T) + NotSubset(mockT, []string{"foo"}, nil) + if !mockT.Failed() { + t.Error("NotSubset on nil set should have failed the test") + } +} + +func Test_includeElement(t *testing.T) { + + list1 := []string{"Foo", "Bar"} + list2 := []int{1, 2} + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + ok, found := includeElement("Hello World", "World") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Bar") + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 1) + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 2) + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo!") + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, 3) + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, "1") + True(t, ok) + False(t, found) + + ok, found = includeElement(simpleMap, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(simpleMap, "Bar") + True(t, ok) + False(t, found) + + ok, found = includeElement(1433, "1") + False(t, ok) + False(t, found) +} + +func TestElementsMatch(t *testing.T) { + mockT := new(testing.T) + + if !ElementsMatch(mockT, nil, nil) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{}, []int{}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{1}, []int{1}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{1, 1}, []int{1, 1}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{1, 2}, []int{1, 2}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{1, 2}, []int{2, 1}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, [2]int{1, 2}, [2]int{2, 1}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []string{"hello", "world"}, []string{"world", "hello"}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []string{"hello", "hello"}, []string{"hello", "hello"}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []string{"hello", "hello", "world"}, []string{"hello", "world", "hello"}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, [3]string{"hello", "hello", "world"}, [3]string{"hello", "world", "hello"}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{}, nil) { + t.Error("ElementsMatch should return true") + } + + if ElementsMatch(mockT, []int{1}, []int{1, 1}) { + t.Error("ElementsMatch should return false") + } + if ElementsMatch(mockT, []int{1, 2}, []int{2, 2}) { + t.Error("ElementsMatch should return false") + } + if ElementsMatch(mockT, []string{"hello", "hello"}, []string{"hello"}) { + t.Error("ElementsMatch should return false") + } +} + +func TestCondition(t *testing.T) { + mockT := new(testing.T) + + if !Condition(mockT, func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if Condition(mockT, func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanic(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanics(t *testing.T) { + + mockT := new(testing.T) + + if !Panics(mockT, func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if Panics(mockT, func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestPanicsWithValue(t *testing.T) { + + mockT := new(testing.T) + + if !PanicsWithValue(mockT, "Panic!", func() { + panic("Panic!") + }) { + t.Error("PanicsWithValue should return true") + } + + if PanicsWithValue(mockT, "Panic!", func() { + }) { + t.Error("PanicsWithValue should return false") + } + + if PanicsWithValue(mockT, "at the disco", func() { + panic("Panic!") + }) { + t.Error("PanicsWithValue should return false") + } +} + +func TestNotPanics(t *testing.T) { + + mockT := new(testing.T) + + if !NotPanics(mockT, func() { + }) { + t.Error("NotPanics should return true") + } + + if NotPanics(mockT, func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + True(t, NoError(mockT, err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("some error") + + False(t, NoError(mockT, err), "NoError with error should return False") + + // returning an empty error interface + err = func() error { + var err *customError + if err != nil { + t.Fatal("err should be nil here") + } + return err + }() + + if err == nil { // err is not nil here! + t.Errorf("Error should be nil due to empty interface: %s", err) + } + + False(t, NoError(mockT, err), "NoError should fail with empty error interface") +} + +type customError struct{} + +func (*customError) Error() string { return "fail" } + +func TestError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + False(t, Error(mockT, err), "Error should return False for nil arg") + + // now set an error + err = errors.New("some error") + + True(t, Error(mockT, err), "Error with error should return True") + + // go vet check + True(t, Errorf(mockT, err, "example with %s", "formatted message"), "Errorf with error should rturn True") + + // returning an empty error interface + err = func() error { + var err *customError + if err != nil { + t.Fatal("err should be nil here") + } + return err + }() + + if err == nil { // err is not nil here! + t.Errorf("Error should be nil due to empty interface: %s", err) + } + + True(t, Error(mockT, err), "Error should pass with empty error interface") +} + +func TestEqualError(t *testing.T) { + mockT := new(testing.T) + + // start with a nil error + var err error + False(t, EqualError(mockT, err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + False(t, EqualError(mockT, err, "Not some error"), + "EqualError should return false for different error string") + True(t, EqualError(mockT, err, "some error"), + "EqualError should return true") +} + +func Test_isEmpty(t *testing.T) { + + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, isEmpty("")) + True(t, isEmpty(nil)) + True(t, isEmpty([]string{})) + True(t, isEmpty(0)) + True(t, isEmpty(int32(0))) + True(t, isEmpty(int64(0))) + True(t, isEmpty(false)) + True(t, isEmpty(map[string]string{})) + True(t, isEmpty(new(time.Time))) + True(t, isEmpty(time.Time{})) + True(t, isEmpty(make(chan struct{}))) + False(t, isEmpty("something")) + False(t, isEmpty(errors.New("something"))) + False(t, isEmpty([]string{"something"})) + False(t, isEmpty(1)) + False(t, isEmpty(true)) + False(t, isEmpty(map[string]string{"Hello": "World"})) + False(t, isEmpty(chWithValue)) + +} + +func TestEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + var tiP *time.Time + var tiNP time.Time + var s *string + var f *os.File + sP := &s + x := 1 + xP := &x + + type TString string + type TStruct struct { + x int + s []int + } + + True(t, Empty(mockT, ""), "Empty string is empty") + True(t, Empty(mockT, nil), "Nil is empty") + True(t, Empty(mockT, []string{}), "Empty string array is empty") + True(t, Empty(mockT, 0), "Zero int value is empty") + True(t, Empty(mockT, false), "False value is empty") + True(t, Empty(mockT, make(chan struct{})), "Channel without values is empty") + True(t, Empty(mockT, s), "Nil string pointer is empty") + True(t, Empty(mockT, f), "Nil os.File pointer is empty") + True(t, Empty(mockT, tiP), "Nil time.Time pointer is empty") + True(t, Empty(mockT, tiNP), "time.Time is empty") + True(t, Empty(mockT, TStruct{}), "struct with zero values is empty") + True(t, Empty(mockT, TString("")), "empty aliased string is empty") + True(t, Empty(mockT, sP), "ptr to nil value is empty") + + False(t, Empty(mockT, "something"), "Non Empty string is not empty") + False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty") + False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty") + False(t, Empty(mockT, 1), "Non-zero int value is not empty") + False(t, Empty(mockT, true), "True value is not empty") + False(t, Empty(mockT, chWithValue), "Channel with values is not empty") + False(t, Empty(mockT, TStruct{x: 1}), "struct with initialized values is empty") + False(t, Empty(mockT, TString("abc")), "non-empty aliased string is empty") + False(t, Empty(mockT, xP), "ptr to non-nil value is not empty") +} + +func TestNotEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + False(t, NotEmpty(mockT, ""), "Empty string is empty") + False(t, NotEmpty(mockT, nil), "Nil is empty") + False(t, NotEmpty(mockT, []string{}), "Empty string array is empty") + False(t, NotEmpty(mockT, 0), "Zero int value is empty") + False(t, NotEmpty(mockT, false), "False value is empty") + False(t, NotEmpty(mockT, make(chan struct{})), "Channel without values is empty") + + True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty") + True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty") + True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty") + True(t, NotEmpty(mockT, 1), "Non-zero int value is not empty") + True(t, NotEmpty(mockT, true), "True value is not empty") + True(t, NotEmpty(mockT, chWithValue), "Channel with values is not empty") +} + +func Test_getLen(t *testing.T) { + falseCases := []interface{}{ + nil, + 0, + true, + false, + 'A', + struct{}{}, + } + for _, v := range falseCases { + ok, l := getLen(v) + False(t, ok, "Expected getLen fail to get length of %#v", v) + Equal(t, 0, l, "getLen should return 0 for %#v", v) + } + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + trueCases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range trueCases { + ok, l := getLen(c.v) + True(t, ok, "Expected getLen success to get length of %#v", c.v) + Equal(t, c.l, l) + } +} + +func TestLen(t *testing.T) { + mockT := new(testing.T) + + False(t, Len(mockT, nil, 0), "nil does not have length") + False(t, Len(mockT, 0, 0), "int does not have length") + False(t, Len(mockT, true, 0), "true does not have length") + False(t, Len(mockT, false, 0), "false does not have length") + False(t, Len(mockT, 'A', 0), "Rune does not have length") + False(t, Len(mockT, struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + True(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } + + cases = []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 4}, + {[...]int{1, 2, 3}, 2}, + {"ABC", 2}, + {map[int]int{1: 2, 2: 4, 3: 6}, 4}, + {ch, 2}, + + {[]int{}, 1}, + {map[int]int{}, 1}, + {make(chan int), 1}, + + {[]int(nil), 1}, + {map[int]int(nil), 1}, + {(chan int)(nil), 1}, + } + + for _, c := range cases { + False(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDuration(t *testing.T) { + + mockT := new(testing.T) + a := time.Now() + b := a.Add(10 * time.Second) + + True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + False(t, WithinDuration(mockT, a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDelta(t *testing.T) { + mockT := new(testing.T) + + True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, InDelta(mockT, 1, 2, 1), "|1 - 2| <= 1") + False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail") + False(t, InDelta(mockT, 42, math.NaN(), 0.01), "Expected NaN for actual to fail") + False(t, InDelta(mockT, math.NaN(), 42, 0.01), "Expected NaN for expected to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), uint64(1), 1}, + + {int(2), int(1), 1}, + {int8(2), int8(1), 1}, + {int16(2), int16(1), 1}, + {int32(2), int32(1), 1}, + {int64(2), int64(1), 1}, + + {float32(2), float32(1), 1}, + {float64(2), float64(1), 1}, + } + + for _, tc := range cases { + True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) + } +} + +func TestInDeltaSlice(t *testing.T) { + mockT := new(testing.T) + + True(t, InDeltaSlice(mockT, + []float64{1.001, 0.999}, + []float64{1, 1}, + 0.1), "{1.001, 0.009} is element-wise close to {1, 1} in delta=0.1") + + True(t, InDeltaSlice(mockT, + []float64{1, 2}, + []float64{0, 3}, + 1), "{1, 2} is element-wise close to {0, 3} in delta=1") + + False(t, InDeltaSlice(mockT, + []float64{1, 2}, + []float64{0, 3}, + 0.1), "{1, 2} is not element-wise close to {0, 3} in delta=0.1") + + False(t, InDeltaSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") +} + +func TestInDeltaMapValues(t *testing.T) { + mockT := new(testing.T) + + for _, tc := range []struct { + title string + expect interface{} + actual interface{} + f func(TestingT, bool, ...interface{}) bool + delta float64 + }{ + { + title: "Within delta", + expect: map[string]float64{ + "foo": 1.0, + "bar": 2.0, + }, + actual: map[string]float64{ + "foo": 1.01, + "bar": 1.99, + }, + delta: 0.1, + f: True, + }, + { + title: "Within delta", + expect: map[int]float64{ + 1: 1.0, + 2: 2.0, + }, + actual: map[int]float64{ + 1: 1.0, + 2: 1.99, + }, + delta: 0.1, + f: True, + }, + { + title: "Different number of keys", + expect: map[int]float64{ + 1: 1.0, + 2: 2.0, + }, + actual: map[int]float64{ + 1: 1.0, + }, + delta: 0.1, + f: False, + }, + { + title: "Within delta with zero value", + expect: map[string]float64{ + "zero": 0.0, + }, + actual: map[string]float64{ + "zero": 0.0, + }, + delta: 0.1, + f: True, + }, + { + title: "With missing key with zero value", + expect: map[string]float64{ + "zero": 0.0, + "foo": 0.0, + }, + actual: map[string]float64{ + "zero": 0.0, + "bar": 0.0, + }, + f: False, + }, + } { + tc.f(t, InDeltaMapValues(mockT, tc.expect, tc.actual, tc.delta), tc.title+"\n"+diff(tc.expect, tc.actual)) + } +} + +func TestInEpsilon(t *testing.T) { + mockT := new(testing.T) + + cases := []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), uint16(2), .001}, + {2.1, 2.2, 0.1}, + {2.2, 2.1, 0.1}, + {-2.1, -2.2, 0.1}, + {-2.2, -2.1, 0.1}, + {uint64(100), uint8(101), 0.01}, + {0.1, -0.1, 2}, + {0.1, 0, 2}, + {time.Second, time.Second + time.Millisecond, 0.002}, + } + + for _, tc := range cases { + True(t, InEpsilon(t, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon), "test: %q", tc) + } + + cases = []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), int16(-2), .001}, + {uint64(100), uint8(102), 0.01}, + {2.1, 2.2, 0.001}, + {2.2, 2.1, 0.001}, + {2.1, -2.2, 1}, + {2.1, "bla-bla", 0}, + {0.1, -0.1, 1.99}, + {0, 0.1, 2}, // expected must be different to zero + {time.Second, time.Second + 10*time.Millisecond, 0.002}, + } + + for _, tc := range cases { + False(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } + +} + +func TestInEpsilonSlice(t *testing.T) { + mockT := new(testing.T) + + True(t, InEpsilonSlice(mockT, + []float64{2.2, 2.0}, + []float64{2.1, 2.1}, + 0.06), "{2.2, 2.0} is element-wise close to {2.1, 2.1} in espilon=0.06") + + False(t, InEpsilonSlice(mockT, + []float64{2.2, 2.0}, + []float64{2.1, 2.1}, + 0.04), "{2.2, 2.0} is not element-wise close to {2.1, 2.1} in espilon=0.04") + + False(t, InEpsilonSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") +} + +func TestRegexp(t *testing.T) { + mockT := new(testing.T) + + cases := []struct { + rx, str string + }{ + {"^start", "start of the line"}, + {"end$", "in the end"}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, + } + + for _, tc := range cases { + True(t, Regexp(mockT, tc.rx, tc.str)) + True(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + False(t, NotRegexp(mockT, tc.rx, tc.str)) + False(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + } + + cases = []struct { + rx, str string + }{ + {"^asdfastart", "Not the start of the line"}, + {"end$", "in the end."}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, + } + + for _, tc := range cases { + False(t, Regexp(mockT, tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) + False(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + True(t, NotRegexp(mockT, tc.rx, tc.str)) + True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + } +} + +func testAutogeneratedFunction() { + defer func() { + if err := recover(); err == nil { + panic("did not panic") + } + CallerInfo() + }() + t := struct { + io.Closer + }{} + var c io.Closer + c = t + c.Close() +} + +func TestCallerInfoWithAutogeneratedFunctions(t *testing.T) { + NotPanics(t, func() { + testAutogeneratedFunction() + }) +} + +func TestZero(t *testing.T) { + mockT := new(testing.T) + + for _, test := range zeros { + True(t, Zero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } + + for _, test := range nonZeros { + False(t, Zero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } +} + +func TestNotZero(t *testing.T) { + mockT := new(testing.T) + + for _, test := range zeros { + False(t, NotZero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } + + for _, test := range nonZeros { + True(t, NotZero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } +} + +func TestFileExists(t *testing.T) { + mockT := new(testing.T) + True(t, FileExists(mockT, "assertions.go")) + + mockT = new(testing.T) + False(t, FileExists(mockT, "random_file")) + + mockT = new(testing.T) + False(t, FileExists(mockT, "../_codegen")) +} + +func TestDirExists(t *testing.T) { + mockT := new(testing.T) + False(t, DirExists(mockT, "assertions.go")) + + mockT = new(testing.T) + False(t, DirExists(mockT, "random_dir")) + + mockT = new(testing.T) + True(t, DirExists(mockT, "../_codegen")) +} + +func TestJSONEq_EqualSONString(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`)) +} + +func TestJSONEq_EquivalentButNotEqual(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)) +} + +func TestJSONEq_HashOfArraysAndHashes(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, "{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}")) +} + +func TestJSONEq_Array(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`)) +} + +func TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`)) +} + +func TestJSONEq_HashesNotEquivalent(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)) +} + +func TestJSONEq_ActualIsNotJSON(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `{"foo": "bar"}`, "Not JSON")) +} + +func TestJSONEq_ExpectedIsNotJSON(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, "Not JSON", `{"foo": "bar", "hello": "world"}`)) +} + +func TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, "Not JSON", "Not JSON")) +} + +func TestJSONEq_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`)) +} + +func TestDiff(t *testing.T) { + expected := ` + +Diff: +--- Expected ++++ Actual +@@ -1,3 +1,3 @@ + (struct { foo string }) { +- foo: (string) (len=5) "hello" ++ foo: (string) (len=3) "bar" + } +` + actual := diff( + struct{ foo string }{"hello"}, + struct{ foo string }{"bar"}, + ) + Equal(t, expected, actual) + + expected = ` + +Diff: +--- Expected ++++ Actual +@@ -2,5 +2,5 @@ + (int) 1, +- (int) 2, + (int) 3, +- (int) 4 ++ (int) 5, ++ (int) 7 + } +` + actual = diff( + []int{1, 2, 3, 4}, + []int{1, 3, 5, 7}, + ) + Equal(t, expected, actual) + + expected = ` + +Diff: +--- Expected ++++ Actual +@@ -2,4 +2,4 @@ + (int) 1, +- (int) 2, +- (int) 3 ++ (int) 3, ++ (int) 5 + } +` + actual = diff( + []int{1, 2, 3, 4}[0:3], + []int{1, 3, 5, 7}[0:3], + ) + Equal(t, expected, actual) + + expected = ` + +Diff: +--- Expected ++++ Actual +@@ -1,6 +1,6 @@ + (map[string]int) (len=4) { +- (string) (len=4) "four": (int) 4, ++ (string) (len=4) "five": (int) 5, + (string) (len=3) "one": (int) 1, +- (string) (len=5) "three": (int) 3, +- (string) (len=3) "two": (int) 2 ++ (string) (len=5) "seven": (int) 7, ++ (string) (len=5) "three": (int) 3 + } +` + + actual = diff( + map[string]int{"one": 1, "two": 2, "three": 3, "four": 4}, + map[string]int{"one": 1, "three": 3, "five": 5, "seven": 7}, + ) + Equal(t, expected, actual) +} + +func TestDiffEmptyCases(t *testing.T) { + Equal(t, "", diff(nil, nil)) + Equal(t, "", diff(struct{ foo string }{}, nil)) + Equal(t, "", diff(nil, struct{ foo string }{})) + Equal(t, "", diff(1, 2)) + Equal(t, "", diff(1, 2)) + Equal(t, "", diff([]int{1}, []bool{true})) +} + +// Ensure there are no data races +func TestDiffRace(t *testing.T) { + t.Parallel() + + expected := map[string]string{ + "a": "A", + "b": "B", + "c": "C", + } + + actual := map[string]string{ + "d": "D", + "e": "E", + "f": "F", + } + + // run diffs in parallel simulating tests with t.Parallel() + numRoutines := 10 + rChans := make([]chan string, numRoutines) + for idx := range rChans { + rChans[idx] = make(chan string) + go func(ch chan string) { + defer close(ch) + ch <- diff(expected, actual) + }(rChans[idx]) + } + + for _, ch := range rChans { + for msg := range ch { + NotZero(t, msg) // dummy assert + } + } +} + +type mockTestingT struct { +} + +func (m *mockTestingT) Errorf(format string, args ...interface{}) {} + +func TestFailNowWithPlainTestingT(t *testing.T) { + mockT := &mockTestingT{} + + Panics(t, func() { + FailNow(mockT, "failed") + }, "should panic since mockT is missing FailNow()") +} + +type mockFailNowTestingT struct { +} + +func (m *mockFailNowTestingT) Errorf(format string, args ...interface{}) {} + +func (m *mockFailNowTestingT) FailNow() {} + +func TestFailNowWithFullTestingT(t *testing.T) { + mockT := &mockFailNowTestingT{} + + NotPanics(t, func() { + FailNow(mockT, "failed") + }, "should call mockT.FailNow() rather than panicking") +} + +func TestBytesEqual(t *testing.T) { + var cases = []struct { + a, b []byte + }{ + {make([]byte, 2), make([]byte, 2)}, + {make([]byte, 2), make([]byte, 2, 3)}, + {nil, make([]byte, 0)}, + } + for i, c := range cases { + Equal(t, reflect.DeepEqual(c.a, c.b), ObjectsAreEqual(c.a, c.b), "case %d failed", i+1) + } +} + +func BenchmarkBytesEqual(b *testing.B) { + const size = 1024 * 8 + s := make([]byte, size) + for i := range s { + s[i] = byte(i % 255) + } + s2 := make([]byte, size) + copy(s2, s) + + mockT := &mockFailNowTestingT{} + b.ResetTimer() + for i := 0; i < b.N; i++ { + Equal(mockT, s, s2) + } +} + +func TestEqualArgsValidation(t *testing.T) { + err := validateEqualArgs(time.Now, time.Now) + EqualError(t, err, "cannot take func type as argument") +} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 0000000..c9dccc4 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. +// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 0000000..ac9dc9d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 0000000..9ad5685 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,16 @@ +package assert + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go b/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go new file mode 100644 index 0000000..22e1df1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go @@ -0,0 +1,611 @@ +package assert + +import ( + "errors" + "regexp" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } +} + +func TestIsTypeWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqualWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Equal("Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !assert.Equal(123, 123) { + t.Error("Equal should return true") + } + if !assert.Equal(123.5, 123.5) { + t.Error("Equal should return true") + } + if !assert.Equal([]byte("Hello World"), []byte("Hello World")) { + t.Error("Equal should return true") + } + if !assert.Equal(nil, nil) { + t.Error("Equal should return true") + } +} + +func TestEqualValuesWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.EqualValues(uint32(10), int32(10)) { + t.Error("EqualValues should return true") + } +} + +func TestNotNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.NotNil(new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if assert.NotNil(nil) { + t.Error("NotNil should return false: object is nil") + } + +} + +func TestNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Nil(nil) { + t.Error("Nil should return true: object is nil") + } + if assert.Nil(new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrueWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.True(true) { + t.Error("True should return true") + } + if assert.True(false) { + t.Error("True should return false") + } + +} + +func TestFalseWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.False(false) { + t.Error("False should return true") + } + if assert.False(true) { + t.Error("False should return false") + } + +} + +func TestExactlyWrapper(t *testing.T) { + assert := New(new(testing.T)) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if assert.Exactly(a, b) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, d) { + t.Error("Exactly should return false") + } + if !assert.Exactly(a, c) { + t.Error("Exactly should return true") + } + + if assert.Exactly(nil, a) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqualWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotEqual("Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123, 1234) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual([]byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } +} + +func TestContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.Contains("Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if assert.Contains("Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !assert.Contains(list, "Foo") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + if assert.Contains(list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + +} + +func TestNotContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.NotContains("Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if assert.NotContains("Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !assert.NotContains(list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if assert.NotContains(list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + +} + +func TestConditionWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Condition(func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if assert.Condition(func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanicWrapper(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Panics(func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if assert.Panics(func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotPanics(func() { + }) { + t.Error("NotPanics should return true") + } + + if assert.NotPanics(func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.True(mockAssert.NoError(err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.False(mockAssert.NoError(err), "NoError with error should return False") + +} + +func TestErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.False(mockAssert.Error(err), "Error should return False for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.True(mockAssert.Error(err), "Error with error should return True") + +} + +func TestEqualErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + assert.False(mockAssert.EqualError(err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + assert.False(mockAssert.EqualError(err, "Not some error"), + "EqualError should return false for different error string") + assert.True(mockAssert.EqualError(err, "some error"), + "EqualError should return true") +} + +func TestEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.Empty(""), "Empty string is empty") + assert.True(mockAssert.Empty(nil), "Nil is empty") + assert.True(mockAssert.Empty([]string{}), "Empty string array is empty") + assert.True(mockAssert.Empty(0), "Zero int value is empty") + assert.True(mockAssert.Empty(false), "False value is empty") + + assert.False(mockAssert.Empty("something"), "Non Empty string is not empty") + assert.False(mockAssert.Empty(errors.New("something")), "Non nil object is not empty") + assert.False(mockAssert.Empty([]string{"something"}), "Non empty string array is not empty") + assert.False(mockAssert.Empty(1), "Non-zero int value is not empty") + assert.False(mockAssert.Empty(true), "True value is not empty") + +} + +func TestNotEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.NotEmpty(""), "Empty string is empty") + assert.False(mockAssert.NotEmpty(nil), "Nil is empty") + assert.False(mockAssert.NotEmpty([]string{}), "Empty string array is empty") + assert.False(mockAssert.NotEmpty(0), "Zero int value is empty") + assert.False(mockAssert.NotEmpty(false), "False value is empty") + + assert.True(mockAssert.NotEmpty("something"), "Non Empty string is not empty") + assert.True(mockAssert.NotEmpty(errors.New("something")), "Non nil object is not empty") + assert.True(mockAssert.NotEmpty([]string{"something"}), "Non empty string array is not empty") + assert.True(mockAssert.NotEmpty(1), "Non-zero int value is not empty") + assert.True(mockAssert.NotEmpty(true), "True value is not empty") + +} + +func TestLenWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.Len(nil, 0), "nil does not have length") + assert.False(mockAssert.Len(0, 0), "int does not have length") + assert.False(mockAssert.Len(true, 0), "true does not have length") + assert.False(mockAssert.Len(false, 0), "false does not have length") + assert.False(mockAssert.Len('A', 0), "Rune does not have length") + assert.False(mockAssert.Len(struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + assert.True(mockAssert.Len(c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDurationWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + a := time.Now() + b := a.Add(10 * time.Second) + + assert.True(mockAssert.WithinDuration(a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + assert.True(mockAssert.WithinDuration(b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + assert.False(mockAssert.WithinDuration(a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDeltaWrapper(t *testing.T) { + assert := New(new(testing.T)) + + True(t, assert.InDelta(1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, assert.InDelta(1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, assert.InDelta(1, 2, 1), "|1 - 2| <= 1") + False(t, assert.InDelta(1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, assert.InDelta(2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, assert.InDelta("", nil, 1), "Expected non numerals to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), uint64(1), 1}, + + {int(2), int(1), 1}, + {int8(2), int8(1), 1}, + {int16(2), int16(1), 1}, + {int32(2), int32(1), 1}, + {int64(2), int64(1), 1}, + + {float32(2), float32(1), 1}, + {float64(2), float64(1), 1}, + } + + for _, tc := range cases { + True(t, assert.InDelta(tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) + } +} + +func TestInEpsilonWrapper(t *testing.T) { + assert := New(new(testing.T)) + + cases := []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), uint16(2), .001}, + {2.1, 2.2, 0.1}, + {2.2, 2.1, 0.1}, + {-2.1, -2.2, 0.1}, + {-2.2, -2.1, 0.1}, + {uint64(100), uint8(101), 0.01}, + {0.1, -0.1, 2}, + } + + for _, tc := range cases { + True(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } + + cases = []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), int16(-2), .001}, + {uint64(100), uint8(102), 0.01}, + {2.1, 2.2, 0.001}, + {2.2, 2.1, 0.001}, + {2.1, -2.2, 1}, + {2.1, "bla-bla", 0}, + {0.1, -0.1, 1.99}, + } + + for _, tc := range cases { + False(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } +} + +func TestRegexpWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + cases := []struct { + rx, str string + }{ + {"^start", "start of the line"}, + {"end$", "in the end"}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, + } + + for _, tc := range cases { + True(t, assert.Regexp(tc.rx, tc.str)) + True(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) + False(t, assert.NotRegexp(tc.rx, tc.str)) + False(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) + } + + cases = []struct { + rx, str string + }{ + {"^asdfastart", "Not the start of the line"}, + {"end$", "in the end."}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, + } + + for _, tc := range cases { + False(t, assert.Regexp(tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) + False(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) + True(t, assert.NotRegexp(tc.rx, tc.str)) + True(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) + } +} + +func TestZeroWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + for _, test := range zeros { + assert.True(mockAssert.Zero(test), "Zero should return true for %v", test) + } + + for _, test := range nonZeros { + assert.False(mockAssert.Zero(test), "Zero should return false for %v", test) + } +} + +func TestNotZeroWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + for _, test := range zeros { + assert.False(mockAssert.NotZero(test), "Zero should return true for %v", test) + } + + for _, test := range nonZeros { + assert.True(mockAssert.NotZero(test), "Zero should return false for %v", test) + } +} + +func TestJSONEqWrapper_EqualSONString(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) { + t.Error("JSONEq should return true") + } + +} + +func TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return true") + } + +} + +func TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq("{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") { + t.Error("JSONEq should return true") + } +} + +func TestJSONEqWrapper_Array(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) { + t.Error("JSONEq should return true") + } + +} + +func TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`{"foo": "bar"}`, "Not JSON") { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq("Not JSON", `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq("Not JSON", "Not JSON") { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ArraysOfDifferentOrder(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) { + t.Error("JSONEq should return false") + } +} diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 0000000..3101e78 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,127 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 and +// an error if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return -1, err + } + handler(w, req) + return w.Code, nil +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent + if !isSuccessCode { + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isSuccessCode +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + if !isRedirectCode { + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isRedirectCode +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isErrorCode := code >= http.StatusBadRequest + if !isErrorCode { + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isErrorCode +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if !contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return contains +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return !contains +} diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions_test.go b/vendor/github.com/stretchr/testify/assert/http_assertions_test.go new file mode 100644 index 0000000..3ab7683 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions_test.go @@ -0,0 +1,117 @@ +package assert + +import ( + "fmt" + "net/http" + "net/url" + "testing" +) + +func httpOK(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func httpRedirect(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTemporaryRedirect) +} + +func httpError(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) +} + +func TestHTTPSuccess(t *testing.T) { + assert := New(t) + + mockT1 := new(testing.T) + assert.Equal(HTTPSuccess(mockT1, httpOK, "GET", "/", nil), true) + assert.False(mockT1.Failed()) + + mockT2 := new(testing.T) + assert.Equal(HTTPSuccess(mockT2, httpRedirect, "GET", "/", nil), false) + assert.True(mockT2.Failed()) + + mockT3 := new(testing.T) + assert.Equal(HTTPSuccess(mockT3, httpError, "GET", "/", nil), false) + assert.True(mockT3.Failed()) +} + +func TestHTTPRedirect(t *testing.T) { + assert := New(t) + + mockT1 := new(testing.T) + assert.Equal(HTTPRedirect(mockT1, httpOK, "GET", "/", nil), false) + assert.True(mockT1.Failed()) + + mockT2 := new(testing.T) + assert.Equal(HTTPRedirect(mockT2, httpRedirect, "GET", "/", nil), true) + assert.False(mockT2.Failed()) + + mockT3 := new(testing.T) + assert.Equal(HTTPRedirect(mockT3, httpError, "GET", "/", nil), false) + assert.True(mockT3.Failed()) +} + +func TestHTTPError(t *testing.T) { + assert := New(t) + + mockT1 := new(testing.T) + assert.Equal(HTTPError(mockT1, httpOK, "GET", "/", nil), false) + assert.True(mockT1.Failed()) + + mockT2 := new(testing.T) + assert.Equal(HTTPError(mockT2, httpRedirect, "GET", "/", nil), false) + assert.True(mockT2.Failed()) + + mockT3 := new(testing.T) + assert.Equal(HTTPError(mockT3, httpError, "GET", "/", nil), true) + assert.False(mockT3.Failed()) +} + +func TestHTTPStatusesWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.Equal(mockAssert.HTTPSuccess(httpOK, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPSuccess(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPSuccess(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPRedirect(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPRedirect(httpRedirect, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPRedirect(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPError(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpError, "GET", "/", nil), true) +} + +func httpHelloName(w http.ResponseWriter, r *http.Request) { + name := r.FormValue("name") + w.Write([]byte(fmt.Sprintf("Hello, %s!", name))) +} + +func TestHttpBody(t *testing.T) { + assert := New(t) + mockT := new(testing.T) + + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) +} + +func TestHttpBodyWrappers(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + +} diff --git a/vendor/github.com/stretchr/testify/doc.go b/vendor/github.com/stretchr/testify/doc.go new file mode 100644 index 0000000..377d5cc --- /dev/null +++ b/vendor/github.com/stretchr/testify/doc.go @@ -0,0 +1,22 @@ +// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend. +// +// testify contains the following packages: +// +// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system. +// +// The http package contains tools to make it easier to test http activity using the Go testing system. +// +// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected. +// +// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests. It includes setup/teardown functionality in the way of interfaces. +package testify + +// blank imports help docs. +import ( + // assert package + _ "github.com/stretchr/testify/assert" + // http package + _ "github.com/stretchr/testify/http" + // mock package + _ "github.com/stretchr/testify/mock" +) diff --git a/vendor/github.com/stretchr/testify/http/doc.go b/vendor/github.com/stretchr/testify/http/doc.go new file mode 100644 index 0000000..695167c --- /dev/null +++ b/vendor/github.com/stretchr/testify/http/doc.go @@ -0,0 +1,2 @@ +// Package http DEPRECATED USE net/http/httptest +package http diff --git a/vendor/github.com/stretchr/testify/http/test_response_writer.go b/vendor/github.com/stretchr/testify/http/test_response_writer.go new file mode 100644 index 0000000..5c3f813 --- /dev/null +++ b/vendor/github.com/stretchr/testify/http/test_response_writer.go @@ -0,0 +1,49 @@ +package http + +import ( + "net/http" +) + +// TestResponseWriter DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +type TestResponseWriter struct { + + // StatusCode is the last int written by the call to WriteHeader(int) + StatusCode int + + // Output is a string containing the written bytes using the Write([]byte) func. + Output string + + // header is the internal storage of the http.Header object + header http.Header +} + +// Header DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) Header() http.Header { + + if rw.header == nil { + rw.header = make(http.Header) + } + + return rw.header +} + +// Write DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) Write(bytes []byte) (int, error) { + + // assume 200 success if no header has been set + if rw.StatusCode == 0 { + rw.WriteHeader(200) + } + + // add these bytes to the output string + rw.Output = rw.Output + string(bytes) + + // return normal values + return 0, nil + +} + +// WriteHeader DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) WriteHeader(i int) { + rw.StatusCode = i +} diff --git a/vendor/github.com/stretchr/testify/http/test_round_tripper.go b/vendor/github.com/stretchr/testify/http/test_round_tripper.go new file mode 100644 index 0000000..b1e32f1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/http/test_round_tripper.go @@ -0,0 +1,17 @@ +package http + +import ( + "github.com/stretchr/testify/mock" + "net/http" +) + +// TestRoundTripper DEPRECATED USE net/http/httptest +type TestRoundTripper struct { + mock.Mock +} + +// RoundTrip DEPRECATED USE net/http/httptest +func (t *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + args := t.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 0000000..7324128 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,44 @@ +// Package mock provides a system by which it is possible to mock your objects +// and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. +// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. +package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 0000000..1e232b5 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,815 @@ +package mock + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + Parent *Mock + + // The name of the method that was or will be called. + Method string + + // Holds the arguments of the method. + Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. + ReturnArguments Arguments + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Amount of times this call has been called + totalCalls int + + // Call to this method can be optional + optional bool + + // Holds a channel that will be used to block the Return until it either + // receives a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + waitTime time.Duration + + // Holds a handler used to manipulate arguments content that are passed by + // reference. It's useful when mocking methods such as unmarshalers or + // decoders. + RunFn func(Arguments) +} + +func newCall(parent *Mock, methodName string, methodArguments ...interface{}) *Call { + return &Call{ + Parent: parent, + Method: methodName, + Arguments: methodArguments, + ReturnArguments: make([]interface{}, 0), + Repeatability: 0, + WaitFor: nil, + RunFn: nil, + } +} + +func (c *Call) lock() { + c.Parent.mutex.Lock() +} + +func (c *Call) unlock() { + c.Parent.mutex.Unlock() +} + +// Return specifies the return arguments for the expectation. +// +// Mock.On("DoSomething").Return(errors.New("failed")) +func (c *Call) Return(returnArguments ...interface{}) *Call { + c.lock() + defer c.unlock() + + c.ReturnArguments = returnArguments + + return c +} + +// Once indicates that that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (c *Call) Once() *Call { + return c.Times(1) +} + +// Twice indicates that that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (c *Call) Twice() *Call { + return c.Times(2) +} + +// Times indicates that that the mock should only return the indicated number +// of times. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (c *Call) Times(i int) *Call { + c.lock() + defer c.unlock() + c.Repeatability = i + return c +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. +// +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +func (c *Call) WaitUntil(w <-chan time.Time) *Call { + c.lock() + defer c.unlock() + c.WaitFor = w + return c +} + +// After sets how long to block until the call returns +// +// Mock.On("MyMethod", arg1, arg2).After(time.Second) +func (c *Call) After(d time.Duration) *Call { + c.lock() + defer c.unlock() + c.waitTime = d + return c +} + +// Run sets a handler to be called before returning. It can be used when +// mocking a method such as unmarshalers that takes a pointer to a struct and +// sets properties in such struct +// +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) +func (c *Call) Run(fn func(args Arguments)) *Call { + c.lock() + defer c.unlock() + c.RunFn = fn + return c +} + +// Maybe allows the method call to be optional. Not calling an optional method +// will not cause an error while asserting expectations +func (c *Call) Maybe() *Call { + c.lock() + defer c.unlock() + c.optional = true + return c +} + +// On chains a new expectation description onto the mocked interface. This +// allows syntax like. +// +// Mock. +// On("MyMethod", 1).Return(nil). +// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +func (c *Call) On(methodName string, arguments ...interface{}) *Call { + return c.Parent.On(methodName, arguments...) +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top +// of this document. +type Mock struct { + // Represents the calls that are expected of + // an object. + ExpectedCalls []*Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // TestData holds any data that might be useful for testing. Testify ignores + // this data completely allowing you to do whatever you like with it. + testData objx.Map + + mutex sync.Mutex +} + +// TestData holds any data that might be useful for testing. Testify ignores +// this data completely allowing you to do whatever you like with it. +func (m *Mock) TestData() objx.Map { + + if m.testData == nil { + m.testData = make(objx.Map) + } + + return m.testData +} + +/* + Setting expectations +*/ + +// On starts a description of an expectation of the specified method +// being called. +// +// Mock.On("MyMethod", arg1, arg2) +func (m *Mock) On(methodName string, arguments ...interface{}) *Call { + for _, arg := range arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + m.mutex.Lock() + defer m.mutex.Unlock() + c := newCall(m, methodName, arguments...) + m.ExpectedCalls = append(m.ExpectedCalls, c) + return c +} + +// /* +// Recording and responding to activity +// */ + +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { + for i, call := range m.ExpectedCalls { + if call.Method == method && call.Repeatability > -1 { + + _, diffCount := call.Arguments.Diff(arguments) + if diffCount == 0 { + return i, call + } + + } + } + return -1, nil +} + +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) { + diffCount := 0 + var closestCall *Call + + for _, call := range m.expectedCalls() { + if call.Method == method { + + _, tempDiffCount := call.Arguments.Diff(arguments) + if tempDiffCount < diffCount || diffCount == 0 { + diffCount = tempDiffCount + closestCall = call + } + + } + } + + if closestCall == nil { + return false, nil + } + + return true, closestCall +} + +func callString(method string, arguments Arguments, includeArgumentValues bool) string { + + var argValsString string + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceded by +// appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) Called(arguments ...interface{}) Arguments { + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + //Next four lines are required to use GCCGO function naming conventions. + //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + //With GCCGO we need to remove interface information starting from pN

. + re := regexp.MustCompile("\\.pN\\d+_") + if re.MatchString(functionPath) { + functionPath = re.Split(functionPath, -1)[0] + } + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + return m.MethodCalled(functionName, arguments...) +} + +// MethodCalled tells the mock object that the given method has been called, and gets +// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded +// by appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { + m.mutex.Lock() + found, call := m.findExpectedCall(methodName, arguments...) + + if found < 0 { + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + + closestFound, closestCall := m.findClosestCall(methodName, arguments...) + m.mutex.Unlock() + + if closestFound { + panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\n", callString(methodName, arguments, true), callString(methodName, closestCall.Arguments, true), diffArguments(closestCall.Arguments, arguments))) + } else { + panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo())) + } + } + + if call.Repeatability == 1 { + call.Repeatability = -1 + } else if call.Repeatability > 1 { + call.Repeatability-- + } + call.totalCalls++ + + // add the call + m.Calls = append(m.Calls, *newCall(m, methodName, arguments...)) + m.mutex.Unlock() + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } else { + time.Sleep(call.waitTime) + } + + m.mutex.Lock() + runFn := call.RunFn + m.mutex.Unlock() + + if runFn != nil { + runFn(arguments) + } + + m.mutex.Lock() + returnArgs := call.ReturnArguments + m.mutex.Unlock() + + return returnArgs +} + +/* + Assertions +*/ + +type assertExpectationser interface { + AssertExpectations(TestingT) bool +} + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + for _, obj := range testObjects { + if m, ok := obj.(Mock); ok { + t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") + obj = &m + } + m := obj.(assertExpectationser) + if !m.AssertExpectations(t) { + return false + } + } + return true +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. +func (m *Mock) AssertExpectations(t TestingT) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + var somethingMissing bool + var failedExpectations int + + // iterate through each expectation + expectedCalls := m.expectedCalls() + for _, expectedCall := range expectedCalls { + if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } else { + if expectedCall.Repeatability > 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } else { + t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) + } + + return !somethingMissing +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + var actualCalls int + for _, call := range m.calls() { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { + t.Logf("%v", m.expectedCalls()) + return false + } + return true +} + +// AssertNotCalled asserts that the method was not called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { + t.Logf("%v", m.expectedCalls()) + return false + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.calls() { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +func (m *Mock) expectedCalls() []*Call { + return append([]*Call{}, m.ExpectedCalls...) +} + +func (m *Mock) calls() []Call { + return append([]Call{}, m.Calls...) +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. +type Arguments []interface{} + +const ( + // Anything is used in Diff and Assert when the argument being tested + // shouldn't be taken into consideration. + Anything string = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// argumentMatcher performs custom argument matching, returning whether or +// not the argument is matched by the expectation fixture function. +type argumentMatcher struct { + // fn is a function which accepts one argument, and returns a bool. + fn reflect.Value +} + +func (f argumentMatcher) Matches(argument interface{}) bool { + expectType := f.fn.Type().In(0) + expectTypeNilSupported := false + switch expectType.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: + expectTypeNilSupported = true + } + + argType := reflect.TypeOf(argument) + var arg reflect.Value + if argType == nil { + arg = reflect.New(expectType).Elem() + } else { + arg = reflect.ValueOf(argument) + } + + if argType == nil && !expectTypeNilSupported { + panic(errors.New("attempting to call matcher with nil for non-nil expected type")) + } + if argType == nil || argType.AssignableTo(expectType) { + result := f.fn.Call([]reflect.Value{arg}) + return result[0].Bool() + } + return false +} + +func (f argumentMatcher) String() string { + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) +} + +// MatchedBy can be used to match a mock call based on only certain properties +// from a complex struct or some calculation. It takes a function that will be +// evaluated with the called argument and will return true when there's a match +// and false otherwise. +// +// Example: +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// |fn|, must be a function accepting a single argument (of the expected type) +// which returns a bool. If |fn| doesn't match the required signature, +// MatchedBy() panics. +func MatchedBy(fn interface{}) argumentMatcher { + fnType := reflect.TypeOf(fn) + + if fnType.Kind() != reflect.Func { + panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) + } + if fnType.NumIn() != 1 { + panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) + } + if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { + panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) + } + + return argumentMatcher{fn: reflect.ValueOf(fn)} +} + +// Get Returns the argument at the specified index. +func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. +func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. +func (args Arguments) Diff(objects []interface{}) (string, int) { + + var output = "\n" + var differences int + + var maxArgCount = len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + + if len(objects) <= i { + actual = "(Missing)" + } else { + actual = objects[i] + } + + if len(args) <= i { + expected = "(Missing)" + } else { + expected = args[i] + } + + if matcher, ok := expected.(argumentMatcher); ok { + if matcher.Matches(actual) { + output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actual, matcher) + } else { + differences++ + output = fmt.Sprintf("%s\t%d: PASS: %s not matched by %s\n", output, i, actual, matcher) + } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { + + // type checking + if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual) + } + + } else { + + // normal checking + + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actual, expected) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actual, expected) + } + } + + } + + if differences == 0 { + return "No differences.", differences + } + + return output, differences + +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. +func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... + t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false + +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. +func (args Arguments) String(indexOrNil ...int) string { + + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + var index = indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) + +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +func diffArguments(expected Arguments, actual Arguments) string { + if len(expected) != len(actual) { + return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) + } + + for x := range expected { + if diffString := diff(expected[x], actual[x]); diffString != "" { + return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) + } + } + + return "" +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return diff +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} diff --git a/vendor/github.com/stretchr/testify/mock/mock_test.go b/vendor/github.com/stretchr/testify/mock/mock_test.go new file mode 100644 index 0000000..cb245ba --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock_test.go @@ -0,0 +1,1352 @@ +package mock + +import ( + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +/* + Test objects +*/ + +// ExampleInterface represents an example interface. +type ExampleInterface interface { + TheExampleMethod(a, b, c int) (int, error) +} + +// TestExampleImplementation is a test implementation of ExampleInterface +type TestExampleImplementation struct { + Mock +} + +func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) { + args := i.Called(a, b, c) + return args.Int(0), errors.New("Whoops") +} + +func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) { + i.Called(yesorno) +} + +type ExampleType struct { + ran bool +} + +func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error { + args := i.Called(et) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethod4(v ExampleInterface) error { + args := i.Called(v) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethod5(ch chan struct{}) error { + args := i.Called(ch) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethod6(m map[string]bool) error { + args := i.Called(m) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethod7(slice []bool) error { + args := i.Called(slice) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodFunc(fn func(string) error) error { + args := i.Called(fn) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodVariadic(a ...int) error { + args := i.Called(a) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodVariadicInterface(a ...interface{}) error { + args := i.Called(a) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodMixedVariadic(a int, b ...int) error { + args := i.Called(a, b) + return args.Error(0) +} + +type ExampleFuncType func(string) error + +func (i *TestExampleImplementation) TheExampleMethodFuncType(fn ExampleFuncType) error { + args := i.Called(fn) + return args.Error(0) +} + +/* + Mock +*/ + +func Test_Mock_TestData(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + if assert.NotNil(t, mockedService.TestData()) { + + mockedService.TestData().Set("something", 123) + assert.Equal(t, 123, mockedService.TestData().Get("something").Data()) + } +} + +func Test_Mock_On(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod") + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethod", c.Method) +} + +func Test_Mock_Chained_On(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + + mockedService. + On("TheExampleMethod", 1, 2, 3). + Return(0). + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Return(nil) + + expectedCalls := []*Call{ + &Call{ + Parent: &mockedService.Mock, + Method: "TheExampleMethod", + Arguments: []interface{}{1, 2, 3}, + ReturnArguments: []interface{}{0}, + }, + &Call{ + Parent: &mockedService.Mock, + Method: "TheExampleMethod3", + Arguments: []interface{}{AnythingOfType("*mock.ExampleType")}, + ReturnArguments: []interface{}{nil}, + }, + } + assert.Equal(t, expectedCalls, mockedService.ExpectedCalls) +} + +func Test_Mock_On_WithArgs(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod", 1, 2, 3, 4) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethod", c.Method) + assert.Equal(t, Arguments{1, 2, 3, 4}, c.Arguments) +} + +func Test_Mock_On_WithFuncArg(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodFunc", AnythingOfType("func(string) error")). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethodFunc", c.Method) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, AnythingOfType("func(string) error"), c.Arguments[0]) + + fn := func(string) error { return nil } + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodFunc(fn) + }) +} + +func Test_Mock_On_WithIntArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod", + MatchedBy(func(a int) bool { + return a == 1 + }), MatchedBy(func(b int) bool { + return b == 2 + }), MatchedBy(func(c int) bool { + return c == 3 + })).Return(0, nil) + + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 4) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethod(2, 2, 3) + }) + assert.NotPanics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) +} + +func Test_Mock_On_WithPtrArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a != nil && a.ran == true }), + ).Return(nil) + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a != nil && a.ran == false }), + ).Return(errors.New("error")) + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a == nil }), + ).Return(errors.New("error2")) + + assert.Equal(t, mockedService.TheExampleMethod3(&ExampleType{true}), nil) + assert.EqualError(t, mockedService.TheExampleMethod3(&ExampleType{false}), "error") + assert.EqualError(t, mockedService.TheExampleMethod3(nil), "error2") +} + +func Test_Mock_On_WithFuncArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + fixture1, fixture2 := errors.New("fixture1"), errors.New("fixture2") + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a != nil && a("string") == fixture1 }), + ).Return(errors.New("fixture1")) + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a != nil && a("string") == fixture2 }), + ).Return(errors.New("fixture2")) + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a == nil }), + ).Return(errors.New("fixture3")) + + assert.EqualError(t, mockedService.TheExampleMethodFunc( + func(string) error { return fixture1 }), "fixture1") + assert.EqualError(t, mockedService.TheExampleMethodFunc( + func(string) error { return fixture2 }), "fixture2") + assert.EqualError(t, mockedService.TheExampleMethodFunc(nil), "fixture3") +} + +func Test_Mock_On_WithInterfaceArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod4", + MatchedBy(func(a ExampleInterface) bool { return a == nil }), + ).Return(errors.New("fixture1")) + + assert.EqualError(t, mockedService.TheExampleMethod4(nil), "fixture1") +} + +func Test_Mock_On_WithChannelArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod5", + MatchedBy(func(ch chan struct{}) bool { return ch == nil }), + ).Return(errors.New("fixture1")) + + assert.EqualError(t, mockedService.TheExampleMethod5(nil), "fixture1") +} + +func Test_Mock_On_WithMapArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod6", + MatchedBy(func(m map[string]bool) bool { return m == nil }), + ).Return(errors.New("fixture1")) + + assert.EqualError(t, mockedService.TheExampleMethod6(nil), "fixture1") +} + +func Test_Mock_On_WithSliceArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod7", + MatchedBy(func(slice []bool) bool { return slice == nil }), + ).Return(errors.New("fixture1")) + + assert.EqualError(t, mockedService.TheExampleMethod7(nil), "fixture1") +} + +func Test_Mock_On_WithVariadicFunc(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodVariadic", []int{1, 2, 3}). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, []int{1, 2, 3}, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadic(1, 2, 3) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadic(1, 2) + }) + +} + +func Test_Mock_On_WithMixedVariadicFunc(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodMixedVariadic", 1, []int{2, 3, 4}). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 2, len(c.Arguments)) + assert.Equal(t, 1, c.Arguments[0]) + assert.Equal(t, []int{2, 3, 4}, c.Arguments[1]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodMixedVariadic(1, 2, 3, 4) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodMixedVariadic(1, 2, 3, 5) + }) + +} + +func Test_Mock_On_WithVariadicFuncWithInterface(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethodVariadicInterface", []interface{}{1, 2, 3}). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, []interface{}{1, 2, 3}, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2, 3) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2) + }) + +} + +func Test_Mock_On_WithVariadicFuncWithEmptyInterfaceArray(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + var expected []interface{} + c := mockedService. + On("TheExampleMethodVariadicInterface", expected). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, expected, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadicInterface() + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2) + }) + +} + +func Test_Mock_On_WithFuncPanics(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + + assert.Panics(t, func() { + mockedService.On("TheExampleMethodFunc", func(string) error { return nil }) + }) +} + +func Test_Mock_On_WithFuncTypeArg(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodFuncType", AnythingOfType("mock.ExampleFuncType")). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, AnythingOfType("mock.ExampleFuncType"), c.Arguments[0]) + + fn := func(string) error { return nil } + assert.NotPanics(t, func() { + mockedService.TheExampleMethodFuncType(fn) + }) +} + +func Test_Mock_Return(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_WaitUntil(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + ch := time.After(time.Second) + + c := mockedService.Mock. + On("TheExampleMethod", "A", "B", true). + WaitUntil(ch). + Return(1, "two", true) + + // assert that the call was created + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Equal(t, ch, call.WaitFor) +} + +func Test_Mock_Return_After(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.Mock. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + After(time.Second) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + +} + +func Test_Mock_Return_Run(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + fn := func(args Arguments) { + arg := args.Get(0).(*ExampleType) + arg.ran = true + } + + c := mockedService.Mock. + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Return(nil). + Run(fn) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod3", call.Method) + assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0]) + assert.Equal(t, nil, call.ReturnArguments[0]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + assert.NotNil(t, call.Run) + + et := ExampleType{} + assert.Equal(t, false, et.ran) + mockedService.TheExampleMethod3(&et) + assert.Equal(t, true, et.ran) +} + +func Test_Mock_Return_Run_Out_Of_Order(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + f := func(args Arguments) { + arg := args.Get(0).(*ExampleType) + arg.ran = true + } + + c := mockedService.Mock. + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Run(f). + Return(nil) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod3", call.Method) + assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0]) + assert.Equal(t, nil, call.ReturnArguments[0]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + assert.NotNil(t, call.Run) +} + +func Test_Mock_Return_Once(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + Once() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 1, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Twice(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + Twice() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 2, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Times(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + Times(5) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 5, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Nothing(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 0, len(call.ReturnArguments)) +} + +func Test_Mock_findExpectedCall(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_Mock_findExpectedCall_For_Unknown_Method(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, _ := m.findExpectedCall("Two") + + assert.Equal(t, -1, f) + +} + +func Test_Mock_findExpectedCall_Respects_Repeatability(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two").Once() + m.On("Two", 3).Return("three").Twice() + m.On("Two", 3).Return("three").Times(8) + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_callString(t *testing.T) { + + assert.Equal(t, `Method(int,bool,string)`, callString("Method", []interface{}{1, true, "something"}, false)) + +} + +func Test_Mock_Called(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_Called", 1, 2, 3).Return(5, "6", true) + + returnArguments := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 1, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called", mockedService.Calls[0].Method) + assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func asyncCall(m *Mock, ch chan Arguments) { + ch <- m.Called(1, 2, 3) +} + +func Test_Mock_Called_blocks(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.Mock.On("asyncCall", 1, 2, 3).Return(5, "6", true).After(2 * time.Millisecond) + + ch := make(chan Arguments) + + go asyncCall(&mockedService.Mock, ch) + + select { + case <-ch: + t.Fatal("should have waited") + case <-time.After(1 * time.Millisecond): + } + + returnArguments := <-ch + + if assert.Equal(t, 1, len(mockedService.Mock.Calls)) { + assert.Equal(t, "asyncCall", mockedService.Mock.Calls[0].Method) + assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func Test_Mock_Called_For_Bounded_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService. + On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3). + Return(5, "6", true). + Once() + mockedService. + On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3). + Return(-1, "hi", false) + + returnArguments1 := mockedService.Called(1, 2, 3) + returnArguments2 := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 2, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[0].Method) + assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[1].Method) + assert.Equal(t, 1, mockedService.Calls[1].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[1].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[1].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments1)) { + assert.Equal(t, 5, returnArguments1[0]) + assert.Equal(t, "6", returnArguments1[1]) + assert.Equal(t, true, returnArguments1[2]) + } + + if assert.Equal(t, 3, len(returnArguments2)) { + assert.Equal(t, -1, returnArguments2[0]) + assert.Equal(t, "hi", returnArguments2[1]) + assert.Equal(t, false, returnArguments2[2]) + } + +} + +func Test_Mock_Called_For_SetTime_Expectation(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod", 1, 2, 3).Return(5, "6", true).Times(4) + + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) + +} + +func Test_Mock_Called_Unexpected(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + // make sure it panics if no expectation was made + assert.Panics(t, func() { + mockedService.Called(1, 2, 3) + }, "Calling unexpected method should panic") + +} + +func Test_AssertExpectationsForObjects_Helper(t *testing.T) { + + var mockedService1 = new(TestExampleImplementation) + var mockedService2 = new(TestExampleImplementation) + var mockedService3 = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper", 3).Return() + + mockedService1.Called(1) + mockedService2.Called(2) + mockedService3.Called(3) + + assert.True(t, AssertExpectationsForObjects(t, &mockedService1.Mock, &mockedService2.Mock, &mockedService3.Mock)) + assert.True(t, AssertExpectationsForObjects(t, mockedService1, mockedService2, mockedService3)) + +} + +func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) { + + var mockedService1 = new(TestExampleImplementation) + var mockedService2 = new(TestExampleImplementation) + var mockedService3 = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper_Failed", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper_Failed", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper_Failed", 3).Return() + + mockedService1.Called(1) + mockedService3.Called(3) + + tt := new(testing.T) + assert.False(t, AssertExpectationsForObjects(tt, &mockedService1.Mock, &mockedService2.Mock, &mockedService3.Mock)) + assert.False(t, AssertExpectationsForObjects(tt, mockedService1, mockedService2, mockedService3)) + +} + +func Test_Mock_AssertExpectations(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations", 1, 2, 3).Return(5, 6, 7) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_Placeholder_NoArgs(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_Placeholder_NoArgs").Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertExpectations_Placeholder_NoArgs").Return(7, 6, 5) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called() + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_Placeholder(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_Placeholder", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertExpectations_Placeholder", 3, 2, 1).Return(7, 6, 5) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.False(t, mockedService.AssertExpectations(tt)) + + // make call to the second expectation + mockedService.Called(3, 2, 1) + + // now assert expectations again + assert.True(t, mockedService.AssertExpectations(tt)) +} + +func Test_Mock_AssertExpectations_With_Pointers(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_With_Pointers", &struct{ Foo int }{1}).Return(1) + mockedService.On("Test_Mock_AssertExpectations_With_Pointers", &struct{ Foo int }{2}).Return(2) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + s := struct{ Foo int }{1} + // make the calls now + mockedService.Called(&s) + s.Foo = 2 + mockedService.Called(&s) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectationsCustomType(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Once() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.TheExampleMethod3(&ExampleType{}) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + assert.False(t, mockedService.AssertExpectations(tt)) + + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_TwoCallsWithDifferentArguments(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 1, 2, 3).Return(5, 6, 7) + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 4, 5, 6).Return(5, 6, 7) + + args1 := mockedService.Called(1, 2, 3) + assert.Equal(t, 5, args1.Int(0)) + assert.Equal(t, 6, args1.Int(1)) + assert.Equal(t, 7, args1.Int(2)) + + args2 := mockedService.Called(4, 5, 6) + assert.Equal(t, 5, args2.Int(0)) + assert.Equal(t, 6, args2.Int(1)) + assert.Equal(t, 7, args2.Int(2)) + +} + +func Test_Mock_AssertNumberOfCalls(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertNumberOfCalls", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 1)) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 2)) + +} + +func Test_Mock_AssertCalled(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled", 1, 2, 3)) + +} + +func Test_Mock_AssertCalled_WithAnythingOfTypeArgument(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService. + On("Test_Mock_AssertCalled_WithAnythingOfTypeArgument", Anything, Anything, Anything). + Return() + + mockedService.Called(1, "two", []uint8("three")) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled_WithAnythingOfTypeArgument", AnythingOfType("int"), AnythingOfType("string"), AnythingOfType("[]uint8"))) + +} + +func Test_Mock_AssertCalled_WithArguments(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4)) + +} + +func Test_Mock_AssertCalled_WithArguments_With_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4).Return(5, 6, 7).Once() + + mockedService.Called(1, 2, 3) + mockedService.Called(2, 3, 4) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3)) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 3, 4, 5)) + +} + +func Test_Mock_AssertNotCalled(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertNotCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertNotCalled(t, "Test_Mock_NotCalled")) + +} + +func Test_Mock_AssertOptional(t *testing.T) { + // Optional called + var ms1 = new(TestExampleImplementation) + ms1.On("TheExampleMethod", 1, 2, 3).Maybe().Return(4, nil) + ms1.TheExampleMethod(1, 2, 3) + + tt1 := new(testing.T) + assert.Equal(t, true, ms1.AssertExpectations(tt1)) + + // Optional not called + var ms2 = new(TestExampleImplementation) + ms2.On("TheExampleMethod", 1, 2, 3).Maybe().Return(4, nil) + + tt2 := new(testing.T) + assert.Equal(t, true, ms2.AssertExpectations(tt2)) + + // Non-optional called + var ms3 = new(TestExampleImplementation) + ms3.On("TheExampleMethod", 1, 2, 3).Return(4, nil) + ms3.TheExampleMethod(1, 2, 3) + + tt3 := new(testing.T) + assert.Equal(t, true, ms3.AssertExpectations(tt3)) +} + +/* + Arguments helper methods +*/ +func Test_Arguments_Get(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.Equal(t, "string", args.Get(0).(string)) + assert.Equal(t, 123, args.Get(1).(int)) + assert.Equal(t, true, args.Get(2).(bool)) + +} + +func Test_Arguments_Is(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.True(t, args.Is("string", 123, true)) + assert.False(t, args.Is("wrong", 456, false)) + +} + +func Test_Arguments_Diff(t *testing.T) { + + var args = Arguments([]interface{}{"Hello World", 123, true}) + var diff string + var count int + diff, count = args.Diff([]interface{}{"Hello World", 456, "false"}) + + assert.Equal(t, 2, count) + assert.Contains(t, diff, `%!s(int=456) != %!s(int=123)`) + assert.Contains(t, diff, `false != %!s(bool=true)`) + +} + +func Test_Arguments_Diff_DifferentNumberOfArgs(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + var diff string + var count int + diff, count = args.Diff([]interface{}{"string", 456, "false", "extra"}) + + assert.Equal(t, 3, count) + assert.Contains(t, diff, `extra != (Missing)`) + +} + +func Test_Arguments_Diff_WithAnythingArgument(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + var count int + _, count = args.Diff([]interface{}{"string", Anything, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingArgument_InActualToo(t *testing.T) { + + var args = Arguments([]interface{}{"string", Anything, true}) + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument(t *testing.T) { + + var args = Arguments([]interface{}{"string", AnythingOfType("int"), true}) + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument_Failing(t *testing.T) { + + var args = Arguments([]interface{}{"string", AnythingOfType("string"), true}) + var count int + var diff string + diff, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 1, count) + assert.Contains(t, diff, `string != type int - %!s(int=123)`) + +} + +func Test_Arguments_Diff_WithArgMatcher(t *testing.T) { + matchFn := func(a int) bool { + return a == 123 + } + var args = Arguments([]interface{}{"string", MatchedBy(matchFn), true}) + + diff, count := args.Diff([]interface{}{"string", 124, true}) + assert.Equal(t, 1, count) + assert.Contains(t, diff, `%!s(int=124) not matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", false, true}) + assert.Equal(t, 1, count) + assert.Contains(t, diff, `%!s(bool=false) not matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", 123, false}) + assert.Contains(t, diff, `%!s(int=123) matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", 123, true}) + assert.Equal(t, 0, count) + assert.Contains(t, diff, `No differences.`) +} + +func Test_Arguments_Assert(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.True(t, args.Assert(t, "string", 123, true)) + +} + +func Test_Arguments_String_Representation(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, `string,int,bool`, args.String()) + +} + +func Test_Arguments_String(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, "string", args.String(0)) + +} + +func Test_Arguments_Error(t *testing.T) { + + var err = errors.New("An Error") + var args = Arguments([]interface{}{"string", 123, true, err}) + assert.Equal(t, err, args.Error(3)) + +} + +func Test_Arguments_Error_Nil(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true, nil}) + assert.Equal(t, nil, args.Error(3)) + +} + +func Test_Arguments_Int(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, 123, args.Int(1)) + +} + +func Test_Arguments_Bool(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, true, args.Bool(2)) + +} + +func Test_WaitUntil_Parallel(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + ch1 := make(chan time.Time) + ch2 := make(chan time.Time) + + mockedService.Mock.On("TheExampleMethod2", true).Return().WaitUntil(ch2).Run(func(args Arguments) { + ch1 <- time.Now() + }) + + mockedService.Mock.On("TheExampleMethod2", false).Return().WaitUntil(ch1) + + // Lock both goroutines on the .WaitUntil method + go func() { + mockedService.TheExampleMethod2(false) + }() + go func() { + mockedService.TheExampleMethod2(true) + }() + + // Allow the first call to execute, so the second one executes afterwards + ch2 <- time.Now() +} + +func Test_MockMethodCalled(t *testing.T) { + m := new(Mock) + m.On("foo", "hello").Return("world") + + retArgs := m.MethodCalled("foo", "hello") + require.True(t, len(retArgs) == 1) + require.Equal(t, "world", retArgs[0]) + m.AssertExpectations(t) +} + +// Test to validate fix for racy concurrent call access in MethodCalled() +func Test_MockReturnAndCalledConcurrent(t *testing.T) { + iterations := 1000 + m := &Mock{} + call := m.On("ConcurrencyTestMethod") + + wg := sync.WaitGroup{} + wg.Add(2) + + go func() { + for i := 0; i < iterations; i++ { + call.Return(10) + } + wg.Done() + }() + go func() { + for i := 0; i < iterations; i++ { + ConcurrencyTestMethod(m) + } + wg.Done() + }() + wg.Wait() +} + +type timer struct{ Mock } + +func (s *timer) GetTime(i int) string { + return s.Called(i).Get(0).(string) +} + +func TestAfterTotalWaitTimeWhileExecution(t *testing.T) { + waitDuration := 1 + total, waitMs := 5, time.Millisecond*time.Duration(waitDuration) + aTimer := new(timer) + for i := 0; i < total; i++ { + aTimer.On("GetTime", i).After(waitMs).Return(fmt.Sprintf("Time%d", i)).Once() + } + time.Sleep(waitMs) + start := time.Now() + var results []string + + for i := 0; i < total; i++ { + results = append(results, aTimer.GetTime(i)) + } + + end := time.Now() + elapsedTime := end.Sub(start) + assert.True(t, elapsedTime > waitMs, fmt.Sprintf("Total elapsed time:%v should be atleast greater than %v", elapsedTime, waitMs)) + assert.Equal(t, total, len(results)) + for i, _ := range results { + assert.Equal(t, fmt.Sprintf("Time%d", i), results[i], "Return value of method should be same") + } +} + +func ConcurrencyTestMethod(m *Mock) { + m.Called() +} diff --git a/vendor/github.com/stretchr/testify/package_test.go b/vendor/github.com/stretchr/testify/package_test.go new file mode 100644 index 0000000..7ac5d6d --- /dev/null +++ b/vendor/github.com/stretchr/testify/package_test.go @@ -0,0 +1,12 @@ +package testify + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestImports(t *testing.T) { + if assert.Equal(t, 1, 1) != true { + t.Error("Something is wrong.") + } +} diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go new file mode 100644 index 0000000..169de39 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -0,0 +1,28 @@ +// Package require implements the same assertions as the `assert` package but +// stops test execution when a test fails. +// +// Example Usage +// +// The following is a complete example using require in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/require" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// require.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// Assertions +// +// The `require` package have same global functions as in the `assert` package, +// but instead of returning a boolean result they call `t.FailNow()`. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package require diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go new file mode 100644 index 0000000..ac71d40 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go @@ -0,0 +1,16 @@ +package require + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements_test.go b/vendor/github.com/stretchr/testify/require/forward_requirements_test.go new file mode 100644 index 0000000..b120ae3 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/forward_requirements_test.go @@ -0,0 +1,385 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + require := New(t) + + require.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsTypeWrapper(t *testing.T) { + require := New(t) + require.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualWrapper(t *testing.T) { + require := New(t) + require.Equal(1, 1) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Equal(1, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEqualWrapper(t *testing.T) { + require := New(t) + require.NotEqual(1, 2) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEqual(2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactlyWrapper(t *testing.T) { + require := New(t) + + a := float32(1) + b := float32(1) + c := float64(1) + + require.Exactly(a, b) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Exactly(a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNilWrapper(t *testing.T) { + require := New(t) + require.NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotNil(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNilWrapper(t *testing.T) { + require := New(t) + require.Nil(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Nil(new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrueWrapper(t *testing.T) { + require := New(t) + require.True(true) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.True(false) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestFalseWrapper(t *testing.T) { + require := New(t) + require.False(false) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.False(true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContainsWrapper(t *testing.T) { + require := New(t) + require.Contains("Hello World", "Hello") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Contains("Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContainsWrapper(t *testing.T) { + require := New(t) + require.NotContains("Hello World", "Hello!") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotContains("Hello World", "Hello") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanicsWrapper(t *testing.T) { + require := New(t) + require.Panics(func() { + panic("Panic!") + }) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Panics(func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanicsWrapper(t *testing.T) { + require := New(t) + require.NotPanics(func() {}) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotPanics(func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoErrorWrapper(t *testing.T) { + require := New(t) + require.NoError(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NoError(errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestErrorWrapper(t *testing.T) { + require := New(t) + require.Error(errors.New("some error")) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Error(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualErrorWrapper(t *testing.T) { + require := New(t) + require.EqualError(errors.New("some error"), "some error") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.EqualError(errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmptyWrapper(t *testing.T) { + require := New(t) + require.Empty("") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Empty("x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmptyWrapper(t *testing.T) { + require := New(t) + require.NotEmpty("x") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEmpty("") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDurationWrapper(t *testing.T) { + require := New(t) + a := time.Now() + b := a.Add(10 * time.Second) + + require.WithinDuration(a, b, 15*time.Second) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.WithinDuration(a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDeltaWrapper(t *testing.T) { + require := New(t) + require.InDelta(1.001, 1, 0.01) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.InDelta(1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestZeroWrapper(t *testing.T) { + require := New(t) + require.Zero(0) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Zero(1) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotZeroWrapper(t *testing.T) { + require := New(t) + require.NotZero(1) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotZero(0) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_EqualSONString(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_Array(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"foo": "bar"}`, "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("Not JSON", `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("Not JSON", "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) + if !mockT.Failed { + t.Error("Check should fail") + } +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go new file mode 100644 index 0000000..ac3c308 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -0,0 +1,867 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package require + +import ( + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { + if !assert.Condition(t, comp, msgAndArgs...) { + t.FailNow() + } +} + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) { + if !assert.Conditionf(t, comp, msg, args...) { + t.FailNow() + } +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") +func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.Contains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if !assert.Containsf(t, s, contains, msg, args...) { + t.FailNow() + } +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { + if !assert.DirExists(t, path, msgAndArgs...) { + t.FailNow() + } +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { + if !assert.DirExistsf(t, path, msg, args...) { + t.FailNow() + } +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if !assert.ElementsMatch(t, listA, listB, msgAndArgs...) { + t.FailNow() + } +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if !assert.ElementsMatchf(t, listA, listB, msg, args...) { + t.FailNow() + } +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Empty(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.Emptyf(t, object, msg, args...) { + t.FailNow() + } +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Equal(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { + if !assert.EqualError(t, theError, errString, msgAndArgs...) { + t.FailNow() + } +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { + if !assert.EqualErrorf(t, theError, errString, msg, args...) { + t.FailNow() + } +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123)) +func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.EqualValues(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if !assert.EqualValuesf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if !assert.Equalf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.Error(t, err, msgAndArgs...) { + t.FailNow() + } +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) { + if !assert.Errorf(t, err, msg, args...) { + t.FailNow() + } +} + +// Exactly asserts that two objects are equal in value and type. +// +// assert.Exactly(t, int32(123), int64(123)) +func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Exactly(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if !assert.Exactlyf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.Fail(t, failureMessage, msgAndArgs...) { + t.FailNow() + } +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.FailNow(t, failureMessage, msgAndArgs...) { + t.FailNow() + } +} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) { + if !assert.FailNowf(t, failureMessage, msg, args...) { + t.FailNow() + } +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { + if !assert.Failf(t, failureMessage, msg, args...) { + t.FailNow() + } +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool) +func False(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.False(t, value, msgAndArgs...) { + t.FailNow() + } +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) { + if !assert.Falsef(t, value, msg, args...) { + t.FailNow() + } +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { + if !assert.FileExists(t, path, msgAndArgs...) { + t.FailNow() + } +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { + if !assert.FileExistsf(t, path, msg, args...) { + t.FailNow() + } +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + if !assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + if !assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { + t.FailNow() + } +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + if !assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + if !assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { + t.FailNow() + } +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if !assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if !assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { + t.FailNow() + } +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if !assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if !assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { + t.FailNow() + } +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if !assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if !assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) { + t.FailNow() + } +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.Implements(t, interfaceObject, object, msgAndArgs...) { + t.FailNow() + } +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if !assert.Implementsf(t, interfaceObject, object, msg, args...) { + t.FailNow() + } +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if !assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { + t.FailNow() + } +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if !assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { + t.FailNow() + } +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if !assert.InDeltaf(t, expected, actual, delta, msg, args...) { + t.FailNow() + } +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + t.FailNow() + } +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { + t.FailNow() + } +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + if !assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { + t.FailNow() + } +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + if !assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { + t.FailNow() + } +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.IsType(t, expectedType, object, msgAndArgs...) { + t.FailNow() + } +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { + if !assert.IsTypef(t, expectedType, object, msg, args...) { + t.FailNow() + } +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if !assert.JSONEq(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if !assert.JSONEqf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if !assert.Len(t, object, length, msgAndArgs...) { + t.FailNow() + } +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { + if !assert.Lenf(t, object, length, msg, args...) { + t.FailNow() + } +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err) +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Nil(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// Nilf asserts that the specified object is nil. +// +// assert.Nilf(t, err, "error message %s", "formatted") +func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.Nilf(t, object, msg, args...) { + t.FailNow() + } +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.NoError(t, err, msgAndArgs...) { + t.FailNow() + } +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { + if !assert.NoErrorf(t, err, msg, args...) { + t.FailNow() + } +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") +func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.NotContains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if !assert.NotContainsf(t, s, contains, msg, args...) { + t.FailNow() + } +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotEmpty(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.NotEmptyf(t, object, msg, args...) { + t.FailNow() + } +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.NotEqual(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if !assert.NotEqualf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err) +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotNil(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.NotNilf(t, object, msg, args...) { + t.FailNow() + } +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ RemainCalm() }) +func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.NotPanics(t, f, msgAndArgs...) { + t.FailNow() + } +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if !assert.NotPanicsf(t, f, msg, args...) { + t.FailNow() + } +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.NotRegexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if !assert.NotRegexpf(t, rx, str, msg, args...) { + t.FailNow() + } +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if !assert.NotSubset(t, list, subset, msgAndArgs...) { + t.FailNow() + } +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if !assert.NotSubsetf(t, list, subset, msg, args...) { + t.FailNow() + } +} + +// NotZero asserts that i is not the zero value for its type. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.NotZero(t, i, msgAndArgs...) { + t.FailNow() + } +} + +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { + if !assert.NotZerof(t, i, msg, args...) { + t.FailNow() + } +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ GoCrazy() }) +func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.Panics(t, f, msgAndArgs...) { + t.FailNow() + } +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.PanicsWithValue(t, expected, f, msgAndArgs...) { + t.FailNow() + } +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + if !assert.PanicsWithValuef(t, expected, f, msg, args...) { + t.FailNow() + } +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if !assert.Panicsf(t, f, msg, args...) { + t.FailNow() + } +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.Regexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if !assert.Regexpf(t, rx, str, msg, args...) { + t.FailNow() + } +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if !assert.Subset(t, list, subset, msgAndArgs...) { + t.FailNow() + } +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if !assert.Subsetf(t, list, subset, msg, args...) { + t.FailNow() + } +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool) +func True(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.True(t, value, msgAndArgs...) { + t.FailNow() + } +} + +// Truef asserts that the specified value is true. +// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) { + if !assert.Truef(t, value, msg, args...) { + t.FailNow() + } +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + if !assert.WithinDurationf(t, expected, actual, delta, msg, args...) { + t.FailNow() + } +} + +// Zero asserts that i is the zero value for its type. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.Zero(t, i, msgAndArgs...) { + t.FailNow() + } +} + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) { + if !assert.Zerof(t, i, msg, args...) { + t.FailNow() + } +} diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl new file mode 100644 index 0000000..d2c38f6 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -0,0 +1,6 @@ +{{.Comment}} +func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { + if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { + t.FailNow() + } +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go new file mode 100644 index 0000000..299ceb9 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -0,0 +1,687 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package require + +import ( + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { + Condition(a.t, comp, msgAndArgs...) +} + +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...interface{}) { + Conditionf(a.t, comp, msg, args...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + Contains(a.t, s, contains, msgAndArgs...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + Containsf(a.t, s, contains, msg, args...) +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) { + DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) { + DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + ElementsMatchf(a.t, listA, listB, msg, args...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + Empty(a.t, object, msgAndArgs...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { + Emptyf(a.t, object, msg, args...) +} + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { + EqualError(a.t, theError, errString, msgAndArgs...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) { + EqualErrorf(a.t, theError, errString, msg, args...) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123)) +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + EqualValues(a.t, expected, actual, msgAndArgs...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + EqualValuesf(a.t, expected, actual, msg, args...) +} + +// Equalf asserts that two objects are equal. +// +// a.Equalf(123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + Equalf(a.t, expected, actual, msg, args...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { + Error(a.t, err, msgAndArgs...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { + Errorf(a.t, err, msg, args...) +} + +// Exactly asserts that two objects are equal in value and type. +// +// a.Exactly(int32(123), int64(123)) +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Exactly(a.t, expected, actual, msgAndArgs...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + Exactlyf(a.t, expected, actual, msg, args...) +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { + Fail(a.t, failureMessage, msgAndArgs...) +} + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) { + FailNow(a.t, failureMessage, msgAndArgs...) +} + +// FailNowf fails test +func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) { + FailNowf(a.t, failureMessage, msg, args...) +} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) { + Failf(a.t, failureMessage, msg, args...) +} + +// False asserts that the specified value is false. +// +// a.False(myBool) +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + False(a.t, value, msgAndArgs...) +} + +// Falsef asserts that the specified value is false. +// +// a.Falsef(myBool, "error message %s", "formatted") +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) { + Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) { + FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { + FileExistsf(a.t, path, msg, args...) +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + HTTPError(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + HTTPErrorf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + HTTPRedirectf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + HTTPSuccessf(a.t, handler, method, url, values, msg, args...) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + Implementsf(a.t, interfaceObject, object, msg, args...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + InDeltaSlicef(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + InDeltaf(a.t, expected, actual, delta, msg, args...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + InEpsilonf(a.t, expected, actual, epsilon, msg, args...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + IsType(a.t, expectedType, object, msgAndArgs...) +} + +// IsTypef asserts that the specified objects are of the same type. +func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { + IsTypef(a.t, expectedType, object, msg, args...) +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { + JSONEq(a.t, expected, actual, msgAndArgs...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) { + JSONEqf(a.t, expected, actual, msg, args...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3) +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { + Len(a.t, object, length, msgAndArgs...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) { + Lenf(a.t, object, length, msg, args...) +} + +// Nil asserts that the specified object is nil. +// +// a.Nil(err) +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + Nil(a.t, object, msgAndArgs...) +} + +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { + Nilf(a.t, object, msg, args...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + NoError(a.t, err, msgAndArgs...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { + NoErrorf(a.t, err, msg, args...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + NotContainsf(a.t, s, contains, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + NotEmpty(a.t, object, msgAndArgs...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) { + NotEmptyf(a.t, object, msg, args...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + NotEqualf(a.t, expected, actual, msg, args...) +} + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err) +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + NotNil(a.t, object, msgAndArgs...) +} + +// NotNilf asserts that the specified object is not nil. +// +// a.NotNilf(err, "error message %s", "formatted") +func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) { + NotNilf(a.t, object, msg, args...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ RemainCalm() }) +func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + NotPanics(a.t, f, msgAndArgs...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + NotPanicsf(a.t, f, msg, args...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + NotRegexp(a.t, rx, str, msgAndArgs...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + NotRegexpf(a.t, rx, str, msg, args...) +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + NotSubsetf(a.t, list, subset, msg, args...) +} + +// NotZero asserts that i is not the zero value for its type. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { + NotZero(a.t, i, msgAndArgs...) +} + +// NotZerof asserts that i is not the zero value for its type. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) { + NotZerof(a.t, i, msg, args...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ GoCrazy() }) +func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + Panics(a.t, f, msgAndArgs...) +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + PanicsWithValue(a.t, expected, f, msgAndArgs...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + Panicsf(a.t, f, msg, args...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + Regexp(a.t, rx, str, msgAndArgs...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + Regexpf(a.t, rx, str, msg, args...) +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + Subset(a.t, list, subset, msgAndArgs...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + Subsetf(a.t, list, subset, msg, args...) +} + +// True asserts that the specified value is true. +// +// a.True(myBool) +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + True(a.t, value, msgAndArgs...) +} + +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { + Truef(a.t, value, msg, args...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// Zero asserts that i is the zero value for its type. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { + Zero(a.t, i, msgAndArgs...) +} + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) { + Zerof(a.t, i, msg, args...) +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl new file mode 100644 index 0000000..b93569e --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { + {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go new file mode 100644 index 0000000..e404f01 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -0,0 +1,9 @@ +package require + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) + FailNow() +} + +//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/require/requirements_test.go b/vendor/github.com/stretchr/testify/require/requirements_test.go new file mode 100644 index 0000000..d2ccc99 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/requirements_test.go @@ -0,0 +1,369 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +type MockT struct { + Failed bool +} + +func (t *MockT) FailNow() { + t.Failed = true +} + +func (t *MockT) Errorf(format string, args ...interface{}) { + _, _ = format, args +} + +func TestImplements(t *testing.T) { + + Implements(t, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsType(t *testing.T) { + + IsType(t, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqual(t *testing.T) { + + Equal(t, 1, 1) + + mockT := new(MockT) + Equal(mockT, 1, 2) + if !mockT.Failed { + t.Error("Check should fail") + } + +} + +func TestNotEqual(t *testing.T) { + + NotEqual(t, 1, 2) + mockT := new(MockT) + NotEqual(mockT, 2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactly(t *testing.T) { + + a := float32(1) + b := float32(1) + c := float64(1) + + Exactly(t, a, b) + + mockT := new(MockT) + Exactly(mockT, a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNil(t *testing.T) { + + NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + NotNil(mockT, nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNil(t *testing.T) { + + Nil(t, nil) + + mockT := new(MockT) + Nil(mockT, new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrue(t *testing.T) { + + True(t, true) + + mockT := new(MockT) + True(mockT, false) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestFalse(t *testing.T) { + + False(t, false) + + mockT := new(MockT) + False(mockT, true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContains(t *testing.T) { + + Contains(t, "Hello World", "Hello") + + mockT := new(MockT) + Contains(mockT, "Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContains(t *testing.T) { + + NotContains(t, "Hello World", "Hello!") + + mockT := new(MockT) + NotContains(mockT, "Hello World", "Hello") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanics(t *testing.T) { + + Panics(t, func() { + panic("Panic!") + }) + + mockT := new(MockT) + Panics(mockT, func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanics(t *testing.T) { + + NotPanics(t, func() {}) + + mockT := new(MockT) + NotPanics(mockT, func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoError(t *testing.T) { + + NoError(t, nil) + + mockT := new(MockT) + NoError(mockT, errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestError(t *testing.T) { + + Error(t, errors.New("some error")) + + mockT := new(MockT) + Error(mockT, nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualError(t *testing.T) { + + EqualError(t, errors.New("some error"), "some error") + + mockT := new(MockT) + EqualError(mockT, errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmpty(t *testing.T) { + + Empty(t, "") + + mockT := new(MockT) + Empty(mockT, "x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmpty(t *testing.T) { + + NotEmpty(t, "x") + + mockT := new(MockT) + NotEmpty(mockT, "") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDuration(t *testing.T) { + + a := time.Now() + b := a.Add(10 * time.Second) + + WithinDuration(t, a, b, 15*time.Second) + + mockT := new(MockT) + WithinDuration(mockT, a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDelta(t *testing.T) { + + InDelta(t, 1.001, 1, 0.01) + + mockT := new(MockT) + InDelta(mockT, 1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestZero(t *testing.T) { + + Zero(t, "") + + mockT := new(MockT) + Zero(mockT, "x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotZero(t *testing.T) { + + NotZero(t, "x") + + mockT := new(MockT) + NotZero(mockT, "") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_EqualSONString(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_EquivalentButNotEqual(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_HashOfArraysAndHashes(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_Array(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_HashesNotEquivalent(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ActualIsNotJSON(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"foo": "bar"}`, "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ExpectedIsNotJSON(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "Not JSON", `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "Not JSON", "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) + if !mockT.Failed { + t.Error("Check should fail") + } +} diff --git a/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/stretchr/testify/suite/doc.go new file mode 100644 index 0000000..f91a245 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/doc.go @@ -0,0 +1,65 @@ +// Package suite contains logic for creating testing suite structs +// and running the methods on those structs as tests. The most useful +// piece of this package is that you can create setup/teardown methods +// on your testing suites, which will run before/after the whole suite +// or individual tests (depending on which interface(s) you +// implement). +// +// A testing suite is usually built by first extending the built-in +// suite functionality from suite.Suite in testify. Alternatively, +// you could reproduce that logic on your own if you wanted (you +// just need to implement the TestingSuite interface from +// suite/interfaces.go). +// +// After that, you can implement any of the interfaces in +// suite/interfaces.go to add setup/teardown functionality to your +// suite, and add any methods that start with "Test" to add tests. +// Methods that do not match any suite interfaces and do not begin +// with "Test" will not be run by testify, and can safely be used as +// helper methods. +// +// Once you've built your testing suite, you need to run the suite +// (using suite.Run from testify) inside any function that matches the +// identity that "go test" is already looking for (i.e. +// func(*testing.T)). +// +// Regular expression to select test suites specified command-line +// argument "-run". Regular expression to select the methods +// of test suites specified command-line argument "-m". +// Suite object has assertion methods. +// +// A crude example: +// // Basic imports +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// ) +// +// // Define the suite, and absorb the built-in basic suite +// // functionality from testify - including a T() method which +// // returns the current testing context +// type ExampleTestSuite struct { +// suite.Suite +// VariableThatShouldStartAtFive int +// } +// +// // Make sure that VariableThatShouldStartAtFive is set to five +// // before each test +// func (suite *ExampleTestSuite) SetupTest() { +// suite.VariableThatShouldStartAtFive = 5 +// } +// +// // All methods that begin with "Test" are run as tests within a +// // suite. +// func (suite *ExampleTestSuite) TestExample() { +// assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) +// suite.Equal(5, suite.VariableThatShouldStartAtFive) +// } +// +// // In order for 'go test' to run this suite, we need to create +// // a normal test function and pass our suite to suite.Run +// func TestExampleTestSuite(t *testing.T) { +// suite.Run(t, new(ExampleTestSuite)) +// } +package suite diff --git a/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/stretchr/testify/suite/interfaces.go new file mode 100644 index 0000000..b37cb04 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/interfaces.go @@ -0,0 +1,46 @@ +package suite + +import "testing" + +// TestingSuite can store and return the current *testing.T context +// generated by 'go test'. +type TestingSuite interface { + T() *testing.T + SetT(*testing.T) +} + +// SetupAllSuite has a SetupSuite method, which will run before the +// tests in the suite are run. +type SetupAllSuite interface { + SetupSuite() +} + +// SetupTestSuite has a SetupTest method, which will run before each +// test in the suite. +type SetupTestSuite interface { + SetupTest() +} + +// TearDownAllSuite has a TearDownSuite method, which will run after +// all the tests in the suite have been run. +type TearDownAllSuite interface { + TearDownSuite() +} + +// TearDownTestSuite has a TearDownTest method, which will run after +// each test in the suite. +type TearDownTestSuite interface { + TearDownTest() +} + +// BeforeTest has a function to be executed right before the test +// starts and receives the suite and test names as input +type BeforeTest interface { + BeforeTest(suiteName, testName string) +} + +// AfterTest has a function to be executed right after the test +// finishes and receives the suite and test names as input +type AfterTest interface { + AfterTest(suiteName, testName string) +} diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go new file mode 100644 index 0000000..e20afbc --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/suite.go @@ -0,0 +1,136 @@ +package suite + +import ( + "flag" + "fmt" + "os" + "reflect" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var allTestsFilter = func(_, _ string) (bool, error) { return true, nil } +var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run") + +// Suite is a basic testing suite with methods for storing and +// retrieving the current *testing.T context. +type Suite struct { + *assert.Assertions + require *require.Assertions + t *testing.T +} + +// T retrieves the current *testing.T context. +func (suite *Suite) T() *testing.T { + return suite.t +} + +// SetT sets the current *testing.T context. +func (suite *Suite) SetT(t *testing.T) { + suite.t = t + suite.Assertions = assert.New(t) + suite.require = require.New(t) +} + +// Require returns a require context for suite. +func (suite *Suite) Require() *require.Assertions { + if suite.require == nil { + suite.require = require.New(suite.T()) + } + return suite.require +} + +// Assert returns an assert context for suite. Normally, you can call +// `suite.NoError(expected, actual)`, but for situations where the embedded +// methods are overridden (for example, you might want to override +// assert.Assertions with require.Assertions), this method is provided so you +// can call `suite.Assert().NoError()`. +func (suite *Suite) Assert() *assert.Assertions { + if suite.Assertions == nil { + suite.Assertions = assert.New(suite.T()) + } + return suite.Assertions +} + +// Run takes a testing suite and runs all of the tests attached +// to it. +func Run(t *testing.T, suite TestingSuite) { + suite.SetT(t) + + if setupAllSuite, ok := suite.(SetupAllSuite); ok { + setupAllSuite.SetupSuite() + } + defer func() { + if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { + tearDownAllSuite.TearDownSuite() + } + }() + + methodFinder := reflect.TypeOf(suite) + tests := []testing.InternalTest{} + for index := 0; index < methodFinder.NumMethod(); index++ { + method := methodFinder.Method(index) + ok, err := methodFilter(method.Name) + if err != nil { + fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) + os.Exit(1) + } + if ok { + test := testing.InternalTest{ + Name: method.Name, + F: func(t *testing.T) { + parentT := suite.T() + suite.SetT(t) + if setupTestSuite, ok := suite.(SetupTestSuite); ok { + setupTestSuite.SetupTest() + } + if beforeTestSuite, ok := suite.(BeforeTest); ok { + beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) + } + defer func() { + if afterTestSuite, ok := suite.(AfterTest); ok { + afterTestSuite.AfterTest(methodFinder.Elem().Name(), method.Name) + } + if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { + tearDownTestSuite.TearDownTest() + } + suite.SetT(parentT) + }() + method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) + }, + } + tests = append(tests, test) + } + } + runTests(t, tests) +} + +func runTests(t testing.TB, tests []testing.InternalTest) { + r, ok := t.(runner) + if !ok { // backwards compatibility with Go 1.6 and below + if !testing.RunTests(allTestsFilter, tests) { + t.Fail() + } + return + } + + for _, test := range tests { + r.Run(test.Name, test.F) + } +} + +// Filtering method according to set regular expression +// specified command-line argument -m +func methodFilter(name string) (bool, error) { + if ok, _ := regexp.MatchString("^Test", name); !ok { + return false, nil + } + return regexp.MatchString(*matchMethod, name) +} + +type runner interface { + Run(name string, f func(t *testing.T)) bool +} diff --git a/vendor/github.com/stretchr/testify/suite/suite_test.go b/vendor/github.com/stretchr/testify/suite/suite_test.go new file mode 100644 index 0000000..b75fa4a --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/suite_test.go @@ -0,0 +1,294 @@ +package suite + +import ( + "errors" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// SuiteRequireTwice is intended to test the usage of suite.Require in two +// different tests +type SuiteRequireTwice struct{ Suite } + +// TestSuiteRequireTwice checks for regressions of issue #149 where +// suite.requirements was not initialised in suite.SetT() +// A regression would result on these tests panicking rather than failing. +func TestSuiteRequireTwice(t *testing.T) { + ok := testing.RunTests( + allTestsFilter, + []testing.InternalTest{{ + Name: "TestSuiteRequireTwice", + F: func(t *testing.T) { + suite := new(SuiteRequireTwice) + Run(t, suite) + }, + }}, + ) + assert.Equal(t, false, ok) +} + +func (s *SuiteRequireTwice) TestRequireOne() { + r := s.Require() + r.Equal(1, 2) +} + +func (s *SuiteRequireTwice) TestRequireTwo() { + r := s.Require() + r.Equal(1, 2) +} + +// This suite is intended to store values to make sure that only +// testing-suite-related methods are run. It's also a fully +// functional example of a testing suite, using setup/teardown methods +// and a helper method that is ignored by testify. To make this look +// more like a real world example, all tests in the suite perform some +// type of assertion. +type SuiteTester struct { + // Include our basic suite logic. + Suite + + // Keep counts of how many times each method is run. + SetupSuiteRunCount int + TearDownSuiteRunCount int + SetupTestRunCount int + TearDownTestRunCount int + TestOneRunCount int + TestTwoRunCount int + NonTestMethodRunCount int + + SuiteNameBefore []string + TestNameBefore []string + + SuiteNameAfter []string + TestNameAfter []string + + TimeBefore []time.Time + TimeAfter []time.Time +} + +type SuiteSkipTester struct { + // Include our basic suite logic. + Suite + + // Keep counts of how many times each method is run. + SetupSuiteRunCount int + TearDownSuiteRunCount int +} + +// The SetupSuite method will be run by testify once, at the very +// start of the testing suite, before any tests are run. +func (suite *SuiteTester) SetupSuite() { + suite.SetupSuiteRunCount++ +} + +func (suite *SuiteTester) BeforeTest(suiteName, testName string) { + suite.SuiteNameBefore = append(suite.SuiteNameBefore, suiteName) + suite.TestNameBefore = append(suite.TestNameBefore, testName) + suite.TimeBefore = append(suite.TimeBefore, time.Now()) +} + +func (suite *SuiteTester) AfterTest(suiteName, testName string) { + suite.SuiteNameAfter = append(suite.SuiteNameAfter, suiteName) + suite.TestNameAfter = append(suite.TestNameAfter, testName) + suite.TimeAfter = append(suite.TimeAfter, time.Now()) +} + +func (suite *SuiteSkipTester) SetupSuite() { + suite.SetupSuiteRunCount++ + suite.T().Skip() +} + +// The TearDownSuite method will be run by testify once, at the very +// end of the testing suite, after all tests have been run. +func (suite *SuiteTester) TearDownSuite() { + suite.TearDownSuiteRunCount++ +} + +func (suite *SuiteSkipTester) TearDownSuite() { + suite.TearDownSuiteRunCount++ +} + +// The SetupTest method will be run before every test in the suite. +func (suite *SuiteTester) SetupTest() { + suite.SetupTestRunCount++ +} + +// The TearDownTest method will be run after every test in the suite. +func (suite *SuiteTester) TearDownTest() { + suite.TearDownTestRunCount++ +} + +// Every method in a testing suite that begins with "Test" will be run +// as a test. TestOne is an example of a test. For the purposes of +// this example, we've included assertions in the tests, since most +// tests will issue assertions. +func (suite *SuiteTester) TestOne() { + beforeCount := suite.TestOneRunCount + suite.TestOneRunCount++ + assert.Equal(suite.T(), suite.TestOneRunCount, beforeCount+1) + suite.Equal(suite.TestOneRunCount, beforeCount+1) +} + +// TestTwo is another example of a test. +func (suite *SuiteTester) TestTwo() { + beforeCount := suite.TestTwoRunCount + suite.TestTwoRunCount++ + assert.NotEqual(suite.T(), suite.TestTwoRunCount, beforeCount) + suite.NotEqual(suite.TestTwoRunCount, beforeCount) +} + +func (suite *SuiteTester) TestSkip() { + suite.T().Skip() +} + +// NonTestMethod does not begin with "Test", so it will not be run by +// testify as a test in the suite. This is useful for creating helper +// methods for your tests. +func (suite *SuiteTester) NonTestMethod() { + suite.NonTestMethodRunCount++ +} + +// TestRunSuite will be run by the 'go test' command, so within it, we +// can run our suite using the Run(*testing.T, TestingSuite) function. +func TestRunSuite(t *testing.T) { + suiteTester := new(SuiteTester) + Run(t, suiteTester) + + // Normally, the test would end here. The following are simply + // some assertions to ensure that the Run function is working as + // intended - they are not part of the example. + + // The suite was only run once, so the SetupSuite and TearDownSuite + // methods should have each been run only once. + assert.Equal(t, suiteTester.SetupSuiteRunCount, 1) + assert.Equal(t, suiteTester.TearDownSuiteRunCount, 1) + + assert.Equal(t, len(suiteTester.SuiteNameAfter), 3) + assert.Equal(t, len(suiteTester.SuiteNameBefore), 3) + assert.Equal(t, len(suiteTester.TestNameAfter), 3) + assert.Equal(t, len(suiteTester.TestNameBefore), 3) + + assert.Contains(t, suiteTester.TestNameAfter, "TestOne") + assert.Contains(t, suiteTester.TestNameAfter, "TestTwo") + assert.Contains(t, suiteTester.TestNameAfter, "TestSkip") + + assert.Contains(t, suiteTester.TestNameBefore, "TestOne") + assert.Contains(t, suiteTester.TestNameBefore, "TestTwo") + assert.Contains(t, suiteTester.TestNameBefore, "TestSkip") + + for _, suiteName := range suiteTester.SuiteNameAfter { + assert.Equal(t, "SuiteTester", suiteName) + } + + for _, suiteName := range suiteTester.SuiteNameBefore { + assert.Equal(t, "SuiteTester", suiteName) + } + + for _, when := range suiteTester.TimeAfter { + assert.False(t, when.IsZero()) + } + + for _, when := range suiteTester.TimeBefore { + assert.False(t, when.IsZero()) + } + + // There are three test methods (TestOne, TestTwo, and TestSkip), so + // the SetupTest and TearDownTest methods (which should be run once for + // each test) should have been run three times. + assert.Equal(t, suiteTester.SetupTestRunCount, 3) + assert.Equal(t, suiteTester.TearDownTestRunCount, 3) + + // Each test should have been run once. + assert.Equal(t, suiteTester.TestOneRunCount, 1) + assert.Equal(t, suiteTester.TestTwoRunCount, 1) + + // Methods that don't match the test method identifier shouldn't + // have been run at all. + assert.Equal(t, suiteTester.NonTestMethodRunCount, 0) + + suiteSkipTester := new(SuiteSkipTester) + Run(t, suiteSkipTester) + + // The suite was only run once, so the SetupSuite and TearDownSuite + // methods should have each been run only once, even though SetupSuite + // called Skip() + assert.Equal(t, suiteSkipTester.SetupSuiteRunCount, 1) + assert.Equal(t, suiteSkipTester.TearDownSuiteRunCount, 1) + +} + +func TestSuiteGetters(t *testing.T) { + suite := new(SuiteTester) + suite.SetT(t) + assert.NotNil(t, suite.Assert()) + assert.Equal(t, suite.Assertions, suite.Assert()) + assert.NotNil(t, suite.Require()) + assert.Equal(t, suite.require, suite.Require()) +} + +type SuiteLoggingTester struct { + Suite +} + +func (s *SuiteLoggingTester) TestLoggingPass() { + s.T().Log("TESTLOGPASS") +} + +func (s *SuiteLoggingTester) TestLoggingFail() { + s.T().Log("TESTLOGFAIL") + assert.NotNil(s.T(), nil) // expected to fail +} + +type StdoutCapture struct { + oldStdout *os.File + readPipe *os.File +} + +func (sc *StdoutCapture) StartCapture() { + sc.oldStdout = os.Stdout + sc.readPipe, os.Stdout, _ = os.Pipe() +} + +func (sc *StdoutCapture) StopCapture() (string, error) { + if sc.oldStdout == nil || sc.readPipe == nil { + return "", errors.New("StartCapture not called before StopCapture") + } + os.Stdout.Close() + os.Stdout = sc.oldStdout + bytes, err := ioutil.ReadAll(sc.readPipe) + if err != nil { + return "", err + } + return string(bytes), nil +} + +func TestSuiteLogging(t *testing.T) { + suiteLoggingTester := new(SuiteLoggingTester) + capture := StdoutCapture{} + internalTest := testing.InternalTest{ + Name: "SomeTest", + F: func(subT *testing.T) { + Run(subT, suiteLoggingTester) + }, + } + capture.StartCapture() + testing.RunTests(allTestsFilter, []testing.InternalTest{internalTest}) + output, err := capture.StopCapture() + require.NoError(t, err, "Got an error trying to capture stdout and stderr!") + require.NotEmpty(t, output, "output content must not be empty") + + // Failed tests' output is always printed + assert.Contains(t, output, "TESTLOGFAIL") + + if testing.Verbose() { + // In verbose mode, output from successful tests is also printed + assert.Contains(t, output, "TESTLOGPASS") + } else { + assert.NotContains(t, output, "TESTLOGPASS") + } +} diff --git a/vendor/golang.org/x/crypto/.gitattributes b/vendor/golang.org/x/crypto/.gitattributes new file mode 100644 index 0000000..d2f212e --- /dev/null +++ b/vendor/golang.org/x/crypto/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/crypto/.gitignore b/vendor/golang.org/x/crypto/.gitignore new file mode 100644 index 0000000..8339fd6 --- /dev/null +++ b/vendor/golang.org/x/crypto/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 0000000..2b00ddb --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTING.md b/vendor/golang.org/x/crypto/CONTRIBUTING.md new file mode 100644 index 0000000..d0485e8 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 0000000..1fbd3e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/README.md b/vendor/golang.org/x/crypto/README.md new file mode 100644 index 0000000..c9d6fec --- /dev/null +++ b/vendor/golang.org/x/crypto/README.md @@ -0,0 +1,21 @@ +# Go Cryptography + +This repository holds supplementary Go cryptography libraries. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You +can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the crypto repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the +subject line, so it is easy to find. + +Note that contributions to the cryptography package receive additional scrutiny +due to their sensitive nature. Patches may take longer than normal to receive +feedback. diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go new file mode 100644 index 0000000..1f4fb69 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/acme.go @@ -0,0 +1,1065 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package acme provides an implementation of the +// Automatic Certificate Management Environment (ACME) spec. +// See https://tools.ietf.org/html/draft-ietf-acme-acme-02 for details. +// +// Most common scenarios will want to use autocert subdirectory instead, +// which provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package acme + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net/http" + "strconv" + "strings" + "sync" + "time" +) + +// LetsEncryptURL is the Directory endpoint of Let's Encrypt CA. +const LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory" + +const ( + maxChainLen = 5 // max depth and breadth of a certificate chain + maxCertSize = 1 << 20 // max size of a certificate, in bytes + + // Max number of collected nonces kept in memory. + // Expect usual peak of 1 or 2. + maxNonces = 100 +) + +// Client is an ACME client. +// The only required field is Key. An example of creating a client with a new key +// is as follows: +// +// key, err := rsa.GenerateKey(rand.Reader, 2048) +// if err != nil { +// log.Fatal(err) +// } +// client := &Client{Key: key} +// +type Client struct { + // Key is the account key used to register with a CA and sign requests. + // Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey. + Key crypto.Signer + + // HTTPClient optionally specifies an HTTP client to use + // instead of http.DefaultClient. + HTTPClient *http.Client + + // DirectoryURL points to the CA directory endpoint. + // If empty, LetsEncryptURL is used. + // Mutating this value after a successful call of Client's Discover method + // will have no effect. + DirectoryURL string + + dirMu sync.Mutex // guards writes to dir + dir *Directory // cached result of Client's Discover method + + noncesMu sync.Mutex + nonces map[string]struct{} // nonces collected from previous responses +} + +// Discover performs ACME server discovery using c.DirectoryURL. +// +// It caches successful result. So, subsequent calls will not result in +// a network round-trip. This also means mutating c.DirectoryURL after successful call +// of this method will have no effect. +func (c *Client) Discover(ctx context.Context) (Directory, error) { + c.dirMu.Lock() + defer c.dirMu.Unlock() + if c.dir != nil { + return *c.dir, nil + } + + dirURL := c.DirectoryURL + if dirURL == "" { + dirURL = LetsEncryptURL + } + res, err := c.get(ctx, dirURL) + if err != nil { + return Directory{}, err + } + defer res.Body.Close() + c.addNonce(res.Header) + if res.StatusCode != http.StatusOK { + return Directory{}, responseError(res) + } + + var v struct { + Reg string `json:"new-reg"` + Authz string `json:"new-authz"` + Cert string `json:"new-cert"` + Revoke string `json:"revoke-cert"` + Meta struct { + Terms string `json:"terms-of-service"` + Website string `json:"website"` + CAA []string `json:"caa-identities"` + } + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return Directory{}, err + } + c.dir = &Directory{ + RegURL: v.Reg, + AuthzURL: v.Authz, + CertURL: v.Cert, + RevokeURL: v.Revoke, + Terms: v.Meta.Terms, + Website: v.Meta.Website, + CAA: v.Meta.CAA, + } + return *c.dir, nil +} + +// CreateCert requests a new certificate using the Certificate Signing Request csr encoded in DER format. +// The exp argument indicates the desired certificate validity duration. CA may issue a certificate +// with a different duration. +// If the bundle argument is true, the returned value will also contain the CA (issuer) certificate chain. +// +// In the case where CA server does not provide the issued certificate in the response, +// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips. +// In such a scenario, the caller can cancel the polling with ctx. +// +// CreateCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. +func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) { + if _, err := c.Discover(ctx); err != nil { + return nil, "", err + } + + req := struct { + Resource string `json:"resource"` + CSR string `json:"csr"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + }{ + Resource: "new-cert", + CSR: base64.RawURLEncoding.EncodeToString(csr), + } + now := timeNow() + req.NotBefore = now.Format(time.RFC3339) + if exp > 0 { + req.NotAfter = now.Add(exp).Format(time.RFC3339) + } + + res, err := c.retryPostJWS(ctx, c.Key, c.dir.CertURL, req) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + return nil, "", responseError(res) + } + + curl := res.Header.Get("Location") // cert permanent URL + if res.ContentLength == 0 { + // no cert in the body; poll until we get it + cert, err := c.FetchCert(ctx, curl, bundle) + return cert, curl, err + } + // slurp issued cert and CA chain, if requested + cert, err := c.responseCert(ctx, res, bundle) + return cert, curl, err +} + +// FetchCert retrieves already issued certificate from the given url, in DER format. +// It retries the request until the certificate is successfully retrieved, +// context is cancelled by the caller or an error response is received. +// +// The returned value will also contain the CA (issuer) certificate if the bundle argument is true. +// +// FetchCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid +// and has expected features. +func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) { + for { + res, err := c.get(ctx, url) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode == http.StatusOK { + return c.responseCert(ctx, res, bundle) + } + if res.StatusCode > 299 { + return nil, responseError(res) + } + d := retryAfter(res.Header.Get("Retry-After"), 3*time.Second) + select { + case <-time.After(d): + // retry + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +// RevokeCert revokes a previously issued certificate cert, provided in DER format. +// +// The key argument, used to sign the request, must be authorized +// to revoke the certificate. It's up to the CA to decide which keys are authorized. +// For instance, the key pair of the certificate may be authorized. +// If the key is nil, c.Key is used instead. +func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { + if _, err := c.Discover(ctx); err != nil { + return err + } + + body := &struct { + Resource string `json:"resource"` + Cert string `json:"certificate"` + Reason int `json:"reason"` + }{ + Resource: "revoke-cert", + Cert: base64.RawURLEncoding.EncodeToString(cert), + Reason: int(reason), + } + if key == nil { + key = c.Key + } + res, err := c.retryPostJWS(ctx, key, c.dir.RevokeURL, body) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return responseError(res) + } + return nil +} + +// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service +// during account registration. See Register method of Client for more details. +func AcceptTOS(tosURL string) bool { return true } + +// Register creates a new account registration by following the "new-reg" flow. +// It returns the registered account. The account is not modified. +// +// The registration may require the caller to agree to the CA's Terms of Service (TOS). +// If so, and the account has not indicated the acceptance of the terms (see Account for details), +// Register calls prompt with a TOS URL provided by the CA. Prompt should report +// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS. +func (c *Client) Register(ctx context.Context, a *Account, prompt func(tosURL string) bool) (*Account, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + var err error + if a, err = c.doReg(ctx, c.dir.RegURL, "new-reg", a); err != nil { + return nil, err + } + var accept bool + if a.CurrentTerms != "" && a.CurrentTerms != a.AgreedTerms { + accept = prompt(a.CurrentTerms) + } + if accept { + a.AgreedTerms = a.CurrentTerms + a, err = c.UpdateReg(ctx, a) + } + return a, err +} + +// GetReg retrieves an existing registration. +// The url argument is an Account URI. +func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) { + a, err := c.doReg(ctx, url, "reg", nil) + if err != nil { + return nil, err + } + a.URI = url + return a, nil +} + +// UpdateReg updates an existing registration. +// It returns an updated account copy. The provided account is not modified. +func (c *Client) UpdateReg(ctx context.Context, a *Account) (*Account, error) { + uri := a.URI + a, err := c.doReg(ctx, uri, "reg", a) + if err != nil { + return nil, err + } + a.URI = uri + return a, nil +} + +// Authorize performs the initial step in an authorization flow. +// The caller will then need to choose from and perform a set of returned +// challenges using c.Accept in order to successfully complete authorization. +// +// If an authorization has been previously granted, the CA may return +// a valid authorization (Authorization.Status is StatusValid). If so, the caller +// need not fulfill any challenge and can proceed to requesting a certificate. +func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + type authzID struct { + Type string `json:"type"` + Value string `json:"value"` + } + req := struct { + Resource string `json:"resource"` + Identifier authzID `json:"identifier"` + }{ + Resource: "new-authz", + Identifier: authzID{Type: "dns", Value: domain}, + } + res, err := c.retryPostJWS(ctx, c.Key, c.dir.AuthzURL, req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + return nil, responseError(res) + } + + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + if v.Status != StatusPending && v.Status != StatusValid { + return nil, fmt.Errorf("acme: unexpected status: %s", v.Status) + } + return v.authorization(res.Header.Get("Location")), nil +} + +// GetAuthorization retrieves an authorization identified by the given URL. +// +// If a caller needs to poll an authorization until its status is final, +// see the WaitAuthorization method. +func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) { + res, err := c.get(ctx, url) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted { + return nil, responseError(res) + } + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.authorization(url), nil +} + +// RevokeAuthorization relinquishes an existing authorization identified +// by the given URL. +// The url argument is an Authorization.URI value. +// +// If successful, the caller will be required to obtain a new authorization +// using the Authorize method before being able to request a new certificate +// for the domain associated with the authorization. +// +// It does not revoke existing certificates. +func (c *Client) RevokeAuthorization(ctx context.Context, url string) error { + req := struct { + Resource string `json:"resource"` + Status string `json:"status"` + Delete bool `json:"delete"` + }{ + Resource: "authz", + Status: "deactivated", + Delete: true, + } + res, err := c.retryPostJWS(ctx, c.Key, url, req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return responseError(res) + } + return nil +} + +// WaitAuthorization polls an authorization at the given URL +// until it is in one of the final states, StatusValid or StatusInvalid, +// the ACME CA responded with a 4xx error code, or the context is done. +// +// It returns a non-nil Authorization only if its Status is StatusValid. +// In all other cases WaitAuthorization returns an error. +// If the Status is StatusInvalid, the returned error is of type *AuthorizationError. +func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) { + sleep := sleeper(ctx) + for { + res, err := c.get(ctx, url) + if err != nil { + return nil, err + } + if res.StatusCode >= 400 && res.StatusCode <= 499 { + // Non-retriable error. For instance, Let's Encrypt may return 404 Not Found + // when requesting an expired authorization. + defer res.Body.Close() + return nil, responseError(res) + } + + retry := res.Header.Get("Retry-After") + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted { + res.Body.Close() + if err := sleep(retry, 1); err != nil { + return nil, err + } + continue + } + var raw wireAuthz + err = json.NewDecoder(res.Body).Decode(&raw) + res.Body.Close() + if err != nil { + if err := sleep(retry, 0); err != nil { + return nil, err + } + continue + } + if raw.Status == StatusValid { + return raw.authorization(url), nil + } + if raw.Status == StatusInvalid { + return nil, raw.error(url) + } + if err := sleep(retry, 0); err != nil { + return nil, err + } + } +} + +// GetChallenge retrieves the current status of an challenge. +// +// A client typically polls a challenge status using this method. +func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) { + res, err := c.get(ctx, url) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted { + return nil, responseError(res) + } + v := wireChallenge{URI: url} + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// Accept informs the server that the client accepts one of its challenges +// previously obtained with c.Authorize. +// +// The server will then perform the validation asynchronously. +func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) { + auth, err := keyAuth(c.Key.Public(), chal.Token) + if err != nil { + return nil, err + } + + req := struct { + Resource string `json:"resource"` + Type string `json:"type"` + Auth string `json:"keyAuthorization"` + }{ + Resource: "challenge", + Type: chal.Type, + Auth: auth, + } + res, err := c.retryPostJWS(ctx, c.Key, chal.URI, req) + if err != nil { + return nil, err + } + defer res.Body.Close() + // Note: the protocol specifies 200 as the expected response code, but + // letsencrypt seems to be returning 202. + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted { + return nil, responseError(res) + } + + var v wireChallenge + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response. +// A TXT record containing the returned value must be provisioned under +// "_acme-challenge" name of the domain being validated. +// +// The token argument is a Challenge.Token value. +func (c *Client) DNS01ChallengeRecord(token string) (string, error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(ka)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} + +// HTTP01ChallengeResponse returns the response for an http-01 challenge. +// Servers should respond with the value to HTTP requests at the URL path +// provided by HTTP01ChallengePath to validate the challenge and prove control +// over a domain name. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengeResponse(token string) (string, error) { + return keyAuth(c.Key.Public(), token) +} + +// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge +// should be provided by the servers. +// The response value can be obtained with HTTP01ChallengeResponse. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengePath(token string) string { + return "/.well-known/acme-challenge/" + token +} + +// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. +// +// The implementation is incomplete in that the returned value is a single certificate, +// computed only for Z0 of the key authorization. ACME CAs are expected to update +// their implementations to use the newer version, TLS-SNI-02. +// For more details on TLS-SNI-01 see https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-7.3. +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name of the client hello matches exactly the returned name value. +func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b := sha256.Sum256([]byte(ka)) + h := hex.EncodeToString(b[:]) + name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:]) + cert, err = tlsChallengeCert([]string{name}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, name, nil +} + +// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. For more details on TLS-SNI-02 see +// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-7.3. +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name in the client hello matches exactly the returned name value. +func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + b := sha256.Sum256([]byte(token)) + h := hex.EncodeToString(b[:]) + sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:]) + + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b = sha256.Sum256([]byte(ka)) + h = hex.EncodeToString(b[:]) + sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:]) + + cert, err = tlsChallengeCert([]string{sanA, sanB}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, sanA, nil +} + +// doReg sends all types of registration requests. +// The type of request is identified by typ argument, which is a "resource" +// in the ACME spec terms. +// +// A non-nil acct argument indicates whether the intention is to mutate data +// of the Account. Only Contact and Agreement of its fields are used +// in such cases. +func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Account) (*Account, error) { + req := struct { + Resource string `json:"resource"` + Contact []string `json:"contact,omitempty"` + Agreement string `json:"agreement,omitempty"` + }{ + Resource: typ, + } + if acct != nil { + req.Contact = acct.Contact + req.Agreement = acct.AgreedTerms + } + res, err := c.retryPostJWS(ctx, c.Key, url, req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode < 200 || res.StatusCode > 299 { + return nil, responseError(res) + } + + var v struct { + Contact []string + Agreement string + Authorizations string + Certificates string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + var tos string + if v := linkHeader(res.Header, "terms-of-service"); len(v) > 0 { + tos = v[0] + } + var authz string + if v := linkHeader(res.Header, "next"); len(v) > 0 { + authz = v[0] + } + return &Account{ + URI: res.Header.Get("Location"), + Contact: v.Contact, + AgreedTerms: v.Agreement, + CurrentTerms: tos, + Authz: authz, + Authorizations: v.Authorizations, + Certificates: v.Certificates, + }, nil +} + +// retryPostJWS will retry calls to postJWS if there is a badNonce error, +// clearing the stored nonces after each error. +// If the response was 4XX-5XX, then responseError is called on the body, +// the body is closed, and the error returned. +func (c *Client) retryPostJWS(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, error) { + sleep := sleeper(ctx) + for { + res, err := c.postJWS(ctx, key, url, body) + if err != nil { + return nil, err + } + // handle errors 4XX-5XX with responseError + if res.StatusCode >= 400 && res.StatusCode <= 599 { + err := responseError(res) + res.Body.Close() + // according to spec badNonce is urn:ietf:params:acme:error:badNonce + // however, acme servers in the wild return their version of the error + // https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4 + if ae, ok := err.(*Error); ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce") { + // clear any nonces that we might've stored that might now be + // considered bad + c.clearNonces() + retry := res.Header.Get("Retry-After") + if err := sleep(retry, 1); err != nil { + return nil, err + } + continue + } + return nil, err + } + return res, nil + } +} + +// postJWS signs the body with the given key and POSTs it to the provided url. +// The body argument must be JSON-serializable. +func (c *Client) postJWS(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, error) { + nonce, err := c.popNonce(ctx, url) + if err != nil { + return nil, err + } + b, err := jwsEncodeJSON(body, key, nonce) + if err != nil { + return nil, err + } + res, err := c.post(ctx, url, "application/jose+json", bytes.NewReader(b)) + if err != nil { + return nil, err + } + c.addNonce(res.Header) + return res, nil +} + +// popNonce returns a nonce value previously stored with c.addNonce +// or fetches a fresh one from the given URL. +func (c *Client) popNonce(ctx context.Context, url string) (string, error) { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) == 0 { + return c.fetchNonce(ctx, url) + } + var nonce string + for nonce = range c.nonces { + delete(c.nonces, nonce) + break + } + return nonce, nil +} + +// clearNonces clears any stored nonces +func (c *Client) clearNonces() { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + c.nonces = make(map[string]struct{}) +} + +// addNonce stores a nonce value found in h (if any) for future use. +func (c *Client) addNonce(h http.Header) { + v := nonceFromHeader(h) + if v == "" { + return + } + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) >= maxNonces { + return + } + if c.nonces == nil { + c.nonces = make(map[string]struct{}) + } + c.nonces[v] = struct{}{} +} + +func (c *Client) httpClient() *http.Client { + if c.HTTPClient != nil { + return c.HTTPClient + } + return http.DefaultClient +} + +func (c *Client) get(ctx context.Context, urlStr string) (*http.Response, error) { + req, err := http.NewRequest("GET", urlStr, nil) + if err != nil { + return nil, err + } + return c.do(ctx, req) +} + +func (c *Client) head(ctx context.Context, urlStr string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", urlStr, nil) + if err != nil { + return nil, err + } + return c.do(ctx, req) +} + +func (c *Client) post(ctx context.Context, urlStr, contentType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", urlStr, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return c.do(ctx, req) +} + +func (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) { + res, err := c.httpClient().Do(req.WithContext(ctx)) + if err != nil { + select { + case <-ctx.Done(): + // Prefer the unadorned context error. + // (The acme package had tests assuming this, previously from ctxhttp's + // behavior, predating net/http supporting contexts natively) + // TODO(bradfitz): reconsider this in the future. But for now this + // requires no test updates. + return nil, ctx.Err() + default: + return nil, err + } + } + return res, nil +} + +func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) { + resp, err := c.head(ctx, url) + if err != nil { + return "", err + } + defer resp.Body.Close() + nonce := nonceFromHeader(resp.Header) + if nonce == "" { + if resp.StatusCode > 299 { + return "", responseError(resp) + } + return "", errors.New("acme: nonce not found") + } + return nonce, nil +} + +func nonceFromHeader(h http.Header) string { + return h.Get("Replay-Nonce") +} + +func (c *Client) responseCert(ctx context.Context, res *http.Response, bundle bool) ([][]byte, error) { + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, fmt.Errorf("acme: response stream: %v", err) + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + cert := [][]byte{b} + if !bundle { + return cert, nil + } + + // Append CA chain cert(s). + // At least one is required according to the spec: + // https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-6.3.1 + up := linkHeader(res.Header, "up") + if len(up) == 0 { + return nil, errors.New("acme: rel=up link not found") + } + if len(up) > maxChainLen { + return nil, errors.New("acme: rel=up link is too large") + } + for _, url := range up { + cc, err := c.chainCert(ctx, url, 0) + if err != nil { + return nil, err + } + cert = append(cert, cc...) + } + return cert, nil +} + +// responseError creates an error of Error type from resp. +func responseError(resp *http.Response) error { + // don't care if ReadAll returns an error: + // json.Unmarshal will fail in that case anyway + b, _ := ioutil.ReadAll(resp.Body) + e := &wireError{Status: resp.StatusCode} + if err := json.Unmarshal(b, e); err != nil { + // this is not a regular error response: + // populate detail with anything we received, + // e.Status will already contain HTTP response code value + e.Detail = string(b) + if e.Detail == "" { + e.Detail = resp.Status + } + } + return e.error(resp.Header) +} + +// chainCert fetches CA certificate chain recursively by following "up" links. +// Each recursive call increments the depth by 1, resulting in an error +// if the recursion level reaches maxChainLen. +// +// First chainCert call starts with depth of 0. +func (c *Client) chainCert(ctx context.Context, url string, depth int) ([][]byte, error) { + if depth >= maxChainLen { + return nil, errors.New("acme: certificate chain is too deep") + } + + res, err := c.get(ctx, url) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, responseError(res) + } + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, err + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + chain := [][]byte{b} + + uplink := linkHeader(res.Header, "up") + if len(uplink) > maxChainLen { + return nil, errors.New("acme: certificate chain is too large") + } + for _, up := range uplink { + cc, err := c.chainCert(ctx, up, depth+1) + if err != nil { + return nil, err + } + chain = append(chain, cc...) + } + + return chain, nil +} + +// linkHeader returns URI-Reference values of all Link headers +// with relation-type rel. +// See https://tools.ietf.org/html/rfc5988#section-5 for details. +func linkHeader(h http.Header, rel string) []string { + var links []string + for _, v := range h["Link"] { + parts := strings.Split(v, ";") + for _, p := range parts { + p = strings.TrimSpace(p) + if !strings.HasPrefix(p, "rel=") { + continue + } + if v := strings.Trim(p[4:], `"`); v == rel { + links = append(links, strings.Trim(parts[0], "<>")) + } + } + } + return links +} + +// sleeper returns a function that accepts the Retry-After HTTP header value +// and an increment that's used with backoff to increasingly sleep on +// consecutive calls until the context is done. If the Retry-After header +// cannot be parsed, then backoff is used with a maximum sleep time of 10 +// seconds. +func sleeper(ctx context.Context) func(ra string, inc int) error { + var count int + return func(ra string, inc int) error { + count += inc + d := backoff(count, 10*time.Second) + d = retryAfter(ra, d) + wakeup := time.NewTimer(d) + defer wakeup.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-wakeup.C: + return nil + } + } +} + +// retryAfter parses a Retry-After HTTP header value, +// trying to convert v into an int (seconds) or use http.ParseTime otherwise. +// It returns d if v cannot be parsed. +func retryAfter(v string, d time.Duration) time.Duration { + if i, err := strconv.Atoi(v); err == nil { + return time.Duration(i) * time.Second + } + t, err := http.ParseTime(v) + if err != nil { + return d + } + return t.Sub(timeNow()) +} + +// backoff computes a duration after which an n+1 retry iteration should occur +// using truncated exponential backoff algorithm. +// +// The n argument is always bounded between 0 and 30. +// The max argument defines upper bound for the returned value. +func backoff(n int, max time.Duration) time.Duration { + if n < 0 { + n = 0 + } + if n > 30 { + n = 30 + } + var d time.Duration + if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil { + d = time.Duration(x.Int64()) * time.Millisecond + } + d += time.Duration(1< max { + return max + } + return d +} + +// keyAuth generates a key authorization string for a given token. +func keyAuth(pub crypto.PublicKey, token string) (string, error) { + th, err := JWKThumbprint(pub) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", token, th), nil +} + +// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges +// with the given SANs and auto-generated public/private key pair. +// The Subject Common Name is set to the first SAN to aid debugging. +// To create a cert with a custom key pair, specify WithKey option. +func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { + var ( + key crypto.Signer + tmpl *x509.Certificate + ) + for _, o := range opt { + switch o := o.(type) { + case *certOptKey: + if key != nil { + return tls.Certificate{}, errors.New("acme: duplicate key option") + } + key = o.key + case *certOptTemplate: + var t = *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + // package's fault, if we let this happen: + panic(fmt.Sprintf("unsupported option type %T", o)) + } + } + if key == nil { + var err error + if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { + return tls.Certificate{}, err + } + } + if tmpl == nil { + tmpl = &x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } + } + tmpl.DNSNames = san + if len(san) > 0 { + tmpl.Subject.CommonName = san[0] + } + + der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + if err != nil { + return tls.Certificate{}, err + } + return tls.Certificate{ + Certificate: [][]byte{der}, + PrivateKey: key, + }, nil +} + +// encodePEM returns b encoded as PEM with block of type typ. +func encodePEM(typ string, b []byte) []byte { + pb := &pem.Block{Type: typ, Bytes: b} + return pem.EncodeToMemory(pb) +} + +// timeNow is useful for testing for fixed current time. +var timeNow = time.Now diff --git a/vendor/golang.org/x/crypto/acme/acme_test.go b/vendor/golang.org/x/crypto/acme/acme_test.go new file mode 100644 index 0000000..63cb79b --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/acme_test.go @@ -0,0 +1,1380 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "math/big" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" + "time" +) + +// Decodes a JWS-encoded request and unmarshals the decoded JSON into a provided +// interface. +func decodeJWSRequest(t *testing.T, v interface{}, r *http.Request) { + // Decode request + var req struct{ Payload string } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + t.Fatal(err) + } + payload, err := base64.RawURLEncoding.DecodeString(req.Payload) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal(payload, v) + if err != nil { + t.Fatal(err) + } +} + +type jwsHead struct { + Alg string + Nonce string + JWK map[string]string `json:"jwk"` +} + +func decodeJWSHead(r *http.Request) (*jwsHead, error) { + var req struct{ Protected string } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, err + } + b, err := base64.RawURLEncoding.DecodeString(req.Protected) + if err != nil { + return nil, err + } + var head jwsHead + if err := json.Unmarshal(b, &head); err != nil { + return nil, err + } + return &head, nil +} + +func TestDiscover(t *testing.T) { + const ( + reg = "https://example.com/acme/new-reg" + authz = "https://example.com/acme/new-authz" + cert = "https://example.com/acme/new-cert" + revoke = "https://example.com/acme/revoke-cert" + ) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "new-reg": %q, + "new-authz": %q, + "new-cert": %q, + "revoke-cert": %q + }`, reg, authz, cert, revoke) + })) + defer ts.Close() + c := Client{DirectoryURL: ts.URL} + dir, err := c.Discover(context.Background()) + if err != nil { + t.Fatal(err) + } + if dir.RegURL != reg { + t.Errorf("dir.RegURL = %q; want %q", dir.RegURL, reg) + } + if dir.AuthzURL != authz { + t.Errorf("dir.AuthzURL = %q; want %q", dir.AuthzURL, authz) + } + if dir.CertURL != cert { + t.Errorf("dir.CertURL = %q; want %q", dir.CertURL, cert) + } + if dir.RevokeURL != revoke { + t.Errorf("dir.RevokeURL = %q; want %q", dir.RevokeURL, revoke) + } +} + +func TestRegister(t *testing.T) { + contacts := []string{"mailto:admin@example.com"} + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Contact []string + Agreement string + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "new-reg" { + t.Errorf("j.Resource = %q; want new-reg", j.Resource) + } + if !reflect.DeepEqual(j.Contact, contacts) { + t.Errorf("j.Contact = %v; want %v", j.Contact, contacts) + } + + w.Header().Set("Location", "https://ca.tld/acme/reg/1") + w.Header().Set("Link", `;rel="next"`) + w.Header().Add("Link", `;rel="recover"`) + w.Header().Add("Link", `;rel="terms-of-service"`) + w.WriteHeader(http.StatusCreated) + b, _ := json.Marshal(contacts) + fmt.Fprintf(w, `{"contact": %s}`, b) + })) + defer ts.Close() + + prompt := func(url string) bool { + const terms = "https://ca.tld/acme/terms" + if url != terms { + t.Errorf("prompt url = %q; want %q", url, terms) + } + return false + } + + c := Client{Key: testKeyEC, dir: &Directory{RegURL: ts.URL}} + a := &Account{Contact: contacts} + var err error + if a, err = c.Register(context.Background(), a, prompt); err != nil { + t.Fatal(err) + } + if a.URI != "https://ca.tld/acme/reg/1" { + t.Errorf("a.URI = %q; want https://ca.tld/acme/reg/1", a.URI) + } + if a.Authz != "https://ca.tld/acme/new-authz" { + t.Errorf("a.Authz = %q; want https://ca.tld/acme/new-authz", a.Authz) + } + if a.CurrentTerms != "https://ca.tld/acme/terms" { + t.Errorf("a.CurrentTerms = %q; want https://ca.tld/acme/terms", a.CurrentTerms) + } + if !reflect.DeepEqual(a.Contact, contacts) { + t.Errorf("a.Contact = %v; want %v", a.Contact, contacts) + } +} + +func TestUpdateReg(t *testing.T) { + const terms = "https://ca.tld/acme/terms" + contacts := []string{"mailto:admin@example.com"} + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Contact []string + Agreement string + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "reg" { + t.Errorf("j.Resource = %q; want reg", j.Resource) + } + if j.Agreement != terms { + t.Errorf("j.Agreement = %q; want %q", j.Agreement, terms) + } + if !reflect.DeepEqual(j.Contact, contacts) { + t.Errorf("j.Contact = %v; want %v", j.Contact, contacts) + } + + w.Header().Set("Link", `;rel="next"`) + w.Header().Add("Link", `;rel="recover"`) + w.Header().Add("Link", fmt.Sprintf(`<%s>;rel="terms-of-service"`, terms)) + w.WriteHeader(http.StatusOK) + b, _ := json.Marshal(contacts) + fmt.Fprintf(w, `{"contact":%s, "agreement":%q}`, b, terms) + })) + defer ts.Close() + + c := Client{Key: testKeyEC} + a := &Account{URI: ts.URL, Contact: contacts, AgreedTerms: terms} + var err error + if a, err = c.UpdateReg(context.Background(), a); err != nil { + t.Fatal(err) + } + if a.Authz != "https://ca.tld/acme/new-authz" { + t.Errorf("a.Authz = %q; want https://ca.tld/acme/new-authz", a.Authz) + } + if a.AgreedTerms != terms { + t.Errorf("a.AgreedTerms = %q; want %q", a.AgreedTerms, terms) + } + if a.CurrentTerms != terms { + t.Errorf("a.CurrentTerms = %q; want %q", a.CurrentTerms, terms) + } + if a.URI != ts.URL { + t.Errorf("a.URI = %q; want %q", a.URI, ts.URL) + } +} + +func TestGetReg(t *testing.T) { + const terms = "https://ca.tld/acme/terms" + const newTerms = "https://ca.tld/acme/new-terms" + contacts := []string{"mailto:admin@example.com"} + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Contact []string + Agreement string + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "reg" { + t.Errorf("j.Resource = %q; want reg", j.Resource) + } + if len(j.Contact) != 0 { + t.Errorf("j.Contact = %v", j.Contact) + } + if j.Agreement != "" { + t.Errorf("j.Agreement = %q", j.Agreement) + } + + w.Header().Set("Link", `;rel="next"`) + w.Header().Add("Link", `;rel="recover"`) + w.Header().Add("Link", fmt.Sprintf(`<%s>;rel="terms-of-service"`, newTerms)) + w.WriteHeader(http.StatusOK) + b, _ := json.Marshal(contacts) + fmt.Fprintf(w, `{"contact":%s, "agreement":%q}`, b, terms) + })) + defer ts.Close() + + c := Client{Key: testKeyEC} + a, err := c.GetReg(context.Background(), ts.URL) + if err != nil { + t.Fatal(err) + } + if a.Authz != "https://ca.tld/acme/new-authz" { + t.Errorf("a.AuthzURL = %q; want https://ca.tld/acme/new-authz", a.Authz) + } + if a.AgreedTerms != terms { + t.Errorf("a.AgreedTerms = %q; want %q", a.AgreedTerms, terms) + } + if a.CurrentTerms != newTerms { + t.Errorf("a.CurrentTerms = %q; want %q", a.CurrentTerms, newTerms) + } + if a.URI != ts.URL { + t.Errorf("a.URI = %q; want %q", a.URI, ts.URL) + } +} + +func TestAuthorize(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Identifier struct { + Type string + Value string + } + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "new-authz" { + t.Errorf("j.Resource = %q; want new-authz", j.Resource) + } + if j.Identifier.Type != "dns" { + t.Errorf("j.Identifier.Type = %q; want dns", j.Identifier.Type) + } + if j.Identifier.Value != "example.com" { + t.Errorf("j.Identifier.Value = %q; want example.com", j.Identifier.Value) + } + + w.Header().Set("Location", "https://ca.tld/acme/auth/1") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ + "identifier": {"type":"dns","value":"example.com"}, + "status":"pending", + "challenges":[ + { + "type":"http-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id1", + "token":"token1" + }, + { + "type":"tls-sni-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id2", + "token":"token2" + } + ], + "combinations":[[0],[1]]}`) + })) + defer ts.Close() + + cl := Client{Key: testKeyEC, dir: &Directory{AuthzURL: ts.URL}} + auth, err := cl.Authorize(context.Background(), "example.com") + if err != nil { + t.Fatal(err) + } + + if auth.URI != "https://ca.tld/acme/auth/1" { + t.Errorf("URI = %q; want https://ca.tld/acme/auth/1", auth.URI) + } + if auth.Status != "pending" { + t.Errorf("Status = %q; want pending", auth.Status) + } + if auth.Identifier.Type != "dns" { + t.Errorf("Identifier.Type = %q; want dns", auth.Identifier.Type) + } + if auth.Identifier.Value != "example.com" { + t.Errorf("Identifier.Value = %q; want example.com", auth.Identifier.Value) + } + + if n := len(auth.Challenges); n != 2 { + t.Fatalf("len(auth.Challenges) = %d; want 2", n) + } + + c := auth.Challenges[0] + if c.Type != "http-01" { + t.Errorf("c.Type = %q; want http-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id1" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id1", c.URI) + } + if c.Token != "token1" { + t.Errorf("c.Token = %q; want token1", c.Token) + } + + c = auth.Challenges[1] + if c.Type != "tls-sni-01" { + t.Errorf("c.Type = %q; want tls-sni-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id2" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id2", c.URI) + } + if c.Token != "token2" { + t.Errorf("c.Token = %q; want token2", c.Token) + } + + combs := [][]int{{0}, {1}} + if !reflect.DeepEqual(auth.Combinations, combs) { + t.Errorf("auth.Combinations: %+v\nwant: %+v\n", auth.Combinations, combs) + } +} + +func TestAuthorizeValid(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "nonce") + return + } + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"status":"valid"}`)) + })) + defer ts.Close() + client := Client{Key: testKey, dir: &Directory{AuthzURL: ts.URL}} + _, err := client.Authorize(context.Background(), "example.com") + if err != nil { + t.Errorf("err = %v", err) + } +} + +func TestGetAuthorization(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Errorf("r.Method = %q; want GET", r.Method) + } + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "identifier": {"type":"dns","value":"example.com"}, + "status":"pending", + "challenges":[ + { + "type":"http-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id1", + "token":"token1" + }, + { + "type":"tls-sni-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id2", + "token":"token2" + } + ], + "combinations":[[0],[1]]}`) + })) + defer ts.Close() + + cl := Client{Key: testKeyEC} + auth, err := cl.GetAuthorization(context.Background(), ts.URL) + if err != nil { + t.Fatal(err) + } + + if auth.Status != "pending" { + t.Errorf("Status = %q; want pending", auth.Status) + } + if auth.Identifier.Type != "dns" { + t.Errorf("Identifier.Type = %q; want dns", auth.Identifier.Type) + } + if auth.Identifier.Value != "example.com" { + t.Errorf("Identifier.Value = %q; want example.com", auth.Identifier.Value) + } + + if n := len(auth.Challenges); n != 2 { + t.Fatalf("len(set.Challenges) = %d; want 2", n) + } + + c := auth.Challenges[0] + if c.Type != "http-01" { + t.Errorf("c.Type = %q; want http-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id1" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id1", c.URI) + } + if c.Token != "token1" { + t.Errorf("c.Token = %q; want token1", c.Token) + } + + c = auth.Challenges[1] + if c.Type != "tls-sni-01" { + t.Errorf("c.Type = %q; want tls-sni-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id2" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id2", c.URI) + } + if c.Token != "token2" { + t.Errorf("c.Token = %q; want token2", c.Token) + } + + combs := [][]int{{0}, {1}} + if !reflect.DeepEqual(auth.Combinations, combs) { + t.Errorf("auth.Combinations: %+v\nwant: %+v\n", auth.Combinations, combs) + } +} + +func TestWaitAuthorization(t *testing.T) { + var count int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + w.Header().Set("Retry-After", "0") + if count > 1 { + fmt.Fprintf(w, `{"status":"valid"}`) + return + } + fmt.Fprintf(w, `{"status":"pending"}`) + })) + defer ts.Close() + + type res struct { + authz *Authorization + err error + } + done := make(chan res) + defer close(done) + go func() { + var client Client + a, err := client.WaitAuthorization(context.Background(), ts.URL) + done <- res{a, err} + }() + + select { + case <-time.After(5 * time.Second): + t.Fatal("WaitAuthz took too long to return") + case res := <-done: + if res.err != nil { + t.Fatalf("res.err = %v", res.err) + } + if res.authz == nil { + t.Fatal("res.authz is nil") + } + } +} + +func TestWaitAuthorizationInvalid(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"status":"invalid"}`) + })) + defer ts.Close() + + res := make(chan error) + defer close(res) + go func() { + var client Client + _, err := client.WaitAuthorization(context.Background(), ts.URL) + res <- err + }() + + select { + case <-time.After(3 * time.Second): + t.Fatal("WaitAuthz took too long to return") + case err := <-res: + if err == nil { + t.Error("err is nil") + } + if _, ok := err.(*AuthorizationError); !ok { + t.Errorf("err is %T; want *AuthorizationError", err) + } + } +} + +func TestWaitAuthorizationClientError(t *testing.T) { + const code = http.StatusBadRequest + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(code) + })) + defer ts.Close() + + ch := make(chan error, 1) + go func() { + var client Client + _, err := client.WaitAuthorization(context.Background(), ts.URL) + ch <- err + }() + + select { + case <-time.After(3 * time.Second): + t.Fatal("WaitAuthz took too long to return") + case err := <-ch: + res, ok := err.(*Error) + if !ok { + t.Fatalf("err is %v (%T); want a non-nil *Error", err, err) + } + if res.StatusCode != code { + t.Errorf("res.StatusCode = %d; want %d", res.StatusCode, code) + } + } +} + +func TestWaitAuthorizationCancel(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Retry-After", "60") + fmt.Fprintf(w, `{"status":"pending"}`) + })) + defer ts.Close() + + res := make(chan error) + defer close(res) + go func() { + var client Client + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + _, err := client.WaitAuthorization(ctx, ts.URL) + res <- err + }() + + select { + case <-time.After(time.Second): + t.Fatal("WaitAuthz took too long to return") + case err := <-res: + if err == nil { + t.Error("err is nil") + } + } +} + +func TestRevokeAuthorization(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "nonce") + return + } + switch r.URL.Path { + case "/1": + var req struct { + Resource string + Status string + Delete bool + } + decodeJWSRequest(t, &req, r) + if req.Resource != "authz" { + t.Errorf("req.Resource = %q; want authz", req.Resource) + } + if req.Status != "deactivated" { + t.Errorf("req.Status = %q; want deactivated", req.Status) + } + if !req.Delete { + t.Errorf("req.Delete is false") + } + case "/2": + w.WriteHeader(http.StatusInternalServerError) + } + })) + defer ts.Close() + client := &Client{Key: testKey} + ctx := context.Background() + if err := client.RevokeAuthorization(ctx, ts.URL+"/1"); err != nil { + t.Errorf("err = %v", err) + } + if client.RevokeAuthorization(ctx, ts.URL+"/2") == nil { + t.Error("nil error") + } +} + +func TestPollChallenge(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Errorf("r.Method = %q; want GET", r.Method) + } + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "type":"http-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id1", + "token":"token1"}`) + })) + defer ts.Close() + + cl := Client{Key: testKeyEC} + chall, err := cl.GetChallenge(context.Background(), ts.URL) + if err != nil { + t.Fatal(err) + } + + if chall.Status != "pending" { + t.Errorf("Status = %q; want pending", chall.Status) + } + if chall.Type != "http-01" { + t.Errorf("c.Type = %q; want http-01", chall.Type) + } + if chall.URI != "https://ca.tld/acme/challenge/publickey/id1" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id1", chall.URI) + } + if chall.Token != "token1" { + t.Errorf("c.Token = %q; want token1", chall.Token) + } +} + +func TestAcceptChallenge(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Type string + Auth string `json:"keyAuthorization"` + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "challenge" { + t.Errorf(`resource = %q; want "challenge"`, j.Resource) + } + if j.Type != "http-01" { + t.Errorf(`type = %q; want "http-01"`, j.Type) + } + keyAuth := "token1." + testKeyECThumbprint + if j.Auth != keyAuth { + t.Errorf(`keyAuthorization = %q; want %q`, j.Auth, keyAuth) + } + + // Respond to request + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{ + "type":"http-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id1", + "token":"token1", + "keyAuthorization":%q + }`, keyAuth) + })) + defer ts.Close() + + cl := Client{Key: testKeyEC} + c, err := cl.Accept(context.Background(), &Challenge{ + URI: ts.URL, + Token: "token1", + Type: "http-01", + }) + if err != nil { + t.Fatal(err) + } + + if c.Type != "http-01" { + t.Errorf("c.Type = %q; want http-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id1" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id1", c.URI) + } + if c.Token != "token1" { + t.Errorf("c.Token = %q; want token1", c.Token) + } +} + +func TestNewCert(t *testing.T) { + notBefore := time.Now() + notAfter := notBefore.AddDate(0, 2, 0) + timeNow = func() time.Time { return notBefore } + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string `json:"resource"` + CSR string `json:"csr"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "new-cert" { + t.Errorf(`resource = %q; want "new-cert"`, j.Resource) + } + if j.NotBefore != notBefore.Format(time.RFC3339) { + t.Errorf(`notBefore = %q; wanted %q`, j.NotBefore, notBefore.Format(time.RFC3339)) + } + if j.NotAfter != notAfter.Format(time.RFC3339) { + t.Errorf(`notAfter = %q; wanted %q`, j.NotAfter, notAfter.Format(time.RFC3339)) + } + + // Respond to request + template := x509.Certificate{ + SerialNumber: big.NewInt(int64(1)), + Subject: pkix.Name{ + Organization: []string{"goacme"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + sampleCert, err := x509.CreateCertificate(rand.Reader, &template, &template, &testKeyEC.PublicKey, testKeyEC) + if err != nil { + t.Fatalf("Error creating certificate: %v", err) + } + + w.Header().Set("Location", "https://ca.tld/acme/cert/1") + w.WriteHeader(http.StatusCreated) + w.Write(sampleCert) + })) + defer ts.Close() + + csr := x509.CertificateRequest{ + Version: 0, + Subject: pkix.Name{ + CommonName: "example.com", + Organization: []string{"goacme"}, + }, + } + csrb, err := x509.CreateCertificateRequest(rand.Reader, &csr, testKeyEC) + if err != nil { + t.Fatal(err) + } + + c := Client{Key: testKeyEC, dir: &Directory{CertURL: ts.URL}} + cert, certURL, err := c.CreateCert(context.Background(), csrb, notAfter.Sub(notBefore), false) + if err != nil { + t.Fatal(err) + } + if cert == nil { + t.Errorf("cert is nil") + } + if certURL != "https://ca.tld/acme/cert/1" { + t.Errorf("certURL = %q; want https://ca.tld/acme/cert/1", certURL) + } +} + +func TestFetchCert(t *testing.T) { + var count byte + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + if count < 3 { + up := fmt.Sprintf("<%s>;rel=up", ts.URL) + w.Header().Set("Link", up) + } + w.Write([]byte{count}) + })) + defer ts.Close() + res, err := (&Client{}).FetchCert(context.Background(), ts.URL, true) + if err != nil { + t.Fatalf("FetchCert: %v", err) + } + cert := [][]byte{{1}, {2}, {3}} + if !reflect.DeepEqual(res, cert) { + t.Errorf("res = %v; want %v", res, cert) + } +} + +func TestFetchCertRetry(t *testing.T) { + var count int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if count < 1 { + w.Header().Set("Retry-After", "0") + w.WriteHeader(http.StatusAccepted) + count++ + return + } + w.Write([]byte{1}) + })) + defer ts.Close() + res, err := (&Client{}).FetchCert(context.Background(), ts.URL, false) + if err != nil { + t.Fatalf("FetchCert: %v", err) + } + cert := [][]byte{{1}} + if !reflect.DeepEqual(res, cert) { + t.Errorf("res = %v; want %v", res, cert) + } +} + +func TestFetchCertCancel(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Retry-After", "0") + w.WriteHeader(http.StatusAccepted) + })) + defer ts.Close() + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + var err error + go func() { + _, err = (&Client{}).FetchCert(ctx, ts.URL, false) + close(done) + }() + cancel() + <-done + if err != context.Canceled { + t.Errorf("err = %v; want %v", err, context.Canceled) + } +} + +func TestFetchCertDepth(t *testing.T) { + var count byte + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + if count > maxChainLen+1 { + t.Errorf("count = %d; want at most %d", count, maxChainLen+1) + w.WriteHeader(http.StatusInternalServerError) + } + w.Header().Set("Link", fmt.Sprintf("<%s>;rel=up", ts.URL)) + w.Write([]byte{count}) + })) + defer ts.Close() + _, err := (&Client{}).FetchCert(context.Background(), ts.URL, true) + if err == nil { + t.Errorf("err is nil") + } +} + +func TestFetchCertBreadth(t *testing.T) { + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < maxChainLen+1; i++ { + w.Header().Add("Link", fmt.Sprintf("<%s>;rel=up", ts.URL)) + } + w.Write([]byte{1}) + })) + defer ts.Close() + _, err := (&Client{}).FetchCert(context.Background(), ts.URL, true) + if err == nil { + t.Errorf("err is nil") + } +} + +func TestFetchCertSize(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b := bytes.Repeat([]byte{1}, maxCertSize+1) + w.Write(b) + })) + defer ts.Close() + _, err := (&Client{}).FetchCert(context.Background(), ts.URL, false) + if err == nil { + t.Errorf("err is nil") + } +} + +func TestRevokeCert(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("Replay-Nonce", "nonce") + return + } + + var req struct { + Resource string + Certificate string + Reason int + } + decodeJWSRequest(t, &req, r) + if req.Resource != "revoke-cert" { + t.Errorf("req.Resource = %q; want revoke-cert", req.Resource) + } + if req.Reason != 1 { + t.Errorf("req.Reason = %d; want 1", req.Reason) + } + // echo -n cert | base64 | tr -d '=' | tr '/+' '_-' + cert := "Y2VydA" + if req.Certificate != cert { + t.Errorf("req.Certificate = %q; want %q", req.Certificate, cert) + } + })) + defer ts.Close() + client := &Client{ + Key: testKeyEC, + dir: &Directory{RevokeURL: ts.URL}, + } + ctx := context.Background() + if err := client.RevokeCert(ctx, nil, []byte("cert"), CRLReasonKeyCompromise); err != nil { + t.Fatal(err) + } +} + +func TestNonce_add(t *testing.T) { + var c Client + c.addNonce(http.Header{"Replay-Nonce": {"nonce"}}) + c.addNonce(http.Header{"Replay-Nonce": {}}) + c.addNonce(http.Header{"Replay-Nonce": {"nonce"}}) + + nonces := map[string]struct{}{"nonce": {}} + if !reflect.DeepEqual(c.nonces, nonces) { + t.Errorf("c.nonces = %q; want %q", c.nonces, nonces) + } +} + +func TestNonce_addMax(t *testing.T) { + c := &Client{nonces: make(map[string]struct{})} + for i := 0; i < maxNonces; i++ { + c.nonces[fmt.Sprintf("%d", i)] = struct{}{} + } + c.addNonce(http.Header{"Replay-Nonce": {"nonce"}}) + if n := len(c.nonces); n != maxNonces { + t.Errorf("len(c.nonces) = %d; want %d", n, maxNonces) + } +} + +func TestNonce_fetch(t *testing.T) { + tests := []struct { + code int + nonce string + }{ + {http.StatusOK, "nonce1"}, + {http.StatusBadRequest, "nonce2"}, + {http.StatusOK, ""}, + } + var i int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "HEAD" { + t.Errorf("%d: r.Method = %q; want HEAD", i, r.Method) + } + w.Header().Set("Replay-Nonce", tests[i].nonce) + w.WriteHeader(tests[i].code) + })) + defer ts.Close() + for ; i < len(tests); i++ { + test := tests[i] + c := &Client{} + n, err := c.fetchNonce(context.Background(), ts.URL) + if n != test.nonce { + t.Errorf("%d: n=%q; want %q", i, n, test.nonce) + } + switch { + case err == nil && test.nonce == "": + t.Errorf("%d: n=%q, err=%v; want non-nil error", i, n, err) + case err != nil && test.nonce != "": + t.Errorf("%d: n=%q, err=%v; want %q", i, n, err, test.nonce) + } + } +} + +func TestNonce_fetchError(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTooManyRequests) + })) + defer ts.Close() + c := &Client{} + _, err := c.fetchNonce(context.Background(), ts.URL) + e, ok := err.(*Error) + if !ok { + t.Fatalf("err is %T; want *Error", err) + } + if e.StatusCode != http.StatusTooManyRequests { + t.Errorf("e.StatusCode = %d; want %d", e.StatusCode, http.StatusTooManyRequests) + } +} + +func TestNonce_postJWS(t *testing.T) { + var count int + seen := make(map[string]bool) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + w.Header().Set("Replay-Nonce", fmt.Sprintf("nonce%d", count)) + if r.Method == "HEAD" { + // We expect the client do a HEAD request + // but only to fetch the first nonce. + return + } + // Make client.Authorize happy; we're not testing its result. + defer func() { + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"status":"valid"}`)) + }() + + head, err := decodeJWSHead(r) + if err != nil { + t.Errorf("decodeJWSHead: %v", err) + return + } + if head.Nonce == "" { + t.Error("head.Nonce is empty") + return + } + if seen[head.Nonce] { + t.Errorf("nonce is already used: %q", head.Nonce) + } + seen[head.Nonce] = true + })) + defer ts.Close() + + client := Client{Key: testKey, dir: &Directory{AuthzURL: ts.URL}} + if _, err := client.Authorize(context.Background(), "example.com"); err != nil { + t.Errorf("client.Authorize 1: %v", err) + } + // The second call should not generate another extra HEAD request. + if _, err := client.Authorize(context.Background(), "example.com"); err != nil { + t.Errorf("client.Authorize 2: %v", err) + } + + if count != 3 { + t.Errorf("total requests count: %d; want 3", count) + } + if n := len(client.nonces); n != 1 { + t.Errorf("len(client.nonces) = %d; want 1", n) + } + for k := range seen { + if _, exist := client.nonces[k]; exist { + t.Errorf("used nonce %q in client.nonces", k) + } + } +} + +func TestRetryPostJWS(t *testing.T) { + var count int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + w.Header().Set("Replay-Nonce", fmt.Sprintf("nonce%d", count)) + if r.Method == "HEAD" { + // We expect the client to do 2 head requests to fetch + // nonces, one to start and another after getting badNonce + return + } + + head, err := decodeJWSHead(r) + if err != nil { + t.Errorf("decodeJWSHead: %v", err) + } else if head.Nonce == "" { + t.Error("head.Nonce is empty") + } else if head.Nonce == "nonce1" { + // return a badNonce error to force the call to retry + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"type":"urn:ietf:params:acme:error:badNonce"}`)) + return + } + // Make client.Authorize happy; we're not testing its result. + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"status":"valid"}`)) + })) + defer ts.Close() + + client := Client{Key: testKey, dir: &Directory{AuthzURL: ts.URL}} + // This call will fail with badNonce, causing a retry + if _, err := client.Authorize(context.Background(), "example.com"); err != nil { + t.Errorf("client.Authorize 1: %v", err) + } + if count != 4 { + t.Errorf("total requests count: %d; want 4", count) + } +} + +func TestLinkHeader(t *testing.T) { + h := http.Header{"Link": { + `;rel="next"`, + `; rel=recover`, + `; foo=bar; rel="terms-of-service"`, + `;rel="next"`, + }} + tests := []struct { + rel string + out []string + }{ + {"next", []string{"https://example.com/acme/new-authz", "dup"}}, + {"recover", []string{"https://example.com/acme/recover-reg"}}, + {"terms-of-service", []string{"https://example.com/acme/terms"}}, + {"empty", nil}, + } + for i, test := range tests { + if v := linkHeader(h, test.rel); !reflect.DeepEqual(v, test.out) { + t.Errorf("%d: linkHeader(%q): %v; want %v", i, test.rel, v, test.out) + } + } +} + +func TestErrorResponse(t *testing.T) { + s := `{ + "status": 400, + "type": "urn:acme:error:xxx", + "detail": "text" + }` + res := &http.Response{ + StatusCode: 400, + Status: "400 Bad Request", + Body: ioutil.NopCloser(strings.NewReader(s)), + Header: http.Header{"X-Foo": {"bar"}}, + } + err := responseError(res) + v, ok := err.(*Error) + if !ok { + t.Fatalf("err = %+v (%T); want *Error type", err, err) + } + if v.StatusCode != 400 { + t.Errorf("v.StatusCode = %v; want 400", v.StatusCode) + } + if v.ProblemType != "urn:acme:error:xxx" { + t.Errorf("v.ProblemType = %q; want urn:acme:error:xxx", v.ProblemType) + } + if v.Detail != "text" { + t.Errorf("v.Detail = %q; want text", v.Detail) + } + if !reflect.DeepEqual(v.Header, res.Header) { + t.Errorf("v.Header = %+v; want %+v", v.Header, res.Header) + } +} + +func TestTLSSNI01ChallengeCert(t *testing.T) { + const ( + token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA" + // echo -n | shasum -a 256 + san = "dbbd5eefe7b4d06eb9d1d9f5acb4c7cd.a27d320e4b30332f0b6cb441734ad7b0.acme.invalid" + ) + + client := &Client{Key: testKeyEC} + tlscert, name, err := client.TLSSNI01ChallengeCert(token) + if err != nil { + t.Fatal(err) + } + + if n := len(tlscert.Certificate); n != 1 { + t.Fatalf("len(tlscert.Certificate) = %d; want 1", n) + } + cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + if len(cert.DNSNames) != 1 || cert.DNSNames[0] != san { + t.Fatalf("cert.DNSNames = %v; want %q", cert.DNSNames, san) + } + if cert.DNSNames[0] != name { + t.Errorf("cert.DNSNames[0] != name: %q vs %q", cert.DNSNames[0], name) + } + if cn := cert.Subject.CommonName; cn != san { + t.Errorf("cert.Subject.CommonName = %q; want %q", cn, san) + } +} + +func TestTLSSNI02ChallengeCert(t *testing.T) { + const ( + token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA" + // echo -n evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA | shasum -a 256 + sanA = "7ea0aaa69214e71e02cebb18bb867736.09b730209baabf60e43d4999979ff139.token.acme.invalid" + // echo -n | shasum -a 256 + sanB = "dbbd5eefe7b4d06eb9d1d9f5acb4c7cd.a27d320e4b30332f0b6cb441734ad7b0.ka.acme.invalid" + ) + + client := &Client{Key: testKeyEC} + tlscert, name, err := client.TLSSNI02ChallengeCert(token) + if err != nil { + t.Fatal(err) + } + + if n := len(tlscert.Certificate); n != 1 { + t.Fatalf("len(tlscert.Certificate) = %d; want 1", n) + } + cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + names := []string{sanA, sanB} + if !reflect.DeepEqual(cert.DNSNames, names) { + t.Fatalf("cert.DNSNames = %v;\nwant %v", cert.DNSNames, names) + } + sort.Strings(cert.DNSNames) + i := sort.SearchStrings(cert.DNSNames, name) + if i >= len(cert.DNSNames) || cert.DNSNames[i] != name { + t.Errorf("%v doesn't have %q", cert.DNSNames, name) + } + if cn := cert.Subject.CommonName; cn != sanA { + t.Errorf("CommonName = %q; want %q", cn, sanA) + } +} + +func TestTLSChallengeCertOpt(t *testing.T) { + key, err := rsa.GenerateKey(rand.Reader, 512) + if err != nil { + t.Fatal(err) + } + tmpl := &x509.Certificate{ + SerialNumber: big.NewInt(2), + Subject: pkix.Name{Organization: []string{"Test"}}, + DNSNames: []string{"should-be-overwritten"}, + } + opts := []CertOption{WithKey(key), WithTemplate(tmpl)} + + client := &Client{Key: testKeyEC} + cert1, _, err := client.TLSSNI01ChallengeCert("token", opts...) + if err != nil { + t.Fatal(err) + } + cert2, _, err := client.TLSSNI02ChallengeCert("token", opts...) + if err != nil { + t.Fatal(err) + } + + for i, tlscert := range []tls.Certificate{cert1, cert2} { + // verify generated cert private key + tlskey, ok := tlscert.PrivateKey.(*rsa.PrivateKey) + if !ok { + t.Errorf("%d: tlscert.PrivateKey is %T; want *rsa.PrivateKey", i, tlscert.PrivateKey) + continue + } + if tlskey.D.Cmp(key.D) != 0 { + t.Errorf("%d: tlskey.D = %v; want %v", i, tlskey.D, key.D) + } + // verify generated cert public key + x509Cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + tlspub, ok := x509Cert.PublicKey.(*rsa.PublicKey) + if !ok { + t.Errorf("%d: x509Cert.PublicKey is %T; want *rsa.PublicKey", i, x509Cert.PublicKey) + continue + } + if tlspub.N.Cmp(key.N) != 0 { + t.Errorf("%d: tlspub.N = %v; want %v", i, tlspub.N, key.N) + } + // verify template option + sn := big.NewInt(2) + if x509Cert.SerialNumber.Cmp(sn) != 0 { + t.Errorf("%d: SerialNumber = %v; want %v", i, x509Cert.SerialNumber, sn) + } + org := []string{"Test"} + if !reflect.DeepEqual(x509Cert.Subject.Organization, org) { + t.Errorf("%d: Subject.Organization = %+v; want %+v", i, x509Cert.Subject.Organization, org) + } + for _, v := range x509Cert.DNSNames { + if !strings.HasSuffix(v, ".acme.invalid") { + t.Errorf("%d: invalid DNSNames element: %q", i, v) + } + } + } +} + +func TestHTTP01Challenge(t *testing.T) { + const ( + token = "xxx" + // thumbprint is precomputed for testKeyEC in jws_test.go + value = token + "." + testKeyECThumbprint + urlpath = "/.well-known/acme-challenge/" + token + ) + client := &Client{Key: testKeyEC} + val, err := client.HTTP01ChallengeResponse(token) + if err != nil { + t.Fatal(err) + } + if val != value { + t.Errorf("val = %q; want %q", val, value) + } + if path := client.HTTP01ChallengePath(token); path != urlpath { + t.Errorf("path = %q; want %q", path, urlpath) + } +} + +func TestDNS01ChallengeRecord(t *testing.T) { + // echo -n xxx. | \ + // openssl dgst -binary -sha256 | \ + // base64 | tr -d '=' | tr '/+' '_-' + const value = "8DERMexQ5VcdJ_prpPiA0mVdp7imgbCgjsG4SqqNMIo" + + client := &Client{Key: testKeyEC} + val, err := client.DNS01ChallengeRecord("xxx") + if err != nil { + t.Fatal(err) + } + if val != value { + t.Errorf("val = %q; want %q", val, value) + } +} + +func TestBackoff(t *testing.T) { + tt := []struct{ min, max time.Duration }{ + {time.Second, 2 * time.Second}, + {2 * time.Second, 3 * time.Second}, + {4 * time.Second, 5 * time.Second}, + {8 * time.Second, 9 * time.Second}, + } + for i, test := range tt { + d := backoff(i, time.Minute) + if d < test.min || test.max < d { + t.Errorf("%d: d = %v; want between %v and %v", i, d, test.min, test.max) + } + } + + min, max := time.Second, 2*time.Second + if d := backoff(-1, time.Minute); d < min || max < d { + t.Errorf("d = %v; want between %v and %v", d, min, max) + } + + bound := 10 * time.Second + if d := backoff(100, bound); d != bound { + t.Errorf("d = %v; want %v", d, bound) + } +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go new file mode 100644 index 0000000..263b291 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go @@ -0,0 +1,962 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package autocert provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package autocert + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + mathrand "math/rand" + "net" + "net/http" + "path" + "strings" + "sync" + "time" + + "golang.org/x/crypto/acme" +) + +// createCertRetryAfter is how much time to wait before removing a failed state +// entry due to an unsuccessful createCert call. +// This is a variable instead of a const for testing. +// TODO: Consider making it configurable or an exp backoff? +var createCertRetryAfter = time.Minute + +// pseudoRand is safe for concurrent use. +var pseudoRand *lockedMathRand + +func init() { + src := mathrand.NewSource(timeNow().UnixNano()) + pseudoRand = &lockedMathRand{rnd: mathrand.New(src)} +} + +// AcceptTOS is a Manager.Prompt function that always returns true to +// indicate acceptance of the CA's Terms of Service during account +// registration. +func AcceptTOS(tosURL string) bool { return true } + +// HostPolicy specifies which host names the Manager is allowed to respond to. +// It returns a non-nil error if the host should be rejected. +// The returned error is accessible via tls.Conn.Handshake and its callers. +// See Manager's HostPolicy field and GetCertificate method docs for more details. +type HostPolicy func(ctx context.Context, host string) error + +// HostWhitelist returns a policy where only the specified host names are allowed. +// Only exact matches are currently supported. Subdomains, regexp or wildcard +// will not match. +func HostWhitelist(hosts ...string) HostPolicy { + whitelist := make(map[string]bool, len(hosts)) + for _, h := range hosts { + whitelist[h] = true + } + return func(_ context.Context, host string) error { + if !whitelist[host] { + return errors.New("acme/autocert: host not configured") + } + return nil + } +} + +// defaultHostPolicy is used when Manager.HostPolicy is not set. +func defaultHostPolicy(context.Context, string) error { + return nil +} + +// Manager is a stateful certificate manager built on top of acme.Client. +// It obtains and refreshes certificates automatically using "tls-sni-01", +// "tls-sni-02" and "http-01" challenge types, as well as providing them +// to a TLS server via tls.Config. +// +// You must specify a cache implementation, such as DirCache, +// to reuse obtained certificates across program restarts. +// Otherwise your server is very likely to exceed the certificate +// issuer's request rate limits. +type Manager struct { + // Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS). + // The registration may require the caller to agree to the CA's TOS. + // If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report + // whether the caller agrees to the terms. + // + // To always accept the terms, the callers can use AcceptTOS. + Prompt func(tosURL string) bool + + // Cache optionally stores and retrieves previously-obtained certificates. + // If nil, certs will only be cached for the lifetime of the Manager. + // + // Manager passes the Cache certificates data encoded in PEM, with private/public + // parts combined in a single Cache.Put call, private key first. + Cache Cache + + // HostPolicy controls which domains the Manager will attempt + // to retrieve new certificates for. It does not affect cached certs. + // + // If non-nil, HostPolicy is called before requesting a new cert. + // If nil, all hosts are currently allowed. This is not recommended, + // as it opens a potential attack where clients connect to a server + // by IP address and pretend to be asking for an incorrect host name. + // Manager will attempt to obtain a certificate for that host, incorrectly, + // eventually reaching the CA's rate limit for certificate requests + // and making it impossible to obtain actual certificates. + // + // See GetCertificate for more details. + HostPolicy HostPolicy + + // RenewBefore optionally specifies how early certificates should + // be renewed before they expire. + // + // If zero, they're renewed 30 days before expiration. + RenewBefore time.Duration + + // Client is used to perform low-level operations, such as account registration + // and requesting new certificates. + // If Client is nil, a zero-value acme.Client is used with acme.LetsEncryptURL + // directory endpoint and a newly-generated ECDSA P-256 key. + // + // Mutating the field after the first call of GetCertificate method will have no effect. + Client *acme.Client + + // Email optionally specifies a contact email address. + // This is used by CAs, such as Let's Encrypt, to notify about problems + // with issued certificates. + // + // If the Client's account key is already registered, Email is not used. + Email string + + // ForceRSA makes the Manager generate certificates with 2048-bit RSA keys. + // + // If false, a default is used. Currently the default + // is EC-based keys using the P-256 curve. + ForceRSA bool + + clientMu sync.Mutex + client *acme.Client // initialized by acmeClient method + + stateMu sync.Mutex + state map[string]*certState // keyed by domain name + + // renewal tracks the set of domains currently running renewal timers. + // It is keyed by domain name. + renewalMu sync.Mutex + renewal map[string]*domainRenewal + + // tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens. + tokensMu sync.RWMutex + // tryHTTP01 indicates whether the Manager should try "http-01" challenge type + // during the authorization flow. + tryHTTP01 bool + // httpTokens contains response body values for http-01 challenges + // and is keyed by the URL path at which a challenge response is expected + // to be provisioned. + // The entries are stored for the duration of the authorization flow. + httpTokens map[string][]byte + // certTokens contains temporary certificates for tls-sni challenges + // and is keyed by token domain name, which matches server name of ClientHello. + // Keys always have ".acme.invalid" suffix. + // The entries are stored for the duration of the authorization flow. + certTokens map[string]*tls.Certificate +} + +// GetCertificate implements the tls.Config.GetCertificate hook. +// It provides a TLS certificate for hello.ServerName host, including answering +// *.acme.invalid (TLS-SNI) challenges. All other fields of hello are ignored. +// +// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting +// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation. +// The error is propagated back to the caller of GetCertificate and is user-visible. +// This does not affect cached certs. See HostPolicy field description for more details. +func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { + if m.Prompt == nil { + return nil, errors.New("acme/autocert: Manager.Prompt not set") + } + + name := hello.ServerName + if name == "" { + return nil, errors.New("acme/autocert: missing server name") + } + if !strings.Contains(strings.Trim(name, "."), ".") { + return nil, errors.New("acme/autocert: server name component count invalid") + } + if strings.ContainsAny(name, `/\`) { + return nil, errors.New("acme/autocert: server name contains invalid character") + } + + // In the worst-case scenario, the timeout needs to account for caching, host policy, + // domain ownership verification and certificate issuance. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // check whether this is a token cert requested for TLS-SNI challenge + if strings.HasSuffix(name, ".acme.invalid") { + m.tokensMu.RLock() + defer m.tokensMu.RUnlock() + if cert := m.certTokens[name]; cert != nil { + return cert, nil + } + if cert, err := m.cacheGet(ctx, name); err == nil { + return cert, nil + } + // TODO: cache error results? + return nil, fmt.Errorf("acme/autocert: no token cert for %q", name) + } + + // regular domain + name = strings.TrimSuffix(name, ".") // golang.org/issue/18114 + cert, err := m.cert(ctx, name) + if err == nil { + return cert, nil + } + if err != ErrCacheMiss { + return nil, err + } + + // first-time + if err := m.hostPolicy()(ctx, name); err != nil { + return nil, err + } + cert, err = m.createCert(ctx, name) + if err != nil { + return nil, err + } + m.cachePut(ctx, name, cert) + return cert, nil +} + +// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses. +// It returns an http.Handler that responds to the challenges and must be +// running on port 80. If it receives a request that is not an ACME challenge, +// it delegates the request to the optional fallback handler. +// +// If fallback is nil, the returned handler redirects all GET and HEAD requests +// to the default TLS port 443 with 302 Found status code, preserving the original +// request path and query. It responds with 400 Bad Request to all other HTTP methods. +// The fallback is not protected by the optional HostPolicy. +// +// Because the fallback handler is run with unencrypted port 80 requests, +// the fallback should not serve TLS-only requests. +// +// If HTTPHandler is never called, the Manager will only use TLS SNI +// challenges for domain verification. +func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + m.tryHTTP01 = true + + if fallback == nil { + fallback = http.HandlerFunc(handleHTTPRedirect) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") { + fallback.ServeHTTP(w, r) + return + } + // A reasonable context timeout for cache and host policy only, + // because we don't wait for a new certificate issuance here. + ctx, cancel := context.WithTimeout(r.Context(), time.Minute) + defer cancel() + if err := m.hostPolicy()(ctx, r.Host); err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + data, err := m.httpToken(ctx, r.URL.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.Write(data) + }) +} + +func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" && r.Method != "HEAD" { + http.Error(w, "Use HTTPS", http.StatusBadRequest) + return + } + target := "https://" + stripPort(r.Host) + r.URL.RequestURI() + http.Redirect(w, r, target, http.StatusFound) +} + +func stripPort(hostport string) string { + host, _, err := net.SplitHostPort(hostport) + if err != nil { + return hostport + } + return net.JoinHostPort(host, "443") +} + +// cert returns an existing certificate either from m.state or cache. +// If a certificate is found in cache but not in m.state, the latter will be filled +// with the cached value. +func (m *Manager) cert(ctx context.Context, name string) (*tls.Certificate, error) { + m.stateMu.Lock() + if s, ok := m.state[name]; ok { + m.stateMu.Unlock() + s.RLock() + defer s.RUnlock() + return s.tlscert() + } + defer m.stateMu.Unlock() + cert, err := m.cacheGet(ctx, name) + if err != nil { + return nil, err + } + signer, ok := cert.PrivateKey.(crypto.Signer) + if !ok { + return nil, errors.New("acme/autocert: private key cannot sign") + } + if m.state == nil { + m.state = make(map[string]*certState) + } + s := &certState{ + key: signer, + cert: cert.Certificate, + leaf: cert.Leaf, + } + m.state[name] = s + go m.renew(name, s.key, s.leaf.NotAfter) + return cert, nil +} + +// cacheGet always returns a valid certificate, or an error otherwise. +// If a cached certficate exists but is not valid, ErrCacheMiss is returned. +func (m *Manager) cacheGet(ctx context.Context, domain string) (*tls.Certificate, error) { + if m.Cache == nil { + return nil, ErrCacheMiss + } + data, err := m.Cache.Get(ctx, domain) + if err != nil { + return nil, err + } + + // private + priv, pub := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, ErrCacheMiss + } + privKey, err := parsePrivateKey(priv.Bytes) + if err != nil { + return nil, err + } + + // public + var pubDER [][]byte + for len(pub) > 0 { + var b *pem.Block + b, pub = pem.Decode(pub) + if b == nil { + break + } + pubDER = append(pubDER, b.Bytes) + } + if len(pub) > 0 { + // Leftover content not consumed by pem.Decode. Corrupt. Ignore. + return nil, ErrCacheMiss + } + + // verify and create TLS cert + leaf, err := validCert(domain, pubDER, privKey) + if err != nil { + return nil, ErrCacheMiss + } + tlscert := &tls.Certificate{ + Certificate: pubDER, + PrivateKey: privKey, + Leaf: leaf, + } + return tlscert, nil +} + +func (m *Manager) cachePut(ctx context.Context, domain string, tlscert *tls.Certificate) error { + if m.Cache == nil { + return nil + } + + // contains PEM-encoded data + var buf bytes.Buffer + + // private + switch key := tlscert.PrivateKey.(type) { + case *ecdsa.PrivateKey: + if err := encodeECDSAKey(&buf, key); err != nil { + return err + } + case *rsa.PrivateKey: + b := x509.MarshalPKCS1PrivateKey(key) + pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + default: + return errors.New("acme/autocert: unknown private key type") + } + + // public + for _, b := range tlscert.Certificate { + pb := &pem.Block{Type: "CERTIFICATE", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + } + + return m.Cache.Put(ctx, domain, buf.Bytes()) +} + +func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error { + b, err := x509.MarshalECPrivateKey(key) + if err != nil { + return err + } + pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + return pem.Encode(w, pb) +} + +// createCert starts the domain ownership verification and returns a certificate +// for that domain upon success. +// +// If the domain is already being verified, it waits for the existing verification to complete. +// Either way, createCert blocks for the duration of the whole process. +func (m *Manager) createCert(ctx context.Context, domain string) (*tls.Certificate, error) { + // TODO: maybe rewrite this whole piece using sync.Once + state, err := m.certState(domain) + if err != nil { + return nil, err + } + // state may exist if another goroutine is already working on it + // in which case just wait for it to finish + if !state.locked { + state.RLock() + defer state.RUnlock() + return state.tlscert() + } + + // We are the first; state is locked. + // Unblock the readers when domain ownership is verified + // and we got the cert or the process failed. + defer state.Unlock() + state.locked = false + + der, leaf, err := m.authorizedCert(ctx, state.key, domain) + if err != nil { + // Remove the failed state after some time, + // making the manager call createCert again on the following TLS hello. + time.AfterFunc(createCertRetryAfter, func() { + defer testDidRemoveState(domain) + m.stateMu.Lock() + defer m.stateMu.Unlock() + // Verify the state hasn't changed and it's still invalid + // before deleting. + s, ok := m.state[domain] + if !ok { + return + } + if _, err := validCert(domain, s.cert, s.key); err == nil { + return + } + delete(m.state, domain) + }) + return nil, err + } + state.cert = der + state.leaf = leaf + go m.renew(domain, state.key, state.leaf.NotAfter) + return state.tlscert() +} + +// certState returns a new or existing certState. +// If a new certState is returned, state.exist is false and the state is locked. +// The returned error is non-nil only in the case where a new state could not be created. +func (m *Manager) certState(domain string) (*certState, error) { + m.stateMu.Lock() + defer m.stateMu.Unlock() + if m.state == nil { + m.state = make(map[string]*certState) + } + // existing state + if state, ok := m.state[domain]; ok { + return state, nil + } + + // new locked state + var ( + err error + key crypto.Signer + ) + if m.ForceRSA { + key, err = rsa.GenerateKey(rand.Reader, 2048) + } else { + key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + } + if err != nil { + return nil, err + } + + state := &certState{ + key: key, + locked: true, + } + state.Lock() // will be unlocked by m.certState caller + m.state[domain] = state + return state, nil +} + +// authorizedCert starts the domain ownership verification process and requests a new cert upon success. +// The key argument is the certificate private key. +func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain string) (der [][]byte, leaf *x509.Certificate, err error) { + client, err := m.acmeClient(ctx) + if err != nil { + return nil, nil, err + } + + if err := m.verify(ctx, client, domain); err != nil { + return nil, nil, err + } + csr, err := certRequest(key, domain) + if err != nil { + return nil, nil, err + } + der, _, err = client.CreateCert(ctx, csr, 0, true) + if err != nil { + return nil, nil, err + } + leaf, err = validCert(domain, der, key) + if err != nil { + return nil, nil, err + } + return der, leaf, nil +} + +// verify runs the identifier (domain) authorization flow +// using each applicable ACME challenge type. +func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error { + // The list of challenge types we'll try to fulfill + // in this specific order. + challengeTypes := []string{"tls-sni-02", "tls-sni-01"} + m.tokensMu.RLock() + if m.tryHTTP01 { + challengeTypes = append(challengeTypes, "http-01") + } + m.tokensMu.RUnlock() + + var nextTyp int // challengeType index of the next challenge type to try + for { + // Start domain authorization and get the challenge. + authz, err := client.Authorize(ctx, domain) + if err != nil { + return err + } + // No point in accepting challenges if the authorization status + // is in a final state. + switch authz.Status { + case acme.StatusValid: + return nil // already authorized + case acme.StatusInvalid: + return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI) + } + + // Pick the next preferred challenge. + var chal *acme.Challenge + for chal == nil && nextTyp < len(challengeTypes) { + chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges) + nextTyp++ + } + if chal == nil { + return fmt.Errorf("acme/autocert: unable to authorize %q; tried %q", domain, challengeTypes) + } + cleanup, err := m.fulfill(ctx, client, chal) + if err != nil { + continue + } + defer cleanup() + if _, err := client.Accept(ctx, chal); err != nil { + continue + } + + // A challenge is fulfilled and accepted: wait for the CA to validate. + if _, err := client.WaitAuthorization(ctx, authz.URI); err == nil { + return nil + } + } +} + +// fulfill provisions a response to the challenge chal. +// The cleanup is non-nil only if provisioning succeeded. +func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge) (cleanup func(), err error) { + switch chal.Type { + case "tls-sni-01": + cert, name, err := client.TLSSNI01ChallengeCert(chal.Token) + if err != nil { + return nil, err + } + m.putCertToken(ctx, name, &cert) + return func() { go m.deleteCertToken(name) }, nil + case "tls-sni-02": + cert, name, err := client.TLSSNI02ChallengeCert(chal.Token) + if err != nil { + return nil, err + } + m.putCertToken(ctx, name, &cert) + return func() { go m.deleteCertToken(name) }, nil + case "http-01": + resp, err := client.HTTP01ChallengeResponse(chal.Token) + if err != nil { + return nil, err + } + p := client.HTTP01ChallengePath(chal.Token) + m.putHTTPToken(ctx, p, resp) + return func() { go m.deleteHTTPToken(p) }, nil + } + return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type) +} + +func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge { + for _, c := range chal { + if c.Type == typ { + return c + } + } + return nil +} + +// putCertToken stores the cert under the named key in both m.certTokens map +// and m.Cache. +func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + if m.certTokens == nil { + m.certTokens = make(map[string]*tls.Certificate) + } + m.certTokens[name] = cert + m.cachePut(ctx, name, cert) +} + +// deleteCertToken removes the token certificate for the specified domain name +// from both m.certTokens map and m.Cache. +func (m *Manager) deleteCertToken(name string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + delete(m.certTokens, name) + if m.Cache != nil { + m.Cache.Delete(context.Background(), name) + } +} + +// httpToken retrieves an existing http-01 token value from an in-memory map +// or the optional cache. +func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) { + m.tokensMu.RLock() + defer m.tokensMu.RUnlock() + if v, ok := m.httpTokens[tokenPath]; ok { + return v, nil + } + if m.Cache == nil { + return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath) + } + return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath)) +} + +// putHTTPToken stores an http-01 token value using tokenPath as key +// in both in-memory map and the optional Cache. +// +// It ignores any error returned from Cache.Put. +func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + if m.httpTokens == nil { + m.httpTokens = make(map[string][]byte) + } + b := []byte(val) + m.httpTokens[tokenPath] = b + if m.Cache != nil { + m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b) + } +} + +// deleteHTTPToken removes an http-01 token value from both in-memory map +// and the optional Cache, ignoring any error returned from the latter. +// +// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout. +func (m *Manager) deleteHTTPToken(tokenPath string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + delete(m.httpTokens, tokenPath) + if m.Cache != nil { + m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath)) + } +} + +// httpTokenCacheKey returns a key at which an http-01 token value may be stored +// in the Manager's optional Cache. +func httpTokenCacheKey(tokenPath string) string { + return "http-01-" + path.Base(tokenPath) +} + +// renew starts a cert renewal timer loop, one per domain. +// +// The loop is scheduled in two cases: +// - a cert was fetched from cache for the first time (wasn't in m.state) +// - a new cert was created by m.createCert +// +// The key argument is a certificate private key. +// The exp argument is the cert expiration time (NotAfter). +func (m *Manager) renew(domain string, key crypto.Signer, exp time.Time) { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + if m.renewal[domain] != nil { + // another goroutine is already on it + return + } + if m.renewal == nil { + m.renewal = make(map[string]*domainRenewal) + } + dr := &domainRenewal{m: m, domain: domain, key: key} + m.renewal[domain] = dr + dr.start(exp) +} + +// stopRenew stops all currently running cert renewal timers. +// The timers are not restarted during the lifetime of the Manager. +func (m *Manager) stopRenew() { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + for name, dr := range m.renewal { + delete(m.renewal, name) + dr.stop() + } +} + +func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) { + const keyName = "acme_account.key" + + genKey := func() (*ecdsa.PrivateKey, error) { + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + } + + if m.Cache == nil { + return genKey() + } + + data, err := m.Cache.Get(ctx, keyName) + if err == ErrCacheMiss { + key, err := genKey() + if err != nil { + return nil, err + } + var buf bytes.Buffer + if err := encodeECDSAKey(&buf, key); err != nil { + return nil, err + } + if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil { + return nil, err + } + return key, nil + } + if err != nil { + return nil, err + } + + priv, _ := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, errors.New("acme/autocert: invalid account key found in cache") + } + return parsePrivateKey(priv.Bytes) +} + +func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) { + m.clientMu.Lock() + defer m.clientMu.Unlock() + if m.client != nil { + return m.client, nil + } + + client := m.Client + if client == nil { + client = &acme.Client{DirectoryURL: acme.LetsEncryptURL} + } + if client.Key == nil { + var err error + client.Key, err = m.accountKey(ctx) + if err != nil { + return nil, err + } + } + var contact []string + if m.Email != "" { + contact = []string{"mailto:" + m.Email} + } + a := &acme.Account{Contact: contact} + _, err := client.Register(ctx, a, m.Prompt) + if ae, ok := err.(*acme.Error); err == nil || ok && ae.StatusCode == http.StatusConflict { + // conflict indicates the key is already registered + m.client = client + err = nil + } + return m.client, err +} + +func (m *Manager) hostPolicy() HostPolicy { + if m.HostPolicy != nil { + return m.HostPolicy + } + return defaultHostPolicy +} + +func (m *Manager) renewBefore() time.Duration { + if m.RenewBefore > renewJitter { + return m.RenewBefore + } + return 720 * time.Hour // 30 days +} + +// certState is ready when its mutex is unlocked for reading. +type certState struct { + sync.RWMutex + locked bool // locked for read/write + key crypto.Signer // private key for cert + cert [][]byte // DER encoding + leaf *x509.Certificate // parsed cert[0]; always non-nil if cert != nil +} + +// tlscert creates a tls.Certificate from s.key and s.cert. +// Callers should wrap it in s.RLock() and s.RUnlock(). +func (s *certState) tlscert() (*tls.Certificate, error) { + if s.key == nil { + return nil, errors.New("acme/autocert: missing signer") + } + if len(s.cert) == 0 { + return nil, errors.New("acme/autocert: missing certificate") + } + return &tls.Certificate{ + PrivateKey: s.key, + Certificate: s.cert, + Leaf: s.leaf, + }, nil +} + +// certRequest creates a certificate request for the given common name cn +// and optional SANs. +func certRequest(key crypto.Signer, cn string, san ...string) ([]byte, error) { + req := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: cn}, + DNSNames: san, + } + return x509.CreateCertificateRequest(rand.Reader, req, key) +} + +// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates +// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. +// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. +// +// Inspired by parsePrivateKey in crypto/tls/tls.go. +func parsePrivateKey(der []byte) (crypto.Signer, error) { + if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { + return key, nil + } + if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { + switch key := key.(type) { + case *rsa.PrivateKey: + return key, nil + case *ecdsa.PrivateKey: + return key, nil + default: + return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping") + } + } + if key, err := x509.ParseECPrivateKey(der); err == nil { + return key, nil + } + + return nil, errors.New("acme/autocert: failed to parse private key") +} + +// validCert parses a cert chain provided as der argument and verifies the leaf, der[0], +// corresponds to the private key, as well as the domain match and expiration dates. +// It doesn't do any revocation checking. +// +// The returned value is the verified leaf cert. +func validCert(domain string, der [][]byte, key crypto.Signer) (leaf *x509.Certificate, err error) { + // parse public part(s) + var n int + for _, b := range der { + n += len(b) + } + pub := make([]byte, n) + n = 0 + for _, b := range der { + n += copy(pub[n:], b) + } + x509Cert, err := x509.ParseCertificates(pub) + if len(x509Cert) == 0 { + return nil, errors.New("acme/autocert: no public key found") + } + // verify the leaf is not expired and matches the domain name + leaf = x509Cert[0] + now := timeNow() + if now.Before(leaf.NotBefore) { + return nil, errors.New("acme/autocert: certificate is not valid yet") + } + if now.After(leaf.NotAfter) { + return nil, errors.New("acme/autocert: expired certificate") + } + if err := leaf.VerifyHostname(domain); err != nil { + return nil, err + } + // ensure the leaf corresponds to the private key + switch pub := leaf.PublicKey.(type) { + case *rsa.PublicKey: + prv, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.N.Cmp(prv.N) != 0 { + return nil, errors.New("acme/autocert: private key does not match public key") + } + case *ecdsa.PublicKey: + prv, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 { + return nil, errors.New("acme/autocert: private key does not match public key") + } + default: + return nil, errors.New("acme/autocert: unknown public key algorithm") + } + return leaf, nil +} + +type lockedMathRand struct { + sync.Mutex + rnd *mathrand.Rand +} + +func (r *lockedMathRand) int63n(max int64) int64 { + r.Lock() + n := r.rnd.Int63n(max) + r.Unlock() + return n +} + +// For easier testing. +var ( + timeNow = time.Now + + // Called when a state is removed. + testDidRemoveState = func(domain string) {} +) diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go b/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go new file mode 100644 index 0000000..2da1912 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go @@ -0,0 +1,757 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "fmt" + "html/template" + "io" + "math/big" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "sync" + "testing" + "time" + + "golang.org/x/crypto/acme" +) + +var discoTmpl = template.Must(template.New("disco").Parse(`{ + "new-reg": "{{.}}/new-reg", + "new-authz": "{{.}}/new-authz", + "new-cert": "{{.}}/new-cert" +}`)) + +var authzTmpl = template.Must(template.New("authz").Parse(`{ + "status": "pending", + "challenges": [ + { + "uri": "{{.}}/challenge/1", + "type": "tls-sni-01", + "token": "token-01" + }, + { + "uri": "{{.}}/challenge/2", + "type": "tls-sni-02", + "token": "token-02" + }, + { + "uri": "{{.}}/challenge/dns-01", + "type": "dns-01", + "token": "token-dns-01" + }, + { + "uri": "{{.}}/challenge/http-01", + "type": "http-01", + "token": "token-http-01" + } + ] +}`)) + +type memCache struct { + mu sync.Mutex + keyData map[string][]byte +} + +func (m *memCache) Get(ctx context.Context, key string) ([]byte, error) { + m.mu.Lock() + defer m.mu.Unlock() + + v, ok := m.keyData[key] + if !ok { + return nil, ErrCacheMiss + } + return v, nil +} + +func (m *memCache) Put(ctx context.Context, key string, data []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + + m.keyData[key] = data + return nil +} + +func (m *memCache) Delete(ctx context.Context, key string) error { + m.mu.Lock() + defer m.mu.Unlock() + + delete(m.keyData, key) + return nil +} + +func newMemCache() *memCache { + return &memCache{ + keyData: make(map[string][]byte), + } +} + +func dummyCert(pub interface{}, san ...string) ([]byte, error) { + return dateDummyCert(pub, time.Now(), time.Now().Add(90*24*time.Hour), san...) +} + +func dateDummyCert(pub interface{}, start, end time.Time, san ...string) ([]byte, error) { + // use EC key to run faster on 386 + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + t := &x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: start, + NotAfter: end, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment, + DNSNames: san, + } + if pub == nil { + pub = &key.PublicKey + } + return x509.CreateCertificate(rand.Reader, t, t, pub, key) +} + +func decodePayload(v interface{}, r io.Reader) error { + var req struct{ Payload string } + if err := json.NewDecoder(r).Decode(&req); err != nil { + return err + } + payload, err := base64.RawURLEncoding.DecodeString(req.Payload) + if err != nil { + return err + } + return json.Unmarshal(payload, v) +} + +func TestGetCertificate(t *testing.T) { + man := &Manager{Prompt: AcceptTOS} + defer man.stopRenew() + hello := &tls.ClientHelloInfo{ServerName: "example.org"} + testGetCertificate(t, man, "example.org", hello) +} + +func TestGetCertificate_trailingDot(t *testing.T) { + man := &Manager{Prompt: AcceptTOS} + defer man.stopRenew() + hello := &tls.ClientHelloInfo{ServerName: "example.org."} + testGetCertificate(t, man, "example.org", hello) +} + +func TestGetCertificate_ForceRSA(t *testing.T) { + man := &Manager{ + Prompt: AcceptTOS, + Cache: newMemCache(), + ForceRSA: true, + } + defer man.stopRenew() + hello := &tls.ClientHelloInfo{ServerName: "example.org"} + testGetCertificate(t, man, "example.org", hello) + + cert, err := man.cacheGet(context.Background(), "example.org") + if err != nil { + t.Fatalf("man.cacheGet: %v", err) + } + if _, ok := cert.PrivateKey.(*rsa.PrivateKey); !ok { + t.Errorf("cert.PrivateKey is %T; want *rsa.PrivateKey", cert.PrivateKey) + } +} + +func TestGetCertificate_nilPrompt(t *testing.T) { + man := &Manager{} + defer man.stopRenew() + url, finish := startACMEServerStub(t, man, "example.org") + defer finish() + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + man.Client = &acme.Client{ + Key: key, + DirectoryURL: url, + } + hello := &tls.ClientHelloInfo{ServerName: "example.org"} + if _, err := man.GetCertificate(hello); err == nil { + t.Error("got certificate for example.org; wanted error") + } +} + +func TestGetCertificate_expiredCache(t *testing.T) { + // Make an expired cert and cache it. + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + tmpl := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: "example.org"}, + NotAfter: time.Now(), + } + pub, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &pk.PublicKey, pk) + if err != nil { + t.Fatal(err) + } + tlscert := &tls.Certificate{ + Certificate: [][]byte{pub}, + PrivateKey: pk, + } + + man := &Manager{Prompt: AcceptTOS, Cache: newMemCache()} + defer man.stopRenew() + if err := man.cachePut(context.Background(), "example.org", tlscert); err != nil { + t.Fatalf("man.cachePut: %v", err) + } + + // The expired cached cert should trigger a new cert issuance + // and return without an error. + hello := &tls.ClientHelloInfo{ServerName: "example.org"} + testGetCertificate(t, man, "example.org", hello) +} + +func TestGetCertificate_failedAttempt(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + })) + defer ts.Close() + + const example = "example.org" + d := createCertRetryAfter + f := testDidRemoveState + defer func() { + createCertRetryAfter = d + testDidRemoveState = f + }() + createCertRetryAfter = 0 + done := make(chan struct{}) + testDidRemoveState = func(domain string) { + if domain != example { + t.Errorf("testDidRemoveState: domain = %q; want %q", domain, example) + } + close(done) + } + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + man := &Manager{ + Prompt: AcceptTOS, + Client: &acme.Client{ + Key: key, + DirectoryURL: ts.URL, + }, + } + defer man.stopRenew() + hello := &tls.ClientHelloInfo{ServerName: example} + if _, err := man.GetCertificate(hello); err == nil { + t.Error("GetCertificate: err is nil") + } + select { + case <-time.After(5 * time.Second): + t.Errorf("took too long to remove the %q state", example) + case <-done: + man.stateMu.Lock() + defer man.stateMu.Unlock() + if v, exist := man.state[example]; exist { + t.Errorf("state exists for %q: %+v", example, v) + } + } +} + +// startACMEServerStub runs an ACME server +// The domain argument is the expected domain name of a certificate request. +func startACMEServerStub(t *testing.T, man *Manager, domain string) (url string, finish func()) { + // echo token-02 | shasum -a 256 + // then divide result in 2 parts separated by dot + tokenCertName := "4e8eb87631187e9ff2153b56b13a4dec.13a35d002e485d60ff37354b32f665d9.token.acme.invalid" + verifyTokenCert := func() { + hello := &tls.ClientHelloInfo{ServerName: tokenCertName} + _, err := man.GetCertificate(hello) + if err != nil { + t.Errorf("verifyTokenCert: GetCertificate(%q): %v", tokenCertName, err) + return + } + } + + // ACME CA server stub + var ca *httptest.Server + ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Replay-Nonce", "nonce") + if r.Method == "HEAD" { + // a nonce request + return + } + + switch r.URL.Path { + // discovery + case "/": + if err := discoTmpl.Execute(w, ca.URL); err != nil { + t.Errorf("discoTmpl: %v", err) + } + // client key registration + case "/new-reg": + w.Write([]byte("{}")) + // domain authorization + case "/new-authz": + w.Header().Set("Location", ca.URL+"/authz/1") + w.WriteHeader(http.StatusCreated) + if err := authzTmpl.Execute(w, ca.URL); err != nil { + t.Errorf("authzTmpl: %v", err) + } + // accept tls-sni-02 challenge + case "/challenge/2": + verifyTokenCert() + w.Write([]byte("{}")) + // authorization status + case "/authz/1": + w.Write([]byte(`{"status": "valid"}`)) + // cert request + case "/new-cert": + var req struct { + CSR string `json:"csr"` + } + decodePayload(&req, r.Body) + b, _ := base64.RawURLEncoding.DecodeString(req.CSR) + csr, err := x509.ParseCertificateRequest(b) + if err != nil { + t.Errorf("new-cert: CSR: %v", err) + } + if csr.Subject.CommonName != domain { + t.Errorf("CommonName in CSR = %q; want %q", csr.Subject.CommonName, domain) + } + der, err := dummyCert(csr.PublicKey, domain) + if err != nil { + t.Errorf("new-cert: dummyCert: %v", err) + } + chainUp := fmt.Sprintf("<%s/ca-cert>; rel=up", ca.URL) + w.Header().Set("Link", chainUp) + w.WriteHeader(http.StatusCreated) + w.Write(der) + // CA chain cert + case "/ca-cert": + der, err := dummyCert(nil, "ca") + if err != nil { + t.Errorf("ca-cert: dummyCert: %v", err) + } + w.Write(der) + default: + t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path) + } + })) + finish = func() { + ca.Close() + + // make sure token cert was removed + cancel := make(chan struct{}) + done := make(chan struct{}) + go func() { + defer close(done) + tick := time.NewTicker(100 * time.Millisecond) + defer tick.Stop() + for { + hello := &tls.ClientHelloInfo{ServerName: tokenCertName} + if _, err := man.GetCertificate(hello); err != nil { + return + } + select { + case <-tick.C: + case <-cancel: + return + } + } + }() + select { + case <-done: + case <-time.After(5 * time.Second): + close(cancel) + t.Error("token cert was not removed") + <-done + } + } + return ca.URL, finish +} + +// tests man.GetCertificate flow using the provided hello argument. +// The domain argument is the expected domain name of a certificate request. +func testGetCertificate(t *testing.T, man *Manager, domain string, hello *tls.ClientHelloInfo) { + url, finish := startACMEServerStub(t, man, domain) + defer finish() + + // use EC key to run faster on 386 + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + man.Client = &acme.Client{ + Key: key, + DirectoryURL: url, + } + + // simulate tls.Config.GetCertificate + var tlscert *tls.Certificate + done := make(chan struct{}) + go func() { + tlscert, err = man.GetCertificate(hello) + close(done) + }() + select { + case <-time.After(time.Minute): + t.Fatal("man.GetCertificate took too long to return") + case <-done: + } + if err != nil { + t.Fatalf("man.GetCertificate: %v", err) + } + + // verify the tlscert is the same we responded with from the CA stub + if len(tlscert.Certificate) == 0 { + t.Fatal("len(tlscert.Certificate) is 0") + } + cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Fatalf("x509.ParseCertificate: %v", err) + } + if len(cert.DNSNames) == 0 || cert.DNSNames[0] != domain { + t.Errorf("cert.DNSNames = %v; want %q", cert.DNSNames, domain) + } + +} + +func TestVerifyHTTP01(t *testing.T) { + var ( + http01 http.Handler + + authzCount int // num. of created authorizations + didAcceptHTTP01 bool + ) + + verifyHTTPToken := func() { + r := httptest.NewRequest("GET", "/.well-known/acme-challenge/token-http-01", nil) + w := httptest.NewRecorder() + http01.ServeHTTP(w, r) + if w.Code != http.StatusOK { + t.Errorf("http token: w.Code = %d; want %d", w.Code, http.StatusOK) + } + if v := string(w.Body.Bytes()); !strings.HasPrefix(v, "token-http-01.") { + t.Errorf("http token value = %q; want 'token-http-01.' prefix", v) + } + } + + // ACME CA server stub, only the needed bits. + // TODO: Merge this with startACMEServerStub, making it a configurable CA for testing. + var ca *httptest.Server + ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Replay-Nonce", "nonce") + if r.Method == "HEAD" { + // a nonce request + return + } + + switch r.URL.Path { + // Discovery. + case "/": + if err := discoTmpl.Execute(w, ca.URL); err != nil { + t.Errorf("discoTmpl: %v", err) + } + // Client key registration. + case "/new-reg": + w.Write([]byte("{}")) + // New domain authorization. + case "/new-authz": + authzCount++ + w.Header().Set("Location", fmt.Sprintf("%s/authz/%d", ca.URL, authzCount)) + w.WriteHeader(http.StatusCreated) + if err := authzTmpl.Execute(w, ca.URL); err != nil { + t.Errorf("authzTmpl: %v", err) + } + // Accept tls-sni-02. + case "/challenge/2": + w.Write([]byte("{}")) + // Reject tls-sni-01. + case "/challenge/1": + http.Error(w, "won't accept tls-sni-01", http.StatusBadRequest) + // Should not accept dns-01. + case "/challenge/dns-01": + t.Errorf("dns-01 challenge was accepted") + http.Error(w, "won't accept dns-01", http.StatusBadRequest) + // Accept http-01. + case "/challenge/http-01": + didAcceptHTTP01 = true + verifyHTTPToken() + w.Write([]byte("{}")) + // Authorization statuses. + // Make tls-sni-xxx invalid. + case "/authz/1", "/authz/2": + w.Write([]byte(`{"status": "invalid"}`)) + case "/authz/3", "/authz/4": + w.Write([]byte(`{"status": "valid"}`)) + default: + http.NotFound(w, r) + t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path) + } + })) + defer ca.Close() + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + m := &Manager{ + Client: &acme.Client{ + Key: key, + DirectoryURL: ca.URL, + }, + } + http01 = m.HTTPHandler(nil) + if err := m.verify(context.Background(), m.Client, "example.org"); err != nil { + t.Errorf("m.verify: %v", err) + } + // Only tls-sni-01, tls-sni-02 and http-01 must be accepted + // The dns-01 challenge is unsupported. + if authzCount != 3 { + t.Errorf("authzCount = %d; want 3", authzCount) + } + if !didAcceptHTTP01 { + t.Error("did not accept http-01 challenge") + } +} + +func TestHTTPHandlerDefaultFallback(t *testing.T) { + tt := []struct { + method, url string + wantCode int + wantLocation string + }{ + {"GET", "http://example.org", 302, "https://example.org/"}, + {"GET", "http://example.org/foo", 302, "https://example.org/foo"}, + {"GET", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"}, + {"GET", "http://example.org/?a=b", 302, "https://example.org/?a=b"}, + {"GET", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"}, + {"GET", "http://example.org:80/foo?a=b", 302, "https://example.org:443/foo?a=b"}, + {"GET", "http://example.org:80/foo%20bar", 302, "https://example.org:443/foo%20bar"}, + {"GET", "http://[2602:d1:xxxx::c60a]:1234", 302, "https://[2602:d1:xxxx::c60a]:443/"}, + {"GET", "http://[2602:d1:xxxx::c60a]", 302, "https://[2602:d1:xxxx::c60a]/"}, + {"GET", "http://[2602:d1:xxxx::c60a]/foo?a=b", 302, "https://[2602:d1:xxxx::c60a]/foo?a=b"}, + {"HEAD", "http://example.org", 302, "https://example.org/"}, + {"HEAD", "http://example.org/foo", 302, "https://example.org/foo"}, + {"HEAD", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"}, + {"HEAD", "http://example.org/?a=b", 302, "https://example.org/?a=b"}, + {"HEAD", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"}, + {"POST", "http://example.org", 400, ""}, + {"PUT", "http://example.org", 400, ""}, + {"GET", "http://example.org/.well-known/acme-challenge/x", 404, ""}, + } + var m Manager + h := m.HTTPHandler(nil) + for i, test := range tt { + r := httptest.NewRequest(test.method, test.url, nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + if w.Code != test.wantCode { + t.Errorf("%d: w.Code = %d; want %d", i, w.Code, test.wantCode) + t.Errorf("%d: body: %s", i, w.Body.Bytes()) + } + if v := w.Header().Get("Location"); v != test.wantLocation { + t.Errorf("%d: Location = %q; want %q", i, v, test.wantLocation) + } + } +} + +func TestAccountKeyCache(t *testing.T) { + m := Manager{Cache: newMemCache()} + ctx := context.Background() + k1, err := m.accountKey(ctx) + if err != nil { + t.Fatal(err) + } + k2, err := m.accountKey(ctx) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(k1, k2) { + t.Errorf("account keys don't match: k1 = %#v; k2 = %#v", k1, k2) + } +} + +func TestCache(t *testing.T) { + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + tmpl := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: "example.org"}, + NotAfter: time.Now().Add(time.Hour), + } + pub, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &privKey.PublicKey, privKey) + if err != nil { + t.Fatal(err) + } + tlscert := &tls.Certificate{ + Certificate: [][]byte{pub}, + PrivateKey: privKey, + } + + man := &Manager{Cache: newMemCache()} + defer man.stopRenew() + ctx := context.Background() + if err := man.cachePut(ctx, "example.org", tlscert); err != nil { + t.Fatalf("man.cachePut: %v", err) + } + res, err := man.cacheGet(ctx, "example.org") + if err != nil { + t.Fatalf("man.cacheGet: %v", err) + } + if res == nil { + t.Fatal("res is nil") + } +} + +func TestHostWhitelist(t *testing.T) { + policy := HostWhitelist("example.com", "example.org", "*.example.net") + tt := []struct { + host string + allow bool + }{ + {"example.com", true}, + {"example.org", true}, + {"one.example.com", false}, + {"two.example.org", false}, + {"three.example.net", false}, + {"dummy", false}, + } + for i, test := range tt { + err := policy(nil, test.host) + if err != nil && test.allow { + t.Errorf("%d: policy(%q): %v; want nil", i, test.host, err) + } + if err == nil && !test.allow { + t.Errorf("%d: policy(%q): nil; want an error", i, test.host) + } + } +} + +func TestValidCert(t *testing.T) { + key1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + key2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + key3, err := rsa.GenerateKey(rand.Reader, 512) + if err != nil { + t.Fatal(err) + } + cert1, err := dummyCert(key1.Public(), "example.org") + if err != nil { + t.Fatal(err) + } + cert2, err := dummyCert(key2.Public(), "example.org") + if err != nil { + t.Fatal(err) + } + cert3, err := dummyCert(key3.Public(), "example.org") + if err != nil { + t.Fatal(err) + } + now := time.Now() + early, err := dateDummyCert(key1.Public(), now.Add(time.Hour), now.Add(2*time.Hour), "example.org") + if err != nil { + t.Fatal(err) + } + expired, err := dateDummyCert(key1.Public(), now.Add(-2*time.Hour), now.Add(-time.Hour), "example.org") + if err != nil { + t.Fatal(err) + } + + tt := []struct { + domain string + key crypto.Signer + cert [][]byte + ok bool + }{ + {"example.org", key1, [][]byte{cert1}, true}, + {"example.org", key3, [][]byte{cert3}, true}, + {"example.org", key1, [][]byte{cert1, cert2, cert3}, true}, + {"example.org", key1, [][]byte{cert1, {1}}, false}, + {"example.org", key1, [][]byte{{1}}, false}, + {"example.org", key1, [][]byte{cert2}, false}, + {"example.org", key2, [][]byte{cert1}, false}, + {"example.org", key1, [][]byte{cert3}, false}, + {"example.org", key3, [][]byte{cert1}, false}, + {"example.net", key1, [][]byte{cert1}, false}, + {"example.org", key1, [][]byte{early}, false}, + {"example.org", key1, [][]byte{expired}, false}, + } + for i, test := range tt { + leaf, err := validCert(test.domain, test.cert, test.key) + if err != nil && test.ok { + t.Errorf("%d: err = %v", i, err) + } + if err == nil && !test.ok { + t.Errorf("%d: err is nil", i) + } + if err == nil && test.ok && leaf == nil { + t.Errorf("%d: leaf is nil", i) + } + } +} + +type cacheGetFunc func(ctx context.Context, key string) ([]byte, error) + +func (f cacheGetFunc) Get(ctx context.Context, key string) ([]byte, error) { + return f(ctx, key) +} + +func (f cacheGetFunc) Put(ctx context.Context, key string, data []byte) error { + return fmt.Errorf("unsupported Put of %q = %q", key, data) +} + +func (f cacheGetFunc) Delete(ctx context.Context, key string) error { + return fmt.Errorf("unsupported Delete of %q", key) +} + +func TestManagerGetCertificateBogusSNI(t *testing.T) { + m := Manager{ + Prompt: AcceptTOS, + Cache: cacheGetFunc(func(ctx context.Context, key string) ([]byte, error) { + return nil, fmt.Errorf("cache.Get of %s", key) + }), + } + tests := []struct { + name string + wantErr string + }{ + {"foo.com", "cache.Get of foo.com"}, + {"foo.com.", "cache.Get of foo.com"}, + {`a\b.com`, "acme/autocert: server name contains invalid character"}, + {`a/b.com`, "acme/autocert: server name contains invalid character"}, + {"", "acme/autocert: missing server name"}, + {"foo", "acme/autocert: server name component count invalid"}, + {".foo", "acme/autocert: server name component count invalid"}, + {"foo.", "acme/autocert: server name component count invalid"}, + {"fo.o", "cache.Get of fo.o"}, + } + for _, tt := range tests { + _, err := m.GetCertificate(&tls.ClientHelloInfo{ServerName: tt.name}) + got := fmt.Sprint(err) + if got != tt.wantErr { + t.Errorf("GetCertificate(SNI = %q) = %q; want %q", tt.name, got, tt.wantErr) + } + } +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/cache.go b/vendor/golang.org/x/crypto/acme/autocert/cache.go new file mode 100644 index 0000000..61a5fd2 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/cache.go @@ -0,0 +1,130 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "context" + "errors" + "io/ioutil" + "os" + "path/filepath" +) + +// ErrCacheMiss is returned when a certificate is not found in cache. +var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss") + +// Cache is used by Manager to store and retrieve previously obtained certificates +// as opaque data. +// +// The key argument of the methods refers to a domain name but need not be an FQDN. +// Cache implementations should not rely on the key naming pattern. +type Cache interface { + // Get returns a certificate data for the specified key. + // If there's no such key, Get returns ErrCacheMiss. + Get(ctx context.Context, key string) ([]byte, error) + + // Put stores the data in the cache under the specified key. + // Underlying implementations may use any data storage format, + // as long as the reverse operation, Get, results in the original data. + Put(ctx context.Context, key string, data []byte) error + + // Delete removes a certificate data from the cache under the specified key. + // If there's no such key in the cache, Delete returns nil. + Delete(ctx context.Context, key string) error +} + +// DirCache implements Cache using a directory on the local filesystem. +// If the directory does not exist, it will be created with 0700 permissions. +type DirCache string + +// Get reads a certificate data from the specified file name. +func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) { + name = filepath.Join(string(d), name) + var ( + data []byte + err error + done = make(chan struct{}) + ) + go func() { + data, err = ioutil.ReadFile(name) + close(done) + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-done: + } + if os.IsNotExist(err) { + return nil, ErrCacheMiss + } + return data, err +} + +// Put writes the certificate data to the specified file name. +// The file will be created with 0600 permissions. +func (d DirCache) Put(ctx context.Context, name string, data []byte) error { + if err := os.MkdirAll(string(d), 0700); err != nil { + return err + } + + done := make(chan struct{}) + var err error + go func() { + defer close(done) + var tmp string + if tmp, err = d.writeTempFile(name, data); err != nil { + return + } + select { + case <-ctx.Done(): + // Don't overwrite the file if the context was canceled. + default: + newName := filepath.Join(string(d), name) + err = os.Rename(tmp, newName) + } + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + return err +} + +// Delete removes the specified file name. +func (d DirCache) Delete(ctx context.Context, name string) error { + name = filepath.Join(string(d), name) + var ( + err error + done = make(chan struct{}) + ) + go func() { + err = os.Remove(name) + close(done) + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// writeTempFile writes b to a temporary file, closes the file and returns its path. +func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) { + // TempFile uses 0600 permissions + f, err := ioutil.TempFile(string(d), prefix) + if err != nil { + return "", err + } + if _, err := f.Write(b); err != nil { + f.Close() + return "", err + } + return f.Name(), f.Close() +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/cache_test.go b/vendor/golang.org/x/crypto/acme/autocert/cache_test.go new file mode 100644 index 0000000..653b05b --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/cache_test.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" +) + +// make sure DirCache satisfies Cache interface +var _ Cache = DirCache("/") + +func TestDirCache(t *testing.T) { + dir, err := ioutil.TempDir("", "autocert") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + dir = filepath.Join(dir, "certs") // a nonexistent dir + cache := DirCache(dir) + ctx := context.Background() + + // test cache miss + if _, err := cache.Get(ctx, "nonexistent"); err != ErrCacheMiss { + t.Errorf("get: %v; want ErrCacheMiss", err) + } + + // test put/get + b1 := []byte{1} + if err := cache.Put(ctx, "dummy", b1); err != nil { + t.Fatalf("put: %v", err) + } + b2, err := cache.Get(ctx, "dummy") + if err != nil { + t.Fatalf("get: %v", err) + } + if !reflect.DeepEqual(b1, b2) { + t.Errorf("b1 = %v; want %v", b1, b2) + } + name := filepath.Join(dir, "dummy") + if _, err := os.Stat(name); err != nil { + t.Error(err) + } + + // test delete + if err := cache.Delete(ctx, "dummy"); err != nil { + t.Fatalf("delete: %v", err) + } + if _, err := cache.Get(ctx, "dummy"); err != ErrCacheMiss { + t.Errorf("get: %v; want ErrCacheMiss", err) + } +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/example_test.go b/vendor/golang.org/x/crypto/acme/autocert/example_test.go new file mode 100644 index 0000000..552a625 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/example_test.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert_test + +import ( + "crypto/tls" + "fmt" + "log" + "net/http" + + "golang.org/x/crypto/acme/autocert" +) + +func ExampleNewListener() { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, TLS user! Your config: %+v", r.TLS) + }) + log.Fatal(http.Serve(autocert.NewListener("example.com"), mux)) +} + +func ExampleManager() { + m := &autocert.Manager{ + Cache: autocert.DirCache("secret-dir"), + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("example.org"), + } + go http.ListenAndServe(":http", m.HTTPHandler(nil)) + s := &http.Server{ + Addr: ":https", + TLSConfig: &tls.Config{GetCertificate: m.GetCertificate}, + } + s.ListenAndServeTLS("", "") +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/listener.go b/vendor/golang.org/x/crypto/acme/autocert/listener.go new file mode 100644 index 0000000..d744df0 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/listener.go @@ -0,0 +1,160 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "crypto/tls" + "log" + "net" + "os" + "path/filepath" + "runtime" + "time" +) + +// NewListener returns a net.Listener that listens on the standard TLS +// port (443) on all interfaces and returns *tls.Conn connections with +// LetsEncrypt certificates for the provided domain or domains. +// +// It enables one-line HTTPS servers: +// +// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler)) +// +// NewListener is a convenience function for a common configuration. +// More complex or custom configurations can use the autocert.Manager +// type instead. +// +// Use of this function implies acceptance of the LetsEncrypt Terms of +// Service. If domains is not empty, the provided domains are passed +// to HostWhitelist. If domains is empty, the listener will do +// LetsEncrypt challenges for any requested domain, which is not +// recommended. +// +// Certificates are cached in a "golang-autocert" directory under an +// operating system-specific cache or temp directory. This may not +// be suitable for servers spanning multiple machines. +// +// The returned listener uses a *tls.Config that enables HTTP/2, and +// should only be used with servers that support HTTP/2. +// +// The returned Listener also enables TCP keep-alives on the accepted +// connections. The returned *tls.Conn are returned before their TLS +// handshake has completed. +func NewListener(domains ...string) net.Listener { + m := &Manager{ + Prompt: AcceptTOS, + } + if len(domains) > 0 { + m.HostPolicy = HostWhitelist(domains...) + } + dir := cacheDir() + if err := os.MkdirAll(dir, 0700); err != nil { + log.Printf("warning: autocert.NewListener not using a cache: %v", err) + } else { + m.Cache = DirCache(dir) + } + return m.Listener() +} + +// Listener listens on the standard TLS port (443) on all interfaces +// and returns a net.Listener returning *tls.Conn connections. +// +// The returned listener uses a *tls.Config that enables HTTP/2, and +// should only be used with servers that support HTTP/2. +// +// The returned Listener also enables TCP keep-alives on the accepted +// connections. The returned *tls.Conn are returned before their TLS +// handshake has completed. +// +// Unlike NewListener, it is the caller's responsibility to initialize +// the Manager m's Prompt, Cache, HostPolicy, and other desired options. +func (m *Manager) Listener() net.Listener { + ln := &listener{ + m: m, + conf: &tls.Config{ + GetCertificate: m.GetCertificate, // bonus: panic on nil m + NextProtos: []string{"h2", "http/1.1"}, // Enable HTTP/2 + }, + } + ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443") + return ln +} + +type listener struct { + m *Manager + conf *tls.Config + + tcpListener net.Listener + tcpListenErr error +} + +func (ln *listener) Accept() (net.Conn, error) { + if ln.tcpListenErr != nil { + return nil, ln.tcpListenErr + } + conn, err := ln.tcpListener.Accept() + if err != nil { + return nil, err + } + tcpConn := conn.(*net.TCPConn) + + // Because Listener is a convenience function, help out with + // this too. This is not possible for the caller to set once + // we return a *tcp.Conn wrapping an inaccessible net.Conn. + // If callers don't want this, they can do things the manual + // way and tweak as needed. But this is what net/http does + // itself, so copy that. If net/http changes, we can change + // here too. + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(3 * time.Minute) + + return tls.Server(tcpConn, ln.conf), nil +} + +func (ln *listener) Addr() net.Addr { + if ln.tcpListener != nil { + return ln.tcpListener.Addr() + } + // net.Listen failed. Return something non-nil in case callers + // call Addr before Accept: + return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443} +} + +func (ln *listener) Close() error { + if ln.tcpListenErr != nil { + return ln.tcpListenErr + } + return ln.tcpListener.Close() +} + +func homeDir() string { + if runtime.GOOS == "windows" { + return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + } + if h := os.Getenv("HOME"); h != "" { + return h + } + return "/" +} + +func cacheDir() string { + const base = "golang-autocert" + switch runtime.GOOS { + case "darwin": + return filepath.Join(homeDir(), "Library", "Caches", base) + case "windows": + for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} { + if v := os.Getenv(ev); v != "" { + return filepath.Join(v, base) + } + } + // Worst case: + return filepath.Join(homeDir(), base) + } + if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" { + return filepath.Join(xdg, base) + } + return filepath.Join(homeDir(), ".cache", base) +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go new file mode 100644 index 0000000..2a3a0a7 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/renewal.go @@ -0,0 +1,126 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "context" + "crypto" + "sync" + "time" +) + +// renewJitter is the maximum deviation from Manager.RenewBefore. +const renewJitter = time.Hour + +// domainRenewal tracks the state used by the periodic timers +// renewing a single domain's cert. +type domainRenewal struct { + m *Manager + domain string + key crypto.Signer + + timerMu sync.Mutex + timer *time.Timer +} + +// start starts a cert renewal timer at the time +// defined by the certificate expiration time exp. +// +// If the timer is already started, calling start is a noop. +func (dr *domainRenewal) start(exp time.Time) { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer != nil { + return + } + dr.timer = time.AfterFunc(dr.next(exp), dr.renew) +} + +// stop stops the cert renewal timer. +// If the timer is already stopped, calling stop is a noop. +func (dr *domainRenewal) stop() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + dr.timer.Stop() + dr.timer = nil +} + +// renew is called periodically by a timer. +// The first renew call is kicked off by dr.start. +func (dr *domainRenewal) renew() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // TODO: rotate dr.key at some point? + next, err := dr.do(ctx) + if err != nil { + next = renewJitter / 2 + next += time.Duration(pseudoRand.int63n(int64(next))) + } + dr.timer = time.AfterFunc(next, dr.renew) + testDidRenewLoop(next, err) +} + +// do is similar to Manager.createCert but it doesn't lock a Manager.state item. +// Instead, it requests a new certificate independently and, upon success, +// replaces dr.m.state item with a new one and updates cache for the given domain. +// +// It may return immediately if the expiration date of the currently cached cert +// is far enough in the future. +// +// The returned value is a time interval after which the renewal should occur again. +func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { + // a race is likely unavoidable in a distributed environment + // but we try nonetheless + if tlscert, err := dr.m.cacheGet(ctx, dr.domain); err == nil { + next := dr.next(tlscert.Leaf.NotAfter) + if next > dr.m.renewBefore()+renewJitter { + return next, nil + } + } + + der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.domain) + if err != nil { + return 0, err + } + state := &certState{ + key: dr.key, + cert: der, + leaf: leaf, + } + tlscert, err := state.tlscert() + if err != nil { + return 0, err + } + if err := dr.m.cachePut(ctx, dr.domain, tlscert); err != nil { + return 0, err + } + dr.m.stateMu.Lock() + defer dr.m.stateMu.Unlock() + // m.state is guaranteed to be non-nil at this point + dr.m.state[dr.domain] = state + return dr.next(leaf.NotAfter), nil +} + +func (dr *domainRenewal) next(expiry time.Time) time.Duration { + d := expiry.Sub(timeNow()) - dr.m.renewBefore() + // add a bit of randomness to renew deadline + n := pseudoRand.int63n(int64(renewJitter)) + d -= time.Duration(n) + if d < 0 { + return 0 + } + return d +} + +var testDidRenewLoop = func(next time.Duration, err error) {} diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal_test.go b/vendor/golang.org/x/crypto/acme/autocert/renewal_test.go new file mode 100644 index 0000000..11d40ff --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/renewal_test.go @@ -0,0 +1,191 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "golang.org/x/crypto/acme" +) + +func TestRenewalNext(t *testing.T) { + now := time.Now() + timeNow = func() time.Time { return now } + defer func() { timeNow = time.Now }() + + man := &Manager{RenewBefore: 7 * 24 * time.Hour} + defer man.stopRenew() + tt := []struct { + expiry time.Time + min, max time.Duration + }{ + {now.Add(90 * 24 * time.Hour), 83*24*time.Hour - renewJitter, 83 * 24 * time.Hour}, + {now.Add(time.Hour), 0, 1}, + {now, 0, 1}, + {now.Add(-time.Hour), 0, 1}, + } + + dr := &domainRenewal{m: man} + for i, test := range tt { + next := dr.next(test.expiry) + if next < test.min || test.max < next { + t.Errorf("%d: next = %v; want between %v and %v", i, next, test.min, test.max) + } + } +} + +func TestRenewFromCache(t *testing.T) { + const domain = "example.org" + + // ACME CA server stub + var ca *httptest.Server + ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Replay-Nonce", "nonce") + if r.Method == "HEAD" { + // a nonce request + return + } + + switch r.URL.Path { + // discovery + case "/": + if err := discoTmpl.Execute(w, ca.URL); err != nil { + t.Fatalf("discoTmpl: %v", err) + } + // client key registration + case "/new-reg": + w.Write([]byte("{}")) + // domain authorization + case "/new-authz": + w.Header().Set("Location", ca.URL+"/authz/1") + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"status": "valid"}`)) + // cert request + case "/new-cert": + var req struct { + CSR string `json:"csr"` + } + decodePayload(&req, r.Body) + b, _ := base64.RawURLEncoding.DecodeString(req.CSR) + csr, err := x509.ParseCertificateRequest(b) + if err != nil { + t.Fatalf("new-cert: CSR: %v", err) + } + der, err := dummyCert(csr.PublicKey, domain) + if err != nil { + t.Fatalf("new-cert: dummyCert: %v", err) + } + chainUp := fmt.Sprintf("<%s/ca-cert>; rel=up", ca.URL) + w.Header().Set("Link", chainUp) + w.WriteHeader(http.StatusCreated) + w.Write(der) + // CA chain cert + case "/ca-cert": + der, err := dummyCert(nil, "ca") + if err != nil { + t.Fatalf("ca-cert: dummyCert: %v", err) + } + w.Write(der) + default: + t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path) + } + })) + defer ca.Close() + + // use EC key to run faster on 386 + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + man := &Manager{ + Prompt: AcceptTOS, + Cache: newMemCache(), + RenewBefore: 24 * time.Hour, + Client: &acme.Client{ + Key: key, + DirectoryURL: ca.URL, + }, + } + defer man.stopRenew() + + // cache an almost expired cert + now := time.Now() + cert, err := dateDummyCert(key.Public(), now.Add(-2*time.Hour), now.Add(time.Minute), domain) + if err != nil { + t.Fatal(err) + } + tlscert := &tls.Certificate{PrivateKey: key, Certificate: [][]byte{cert}} + if err := man.cachePut(context.Background(), domain, tlscert); err != nil { + t.Fatal(err) + } + + // veriy the renewal happened + defer func() { + testDidRenewLoop = func(next time.Duration, err error) {} + }() + done := make(chan struct{}) + testDidRenewLoop = func(next time.Duration, err error) { + defer close(done) + if err != nil { + t.Errorf("testDidRenewLoop: %v", err) + } + // Next should be about 90 days: + // dummyCert creates 90days expiry + account for man.RenewBefore. + // Previous expiration was within 1 min. + future := 88 * 24 * time.Hour + if next < future { + t.Errorf("testDidRenewLoop: next = %v; want >= %v", next, future) + } + + // ensure the new cert is cached + after := time.Now().Add(future) + tlscert, err := man.cacheGet(context.Background(), domain) + if err != nil { + t.Fatalf("man.cacheGet: %v", err) + } + if !tlscert.Leaf.NotAfter.After(after) { + t.Errorf("cache leaf.NotAfter = %v; want > %v", tlscert.Leaf.NotAfter, after) + } + + // verify the old cert is also replaced in memory + man.stateMu.Lock() + defer man.stateMu.Unlock() + s := man.state[domain] + if s == nil { + t.Fatalf("m.state[%q] is nil", domain) + } + tlscert, err = s.tlscert() + if err != nil { + t.Fatalf("s.tlscert: %v", err) + } + if !tlscert.Leaf.NotAfter.After(after) { + t.Errorf("state leaf.NotAfter = %v; want > %v", tlscert.Leaf.NotAfter, after) + } + } + + // trigger renew + hello := &tls.ClientHelloInfo{ServerName: domain} + if _, err := man.GetCertificate(hello); err != nil { + t.Fatal(err) + } + + // wait for renew loop + select { + case <-time.After(10 * time.Second): + t.Fatal("renew took too long to occur") + case <-done: + } +} diff --git a/vendor/golang.org/x/crypto/acme/jws.go b/vendor/golang.org/x/crypto/acme/jws.go new file mode 100644 index 0000000..6cbca25 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/jws.go @@ -0,0 +1,153 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + _ "crypto/sha512" // need for EC keys + "encoding/base64" + "encoding/json" + "fmt" + "math/big" +) + +// jwsEncodeJSON signs claimset using provided key and a nonce. +// The result is serialized in JSON format. +// See https://tools.ietf.org/html/rfc7515#section-7. +func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) { + jwk, err := jwkEncode(key.Public()) + if err != nil { + return nil, err + } + alg, sha := jwsHasher(key) + if alg == "" || !sha.Available() { + return nil, ErrUnsupportedKey + } + phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce) + phead = base64.RawURLEncoding.EncodeToString([]byte(phead)) + cs, err := json.Marshal(claimset) + if err != nil { + return nil, err + } + payload := base64.RawURLEncoding.EncodeToString(cs) + hash := sha.New() + hash.Write([]byte(phead + "." + payload)) + sig, err := jwsSign(key, sha, hash.Sum(nil)) + if err != nil { + return nil, err + } + + enc := struct { + Protected string `json:"protected"` + Payload string `json:"payload"` + Sig string `json:"signature"` + }{ + Protected: phead, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(sig), + } + return json.Marshal(&enc) +} + +// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. +// The result is also suitable for creating a JWK thumbprint. +// https://tools.ietf.org/html/rfc7517 +func jwkEncode(pub crypto.PublicKey) (string, error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.3.1 + n := pub.N + e := big.NewInt(int64(pub.E)) + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, + base64.RawURLEncoding.EncodeToString(e.Bytes()), + base64.RawURLEncoding.EncodeToString(n.Bytes()), + ), nil + case *ecdsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.2.1 + p := pub.Curve.Params() + n := p.BitSize / 8 + if p.BitSize%8 != 0 { + n++ + } + x := pub.X.Bytes() + if n > len(x) { + x = append(make([]byte, n-len(x)), x...) + } + y := pub.Y.Bytes() + if n > len(y) { + y = append(make([]byte, n-len(y)), y...) + } + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, + p.Name, + base64.RawURLEncoding.EncodeToString(x), + base64.RawURLEncoding.EncodeToString(y), + ), nil + } + return "", ErrUnsupportedKey +} + +// jwsSign signs the digest using the given key. +// It returns ErrUnsupportedKey if the key type is unknown. +// The hash is used only for RSA keys. +func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { + switch key := key.(type) { + case *rsa.PrivateKey: + return key.Sign(rand.Reader, digest, hash) + case *ecdsa.PrivateKey: + r, s, err := ecdsa.Sign(rand.Reader, key, digest) + if err != nil { + return nil, err + } + rb, sb := r.Bytes(), s.Bytes() + size := key.Params().BitSize / 8 + if size%8 > 0 { + size++ + } + sig := make([]byte, size*2) + copy(sig[size-len(rb):], rb) + copy(sig[size*2-len(sb):], sb) + return sig, nil + } + return nil, ErrUnsupportedKey +} + +// jwsHasher indicates suitable JWS algorithm name and a hash function +// to use for signing a digest with the provided key. +// It returns ("", 0) if the key is not supported. +func jwsHasher(key crypto.Signer) (string, crypto.Hash) { + switch key := key.(type) { + case *rsa.PrivateKey: + return "RS256", crypto.SHA256 + case *ecdsa.PrivateKey: + switch key.Params().Name { + case "P-256": + return "ES256", crypto.SHA256 + case "P-384": + return "ES384", crypto.SHA384 + case "P-521": + return "ES512", crypto.SHA512 + } + } + return "", 0 +} + +// JWKThumbprint creates a JWK thumbprint out of pub +// as specified in https://tools.ietf.org/html/rfc7638. +func JWKThumbprint(pub crypto.PublicKey) (string, error) { + jwk, err := jwkEncode(pub) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(jwk)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} diff --git a/vendor/golang.org/x/crypto/acme/jws_test.go b/vendor/golang.org/x/crypto/acme/jws_test.go new file mode 100644 index 0000000..0ff0fb5 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/jws_test.go @@ -0,0 +1,319 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "math/big" + "testing" +) + +const ( + testKeyPEM = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA4xgZ3eRPkwoRvy7qeRUbmMDe0V+xH9eWLdu0iheeLlrmD2mq +WXfP9IeSKApbn34g8TuAS9g5zhq8ELQ3kmjr+KV86GAMgI6VAcGlq3QrzpTCf/30 +Ab7+zawrfRaFONa1HwEzPY1KHnGVkxJc85gNkwYI9SY2RHXtvln3zs5wITNrdosq +EXeaIkVYBEhbhNu54pp3kxo6TuWLi9e6pXeWetEwmlBwtWZlPoib2j3TxLBksKZf +oyFyek380mHgJAumQ/I2fjj98/97mk3ihOY4AgVdCDj1z/GCoZkG5Rq7nbCGyosy +KWyDX00Zs+nNqVhoLeIvXC4nnWdJMZ6rogxyQQIDAQABAoIBACIEZTOI1Kao9nmV +9IeIsuaR1Y61b9neOF/MLmIVIZu+AAJFCMB4Iw11FV6sFodwpEyeZhx2WkpWVN+H +r19eGiLX3zsL0DOdqBJoSIHDWCCMxgnYJ6nvS0nRxX3qVrBp8R2g12Ub+gNPbmFm +ecf/eeERIVxfifd9VsyRu34eDEvcmKFuLYbElFcPh62xE3x12UZvV/sN7gXbawpP +G+w255vbE5MoaKdnnO83cTFlcHvhn24M/78qP7Te5OAeelr1R89kYxQLpuGe4fbS +zc6E3ym5Td6urDetGGrSY1Eu10/8sMusX+KNWkm+RsBRbkyKq72ks/qKpOxOa+c6 +9gm+Y8ECgYEA/iNUyg1ubRdH11p82l8KHtFC1DPE0V1gSZsX29TpM5jS4qv46K+s +8Ym1zmrORM8x+cynfPx1VQZQ34EYeCMIX212ryJ+zDATl4NE0I4muMvSiH9vx6Xc +7FmhNnaYzPsBL5Tm9nmtQuP09YEn8poiOJFiDs/4olnD5ogA5O4THGkCgYEA5MIL +qWYBUuqbEWLRtMruUtpASclrBqNNsJEsMGbeqBJmoMxdHeSZckbLOrqm7GlMyNRJ +Ne/5uWRGSzaMYuGmwsPpERzqEvYFnSrpjW5YtXZ+JtxFXNVfm9Z1gLLgvGpOUCIU +RbpoDckDe1vgUuk3y5+DjZihs+rqIJ45XzXTzBkCgYBWuf3segruJZy5rEKhTv+o +JqeUvRn0jNYYKFpLBeyTVBrbie6GkbUGNIWbrK05pC+c3K9nosvzuRUOQQL1tJbd +4gA3oiD9U4bMFNr+BRTHyZ7OQBcIXdz3t1qhuHVKtnngIAN1p25uPlbRFUNpshnt +jgeVoHlsBhApcs5DUc+pyQKBgDzeHPg/+g4z+nrPznjKnktRY1W+0El93kgi+J0Q +YiJacxBKEGTJ1MKBb8X6sDurcRDm22wMpGfd9I5Cv2v4GsUsF7HD/cx5xdih+G73 +c4clNj/k0Ff5Nm1izPUno4C+0IOl7br39IPmfpSuR6wH/h6iHQDqIeybjxyKvT1G +N0rRAoGBAKGD+4ZI/E1MoJ5CXB8cDDMHagbE3cq/DtmYzE2v1DFpQYu5I4PCm5c7 +EQeIP6dZtv8IMgtGIb91QX9pXvP0aznzQKwYIA8nZgoENCPfiMTPiEDT9e/0lObO +9XWsXpbSTsRPj0sv1rB+UzBJ0PgjK4q2zOF0sNo7b1+6nlM3BWPx +-----END RSA PRIVATE KEY----- +` + + // This thumbprint is for the testKey defined above. + testKeyThumbprint = "6nicxzh6WETQlrvdchkz-U3e3DOQZ4heJKU63rfqMqQ" + + // openssl ecparam -name secp256k1 -genkey -noout + testKeyECPEM = ` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIK07hGLr0RwyUdYJ8wbIiBS55CjnkMD23DWr+ccnypWLoAoGCCqGSM49 +AwEHoUQDQgAE5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HThqIrvawF5 +QAaS/RNouybCiRhRjI3EaxLkQwgrCw0gqQ== +-----END EC PRIVATE KEY----- +` + // openssl ecparam -name secp384r1 -genkey -noout + testKeyEC384PEM = ` +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDAQ4lNtXRORWr1bgKR1CGysr9AJ9SyEk4jiVnlUWWUChmSNL+i9SLSD +Oe/naPqXJ6CgBwYFK4EEACKhZANiAAQzKtj+Ms0vHoTX5dzv3/L5YMXOWuI5UKRj +JigpahYCqXD2BA1j0E/2xt5vlPf+gm0PL+UHSQsCokGnIGuaHCsJAp3ry0gHQEke +WYXapUUFdvaK1R2/2hn5O+eiQM8YzCg= +-----END EC PRIVATE KEY----- +` + // openssl ecparam -name secp521r1 -genkey -noout + testKeyEC512PEM = ` +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIBSNZKFcWzXzB/aJClAb305ibalKgtDA7+70eEkdPt28/3LZMM935Z +KqYHh/COcxuu3Kt8azRAUz3gyr4zZKhlKUSgBwYFK4EEACOhgYkDgYYABAHUNKbx +7JwC7H6pa2sV0tERWhHhB3JmW+OP6SUgMWryvIKajlx73eS24dy4QPGrWO9/ABsD +FqcRSkNVTXnIv6+0mAF25knqIBIg5Q8M9BnOu9GGAchcwt3O7RDHmqewnJJDrbjd +GGnm6rb+NnWR9DIopM0nKNkToWoF/hzopxu4Ae/GsQ== +-----END EC PRIVATE KEY----- +` + // 1. openssl ec -in key.pem -noout -text + // 2. remove first byte, 04 (the header); the rest is X and Y + // 3. convert each with: echo | xxd -r -p | base64 -w 100 | tr -d '=' | tr '/+' '_-' + testKeyECPubX = "5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HQ" + testKeyECPubY = "4aiK72sBeUAGkv0TaLsmwokYUYyNxGsS5EMIKwsNIKk" + testKeyEC384PubX = "MyrY_jLNLx6E1-Xc79_y-WDFzlriOVCkYyYoKWoWAqlw9gQNY9BP9sbeb5T3_oJt" + testKeyEC384PubY = "Dy_lB0kLAqJBpyBrmhwrCQKd68tIB0BJHlmF2qVFBXb2itUdv9oZ-TvnokDPGMwo" + testKeyEC512PubX = "AdQ0pvHsnALsfqlraxXS0RFaEeEHcmZb44_pJSAxavK8gpqOXHvd5Lbh3LhA8atY738AGwMWpxFKQ1VNeci_r7SY" + testKeyEC512PubY = "AXbmSeogEiDlDwz0Gc670YYByFzC3c7tEMeap7CckkOtuN0Yaebqtv42dZH0MiikzSco2ROhagX-HOinG7gB78ax" + + // echo -n '{"crv":"P-256","kty":"EC","x":"","y":""}' | \ + // openssl dgst -binary -sha256 | base64 | tr -d '=' | tr '/+' '_-' + testKeyECThumbprint = "zedj-Bd1Zshp8KLePv2MB-lJ_Hagp7wAwdkA0NUTniU" +) + +var ( + testKey *rsa.PrivateKey + testKeyEC *ecdsa.PrivateKey + testKeyEC384 *ecdsa.PrivateKey + testKeyEC512 *ecdsa.PrivateKey +) + +func init() { + testKey = parseRSA(testKeyPEM, "testKeyPEM") + testKeyEC = parseEC(testKeyECPEM, "testKeyECPEM") + testKeyEC384 = parseEC(testKeyEC384PEM, "testKeyEC384PEM") + testKeyEC512 = parseEC(testKeyEC512PEM, "testKeyEC512PEM") +} + +func decodePEM(s, name string) []byte { + d, _ := pem.Decode([]byte(s)) + if d == nil { + panic("no block found in " + name) + } + return d.Bytes +} + +func parseRSA(s, name string) *rsa.PrivateKey { + b := decodePEM(s, name) + k, err := x509.ParsePKCS1PrivateKey(b) + if err != nil { + panic(fmt.Sprintf("%s: %v", name, err)) + } + return k +} + +func parseEC(s, name string) *ecdsa.PrivateKey { + b := decodePEM(s, name) + k, err := x509.ParseECPrivateKey(b) + if err != nil { + panic(fmt.Sprintf("%s: %v", name, err)) + } + return k +} + +func TestJWSEncodeJSON(t *testing.T) { + claims := struct{ Msg string }{"Hello JWS"} + // JWS signed with testKey and "nonce" as the nonce value + // JSON-serialized JWS fields are split for easier testing + const ( + // {"alg":"RS256","jwk":{"e":"AQAB","kty":"RSA","n":"..."},"nonce":"nonce"} + protected = "eyJhbGciOiJSUzI1NiIsImp3ayI6eyJlIjoiQVFBQiIsImt0eSI6" + + "IlJTQSIsIm4iOiI0eGdaM2VSUGt3b1J2eTdxZVJVYm1NRGUwVi14" + + "SDllV0xkdTBpaGVlTGxybUQybXFXWGZQOUllU0tBcGJuMzRnOFR1" + + "QVM5ZzV6aHE4RUxRM2ttanItS1Y4NkdBTWdJNlZBY0dscTNRcnpw" + + "VENmXzMwQWI3LXphd3JmUmFGT05hMUh3RXpQWTFLSG5HVmt4SmM4" + + "NWdOa3dZSTlTWTJSSFh0dmxuM3pzNXdJVE5yZG9zcUVYZWFJa1ZZ" + + "QkVoYmhOdTU0cHAza3hvNlR1V0xpOWU2cFhlV2V0RXdtbEJ3dFda" + + "bFBvaWIyajNUeExCa3NLWmZveUZ5ZWszODBtSGdKQXVtUV9JMmZq" + + "ajk4Xzk3bWszaWhPWTRBZ1ZkQ0RqMXpfR0NvWmtHNVJxN25iQ0d5" + + "b3N5S1d5RFgwMFpzLW5OcVZob0xlSXZYQzRubldkSk1aNnJvZ3h5" + + "UVEifSwibm9uY2UiOiJub25jZSJ9" + // {"Msg":"Hello JWS"} + payload = "eyJNc2ciOiJIZWxsbyBKV1MifQ" + signature = "eAGUikStX_UxyiFhxSLMyuyBcIB80GeBkFROCpap2sW3EmkU_ggF" + + "knaQzxrTfItICSAXsCLIquZ5BbrSWA_4vdEYrwWtdUj7NqFKjHRa" + + "zpLHcoR7r1rEHvkoP1xj49lS5fc3Wjjq8JUhffkhGbWZ8ZVkgPdC" + + "4tMBWiQDoth-x8jELP_3LYOB_ScUXi2mETBawLgOT2K8rA0Vbbmx" + + "hWNlOWuUf-8hL5YX4IOEwsS8JK_TrTq5Zc9My0zHJmaieqDV0UlP" + + "k0onFjPFkGm7MrPSgd0MqRG-4vSAg2O4hDo7rKv4n8POjjXlNQvM" + + "9IPLr8qZ7usYBKhEGwX3yq_eicAwBw" + ) + + b, err := jwsEncodeJSON(claims, testKey, "nonce") + if err != nil { + t.Fatal(err) + } + var jws struct{ Protected, Payload, Signature string } + if err := json.Unmarshal(b, &jws); err != nil { + t.Fatal(err) + } + if jws.Protected != protected { + t.Errorf("protected:\n%s\nwant:\n%s", jws.Protected, protected) + } + if jws.Payload != payload { + t.Errorf("payload:\n%s\nwant:\n%s", jws.Payload, payload) + } + if jws.Signature != signature { + t.Errorf("signature:\n%s\nwant:\n%s", jws.Signature, signature) + } +} + +func TestJWSEncodeJSONEC(t *testing.T) { + tt := []struct { + key *ecdsa.PrivateKey + x, y string + alg, crv string + }{ + {testKeyEC, testKeyECPubX, testKeyECPubY, "ES256", "P-256"}, + {testKeyEC384, testKeyEC384PubX, testKeyEC384PubY, "ES384", "P-384"}, + {testKeyEC512, testKeyEC512PubX, testKeyEC512PubY, "ES512", "P-521"}, + } + for i, test := range tt { + claims := struct{ Msg string }{"Hello JWS"} + b, err := jwsEncodeJSON(claims, test.key, "nonce") + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + var jws struct{ Protected, Payload, Signature string } + if err := json.Unmarshal(b, &jws); err != nil { + t.Errorf("%d: %v", i, err) + continue + } + + b, err = base64.RawURLEncoding.DecodeString(jws.Protected) + if err != nil { + t.Errorf("%d: jws.Protected: %v", i, err) + } + var head struct { + Alg string + Nonce string + JWK struct { + Crv string + Kty string + X string + Y string + } `json:"jwk"` + } + if err := json.Unmarshal(b, &head); err != nil { + t.Errorf("%d: jws.Protected: %v", i, err) + } + if head.Alg != test.alg { + t.Errorf("%d: head.Alg = %q; want %q", i, head.Alg, test.alg) + } + if head.Nonce != "nonce" { + t.Errorf("%d: head.Nonce = %q; want nonce", i, head.Nonce) + } + if head.JWK.Crv != test.crv { + t.Errorf("%d: head.JWK.Crv = %q; want %q", i, head.JWK.Crv, test.crv) + } + if head.JWK.Kty != "EC" { + t.Errorf("%d: head.JWK.Kty = %q; want EC", i, head.JWK.Kty) + } + if head.JWK.X != test.x { + t.Errorf("%d: head.JWK.X = %q; want %q", i, head.JWK.X, test.x) + } + if head.JWK.Y != test.y { + t.Errorf("%d: head.JWK.Y = %q; want %q", i, head.JWK.Y, test.y) + } + } +} + +func TestJWKThumbprintRSA(t *testing.T) { + // Key example from RFC 7638 + const base64N = "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAt" + + "VT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn6" + + "4tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FD" + + "W2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n9" + + "1CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINH" + + "aQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw" + const base64E = "AQAB" + const expected = "NzbLsXh8uDCcd-6MNwXF4W_7noWXFZAfHkxZsRGC9Xs" + + b, err := base64.RawURLEncoding.DecodeString(base64N) + if err != nil { + t.Fatalf("Error parsing example key N: %v", err) + } + n := new(big.Int).SetBytes(b) + + b, err = base64.RawURLEncoding.DecodeString(base64E) + if err != nil { + t.Fatalf("Error parsing example key E: %v", err) + } + e := new(big.Int).SetBytes(b) + + pub := &rsa.PublicKey{N: n, E: int(e.Uint64())} + th, err := JWKThumbprint(pub) + if err != nil { + t.Error(err) + } + if th != expected { + t.Errorf("thumbprint = %q; want %q", th, expected) + } +} + +func TestJWKThumbprintEC(t *testing.T) { + // Key example from RFC 7520 + // expected was computed with + // echo -n '{"crv":"P-521","kty":"EC","x":"","y":""}' | \ + // openssl dgst -binary -sha256 | \ + // base64 | \ + // tr -d '=' | tr '/+' '_-' + const ( + base64X = "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9A5RkT" + + "KqjqvjyekWF-7ytDyRXYgCF5cj0Kt" + base64Y = "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVySsUda" + + "QkAgDPrwQrJmbnX9cwlGfP-HqHZR1" + expected = "dHri3SADZkrush5HU_50AoRhcKFryN-PI6jPBtPL55M" + ) + + b, err := base64.RawURLEncoding.DecodeString(base64X) + if err != nil { + t.Fatalf("Error parsing example key X: %v", err) + } + x := new(big.Int).SetBytes(b) + + b, err = base64.RawURLEncoding.DecodeString(base64Y) + if err != nil { + t.Fatalf("Error parsing example key Y: %v", err) + } + y := new(big.Int).SetBytes(b) + + pub := &ecdsa.PublicKey{Curve: elliptic.P521(), X: x, Y: y} + th, err := JWKThumbprint(pub) + if err != nil { + t.Error(err) + } + if th != expected { + t.Errorf("thumbprint = %q; want %q", th, expected) + } +} + +func TestJWKThumbprintErrUnsupportedKey(t *testing.T) { + _, err := JWKThumbprint(struct{}{}) + if err != ErrUnsupportedKey { + t.Errorf("err = %q; want %q", err, ErrUnsupportedKey) + } +} diff --git a/vendor/golang.org/x/crypto/acme/types.go b/vendor/golang.org/x/crypto/acme/types.go new file mode 100644 index 0000000..3e19974 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/types.go @@ -0,0 +1,329 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +// ACME server response statuses used to describe Authorization and Challenge states. +const ( + StatusUnknown = "unknown" + StatusPending = "pending" + StatusProcessing = "processing" + StatusValid = "valid" + StatusInvalid = "invalid" + StatusRevoked = "revoked" +) + +// CRLReasonCode identifies the reason for a certificate revocation. +type CRLReasonCode int + +// CRL reason codes as defined in RFC 5280. +const ( + CRLReasonUnspecified CRLReasonCode = 0 + CRLReasonKeyCompromise CRLReasonCode = 1 + CRLReasonCACompromise CRLReasonCode = 2 + CRLReasonAffiliationChanged CRLReasonCode = 3 + CRLReasonSuperseded CRLReasonCode = 4 + CRLReasonCessationOfOperation CRLReasonCode = 5 + CRLReasonCertificateHold CRLReasonCode = 6 + CRLReasonRemoveFromCRL CRLReasonCode = 8 + CRLReasonPrivilegeWithdrawn CRLReasonCode = 9 + CRLReasonAACompromise CRLReasonCode = 10 +) + +// ErrUnsupportedKey is returned when an unsupported key type is encountered. +var ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") + +// Error is an ACME error, defined in Problem Details for HTTP APIs doc +// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem. +type Error struct { + // StatusCode is The HTTP status code generated by the origin server. + StatusCode int + // ProblemType is a URI reference that identifies the problem type, + // typically in a "urn:acme:error:xxx" form. + ProblemType string + // Detail is a human-readable explanation specific to this occurrence of the problem. + Detail string + // Header is the original server error response headers. + // It may be nil. + Header http.Header +} + +func (e *Error) Error() string { + return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail) +} + +// AuthorizationError indicates that an authorization for an identifier +// did not succeed. +// It contains all errors from Challenge items of the failed Authorization. +type AuthorizationError struct { + // URI uniquely identifies the failed Authorization. + URI string + + // Identifier is an AuthzID.Value of the failed Authorization. + Identifier string + + // Errors is a collection of non-nil error values of Challenge items + // of the failed Authorization. + Errors []error +} + +func (a *AuthorizationError) Error() string { + e := make([]string, len(a.Errors)) + for i, err := range a.Errors { + e[i] = err.Error() + } + return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; ")) +} + +// RateLimit reports whether err represents a rate limit error and +// any Retry-After duration returned by the server. +// +// See the following for more details on rate limiting: +// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6 +func RateLimit(err error) (time.Duration, bool) { + e, ok := err.(*Error) + if !ok { + return 0, false + } + // Some CA implementations may return incorrect values. + // Use case-insensitive comparison. + if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") { + return 0, false + } + if e.Header == nil { + return 0, true + } + return retryAfter(e.Header.Get("Retry-After"), 0), true +} + +// Account is a user account. It is associated with a private key. +type Account struct { + // URI is the account unique ID, which is also a URL used to retrieve + // account data from the CA. + URI string + + // Contact is a slice of contact info used during registration. + Contact []string + + // The terms user has agreed to. + // A value not matching CurrentTerms indicates that the user hasn't agreed + // to the actual Terms of Service of the CA. + AgreedTerms string + + // Actual terms of a CA. + CurrentTerms string + + // Authz is the authorization URL used to initiate a new authz flow. + Authz string + + // Authorizations is a URI from which a list of authorizations + // granted to this account can be fetched via a GET request. + Authorizations string + + // Certificates is a URI from which a list of certificates + // issued for this account can be fetched via a GET request. + Certificates string +} + +// Directory is ACME server discovery data. +type Directory struct { + // RegURL is an account endpoint URL, allowing for creating new + // and modifying existing accounts. + RegURL string + + // AuthzURL is used to initiate Identifier Authorization flow. + AuthzURL string + + // CertURL is a new certificate issuance endpoint URL. + CertURL string + + // RevokeURL is used to initiate a certificate revocation flow. + RevokeURL string + + // Term is a URI identifying the current terms of service. + Terms string + + // Website is an HTTP or HTTPS URL locating a website + // providing more information about the ACME server. + Website string + + // CAA consists of lowercase hostname elements, which the ACME server + // recognises as referring to itself for the purposes of CAA record validation + // as defined in RFC6844. + CAA []string +} + +// Challenge encodes a returned CA challenge. +// Its Error field may be non-nil if the challenge is part of an Authorization +// with StatusInvalid. +type Challenge struct { + // Type is the challenge type, e.g. "http-01", "tls-sni-02", "dns-01". + Type string + + // URI is where a challenge response can be posted to. + URI string + + // Token is a random value that uniquely identifies the challenge. + Token string + + // Status identifies the status of this challenge. + Status string + + // Error indicates the reason for an authorization failure + // when this challenge was used. + // The type of a non-nil value is *Error. + Error error +} + +// Authorization encodes an authorization response. +type Authorization struct { + // URI uniquely identifies a authorization. + URI string + + // Status identifies the status of an authorization. + Status string + + // Identifier is what the account is authorized to represent. + Identifier AuthzID + + // Challenges that the client needs to fulfill in order to prove possession + // of the identifier (for pending authorizations). + // For final authorizations, the challenges that were used. + Challenges []*Challenge + + // A collection of sets of challenges, each of which would be sufficient + // to prove possession of the identifier. + // Clients must complete a set of challenges that covers at least one set. + // Challenges are identified by their indices in the challenges array. + // If this field is empty, the client needs to complete all challenges. + Combinations [][]int +} + +// AuthzID is an identifier that an account is authorized to represent. +type AuthzID struct { + Type string // The type of identifier, e.g. "dns". + Value string // The identifier itself, e.g. "example.org". +} + +// wireAuthz is ACME JSON representation of Authorization objects. +type wireAuthz struct { + Status string + Challenges []wireChallenge + Combinations [][]int + Identifier struct { + Type string + Value string + } +} + +func (z *wireAuthz) authorization(uri string) *Authorization { + a := &Authorization{ + URI: uri, + Status: z.Status, + Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value}, + Combinations: z.Combinations, // shallow copy + Challenges: make([]*Challenge, len(z.Challenges)), + } + for i, v := range z.Challenges { + a.Challenges[i] = v.challenge() + } + return a +} + +func (z *wireAuthz) error(uri string) *AuthorizationError { + err := &AuthorizationError{ + URI: uri, + Identifier: z.Identifier.Value, + } + for _, raw := range z.Challenges { + if raw.Error != nil { + err.Errors = append(err.Errors, raw.Error.error(nil)) + } + } + return err +} + +// wireChallenge is ACME JSON challenge representation. +type wireChallenge struct { + URI string `json:"uri"` + Type string + Token string + Status string + Error *wireError +} + +func (c *wireChallenge) challenge() *Challenge { + v := &Challenge{ + URI: c.URI, + Type: c.Type, + Token: c.Token, + Status: c.Status, + } + if v.Status == "" { + v.Status = StatusPending + } + if c.Error != nil { + v.Error = c.Error.error(nil) + } + return v +} + +// wireError is a subset of fields of the Problem Details object +// as described in https://tools.ietf.org/html/rfc7807#section-3.1. +type wireError struct { + Status int + Type string + Detail string +} + +func (e *wireError) error(h http.Header) *Error { + return &Error{ + StatusCode: e.Status, + ProblemType: e.Type, + Detail: e.Detail, + Header: h, + } +} + +// CertOption is an optional argument type for the TLSSNIxChallengeCert methods for +// customizing a temporary certificate for TLS-SNI challenges. +type CertOption interface { + privateCertOpt() +} + +// WithKey creates an option holding a private/public key pair. +// The private part signs a certificate, and the public part represents the signee. +func WithKey(key crypto.Signer) CertOption { + return &certOptKey{key} +} + +type certOptKey struct { + key crypto.Signer +} + +func (*certOptKey) privateCertOpt() {} + +// WithTemplate creates an option for specifying a certificate template. +// See x509.CreateCertificate for template usage details. +// +// In TLSSNIxChallengeCert methods, the template is also used as parent, +// resulting in a self-signed certificate. +// The DNSNames field of t is always overwritten for tls-sni challenge certs. +func WithTemplate(t *x509.Certificate) CertOption { + return (*certOptTemplate)(t) +} + +type certOptTemplate x509.Certificate + +func (*certOptTemplate) privateCertOpt() {} diff --git a/vendor/golang.org/x/crypto/acme/types_test.go b/vendor/golang.org/x/crypto/acme/types_test.go new file mode 100644 index 0000000..a7553e6 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/types_test.go @@ -0,0 +1,63 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "errors" + "net/http" + "testing" + "time" +) + +func TestRateLimit(t *testing.T) { + now := time.Date(2017, 04, 27, 10, 0, 0, 0, time.UTC) + f := timeNow + defer func() { timeNow = f }() + timeNow = func() time.Time { return now } + + h120, hTime := http.Header{}, http.Header{} + h120.Set("Retry-After", "120") + hTime.Set("Retry-After", "Tue Apr 27 11:00:00 2017") + + err1 := &Error{ + ProblemType: "urn:ietf:params:acme:error:nolimit", + Header: h120, + } + err2 := &Error{ + ProblemType: "urn:ietf:params:acme:error:rateLimited", + Header: h120, + } + err3 := &Error{ + ProblemType: "urn:ietf:params:acme:error:rateLimited", + Header: nil, + } + err4 := &Error{ + ProblemType: "urn:ietf:params:acme:error:rateLimited", + Header: hTime, + } + + tt := []struct { + err error + res time.Duration + ok bool + }{ + {nil, 0, false}, + {errors.New("dummy"), 0, false}, + {err1, 0, false}, + {err2, 2 * time.Minute, true}, + {err3, 0, true}, + {err4, time.Hour, true}, + } + for i, test := range tt { + res, ok := RateLimit(test.err) + if ok != test.ok { + t.Errorf("%d: RateLimit(%+v): ok = %v; want %v", i, test.err, ok, test.ok) + continue + } + if res != test.res { + t.Errorf("%d: RateLimit(%+v) = %v; want %v", i, test.err, res, test.res) + } + } +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 0000000..b423fea --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// +// Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// +// Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the numbers of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2_test.go b/vendor/golang.org/x/crypto/argon2/argon2_test.go new file mode 100644 index 0000000..775b97a --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2_test.go @@ -0,0 +1,233 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "bytes" + "encoding/hex" + "testing" +) + +var ( + genKatPassword = []byte{ + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + } + genKatSalt = []byte{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} + genKatSecret = []byte{0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03} + genKatAAD = []byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04} +) + +func TestArgon2(t *testing.T) { + defer func(sse4 bool) { useSSE4 = sse4 }(useSSE4) + + if useSSE4 { + t.Log("SSE4.1 version") + testArgon2i(t) + testArgon2d(t) + testArgon2id(t) + useSSE4 = false + } + t.Log("generic version") + testArgon2i(t) + testArgon2d(t) + testArgon2id(t) +} + +func testArgon2d(t *testing.T) { + want := []byte{ + 0x51, 0x2b, 0x39, 0x1b, 0x6f, 0x11, 0x62, 0x97, + 0x53, 0x71, 0xd3, 0x09, 0x19, 0x73, 0x42, 0x94, + 0xf8, 0x68, 0xe3, 0xbe, 0x39, 0x84, 0xf3, 0xc1, + 0xa1, 0x3a, 0x4d, 0xb9, 0xfa, 0xbe, 0x4a, 0xcb, + } + hash := deriveKey(argon2d, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32) + if !bytes.Equal(hash, want) { + t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want)) + } +} + +func testArgon2i(t *testing.T) { + want := []byte{ + 0xc8, 0x14, 0xd9, 0xd1, 0xdc, 0x7f, 0x37, 0xaa, + 0x13, 0xf0, 0xd7, 0x7f, 0x24, 0x94, 0xbd, 0xa1, + 0xc8, 0xde, 0x6b, 0x01, 0x6d, 0xd3, 0x88, 0xd2, + 0x99, 0x52, 0xa4, 0xc4, 0x67, 0x2b, 0x6c, 0xe8, + } + hash := deriveKey(argon2i, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32) + if !bytes.Equal(hash, want) { + t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want)) + } +} + +func testArgon2id(t *testing.T) { + want := []byte{ + 0x0d, 0x64, 0x0d, 0xf5, 0x8d, 0x78, 0x76, 0x6c, + 0x08, 0xc0, 0x37, 0xa3, 0x4a, 0x8b, 0x53, 0xc9, + 0xd0, 0x1e, 0xf0, 0x45, 0x2d, 0x75, 0xb6, 0x5e, + 0xb5, 0x25, 0x20, 0xe9, 0x6b, 0x01, 0xe6, 0x59, + } + hash := deriveKey(argon2id, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32) + if !bytes.Equal(hash, want) { + t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want)) + } +} + +func TestVectors(t *testing.T) { + password, salt := []byte("password"), []byte("somesalt") + for i, v := range testVectors { + want, err := hex.DecodeString(v.hash) + if err != nil { + t.Fatalf("Test %d: failed to decode hash: %v", i, err) + } + hash := deriveKey(v.mode, password, salt, nil, nil, v.time, v.memory, v.threads, uint32(len(want))) + if !bytes.Equal(hash, want) { + t.Errorf("Test %d - got: %s want: %s", i, hex.EncodeToString(hash), hex.EncodeToString(want)) + } + } +} + +func benchmarkArgon2(mode int, time, memory uint32, threads uint8, keyLen uint32, b *testing.B) { + password := []byte("password") + salt := []byte("choosing random salts is hard") + b.ReportAllocs() + for i := 0; i < b.N; i++ { + deriveKey(mode, password, salt, nil, nil, time, memory, threads, keyLen) + } +} + +func BenchmarkArgon2i(b *testing.B) { + b.Run(" Time: 3 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 3, 32*1024, 1, 32, b) }) + b.Run(" Time: 4 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 4, 32*1024, 1, 32, b) }) + b.Run(" Time: 5 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 5, 32*1024, 1, 32, b) }) + b.Run(" Time: 3 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 3, 64*1024, 4, 32, b) }) + b.Run(" Time: 4 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 4, 64*1024, 4, 32, b) }) + b.Run(" Time: 5 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 5, 64*1024, 4, 32, b) }) +} + +func BenchmarkArgon2d(b *testing.B) { + b.Run(" Time: 3, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 3, 32*1024, 1, 32, b) }) + b.Run(" Time: 4, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 4, 32*1024, 1, 32, b) }) + b.Run(" Time: 5, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 5, 32*1024, 1, 32, b) }) + b.Run(" Time: 3, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 3, 64*1024, 4, 32, b) }) + b.Run(" Time: 4, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 4, 64*1024, 4, 32, b) }) + b.Run(" Time: 5, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 5, 64*1024, 4, 32, b) }) +} + +func BenchmarkArgon2id(b *testing.B) { + b.Run(" Time: 3, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 3, 32*1024, 1, 32, b) }) + b.Run(" Time: 4, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 4, 32*1024, 1, 32, b) }) + b.Run(" Time: 5, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 5, 32*1024, 1, 32, b) }) + b.Run(" Time: 3, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 3, 64*1024, 4, 32, b) }) + b.Run(" Time: 4, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 4, 64*1024, 4, 32, b) }) + b.Run(" Time: 5, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 5, 64*1024, 4, 32, b) }) +} + +// Generated with the CLI of https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +var testVectors = []struct { + mode int + time, memory uint32 + threads uint8 + hash string +}{ + { + mode: argon2i, time: 1, memory: 64, threads: 1, + hash: "b9c401d1844a67d50eae3967dc28870b22e508092e861a37", + }, + { + mode: argon2d, time: 1, memory: 64, threads: 1, + hash: "8727405fd07c32c78d64f547f24150d3f2e703a89f981a19", + }, + { + mode: argon2id, time: 1, memory: 64, threads: 1, + hash: "655ad15eac652dc59f7170a7332bf49b8469be1fdb9c28bb", + }, + { + mode: argon2i, time: 2, memory: 64, threads: 1, + hash: "8cf3d8f76a6617afe35fac48eb0b7433a9a670ca4a07ed64", + }, + { + mode: argon2d, time: 2, memory: 64, threads: 1, + hash: "3be9ec79a69b75d3752acb59a1fbb8b295a46529c48fbb75", + }, + { + mode: argon2id, time: 2, memory: 64, threads: 1, + hash: "068d62b26455936aa6ebe60060b0a65870dbfa3ddf8d41f7", + }, + { + mode: argon2i, time: 2, memory: 64, threads: 2, + hash: "2089f3e78a799720f80af806553128f29b132cafe40d059f", + }, + { + mode: argon2d, time: 2, memory: 64, threads: 2, + hash: "68e2462c98b8bc6bb60ec68db418ae2c9ed24fc6748a40e9", + }, + { + mode: argon2id, time: 2, memory: 64, threads: 2, + hash: "350ac37222f436ccb5c0972f1ebd3bf6b958bf2071841362", + }, + { + mode: argon2i, time: 3, memory: 256, threads: 2, + hash: "f5bbf5d4c3836af13193053155b73ec7476a6a2eb93fd5e6", + }, + { + mode: argon2d, time: 3, memory: 256, threads: 2, + hash: "f4f0669218eaf3641f39cc97efb915721102f4b128211ef2", + }, + { + mode: argon2id, time: 3, memory: 256, threads: 2, + hash: "4668d30ac4187e6878eedeacf0fd83c5a0a30db2cc16ef0b", + }, + { + mode: argon2i, time: 4, memory: 4096, threads: 4, + hash: "a11f7b7f3f93f02ad4bddb59ab62d121e278369288a0d0e7", + }, + { + mode: argon2d, time: 4, memory: 4096, threads: 4, + hash: "935598181aa8dc2b720914aa6435ac8d3e3a4210c5b0fb2d", + }, + { + mode: argon2id, time: 4, memory: 4096, threads: 4, + hash: "145db9733a9f4ee43edf33c509be96b934d505a4efb33c5a", + }, + { + mode: argon2i, time: 4, memory: 1024, threads: 8, + hash: "0cdd3956aa35e6b475a7b0c63488822f774f15b43f6e6e17", + }, + { + mode: argon2d, time: 4, memory: 1024, threads: 8, + hash: "83604fc2ad0589b9d055578f4d3cc55bc616df3578a896e9", + }, + { + mode: argon2id, time: 4, memory: 1024, threads: 8, + hash: "8dafa8e004f8ea96bf7c0f93eecf67a6047476143d15577f", + }, + { + mode: argon2i, time: 2, memory: 64, threads: 3, + hash: "5cab452fe6b8479c8661def8cd703b611a3905a6d5477fe6", + }, + { + mode: argon2d, time: 2, memory: 64, threads: 3, + hash: "22474a423bda2ccd36ec9afd5119e5c8949798cadf659f51", + }, + { + mode: argon2id, time: 2, memory: 64, threads: 3, + hash: "4a15b31aec7c2590b87d1f520be7d96f56658172deaa3079", + }, + { + mode: argon2i, time: 3, memory: 1024, threads: 6, + hash: "d236b29c2b2a09babee842b0dec6aa1e83ccbdea8023dced", + }, + { + mode: argon2d, time: 3, memory: 1024, threads: 6, + hash: "a3351b0319a53229152023d9206902f4ef59661cdca89481", + }, + { + mode: argon2id, time: 3, memory: 1024, threads: 6, + hash: "1640b932f4b60e272f5d2207b9a9c626ffa1bd88d2349016", + }, +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 0000000..10f4694 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 0000000..bb2b0d8 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,61 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package argon2 + +func init() { + useSSE4 = supportsSSE4() +} + +//go:noescape +func supportsSSE4() bool + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 0000000..8a83f7c --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,252 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) +TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func supportsSSE4() bool +TEXT ·supportsSSE4(SB), 4, $0-1 + MOVL $1, AX + CPUID + SHRL $19, CX // Bit 19 indicates SSE4 support + ANDL $1, CX // CX != 0 if support SSE4 + MOVB CX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 0000000..a481b22 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 0000000..baf7b55 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 0000000..fc31160 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 0000000..aeb73f8 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,295 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. +var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). + p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + // We copy the key to prevent changing the underlying array. + ckey := append(key[:len(key):len(key)], 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n++ + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n++ + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go new file mode 100644 index 0000000..aecf759 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go @@ -0,0 +1,243 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import ( + "bytes" + "fmt" + "testing" +) + +func TestBcryptingIsEasy(t *testing.T) { + pass := []byte("mypassword") + hp, err := GenerateFromPassword(pass, 0) + if err != nil { + t.Fatalf("GenerateFromPassword error: %s", err) + } + + if CompareHashAndPassword(hp, pass) != nil { + t.Errorf("%v should hash %s correctly", hp, pass) + } + + notPass := "notthepass" + err = CompareHashAndPassword(hp, []byte(notPass)) + if err != ErrMismatchedHashAndPassword { + t.Errorf("%v and %s should be mismatched", hp, notPass) + } +} + +func TestBcryptingIsCorrect(t *testing.T) { + pass := []byte("allmine") + salt := []byte("XajjQvNhvvRt5GSeFk1xFe") + expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga") + + hash, err := bcrypt(pass, 10, salt) + if err != nil { + t.Fatalf("bcrypt blew up: %v", err) + } + if !bytes.HasSuffix(expectedHash, hash) { + t.Errorf("%v should be the suffix of %v", hash, expectedHash) + } + + h, err := newFromHash(expectedHash) + if err != nil { + t.Errorf("Unable to parse %s: %v", string(expectedHash), err) + } + + // This is not the safe way to compare these hashes. We do this only for + // testing clarity. Use bcrypt.CompareHashAndPassword() + if err == nil && !bytes.Equal(expectedHash, h.Hash()) { + t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash) + } +} + +func TestVeryShortPasswords(t *testing.T) { + key := []byte("k") + salt := []byte("XajjQvNhvvRt5GSeFk1xFe") + _, err := bcrypt(key, 10, salt) + if err != nil { + t.Errorf("One byte key resulted in error: %s", err) + } +} + +func TestTooLongPasswordsWork(t *testing.T) { + salt := []byte("XajjQvNhvvRt5GSeFk1xFe") + // One byte over the usual 56 byte limit that blowfish has + tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456") + tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C") + hash, err := bcrypt(tooLongPass, 10, salt) + if err != nil { + t.Fatalf("bcrypt blew up on long password: %v", err) + } + if !bytes.HasSuffix(tooLongExpected, hash) { + t.Errorf("%v should be the suffix of %v", hash, tooLongExpected) + } +} + +type InvalidHashTest struct { + err error + hash []byte +} + +var invalidTests = []InvalidHashTest{ + {ErrHashTooShort, []byte("$2a$10$fooo")}, + {ErrHashTooShort, []byte("$2a")}, + {HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")}, + {InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")}, + {InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")}, +} + +func TestInvalidHashErrors(t *testing.T) { + check := func(name string, expected, err error) { + if err == nil { + t.Errorf("%s: Should have returned an error", name) + } + if err != nil && err != expected { + t.Errorf("%s gave err %v but should have given %v", name, err, expected) + } + } + for _, iht := range invalidTests { + _, err := newFromHash(iht.hash) + check("newFromHash", iht.err, err) + err = CompareHashAndPassword(iht.hash, []byte("anything")) + check("CompareHashAndPassword", iht.err, err) + } +} + +func TestUnpaddedBase64Encoding(t *testing.T) { + original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30} + encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe") + + encoded := base64Encode(original) + + if !bytes.Equal(encodedOriginal, encoded) { + t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal) + } + + decoded, err := base64Decode(encodedOriginal) + if err != nil { + t.Fatalf("base64Decode blew up: %s", err) + } + + if !bytes.Equal(decoded, original) { + t.Errorf("Decoded %v should have equaled %v", decoded, original) + } +} + +func TestCost(t *testing.T) { + suffix := "XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C" + for _, vers := range []string{"2a", "2"} { + for _, cost := range []int{4, 10} { + s := fmt.Sprintf("$%s$%02d$%s", vers, cost, suffix) + h := []byte(s) + actual, err := Cost(h) + if err != nil { + t.Errorf("Cost, error: %s", err) + continue + } + if actual != cost { + t.Errorf("Cost, expected: %d, actual: %d", cost, actual) + } + } + } + _, err := Cost([]byte("$a$a$" + suffix)) + if err == nil { + t.Errorf("Cost, malformed but no error returned") + } +} + +func TestCostValidationInHash(t *testing.T) { + if testing.Short() { + return + } + + pass := []byte("mypassword") + + for c := 0; c < MinCost; c++ { + p, _ := newFromPassword(pass, c) + if p.cost != DefaultCost { + t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost) + } + } + + p, _ := newFromPassword(pass, 14) + if p.cost != 14 { + t.Errorf("newFromPassword should default cost to 14, but was %d", p.cost) + } + + hp, _ := newFromHash(p.Hash()) + if p.cost != hp.cost { + t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost) + } + + _, err := newFromPassword(pass, 32) + if err == nil { + t.Fatalf("newFromPassword: should return a cost error") + } + if err != InvalidCostError(32) { + t.Errorf("newFromPassword: should return cost error, got %#v", err) + } +} + +func TestCostReturnsWithLeadingZeroes(t *testing.T) { + hp, _ := newFromPassword([]byte("abcdefgh"), 7) + cost := hp.Hash()[4:7] + expected := []byte("07$") + + if !bytes.Equal(expected, cost) { + t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected) + } +} + +func TestMinorNotRequired(t *testing.T) { + noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga") + h, err := newFromHash(noMinorHash) + if err != nil { + t.Fatalf("No minor hash blew up: %s", err) + } + if h.minor != 0 { + t.Errorf("Should leave minor version at 0, but was %d", h.minor) + } + + if !bytes.Equal(noMinorHash, h.Hash()) { + t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash()) + } +} + +func BenchmarkEqual(b *testing.B) { + b.StopTimer() + passwd := []byte("somepasswordyoulike") + hash, _ := GenerateFromPassword(passwd, 10) + b.StartTimer() + for i := 0; i < b.N; i++ { + CompareHashAndPassword(hash, passwd) + } +} + +func BenchmarkGeneration(b *testing.B) { + b.StopTimer() + passwd := []byte("mylongpassword1234") + b.StartTimer() + for i := 0; i < b.N; i++ { + GenerateFromPassword(passwd, 10) + } +} + +// See Issue https://github.com/golang/go/issues/20425. +func TestNoSideEffectsFromCompare(t *testing.T) { + source := []byte("passw0rd123456") + password := source[:len(source)-6] + token := source[len(source)-6:] + want := make([]byte, len(source)) + copy(want, source) + + wantHash := []byte("$2a$10$LK9XRuhNxHHCvjX3tdkRKei1QiCDUKrJRhZv7WWZPuQGRUM92rOUa") + _ = CompareHashAndPassword(wantHash, password) + + got := bytes.Join([][]byte{password, token}, []byte("")) + if !bytes.Equal(got, want) { + t.Errorf("got=%q want=%q", got, want) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 0000000..6dedb89 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,221 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) +} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 0000000..8c41cf6 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +package blake2b + +func init() { + useAVX2 = supportsAVX2() + useAVX = supportsAVX() + useSSE4 = supportsSSE4() +} + +//go:noescape +func supportsSSE4() bool + +//go:noescape +func supportsAVX() bool + +//go:noescape +func supportsAVX2() bool + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useAVX2 { + hashBlocksAVX2(h, c, flag, blocks) + } else if useAVX { + hashBlocksAVX(h, c, flag, blocks) + } else if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 0000000..784bce6 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,762 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + MOVQ SP, R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ CX, 16(SP) + XORQ CX, CX + MOVQ CX, 24(SP) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(SP) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(SP) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(SP) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(SP), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(SP) + VMOVDQA Y13, 64(SP) + VMOVDQA Y14, 96(SP) + VMOVDQA Y15, 128(SP) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(SP) + VMOVDQA Y13, 192(SP) + VMOVDQA Y14, 224(SP) + VMOVDQA Y15, 256(SP) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) + ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + MOVQ DX, SP + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(SP), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(SP) + VMOVDQA X13, 32(SP) + VMOVDQA X14, 48(SP) + VMOVDQA X15, 64(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(SP) + VMOVDQA X13, 96(SP) + VMOVDQA X14, 112(SP) + VMOVDQA X15, 128(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(SP) + VMOVDQA X13, 160(SP) + VMOVDQA X14, 176(SP) + VMOVDQA X15, 192(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(SP) + VMOVDQA X13, 224(SP) + VMOVDQA X14, 240(SP) + VMOVDQA X15, 256(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + MOVQ BP, SP + RET + +// func supportsAVX2() bool +TEXT ·supportsAVX2(SB), 4, $0-1 + MOVQ runtime·support_avx2(SB), AX + MOVB AX, ret+0(FP) + RET + +// func supportsAVX() bool +TEXT ·supportsAVX(SB), 4, $0-1 + MOVQ runtime·support_avx(SB), AX + MOVB AX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 0000000..2ab7c30 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7,amd64,!gccgo,!appengine + +package blake2b + +func init() { + useSSE4 = supportsSSE4() +} + +//go:noescape +func supportsSSE4() bool + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 0000000..6453074 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,290 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(SP), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(SP) + MOVO X9, 32(SP) + MOVO X10, 48(SP) + MOVO X11, 64(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(SP) + MOVO X9, 96(SP) + MOVO X10, 112(SP) + MOVO X11, 128(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(SP) + MOVO X9, 160(SP) + MOVO X10, 176(SP) + MOVO X11, 192(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(SP) + MOVO X9, 224(SP) + MOVO X10, 240(SP) + MOVO X11, 256(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + MOVQ BP, SP + RET + +// func supportsSSE4() bool +TEXT ·supportsSSE4(SB), 4, $0-1 + MOVL $1, AX + CPUID + SHRL $19, CX // Bit 19 indicates SSE4 support + ANDL $1, CX // CX != 0 if support SSE4 + MOVB CX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 0000000..4bd2abc --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,179 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import "encoding/binary" + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. +var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 0000000..da156a1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_test.go b/vendor/golang.org/x/crypto/blake2b/blake2b_test.go new file mode 100644 index 0000000..5d68bbf --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_test.go @@ -0,0 +1,798 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "bytes" + "encoding/hex" + "fmt" + "hash" + "io" + "testing" +) + +func fromHex(s string) []byte { + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +func TestHashes(t *testing.T) { + defer func(sse4, avx, avx2 bool) { + useSSE4, useAVX, useAVX2 = sse4, avx, avx2 + }(useSSE4, useAVX, useAVX2) + + if useAVX2 { + t.Log("AVX2 version") + testHashes(t) + useAVX2 = false + } + if useAVX { + t.Log("AVX version") + testHashes(t) + useAVX = false + } + if useSSE4 { + t.Log("SSE4 version") + testHashes(t) + useSSE4 = false + } + t.Log("generic version") + testHashes(t) +} + +func TestHashes2X(t *testing.T) { + defer func(sse4, avx, avx2 bool) { + useSSE4, useAVX, useAVX2 = sse4, avx, avx2 + }(useSSE4, useAVX, useAVX2) + + if useAVX2 { + t.Log("AVX2 version") + testHashes2X(t) + useAVX2 = false + } + if useAVX { + t.Log("AVX version") + testHashes2X(t) + useAVX = false + } + if useSSE4 { + t.Log("SSE4 version") + testHashes2X(t) + useSSE4 = false + } + t.Log("generic version") + testHashes2X(t) +} + +func testHashes(t *testing.T) { + key, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f") + + input := make([]byte, 255) + for i := range input { + input[i] = byte(i) + } + + for i, expectedHex := range hashes { + h, err := New512(key) + if err != nil { + t.Fatalf("#%d: error from New512: %v", i, err) + } + + h.Write(input[:i]) + sum := h.Sum(nil) + + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (single write): got %s, wanted %s", i, gotHex, expectedHex) + } + + h.Reset() + for j := 0; j < i; j++ { + h.Write(input[j : j+1]) + } + + sum = h.Sum(sum[:0]) + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (byte-by-byte): got %s, wanted %s", i, gotHex, expectedHex) + } + } +} + +func testHashes2X(t *testing.T) { + key, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f") + + input := make([]byte, 256) + for i := range input { + input[i] = byte(i) + } + + for i, expectedHex := range hashes2X { + length := uint32(len(expectedHex) / 2) + sum := make([]byte, int(length)) + + h, err := NewXOF(length, key) + if err != nil { + t.Fatalf("#%d: error from NewXOF: %v", i, err) + } + + if _, err := h.Write(input); err != nil { + t.Fatalf("#%d (single write): error from Write: %v", i, err) + } + if _, err := h.Read(sum); err != nil { + t.Fatalf("#%d (single write): error from Read: %v", i, err) + } + if n, err := h.Read(sum); n != 0 || err != io.EOF { + t.Fatalf("#%d (single write): Read did not return (0, io.EOF) after exhaustion, got (%v, %v)", i, n, err) + } + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (single write): got %s, wanted %s", i, gotHex, expectedHex) + } + + h.Reset() + for j := 0; j < len(input); j++ { + h.Write(input[j : j+1]) + } + for j := 0; j < len(sum); j++ { + h = h.Clone() + if _, err := h.Read(sum[j : j+1]); err != nil { + t.Fatalf("#%d (byte-by-byte) - Read %d: error from Read: %v", i, j, err) + } + } + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (byte-by-byte): got %s, wanted %s", i, gotHex, expectedHex) + } + } + + h, err := NewXOF(OutputLengthUnknown, key) + if err != nil { + t.Fatalf("#unknown length: error from NewXOF: %v", err) + } + if _, err := h.Write(input); err != nil { + t.Fatalf("#unknown length: error from Write: %v", err) + } + + var result [64]byte + if n, err := h.Read(result[:]); err != nil { + t.Fatalf("#unknown length: error from Read: %v", err) + } else if n != len(result) { + t.Fatalf("#unknown length: Read returned %d bytes, want %d", n, len(result)) + } + + const expected = "3dbba8516da76bf7330055c66ea36cf1005e92714262b24d9710f51d9e126406e1bcd6497059f9331f1091c3634b695428d475ed432f987040575520a1c29f5e" + if fmt.Sprintf("%x", result) != expected { + t.Fatalf("#unknown length: bad result %x, wanted %s", result, expected) + } +} + +func generateSequence(out []byte, seed uint32) { + a := 0xDEAD4BAD * seed // prime + b := uint32(1) + + for i := range out { // fill the buf + a, b = b, a+b + out[i] = byte(b >> 24) + } +} + +func computeMAC(msg []byte, hashSize int, key []byte) (sum []byte) { + var h hash.Hash + switch hashSize { + case Size: + h, _ = New512(key) + case Size384: + h, _ = New384(key) + case Size256: + h, _ = New256(key) + case 20: + h, _ = newDigest(20, key) + default: + panic("unexpected hashSize") + } + + h.Write(msg) + return h.Sum(sum) +} + +func computeHash(msg []byte, hashSize int) (sum []byte) { + switch hashSize { + case Size: + hash := Sum512(msg) + return hash[:] + case Size384: + hash := Sum384(msg) + return hash[:] + case Size256: + hash := Sum256(msg) + return hash[:] + case 20: + var hash [64]byte + checkSum(&hash, 20, msg) + return hash[:20] + default: + panic("unexpected hashSize") + } +} + +// Test function from RFC 7693. +func TestSelfTest(t *testing.T) { + hashLens := [4]int{20, 32, 48, 64} + msgLens := [6]int{0, 3, 128, 129, 255, 1024} + + msg := make([]byte, 1024) + key := make([]byte, 64) + + h, _ := New256(nil) + for _, hashSize := range hashLens { + for _, msgLength := range msgLens { + generateSequence(msg[:msgLength], uint32(msgLength)) // unkeyed hash + + md := computeHash(msg[:msgLength], hashSize) + h.Write(md) + + generateSequence(key[:], uint32(hashSize)) // keyed hash + md = computeMAC(msg[:msgLength], hashSize, key[:hashSize]) + h.Write(md) + } + } + + sum := h.Sum(nil) + expected := [32]byte{ + 0xc2, 0x3a, 0x78, 0x00, 0xd9, 0x81, 0x23, 0xbd, + 0x10, 0xf5, 0x06, 0xc6, 0x1e, 0x29, 0xda, 0x56, + 0x03, 0xd7, 0x63, 0xb8, 0xbb, 0xad, 0x2e, 0x73, + 0x7f, 0x5e, 0x76, 0x5a, 0x7b, 0xcc, 0xd4, 0x75, + } + if !bytes.Equal(sum, expected[:]) { + t.Fatalf("got %x, wanted %x", sum, expected) + } +} + +// Benchmarks + +func benchmarkSum(b *testing.B, size int) { + data := make([]byte, size) + b.SetBytes(int64(size)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Sum512(data) + } +} + +func benchmarkWrite(b *testing.B, size int) { + data := make([]byte, size) + h, _ := New512(nil) + b.SetBytes(int64(size)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + h.Write(data) + } +} + +func BenchmarkWrite128(b *testing.B) { benchmarkWrite(b, 128) } +func BenchmarkWrite1K(b *testing.B) { benchmarkWrite(b, 1024) } + +func BenchmarkSum128(b *testing.B) { benchmarkSum(b, 128) } +func BenchmarkSum1K(b *testing.B) { benchmarkSum(b, 1024) } + +// These values were taken from https://blake2.net/blake2b-test.txt. +var hashes = []string{ + "10ebb67700b1868efb4417987acf4690ae9d972fb7a590c2f02871799aaa4786b5e996e8f0f4eb981fc214b005f42d2ff4233499391653df7aefcbc13fc51568", + "961f6dd1e4dd30f63901690c512e78e4b45e4742ed197c3c5e45c549fd25f2e4187b0bc9fe30492b16b0d0bc4ef9b0f34c7003fac09a5ef1532e69430234cebd", + "da2cfbe2d8409a0f38026113884f84b50156371ae304c4430173d08a99d9fb1b983164a3770706d537f49e0c916d9f32b95cc37a95b99d857436f0232c88a965", + "33d0825dddf7ada99b0e7e307104ad07ca9cfd9692214f1561356315e784f3e5a17e364ae9dbb14cb2036df932b77f4b292761365fb328de7afdc6d8998f5fc1", + "beaa5a3d08f3807143cf621d95cd690514d0b49efff9c91d24b59241ec0eefa5f60196d407048bba8d2146828ebcb0488d8842fd56bb4f6df8e19c4b4daab8ac", + "098084b51fd13deae5f4320de94a688ee07baea2800486689a8636117b46c1f4c1f6af7f74ae7c857600456a58a3af251dc4723a64cc7c0a5ab6d9cac91c20bb", + "6044540d560853eb1c57df0077dd381094781cdb9073e5b1b3d3f6c7829e12066bbaca96d989a690de72ca3133a83652ba284a6d62942b271ffa2620c9e75b1f", + "7a8cfe9b90f75f7ecb3acc053aaed6193112b6f6a4aeeb3f65d3de541942deb9e2228152a3c4bbbe72fc3b12629528cfbb09fe630f0474339f54abf453e2ed52", + "380beaf6ea7cc9365e270ef0e6f3a64fb902acae51dd5512f84259ad2c91f4bc4108db73192a5bbfb0cbcf71e46c3e21aee1c5e860dc96e8eb0b7b8426e6abe9", + "60fe3c4535e1b59d9a61ea8500bfac41a69dffb1ceadd9aca323e9a625b64da5763bad7226da02b9c8c4f1a5de140ac5a6c1124e4f718ce0b28ea47393aa6637", + "4fe181f54ad63a2983feaaf77d1e7235c2beb17fa328b6d9505bda327df19fc37f02c4b6f0368ce23147313a8e5738b5fa2a95b29de1c7f8264eb77b69f585cd", + "f228773ce3f3a42b5f144d63237a72d99693adb8837d0e112a8a0f8ffff2c362857ac49c11ec740d1500749dac9b1f4548108bf3155794dcc9e4082849e2b85b", + "962452a8455cc56c8511317e3b1f3b2c37df75f588e94325fdd77070359cf63a9ae6e930936fdf8e1e08ffca440cfb72c28f06d89a2151d1c46cd5b268ef8563", + "43d44bfa18768c59896bf7ed1765cb2d14af8c260266039099b25a603e4ddc5039d6ef3a91847d1088d401c0c7e847781a8a590d33a3c6cb4df0fab1c2f22355", + "dcffa9d58c2a4ca2cdbb0c7aa4c4c1d45165190089f4e983bb1c2cab4aaeff1fa2b5ee516fecd780540240bf37e56c8bcca7fab980e1e61c9400d8a9a5b14ac6", + "6fbf31b45ab0c0b8dad1c0f5f4061379912dde5aa922099a030b725c73346c524291adef89d2f6fd8dfcda6d07dad811a9314536c2915ed45da34947e83de34e", + "a0c65bddde8adef57282b04b11e7bc8aab105b99231b750c021f4a735cb1bcfab87553bba3abb0c3e64a0b6955285185a0bd35fb8cfde557329bebb1f629ee93", + "f99d815550558e81eca2f96718aed10d86f3f1cfb675cce06b0eff02f617c5a42c5aa760270f2679da2677c5aeb94f1142277f21c7f79f3c4f0cce4ed8ee62b1", + "95391da8fc7b917a2044b3d6f5374e1ca072b41454d572c7356c05fd4bc1e0f40b8bb8b4a9f6bce9be2c4623c399b0dca0dab05cb7281b71a21b0ebcd9e55670", + "04b9cd3d20d221c09ac86913d3dc63041989a9a1e694f1e639a3ba7e451840f750c2fc191d56ad61f2e7936bc0ac8e094b60caeed878c18799045402d61ceaf9", + "ec0e0ef707e4ed6c0c66f9e089e4954b058030d2dd86398fe84059631f9ee591d9d77375355149178c0cf8f8e7c49ed2a5e4f95488a2247067c208510fadc44c", + "9a37cce273b79c09913677510eaf7688e89b3314d3532fd2764c39de022a2945b5710d13517af8ddc0316624e73bec1ce67df15228302036f330ab0cb4d218dd", + "4cf9bb8fb3d4de8b38b2f262d3c40f46dfe747e8fc0a414c193d9fcf753106ce47a18f172f12e8a2f1c26726545358e5ee28c9e2213a8787aafbc516d2343152", + "64e0c63af9c808fd893137129867fd91939d53f2af04be4fa268006100069b2d69daa5c5d8ed7fddcb2a70eeecdf2b105dd46a1e3b7311728f639ab489326bc9", + "5e9c93158d659b2def06b0c3c7565045542662d6eee8a96a89b78ade09fe8b3dcc096d4fe48815d88d8f82620156602af541955e1f6ca30dce14e254c326b88f", + "7775dff889458dd11aef417276853e21335eb88e4dec9cfb4e9edb49820088551a2ca60339f12066101169f0dfe84b098fddb148d9da6b3d613df263889ad64b", + "f0d2805afbb91f743951351a6d024f9353a23c7ce1fc2b051b3a8b968c233f46f50f806ecb1568ffaa0b60661e334b21dde04f8fa155ac740eeb42e20b60d764", + "86a2af316e7d7754201b942e275364ac12ea8962ab5bd8d7fb276dc5fbffc8f9a28cae4e4867df6780d9b72524160927c855da5b6078e0b554aa91e31cb9ca1d", + "10bdf0caa0802705e706369baf8a3f79d72c0a03a80675a7bbb00be3a45e516424d1ee88efb56f6d5777545ae6e27765c3a8f5e493fc308915638933a1dfee55", + "b01781092b1748459e2e4ec178696627bf4ebafebba774ecf018b79a68aeb84917bf0b84bb79d17b743151144cd66b7b33a4b9e52c76c4e112050ff5385b7f0b", + "c6dbc61dec6eaeac81e3d5f755203c8e220551534a0b2fd105a91889945a638550204f44093dd998c076205dffad703a0e5cd3c7f438a7e634cd59fededb539e", + "eba51acffb4cea31db4b8d87e9bf7dd48fe97b0253ae67aa580f9ac4a9d941f2bea518ee286818cc9f633f2a3b9fb68e594b48cdd6d515bf1d52ba6c85a203a7", + "86221f3ada52037b72224f105d7999231c5e5534d03da9d9c0a12acb68460cd375daf8e24386286f9668f72326dbf99ba094392437d398e95bb8161d717f8991", + "5595e05c13a7ec4dc8f41fb70cb50a71bce17c024ff6de7af618d0cc4e9c32d9570d6d3ea45b86525491030c0d8f2b1836d5778c1ce735c17707df364d054347", + "ce0f4f6aca89590a37fe034dd74dd5fa65eb1cbd0a41508aaddc09351a3cea6d18cb2189c54b700c009f4cbf0521c7ea01be61c5ae09cb54f27bc1b44d658c82", + "7ee80b06a215a3bca970c77cda8761822bc103d44fa4b33f4d07dcb997e36d55298bceae12241b3fa07fa63be5576068da387b8d5859aeab701369848b176d42", + "940a84b6a84d109aab208c024c6ce9647676ba0aaa11f86dbb7018f9fd2220a6d901a9027f9abcf935372727cbf09ebd61a2a2eeb87653e8ecad1bab85dc8327", + "2020b78264a82d9f4151141adba8d44bf20c5ec062eee9b595a11f9e84901bf148f298e0c9f8777dcdbc7cc4670aac356cc2ad8ccb1629f16f6a76bcefbee760", + "d1b897b0e075ba68ab572adf9d9c436663e43eb3d8e62d92fc49c9be214e6f27873fe215a65170e6bea902408a25b49506f47babd07cecf7113ec10c5dd31252", + "b14d0c62abfa469a357177e594c10c194243ed2025ab8aa5ad2fa41ad318e0ff48cd5e60bec07b13634a711d2326e488a985f31e31153399e73088efc86a5c55", + "4169c5cc808d2697dc2a82430dc23e3cd356dc70a94566810502b8d655b39abf9e7f902fe717e0389219859e1945df1af6ada42e4ccda55a197b7100a30c30a1", + "258a4edb113d66c839c8b1c91f15f35ade609f11cd7f8681a4045b9fef7b0b24c82cda06a5f2067b368825e3914e53d6948ede92efd6e8387fa2e537239b5bee", + "79d2d8696d30f30fb34657761171a11e6c3f1e64cbe7bebee159cb95bfaf812b4f411e2f26d9c421dc2c284a3342d823ec293849e42d1e46b0a4ac1e3c86abaa", + "8b9436010dc5dee992ae38aea97f2cd63b946d94fedd2ec9671dcde3bd4ce9564d555c66c15bb2b900df72edb6b891ebcadfeff63c9ea4036a998be7973981e7", + "c8f68e696ed28242bf997f5b3b34959508e42d613810f1e2a435c96ed2ff560c7022f361a9234b9837feee90bf47922ee0fd5f8ddf823718d86d1e16c6090071", + "b02d3eee4860d5868b2c39ce39bfe81011290564dd678c85e8783f29302dfc1399ba95b6b53cd9ebbf400cca1db0ab67e19a325f2d115812d25d00978ad1bca4", + "7693ea73af3ac4dad21ca0d8da85b3118a7d1c6024cfaf557699868217bc0c2f44a199bc6c0edd519798ba05bd5b1b4484346a47c2cadf6bf30b785cc88b2baf", + "a0e5c1c0031c02e48b7f09a5e896ee9aef2f17fc9e18e997d7f6cac7ae316422c2b1e77984e5f3a73cb45deed5d3f84600105e6ee38f2d090c7d0442ea34c46d", + "41daa6adcfdb69f1440c37b596440165c15ada596813e2e22f060fcd551f24dee8e04ba6890387886ceec4a7a0d7fc6b44506392ec3822c0d8c1acfc7d5aebe8", + "14d4d40d5984d84c5cf7523b7798b254e275a3a8cc0a1bd06ebc0bee726856acc3cbf516ff667cda2058ad5c3412254460a82c92187041363cc77a4dc215e487", + "d0e7a1e2b9a447fee83e2277e9ff8010c2f375ae12fa7aaa8ca5a6317868a26a367a0b69fbc1cf32a55d34eb370663016f3d2110230eba754028a56f54acf57c", + "e771aa8db5a3e043e8178f39a0857ba04a3f18e4aa05743cf8d222b0b095825350ba422f63382a23d92e4149074e816a36c1cd28284d146267940b31f8818ea2", + "feb4fd6f9e87a56bef398b3284d2bda5b5b0e166583a66b61e538457ff0584872c21a32962b9928ffab58de4af2edd4e15d8b35570523207ff4e2a5aa7754caa", + "462f17bf005fb1c1b9e671779f665209ec2873e3e411f98dabf240a1d5ec3f95ce6796b6fc23fe171903b502023467dec7273ff74879b92967a2a43a5a183d33", + "d3338193b64553dbd38d144bea71c5915bb110e2d88180dbc5db364fd6171df317fc7268831b5aef75e4342b2fad8797ba39eddcef80e6ec08159350b1ad696d", + "e1590d585a3d39f7cb599abd479070966409a6846d4377acf4471d065d5db94129cc9be92573b05ed226be1e9b7cb0cabe87918589f80dadd4ef5ef25a93d28e", + "f8f3726ac5a26cc80132493a6fedcb0e60760c09cfc84cad178175986819665e76842d7b9fedf76dddebf5d3f56faaad4477587af21606d396ae570d8e719af2", + "30186055c07949948183c850e9a756cc09937e247d9d928e869e20bafc3cd9721719d34e04a0899b92c736084550186886efba2e790d8be6ebf040b209c439a4", + "f3c4276cb863637712c241c444c5cc1e3554e0fddb174d035819dd83eb700b4ce88df3ab3841ba02085e1a99b4e17310c5341075c0458ba376c95a6818fbb3e2", + "0aa007c4dd9d5832393040a1583c930bca7dc5e77ea53add7e2b3f7c8e231368043520d4a3ef53c969b6bbfd025946f632bd7f765d53c21003b8f983f75e2a6a", + "08e9464720533b23a04ec24f7ae8c103145f765387d738777d3d343477fd1c58db052142cab754ea674378e18766c53542f71970171cc4f81694246b717d7564", + "d37ff7ad297993e7ec21e0f1b4b5ae719cdc83c5db687527f27516cbffa822888a6810ee5c1ca7bfe3321119be1ab7bfa0a502671c8329494df7ad6f522d440f", + "dd9042f6e464dcf86b1262f6accfafbd8cfd902ed3ed89abf78ffa482dbdeeb6969842394c9a1168ae3d481a017842f660002d42447c6b22f7b72f21aae021c9", + "bd965bf31e87d70327536f2a341cebc4768eca275fa05ef98f7f1b71a0351298de006fba73fe6733ed01d75801b4a928e54231b38e38c562b2e33ea1284992fa", + "65676d800617972fbd87e4b9514e1c67402b7a331096d3bfac22f1abb95374abc942f16e9ab0ead33b87c91968a6e509e119ff07787b3ef483e1dcdccf6e3022", + "939fa189699c5d2c81ddd1ffc1fa207c970b6a3685bb29ce1d3e99d42f2f7442da53e95a72907314f4588399a3ff5b0a92beb3f6be2694f9f86ecf2952d5b41c", + "c516541701863f91005f314108ceece3c643e04fc8c42fd2ff556220e616aaa6a48aeb97a84bad74782e8dff96a1a2fa949339d722edcaa32b57067041df88cc", + "987fd6e0d6857c553eaebb3d34970a2c2f6e89a3548f492521722b80a1c21a153892346d2cba6444212d56da9a26e324dccbc0dcde85d4d2ee4399eec5a64e8f", + "ae56deb1c2328d9c4017706bce6e99d41349053ba9d336d677c4c27d9fd50ae6aee17e853154e1f4fe7672346da2eaa31eea53fcf24a22804f11d03da6abfc2b", + "49d6a608c9bde4491870498572ac31aac3fa40938b38a7818f72383eb040ad39532bc06571e13d767e6945ab77c0bdc3b0284253343f9f6c1244ebf2ff0df866", + "da582ad8c5370b4469af862aa6467a2293b2b28bd80ae0e91f425ad3d47249fdf98825cc86f14028c3308c9804c78bfeeeee461444ce243687e1a50522456a1d", + "d5266aa3331194aef852eed86d7b5b2633a0af1c735906f2e13279f14931a9fc3b0eac5ce9245273bd1aa92905abe16278ef7efd47694789a7283b77da3c70f8", + "2962734c28252186a9a1111c732ad4de4506d4b4480916303eb7991d659ccda07a9911914bc75c418ab7a4541757ad054796e26797feaf36e9f6ad43f14b35a4", + "e8b79ec5d06e111bdfafd71e9f5760f00ac8ac5d8bf768f9ff6f08b8f026096b1cc3a4c973333019f1e3553e77da3f98cb9f542e0a90e5f8a940cc58e59844b3", + "dfb320c44f9d41d1efdcc015f08dd5539e526e39c87d509ae6812a969e5431bf4fa7d91ffd03b981e0d544cf72d7b1c0374f8801482e6dea2ef903877eba675e", + "d88675118fdb55a5fb365ac2af1d217bf526ce1ee9c94b2f0090b2c58a06ca58187d7fe57c7bed9d26fca067b4110eefcd9a0a345de872abe20de368001b0745", + "b893f2fc41f7b0dd6e2f6aa2e0370c0cff7df09e3acfcc0e920b6e6fad0ef747c40668417d342b80d2351e8c175f20897a062e9765e6c67b539b6ba8b9170545", + "6c67ec5697accd235c59b486d7b70baeedcbd4aa64ebd4eef3c7eac189561a726250aec4d48cadcafbbe2ce3c16ce2d691a8cce06e8879556d4483ed7165c063", + "f1aa2b044f8f0c638a3f362e677b5d891d6fd2ab0765f6ee1e4987de057ead357883d9b405b9d609eea1b869d97fb16d9b51017c553f3b93c0a1e0f1296fedcd", + "cbaa259572d4aebfc1917acddc582b9f8dfaa928a198ca7acd0f2aa76a134a90252e6298a65b08186a350d5b7626699f8cb721a3ea5921b753ae3a2dce24ba3a", + "fa1549c9796cd4d303dcf452c1fbd5744fd9b9b47003d920b92de34839d07ef2a29ded68f6fc9e6c45e071a2e48bd50c5084e96b657dd0404045a1ddefe282ed", + "5cf2ac897ab444dcb5c8d87c495dbdb34e1838b6b629427caa51702ad0f9688525f13bec503a3c3a2c80a65e0b5715e8afab00ffa56ec455a49a1ad30aa24fcd", + "9aaf80207bace17bb7ab145757d5696bde32406ef22b44292ef65d4519c3bb2ad41a59b62cc3e94b6fa96d32a7faadae28af7d35097219aa3fd8cda31e40c275", + "af88b163402c86745cb650c2988fb95211b94b03ef290eed9662034241fd51cf398f8073e369354c43eae1052f9b63b08191caa138aa54fea889cc7024236897", + "48fa7d64e1ceee27b9864db5ada4b53d00c9bc7626555813d3cd6730ab3cc06ff342d727905e33171bde6e8476e77fb1720861e94b73a2c538d254746285f430", + "0e6fd97a85e904f87bfe85bbeb34f69e1f18105cf4ed4f87aec36c6e8b5f68bd2a6f3dc8a9ecb2b61db4eedb6b2ea10bf9cb0251fb0f8b344abf7f366b6de5ab", + "06622da5787176287fdc8fed440bad187d830099c94e6d04c8e9c954cda70c8bb9e1fc4a6d0baa831b9b78ef6648681a4867a11da93ee36e5e6a37d87fc63f6f", + "1da6772b58fabf9c61f68d412c82f182c0236d7d575ef0b58dd22458d643cd1dfc93b03871c316d8430d312995d4197f0874c99172ba004a01ee295abac24e46", + "3cd2d9320b7b1d5fb9aab951a76023fa667be14a9124e394513918a3f44096ae4904ba0ffc150b63bc7ab1eeb9a6e257e5c8f000a70394a5afd842715de15f29", + "04cdc14f7434e0b4be70cb41db4c779a88eaef6accebcb41f2d42fffe7f32a8e281b5c103a27021d0d08362250753cdf70292195a53a48728ceb5844c2d98bab", + "9071b7a8a075d0095b8fb3ae5113785735ab98e2b52faf91d5b89e44aac5b5d4ebbf91223b0ff4c71905da55342e64655d6ef8c89a4768c3f93a6dc0366b5bc8", + "ebb30240dd96c7bc8d0abe49aa4edcbb4afdc51ff9aaf720d3f9e7fbb0f9c6d6571350501769fc4ebd0b2141247ff400d4fd4be414edf37757bb90a32ac5c65a", + "8532c58bf3c8015d9d1cbe00eef1f5082f8f3632fbe9f1ed4f9dfb1fa79e8283066d77c44c4af943d76b300364aecbd0648c8a8939bd204123f4b56260422dec", + "fe9846d64f7c7708696f840e2d76cb4408b6595c2f81ec6a28a7f2f20cb88cfe6ac0b9e9b8244f08bd7095c350c1d0842f64fb01bb7f532dfcd47371b0aeeb79", + "28f17ea6fb6c42092dc264257e29746321fb5bdaea9873c2a7fa9d8f53818e899e161bc77dfe8090afd82bf2266c5c1bc930a8d1547624439e662ef695f26f24", + "ec6b7d7f030d4850acae3cb615c21dd25206d63e84d1db8d957370737ba0e98467ea0ce274c66199901eaec18a08525715f53bfdb0aacb613d342ebdceeddc3b", + "b403d3691c03b0d3418df327d5860d34bbfcc4519bfbce36bf33b208385fadb9186bc78a76c489d89fd57e7dc75412d23bcd1dae8470ce9274754bb8585b13c5", + "31fc79738b8772b3f55cd8178813b3b52d0db5a419d30ba9495c4b9da0219fac6df8e7c23a811551a62b827f256ecdb8124ac8a6792ccfecc3b3012722e94463", + "bb2039ec287091bcc9642fc90049e73732e02e577e2862b32216ae9bedcd730c4c284ef3968c368b7d37584f97bd4b4dc6ef6127acfe2e6ae2509124e66c8af4", + "f53d68d13f45edfcb9bd415e2831e938350d5380d3432278fc1c0c381fcb7c65c82dafe051d8c8b0d44e0974a0e59ec7bf7ed0459f86e96f329fc79752510fd3", + "8d568c7984f0ecdf7640fbc483b5d8c9f86634f6f43291841b309a350ab9c1137d24066b09da9944bac54d5bb6580d836047aac74ab724b887ebf93d4b32eca9", + "c0b65ce5a96ff774c456cac3b5f2c4cd359b4ff53ef93a3da0778be4900d1e8da1601e769e8f1b02d2a2f8c5b9fa10b44f1c186985468feeb008730283a6657d", + "4900bba6f5fb103ece8ec96ada13a5c3c85488e05551da6b6b33d988e611ec0fe2e3c2aa48ea6ae8986a3a231b223c5d27cec2eadde91ce07981ee652862d1e4", + "c7f5c37c7285f927f76443414d4357ff789647d7a005a5a787e03c346b57f49f21b64fa9cf4b7e45573e23049017567121a9c3d4b2b73ec5e9413577525db45a", + "ec7096330736fdb2d64b5653e7475da746c23a4613a82687a28062d3236364284ac01720ffb406cfe265c0df626a188c9e5963ace5d3d5bb363e32c38c2190a6", + "82e744c75f4649ec52b80771a77d475a3bc091989556960e276a5f9ead92a03f718742cdcfeaee5cb85c44af198adc43a4a428f5f0c2ddb0be36059f06d7df73", + "2834b7a7170f1f5b68559ab78c1050ec21c919740b784a9072f6e5d69f828d70c919c5039fb148e39e2c8a52118378b064ca8d5001cd10a5478387b966715ed6", + "16b4ada883f72f853bb7ef253efcab0c3e2161687ad61543a0d2824f91c1f81347d86be709b16996e17f2dd486927b0288ad38d13063c4a9672c39397d3789b6", + "78d048f3a69d8b54ae0ed63a573ae350d89f7c6cf1f3688930de899afa037697629b314e5cd303aa62feea72a25bf42b304b6c6bcb27fae21c16d925e1fbdac3", + "0f746a48749287ada77a82961f05a4da4abdb7d77b1220f836d09ec814359c0ec0239b8c7b9ff9e02f569d1b301ef67c4612d1de4f730f81c12c40cc063c5caa", + "f0fc859d3bd195fbdc2d591e4cdac15179ec0f1dc821c11df1f0c1d26e6260aaa65b79fafacafd7d3ad61e600f250905f5878c87452897647a35b995bcadc3a3", + "2620f687e8625f6a412460b42e2cef67634208ce10a0cbd4dff7044a41b7880077e9f8dc3b8d1216d3376a21e015b58fb279b521d83f9388c7382c8505590b9b", + "227e3aed8d2cb10b918fcb04f9de3e6d0a57e08476d93759cd7b2ed54a1cbf0239c528fb04bbf288253e601d3bc38b21794afef90b17094a182cac557745e75f", + "1a929901b09c25f27d6b35be7b2f1c4745131fdebca7f3e2451926720434e0db6e74fd693ad29b777dc3355c592a361c4873b01133a57c2e3b7075cbdb86f4fc", + "5fd7968bc2fe34f220b5e3dc5af9571742d73b7d60819f2888b629072b96a9d8ab2d91b82d0a9aaba61bbd39958132fcc4257023d1eca591b3054e2dc81c8200", + "dfcce8cf32870cc6a503eadafc87fd6f78918b9b4d0737db6810be996b5497e7e5cc80e312f61e71ff3e9624436073156403f735f56b0b01845c18f6caf772e6", + "02f7ef3a9ce0fff960f67032b296efca3061f4934d690749f2d01c35c81c14f39a67fa350bc8a0359bf1724bffc3bca6d7c7bba4791fd522a3ad353c02ec5aa8", + "64be5c6aba65d594844ae78bb022e5bebe127fd6b6ffa5a13703855ab63b624dcd1a363f99203f632ec386f3ea767fc992e8ed9686586aa27555a8599d5b808f", + "f78585505c4eaa54a8b5be70a61e735e0ff97af944ddb3001e35d86c4e2199d976104b6ae31750a36a726ed285064f5981b503889fef822fcdc2898dddb7889a", + "e4b5566033869572edfd87479a5bb73c80e8759b91232879d96b1dda36c012076ee5a2ed7ae2de63ef8406a06aea82c188031b560beafb583fb3de9e57952a7e", + "e1b3e7ed867f6c9484a2a97f7715f25e25294e992e41f6a7c161ffc2adc6daaeb7113102d5e6090287fe6ad94ce5d6b739c6ca240b05c76fb73f25dd024bf935", + "85fd085fdc12a080983df07bd7012b0d402a0f4043fcb2775adf0bad174f9b08d1676e476985785c0a5dcc41dbff6d95ef4d66a3fbdc4a74b82ba52da0512b74", + "aed8fa764b0fbff821e05233d2f7b0900ec44d826f95e93c343c1bc3ba5a24374b1d616e7e7aba453a0ada5e4fab5382409e0d42ce9c2bc7fb39a99c340c20f0", + "7ba3b2e297233522eeb343bd3ebcfd835a04007735e87f0ca300cbee6d416565162171581e4020ff4cf176450f1291ea2285cb9ebffe4c56660627685145051c", + "de748bcf89ec88084721e16b85f30adb1a6134d664b5843569babc5bbd1a15ca9b61803c901a4fef32965a1749c9f3a4e243e173939dc5a8dc495c671ab52145", + "aaf4d2bdf200a919706d9842dce16c98140d34bc433df320aba9bd429e549aa7a3397652a4d768277786cf993cde2338673ed2e6b66c961fefb82cd20c93338f", + "c408218968b788bf864f0997e6bc4c3dba68b276e2125a4843296052ff93bf5767b8cdce7131f0876430c1165fec6c4f47adaa4fd8bcfacef463b5d3d0fa61a0", + "76d2d819c92bce55fa8e092ab1bf9b9eab237a25267986cacf2b8ee14d214d730dc9a5aa2d7b596e86a1fd8fa0804c77402d2fcd45083688b218b1cdfa0dcbcb", + "72065ee4dd91c2d8509fa1fc28a37c7fc9fa7d5b3f8ad3d0d7a25626b57b1b44788d4caf806290425f9890a3a2a35a905ab4b37acfd0da6e4517b2525c9651e4", + "64475dfe7600d7171bea0b394e27c9b00d8e74dd1e416a79473682ad3dfdbb706631558055cfc8a40e07bd015a4540dcdea15883cbbf31412df1de1cd4152b91", + "12cd1674a4488a5d7c2b3160d2e2c4b58371bedad793418d6f19c6ee385d70b3e06739369d4df910edb0b0a54cbff43d54544cd37ab3a06cfa0a3ddac8b66c89", + "60756966479dedc6dd4bcff8ea7d1d4ce4d4af2e7b097e32e3763518441147cc12b3c0ee6d2ecabf1198cec92e86a3616fba4f4e872f5825330adbb4c1dee444", + "a7803bcb71bc1d0f4383dde1e0612e04f872b715ad30815c2249cf34abb8b024915cb2fc9f4e7cc4c8cfd45be2d5a91eab0941c7d270e2da4ca4a9f7ac68663a", + "b84ef6a7229a34a750d9a98ee2529871816b87fbe3bc45b45fa5ae82d5141540211165c3c5d7a7476ba5a4aa06d66476f0d9dc49a3f1ee72c3acabd498967414", + "fae4b6d8efc3f8c8e64d001dabec3a21f544e82714745251b2b4b393f2f43e0da3d403c64db95a2cb6e23ebb7b9e94cdd5ddac54f07c4a61bd3cb10aa6f93b49", + "34f7286605a122369540141ded79b8957255da2d4155abbf5a8dbb89c8eb7ede8eeef1daa46dc29d751d045dc3b1d658bb64b80ff8589eddb3824b13da235a6b", + "3b3b48434be27b9eababba43bf6b35f14b30f6a88dc2e750c358470d6b3aa3c18e47db4017fa55106d8252f016371a00f5f8b070b74ba5f23cffc5511c9f09f0", + "ba289ebd6562c48c3e10a8ad6ce02e73433d1e93d7c9279d4d60a7e879ee11f441a000f48ed9f7c4ed87a45136d7dccdca482109c78a51062b3ba4044ada2469", + "022939e2386c5a37049856c850a2bb10a13dfea4212b4c732a8840a9ffa5faf54875c5448816b2785a007da8a8d2bc7d71a54e4e6571f10b600cbdb25d13ede3", + "e6fec19d89ce8717b1a087024670fe026f6c7cbda11caef959bb2d351bf856f8055d1c0ebdaaa9d1b17886fc2c562b5e99642fc064710c0d3488a02b5ed7f6fd", + "94c96f02a8f576aca32ba61c2b206f907285d9299b83ac175c209a8d43d53bfe683dd1d83e7549cb906c28f59ab7c46f8751366a28c39dd5fe2693c9019666c8", + "31a0cd215ebd2cb61de5b9edc91e6195e31c59a5648d5c9f737e125b2605708f2e325ab3381c8dce1a3e958886f1ecdc60318f882cfe20a24191352e617b0f21", + "91ab504a522dce78779f4c6c6ba2e6b6db5565c76d3e7e7c920caf7f757ef9db7c8fcf10e57f03379ea9bf75eb59895d96e149800b6aae01db778bb90afbc989", + "d85cabc6bd5b1a01a5afd8c6734740da9fd1c1acc6db29bfc8a2e5b668b028b6b3154bfb8703fa3180251d589ad38040ceb707c4bad1b5343cb426b61eaa49c1", + "d62efbec2ca9c1f8bd66ce8b3f6a898cb3f7566ba6568c618ad1feb2b65b76c3ce1dd20f7395372faf28427f61c9278049cf0140df434f5633048c86b81e0399", + "7c8fdc6175439e2c3db15bafa7fb06143a6a23bc90f449e79deef73c3d492a671715c193b6fea9f036050b946069856b897e08c00768f5ee5ddcf70b7cd6d0e0", + "58602ee7468e6bc9df21bd51b23c005f72d6cb013f0a1b48cbec5eca299299f97f09f54a9a01483eaeb315a6478bad37ba47ca1347c7c8fc9e6695592c91d723", + "27f5b79ed256b050993d793496edf4807c1d85a7b0a67c9c4fa99860750b0ae66989670a8ffd7856d7ce411599e58c4d77b232a62bef64d15275be46a68235ff", + "3957a976b9f1887bf004a8dca942c92d2b37ea52600f25e0c9bc5707d0279c00c6e85a839b0d2d8eb59c51d94788ebe62474a791cadf52cccf20f5070b6573fc", + "eaa2376d55380bf772ecca9cb0aa4668c95c707162fa86d518c8ce0ca9bf7362b9f2a0adc3ff59922df921b94567e81e452f6c1a07fc817cebe99604b3505d38", + "c1e2c78b6b2734e2480ec550434cb5d613111adcc21d475545c3b1b7e6ff12444476e5c055132e2229dc0f807044bb919b1a5662dd38a9ee65e243a3911aed1a", + "8ab48713389dd0fcf9f965d3ce66b1e559a1f8c58741d67683cd971354f452e62d0207a65e436c5d5d8f8ee71c6abfe50e669004c302b31a7ea8311d4a916051", + "24ce0addaa4c65038bd1b1c0f1452a0b128777aabc94a29df2fd6c7e2f85f8ab9ac7eff516b0e0a825c84a24cfe492eaad0a6308e46dd42fe8333ab971bb30ca", + "5154f929ee03045b6b0c0004fa778edee1d139893267cc84825ad7b36c63de32798e4a166d24686561354f63b00709a1364b3c241de3febf0754045897467cd4", + "e74e907920fd87bd5ad636dd11085e50ee70459c443e1ce5809af2bc2eba39f9e6d7128e0e3712c316da06f4705d78a4838e28121d4344a2c79c5e0db307a677", + "bf91a22334bac20f3fd80663b3cd06c4e8802f30e6b59f90d3035cc9798a217ed5a31abbda7fa6842827bdf2a7a1c21f6fcfccbb54c6c52926f32da816269be1", + "d9d5c74be5121b0bd742f26bffb8c89f89171f3f934913492b0903c271bbe2b3395ef259669bef43b57f7fcc3027db01823f6baee66e4f9fead4d6726c741fce", + "50c8b8cf34cd879f80e2faab3230b0c0e1cc3e9dcadeb1b9d97ab923415dd9a1fe38addd5c11756c67990b256e95ad6d8f9fedce10bf1c90679cde0ecf1be347", + "0a386e7cd5dd9b77a035e09fe6fee2c8ce61b5383c87ea43205059c5e4cd4f4408319bb0a82360f6a58e6c9ce3f487c446063bf813bc6ba535e17fc1826cfc91", + "1f1459cb6b61cbac5f0efe8fc487538f42548987fcd56221cfa7beb22504769e792c45adfb1d6b3d60d7b749c8a75b0bdf14e8ea721b95dca538ca6e25711209", + "e58b3836b7d8fedbb50ca5725c6571e74c0785e97821dab8b6298c10e4c079d4a6cdf22f0fedb55032925c16748115f01a105e77e00cee3d07924dc0d8f90659", + "b929cc6505f020158672deda56d0db081a2ee34c00c1100029bdf8ea98034fa4bf3e8655ec697fe36f40553c5bb46801644a627d3342f4fc92b61f03290fb381", + "72d353994b49d3e03153929a1e4d4f188ee58ab9e72ee8e512f29bc773913819ce057ddd7002c0433ee0a16114e3d156dd2c4a7e80ee53378b8670f23e33ef56", + "c70ef9bfd775d408176737a0736d68517ce1aaad7e81a93c8c1ed967ea214f56c8a377b1763e676615b60f3988241eae6eab9685a5124929d28188f29eab06f7", + "c230f0802679cb33822ef8b3b21bf7a9a28942092901d7dac3760300831026cf354c9232df3e084d9903130c601f63c1f4a4a4b8106e468cd443bbe5a734f45f", + "6f43094cafb5ebf1f7a4937ec50f56a4c9da303cbb55ac1f27f1f1976cd96beda9464f0e7b9c54620b8a9fba983164b8be3578425a024f5fe199c36356b88972", + "3745273f4c38225db2337381871a0c6aafd3af9b018c88aa02025850a5dc3a42a1a3e03e56cbf1b0876d63a441f1d2856a39b8801eb5af325201c415d65e97fe", + "c50c44cca3ec3edaae779a7e179450ebdda2f97067c690aa6c5a4ac7c30139bb27c0df4db3220e63cb110d64f37ffe078db72653e2daacf93ae3f0a2d1a7eb2e", + "8aef263e385cbc61e19b28914243262af5afe8726af3ce39a79c27028cf3ecd3f8d2dfd9cfc9ad91b58f6f20778fd5f02894a3d91c7d57d1e4b866a7f364b6be", + "28696141de6e2d9bcb3235578a66166c1448d3e905a1b482d423be4bc5369bc8c74dae0acc9cc123e1d8ddce9f97917e8c019c552da32d39d2219b9abf0fa8c8", + "2fb9eb2085830181903a9dafe3db428ee15be7662224efd643371fb25646aee716e531eca69b2bdc8233f1a8081fa43da1500302975a77f42fa592136710e9dc", + "66f9a7143f7a3314a669bf2e24bbb35014261d639f495b6c9c1f104fe8e320aca60d4550d69d52edbd5a3cdeb4014ae65b1d87aa770b69ae5c15f4330b0b0ad8", + "f4c4dd1d594c3565e3e25ca43dad82f62abea4835ed4cd811bcd975e46279828d44d4c62c3679f1b7f7b9dd4571d7b49557347b8c5460cbdc1bef690fb2a08c0", + "8f1dc9649c3a84551f8f6e91cac68242a43b1f8f328ee92280257387fa7559aa6db12e4aeadc2d26099178749c6864b357f3f83b2fb3efa8d2a8db056bed6bcc", + "3139c1a7f97afd1675d460ebbc07f2728aa150df849624511ee04b743ba0a833092f18c12dc91b4dd243f333402f59fe28abdbbbae301e7b659c7a26d5c0f979", + "06f94a2996158a819fe34c40de3cf0379fd9fb85b3e363ba3926a0e7d960e3f4c2e0c70c7ce0ccb2a64fc29869f6e7ab12bd4d3f14fce943279027e785fb5c29", + "c29c399ef3eee8961e87565c1ce263925fc3d0ce267d13e48dd9e732ee67b0f69fad56401b0f10fcaac119201046cca28c5b14abdea3212ae65562f7f138db3d", + "4cec4c9df52eef05c3f6faaa9791bc7445937183224ecc37a1e58d0132d35617531d7e795f52af7b1eb9d147de1292d345fe341823f8e6bc1e5badca5c656108", + "898bfbae93b3e18d00697eab7d9704fa36ec339d076131cefdf30edbe8d9cc81c3a80b129659b163a323bab9793d4feed92d54dae966c77529764a09be88db45", + "ee9bd0469d3aaf4f14035be48a2c3b84d9b4b1fff1d945e1f1c1d38980a951be197b25fe22c731f20aeacc930ba9c4a1f4762227617ad350fdabb4e80273a0f4", + "3d4d3113300581cd96acbf091c3d0f3c310138cd6979e6026cde623e2dd1b24d4a8638bed1073344783ad0649cc6305ccec04beb49f31c633088a99b65130267", + "95c0591ad91f921ac7be6d9ce37e0663ed8011c1cfd6d0162a5572e94368bac02024485e6a39854aa46fe38e97d6c6b1947cd272d86b06bb5b2f78b9b68d559d", + "227b79ded368153bf46c0a3ca978bfdbef31f3024a5665842468490b0ff748ae04e7832ed4c9f49de9b1706709d623e5c8c15e3caecae8d5e433430ff72f20eb", + "5d34f3952f0105eef88ae8b64c6ce95ebfade0e02c69b08762a8712d2e4911ad3f941fc4034dc9b2e479fdbcd279b902faf5d838bb2e0c6495d372b5b7029813", + "7f939bf8353abce49e77f14f3750af20b7b03902e1a1e7fb6aaf76d0259cd401a83190f15640e74f3e6c5a90e839c7821f6474757f75c7bf9002084ddc7a62dc", + "062b61a2f9a33a71d7d0a06119644c70b0716a504de7e5e1be49bd7b86e7ed6817714f9f0fc313d06129597e9a2235ec8521de36f7290a90ccfc1ffa6d0aee29", + "f29e01eeae64311eb7f1c6422f946bf7bea36379523e7b2bbaba7d1d34a22d5ea5f1c5a09d5ce1fe682cced9a4798d1a05b46cd72dff5c1b355440b2a2d476bc", + "ec38cd3bbab3ef35d7cb6d5c914298351d8a9dc97fcee051a8a02f58e3ed6184d0b7810a5615411ab1b95209c3c810114fdeb22452084e77f3f847c6dbaafe16", + "c2aef5e0ca43e82641565b8cb943aa8ba53550caef793b6532fafad94b816082f0113a3ea2f63608ab40437ecc0f0229cb8fa224dcf1c478a67d9b64162b92d1", + "15f534efff7105cd1c254d074e27d5898b89313b7d366dc2d7d87113fa7d53aae13f6dba487ad8103d5e854c91fdb6e1e74b2ef6d1431769c30767dde067a35c", + "89acbca0b169897a0a2714c2df8c95b5b79cb69390142b7d6018bb3e3076b099b79a964152a9d912b1b86412b7e372e9cecad7f25d4cbab8a317be36492a67d7", + "e3c0739190ed849c9c962fd9dbb55e207e624fcac1eb417691515499eea8d8267b7e8f1287a63633af5011fde8c4ddf55bfdf722edf88831414f2cfaed59cb9a", + "8d6cf87c08380d2d1506eee46fd4222d21d8c04e585fbfd08269c98f702833a156326a0724656400ee09351d57b440175e2a5de93cc5f80db6daf83576cf75fa", + "da24bede383666d563eeed37f6319baf20d5c75d1635a6ba5ef4cfa1ac95487e96f8c08af600aab87c986ebad49fc70a58b4890b9c876e091016daf49e1d322e", + "f9d1d1b1e87ea7ae753a029750cc1cf3d0157d41805e245c5617bb934e732f0ae3180b78e05bfe76c7c3051e3e3ac78b9b50c05142657e1e03215d6ec7bfd0fc", + "11b7bc1668032048aa43343de476395e814bbbc223678db951a1b03a021efac948cfbe215f97fe9a72a2f6bc039e3956bfa417c1a9f10d6d7ba5d3d32ff323e5", + "b8d9000e4fc2b066edb91afee8e7eb0f24e3a201db8b6793c0608581e628ed0bcc4e5aa6787992a4bcc44e288093e63ee83abd0bc3ec6d0934a674a4da13838a", + "ce325e294f9b6719d6b61278276ae06a2564c03bb0b783fafe785bdf89c7d5acd83e78756d301b445699024eaeb77b54d477336ec2a4f332f2b3f88765ddb0c3", + "29acc30e9603ae2fccf90bf97e6cc463ebe28c1b2f9b4b765e70537c25c702a29dcbfbf14c99c54345ba2b51f17b77b5f15db92bbad8fa95c471f5d070a137cc", + "3379cbaae562a87b4c0425550ffdd6bfe1203f0d666cc7ea095be407a5dfe61ee91441cd5154b3e53b4f5fb31ad4c7a9ad5c7af4ae679aa51a54003a54ca6b2d", + "3095a349d245708c7cf550118703d7302c27b60af5d4e67fc978f8a4e60953c7a04f92fcf41aee64321ccb707a895851552b1e37b00bc5e6b72fa5bcef9e3fff", + "07262d738b09321f4dbccec4bb26f48cb0f0ed246ce0b31b9a6e7bc683049f1f3e5545f28ce932dd985c5ab0f43bd6de0770560af329065ed2e49d34624c2cbb", + "b6405eca8ee3316c87061cc6ec18dba53e6c250c63ba1f3bae9e55dd3498036af08cd272aa24d713c6020d77ab2f3919af1a32f307420618ab97e73953994fb4", + "7ee682f63148ee45f6e5315da81e5c6e557c2c34641fc509c7a5701088c38a74756168e2cd8d351e88fd1a451f360a01f5b2580f9b5a2e8cfc138f3dd59a3ffc", + "1d263c179d6b268f6fa016f3a4f29e943891125ed8593c81256059f5a7b44af2dcb2030d175c00e62ecaf7ee96682aa07ab20a611024a28532b1c25b86657902", + "106d132cbdb4cd2597812846e2bc1bf732fec5f0a5f65dbb39ec4e6dc64ab2ce6d24630d0f15a805c3540025d84afa98e36703c3dbee713e72dde8465bc1be7e", + "0e79968226650667a8d862ea8da4891af56a4e3a8b6d1750e394f0dea76d640d85077bcec2cc86886e506751b4f6a5838f7f0b5fef765d9dc90dcdcbaf079f08", + "521156a82ab0c4e566e5844d5e31ad9aaf144bbd5a464fdca34dbd5717e8ff711d3ffebbfa085d67fe996a34f6d3e4e60b1396bf4b1610c263bdbb834d560816", + "1aba88befc55bc25efbce02db8b9933e46f57661baeabeb21cc2574d2a518a3cba5dc5a38e49713440b25f9c744e75f6b85c9d8f4681f676160f6105357b8406", + "5a9949fcb2c473cda968ac1b5d08566dc2d816d960f57e63b898fa701cf8ebd3f59b124d95bfbbedc5f1cf0e17d5eaed0c02c50b69d8a402cabcca4433b51fd4", + "b0cead09807c672af2eb2b0f06dde46cf5370e15a4096b1a7d7cbb36ec31c205fbefca00b7a4162fa89fb4fb3eb78d79770c23f44e7206664ce3cd931c291e5d", + "bb6664931ec97044e45b2ae420ae1c551a8874bc937d08e969399c3964ebdba8346cdd5d09caafe4c28ba7ec788191ceca65ddd6f95f18583e040d0f30d0364d", + "65bc770a5faa3792369803683e844b0be7ee96f29f6d6a35568006bd5590f9a4ef639b7a8061c7b0424b66b60ac34af3119905f33a9d8c3ae18382ca9b689900", + "ea9b4dca333336aaf839a45c6eaa48b8cb4c7ddabffea4f643d6357ea6628a480a5b45f2b052c1b07d1fedca918b6f1139d80f74c24510dcbaa4be70eacc1b06", + "e6342fb4a780ad975d0e24bce149989b91d360557e87994f6b457b895575cc02d0c15bad3ce7577f4c63927ff13f3e381ff7e72bdbe745324844a9d27e3f1c01", + "3e209c9b33e8e461178ab46b1c64b49a07fb745f1c8bc95fbfb94c6b87c69516651b264ef980937fad41238b91ddc011a5dd777c7efd4494b4b6ecd3a9c22ac0", + "fd6a3d5b1875d80486d6e69694a56dbb04a99a4d051f15db2689776ba1c4882e6d462a603b7015dc9f4b7450f05394303b8652cfb404a266962c41bae6e18a94", + "951e27517e6bad9e4195fc8671dee3e7e9be69cee1422cb9fecfce0dba875f7b310b93ee3a3d558f941f635f668ff832d2c1d033c5e2f0997e4c66f147344e02", + "8eba2f874f1ae84041903c7c4253c82292530fc8509550bfdc34c95c7e2889d5650b0ad8cb988e5c4894cb87fbfbb19612ea93ccc4c5cad17158b9763464b492", + "16f712eaa1b7c6354719a8e7dbdfaf55e4063a4d277d947550019b38dfb564830911057d50506136e2394c3b28945cc964967d54e3000c2181626cfb9b73efd2", + "c39639e7d5c7fb8cdd0fd3e6a52096039437122f21c78f1679cea9d78a734c56ecbeb28654b4f18e342c331f6f7229ec4b4bc281b2d80a6eb50043f31796c88c", + "72d081af99f8a173dcc9a0ac4eb3557405639a29084b54a40172912a2f8a395129d5536f0918e902f9e8fa6000995f4168ddc5f893011be6a0dbc9b8a1a3f5bb", + "c11aa81e5efd24d5fc27ee586cfd8847fbb0e27601ccece5ecca0198e3c7765393bb74457c7e7a27eb9170350e1fb53857177506be3e762cc0f14d8c3afe9077", + "c28f2150b452e6c0c424bcde6f8d72007f9310fed7f2f87de0dbb64f4479d6c1441ba66f44b2accee61609177ed340128b407ecec7c64bbe50d63d22d8627727", + "f63d88122877ec30b8c8b00d22e89000a966426112bd44166e2f525b769ccbe9b286d437a0129130dde1a86c43e04bedb594e671d98283afe64ce331de9828fd", + "348b0532880b88a6614a8d7408c3f913357fbb60e995c60205be9139e74998aede7f4581e42f6b52698f7fa1219708c14498067fd1e09502de83a77dd281150c", + "5133dc8bef725359dff59792d85eaf75b7e1dcd1978b01c35b1b85fcebc63388ad99a17b6346a217dc1a9622ebd122ecf6913c4d31a6b52a695b86af00d741a0", + "2753c4c0e98ecad806e88780ec27fccd0f5c1ab547f9e4bf1659d192c23aa2cc971b58b6802580baef8adc3b776ef7086b2545c2987f348ee3719cdef258c403", + "b1663573ce4b9d8caefc865012f3e39714b9898a5da6ce17c25a6a47931a9ddb9bbe98adaa553beed436e89578455416c2a52a525cf2862b8d1d49a2531b7391", + "64f58bd6bfc856f5e873b2a2956ea0eda0d6db0da39c8c7fc67c9f9feefcff3072cdf9e6ea37f69a44f0c61aa0da3693c2db5b54960c0281a088151db42b11e8", + "0764c7be28125d9065c4b98a69d60aede703547c66a12e17e1c618994132f5ef82482c1e3fe3146cc65376cc109f0138ed9a80e49f1f3c7d610d2f2432f20605", + "f748784398a2ff03ebeb07e155e66116a839741a336e32da71ec696001f0ad1b25cd48c69cfca7265eca1dd71904a0ce748ac4124f3571076dfa7116a9cf00e9", + "3f0dbc0186bceb6b785ba78d2a2a013c910be157bdaffae81bb6663b1a73722f7f1228795f3ecada87cf6ef0078474af73f31eca0cc200ed975b6893f761cb6d", + "d4762cd4599876ca75b2b8fe249944dbd27ace741fdab93616cbc6e425460feb51d4e7adcc38180e7fc47c89024a7f56191adb878dfde4ead62223f5a2610efe", + "cd36b3d5b4c91b90fcbba79513cfee1907d8645a162afd0cd4cf4192d4a5f4c892183a8eacdb2b6b6a9d9aa8c11ac1b261b380dbee24ca468f1bfd043c58eefe", + "98593452281661a53c48a9d8cd790826c1a1ce567738053d0bee4a91a3d5bd92eefdbabebe3204f2031ca5f781bda99ef5d8ae56e5b04a9e1ecd21b0eb05d3e1", + "771f57dd2775ccdab55921d3e8e30ccf484d61fe1c1b9c2ae819d0fb2a12fab9be70c4a7a138da84e8280435daade5bbe66af0836a154f817fb17f3397e725a3", + "c60897c6f828e21f16fbb5f15b323f87b6c8955eabf1d38061f707f608abdd993fac3070633e286cf8339ce295dd352df4b4b40b2f29da1dd50b3a05d079e6bb", + "8210cd2c2d3b135c2cf07fa0d1433cd771f325d075c6469d9c7f1ba0943cd4ab09808cabf4acb9ce5bb88b498929b4b847f681ad2c490d042db2aec94214b06b", + "1d4edfffd8fd80f7e4107840fa3aa31e32598491e4af7013c197a65b7f36dd3ac4b478456111cd4309d9243510782fa31b7c4c95fa951520d020eb7e5c36e4ef", + "af8e6e91fab46ce4873e1a50a8ef448cc29121f7f74deef34a71ef89cc00d9274bc6c2454bbb3230d8b2ec94c62b1dec85f3593bfa30ea6f7a44d7c09465a253", + "29fd384ed4906f2d13aa9fe7af905990938bed807f1832454a372ab412eea1f5625a1fcc9ac8343b7c67c5aba6e0b1cc4644654913692c6b39eb9187ceacd3ec", + "a268c7885d9874a51c44dffed8ea53e94f78456e0b2ed99ff5a3924760813826d960a15edbedbb5de5226ba4b074e71b05c55b9756bb79e55c02754c2c7b6c8a", + "0cf8545488d56a86817cd7ecb10f7116b7ea530a45b6ea497b6c72c997e09e3d0da8698f46bb006fc977c2cd3d1177463ac9057fdd1662c85d0c126443c10473", + "b39614268fdd8781515e2cfebf89b4d5402bab10c226e6344e6b9ae000fb0d6c79cb2f3ec80e80eaeb1980d2f8698916bd2e9f747236655116649cd3ca23a837", + "74bef092fc6f1e5dba3663a3fb003b2a5ba257496536d99f62b9d73f8f9eb3ce9ff3eec709eb883655ec9eb896b9128f2afc89cf7d1ab58a72f4a3bf034d2b4a", + "3a988d38d75611f3ef38b8774980b33e573b6c57bee0469ba5eed9b44f29945e7347967fba2c162e1c3be7f310f2f75ee2381e7bfd6b3f0baea8d95dfb1dafb1", + "58aedfce6f67ddc85a28c992f1c0bd0969f041e66f1ee88020a125cbfcfebcd61709c9c4eba192c15e69f020d462486019fa8dea0cd7a42921a19d2fe546d43d", + "9347bd291473e6b4e368437b8e561e065f649a6d8ada479ad09b1999a8f26b91cf6120fd3bfe014e83f23acfa4c0ad7b3712b2c3c0733270663112ccd9285cd9", + "b32163e7c5dbb5f51fdc11d2eac875efbbcb7e7699090a7e7ff8a8d50795af5d74d9ff98543ef8cdf89ac13d0485278756e0ef00c817745661e1d59fe38e7537", + "1085d78307b1c4b008c57a2e7e5b234658a0a82e4ff1e4aaac72b312fda0fe27d233bc5b10e9cc17fdc7697b540c7d95eb215a19a1a0e20e1abfa126efd568c7", + "4e5c734c7dde011d83eac2b7347b373594f92d7091b9ca34cb9c6f39bdf5a8d2f134379e16d822f6522170ccf2ddd55c84b9e6c64fc927ac4cf8dfb2a17701f2", + "695d83bd990a1117b3d0ce06cc888027d12a054c2677fd82f0d4fbfc93575523e7991a5e35a3752e9b70ce62992e268a877744cdd435f5f130869c9a2074b338", + "a6213743568e3b3158b9184301f3690847554c68457cb40fc9a4b8cfd8d4a118c301a07737aeda0f929c68913c5f51c80394f53bff1c3e83b2e40ca97eba9e15", + "d444bfa2362a96df213d070e33fa841f51334e4e76866b8139e8af3bb3398be2dfaddcbc56b9146de9f68118dc5829e74b0c28d7711907b121f9161cb92b69a9", + "142709d62e28fcccd0af97fad0f8465b971e82201dc51070faa0372aa43e92484be1c1e73ba10906d5d1853db6a4106e0a7bf9800d373d6dee2d46d62ef2a461", +} + +var hashes2X = []string{ + "64", + "f457", + "e8c045", + "a74c6d0d", + "eb02ae482a", + "be65b981275e", + "8540ccd083a455", + "074a02fa58d7c7c0", + "da6da05e10db3022b6", + "542a5aae2f28f2c3b68c", + "ca3af2afc4afe891da78b1", + "e0f66b8dcebf4edc85f12c85", + "744224d383733b3fa2c53bfcf5", + "b09b653e85b72ef5cdf8fcfa95f3", + "dd51877f31f1cf7b9f68bbb09064a3", + "f5ebf68e7ebed6ad445ffc0c47e82650", + "ebdcfe03bcb7e21a9091202c5938c0a1bb", + "860fa5a72ff92efafc48a89df1632a4e2809", + "0d6d49daa26ae2818041108df3ce0a4db48c8d", + "e5d7e1bc5715f5ae991e4043e39533af5d53e47f", + "5232028a43b9d4dfa7f37439b49495926481ab8a29", + "c118803c922f9ae2397fb676a2ab7603dd9c29c21fe4", + "2af924f48b9bd7076bfd68794bba6402e2a7ae048de3ea", + "61255ac38231087c79ea1a0fa14538c26be1c851b6f318c0", + "f9712b8e42f0532162822f142cb946c40369f2f0e77b6b186e", + "76da0b89558df66f9b1e66a61d1e795b178ce77a359087793ff2", + "9036fd1eb32061bdecebc4a32aa524b343b8098a16768ee774d93c", + "f4ce5a05934e125d159678bea521f585574bcf9572629f155f63efcc", + "5e1c0d9fae56393445d3024d6b82692d1339f7b5936f68b062c691d3bf", + "538e35f3e11111d7c4bab69f83b30ade4f67addf1f45cdd2ac74bf299509", + "17572c4dcbb17faf8785f3bba9f6903895394352eae79b01ebd758377694cc", + "29f6bb55de7f8868e053176c878c9fe6c2055c4c5413b51ab0386c277fdbac75", + "bad026c8b2bd3d294907f2280a7145253ec2117d76e3800357be6d431b16366e41", + "386b7cb6e0fd4b27783125cbe80065af8eb9981fafc3ed18d8120863d972fa7427d9", + "06e8e6e26e756fff0b83b226dce974c21f970e44fb5b3e5bbada6e4b12f81cca666f48", + "2f9bd300244f5bc093ba6dcdb4a89fa29da22b1de9d2c9762af919b5fedf6998fbda305b", + "cf6bdcc46d788074511f9e8f0a4b86704365b2d3f98340b8db53920c385b959a38c8869ae7", + "1171e603e5cdeb4cda8fd7890222dd8390ede87b6f3284cac0f0d832d8250c9200715af7913d", + "bda7b2ad5d02bd35ffb009bdd72b7d7bc9c28b3a32f32b0ba31d6cbd3ee87c60b7b98c03404621", + "2001455324e748503aa08eff2fb2e52ae0170e81a6e9368ada054a36ca340fb779393fb045ac72b3", + "45f0761aefafbf87a68f9f1f801148d9bba52616ad5ee8e8ac9207e9846a782f487d5cca8b20355a18", + "3a7e05708be62f087f17b41ac9f20e4ef8115c5ab6d08e84d46af8c273fb46d3ce1aabebae5eea14e018", + "ea318da9d042ca337ccdfb2bee3e96ecb8f907876c8d143e8e44569178353c2e593e4a82c265931ba1dd79", + "e0f7c08f5bd712f87094b04528fadb283d83c9ceb82a3e39ec31c19a42a1a1c3bee5613b5640abe069b0d690", + "d35e63fb1f3f52ab8f7c6cd7c8247e9799042e53922fbaea808ab979fa0c096588cfea3009181d2f93002dfc11", + "b8b0ab69e3ae55a8699eb481dd665b6a2424c89bc6b7cca02d15fdf1b9854139cab49d34de498b50b2c7e8b910cf", + "fb65e3222a2950eae1701d4cdd4736266f65bf2c0d2e77968996eadb60ef74fb786f6234973a2524bdfe32d100aa0e", + "f28b4bb3a2e2c4d5c01a23ff134558559a2d3d704b75402983ee4e0f71d273ae056842c4153b18ee5c47e2bfa54313d4", + "7bb78794e58a53c3e4b1aeb161e756af051583d14e0a5a3205e094b7c9a8cf62d098fa9ea1db12f330a51ab9852c17f983", + "a879a8ebae4d0987789bcc58ec3448e35ba1fa1ee58c668d8295aba4eaeaf2762b053a677e25404f635a53037996974d418a", + "695865b353ec701ecc1cb38f3154489eed0d39829fc192bb68db286d20fa0a64235cde5639137819f7e99f86bd89afcef84a0f", + "a6ec25f369f71176952fb9b33305dc768589a6070463ee4c35996e1ced4964a865a5c3dc8f0d809eab71366450de702318e4834d", + "604749f7bfadb069a036409ffac5ba291fa05be8cba2f141554132f56d9bcb88d1ce12f2004cd3ade1aa66a26e6ef64e327514096d", + "daf9fa7dc2464a899533594e7916fc9bc585bd29dd60c930f3bfa78bc47f6c8439448043a45119fc9228c15bce5fd24f46baf9de736b", + "943ea5647a8666763084da6a6f15dcf0e8dc24f27fd0d9194805d25180fe3a6d98f4b2b5e0d6a04e9b41869817030f16ae975dd41fc35c", + "af4f73cbfc093760dfeb52d57ef45207bbd1a515f5523404e5d95a73c237d97ae65bd195b472de6d514c2c448b12fafc282166da132258e9", + "605f4ed72ed7f5046a342fe4cf6808100d4632e610d59f7ebb016e367d0ff0a95cf45b02c727ba71f147e95212f52046804d376c918cadd260", + "3750d8ab0a6b13f78e51d321dfd1aa801680e958de45b7b977d05732ee39f856b27cb2bcce8fbf3db6666d35e21244c2881fdcc27fbfea6b1672", + "8f1b929e80ab752b58abe9731b7b34eb61369536995abef1c0980d93903c1880da3637d367456895f0cb4769d6de3a979e38ed6f5f6ac4d48e9b32", + "d8469b7aa538b36cdc711a591d60dafecca22bd421973a70e2deef72f69d8014a6f0064eabfbebf5383cbb90f452c6e113d2110e4b1092c54a38b857", + "7d1f1ad2029f4880e1898af8289c23bc933a40863cc4ab697fead79c58b6b8e25b68cf5324579b0fe879fe7a12e6d03907f0140dfe7b29d33d6109ecf1", + "87a77aca6d551642288a0dff66078225ae39d288801607429d6725ca949eed7a6f199dd8a65523b4ee7cfa4187400e96597bfffc3e38ade0ae0ab88536a9", + "e101f43179d8e8546e5ce6a96d7556b7e6b9d4a7d00e7aade5579d085d527ce34a9329551ebcaf6ba946949bbe38e30a62ae344c1950b4bde55306b3bac432", + "4324561d76c370ef35ac36a4adf8f3773a50d86504bd284f71f7ce9e2bc4c1f1d34a7fb2d67561d101955d448b67577eb30dfee96a95c7f921ef53e20be8bc44", + "78f0ed6e220b3da3cc9381563b2f72c8dc830cb0f39a48c6ae479a6a78dcfa94002631dec467e9e9b47cc8f0887eb680e340aec3ec009d4a33d241533c76c8ca8c", + "9f6589c31a472e0a736f4eb22b6c70a9d332cc15304ccb66a6b97cd051b6ed82f8990e1d9bee2e4bb1c3c45e550ae0e7b96e93ae23f2fb8f63b309131e72b36cba6a", + "c138077ee4ed3d7ffa85ba851dfdf6e9843fc1dc00889d117237bfaad9aa757192f73556b959f98e6d24886ce48869f2a01a48c371785f12b6484eb2078f08c22066e1", + "f83e7c9e0954a500576ea1fc90a3db2cbd7994eaef647dab5b34e88ab9dc0b47addbc807b21c8e6dd3d0bd357f008471d4f3e0abb18450e1d4919e03a34545b9643f870e", + "3277a11f2628544fc66f50428f1ad56bcba6ee36ba2ca6ecdf7e255effc0c30235c039d13e01f04cf1efe95b5c2033ab72adda30994b62f2851d17c9920eadca9a251752dc", + "c2a834281a06fe7b730d3a03f90761daf02714c066e33fc07e1f59ac801ec2f4433486b5a2da8faa51a0cf3c34e29b2960cd0013378938dbd47c3a3d12d70db01d7d06c3e91e", + "47680182924a51cabe142a6175c9253e8ba7ea579ece8d9bcb78b1e9ca00db844fa08abcf41702bd758ee2c608d9612fed50e85854469cb4ef3038acf1e35b6ba4390561d8ae82", + "cec45830cd71869e83b109a99a3cd7d935f83a95de7c582f3adbd34e4938fa2f3f922f52f14f169c38cc6618d3f306a8a4d607b345b8a9c48017136fbf825aecf7b620e85f837fae", + "46fb53c70ab105079d5d78dc60eaa30d938f26e4d0b9df122e21ec85deda94744c1daf8038b8a6652d1ff3e7e15376f5abd30e564784a999f665078340d66b0e939e0c2ef03f9c08bb", + "7b0dcb52791a170cc52f2e8b95d8956f325c3751d3ef3b2b83b41d82d4496b46228a750d02b71a96012e56b0720949ca77dc68be9b1ef1ad6d6a5ceb86bf565cb972279039e209dddcdc", + "7153fd43e6b05f5e1a4401e0fef954a737ed142ec2f60bc4daeef9ce73ea1b40a0fcaf1a1e03a3513f930dd5335723632f59f7297fe3a98b68e125eadf478eb045ed9fc4ee566d13f537f5", + "c7f569c79c801dab50e9d9ca6542f25774b3841e49c83efe0b89109f569509ce7887bc0d2b57b50320eb81fab9017f16c4c870e59edb6c26620d93748500231d70a36f48a7c60747ca2d5986", + "0a81e0c547648595adca65623ce783411aac7f7d30c3ad269efafab288e7186f6895261972f5137877669c550f34f5128850ebb50e1884814ea1055ee29a866afd04b2087abed02d9592573428", + "6a7b6769e1f1c95314b0c7fe77013567891bd23416374f23e4f43e27bc4c55cfada13b53b1581948e07fb96a50676baa2756db0988077b0f27d36ac088e0ff0fe72eda1e8eb4b8facff3218d9af0", + "a399474595cb1ccab6107f18e80f03b1707745c7bf769fc9f260094dc9f8bc6fe09271cb0b131ebb2acd073de4a6521c8368e664278be86be216d1622393f23435fae4fbc6a2e7c961282a777c2d75", + "4f0fc590b2755a515ae6b46e9628092369d9c8e589e3239320639aa8f7aa44f8111c7c4b3fdbe6e55e036fbf5ebc9c0aa87a4e66851c11e86f6cbf0bd9eb1c98a378c7a7d3af900f55ee108b59bc9e5c", + "ed96a046f08dd675107331d267379c6fce3c352a9f8d7b243008a74cb4e9410836afaabe871dab6038ca94ce5f6d41fa922ce08aba58169f94cfc86d9f688f396abd24c11a6a9b0830572105a477c33e92", + "379955f539abf0eb2972ee99ed9546c4bbee363403991833005dc27904c271ef22a799bc32cb39f08d2e4ba6717d55153feb692d7c5efae70890bf29d96df02333c7b05ccc314e4835b018fec9141a82c745", + "e16cc8d41b96547ede0d0cf4d908c5fa393399daa4a9696e76a4c1f6a2a9fef70f17fb53551a8145ed88f18db8fe780a079d94732437023f7c1d1849ef69ad536a76204239e8ba5d97e507c36c7d042f87fe0e", + "a81de50750ece3f84536728f227208bf01ec5b7721579d007de72c88ee20663318332efe5bc7c09ad1fa8342be51f0609046ccf760a7957a7d8dc88941adb93666a4521ebe76618e5ddc2dd3261493d400b50073", + "b72c5fb7c7f60d243928fa41a2d711157b96aef290185c64b4de3dcfa3d644da67a8f37c2ac55caad79ec695a473e8b481f658c497edb8a191526592b11a412282d2a4010c90ef4647bd6ce745ebc9244a71d4876b", + "9550703877079c90e200e830f277b605624954c549e729c359ee01ee2b07741ecc4255cb37f96682dafcdbaade1063e2c5ccbd1918fb669926a67744101fb6de3ac016be4c74165a1e5a696b704ba2ebf4a953d44b95", + "a17eb44d4de502dc04a80d5a5e9507d17f27c96467f24c79b06bc98a4c410741d4ac2db98ec02c2a976d788531f1a4451b6c6204cef6dae1b6ebbcd0bde23e6fffb02754043c8fd3c783d90a670b16879ce68b5554fe1c", + "41d3ea1eaba5be4a206732dbb5b70b79b66a6e5908795ad4fb7cf9e67efb13f06fef8f90acb080ce082aadec6a1b543af759ab63fa6f1d3941186482b0c2b312f1151ea8386253a13ed3708093279b8eb04185636488b226", + "5e7cdd8373dc42a243c96013cd29df9283b5f28bb50453a903c85e2ce57f35861bf93f03029072b70dac0804e7d51fd0c578c8d9fa619f1e9ce3d8044f65d55634dba611280c1d5cfb59c836a595c803124f696b07ddfac718", + "26a14c4aa168907cb5de0d12a82e1373a128fb21f2ed11feba108b1bebce934ad63ed89f4ed7ea5e0bc8846e4fc10142f82de0bebd39d68f7874f615c3a9c896bab34190e85df05aaa316e14820b5e478d838fa89dfc94a7fc1e", + "0211dfc3c35881adc170e4ba6daab1b702dff88933db9a6829a76b8f4a7c2a6d658117132a974f0a0b3a38ceea1efc2488da21905345909e1d859921dc2b5054f09bce8eeb91fa2fc6d048ce00b9cd655e6aafbdaa3a2f19270a16", + "ddf015b01b68c4f5f72c3145d54049867d99ee6bef24282abf0eecdb506e295bacf8f23ffa65a4cd891f76a046b9dd82cae43a8d01e18a8dff3b50aeb92672be69d7c087ec1fa2d3b2a39196ea5b49b7baede37a586fea71aded587f", + "6ee721f71ca4dd5c9ce7873c5c04c6ce76a2c824b984251c15535afc96adc9a4d48ca314bfeb6b8ee65092f14cf2a7ca9614e1dcf24c2a7f0f0c11207d3d8aed4af92873b56e8b9ba2fbd659c3f4ca90fa24f113f74a37181bf0fdf758", + "689bd150e65ac123612524f720f54def78c095eaab8a87b8bcc72b443408e3227f5c8e2bd5af9bcac684d497bc3e41b7a022c28fb5458b95e8dfa2e8caccde0492936ff1902476bb7b4ef2125b19aca2cd3384d922d9f36dddbcd96ae0d6", + "3a3c0ef066fa4390ec76ad6be1dc9c31ddf45fef43fbfa1f49b439caa2eb9f3042253a9853e96a9cf86b4f873785a5d2c5d3b05f6501bc876e09031188e05f48937bf3c9b667d14800db62437590b84ce96aa70bb5141ee2ea41b55a6fd944", + "741ce384e5e0edaebb136701ce38b3d33215415197758ae81235307a4115777d4dab23891db530c6d28f63a957428391421f742789a0e04c99c828373d9903b64dd57f26b3a38b67df829ae243feef731ead0abfca049924667fdec49d40f665", + "a513f450d66cd5a48a115aee862c65b26e836f35a5eb6894a80519e2cd96cc4cad8ed7eb922b4fc9bbc55c973089d627b1da9c3a95f6c019ef1d47143cc545b15e4244424be28199c51a5efc7234dcd94e72d229897c392af85f523c2633427825", + "71f1554d2d49bb7bd9e62e71fa049fb54a2c097032f61ebda669b3e1d4593962e47fc62a0ab5d85706aebd6a2f9a192c88aa1ee2f6a46710cf4af6d3c25b7e68ad5c3db23ac009c8f13625ff85dc8e50a9a1b2682d3329330b973ec8cbb7bb73b2bd", + "167cc1067bc08a8d2c1a0c10041ebe1fc327b37043f6bd8f1c63569e9d36ded58519e66b162f34b6d8f1107ef1e3de199d97b36b44141a1fc4f49b883f40507ff11f909a017869dc8a2357fc7336ae68703d25f75710b0ff5f9765321c0fa53a51675c", + "cb859b35dc70e264efaad2a809fea1e71cd4a3f924be3b5a13f8687a1166b538c40b2ad51d5c3e47b0de482497382673140f547068ff0b3b0fb7501209e1bf36082509ae85f60bb98fd02ac50d883a1a8daa704952d83c1f6da60c9624bc7c99912930bf", + "afb1f0c6b7125b04fa2578dd40f60cb411b35ebc7026c702e25b3f0ae3d4695d44cfdf37cb755691dd9c365edadf21ee44245620e6a24d4c2497135b37cd7ac67e3bd0aaee9f63f107746f9b88859ea902bc7d6895406aa2161f480cad56327d0a5bba2836", + "13e9c0522587460d90c7cb354604de8f1bf850e75b4b176bda92862d35ec810861f7d5e7ff6ba9302f2c2c8642ff8b7776a2f53665790f570fcef3cac069a90d50db42227331c4affb33d6c040d75b9aeafc9086eb83ced38bb02c759e95ba08c92b17031288", + "0549812d62d3ed497307673a4806a21060987a4dbbf43d352b9b170a29240954cf04bc3e1e250476e6800b79e843a8bd8253b7d743de01ab336e978d4bea384eaff700ce020691647411b10a60acacb6f8837fb08ad666b8dcc9eaa87ccb42aef6914a3f3bc30a", + "3a263efbe1f2d463f20526e1d0fd735035fd3f808925f058b32c4d8788aeeab9b8ce233b3c34894731cd73361f465bd350395aebcabd2fb63010298ca025d849c1fa3cd573309b74d7f824bbfe383f09db24bcc565f636b877333206a6ad70815c3bef5574c5fc1c", + "3c6a7d8a84ef7e3eaa812fc1eb8e85105467230d2c9e4562edbfd808f4d1ac15d16b786cc6a02959c2bc17149c2ce74c6f85ee5ef22a8a96b9be1f197cffd214c1ab02a06a9227f37cd432579f8c28ff2b5ac91cca8ffe6240932739d56788c354e92c591e1dd76499", + "b571859294b02af17541a0b5e899a5f67d6f5e36d38255bc417486e69240db56b09cf2607fbf4f95d085a779358a8a8b41f36503438c1860c8f361ce0f2783a08b21bd7232b50ca6d35428335272a5c05b436b2631d8d5c84d60e8040083768ce56a250727fb0579dd5c", + "98ee1b7269d2a0dd490ca38d447279870ea55326571a1b430adbb2cf65c492131136f504145df3ab113a13abfb72c33663266b8bc9c458db4bf5d7ef03e1d3b8a99d5de0c024be8fabc8dc4f5dac82a0342d8ed65c329e7018d6997e69e29a01350516c86beaf153da65ac", + "41c5c95f088df320d35269e5bf86d10248f17aec6776f0fe653f1c356aae409788c938befeb67c86d1c8870e8099ca0ce61a80fbb5a6654c44529368f70fc9b9c2f912f5092047d0ffc339577d24142300e34948e086f62e23ecaca410d24f8a36b5c8c5a80e0926bc8aa16a", + "9f93c41f533b2a82a4df893c78faaaa793c1506974ba2a604cd33101713ca4adfd30819ffd8403402b8d40aff78106f3357f3e2c24312c0d3603a17184d7b999fc9908d14d50192aebabd90d05073da7af4be37dd3d81c90acc80e8333df546f17ab6874f1ec204392d1c0571e", + "3da5207245ac270a915fc91cdb314e5a2577c4f8e269c4e701f0d7493ba716de79935918b917a2bd5db98050dbd1eb3894b65fac5abf13e075abebc011e651c03cafb6127147771a5c8418223e1548137a89206635c26ca9c235ccc108dc25cf846e4732444bd0c2782b197b262b", + "96011af3965bb941dc8f749932ea484eccb9ba94e34b39f24c1e80410f96ce1d4f6e0aa5be606def4f54301e930493d4b55d484d93ab9dd4dc2c9cfb79345363af31ad42f4bd1aa6c77b8afc9f0d551bef7570b13b927afe3e7ac4de7603a0876d5edb1ad9be05e9ee8b53941e8f59", + "51dbbf2a7ca224e524e3454fe82ddc901fafd2120fa8603bc343f129484e9600f688586e040566de0351d1693829045232d04ff31aa6b80125c763faab2a9b233313d931903dcfaba490538b06e4688a35886dc24cdd32a13875e6acf45454a8eb8a315ab95e608ad8b6a49aef0e299a", + "5a6a422529e22104681e8b18d64bc0463a45df19ae2633751c7aae412c250f8fb2cd5e1270d3d0cf009c8aa69688ccd4e2b6536f5747a5bc479b20c135bf4e89d33a26118705a614c6be7ecfe766932471ad4ba01c4f045b1abb5070f90ec78439a27a1788db9327d1c32f939e5fb1d5ba", + "5d26c983642093cb12ff0afabd87b7c56e211d01844ad6da3f623b9f20a0c968034299f2a65e6673530c5980a532beb831c7d0697d12760445986681076dfb6fae5f3a4d8f17a0db5008ce8619f566d2cfe4cf2a6d6f9c3664e3a48564a351c0b3c945c5ee24587521e4112c57e318be1b6a", + "52641dbc6e36be4d905d8d60311e303e8e859cc47901ce30d6f67f152343e3c4030e3a33463793c19effd81fb7c4d631a9479a7505a983a052b1e948ce093b30efa595fab3a00f4cef9a2f664ceeb07ec61719212d58966bca9f00a7d7a8cb4024cf6476bab7fbccee5fd4e7c3f5e2b2975aa2", + "a34ce135b37bf3db1c4aaa4878b4499bd2ee17b85578fcaf605d41e1826b45fdaa1b083d8235dc642787f11469a5493e36806504fe2a2063905e821475e2d5ee217057950370492f5024995e77b82aa51b4f5bd8ea24dc71e0a8a640b0592c0d80c24a726169cf0a10b40944747113d03b52708c", + "46b3cdf4946e15a5334fc3244d6680f5fc132afa67bf43bfade23d0c9e0ec64e7dab76faaeca1870c05f96b7d019411d8b0873d9fed04fa5057c039d5949a4d592827f619471359d6171691cfa8a5d7cb07ef2804f6ccad4821c56d4988bea7765f660f09ef87405f0a80bcf8559efa111f2a0b419", + "8b9fc21691477f11252fca050b121c5334eb4280aa11659e267297de1fec2b2294c7ccee9b59a149b9930b08bd320d3943130930a7d931b71d2f10234f4480c67f1de883d9894ada5ed5071660e221d78ae402f1f05af47761e13fec979f2671e3c63fb0ae7aa1327cf9b8313adab90794a52686bbc4", + "cd6598924ce847de7ff45b20ac940aa6292a8a99b56a74eddc24f2cfb45797188614a21d4e8867e23ff75afd7cd324248d58fcf1ddc73fbd115dfa8c09e62022fab540a59f87c989c12a86ded05130939f00cd2f3b512963dfe0289f0e54acad881c1027d2a0292138fdee902d67d9669c0ca1034a9456", + "594e1cd7337248704e691854af0fdb021067ddf7832b049ba7b684438c32b029eded2df2c89a6ff5f2f2c311522ae2dc6db5a815afc60637b15ec24ef9541f1550409db2a006da3affffe548a1eaee7bd114e9b805d0756c8e90c4dc33cb05226bc2b393b18d953f8730d4c7ae693159cdba758ad28964e2", + "1f0d292453f04406ada8be4c161b82e3cdd69099a8637659e0ee40b8f6da46005cfc6085db9804852decfbe9f7b4dda019a7112612895a144ed430a960c8b2f5458d3d56b7f427cee6358915aee7146278aed2a0296cdd929e4d21ef95a3adf8b7a6beba673cdccdbdcfb2474711732d972ad054b2dc64f38d", + "b65a72d4e1f9f9f75911cc46ad0806b9b18c87d105332a3fe183f45f063a746c892dc6c4b9181b1485b3e3a2cc3b453eba2d4c39d6905a774ed3fb755468beb190925ecd8e57ecb0d985125741650c6b6a1b2a3a50e93e3892c21d47ed5884eed83aa94e1602288f2f49fe286624de9d01fcb54433a0dc4ad70b", + "705ce0ffa469250782aff725248fc88fe98eb76659e8407edc1c4842c9867d61fe64fb86f74e980598b92bc213d06f337bd5654fc28643c7ba769a4c31563427543c00808b627a19c90d86c322f33566ce020121cc322229c3337943d46f68ef939d613dcef0077269f88151d6398b6b009abb763410b154ad76a3", + "7fa881ce87498440ab6af13854f0d851a7e0404de33896999a9b3292a5d2f5b3ad033530c558168fe5d2fdb9b89a2354c46cf32a0e612afc6c6485d789511bfef26800c74bf1a4cfbe30bda310d5f6029c3dccdedb6149e4971274e276dccfabd63bc4b9955e8303feb57f8a688db55ecb4b33d1f9fe1b3a8ba7ac32", + "23a98f71c01c0408ae16843dc03be7db0aeaf055f951709d4e0dfdf64fffbffaf900ee592ee10929648e56f6c1e9f5be5793f7df66453eb56502c7c56c0f0c88da77abc8fa371e434104627ef7c663c49f40998dbad63fa6c7aa4fac17ae138d8bbe081f9bd168cd33c1fbc92fa35ed687679f48a64b87db1fe5bae675", + "7b8970b6a33237e5a7bcb39272703edb92285c55842b30b9a48834b1b507cc02a6764739f2f7ee6ae02a7b715a1c455e59e8c77a1ae98abb10161853f1234d20da99016588cd8602d6b7ec7e177d4011edfa61e6b3766a3c6f8d6e9eac893c568903eb6e6aba9c4725774f6b4343b7acaa6c031593a36eef6c72806ff309", + "f7f4d328ba108b7b1de4443e889a985ed52f485f3ca4e0c246aa5526590cbed344e9f4fe53e4eea0e761c82324649206ca8c2b45152157d4115e68c818644b03b65bb47ad79f94d37cb03c1d953b74c2b8adfa0e1c418bda9c518ddcd7050e0f149044740a2b16479413b63fc13c36144f80c73687513dca761ba8642a8ae0", + "2d7dc80c19a1d12d5fe3963569547a5d1d3e821e6f06c5d5e2c09401f946c9f7e13cd019f2f9a878b62dd850453b6294b99ccaa068e542993524b0f63832d48e865be31e8ec1ee103c718340c904b32efb69170b67f038d50a3252794b1b4076c0620621ab3d91215d55ffea99f23d54e161a90d8d4902fda5931d9f6a27146a", + "77dff4c7ad30c954338c4b23639dae4b275086cbe654d401a2343528065e4c9f1f2eca22aa025d49ca823e76fdbb35df78b1e5075ff2c82b680bca385c6d57f7ea7d1030bb392527b25dd73e9eeff97bea397cf3b9dda0c817a9c870ed12c006cc054968c64000e0da874e9b7d7d621b0679866912243ea096c7b38a1344e98f74", + "83bed0d556798f2b419f7056e6d3ffada06e939b95a688d0ec8c6ac5ea45ab73a4cf01043e0a170766e21395f27ab4b78c435f5f0dfe6e93ab80df38610e41158429ddf20296f53a06a017723359fe22dc08b5da33f0800a4fe50118e8d7eab2f83a85cd764bf8a166903bd0e9dcfeeceba44ff4ca4439846458d31ea2bb564645d1", + "ea12cf5a113543e39504123036f15a5bafa9c555562469f99cd29996a4dfaaab2a34b00557ccf15f37fc0cc1b3be427e725f2cd952e50af7970dda9200cd5ce252b1f29c40067fea3027ed686190803b59d834179d1b8f5b55abe55ad174b2a1188f7753ec0ae2fc01316e7d498b68ee3598a0e9baaaa664a60f7fb4f90edbed494ad7", + "55266358332d8d9e68bd13432088beadf95833aab67a0eb3b10650414255f299e2670c3e1a5b2976159a46c72a7ce57d59b7be14c15798e09ed50fa312a431b0264d7a1396aa6168bde897e208ece53d2cfc83786113b1e6eac5e9bb98984abb6c8d64eebb991903254abc650c999bb9958a5d7937434b869bc940e21b9dc1cc8982f2ba", + "4d6104ded730aefe02873f4c741232c8234a6d66d85393aff57fbf56ba6347666988dfc4d58f3cc895a0da598822edeee4533d24ec0ee292fd5e1ad04898ffbc1ff4bef14dec220babcb0f28fffe32a6e2c28aaaac16442bf4feb02917d18bb3a415d84fa9358d5a9852688d846c92271911f934181c30f82434d915f93f155a1ffbf0b125", + "eb5f579a4c476af554aac11e5719d378549497e613b35a929d6f36bb8831d7a466aa76de9be24ebb55543f1c13924f64cfd648a5b3fa90387315c16174dbf1e9a183c196d9bb8f84af65f1f8212429aadc11ef2426d07d4716062b85c8d5d2dff8e21b9e62b7fa7dbd57d72633054b464fb28583a56ca13ccc5ddc74dae942492f31731e7046", + "ebddec3dcaf18063e45a76ebeac39af85a1adc2818881ccce48c106288f5988365cca2b4b1d7f037322da46840f42bebdcbc7193838d426e101087d8cea03aaff743d573eb4f4e9a71a2c884390769a6503874125d194bee8d46a3a0d5e4fcf28ff8465887d8e9df771d70157e75df3642b331d2778ceb32ceba868640171ab7a5d22eede1ee44", + "26d87ec70b57691e3bb359633d3ddba17f029d62cdfe977f5fd42274d79b444a32494d1c01e9f72d03cce78c806df96e93ea78da3a054209924ed765edc4d570f66168dc25ee3114e4017e387440349c8f0a94804761c3055f88e4fda2a49b860b1486a9609095f6250f268b6a4d1aecc03a505632ebf0b9dc22d0755a736faf7ad7000858b5864b", + "3880f5cc2d08fa70ef44b1f263fcf534d062a298c1bd5ee2eee8c3265806c4ce50b004f3a1fc1fa5b024aaac7f528c023c8181f67c6e1c357425dc4d573bd46b93a542afa3a19bdb140a2ce666e1a01f5c4d2dcd681fa9f5839b797813c394738d5ee4971386c12c7c117d17c7bec324b760aa30cda9ab2aa850284ba6fa97946f710f02449d1883c6", + "3317d2f452105dd3f4a96f9257af8285a80be58066b50f6f54bd633749b49f6ab9d57d45652d2ae852a2f6940cd5ec3159dd7f333358b12f502325df38843508faf7e246352d201280babd90b14fbf7722641c3601d0e458474439973c611bb5502fd0eb3078f87124ca7e1a016fcb6cfeff65f6a565985aca7122cfa8c5a11da0cb47797c5132333179", + "f2c5c955d0224e784a46b9125f8fef8a5e1271e145eb08bbbd07ca8e1cfc848cef14fa3b36221ac62006403dbb7f7d77958ccc54a8566c837858b809f3e310ace8ca682515bc655d2a397cab238a663b464d511f02dc5d033dad4cb5e0e519e94a54b62a3896e460ec70e5716b5921bf8396aa86a60123e6287e34570bb01bdc602e113670bf498af2ff10", + "180e275205691a83630cf4b0c7b80e6df8fad6ef1c23ba8013d2f09aef7abade1827f23af230de90676240b4b3b0673f8afdea0327330055041741f65560d90348de696d34ca80dfe8afae582fe4879d4594b80e9408fb53e800e01ca58552b905c365e7f1416e51c080f517d6bbd30e64ae1535d59decdc76c6624d737868f49f2f719da39ba1344d59eab9", + "c517a84e4631a7f65ace170d1e5c2fdb259841535d88da323e68c0883e6af7b041cfe05908815a5a9d1b14fa712c2c16fadcf1ca54d3aa954d411240df331b2aebdfb65aced84d0b8aace56ec0aa7c13ec7d75ca883b6bcf6db74c9e98463c484a8262684f29910373430651f90ecffe18b072170e61ee58de20e2a6ff67b3ab00fccbb80af943f20b56b98107", + "d1a56a5ee990e02b84b5862fde62f69ec07567be2d7ccb769a461c4989d11fdda6c945d942fb8b2da795ed97e43a5b7dbdde7f8fd2ff7154544336d5c50fb7380341e660d4898c7fbc39b2b782f28defac6873523c7c1de8e52c65e4395c686ba483c35a220b0416d46357a063fa4c33fa9c52d5c207a1304ae141c791e62ba6a7374ed922b8dd94079b72b69302", + "4720b88d6bfb1ab43958e26827730d852d9ec30173ebd0fe0d273edcece2e788558984cd9306fe5978086a5cb6d37975755d2a3daeb16f99a8a11544b8247a8b7ed5587afc5bea1daf85dcea5703c5905cf56ae7cc76408ccabb8fcc25cacc5ff456db3f62fa559c45b9c71505eb5073df1f10fc4c9060843f0cd68bbb4e8edfb48d0fd81d9c21e53b28a2aae4f7ba", + "f4639b511db9e092823d47d2947efacbaae0e5b912dec3b284d2350b9262f3a51796a0cd9f8bc5a65879d6578ec24a060e293100c2e12ad82d5b2a0e9d22965858030e7cdf2ab3562bfa8ac084c6e8237aa22f54b94c4e92d69f22169ced6c85a293f5e16bfc326153bf629cdd6393675c6627cd949cd367eef02e0f54779f4d5210197698e4754a5fe490a3a7521c1c", + "3d9e7a860a718565e3670c29079ce80e381969fea91017cfd5952e0d8a4a79bb08e2cd1e26161f30ee03a24891d1bfa8c212861b51618d07429fb48000ff87ef09c6fca526567777e9c076d58a642d5c521b1caa5fb0fb3a4b8982dc14a444732b72b239b8f01fc8ba8ee86b3013b5d3e98a92b2aeaecd4879fca5d5e9e0bd880dbfffa6f96f94f3998812aac6a714f331", + "4d9bf551d7fd531e7482e2ec875c0651b0bcc6caa738f7497befd11e67ae0e036c9d7ae4301cc3c7906f0d0e1ed4738753f414f9b3cd9b8a71176e325c4c74ce020680ecbfb146889597f5b40487e93f974cd866817fb9fb24c7c7c16177e6e120bfe349e83aa82ba40e59e917565788658a2b254f25cf99bc65070b3794cea2259eb10e42bb54852cba3110baa773dcd70c", + "b91f65ab5bc059bfa5b43b6ebae243b1c46826f3da061338b5af02b2da76bb5ebad2b426de3c3134a633499c7c36a120369727cb48a0c6cbab0acecdda137057159aa117a5d687c4286868f561a272e0c18966b2fec3e55d75abea818ce2d339e26adc005c2658493fe06271ad0cc33fcb25065e6a2a286af45a518aee5e2532f81ec9256f93ff2d0d41c9b9a2efdb1a2af899", + "736f6e387acb9acbee026a6080f8a9eb8dbb5d7c54ac7053ce75dd184b2cb7b942e22a3497419ddb3a04cf9e4eb9340a1a6f9474c06ee1dcfc8513979fee1fc4768087617fd424f4d65f54782c787a1d2de6efc81534343e855f20b3f3589027a5436201eee747d45b9b8375e4294d72ab6a52e04dfbb2914db92ee58f134b026527ed52d4f794459e02a43a17b0d51ea69bd7f3", + "9242d3eb31d26d923b99d66954cfade94f25a18912e6356810b63b971ae74bb53bc58b3c01424208ea1e0b1499936daea27e63d904f9ed65fdf69de40780a3027b2e89d94bdf214f585472613ce328f628f4f0d56217dfb53db5f7a07f54c8d71db16e27de7cdb8d23988837b49b65c12f1771d979e8b192c9f4a16b8d9fba917bcf74ce5a82aac2075608ba6c2d485fa59864b9de", + "5da68704f4b592d41f08aca08f62d85e2e2466e5f3be010315d11d113db674c4b98764a509a2f5aacc7ae72c9deff2bcc42810b47f64d429b35745b9efff0b18c58653461e968aaa3c2c7fc455bc5771a8f10cd184be831040df767201ab8d32cb9a58c89afbebecb524502c9b940c1b838f8361bbcde90d272715017f67609ea39b20fac985332d82daaa023999e3f8bfa5f3758bb8", + "71ea2af9c8ac2e5ae44a176662882e01027ca3cdb41ec2c6785606a07d7231cd4a2bded7155c2feef3d44d8fd42afa73265cef826f6e03aa761c5c51d5b1f129ddc27503ff50d9c2d748322df4b13dd5cdc7d46381528ab22b79b0049011e4d2e57fe2735e0d58d8d56e92c75dbeac8c76c4239d7f3f24fb56697593b3e4afa6671d5bbc96c079a1c154fe20212ade67b05d49ceaa7a84", + "1d133170582fa4bff59a21953ebbc01bc202d43cd79c083d1f5c02fa15a43a0f519e36acb710bdabac880f04bc003800641c2487930de9c03c0e0deb347fa815efca0a38c6c5de694db698743bc955581f6a945deec4ae988ef7cdf40498b77796ddea3fae0ea844891ab751c7ee20917c5a4af53cd4ebd82170078f41ada2795e6eea17593fa90cbf5290a1095e299fc7f507f360f187cd", + "5ec4ac45d48fc15c72471d795066bdf8e99a483d5fdd599511b9cdc408de7c0616491b73924d0266da34a495331a935c4b8884f57d7ad8cce4cbe586875aa52482215ed39d7626cce55d50349c7767981c8bd6890f132a196184247343566fc972b86fe3c5369d6a6519e9f07942f0522b77ad01c751dcf7defe31e471a0ec00963765dd8518144a3b8c3c978ad108056516a25dbe3092e73c", + "0d5e74b78290c689f2b3cfea45fc9b6a84c822639cd438a7f05c07c374adced42cdc12d2a9233a4ffe80307efc1ac13cb04300e165f8d90dd01c0ea955e7657332c6e86ad6b43e78ba4c13c675aed83192d8427866fb6484e6a3071b2369a46fba9005f31232da7ffec7952f831aaaddf63e225263531c2cf387f8cc14fa856c8795137142c3a52ffa69b8e30ebc88ce3bbc227597bcc8dddd89", + "a0fe36f983259921dc2fa7d89002b3066241d63bfc2448caf7e10522a35562be0bfedc3dce49cfce2e614a04d4c64cfc0ab898873a7fc26928dc1927c009d12f6f9b7a278205d3d0057604f4ac746f8b9287c3bc6b929832bf253b6586192ac43fdd29ba585dbd9059aab9c6ff6000a7867c67fec1457b733f6b620881166b8fed92bc8d84f0426002e7be7fcd6ee0abf3755e2babfe5636ca0b37", + "1d29b6d8eca793bb801becf90b7d7de215b17618ec32340da4bac707cdbb58b951d5036ec02e105d83b5960e2a72002d19b7fa8e1128cc7c5049ed1f76b82a59eac6ed09e56eb73d9ade38a6739f0e07155afa6ec0d9f5cf13c4b30f5f9a465b162a9c3ba04b5a0b3363c2a63f13f2a3b57c590ec6aa7f64f4dcf7f1582d0ca157eb3b3e53b20e306b1f24e9bda87397d413f01b453ceffeca1fb1e7", + "6a2860c110cd0fc5a19bcaafcd30762ee10242d34739638e716bd89fd537ea4dc630e6f85d1bd88a25ad3892ca554c232c9830bd56980c9f08d378d28f7fa6fa7df4fcbf6ad98b1adfff3ec1f63310e50f920c99a5200b8e64c2c2ca249399a149942261f737d5d72da949e914c024d57c4b639cb89990fed2b38a37e5bcd24d17ca12dfcd36ce04691fd03c32f6ed5de2a2191ed7c826375ba81f78d0", + "7132aa291ddc9210c60dbe7eb3c19f9053f2dd74742cf57fdc5df98312adbf4710a73245de4a0c3b24e21ab8b466a77ae29d15500d5142555ef3088cbccbe685ed9119a10755148f0b9f0dbcf02b2b9bcadc8517c88346ea4e78285e9cbab122f824cc18faf53b742a87c008bb6aa47eed8e1c8709b8c2b9adb4cc4f07fb423e5830a8e503ab4f7945a2a02ab0a019b65d4fd71dc364d07bdc6e637990e3", + "3e664da330f2c6007bff0d5101d88288aaacd3c07913c09e871cce16e55a39fde1ce4db6b8379977c46cce08983ca686778afe0a77a41baf447854b9aa286c398c2b83c95a127b053101b6799c1638e5efd67273b2618df6ec0b96d8d040e8c1ee01a99b9b5c8fe63fea2f749e6c90d31f6fae4e1469ac09884c4fe1a8539acb313f42c941224a0e79c059e18affc2bcb6724975c436f7bf949ebdd8aef51c", + "7a6ea63a271eb49470f5ce77519ed61ae9b2f1be07a96855726bc3df1d0723af3a703fdfc2e739c9d31d25814daf661a23558b50982e66ee37ad880f5c8f11c8130fac8a5d0250583700d5a324894fae6d61993f6bf9327214f8674649f355b23fd634940b2c467973a839e659169c773119919f5b81ee171edb2e5f6940d7551f9e5a70625d9ea88711ad0ed8ab2da720ad358bef954456cb2d5636425717c2", + "c5106bbda114168c449172e49590c7eeb827fa4e1a2a7a87a3c1f721a9047d0c0a50fbf244731be1b7eb1a2ef30f5ae846a9f38f0df44f32af61b68dbdcd0226e741dfb6ef81a2503691af5e4b3171f48c59ba4ef91eba344b5b697f261df7bbbb734ca6e6daebaa4a179feb17002823281b8534d55a6531c59305f6e3fd3fa63b747bcf0deb654c392a02fe687a269effb1238f38bcaea6b208b221c45fe7fbe7", + "597716a5ebeebc4bf524c15518816f0b5dcda39cc833c3d66b6368ce39f3fd02ceba8d12072bfe6137c68d3acd50c849873150928b320b4fbc31c1456679ea1d0acaeeabf666d1f1bad3e6b9312c5cbdecf9b799d3e30b0316bed5f41245107b693366accc8b2bcef2a6be54209ffabc0bb6f93377abdcd57d1b25a89e046f16d8fd00f99d1c0cd247aafa72234386ae484510c084ee609f08aad32a005a0a5710cb", + "0771ffe789f4135704b6970b617bae41666bc9a6939d47bd04282e140d5a861c44cf05e0aa57190f5b02e298f1431265a365d29e3127d6fccd86ec0df600e26bcdda2d8f487d2e4b38fbb20f1667591f9b5730930788f2691b9ee1564829d1ada15fffc53e785e0c5e5dd11705a5a71e390ca66f4a592785be188fefe89b4bd085b2024b22a210cb7f4a71c2ad215f082ec63746c7367c22aedb5601f513d9f1ffc1f3", + "be6556c94313739c115895a7bad2b620c0708e24f0390daa55521c31d2c6782acf41156271238885c367a57c72b4fe999c160e804ad58d8e565edbce14a2dd90e443eb80626b3eab9d7ab75d6f8a062d7ca89b7af8eb292c98eaf87ad1dfd0db103d1bb6188bd7e7a63502153cf3ce23d43b60c5782602bac8ad92fb2324f5a79453898c5de18415639ecc5c7974d3077f76fc1df5b956723bb19a624d7ea3ec13ba3d86", + "4bc33729f14cd2f1dc2ff459abee8f6860dda1062845e4adab78b53c835d106bdfa35dd9e77219eaef403d4e80488ca6bd1c93dd76ef9d543fbb7c8904dccc5f71509a6214f73d0f4e467c3e038ea639b29e7fc442ee29f57117740576188ada15a739827c647a46b0271817ab235c023c30c90f2115e5c90cd8501e7b286962fc66ffc3fe7e8978746168314908a41998bd83a1eeffda9d714b864f4d490fdeb9c7a6edfa", + "ab12faea205b3d3a803cf6cb32b9698c32301a1e7f7c6c23a20174c95e98b7c3cfe93fffb3c970face8f5751312a261741141b948d777b8a2ea286fe69fc8ac84d34116a4674bb09a1a0b6af90a748e511749de4697908f4acb22be08e96ebc58ab1690acf73914286c198a2b57f1dd70ea8a52325d3045b8bdfe9a09792521526b7564a2a5fcd01e291f1f8894017ce7d3e8a5dba15332fb410fcfc8d62195a48a9e7c86fc4", + "7d421e59a567af70594757a49809a9c22e07fe14061090b9a041875bb77933deae36c823a9b47044fa0599187c75426b6b5ed94982ab1af7882d9e952eca399ee80a8903c4bc8ebe7a0fb035b6b26a2a013536e57fa9c94b16f8c2753c9dd79fb568f638966b06da81ce87cd77ac0793b7a36c45b8687c995bf4414d28289dbee977e77bf05d931b4feaa359a397ca41be529910077c8d498e0e8fb06e8e660cc6ebf07b77a02f", + "0c18ab727725d62fd3a2714b7185c09faca130438eff1675b38beca7f93a6962d7b98cb300ea33067a2035cdd694348784aa2eda2f16c731eca119a050d3b3ce7d5c0fd6c234354a1da98c0642451922f670984d035f8c6f35031d6188bbeb31a95e99e21b26f6eb5e2af3c7f8eea426357b3b5f83e0029f4c4732bca366c9aa625748297f039327c276cd8d9c9bf692a47af098aa50ca97b99961bef8bc2a7a802e0b8cfdb84319", + "92d5909d18a8b2b9971cd1627b461e98a74ba377186a6a9df5bd133635250b300abccb2254cacb775df6d99f7c7d0952653c28e6909b9f9a45adce691f7adc1afffcd9b06e49f775364cc2c62825b9c1a86089080e26b57e732aac98d80d009bfe50df01b95205aa07ed8ec5c873da3b92d00d53af825aa64b3c634c5ece40bff152c331222d3453fd92e0ca17cef19ecb96a6eed4961b627aca48b12fecd091754f770d52ba861546", + "802f22e4a388e874927fef24c797408254e03910bab5bf372320207f8067f2b1ea543917d4a27df89f5bf936ba12e04302bde23119533d0976beca9e20cc16b4dbf17a2ddc44b66aba76c61ad59d5e90de02a88327ead0a8b75463a1a68e307a6e2e53ecc1986274b9ee80bc9f3140671d5285bc5fb57b281042a8978a1175900c6073fd7bd740122956602c1aa773dd2896674d0a6beab24454b107f7c847acb31a0d332b4dfc5e3f2f", + "3844fe65db11c92fb90bf15e2e0cd216b5b5be91604baf3b84a0ca480e41ecfaca3709b32f8c6e8761406a635b88eec91e075c48799a16ca08f295d9766d74475c47f3f2a274eae8a6ee1d191a7f37ee413a4bf42cad52acd5564a651715ae42ac2cddd52f819c692ecdef52ecb763270322cdca7bd5aef71428fa73e844568b96b43c89bf1ed42a0abf209ffad0eeec286c6f141e8af073ba4adfbbdeda253752ae36c9957dfc905b4c49", + "329377f7bf3c8d74991a7d61b0cf39baff5d485d79751b0d5ad017d23bec570fb19810105bab79ab5acb102ab972165224d4ec888ec7de5148077fa9c1bb6820e0d91ae4e2591a21fec2f820606ce4bafc1e377f8dc3a5bd1a9e2772a57abccd0b757164d768872c91d02789545ab5b203f688d71dd08522a3fd2f5bcd7df507aebf1ca27ddff0a82afb7aa9c180008f49d1325adf97d047e77238fc75f56356de4e87d8c961575c9f6362c9", + "f7f269929b0d71ea8eef7120e55ccba691c582dd534692abef35c0fe9dec7dae973cd9702e5ad420d278fe0e653fdcb22fdcb63148109ec7e94f2d0750b28157dd1764376ae10fdb0a4aef3b304bd82793e0595f941226a2d72abbc929f53134dc495b0d65ced409914f94c2523f3dfbbdeeac84ae247ab5d1b9ea33dce1a808885a55be1f3683b46f4be73d9b62eec2585f690056858dfc427aabf591cd276724885bcd4c00b93bb51fb7484d", + "ac022309aa2c4d7fb628255b8b7fb4c3e3ae64b1cb65e0de711a6def1653d95d8088871cb8905fe8ae76423604988a8f77589f3f776dc1e4b30dbe9dd262b2187db02518a132d219bd1a06ebac13132b5164b6c420b37dd2ccee7d69b3b7fa12e54f0a53b853d490a68379ea1fa2d79762830ffb71bf86aab506b51f85c4b6a41b69325c7d0c7aa85b93b7144489d213e8f33dbb879fce22849865337b620b155cb2d2d36a68832889e30194d36d", + "d009c2b78a8f02e5e5dbb586ef71fc324b375092e15913ca1a5bfd22d516baadb96867bee3562e77c4a4852344a1a76c30728be5e22400b4cc41711f66754c246a520498d8c24f0205b9c873748dbeb67fe1ad099ad04cf89f4b517f0aa481136d9f6de2d727df01c6aa4099da59d4382b51e25fd47c33d9842c32b62331e50794bfe8b61b3ba9de1b8b704779c6d65edff3af00f121ab4a7ea384edabe47c6d0098a48991f387ca4444135ec59d46", + "c00bab36cce69899817d1425016d222d7303197ed3e3fdcac744705e7f178a1ac745968900f69299163e19b3161f3e0a4cc55aa2e4e71e0ee6ac427d1f4d14e063f68d303ddfbb18118335cfa7a6a90d99c38319ee76f7a884846a9e0b68030bf28e78bfbd56359b9368842814da42b04cb0e307d5d846dc22f049147bae31b9a956d17676a8cc348dafa3cabc2007a30e730e3894dddf9999fb8819086311f0703e141613ed6dcd7af8510e2dc435b0", + "c9789152a9fc29698d49ed95f09bd11b75f18a8c5615a73dbe54ae5e550027fd0ae6a8b60667040c1b12de3d1ee3f6bf061c78c951a3210effc912e19f482dd4de152063c588c44903bc11761706fd935afa040df085b08144d83d0dde32b46ab52f4fae98ac116c7ff11d7f553450c2e37b9c5f0b1dd9e0b8640a24cba6f2a5246c41f197f46e3dc8a29131c79bef3351c6e277a0a34442274d546ccd058891277473d668420f121750d19cd684267405", + "06a15a0731ce52557e368bcbaa11ef3399299e36fb9f2eda6e5726907c1d29c5c6fc581405ba48c7e2e522206a8f128d7c1c939d1132a00bd7d6366aa82724e968964eb2e373563f607dfa649590dcf5589114df69da5547fef8d1604cc4c6de1ed5783c8746918a4dd31168d6bc8784cd0c769206bd803d6ca8557b66748770402b075ef44b38157d4c0da7c6281725a2065d087b1f7b23455fa673bdeeba45b983311c44eabe9ef4b7bde3420ae9881863", + "d08aacef2d7a41aec09473bd8a44f628e15addb7b9e5b77a1e09c8ab4942f379a0bfcb324d580b774666f18ae78dd36710824ff12393f059068fe4b559c53662c2b0e6c69e23785c8f32554e837ec1714bee902e60737b639dd933af4f68cb9d7de77e1f3b28e5b122891afce62b79acd5b1ab4ba411662cc77d806449e69c5a45a143b742d98ac84a0826d68433b9b700ace6cd472ba2d58a90847f42ce9c43f38ffc017db4bf40450b2eee1f4594dc740c0f", + "6a6058b0a498b7ea76a93c646eb9b8629f0cba4a0c726420c5f67ba9b0412cade356abdf0a4fb94384bad32ce0d5dd9e23dcaae1d6f28ff8683616b30f1392890c67b3a2c04b360893b801f127e527e4da82e239f4c878da13f4a4f1c76db07190e77ec123995168102fb274434a2d1e12913b9b5cbab4aacaad2bd89d88b3ca2b8e60dacf7c22c9379097ff60880f552e320ca3b571994f52534470feee2b39e0dadb5cd88257a3e459a4cc6f12f17b8d54e1bb", + "adeced01fc5671531cbb45679f5ddd42b3a95151677b6125aaf6f5e8f82fbabaa5ecf7c3552c2458587224f0042870f178f5fca5465250e75d71352e652eeed23cdb7f915f5ebb44099b6db116ca1be45530ac8ed32b7f161d60ed4397ad3d7d649ae6bf75ca5bec891d8e595605be9764f3a03965e1fe0eaffbf212e3df4f0fa35e08ff9d0091e6d4ac4748edfe43b611085a6ffec163014655fdd839fd9e81b63b1fa8cae4ec335ec343289758e389a79ceedfae", + "d014592f3a83ba40af366f137c674724916c3cdd3f6cf9d4c5c7c8d6d51ebf26e315e2c12b3546be56fb52382904046ecbd2f5b883aa4ff473de6f0c26ab862c3fa34bf3d880cc1911ce39a4088c6617c179dc5faf68a2c488bbde12d67b50f73abcfab0e3b062e68c95363e11f5f1de8ec36ed01ea21442518089045df67d346135283ad5b3fff80cf57f20876849f6db9fa139728358415a90610f69ec720fc92d8234e3e122551e9df2c644c4a2c4e3734d07de8e", + "c0d0c37838873ba8757d6e41b409605043bc1635edcd731219587676d94217e9f0ab44b71de25000661ce7303b7015f45e6eaa7b7ebef92b8f4a34c902c908d2172185505fa33aca5a41be83079316cdfdd430fc2c45f505f85d867e6d516f7e1bf19c001d9f43018968aab65ec031b3801399231c83ec9e622dab5629922a6b424cab938c135ff7310501c2c02971bfd2f577e25904d1a618baf0859f77f4e8b1d0cde9544e95ec52ff710c0672fdb3d891feeea2b017", + "7022e7f00902219ba97baa0e940e8ac7727f58955aa068c29680fac4a16bcd812c03eeb5adbcfe867a7f7c6b5d89f4641adb9173b76a1a8438866f9b4f640ce2aedf5f1080c890bcf515b4be4e3e512352f1e5323c62ec46cb73f3d71be8235fee55a154763f7c3f9aeb61ffd28f4cd93d3310f608e2133586bf1ab3f102de96f64c68a4668de8acb2a76a7ce0cddddc8fa3df5e9d230823da16ed9ebb402d36e38e6e018795e5a71517ecab5f9ca472b9ced8ff69d2d195", + "acaf4baf3681ab865ab9abfae41697141ead9d5e98523c2e0e1eeb6373dd15405242a3393611e19b693cabaa4e45ac866cc66663a6e898dc73095a4132d43fb78ff7166724f06562fc6c546c78f2d5087467fcfb780478ec871ac38d9516c2f62bdb66c00218747e959b24f1f1795fafe39ee4109a1f84e3f82e96436a3f8e2c74ef1a665b0daaa459c7a80757b52c905e2fb4e30c4a3f882e87bce35d70e2925a1671205c28c89886a49e045e31434abaab4a7aed077ff22c", + "84cb6ec8a2da4f6c3b15edf77f9af9e44e13d67acc17b24bd4c7a33980f37050c0301ba3aa15ad92efe842cd3ebd3636cf945bb1f199fe0682037b9dacf86f162dadabfa625239c37f8b8db9901df0e618ff56fa62a57499f7ba83baebc085eaf3dda850835520344a67e09419368d81012168e5de5ea45158397af9a5c6a1657b26f319b66f816cd2c28996547d697e8df2bb163ccb9dda4d6691dffd102a13667ab9cde60ffbfb872187d9c425a7f67c1d9fffff9276ed0aeb", + "6a52c9bbbba454c14540b2be58230d78ecbeb391646a0c6fcce2f789086a78364b81ae85d5396d7cfa8b46bda41e3083ec5cf7b4c47dc601c8a697df52f557defca248506dbebab25657f5a561d09625b7f4b2f0119a12beeac087efc9d350a735c35d2431c1da7dda99befb17f41a3dc4da0f00bb95366be128538ce27763d81f832fe3c1d4efc07b5b08ad8dc9e65fb5e48546664e18cb2d3bb3fe1f56fa7aae718c5e3bbdeaf70e15023f6a25b72a2d177fcfd04211d40664fe", + "c3c4d3b31f1f5f9538923df3478c84fffaef411520a542da9a220ee4132eabb9d718b5076fb2f985485e8ba058330aed27ddfd3afa3db34aa60301088caec3d0053828c0c2bc87e2e61db5ea5a29f62fdad9c8b5fc5063ec4ee865e5b2e35fac0c7a835d5f57a1b1079833c25fc38fcb14311c54f8a3bd251bca19342d69e5785f9c2e43cf189d421c76c8e8db925d70fa0fae5ee3a28c4047c23a2b8a167ce53f35ced33bec822b88b06f41558c47d4fed1bfa3e21eb060df4d8ba1", + "8d55e92136992ba23856c1aea109766fc44772477efc932b3194af2265e433ed77d63b44d2a1cff2e8680eff120a430fe012f0f09c6201d546e13ad46fc4ce910eab27bb1569879abed2d9c37fae9f1267c2216ec5debcb20d4de58461a621e6ce8946899de81c0add44d35e27b7982a97f2a5e6314901caebe41dbba35f48bc9244ca6dca2bdde7306435892f287036df088633a070c2e385815ab3e2bfc1a47c05a5b9fe0e80dd6e38e4713a70c8f82bd32475eea8400c7bc67f59cf", + "5016284e20362610fa05ca9d789cad25f6d43263787e7e085476764ce4a8908ce99b262b375e9d106170b1bec1f473d5e777e0c1896533040e39c8c1465e07907ef5860e14e4d8310013e35f12090e0bfc687474b1f15f3dd2033a0edac5246102da4deec7e188c3517d84d9c2a0a4497a4c5f82a30f1ba009e45ee6eb3ab4368c720ea6feee428ffd2c4cc52debb8d634a64176572c72368f94a66689f23f8a01218f532117af5a8060d140e7ca435a92882fcb5630ebe14a4805f1dc83", + "05456ec59b8d41bbd736727976b96b38c43827f9e16169be673ff37870c2ecd5f0d1ea1a136be4cc7b047a02a4421d484fd2a12ece418e42ee391a13a0b1df5a0162b29ab70d3fe3e04ba6ab26b37d62b7cf05a5e2f033611bf970b8e1f30e198e483e740fa9618c1e8677e07b61296b94a9787a68fba622d7653b5568f4a8628025939b0f74389ea8fced6098c065bf2a869fd8e07d705eadb53006be2abb716a3114ceb0236d7e916f037cb954cf977720855d12be76d900ca124a2a66bb", + "eb6f60b83fcee77060ff346aaf6ec34d82a8af469947d3b5074cde8eb26566eb1fa039bcc707738df1e95869bd827c246e88436f0614d9834ead5392ef376105c4a9f370071cdeaaff6ca0f18b74c3a48d19a717253c49bd9009ccbfdd5728a08b7d112a2ed8dbafbbb46d7a75dc9a05e09bfde1a0a92d74a51887f9d123d7896e9f9d0057b660ed7d55454c069d3c5260411db4cdc67e7b74f680d7ac4b9dcc2f8baf72e15e6b3cafebcdf449a6436ed2c398b675f79c644747c57553bf7ea2", + "187a88e88514f6c4157c1ba40b442baae1ae563a6c989277443b12a219aa484cb9fa8adbb9a29d429f50155321b15664926317477079c7060dfdaa84c1d74bba78892c34e6f21ad35208d2ae622012401696bff5cd57b6485944b3db7b9071fa5f57fbfb1085d91bb9cff5808d662cdc6c8157249478262c44b7fbc397ed42a4977b202e817717bfccc9f0467294062313f7705251ed09573f16d23429361fada259dfb300369c4198f07341b38e84d02cdb74af5de6aab1fc2026208ea7c418c0", + "be31bc96606d0fab007e5caeded2f1c9f747c759777e9b6eef962bed49e45a1d4fc993e279d024915e600865ecb087b960584be18c41114d3c43f92169b9e0e1f85a0ebcd4e196376ccdc920e66103cd3b1c58407d0aafd0e003c4e341a1daddb9f4faba974362a32f35db83384b05ae8e3322d728893861afd8b1c940de5a17f691e763ce4969b6d94f67fb4a0235d100225bd8602f291388f0ca4a568748ad0d6040f1262eac2aede6cd27419bb78a394c1ffad72c262be8c3f9d9619d633e51d0", + "4d83d85ca838b4518588f2a90228a4dd18f14dd5b4c012d26298a97d848abbd825d221d02cceb6e8c701b4ad00e1dee4889b5c533e4bb60f1f41a4a61ee5478be2c1b1016c30345afd7a5253668260515e70751f22c8b4022d7fe4877d7bbce90b46531507dd3e89549e7fd58ea28f4cb23d33662bd003c1345ba94cc4b06867f778957901a8c441bee0f3b12e16463a51f7e50690356971dd73a686a49fda1eae46c9d54fba262811d698025d0ee053f1c58591c3bb3cbde69de0b31549ef5b69cf10", + "cdeb07d36dc5f9a1cd717a9e9cca37a2ce93caa298eee63571f7d6c5fde2a11c666cf53cf2dcb41ca2ea2319e7230ca68e38c647905928713a13982bf47fe33d7095ebd50b2df976208920a43eb2e29b942f32467403c45cea18bf44e0f6aeb155b48a8e5c471fec972a9d62f7ae093d2758f0aaec7ca50cb4725bfa219f1a3a46ad6bde7361f445f86b94d66b8ece080e56c510250693a5d0ea0ae87b4421860b853bcf0381eae4f1bf7c5c0472a93ad18407bc88475ab8560d344a921d3e86a02da397", + "a598fad52852c5d51ae3b10528fc1f722e21d44fbd42ae5acdf20e85a28532e646a223d27fd907bfd38eb8bb75175636892f8242877aab89e8c0824d368f3339ce7a82aa4e5af6db1f3b588a4d667a00f67bee37cfd2724dde06d2909fb9e58d892f4cfd2c4ca85acdf8256f5458b030a6bda151154ff2e6d7a8da90b54a2884c8a99fab5a4ac211ff23dc0975f4f592fd1b6b9dc7783bdcd2d4ca4e68d2902f2013e122cb62e2bff6b0a98ec55ba25837e21f1cfe67739b568d43e6413dab2bd1dc471e5a", + "17b68c74c9fe4926e8102070916a4e381b9fe25f5973c9bd4b04ce25749fc18931f37a65a356d3f5e5a1ef125d546f4f0ea797c15fb2efea6fbfcc5739c564693d47adeb12dcb3d98a2830719b13247792cb2491dca159a28138c6cff925aca42f4fdb02e73fbd508ec49b25c60703a7595a3e8f44b155b371d525e48e7e5dc84ac7b17c52bf5e526a67e7187234a2f19f57c548c70fc0b27183df73ffa53fa58b658034c896fa791ae9a7fd2620f5e46ce84c842a6e60e9324ae4db224ffc87d9617cb85ca2", + "b9e4267ea39e1de1fed0579f93bb351007c9f8fcdd811053fae33f09e2753d7428f04e1a9efcd45ea701a5d87a35b3afb2e6b65365dee6ead0bbb611b7797b212ac688653f542e604a39df277f12514ddfee3b4e27b98395c2cd97a203f1f1153c50327965770802ec2c9783edc428271762b275471e7ac65ac36523df28b0d7e6e6ccc7674268a132a63411fc82c0738dbb68af003b769a0bf9e6587b36476cb465350fee13f88ea355d47ffac7b0f964f4139db11b7642cb8d75fe1bc74d859b6d9e884f75ac", + "8ca704fe7208fe5f9c23110c0b3b4eee0ef632cae82bda68d8db2436ad409aa05cf159223586e1e6d8bdae9f316ea786809fbe7fe81ec61c61552d3a83cd6beaf652d1263862664df6aae321d0323440430f400f291c3efbe5d5c690b0cc6b0bf871b3933befb40bc870e2ee1ebb68025a2dcc11b68daadef6be29b5f21e440374301bde1e80dcfade4c9d681480e65ec494a6af48df232c3d51447b9d06be714949249c44c43cf73ed13ef0d533e770284e51369d94ae241a5fb2f163893071b2b4c118aeaf9eae", + "4fd8dd01012bb4df82bf42e0683f998e6f52dd9c5617bae33f867d6c0b69798cead8179346d70acc941abbbdd26e3229d5651361d2252c72ff22db2938d06ff6fc29a42fdf800ae967d06479bc7bbb8e71f40b1190a4b7189ffc9a7096cdb76d40aec424e1388e1eb7ef4ac3b34f3f089da8fda7d1927f5d775c0b2801d22dd1265c973158f640cec93edfed06dc80b20ef8c496b98289d54d46ccd205951cbb0f4e7daeb866b60bacb483411e4382b6f04d472843186bd0e31fbaa93e5c901ec028efafeb45fc551a", + "e9ee1b22b04b321a5fdd8301627011f583887d77560fb0f35552e207561f81e38ac58a0d0aeaf832d1ee72d913720d01f75574e9a321864fe95f4d0d8f0b8db97649a53e71e940aede5c40b4b9105daa42a6fb2811b61209247534cbaf830b07abe338d75d2f5f4eb1c3cf151e9edabe2c8d5f6fff08fac1495ef48160b100d30dcb0676700bcceb28723a29980ab0766a93abb8cb3d1963007db8458ed99b689d2a7c28c788743c80e8c1239b20982c81dadd0eed6740c65fbc4ef15c7b5569cb9fc997c6550a34b3b2", + "ec01e3a60964360f7f23ab0b22e021815765ad706f242265ebc19a2bb9e4eac94393952dcf61aae47682671a10f9165f0b20adf83a6706bfbdcf04c6faba6114653a35584267267873291c6fe7ff5f7695243143421509502c8875aafa9e9afe5be5ef2c851c7f35d69be5d3896000ccdbbfab5c238bb34d607cfe2d55d748880545b4aa7ca61137992925189025c62654b1f20d49c3ccd75aa73ce99cd7258dabedd6480a9f5185531fc0118beb68cc0a9cd182f6973287cf9252e12be5b619f15c25b65c71b7a316ebfd", + "db51a2f84704b78414093aa93708ec5e78573595c6e3a16c9e15744fa0f98ec78a1b3ed1e16f9717c01f6cab1bff0d56367ffc516c2e33261074935e0735ccf0d018744b4d28450f9a4db0dcf7ff504d3183aa967f76a507357948da9018fc38f150db53e2df6cea14466f03792f8bc11bdb5266dd6d508cde9e12ff04305c0295de29de19d491ad86e766774bb517e7e65befb1c5e2c267f013e235d8483e177214f89978b4cdc81aa7eff8b39f2825ad3a1b6ac1424e30edd49b067d770f16e74dd7a9c3af2ad74289a676", + "00e40f30ae3746edad0f5dd03d0e640933cf3d1694804c1e1ed6399ac36611d405196ee48f129344a8512feda16a354517871322bd5d9c6a1b592933eab531923efb393ffb23d9109cbe1075cebfa5fb917b40df028a621460ff6783c798792cb1d9635b5a6f84ec13918fa302924649b5c7fcb1f7007f0d2f06e9cfd7c27491e565a96c68a0c3644f92cd8f38857258c33801c5d537a83dfe583cba59d7eec7e394199c0a2660a62fabe3ed2099d57f315a6cd8de1a4ade29d977f15d65759cff433e5ac0c182aef3761163e1", + "3c5ea24d0d9b618294a263f062b2414a722be4eb10dfc346a6ec3b821d7396eba61cd6ef33618b04cd087a811f299d4606820227f16000d7c839062b96d3e3f59cd1a082448d13fc8f56b3fa7fb5f66d0350aa3b72dd7c165d590282f7da2e12cfe9e60e1796122bb8c2d40fdc2997af634b9c6b127a893dfb3467909378300db3da911be1d7b616bb8e0572433e65527e15d936500a2c60e9f9909dcf22ab5e4b6700f0238c205b4a813626fac3d945bab2637fb08203044a73d20c9a3fcf7c3fc4eb7807c3276dd5f73ce89597", + "9271aeeebfac46f4de85df78f1bfd36136aa8905e15835c9e1941176f71e3aa5b1b131843d40479735e23e182a2bd71f66f6149dccb7ed8c16469079dc8590bbf165374951785f4531f7e7361de62f936cfb23a2b5bdf186632e7042a0dd451fdc9b7208f923f3a5f250ae590ec348c63a16c3aacaf7379f53b5dd4152dcd40d23e683e2156e64c592ffc07e2cd6bbeebef4dd590b2f6b2bcbf08fcd111c079f5c4033adb6c17574f8756ecd87be27eff1d7c8e8d0324438d59ae171d5a17128fbcb5533d921bd044a2038a5046b33", + "4e3e533d5bcb15793d1b9d0468aaee801f32fdb486b11027183553a09ddbee8213924296f2815dc61577297459e834bf1c7a53f87d43782209e589b8295219ba7073a8fff18ad647fdb474fa39e1faa69911bf83438d5f64fe52f38ce6a991f25812c8f548de7bf2fdea7e9b4782beb4011d3567184c817521a2ba0ebad75b892f7f8e35d68b099827a1b08a84ec5e8125651d6f260295684d0ab1011a9209d2bdeb75128bf5364774d7df91e0746b7b08bda9185035f4f226e7d0a1946fcaa9c607a66b185d8546aac2800e85b74e67", + "b5d89fa2d94531093365d1259cc6fe8827fea48e6374c8b9a8c4d2209c280fa5c44958a1847222a692a59e6aa2696e6cdc8a543dd89b0ce03bc293b4e78d6ef48e1839694ccd5c65661143095c705b07e3ced84a0f5959114dd89deb956ab3fac8130eb4a878278205b801ae41a29e34146192308c4e759b374757b0c3b00319bce92a1b95a4d2ee179fd6714ff96155d26f693a5bc973f84ac8b3b91e3926276297532d98b46992a3f104c08100bf1671c43134bac280c617da711e90a0100137525375ebb12802a428885ae7fce6514a", + "40e3d8048fc10650cb8a7fc2e7113e26dec34f9ca2d5129cd10a8e8e44d113d61ee48c7d003e19fd307fc6debd70feb30243f298c510ccc4418355ce143066f067ad7c6de7288c3080e7ad46a23c8d34deb55a43e652fe90444ad3c57d3ec1e1c489d63ef915a24bc74a7925a0a7b1e1523f21ca8fee78df24e3d0a68d0013423db97c280799a0618229c0f2c167289a891e5c8d6661ab21285951c31710e3b5fe55f6347fe16d9b40507948a59252efeb616df83e5c098b07d0a7247cd371daff0e50491c582503fd89f79ba94d6af9ed76", + "1fa444de01dd3901e2b4684e3d7a799ffa02d85afd35fb30fe4c9d672837bee6dd8a3b8608b4bb5e589220ad5a854f46b46e41c6d57ad124a46beab4169ff69fee7e3838a6165e19dad8eb5d7bf53d4edd3cd2769daf219510a02fdd2afe0c0e1da3cd30fcd1aa88b68965586f07a25a1720fbd90a096ea30fc8e945e3637d7857c8a9c0ab4154ffb2000e57b5f9adfa4e4eaf8065bc3c2b2e75f495963325588785a6ce417dcddffd299873b15dcccca128d63cd4eeeadb64cda28099a9ad7c80d34844901f26b88b00b9aafeb2f90286d29d", + "fde0a0d9d813983bd1f55cf778a003a2023b34a555322ab280584537bc6bdd844d22a7d6066c18da83ec09f3d8d5a1aab4be0d5ce19b436052f6e259a4b49017a1f47f1fe2bf115d5bc8599fb216351c60dd6b1bedb2e6f4dcadf424b833501b6f099cbfad9e2290680fb69c25032b42a6274f7cb9b5c5950401354838a45f7cb77b95bf54718e2f3d3d9fb91eb2311903980277396398d9736d8e92fd838594ac8a537c6c529db5a8a4f89290e6ba6f20ac0e5ed6fef40901d0e0e8e3e502990811f9acaae555dd54eb1bcd96b513e2fe751bec", + "9f8e0caec87858599f5ab29bff86da78a841a918a023a111098687ecdf2747612d3f3809d9ca400b878bd4f92c43a1004f1c17c7f19a3cd1ce449bd2b23aff551623c37dd8c0be56bf3fd857b500c2b9f9ccea62481944090a3cf3b6ee81d9af8eeb60f65ef150f9fa4d3ed6ce4762d3d4f174ee8ccd460c25cafac0ea5ec8a6a4b2f9e8c0520cb7061155e532cb65f188b01e4b9086db951f504b060c296b326b3fc1c590498ecce594f828f4a10ea416675720ae505295d38a791bd0e93f428448a8f4c1fc0af53604a9e8255384d29ae5c334e2", + "33d1e683a4c97ee6bbaa5f9df1a88cb53b7f3c157b6045d70a56fda0ccbd3a1fa1f049cd564da072b53f415bf5fb843771c1d2551fd075d33377362b2f7c0645f9723123d11975991db8a2b518f02e2c7c30342a044754290bae2c77496d755e5981f12e6b0a0174280b958bf11ed628a9062775993ced04bf752ea8d165e3ac2177d7cd1b9371c44efa98f0b3e68602a839d384eec007979f46429dafb138cbc231ad928a9f65f7d66fac77416395e8f1debaaf76ec2e4e03e8674102cd26f614739f3ec9f949033df1fb97e87c2326d65aef94ed5f", + "180048f09d0b480887af7fd548a85abf605440c1ddde6afe4c30c30670233f7bf928f43b4681f59279ebbda5e8f8f2a1abefdee129e18ac60f9224e90b38b0aabd01308e0a27f41b6fb2ee07ee176ec9048c5fe33c3f7c791469c81f30e28170585b9f3e7e3c8c2e9d74370cb4518f13bf2dee048cbd98ffa32d85e43bcc64a626b40efb51ce712925fdd6fee006dc68b88004a81549d2121986dd1966084cd654a7c6686b3bae32afbd9625e09344e85cf9611ea08dfce835a2e5b3726e69ae8a76a97db60fcc539944ba4b1e8449e4d9802ae99fae86", + "13c0bc2f5eb887cd90eae426143764cf82b3545998c386007cca871890912217aa143ac4ed4ddb5a7495b704aa4de18419b8664b15bc26cfc6596a4d2ae408f98b47a566476d5802d594ba84c2f538def9d016661f6404bb2337a3932a24f6e30073a6c9c274b940c62c727242e24466084a3ea336365d71ea8fa6499c0ea8d59eea505f1126b99c795023c4963aa0d99323d0391e8701110edf551b2d3799e1063ca443f1add162156e445502ca1a052fe70c289838593b58839fc63de128a03e2bbf389e22ae0cf957fd03315ee407b096cc1cfd92dee6", + "6f1eb607d679efef065df08987a1174aab41bdac8aece7726dfa65805d6fff5b3d17a672d96b770dc32165f144f0f7324822a5c87563b7cd9e37a742ae83ef245d09006d91576f435a03476f509ea2936636232f66aa7f6cdf1ac187bbd1fcb8e20f8791866e60ed96c73374c12ac16795e999b891c64507d2dbd97e5fc29fac750ad27f2937cbcd29fdafccf27ab22453834d475f6186eaf975a36fad5c8bd61c21da554e1ded46c4c39765dcf5c8f5ccfb49b6a4dc562c919d0c7d8940ec536ab2448ec3c9a9c8b0e8fd4870cad9de2577c7b0c38563f355", + "dcdd993c94d3acbc555f464871a32c5da6f13b3d5bbc3e34429705e8ad2e76393fdd96a69a94acb652f5dc3c120d41187e9aa919669f727c4868013b0cb6acc165c1b7706c52248e15c3bf81eb6c147619467945c7c48fa14a73e7c3d5bec91706c567145342a026c9d97eff97ec672c5debb9df1a998083b0b0081d65c517b3e5634c95e347e781aa30ca1c8af815e2e494d844e847fdcb41622894a518dc36571123a40bfdbe8c4f4cff44d83c61dd9dcd24c464c53b395edb31efee9f3aa080e87cdc3d22d613ae84a53c9249c32c96f9a3bc4629bb126a70", + "49971f9823e63c3a72574d977953329e813b22a8387cd13f56d8ea77a5d1a8a20012632d1d8732bbcb9f756b9675aab5db927beacab7ca263e5718b8dfa7b2eed9a91bf5ed163b16139d45f7b8cc7e3f7bdda6202106f67dfb23b7c315ee3e17a09d466b1e6b13e7c7428184a979f5358667b4fa8bd40bcc8ea46058db44587a85377ac46bf155136c09ac58cb6c27f28e17028c91e7e8f74d5b500e56293b316974f02b9d9ea205d9b6ac4cfb74eb8eb0c944577fd2f41316368307beab3e327bf7dbaa0a4428836ec4e895dea635234abeaf113ceeadac33c7a3", + "c57a9cc958cee983599b04fe694f15fb470fcbc53e4bfcc00a27351b12d5d2434444253ad4184e87b81b738922ffd7ff1dc1e54f39c5518b49fb8fe50d63e3935f99e4bd125e8dc0ba8a17fd62de709339a43fabe15cf86d96a54010112170c340cfac4132182eed7301402bc7c8276089dec38488af145cb6222525894658f03501204b7a66aba0be1b557b28a2f652d66f7313ed825ecc4d8596c1be7420d4425b86a1a90a5b7f30d0f24e0d1aae0eb619ca457a71699e44be612a4011c597ee80b94d5507e429d7fc6af22579cd6ad642723b05ef169fade526fb", + "0568a672cd1ecbaa947045b712e2ac27995392fbef8f9488f79803cbee561c212287f080eca95adb5ba42739d78e3ba667f06045d87850d3a0499358649caa257ad29f1a9c511e7054db20554d15cbb55ff854afa45cae475c729cea72ede953522031865bc02b95589ed4d9841c552a8cc94904a93ed09ed77222f6c178195056be59bc4e96a815adf534e6b466fb47e262ff79c803c157a21b6e2269c2e0abeb494113cd868d8466e82d4b2f6a28b73645853d96bc9242515d803e33294848d3fe42fdff68da53c03491636beede47ff1399dd3d54a5e914d55d7adf", + "3f19f61a4cd085796731ac9f85a75a8bce77031932c31762d87d8b8d07b8bd19ff78d6b7d1bd1e87f3a4f41aad03b6c4d17a6cbc86be55f7c8b88ada047bb04f8d49f1c34bcf81cc0f3389ad01a758fc7eeb0072aa9ad1481992bfdde82e438e75590a4423832dfbe3756e2229ea873bc3606e6d72174cb2163bf40b5d49c81009dab85ecc03e311351bbf96e32c030a2b276a7698cb25bc2c967acb3213161a1fdde7d912cd6a804490f8056c47da1333f6e35c41e749c2c23919cb9af5eec5652e6e072b034fb1682e9aaa194a9c0bd456ea0b008d14dbce37967a7a8e", + "705f98f632d99d3651793825c38dc4deda56c59eac539da6a0159c83131cf8ab6f2ee0c3b74111fde351f7aa1a8c500a0cecab17c212d2c58ca09eae608c8eefc922b9902ef8d6832f799ba48c3c28aa702b3242107edeba01daafe424406a3822965056cfe8783455a671e93b1e2eae2321364f1871471c82124df33bc09e1b52882bd7e1c4c7d0b2f3dd4a28c2a002a43246768af0700f9659de99d62167be93177aabf19d678e79e9c726ac510d94e74873eda99620a3961930cd91937c88a06d8153d64fd60da7ca38cf26d1d4f04a0df273f52127c53fdc593f0f8df9", + "ea6f8e977c954657b45f25480ff42c36c7a10c77caa26eb1c907062e24fbca5aebc65cacca0de10abea8c78322f08672e13d8ac16996eca1aa17402eaea4c1cc6c800b22dc18cb8d620192d74bac02c07b5cfa61e513c7f28b7e29b9700e0e442720bf4c669d4995da19d19f841d9eb68cc74153592591e3bf059ef616b95305aa453b32fe99a91afb35bd482cf2b7aa42702837a53be3c38883d2963020e347556f841254ec6b85854485fe8c520b05f2ea67a9bf3981555c20991e2bacd4db5b418228b6002d8d41c025cb472bf5443aaa885974a408ea7f2e3f932c600deb", + "408190134ed06556811b1af808ab2d986aff152a28de2c41a2207c0ccc18125ac20f48384de89ea7c80cda1da14e60cc1599943646b4c0082bbcda2d9fa55a13e9df2934edf15eb4fd41f25fa3dd706ab6de522ed351b106321e494e7a27d5f7caf44ec6fadf1122d227eefc0f57aefc140d2c63d07dcbfd65790b1099745ed042cfd1548242076b98e616b76ff0d53db5179df8dd62c06a36a8b9e95a671e2a9b9dd3fb187a31ae5828d218ec5851913e0b52e2532bd4bf9e7b349f32de2b6d5d3cdf9f372d49617b6220c93c05962327e99a0480488443349f0fd54c1860f7c8", + "5f9e5c6f38573a85010a9d84d33f29c057003b2645e3ea6f72cbc7af95d197ce6a06b13fea81722853e6991791b8b15091cd066f5ed913592ed3d3af5370d39ba22beeb2a582a414b16824b77e194a094c2afdcc09aa73ce36f4943cca5ae32c5017dc398801dd92a47382d9327c9f6cffd38ca4167cd836f7855fc5ff048d8efba378cdde224905a0425e6b1de061fc951c5e624a5153b008ad41160a710b3ff2081748d5e02deb9f841f4fc6cf4a15153dd4fe874fd447482696283e79ee0e6bc8c1c0409baa5ab02c5209c319e3169b2476149c0c6e541c6197ca46e004eef533", + "218c6b3508aec69574f2b5039b30b942b72a8349d05f48ff945bbbe5c8957d5a6199492a6bf54bab821c9377e2edfa4c908384664d2c80112d5e805d66e0a551b941021be17dd20bd825bea9a3b6afb1b8c605805b3bda58750f03ea5c953a698494b425d8980c69f34d1c3f6b5866e8717031152a127215c256e08873c21b0f5cc85875d0f7c94601659150c04cd5fe5d381ba29983a2d94fcd3a65a94c53c7279cd000dddd4253d8cff8d7f6ace10247fe3bc30d63ba4bb54f557b3d22a3924369430d71ab37b701e9500bda70b5a643704858beed4726a889b6c9c91584194c68f1", + "dac26aa7273fc25d6e044c79fc2bfa46e59892a42bbca59a86826c91e76ab03e4bd9f7c0b5f08d1931d88b36ea77d94f7ba67cd4f1d3086e529427201119096ae066ae6f170940830ed7900de7bb9d66e09788287403a4ecc93c6da975d2fb08e918840a236c15f5d3a8f7375c2eeebbf6f01a6e7f29ca2b8d42df158414c320777433663c59fdcd1f39ca68e3473db721be7ce8c6dba5fddc024f94fedb286b0477581d451313ca8c737484daf60d67f9b2d56d4bcc271f7e9ae958c7f258efbc74d25753e0516f28282461941bf2dcc7dd8c7df6173b89760cefcac07190243ff863fb", + "c46e6512e6797cc7a54254a1b26b2de29aa83d6c4b1ea5a2786fbcec388270625b12635eae39e1fba013f8a65219421bca8b52a8ddfd431cda60299bdf160734d5a7450ec79620058522702174ae451b9bfa7c4a455fbbee3e1d048c7d4bac5131018228f137c8e130440c7059b4f15eaa34ce872a851a16ce86f982df78a00be4d564da2003a450ddee9ab43ea876b8b4b65c84f0b39265fd5456417afb5bc54997c986e66fc222f2123ba5e719c4d6b9a177b188277df384f1125821cf19d5248cef0be183ccdc84ac194506f740ed2188b2689ea4c9236a9e9e3a2fff85b6af4e9b49a3", + "1ccd4d278d67b65cf2564ecd4de1b55fe07adc80e1f735fe2f08ea53fd3977323689122c29c798957abaff6aba09bdcbf661d77f4dc8913ab1fe2bef38846166e3834785e7105d746484eff8c656af5d8c7854abc1c62b7fadb65521dc6f793d978bda9838eb3800417d32e8a24d8c8cb1d18a5de6ca79d9e1b0ff9aa25e6218fe944cf18666fecc1e31334b390260dbe0997539e1b02f6366b2aea4f4a21efe04f4b97568fcb39e59919d5ebac6543d5d0f48fc66b923c34aac377dc95c20329b837b6ed5e8d9a3d2089cd0d8f025658006ff41cbdaccca618822ca590ab155253f8bc1c7f5", + "9875209588395ee3c9fdd793fd48717cc84c8c3ea622b2ccc4a1be4448e6034b7810569855255031f10be5ffd714b05f9ce01972d712d40abf03d4d0ce175813a7a668f761324996093fc2aa5912f7fc2abdadd8775d2b4d9ad492216293381460ed8f6db3d641d1525f4242c348bbfe504c704f215dc461de51b5c75c1aae967936963848f16c673eca5e78dfd47eb19001d52d1bcf96c98956dad5ddf594a5da757e7ca35f2f69803b784e66ac5a58b75c228b8266ec592505e5d1ca87d81225738855f15bc0914677e81593fd409e77d159f8a908f67788de9eb06c5561547aada96c47c535", + "40c90e375e366f3756d89091eb3eed9fe0fbfc5638700af4617d358812bac53124a2205dd6756456787d49cd6a35e302479a0992288f47532e4ea7ab62fc5ad5adc690a5d9a446f7e035ad4641bd8dae83946aee3338ec984ccb5cc633e1409f2531eeffe05532a8b0062ba99454c9aeabf8ecb94db195af7032bfebc22912f49d39330add47ff8fa5720612d697f0b602738930e060a1bb214efc5e292224cf34e29deaea6b1b1ff847e94ecc997325ac38df61db45d82bf0e74a664d2fe085c20b04c39e90d6a170b68d2f1d373f00c731c524456ada73d659aaac9df3191a7a3865083343fc13", + "e8800d82e072210ca6d7fa2472028974780b76aad4bcb9ad362422dd05ae3232668251d164daa375a43b26a38cce28dbeb3dee1a4a579f70d0fe7febb29b5ece8aa836e050fb3d188c63aa9c3c0da6c717d86458a6096b5effceb964efdec7035960c09ccd10dea3c5f1c7f9f478d5887ebbe2e15c5ff85dbacbc444bb951c4eec7abecb89ed80187e409e2972ffe1a5f01562af109f2cf09471cf72cf83a3bb8f4e2ef38ed0e326b698296394e5b2718a5000c01425708e8ad0461e62462d8819c2377f13ab1be2c7c9f33dc06fe23cad27b87569f2ce2e56e4b2c60c7b1b3d370841d89ebdc1f192", + "796d6d1447d5b7e8c55cd8b2f8b7010db39f27565f907e3fc0e464ea2d4bb52b37f10e7c6dcfc59231b9cdee12c32aeb4adbc42b86e86eb6defb5b69e6ca75e1f4d0dae3e124e5a1b8b6697f7e10b0403f1f0a5ff848eef3752837a9ba17780f16a9a709188a8d5b89a2fa74adb2e651163b1c2b3d261e225c9158dcd9eb7ac3d6704cee290cdff6bcb3cb90cee030aa0d19d4693655c3c30ac6fc06d2ae37787c47126d57ed9a6bef5f8a6c56859aefc08755739a95aac57a4dd916a92ba9f3afbf969df8085949615033365c751a9a3e1a18cee98a69d22e64009bebf8307169b6c61de0617ecfafdf", + "4f9057183566153cf337b07c3f5556006de54c56b2a1e5326c07aaeabd1886ec6f1641358925db232b2f0dbf75229c796a7395b2f934c1f99090bec1123f3c841b1cb3c5b1ec42ed5408f2940f0c48a9470b852c46d6557853d459cecd2c32bbcd8ee21fa11e385eef0857cba4d8545a61b52a484cdd779db4739fbc7aa9860dcabe0488b98fa0b60c3f7d6153db279000a52ffb573dab37d2ab1896a90e5deb7ac6bbe56239085c325d83a917dc6e8a448425b718c2356b9f3066163555ec444f372e184e02c8c4c69b1c1c2ae2b51e45b98f73d933d18750968945ca85d6bbb22014b4c4015262e3c40d", + "79dcca7d8b81a61359e4aece21f3df7b99518ce70bd2f57a18bab5e7114af2add0a0cea7f319d69f231f060e0a539d9a23fb3e95451ce8c6340cfb09edf931df84203a39226dd9eb278f11b691ef612585b973daab373e65d11325898badf6732100371fd759960fa8fec373268421d28bffdb9b12a430b92fe4b07566ca0c89e616e49f8fc75ccd9cdc66db820d7c02e109aa5ed86b89770262918a518f90a2292f6b68d68ae03992e4259a17a23c84ec2a417f082b5abf3a26e44d2278ecb8ba9456965303a75f25394d1aaf5544590e74b14d8a4cc4050be2b0ebcfe4d2db6b12a02c68a3bcdda70301f3", + "848755dc31e25e9a42f9ec12d847d19f292c14c162c9aba49e972cb123b58b8e57bb263a923929833373858594ff52dbc298dbbc078599194e4c07b0e5fc1e10808bbacdb6e93c72b333685cf961f28eb0d5a395c63266b01f130d25db384b356e5da6d01042fc2359581b89c63b3bb2d1ce897fbc9e83fe85d9666cb60e6a8c657f70caad5387b8a045bf91095606802c8424ea8ac52ef29386dc46183378a5fcb2cb927428b8c070f1c42aafd3bc70ca25437807696a46873cfeb7b80ba2ebc3c4272443d445e46343a1465253a9eebd532a0d1d2c18264b91ff45159f245404ae9335f2af55c802772426b4", + "ecaa6e999ef355a0768730edb835db411829a3764f79d764bb5682af6d00f51b313e017b83fffe2e332cd4a3de0a81d6a52084d5748346a1f81eb9b183ff6d93d05edc00e938d001c90872dfe234e8dd085f639af168af4a07e18f1c56ca6c7c1addffc4a70eb4660666dda0321636c3f83479ad3b64e23d749620413a2ecdcc52ad4e6e63f2b817ce99c15b5d2da3792721d7158297cce65e0c04fe810d7e2434b969e4c7892b3840623e153576356e9a696fd9e7a801c25de621a7849da3f99158d3d09bf039f43c510c8ffb00fa3e9a3c12d2c8062dd25b8dabe53d8581e30427e81c3dfc2d455352487e1255", + "23a3fe80e3636313fdf922a1359514d9f31775e1adf24285e8001c04dbce866df055edf25b506e18953492a173ba5aa0c1ec758123406a97025ba9b6b7a97eb14734424d1a7841ec0eaeba0051d6e9734263bea1af9895a3b8c83d8c854da2ae7832bdd7c285b73f8113c3821cced38b3656b4e6369a9f8327cd368f04128f1d78b6b4260f55995277feffa15e34532cd0306c1f47354667c17018ee012a791af2dbbc7afc92c388008c601740cccbbe66f1eb06ea657e9d478066c2bd2093ab62cd94abadc002722f50968e8acf361658fc64f50685a5b1b004888b3b4f64a4ddb67bec7e4ac64c9ee8deeda896b9", + "758f3567cd992228386a1c01930f7c52a9dcce28fdc1aaa54b0fed97d9a54f1df805f31bac12d559e90a2063cd7df8311a148f6904f78c5440f75e49877c0c0855d59c7f7ee52837e6ef3e54a568a7b38a0d5b896e298c8e46a56d24d8cabda8aeff85a622a3e7c87483ba921f34156defd185f608e2241224286e38121a162c2ba7604f68484717196f6628861a948180e8f06c6cc1ec66d032cf8d16da039cd74277cde31e535bc1692a44046e16881c954af3cd91dc49b443a3680e4bc42a954a46ebd1368b1398edd7580f935514b15c7fbfa9b40048a35122283af731f5e460aa85b66e65f49a9d158699bd2870", + "fe511e86971cea2b6af91b2afa898d9b067fa71780790bb409189f5debe719f405e16acf7c4306a6e6ac5cd535290efe088943b9e6c5d25bfc508023c1b105d20d57252fee8cdbddb4d34a6ec2f72e8d55be55afcafd2e922ab8c31888bec4e816d04f0b2cd23df6e04720969c5152b3563c6da37e4608554cc7b8715bc10aba6a2e3b6fbcd35408df0dd73a9076bfad32b741fcdb0edfb563b3f753508b9b26f0a91673255f9bcda2b9a120f6bfa0632b6551ca517d846a747b66ebda1b2170891ece94c19ce8bf682cc94afdf0053fba4e4f0530935c07cdd6f879c999a8c4328ef6d3e0a37974a230ada83910604337", + "a6024f5b959698c0de45f4f29e1803f99dc8112989c536e5a1337e281bc856ff721e986de183d7b0ea9eb61166830ae5d6d6bc857dc833ff189b52889b8e2bd3f35b4937624d9b36dc5f19db44f0772508029784c7dac9568d28609058bc437e2f79f95b12307d8a8fb042d7fd6ee910a9e8df609ede3283f958ba918a9925a0b1d0f9f9f232062315f28a52cbd60e71c09d83e0f6600f508f0ae8ad7642c080ffc618fcd2314e26f67f1529342569f6df37017f7e3b2dac32ad88d56d175ab22205ee7e3ee94720d76933a21132e110fefbb0689a3adbaa4c685f43652136d09b3a359b5c671e38f11915cb5612db2ae294", + "af6de0e227bd78494acb559ddf34d8a7d55a03912384831be21c38376f39cda8a864aff7a48aed758f6bdf777779a669068a75ce82a06f6b3325c855ed83daf5513a078a61f7dc6c1622a633367e5f3a33e765c8ec5d8d54f48494006fdbf8922063e5340013e312871b7f8f8e5ea439c0d4cb78e2f19dd11f010729b692c65dd0d347f0ce53de9d849224666ea2f6487f1c6f953e8f9dbfd3d6de291c3e9d045e633cfd83c89d2f2327d0b2f31f72ac1604a3db1febc5f22cad08153278047210cc2894582c251a014c652e3951593e70e52a5d7451be8924b64f85c8247dab6268d24710b39fc1c07b4ac829fbda34ed79b5", + "d7314e8b1ff82100b8f5870da62b61c31ab37ace9e6a7b6f7d294571523783c1fdedcbc00dd487dd6f848c34aab493507d07071b5eb59d1a2346068c7f356755fbde3d2cab67514f8c3a12d6ff9f96a977a9ac9263491bd33122a904da5386b943d35a6ba383932df07f259b6b45f69e9b27b4ca124fb3ae143d709853eed86690bc2754d5f8865c355a44b5279d8eb31cdc00f7407fb5f5b34edc57fc7ace943565da2222dc80632ccf42f2f125ceb19714ea964c2e50603c9f8960c3f27c2ed0e18a559931c4352bd7422109a28c5e145003f55c9b7c664fdc985168868950396eaf6fefc7b73d815c1aca721d7c67da632925", + "2928b55c0e4d0f5cb4b60af59e9a702e3d616a8cf427c8bb03981fb8c29026d8f7d89161f36c11654f9a5e8ccb703595a58d671ecdc22c6a784abe363158682be4643002a7da5c9d268a30ea9a8d4cc24f562ab59f55c2b43af7dbcecc7e5ebe7494e82d74145a1e7d442125eb0431c5ea0939b27afa47f8ca97849f341f707660c7fbe49b7a0712fbcb6f7562ae2961425f27c7779c7534ecdeb8047ff3cb89a25159f3e1cefe42f9ef16426241f2c4d62c11d7ac43c4500dfcd184436bb4ef33260366f875230f26d81613c334dbda4736ba9d1d2966502914ec01bbe72d885606ec11da7a2cb01b29d35eebedbb0ecc73ed6c35", + "fd993f50e8a68c7b2c7f87511ce65b93c0aa94dcbdf2c9cca93816f0f3b2ab34c62c586fc507b4900a34cf9d0517e0fe10a89d154c5419c1f5e38de00e8834fe3dc1032abdeb10729a81655a69a12856a78ca6e12110580de879b086fd6608726541cfa9616326bdd36064bc0d1e5f9c93b41278bff6a13b2494b81e238c0c45aea1b07d855e8f3fe1478e373bd9d3957cf8a5e5b9003386793d994c7c575cff2322e2428cbbaa4f47560316ae3354a7478842ff7cc5dcbacb6e871e72b36f06d63a9aaeb9044cfb7974afdc238a5816f537dcf33ee40b4e1a5eb3cff2402b46d548264e133008d284f11b7e4e450bc3c5ff9f79b9c4", + "8df21892f5fc303b0de4adef1970186db6fe71bb3ea3094922e13afcfabf1d0be009f36d6f6310c5f9fda51f1a946507a055b645c296370440e5e83d8e906a2fb51f2b42de8856a81a4f28a73a8825c68ea08e5e366730bce8047011cb7d6d9be8c6f4211308fad21856284d5bc47d199988e0abf5badf8693ceeed0a2d98e8ae94b7775a42925edb1f697ffbd8e806af23145054a85e071819cca4cd48875290ca65e5ee72a9a54ff9f19c10ef4adaf8d04c9a9afcc73853fc128bbebc61f78702787c966ca6e1b1a0e4dab646acdfcd3c6bf3e5cfbec5ebe3e06c8abaa1de56e48421d87c46b5c78030afcafd91f27e7d7c85eb4872b", + "48ec6ec520f8e593d7b3f653eb15553de246723b81a6d0c3221aaa42a37420fba98a23796338dff5f845dce6d5a449be5ecc1887356619270461087e08d05fb60433a83d7bd00c002b09ea210b428965124b9b27d9105a71c826c1a2491cfd60e4cfa86c2da0c7100a8dc1c3f2f94b280d54e01e043acf0e966200d9fa8a41daf3b9382820786c75cadbb8841a1b2be5b6cbeb64878e4a231ae063a99b4e2308960ef0c8e2a16bb3545cc43bdf171493fb89a84f47e7973dc60cf75aeeca71e0a7ebe17d161d4fb9fe009941cc438f16a5bae6c99fcad08cac486eb2a48060b023d8730bf1d82fe60a2f036e6f52a5bff95f43bbe088933f", + "f4d84ed3e564c102600a795eaa9b1eaf4ad12f1a4deca1d042a0a2750ddf6201db03073d8bf553cb9dde48a1b0083827a609f7242b86584cc180964ae794b12ce55661e00e36a6ba4dbc389e6a5a85f1b45df9af7ead1b0a54db56e68639b9d438a91504e82c35d40c7bc7e048a53ac0b04accd0dadf4ac9884b0ca0e3cb5ba4336e3581be4c4760a553823ffa283a1120d4e145af56a59f2533903650f0b9e9ad9fe2e8a3c3c3dd03a1fcb709032c8835324839c735b0c051d0cbd8b5d867617c11023432e4bd275d3d0eb98a0b6cf58071a5b712922f2bc751ac7c2588c447444cde2f37a8ea5ec126425bf517e0d17c9e2999f52fee14b3", + "2ccea21bac9c2b70d3923309cbf2d7cb7abd1fcc8b8b002688870a80029c62397350c3c898194e5deea360bb963d26d485cb7963f8167586976ec0556950b2e86135f4a2800991ce8473bfd44a3c5e937a48b5e355ba5141bccf2131a83988d9d2a9e8e7635a956105b3512c05ef708139ced51d7a4e204c12d8a49a21e8dc6de2629a2fd092326885d9f218745fe09f6d91fb6afce250a30a63689534b6be1f26899ffa3767d835cf586aa47776700f94241bc999b1e3deefe188f37ff734f5f16ee6a00914323dc7b8a143c9137cdcc5cd08ae9566f04bb2941532674c97dff6ffa5ce3405ef8e5d27ec403114253dd6394c0167d72a0044c5", + "2b681c6398aee63bf862770341648bbcd31d7de7903c5903fe3d9469311320bb24d914f2af0cdca199c97214c7c679dc32a2800ba484a03c010ea6be3bb9f2c87e30a98b606050b8a3f297f12b8f92caaeceb3e844652115934874e0a1ab093a73d759b53f6a6c3096940dd22c2bb96ce6820a7b9c6d71a208de9892aa6a7209b0fff56a0cafea52b952cdd6f5752cff3309d448800b4e4c878aa595595b56b12b83fcd6ca89520c7da664e449d7b4438fc455888aad5de0fad9a06eed14afd3513b5ebbffe01775549b701181bd26370764f56eba52fdb24286ad1ac0f5418a7c429f7dfc7f3168437fa8eed7a2ed7c723a485e4c3ed14dea2e07", + "aadfd505a89f4aade2c3018258a7e039401b1fc6a7f3d87910dddbb880d372ec8a13c70d92245de5b8e5f9a285c33b99dc82fa2b22decee72b93a72211656ad7a52696c8e570f78be28c0e427a371dafde856e8d5ed24f83b0660b51e7fac05d93a8666dfde6def59af863f80f3e5f6801182c87422203df390dcb736b8f830052a8832eeeb0b4e27e732aaf793d166b5a3ec7745aeef3766937c2b75a276bddd145f6010c29d035e343e267cb2d828436876ec3a7ebe3b6347d4172f7a99d6821ce152e039e53deb33340b324c7f068ffb94b3cde35a8eaa12d15c3806a7ad0acec3e8c7078c1d32a28fd3eec9f32cb86e4c22166ff69e83785e851", + "1605b8cce529a9d6262fd4390d9e4ae5e14e0adc0ec89b028ef68dd0f373ea259aaa96f2967091dd0874c0105385e9e6da9ca68297c31afa44ef834535fb302ce5b4e49edacbbdf359fe1228a8172495b3e57014c27edd58b685110980056c50c398a64f4923f2d720b4df16d75cb36b4233660694182099c35028a972519c24764fc94e18e582b24deb3491535fc06b83837c7958522800e822201d694af0bd0aa3834e17d4b1ba36f470905ae5f8bbeeb6c4c8604d8af02baa347b07086d6989867ddd5e8e8ed7740c3469bfa2810519c55c6add1332c4c54ee9097961d6741cb12a09713a0d07645f784f42f5ad94b48b836b34263130b0483f15e3", + "ff9c6125b2f60bfd6c2427b279df070e430075096647599bdc68c531152c58e13858b82385d78c856092d6c74106e87ccf51ac7e673936332d9b223444eaa0e762ee258d8a733d3a515ec68ed73285e5ca183ae3278b4820b0ab2797feb1e7d8cc864df585dfb5ebe02a993325a9ad5e2d7d49d3132cf66013898351d044e0fe908ccdfeeebf651983601e3673a1f92d36510c0cc19b2e75856db8e4a41f92a51efa66d6cc22e414944c2c34a5a89ccde0be76f51410824e330d8e7c613194338c93732e8aea651fca18bcf1ac1824340c5553aff1e58d4ab8d7c8842b4712021e517cd6c140f6743c69c7bee05b10a8f24050a8caa4f96d1664909c5a06", + "6e85c2f8e1fdc3aaeb969da1258cb504bbf0070cd03d23b3fb5ee08feea5ee2e0ee1c71a5d0f4f701b351f4e4b4d74cb1e2ae6184814f77b62d2f08134b7236ebf6b67d8a6c9f01b4248b30667c555f5d8646dbfe291151b23c9c9857e33a4d5c847be29a5ee7b402e03bac02d1a4319acc0dd8f25e9c7a266f5e5c896cc11b5b238df96a0963ae806cb277abc515c298a3e61a3036b177acf87a56ca4478c4c6d0d468913de602ec891318bbaf52c97a77c35c5b7d164816cf24e4c4b0b5f45853882f716d61eb947a45ce2efa78f1c70a918512af1ad536cbe6148083385b34e207f5f690d7a954021e4b5f4258a385fd8a87809a481f34202af4caccb82", + "1e9b2c454e9de3a2d723d850331037dbf54133dbe27488ff757dd255833a27d8eb8a128ad12d0978b6884e25737086a704fb289aaaccf930d5b582ab4df1f55f0c429b6875edec3fe45464fa74164be056a55e243c4222c586bec5b18f39036aa903d98180f24f83d09a454dfa1e03a60e6a3ba4613e99c35f874d790174ee48a557f4f021ade4d1b278d7997ef094569b37b3db0505951e9ee8400adaea275c6db51b325ee730c69df97745b556ae41cd98741e28aa3a49544541eeb3da1b1e8fa4e8e9100d66dd0c7f5e2c271b1ecc077de79c462b9fe4c273543ecd82a5bea63c5acc01eca5fb780c7d7c8c9fe208ae8bd50cad1769693d92c6c8649d20d8", +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 0000000..c814496 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produce a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length. + return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 0000000..efd689a --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s.go b/vendor/golang.org/x/crypto/blake2s/blake2s.go new file mode 100644 index 0000000..ae0dc92 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/blake2s.go @@ -0,0 +1,187 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2s implements the BLAKE2s hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xs. +// +// For a detailed specification of BLAKE2s see https://blake2.net/blake2.pdf +// and for BLAKE2Xs see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2s (Sum256 or New256). +// If you need a secret-key MAC (message authentication code), use the New256 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 32 bytes. It +// can produce hash values between 0 and 65535 bytes. +package blake2s // import "golang.org/x/crypto/blake2s" + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2s in bytes. + BlockSize = 64 + + // The hash size of BLAKE2s-256 in bytes. + Size = 32 + + // The hash size of BLAKE2s-128 in bytes. + Size128 = 16 +) + +var errKeySize = errors.New("blake2s: invalid key size") + +var iv = [8]uint32{ + 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19, +} + +// Sum256 returns the BLAKE2s-256 checksum of the data. +func Sum256(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// New256 returns a new hash.Hash computing the BLAKE2s-256 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 32 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New128 returns a new hash.Hash computing the BLAKE2s-128 checksum given a +// non-empty key. Note that a 128-bit digest is too small to be secure as a +// cryptographic hash and should only be used as a MAC, thus the key argument +// is not optional. +func New128(key []byte) (hash.Hash, error) { + if len(key) == 0 { + return nil, errors.New("blake2s: a key is required for a 128-bit hash") + } + return newDigest(Size128, key) +} + +func newDigest(hashSize int, key []byte) (*digest, error) { + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + var ( + h [8]uint32 + c [2]uint32 + ) + + h = iv + h[0] ^= uint32(hashSize) | (1 << 16) | (1 << 24) + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint32(BlockSize - offset) + + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint32(sum[4*i:], v) + } +} + +type digest struct { + h [8]uint32 + c [2]uint32 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint32(d.size) | (uint32(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + d.offset += copy(d.block[:], p) + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) +} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + h := d.h + c := d.c + + copy(block[:], d.block[:d.offset]) + remaining := uint32(BlockSize - d.offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFF, block[:]) + for i, v := range h { + binary.LittleEndian.PutUint32(hash[4*i:], v) + } +} diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_386.go b/vendor/golang.org/x/crypto/blake2s/blake2s_386.go new file mode 100644 index 0000000..45ae546 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_386.go @@ -0,0 +1,35 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,!gccgo,!appengine + +package blake2s + +var ( + useSSE4 = false + useSSSE3 = supportSSSE3() + useSSE2 = supportSSE2() +) + +//go:noescape +func supportSSE2() bool + +//go:noescape +func supportSSSE3() bool + +//go:noescape +func hashBlocksSSE2(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) + +//go:noescape +func hashBlocksSSSE3(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) + +func hashBlocks(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) { + if useSSSE3 { + hashBlocksSSSE3(h, c, flag, blocks) + } else if useSSE2 { + hashBlocksSSE2(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_386.s b/vendor/golang.org/x/crypto/blake2s/blake2s_386.s new file mode 100644 index 0000000..0bb65c7 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_386.s @@ -0,0 +1,460 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,!gccgo,!appengine + +#include "textflag.h" + +DATA iv0<>+0x00(SB)/4, $0x6a09e667 +DATA iv0<>+0x04(SB)/4, $0xbb67ae85 +DATA iv0<>+0x08(SB)/4, $0x3c6ef372 +DATA iv0<>+0x0c(SB)/4, $0xa54ff53a +GLOBL iv0<>(SB), (NOPTR+RODATA), $16 + +DATA iv1<>+0x00(SB)/4, $0x510e527f +DATA iv1<>+0x04(SB)/4, $0x9b05688c +DATA iv1<>+0x08(SB)/4, $0x1f83d9ab +DATA iv1<>+0x0c(SB)/4, $0x5be0cd19 +GLOBL iv1<>(SB), (NOPTR+RODATA), $16 + +DATA rol16<>+0x00(SB)/8, $0x0504070601000302 +DATA rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A +GLOBL rol16<>(SB), (NOPTR+RODATA), $16 + +DATA rol8<>+0x00(SB)/8, $0x0407060500030201 +DATA rol8<>+0x08(SB)/8, $0x0C0F0E0D080B0A09 +GLOBL rol8<>(SB), (NOPTR+RODATA), $16 + +DATA counter<>+0x00(SB)/8, $0x40 +DATA counter<>+0x08(SB)/8, $0x0 +GLOBL counter<>(SB), (NOPTR+RODATA), $16 + +#define ROTL_SSE2(n, t, v) \ + MOVO v, t; \ + PSLLL $n, t; \ + PSRLL $(32-n), v; \ + PXOR t, v + +#define ROTL_SSSE3(c, v) \ + PSHUFB c, v + +#define ROUND_SSE2(v0, v1, v2, v3, m0, m1, m2, m3, t) \ + PADDL m0, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSE2(16, t, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(20, t, v1); \ + PADDL m1, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSE2(24, t, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(25, t, v1); \ + PSHUFL $0x39, v1, v1; \ + PSHUFL $0x4E, v2, v2; \ + PSHUFL $0x93, v3, v3; \ + PADDL m2, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSE2(16, t, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(20, t, v1); \ + PADDL m3, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSE2(24, t, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(25, t, v1); \ + PSHUFL $0x39, v3, v3; \ + PSHUFL $0x4E, v2, v2; \ + PSHUFL $0x93, v1, v1 + +#define ROUND_SSSE3(v0, v1, v2, v3, m0, m1, m2, m3, t, c16, c8) \ + PADDL m0, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSSE3(c16, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(20, t, v1); \ + PADDL m1, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSSE3(c8, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(25, t, v1); \ + PSHUFL $0x39, v1, v1; \ + PSHUFL $0x4E, v2, v2; \ + PSHUFL $0x93, v3, v3; \ + PADDL m2, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSSE3(c16, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(20, t, v1); \ + PADDL m3, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSSE3(c8, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(25, t, v1); \ + PSHUFL $0x39, v3, v3; \ + PSHUFL $0x4E, v2, v2; \ + PSHUFL $0x93, v1, v1 + +#define PRECOMPUTE(dst, off, src, t) \ + MOVL 0*4(src), t; \ + MOVL t, 0*4+off+0(dst); \ + MOVL t, 9*4+off+64(dst); \ + MOVL t, 5*4+off+128(dst); \ + MOVL t, 14*4+off+192(dst); \ + MOVL t, 4*4+off+256(dst); \ + MOVL t, 2*4+off+320(dst); \ + MOVL t, 8*4+off+384(dst); \ + MOVL t, 12*4+off+448(dst); \ + MOVL t, 3*4+off+512(dst); \ + MOVL t, 15*4+off+576(dst); \ + MOVL 1*4(src), t; \ + MOVL t, 4*4+off+0(dst); \ + MOVL t, 8*4+off+64(dst); \ + MOVL t, 14*4+off+128(dst); \ + MOVL t, 5*4+off+192(dst); \ + MOVL t, 12*4+off+256(dst); \ + MOVL t, 11*4+off+320(dst); \ + MOVL t, 1*4+off+384(dst); \ + MOVL t, 6*4+off+448(dst); \ + MOVL t, 10*4+off+512(dst); \ + MOVL t, 3*4+off+576(dst); \ + MOVL 2*4(src), t; \ + MOVL t, 1*4+off+0(dst); \ + MOVL t, 13*4+off+64(dst); \ + MOVL t, 6*4+off+128(dst); \ + MOVL t, 8*4+off+192(dst); \ + MOVL t, 2*4+off+256(dst); \ + MOVL t, 0*4+off+320(dst); \ + MOVL t, 14*4+off+384(dst); \ + MOVL t, 11*4+off+448(dst); \ + MOVL t, 12*4+off+512(dst); \ + MOVL t, 4*4+off+576(dst); \ + MOVL 3*4(src), t; \ + MOVL t, 5*4+off+0(dst); \ + MOVL t, 15*4+off+64(dst); \ + MOVL t, 9*4+off+128(dst); \ + MOVL t, 1*4+off+192(dst); \ + MOVL t, 11*4+off+256(dst); \ + MOVL t, 7*4+off+320(dst); \ + MOVL t, 13*4+off+384(dst); \ + MOVL t, 3*4+off+448(dst); \ + MOVL t, 6*4+off+512(dst); \ + MOVL t, 10*4+off+576(dst); \ + MOVL 4*4(src), t; \ + MOVL t, 2*4+off+0(dst); \ + MOVL t, 1*4+off+64(dst); \ + MOVL t, 15*4+off+128(dst); \ + MOVL t, 10*4+off+192(dst); \ + MOVL t, 6*4+off+256(dst); \ + MOVL t, 8*4+off+320(dst); \ + MOVL t, 3*4+off+384(dst); \ + MOVL t, 13*4+off+448(dst); \ + MOVL t, 14*4+off+512(dst); \ + MOVL t, 5*4+off+576(dst); \ + MOVL 5*4(src), t; \ + MOVL t, 6*4+off+0(dst); \ + MOVL t, 11*4+off+64(dst); \ + MOVL t, 2*4+off+128(dst); \ + MOVL t, 9*4+off+192(dst); \ + MOVL t, 1*4+off+256(dst); \ + MOVL t, 13*4+off+320(dst); \ + MOVL t, 4*4+off+384(dst); \ + MOVL t, 8*4+off+448(dst); \ + MOVL t, 15*4+off+512(dst); \ + MOVL t, 7*4+off+576(dst); \ + MOVL 6*4(src), t; \ + MOVL t, 3*4+off+0(dst); \ + MOVL t, 7*4+off+64(dst); \ + MOVL t, 13*4+off+128(dst); \ + MOVL t, 12*4+off+192(dst); \ + MOVL t, 10*4+off+256(dst); \ + MOVL t, 1*4+off+320(dst); \ + MOVL t, 9*4+off+384(dst); \ + MOVL t, 14*4+off+448(dst); \ + MOVL t, 0*4+off+512(dst); \ + MOVL t, 6*4+off+576(dst); \ + MOVL 7*4(src), t; \ + MOVL t, 7*4+off+0(dst); \ + MOVL t, 14*4+off+64(dst); \ + MOVL t, 10*4+off+128(dst); \ + MOVL t, 0*4+off+192(dst); \ + MOVL t, 5*4+off+256(dst); \ + MOVL t, 9*4+off+320(dst); \ + MOVL t, 12*4+off+384(dst); \ + MOVL t, 1*4+off+448(dst); \ + MOVL t, 13*4+off+512(dst); \ + MOVL t, 2*4+off+576(dst); \ + MOVL 8*4(src), t; \ + MOVL t, 8*4+off+0(dst); \ + MOVL t, 5*4+off+64(dst); \ + MOVL t, 4*4+off+128(dst); \ + MOVL t, 15*4+off+192(dst); \ + MOVL t, 14*4+off+256(dst); \ + MOVL t, 3*4+off+320(dst); \ + MOVL t, 11*4+off+384(dst); \ + MOVL t, 10*4+off+448(dst); \ + MOVL t, 7*4+off+512(dst); \ + MOVL t, 1*4+off+576(dst); \ + MOVL 9*4(src), t; \ + MOVL t, 12*4+off+0(dst); \ + MOVL t, 2*4+off+64(dst); \ + MOVL t, 11*4+off+128(dst); \ + MOVL t, 4*4+off+192(dst); \ + MOVL t, 0*4+off+256(dst); \ + MOVL t, 15*4+off+320(dst); \ + MOVL t, 10*4+off+384(dst); \ + MOVL t, 7*4+off+448(dst); \ + MOVL t, 5*4+off+512(dst); \ + MOVL t, 9*4+off+576(dst); \ + MOVL 10*4(src), t; \ + MOVL t, 9*4+off+0(dst); \ + MOVL t, 4*4+off+64(dst); \ + MOVL t, 8*4+off+128(dst); \ + MOVL t, 13*4+off+192(dst); \ + MOVL t, 3*4+off+256(dst); \ + MOVL t, 5*4+off+320(dst); \ + MOVL t, 7*4+off+384(dst); \ + MOVL t, 15*4+off+448(dst); \ + MOVL t, 11*4+off+512(dst); \ + MOVL t, 0*4+off+576(dst); \ + MOVL 11*4(src), t; \ + MOVL t, 13*4+off+0(dst); \ + MOVL t, 10*4+off+64(dst); \ + MOVL t, 0*4+off+128(dst); \ + MOVL t, 3*4+off+192(dst); \ + MOVL t, 9*4+off+256(dst); \ + MOVL t, 6*4+off+320(dst); \ + MOVL t, 15*4+off+384(dst); \ + MOVL t, 4*4+off+448(dst); \ + MOVL t, 2*4+off+512(dst); \ + MOVL t, 12*4+off+576(dst); \ + MOVL 12*4(src), t; \ + MOVL t, 10*4+off+0(dst); \ + MOVL t, 12*4+off+64(dst); \ + MOVL t, 1*4+off+128(dst); \ + MOVL t, 6*4+off+192(dst); \ + MOVL t, 13*4+off+256(dst); \ + MOVL t, 4*4+off+320(dst); \ + MOVL t, 0*4+off+384(dst); \ + MOVL t, 2*4+off+448(dst); \ + MOVL t, 8*4+off+512(dst); \ + MOVL t, 14*4+off+576(dst); \ + MOVL 13*4(src), t; \ + MOVL t, 14*4+off+0(dst); \ + MOVL t, 3*4+off+64(dst); \ + MOVL t, 7*4+off+128(dst); \ + MOVL t, 2*4+off+192(dst); \ + MOVL t, 15*4+off+256(dst); \ + MOVL t, 12*4+off+320(dst); \ + MOVL t, 6*4+off+384(dst); \ + MOVL t, 0*4+off+448(dst); \ + MOVL t, 9*4+off+512(dst); \ + MOVL t, 11*4+off+576(dst); \ + MOVL 14*4(src), t; \ + MOVL t, 11*4+off+0(dst); \ + MOVL t, 0*4+off+64(dst); \ + MOVL t, 12*4+off+128(dst); \ + MOVL t, 7*4+off+192(dst); \ + MOVL t, 8*4+off+256(dst); \ + MOVL t, 14*4+off+320(dst); \ + MOVL t, 2*4+off+384(dst); \ + MOVL t, 5*4+off+448(dst); \ + MOVL t, 1*4+off+512(dst); \ + MOVL t, 13*4+off+576(dst); \ + MOVL 15*4(src), t; \ + MOVL t, 15*4+off+0(dst); \ + MOVL t, 6*4+off+64(dst); \ + MOVL t, 3*4+off+128(dst); \ + MOVL t, 11*4+off+192(dst); \ + MOVL t, 7*4+off+256(dst); \ + MOVL t, 10*4+off+320(dst); \ + MOVL t, 5*4+off+384(dst); \ + MOVL t, 9*4+off+448(dst); \ + MOVL t, 4*4+off+512(dst); \ + MOVL t, 8*4+off+576(dst) + +// func hashBlocksSSE2(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) +TEXT ·hashBlocksSSE2(SB), 0, $672-24 // frame = 656 + 16 byte alignment + MOVL h+0(FP), AX + MOVL c+4(FP), BX + MOVL flag+8(FP), CX + MOVL blocks_base+12(FP), SI + MOVL blocks_len+16(FP), DX + + MOVL SP, BP + MOVL SP, DI + ADDL $15, DI + ANDL $~15, DI + MOVL DI, SP + + MOVL CX, 8(SP) + MOVL 0(BX), CX + MOVL CX, 0(SP) + MOVL 4(BX), CX + MOVL CX, 4(SP) + XORL CX, CX + MOVL CX, 12(SP) + + MOVOU 0(AX), X0 + MOVOU 16(AX), X1 + MOVOU counter<>(SB), X2 + +loop: + MOVO X0, X4 + MOVO X1, X5 + MOVOU iv0<>(SB), X6 + MOVOU iv1<>(SB), X7 + + MOVO 0(SP), X3 + PADDQ X2, X3 + PXOR X3, X7 + MOVO X3, 0(SP) + + PRECOMPUTE(SP, 16, SI, CX) + ROUND_SSE2(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X3) + + PXOR X4, X0 + PXOR X5, X1 + PXOR X6, X0 + PXOR X7, X1 + + LEAL 64(SI), SI + SUBL $64, DX + JNE loop + + MOVL 0(SP), CX + MOVL CX, 0(BX) + MOVL 4(SP), CX + MOVL CX, 4(BX) + + MOVOU X0, 0(AX) + MOVOU X1, 16(AX) + + MOVL BP, SP + RET + +// func hashBlocksSSSE3(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) +TEXT ·hashBlocksSSSE3(SB), 0, $704-24 // frame = 688 + 16 byte alignment + MOVL h+0(FP), AX + MOVL c+4(FP), BX + MOVL flag+8(FP), CX + MOVL blocks_base+12(FP), SI + MOVL blocks_len+16(FP), DX + + MOVL SP, BP + MOVL SP, DI + ADDL $15, DI + ANDL $~15, DI + MOVL DI, SP + + MOVL CX, 8(SP) + MOVL 0(BX), CX + MOVL CX, 0(SP) + MOVL 4(BX), CX + MOVL CX, 4(SP) + XORL CX, CX + MOVL CX, 12(SP) + + MOVOU 0(AX), X0 + MOVOU 16(AX), X1 + MOVOU counter<>(SB), X2 + +loop: + MOVO X0, 656(SP) + MOVO X1, 672(SP) + MOVO X0, X4 + MOVO X1, X5 + MOVOU iv0<>(SB), X6 + MOVOU iv1<>(SB), X7 + + MOVO 0(SP), X3 + PADDQ X2, X3 + PXOR X3, X7 + MOVO X3, 0(SP) + + MOVOU rol16<>(SB), X0 + MOVOU rol8<>(SB), X1 + + PRECOMPUTE(SP, 16, SI, CX) + ROUND_SSSE3(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X3, X0, X1) + + MOVO 656(SP), X0 + MOVO 672(SP), X1 + PXOR X4, X0 + PXOR X5, X1 + PXOR X6, X0 + PXOR X7, X1 + + LEAL 64(SI), SI + SUBL $64, DX + JNE loop + + MOVL 0(SP), CX + MOVL CX, 0(BX) + MOVL 4(SP), CX + MOVL CX, 4(BX) + + MOVOU X0, 0(AX) + MOVOU X1, 16(AX) + + MOVL BP, SP + RET + +// func supportSSSE3() bool +TEXT ·supportSSSE3(SB), 4, $0-1 + MOVL $1, AX + CPUID + MOVL CX, BX + ANDL $0x1, BX // supports SSE3 + JZ FALSE + ANDL $0x200, CX // supports SSSE3 + JZ FALSE + MOVB $1, ret+0(FP) + RET + +FALSE: + MOVB $0, ret+0(FP) + RET + +// func supportSSE2() bool +TEXT ·supportSSE2(SB), 4, $0-1 + MOVL $1, AX + CPUID + SHRL $26, DX + ANDL $1, DX // DX != 0 if support SSE2 + MOVB DX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go b/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go new file mode 100644 index 0000000..a925e6b --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go @@ -0,0 +1,40 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package blake2s + +var ( + useSSE4 = supportSSE4() + useSSSE3 = supportSSSE3() + useSSE2 = true // Always available on amd64 +) + +//go:noescape +func supportSSSE3() bool + +//go:noescape +func supportSSE4() bool + +//go:noescape +func hashBlocksSSE2(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) + +//go:noescape +func hashBlocksSSSE3(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) + +func hashBlocks(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else if useSSSE3 { + hashBlocksSSSE3(h, c, flag, blocks) + } else if useSSE2 { + hashBlocksSSE2(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s b/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s new file mode 100644 index 0000000..6cdf5a9 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s @@ -0,0 +1,463 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA iv0<>+0x00(SB)/4, $0x6a09e667 +DATA iv0<>+0x04(SB)/4, $0xbb67ae85 +DATA iv0<>+0x08(SB)/4, $0x3c6ef372 +DATA iv0<>+0x0c(SB)/4, $0xa54ff53a +GLOBL iv0<>(SB), (NOPTR+RODATA), $16 + +DATA iv1<>+0x00(SB)/4, $0x510e527f +DATA iv1<>+0x04(SB)/4, $0x9b05688c +DATA iv1<>+0x08(SB)/4, $0x1f83d9ab +DATA iv1<>+0x0c(SB)/4, $0x5be0cd19 +GLOBL iv1<>(SB), (NOPTR+RODATA), $16 + +DATA rol16<>+0x00(SB)/8, $0x0504070601000302 +DATA rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A +GLOBL rol16<>(SB), (NOPTR+RODATA), $16 + +DATA rol8<>+0x00(SB)/8, $0x0407060500030201 +DATA rol8<>+0x08(SB)/8, $0x0C0F0E0D080B0A09 +GLOBL rol8<>(SB), (NOPTR+RODATA), $16 + +DATA counter<>+0x00(SB)/8, $0x40 +DATA counter<>+0x08(SB)/8, $0x0 +GLOBL counter<>(SB), (NOPTR+RODATA), $16 + +#define ROTL_SSE2(n, t, v) \ + MOVO v, t; \ + PSLLL $n, t; \ + PSRLL $(32-n), v; \ + PXOR t, v + +#define ROTL_SSSE3(c, v) \ + PSHUFB c, v + +#define ROUND_SSE2(v0, v1, v2, v3, m0, m1, m2, m3, t) \ + PADDL m0, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSE2(16, t, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(20, t, v1); \ + PADDL m1, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSE2(24, t, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(25, t, v1); \ + PSHUFL $0x39, v1, v1; \ + PSHUFL $0x4E, v2, v2; \ + PSHUFL $0x93, v3, v3; \ + PADDL m2, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSE2(16, t, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(20, t, v1); \ + PADDL m3, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSE2(24, t, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(25, t, v1); \ + PSHUFL $0x39, v3, v3; \ + PSHUFL $0x4E, v2, v2; \ + PSHUFL $0x93, v1, v1 + +#define ROUND_SSSE3(v0, v1, v2, v3, m0, m1, m2, m3, t, c16, c8) \ + PADDL m0, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSSE3(c16, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(20, t, v1); \ + PADDL m1, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSSE3(c8, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(25, t, v1); \ + PSHUFL $0x39, v1, v1; \ + PSHUFL $0x4E, v2, v2; \ + PSHUFL $0x93, v3, v3; \ + PADDL m2, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSSE3(c16, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(20, t, v1); \ + PADDL m3, v0; \ + PADDL v1, v0; \ + PXOR v0, v3; \ + ROTL_SSSE3(c8, v3); \ + PADDL v3, v2; \ + PXOR v2, v1; \ + ROTL_SSE2(25, t, v1); \ + PSHUFL $0x39, v3, v3; \ + PSHUFL $0x4E, v2, v2; \ + PSHUFL $0x93, v1, v1 + + +#define LOAD_MSG_SSE4(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15) \ + MOVL i0*4(src), m0; \ + PINSRD $1, i1*4(src), m0; \ + PINSRD $2, i2*4(src), m0; \ + PINSRD $3, i3*4(src), m0; \ + MOVL i4*4(src), m1; \ + PINSRD $1, i5*4(src), m1; \ + PINSRD $2, i6*4(src), m1; \ + PINSRD $3, i7*4(src), m1; \ + MOVL i8*4(src), m2; \ + PINSRD $1, i9*4(src), m2; \ + PINSRD $2, i10*4(src), m2; \ + PINSRD $3, i11*4(src), m2; \ + MOVL i12*4(src), m3; \ + PINSRD $1, i13*4(src), m3; \ + PINSRD $2, i14*4(src), m3; \ + PINSRD $3, i15*4(src), m3 + +#define PRECOMPUTE_MSG(dst, off, src, R8, R9, R10, R11, R12, R13, R14, R15) \ + MOVQ 0*4(src), R8; \ + MOVQ 2*4(src), R9; \ + MOVQ 4*4(src), R10; \ + MOVQ 6*4(src), R11; \ + MOVQ 8*4(src), R12; \ + MOVQ 10*4(src), R13; \ + MOVQ 12*4(src), R14; \ + MOVQ 14*4(src), R15; \ + \ + MOVL R8, 0*4+off+0(dst); \ + MOVL R8, 9*4+off+64(dst); \ + MOVL R8, 5*4+off+128(dst); \ + MOVL R8, 14*4+off+192(dst); \ + MOVL R8, 4*4+off+256(dst); \ + MOVL R8, 2*4+off+320(dst); \ + MOVL R8, 8*4+off+384(dst); \ + MOVL R8, 12*4+off+448(dst); \ + MOVL R8, 3*4+off+512(dst); \ + MOVL R8, 15*4+off+576(dst); \ + SHRQ $32, R8; \ + MOVL R8, 4*4+off+0(dst); \ + MOVL R8, 8*4+off+64(dst); \ + MOVL R8, 14*4+off+128(dst); \ + MOVL R8, 5*4+off+192(dst); \ + MOVL R8, 12*4+off+256(dst); \ + MOVL R8, 11*4+off+320(dst); \ + MOVL R8, 1*4+off+384(dst); \ + MOVL R8, 6*4+off+448(dst); \ + MOVL R8, 10*4+off+512(dst); \ + MOVL R8, 3*4+off+576(dst); \ + \ + MOVL R9, 1*4+off+0(dst); \ + MOVL R9, 13*4+off+64(dst); \ + MOVL R9, 6*4+off+128(dst); \ + MOVL R9, 8*4+off+192(dst); \ + MOVL R9, 2*4+off+256(dst); \ + MOVL R9, 0*4+off+320(dst); \ + MOVL R9, 14*4+off+384(dst); \ + MOVL R9, 11*4+off+448(dst); \ + MOVL R9, 12*4+off+512(dst); \ + MOVL R9, 4*4+off+576(dst); \ + SHRQ $32, R9; \ + MOVL R9, 5*4+off+0(dst); \ + MOVL R9, 15*4+off+64(dst); \ + MOVL R9, 9*4+off+128(dst); \ + MOVL R9, 1*4+off+192(dst); \ + MOVL R9, 11*4+off+256(dst); \ + MOVL R9, 7*4+off+320(dst); \ + MOVL R9, 13*4+off+384(dst); \ + MOVL R9, 3*4+off+448(dst); \ + MOVL R9, 6*4+off+512(dst); \ + MOVL R9, 10*4+off+576(dst); \ + \ + MOVL R10, 2*4+off+0(dst); \ + MOVL R10, 1*4+off+64(dst); \ + MOVL R10, 15*4+off+128(dst); \ + MOVL R10, 10*4+off+192(dst); \ + MOVL R10, 6*4+off+256(dst); \ + MOVL R10, 8*4+off+320(dst); \ + MOVL R10, 3*4+off+384(dst); \ + MOVL R10, 13*4+off+448(dst); \ + MOVL R10, 14*4+off+512(dst); \ + MOVL R10, 5*4+off+576(dst); \ + SHRQ $32, R10; \ + MOVL R10, 6*4+off+0(dst); \ + MOVL R10, 11*4+off+64(dst); \ + MOVL R10, 2*4+off+128(dst); \ + MOVL R10, 9*4+off+192(dst); \ + MOVL R10, 1*4+off+256(dst); \ + MOVL R10, 13*4+off+320(dst); \ + MOVL R10, 4*4+off+384(dst); \ + MOVL R10, 8*4+off+448(dst); \ + MOVL R10, 15*4+off+512(dst); \ + MOVL R10, 7*4+off+576(dst); \ + \ + MOVL R11, 3*4+off+0(dst); \ + MOVL R11, 7*4+off+64(dst); \ + MOVL R11, 13*4+off+128(dst); \ + MOVL R11, 12*4+off+192(dst); \ + MOVL R11, 10*4+off+256(dst); \ + MOVL R11, 1*4+off+320(dst); \ + MOVL R11, 9*4+off+384(dst); \ + MOVL R11, 14*4+off+448(dst); \ + MOVL R11, 0*4+off+512(dst); \ + MOVL R11, 6*4+off+576(dst); \ + SHRQ $32, R11; \ + MOVL R11, 7*4+off+0(dst); \ + MOVL R11, 14*4+off+64(dst); \ + MOVL R11, 10*4+off+128(dst); \ + MOVL R11, 0*4+off+192(dst); \ + MOVL R11, 5*4+off+256(dst); \ + MOVL R11, 9*4+off+320(dst); \ + MOVL R11, 12*4+off+384(dst); \ + MOVL R11, 1*4+off+448(dst); \ + MOVL R11, 13*4+off+512(dst); \ + MOVL R11, 2*4+off+576(dst); \ + \ + MOVL R12, 8*4+off+0(dst); \ + MOVL R12, 5*4+off+64(dst); \ + MOVL R12, 4*4+off+128(dst); \ + MOVL R12, 15*4+off+192(dst); \ + MOVL R12, 14*4+off+256(dst); \ + MOVL R12, 3*4+off+320(dst); \ + MOVL R12, 11*4+off+384(dst); \ + MOVL R12, 10*4+off+448(dst); \ + MOVL R12, 7*4+off+512(dst); \ + MOVL R12, 1*4+off+576(dst); \ + SHRQ $32, R12; \ + MOVL R12, 12*4+off+0(dst); \ + MOVL R12, 2*4+off+64(dst); \ + MOVL R12, 11*4+off+128(dst); \ + MOVL R12, 4*4+off+192(dst); \ + MOVL R12, 0*4+off+256(dst); \ + MOVL R12, 15*4+off+320(dst); \ + MOVL R12, 10*4+off+384(dst); \ + MOVL R12, 7*4+off+448(dst); \ + MOVL R12, 5*4+off+512(dst); \ + MOVL R12, 9*4+off+576(dst); \ + \ + MOVL R13, 9*4+off+0(dst); \ + MOVL R13, 4*4+off+64(dst); \ + MOVL R13, 8*4+off+128(dst); \ + MOVL R13, 13*4+off+192(dst); \ + MOVL R13, 3*4+off+256(dst); \ + MOVL R13, 5*4+off+320(dst); \ + MOVL R13, 7*4+off+384(dst); \ + MOVL R13, 15*4+off+448(dst); \ + MOVL R13, 11*4+off+512(dst); \ + MOVL R13, 0*4+off+576(dst); \ + SHRQ $32, R13; \ + MOVL R13, 13*4+off+0(dst); \ + MOVL R13, 10*4+off+64(dst); \ + MOVL R13, 0*4+off+128(dst); \ + MOVL R13, 3*4+off+192(dst); \ + MOVL R13, 9*4+off+256(dst); \ + MOVL R13, 6*4+off+320(dst); \ + MOVL R13, 15*4+off+384(dst); \ + MOVL R13, 4*4+off+448(dst); \ + MOVL R13, 2*4+off+512(dst); \ + MOVL R13, 12*4+off+576(dst); \ + \ + MOVL R14, 10*4+off+0(dst); \ + MOVL R14, 12*4+off+64(dst); \ + MOVL R14, 1*4+off+128(dst); \ + MOVL R14, 6*4+off+192(dst); \ + MOVL R14, 13*4+off+256(dst); \ + MOVL R14, 4*4+off+320(dst); \ + MOVL R14, 0*4+off+384(dst); \ + MOVL R14, 2*4+off+448(dst); \ + MOVL R14, 8*4+off+512(dst); \ + MOVL R14, 14*4+off+576(dst); \ + SHRQ $32, R14; \ + MOVL R14, 14*4+off+0(dst); \ + MOVL R14, 3*4+off+64(dst); \ + MOVL R14, 7*4+off+128(dst); \ + MOVL R14, 2*4+off+192(dst); \ + MOVL R14, 15*4+off+256(dst); \ + MOVL R14, 12*4+off+320(dst); \ + MOVL R14, 6*4+off+384(dst); \ + MOVL R14, 0*4+off+448(dst); \ + MOVL R14, 9*4+off+512(dst); \ + MOVL R14, 11*4+off+576(dst); \ + \ + MOVL R15, 11*4+off+0(dst); \ + MOVL R15, 0*4+off+64(dst); \ + MOVL R15, 12*4+off+128(dst); \ + MOVL R15, 7*4+off+192(dst); \ + MOVL R15, 8*4+off+256(dst); \ + MOVL R15, 14*4+off+320(dst); \ + MOVL R15, 2*4+off+384(dst); \ + MOVL R15, 5*4+off+448(dst); \ + MOVL R15, 1*4+off+512(dst); \ + MOVL R15, 13*4+off+576(dst); \ + SHRQ $32, R15; \ + MOVL R15, 15*4+off+0(dst); \ + MOVL R15, 6*4+off+64(dst); \ + MOVL R15, 3*4+off+128(dst); \ + MOVL R15, 11*4+off+192(dst); \ + MOVL R15, 7*4+off+256(dst); \ + MOVL R15, 10*4+off+320(dst); \ + MOVL R15, 5*4+off+384(dst); \ + MOVL R15, 9*4+off+448(dst); \ + MOVL R15, 4*4+off+512(dst); \ + MOVL R15, 8*4+off+576(dst) + +#define BLAKE2s_SSE2() \ + PRECOMPUTE_MSG(SP, 16, SI, R8, R9, R10, R11, R12, R13, R14, R15); \ + ROUND_SSE2(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X8) + +#define BLAKE2s_SSSE3() \ + PRECOMPUTE_MSG(SP, 16, SI, R8, R9, R10, R11, R12, R13, R14, R15); \ + ROUND_SSSE3(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X8, X13, X14) + +#define BLAKE2s_SSE4() \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \ + LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0); \ + ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14) + +#define HASH_BLOCKS(h, c, flag, blocks_base, blocks_len, BLAKE2s_FUNC) \ + MOVQ h, AX; \ + MOVQ c, BX; \ + MOVL flag, CX; \ + MOVQ blocks_base, SI; \ + MOVQ blocks_len, DX; \ + \ + MOVQ SP, BP; \ + MOVQ SP, R9; \ + ADDQ $15, R9; \ + ANDQ $~15, R9; \ + MOVQ R9, SP; \ + \ + MOVQ 0(BX), R9; \ + MOVQ R9, 0(SP); \ + XORQ R9, R9; \ + MOVQ R9, 8(SP); \ + MOVL CX, 8(SP); \ + \ + MOVOU 0(AX), X0; \ + MOVOU 16(AX), X1; \ + MOVOU iv0<>(SB), X2; \ + MOVOU iv1<>(SB), X3 \ + \ + MOVOU counter<>(SB), X12; \ + MOVOU rol16<>(SB), X13; \ + MOVOU rol8<>(SB), X14; \ + MOVO 0(SP), X15; \ + \ + loop: \ + MOVO X0, X4; \ + MOVO X1, X5; \ + MOVO X2, X6; \ + MOVO X3, X7; \ + \ + PADDQ X12, X15; \ + PXOR X15, X7; \ + \ + BLAKE2s_FUNC(); \ + \ + PXOR X4, X0; \ + PXOR X5, X1; \ + PXOR X6, X0; \ + PXOR X7, X1; \ + \ + LEAQ 64(SI), SI; \ + SUBQ $64, DX; \ + JNE loop; \ + \ + MOVO X15, 0(SP); \ + MOVQ 0(SP), R9; \ + MOVQ R9, 0(BX); \ + \ + MOVOU X0, 0(AX); \ + MOVOU X1, 16(AX); \ + \ + MOVQ BP, SP + +// func hashBlocksSSE2(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) +TEXT ·hashBlocksSSE2(SB), 0, $672-48 // frame = 656 + 16 byte alignment + HASH_BLOCKS(h+0(FP), c+8(FP), flag+16(FP), blocks_base+24(FP), blocks_len+32(FP), BLAKE2s_SSE2) + RET + +// func hashBlocksSSSE3(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) +TEXT ·hashBlocksSSSE3(SB), 0, $672-48 // frame = 656 + 16 byte alignment + HASH_BLOCKS(h+0(FP), c+8(FP), flag+16(FP), blocks_base+24(FP), blocks_len+32(FP), BLAKE2s_SSSE3) + RET + +// func hashBlocksSSE4(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 0, $32-48 // frame = 16 + 16 byte alignment + HASH_BLOCKS(h+0(FP), c+8(FP), flag+16(FP), blocks_base+24(FP), blocks_len+32(FP), BLAKE2s_SSE4) + RET + +// func supportSSE4() bool +TEXT ·supportSSE4(SB), 4, $0-1 + MOVL $1, AX + CPUID + SHRL $19, CX // Bit 19 indicates SSE4.1. + ANDL $1, CX + MOVB CX, ret+0(FP) + RET + +// func supportSSSE3() bool +TEXT ·supportSSSE3(SB), 4, $0-1 + MOVL $1, AX + CPUID + MOVL CX, BX + ANDL $0x1, BX // Bit zero indicates SSE3 support. + JZ FALSE + ANDL $0x200, CX // Bit nine indicates SSSE3 support. + JZ FALSE + MOVB $1, ret+0(FP) + RET + +FALSE: + MOVB $0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_generic.go b/vendor/golang.org/x/crypto/blake2s/blake2s_generic.go new file mode 100644 index 0000000..f7e0653 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_generic.go @@ -0,0 +1,174 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2s + +// the precomputed values for BLAKE2s +// there are 10 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. +var precomputed = [10][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, +} + +func hashBlocksGeneric(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) { + var m [16]uint32 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = uint32(blocks[i]) | uint32(blocks[i+1])<<8 | uint32(blocks[i+2])<<16 | uint32(blocks[i+3])<<24 + i += 4 + } + + for k := range precomputed { + s := &(precomputed[k]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_ref.go b/vendor/golang.org/x/crypto/blake2s/blake2s_ref.go new file mode 100644 index 0000000..a311273 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_ref.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!386 gccgo appengine + +package blake2s + +var ( + useSSE4 = false + useSSSE3 = false + useSSE2 = false +) + +func hashBlocks(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_test.go b/vendor/golang.org/x/crypto/blake2s/blake2s_test.go new file mode 100644 index 0000000..cfeb18b --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_test.go @@ -0,0 +1,1002 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2s + +import ( + "encoding/hex" + "fmt" + "testing" +) + +func TestHashes(t *testing.T) { + defer func(sse2, ssse3, sse4 bool) { + useSSE2, useSSSE3, useSSE4 = sse2, ssse3, sse4 + }(useSSE2, useSSSE3, useSSE4) + + if useSSE4 { + t.Log("SSE4 version") + testHashes(t) + testHashes128(t) + useSSE4 = false + } + if useSSSE3 { + t.Log("SSSE3 version") + testHashes(t) + testHashes128(t) + useSSSE3 = false + } + if useSSE2 { + t.Log("SSE2 version") + testHashes(t) + testHashes128(t) + useSSE2 = false + } + + t.Log("generic version") + testHashes(t) + testHashes128(t) +} + +func TestHashes2X(t *testing.T) { + defer func(sse2, ssse3, sse4 bool) { + useSSE2, useSSSE3, useSSE4 = sse2, ssse3, sse4 + }(useSSE2, useSSSE3, useSSE4) + + if useSSE4 { + t.Log("SSE4 version") + testHashes2X(t) + useSSE4 = false + } + if useSSSE3 { + t.Log("SSSE3 version") + testHashes2X(t) + useSSSE3 = false + } + if useSSE2 { + t.Log("SSE2 version") + testHashes2X(t) + useSSE2 = false + } + + t.Log("generic version") + testHashes2X(t) +} + +func testHashes(t *testing.T) { + key, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f") + + input := make([]byte, 255) + for i := range input { + input[i] = byte(i) + } + + for i, expectedHex := range hashes { + h, err := New256(key) + if err != nil { + t.Fatalf("#%d: error from New256: %v", i, err) + } + + h.Write(input[:i]) + sum := h.Sum(nil) + + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (single write): got %s, wanted %s", i, gotHex, expectedHex) + } + + h.Reset() + for j := 0; j < i; j++ { + h.Write(input[j : j+1]) + } + + sum = h.Sum(sum[:0]) + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (byte-by-byte): got %s, wanted %s", i, gotHex, expectedHex) + } + } +} + +func testHashes128(t *testing.T) { + key, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f") + + input := make([]byte, 255) + for i := range input { + input[i] = byte(i) + } + + for i, expectedHex := range hashes128 { + h, err := New128(key) + if err != nil { + t.Fatalf("#%d: error from New128: %v", i, err) + } + + h.Write(input[:i]) + sum := h.Sum(nil) + + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (single write): got %s, wanted %s", i, gotHex, expectedHex) + } + + h.Reset() + for j := 0; j < i; j++ { + h.Write(input[j : j+1]) + } + + sum = h.Sum(sum[:0]) + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (byte-by-byte): got %s, wanted %s", i, gotHex, expectedHex) + } + } +} + +func testHashes2X(t *testing.T) { + key, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f") + + input := make([]byte, 256) + for i := range input { + input[i] = byte(i) + } + + for i, expectedHex := range hashes2X { + length := uint16(len(expectedHex) / 2) + sum := make([]byte, int(length)) + + h, err := NewXOF(length, key) + if err != nil { + t.Fatalf("#%d: error from NewXOF: %v", i, err) + } + + if _, err := h.Write(input); err != nil { + t.Fatalf("#%d (single write): error from Write: %v", i, err) + } + if _, err := h.Read(sum); err != nil { + t.Fatalf("#%d (single write): error from Read: %v", i, err) + } + + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (single write): got %s, wanted %s", i, gotHex, expectedHex) + } + + h.Reset() + for j := 0; j < len(input); j++ { + h.Write(input[j : j+1]) + } + for j := 0; j < len(sum); j++ { + h = h.Clone() + if _, err := h.Read(sum[j : j+1]); err != nil { + t.Fatalf("#%d (byte-by-byte) - Read %d: error from Read: %v", i, j, err) + } + } + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (byte-by-byte): got %s, wanted %s", i, gotHex, expectedHex) + } + } + + h, err := NewXOF(OutputLengthUnknown, key) + if err != nil { + t.Fatalf("#unknown length: error from NewXOF: %v", err) + } + if _, err := h.Write(input); err != nil { + t.Fatalf("#unknown length: error from Write: %v", err) + } + + var result [64]byte + if n, err := h.Read(result[:]); err != nil { + t.Fatalf("#unknown length: error from Read: %v", err) + } else if n != len(result) { + t.Fatalf("#unknown length: Read returned %d bytes, want %d", n, len(result)) + } + + const expected = "2a9a6977d915a2c4dd07dbcafe1918bf1682e56d9c8e567ecd19bfd7cd93528833c764d12b34a5e2a219c9fd463dab45e972c5574d73f45de5b2e23af72530d8" + if fmt.Sprintf("%x", result) != expected { + t.Fatalf("#unknown length: bad result %x, wanted %s", result, expected) + } +} + +// Benchmarks + +func benchmarkSum(b *testing.B, size int) { + data := make([]byte, size) + b.SetBytes(int64(size)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Sum256(data) + } +} + +func benchmarkWrite(b *testing.B, size int) { + data := make([]byte, size) + h, _ := New256(nil) + b.SetBytes(int64(size)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + h.Write(data) + } +} + +func BenchmarkWrite64(b *testing.B) { benchmarkWrite(b, 64) } +func BenchmarkWrite1K(b *testing.B) { benchmarkWrite(b, 1024) } + +func BenchmarkSum64(b *testing.B) { benchmarkSum(b, 64) } +func BenchmarkSum1K(b *testing.B) { benchmarkSum(b, 1024) } + +// hashes is taken from https://blake2.net/blake2s-test.txt +var hashes = []string{ + "48a8997da407876b3d79c0d92325ad3b89cbb754d86ab71aee047ad345fd2c49", + "40d15fee7c328830166ac3f918650f807e7e01e177258cdc0a39b11f598066f1", + "6bb71300644cd3991b26ccd4d274acd1adeab8b1d7914546c1198bbe9fc9d803", + "1d220dbe2ee134661fdf6d9e74b41704710556f2f6e5a091b227697445dbea6b", + "f6c3fbadb4cc687a0064a5be6e791bec63b868ad62fba61b3757ef9ca52e05b2", + "49c1f21188dfd769aea0e911dd6b41f14dab109d2b85977aa3088b5c707e8598", + "fdd8993dcd43f696d44f3cea0ff35345234ec8ee083eb3cada017c7f78c17143", + "e6c8125637438d0905b749f46560ac89fd471cf8692e28fab982f73f019b83a9", + "19fc8ca6979d60e6edd3b4541e2f967ced740df6ec1eaebbfe813832e96b2974", + "a6ad777ce881b52bb5a4421ab6cdd2dfba13e963652d4d6d122aee46548c14a7", + "f5c4b2ba1a00781b13aba0425242c69cb1552f3f71a9a3bb22b4a6b4277b46dd", + "e33c4c9bd0cc7e45c80e65c77fa5997fec7002738541509e68a9423891e822a3", + "fba16169b2c3ee105be6e1e650e5cbf40746b6753d036ab55179014ad7ef6651", + "f5c4bec6d62fc608bf41cc115f16d61c7efd3ff6c65692bbe0afffb1fede7475", + "a4862e76db847f05ba17ede5da4e7f91b5925cf1ad4ba12732c3995742a5cd6e", + "65f4b860cd15b38ef814a1a804314a55be953caa65fd758ad989ff34a41c1eea", + "19ba234f0a4f38637d1839f9d9f76ad91c8522307143c97d5f93f69274cec9a7", + "1a67186ca4a5cb8e65fca0e2ecbc5ddc14ae381bb8bffeb9e0a103449e3ef03c", + "afbea317b5a2e89c0bd90ccf5d7fd0ed57fe585e4be3271b0a6bf0f5786b0f26", + "f1b01558ce541262f5ec34299d6fb4090009e3434be2f49105cf46af4d2d4124", + "13a0a0c86335635eaa74ca2d5d488c797bbb4f47dc07105015ed6a1f3309efce", + "1580afeebebb346f94d59fe62da0b79237ead7b1491f5667a90e45edf6ca8b03", + "20be1a875b38c573dd7faaa0de489d655c11efb6a552698e07a2d331b5f655c3", + "be1fe3c4c04018c54c4a0f6b9a2ed3c53abe3a9f76b4d26de56fc9ae95059a99", + "e3e3ace537eb3edd8463d9ad3582e13cf86533ffde43d668dd2e93bbdbd7195a", + "110c50c0bf2c6e7aeb7e435d92d132ab6655168e78a2decdec3330777684d9c1", + "e9ba8f505c9c80c08666a701f3367e6cc665f34b22e73c3c0417eb1c2206082f", + "26cd66fca02379c76df12317052bcafd6cd8c3a7b890d805f36c49989782433a", + "213f3596d6e3a5d0e9932cd2159146015e2abc949f4729ee2632fe1edb78d337", + "1015d70108e03be1c702fe97253607d14aee591f2413ea6787427b6459ff219a", + "3ca989de10cfe609909472c8d35610805b2f977734cf652cc64b3bfc882d5d89", + "b6156f72d380ee9ea6acd190464f2307a5c179ef01fd71f99f2d0f7a57360aea", + "c03bc642b20959cbe133a0303e0c1abff3e31ec8e1a328ec8565c36decff5265", + "2c3e08176f760c6264c3a2cd66fec6c3d78de43fc192457b2a4a660a1e0eb22b", + "f738c02f3c1b190c512b1a32deabf353728e0e9ab034490e3c3409946a97aeec", + "8b1880df301cc963418811088964839287ff7fe31c49ea6ebd9e48bdeee497c5", + "1e75cb21c60989020375f1a7a242839f0b0b68973a4c2a05cf7555ed5aaec4c1", + "62bf8a9c32a5bccf290b6c474d75b2a2a4093f1a9e27139433a8f2b3bce7b8d7", + "166c8350d3173b5e702b783dfd33c66ee0432742e9b92b997fd23c60dc6756ca", + "044a14d822a90cacf2f5a101428adc8f4109386ccb158bf905c8618b8ee24ec3", + "387d397ea43a994be84d2d544afbe481a2000f55252696bba2c50c8ebd101347", + "56f8ccf1f86409b46ce36166ae9165138441577589db08cbc5f66ca29743b9fd", + "9706c092b04d91f53dff91fa37b7493d28b576b5d710469df79401662236fc03", + "877968686c068ce2f7e2adcff68bf8748edf3cf862cfb4d3947a3106958054e3", + "8817e5719879acf7024787eccdb271035566cfa333e049407c0178ccc57a5b9f", + "8938249e4b50cadaccdf5b18621326cbb15253e33a20f5636e995d72478de472", + "f164abba4963a44d107257e3232d90aca5e66a1408248c51741e991db5227756", + "d05563e2b1cba0c4a2a1e8bde3a1a0d9f5b40c85a070d6f5fb21066ead5d0601", + "03fbb16384f0a3866f4c3117877666efbf124597564b293d4aab0d269fabddfa", + "5fa8486ac0e52964d1881bbe338eb54be2f719549224892057b4da04ba8b3475", + "cdfabcee46911111236a31708b2539d71fc211d9b09c0d8530a11e1dbf6eed01", + "4f82de03b9504793b82a07a0bdcdff314d759e7b62d26b784946b0d36f916f52", + "259ec7f173bcc76a0994c967b4f5f024c56057fb79c965c4fae41875f06a0e4c", + "193cc8e7c3e08bb30f5437aa27ade1f142369b246a675b2383e6da9b49a9809e", + "5c10896f0e2856b2a2eee0fe4a2c1633565d18f0e93e1fab26c373e8f829654d", + "f16012d93f28851a1eb989f5d0b43f3f39ca73c9a62d5181bff237536bd348c3", + "2966b3cfae1e44ea996dc5d686cf25fa053fb6f67201b9e46eade85d0ad6b806", + "ddb8782485e900bc60bcf4c33a6fd585680cc683d516efa03eb9985fad8715fb", + "4c4d6e71aea05786413148fc7a786b0ecaf582cff1209f5a809fba8504ce662c", + "fb4c5e86d7b2229b99b8ba6d94c247ef964aa3a2bae8edc77569f28dbbff2d4e", + "e94f526de9019633ecd54ac6120f23958d7718f1e7717bf329211a4faeed4e6d", + "cbd6660a10db3f23f7a03d4b9d4044c7932b2801ac89d60bc9eb92d65a46c2a0", + "8818bbd3db4dc123b25cbba5f54c2bc4b3fcf9bf7d7a7709f4ae588b267c4ece", + "c65382513f07460da39833cb666c5ed82e61b9e998f4b0c4287cee56c3cc9bcd", + "8975b0577fd35566d750b362b0897a26c399136df07bababbde6203ff2954ed4", + "21fe0ceb0052be7fb0f004187cacd7de67fa6eb0938d927677f2398c132317a8", + "2ef73f3c26f12d93889f3c78b6a66c1d52b649dc9e856e2c172ea7c58ac2b5e3", + "388a3cd56d73867abb5f8401492b6e2681eb69851e767fd84210a56076fb3dd3", + "af533e022fc9439e4e3cb838ecd18692232adf6fe9839526d3c3dd1b71910b1a", + "751c09d41a9343882a81cd13ee40818d12eb44c6c7f40df16e4aea8fab91972a", + "5b73ddb68d9d2b0aa265a07988d6b88ae9aac582af83032f8a9b21a2e1b7bf18", + "3da29126c7c5d7f43e64242a79feaa4ef3459cdeccc898ed59a97f6ec93b9dab", + "566dc920293da5cb4fe0aa8abda8bbf56f552313bff19046641e3615c1e3ed3f", + "4115bea02f73f97f629e5c5590720c01e7e449ae2a6697d4d2783321303692f9", + "4ce08f4762468a7670012164878d68340c52a35e66c1884d5c864889abc96677", + "81ea0b7804124e0c22ea5fc71104a2afcb52a1fa816f3ecb7dcb5d9dea1786d0", + "fe362733b05f6bedaf9379d7f7936ede209b1f8323c3922549d9e73681b5db7b", + "eff37d30dfd20359be4e73fdf40d27734b3df90a97a55ed745297294ca85d09f", + "172ffc67153d12e0ca76a8b6cd5d4731885b39ce0cac93a8972a18006c8b8baf", + "c47957f1cc88e83ef9445839709a480a036bed5f88ac0fcc8e1e703ffaac132c", + "30f3548370cfdceda5c37b569b6175e799eef1a62aaa943245ae7669c227a7b5", + "c95dcb3cf1f27d0eef2f25d2413870904a877c4a56c2de1e83e2bc2ae2e46821", + "d5d0b5d705434cd46b185749f66bfb5836dcdf6ee549a2b7a4aee7f58007caaf", + "bbc124a712f15d07c300e05b668389a439c91777f721f8320c1c9078066d2c7e", + "a451b48c35a6c7854cfaae60262e76990816382ac0667e5a5c9e1b46c4342ddf", + "b0d150fb55e778d01147f0b5d89d99ecb20ff07e5e6760d6b645eb5b654c622b", + "34f737c0ab219951eee89a9f8dac299c9d4c38f33fa494c5c6eefc92b6db08bc", + "1a62cc3a00800dcbd99891080c1e098458193a8cc9f970ea99fbeff00318c289", + "cfce55ebafc840d7ae48281c7fd57ec8b482d4b704437495495ac414cf4a374b", + "6746facf71146d999dabd05d093ae586648d1ee28e72617b99d0f0086e1e45bf", + "571ced283b3f23b4e750bf12a2caf1781847bd890e43603cdc5976102b7bb11b", + "cfcb765b048e35022c5d089d26e85a36b005a2b80493d03a144e09f409b6afd1", + "4050c7a27705bb27f42089b299f3cbe5054ead68727e8ef9318ce6f25cd6f31d", + "184070bd5d265fbdc142cd1c5cd0d7e414e70369a266d627c8fba84fa5e84c34", + "9edda9a4443902a9588c0d0ccc62b930218479a6841e6fe7d43003f04b1fd643", + "e412feef7908324a6da1841629f35d3d358642019310ec57c614836b63d30763", + "1a2b8edff3f9acc1554fcbae3cf1d6298c6462e22e5eb0259684f835012bd13f", + "288c4ad9b9409762ea07c24a41f04f69a7d74bee2d95435374bde946d7241c7b", + "805691bb286748cfb591d3aebe7e6f4e4dc6e2808c65143cc004e4eb6fd09d43", + "d4ac8d3a0afc6cfa7b460ae3001baeb36dadb37da07d2e8ac91822df348aed3d", + "c376617014d20158bced3d3ba552b6eccf84e62aa3eb650e90029c84d13eea69", + "c41f09f43cecae7293d6007ca0a357087d5ae59be500c1cd5b289ee810c7b082", + "03d1ced1fba5c39155c44b7765cb760c78708dcfc80b0bd8ade3a56da8830b29", + "09bde6f152218dc92c41d7f45387e63e5869d807ec70b821405dbd884b7fcf4b", + "71c9036e18179b90b37d39e9f05eb89cc5fc341fd7c477d0d7493285faca08a4", + "5916833ebb05cd919ca7fe83b692d3205bef72392b2cf6bb0a6d43f994f95f11", + "f63aab3ec641b3b024964c2b437c04f6043c4c7e0279239995401958f86bbe54", + "f172b180bfb09740493120b6326cbdc561e477def9bbcfd28cc8c1c5e3379a31", + "cb9b89cc18381dd9141ade588654d4e6a231d5bf49d4d59ac27d869cbe100cf3", + "7bd8815046fdd810a923e1984aaebdcdf84d87c8992d68b5eeb460f93eb3c8d7", + "607be66862fd08ee5b19facac09dfdbcd40c312101d66e6ebd2b841f1b9a9325", + "9fe03bbe69ab1834f5219b0da88a08b30a66c5913f0151963c360560db0387b3", + "90a83585717b75f0e9b725e055eeeeb9e7a028ea7e6cbc07b20917ec0363e38c", + "336ea0530f4a7469126e0218587ebbde3358a0b31c29d200f7dc7eb15c6aadd8", + "a79e76dc0abca4396f0747cd7b748df913007626b1d659da0c1f78b9303d01a3", + "44e78a773756e0951519504d7038d28d0213a37e0ce375371757bc996311e3b8", + "77ac012a3f754dcfeab5eb996be9cd2d1f96111b6e49f3994df181f28569d825", + "ce5a10db6fccdaf140aaa4ded6250a9c06e9222bc9f9f3658a4aff935f2b9f3a", + "ecc203a7fe2be4abd55bb53e6e673572e0078da8cd375ef430cc97f9f80083af", + "14a5186de9d7a18b0412b8563e51cc5433840b4a129a8ff963b33a3c4afe8ebb", + "13f8ef95cb86e6a638931c8e107673eb76ba10d7c2cd70b9d9920bbeed929409", + "0b338f4ee12f2dfcb78713377941e0b0632152581d1332516e4a2cab1942cca4", + "eaab0ec37b3b8ab796e9f57238de14a264a076f3887d86e29bb5906db5a00e02", + "23cb68b8c0e6dc26dc27766ddc0a13a99438fd55617aa4095d8f969720c872df", + "091d8ee30d6f2968d46b687dd65292665742de0bb83dcc0004c72ce10007a549", + "7f507abc6d19ba00c065a876ec5657868882d18a221bc46c7a6912541f5bc7ba", + "a0607c24e14e8c223db0d70b4d30ee88014d603f437e9e02aa7dafa3cdfbad94", + "ddbfea75cc467882eb3483ce5e2e756a4f4701b76b445519e89f22d60fa86e06", + "0c311f38c35a4fb90d651c289d486856cd1413df9b0677f53ece2cd9e477c60a", + "46a73a8dd3e70f59d3942c01df599def783c9da82fd83222cd662b53dce7dbdf", + "ad038ff9b14de84a801e4e621ce5df029dd93520d0c2fa38bff176a8b1d1698c", + "ab70c5dfbd1ea817fed0cd067293abf319e5d7901c2141d5d99b23f03a38e748", + "1fffda67932b73c8ecaf009a3491a026953babfe1f663b0697c3c4ae8b2e7dcb", + "b0d2cc19472dd57f2b17efc03c8d58c2283dbb19da572f7755855aa9794317a0", + "a0d19a6ee33979c325510e276622df41f71583d07501b87071129a0ad94732a5", + "724642a7032d1062b89e52bea34b75df7d8fe772d9fe3c93ddf3c4545ab5a99b", + "ade5eaa7e61f672d587ea03dae7d7b55229c01d06bc0a5701436cbd18366a626", + "013b31ebd228fcdda51fabb03bb02d60ac20ca215aafa83bdd855e3755a35f0b", + "332ed40bb10dde3c954a75d7b8999d4b26a1c063c1dc6e32c1d91bab7bbb7d16", + "c7a197b3a05b566bcc9facd20e441d6f6c2860ac9651cd51d6b9d2cdeeea0390", + "bd9cf64ea8953c037108e6f654914f3958b68e29c16700dc184d94a21708ff60", + "8835b0ac021151df716474ce27ce4d3c15f0b2dab48003cf3f3efd0945106b9a", + "3bfefa3301aa55c080190cffda8eae51d9af488b4c1f24c3d9a75242fd8ea01d", + "08284d14993cd47d53ebaecf0df0478cc182c89c00e1859c84851686ddf2c1b7", + "1ed7ef9f04c2ac8db6a864db131087f27065098e69c3fe78718d9b947f4a39d0", + "c161f2dcd57e9c1439b31a9dd43d8f3d7dd8f0eb7cfac6fb25a0f28e306f0661", + "c01969ad34c52caf3dc4d80d19735c29731ac6e7a92085ab9250c48dea48a3fc", + "1720b3655619d2a52b3521ae0e49e345cb3389ebd6208acaf9f13fdacca8be49", + "756288361c83e24c617cf95c905b22d017cdc86f0bf1d658f4756c7379873b7f", + "e7d0eda3452693b752abcda1b55e276f82698f5f1605403eff830bea0071a394", + "2c82ecaa6b84803e044af63118afe544687cb6e6c7df49ed762dfd7c8693a1bc", + "6136cbf4b441056fa1e2722498125d6ded45e17b52143959c7f4d4e395218ac2", + "721d3245aafef27f6a624f47954b6c255079526ffa25e9ff77e5dcff473b1597", + "9dd2fbd8cef16c353c0ac21191d509eb28dd9e3e0d8cea5d26ca839393851c3a", + "b2394ceacdebf21bf9df2ced98e58f1c3a4bbbff660dd900f62202d6785cc46e", + "57089f222749ad7871765f062b114f43ba20ec56422a8b1e3f87192c0ea718c6", + "e49a9459961cd33cdf4aae1b1078a5dea7c040e0fea340c93a724872fc4af806", + "ede67f720effd2ca9c88994152d0201dee6b0a2d2c077aca6dae29f73f8b6309", + "e0f434bf22e3088039c21f719ffc67f0f2cb5e98a7a0194c76e96bf4e8e17e61", + "277c04e2853484a4eba910ad336d01b477b67cc200c59f3c8d77eef8494f29cd", + "156d5747d0c99c7f27097d7b7e002b2e185cb72d8dd7eb424a0321528161219f", + "20ddd1ed9b1ca803946d64a83ae4659da67fba7a1a3eddb1e103c0f5e03e3a2c", + "f0af604d3dabbf9a0f2a7d3dda6bd38bba72c6d09be494fcef713ff10189b6e6", + "9802bb87def4cc10c4a5fd49aa58dfe2f3fddb46b4708814ead81d23ba95139b", + "4f8ce1e51d2fe7f24043a904d898ebfc91975418753413aa099b795ecb35cedb", + "bddc6514d7ee6ace0a4ac1d0e068112288cbcf560454642705630177cba608bd", + "d635994f6291517b0281ffdd496afa862712e5b3c4e52e4cd5fdae8c0e72fb08", + "878d9ca600cf87e769cc305c1b35255186615a73a0da613b5f1c98dbf81283ea", + "a64ebe5dc185de9fdde7607b6998702eb23456184957307d2fa72e87a47702d6", + "ce50eab7b5eb52bdc9ad8e5a480ab780ca9320e44360b1fe37e03f2f7ad7de01", + "eeddb7c0db6e30abe66d79e327511e61fcebbc29f159b40a86b046ecf0513823", + "787fc93440c1ec96b5ad01c16cf77916a1405f9426356ec921d8dff3ea63b7e0", + "7f0d5eab47eefda696c0bf0fbf86ab216fce461e9303aba6ac374120e890e8df", + "b68004b42f14ad029f4c2e03b1d5eb76d57160e26476d21131bef20ada7d27f4", + "b0c4eb18ae250b51a41382ead92d0dc7455f9379fc9884428e4770608db0faec", + "f92b7a870c059f4d46464c824ec96355140bdce681322cc3a992ff103e3fea52", + "5364312614813398cc525d4c4e146edeb371265fba19133a2c3d2159298a1742", + "f6620e68d37fb2af5000fc28e23b832297ecd8bce99e8be4d04e85309e3d3374", + "5316a27969d7fe04ff27b283961bffc3bf5dfb32fb6a89d101c6c3b1937c2871", + "81d1664fdf3cb33c24eebac0bd64244b77c4abea90bbe8b5ee0b2aafcf2d6a53", + "345782f295b0880352e924a0467b5fbc3e8f3bfbc3c7e48b67091fb5e80a9442", + "794111ea6cd65e311f74ee41d476cb632ce1e4b051dc1d9e9d061a19e1d0bb49", + "2a85daf6138816b99bf8d08ba2114b7ab07975a78420c1a3b06a777c22dd8bcb", + "89b0d5f289ec16401a069a960d0b093e625da3cf41ee29b59b930c5820145455", + "d0fdcb543943fc27d20864f52181471b942cc77ca675bcb30df31d358ef7b1eb", + "b17ea8d77063c709d4dc6b879413c343e3790e9e62ca85b7900b086f6b75c672", + "e71a3e2c274db842d92114f217e2c0eac8b45093fdfd9df4ca7162394862d501", + "c0476759ab7aa333234f6b44f5fd858390ec23694c622cb986e769c78edd733e", + "9ab8eabb1416434d85391341d56993c55458167d4418b19a0f2ad8b79a83a75b", + "7992d0bbb15e23826f443e00505d68d3ed7372995a5c3e498654102fbcd0964e", + "c021b30085151435df33b007ccecc69df1269f39ba25092bed59d932ac0fdc28", + "91a25ec0ec0d9a567f89c4bfe1a65a0e432d07064b4190e27dfb81901fd3139b", + "5950d39a23e1545f301270aa1a12f2e6c453776e4d6355de425cc153f9818867", + "d79f14720c610af179a3765d4b7c0968f977962dbf655b521272b6f1e194488e", + "e9531bfc8b02995aeaa75ba27031fadbcbf4a0dab8961d9296cd7e84d25d6006", + "34e9c26a01d7f16181b454a9d1623c233cb99d31c694656e9413aca3e918692f", + "d9d7422f437bd439ddd4d883dae2a08350173414be78155133fff1964c3d7972", + "4aee0c7aaf075414ff1793ead7eaca601775c615dbd60b640b0a9f0ce505d435", + "6bfdd15459c83b99f096bfb49ee87b063d69c1974c6928acfcfb4099f8c4ef67", + "9fd1c408fd75c336193a2a14d94f6af5adf050b80387b4b010fb29f4cc72707c", + "13c88480a5d00d6c8c7ad2110d76a82d9b70f4fa6696d4e5dd42a066dcaf9920", + "820e725ee25fe8fd3a8d5abe4c46c3ba889de6fa9191aa22ba67d5705421542b", + "32d93a0eb02f42fbbcaf2bad0085b282e46046a4df7ad10657c9d6476375b93e", + "adc5187905b1669cd8ec9c721e1953786b9d89a9bae30780f1e1eab24a00523c", + "e90756ff7f9ad810b239a10ced2cf9b2284354c1f8c7e0accc2461dc796d6e89", + "1251f76e56978481875359801db589a0b22f86d8d634dc04506f322ed78f17e8", + "3afa899fd980e73ecb7f4d8b8f291dc9af796bc65d27f974c6f193c9191a09fd", + "aa305be26e5deddc3c1010cbc213f95f051c785c5b431e6a7cd048f161787528", + "8ea1884ff32e9d10f039b407d0d44e7e670abd884aeee0fb757ae94eaa97373d", + "d482b2155d4dec6b4736a1f1617b53aaa37310277d3fef0c37ad41768fc235b4", + "4d413971387e7a8898a8dc2a27500778539ea214a2dfe9b3d7e8ebdce5cf3db3", + "696e5d46e6c57e8796e4735d08916e0b7929b3cf298c296d22e9d3019653371c", + "1f5647c1d3b088228885865c8940908bf40d1a8272821973b160008e7a3ce2eb", + "b6e76c330f021a5bda65875010b0edf09126c0f510ea849048192003aef4c61c", + "3cd952a0beada41abb424ce47f94b42be64e1ffb0fd0782276807946d0d0bc55", + "98d92677439b41b7bb513312afb92bcc8ee968b2e3b238cecb9b0f34c9bb63d0", + "ecbca2cf08ae57d517ad16158a32bfa7dc0382eaeda128e91886734c24a0b29d", + "942cc7c0b52e2b16a4b89fa4fc7e0bf609e29a08c1a8543452b77c7bfd11bb28", + "8a065d8b61a0dffb170d5627735a76b0e9506037808cba16c345007c9f79cf8f", + "1b9fa19714659c78ff413871849215361029ac802b1cbcd54e408bd87287f81f", + "8dab071bcd6c7292a9ef727b4ae0d86713301da8618d9a48adce55f303a869a1", + "8253e3e7c7b684b9cb2beb014ce330ff3d99d17abbdbabe4f4d674ded53ffc6b", + "f195f321e9e3d6bd7d074504dd2ab0e6241f92e784b1aa271ff648b1cab6d7f6", + "27e4cc72090f241266476a7c09495f2db153d5bcbd761903ef79275ec56b2ed8", + "899c2405788e25b99a1846355e646d77cf400083415f7dc5afe69d6e17c00023", + "a59b78c4905744076bfee894de707d4f120b5c6893ea0400297d0bb834727632", + "59dc78b105649707a2bb4419c48f005400d3973de3736610230435b10424b24f", + "c0149d1d7e7a6353a6d906efe728f2f329fe14a4149a3ea77609bc42b975ddfa", + "a32f241474a6c16932e9243be0cf09bcdc7e0ca0e7a6a1b9b1a0f01e41502377", + "b239b2e4f81841361c1339f68e2c359f929af9ad9f34e01aab4631ad6d5500b0", + "85fb419c7002a3e0b4b6ea093b4c1ac6936645b65dac5ac15a8528b7b94c1754", + "9619720625f190b93a3fad186ab314189633c0d3a01e6f9bc8c4a8f82f383dbf", + "7d620d90fe69fa469a6538388970a1aa09bb48a2d59b347b97e8ce71f48c7f46", + "294383568596fb37c75bbacd979c5ff6f20a556bf8879cc72924855df9b8240e", + "16b18ab314359c2b833c1c6986d48c55a9fc97cde9a3c1f10a3177140f73f738", + "8cbbdd14bc33f04cf45813e4a153a273d36adad5ce71f499eeb87fb8ac63b729", + "69c9a498db174ecaefcc5a3ac9fdedf0f813a5bec727f1e775babdec7718816e", + "b462c3be40448f1d4f80626254e535b08bc9cdcff599a768578d4b2881a8e3f0", + "553e9d9c5f360ac0b74a7d44e5a391dad4ced03e0c24183b7e8ecabdf1715a64", + "7a7c55a56fa9ae51e655e01975d8a6ff4ae9e4b486fcbe4eac044588f245ebea", + "2afdf3c82abc4867f5de111286c2b3be7d6e48657ba923cfbf101a6dfcf9db9a", + "41037d2edcdce0c49b7fb4a6aa0999ca66976c7483afe631d4eda283144f6dfc", + "c4466f8497ca2eeb4583a0b08e9d9ac74395709fda109d24f2e4462196779c5d", + "75f609338aa67d969a2ae2a2362b2da9d77c695dfd1df7224a6901db932c3364", + "68606ceb989d5488fc7cf649f3d7c272ef055da1a93faecd55fe06f6967098ca", + "44346bdeb7e052f6255048f0d9b42c425bab9c3dd24168212c3ecf1ebf34e6ae", + "8e9cf6e1f366471f2ac7d2ee9b5e6266fda71f8f2e4109f2237ed5f8813fc718", + "84bbeb8406d250951f8c1b3e86a7c010082921833dfd9555a2f909b1086eb4b8", + "ee666f3eef0f7e2a9c222958c97eaf35f51ced393d714485ab09a069340fdf88", + "c153d34a65c47b4a62c5cacf24010975d0356b2f32c8f5da530d338816ad5de6", + "9fc5450109e1b779f6c7ae79d56c27635c8dd426c5a9d54e2578db989b8c3b4e", + "d12bf3732ef4af5c22fa90356af8fc50fcb40f8f2ea5c8594737a3b3d5abdbd7", + "11030b9289bba5af65260672ab6fee88b87420acef4a1789a2073b7ec2f2a09e", + "69cb192b8444005c8c0ceb12c846860768188cda0aec27a9c8a55cdee2123632", + "db444c15597b5f1a03d1f9edd16e4a9f43a667cc275175dfa2b704e3bb1a9b83", + "3fb735061abc519dfe979e54c1ee5bfad0a9d858b3315bad34bde999efd724dd", +} + +var hashes128 = []string{ + "9536f9b267655743dee97b8a670f9f53", + "13bacfb85b48a1223c595f8c1e7e82cb", + "d47a9b1645e2feae501cd5fe44ce6333", + "1e2a79436a7796a3e9826bfedf07659f", + "7640360ed3c4f3054dba79a21dda66b7", + "d1207ac2bf5ac84fc9ef016da5a46a86", + "3123987871e59305ece3125abfc0099a", + "cf9e072ad522f2cda2d825218086731c", + "95d22870392efe2846b12b6e8e84efbb", + "7d63c30e2d51333f245601b038c0b93b", + "ed608b98e13976bdf4bedc63fa35e443", + "ed704b5cd1abf8e0dd67a6ac667a3fa5", + "77dc70109827dc74c70fd26cba379ae5", + "d2bf34508b07825ee934f33958f4560e", + "a340baa7b8a93a6e658adef42e78eeb7", + "b85c5ceaecbe9a251eac76f6932ba395", + "246519722001f6e8e97a2183f5985e53", + "5bce5aa0b7c6cac2ecf6406183cd779a", + "13408f1647c02f6efd0047ad8344f695", + "a63970f196760aa36cb965ab62f0e0fa", + "bc26f48421dd99fd45e15e736d3e7dac", + "4c6f70f9e3237cde918afb52d26f1823", + "45ed610cfbc37db80c4bf0eef14ae8d6", + "87c4c150705ea5078209ec008200539c", + "54de21f5e0e6f2afe04daeb822b6931e", + "9732a04e505064e19de3d542e7e71631", + "d2bd27e95531d6957eef511c4ba64ad4", + "7a36c9f70dcc7c3063b547101a5f6c35", + "322007d1a44c4257bc7903b183305529", + "dbcc9a09f412290ca2e0d53dfd142ddb", + "df12ed43b8e53a56db20e0f83764002c", + "d114cc11e7d5b33a360c45f18d4c7c6e", + "c43b5e836af88620a8a71b1652cb8640", + "9491c653e8867ed73c1b4ac6b5a9bb4d", + "06d0e988df94ada6c6f9f36f588ab7c5", + "561efad2480e93262c8eeaa3677615c4", + "ba8ffc702e5adc93503045eca8702312", + "5782be6ccdc78c8425285e85de8ccdc6", + "aa1c4393e4c07b53ea6e2b5b1e970771", + "42a229dc50e52271c51e8666023ebc1e", + "53706110e919f84de7f8d6c7f0e7b831", + "fc5ac8ee39cc1dd1424391323e2901bd", + "bed27b62ff66cac2fbb68193c727106a", + "cd5e689b96d0b9ea7e08dac36f7b211e", + "0b4c7f604eba058d18e322c6e1baf173", + "eb838227fdfad09a27f0f8413120675d", + "3149cf9d19a7fd529e6154a8b4c3b3ad", + "ca1e20126df930fd5fb7afe4422191e5", + "b23398f910599f3c09b6549fa81bcb46", + "27fb17c11b34fa5d8b5afe5ee3321ead", + "0f665f5f04cf2d46b7fead1a1f328158", + "8f068be73b3681f99f3b282e3c02bba5", + "ba189bbd13808dcf4e002a4dd21660d5", + "2732dcd1b16668ae6ab6a61595d0d62a", + "d410ccdd059f0e02b472ec9ec54bdd3c", + "b2eaa07b055b3a03a399971327f7e8c2", + "2e8a225655e9f99b69c60dc8b4d8e566", + "4eb55416c853f2152e67f8a224133cec", + "49552403790d8de0505a8e317a443687", + "7f2747cd41f56942752e868212c7d5ac", + "02a28f10e193b430df7112d2d98cf759", + "d4213404a9f1cf759017747cf5958270", + "faa34884344f9c65e944882db8476d34", + "ece382a8bd5018f1de5da44b72cea75b", + "f1efa90d2547036841ecd3627fafbc36", + "811ff8686d23a435ecbd0bdafcd27b1b", + "b21beea9c7385f657a76558530438721", + "9cb969da4f1b4fc5b13bf78fe366f0c4", + "8850d16d7b614d3268ccfa009d33c7fc", + "aa98a2b6176ea86415b9aff3268c6f6d", + "ec3e1efa5ed195eff667e16b1af1e39e", + "e40787dca57411d2630db2de699beb08", + "554835890735babd06318de23d31e78a", + "493957feecddc302ee2bb2086b6ebfd3", + "f6069709ad5b0139163717e9ce1114ab", + "ba5ed386098da284484b211555505a01", + "9244c8dfad8cbb68c118fa51465b3ae4", + "51e309a5008eb1f5185e5cc007cfb36f", + "6ce9ff712121b4f6087955f4911eafd4", + "59b51d8dcda031218ccdd7c760828155", + "0012878767a3d4f1c8194458cf1f8832", + "82900708afd5b6582dc16f008c655edd", + "21302c7e39b5a4cdf1d6f86b4f00c9b4", + "e894c7431591eab8d1ce0fe2aa1f01df", + "b67e1c40ee9d988226d605621854d955", + "6237bdafa34137cbbec6be43ea9bd22c", + "4172a8e19b0dcb09b978bb9eff7af52b", + "5714abb55bd4448a5a6ad09fbd872fdf", + "7ce1700bef423e1f958a94a77a94d44a", + "3742ec50cded528527775833453e0b26", + "5d41b135724c7c9c689495324b162f18", + "85c523333c6442c202e9e6e0f1185f93", + "5c71f5222d40ff5d90e7570e71ab2d30", + "6e18912e83d012efb4c66250ced6f0d9", + "4add4448c2e35e0b138a0bac7b4b1775", + "c0376c6bc5e7b8b9d2108ec25d2aab53", + "f72261d5ed156765c977751c8a13fcc1", + "cff4156c48614b6ceed3dd6b9058f17e", + "36bfb513f76c15f514bcb593419835aa", + "166bf48c6bffaf8291e6fdf63854bef4", + "0b67d33f8b859c3157fbabd9e6e47ed0", + "e4da659ca76c88e73a9f9f10f3d51789", + "33c1ae2a86b3f51c0642e6ed5b5aa1f1", + "27469b56aca2334449c1cf4970dcd969", + "b7117b2e363378aa0901b0d6a9f6ddc0", + "a9578233b09e5cd5231943fdb12cd90d", + "486d7d75253598b716a068243c1c3e89", + "66f6b02d682b78ffdc85e9ec86852489", + "38a07b9a4b228fbcc305476e4d2e05d2", + "aedb61c7970e7d05bf9002dae3c6858c", + "c03ef441f7dd30fdb61ad2d4d8e4c7da", + "7f45cc1eea9a00cb6aeb2dd748361190", + "a59538b358459132e55160899e47bd65", + "137010fef72364411820c3fbed15c8df", + "d8362b93fc504500dbd33ac74e1b4d70", + "a7e49f12c8f47e3b29cf8c0889b0a9c8", + "072e94ffbfc684bd8ab2a1b9dade2fd5", + "5ab438584bd2229e452052e002631a5f", + "f233d14221097baef57d3ec205c9e086", + "3a95db000c4a8ff98dc5c89631a7f162", + "0544f18c2994ab4ddf1728f66041ff16", + "0bc02116c60a3cc331928d6c9d3ba37e", + "b189dca6cb5b813c74200834fba97f29", + "ac8aaab075b4a5bc24419da239212650", + "1e9f19323dc71c29ae99c479dc7e8df9", + "12d944c3fa7caa1b3d62adfc492274dd", + "b4c68f1fffe8f0030e9b18aad8c9dc96", + "25887fab1422700d7fa3edc0b20206e2", + "8c09f698d03eaf88abf69f8147865ef6", + "5c363ae42a5bec26fbc5e996428d9bd7", + "7fdfc2e854fbb3928150d5e3abcf56d6", + "f0c944023f714df115f9e4f25bcdb89b", + "6d19534b4c332741c8ddd79a9644de2d", + "32595eb23764fbfc2ee7822649f74a12", + "5a51391aab33c8d575019b6e76ae052a", + "98b861ce2c620f10f913af5d704a5afd", + "b7fe2fc8b77fb1ce434f8465c7ddf793", + "0e8406e0cf8e9cc840668ece2a0fc64e", + "b89922db99c58f6a128ccffe19b6ce60", + "e1be9af665f0932b77d7f5631a511db7", + "74b96f20f58de8dc9ff5e31f91828523", + "36a4cfef5a2a7d8548db6710e50b3009", + "007e95e8d3b91948a1dedb91f75de76b", + "a87a702ce08f5745edf765bfcd5fbe0d", + "847e69a388a749a9c507354d0dddfe09", + "07176eefbc107a78f058f3d424ca6a54", + "ad7e80682333b68296f6cb2b4a8e446d", + "53c4aba43896ae422e5de5b9edbd46bf", + "33bd6c20ca2a7ab916d6e98003c6c5f8", + "060d088ea94aa093f9981a79df1dfcc8", + "5617b214b9df08d4f11e58f5e76d9a56", + "ca3a60ee85bd971e1daf9f7db059d909", + "cd2b7754505d8c884eddf736f1ec613e", + "f496163b252f1439e7e113ba2ecabd8e", + "5719c7dcf9d9f756d6213354acb7d5cf", + "6f7dd40b245c54411e7a9be83ae5701c", + "c8994dd9fdeb077a45ea04a30358b637", + "4b1184f1e35458c1c747817d527a252f", + "fc7df674afeac7a3fd994183f4c67a74", + "4f68e05ce4dcc533acf9c7c01d95711e", + "d4ebc59e918400720035dfc88e0c486a", + "d3105dd6fa123e543b0b3a6e0eeaea9e", + "874196128ed443f5bdb2800ca048fcad", + "01645f134978dc8f9cf0abc93b53780e", + "5b8b64caa257873a0ffd47c981ef6c3f", + "4ee208fc50ba0a6e65c5b58cec44c923", + "53f409a52427b3b7ffabb057ca088428", + "c1d6cd616f5341a93d921e356e5887a9", + "e85c20fea67fa7320dc23379181183c8", + "7912b6409489df001b7372bc94aebde7", + "e559f761ec866a87f1f331767fafc60f", + "20a6f5a36bc37043d977ed7708465ef8", + "6a72f526965ab120826640dd784c6cc4", + "bf486d92ad68e87c613689dd370d001b", + "d339fd0eb35edf3abd6419c8d857acaf", + "9521cd7f32306d969ddabc4e6a617f52", + "a1cd9f3e81520842f3cf6cc301cb0021", + "18e879b6f154492d593edd3f4554e237", + "66e2329c1f5137589e051592587e521e", + "e899566dd6c3e82cbc83958e69feb590", + "8a4b41d7c47e4e80659d77b4e4bfc9ae", + "f1944f6fcfc17803405a1101998c57dd", + "f6bcec07567b4f72851b307139656b18", + "22e7bb256918fe9924dce9093e2d8a27", + "dd25b925815fe7b50b7079f5f65a3970", + "0457f10f299acf0c230dd4007612e58f", + "ecb420c19efd93814fae2964d69b54af", + "14eb47b06dff685d88751c6e32789db4", + "e8f072dbb50d1ab6654aa162604a892d", + "69cff9c62092332f03a166c7b0034469", + "d3619f98970b798ca32c6c14cd25af91", + "2246d423774ee9d51a551e89c0539d9e", + "75e5d1a1e374a04a699247dad827b6cf", + "6d087dd1d4cd15bf47db07c7a96b1db8", + "967e4c055ac51b4b2a3e506cebd5826f", + "7417aa79247e473401bfa92a25b62e2a", + "24f3f4956da34b5c533d9a551ccd7b16", + "0c40382de693a5304e2331eb951cc962", + "9436f949d51b347db5c8e6258dafaaac", + "d2084297fe84c4ba6e04e4fb73d734fe", + "42a6f8ff590af21b512e9e088257aa34", + "c484ad06b1cdb3a54f3f6464a7a2a6fd", + "1b8ac860f5ceb4365400a201ed2917aa", + "c43eadabbe7b7473f3f837fc52650f54", + "0e5d3205406126b1f838875deb150d6a", + "6bf4946f8ec8a9c417f50cd1e67565be", + "42f09a2522314799c95b3fc121a0e3e8", + "06b8f1487f691a3f7c3f74e133d55870", + "1a70a65fb4f314dcf6a31451a9d2704f", + "7d4acdd0823279fd28a1e48b49a04669", + "09545cc8822a5dfc93bbab708fd69174", + "efc063db625013a83c9a426d39a9bddb", + "213bbf89b3f5be0ffdb14854bbcb2588", + "b69624d89fe2774df9a6f43695d755d4", + "c0f9ff9ded82bd73c512e365a894774d", + "d1b68507ed89c17ead6f69012982db71", + "14cf16db04648978e35c44850855d1b0", + "9f254d4eccab74cd91d694df863650a8", + "8f8946e2967baa4a814d36ff01d20813", + "6b9dc4d24ecba166cb2915d7a6cba43b", + "eb35a80418a0042b850e294db7898d4d", + "f55f925d280c637d54055c9df088ef5f", + "f48427a04f67e33f3ba0a17f7c9704a7", + "4a9f5bfcc0321aea2eced896cee65894", + "8723a67d1a1df90f1cef96e6fe81e702", + "c166c343ee25998f80bad4067960d3fd", + "dab67288d16702e676a040fd42344d73", + "c8e9e0d80841eb2c116dd14c180e006c", + "92294f546bacf0dea9042c93ecba8b34", + "013705b1502b37369ad22fe8237d444e", + "9b97f8837d5f2ebab0768fc9a6446b93", + "7e7e5236b05ec35f89edf8bf655498e7", + "7be8f2362c174c776fb9432fe93bf259", + "2422e80420276d2df5702c6470879b01", + "df645795db778bcce23bbe819a76ba48", + "3f97a4ac87dfc58761cda1782d749074", + "50e3f45df21ebfa1b706b9c0a1c245a8", + "7879541c7ff612c7ddf17cb8f7260183", + "67f6542b903b7ba1945eba1a85ee6b1c", + "b34b73d36ab6234b8d3f5494d251138e", + "0aea139641fdba59ab1103479a96e05f", + "02776815a87b8ba878453666d42afe3c", + "5929ab0a90459ebac5a16e2fb37c847e", + "c244def5b20ce0468f2b5012d04ac7fd", + "12116add6fefce36ed8a0aeccce9b6d3", + "3cd743841e9d8b878f34d91b793b4fad", + "45e87510cf5705262185f46905fae35f", + "276047016b0bfb501b2d4fc748165793", + "ddd245df5a799417d350bd7f4e0b0b7e", + "d34d917a54a2983f3fdbc4b14caae382", + "7730fbc09d0c1fb1939a8fc436f6b995", + "eb4899ef257a1711cc9270a19702e5b5", + "8a30932014bce35bba620895d374df7a", + "1924aabf9c50aa00bee5e1f95b5d9e12", + "1758d6f8b982aec9fbe50f20e3082b46", + "cd075928ab7e6883e697fe7fd3ac43ee", +} + +// hashes2X is taken from +// https://github.com/BLAKE2/BLAKE2/blob/master/testvectors/blake2-kat.json +var hashes2X = []string{ + "0e", + "5196", + "ad6bad", + "d8e4b32f", + "8eb89056f3", + "410497c2ed72", + "f0de771b375c90", + "8662db8685033611", + "9ef9f1eed88a3f52ca", + "08225082df0d2b0a815e", + "0f6e84a17439f1bc97c299", + "895ec39c78d3556cefdbfabc", + "2b396b3fa90ab556079a79b44d", + "abae26501c4c1d6123c0f2289111", + "bca098df9099b3f785a37ba40fce5f", + "19b827f054b67a120f11efb0d690be70", + "b88d32a338fd60b58570fda228a121113b", + "3f30143af1cad33f9b794576e078cc79062e", + "ffddb58d9aa8d38086fcdae07e6653e8f31dfc", + "abb99c2e74a74556919040ca0cd857c95ec985e9", + "71f13f89af55ba936f8a7188ee93d2e8fb0cf2a720", + "99734fdf0eef4838a7515426f4c59b800854e2fcdc1c", + "579b1652aa1f5779d2b0e61868af856855020bdd44d7a7", + "1383d4ab4a6d8672b4075d421a159f69380ff47e4bb518d5", + "d3fa1412712dbbab71d4c6265dc1585c8dcc73380cf807f76a", + "1d57868a71e7245667780455d9aaa9e0683baf08fbaf946091c2", + "ef80418fe7049c6251ed7960a6b0e9def0da2749781994b24593a0", + "ef91cb81e4bfb50231e89475e251e2ef2fde59357551cd227588b63f", + "d7f398a5d21c3139cff0562a84f154b6953c7bc18a5f4b60491c196b6d", + "0a2abc6d38f30aef253579a4088c5b9aec64391f37d576eb06a300c193a5", + "02dd758fa23113a14fd94830e50e0f6b86faec4e551e808b0ca8d00fef2a15", + "a4fe2bd0f96a215fa7164ae1a405f4030a586c12b0c29806a099d7d7fdd8dd72", + "7dce710a20f42ab687ec6ea83b53faaa418229ce0d5a2ff2a5e66defb0b65c03c9", + "0320c40b5eea641d0bc25420b7545ac1d796b61563728a4dc451207f1addeedcf860", + "460539415f2baeb626fad748dee0eb3e9f27221661160e13edf39d1b5d476ee0672400", + "02de8ffa5b9c748164f99ed9d678b02e53f4ae88fb26c6d94a8cefc328725a692eae78c2", + "348a61a0136436136910262ad67ef20644b32c15456d5fad6b1679386d0bea87cc1a2e2b5e", + "24c32966c803434d48d2283482ee8f404f598cf7a17961748125d2ed1da987039b1ce00f2ba7", + "bd07cb16121d3b47adf03b96c41c947beadc01e40548e0d0773e61780d48d33a0e2a675ca681a6", + "a35844e34c20b4b9371b6c52fac412afe5d80a4c1e40aa3a0e5a729dc3d41c2c3719d096f616f0ba", + "6df1efbb4567747fe98d218935612f8835852dde2ce3dec767792d7f1d876cdae0056fef085245449d", + "48d6094af78bd38d8f4b39c54279b80ef617bc6ad21def0b2c62113b656c5d6a55aea2e3fde94a254b92", + "cd6e684759d2f19083164712c2aca0038442efb5b646594396b1fccdbd21203290f44cfdecca0373b3801b", + "155dfbf26103c8354362663677fa27d0e1ce3487a821a2a7171014c1bd5dd071f4974df272b1374765b8f2e1", + "15b11067f311efa4ee813dbca48d690dc92780656bc4d4c56510523190a240180867c829a8b8b9844175a8aa23", + "9bc27953a17fb84d5eabe95b4ea6bc03ea450274abccfb6f3938ded8560fb59662459a11a86b0e0f32fbea6bb1f8", + "03b78fb0b34fb8662accdf350a6be75ace9789653ee4375d351e871f6a98ac5e782ca4b4a717665d25e49a5ae25d81", + "687e9a6fda6e2ce0e40e4d30fef38c31e3513d2892bbe85c991fc3715947e42bc49bcd079a40ed061c2c3665efe555ab", + "f3886027d2049a8909e26545bd202d6a6fa2a6f815d31c7d520f705a81fa606dd695369c37aee4fa77dc645e9b05813ceb", + "e4a412ccd20b97797d91ccc286904fcd17c5afe8bed0618f1af333c052c473cd327637d951c32e4af047106036a3bc8c1c45", + "92f4b8c240a28b6238bc2eabadaf2ff3c4bfe0e6c61268ace6aebdeb0691450caea4287db8b329bde96af8cdb8a0fe2f57ef2d", + "e506834b3445e1a9a9b7bae844e91e0834512a06c0dc75fa4604e3b903c4e23616f2e0c78b5cc496660b4a13064bb1138edef4ff", + "27031955a40d8dbd1591f26e3c26e367a3c68f8204a396c6a4ba34b89672896d11276966a42bd516716f35ed63e442e116dbcf35da", + "646b1635c68d2328dddd5ac26eb9877c24c28390a45753a65044c3136ae2fe4fb40d09bf555271646d3dceb1ab1b7c8d8e421f553f94", + "f6171f8d833743bdee7cc8f8b29c38614e1d2d8d6a5fff68bec2c0f4dd463d7941ff5c368e2683d8f1dc97119bde2b73ca412718bc8cb1", + "45db1c478b040aa2e23fb4427017079810775c62abe737e82ec0ef8dcd0fc51f521f29fe6412fff7eac9beb7bcf75f483f3f8b971e42454b", + "500dab14687db3ca3dde9304af5f54194b37bdf475628af46b07bfbf6bc2b64ecef284b17f9d1d9be41794699bc0e76c2878b3a55730f7142d", + "31bba2efc7b3f415c3f031d4c06bb590ae40085ad157370af30238e03e25a359c9e133212ed34b7a006f839173b577e7015a87fdff2270fafddb", + "0600b3fb4b5e1ed0c8b2698ac1d9905e67e027390764821f963ad8d2b33cbc378b9c25c3ee422992d22b760222ed5697be0576d73938ae9d634ed7", + "4c0ca4f177d132594a4c613bad68da24c564efa3b4da0d0a903f26534a2e09f8d799d10e78f48ccdb0203954a36c5cf1bf24c076632c2b022b041200", + "97aacf2e1b013677b2e14084f097cb1e64d7b3fa36f097e189d86dc4a263bcc46817cd1ee6ff0c7ccd9acef63201cdc0e36254e19204a7388643bb571f", + "71fd6846ce7adb0843d6063546a16b79b54ad6c0f018a479a45817624fa221f63525084860559d1a0679c8d89a80701c62743ec2da8419d503f8f0cd7946", + "f73dfb046def3362d6de36077dae2cee2587fe95fe0800548bb7d99737897096ba59052e0dadcc1fb0ccb5535391875328637a0376a43a4d89366758dfe3e2", + "ec470d0aa932c78c5bcf86203ec0014314114765fa679c3daef214f883a17e1b4ca12f44433772a6e4ef685c904b2fc35586c6bd88f325b965968b06d808d73f", + "cf601753ffa09fe48a8a84c37769991e96290e200bbaf1910c57760f989bd0c72e6128e294528ee861ad7eee70d589de3cf4a0c35f7197e1925a64d0133628d87d", + "f15413f7d6fc54bb55829f698da92ee42fcf58dde1aa1bd07d438ecdc32ad6bf2bcdbecc99f18ed43e81b33065af5a4ca29960ae50553e610c0bbf4153d580e73dbb", + "84b1738adb9757fb9402ef7113581291136184d7ae35fe0b6a738da6acb0889d4d5bac7a957024e3709fa80c77d3859871ed1aa25cf488e438a2d24cfadce6008761dd", + "e02814bb81f250c1835a05108396b74c7878e737654bb83155e241774d04e639bbc571b413cd9349092f926c8a149a53cd33e9b63f370b6d460e504199d2e7d849db6cbe", + "aeee4a789956ec0913592c30ce4f9c544894da77ba447c84df3be2c869100e4df8f7e316445d844b31c3209abcc912f647735fd4a7136c2f35c6fda5b2e6708f5ca951b2b0", + "8cfd11ca385de3c843de84c830d59278fe79b70fb5ddbfbfc1ddefeb22c329ef2f607d1d1abbd1cd0d0cc7c5d3ed922add76aadca0d2f57b66cb16c582b6f18f60aee2f7509b", + "852e5ce2047d8d8b42b4c7e4987b95d23e8026a202d4567951bbbd23111e389fe33a736318546a914d2bddedfbf53846036ad9e35f29318b1f96e33eba08f071d6dc665149feb6", + "f225c23164979d0d13874a90ee291627e4f61a672a5578506fd3d65a12cb48a182f78350dc24c637b2f3950dc4882a5c1d5d5bad551c6f3e0093aa87e962bea51566af3791d52d65", + "5f33864d882455f8ef046aed64e2d1691e5c1555e333b0852750592e6f00d3b5ec941d0c00e99629612795d5870cf93c984b45e4464ba072a34903b400a42824ac13da28c7c1cb1959", + "7baaee7c3eb68c18c5ae1d45ba381803de34e36a52e2d7ccc9d48a297273c4d8644b473195bc23005f7a4f5ca790b1fa11f6a96e585e635513f11745dd97a69c1222204ab28d3c7735df", + "d0a2a3fc450ef9af7ae982041feb2842901026467d87839c33b4a9e081ea63d5be60ae99ca6e42393ded45255b8f42886f87ba0310572d9f0d8b5a07ff4b6bae1f30559a844983cc568560", + "3aa4164462b3e7044c35b08b047b924790f6d5c520b1df4305b5d41f4717e81f0cd4bccb9a5a6594773832b8707443adde4047caaed2293f92234df257df54ed275a9658fab483d0576d33a9", + "c8b4239fd7f1b893d978268f77f6505b5775d89090374322d40083b0f4c437423f670ca213f7fe05c61069725da2561646eefaea597ac48e293fbad44c2872046857e56d04a426a84008cefd71", + "f94839a7024c0a16971271b6727c081770110c957b1f2e03be03d2200b565cf8240f2873b0426042aaea996a1784fadb2b27f23bc1a521b4f7320dfbed86cd38d75141365ba9b443defc0a3b4078", + "8af934fdc8b3376ca09bdd89f9057ed38b656bff96a8f8a3038d456a265689ca32036670cb01469cc6e958cc4a46f1e80d700ae56659828a65c0456b8e55f28f255bc86ce48e44377bf1f9970b617d", + "ada572989e42f0e38c1f7c22b46bb52a84df8f7b3b773c9f17a5823e59a9725248d703efb4cb011abc9474e8e711666ed3cfa60db48480a8160615dfabad761bc0eb843d2e46299c59b61a15b4422fdf", + "b11f1ea52a7e4bd2a5cf1e234b7c9eb909fb45860080f0a6bdb5517a37b5b7cd90f3a9e2297f995e96c293189b807a7bf6e7633bebbc36674544db5f18dd33020aeaf50ee832efe4d3d053873fd31ce3b9", + "e54b006cd96c43d19787c1ab1e08ea0f8922bdb7142e748212e7912a1f2c0a4fad1b9f5209c30960b8b83ef4960e929b155a8a48c8fb7ce4326915950cede6b98a96b6f1ecb12715b713985dacd1c1180413", + "ee2c2f31a414ccd8f6a790f55e09155fd50aac2a878f9014f6c6035cae9186f90cdef0b7adf3e207c3d24ddfba8cd321b2e9228b02a1182b6973da6698071fce8cc0a23a7bf0d5aefd21ab1b8dc7818549bba3", + "6d6810793bad6c7efe8fd56cac04a0fb8717a44c09cbfaebce196a80ac318c79ca5c2db54fee8191ee2d305b690a92bd9e2c947a3c29342a93ac05796484638787a184e4525e82aeb9afa2f9480caebb91014c51", + "91e4694366cff84854872667fd168d2d42eca9070cdc92fca9936e8361e7266931f418450d098a42686241d08024dd72f0024d22ba644bd414245e78608942321ff61860ba1245f83c88592dc7995c49c0c53aa8a9", + "608aa620a5cf145f4477694407ccd8faa3182465b29ae98d96a42f7409434c21e4671bcae079f6871a09d8f2965e4926a9b08277d32f9dd6a474e3a9fb232f27fc4235df9c02abf67f7e540ca9ddc270ee91b23a5b57", + "c14f75e92f75f4356ab01c8792af13383e7fef2ffb3064de55e8da0a50511fea364ccd8140134872adccad197228319260a7b77b67a39677a0dcdcadfb750333ac8e032121e278bdcdbed5e452dae0416011186d9ebf29", + "03fcb9f6e1f058091b11351e775184ff2cd1f31ee846c6ea8efd49dd344f4af473f92eb44eba8a019776f77bb24e294aa9f962b39feecf7c59d46f1a606f89b1e81c2715ac9aa252e9ce941d091ffb99bb52404961794cf8", + "11e189b1d90fcfe8111c79c5351d826f5ec15a602af3b71d50bc7ed813f36c9a682520984ae911669d3c3036223a53176794c7e17929efab2b1c5b500f24f8c83d3db5d1029c5714c6fd34eb800a913985c218071677b9885c", + "69f8f5db3ab0321a708ab2f4234645dade6bfda495851dbe7257f2b72e3e8378b9fa8120bc836b737a675271e519b4712d2b56b359e0f2234ba7552dd4828b939e0542e729878ac1f81b6ce14cb573e76af3a6aa227f95b2350e", + "be734d78fae92cacb009cc400e023086bc3a3a10e8ca7cb4d553ea85314f51383660b8508e8477af60baf7e07c04cc9e094690ae12c73e5f089763201b4b48d664b94b4f5820bd1540f4a84100fdf8fce7f6466aa5d5c34fcbab45", + "d61b77032403f9b6ea5ad2b760eb0157545e37f1712ec44d7926ccf130e8fc0fe8e9b15570a6214c3899a074811486182b250dc97ebdd3b61403614d935cd0a61c0899f31b0e49b81c8a9a4fe8409822c470aacfde229d965dd62f51", + "c31bd548e36d5fae95ed8fa6e807642711c897f0fcc3b0d00bd317ed2bca73412064618c6a84a61c71bce3e963333b0266a5656571dcc4ba8a8c9d84af4bdb445c34a7aef445b15d77698e0b13c436c928cc7fa7acd5f68867e8132993", + "9903b8adab803d085b634bfae2e109dd247a7d6249f203403216d9f7410c36142df8fa56fb4d6f78136eef5817bad5ea3608439bb19336628c37d42db16ab2df8018b773baedafb77278a50926370b48bd81710203c7abc7b4043f9a1751", + "4dadaf0d6a96022c8ce40d48f460526d9956da33260e1770315ead420da75b122c762762aa3ddc1aef9070ff2298b2304cf90443318b17183b60778f3859b141053e5827decfff27ff106a48cfdb0371d0ef614fc7400e860b676df3176d1a", + "314dda800f2f494ca9c9678f178940d2284cb29c51cb01ca2019a9bede0cdc50f8ecf2a77e238b884867e78e691461a66100b38f374c4ccac80309641533a3217eca7e6b9a9af01c026201f0afaec5a61629a59eb530c3cb81934b0cb5b45eae", + "4658b7500951f75c84e4509d74047ca621009835c0152f03c9f96ca73beb29608c44390ba4473323e621284be872bdb72175628780113e470036265d11dfcb284ac04604e667f1e4c1d357a411d3100d4d9f84a14a6fabd1e3f4de0ac81af50179", + "491f877592837e7912f16b73ee1fb06f4633d854a5723e156978f48ec48fbd8b5e863c24d838ff95fa865155d07e5513df42c8bb7706f8e3806b705866475c0ac04bbe5aa4b91b7dc373e82153483b1b03304a1a791b058926c1becd069509cbf46e", + "231034720c719ab31f7c146a702a971f5943b70086b80a2a3eb928fa9380b7a1ad8773bfd0739142d2ad6e19819765ca54f92db5f16c1df5fa4b445c266215a92527bd4ef50ed277b9a21aee3fb7a8128c14ce084f53eac878a7a660b7c011eb1a33c5", + "3366860c77804fe0b4f368b02bb5b0d150821d957e3ba37842da9fc8d336e9d702c8446ecafbd19d79b868702f32405853bc17695873a7306e0ce4573cd9ac0b7fc7dd35534d7635198d152a1802f7d8d6a4bb07600fcdaacfaa1c3f40a09bc02e974c99", + "ccbbbe621f910a95835f5f8d74b21e13f8a4b03f72f91f37b5c7e995aa3cd5539508d5e234e77a4668a42c239b2d13ef0e55ecf85142055e3f8a7e46320e21324a6b88e6c823ac04b485125c2aa59b61476481208f92ea4dd330cb18777c1cf0df7cd07893", + "87faf0e49e7e5ab66ee3147921f8817867fe637d4ab694c33ee8009c759e7d707f44c69c1b9754e2b4f8f47b25f51cd01de7273f548f4952e8efc4d9044c6ea72d1d5857e0ffeb3f44b0c88cb67683401cfb2f1d17f0ca5696641bef28d7579f68d9d066d968", + "38c876a007ec727c92e2503990c4d9407cea2271026aee88cd7b16c4396f00cc4b760576adf2d683713a3f6063cc13ecd7e4f3b6148ad914ca89f34d1375aa4c8e2033f1315153189507bfd116b07fc4bc14f751bbbb0e752f621153ae8df4d68491a22430b309", + "87d636a33dbd9ad81ecd6f3569e418bf8a972f97c5644787b99c361195231a72455a121dd7b3254d6ff80101a0a1e2b1eb1ca4866bd23063fe007310c88c4a2ab3b49f14755cd0ee0e5ffa2fd0d2c0ea41d89e67a27a8f6c94b134ba8d361491b3c20bacac3d226b", + "b021af793badbb857f9a353e320450c44c1030fce3885e6b271bcc02e6af65fdc5be4dc483ff44bd5d539ed1e7eb7efe3001252e92a87df8227ace601047e101c871d29302b3cb6c6f4639078afc81c4c0f4c2e04688612ecf3f7be1d58ea92894a5dab49b949f2089", + "c5c1f2fbf2c8504a686b615278fc6221858d401b7fe790b75fb6bca6885cdd128e9142bf925471ee126f9e62d984de1c30c9c677eff5fdbd5eb0fa4ef3bff6a831056cea20fd61cf44d56ffc5bda0e8472ecdc67946d63c40db4ba882bc4dfa16d8ddac600570b9b6bf3", + "88f8cc0daeaeaea7ab0520a311dff91b1fd9a7a3ec778c333422c9f3eb0bc183acc80dfefb17a5ac5f95c490693c45666ec69234919b83244003191bad837aa2a237daeb427e07b9e7aa6ca94b1db03d54ee8f4fe8d0802cb14a6599005eb6326eefe5008d9098d40aa851", + "2eb6b1a58e7fe39ff915ac84c2f21a22432c4f0d260380a3f993310af048b11647f95d23adf8a746500833ee4e467fb52ea9f1039519fa58bcb0f1d0151558147b3c92b83730aba0e20eeeea2b75f3ff3ad79f2f8a46cbbadb114a52e32f018342aeeaf827e03ad6d583bbce", + "3ba7dcd16a98be1df6b904457709b906cbf8d39516ef107006c0bf363db79f91aaae033466624d30858e61c2c368599963e49f22446e4473aa0df06e9c734e183a941510d540536377072334910e9cef56bc66c12df310ecd4b9dc14207439c1da0ac08bdd9be9f2c840df207e", + "a34a7926324ea96867dac6f0dba51d753268e497b1c4f272918c7eb0e34120be65b7b5ba044d583141ec3ea16fcedae6197116b16562fb0706a89dc8efd3ba173ccd0fd7d84d480e0a3dda3b580c326aa1caca623879b0fb91e7d173998889da704eda6495023b5ad4c9ad406298", + "5ef97d80b90d5c716322d9ba645a0e1b7a403968258a7d43d310320f60f96235f50e9f22cac0ad239636521fa0607d2f471051b505b371d88778c46fe6787d47a91a5bec4e3900fe6ed22918226fc9fbb3f70ee733c369420612b76b5f55988d757c891d7005d17ee55783fe506202", + "140d2c08dae0553f6a49585fd5c217796279152b2e100ebde6812d6e5f6b862b2a3a484aed4d6226197e511be2d7f05f55a916e32534ddcb81bdcf499c3f44f526eb515cc3b6fa4c4039ad251253241f541558bba7413ca29318a414179048a054104e433c674ca2d4b3a4c181878727", + "29fdfc1e859b001ee104d107216b5299a792d26b2418e823e0381fa390380d654e4a0a0720ba5ff59b2ff22d8c4e013284f980911dcfec7f0dca2f89867f311ced1ac8a14d669ef1114504a5b7626f67b22ecd86469800f1575543b72ab1d4c5c10ee08f06159a4a3e1ae09937f12aa173", + "52dfb643832a598a10786a430fc484d6370a05356ee61c80a101dbbcfac75847fba78e27e537cc4eb918eb5ab40b968d0fb23506fee2ad37e12fb7534fb55a9e50902b69ceb78d51db449cbe2d1fc0a8c0022d8a82e2182b0a059035e5f6c4f4cc90278518e178becfbea814f317f9e7c051", + "d32f69c6a8ee00ca83b82eaf82e312fbb00d9b2f6202412a1ffc6890b4509bbbeda4c4a90e8f7bca37e7fd82bd23307e2342d27aa10039a83da55e84ce273822740510e4ec239d73c52b0cbc245ad523af961994f19db225212bf4cc160f68a84760233952a8e09f2c963be9bb1d71ca4bb265", + "d1e603a46aa49ee1a9ded63918f80feca5fc22fb45f659fd837ff79be5ad7faf0bbd9c4ba91628ee293b478a7e6a7bd433fa265c20e5941b9ea7edc906055ce9799cbb06d0b33ae7ed7f4b918cc082c3d4a1ac317a4acec175a73cc3eeb7cb97d96d24133a29c19375c57f3a4105519846dd14d4", + "b45ac88fac2e8d8f5a4a90930cd7523730733369af9e39bf1ffb833c01108952198301f4619f04b9c399fef04c214bad3358999967c474b67a7c06457a1d61f9466489ed5c0c64c6cdc83027386d6263491d18e81ae8d68ca4e396a71207adaaa60997d0dca867065e68852e6dba9669b62dc7672b", + "d5f2893edd67f8a4b5245a616039ffe459d50e3d103ad4675102028f2c497ea69bf52fa62cd9e84f30ae2ea40449302932bbb0a5e426a054f166fdbe92c744314cc0a0aa58bbc3a8739f7e099961219ec208a8d01c1ae8a2a2b06534bf822aaa00ca96218e430f0389c69c7f3fd195e128c38d484ff6", + "37279a76e79f33f8b52f29358841db9ec2e03cc86d09a335f5a35c0a31a1db3e9c4eb7b1d1b978332f47f8c3e5409d4e443e1d15342a316f442e3bfa151f6a0d216df2443d80cbcf12c101c51f2946d81161583218584640f4f9c10de3bb3f4772bd3a0f4a365f444777456b913592719818afb26472b6", + "a46d252a0addf504ad2541e7d992cbed58a22ea5679980fb0df072d37540a77dd0a1448bdb7f172da7da19d6e4180a29356ecb2a8b5199b59a24e7028bb4521f3281313d2c00da9e1d284972ab6527066e9d508d68094c6aa03537226ef19c28d47f91dddebfcc796ec4221642ddf9de5b80b3b90c22d9e7", + "060c18d8b57b5e6572dee194c69e265c2743a48d4185a802eaa8d4dbd4c66c9ff725c93667f1fb816418f18c5f9be55e38b7718a9250bc06284bd834c7bd6dfcd11a97c14779ac539629bcd6e15b5fca3466d14fe60d8671af0fb8b080218703bc1c21563b8f640fde0304a3f4aeb9ec0482f880b5be0daa74", + "8f2f42bc01acca20d36054ec81272da60580a9a5414697e0bdb4e44a4ab18b8e690c8056d32f6eaaf9ee08f3448f1f23b9844cf33fb4a93cba5e8157b00b2179d18b6aa7215ae4e9dc9ad52484ad4bfb3688fc80565ddb246dd6db8f0937e01b0d2f2e2a64ad87e03c2a4ad74af5ab97976379445b96404f1d71", + "ccb9e524051cca0578aa1cb437116a01c400338f371f9e57525214ad5143b9c3416897eae8e584ce79347297071f67041f921cbc381c2be0b310b8004d039c7cc08cb8ff30ef83c3db413f3fb9c799e31cd930f64da1592ec980cc19830b2a448594cb12a61fc7a229e9c59fe1d66179772865894afd068f0942e5", + "3eb5dc42172022ab7d0bc465a3c725b2d82ee8d9844b396913ceb8a885323dbbbf9ef4ed549724cc96d451ea1d1d44a8175a75f2a7d44bb8bfc2c2dffed00db0328cfde52bf9171f4025770abbe59b3aefd8151c480bafa09f613955fd571e5d8c0d4936c670d182cf119c068d420ded12af694d63cd5aef2f4f6f71", + "20ea77e58e41337ad63f149ed962a8210b6efa3747fe9bea317c4b48f9641f7145b7906ed020a7ae7d2ee59435392edc32aee7eff978a661375af723fbd440dd84e4a152f2e6ef66f4ab1046b22c77ac52717de721dfe39aa8ba8cd5da27baca00cc1fffe12c52382f0ee83ad1418f4c6a122effaf7471e1e125d7e7ba", + "95c662b835171fa23f948c3c3ed27bab9b3c367bbfe267fe65f8037a35b50cd7fc6030bfce4000425ef646c34793f0762635ae70487a0216ef7428da622be895d1b6040423246511c2370d6876a5c5d2df8bbd48fb14f787b632ad2c1f5a927fdf36bc493c1c8606accfa52de33258669f7d2d73c9c81119591c8ea2b0ef", + "f708a230675d83299cc43167a771602d52fa37cbc068ef9128ef60d186e5d98efb8c98798da619d2011bf4673214f4a4c82e4b11156f6292f6e676d5b84dc1b81e7cc811b0d37310ac58da1bfcb339f6ba689d80dd876b82d131e03f450c6c9f15c3a3b3d4db43c273c94ed1d1bd6d369c4d30256ff80ea626bda56a6b94ea", + "f8417766ce86b275f2b7fec49da832ab9bf9cb6fdfe1b916979ae5b69176d7e0293f8d34cb55cf2b4264a8d671370cb595c419c1a3ce5b8afa642208481333522005fbe48cdc700e47b29254b79f685e1e91e7e34121784f53bd6a7d9fb6369571bba992c54316a54e309bbc2d488e9f4233d51d72a0dd8845772377f2c0feb9", + "3479e04efa2318afc441931a7d0134abc2f04227239fa5a6ae40f25189da1f1f313732026631969d3761aea0c478528b129808955be429136eeff003779dd0b8757e3b802bdff0f5f957e19278eabad72764aa74d469231e935f4c80040462ab56094e4a69a82346b3aeb075e73a8e30318e46fdaec0a42f17ccf5b592fb800613", + "03df0e061fa2ae63b42f94a1ba387661760deaab3ec8ffabcaff20eeed8d0717d8d09a0eafd9bde04e97b9501ac0c6f4255331f787d16054873f0673a3b42ce23b75a3b38c1ebcc04306d086c57a79d6095d8ce78e082a66c9efca7c2650c1046c6e0bbce0b2cba27c3824333e50e046e2a7703d3328ab3b82c9d6a51bc99b9516ff", + "76b488b801932932beefffdd8c19cf5b4632306e69e37e6a837e9a20c8e073bcadd5640549faa4972ebd7ee55cb2425b74cb041a52dd401b1a531beb6dfb23c4cfe74bc84f034156c8f55050ca93236eb73c4e2595d9fbf93dc49e1ec9a31705359732dda73f737ec4274e5c82626dc4ec929e5e2c7a2f5f5fb666181922bd8be575e3", + "ff17f6ef13abc0426b03d309dc6e8eeb822300f7b87eff4f9c44140a424098fd2aef860e5646066d22f5e8ed1e82a459c9b9ad7b9d5978c29718e17bff4eeefd1a80ba48108b551e62cd8be919e29edea8fbd5a96dfc97d01058d226105cfcdec0fba5d70769039c77be10bd182bd67f431e4b48b3345f534f08a4beb49628515d3e0b67", + "95b9d7b5b88431445ec80df511d4d106db2da75a2ba201484f90699157e5954d31a19f34d8f11524c1dabd88b9c3adcdba0520b2bdc8485def670409d1cd3707ff5f3e9dffe1bca56a23f254bf24770e2e636755f215814c8e897a062fd84c9f3f3fd62d16c6672a2578db26f65851b2c9f50e0f42685733a12dd9828cee198eb7c835b066", + "010e2192db21f3d49f96ba542b9977588025d823fc941c1c02d982eae87fb58c200b70b88d41bbe8ab0b0e8d6e0f14f7da03fde25e10148887d698289d2f686fa1408501422e1250af6b63e8bb30aac23dcdec4bba9c517361dff6dff5e6c6d9adcf42e1606e451b0004de10d90f0aed30dd853a7143e9e3f9256a1e638793713013ebee79d5", + "02aaf6b569e8e5b703ff5f28ccb6b89bf879b7311ea7f1a25edd372db62de8e000219afc1ad67e7909cc2f7c714c6fc63ba341062cebf24780980899950afc35cef38086ee88991e3002ae17c07fd8a16a49a8a90fc5540be0956dff95390c3d37629949de99920d93096eb35cf0427f75a6561cf68326e129dbeffb8772bfdce245d320f922ae", + "70752b3f18713e2f533246a2a46e38a83cc36dfccec07c1030b5204cba4432700735a8cee538b078d281a2d0262110381c5815a112bb84404f55af91652bd17502dd75e4910e062943d8a736ae3eecdfdd8e3f83e0a5e2ddeeff0ccbdadaddc95391310fc657a59724f7e6560c37dc1d5bb5db40170190f04a274c864ade9687c0f6a2a48283177a", + "01f3c1333b44077c518cc594d0fb90c37651fb7b2442e71fc0a5611097f1cf7bcfaf11c8e0ac1b1cab54afba15bb9332df6bc64d8032368e3f686c8324b0114e0979dad78a5ccd3fff88bbe89eef89c4be586ca092addef552ed33224e85d8c2f4fba85ac7735f34b6aa5ae5299154f861a9fb83046b0e8fca4db32c1343e02676f283975f43c086cf", + "509283ebc99ff8d87902fa00e2d2a6fa239e335fb840dbd0fdbab6ed2d95e8275402523f7ce9a2fabd4b6c9b533288fbe914bde84365a204711d0977a7d698f4614385984dd4c137e4820035dd6737da364edff1bb62283e87a8c7ae8637314fe9b5777ec4ec21276dafedb2ad5ee1aa0ac99e34a6c01c055c8a239fd28681607f65143082cd4553c529", + "c17e417e876db4e123c631f7136b8a85bfd6ce66a69180d0cd5ecfd6f037bb1c7bd7908d51f2c485bf9e92c0e1799ee5f6ab834ee481f5eb1a8020205adb4d0f90126d4e7c2c859c5a5f644bdfa9c649ff4f168e834de6f9769429732099d46d0af506ab86c6fd92175159bbc05c75db8e1fa867e6030d64250008d64c857c47caec3dc8b2ffb384d0193e", + "950988fbe9d62a66f5f2c492bc8dc944a78eb3796ec37ba94b6a81a9d402ccad03cd8497fff74c5f4a03081c5fecec48574fecb21c1de261332c23108195d3f6a96ff8e433a1a30eda53dd5bb414973334f8cde5510ff759f7c17046cbb5acd8e8c4a6eecf2a9121ec3fc4b22c4daa72678194ce809024cd45c4ebb9ccdb6f854205cdb624f0787480d8034d", + "552a212c403b473741da8e9c7b916d5e5e9bcc9949021ae1ca1ed46b7d4a98addbb604d9fff56175b7e0367db26c9635fa7813653dc8d610befdd09ec41e99b192a716106f4299eec8b940863e5a59cf26cdc2cd0c3017f9b4f215812bed15f69e77edf672178e13c55580982f01fcc2fa131ec3d736a55d56504c545f4be50fee83f1263e4d3f3c877cc6242c", + "b00c4283dd3d9cd26e44bd97cede6c771cb14f2571b51cfdaae4309560ffd165da025a1bbd31096c3aa8286e2d6dcc3e681b8d01f2c5064ea26dfd0b5156b7a7f5d1e046c5bd1628f8fdae24b03bdf7cf7366900cc013a8cbed9d7f5937c914b08f8c27683b956e1279812d04288515333fc6aba3684dde2292951f0610649d90fe61606630fc6a4cd383649252c", + "f6e79457bb6d0884dd223be2cf5ae412a1ed425f1e4012f75951b096aea3b9f3581f9013bcae1aff2d3fc1e5c7e06f24af6d53c2c5c238b71c71cc670b05a7ee5204400026a5c4e5ddec3ad96771e49fae4b0f75ec58049ad9d972e5749a32d90f847f1ed2a1bab83db181e541cf5c8adb6b29ecc64dc25add491d408d3eb3ddcb013de7f5ffb6de9dd7ff300a5fc6", + "fe1d71e1d5efa3f712d23216ee8ee9139e66bd648b83efc02cdb4d45a28cf36759ff190a84d14d9471477abefb5aea4111110336143dd80cf81e02f268120cc07d746538f968e9876bff8358d390f5b8e7eafa61ecd236cedaf276bd61865fdd3424988201dcdeda2e3e0c33c9e3b3670125dd1049106cc6df5695fb2dca443233ff440f265bbff055483bac1e859b83", + "4c80163562872a965dedd8725652906156ada6e9d999027d96f49289edb92f9ef043e9d7c3377e091b27f85275499454af32317535997fb4aaeaf93565ad481ff7d45d2abddd4df4b60f71a6923ec30496c6ae534dc5427107ab4c5e656a322c7ab058d4c13ec0ebafa76576560697ac98f84aa4a554f98ec87134c0d7dca9184cf70412a324aac91823c0aca02537d197", + "fdd58c5ffe88665beb7073c8f4c22472f4bc9390cdd27a42622ca55978b000ab7579f795d4de0dfcaf521b8268980ef1d20277b07567985c0fd5030784ad6c32541ac24e99ab706105a2255fc32935c0fce6fdad9bb224d94ae4eae2a3ff08836618a3adf193630647bce1952b69da4de360f59da303519278bfd39b733cf66820a5e9e971b702f45998b69a0889f4bec8ec", + "ff38b15aba3794e2c81d88003e045ac6cbfc9f4833cdf896cefd8ac0c88674727ad9a9fcb9ef36574deea480e6f6e8691c8390ad73b8ea0eb3665c914b0d886546948e67d7987eea248b5feb52346ffdd965d5c835144c3bc63daf325e74b11267e32e58a914ae4521a668839d9445fececa49c5fba41f9e171698bbc7c6c97fa163a377a96456958d6e1d74f91ada56a30df8", + "f048c19328d60b4e59ed76940415b2c84c23883198bba5699efb0a1774ad5da6d15390c7b55d77d66f37448fe08107f42a5336408d5322f4b630e3275865fc66dccab39f6e13fabc133e5a441fe352d81c7cd9a25f145a6e2e2417d3b0bbc79eafcd7ad688c02011fd268dd44ac3f4f87b37a84a46fd9e9975962fba92c9a3486deb0c45f6a2e044df4bb79f0feeea432c5008b0", + "1b3e5fe6f113cce28a6f8d6f7809d3cec398cabffe9ff2ff10a7fec29a4ee4b54186063fd5307a2be393c9ecd75a37620bdb94c9c18da69b658579676ec90351d10dc33a7cb3b75798b1234f9f684d4a73a0fab2df3d5d6fdb1c1b1514d0935c1f2dd21486f91c2595b2f8f8a500ff443b9305270fb6f3da7961d9316d4ed6a135a31c4a3611d40e6585bbb34f498cd5b9a5d92676", + "740db337baa12b16897f17a85fa5685acc85e48338867f8ac9c0198dd650f5dfa7c17725c1262c72207e365c8aa45ffaab6470a0e5afefbfc3bb702a9766064f28cc8b796878dfdd3ca9d0216c14941438fc541fb5be0a13d29a996c5c985db4f630df067a5626db5dcd8df3a2bff17dc446e46e4079b8815da4318cb228c7722684e2a795a0ca56f500ea51951a6a385385d886f678", + "1465f2d578d167faa017fe8f763ce3cc8dc1e8371d774ed2a8803f12585296ee71a1f2253dd16b717a81f91f0f3641018a0111182b4e65d884b0a3d0292631ad807cdccc88bdeecb476e76f72b5246a630aff6e2401fa9570f85acb73ccb4e19ef04a932a03d7b7985dbe1e5bb410df517fe362321469e6f8b0e0cef6c31d7aa8ec06aa220620d66cc0e133fdee963589b12320fc9678e", + "80c051952fa6f3ef6af0f1759ec3e83c8eb91abee1de360bfa09e74b05af2475a0dbf8f9135aa25892919bbe0515898cfb6f88abc9e1891f2b2180bb97370f578973d55c13c35edb22ed80647c2a7e2884d1ccb2dc2f92d7b6ec5843ade13a608a31190ce965bde97161c4d4af1d91ca9962053f9aa51865bdf04fc23fa35a6fc3c8e888941263a26ed66c2dd0b29b2325dfbd1227c5091c", + "9c1e2a1aed6406052eed12b4495365f2f80e9c9645473f3549b607f20910bcd16dc3a4b173ac8d128129cdb7c76ebbc8e9a2a1ba0d822c66b367e790a69ac71f0a60ed4bff0e979148e3f3ee6607c76dbc572ee5ff17c27e4b52adebb4bedddff517f591a1977299c7cb01106f1453b098d29848ba3751c816215bb0d090c50f9e445b41b2c49d4eec83b92ce6c269ce835fd279e7cbbb5e47", + "466abda8944d0329d2975c0f2e2afc901f117887af301881f63b714f49a2f692fa63a8871fc0b301fe8573dc9b2689880cd8969e5072c57671e0633b041481dab25e65c9de404af033a11a8070c8ab70ca6d465318501afdd9940c7efbe1bb6d49581c222fad251dba4ee0a98efe22a3c4f74da05844523b30bbad6b080ac8df70a02da80bc9d477dfb869adb211e209a316d5dd1fd89a6b8f8e", + "0e89a873e07799ba9372fc95d483193bd91a1ee6cc186374b51c8e4d1f40dd3d30e08f7feecfffbea5395d480ee588a294b96304b04f1ee7bbf6200cc8876395d1db3ac813e1019bb68d27204e514fe4a61ad2cbd1782dca0e38b5538c5390bca626c5895b745cfca5dac636fd4f37fed9014ab46ae1156c7789bbcbb956ff7ee5ce9effa560731d26783dc6ae8bddd53a5d28133614d0ddeddd9c", + "fdde2b80bc7a577ef0a6c03e59512bd5b62c265d860b75416ef0ce374d544cbb4e3a5dbd31e3b43e82975090c28bc77d1bdec907aeceb5d1c8b71375b6d631b84a46153f5f1d195bfcb2af6f597a9cdc83782c5bbbb58c5188a87ebf375eee5212fa52523820a83106e8ecd52bedd60d95cd646159774389c07e1adcaa6b6f649408f33399ec6e507d61659696b3dd249996892d5986b654d94ff337", + "f5d7d66929afcdff04de30e83f248e69e89604daea782e1d82d8032e91a95c1d6fb2f5578f79b51be4397e4cd7cbc608ce143fdddbc6fb6c43ffdd394a7df0124353b919aeeac025f3eb11ff246c3b9657c1a947fc534ce48e18feffada8797037c6bc7e2d9a9e2e019fe65627b3feb28e446473e3bd413047a2587f0be6a103403cb3c33fdc212dca14d8e386aa511c22308e632f5f9528dbabaf2deb", + "332990a8dba55f977bc814436cf386ebbf10cb487a5f6ce83e13741bac670c6810284fbbe4e303547ef411e964fae82854e8c13cf56979b89ecfedd337aad78260060122d13dfbbf8497acb2066ed89e30a1d5c11008bd4d145b5ec353956310536304d8b8bba0793baec6d8f3ff49718a56e6694f8122078265cf5731d9ba61292c1219a1affb3679576d4998290aba3684a205c3469d40761a5c4e96b2", + "efbdff285027610f03182009c89b953f19721cfcdb8accd74bab6ec4bdf3f555ab902cb0dd91284269d140638aaabd211748aa4da3b18cddc653b57e461b9ad8491807c535c08fe97d89eb587c6af19ca152e72479626ab764e8b62da89fefc8354c75a44851f985746d78715a5a92798dac1a4222be27897b3f0aa63d596aa7378545f49b259aa8518c3def8a2ec8f7aa956c43668c8717052035a7c36b47", + "0eea9bb83bdc324fd21b03669aa922fbebc448e7d25e210294c07862cfa6e061731dfb67b4810633f4dbe2130d90fa1c65843af436e74219d213c4458dcac1c48ec4541fc6e3b7918ab2bc621aedda53658050900c3865ca57cd5dfa1d28576827401956d2dd8b861fa90ab11bb0b544ded9bd3d62e3278ed484e17db8f2d5dc5ea4d19a0e15134ba6986714c2b22c59c2f0e517b74eb92ce40d2f5b89e6d79f", + "25da9f90d2d3f81b420ea5b03be69df8ccf05f91cc46d9ace62c7f56ead9de4af576fbeee747b906aad69e59104523fe03e1a0a4d5d902352df18d18dc8225855c46fefeec9bd09c508c916995ed4161ee633f6e6291cb16e8cac7edcce213417d34a2c1edea84a0e613278b1e853e25fb4d66ff4c7ee4584e7f9b681c319c874d43502534e8c16a57b1ae7cc0723783807738a55b661e617ee285bdb8b845607f", + "a76b6f81372df09322098868d469fb3fb9beafc5edb32c674974ca7032966aaca5b5c9bffef87bfe626bd8e33d1c5f054f7d5acd3b91ff95324d1ae39eb905b9f2694fe5cb03486cee86d2f661a751b0e6c716a61d1d405494c2d4e32bf803803dc02dba2c06eecf6f97fb1f6c5fd10cfc4215c06d627c46b6a16da0854e4c7c873d50aa1bd396b35961b5fa31ac962575230c07c369f8fbc1ff2256b47383a3df2a", + "f9db613812f2259972d91b1598ffb166031b339913925ee385f03b3b35dc4b2f1ae78a3c3d99c6ff6a07be129ce1f4b8d994d24988d7fbd31f20535d36ab6bd0592cfb4f8c1ed9244c7fa8a3c46e91272a1a40c6cfcf261c5658476c59793bf1a3775086e41a0492f88a31e2d9d1ce75cf1c6b4b928b3545d838d1de6b61b735d921bcf72e4e0615e9ff969ef76b4b947026cb016e2660ba39b0c4c953369a52c210de", + "e601c7e75f80b10a2d15b06c521618ddc1836fe9b024458385c53cbfcedd79f3b4239598cd7b9f72c42dec0b29dda9d4fa842173558ed16c2c0969f7117157317b57266990855b9acbf510e76310ebe4b96c0de47d7f6b00bb88d06fad2c2f01610b9a686079f3ed84613ba477922502bc2305681cd8dd465e70e357534503b7cbc68070ad16d9c51de96ccf0aae1599299331c5655b801fd1dd48dddf6902d0e9579f0c", + "ee5ff4ca16d1bde59ffaf2d064eac9141c1d8f120ea2bda942b7956ba3effc5f1e725a3b40b0b9223a14d7a50df1681d14ca0e0eda7bb09c428fa3b2701f83a7a3e139485a118f6287d266dbc7fe68c87b35becabc7782537c79cb8165bdc40cc103d7b6d4b627fafa0e4113f92341ab90ceab594bfae20dadbfafd401684584598941f1ffb8e23dc8a04ecd15376cda6d849fe0dfd177538c62413622d172d9d46e05c450", + "1daca80db6ed9cb162ae24aae07c02f4126f07cd09ecee8e798fa1bc25c26c644333b63731b4ebc3f287f2318a820c32a3a55fc976576bc936f7384e2553d2891e3771ff24dd4c7f0256906460a8f12d30ed2b23583a0259cb00a9065a757d654d6e4603e7c7eb4a8426b527ae8a849d9350e9094b890367df3e8b23ad2df4d7dcce416bd8ea3badd037f53f7b07c02e5926515f196d62aeb9b8b14c863f067fc12c5dfc90db", + "27ff4e58a34ff1fcd66855d014ea17889a3cf0021a9fea3fabfd5b270ae770f40b5439e00c0d26bd9766f6fb0b4f23c5fcc195edf6d04bf708e5b0bced4f5c256e5ae47cc5651e51cd9fe9dc5d101439b9bc5cc24f76a8e8847c72686e2af1ce7098ad7bc104dad00c096a6d48b6453322e9cd6773fb91fb1eabd05dc5185a9aea07a2f64c6fea9897681b4428aaffe1fe5fd3e8ceb890b12169ec9d51eaabf0ca3d5ba415770d", + "75e2fb56327983b04f640717be8cba6fef3655b4d8e5539587d6478356ec397efaed818b8425d052778eb30ef0dee656c52c2aeab079ed496ae4441a365f2130432c87ba757e25b4511656ad15e2eff84d342331fd2814d1f1d11af65d98a424c115ba183437c0d0aa55f5c44b8685028a47d89d0d36a0f20aed510c366ab338f074a941b404fb349caaec821e0850a627777cc8f5abce6b509290027a2a28ff1db62a5ed2f95fc6", + "c6ae8b6a060917cd498aa7874ad44baff73efc89a023d9f3e9d12c03d0b7f5bcb5e24e1bc2ab2f2c67b9a9d36ff8beb51b5affd4a3510361001c80642955b22ea4bf28b81a5affe5ecdbabd8d17960a6af3825a4522fe76b3d720b5d06e66bff5379d7a8de1f5cc3e7bb75163a854d77d9b3949bf904b6c4e568682f0dab7f217f80da7303cfdc9a53c17b6b51d8ddff0ce49541e0c7d7b2eed82a9d6be4aec73274c30895f5f0f5fa", + "606c9a15a89cd66a00f26122e33ab0a08c4f73f073d843e0f6a4c1618271cfd64e52a055327deaaea8841bdd5b778ebbbd46fbc5f43362326208fdb0d0f93153c57072e2e84cecfe3b45accae7cf9dd1b3eaf9d8250d8174b3dade2256ecc8c3acc77f79d1bf9795a53c46c0f04196d8b492608a9f2a0f0b80294e2abe012dc01e60af94323c467f44c536bf375cddbb068c78432843703dd00544f4fff3eaa1a5a1467afaae7815f80d", + "88b383cb266937c4259fc65b9005a8c190ee6cc4b7d3575900e6f3f091d0a2cefa26e601259ffb3fd03083270eb63db1ffb8b4515ec454d12f0944f8f9f6869eedc2c5f1689766a748d74e79ad83ff6a1639aefdec6109342dead31e9cead50bcc00c5b2206e8aaa47fdd01397b141880490174141a1e6e19268378c1b54a84aba60ca711fd72f7df88e120dfea2caa140085a0cf73342f3c588b7edfb5b5e5ccabd68a32364746d92d536", + "dc0b293f1ba02a326743509f41efdfeeac1efc45137ac03e397a3273a1f586a0190cfb4ea96d6c13ca692a4de6de905c8338c3e29a04cbae76272f568b9d795cea5d758106b9d9cff6f80ef650d6b7c428ea3946c3acc594907fe4227ed68faf31f2f6775f1be5139dc0b4d73ed6308fa226b9077561c9e4c7a4df68cc6b819b0f463a11b9a09682ba99752c4db7aea9beac1d9279f2c2675d42b551d27aa2c1c34125e32f2f6f45c35bca45", + "5d801a7413311e1d1b19b3c321542b22e2a4ccbe340545d272abede9223741d9835a0fc80cc9da97a13f8bb4110eb4ad71093efba165b1edad0da01da89d86726e0d8e42ae003b4b50297d233c87da08406f0e7fc58ba6da5ee5ba3d2d7142cbe6632734eb2e7b7863c15cc82198ee8f9a0ae0b7f93bdbda1ed269b3824d5d3c8e78513815b17a4c0cc8c9706b9c77423a309ae3fd98e1e05cdbe9e2577834fd71f964301b10b66c316a2d8f2c", + "2fd32a2bc15a9e96a100624404fd0a4e54ba9f8c0543d8ccf7c5c2e35f5e8c3c11dfd497320aa903900a4ca55a2b323b3ac4a7cfcd01bf0b448db8829072bee6b77c3d7bec2e1d8b414d907288d4a804d2379546ef2e2dc628269589164b13fceb32dba6fd5d48a956ce0b5c3eb28d894a95af58bf52f0d6d6cbe51317152744b4ccfc918ed17fa6856478d580b389016b772e1d02e57d2217a204e25361d91d4845a3fa20fefe2c5004f1f89ff7", + "f537b437662759bef8bd64368536b9c64fffbddc5e2cbdad465c3966b7f2c4bc5b96767ef40a1c144a4f1cd49edc4cc5b57e7eb30d9b90108f6fd3c0dc8a8808b9e0bd13aa3d661c4863637c5e4ba286553694a60bef18801299ae349df53a355051dcc46a7d003c4aa613808f430e9db8ca7dfe0b3f0a4c5ab6eb306aeb53e11a01f910064fbe6ca78b2a94fac34a2602f73de3f275953e13ff5c6bb5c39b82321ead17ec0f8ecc479e6afbc926e1", + "1dd9fb7d5b5d5074971e69300720014deba6fbdb942bd29704cdfcd40fa5281d2a1b9f5b776183e03ff99c29587f10e8d325cb49c5c93e94f5132741b92c4086eec1374dea5c1e772cbb230c7b31f3e962eb572be810076bdb926b63732522cdf815c3ab99bbc164a1036aab103cac7b823dd21a911aec9bc794028f07b7f839bae0e68211286441f1c8d3a35b281fd321312577bbda04f643ecb2a74ec4527bb5148dbccbeba749f5ea19b6072366ba", + "5bd63737449de2d20ca63943953338ecf4cdd6cd0a726241adb04376385a809cc6ba0f3482a310746fbc2cd5eb214f03a14cdc548777fb0d048d659cd75a962e490c4fe47affc2430a34b10275e4c76752a115aae3a24d4fb4fad89ce4d79d65de10292f3490bfdaeabfae08ed51bda6ec8230e66cb07ddbeec26e3ef68dd71c852900659fcf0c963f4574ffe4626a33db9abf0873dde68b21138498b81e8cc44d354be4073615889a7ddff633b5447d38", + "a683ec8250506571f9c640fb1837e1ebb06f123e745f95e521e4ea7a0b2b08a514bbe5bdfd316903d1d6a05f5a143d94dab61d8a3a146ab40b2d6b72df2f0e945875a8aa7051ed115975f6f1567cfcbf04c5e11e3a7027b8e179ba00739181ba10b028e3df7259d0712f4a6cef96469ff737865b85fee2c2db02a6423e32505381e18a1e0b4ce3c7998b8d6b1b5e09c3a280b85486d0984c9e193b0ad2043c2bc4ad04f5b00a73956715937eebf6b3e27afc", + "4df9d160b8e81c42930c48956fcb46b20b6656ee30e5a51dd6317876dc33e0160d31280fc185e58479f994991d575a917073b4439919c9ac49b6a7c3f985211d084c82c9d5c5b9a2d29c5699a22e79de3958d7b0e856b9aa97493cd4563aaa04fa3977a9bb89e0bc06a82296bdc76d20c8d393770176d648712454305fdfcf4e117d05acb5a5b006a9f8d0dc66dca708c4e4103ca825d2331750685c44ce3d9b3e753455580f4d6ac4533edeeb02cebec7cc84", + "67bb59c3ef5ee8bc79b89a673e331e581215076cc36b68f517ca0a74f74efafe9dcc240e6d8ca4b21019c27d6c9289f4419b4f218eeb39eb741c5ebebfe0ed2f6faeec5e8c477acf71907990e8e288f4d4049111779b0635c7bbec16b76493f1c22f645745fdac2b383679fee573e4f47af45ee08d84f63a5ace4ee1c06fa41e2e6e14b7bc392e38426813087a3a461efc62ed1941dc8f1728a2bdc04fde72a0b786558783c84abd4bd100e4926979a0a5e707b1", + "d341147169d2937ff2373bd0a9aefa77968ec8f0d993c6f9881eb174a1911e05cdc45993cb86d149a754bbe321ae38363f9518c50dd3faf087ffeeeb6a058b226ccab7858c00ba6de0e8f4d034b1d27508da5cc473f3a413189ee6fd912d7750486912944d4dc34405ce5ccc3885fb0aabcb922bcfa9081d0ab84c288022bd501235a835eb2e1124ed1d48fd4f8682da8e7919321031326502273375625c4e3a7282b9f53452195e53c6b4b57cd5c66f621bed1814", + "27e7872a54dfff359ea7f0fca256983f7600236e716e111be15a1fe72eb66923ea60038ca2953b0286447dfe4fe853ca13c4d1ddc7a578f1fc5fc8598b05809ad0c64a4363c0228f8d15e28280837a16a5c4dadab681e28968ae17934639fbc124bc59212138e494eecad48f6546c38366f1b7b2a0f56f579f41fb3aef75dc5a0958b25deaa50cb7fd1c69816aa9a51874a98e57911a33daf773c6e6166cecfeec7a0cf54df01ab4b931984f54424e92e08cd92d5e43", + "13dcc9c2783b3fbf6711d02505b924e72ec6736131159017b966dda90986b97522bf52fd15fc0560ecb91e2175322334aaaa0097e1f3777c0be6d5d3de18ed6fa3444133486068a777443a8d0fa212ca46994944555c87ad1fb3a367db711c7ebd8f7a7a6dbb3a0207de85851d1b0ad2f4149bdd5a5ba0e1a81ff742df95edee850c0de20e90dd01753137cb8f2c64e5e4638ceb893a3879ae2c049aa5bce44d56bf3f325b6c5029b2b8e1b2da8de7d4e48ca7d8f6fbdc", + "9ca875115b109eab538d4ec7023600ad953cacdb49b5abe263e68b48eafac89a15e803e838d048d9625972f271cc8f36344bed7bab69abf0bf05979a4cfff273b82f9961626509765fcb4b4e7fa48212bcb3ab2b1f2dd5e2af768cba6300a813514dd13e4d269e3d36548af0cacdb18bb2439ec9459f6d847d39f5598304ec46a26d75de1f9f0c2a88db915bd26e45e1f1e68c5b5b50d1890e97a3803c36755f026863d14176b8b57f42e91d3ff37787f9b38e333e9f0433", + "ec006ac11e6d62b6d9b32ebe2e18c002353a9ffd5dfbc5161ab887770ddd9b8c0e19e5321e5bc105add22e473050b71f0399327c7eba1ef809f8667c1f4e2c7172e10e753705e9a083f5bce88d77521225ecd9e89f1e1caed367fb0275dc28f620fbd67e6b176c9ae5d2659e6ec662116c9f2bbca3a93043233a4861e0688db6dc1800f752c5d58aa5033c250c891d9126e534ed921a9026eb333333fa8292059b8b446f336ca6a0cb4c7946b6aea3831653122f154a4ea1d7", + "23deadc94481ce28188f3a0ca3e85431964cb31b60fabf381e6bd45ef0332bd4dde774b0281d317dc2e7d0c298fcf8625fa734126968df8b68ef8a35c325d84ba4fc53936ff3ffdd8838d2a8cabf8a9cac54aa444ed9875944e55994a22f7fa8538b1e983b57d9215fac5c0052029644044e790ce2f5044655608c1d7ad3bb862203ba3aba3b526606f273d342ed5721648e3f600942d3f7546f679161436389d879dd8094e1bd1b1e12cde15cd3cda4c30a40835665e4e5cf94", + "94701e06340114f9cf715a1fb659988d33db59e87bc4844b1500448960af757b5282f6d52967a6ae11aa4ecfc6818c962b084c811a57724f5d401191567f24ce917e4f8c3963474fdc9d2c8613c16f62446448b6da6eeae54d672825ed7606a90e4611d0e318ff00566862c955b636b5e81fec3362e8672ad2a6d222a515cf410482836deba092a51a4d464dfbbab35c50a33437ac16a88256e9e23ddd3c827cc58d3e5000ee90b12e4c5175c5733662d4848ae0d406c2f0a4f498", + "735b0758d5a331b2304f01081172eb95ae4115de651b1a6693c5b9543de33df25d9f421dbaeca033fc8bff57313b482778005aa9fdcbca65c643da2f3320e34197868eec3848ff3c70d7ac7d910fc332e9a359f892ae01641be253013b554a0d3f249b3586b1857e5a0f9482ebd91432a852b221f4287a6e81ed24e8064645d5b28ab9a13b26cc1420ce73dbc47b31acf8a871601022ce23bc443b1222ce9a037a2fe5226295feb4efd4fd671338f459ae146032697cf82fc55c8fbf", + "c48d94f14549352790079fee69e3e72ebaa380510e3581a0824066413e7044a36ad08affbf9b52b21963d2f8e092ff0ac1c973c423ade3ece5d3bca852b894675e8173290529226939c24109f50b8b0d5c9f762ff10388833d99bea99c5ef3ebb2a9d19d2231e67ca6c9056d8834730605897426cd069cbeb6a46b9f5332be73ab45c03fcc35c2d91f22bf3861b2b2549f9ec8798aeff83ceaf707325c77e7389b388de8dab7c7c63a4110ec156c5145e42203c4a8e3d071a7cb83b4cd", + "553e9e0de274167ecdd7b5fc85f9c0e665be7c22c93ddc6ec840ce171cf5d1d1a476743eb7ea0c9492eac5a4c9837c62a91dd1a6ea9e6fff1f1470b22cc62359474a6ba0b0334b2739528454470f4e14b9c4eeb6fd2cdd7e7c6f97668eebd1000bef4388015630a8332de7b17c2004060ecb11e58029b3f9575040a5dd4e294e7c78e4fc99e4390c56534a4e933d9a45460f62ffaaba25da293f7765cd7a4ce78c28a85013b893a0099c1c128b01ee66a76f051dc1409bf4176e5afec90e", + "dea8f97c66a3e375d0a3412105ed4f0784f3973ec8c57b4f553d3da40fd4cfd39761de563ec96a9178804641f7ebbee48caf9dec17a14bc8246618b22e683c0090259e3db19dc5b6175710df80cdc735a92a990a3cfb166461ae713adda7d9fa3c4cf9f409b1467f3cf85d2141ef3f119d1c53f23c0380b1ebd728d7e932c535965bca41a414b6ea5bf0f9a381e098d282a554a25ce41980d7c7be75ff5ce4b1e54cc61e683f1dd817b8e2c1a430d7f895e5e7af13912cc110f0bbb95372fb", + "9dfda2e2f732867e60ed2b5fa99ab88eb82dc7a54334d02031258beef75fa4bd6962a1083b9c29e4eeb3e5ab8065f3e2fc732675b8d7705c16cfb4ef7305eb58120f1af5ddc55872a2cbde3a48661a0598f48f63e2e9aadc603545e2b6001748e3af9e86e1830af7b84ffd3e8f16679213d37cac91f07af0af02b37f5ed946ef5c955b60d488acc6ae736b10459ca7dabeacd7dabcfd656511ac913174f6d99327be59befe3e463a49afbb5235f0ce2840588c6edfbaaba00a4211c0764dd638", + "ddcd23e8b9dc8889b8599c721e7f8ecc2cbdca03e5a8fd5105f7f2941daec4e2906c654210bdd478374ddee43ee749a920ee91872e057a1157d384dcd111266221b3c79774476b4862fe450704ff2c5353e9a936cac87c96515c28ed4c830335a55d084cb5873c5fd2dd907f3266d8eb7bf13b6dd7cd4966982a0949efd8e428dae13daee549e01cc3c226211d6307823f742c5ef2155601a4644c46eddd603d4abd959c6d242e427768df3b1e22d87971df58a1564b38311a897c85b497a72556", + "39016647acfbc63fe55a74598bc1956eaf4e0cb49d532c5d8323fc6a3f15a0231597f06eafd74ad245e672bf6b21e4da503cb5bf9d15e9038ef354b38807564d91f38b4258378ccd9b9420a1562d7136196822a1291c913d83c4cd99fd8d420990c72cdc47607124de21da8d9c7f472fdcc780379f186a04da93cd87628abf323c8dadcd7fb8fbade37d7d2b5c9f9fc524ff77494c98f42f2158a6f68c906105ca9e8bb2df463863cfc1e9008d8344f55c4e3203dde6699b59812d49ce1279fa1c86", + "02cff7567067cbca5911664c6bd7daaf484181edd2a771d0b64566c3ab08d382e83932cdd7b4dbf86c9cdd1a4c353a511e68afb6746a507a9cd385c198246f4543d606c6149a5384e4ff54c1b90d663dc7a4b91aeac3cf716db7ca6f9a1914e3a33efe82e7ccc4215999c0b012782402db4726db1d7d1c73571d45739aa6fcb5a20eeb54a84d5f99902a8d356cbf95f34c9c28c8f2badfbc08c69233514493c0c04963268c88bc54039ab2999c7b06cba405936dfc43b48cb53f62e18e7ff8ff3f6eb9", + "5764812ae6ab9491d8d295a0299228ec7146148ff373241a510faee7db7080706a8dada87938bf726c754e416c8c63c0ac617266a0a4863c2582412bf0f53b827e9a3465949a03dc2db3cb10b8c75e45cb9bf65410a0f6e6410b7f71f3a7e229e647cbbd5a54904bb96f8358adea1aaa0e845ac2838f6dd16936baa15a7c755af8029ef50aed3066d375d3265eaaa38822d11b173f4a1de39461d17d1629c8df7334d8da1b6401daaf7f34b2b48d6556ae99cd29ed1073926bcda867421832a4c36c7095", + "4df3043cf0f90462b37d9106e67366d112e4938c4f06abae97869531af89e9feebce0812dffe71a226de5dc36be652e26ef6a4be47d9b2db5cdd43809a565e4fc0988bfe82037c505dd276b757b785203249fd083fb474a25acccc9f38dc5164ff9097e05989aa6e280739a755231f93670e7226e22046914c155bf33d135b3f736ccca84cc47ae643215a054b54b7e13ffcd7ad73cced9279dc3210b80700fcc757acfb64c68e0bc4da05aac2b6a99d5582e79b303c88a7ac4dd8ed4289516bba0e243527", + "bf041a11622715426c3a755c637d5f478dd7da949e50f05377bf333f1c62c671ebdbf9467d37b780c25f7af9d453fc67fafb2f065a3f9f15d4c3561eeaa73fa6c813bf96dcf02430a2e6b65da8d174d2558110dc1208bdcb7898e2670894c0b9e2c894da3b130f57a90ec8ea1bffd27a37b4da4645c546b2b141db4e2c919154dac00e78dd3eb6e4445974e3bb07905982da35e4069ee8f8c5acd0efcfa5c981b4fd5d42da83c633e3e35ebdc959bd14c8bacb52212b4334f94aa64d2ee183861db35d2d8a94", + "a170ceda0613adc9c3a1e427f07beacf3b16ed69fb42b6bc09a38d803f632ad2929dba215b85683b74e2feb1d18fe17d0ea0db84d1be4e2e73476917a2a4cff51d6eca7c5e82232afde00dd2286a4c20eb09800b4d5d80e7ea35b6965b9792d99e399abda8cf32174ae2b7414b9bdb9d63e148f7357635a7310b130c939593cd3479164724011966c4232142df9966f09422f34f20b30af4b640a2c6d3dd985fe0ba3dfa9083cbb9b8dfe540ff9f6c608d18481213040768ef33300d773f9890c724ead320a1e7", + "929477e9c2d0bbad3429a0e0de776695255013108261dc6404cb09828770e274d8bb650a50e490dfe917fc2047b0f8ee72e105927d9fa70523c727778cbf6ae876d641ad562938c870d12f2e047bb78920739dba0c3f8ce1fb77589623a5f1625f5d6ab81940c7dfc3dc3a641d82b2813629bab8282999317d6b93842334f123fb4693a9c2c9d8ba9bfc746642dfbd045cd2021b272eab7358aa954d453da53fc5392dfa7eb881f6f53809b692d27f3366595ff403289efcc691e118b4744a1147071d8909bef1e8", + "3e98bb14fff5bdf7db38a3960dc55ca7d02333daed8712cca13dd5bffd114636559279db72554cc0a0ee1f7e15557d77cab0f2f1131f94fe698db81be38300a856a5eca85e5cf915fb7b6f38ccd2f27350e62cc30ce10ffe835118be3d435d2342ed3d06199b7e20c8e34d68902f0ab8745bd8b7d5b863d525c1f5906d2dca598db8a0f1e67736182cac15677579c58b8c670cae1be3e3c882153b2aa2988933e579ec2d6dbb00c671da64443dfc027dee6dfc3233c99758304570a982bf9b2eb59ccd70d0b54c4b54", + "aa12c7fa50ffdc2811c1872e4bee15f43e6909212385c872eb489f7e06dc1787043f56126f8373bdfa4b3f61405c73dd4dfd3f40aa5cd207e8520849c26f67716a46c0989a99efff42f24e0736e327af8e607c401a1bac77341e9a78c91e35d55b2457bdd5317a405a1fcf7a2a23de68ef92b65819e8aa3807c545361dfc9fe89125123492da958dc313cb5d03cb4b192c54ac6b27fcbc498652f5ed36b587bb74942b3ad453a8d79e5ddc06ebf806dad5046b73251064582ef5777dc530f8701701761884783fdf197f", + "83e615cf6e17a29e63945710b548a6d9935850eec69830841e26cb6071e908bf72c87cf079ffb34c5eb1a390def72d004a9488224a18e189aa1092a0f1135712834d257a53dc1d0e2c6417d8f472ff13b181910f4c93a307420d44beec8875d5219a3160b8e921434ddf3f71d68db1c1d5c39d68edb7a604792f8b4e31ecda7895c99fc7031a5b98a22009c1da005ac8fd2da0b5d742743f5712d12fd76d11a18e487776ce21ca0d6e5ab9ca6d8c394c321b91c14e291399a642721361811a73b7392e8603a3004e7060bf", + "ae1a8f7bfe4b1a0fa94708921dadb2c20b938239d7b9a2c7c598528f20f49764d322ebe85a5b2ea15563cf2f2304baf55d6607c52e2e1160859dcb7af6d7856899eada0e9128a180d3de6fed9334ba52b80c5c362d5591a0ec30f86d37a399927eb1c53076a12d26775522c511c83eb5b7abc2a00bd2dfd5627a8febba53d85f9b74c4b7f0c862ddb0d9298899b646b774d6cc23e4e23ab47174fccd34499253996d5e0917210e2f6daa1685f89f2f1fdfd5509ebc38191d539ecfb54ff0f5bbe6ef36ea35d425af6462f518", + "1d033e06be253ab800c8176d3a9650ab2a5bcaa03e11ea95fb9ab3834b41eb0d1b2bcecfe219364c3104ef65a8d692bd77c798548b7d9a8faf7f5172db24ec7c93006d6e9839368291b8277a82c034a3731f1b2e298d6e0282ec8a7902e4f844d132f1d261d171375c646065e201849f2df73e3748d853a3122c2206aac92fea448500c5418ecfb3d80e0e6c0d51f85831ce74f6c659cc291f5348a1ef8b949f1b2a753633e382f40c1bd1b2f44748ea61127b6f568255ae25e1da9f52c8c53cd62cd482788ae430388a92694c", + "104bc838b16a641749dcf73c57b207ea3bcc84381170e4ca362065a3d492e892b426a1f4fd82f69461d1ce1f3aaf8fc291ea30d6667e7e1aea4c44f7d52a5fa6d34709e6658483260ff5da76bfb74e7d194ad40dcac00daf0e45e74db4bc2248100a8b256b257278c3c98f1f2e3a80cdb812352aaf4155b3a4033999fb9fe7f506994fcf3a8db31e9e5ca8ef8c2e9c6326ca5b0803724ba641950eca877fe6ed6afc2e014651c56d0e6a61eaff7c5ed0b861d4bebe42904c0a568c26aa8abb2e97da2bfb40f14eafb6bf16cd208f", + "5b92e4a175437d0a53eb10de2c56401720b11715a034459ebf506c3fd6534b5e817a0f09deac4bcfd353301d8d031b1331582ac09189b48e6ccea444655866c4bbd123d45ebabb774f877cf12d33b84cfca4a6a94f3f98869fcf2bbb6cc1b964c2438c2f348bcdf9001dce60a4706d20c169a040baa61cbeb0b8e58d505e6e3739ab03e110ae7efdf91347474033defbd1e86af322ec6456d3394699ca7ca6a29a70d9b10a38fe666eab2858bfe12dacb31568549c826c15af5b6fddf779954351be1872f04e53db7b3b5fbf61fd18", + "401cc7bd9f8227efaed70dad83fc8db3bd38efc166f0f11ab142c565c68ba9db680423a3d698b6f3476ef440051fd20b93f6a2ed045825567df5a65e3f62e4442ec396ad260a16a13a1dee46c7e8d88bdd7edf223ab76a9a787c1f4fe9925c051a4ca0e77a0e78baa29f36d193c862fd3a60653f544ea9e3f75f2f553891be8c1fb882f6a6aad118f576f3c2793efc67221b37a45ab6137434f6228cb002fc137b91fb8572c757f00736879453d64a8a868c131810ffdad9e9d028d132157ecb1da675d54047d19b27d3258c9b1bca0a", + "c20cf0354982ca6a19d9a4dbf78f810934db2373941a12c263adefa61a5f385c859bc47028829c531dc25ccc0004c7510e707175a102ec3c4b4c933e3f52033e67476ff5f864c446c042a21e6037f7798363d20267891b965879fde80af6b59d77862e3a229af01b7ac78b578e94bd9f9b073c38a627c1864df0083aabb17024bdab6c3c0f0f73d31d59480523a2f23b78baa0385c15f290114305d7f98786b7dbc17a8c2aad97448e8ea389e68ef71091a6a9735ac12ca5497b9171da11a93c28d3273f58b74e2e46279d3ce9d0b20d19", + "e2365c2754073b511f16a1881ff8a537541ca7362ae7b84223d3c7d1d49d03a37d6d05dd2b819af9705c015dacc9dda83474eb14b7d5fce6e8a8f8c58e870149338d320e5ae476da6749af45e65ffed550d225a39dc74ffd93ba7da476985d6f44e90fc8e82454496260458431804d802fe804d825f611772f9710667377adfb1a11e4275bcecb42175c515f6a9439a359824f82cc9d480954364e6693099a821ace362e6c7ecbe68be8823bb5b49b4f23ad81b64139e3b63d9d4d298a842f013ef0d91ce7915ee8f816c70ba2aa3994216f", + "9c43944676fe859327096f82049cf69e48b98715878400fdf2805e0d5ee642e6cc9c43739f418b701348a033c5cb96bf8702fcd2fac9be58262a843c1e4155ed8a1724b6ebf7cce659d88a95a0c54deb2d7d9574a45219b6419ee173d1d8fad3ace47c962b349abe1048565df85bbd0eb9b11698258c23598023a00fdd26573e41951452027125c6e894a97736ecd63fd15b29a55d8dd9dab7e2e18f541a2e341890a61b7c896e7dc67aa82f3479dacd4a8ec7558d40c34d9ae4060e13718d676c2450258d83de8a86e012813693098c165b4e", + "1c707c29582d98a0e99639211102f3f041660ca03ad0939fe3855b8c1b22d6a9b8673c93e3eabc0ab231509b2b0d73c76a290a363943d12d2ff0ea30c6dd54eda753767effe04cabb4c3966388fa4c83a1906a0f48519a5fba9aeb585e0f8c45d6123a75ebe98fd1d0272f733a3925119481a321fe7509346c05128302851ba17a137f956f184e057a305e79a148727a5926de6854eb0314d5492fd735fa773d99ea34c95ca7546bd3a3aa8e66bcc6d860cec3d35d0e2165d5fbe8be99b6e7967df6693e5a6243e94c9c4a2528ae6305cbeca209", + "8f1e88103ffa378f062cade0ec509bec99a5c73fb273e79dbef24abf718ac26ac23dfd2b8932038ed3cb9637b71643c161142019f45b25b4fa4c52356737a27027e805ec635154327a66bfe64efc6285cca98c34edc7fb6c0766970a545342cf840aec0a5ba1dd3c6949be4fe97b0f8c8186de07536fd9074db34d09b2f08af9dcf9424d6edbf9cd044102c0e5dc35aff78c36d079dbd2c500e19c8c985ae2abaf6b2a20716bb719754a8840ce97632116c4d0b0e3c83ccca27f11c4204b76b5d6cfe6348a9615d8e4af53500dc4c2cabf12ec8c76", + "b9a0c28f1a6156992c103a84655fc6e654fa6e45e45819513afa797024717c00cc195994512fd53ecd1e12dac4d2448e0c40308382312084d2111f7db147b2e6589ce6d977f6115f629508167df8f45bac98abd49f6b272bcc4fd874dd5e29fb6daceb2d727a2a892194cfb9269eda00626ac89b4e74bd29b21e9f6ef18cb69889a02d4f0a06a2e5718899c1dc3b051c2cfa29653e782f87fefa478e6465bf5ff27f8b6abdb500077aac97100bd955ec535a587d66f23354be51cd8170289344bac9451f74e8aee3639f7c09981f4885e018912324d7", + "456844a34ae1074246f8f71eeef2010ec8733265bed7c1cc60043d770edfa320cbd4284a94be2574337e16d27f125074ebd7e99031f7abb4547b9540a7b0b5148ef501b550dd929f3dfe39ac65519f563e9254424aaafa05b1d37c16c771882e9e25d4906ac58603da749adf686932cd73d81e2658134fe69294c7a521d257eaf2110c667fc9d6f09b52d24b93910e532184eeb96eae9d9c9750ac3c39e79367431ac1af7011172d0a8be46a31010219a0310a733068c589bfc4748f3626aa4ff8d355cc893d05111c287c9992e95ad47481a6c42d6eca", + "c5c4b9900b9727bdc24baa544cad5faf8340be6b3759361f53889f71f5f4b224aa0090d875a00ea7116772117dbefc3a81c6950ca7ceeae71e4ba975c50d61fec82e6d9448d3a0dfd10bb087bdf0673e3e19fa2aaa7e97eebf71f11b86034fcf5a61240c71444ac3da15ef09b27b3523d37d309e8722380f835c1aee4a767bb027ec0674040853e5b53d6a31657f51acff6d2487860becd5ce695696cfe5937f4a0217b69e01cc6facc24dfe5f5230b8692a0b718e3b3c789d682db36101795a9a5f8bbb838c3679be72f7941a1db180135347d0a884ab7c", + "1781df2fedd2c39137854737d054cd3ed16b0ade411e41d97888ac900fdb46d9ae26b3d2dd07e118fd57eabd0dfd03a55793c76420666444865371adffc9b2f35068a0d70f9cfda1ac27ccb4beff4ffa5b8bb8bddac843386675c38a181fd0d935d6d51b25d78e7ff4ecef27a9853c0f0d2879c395ed1c4883987d123890d04f851c3e042e1164c68c0d503de16816f4b0e554236e5f4c339ea11d01ce652f6208f78f457a2417a97c0a6a240f443262def4b6763abf53e597bf1a28f907dc7cbdc751a234ea7d75710ad5ab0c37e8e9805102a375abd44011", + "8963552ad1e729ead07750df599d734157aaa4bcdcac17e8eb19b4f99cdb162686ff433137aa4e8a0cc8df0053999196262115aec326cf37567d9ba4760e0ad21d5763977f1ab9b35c0fc667890fa87fc946ceb776a811b5adc69446bfb8f5d9908029dc5aa38db816e4a4e8f98e5a48cf0a01627031c5bd1ced8bc1940dcafe4ae2f1199b186468eafc07e96a89d95dc18ef0fed3eda5b58ce58f221a47ba5311313cc680367eeb058fafc7bcadce5f520b6371489d9e529278ae6ee2650a85aed82896879038bbd9aa8d685fc9528943ccf2235cdf69a86464", + "23ceae3008085134433f5de4b47bafe0f443d443491e6cd47b216dd2dcc3da65239515a6e6b9beb9a939ae9f1f1f5e11f88326475e0962f319d9bf75ddfb4a46e7cc3f799d7547f3c0b2e089018b75787b82ea1a7295e7411f4852f94c94170e98bb0647923b8eb7d184038e56560da46085540cbfef82b6b577c445d038f6c93fbfdfc96ab3a0191d20a57b8610efb4cc45cd95198198e6f80ac46b0601511885f650eb00992605be903bcb46cd53c360c6f86e476c4c9ca4ad052eb572bbf26eb81dd9c73bcbec137aea6ee27aa97dadf7bef733fa1555019dab", + "c0fd31e82c996d7edef095cccfcf669accb85a483ea9c59f368cc980f73da7202a95c5156c34192ae4ebf773c1a683c079b17ac9d08b4265b4054fcddaf6666ca50f38f1a2ef2497459a68c06837363a526e850ecfbd223f55dba67db017eadb7a9139abb5bf3854834478b838aafa16c5ee90ea52fb2f7b8db2bcefb85b06fc455c2b6c27d0af9a49dbf2f313bf2599370637393e7972b31d8bf6759f3e6115c618e672831f84d76ba1879c754144e1df4d56b1e264b1797dcb8ab165040c8d20b931071081d7f74fbff590bdc8e888e71acc6a720270da8db7c821", + "936fdab91fba396e4a8754a97a04ba333daadc29885c9d0c8fea3387165278f4974e468fea57f2bfd8428c4d0f010833283db73735d39de0c0cb5898d0c06c0ecd05f61098935cb6130a8da60d1a6c2ecfe420f972263fff5a631b09e81c837183c5528bb1c740b36fc39cb082f3383c2b4afb25d04ad1d1f4af63dcf26a0bf5a647cd2e35a51cc119c4dc5031f5715b3bfa1f2b92de06bdac0d670fdd30980f32c51f3936b51e5db6b95a8d36279da5faa4c4e454f2b7e54e9f488071011c7f6f9b63da260a2e46d796d36c9a9dcae88085806a10a77bbb670d475778", + "a55fe162b287bd6eebd6cf7e7aeea8672322d924ae42c7404ff89aedb98943f3755d2889bca488cc7000e6e9b8e7a0ef289273cd29c44cc600e330d1775e3cb767f12150e1615dca8c3f67466463a3ca993a1b788cf67a7a35b95dfff954206eb5ea1e1bf7fb06482a551625b5c9fd9a86e8414c8cf79d3a14104a153cbe04aac5172aa4c4a89349f5856c4262dd1d7317a7544c9afbbed449e7dcc2b58d9df6c9c9ed3883e42e80f5c2433550f30e73c7bce0fccdd880adc19282a392dae26a0108e7faf168cfc15937aeb046d60712603286b8ddfb27916b79242d56f1", + "2bd6976592408cdbc4e41dcd3ecfbb786775ddedef914d9058e6753f839fdfe15b17d549dbc084aa6cdf3befa0158aa84c5d58c5876144fd7e6c41ab7d42419d0dd353732e0e6d3fafc4f5626c07433390a4fd467197e85b5de7e2cf1c26cc575356adedcc0740008523b503df12ff571387726c5ccb280376d19cbacb1d7ce7aab8b13292c6a8b8881e949cbf6d4610d16ebba1d46cdb8d0459596e0aa683d0307bd926e14de19b9bfeaefa29d91b82248604673a455520cbb64eef3f38cfad8e126a3b1cfa1aaba53a784c8ae0c50279c0ecdab54095d36f67ace9b8ebbb", + "71913ae2b1c8729ed6da003c24a1d4f96e28d7faf55ca14ee0b2865282b9b61103ce6ee0b00b00aacf2081adedea5616f9dfd22c6d6d4f5907bcc02eb33edf92de0bd479794f51246d9b612b4543f6ff633c4fc83bfa6144c9d26721cdc690a3d5a8db54d8bc7873bfd32924eeb502810732b5ac2f1852bb021c401d26c39aa3b7eb09083093a9e89bf889b53383b5af61110aca1b9fdf38908c7d5a184fc5f46b3423a66a2749feb8de2c541c563987278dbd0513d99b732411012b5b75e385510de5f6839c3797dc094c9501d5f0504b06b43efb6e746f2129ca189c1da424", + "9d048a83294de08d3063d2ee4b4f3106641d9b340a3785c076233686dd3382d9064a349c9eaa78028d35652078b583e3f708e036eb2ced3f7f0e936c0fd98f5d0f8aa91b8d9badef298bd0c06843831279e7c0c67ca7e572f552cfdd984c12e924c08c13aeec6f7e13d161785546ebfd794b5d6a92a4744e52c4cab1d0df93b9468be6e264e8cfcc488f9c3c1817cbe501f4b9cc5999483b7433aea777226b25273a6ef2331b5f3b6db8091591e8e276015da3ef78bb2ee0526ffe23def2d8d193cbe594e8ced1f3d216fcedae2a1eb288da82e34cf98aebc28def658ee0849ae7", + "3251c96cbf82ee2e5264528c0b6cdfc23d20e1eb2d6441b5d62f0fd24c692a0d45a8bc8aac32884b7141ac0f4f113ec9fc7f6b4db3d696374177f9a42d602ca471275b928f639105a55b846da9ac7274cc37de8c38541f6895f94d72a81e117844b46601c201f7189b935a96e42505f2098ac985d92dfe86349a706ef6325b3c2e4060ced3c453e68ed09e043bcc75846b80118dc53530248da250fb57922d0afa53a7b2c89161aa4fa372a46b2a8e1307741cecedf585d2f998a9d496763800b6965c38a5d8aa566c709f13699c8185ab4fd8fdc8b824f4dd6d1c255b4788f50574", + "2de31dbc8a012254586f3229d3524fc529554e98850d30acdfc11406bba6a142029126ac165ee90b2de7509fc3571a8ee12e16b05054eb8baea879d135b39627f0d8331be3e66bc720c2096ce74e437daebf3bc53d8f2ccc228c3256d3edb6e9ae7c354a0c9350e6d663a9a30630bf9da3d96b96608a2a171ae28105714058b6c4b38a36c56561c4612c32aad25c65b7fb6faa4e4ecd44ebf9b2fad42ff9a807cda2581614fd30d41a7436069399b8d4f062a37a5bd4066a93d541fa5797a7d3e7dc9c4c40f0bbf5256f71613240f9ef128b3423eacaf428ada06b6a531f835281e4f3", + "07dadee629a08223dcd7ec441287b4c5e26347451d9c003e3a8496b4ea313b51126283a6720d7851e24423d9c9c818b4601247178f38a61f45fd4c8596d79529d416834226666a2c8552bbc901cc5cc3406a18fc88077fea52e1b620748553052ab7788c0d025b095b736fbe714cb3a968ec16b5917652eba2d7cf32ef3140d6c27b25d053e9786d24cd09a5306a0ef55e46201faa6196a91084267d7a7b5ca57c2efdeb2cb97d682d2a191b915553c8933f1d1b7faf0b4a1d83ef611f1e44438bc1c3d860fbfd12b5f26e5a6889a31ce26ae6a55c7a563b5816d113423ef3f25fa9befc", + "1d94166bb387526d519c4ce150221954da8930f66765fe6a5504e30a69962d595cfdd07a82c003843598864261f053bdb6f5086d516c261e089caa89990f0967605768ae9200bdfe4dcd7b77a93265cb33d9851a2a1036113c732bf3f37534530641300f0620de5c16101e16f4baf39d9fcbfcb01c52afce0992c329d8dbb438c314eee995c5020611d6f889e06b8a032785cba9a415580dbf752b5e510523c89f478cc6f047bd926f51e4a965c9749d1e76379c0e7e5b56803893bafaa4d2892b4c52f143b2fa777cd1035ea418684b8019df084f9a3f1f768753096621f342895c510d01", + "fc0073f199ed8a1d6edc8e7bdf182670003108d82b283aba82326e856f8de378987a03d0fe8d2041440fd29d51c63796aab44090d2b14ee00859b3a08cbe88f724badcd3c401226c5db8b307b8deea5be305412b080e9f99cf79d6d08d3646f347a7afebb62912e3e246e2e726f9aec5c101d916e47f984507b1d65d313697256c77da7eca3bc5811c87bee02a2826cefff0d92bae989609aaf95d70561b40d98474c37277c884aed887a1606d206b11e8a8a71d1f1d19319557b57351228ff0404be700a6cc56c0a30f3d4b7a0a046463fdaf19e7d5f59e155f378e35baa33db1e881f2207f", + "f42a6a91278d6a076feba985b1cf4ce0af1fa9d6d039c136e8971e665ff088a10b6b9a379a6f5526fc5957773a0ccb8972a4a19be0745ac13937030a54b18dee4f4c5df47a58a33a7516b90e646e5da999166ab0e52f457f7c9b7e391836a687eaae37b377e59a4c995ab0c57162c307ab951a9ba6590f429cd27250e7010eb794ec1b1ec35f8aad189b2fd3e8aff24d93601d91a4884e6f84b02757ce7620a02901519fccfda52f68ad6df709d112a9c25d66bcbb9622806427ca8b8d346b6db05874bde800cde9cf17df4b05baab0f133febd1ebbb053b49c109a7f5b1f864a304d10288e2f0", + "bbcefaf4a0739509f8a2f831c954071aac52e60cfa882a867b8b910dcf7edf92e1c0692bb027bc378c460a01cb6ecc8f2a012dd84ee5a678cd497b1457b6d393421fbee98ff544fc7eba24cbc3aae506254d9a2d74dde74437ce4c8a69010718506bf4c5943342a942e5e2d3406a3016280b6e37954c5d5e763346251afb0b746cad68cac757f9df765e092518729cfb9a5e76300c124e708ca33591a369767ffb63933cb72fba67beb2223d98984d0b75eb5d1a38615913747b520b3d613c715c0c77d2987bb88f3c419bcc5d38573cf4a8a4f550b2d876f05ca252d88c70a561d869a5018b32f7", + "dc2437010cb05d9cab2af5c275e1d2acd627ce19fb86355df91fb8d059e60d591663c8eb077d48388c9a321057a98136f49f0098348d9f29d808936f98bb1787c7ac75fb14f6076dfd2de5b59b1fa4848cabaa9a99a091dc24b561911c392ecdbe53f4adae82b852d830adea3a10490c908e337ce0a6d12354ce05a37ad3a06696b66820af8a1f67e6287533fd6f38a5f6ad1c6b078c08baf2c37d2683af01e6a5b33796c8ae48935a888f9bd265f4f11a4e27c433b8b1c9afd140bcd21a07e24378ad6badde8e47c57e3340f49e2406e8d49afadd65eaaa4c3d078c27d7e42118cb86cd248100a356", + "6c290db326dd3152e6fa9b9c0cd7d49e50a0221b96e32f5f34a8cb7d0c2edd3e937a7d025d6999b7b468add4d6894d8f7aceaabc18f4d9c171f1fe95ea1ae8570382a8450fbc595d95b1f51d24e1abc2970b0e1d20ca40aa21bdfb3656adf2f19882eda606f5ef1c03174e1d94c8d12f0fee8dce6852f42a364eeafa27a7971d4379405db8e46baac4d685b969238e5df06292a6c790bf1994a051b038e1d8db91e1bc4804f32443781c34a552ed2e8100cea374e77af56ba0e11c45990d3ba68df9087b1f4968cbcbb1c42f99b7267c76af926ff3134e093df28fab039cad420c6b70f2d9b5e678c155", + "ac724a22ebabaedbbb052953e3c264a4b6440f313bad501cdc1484b64f33402a2230898776db5c818c28035ffae6ea24abd04b7159e42159833903a0c23a7c564f7645e49ddedb748fd9e51bd6cbf2eced98caaa35226970f003ce1fd260ac5795e096f1c04aebf8fd36e5e2adeea929b5e963a3cb71d6b55c85bb7d3a2b03a7e74b4416de8fa68950168d7c3ae8ed2e29bad1e8a182a7c5418e5d564373163778cd3c34e9d320eb1a60480a8f98b12e0026cbd7752e6079812e3767d9f55f3f10b8c214a6eceb2a58954091a06b33862af171a9b60bf2c6a44e8766e6c56e98092c56f2a8510f6d05c103", + "8c70114f7cffb375c2b9a06e27297a5c32418b2daf68af5bbedcc7106edbc070e764bf40c1f8eb15079e2ab77f898afff3490108ed9afb7ea9cb05df41d263be0e42d2321d3d2656622d7bd232bf68d37375fe7314b09cba66f19c8b59424198ee69e7a9f3de0ecce0685127807ce336fa479ccaf7aa1ebc4e406271ce6c4923ec36093516498cc227f9218869346c80ba5ae83e023aca0ae2bc86b5bf5d115a4616b6587cb869d92f8c780ab70d5766de07a204af5e1c8dbba622516d2e911b36c82e4687e4d258ea616c07f76ff0baa376c8d5975cffac0b25817f779ae3ce88b72eb47e378484ce999bf0", + "0733d59f041036398233fd47a84b93f6778ae5259ef5d62aa3b9faedec34c7edb570c18b2a5d2c4c55cf656d98a1ae396d45a3b746b7ad6f07312c3d05d1a50ffa90bcdcdba105e25b7b0c52664223f8c2476925d46dc6ea2406ded7d0b0b292f6656cebcc7616cfa4b82aec68b35d1da67f6ed2bf0171849d6bb65128d8a140ea5cf97f1003f8d7093bee077be78def4f7bd2caccbf0644f26b26285225142c40038484c3bb9ba9597744f4389e76dca3eb695c33ccc621cab1fb603cb3535a0ad318d220385d5e94f8674f3d55e97e097f8d5c049e911946afbfce783819951d65d6bff4567dc951390d1aaa", + "398ddbba3dcb5642c102efa841c1fcdaf067062e7eef8e2ee0cd73d7f77e57372d6ee1a9b7b6f86ad12d575001ae71f593449cb5a476c6bfeddaa2af0f9239c1d7effdedf66ceaf413707b5ab9661a7cc0ef8cfe4d1651579c4f0f64e2d12a52653c54f2dd60864e769eab8a627c89c56ee93365d031f0d2523cb95664b1575d51b122f33c9e94de75432a690658c977b68aa5b721a393f9b9b3b612c10e920a7d510c6d8460b35f8614c42f5d2c241a01b28105aa7c1b521ac63ebbedafac6d5a38c898e8590f918a1927bc53aecc2b1c8b18d7df9107c6997d9b3fa4b0bdb1c603da619d9e75670b97a5b40f06", + "ef07bbc7c4150dd47f8c69a7989948fe831dc798b0424dcd6551bfa8e88216095a7e5d720909bf3d23526b9ba464b66ff6b63a7337c31451ab9a15f04ead809a62bb52206237de77597a730106d02d227dd6099ea9ee2a92cdc446ac3b9d024e32255adb3e9b56b561c431e0b5a721f0336f19568a5335d0ebc6c73ed8ff2c15e219477d9e4b67f2928e251f8a61a2848857e037d010806c718ab062967fd8e85f3722252957923f5f9005aae47b4b1b3fa464e3ba9df573a56055f17e903126fbbcb6cb96de92fe617c97f84ef3ba0d8f2651dc4aa80c157f372ae1bc02e5067ad076f3fe48bb72c0f3c99273f82b", + "c7076986d2333f3a6752adf11f1a9e5c6bc4755f341073cc86a9c7519c8db029d5ae833fdf3fee826ff4692c57880c5074620ea97c00f1dde1e8a0f18501627984ded4d1b5c4af35be5cc1bcc868060a49a968dc0547acde490b4c68d79924a93a986aa0ad060c7de706e8a99ce8f84a4f8707b52a8ee122b763ba580d6b1f35f6af25094c69f49247da96c836991851ad36f60bf577863d7471608a012afa7a56656abeee7cd9b4f1f4d9d13a8526c0f33cd251caf7486639e787250390e7e488e9ec311fc3d847a7266cc59bcc2bc34192554aa57cf25db10ce04bdabef3fde6db85f55195ecc2ff892b2e268ebea6", + "01789f40d42d8d3e4a416fd9ae7de78c3a30507809eda200e1afaaf8d7020cd1fad18eba62d821946f220506cf105ff0e2069a771a2c233714afa6b2f695497e4b95c9693dbb93ec4c9a14720676aa87ee31dd34e4e081756477032b4a57b328285f2cdec1b269754c474936927e93acc26012aff1bb36f30c2402aca0a9b9ce9568f5000e2c934263933b436c94f8d6589c89db7edabc5d03a8fe795fe50c5166beab64ed7c22662b984ae2c66dbe4c090b0df603b27c759278f8d66859afea3f6a8f02c2c2a2202b9fc29132256f164b5050a803b43688dc4c9ba86374a3522afba5d1a19bb3820b883aebc267627095", + "2c61944bd6a50da00ebb951d2b67d79fc6b6fb5aca83b1de3dbd7690ab756bb1e1a21051ccf1e24136ac8ccb42a2ee10be94d2cb9289d5f52b6f90e9d07a3478f36a1eb7d08c3dec52ca154fd1427ba92a4ecbe73a71bceafbd26e9a39d50821e2876d3a0c0e6e373b9795dbf72ea29cc439ff42706be798c90d4617b39c90ec84bf9fb699dc8a9a34e25d81759d6c57df45efb1d0d68aa51278564b99633ed5dc464bb7d53c5c21f798f33bcd868657ecfe75a1ed8149d394b398969ef624831b30f1458465bfd2fdf3f284f2ffc54bf2817b5fab2e02056e864f78bb6fd870c64f3609dab218f25da8060f756e45121e79", + "942fa0c68cc72f69518a3a7aac0cde45bab0e928b5cb2bd24d049fc313f74b6afa87c4e34150484f3b5200163f8a6472d04777928ecc49319539fc17d71a38090f55a74f757fe45781a3c09f08dcd3dd4c73c8533a5e00cf8a86ebe77fe45be2848574f7c5d25e9a0632a60d2dd41febdbf987d2a0487e4a4ce6ed5f49f2d741a88ecac232b1498253fa4ee8147bbd0f600abdf295e81f7570015aac5fe6ca7bb4a99bb3fc54287106d7fc1132a574af49db82a7b9a5f33e193cde527ca2176c52cdab672165e0fe5720f71ada57ee90060aa069ae2a0bfe67c1b71b17c601c3c2224bf9891bc11ba216e3ebcb51fd95b8d7cb", + "0d68cfe9c087ec116fe7572042385159cc705960f842aabad1ed1387ec1697f4413a23c6090041328fedd4b626c6eeaac5b5a71acc1fd1bb8fbd228857ac5bd045c364be7a5a26338ff04c99c4c473cf445a891db6422d1bdef4533442df171643fc36a092fabb464298e4194c9e2950884de13d113ee24160a416404c16ddc5d2476cb3fb80da543e6ed9105f6003977acb34e1fdd2cbdf7a00d5ff84350b74ac231418c0d88269d02d824802791ff42a51cc835deb9869a6023f867f82ef6dc0bfb03e6dfa835646bb18a4074773486e308aa39e532aaea4e6fb35dcada7e060f8282c371ed26d22302323d4fd142a85534671", + "45e24b167a0bbef1bd8f79dd047763d0754f36a7b623f298059d177e8ac994945c37d2c4af06f01318960301595941124592f2995af1459d854339998d3ae17534df2d9793d6e203857d02c98a0cd88991e641b3e640090ba303f87b907dca8ca462fac19ad079b2c82ea5b521ab891b10138b083b3d9fa214a8fe60d1cb3599c5d199c61a2cfb7ee2f39e5a5abad5ac4998b707545f73e92128d21803420526d2598a53bb314adf29a0ef56b94bd2221601eb53ecb8540e8fffd38fba7bd827ef255e4ef55491475c0f383a241f81c72af4e1dbf2a65cd4d18a497615aa0de2791a3511a7977a8d4d41492bfa4085f2fd4e8f751d", + "1c1bb695ae90e6e33fc1e8b2a62ab98bf835ac7193440f2351c8cdd830472b637d2fd9c9013cb83caef506abc1c4f7567706db6046b1d184579c7a9223ab1b35e32898c70a3c27628123ffcfa518612f080a2c4a9f8e0a927a47dc98307d2b48de9d5dddcb5c82f0b0e4e610d44f1baa9bbbf7f5a727134680bb7d1327b73b52d8e5e36dbb53971e99e699d79f75a3fc01316bd7012947d119d6aeb7f75b8fbf0479c03002148553fa0da450fd59d4f1bebc252caa11ed9bec5b6ef54279b5f8382b61cffc67ec03f4baa7ea476c31364b86aa8ccad9fd0818717f0ced2dd49477874b4341c602d7a1beab860eb476c7e3ce597e6926", + "7a3cd9bb2277e2c7f1134fe7233f0f7883c2db9fba80aa5742b03041de0fe589d9e5ea84470dabf41bb66816f3e33ebf19a0ca5aba1004cf971249b258ff26a98dbd0c37ec6cd574854109433357720040bafed4531e0079186b1e853e0ced35d08d27f6d732ed6e2c6651b51cc15c420a24f2dc36c16ef4b3896df1bb03b3963f9aaeb02a48eac5772abd5948c2fd0db2bb74e3351e5eabd681c4f413655bd94dec96b1544c1d5d2d1df4bdc26020d25fe81d5238de824687a5505e1fbe08d11b3924b3ccc070fd225bf01eb79e3d21f7b62a836cd3bcc11c931669c37613470e356143df87c48848a829f5e018973a5db88eb6c60203", + "3f158afd0733fcc5dfe1efc2dd4eada732f942af734ee664955bb1ba613eafd0f349e7554a14d68200c62d8f2dca2ec8b81c8350735eaf437041f78b452598825b6899560963ade66a0fc74ad01f8343d1d19c7bb327a8dc14ffdb1c42fa72b2970d9155e2da6a2e6419d4117842d826ff38ffab9617307a0283d3ea28c8104ad9a6e087bb750ed1d10fd8f7100b1663682e979d80e43968c33d9eff66f4d1344e583ee521e78d0a2193c0577516b978339c143bfc689bc744bbc4a9163063de82c9706384b6b385e54666c86b34f23c1e25be293af06092ca31d857e11e5b2caf0d19dd3afbe85380878eda76d718b4bb869c67e044e242", + "a177af4387b9bfa3d59e97ee7b0ff5f4ae4a326fd9204c8d28831a67fcc385ee6c4828247b16d11aea9bb8cd9e6c4d2876c6b2fa6d5041ad39e1b04039071e29c4d86417e7eac4fc7d3823958a021823e2c880a757dfbcd0c8196371db5bbfac15e4d1a0596508b6d26f8c4a664924c95082d173f817995b44c4285d625d9b2f56c86632fe1295c5a8a7a3760028072bcb07bc245a705e7174d06b9d5c0c8ca495b9ac218f1921fa63f2db3fd148f07545366d008fb5aead7497d902b91fbaa39669929d4ae9d07df8557f1f0aed7b51252f10c6606e5ff3ede1327530ca356b4896ecf14bf7322d77fddfbe28d52f6de7f66eeb81704c87e2", + "01a15b9018e35cc342c926b01d03ad9db4993a6bf92e0555969fee90033f28f3ec234c1268b11b040dfa0770d4ceb39edfeb8ee6a589f4eebcc08d2d1b0a1a52953aa26eb44fdf4a2743c3dacb212a0c0f325572f645f53027b6f3c0c55abaeb1b0918c89bedcb5028f094d743ea354f8ff553c45f111a8fd5a14a4e5c835164747d302472e19a67da04b4c8e39756a9d248ce14d1ed43de75aca86850f2455eccd4639b2af035bb3f504cc9065d091c1c47e036083cb3fc50bf39292b11737c7ce0b49673ba93981de304dc65a671775b6ff927e3ff93850b214fffb5792105a4bdc81354d5b09e84afbdd1792b8fb4e9d0ae3dad2492b03282", + "24f07ae31279ceed18ec6d35990f21200934ad6b132c6c62e82fe92a40a0e60a5bed10720eff5a1f728971888682772b2d9060d4fee88f37d0824e7384dddcc549475f0e1a44eda4804778b62febe46e04657a20577ee70acb3425e334881eebd8ddf714ae8c527ea747e3367de384e595a43b299b6bb3f6b0a4716cf90038e0f75a47d5057d7fcc3c8a8f9224992c67f8ae0d3251ea09a24aed9ce57ab637f6b3cbb7083df62b6287f64d0877984c4249d113bdb2b07865082aa24cd7ec07061b17de320f51f29f25b82d7073d369cf2dbf96310c0c311997911b2cc02f606f9cd99663c57e78499192a2a78f9c9fa67013e0f9817287faa69b22", + "4aeb32bf9d050f10bea18d9f71b4afea7bd08550e574e7d50df234c7413668b297b6721d7a0f0bdcdcceb2f55adddea28cd59bd44be0c5ec067039e428706caae11f565d961ad6e7f4c51b0aed6d05cc5b8d826c4b9c39daefb6c7da46dce619a359dc9ce215a215218fa8d54ee0b4f301b6c201c7c2c5f7cb1c6e0cb76ba6c6e8f63ef7a5213d550b0d0857fa0ff9e3e38e497161617413ac066e2fa539520233193a5cb7baa0c2cb20b45e56bfed2c40a9544d1f230dd0cd6d4976e7cf51da8a13200c3957c0154c8237b2931ce19b824963ac576ea49b548cc6aa85c47796b470fb2c6308d88f390bb13607e294c84a838b2713b14ca6a5e8bcee", + "77e607478be5502432230c913d9ec82f967d87c0ee169a74076f989648853eca693277287f8a5b306bc94dfdbf64ca5cb5dfc0bc498589d51a691b8d57d4b0a9ee247d038fe1b5571183be3e75c37045bf1235863ff1b84b208c10e7f1a5ba54ff36af5b2870129867164d013e0a6d2cc067a3509bba2f46390302c80b651cf590ef69aad8effd94cab28a9b44be6a38b58cfc47c9c725d6fa467894163383b6873d10d263b1cbbad932ded59ab503920267ac026726f794a335a88f6ef564f8968c6fa6f5d3ea161eb6062ca349b9a0e4038273399cfa297a6b07ceda1ebaa99c9de2d935ee230a08c5a488ad46f3393243371d40916b8063cac9da63", + "50957c407519951bd32e45d21129d6b83436e520b0801ec8292d79a828106a41583a0d607f853dc4410e0a1427f7e873455a75df065cfc6eef970f7e49d123b346976460aadd91cf513c140c356442a84656904a8b1d708dc6089db371c36f4fe059c62302eaab3c06c0cb3b429961f899dcf99798464b8571a440cac7a52b495f32417af6bc8f58adc63647531f804b4e96273b29b42434c1236bde80ba3744fef7b1d11c2f9db332b35bc25123338ac9a0796aac213c9709b3c514ea7ecd80e22d3d8a74f28c8194418a6e1ff30714d0f5a61c068b73b2ba6cad14e05569b4a5a100da3f91429d6e3ffee10ceea057845ec6fc47a6c5125b22e598b2dc", + "f2273ec31e03cf42d9ca953f8b87e78c291cb538098e0f2436194b308ce30583f553fccb21ae6c2d58f3a5a2ca6037c1b8b7afb291009e4310a0c518e75314c5bb1e813bf521f56d0a4891d0772ad84f09a00634815029a3f9ad4e41eafb4a745e409ef3d4f0b1cf6232b70a5ce262b9432f096e834201a0992db5d09ffa5cbc5471460519a4bc7cdc33ae6dfe6ffc1e80ea5d29813136406499c3514186ced71854a340701519ef33b6c82ca67049ab58578ff49c4c4fbf7d97bfec2ecd8fbefec1b6d6467503fea9d26e134e8c35739a422647aaf4db29c9a32e3df36e5845791fdd75a70903e0ce808313a3327431b7772567f779bbaee2e134c109a387", + "5784e614d538f7f26c803191deb464a884817002988c36448dcbecfad1997fe51ab0b3853c51ed49ce9f4e477522fb3f32cc50515b753c18fb89a8d965afcf1ed5e099b22c4225732baeb986f5c5bc88e4582d27915e2a19126d3d4555fab4f6516a6a156dbfeed9e982fc589e33ce2b9e1ba2b416e11852ddeab93025974267ac82c84f071c3d07f215f47e3565fd1d962c76e0d635892ea71488273765887d31f250a26c4ddc377ed89b17326e259f6cc1de0e63158e83aebb7f5a7c08c63c767876c8203639958a407acca096d1f606c04b4f4b3fd771781a5901b1c3cee7c04c3b6870226eee309b74f51edbf70a3817cc8da87875301e04d0416a65dc5d", +} diff --git a/vendor/golang.org/x/crypto/blake2s/blake2x.go b/vendor/golang.org/x/crypto/blake2s/blake2x.go new file mode 100644 index 0000000..eaff2a7 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/blake2x.go @@ -0,0 +1,178 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2s + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = 65535 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 32 + +// NewXOF creates a new variable-output-length hash. The hash either produce a +// known number of bytes (1 <= size < 65535), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 128GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must between +// zero and 32 bytes long. +func NewXOF(size uint16, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^16-1 indicates an unknown number of bytes and thus isn't a + // valid length. + return nil, errors.New("blake2s: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint16 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2s: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint16(x.cfg[12:], x.length) // XOF length + x.cfg[15] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[3] ^= uint32(x.length) + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint32(cfg[i*4:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2s/register.go b/vendor/golang.org/x/crypto/blake2s/register.go new file mode 100644 index 0000000..d277459 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2s/register.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package blake2s + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2s_256, newHash256) +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 0000000..9d80f19 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/blowfish_test.go b/vendor/golang.org/x/crypto/blowfish/blowfish_test.go new file mode 100644 index 0000000..368ba87 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/blowfish_test.go @@ -0,0 +1,274 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +import "testing" + +type CryptTest struct { + key []byte + in []byte + out []byte +} + +// Test vector values are from https://www.schneier.com/code/vectors.txt. +var encryptTests = []CryptTest{ + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}}, + { + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}}, + { + []byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, + []byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}}, + { + []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, + []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, + []byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}}, + + { + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, + []byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}}, + { + []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}}, + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}}, + { + []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}}, + { + []byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57}, + []byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42}, + []byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}}, + { + []byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E}, + []byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA}, + []byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}}, + { + []byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86}, + []byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72}, + []byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4}}, + { + []byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E}, + []byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A}, + []byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}}, + { + []byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6}, + []byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2}, + []byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}}, + { + []byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE}, + []byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A}, + []byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}}, + { + []byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6}, + []byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2}, + []byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}}, + { + []byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE}, + []byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A}, + []byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}}, + { + []byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16}, + []byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02}, + []byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}}, + { + []byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F}, + []byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A}, + []byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}}, + { + []byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46}, + []byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32}, + []byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}}, + { + []byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E}, + []byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA}, + []byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}}, + { + []byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76}, + []byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62}, + []byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}}, + { + []byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07}, + []byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2}, + []byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}}, + { + []byte{0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F}, + []byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA}, + []byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}}, + { + []byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7}, + []byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92}, + []byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}}, + { + []byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF}, + []byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A}, + []byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}}, + { + []byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6}, + []byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2}, + []byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}}, + { + []byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF}, + []byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A}, + []byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}}, + { + []byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}}, + { + []byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}}, + { + []byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}}, + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}}, + { + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}}, + { + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}}, + { + []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10}, + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}}, +} + +func TestCipherEncrypt(t *testing.T) { + for i, tt := range encryptTests { + c, err := NewCipher(tt.key) + if err != nil { + t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err) + continue + } + ct := make([]byte, len(tt.out)) + c.Encrypt(ct, tt.in) + for j, v := range ct { + if v != tt.out[j] { + t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j]) + break + } + } + } +} + +func TestCipherDecrypt(t *testing.T) { + for i, tt := range encryptTests { + c, err := NewCipher(tt.key) + if err != nil { + t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err) + continue + } + pt := make([]byte, len(tt.in)) + c.Decrypt(pt, tt.out) + for j, v := range pt { + if v != tt.in[j] { + t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j]) + break + } + } + } +} + +func TestSaltedCipherKeyLength(t *testing.T) { + if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) { + t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0)) + } + + // A 57-byte key. One over the typical blowfish restriction. + key := []byte("012345678901234567890123456789012345678901234567890123456") + if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil { + t.Errorf("NewSaltedCipher with long key, gave error %#v", err) + } +} + +// Test vectors generated with Blowfish from OpenSSH. +var saltedVectors = [][8]byte{ + {0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e}, + {0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12}, + {0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad}, + {0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8}, + {0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8}, + {0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf}, + {0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9}, + {0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38}, + {0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4}, + {0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c}, + {0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5}, + {0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b}, + {0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47}, + {0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2}, + {0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19}, + {0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc}, + {0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93}, + {0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57}, + {0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08}, + {0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03}, + {0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f}, + {0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef}, + {0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71}, + {0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad}, + {0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe}, + {0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13}, + {0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe}, + {0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6}, + {0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6}, + {0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92}, + {0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56}, + {0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee}, +} + +func TestSaltedCipher(t *testing.T) { + var key, salt [32]byte + for i := range key { + key[i] = byte(i) + salt[i] = byte(i + 32) + } + for i, v := range saltedVectors { + c, err := NewSaltedCipher(key[:], salt[:i]) + if err != nil { + t.Fatal(err) + } + var buf [8]byte + c.Encrypt(buf[:], buf[:]) + if v != buf { + t.Errorf("%d: expected %x, got %x", i, v, buf) + } + } +} + +func BenchmarkExpandKeyWithSalt(b *testing.B) { + key := make([]byte, 32) + salt := make([]byte, 16) + c, _ := NewCipher(key) + for i := 0; i < b.N; i++ { + expandKeyWithSalt(key, salt, c) + } +} + +func BenchmarkExpandKey(b *testing.B) { + key := make([]byte, 32) + c, _ := NewCipher(key) + for i := 0; i < b.N; i++ { + ExpandKey(key, c) + } +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 0000000..2641dad --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,91 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. +func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 0000000..d040775 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// https://www.schneier.com/code/constants.txt. + +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/bn256/bn256.go b/vendor/golang.org/x/crypto/bn256/bn256.go new file mode 100644 index 0000000..f88f3fc --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/bn256.go @@ -0,0 +1,408 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bn256 implements a particular bilinear group. +// +// Bilinear groups are the basis of many of the new cryptographic protocols +// that have been proposed over the past decade. They consist of a triplet of +// groups (G₁, G₂ and GT) such that there exists a function e(g₁ˣ,g₂ʸ)=gTˣʸ +// (where gₓ is a generator of the respective group). That function is called +// a pairing function. +// +// This package specifically implements the Optimal Ate pairing over a 256-bit +// Barreto-Naehrig curve as described in +// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible +// with the implementation described in that paper. +// +// (This package previously claimed to operate at a 128-bit security level. +// However, recent improvements in attacks mean that is no longer true. See +// https://moderncrypto.org/mail-archive/curves/2016/000740.html.) +package bn256 // import "golang.org/x/crypto/bn256" + +import ( + "crypto/rand" + "io" + "math/big" +) + +// BUG(agl): this implementation is not constant time. +// TODO(agl): keep GF(p²) elements in Mongomery form. + +// G1 is an abstract cyclic group. The zero value is suitable for use as the +// output of an operation, but cannot be used as an input. +type G1 struct { + p *curvePoint +} + +// RandomG1 returns x and g₁ˣ where x is a random, non-zero number read from r. +func RandomG1(r io.Reader) (*big.Int, *G1, error) { + var k *big.Int + var err error + + for { + k, err = rand.Int(r, Order) + if err != nil { + return nil, nil, err + } + if k.Sign() > 0 { + break + } + } + + return k, new(G1).ScalarBaseMult(k), nil +} + +func (e *G1) String() string { + return "bn256.G1" + e.p.String() +} + +// ScalarBaseMult sets e to g*k where g is the generator of the group and +// then returns e. +func (e *G1) ScalarBaseMult(k *big.Int) *G1 { + if e.p == nil { + e.p = newCurvePoint(nil) + } + e.p.Mul(curveGen, k, new(bnPool)) + return e +} + +// ScalarMult sets e to a*k and then returns e. +func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 { + if e.p == nil { + e.p = newCurvePoint(nil) + } + e.p.Mul(a.p, k, new(bnPool)) + return e +} + +// Add sets e to a+b and then returns e. +// BUG(agl): this function is not complete: a==b fails. +func (e *G1) Add(a, b *G1) *G1 { + if e.p == nil { + e.p = newCurvePoint(nil) + } + e.p.Add(a.p, b.p, new(bnPool)) + return e +} + +// Neg sets e to -a and then returns e. +func (e *G1) Neg(a *G1) *G1 { + if e.p == nil { + e.p = newCurvePoint(nil) + } + e.p.Negative(a.p) + return e +} + +// Marshal converts n to a byte slice. +func (e *G1) Marshal() []byte { + e.p.MakeAffine(nil) + + xBytes := new(big.Int).Mod(e.p.x, p).Bytes() + yBytes := new(big.Int).Mod(e.p.y, p).Bytes() + + // Each value is a 256-bit number. + const numBytes = 256 / 8 + + ret := make([]byte, numBytes*2) + copy(ret[1*numBytes-len(xBytes):], xBytes) + copy(ret[2*numBytes-len(yBytes):], yBytes) + + return ret +} + +// Unmarshal sets e to the result of converting the output of Marshal back into +// a group element and then returns e. +func (e *G1) Unmarshal(m []byte) (*G1, bool) { + // Each value is a 256-bit number. + const numBytes = 256 / 8 + + if len(m) != 2*numBytes { + return nil, false + } + + if e.p == nil { + e.p = newCurvePoint(nil) + } + + e.p.x.SetBytes(m[0*numBytes : 1*numBytes]) + e.p.y.SetBytes(m[1*numBytes : 2*numBytes]) + + if e.p.x.Sign() == 0 && e.p.y.Sign() == 0 { + // This is the point at infinity. + e.p.y.SetInt64(1) + e.p.z.SetInt64(0) + e.p.t.SetInt64(0) + } else { + e.p.z.SetInt64(1) + e.p.t.SetInt64(1) + + if !e.p.IsOnCurve() { + return nil, false + } + } + + return e, true +} + +// G2 is an abstract cyclic group. The zero value is suitable for use as the +// output of an operation, but cannot be used as an input. +type G2 struct { + p *twistPoint +} + +// RandomG1 returns x and g₂ˣ where x is a random, non-zero number read from r. +func RandomG2(r io.Reader) (*big.Int, *G2, error) { + var k *big.Int + var err error + + for { + k, err = rand.Int(r, Order) + if err != nil { + return nil, nil, err + } + if k.Sign() > 0 { + break + } + } + + return k, new(G2).ScalarBaseMult(k), nil +} + +func (e *G2) String() string { + return "bn256.G2" + e.p.String() +} + +// ScalarBaseMult sets e to g*k where g is the generator of the group and +// then returns out. +func (e *G2) ScalarBaseMult(k *big.Int) *G2 { + if e.p == nil { + e.p = newTwistPoint(nil) + } + e.p.Mul(twistGen, k, new(bnPool)) + return e +} + +// ScalarMult sets e to a*k and then returns e. +func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 { + if e.p == nil { + e.p = newTwistPoint(nil) + } + e.p.Mul(a.p, k, new(bnPool)) + return e +} + +// Add sets e to a+b and then returns e. +// BUG(agl): this function is not complete: a==b fails. +func (e *G2) Add(a, b *G2) *G2 { + if e.p == nil { + e.p = newTwistPoint(nil) + } + e.p.Add(a.p, b.p, new(bnPool)) + return e +} + +// Marshal converts n into a byte slice. +func (n *G2) Marshal() []byte { + n.p.MakeAffine(nil) + + xxBytes := new(big.Int).Mod(n.p.x.x, p).Bytes() + xyBytes := new(big.Int).Mod(n.p.x.y, p).Bytes() + yxBytes := new(big.Int).Mod(n.p.y.x, p).Bytes() + yyBytes := new(big.Int).Mod(n.p.y.y, p).Bytes() + + // Each value is a 256-bit number. + const numBytes = 256 / 8 + + ret := make([]byte, numBytes*4) + copy(ret[1*numBytes-len(xxBytes):], xxBytes) + copy(ret[2*numBytes-len(xyBytes):], xyBytes) + copy(ret[3*numBytes-len(yxBytes):], yxBytes) + copy(ret[4*numBytes-len(yyBytes):], yyBytes) + + return ret +} + +// Unmarshal sets e to the result of converting the output of Marshal back into +// a group element and then returns e. +func (e *G2) Unmarshal(m []byte) (*G2, bool) { + // Each value is a 256-bit number. + const numBytes = 256 / 8 + + if len(m) != 4*numBytes { + return nil, false + } + + if e.p == nil { + e.p = newTwistPoint(nil) + } + + e.p.x.x.SetBytes(m[0*numBytes : 1*numBytes]) + e.p.x.y.SetBytes(m[1*numBytes : 2*numBytes]) + e.p.y.x.SetBytes(m[2*numBytes : 3*numBytes]) + e.p.y.y.SetBytes(m[3*numBytes : 4*numBytes]) + + if e.p.x.x.Sign() == 0 && + e.p.x.y.Sign() == 0 && + e.p.y.x.Sign() == 0 && + e.p.y.y.Sign() == 0 { + // This is the point at infinity. + e.p.y.SetOne() + e.p.z.SetZero() + e.p.t.SetZero() + } else { + e.p.z.SetOne() + e.p.t.SetOne() + + if !e.p.IsOnCurve() { + return nil, false + } + } + + return e, true +} + +// GT is an abstract cyclic group. The zero value is suitable for use as the +// output of an operation, but cannot be used as an input. +type GT struct { + p *gfP12 +} + +func (g *GT) String() string { + return "bn256.GT" + g.p.String() +} + +// ScalarMult sets e to a*k and then returns e. +func (e *GT) ScalarMult(a *GT, k *big.Int) *GT { + if e.p == nil { + e.p = newGFp12(nil) + } + e.p.Exp(a.p, k, new(bnPool)) + return e +} + +// Add sets e to a+b and then returns e. +func (e *GT) Add(a, b *GT) *GT { + if e.p == nil { + e.p = newGFp12(nil) + } + e.p.Mul(a.p, b.p, new(bnPool)) + return e +} + +// Neg sets e to -a and then returns e. +func (e *GT) Neg(a *GT) *GT { + if e.p == nil { + e.p = newGFp12(nil) + } + e.p.Invert(a.p, new(bnPool)) + return e +} + +// Marshal converts n into a byte slice. +func (n *GT) Marshal() []byte { + n.p.Minimal() + + xxxBytes := n.p.x.x.x.Bytes() + xxyBytes := n.p.x.x.y.Bytes() + xyxBytes := n.p.x.y.x.Bytes() + xyyBytes := n.p.x.y.y.Bytes() + xzxBytes := n.p.x.z.x.Bytes() + xzyBytes := n.p.x.z.y.Bytes() + yxxBytes := n.p.y.x.x.Bytes() + yxyBytes := n.p.y.x.y.Bytes() + yyxBytes := n.p.y.y.x.Bytes() + yyyBytes := n.p.y.y.y.Bytes() + yzxBytes := n.p.y.z.x.Bytes() + yzyBytes := n.p.y.z.y.Bytes() + + // Each value is a 256-bit number. + const numBytes = 256 / 8 + + ret := make([]byte, numBytes*12) + copy(ret[1*numBytes-len(xxxBytes):], xxxBytes) + copy(ret[2*numBytes-len(xxyBytes):], xxyBytes) + copy(ret[3*numBytes-len(xyxBytes):], xyxBytes) + copy(ret[4*numBytes-len(xyyBytes):], xyyBytes) + copy(ret[5*numBytes-len(xzxBytes):], xzxBytes) + copy(ret[6*numBytes-len(xzyBytes):], xzyBytes) + copy(ret[7*numBytes-len(yxxBytes):], yxxBytes) + copy(ret[8*numBytes-len(yxyBytes):], yxyBytes) + copy(ret[9*numBytes-len(yyxBytes):], yyxBytes) + copy(ret[10*numBytes-len(yyyBytes):], yyyBytes) + copy(ret[11*numBytes-len(yzxBytes):], yzxBytes) + copy(ret[12*numBytes-len(yzyBytes):], yzyBytes) + + return ret +} + +// Unmarshal sets e to the result of converting the output of Marshal back into +// a group element and then returns e. +func (e *GT) Unmarshal(m []byte) (*GT, bool) { + // Each value is a 256-bit number. + const numBytes = 256 / 8 + + if len(m) != 12*numBytes { + return nil, false + } + + if e.p == nil { + e.p = newGFp12(nil) + } + + e.p.x.x.x.SetBytes(m[0*numBytes : 1*numBytes]) + e.p.x.x.y.SetBytes(m[1*numBytes : 2*numBytes]) + e.p.x.y.x.SetBytes(m[2*numBytes : 3*numBytes]) + e.p.x.y.y.SetBytes(m[3*numBytes : 4*numBytes]) + e.p.x.z.x.SetBytes(m[4*numBytes : 5*numBytes]) + e.p.x.z.y.SetBytes(m[5*numBytes : 6*numBytes]) + e.p.y.x.x.SetBytes(m[6*numBytes : 7*numBytes]) + e.p.y.x.y.SetBytes(m[7*numBytes : 8*numBytes]) + e.p.y.y.x.SetBytes(m[8*numBytes : 9*numBytes]) + e.p.y.y.y.SetBytes(m[9*numBytes : 10*numBytes]) + e.p.y.z.x.SetBytes(m[10*numBytes : 11*numBytes]) + e.p.y.z.y.SetBytes(m[11*numBytes : 12*numBytes]) + + return e, true +} + +// Pair calculates an Optimal Ate pairing. +func Pair(g1 *G1, g2 *G2) *GT { + return >{optimalAte(g2.p, g1.p, new(bnPool))} +} + +// bnPool implements a tiny cache of *big.Int objects that's used to reduce the +// number of allocations made during processing. +type bnPool struct { + bns []*big.Int + count int +} + +func (pool *bnPool) Get() *big.Int { + if pool == nil { + return new(big.Int) + } + + pool.count++ + l := len(pool.bns) + if l == 0 { + return new(big.Int) + } + + bn := pool.bns[l-1] + pool.bns = pool.bns[:l-1] + return bn +} + +func (pool *bnPool) Put(bn *big.Int) { + if pool == nil { + return + } + pool.bns = append(pool.bns, bn) + pool.count-- +} + +func (pool *bnPool) Count() int { + return pool.count +} diff --git a/vendor/golang.org/x/crypto/bn256/bn256_test.go b/vendor/golang.org/x/crypto/bn256/bn256_test.go new file mode 100644 index 0000000..1cec388 --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/bn256_test.go @@ -0,0 +1,304 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bn256 + +import ( + "bytes" + "crypto/rand" + "math/big" + "testing" +) + +func TestGFp2Invert(t *testing.T) { + pool := new(bnPool) + + a := newGFp2(pool) + a.x.SetString("23423492374", 10) + a.y.SetString("12934872398472394827398470", 10) + + inv := newGFp2(pool) + inv.Invert(a, pool) + + b := newGFp2(pool).Mul(inv, a, pool) + if b.x.Int64() != 0 || b.y.Int64() != 1 { + t.Fatalf("bad result for a^-1*a: %s %s", b.x, b.y) + } + + a.Put(pool) + b.Put(pool) + inv.Put(pool) + + if c := pool.Count(); c > 0 { + t.Errorf("Pool count non-zero: %d\n", c) + } +} + +func isZero(n *big.Int) bool { + return new(big.Int).Mod(n, p).Int64() == 0 +} + +func isOne(n *big.Int) bool { + return new(big.Int).Mod(n, p).Int64() == 1 +} + +func TestGFp6Invert(t *testing.T) { + pool := new(bnPool) + + a := newGFp6(pool) + a.x.x.SetString("239487238491", 10) + a.x.y.SetString("2356249827341", 10) + a.y.x.SetString("082659782", 10) + a.y.y.SetString("182703523765", 10) + a.z.x.SetString("978236549263", 10) + a.z.y.SetString("64893242", 10) + + inv := newGFp6(pool) + inv.Invert(a, pool) + + b := newGFp6(pool).Mul(inv, a, pool) + if !isZero(b.x.x) || + !isZero(b.x.y) || + !isZero(b.y.x) || + !isZero(b.y.y) || + !isZero(b.z.x) || + !isOne(b.z.y) { + t.Fatalf("bad result for a^-1*a: %s", b) + } + + a.Put(pool) + b.Put(pool) + inv.Put(pool) + + if c := pool.Count(); c > 0 { + t.Errorf("Pool count non-zero: %d\n", c) + } +} + +func TestGFp12Invert(t *testing.T) { + pool := new(bnPool) + + a := newGFp12(pool) + a.x.x.x.SetString("239846234862342323958623", 10) + a.x.x.y.SetString("2359862352529835623", 10) + a.x.y.x.SetString("928836523", 10) + a.x.y.y.SetString("9856234", 10) + a.x.z.x.SetString("235635286", 10) + a.x.z.y.SetString("5628392833", 10) + a.y.x.x.SetString("252936598265329856238956532167968", 10) + a.y.x.y.SetString("23596239865236954178968", 10) + a.y.y.x.SetString("95421692834", 10) + a.y.y.y.SetString("236548", 10) + a.y.z.x.SetString("924523", 10) + a.y.z.y.SetString("12954623", 10) + + inv := newGFp12(pool) + inv.Invert(a, pool) + + b := newGFp12(pool).Mul(inv, a, pool) + if !isZero(b.x.x.x) || + !isZero(b.x.x.y) || + !isZero(b.x.y.x) || + !isZero(b.x.y.y) || + !isZero(b.x.z.x) || + !isZero(b.x.z.y) || + !isZero(b.y.x.x) || + !isZero(b.y.x.y) || + !isZero(b.y.y.x) || + !isZero(b.y.y.y) || + !isZero(b.y.z.x) || + !isOne(b.y.z.y) { + t.Fatalf("bad result for a^-1*a: %s", b) + } + + a.Put(pool) + b.Put(pool) + inv.Put(pool) + + if c := pool.Count(); c > 0 { + t.Errorf("Pool count non-zero: %d\n", c) + } +} + +func TestCurveImpl(t *testing.T) { + pool := new(bnPool) + + g := &curvePoint{ + pool.Get().SetInt64(1), + pool.Get().SetInt64(-2), + pool.Get().SetInt64(1), + pool.Get().SetInt64(0), + } + + x := pool.Get().SetInt64(32498273234) + X := newCurvePoint(pool).Mul(g, x, pool) + + y := pool.Get().SetInt64(98732423523) + Y := newCurvePoint(pool).Mul(g, y, pool) + + s1 := newCurvePoint(pool).Mul(X, y, pool).MakeAffine(pool) + s2 := newCurvePoint(pool).Mul(Y, x, pool).MakeAffine(pool) + + if s1.x.Cmp(s2.x) != 0 || + s2.x.Cmp(s1.x) != 0 { + t.Errorf("DH points don't match: (%s, %s) (%s, %s)", s1.x, s1.y, s2.x, s2.y) + } + + pool.Put(x) + X.Put(pool) + pool.Put(y) + Y.Put(pool) + s1.Put(pool) + s2.Put(pool) + g.Put(pool) + + if c := pool.Count(); c > 0 { + t.Errorf("Pool count non-zero: %d\n", c) + } +} + +func TestOrderG1(t *testing.T) { + g := new(G1).ScalarBaseMult(Order) + if !g.p.IsInfinity() { + t.Error("G1 has incorrect order") + } + + one := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1)) + g.Add(g, one) + g.p.MakeAffine(nil) + if g.p.x.Cmp(one.p.x) != 0 || g.p.y.Cmp(one.p.y) != 0 { + t.Errorf("1+0 != 1 in G1") + } +} + +func TestOrderG2(t *testing.T) { + g := new(G2).ScalarBaseMult(Order) + if !g.p.IsInfinity() { + t.Error("G2 has incorrect order") + } + + one := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1)) + g.Add(g, one) + g.p.MakeAffine(nil) + if g.p.x.x.Cmp(one.p.x.x) != 0 || + g.p.x.y.Cmp(one.p.x.y) != 0 || + g.p.y.x.Cmp(one.p.y.x) != 0 || + g.p.y.y.Cmp(one.p.y.y) != 0 { + t.Errorf("1+0 != 1 in G2") + } +} + +func TestOrderGT(t *testing.T) { + gt := Pair(&G1{curveGen}, &G2{twistGen}) + g := new(GT).ScalarMult(gt, Order) + if !g.p.IsOne() { + t.Error("GT has incorrect order") + } +} + +func TestBilinearity(t *testing.T) { + for i := 0; i < 2; i++ { + a, p1, _ := RandomG1(rand.Reader) + b, p2, _ := RandomG2(rand.Reader) + e1 := Pair(p1, p2) + + e2 := Pair(&G1{curveGen}, &G2{twistGen}) + e2.ScalarMult(e2, a) + e2.ScalarMult(e2, b) + + minusE2 := new(GT).Neg(e2) + e1.Add(e1, minusE2) + + if !e1.p.IsOne() { + t.Fatalf("bad pairing result: %s", e1) + } + } +} + +func TestG1Marshal(t *testing.T) { + g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1)) + form := g.Marshal() + _, ok := new(G1).Unmarshal(form) + if !ok { + t.Fatalf("failed to unmarshal") + } + + g.ScalarBaseMult(Order) + form = g.Marshal() + g2, ok := new(G1).Unmarshal(form) + if !ok { + t.Fatalf("failed to unmarshal ∞") + } + if !g2.p.IsInfinity() { + t.Fatalf("∞ unmarshaled incorrectly") + } +} + +func TestG2Marshal(t *testing.T) { + g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1)) + form := g.Marshal() + _, ok := new(G2).Unmarshal(form) + if !ok { + t.Fatalf("failed to unmarshal") + } + + g.ScalarBaseMult(Order) + form = g.Marshal() + g2, ok := new(G2).Unmarshal(form) + if !ok { + t.Fatalf("failed to unmarshal ∞") + } + if !g2.p.IsInfinity() { + t.Fatalf("∞ unmarshaled incorrectly") + } +} + +func TestG1Identity(t *testing.T) { + g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(0)) + if !g.p.IsInfinity() { + t.Error("failure") + } +} + +func TestG2Identity(t *testing.T) { + g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(0)) + if !g.p.IsInfinity() { + t.Error("failure") + } +} + +func TestTripartiteDiffieHellman(t *testing.T) { + a, _ := rand.Int(rand.Reader, Order) + b, _ := rand.Int(rand.Reader, Order) + c, _ := rand.Int(rand.Reader, Order) + + pa, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(a).Marshal()) + qa, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(a).Marshal()) + pb, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(b).Marshal()) + qb, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(b).Marshal()) + pc, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(c).Marshal()) + qc, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(c).Marshal()) + + k1 := Pair(pb, qc) + k1.ScalarMult(k1, a) + k1Bytes := k1.Marshal() + + k2 := Pair(pc, qa) + k2.ScalarMult(k2, b) + k2Bytes := k2.Marshal() + + k3 := Pair(pa, qb) + k3.ScalarMult(k3, c) + k3Bytes := k3.Marshal() + + if !bytes.Equal(k1Bytes, k2Bytes) || !bytes.Equal(k2Bytes, k3Bytes) { + t.Errorf("keys didn't agree") + } +} + +func BenchmarkPairing(b *testing.B) { + for i := 0; i < b.N; i++ { + Pair(&G1{curveGen}, &G2{twistGen}) + } +} diff --git a/vendor/golang.org/x/crypto/bn256/constants.go b/vendor/golang.org/x/crypto/bn256/constants.go new file mode 100644 index 0000000..1ccefc4 --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/constants.go @@ -0,0 +1,44 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bn256 + +import ( + "math/big" +) + +func bigFromBase10(s string) *big.Int { + n, _ := new(big.Int).SetString(s, 10) + return n +} + +// u is the BN parameter that determines the prime: 1868033³. +var u = bigFromBase10("6518589491078791937") + +// p is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1. +var p = bigFromBase10("65000549695646603732796438742359905742825358107623003571877145026864184071783") + +// Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1. +var Order = bigFromBase10("65000549695646603732796438742359905742570406053903786389881062969044166799969") + +// xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+3. +var xiToPMinus1Over6 = &gfP2{bigFromBase10("8669379979083712429711189836753509758585994370025260553045152614783263110636"), bigFromBase10("19998038925833620163537568958541907098007303196759855091367510456613536016040")} + +// xiToPMinus1Over3 is ξ^((p-1)/3) where ξ = i+3. +var xiToPMinus1Over3 = &gfP2{bigFromBase10("26098034838977895781559542626833399156321265654106457577426020397262786167059"), bigFromBase10("15931493369629630809226283458085260090334794394361662678240713231519278691715")} + +// xiToPMinus1Over2 is ξ^((p-1)/2) where ξ = i+3. +var xiToPMinus1Over2 = &gfP2{bigFromBase10("50997318142241922852281555961173165965672272825141804376761836765206060036244"), bigFromBase10("38665955945962842195025998234511023902832543644254935982879660597356748036009")} + +// xiToPSquaredMinus1Over3 is ξ^((p²-1)/3) where ξ = i+3. +var xiToPSquaredMinus1Over3 = bigFromBase10("65000549695646603727810655408050771481677621702948236658134783353303381437752") + +// xiTo2PSquaredMinus2Over3 is ξ^((2p²-2)/3) where ξ = i+3 (a cubic root of unity, mod p). +var xiTo2PSquaredMinus2Over3 = bigFromBase10("4985783334309134261147736404674766913742361673560802634030") + +// xiToPSquaredMinus1Over6 is ξ^((1p²-1)/6) where ξ = i+3 (a cubic root of -1, mod p). +var xiToPSquaredMinus1Over6 = bigFromBase10("65000549695646603727810655408050771481677621702948236658134783353303381437753") + +// xiTo2PMinus2Over3 is ξ^((2p-2)/3) where ξ = i+3. +var xiTo2PMinus2Over3 = &gfP2{bigFromBase10("19885131339612776214803633203834694332692106372356013117629940868870585019582"), bigFromBase10("21645619881471562101905880913352894726728173167203616652430647841922248593627")} diff --git a/vendor/golang.org/x/crypto/bn256/curve.go b/vendor/golang.org/x/crypto/bn256/curve.go new file mode 100644 index 0000000..55b7063 --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/curve.go @@ -0,0 +1,278 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bn256 + +import ( + "math/big" +) + +// curvePoint implements the elliptic curve y²=x³+3. Points are kept in +// Jacobian form and t=z² when valid. G₁ is the set of points of this curve on +// GF(p). +type curvePoint struct { + x, y, z, t *big.Int +} + +var curveB = new(big.Int).SetInt64(3) + +// curveGen is the generator of G₁. +var curveGen = &curvePoint{ + new(big.Int).SetInt64(1), + new(big.Int).SetInt64(-2), + new(big.Int).SetInt64(1), + new(big.Int).SetInt64(1), +} + +func newCurvePoint(pool *bnPool) *curvePoint { + return &curvePoint{ + pool.Get(), + pool.Get(), + pool.Get(), + pool.Get(), + } +} + +func (c *curvePoint) String() string { + c.MakeAffine(new(bnPool)) + return "(" + c.x.String() + ", " + c.y.String() + ")" +} + +func (c *curvePoint) Put(pool *bnPool) { + pool.Put(c.x) + pool.Put(c.y) + pool.Put(c.z) + pool.Put(c.t) +} + +func (c *curvePoint) Set(a *curvePoint) { + c.x.Set(a.x) + c.y.Set(a.y) + c.z.Set(a.z) + c.t.Set(a.t) +} + +// IsOnCurve returns true iff c is on the curve where c must be in affine form. +func (c *curvePoint) IsOnCurve() bool { + yy := new(big.Int).Mul(c.y, c.y) + xxx := new(big.Int).Mul(c.x, c.x) + xxx.Mul(xxx, c.x) + yy.Sub(yy, xxx) + yy.Sub(yy, curveB) + if yy.Sign() < 0 || yy.Cmp(p) >= 0 { + yy.Mod(yy, p) + } + return yy.Sign() == 0 +} + +func (c *curvePoint) SetInfinity() { + c.z.SetInt64(0) +} + +func (c *curvePoint) IsInfinity() bool { + return c.z.Sign() == 0 +} + +func (c *curvePoint) Add(a, b *curvePoint, pool *bnPool) { + if a.IsInfinity() { + c.Set(b) + return + } + if b.IsInfinity() { + c.Set(a) + return + } + + // See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3 + + // Normalize the points by replacing a = [x1:y1:z1] and b = [x2:y2:z2] + // by [u1:s1:z1·z2] and [u2:s2:z1·z2] + // where u1 = x1·z2², s1 = y1·z2³ and u1 = x2·z1², s2 = y2·z1³ + z1z1 := pool.Get().Mul(a.z, a.z) + z1z1.Mod(z1z1, p) + z2z2 := pool.Get().Mul(b.z, b.z) + z2z2.Mod(z2z2, p) + u1 := pool.Get().Mul(a.x, z2z2) + u1.Mod(u1, p) + u2 := pool.Get().Mul(b.x, z1z1) + u2.Mod(u2, p) + + t := pool.Get().Mul(b.z, z2z2) + t.Mod(t, p) + s1 := pool.Get().Mul(a.y, t) + s1.Mod(s1, p) + + t.Mul(a.z, z1z1) + t.Mod(t, p) + s2 := pool.Get().Mul(b.y, t) + s2.Mod(s2, p) + + // Compute x = (2h)²(s²-u1-u2) + // where s = (s2-s1)/(u2-u1) is the slope of the line through + // (u1,s1) and (u2,s2). The extra factor 2h = 2(u2-u1) comes from the value of z below. + // This is also: + // 4(s2-s1)² - 4h²(u1+u2) = 4(s2-s1)² - 4h³ - 4h²(2u1) + // = r² - j - 2v + // with the notations below. + h := pool.Get().Sub(u2, u1) + xEqual := h.Sign() == 0 + + t.Add(h, h) + // i = 4h² + i := pool.Get().Mul(t, t) + i.Mod(i, p) + // j = 4h³ + j := pool.Get().Mul(h, i) + j.Mod(j, p) + + t.Sub(s2, s1) + yEqual := t.Sign() == 0 + if xEqual && yEqual { + c.Double(a, pool) + return + } + r := pool.Get().Add(t, t) + + v := pool.Get().Mul(u1, i) + v.Mod(v, p) + + // t4 = 4(s2-s1)² + t4 := pool.Get().Mul(r, r) + t4.Mod(t4, p) + t.Add(v, v) + t6 := pool.Get().Sub(t4, j) + c.x.Sub(t6, t) + + // Set y = -(2h)³(s1 + s*(x/4h²-u1)) + // This is also + // y = - 2·s1·j - (s2-s1)(2x - 2i·u1) = r(v-x) - 2·s1·j + t.Sub(v, c.x) // t7 + t4.Mul(s1, j) // t8 + t4.Mod(t4, p) + t6.Add(t4, t4) // t9 + t4.Mul(r, t) // t10 + t4.Mod(t4, p) + c.y.Sub(t4, t6) + + // Set z = 2(u2-u1)·z1·z2 = 2h·z1·z2 + t.Add(a.z, b.z) // t11 + t4.Mul(t, t) // t12 + t4.Mod(t4, p) + t.Sub(t4, z1z1) // t13 + t4.Sub(t, z2z2) // t14 + c.z.Mul(t4, h) + c.z.Mod(c.z, p) + + pool.Put(z1z1) + pool.Put(z2z2) + pool.Put(u1) + pool.Put(u2) + pool.Put(t) + pool.Put(s1) + pool.Put(s2) + pool.Put(h) + pool.Put(i) + pool.Put(j) + pool.Put(r) + pool.Put(v) + pool.Put(t4) + pool.Put(t6) +} + +func (c *curvePoint) Double(a *curvePoint, pool *bnPool) { + // See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3 + A := pool.Get().Mul(a.x, a.x) + A.Mod(A, p) + B := pool.Get().Mul(a.y, a.y) + B.Mod(B, p) + C := pool.Get().Mul(B, B) + C.Mod(C, p) + + t := pool.Get().Add(a.x, B) + t2 := pool.Get().Mul(t, t) + t2.Mod(t2, p) + t.Sub(t2, A) + t2.Sub(t, C) + d := pool.Get().Add(t2, t2) + t.Add(A, A) + e := pool.Get().Add(t, A) + f := pool.Get().Mul(e, e) + f.Mod(f, p) + + t.Add(d, d) + c.x.Sub(f, t) + + t.Add(C, C) + t2.Add(t, t) + t.Add(t2, t2) + c.y.Sub(d, c.x) + t2.Mul(e, c.y) + t2.Mod(t2, p) + c.y.Sub(t2, t) + + t.Mul(a.y, a.z) + t.Mod(t, p) + c.z.Add(t, t) + + pool.Put(A) + pool.Put(B) + pool.Put(C) + pool.Put(t) + pool.Put(t2) + pool.Put(d) + pool.Put(e) + pool.Put(f) +} + +func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoint { + sum := newCurvePoint(pool) + sum.SetInfinity() + t := newCurvePoint(pool) + + for i := scalar.BitLen(); i >= 0; i-- { + t.Double(sum, pool) + if scalar.Bit(i) != 0 { + sum.Add(t, a, pool) + } else { + sum.Set(t) + } + } + + c.Set(sum) + sum.Put(pool) + t.Put(pool) + return c +} + +func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint { + if words := c.z.Bits(); len(words) == 1 && words[0] == 1 { + return c + } + + zInv := pool.Get().ModInverse(c.z, p) + t := pool.Get().Mul(c.y, zInv) + t.Mod(t, p) + zInv2 := pool.Get().Mul(zInv, zInv) + zInv2.Mod(zInv2, p) + c.y.Mul(t, zInv2) + c.y.Mod(c.y, p) + t.Mul(c.x, zInv2) + t.Mod(t, p) + c.x.Set(t) + c.z.SetInt64(1) + c.t.SetInt64(1) + + pool.Put(zInv) + pool.Put(t) + pool.Put(zInv2) + + return c +} + +func (c *curvePoint) Negative(a *curvePoint) { + c.x.Set(a.x) + c.y.Neg(a.y) + c.z.Set(a.z) + c.t.SetInt64(0) +} diff --git a/vendor/golang.org/x/crypto/bn256/example_test.go b/vendor/golang.org/x/crypto/bn256/example_test.go new file mode 100644 index 0000000..b2d1980 --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/example_test.go @@ -0,0 +1,43 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bn256 + +import ( + "crypto/rand" +) + +func ExamplePair() { + // This implements the tripartite Diffie-Hellman algorithm from "A One + // Round Protocol for Tripartite Diffie-Hellman", A. Joux. + // http://www.springerlink.com/content/cddc57yyva0hburb/fulltext.pdf + + // Each of three parties, a, b and c, generate a private value. + a, _ := rand.Int(rand.Reader, Order) + b, _ := rand.Int(rand.Reader, Order) + c, _ := rand.Int(rand.Reader, Order) + + // Then each party calculates g₁ and g₂ times their private value. + pa := new(G1).ScalarBaseMult(a) + qa := new(G2).ScalarBaseMult(a) + + pb := new(G1).ScalarBaseMult(b) + qb := new(G2).ScalarBaseMult(b) + + pc := new(G1).ScalarBaseMult(c) + qc := new(G2).ScalarBaseMult(c) + + // Now each party exchanges its public values with the other two and + // all parties can calculate the shared key. + k1 := Pair(pb, qc) + k1.ScalarMult(k1, a) + + k2 := Pair(pc, qa) + k2.ScalarMult(k2, b) + + k3 := Pair(pa, qb) + k3.ScalarMult(k3, c) + + // k1, k2 and k3 will all be equal. +} diff --git a/vendor/golang.org/x/crypto/bn256/gfp12.go b/vendor/golang.org/x/crypto/bn256/gfp12.go new file mode 100644 index 0000000..f084edd --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/gfp12.go @@ -0,0 +1,200 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bn256 + +// For details of the algorithms used, see "Multiplication and Squaring on +// Pairing-Friendly Fields, Devegili et al. +// http://eprint.iacr.org/2006/471.pdf. + +import ( + "math/big" +) + +// gfP12 implements the field of size p¹² as a quadratic extension of gfP6 +// where ω²=τ. +type gfP12 struct { + x, y *gfP6 // value is xω + y +} + +func newGFp12(pool *bnPool) *gfP12 { + return &gfP12{newGFp6(pool), newGFp6(pool)} +} + +func (e *gfP12) String() string { + return "(" + e.x.String() + "," + e.y.String() + ")" +} + +func (e *gfP12) Put(pool *bnPool) { + e.x.Put(pool) + e.y.Put(pool) +} + +func (e *gfP12) Set(a *gfP12) *gfP12 { + e.x.Set(a.x) + e.y.Set(a.y) + return e +} + +func (e *gfP12) SetZero() *gfP12 { + e.x.SetZero() + e.y.SetZero() + return e +} + +func (e *gfP12) SetOne() *gfP12 { + e.x.SetZero() + e.y.SetOne() + return e +} + +func (e *gfP12) Minimal() { + e.x.Minimal() + e.y.Minimal() +} + +func (e *gfP12) IsZero() bool { + e.Minimal() + return e.x.IsZero() && e.y.IsZero() +} + +func (e *gfP12) IsOne() bool { + e.Minimal() + return e.x.IsZero() && e.y.IsOne() +} + +func (e *gfP12) Conjugate(a *gfP12) *gfP12 { + e.x.Negative(a.x) + e.y.Set(a.y) + return a +} + +func (e *gfP12) Negative(a *gfP12) *gfP12 { + e.x.Negative(a.x) + e.y.Negative(a.y) + return e +} + +// Frobenius computes (xω+y)^p = x^p ω·ξ^((p-1)/6) + y^p +func (e *gfP12) Frobenius(a *gfP12, pool *bnPool) *gfP12 { + e.x.Frobenius(a.x, pool) + e.y.Frobenius(a.y, pool) + e.x.MulScalar(e.x, xiToPMinus1Over6, pool) + return e +} + +// FrobeniusP2 computes (xω+y)^p² = x^p² ω·ξ^((p²-1)/6) + y^p² +func (e *gfP12) FrobeniusP2(a *gfP12, pool *bnPool) *gfP12 { + e.x.FrobeniusP2(a.x) + e.x.MulGFP(e.x, xiToPSquaredMinus1Over6) + e.y.FrobeniusP2(a.y) + return e +} + +func (e *gfP12) Add(a, b *gfP12) *gfP12 { + e.x.Add(a.x, b.x) + e.y.Add(a.y, b.y) + return e +} + +func (e *gfP12) Sub(a, b *gfP12) *gfP12 { + e.x.Sub(a.x, b.x) + e.y.Sub(a.y, b.y) + return e +} + +func (e *gfP12) Mul(a, b *gfP12, pool *bnPool) *gfP12 { + tx := newGFp6(pool) + tx.Mul(a.x, b.y, pool) + t := newGFp6(pool) + t.Mul(b.x, a.y, pool) + tx.Add(tx, t) + + ty := newGFp6(pool) + ty.Mul(a.y, b.y, pool) + t.Mul(a.x, b.x, pool) + t.MulTau(t, pool) + e.y.Add(ty, t) + e.x.Set(tx) + + tx.Put(pool) + ty.Put(pool) + t.Put(pool) + return e +} + +func (e *gfP12) MulScalar(a *gfP12, b *gfP6, pool *bnPool) *gfP12 { + e.x.Mul(e.x, b, pool) + e.y.Mul(e.y, b, pool) + return e +} + +func (c *gfP12) Exp(a *gfP12, power *big.Int, pool *bnPool) *gfP12 { + sum := newGFp12(pool) + sum.SetOne() + t := newGFp12(pool) + + for i := power.BitLen() - 1; i >= 0; i-- { + t.Square(sum, pool) + if power.Bit(i) != 0 { + sum.Mul(t, a, pool) + } else { + sum.Set(t) + } + } + + c.Set(sum) + + sum.Put(pool) + t.Put(pool) + + return c +} + +func (e *gfP12) Square(a *gfP12, pool *bnPool) *gfP12 { + // Complex squaring algorithm + v0 := newGFp6(pool) + v0.Mul(a.x, a.y, pool) + + t := newGFp6(pool) + t.MulTau(a.x, pool) + t.Add(a.y, t) + ty := newGFp6(pool) + ty.Add(a.x, a.y) + ty.Mul(ty, t, pool) + ty.Sub(ty, v0) + t.MulTau(v0, pool) + ty.Sub(ty, t) + + e.y.Set(ty) + e.x.Double(v0) + + v0.Put(pool) + t.Put(pool) + ty.Put(pool) + + return e +} + +func (e *gfP12) Invert(a *gfP12, pool *bnPool) *gfP12 { + // See "Implementing cryptographic pairings", M. Scott, section 3.2. + // ftp://136.206.11.249/pub/crypto/pairings.pdf + t1 := newGFp6(pool) + t2 := newGFp6(pool) + + t1.Square(a.x, pool) + t2.Square(a.y, pool) + t1.MulTau(t1, pool) + t1.Sub(t2, t1) + t2.Invert(t1, pool) + + e.x.Negative(a.x) + e.y.Set(a.y) + e.MulScalar(e, t2, pool) + + t1.Put(pool) + t2.Put(pool) + + return e +} diff --git a/vendor/golang.org/x/crypto/bn256/gfp2.go b/vendor/golang.org/x/crypto/bn256/gfp2.go new file mode 100644 index 0000000..97f3f1f --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/gfp2.go @@ -0,0 +1,219 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bn256 + +// For details of the algorithms used, see "Multiplication and Squaring on +// Pairing-Friendly Fields, Devegili et al. +// http://eprint.iacr.org/2006/471.pdf. + +import ( + "math/big" +) + +// gfP2 implements a field of size p² as a quadratic extension of the base +// field where i²=-1. +type gfP2 struct { + x, y *big.Int // value is xi+y. +} + +func newGFp2(pool *bnPool) *gfP2 { + return &gfP2{pool.Get(), pool.Get()} +} + +func (e *gfP2) String() string { + x := new(big.Int).Mod(e.x, p) + y := new(big.Int).Mod(e.y, p) + return "(" + x.String() + "," + y.String() + ")" +} + +func (e *gfP2) Put(pool *bnPool) { + pool.Put(e.x) + pool.Put(e.y) +} + +func (e *gfP2) Set(a *gfP2) *gfP2 { + e.x.Set(a.x) + e.y.Set(a.y) + return e +} + +func (e *gfP2) SetZero() *gfP2 { + e.x.SetInt64(0) + e.y.SetInt64(0) + return e +} + +func (e *gfP2) SetOne() *gfP2 { + e.x.SetInt64(0) + e.y.SetInt64(1) + return e +} + +func (e *gfP2) Minimal() { + if e.x.Sign() < 0 || e.x.Cmp(p) >= 0 { + e.x.Mod(e.x, p) + } + if e.y.Sign() < 0 || e.y.Cmp(p) >= 0 { + e.y.Mod(e.y, p) + } +} + +func (e *gfP2) IsZero() bool { + return e.x.Sign() == 0 && e.y.Sign() == 0 +} + +func (e *gfP2) IsOne() bool { + if e.x.Sign() != 0 { + return false + } + words := e.y.Bits() + return len(words) == 1 && words[0] == 1 +} + +func (e *gfP2) Conjugate(a *gfP2) *gfP2 { + e.y.Set(a.y) + e.x.Neg(a.x) + return e +} + +func (e *gfP2) Negative(a *gfP2) *gfP2 { + e.x.Neg(a.x) + e.y.Neg(a.y) + return e +} + +func (e *gfP2) Add(a, b *gfP2) *gfP2 { + e.x.Add(a.x, b.x) + e.y.Add(a.y, b.y) + return e +} + +func (e *gfP2) Sub(a, b *gfP2) *gfP2 { + e.x.Sub(a.x, b.x) + e.y.Sub(a.y, b.y) + return e +} + +func (e *gfP2) Double(a *gfP2) *gfP2 { + e.x.Lsh(a.x, 1) + e.y.Lsh(a.y, 1) + return e +} + +func (c *gfP2) Exp(a *gfP2, power *big.Int, pool *bnPool) *gfP2 { + sum := newGFp2(pool) + sum.SetOne() + t := newGFp2(pool) + + for i := power.BitLen() - 1; i >= 0; i-- { + t.Square(sum, pool) + if power.Bit(i) != 0 { + sum.Mul(t, a, pool) + } else { + sum.Set(t) + } + } + + c.Set(sum) + + sum.Put(pool) + t.Put(pool) + + return c +} + +// See "Multiplication and Squaring in Pairing-Friendly Fields", +// http://eprint.iacr.org/2006/471.pdf +func (e *gfP2) Mul(a, b *gfP2, pool *bnPool) *gfP2 { + tx := pool.Get().Mul(a.x, b.y) + t := pool.Get().Mul(b.x, a.y) + tx.Add(tx, t) + tx.Mod(tx, p) + + ty := pool.Get().Mul(a.y, b.y) + t.Mul(a.x, b.x) + ty.Sub(ty, t) + e.y.Mod(ty, p) + e.x.Set(tx) + + pool.Put(tx) + pool.Put(ty) + pool.Put(t) + + return e +} + +func (e *gfP2) MulScalar(a *gfP2, b *big.Int) *gfP2 { + e.x.Mul(a.x, b) + e.y.Mul(a.y, b) + return e +} + +// MulXi sets e=ξa where ξ=i+3 and then returns e. +func (e *gfP2) MulXi(a *gfP2, pool *bnPool) *gfP2 { + // (xi+y)(i+3) = (3x+y)i+(3y-x) + tx := pool.Get().Lsh(a.x, 1) + tx.Add(tx, a.x) + tx.Add(tx, a.y) + + ty := pool.Get().Lsh(a.y, 1) + ty.Add(ty, a.y) + ty.Sub(ty, a.x) + + e.x.Set(tx) + e.y.Set(ty) + + pool.Put(tx) + pool.Put(ty) + + return e +} + +func (e *gfP2) Square(a *gfP2, pool *bnPool) *gfP2 { + // Complex squaring algorithm: + // (xi+b)² = (x+y)(y-x) + 2*i*x*y + t1 := pool.Get().Sub(a.y, a.x) + t2 := pool.Get().Add(a.x, a.y) + ty := pool.Get().Mul(t1, t2) + ty.Mod(ty, p) + + t1.Mul(a.x, a.y) + t1.Lsh(t1, 1) + + e.x.Mod(t1, p) + e.y.Set(ty) + + pool.Put(t1) + pool.Put(t2) + pool.Put(ty) + + return e +} + +func (e *gfP2) Invert(a *gfP2, pool *bnPool) *gfP2 { + // See "Implementing cryptographic pairings", M. Scott, section 3.2. + // ftp://136.206.11.249/pub/crypto/pairings.pdf + t := pool.Get() + t.Mul(a.y, a.y) + t2 := pool.Get() + t2.Mul(a.x, a.x) + t.Add(t, t2) + + inv := pool.Get() + inv.ModInverse(t, p) + + e.x.Neg(a.x) + e.x.Mul(e.x, inv) + e.x.Mod(e.x, p) + + e.y.Mul(a.y, inv) + e.y.Mod(e.y, p) + + pool.Put(t) + pool.Put(t2) + pool.Put(inv) + + return e +} diff --git a/vendor/golang.org/x/crypto/bn256/gfp6.go b/vendor/golang.org/x/crypto/bn256/gfp6.go new file mode 100644 index 0000000..f98ae78 --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/gfp6.go @@ -0,0 +1,296 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bn256 + +// For details of the algorithms used, see "Multiplication and Squaring on +// Pairing-Friendly Fields, Devegili et al. +// http://eprint.iacr.org/2006/471.pdf. + +import ( + "math/big" +) + +// gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ +// and ξ=i+3. +type gfP6 struct { + x, y, z *gfP2 // value is xτ² + yτ + z +} + +func newGFp6(pool *bnPool) *gfP6 { + return &gfP6{newGFp2(pool), newGFp2(pool), newGFp2(pool)} +} + +func (e *gfP6) String() string { + return "(" + e.x.String() + "," + e.y.String() + "," + e.z.String() + ")" +} + +func (e *gfP6) Put(pool *bnPool) { + e.x.Put(pool) + e.y.Put(pool) + e.z.Put(pool) +} + +func (e *gfP6) Set(a *gfP6) *gfP6 { + e.x.Set(a.x) + e.y.Set(a.y) + e.z.Set(a.z) + return e +} + +func (e *gfP6) SetZero() *gfP6 { + e.x.SetZero() + e.y.SetZero() + e.z.SetZero() + return e +} + +func (e *gfP6) SetOne() *gfP6 { + e.x.SetZero() + e.y.SetZero() + e.z.SetOne() + return e +} + +func (e *gfP6) Minimal() { + e.x.Minimal() + e.y.Minimal() + e.z.Minimal() +} + +func (e *gfP6) IsZero() bool { + return e.x.IsZero() && e.y.IsZero() && e.z.IsZero() +} + +func (e *gfP6) IsOne() bool { + return e.x.IsZero() && e.y.IsZero() && e.z.IsOne() +} + +func (e *gfP6) Negative(a *gfP6) *gfP6 { + e.x.Negative(a.x) + e.y.Negative(a.y) + e.z.Negative(a.z) + return e +} + +func (e *gfP6) Frobenius(a *gfP6, pool *bnPool) *gfP6 { + e.x.Conjugate(a.x) + e.y.Conjugate(a.y) + e.z.Conjugate(a.z) + + e.x.Mul(e.x, xiTo2PMinus2Over3, pool) + e.y.Mul(e.y, xiToPMinus1Over3, pool) + return e +} + +// FrobeniusP2 computes (xτ²+yτ+z)^(p²) = xτ^(2p²) + yτ^(p²) + z +func (e *gfP6) FrobeniusP2(a *gfP6) *gfP6 { + // τ^(2p²) = τ²τ^(2p²-2) = τ²ξ^((2p²-2)/3) + e.x.MulScalar(a.x, xiTo2PSquaredMinus2Over3) + // τ^(p²) = ττ^(p²-1) = τξ^((p²-1)/3) + e.y.MulScalar(a.y, xiToPSquaredMinus1Over3) + e.z.Set(a.z) + return e +} + +func (e *gfP6) Add(a, b *gfP6) *gfP6 { + e.x.Add(a.x, b.x) + e.y.Add(a.y, b.y) + e.z.Add(a.z, b.z) + return e +} + +func (e *gfP6) Sub(a, b *gfP6) *gfP6 { + e.x.Sub(a.x, b.x) + e.y.Sub(a.y, b.y) + e.z.Sub(a.z, b.z) + return e +} + +func (e *gfP6) Double(a *gfP6) *gfP6 { + e.x.Double(a.x) + e.y.Double(a.y) + e.z.Double(a.z) + return e +} + +func (e *gfP6) Mul(a, b *gfP6, pool *bnPool) *gfP6 { + // "Multiplication and Squaring on Pairing-Friendly Fields" + // Section 4, Karatsuba method. + // http://eprint.iacr.org/2006/471.pdf + + v0 := newGFp2(pool) + v0.Mul(a.z, b.z, pool) + v1 := newGFp2(pool) + v1.Mul(a.y, b.y, pool) + v2 := newGFp2(pool) + v2.Mul(a.x, b.x, pool) + + t0 := newGFp2(pool) + t0.Add(a.x, a.y) + t1 := newGFp2(pool) + t1.Add(b.x, b.y) + tz := newGFp2(pool) + tz.Mul(t0, t1, pool) + + tz.Sub(tz, v1) + tz.Sub(tz, v2) + tz.MulXi(tz, pool) + tz.Add(tz, v0) + + t0.Add(a.y, a.z) + t1.Add(b.y, b.z) + ty := newGFp2(pool) + ty.Mul(t0, t1, pool) + ty.Sub(ty, v0) + ty.Sub(ty, v1) + t0.MulXi(v2, pool) + ty.Add(ty, t0) + + t0.Add(a.x, a.z) + t1.Add(b.x, b.z) + tx := newGFp2(pool) + tx.Mul(t0, t1, pool) + tx.Sub(tx, v0) + tx.Add(tx, v1) + tx.Sub(tx, v2) + + e.x.Set(tx) + e.y.Set(ty) + e.z.Set(tz) + + t0.Put(pool) + t1.Put(pool) + tx.Put(pool) + ty.Put(pool) + tz.Put(pool) + v0.Put(pool) + v1.Put(pool) + v2.Put(pool) + return e +} + +func (e *gfP6) MulScalar(a *gfP6, b *gfP2, pool *bnPool) *gfP6 { + e.x.Mul(a.x, b, pool) + e.y.Mul(a.y, b, pool) + e.z.Mul(a.z, b, pool) + return e +} + +func (e *gfP6) MulGFP(a *gfP6, b *big.Int) *gfP6 { + e.x.MulScalar(a.x, b) + e.y.MulScalar(a.y, b) + e.z.MulScalar(a.z, b) + return e +} + +// MulTau computes τ·(aτ²+bτ+c) = bτ²+cτ+aξ +func (e *gfP6) MulTau(a *gfP6, pool *bnPool) { + tz := newGFp2(pool) + tz.MulXi(a.x, pool) + ty := newGFp2(pool) + ty.Set(a.y) + e.y.Set(a.z) + e.x.Set(ty) + e.z.Set(tz) + tz.Put(pool) + ty.Put(pool) +} + +func (e *gfP6) Square(a *gfP6, pool *bnPool) *gfP6 { + v0 := newGFp2(pool).Square(a.z, pool) + v1 := newGFp2(pool).Square(a.y, pool) + v2 := newGFp2(pool).Square(a.x, pool) + + c0 := newGFp2(pool).Add(a.x, a.y) + c0.Square(c0, pool) + c0.Sub(c0, v1) + c0.Sub(c0, v2) + c0.MulXi(c0, pool) + c0.Add(c0, v0) + + c1 := newGFp2(pool).Add(a.y, a.z) + c1.Square(c1, pool) + c1.Sub(c1, v0) + c1.Sub(c1, v1) + xiV2 := newGFp2(pool).MulXi(v2, pool) + c1.Add(c1, xiV2) + + c2 := newGFp2(pool).Add(a.x, a.z) + c2.Square(c2, pool) + c2.Sub(c2, v0) + c2.Add(c2, v1) + c2.Sub(c2, v2) + + e.x.Set(c2) + e.y.Set(c1) + e.z.Set(c0) + + v0.Put(pool) + v1.Put(pool) + v2.Put(pool) + c0.Put(pool) + c1.Put(pool) + c2.Put(pool) + xiV2.Put(pool) + + return e +} + +func (e *gfP6) Invert(a *gfP6, pool *bnPool) *gfP6 { + // See "Implementing cryptographic pairings", M. Scott, section 3.2. + // ftp://136.206.11.249/pub/crypto/pairings.pdf + + // Here we can give a short explanation of how it works: let j be a cubic root of + // unity in GF(p²) so that 1+j+j²=0. + // Then (xτ² + yτ + z)(xj²τ² + yjτ + z)(xjτ² + yj²τ + z) + // = (xτ² + yτ + z)(Cτ²+Bτ+A) + // = (x³ξ²+y³ξ+z³-3ξxyz) = F is an element of the base field (the norm). + // + // On the other hand (xj²τ² + yjτ + z)(xjτ² + yj²τ + z) + // = τ²(y²-ξxz) + τ(ξx²-yz) + (z²-ξxy) + // + // So that's why A = (z²-ξxy), B = (ξx²-yz), C = (y²-ξxz) + t1 := newGFp2(pool) + + A := newGFp2(pool) + A.Square(a.z, pool) + t1.Mul(a.x, a.y, pool) + t1.MulXi(t1, pool) + A.Sub(A, t1) + + B := newGFp2(pool) + B.Square(a.x, pool) + B.MulXi(B, pool) + t1.Mul(a.y, a.z, pool) + B.Sub(B, t1) + + C := newGFp2(pool) + C.Square(a.y, pool) + t1.Mul(a.x, a.z, pool) + C.Sub(C, t1) + + F := newGFp2(pool) + F.Mul(C, a.y, pool) + F.MulXi(F, pool) + t1.Mul(A, a.z, pool) + F.Add(F, t1) + t1.Mul(B, a.x, pool) + t1.MulXi(t1, pool) + F.Add(F, t1) + + F.Invert(F, pool) + + e.x.Mul(C, F, pool) + e.y.Mul(B, F, pool) + e.z.Mul(A, F, pool) + + t1.Put(pool) + A.Put(pool) + B.Put(pool) + C.Put(pool) + F.Put(pool) + + return e +} diff --git a/vendor/golang.org/x/crypto/bn256/optate.go b/vendor/golang.org/x/crypto/bn256/optate.go new file mode 100644 index 0000000..7ae0746 --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/optate.go @@ -0,0 +1,395 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bn256 + +func lineFunctionAdd(r, p *twistPoint, q *curvePoint, r2 *gfP2, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) { + // See the mixed addition algorithm from "Faster Computation of the + // Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf + + B := newGFp2(pool).Mul(p.x, r.t, pool) + + D := newGFp2(pool).Add(p.y, r.z) + D.Square(D, pool) + D.Sub(D, r2) + D.Sub(D, r.t) + D.Mul(D, r.t, pool) + + H := newGFp2(pool).Sub(B, r.x) + I := newGFp2(pool).Square(H, pool) + + E := newGFp2(pool).Add(I, I) + E.Add(E, E) + + J := newGFp2(pool).Mul(H, E, pool) + + L1 := newGFp2(pool).Sub(D, r.y) + L1.Sub(L1, r.y) + + V := newGFp2(pool).Mul(r.x, E, pool) + + rOut = newTwistPoint(pool) + rOut.x.Square(L1, pool) + rOut.x.Sub(rOut.x, J) + rOut.x.Sub(rOut.x, V) + rOut.x.Sub(rOut.x, V) + + rOut.z.Add(r.z, H) + rOut.z.Square(rOut.z, pool) + rOut.z.Sub(rOut.z, r.t) + rOut.z.Sub(rOut.z, I) + + t := newGFp2(pool).Sub(V, rOut.x) + t.Mul(t, L1, pool) + t2 := newGFp2(pool).Mul(r.y, J, pool) + t2.Add(t2, t2) + rOut.y.Sub(t, t2) + + rOut.t.Square(rOut.z, pool) + + t.Add(p.y, rOut.z) + t.Square(t, pool) + t.Sub(t, r2) + t.Sub(t, rOut.t) + + t2.Mul(L1, p.x, pool) + t2.Add(t2, t2) + a = newGFp2(pool) + a.Sub(t2, t) + + c = newGFp2(pool) + c.MulScalar(rOut.z, q.y) + c.Add(c, c) + + b = newGFp2(pool) + b.SetZero() + b.Sub(b, L1) + b.MulScalar(b, q.x) + b.Add(b, b) + + B.Put(pool) + D.Put(pool) + H.Put(pool) + I.Put(pool) + E.Put(pool) + J.Put(pool) + L1.Put(pool) + V.Put(pool) + t.Put(pool) + t2.Put(pool) + + return +} + +func lineFunctionDouble(r *twistPoint, q *curvePoint, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) { + // See the doubling algorithm for a=0 from "Faster Computation of the + // Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf + + A := newGFp2(pool).Square(r.x, pool) + B := newGFp2(pool).Square(r.y, pool) + C := newGFp2(pool).Square(B, pool) + + D := newGFp2(pool).Add(r.x, B) + D.Square(D, pool) + D.Sub(D, A) + D.Sub(D, C) + D.Add(D, D) + + E := newGFp2(pool).Add(A, A) + E.Add(E, A) + + G := newGFp2(pool).Square(E, pool) + + rOut = newTwistPoint(pool) + rOut.x.Sub(G, D) + rOut.x.Sub(rOut.x, D) + + rOut.z.Add(r.y, r.z) + rOut.z.Square(rOut.z, pool) + rOut.z.Sub(rOut.z, B) + rOut.z.Sub(rOut.z, r.t) + + rOut.y.Sub(D, rOut.x) + rOut.y.Mul(rOut.y, E, pool) + t := newGFp2(pool).Add(C, C) + t.Add(t, t) + t.Add(t, t) + rOut.y.Sub(rOut.y, t) + + rOut.t.Square(rOut.z, pool) + + t.Mul(E, r.t, pool) + t.Add(t, t) + b = newGFp2(pool) + b.SetZero() + b.Sub(b, t) + b.MulScalar(b, q.x) + + a = newGFp2(pool) + a.Add(r.x, E) + a.Square(a, pool) + a.Sub(a, A) + a.Sub(a, G) + t.Add(B, B) + t.Add(t, t) + a.Sub(a, t) + + c = newGFp2(pool) + c.Mul(rOut.z, r.t, pool) + c.Add(c, c) + c.MulScalar(c, q.y) + + A.Put(pool) + B.Put(pool) + C.Put(pool) + D.Put(pool) + E.Put(pool) + G.Put(pool) + t.Put(pool) + + return +} + +func mulLine(ret *gfP12, a, b, c *gfP2, pool *bnPool) { + a2 := newGFp6(pool) + a2.x.SetZero() + a2.y.Set(a) + a2.z.Set(b) + a2.Mul(a2, ret.x, pool) + t3 := newGFp6(pool).MulScalar(ret.y, c, pool) + + t := newGFp2(pool) + t.Add(b, c) + t2 := newGFp6(pool) + t2.x.SetZero() + t2.y.Set(a) + t2.z.Set(t) + ret.x.Add(ret.x, ret.y) + + ret.y.Set(t3) + + ret.x.Mul(ret.x, t2, pool) + ret.x.Sub(ret.x, a2) + ret.x.Sub(ret.x, ret.y) + a2.MulTau(a2, pool) + ret.y.Add(ret.y, a2) + + a2.Put(pool) + t3.Put(pool) + t2.Put(pool) + t.Put(pool) +} + +// sixuPlus2NAF is 6u+2 in non-adjacent form. +var sixuPlus2NAF = []int8{0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, -1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 1} + +// miller implements the Miller loop for calculating the Optimal Ate pairing. +// See algorithm 1 from http://cryptojedi.org/papers/dclxvi-20100714.pdf +func miller(q *twistPoint, p *curvePoint, pool *bnPool) *gfP12 { + ret := newGFp12(pool) + ret.SetOne() + + aAffine := newTwistPoint(pool) + aAffine.Set(q) + aAffine.MakeAffine(pool) + + bAffine := newCurvePoint(pool) + bAffine.Set(p) + bAffine.MakeAffine(pool) + + minusA := newTwistPoint(pool) + minusA.Negative(aAffine, pool) + + r := newTwistPoint(pool) + r.Set(aAffine) + + r2 := newGFp2(pool) + r2.Square(aAffine.y, pool) + + for i := len(sixuPlus2NAF) - 1; i > 0; i-- { + a, b, c, newR := lineFunctionDouble(r, bAffine, pool) + if i != len(sixuPlus2NAF)-1 { + ret.Square(ret, pool) + } + + mulLine(ret, a, b, c, pool) + a.Put(pool) + b.Put(pool) + c.Put(pool) + r.Put(pool) + r = newR + + switch sixuPlus2NAF[i-1] { + case 1: + a, b, c, newR = lineFunctionAdd(r, aAffine, bAffine, r2, pool) + case -1: + a, b, c, newR = lineFunctionAdd(r, minusA, bAffine, r2, pool) + default: + continue + } + + mulLine(ret, a, b, c, pool) + a.Put(pool) + b.Put(pool) + c.Put(pool) + r.Put(pool) + r = newR + } + + // In order to calculate Q1 we have to convert q from the sextic twist + // to the full GF(p^12) group, apply the Frobenius there, and convert + // back. + // + // The twist isomorphism is (x', y') -> (xω², yω³). If we consider just + // x for a moment, then after applying the Frobenius, we have x̄ω^(2p) + // where x̄ is the conjugate of x. If we are going to apply the inverse + // isomorphism we need a value with a single coefficient of ω² so we + // rewrite this as x̄ω^(2p-2)ω². ξ⁶ = ω and, due to the construction of + // p, 2p-2 is a multiple of six. Therefore we can rewrite as + // x̄ξ^((p-1)/3)ω² and applying the inverse isomorphism eliminates the + // ω². + // + // A similar argument can be made for the y value. + + q1 := newTwistPoint(pool) + q1.x.Conjugate(aAffine.x) + q1.x.Mul(q1.x, xiToPMinus1Over3, pool) + q1.y.Conjugate(aAffine.y) + q1.y.Mul(q1.y, xiToPMinus1Over2, pool) + q1.z.SetOne() + q1.t.SetOne() + + // For Q2 we are applying the p² Frobenius. The two conjugations cancel + // out and we are left only with the factors from the isomorphism. In + // the case of x, we end up with a pure number which is why + // xiToPSquaredMinus1Over3 is ∈ GF(p). With y we get a factor of -1. We + // ignore this to end up with -Q2. + + minusQ2 := newTwistPoint(pool) + minusQ2.x.MulScalar(aAffine.x, xiToPSquaredMinus1Over3) + minusQ2.y.Set(aAffine.y) + minusQ2.z.SetOne() + minusQ2.t.SetOne() + + r2.Square(q1.y, pool) + a, b, c, newR := lineFunctionAdd(r, q1, bAffine, r2, pool) + mulLine(ret, a, b, c, pool) + a.Put(pool) + b.Put(pool) + c.Put(pool) + r.Put(pool) + r = newR + + r2.Square(minusQ2.y, pool) + a, b, c, newR = lineFunctionAdd(r, minusQ2, bAffine, r2, pool) + mulLine(ret, a, b, c, pool) + a.Put(pool) + b.Put(pool) + c.Put(pool) + r.Put(pool) + r = newR + + aAffine.Put(pool) + bAffine.Put(pool) + minusA.Put(pool) + r.Put(pool) + r2.Put(pool) + + return ret +} + +// finalExponentiation computes the (p¹²-1)/Order-th power of an element of +// GF(p¹²) to obtain an element of GT (steps 13-15 of algorithm 1 from +// http://cryptojedi.org/papers/dclxvi-20100714.pdf) +func finalExponentiation(in *gfP12, pool *bnPool) *gfP12 { + t1 := newGFp12(pool) + + // This is the p^6-Frobenius + t1.x.Negative(in.x) + t1.y.Set(in.y) + + inv := newGFp12(pool) + inv.Invert(in, pool) + t1.Mul(t1, inv, pool) + + t2 := newGFp12(pool).FrobeniusP2(t1, pool) + t1.Mul(t1, t2, pool) + + fp := newGFp12(pool).Frobenius(t1, pool) + fp2 := newGFp12(pool).FrobeniusP2(t1, pool) + fp3 := newGFp12(pool).Frobenius(fp2, pool) + + fu, fu2, fu3 := newGFp12(pool), newGFp12(pool), newGFp12(pool) + fu.Exp(t1, u, pool) + fu2.Exp(fu, u, pool) + fu3.Exp(fu2, u, pool) + + y3 := newGFp12(pool).Frobenius(fu, pool) + fu2p := newGFp12(pool).Frobenius(fu2, pool) + fu3p := newGFp12(pool).Frobenius(fu3, pool) + y2 := newGFp12(pool).FrobeniusP2(fu2, pool) + + y0 := newGFp12(pool) + y0.Mul(fp, fp2, pool) + y0.Mul(y0, fp3, pool) + + y1, y4, y5 := newGFp12(pool), newGFp12(pool), newGFp12(pool) + y1.Conjugate(t1) + y5.Conjugate(fu2) + y3.Conjugate(y3) + y4.Mul(fu, fu2p, pool) + y4.Conjugate(y4) + + y6 := newGFp12(pool) + y6.Mul(fu3, fu3p, pool) + y6.Conjugate(y6) + + t0 := newGFp12(pool) + t0.Square(y6, pool) + t0.Mul(t0, y4, pool) + t0.Mul(t0, y5, pool) + t1.Mul(y3, y5, pool) + t1.Mul(t1, t0, pool) + t0.Mul(t0, y2, pool) + t1.Square(t1, pool) + t1.Mul(t1, t0, pool) + t1.Square(t1, pool) + t0.Mul(t1, y1, pool) + t1.Mul(t1, y0, pool) + t0.Square(t0, pool) + t0.Mul(t0, t1, pool) + + inv.Put(pool) + t1.Put(pool) + t2.Put(pool) + fp.Put(pool) + fp2.Put(pool) + fp3.Put(pool) + fu.Put(pool) + fu2.Put(pool) + fu3.Put(pool) + fu2p.Put(pool) + fu3p.Put(pool) + y0.Put(pool) + y1.Put(pool) + y2.Put(pool) + y3.Put(pool) + y4.Put(pool) + y5.Put(pool) + y6.Put(pool) + + return t0 +} + +func optimalAte(a *twistPoint, b *curvePoint, pool *bnPool) *gfP12 { + e := miller(a, b, pool) + ret := finalExponentiation(e, pool) + e.Put(pool) + + if a.IsInfinity() || b.IsInfinity() { + ret.SetOne() + } + + return ret +} diff --git a/vendor/golang.org/x/crypto/bn256/twist.go b/vendor/golang.org/x/crypto/bn256/twist.go new file mode 100644 index 0000000..4f8b3fe --- /dev/null +++ b/vendor/golang.org/x/crypto/bn256/twist.go @@ -0,0 +1,249 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bn256 + +import ( + "math/big" +) + +// twistPoint implements the elliptic curve y²=x³+3/ξ over GF(p²). Points are +// kept in Jacobian form and t=z² when valid. The group G₂ is the set of +// n-torsion points of this curve over GF(p²) (where n = Order) +type twistPoint struct { + x, y, z, t *gfP2 +} + +var twistB = &gfP2{ + bigFromBase10("6500054969564660373279643874235990574282535810762300357187714502686418407178"), + bigFromBase10("45500384786952622612957507119651934019977750675336102500314001518804928850249"), +} + +// twistGen is the generator of group G₂. +var twistGen = &twistPoint{ + &gfP2{ + bigFromBase10("21167961636542580255011770066570541300993051739349375019639421053990175267184"), + bigFromBase10("64746500191241794695844075326670126197795977525365406531717464316923369116492"), + }, + &gfP2{ + bigFromBase10("20666913350058776956210519119118544732556678129809273996262322366050359951122"), + bigFromBase10("17778617556404439934652658462602675281523610326338642107814333856843981424549"), + }, + &gfP2{ + bigFromBase10("0"), + bigFromBase10("1"), + }, + &gfP2{ + bigFromBase10("0"), + bigFromBase10("1"), + }, +} + +func newTwistPoint(pool *bnPool) *twistPoint { + return &twistPoint{ + newGFp2(pool), + newGFp2(pool), + newGFp2(pool), + newGFp2(pool), + } +} + +func (c *twistPoint) String() string { + return "(" + c.x.String() + ", " + c.y.String() + ", " + c.z.String() + ")" +} + +func (c *twistPoint) Put(pool *bnPool) { + c.x.Put(pool) + c.y.Put(pool) + c.z.Put(pool) + c.t.Put(pool) +} + +func (c *twistPoint) Set(a *twistPoint) { + c.x.Set(a.x) + c.y.Set(a.y) + c.z.Set(a.z) + c.t.Set(a.t) +} + +// IsOnCurve returns true iff c is on the curve where c must be in affine form. +func (c *twistPoint) IsOnCurve() bool { + pool := new(bnPool) + yy := newGFp2(pool).Square(c.y, pool) + xxx := newGFp2(pool).Square(c.x, pool) + xxx.Mul(xxx, c.x, pool) + yy.Sub(yy, xxx) + yy.Sub(yy, twistB) + yy.Minimal() + return yy.x.Sign() == 0 && yy.y.Sign() == 0 +} + +func (c *twistPoint) SetInfinity() { + c.z.SetZero() +} + +func (c *twistPoint) IsInfinity() bool { + return c.z.IsZero() +} + +func (c *twistPoint) Add(a, b *twistPoint, pool *bnPool) { + // For additional comments, see the same function in curve.go. + + if a.IsInfinity() { + c.Set(b) + return + } + if b.IsInfinity() { + c.Set(a) + return + } + + // See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3 + z1z1 := newGFp2(pool).Square(a.z, pool) + z2z2 := newGFp2(pool).Square(b.z, pool) + u1 := newGFp2(pool).Mul(a.x, z2z2, pool) + u2 := newGFp2(pool).Mul(b.x, z1z1, pool) + + t := newGFp2(pool).Mul(b.z, z2z2, pool) + s1 := newGFp2(pool).Mul(a.y, t, pool) + + t.Mul(a.z, z1z1, pool) + s2 := newGFp2(pool).Mul(b.y, t, pool) + + h := newGFp2(pool).Sub(u2, u1) + xEqual := h.IsZero() + + t.Add(h, h) + i := newGFp2(pool).Square(t, pool) + j := newGFp2(pool).Mul(h, i, pool) + + t.Sub(s2, s1) + yEqual := t.IsZero() + if xEqual && yEqual { + c.Double(a, pool) + return + } + r := newGFp2(pool).Add(t, t) + + v := newGFp2(pool).Mul(u1, i, pool) + + t4 := newGFp2(pool).Square(r, pool) + t.Add(v, v) + t6 := newGFp2(pool).Sub(t4, j) + c.x.Sub(t6, t) + + t.Sub(v, c.x) // t7 + t4.Mul(s1, j, pool) // t8 + t6.Add(t4, t4) // t9 + t4.Mul(r, t, pool) // t10 + c.y.Sub(t4, t6) + + t.Add(a.z, b.z) // t11 + t4.Square(t, pool) // t12 + t.Sub(t4, z1z1) // t13 + t4.Sub(t, z2z2) // t14 + c.z.Mul(t4, h, pool) + + z1z1.Put(pool) + z2z2.Put(pool) + u1.Put(pool) + u2.Put(pool) + t.Put(pool) + s1.Put(pool) + s2.Put(pool) + h.Put(pool) + i.Put(pool) + j.Put(pool) + r.Put(pool) + v.Put(pool) + t4.Put(pool) + t6.Put(pool) +} + +func (c *twistPoint) Double(a *twistPoint, pool *bnPool) { + // See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3 + A := newGFp2(pool).Square(a.x, pool) + B := newGFp2(pool).Square(a.y, pool) + C := newGFp2(pool).Square(B, pool) + + t := newGFp2(pool).Add(a.x, B) + t2 := newGFp2(pool).Square(t, pool) + t.Sub(t2, A) + t2.Sub(t, C) + d := newGFp2(pool).Add(t2, t2) + t.Add(A, A) + e := newGFp2(pool).Add(t, A) + f := newGFp2(pool).Square(e, pool) + + t.Add(d, d) + c.x.Sub(f, t) + + t.Add(C, C) + t2.Add(t, t) + t.Add(t2, t2) + c.y.Sub(d, c.x) + t2.Mul(e, c.y, pool) + c.y.Sub(t2, t) + + t.Mul(a.y, a.z, pool) + c.z.Add(t, t) + + A.Put(pool) + B.Put(pool) + C.Put(pool) + t.Put(pool) + t2.Put(pool) + d.Put(pool) + e.Put(pool) + f.Put(pool) +} + +func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoint { + sum := newTwistPoint(pool) + sum.SetInfinity() + t := newTwistPoint(pool) + + for i := scalar.BitLen(); i >= 0; i-- { + t.Double(sum, pool) + if scalar.Bit(i) != 0 { + sum.Add(t, a, pool) + } else { + sum.Set(t) + } + } + + c.Set(sum) + sum.Put(pool) + t.Put(pool) + return c +} + +func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint { + if c.z.IsOne() { + return c + } + + zInv := newGFp2(pool).Invert(c.z, pool) + t := newGFp2(pool).Mul(c.y, zInv, pool) + zInv2 := newGFp2(pool).Square(zInv, pool) + c.y.Mul(t, zInv2, pool) + t.Mul(c.x, zInv2, pool) + c.x.Set(t) + c.z.SetOne() + c.t.SetOne() + + zInv.Put(pool) + t.Put(pool) + zInv2.Put(pool) + + return c +} + +func (c *twistPoint) Negative(a *twistPoint, pool *bnPool) { + c.x.Set(a.x) + c.y.SetZero() + c.y.Sub(c.y, a.y) + c.z.Set(a.z) + c.t.SetZero() +} diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go new file mode 100644 index 0000000..0b4af37 --- /dev/null +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -0,0 +1,526 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common +// OpenPGP cipher. +package cast5 // import "golang.org/x/crypto/cast5" + +import "errors" + +const BlockSize = 8 +const KeySize = 16 + +type Cipher struct { + masking [16]uint32 + rotate [16]uint8 +} + +func NewCipher(key []byte) (c *Cipher, err error) { + if len(key) != KeySize { + return nil, errors.New("CAST5: keys must be 16 bytes") + } + + c = new(Cipher) + c.keySchedule(key) + return +} + +func (c *Cipher) BlockSize() int { + return BlockSize +} + +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +type keyScheduleA [4][7]uint8 +type keyScheduleB [4][5]uint8 + +// keyScheduleRound contains the magic values for a round of the key schedule. +// The keyScheduleA deals with the lines like: +// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] +// Conceptually, both x and z are in the same array, x first. The first +// element describes which word of this array gets written to and the +// second, which word gets read. So, for the line above, it's "4, 0", because +// it's writing to the first word of z, which, being after x, is word 4, and +// reading from the first word of x: word 0. +// +// Next are the indexes into the S-boxes. Now the array is treated as bytes. So +// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear +// that it's z that we're indexing. +// +// keyScheduleB deals with lines like: +// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] +// "K1" is ignored because key words are always written in order. So the five +// elements are the S-box indexes. They use the same form as in keyScheduleA, +// above. + +type keyScheduleRound struct{} +type keySchedule []keyScheduleRound + +var schedule = []struct { + a keyScheduleA + b keyScheduleB +}{ + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, + {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, + {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, + {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {3, 2, 0xc, 0xd, 8}, + {1, 0, 0xe, 0xf, 0xd}, + {7, 6, 8, 9, 3}, + {5, 4, 0xa, 0xb, 7}, + }, + }, + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, + {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, + {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, + {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {8, 9, 7, 6, 3}, + {0xa, 0xb, 5, 4, 7}, + {0xc, 0xd, 3, 2, 8}, + {0xe, 0xf, 1, 0, 0xd}, + }, + }, +} + +func (c *Cipher) keySchedule(in []byte) { + var t [8]uint32 + var k [32]uint32 + + for i := 0; i < 4; i++ { + j := i * 4 + t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) + } + + x := []byte{6, 7, 4, 5} + ki := 0 + + for half := 0; half < 2; half++ { + for _, round := range schedule { + for j := 0; j < 4; j++ { + var a [7]uint8 + copy(a[:], round.a[j][:]) + w := t[a[1]] + w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] + w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] + w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] + w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] + w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] + t[a[0]] = w + } + + for j := 0; j < 4; j++ { + var b [5]uint8 + copy(b[:], round.b[j][:]) + w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] + w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] + w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] + w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] + w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] + k[ki] = w + ki++ + } + } + } + + for i := 0; i < 16; i++ { + c.masking[i] = k[i] + c.rotate[i] = uint8(k[16+i] & 0x1f) + } +} + +// These are the three 'f' functions. See RFC 2144, section 2.2. +func f1(d, m uint32, r uint8) uint32 { + t := m + d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] +} + +func f2(d, m uint32, r uint8) uint32 { + t := m ^ d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] +} + +func f3(d, m uint32, r uint8) uint32 { + t := m - d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] +} + +var sBox = [8][256]uint32{ + { + 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, + 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, + 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, + 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, + 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, + 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, + 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, + 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, + 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, + 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, + 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, + 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, + 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, + 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, + 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, + 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, + 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, + 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, + 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, + 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, + 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, + 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, + 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, + 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, + 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, + 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, + 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, + 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, + 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, + 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, + 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, + 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, + }, + { + 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, + 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, + 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, + 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, + 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, + 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, + 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, + 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, + 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, + 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, + 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, + 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, + 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, + 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, + 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, + 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, + 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, + 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, + 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, + 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, + 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, + 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, + 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, + 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, + 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, + 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, + 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, + 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, + 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, + 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, + 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, + 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, + }, + { + 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, + 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, + 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, + 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, + 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, + 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, + 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, + 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, + 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, + 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, + 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, + 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, + 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, + 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, + 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, + 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, + 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, + 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, + 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, + 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, + 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, + 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, + 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, + 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, + 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, + 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, + 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, + 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, + 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, + 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, + 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, + 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, + }, + { + 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, + 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, + 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, + 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, + 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, + 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, + 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, + 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, + 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, + 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, + 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, + 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, + 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, + 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, + 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, + 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, + 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, + 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, + 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, + 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, + 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, + 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, + 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, + 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, + 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, + 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, + 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, + 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, + 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, + 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, + 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, + 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, + }, + { + 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, + 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, + 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, + 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, + 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, + 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, + 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, + 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, + 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, + 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, + 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, + 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, + 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, + 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, + 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, + 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, + 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, + 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, + 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, + 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, + 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, + 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, + 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, + 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, + 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, + 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, + 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, + 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, + 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, + 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, + 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, + 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, + }, + { + 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, + 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, + 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, + 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, + 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, + 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, + 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, + 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, + 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, + 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, + 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, + 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, + 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, + 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, + 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, + 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, + 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, + 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, + 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, + 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, + 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, + 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, + 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, + 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, + 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, + 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, + 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, + 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, + 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, + 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, + 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, + 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, + }, + { + 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, + 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, + 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, + 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, + 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, + 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, + 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, + 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, + 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, + 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, + 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, + 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, + 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, + 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, + 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, + 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, + 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, + 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, + 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566, + 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, + 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, + 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, + 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, + 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, + 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, + 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, + 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, + 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, + 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, + 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, + 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, + 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, + }, + { + 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, + 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, + 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, + 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, + 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, + 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, + 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, + 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, + 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, + 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, + 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, + 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, + 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, + 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, + 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, + 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, + 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, + 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, + 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, + 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, + 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, + 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, + 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4, + 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, + 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, + 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, + 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, + 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, + 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, + 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, + 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, + 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, + }, +} diff --git a/vendor/golang.org/x/crypto/cast5/cast5_test.go b/vendor/golang.org/x/crypto/cast5/cast5_test.go new file mode 100644 index 0000000..778b272 --- /dev/null +++ b/vendor/golang.org/x/crypto/cast5/cast5_test.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cast5 + +import ( + "bytes" + "encoding/hex" + "testing" +) + +// This test vector is taken from RFC 2144, App B.1. +// Since the other two test vectors are for reduced-round variants, we can't +// use them. +var basicTests = []struct { + key, plainText, cipherText string +}{ + { + "0123456712345678234567893456789a", + "0123456789abcdef", + "238b4fe5847e44b2", + }, +} + +func TestBasic(t *testing.T) { + for i, test := range basicTests { + key, _ := hex.DecodeString(test.key) + plainText, _ := hex.DecodeString(test.plainText) + expected, _ := hex.DecodeString(test.cipherText) + + c, err := NewCipher(key) + if err != nil { + t.Errorf("#%d: failed to create Cipher: %s", i, err) + continue + } + var cipherText [BlockSize]byte + c.Encrypt(cipherText[:], plainText) + if !bytes.Equal(cipherText[:], expected) { + t.Errorf("#%d: got:%x want:%x", i, cipherText, expected) + } + + var plainTextAgain [BlockSize]byte + c.Decrypt(plainTextAgain[:], cipherText[:]) + if !bytes.Equal(plainTextAgain[:], plainText) { + t.Errorf("#%d: got:%x want:%x", i, plainTextAgain, plainText) + } + } +} + +// TestFull performs the test specified in RFC 2144, App B.2. +// However, due to the length of time taken, it's disabled here and a more +// limited version is included, below. +func TestFull(t *testing.T) { + if testing.Short() { + // This is too slow for normal testing + return + } + + a, b := iterate(1000000) + + const expectedA = "eea9d0a249fd3ba6b3436fb89d6dca92" + const expectedB = "b2c95eb00c31ad7180ac05b8e83d696e" + + if hex.EncodeToString(a) != expectedA { + t.Errorf("a: got:%x want:%s", a, expectedA) + } + if hex.EncodeToString(b) != expectedB { + t.Errorf("b: got:%x want:%s", b, expectedB) + } +} + +func iterate(iterations int) ([]byte, []byte) { + const initValueHex = "0123456712345678234567893456789a" + + initValue, _ := hex.DecodeString(initValueHex) + + var a, b [16]byte + copy(a[:], initValue) + copy(b[:], initValue) + + for i := 0; i < iterations; i++ { + c, _ := NewCipher(b[:]) + c.Encrypt(a[:8], a[:8]) + c.Encrypt(a[8:], a[8:]) + c, _ = NewCipher(a[:]) + c.Encrypt(b[:8], b[:8]) + c.Encrypt(b[8:], b[8:]) + } + + return a[:], b[:] +} + +func TestLimited(t *testing.T) { + a, b := iterate(1000) + + const expectedA = "23f73b14b02a2ad7dfb9f2c35644798d" + const expectedB = "e5bf37eff14c456a40b21ce369370a9f" + + if hex.EncodeToString(a) != expectedA { + t.Errorf("a: got:%x want:%s", a, expectedA) + } + if hex.EncodeToString(b) != expectedB { + t.Errorf("b: got:%x want:%s", b, expectedB) + } +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go new file mode 100644 index 0000000..3f0dcb9 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -0,0 +1,83 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD as specified in RFC 7539. +package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" + +import ( + "crypto/cipher" + "errors" +) + +const ( + // KeySize is the size of the key used by this AEAD, in bytes. + KeySize = 32 + // NonceSize is the size of the nonce used with this AEAD, in bytes. + NonceSize = 12 +) + +type chacha20poly1305 struct { + key [32]byte +} + +// New returns a ChaCha20-Poly1305 AEAD that uses the given, 256-bit key. +func New(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(chacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (c *chacha20poly1305) NonceSize() int { + return NonceSize +} + +func (c *chacha20poly1305) Overhead() int { + return 16 +} + +func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + return c.seal(dst, nonce, plaintext, additionalData) +} + +var errOpen = errors.New("chacha20poly1305: message authentication failed") + +func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + return c.open(dst, nonce, ciphertext, additionalData) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go new file mode 100644 index 0000000..7cd7ad8 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -0,0 +1,127 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +package chacha20poly1305 + +import "encoding/binary" + +//go:noescape +func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool + +//go:noescape +func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) + +// cpuid is implemented in chacha20poly1305_amd64.s. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in chacha20poly1305_amd64.s. +func xgetbv() (eax, edx uint32) + +var ( + useASM bool + useAVX2 bool +) + +func init() { + detectCPUFeatures() +} + +// detectCPUFeatures is used to detect if cpu instructions +// used by the functions implemented in assembler in +// chacha20poly1305_amd64.s are supported. +func detectCPUFeatures() { + maxID, _, _, _ := cpuid(0, 0) + if maxID < 1 { + return + } + + _, _, ecx1, _ := cpuid(1, 0) + + haveSSSE3 := isSet(9, ecx1) + useASM = haveSSSE3 + + haveOSXSAVE := isSet(27, ecx1) + + osSupportsAVX := false + // For XGETBV, OSXSAVE bit is required and sufficient. + if haveOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + } + haveAVX := isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, _, _ := cpuid(7, 0) + haveAVX2 := isSet(5, ebx7) && haveAVX + haveBMI2 := isSet(8, ebx7) + + useAVX2 = haveAVX2 && haveBMI2 +} + +// isSet checks if bit at bitpos is set in value. +func isSet(bitpos uint, value uint32) bool { + return value&(1<+0x00(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 +// <<< 16 with PSHUFB +DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A +DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A +// <<< 8 with PSHUFB +DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B +DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B + +DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 +DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 + +DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 +DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 +// Poly1305 key clamp +DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA ·sseIncMask<>+0x00(SB)/8, $0x1 +DATA ·sseIncMask<>+0x08(SB)/8, $0x0 +// To load/store the last < 16 bytes in a buffer +DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff + +GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 +GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 +GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 +// No PALIGNR in Go ASM yet (but VPALIGNR is present). +#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 +#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 +#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 +#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 +#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 +#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 +#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 +#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 +#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 +#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 +#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 +#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 +#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 +#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 +#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 +#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X13, X13 +#define shiftC0Right shiftC0Left +#define shiftC1Right shiftC1Left +#define shiftC2Right shiftC2Left +#define shiftC3Right shiftC3Left +#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 +#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 +#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 +#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 +// Some macros +#define chachaQR(A, B, C, D, T) \ + PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ + PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B + +#define chachaQR_AVX2(A, B, C, D, T) \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B + +#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 +#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX +#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 +#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t2:t3; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 + +#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 +#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 + +#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage +#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage +// ---------------------------------------------------------------------------- +TEXT polyHashADInternal<>(SB), NOSPLIT, $0 + // adp points to beginning of additional data + // itr2 holds ad length + XORQ acc0, acc0 + XORQ acc1, acc1 + XORQ acc2, acc2 + CMPQ itr2, $13 + JNE hashADLoop + +openFastTLSAD: + // Special treatment for the TLS case of 13 bytes + MOVQ (adp), acc0 + MOVQ 5(adp), acc1 + SHRQ $24, acc1 + MOVQ $1, acc2 + polyMul + RET + +hashADLoop: + // Hash in 16 byte chunks + CMPQ itr2, $16 + JB hashADTail + polyAdd(0(adp)) + LEAQ (1*16)(adp), adp + SUBQ $16, itr2 + polyMul + JMP hashADLoop + +hashADTail: + CMPQ itr2, $0 + JE hashADDone + + // Hash last < 16 byte tail + XORQ t0, t0 + XORQ t1, t1 + XORQ t2, t2 + ADDQ itr2, adp + +hashADTailLoop: + SHLQ $8, t1:t0 + SHLQ $8, t0 + MOVB -1(adp), t2 + XORQ t2, t0 + DECQ adp + DECQ itr2 + JNE hashADTailLoop + +hashADTailFinish: + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Finished AD +hashADDone: + RET + +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Open(dst, key, src, ad []byte) bool +TEXT ·chacha20Poly1305Open(SB), 0, $288-97 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + // Check for AVX2 support + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Open_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE openSSE128 // About 16% faster + + // For long buffers, prepare the poly key first + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + MOVO D0, T1 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + MOVO D0, ctr3Store + MOVQ $10, itr2 + +openSSEPreparePolyKey: + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + DECQ itr2 + JNE openSSEPreparePolyKey + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore; MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSEMainLoop: + CMPQ inl, $256 + JB openSSEMainLoopDone + + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $4, itr1 + MOVQ inp, itr2 + +openSSEInternalLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(itr2)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(itr2), itr2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr1 + JGE openSSEInternalLoop + + polyAdd(0(itr2)) + polyMul + LEAQ (2*8)(itr2), itr2 + + CMPQ itr1, $-6 + JG openSSEInternalLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Load - xor - store + MOVO D3, tmpStore + MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) + MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) + MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) + MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) + MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) + MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) + MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) + MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) + MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) + MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) + MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) + MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) + MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) + MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) + LEAQ 256(inp), inp + LEAQ 256(oup), oup + SUBQ $256, inl + JMP openSSEMainLoop + +openSSEMainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $64 + JBE openSSETail64 + CMPQ inl, $128 + JBE openSSETail128 + CMPQ inl, $192 + JBE openSSETail192 + JMP openSSETail256 + +openSSEFinalize: + // Hash in the PT, AAD lengths + ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally, constant time compare to the tag at the end of the message + XORQ AX, AX + MOVQ $1, DX + XORQ (0*8)(inp), acc0 + XORQ (1*8)(inp), acc1 + ORQ acc1, acc0 + CMOVQEQ DX, AX + + // Return true iff tags are equal + MOVB AX, ret+96(FP) + RET + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 129 bytes +openSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +openSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE openSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore; MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSE128Open: + CMPQ inl, $16 + JB openSSETail16 + SUBQ $16, inl + + // Load for hashing + polyAdd(0(inp)) + + // Load for decryption + MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP openSSE128Open + +openSSETail16: + TESTQ inl, inl + JE openSSEFinalize + + // We can safely load the CT from the end, because it is padded with the MAC + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVOU (inp), T0 + ADDQ inl, inp + PAND -16(t0)(itr2*1), T0 + MOVO T0, 0+tmpStore + MOVQ T0, t0 + MOVQ 8+tmpStore, t1 + PXOR A1, T0 + + // We can only store one byte at a time, since plaintext can be shorter than 16 bytes +openSSETail16Store: + MOVQ T0, t3 + MOVB t3, (oup) + PSRLDQ $1, T0 + INCQ oup + DECQ inl + JNE openSSETail16Store + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + JMP openSSEFinalize + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of ciphertext +openSSETail64: + // Need to decrypt up to 64 bytes - prepare single block + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + XORQ itr2, itr2 + MOVQ inl, itr1 + CMPQ itr1, $16 + JB openSSETail64LoopB + +openSSETail64LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + SUBQ $16, itr1 + +openSSETail64LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + + CMPQ itr1, $16 + JAE openSSETail64LoopA + + CMPQ itr2, $160 + JNE openSSETail64LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + +openSSETail64DecLoop: + CMPQ inl, $16 + JB openSSETail64DecLoopDone + SUBQ $16, inl + MOVOU (inp), T0 + PXOR T0, A0 + MOVOU A0, (oup) + LEAQ 16(inp), inp + LEAQ 16(oup), oup + MOVO B0, A0 + MOVO C0, B0 + MOVO D0, C0 + JMP openSSETail64DecLoop + +openSSETail64DecLoopDone: + MOVO A0, A1 + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openSSETail128: + // Need to decrypt up to 128 bytes - prepare two blocks + MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSETail128LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + CMPQ itr2, itr1 + JB openSSETail128LoopA + + CMPQ itr2, $160 + JNE openSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr1Store, D0; PADDL ctr0Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + + SUBQ $64, inl + LEAQ 64(inp), inp + LEAQ 64(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of ciphertext +openSSETail192: + // Need to decrypt up to 192 bytes - prepare three blocks + MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store + MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store + + MOVQ inl, itr1 + MOVQ $160, itr2 + CMPQ itr1, $160 + CMOVQGT itr2, itr1 + ANDQ $-16, itr1 + XORQ itr2, itr2 + +openSSLTail192LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSLTail192LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + CMPQ itr2, itr1 + JB openSSLTail192LoopA + + CMPQ itr2, $160 + JNE openSSLTail192LoopB + + CMPQ inl, $176 + JB openSSLTail192Store + + polyAdd(160(inp)) + polyMul + + CMPQ inl, $192 + JB openSSLTail192Store + + polyAdd(176(inp)) + polyMul + +openSSLTail192Store: + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 + MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) + + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + SUBQ $128, inl + LEAQ 128(inp), inp + LEAQ 128(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openSSETail256: + // Need to decrypt up to 256 bytes - prepare four blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + XORQ itr2, itr2 + +openSSETail256Loop: + // This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication + polyAdd(0(inp)(itr2*1)) + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulStage3 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + ADDQ $2*8, itr2 + CMPQ itr2, $160 + JB openSSETail256Loop + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail256HashLoop: + polyAdd(0(inp)(itr2*1)) + polyMul + ADDQ $2*8, itr2 + CMPQ itr2, itr1 + JB openSSETail256HashLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + LEAQ 192(inp), inp + LEAQ 192(oup), oup + SUBQ $192, inl + MOVO A3, A0 + MOVO B3, B0 + MOVO C3, C0 + MOVO tmpStore, D0 + + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Open_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimization, for very short buffers + CMPQ inl, $192 + JBE openAVX2192 + CMPQ inl, $320 + JBE openAVX2320 + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, state2StoreAVX2 + VMOVDQA DD0, ctr3StoreAVX2 + MOVQ $10, itr2 + +openAVX2PreparePolyKey: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + DECQ itr2 + JNE openAVX2PreparePolyKey + + VPADDD ·chacha20Constants<>(SB), AA0, AA0 + VPADDD state1StoreAVX2, BB0, BB0 + VPADDD state2StoreAVX2, CC0, CC0 + VPADDD ctr3StoreAVX2, DD0, DD0 + + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for the first 64 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + + // Hash AD + first 64 bytes + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +openAVX2InitialHash64: + polyAdd(0(inp)(itr1*1)) + polyMulAVX2 + ADDQ $16, itr1 + CMPQ itr1, $64 + JNE openAVX2InitialHash64 + + // Decrypt the first 64 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), BB0, BB0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU BB0, (1*32)(oup) + LEAQ (2*32)(inp), inp + LEAQ (2*32)(oup), oup + SUBQ $64, inl + +openAVX2MainLoop: + CMPQ inl, $512 + JB openAVX2MainLoopDone + + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + +openAVX2InternalLoop: + // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications + // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext + polyAdd(0*8(inp)(itr1*1)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(inp)(itr1*1)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(inp)(itr1*1)) + LEAQ (6*8)(itr1), itr1 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + CMPQ itr1, $480 + JNE openAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(480(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(496(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + LEAQ (32*16)(oup), oup + SUBQ $(32*16), inl + JMP openAVX2MainLoop + +openAVX2MainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $128 + JBE openAVX2Tail128 + CMPQ inl, $256 + JBE openAVX2Tail256 + CMPQ inl, $384 + JBE openAVX2Tail384 + JMP openAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +openAVX2192: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +openAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE openAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +openAVX2ShortOpen: + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openAVX2ShortOpenLoop: + CMPQ inl, $32 + JB openAVX2ShortTail32 + SUBQ $32, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + polyAdd(2*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP openAVX2ShortOpenLoop + +openAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2ShortDone + + SUBQ $16, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2ShortDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +openAVX2320: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +openAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE openAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP openAVX2ShortOpen + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD1 + VMOVDQA DD1, DD0 + + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + TESTQ itr1, itr1 + JE openAVX2Tail128LoopB + +openAVX2Tail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMulAVX2 + +openAVX2Tail128LoopB: + ADDQ $16, itr2 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail128LoopA + CMPQ itr2, $160 + JNE openAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC1, CC1 + VPADDD DD0, DD1, DD1 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + +openAVX2TailLoop: + CMPQ inl, $32 + JB openAVX2Tail + SUBQ $32, inl + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + JMP openAVX2TailLoop + +openAVX2Tail: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2TailDone + SUBQ $16, inl + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2TailDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openAVX2Tail256: + // Need to decrypt up to 256 bytes - prepare four blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + + // Compute the number of iterations that will hash data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $128, itr1 + SHRQ $4, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + +openAVX2Tail256LoopA: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail256LoopA + + CMPQ itr2, $10 + JNE openAVX2Tail256LoopB + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + + // Hash the remainder of data (if any) +openAVX2Tail256Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail256HashEnd + polyAdd (0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail256Hash + +// Store 128 bytes safely, then go to store loop +openAVX2Tail256HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + + VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 + VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) + LEAQ (4*32)(inp), inp + LEAQ (4*32)(oup), oup + SUBQ $4*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +openAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare six blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, ctr0StoreAVX2 + VMOVDQA DD1, ctr1StoreAVX2 + VMOVDQA DD2, ctr2StoreAVX2 + + // Compute the number of iterations that will hash two blocks of data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $256, itr1 + SHRQ $4, itr1 + ADDQ $6, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail384LoopB: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + +openAVX2Tail384LoopA: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + + CMPQ itr2, itr1 + JB openAVX2Tail384LoopB + + CMPQ itr2, $10 + JNE openAVX2Tail384LoopA + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + +openAVX2Tail384Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail384HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail384Hash + +// Store 256 bytes safely, then go to store loop +openAVX2Tail384HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + LEAQ (8*32)(inp), inp + LEAQ (8*32)(oup), oup + SUBQ $8*32, inl + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +openAVX2Tail512: + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + MOVQ inp, itr2 + +openAVX2Tail512LoopB: + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ (2*8)(itr2), itr2 + +openAVX2Tail512LoopA: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(itr2)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(itr2)) + polyMulAVX2 + LEAQ (4*8)(itr2), itr2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + INCQ itr1 + CMPQ itr1, $4 + JLT openAVX2Tail512LoopB + + CMPQ itr1, $10 + JNE openAVX2Tail512LoopA + + MOVQ inl, itr1 + SUBQ $384, itr1 + ANDQ $-16, itr1 + +openAVX2Tail512HashLoop: + TESTQ itr1, itr1 + JE openAVX2Tail512HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + SUBQ $16, itr1 + JMP openAVX2Tail512HashLoop + +openAVX2Tail512HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + LEAQ (12*32)(inp), inp + LEAQ (12*32)(oup), oup + SUBQ $12*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Seal(dst, key, src, ad []byte) +TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Seal_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE sealSSE128 // About 15% faster + + // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + + // Load state, increment counter blocks + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVQ $10, itr2 + +sealSSEIntroLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JNE sealSSEIntroLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore + MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) + + MOVQ $128, itr1 + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 + + CMPQ inl, $64 + JBE sealSSE128SealHash + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) + + ADDQ $64, itr1 + SUBQ $64, inl + LEAQ 64(inp), inp + + MOVQ $2, itr1 + MOVQ $8, itr2 + + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + CMPQ inl, $192 + JBE sealSSETail192 + +sealSSEMainLoop: + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + +sealSSEInnerLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(oup)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(oup), oup + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JGE sealSSEInnerLoop + polyAdd(0(oup)) + polyMul + LEAQ (2*8)(oup), oup + DECQ itr1 + JG sealSSEInnerLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVO tmpStore, D3 + + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + ADDQ $192, inp + MOVQ $192, itr1 + SUBQ $192, inl + MOVO A3, A1 + MOVO B3, B1 + MOVO C3, C1 + MOVO D3, D1 + CMPQ inl, $64 + JBE sealSSE128SealHash + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) + LEAQ 64(inp), inp + SUBQ $64, inl + MOVQ $6, itr1 + MOVQ $4, itr2 + CMPQ inl, $192 + JG sealSSEMainLoop + + MOVQ inl, itr1 + TESTQ inl, inl + JE sealSSE128SealHash + MOVQ $6, itr1 + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + JMP sealSSETail192 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of plaintext +sealSSETail64: + // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A1 + MOVO state1Store, B1 + MOVO state2Store, C1 + MOVO ctr3Store, D1 + PADDL ·sseIncMask<>(SB), D1 + MOVO D1, ctr0Store + +sealSSETail64LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail64LoopB: + chachaQR(A1, B1, C1, D1, T1) + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A1, B1, C1, D1, T1) + shiftB1Right; shiftC1Right; shiftD1Right + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + DECQ itr1 + JG sealSSETail64LoopA + + DECQ itr2 + JGE sealSSETail64LoopB + PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B1 + PADDL state2Store, C1 + PADDL ctr0Store, D1 + + JMP sealSSE128Seal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of plaintext +sealSSETail128: + // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + +sealSSETail128LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail128LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + DECQ itr1 + JG sealSSETail128LoopA + + DECQ itr2 + JGE sealSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr0Store, D0; PADDL ctr1Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + + MOVQ $64, itr1 + LEAQ 64(inp), inp + SUBQ $64, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of plaintext +sealSSETail192: + // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + +sealSSETail192LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail192LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + DECQ itr1 + JG sealSSETail192LoopA + + DECQ itr2 + JGE sealSSETail192LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + MOVO A2, A1 + MOVO B2, B1 + MOVO C2, C1 + MOVO D2, D1 + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special seal optimization for buffers smaller than 129 bytes +sealSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +sealSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE sealSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore + MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealSSE128SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealSSE128Seal + polyAdd(0(oup)) + polyMul + + SUBQ $16, itr1 + ADDQ $16, oup + + JMP sealSSE128SealHash + +sealSSE128Seal: + CMPQ inl, $16 + JB sealSSETail + SUBQ $16, inl + + // Load for decryption + MOVOU (inp), T0 + PXOR T0, A1 + MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + + // Extract for hashing + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP sealSSE128Seal + +sealSSETail: + TESTQ inl, inl + JE sealSSEFinalize + + // We can only load the PT one byte at a time to avoid read after end of buffer + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVQ inl, itr1 + LEAQ -1(inp)(inl*1), inp + XORQ t2, t2 + XORQ t3, t3 + XORQ AX, AX + +sealSSETailLoadLoop: + SHLQ $8, t2, t3 + SHLQ $8, t2 + MOVB (inp), AX + XORQ AX, t2 + LEAQ -1(inp), inp + DECQ itr1 + JNE sealSSETailLoadLoop + MOVQ t2, 0+tmpStore + MOVQ t3, 8+tmpStore + PXOR 0+tmpStore, A1 + MOVOU A1, (oup) + MOVOU -16(t0)(itr2*1), T0 + PAND T0, A1 + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + ADDQ inl, oup + +sealSSEFinalize: + // Hash in the buffer lengths + ADDQ ad_len+80(FP), acc0 + ADCQ src_len+56(FP), acc1 + ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally store the tag at the end of the message + MOVQ acc0, (0*8)(oup) + MOVQ acc1, (1*8)(oup) + RET + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Seal_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimizations, for very short buffers + CMPQ inl, $192 + JBE seal192AVX2 // 33% faster + CMPQ inl, $320 + JBE seal320AVX2 // 17% faster + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr2 + +sealAVX2IntroLoop: + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr2 + JNE sealAVX2IntroLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + + VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 + VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key + VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), DD0, DD0 + VMOVDQA DD0, rsStoreAVX2 + + // Hash AD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + // Can store at least 320 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), CC0, CC0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU CC0, (1*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 + VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 + VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) + + MOVQ $320, itr1 + SUBQ $320, inl + LEAQ 320(inp), inp + + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 + CMPQ inl, $128 + JBE sealAVX2SealHash + + VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 + VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVQ $8, itr1 + MOVQ $2, itr2 + + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + CMPQ inl, $512 + JBE sealAVX2Tail512 + + // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + + SUBQ $16, oup // Adjust the pointer + MOVQ $9, itr1 + JMP sealAVX2InternalLoopStart + +sealAVX2MainLoop: + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr1 + +sealAVX2InternalLoop: + polyAdd(0*8(oup)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + +sealAVX2InternalLoopStart: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(oup)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(oup)) + LEAQ (6*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr1 + JNE sealAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(-2*8(oup)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + SUBQ $(32*16), inl + CMPQ inl, $512 + JG sealAVX2MainLoop + + // Tail can only hash 480 bytes + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ 32(oup), oup + + MOVQ $10, itr1 + MOVQ $0, itr2 + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +seal192AVX2: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +sealAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE sealAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +sealAVX2ShortSeal: + // Hash aad + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealAVX2SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealAVX2ShortSealLoop + polyAdd(0(oup)) + polyMul + SUBQ $16, itr1 + ADDQ $16, oup + JMP sealAVX2SealHash + +sealAVX2ShortSealLoop: + CMPQ inl, $32 + JB sealAVX2ShortTail32 + SUBQ $32, inl + + // Load for encryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + + // Now can hash + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP sealAVX2ShortSealLoop + +sealAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB sealAVX2ShortDone + + SUBQ $16, inl + + // Load for encryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + + // Hash + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +sealAVX2ShortDone: + VZEROUPPER + JMP sealSSETail + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +seal320AVX2: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +sealAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE sealAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP sealAVX2ShortSeal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +sealAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0 + VMOVDQA state1StoreAVX2, BB0 + VMOVDQA state2StoreAVX2, CC0 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VMOVDQA DD0, DD1 + +sealAVX2Tail128LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail128LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $4, DD0, DD0, DD0 + DECQ itr1 + JG sealAVX2Tail128LoopA + DECQ itr2 + JGE sealAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA1 + VPADDD state1StoreAVX2, BB0, BB1 + VPADDD state2StoreAVX2, CC0, CC1 + VPADDD DD1, DD0, DD1 + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + JMP sealAVX2ShortSealLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +sealAVX2Tail256: + // Need to decrypt up to 256 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + +sealAVX2Tail256LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr1 + JG sealAVX2Tail256LoopA + DECQ itr2 + JGE sealAVX2Tail256LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +sealAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + +sealAVX2Tail384LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail384LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr1 + JG sealAVX2Tail384LoopA + DECQ itr2 + JGE sealAVX2Tail384LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0 + VPERM2I128 $0x02, CC1, DD1, TT1 + VPERM2I128 $0x13, AA1, BB1, TT2 + VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + MOVQ $256, itr1 + LEAQ 256(inp), inp + SUBQ $256, inl + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +sealAVX2Tail512: + // Need to decrypt up to 512 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + +sealAVX2Tail512LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail512LoopB: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(oup)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + + DECQ itr1 + JG sealAVX2Tail512LoopA + DECQ itr2 + JGE sealAVX2Tail512LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3 + VPXOR (0*32)(inp), CC3, CC3 + VMOVDQU CC3, (0*32)(oup) + VPERM2I128 $0x02, CC0, DD0, CC3 + VPXOR (1*32)(inp), CC3, CC3 + VMOVDQU CC3, (1*32)(oup) + VPERM2I128 $0x13, AA0, BB0, CC3 + VPXOR (2*32)(inp), CC3, CC3 + VMOVDQU CC3, (2*32)(oup) + VPERM2I128 $0x13, CC0, DD0, CC3 + VPXOR (3*32)(inp), CC3, CC3 + VMOVDQU CC3, (3*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + + MOVQ $384, itr1 + LEAQ 384(inp), inp + SUBQ $384, inl + VPERM2I128 $0x02, AA3, BB3, AA0 + VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 + VPERM2I128 $0x13, AA3, BB3, CC0 + VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + JMP sealAVX2SealHash + +// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), NOSPLIT, $0-24 + MOVL eaxArg+0(FP), AX + MOVL ecxArg+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv() (eax, edx uint32) +TEXT ·xgetbv(SB),NOSPLIT,$0-8 + MOVL $0, CX + XGETBV + MOVL AX, eax+0(FP) + MOVL DX, edx+4(FP) + RET diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go new file mode 100644 index 0000000..4ac014f --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go @@ -0,0 +1,70 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/internal/chacha20" + "golang.org/x/crypto/poly1305" +) + +func roundTo16(n int) int { + return 16 * ((n + 15) / 16) +} + +func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { + var counter [16]byte + copy(counter[4:], nonce) + + var polyKey [32]byte + chacha20.XORKeyStream(polyKey[:], polyKey[:], &counter, &c.key) + + ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) + counter[0] = 1 + chacha20.XORKeyStream(out, plaintext, &counter, &c.key) + + polyInput := make([]byte, roundTo16(len(additionalData))+roundTo16(len(plaintext))+8+8) + copy(polyInput, additionalData) + copy(polyInput[roundTo16(len(additionalData)):], out[:len(plaintext)]) + binary.LittleEndian.PutUint64(polyInput[len(polyInput)-16:], uint64(len(additionalData))) + binary.LittleEndian.PutUint64(polyInput[len(polyInput)-8:], uint64(len(plaintext))) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, polyInput, &polyKey) + copy(out[len(plaintext):], tag[:]) + + return ret +} + +func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + var tag [poly1305.TagSize]byte + copy(tag[:], ciphertext[len(ciphertext)-16:]) + ciphertext = ciphertext[:len(ciphertext)-16] + + var counter [16]byte + copy(counter[4:], nonce) + + var polyKey [32]byte + chacha20.XORKeyStream(polyKey[:], polyKey[:], &counter, &c.key) + + polyInput := make([]byte, roundTo16(len(additionalData))+roundTo16(len(ciphertext))+8+8) + copy(polyInput, additionalData) + copy(polyInput[roundTo16(len(additionalData)):], ciphertext) + binary.LittleEndian.PutUint64(polyInput[len(polyInput)-16:], uint64(len(additionalData))) + binary.LittleEndian.PutUint64(polyInput[len(polyInput)-8:], uint64(len(ciphertext))) + + ret, out := sliceForAppend(dst, len(ciphertext)) + if !poly1305.Verify(&tag, polyInput, &polyKey) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + counter[0] = 1 + chacha20.XORKeyStream(out, ciphertext, &counter, &c.key) + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go new file mode 100644 index 0000000..4c2eb70 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 !go1.7 gccgo appengine + +package chacha20poly1305 + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + return c.sealGeneric(dst, nonce, plaintext, additionalData) +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + return c.openGeneric(dst, nonce, ciphertext, additionalData) +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_test.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_test.go new file mode 100644 index 0000000..78f981a --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_test.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +import ( + "bytes" + cr "crypto/rand" + "encoding/hex" + mr "math/rand" + "testing" +) + +func TestVectors(t *testing.T) { + for i, test := range chacha20Poly1305Tests { + key, _ := hex.DecodeString(test.key) + nonce, _ := hex.DecodeString(test.nonce) + ad, _ := hex.DecodeString(test.aad) + plaintext, _ := hex.DecodeString(test.plaintext) + + aead, err := New(key) + if err != nil { + t.Fatal(err) + } + + ct := aead.Seal(nil, nonce, plaintext, ad) + if ctHex := hex.EncodeToString(ct); ctHex != test.out { + t.Errorf("#%d: got %s, want %s", i, ctHex, test.out) + continue + } + + plaintext2, err := aead.Open(nil, nonce, ct, ad) + if err != nil { + t.Errorf("#%d: Open failed", i) + continue + } + + if !bytes.Equal(plaintext, plaintext2) { + t.Errorf("#%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext) + continue + } + + if len(ad) > 0 { + alterAdIdx := mr.Intn(len(ad)) + ad[alterAdIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce, ct, ad); err == nil { + t.Errorf("#%d: Open was successful after altering additional data", i) + } + ad[alterAdIdx] ^= 0x80 + } + + alterNonceIdx := mr.Intn(aead.NonceSize()) + nonce[alterNonceIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce, ct, ad); err == nil { + t.Errorf("#%d: Open was successful after altering nonce", i) + } + nonce[alterNonceIdx] ^= 0x80 + + alterCtIdx := mr.Intn(len(ct)) + ct[alterCtIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce, ct, ad); err == nil { + t.Errorf("#%d: Open was successful after altering ciphertext", i) + } + ct[alterCtIdx] ^= 0x80 + } +} + +func TestRandom(t *testing.T) { + // Some random tests to verify Open(Seal) == Plaintext + for i := 0; i < 256; i++ { + var nonce [12]byte + var key [32]byte + + al := mr.Intn(128) + pl := mr.Intn(16384) + ad := make([]byte, al) + plaintext := make([]byte, pl) + cr.Read(key[:]) + cr.Read(nonce[:]) + cr.Read(ad) + cr.Read(plaintext) + + aead, err := New(key[:]) + if err != nil { + t.Fatal(err) + } + + ct := aead.Seal(nil, nonce[:], plaintext, ad) + + plaintext2, err := aead.Open(nil, nonce[:], ct, ad) + if err != nil { + t.Errorf("Random #%d: Open failed", i) + continue + } + + if !bytes.Equal(plaintext, plaintext2) { + t.Errorf("Random #%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext) + continue + } + + if len(ad) > 0 { + alterAdIdx := mr.Intn(len(ad)) + ad[alterAdIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { + t.Errorf("Random #%d: Open was successful after altering additional data", i) + } + ad[alterAdIdx] ^= 0x80 + } + + alterNonceIdx := mr.Intn(aead.NonceSize()) + nonce[alterNonceIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { + t.Errorf("Random #%d: Open was successful after altering nonce", i) + } + nonce[alterNonceIdx] ^= 0x80 + + alterCtIdx := mr.Intn(len(ct)) + ct[alterCtIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { + t.Errorf("Random #%d: Open was successful after altering ciphertext", i) + } + ct[alterCtIdx] ^= 0x80 + } +} + +func benchamarkChaCha20Poly1305Seal(b *testing.B, buf []byte) { + b.SetBytes(int64(len(buf))) + + var key [32]byte + var nonce [12]byte + var ad [13]byte + var out []byte + + aead, _ := New(key[:]) + b.ResetTimer() + for i := 0; i < b.N; i++ { + out = aead.Seal(out[:0], nonce[:], buf[:], ad[:]) + } +} + +func benchamarkChaCha20Poly1305Open(b *testing.B, buf []byte) { + b.SetBytes(int64(len(buf))) + + var key [32]byte + var nonce [12]byte + var ad [13]byte + var ct []byte + var out []byte + + aead, _ := New(key[:]) + ct = aead.Seal(ct[:0], nonce[:], buf[:], ad[:]) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + out, _ = aead.Open(out[:0], nonce[:], ct[:], ad[:]) + } +} + +func BenchmarkChacha20Poly1305Open_64(b *testing.B) { + benchamarkChaCha20Poly1305Open(b, make([]byte, 64)) +} + +func BenchmarkChacha20Poly1305Seal_64(b *testing.B) { + benchamarkChaCha20Poly1305Seal(b, make([]byte, 64)) +} + +func BenchmarkChacha20Poly1305Open_1350(b *testing.B) { + benchamarkChaCha20Poly1305Open(b, make([]byte, 1350)) +} + +func BenchmarkChacha20Poly1305Seal_1350(b *testing.B) { + benchamarkChaCha20Poly1305Seal(b, make([]byte, 1350)) +} + +func BenchmarkChacha20Poly1305Open_8K(b *testing.B) { + benchamarkChaCha20Poly1305Open(b, make([]byte, 8*1024)) +} + +func BenchmarkChacha20Poly1305Seal_8K(b *testing.B) { + benchamarkChaCha20Poly1305Seal(b, make([]byte, 8*1024)) +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_vectors_test.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_vectors_test.go new file mode 100644 index 0000000..49f0da6 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_vectors_test.go @@ -0,0 +1,332 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +var chacha20Poly1305Tests = []struct { + plaintext, aad, key, nonce, out string +}{ + { + "4c616469657320616e642047656e746c656d656e206f662074686520636c617373206f66202739393a204966204920636f756c64206f6666657220796f75206f6e6c79206f6e652074697020666f7220746865206675747572652c2073756e73637265656e20776f756c642062652069742e", + "50515253c0c1c2c3c4c5c6c7", + "808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f", + "070000004041424344454647", + "d31a8d34648e60db7b86afbc53ef7ec2a4aded51296e08fea9e2b5a736ee62d63dbea45e8ca9671282fafb69da92728b1a71de0a9e060b2905d6a5b67ecd3b3692ddbd7f2d778b8c9803aee328091b58fab324e4fad675945585808b4831d7bc3ff4def08e4b7a9de576d26586cec64b61161ae10b594f09e26a7e902ecbd0600691", + }, + { + "1400000cebccee3bf561b292340fec60", + "00000000000000001603030010", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "2b487a2941bc07f3cc76d1a531662588ee7c2598e59778c24d5b27559a80d163", + }, + { + "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "3f487a25aa70e9c8391763370569c9e83b7650dd1921c8b78869f241f25d2096c910b180930c5b8747fd90959fe8ca2dcadb4fa50fa1439f916b2301e1cc0810d6725775d3ab86721700f96e22709b0a7a8bef32627dd929b2dd3ba15772b669062bb558bc92e6c241a1d60d9f0035e80c335f854815fe1138ab8af653eab3e122135feeec7dfaba1cc24af82a2b7acccdd824899a7e03cc29c25be8a4f56a66673845b93bae1556f09dafc89a0d22af207718e2a6bb022e9d917597295992ea3b750cc0e7a7c3d33b23c5a8aeab45f5bb542f6c9e6c1747ae5a344aff483ba38577ad534b33b3abc7d284776ea33ed488c2a2475648a4fcda561745ea7787ed60f2368deb27c75adce6ff9b6cc6de1f5e72a741e2d59f64751b3ae482d714e0c90e83c671ff98ed611823afb39e6e5019a6ba548a2a72e829c7b7b4a101ac9deb90a25d3e0c50d22e1fc26c7c02296fa13c6d9c14767f68aaf46450a8d0fd5feb60d9d73c6e68623425b4984a79d619dd6bf896459aa77a681ec9c1a97f645e121f47779b051f8948a817f84d1f55da170d5bbbaf2f64e18b97ed3fd822db2819f523314f1e5ac72e8f69bbe6c87c22daddb0e1ac6790f8534071de2f258064b99789bfb165b065b8fe96f9127cd7dca9f7cb0368420f1e802faa3ca23792f2a5b93773dd405e71c320b211b54f7a26626b03c060e1ab87f32ac588abfa056ce090bd7c69913a700c80f325bfe824fa", + }, + { + "0967de57eefe1aaa999b9b746d88a1a248000d8734e0e938c6aa87", + "e4f0a3a4f90a8250f8806aa319053e8d73c62f150e2f239563037e9cc92823ad18c65111d0d462c954cc6c6ed2aafb45702a5a7e597d13bd8091594ab97cf7d1", + "f2db28620582e05f00f31c808475ca3df1c20e340bf14828352499466d79295f", + "4349e2131d44dc711148dfe3", + "bd06cc144fdc0d8b735fa4452eabbf78fd4ad2966ea41a84f68da40ca2da439777bc2ba6c4ec2de0d003eb", + }, + { + "c4c920fb52a56fe66eaa8aa3fa187c543e3db8e5c8094c4313dc4ed35dfc5821c5791d171e8cfe8d37883031a0ad", + "85deea3dc4", + "05ff881d1e151bab4ca3db7d44880222733fe62686f71ce1e4610f2ea19599a7", + "b34710f65aed442e4a40866b", + "b154452fb7e85d175dd0b0db08591565c5587a725cf22386922f5d27a01015aba778975510b38754b2182e24352f019b7ad493e1ed255906715644aec6e0", + }, + { + "c4b337df5e83823900c6c202e93541cf5bc8c677a9aad8b8d87a4d7221e294e595cbc4f34e462d4e0def50f62491c57f598cf60236cfba0f4908816aea154f80e013732e59a07c668fcc5cb35d2232b7ae29b9e4f874f3417c74ab6689fae6690d5a9766fa13cd8adf293d3d4b70f4f999adde9121d1d29d467d04cf77ea398444d0ea3fe4b7c9c3e106002c76f4260fa204a0c3d5", + "72611bef65eb664f24ea94f4d5d3d88c9c9c6da29c9a1991c02833c4c9f6993b57b5", + "dd0f2d4bb1c9e5ca5aa5f38d69bc8402f7dbb7229857b4a41b3044d481b7655e", + "2bbca0910cc47ca0b8517391", + "83aa28d6d98901e2981d21d3758ae4db8cce07fe08d82ca6f036a68daa88a7dda56eeb38040c942bdda0fd2d369eec44bd070e2c9314992f68dc16989a6ac0c3912c378cf3254f4bae74a66b075e828df6f855c0d8a827ffed3c03582c12a9112eeb7be43dfe8bd78beb2d1e56678b99a0372531727cb7f2b98d2f917ec10de93fe86267100c20356e80528c5066688c8b7acba76e591449952343f663993d5b642e59eb0f", + }, + { + "a9775b8e42b63335439cf1c79fe8a3560b3baebfdfc9ef239d70da02cea0947817f00659a63a8ee9d67fb1756854cc738f7a326e432191e1916be35f0b78d72268de7c0e180af7ee8aa864f2fc30658baa97f9edb88ace49f5b2a8002a8023925e9fa076a997643340c8253cf88ac8a221c190d94c5e224110cb423a4b65cca9046c1fad0483e1444c0680449148e7b20a778c56d5ae97e679d920c43eed6d42598cf05d10d1a15cd722a0686a871b74fea7cad45562bacf3bda937ac701bc218dac7e9d7d20f955429abdac21d821207febf4d54daea4898837035038bf71c66cef63e90f5d3e51f7fcfe18d41f38540a2c2958dacde16304e4b33da324030f1366f923c337", + "74ba3372d308910b5c9c3885f41252d57556", + "9cf77bd06a4ed8fb59349791b98ba40b6019611942f5768e8be2ee88477149e3", + "b928935c4c966c60fd6583c0", + "ec7fd64fd75b254961a2b7fc942470d8620f439258b871d0d00f58028b5e0bee5e139e8108ac439391465d6658f559b1df57aa21cf826ede1a28bc11af885e13eebfc009870928fae8abfdd943a60c54fca93f0502dc23d29c2fd5340f9bc0e6ef2a18b66ef627af95f796d5bbca50de22c8ec802da9397089b25c6ba5262468e3977b45dc112e51896c70731b0a52d7efec7c93b41995823436bf4b0c477ae79684407c9831b487928b2b8303caca752b3edf1f0598e15831155462706f94ef3fa3a9e5f937f37085afa9b4bbf939d275796a61b78f70597acfd25cd87f967021cd99328fc371b5eb5739869520657b30e4a5b0db7c8715cbe275dee78e719b357d3a9731f9eaba95986479bb2004a77822fc115a3d", + }, + { + "b3d3128bce6bbf66fd78f1a18352bae56bfcdae18b65c379ee0aeb37ee54fba1270d2df578ec5b75654d16e89fd1cd0acda7ec580dafd2fbbabd32a8112d49383a762db2638928c8d63eb0750f7e7fdd256b35321b072dd5c45f7dd58cc60dc63d3b79a0c4a1689adf180fef968eccbcfa01ee15091ceacd7b67a3082db0ce6aeb470aafe87249c88b58b721e783dde184ccf68de8e05b6347fe6b74ae3adf9a81e9496a5c9332e7ebe908d26ce6b3f0b2a97e9a89d9fdd0d7694585a3241f240d698e69fcc050e7a959ba153f6d06f117848ba05d887134f1b6b994dad9b9e74247513e08a125b1fadfc7394dcd2a6451b504ae3e75e22f2b9bc405747dedb6c43ef4ccdf1a7edaf9451346123eaa63f3af113124f361508e255503a242b96680ae3360c8b13ac1f64d08088bb26b7f617cb0866f11d6fd362b00d86eba3fee68724e302388f119d6f92161ac8ce00d08919377a26974d99575b1032ff0f1976240c785c8b89e9eb2bf005e4be06b5371ffca14683fedfdb49e00e38ff27af1324177faf91599abd5990920797574eb743effdc7decda318ada1419cc8e0bfecf82f9c99792746c2b", + "7e8da4f3018f673f8e43bd7a1dee05f8031ec49129c361abbc2a434e9eaf791c3c1d0f3dad767d3bba3ab6d728bbcf2bd994bd03571eae1348f161e6a1da03ddf7121ba4", + "7ee32dd501dce849cd492f6e23324c1a4567bfceff9f11d1352bcb8615f1b093", + "8998e043d2961afa51ea262a", + "ba85e72af18cb5ba85a4a0d6c28b4ac1e5509a3a2fdb0e3255cbc559df5e6a661fc560c756a0264dd99b72c61c51a4b7ad56ca4c8ccb7e8edfc48ff3cceac5d1e8ac5fc87096adc4d0e9a27492857b17604c3a694cfe0e70b22df106c8f3c61f840bcd634964cdb571840e125e381e7dd3a0d97972e965f16f775fa4ce555124318290bf508beb7bd77e633042deb0e863631478fc3dc9122862b3c31264471bcce54e0b74040c8bafd481cf798f332e8940f1134d3027d6f28e771d15e154fc89c6c25fe18a5d312807cc2e623bb1bbb4f0b6ec71d009407eb54bb0759f03682f65d0da8812f84d8e97483f6a8d76a8417efcd9526444abba24288647609791578887ef49780b0b89f51b072cae81c5b5014463da3633dda105b82add0f9c2f065dca46eedd2928be2570493c79a996fa78ea6aec0996497fe2dc444432ade4eaa662ee2255f0f4b92d593288a8e3ffe7a15a10e9d33b0203af23f4c9fd2cfcb6160db63b52810869ff1e65423dbe2c4415884b9f8dec3c968e14cd74f323c89053a96111bc9ce59ec483832c49c53a648e5f0f797f53642ac60170c94b473f1f2e7d8a38e46460b81219b52081263027f74cbf63a75af3a7", + }, + { + "68d5ba501e87994ef6bc8042d7c5a99693a835a4796ad044f0e536a0790a7ee1e03832fec0cb4cb688cdf85f92a1f526492acac2949a0684803c24f947a3da27db0c259bd87251603f49bfd1eab4f733dec2f5725cfcf6dc381ad57fbdb0a699bccc34943e86f47dcfb34eba6746ed4508e3b764dfad4117c8169785c63d1e8309531747d90cc4a8bf13622759506c613324c512d10629991dc01fe3fe3d6607907e4f698a1312492674707fc4dde0f701a609d2ac336cc9f38badf1c813f9599148c21b5bd4658249d5010db2e205b3880e863441f2fe357dab2645be1f9e5067616bc335d0457ea6468c5828910cb09f92e5e184e316018e3c464c5ce59cc34608867bd8cbfa7e1286d73a17e3ebb675d097f9b3adfa41ea408d46252a096b3290e70a5be1896d6760a87e439334b863ccb11679ab5763ebe4a9110eb37c4043634b9e44d40cab34b42977475e2faa2ae0c0a38b170776fbb0870a63044aa6679545ac6951579d0581144cdf43f60923b6acaecdb325c864acd2c7b01d6e18b2b3c41c041bb9099cce557b114b84350131e3cee4089648b5691065867e7d38314154355d0e3ef9dc9375eddef922df2a06ad0f0e4357c3ac672932e5a66b16e8bf4b45cd893ea91cb397faadb9d9d7bf86e6ceca3e9176a5baa98b6114a149d3ed8ea176cc4a9380e18d2d9b67045aedeb28b729ba2ece74d759d5ebfb1ebee8ac5f5e79aaf1f98b7f2626e62a81d315a98b3e", + "63b90dd89066ad7b61cc39497899a8f14399eace1810f5fe3b76d2501f5d8f83169c5ba602082164d45aad4df3553e36ef29050739fa067470d8c58f3554124bf06df1f27612564a6c04976059d69648ff9b50389556ad052e729563c6a7", + "7d5c4314a542aff57a454b274a7999dfdc5f878a159c29be27dabdfcf7c06975", + "aeb6159fa88bb1ffd51d036d", + "7597f7f44191e815a409754db7fea688e0105c987fa065e621823ea6dea617aed613092ad566c487cfa1a93f556615d2a575fb30ac34b11e19cd908d74545906f929dc9e59f6f1e1e6eaaabe182748ef87057ef7820ffcf254c40237d3ea9ff004472db783ed54b5a294a46cf90519bf89367b04fc01ce544c5bcdd3197eb1237923ce2c0c99921ca959c53b54176d292e97f6d9696ded6054711721aebda543e3e077c90e6f216cdc275b86d45603521c5aab24f08fd06833b0743c388382f941e19e0283ac7c4ef22383e1b9b08572882769c1382bab9ad127e7f3e09b5330b82d3e0c7d6f0df46edc93265999eef8e7afa0cb1db77df7accf5bff8631a320d146a5c751a637a80f627b0c9a41b44f09212f38c154226de02f4906ef34139bbeacc3f06739c8540e37334392d38ba1cbf4bc7debe77c09b35d2200216db15ed4389f43bfd8ae9bf76fd8243c3d869546e16b8e44a6cd1edbd2c58ef890b5a84cda889131e5cd9402ca4d8271052c6b4fe3f2dff54fb77bcb575c315b9109f90b14bc8e109919808a581c1809e2a188d29fd34ce639088a6683f641925f5b4b3529baa34e080bb47fb7ad9b43d0d67c9e6ae7cacb50527fa74e56d0c8b20149f5d332d686d48ebbe634c2b5d35fc84c69a5bcc93b93dedcf9fdf19a1fb9b75f6df9692d16f6c3490377a06294499e4b8ebeaa0cfd840bfa05fde21c0b5e94d13063b3f5da7b537caefe89069cfa9de9eb8f06e4d30125de64716f821bcc8279c0c7ea2e", + }, + { + "89c1ee38b6697d0190c87a2aa756892ee09fca095df1e31aeedbda5750f604d9b8f2116e5b8f70ec57ea16fe419f2d213ef72b9be90eb5d7e98f2e398632123e2524ac80b31c6c0a07820848223569602d94fc16a3b1ed8c411bc6c74ed80573fcb1f3afce60b9d5e2c21d04f78665241b613abe12274a5343101a91e91f04e5d1f7959f574e743a10913e0817a32c320467f0178e3b6ad14b856234a4661a755eaf14b5fd88ef0e192e1631d14263d6a954ed388f5709dadc6c0f81d229f630d80be6d593d5e3ad03f9ded53c41abe595981d24ef27ffcc930e4d653743960f4e7ce4e251c88f55c16d2afdaed5e3446d00685c276728ba757520acb9b6bb0732a0e9836878d829e5022794d70ad8440a40a132a8c9ec1d3f0ccaf8c285fff425e9788d6150b74753dedb2ae8b36ff2f310249bd911b9181d8310e00810d42ef94cbb5a9d72a1f0507c1a382f892b23994fbe7360778b7efa9c5e03ac3231a57fecff1c5fa10caf1d26e84db0137049622ebcc3a64841a0e49fa390d1d43550c1346c20d578cff39fb7404fcab0982dde55f0849d312581d0c811a19d46f25e7a5e7e50d74d43760583c5cf335dfc11b2ec964f1dbbd0ed83e18f2027817ea2dffcf2b64a352c4fb8f11eeb4f1bfc01079251254d2112d103a1f12a2270cc026cbeb8b6f3e505abd62496253f93274625786b73997e449c1f35c742a593441252fcc845e1cef1b8f287dd311a0477407ce3b31661f7b2802c79c2d20d06e45f03aca4e47a959c6c1d7a9d377e1577fbf82a115921c3d94e3d9c204aa204a9a5b04d8a2be3269700a035371f4aaf1a42d92b9bfbee74492b106975b36d1e581d6ce2484f09e04fa91586c85f35e2a10f0d3c0afcb05327c1bc9d7429bbcc4627af8f76b86fc561844c2ae3810c84901ac09a1670ed3d31a9daa5d296", + "7219bd21a834d917f93a9b45647ec77102578bc2f2a132dfde6489b9095b4f7b740c9c1c4075333ab0ce7f14", + "a7f849b054982cc8a4c8e5e53e181feee79e0233e58882839892134ad582da7c", + "4c46854e9e101090b1436f90", + "ab2e189baf60886bed88eb751bf3560a8bd3cdb6ee621d8c18b5fb3aa418f350048ecf359a7d542daf7090ec8688c3b0fe85914aa49d83be4ae3396f7bdc48051afae6a97fca7b42c0bf612a42d3c79ef6aadceb57f5cfe8d67f89d49add0ea1ffd423da058297239e72a85fa6cd1d82e243a503b1b0e12d7510a9ee98d7921dae2754d7581e52acb8ab9e7f9df3c73410789115cef6ce7c937a5441ad4edf2b7a8c0c6d152d5a5909c4ce839d59594a6163364038c4c71a1507389717f61e2bda1ea66a83ef477762e7834ebcfaa8f2ee61ced1605ba1380108236e1763bf40af5259da07dd3e3d0fb2801868c2e7c839e318678687cbe33384e2ef5750a0a0e2d2e19e869a4277e32a315ed4de79357f6a12a8a25d5b18291316d9bf40dad2d05d1b523ade76650669c700a1c2965f4e51337aa5d45ec7b4981072779401d6d30ed69034053334bccb18425ac68460becf2aeccc75aacd3d6709f07ee10366ed848c8a54904af4ea71fc2117de133f01e1cc031f2a4d0779b997b82682433ee615202d5dfffba6c916f11a00551d56ffde8c36b303263e14adaf45b6eab0bedf344e5214ce52f071d2f40154d788c6870020791a03d2fd4ec5879d9026241954ed45cfddef4937ea3d0d45647f252be31411237983a1be340fc65ebab9a5620abb0e8d475af4e89e842e895eda0cbd283bb5d0bf20236c62d956de733d60ebceb42fc0c9adbf9b69f8d66551b0aca0e260625ad41cad75d752a234af7caf7902c2c5b62f04b6a8e019a6179d44feeb2ad5859ef1c45371e66f1af1fe0de63997266c290e27f0dd62185c53f81e0a50c296a51ace7c90d9cf0dda8b2d7e72a347f64c44262e2a544d1acc7bb05734dc1783bbc1903279092fe7fe434610aa95fc2ce5fc5ee45858f5e8337d8fcb0a468464becb1cef6b7e5ea48ba383ad8a406df9c581f1cac057d8711fcb", + }, + { + "2dcfbb59975f217c445f95634d7c0250afe7d8316a70c47dba99ff94167ab74349729ce1d2bd5d161df27a6a6e7cba1e63924fcd03134abdad4952c3c409060d7ca2ee4e5f4c647c3edee7ad5aa1cbbd341a8a372ed4f4db1e469ee250a4efcc46de1aa52a7e22685d0915b7aae075defbff1529d40a04f250a2d4a046c36c8ca18631cb055334625c4919072a8ee5258efb4e6205525455f428f63aeb62c68de9f758ee4b8c50a7d669ae00f89425868f73e894c53ce9b964dff34f42b9dc2bb03519fbc169a397d25197cae5bc50742f3808f474f2add8d1a0281359043e0a395705fbc0a89293fa2a5ddfe6ae5416e65c0a5b4eb83320585b33b26072bc99c9c1948a6a271d64517a433728974d0ff4586a42109d6268f9961a5908d6f2d198875b02ae7866fff3a9361b41842a35dc9477ec32da542b706f8478457649ddfda5dfab1d45aa10efe12c3065566541ebdc2d1db6814826f0cc9e3642e813408df3ebaa3896bb2777e757dc3dbc1d28994a454fcb8d76bc5914f29cfc05dc89f8c734315def58d4d6b0b0136ccd3c05178155e30fcb9f68df9104dc96e0658fa899c0058818da5ec88a723558ae3a6f2f8f523e5af1a73a82ab16198c7ba8341568399d8013fc499e6e7ef61cb8654b48b88aa2a931dc2cdcf245686eed9c8355d620d5e91c1e878a9c7da655e3f29d9b7c3f44ad1c70890eb5f27ca28efff76420cd4e3cebd5c788536ddd365f7ad1dbb91588d58612e43b0460de9260d5f780a245bc8e1a83166df1f3a3506d742c268ab4fc10c6e04bca40295da0ff5420a199dd2fb36045215138c4a2a539ceccc382c8d349a81e13e848708947c4a9e85d861811e75d323896f6da3b2fa807f22bcfc57477e487602cf8e973bc925b1a19732b00d15d38675313a283bbaa75e6793b5af11fe2514bda3abe96cc19b0e58ddbe55e381ec58c31670fec1184d38bbf2d7cde0fcd29e907e780d30130b98e0c9eec44bcb1d0ed18dfda2a64adb523da3102eafe2bd3051353d8148491a290308ed4ec3fa5da5784b481e861360c3b670e256539f96a4c4c4360d0d40260049035f1cfdacb275e7fa847e0df531b466141ac9a3a16e7865947572e4ab732daec23aac6eed1256d796c4d58bf699f20aa4bbae461a16abbe9c1e9", + "33791b0d653fb72c2d88519b02bde85a7c51f99cfb4456dfa6f84a61e10b4a14846521", + "a0a7b73ca2fc9282a28acc036bd74d7f5cb2a146577a5c29dbc3963fe7ebfd87", + "eaa4d916d261676d632455be", + "c9a631de470fd04dcbf8ea9f4d8ac37c3988878b6381707ac2c91d3720edbb31576ba90731f433a5e13582aca2b3c76ae75ca8881a463ecfa789910d3a776a9ad4800521c6baa120b2f1afd10f32ef8da63f5b69f5e5fd88ee84bf66b0666b15d05c4050f5358a050b9d5cf1503719f56cd48ceba78f29efe2ae8092e37f5134df526831532f86ccb9339637e2c9e9b9036f83cc058fda23e826a188456e7fd3f4ee20f4e4a3221883fe3232b49db607b90a8956133ab95051c9ec33a908ea7e81a1bfa7bd06c09f0143d07bb23a3feeac7f0d7720269c93e2df19d03605828c8713b84d183c9a50954c12fe3b047511ad15ef03a63355520cbd224d06a34de67a671368e6a8f9feeefe48fc273764a8c69c00314e5d693f159cb5270544f3c4e1760b0529e3303ab308e9a6d03835a3a42aef2df5f7643696f707a574d1dcc676aeecdd9947ebe8c13bcf15d30b2d10d2cd95445a307c1d22d39450615ad38f9302c6eb9dc05764b0503d6a7eaff9feb94834853b47bc25660207be3e7c0e27cb3127b5402cb016396e5ff07ddc3df29861dd68a17f53bf660b23352b739d6da72381b8d19a9fc95da7efb79330a2b360dce4309860af429e3fd10cab235c4acc1d80d9e20d67019375bd161ab65648400f308815afe63cfc717f7d0eea150e687caac25b6603287d44dca4a7cc2f67c3bdd54450bd3170340253b03ba054ec003070eddf9c14fb9dc595e228e4968524900cb5d85af6d1e658a42d744e0e7eb6995023823a8dc33528c6715b2e1aa607782c8e1ddddad72026d657bf122ece8685f6e92236e809139325e4a3c069facf94c10b7896995bba01eb22c7b3a87ea2114a7649d7ed3e83d223e5e785c66a75119beab0968d3eaf0cbcc2d7ede95d024041e6db39a880ce3e19efea32fb89a40a2aae22f407e5fd615e51e48dbd50a8b4ec27ce95e2ba1928bf699d0418705482ed0ed7acc858dfbd690403c74667a88dd5221bb79940c6c4a268379c10343aaefb635982c14f33ad83d47ced9682961540bd4f75804d3d48ba8aa67fb2e3a1db83fbcbe57fec9e4ffb1b575e947f8bd8263c680357960e3a39382974774b5a013f2f8514b3c63c21dbfd314fd5d927d82ba616d76629ac018879f54ff84b5808e94af4fcfe1cf8845b65208ca5510b5b593ce6c109611652cd", + }, + { + "c335b055b752e083554b5aa2cbb6556cfcace658d5c11b6b000256fd89e9b24c1e62a2d5b582580acdb2ad9869020465aeeabe83acd9eeacdc44aa652d5cb24bbe542073d6787ea32b2b3c942d40f9db2bb75ed7914c836d902dd2be89840948d82abbaea23952cd648e6191ce5b6cf912cad0a3165410a781e3650b676e5340980eee3b484008acce6a3e9dc5aa96d775677b8bbb8b323c6e9747d6069a169ea904d9f145e29d134cdbb0118647e8fbae638669efb9a55d50ed33568749f5304ece2193b0bfa6fc9a570d209ef61b4c59a2b5485b5aa6ab47d902cf23f7ff71c5210476e0aa727a01809b9f76b6ebcf58a018b3fbbe5f42976111ba58112b1d322f9312da068cdb86277bfcde66cb3607e3ea02a1494439aa56f302671f1f994eb3ab28b937043f5f7f3b3de50673ecea5dee8ba633c45089b852f0d772892525344ede6b521dcad15807b65e7ba348d891d47fc498cf4d50223d2794c64db9fa9b9766edb430be0c38746ab317b38ba9870a6d1fdabb70fcf89790bfe449b97fe01f6c94502aa0889f0a3bb6bdc65f44d1cd64ab88d4a7806b373f5080f9cf60183cf4686694f0059e2bbc5cf21ba0c3e8046e70d815f1444c3094cc29632c429f20aa06b49b0b52c6c7aeb8e34f7bcb53e93c2cfe2d704a5d0416876742c90762730d160e1869d5e0178dc366098ebaf2cae6f1f7563b555a52dcc194a5c8f718d50d27ee76fcce8e8991f4921fae85ea9476e1eab1364403120698b7ce8fd0a49cf79213f360a17cf1950f104494fad80adcc3bb1207bf250d57dcdce6ac8082a312959672361363cc227310b66ee8c04aab7b5cb33a81c0915e9c770a1cfaae2e8f44a0c65703927977a22fe58aef2f366b8be9a50da9376b46ae7562a82391386831febf359039ac326891bc58c0f2c34bdb6858859fc3cb4e392df65cbe2ec4f02c8425bcbdd1ee2562ab7d229d406d79a9c6fe4889c996c2f68d1fb5bbe3a5e867caa4249b934afd3ec71fdb088c54b15252f9dc1b909e121dbdc7d8a16cc00836652dd1f877ce363eed11467966f7ccb8f1a8d48146e69e04ad76a51937ad4f9cda209451eeca90dbdbd65441ce20fabfc8ce400fb4de136154b87a8b65c92740e9bb91d78521b261f806a2c6279c85ef6ac5fe1ea3117ff7c9f9832fc2aa6fab660082eb22344c1a3befe0628b6551f62a5014cd6194c42b8d475a50f2c9fb58c97e43ebb29005ed7fe54f0a4aa10074f1154152a9067d364dd7863fa082976a00db55b26b5ba0ea40eff48b90", + "f5ff810a41d4b34751e9942970d4c9f26b33f24689a4b1e4449b243490afc485af468ff01a42376b2bcb949b9f5e8d0b917f511a", + "a74271c184a82cb074c14b131fd91eb05870cb7c73c9e511ec8140bfe2f34089", + "2403fe689e239c2ed261b381", + "af9be893d5fd23aab42e6a2e59a8e7cb13d4f543db02af87cb0802bc1af7c717cd0093cc8244994cf21189146922b69927ffd5745e57118bea07a6afe7c21d952c13ab636b3c2e461dc9ffb3ae701175360156338be94b1fa7115799831019455cfaf5114010fe45f8fb9c77ec50fe06f2c5a32423edccb3b2210ee1200a78e1a3130c567542377827586ca8cf0c14c19fa1449a2cce9c039bb441b04e9c0a3f9a743b31c828032174fcdb7c894349aa68f5adf97dfe9294d24e6b5fed95eb994397883f58487bf5c57b0aea5268be7cee9efeab370f89805ebe5373ab2e93658fc078955ccf68b554dd5605005751ee8531c35ca5336a5d0ce273370c0dc9307779b86e96d2d1daf2620d67d43e1fb7800ccf250ca3c02eb74047c1d2a2bc7f29fff8320301694b80d0fd975f834337d00d5f0e4215044d52aa4ca21e6a9d7e03f186d7cdd5c48e3765dc926fb0a46bb0f05c50d9f69c9c507527a60366b7dc251aae1d6bb0d9c73735dcfab959f6fd4382fe2a1f6ad07affb0601bb9040f81b55a48f6a6c5f8ac4a2acc2b0c9a6c439198f7926460695fa11e0b0b017e39de5cf0d5d5f84d972b5eee7b5d1e0343b5485cd84b92ad892e5b23f3e803f5b363f2398c11c15be9f13e59922b0d49902dc8483fb142850b4226da2fb84e9b434a34f6bb67f575a9e57fde3354bc3077a876e260311bb2481bb139aa9af55df5074749fe532d7b8a554218a90cc7e7ac69db280bae5d55a174dfc8d325b9909a8da1016d4e162fe5ba70cf8726cdf291f5e47083d9929cd5e32021cbfd982fd0975f6f9baf4322b553cb3174b11c007559879f308419ff9e4e18eee8d3640cec8aea082b90f69cf3c7676c28af0265c24c91cd58a06513198892ce6ce1ab3ee9ac0a2e937b973a9cac06a039a54f8d994c13d42c59187f677352e5feb32a417aebec4d852b2595e7e67450e06dbd183279e3b63022a3813b37257b085bf8454d6890875a2950d20210a8df4f9da746722f62687e92f0e9efc3e5d526d65ccfbcc042fcac7964dbe147932c73924bdcdf62f9eae58d29e8567ffed90048bcf0566b952e986efeae4c477944af18bd243c3eccf8d88c06d07279adad037450cb8547a8aa0a74223f4851747c803cb21a2dd027e7080aed75038cdcecbc4639d87763cdd41829a1b72cedf0d722b180d0d492a5740ea7607b95f3201df352fb1ab28149124d2df5d5ec106867897b537302c3431402348f94d28eebc701ae1b49d10adedea38f1654fbc48885e59e6e6dfd413c6b5a97d8c35dfb07a6cdefe317bf61cf91", + }, + { + "4aba5a776ace38b6e2578f0007e770d264e39c49f588ca3547ad2888365e3a811994f8836330394587c8458eb0b6611499fd5d8e8527c3cdd4ec550b4a8f8c632384e786b420cb3be911c999c72aad60270aefad31b27a069ecf11e95e9d4c81213308d554d3103de4d9d6ab04830c2b8dfbd8bead52c44c21d5357f72810193b5096809dc7846c1521c6c569f78812c735aea21acaf6dce84a24df7234e8ad857f3e1346b27f5bd436113e2da950e4deff96e9ba8db692c7db723a105ae795da15b910c8286cac6e7dda8c172b70f61b07dfd58596684d61da8772356f180f74c1103ce97cd947eab3d401df44f7fa4cc7cfc25e280fc002873237e64a375b0b4797f4b4613c9f150090f44588ee8250ae44aec6546ec8dba0f0c1eb281cf66fa4eb141617b32b28441f6ddcfdf02d9c34cc62893b2b64dc2c26b74433adb3e888c7fea07b19c8cf39269c2716b9c35b7625d4a141397d6d5034b193d2657c6b2d6b0ba874c467adeaf3d501ad985d13be21c4ff6b326cbb671e4f4973bba49116a0399b6491394f850e4122969e4644c00b442b3da0d6a4bf25ee22d182b3f822fd83878ebcc713cb183651a67ca66677ea81b58b685a3a8e385d5fbb0147ddfecb558d881c914324c794db443b31bc15c361912bbbcba9e418f99f2a416d190cb29684df27c7f3ff6ccf339800efbdc4514ee00d1a89f12373804db4fd66c1affd467f251e73147b3248033327b0f7790fd7861a51773dd4f78b89e4e24b94df9203f4a077091bb9411eec78dfe3e1dfbb67ea1cdf17e1d6936bbb75b74055495449e9cb52f5749404610cd444fea3f0568e0d35a5ef0c395ab7bf0208044b5c4e2517911a9c351efd31f33220972287253fbccb1eb8f46960a36b68a7a6b4f5cbdc86d668bbf555fde8881e7faa9594da425ff8fb54526bf7cdc4af64899530561c06bed7fc04c5d48cd4542779e901bc48fab79d4d13850ad8247f51b9afa7d5a656ada25b6376d837cb0fa1b4016dfcfc158a39290f43f133b352ed52fab2f951509bacb41284fbdd849d8185fb7e7200f8ab2a07ef2b3b927e18e568dbeeba2c7a66e08cebdc6a6069ebe6656a586652f3905ae2bb867529af6a827b494c97b3a378408f44aaefbe86c613e11e7a44020a9ee4b62569dfc4c462300daec7b1424ff1c1849ca1332367470475c14877cbe76c820cc651c18ab3f18852b93994f93b568dc7f7b0eb5f07ffc4c9384c851fa9071c6f68ddea1ccf627f889c0471c76aff9f52b07ab1b86a7671a2b2f6b25c0ddebb66ac95737bf7e2f493f7665b5265eaa5166556cecfdd3062802724ec24f3978b903d0f0c24e1f0b8d967142bccfed0d354279223f4c28684e9ab611e9ef89a3f25993b5a8b3c0354931780501651236a78b58e7d7814f251b053605f4c0a8e7193b9cc1ee5cf7378e6f3c8fd44ec57bd91e62b09fb1d6bab60cbfabcc6792e6a32ea7918a9ec9180d05a7e1546d5d2d8bbfde2a71b4e427c0a4d28d0b6473ae", + "921a401db90935c60edda8624a0590d5c46eff3522e35de2872f6f9394e24126fd8143b68a797c995624fba0298b75eef974", + "6a4d35ae03bf277f587da4541dcddf92bbd906dff45d5ff23c0f60ae53b062be", + "231b5780fedfb06d724450b3", + "ba40968282d98849b19d867f8b564ea5a81d657516099362926bca4cb6e9ae02719d10c8061f53008c727a0eeea5e1e36c9e55c117e9434e213316c96840231a1e356b254a9981d4a6ca3c66cfc61018bcaade1a4486506559e6aa3a86bac980d391d835fd5ded98d10f1394d84bf1bbf2cd3397890d704154802f7864ecc753db782fd3d19213ae65ace4770e1bacf32d61c6730aa5adcab4d7e2e437888c11c29abba4890a17a00f67a53b660becd94092df0598df5ac57326f6860593a519e28bd4a39f6481e1a4748881fd5f0456a3cd9f28d1d1e78dc64030cbd8fdb2c5abdab3f13d6ccccd187e71e989f8c486929efcdbf2a763effa95af62db5cef95e9081b818275c69267022fda4b7fdb8c650b491a785b03d4d0186625962b6326ec3f4e176373da4dc1f83a14815adf82c6bffa7c6967d77528d0249754bb4d17656bc4a89449b16152a4a1aea7eb0054a8892f271138971507d2f3b237ba5b620f444544e4a8c2b1ab4f9168762c27478c9f776c47ee2e9ff05bfa35ed127f0cabe7cc053640bb8aa01f8359b74bf89ef43ca94c48fcd201eae39d1835957eeccd6b3a852f4e1bbfef9a469f42c764481ff8408fe5871afeeae7676b58f4202199aad50a596626dff97c8e60d750cc59da9f595ce12ce9afdce14481cb1e39994de8fe4cce07845110d6703dc59d34734e93e9e57e1c52d61f44143a2d290220a4bad5098d098ee65ea4b6757d8a9bf5485aa3d697a7826d4a285186f5da10eff707566c23c6a15033365bcb498c44487c72d96402d1834753fdbf86770239761f03e0dc8963766441da99c0813e4f1df5a1d018c8799861a396562eb24ce305ca15f4022d83ea3c56b68d9a7ceac4742ec0ce50f4d36273df26005ec2b051fa071b319be2d8a5ed26eb75bc1ea83761b8454db234d15d84d6706cd178981c1f156e6d28f774aee3e9a4fade022e71b52b50aa532b8bc7fe464f22d6eb169c69671875d614e987658820c2f584a4fea3008afdcbb646dba3d69020fbf503f121be3480344db23efdda0d255aa058c3ff66abd3a5fe35db977521608bba7eddae72ae801f4fbb12a1de4133039e046ceb8db87e465e5ede1d79a08c857d59076d7ff858942c31e15cbbdae6fc15c3f9545a0825d6ff8583c0aba8a7d143d27b93f6caefb98c0d83bd8715abcab2a49087f55a9daf9090eacdf45be08ad80b5df5070e1719f68c4cc8f8711083f0f7823a09ec092f22df95fe9e95114fdf82a3f6eed0bfc9c0aa65222609442776154a474dbc9e662cd5dce66846572e52417ee5d7eb59287d07ef60a9537fe1f85c7fa74fe84dea0da235ac7574335e6649b54a6bd33397df4bf4a7976c4ab868aa702766d2bc8d2c82c2d1c2653fc8428b8d1e61852ac185a3a0b416dbcf8eb54c44967ff43c44f2b32c6d4a9dbf2c2f3a587b430aef50f0375cdb4c1b319ac9aca486d9bb321141b065f52f7b6decaf1985531ca7bbc3772a561eb1efb8a6297075920bc432131a5b211bf25e35fa31e12833bc77a9de14c7", + }, + { + "6c0056937faf1023032df1e2bfacbbc58bb022eba25ffa020d4eb26f0caf0678af5d0b2f0c1b520f4843f107f0adcc7b5dee66ff4d61025bafb4cabb64d133132e3e423a599549a1d83aa8c8e774444462aa44b00b460bbafad5755ea6a872d4e6b40e3f4957e0229288ea79fc2ebe5fd9020fe4481a9f42ef14a196bd136aa3c779e311d0c333624c1ddc484c9aa7259cb609e4d0a826c0bdc7567adac01da23900b30ac4e66c100348584fe200747eb67e6287268947e3509d5d2b5d7bcd977b80a13f660d4f6956a8b938a82db75eab19e5d2a22cb5f3c9131e278eebbe096b5f49d16c983ac240f3fbe821b247cccb2c9e6e59546122677f49f56a07fed56647a6d3e0e09520d49009f54250c10e7c607cd5b4ddf81b5c4110c6490e9baf56418236211856f5a85feaebafacf92c0c7501c052f9dbae3beb7484f90f334f50b68571cedc67763b5161ebfd5a1709cf18c92112a4cf4d8f43d1895204d8a2ba5e14883a7bff75cc6060cabb77d38a909daca2417befd1bfc05a11c432b47f90c807ca4306400f67a0d92218adaca84a584a8bd4395c93f9b6a4bde9583c79204444634a8473b1244cd33cf980e443d82ecfac672b3f60e2e41ecb3c5a445d9e88c0e90c339a31806e6d79ee52bdc6808c73e8b7b24899966664d3c1a9305f31f0483e24e36fa451dc1d3f2eda05af6678971e2bdfb7c1461c9407c5c466f6b5af34d992a37de3809a22ae75275ddba0f4f9cbd4b18c1acd212192e587889a36bd73c860f0abe08bcd8f00f5ecdb95e1d560b586eccf530df0e5f3776d8dae2a01768bf1226b7ceffa7ce4e75879c82dd97db3c64c06d33cebc6b35854618355d80e46fa79c3e9743fce5b974723c421a077e7ec7dba286881dbc1d53d442a1552700fcb33f83f73c69a0a0ebdcf2f5d461649c4d0712c514ded268a31509f83c1ae4ff4a68e676d29727be641aa4487c08d4b90ff78e24c6508d69759751a1a23690ec9f8763621e8b107295b4bb01bd9fcacd8748e24d996fa70ef6f8b0992f4185bec8e920d7643159f9f604fba394b6611bff435998b2f097a9e948430899c8c752a1e83a061983f00f88ebb32da214399167932a1a83c1b47d09f77593b03cf6521520583ea4483e2d33e14ad60584676d1791779b532c085d238df0d3bae735d0078e0eabd63cc90a2e13d023983780afc8f83b1c14437937c16a1b7c41414c48cf4ae49587ad9fa5b16fc949a749e96032248c4667f58e295f999590dae1d99a2cbe3fa45bcf4a1d3f0356d64d40367f64b2c5cca843e5f7dd7b88a85d52328a00622e6c317879607bc036c9006d38652ffe21c83207c00f8348a7d0aaea5aab4c89077df170de6d41052641726eb6925cd85a9ee01a9e636346340e209ea96d17b0eb0921b96662ce9cb430fb6ac348331dd7133875769bbbba99dc49333950e4145a15ddb0789c4d2ccd38878080ca9e57ddc6cd5452790eec45482f8e990392e319609391fce0beba19463a9a00d8f1de9fbf22f23821de7d69fdfbf3019ed61aff79acfc5a6ba663a1e10da2b9ff7149aea43bd6c61a543008402309df0924de72c1cacd2d6120cf422e61fc1de345cc8771934d8be77d9437a09e06a9b2d51c849fd9a200fa714328d34f36b684f33df6968b827df916a599a4bc3367814fec21198e2213ff653cd2a463892966c72ffd42a26b3bb91", + "0d55dcd08b54e58916f622f81761ef6a2e19b167ac47d3", + "e42e1d6d44138f3d2bf12c951f454686f18d590fd30057405b5a3dc2b317fa97", + "1e46a7486c5a03fd6758d938", + "fd3c1fac10cc82e49235fd57f5aea0ee7a7bd6d539b138d4b3fb623aee591615c1a61228ef9673113a3a90a3687a12d4c6367d5f7bc67d422fdc4106455084d79c2c42c5e86368dd164bcbce7925bfffe7d96c13a2f49aac8e9d1ada3554e3fdc21aab00455a0f33b0c1fdea91b3588e7ad301bfccf9940027332fbdf966463491f7a33c093e0a13831ea9d2183294f89f414cf7b5876af04fa68d594430194429df74fa5915394427259e832bc545c13400aef6cf16620d48280798a6e49773c9316d79fa1dc758e54cde2e2cdb856092d83f4e9b698385cb976fd6cc2538abe055273a5b34a784182ea5e7d3ac9019a05de5e5afe4308a7ed2d363cd50ed6a52df1c616e4a82f607ced768445d13ae4884f2ae1f9fd8313924e8a1a8a23905c92eb231f638dfa6f4cb27bbb9844e05afbbe2ca4d1a3b3a5b371bf33c9ab6f82a7387d61cf8bf662097624145a983839b0cb9f4bd07556800b4054fb3d0bac94f44bcc9b4ac49c39f5571fac4e02ff09f08b3ed5add4bf8bba934e9feb773c0590b45c45fa036382f3fe9782ad19107d4630321e414b7b442b64f18fdd5219039e5740f34b3ce8925d1afe8a39e35ce8db086060bab63b9720700499f82db19a62897c6d845389461260303f9cf2bc7235a898b4620c2191ef05604a5c8c783d58009533a86b27c12b0772635d34ac53993ccf174c9087073e5e69b26c0c3d9f768507ac4d4e2af847b65e3a6e1b7a6dafb0aefc190871cdae6c60f0b1d6137c351d4cb211870791cf4cb8af2ea446f6401eb9ec8a5bcebccce898d1dfb13454df6b35b81ed6d7637e6e261e004080c60944f3a08e8e5fc7e2e4939e7c2607c8cf07d1d10883ba3ad43e2611826f245df571857ae0a7a867df9659f2082c19f94ce400132e48c7f8de2b102c7f83ba5cd1e785597a0ba0d73bb81bba0c00300d4bcd6ec25fb73105a46122873bfa729c0979d8d314ab7ea52391aabab513dbfd1cf01c2990c0a3612f4511c2bcf0f5a07e659a881a7f99c3f1fc4a46e66904427fe26a4a80a904c047d090c861a075c0ae4e29bfbc18b9620aaa42237f4c6fa76ee7491ee638ab5f1cf0b440759828e1ec519679efc776eb1468999a00f667e87199ad6891e98b95fb682e02517b024a6bb803ed23c944010cb7bad0733eccc12d6ab6030c6e88d510ce92e2f98fdcfaa1e37e41fbfb4e99589c0e8efbefd40473db42b3a73b57b22a2f8c9bdaab16831f1b117dd83a77dd01ee8d0c2e92203adb670f4fd65e618823ad196220d70e014c1aafd8863797c61c16382c2600062683ed3a180c70891717c52da15191b02f25d1715ebf33a5e6037092421989c942082f4b836423cc3e976c9bcda185de36f06265dfc250a27d2de0bc48c73b3bff704f3b386f962522f572108458bdb283c6ab3fd33b3ac13a406268fd5d97e17db9c0f780b4b2a8f761d15a4d8b3a0cd73357ecf4d26a6492ee069f19325823ef50bcb2f73326719a57b67eeef506fe8915a1b1ba1a637592268257b91e9c7c5d33cdd947967efc1952005d82ccef9a3ad7ef8ffbb6b658983d64c51242ba53f8f8963245b87a25aa9324c527e53f8c11d55f30aab598401589acd13f090541b3b057b162190f27910718b02a6b8ddbb8ca6cf40bf0d2848f4b76341bd5e78f476862bcdbe2d1bac84c0566fb45b21388221ecd8483d99fe603646b1a9f38a49230cf4dbe5d7883d73eece01bf", + }, + { + "04892b94c65685f2eba438322b29bf8439938590d3e0eb10a29e279d356cb439f6dfcdbc3552af21f7e753221012a649a52bda780bc589ae63b04b981dffd113df9fcf14f17e35e865880a769bb1bf40dc99b9e85e4296c1f2e1590fe02b22bfcaf2d4bb7009a4d692ae4c2d5f0b6d3ca526240368bac55b9b1e6a7b498d3b137f0fcfef1873c5aa2111d7811d45bdc26be1c5d49b8a2f36a999b1f226ec06a5fbd59514485abe696c96ea89dba74b4688101a239b495944e30b3609f73caff3114407599ec5c30a5bad933655de7dddef97018ae15acec46504cd5d417c5052c057ac5f1c6f69781cfdae71db2b4fcac35054a4aa22681027356d68b2bdba721466d130d53ba8f23857631382b2de450232e9ad5551bd7c872ae439e79eabfb057d2bdab8d4ccf02b3003ade2e1f3e514dc92692e4fe5b579c9ee6067995b6c168647ce5a13be8543c23326a3260bb7029d2030ec05e565ced3c5366d20a283a6e95201fd108640d2b96676df712de20e4e12fa53f85f22cb24583844fabcebe40eece11e7221f12c88670bf994ed08e2000236f86258c386b0fccbaab8b68ec6a26fe41491d540193c4c12d1391ab3391de9317f41f505f1f1d09ca9862a6f289a533d2b297d4465c956360371ea3c8ed36e0d1563120654e3a2fd69cd6c9267bfcf92e84cd64e162c84199d6e552b42c33857264b5d7a2e007797cde32934a3f8c68b459cd95bc85e7466ccc9910e8dca65b315c32e43c3a5da908904c42cfc8ab74126919ceeef1054bbdae6ca67b02f1ac5f24808b5eee24577e609a3e3935a24b9ebc1a8dad1fc96abe26012928f2d5782755f3763427dda28867d0b1ad830d3c3f17b9ec278346e5a9480ed23ad44a523a4dd86e65a610ee0de1afab64ace7a3b4918fdc14c6b1ce0ec0903994da9bcf18643d7e0a4e6c08200bb394a89b385d2cb829417eeb0f7dab9fa7306a330f82973cf0917b5ca99b585d2ff0e8584e050077467f5245ecfdd5942e4fc72dc26e5ab2ffc61f996167e68168cee9a6d3ea1e1a696060465e35da8c75a1aa380004faffcb0a992c627fbdcb4e97721271802cdaf08d214ec2fbcb389d75709d7a6b9d35662661c8961f93d4a705e7188613f3769114c55400809cadf60d3b6068c8a5ceef078785171b59be1140c6a754ba1de5ced349df63d67d59d3a8ca3c716ffb506772d57e9e3f2caf7fe346c4ad64aa6c37e43b9bbaa8f58e51bfbac31fa6137728f8e5b728025697e5ad5c8301f6ff39eb2ad595d3cb24257adee88a84fbf1ade4d7550cd9ab94bf48e1424ae83184c35c5a5920157d45805c2e0ad129fc7f0ec3c41b9d6fa04cb8918ef379b0783d1cc2863cd80382585fa05320ca4f9fd90353e490b384ed6c166c6f802cd7bd39aa43667246e8da96992db7537d472c709b01114e95febaac5b1a3c77e1e9a18c2d180e63f0d8fa89f6a1ed63e909e4741af5c2a0e47d4d3f8779b7696358f58060f3f461cceeebb390c92779d30bfdedf1b08ed62dcc05a545bd0ea915f42976e81dd8a50cc4689d8d8007508bf53e7da5bd43c3894968cf0677681c6b818353af6bf8ac205139add1310e5d363ccadbfa0eaf735808325e7f9a6aeb1bee3ebb4a27576a88811859c216b6f84371c43d8063a0d87bd326eb6d81c6896ff534ba2c9c14a51d2cfedf33a5c787279bb4a7ff65706b389756a6191d2f791254233ee047d40d64c2dca878a42f903fd4382f39a89a723fe11848fe37b2008be53f7c2d037981d6462a4eea49df1a2e074957afd3c9dfb4d218a309cab395afe301ccf", + "67b5eccb1790babc2dab5e0d1ff3871c3024177d45a2ae", + "259603e1c3af3fd0ce3257eb627b02e0c0a48ea2f175de3d8c36570a445e5369", + "e14de73c4b17581a7e0d0649", + "33522e67ef932da5fa8abe628b51f3abd5049951dbc982ea95b7769652d4830c588fa45e3fcff094c8602b9008d7b2f9bf6c1c4a8cfb515401c7c44a7ec42ccb967722a710199e121a41160b1ec581507e9bd2e2e506b10c4b5a8d6977435aa08e27504957cd49e756e1574c4ccbbdde937de35128b7ee3455d2e665c596c2e97c253c94e405f85eb5de84874c099b4a97eb8f492d28f2e4bc64b228dd5984e76ca08376d7f1355ba8e0fa60fca96635075417d8b436278e0fb91e3bfc7d61ca8c7407086933c061b2d318f46f352099e1d317d6c44098539d1d2c1b7894db668e7a82ff991864fae236570cc420a4229883f1e2242d05aa07e175bc6abe11cc643cf1786a4456a2de8c066fb1a70fe387f149ffbe8cca7b110e256fd0c09b1d3bd7381cfa82fa700c8db1e79809ccf75ea52d0b349264557046e8703a191ddaace00ccfc513db5e78810eaac0a99d7bb1a5725e722d4e595216a0e12f3a7aab2e623ea9e1dad06169914bcd51b643016fea7dc3f2743b1e65877f1fd5581bee5ef206d86494a587ec8462a170746fcedb2c9f99090674ee687382711b4610ddac599732453dc063518aa36f5b4129098fb9fddc02eb8f8cfc2fdf0d904ef4d6d06014f977b29d0e9aab4044ce9c662a18b1a8db1ceea97854e90704430fe9b1046b221b27ac79054fcc68c3abd6fab7da66e255ff0cbd0506c852e961e619615c944cd9a05c25abb63742f5da7bd9939feb0f2f2208c8ce82f551a9d4d70e935dad018e3e4e6998e39670221601c3e34716ba75eb4e2fdf53c4d471c444330514986de45cf44d77f793c17e36a271fc65e6bf08943aef4c66547dc310c7a430e3fe7a54898de48f69f282f52bbdc4daabdb325cec7ab66fce1aea4e2fd932dc1a316c821f5220ea437447feae2fa478adade7cd515a27d8c132d0299b3ca1bc8516c9d9e7c65c38c238c69f03e104eb42a29cacc8d79b808ea6fb233a5056201e3697f81a2d49ccd8b8efd1ab0fd407c16a210767d1d3ca798ee53a4bbf1ce5090d321b1a64fc2c5f013c23829f5b0d2737936ca71595a1d02711c8a7b0e74654e5d76376ae26977dd49c68e3c0a7b36e047d44be42d732c31f681bd7b1b4b339f004ecd847960377acd005debfab13d0fb88355025877630aff753a7cfddf6851e8bcc8ec37b8f9149830f47e6b601098b2ba19a4c0808e31e8927b2525cb82bfddc9b4bcba2b46bbe768ee278fb89010243d16f9679f5ba4f13cfe76b5beb16c7b28daf99b0873098115c2233ee3402ac0f6c899a2cfcc83b2ccc06676999ad48017c4ace507080a26501993327ebdcbd1e2eaaaa99f4998b716cd9e36eb26b4573a03fd1d18047198fdf675ef4f979864ac85d230a011c69d8b6c45e9efbdc2a03f195c9731b4cefa60208ba845c0978e73d082bf6d6a513b93dc805a4f5973f4158f60a200167ca88704a15ac5ab1f38ed455a426f7c6a96b6bfea2ebc1ae1247cfe5ff29ee81bdbcb53b03b89568bae9a6f311d2b20e31c2d91bd18fd93a37be266d0de8015d52e325f78356dea0b77cc76f28e0f06e4ec705d1328340013a77b0b6196f44b7712fff4ae0ac7f6afab9456a95012b7c6d387285487476d189977e28f6c9d1a3f736320d61302c2d627d5a7ac8cde4988056b55eeba27efe7e640f94c115762ad5849423ae138c76f15b47bd2a2bde2c492489b7980aaf1c4e32a155f858d7be4fcd0f8a18e7b5d97c5a08d7885d6d56222ef49542c7f80498a14a8eed1c092543aac3439966d5b5d0cb9e602f4fd795c09d652b64f9ab67e38f48c88d18e30a9774f37e9c77b7a94cc7310d", + }, + { + "4ab8068988d4bbe0bf1e5bc2fe1c668cbe58019c958dd2ec97164aea7f3f41c9f747527f1c0e5fdb2cbb9d2ad704b6955cb731f14403dddb1a28c5996707635e4eb5dd6ac33d46eff8e319cfe7cf6443869534ca9812a5b23a6b4ca172afffc064dc2b28197117115431e03c00447f87d9b45172c6f724006270a1d41fa094847cbfac9630c3a785f488c1f5cc407ca6f4cd18bac43cba26ad5bfaccfb8f50784efc0e7fc0b504b43dc5a90a0525b0faf3c8b4b7046fdeb1cad87ec667ce3eb6cb4c358b01393f3ffee949030ef9fd01c1b2b9c5219777eb6ff5b1d7c3ef8d8e3bc2193dfb597cf942c5fc50befa527fac0b44cda2bbb811b06ae87459750295371cd232754e2bb7132807d1225950ce64949b0650531800bd0074177677acad937ee008cc0bbfdf33c6b0552000238494be8be412a3e5cfa359e619d092c76310a76bdcb22abbe6f16b3b116b5f95001d20e42fc3c9ff6723e580f378475788eec265a1ed2087de8cc2eff72184f73fa5dc6e68a56dcfc85350bccb97135386d5b827c2d9aea065708f5c921454d1b9303f21d5adf19e00415acbd86d1e5e42d78505b033a515a435713649c50702f54623cbf31469f355c3be2e30dd8c72b4127764451d79e952ea1f9bb0269da56dc07060d5d9542a9c1258ccefe53fa3f7b6073cd38026256b45c01b6c5dc0d91e3139f30a8d1da7a076738f5bb23352693a8e3cbbb46226fa22416680013f9e3278913d06aee4a62457357f0a68d173a360af5e1411840e34c574b4c6b352f92ce33632911ad8b6710d357b7607ee19679e777baffb8ae3c0fe9786b2e97fdeccb5105ecfe81441f549bc6b50ab84b749fb33f8f6bddcb6bb733d6d5dbc4b29725b8741439b8239e53fa435ea29ed3324202b1bdd07d1987b0e06d8cb51013dad897ef02401290940ce3f2af72c5d1b4c8836299008c10b16c7e3e119e41ec66d9db6929ee09bdeaeda08a50665c052edf77b7dff3d8815046bf71d5015e3bdb29a4f507aeb2e28c536cdcc9b8d1e89849a0683d78f99dbfa90f94aa5dc08587657a8f042d718080de5d4a973f232f78c387b63c7143fc2a4380c491414a18b6c4a7bae2194b62e798ad7ec7d09e409425f6d0973accb17e4d860f8ec0283584cff076d93bd9b0c4873f9c57cddcebe3c3bc8afe793c6cb6b26c4582847b07446b7e1d9757de6bdf0df826cbc502bf88cf3a773866d3ff293034abc4afa3091b2126a278f50e47f2f66ebebb616e342098ab690f7f5828bf8cc4742c677d378893e9f188e8397bee983a9a0998de2a31798330f8db59a8581e1c847589bc0e2d95ffa68e39226cc15cf6cae5c4f5174e7848375391dfabafec202565ec2383721339f04c5c5d1da953d88f18cda65745ee8e99805e35203a6545a0416923b38c5db3c8aa00d64354bed27d7c78c4b257534bd7a18107ebe64d8c27b6afdb330d8efba79fd1fae480cd51fd3626bf8d79fb651b7c6cf752aa737a5123558420d48fc86451b358d270aacfa6c17f343b7a9956e6f64e4990c1b3f1e5097605edf5ce4247819b19f245e9a90758dd42c36699ba5cd7f3ed99a7df7eb155749f4b42d192c47cacb6b2865fb9ef2cfca283865cd06e40cdf7f89d76a9e2eb393e2e0ac0e2776da929f3f8e3d325d075a966d289c51347bd0bd523a5c81edef63ce9b72f5114c88b08b16edbd73f518096240a5b37421843173be8df4ac7c587a17ca6f2916f7d9a10dc75f81bc778a1eb730d12b51555cc414eab9c066113a7edba9a7f1a18092ae47f12f0368ba211feaf34a3b48a7ff5c91b81cf7c95675a4001c95a19d284fe4197fe8823909a123fcec5e45935da12416be1bdf14918414ad19b54a41052f5b8417ddbd207ee01d6a3e62fd9b0321b1c13d91d6ce15ea7b2ea0c670a5f5cb290ca8e62c26c6499104ab8e9fafb05170ede246bbf7313625d1fc9576f1609ffd08852a2f4b73c04f1f4eeecefe3f3eeb2185a618b6dd3e87d9d3fdcb349cc83c21f26b6c662bbb857aa95378e991640a160a23cce76153c134508c68ec54a5", + "0d471079ad3c3432b6de852ec71692d12d9df4f984554d458a9dd1f28a2697976da8111ae4454c9a23d1c8eae75bbc14f8b00e7c065bc290f8938282b91a1a26c22b40a6708c40945d087e45633a595beb67d8f1c29a81", + "f3dac58738ce057d3140d68d2b3e651c00ff9dbb2ca0f913be50219dd36f23c6", + "bb2d033de71d570ddf824e85", + "238c4e6be84bfb151557327095c88f6dc2889bce2d6f0329e0c42a5cd7554ab16c8b5a4db26eab30f519c24766b1085e11d40823053ca77adfe2af387b4dcde12bc38502229510606ff086265f45b1087375dc4a022eb0b641101c74ad566ab6f230133b7aa61861aa8202b67beddc30dda506691a42032357010d45adc7ee633b536a2fefb3b2143837bb46db04f66a6e2bc628d6041b3d306ff78e96205ab66847036efa1fb6e6a387cf8d5a105738be7163df9da0db48e3d8fd6a786f0f887968e180ad6888e110fb3d7919c42a7f8c92491d795c813f30ea645fafcddf877f5035f133f864fd0ba1415b3d698f2349ebe03d9e76610355e7fc23221c5c72b1b2628a40b14badf93288fc4abeaff5306d274f21938650ab236a39496d3f8a6e9086eac058e365d4335b51eafac813f9175bb7bebb75605909ec3fde6515694e119f7b6e96aa1d6d6454c3a7dddeacc83bf0c1f5f6c2a9dd2f460f3e5b074a33b8d7904e6988ae43a22a87f0933f812e45c4c518bf83e606bad4c3c55422ab2207e9d3cfcbc5819049f55e35b9663273d9d3a6f8a897fa38b0dca77eb6c344290cc007b68d913187f2cd480a40262623a4e95d90d5701ac2b9d858d70a27f0672f919c2ded1fb89134ac9a8ba6ac62931c832372abb70e811dc50cce264ece65e87338231f18ac007c5f68f3b1c5904ffbb2e1dc361d53914917770d66afe28c547d8cd5896d892cbdadc34cd6af348c93bdb8b072f38b085361e62ded7a38b4368824c759ec7d2cf4caddb9191e5deedc8b8388bc4ba2c0672321bcda3a7343c9ea71ef03750912f35624d81da5fa8a6ee676c4efd99d0c7258b844ded7b35d8c8233a316b508d79c7c0b3edabad5db9543615179b1c111bfd78b79327ac5b4155336d670baa592d441c810cb1b7c07f3d35473a45b57e780b7d997782aeecfc0363976fb608d6967844ed00b63ba75996054d090aeb605c195b1ff86f9d9ab5892d27632cbb59c06b3ccd69d33ed5dea9398f00b7c6404fcfe2fcb5924e4cb75cbcae0a1b084ea8b15eaa5847431e9ab70e4afe15b4c82239f6165e243e3b76d6c91d23b16edecad8bcb16898641f8e323671452034a8ec9b42b29cec0db210bad0444f1c5bf3505cc41d514d5a270d556f0a34333bd06cd6509ba253a6ba7a6db8f1a60c99f0c3d566a038a72f1271a178cc3ff890b0df1e7438c0c1a12d9873643e2d7bfeb92379545de50834abe2a345faf7ca49beeab87ee516dd8598b71196b8cdb15e7200cb5bd814338babd74c565faaf33d9a8ed4209b417345a1ae611880ea22ab2e894d5d14a28fe3835d3b2718125f0e6daabd85327455646290ceab89e579ed5e1d72a0172e4a6d8da70290b5022c941f3866f96cc4218de5d2622d13af6dab15760a1ec5d10918267f9585284058aba611ba07b1d5711cef505869831699bedc2b190fe1d578814065c91d87a8c8dc9b0d4dae0c80cd241f0bda3a6d5e714c894b7a48b1e5eed4555f103eb03c9db30efcb855df422d7451a6d70f28174c7ebff536dd2cd2891f6c3f264d632ca924c4e0d84b37cf8e06e6f2e29efac6cf008cc27f062441278dbc9f09cf44987e0e9ca088a48437b0b89efb9cf00d3d0c5fb449fd4b64e21dc48cf300c2d80a502cb583219f1881e78e647783d91dd2f3b389a1594eefd8ea07d4786f983d13e33cf7a34e4c9a0ec4b791f1666a4eef4e63bde7a241f49b5cf615888bd8130743bc8a6d502bfc73ab64d1184ead9a611832b7e24483a1a0fc475d9ff6166b86a18a3dc96910ff182cf326456c4461ce8acb3467f801890eaf1ce0b24791da9c650876e718c0bf43c475174f9712dd4a228695e8f8b2b23fc4a06358b4a6a8e1afa87a0280c3e098f218f7a6d6bd716f8c105a7eb799ba0220837fa5a96c8a22a826a6f7ea9d7216a24acbc7b0133210cc17c8190507badb421bc54997ff9340cdc1ee415126ac46a4fec9fee12d40f06300f7e397b228250f36d6f0d2ddad5fe1898ea690e4c7cc3a116a70bfaf6d2dc996753fffae40ba5280b8356b7ab4ffbc914ec74eaa070581fdd1d9e5aa2", + }, + { + "4d81b652fee892d575bd13dad913d976cf0517c819d5183a72eba995b1f27efe743451721ce34791a15a6b7a6e44f13d4a080563dd1d9d4f0946e5ba3863b9ac970a1fb4ed66458ec1b1092ff5fa6c3f0271a2df8e3f2e97851352be760b6a0e1589c202f00791b1b89ae0ae944ced96bd90754bcfa3e355b735132d407d3b5507fd57f705e8a8bd82886b16d459ac91e921dcb8c5bf0d7cf420a9349ee589a5e2e19ce7c944a54ccc1062a0690f3152300d0bf5cd1871c1391bf6d7007f7ce26018ca2a5c6f76287fd8c8e9e7f93b1806460dd35f7f95989a8b6f9a0aeb7c6b0346955fb50b8735e34f1ecb4859e34ea0f022ff6fb797094206a34cf120b7f4664c531c57da513b296f0671c8e9bf68d9e1674998fe52da04f627f516dee97c2b3c988216e9bd3f58c3b021ac70898651f1cfeaef21c4f417ebe92dcad3aaf50f4277262c356584f816a5a5862f2bd720fac10f1b86033371ed603bc00a30cf4da8f579dd5bfdd571a37af7d2a5cef29f9001bb1605ee87f24ec3b259f381a69b771f78d21c4e43bfc83a916e08830d9885c8ae8ab6367c05f92e5eecaf0488262300f83f4e3bff177590857e149216995bc52311fb9f16f4cd74e07c7868a39b699bdbb7d7dace4c6a53ca7ee6e11741a63a52a1d96995a6dd752356dec6f14761ccfe38a6cd8511204f8f0630a747d6e19a77bb030c61e0828436604a28a7acf4a5e49b7269ac93b93b99e9e2e1c0c47b377f7e44e05ec6659526afbdcd5bb172404ce5a9f8786234114c16f20cda6d4359eb873a4a4d9fdf734e9c40aa4db3ea9a98939210f6c62142dd144eb78191116d194bb766ea96da38321ae27fcdcc196560ac75567297984fabe6072c771899906350f74de6d18518eb6898b934b11e945d94ead02b821fd6682602e03e9c70a1ec67eed33874eb24dc83dd1035fba5928f8f62ba1282907aa8935ae72fcb881b3277ee6bebda8fc75d6cd792677c25f70c87b11e094298b2d5f39904be211ff0980e5b83e8ea4a455622d8be9efdb5aa8466c88ea861407d54d98112faa10293af5e16974861dc9f83b45d21b112cc367894c421f5049e49dd205bd7c15e6a70bc810704e2e3a3659800864912527f8be743acdc474a26246a81fc2bdf669b9be7a2a0c986432e1e44b5675607e7e1ee2a8dcb72d8f1964272926e52f909ede0ac8daa32d1d850158db76b959e4d83c9da4e3bb23fd1f5b26463045d6cf13d187fe74a50c09a654d52d0e2f01d66b9f8b4f4aaf4c69fa62a02aa876f9bc4871aacd26a6c6ccfb9bea09cafbd0268b5b65d60aa23ff504d02fad4719698f8b044ca1bb037ea6af58a06a448080dfdbe6a5d698d5db9da5fb4aed04a46c8fa8b93153bca00a5bf8aab64d2b371d072db2ddb688a9442e948f0b99236828dc115a2fddfa2a29e2d4e02ff0173cf734efd4eb687e3f8712be82abe1fac4be0c1eddda090803fbdce41bccfb58c43038991ba1074b281a09bac5eba58a99a1a9678ba26f8f9e3c63ba095f02cd8f3b56aadc5de60477efbf3dcb54b854f651cc72042bf19268554c61b44f2f338a75de56c3c45b3ba40a697f5f21c4557380c777bcc91a151e5676c2a59606200bd476cf98d20b4cdc64bc3b8670810a014871be018bc32fe239e287cfe8a7cbcd1e8b55e08692ccfb4ef871cf797bc0b1fd7ec37931e35b6bc5d32bbe7ae77b9962c179f96436e4a32f566298d2235acf921e38c3f1942fb7674b65e222d17b95a2e58f072c63aa4bba1ce48c303f4bd24d84963f18c5e670015c52342dcdc9c0b348c7dfac721b568effe2bf2f2e816ca3279bbbed823beede8e12fc5bdccd0f1584deb1f6ea1875e9fb350919b675ccde0178bb83a4aa5232bd5e8e9a1b8daf905c6197367a0d106532297ef89f3bc690b48224592c768bd9c50a63d0881370d475081aef052b444744b33fd3fef674a37898fc950f887ed482d2a51ae615ef5b1dfa3a23257e6a6a319a4e2080b2c4094bb09e4b390d1fcbefc4d6c5dab620f8b05b1bd5d976300b007e2b8120ef8a6c9028b7d925c795058c6bdb6711fc5fc2476b9810d1d81bd24637537716edd3b7068b802c531531df710d3682f9865530e1ed51b3b56d860ba4e972bbc74662cdd1e2ea24f81bf469193afc02b14143a32e9556e3f2ecef97c65", + "2538d98b64b6aa9258f9141840a5abef66d6037a10356366a3a294719c10d6c148b04cac66f63ebff052d730f8821f5e5822d869573bcffbdd636c7973433abbf38767597da5186df8ef9df071bc4ecade2633366102313e659db8d8e0f293d379fa2df79f456497", + "a5049b0aa153e282457555bf6f82b60fc81aa6fd1c2ea3db031478ffb74b5b5d", + "350287a6bed5709dfba3d35c", + "849670914f5fe318eb01e8849e536374ec11e813acdbbe6a5e82a506f6aef4f916a3a7fb2e41db3adf990175e21f2386d1805af9bbc32a6ac156b13b1a9505958f68599019c4b7297314229c467114754277b10e9f49a4d12837ef24184629c8902ebe2a23f740dc826b01f8963d47100bf617b314835e436104eb207fa9a1079b8feba06d9369b9aa8222d38d87096b73678bc5db9a1add59394530e678b6ec93a80efc6e8320f2909e3e891306d69b016ade0d30cde64c2c903b401f9d01a29b5cb8619dc68ad6c21900b365a6b657f7d9ca4c145fe598a94eeea741e20a9329996b17aba5d7115c93623f2f5d6927068d0f190b49eb885429d771bbbb3980e9293e4d664a71c3cb629d869dc97e58fc3d328331b11df19a38d61e1705ec4c3d779168abe049e9d675337ff658e00d2d610c8f227d1341d1c41f1c01d8b5d83c4b1b30ae4318da9822f46402ee8cd5cfe9f3f22d90a5ec2d0aaa0baa85e10f5295cc6005c5a0887287b0c867a23da1a4c2196f91fe0bd4f0db1ab324c26fe6088d7583f3cd052b7f6fca38e8b21f98fd07fe78b7657da1f586f1fbd3d2b4079e20f21dccc0d269d53a29deb7c7fb63cc291d1d2c50ff163e08ce612310d3bd622f2416e193078ce4e1463f8a3490578af96ca98e665468281f1af9117a2ed23367df19b570885de9d6594f09aaba4090bdd1079720b08d54311793c97bbe14433b031c865b059cb4f75db74779b82c4f83eb4bd829c62eab995027b548063d7cab7d1a6f9642da6cf7181c0ac71594b97fc2c84b1768f81eb287091f63c76623c61e7ba90c922c74d46b9ae5d8094d9752bc1e8020a82601c356a201e0473d540053c707a88f4baad37826152dd245c4cee6b0019583c61e4327fdf6bdcae53584cdba8a503b835bfb5df9d649705fcc1f09376eec96c3da1e105accc1cbc21d90f527041a9beb85f8cbb1ee8db798838bb45374b741618f83b5d0801a3af2f640abdbe74ec3dc15d6711b4c1480aa8d6084defba82ed221ba359c9744705c4feee0955c27ef468cbb816694516f73fb541e0ad4ccf99ec8b67ef090505d1f7c4c3a8ed7e291c820261f12d92bbc6609da6c275349819848c9112826674f243acb9a29ab73f17c8f8af12c7437c11972c824f00db7ad284e51b9b508a925f0664bb259b4443d56463bffc9e5d845c9b9f79b24c1f457088fadd281f48238866e0b92d6253638eb188bbaa8bf6a81d2b1087904974752697cffb00b4ba05e5b7b842a3d2c0a743e4bd691625788fbe9df14600643b1d161bb2916176b6ee40aee38dbb594ec2735d41369ed3a0c6dd9073f1eb51d1b77eb9a967b53670a8ed755f3b2b73a6cb50a9e1ea7549346646dbe4b801c8aa642779d8761b6c2d2e1a9995e758ab92f07c4eb4a23c042171a4b354f434ced5f6d9ccd26cd6c2506e5023dc076ced15566fdabc7364f4a8594cd6ec404e1a9470f52a83052390e4f7789ade9179b069d9f84ca2c7ac9eea51035db817845aded7405bee90cbe92364c8c7cf8a366cbebd7a972438f2a9881395a8610a2cd0c06c46b60cdae5b1f473f4fd6ec48479cf35101656f05485198a470cd36af22838e7ba3e28863cd8ba7bbba7e3c2625c1106a6be44c9e3d9b9938679b26f0713c62c3757a2dc8b2d9eed5e652220a7711cd220bc91a9afd7c940dd8be71616ebb8b2cb0686dfa161c6ef56994a3cafaec5e79bd0a2531fd1c1a42771acb101a38988bcba51ad85bffcd8c67aebec5b37d526b29f7b9d31388e1e7ad7154f8e65516f0d80a30b88c2b868be2541d19ea1d2bcbadd30e2fbb1b4678bfef7f200e0f8309ac0701000c52ebbcd6fa00cb85c8d3ea9c5aceeb3adcf3773cfb3bfc9ac764d031d7c63ab888e9b03eb9fa74554dab4719d426d0875a508c8c86b22cabfeeb70b0f1461db4e5f639d2a2d28a089dbcc48e3f34394ff1acb887b89f75d3236c8143bb9b06273c3878744340ea1858a9f383f8bbdc259250e23a3c3992bf8b7ca7e1a66913547710402bb538a8866772d11cf4214060ed091d403e1c9ca3af75859259f88656a1cfecfdb49d57c193e60a2223627c681a2fbc7390140aeddc19df035a5207adde4f5736bc542bfdc943ae8b094f4a8701618688fadc2284fb423f602c41ad8ee11e5d9fdfa67fb7dc7d4dce7847d4875b3af667168ebb6082f6911c95", + }, + { + "67f0494a728fbfc84e2f4a043e121ee40f3b12b31616c78e157ed970db28674318b08d8b3f4c538d7b9d91b9b0b09ebfebb07201c6398fdbb8684c9390b3d6a8636333a3b086302b24c2e5d47283935d33065efa3fedd5f755218be5d4618d38c5c1db75470ba06bcd853f3f08d39c3cd9fa3618e70b103c2d9b2101fcaf39c1701436b720d723ed5c622d6535c9a10ec4d727abe237e80fd20911ceb84a90285fc6e07f9d036cfa65995f9b6300a927d7d0d2b907bac9d9c4daa87c2438a583fe85029c886f96ed08f5886bf53292cc0265850a1f4ee3e3288b604dc305d0c28ad35e1242f4ff4ae988b6deba48aabcad2fc6cd7eaab0a63510f3f915c4bb9f9719b1d90db123f639d9d4f3227eafcfad769c2b204dd2555dc54e738909122022c4f92f751d25aef6f9a1187750e825c68450e6d1223c2fe88aa27194b492b6788be6eda80b9b9f053cb77c8d9fa15324f23af5147624fc00c66e947b004bf38b31e1343c7cd341b98abe462a5f994e51d343664968624a2ed0dea9d0299d5c5a7e9097fa63d8b3ed96f917f693654766a9adb01110fa3fe0d8e9b102860d5c049df3fe00ccb2ed62ab05583e6aa0a5134d55245d4f643e274def29d3fc86d79979d599458786a8338b0071f6a01609ee6b2e4bba9289e2df780bb27491890d0b5ea650e62df819b8f98aae99a1b8870ce6d3c7785ca957d5b4094946925751f0fda1d62a9aefe3937a912c1b49b4272f87eea7e397feb84c0702929959e38a568460811e5064b1caf5dee53f920c6e19fb16fc9214b5de1cb770b510533f66d8a0e7f6f04ba8ba41869f8018abee31a6042d3919e217359988eaa9db2a10b3caf7aaba43527484d81304f0bef22165f74e9e1031b545ca3d2f74195984cc237b76ddbec85142a06446902339b1883000264031db85fb19b46f320ef3fe316f750f2d3d6070dec5b66ee8ef20701f20965f5171e44c8a99bcbca7afbbd81e30e74c6d48bc4b0d72baf562da6581fafbe14b6cc597f75e53b305036ede219ec56d0c0d29571a9c110ffeeb747fe56f6030dc26c8d3841b868a1ef56840932dad9f3bd7f75573086571f4d9f0d949510a2577d2f8fbed7e850c73ed4c071bf9a656d09dab43a610b49aeaa57333f67d586d4f50683dceee4942db9549f68eef4c5f8df8a2330857cdf2fc4025f2be7d5f0dcdc74a9cb593de91282787b716d416a3ccb8d6d40fa3c70be4ecfda26a5caf3724fad3d98db16ab6d8f26defc68392923b69664b0c2d56f01a549284b042bbd43c8faec940187f190aec08d06f9a62ab03c9f610f64c0010a0939451d5502511dfd3da1fec5a38f64640c7b6db2961def257eee9a3eff944828e9557deba68bd8e42dc7a9c1570e35537993061fa0f5351fd3cf4ec36386ec4cdc5a2882d5f16703b900c5000efa63888d69982e5ecd3e329c8cf5f003e23ce03c55631246ca15ffcadb0fc9d5634252ccda812ba7bf5e343c44244026512062a68374ed4d8add0855dcc22b30148e0cef0f2886be76bafabadf3ae1205b43c6deb8a41c338114895dd6b49deb329ada31b350e02a1bdad4eb05b61b50f9d22fa2863bd607406f552713e302467ddc78213d584b4933202438d63f99d011b97297f5589f35b7e45ccbd76f02453b7a7668c2b1a1f5d1d63eb805c8881771faaf67433eacfb22f9b6fa58b93f9423a5fcf667aeec39751ae17ad36992556431bca77059a29353598dac12bd3036633d2ccadc18f44123e5bc074f4e5ca380095af062fd83b647015259be929011cfbcdc9bc5d0dcf9b688f0f5d74da95746f447a9e1cb5028ccb2827b45129d04cf6990953a6d8ee0e67fe6bdbd8004f4744cae5607fe7ec4a0f14fe603dcead3367b6870d8e751cf57387d04b881f92cce9772d695f19b36e2db2cf6a807c9ee83225f5c09a11b50e99855921a4eced8e631af7c234aa31615c00ccdd7c6ac5ae8fba6e29cc233765a891864c7d73dae08ed1a3c27cd423d8d4efb550597afee8356c12018f496637daec83575f5e38ed2fdbafabafd38483c239d31cb4d104e93d16eacc6050033a3c86929be4ca8914a538bf540b43d7ce7daaea317bee1ab80504846554879f900d312bf2fbb406a0edc5f4f809cbc68675b0b7f09fd1a8a4d52c0929b3a8b9c1dae4b3d599b976867e6a7e8736450dabf5c49c949544386a71419324ea4ce5c4319899ca510f50d07ace57b013655b0929f79dbf3cd629ad17bdd10109b7c53a4f5f04a16e5471e823c898362df43f57ebdd1627b33fd4cafca6cc065d9140acf0454d5f99be47bc87e0f3b4d4320bbf0f21e7c261bb8d5d615963beeaa46bdbe9b83a8277813ffe6132b23564bef5", + "74dfdc364097c39ef91c01b707a522e28edb1c11529d5050ff820234e6c0295aa00591e09d547e9671804d7825705ab44b76c59d1315ed1297ef477db070d85076693013bdafa92e2ff6a654660008b176cd4e8ae23b9c792be3f7db54cf2bca385bddf50a8624397cca8ee3cb96944164e3cb461e68", + "b3b5ccd7ef49a27d2c6d13c0ae77a37abec2e27e0b2d3530cdbb7f36792a7d2c", + "c0494bb7249f864f69beab46", + "ed8d6e964bcde1df68e7f362243073941fd68ac77929c8e480c89f519f748b3dc337b1af6231632c975167a8425b174b42c2c60dfc0ec85a0a212bf5c9aada818a83f9664c8712d96de1036b5e5d8c8298786b753638de3a8da958549f16eb9c723355cdf7b999aac464ec39df7d6c1607e81b88b63043d1c847dab618f1b19336911b4b0145c2a694e61db71e021282006d48e37f10f3b6314dd012a07618228532c28ca84a936e0eff83723d117b2f2db857d14af5bbd5948a0e53018b31e57cc2a81f36aa013a844990753ccb347fe98fab294cbd252a8b8f7246276275d2780511fd3cb7baa2fd1548184f968c422230f7ad73ae9dde91295f79f6b799e7d234dfd6573fee6d6ae748b0a8cd7ed4862ebd957390826f276c2afb01fbb4b64b61a1bfc138508efd630e77580867bdc1e96a48a694cf0db6c2a11f05dd0bc8769e7200bb0749f5798b6f3559de55d0c281eb5df22b731fbbc109da9c68f209b888e61240c4c0ca006d105c0a7f43144021547d3316e5a99f6c429f9ea2f17d77dc68bc9d5125b6260f79bc8b3b8061972e6757d87b6544f21645c0b4debe5224f7c48142c09f35b8e144c0c1e6521f04c170519ff744d61abd59a56d25a26c5ed5972191b25e78e2140f3ce68fe17be9e59a79f6c69619a79b83614c670c7736d19c27fd22515fb5b896a6418cc0b4850e85c07b38b995cffafd9f69763cbbcfa9d1bbea6868244a66a5cc82e815fae09f5775d28437634926d571c2b0d200855e09cbdc67d10f85bd4cc334ded4c83aeea57f8e373a950f135997666b653e8de47a3bc0059525720045996bff500a47baeec97808fe971d7693dfde339e8beca3598fbc053121536c30d0af10f8f5d8e5eeaaaa9586d7abb563fd69e88351f93bcc46520f6d97c1a49ba9f8f6a25cdcfc11b2a722910aabe7435ac8f0dcda9f824fdde80850f21a2d4bcbfd2e9fcbd14dec05c117a9796db49e2f0dc55e74c7f0f615bd049fa7d0bfcf197dcda3ef3de90762e6f6f9f8a8936bd04fcf2a97cf18ecc8f2f118ffbf02b67f252097e4289d02f264161f6f90f79e1e1ef8414b01a9e1a77b88c039ad6eda6df1e28fcfe9370f0d574aa9e857dcebb19eb7ce8af9b19b4481c9fb3e1f0db3b02af483f737ce3ea824b2165e7c0fca8585383d4b0a16eab2c7e3ee5c038f939a97bc8e1c093cc5372ee45d81836c988f3ab3e6ee0e5f9549e4b7bc381a2afac2074cf75ed56b0e757e7966cb253d549fb0902da98294c6dd4de3c2e166b7e45098d2729b1393deb68471d4d3218dea3dfd0183b654ae4092a79357945eea4b28cfd06b40d30d1b4b8f19827895f6f908f0fe511f74ec84cbab2483ca4bdfc6ef50178eabad79b18b58529c9328c13c52c2869858cc20ec36ef7717e1c743d13f9607bbdb0b701d9df6aca7366814e883d23e51ee5b0f20ef70e2c4134ab037d213315fddc89009260981329a1872e541767adbd5ee9501e7df4ef0cdfae9769961f8716ee7dfbab0ec89b3f62e987387d5842e124a69b07245d359052ada50cfd67472d27ce2c4eacb5421b62dd7331da54ebf0989803797f4c8c781d0e2e6477b421c7d5cefc8146aacc0012af3f1f7cd71ce2b1045d86bf48c9a13fe469a1865294e160b4975023d0eb24ed26837afefc250a914f86f8b1f5d67d65e9737e841519148d4dd5dbf2b5a8b073861288ec9793d4b113d71c01727f67d791852fc3946dc912d60fc66bffccf4c45d859eed9f0bfc7f89086df5d5cd830ac919aa7cdb4504018052d67f6a3ca012ed69187cd5fbe91875cfade381bff1e804ba59cd59f0f75cb46dcfba234ab9832c3fb9aa8dde19fc1fb30677ac1793a38d94aefd9ffcd4e777e9e4f6d49e0cdac6c16a36bc2f3ed8e23b80350e3be6d866aaafbc8cbf7c69fe44c2aa80651164803150c23ebe262aa669c77ca94d215895d2ee9c3e325a0bf2c61e419a41e0f7b1ba8ee0508307d49301abccd5b74c054b6c7bd1aa67cffeafee033761d8226d9dbd7214b130a867764062cf4da685deefa23693b8549d5ef5e53df85c19bfb3c43c6bd073e7a836f849587a4747e1a9a3c7194f6d5472d2e3e4c81784a3061fc9bd3b94862c4784974d859134369486f2651f1db94f511c6f59f41da0d75307191602730b88e4e6101fc8d392c87687f3be454dd92fb8ec380715bcd88aadb63717cbce4db91a36821a572c363759d8d0a2ab007e5981b78731dfdea20d900b14f0c5ee6a4a9b532ed2134e6edb4dc267f001cb88dbe43aac4aad453b839d035697df7de98ca7a9ee7601228a79004b89796e9ab971aeb8e62c789bb21f38b77b492c57db402bf6a42ad0cee169e9251d865ea3e5f79b1801ef1e53797aa6c7060d6f9486081", + }, + { + "04cf92a64cbe135f7fc1d7223b95e41d13f04b482018039f4e7ccacba8aa15ac79a752c5666524e527fb076290ec80a3dccbebfce3ee9b316a65fd130f12bf88b9124d1f7772049e6d0c01fef881a1d44c8dd02f7b6b60e6d15df9e06fb86929cab64842284de09659e19451623525aec2f5dd3e603e24319b1d120bd57b34a0317ce25ac9c2f022a4847306b998b57c8d92baeed0de1f6cfb3177d0acab70de275238f1152813b9ac87bf651f74e1ad079b9bd779ba4374ecba459865b5768d08ae7e1dd691d6821895e8380ac9e5116580e8de3a2c5326e698bf4c4d35d955e45772bae8483d01de2539e8ee1ef9539ee132d80d85fff41dbe406af319c0d7703292587bcf5959f49241e2b03a364e1b682729ed261d0ae45d74d77634afe667413ee210983b042a7ce6dbb61c29d18450fa7176177b5a74f032ea24e1d08b220f6d32a7a836d1241cacda39d6acbd26a62f9dbeaaf7329a291dbf0aed4a2cfcb85ea360947585b1215feaf70ba71eb2d6bb7081b2a21bdcbfdae6ad2513a9dd714d3d06c2c2b7e322a1db2d48f9df1fb44fa066f2bb42b196295ebb3c0898ad55d5b317986afaba0bd5e754cec773821613e908ce2bba6454181f9020b73e758df18c255c87df675cc6bb2b8d2eada44196ac10c26674167f94a79f4be515d8d6a1fd3228dc9a85a355b030845dd4c5f481d5b6e74acc66de730629581b022fbcff61e5dcfb6a7f511aafd577849a6b057021ecbaee53986159c1ba74c3e930c34a159f467f1e9799cd6c1151067c56769e43308c96c8edef8aa7634d909310dba9af2128cdb8c29b24d3ec2a4f43a1ed86d1791c9a670b240e6e719f01827aaa319bd3ff53959a776886a1b7c942a54f141e6bae8576d294e44333e6c5ad90f74863f69bf890126016b318e0f6bd2f0adb9bb861118af5f6cd28dc93d56c8a1dd080b8c810ca29267d410673fe367dd9d1353ae2bf2fd88d57b4202c21aa49f12a01b93acbe260492367bc219d3afb6e6f35502f6529bcbcdddce9fe8632efb034a9eaff8b4a48afb105d04e3fcbbcae010ddd6636992213750b12fb3e01ab72aa957136e0bae591bfb5e0fe819cac82a98ae8df230af399160594540640c6b1d537e7b5f1cc47b08127ae02c35b846de56c4c08773fa18d4436e14b76a7fc4bdee301d0af4880306f2f33328ab79f6f24ec779b2b1928704f09bbc5b0b7108e9a115e4959df79c80eacfb98649a0788867e23b2974b22e654ddab0494bc922ecdf17727d0f0efde9dea7601857d890bfbacbd93f7df794bbc254f50e1e17eaed2f5d5a2e6c58083aff68434730d406fb9fd02b0dd7bfb99a04aea812b6830fe5e05a044ca21c77a174bae8b58eefa11ecfcc1c977bc6218064c9931b5c92f13cfd05799f11e130869c293c1b08dd29c899365014fc8195514b286c97cb6dc4b8633e47751f87fbaba137b6aa04d072ae06c2b2f34448449f60b1272c1efbd4722a2be749a3d2e5450aabef1f7c51bd8324607668a8caf8097c2f358b1b09fd3525d47ec9a7640eb20ffdc17c4f7eff63df75dc7830c471ace3a727feb11533d6e9a2a08106af33069cf482ec63724032e81cab18e12cb5c4c3ddc374e2f75bcc99fc5da09b80a738852a14e8ac552b8471c6ad52e35317b730db2c13c277e06c643e0d0fbea43833de4d2c7a9247ff040e9c56f1ff7ea92049c5341c4d1478a14275a10119d934e8165152b89951bca7ee1399dd8232fdcbf831d8354640e698b68799d060ceb877201b2fb96cec514affeb28721e163e1648164b9e5722271db9b0ee1a7f96819fa1b1590e9daa598d9571ffa3882db9d034056e9b2785a8d13686eba61d7d45cf2e9ecdbc391739ce89297211472be18b21401658c5bf29fc3615924382d802a166d05dafe7876e70a0d081e80c63632da379766928a0555eb5e7a238cfa4da267527c66caf34dd40055f2801b29b3f5604a5bf3d46048bfbec2e24abd2fed2481698a4b5cd71f5d2c12dd473b903c9bdb978eaff7d76fb69951005681ed7b0257054eb3dd6d10097fee51ba7e8d565925e4091cbb78d255c9d3ab4ac0264d172c9bcb0908db1288c9634248f198a1167daa323822058decd83936985f83b08b1e7b942756a7af200af168fb8a091107b4443fd649cdc22106f9b9657c69f19be485c23b2c715b3762c332eccc44f380883357d10019f20612ab6b8f155c2af9e2ec340e5d8f45bf5278ac1fbc9f9f44d2f615d21007d822b244b1c7a0dbc182c7f5912485d6e4d74e90f60a2f964e028c63d49c6aadbf1df170e4914ca514139ba538207b1cf7caaceed4db8423dd1086b2adf15f6c0e50dcf2e12898f53c339a745316904ae03c38b417bcd7f5cd5ea77a4f06e65d56c24f37ebe72d271ac79b6ddd2bb8bd67f0727ead49737aa71af4f620da53769ca3ae878adbaea5a249128074ca3ddbbbaf5a68f9cde2a0e8d69708b0ea7f4c8d2dd4180882bdaacccf2a409a681c551776bd10439fb12b7548342532b371c0e045d8e8c895929464bdd4fe25f0533c66104daaaffed52446094978bcbb389c", + "001084c8a5175c0ad43108f9215c35886c82321c800740c6118a3fcd45776a4588ee0e2d056a37d99b40d2bd9c0845088a77240b7e65d18fef105d3488c77910b5873dbbf275948db5", + "d614d2e671066e387f7eee07bca9f9313d86e6a4d1371360900d670e11ecf1e7", + "674a516f8f59f9067e0485e3", + "1ee376e9e3c89b2147bcf75480ff0dec1d0e8cd45ba812f34c84124871d484b4ca87bfc8cf99f85ad452c482933801426e2737a97468809fa36caebebe8eed07a626b3bc3614ef1ceb54f9221ecb16f413f0bd9ed4b3010c40632f05223484af7bf5948c2fb8a3d2ce04c53e3f2682494f3969a0f8eb738cf93c0141799c9e6b68924433f0326991e19626bb19e6fbb5dd46baf39f92e830f9b1ff465a007f031891fb1f1799cc122d3ae7a55624356b5297bd5d948d9ff2e414cd8adf00a53524df43f398938d33c93b2c06bcde2679566c0a7b0177b4a873f35874739d550712d5cfe3d25c19292ba97c01d84224738bb25546e5c252fe5e5f260ca881aaf176a271a6fca2edbb2cf23ae6d4c56c20daadadb8205c2e33881867cd67ae6e59132edccc3601f014b744ff8eb6aef5e09b358607695d3af42ab8fa30e9fdf99ce54427ba9da3699de19f7a8f9be368df47ff0607601a91e7a5fa6e72be50bb32b825427cdeda3972a18a23af290986cde14f5fb9cbddad336f5efcd2d7a0cf3d5b23e54b702352fd5ee52d7e3479441497d56e17d5868574c56cfc421ee47bb00e9c75b84262a1b9e2cbfcccfed9c4c386ef0d2c1be9a7b7556909b5d72a38b7258acdd624de2396c75386e077c34f005f92a2203c82d1072c8998f03b1df22de832ac733977705453b1d72336b8d371cf1ed3923f462ecd22075de5df68c83ab1e6648ede7fd5ee5794a744abcb32af73bcb182cf97d36f37c15535c4107b7c8f2321f9fe0e2b6ccbe74204df3d748c05bc1e0e2c55ae1aee2d4aa4a52e98ca7229d6d06576196ac8e4b14a9ce807075cdc876aaf904c9962741efa8c6caf41e6b87b2ecd6636e2e58f3ecf576e5d8b895162545e618960ff6e336ff17eacd5a1eb335001633fa78c41ed05466d904ef9b81b643a043298c0e291a085e4e67da72e329adfccc407f800709865147db49cbdf4232073b7bc7ad89b3dd901d927ee08ae6497e0f2f9d052ca8d7444d2e2ae2197f930a7b1c8af38d8739ad298464169823684612cb628c484f710cf9c552551b6837b575a43275100bf800b7a3d777adc44d07f67cee5000422b9049dcfbedfccded0f2aa4d189621579b01e3fdaedc4d772dcc593316ca85e7aa248d219dac21c561d318a4936ac0d3bd5c75311486c174e0e2182affdf69bdd6a086534e4a602efba2b9363beeb5346539b45336cbaf479da6b15b226a9ac026482216dedb84ae3443b306820d9f05f78dca7090d727c7481d82c6e5df80e189e24e46f5758e453e542bd91a58eb51a89e07c50afb543c6b998704432e863dc4c0d0236e0672835a7b0b64e14f5ced2904e54da4287597f920bb4d542c35d3b0271cf0eec055656d523d7d2cbd667445d3e8634854f8616b7d7a7f3e14fd32651e9df40e1daedfdff1371f16d5549ed5646adf2d417e4b3a4d145bbe0974ab388c2716861a08296b862e4fd035163281457877eff89dadb160eb2b780414435784804bf4fd36602699d8c2f6a8cbcb509198c38e2df2edaae7bd7c93313ca98a9c2d24419a12ce35b0b3d68c18840e3ff8739d70969927c7db9a6569787bdedf5c99948a9e79b2302a83a71159f4c789b3b3f05f1e574f8a24c899ae3457f8e73f9bd86976fbddd83b1af337eb8da4c0dbac3792921597e18a2fd3a0ac89a270794529d370d36bb6dc7452e754e903781cbf57c8646b92d5d02842e7df229b3d721f9b981f9d61a48f00e53948a5dbc4f739849609d94aba3e3f5f8163d40321576cb8eb8e89953b608a01184d41aafc13f40c47b12240e3ad49413473c26b6843f4514be221c2af632d1a54cba230457f23f00b2608485c381ae03b389ad0a1671fb416de4659cc7f7a9c4b6d9807789c307d061fcf613b96a2d79e5e3e20b863c8b1b75f35c982b40ac8dcb7d2712ef7df94901facef783e8015a9a48574aa6f0cfb0bf6c1a3409028f8d62137c347f5a35ad6a3cd60d71aeb29bae56bb4590f69226fb4e08fab7a9f41e58f4d5784540a70e7a97720c549c8440b089eabd0eb3e4d37a2e54b1160572ce568f4256dd244decec31fec555017ebf488e878945383750eff26a8a1cca73e7d6f52d8cb229d5603360a3bffec23029ee34145c4aade82d486758e0aea9e1b7bf0b4bfbd4fcc96aab66a27fb463b48c6a6c5c5a60253e2fbc5716ef55629277a5f3b89c300e21bf1226241ce0d587fe3f5b11e47f35614169dcfaa375ee1aa589be33a4363765368f5666d155cf72e851d426fa67b982aac4dbbc29356d71deb0715b34e00b9fd8876bbb09ca0701b15615f05cc45e128b3864b26003e6ffe801c4e27402f37b8997e0c29ebc273dc03358cd22fdb68d9cd3b56ff8248a727c2d4ac65acda4d0e0f511bc07ab06cefcf444f1002c151b953d7f7b19695668a86683497c2a2d2e69f19a4997148d2e8d158da859c8f44437d9ce9db92f84a88e89cbffc74c0ef4295088e2543a4f7c6ae9c908bd987bcfd7a074f83ffaf3888bd7f430dc5a5bb70d223c21b1bcd8bff2103408460df864dcc168486f6a66d67ded366c6e10f50bcddada93627cda711764a57ec36035ebc", + }, + { + "ce72c93caa49bb9850774149a87fcf8e23a0c53701554468645554553d54190bc6e247712b02097b794bc421ca94afed34742435ca689d2ebef183fb469c060c7f4d7daa508726c9d2eaeb9c7e9a89b30faee8d9168607d4778acfbd27d5caa623475073ce763ca061273cdfc2c692d1747baa8a01b15f783b2e36620400082747599a16cfd6b630fef310c0b9a2912d1d3bb71eec16972745cd8a49cd927014eb0a2abbe0e1ebded4fb9e8d9e2fbabb6a71da5688717ecd3e08160b9a861f86904a41702b2c4fff28ed8cc61d468187b75bde3fcc5c0c0a642215fea83584387fc5a9aaf2f8a91ae535e0027b618a32bd687289c47e9428a1a92649deab825d702b076223b07c08e55c0b60be95937bfd0504c18398e924420f6e20baf07e2b1b858d3e360a461b66517c24e60f9fe314a4a4973c8dbc7e9d2a9f571a1d8235a21073d81ab9f4800b70a5f17f44d593e8792a2507e6a3a41042fb2a5f7e5f028ed2daa88cce28973ecd88bd125d50fad77b1fde61c38272057d9c65fbfc6789ce41315a105af14e277a0c39d75c34aed7538c39160eab1c8c47818743e8111229426c399c5e88c4d894fdaff0315ec885ea019bf9acb785f3380c37201d494a60b583fc130bc0eb9fbe9b90eff95874e35910dc05c761f8006e2f208b786aeb2eeee841f9a82d9966c82956c181caa4dada81dfa2e2d7a25007c2dc7f2dc7ad1bafef14581cadbee4d614a557df4931b9ca105bade8fdfdefc0d96eeda11c08500b1ca827ca670ba07bb0f85af92914c43a6f71226d6e112d487f1ae99b2239a63ee2cd0849d8a9c488a11f82ca334604a2b7260f25373c6db75656527890f9b772c6bfbb9f687f27099ea9d4d1efd874a6ff83cc36c039ed1690408f20394692ff054d9e6eccc6776b6f4b3c5f24b0052334d159f40b470a9b8799bbc0df4dbfe59a5e536624cad193160ef23abef85df2c9b6e6d4fdf16f848a2a446a77044f1162a278866c491982570cbc16041908cdd0efa2cde011526a3c96d4b39a23c5fcc53d8232869cb4dea871f4ac8afc795aeb1b28cb2d7a3669100a1cab2ee1a7f31e2a25a5c6da836e4b771ad57393305faf582adcd26045e26b618d9943358c615fb206258c8993d700adac7440dcd3ef34fdcb065e10e9c9727662b5abee160aa01d2f2ca6c203a76fb01bb08cee9fc1eb6bc7497bb012ed2774a2d263b9dd03d60c307ccf33233ee33eee702c8e3118f9f86174a97462d0e804a24bbd7f4f938c7f105bb23399967288069e1637b60f2f1883d88ce5a874ea4bc0a7ca0f3b568e4bb1407e4bd6f0d3dc8fe91345f8435d7b1be961c45e4b0f1ef2d92d2d30bb78e1fbf72cd2e7ffae76e8c2bce005195c2003bde46108f37ffacdac28fd67a0de62970b347f0ae3f5f3a5b1d3aacb2fcaceecaf2ff4a2aeef6f5a176cc1b74b234f5658ce603bc353e075278a4056540e43033d37a6eb2615453d8206f5cd294423811283bcd5d79c4afe268a547b98977ed5cf24c0f53a0533bc0b2889356cacb67e2f7353060f9e04362859b1c1f02f96bf5457b58e5ce84a6810d39d7c7f53faaec64db5d6ebb90c1412bdd503ec6bc240c277ce1f5f18876feb24eb6a77e5193e33ce141e8720329add079dc9735f0a35d7d85436f1dba6dcff9147777760b5aa2ec9c8b5e9fb4fc602ec8f754c99ab2372ff5963dbff3fda91865108e606b214cf7acab875197e78060eed52a798751998ce7c73cebc4d5f429f6729a5193d7593072d0921ac8127ba6e796107ee7b9fbcf7128ab35fe9f6fe501fa4695c19fd64460685f287acacf5250efc13899bcf80ad5a340d432a0b9449affda5c8fa090f008e01873aae7d5fbc7972451542c5c29cf9cfdf23db736c8a7112536b1b626caa63f3e4117044cdeab612fff8d8c194d19174f56ce761f6587349c48fab30390f231d209461ee7e18007d10d83ea5aacf199f3b00003259747b1d03274d3c3670595604bb4482d345ffe31d3e88c70da16649a2677bfbdbf618de1d651a53d573aada2eee5c01335ce5519a6d18a70f7ff0b1e66bacc162c49f7f29b9d3fe2c7dd85b6b355c9f9141f02baf08d2be87c36f6d2e1b2e90dfcd100886e306b360df0ecb146a6aa5ac5ad05b63a219ea65885894a386248254348ada17908d776f9b438306ad28b208f80d6b9b265500aead945134b9d388ed5d6205edf07c5d8bbfe0916d0943750150e09c76359d24e3317517ea489fd8a501dd93f159f07d19d00e86d952fbdba2db771910143df346b30a30fba908a1abe5349c3f241958f428dece7ad9a91cb42035c43573b87b26c2ab216cb4c21799f6b3d81acd300ff50edd6fe7868b9ba6c160db3418565ada027b46b63e5d4f3411284fde585ed3673b424ec1cdea678e4a43c262991c3c9b988351d6e0a10af1c959cf21b7a288f2e4d7b3b2c11b400b5e036df71fa993b72ce48d0d8598fe4ef1ce70a970f89b55cf4f07906a479bc84a08bf6ab25221de37afebbc47ea0b38b87be128737d7d43cc84d336cc6ffe1677bd802910a2084751f30398dd0ed09589b2befd2f3b40fbc013318c822fa2faec2323fcc52b43161f47aefc557e92df3050dc5f8b1c5a4b2f8bd7b2ba7aaca79dcfa362fbe7781a2e261683a4a862d5f83e34845a8fcf8a1aa73cd521e87cbeb71f20b20698cc34bee3b8628b1a3784596c", + "08b3fbd73d157e79ea9f61665d19867dcb8c1598c1d37b793606936d8aecd992a0d46addeae857d488b83be5d1c1639d4d78350e4cb08782b61bef4107c9d3a79d3d85", + "a56f38b4bbe83b3d9e562cdf5ef5b30593f08a1a166676c4fb0d5861654e640b", + "8726dc5cae9497936658f603", + "88420357d1ad70e7c7bfd55b3cfd4bf06cd4e9b4ed5cba681045199a06985956d35fe86b28b9a4599964930d05d230a23c55a6a152f67082a453fc31f68489df05c553f9ae5cdb3f611445db384d79af865e52440a876fc4153d896b7a2318dbc2a4495ecdbb2e9dc68022326d35289e82aa55197aedc266dd91ba3018c7b474ba22b4e773773f3e9890ea84bc16a6b235e4bb69e785c40c1adc15b0e0ef03aa147b0d14e62341e27398b84a53f72c9199cc1c94cbcad2bd31aa69c96b06d01775b8c0f80278a43f526664bdd430164863c9c9140ad87798a5b8f38dfe90d37f54d1137709d5311136b728e6c799da244294daa4c8b44bfb0acc603a16c088a081129a0d2cff55ce1c4ccb486fa0ecc3098ef2196f47c49f9d253112bd5746fd99df5d2be577617dc2519c0ad04ee49ee1d7be3d50492017108fffc9a414ea227af39fe49fb2c895fcf00d927bf4a2d78c466fd44df4768e6775d39fa5c834b60979ca27ee9f00faf37a090838f56275a894ddadd265a8d2de74265e4d8d286639ce8f01eccd4f551cf6b4429eae3f08902b6ce6ef422cf91ce8946d9403fe8064784895b62a7f5df76ea294132c59da6b9f53d4195c1e9000bec499c14cf8bad460aebb024a76ac50616f0dcda71c0f56dd3239b11764f3ed6ed06c049b2ad673e4beea391dbb854fde1f01b1900858b9809259f3906b34f95a1c6ce8d24fdf0cf7c2ab7bde2202a7f1482baa6e51caaccef9f541c377da620bfbc63955cae0e6644ec8ed6878f704f1dea30d6b50d4291892bad19b0234582d50c6cc0b4165322cff24a9dc2ce1be35be0fdb3bb7abb777ff0b2f4cf16277388af5a89220d59f1f45ee9cc2a0fd7af9aa8e9e8d548fd65be4e47e7f8ef58f7701f93a42e7ff78f70e807fb63513157fcba96ad9731b2e8f80da85ef407d5c368ad16f0657620bfc122ba1b10d7ac2bf46d8133a9c6fec1fe04882f3d5765da8f825e1984a4313f72b67d806ed45c000dd3ddedd524d474b9b5788547d0712e8edb4c6c586d0cdf8f2384f1e093a7f6dffea6e79df9cb9398f5d0b9a7cbd63d489430fbfa397a0d03ef916b7702f33a54ebab84a7055b7ec6179b0ab7722f03e126ed343b1cdf2af3763df7e3a070162535514b01ad86c6cb051859aba1cc4766b12c8cd57b73fdd3c65af6961c45395aa7b885dd59e115db885f644e1c94bfa26b3804f767601c86e2c7dcecd4daa59955e6a40991a4b4701e63fc82b46dc0ccf59af40a8583171375551c868436ede535705f2e6380c5899cddfcaf9e94314794bab98846cd5ba9e9afbdbe1ea7fec5e22e7b2aae59fa598f4d6c0cc6f936a616e11bf01a2acc891cbfa2bc53c511a8a3a3da2e3aa5907d123ab2a4a3c0009fdb5235a3c33718fe4c504e1539abac6370e06150c402b5fc2f8c32608db4ce2eca9d1e4b96371ee195f6cd632f5b972385f9d5d357b87c78cb4e2c27aa9851534de14de923543f5fd9d55e34d6e8b7e1f3f2735df80046de01f79d0321066f9bbd76299c7386d285f7bf4ac15e033e89a040710c90f87aacc09fb8159f93c8b4860247eef079e32d05707e88aac734a2eadaa853f528d9986e0af3435b5c5f44ddfdab9b0c9ab3eea97676e920f80d1794740067f9b229fb018c804e595aa997533a5e967cb79ee58eea18995a90ac08333f1c69600b17ef4f454f540dbfa8b502457761bc4daa876d9053ae1f55001b6916ce559dc6268d01841255990e56614e6f4ee4ce04472dff0657360d75da4e83a71c852a2585110e53137e91bd89d64d99b5614ab2a5691c876f15d9931b092fc6729c0732db5cc40f966fe440ff99d7d05b24a872f552c27fb0cf2af443340b153214b407fb9ca3750d9c157aa75763b0b7600959663889d00f392d6ebc12835bd2f03ad802a21d0228f1d2e9731d0f0051eb2d5369ab790d1134c38e28d2bc2d5d57d6d897244742c176559961a1e40c84ee5c8225c8d72b92352a011e3785c262aac115cafccc2fe1b5e81a677a0220f207ebadd786b93f58e40eb6ade68ddda5b66c5f0f6b4b95cdb8241156110ba3303beb79acbd54423315768bb43b4fe8c4a465e50c4e63bce272c4d731ea4c797e14b2de31ce4264e2479179b906f67af4a23c56e817abafedc2c7a65aa45f0c89fcd0baba60561a8d013e2d5e0bdf9fbcc1346d3edb20e6e9f9c410982e1ac43039ad8fd0ebd453a6788376951fc20374b59946a6803498929d9fdf2e0f5e58c441329a79d1232e957b3a9ed17231c663b4819dcb6b4e33d205edaeb7d7ec466930bd84a064b40aa67fd76f6ca005408062b45b5aed6f8161836c7160a8c8313dc9aa1c6d42c2c16972a1065e41aea9c58db7916e1670cb42a8b54d85498561b4401761506860b19b446655f8988101fb4c45067e30edc3f00df8d88ee34111dd6626d605d993ff207be09704fd8dc242ce514bae77cecd20f10d4a38435a3f5e545882fdc224586a04ca6a162e118d23716240fa67892b78faf98a17916471f7f121fb9f85497a0b34bf5aaa4ee1ed8a4681bec55d1b4973d4368600115bea70f20a37c9e942b87f6cd1e2ab70fd401e703e3c8334c75fc338508e06d6370779578fbe737a75954b4701bfd92028ec32d3d7ae606caaf9f049d9774f70efa707c1c1174d9fcb5b0a0ae2a961c6f58e48ba82c2db14ebbbdc24288e42879f547b855c86dea9a3b9877e4b105515bd78cc43465", + }, + { + "bf7884fab52251e202afd7b5b46aa53f85bca5fb80a009d8016e276579e401385d853312a884f4aa33cc5fe7360426bbc0ccb7416cc0196e2e40d3a825d5e0825a1394029789acca550bb28b10d847d0a4fe1111be2b7fec6b5294902775128288a784203031ea853c9c104c75571d19552e2a1359a900c5fc9455230968a5920f2ab23f5b9cc49739d4e4ae2c01c7812ff295899b954e9729a3bb330b60c51a8a7759e5131d7d4cf261fa1e62c29f91b4341a4fc968e7f30ca2261702eb328d628b7275a9efc29b50bcb9b27e5844328d5e8256c76949d30b6fea0d5a1c9abca80d3251fcf4ec4db0a5ff2ffd43618aa2e3e1694c2a3c579a2665f443ffb1eb0ce33c09d7285687cd55b6ca9918553bfb36a44860e09ffa0604ef4904a034108370195a986fe165c598305eb08599abbb3df31b1d93162397056d9ba5a1ac2812c582aa356310fafb4058abc5f157802e4a9b4bddb16e75b6db105b7dbc838f820539b76949b1648909104efa67ce28b16a738f1be104d2bd142d3ad1b1c953b6020a1f4cbb84d5c49424befbf2e6ac5c593b783a3f4af75477312528fa78dffd82fe493d821e011642bf1135a5be91fef909383953308dcb61b2f35c2ad259acd1a2e953c0ea6a03a97b384e39c94c33d3846c26b4f9f116abe572d5b7cb81886d6adc2d544630fdc1684bfb32972e051b9a2bd0931de63e025813b923944290fe1ebd5264ee4f25569a2088314e8d4ce8b91c7bd602b9d85acc917d60d30d5ef1cbb055b9ff7b0f999b98caea2517d2de334eb436078c90d41e0e34f11b93e3e643389f43b3afdc4f47a7396cbe0b4bf159ff27618cb835aac6699be1fc7ec840b767836a165fb95d06f2cac4fe15b65714ddb8a095ed4a5b57e63d536405931b6c168683763fe07c32aa4130bff787d4d440746a2dbfc584a502d809076b257482abf7f8ead7741c82b54c41acd41581148aeb4149b0c6eeb39ef7ba091c2e8bc72583b2fdf8ce7fad1bc05aefd6db0360c644a9760a9729a88ee4b2ab123d7238c12435b9f3b4660e74c0fd4a9b00aa614453d84fea01f779e5a924f8e79630a8bb6561ae19c7bc8d88b9d823b98285fdd65d4cc05e443944ed5d3cd4f46c7cafd1dd5deaa519772dd24f508bd2d588a832d5689119a2d506ff11dbf37d57a24e35ff38da18af07eaff5775d12dfe795fd3e1f0ec83c5f283d6cd76532519a15a18d93431893b1b88929159bf8fd21f62b30f4e37d540baab0e30ff3349a08d627ac19303fcae8b8e3fe44eceb66d30697c7ea051bf5afdcd8bfc00d49c8d36164ec9194a78a4d8b78826863e93b6a810354861f4a35ec12e5ac102f74e390d9c0227e67acbbe3254e5b892786e3a88a383ea9726485854a319569a678fa70392cee90c9aa83eee8df6800565bb8e083e78a064c0f8b863120efd799ea57d3073663c0d0e7bfb9b717ca1d6372fdf75a77fd9677791cb899fc8033d6d806de1e6aaeef525ea909666316d9d604c1207cbeb6f427c3acc1b02cf59704fc65135703f2a9529bb2c8fec992c4de53e54b029c3f2a5fdbec1008d1a70dce0c11251003ce62af712b9e4abe631902485404e4933f346f1b4467fceb65baf776d0078aae6a2a1f95b85a441b635663c75b485a8a7cb9a5c12192ac874d940e2d9b88cc05a2db9b5b35df769925da508112ab0b8f64a1408633fd0d81810baf2c846b222736bd826c8cf905b2c35633d6013f5565e0a5ec1492e99613f53530799052a0d70023339d1c394fdf9f73a590a2faf68390d2a823bc3e47a173782b03dacbdadaef1e67fb47a7cad71b6067ce5b5e41fc20ea1fed28578e9bdfa99faa657a754488ed3fc084faa7a05b0f6eb66da0a28e9ab26bb319fa4ee993de840948f94dc1d68d926b783a0bd3396a89970b2c2595de8148e87b87c21f664618af4f567115d403715c3d7d2f66d7a90de2c5237893a4c18c20494e3faf94485ed39ecfe972c36acef0d7ee57bf8755924c790ad02dcc5c4e15aa7db53eb6040244c3ebb7874676782e54dfdddc256018ae6af8cc37450a4cef77f21e2e061062ca0c2a514290c960f5993ec1ce9eea6d09d3293118237e079b6015b966361c3032368174d74ae5cce4148ea2b3690fbd3c28ee544c5c5bd7bc618122979d52c9d3d44eab1f2467f338e695ec5f95998bbe77dffac42bc2809d43a324e0f5feb4ca3d5fd951b7dc8a9e6276ee080079b68849b14c7573cd02c76027a856165d1043acf99554c62fe32896d120974ae71f84986bfa0c28fcc399246bef3ab90f8e55f913aabf339dd7ca6f0861a9ef712e77dd28740615479f39a37e746c7df2b267066d1649fafe0459f665f3d5e7124db43ab1ba5ff94989acc7fe0935e0bbacf718b33103a1355d97ab416d8263ab369e6cf0ee563a77f2f265fc3856b7d54dc0887ed439a421c14f733ec1d6da086536f9539d23cb8026218c5e783423b5f4ac24c8d5d8faa7186dd5ea34afe299e6dbed73ffa8f415da706442a48808a9342d6209f65ca11eba76f8ef26db890da76671971f65bce9e6112c8aa92523dd5295d748e28857acff408c161c0513b37b855a8afb0764d118815bb1b68f8f09156641f7eea994ddea20f4062607b9919d041c880b71592402a4d5b92464b239caf431a99dc67787e76b8e1d7337af004bcb88473cd16b3f7640e8aaa59ad4609f060a2cdc71a4b3ed22c1506a7050a63bd8ed68aa58a8109980bb3f2b9f9fba9599d7620b8c25e8aee739095789af83529cfbfce5941d7f14c8ae30583deafdc7c25fc34e75bbed6ce4f6b47e9647c12333ce08c7db77dc94161cfc43f7ea0bba39def8bf8ae61c6fdcc0de6308af963c6d9ef43916d0cd2cedb970d5937c9fdd4b888cc66e99363b5a04ae62a4349f01c3933ada64080741b1781754e4a722303faef69e382cd9823177e4d5ac83e76017124a7b1a41bcdbb9c4209e7b42c", + "eaae1c53919e2029c137a80f91704d0871be2c1870d0333d8bcf7f94", + "4c434cddb399e52457113cc7e16f046c3f8301f5b6c296979f8a091a9ea557ea", + "b633c1a0e1ddf4727b149b3d", + "f1de487001a580cee6edadb1ef6b700c861a70c6ef16274447b8c61bb10d2d1efbf104d5f7d7172c6a5cf9c06d886165a2919ee9418e2e8f803d47832dae5ef232ee300d1f973a6298c22d777a1b16264353cc731a7a683cfe31e0abc704460788c555c0c24f281b81d7761235a955c736f17f213a896b40a034609ca8456ec3cf5906d01121b7580ce19d89347b6a59c81add318df487b2442a7a8b5e30df78467abbf46bcd5ee5b994a39ca5bd8846caba6f02f4f1335b73d4e20be0b6ad85966f86d1bb857713ebf947ae936782f1f4929498bbd66bdd5ad6fa252364a5a6b46180e93b54cc321b3cf63cf23d55392475c6b8c8c9dc707924b55544151c7c55ae0bf391f793e52bed70829fcd32b2926600f65be0943d6a9a96547675426b0dca9cc7b0f5dbc9d5439d0281014c6c159d055d6bd89d67828ba7fd2a0570ba82996037f7dcce297fe6518331270f6fd5ee63d406cc5081472bc5f2298a9208dba9398ccf807ce9af982885897715b3c5742456f756d79c70434a9baf7b4b6664c9d9f5696c5256b74099e593f97a2d4a469cb3430d0c3eb06083398cabd58af598945a85c9235a3fdd9ba7686e54d0de9afb594b1bb030be8e6bb839f6b45699dbcd2f771db64b0c62bbf6c8672fb412d60c00b3d87f82ffff6512e8308877573323c5a2d6a216ce3e2ce07c9763835ae59d44d7958fd873e3995b62b1b347e489ce86e023ae27a6cb03ddec27a38fb233499a714acd89232a91d38abce30299f38f437f7a46df647f2be862c1e7bcc1e4263c2147b13ee5b345b7fcb973f3ac71db8bc12309f67ddb62659bd73fbd20664eadcd23a79233386aeec1a6fcc8c592053954ee53826cb9b6bba22400648887311cdfa5414c96d5956fe193a3729be1434d923a3f9849f6c419f77ea05fb72f3c4f75ccec03b7f7aef8c8e55c8c5480ee505ae1a7594e6a911dfbc39dbb0ae8656f5972eb644c64203a920fe0078f3d050cc5666ed9747c23df7853d6913005d0156e741a5ead3bb1b22e5bd802c303a73a961f0b60d0fa698041c22577b44eba5d6071de4b545d9f5de24944c151de6a189bfdc223e0507c74ff929f06a2e7497e8c63073294b4aba110a006a6e9510a9617405d9ee711831e085940006761822672549d1d1c70e50002c2227f6f304b9a7f11dc05751be2dfd297087044d2e20ecfa0c091478d62c1bf5f0aacd25bb0384853762a51144b77d30418b633c4c10a6eda7b2eac46905641da0b685f85349749a91cdbaa4027fc50eb97a7dea9e8cbb5b5f386ace0363803ba579cd16ef80dc40ba1044b4ecd0e81e382635d7855e2341b18e0ca705ff46990282fe25093a248ca04a1fff64ebee25065350ea4b9e5990da4dd2e28688ab08b6d6fcb54d70f6d74fd7e5e05d21c12f5b140839aa966aea9ee094a923ee5ec704b5b709ff009c20ed89a75468c48b505d07c7a5ba1ad54ed610886c9d84468eaa598c71b017578404c909dbca431703e0cb1cfb975a696a1677bc015a75db007eccdcb21b9e5e119c48f148c2cffcf29e245e52156ba5ba0a8b0031570e4cbe7b3ac4646353594f0c4a9424c9d97845c5e1a4b4016df9be8df3013e5269484cf32258849afbdd733189ea11783f0f64d3aba9b4f48818011e868cc03ecaa44ff0ab83ed12981a6df445294ff672f3a16d6e0d19b90007d4646e967e0fb1763b3c879f548e1103a75c94f3a7f72be78555eafc086c1c58d1761aac60b843704f234c55b951a1303a12705f2120f784c2bc1494432a94c835d908f0edd5cbb169afd2d38087ca5bc5e5df9c3bd970dd2da4fb2a00933538148ebf669a20b5beef0402e53dbfc3a0f289b33b41ca27eb2f036a22f0d02e0617bd01e8c74be264515c9b46b9ab6fc67403a35837844580794088a9d3c14ad9309435daa0396f48017be524856ab6c191350529962ead64bab33171a01bb3c144b23bed406cb05102c693ce5df36eb541c47e871acf56f2b47de687eb9b3511ae83d06b1f69fbcef3225c3469c304741437fcd0ff4ae3484c117f51d24b6ae1363beb7d85d9b61e01e3dee901b90f2d3272eedb384ddb4d3b9594b9c0926595e500f8ce2e5cd407bd7a4e2c8e6f4315bf693e8c961ba5b8a6c7f5030c68a6b995e9d3f9eaee9eebc9d679eaf72a5f1cb6b2fc66edc7dffa2370dd778ea7ff446121999afba7bb35ceabf626c6269bc466d65f7f812c663bcb2fd87d3e09ab7d71e727f66d20ec48a5d2bf0aaf0aca05d1546d6e974f90df85c1393e3d45731f71ec7b5cb6cfb4e5c29976ead6944a99df2045056e198b19905362d4e9b765adb65eb089233a8b3777352665489c9456cceed593c6590d9f3cc4024d0bb92e1a0dc619bf8ae65be77456c18f8171e4d2d846073cf5c57ba93adbc0db9799e3d98934aa6899372acfa4d7d2ea32e20164b79c71d7bd33c94f9a781a25cbcafe563462eeacaec0e8d9d6c0199de85558a3a05d1ee3483351915d8a4e65ca0ab129a2386a9e26aff9b912c588babbcf25f8c467145061b9b8fbbff19d8c6ded8527d457be7c926c8f490bbcd627b3002044b7729a52e94147f95772591616f6074047e758597f410b3100f9efafaa4137dedfd0edfa85b0927804f0b4fcea1a174622116222004d42b36c2c73d04781f2f49d080f351e57154a3980005bcfb0ea34288e2fafec5bfd01e1f7901b3efc71ae58bf8df4cd7c045856103b77bd78073f0174aaaef4a3c0e8b5b46dc92db55478f012dc1b7d513e215e735573257f105d2390b5366f49b61809033c13ed4e1ebe19ab89313c947f2585f0788a0c5de90b41ad0dbbfc604a0d414d0e5390a0f3c9616cfce4097e38e05888b8bc6e55e40368bacdba7e5b76f4bd8fe619746155c30b38807a1ad325b00ecc3dbcf23014e79f1c39af7cdd0dc7ea58ce733e6611b7eae069deb047aeadfc21960e614db19d2e7e0905a9873268b9a24f856c28059321a742cd6cb3d1527", + }, + { + "c89c3cadc094bffd5ba06c600dabe30ea19ad037316fc13b895fe0e14ac8841264c1bf25557e22b01f8e102c3af43adb8e0a12bf79d3fa0232dae37ca3688e07294e2c7ecc4e2eebdd3f17173351f2c15b0480d4d77bd70955ba86f82214004b622cc92f7bf81a5837326f6a83612bdf65abb33c268a457c45cb7467e074b342a17c711c748c74abbee31541444020a9ecd4e5125e2a8ea3f6030bd677be18183a8a34af16a85ad48b7015cfb036789c0a5daf68883d0c7e401754b8d56cd00ff605be0cad19e03989f608392c81d636de859e66c2aae403c138bb96a58ba69b9064a83e7d8877067e7f40aa0016e0df9b7f455d292a60eb621b8107a727a3378c4b7509d3ec10526c50fc6c66dd4b015c915e85bbbf701ddaf2258119c8b9a5132eafe61bbf38870f35f375123f766ed0d4f38b9364a86e56cdef6f95a815a8d7c48ff283c77992fc6c070eab7d7c7b517006e5d4af532a7c429912ebaebac27249b4f5112d870d998e1c450b98c05d08c742dc769506f2d7a004c24ebf84c10838b619653e27ffcc4344d8db0435e4cb77c0410cc734e36738a6b5f72a7600632d19c86b40c737830b0f5f104443dbbb031dc7ca51ab318951e7817b5d81de8a9aa7f5db6e2d5e7a3cbd8a8100653c048204ced3af005d00e7de7b445f5acff901c4d46ff133e92ef073aff1d9ebf55befc32f9ec38c9eaa6a1aefc974bec2758297e474cacea2ba4151ab1a3ca0762c64a5ca273169d29b83c164f77f266c01bd5075871e17426068ed7aa58ef0d1f2959b19c604eb6187acc57e2becea2da93ba23159ba73b9226034c7ee2498e0ba34fa8038e5e2c092a73ebd9329ea3d648d6ebd47e1776941ab3130cfc91089fd0a0a36f0ecf68293343f275d2a64c1b7d27ffeb3f667f4a19824706235fa5f3f04952ff08bb183c0f1aa1d1b0edfd2e05ed093543788f5d0ac6532e15f912163275053b202d772f381900e906fe070cdb00421e78c16b7387be91adb7b3b3ea28b92548d69c780ea578e7ac66eeb931eefb4067bcabdb345a7cd2022085fc494f118215adfa2443630bffc9faa8fbd9943c3140d81c7532895734a9dd20e31c326531d06f5623c252139c4cbc882640c457819c63f6ceed4e03872b246a3766df69373ebf5af1116e8d5e1b15745bd9dbdd663fd4352d1238a43d5d1e74b3edddfb1c9d460daeb49afccfa0712b7a4cf8d07ccd0599ef3e4e1c9b5c814f3a6f3a46fc80449b34df87f47ff91fea3618cab2d5c04cb50e8ad199d752d901b21348ae939d39c86cc1bcecbadcc6f0e581a3bb51e070507b41ea4294b35456c69cf55a2a3f1296f0df73abac3a9c81cc303d1e20ad6e9bef48de83fc22dac2cfc01ce9ff3f70e00ee49bab2f282ceb6859f989075814e690e36a8d16354fd6056cbff49c30e49b1570363498531ff0ad0979a4518e9ae271f57f883abf5e301c0e24a83f09335479698911bca90269a28c0e040a98e67c9e55f4c91542f921511dd980270cd490766da22306b48ca9309aad3b2393b7b1e9ac7afeff64204081f9c0a8f6a5396d02eb9009901ca2c0a75ffbdae3a38ccd5007cc4f6bec8fedd64086cce5c039e8abc9e23bd694fc8de4e858c89bd585ebdd422b492eab26f4ebbdc1d17dfbba19b5ac458c31320a161a52dea638548205a6ad4ec54875ca34238c059177bfab2d5be0a98d12b3932d0661d33ec655446d0283224af8ec7f1c6874add03448fd8029a71d3c5aa06951123c9fd881d435845757df50444e6cacc31a8cf7537a778d1184b96c3512cd474f5d1fd1214555789d24c8d173358e36400b2d937595109729d9f35eecb0963c0da60d2eeb52a778876059fa95d820d5d34e7948d389dffd53d34c4083d27c917879b053cc57dc43c8263e5dfe5f33c19dad0a7126ea6e8abdbacb318d37c305a183596ddb25b1934beff13a4f24fbdcc2064de8e0bc639e672ecfe45692e9f8164365e1691784b4f775ef369aeb135ce15135c20da95064c810592ea33316b9767caaef842f948b9573b2205ec57d3026a2f2244c42991462e233061549cf9bc66a7b4a8a0fc61f73883fd24dad02644004989c4721a0aa03d3b0191d7fa4d3da102e541fe463936c9365ba30681e706ca70cb3c8ad5dcc710de59e7d8a6247aa809bba74ff4dd182a38bb31baa337841302c19ed89d65e87bbed05465f4ce0dfe89b44d7e9266a8ca21d984c41109d813ca76eb67dbd4e39aa437ff98050c968ec1e40c534ab51d6b8ea2309fab08b3757e9edc5972bff316f6f2affbff458ac0299613734b30dfdad20f797d172cf295cbcfee3d8ee25485d40380d3480a9372a1a6e5ecd7c4c6a9d34027ea6c197f37e86e757750c9fc24cc7cf814878b8628326c140930dbb2041bd9ee87f36ebfdbdc34522cfd4e50c9cb48dd52d4647a06d08e0f0069c104849bf30c8e61cb693dffbc69fc0ab9c5d502a227d606a1dcd630ebd799acdb1e47ce2ad52ff53f6cf4fbd5f0058fb5db915702675ea44334d42e0b6ddae78b22b5b5f7e5aa36519e31278e37b64312479b14aef9b8f12d8c1f39faf920851bd53b13bae5490c847b3312b2e956c430f1d8deea91cf171dee5017e7709d0346d81600bd5f0c41da3f548c28aa50589b293685ba059cd7f3edefdb5d8cdea364f4a42153b0632ef0b7ba18610b71fc34a781eead1dc5a00ab47b6840590ba44dafc6a16029cf50e089684194d93dc881beb62edb7ccee6304a4e71a35915f109db92690461b9e4ea21257ffb62477c20feaafc7a78e2aac2301b66893157920ce9fb114ab4f534d61bb3d17dfb4d9ef9f79a736f7c1d32ac3998356aefc876d8c38722787d564e980a1f15056cb3fe634d71d2c98e0475c79cab318b73a863362f85aeacdcfc44e61b5aeb870de9ea5b5abd24e8c19ab05e45e1e9b8894deeb9d29d65ae99aa94b5047f3c1168276cc2e491aba52b5b03703ced28c63a167f0cb3e4bb4d8e4f0292cf3ea4376510fa49a1a5efcc00f23c3cdf6402197b81262e66e17bf4307d87ffbc2b37213b316bddd65aa9d64ce6122c4a1545c5966bf4fc4c6ff17ded787ca9a3b3cadee435bbba8f6590dc4ba30895b84d5b4eb94f4b05be3c", + "82abb4ff5457b21f359754f151e456e2c0a185c8363d15918bcee0d6d49f12564ac655", + "b63b577e719494032062e3f63682098dcdcfe26cedea2a40893c847a331e4ce9", + "9f1d08be539f1244f0f69ad2", + "88dcdb0309f8c4a96ad5560f8210eda1f5afb31b85b7a8b15525777748967d4ed77c063f65d64ef19b31044f2adc690f5e457faa1abe2e127b38c626eaa94053c9ae1b6b4d0db1f02c8404b50f58210cc9fcc6fa4ecc615631da631031cd6253b4a13a3e88295ffdc775fd4bdf29655d9780dbe02b0a82aad4c4088e90b51f170909c0f98ff93ca3926067ec94be05841603db4f913b7025a9ee34b8d8bc629ed827a2a9857e0814d36b83cba21e670f8f94ceb4be5757e0b8782895b5d8605868e4f584b5bb6a5f3a94edd9b23fc2b6fa06914aec970c260fc370aa245ca68888c90c43eecb68474c9e45c53a7da055f5bfe39b56769fa56264dc8bf4c1616e30262bd501ff9fc5cd78f73ad89e093feba0393a11c6b2cbca765ba025c40dd0417dfa644fce96db5a0362235ad37a317145e7b5f3c7213c7fb3c393be57a1cb55035f06da1f0bf665653c5fe8a0f3ca67dbcbfc59852694d34819d0978cd09b508d103017168f6848258493be737cc24c2112f2afeabf41038bea1f74bc8656d9910b77d33cc691a0d9b12f7c518ecef93423cb4871949a518d2f06e5427823324275b97110f8f88b0d14788741e617f4b194e679a1627da50376a08d4f23b005c0446b46d4f534ed85e4692e7946ec818437089425ee30e47de995e8774b61003801de67939d9fed7bf0cdaf625798d0d0d04a61a2482217b890168e36f20cf1d6b81f9daf1a49a781567c4363ac2f3ebf0252d5adfbed17f98cc264ed2765aa279b7437410ee8b4cf42932e5055f4884deefd2a979ab1328f97cb750b3b7e4615b9c1c61659c90a5ff6d1c736e785587ec85040fb2c6decf789c2707974bfcbd0c7f699627b31e0762321d55bcc6acf1aabbd44abd7766d397bfbb68c424b311611d9eeb6598ca3126f569f688455da8d5ab86eb01f9c96186858c4b5e447aa2b9ca11aa5453f731beed4e09f95bb7376e200212e2f03551b8b09a19d6910f25898d692bc20bf6ed3ac9a0276db560de5c9e264f4db8fec6577042fbbd4510bb7070086508ac451a1fda26582c259412fbf1bd60cd5e921160c2604fde559b5ed4df52b805010b225f999450adadc6e108b70f169a3d8da6efbe1cce1c4908b004e928e3cdfdd0b4c5f742fd72a11c9585aa3517486201b6d9a98739b77970a88072750d29d005a291546f13b576b4249d71f04a9abf8f653ca206d98f738af2a1203bf0975f0a40138df054ee834ab73a3b1d7036567369a7ae15f808904e08adfc84b34a0e1356009d8a82e51c3e8f2170908179bfe47be8ad819cb12e85b6b76bba7c9b9398dfc00f550e32c171b4d5f2d9676063efee0b0b49660c10260ce052dd00addc3359e35c25dc33066d4b05bec7d93f71e0ad7d5ab83d844c7f33137894327f464260688ea4ce9847046e7dd0bfa48d4e15277a9586b4742daf0c5ecc59aceea6867068b03c20aad38d04a814472287d809a9285cd4dcdbf68f3f4ffb794701f4c265b2dff4aee55c9815938689162e08309df150538e60dccc03d495adcc560fb831444b922a6375845cef5dec56eff2910b5bde5f25f0e550ab5a13205de91d20896fe04a8ecc2c83d1371cf381424f8c43d2a5ced374878405f52bb92f4fa3c15d29ec151508488f9b4e42527921e245a8ee4b5d6ee95797f6ec4374d79acd7b467454a1d7eda05a8ae104534b23c46b27581abed6afc3ca555202dda94fc2b93501fe78867730a84f6f726dfd7364bc240b65d6c3022a04e09c89e36a809fbf244cc5522315110e9e33c8a4e1f1396e3e51fcdd53d9ae94fe7bf6c6ccef0ce02048a11441de3c25aa9787c577501977e486f8dfaa4c81e3183e648311148ce5cf3de56878847a9d14c0645777022c158670377dd9553eb63eb17e19ebb06202be8fd9bc2b24878cc86f9938e5996751ad9ca04b636497199f7f27dfa0f5ba2a01c3a491bec6dc5113d127f6aae38fa07ce7539a0c1817f7f0de0da538f4d85ffa394784a42eb50994e28530e3997e3345db28bafbb836fa463d34146d9f46d8d2b28b3954b9bc7f84046828e9b55e2fd663e562aa95caa97873f48f0a003d2251fb3ddbce0b6072fc17e0d3f99b655b8f41e8e6986ef7526544222e2d402489eabed4c219540605b9f5dd321ad902708601e85bc874c11efedd072aab7e10272c87b08b9457223de9fbc3abc2d1346656a524e9c67d79d4053c4257e886d6b430f5b7f57b2e5e92ae69273c1705a3074d5066def69fadea1af8fa9b3bf4890f9cda4b1833e5ed27f22bc4fe4cf452880c7b53320bc7cb748c0af6e7550ffa84e4714ec18d208131ae9e3edc6cd6fa2c60ab8ebc1ee56eafc01fbfba061e55014b9711eb58fdd01f8936d29dd081565de0b175b02989c5ff374e6f58c3383e9bc00d8a93903e6a221c7475e15aaef77594849af877f3807a76e03bdd54ff0b192bf34385d24d858d6f454810ee48141d73e3acf1aa3d19cd4c723a634cd8e25b4fb604c744e408dfd82961e46e8444f001d0991af24b3b6ec57ba41fb45122afc73ec6b25f501f1abd46181247945729337bf5083e5821968502a5a696043ee696c290095feac000957f968ac61ccb572ab2f37008830ab9a81d02456190af99873450b52df1888c3d8b6b13df65a9bb36a4b6d0538a0f179daebca2bed6f94b4670560fc5471c3770f2d004b6a138b8243068d754fd246e9881242638c6675f1611f237146f6e0f72ff2fba96f479fe0a662a81f40928f5400a0bbfb5ed07a87f457d5febdbdd6f323e2a59f749e6fc8a51d08b023734c762a91cc517401be57ffdf6a52b9174ea153abf2190ae2642955c3c02b4a15d72456c9d2f323de6fabbf56dfa3b566f1aa688c86b662bd34cf2511cc4a30621b6f1f1ac382bc1c4fa4c0d4d5a30ae90a5e54a9fb4afc1475e7c612eeb7f0e09e894c2004cd04126df9359d525d7f090e4b531916207c38c3512341c84218c86fc50061043ba1b89ddfb21cd756b391cb53e8c1cd55352be05efe562669e3986c022e30c79a97bdf087889a392e6da0d72cc7ea208aaf23408df23f3a9ea9bf9a935e49c9994a37a5dd0faf1267d5f7db47cf64ae1d3ec166466b2f882eb21698aa375cb50146c0e660e9bbb38d7bbc1c1c6d8333f7031d6a", + }, + { + "68ca38fccd585eb14f953358220027046c14ef965478d3d8f206f63fef4fe3571a1b53e774b298c347cc1b69cc426d39575ccfabd5a284c7a87a0698cae9efe40543cb79f5643c3c3057a5fc991235f06f059c44a7200b509a12e864fbd748001a14790f78b54ba80cf0a4a603da9672df32b68652c1d6edd3be51cf969acfb0ae49c026fe0bce0bfc72b1ff4c47712b7a27b2cce888b9bc470b8bdda55a8d53a34d79a25947ad55b95e5406a5c5311fece3ecd46ca590b3b01b9055761da8196b21bbc468681922c66d286c32598b1e3d77f2a91d835ccd9eec231409cb2e74ede9385552517718be9f84f0f9100e368701dfa4843b7222279537306065a54d4edda3a02f1ab9edba3ddeb34dece9d5edc8797103eb942a80cb5ae130ff2e7eddd11f0cecd8f9a615d75963c44238b10ab1230d9db7371d8291feb2912d306efe4f7aea2773903d4be9a00f2bd8c03589e342269a79441c0b42ce9c6fff0a6e4e845876f7e9b342d25351fe2b1233b4f576db90ef1facfa617b96d17aa03fc824973e1c80f15e5344b0516fc28424b7faff47ea1ef4e47f6f7b50e91b8fb14027f05ca7e1bafa266a4b952cd0b9e4cab82bb4d61f99568e14a6772f36296f5d19cb04fa86ff20f04ab61d1a6f01e5282c99fe4c3254da46fb5276317be58e94b1928e3791af27dc6544f6d445dbfc7275fbbea74f98ee4aea647b654909f9fa9c88312d3759099c9d0070e3db6d55506813f8b7abe602964a7dfb9387f58e237dbf50b4185a50b65ac099352dee8695017e4dac644f42aecc3e415333cf76b08fc764a721b45d7b74f6b0a2e43637e5b4849218d3d4c6a01208f345d76af56631590e520d6bcd82627d2446b45b2c68e0be81b3924753a54f47ea27b1e08de2399b34470701c9697eedaf3248db9b28991cdc2c632fd1b376bbda279b6709d5033d1c0a3ee573bdd222ef1afe8a4397a61fc30a4e94bdc55097ecebfef6c00133dc0b72c17e2f93a11eae9fa9f1364f10fa595e8e1623dead10caac068aad3967b9ab2837dabcd8f96a77a25baef16ba84bc93661ed150ffddfbb0369683cd021e8f959c2b531bb1fa95d7a574fe5ff9aae35fb9b4a0a9829c59b932ed8634fc05ca88211da3e30839baadaea8fd9843e0e80d9598a7282500d157ee579cda5d57628e7506617d301c2adec5186708f94f069ed7bdb70cbe88549cefe1673d48c9bbbdc01d2af03945cefe6e25f757750de64cbb9d496a25adf7058f5e32c827fe75e80ba0e834e6a72344dd2aac4228828ed42fd83e4576254af5737dcd9b6c338377d46baccb02d00fdffaac12133ea0e75e791593ef3aded4ae4c9249b8d5cd20aa28cd652b9d750b88111d9b4fbe729e27882206b2f0eb614d7daaf6436816fd80d452ac71c7a7f9e8c595287407c6ab9fe8a242e98da4270b4f1d4ea7243c27f89ed46a567c643f31f967b5f12e518106f3d3e08178078cc714cb6e39079631966a9becd6f02c18e983ceeaa2106ba9043f9985b791027eb5dddceed563106bcdbc48a4ac64bd95e87c708a8cdc33811bcd16c35e193203e70ef2bc7203183fbf60d46bc581f1bdfe62387b3e6c0c4a29130d872c3f8b134e7dcfb080e7e03048c49c0e468dbc44eff4b02e50bc6889cf7600fba58c0ee409ce948aa684baef4956fd8fd4a9c4c49e84e2ff314b7900b179fc66f5fb4affb9ef7a6064354fad8c3d2d50e6f2157576f864a843dda8f547955c4d80a73d4a86b7aaeaecea886927a5ba0e97df740ec7e8b70bb650010df55d4b75f478b07b205b560d45de666d84206c1bffd02ab7b8d1c37f21c47d1711b89d16214d8151a8e75eeeb5c54c39e5a855d578708d314240a064051d8b26c6183ce755be38fe9597dd5b5d198532b1db083a4b856b8dd60bf1db197cf1df852eb6daecffd97287a6cdd4c05307722e0fac798507f75b03e9361d5627ecdb56a3b633938fa61b2673efe6c6e768e4e7055e6c1d55c7113efd3e95151b606bbf169f4296455dccb93da370150c54fc11b3682f092f30381c6ecd218a3d9d39442c8bea61d9a71b174a8b2c56e028689380879cafb7c4bc2691dda0cf6ada039755edf93f851446df9f63267f8b8f030c069fabbe6457d4f63575b5905fb927a5a720d52c351bfbc48f12440a91471697e6b2564b1a2b314fa0e6dff090079637287b635d875f120671561102ad27aa83d9f0cee41bf023bcd703ad670b43ae23bf01713650834cc1e95dd486757f0a4f6fc9337bb95738805ad5e756198579c886eb0ee77e4ba957997dde0eecd84e4c9171c84ad8f0cb23c6a289e037f3a8beeea7965ce34fa47cbd727baa4ac9e6dc3baf17049fd2386674b246aca5ef6b8496f1d17a3175f6fee86299232c7fff682f066cbed895155d475bf9fd4b5571d257534c88c93377b1a600d4c280d42aafda975eb32c740073cffa610b5fd2dda7262a2fff5da7a0f3a875c62949e0c9247827d7a49bd8185bc27967124c34b9725ee961bc8102a029786652c2571be6cf33be63cf867c2b48e5826b31b714a415fe05c27f0862a870d8fb33200719ef4ac8530a4ecf2597b4a7f2e66f078a7505803774889a1cf963083c831f46725a1ec5545d8489e53921d81f80ef99f5e51a2d5992c7769c2a7ec8bd8e0f2fd81de53c7b69b650a2d838b269185c5efd668c470943bd956e3c5e1bba5d3b927b10cee68a75372d4d6fdfa6782c05659281bc9bc56a2123967f4f50cc7ae3379ba21e1617553354b5030b3d3f0092c1824f5d47b97e6b4fedaa90aa2573e1b115ffc72d44fa8209fd8d372c8dc9ee00193b47c2a9a302875da331731713243d02eb5a57d5dc51c35988ffd742ddd75c191f1eb2c2214a1fc47b82db8ea708818262d9583f2b1b98a40b6ff6e94742f25661a51882ef28475aab12d9422b6ac48e341cbd6f38460333b5fa1cfd4d0f43aeb46c21938468fe3f7bc771972246156652d2c58b18c8cecec2dbbc0feb0fae9f6bc022e029111f94e8913c0ad741612a1426b53cff73fbb24fb7b22ab750ba1310ecf339fe12ced6a3fae17b4c429550794a8d68be891b0e30cd28e81de2fb2ecfee58bdf11794951276005eb8a5af21e03c8aaeb595ace652c5ce60a8b98f6897d82698ffbb2e02213e50d9d3f00bb42c8652d22bffb87ec576ef6e206ed6c846fd5136a87f38c9ad899371799f35a6258878418830b04da79fabd80e7290456fe17c0850a4c20e2e657f97f4a53e1a3db65bb5e71bf38eab9f56aa11e6ef71c85b8a28f04c271", + "ea196b6a64be4b0799b1c2f72281300c3a2577db44e5d3606b8b02fa8fc46c7aded7e442", + "7c86a2c06b7520d60023d18fe73d09c514ed07a91d50d8fd5bff00e7550faed1", + "952f492fe00b26028c560fc6", + "b3f3294815ce461c8843172efe93f73a8254e58a0e71953e35c15aa89a7bd9dfee967853dcbfba73d3b87fa60449cbcabf13b1206d0cb27d2c3fedcfa695b6d41efda37bb6db35449bd470a23787619ee48f981d3f0b1c8e121725b2289b6d67858a4f9ab41683bdaec8a913ca2cc292a9640efe50fb85a1d1f7b286f45d4448f85b3242f45ab44e3281d759db24dfabbae4259f127d6546ecb914d7e93e2c19230c67fba8a6cba6069023ff7ea3d8a170289c2b4391bb97a7b899228d032b36186dfbb29ae8f0e6c06d753f4c6b21982d49ee682bef50a5c2c8434510c5fa2b9c0349592f33f8d7ad6f7243d42b292aee6d210c61e3f898875b91a17a89148275031b74cb34e628d7b701775dbfcf87c79ab279a73dad14d8eed365eb9f29a007b7d2ccc07ceb8cdcdaece67fa0166e135c9a4b939426882eeca98ab887ed2e4888bbebd5afc9f2da3e9162527262b0fa85903246bc8b80df3060c890ebaa516781a2b2a138b98001287e12a9c68471912dd297bc0beadccdc31a27b7c726baf31510cd355a28e4ef786b30084af66ef135909795aa73814cbbc6552270d5e11d46e9497ba30d6d8cecf343d16e7e3357bc9bbfbc7c1dcaa5fafd8a9b07056129da02e6228886463474c5af1d670bc14cf2868b816cc71578ad807a37477341c8192bfc2e8b1f7bfd58827e041f70384f92bb4c6acc415dde5099a1c2b27b709f9e53d1dab07c87a042ca4af7a2a6ee57b37bf2bb42259d372ecfeaf1dc55ac3a9f211f16fef3b2d5f11dc19fd1f425c14779580b2501ec6e0a84220e7e12baf9e0fee3e8cf499a7fba6721a746f598f04ee8ab4df31fb8fa5ce2d2419d5551155c009f2780cdd225ec2c19f94fb9c8b785ad4574b4da766eabfa696a1994e64a2518d1bcade6390cc683a6e80cf8b163c3e58cfa1134ee743079347f08a89c81478668df32ce9cdd7b853db5cf7af13436f3bbb11bcfa8f6b6d727a1df84f99fb3a5c248b8fd5baf669b68fd9af45298030f3251bf0351fa9b58b0b9fba53ecfd838300790ebd689744c1b7b333fbed76c8fb96fc669ecc6695ff5bf8379dd2a3c270af858cc60894be8922d69fb9707bd2a7825f2eec4a5056e5e91714f4dcfa86974259fcbfd5f20d55923a0a9936fb20e5ae9670e2019336e15f530c0be449fe355a7a02c0938d60720d5b8f4f59d2e4213ad5251c6058312b43d47c44ffc8946a98797f5ace279d3e126da63633c0eff1c412febdd47817aaee466c639e43637c1e179f606780ab490d3f0b3c2d79709f1262305fc87c02f68da2dc32f8c544e7b358c3a5d2c27986a19d13fe736c60a3524e94caa55e853eedeece985d16bfa6c487bed6583436cf82077fcdcf90a05f49db50588f46550f7a0c3a1cfca902d66d25dba8d2c53bb5557cc1d87c8a407898b3c30c4f0852df92d839859c191228d0a47324ea9ec2e0ae84513cbe4ff4aff85e77b8587f1044bcb9775099ebc2f28fbcd1cad58a8ce1f072f2228f559fbfdd8405d86f8262c27c3d95e01016b343c6a4e59dec81b59bb6e3c6109a4cffffa85e9752ed2149b5624417c0dfd1a27bd2630bf59814f15820c43bfa317be59ef6f433c95e8be154a8ae94765bcedadebb717f0d8c24e01e1952bd104ba9620f067554ae0faeb78f13c622c45d97b2b5774a3e30cb07f2cf0e8b19d1266d8a8861f3772305e24ec5c9cb714806c7d705a3bed6385f8be4e12562e17ec3df01afb4ef6f7427c48a1bc0e64fc65eb1c3d3ff2d6687e4c275a019f5ab5c63bbe47e3680fb1802d5835c4d494f0f394de1ae47f81eef005127d0971c4589c456ae6a69855f35635c28b590c1b93f155fabcab59b6c7cd8ea1c4ed1f67093aa782c54329cdcf9bf84a40400de707b894587d6e08cf7fd72fa45b6709a26e97ff5ec1269b8042358f872a79e8c2db1c7ebffac014d6b6f71b0c1c1945ddedaf5b6911668059b61b55eea4737aa307c829309c9ea548fba2bede023849bd61b5a467cd1ab1c61205ce64301e2531e5d58d03c74ecdafe1f5b74627be8716cad0d0a0be60984c9f9dfeae24a6c4949170ce2f589326e0a76c447a578ea3a5e4bd9f18884f18843eb1a78aa2fae06a7569a97551b227c34d429c8e1c8c5417ced93c30dcc607cb32a365d87328aaecb4ce57ab8e74f0d9099e267cfb747a3bca9f76b5f6dfb543bc4b5c06c3646062ec14f511058eb2939601913f8a0f1785249cb72b0bb1c12a9508b23caf490537eec53f614f3e06592eb61f75c1cecfa514cf7b500b0375095d5db74556220131390b77d0db72711c0c7229a5769b1d2b3f5105f3a4370beb1cacbd93ce32f89f1fc833c7949211dd204616c013a3399a22f5325f1a00008f4c8ee7dc5bd7476848721fef843123a6213cb0c0b6ae84233ed01a77a115d06e08990b8e60cfa4f41dbc9505cfae76463278b6c6b5ac7c3b83284caaba4a6a1d739c392528ff5b06bc3b82e98060e3001279a44aabaacc661fb14e7581d1235940cbe067c6b386da09454e0467c785ed0b65d41ff4cf36ba5f63d3ff2b45c11c6c22d3ea8ebbf1d52d770e0ebf2ba0c67c7d3641c145cab474a88119335990137fa82a340c2cc8c453752a3aa801127a47aeefe66d1af1a26ee1cd0e6d935bd548f6ce33a9c204be02ba08f9fa03c685665375db7c0c656ddf3e441ddd96b0d2018beff5086cc63339f26bc8332a5e6a1422bfedb69187a3443c23b630a28b02f8075faf3ff2fbeef6cdf02ba4af47a765003de2254b69f487bb5d038759a33ce6885611198b81b0b6fc5d7a531a7a90dbc3556aa758db1657698cb3698b8207b1c1b589efe5d52790667ac483dde9543953c6392d5eb8afdafa205d325e314f810e9c7722cbf5bb76fd6502733149bf21c60717ff5bc366b85ee9f206bb1f330ea72f61a9766090eabde747b1eb9c046cc8713d5a4f8d4b7dcd7c61f2496c5b467608cd9260382b8f11b04c318a5ebb6411a4c7fa060e08c295c6062ac644bd3d10bcbfcfe2e3748eba66f65d904ff21147faa8475f508f21238d42f62b697249b9fceb905127f7684c8130cb8663f09cd25ea038078e1980237389337d1446c3a77bce41b37b50b9c3a020526e7b7b3bef370cd7af71b225700627060eb65693899d277ed130ec5ed9eee75d4886f31aa93bbf302e0c69c9c4499396b43dceb67c02fafaff8b56698308393a03f60babde883f00de2c66831f024fafaf98b2fcf37a9ce01d4f34e95c9408395716dcf83fe86c7a0f5e3e6741c3b63b6ebe9964f1d5005eeb732ce66402007beb3e6a087053", + }, + { + "9100c5b2d7c5d5a854bce55e82f94b89a268da7b66357a661dcf75cba10a1b320ae0e4e1a5b989f9766e57f867a3810a0b5b857191ffd7aece4c796f5694a2617486421940cc12b63a6aaea20d2fac188b318a1c3061cafeae436e04d710654b96a864d674768caee03a50ed6afc06f52d90115df1db5c9f1ecaa4f5da094070b1a447251ad3d4fb0e24e87821ee6d4e7e7eac7059080f77d2b36cacbdac1c6e5063946a376865458c4ebdad3c2afcbba8a82b01b03a7882eee42eab904a19e0aead4ae515b02aa2fee74f3a114bf5b9f320baa35b3225491653f4a69e0d864cbbd031d0805b727e42c2b9530dae0c01cfc6a42af8ca730e1d67b4bb743a072f0a38008b937209d534c2284271344340fae76af2b1dd00cf44b48ab8ee92e8f9cae8845e5a8d338f505cd1c19014018bfb6b7dad487e7c8c32064421982c1a63149ec16f2bf4fe7b50cf3ce1e33d6cdea8e98bf067077c9a0ec1bba6edd5090273ca719ebf6f1a0f3e56f021945cff3c468b2dad92a947a06a024758d7505a4a1bcbe9da3a03e97859da99ed36982a7c23572ab60071566b749dc34bee1d9609e87fe32282cc9adba633c9ddcbf359ef4a83a54af5fbb5699978b487954a907dc9739f4b3f3927e66cf0c338e31c272da0cc7795c72dfe60a5b2e73bfd77b8c6ea58122a913910fe29d3360cef5d398f29b024f0dd225183d538bed2b076989aceaac460e3d45e0ca7941897f151261a024b0adf6d5b62429420144497adde6557a3c53b7723471fb760b6a8b1dcc2b327cd939528f5d7bc16ec00ad99df12f082d82bf9fb7318b3d3ce5b84ab1e38d2ebcb6713c03fd0d62bd083c4af96b4316ee02b6953431c261278aabd96e28f81adf7946e3664446135c825e45ed916ccb941350c84523296cadd5360bfe3e16dda75db10da1f710fe796f3456f0911294a4735cf9968656345b9c3049ca47176194c86f36cf702538df699fcffaa254af15b198ac37eed0837b00cd3547e496ecacf6136c6648a535a235059cd75a3bfd0bc49933b379b72e7a8463c268faaf05f0b27256fb179c9d4c923a13ec6600f83aaa2bee13e30c8e676040c06aefc65ba238a29d403f3a8cc164a0bdcaa1a5f54bc1d35fa4efee0c402eccab1e92f6b0cba94e1bd87898a9dd3957a7eafd9d26bf70866450646090833d4b91c032428bdb9097b409305de669a58e44931b7b428bf1a6dc56177cd944b87b04eabd80c64e287a5758c83db26dbc06f0c772335363ea2fb9f19c833644fe3b3fbbbbf5f9d460412d287eef862ae676f258aa45bc8465667601e9ac46e7d77693936c8d67ccde94e54d746b785ad26aa38ca0500105b6870790235e780ac50b9e3198f5fe678ae3a4ff4f1d4a2177edae183daf2de42625845973fc544907e27a90d868f8634c9d529bbaacbd228a5b4ac7fa68ac208e207a022cce4b24a0b5b5791eaddc6b3b3ef6e5dba41855ff531de9bbca0a39ea743c0732772bd32cd15c4b7f28a6ba579d902331a88920fb970aa75114e14b891d42cb947e9eb14feafccf1393796b21099e52b21773adae8e550f93364b1c438dd7d7fc76994c51860b652974d04a7e6ead207610de149f231422595f4e9ced1674d98d0e15ee841143ad8613f804729524e8a5f30d451611676f70a60c5dcc7127497f4d27f35e7ba0e48f98e9022e0deac400e809170970867a1682c7d2f3ef2c632c44568abff76f4f804841ae462c7247147b6e1debe48802674fd55b2ef1be5b4604d5f60c35358c7d773ab3a3ad0ab81868c6044d4e06a48ddbffacddadf813a2ce09aef34f3b60b666245a032f021b87c81fc506166983f25930cff728d399f6dd48ea1c745ad2da7f2cdd9e3ee915f708db0d1f3481018db1c174ea950ed17247bb8ebc065186758e5403bd4d19a445e4a15519326696e4280bcecd1a903f525bbe1e521f94d79df8db4b35f4ef7bd990c0f2c32789a75f95761ca0064bf251fa00b409a58b979e56d2c44bc2302552f118162891bd78272384c739c0c98bbaca3fc46fbb5bfe123eb25df0e27343e38b5a0c2d0774443af91b64b9d4e0649f20290edb84fcedb3bf4ba491bee8754a32716739e5ab64deb6c9888bb9fd2ada1629a59b16934ec5dee3678dcbdcc7fe5e2f3833da9d1281669b1d108837eaae5180396813883de26b957037623825b0675df431fb06b35191c06229f84cc849ccf1b1e079efc2e575331cd77b3297d2908c048b82b7dd14883f3e707bf6ca38f87c19625bec47c11f54988a97205d27ac51a32f19704391af72021b78cc4461386dc3844a1b45596fede3f70e311eba92b1d9ac221d3dc19f3fdd080c2169348f2cc8c9380e12a7ebf69efa37bda4ca6f7e66919b94532ac43022c0518c04d0a8cd99e0cbac88b7a317a1dac5469534b4fbc64080196b44498e149b0a196bb2d6f59392a21c4a4523ec1ff922a52de790e42810fd9355471169d22b734dde4a3361ecd57e271a92132a8b35cfa91d508d45618ad8c6c1ea209405a3d1d3ee1535caeaa3f20546052fc13aff7a584ff79db1726678344098d8563caa2a2abf6fe5aa03d7af49dccf1b17be85600e7cfdbfff54282394b0fbeafda615185574fdff78d59ec2a26dddba1c531a1ac007cabf5be2e2f0a3dedb9174e0a9da5597c9de6d68911fc66ec9d2b1e3fd71ebb83147ab14384ee303d067f47a324a01fc187f54a98f1b0848fdba2ceb3c18936d503e71887d548c4dbc70b7eecac9ead3393f8cb85a84f1484f2e237b36b6d886f54a0f629e8bb05b0c6839c722149a5b541703aeac04e6eb230a5659b12ed0a668d018f75bc94258218c1f5390b9aee4c0b2836cb76a47da649e2425bcf4cc15c4d51d109e5f78cfdb88137c31b2510264e46f1c4eb6e6b3450ad901ff9517b47a24d508844dc85fc5dbcc079e2d09f301691f401ff5f36500cc66f0617eb4dba389d427c7ac778d78438506608f0961f818a2080ea56d0f61c40fc342b49ee63e730df61f757387b9089e1987977b7fa02d87aec2e4be24b8bdf7fb6286d190f9df870944fa910df32f178ab692fa56b071f57366a3981f51800ab416dc4500abcc19e0c6aaeeb9ca063470993ec749a0bcbd07604516b1d51175ebedbaec8986f67a4d9158f75b5f3bcbe86a83220b4fdf12a0242951f94ac7d52882b1b209b82c4749753ea4d46a60bcc4f3eed033bde2d3d20c25cb46fd907f7052217a0a4db143b2efe8875a59441f4d22ef70d0c244b2de6a7e15581e84c860a6326ae3e3aea6d3972e2de0623d2d852c9e65eed318bd3d86d29595575df60d9050e1740f884796b6657718a294adcf2303adf61c6b23933db93885172e82a78f741b8efc6315a2c88ccb6b11692a346cd82a79334e0c610734e61e6378b5e2ecc161d924778bfcf4475805a0823a0d5a54768d9272ee99b7c4a81b3d5dfe1a2f5ff34", + "3c77f30bbb698b1571aeb54653fcae2c23dc16be58603f0c361eedd813ec0c4f63005a1e69e533da93e820e6e4ce1308aa29c60289060ebf24fc9738e8a4874ca4e26a0dc79ee75b8607416bd554737f", + "0223c0a6052bb3cdc99a284fa169ba76be2df53b677642a606090a9267a60769", + "7d3981073f90c6648c5e9c74", + "61ec5230306b70113f67b340575b77ef76d521ff75b754d551e4177591a02351ad382b2a4067f2b3af7e8e15431c7133e98be9d8293d17ef40161dbad9a4f1a4f30cdd557bb9a8b03b5f1b277c850e23ecfa0fc2ab1102e4b1d5e836a606883c3d43527fc3aa26955964b144a9a56cafa7b174d72a0635b80e7b4f871ead3838a955a14c4b8c5c3c66fd86a5e4ff10dfaa92105378bbc5f76ad29727e5bc4779ba3e6dc19bf45020f6ce4dfb3400df05cac51577d58eec21b22839b8f055226b204e641783bb3305b4461172f1c1d48eec56fe6f82aae564ac6688d7b0994747d9b23a24418e69f8a4fc548f854f86baacbdec78b7597b138c453349034c8cad2ff272781e0e6799ef2f8addaf18528736aef21ef8c2d213161e36b2c7815fcfc40747626e0165684e46a9a2275c533d548e52a9952a556168195d602ead86f6bd699e97ca59f4cb2050ff148f5bdfec358dc4542ff2f700db9861dfe5ba377ec7fdc0fcb2501e72fe6873c7cc76b95b4f300857f76e6e6e370119f403b556115b19fee7009f4f6675ad2d174f44002e35ddc360f309f20a3a1dbf39d90d7e5fa2106c53afb0bf445e4cede59cb50b8a7a2c0961d00b2c251f2d815309f74a46a424838ee87f1229273ff3b66dfb79e3b1ce11bd60e061e60e3f37bd7ac896b618cd78388590f44b1a276b965a4b95f2e3a7a175b30fb45dc7a71d4b3a1a33e98af30dbb46a217c50046ac21b8bbe9537c02f05a5780c8a5d796bd6424fd9e9f3ed5932069bc050bf4a1898a0ef0ca756aa2e2269b709cc92e0c5192ab49d692143388ede2bde4923c85eae8f59db5c7711dabeb33743c692be6dfebd815456958b5e1384a109f891f433e7b4a1031d4f30478b05766dd97eb964a28f2f7b55aa6c27c7f4ebf4d47ee8709bf99915426b3896412a855798e392e111789213af537cff7a976b4509e0eb6ffbb8e886a3596a242d16d95109b0ff562c624e06636a3611f804f9b2e252afe8a4e5e868b48e9e734f688f2da2012d7fdfe2d3aca75fd74730a85aae90353417fd52b92d28a5098b6af358a096b859859916bcd5a8f779676c6e04ea461fe62872050af92d08cdf1124bde1e889ace3c923457ecfe0a635ec757907a131ad7c2ca3f60e1317880f843c5e63f4ba59ab2882a492dd1e070b070af6f60e18cca29541206a7b267c3f75a5327fd9b8ffc9b36b57b73b36e586541d15c85253e17a2581e8f8a1518f275cc79afcf2b5c88a16e9bf553e757df089b5db90a9dcdc1867b788fe75abb5161dd7ee1cf37d3f0faa793ddb1bbf1eca13f4220ea63af8ef7c0e7144d999ba1c5a983e74d48cef708c1d28d3c0a168ab87d0ef70f381693f0d438ce013ffa2cba65a8cf6b498a7120209564535b7372690329cdbd74eaa76765962720f06aae58338a10064ad80f5a67395db2c31d36b1f5eb777306395f192599d2f737327afdcd9f14b3f24155a3f974915d3302427494fad756703b13afcd1764ef9735e7dbff920f1253cb668e9f40632aea1e0b4620db162138e4a97e6f0729b14be4a7c3256250d5e7423ba1238c704503c51cfc9cb68db7001b2f597a15e77138beea02e11e0bb98a72f2a77b7260e9172fe7e60483114ddd836addd966b69570db5eb26a0cfc4f8a8b80d26357ed51a70165bc0dd11ad7467688025bdb532e7222ea12f23c44d08d111b0ad4acb2f5b3d6b45c387d541ffc84466ed57acacefb1436ef00bcb5b6211dfd0650113ac369b9f3e4891acb2693c377467b1e9c949cc0ea6c4a72ef9292964275ed397cd2b1ed25fe1aa8f47e90cde362392da5e53893eef6e4f61decae1a75e3b726f0596f09c3cba62aa08bea89984b484d5768296a5afa8b0759dceba530a169d22b81979212b3343db35ce4e4766dd251ea6a47f5033cc090d6577efbed441bb4f8944937e812f12ef17ede76df621bd4cfa31567ade18b74583a2b783279150d584ca13c0d4784b70156afdf9be8ae96666b82def888465cd3df349de427d5f5b3572e4f963d33f968e6780e381ca196bc04a6664fe93fdc8558b21b84130dfa2a646950eb2e927885925af46d7a28d1507bcc3c02ba98318bfebe5b9eea1bd47935ad869eb701cbc35a9aef5efad88ff54eb350a34ccef2e159de8e16135b81105bf799fbd86aa11653b5ef93a1ab1c367231d61b42b8bdb4f04d8d05396d53247d51890be9b56c51cb19eec0fd1e6b8cdc98376b6c6b30963ac7ab02656ff94dec0e3a0eb3f3ffb8bebd99d5889df98e6c77093c370373dd5f17871fb334c7eb12c6ca22deb75bdac9eaf24281c965dffe03da9c940e13fb382fb6be332797813710a7cd2e7720f5b9e53fc0d98fcceeea4a8e9f787e670d60bfc4a849f34571e5d09b9e9c28cdf2b2d888eca9bb31ea8b9239bd19dca86880ad3e12b1583acc3a6d1f0a438ce3b5a337487279dc4ead1b214272d455e6a2c8cce4ae3bb29abfdbe77a67ababeaff5dd9c96b17f589cd4615c0209eba5e4b1c7167b4b739ca4b9957185961529d1082226f85068890c94aa1f1c244259ef7b120e40114926a49c4412b67b4caef1ff3ce6f3aea3c6107b830cd34df9f4d73d7d978b6b9d5c481e9d76e83d649e742b098334838fe50d80975fb567642d3b72c461ef3072ebb1d03c0099e97575bae6a12cd2352d9d296351df6965d736d7568c2911394a73d199743526ba54dd62c56c598f4e78495c0172739274c0b8c96755e489765723a24a8704093a94544f6c8764dcd1ce6b4bf2917cfad27d85e4442b4e5bd577ea1a88c2b79d61cc1be01ee9028235b36444483b4e45da1087bf6d45ca540620de5aacc644a0d5c4b807b582c7b058e140eebca539947502bf73c9abc81a0e3a618b39d3a38c4ff7f94767fd7e6b9eb61e629806bc3d183bdade7e369d180dd2f57fef677e22ce41be7224f11723a85a3f1d14d7b72dc98ccb2816b77e625ce3db3e2c5753af8b079e0d63939079a01910ee4699cb405d4d9c60e4ac86a7fda3a4c9c290662afbdb7678c3a84c87ff83470fa8a416511a06d3216a1445699d7ad7e6980491fd596d39762d576b08fcbf0825243c1fc01ec8300780857c429c607113160a8354f6699b368a87983464472a5754fd58943fca6f6779764fbe6cbb510d5280292df02c4a7ed9acec8c95ad67ebcda71d0f519ac18db9b43b28244cd34fe02c5d694df57410eb54c5e1ca0f8501e7776a811d7ee81eb9d8c80b2ca50a012b5eecd5428af965b217e7fdac80be88a01f76d473105b027eb557a523f13c55e1670ff34627667649573e0f19dda41c525a8c96c2866a88bd73e66c786767e1657960f6676d8a22be1c6024158a0f0e4ec761148b5a3d8ea481d8fed94855be82479ba23213190054f937838f0e35e00aa74c89b294c29ea25ad7e96b4b6fa952ea8f1cbe5397b7c86d0b74ccc25e22c88736b045fe86110bffa0679f28a1f27162b51410498cb7", + }, + { + "0fcff2c29cbb5cc40bfd2ec573ecf368275ade6a00e5730b77dab17e437b46524b3814e7f470acff6ddac4e0c6b748ed112657120bca1d83a4ce01e74a473995804d7c74bd28732a02370ac8ef52b600790d1284d82f077cfe096448509dddd0eb5944a882b7d384efdd4dde3003dea910f12de82035651e3ec9668e66435f519da3fa1f5bcda34aaaf028daf3068304f7b1ec18e65136241a9db281e011d27db5cc9c1099405a4430821e2488a228805314983966ce5d806b0f014c21d4c9d6a066e63aa6407ed6c29cfa4a3e22ca913762ca9d31271d9c371fe858f3b22e931814cdbe544b9416e88f6026b12bb8e88d8285beaaa35be1c24339b5f567480d7b16cbcf6160e549ef4570a0702889feaa0ebc54b11735735b6e2850d5715e5087291fe8890432784aa219bacaa2b874b075c9628cfed5e76dfe38426f9693f6bfb2de49b710c101b2dabb7c7c74f12de9ba8f75b8645d25629568d12bfbc7eaada63364b6f56569cf21e54c95d6797e9008f3496c506ecfe5d6a010d168fb7f0e2ee3c423492df36a133fffe9b87d7ac070c32cc131fba6089cb7d904b25812e03cd6048504f7ef1736ee00ee6b7aaedb3dda9c6fd6437772fa5076aca9888ce55e906a62875979bd477aabb2f4598d32342aa10a6d187c6768f213117a9ff6d830603bb7b9b475002e20b2237a4055ae6af6b8d70e343e76265188a0f07e7820dfb3d898684d99966d4bb9e78b0e95f5044dcc12810a89a75b11474c8fc06c6e734407db91a072ffeb2be6773a7c6c3ec939514b43daf29feb3aeb7afa57e96d9cf0492d90bb2c7be613f2208f5f5f5898b0a3db8a967a75d065efcabdd83759c88086583bb3d422c6c6425525a1adbd515199dbe71350b77940813618b88fe139153974c80d968ed4d9e3f97a91b7cce250a7c963f880dc38011250b9a131f2b76b677f78fd0e4cd6f1465182fd1d644dc42db0bcad8df4ae9f456841765af8e1c1775abf85a69577ece6f9e9035e36c88be784397479e713be4f5434aa4c166bc4702a4916c0c003a6baecaa182372a30af6dc7e6fc4912d13e662bd327829f6e85340fe130001babaee64d211d6761bcc52993c162a692a10cbe7434310392b64792a777a2b31341995072a6b7d4538cfde74e609dd1019a9f75cec0896186c0f42e3896d15be87aac5b11642f74e11d5c2f7de9f07f848ff543507ea4d73fa8f5683fc6b41831606352c482c7a5a013c51e0db59d824582c595f17a6d2113528943194d6b5aadcead62516507f178cd0f76729cf8b81fce4e0138ab224bfdbb8f16f8ea6196b90ef90a63f0fbdcbdfb5320984be8a80a26b932d1db7ecf870dd67fe838069136ff9b9ae087779e82cacf1b06a7b310ce6c439047c26fcec0364ea87e4549a544d540256cb7c3ef7282fa792aad89e919dd89519fe910501f5ef88da43232e917730e742ac2539d454e066feb9058f56dd246fdbb674dcab636585a788b338ffe41f4190447a65985acb9613d02669ad4ad888004c65acb0ca315752e58f51c9ae9259f20cbe8a668a207a5a46e30891bc909108f53db8bf6f0f11549e621d4cf4763e0035c867bfe9e1192fc421c080b25289a78f4167fe517852efdb6f3ccfe67ad01b4337da2c18f35bdc151c5dc76ee66efd27d5fc784e4e6829bea4f8a41ec8bf61ff998d178ce9f4a10551687337d7705eac6cd7fabb3f2379e31c1d01e4dc63e475f0fb01d9efa3de400b5177e2c2d68f2ead89e9ecad62cfc97fd0ad5b3391d0248dd2fd7c75dcbd802d3463ef0af21eb77b07a3286a72f1e9439f457630159abde7983a5c74f7dda12b40913632afedadb691d62003c70a46664fbd976457544cef8ea863858505b1c596e7f745d4a5fb657b1c694226afa9756c40d9c49425b323ce17a8531c5919b24010f715b5f27a300ee37334931ca9ff5c83c3f0a87713768ebccaaa15e35c56f3536ba945e5d954c94c885c68325bc4b51fb55d96c8d424849ece9a812af0747d5b1dc240f71609439f65acd1c17086e025e376eeb79a7255680cd692fc4b0f5768d1985fe8a1a387074f58c8bfdea8e5c11ed379b845ce2052a5b24ef0c1a658923eb87adf5b01e6aa59ae6937564ef97421722c67404cb9e5fe07d5bfad2e52ebe6cccb41ceb1eb2760545fb6a3582bc4ca572b0aa4e4f0a2ecc56299f3b485d980501a4e010576615ad518fd2d43c1f79aed013ed1f1e1bdb74357aaf7dc84772c9ec62da43c8ffe11a7fb3eeabc3584a936c37b28a438dfe78f89de6b0d5597ac1bc55057544e68fb49a6e505db69af122c2a3ad06219b7f2a2955db0ebf55c06baac5e0efac609436dee484857f75a8421945484ad0c7650a1d3008cc85c938208f19002b7994524878d6ddf85c763a65cb72a09c3a059657459f13cb584bfbd754fbf2de904517092be4f1786b2bde26ae8eb2d884592fc9e84395408f8117e47d1ab30d5fca167bbf07e41a33c230d240e3aac53cda9f251e24659da57d721288252fe7ff3653ae3e47b86209e9344accef0009b99f2ec7b3845558f1d77b89fc9b61ebc1b589fffd3261f71b9631e87541e22ed100e694854bed771358f10fe452fba61875a605b8080cc39e3eac13708e32518f28e60464c38b782c7c7800df63b6e7e95ced9154ea54e32900f6998f38eb1e51c112b6949e2eb11a96b1ea0a68c1e3b5af750a99c9fdb2cae44c5a1d37686ef87b158d19343e23daf00dd558cfb91e6f2e18f8e806abb2faf80d082f657717d08ca4e9c0d30d9bc30b612bcb1a3a3a3843231059dec344c6c04ce625b3fe064092e00175fd9d38f8fe54c4088efe30d211412be01460a6d4ad8d0a618b00a21de0a383de30ccd72f119b27a08958729a999e8aadff21829cbe8cfe398d90476e33db4c64981383a9aeab4a27f3bcb29d4b3d3b3a6ebdd71d3ac546b8658e269959630de176819b153cd53d2091efbddd2cf9178ba6ee98e1a3df9a095db0a2b713a0988a22239f5f08cc8f9abc3d67d9267f54dd5dedbf01bd490b0b09adb21d4e5aa7707e36cf77034f01bf8c7988a2e8dd7046bb2f486878436371f1258f3f7026afee6d7f6560be67103ad098edc9665e00118d4879f58bdd677cf2e6bc631d5c517acbb6db8a1debb4fe7492b7daf0b7ec7df056637c23caf926a1a589bef1db29cd81f547afd0fc9e459f46108ffdfcfdee43515a771c439dbde9177ceaf296a8749be0146cdca2b26be8c2ebd6cfd9b5032b1f7a375307f54c2f622711f8cf8684afaaf17c4da3e83666c40d26adc239c8d1a40024bbf560db5787ed404763d4e70ec6635c6a4b82c10f8ff7ad42217613c57648716ba94cb33129f3789dc86f9c8ec2e8e90e6bba0dfba1bb3dc3215188979a09f33346a6647099ed0e624c9ae10f83da0def840bdb25b718e8d86a616ff46b5327b1f99c22937920f5b5bbd6b53fa0b32f24befa4a7603234e6d94be51f00189a20b15c49e8ee58434a15ae9d10b9cf0204bfa7ab1fd9e006b22bebd22b036c4bb4c9949cb7ecdf01028d9f12466e144b2dbbf64d95d65347013e192d428678f64f0d9306f97208fb00a70d4615229143dd8890725ee3ba6021d38d6359055aa812edaf", + "0c5fb7075f5e15a6733737b614bf46871e29417e4b140bae6e10081623f5c52f557c36b4da4b5a4e82920497514b1e6f745fedbf73f86ee10976f82c6cbd5bc13a917514ddd062", + "e70954c812cac03e367e99f7b82a6dcc073d2f679f965d524872756ee58654cc", + "5f6267f6b3b21423267310e6", + "c53868c0fdc14e891ae1bc257fbb13be210a5d9cdbd9d18fe1b474f9a1929dbba3f25222d8fe8c1be3eef22352100064b922fd9642ad128a202b6382ae0a67c8affb0c5bfa1a80e55c1084cc372485243df872d677a80a3ef1ca3589908bca621f6f50133eb762cb9c05775d13db7dd3eb65ffd3eef96e8dd42928facc68390f6bbc50b17e1ef5ea6310d8756dd177be2cceb63a97bcceaa046794915589ca022d90756b02c22e8634c0ed44192abc3b8b1e2814c855ab27aaae3bdd801a73e6209fdd559ceb59a94fd98a66d12a31a643ca2f4b07ed910bc390f77ab89395d5cd1d783d8940dad4447f0452991b209cfcd998b0c814cebd08f9ff15052818bab0bf51c3b72ac1020d3b0974fbdf4ff941b1ab9c01f284fe82f2fd89c0aeb4b9fbb0a74ece08b3debc7b65e7263e2922fd4aba15ae3cba7885d04127c8e06a67f244e7aa4556f8694a5db6653f6e48d6de54f9e4024d25d3236d4f933205b6a358aa1506f832ef7d556c6a1bfe4aabfce51f3b5ac64bf6ab1e665bddb12fe13db9f07a55db3da3886df36ddb89f3a4939b1e9e5b701301570e3d01c0b947f498dcc6af438cc15e6038cb78a78986da0316cab67bca3e28c95e6b7e6b36cae9202cf4a77a0e15d3c3291d267aeee172dd587a944719b9fbe077603b4d39d4302b9a6415aa07af309a5e1cf7a9379552becdb4bc6a0b5c85d2e63bb141c405afc58a8b2b4188b3883a24eedf98dd50fc54725c440ccdb03514a6f37cab49296b6826b6bc7d7ad8cac0a3425eeb6866d94119acdad468cefe162a29e8831c77aa83321e8ae3e20e968cfe51dbf2b63f4e26c61536e6be4f63d61bbd06af38023b15f4fccb8ae0356d924dbf646bff69d1ac0d6e1c7f40b12d6d16e52d1c15958add5708bd38c514e47fe623a67c9ec211cd625b398fa7fd67a23e6e9f65d42dda2bae94524372fbc1a7e0ab3f1c451c126135536e73c573749aa60177dfb68843752b010e2cb9c1afaf51c94a48cf8ac7aab3fb200aaebcedefc6cccb581848da0121af92d9f4be002f0c2beffdfa65c36bec80e7f62d7009b1eb719d24b96e97059e6b50a52662c2c833738849f342391514349305228b29bfa9c7cf2a931558ca8e704c600148a28bd871465b23af499c11784aa45acd051f276d82789c58b14f12619372be4bc3a285f6cee21d65648d18e61752d6e7957736d3385f8ad36702c451c61ed475997d6d9f11c8be5257d8febce329aa701028aa2b5644b8515a95b5e866780e32754ac2e6f2e31b2c04a4ad35cbcbc25b23e9bf49cb1a5d877ca30880741757c29303af8676546760016f1538991b37cf0cd24ad3b1d877e5e1bd083e4b990af6ff5c0b28e530db3f463d21e76c928c8e1ffaa6c045937ea171a9071827a173e231f50e95430ae4895932c88ce048058ce6d0a50ca5c1842506158e98bb2912a61c7991a2256c97cb9050a4bb3ca32594622756291340561e9e584dd2e096263b6ff8eb898ae86f5f24500320d2d0ebb30d84cb4ef876a877dad23a611b39bf0cba5e22f2850e11c298fa23fed40691b83acc87136f8fa540b1dc40d1b0d0bd489ee9dad785c121955a094a2c6bd3353e142c04f7b88b2eb3305fd00d5eddb391b73fa2b16a6357aaa2abf2059ec979bd3ce06d5fff1c325bbe5c833a101615750613047d8155ac0c3a0734cc6aaeae7cb65d7501cb95f9d6d1161d09c961c0681547faf7983ed2efaf4e0fbb87a06169ecff1d0ee540a9223a73f75584441d4669cac09c2dbdb8aa2aed74eb9a2870f2021eb16e5f5c3e79a24d7110af4bece22a1086d27642550cadfa4f0e03f2c032a2745e1c9277a4f67fa4dc74ba056110fed3a63f643567d079c9430b8d5b3bf57a9b3f02d486d870229fee5462043b6bda8d265c745ddc1b8952bf91828d6db2edcfca7051e74df9dd456dca5e04ba469b9ff6a8130aab3903c05659b8f31cf4ba4c22511493a36541ff9d88c708dfb714d52a3c0356543e6efad37530b598bb63c3724772907abe4cad39c896c62daf5b30cd7d37eb36a7be2494353028c76e8d148b018c7bb755c45d2a33f61944071bae8316881e9aa37e4ec2374aac4f8436ed3c7db2092326538f07fc6644e0239899e3335f73c1e3c4602b12d19d7b639d4968974b6b2703ec1add8cd930cbafff4158f68f06aaac83bb4a2e31466e2ddc247ad71c5f4c49af7defd1394e21819cc24c78380caefb2ce87c0d1050680313037def12ca21cf67bb6692d6e4a9e90a9c9a0b7118ac300c6c6f636337aa25bc59cf1d9749dc183803cc0ccd1ff53210352795c6edb49ff1e5e8ebaee7b3eda6e3c0c340fa60594115e37fab60133b8a3b39d2e63db0bc6a03973e236fca801553912f93feafd8b96766049dd2066f3c5ac9222121ee9d36cbcd8f713adc8779949941f8a8dcc92ade62e46e9f1b292d5f7eced14c3bff50a811cb762ced1f103652773ef946e18569eb5892626627e085d4ffb3102c1586ddf88acbaeed903b22d3e7ccd8b8ddcdfddb872403240bc8e0e46a068f55bbddaf90fffb9a914187aac2ceedf21fefa1fe32fc7bdbb9fd76dcda1fca7b39107d308d11a118e47499dc4092ef0cd28d0d9af84440f095b4feb7adcba198894cd89a324c60ed0b996c520d4b33391bbbef1997256af7ba7ec1069244359066af81543ca23105742fee3480f890373d3205236bed566cd22a62bf69f8c0f27b714f84a203bca1605865e2cc2f9211389e0df7a4b3aab9d10826639357efe1f5fe64a1bd6d06d0b5605658c4d2d12e1bec77e70ea393b0a09043dd7d6684bd53f4c883f2f6928d99ba91873d063d43600f9105d503b11d8dc2b05e34b4fcf18e78b2b6c97d3b2c9249a2f6566ddab2a8a67fed6c9f8af2f4ef98dd579f2d4fb572e178489c503df5d5f03bee9920db347a6e734ed72ec7233387f1579c13725599a33a90915ddf03725dce20fd3806abc1029a20732380596057830ed63b6edcaa4d4418871bbfd58de1d1f2800588ed207f2016e11abd1baf1895f6096e2c75cc5916836a9ddc09cab4c28e53fadbd7d3080088131cc270095315b61011b0cea5b4d64b647bbcea54d20be1eec0992c72fc9c9771cae19191cf6a6f1840acec1deff605626d0a0d79ea8fe0af63ea75e80f8141fa8d7ca6f4c99dc7e78aeacc67762ed0134f1a0b053debfb9ccb145800b9818c2deb46f7124e8655f37c3291af107ed75384afcedb44518ca14cdea341c9657ec638531011cb957ed6b3434b736ae8c8199684cc58862638c5f6c07e1cbe8ae68c5582b1697ca9dbdd01e97023138a9173d6b1294cd99514a28102e6912b1c87ef22cdc611133bcc111e95c355a26b20a3d6f0ead66e932c5e1229b0fc17a7d6f78134c69beb362ca75017b1bf1105ac8970fad48acb8313cb3ff10e9d72c4ff11f95c2dab59575525c98653a9c7d31585a3742267c062d6ffc7a4303a3e81a45bf39e1ce2097623bba70f216aa612c64ba06ed6d596ad6abbdde69d56ab45e25ebcd4e485824449550232be26f987c14008f67c9db9d0f709f567fa44502b9e0839457e5f0aadec0395bf5c38ed8de7529708e58c0a895198fc8b2570fb6e68547630ca7f313526d392ac4776be973205f971854c300454d5", + }, + { + "95a17355dfa9d378a18ba20e58aa4b8711ea1d6e3c65e0b2d3c6382892c7d02768437d47ed50bf8edc619c340be7bb1cd1d88b0d3d6bbf1031f738c4be09eb264c686d39b92cc7958e63c9994a84b61b5c412999ace8a9dee0e2a29eeb8dc537f63271af5f3844ed9c0d86e6913c02ed7d2b862a132f08f311aa92fc3757342d89a5dce8dd20d5792d5c60be9862ab168d3140a061489472f2266f297da357064833ef2554c49f8120ff40b961ebcfee1d0f8e7e5722f049485f72c502c9cc4afdbb70517f0fd2a00e12596ffe285d1b37eb998e0e89d756e9491ceb13e83610a3a66122b533c2c3461b3244438f5f7a7af8088881dfdf6a29fb563ce38c4c8632ada8e7e06baa2686dc6aca6bc944e5c14d6e432c4dad554803912b8fddb1c18a59a86bc452914b2efc1599c5597f87a6edcad33a7728827bbaad0a975ecc22b7748d7cc71ec7f51adc8fe0350e67dcfb31af35a8d7b72391642e29c2fa4b796ed8f535f6bc2b1198baf1cec858aac38959f83130af55c21383ebd57d364eeb0e442104004c1599060667ce5e1191e76a89199a386e5c4bf147206e7d6e598bb27a90b3c6a54cccacb39a0ac42bf22eb40bc8ec7925376a6c57d8eac6317578ac052b72ab773f572ad961ee05531cb95ee5a6d70add4176351960fb4bd673f7db9f698616a8dd41823f2f87924c40f131e6c83bc40ab1f92312f46ee86765c306cf4a1d77275ef9668d80f9d9c1ea0aa7b2456bbcf764e009584ef1c0b4b4c683fee3fa2641f48ccf7485a8356fb3dd22f848deefadbef8050de9c5c19e8c449c6f3ec2b1324f80a7d428dc44dbb966d40244c3af03bcb410a57ad1430615e07553a22686f1a62dc6cf090aaac3707ec5b44274b7fe28c7a3a298e7a8adc71e016944875bebb421babd2b64809be3454f25b90723e2cec68467ad2d14744b15de8f9c397a505a340e85998e207cd46fa18d76c46f458af4ac3821c0ac6cd68afb72c376c31daad1a2435fc2bf333260c1a82430edaf2499e7455a93b1301eada2e12365ffcd36a1119664d0c996318a3e55bb2c04dfc5eb251f7fd64f9d83f27ea6577d748e1f85248355ed19867857dc3383e01249cc37684b0eb8e891aa663801e4ac8f0331b38686a19f0d19f6e94c7ac95ec395962be0a4e3c8358d2f6d8f13191e164ad29cd1733bde8c31c7d8ab90366e26cc9a06707dcfa60bfe139a112db827778ac348fdfe26892fed61db7e9849a464e3aad561797b6c778e0688bbbeaf3349727b4670a2d0a08f317b0dc9c4b12ea85c0309d57e754d0c7bd5c83985fb82f776c968189908a8ca83b5944767c2efc3c5f898436de54fe8bb17224012a437896d9fa106a749d12aff657266276129ec5ac12fc7a77eb06296d2a2a876d931e479d3ea201cbb4b1b20bd81471eaa33786c624013e1f07577c2171f38f0511c6924078a40c2d55ce392dd2ab0885e29f4c06907a1597c181b933853838970edad7777ed394c491cde27478eafa5b7a36520aa0779261f94b957e83ce058298dcfa07b08ecc425caeb6c599a11103d7631e77daa0d9d3fc6f42703d57f2c624ecddd56b9a27b848de7dd28f8ed656f1e4decc95a8908217e2f2453ae50b5fc1d9352d735ce5bc2b538eaae25501d449d090df793151811443c64f28d19eeaaac4081e10edca4c4148e723ade8f7e7b988b732ba08b3ce4c8a0d655bac4ff66048148135decd7727a49ac59d82ad470b5479c55d3d8399b790ff033d3ef99d770e1eacecdc140480aeca1e2167553cbbdef2090c7592b40681b733b0a0d127beefd49bcbe8904c975a5ab8b1afe56d7ed7667b5cf92f537ad6972b876843364817c20400524097ac9b405e4b35bbba0d12355a0b54bd763b4491b2acd4e8e4fcaaf8fcfd398499d4c4e81ffa93ca07a5ff51a1540f178f43a931e07e1ad56ab5ce57a2f7dc3ccca114dc9ba8a6934e95f4efe9f3f76947909b280ea5fd795bbbc0feb3ad2b704e305cd9d8f37d178961f77355eedc9d7f77c58e1db2f7797eb8682255939293c3ef7dacd2eab46c4cbbdf929aac301a13f59831a88fab173803399d96dc216abb9f079e79bbfab667ca590266891c8a7ea4bc1724573e5c5a67e9f1341b5bffaa538e240f78da7733237999ac86141b2ac0324f17609b71c885630c90befc3b027a5f01e33979165ce2a00968c414838446c2aba76e1d7fe3707c742f68af21d30e23b637accc848f6c8df820a27bb4e94e5090ac6e008fde7cf3fdd5931fa891335ec8d01b5d6f77db57a87dc35d6701adf7ae0bf82dda6511c83ab4d7d3460b221eeb3d6c4aa537924db5559b1c6739040534fc330f5144c78bf99f5f4faa715e85aebac043e2529197a82ca40f65a8149a9447a9e58c61618600b0c5ab221420c0cee114a133a648dbc2eceb2894ffc329376d1eb3ce7039cf30ff6a53038b23c26c38739fdebc7b919956ca2e468d577dea6621a8d66b78075ad26a6e6d8e20c9b694698540d516ea2bd108625e5fd038b5f1e19c5d5993b82bfe16897c375322dbbca81c81cef6ad900f0ffe5ed02714c208a12f5234d78e32ee07af155ad1e1077a0d8938f426d8f326c751f6ee66c8f707e8493cbfc76f9ddf1ea329e094315a91ba9385e16c890823db0f0231c7f939a042665009d5edd8e48102c515341fa6eea33cc00fb5d82380d735b29f2eec3f61428f7b186d43fcee46b2037ad1aa6974d729848cf1a80dc8ddb0580c9c876def06d8f7642cf45263a655ee77f047fcd76171546319622bf71283f3bf0b519e123a85765779c8bb201e99981ed184e642f63aa61f9cc206bf45fa6e514bfc637671d9cdfba2891bb112a3cff438a6372ee0dd3e7d9f352ce52f8b367b7799e1f963bfe50638f0c74b94873fcd3d66fc1e342a8bd36fb8b88f33eefabb78eca4dc9c89e2c57aaa010f2140dc5ea7c86cebe2f8bf42a167d1d546cc80bfa9258c35af6efb1a090c293a4cf588e4bdf5c090ee7fe38fd7b5551e71e5ce2b0b5a50bab95bc4c257edfc94d37579816b4a2249ba05c991bb2ea02d047e480fc8a8ba71f48f344c6d20d140a64ac20184e45b4eea14d0953370c237ef0a47a7a2f22997715dd3ee8ea52f24ffe12674d571b3bf968454ca051701e411499bc43bb55bbd033f9b81d4baa6c49bdd49614efd20d58175af868ca16a9deaf65216abbdc3beed5f30b209e786a5b4c006f3bd27d93e9d78b51a1a2fb7f5160a0bc1b7df70952ea1573888ddde3d9dd5314b0d0a899a733eb48d5e6c7274667e362e4da6b37c480aa4d0d8730e66483fb1453a3aefad69942ac7f09d3c571b6275590938c541336a121bdd20722550236a9a5e4a37c7de628fceffbc260b1e9b6417c4295907937b13609b8585ebb8f076073abdcf19104ed80ffafe1b09997f115d987a552be5689c70fe125ca702d2ae4d807d5690bc2e90b72cabb0b61ad203b34c68df21c16b92bf8def5680b204ce327214c32e4363d5600f96162a6819dda472acc6441858f396385a16fa5ee52cc0f9ffef3d53c49d535aa37db2cd4b573ff81d74006677969ec1ad891082b5d18ca5b0b9f975574ccffaca72b805c9f7fdd76bfe3dd384dc953255a5b50b7731a137fb9aad42e77d3da1eff5a7b9eda5814993cf2d289bb25ae1680ffcdf419e073d38b4701021adb2019359bb70ff4cca930be7bb979a0678f20665d14803d8753c8ce54cae92feb026486ba747a861daa449863bd38cb4d5831aa6db1e7f404b0c3587aac8765aeecec686066ee7d11321574f04d3f3da571e71222ce07277eca7ff97607", + "5e24f34a8d53b17bd0c2aee5369e3276dbd7e7c2ea0990f1300fbbb00831b76655aab1e2fd625ecd", + "c1d796f1e651a1ee825855d80206baff6818cc8c247ee6ce62b7531e6e9ac32f", + "240cb25aaae4d085bbb747a5", + "319e968ad291ea5d4a057c38f7afa4ddb9c9565962fa1a7b231e397a268ad8e0c5030a2df09dc4f99402ddf2e0d06e753bf55e1b318b3e5ff0108de2328d3b8d53e23e08bf7d84d59fededd60d47bbb52736b0491f82c616eb5f779c496abd6499555035e4513c8613e7204e6bff8d06dfecd9ce38c6b83efd8d0e41f84f7cfc9ae07113237987a4b2eaa87f7e0a310155e282e57858244e9071712fa026cb781e5a4bfe6fa1bc480e534096394459a3d1354e2d9a54aac6926a60b388410fd0b53f7a3a9116292f37406369c22ea674418c4deeead171e00f74f5cabae5d24a0686a4bcd8ba99aea613a23edd0a019a319daa3779c212fbdca9d772fc3fe612cf178c2aca2aeaf6bce2433494027a474eff699bba95fc7dcf79ca1d77b1e097439a9050a5cc78e0b78bf2e7f50f959ea2986a59be3880519cd84d0a673acb0432feb1945c603e70748445c74600ccfec60efcf9e4d02a7df5f967de4b473f63b0b0499ff4ba350ec1182f3a0ac17ef9ae28945fc9bc714c49909a7c1e2f311aa6ad7652e22e1f48bb51cf53814a2125152813752d86c7f9468a991d0ac84b1a2f3969b8081c228b7f5760718036e26a10e211ff04ea323acdaaddf9b06a08c92ed663d0fdf13fa601cda45c416c2d3803dd9b5ca29cba57e59cf4ad93176c65c64507b1995d638541c90b381ff758833a2ad67b0de44c280fdfd82b3c6d4353ae30b33768863cd3169a2032f26e37ddd57e7da1673cfc7375bf6e6792495a2b434155d684f2a6f2b919f944469d47be5aa7da74eed69d871e6f65c3ae08904a9ad042ba39905188f0b9158fd14094bd6a408fba6ef57566d69eccda86bb54cd3ca7381f51bffeaf8bcc1ae8df91d22c359888e21b70f640d6f3726a34e6100ee269124747f0ca05110f63deee07e3628bd6aacf926036ccec02c0b6bd7259db52ea8b7a686b36ba1d0296c85e43e25d72ce46c66a1e646301dafd2f4c502281e6f949011cea69459c026c65bd130d6ef06be17b23a9c9a84746e39d017b144135025ac527c1e653f233770cd68e9f232c3b623ceda836843b3e9ea313cc6a57d28ce71ccfb7265ce73b06bce1447220645e6f66caeb06b55129b97c8dd8db54c94d771504d24cedc86a8ec706a9f7dcbbcd7fc7cf38005b2913b1cfb77370bd23183ac7b5ca5135a2738cc91d05b2b22640469e3daeb6a7b0f14fc6652563663520f7754aba624a35e5d24529a6ee9f5ef0d019d83c04f5a93a38b68cbce0cecd42a11aae305475806326aebb4f673791f50c9f90894add51a0fd7c02807efd8c1bd21fa717a860e224bc9fa3f40975fd8d558e4844a09f8920256528450d77e546604e2ce2d38efadaf39a0ea3ea12156174aa8a20481e6c1190e448564675f9ca60bcef37cacec5aa218122e7bd25b571ff10f54979d62018b779a2a3d5d7d6cd56ae31efef2c844ba50ff9da88eba7a8e0d9fc5388a805ba4ad35eaa4798e395d2fe112083cce2f11cc850d25ca5c6e60a9996cee4789ca99d519daedb62f4fb1e535b742a35d71d7390117e93821ff18948a78c1fcdcb90a5f1211327d7ee0663ef16ff446e0e22d8cb7b2d3d05469b1c02864f4a87e2d9715f60c9e7be841e308d0a5f6c50161a4a0464aebafb88e0d2df8cefcead93c9623106d5518a9852f320235594be10c45bc0cf06c9daa007100ff97959357f9be8e49c870d0a11c884213e266c35e9131439fb3654fd5f1abd1e778ccb02b8c262753a22653a09272a0c33b6b2683c9045e8f967af756b98dc1797ff605c64ac5bda8252e9ebfe0e4d8d7ca754fcca5e3de3c4b63678da095281d76d60fa12ff4ca818825f346b9c4e426cee16db5818d78a527a901cd088bc2983f9b83430b50683018996996717a1738439680b68e3f61cbdcd0f0e1a6b436af8fa05d3ce2228054e319bad1dc6ac970c75313c552fc1136fabc302fcd1d09ef1b9138d18133a772cbd9cb197ff58c6e898f9e83e4e27206f3b15b6bf2778aaf9fb38e0d50152f8dbf5763816132a04b4b2e9639584b3dc8ea6d95ade024f9497944200ab0aeab206ef099859b9240aaa15f737c1e0fe6d015d04f47261ade4928e3c2ca21d1f5ab4a3f571f2ed92ebeeebf2493e6e39f0063ba931e165384ee1b5081f5f8d26ec24716757037f5158d35effbe67009080ad7b0381292a513f312eb28328cf5ff47a6599e36c14277c3eb5053c5aca530ff5954c21c03fb3fd5fc0facdac36dd819b0495fde421411e0440991da0cc4a20d294446115c0b79045037fbfacfeac574da3bf192fec4bf38c27cef71d03787430223b6069ba6d9273ec8679736a832277c657862ca791b559a5054ee8c7c07618083f75480c8aa01cb086c7317315911802e6cefb15bbe20494b14d97e3a885806db775c216dc15949e3b724f7cbb30bd2c46bd5a2fd6132352c2b21cc2b47891dd9794975f70a6fa7a0791ee761ccf4c263f27f64790826c1aa656c39483e029baef0855935e7e6c133a4035a3699925fbde131ca62948879373346af35bd7fa52b8d6c3338f213bbd9c79977c0d710028d1d386df614c5faf4a1f8fe5506a9af7059370893ff6d07d91383baba67a617b5d829e0e2eb20e541ed5c34be7ef0eaf6c6f6f52d7ca01933a2a4e8de46e422dc95161ba8ad354f6bc7c8e4cf8ab5e08607530147fcd7c9481afc621c5a3230a05e2c4db79db9e1e73f43556a8e8f0dff7ffe420282212f23d4c5f6f8d2febe129b9fe5ba7ddf27f72ae898a4eba270b5d2bb3b6b06e38c546ba80a9b2bc46097d0b47db5ae72485ef2c6419e856c33c2d66a861b9d474699e730eb8a8992e3ea9c1ed74316687d5d9fc611189eba2aa31af5ba8e81179866dc016bda977c59c595e40001c8ab3a4a44cec00ff84c6dbd9ad4be30bcc080e69b9398089d6ea464a70f536ace3b447693301c94850606d0de1299770b5f45e6d28f8ab83e3ffe52178522eb91fdaa9e4a696674ba0f52ee18e960b04415782f018d67479081b1bf9b4c9b90de026cbb66bf7d9d12cddccdd9b2c8ee2f010892571c6f0c0feac9555c71bf61f9cd69553cf7fc2be8d058e0c3430e134adb1ba28985fdc4f0cf71bd3cd09f5f82f303cded0de62f98404477bdd0a846c6c51e3e82ebf72f475afc8e6388aec57206018ba2528ede194345cc1ee95cb2023793f692f708aac3c9e8a682af36b078f5d6c7a3ed07475e9fe73b95d1eee048ab898edfee3fac4beda45f03eeb64b2128f6df9453ed77c6010e13c0270c068f704f49e62fb7410be90ffee47584ca2efc5287dae1f63bcc1819e7548eb9f0d8a3182f9ed00da3817255a2ff735876b75cd21cb25e86aa4b2893f9e5089dfac76194563f9a14335dd37ef06a501c89623caaf6feb4afb792092dfed515ba7518e278c341834a9dd17b50a0fc860b62ec621b69408cb3fbf7d4ab88a3e367fda84c82357376fa9b1161b739361c313b99dcbf4122f3870c8175093298cf432174217398928983ab6cea4759f18e7a21d71fe1b0f3cda05d241e12db0818b8763bd23d958d6e52981ce8d84cd6d82640d2000874a53c0bd14949ec99e48ce6c954ef0d08e6e319de5ebf7e142f25c0f50ff13f6acecde6a270c8d8de05ef4c310ce9e92f40f6f2b77d6e7aa3f056d4a20f7faa7cd0b93d82e3972343a50a26ff462caada10621bc953b73913944246d2a4da25fa52cc6ee1293c436ab9031ee2dc79cce39f139f44d473c236731257c6f65ca4d383e39cf8d33923afea3c80244021d36e0ed43230c44e7d1a1297d35464861f9149d869f26cc51879027169803e43c898d1b4a2a2480197500", + }, + { + "2158abc2472e1b9c061da2c01d0ad9e996fd687cccca331fe8a2baacd12c06f284b1b5cbdfd067e5ed09a60a137ff4a97c5c26482659680ffb22bbcd4ec1bfd272749e52440537320fdd3c225c30ccd98cf221b34b89c247ab7d14f93ed3ccb0486a028c6f3abe7e17fba1742b6d4db85f6e6baaf82df1a3aa059de8d9699821d39bad42d56cc1ec67626092cfad4a2e1cb5d814e2cab78ccf5474a8bd0dc990a877d37de394694af6cadcc57727f393dccba7bf955f4b65b3c00d71cdd701754ed4f231685b7b5e2557239d7e16305be2d81a773765dcea25ea5bf2c15d670f3159409ab5bbf8da121c779132a8ec1480068cb76b68a19152fd83135aeb228b446225f91d1ed4303a4bc16cf3ad8173b30d2a1e75ccafc8c933db231efeae6260d45c7ef230ae2c7b6f986f1c19e2cf260ded9cd99d64a2d03fc5ee3d73509e47ac1c39dcca655839fec75517a9243eb611da8fae3e317e7df66cbb6abd59b16975eb463f509e784e65cd660ef1a4c5027e54b1bc862f397c9cf4e6594d98c2c2830801d3a679220b46881a372cdf3aaa33eb66b91a9f36b6941c0fe1b4d2a437daa50b811f2d8c65b5a69de185d78bb9c2f172dc90a89324c5a2067974aab14f4fbcd06ee95cd49e03717f88480a410afbb4e68b5c79b0211cb69b90604cdfaf08af1ef10cf28f0f630e97ab18d9b5138d9b9ee9154e0b3104a6c164f2a114fa5032eb5c247a6b87880332a0dce7b36982515297a05dc8a4038a09f52b1def7b4fdad8735443fadc462c7c22132f8b9581de2d213bf5c53f7fce34aaeb24263afefead5341a72f88d3acaae6db367c5c14a97d4f9e438e1e11c3c8fde7ee37e5ece5382e8c68b660146046ef96c24caa6bc9fa0a0c88281e4bf01b32df5218cb3750f9c4b8af24cc106abca62d085198d14ba2ded3cafc1fbb17519a696965a1ba5f65720e893f1ef3fbc5200316b9d4615bb23426ae53e1c5a57b2f0ee0d0c83f353b4ebe7a6cb17531d278478b4ca8e6ffdd0cad30ed73d568a2e44972ac88a7e7d665614316d674e84ebc739b645a9a4166477254ba47bc5c2b05ced88e75bf64da21a7f1f71cd946d84de13ca77b7e0dc2f0617d371ed96323a83bb11dfa16f81bbde913d9c259b10f3aeeb6b56cc4775c25f49343cef667763118932c2e8b47ec745ac537b37746ed65fda2d1c11a2de60ec02adcb79152e8a9e614d8715cc4e6b6891d6a0063576560fa3621146308222432ffdbc351c36c37d844a934088fea92ac54920facf870a62e91ba9299dcb6cbdb918e2d54fb642c3f0d60489c4bda489f6c584b64c8f19359ab25f388dbbe636c4d90c048f5ed87024dcf9f98a9e738163f837a07750d61203254a80d120c795f9c3aa791272f9474fe330da81a45be5ac838613d46c25e781606862912ff88af393040605fd4d55d07e2052227c37ceffcdd2d42a08bbab69140dfa4406853799893daf768af546f915a91b81d0da719ebd45b8b5f1641f15621959689e810217bea18e3996c532ac6e4e2e4f289fddd5e5968bd6fa9aec5ca435c532b6c74a7568c8aeff9dd19bfc2fba3b484a191e2faf9a069a24e2e6d928ac0bdf635644cc1ef3bbacc547a8e4f1d42d4bed3b6b8cc56216fa550dc37da9cf4d1d1591d9348594d14adc7a3fde5e5d1a3b9875c85de7df483cdd0baa86dae793e0796d14fef1f649de6079acbec6b6fa5f2cb2bd0481f5316f00dbe5dbc379bc3cd6d13bd8c775a727ef43e6a5fad1051783b22c05a75d64a8394a73fcb430299b015563c8cb0ae0aa4ec750399855411c076d21aeca8656f3d0cae084fb0a1ffc6f73b52a7ea5d4bd6d24e7057a3811719533105fc967439a32241f2d3e3f299da2deb821748cdee1a1c5e71bfdf88d833bade2f505268f375a9e6488cd8e16705cce91d15b60b2fd269a19148296a7be348aa349a12270fbc0d5748e538afeb0598081a4f1349217ceab3c4141d40f765ea2bfffd530fb9606601469fb131a44939be984c07bac8f26d8c068accfdefb729eeb47cfd6ddc646e22031f53a7698c6501d86cbba05e282d64b2f962a1b08b9064078dd1e3f14006f45f599bc8e600cabe6d855fcbae8c3060859202361d929a241f6c0711ac0d050b67a1d44da19e0b0e236adad1f60a327c9c34b2b9c64cdde5b8e4f664f2fc70599d44a63ee2b14d051c27d71231098ecd3d4086038d63e84547dfaa39db1a92785e38b640ea0345062a1c185b25a72862e7ae6574114eba592d6492087e2580dc5d361c473a614d647e66c0a30de806f4976b69a8b92301e68794ee05b96ee116a5fd5edf5eab43dc1103801eec861383f17c2bab9f2d9126c1802b7aee0c909309ee72679ab644abb9c4caa54add283b5954e6f881781e42f849bce6554c7a5e3becc5d5a209805ccd4a0117272a53807e3978ffb19641a9dffd9034490a9284f658599961daf52f24f6464c2099cc9ed3459d84dbde2ebbdbbeef25c882a9beda03573bdd4c6a0143b14d634a1a021d5f9fa23a7ed0f5598ee57e56672814412b6c7c08b8e709fb98575fe2716100d000a20a7e7200d800e556564c7e6a8da9d609b18ff0bb8a8812e96b834a6b534b0d5dc97f5da17f42f8d58e763f1b201625d1a5158c2f9e9e190921637474ae81d278002f197f7211540088931ca8a941794e56067ef4a497fdc6fa713aa9f20c21f23c3a71ae4cc5aed459ca7c020bf55162fbcf56a066546660c5a009b8ad2aaae9651c97b1e145853a10013d1bf68e7df25dd492c328f823ed982da54557502ebc6cc56d4d0bf2881bf3c536ea53b4dcb0886e73b066969dfec343441b9372d7ff38454c4337d45e2b999415ec48f19cd05f0f80c5a61ec369610784f47a5cf3b2a13ff5d8145303ade7189a300936006846812dec9ff15500f8daf47236e724d72619af3a6cb3e854cb8284d5b8843dfe056beaa45c40a4541a98c7507feb27a605d6e07189c8c5554a492a03ce6701d3d2ec782e2c1c8346b54a963435bdda3a93bbac1d837172cebb9cd18903d25cd6bed404eaf18730a6d1c6da0783b5411770ed34f35fa6c11a4292a34565ff1b23d4200ec5a73e6b7905458088fac19f6aafd35e0e791f28bbb2cb0117ca1c3a9e3c4863e487ce5d8c14dd140e9eb4794d87d75b01f683bca84ebdbf19dafab716421bfac9e95755fd346a0cd31e8520a55c7ca652ff63fb4e20ba67fab41e11f7390bc02363162097802c6a9eb18b430d07ea60064d5b546d15bb68cada79c113848136e797577f1783e9b53574f9427be3a28230fdd69d139205dd6c7e9e7f031fb6eab70d69ce905384c5c77d084360aac590a89b2dbb2d339899b13619b455cf9f0cdc08db6c5b5f3223dc3a663ce42bcc8cc6f947f42cdf8dde15a6926b753177513a52be95b1f0b88d2a1ec90e49959b108fe204bbc29199d7382c42ad5dbaff970cbd2dbeade54bd70415e54daa805d396361f525f38efc2bba3fd818f9d7af0594dcc341c20f18c624fe13ce7e7108e1d2fd06c58b03f04642c95e3ba00d4035ea0476ac138f72378d85050bf60dedc90af38e96f67fdc38483a73e847b41d31b894ddcb234f02b0d507bbcb15a8941f9c23b592a291cbeacb3ed213f2f044aa842275a7717757467f121294bba6b357c969e96bfab455c6f328d9e5181d909c3f0543b17d9af7fcac099067b043be79aca8e5a75c3a6d4f6246357a63c516a3ca595447f34b43a055d3070517c67ec36e636aca9ed71a001d4f7b81149124deeb7826dec3697e183d861d544c9c17baff82849d599e9e77ed19f801aa1ce095940674576ff270ac788d00c429187e299a03c6f3a1646a8f7d6290287e70bd1276316ae624da929c67936191abdfba45e2803884e5a3136205a38a841448968a7900709dda033a42969bd3417a8d865d0dbee1f261f4556797dfebab278136a182a63e5ca9789e3f1371808efe06eb0cc5ccfe26c0538d573378035afa39fb7cdf3ad889b277c8c6e84954e74f3ff3140bf13bcb45c822784125d23b5eceb73e", + "088fc7ba068f80efd8d4d62813c93c1eba77e9ff400c7781314abc901873ce200295da09245bf8fd2fce254397616151d94b511957c89a881256182ac9e64acb7b25d4a080cc9daf9ac2f231235483fc9fd415f69caf7eaf0597", + "78d5f86b071bbf8a185e5e2d54faddd2a9e26983b1e7a74be0f0b979b9f4af31", + "d9ce7d249af9496e99c93b36", + "ad542824b49fc520f0b7ff8ce2bff8b3d47baacb4a1c95ed56a306483aac551fffba48e8a8f5e4cc536e9266182f6811d070fb9282f5c542cefb4993ccc7044b42cfd6fc71793dc8dd2de23c630f9ceaeddba45efed9d7fca25fcb07d193c000822478b19c2ee9fb31760cfe01475ba8a003db469d1130318a79345a29d054a9f9412dca1edf6d8f1498af5bb6fdbbd3d5f9a244ff176f62742c53779291ef6294df6540d841f4ee8c7c58fc8497ba74d9cf7947add5373427d81ae928305b93dd26cfc65e63b0ed0812ce759511bfbb10aca98f2abdbc9055c4e5ab82637f6a965bb74f592bdf11118b8eb79d50331e76cb4d10c6b4428cd4ec2ef4cb727bdba2b5375f5184d77772d0f9fd3a3c579a4a548b9c2dadc22c805ae959617af49a514b43f47af834313ed2e4d1fcec2c4b9ea87f328fa3d23129a36e6c54bcd08f7e30645de86e98ebb11bcaf99543503eb1e024bc9fd51fe6bd5e6d749033f2452cdf28b3d0f8a304111bdd26dbde641c02fcb15dc21b1a9baac5e86d35b4126ed1cc8a2c3c2a5b94c99fb9b2008daf1a0c090633bf9e31326428c75a50e821b1e72a6504c9d7bcfcaabecd929163d365832e8971f5efebff99ee3f5b95f957e8904d05b410936d8a81c60b4947f8605c58e5b727d491995c76fbe06e556c8ab5cc661a0c09ebc98d61010050f68b31fbe1f9de8f6481b2704204b0164d8433ba4dc1076908c782826e9b555e8d608463581099a466f92bfd6ac9796eacc0ab771a3f11d03806b0f33ec04c69cef6b87d58c11acb5d1374450ce61ba159456b915043c5c17cb03f0ba66d027105bb6fff41e6422f13e2a466f073358bf68149a3b577cfba7ea08b42f83fbc5a2aff17c5ee7dbdac3ff97389f5b8d1f3750e5c9be651209eeb9574127ea81bd7619da16d1cfab85754883543f6474c8c0cc9d5b80e34bf8262d2b4798f9917bcab4b880339397907a5bafe7d149247fd735523df3cbb17ae5e298846ad3bfb7d4f902aa549b7667d3ea945b002e7b209bc83842a7b120d6d27ce80631404371f31d1f61efc5423e1822032a1cbf4fa1a6b6fe79934a202d5add8c6e3595e49be3dd9553a569521c50e9653bc684ef2b73c3526ff7a0843fcac9cc9ecf46e63df5b9328a54c576bd299a366bbdc0f83a9de67b03f1da16244bd6d52e7e4b52c4ed693827735554b05b3a260cd01a41d7c944d0b7b58ae4b0eb052da34bc22b779d7ad46f90f3d4049c097e0adeaf71bbb30ed24b32ff5c7a65177db77492c2571e9cd99f15e613797e319ea7377038d53b28a4cd66a697e5e8f84cf16bd0f0430b34826114b4e1d1ebaaf2939dff7f9f4ce7c0861e51701c42d9cc9e871018b447ccaf4e402e3d63be164dcdf6799314a389ada8bf5e51a35148acf627e51481b9b0e4bec09c9e6d59229721b151fa9adf8323001fcf33afbc9a949643172f39b0d10ef57b37973683fdd9b9eb46e63054fd05ffbef889ff8fc8f251b0ab41fb00757ec1964ef373fceb8f6d148a7f7c89944b3cfc240d091601b23046188ba70a7cdf7b6f96eb93dcd3d24d4aebdc4a29a749bfe3cf5f6e1a025b62982ce188e6b57245d829c9fc1dcaaa5309a8b9557b8824a78eceef6e977721de4065b474ae008642b974001a5565ef5fe4250194e8b861cc45a8691c461817f10b646fb526bf0fe7790bb0db29d1356e8c7a197ec78df8310431d632a032b5490c2a458eb8d4327a9679d7e8ef8739797b0e820e2c567ce3562592e862a1dfcecd50bf77fcfcd00518db65ee0effb9eb3655d5d401a4a47808faa596d17b316f828cbbc14a7e018a0593da9320140a752f3824b5fcb66aa4c3cb94366ee8b821b09e7bea2c04ece15e8a7be1f58463b525e8cfcfc3fdd395ec5b0575094313557e632d0a65e3099e3c653111a5fb4f0eb2aa710229fc055a2bfd8a7147cbecc10823f1244fbb6894af1408ff9047d6483ef83573b5421b9798ee387dc38f166b11de6c33e9785e9b3d9d28bc24c37890e4f8f8ff24cca298b44d6fb1c6aad28cc634a67dd427205285521a172c2a4884ac5b038e261e38faf0086a02aa29195713cea335c47d03d67fa0dec7a8cb21db741519f5f0ba0143f14d71e33d82c75d6a19b3f7a42e6c16d762354daa2670ffa55bd400637de9cddf9e7964a03b4c8956f36bf54d89cf16de23e8c52957b52eb4572a11d1398be72bdb129e2c1abb58c65cc291bb7b0d2dc326c6125a441863a6c92de0f47a355222d58bf10af0d297a86a98b4e933a8f844fc7f1bbc8ba77919dfc50c41219e3db309b92ba056349faa758daf360b8ac05e43fc2069cd46e63fec399cd7764b111467fc65407ac06f5f84a3179930f6215ac5ec906146c19e0d3e162e77a2bca3582128284282b251cdcac03ecc204266ac3a9cfe8d8854008baf89c0ea0096a400d6a0d2f7c681c99462cf0105f7a3dde690ece0438fbb820b9c73c6cdf6208c336831101b904526cf8ac331d879d71615d8b1f750ac7f0ec692d97a5e21e17e194a98c10172b5c4bc1049a8743188ae7c4d70384a7e68c1353aab7882bb91aa383821046ed0ebabb4b2dd126ccb935f48646b299095cdb71ecd5cc402e4635a3f7a3c8a6f54f4076ba028dedb402bcc92f5668dec3d91dda7319f58382017e306237e42480ee2c1f5930564cf16fdf37a3434585336b8e4535bba87311cd47722b9da727250560624a5dde48a2090ee44592d2fc06edda634b600fad9f843c6b2eaa0697b42858afee8191dd2a31e5685bd104188e2ccb057dd0a8d4d1205d7c846f5b8ec0f06bff61c7f47ac4da30e1bc80a4e95af79b14a83e9af2e0f195cb92d14f752a5f12ff90a05765be453075d799694848fcddb07859336ec101c8052bdc273d4abc313cfb351b543fa340dcd01bf32fea59881ddb8f33c6023ccea70532814ce4a2d0c66c846347b86c29dfc34f6fa4db298911d4367c59939020a3d078194e6a3a3c5126c24ed182398468e77fd61a5b1271f5cb2a97868876954c3f7179d6a045f4bd770f681cd82216cd2b1ceeb4e724b3fddeb74481e662fbd7f5dd45bed6d4f89d21b8dd9c1009ad2b0b16954e97993ab8f3fdd9d61f8db102a945591b4552f419971a9e46a792dd8392c8d9502767c82d9b4f69e66071eb579859e9ca070cad5fe3b7fcb77b8474926ea991ce7ad201421f8a79c051b762a066027ab2b9595a1c97ad57f3149f5872ed4d8e99195d47bd3c03bbee590a50a99d8048e912aaeed797977b52f0240a6cf2c865b108456881adbfda60cf701454da17bae879cf098df808f34e50bccaada2d3edeb1aa73cfe3c512d814eb33897b6ff9d67d3d682517cc333c3c2552adc99860b1f0d1076390de9f84fcc9e802581f77e14f5254da01831c70cb8581630dadb44209377d90447a1a21cc8a2d6d897db62d8420afbcc6ed85ce42f3281255bd43e0afd3e86b27d3b957104ef54959282b0e1b381a26f16057246704c7888126055af5a1f494540f01897e8781e1a5c0193b7bef4b5588d0e9b9c8de74dcdb63f03f7b15cf48fbb71c7c3bbe9329e3d326988bad7d0cb85537c1e0b3cd88f37a3c7765f548f99e495ddc29daed8c7f15dadf2e5b79def91dbbea277c51a5da250e66c305604bcce4789ca2df9a10614d72824ba8e4f179f35ccae7119fd962cce13b282f0f970ca6c4776374c4bc438f0de98aa04fb3cf23d2c6800a4a666c15bd20c486e88e688ff9e5fce906b4ae96ec7c3388d7567ce6c8bc61f6d2373b93f9ddbb02b384084b3f28f54c9ddda232d3084daa5fac5ca356ac0059f2fd3fde5d6a9516d0954653b699aa986f70733538e19721daa41329abb95058450e602eb5726ad5a8b81aa474650659c6f7f6f53f8a6e635bf35f4b1191e0dbefad3be756c6141c7d55f007f4fd131e5d5eaa120ba31cc32b8d4c69d4fa784fe0af7dc272898789c774e7995cb252eb6c8e8053c9e7adb59c27f675952d161dba78bdfb15859fdfe4fe4a44c01efd394bf51d43c600aa9a527d9c490971e188e28b980e77a9c6ea0a4ef6bd38d11b47f5745ecdb", + }, + { + "9cd1c25b5bdab9b9080db3e5e05dc749e0783087c310777d89307138613bdffe0ca259677c13208420d4690031314a11a97a986d8b0fea143f5b4da0972c9ea3cef80b4b0b2bcf2bff392c306a764113f0d9807be86a9027c6ddc85d096600d85e0b236937f295362bc1679537a8a9278229a36a9433925a105ab719c0b7f11fc31488fa071d3032de97c81540713dc29ae02c2e13be8823183f3cd9f72ef8ba4280b4499ee47c7c7c4492bcb5cf7e4fafaa7ec26906e58146215a3d4f52f792d3abdb718f57ed0b9b7fc7504e45a0fdf01ebf5924a4da6ac635a715879ea75a4983cbd9dab9e47638acc687f16684e184443aa9e81513ae4abbc4d1596b2ca3eef77cc9b0603fe90c0570fe6cf4dff0381a99212fadcf7968934ac1ff7664ed6ee0b61e41f5074dfb774b676c2b57a445f1c5749e95ed062837c727ae2c151c0ccb3a4dc1429bbcb9e62325117aca566b8fca0924b70f4defd7749d0389b90f55f35d1635f8d2efdef514f06fde46db6e11e492c8f4dfb7cb5454cedd0ddd32013a4836321a25110f3a017f18475a86583e192132f8d8fd4c2dcb2a3aa95c3be3a57216bf9727cfd1284eea6fa870c8e689e91982c116ceeee2f8298b55646efad684b96eab883fd3d629437e9a0b6523f47ea5b59474a4766ccd01c13170bb08f47576a0fdb573d4dfb65279c1b79cb535426bcab60f4022dc42e40db29f15a6148b461241bae62070389932f035e7257752ef2d6130503d72344b24d360cae8ec11fa2dcbe04d3b18e66d081b552e93a71dc0094d1046bf4491e318f2ae00debffa0b8ada58c5f23e33fb598829ec2f46ad3894bd7f530210371a02e51ae0a414eb2eee43f3e08126dbdbae04c7de4b7416df32953234a6694ea84e6889f27c74206ab8144a393a2614e92adcc77550dd54827387b619f004c13f6c4a31e8bf525277669db0a0c3c589eda15063f12eb774a13e2aba2f2f7b6e9bc69f8485f1d6fc5773acf83671812412d28704003e78a17da25bacd1d61a6d9cb9f121abc71d023bcafa713b7c954e4e1c524e5bcaefd86c4a843e209eabbd579cde0263fc059ec6ff10017ba54fc9c2a1171d6b06f5d85079167117c12e6e5d0c71c008765fce756fd0f1141fbad6c1d2f32cd8e80429611a9a78dbc8e738d458f9ddce58ab43c77b34db9befb25cc1a588998e8dc2efa75c6883244fbbf9a7b4d6750c81b8d3fdedaf98dc61f49d067c369409f984b155ec347a3bef73e2a44957b0ca0f84c7fc335fd89453759ad0ac2fd9a5b38afa9fbe74daaee7bc52301302fb2286c21fb922f74d756de84519171fbecaa9b869682d431614ff6845126a4034f10253aa244bf89ab8e0dfd1f7fe8fc1a8472a10746d26896c8ece7ef80eb2e910069435518ccf096caeda63ad692455b04e6525bb8bae27197ca5118a57fb9a5d8fcfae1b9eb7874d91eafafa0e4fab5cb4d0173f7e3e58fae369843a641e98f3ee460e8cfe95d98f7fd38a8d2235e9d6050015833e6d7d21d7015c3b1ff42f0d3a3d9a38d373c8524752e06987c9408cca550f08c38c2a9a8d86d5ac7a04bab44254ed15c7b5670e0747788e11b81adb0d29e3d0b50d6a429340ee0d44a8c286fcaf9bc46403d26b4a4af95b021336103c1ae0f1274b33bb8b21c8cfca8a56c639f18a9df45d083fa7019aaa14d1ba50eb9a4112e574cd70969640602096265a87b1f77c0e00bbb501555f1626196611b4a824991cf10ab2874a12a8e0390267eaf9e3f8f99eadfbf40d111a26772cda1f50743c417eeec9c80171a83a730f246cf31c6691c96185d672a0fde9ccd7091c4b455dc93326913497396e0a4992773caeddcd783e534eb0f34b99bf23a2db6ee738381b5fc94ff603be014c507888ff55557793a8c5439b11dc5a347f35a2666eda81cda4d1c3a78fc4f3df3c7bde91d05524791b67142c446f60c3a4022912ddabdf817ca3280b671beaa496c935661e5adf39c1f4650563c5c807c8f21aa59df926199c4e2404690ea8ffd7dd65f637452ff93995fe9c5ac7a322b9bdc756b7ed6f533b9357a4a1ffa379dd096f144e9e0d87330c238ed3c6b08c8478e23b65518ea1e4e64585e5e9fec2f26dd7400ce4c73ff0eacdc3b07e4f34f6316f5b82fefc66e442ecc92bea8c1d58635d644724a3380e71fbbeef4bf3e57c6240ff603d65447f510eaa3c9ac794fd24f844489b7c560c7814fbc307e03f6a213eca5ea40fddf51d8731b74ec5b472bdf8ba59751065ed2461b02c41ef96622e60c0d26f9dc78c24f94372bef7e47cf09ed565ae3a52d39b02ffddf1953f1ff500f1659db9f1c2b23534702c19ec1cb7c18166fcd33997d53874c7cdb4e6c2b4d82751911913434e48b37a61a0971861187e5decb7f5c1ef6988bc1d6f7fd147a623d8bf361b0d7ece88df6e1ff8d037762d232e22e51d8c6ddaa9dc597b23ff9efbbfd416cc53e5543253732a23aba151cecf73b3ecff21c6a9fd1f24211fc21cde9633aae918ff1c6b72468f1de7e0ecb6539fa353c069fcbe8920dfa8e2fb86782e3062462f7eb2a2c441bfac21ab62744b05c70b6fc3c9f8e3a8a0c5a4263ed256a019861ecb28e20ce78e2d93f1a1def669e9652cb35d105bfdd5ff2313d27ab3eb00d1b628b4c20f42efa23390802af96a8f261ded3678ea0b780e1f4a88d23588a4ebb058adbf9a9c62ce2ce2f8264c874c697482e25f8d5a6daca4f57fd97d23c42d7b71ec150d4ee33931db5f7d63abe7d72dc936bb23a367c798e6a01509644284d52f9ae27d7d1bae597b2cbc26139354dcca0fff6d76c6065d661b66ca5eeb9f8d85810a029cb95b17e5173ef8ab92d475a1d3e21799e874ff04dbc962c668ef4be9f94d85b2a99d97c0db8f6b6d63e00e36c325cfab9aceaf7597113bff0086e8fad36eac7c0b443de6d3a8533789616d4c863df7200ba795a3b8d0a2b9568bb32af95fa604a3e3ea778c3dae159e1b612458584564ffda07b8aba9710134242b2d83d23127b51b9e41584c56f667b71bc01060240f3a2bc7e5d438e7095c1236e0e468079a83a5dbdcf132d258e9ed18f94d3c098867d06d3c09544565677b454be34ce567f1c143e2f3153bdc0353d65090dfd8f7af4633b89a781e01f4634dd7b0323ea1f38184e697bfc39a1299eaa278c39a2709cde0a346fea53a61f211112450b318d137fe68f6c102085aedabd2b045fab912da5c58d8019239f3a44b18f4fe30c5352e2e2bf030334a1dde1dcd23178636f1e38ec9e42102d8c54df0b94b207e804eacab3edddf89fabda6c8e1bd4e17ae31a57716c679ee8bc7de4412fec3934c6f3e8b4c1d1447dbba0fbc775dd3258f789ca53f1593cadc710fef6fd282bb41c0468ede5ad5b914e4758b4148b0d0c04c75ff6208ca3e79d92de8abafa4ec70ea7a4e454f0759337ce575c4954584e2bb8444c34e823d27b025d25fc9becfb4391df9882452bca0373164cd76e9af316df3f5bb7532e22557b485217254d5ab72ce349620f03758219b259784d4c9f1c7beac3cf08e624742e768b53b3d60ad0b94442c847b84a516a93d9b7d068c44c43980b4c7e2fb0ac964bf05a11fb2adb4f6d938715dde88061b238321afc7e5e84799b02a94baf3f879f89a98ab474ca12085137d639b837ebe069f6dcd8456141d063eb1c032aa392a44d1d58b1e77aba38a280625ab84e3b123507ea7a692c4acd1756c031fa52d637703ee957a993804c13e296cc20c1de55c9b8c032e50afffc51c02e5c12f48383237cdacd005b09243d9fe05e51cea42b77645e5c6f4e48c10e671d216b90a48f0d8f5c1dda553217f5126646d11a62587eb0a4ee0efdaf0d54bc2eb04cd34f5a529b682ce09a34d5acab2c8db58ed6244f7b024e68a14bcd5d7a7daa4dbcf490485cbd38e6f20e839d2b0142b9d766f9527937bb1a737877edf6122ba306bbfb5379243a6b22bdf85dcf3b079691f0e90b28a4259c1c9d8a02afa5b5a661a0f9dac52435e7d22e3591593d37eb2e10f646b51be2d1a96cd4490289ef642ad93eeffd64d7cf830d60dc4a98c768a9bdbf6ec9923062ff04abf19e8b65b95494a9420971018c7e6268b8fb2021a4ddd103976333fa52389643c711a980664e29a8479aa9c4091c2cc2074ce3ac1ab4afa217d39c6a1", + "c22add33457539a957d32dd07ec9110f8cdd2f00ab6ac256b4bc7732f63dd3b867b0ecac262555", + "e71f9a3dd457b4064df1d9055889f105af175a2d10dd7b8729da0d0116c2d9fd", + "7df9824e774c5f86d83cb5d8", + "689683c9e7aa9c48b9fda0cfffea0458ea0c3dedccd21efeb06126f1194780917c9f4f2f44b1daceec3f6b1f75506f4169bdacf12c1f65958784851056fe0b4b42a22aeb043ab35ca73747346ac58c550324c4b849a404c94b8860967b6fc58aff25dad0556f1952c045b91f56ec8eebf6f552c18b2a0641c037e6c6538b289601e1fd5a7bbe7b6e0b224124fec341bf77615183abafb52b3e30082a0abfc2cf224324338c132426011d9f800b382e6b834896ea48a8247f149d92ded7e69c7800096076cd2a729a1fe41c70dafb1f855ffa2ffc27b93e2f5f6827ade7118af60730033675d84de9cde6c260d3d615a945dfe0ed25f33b6cbd2c0e204ee919219d85c7536f4700f06fa61937f8dbbe9bda88db1f4ba8a8d195cd385eec62edd9ce673880800be9aa4430e5c10a5908f6dd349af70f32b32d8db38a7d73821af47b993b622bf168565082d07e88fc48231a440469adeca59263302438ece96d89de11cf8057454d1bfe8e4e36965a4d82618834a0847af39dd8776866d9558a5cff79a1cc9d1e3c22e050677e54ead68b3cf0094daa01330d41bb66708a8bbb8a196fae5c77dc6774629d38905e81d97c5b16d755182f687a8046e55d148419cf9c12139fee50c0533b0f04a805723ce1ea5595fca5b668e58f6b3b396f438308372489b640317cfa3a79392cf6d1afdd8c3359557a83790021a4eb418fa189ad15ba9be0f74182ac76076f102ec171117a3d16ca20b4d200e03e54f1f0ee6308e463a148c0c85aac3ccbe5781cf45b53a313f7c9975a45d1853ed9104a860c08634a8211b87500b5ffa3d8d9d56f22256d485b9b45b24d3873159adb8ae25966cc40f164f342519e88d1ead1e711e1b2bbd4be64c7e83f056f797c2d3a5cf7c5025f92be5637fa7738a1bbba55f761dcd1451ce4b1e85a6628b629a2f7917a86363b01516472c0f8614abe2ad1c9d5501b2a44a68e3eeeb34a64541125bf49138bcd15b7c82dfd40708414b85107d8b982c4f99783a03c707a37787a91a7198063f0e8a2d52dca61755105faaa09c063c7a0849570cba1aa7ddb3600eeba602c7e7c9b90ed00ec731d4d1d8e4bb42f9e9db21616c4aca48dc27b939428834404331288f03c2b5e887103c51748d0257519c3988f6492eb70cabbc2dd8a8a910d737a678d0970ec48bef3b81673bd10b687b37e11d49e7cf90c03c54826ecd833bfd9dbb8174274dd45b139d08371d5d248ee33298193194734c5863adf4bca92bc282bae2f47da5201fc240dd0710a22a8d922faf92c2071a7eede7ee17232d3b6ee5f3ebb1a8b230600b243c860968ab427a5f540912e5e7bfa0271201f288727f2bd5173539d5318e5c1c0a71cba4d9501b91c3bffa7bb61b3713f1751efe94a66e17d2b42da51d13c3df40f4db988dace42a6a1b9d138c4f590b7227990711afbf8f56fa63f2800cc019bbd4a7b3a0983c9b9e5f77562dcad6de96e3b2eb85cd99d28a021a10d6734400a91369236b48ed68528afc68f247d45c79318fc5d634ecb0f3ef8536d8ec2e877adc3308be906c5b96777d0e05970023e5c5dffed12310cc97249e4b95e32451c9acca8394fde699deda57e938bed7167e62e2cb62357f82fbe821ee73b4e09c6e2f512515412c2f27805762a8493e74a3d30bb409e499002a97354381318af28311ce484bdf7c39db53f08f73ca5793945e13fc8c66d503fa95506b37ce134ce2945d75b424ca6367ef4ed47b9cb8ba7de80e773279bf23ac888eb105385ea958b1b49b27c8db6b1e14a5c8ed5d28808a7d0b6bff1a58f24f9c57fd8b8f477a9d1365f89c698b8ba923896181299d474b93e05d3c915b10a69e61910761a6d8644933c593661b0828afeca590ca18e702322d9140d98fcf836c2f7a4f72b59eb529823a52ab05d919c3eee4db2cae1067213c5070450a160fd52fa44bc9bacc5c136701cd7adb1faf484da376477da08f6a4dcaa37af47c7b026c2da9d5fd0b30741357104cb2bc0d3cebd132b5fc7c873ebeceec5492aecab95ab393f35b93b923d2ca071e6bd8522c3ad8598a05e96646504f1620c045aa5734d665acbdda0ef73612be4ca4d95ba069041e042497f7b10445869989ce30f55206a1feb4e64890b7d1f7e9df2e88a352674a52ae4267c06592d425ed1d88101cf94588135892218ac11f3976ab2b47a27f02eb887696c94b13d48b4370eb11222274b5513a0fef905c66d0c1893832ffdb9b333178b65338fd8b81094d8f86f2e4e96a47e72032cd6fd47af87eec295c6e980f595b57f79abeb4654c4039fa03ade732b1e579551898b801ecd6e0fb1c5fd198335834b51673d074a8222640d2a969998f5b878bf897fdcf3426c4e24a7c599e5567643fa79ea5d20e7de581a873ee0181e3632a4e304f9dae09a81f882d4061ec17e588793b160c93a926874d5a8b78727f88de9bc125589a9562db5bb1c01012bbea1b2eeab68877871ce83455db43cc48455effbc71c436aebe362af22c6a319d134f65681c4d0d51f9aa42fb20f48ae3f7065664aeff5d8349624a5d79eb0bef3cbb2a1244ee445f560a6bf7a796b2c950a37dfb85ed5be11e8e305e835c9e077e676aa5ce23edb1f74806278548e3fa35059abc2f032289f9bd76043c8dd1352b6131cf34f66bcd0e7f1d13081f5b08ed0c69136f3b7ad8e05e9fe99a9b73624095f96740c1f40074e5d92ffeccdc0f15502082fdfcfc97a800be511c22b875f2832b2b891cb1aad2a17c7bd0be4427a4549404172f7c14d5e425e14498237c26a7813cd8612d048703cb180f1a6194f688b4644304950b078692faec7a2a5c5bbc482f3a7e8ef2825c4c19032a7a79a2908ca9774c6403e6b15625c485f2dd078902aff769dfee2dca9373704bf63ad981b51f61253910fd48c49ef10e3938f35ca8dd491a8e569baef675df30367b093f1088ebe8f876191dc32055481d074e5e47a4bd728efaea9fee3e83d8556255ffb2fa08194bdc66897d97d1557186d5f873169461494a83368ed8065b9a033fa4c2f07f7c60f945b60479e3c89233d58f674c0c6fa5918150bae0c6de2b65a09ccd490e2ad8571745bc37e70982411af667f3e8e9b9f7f75d863e5fef05c1f0d2acc7c86585a83ee32e0a64a9e67e75b80def5bfeb7cffe6e6822efa7a9cf049689b58336b081c039696e0fd3b2a2a6b0d177c9b3f8fe5cbb1c69ea93c1235b2c5b6934f603127eeafc4ed0728161612acdb2ba894a5ac376c4ef1fa8d49b4722379e5cb39752837395c413dd29a2a88c03849b6fb2221fd85ba6d5a50ba7ee9c09ecc5e6dc66afdaa1b021282cadc68f19529eadab809341187d57cfdfe01d0798ab8a94277b9b868612e575bd98f70de80ebe5f57637c511800373262eb5ac3836b03808ca5d5f732f286a5f18a7b7fb8cd8f60e4debe54731c9c524b84694c5469975443964ed28ccff2f4e8e0cf4c60c1c8a092e986cf12fa90a994e4f26ac89fabe8a0d1e27fdc00f1d3d3fdb73bb76809f93ea113e336cb0a5438147e454e262fbb7d656aa1be1288839bc342b48ba7d0e72c85a2e24be1a97dfb2db85b5d850481e62f3b11a28c6407686e73d550b9f1d0f010602e82af26813d2484a8db2da0814782c8404b2865abfbe3c98a07ffb37eea6de7992cad73a9b81ae96a9acb13ba213eb4111d868cc73b0432d2b6c2d7e0e0ca7ccbdce86d01576e1136871a07c76498eae53fb7ebf2e85fb8561d10dfba740400ef4495ece7eb33ce3bce26344eddd88cf1ed8028ec5fe8e71edda54dbdae08f50f8df6295f6d7ef1163f62262a200456a7777d0565d7f5832fcc7ac144b5c3e0ce3e5c9b7f880a54ed5e80662e96b356ff58f2e372b1dc0d73cb8b96c72caa9e5dd312841a8be23f838bc706d893e1a8a48b2c069874c293c41d00226f73f987aec8686046ac4c0c972c991c38b98cabce30e7255dbf16039b95dc7d103fde630b03441b15bd2c214763fece9d6778d1c6354d2c9478c226175c02cb006006715fffc879a6a2b4111f6234ee330d6c84d453c9ffac08efda1f380110a8ef8c2fe44e2ed644cc3e0146b4d02f76586fbb6d69b827be38b9add444e2bac4d7165007cdbf2ea8c4b967fc1bb70c68b229f19bc3f79cb13ee6265264885f04c09a96583f331ed46de3e5dcaf08313ba6053f3d0c1916a0f", + }, + { + "3ab6cbeebc18df951d371e0f3cce2697fb367476bd9d50ca9e668c77636eeb9d24b68be0ce6a75eca194fbde6221755d57e9d3148623de24896a9becd98789fd3d14de0c7e53f81fe7f3fd491472a66b5b797fe19c5d0525c7a111a0289a9e65ae7c712ccf694cb75c490070bca7db17205af9bdb7fee27f9ff41fc78ebd2d3d399e690908b5c064ffc0d5bb67b0d2880bcb45c2ca2741691b6131aa1e5ee758fc50610406216905e13ec049ee92d1f95e16bc283dfd91595ec2037d20ead51d3a362140578a4538c80581b79852b0f6686c1ea66aafffc872024592ec1aaf2650d167a75bace024b261db4ab48b401cf85ec2620dc12a7fc37012af8ac1d6db923d82eee962129bc4ede578782594708357d29118fd10dc6d228bf7e461d2769e556488b776237b6309f3dc2e884cb2df1f43f71c53d389765f805ac053d05fa835e75fab0adb0f13ceeb425637f43556372d728a00fb005f7c5a20cf2b7f776066d60b70b11a848005c6d63dba0c93f139067b39017c997dd6b94c0138c3619e9a6d0e4b8792cb8d58a2ca12ae5d03e7637f2065fbb9e2d1722fd3aaf234488ca157d829e9a3b642458054f3dd58da41d7fba6d2b488a327b776d1aaab1a364c710e755ab22b9cf7abf1eb8949c5ca20c070f275f8959cb00c6d5ab7879003f89f795351a4ef4850e033d929f9a349b9133b2e0bd1cabbdd381594bfa697b845100b96b5fade05db12de040b814ec49489f39f5abd5b37f570cbb516636d5b7378f12872d02d4de20b52ed8ca0b12029a4c084621bbb578b870ca2ea79fd5df1ef8664bfb3b1a1bf038e4ba33f6ccde42c5146470c9dd293aa747d2372db1561617920142ac1d32e4f1fd18e8b9e72b7efb8fefc56d08f00450d23b7e8381849b1385ddcf9310a4850dbd6db7a4992690190655760f557a5027b5ceab3743365ac9041a5c14bed1126c4eca00d7e0a0e0e6f666f64bd1466387150ece5835192149237d5dd25e703e9d3a4f652ae04601d6acf8228e4e86055394c3abc9dccd02f04a60c298d101260b408b2620c137f77e2019fc6eaff1b234c56dfe922b0192656254fe3356143e969f64b7609cbedebcc8cb2b68bcdd9d723b9c14669da6cbfffbca2351de51e87db6afde435ead0017682b8014f91d9734a9ab9b374257273e114a8fffac786d53183ba666d8a67e30c1fe45bb1bdcefb5787afcbad213f8e36e78d30ae1305df96bf450349ade655cccbb17d887f79e00728abb449ea427fd2d0af80e3b5607a74a57dbe5264131f2fc49cb74415974b3d43ff872d4106ff11b680f56be06fdf85ec9dd850b1f77f759337b9a9ce04e611036d3f45743e562abe4b959eba7424a712fcf7c3f3773886aef22f7cf6168efa83cd3ff70b9521cae1b6689b2b8c423d883a007bb138025f2a31db2147691bcb365ac242efe40cd09a746cc501ae0289e80205993b07f86538d486803da14b74fb0db6ebf1c2bb8c36275137d654c1be56c65891cd50f705247d85621fd0d61ade8c05cf4ec15b84e8adbcbe017d7d5743d5e91025e0154a5d9bac7c6b8297490e9c195c5d74e046219c042219817a5c56636c7c4382c6a01d721d88f4b4d20250eb5eae5f3ef481dbf8a3f47a1d51d080bd4cc33f12645c8481e57835b77a85a2d83301172782f22026e69a43376ac4f5b78734c9eb914e6c76c6a12d4127cf195ad030825322a279093cbc40a680355d086a27f3fb7560713b019e7c286d96833dc60590e9a709f2e3c632894668e74ed20e42cd83a23ebea3dc3bcc49d14f8697541780fb2072dee6a5672d0d4e7bdf5cbdacdf5fea9e03c6d9cf0faa1e954172acc26dcd344bb3d9b2e0e6015cc55d19713d795bdb7c21b44b305e69c69fdb7261483f9693f36f45d356462f1ba4498de1c2e8bc3e0a70893acef2006dcd73cf15b265a8a5d4ed792a34a846d8f1d3b9b3bb75f1c5e57a00b36c00203973ef4e2654f6cb29e4445318ed99f0de6ca992281e83ed03feedb66aeed6a461c6f2871ae95343cd9797e58430d5639d7ef5c59c78b29f76a055e18e2b85eff177770c60ca4f2d61e612e617e749b4653e7901b62ba02dcbf50e59219349120ac01e6b8a6e98eb54abd16b921a1ff85898f90fc49a3c8f8f4ae9b0dd32c3e7f2e1527c4feb67a496390f28532f20acc71abb8bb4f71b434104f41e36b705289858a4e8430b8cd9449b0198ca2244923cff1df0f63833373c275572de5a9a77b23e5ff54aebce8e86d02651f26ae32e69001e5f3951967579ebe8574682cef8c12dee0b18bc999f8cc0f07e2ad3ac94d3caf30c1c8a8295756aecbbecbbb4ade8a2b8015e52a0eb1290693c6316d036e0c443fc4ec591c32f7e7f1b3933c921d5812233d3c21ee5528822b59ef2ec7eb62f7b04f40cc8238a473ec37a07e54f8907825ccaa1421c2964d2c756be450dedc011e1cdd9045720421b9a4a00e9d3076c2fd10d71ee36d5c0fd2c7e42396b034a4cd0245027449242dfdc42c8af4a34df1b4150097726c9745247b78bb2bad5fe8af94eb13ee1f41dbd36e56d801a4c9c5b9ca5d3c26f4714b6fe9f69b87567426eb6f4ac97e8c9541eafc19fc90d3b24aae0f76c4f3f81063d206ff695d638048c2cb023147a78332939d2f2470d16f1ed0e5d3d4dde438affb2809488b99815e54938fac3b02deceaffde310cf422f9027f364f5e79da5d2b5af1b4138ac9f9d301f396b220829c1f60cd2b54ef24576e5ba6ccd4802900db1bb4eea57de7787eda0e30fa90cc19f099444488699bf7c442c398c2ed989d084c8cadc97325484e337848c34562b3dea6f7670f935ed3d5216c970e04351651c1c31a34e862821bdbcbde202d91fed38965e31cc3b6f1e52288f327bd0a787ecd92b3b6f535d1d000b0f02d41ee01ca54e4e6179ad7fcbd60f0e41dfa5c9cc7ee4f7de3844fb385ffa3b24092b30be697f1fd32c9faef29ead346e42fe2ab1d312901b678b43b7758edb7eaa1c2d038b4cd6a7dc759a6b12cec955bcf4179006a7ab6e22ef15986df107080d340b8870e2304d57caa87a9961c04655d7d66c7f71ca9260e02aced131d6de65d256d6b487141c51bc86eb1e4721742f07d09e799b30da7b5ba94c8d701ae34271ba06f8ce134a7a9a2598d1570cf05edd9ec868cfa2e41b4c20a8bc4b8bfebd45f5a60408f08e931617746d1464bbe1f3844ab3272ede635f771f9af30e483903ee4d0cdecbaff4d31451e7791dc97c92042fb932fe1c82652c1d682a55912e33de3b1299db076cef594458670dc4f911f4a244e2bec757dad4b0052a41235e2f5e60b929682608c16a61287826218a1ac3cf0d8286555d5b0552754685c365d4342f0d9c45065daf6786179da791a86b50a5edd6fb4b21f09d9747136aacf79ecbf52b00fb88b0630ec7f0a6699901ba4eff913a3ab33ac85a71ebb51ed343eac86eebb3e79c16e664078ccda09e77ef8e0919b8cc447116b65ccbd5200fbfe86e9bac5637b33c9bcac9596b57c14ad5da548e96a8ffad5f5c69247c68d464c770011da7b45a337f138cda6b4e15311879bfaf12af4c61fba596780e6adcd5dadde372823da6014122dbac70f0dd896a8d387d3c74df282a659028d06cfeab3ae22dcd1fc3ce60f69a0d678aeae0e5681952949e31ccb8975cd167c9d012f4b230b1c1f47022eb1a3042951b338a734cdd17db0ed483a621650deb3510efe74191a94611dc212c0c73b117a73b8ae41892cf176742bd98a7cb73dcdc53b42df56d640739852335f8d44d901fc884286b433fc285fd5b3db8df0a8522cea3182c071f559c328b8516c9252681a94eecec7ebf626c0a9014d9aaaa0c694d14855433dae06656657d1f8a939123d28e00513d72bd3802d211ad7c1e06b9228c0d5656edccad5339bcdddd5e01afdc01f10974be3187804324fc513ba583b7b2da1e9096bbe3d078c1adc6c34d92c54e9c49fccdc17d10e66962120ee5d9b1cfe852569436270cf7c4c3bb12568050e2ca4db08bbac16214238413195dd4d936272fca5d56d7551b9b002df1807ed44abc84c66746387b79bc9e830a635c308a7bfad7c2c22cee6d3d0c5ebd8b230837b7ceaefdf71a67a3a8eaae0c36de86b2d96e759b8b53f8b8604775eb7a7e13223cb21033dc87d775628581a954085c2d66c1c8f225b1aa86091061738e7495cb36a5ff032dc678904bfa39a00285cd6947865b6d4805e3411644b4a4c94a6fffe05ef31e156bae6165d801685dcec195552d029d22e5de393a82ddf3cd3de3ad8cd6bba2325a03982204f07fc3c21518ef17a601fd743b27f7191bb446ff61d3c61d7608777990997e911932532e5b3235f13423756f5b6c786720cf6682932c90092", + "50772c5a0e156ba13a9d86edc0e600021d56f7d31e7e452a74ad53a6775339c7ca6521d87a8c79b42900a1e9e6a1ec03f7e3d615611c3fd5c9927c40e5b508af1a298794b60148df01e9c9e78ab5ea8198c097fadcd6cfa6694be64e00eefe1a1885aece86f6ad87df766e692b58ebc41982bef5", + "93a2561a9904a1787a10e2a668cd6a814f2877a7b512698e94796805875c8d1a", + "588d9bc1d98210d9700ef488", + "165d8c9eabcd5e93e6eff7be122c8c242e1a7f284790c93324f924efabcec4a4ce48262011b7360c2833143d645ff295453853c92f0c48c6dfc2af7ec58d9bec0d13239c7e5593cdb39d49376c6341263df80c0ed2ed79fe9899d0c07de93f6ea95a5dfd307e49bdb5672b158a4df623ee86d54cd1a0fa9a60ce39d1f5f4b6b0ce9daf2a61a907cff3bdd3f29156ac439638e0910d728843ae17ea7368814ad7734732e7c023d4954e1cd5fd19fc9b76e9bb84b61dd4371478917757b14b366b4bfab4eab0d9de746088ad43d8742e2b9e58faff15c2eff084df5f4316111d5dd7d23cc0b1ee1000253f26cd260aa636f03f64a8342e531ca1515b3beecc3ee07a29184988325322d5c09754c278231f92c0d980adc919d4fccf4a1da1d37f1ddb58ca997d6d700946199fa007c43853b6caf5f8049233584087fb23c3952414ac487e452f0c3898486d04e5b008b843122501f9c8a294da9159a04119ad5c8e9f5c211411e34559d3a7bcf2ac10e0174f94f3f2968c80ebdf4498de172884dbdad0acc3a887f9bfe896a6004d54cc424567d53f1198ba33c56aa460edc6af0e437b34322c1144854bafb2434f00703c1992dbad0ceaa0616aec60a380676ca11558cece57a936959d6c2ffe0647eeffd37524fbafa9691f31499701b202d9dc9980e79ea517089eced779aa45b522c9ad193e63ea8b64e8a942f630d44370f23b7e9acfedac51dd9f139f8806b09a8fbbabc76fec3c3721fad5087a6d41f93973af8d787d8bc74a3122d99ea14e2f30a3c90be4b695c8b269784eefafa52d6a79e785eb47a23d72f037ca572b7029d2f37baabce57658119fb02c5b659e3aadfe0052f1cc3c0afc6fe4624533d9700388713945c20c1d175da53738fc73f48fe57fef8305e796b474b6f8d3fc5040042373a13384237d95bb045ce0c20934a964a8372acedfd6e559aa84180a86311a3996cc17bf7f73e5d85d4db2529989e5836edad490aaa5f56d17326825aa20608fd209903335de4b36b79f68b6a52194f6ea8ce42570533df650e65b50c367f69b9f08c32b3ce3e75318106b8b2c6b6d09369c781fbf2aaa35053af215b621f833814ec4778ac683de0dc22c418b077a917a6e405ccbde9f72ed523aa696be1a6f247b096b9235217bcf19b88d43178cce5a7d82335fccb4c079e00280bfd272b9f16ffefa7fea38d09dfb2e4874553b135052595812aed3fa15096abf1eebf9abd598289e0d156974de4c2654c60825d42b662ca7439816d9d3a0255f40a4965504f643f029da535d4b109e8658ec570e99859382ca0ede0b0495d508c63c7f1eff3f648c60e9b773590cc663a751178ba7603a11985ff519056661b9460c1aabc30e83bb0073a927682a06d1b8050c345f7920c1a37546d79587fae2a92c803a986248f90547f0b6c0ad0552d8260d2a0dc3cc76d092ab76b8c12f05dcf141167a6ea300bc23227933396ef6fe9d51a1ba5a754485950f06cfa6964db2d0fd1d4393cc36f0592fca25ac1a6aacda2a32f548ed20287e3d291661848a62d41504e4fcb1cd1785617fa5786712b3005f1a1041733df6cf838ea3ea0b93685889bc6b2857d80a9bc0e7a66f7fb3d805770402f049889311fc112dccc72a25bd127777fd87bf5ab56d39bfe6be2b45a8301c2f324dcc50b27540200d522c24941701f7293b8877ac84cf35638507c7d912a3a94e4384b68c507412df65d0c4ca8ec2da704bd4483eb2e0d13b68c0c2b68c106a55b9710ad0a1436d655a3cf3c419d5e6f027ddf5dcfc896a5b316a7dae9290a7bf81aed539a647c8c98e24e7ed6a4f7f00a11134ca715e5826625c250500f8f16b40de048b095b5dd08268407f58a91c86c36ca5a2bf4f8fc682adf1bf601da24414c74956e1a8fd2888b5260e980c32f6678a4dc4ff73220c22593d23144b84c2ff56920342248876d15ea54fc100c09a81b802dd15f030bda9aa08727ea49e34f0ca8693e0a06d0af06ea7ceddbf0584adfdebeb20510bbac683451d9f84cf0f4e85c34d979e550e07e7f414d6f1011cb3dc28d0df6d4aac113f2d5b04e4486ee2cdcd4157dafcbbd55e8330a7176d1b231d9f47a63da9ee30fec6cc2c5aba3a8c6154f79997af89d972743255355647235ee939f4f305ec655271e0cd562ff6f401b86dd5826c769298445108ad0d9e13c504551f74c507436911331db60ef0ea99dc259b13cfcb0596fa9b3c95cd7fc3b1611e3b012b6719afbcee7548939676dffc372276aecd08e6a14251407cf995266545427d49ae5ab245cd5d534c52542fc71b3973f0b766f3d234c8baaec8b74eaa8ba90abe160b4504769d02e08d7af4e7ecc167780c619cefa58865169b674b2b1e10d82f6560ba0be41a781f4afa46bd722566d941a8e6f87e4a5c03d89685a22a3470354f2922e2915f9d46288a5e8896ed13617dce694a595e379f25fe621dde8ba73d865976950954e5bd07db147a0fb74f87cb06aba49b073942b82fab33a878651df73df2721ef800b658bdc6c359d396f684598e93f38e79639b8736b02dfcc124fb9fc199c35f2fa1d0dc39939c57286e58a7deed7b6c76e02b99a14d9bbf11f65d8eb7fa096fe4baf0f78cb34736499a0ca550f10d7edc8909dc34b039e3abdf1aa67a51d37a2eaf4c07022897d4d8355d3325bcf392d91d02d462488ead90b366e9645b956c3802e4249d34b5b2b2484a1dec15a9477821df6bef5e1626ec5ee9832fc3bd0b63a3c4100d32fac3e9085f0b5ba43123f54beaa7ccbe6ba68231649f35a28acfcbbf97dea2d6cfd96025032b3950ec8437108d0f07baf1bc89e3afbc2cdbb5031d3cd9e20b19018adda466382059229e4c8c54b455eda4280bde43b36afa96e146e408c7104523d5f565d22ef86d4c7cbf9c6e0d0b30e37b37feb9332939c642eacfe19d0dae1259d3267635051ea5f9b518dd74786e45fb8bdf72cbe3753bd50bea2a961b49cc0e2d589e77fd25ebd962463fc728b1d288c38a79a182b124d345872afbcfe792d259e7e5334311244edc75d05f9a12eadb61fd3ff79fe8c097eb01a4ac1f0c339d3be74be3d96b0b6a15e8868d043a0f2007ee8aa51756d78b7a78ad90fd9a26afbcb51fdc20ed7a3947f715c833e363bb87504d8efc9f8b93a993e2e26430f79f3cce203b09093c9b456b1967212eb0db4f7688d4dccd4a523866f75c9d9e7ce07825ae34399c5607a60b771866a647b6d5e1e20795ca906e451f367d8c40ffe79a2cecfe7aa47a402f8d49be9084661c96ebb11f1b48e7e8abd2978ee626f962e98f99db4eb3c6a52aa2bb2e62194120ce1e773b9db784e8c9b5adcfb70e3bd5717293eebf014e9872c5c1bdf3fb296cb88eab5e97a5ac320092033b49f37d840dac23021c19ab2a89190f3c8dde927f6e6b41874bf71ba7747a616682bd5b3f17a1dad40f4993a1b186ce4f44afb4e36af7715450bac62cb1527eb8db1d87bbc4d9c99415d16660e48efd911e02f5777a77e72733af3c3f5315dd0c785d5212b79c46c3bccd74582c57cfac0d50fc0c85370476913f9d8e8e10d0f6602f2271994972de49ab1a91728713c3cfcedb0e61c270b5fb331a980965bcfe10b41251a0f7915d5943f49fb139626f1c424524f2fba3a407e77dd7513669894fd09fff4185fbb997b4e4677f6ea0b52892f013f1691bdb38eee9307a565e396bab484d91cea9268f49aed29e319b0add900b6a75f7461db5486aaf5366f98df05674361308931de753c70777de73337a996f6d4b0e06d63a69849ba7533bb0e446f062edbd6250e61a49f4120f84efc1cf74c1bd30cc61a2d719fa76991dab119fc814a7c56f48bd584c7935679c53bb0ac78905b5d961fcd89a4b567d17a5182651cb07146aa9a94972ce613e8ff9c878a8433c0244052f09980a52d800e97ba65e8ac186862def58c72b9feec91266e26aa5075b3337c7bb8716b3acafe666ffe2df32b78f9995661d3ba28f8a8780436aae1da2a3e6a0a16dc562b8d5df6f68391aab73a10508e0f55208f974a0505f0fc0d8a55049a7b631fc94fab91459ae1f199527362695b41972e50faee34c5cca9e35e8682099f5e9652f88cfe9fa990ff2154c89c1c2a4ed6bb8a889fecfdf048ee0aae7798c55d6cdfd062cbca97ca289578c832d658ceaf26faba54c9c3ee9eb5bac80698c1441b9cba287f749a5e30d5cc715a01c89353ceab0974ae77fecc1d2dfb31a5101783cbc002c73cd155dfd14685c2f9acc170dc437c649b6b4720b676848a7f9b56cc4787eabe72f6e3f2aed776f9bb1432fba93a63bfa44fbcfcb6eaa9ef4b79b32bdbd68cddbb9897cf5a02c6f99fc765790092edf0d5bca7c55cf232a03fbb6f3eae09b12e09a9b49a538e0589394700d16ebd3", + }, + { + "3497e8d61062e6f2084ebf72d00e9a47b550591edeee9746f31ea28039a1646d384c4348af293ab778f92a4807c48fbd14e8dbf3d67339c991dc4aca7dae38b5fb7bfeaaa538611d328b653950f4f664dcd257b345917cd66dc6a1ea75d99f70549d1af9d67b1608077b41576f38bb4c0a13ff4fa47b251142c6fbb79f9a27f43841ed0ebc0416c37f571aef8fd63b99e93ae88db50e9ef7d499ae7433d5686b165579d3598f96d9e7b1c876870310703df8fdf2069beadb34984f676eb7d3840c4c5766dcee3fc39f0739260a499647429339482e232362bc72c92a299cae36e9069cc5f4db8893e2c1b9ec0b4f334de26c951090b9724c2b3b7655d8248bc12a27861e020eb1e4cf6ad0dab903279b6fbdabff761d4ba159c1f631e681f210a8782faa86e08e554b5e30046157a0d1144bd08a691c2cc2dd22f3c3a4e5d44c5d03f7e3e385382ee4683345c0d316d41ee75f87038b49e0ad3ca45121789e7e7b95615e1a9a8dfe02c044c2935a97b141f639448182252ebfc980e0411e5fbcb3c01acd5aa7cc5d67101ffa6ab6acacace5f02d67155c26dedc071ffa66dbad26f67a819d46de0556fdffc1b4ab6d60905d8ef873ea1e51c62571c08b4c6db242e733e02e11e5840ee445c290b2232010b118839b37d4615c4521e8928e9ad475cdb4a3de9928ec7e6daf0e20d22e308347b31e7e877fdacda0c25f2e5c33a329e84707816ff4ffdca30dfc753c2cf883df16016795db34359e9363fac60624ae4d2b30bc1f2f99c23d953779c22ffca145fd08dad83c0f76cf727196799544c6c07483e0a41ca2e1b1da5a730956154f531d292b5a39a229ab13bf24a804eb68786e481c8aebfd3bc557afceadc41d00e1472c3b80ce652be1245089283bf1a1a93abd3325bb6eea121db8c0e1d6c0c31decfe9dba63c89b881824b0531651fc500f2f75ca9e5fdcbb179c9ded5d600a495ea704c2709f4a88c4fadcda4cd82a5b089f25a6fe0161159efe03fb5e0d44bdb5487f25e8c9adacc389860f62b06a6a4f8f104d9171622f70652ace736e8b28b70a4d9fd3fa4b9784d1a6e6811150d0a0601d31d17f6041e58a1058f99b80b0a6cd4f79c79a104b6bb731ecc881bc68e1d99ab358faf43d8504957ea0152e46e27dbfaa17d0f58287276e4fa82ab78a03513d5b4c3199d1362e4fd6447d1c26fadbd011abc69332ed0181952b391f2e8a5c89d68e22a7c451f69a9573b6bb6d918c7e3d52116f3f12f1d43d2af46bb450f58bde1732a268293cfd9cf2b90a844588c1979a30d6ac21aaea4b9e5500ef4a8bcd62bd70cae6acc8839f818d23c615e45daf14335c36dd46817c9b816be60c3848caa812b055da33f45bc01721d6fb7e850fb1e1458f27c70bc34876a955aef11f5703cfacde03a039c3b75b99b2d91fc18b00071a28ce25eb169b946b49858aa0885a4c665deca020a3fbba55d4d9175fd91e7901ec9eec0239806e8305f8238e5270f4af5c94d0008f8a5564636cc33c8a3d3e76db2a7915abe798b0dfbb3e322b33e188c7b188573bddbb9e4a7edbd4bb194b9743c4aceeab449f8affddbc2b109eb3d84f3b2f8b18ea2962680437241d82bb6146674ff1abee7baacc38d5dcd688b425c3e3b0dccdda3e36de755afcf7155d3d7cac2e279baad167e2a743b82ff8ddf3db8ecfa9680ddf468339427a4e9fb8ca4ce6f1e790c24e7269912a9989088c65965b0efe68ed44eb26876674261e3e72042f5995f1a7075b3932f4c23a8027d0db35ce4322122f489995bcc0b3fa32b7298c4c1b3354766c866a2fc0ea5690c58c5e08ae7037f70accb3ca7faefc37d78883f2bcd768285dd2571dbcaead813a0b8ae87cc1df868e93500d414c4418d5c80b919f73b9fd46111a02bfc884f9d30ee14fcfc1d55d54256b9572afad4777b8d8172c911472a22e7461f6f85aca063c19d6fdef3351149ee6864e93cdc54ca5dc7837f0ead91f5e3b155795df5dd1f933cee8671ffc05058353995019e5f6f55d2de6470605a5411afcd7fa5aa8f38d77dbf496d7fa9c5a4d35ab661aa15c77ce42bed44763166160ed5bba954e470c293ca301363f5b837406ea8ea746057588c34acf266030864d8c40e2da88ef04c49205fad1607d456767d30eadd884359bce04c12e35487bc1885d9b104c9fd4dea4ceaf054cf46cb3c77a619ffe963acc9bfcfad0447591ccd32cdd1fccb1fe7080ad75cca2e17f695ce0095a774327123f21e2839773506a9f2d896bde87dc5e35512ad733aa408f8a49e9018d1013cc32f550c968a03308cdbc73ab444f0a79a13450d4de906369da4c6a675d7e338f738358dc238be4f047579c8ba7a60448da541cb9e57f22bfcb8c26280a59b77edd0f5a009a3ef1e2958d6d3c3372840dc6a0c6ab1fe86aeb7590137feacbfdc7da57c77595b8572b45c4677836ec86fd8c4ca8ac351397aaa3aa298d752754507e1cc514d41c3f1ae0a692179218141f65bccb9acf6244730c6d00829455d21371972745b3665f930cf2aa9f0abebe6f7b89094aeb4dbdf7bbbe794f134b6284e289c995ef2929fc1bd39b259259950de29e57cdec15c4a7d33ef6e689596a6ce23301d25c2ace77fe699d90c2329da4d0f471bc093563dc735ac2fdb32c6995606a67bc953534939ed1236003c004d3b47590beabf39a1e4d5d1b00898496e9effda68433da17d1ab3a32aefa3681aeac116c5705077552649153ed15e9d704e67d8819579feb02d91db0d3533182ff43ee5648f5cc9a595ded4772d61e77bd9bffd6f29fc1f478dea44c32d5ce3118bc8860b254fb0bb1e85223bf709a7c0b9a52fd3914f1b1f295fd246bcb568388dee43a32df45e3c798068608a102143b5511746903255b98238003eed68776b46bb0e64af6c9118ecf9896709aaaabefbc1f58bf45b45768345b560ae2cdbe4d7da497736da8013c4098addb4258cafe7823bdbdd715250b707b155248d39fc6773639e4de3b201fd3cdfa1526c4149ee7d15bbee680c956fbdea844b1470a287d430c5c7e2d7b51fa756720397bbe214c19df3399a989958732d93979e361f7266e53a59bcef695435db67cd8749d258e7d582726e1bcad1395e68d7848849fb6d74451a53ae6e8989c64701102959f7fedc6a5cf8352e218396f9181f33037ca74886fae6e57460bbcb71cbe4cbb3d3a81e2090434eb1d6d5baeee4ede251952ad88001ce047279cfe435a4afe97847f798d84ad79a11bd44f09222d2f3b7fdcc47ff8a4c61f40c4629a0f603193e0aa2164579a05726e547c9081abcc0087907f8034469f740a020e19623fad42e9cea64068abb3d6ff2f6680da328061c200e1f646816a5083786ae5b71728a0e5cee14d7a942379c389fa9dbc7afe7e7ae075c061df11e4587bc90f92f1b077c091c43a25e7b3e870ad852c2883aba2632063c4ff74a857ef7267816317f823a8bc5dcda311b513be3a40e6bdeb89210bece50a608e624f00c9d063e0c8878884e45527f50a3ab4447a9a01652322700f087b6f96ddbe96a68ef98656800eda6563015a6d3c0eb1b6a9b21cccd58cdcdd074b73e40a098a980210ef831ec9e881cb42ee07519fbdfa52d9c62766a2046dee7752f880dc9082ed7f050b49ed8d14307b1b811bd87b6db2419418e49885d20fd7ca8fb45a11a1da17ac2304393734b552b5d02a303ddc72d1f456697a287851f207054c18a6262f5349348c806841d21e11fd4e4ed9c01fce1688483e009930079f7d2045a34f98ed83256dec66400a783d58c61619e6e42f6e2c6e6fc69e76651b96aabfe643ac69681955ce595f4696b80dadd1f3910061be6ed0840d47e928dd93e7c3d6932d3ead820d06e2539d9a604a6b53db6bb599da851de7cc060faa9af76d708a9aaf371dbc3eff0fdb99702504c3006f789a49feb730cabe40745837e2c8c17c77f999333798431231b337357637a5efd1eeed891fb7475f2c9f960e67578adf50241287bc5599ee08d0237f08c86ed9b75b62d612a9353e48cb4cb022d78f73fba1fab7f794a5ff64c97e6c91ec464847a81e5a5253989a1ee54a41bcd9b4b77bae6e72421471a7ddf0136edc59b72402d57e542916ee47fb3988b7123c6e8debddff2df171d4ce61e83c3d41f36143c9df97f2f68639f1bfc2a9d1fe175fe9f45e17e5cfebb330d3f06e15e3cf58acaff09ea576d896359a3f06985765824bc499319384e4c458d4326db801c564b0b503552bdbec60752b670d82cc8fce9028ff24ade3e805b81a72701b37d4ccedd72118b20d792739e035bbacc4893ded88619a6c499f246311947e48684a35406c4ef279c71ab2a74f6e5313f7900080f19aec3a39109d4aa41c930c66c84cd2163f4cdd59fe84a86cd8bb6468bce45a56d09490e032da844e6d90b436dd874c1cd32a75d1ae1d3e86d8a2ef948649eb56dd7b360f55ba5dc34a12f9279945436c6fb83d1ed57ba4ae1d9342a3dc2df9baa82fc9fee927c13439ba5bd2ff9f3e6f577b8d2df731db14c51db8a14bb15bf3e125f1ca4cb2fe856c5a576cf995db5010687d0799581c5e76d400c1855bb46680a631cc582f51c589a831", + "823d0cd34e7450550da9716c1f456ce0cbc79431483a6214939266581b0e899e4c95719a09c1ef166a618289a6ee6971b6fea3fe380512cb977823b387ac51d341c26d4a835c61eebde37764d2e1d588df7886177e98e3151106c898b3196bf4dbd83f5f", + "a4639c22fc7f370d8500a53819102df5e86c541c0ca10e8f6564e50b90c28f34", + "34a04df283c45655a52bdd84", + "cd8d1b2e5f65ddb3c0da8f12096134da22ad4d541444964077610aafc1f77f8da5ffc75bee807541cb6eb0526e78d57fd88fa9d9608914cf391ae7ccb8eedb0aa711889f9b6192601163b271c90df5d69fef487b6c05a24fc667469cf16cbd5afd58fc830119fc9f61b26dd50a96ed84c96825a615a3aee84ea4c950152323b20884346b25c9e2a6be3a93505ba059fbb114c224bed8f05f54eab76b2c9c23a0fd942eef9696ff67484b542c8347f1b1fd7df7242872b3528c9e45030447b2bc85eaf191963291e4223b75778335e5f1256618ff87bbd68b5a9e5cbd2ca1dc8aff4625c834edf8fb0d879b1f75ba9b85895a6bb4d7569a41bb3be6cdd020065bcc69b44a8fa335d9418ea2d090d8061e042e8e1a6ac03a6d5525079f14274079734ed42c5c9ab9986f0fee6bc9ee6c485e233e9b4d6de70664902529a135a5675ae129353eb2c00b73f226e84fe8c594272d6eceaca28b6da30492c92074250ec80beddb7208f9b5418944305b0864009b3bbb3dfbfb4cc2bba3313f8f7c6c19860f1dc0f5d7aa06e3b551adfc63dddac980a79d72bd2225d54a87a93717291c7b78bdfc5521f7f3239d5564fe9c9559dfefe76b77efc2e75991f31a0134529a6611ab9ef076491f2d2d81ffc5774ba8f8009dd7e5881e09ddf5116fcb5a44e576aef6cea91ebf52c56c742049639392cfb8b280dc2229252e04d8d394ffafa539290acdd8118656e7e1a4f7bfc0bb689448379e8cedff7590a09a3f5a29bf819fd87297b96ca07431a29a07ae126eb9d65e21824c16707db89868e127f17614a536de6ed268b1600a8b02aac2bca54a09b7cccf8e184448df334f95b9f0221187d56da7bd422f09b4d94228098b563df53414a5a86728962a2ea63023d8c3f03847b36db7cd189ccfef3e623b14842b8cccb18b4f80f01b32a4cec48f3009b98ffa25dbad76089c8700e90848da74aeca81d01f4dab2b7e844a3e48bef21f33c92734b821ab382bdf6d0b1048a9866e676b78ac9398678ff626d5c173a15a0a7514b2544405dd54eccaa2791605c87d7117bc9f8c0ad84623a9d3a2b1733304b492d4dec38f7981db9361b03a2837a95fe937976c7f4341a802dbf583366fbe368a3af3f92618046bb55696cf7af1f465a5a57ec5908621f431ffc762f35abe892f772a60a3f75ad8401321f67981e90083fdd1cce40903ce56a629120d6e13c8871523c4d848664331966298c8b31a5bc8174a8c14f61cbe98ae7ee3e90bc832b04318864d19a9b8b6d49a260f42bb120cef9afbe704faecf0f428d917ead9f020f5e9d772bc8f29600f8a7623d8971c1e3c5f1a3b094191e497bd70f85de124137cc4b9fe0617cb73cd44b89aada072625e25976e7aaa5a8fe9d9e3f32db47d1565aaef0e84d256bfce6aedfa1a2dce5a94976a2bb9a0da95941fb7ed444990b0e0e87627e35f3235a998019650a5e5cae804ecab8cf729a5c712f1e7d17486082dd50cbeb2ee1b0be6a7bf08a66ab3cf1fe9f49c7083f5b8ad183f32fb35fb8a41230e4041bcf0e5ef54bc3d21ecc1fceb08d95d745a997e8f2fc3c0f6b1b6c1c02e03ff02ae0d879d13eedd42d9f9949ca7ebb785764162ceb6c6f9944dcb3927b2f4eab23ab566b2b2bcc0c7d77b82579e88203602264064ce98b5b1ed992c1bb13edce579ae7f5e11697b493749f308b33e47512533350df5c07c3dadff656197884f359cdfcb736d29231aea1524b56e06c92f5a98ea663543f67e44003f5b41907a951dd792468c84c5e0e1b46149a5c9751295e153990b78c0cc712889a21b299b0315150dc50aa3b4f7fb0079ddd39d263a754b1dcc595c76ea9fea6c120384afb38d4bd40491c4689b1afc9dd096dd0327c84802bda6bb6b7a8830bc6c06b308ae9665a8666a5551ec954eb72adb827ef38f036c51698a28c92dc1c9e25c267532da2c04c1bf27f5b683ac750c3ef53a8460dc186331549bf82868f9327422c09afe1cd15e161bc41a70cab2f973efcfc8f01a380b86a432e1ae540e09d404d93d22a20dd5f685a52f0acb863dadea236288b1714700f23d1c19e40e219e8ed21f6a393e541abba850ffbbd4030e5f6567b7202fb66d86cc2a0beabd495814f6a50690e8d74cb8b093e4d43261fff80e7a67ca06dfe808899cbef84c09ece01414baac740cbe4c656b17991868e2a136f4785a0de311aeb18cc95ed33fbece22aaed8cc1e47f58cf6c09a6f92c96f37d2d2485b369093506f5e9f8534f8569655277d0399ddd3d33861bd40c71ac53a44d1981cd744d79202322d47a0228356c0e27efa2ff1009cf2a416fb6e8844eb76b8077a4a3961ff193e1c95b222e72688ba48be82ec5da498e58861ea613782ed1ab50a95b5cc236834af98e61528ab18453c20ff978551b81e1bcc0ff4b7092bdd9ab0b946b7324b7361ef05e1f7d7f6a336281b4bb2c671a95a6ab84be6bef1b9c8c3d2536edb8d79b40637e16d7281ec5243016232d7c9fc07ed9dfcf555055d8ae65f12ad150da81f62f2e1e82b3adacf6d623ee4759ad61a09038905bcf1dbbab671dd28fc1d10a0b7eaaef73a5862ab449bd84c8698d061e79fbe52a86739ba945a01353e0f3916667bd7b4356cc65451c7003927f2aa738d98245760550156dda529be741ce3ae1afdea0de35ada26ac241fcb5d518e6ee7f9930baf88bacf8bdaccbecfdb920f3b26285439912a8902ae029b07f28c1dbcfde780cd2bee6c6e5f4520c5c7ff3ab5448ec86cfb270c39586f80041f3764b5dc77dc5ced0695c89671cf90ed34c4067b4bd938b1493c7902dd94be824810a00bbde4915d138fcc7584790bb0b6682fc0799cd415441ac90c1caa008c7fde3ab4a3aae478c64991ebe07e6c4587d3046c9ebb8e125e795f0be9266bcee5a4e4355a2830c5b34e583b0355b34b89c08011db6f6b8371de003074704e8cdda37ce42c7e395b6a37bae3dfbe67bcfd1f125c9a262d56883ddc028773988270aa30c6dd326cbffee589f38286533e1d5c9486011170be591beab5e0ce98837cf91f0a58d69d872e364aa88daf9cfa71bad167129420282d99ed5884a1276dfffb2c4100c74a8b863b063c07937f2e9c12523deac4ea16178863d975e3a5be5efb5ffbea994d07f7ddc5326bed1f5c9415c1d4ee1667e3a581499bb573595158636ad94d84f7c6e4b8efc2b141f2bfab7932a050fd88a8c7b21877cddd488543db5b11138cc808e1248b6e2ef492faa8a32f9d93e3c060b5cec10f03794248f9662ed8c283a8e0eb493824e2750ec75b3b1292d80ce002083a3c64cc487afc31b20f84a778f386b012ef7bef46e638d0f1cd75487ea46e05621d608482637b3e642a9a2c5371bead4386eff968b3e007fc263086d8a930dc76a8431a4e6907ae35c7b3291075d1c723f02e4895714803c0e97d65b04c0f27d01d5d68001bdb3bbd44dfee1eff1754fe8c182cd9bc6ee273beb2a444ca1766f747d86f36cd8cef6eb1dafe0c38b9327a8cac6e83e076099188f02721cc4de3d940c3ef19d9b067be07b890c798a79ee8c44d96c5e05ee5d5202d941a674378386233a83bc85134dc8c46a7531b2b952fb277d8089cfb13e882bcf7545f0605271fe38bf4754f98dfa13fe6b635a62bcf962553882a8f28a9a5fc0b3f85509b702d4a7555d40c4f7d10fbe80d48b4826995fda7d15f14aa9b95fc6526101cf09c97fd74baca6bd26b4fce8a57b0726e0f68118969ec067e9ca39b2ba59fb0d78eb5cec5b872613b1b76763b3217d859bd6d991bbb5448bd4e49dd6597ddec9e46afb3f71d254aba828c91de51904139ab19138e36e6996a207da80323d96077c97a3e8994296376d4dcb602f1e77371efe8b020b7b6f6f7bd2bd733ad9c06c45b77a2893d73b4a8a57707969af74ba06b2fe7d4079bcad1cfeb3689ab95c8b1215fe0a855eb431f67df4ea589dadbf055086924e42cb142c9031e25b81e8e1167a54008ba1ad7fec6794f203b27f3092dd72bb766c9653a72b2e25c965f53487cf3baf74eb7742702380303af8c0a61cca3eec78d4b709e35e2cc5bd586263d9f56fc12454547bc6165e3f070ce7b2bcace5c8cbf52f987568dd90237cf190dabd4ee7a80494692a5379b013611f4eebeef8e1ab9a9c5ba61926095545e19c3dd61b7b404230729aff7d82b6bbbed6b4a926f6e49189e3bccb578fcb3537951fe9c78ac842350ddd80133275ac0bce3a669183776fee8288f874d29190b452d65bb7d8edfedc6fa0ae147102b92041af6dd8a566932e016763b60a5b9b1e3667f228cab075f966d1c525ac19d12046c6409345799adfd7154b6d8b51eeb1eab3a132ac6a2e08acd1a34bbbbdd019195af9f8a93c6ed5463765173e669cb0d42b6cffee1a4b45987853d43c02f920819f45a4fe0905d8c65aca182b4bf56fa0dc51cb53c642fef003d92c13ef4bc1bac571cbe2ba3673a49694f6311b7dfc17a4069759177930b179748d4403c7259e10a5d221cd0a6b745966e598f894e607b779dd5289fbdae0b4348141ad373a62c76aa454b35b39a7be875598bb30007fc300606ee2537cfcd7c22b6149880fb3cd8eb53054d698a0d20f26a5c3ce468255737a68706784", + }, + { + "5622aa8d2f308dd468a7e4959ccc01f0e80d91f79df65b8201eb44911f6abc758c6703bb97908fff377395d33f96c328a4541f414b7ac34c6607dd85729afbfe01feba988e4997c6bd2c99fcc35d2467b143a8fcbe6b49247226a9e4c0a4e3c1a29d5931e6f1f7a31d90a0e0edc4479f08ef9bc65ae4eacd0b93b1cb38948dda31e60b18d702bbf5935bd580201d1f280cbbee679fd834aa6be576a37a037eabe989c3c18c7fb61fda8b9ffaa8bf22b57a101c19e850c454353af7af3d755b26ff1ee78b9d9daa78294972d108958682a5a29c8ef260e2289ad9d7d74f32fd4e51e5d9ee828366abccd97dd56e035713a6f3a1985383c0ed5d98c4accac2fa1ba7d30a295670d5224952f7b7554fcbfb426c9496f054834dec48f9b70af3d2b1c6dcda1c4daf3e9601364e57851952c785e65d753be1c22729bbde33aeb1e4748dbe90da6ecf716f05bfc68ad819515dffafd33a909562b95140ecfff1d0747f8e0459fcd3ca6cd8893262614bb4bf4b639285f327e7ac782898781968ec98f6f0f2f3c4bc5f9c4691ffa7ddb3662816f8ad092095b598bd4d10d6b5fc6fabed619eb11dfd4d638f4c0b6cff7194156a411e8ad6d3229320336ad52fd9811c3a1fcd571d1bbbac67c6186737ac7ca1ed9b2bc46e4e578f81c164b09ae5cdd4059a2c22b5e7ce1dade684e49200867f9bb1430aff9b99805cfd31f7e3fecbe898f70a4eded86b8bbeef7050eff6cf8ba71395a7ae2e270a2b58010e56cdf6efc4003da3d8a82e96979ee68694b6113cc9a6e377d40a810063830eb95005a81405e5b7de8de67424845bab1911bc55da6338513742d237a555465fa54b07ba50ed712e7a57a39fdcfe4af50f064ae969823aa1c40cd86a621ec90769d0c1babd33e8388a8bd76689215b9827a5819127bb32ecc80a562a291f3192eff34cad2635e5b0c0bc174add72e2041864953f1fc72be7d28111fba0438d9036da3d5c0f220ccfde2319bb96fcbfae6055ed7f1c1967ee9a78e93bbb77cbf151084d602a5a2f087d49c3134582c1a5d7af24f4c88be26204cc9dbf4368b19470fef49a5823a2d66c65e9b1e8ab56bf5a7bb3220696840a6222caa58a7b39fb792d95d25038a8bd9d916e853cc5459640f8b8468e3d51f05f1b95e996cee40ffb7ae14cb289094f1b77d5573c1aee7c12a6c3a1e31491422f272cc5f510d4f18ab63d3c3f468c5abd61b2fa7ba0768d46392e2a4dc06c7ce79841dca916cd33cc0a700b50fc660e5d1808d8b87e65feb89428055495823b2dc317d6d9e50aa5ef7ab14076174ed32f56abe7d410e58ca40e92f8a31433d0d74ba7b130b1561f2b075fa11ead744d031f34d82f1a64d428f6cccb0a009be24b42937bf3e99a1ef1fabf0fa7335dab52918382abe756d3de229ee8223aca6d7c5de87047838e387d4e472481a4cfd4365256e13aacb518ce5300f18dcb5e0a28477a6fca08a74756ef6bd8933bacc98d02abc7ae60df7cb3e06d41abcc4bd313c543ddcdea2424d98ffc6dcaa83658aae11f5841ffd4f5df42368a0e815d2146a0fe138b223764b133d17cdb08d485e9f3dd2bf2b220d1f4565b02d7b9231d592130e4436849f49b1a70772244fc0c38da372a8c57fc80ad57828410a5a16ac6d14e093997fdd5b26e4cd4b248e0ea221715ae6e112e1b68b09f795540e31b1231244bc922207b906c4f42b5302dd7474286b653b4d1bb657134bab117d6c349fa0f121c2f8dac9cdcef510c1c28545eae0ab163db6cc84ca182feb858c10153d0136f00a01c9c7d0bed892715dd85c4e73627c3a2ef0f43710dfccacffd1d9f118c9fb1a83b2eb328b8da3e955f027d95294038184f7b895d77532c7570cb86fd6b37a5a66659cf1e330db3930f302838706050c0dcd91d532d49c89d144e9a7f864026ec99f50acc02bd5f11ee88495ee8991ec4723b189f84e03d992fd718b5173ea1b033ab7d3568dc4656648fb54d28d3119b0f293a930a772c394f45ee66838f17b73a94eca27033f9d5c2ae22eb813386905dc024673850a087958eed191d04d05798bcf909eff2deb2a0009d223323b290e3d6f71b2797a2bc2590d54294a5992d629336518514032614a04847c3fad8a7d1cfc2f86765b48cf58acf892f68b691fbece38100e6a71487ef5c4ae934f1ba03b4b26a1967f70ef1c697202e4eb22a3a95ab3b7b524f0241ab4d2adf3ee5e3f2974d0bfe4419ef0ab11039ffc26339570e74d260c4d5a16f22cb4f60b03253487f5e46c47836ce29460728086a615f78d631d89a06790928455889f58adc3d0a3a84ceb2ba9cdb00a403080e6567873b985fd59fd9dec71e375013c12c51cb67d599198f36f58fdaf897e85dfe6f9896cf6d35a84cfdc6834dd9447a2a10e1ffa9fa8edfef1db9e8b4a245b211de49e04b7e88977b4e1ac9285f43526f2452181ee0f80efeb1f6b2533b656519ae45652ccefca81c17714476b497e5d8e9fdf6c9f504c7a7fa7afa36df5f4f8da5b4b973b1618fc8d2d43e866b235e5420551d1659e5bd545fb78a3e17d9cbbc8e842f3fe6be07b892453ffd689d5188f26f9e4c545ba0b3132af12a03bce6914015d026d3d7df661c1e6384bbb50dae24abfa78079a2b1ac41c44c7d82a59183f293f12011e781d3cdca2f791afa5b55a9f2d6139587bfd74bfc54ce91e642847a33b48c1b366fd8f08f520b79ad5113a0273735aee71ceae361a97547fc09b22fbe4e4ae4ae13e52d65e0971341aab368d1e917c8f5f2ac57ac119f981b51b7c99ff2be3e16935b7c73e28fb58d332e6f2c36281228c479c4d6095cf15b14baeb0769191dfc649a70471a25d45d4433797a5b8ba31ff567e60ec4d759d99244d0fb5dfef7c2896809938ddde0d2015a4c5ce5ef6cdb5752da1c2a33e5bc78b6b7c6a5af892f0792c28560a357720da3cee3833bbeda8e98e6a8cccc6535831cfc28bc8557b4181a3978bd90eabb34b99eb7e55d9263e6790ca34561d8c87ec4e12b4a38df524318db00a9b5bbde6f5a8644a818a88e91b521d716fa9f95bf70b109b9905bfca926fd42ecb9114c039790abb0392a41ee4c190536a89ae6194befc2dc4bcf7562bcb84f65c99b69612c0511552f53436b6c489204d3881e1f67e0fba3a061165d2955c2e2e12c440d31556250a8a5cc04ee5e09b1d627c14e08bce1a92df7f6475db92a3ee57e4c16c3ae677c44237122818ad457a29595ab528744707f3ab7ccf3d20bd94047e013e647802a7af14cfc7c11441ea6e9b9f960fe69d03911ad2cf3a8f633e0d647c71dc7e188c92e75353fc953d6a30dd0040c39d4355b71524f1a4872fb1ecab22c8293b54bb22a80e1e3d4c886d2988adec26f041dd0565cfa9edfe5ad9aa7da1d3b8f68fda9e9df9dbe98148120af6ff30e6400deca6dc9593dbf06c856d0d582503e7ffa185f87c6e7ac58184bb80b4a1c0c18d669e23f9791365fe807356a5763ea418c39d94311759b29b14324fb6f3104359ae66532779b825f92b7c9ea2ba43ba7de04eaef7a86192bc93e17286f1b6e0a01c33c796ebed8f17692eb9237173a051c14e4869afda2643bb98c9ac4ea94c6bdc1401c80190df6abe988d2f0b2d80cc7bc8362ba25c6e5df4370a43e156aebd6aaf856b3f64d5fefc622d078faed40b760a361966a4765adb809dbcd74b7a41faffad3a64823860e5656874133c7f8a46b5a3ac591906359aa4f171ef6bb2ea6b5f24cfe25c2fc7c1973bd5d3bb5f197002c5ca1bccffb570f0265f5cd949c7386d961ac9c5e18b5d1d6030d8bf4a48c10f12dcdb11924b02b8ab5e91f425ca62bbe42b80c6b6dde3160ebbd55803966716734327058e29bd39874f2eac199067fdbbe8c372c5a688d3615e2b65f4937b67d6a26c64cc2a9e5379cc00925c678f174f538915f912e85b7014c064a73bcc7ddd38e1a9627ffddb4bfd6da764fdbfb45048c9495ab1a4cac5642f6c9ffbe97d33cb26964a23719620df3d85dcfc392c4502759fb31a6a797e99e51e94cf9bc79ac15de4e5cf7a05aeb88a8ab4c3b6f9c52b99794503f2c49cd7e230a67df7403e552523249f29d257b35c0c7712053c3d9eb583a1a7473d7f296d25a66566e4ba8b08de2a31b082e40c8e5b1e93985b324dded3f52511744e7e99f4e3ffd99d8ae17bb5122b37f637c5525558eab18a378f5e2cb56fa003ed3af8d139d16ec4b2ea79c415b0ba4d750ca2cdf653582ee3b65a9825fb9b123593e36e645232163cabda515b959ed0a1419e9894f6c677ac200fd11babe3503ec7bfa319f1b9559d94a6f82945c9ca8667621a5d28920949a1da644cbdb58b84742e9d65e7f2027b99fba4dec46f642bd17e88fa109143b26ba7fe285c89add0b74a369f3d381ad633bfb4f72e1822ff96aaf9a73b3c59a6e457cf40e17c1198c64737037f52d9b3118daa3fa5cd3e3c7738e3b3743c595893289974a4aa0d6bf1446e70964823a7d5cee67b9b25b7125d9ac5d1d61f2a6947c3deec6deb575e2fc5cec60df26de3c0545e5b79156dd6af33a78552d1ee9994cc8501b7dc5fe7a22eadaf201a92e06ef03be705a8bdb4db65392d3628c7cbf44cccac292c93cb5a407a7a5a0d5ac9fd95b0033d6eb719d3f14609190dd40d5aa1b983cd4c4e278cc8a1e7d5fbb0d39060d6cdce8de6a17e2dab973a7fa594205e17edab6514372eb51e03b0ced6402fac0efd3af49fb8214a505cc9f5f0ea5308d7fe6dec369ba154", + "9f522375925222a04f5c95ee14b6386412025903ecad0bc3ab78afe1145136b3a3592835ab4ad6faa66be9", + "d1ba82b3ced3e9817642aaacedf482e79bedd0560ef2754215ee792514bbf8e6", + "bb21211f342379370f2642d3", + "1a6683805d3f478ca1c1512b9846468378f83be27393db63956e151ec408368b47334afe610249182f54c4d0a01b704db2aa90a9755b8feb67ef9301f0715d7d6bdfa5cc4497cef1142a43eeb42f7c413e8f489af30d742a706d05a40a0c4a5991f9e2cc5d9fbca6ad3767682e20c146ac35aef38dfb2a77388b738fa022158d5c802e5f0761096bb45b50815ebf09172759521b5c5d459703ebe9ff669ee4d14a86e5d0650b597f4a082ba0aef366a924ea378b91c3262d99f48189eea19c76c0f644079f8415c11033cf24d30d6c149ab13ca5c29deafdc816e457257361c1af4b915da312d2e6c7fc712faa27be3e67c893f9005a0e2c28369991c1dab22d38961d1abd6d94c4d549cf491aa1f8d522be3ffa6d214825a5fde3c94c4e35c29b8d05b2627eb12c9d94f450a85eec6bc963a279a37c2344ca36eb604c4bd11c2bf2ecc0dc16c2c365bbbcad3541bd54f8d0bdbb3ca4a087b62fc19fcc1c13984eab807d2a6a1386643d90d412d027bcd0a638765498cdbb1f4cc1b91b69bd241eab3645f225ece85a56e5008d6094041f8cca6b9a0ae3b15585de6fe0695d79d348f8619431ece40e736957a7627224fe92bbe30df5124f476d97e36b5b08b3787e8e00f0c10013068eb156f82f3494a35d6edd5f7048d1e91954f1013ede22eca8b4ba41699ee08decedde87139180a567c6d169b672af0f12aa09ce20e9cac4e78b8067d31ba4f63606c00d1d787b868cf7643fbb170f8074667c9f7584d36af80b4e6557724013618c28d0dd40bfe9d4b25761b3c99558af528c2d290d04b09821bd7f992c044dd61dde9395bd0c9ddec6d0bf6e044ddf0b4b2d6753f5acf2e9c904caa4e9f310578527b85e6738803758da646919989f735b09c9a5744e63fed2c3982e59fd29d2baeb9771316bf8d29213a4956b66c78d5654436ffdd82d0d572530fd09507b988d13fd743f35333237681f8abbb301a8ea870159f802a57760659094d0e4902036c5a62c563f1fc86c4238e1ce89f5176ecaea194ca112fbdeefbef4fa7c203678cafd34486fe58b2af04f84a1cb620c6e123bfd96301e0a5e5e5abcc95d28b852d0cee2f51faa73e42f22fc335f50de4c3812ee14038633a195083f3944284c1086c34995832c3cceb7d385b4ce86af10685c16005495121105272d1d739c584a07ec7801c3667bb280987a8aa41f9537e9d1812a5dba5b385a0b71d2e9573c6f3e9ebf0bf7267528946a6aa6f43efce908d32525cdc3b825bb11c7239f1de412704d24c17455b9382fd6a873180f0d5d44dc449320973d5cd0d4e67e83946b6ef47e5fc3dabadd80751f1421404e56b1bce748b7bde63c6975ca81f3eaf52586a55242c9745dee3f7c796d4508e818eaa4fa50490c1a79624561b98d2e1139a328806414c905372356a22393ea0da51c83957029edd8c2dfcf46d9564264d74c1c0497034ec018b1dd4c14acebc34b6d2c1a616937c37b8b4a0ee5dcdf787a0de1173798ab929b72e0fa83a6c9b9a99d8024328d9c236a8f57550a4f83e8071eac76adb55939f85f5b5f514174b670a3e8dc2b54656f6201940a81fe4953d2680ae4ec58635ba74d15efab3e06dca6ac269711ef2d4dd49f731e24a92a3b935ebbb3fe8d001cd4062669ae4baa62c2947033afcfaca227d88a11769f87456d5cd1bb6606891e71d63aff9cd5a7d23263a78768ac2ac54ece1441fd37d096cd27e916e68891137fc3cca427febd1947cfb4d7ccfad75b2ec5e809c132111eadf25a73043d68333139bd2435de9941bbc61c5c509897cfc19a21645019eaaccb6d06371e3d0570c09c7556e41a727e44d9bd672fccd1f89cc7d58761c16df8fb75fb8a1dde2caaf088f02dad91b6489114398740e6798f3ea8c7b0cfd974e160a0106d703d9589ab09aae79108e3212f19cb950ea9c0798a1532bc2a065d5900a12054395c0545b0878ac0b1d461f553dccfc2a22bf254ced88dcb538e3889549960b77ba6237ab1458e158f4f46606372e797ec9d9ecc6534acaa1218e7540eef11030bb9c3e5a7816f3b33a590d970619bdd2dc04d5c6f4ec38b7cb4d525234b836eab57f65dd045e02367eede9049e219b8712b8d6fe178080c5f77b821f1a475259ae571a5578eb3b48863162d45486f71a28ecbcedb35b320e5b6401f9e7870aa5418449bf47502626e1f42abf481b48d5a6819c640bfdb64f873d583fc4e40187940a6c3373ea7b47195270a8657898f55568985018abcea9bce1c155d95b426f91a734b2a14ec2c7ca2011a4d30019fd9b3ef63a804e9c30c3de2651c4213e90285a4ba100b31ee402e8a7f23cf9d4dba003bbf982526bc63be5af102dca34e7d362d6fbf6f56046160d7af33b364f2a86074d1c0fdd54aae89b19480efde2a9caef9de7c0f9491e1cf43a48752cef405a0ff16b0fc67bbe433a3c1b9661406c3726092efdc076febd60c436476f24dab1b0b8f8893986d951ed72282990e8b1526f4dcf539b22c01c6a7eb5577cd540a16a81296ebeeb7ddda72e60fcf2840c5b42c5cba30eaea5402f267d1d04bc80da5ef0dd2bf3c7a2be986507617c9bdbc96c6273a0c9e586a0c48c98b4552113149c6f79557fc8ace0b1a512fec3aa09ef191f95c2163113ac5cdd940f0c2120509bc53c3ea493c54703effb902ef752c830c61e85636ca95429bf16937bf6786b3eae1b277bf08dcd69f521a0078d633beb33c9aa0cb33b238e1021ca67df122a403a3698452740bdcac81d22ccfe4ab5f835d1961708d1faf6d40f115f16c6094ea37a7ff15e0534f62c19a6f4ded0967be337cdbdd2a7c58ba16ba2e4c3686e9d075c6fa7d29b2a0335ab4940d2a95c4500295f4db84ae65e46c54b7300909cc5411c725a31fd962d239aa0e2007c285586b4c778e2ac7afec42cd8409a63d7cd9c677031f43f4aaf04258dcf1270c02a4764177aa66db2d8f860eeb1fd06d0b27587537410bcb641f90aaa7bfc6f12bd143f66e7c933a0f3ce6b5048913e1b2d79eaa6c19e7255d5eabd24d5f12426339541a22d600cdfd1781a1a3894740887840aa82e5a461fc324285b0223ac9b95c3eb88160353f168b3d4ae8a2e87b7715b5fd2671f66e6eaaf9365b3d9e3acd9a749faefba6009783771177aa4dc91f72fed7a5bf6b1b7738b84ac0a07b4a5a3f0a9134a39e1e7e3e2f9a92d5644295f31c5a356092bf07c709b4c34305ebf50e857a4f593dd1cce0439d3fd125c1ede1a48f583bbbe0eec7058345129ef78868a96f8a76ba7fbfd1c5eebf75f3e0eeeb9db87474b96f321b87fffc02433513fb467fb74e2fc8feb498d51530c753e9a173e95e0edc5ba9802641a45db281b2e2d87d409057b4fb1925e834e90fa5619ae3a9237d5b104e7ac67c2bdc31001eedb4ec7064b2f72e0379bf8780f67ec4b195db014a2d130e77b1778efe3dc703f1310a566a6d3b5c9b12b1d4e25815493ed1510a516a31ced3b64ca49a783ad63ea71a57290727fa31386d2fbfe41f12d36a618c6c28d8f10405eb3e0a33e8ac2e4133ba75c688c8c9a2bb33c8fa032eaf3ea0d2c27bf89269c4aec55f8232b292e7fa9fc24527184f19187d9d8a3f52335e2feb5dc6d997b9b773a79a31db832b752e5738963ee5d61a1b426414975693f986e165e52d46cb059fdd4f48f008e96d4c1a48306b7c002fd0c861721656074cf11173ca65cbdb694c79f58a3f3365e872b24670b691682c10261eb1ffb2b65da031d070e31542f49704b77970a78bcfb4c4ca517b4c966a4e8e27664704f633e90cb7d7917dc1d3a8b8b7fcf59ea3a8a81305761923cb182cebdd59255803a14ca8a75fd007670d79a25eacda1138d67a0fd1da981529dbf182fc4d7a700ba498e4476a1d415381c9e2ffa3bd46201cf2e454c4aaedbbe3893bb4121a6de02cbecc1f319155eb8c99d1030103bb6194bee51e74fa01f28dbe16092955b9599d5c1f1c3f356e26d48fcad7c4cdf0eef25c25273dd62171785c9d2c5a01b1f3da9b4786b1b399d890e2049b73c12de2fb7177f2bc3d9c645398111ebcfd83b73119897bb994f998f4a6fae1b3d6361e171059dba0bf9de9af7a5a1b21641790baf82a36278945d649cf5d310f3792fdefe8c58986a48118fd94647b786e47733ae703701e18992bc1b143b1da6110a98030bb9895c14d7b8eae1a155a550e219a5b6301b6d26d7956ecfe4c7023eec1ff62538b3606ebc7906a1243bf8357f593b6cfff32e3fc6b51f6a0ffaecb658d526f7a5e9faa6294e4808b779f4832318cc184e49e8957b72bea0d67366e040cf76a85889fc6b04e84afab0d02947d0d83e0de19f12966fa8372f6e82ff402bd7a69195eb1a7864a3375aa9e23736fa4d4b0224647e416474c01f72b7d4af240d7f43395b5b04c8fdef1165ce1d56ee8ba0e350e6ada893e0594facbfb5f0d8829ae203929525951584c21371b86deb0f76ef5daad5e847135a6488b35ea33e3a165fea502975d6421d4567a229bf3ce94605885453610eb9c82f9ea743bee9e14776bc3076a29af268cc72d9092a492d9ff08c345dc2eb2f8003b561d9912ae1198c58107f8b37a08b35075af9863110e6770425e9d59c2dfff9d9942c8bc3bf7904c2a952bcd573706caf1ee14420564ffc433c0f5871c4bda916f2530ac75819ade49fa1de21edacbbf6b7075dba21a84989411c566b7c356b81803c7215ab0f326a6b8910dbc62c1bee3af51f105fcdebc0dbc56a50b22cf81eda563bf8c2eff98b476e8", + }, + { + "99444e82c6c4c47070b164f298ffdf6955ee5bcb3070b9aa95ce658db4db084d2056cfe61a93568b44ba7ddcba5d450f4ba0da7b119425a6628b3416663c638692326cacc5c237097db5e537122b465dcb21d8dcb5fe831789b72deff3907685c2e23187a56990221e755930a09f8d6cc065487563cb8cec82b9dc754952fa0b342c92d99522fbb39854e338f470a4b4d5ed2a39b8b6253b7001b0b953abc588d757616c7a5d1f12b1024aa572ef5a47dc8480943aa6cfaaa78064fb2b29830280e46efa418d0cf38f57980146f2482276c9b6b16f865b1606bf1131e894336979a163ba2e70adbdc746be0d38062fafcfe5603e6bbb55717b66a263fbd5cc7476302ea4a0dc6167221f745a26a309f5886934f4258965a0ef0803eaddd05e54008df8a0695a078b797be59f1eef95a658c99a7d52001d4108212ce5f18a39f1173291808c980b0513f1a531e03ad7380372b65572d3967af4c25fe54d99d664cb67e557fff05c12e10143c13b1bfa3e8db093ff832a7978ecd85d3971349e3c9b83939b73f0ad55f1f1162d0c106b99c0ff98442911bc15e9194f5b4ded97e9702b84e31b31380c224f392e5fa5c720a45f64cd7020e25a3931b5871e4c708e77f4729225aa9f48f9d876597d3e79219dddee0efdd16836021dbd21692dafe121217347cc128fc5eb051e6843978ae17478ef714957a84c74656ddd931cbeb43e32fb0a448acf2f90ee98d38522b4fa9aa36be4fa13306e799d4c0cb90ac0f73cbc018146d1b0d6bf48aa446a5e3e0502aae9fcbd196b36b6b7426fc10367febf687f05392fdcf878863de2e47be7e625d0e3e3e94e199f055c0fc65f76c41ede43231873ff10eb854dcd6ac9b550ee8533d16f81eb0e86471d4da69311c47255e78ac8e79ab36ce880d6b135279fbb5a712adc5c3862a356af49e9c10d5b16f4e5dedb80914868111e194745b802a0292c7c8564de28ba8e71a44f7eff6573e5434e65d496cde5b5e62cfa9e2e9ac85a164dbff5767983e71dd2661d37d9027a27674ebe3433731a606db88e0880e91ecea8134421962b3f68915c9f6a5e1992c56750f99bc313fb30cb89384c72571a1a6a5e3c01897b691bd70985352217fa8a67f3252a06205bd1a9931d1cea3736559572561fedbf3ac4c8bff9ebd7f3753ee69a69ecbac4be6357db7f4213b697a828edc716ac01da75c1d46098c7d5d6ae6f3f9a2903588c5b340c9d47c234efea21b700cdb8db4279afa2117677e824e627bf0f2b179c864ba823926a57825478395545f130886bdf2a7c55a2647a888c3998b750343d9cdc602e46b7b09a2fe9ef74db1ffc46fe27c254c927ce51b307e96a571da7f3f907223fbed2daedbcc96197e95edde7859f3b4ec6099f791089e368a68a5ba0917ddf4f50b93c0c839ea36cfc8053811f8fcfe6986e5fa9f743119ecd6c3e5fea1dae3ad7eb465a89e9c68569190688a8d56e4143ceea3b11fbd9de67173d5134ec8b0bd7d16560ba2be52345ebacedc01a2e03e8183ef91317d87b2e15cc6301586ed829d438e4ff1d074408b332c8ce60ccb6790ab08c228807509dd4b39f2c227755f6b039f5cd413ad6f46c9ec2cc6a79457529d297b1d9e74ead9bedd9bd652fb31568a8e2a9e2b89e4e57601bc1d960360232cdb30cb502b950ef930d54c2c0692a684cd44b0472995bd2b41dac1553ae47216253d6640d2653a033a862f3118c5b5d60a662d240bda5f4da51092eff514f61a425c5b14b19517ec1b371d240cc30a0739273b34f18a72a69b1586802a7caa6cc8f5817a8a995695d063c9dd26c3d45feb0f84dc8a0773151cf9a537664f942f351599cfbee0558f441f5c7ad320cabe305f9aba570ddf6407749b6db42f9ce94526a8f4170e735b1dcfc5f0e090af10e039db3747aa9b4f1f26acc34639ac8b60557f7753e2c261a29852932901a4093b7f307319cbb228e26eec289898b3f8ee236032163293b8caf64be3f7ffed236f1da688d958a1bbb79dd45026884904bbb936c1ebca7aa6b0c68aa8b667dc1575729e4ecb4ffa82ddced2f4571bf902c52fc4a0ea3f47aaf5c243ac2a1fc19f825fde5d9fc8d06d97a351eebf4ae1846aa62554d57cffdb3f3377695338f8d598d723289ff3962796e8065632e7da9d8dffe2636cd23eac15a60568eefe3e77c561906555268cfc1e9342417b1cdf090cc16c79939b15a9311b0210094087dea22833f74eb0e35d44259ecf327dc84f3f24b8c2bfce7be0d97e00d2be88a150a0d557ff963b4cda60eb99935951d288768b4b2649b717133517f5e3909744417c9c3102c77ddd285976cba2c89e2b4f297665632d7c8652847c4625038a6670169772de0550066ec6c2018f503cce79a333ecc0a0632334df6959d2e3b052fa47c5c84d15ceabdc80bd6be0ea2a5a8d5e374e0e9a613369ca8d4cae3d9f98755560b27b2f6e47b01ba390f5ddeb732c22b12abd225e26ecdb639b08f3237e488430b3b39f0b63aaaef4907cd003a8f2b4c3bfd721d6c3fd3a5f062d72746606a529ba34251ddec4026f40d262e9d527ad84fecf5bb2cc8601c2a38437098aec2335104842ff1c455e5d17c136ece8d461d7a3bd9a60339c22d71059e09b3603c0565c0345684893b56054ec4d3db0bf15546cafb4a03bd7775c3157e7676bb7bdb7baf3100396c563eba1a12952503eb6ccde6b6d0a42d456743c4ddb97f5994fa08c5fa41315080eb6b928090956bfc6252b232f6e0785d233c3adcbb9370b59c35b0dd66005d516befd1fc843df8e68fab19858b91e2aecd1c8a88b0fa3d4c2fed2995ee87e65976b755fbf44ee183f9fa08848bea325807bce0b7b61e03e50b2c7af9b360532a17a8250cf6068fef0198738c82a5e58961c54017e343fcef7076e823d63b4deee472fada7989ca7a213d06a4e3eb2d44b16e5c94b1588321cf6c45a5a792938b058d667e1730f8386dfedc50ea0a959b78f12f2949b34b181f90bec622515227dfb8a5f6e89d2e559c0ba686153b218d2c50b67503018e22914ce9b49d3bdb7cf38172db1ea130baacd640c111614e3db204b3b50641d8978dc14b2afc27a7efa819cac6bafa8166d1c127e2237520d57ad38a80146217a12363cb1f8a720e328cd8f846d379ada43bd4865e4aa633c479bd448d205b2e43befa63486c717af84a733f1dececc127c047850aeeb8ce677612f5966e23d92c1d3c758aaeef82f862c1154fadd6766e1dfc780bb447732a5968c0c78b9af4a9d669338458b57cbb77910a24678092857c0b903152035bab6b1c73f7b667a08cd0d31128888de3ff1fed24866eb60beac19c1b139f77bf0b9332024999a2d56975e691fd7475fd93622119d0d725bb99c1d6ac604d6b6be09d6d29360fff9f84e5318259a67fec08a006d9772b9410ec6abd4cb828b898c625c2fc35c19cb9a6cd3b0073baec7b5af254d21de8e209539f560bc80ea38e33658a68262622cdf35dcd6618b9e272ac3644c91f27d372c6297d8e37201c6a86a7d3accdf579c15246276a0009ddac4021755f4848d10f714e9da86eba13f461e6a12edb1aef2d6117986120750d609682bfdfcb90ee3cde8be54d45f841a6dee2d5b9fdc4e65edb7ebffcf3cc5c8a4e1c6919ac57568be23bd8283319ce11fca3caf968b057432f163f22e29cac30b8154a646ca0ef4fdbc7770ee1451fdde9e9d651992d94c843d4eb2570975528ad9f8c193f7c681a43df28242547010e30d75fca04f39247c77d6c3715c25fc261ecdba16844bbab23e4d0482bd1565ca9b526ada9b8f5703661a84b23070d85f3e8265b2ce10750c5d798f1a8ef4d51a473ff4d2bf4be615566ac796db9fe61a224bcce05c31ecb9ab7bc43a609944a7c9398a7875609ddbcb556296f548a117847df7d0afe48a5b504e85b0d7ca589103d3197933a744fefca795e1e036f964a4f14554d5cfa0261e25d6e5e02f86e402906d3637a2352459cb1639f20faea6f0e3fbc6a39becb1b1b3a791e32e85e5bee31be685410adf0c11190e20b7a5119b90e83f2cc4f0de8898606bb6e64165c95d4c5eae472daa6836a888ee4d9a79de72b8fb47a9c9c0323a2be9106d4ee9ba8b3858c256032a9caba37af94df4c7b0adc2f8478cb879b6d452d73191b0fc1ce944df3f4809cbf3ad46eceb3ba4abd9679410f45c8aab20dd72626f235e7c0c934b4beb4507def24ebbdd7a507943c81d54bc69df578aacd9ed0bfd3b7809dec345ba084d88fa9c34d80685415a4d5eaef9b88e51432b2b2037186baf123a6257e47aa56d6531923d38178e8264dd315e95bfafd8dacaf901e354b0f58f135d638df2c0f32453205c7aaeeedf8c102e11cfddea9a98d3ac7c385d71b760cf2afeb1ebe1d64f0222b9b101893d11a74ed175297c1dfd188a2565fbecc6bb07b56ce3973322a965dc5a675587890cc65a71efc68fdcdf1a023505ef0bc0e6b12dca5860fcf1c6c94c2e2ec3a72b8a019d69c82d36a73738dc3d17d7fdfe992bc8e18cb5d3437f1f619dd318b95d1a56b6d273ed79ab2655d83e2dd63cb6f1f5987eab6bb21a7b13b84e2c619b36b842192c3f82c755d8af840675b0bd67a655d641b1886c3c9c147ac87615ff3e58085a879b21dd63c1616a3712279ec87d650a2eed665b797ad631f0ec312f343979cbc49b99385cfa92841cba12d52777df565545a1deb07800a15431c0987b4a543fd5ed6832e80ab6f4b4d9c9ec419932a6ded4759f5c7630a0b80139234b8d53117acb4452c60b477ad50157169a89bd796e2308baa9395b513a94747611c7978c82dbdf48d716c3ac181ac2b2a4702c02a324bd4c5e089d989d020ebec9963b5c721a95492158f54973b7fc1828181acb3cc8078ac095136d97221c60b847bd2a52427383ab68cd1f10b92738c13203fdfa0b78baa09c1837be2498667c459", + "0ce980442336d0f427db869a6799baa6785b5e030567c588e2a7d2680e96c11b7f415fa27730969e0b1c3973b5f3192d4e773153def6dcc09dae29ac44eac7c42c2666a356fd4262197bd5cf6eeefcbd662d104423ec05c19a2e6ddf1834a3445a09e8b1062a1320a5e8ef13d6ebd03c19e1813ccd86fd68b46a", + "1ac8a509db7bf4acb80d8d394a5abf47c273b2093f50f35049e749f3e16cb0fb", + "47cc9eea11f9f3f9aafa23bd", + "088888333340b3a057b05491fb2402301c8654948aa6d5ee1ec75eb045858c22056fef0873d6675f897126052923a47a30675b266ffb6181cbd29ce2da3720e36a227e4c6e53328d789913c0d9cd149a6e49293996b1be7d6c513b24d876445a950e723ade3efc36907c840b9b8cfdb1503811b4044d931a0009b381fd60a5bf1e73d16348cb57eea672709875fb9d56908dbc729d5d7d322a17a41d0f62c9af9a013ab1e19fb7b6c6e7fa0c0b18bec5e3d3e92546c77e3753193389e5fcdb6a6a1896cba461343e71ef7a156b136b27ae6f45be9368301cfade203e9b53824d70f07de9abfea1968b8ff8489b9804422ba05ac3c3adf23ba0848817fa51febab5e9b5500100310479e710b663f064c1ef101c9a5320367cd8bc6e52081a32f070e7d3fd6f4210cdffdb9fcab1de4af5b06a7c6d191dcc12b25b3053e58952bfd1f723afbf570796946c1df9579ad14ea9c8c30389c1de4d1e845c764fec5eb8faaf4c558c5eb5113018c6a21ef653ac7d7f5b6c7e1a8fd48c6f423e9913436202da176a86731287db7331db055508acc94168888040ee37b3c119c8a0d88360241d68745825fe480324a944d56e7cd0375d4d33a5fe7a3863c2aaa899b2d24f65b70bd804039116fe959c32442c9f0b5470463523eb4336985b71125fe5235cbca0c88a6f92416d038e144de5ff8ef6ca749a9e239f02db505bff8e16fad1cba8b1500445f067a674142b6413e9dc0f432242d8301879bfc11fa86d1ac9992ab12319fea8b703e10a13bfd4b017496222be26b56af3ef67610f904f0ca8a3e7cc249ca8122735a542b289f13922904ff23dd197f8883c7ac77150d7331316ef94e0cf13b6ad95070420513599100b0a6d117640b781c622ed7ef7ead29476b3c835bd9dbda2203930bcee7ac01c3b9c89da405ee436ee652ddcc3e96c7f1a94e200eec9a4a226f3cf7ae5725068916e73b61149497d11dd85157f895669f51978d1bea8fd2afabb18d082365daba2682ef623109988b7d0e27ae57bc14d86603f93b5ac040ae52d8db404ee27e6c34cd4246f40eccf9d3f8637a4615a4006918b01d34709bcbebd02ea72958d54db3e87d69e6d783de2f1841029d6975eb11f9b076c247108797d5368c656f888092b82aa81aa26e164e038b359bd68801c22fc107e4083a9d85fc254b002ece9d4545310b0cb22ec1af04a7ee31d210ede4b605dbdbcb70e4301989422ef46edf63f9c96de9cb3f70638b51df5c0abe79b7af8cd97148f2b7bf394bea0f7bbbf6925f83b901b87a6079f2c3b38a98fe1a86dc7f48bf97553701834f557451df4b41e7db984a34432823585380b45c1b84813d6aa21107cae252923fb4673cf660a541e65610ac0127d238285f53bf329b62169f3e42d5efe268dea62578e97da59a58a1314a1bd46cf7a7cae772814130b51411082e30062fdbda1c9e14d6b2bfff89d0379d32461f3b8e833b105f6a89532ae748b5fb43f283fc86450404e8befb8442b65e338aa0408303a70e9c27a1d923d9f2a06e7c6159c50bf2e3ba5b035420ecbd9d0b5fae478eb1ab72fa714f99d00188bb10e60380fa3a3a318c2d359ea3805c2fa0dde17ee52a504f70d6b466bd38d1dd4196be336a9ab4a9e573d1bc6404018a119f688c1dc2a8ed1433e8a8ebf455ce3808c245f0220f0c12d28c771757763bd111ab829294e2429a6f7a59858dfa1fe0b806e986d40aaff934589fefd75ab91097a979f26bc9352267efb2d82c4738e4e6c451b0d5adc398f546c646b9e6b8fc84e91651a1252d5b805a857c7798d102d1e6f90749252bc53588348ecec0897c79f514442fe3b27608c95d0cba999a7e0fbd7f601689b4dc63ecb9ff553ff12eca3e9b26e3eccbde28770bb6aff7c864ad6be77fc09f81f90df6efd0c4025d0916ab5197ab846dfe6121c462761d9cc87112ebbca197b0a222fd34a15b824b7eda06a56a6ffda760fae5f0b527e2798f01e205a3f47947a4bd190f6abfb1dab2e3a53131af95d593bb57e4f4af506440cf20636d9fccc449d9565bf43dec8b6877337ca5a43900c1dc600c877b290342914e909aad8c5f0755bc25652781535c057ed5ab2ff8ad4322a8edf3fc1b5311dae6361a7395919725f4cd87ce0ccba37c64eb3618f9c5a53644ada569b90cd07184fc048f1b589eb29852909e75e7116ef96a268ea85c2bd257cefdde9222d7eda875a2a3abcd3a02a1fb470ba967b20beb54914b8b0c6ed464ba978088d7f8b30d098966b0bde82a8f1210f5d0c3405c9bc73f703134d0b6ee13326f65fa0b8154f4e30808997d4afbd060285942ca1dededc3410a099881492b5730ab7bdc2a4cfd0068f67766d60b5d4945f121459d2083334ac878d067bef644b9ee427bbbd6c9351d7b019bfc051c05ac301ff3792a1c687546dbf6a07a0cf56717374bfa1191c22b7753f6ae02392f8aac9207d1ad0fcd57c5c8b35817574b7dd90a00cab75f508f8a234eabce6618305f94746cb6a8573389d336bb67e1b0d2b6e9bd3959ef344e1eb245b522c35222813b8c6e82df48987436b5592025e9786ca63b6d1a064223bfacf59ada713c2a3116611393aa8446ea79b3cb21e96d13b659ada2d6524686fd46ec66c1b4d8f5ae7831840c9e3db64d528f83a1cef1e0a586a783f8306cb261ed9c2905493e74d35883fcb39cfc5745c282104cc3ce804999231d13e1bc6f2c022f05999fb57575bbdaf00d7a990e17dd2f8b9dfe66a637b42f58ee49ba60f2dd9718d09d7025b6061b2087bc35f0a8c884f5b67a5e18c2b4e857d3b48b79dc7cab6b72f572d22987566238a7153ed6264578424f1ce091fd05b7f14563fe12c76104d3373367af3ed3aca694a21127b5912c0b7eb1ddf9d4a9f03f660d49f7a7f0fb42797fd112414c3eba2b75a04282dcb9645191fd3dbe376e7f60ab40bb7ca1e991053a1912854a68d7dcf854201d1f2c26c6cfaea32e29d80847e6288274713d2ca973b91dab97884326b280c6f06c65b8fd25d314be29139961051a1d8699467d02b67991baabc9b05629660c243ca3b0477362d5e6bf9eaa33beeb52cf399846c77fcae11a89cbfdb2058e443ddd44fe202a3ba5c2efce937d78b9639781b8b2b99077b433189cf3b0733ed73b59bb194c9a98c5aa0cba6e71d1c5522f193defb9e31fd2cd60f22bedaf7008c2fb0b55a8dd52731dfa2bc69b40f835ae95db040cda6a4a1588a5ba4769edfeb7369c1e9a3b1cda293255b4942881d94d771b7b82460004875e71be64c582f2830c5e80dd6de421a311c5852f4912bea1451b0328d01c7029867cf9af99284cdfc1e1f0aa0d8c19ba9bc035dc270b45724247137da5d3fc4daa09e7014fe1439889968eb23fe124f067825d5f7b304f17a983580e009e0e51630ea0006dbc74a30b512cd9eb4d0b315a0ffdbfb581609ea9661b0007cd234ce43c17c92269a7519bfe99c2ca94b5cd3e7654946e67b37d4270a369266db6804336a446022677a024d44cc02cb04108292dc12f790578a0d61cb6fada738902eed3afdf1850bafcb279f18b5798d7466752c6368a594533baff5dbd17974638ecc41753b184845206c79bbab84dfef148eb7f1390f8cb7346a14c88caf540c241cad11ce8869be3bec85d029ef490fc5edacf94fa962be39a33c8efefcbb6b43960d5bc35f8fb72038af3801466aed141b50e9ac7dcf1921f7a6abaf320ff02ac34bbfac265e05e27495e6e027e673a48a874e6f0c33827a050fa21c2efa789c1e3df2ecda95fc52ca7be35dbf17ff6c73f37cb236e5131542e002913d177ffb21ac450e2542e24b894650007c36c52d90f83731009a7c3239ccf11829cf0fb6510d9924e927f14d6a06f8dc772fc9b028a8bbd2d3388985f3e2609abbd08434c46642b97240c9380a831bbafdc5db77be63a1400cc9a4f7362a689b07a77162022c6ba7a1bb9f0446a0b6b460ebdd9111132694fa5f1b29da39be66c5179849ae9720b2da0a012d4bdfd1b18b8fbef0d5c32b92c351dcf2c599f069c3b53f622fc8e904f27584b2d97d43f779abcde6dc1413c0a677dd187b28cfbcf7fa6316f0967b53977432d45944ce8ebd2e265c0bf6b2870c75ae808fed52aa35421ef55667ecd6f9d279c9b91c9314bd9411bce267d6ad52b1d910b3e65147c3eb6021a0af98707408e66bb11ca5abf5e34b2bc85b144fd06ea56f5d7f8939fe0cfa4862e7f306de069cf85f4aa7aa97c6848594f5a6dbcc718d2af77497f4b9d5ffa217fc301127071e9bc9c2c9222ba90e286506e384f321e622f05d81c114953d0f7e9626b74f4a6bea8cfb86ceb4575e5cf4fb84e9efac8291d1f4153ad3cd9a34ce0ffcfbe30b6829c0f986a4f85d63b602ab99ff3934b1e0c46e55d56eb479b79ca0729beb59aed783e9a3ccd55db8d884733dbd93f9fd7a7209fb92fcc49826b2d4356ca676f01b0981637897b3d2f90f37bfd73b214a398a8e4e2f9e5abec01d8192ca690191255dd8304a2d95a69331288bce00385f462e942f4d694dc3560a263c8ac2b5cd1d2c63b90ec67c32eaf5bd947bd8ac730da9c09ebc6888b0b4f3bead157aa9d31c2802df8ff0e4d69b7abfed6f184bf35a16ffb5677ddfc4682322128932d57fe4c32f21e190e1147d8e673ae407b1dbbca31331310b299e9f3db08ebfd2dad3158562c2e47addcbcc831cef0194ac8ba9778d0103c2955c886d439967bf788eae688f2a7459b0ef3bd16808e8d768b8962a24588d918ceb2cd1cd611b504019f65216beca212f44600cb7fac77216b7645c49f18064a3acdc01399315084dc9ea151ee28534fb31628d190bc540ac6b6aba572ba51aee89544015e6fbca2b3c2330f2ac1f68849e99e1a1f7f523599eaee22720392ea52259e26f1101614d4edae481b3783af4e99082d75dcca549049290731bbadd1ec0a93789ad5c9afe8bae44e35b3e59e562362964", + }, + { + "0410d1f8bc890649c250a3819766f4496f339a6384e34acdd72b3a87266edd2a7eae223a372883f978277a108d6e59fca1f35f25d7a9f3aed42d35fa9b12241ac04754f76fd8f0e8ff6af88cd851887a45e89f1c9192ca66bfff605b128575d2ccc9ca3ba1ba23a0251b2cfd6db577b29d17ce2ea998946997f5c4a97a397c46024681a400a54425c071232d269adfc3b1adf15b4586c4dd7b8886f5c1023bc348bc674961ac6e221d914f432c2f06dddcf738227dfcfff88485ed45882809d0e57019461c88683919b87c45e78223c37a5be5f758e4f0dc6add22f2062bc2eb9bdc31b8649af17d526ec339f0e6fc6a41e26299c65276302f982235c3e5205ec1521625ec08a23e766577664b73d18d5533261c859c4cb4346feaf7540a56155c6c3a4874dc86ea42fd518d71221ac65541e2dadd2f8e129e7809f2835f07dfcc4128401dae2b5fac7ced1d9e07e3f348c6cd26f55b3893d4418557a18c366dcd5eadea0dd84ab95437d6f23eb9e5877fb2ad740ee507e2268c39c7186f34e5cee2d0dbba1a940f516a018f23e716a399c317a7a81f89cfabc296c432cba900ad79db67936f76e4d97874fc5f8a9ff84eb7a0f6d629c581ec5c451e27ef1ed468f93bfc68b2e0412a543d89dfdd812d9421236a4be9eb374531556c207340886c7b84d42d651557b952e0982f62c5c383e92dced21905174a5a836acdc3f2393e770d6cdc22c39575a42ea406f36889dc9558aeae5dc5f8b84862850b55bf4accccb6a8ef793d641d6b08235f70ad3b0605eab462afad1af80fa003645f4d302b03d81a7d167e9a8187bee0f76b1cfd7006b2d2b55fedad6e8db1d3ecfe031702dc327ff2b0197337d7542f42702cb276de852b3d72d9acff8a7feb8882028a5e340950e523c41cfa184b3d8878effe56742994e60240e58cbfd01541d39fa007a9f0ecccb409c6cc540354ccf35223677cb74e7ef7330bb60420f7d7bf97de6888cb343cd4fb0928fe5df5f1b018592ccfa7aac6dab57cded573b5950b94fd935f32cf332dd85b2b36501de6687612371dbcfdf77279d647ed8bdcf81fda8b7e0c5ab139330d64695d814fc6f761fd141dfb0c8f74e2d7616db3598d8de40b993fbdd272ca37db27b82aedb08bebc4a8e6d0385ab20fbc20c215ad50fab8e93975bcab3ff38667abb0545b3b3f20e325f01b80a32a3cc3ed51703d4b2826849ee22fddd5b544816599dca0d8fc84feed9f7e90caba53b70bc3f457eb1adb89fd0b67d2c0ab53264430c61d2c4a1b19ea99a9b453fc6b5ebf5fb5ab799134769c9b495c479c828bcc49a8f993c3127d5cbc31afb89c0e78fbc323755457ebf0f3344d3ad1cfc59d186e96ac31a9298e655b3d1df74b95f30fb868631053540388a13d597002f689708d35a2365e309bb96db8b1b94ea4c8060c2b165f7f19e72056409159371ac9c44f6bfaad9b9567094d18c29bbc8aa2c8b5b82735d20f55284fe68186004b4a4fb644fd52d9645b277c1dc238a764005c1d2791ef36e71786cd990ccee4571d9a9b1aec757e479cfa645e320bc33268e05af9cf90e0e616ae7f237c637a99fe15b4ea8a3232262d96855fa248920a28ec03f77ce4dd93925db60ec030a7be455ba9d08edbf6bb717b1a13c3ac1deb9821e21505c0a8971d5ea5dd8e4c9cd3a845a336209af191150ba5d9b8c2c450e3a765e8670d7f846b2461f971fdcd1942704f620a40f4204b99f9035bbd543f64b927cbc7a74f32cbb12c3caef955f169a45374e4479430e08d333c4a877baf41a27a0849ca3a157b6651295fa71ac94b6e3d30b5d160965e93d2a81b4d575cefd264399c9e4e17059f4064465b2d92c96ac27e3b221499b5e642d033992c236b905c072faa1e34495f9890bac6228330e4016c061605bbfc478c30e1b8534c49af54785972aca2d144328b0a540e3b3810a73e26acfa22f48652d53ea521875475ffade8ab50b9f08245fad753350f63dc4e898948ac7dcefe520ca47394f8e993a6d13ff68a2f78cf294f235f5f863bad10c4f5bc41c3ba93cf5e076357f0f7fdc136f34b656b1b8ebb3eed1ac429c7d4edbc902f7f4bc24ea9c9b200b9a9fd7adff0c6445ce1d2171fc031e3e9f8b8d6b448053393c8813d91333d4bdc3bc5bb2b8bff876cd29e8b92cf6f7bc727517b6f57ae031f3040b0637dfb40b8c1fbe44cfb6bb9cd0a445fd9b3daa1da2b1c4a82cb4da1fb8d525e0a4d9ec30e9aa75b951214621c58c1f60c9b97e6c6b330497e7dea790a3cd8158a76d898107ff3a5910707ae60c8a46c633b522aee83736d005de60b9abe202435f8bc4577b0eb08b7f2b617bb5a831e95d6488459bbf15919d764b39684d7cb7c9310f343fbfcfbeeb212a90d96c7a26c1026c5cb171ee4ef839785076e5084026077455c73404a2653f333e9bad555cafc1a9613387a02bb1287c380d7478238bec8943208de585bd18b448b6099565cb3ec70ec6672a778fa6af9d1b17b0970439da24c7bfaa74c85ecd8e5852e42391ab2258024ccf91e37f2f0e86df958b197fafd12f4a45f7990375f1665a14f7f5374ff7740f89677ea8660587fb80916b30629a7aa88213bbf80512421a0a37414a2eb549b81cc85072cdd87e4e69d97ecc63f974e60d20de0233101c3d475d777602b12e2f797e9237570085b0e9f48d4dedf233eb1301ed4621f9736946eadf599bfd79157c0b4cc31bc273f5c6f133a4e3679ff6797d3c9b76aff4bd8ad40726c1703c3d8b78f0974b748d0265b0a75928374f91b48c2d2b2c11d8b6e5efddb75009e4db72e562be59efb0bfa06808c89f585a43d4776ef08947a77f277526777f0b52f1e0b5a03aa560fa45c8f30e584b58ac1fc00b104942b7b86a3cdee1abea349dcaea4e058faeffc567e2c3b03e1c5c4ddc675e25aa15de1442bcf5ee972a8c5204ca5794694759c13a2d716839dda61635043bdf1a09e35cb6d93b4df3b7a00871f79cdb4ee69c79041dd14deb7754107b8fef8589d2d240ac1d8eafc52ea847263512651bbede2fccaf6da816b1b892319817bb6af9fc17078ab6cca95f03cf8426249fd4f2bf91921d39b8cee24af07a52bbe54ca7fc4422a310dbf2149b763ac0060fb2c59154d2cb0da1ad4892279b4e0ce7f5f92c189c3ce48e518ff48c4ffa9bf2b02d4792f84534958dc6bd2914ba010aa32d133f6a07bdbb87a237c7acc3ba5cf101efe947147ed4eb3bfdffe5fefa991c0dc8760586218d286944c52d0f221e0101f74826761d01a20af187f9ec1115e9e98bff6fbd7c8816c15d33c07f51c171490997bf269951218ae92b66fa3150d3bd40336abccb717e18b53e8806fff94009910f202a5041b5396d1c339e6d075bad4ab66a0637d81eed1696e4068024001123204b8371f0bcdf0ce07d79f7c917327f7138a75947846fde68665e9c767fbf96bb3308abffe7a8d05512c81e39fa8dab2334f46ab9543921ca97be31076dc7b2a0d05e90b7f7610d1a391b442398ef56cde3b18737faa8f282572389b4fb3c55cb8ae6737257708c808bc0a414bffae293bc69cba702ce2959e1a30edcdf64985a4b0bcc927c5912f819c71cc9b1ff5d6e5929055be72ea5c8c1a4a591093deb5449b7e6b60109be1ac0cae472ba31e1035ae65f3214f50ad699a077a2de52f7180addde0bd78c2698470b1af13cfbf497d243c9e738c4cdc265356543885c5b933a299f01a5b5a9ecb0b4ddfda0c28573064f6a3f142801795d66bcd5c31868fd3207fee7bd98c47e4da26bee64e1617b20cbaa34e3abbe31126b06d5737fc2b577b19d255a519397f3ff8668d0e7d401a37e368729e4b83c5fbf01c32ec478967605cbc0675f685b5eeeb42fc688216a0667e1204c995c9c485e6f7712d80d88edc9594528b1907790549756dcc8b0d32091f36d2b4009639e68daa130e83a1ea18353ca34f431c548d91c1591ccf8b25eec1f7a3c18ddca71b87bb290a5c13229250c5e193e1352072f6798ec504b3b4c6aa578737332f52baea7bc4468fe6d8dfabb9728cee93fee50c8caa113f5ed7e9b55e21e98d73a377ef68be7e4e965dfa50cf863e6285236f11ce80512c573ae2b55bcb43cf6ebabed6783c250f991f5f68a59dcb2ac13a3c8fba8dbb11c79dc6236809f2d7c4b0ad3cecd24b85f1aaed9748b8c109f2fd98ac8a53bd52f18475598d67305117de8e03b0d988a2847539cc2efad520f86dcd82c08ad4b10e490b9cb03bedc7197bcaca55526cd9c8a5a5f69f7a1697e7e31aa76eee597c386418e89f06b0b9817a83d6cdefaf9594548b33cea1cbb585e55df3d3b66f0b1a88f4b98ea4720f1ef5e6ebe4958078ea0bacb8ad776e325ccb252f81943b9b1c2f54aad3c7baf1bca0dda1355d191f69c5d8163c464898116dc89201032d1e3281c8054882f60522d3a65831bf779a854fb0c195f85aa66522386625658457e74d5c2fcf5234f226da4a579ac1f11f11a1e0a6993a4dfe5c856481ebe9d8d2363401058736f7ad104104aa03f5c91496aaba2fe4072d418d91c2787a9b4ab0cf4bb65681ad0392ef073cf2fc060692b0c0c194c8eed5558098cdfa3317ab02626159e40e5c76fd64b2ef60b8f5f368b6b4fd7ea3d2d3236aa01d9db7c8a01929f9fd38557335b926251ade1a0d47d0c1444e6416218781c1a51e786dbe9297b78fcf0d0304c62929e00744ed4e14af926313a9849b2a464048bead075044bee013cbe318920c4172138560629a0ff4fd229d81bdc7c7fd1086ab17d6efd5b603a1991b33a55ca5b9e2051b7c140f7937adfaf474c2f284489d9b1e8c71d58f126eaa451407eacde9f0e86504f7de3ba4d830199a229de2bf39014baad6dbbc448501588ceb2575db0ddae005b81ba9914bc22b6d600e2c990f7843e553ff29d8008265eba7dac7b5b5a7ba6dc263fe0e262a7b8638a81f4720622c7361554b61d7b04c7f8b133440baeead7d51ac8b77d606fd0eae1c55ce7e8141dfd68d40ae3d8d2dc8a061085b4fb6d8a06263183869154618329be6b01c2890f2b5d0a0f25dcdbbfe2ec3597d79311edb943613fd4b59157df4fc2e1024be03d98ea3cbec7186ea9f4a431dc3743b9f0871b205bc0c1b3a001768", + "113b261414b4b7dfa028668ac8b0cde5734120124991c54f4dd16a87d181efe2bc15f6d0caaeaf6ad615f59ec5c2833904a34b4d34109c82e10609b387f995430e8c13d83ac34310d838af9efa32d7fed6224c0a33", + "cd762390b93369f1e207eb15deeaeb0036f5331e82480d180f84a76c3e44550b", + "e88c14ef96c7768f5dba9de9", + "8d6aaa27892a76fb05a2e96cef9a9b4b7ae0670a12cff95f7b076372456889fbd3b9b4fb5fd98b3bd85b247f15009be2f4e7a0329dd118b6872199b314e159618ede0381dd97db28743461ace1a694c0383d8458150a501d6c45f4b50d5b1bd47e61a51f9ed4929bf2e564f201ed0e6825170027d93e482c1ce268459d2f81cab41f0e7ff281430c16b34a29b5c76630dba72ab9e751bae41122b26121d91f2af271a23e818263f46e05fdd52f319d58330bcabf66637a368c0a8aeeb20cad1916d966e5e0b0de74cc67ebe57e3d1fe01e9743d42a931cb4b98bb762ea43ab937d1e5c42eb08fd56e70e911bdcc1ca4ca0604a329c5364b262ce2de282b4732ea657b89300cc7b7127ba4a2d08c13f581f024fd093ac09c2bc245be60c80e102405597fa8082f4d28cc954a93217edffaba3d2a397bb59ee89c8cc0f33eded78f21183bd1acdce64a923dd609a0620d2911f61e81fb2c8ccad8ad9d81157223253a121ea2bc60d6a3670c563fe06bd75688572b3be83cd31dfeac6b17cf8455267b481219c42034b2252977f32b8e6588fb05166498fa37d17c2b002a655b5711bbc21175348225fdcca041b1f97fae48fb1e222c5bb46b5202191c00666b7e1b2d84aca3edbee7a97dc0f6d1330e929226f8a76c155e973c1ab62c867e1f87be37788754e51825ba31af9f4722b5782ef782fbb70c391a664f252d14e49a805e94790135ff6bd881a687f98b42da96fd34bf240eae4914488af739ec15f13f048a7eb5fa94af14e8b6ac5fae714cbef6268b114813ca2a3920a7a9d5eb506a2ca211758de292047eefdb5a97e18530dcd8410495fc42abed91b1204d9b8ba9d6aed11d2d0fa0d931d46f93f2c1a560ef9f5f7cee1497be770d3cb07c534215cec12c1458bb57aab4d95cf4a15a5e3a3bf8e650206d5cac4af3193d169f1a57638d9a50f6b7c6985d42f7138b9226451670d7359351c2affbca65680557693d03458341198b8e13d0ea6abb7496edea3cd4dee2eb93695e668c7c0901c6809b8ef434e88b85a8b22cab6508b9560fae62900056b7c5c29a8c899bed45a2b5159a1d4929476ef350101317f77f02d48a039cf4cf01c56319cbba16fe908c49ed6f3face88867c0ad3703452baa7b86fe58a00ab8f740b4e8055164b0385dd3fa44502ffbb99cdd843bc3287ea468aafe4cc298a3fc180f284dbf78aa09e0a2f7d8593356eab016ad8dc505420edd376b66598a3d0aaa848fd68c4e07419b8b50e40febe2b6b17ad07726fae1f87e86abd01490a0ce24fb57b533c765504ee0a9ca154187bcf5e6828e3addc7597532643cfd992558d63b1acd00e7aa41b9765094217480c08c43f4f0b3f0127120699b7f2a5ac07c655b6143e467777cdad4bc21d4b57da4d8f9b9a7e4523d8c6fba3614b7f7281e80ff0f9004577adcff1b79fe443c80ca9655ecc102d5df6aab2ff6c3401f344b77666c59ac7d5b92bf4f1e2322f74b75e6ef2bf43ad9e018f164ae76a91451e5221bdf5b65a4fbbaa8dc31e6063b451edbbf4965307f8e65bfae87b15f2453083bea8484017228a9cdc6edab1a28834eed8ce07430f776b916b3bdd2340798955ce9ffcf114c3f6a88bcc4c7b6f2e3842426488c340d00f2c4d2d6fd3b6263dcf7a57f5cea6c77efba7013297bd3320accf033acc0833aaa8e8f95cecba469704214f54a1ed581349878a591f9993371f1daf92e55b2a4faf8f952cf785c687a59b3c258daef1b6d7bf9f904123c7384a859933c3ac31e33edf648a1be4d6264ffade860915bd118f0b9aaec2eb8e16b2015fc25e68caac77a3accea53b9b178f6cf48d15029fac12963b4277df037b7a494cb29b1d9e6d2148531a1f7360519cba5657c080254f130a1cc3ccaadb4298d7ea0223897e63d798b4f4909577cf9b491a82de0275a246bb1211bc4144574c8ef176b382262c0e087975cbef33cc616d32e0131a9efdbe8ad3d9cb5f935d3f4f409852acca22ae2a6e7450e9a426ec3b9183f93b4b7f89d850e1c7053c661936e0cde23e831a261b319b430da45772f0fc0113679d06f025983bbf37ecfba35eeca28de5ff4815a490570491266e92faaf8d0ad4ac8df106faff8fe3c8d050ae9dfc03a01ad177c21d7b653509a80369a668a97eaa532dc9867c32aebaf89ed36586e1ebbe1045347766a354a86ec1e8b2f30c8fdfbb6c5d549e7a84db81b73fb828499c5c4be0d4b2b7ffb197133a0ee18abb5a4e371be0ec0a6535507029316f8decde30833ca47493ffcab781d028edfb91c138609baf1054ad52a5d8ccb98b3ca5b138f253d99bd556afd80f71b39f36e0d96fba4e0cbdb18926894968aa825392f12d98b6497ff85a0e4a91c97f37ba1dcad30fe688b54008b925805104a61dc22b712685202ecdb073fad9b10b5b9ee2ff781f23fd41ecdec87f85b369a304b85bd2af126d08f79d8a9e2bff0b18607a95c4efe35941c5493c94e3f2f3902e79f4cfe84c138b83c7f32d7c5a125b28c6107921e8ac92f1af7da015b46a2f9169369cede770292eee8a5f40d080ea1c267c33cb7d4187093d486dc3911bb2d6cae036cb508e81ca783ab5e95cec751e39f3038003081a252eefa7cd913baf136d4e27076251da9cbf0c7d2586fe02b62ec786790ef08fb3ff3d79bd06868eb1abd9875920e14fccf6dc144e898f578b7295fb5f4e84cbf683722ce3597aafe3195e194736fc317ed03ebbb00d956ce89f7a41a334020e1a88da355d3b47d5bd3965a290f6fbf5dfdc8c8e6347b4eb85151e53a960311582235f3b546ca80a670dcb628fef572dfae0c101bc08c80f78d5630a793bdfe402592c316227f2333b386839a67e6ee8d9396fabc9648ea656a407670efaf80966034958f4a70fe7b920c79dea3d5a0ff05f3ed0516537d51a686efcb258520936fdd415345251c9ac1143a41be295cf12da5d4319e78e1c57ce20507490e5213ca7be92afca8ec8b6a07b33571afe6940daa2afb0dd4dcc1c329474ff8e13d740488e5ced552074fff695a04fc1b70755245895a1e9c387fd9514261dbb0f600ae03f4896e795d1e72f421d8572543243d662f6811eb9402b6a3b8dbb0f32de95bb1ac01b1287663d3b6a3f52339a4f6b27789e15519b2b59f2f4fc8fd33ad1a6e4d02cf0ddf8499f45746da424ee78e72847e3cd3833551b6e6fd6b1aa98c688252b57a1d97660ff006ea1b970a0b8fc7d2e313ffd0b0b85299ded47b60cd2fe9bdd7ebace4b0c1072cdf67231a475045990b35ec761e1dc1dfbd0c402296566eb4b9462979d33c9d652a9295ae70943f38adb212b48bd8ebe82722b1712ab6a3be6060297e2aa54e7d0158e4aba6975237e7c7a1e22b29560b8d262125ff2a6e5c1332acd0f6b5ba15b4a82d3631891a01530321830aa8f2e8ab6b41bc5b5356957a4d0c3bc3eab04df7700305a95d0f9cd18d486c675c963876b25b1a0f78e245deb40dedd14dafdaa9d614fb06eb2538c5411e13be116c76fbd3377ff212eb07c5c035612e4cd7a1de2ceafe95832eff88a9bdb3595cc19287fa40b8d244afe9bd24dca40db49893602a59640d7a1b8e7475825b09cb0cee111864deba9d3d1beac03664279910accb9fac534ef099e398d7f6e3235cef7685fd1ae46e47da093135741894273c0c3486197c26057044b10faa57244721328b47e611633d16d3e4776d90309d68ce4a60d3ecda26c9f39c1c6da67ff79fde4977efc5653d79ad86c3b53090003bb72e78aeedcf4c8107185d9aa65221df4e2104640a1a083845c01000370371fea2a6bc8ae43fbe290949da4e559d3867c16df16b143fdc807616f51ebce8d05bb03c2b0bd587b95e3f6a15d907aa9a5b11622ddf4c81ff9fda4bb49d3e9577551bae649cf64ac0cfd646b02f6f16cdefde09a55e77afd16c74e8a3d777d80b7cc42c51f618a3c467968631119f11ca4385f0f5713e37ab1133b692de475db1d44fbfe9d274b9a09e673dac88aea74ba88cde8db3c831e9b5a0f1e40261281e5aea9d4dfd48c5d9e173f4d9cd56fe7fd610909c838bcbe1d6c729e151ecb4caef511a36a14b03cca7ec5d0feacb4647ea5212a11d18cbcbedf78443127680ac0b1bb65120b4197570288226830e2a92b380e32387bbcd3be2c77d6c7722054d849be9de459cc1832ec3ac8e7f60fba9c81cf5fbad37d228eba137a23227d56cd24970340f2b7599aada9d2424cdba8b50c2b97244dc83f7391e2ceba5bc0a11ba547c142126c791265b33a3db6238321a5f3273ffb01e42adee17b898153e41818b91413ec4f6386ab3dd48db875afe659db9eac94d16f850ac179d087d93784d607349e8711f5f96fd514e8d096de8b4a74122ba914520e93a11fa4adf006700e122e2531e1f39340cccbab4862708d69c117d3efbebabc14a0231916ae1ee8285727c9fc980051360346d53dfc76aa5a11fb1fc8f36f95f741e913bd2cd1031e508b320abd2d3a62baa400dc439969eb44e6abf8223b29d4025c3d1ca08d2dbdbbf9927c625270543e8c0cb5ac5bb5d504d224e66a1895719e4f975d819a95e54cecfa59ec8e385aaacbb023772fdddbe093afaf5a75e63a62d51926254e5b47da1e9b05851196644b9180734d05810dcf3502747c4ece652b67674c02aae74f20d07de2ad5993b3a68d10207eab6be5be34e52ada655aa96c1d82df9b24c2acec35e8f0bec9131c20d0ad8936880af87215611b80d07d7a741a12d8145bd05066c6ac171afd8684b92f72237bb0e4ca4aec1ec280e39f36928852d5d8d02fe463acbad8ecefc103083fd4298f399bb254e7bfa166638460b760ccf2b0f5fec0e3875206bdc8ce096274643824acfad71ba06441c74788356caebdd2208f6f077b056fa9d85aa4357e93bf064a776f5f3b0f288d0afdc51558c8f25cbee17247364c2bb24637dd69017f92bbb43024d9c773439626a02bd0cd44136a642c9c5ae593f32eada790c31a6704030f2e07f1173cbc0dabc410bf9864214c298a6283b3631acbf94b8371681ba81eed1aa81ccf258252d7f90fe733ac770b9744d0170cb554b39e6c72e05919cc237f8f4d7f3545f4d2732f4c9473c77401dcba04c0fd33efc73219f31c08dfab26abee9a7cd4ad3584730768fae899fc", + }, + { + "9c73ac05648e0c50a3ea3a8eea70841e8e06669c1e7520c5e25e093769c4b005375c0a9cea16ec8e00261ceb96a00924a66fc0c4e4e089c63e93fea857aead8e0ab82af4ce1682cf3c9fbad23fc3f7e632b7aa169834ddd6c7db7e1e892cac93e4d787b2ed0a812aa93bfce8fef3ce30ab794743ad241974ff989288c43e1ba815a25a03acdc2d5517293e161d0c46c8858d0b32b124a6b0bc3838807753288cf6838fa25fbcf876e6368c0342d3cbc860d6fa12faa1c2b7d9fb37504e60dd44e36ce74229dfb80f1545125718dd1f78b31a8aadbb4d6494489ce596fcc2dbdf2ec22157a1d966b61e780d36552daf084739b602861a96ceb67b65b23d40916c02b2c3a38c2a59aaa266e1f8939000dac9b6dc50d1731e87ee833a2cc3cb98c57e5b680a85c1b428289520bb252096efd7723fa8e55d2fd4e16900a435986ab3f3d2bd799471a1bc07c1772ce10d1bb8805a6065b8903999f9393d2ed1a7e1c57a9e3e0e10dfca17a04143814f5f3acfb99a34712a6e0a24a7485279ef343e69d27c77e25b41f9fb833d7cd29cb6a15551d5c77b43d19feb19f2640926a272f81eeadb792bd474ae11f080ada72103f8f7ca733a9b1325b50589be2b2b3023491afec246d336f4e4277592ce9695c68d5f39c8fa4cedaf51776d7ca29ea0ecb89eaefe71e5f3560c68e8dafe7da08cdcd954d626418677b8f3f45b9194474a32f548a4da3bfae6a3e2c0a25f602e3b3a821160c397d77c8bcbd71c5f1e669213af36eeea30d48e12953071f55eac2fe0bd8fa355671fe032f6fc9214632428125a16fc8aea8a9c7fba0d7518b9a4f876349ccb9bbbabcdb2a85fc60b83ee1ddd041967efa4036e5e10e377c9886f40bc0b0b57c7b724795f843f6a072e87e532a04c21445090a360731a2afb896ab795750e5c2c33d58bb714f5be427ca3751df09661402604a09a1eca95a8344d3daa5b99d68e6e6245825704c5d4a73af197d052d7f75778917542261d77735a21cff3f75d6159a3e4b1a7a9854ee376e6b3c8bdaa1f353b957862b2efd50d10a40007026261a546124cef979ad20d8085d53e30f5736b8aebcd3cdaa349ea474af249ac53eef2653ae1fcd5b3095538de9368d307d45df2a19acd44e3b78c2da9d5d9fcc4cb61feac5dd35f66299845bc0018c3d476b6761083baf33a4621e41cfae0e0c642de729fb2d206db6a4b976a635b3fd911b5e9946fddceb6feb2d2f893b2bed590317442037a1d6dc5b5d72910160221cbecb53bc983f1c736c3bfc9757e9e05af1248b28d651f521af67b2a0d7e4bd86a0013338404fabac7b9833c372142e6338a98c0efb7130aae8e34bb0c80937680a7a904aba3be735d41af9462f17b967b13566bcb697579f8a9340429c77baa6e24ae1ac86d8d25ae3cb9112e34a7a948fd141367898c5f33c0635c87de06f603b510cb229df0d0d9a9e107de88b12686c539ed4fc54c8285afde0c8ee502919a125cbcaf4c8c89f56e90d3f641f97c07326956f7b5d87c65b689f39b8b84359ee0f14d2c7ed621ec67f5e2a8ee5faf21c805187edd95e3941ed62fa95a65473a569566d46b87c0d27ca37b6b022a8cca30a4480d392ba15701d1015b3648958cddfb614983211bffc4966ac6c1f691f19bd9fed405a02c06712d62a775f73353f3949c76b6b7757a4ee0410fd6d20071abfe46b09e72b70f9f19b61410ea67037e037934bbefaf09cff018a5c218176d165d1eb5cfd5c46eee7b82fe65ea02e3ed7b18a86ac7b139b7c9df79e1f6e6f85304ad22d97190c7ec12c651fcc835ea434d92ae1444e7cb0dc644efbc2ae70f2f94310805c1d0f2d49643d05e78baa1c54d4fd99137a49efde88dba1374c94208fb4a0ebc1a0090b043610ebc1bb08168ff5bf936ff9834e825eefb9ab73da2b287b06fa2b0ff52f46061b07c1131e4108cde478c767b749b696f3520acd8d3338842d53941282da289dd1e9a0e02aa9be0f127566c9bf2d50a27f6b6ffc9e9880bbfc14ce7eeee70cb0c0ad90fb474efa69b46123638e8405fdef65fa7e0e7b29fa8fe8696edf661f9003a08b4aff85a4a3e6d817655c1d533b834da981b8c37c38abd5977b3ba71b3f57967a471c2eeaf2f6f258431fbb7e92f91814b1db80ea775681f282290db170942bb7b04aa2a331950b74a4b6e337affb4c51c6cd4c4e13ce3095e73e4767c2731f72bdb225ff572163fbd8573378427fda194d165750d487f6bbb63e1378a132fb6ee5115e3c32b2380b096b735bdb4d651853bc7928346fe3ea9df7534f2a4eae1f5ffc4b82ae738db7df0103ba4e68c2a2153bca499bae2439a57778cfc616df16032aa8a19e26597d275d2775b5ea17cb25d204b18028eb25a053e5666ac47c6def151f7d4b68ea62c601d87bfbe04711c24bc34274be6815024d7b7d01e7dae10cea6e485348ab195a83854663cc5826181b688cc9c091dc1e0d491fe51400e20e6f2a51a7d56af258e038bcbc80e2c4ac4b41661bd33229d07b39b59f3aa79d99c1ef41974a33e02a7cacd6fd8f9b99cadd0fd6a031f070bd3a364c64ddda0e9fb94036f374171de0b3f4ee3380780e6d77d50db9d58e670fb4a364827d631226a3491a27602808141ce657ad6e560ad62b088ff086e6f03b8a64bdf7c7d01e7b19289279509a9d6d80e50aef3b05b5561e4556952c46d0b6ab8eae735eccee77e570e1360b7ea38c53ae6b8eb420e4c2663b57827228392db6e79105a47f7d89e06ecfebdd63783101d3bfb5f494785acfdfed41f8166faefdf0b49260222c4080ec2c6e4f949f41784f076ce37fc7a34fa4e547bb44e6b9359b4b95cd67d64e4402ac83973bd50f8adc7c6e4c34019bd8f6d3843bba3d7155890712e0ed5134e00db877398d86b459f312a6272431f01b057446bfb1b8053acf181bac79408c7708f3a0867a64e06d7786849bb874a6bdf8fd6daaa572d5648ae100f4318d6b3a811bb0fb709168e817ed83c0622a7e5b17ebf5cd5ecb21d9ac32ddddb039083144c93cb55a95ad72732132d54bb120639d1620ebd142b58d75835b35cc6367012c93c6772963e9ac852c71c0dda2246ab845469997fc170d8f62334bc5aa4ce23e036967674303ec6f75bd3d17d197d026de69beda70bc59d2ff95a899d28ac7e5e42f4d37233996a8e6d3b0b86b80df49ea8e145b4a6e3e39f3d6c3c6518bac45baf97cde23037709d737b242b8918ca31f90fe59ff2c83e2f347a954d3559a8e4f075c620ad36be20b1e24b3afa156cf3255192171ad0474e4adc9b7f35436325b92945665f038611e5d14bdfe7b7d20c09642323346a717f460dfe7b5062a0098be66febe9f5fccfc747aeaeff81ba08e5dd2b1a489c998ea9970afaf9aa03859073707a686c492fb3f7ddb27897ba5e75e578bd82114b2ba85525a2002927909c970a04035334b64b1169c3a923211e0999db8baa26b6537cdcf57c051c0ca1b317a5b66ad96cb5ebd57994f99ab202348d8ddeb343312f1f26ab2442b8c5f5cf6bab394418ef2fed68c3e60275e836027515b6b946e5d86d91fdaf49c2a5182d5051726840a156a8653cabda25e1dd9af693533d782caa09295952ebfe6a194fbc8bb7fc2c0da5914a506c6f31490928dc5d6554890f5eb268b09d671bb6b6d7416dd36e7b78ffc5c86b34fab43d22909a87e5239643d5fef373650e291be56b89b9d90431d8c9fa44fdf4f83a1689d59d6ef833b1ce31a44197b36ab298d53b51ae3f8387087dcb0571c340874c1524ba0d576bdb88101c1fc387d25b5c0dad0b4d309255ad5d5b1e209ba56db0c927bd209399a8a3b5c8663c9ac199a76ea4f49e364a4b93a569b3400e20f0d748adf7db46a07efc68e43802a5d1a914759eb2abe8fe3e8d67f2cd7612bd4d5a6a4535b1e5b3ad4d97e54f3db7f8512c9603d87e01160b6908d8df1b952c750071abb1565e5ea3f643f233faeb84278187ff0089150bf21ee4d13979fdae796f592ac5b88869aecc5be1c64665edc8ececc87502d36720b73859313607aaa561d56a195dd3c7292fa8f0750ddd3df9ca056fccd9d6ec900f45c1454c6ceaad4154c69e288dc85735b8cc42950a3c5f0fab2be8811779905c3ad5a9a6bf56e7141d863caa4e93e0065f229b695efb790926618b3eda1b9a15f143bbb09aa3c4b72900617793417df364185cc213d5cc3a375778117212266356e214f085d8a7aed908256c4aa25faebabc70ce913c08c89380da06920069e8e27dd867567f152f883a9bd2dcfb8097b7f065482d6d11c0edebc67feb3068cead403503c04b324885ce1a62c99af9808a5ec8b7cbd978b8c43e37b06e9f7e1ce0b31fa0fe52e8842002e6e99cdf69263d31de080b56c0cf94f77f0397fd1f77b13e17af90ff33b00119999df802c33534a13d3ff7fd0e8cf58e8f8c8bae033cec1aec7d191f2d1a39c7b731c97a67fd1ca43c13a24b9f97d92e2364dc26a1c9408d4659ac7373e53a2a1704a47e01c0223ed4c489735b62a27ec67ea46747e4f48d3da101b0863bda9d3f7f1b413f3e7f130208875e6a29dc30a78198ef658c7ca32d7d53b4b92e51f8ad6d39ecabb800adc0870b2ab0e85b5769f346ce7fc371ad40c561f9f3b2f2a01f2b8ccae48c78a41383cfc36b2a1bd41d61a39c24144965d9aa5ecc5d506c7c7cf9476085bf049942d35caefd77821ad925b7fd3a006213abc1e008114c848d45cbedcb8af264cdc5c07bc338fddd1123940e5d95717040325048439dccd1e298bead22b011ef76d26a390a68161b8bab29e8409a5880cca9c8104694e1282c9fd64f50e73ec6b9a9ffc31115de9cc0088400a2dc806f85487fcbdd60f409ffca584fb197156b40142e512a0dedea1571ebb74d6b26d3b4a59e9105929a055cf3540e8a6a79ca7ea71ba8b40893c9797e81c6e9a7999d4d382e52cac95727bcac354616ae1094552b3d0a33d0d3ac4e547237fc0cd54944039b0eccf335889f6aceb518de496e0986783c564be8a4a05bdc9c67b1e5abb480b98173ef091259d8c772b611e0c09758fceea3e59243406edfa71fc452d4450b55b8fa5ecb543692c6eda3a6ad3bfea929a18ebbe5ce2ac4754989c71dced37286cdd1512107e4e7f4878da1c28b4beb2dd9a712a8d1d61d1a5fe5382db8aab4857b05a783e98e77711c1933a7641fd43dc6e6e597bd03b11ce8e94aa094fe250f03cc92ed5b0a5e7723911e87b0f3c476d9aa0d96adbfb395a8fd353cfb5a4cfe27deeb82e849f90bdb17928b0a5702e4010f7aaece2d43772a78b325d2ff24f9de0f7bc65974d2348c64", + "bf96bbc17abcd1f56a9f22ad164d25ca72f8c996f1a7a66d6effe140336da4f20460b47e1c8573872496343be35a055552ceec437692b0e4919224c4ffc8b603286a8245eff5cc148b004f6e5a54c4ac22b0f09842a07cd332a09732694d3591b8b7d6a7ada2bb38a30aa7fd5e6baa811b9a195d3a96306d", + "aa2f714d3a184a9883f4199e8e33fbc9c92b36fff2d59f07a9d0d335d7476e81", + "36c79f9f14d431cc8c077439", + "873d0617c986dc9d83e9cdfc50b1f916626a9d9e1c595dc7ccd99d1e993d25d89b04a893c89e205952eef8f1733054bbb55fa5e1b07135787d4fcfae226737b50cafa2c11276e8708451be9b4d7f662e98ef6b705c5c4fc64588728eab1dfee22a0a92bae61828a7394977b0ae8a3b6d0126a23583fec025becf0a72a28891391ac1495732a7a4a1d43a63ed8eb37b280b6d886096fbc4f77aadbc5e441e996334d0e10cd7f3dbba9bb7efb147297986509a07735385c681e0543186dc166291edc3b4664f5c8ffb0965c85bc30ff5e7769a69609c69ebb68f35d104bafe3dbd3e2a40e13865f19bca3612e48592aa930eaee29440b4ebc1c0a59f1c54519857c929709b086bfddd6d4a30940b592be48e0067976099efe71f45f956182dbb300e8076e1207baa32d59c1afef7f34171bd66099d2d7f07b39d16d0f8b085185bf2554c6ad66bcd656f07979e8f19575a116f5c4fb9700ec3b46a3254f28afa1ed51348c1af6dba26fd398098a76d7bfa2ff195eebab41330ef290bf75205a2ee570a2fa46bbaa74aa6ba68a0e63e2731dc1974eb44794f3c89ba58cf96f7a070fcca678185711d97cd9d7d8202351ed589e0b05a7a190e60ae4aa109254a7bcf7013f8addd07a64145e21226795ff7c7b1c225f40ed7c3552da8eb18b9bc9bc70c2e7ecb10c8b20c54f04b6e27b5044a7a67b558407eb330f2083444375c022565c45fe817dc00c7d24c23db320d15949b0b64fbbaedd310e73e423fcebe6e1e98a5cd232d97e6466642e5e3b23f06525ac1cdf8688650cd366b1b7ba2a9033e62d836b14bb73717757b76b9673671bd3d3b2a56628f5a309f3b86ad32abac0590c50f7c5a22e0a920d88dc9fbcb3add08b900a2a2fae4178aa100a0e645ab428e0e79bd90baf4af2755e48262b64838a6fbc21226e323c0a1ba5703e30738fc7b5a7df9eabec6199df5ff6ad58f9df5a734ccd6509e53ecb3de1c881732e26e52ab848a0335b04b25f2254aaf8c130c78b0c9a40b60d402673ac7ec7311d0b00c45bd176bc73ad81c2478611804f59e3c145110aacce922e473ef346f8acaabdbb9f313dd3f8d0a937d0c048e5af789e2e09a816146f9ea28170909caf2572a2f6e2d0d511242909de2815e9ec586b2d12183ddbeb7dd70f32424097e2ec28b4ba62cf78f547e2057a4c050cccdf6b582172343742ec8c85e2847efb1595bccf89ece3b3ebba824d2f097b1987ec26c6e5710544739d54a714060fa91b7995cff0161415eaf55758078772c0271d9d282354e47a25b673eb11497a6ed8db82267d65ad47412300ed525af96f943c5336b1de88676dc346e7339230032463d305b0442f934018bdf0242768511d20474c6ecc82fd752c0c0ca5cee1f3e06e679fa5835540f97870d47ccc6bab233290be7a3bbd4a73f1dc7682049bf7b3cbfb6687479c18d246e3c07161df5c889ee95d39cccd989625a8c9e80f951f8b1832f6378e05daa8566477d7fe547e49ae6e822a68de4df9fc4d6500d5219c3d3bd8887bd7f695151ba378da17c2e750399f7482973510a386721c59683a86003edb9f0ce1ea89bd7bb8a25c222df7ebedcc1b56c8ce18f367b2cae720e0591b477f6ffb498c3d7ce59cabb1b01d7cba84d7180b4b2a165d4b889a6ac361720e768f2913aa50b0b5c88e55c35bb4df4fbc4460338809605f1fd445a2bcd97ec1d2f269b5e779a18c8f215bbc5555c745424484ee5436119eb8754f5e9e91f51fe715353596baa1fbb0a690e99691636e6027cbd4b7be752bc278661e2677070ddc12dccc262d3dd47160345de51359ee8dcf2f61044f95dfdaf323881b2bbff68af6572348f786f6e52d1309cff871ad58148307d7eaedc93ef037922b6092ac62171433adc4934884efdee3052ebd60ee115f76f9dbd0eab7c4c0a77b4ce8078209d23d81d957335f331965b556ebd54732327b5aacc899f9ed0edacad9eb98cb845867f249efb0e1a5fa2483227f78decbf7f1f32d060ab0c01eb985d83920b2cc24b5f9a0d5d869e980129d3b78277fb87e5cda61e340a729d86b6617b8828dffc7c37d4c38080ef3515c2784935973dd184e0a8160f84bb78bcd8a5e691760be4a4d41ed6512ee436ce24650c0e17e7d74b5e01cc39b21e21514a84db262d673f24a82cfd5dfe2a162976171c538b24af16429bf8ed5fa8e37f89ec6e7d63ea1d83ac1087cf89e8f43161f225108889e922493d973e36b510074533cb1cb22174d21c4076959e4191a5df880a8b868b95a9cb5151a7ad47375fcd87725660cc0b59c88ceb86984941268493c49b8aa2baa8c531ecf497853ffc3d26b926a379e72188e246d42073041fbca453bd558f328881c8f8d9e099e898a912530c4be499f2b32229c359ea10e0befe6d94cba5ddafe51d164898166e890b22fd1eebd5724451511dce1f8f7431d712a3f1e50fa5f609da686253311af255b84b2106b09b803e94b51729cfa0826869945d46b9606547e7e33fd9961cf15b400d0f5e01d8fd4d92a83ae526934059d4514b9e0005317a70466aa0b6086d5fcfed201d958a0de55fd23f0919ea29b8aa02440031a9fc206b9feef362a73430a4204869354ec81b6fff92eca97e7f1bb12d25228eae466b8137b4806895ce34b57dc14bdcd107fe160776b0e5daab150ba06976eb884eaa574da393af4de355381c7caa4f611a2ee70a0c78df93a4276f55e6281997b4aeb36888a6d9638cc95444047e5202f41f8bdd787f1ff44a648cc7d39f05e49e5d6989fedb194c526780709763da81a780db0d1534a466cce57e11dd3a4c0e273d9873af1040d52a90e20101e1f80ef296d45769d204cd5417a84e022b6b336675d36d9cbdb16b0cbb08f5e240012967c8067c92f97f981cd19d449084400d76adfb7c610abb73bf21e161db04debe6665fca79d71c8cc50adc3ecf0e52d07773478ca97b8e9821a5704dc58acc647a5bc618d2b681f17942c46c266c73ec211ca403a7d47e42e12c775b370cd500d70a4aac7124f5f6d2d4ca78e1c17a96426c326bb60379ceb0c84a86200f3b450e5e9aaa11f45440f5260eee7675a8b9c47fbc58cf18a651a1dc7b39a911442504f12c103054bb50f15381e512dc6e3af7b414b3db26fe767d83a2a53d7181fec8f6b196c7874befd6628b31797ee3c9260c7b7853b137893e36696e2a47277add98462ea9a0edeb7d2d3c0f2805fd7db64c2c7eff353ff2b36f4de862a42779ffd4dbe77b6a79bc9f4ea3e909474ead915fa3fa990bc82b83a670b163e79300b627fb91c4502e96bb9dde00f716ae6ad14dac647c9f7c2e5b2e505708b5fee996b8e9113a8f4f2caaf414061ee72e76b8bf47ec4f781bd7c589adebc2c267448247e30d659998d8037783494a1fdadcc819d7ad7ea2674f75e10639c3d3055046a00814ddda0e463185454a4455d60b9780250183d591c3db6f27373cd2ce4f02f206ae10a8c32d71226e7cb8d5b05909445977164983c0073434d6c0f2bb62bda66a16792d6e53a49ccb5ac3e285a6baba935f30e9d1ddb812a018ce04f29e2009ad678ba72b6a7112d6e7cfcd3ee7b058ec954a6fd7fd01018a6eba6209687c3130de58147b07bcfa02ec1caf30b59daf87db4618b4a5fad34cbc8014a7529b9458e05eccb9a77ef1621aa95513c6fa4003b0877ffa6d48805e7867dcf53447caf348228ce926233f65d553146584d6ff3dc3ed3296db9bfe69dec6a07add13037b3aade118b2ac3c52350b9691a6cb32356ad93377059fb8ceab68de38d96876d6d383db01f3cf620e47cbfd471bf6dd1f601210482f7c3bdd4c3bd37dd0a7507e1f0fe515151634813dd4ecefe97b52eda28e7a7129993b0af311abd3a07bc463f3cbbcb4fb0eb265a5835663fdbab0d8b8b5a73837ac98ced6582348fdeb41ac8ea9e36f9818ab9c0a41bac1389a6b518ea17df043dd50550f32471645791bf59855ed695b84919aa5cb688e569122786660f06e3a919ef9cf18c355bb397b86710c367362cddb0239aa1d32d489328e4bf92b3abdc3d0dacd76ef1a1efa28fdb848e708aed6780e2d8efb19a2e26fea56b4440dc3eafd796896d73fd150bbd967871f5e6ee5db58995f2f85cc2a15077d7d472bec2e30430af6891193ef03dfc7761e2b3b3b54a72d4f1084a8fc541526fdeb0633dcba14e9485b43065aee8750397ea88d9ff13417149e0fa145be666e6f4afdabe7ad8e4864e777c20ee7a2842db44dedee22f3ce2f97d72919b9ff6059352083be816a7515c48c5140a99af8e81b9e18b10074dc73dab55fae66261421629c8e323d8134f08beefbda555660a51e4b55a9ba4573bdf0396cc413145a941c4175aa672586f7676027f9fe211db87fe07a23962f5b1ad8f566f0d5b13c5146457276f307a02e1e13d00c5032a06d225248215e4bc4be1b672f1eaff16ca95da42513fc4315c7a6663f9101aba80224acbf0c87fd3a2ee9dedd1808c1247c5bebf3cb8d77377a508ddb484ed91203a438ef5ed3ca14e087102bc5f3828d8c3437ecf5c92eeec0331ed93ae33520740abae9b7bfc45f097da70adbb9b9b879e46a7d655dbf75d89773f737b66fd8a8c13506cff7b44bd85dee279ea7053f3ed8447fe79c400cf23726fae800449d27af5e342ecf776378e2eb449a3af27a40fe4a9806487b81c942bfe1a4b0fc146c971a13f83669e0189e337cc9fa2024864436189a9165ade6b864698ecb797ea05fed0d60f0ab4b92cbae36c72ccb5aa45337cc02dd086afed9e5522ecdb75ccf389fcd63c5a4abbf60908e39cb3268c76a08687588be67a856a841eeaaee8ed016f6640ef0f5acce12ab8bb58dda380696e3fb22d0bae0788c4fb79d00cfa5ae3e479dcf7d08b45f4592c2d2a7f8081d5a9398659613ba4932ebfd7382d516b2648ec4ff4477648069b9b2e4decc89547c16ab82a0ad9cf293fee5adb17cea4c95ab7b8e386dcae6acac63ad0d1d13656dfd97d5623dbe45230de597751321bbe5a03c879c303fd7a0d837d48141decb6df4f0865717628c85dbfda29df9a8a69b2c956c75fc66e45c08960c23bbbc706e48395057f989dfe675305067b3ed8d046db339e504d5b2bc978ab4dc261d8afb325c5e794ec79d63d8db53f9dd24b623fbcc202679fae8f7d39f7f7e0667b142c714b6a723996e5254ad2ebafd63c3577f8909981ce6b3eb1a6ad67a4e93c45ac3b34587d153ec5ab67a2697a9741610d5a176cb9b5856bdccb98f69421061c84811dd6660495d9f30548efaa69e36ead246d997c95bad0ca3fdc1a08b4be31b12daf211d3e29d585cdac48af8f2268ec304bb35d", + }, + { + "ceb1f819497c0d631a9c9616655f419b5e3470fd3b19cd0e4fa556bd26cd9df57e960ec7121b2a2cb7c0421c1f84b77eb8277bf341490190ee574d1424eb09a281176a933394bfea5502077486bef23ee66e3127b732b7a58a04b9aeefc35170dabb030d4fc3f8a4c5ff194bbd0b89a379baca30ec81d576868f25755276e62c31e93a80ac322571313ebcee494592c3ff5cf3ecdec962645887d9aafdbfd62ea910af5542d4c7731283625bc9f41ec85012b42edb1792339e6cdd9c2bb3cad4c4792a064df17a5f74dcbb3dd0d90620ebba4fc6d1e1f9704dd60c798ad64d4e5077549d68cefdddaab81a7a91209b7ddbea43accb3d1c191328929dffdfeb4f5740ecbf0ee99cb9a1b73333d7ceb0b2b8f35f84307b9d44a42fe1a30ecdf2650dde251bc8c1d46978089c50d64c028f40611370ddb0b481df9624ed63165370f4788bbc396026b268c2023e0f04cd4f66e0bf439074c46f0ae85d6dfeb0ddf22868af61c8d5133097156fa61a3cf5801db5c3ad29871d336f7aa06d2a7d5f52e50eb3aee3c7de7bdc4d21f68a1776a7cc3954f5c071282febc89c1545fc672a0a1bd8eee2b769be048ab58ea12b356d658a6225fb8a55e752f1fc97ed64c2f87f9ae661514f1f56d9d4e47b001ae865a44b8a9fd5df8628d183bfbee781b6661c9cc76debe6c3c5bba840bbc228206673aa05498a8c715b0f3019f6b2d05cce6c233b5809ff1dc4a75d7f69859fcff94ad442d460b32f6fe348659518c16385e49fddee9efab2455732aedcd17dd51b5117efb2ca1e21ae6787437f48a7042d46e11be4dbcd2932ffd70fd154e4eca5fcdc57c6fa79746100b8e1485fe575a5c79089a25eb2d55d89e42eddc81b82c4f7da8bf153ff5353b7349b161911bbe0a14483fff6585d7f3c8b5c04a6dfc99db9548f0c53e25f0b16fa212f0bdd10ad2193ac18eb09972795f42b3bd3f4d98c4868989c4af7a760f1c88ffda59faac73256df1d607644f56a70303d6409c9ad716149bb58f01b4ab8ab475e4af1257d47049aa77adf9ce54fcd22b3d6ec60484da903a6991ff052ca37b01428d5916fd92c17530bb3385a805b0d57476e9f9417a23ab1c12a038b61b3a0898831f9615d10b468c3edc24448d09b8f3e3a2355dc5e069e880929eabcc97344fb6ca5587c5ac1404783848f531f1e915941e7359fedd328f7fd12b3c685f8c1f29d1a6ef7dbae3e5e32cdb251eb43aa2d2ae0cc18b3f40fb006c2778cba387e5852ec4f2d9b8e8ccd5b3e1f4781c974aca940c45d35d30d3b9584c750bd45a80f32f73dcd85c99ae107b92888839c342cdcf88911cb974d611b14b1d85a59e88c502559d6eef3b7f5addf7d307bb25c57aae669767db6d798ca887124e159b0317e09076cfdbe61aa9ddeda189036703b1cd9b1998f88325910a37ef1fc2e227a382ae635e847df8625b99eb6ef0ef10ce7a2a5762ad7d03a7a4e2b767c4df0b477d6e9601dc8e6438184f97193ea7d7a8c22f1b6fac1f0740f1beb8b68db40e0b22940cff2261273aa0be43df561b88184a9377e6a27f27942dd04abb9448b6b6ecb3a60f14dd39b58b8d94e1991cf9d3a071ba42e0e1d71eb211ca466a70fd4724a34639707feefbfd73dd9680d76a214924642a063b38b85cf30eb763fbfe889f34b20fa4a10ba214d938a5a092c6e9b73b13bd664c75b34f746aa360593c0f8dee0f328f0ad4a3e40d498490007e573b8204a1ce7a550deecfb15f18ed5ea6cb5dd95a68adfe4cab37c13b383f8273b1971580016a8df02a3f4f431c9de9e7ebb33244512080fc5852278081b9f4434109c3427441329e8071d19d0fbb74fb6ea73fbfc7c0ac1012d3a0948d94d7ceae9b0112ec43a16cb582f9c53e7eb0ad15e05ceda108fdb3dc9e585a332018d1cb19e4a75d86041308fdd8476c88e4826931601a3a5dce06fc16512f4669f10183d5a8d15bace4649abcac07358089aeb1e9b8fc3776f3239d5442d3be33d532097e13651af7c9a5b465ace9e626889800318447b8876b45dbbe1989e1eecbfb5cdf5067c71a0d7b7fba6555d0edede12f7228d7f9841dc532274f24060b1f52da6fbaa179b81ce962723f43601d248f8f4d5778c1653e038c8d27828836d562968004003810e9aa9318edf3260272b54fca2e012f6c04abe92c2e6152f3c3e973c7e9abe8c3467bdc246f0226d1b7669bd577bb317c571aa8758bfb694fe4dd17ce78f091cf6c6de3cb601a9d177128fce8d42e652b490d90c4f8fa04ddc71cac300d3dff699be3250bfdb2136edb0057af3ebcca77ba5b3ca34531810c5e2d4c5b5b3bc4e71ee9e30cac067b7706c326357fe0ad2a4bd9cd811b4e9d696bd9b4b70579ae246381210f879c769e5f9cc3cf8d70e9c94ab74a55f5d7bf61a17418b6edb6db4147fc40cf98c75de85421b7d192919add48e5334ebce2a06e56b915447fe085b7dcd677659dd55de1f705c389975e56e0338a2ef07ccf5ec3786407e8449d9011641786f1ecd4d3d3da975d61f5a442293e6119ab20686ea8cc7681010421226838a95a157e2de948c536aabadafcd4095dfda48e5613272289a8238dc945e5f1ef30075d5de096131740cdf23da1fb8b9fa009e5b321083cd93bba9271909460c09bbe1e8c54319394ff85c291814e21215816d4791f01424abbe4cc4c792d0d04db1b812f4d24b44caa76de2bc50f4d1d1611862512d87fcebd3c0b2659082b2423bc5360d107ad7b8e8ba7438ae4509105d6b618af25e75c51e272aafaaddf1e5a227f2b2a2c96a8a83dec23223cb428136a30b290181ee20a819cf52f6c03798e7294a89f3b5137693d5a8b7a0ea38d78e43008fc4eeaf6d077ebffd3ef7952620e0af1395c38a289832df391d1710ab5b103a1ffeea8c06684c03a74399cd63797c770e3f0136d8331611502d21fb883136a82f2034358880392fc3d2fc274b799e59b89f8f90d2a5a123d3c21e5bf3540323743858fdb8912c7c6329a3aea241075ae097ebb23c8cd50f4ff46b42486e65bda6beba5f4fe6dbb30f7e61b1bf690c9f00f7513c83274cd21bb71563257a20cc38da2b88c1063bd0849c8243058ee205853342085a8edb7545f0d96a6af936a3d4612b95676665eb02e72e0875100dfa444f039eddde1422ceed8d38e6c3dbba25064f8c6cb5786f9ca67712b7840cfbd40f99b1edadd4bb9a61f48124cf3b49d68bd642404eb1dcf428eeabadfba6810a4032f8ed06b38867a7098c7744d54dcfab8f0ff941ecee69da9916d54097e080cad86dd08bf53833fec4aa4399f7124586223ec70e2c31e8c647be06df9e86a976f37901e9b134e775de2a0fd53d545c5f92236dbf5455859c138b7bb1112427049d29ed4f5dd5c43cffd3113c276d9bba910879e55efe817189fc239a204a9ebe738c0dd161d10d60a51e9dcc8c38861d41ff029ffd841086803320a17ebf5ff14b6cc2ac3dcf0ce2eea9af7ae23597233599c2321dd2b99e06d93f84989e75e30a388f47079c2af545d96f270e064a43a00c76bddf2f5be5089a69a138de844216148a1eb0b413f58d831d9b8967df297455e7538442388cdda12d157fb25896c6e2b47696c76b234a88bed4f09dfd64f2e4b77627ef03049030190fe271a5a853591ee9218a0c6b12cb3f02683d665b211dd1480cd44c9c0566ace7d751902babae14cc3821374bec774d54b4b4afd5d1811ede556a7a5ad02642a878d2d32380e7efb9082604f49d51495105f827d77945b5cfaf2f2980566b28ce3dfbf1bee2e077eb067bdfa4cc28f5d2211ca99a615e69118d9391e3feb9b13cb4a2fa9682718189ec612db889228aaa3f3345a091aeb11f41420240fbb47caf567646d9e7c762d3288f8bb2b1165cf049a191db5042fa9185fcd180b04d3007c376e0aa3d427d66d10918821f74736816044366463df7cb3ac94cea167cf1daf2d1842f130295e40bad672a22da9238ded69e241395f04d5e3c3875b8294faafbd3d90ed56ff3e01c5a0a3e349d761273143686aa26d408620c7d1a35ccc430a09e3f750d3256298c6068c0fdded270f308f79d2fcba591d723ac0cef703d8f0e7c051bae5b453abbadfab98bcc297ed4201b03ebc195c2e441cfd3b10c63c08868db36c320707ecd6a37593661d70a81f30e6db4a32f98e4fe6b950ace55923631c8f95138781fa2af78d8104fe39242f1fff6942e8e782dfa0d37c863caff9492f8e5cb70046d207c4630cc29c20e1ac105aef093261d8d335456961e552ab14d107cbe14e9de912f0e5d58d16b729270208204469f917af4e710123c3bc38a4b3f485f2926f058344db105b9239829441a2d8ababf04aea615c0e350846d9bc3b5faecdbeb450f38f615f119ad1b5dc748e88107ec2fae01f0915174feec37b3e7248ed2699d0a5fb2fc785f17d6275fbea867aad815acc8a6fd3ca4ea7357d197e5a30082ad5f35a9d894c0aebb206c6487163c9cc20442c040e6aab33d7b4b221e4ba4cbabd975836e353129559d8ddcb3c97876cdba360da0e0c1dd5b0cff7957a444027db985ebefb6154453a221076c997d3954b347f49308d2ee14d1676b75ab6ef365f3de54aaf398fd96b9040253813ba734829bc78a6db59e3f1c0ab4c878a72d6b8681157919130fd3171126994dcdcdcf68955ad64af8156702c92f7a715ce6f7ddfb70f60e80c92691efbfdebc8cae252108fb6c0010d303d9027d4a5e63413b5fb2316d32fb93c3ea52a2a7df50cc0058c76c58d73f5bb041d9fb9f3c3cda9bee0c0920079ce4f1ef8698ced664ce2e2b3b86027ae2b3bcbbae5bf7ea3693d9429cf94938dd3a2763d3f53937c46763ffee6579d018358bc69182b1c7158a09b18352ea618c11c45f07fe97cb65faca535f43237879ae3e0a31efd14679daf8fd2ce25eb8f32218fa20afc586a98fd908d3fd804cabbf56dcae272328011b252dfd83e5f0a5fdebc6acb04c5540255e1322de5fce9db5aa4cdccd74dde8990ae51cefd6c1edc1879971d3efb1f94dc41b2b23e9c9d89415b46189914a229b2f3e8b05ff78c68711385a00e9534dae6f79d15842aaec575e4ee0f098028bc74016cd3f8e93c6a0cb21a0b574ee63e367343ca9de28003d76e02d0ee2b8d622cfa3615d3628fd02499eb7bd8c1aa1f34edd9c2d059c6a7c7c978a5e4f60801e03e17c3a09793c5217f310a30db1965b8e328893cef20f4a899aa8d9fa28f7fe0a733813ed7466046776a874273ecfb57158483f4a588ad4f232adec5ba4ea651822780596de09fd54b1717bf04130619979a0e3d12ab7c35d64afb8099a1d21bc952653742f50c8e1c244d10374329cedd27fbefd37815a9b3112a4cb2fc587c4ebda381b2b01fced45cdf0b9ff8ca7d10b65ce42e728de183a82e369486a2e3345664e70674a5dac174d6616d90de8e472b62759df057119875483cfbfb103041751747f9cd12bb31e91caf79eb2db1168026a4707dc618f30", + "e45eef9561f3acb3672b4f38570256e8cc4d877e2998e72b022e33de8fc20f7320fe0882f2b53559e084923786e8205336a7d15f3fb88a41e7bd20767f2feaa02df2221fa7577988db0bbf61f3dfb429868688c53e130725d0279c505686f083", + "475a44cde0cc931edf9a44b0c1e0001766f09ade023dfe6b59a6af800e549b55", + "7812a320691ca8442767a51a", + "eaa577bd67fe79ce4586f43355c94528e306c1678946e4f7a907d2a8ee7f4281270502522119a8b09b6f05d864921cb515fddf6a1000fc2f67b52d0627998591e2acf5b6faf71c278e5754b2703662ce670dd049da8d6e280c2b84d6a9b29ce28980563c40e03381a49c54608b72faec9b272ef05cfa41957d9eaf3e944b22610c725d8efea90aaac6e782848d368ffc08784d7fe37ea1effbbbb34952def29fc511fb10a1282bb0b6334328e4d00529a44de3259b522553a07d524dc75f431cc9670127c15670c0df419826617cfb5ebdd8788d5f528a9eb1e61324eac5c1746f339aae2e2e2fae598642a389da671482128acf2d69814258d83de98f186468136868b729aa5f0874fef2ff2575a1f87439d64e049e4d0637e9c99ecb7275417af654541306615f30b75a6caaa563e4790dfb28fe9f0e7881ea2d885eefdba99efa7f878925ce7d33e86d888154a1b03189429fe20af8fa3a68d65ced9b690a709031121425cfcd7e1890ed9614f9dc3ecbd0e38c6c84e453e3204978ddc1ef8d7fc6cae28c61a472d8e089e23209f0c36e80c994af771e6505e72ba90e5543f6bad6dcd31fdd468b13533a0254e44797825764ac1f63747d8d6ca019ff16fa732068ee94be382c46b168050ba725379df31a98ab81ec8eb266a3c3f2e1cd95e5f12b3bc79b8b435e4d94098c6184631cec57e9d8913458889223a2a4541f34d2f9df380f34c3e541fc587f0a6cf08c82e99476060eb84709a292f4c7a8551bda3a9eb6735787dbb9d7f1e83937c2e0e49f2cf6e0ab0ad84c40fbafc3c7e61886a8629bea816972fa0afd0f617b6340b1af19e341875e97565c8eb0b25fcf68696ee674d2abdc29396bfd0f282543d2b72a239c6470f76d3b5bff6d1d064e6e2d06f9deef2aae8a259c034373efc820f9a2fdbce36cc27f35dd6386de3b49509d0c305757257f8674d958c580a09e768c0f6ef237416fd53c31511badb2e7cdfee636508482f01899e72052b46b5d844799cf94708520178cfec2b61c8980fa7dfaad8915b0b75ce6eb57ed4a01edcb4a35c1dfcdf8d60f3191bbcdfd522a0e321ea41c2cd87a303522d0f98b82dcbe53232ecbf0e2528de7e1be75569584bf2ec574687fde67ffe9827ebbe78f2e5bc4fb368f3c9b0f588c97f7a139bd82fe86eb605b8e29cee75d07b510da1b24fd62cd2fb366f1621e7dbf268b15937f7f7ea4acf6e615775a32c90733769996dd2c5aebe08ecba73e0bc4781d33971992b2764c1b08aa972859cb61b003406479423254a01ea85a348ef249d408157cc0962d1e24cd9c426e6e6a3784dec6fe935be1f6730b01e8683d97e21d8774b2e2655f85db7149e930a44524d4f86004cd687d8a528b6ceadd890707458cab62809110ee28f61a7277ed79dc41e573fd4a59fabf15393ed4c21bf4d5138ac843e80bbf5e1c39ac2d7f2147f35996eb51a9e835db63faaa196b8aef1823ad72523fbfcb35b5560582a48a25ab770e7528e4b3ef291e6f62f5fac916e2162b3b56304287e46839858daf322b0de083d1691d6bda44d66d085ef0d0ad364eebacdd0a43a4456035e58910d0b2dacce45b1c0beabc784f3620a3e4390c345df6117b86d4fc386523b7ceeaecc21233a2865ec6b63bffba6689fb3323402119db8f0665a4730b2e26ca6411db04f1bcc78ce6272159ed2665a286f1ad7758d6d90090a6fd320e697dafbdfef575077e282b825bd64a4dbcf92d1fc0c6f795154e8466ee4b318f2d44b6f81c52523ab68ff8367e01090c2623e00b4008e784049df873a35c29e0abcfae7acbf27236adba0b913d19a15b4af4996669aba4c656c317084347ca962ac8df15cd2f849f522016eb92de4de62944b917d88200ef9aa2def0d13e5f4ae09d2eb4a2d0800af1d704cb01975f6d59768a2b50e39e78116147fd6dcdfbc08354c1b4033bf6772fa127856a4072556a9f07bd7516d01ef41bcb519005c0a3b2a04400427ec033f1b52fe5fdc1aed8e2521fd0fff663e203defc39d7546281a98a502b8a470af16cc62a6581c9985d7ca516864b799fcc55a803ce80711484f6b81591d2402bb1499c95dfb1dee9846679c22853be87c84b4547138dc4fd46b4e79ad12773a5392540a595954112f0cb1d9be4d4eb3aaa4286b6c01520558d58587d9d7f0df3a0282011ce01c9c17111d10ad61b3675b1826c1ad37fc562bdde951b43f890555d6f74ac4fbdb9abbe8bc1e80bb6d52c13de8960a3ff8f65201265e82981dbe39e0d65cf3f1fb6c56e11f9786210383d0150a5e0cbbdb52ca8b2bc45c12fb572657380df369082685b3de9847d5014beaeef815d63e203cc911061eb53d89a312d187f9f02760bfa71083fb643f5d8c324c410070b7ebde250a185e7359837899bb1568a43fa3418f39c12feb03b148b924bfb98b99352b1fbad3f07ac8e4302f85d1fe9ee4bf7507972670ff8beca105cdeb037f1cc4f944d6ca869d0281653de5ee93a7362420fdba8b01a375ff08fe27873655953ec1c00f53613c6ab8b244e2fc1b6babdca5311428d06f57aa4882dc870165deff75ba877dd2a04d1799f26ebfac97a1be53a83ab77dbc2cd4aa45bd779f61b1283eae1a1866ec8a9c150dd0a4deceb2ddea1bc0f4206cd435600a8f190b999b952337d9eb2bdeb3aba2cb2e7000319056629dc1f00901f0880278509417223a3ea0919fcdcf12bff0771c7cc725bdca292068478ccb2e1f35ae8964e0601789a73e7e7c1769ba53f865910fc3d0085c922d7f7849d27b6e7503d521371351f9d7dfd5afc5df0effdf6ac49617fa228501ad72154a73e07781dc4b07765dbfa721d95cf1dc41e161cbd34fc7883a25e3ba6b03e504b2c3b98c8b12ff629b965c2aefc26d74faff7f784baf09c3fc38c487a9d1f5818261162f97e9dff70cf42eb5dbcd7bebb66d68f26d917ddf2a3efc0db1e3372b170b4cd18da507e44c467943f73648dba74db1053b53f989e481c3054bac22c6342fca2c26d30a859a1312e9c353bf921f68136de2b1589747bc765153927c31ebe749dcdff98b5da84c4b66085451b4c87fe1ba2142f98636bcb268c33f7b8c2b96a6525298814578377aa189dd73d5bb27ec5cd2110d8751c18a3110273df2595d4c3a00809bdeda70d86c4a8169b7010c9cdeabfbc3dd3266518226d0ade9bcc4825f18198c854de329fb8fe456dd3bf35d89bd9d2384f3f3282f6872351a18a2f852bf173ea4426de6d01b3ef4b4685aa82df7dc45b99617a8b8c8a0c65a2237b3eaae8267e1f6c453f485432529d973924a080f6a1cc2cc18f804f53209383ce3601ad9361afc331707be1c88b4370404cb7fe0bc538df04adc5c8d9ced94b4c474b19619a53dca3fddb434cac09ce10c0293fea04e8e1b19fd3ff3d174baa988d91cb604fadc59ac0b61f4f87bfd07eee20f7f3ffd96766dd6f3555cd48da7ecd71d2fef34ab082678bfc4dd007669b3fc7a937a5a46269baa7e4e4e43eff1b2b847ea70b6c6c23905d6fb2fbccd944251087ac00c35c2eedba30641797d36ef9d3cb1afc0e3e8930f5b605a847ee77106995bd44047294d04350194369c5a7bf246d1108e1d18d9a638be0c051f695ce86579db613cd8922e86c683c91800b9a34fe6339e0dd79472daa662f78f04f0151a3acd18f11faa4e1216222843b521fb998c8490ab8bab27fde36395b456501307d07b484b453b189fa339282a634af30fea99c9af8f877e61871fe743238b2cee6cb69dbd17d574b5106ebe4b0fde4ef42fab469a5ba7d62c23b67d857f1af6ac981c320db70cdbb6be41bbca60bb7a159ee1c85cb82e0a220064359c06c660b75de6b49839eea68c80283b75d9d627aa4500c0c0f21edafe4a2cf7ee079d5310479da06ba58b142614fe69cb236c51447d63db31cdff91485b46325c26d40dc6d608d46a5e2fb01df06064a022ddf6d5cce0147d5b2a5aba5f9fadc5e778010a924e00a13e21daeea2cd330f45536ef4f42c2e77be00bb53b3f9a93d3eb327dbf30baccee5d26849cfad654ff3ef2b035b78dd3ef42de3302e5514551a968a205b823dffb040ac9452ae3efb43219b02436d0761ca11470405510e534d56caeaacc40eaf9c47a39475adad266f5ddc813e71223800dd46fa7c02b078353f870049806ed7ba57b40b7c3c6272296667500c4b97dd2d7026698b6bc4985bc01be99e0097013a2632c71740888ffaf902a02bf644b38cf9a42528880d9dd142de967cc2ad3e1f1737f0cb8dc5c59c252496e8cfe4e53c82f4a28d9ba2bfa62b6415ba3e5e09040d7f3e3abfeba53e46575e8817ac5eca806ec8a84c7cf77c9fa86c9dd2940f5b96b25a92d4a8f894d4717c8f80a62a35a51d8511f1e822fd79e6fc27cc3f3097d9e3272447de6f223971657ded9e660ee4f8836359742ce7616fd0ca2de6656c71b212b34b8edc71ff36bc84ac4af58eb1adcba4b2c0cb31468dbd2c2b7ee6752981ee1d152c4e4a9b25b2ce87796820def34b662381806d2e4fc77f0b69d7a87de43d94d62a6a6526a7f8c588392890e96f9c51bb58b4f438eb5d197477ce9b160d1c898c89ab408b3c1d648be93b531a5bb4988592c5a8999ae3acbe586d947fe6dd507cddb92dff4974ae17ab99aad5aec9d07b96bd29489876f51afa67570e86b69321d9e565d86001514638403f86666dbf93f18e0a62bf65db333bb85a3ae12d8411aa3c2a423a29bacbbfeebb8a5bafd90436bfded16f992232360211086a3084d9fd1980dd96631820a2cf25c3ac5c19d164cf5ab9a852399491962100ca4fd640146b7ea5460b4fb9e46bf8d23d508a4eeb8a3e9fad8249ece3648c2ec7705a7414eb8e8d602549204cb437f589161fe40de1447d14efa4d738b775d0333526c845cef5ffcbaf5c957df1d8022176b56eeb198e7ad2dfc3d7ea46b125ed432cd04c77efc011a2dad8573345080d7c3cdf5cc160fbc86c4ee1959ee1b8258056b0f3d9343c22dbb2f7858c5f162f08cffdca1acc866aa68e5f1c00b74f66544e8a61e429335adf6f73e32fa87e48e1adf15bb6c7aeacc93713dbc31cdccc9b0e52f922842679494039c395cc1d95eb97ae4df3bb8aba9a2584d97a236f87cb22f00c0a078b045044a5c456e22b2b94a76a559de2672c880660f9785b76bcc2aaed780e05212415c6e73880ca110654ed155a1004af45d5f15ae8e5bfd4817440c5d3d5589eea2c6c344ca0d85d91460638b37f877ea4cbbed35ea75678ef2335a5922cc8541987cc256c8f58045028d33a1c4899cc32265c619ac782ff998a478996be6a0c5b102a664831b395a884f18e77885d860d6b236c52a8066d2ced25432bce79a31b23117f405ef4ebdf3517de98d288f8c3baf04b63b6817c46c14b646308e9f97170b7dbbf9d1a36480338d8eb7466df56feb6baef42cba75512954fd7e33961d247b7393726e46c6e94e156d5776a89ad3e288554470ca0bc4cf4d2d2b0c01ae4fcafcb65ccd6ead03df1d4d6577bb", + }, + { + "228eabb5ad8b4ff13b10d13b27372bc2152dff149859ba47d9c89b741d4a5340d8fff5858a4576c55547007d7e2b3f94583ea8f0976237712bd2e5481c3988f5387e7ac2c3f18718388795b7b2d44b0a13f3faaa55311b800301c9203a511572cf8f349280bbabb9424070f415bbfe28aef8d20329ee842cef4d4c299e619b6ef1cf00718aab2accec9ac00155be2903b6fb07dfe98b0bd8d8580176b99ce4aa6be51cf59046c17ce1817d363fa63af5a241d48bcce064a438651af102ff9c6de4b86374fe24f1dfa66e16e51550dbb791af425d8fa601c70c1bb90e1a557bfe0dde730b0364eba9d2018ee751699ee219e13fa8874070935b29a1767e1d748bfbe796fe4b81a71e823605d39fa4b5b885f4610c34d1a090fa4106785e7a035a629958ad1b00cb9d36d171d575268efa1bef064fc0a6dfbae8e532466035a0c2cef96fe9f93b872f0cf804811e927b39818189412868fb104e2d56ae62f77031f0df1ae91aa11826991ca7b8af22f130a47a72cce36ddc319b32dffd294f2e192e490249ea1a6f8437173ce6392d16dda888a98bf685bc91b89b8ee1eabdfb1806fd61f018d1744fe8b03521de4bff86d4a811ca2ecd5be668e9c752a6c26aacc0cc9dd89d112785c25ca6a0a7a5267b4e37457c04a0626c8a29be30ec28ddacf47a84918bab164d07bdedae62132ab04a6f2c4e108eba9ab878caa4a1a7509521d427ad7f3dfa86fae8345dfb5e0d46ce3a94dec84f7880c7422468ea74fe0b4825b8c762b34d5d9b82ba96e0c7dcae01718ccac0044a87476ff031e3ee3c2c13f5f375a841d243c38cd9a354b6525527de1fe7e36a6e2ad95e5bbc4c97e85f8cdcd5341da777e03451838807d5dd2eb4fd15976783c140e21cfc2eb3e58e40c16374de0aecbe3e3d41c64417a472cba18762080a2348ec3f441bf229a932ea0ca7c816938655d0c81b14dfbf86aa600d0c68172fb0046ef51f601ec89309d43ad1eacd583f9d205bb1ff1a37a97b44b5e35be4945f52897eb2a74645b01a7f82054cda44e9fa9f9af9bad1a235155718713bacd08d354f3fdd95858db0040fb551e9f93ae399d5dc53a67e88bcd5a02d104dfd9d824cdd5fe262ed9266fc47b7e640f2c9d9c7a62c6d24b429fa55560aa254a824a0858482e771144d6d5b05539cf71d75bec3a22be75655e1ababec4dff9472a019f6220067374dd49252282e4945a407084633ef9c88d14833bd95335107d36afdf56a642cb739bf0a61ed53a6915baed78e9d74166ebc492b517c7c594fe6564550bb7108f43012551e65fbafc0a9874e46fb64b5b7aee0082a5d617a43b8bf9473309c6761aebc7f13b72ed460b522a6b0875b67353c705f99d1d9dc899870fcc90c632aba1fa9ced6d7a2368dc4dd3d4b38a5807415e00de6b9ea70525a6c1b67d04521efeeefc6c591fc5256d990a1123522864a029430bb7ea00dd80d283fdd6d61cc5b509221e28f73386803d97a38fb0182fd95b3b91353c6eb60ef2b3d5c8c0ab8dc9cd9be2b4cf69450d00e88cb0f0bc9a4be82b71148a37237ceaf945ab94c365625f58171eb15c1bb244a87335550d813d28f241a3296520046e65aff3291555786d7c871ec8a2d10d4b44429041c3cd6ab60f0def742de3d28393c5aca92b150697ac15504ee66d8a2aa01a6c63d7c719d6d4f94af2ed1d8670e3231a0e481095e425e6231c43ad36e3b7a3478f6a61563f5aa13237beb8a891dbb29013c325f7f91c1b055fb83c436fdf8aef49ec457946e6ab7e955427373fd9c743acfd4b9609569b591ec79c7ea7276de103a35a4a8a05c91f59e04689ba1ddd570b18ed046f785d7e4ff9fce7115ac814fe126f781828877208ddfbb2ebc919e6d1f6eb417f38bfbf22ac9633f75e58e560b85d88d0e4fad9b2e68c9ebf9675819d50c30c8982bbbc2f41e02690390bf0e16979b24e648bf15b18800aaef58c3c465f38cfd1e47bf1266c17b69523b7868d2138cb95c4bce0dd3ceb7c2267b868b6e12888d5a489fc0091b295b56a1c328b54fe1119aaf1e6d7dd52fa450b52fbfc8b84c2200ebe209060b655cad288562786673121691809366af37b76567762d1fc24f1fad3128b43c8d10e9b6954b2efcbe40124fc0a5b670dd6dd544e30263a551825282aa06be3817a8eeacf31ca8b25cba011d60b78d3d2462810764e4acb566ff371005f5481c9d36c991527143af2c44cc8cfc59c920bb4a281f2ed4d494d30ba4d900edf59e23be2f763072255cb6f1e8b24ab1d305fbfb2429cff8bda303617c034e71a17230d0e860420dbcf9fea4ab48557e4d50797179496936ec6c97686fe6d9115809e14069244d251d4bc9c8931e47e06ec051e709ba1df526b55d959b37a6f3408833aaac80cfc9cb99915eb7d83e26998f0da2492b986fe0f5047b2cab6e6d33a117df21e6a8ec7f394a3712885dab176a4d6095e5cf75dbd3f0077e5e74b1ff8b902072380cf172562884de852ff5f07c55856224fb3df8eb44764ab9284944b86ab6f176a863cdd0e7ab5616a14692f6cbf41bc63113b27689fc2fb145736aaf2a5b26d2bef3a2a59ef8bb3f3e4d360a4251d0736482e9ed7e189fc48c0973b6649988228c2ac72b23826a61cfa06b11f13c8555be6e433d87e20113eb74c94f0e51719a7b38c59eba300089d06b9bc2a72017668e5aa3153ca4282718f1762642e7c1be1f865cd9b65c6387c8fe496f1e60d5acbb78c2f71cea1f35dc955b1e7d1cdc9ca339765995d9e05dd729cdf58aa2a1451b633c374e5b6c2af1c8486ee4250a875e80e1f359c15130eb1e2575c0c7badb2af61378527fa24347ebb12c10bbb36e3c94619556b2c641d0ebb691b2706cdd667f55b8fff8fb46e3ac72f3682661a4bac2391075ff5145eb07d69d77437adec2d096c1c89208ab3e7a9ea6a0ff4a5bc1846b3683bd7c6ec4520c3c95861a5856b0191e4221c9819c67273c66729728f6035e79c0dae8842df4c0c27ada1ad18b34efcd55b94ef120762e87e8c5afdec80d5788e83f0d1533cdd7aea8f27f33266e007b274f6d48c59bcfad607e8b298be2b17322be88558c60033452826778f167f318b660607bfb2f285cadb385399636acb8f5350d819511b5e7931c5f8483529d3ab3fdb5ae2dde0ada918f1327c6c0dfbbf5ed3c8afef171910dd0169022b3cad5b08084dd5e8eb8ef1ecb17e48bf69f80e3db0ae1cc7b73d94b89696e3c3443ecb4c7ca12568201744d1858d90ff759f2d264d49edf47772bd0e0990c14dcf8c8a4c2dafa44dc6e92f4c66b03bdc4f68f28ca2d0811a433e184cced99a8e5614ca83c46ec18b47e0c7ae91037ae06c6d6d0f3dee19711c21cddafb5869416d23c5219296acda7774891877f3f8d46155d39f43ed10500ede3afa26943b83b800b54a9752250ec6ae173e920002f365d692a9b3a2f9b27124ac97b8e81b70e8c0bb7022d07ee97e962810962b03fc019695b5399f77aab414327cfc5dedd51e99453179c42ae85a42f8e06e0cec6f937224dd019c77c5a0ba32ad08107216a9c758138b730bd5b5f4b613f192839514a8621634d9dbd5840e728c1ef4a2c8bbfadc376dd80d13dcb327ce55ab536a43b570789f5c5e135ac0af79b54232613d0e989ae695aeb358c671ae71d508b58a793e19c58c3d204cdc9a021ecc634bcb0bd6a1917554ea3bd688adab8163260a914fc01d7ce05a497a5c5836cf9401cb6aa35cd008470bdecfb97a511c905badd01bbb4d0c05867661debd2162beeccd52399d5a70a929405293916f33ed0d03f8b850f4bdd77b1fb6283118d71de629577383c81cad086f4099ce7476cb787f73c96431a0df4156f7826fce9045f7e7c97bbfd618b845595203cdc8df4638430fac74a07bc5f773486731d8ad29c06695704cbe2882077a85d543551b7ba81b181ccb93d2b3071b1a38f3c762b42df8246aa64cecbdc772830ac79e766fa99e8c65225f28297a32526df9b51227bd368253737f013ae18435a912bc18cc4a95216ce449865e8bd8bc759dce9d4af52f9e789eafa37023e91946952202dfb7243cab7db2f9f98bb66f19750c547a2bf2e2ba92862ab66f33fcf465ffc41d23f0b891a3b28b3f68ea48dde6ad4802902abd22b0d7d9101bd61471c5d88ee9d9477b7cf9f6ac52e0f520c79278da22938745446f1e647ae478ecba416b941aa31f979d0633efe72910bebb8988de1d0013616f31c5da163eb6c07022649ac57422627a5642618f53103adc9918f9992c5b085e10d2744f9934bfbb994a710d6cd387c325e94278f97d5582864f1bb29a1400aaf674ea8fb99a3b42e4ac50418fd804a5b1471eaac4642d4aa338fd3d5d0dd84372b2c32c5cfe7f319acf731a9787b048cedee3833300dde639cb1386c8fbca4bae8d67fb7bd72d1696a0212e27e166e6b04a79e34b47c98502ed0bdbd8d61777537f72df569fe5ed30071b57e8724e98ccb88c07f0458cf32298cefb6ed672b255e581ac756789b57e950d57174bffd3f47bdbe4b168e7e3f1a6df508d4202d327947facfbf9526a9e5fc1a5abb179902d4584deae6cb2900391e080d3f3540b87c3a873ccfaee5b4aaff0e6516a867ea00b4d5e680fee6b91defc65c240614a1409bdd0f49c2c4f3c1d258d77abfc17a749660f49547adb236730e5a7a22fbbabdd8ca079a8efa5b605332db12f455868ab67a1ffd27d1339bdf8d150189cfbf6199c6fc27c05788138a63267eb8ac086e27286b4ef99ee9d92cfedab5ce9916675f128f206a1733f47a597232067aa12da20c7b9cab6575d7634f8c31e9a29948b528681f3f9c13b9f585ebfbff8c28a299a43e4409b31b6c02a79eeb493734fe5f9c1d9e3830572eb54229b5cf525768f695acff48c76b4a6e0936b7406ab69f06d33d3f04946db9d7966ea6e8c50ede5abadda28149edef5223a6938d5c32933070d234043feddbd65c81be218f9d7c497a1ecac30bb9162e60a9bbbcdb4fec4b212050610e2b376aadf58b3c9207860d2650d0310ae6606a8f1b266b6a13b68c3306ed413224abdf19371bac3ea1b964f28996fc70f666ff118c6a7c9f2108d327f5145919c03832f754de35f5979ae72130e39126499037d6fbb3751cbb4843b05d9dc91dd5fc1429da491f72e3069313ea243933b47109af247fcbe0c70f9024ac5a41815655ab309fcaa282d03596ba59cfee0e40f7bd657689453e98d562442fa4c585f970b6983a581b0b8eb1c5e780b3f5c1abb326213c6b5fd440c2187066ddf55f4eabf88804139392c45979440c6f05b7222bd95e963832d7fa4a4760273cc075e8b8feeccb917e8feaf7d3f766d9ae880487e69bc01872ba62b91b8af5dbffdd93fdc95e8f47ed793fc070a5991f2e9ea61439662dab218f643c1959171937aa160008a548f51f87b58f2c4fae5aed556f26bb9cd1dc2b3518458e2f5ec5d974c6e11a0ed639958cc8c1db771cc8cc8bee8727bf6452f47c9782acf548856a0e67841c3dbdb1c98572a4fc8e6cc8195a504019b4930d302a90dc20d8628ae6c90e0206cbb3d05025744db4e115cd3b650e5519a1624acbf226ebca8875b05183b2584e65289f8b9cec3f7d010cb9671a0e80bb70ca8763f1722d79e8decb6b9023baf64b5981e745c06546cc1e", + "ade72c2ea29cf829ffe99c2d63840b2eef9b51a9919c02128347d2e88e9f063b86326928cf6252ce4beefbae7206dc61a22d0b33c90d464d551835e3b73c1e3d6e88663deab80c35a607e4180ec079b0ee84e3b7922904e7423acaf976e837", + "43348cf32211d7daa300de8a4218543c8e3c7373ad10950765c39760f80b733c", + "e4709d225a552e90fb357413", + "562050bfb40451f27b1181c389508550a0f46b53d14ca73143da9dae3d3d2b466e9618db39e3219675d2b6eadded7dd9c741d7c9bf3c5619a521189607acbcf6b3964d469d966fa134444aa06d80749c873f0f976e0c5efc5be8d00a2729f03eda6a7b8630575df8b3a19388ff88daf0d00bb3e7c35a525ded90a4511ce815fe6c8904406cf72d7bfa14ca533566f7b54268835285c5402e22a63f98b5d90c86dae0a76d65eacc1ba85b3f5a1499d5f3432dd5455fab9e8bfbd266e99283c2bddf9b556410956b2f061603d1fc91194766f90da841699ba7da3d53ed5abdd8e98034f8fe734446d92b458a731aa4c578552ec1ac5d1baaccc4153a67b48a290602d5f955d61a08436b27cfb0786a80afef76e1266310a42d90feeb3bcc40ae5c4506432dcc92f7e5758ceaf277255401f5c5f4b10df93a249e38edd9effe7bacdf7fecc451d3b2cea77c9bab0403450c41929775b8c0ace46f6928f4d9cf3adf86832d298ea32b236d3201464e2ff506ef01da0e1e389e26e2b3ddc553b369b48d1aa5dd43edd5cab065e276aeff72a4c43206063fc7eea3bcc783ba2221f5b615a7a43a75cecda6bca5aa159e9208bf66af61e2e465c2daee630c4c62077ea6ef0e8b4b4e272d4e93a5f5284f9da463e1a60f815a8a31698ecdc09dff2b62f00e37aea5fd4b07a110cef27e12466c1814d3b10017cb9b8e12f2f38f10cbe31296de2570d5662b16639fcdc05db81e0d48178d055ef873501148d00903ec771400fa4873c5579dc3265028f531538f6dab1e5607a15c8b90cbfa4835107cba6f453bbdc71d08c7e423f58b44be38a9c8a610469f2551ee6177edf639cde35fe8e02f76b7ed106d691a876a4fda3b42d8ace3e0d3d4e026206c5d7d4d56fdda9dcd30fd7b74217fab3c617903f1aeffb8363443ed128af94c391810e327704d6f655e57dece97658d41e074029823850ddf7c5937af41c64465046d8544bba65c691ac69121bd272107f7eef8cfdb6a25da5da16d1033cede09129d51f6abfe63905a6fba9a64d7832fa35825447150595a60163af848eea878fb31a5fb97b1859efbfcc8586eebce8cfe64386461a9b88aa5efc1db43c64dfd5d4a45aa74803fd178f9e16a3f59acfb6e13a564d645cedd73890d0a82fb6dffeef527694a7cf2a89aed9750c3675a67505bff77de8d046087bd39a85c90aedb085e99baf04c7e3bf92e350b332da1b8af85550a00d68904ca426da61add864496d6ff442bb0b848e9aa463bb0c2085cff1a83a47d6f702bd184cfb5c139752754c8978d27b58d364bd88722b9097ee3a6ae28eabb14ca7c31e40461101e92448dbbc63b55cfe56efd078d0058c5e6146c73bcd949c4b3ec9f881b9a5f7b41ca83301261e0c674f2d35d96761baa00ce0675c082bf73dc52dc726a3e605067569a372d2bb47fc8fe1e74f00078ce6f352a6d9d97fd2834670ba3a45aa6751eafc7ed6694e1e07542860c8ea516f296ee901a3ee16b00b40419c74bf6db12c7230325e85a918f412bc2f6469c1a13a5aa77f028e327749efd05b91053f49d9f1edf49aa552c58c68257233a168db60ac55b4086ddaea275b078869cda7b69493c4b371b4e9c8361357a7ac7d3d3bbb464c960addfa8df2b208b21b090d540c440241598212d33273203d484e0930e22469c2a8e866579a4a2b3db8f8344dbf8baa1b97be0c4d976f6aaf14cc09ec52630139b894b2b6f4dad3a205a7b286253f1522b1d6e43bfa37beaf06f831c6f0945cefb2593b9b298da13b0d910582086c5d7e256ed4067bfb476dbe01bcddb437d46ba716d6ace2ff9912c8e460ad33ab3d8f97b7b08dd4ba9e01968d1949ff85b4b9d5b8da291fc0f90ab1eab1d246f67d76092b7a37528ceb388dd76f8a8f0aabb7490f02a2c8bc6498cb26350d859c466dd611bf0ceb81a8b7899c67742c22697ccee21c4963acb003d15c1a2078112bab05595917584e417db3872a0ff0a29138bbca7314449b19827525340370d7e48fdf9f7c6b4a280e78d00775a291081a5e78e7a00ff915015dd5af5f0a45690baba8b1b503bf85f326c23136f4424be4a559aed03fbc81400ac27a33dadb2155d1704950d98043dcd86df1eee78f3f266c4d14deb8126708f74b59aa15e8b497c6a52924a473f999aaf0abd3d148fee8503a1568efec7bfb0bd463402f563e4019cc9c9e1eb498aa54dcb659f43b86df0a34de4e51ec558bbbade3d69511d3fea2baf44f67e85ada7398d7f72ecadcd9e981f82b0743ed74bd33088ba4cbc85b0c99dc5382c599706dd2d51aa9f470c25a98e7e8248dec216a155495630662bf6ba0b7a4baa2cdad30e9ce3e1a65e3c23d69d5f946606ee8504dd70830aa5a8ddd84f10e064695469727d2efeb46186c9d3b7a170057636f05b9ec4c2de7d935fba504a1e7eddf7a5a95226b253b0b9eccec976ca3c57599850db40c27a51ae755c1f30d392467cb74e5c8235861d11d0f8461b0e1d84f5718d64ea92da62f4de184a6499dba473e82b3d197305de0e494f118a263237c7b4c0652327977edb427ccded35552c00a5804b9557ccf2bca2484d9da2c33f6c1bbf2c666ea10b4644a21e3905e5c4eb417ac3572e783428d23dd7222e75c356b99e8183d033034e29e618c90e66ec2f1e9fca47d82c1cffda8ad14c96045159d9437e91ecef41d24cff89009ff57e18c1a422860aa9cd31dd2a85b07422c72a5decc614a9742e62a4988f394421b6918e51c2412d749bb53b1e8fed7b2ef0873ffe14fa77bc366bbd5fa1432be465f5e25266c6c12b55df1f19b1a491acfc5c9019f122c422243d751d8eaa8ff721397915171556e999b34425f7d3ad6f6c3323b8133b4618c65ac16cb5941edc979472734bdccafc73c08939c0b1e306ae3015faa9cfa09ed6560269a1dc54c2c046a12a178144f4381f7b6fd3fd2d28f778d444d9f7a0dae00ea96c6969b78ef326a962d23275f1518f0e6a2469440612f3710b53538fe99a6179471be8c5b2d682ab3e9a5126e41ed6de000cd9e92fec3974e0f4cb2d2245d03d6ee80d6a793b16efa829d75c796f34d4e918250f457703559bb48ff78f0896be1bda403b7f1fd6a319d68478ff70d88238f2b8afc7d20e51757bb9db3bffb35a8040fc0db913c4f03d48619af7fd24cb8986b3e139058be3cc253b3de9b3bb3f8dab7b8818638279b2e6a0c29cfe16fa7250d3c74362ffa07e2977cf562140fe28afba8f61d81f7c73bdd4a2faddb00752bb049d0a57d05c6475c7387e6716ee31974169930c9fd830cef138659cf56f2212de185186c3d683fc6b7fd36e7821f69d0de041a569765066dc4a1934870a7b80f174e8f9e484942e62404a42b21658467873865ef94fc262c231527f39e82dfec91215947b99567daf75c6a28073ee4e67d4307e4b35b46f85433abd9812f35438b34598ff3b6dbd60b60747ad64565391df45ac80b272d0141702ab807fa27c6a6ba2f42c3facfae0c773940cb2943bb1353b41298258bc0d07542b69483e17ab9ce709e4160b80a0968dae9af8fc7c0324c753ca4a11a6df32dfa79a87b445c988154bb3c503e6884cf6d8f5e062a16b4ff230fbda109a6127d35e3bf2b29bfd3b18ba275af773b1981d603300035e046ef023d51874aa105d136bfcc9c7323bd0513a6b2b397ffea71afb7a8d4695411d86164917099eef504f6cff3c5cefb88f23f56c4ae3e2b09a3f353fa55630f45f06c29e8912e8c3c4f493f25eda781680585580595bba43dca9cfd400d9eaf5081d2c6697da59e012dfd0b875336b88fe16609c2e9876737b9afb868ed52417ed0c6b359d582d585ff82d98edd4e63c6b65cf43d4f69eee2af4819157b8a433966953862d1ff2c6d0cba382644a1b0033ddb7be3d1fa9a204042d7b821b293bd659dca980c108ad1db740800b9bd2fc1a163f9b4066f7604f160a7910bd947cb48ce6c81e680fc6571ff0cd12a3ded9c8cd560970ca5cb480a70a8322d5072edcd257604eba8dcf55f9ec97ea2b14fdcc72fbf615131836fb14e42b8d7171d0a06d2fb3caec2e0759e86b0d8f21e312d9211ed7fe0b48669934ffb892baf1db9aa457c07820723e5446420334bf6479f2099e01ef8adf273adfdd9ed0b741931284515d69c211cc2efead8339e450b13be71b35c36c1f00c2b8ed0cfa9792e422912e14b5b1455ef6abdbbec0035480c6cb69d21321d12ee19d528dd48f43b142cf0502eae5304ce52b7fb827552db9ab885b93e83d56a33346135aef11b7e48efca7cd52e2499a7edab0bd0562862187ff4599b2446bff11c37181092fbb05d0e05220ca6bc37f529d6599e8c29acb9f25616c27df291d4fb07430188e6470df7002f73cfe5fe6907dab0b4f90bb58130fe90241c29c6063a22c9f45d032b282eb92c93736692bd5cbde2a17552e942b595b08e6ba0c91a03b9079e9117fbba8f26ce6c5d0500c69bb6e22e3562a50baece49109c2d42b6714250665afd0f0a7e951182012f21aef4b917cd434d9ca22661437608e32666497516be34652500def6c28ef8f56f2273de5416142ce9606faf7df92ab779ed6aa74cb99bb1bfe758ffd344e1d31f479807326d1a7b98f6811e275545d69198707b0fbf027dc6a5e4815d62ef191535569a452c27c4e25ecf139df949d70dd5935bddc04f33b2f0bcf5073c51fc51c15067963a20569b5659f0e7413b347d6d5ee38a92b7e6e656c199149f07ebafe5281db6b1b2ecd9e0384b6f5a8e27ecea9a0249c61b16564964054f5f9621471a98de132e102f518c1419829e2ae2c8c5fffd1270f0a0b33a383437b0034783d50bce8bd7420c059d16364eecbd55b6ac8df8a70382734d8127f4f5895cc9e508b13c000ea053ab59b87ee639745418ffc566ceebad37a17b842d24d3423ac3f086142c622eceaadc4106f8c90c5dae1f52f407fa0bf1e6bf9385cbcbf3b61006ea3b1e66b693ce704577ca9598587f41e05d36d1de424e0e51290a5f2e2f99f1960c0253a046a49b19eef249ca2dda2af1e8dd78411088eff1e9c23c31bd20abd4fc9e7eab19500827d202f76270fe9f90e95309516343e0fca48e5a12182e91c78ebf2cdd4644629afdc90bbccb77546cd765135910ba1cd8a3e3c00fa77e585865e898bfecd06c01a0a4d7be483801099c61941c4967154af5620b171b426cf229df59d2944ba50754140c3f305c16956953be376fe6e7cf31a2e9c276bb09cc24c4b86b2b26f039b0d8511853adcb7feb8502e7641a34e3242bf2c538006bb1983345ec3cacbf219ef10efc1681d52e6e1b1c60bb556b6b8a63d1d1f6869077841d1b816f3165a35833e33d39a8c6e62a2f7c482c395768fc6a0e3cbfc7a1a6d64da53adad66c8016f76eaa73df1b8ef83012ecbe75c92a8e39b48169433f951a539b28a034d5fdd00639a5e3e17ef14dafe869064d130c90c68be4d5ceddabed1bc94e97e2cdf7313f780cd6e175a9e3eba3eaed896fe464073fcf07ae7b5bd41d58c3160f66ac95a76fdaa7a8cbaebb304fe3c8f03cef927a1182ac2281c3b32378813b24bb99e42cb0774331ad78b74d46b8ce48bbf4ef8431a82d4240edfd61b910c38570ba0bfbd4a41665117e6d5f5a97908462e62d0b76160d06aa56cc6e17aaf4607ba8263648f2a0077e306c25486f5f39a75", + }, + { + "2f6210063cb3071b3d49339185c2cef8357b08ca826d8d1acd852540c16540f1c850f70404fe1f414853d3cd15a1c64a1cce149e3ca1b80926de4ae8438ad90bdad010decf2f201782f3e49794aae1b079f54eb59607bebde508a528927e346d4e444b1d736b34f65e198df2c36fa23c64f1f1fbf8b0b8ddb85d054bdb39b8297d0347f16f7be7cd9474c058e36294485386434b36fb28ee582e393367f15ce5f5a3d6641fbd31b331f10b1554a05da726a0f35c9b1b4af3498426b17582966a266cce452900f85af1046f45a4ccedca6ce02607fb70fa45f420f66aa38cd4c9f8a30e21a3067b940aebdaaeb7c77824a79e2ba20f26e70346dd6de96942b261e5c08288c7fe1cd1e9f680a0bdf8c46497f007a616eea95ccc17463559f8973eb919c68017e25100d9d1a196ca65fb615502076bf0b0c8bcc70ef22006895ebfa2243fba0791bae0625b762cc1718d1673948264454a200c58122d5e9b8b1e3eb05df8b7eeb297510e0d7dcf7f0be5f29f6756e4b177f109891e6825a9866359e35b10d20da7231bb5a0ea34abd0264b377d2fe9f420f27d3e5aa2e8e00541c46052966ef9b989ae5974e2054409507b867f647aa057f7deb19ac6929f0856005aec6e53a5f702fe6be403afed532b73d38fed73e6e551987f182a1e20801e7a6c8ccd1184cf0fefb4139fa166ca15395902ac40e7fed8661602853682a3b0ee307dffb44d0ea3012142a2880cb7c166ba6ea6a16c7e0882808db8023068f060e5ef1432fdb8331ffad6a7078d686d47d613e94291f1c4117e7c13aee4030fcaf223fcefdb300ed606b5dd931e4adbf45dc437eeb5fbff337812e15c15f026071423f6ef5305c559baa2ecd8ecc7cd498b043740ff3673774855d45d45fa64591d5b4970600ec91ab1b6f39d7dc0e709c41e49c355bd3b9d120ffb57095fb127bafa971a086135b917285794e83e9dac5ce76fb1a4aa4fb6b94a0dc3a9beea64b8817ec1e2b37af9dbd18ec30f2b6f6c12df1db6896c6c43b67a066038f0c4f17142b254f62c4dd1fedb950d07047919e397d06d033cb0bab6b61aefa6dee01720926b16beb9e8bc947dca9b8143b565da85d2dec182987838b267de9047f5b0d961c7971aaf54ae2c1e4aad61ff123c84e41a4566b2bd9e64247cf46b72a444d36bdced1a309b464ee5f4afe406eb68eb05ae51b76bf01b906c0ffbdeb440b11f1c9e3a4c3a809a1f7449047b356c663a1ab7f286a70d16141d11f2d151a4f06d422ab97cab539c1f9da09ad20c000c27b8fead5f0cc37329d466fa260aea934c154dc9c0a065df3d057a0f117a1c38321ae59226a8054f7d6b49a3753436c249838b0924f0e861f5627106dd8d3f0fa724a1cecda71d4a1267ed889b234ae4a7d5edcbc5d52cba389dc0152aff24d224c6a0f16dbd3b7f242807bf4b51a3f22690bdeb66eaa59e8766b3b265d784899d247a0ae1b58a06dd91c529e3691b09f9d9f55fc39afd4a00b0fc668880ef25a46a30861fba8cfd4b51262eba4138b41a2d13ddc71128c8c1242e49a51d6f49879fcfa7595ba4a4adcad3670b0b1b26382f03ff402bc70150f54bf513ba3e9a590e41b269e55616af297ebb3499e16cc8e46c0810330a602955553c0f93d668a1181a0bfd7021ad9a9f68ce39493b012da70a3dda149d0369f23f788616e0272efa322b6a54d804f340d32c890e2eb7b538f48f4c9293b584d22d0ae80d321607644271b81a76ac5b49d8e457069b0c3e909b8a222e3fa6016cb1e979e300804742f2005c68acb7b1849c088b3714c9c7af54e9de9390df0041c87924c8fa6b0aec6b6754171e059cba0d27f221f0b9d044a3aed8338dd8745651981e4b0329376f908b86ae9022699d495bbe3a148f7eb73d56eacb2e5e2180f63fcbfa680369f88eefa71f1210bc5b6b7b957f0a1437476a2112998033197673e470dbe7d9d476c97b95db8b5136f6cccc75d6e0ac1e4ace30e34e64fcc4d7e135b2c80e863ed701d3b28c25e982f1b5f8c895a4e6df7216c3c07abf8551a0ba0469c88aa7a08c7b5218a03b9b91f0935985373f65aa56286ad0e7ef2288a926f172b098123c136455b3a0f04590839e16bade7b6434a3cf048abe2612684c03dafd9cec39af508e63f07ea881014697bc24122058b5ef5d3fae835216d055f0cdf1dc06a12c95041d13ac9e15f235d11747f16ffce1cc3b8f508da520e395edd471f3759d8879ba9c2558b1188d822fd4739ed0546b0ce3bb9988db7c1dc8518ebbc62c4440e6e0653f917dcc13aca1864b71dbb67dbe7117474c936414e4f3cfab1f13eb05f3504484ce11977ab21ec523f97ba1b7ecb8fe384b634c30561cdb752fc67a2316bfa7e4d03f5f825d24a556a0460d8cfe0cc54a6f117ac52d553a5d1bb48031732716436675c5c3996b1939b127c6b0338bfaa29c7467cac9a127e455a715c9ce2b0c35a0d2f83a3d1273ee39399e6cc4980e610c752bd51652b96bf9cf34c7fa41fc9b13f5d55007483e4082ddac4675baa7822fd257452411b01de0e5e5da26e17539d64a89dd93c71d15a4c95b1a83039cb2d5f3f7fa04a817e48dfcbfb3de34ecb47f7592123caf27e17982fbfc8597af5b8aa6558f4e6c73db69328e47677afbe6ef8df82c3d1f0db6a108b2279f61822908d7b856432c32ac5ec0f3c53befab2a7ca356b9c2636f646b228b0a830d348be4ece2271814d477d4c73c0fb6e83a338b90ec4ef45cb25f7e3d6a014a9e8d2e8a6f55a383291a57f15667a73ea1daca31c7182523ca85a107efa2518d2f7f179ed4ba21fed479ef2be09669817133b2384bd85b155dfc1c4c9e6dd9ceecf06cc1ab8ebf7f07aeaae7441468b5471aed93f248a84f44c59be33274b11f651de010ab9f8fb24d3a99914e0147951c34280e7dd15ec196f9a4c86e55e7d373c7e31e6672d1b3ac6a45fa6c8c9088c0b8963d89f4ff1feea3e85cf9cf2f6c97128afd845bb131c6f62b3282bbba42745080fd457f1d3322058f1bd4be876bd01269546d1a853310b165926c1fd4e07054deb5d3fbe8f6007711d435994005aba95918c3df4cd390b165fcd139dd418ebbf661b6de57b655698a8a02ca8fad73e8c536c7110957c36e5494a831d536eccb97a2a9ef58fe58e2885aad170720ffcc57c7de601ea1cf723577a30aad8fd544317e33897c8b6c04e5191bec391ab990e197f10038c0726d371677e4a54c28d7ca5c6046e7cc4acde565b91f7f72af6109a0614160d3ae97e9257b8f71a4663b00c681e793cbb478306e97b0e04711eae7722b4845dadf2fff5bbe71ff24acffea2ee67df99bf62a098ddae9d4ebd3bc5dff04a2d9e3d1d83e8f493db3f63c9e24231b1dbe1147c79f21b0730c842f6983330c5c17dd34556d7e932074cfbe98f2dab5b0ebfd778a1e28fe2bac2d942f61a08b787ebfcdeb3d600bb130ca4922a4ffd38ffc4a1a1a7218451e45da4da67ad81ef898ece3d54cef877cb9d09f5dcf72eccbbc06e62f1e2b4d64059b0a807329780b155ce1614b68de04387d6108ef4dd3ab54b9da72e528d6eac3e16a360ae3421f3f23808a8b5e8ec3dbefcbca3c9f76905850033d78d9283bba9272c475b4e3b4d7643e62c2cc259ebbf168f890de88e82f8b26a7654ee31fe055e45609c70ae02b4942ee15678cd158f4c9e8d351d102ddf7a942458c6125e1457bea0d86ca38cf0c26e474b2b5cca77eb57ad0867cad7d25efc2b250e79396637ea3e948dbb855029cc9b452955bd04ad5a0d0514d4d773c0f298df7bc235a3ac64383a1fbd8a397a158e936b3ba81895a51daa89f51e4ae7a71a53794ff715a42f4fc3dcc9fd56df7bea4ab782534d3760e7b15605fc4dad16911656983c0ab77bce9445bbeb1537c55fef57a32c8f1404306a0a2ca7b73348cd99d0f9948875531cbb0ef7c036cd201614c33293d746c44140e0e8f82421c5bdf2bf428b249597df949fafdb5ccfe1618323f56a6ab9abab9a84a3beb6696ca918af244d34cc1cd95bbca4a87c860a0fa9ff6a04a905b0338a53f230bd5ee9c60e0e0332ca200c15dca0be5936b858d0a7b2e540b8958432e9767396c55d5cc35b60062580023b5cb2f9a5e9a1feba59a19f9a5a251e9d0e8500955a5df21da95213ced2260a2ed8f3d4b295c36cef750c89cf21985c302d5cc577aab7855409a912dbcf1d0a9800df4aa692a78607a40fd6d5a82305c58fcb3d2a82b27e8c5b91681aae62a2bf31ed55c494dbdc38eba30e83c6044945df76705228eede8470369f2e9941ddcb2f239fb3ff6bfcdb0efb5ec50f981adf0e8b213769ffbbea364b08cf8cd69abbfa2a6fe9865cc48558134a57bb5526b9d047e14a379d246de82d3d64f3c810ede280c768dd8bee25af287d5a8d94045ddbf5981382bc716ad9aedfcd66e0ab496172a24efe80649db8e1e83675fc8451e22c6564d8d6dfb285af7fec802b35f19dd8308c68952a11770247fcfecc4ed0e8a445c17b1573f0b4e3ed350f13269ceb572943fc435563459d5044699f1542335b03be6077af156b8c5a6a9f71078ad820cec4642427a9b187ee1b17036d5a5e6108cee8a7d444342eaec3afa64e77c71d3c2b3153d4e2dbb30df2b66b4d14cc45d3a4eda7e911d697e5763e23ee05311a20626df55549b8533c6ebe79737abf472f9cff08bec590943bdeb819d3f923f45b81f9a0cba1f3f800a261842d10cb4cbdba456c7fe5f0abb4a8b58891d97cfd6b669e2708922f1934809d51a1589e5f12e3bb82c9ac3e7e44e3f6e6cd63d428da624fd2f46eec38ff798a90d228efe50c9b67c63796347c8a2b53478f27605999a03c8e1f18b70e92419f646a7f49670aa12d324751aec17d0208fc296955b3098241189af8172d39a6819415cafb107c1842b369f174d6f37dd31cd728dfd0ab10f93609006342b6e4d6ccbfd1ed2bea2fdf5411442b04b1fe218916f159b20242f80b535b4e0a3024c6eff6a40bd0d3db24e51f5ff9c14e1b4a650ca4170ee70f0a3a5a58349a7d0b7a63af86347351696870b95231f76d8c5c6a20736907726341dcbb76672871d18c2157c094b929fd29d34f5bcaacd82706f89a60000cd341d98eb830b73a12335b69f3e0131ded3ce12c98bbd960d2d0696d40696a13ab43925374498d868cd8f070c9039ea6407fc2d92b9c39fe7c935bbcfcc5c0980952fb7dac79042951f49a1af828b138a87401c4104bc28cdf1e39dbd3fa63dd4d5f5ae9d85f032a43ad353bc5e6746e5a76326ab1f4e79103116ce70bc0b459200f32f85e461291e347dda92e421778b849e37a3ecb0b31ec6818e828dd3148dc74313aba43cc9d8b9a36a9dc4e229488060eb6c109f8ad6201958adec6d3bb3b04e5e558a272d44cb98e18f7a0ad8fa6ac3667a62f150830aa930f6166baac6b9081b44304988fbe1698a5b746255de26bb5988aca90bb6523cad68a7572f615f4aa58f932d8a749615cf0a7724e99de042268ceb31433e6df0a61547d576a6201b36b348c028ded5f7e94d1cd2eafc141088ff42cb3dafbbe4c402b93aa9d955df8d9d9fb57c75ac65c2c837acc44bbd4d4aff1888aed46c73d625ad7fff035e8ca0fe411c73ed8135b6b8e17a039ec74e9de0d64cb442bf8a676c0a666f68f21066332cd921ae0ed766f0516a8e19b82cf98e78add0373737a3419e13aa902310c44feae5fdf8bc64e80dce772686a31f141bcce452041bf545b908ef4a2b000e7beaf378e2afdccbbcaa42e330e5024400cf2852d3444718", + "fd5008477b0855f6f2486fd4f74b9fb4f6e19726c6996bc66893183bd76054d5b05c1c2b64722256ba912ab2dcca66d2abfdf972966438fff7513acfb18ea461eac08c4e32aea4ed3fcf9f1c9905ee4402e7b6984bef974340d212f160b6524b76de99a98d3e96cc0d35e8a63ad7ea3cbea1d40a906c4dd03e5fc19e1513e9", + "390a5e75c9ff4ad38fb6205ff47f209294337c1f25ff54a3c01eee8e1e220257", + "8bf183347ec1ca4bceff3374", + "19fa2641519e21293094e9d767ee1237f9e0715dc57172794867c3bbe2cb647f9b28a8d3f85c0ff557b91bad66f5ea16e0107757b0277fdd3ca05bf47c19bcb92a958a57e8c142a51af29bddb20af84377b6db65f77494e0dc4d2634a776b3a5d777319873bc0dacbbd4b9ebccfae849fa7e9769cdf54660ecca0d5cf4fa5190713726d54d02b3a3f21857125b8a808c0ca2f99d11dc430ed5113ee49ff8f00bcc08f0370dd510e8100e1285659a7b2c7457a6049f2af7786c4db1471ce5bd164e11c7a2165e83e03a135ae2b3429f82f677de044a067e99e0bda2d65a7270d629c00e1d528212d3aeb2896e58ee5145a93ed06a9c00705ad5c5988d3a192304c1d17661d45257c5d16799ef70771964435b12e3b2ee9d5b467c3b1992f45b7a59871b40d8daa1c280747ecb3d170257b91df1f549ce6d66455b5b6f60b7c6e95c92a67e20cffe8599ceb183de53f1dedfe19bae836447af8e053ba419660e0912cad064d6125b9e978e8d0d5f28f8a4e43ca3cdf2d4c0e9a11221d8184e9eb6c90761b0beac82d0d22793279aedb1c7db3632adbee323bc3bbde4801152694831abf5676979af26af7dcbadfba1cad1306b635840cbca76c558b37db0803b4c12befa27d16f21506b07ade4a838d6beba1816eb29ed5e3c4f132a752fc747bd9ba879156e87e6c1584e911da9f796e1fa4a055e427272559e4bd6d0f54b8257100f8a55d84c27b702bb1fe2f995425c85fd48b0a0610db5b39f7a5031407a12dae9f508b21b1378f14952d1beb2dea81d016b2d9b7f1a67b814569b69c0e619adea02a8683242d63a11d3317d060e5b4d85df5ad73127541ba5314715d187990735aa81f438f8b94070ec506ba536274d98b766c1694e54367891a602b99e370425b47a70b819277a249fa429c5bbd0530267f987e6022f25030c30f3baeedc0d13c95f3d5e4b2b87465d179a3a23b9f9e76a42ceea55226ce072f9488392f40621289124d786109d2498e74fb37e2ef466fe8bf3016d96e34204c32978775765aa80461cac48518157f86d59f6187bad4ee62fba1ddbe166b29452f4a59af1e057300c353440644a8e40ae8171ea028be2fa315804abf518847c7945e8228b7766cfdb08d3a3116b59aab8e94b6d8c8c9ef442c2dc7f923bc2cd3e5c663baca7dded976bf191fe36da16948c89c385fe71434f4aa5dd15fe0e925d2459e3b068b9d82a9cc8b8f9786bd9f5fef9baaaf2d67027d9bfd58bb2c58ec7c746b747ab62f9242e4b53ed14d6fc75f5280eca0de23717c97a2293826e19cc8eb47f946421516c349dc4ba49225b91e4e868874bdebd373700df1f3792aaa140597e58b88f90e163397dbad3941705b53d754e3e0c9003df836a7fb8d23f40362fcb5f3947a4281b24240be4ee89aa8e917b194f94345eeca224df0adc15f22a617b6427f29410bc48ea3f92216163785723efc36301d23ed52780c6fd7924bcfaa03269b13582b7c7ea9c0e4a451f38a469fbdb585dcb7c81452da77945ebe27eb26ff6e8c7b2decea289aac5af74746dc257c9bea44a0847f02c4f586e1d76f39d5bf952355a0875f177a666d1d354ad86ce5ec0aba2c2b20cab050eaffd31095395132f5af80a2d2d53b77bda49f948bbb37bdf31c8a690476488e14e542ff6841e7fbfc2eb84795696562d079dc1612274b6dff362567084f793f0bc2dd8de23392d05aeeeeac6991c9f74387153a4b7da94790375e336a00c8293bad0fcef2dd1880e7094e2e53f738247c860780ebe308410ca02ae409ae720e841f48c9677acc6e7d4ccd18c219c400f8b7e1257f692e09eaef96802b17a1cb7d93eb81d3bfcbc7af4cdf05b98e22556b3d1a8b56d6d83bb5f5724696f8f329839dbe477483ec3c09fa2e0628faeba1bf285c224bea3f6cdc7bbd768133c6ef1da14f248cc3b819b196588811b073a7291817bd1e89c65760435d8d17cbf9423744a92143e0f956e2977b39c54fdead5a57f3a04a0facca01bbf44d3b1fb9c4fa83ae1046985e3f26aa0a437999004dd8adc04c5111759849f919b93558dbc559173a23b069b59f800096d9fcf077c7640f59170bb9a6fffe64778bac272365d27ea62aa956559e90edd3f6393cc8775597bcf7d91990ab9511973d948324a27261059e93f4b5dd2f70caf12e1a08e0493cb05588618764391f355379578cf94dd33e616136eea997ec11c0d4ff064ff51a767e5558433a2e3a9a74c232d8e187f47b8cca010709eb9fea0dac8f1ea53bf18822e154ecd929c83b0eac366e30fffbd5ba6a46d734f58d26e7f5df538e18b3d827884aa857a680823131bcf30a76f1a555bcabb17b02b53aefad96fe76f7312da69719434c580d3ff1bcdcd594e6375935003d5d732cc577e11ea2abb1d04259f50aed4c3af9866e8c4a52a09809046ee330f05c4403acbc297a9416c5208fadb31ed4eb7a3b01b87bf08c75cf44c2b0df84df30872d021d6567ea649859268e5e1b5b6405e1b41e350a32c1af13722959c17c01b52c42241313b26b25995a1c89a53e248488724d280647226195746901929501df36d1e94815d7fe6c4ca2731f3181293217f71b9d7f59c2474856972013924ae4796db4cbd22d8905a6043c959941ca6b556c53d1688c439036c715d33a47a7dfc2fe40e53424c5093020d2e85e4b04aa4c704ea5bfe5a2384878da38319c59d41d66b6add2a443d9ea11edd8d18fa41004251653857733b388b453943eb33df93dcd5d549757fa2967ef0f9a5105836c48826c47fcccb2d9bc349032b286962136b848632bdcf186a08cbeaa52d195efcfc3a440bac154971d11ff4994f293b14fb8c3214ebe7ab8b3d0f2fe0b03ed7b145fafd7730a173e3cc1847f0cdf2cf629f5ea81a07bef716b1a67dd9e3b7a52fea1aaa7a393f53b5bdb5988df78a57a9dad19a8253316835acab8a6b9a9fb42d97bf29b2443322f46de386fd82bd3453ed68e2370c6eac4497b1bde7b42d569c452f377bd38bd50fa5a6792ef5c9ec6c647001149b86fedb3e2f18d4271e9cc4801aa16ecddb31b6a795fecabc613bfbc8e4f5636d71e74595c841fd11b6a6bc7f169317c1added56b82a71fc36d774bb4d661685363e9da5fd2e1f357006dc5b5bbf8b42ee3f869e75a541586fba558a8f490d641b78c27368b9b4c2db046354e9358ae9140e91cd95ebeffc6c0d2676a3ff4ab10d463bf32bed97023a80a79df191ab9858c43537a03072a17c30b1bd99efbd361590ed6b7d5b0ec4e2326fa35904ab9a48596f44491cbbc0112890f9386ed04dec30126be359a05e99b2b77fa2c8f6b7460a6cd590d71c73b2a1b23312ff89306b6e41c76ddc0a099bfa79498e36ae5cf0c560b8854dff32d2b690ce0ac4aabfa723ac6f2e97ad1083235196b464ad67fdd649aec01695d55c8b4bb198f30630ca635aa5a1915f3718341bcfd8b522f764015fa5479004d28eceea7fe67df7ee24a97a9708d528b89589f1899f13242a0d00f7464c3cdfce213699340e754533b934f4a8410224e111f31cf8e54d7b5e90cd8c68bf96edbc8d183894deefdf4fcc1a83162a3f6341dcd9a9aecf171c0df28257a68b1af1b67c54c43c3cff27fed89cc64bc46e23a49ec74a9efbab7981d9f0a018247441e4f0f5b5f68ba9325582f92de4cca4a5f878a0c5c387581e64324e3246d8f3205c838a29f1abeea24446e496421f0e742d411adb55f70272ae4a992e825a3d327e44b8b3762b25aa451d07eb4eac0322b431fa676462632daba2aba7bdeee1b438f051d21d4b1897e2ac2f95ee7c23f9996a805de8fffb3b30b855cd6c5b84c011accf4bf94d304d944079f04b5cadf8fcd6751c22a0f9165ab98998b2d89e6514641f1f3b91b8c0bf057d69c3d893fc4e041e06a2229e2ee58082ffb58cb920972ede58483287d0ace94c1becef26a410b93e4ff402e61dcc574b790d49679f18f4e2004f8b7cc357faba34a80e56821bb5b883d1a8b49c6605002152f270bbc36bc79095644e29ab08cc988deda765d67e4fff12b726d5de135ff9d0cbd9d5f9d440e548836633b93a38330d638468b59a32642da3375cdf70b062d14b46a78569c24a706e179baa2058dcae5c61fb6cadd9e015b017f26e9dbe3e6366cf5f1ec839aa3bbb21dd6c9b8e910245fa95b09b7d6cbf08a4c6c84bef257a70389be962dad14d97a893c128b73bf6580689e540d004f21edf8403f36b1ad7c9a2e83ffceb141af59700c316c8c1e3347187f24819c2ff0c9f9a2360dce354f3374374eab1643d2d8831310a8e3ca6768200ea7759822b82f7027cd450479fcc7f6d04802b15735a137ad489f1e1ee78434a253a9dd16684ad58fc91960cde6754f82e8b38edd5e798fdbbbf8fc2e2380a4e21dd94f8c1c063b18f29d8cd8d89f65deac5640799d4ca2caa29c1e72ad8bc417490d11e4051d94956fbc74289857e5f8e9e87b9a2d83074a994de0b10bc7782f6650cfbdb8c835c81cd88bdce5f04ca939b3c5cd010d4dc5d51224fcacbca9851694b8bf55b22dead859d023eee5a7ad3436a912c3fc0284456d5d72ea5f1afa8545c856676ac2dd9a057028bd3ca0f50e7070fa74152f13997c95c1834c3e67504f1a4165d2b49a96919b88f72caed60f56ca7ab5a3204fb12ad3592c725fdebb048732fc189c7dfed185c6c184a626e07d7356860d00389862d5b9701eaa4e5f7889e6db0f54633369b8d26805c08471de8fc3f8fa1fb0b0711d9e015add5373f7f8b64abaddbac3399c756244b1b07c579d33e4967e5e0cf16de29cb8a7efad07ff9039ca305772a6e45c76bd9b77e24949556766a8b8425c5e595efb431bde4ee222f9eb3fc2d002a1e2d14db2b23135266c942eea33bffd30eb0218405373240e0cd3040436ca895093bf056fd001c00ba59d90502042e6e6c0167105051628895c8164c9ab959400898309cabafdef12be53604fa57df44e0a90a81bd63c331291a93bffefe809e80db0679568f6e94e0d8e2edec0087c35bcb3c4f4725e6013bcf197156cd9d90612423348123383e45c14d27d8833f56ddb04083c069fd6e282fe69c940840f5f747dfb72ad72fd8cf9f3ded15c9e2f4727fd60b4f40e95dbe77a89b47dde7d5326942600554905d9dade9d145ab6da802643f2081678392609c2fdd1b79dd8caec137cbed315374c6f05c0758070f3bb17e23d81ccc39c6aa89913897e487fde889c5aacd422278f8571641cc4f0a93d9768aef9e45d6bd187d1ba637ce0fbd3c573d6778cf7bf5188c00dcdf13be3fd599143952b376220283e34e014e83b214bd5f64eb0ecb098ae8bef883949907cc36e22ece60b893b963cfa73d120513e285aaf70ce5add34edbdac60b3aa7b385b90e339058fb9b3cf984b06f79788016035c5ce490f2de7995b98a8c1c9c80f29603ae2b7fc41886663163e604275cb085f8453b27f4d795b9bad19ade2f98a1c99b43a7581bd991e5d0e5e1a6e713acc522ba9fe8302658a9782558e35436e714ac6bc85ad1d3cd008f24106901fa954f5fefb61210d6f8dc9ff35c480f1d14e59c0e501917a31ee9d00c6bdb06a00af5a8b08c3928cc5f37476248223627cb77eaf0e96213cb0a13e97d3fe9b9814d462690e8d68d02655a32fc271ee73db4f88a33386ea88a5857e15a28d9b3e3a96f00c7cd85aa53f9282ab8c8ca6d6a8afed43aa87fe7fc1ad59b0f0db2dd25c20af96e8c282c19fc883ef01a4060398926a1c82f07bcd3bc314580d7636b623b7bad8ddba05850291a6344df0f346fa4a321a85ee3e9c", + }, + { + "67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b305eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea90870d4b28a2954489a0abcd50e18a844ac5bf38e4cd72d9b", + "0942e506c433afcda3847f2dad", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "588e1356fb8fa32410dad99cf7922aae47b4042502c92f3afe33dc22c1c2e90caf22bc37a254f8dd62a09582c70194f9616982639415178e9fe95740c0f1d497a69b69d4924a7a15290187f9c8acf09cf5b3b3188ecde2d2807207f5bb6a6d3504314b1b47684cf8ba8807eb9a3c497c79ebe1e4c1eca2aa90328563e201425227fca8ee05dcc05fd6c98128626c1e71d2fb3a21860567093db1012dfabe13055c48219d2a301c8a5a49033a811d8d9413bafbb2eefc177226fe578e93c2ef1f309416dc98843bfac387debb1b610b1d2366178ce7212a7312057a3d058357a629f18c78e129e60979a2310455a76207be5611e8b4b840629564020c17f5c9446882e23f610e931246ec434e62de765bf22954cfae02b2ff4b4086fbbd1b6cec23e45481eac5a25d", + }, + { + "67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b305eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea90870d4b28a2954489a0abcd50e18a844ac5bf38e4cd72d9b0942e506c433afcda3847f2dadd47647de321cec4ac430f62023856cfbb20704f4ec0bb920ba86c33e05f1ecd96733b79950a3e314", + "d3d934f75ea0f210a8f6059401", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "588e1356fb8fa32410dad99cf7922aae47b4042502c92f3afe33dc22c1c2e90caf22bc37a254f8dd62a09582c70194f9616982639415178e9fe95740c0f1d497a69b69d4924a7a15290187f9c8acf09cf5b3b3188ecde2d2807207f5bb6a6d3504314b1b47684cf8ba8807eb9a3c497c79ebe1e4c1eca2aa90328563e201425227fca8ee05dcc05fd6c98128626c1e71d2fb3a21860567093db1012dfabe13055c48219d2a301c8a5a49033a811d8d9413bafbb2eefc177226fe578e93c2ef1f309416dc98843bfac387debb1b610b1d2366178ce7212a7312057a3d058357a629f18c78e129e60979a2310455a76207be5611e8b4b840629564020c17f5c9446882e23f610e931246ec434e62de765bf22954cfae02b2ff7c59dfe246e4bb2d6a8afcebdc2beeaabf2a3f43f95a5ea639853f38719875ecdd2bbc0d81bb2a5ed59553b1e76b6365b74f618f685eb7731024bbf6794c3f4c7c5a1cf925", + }, + { + "67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b305eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea90870d4b28a2954489a0abcd50e18a844ac5bf38e4cd72d9b0942e506c433afcda3847f2dadd47647de321cec4ac430f62023856cfbb20704f4ec0bb920ba86c33e05f1ecd96733b79950a3e314", + "d3d934f75ea0f210a8f6059401beb4bc4478fa4969e623d01ada696a7e4c7e5125b34884533a94fb319990325744ee9bbce9e525cf08f5e9e25e5360aad2b2d085fa54d835e8d466826498d9a8877565705a8a3f62802944de7ca5894e5759d351adac869580ec17e485f18c0c66f17cc07cbb", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "588e1356fb8fa32410dad99cf7922aae47b4042502c92f3afe33dc22c1c2e90caf22bc37a254f8dd62a09582c70194f9616982639415178e9fe95740c0f1d497a69b69d4924a7a15290187f9c8acf09cf5b3b3188ecde2d2807207f5bb6a6d3504314b1b47684cf8ba8807eb9a3c497c79ebe1e4c1eca2aa90328563e201425227fca8ee05dcc05fd6c98128626c1e71d2fb3a21860567093db1012dfabe13055c48219d2a301c8a5a49033a811d8d9413bafbb2eefc177226fe578e93c2ef1f309416dc98843bfac387debb1b610b1d2366178ce7212a7312057a3d058357a629f18c78e129e60979a2310455a76207be5611e8b4b840629564020c17f5c9446882e23f610e931246ec434e62de765bf22954cfae02b2ff7c59dfe246e4bb2d6a8afcebdc2beeaabf2a3f43f95a5ea639853f38719875ecdd2bbc0d81bb2a5ed59553b1e76b6365b74f618f68a12d0f1cc99e132db9014100d9668c91", + }, + { + "67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b305eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea90870d4b28a2954489a0abcd50e18a844ac5bf38e4cd72d9b0942e506c433afcda3847f2dadd47647de321cec4ac430f62023856cfbb20704f4ec0bb920ba86c33e05f1ecd96733b79950a3e314d3d934f75ea0f210a8f6059401beb4bc4478fa4969e623d01ada696a7e4c7e5125b34884533a94fb319990325744ee9b", + "bc", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "588e1356fb8fa32410dad99cf7922aae47b4042502c92f3afe33dc22c1c2e90caf22bc37a254f8dd62a09582c70194f9616982639415178e9fe95740c0f1d497a69b69d4924a7a15290187f9c8acf09cf5b3b3188ecde2d2807207f5bb6a6d3504314b1b47684cf8ba8807eb9a3c497c79ebe1e4c1eca2aa90328563e201425227fca8ee05dcc05fd6c98128626c1e71d2fb3a21860567093db1012dfabe13055c48219d2a301c8a5a49033a811d8d9413bafbb2eefc177226fe578e93c2ef1f309416dc98843bfac387debb1b610b1d2366178ce7212a7312057a3d058357a629f18c78e129e60979a2310455a76207be5611e8b4b840629564020c17f5c9446882e23f610e931246ec434e62de765bf22954cfae02b2ff7c59dfe246e4bb2d6a8afcebdc2beeaabf2a3f43f95a5ea639853f38719875ecdd2bbc0d81bb2a5ed59553b1e76b6365b74f618f68d1f05b5662cd6e04de896d3ef5dae4149485a5a2093ff4ec74b20b5e5bf8e61b5c65515938c202beab3eea5a498d2f32d4d00a24b826b6efb16013ef54cbe170", + }, + { + "67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b305eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea90870d4b28a2954489a0abcd50e18a844ac5bf38e4cd72d9b0942e506c433afcda3847f2dadd47647de321cec4ac430f62023856cfbb20704f4ec0bb920ba86c33e05f1ecd96733b79950a3e314d3d934f75ea0f210a8f6059401beb4bc4478fa4969e623d01ada696a7e4c7e5125b34884533a94fb319990325744ee9bbce9e525cf08f5e9e25e5360aad2b2d085fa54d835e8d466826498d9a8877565705a8a3f62802944de7ca5894e5759d351adac869580ec17e485f18c0c66f17cc0", + "7cbb22fce466da610b63af62bc83b4692f3affaf271693ac071fb86d11342d", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "588e1356fb8fa32410dad99cf7922aae47b4042502c92f3afe33dc22c1c2e90caf22bc37a254f8dd62a09582c70194f9616982639415178e9fe95740c0f1d497a69b69d4924a7a15290187f9c8acf09cf5b3b3188ecde2d2807207f5bb6a6d3504314b1b47684cf8ba8807eb9a3c497c79ebe1e4c1eca2aa90328563e201425227fca8ee05dcc05fd6c98128626c1e71d2fb3a21860567093db1012dfabe13055c48219d2a301c8a5a49033a811d8d9413bafbb2eefc177226fe578e93c2ef1f309416dc98843bfac387debb1b610b1d2366178ce7212a7312057a3d058357a629f18c78e129e60979a2310455a76207be5611e8b4b840629564020c17f5c9446882e23f610e931246ec434e62de765bf22954cfae02b2ff7c59dfe246e4bb2d6a8afcebdc2beeaabf2a3f43f95a5ea639853f38719875ecdd2bbc0d81bb2a5ed59553b1e76b6365b74f618f68d1f05b5662cd6e04de896d3ef5dae4149485a5a2093ff4ec74b20b5e5bf8e61b5c65515938c202beab3eea5a498d2f32c38dbb37d04f8272e741da2802c54a9d9aaf8ecf38b36fc9ad0079523f6a4abd5281a22697a3180bc02662a7c13ee23599d18e5c48300dbb831509df4c172f53e524b3c15124a87ac73e5028cde6c94d8d", + }, + { + "67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b305eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea90870d4b28a2954489a0abcd50e18a844ac5bf38e4cd72d9b0942e506c433afcda3847f2dadd47647de321cec4ac430f62023856cfbb20704f4ec0bb920ba86c33e05f1ecd96733b79950a3e314d3d934f75ea0f210a8f6059401beb4bc4478fa4969e623d01ada696a7e4c7e5125b34884533a94fb319990325744ee9bbce9e525", + "", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "588e1356fb8fa32410dad99cf7922aae47b4042502c92f3afe33dc22c1c2e90caf22bc37a254f8dd62a09582c70194f9616982639415178e9fe95740c0f1d497a69b69d4924a7a15290187f9c8acf09cf5b3b3188ecde2d2807207f5bb6a6d3504314b1b47684cf8ba8807eb9a3c497c79ebe1e4c1eca2aa90328563e201425227fca8ee05dcc05fd6c98128626c1e71d2fb3a21860567093db1012dfabe13055c48219d2a301c8a5a49033a811d8d9413bafbb2eefc177226fe578e93c2ef1f309416dc98843bfac387debb1b610b1d2366178ce7212a7312057a3d058357a629f18c78e129e60979a2310455a76207be5611e8b4b840629564020c17f5c9446882e23f610e931246ec434e62de765bf22954cfae02b2ff7c59dfe246e4bb2d6a8afcebdc2beeaabf2a3f43f95a5ea639853f38719875ecdd2bbc0d81bb2a5ed59553b1e76b6365b74f618f68d1f05b5662cd6e04de896d3ef5dae4149485a5a2093ff4ec74b20b5e5bf8e61b5c65515938c202beab3eea5a498d2f32c38dbb370a9bbc3187cc260ddac991f94ce4f0d5", + }, + { + "0fb826ddb2eb5e708de203d0438be12cf708d635ebdbae56278be09077009586b9bc646ba7c2db35a5de05e86ae71461efea96dac64430edcf117d461113cccacf303576f310ab98efb180599894ba877e50614494923163a3afa9b4c2757f91a6b40799c5b331b464b10dfc45c783c317e408ab76390e19e8b7ceaa2c4d3bd201436bc6f69c7a5a4d8756924ed95665bd5e1034971e4d80d51b2a", + "026866d46aa940309fdcabf92a324fbc", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "30f05cf8189bb7b8b4f560e746e228c4cc7e86e8f2fa66e1afe212d1855db51070acd5eb34ce80b2e223957df50fde4c2531d97fc9e573725e7a5e47f0dfc4da1942620320bb2deaf8b17937bae4218d04db8e76f6efe84a117292159507c9f8a09fb2c17921d7762510dbf1dac7b62b1bd7572e3e2cf008d01c445c7fa78833235034281ae180e051451c6a64f22ca9708634bd0d604e4cfcd971b13742efa5b6363e662a875daccb2b00", + }, + { + "c7d4f8790e4c47d4daecbddf5939973521ddbf3b832e564afc66f03b5583c41c58bd956609dc3ae3c8f7c2213059575236168dba44e3044049f47c9e7840bbd0fd5036062d70e9f567ac1797056ee93c8476f6c959fa09a3ee854166c6fc36c34d6cca7adcb36f435f86db65f4c4a1793b974294914b377fd179e697751c5ac289243c65d8aca93732849c27483da083d4e218652d4fe5fec8cb953ee7f00070143dd6ece97f241b03c0424bfee2cfd2c4e738f2361df0ffe8863dcf763d408a7a167763959b7f985bc1e359a4b22c6899645ad0814bcf69d10c38474978d1c48e482723e3a6bb3f689f980c51c474eb28cfbba91a8a12eb964b32dfc303a3524ccb752f71316ed9d007e521cb5a0cf429c79d4351b02ee7fb60c7be636a10af3586dfa7b74d80875466a820c0b514e97cb12cce615ab55cba7c1b1de72bcd1cb1acc368f944ef4eaa986e6a4d8253c9337f9795d94df193c90cb0b0387dcde929905223d441717ed9dfe826613bf094ba872993d41b269e27d74e5f541b497eac9ba180dc12ffb6f1e7dc5223cce6dd541071282b97c6526e15b2c330fb41dc96e25d72f45c28e543053766d11d44252db54e584c14abbb295d7e5a58bf36eea1936095ef897a338eb1995fcedd85fc92d354dfe7ff9a115c186bb4d7a1a27835030d248c87571a38f17906cefe0261d15740b9", + "56", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "f89c825ca43cae1ce3fbdee85c505edd1aabefe69a0f9efd740f027aa7dee48a91ad24e69ad061648f0a52b4afb19d7ffccdc21f4b4247dfd89f5f9f998cb3c02b226173fedb6f8770aceef9271e7236fefd19fb3b87d08a5c587ac7918e80aa4b477f22602189811e270d686bc4949137a41d11d95ec96ee9d26c6126f6e923ab37638b34d1538d2e46d6df6216da4f193a3cecb731e632e109ced643056a1673059355d2d1314df35ded8364efed7de490201090a6f2d1751748585f64d26041637ba3723cbc4b60e226f10a19699d223075bc1f27d82e7f560c0db630ea670b3f8a70a8950894af4d1c7b3f674a3fa00d19ee4cc2b6174c1d259a297424bf2c3943a29a16a9830ce11abaa79cd2eb77b53a02b365b1838e7bfd5ae1bd044ffc885c61c6b2186a357e8b8f732b7ab96517969aeb70c7b493bbaca9462a61815a3c6135c748bf9c8487ac0631807aa69243fa09cd3b8efb63f8d4e090ad30b6c2f08bf4e82f191cedfa5cbe2b42268d67ecd105918181e44fc9879efd642d20be84e6f74717e03fb94fcbaa6ed3b307431d2a9384b8a2b3e5825ffce8d99af48f177e43bb4272226d8a5edd37d53807f768feb9e0733b437a1d0f84779ab68a1804e92a5eecca56364f0fa6dca152203b249fdc8fbd950fdc37c1887596308a90ba3a5751c7096bfbd1cb177bb17847b33c4379b43938a67674459cd9a06e3017ccac5b", + }, + { + "135a28170fe89066da7bcff3a9ccc1b27dfe942a6f47b23835ef746aaea63dc10066d90f4e697528e5451b8e11dd408fdbd4b94a1c6c82515bf7bc099df9cb9d5fa4acad0d22d5f267f18078cec107a995c1f3b12d7603886dbf910ab85ca7180053c50e759b00dc8c81555a425c03d71df6894a6c8cd2d94b64e303c08a1bc1dee1cf537ccf300850856292e1656aff5bf349c87f1ca1ca8085cd400fe901edcad04146a0714ef0f6b083d715edd670e020385f3cda29bc5ff6fc6edffe5ca9ce9def6e0e3d5f04ede2db02cfb2", + "73afd2ab0e0e8537cae42dc6530dc4afb6934ca6", + "a5117e70953568bf750862df9e6f92af81677c3a188e847917a4a915bda7792e", + "129039b5572e8a7a8131f76a", + "2c125232a59879aee36cacc4aca5085a4688c4f776667a8fbd86862b5cfb1d57c976688fdd652eafa2b88b1b8e358aa2110ff6ef13cdc1ceca9c9f087c35c38d89d6fbd8de89538070f17916ecb19ca3ef4a1c834f0bdaa1df62aaabef2e117106787056c909e61ecd208357dd5c363f11c5d6cf24992cc873cf69f59360a820fcf290bd90b2cab24c47286acb4e1033962b6d41e562a206a94796a8ab1c6b8bade804ff9bdf5ba6062d2c1f8fe0f4dfc05720bd9a612b92c26789f9f6a7ce43f5e8e3aee99a9cd7d6c11eaa611983c36935b0dda57d898a60a0ab7c4b54", + }, +} diff --git a/vendor/golang.org/x/crypto/codereview.cfg b/vendor/golang.org/x/crypto/codereview.cfg new file mode 100644 index 0000000..3f8b14b --- /dev/null +++ b/vendor/golang.org/x/crypto/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 0000000..88ec8b4 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,732 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. +func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer or to a big.Int, it panics. It returns true on +// success and false on error. +func (s *String) ReadASN1Integer(out interface{}) bool { + if reflect.TypeOf(out).Kind() != reflect.Ptr { + panic("out is not a pointer") + } + switch reflect.ValueOf(out).Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case reflect.Struct: + if reflect.TypeOf(out).Elem() == bigIntType { + return s.readASN1BigInt(out.(*big.Int)) + } + } + panic("out does not point to an integer type") +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It returns +// true on success and false on error. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 4 { + return false + } + ret <<= 7 + b := s.read(1)[0] + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It returns true on success and false on error. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It returns true on success and false on error. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. It +// returns true on success and false on error. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 { + return false + } + + paddingBits := uint8(bytes[0]) + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. + return false + } + length = headerLen + len32 + } + + if uint32(int(length)) != length || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 0000000..cda8e3e --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1_test.go b/vendor/golang.org/x/crypto/cryptobyte/asn1_test.go new file mode 100644 index 0000000..ee6674a --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1_test.go @@ -0,0 +1,300 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "bytes" + encoding_asn1 "encoding/asn1" + "math/big" + "reflect" + "testing" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +type readASN1Test struct { + name string + in []byte + tag asn1.Tag + ok bool + out interface{} +} + +var readASN1TestData = []readASN1Test{ + {"valid", []byte{0x30, 2, 1, 2}, 0x30, true, []byte{1, 2}}, + {"truncated", []byte{0x30, 3, 1, 2}, 0x30, false, nil}, + {"zero length of length", []byte{0x30, 0x80}, 0x30, false, nil}, + {"invalid long form length", []byte{0x30, 0x81, 1, 1}, 0x30, false, nil}, + {"non-minimal length", append([]byte{0x30, 0x82, 0, 0x80}, make([]byte, 0x80)...), 0x30, false, nil}, + {"invalid tag", []byte{0xa1, 3, 0x4, 1, 1}, 31, false, nil}, + {"high tag", []byte{0x1f, 0x81, 0x80, 0x01, 2, 1, 2}, 0xff /* actually 0x4001, but tag is uint8 */, false, nil}, +} + +func TestReadASN1(t *testing.T) { + for _, test := range readASN1TestData { + t.Run(test.name, func(t *testing.T) { + var in, out String = test.in, nil + ok := in.ReadASN1(&out, test.tag) + if ok != test.ok || ok && !bytes.Equal(out, test.out.([]byte)) { + t.Errorf("in.ReadASN1() = %v, want %v; out = %v, want %v", ok, test.ok, out, test.out) + } + }) + } +} + +func TestReadASN1Optional(t *testing.T) { + var empty String + var present bool + ok := empty.ReadOptionalASN1(nil, &present, 0xa0) + if !ok || present { + t.Errorf("empty.ReadOptionalASN1() = %v, want true; present = %v want false", ok, present) + } + + var in, out String = []byte{0xa1, 3, 0x4, 1, 1}, nil + ok = in.ReadOptionalASN1(&out, &present, 0xa0) + if !ok || present { + t.Errorf("in.ReadOptionalASN1() = %v, want true, present = %v, want false", ok, present) + } + ok = in.ReadOptionalASN1(&out, &present, 0xa1) + wantBytes := []byte{4, 1, 1} + if !ok || !present || !bytes.Equal(out, wantBytes) { + t.Errorf("in.ReadOptionalASN1() = %v, want true; present = %v, want true; out = %v, want = %v", ok, present, out, wantBytes) + } +} + +var optionalOctetStringTestData = []struct { + readASN1Test + present bool +}{ + {readASN1Test{"empty", []byte{}, 0xa0, true, []byte{}}, false}, + {readASN1Test{"invalid", []byte{0xa1, 3, 0x4, 2, 1}, 0xa1, false, []byte{}}, true}, + {readASN1Test{"missing", []byte{0xa1, 3, 0x4, 1, 1}, 0xa0, true, []byte{}}, false}, + {readASN1Test{"present", []byte{0xa1, 3, 0x4, 1, 1}, 0xa1, true, []byte{1}}, true}, +} + +func TestReadASN1OptionalOctetString(t *testing.T) { + for _, test := range optionalOctetStringTestData { + t.Run(test.name, func(t *testing.T) { + in := String(test.in) + var out []byte + var present bool + ok := in.ReadOptionalASN1OctetString(&out, &present, test.tag) + if ok != test.ok || present != test.present || !bytes.Equal(out, test.out.([]byte)) { + t.Errorf("in.ReadOptionalASN1OctetString() = %v, want %v; present = %v want %v; out = %v, want %v", ok, test.ok, present, test.present, out, test.out) + } + }) + } +} + +const defaultInt = -1 + +var optionalIntTestData = []readASN1Test{ + {"empty", []byte{}, 0xa0, true, defaultInt}, + {"invalid", []byte{0xa1, 3, 0x2, 2, 127}, 0xa1, false, 0}, + {"missing", []byte{0xa1, 3, 0x2, 1, 127}, 0xa0, true, defaultInt}, + {"present", []byte{0xa1, 3, 0x2, 1, 42}, 0xa1, true, 42}, +} + +func TestReadASN1OptionalInteger(t *testing.T) { + for _, test := range optionalIntTestData { + t.Run(test.name, func(t *testing.T) { + in := String(test.in) + var out int + ok := in.ReadOptionalASN1Integer(&out, test.tag, defaultInt) + if ok != test.ok || ok && out != test.out.(int) { + t.Errorf("in.ReadOptionalASN1Integer() = %v, want %v; out = %v, want %v", ok, test.ok, out, test.out) + } + }) + } +} + +func TestReadASN1IntegerSigned(t *testing.T) { + testData64 := []struct { + in []byte + out int64 + }{ + {[]byte{2, 3, 128, 0, 0}, -0x800000}, + {[]byte{2, 2, 255, 0}, -256}, + {[]byte{2, 2, 255, 127}, -129}, + {[]byte{2, 1, 128}, -128}, + {[]byte{2, 1, 255}, -1}, + {[]byte{2, 1, 0}, 0}, + {[]byte{2, 1, 1}, 1}, + {[]byte{2, 1, 2}, 2}, + {[]byte{2, 1, 127}, 127}, + {[]byte{2, 2, 0, 128}, 128}, + {[]byte{2, 2, 1, 0}, 256}, + {[]byte{2, 4, 0, 128, 0, 0}, 0x800000}, + } + for i, test := range testData64 { + in := String(test.in) + var out int64 + ok := in.ReadASN1Integer(&out) + if !ok || out != test.out { + t.Errorf("#%d: in.ReadASN1Integer() = %v, want true; out = %d, want %d", i, ok, out, test.out) + } + } + + // Repeat the same cases, reading into a big.Int. + t.Run("big.Int", func(t *testing.T) { + for i, test := range testData64 { + in := String(test.in) + var out big.Int + ok := in.ReadASN1Integer(&out) + if !ok || out.Int64() != test.out { + t.Errorf("#%d: in.ReadASN1Integer() = %v, want true; out = %d, want %d", i, ok, out.Int64(), test.out) + } + } + }) +} + +func TestReadASN1IntegerUnsigned(t *testing.T) { + testData := []struct { + in []byte + out uint64 + }{ + {[]byte{2, 1, 0}, 0}, + {[]byte{2, 1, 1}, 1}, + {[]byte{2, 1, 2}, 2}, + {[]byte{2, 1, 127}, 127}, + {[]byte{2, 2, 0, 128}, 128}, + {[]byte{2, 2, 1, 0}, 256}, + {[]byte{2, 4, 0, 128, 0, 0}, 0x800000}, + {[]byte{2, 8, 127, 255, 255, 255, 255, 255, 255, 255}, 0x7fffffffffffffff}, + {[]byte{2, 9, 0, 128, 0, 0, 0, 0, 0, 0, 0}, 0x8000000000000000}, + {[]byte{2, 9, 0, 255, 255, 255, 255, 255, 255, 255, 255}, 0xffffffffffffffff}, + } + for i, test := range testData { + in := String(test.in) + var out uint64 + ok := in.ReadASN1Integer(&out) + if !ok || out != test.out { + t.Errorf("#%d: in.ReadASN1Integer() = %v, want true; out = %d, want %d", i, ok, out, test.out) + } + } +} + +func TestReadASN1IntegerInvalid(t *testing.T) { + testData := []String{ + []byte{3, 1, 0}, // invalid tag + // truncated + []byte{2, 1}, + []byte{2, 2, 0}, + // not minimally encoded + []byte{2, 2, 0, 1}, + []byte{2, 2, 0xff, 0xff}, + } + + for i, test := range testData { + var out int64 + if test.ReadASN1Integer(&out) { + t.Errorf("#%d: in.ReadASN1Integer() = true, want false (out = %d)", i, out) + } + } +} + +func TestASN1ObjectIdentifier(t *testing.T) { + testData := []struct { + in []byte + ok bool + out []int + }{ + {[]byte{}, false, []int{}}, + {[]byte{6, 0}, false, []int{}}, + {[]byte{5, 1, 85}, false, []int{2, 5}}, + {[]byte{6, 1, 85}, true, []int{2, 5}}, + {[]byte{6, 2, 85, 0x02}, true, []int{2, 5, 2}}, + {[]byte{6, 4, 85, 0x02, 0xc0, 0x00}, true, []int{2, 5, 2, 0x2000}}, + {[]byte{6, 3, 0x81, 0x34, 0x03}, true, []int{2, 100, 3}}, + {[]byte{6, 7, 85, 0x02, 0xc0, 0x80, 0x80, 0x80, 0x80}, false, []int{}}, + } + + for i, test := range testData { + in := String(test.in) + var out encoding_asn1.ObjectIdentifier + ok := in.ReadASN1ObjectIdentifier(&out) + if ok != test.ok || ok && !out.Equal(test.out) { + t.Errorf("#%d: in.ReadASN1ObjectIdentifier() = %v, want %v; out = %v, want %v", i, ok, test.ok, out, test.out) + continue + } + + var b Builder + b.AddASN1ObjectIdentifier(out) + result, err := b.Bytes() + if builderOk := err == nil; test.ok != builderOk { + t.Errorf("#%d: error from Builder.Bytes: %s", i, err) + continue + } + if test.ok && !bytes.Equal(result, test.in) { + t.Errorf("#%d: reserialisation didn't match, got %x, want %x", i, result, test.in) + continue + } + } +} + +func TestReadASN1GeneralizedTime(t *testing.T) { + testData := []struct { + in string + ok bool + out time.Time + }{ + {"20100102030405Z", true, time.Date(2010, 01, 02, 03, 04, 05, 0, time.UTC)}, + {"20100102030405", false, time.Time{}}, + {"20100102030405+0607", true, time.Date(2010, 01, 02, 03, 04, 05, 0, time.FixedZone("", 6*60*60+7*60))}, + {"20100102030405-0607", true, time.Date(2010, 01, 02, 03, 04, 05, 0, time.FixedZone("", -6*60*60-7*60))}, + /* These are invalid times. However, the time package normalises times + * and they were accepted in some versions. See #11134. */ + {"00000100000000Z", false, time.Time{}}, + {"20101302030405Z", false, time.Time{}}, + {"20100002030405Z", false, time.Time{}}, + {"20100100030405Z", false, time.Time{}}, + {"20100132030405Z", false, time.Time{}}, + {"20100231030405Z", false, time.Time{}}, + {"20100102240405Z", false, time.Time{}}, + {"20100102036005Z", false, time.Time{}}, + {"20100102030460Z", false, time.Time{}}, + {"-20100102030410Z", false, time.Time{}}, + {"2010-0102030410Z", false, time.Time{}}, + {"2010-0002030410Z", false, time.Time{}}, + {"201001-02030410Z", false, time.Time{}}, + {"20100102-030410Z", false, time.Time{}}, + {"2010010203-0410Z", false, time.Time{}}, + {"201001020304-10Z", false, time.Time{}}, + } + for i, test := range testData { + in := String(append([]byte{byte(asn1.GeneralizedTime), byte(len(test.in))}, test.in...)) + var out time.Time + ok := in.ReadASN1GeneralizedTime(&out) + if ok != test.ok || ok && !reflect.DeepEqual(out, test.out) { + t.Errorf("#%d: in.ReadASN1GeneralizedTime() = %v, want %v; out = %q, want %q", i, ok, test.ok, out, test.out) + } + } +} + +func TestReadASN1BitString(t *testing.T) { + testData := []struct { + in []byte + ok bool + out encoding_asn1.BitString + }{ + {[]byte{}, false, encoding_asn1.BitString{}}, + {[]byte{0x00}, true, encoding_asn1.BitString{}}, + {[]byte{0x07, 0x00}, true, encoding_asn1.BitString{Bytes: []byte{0}, BitLength: 1}}, + {[]byte{0x07, 0x01}, false, encoding_asn1.BitString{}}, + {[]byte{0x07, 0x40}, false, encoding_asn1.BitString{}}, + {[]byte{0x08, 0x00}, false, encoding_asn1.BitString{}}, + {[]byte{0xff}, false, encoding_asn1.BitString{}}, + {[]byte{0xfe, 0x00}, false, encoding_asn1.BitString{}}, + } + for i, test := range testData { + in := String(append([]byte{3, byte(len(test.in))}, test.in...)) + var out encoding_asn1.BitString + ok := in.ReadASN1BitString(&out) + if ok != test.ok || ok && (!bytes.Equal(out.Bytes, test.out.Bytes) || out.BitLength != test.out.BitLength) { + t.Errorf("#%d: in.ReadASN1BitString() = %v, want %v; out = %v, want %v", i, ok, test.ok, out, test.out) + } + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 0000000..29b4c76 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,309 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if !b.fixedSize { + b.result = child.result // In case child reallocated result. + } +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/cryptobyte_test.go b/vendor/golang.org/x/crypto/cryptobyte/cryptobyte_test.go new file mode 100644 index 0000000..f294dd5 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/cryptobyte_test.go @@ -0,0 +1,428 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "bytes" + "errors" + "fmt" + "testing" +) + +func builderBytesEq(b *Builder, want ...byte) error { + got := b.BytesOrPanic() + if !bytes.Equal(got, want) { + return fmt.Errorf("Bytes() = %v, want %v", got, want) + } + return nil +} + +func TestContinuationError(t *testing.T) { + const errorStr = "TestContinuationError" + var b Builder + b.AddUint8LengthPrefixed(func(b *Builder) { + b.AddUint8(1) + panic(BuildError{Err: errors.New(errorStr)}) + }) + + ret, err := b.Bytes() + if ret != nil { + t.Error("expected nil result") + } + if err == nil { + t.Fatal("unexpected nil error") + } + if s := err.Error(); s != errorStr { + t.Errorf("expected error %q, got %v", errorStr, s) + } +} + +func TestContinuationNonError(t *testing.T) { + defer func() { + recover() + }() + + var b Builder + b.AddUint8LengthPrefixed(func(b *Builder) { + b.AddUint8(1) + panic(1) + }) + + t.Error("Builder did not panic") +} + +func TestGeneratedPanic(t *testing.T) { + defer func() { + recover() + }() + + var b Builder + b.AddUint8LengthPrefixed(func(b *Builder) { + var p *byte + *p = 0 + }) + + t.Error("Builder did not panic") +} + +func TestBytes(t *testing.T) { + var b Builder + v := []byte("foobarbaz") + b.AddBytes(v[0:3]) + b.AddBytes(v[3:4]) + b.AddBytes(v[4:9]) + if err := builderBytesEq(&b, v...); err != nil { + t.Error(err) + } + s := String(b.BytesOrPanic()) + for _, w := range []string{"foo", "bar", "baz"} { + var got []byte + if !s.ReadBytes(&got, 3) { + t.Errorf("ReadBytes() = false, want true (w = %v)", w) + } + want := []byte(w) + if !bytes.Equal(got, want) { + t.Errorf("ReadBytes(): got = %v, want %v", got, want) + } + } + if len(s) != 0 { + t.Errorf("len(s) = %d, want 0", len(s)) + } +} + +func TestUint8(t *testing.T) { + var b Builder + b.AddUint8(42) + if err := builderBytesEq(&b, 42); err != nil { + t.Error(err) + } + + var s String = b.BytesOrPanic() + var v uint8 + if !s.ReadUint8(&v) { + t.Error("ReadUint8() = false, want true") + } + if v != 42 { + t.Errorf("v = %d, want 42", v) + } + if len(s) != 0 { + t.Errorf("len(s) = %d, want 0", len(s)) + } +} + +func TestUint16(t *testing.T) { + var b Builder + b.AddUint16(65534) + if err := builderBytesEq(&b, 255, 254); err != nil { + t.Error(err) + } + var s String = b.BytesOrPanic() + var v uint16 + if !s.ReadUint16(&v) { + t.Error("ReadUint16() == false, want true") + } + if v != 65534 { + t.Errorf("v = %d, want 65534", v) + } + if len(s) != 0 { + t.Errorf("len(s) = %d, want 0", len(s)) + } +} + +func TestUint24(t *testing.T) { + var b Builder + b.AddUint24(0xfffefd) + if err := builderBytesEq(&b, 255, 254, 253); err != nil { + t.Error(err) + } + + var s String = b.BytesOrPanic() + var v uint32 + if !s.ReadUint24(&v) { + t.Error("ReadUint8() = false, want true") + } + if v != 0xfffefd { + t.Errorf("v = %d, want fffefd", v) + } + if len(s) != 0 { + t.Errorf("len(s) = %d, want 0", len(s)) + } +} + +func TestUint24Truncation(t *testing.T) { + var b Builder + b.AddUint24(0x10111213) + if err := builderBytesEq(&b, 0x11, 0x12, 0x13); err != nil { + t.Error(err) + } +} + +func TestUint32(t *testing.T) { + var b Builder + b.AddUint32(0xfffefdfc) + if err := builderBytesEq(&b, 255, 254, 253, 252); err != nil { + t.Error(err) + } + + var s String = b.BytesOrPanic() + var v uint32 + if !s.ReadUint32(&v) { + t.Error("ReadUint8() = false, want true") + } + if v != 0xfffefdfc { + t.Errorf("v = %x, want fffefdfc", v) + } + if len(s) != 0 { + t.Errorf("len(s) = %d, want 0", len(s)) + } +} + +func TestUMultiple(t *testing.T) { + var b Builder + b.AddUint8(23) + b.AddUint32(0xfffefdfc) + b.AddUint16(42) + if err := builderBytesEq(&b, 23, 255, 254, 253, 252, 0, 42); err != nil { + t.Error(err) + } + + var s String = b.BytesOrPanic() + var ( + x uint8 + y uint32 + z uint16 + ) + if !s.ReadUint8(&x) || !s.ReadUint32(&y) || !s.ReadUint16(&z) { + t.Error("ReadUint8() = false, want true") + } + if x != 23 || y != 0xfffefdfc || z != 42 { + t.Errorf("x, y, z = %d, %d, %d; want 23, 4294901244, 5", x, y, z) + } + if len(s) != 0 { + t.Errorf("len(s) = %d, want 0", len(s)) + } +} + +func TestUint8LengthPrefixedSimple(t *testing.T) { + var b Builder + b.AddUint8LengthPrefixed(func(c *Builder) { + c.AddUint8(23) + c.AddUint8(42) + }) + if err := builderBytesEq(&b, 2, 23, 42); err != nil { + t.Error(err) + } + + var base, child String = b.BytesOrPanic(), nil + var x, y uint8 + if !base.ReadUint8LengthPrefixed(&child) || !child.ReadUint8(&x) || + !child.ReadUint8(&y) { + t.Error("parsing failed") + } + if x != 23 || y != 42 { + t.Errorf("want x, y == 23, 42; got %d, %d", x, y) + } + if len(base) != 0 { + t.Errorf("len(base) = %d, want 0", len(base)) + } + if len(child) != 0 { + t.Errorf("len(child) = %d, want 0", len(child)) + } +} + +func TestUint8LengthPrefixedMulti(t *testing.T) { + var b Builder + b.AddUint8LengthPrefixed(func(c *Builder) { + c.AddUint8(23) + c.AddUint8(42) + }) + b.AddUint8(5) + b.AddUint8LengthPrefixed(func(c *Builder) { + c.AddUint8(123) + c.AddUint8(234) + }) + if err := builderBytesEq(&b, 2, 23, 42, 5, 2, 123, 234); err != nil { + t.Error(err) + } + + var s, child String = b.BytesOrPanic(), nil + var u, v, w, x, y uint8 + if !s.ReadUint8LengthPrefixed(&child) || !child.ReadUint8(&u) || !child.ReadUint8(&v) || + !s.ReadUint8(&w) || !s.ReadUint8LengthPrefixed(&child) || !child.ReadUint8(&x) || !child.ReadUint8(&y) { + t.Error("parsing failed") + } + if u != 23 || v != 42 || w != 5 || x != 123 || y != 234 { + t.Errorf("u, v, w, x, y = %d, %d, %d, %d, %d; want 23, 42, 5, 123, 234", + u, v, w, x, y) + } + if len(s) != 0 { + t.Errorf("len(s) = %d, want 0", len(s)) + } + if len(child) != 0 { + t.Errorf("len(child) = %d, want 0", len(child)) + } +} + +func TestUint8LengthPrefixedNested(t *testing.T) { + var b Builder + b.AddUint8LengthPrefixed(func(c *Builder) { + c.AddUint8(5) + c.AddUint8LengthPrefixed(func(d *Builder) { + d.AddUint8(23) + d.AddUint8(42) + }) + c.AddUint8(123) + }) + if err := builderBytesEq(&b, 5, 5, 2, 23, 42, 123); err != nil { + t.Error(err) + } + + var base, child1, child2 String = b.BytesOrPanic(), nil, nil + var u, v, w, x uint8 + if !base.ReadUint8LengthPrefixed(&child1) { + t.Error("parsing base failed") + } + if !child1.ReadUint8(&u) || !child1.ReadUint8LengthPrefixed(&child2) || !child1.ReadUint8(&x) { + t.Error("parsing child1 failed") + } + if !child2.ReadUint8(&v) || !child2.ReadUint8(&w) { + t.Error("parsing child2 failed") + } + if u != 5 || v != 23 || w != 42 || x != 123 { + t.Errorf("u, v, w, x = %d, %d, %d, %d, want 5, 23, 42, 123", + u, v, w, x) + } + if len(base) != 0 { + t.Errorf("len(base) = %d, want 0", len(base)) + } + if len(child1) != 0 { + t.Errorf("len(child1) = %d, want 0", len(child1)) + } + if len(base) != 0 { + t.Errorf("len(child2) = %d, want 0", len(child2)) + } +} + +func TestPreallocatedBuffer(t *testing.T) { + var buf [5]byte + b := NewBuilder(buf[0:0]) + b.AddUint8(1) + b.AddUint8LengthPrefixed(func(c *Builder) { + c.AddUint8(3) + c.AddUint8(4) + }) + b.AddUint16(1286) // Outgrow buf by one byte. + want := []byte{1, 2, 3, 4, 0} + if !bytes.Equal(buf[:], want) { + t.Errorf("buf = %v want %v", buf, want) + } + if err := builderBytesEq(b, 1, 2, 3, 4, 5, 6); err != nil { + t.Error(err) + } +} + +func TestWriteWithPendingChild(t *testing.T) { + var b Builder + b.AddUint8LengthPrefixed(func(c *Builder) { + c.AddUint8LengthPrefixed(func(d *Builder) { + defer func() { + if recover() == nil { + t.Errorf("recover() = nil, want error; c.AddUint8() did not panic") + } + }() + c.AddUint8(2) // panics + + defer func() { + if recover() == nil { + t.Errorf("recover() = nil, want error; b.AddUint8() did not panic") + } + }() + b.AddUint8(2) // panics + }) + + defer func() { + if recover() == nil { + t.Errorf("recover() = nil, want error; b.AddUint8() did not panic") + } + }() + b.AddUint8(2) // panics + }) +} + +// ASN.1 + +func TestASN1Int64(t *testing.T) { + tests := []struct { + in int64 + want []byte + }{ + {-0x800000, []byte{2, 3, 128, 0, 0}}, + {-256, []byte{2, 2, 255, 0}}, + {-129, []byte{2, 2, 255, 127}}, + {-128, []byte{2, 1, 128}}, + {-1, []byte{2, 1, 255}}, + {0, []byte{2, 1, 0}}, + {1, []byte{2, 1, 1}}, + {2, []byte{2, 1, 2}}, + {127, []byte{2, 1, 127}}, + {128, []byte{2, 2, 0, 128}}, + {256, []byte{2, 2, 1, 0}}, + {0x800000, []byte{2, 4, 0, 128, 0, 0}}, + } + for i, tt := range tests { + var b Builder + b.AddASN1Int64(tt.in) + if err := builderBytesEq(&b, tt.want...); err != nil { + t.Errorf("%v, (i = %d; in = %v)", err, i, tt.in) + } + + var n int64 + s := String(b.BytesOrPanic()) + ok := s.ReadASN1Integer(&n) + if !ok || n != tt.in { + t.Errorf("s.ReadASN1Integer(&n) = %v, n = %d; want true, n = %d (i = %d)", + ok, n, tt.in, i) + } + if len(s) != 0 { + t.Errorf("len(s) = %d, want 0", len(s)) + } + } +} + +func TestASN1Uint64(t *testing.T) { + tests := []struct { + in uint64 + want []byte + }{ + {0, []byte{2, 1, 0}}, + {1, []byte{2, 1, 1}}, + {2, []byte{2, 1, 2}}, + {127, []byte{2, 1, 127}}, + {128, []byte{2, 2, 0, 128}}, + {256, []byte{2, 2, 1, 0}}, + {0x800000, []byte{2, 4, 0, 128, 0, 0}}, + {0x7fffffffffffffff, []byte{2, 8, 127, 255, 255, 255, 255, 255, 255, 255}}, + {0x8000000000000000, []byte{2, 9, 0, 128, 0, 0, 0, 0, 0, 0, 0}}, + {0xffffffffffffffff, []byte{2, 9, 0, 255, 255, 255, 255, 255, 255, 255, 255}}, + } + for i, tt := range tests { + var b Builder + b.AddASN1Uint64(tt.in) + if err := builderBytesEq(&b, tt.want...); err != nil { + t.Errorf("%v, (i = %d; in = %v)", err, i, tt.in) + } + + var n uint64 + s := String(b.BytesOrPanic()) + ok := s.ReadASN1Integer(&n) + if !ok || n != tt.in { + t.Errorf("s.ReadASN1Integer(&n) = %v, n = %d; want true, n = %d (i = %d)", + ok, n, tt.in, i) + } + if len(s) != 0 { + t.Errorf("len(s) = %d, want 0", len(s)) + } + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/example_test.go b/vendor/golang.org/x/crypto/cryptobyte/example_test.go new file mode 100644 index 0000000..86c098a --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/example_test.go @@ -0,0 +1,154 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte_test + +import ( + "errors" + "fmt" + + "golang.org/x/crypto/cryptobyte" + "golang.org/x/crypto/cryptobyte/asn1" +) + +func ExampleString_lengthPrefixed() { + // This is an example of parsing length-prefixed data (as found in, for + // example, TLS). Imagine a 16-bit prefixed series of 8-bit prefixed + // strings. + + input := cryptobyte.String([]byte{0, 12, 5, 'h', 'e', 'l', 'l', 'o', 5, 'w', 'o', 'r', 'l', 'd'}) + var result []string + + var values cryptobyte.String + if !input.ReadUint16LengthPrefixed(&values) || + !input.Empty() { + panic("bad format") + } + + for !values.Empty() { + var value cryptobyte.String + if !values.ReadUint8LengthPrefixed(&value) { + panic("bad format") + } + + result = append(result, string(value)) + } + + // Output: []string{"hello", "world"} + fmt.Printf("%#v\n", result) +} + +func ExampleString_aSN1() { + // This is an example of parsing ASN.1 data that looks like: + // Foo ::= SEQUENCE { + // version [6] INTEGER DEFAULT 0 + // data OCTET STRING + // } + + input := cryptobyte.String([]byte{0x30, 12, 0xa6, 3, 2, 1, 2, 4, 5, 'h', 'e', 'l', 'l', 'o'}) + + var ( + version int64 + data, inner, versionBytes cryptobyte.String + haveVersion bool + ) + if !input.ReadASN1(&inner, asn1.SEQUENCE) || + !input.Empty() || + !inner.ReadOptionalASN1(&versionBytes, &haveVersion, asn1.Tag(6).Constructed().ContextSpecific()) || + (haveVersion && !versionBytes.ReadASN1Integer(&version)) || + (haveVersion && !versionBytes.Empty()) || + !inner.ReadASN1(&data, asn1.OCTET_STRING) || + !inner.Empty() { + panic("bad format") + } + + // Output: haveVersion: true, version: 2, data: hello + fmt.Printf("haveVersion: %t, version: %d, data: %s\n", haveVersion, version, string(data)) +} + +func ExampleBuilder_aSN1() { + // This is an example of building ASN.1 data that looks like: + // Foo ::= SEQUENCE { + // version [6] INTEGER DEFAULT 0 + // data OCTET STRING + // } + + version := int64(2) + data := []byte("hello") + const defaultVersion = 0 + + var b cryptobyte.Builder + b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) { + if version != defaultVersion { + b.AddASN1(asn1.Tag(6).Constructed().ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddASN1Int64(version) + }) + } + b.AddASN1OctetString(data) + }) + + result, err := b.Bytes() + if err != nil { + panic(err) + } + + // Output: 300ca603020102040568656c6c6f + fmt.Printf("%x\n", result) +} + +func ExampleBuilder_lengthPrefixed() { + // This is an example of building length-prefixed data (as found in, + // for example, TLS). Imagine a 16-bit prefixed series of 8-bit + // prefixed strings. + input := []string{"hello", "world"} + + var b cryptobyte.Builder + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + for _, value := range input { + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(value)) + }) + } + }) + + result, err := b.Bytes() + if err != nil { + panic(err) + } + + // Output: 000c0568656c6c6f05776f726c64 + fmt.Printf("%x\n", result) +} + +func ExampleBuilder_lengthPrefixOverflow() { + // Writing more data that can be expressed by the length prefix results + // in an error from Bytes(). + + tooLarge := make([]byte, 256) + + var b cryptobyte.Builder + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(tooLarge) + }) + + result, err := b.Bytes() + fmt.Printf("len=%d err=%s\n", len(result), err) + + // Output: len=0 err=cryptobyte: pending child length 256 exceeds 1-byte length prefix +} + +func ExampleBuilderContinuation_errorHandling() { + var b cryptobyte.Builder + // Continuations that panic with a BuildError will cause Bytes to + // return the inner error. + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint32(0) + panic(cryptobyte.BuildError{Err: errors.New("example error")}) + }) + + result, err := b.Bytes() + fmt.Printf("len=%d err=%s\n", len(result), err) + + // Output: len=0 err=example error +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 0000000..7636fb9 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,167 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. It +// returns true on success and false on error. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It returns true on success and false on error. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It returns true on success and false on error. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It returns true on success and false on error. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + if int(length) < 0 { + // This currently cannot overflow because we read uint24 at most, but check + // anyway in case that changes in the future. + return false + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It returns true on success and false on +// error. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It returns true on +// success and false on error. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It returns true on +// success and false on error. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It returns true on +// success and false and error. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It returns +// true on success and false on error. +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h new file mode 100644 index 0000000..b3f7416 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s new file mode 100644 index 0000000..ee7b4bd --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s new file mode 100644 index 0000000..cd793a5 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s @@ -0,0 +1,65 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 0000000..cb8fbc5 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,834 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have an implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +import ( + "encoding/binary" +) + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + b = -b + for i := range f { + t := b & (f[i] ^ g[i]) + f[i] ^= t + g[i] ^= t + } +} + +// load3 reads a 24-bit, little-endian value from in. +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. +func load4(in []byte) int64 { + return int64(binary.LittleEndian.Uint32(in)) +} + +func feFromBytes(dst *fieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := load3(src[29:]) << 2 + + var carry [10]int64 + carry[9] = (h9 + 1<<24) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + 1<<24) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + 1<<24) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + 1<<24) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + 1<<24) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + 1<<25) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + 1<<25) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + 1<<25) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + 1<<25) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + 1<<25) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + dst[0] = int32(h0) + dst[1] = int32(h1) + dst[2] = int32(h2) + dst[3] = int32(h3) + dst[4] = int32(h4) + dst[5] = int32(h5) + dst[6] = int32(h6) + dst[7] = int32(h7) + dst[8] = int32(h8) + dst[9] = int32(h9) +} + +// feToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. + + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. +func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. +func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_test.go b/vendor/golang.org/x/crypto/curve25519/curve25519_test.go new file mode 100644 index 0000000..051a830 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_test.go @@ -0,0 +1,39 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package curve25519 + +import ( + "fmt" + "testing" +) + +const expectedHex = "89161fde887b2b53de549af483940106ecc114d6982daa98256de23bdf77661a" + +func TestBaseScalarMult(t *testing.T) { + var a, b [32]byte + in := &a + out := &b + a[0] = 1 + + for i := 0; i < 200; i++ { + ScalarBaseMult(out, in) + in, out = out, in + } + + result := fmt.Sprintf("%x", in[:]) + if result != expectedHex { + t.Errorf("incorrect result: got %s, want %s", result, expectedHex) + } +} + +func BenchmarkScalarBaseMult(b *testing.B) { + var in, out [32]byte + in[0] = 1 + + b.SetBytes(32) + for i := 0; i < b.N; i++ { + ScalarBaseMult(&out, &in) + } +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go new file mode 100644 index 0000000..da9b10d --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html +package curve25519 // import "golang.org/x/crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. +func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s new file mode 100644 index 0000000..3908161 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 0000000..9e9040b --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go new file mode 100644 index 0000000..5822bd5 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package curve25519 + +// These functions are implemented in the .s files. The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape + +func cswap(inout *[5]uint64, v uint64) + +//go:noescape + +func ladderstep(inout *[5][5]uint64) + +//go:noescape + +func freeze(inout *[5]uint64) + +//go:noescape + +func mul(dest, a, b *[5]uint64) + +//go:noescape + +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x. +func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. +func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + + square(&t, &t) /* 2^254 - 2^4 */ + + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s new file mode 100644 index 0000000..5ce80a2 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s @@ -0,0 +1,169 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R9:R8 + ANDQ SI,R8 + SHLQ $13,R11:R10 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BP:BX + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s new file mode 100644 index 0000000..12f7373 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,R8:CX + ANDQ SI,CX + SHLQ $13,R10:R9 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R12:R11 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R14:R13 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,BX:R15 + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 0000000..a57771a --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,188 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. +package ed25519 + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +import ( + "bytes" + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "errors" + "io" + "strconv" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 +) + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[32:]) + return PublicKey(publicKey) +} + +// Sign signs the given message with priv. +// Ed25519 performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to +// indicate the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("ed25519: cannot sign hashed message") + } + + return Sign(priv, message), nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) { + if rand == nil { + rand = cryptorand.Reader + } + + privateKey = make([]byte, PrivateKeySize) + publicKey = make([]byte, PublicKeySize) + _, err = io.ReadFull(rand, privateKey[:32]) + if err != nil { + return nil, nil, err + } + + digest := sha512.Sum512(privateKey[:32]) + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest[:]) + edwards25519.GeScalarMultBase(&A, &hBytes) + var publicKeyBytes [32]byte + A.ToBytes(&publicKeyBytes) + + copy(privateKey[32:], publicKeyBytes[:]) + copy(publicKey, publicKeyBytes[:]) + + return publicKey, privateKey, nil +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := make([]byte, SignatureSize) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + + return signature +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + if l := len(publicKey); l != PublicKeySize { + panic("ed25519: bad public key length: " + strconv.Itoa(l)) + } + + if len(sig) != SignatureSize || sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + var publicKeyBytes [32]byte + copy(publicKeyBytes[:], publicKey) + if !A.FromBytes(&publicKeyBytes) { + return false + } + edwards25519.FeNeg(&A.X, &A.X) + edwards25519.FeNeg(&A.T, &A.T) + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var s [32]byte + copy(s[:], sig[32:]) + + // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in + // the range [0, order) in order to prevent signature malleability. + if !edwards25519.ScMinimal(&s) { + return false + } + + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) + + var checkR [32]byte + R.ToBytes(&checkR) + return bytes.Equal(sig[:32], checkR[:]) +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_test.go b/vendor/golang.org/x/crypto/ed25519/ed25519_test.go new file mode 100644 index 0000000..5f946e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519_test.go @@ -0,0 +1,207 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ed25519 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto" + "crypto/rand" + "encoding/hex" + "os" + "strings" + "testing" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +type zeroReader struct{} + +func (zeroReader) Read(buf []byte) (int, error) { + for i := range buf { + buf[i] = 0 + } + return len(buf), nil +} + +func TestUnmarshalMarshal(t *testing.T) { + pub, _, _ := GenerateKey(rand.Reader) + + var A edwards25519.ExtendedGroupElement + var pubBytes [32]byte + copy(pubBytes[:], pub) + if !A.FromBytes(&pubBytes) { + t.Fatalf("ExtendedGroupElement.FromBytes failed") + } + + var pub2 [32]byte + A.ToBytes(&pub2) + + if pubBytes != pub2 { + t.Errorf("FromBytes(%v)->ToBytes does not round-trip, got %x\n", pubBytes, pub2) + } +} + +func TestSignVerify(t *testing.T) { + var zero zeroReader + public, private, _ := GenerateKey(zero) + + message := []byte("test message") + sig := Sign(private, message) + if !Verify(public, message, sig) { + t.Errorf("valid signature rejected") + } + + wrongMessage := []byte("wrong message") + if Verify(public, wrongMessage, sig) { + t.Errorf("signature of different message accepted") + } +} + +func TestCryptoSigner(t *testing.T) { + var zero zeroReader + public, private, _ := GenerateKey(zero) + + signer := crypto.Signer(private) + + publicInterface := signer.Public() + public2, ok := publicInterface.(PublicKey) + if !ok { + t.Fatalf("expected PublicKey from Public() but got %T", publicInterface) + } + + if !bytes.Equal(public, public2) { + t.Errorf("public keys do not match: original:%x vs Public():%x", public, public2) + } + + message := []byte("message") + var noHash crypto.Hash + signature, err := signer.Sign(zero, message, noHash) + if err != nil { + t.Fatalf("error from Sign(): %s", err) + } + + if !Verify(public, message, signature) { + t.Errorf("Verify failed on signature from Sign()") + } +} + +func TestGolden(t *testing.T) { + // sign.input.gz is a selection of test cases from + // https://ed25519.cr.yp.to/python/sign.input + testDataZ, err := os.Open("testdata/sign.input.gz") + if err != nil { + t.Fatal(err) + } + defer testDataZ.Close() + testData, err := gzip.NewReader(testDataZ) + if err != nil { + t.Fatal(err) + } + defer testData.Close() + + scanner := bufio.NewScanner(testData) + lineNo := 0 + + for scanner.Scan() { + lineNo++ + + line := scanner.Text() + parts := strings.Split(line, ":") + if len(parts) != 5 { + t.Fatalf("bad number of parts on line %d", lineNo) + } + + privBytes, _ := hex.DecodeString(parts[0]) + pubKey, _ := hex.DecodeString(parts[1]) + msg, _ := hex.DecodeString(parts[2]) + sig, _ := hex.DecodeString(parts[3]) + // The signatures in the test vectors also include the message + // at the end, but we just want R and S. + sig = sig[:SignatureSize] + + if l := len(pubKey); l != PublicKeySize { + t.Fatalf("bad public key length on line %d: got %d bytes", lineNo, l) + } + + var priv [PrivateKeySize]byte + copy(priv[:], privBytes) + copy(priv[32:], pubKey) + + sig2 := Sign(priv[:], msg) + if !bytes.Equal(sig, sig2[:]) { + t.Errorf("different signature result on line %d: %x vs %x", lineNo, sig, sig2) + } + + if !Verify(pubKey, msg, sig2) { + t.Errorf("signature failed to verify on line %d", lineNo) + } + } + + if err := scanner.Err(); err != nil { + t.Fatalf("error reading test data: %s", err) + } +} + +func TestMalleability(t *testing.T) { + // https://tools.ietf.org/html/rfc8032#section-5.1.7 adds an additional test + // that s be in [0, order). This prevents someone from adding a multiple of + // order to s and obtaining a second valid signature for the same message. + msg := []byte{0x54, 0x65, 0x73, 0x74} + sig := []byte{ + 0x7c, 0x38, 0xe0, 0x26, 0xf2, 0x9e, 0x14, 0xaa, 0xbd, 0x05, 0x9a, + 0x0f, 0x2d, 0xb8, 0xb0, 0xcd, 0x78, 0x30, 0x40, 0x60, 0x9a, 0x8b, + 0xe6, 0x84, 0xdb, 0x12, 0xf8, 0x2a, 0x27, 0x77, 0x4a, 0xb0, 0x67, + 0x65, 0x4b, 0xce, 0x38, 0x32, 0xc2, 0xd7, 0x6f, 0x8f, 0x6f, 0x5d, + 0xaf, 0xc0, 0x8d, 0x93, 0x39, 0xd4, 0xee, 0xf6, 0x76, 0x57, 0x33, + 0x36, 0xa5, 0xc5, 0x1e, 0xb6, 0xf9, 0x46, 0xb3, 0x1d, + } + publicKey := []byte{ + 0x7d, 0x4d, 0x0e, 0x7f, 0x61, 0x53, 0xa6, 0x9b, 0x62, 0x42, 0xb5, + 0x22, 0xab, 0xbe, 0xe6, 0x85, 0xfd, 0xa4, 0x42, 0x0f, 0x88, 0x34, + 0xb1, 0x08, 0xc3, 0xbd, 0xae, 0x36, 0x9e, 0xf5, 0x49, 0xfa, + } + + if Verify(publicKey, msg, sig) { + t.Fatal("non-canonical signature accepted") + } +} + +func BenchmarkKeyGeneration(b *testing.B) { + var zero zeroReader + for i := 0; i < b.N; i++ { + if _, _, err := GenerateKey(zero); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSigning(b *testing.B) { + var zero zeroReader + _, priv, err := GenerateKey(zero) + if err != nil { + b.Fatal(err) + } + message := []byte("Hello, world!") + b.ResetTimer() + for i := 0; i < b.N; i++ { + Sign(priv, message) + } +} + +func BenchmarkVerification(b *testing.B) { + var zero zeroReader + pub, priv, err := GenerateKey(zero) + if err != nil { + b.Fatal(err) + } + message := []byte("Hello, world!") + signature := Sign(priv, message) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Verify(pub, message, signature) + } +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go new file mode 100644 index 0000000..e39f086 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go @@ -0,0 +1,1422 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// These values are from the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// d is a constant in the Edwards curve equation. +var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// d2 is 2*d. +var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// SqrtM1 is the square-root of -1 in the field. +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. +var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. +var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, + { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go new file mode 100644 index 0000000..fd03c25 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go @@ -0,0 +1,1793 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import "encoding/binary" + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +var zero FieldElement + +func FeZero(fe *FieldElement) { + copy(fe[:], zero[:]) +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +} + +func FeAdd(dst, a, b *FieldElement) { + dst[0] = a[0] + b[0] + dst[1] = a[1] + b[1] + dst[2] = a[2] + b[2] + dst[3] = a[3] + b[3] + dst[4] = a[4] + b[4] + dst[5] = a[5] + b[5] + dst[6] = a[6] + b[6] + dst[7] = a[7] + b[7] + dst[8] = a[8] + b[8] + dst[9] = a[9] + b[9] +} + +func FeSub(dst, a, b *FieldElement) { + dst[0] = a[0] - b[0] + dst[1] = a[1] - b[1] + dst[2] = a[2] - b[2] + dst[3] = a[3] - b[3] + dst[4] = a[4] - b[4] + dst[5] = a[5] - b[5] + dst[6] = a[6] - b[6] + dst[7] = a[7] - b[7] + dst[8] = a[8] - b[8] + dst[9] = a[9] - b[9] +} + +func FeCopy(dst, src *FieldElement) { + copy(dst[:], src[:]) +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func FeCMove(f, g *FieldElement, b int32) { + b = -b + f[0] ^= b & (f[0] ^ g[0]) + f[1] ^= b & (f[1] ^ g[1]) + f[2] ^= b & (f[2] ^ g[2]) + f[3] ^= b & (f[3] ^ g[3]) + f[4] ^= b & (f[4] ^ g[4]) + f[5] ^= b & (f[5] ^ g[5]) + f[6] ^= b & (f[6] ^ g[6]) + f[7] ^= b & (f[7] ^ g[7]) + f[8] ^= b & (f[8] ^ g[8]) + f[9] ^= b & (f[9] ^ g[9]) +} + +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. + + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f *FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + h[0] = -f[0] + h[1] = -f[1] + h[2] = -f[2] + h[3] = -f[3] + h[4] = -f[4] + h[5] = -f[5] + h[6] = -f[6] + h[7] = -f[7] + h[8] = -f[8] + h[9] = -f[9] +} + +func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. +func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. +// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +} + +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +} + +// equal returns 1 if b == c and 0 otherwise, assuming that b and c are +// non-negative. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise. +func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 << 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] = byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} + +// order is the order of Curve25519 in little-endian form. +var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} + +// ScMinimal returns true if the given scalar is less than the order of the +// curve. +func ScMinimal(scalar *[32]byte) bool { + for i := 3; ; i-- { + v := binary.LittleEndian.Uint64(scalar[i*8:]) + if v > order[i] { + return false + } else if v < order[i] { + break + } else if i == 0 { + return false + } + } + + return true +} diff --git a/vendor/golang.org/x/crypto/ed25519/testdata/sign.input.gz b/vendor/golang.org/x/crypto/ed25519/testdata/sign.input.gz new file mode 100644 index 0000000000000000000000000000000000000000..41030690c0db39a0279304a46f002a625caa9080 GIT binary patch literal 50330 zcmV*3Kz6?$iwFoTsc=vL19NF-ZZ2tVaCLM5?EPz!AU%?02mX)M0EV~k28PG}moVqp z*D^9gA!nNH)m2%^C^oAyBf`zi0DW8qRPPrlJ!@tDXRK03 zd(%@#94(}Gu8`uIr`;NMD|7S^`}4*y-?mGB@7ZaMnnO!7oDj z{=_oEOn%&|@gzPpt~=gb-~E*LmK(|_?|YtHONlw=)K8l+g!ku9UTcRw-lyL?>Yii2 zb){U-iZP#X%iCv)rTC-5?Z}sL=BXEA5vhDdjGp(ZW#rjbEN3)d z=IMI{cIK>eW@LlecwgcV`#3{+aia3fl2bjghg7gnA9(HzDVNKly|J{Xw9?vhdI;s< zCH;(3)eNZoOLxeVYHgo8M*BAKK$=ivGKig4)4NCp1tCnrG?Ude*J*;uGZe4 zJ$miCJnMO}zD6s(JQ!;s ztfctj_WbP3u3=JU>(G`=U) z`QTXie#*qB)AAa3g?#X(_g!JLx8Yg2q!bNOWv`r~cJQQ<&YbvCZ=DUnLx@))#5s|E?n-JDB3pP%R?=C6SN{H;ZVaLz*t%U!5_Qf~}514x* z1e~X0_Z@GXGt?Byb{otSEX@1h#R*@ZQrhggRb6o=A;%)PfJ? z-s{T-y`8rtj)%=|-af{A*}o8fe*FMGxbm`Lw4e96E7!SmGqC2|`if(m$cLR+$Bpl) z>1q7@zzO+{BVl-R5O6PFS8>?*K1Aiq29gbTA(}@QoRB*YzCz;Qp}ZUE_hsQG$ugHZ zC*ePg|BqMs)B41R?>&SN7IZvB4S#*e_*=!1YU5Kj`yFrr0t|oh5O4S!B2p$Tt^3}Y zLt%H@#rMDhguRVRuT?T11a>G-4g4pZ_rX30@jc;Ljw~1W7xfvL2e+@5mxVt zd+_;(Pw0&lc5A8tmvjkxWcM}Vyn7_V%zkoY%WflJU@0A3(eHydSEBMu=)uIc5q6WK z2j)a9$XIH~n}9;g}~9 z(n897QNWAsVTpTy+aFs+F%5KqWyX`eXvsOhvuPET*bcUi^Pz};V7f_<&& z=hqK-^CUm_cv=9*u)Ja3=Ypl?Nv+NLw%H!iz`k!L4DR$qK+_KUmh*rDNmjkb=d?q5 z89fGKaw!ktf@DdiDhrbYy#=x}TSV^1u8BJM4Z)NEuvz-`2EZFX-2M-*^M~9}OZ>3W z>ixX2$9LcW>5?YM1^lo2vfBJzewW@x!nivS`_3Wd-5zmI9{QBKNN5GTK?uD37y%Px z2^71pE{30xMf!MvH6+QK3Q<(A0Q4tR=}kzHbPA!h=Qb_ z-VF+yKbb7di7^zs_9mXdU+nYnOJ`XLe_&6Ri zHPu1I1|#oyunhhS;RB3P32C_c0eP^Dx+~%BIe5HLW&pd--9Nv603Y%ai)-1$Zayv7 zpJ%l?12xQ(MqH6R+@F(h$hpWV1wYhf398sMp&f(^B+xIg`Gv({!DFqFLtZ&2!L5*_~&fDX_ITyGdSLbVt z4|(7F^ZN&+O+e_F@Bz!mmnk_uu;H95W2J2FEN#85t0g_Vn0B9pu}kd zFQ$MRE-;J8dQR3EY~3l4(y*qWjsXHtl9fSR2CG?<%--^7ymB3tK#vt1e&qk`rT$n> zvVsrrjVf|Xsv?l`VMj>Mctwtf96?1xngE_9?EdcO*`35lg-6;ZoRfoi`l~U@tYtQ@~@C%vd;E zRtW(jGRl{V*-`5O%}-&AL-@aVtv{foSt)P9C7@_xJ)o?D_MQ);fZxOvn|hcx!nq!@ zH%qd~qzUvw#e7d^%c&G86eItW;>s8ipbPXsHO)5HmmD0~KYj*)o(7ebc8ry@pI<)! zWV+oY%2{@~toywrYWg6zZjzkIPj_zwsg@aL9p za>@qMd|(E+jUPmcropq=xMi&h1ejE!{8JJNRUTFIOJFa7vMFGeqLuvMs2l7J^iJ;e zOHq?Z?)=NrsGbJ3I$)I|ZU6lG0Te3Ob4=ia%Sw?@$ltH1>&M^&7$ykez^nz1NxhfI zpTyfMmJE#~*IUe&1j6^`FsesUQ#x=3=uIrL*aeQ$3 zA{jq{dbE4;HNnIG*RS?x%!Nsm;ZHsh529>jwF$2WYYvMzL->3WEAVc@%lm`}pWN(8 zovg|tWfPuyCY|7j)JU5+Mph-uHXMia)c3^42fuvr*eA9#_{)SBKLog6KY#+2NJ@!H zzGa2ktcS-O0Z9S~ng}p$z5rzYf;%2QA>cox5WoQGB0LdCcmP?1nVb($5DVP3&X**K zMYHH}fDsCiyYv}1;lQ&d&*nl&mdFi238D@IgYS3G9kV^FUh>SzX zCkX%@*qPqWd!A8a#VtuYfY9rqz~V1~54Z~ffc?fH3-Bj`kf|pMDPkYq==4hhW{2hr z9*NlikhiTYbVSAoj*L3a-;-{Rcxx5sq3Qyt_uAQO&-uo4*iOv47uK?RkVfv?88;-V zluQBui$o7V`b)C-|NVM@!sI8Y#64f2T%g!2o|G+<_5&ten4F-Fa1UO_i7TE|fT$p8 z{sn*h9RmbXQ_HF0xqb@^mS3_sZ$tr1QOgQ!vkpd)TV3Qb7qpc3=hqJ~2w!$-Rcw0_ zpl&5CP-S4-;O+7A%(S0o_}#|Vrww8Pp!Z_DZ(@0 z>iRaoXWxL<2lT#h?C}0iyyBnME+qtIMn3m?6z)&uLCpcm!8i(d^HSY{xUxYwf@%|W z^0V1jcB^HNz#ohWpbL)emD&Y(kSJ{l!}upFK(RwKTl^o)II_zN`1N?6`u_a-0WXU{ z4yQiCBtdEGz1gGWmrno+S~|ra0vq46J**CiKs*4Otz-?E4?Waqlso_r;3e)q7ulb6 z!yMV3LKa>Rp8tY``N_lr)@9A0AqH|VVB~3M0a|gI#=-+MGbkpV!*Y{@=l|48{vqyC z+MGl6=iLFn`<+znR6cV9Lk4mRmRp-Mtte;;$pho}=9_leoqf(?yYi)Se`kflQ1U+&- z&hxT^6j&P+?I{c?`B>1cGs(C337(G+UM#n<2mE#*CGc+&XH&EP5H|Kw}_={qKpPFq%1FY%FljklJ~U-$b^kVL?; zn6QRR60?@3-s~P`t@LcyrR$g;FafI)AdugDQt3J1QoSA#-69zSE%=vLNX>?>5O~M? z^XmuncUJ0C03Ov0HaW257kesvb6|yVqTJs+er5!(6FEHpr~D*u*47R%=z4obDk93v z5^e$MtU16TH(ByMEMs~+EMqZ%aR3FZd05Ik18d2;640f_dHtDUr9-?6`KeaR`yB?D z@<08ee=vz!wwHS?P)sCDF;3t)HGB5~nfOf2Mp~ty3zU}@eD4F=G-#0hFZBflc$k3# z&WBrjzM9qq@A07lKcAk9<^rltw>}_2Ahmd4_McxrAQIf`cNzR0%d;H2dvyPbM8G-6 zQ>ws!s6SK@(XwP8@&%zA^JSTczbqI@f_SeN64cmiB00wFRWMZ^<-)DNCE#AbsGmyN zy^8?s#ljcG4EFAg@y(GWw;u9FOWw_RCEPshINdDsKmDqIHrRsbR5u}fVf4Q-(4#BPI(NW>SvER?1%i}r#Lnt1ei&J?u-%mE(@ zC9}5go`hfQzWn_90g(XWktk7P4q`1?wLNR^#oK0AAQH9Sb-*V1=Ot89s^OQLI-StS z<0mP+6Fkk`Rg<-hA&q#J|AKWl$d@{>ndtylnJ1H!ylY)%A2t9JCH8^&iM~9J@)1)5 zygiqqWLU%XIe2mi|F>TD52ZHX@r2tNCG_yVD>Ta9q}PXaB2a|)73v+2n8L0&5l%*8 zrpPA{you5%WtNIH#E>XkSjtp@1-Sv#FG(7gBvuoH`Jg9N?ZJ~NVB^oPA5gG;DlE)9 zSi(?ElSN6i58ojAsb6rRvgG|^{b}cdsVQpMYob!m-(4a(Cu6`se+-i9Ty$9`)d6vy zs-tZa(Cj!kr9Xg;RRp1oalNgrwPaGWhq4d<9SDhY5W`HB7KaO*O8VEe@VWnculpw- zLhj=o>=S_z+1oGTFTU90HI@^S53s%L_wj}=QzA4%!yz|?yel~@eQhXu8ju$IUNt9{ zwBR~Ac>@sX72n$ zP&5JR-15UVPZqYaz$^DjXl_(zsrQ$KK6dPQoJ}wtwRr+JSsw2v+unB%vIOzekiy@O z4n!gG4@v`&H2L_o;o&JIe^+0)MFHkM36vQl2o6J=|F>WG4>rN>?P;?FAe$sqUKR~M z1FloT9)QKDVo37WEkqb#J4ut*5F2jK%&2vS*sT|`rp-acCY~+SVxe&ik~*)uFifCv z^+JVw_n1TK )CvQ&_F|D&(|~Wcevas@-~58&na0Fp6blgcBX0v2FFU%r%5 zcJF$5clNs!DmN^b+VTxQX)aO8z$xz z0p1%JQz(ry-qA^gwwqw^2W-{1y_xs=afC5q7}ZIhi`dgLl|KbdI^^+Kz*3;`eTZ@7 zHImqF;L!+HdrcKd36Hoq_0 z+ci_5y0xiw+EXz3pB~6WnF@2IPPlnaz1OA)q9@2cQN(28+vC*-u{pe84Qf9MhI4ox zf8ogt=njkwh9j`^OJgKZ)Zjfec@U)izy9if_#vums#zOIv1MwvmzTi#1j%zh3-C%M zES43QZVH0PX214@@vcvLzeA%^A8=DMju9#-5DK+Op2TsAQ&MSZX+PKNz~mLic(WbV z<^B2f1KyYVl+Q1?1r=eyGd2rAiHXVKs81^7am*OBKr*MeCs(nm;I8>YR=IO~B?4%} zDe}et!kMd;g&UG6>eqG(Iu8cX^+dvE=cTF0USWR(9NYYChwYs&al3g!Gv2TL?(MvX zE1A;-4)p6`eBn1!rGP%4U%mXFXwwE>ne25lm|`KBjrSNO5h=F*xDNh&E{O9!qY%#u zackJ)#h*}z5pm*&lvkjtzV;YD6rcDE1vt+6^}B}R&8c0@WXuWR&+ubd%d9-{5;s1|v^A@d~1ez zHUoTJj9#?AlQPN%%<1tvJB16q1eYJr>Z;eTuca?}-+PM8?_fj7c*MR%_90fYj)bLA z2(&X=FcLFUcUnIQ_WL0#ameC_M``SkQ@QMl4Sk0JkYo7mklM(nog{s6;$X|cLp8_r>9a3BX zdS5=`vY!NQo@N_uOz~-n5AjK$DnLxF*~VC~4*_IPJ33-66ocD-i%fRL06}M4lggwf!OKcHdGZ9}X zUaiexWD&_g+a5i6S!fFZFtG1g&WHeBA}hGEgfF@huRu8zXvdv+ zSpPLxTRawo3TQd@zHas;*{XV}4eyELY2J!b{PQKA0aN1tM|^}ux;K@L%-a`flS0PJ zpi!xH8U+rC_ujlro)i$1lD)>XKvaROz@J=iK!O)*GroY`V8a|~c=20Vad7@q)>UAxW4b@Tet=CItk-QUEw*3}Vz8h(tbBO7t)^gzLg4Nc2bIX@I%Dr3 zs`(u|cFgmK#8OCt>jy^((22CfG8)VOaXMi5a3Anh5{chuku=gE^2Bw-bRTYhR{ z9s@wy7K!m0k3*@=4d5S5s;W6D4j(5*_SXjgANVPM#`KD#;kw_g%lWQgU-vaE%jCr- zc88d--uv<@7!Qe@QYj!jiiYTW5go)SmoFKMl-`8-S6c2Oqjslwi+IWAUDq&lD?amC9?(M}$ySF`m z9t-ATGbQE6D%l1kF9vCKI0mBE>05Ax!z1xFkSN);Z#?CR&OA-4l>xOcNxip<0 zn|I>QKx;~;cL0gpvw6ZMiSotITfL0~+&&<2Y~5*~YYKG2faN0&d(ilJzXV>|&V=of z+_@+8XWaol*3YZsC*-EY=1yup> z@9Bid(+U-28+^Z4S~+T4K=PNE@p#7yIL5{RJD4@~z`~S0n4kb4>XIIbbhCLl#lNii z?LWVM06tAxA}0poSlo{X#P7yZDj2M{nwy77k1f%cN|*l-NQ1TSW@j7OB@gG4pE3O5 zc76a(*f_C~fHv%fraZKxiQS_jDA-?P6YjMnAYi8ox+$GddJ4r*0qEe-U{33QN|#F| zg0Vc+_R@IL6IQsEnB_ZWYpt7^S?!&U=i>c8{Db~n(s(66KLed{ zIg1$sz>xYfuqE#ky#b`KbZT&%*B?mi)^^U_qxGd}z;K|^ z+^Bqie*J*uaQ1CyrDkjanWfv|Yc0u)Odz{8HYo`;D1x}{UqRtG<( zhtd33hqM45e3dsDBz;?hN3H;fPSBZr)5`{Ahm}CO)MS)e)ZbL}V2M0mq5)Z?VCQBf zJa(DCY_p&a|M~R;?8_w=Zc)YYEF)6xV(?f)?|bzW^I?BBK6WUbgH#k0aacP*Gp%m7 z9_g#$m##rO)Db_?&NkxLs%FBcPKp|Sx~%MQyv6PS31f?1V&&-lw==qdGzM6Bo|6~< zux*FN2(g7NIi#ILO1!KnId%_n55<)YK_D+S&zTC~M|lXy|64xl4+d-@V1EM<09ye? z=kr`bv}rMB+r4h6@A%I3HtCVx*4>9%_k48`uUK=X93hq5uU&0ZuM+U5 z=b^Z?wIq(VJs&Fhb<8gCtHF*drApm{%geZtoStE;yxw+v@3B-1<$u#>{SgmP3fdI! zF*DU!wUZk&Yu$?>TR652&Qm@B(DKc|ISBK{>-|8P+7f1pYvfB|&9bBA$r1 z1~1#>RM;)>g9&>gI}o8`BwSr_?_NLr;@%5Uc)Dw%1GTcO3@{{Isou zuj6~m(Q8-BWpS3&^O$M>hDBtvoAK8VaB*!L2+!R7)J@;b_I}T6$Lq#h@q>y9C8>K@ z&RH@4*HOAdU2e}X@!BRBNpy&%&#ulb#OKEz{1rwnL~8rsp06_ibagQHKvu&c!Ai+` zVTWNoPyTDy9%$R)L0)VEk+SXa?8rCl8$o-RFL*8j2*xIHvJoHxwv06VdXC|Hlf%>h z#!vgxWgr4!49W#k$n;h(cjpUb(CEi|Ko&- z*5M_|7Gg=K$F?ER4q(2HNSUf9p6Ci7cQ76`EHr zWR?a*j7}&osyCaem*drj2x(U2_9{z@ji5syMjTF*5;1>%{eZlvLHP8;Uo~HqbVP0A z=-IZ3=h?P8H>tHqy!;#fADfuI5w7+;Yh!I~Q`S@5*hn{>EeO`2Ut|jYRv>`un~()P?QLWI7w)(- zDGB)5FdoOoXR{NpdacbC{crxfKZ|m1cCR5p#hxaF?%{Te9tLGushqA_9`B{{&4DEn zaf-vM@7IgJ{Tz&OTGyZYVkQP;>X4rHyQt~xyuwYT&j`oqoCG@R(orq2@XxOw(0T$P zS4tCN`6YLlNzW@J_Gp1LUr8&BAj@ioSqJRg*w_~)sCd|?sZQ;NFH_W!#-@Xu=f+v~{h_#dI+k5ZY5> zaj10J!mkw$zA+tY{d?`QAsShU$#j9uUpt$DVs2=)35|Njm_FrB0^br1@9`XstUCA4 zuOHyFT%(Ex0cf&24XWtsS&sxMn)%0k zBn#7;F6Nbu|c{pDrJ$y;2FFCzR9dYjU1p7Lq zJ^0u}-MKt?1BUl%Ok`!B^i%CQ%$ZsoxKFtZ4(OGa%;avppFbf5mrk?@gv?APU4492 zm=tcjkB7?X#j8owx5Pwk)?$$f#LLD*oPMbh4u8T=Tc?H zyr}s>FdIaOArp|ia$g=4UCiOlM3Im%O8e-5y1I$uqFCi5sU&F`N3Sm>CN4yQXfxm| zca6nO2iaVkL_K%rO`ENqx!j*$KOo8gM^FT-*ppMKf8)yY>6KiL73JNZ$Eh$%b(fJ0 z+Ptfbb0sP`$;?3J$E!Qa`{g*EmxRF8C^;01@KR3yEIW>)cF-2?MzGF+>PwvZsymAl zXD~0p7x&}m!OqUO#n;JD{58t?+DP+_dwO2ggUMj*bpkUntVa?FO9v!QEf7?w+9eu2qe|FGD)h{k ztvQ)0O%f`KHo|l~_G`@KJZs#;e$a3QbT4^R=X1#TKkrljytW8V>Y;SC6w(_e_TFqHDqyz z?BYj9B6}-YLGog0t>hK~FcxR?K@&eAnytCL5JSBJkc30$Hv5h*YilKja;bh*t2l2qZDUXd_+&)|EVAQ=cV?M z&M(M)^mv_I7N!tt)@a+tJ1Avq#-s=?UI#nC4^SaM+IiIM=fp^xa~E-aJvCyNk0Xj_ z#Za4ryQ}B%7~P83jRpIv~l_}d3i&F$IdSj1Wa;G~v2hrO%YzUC1L)iOJi^}#1> zNAcYH#dU)R1cx|I+LfxB&rEVJO)n%T|7;}j-HFToAV;mjdVX;7XAGSDK9$O`4wkHW ze$Qi*YWL89lEjI+M-x@F^9H?Y{U!m;@*bHvSCGTdIexTk&ySK>J-G7e0YEL)ZmQFDZC%wu=c(-mQlPsQng{we8%v&n`+!)&Lu&gA5T3wK}Z{{@!JP@H*e__DWgMw zy|g3bIsbG$q;tfYn)=EpT;<0|9uy{d#sCYU00bx5ii9ph@9rbu^iPOh5rlQD(`H&S zj#w`8`9IEFEt~p1zp=5W00@Dby*z-g;dq;z;N)rgyUde(e^E?{H6(g{zhE zojwQI=j;^S9@9`MzK0^h<{mt_v^0JILF*f6^E$W6fIF+SWp69e1K05m_S@skDhCG} zEg>s~Bry>e5Wik|2_Dw+KEqj~>*!C#B62&@@;BmgUA{7?R4 z*&v}sz>OO~u|3UF`4Vla7t&$Y8qie~4Uauh|qI)uYFC|I2*M^qdxJhC(K-;qf4!WFAi}V1SuVl zmzw1g^=*Q5-e|7ylb#Jo33-3>P+l=QLla-Bo9F4W?URv)WVmV8`m(>%@?2?@0T@W2 zFH&y0vR`&5c)*@<*wcEWQ{d}j4CyU$`VTN0m8y*t!MTxm!uT)x>_2&ck*Sk6(01BJ z17OV(;qIxs+i?U)+0s3wT28(`pM2LtA`vJs;5c^X%~?7SKG8T5@q$$HO0ho6x;~xs&kf_p05V*thI=3Fkt?yQo_24wx&&!zH^r%rYfJc#|e)WnX; zVo8*0M^?Lr7BQ-j@`)Z~%?1H7d;AF`5dO0E}k zo?`FIaxjUv>x~Unvc{1L{d; z+w6&4cc|YM2g+YcW=bN;3x0xFyYI67wm)T#s^G$Z*{A=ZP<{v4V9L4hM3&;(MnF2; zo*7reX$!M?!vH*c-?aIUGVe!6c_?Bdn+swF=gZ3F(V@40gB4kJBC5XOlzhY}hAYtR5J%Pj+WU!>W3lEmB*ERx(h*G-8b3YT7JAC*p?g^O_L8vc0f6!DDuDsHcNSe zK@lx5yw8=>@=O&4*hxEA9Vnur@pNGJcS_3hq^6`#B+pd&MFyo5at~k1bb*A95#!-t+VO2Pk2)6gg=xb*4Hg-?$Nhy=B=* z`EiOHBWfUdJNW$+Z?e3rqyC*9UQVy-gNM{b?%sZF)UmT_Ti#3L71kqs?Z+eeQThmbhYDk5%sBx%9-+5)A2Og}zFL$gU+(}T-z7)x!__#BTE2~>H0L2Qz zi~X?)Le}-C@OI?qb0smtl_{BJxSpN8ztgTgme9|5PW*0(ql~ICdm$K~`rVaPCo1Jk zir4#PrM0X-)2{iuNzL!t{1c3`ZeBrl>xaFDJ#Bw6Q}}P;9|1l3<3^LD+fnhaKceot%GwDiZG2yC;!e zB{}DZJxH+x=Am+AD--eBJPi=G*SD;xLLFu+S1l&DKF<68HP2(4R94~)8O5gzPI*fc zB8BUk@>!EnfGw;9h~%&N|26;rI`jWi%RZtC&u32O#i_6e-=UEFo~&kFP-TGfuiWem zK;`?v10Jt{a-NsWM-erikAiF3B`YE1M~FhwA``{pwbyj=TfsD-=gP}rvLHdh¤ zlQ$!hi%}8rZ%fs@d9}ko$K;!oye<#8l1~b5#gV<6%-bfWU0C^I+sD{n|7)k@@~BAd zA+KG&Bg#8uUzV_n+{Lc0ZXY$EDL3%jw516L0ZQ%H|%IWi3GF*jD z^EkW-kF4)U`J^xk?+rpE;fHF?uxgpT+6DH0BfKEQ;=R{x-qQTjp--9hHamYXX<44y zRLe~=OKhju*Ye;Pz>NHnc41}!1%73gG-j?x85O43VGXOsF7v#dm7bipmbB{ zD^c%vM`l2lH5HE%ha&=VGg7))`8^*&$!fBRwZ+GJ^k$KVv4pKeB)ItfSo&n(=_>J2 zK7Z>k9eq!RWW0!K9oTLe+HB`@4_{)1#27KW%0hlDt+H%8W&J7WdgT3%@eDFf9cZV@P=8f=sezjr z-1uYu^EAY-5VG@%c18pna>gf3_wvWz070-lfS)0S{B?l84)9;;0Dpp`iY@U!&c4bg z*&G^uZgu1_J2f}>_ig1VqsW#thJ>4CJ?{G(C4N%wYkoXfvaz2Q`SbW(`6EX>{SEu0 z9VZXvV=nw|3A~S>9Qyh7160pb-pAfpmH#OAC}TYDbSipK!SEjrNNn2dV_D`GfwyM3 ztO#zzj+W)s;tc!q>4`;wE*uJzqMzSQ&iz`5Py6`6z(ka2)O93&R#;A0N-`eprMv|# zap2%sYrcS*<%;mxWieV+3;O0kl*!V%#J;sCuNL&bFUF{_B6`<|JNG310m-bll z-uK{Tm)r}Il&dx?M&aRqIZ7^lG%(WcOyJR~&9OWOeh(lqS`6Q4DLTc>pp(gZx`ciH8WMGpI2jAFV zvkJzFecBMly7%r8-|V;=NH|mKO9QhEZv-1KX{)lzHb>fm3|2+ulsTL9c8=&uz4L;p z5=)%>mV;&%4+s%<$sO@r2bJ z1)Ke!QNL8$QnSpv`d4C(T9hPk6kqkRPngJdy!=j;o1|lHATK$x)C-RG+OP(}TG}CA zz)@@t25oIWfXc3RgPK!+ zUEr?^{Fl1GAD56SOm+hs<2pYfA0yQP1~NzJmEE7`b-=dQVKv}wf0SY^|6)BT0B_xlJUX41R3QXS>3uLxh>V zHOQBZ>sG7t7lcvYU8b}!HPunTb+zY|q{g8BCGW8fYh#)JFo37wH+xy?OVRuuel@f`O{``IPg(#E(?R&`iJX$cj8HqYV9ei4!``k z)1fR7MvQ;HA7ig}j`aZK-&yij$RQ2DUoVOy$tiiZjG=4!jfC5`Q-0K+y{P&(k(Y&yT@Yz1wJqm)zV~2DZaDDtG?;00Q->YxAFQ4FUt<~ zT?6kUtps_=CKG-;G3&1r{B?r=awqsh@n%!OyQ>2-m}xq4Cs2{rOb8D$!R8Ze_DaVa z6{Vr%EpmZ?2w9lx@xD9*07I>5yHBLRqec$7t6G>|Dl$2_=3#%!{`vI- zB9+kR8g zEn<_&+8ZmD14yaD53bO%ENF}D`bfnvqKw~Nu2>8J&$FM3ZLmMk>3D!0&wD&y1sut{ zcpcXPaBNAhK{Ql^*n=G(gcM$Y1g^etb*PH8irFqfo{hju%$mw6;y^NLSKw^kDZMKZ zqkbG_ReF_9fY?%109RkFaFYX3(GIWaYR7L_LQ2O=u`G2)V|%Y!Qk2Hd>TVSf zF4e7VaUSXO+^AR^unI&ACeOohqT(#z!70Fe=d1S*pYTzJv~dVql+1RdE^tWdPXXE$ z>HLnJARxhwcgmu6zp+4q0%)ZsZ=Lb^rG!19(t*XP%x)*3>|KST4__nN{s)YO%ZF%I zdxZ+H{#XU6B8dF;2awrp89Yi6J8YuJb?nI#8a%4z^xAlF3why__Oj`F+bc=1Q#!R0 z0JA5^V8R{j?_{wrhUC;4mB0oVNBfxhN-#DtIoLc%B;U3a{dI%CZtySQ27fRzX@%_( zZzaAHj3aA|GvG3P)}f&1pnOo!oTz@WQBoBp3+A%2)DNmcFKKoZ&F+T}tk1MJdr5jE zY25o@s;}aQ)?x|EQtVTH!0_|y2Y7o&Eor+|h_YMb9esX`qD4W;q-93Ly;`yoHiPV^Fm^%|Q3@C&Mbu%|@h z4KQcZ@@q-1Qk+CLr>Zp@4m-RXIC6QNx9rBm#zv@$OA^>ZluPVL5!#$)FESQLBI;rA z1BhZhr@C2jdRUPhQNLh1kK(~>`i(D-&-1(3UU2qv>TEv;V7K0yE~qZs>pgO%ekL&D zMZh@f1X*V*#uDX@7JV<9pgOCzy+L^>Y~&0`-eeM#Q|YqX#QPnY9qDMnloB#NTgh~_ zU531dQ%SbJj_}tJ{v{mYPq1|X!>*y6oE?1shhgHyKZayJ!`3Z(r{CT*+WAslJ<4?} z8KIseNQ76x9L%Os##b8_9>=w*;|8T$8hH+C$3#6+91-Ic&EqH6pI<+KLY!j7JHP#L zt;#S!My^}op_#YNc0&M0H^23LYknLIq4qr#!OrXDD#!xgUkPm4nltovOYi|NXtvACqTp_Q0h`{fjUyp0 zHMjS0?2*KLL?N}F0cH&lrp}uenCn%ciFH4`dTI_wM*p?k|5XN>$GgKv9{%#RMn?&M zY|~(!`jRG7VRt3-Vv>|x-QwaS%PSwbC$UeqadCPl9Hl8bcY+Zy>MF_Jk_?GWf0YUc z_#VD|7tHmsJINN=*GpsD2D?P<@kmdG56c|}a0q@z^}j)`A#f;`8nhXJ zaROUYSpzXo_|TGSN#SXPPkcNngxl@$Rhb32ri3tMy^3>*7yATk`3?O#f%qyGp$HAJ zZU|=OW3!R`2XqKfzgzj|rqs&dMG2uG(vNi^--~O;B4Kvop#(cG5<*m@a8Q*K!Boi2 zcc@WFEOt~<%`jqN2OP5BL9QLg6Zy3)9dRPB_nfO<0#>k9uOuJ9)*aLfxS(}6V? zAQP3|Nsk@FB%4NQ3AkG4k>;j9PgVL2&zUy8?~}^clclJseU8qcb`?Gz$4JSG%I!SO zAxMcas!5H%8INk3CxrU*>j&7mVIQHYKaxjbKLl?U2-&`^`o77_V*0BAs$zq0gf zwv~)P!3B(>hGaYNBE`$Ey(bi{joKJ1j-~VklOp)f3pvSJKirw zIrM=9z%Azl@l|Ecg7bUH&c~*Zt38inw#JShtgp+t$<}VZXMpKy>mz|D*y|`cytOQz zGE#I-*(O@)VF^JmnDKcCB+9jT0)7cp1k8tB=bt~1wxtkBE&G$!OQ9E)67TBZB>XT= zxg#q0Y~|vR7U;Nx&6{!-4)X>-pMaM;(XB=B_UFBiDkOt{@B>C#(iQK$6vf7-S9j)v zkhIm+JYNY;*pMXod_{`FeqGH}PeYbelKH!gS0P2Mc#9xqkz4c+h9af-!L@CLXlq&` zSMdRDpHTif!(V6kmvM$a@^iBgWV~DYClM!4yqt8064rUwC9$|FC1exD`v6_d-VZeb zDf*vwMsO|5c?Q+DL&wLidrGdgiv@bMhJF*N-nl3Ts+A(K&+Lq!Uq1jOMh3dd^12^k zV9Lj~wx-}kvGmQNDC~Q(9ra3|VsFAuRDG%kHn2)aRLyLAS9HR5h*EYB>29jE!qQsvDUj{b4KyfRi!Ti-y7{k%NfG3udW(;m`Dcf}KIjSun?`R#Fl zb#P=*CSeb7Br`qeZ^Z6|A-2^OSvt}q<1@TdqS^*(Z+qYgTNM6N<+gL8@*LmX#ORqi z8&v2>wRZuTha9<5aY|{l?N2`*=41I_IyFxzh#l>1Pf}|#^M*Gbi&EAiN@b=Rr-&J7 zQ^in(xI|+WYp0>II%@buaUZhUQy@#bc{z}Z#9;%||qU6*{vuLq$d&^a=h~D$` znD_Hdvr5%xtJC#DVmf(?hey2nM%lftMMtQD;qaXhTyUIU|I`HHYS^M9pv0!*lv%(; zumKivzCV3+<%hj#ZOuMQX>>9LK?Vb^p!&i5#yY>2u;r~c$h9;TjH2LKirJ0Jea9>D zIkRsfYyr)&TwgECmCMQYSLwd4i41c%3iHX$`qv%)y2HPYJN#j3vn=_nM(xxr!^^ny zvh&pNZ0nWYSV^6x4(tVT>?bNIZcPPlkO7TcTZ5exuNyWLi&W=+l7UEX_K#=&Gl_cM z)ksiUCq)TAbIQ-JAK-!E+YSI}?C{a>@@1eKAOQuAeAwm9)cE+YdxFjRZ^`2Q#;h_c zt4{SvLN4AZN<1l$2Ij{Oau&3eQd5+$$tvJ{6s4T(Em3V#grd8;D?Ak)|0 z8TKkKr5O2DAOODUrY0}cp@x-=-qF<1HO66!AhzArcz?fd?Oi@+{*#?}t9!-{;KD6M zb2jO{vO-Nqy9Bz)&dll-)$0Vd=f;h126F)o+o(j_;A0yeL1Da_ zT0#|M5u{$W3ACw6LzO#7`(t{Omu#oRdgpN_$g`akz}GETFF@^FQt<5Ap{jpwf4Y*J zz)ZU-?N0$|37bI_r^-))*YcvoTs&QUtVpyP8;CusW90XhG>NKw6GtQH!ZyxGh+-=i zub+lj%2@Ut^DT7%d;#5gw(S)j5pp^&L7aTlZUrM_Iuh>y$#W*8%PjFuhl#<7LfaHx zwG@WRO}NHOWS3~`6Ur0_6Q*DAcuW1B8AFL`drPUGam2<~uAkSt3IJXNCzt9SCf>XY z&ZmC$BI6KA`LD7)ow+5kUHp~z)EZ1w3InH{%GAgNX&oTH4*1)9)zxin4T(rbk>O=4 zvGd5b)>W4e;jcsdb%=i{hxn5m1@TPIL8{ZT%`N4Tv<7rywGvfae9l`aBIJaSZ;0K_ zEp&>0;DvH-t6Uk*k zly%>2R0NaNG9d2FZvmH<88W^dusDKzEMczH+aJ_*C0dex{+o**0mlLDbj zou1&?1h1(IFYex{$Zzekk^)y>$F$h)DrW=>d~M7LKvGOCZrLJ8?5~4yb|T)-v)=`N znXId9HjY;~6MZiGUNs*{3%b5}FY_;zBH7dX;Nbxq6H#KZ!=z+^(>Bj@)oxT7A zs{F@%ce^mVbg6YZ!!NnXj@=l0_KRn;OXgrGzrz%WXl)V&2*mk9kx z>z4ho9$&$Yb zJF|ajNE^zhWwi<^P$us|5koi}>Ynu8#sDI!D6zh2I54F>V}D)ZuS@)kxx}AFlqXxb z%NUSczx6A1g|pX@rTVj8P3?Cac_+5>C!6|I{YfW?sCec~ zpUBW7nNv?M3k(1l>*Hk|L-NvJK7jv>CpOg#9yzYkT~(^1W(Ocue@nKyNF#Aqlvorf zAXojE12@N`+SBANPQ{wHfHqphQ?WJW%2;V3)a%$RMo`zs1FOo-o!h1eoHy{-(mUco z8$YWXgHtUSWTrl`;p3b~){T|bwZqymh=ltF0tzO6ZXd^sTFmgd9=H=<2uBoe;7!v! z4*{DEs^&3qFY1~u%BC1d^ba9+amUU`=DcOQVVb18W>Dv)F;Bm*1fM$u( zo_Oal7`DM;^Dp@LS2jhS)t%Dy1|f!}AgnDlpcFe!z7bvrX^ET^)Z4@NAXynpwaG(9Gh^rmGSW>?IIvyCFF!4||`Qpcz<`6n1 z`{Yd&3XBpgsDg;8@`8GwH~0~C zRpjy@ip#V}P<>v@3u29dgNi=b3F_y|DG!v)TLMV_B&pI($wExd>uD{{AKvkj1s>a^ z>Zc)@yQ&+`3fdMnr(u5iTb+YG4mNmC;;l+tT|VlBy@0?jV3r~y_mU*DYykJ+-6$?G z2}03!RV)`@Ck#&M+7_YzuT%VWihnt$_)`Hk)PO-61c1chlSiFReoBReol*K|7pSJ> zX`j@Yk9S$~QZBP@m@;ppCS{HwtdIX?u{FyPtY|Y|DoPPbl?TjWZEPy3ttxoQpZ)y) z0k+xorBFIZZb?ZWqP=zxAn$Bd>O7B)2h{L-5m!G;SV=W0RTpeH*;L^@f~>kTN7S_7 zkt^L* zIG5+GA$o)_Rq}P#o45hwy(}p}HrM;~K_y+^pvK(mEUj|vmXj3RNAl1#9kN!DGCC#J z*lKmhR}9h-o{?T<-|zHy?GKf*W~GnsLJ!Mqk34$`b0!Y0r%I-OMbF8OMO%Disnbb; zeAC8%EKnY#Lx_xCE89fHdjZMbH)zx9<2fCClI@04Zw&dL30PH0f|5sZc&=me!y8E3 zpZuIH2S=+iq=xkn167((m4GGO5%dI3+kiO_qeqwcL^<#<(<@KjSW)isKk*2fv=pggsE*J#?)5+?q!ZuY-JXea+{ywV7PwCtp9{=kWf8F9= z&n^Dg>Acc|oyll(Y$Gip-V^48<3dVDMW_3S40?=~!d zT|>4eKDi|+YFeA|Yw45eho5nN?RssPEWGQKM~Md?p-NVcM|Slac};99B@aH*>$$gr z)@Ky3iPH5q9-J)fR7+GrY85B3q|$aS*#n6*09K&gXM3Y4!7O<^KS}Biel>9dZ0NK! z8%byaqP>b6ncrW#NMix=-GgyqAri$ZtnAd9JR{_IEcJ2pi#U`N|4LxTNVHLUcKfFo zlD(PXj$|uk9ujYS(x+XDCe;uril?LGiQUKsF3Bkrs{(j}A>PMUD6khFTJ8{izD*KR zHM)xm%6;`6t~&)JlBc(b(4MAdC6$0=ze!2n2;_S_^ij=yedHo+E^dOw5Xg42++WY5 zuwvfw)l;(XN>rLl5eWj1L(OgQ|1gLr$mETawWW}KUiqULTvXoi>pW^Y6=A8V`!nkx zI{h@oz`M+jU{vC2akgI4;o;q{*P;oBUyc-e{-Hhy@O*;2mq#1m>YG8+Wlag0_sN?tbU_Pq~~5QDa%e~{#$K} z{Tnt-V^h?FZ#keL{&zYg*?%NWsJE29R{m)!S5@tw8;TrA>?Y|^uTd^G?Aw7}t91}W zNu*9z4jcXX^#hudRU1^c`9ChZB_`h%szbHhpib=u$s{>tDQn|VQ%kaJ7ExN z0*kV%SGxNihx4vO9U-7=UMuE&Fa|z!k=i54-jmJbG%%2MkHN-D`Gm9MI^%fiLGGRE z*;o1C9TTlwu#Cqeko-?_FP%T2 z-em^ov4~)J{)2oL*_dVnkrn&%9wJchyMS#k z@h-~Q(KYX#)pnQDM7fxIq|nNb2SG|AAHKrt_p@+5uTKv2#$EBtL!SXZ4lQ6Ap z4}oeYuNpIdUE{B7{ENEApQYU0r~5>zSMu{%zZQ(RNs*jL>-KS`U}HafyK~Dt>b+lH z#TJLM6@0a#tJEVUb4Ll>4pI<*<<(j;xYG9pU z$LaODkd?cjm5@88*?V+)H@uEZUut%b)QJa*?cBEjGSz3kT@yXIxFCnj#sUH%@U6#x zf&H__Z@5?zk+)|y?BQ(O>1LM?s5JVC9Jl)S?u63mfUvH>hh_JsEJ96%~Q zWd|U2HsFs?g6RzB=sd;+wy_KZ7Q2?b_q+NC*2t$(1QQ*?u!}UM`h9m{mzoHvJYJ&a zFe|nN+`W<7jB;8TM))snU4C1w%7iYN0Na*FuOAs+X7Uz%zO1(0 zvw`n?JZ9K(L+0abbpZ=ss*j&yqZHnRKfWPQ``Ak*$sm3-F8@sVKg`-UE0|v1yjLbY z$V!=HRUYF?@mp~0OsHu!_QGo3BT1CvD-td_p8=ld@e*Se(5sC;Q%bGB&hghd{zaYR zj}Qu^;gwV_GNU^9Q$14aSG_#jNcN{C@k$nF8w}58Wxh6b>dJRCucNS6>`0CtwH}KU z+Ls;Q_B>w)5kO`e&xwUM&9=kWvDvDhUq68D2>t2g{t<^faZaMz=RY#5vtw}t={`#m zq$Kwr#eX6Bf}_;8&X0Pa`E?GK-|QGek{L>VxQSEOiodK2l3k76z)PF}d`hBM+02N8VT;v~X~K~LT={nSD!}U}%e2Ra zof5qvhKNi|+&Y&qD+^w&N9y2rn+d;D1+fznnj-h=5_?ORu6 zNIHip1@mj6eB5!Y_^LE5`vA#^BJ?p)rPoa}*-ty?Y!Cpn?NXM5PI7}w*gbRe=+19z zA4N3v8^zquuOA>0=T`A2t_yB?x@(9%YR&)3)yrsSS@71(B|@hCfOQsl;y?Dcg=$aj_4Y!!?SsMZcv zB`|sbO2{BiWX6M}jFnMw=P>4czfe%CA0R7D#YPJO@j_+Ffe#`Zx}Gr!T<>oy;iH_CP- zSs_D$TPFSh#_?1Ttg6#f6Y-Q~PKTAF%(^0atcUvlnQC4HyCuQUm0%|2IcTqh4?Q|w zS6Rc*r~RV1zFoNhH~Gt(N=F|+leU&+1l5gMd!b|iSZ7;mcczGV`9}n;=%u~-jghAu ziLCt2rc@SQmak>A)5Jvb$19I2@rGi0*e@|`-|s}L-7p^+RftM6Dq94WXU%}x)w>jM zkn7Lyk}zB;^qnO%kH`CY*`)TwM<{66x#w%c@eqdE0eexnj}Wncs9Vk*((8gyfm$pe zx#g;)OsNu{gO|=v;SkfU9b*qe&655^=Q{&M{~XP0X*-=gSz^npRFw*3cwfBa46)UA zEzComO&ml;FQC2J9)BI=uY>#xJIJ4B@BLO@W(U@7klno*3eHHyPTdZCTf!FgDlF0> zwc^~$(WPu%+(Q|PJ2$&au(EMT$A?YBbol2$Bhb`k!TD4>CLf`iYSlJy+Ar|y2S`+) z9#K%afxj*uklP#gP>CkcpENxSb7t}FB~*p_fI^2=mG}&d$;xM`T3Kn2QRi*m=)bt58ROyO>WUstYj#T-bC=)Qr zz1pUrr;xwWycXO-Bd5j#ih*;FtRa_*ccmD3nK$W7!Ycb+k^^!0oMcG~Tq#pw1L!<5 zMcJdt|HA3Q_3GL~?OAG*bytbi5H=W;Bu}tw(JmS14znBOe@4Nn*p3v70`vT-U@dt$Sw>wBHo3A=|lhigWBmXE1L zmX>LI)75EZrVk!vL{`V^hU{g76-A#QMAo{?cHZX!yb+U#-)*c>CMa+#YMzL^lpkHf zrA1n(#96~2hLJ&XUX6<231D6;00+Zm!3IC{vvB)^ixUwKfz(^hA>}XuAjo459tY=9 zECmpM5xWv$LQzjj5drBH!S!q4AL7i~Z256@qG*;1fcJODAMZ+SvNm$^r;n{@?DAh1 z`RgM8;x6*XMhnW>fTe=a_%Z3mW!njb9Dq$pOQ50#Xb6Oa9b>?kyrd*6AVety{4#BM-Fu&eQHqeBDNC9B!^rdv?o%l75 zCEO9DE>;rkY?h?gXY(pkASBvmJJ}^b;8WYVVsyKg9QjbSW8f;OsbfRu@9$WmL+H8s?=Ap5NJavrQ7U8D4 z7w@(_avAD`Juf)35lIN%^W$f%av|3BA=uuNv?fv-uXdgpZ10s{%kZ$5oyj1eyTOrU z23|k;1FN>GPK=_uRmDaA^BzxoKRHJ=VX}Ry;Jj0wFEC)TDG1*kWc z2+o>br$e*J(*Y3?|c+;-NP?rLL2qr^jTk>-!D9P&8q%hOJE?IS-?UsgyuMjjJw zWI0{=*~y;mn0rwm79?n&BoL%WieGXwE3W1OctwpZ0GUjPC1>SzRp4ODJdcUvgHhDV z5BYmuXQH=yfQ5Cn&E&lv9F$iUSL|xKvDB&(RXNi@5qdmlk!g@pvDv68d*P$r*1Rm# zRf8gvvhfVHTeG}MtVUXne)G1sTAJgs_`qCSRbfiaC&tQRU3nRS6_-#o%AP!RV9x6s zRPWb5QXCif4#t$EBQ1x|)r7<>Q@Q84UtiOGKMMJG#g{cIk8Hi)+&tg+>?ah;Y6-2% z%DknNkoy&E@(Zs6nEO%Y(9yym<)T+9H9m@^8+L()kAu%)s>?wjcjQHFl>;80?E9AX ztIFqCJAFVxdVbofbKWGYmv-c!}Q_sRb z9CHBahzcy3RxnF93#c$;5%Tar^;&5K-{+&&tt!C+W!^_#4vNlIw=F2GZ(f}bTUJTG zeZZF=H?JuXcyI{(P8Jhvl)Y(lSgemQ`6SA`Ap3`N8#d3MD8nP=`$I(t z%1r!z`&kv)lS!&xwL`)PYk*y&{NO8(4X(qe3Puu7S7RzkbN=i+2`@Q0&Nb7wi#Vl4 zo+I%5ATkJ$qN75kas^xlL>%Q5&YIc2n+-0`|`uYm4E|))NN>0psSAoPX0)XpV+;fZ=eY^ zm&aVI=cvMxbAqzT)TpC;SGoMA0){`bNP2m}233}MfDbQNdPOw_l-cx%q5x@q42yY- zS0EU@oo8d-jXBi}aIcDNvV2ow#3|9{cnRDAe14TzcW%%N)sZJ3RjJj3Evp{dlsMQm z;$rO$q@=TC8er_RbR;wn@t=cfRzymNi`^`hMPK;*z! zgt%4il^0WP%aa^z_9&HkqW#;kOSXjxc6*)Q+NHVNQx!Gr(W~9BYk*CZStau`ug6PL z`Q%rA97U4DUsw6-D*sJfQ!#A#b?kw^zYHO4O)o2;~_6 zf+U&9_{(Xrt@xlcz)#*-7+a1ri6$C9KOW_{6p7_4=D9D0_qa-LE12;_6<)!BYK;Jl z@grbkd6j=E`%`cs08sJQ$DV`*7f3JD4wt*~y>GE!Btgjl%C)sI9%`h+z;fhq2HB zn9?rb)Ny)HV6jI?-A-I3@1mUj6_YLy=#`VR*=3VJ)!u?!d#%+6I7&qYFz{9iC5h$g zj?Rb+k?MFKk8_AvD&ssJdPHR(qiUd*1H0xQwx%h(11imux8qj)NY%kqSd@s7D0kcG zhn<${T{H-2_Y&S)*Pa|c+WaLrx5<)e7q4uT+le5Bf4yd|ZG23c_XZb@BGT*jr_dYGeNKF=R!4C0?_KCprM>goq@M+;$iOnZ&?=d4EM+Wyw3)eWd zLEsmP*mZV*(p_SNe|9M=$CRGb-}rS?z7yD#<07~G_5s_jd%zx_J{1Uw9y-2Q9WdqA z$eKql1RdSwQMj@DmyU`VlweqY$#>r4Xm^%bBeBc?B`FxGKx2C)KU|(lyz-(4FcS;b z4tOlQtgmXlcqk<}+XLvrRH;LGg^Zoc^Hgz|ZrAIa-ir-vXgR)dRf6qet-;oGtflUy za4zxZsmj2_WwERH6GbEVAYpd>coM{at#M*s8^0Mdl=OJxi9cb0k2>cysS5a3Yyppv z>uZ-(KxG$?W0!}HLC^q_rTwa_iMzF2)jxA)E!c*v1XGjxpFNF7NME>cj8gjfpz%^V<4F*s<}9THT3xnSQ}~ zUMBM$S$?Omx`W;=d$REq04x470eBAiElI_Q<8oA;hbKBMPDE5YXFkgV9YP+D+o}@C z)`qP&*6z7BT?0bfE0toDXmh&)v&%H>flLFHvse7{>%F#pupdDd2I>{KGV=MZqa3M% zCV)FR@(pHQ-6j=NLLHv&n3d}7^T$EMWkGTIt?n`JfSa)^v)C<+{q1tpNhlUin?xY<-d)) z{ITnt)I;t2o&15qz-0UN5G_(oQ8M>XJ%#A2Nyp>FIK|n?-({v9K@TR`*8#P)xJI-l zETu;&q4w9glRYYS9@!KlzQXmS5npOc{QUX>!>W*d)>iWan8fO~>aJS~VdcAIRb&tD zDcJ`lELc)+Nui3R>A6Z{fJj*k4Bc_xqJoOJ<{dtl$2*dMA9h8B0S_J$8wctgSt|Ia zInO1Nen0$^L&^O~c4)e+etX#>YVUP$vZ%Zm(4%%KmM0IS+%_x$hcc!5+*3sN&gdv| zR!`X!4sukhD=C*aB#Z|gILG*aW=6(sNa=vQHn183iFECtv|-K{9&j(9g?@*T8@JsEZS|?IX_$ zrqlVYHr(26WlJ;m@2O4XNu?>Me6TXPikH&|A1&67rG2c8vJ-nu(>;)wCo3nF?}xUl zYKW?XHgN7Vv?&5TU4Z@$X>v2s0Xz?~RK@!Ft|VA5bz&kVO8AE~vrt*EjNU*Hpz`d6 z3S_i^Jpp?=2BJ!EJcqjGc>ixV94~o4rpNx#ys<%mKFl$JU|$Igj$WF`KOk*~>Q&ov zPQ^|kEREf<6K~F+!?GlzWVl|1OX2gp{2)MlvUyTOwg|>2cJCx+36+S7a9e zB7J;Bcc8Jn9#y1d7kMb?U#h5IKcL&jM?tC>$o9zdyBwZ)B}=fS_~514A&Y;OZX|17 zmFZSJIGn|jotWEqUg7vi-fUIC0bBfL!oxqV)uZ;#l7*-gZ>^y?0)1+g@1=VQVV@5G_DRXfZ`r0C zj926idiP0HF~EJ`OR^Adb+gH_;bsCK@;AIzfgk5g#4)NOHr-!pO?Bj|743 zr;zrtg}>z=DGq#vk9hi0{YEb78*Ls7$^gqZ1Mmde_Gc^Jb@8k-%XbJ?pM?E^0)q|1 zETLol9vLm$i(lN1LS5bINAgPT3q$r$yyabFwD{L${<_S6Gne^8HavEstn+-# zBF&anUO!sSQ+3(gYP-TI_W|PAz&)g4RVJ1{aEeP_$U9GIMd7}3_a4=rc<_W%;rt** zLoFXC=C>)y6Yj%?rxnxq`Sk+?Q}6W(@N)ycB+Lq$lb(=WkTxvm@=#>k_}M*$RoeV( zlS!RTqEalSGzmS33!^CMA|J-Xj{@xEGqMSj{28)T;~gM*X8?%?49{|t+P48bftsF^ z-cx62n3?Nkd+#GNrBdW)%5})s0aFg(Rpv(3cVCapolO`QnM4a7R|v}ZTl**szyK-nR*WYHT` z)&kqeXtHGimcKcw4)EBRRVJRb7C0Mok|UrUwv@fJ5&(eZ=CDu48>lZ*y-EuJjAeFw zq~uf?2b3G-=G?3f1qc5Ot9_{bWs;Vb1*rTD;M5cx&bgN;fWJs&#ka*v=(rBbgE`Ab z=lD>~7eFz#LgeJ-FRoib?LvM<=_o$Lv~S2M#_$rZbLN*~1cbx60dUF`@vEg+1N0S7 zmfiz2Cu&P*0A8-E{S-x~6(#60H{f-vRH6Z|@)9DCqe>oFc|F48Dl0C#kVM`@oAZ#O zo?en?JOI> z!})|FYl~E|1R{r_*1Ikz|z?fH%Lyc>7yQIXm#T6_gm%Qni;$Ix7fTR3a z#~J$606~2HC#X*DmvaQKwP&b&T|?w>%69%c*!Fsx?O~+KOF6J=p2oZs&Iu2+$v1AL zL%b`P-t<)m=e>=6o+&?}a%Od6PW3}^%5mBxlD={F@J^s;im9nVKfylhf{wv9&ScnF zKZ#pUluuxIDok~cYmFnZBmu(jBBz<3 zMN|Xdo#+|1vnEgWGC_V3gP$tVll4feHiyBld{+txq4~on z51$BgN_xH0`>&?Hqc;+r%__?5thvP0-?%0?2vb*@x|TBcBl~aw$^);O;yyc5Dd=nc zM!@%XE-rU;=|k>Nz$LK&EMFuP=pUQ0ECT?_x6epWNOnB$uYdLH%LSK`AY?Yh-+OC> z`FnOy@kHUdVo#~v(}^IJq{n)x1z2tN2)K3~6C42$OC)<%BPuk<7We$p5t2?2g-fu?j(xZ+WrMNaW1Q zYJ(xuOVz>ew;_Y?iEXOOR_5@u%EJftA~@>4Ws}Iqa8~}h&0n|qZ|F9ERJ@leC1=y# zhyt0;_OAkN`Kr@uN4x#BUjx-dfopWKo9ti*V3oUmdT)K@M5HcfP4kR)>EPM?^93rj zX~>&DC@{1prtTBT8y3!utGhjsQ ztCz*@I#UjU>f~_#>qv?669b znMT00++sh8K=I9m08CS3C$`h2fK#%~2D=WM0qih;TC6XDsB4DLm_vC2+1W6frPSRk zCuZsP07&WTIo~17VJg=~bwir;0&RD@EL6C&C-|@9{B@lFmX7m>kLo6)HEv+Vj*?yH zu+-q6CCMu2Pgy02hh5M^A@+D}Yahz<=s%GKQ#-c|2eI+HptMuZ9&le0eV~3#b)E5v z!`NH-5eKs7tlrPBA7Bc$Pk+thNxoOP_fdM-X4Ji2n;kv%m8NkFycN@2*;$Vm)9v!h zVx#1I0L#RLfPN|ZoA>@k1bBL|qvO~T>8wMO#YE|544VShVSo0In6fJGSx)k3n+=nP ztL>XESwC-nbCtI9k*qgjO-{T-voNq>@N27Dn;@~JeJPV*Rs1TVga6$}@f($|S$dV> zlB}4AAf&+ia!2oxc!rk(J{Db8rsBBfR!iDRG^*Jw{z;#9q@bN>9VfjD52fij=f&R+uQcYy!NnW zLo&8glUw{c-XF6(Yf;umwIy)0eV!8}Pzx&3EhVh)(!~`H*zC8G)-r&5GDSSa(G0m`EM8}|3`Vz#`Y70m#lZ0%3(GFUj!Y0O$8_1>hMy!uQd;Nui!kqN)IaE z@H^J-zLI)Mgux+2OY`z3s5wi3%c;ba`J2D4^VfC$o4U@Q<`6}qkh%))<`F`1zW0$g ziQiqRD3xs*hr(U~OzO%9h=nfC$rrB~vg9k*bqHMQaa{poso~};0d=+|_}G?2d3_}7oKKVw z#U^0hrr9Kwyfz#~`eFM{ciR$=Vm#|v4#8YeSi!uVImr)Bu>*0!0QKMje0sEnD6+h- zigcbV$@s&!s9@IHOF^$L9e6Z=Lp#1bu`2*gBo)!5UrI+eY?#1 z82Q={h^IatL}W_$T^)R|@%E19Tx*i#`4H8*JgPV(Vct=BvvJOmi=yNJ(ueYaGV3%smGr5$C<&1q4xIh zyX+84Eu`}^v-9_(-^E8yPh&32%0HBAGkqfCt&Hvf@Jp$YmZE#lvjx2KZAO>)5eg7} zx<`5zYDa49z!$43DzPQOBHN26R$kM-ULw{cF!sOB^VfO)+d9u5&s7`N%p<9#J8SAh zoSYord_`0=Dh-b-sl~x|Z3c0$lw$S zMeuko$n$+C=quM&rk5PMQ|-?ym2qT~|6v!vP>ZJ(RLjxxinlstN0?H{%(GudGt^nA zF*}f4j3lPJx!xIeRd>{avhP54t=_A$X_;OV9+s6N?3#(q+c23fO*otP|_J zxyea-jvCM^Ue8PEo&frU?MzfuS3p`A^*MpV9}l*}P5hA4v7$kHfuEsXdHJ}Str2Wb zkfJ7EMSOOa3WgxxJAu?LX9B?{xUm=ZdtX^MJG;Xwt;iFpRHVCej)q^iY#R8E!P&4N zFC2jt)2Ygf?w|OHQTOmPm7N^$C*H}rWLwT9L%Pel~V_T)aQOe!$voDrD8}GV4z)f)wp2J#Z&9mQb-z=|8fE;kPPR87Y<@ zu+yL{25MqZ;0(*oF8lP9#0AU_d7&W~CL?1oBVp{bC@ zf6U@g)mWC}1MEK7xmG+9Pz8u!UA+Nt80E=*Hi_Sy-}7R%+5F_5Ch2b5szVH{chK!_QO>$|`JD)(Iu|0`ZOJgH0f<4uUP+qEm~tzPx>ELJCiMz;vmS;+P=(X5_uxlPCcsnsCownr)+ z!Mx+FOnmISokcYc{F?mNE5Cl@rSt)HUq*lC`R8`Xt7oTV6sw4{+hfd+3~~s8>?5#q zG;vUHfJW5~a;W~5kwH#8%)S#-WP$6Z4=(~hRcdNJ)BfV+VASlYvGLa!vSdX`ueCK{ zw%hY33q5;pmkBzPDL;*HE!LC-22vs8>LfQLGHmze& zKBo(mkUScb;lt!?!?f7DQ#_=8UtWdUae1iD|LTvT50Z+2z$hxED#Z~9QC~-sm#UvCR6uZxe?+ zX};KMt~0%HNlQ6w_XIpQ-b*fQ7Qj6?6AX0v^$kfRT2d}+dzTwfSP$2 z%fxC^_BTN7^)gBg(xfEmn+phd@+Eoi`AjEPmc6c2UB zdF3o6#Vlnk`s%i#^D!gfMy&SQ@dx+ouZEWa(=V$%2;G>5*VV6*RMpgh$(>Sy4REyp zMV*CF6lq(9N$Y8k*&2_954!;Zd}PKufxO||7d74p^57*fJ}b*kfR=}+F@Ig?uM7S6 zcA-C1T;!&{T2+5iUipwnmnu}A_3;swdU)Qw-?WHa(VM-SX_8*meRKM7kUQ&+xAgo0 zg>2R6DY48TgF;a4;RfdF*6Lu!lm(yu&J3m9eBm*yu4C zYO*?Ss-MA}8bc*If!j`+VaU$&4xZ3U zeuZ&aQZwMtEHgz1(1)@$PRwMFcvA`N9baP?(Z{wwMz={Q0pR%F-o1eU>&S-vI{zam zQj<(-uhy+_FIQU~b)Nt+9SZ{q~$_7?H@jJdxeG93< zVwFchTGjDQ-zbKcva~n~S=)3QoEJ($BI<0yc6|r$Q9z_Or%{ZjGH)#Q!Z z;1eETb$84U`wRdh7Cd6pkY-c5r^tYZ)f6WaS1oV3jhDw16t*QYgI6gZw&LGA5zufp zqPQ>3x~=pb0}Y7{l-C%f|9C%3&aQPnWPfSSF3`-2=ElO+pnT7KzUq9+3y|$U zSql__h zF{14Sfpdr0#LXLQSn~j2jU+j8Q2~>81^Av=7x794mY~qVlNOelU-?Qu*_zWNUpX;( zodA14gulWP-1WY>*}*nX!pI{Cb!gbBIs7WLR29LKi5bL^6R4{4put&vq?iF^CCT+2 zIuR1nb^J+%&`D5|6dXGYd3v9HWH{$9<-y57H+4xZ6$m~Xe`gOtuvaTj&V1l1c-uoU zzsEO;A0=ab_jzr?%6WY)!8m9ba!pe&u;Tt-{-U4xC^2mhT-|_74wocaxHs<&g+^UfYh2=k(M_ zl!qPw+r?vlYd%dpaAkOAY2Q_^bSU^xahPQS)WRll53m?Xv9C8BSv%Ca^pqjapI<)! zucG$k#k_q4##QK6D zrsAE{-_*}dQkdS9lgJ@SnBx>XhROe}0Md)e32Sw&?ES6tB3TX3P;DtA@<{yEyW-Wm zT>EhNoftKc6~OmCACEo-^~bS%Z=GHKA7?-x{%(WOhAd)px#N;Q)Ln>4?_iDjEP=&X zGDb^-;=H>n(}$FFYe`q&`ALqvf)q0q9IK-2m1hqG%4-zjR8_d>`6i42Dd~+w%M2#X z8>~`0yB5Juqf{#IA_v`j6k5X19uS1Np@Jd&-9C?Yo%$ ztunp>UZr3-1bB*xCR>KGKyp;GfT4EYq{ zK7_NDqfA8qgf%frdCe4v@RIu0mq)~pOssw9xwU@OF-d7_o(~AMZ)>O26VB1U>4(QN z>{^^G^7uiX9?YM8I@j4(DYGAkHnzP!A=DQu-Ta@{3IU~znD=)IeMXrfY@8@v;Gm~j z%K;9R@46kxIr9lXcYD4Sh{K4A=Szzqv0uqPZ4@r*S_hsr9=YJgb7!;P+ve<&%T$)& zCXA3B>cJa_+9qEIGR(L`xei*!;2>@L_yE0~SEp5xy8^_Xr8Ae%SApuA@M)0oX2jsNe!hu2bF+Pl3J_wqF4{B!!p`#7^}o}{Fve#n2jxF zQec2-?K~VV%Ix)r-J!N=TK>h<3F1=ruqm=J-CK&2RpqP8B3ni%By_~)1rqQsNHf*n zEfvQ+JF@e{lTQL;A60DkQse}mIj+)R9bW(NqdzBBg!0#w{<_k?i7WkyoEdyKg zk{PfN9Jt9R@HVM}D>DY2B}8V;6_1nRS=tr#vS*U37~5mRR+CY=znF~oDC`wT^6mgW zU++qQxp1TvA0Gh_-!@uu{$vHb5`am(@zPV)uWlyeWaBeC4E#NZtvh&t~wDx+M~XMXHS6e#xjmoo!MxP zr>y+VJPDj7D%yLs7eDvt=3WsRD_pK!+3NOscHyYaR-?POPohJmZ>0f0ty4hUczCi@ z+pmn&z&)0{sPCiDk3Pf7cK|MF;Z*|t8LZ%VUe?6+^OsepjEI|h|KO}|U?VP3(nBZ0 zTTbKx3@xcq|4;+)gh(|c2gE~qC{mej3}8Qt)wiuemOKR$h%aLAR&3%RIe720R8f*p zU0%KEp}v|YK-8)(*Vmo#5fu%^fA5dO$Z3LK*@`uJ^#way_9iSq{R}Y77?;&Ga)uSa#8>eWS`XV{L(grnkT>m=LUuXJv zai%{vv?06kd3O3(@1vUFPVTX2uiPvOEvvE(+5?fuChy4d*<);sNLCcjmL0tY2FSbi zemBOdwLBo9#rLzccuiU)sW{m|vL=5wGyn4E*AHlNXW8#U9oj<~rsk?X*;YL?eN&59 z7USv>?*{P9@NA%u-9p&TeNQ{>o&3W6D!ttK9VI599^f)$IoN#q@`s*+i>e;= z*FFCCYCfMu!Ucw8RdgJskRR@mw9`Wp%wH=TGI3+cRPWvfyr55;U6&*MUbzBR`VIZ5 zFQBJ8`Dr^cZUQQ@(JU4$zb84o`v*DW@_0Qv%i3_~Y9Cv=9S9&@8D<4wxnQ{oN&;5P z?3R+g1X4Uv%juX%0wLP@TJh!Em_#$g&z9&;kN6Gf^x0jd)K$N}i_{G{}w(wo+wkKSZW&_qTj5>fb%D$42{2ywutv+Z7cieVY6V?|S1W zh+so?STBIgIVUL~(pHusy)P)aDilu$3Lh6tJcP|o*!tw=fnU;=|IjnJgaI9s)$f(kY2-h{96z$k4qwt16X$hmi3+#Y-2U zDlbJIqI#lwIm!=HUi7;@5dSztOq`T}a2{0zdHXP(Xe9)_b`4S%#IcHW^AuY!CPw+YAPEgzRBMS5#wGU6uHFSj}FROg0@$;^-YMRwc1{ zXOeE^O-|QiC&~yea9S(8xjsa39-2C@or)iu;H5_L)&K~+?)N#CRm6}qp3Xp zc)a~6=emS$P3@$&)u;>zC?#jb7hPLRr z07)|g)kIMLrzp711Ne8tA^;GEX{{~$pF@u_|1@?)ns&DOj{QwU@(d&LLjm!T8@2Y` zr!U<0I`lFgIig+p!^5kCwr%&UrYZiz;w6cVu2O{rYJSQ27}=y>0%^0zgxrb8K6^^v zR#Zp|>dcNV!M6l|2_*Gy+wut08&pd$>K^ZSw-dtGaNfx*PpXk$k(;VTkL2Nb5`-Q} z2yDBjWWX;dasA229(sNb zR!s&TSI1N?YuvHURjAf$W{X2f?;0_WqpG^8&e(7zDlCC7)PA10MXO$cJv@6fJjNk2jd2vu2 zz~BZORCflwsmb@)HPnIQBd~){|C*cL0xXi#{QU-S25>m2?&_$GO?Gph1nC^snjjKuOd&vQhvL*)Tt-6w=+HpoaeRLW52KRKkI<= zZ%~*OB*l(*oS+e`Fo^==KjCZeZoQhPt1oLx0jU#Y_9 z0vDAC+%830BU{mNEG(9-TVg2uHs|gw36Om8=W;!*=fQ!2OUpjGXC!+Y%2Nh7`$q8| zt3Y^?cap-Dua>lSJaG<&(A9#ko|rudT&NZ*^+N4JY23Iod9>GiG5;eHrU&@mN0NX zO-}vCTkB7d>=O);t*<6ZvqKIN-`7~D{blMTv8z%Z90>wreWjKC>r#JR>fg+z{*b>_ zRBK!MePz+ULv4zu?(n^06W4U^h+=!mv{f;J%dYU-EJ+<%OuXV&+YYdT$yG|h{Ji!` zS>6|WL_*Z_x_BVxMFnkhC@Hr6{Q3cEfESbW#jK^v13G~srY&fjoG#b26lO6@d!e!# zbJD?J7bJG97ho3Or@ywKbxd12fJcVXEK!j~C4ZSQl)O|HU%8Z}+P1gQBOmH*IdXs$ z$J1JG0;IY*rw43k>m;dwyEZlF0K_#iJ`cLY;sGDjHlV zDcJkQZ;_ALkRMt571DtdciTiT24aBa0+UNUyxshzyl@GwEi z{J_#t7Z`m3hxI!V@?_^L2#9?ab68lyY+w73akHlpXDR&ndE9N==RiS|@{wX_DORiU zap-@Y>aSD%+d0)ArEcu~-$zjxR_DbEZ_~c0lbf#Yw#?E4t z*S^Pxop;!)KYj`c-#WqE|}PxbOb&00p4jg zy{X3IwC9%3MwtfrU*35t7**tOQ{scV6tohcQZz4KAVmsN&je(7&^D+{6`P>azWI|k zI@sQ}uXw&{+sYQ$OYpkCeOzhrVWZ9sD$iDBb-te6TBME`U9L}XdSoM)lwCTthrxq?8Gz@i^$8O3lNt$F?cTH!R^WO?r7f=4uR)Tbp~yGRQXPDKhHcHN z^G2eR115l8`;(--!@RtTcnSptrTLIiAS&HgyaT3ymlg;otbvccd3pqH^A&*y4_=bI z+CVI4T8s&6ZUK+7|84-lPY)%oM%3ojd76hKJG>sF2}mValoN^=dp?<=m*Qg-btqQB z?g4{eKk+V}&V8&)9^6ey_-e6~m~yU5n7~V=7i+VHPiU#!-z$i;W!m{5(E7$pWLAF^ zcOekAZ$j6Ts$!M$rH{tL$O9g$RAN6h0EGvSa|bHERuWcK2R1V&zc^(c7JqQWrtoE4 zyS~KmWLR}}1_ZsyRwMKIeC1VGT46`LArmRNzRZdk@MQt=nb)#6mMzRN%E6wjPhfa1%ESiQn3{L~jbo&YoDstKJd+ z9Ry|%_M^U1&3p;2;ABpjPO?1PU=bqDDvc;+-X%}KX|P@_t18G{8+fLGr)?nO7ZEYe zxUKU&Mva@Wq@u=Qzdf^~TdD!4E}!-0!Ya0)m@)D0TuJM|EoFk5K9+UdEnn-2B@cDy z_YPv|DJmf5Vjp+`c(3I+@OKP#tBf|DSd1ynQL&r-!&h>c%3~*wyF~1_^P~7gsJ39M zXhc*2N)G2<9D3|d0~g?aminSdLjedJ>Kt~UY;5Y>OL-femYrqV9u<%B>&HWuqaJXt zo8Op<^R9Su4fB1D#O?gFGPSaF%n>)fd|Mk?{sT7j^r7-rkjd6wz1K^iI6dW;oSwY_ zdXK}qmTrwxCZUVpyd99m0Z(S3r%Sb+lv)-EO(OG#4b?iObHc|C0gG4`Wk%wK=LKL6 z<>adZjb6JDfBC0Btd@gG8WMqL2LevwUxJ4()!cD-Oopbi`|~t@N9+o1mf#iMQl&aQ zSE6zTSvU*RlRO*1x4={b|pq2J1~V zfXpLZignpD-$Sms{+deScD|qF+YhBxvr)+Prr;w9hIdp~@nS>miN|`M0&%PTJsT;? zHXe|W>S-*{1ugD|tL%ockDp&Zfc-k1{Ac$Hd7f*`zYPoQ2ql$ z!~I-EJfn3DOm*QCN*gwbgPI9A9lNd(Fom<>8ISIOLlK`IP#Rx`=?aZv#ew^4KQ-`B zjo-p2Z?7GI(-9x9VJsqtlr`&S7EY$Z_F*^-Ch*k>#0O-@e)vi3b)TwQL<|{Z0l-sI zY4e`Ms3k`MAnL^*@%vE1b8xr|Ou~&+xf&D#cVCd$KFI@{(8FuNzii)@>tWWfld8Qd zY~HfYI_207OKSLZ(vna@l6K;Xd`s+jYy>8+Zvu#!|C5hOQZgOj&8>wC{1U*)3>f&I z3FfZ+GTYz5WRKBo@M>Qmx-!0Rg5C{_l7YHFqC#GQ&ycz?f7b?b3=xR-#6j1mynm5W-EoVMxvg_?N~{?sBJ_!f+Z0P)69*?TX=R z*+M~4?%Nab8sdp z*1ea(Z{P@y-P+@JOO`}!77v$fMcd^PmbziD5YEn)qzNM!7!oh%m{N%!Y2gTJA5t2u zOc<5h{$vF-__P_j9SHfTJFo05fi$oryw|IG;NaK3IgX`V?gCBKkB9+Bw{BO_ebo3dAwLumk{4gVb_UVy?^wxibJZr86-?k4jbJ#GKGQH4TEK0KsRW2vv_H%jcL>`}Ku6z^2!$upU4m#ibh63Oonr-D^p!U98W-{QYLZz-V;uWu%1 zYy0}Mc^SUz1@_={%VAx>*xhm~luOa?5fIprNYDmmYy-BVs!G9QgCLBC^TpGvjStcd zcy{59MLyXPqRbaAr-A2FZKjeb5f~D|mkS62_675^#6w+ZkIRqreOD&c?l%#>l&S=b zy`GbKiT{Yb9bd+8u(bjLR_AG{3OUC|v7VaRXBpTkZFp6#H{)K);|-2($zz`%UO-aZ zgVnIn`*So(iT<%FY|r(&QXdY2vWD0yk-kf`73?Sp&SF_F99-A1Ok{O{$ENDjDoaEv zJNDrxTx#2|65fxjzS<^nqhq+M8mWGIW`lRB26P9aYGeIh+>q}AUK5t&gedmP;holU zANEcDp=!TJ>LYfUP)2Ex>mcTh|DiNyhXGL}JD^q4ch^Vi8a4HU;XD|8h=1MduY3I) zyVoBK>Z!xFEqRJ=Zq;wcZvX}fBaylLN=~;iGwWb1K?-g#q_d7#9wt&r&-sp1K9$y) zKe_>@Htf71>!xT~yUSQ_iygDP;Q(p|w)^wz2TbquD~~9D6>KM%M6PQ-${~+e zC0J@~N=(V>9vgmHfF0R4RTFP3?VD`mSF>n8Z&nCM zlEeB30KNh8i&EgSJ{eDoIY~~>=gnEJeX!$1f(*Dd7eWqy2#(J#S!MR?3O3zr5S|y3*^X|KUu_3`ae$B)GRhVZPo} z;M{6S1&<{NMv}U}t5az3-(Be>em>s(bPp%H%DLxlBefE^)N0yPyU z$&XEH5?fK4NfS(%8eMg)iY-L-Bfp!LXMSVB7}E&qJrj*XcDRZdL-=FIehx3+uBVX{ zW%0=0`>{0uxMkXN2jnnJR!@%E%ZpZ<6NytG_}B7`8ZoU= z1NEXJk#Vc>Y{i17*PO%q*TMce*uS-d{c-k&ay#(Tj^wll04K4x!t$T|YLIX}-qE@m ze!xdn?Wt!&dY!Xz8U*NN%8%0n{S5Yq@-`$7SSCMukE1TsZd&tq z0JtbN!zcuaA4?9aj|PPxE>LZHVl0b*ooCrGNZIMgw*pv)XMu+6c}5XulIZ6YIC|B; zSmFm$c<{&O>8rQ31M}t;s%#Gm7v~W%vc^g{c?pO*M}{PYT>*TSIWDDSKW7>UrBKfX6I@g}NcRsH%va$}9Nb_>~8R2GS<9JzfM!Aq6l? z8^k@I=1i-DnsfKf4)UN+a~@=;w5I5#mMEwVk6=95H~4&m%}<|U5fp%?;8^D(@l3*l zO39~V2KxzeRscwWmz#i;PgnUPtXIJ)2X%mXe4y}h4sYX$N$k^WKcZI*#M@`hTC&nv zwqa2t;4zU>I?%`z4>O(Juw8gkB7IE3;8Ln z-UR@}t*S&0MFy#t$XWx2(B~q$uk`I+Y5k)sZ?>+n^v6-V^<)a2X#>dGstyxf>ac&C zUyKD+S*1RWh>?@FLzIo-7sc?u^N2=x-9cX{$ zptm1Cee&Vm2K#5_^?qYhatF7#u&YemqdthdMA=_g=cl6l1i}=`+WtLcYY1rOPw+a@WfU`u&4m!?RLXc5dt~m2)k~u^h zv|I?Mf`|%xpTbH%cij?G5LgWVeMrG~AW+A3LTJvk<1Dj&DkGK18k^Bq1STQ;vYnBcY zM0aF_TJkIz0VxT#%A^gurpN(!dt_!G_QT2_$puMf7F<)(W@7ty_}R~o;sPcuz%}JU zt#Zr%GfGfw1rID-v8$VCGo{A|-7D#1KV5Fg@hm);5-~Up{@SKT3SUnl$PWdH6?_U9WF7ea`-p2*ie#fH(PP6=@9p79ptexnvx z6?|}V1s?;FU}1gn-3~-hAy4UOx@x*!lHy&TK46&B>G<72iXDhK`ex0_IK4kN{`~#{ z-k4gumMSO3RZuD8Z;$Y_NZu5anLgb&KYarqWaz_{voo_xmc#8)64>zO?s}zu!5-1k z8R~oqe{5H+wgM;L=l~XpJd%t%(UP&M8P+RztLM2 z(nMtsYP97bO6=bW0sH+qb@DdSeL3s|*bY=c)CyF2Q*eFOJDhW_A9R1@gIIiwqUHQG z#=*{kf$y^%A#$dCyeCpP#RN__HWp=Ic2a}I4eCk0rcPv@3cCJvv%hZk{{T1pV?8@B z_3I8R;TBk?t44C9IE+h~SBmN)dr&xX0+bBu<3B3h^>MWHCdU!@G)mLsP+u}tA65Qj z)CD-m$MY=eev5x_VZ4tQewoVf=hqK-Wg@z4Tzy`!%5p5SZmE$Xz^C7X&Br>5>R#X> ztMF#!E=#KFvRly+M`4eHhpbCJlEXE72BOrcpP(?t?B(Q#rkv=JWCtKyKIKMS4=6FN z><{en3|P^Xb^^HJ-%Db#Ro1|&abFLVo^uF4@AXSD_8o=kpRA)BWMh>SSGeK(;Q6-{ zF!tW|+ez&cH|{Fh;}wL$schmBFMHVL1_B!|@colziTxjq&t}inQKAH&%F)hWA0`}= zyaTV6k+mLKqC{ALqN8%xaIhH|)tcx2a@}1_`q=8!7OQ2Fn?Qj)~}?E^^W`G2?5>rzkv z$Wj8-my$8>;-!w1e0A`V1*h{iqe#Qdr`JynRA1d&4a||i{e86P`5jR!B#Mt`rA6Ga zY&uU>N5CV-89zm6RgPb8=O=Z{*TP%~1q)<&9YyEw<6u1((Znl=mOh&zFR*&mMz|u? zA)BTWwD(7|IXkYqy?XRQ#Kf=-e%ovjQ$L*mAr!qTE(nq_Xr(%q}LT;YBU>$0}f09>? z)v@|*D@tEVLH=sZ6q#*GDVng8{Rs`PApRQ3;hElIQA}{#tR!E$ciZ{r*AKuDz;?qa zDm=abtm65^RRoQ|8A027idu|IR_-{0Bf^g?PJ9$b2WE5dr^(7tHl|QrE)evS5_p)u z4cGgOg)=MLnJ`OPeo|ylpHW{$>z7l%b{@z$@YpXfBqF>(5q6}=vXnL5QeVenfz=)g z@!4b9xa427;j03bm0<$Ww(D*E$WAO{eByiFXNQle{*gFjO}ns0AjCO^14iuXAXj45 z*d92j7t>w}{h`oRcfM%6QEG`o%y27-v`wBlxd$FV2o_TfCX%Z+07#c`ZhU80gjqsq zWW%;p%APLtPZj&Q{O)vBRvrP2J;GQ8_oh@yAi<0SCwcbo?R;6?887^!kR~B2j7SNpTGlkr}^ zt8?)xZspJ(FpwPDK~DVJ{HpXoHq9W#2rPN95Gme!m#CGG=!#;_XWiq(zwjha)g|P1 zPvlyXJlGKbitp=@4Ey6%cE0`R*AM6*l#k6@d8chd*SX=lpq!2_^%TxGdFv^>ua6_k zNx@U)Ugy8yXjT3nTn0Gi+5NN`s@g6csnxZ!ZY$N1M(A4W%W=N+x~|`{@Jot;F~2VH zopy+y~sS6(F^On>PuE`=SDxvby?Zufek7iVV(d9j^KlmP8sz7gQ9hDnu^fo-fkerL~z4@pGV&BwpN9=%<2 zodYKa;pe<2kb?&i24&T!6455U77eY52-cimYr;1$kr)hrj0%2U;*FQrEnf-v7ypEp zCmwiSR)MIR5oLanr6i^shjC+EuYY(+B4d^X_oN%VzPG4&)otXr8lWk7ip|d0!wsN2 zg&Ui&{{Y!{{p8Vn<6ayTw<)oWAxqRp;)_29o@iZ=i?FKN%u&+F3X3j6!d43u4+Qcb z+s7-odDx!{v*o*jOiwUsFUqf%A;%xZ&Y`biPVh`xH^3IhE`6Gv=vGdBnPWj;N1yH z3U*)nIAXGif}Q#6Y=52Y|1r+?=Pr*>S2+&G;$WU^KW`Ezu5(>!!NKoY zY*5DwW{Sd`AQRC13T>N*dGZDxo+4RA7or`NqP$UG!l1qHeB-$Vcye( z;G_FA9=luriv+*|0aQf-;X0BqgpOA^_rlkAR60uzRJi;N-E&EaDS97YD zR}+iGa>Lg}j@f{%5?=mzt^FtjBf-`shiKsQZ*sYR3=j8_epczux3f6PNA{7#Hn-;; z*FH9(0Wn$G`r>PVz8xDGKe3doW3heG(a*#$Y%lF<#ET2Ye)lV@0&eiN^3CeaTvD?K z{OU+9p2-Hffzz26c#FB)%plNzWwu;C!oX=#ghBWocU;D9;! zxsT*@vW4Xn$Bf=wDJ~n|TO1lxb%Ee(?^;v?@0b|bX4#dzkw(Q^l;RZ~ps*Mb@XhTo z`46c=V-TuhHL+T#A+FeVApX$`k^pGc? zFQjtuV3}+Rm~%;6XFnWg`03>Mph>k=Bn^6{RsFP3NZ;R!ZLN{=M~LV!>_}=7%2n?k zhf$N1JMYh5KY+cKdSPb?&VgJ*;U_lh<0k+Llx+*S%(?s7IbubZOMkdv>874hjs4x{1a2srwY%3XRZyV?QtFK({bvFIF!zpR%t zMWt^u1CW(#B7v4oWFSh(@QF(5Nyf0r3b1N5=oU34g0&Iynlh~jrD8G@|?t#{k% zX2p=c63%kc;!z|i-6rBEM1D7*uZ^M3G&BraO>Yc-r7d<_jh0#{- zR|)dyn6WBv+GA;^Uh~SM|22RttArdDWFo8}$JQ9tL95;yXI z`|EJO4);$v+@IQAo~Z;g=jCO^rn0gWaSiTI^7Ni=CkkqZB3UA5aXKvA3!& z3NP8_&X`4L*IufSB$;bMOb)bh2H4^}snskIZX(0RK6n55>jy{?@dU|}6zl+YzRxCL zG+bduJ&w@8iUN?pg1kZI?Ld^%3wm34r62&rSpUot$AKvNr&f)}7Ij56Lp_w3Fy>kE zghZ!Kz2SY-bPQlW#j9HKjwR*ih~I$9#FqN8*Rsgu;N?vFuy{14aH2!5YuH5oS}4!u z#83C_d^}KD#$*7ToUTCyu%3B@@>!eL0!KT8^;$A|S6V8AVtr%hTKOIeE0#tOh({>7 z8*J?jz&%#5TUZVZVWuxz-rek4?b`A>sp3oL!GmotT<*og>X5=~xuMFTx;{%iyMCE%zHNR)=l9>(0k-848fd(6R#vk4Z+ELa^eWF&y?lNu@NWD z1r2cZ4cPJa`t}nZ1R&72NHi>?$_o1DVO^*R8Nu|?u78a~-aCk1l1|EZt)XxKAT5~TliI0WTc4@V6NIJSb)&%xf-bW?+UJ{LQZE}}PjE#S{ zEKzT9a!M-5I7tRDAHwrv3G6LIgBI|D?eIC!7=*X$+!kT@{nTc{A(ZBctyl8}2Z3?; zTpvxOO)L4;^@-9*5}afzHd!Jr+FN5K`Jn1_wql}quGf-$q?cKs?dQlA zQaziTseVL*N1}eb@9ll2Oj?nr{AT;zuaO>GC6fb;HsCyHTm^WR_E>)^4j$Avf3EPH z<>g!hzi+8mdb^(@xg;Z*5M=9Rw*_XbT;B6P>9Lx1wkPINz|cue zz)Z%9)tnhtPA91gm8D`XgSRC@=3l4#b-I7f>HdH+hb?hW*kr*Iquhg3eUDGOqXpjNTsxVIIDbUM}!%zY<$r$e$zZNAGjFAY=mQV09Wm$bfNaU_go zWWK>)KcGl&r;?h)8PaT&F-g|DEDT_ybb+1WaQ5T*_$k@*Y4wRLMapZgu&!9z0>c7g zqVF3{OCVpJt8n)vEY)=mnZRZiHUac`=6=5_FmgKNtAEcgY_XE5(%2@?lFuDF97WD=Vk)xfH= zc<~mX-x17jS?k#~cj{|8+po7<_PxJps>8OdM_li%La5CtaCYFe##a;}c$Eg^Jw|ez z`)+kX!?h2zZnlRYtG3PH#89SX$dXmhwk|>(;xC-A7{2@+Bz`!RIi7sU{P9YjkyT^M zU&9|?S=DcRGQZibQvm4*(>{ltp6vX!#X~|yoS#rZpFhg`@P*m5i{-91mHj?PMO5;| zTO52=#epLrDH^*ZNhXWSQv%V6&DzuByuJ;O!7fhQTuCjv$p9a z!zR6Yp7rZ?zi#&ry4@cz*pY+dxja`jXBOt`j9TBtM|_>fR43(1N_D_LWXB&9N8#iu z!y!t^oO1Ilml4S}awRaQO>nF=p7SNX@AZEHj^x5u76^rj$Io9spvqOnuQcg?l8Vvk zErVSA(Y}h(rPG>@Hy{#7*lSg@!=!hg3gD88Z#hK+R`=p+V2lQrbL;+K?MgV+J1U8L zb8xw-RSs_8n*l4A4TV<)gYrDS#|3kRJ$Uq2(n*&6gDL$2;Z<3m#i})WlgyGUeQFEA zTqgpbq{PFuXL_66b-)C^fZh4l9)N9uPaz;o47(^Dw{_ z+4VtkyhuOWnH)!1OLul00DkPLo_9Im)m57XwXsMM-xFzrRoV32h&e&}kwILEtb1@i zZ^Ua8cba4%-u8U__7nU^ls;3=CN*(@RRYwl9(W+GJEk>RIs&ilB7}N_Sde$*VH;w( za!v-dV~jJA&x(b=c=!U@J~6c*Y61P*KSm6wOS0xe$bXH7b@EQ;XCx-X8j$z31)!d< ztJ7P`v?aGGl|$jRw0>(yfZB)ky?}20d@A|5TT&`c-BS{e_!PC+AK8qt+2B|QNV&_S zg25H%`Q!*__RW~X<;0ORpt*h259k(Pr&#&YCI^I9&cz=p) zQI7;VUymBF2uLCKd5T@RkS2?-hwLNQsVn;x4_L4wR|n^M^gLfS);d@tmUsq^TlPmc zfcxAi|KHeZYq3^?FnckW~2|NkpIi5RBTY-(}3h^h%0`>KV|vRr7(z1$r|aFzC669rC{9q zcIh5FzqLm>VLN9%B|m7AL}ZC8i>Bys!}YfW0#!Gf;i27xlpNYP=J1 zZD2B`+LRBdU!Pz3{!&GIfo-ARerYv%0F3jKT{)Sz#4OWeTUV5anD|!`UJkOd#BJCZ zO53a^?P9Q8;XEAwqKFJ=^3C)8yo?tZnd-}X({2*q8DOh0`DbE+Azjp z5*=fH0})Olkb6yg#`F0+xH#WO$~3zizSe@7-52K+qJ%e<4CLawgn=(x;m3}|z>#-X zD5;)2$#H6Fd=FW{rhV%jNK1^nWG9X>Hg#2JmJKsLVC+MJ2*l?oc$^B0Bp&z)2jzKw z_I^0!h?t?N$z+)+mrjUn4pht9>8n?w5r4v9O{;ckK_y%%o3n}tWG&dBMWw;}Ra@%T z4#_D!jFE32Sa+{)>#$q>N(H*slxg7C^?qINA9cMyqh=jIht)f#3niY85^7;!NwZcKNfegfzD;$`$)qH6eb_M5sH`~vVAW5AR zl4_}pSrK3cV7E$HHP!C5sm}+vo?vp!#bNHIq#4*)^!`l3fT7S{wo*RU4xhC|xzSs9 znKaieSiYXey4a?83T8d-)+8+gc_9~Uro*-32X$^^VfDTHg`s_ktrCx5srC#sXO$yB zv@asjRV?0VKvrT}(!yA$OvLlCXFiW3S5#C6DcC|6EPd1?l;k+Lp~%sq!{xJPP99ahwk$m-%Y=~=TUqcI4^}*; zk__Mii=^uPS~qbmCkonEDH@<|dLc6-@V@F*%S1^8JZT{bGKiWC9E38u+fEtVd?B ztSTUm3^?E4oc1Ar-9DA;ScFO&B`PcYsVd0jMoK}T<-p}Q=DXVq8dNua#L=W2sg0{F z(3s8#s4V>Je80~3&pO|qTtS@Ip-cP^e6j3K9|urn?|D%5j0{()x8y}b98n<~ULAnd zA4#>Q>MAy4joKxtR~a=VTq5W7&2i$Le85{d4R>%PWt3bm;18e{e*XFa0T%$EscRO6 z53)QKtGcJ2>mb^;Bpe5+&aMsc%6JFn0>(YE$b$k^jlJn0B{UkRaK2mJ8XW%$jBr1lotwU>3drJCk=mz zUxN*MA4xxnJ_$UTb0;F^A#ckJwsfCJL?O}Vg{z1V&3ZxS#|vR$8#GGzi>PwG8~X~%zkL4)&zfsqmZF<79o#G$9FW7ws&CiANG z6ES||AjddYR_cjRa;0+VU*(A(c!*XU6RiCFIJOJ^qN;DLiGxn4i!m<;Hb!*RaggbG zMN`&!i@hX7L#dsYxu0q3;+*5v+4S|sX`9LPzK@KD-05Mlq;-|A>z+rtRx-m}In;!2xtyod`(i$3D97s^CVTCFOP8;-%Gw#ax`qI;qUnbRFa?!PUSbKJQm=j{_ zMd8a3BnGU6f|Wj|y5nsqA%vqQ{wTSWtUns7^GEgVjU)k(vnB?whE-9*nBMD!z5*iQ z8%+fvO$T|-?N$k2&ZF5PK|v6KX#|I42*#wiX{g$Q>&&lAhWqj}#e_BdhTq@t`x}0L T!|y*De*gRjX1oxufGGk1?cM`* literal 0 HcmV?d00001 diff --git a/vendor/golang.org/x/crypto/hkdf/example_test.go b/vendor/golang.org/x/crypto/hkdf/example_test.go new file mode 100644 index 0000000..df84395 --- /dev/null +++ b/vendor/golang.org/x/crypto/hkdf/example_test.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hkdf_test + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "fmt" + "golang.org/x/crypto/hkdf" + "io" +) + +// Usage example that expands one master key into three other cryptographically +// secure keys. +func Example_usage() { + // Underlying hash function to use + hash := sha256.New + + // Cryptographically secure master key. + master := []byte{0x00, 0x01, 0x02, 0x03} // i.e. NOT this. + + // Non secret salt, optional (can be nil) + // Recommended: hash-length sized random + salt := make([]byte, hash().Size()) + n, err := io.ReadFull(rand.Reader, salt) + if n != len(salt) || err != nil { + fmt.Println("error:", err) + return + } + + // Non secret context specific info, optional (can be nil). + // Note, independent from the master key. + info := []byte{0x03, 0x14, 0x15, 0x92, 0x65} + + // Create the key derivation function + hkdf := hkdf.New(hash, master, salt, info) + + // Generate the required keys + keys := make([][]byte, 3) + for i := 0; i < len(keys); i++ { + keys[i] = make([]byte, 24) + n, err := io.ReadFull(hkdf, keys[i]) + if n != len(keys[i]) || err != nil { + fmt.Println("error:", err) + return + } + } + + // Keys should contain 192 bit random keys + for i := 1; i <= len(keys); i++ { + fmt.Printf("Key #%d: %v\n", i, !bytes.Equal(keys[i-1], make([]byte, 24))) + } + + // Output: + // Key #1: true + // Key #2: true + // Key #3: true +} diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go new file mode 100644 index 0000000..5bc2463 --- /dev/null +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. +// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +// +// RFC 5869: https://tools.ietf.org/html/rfc5869 +package hkdf // import "golang.org/x/crypto/hkdf" + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + cache []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.cache) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read from the cache, if enough data is present + n := copy(p, f.cache) + p = p[n:] + + // Fill the buffer + for len(p) > 0 { + f.expander.Reset() + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.cache = f.prev + n = copy(p, f.cache) + p = p[n:] + } + // Save leftovers for next run + f.cache = f.cache[n:] + + return need, nil +} + +// New returns a new HKDF using the given hash, the secret keying material to expand +// and optional salt and info fields. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + prk := extractor.Sum(nil) + + return &hkdf{hmac.New(hash, prk), extractor.Size(), info, 1, nil, nil} +} diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf_test.go b/vendor/golang.org/x/crypto/hkdf/hkdf_test.go new file mode 100644 index 0000000..cee659b --- /dev/null +++ b/vendor/golang.org/x/crypto/hkdf/hkdf_test.go @@ -0,0 +1,370 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package hkdf + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "hash" + "io" + "testing" +) + +type hkdfTest struct { + hash func() hash.Hash + master []byte + salt []byte + info []byte + out []byte +} + +var hkdfTests = []hkdfTest{ + // Tests from RFC 5869 + { + sha256.New, + []byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + }, + []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, + }, + []byte{ + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, + }, + []byte{ + 0x3c, 0xb2, 0x5f, 0x25, 0xfa, 0xac, 0xd5, 0x7a, + 0x90, 0x43, 0x4f, 0x64, 0xd0, 0x36, 0x2f, 0x2a, + 0x2d, 0x2d, 0x0a, 0x90, 0xcf, 0x1a, 0x5a, 0x4c, + 0x5d, 0xb0, 0x2d, 0x56, 0xec, 0xc4, 0xc5, 0xbf, + 0x34, 0x00, 0x72, 0x08, 0xd5, 0xb8, 0x87, 0x18, + 0x58, 0x65, + }, + }, + { + sha256.New, + []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + }, + []byte{ + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + }, + []byte{ + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, + }, + []byte{ + 0xb1, 0x1e, 0x39, 0x8d, 0xc8, 0x03, 0x27, 0xa1, + 0xc8, 0xe7, 0xf7, 0x8c, 0x59, 0x6a, 0x49, 0x34, + 0x4f, 0x01, 0x2e, 0xda, 0x2d, 0x4e, 0xfa, 0xd8, + 0xa0, 0x50, 0xcc, 0x4c, 0x19, 0xaf, 0xa9, 0x7c, + 0x59, 0x04, 0x5a, 0x99, 0xca, 0xc7, 0x82, 0x72, + 0x71, 0xcb, 0x41, 0xc6, 0x5e, 0x59, 0x0e, 0x09, + 0xda, 0x32, 0x75, 0x60, 0x0c, 0x2f, 0x09, 0xb8, + 0x36, 0x77, 0x93, 0xa9, 0xac, 0xa3, 0xdb, 0x71, + 0xcc, 0x30, 0xc5, 0x81, 0x79, 0xec, 0x3e, 0x87, + 0xc1, 0x4c, 0x01, 0xd5, 0xc1, 0xf3, 0x43, 0x4f, + 0x1d, 0x87, + }, + }, + { + sha256.New, + []byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + }, + []byte{}, + []byte{}, + []byte{ + 0x8d, 0xa4, 0xe7, 0x75, 0xa5, 0x63, 0xc1, 0x8f, + 0x71, 0x5f, 0x80, 0x2a, 0x06, 0x3c, 0x5a, 0x31, + 0xb8, 0xa1, 0x1f, 0x5c, 0x5e, 0xe1, 0x87, 0x9e, + 0xc3, 0x45, 0x4e, 0x5f, 0x3c, 0x73, 0x8d, 0x2d, + 0x9d, 0x20, 0x13, 0x95, 0xfa, 0xa4, 0xb6, 0x1a, + 0x96, 0xc8, + }, + }, + { + sha1.New, + []byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, + }, + []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, + }, + []byte{ + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, + }, + []byte{ + 0x08, 0x5a, 0x01, 0xea, 0x1b, 0x10, 0xf3, 0x69, + 0x33, 0x06, 0x8b, 0x56, 0xef, 0xa5, 0xad, 0x81, + 0xa4, 0xf1, 0x4b, 0x82, 0x2f, 0x5b, 0x09, 0x15, + 0x68, 0xa9, 0xcd, 0xd4, 0xf1, 0x55, 0xfd, 0xa2, + 0xc2, 0x2e, 0x42, 0x24, 0x78, 0xd3, 0x05, 0xf3, + 0xf8, 0x96, + }, + }, + { + sha1.New, + []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + }, + []byte{ + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + }, + []byte{ + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, + }, + []byte{ + 0x0b, 0xd7, 0x70, 0xa7, 0x4d, 0x11, 0x60, 0xf7, + 0xc9, 0xf1, 0x2c, 0xd5, 0x91, 0x2a, 0x06, 0xeb, + 0xff, 0x6a, 0xdc, 0xae, 0x89, 0x9d, 0x92, 0x19, + 0x1f, 0xe4, 0x30, 0x56, 0x73, 0xba, 0x2f, 0xfe, + 0x8f, 0xa3, 0xf1, 0xa4, 0xe5, 0xad, 0x79, 0xf3, + 0xf3, 0x34, 0xb3, 0xb2, 0x02, 0xb2, 0x17, 0x3c, + 0x48, 0x6e, 0xa3, 0x7c, 0xe3, 0xd3, 0x97, 0xed, + 0x03, 0x4c, 0x7f, 0x9d, 0xfe, 0xb1, 0x5c, 0x5e, + 0x92, 0x73, 0x36, 0xd0, 0x44, 0x1f, 0x4c, 0x43, + 0x00, 0xe2, 0xcf, 0xf0, 0xd0, 0x90, 0x0b, 0x52, + 0xd3, 0xb4, + }, + }, + { + sha1.New, + []byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + }, + []byte{}, + []byte{}, + []byte{ + 0x0a, 0xc1, 0xaf, 0x70, 0x02, 0xb3, 0xd7, 0x61, + 0xd1, 0xe5, 0x52, 0x98, 0xda, 0x9d, 0x05, 0x06, + 0xb9, 0xae, 0x52, 0x05, 0x72, 0x20, 0xa3, 0x06, + 0xe0, 0x7b, 0x6b, 0x87, 0xe8, 0xdf, 0x21, 0xd0, + 0xea, 0x00, 0x03, 0x3d, 0xe0, 0x39, 0x84, 0xd3, + 0x49, 0x18, + }, + }, + { + sha1.New, + []byte{ + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + }, + nil, + []byte{}, + []byte{ + 0x2c, 0x91, 0x11, 0x72, 0x04, 0xd7, 0x45, 0xf3, + 0x50, 0x0d, 0x63, 0x6a, 0x62, 0xf6, 0x4f, 0x0a, + 0xb3, 0xba, 0xe5, 0x48, 0xaa, 0x53, 0xd4, 0x23, + 0xb0, 0xd1, 0xf2, 0x7e, 0xbb, 0xa6, 0xf5, 0xe5, + 0x67, 0x3a, 0x08, 0x1d, 0x70, 0xcc, 0xe7, 0xac, + 0xfc, 0x48, + }, + }, +} + +func TestHKDF(t *testing.T) { + for i, tt := range hkdfTests { + hkdf := New(tt.hash, tt.master, tt.salt, tt.info) + out := make([]byte, len(tt.out)) + + n, err := io.ReadFull(hkdf, out) + if n != len(tt.out) || err != nil { + t.Errorf("test %d: not enough output bytes: %d.", i, n) + } + + if !bytes.Equal(out, tt.out) { + t.Errorf("test %d: incorrect output: have %v, need %v.", i, out, tt.out) + } + } +} + +func TestHKDFMultiRead(t *testing.T) { + for i, tt := range hkdfTests { + hkdf := New(tt.hash, tt.master, tt.salt, tt.info) + out := make([]byte, len(tt.out)) + + for b := 0; b < len(tt.out); b++ { + n, err := io.ReadFull(hkdf, out[b:b+1]) + if n != 1 || err != nil { + t.Errorf("test %d.%d: not enough output bytes: have %d, need %d .", i, b, n, len(tt.out)) + } + } + + if !bytes.Equal(out, tt.out) { + t.Errorf("test %d: incorrect output: have %v, need %v.", i, out, tt.out) + } + } +} + +func TestHKDFLimit(t *testing.T) { + hash := sha1.New + master := []byte{0x00, 0x01, 0x02, 0x03} + info := []byte{} + + hkdf := New(hash, master, nil, info) + limit := hash().Size() * 255 + out := make([]byte, limit) + + // The maximum output bytes should be extractable + n, err := io.ReadFull(hkdf, out) + if n != limit || err != nil { + t.Errorf("not enough output bytes: %d, %v.", n, err) + } + + // Reading one more should fail + n, err = io.ReadFull(hkdf, make([]byte, 1)) + if n > 0 || err == nil { + t.Errorf("key expansion overflowed: n = %d, err = %v", n, err) + } +} + +func Benchmark16ByteMD5Single(b *testing.B) { + benchmarkHKDFSingle(md5.New, 16, b) +} + +func Benchmark20ByteSHA1Single(b *testing.B) { + benchmarkHKDFSingle(sha1.New, 20, b) +} + +func Benchmark32ByteSHA256Single(b *testing.B) { + benchmarkHKDFSingle(sha256.New, 32, b) +} + +func Benchmark64ByteSHA512Single(b *testing.B) { + benchmarkHKDFSingle(sha512.New, 64, b) +} + +func Benchmark8ByteMD5Stream(b *testing.B) { + benchmarkHKDFStream(md5.New, 8, b) +} + +func Benchmark16ByteMD5Stream(b *testing.B) { + benchmarkHKDFStream(md5.New, 16, b) +} + +func Benchmark8ByteSHA1Stream(b *testing.B) { + benchmarkHKDFStream(sha1.New, 8, b) +} + +func Benchmark20ByteSHA1Stream(b *testing.B) { + benchmarkHKDFStream(sha1.New, 20, b) +} + +func Benchmark8ByteSHA256Stream(b *testing.B) { + benchmarkHKDFStream(sha256.New, 8, b) +} + +func Benchmark32ByteSHA256Stream(b *testing.B) { + benchmarkHKDFStream(sha256.New, 32, b) +} + +func Benchmark8ByteSHA512Stream(b *testing.B) { + benchmarkHKDFStream(sha512.New, 8, b) +} + +func Benchmark64ByteSHA512Stream(b *testing.B) { + benchmarkHKDFStream(sha512.New, 64, b) +} + +func benchmarkHKDFSingle(hasher func() hash.Hash, block int, b *testing.B) { + master := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07} + salt := []byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17} + info := []byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27} + out := make([]byte, block) + + b.SetBytes(int64(block)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + hkdf := New(hasher, master, salt, info) + io.ReadFull(hkdf, out) + } +} + +func benchmarkHKDFStream(hasher func() hash.Hash, block int, b *testing.B) { + master := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07} + salt := []byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17} + info := []byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27} + out := make([]byte, block) + + b.SetBytes(int64(block)) + b.ResetTimer() + + hkdf := New(hasher, master, salt, info) + for i := 0; i < b.N; i++ { + _, err := io.ReadFull(hkdf, out) + if err != nil { + hkdf = New(hasher, master, salt, info) + i-- + } + } +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go new file mode 100644 index 0000000..0f8efdb --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go @@ -0,0 +1,198 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ChaCha20 implements the core ChaCha20 function as specified in https://tools.ietf.org/html/rfc7539#section-2.3. +package chacha20 + +import "encoding/binary" + +const rounds = 20 + +// core applies the ChaCha20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. +func core(out *[64]byte, in *[16]byte, k *[32]byte) { + j0 := uint32(0x61707865) + j1 := uint32(0x3320646e) + j2 := uint32(0x79622d32) + j3 := uint32(0x6b206574) + j4 := binary.LittleEndian.Uint32(k[0:4]) + j5 := binary.LittleEndian.Uint32(k[4:8]) + j6 := binary.LittleEndian.Uint32(k[8:12]) + j7 := binary.LittleEndian.Uint32(k[12:16]) + j8 := binary.LittleEndian.Uint32(k[16:20]) + j9 := binary.LittleEndian.Uint32(k[20:24]) + j10 := binary.LittleEndian.Uint32(k[24:28]) + j11 := binary.LittleEndian.Uint32(k[28:32]) + j12 := binary.LittleEndian.Uint32(in[0:4]) + j13 := binary.LittleEndian.Uint32(in[4:8]) + j14 := binary.LittleEndian.Uint32(in[8:12]) + j15 := binary.LittleEndian.Uint32(in[12:16]) + + x0, x1, x2, x3, x4, x5, x6, x7 := j0, j1, j2, j3, j4, j5, j6, j7 + x8, x9, x10, x11, x12, x13, x14, x15 := j8, j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + x0 += x4 + x12 ^= x0 + x12 = (x12 << 16) | (x12 >> (16)) + x8 += x12 + x4 ^= x8 + x4 = (x4 << 12) | (x4 >> (20)) + x0 += x4 + x12 ^= x0 + x12 = (x12 << 8) | (x12 >> (24)) + x8 += x12 + x4 ^= x8 + x4 = (x4 << 7) | (x4 >> (25)) + x1 += x5 + x13 ^= x1 + x13 = (x13 << 16) | (x13 >> 16) + x9 += x13 + x5 ^= x9 + x5 = (x5 << 12) | (x5 >> 20) + x1 += x5 + x13 ^= x1 + x13 = (x13 << 8) | (x13 >> 24) + x9 += x13 + x5 ^= x9 + x5 = (x5 << 7) | (x5 >> 25) + x2 += x6 + x14 ^= x2 + x14 = (x14 << 16) | (x14 >> 16) + x10 += x14 + x6 ^= x10 + x6 = (x6 << 12) | (x6 >> 20) + x2 += x6 + x14 ^= x2 + x14 = (x14 << 8) | (x14 >> 24) + x10 += x14 + x6 ^= x10 + x6 = (x6 << 7) | (x6 >> 25) + x3 += x7 + x15 ^= x3 + x15 = (x15 << 16) | (x15 >> 16) + x11 += x15 + x7 ^= x11 + x7 = (x7 << 12) | (x7 >> 20) + x3 += x7 + x15 ^= x3 + x15 = (x15 << 8) | (x15 >> 24) + x11 += x15 + x7 ^= x11 + x7 = (x7 << 7) | (x7 >> 25) + x0 += x5 + x15 ^= x0 + x15 = (x15 << 16) | (x15 >> 16) + x10 += x15 + x5 ^= x10 + x5 = (x5 << 12) | (x5 >> 20) + x0 += x5 + x15 ^= x0 + x15 = (x15 << 8) | (x15 >> 24) + x10 += x15 + x5 ^= x10 + x5 = (x5 << 7) | (x5 >> 25) + x1 += x6 + x12 ^= x1 + x12 = (x12 << 16) | (x12 >> 16) + x11 += x12 + x6 ^= x11 + x6 = (x6 << 12) | (x6 >> 20) + x1 += x6 + x12 ^= x1 + x12 = (x12 << 8) | (x12 >> 24) + x11 += x12 + x6 ^= x11 + x6 = (x6 << 7) | (x6 >> 25) + x2 += x7 + x13 ^= x2 + x13 = (x13 << 16) | (x13 >> 16) + x8 += x13 + x7 ^= x8 + x7 = (x7 << 12) | (x7 >> 20) + x2 += x7 + x13 ^= x2 + x13 = (x13 << 8) | (x13 >> 24) + x8 += x13 + x7 ^= x8 + x7 = (x7 << 7) | (x7 >> 25) + x3 += x4 + x14 ^= x3 + x14 = (x14 << 16) | (x14 >> 16) + x9 += x14 + x4 ^= x9 + x4 = (x4 << 12) | (x4 >> 20) + x3 += x4 + x14 ^= x3 + x14 = (x14 << 8) | (x14 >> 24) + x9 += x14 + x4 ^= x9 + x4 = (x4 << 7) | (x4 >> 25) + } + + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + binary.LittleEndian.PutUint32(out[0:4], x0) + binary.LittleEndian.PutUint32(out[4:8], x1) + binary.LittleEndian.PutUint32(out[8:12], x2) + binary.LittleEndian.PutUint32(out[12:16], x3) + binary.LittleEndian.PutUint32(out[16:20], x4) + binary.LittleEndian.PutUint32(out[20:24], x5) + binary.LittleEndian.PutUint32(out[24:28], x6) + binary.LittleEndian.PutUint32(out[28:32], x7) + binary.LittleEndian.PutUint32(out[32:36], x8) + binary.LittleEndian.PutUint32(out[36:40], x9) + binary.LittleEndian.PutUint32(out[40:44], x10) + binary.LittleEndian.PutUint32(out[44:48], x11) + binary.LittleEndian.PutUint32(out[48:52], x12) + binary.LittleEndian.PutUint32(out[52:56], x13) + binary.LittleEndian.PutUint32(out[56:60], x14) + binary.LittleEndian.PutUint32(out[60:64], x15) +} + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter contains the raw +// ChaCha20 counter bytes (i.e. block counter followed by nonce). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + var block [64]byte + var counterCopy [16]byte + copy(counterCopy[:], counter[:]) + + for len(in) >= 64 { + core(&block, &counterCopy, key) + for i, x := range block { + out[i] = in[i] ^ x + } + u := uint32(1) + for i := 0; i < 4; i++ { + u += uint32(counterCopy[i]) + counterCopy[i] = byte(u) + u >>= 8 + } + in = in[64:] + out = out[64:] + } + + if len(in) > 0 { + core(&block, &counterCopy, key) + for i, v := range in { + out[i] = v ^ block[i] + } + } +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_test.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_test.go new file mode 100644 index 0000000..b80d34c --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_test.go @@ -0,0 +1,33 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20 + +import ( + "encoding/hex" + "testing" +) + +func TestCore(t *testing.T) { + // This is just a smoke test that checks the example from + // https://tools.ietf.org/html/rfc7539#section-2.3.2. The + // chacha20poly1305 package contains much more extensive tests of this + // code. + var key [32]byte + for i := range key { + key[i] = byte(i) + } + + var input [16]byte + input[0] = 1 + input[7] = 9 + input[11] = 0x4a + + var out [64]byte + XORKeyStream(out[:], out[:], &input, &key) + const expected = "10f1e7e4d13b5915500fdd1fa32071c4c7d1f4c733c068030422aa9ac3d46c4ed2826446079faa0914c2d705d98b02a2b5129cd1de164eb9cbd083e8a2503c4e" + if result := hex.EncodeToString(out[:]); result != expected { + t.Errorf("wanted %x but got %x", expected, result) + } +} diff --git a/vendor/golang.org/x/crypto/md4/example_test.go b/vendor/golang.org/x/crypto/md4/example_test.go new file mode 100644 index 0000000..db3f59b --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/example_test.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package md4_test + +import ( + "fmt" + "io" + + "golang.org/x/crypto/md4" +) + +func ExampleNew() { + h := md4.New() + data := "These pretzels are making me thirsty." + io.WriteString(h, data) + fmt.Printf("%x", h.Sum(nil)) + // Output: 48c4e365090b30a32f084c4888deceaa +} diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go new file mode 100644 index 0000000..6d9ba9e --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/md4.go @@ -0,0 +1,118 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package md4 implements the MD4 hash algorithm as defined in RFC 1320. +package md4 // import "golang.org/x/crypto/md4" + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.MD4, New) +} + +// The size of an MD4 checksum in bytes. +const Size = 16 + +// The blocksize of MD4 in bytes. +const BlockSize = 64 + +const ( + _Chunk = 64 + _Init0 = 0x67452301 + _Init1 = 0xEFCDAB89 + _Init2 = 0x98BADCFE + _Init3 = 0x10325476 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + s [4]uint32 + x [_Chunk]byte + nx int + len uint64 +} + +func (d *digest) Reset() { + d.s[0] = _Init0 + d.s[1] = _Init1 + d.s[2] = _Init2 + d.s[3] = _Init3 + d.nx = 0 + d.len = 0 +} + +// New returns a new hash.Hash computing the MD4 checksum. +func New() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > _Chunk-d.nx { + n = _Chunk - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == _Chunk { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0, so that caller can keep writing and summing. + d := new(digest) + *d = *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + len := d.len + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + d.Write(tmp[0 : 56-len%64]) + } else { + d.Write(tmp[0 : 64+56-len%64]) + } + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + for _, s := range d.s { + in = append(in, byte(s>>0)) + in = append(in, byte(s>>8)) + in = append(in, byte(s>>16)) + in = append(in, byte(s>>24)) + } + return in +} diff --git a/vendor/golang.org/x/crypto/md4/md4_test.go b/vendor/golang.org/x/crypto/md4/md4_test.go new file mode 100644 index 0000000..b56edd7 --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/md4_test.go @@ -0,0 +1,71 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package md4 + +import ( + "fmt" + "io" + "testing" +) + +type md4Test struct { + out string + in string +} + +var golden = []md4Test{ + {"31d6cfe0d16ae931b73c59d7e0c089c0", ""}, + {"bde52cb31de33e46245e05fbdbd6fb24", "a"}, + {"ec388dd78999dfc7cf4632465693b6bf", "ab"}, + {"a448017aaf21d8525fc10ae87aa6729d", "abc"}, + {"41decd8f579255c5200f86a4bb3ba740", "abcd"}, + {"9803f4a34e8eb14f96adba49064a0c41", "abcde"}, + {"804e7f1c2586e50b49ac65db5b645131", "abcdef"}, + {"752f4adfe53d1da0241b5bc216d098fc", "abcdefg"}, + {"ad9daf8d49d81988590a6f0e745d15dd", "abcdefgh"}, + {"1e4e28b05464316b56402b3815ed2dfd", "abcdefghi"}, + {"dc959c6f5d6f9e04e4380777cc964b3d", "abcdefghij"}, + {"1b5701e265778898ef7de5623bbe7cc0", "Discard medicine more than two years old."}, + {"d7f087e090fe7ad4a01cb59dacc9a572", "He who has a shady past knows that nice guys finish last."}, + {"a6f8fd6df617c72837592fc3570595c9", "I wouldn't marry him with a ten foot pole."}, + {"c92a84a9526da8abc240c05d6b1a1ce0", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"f6013160c4dcb00847069fee3bb09803", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"2c3bb64f50b9107ed57640fe94bec09f", "Nepal premier won't resign."}, + {"45b7d8a32c7806f2f7f897332774d6e4", "For every action there is an equal and opposite government program."}, + {"b5b4f9026b175c62d7654bdc3a1cd438", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"caf44e80f2c20ce19b5ba1cab766e7bd", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"191fae6707f496aa54a6bce9f2ecf74d", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {"9ddc753e7a4ccee6081cd1b45b23a834", "size: a.out: bad magic"}, + {"8d050f55b1cadb9323474564be08a521", "The major problem is with sendmail. -Mark Horton"}, + {"ad6e2587f74c3e3cc19146f6127fa2e3", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"1d616d60a5fabe85589c3f1566ca7fca", "If the enemy is within range, then so are you."}, + {"aec3326a4f496a2ced65a1963f84577f", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"77b4fd762d6b9245e61c50bf6ebf118b", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"e8f48c726bae5e516f6ddb1a4fe62438", "C is as portable as Stonehedge!!"}, + {"a3a84366e7219e887423b01f9be7166e", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, + {"a6b7aa35157e984ef5d9b7f32e5fbb52", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {"75661f0545955f8f9abeeb17845f3fd6", "How can you write a big system without C++? -Paul Glick"}, +} + +func TestGolden(t *testing.T) { + for i := 0; i < len(golden); i++ { + g := golden[i] + c := New() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("md4[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } +} diff --git a/vendor/golang.org/x/crypto/md4/md4block.go b/vendor/golang.org/x/crypto/md4/md4block.go new file mode 100644 index 0000000..3fed475 --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/md4block.go @@ -0,0 +1,89 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// MD4 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package md4 + +var shift1 = []uint{3, 7, 11, 19} +var shift2 = []uint{3, 5, 9, 13} +var shift3 = []uint{3, 9, 11, 15} + +var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15} +var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15} + +func _Block(dig *digest, p []byte) int { + a := dig.s[0] + b := dig.s[1] + c := dig.s[2] + d := dig.s[3] + n := 0 + var X [16]uint32 + for len(p) >= _Chunk { + aa, bb, cc, dd := a, b, c, d + + j := 0 + for i := 0; i < 16; i++ { + X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // If this needs to be made faster in the future, + // the usual trick is to unroll each of these + // loops by a factor of 4; that lets you replace + // the shift[] lookups with constants and, + // with suitable variable renaming in each + // unrolled body, delete the a, b, c, d = d, a, b, c + // (or you can let the optimizer do the renaming). + // + // The index variables are uint so that % by a power + // of two can be optimized easily by a compiler. + + // Round 1. + for i := uint(0); i < 16; i++ { + x := i + s := shift1[i%4] + f := ((c ^ d) & b) ^ d + a += f + X[x] + a = a<>(32-s) + a, b, c, d = d, a, b, c + } + + // Round 2. + for i := uint(0); i < 16; i++ { + x := xIndex2[i] + s := shift2[i%4] + g := (b & c) | (b & d) | (c & d) + a += g + X[x] + 0x5a827999 + a = a<>(32-s) + a, b, c, d = d, a, b, c + } + + // Round 3. + for i := uint(0); i < 16; i++ { + x := xIndex3[i] + s := shift3[i%4] + h := b ^ c ^ d + a += h + X[x] + 0x6ed9eba1 + a = a<>(32-s) + a, b, c, d = d, a, b, c + } + + a += aa + b += bb + c += cc + d += dd + + p = p[_Chunk:] + n += _Chunk + } + + dig.s[0] = a + dig.s[1] = b + dig.s[2] = c + dig.s[3] = d + return n +} diff --git a/vendor/golang.org/x/crypto/nacl/auth/auth.go b/vendor/golang.org/x/crypto/nacl/auth/auth.go new file mode 100644 index 0000000..ec1d6eb --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/auth/auth.go @@ -0,0 +1,58 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package auth authenticates a message using a secret key. + +The Sum function, viewed as a function of the message for a uniform random +key, is designed to meet the standard notion of unforgeability. This means +that an attacker cannot find authenticators for any messages not authenticated +by the sender, even if the attacker has adaptively influenced the messages +authenticated by the sender. For a formal definition see, e.g., Section 2.4 +of Bellare, Kilian, and Rogaway, "The security of the cipher block chaining +message authentication code," Journal of Computer and System Sciences 61 (2000), +362–399; http://www-cse.ucsd.edu/~mihir/papers/cbc.html. + +auth does not make any promises regarding "strong" unforgeability; perhaps +one valid authenticator can be converted into another valid authenticator for +the same message. NaCl also does not make any promises regarding "truncated +unforgeability." + +This package is interoperable with NaCl: https://nacl.cr.yp.to/auth.html. +*/ +package auth + +import ( + "crypto/hmac" + "crypto/sha512" +) + +const ( + // Size is the size, in bytes, of an authenticated digest. + Size = 32 + // KeySize is the size, in bytes, of an authentication key. + KeySize = 32 +) + +// Sum generates an authenticator for m using a secret key and returns the +// 32-byte digest. +func Sum(m []byte, key *[KeySize]byte) *[Size]byte { + mac := hmac.New(sha512.New, key[:]) + mac.Write(m) + out := new([KeySize]byte) + copy(out[:], mac.Sum(nil)[:Size]) + return out +} + +// Verify checks that digest is a valid authenticator of message m under the +// given secret key. Verify does not leak timing information. +func Verify(digest []byte, m []byte, key *[KeySize]byte) bool { + if len(digest) != Size { + return false + } + mac := hmac.New(sha512.New, key[:]) + mac.Write(m) + expectedMAC := mac.Sum(nil) // first 256 bits of 512-bit sum + return hmac.Equal(digest, expectedMAC[:Size]) +} diff --git a/vendor/golang.org/x/crypto/nacl/auth/auth_test.go b/vendor/golang.org/x/crypto/nacl/auth/auth_test.go new file mode 100644 index 0000000..92074b5 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/auth/auth_test.go @@ -0,0 +1,172 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package auth + +import ( + "bytes" + rand "crypto/rand" + mrand "math/rand" + "testing" +) + +// Test cases are from RFC 4231, and match those present in the tests directory +// of the download here: https://nacl.cr.yp.to/install.html +var testCases = []struct { + key [32]byte + msg []byte + out [32]byte +}{ + { + key: [32]byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, + }, + msg: []byte("Hi There"), + out: [32]byte{ + 0x87, 0xaa, 0x7c, 0xde, 0xa5, 0xef, 0x61, 0x9d, + 0x4f, 0xf0, 0xb4, 0x24, 0x1a, 0x1d, 0x6c, 0xb0, + 0x23, 0x79, 0xf4, 0xe2, 0xce, 0x4e, 0xc2, 0x78, + 0x7a, 0xd0, 0xb3, 0x05, 0x45, 0xe1, 0x7c, 0xde, + }, + }, + { + key: [32]byte{'J', 'e', 'f', 'e'}, + msg: []byte("what do ya want for nothing?"), + out: [32]byte{ + 0x16, 0x4b, 0x7a, 0x7b, 0xfc, 0xf8, 0x19, 0xe2, + 0xe3, 0x95, 0xfb, 0xe7, 0x3b, 0x56, 0xe0, 0xa3, + 0x87, 0xbd, 0x64, 0x22, 0x2e, 0x83, 0x1f, 0xd6, + 0x10, 0x27, 0x0c, 0xd7, 0xea, 0x25, 0x05, 0x54, + }, + }, + { + key: [32]byte{ + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, + }, + msg: []byte{ // 50 bytes of 0xdd + 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, + 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, + 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, + 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, + 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, + 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, + 0xdd, 0xdd, + }, + out: [32]byte{ + 0xfa, 0x73, 0xb0, 0x08, 0x9d, 0x56, 0xa2, 0x84, + 0xef, 0xb0, 0xf0, 0x75, 0x6c, 0x89, 0x0b, 0xe9, + 0xb1, 0xb5, 0xdb, 0xdd, 0x8e, 0xe8, 0x1a, 0x36, + 0x55, 0xf8, 0x3e, 0x33, 0xb2, 0x27, 0x9d, 0x39, + }, + }, + { + key: [32]byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, + }, + msg: []byte{ + 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, + 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, + 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, + 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, + 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, + 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, + 0xcd, 0xcd, + }, + out: [32]byte{ + 0xb0, 0xba, 0x46, 0x56, 0x37, 0x45, 0x8c, 0x69, + 0x90, 0xe5, 0xa8, 0xc5, 0xf6, 0x1d, 0x4a, 0xf7, + 0xe5, 0x76, 0xd9, 0x7f, 0xf9, 0x4b, 0x87, 0x2d, + 0xe7, 0x6f, 0x80, 0x50, 0x36, 0x1e, 0xe3, 0xdb, + }, + }, +} + +func TestSum(t *testing.T) { + for i, test := range testCases { + tag := Sum(test.msg, &test.key) + if !bytes.Equal(tag[:], test.out[:]) { + t.Errorf("#%d: Sum: got\n%x\nwant\n%x", i, tag, test.out) + } + } +} + +func TestVerify(t *testing.T) { + wrongMsg := []byte("unknown msg") + + for i, test := range testCases { + if !Verify(test.out[:], test.msg, &test.key) { + t.Errorf("#%d: Verify(%x, %q, %x) failed", i, test.out, test.msg, test.key) + } + if Verify(test.out[:], wrongMsg, &test.key) { + t.Errorf("#%d: Verify(%x, %q, %x) unexpectedly passed", i, test.out, wrongMsg, test.key) + } + } +} + +func TestStress(t *testing.T) { + if testing.Short() { + t.Skip("exhaustiveness test") + } + + var key [32]byte + msg := make([]byte, 10000) + prng := mrand.New(mrand.NewSource(0)) + + // copied from tests/auth5.c in nacl + for i := 0; i < 10000; i++ { + if _, err := rand.Read(key[:]); err != nil { + t.Fatal(err) + } + if _, err := rand.Read(msg[:i]); err != nil { + t.Fatal(err) + } + tag := Sum(msg[:i], &key) + if !Verify(tag[:], msg[:i], &key) { + t.Errorf("#%d: unexpected failure from Verify", i) + } + if i > 0 { + msgIndex := prng.Intn(i) + oldMsgByte := msg[msgIndex] + msg[msgIndex] += byte(1 + prng.Intn(255)) + if Verify(tag[:], msg[:i], &key) { + t.Errorf("#%d: unexpected success from Verify after corrupting message", i) + } + msg[msgIndex] = oldMsgByte + + tag[prng.Intn(len(tag))] += byte(1 + prng.Intn(255)) + if Verify(tag[:], msg[:i], &key) { + t.Errorf("#%d: unexpected success from Verify after corrupting authenticator", i) + } + } + } +} + +func BenchmarkAuth(b *testing.B) { + var key [32]byte + if _, err := rand.Read(key[:]); err != nil { + b.Fatal(err) + } + buf := make([]byte, 1024) + if _, err := rand.Read(buf[:]); err != nil { + b.Fatal(err) + } + + b.SetBytes(int64(len(buf))) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + tag := Sum(buf, &key) + if Verify(tag[:], buf, &key) == false { + b.Fatal("unexpected failure from Verify") + } + } +} diff --git a/vendor/golang.org/x/crypto/nacl/auth/example_test.go b/vendor/golang.org/x/crypto/nacl/auth/example_test.go new file mode 100644 index 0000000..02a2cd6 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/auth/example_test.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package auth_test + +import ( + "encoding/hex" + "fmt" + + "golang.org/x/crypto/nacl/auth" +) + +func Example() { + // Load your secret key from a safe place and reuse it across multiple + // Sum calls. (Obviously don't use this example key for anything + // real.) If you want to convert a passphrase to a key, use a suitable + // package like bcrypt or scrypt. + secretKeyBytes, err := hex.DecodeString("6368616e676520746869732070617373776f726420746f206120736563726574") + if err != nil { + panic(err) + } + + var secretKey [32]byte + copy(secretKey[:], secretKeyBytes) + + mac := auth.Sum([]byte("hello world"), &secretKey) + fmt.Printf("%x\n", *mac) + result := auth.Verify(mac[:], []byte("hello world"), &secretKey) + fmt.Println(result) + badResult := auth.Verify(mac[:], []byte("different message"), &secretKey) + fmt.Println(badResult) + // Output: eca5a521f3d77b63f567fb0cb6f5f2d200641bc8dada42f60c5f881260c30317 + // true + // false +} diff --git a/vendor/golang.org/x/crypto/nacl/box/box.go b/vendor/golang.org/x/crypto/nacl/box/box.go new file mode 100644 index 0000000..31b697b --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/box/box.go @@ -0,0 +1,103 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package box authenticates and encrypts small messages using public-key cryptography. + +Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate +messages. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html. +*/ +package box // import "golang.org/x/crypto/nacl/box" + +import ( + "io" + + "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/nacl/secretbox" + "golang.org/x/crypto/salsa20/salsa" +) + +// Overhead is the number of bytes of overhead when boxing a message. +const Overhead = secretbox.Overhead + +// GenerateKey generates a new public/private key pair suitable for use with +// Seal and Open. +func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) { + publicKey = new([32]byte) + privateKey = new([32]byte) + _, err = io.ReadFull(rand, privateKey[:]) + if err != nil { + publicKey = nil + privateKey = nil + return + } + + curve25519.ScalarBaseMult(publicKey, privateKey) + return +} + +var zeros [16]byte + +// Precompute calculates the shared key between peersPublicKey and privateKey +// and writes it to sharedKey. The shared key can be used with +// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing +// when using the same pair of keys repeatedly. +func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) { + curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey) + salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma) +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// will be Overhead bytes longer than the original and must not overlap it. The +// nonce must be unique for each distinct message for a given pair of keys. +func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte { + var sharedKey [32]byte + Precompute(&sharedKey, peersPublicKey, privateKey) + return secretbox.Seal(out, message, nonce, &sharedKey) +} + +// SealAfterPrecomputation performs the same actions as Seal, but takes a +// shared key as generated by Precompute. +func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte { + return secretbox.Seal(out, message, nonce, sharedKey) +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. +func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) { + var sharedKey [32]byte + Precompute(&sharedKey, peersPublicKey, privateKey) + return secretbox.Open(out, box, nonce, &sharedKey) +} + +// OpenAfterPrecomputation performs the same actions as Open, but takes a +// shared key as generated by Precompute. +func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) { + return secretbox.Open(out, box, nonce, sharedKey) +} diff --git a/vendor/golang.org/x/crypto/nacl/box/box_test.go b/vendor/golang.org/x/crypto/nacl/box/box_test.go new file mode 100644 index 0000000..481ade2 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/box/box_test.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package box + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "testing" + + "golang.org/x/crypto/curve25519" +) + +func TestSealOpen(t *testing.T) { + publicKey1, privateKey1, _ := GenerateKey(rand.Reader) + publicKey2, privateKey2, _ := GenerateKey(rand.Reader) + + if *privateKey1 == *privateKey2 { + t.Fatalf("private keys are equal!") + } + if *publicKey1 == *publicKey2 { + t.Fatalf("public keys are equal!") + } + message := []byte("test message") + var nonce [24]byte + + box := Seal(nil, message, &nonce, publicKey1, privateKey2) + opened, ok := Open(nil, box, &nonce, publicKey2, privateKey1) + if !ok { + t.Fatalf("failed to open box") + } + + if !bytes.Equal(opened, message) { + t.Fatalf("got %x, want %x", opened, message) + } + + for i := range box { + box[i] ^= 0x40 + _, ok := Open(nil, box, &nonce, publicKey2, privateKey1) + if ok { + t.Fatalf("opened box with byte %d corrupted", i) + } + box[i] ^= 0x40 + } +} + +func TestBox(t *testing.T) { + var privateKey1, privateKey2 [32]byte + for i := range privateKey1[:] { + privateKey1[i] = 1 + } + for i := range privateKey2[:] { + privateKey2[i] = 2 + } + + var publicKey1 [32]byte + curve25519.ScalarBaseMult(&publicKey1, &privateKey1) + var message [64]byte + for i := range message[:] { + message[i] = 3 + } + + var nonce [24]byte + for i := range nonce[:] { + nonce[i] = 4 + } + + box := Seal(nil, message[:], &nonce, &publicKey1, &privateKey2) + + // expected was generated using the C implementation of NaCl. + expected, _ := hex.DecodeString("78ea30b19d2341ebbdba54180f821eec265cf86312549bea8a37652a8bb94f07b78a73ed1708085e6ddd0e943bbdeb8755079a37eb31d86163ce241164a47629c0539f330b4914cd135b3855bc2a2dfc") + + if !bytes.Equal(box, expected) { + t.Fatalf("box didn't match, got\n%x\n, expected\n%x", box, expected) + } +} diff --git a/vendor/golang.org/x/crypto/nacl/box/example_test.go b/vendor/golang.org/x/crypto/nacl/box/example_test.go new file mode 100644 index 0000000..25e42d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/box/example_test.go @@ -0,0 +1,95 @@ +package box_test + +import ( + crypto_rand "crypto/rand" // Custom so it's clear which rand we're using. + "fmt" + "io" + + "golang.org/x/crypto/nacl/box" +) + +func Example() { + senderPublicKey, senderPrivateKey, err := box.GenerateKey(crypto_rand.Reader) + if err != nil { + panic(err) + } + + recipientPublicKey, recipientPrivateKey, err := box.GenerateKey(crypto_rand.Reader) + if err != nil { + panic(err) + } + + // You must use a different nonce for each message you encrypt with the + // same key. Since the nonce here is 192 bits long, a random value + // provides a sufficiently small probability of repeats. + var nonce [24]byte + if _, err := io.ReadFull(crypto_rand.Reader, nonce[:]); err != nil { + panic(err) + } + + msg := []byte("Alas, poor Yorick! I knew him, Horatio") + // This encrypts msg and appends the result to the nonce. + encrypted := box.Seal(nonce[:], msg, &nonce, recipientPublicKey, senderPrivateKey) + + // The recipient can decrypt the message using their private key and the + // sender's public key. When you decrypt, you must use the same nonce you + // used to encrypt the message. One way to achieve this is to store the + // nonce alongside the encrypted message. Above, we stored the nonce in the + // first 24 bytes of the encrypted text. + var decryptNonce [24]byte + copy(decryptNonce[:], encrypted[:24]) + decrypted, ok := box.Open(nil, encrypted[24:], &decryptNonce, senderPublicKey, recipientPrivateKey) + if !ok { + panic("decryption error") + } + fmt.Println(string(decrypted)) + // Output: Alas, poor Yorick! I knew him, Horatio +} + +func Example_precompute() { + senderPublicKey, senderPrivateKey, err := box.GenerateKey(crypto_rand.Reader) + if err != nil { + panic(err) + } + + recipientPublicKey, recipientPrivateKey, err := box.GenerateKey(crypto_rand.Reader) + if err != nil { + panic(err) + } + + // The shared key can be used to speed up processing when using the same + // pair of keys repeatedly. + sharedEncryptKey := new([32]byte) + box.Precompute(sharedEncryptKey, recipientPublicKey, senderPrivateKey) + + // You must use a different nonce for each message you encrypt with the + // same key. Since the nonce here is 192 bits long, a random value + // provides a sufficiently small probability of repeats. + var nonce [24]byte + if _, err := io.ReadFull(crypto_rand.Reader, nonce[:]); err != nil { + panic(err) + } + + msg := []byte("A fellow of infinite jest, of most excellent fancy") + // This encrypts msg and appends the result to the nonce. + encrypted := box.SealAfterPrecomputation(nonce[:], msg, &nonce, sharedEncryptKey) + + // The shared key can be used to speed up processing when using the same + // pair of keys repeatedly. + var sharedDecryptKey [32]byte + box.Precompute(&sharedDecryptKey, senderPublicKey, recipientPrivateKey) + + // The recipient can decrypt the message using the shared key. When you + // decrypt, you must use the same nonce you used to encrypt the message. + // One way to achieve this is to store the nonce alongside the encrypted + // message. Above, we stored the nonce in the first 24 bytes of the + // encrypted text. + var decryptNonce [24]byte + copy(decryptNonce[:], encrypted[:24]) + decrypted, ok := box.OpenAfterPrecomputation(nil, encrypted[24:], &decryptNonce, &sharedDecryptKey) + if !ok { + panic("decryption error") + } + fmt.Println(string(decrypted)) + // Output: A fellow of infinite jest, of most excellent fancy +} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/example_test.go b/vendor/golang.org/x/crypto/nacl/secretbox/example_test.go new file mode 100644 index 0000000..789f4ff --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/secretbox/example_test.go @@ -0,0 +1,53 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package secretbox_test + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io" + + "golang.org/x/crypto/nacl/secretbox" +) + +func Example() { + // Load your secret key from a safe place and reuse it across multiple + // Seal calls. (Obviously don't use this example key for anything + // real.) If you want to convert a passphrase to a key, use a suitable + // package like bcrypt or scrypt. + secretKeyBytes, err := hex.DecodeString("6368616e676520746869732070617373776f726420746f206120736563726574") + if err != nil { + panic(err) + } + + var secretKey [32]byte + copy(secretKey[:], secretKeyBytes) + + // You must use a different nonce for each message you encrypt with the + // same key. Since the nonce here is 192 bits long, a random value + // provides a sufficiently small probability of repeats. + var nonce [24]byte + if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil { + panic(err) + } + + // This encrypts "hello world" and appends the result to the nonce. + encrypted := secretbox.Seal(nonce[:], []byte("hello world"), &nonce, &secretKey) + + // When you decrypt, you must use the same nonce and key you used to + // encrypt the message. One way to achieve this is to store the nonce + // alongside the encrypted message. Above, we stored the nonce in the first + // 24 bytes of the encrypted text. + var decryptNonce [24]byte + copy(decryptNonce[:], encrypted[:24]) + decrypted, ok := secretbox.Open(nil, encrypted[24:], &decryptNonce, &secretKey) + if !ok { + panic("decryption error") + } + + fmt.Println(string(decrypted)) + // Output: hello world +} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go new file mode 100644 index 0000000..53ee83c --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go @@ -0,0 +1,166 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package secretbox encrypts and authenticates small messages. + +Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with +secret-key cryptography. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. +*/ +package secretbox // import "golang.org/x/crypto/nacl/secretbox" + +import ( + "golang.org/x/crypto/poly1305" + "golang.org/x/crypto/salsa20/salsa" +) + +// Overhead is the number of bytes of overhead when boxing a message. +const Overhead = poly1305.TagSize + +// setup produces a sub-key and Salsa20 counter given a nonce and key. +func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { + // We use XSalsa20 for encryption so first we need to generate a + // key and nonce with HSalsa20. + var hNonce [16]byte + copy(hNonce[:], nonce[:]) + salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) + + // The final 8 bytes of the original nonce form the new nonce. + copy(counter[:], nonce[16:]) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// must not overlap message. The key and nonce pair must be unique for each +// distinct message and the output will be Overhead bytes longer than message. +func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + + ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) + + // We XOR up to 32 bytes of message with the keystream generated from + // the first block. + firstMessageBlock := message + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + + tagOut := out + out = out[poly1305.TagSize:] + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + message = message[len(firstMessageBlock):] + ciphertext := out + out = out[len(firstMessageBlock):] + + // Now encrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, message, &counter, &subKey) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, ciphertext, &poly1305Key) + copy(tagOut, tag[:]) + + return ret +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. +func Open(out []byte, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { + if len(box) < Overhead { + return nil, false + } + + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + var tag [poly1305.TagSize]byte + copy(tag[:], box) + + if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { + return nil, false + } + + ret, out := sliceForAppend(out, len(box)-Overhead) + + // We XOR up to 32 bytes of box with the keystream generated from + // the first block. + box = box[Overhead:] + firstMessageBlock := box + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + + box = box[len(firstMessageBlock):] + out = out[len(firstMessageBlock):] + + // Now decrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, box, &counter, &subKey) + + return ret, true +} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox_test.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox_test.go new file mode 100644 index 0000000..3c70b0f --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox_test.go @@ -0,0 +1,154 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package secretbox + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "testing" +) + +func TestSealOpen(t *testing.T) { + var key [32]byte + var nonce [24]byte + + rand.Reader.Read(key[:]) + rand.Reader.Read(nonce[:]) + + var box, opened []byte + + for msgLen := 0; msgLen < 128; msgLen += 17 { + message := make([]byte, msgLen) + rand.Reader.Read(message) + + box = Seal(box[:0], message, &nonce, &key) + var ok bool + opened, ok = Open(opened[:0], box, &nonce, &key) + if !ok { + t.Errorf("%d: failed to open box", msgLen) + continue + } + + if !bytes.Equal(opened, message) { + t.Errorf("%d: got %x, expected %x", msgLen, opened, message) + continue + } + } + + for i := range box { + box[i] ^= 0x20 + _, ok := Open(opened[:0], box, &nonce, &key) + if ok { + t.Errorf("box was opened after corrupting byte %d", i) + } + box[i] ^= 0x20 + } +} + +func TestSecretBox(t *testing.T) { + var key [32]byte + var nonce [24]byte + var message [64]byte + + for i := range key[:] { + key[i] = 1 + } + for i := range nonce[:] { + nonce[i] = 2 + } + for i := range message[:] { + message[i] = 3 + } + + box := Seal(nil, message[:], &nonce, &key) + // expected was generated using the C implementation of NaCl. + expected, _ := hex.DecodeString("8442bc313f4626f1359e3b50122b6ce6fe66ddfe7d39d14e637eb4fd5b45beadab55198df6ab5368439792a23c87db70acb6156dc5ef957ac04f6276cf6093b84be77ff0849cc33e34b7254d5a8f65ad") + + if !bytes.Equal(box, expected) { + t.Fatalf("box didn't match, got\n%x\n, expected\n%x", box, expected) + } +} + +func TestAppend(t *testing.T) { + var key [32]byte + var nonce [24]byte + var message [8]byte + + out := make([]byte, 4) + box := Seal(out, message[:], &nonce, &key) + if !bytes.Equal(box[:4], out[:4]) { + t.Fatalf("Seal didn't correctly append") + } + + out = make([]byte, 4, 100) + box = Seal(out, message[:], &nonce, &key) + if !bytes.Equal(box[:4], out[:4]) { + t.Fatalf("Seal didn't correctly append with sufficient capacity.") + } +} + +func benchmarkSealSize(b *testing.B, size int) { + message := make([]byte, size) + out := make([]byte, size+Overhead) + var nonce [24]byte + var key [32]byte + + b.SetBytes(int64(size)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + out = Seal(out[:0], message, &nonce, &key) + } +} + +func BenchmarkSeal8Bytes(b *testing.B) { + benchmarkSealSize(b, 8) +} + +func BenchmarkSeal100Bytes(b *testing.B) { + benchmarkSealSize(b, 100) +} + +func BenchmarkSeal1K(b *testing.B) { + benchmarkSealSize(b, 1024) +} + +func BenchmarkSeal8K(b *testing.B) { + benchmarkSealSize(b, 8192) +} + +func benchmarkOpenSize(b *testing.B, size int) { + msg := make([]byte, size) + result := make([]byte, size) + var nonce [24]byte + var key [32]byte + box := Seal(nil, msg, &nonce, &key) + + b.SetBytes(int64(size)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + if _, ok := Open(result[:0], box, &nonce, &key); !ok { + panic("Open failed") + } + } +} + +func BenchmarkOpen8Bytes(b *testing.B) { + benchmarkOpenSize(b, 8) +} + +func BenchmarkOpen100Bytes(b *testing.B) { + benchmarkOpenSize(b, 100) +} + +func BenchmarkOpen1K(b *testing.B) { + benchmarkOpenSize(b, 1024) +} + +func BenchmarkOpen8K(b *testing.B) { + benchmarkOpenSize(b, 8192) +} diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go new file mode 100644 index 0000000..589dfd3 --- /dev/null +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -0,0 +1,778 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses +// are signed messages attesting to the validity of a certificate for a small +// period of time. This is used to manage revocation for X.509 certificates. +package ocsp // import "golang.org/x/crypto/ocsp" + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "math/big" + "strconv" + "time" +) + +var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) + +// ResponseStatus contains the result of an OCSP request. See +// https://tools.ietf.org/html/rfc6960#section-2.3 +type ResponseStatus int + +const ( + Success ResponseStatus = 0 + Malformed ResponseStatus = 1 + InternalError ResponseStatus = 2 + TryLater ResponseStatus = 3 + // Status code four is unused in OCSP. See + // https://tools.ietf.org/html/rfc6960#section-4.2.1 + SignatureRequired ResponseStatus = 5 + Unauthorized ResponseStatus = 6 +) + +func (r ResponseStatus) String() string { + switch r { + case Success: + return "success" + case Malformed: + return "malformed" + case InternalError: + return "internal error" + case TryLater: + return "try later" + case SignatureRequired: + return "signature required" + case Unauthorized: + return "unauthorized" + default: + return "unknown OCSP status: " + strconv.Itoa(int(r)) + } +} + +// ResponseError is an error that may be returned by ParseResponse to indicate +// that the response itself is an error, not just that its indicating that a +// certificate is revoked, unknown, etc. +type ResponseError struct { + Status ResponseStatus +} + +func (r ResponseError) Error() string { + return "ocsp: error from server: " + r.Status.String() +} + +// These are internal structures that reflect the ASN.1 structure of an OCSP +// response. See RFC 2560, section 4.2. + +type certID struct { + HashAlgorithm pkix.AlgorithmIdentifier + NameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// https://tools.ietf.org/html/rfc2560#section-4.1.1 +type ocspRequest struct { + TBSRequest tbsRequest +} + +type tbsRequest struct { + Version int `asn1:"explicit,tag:0,default:0,optional"` + RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` + RequestList []request +} + +type request struct { + Cert certID +} + +type responseASN1 struct { + Status asn1.Enumerated + Response responseBytes `asn1:"explicit,tag:0,optional"` +} + +type responseBytes struct { + ResponseType asn1.ObjectIdentifier + Response []byte +} + +type basicResponse struct { + TBSResponseData responseData + SignatureAlgorithm pkix.AlgorithmIdentifier + Signature asn1.BitString + Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` +} + +type responseData struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:0,explicit,tag:0"` + RawResponderID asn1.RawValue + ProducedAt time.Time `asn1:"generalized"` + Responses []singleResponse +} + +type singleResponse struct { + CertID certID + Good asn1.Flag `asn1:"tag:0,optional"` + Revoked revokedInfo `asn1:"tag:1,optional"` + Unknown asn1.Flag `asn1:"tag:2,optional"` + ThisUpdate time.Time `asn1:"generalized"` + NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` + SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"` +} + +type revokedInfo struct { + RevocationTime time.Time `asn1:"generalized"` + Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"` +} + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} +) + +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +var signatureAlgorithmDetails = []struct { + algo x509.SignatureAlgorithm + oid asn1.ObjectIdentifier + pubKeyAlgo x509.PublicKeyAlgorithm + hash crypto.Hash +}{ + {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, + {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, + {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, + {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, + {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, + {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, + {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, + {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, + {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { + var pubType x509.PublicKeyAlgorithm + + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = x509.RSA + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureSHA256WithRSA + sigAlgo.Parameters = asn1.RawValue{ + Tag: 5, + } + + case *ecdsa.PublicKey: + pubType = x509.ECDSA + + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + + default: + err = errors.New("x509: only RSA and ECDSA keys supported") + } + + if err != nil { + return + } + + if requestedSigAlgo == 0 { + return + } + + found := false + for _, details := range signatureAlgorithmDetails { + if details.algo == requestedSigAlgo { + if details.pubKeyAlgo != pubType { + err = errors.New("x509: requested SignatureAlgorithm does not match private key type") + return + } + sigAlgo.Algorithm, hashFunc = details.oid, details.hash + if hashFunc == 0 { + err = errors.New("x509: cannot sign with hash function requested") + return + } + found = true + break + } + } + + if !found { + err = errors.New("x509: unknown SignatureAlgorithm") + } + + return +} + +// TODO(agl): this is taken from crypto/x509 and so should probably be exported +// from crypto/x509 or crypto/x509/pkix. +func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { + for _, details := range signatureAlgorithmDetails { + if oid.Equal(details.oid) { + return details.algo + } + } + return x509.UnknownSignatureAlgorithm +} + +// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. +func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target) { + return hash + } + } + return crypto.Hash(0) +} + +func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { + for hash, oid := range hashOIDs { + if hash == target { + return oid + } + } + return nil +} + +// This is the exposed reflection of the internal OCSP structures. + +// The status values that can be expressed in OCSP. See RFC 6960. +const ( + // Good means that the certificate is valid. + Good = iota + // Revoked means that the certificate has been deliberately revoked. + Revoked + // Unknown means that the OCSP responder doesn't know about the certificate. + Unknown + // ServerFailed is unused and was never used (see + // https://go-review.googlesource.com/#/c/18944). ParseResponse will + // return a ResponseError when an error response is parsed. + ServerFailed +) + +// The enumerated reasons for revoking a certificate. See RFC 5280. +const ( + Unspecified = 0 + KeyCompromise = 1 + CACompromise = 2 + AffiliationChanged = 3 + Superseded = 4 + CessationOfOperation = 5 + CertificateHold = 6 + + RemoveFromCRL = 8 + PrivilegeWithdrawn = 9 + AACompromise = 10 +) + +// Request represents an OCSP request. See RFC 6960. +type Request struct { + HashAlgorithm crypto.Hash + IssuerNameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// Marshal marshals the OCSP request to ASN.1 DER encoded form. +func (req *Request) Marshal() ([]byte, error) { + hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm) + if hashAlg == nil { + return nil, errors.New("Unknown hash algorithm") + } + return asn1.Marshal(ocspRequest{ + tbsRequest{ + Version: 0, + RequestList: []request{ + { + Cert: certID{ + pkix.AlgorithmIdentifier{ + Algorithm: hashAlg, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + req.IssuerNameHash, + req.IssuerKeyHash, + req.SerialNumber, + }, + }, + }, + }, + }) +} + +// Response represents an OCSP response containing a single SingleResponse. See +// RFC 6960. +type Response struct { + // Status is one of {Good, Revoked, Unknown} + Status int + SerialNumber *big.Int + ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time + RevocationReason int + Certificate *x509.Certificate + // TBSResponseData contains the raw bytes of the signed response. If + // Certificate is nil then this can be used to verify Signature. + TBSResponseData []byte + Signature []byte + SignatureAlgorithm x509.SignatureAlgorithm + + // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash. + // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512. + // If zero, the default is crypto.SHA1. + IssuerHash crypto.Hash + + // RawResponderName optionally contains the DER-encoded subject of the + // responder certificate. Exactly one of RawResponderName and + // ResponderKeyHash is set. + RawResponderName []byte + // ResponderKeyHash optionally contains the SHA-1 hash of the + // responder's public key. Exactly one of RawResponderName and + // ResponderKeyHash is set. + ResponderKeyHash []byte + + // Extensions contains raw X.509 extensions from the singleExtensions field + // of the OCSP response. When parsing certificates, this can be used to + // extract non-critical extensions that are not parsed by this package. When + // marshaling OCSP responses, the Extensions field is ignored, see + // ExtraExtensions. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any marshaled + // OCSP response (in the singleExtensions field). Values override any + // extensions that would otherwise be produced based on the other fields. The + // ExtraExtensions field is not populated when parsing certificates, see + // Extensions. + ExtraExtensions []pkix.Extension +} + +// These are pre-serialized error responses for the various non-success codes +// defined by OCSP. The Unauthorized code in particular can be used by an OCSP +// responder that supports only pre-signed responses as a response to requests +// for certificates with unknown status. See RFC 5019. +var ( + MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} + InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} + TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} + SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} + UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} +) + +// CheckSignatureFrom checks that the signature in resp is a valid signature +// from issuer. This should only be used if resp.Certificate is nil. Otherwise, +// the OCSP response contained an intermediate certificate that created the +// signature. That signature is checked by ParseResponse and only +// resp.Certificate remains to be validated. +func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { + return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) +} + +// ParseError results from an invalid OCSP response. +type ParseError string + +func (p ParseError) Error() string { + return string(p) +} + +// ParseRequest parses an OCSP request in DER form. It only supports +// requests for a single certificate. Signed requests are not supported. +// If a request includes a signature, it will result in a ParseError. +func ParseRequest(bytes []byte) (*Request, error) { + var req ocspRequest + rest, err := asn1.Unmarshal(bytes, &req) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP request") + } + + if len(req.TBSRequest.RequestList) == 0 { + return nil, ParseError("OCSP request contains no request body") + } + innerRequest := req.TBSRequest.RequestList[0] + + hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) + if hashFunc == crypto.Hash(0) { + return nil, ParseError("OCSP request uses unknown hash function") + } + + return &Request{ + HashAlgorithm: hashFunc, + IssuerNameHash: innerRequest.Cert.NameHash, + IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, + SerialNumber: innerRequest.Cert.SerialNumber, + }, nil +} + +// ParseResponse parses an OCSP response in DER form. It only supports +// responses for a single certificate. If the response contains a certificate +// then the signature over the response is checked. If issuer is not nil then +// it will be used to validate the signature or embedded certificate. +// +// Invalid responses and parse failures will result in a ParseError. +// Error responses will result in a ResponseError. +func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { + return ParseResponseForCert(bytes, nil, issuer) +} + +// ParseResponseForCert parses an OCSP response in DER form and searches for a +// Response relating to cert. If such a Response is found and the OCSP response +// contains a certificate then the signature over the response is checked. If +// issuer is not nil then it will be used to validate the signature or embedded +// certificate. +// +// Invalid responses and parse failures will result in a ParseError. +// Error responses will result in a ResponseError. +func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) { + var resp responseASN1 + rest, err := asn1.Unmarshal(bytes, &resp) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP response") + } + + if status := ResponseStatus(resp.Status); status != Success { + return nil, ResponseError{status} + } + + if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { + return nil, ParseError("bad OCSP response type") + } + + var basicResp basicResponse + rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) + if err != nil { + return nil, err + } + + if len(basicResp.Certificates) > 1 { + return nil, ParseError("OCSP response contains bad number of certificates") + } + + if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 { + return nil, ParseError("OCSP response contains bad number of responses") + } + + var singleResp singleResponse + if cert == nil { + singleResp = basicResp.TBSResponseData.Responses[0] + } else { + match := false + for _, resp := range basicResp.TBSResponseData.Responses { + if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 { + singleResp = resp + match = true + break + } + } + if !match { + return nil, ParseError("no response matching the supplied certificate") + } + } + + ret := &Response{ + TBSResponseData: basicResp.TBSResponseData.Raw, + Signature: basicResp.Signature.RightAlign(), + SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm), + Extensions: singleResp.SingleExtensions, + SerialNumber: singleResp.CertID.SerialNumber, + ProducedAt: basicResp.TBSResponseData.ProducedAt, + ThisUpdate: singleResp.ThisUpdate, + NextUpdate: singleResp.NextUpdate, + } + + // Handle the ResponderID CHOICE tag. ResponderID can be flattened into + // TBSResponseData once https://go-review.googlesource.com/34503 has been + // released. + rawResponderID := basicResp.TBSResponseData.RawResponderID + switch rawResponderID.Tag { + case 1: // Name + var rdn pkix.RDNSequence + if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 { + return nil, ParseError("invalid responder name") + } + ret.RawResponderName = rawResponderID.Bytes + case 2: // KeyHash + if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 { + return nil, ParseError("invalid responder key hash") + } + default: + return nil, ParseError("invalid responder id tag") + } + + if len(basicResp.Certificates) > 0 { + ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) + if err != nil { + return nil, err + } + + if err := ret.CheckSignatureFrom(ret.Certificate); err != nil { + return nil, ParseError("bad signature on embedded certificate: " + err.Error()) + } + + if issuer != nil { + if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil { + return nil, ParseError("bad OCSP signature: " + err.Error()) + } + } + } else if issuer != nil { + if err := ret.CheckSignatureFrom(issuer); err != nil { + return nil, ParseError("bad OCSP signature: " + err.Error()) + } + } + + for _, ext := range singleResp.SingleExtensions { + if ext.Critical { + return nil, ParseError("unsupported critical extension") + } + } + + for h, oid := range hashOIDs { + if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) { + ret.IssuerHash = h + break + } + } + if ret.IssuerHash == 0 { + return nil, ParseError("unsupported issuer hash algorithm") + } + + switch { + case bool(singleResp.Good): + ret.Status = Good + case bool(singleResp.Unknown): + ret.Status = Unknown + default: + ret.Status = Revoked + ret.RevokedAt = singleResp.Revoked.RevocationTime + ret.RevocationReason = int(singleResp.Revoked.Reason) + } + + return ret, nil +} + +// RequestOptions contains options for constructing OCSP requests. +type RequestOptions struct { + // Hash contains the hash function that should be used when + // constructing the OCSP request. If zero, SHA-1 will be used. + Hash crypto.Hash +} + +func (opts *RequestOptions) hash() crypto.Hash { + if opts == nil || opts.Hash == 0 { + // SHA-1 is nearly universally used in OCSP. + return crypto.SHA1 + } + return opts.Hash +} + +// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If +// opts is nil then sensible defaults are used. +func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) { + hashFunc := opts.hash() + + // OCSP seems to be the only place where these raw hash identifiers are + // used. I took the following from + // http://msdn.microsoft.com/en-us/library/ff635603.aspx + _, ok := hashOIDs[hashFunc] + if !ok { + return nil, x509.ErrUnsupportedAlgorithm + } + + if !hashFunc.Available() { + return nil, x509.ErrUnsupportedAlgorithm + } + h := opts.hash().New() + + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + req := &Request{ + HashAlgorithm: hashFunc, + IssuerNameHash: issuerNameHash, + IssuerKeyHash: issuerKeyHash, + SerialNumber: cert.SerialNumber, + } + return req.Marshal() +} + +// CreateResponse returns a DER-encoded OCSP response with the specified contents. +// The fields in the response are populated as follows: +// +// The responder cert is used to populate the responder's name field, and the +// certificate itself is provided alongside the OCSP response signature. +// +// The issuer cert is used to puplate the IssuerNameHash and IssuerKeyHash fields. +// +// The template is used to populate the SerialNumber, Status, RevokedAt, +// RevocationReason, ThisUpdate, and NextUpdate fields. +// +// If template.IssuerHash is not set, SHA1 will be used. +// +// The ProducedAt date is automatically set to the current date, to the nearest minute. +func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + if template.IssuerHash == 0 { + template.IssuerHash = crypto.SHA1 + } + hashOID := getOIDFromHashAlgorithm(template.IssuerHash) + if hashOID == nil { + return nil, errors.New("unsupported issuer hash algorithm") + } + + if !template.IssuerHash.Available() { + return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash) + } + h := template.IssuerHash.New() + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + innerResponse := singleResponse{ + CertID: certID{ + HashAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + NameHash: issuerNameHash, + IssuerKeyHash: issuerKeyHash, + SerialNumber: template.SerialNumber, + }, + ThisUpdate: template.ThisUpdate.UTC(), + NextUpdate: template.NextUpdate.UTC(), + SingleExtensions: template.ExtraExtensions, + } + + switch template.Status { + case Good: + innerResponse.Good = true + case Unknown: + innerResponse.Unknown = true + case Revoked: + innerResponse.Revoked = revokedInfo{ + RevocationTime: template.RevokedAt.UTC(), + Reason: asn1.Enumerated(template.RevocationReason), + } + } + + rawResponderID := asn1.RawValue{ + Class: 2, // context-specific + Tag: 1, // Name (explicit tag) + IsCompound: true, + Bytes: responderCert.RawSubject, + } + tbsResponseData := responseData{ + Version: 0, + RawResponderID: rawResponderID, + ProducedAt: time.Now().Truncate(time.Minute).UTC(), + Responses: []singleResponse{innerResponse}, + } + + tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) + if err != nil { + return nil, err + } + + hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + responseHash := hashFunc.New() + responseHash.Write(tbsResponseDataDER) + signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc) + if err != nil { + return nil, err + } + + response := basicResponse{ + TBSResponseData: tbsResponseData, + SignatureAlgorithm: signatureAlgorithm, + Signature: asn1.BitString{ + Bytes: signature, + BitLength: 8 * len(signature), + }, + } + if template.Certificate != nil { + response.Certificates = []asn1.RawValue{ + {FullBytes: template.Certificate.Raw}, + } + } + responseDER, err := asn1.Marshal(response) + if err != nil { + return nil, err + } + + return asn1.Marshal(responseASN1{ + Status: asn1.Enumerated(Success), + Response: responseBytes{ + ResponseType: idPKIXOCSPBasic, + Response: responseDER, + }, + }) +} diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp_test.go b/vendor/golang.org/x/crypto/ocsp/ocsp_test.go new file mode 100644 index 0000000..70b1976 --- /dev/null +++ b/vendor/golang.org/x/crypto/ocsp/ocsp_test.go @@ -0,0 +1,875 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package ocsp + +import ( + "bytes" + "crypto" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "math/big" + "reflect" + "testing" + "time" +) + +func TestOCSPDecode(t *testing.T) { + responseBytes, _ := hex.DecodeString(ocspResponseHex) + resp, err := ParseResponse(responseBytes, nil) + if err != nil { + t.Fatal(err) + } + + responderCert, _ := hex.DecodeString(startComResponderCertHex) + responder, err := x509.ParseCertificate(responderCert) + if err != nil { + t.Fatal(err) + } + + expected := Response{ + Status: Good, + SerialNumber: big.NewInt(0x1d0fa), + RevocationReason: Unspecified, + ThisUpdate: time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC), + NextUpdate: time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC), + RawResponderName: responder.RawSubject, + } + + if !reflect.DeepEqual(resp.ThisUpdate, expected.ThisUpdate) { + t.Errorf("resp.ThisUpdate: got %v, want %v", resp.ThisUpdate, expected.ThisUpdate) + } + + if !reflect.DeepEqual(resp.NextUpdate, expected.NextUpdate) { + t.Errorf("resp.NextUpdate: got %v, want %v", resp.NextUpdate, expected.NextUpdate) + } + + if resp.Status != expected.Status { + t.Errorf("resp.Status: got %d, want %d", resp.Status, expected.Status) + } + + if resp.SerialNumber.Cmp(expected.SerialNumber) != 0 { + t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, expected.SerialNumber) + } + + if resp.RevocationReason != expected.RevocationReason { + t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, expected.RevocationReason) + } + + if !bytes.Equal(resp.RawResponderName, expected.RawResponderName) { + t.Errorf("resp.RawResponderName: got %x, want %x", resp.RawResponderName, expected.RawResponderName) + } + + if !bytes.Equal(resp.ResponderKeyHash, expected.ResponderKeyHash) { + t.Errorf("resp.ResponderKeyHash: got %x, want %x", resp.ResponderKeyHash, expected.ResponderKeyHash) + } +} + +func TestOCSPDecodeWithoutCert(t *testing.T) { + responseBytes, _ := hex.DecodeString(ocspResponseWithoutCertHex) + _, err := ParseResponse(responseBytes, nil) + if err != nil { + t.Error(err) + } +} + +func TestOCSPDecodeWithExtensions(t *testing.T) { + responseBytes, _ := hex.DecodeString(ocspResponseWithCriticalExtensionHex) + _, err := ParseResponse(responseBytes, nil) + if err == nil { + t.Error(err) + } + + responseBytes, _ = hex.DecodeString(ocspResponseWithExtensionHex) + response, err := ParseResponse(responseBytes, nil) + if err != nil { + t.Fatal(err) + } + + if len(response.Extensions) != 1 { + t.Errorf("len(response.Extensions): got %v, want %v", len(response.Extensions), 1) + } + + extensionBytes := response.Extensions[0].Value + expectedBytes, _ := hex.DecodeString(ocspExtensionValueHex) + if !bytes.Equal(extensionBytes, expectedBytes) { + t.Errorf("response.Extensions[0]: got %x, want %x", extensionBytes, expectedBytes) + } +} + +func TestOCSPSignature(t *testing.T) { + issuerCert, _ := hex.DecodeString(startComHex) + issuer, err := x509.ParseCertificate(issuerCert) + if err != nil { + t.Fatal(err) + } + + response, _ := hex.DecodeString(ocspResponseHex) + if _, err := ParseResponse(response, issuer); err != nil { + t.Error(err) + } +} + +func TestOCSPRequest(t *testing.T) { + leafCert, _ := hex.DecodeString(leafCertHex) + cert, err := x509.ParseCertificate(leafCert) + if err != nil { + t.Fatal(err) + } + + issuerCert, _ := hex.DecodeString(issuerCertHex) + issuer, err := x509.ParseCertificate(issuerCert) + if err != nil { + t.Fatal(err) + } + + request, err := CreateRequest(cert, issuer, nil) + if err != nil { + t.Fatal(err) + } + + expectedBytes, _ := hex.DecodeString(ocspRequestHex) + if !bytes.Equal(request, expectedBytes) { + t.Errorf("request: got %x, wanted %x", request, expectedBytes) + } + + decodedRequest, err := ParseRequest(expectedBytes) + if err != nil { + t.Fatal(err) + } + + if decodedRequest.HashAlgorithm != crypto.SHA1 { + t.Errorf("request.HashAlgorithm: got %v, want %v", decodedRequest.HashAlgorithm, crypto.SHA1) + } + + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + _, err = asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo) + if err != nil { + t.Fatal(err) + } + + h := sha1.New() + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + if got := decodedRequest.IssuerKeyHash; !bytes.Equal(got, issuerKeyHash) { + t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerKeyHash) + } + + if got := decodedRequest.IssuerNameHash; !bytes.Equal(got, issuerNameHash) { + t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerNameHash) + } + + if got := decodedRequest.SerialNumber; got.Cmp(cert.SerialNumber) != 0 { + t.Errorf("request.SerialNumber: got %x, want %x", got, cert.SerialNumber) + } + + marshaledRequest, err := decodedRequest.Marshal() + if err != nil { + t.Fatal(err) + } + + if bytes.Compare(expectedBytes, marshaledRequest) != 0 { + t.Errorf( + "Marshaled request doesn't match expected: wanted %x, got %x", + expectedBytes, + marshaledRequest, + ) + } +} + +func TestOCSPResponse(t *testing.T) { + leafCert, _ := hex.DecodeString(leafCertHex) + leaf, err := x509.ParseCertificate(leafCert) + if err != nil { + t.Fatal(err) + } + + issuerCert, _ := hex.DecodeString(issuerCertHex) + issuer, err := x509.ParseCertificate(issuerCert) + if err != nil { + t.Fatal(err) + } + + responderCert, _ := hex.DecodeString(responderCertHex) + responder, err := x509.ParseCertificate(responderCert) + if err != nil { + t.Fatal(err) + } + + responderPrivateKeyDER, _ := hex.DecodeString(responderPrivateKeyHex) + responderPrivateKey, err := x509.ParsePKCS1PrivateKey(responderPrivateKeyDER) + if err != nil { + t.Fatal(err) + } + + extensionBytes, _ := hex.DecodeString(ocspExtensionValueHex) + extensions := []pkix.Extension{ + { + Id: ocspExtensionOID, + Critical: false, + Value: extensionBytes, + }, + } + + thisUpdate := time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC) + nextUpdate := time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC) + template := Response{ + Status: Revoked, + SerialNumber: leaf.SerialNumber, + ThisUpdate: thisUpdate, + NextUpdate: nextUpdate, + RevokedAt: thisUpdate, + RevocationReason: KeyCompromise, + Certificate: responder, + ExtraExtensions: extensions, + } + + template.IssuerHash = crypto.MD5 + _, err = CreateResponse(issuer, responder, template, responderPrivateKey) + if err == nil { + t.Fatal("CreateResponse didn't fail with non-valid template.IssuerHash value crypto.MD5") + } + + testCases := []struct { + name string + issuerHash crypto.Hash + }{ + {"Zero value", 0}, + {"crypto.SHA1", crypto.SHA1}, + {"crypto.SHA256", crypto.SHA256}, + {"crypto.SHA384", crypto.SHA384}, + {"crypto.SHA512", crypto.SHA512}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + template.IssuerHash = tc.issuerHash + responseBytes, err := CreateResponse(issuer, responder, template, responderPrivateKey) + if err != nil { + t.Fatalf("CreateResponse failed: %s", err) + } + + resp, err := ParseResponse(responseBytes, nil) + if err != nil { + t.Fatalf("ParseResponse failed: %s", err) + } + + if !reflect.DeepEqual(resp.ThisUpdate, template.ThisUpdate) { + t.Errorf("resp.ThisUpdate: got %v, want %v", resp.ThisUpdate, template.ThisUpdate) + } + + if !reflect.DeepEqual(resp.NextUpdate, template.NextUpdate) { + t.Errorf("resp.NextUpdate: got %v, want %v", resp.NextUpdate, template.NextUpdate) + } + + if !reflect.DeepEqual(resp.RevokedAt, template.RevokedAt) { + t.Errorf("resp.RevokedAt: got %v, want %v", resp.RevokedAt, template.RevokedAt) + } + + if !reflect.DeepEqual(resp.Extensions, template.ExtraExtensions) { + t.Errorf("resp.Extensions: got %v, want %v", resp.Extensions, template.ExtraExtensions) + } + + delay := time.Since(resp.ProducedAt) + if delay < -time.Hour || delay > time.Hour { + t.Errorf("resp.ProducedAt: got %s, want close to current time (%s)", resp.ProducedAt, time.Now()) + } + + if resp.Status != template.Status { + t.Errorf("resp.Status: got %d, want %d", resp.Status, template.Status) + } + + if resp.SerialNumber.Cmp(template.SerialNumber) != 0 { + t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, template.SerialNumber) + } + + if resp.RevocationReason != template.RevocationReason { + t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, template.RevocationReason) + } + + expectedHash := tc.issuerHash + if tc.issuerHash == 0 { + expectedHash = crypto.SHA1 + } + + if resp.IssuerHash != expectedHash { + t.Errorf("resp.IssuerHash: got %d, want %d", resp.IssuerHash, expectedHash) + } + }) + } +} + +func TestErrorResponse(t *testing.T) { + responseBytes, _ := hex.DecodeString(errorResponseHex) + _, err := ParseResponse(responseBytes, nil) + + respErr, ok := err.(ResponseError) + if !ok { + t.Fatalf("expected ResponseError from ParseResponse but got %#v", err) + } + if respErr.Status != Malformed { + t.Fatalf("expected Malformed status from ParseResponse but got %d", respErr.Status) + } +} + +func TestOCSPDecodeMultiResponse(t *testing.T) { + inclCert, _ := hex.DecodeString(ocspMultiResponseCertHex) + cert, err := x509.ParseCertificate(inclCert) + if err != nil { + t.Fatal(err) + } + + responseBytes, _ := hex.DecodeString(ocspMultiResponseHex) + resp, err := ParseResponseForCert(responseBytes, cert, nil) + if err != nil { + t.Fatal(err) + } + + if resp.SerialNumber.Cmp(cert.SerialNumber) != 0 { + t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, cert.SerialNumber) + } +} + +func TestOCSPDecodeMultiResponseWithoutMatchingCert(t *testing.T) { + wrongCert, _ := hex.DecodeString(startComHex) + cert, err := x509.ParseCertificate(wrongCert) + if err != nil { + t.Fatal(err) + } + + responseBytes, _ := hex.DecodeString(ocspMultiResponseHex) + _, err = ParseResponseForCert(responseBytes, cert, nil) + want := ParseError("no response matching the supplied certificate") + if err != want { + t.Errorf("err: got %q, want %q", err, want) + } +} + +// This OCSP response was taken from Thawte's public OCSP responder. +// To recreate: +// $ openssl s_client -tls1 -showcerts -servername www.google.com -connect www.google.com:443 +// Copy and paste the first certificate into /tmp/cert.crt and the second into +// /tmp/intermediate.crt +// $ openssl ocsp -issuer /tmp/intermediate.crt -cert /tmp/cert.crt -url http://ocsp.thawte.com -resp_text -respout /tmp/ocsp.der +// Then hex encode the result: +// $ python -c 'print file("/tmp/ocsp.der", "r").read().encode("hex")' + +const ocspResponseHex = "308206bc0a0100a08206b5308206b106092b0601050507300101048206a23082069e3081" + + "c9a14e304c310b300906035504061302494c31163014060355040a130d5374617274436f" + + "6d204c74642e312530230603550403131c5374617274436f6d20436c6173732031204f43" + + "5350205369676e6572180f32303130303730373137333531375a30663064303c30090605" + + "2b0e03021a050004146568874f40750f016a3475625e1f5c93e5a26d580414eb4234d098" + + "b0ab9ff41b6b08f7cc642eef0e2c45020301d0fa8000180f323031303037303731353031" + + "30355aa011180f32303130303730373138333531375a300d06092a864886f70d01010505" + + "000382010100ab557ff070d1d7cebbb5f0ec91a15c3fed22eb2e1b8244f1b84545f013a4" + + "fb46214c5e3fbfbebb8a56acc2b9db19f68fd3c3201046b3824d5ba689f99864328710cb" + + "467195eb37d84f539e49f859316b32964dc3e47e36814ce94d6c56dd02733b1d0802f7ff" + + "4eebdbbd2927dcf580f16cbc290f91e81b53cb365e7223f1d6e20a88ea064104875e0145" + + "672b20fc14829d51ca122f5f5d77d3ad6c83889c55c7dc43680ba2fe3cef8b05dbcabdc0" + + "d3e09aaf9725597f8c858c2fa38c0d6aed2e6318194420dd1a1137445d13e1c97ab47896" + + "17a4e08925f46f867b72e3a4dc1f08cb870b2b0717f7207faa0ac512e628a029aba7457a" + + "e63dcf3281e2162d9349a08204ba308204b6308204b23082039aa003020102020101300d" + + "06092a864886f70d010105050030818c310b300906035504061302494c31163014060355" + + "040a130d5374617274436f6d204c74642e312b3029060355040b13225365637572652044" + + "69676974616c204365727469666963617465205369676e696e6731383036060355040313" + + "2f5374617274436f6d20436c6173732031205072696d61727920496e7465726d65646961" + + "746520536572766572204341301e170d3037313032353030323330365a170d3132313032" + + "333030323330365a304c310b300906035504061302494c31163014060355040a130d5374" + + "617274436f6d204c74642e312530230603550403131c5374617274436f6d20436c617373" + + "2031204f435350205369676e657230820122300d06092a864886f70d0101010500038201" + + "0f003082010a0282010100b9561b4c45318717178084e96e178df2255e18ed8d8ecc7c2b" + + "7b51a6c1c2e6bf0aa3603066f132fe10ae97b50e99fa24b83fc53dd2777496387d14e1c3" + + "a9b6a4933e2ac12413d085570a95b8147414a0bc007c7bcf222446ef7f1a156d7ea1c577" + + "fc5f0facdfd42eb0f5974990cb2f5cefebceef4d1bdc7ae5c1075c5a99a93171f2b0845b" + + "4ff0864e973fcfe32f9d7511ff87a3e943410c90a4493a306b6944359340a9ca96f02b66" + + "ce67f028df2980a6aaee8d5d5d452b8b0eb93f923cc1e23fcccbdbe7ffcb114d08fa7a6a" + + "3c404f825d1a0e715935cf623a8c7b59670014ed0622f6089a9447a7a19010f7fe58f841" + + "29a2765ea367824d1c3bb2fda308530203010001a382015c30820158300c0603551d1301" + + "01ff04023000300b0603551d0f0404030203a8301e0603551d250417301506082b060105" + + "0507030906092b0601050507300105301d0603551d0e0416041445e0a36695414c5dd449" + + "bc00e33cdcdbd2343e173081a80603551d230481a030819d8014eb4234d098b0ab9ff41b" + + "6b08f7cc642eef0e2c45a18181a47f307d310b300906035504061302494c311630140603" + + "55040a130d5374617274436f6d204c74642e312b3029060355040b132253656375726520" + + "4469676974616c204365727469666963617465205369676e696e67312930270603550403" + + "13205374617274436f6d2043657274696669636174696f6e20417574686f726974798201" + + "0a30230603551d12041c301a8618687474703a2f2f7777772e737461727473736c2e636f" + + "6d2f302c06096086480186f842010d041f161d5374617274436f6d205265766f63617469" + + "6f6e20417574686f72697479300d06092a864886f70d01010505000382010100182d2215" + + "8f0fc0291324fa8574c49bb8ff2835085adcbf7b7fc4191c397ab6951328253fffe1e5ec" + + "2a7da0d50fca1a404e6968481366939e666c0a6209073eca57973e2fefa9ed1718e8176f" + + "1d85527ff522c08db702e3b2b180f1cbff05d98128252cf0f450f7dd2772f4188047f19d" + + "c85317366f94bc52d60f453a550af58e308aaab00ced33040b62bf37f5b1ab2a4f7f0f80" + + "f763bf4d707bc8841d7ad9385ee2a4244469260b6f2bf085977af9074796048ecc2f9d48" + + "a1d24ce16e41a9941568fec5b42771e118f16c106a54ccc339a4b02166445a167902e75e" + + "6d8620b0825dcd18a069b90fd851d10fa8effd409deec02860d26d8d833f304b10669b42" + +const startComResponderCertHex = "308204b23082039aa003020102020101300d06092a864886f70d010105050030818c310b" + + "300906035504061302494c31163014060355040a130d5374617274436f6d204c74642e31" + + "2b3029060355040b1322536563757265204469676974616c204365727469666963617465" + + "205369676e696e67313830360603550403132f5374617274436f6d20436c617373203120" + + "5072696d61727920496e7465726d65646961746520536572766572204341301e170d3037" + + "313032353030323330365a170d3132313032333030323330365a304c310b300906035504" + + "061302494c31163014060355040a130d5374617274436f6d204c74642e31253023060355" + + "0403131c5374617274436f6d20436c6173732031204f435350205369676e657230820122" + + "300d06092a864886f70d01010105000382010f003082010a0282010100b9561b4c453187" + + "17178084e96e178df2255e18ed8d8ecc7c2b7b51a6c1c2e6bf0aa3603066f132fe10ae97" + + "b50e99fa24b83fc53dd2777496387d14e1c3a9b6a4933e2ac12413d085570a95b8147414" + + "a0bc007c7bcf222446ef7f1a156d7ea1c577fc5f0facdfd42eb0f5974990cb2f5cefebce" + + "ef4d1bdc7ae5c1075c5a99a93171f2b0845b4ff0864e973fcfe32f9d7511ff87a3e94341" + + "0c90a4493a306b6944359340a9ca96f02b66ce67f028df2980a6aaee8d5d5d452b8b0eb9" + + "3f923cc1e23fcccbdbe7ffcb114d08fa7a6a3c404f825d1a0e715935cf623a8c7b596700" + + "14ed0622f6089a9447a7a19010f7fe58f84129a2765ea367824d1c3bb2fda30853020301" + + "0001a382015c30820158300c0603551d130101ff04023000300b0603551d0f0404030203" + + "a8301e0603551d250417301506082b0601050507030906092b0601050507300105301d06" + + "03551d0e0416041445e0a36695414c5dd449bc00e33cdcdbd2343e173081a80603551d23" + + "0481a030819d8014eb4234d098b0ab9ff41b6b08f7cc642eef0e2c45a18181a47f307d31" + + "0b300906035504061302494c31163014060355040a130d5374617274436f6d204c74642e" + + "312b3029060355040b1322536563757265204469676974616c2043657274696669636174" + + "65205369676e696e6731293027060355040313205374617274436f6d2043657274696669" + + "636174696f6e20417574686f7269747982010a30230603551d12041c301a861868747470" + + "3a2f2f7777772e737461727473736c2e636f6d2f302c06096086480186f842010d041f16" + + "1d5374617274436f6d205265766f636174696f6e20417574686f72697479300d06092a86" + + "4886f70d01010505000382010100182d22158f0fc0291324fa8574c49bb8ff2835085adc" + + "bf7b7fc4191c397ab6951328253fffe1e5ec2a7da0d50fca1a404e6968481366939e666c" + + "0a6209073eca57973e2fefa9ed1718e8176f1d85527ff522c08db702e3b2b180f1cbff05" + + "d98128252cf0f450f7dd2772f4188047f19dc85317366f94bc52d60f453a550af58e308a" + + "aab00ced33040b62bf37f5b1ab2a4f7f0f80f763bf4d707bc8841d7ad9385ee2a4244469" + + "260b6f2bf085977af9074796048ecc2f9d48a1d24ce16e41a9941568fec5b42771e118f1" + + "6c106a54ccc339a4b02166445a167902e75e6d8620b0825dcd18a069b90fd851d10fa8ef" + + "fd409deec02860d26d8d833f304b10669b42" + +const startComHex = "308206343082041ca003020102020118300d06092a864886f70d0101050500307d310b30" + + "0906035504061302494c31163014060355040a130d5374617274436f6d204c74642e312b" + + "3029060355040b1322536563757265204469676974616c20436572746966696361746520" + + "5369676e696e6731293027060355040313205374617274436f6d20436572746966696361" + + "74696f6e20417574686f72697479301e170d3037313032343230353431375a170d313731" + + "3032343230353431375a30818c310b300906035504061302494c31163014060355040a13" + + "0d5374617274436f6d204c74642e312b3029060355040b13225365637572652044696769" + + "74616c204365727469666963617465205369676e696e67313830360603550403132f5374" + + "617274436f6d20436c6173732031205072696d61727920496e7465726d65646961746520" + + "53657276657220434130820122300d06092a864886f70d01010105000382010f00308201" + + "0a0282010100b689c6acef09527807ac9263d0f44418188480561f91aee187fa3250b4d3" + + "4706f0e6075f700e10f71dc0ce103634855a0f92ac83c6ac58523fba38e8fce7a724e240" + + "a60876c0926e9e2a6d4d3f6e61200adb59ded27d63b33e46fefa215118d7cd30a6ed076e" + + "3b7087b4f9faebee823c056f92f7a4dc0a301e9373fe07cad75f809d225852ae06da8b87" + + "2369b0e42ad8ea83d2bdf371db705a280faf5a387045123f304dcd3baf17e50fcba0a95d" + + "48aab16150cb34cd3c5cc30be810c08c9bf0030362feb26c3e720eee1c432ac9480e5739" + + "c43121c810c12c87fe5495521f523c31129b7fe7c0a0a559d5e28f3ef0d5a8e1d77031a9" + + "c4b3cfaf6d532f06f4a70203010001a38201ad308201a9300f0603551d130101ff040530" + + "030101ff300e0603551d0f0101ff040403020106301d0603551d0e04160414eb4234d098" + + "b0ab9ff41b6b08f7cc642eef0e2c45301f0603551d230418301680144e0bef1aa4405ba5" + + "17698730ca346843d041aef2306606082b06010505070101045a3058302706082b060105" + + "05073001861b687474703a2f2f6f6373702e737461727473736c2e636f6d2f6361302d06" + + "082b060105050730028621687474703a2f2f7777772e737461727473736c2e636f6d2f73" + + "667363612e637274305b0603551d1f045430523027a025a0238621687474703a2f2f7777" + + "772e737461727473736c2e636f6d2f73667363612e63726c3027a025a023862168747470" + + "3a2f2f63726c2e737461727473736c2e636f6d2f73667363612e63726c3081800603551d" + + "20047930773075060b2b0601040181b5370102013066302e06082b060105050702011622" + + "687474703a2f2f7777772e737461727473736c2e636f6d2f706f6c6963792e7064663034" + + "06082b060105050702011628687474703a2f2f7777772e737461727473736c2e636f6d2f" + + "696e7465726d6564696174652e706466300d06092a864886f70d01010505000382020100" + + "2109493ea5886ee00b8b48da314d8ff75657a2e1d36257e9b556f38545753be5501f048b" + + "e6a05a3ee700ae85d0fbff200364cbad02e1c69172f8a34dd6dee8cc3fa18aa2e37c37a7" + + "c64f8f35d6f4d66e067bdd21d9cf56ffcb302249fe8904f385e5aaf1e71fe875904dddf9" + + "46f74234f745580c110d84b0c6da5d3ef9019ee7e1da5595be741c7bfc4d144fac7e5547" + + "7d7bf4a50d491e95e8f712c1ccff76a62547d0f37535be97b75816ebaa5c786fec5330af" + + "ea044dcca902e3f0b60412f630b1113d904e5664d7dc3c435f7339ef4baf87ebf6fe6888" + + "4472ead207c669b0c1a18bef1749d761b145485f3b2021e95bb2ccf4d7e931f50b15613b" + + "7a94e3ebd9bc7f94ae6ae3626296a8647cb887f399327e92a252bebbf865cfc9f230fc8b" + + "c1c2a696d75f89e15c3480f58f47072fb491bfb1a27e5f4b5ad05b9f248605515a690365" + + "434971c5e06f94346bf61bd8a9b04c7e53eb8f48dfca33b548fa364a1a53a6330cd089cd" + + "4915cd89313c90c072d7654b52358a461144b93d8e2865a63e799e5c084429adb035112e" + + "214eb8d2e7103e5d8483b3c3c2e4d2c6fd094b7409ddf1b3d3193e800da20b19f038e7c5" + + "c2afe223db61e29d5c6e2089492e236ab262c145b49faf8ba7f1223bf87de290d07a19fb" + + "4a4ce3d27d5f4a8303ed27d6239e6b8db459a2d9ef6c8229dd75193c3f4c108defbb7527" + + "d2ae83a7a8ce5ba7" + +const ocspResponseWithoutCertHex = "308201d40a0100a08201cd308201c906092b0601050507300101048201ba3082" + + "01b630819fa2160414884451ff502a695e2d88f421bad90cf2cecbea7c180f3230313330" + + "3631383037323434335a30743072304a300906052b0e03021a0500041448b60d38238df8" + + "456e4ee5843ea394111802979f0414884451ff502a695e2d88f421bad90cf2cecbea7c02" + + "1100f78b13b946fc9635d8ab49de9d2148218000180f3230313330363138303732343433" + + "5aa011180f32303133303632323037323434335a300d06092a864886f70d010105050003" + + "82010100103e18b3d297a5e7a6c07a4fc52ac46a15c0eba96f3be17f0ffe84de5b8c8e05" + + "5a8f577586a849dc4abd6440eb6fedde4622451e2823c1cbf3558b4e8184959c9fe96eff" + + "8bc5f95866c58c6d087519faabfdae37e11d9874f1bc0db292208f645dd848185e4dd38b" + + "6a8547dfa7b74d514a8470015719064d35476b95bebb03d4d2845c5ca15202d2784878f2" + + "0f904c24f09736f044609e9c271381713400e563023d212db422236440c6f377bbf24b2b" + + "9e7dec8698e36a8df68b7592ad3489fb2937afb90eb85d2aa96b81c94c25057dbd4759d9" + + "20a1a65c7f0b6427a224b3c98edd96b9b61f706099951188b0289555ad30a216fb774651" + + "5a35fca2e054dfa8" + +// PKIX nonce extension +var ocspExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 2} +var ocspExtensionValueHex = "0403000000" + +const ocspResponseWithCriticalExtensionHex = "308204fe0a0100a08204f7308204f306092b0601050507300101048204e4308204e03081" + + "dba003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" + + "0f32303136303130343137303130305a3081a53081a23049300906052b0e03021a050004" + + "14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" + + "7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" + + "373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" + + "3130303730373138333531375aa1193017301506092b06010505073001020101ff040504" + + "03000000300d06092a864886f70d01010b0500038201010031c730ca60a7a0d92d8e4010" + + "911b469de95b4d27e89de6537552436237967694f76f701cf6b45c932bd308bca4a8d092" + + "5c604ba94796903091d9e6c000178e72c1f0a24a277dd262835af5d17d3f9d7869606c9f" + + "e7c8e708a41645699895beee38bfa63bb46296683761c5d1d65439b8ab868dc3017c9eeb" + + "b70b82dbf3a31c55b457d48bb9e82b335ed49f445042eaf606b06a3e0639824924c89c63" + + "eccddfe85e6694314138b2536f5e15e07085d0f6e26d4b2f8244bab0d70de07283ac6384" + + "a0501fc3dea7cf0adfd4c7f34871080900e252ddc403e3f0265f2a704af905d3727504ed" + + "28f3214a219d898a022463c78439799ca81c8cbafdbcec34ea937cd6a08202ea308202e6" + + "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" + + "150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" + + "33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" + + "526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" + + "0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" + + "c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" + + "bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" + + "3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" + + "9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" + + "285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" + + "55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" + + "a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" + + "130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" + + "06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" + + "31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" + + "9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" + + "09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" + + "d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" + + "9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" + + "66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" + + "3a25439a94299a65a709756c7a3e568be049d5c38839" + +const ocspResponseWithExtensionHex = "308204fb0a0100a08204f4308204f006092b0601050507300101048204e1308204dd3081" + + "d8a003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" + + "0f32303136303130343136353930305a3081a230819f3049300906052b0e03021a050004" + + "14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" + + "7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" + + "373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" + + "3130303730373138333531375aa1163014301206092b0601050507300102040504030000" + + "00300d06092a864886f70d01010b05000382010100c09a33e0b2324c852421bb83f85ac9" + + "9113f5426012bd2d2279a8166e9241d18a33c870894250622ffc7ed0c4601b16d624f90b" + + "779265442cdb6868cf40ab304ab4b66e7315ed02cf663b1601d1d4751772b31bc299db23" + + "9aebac78ed6797c06ed815a7a8d18d63cfbb609cafb47ec2e89e37db255216eb09307848" + + "d01be0a3e943653c78212b96ff524b74c9ec456b17cdfb950cc97645c577b2e09ff41dde" + + "b03afb3adaa381cc0f7c1d95663ef22a0f72f2c45613ae8e2b2d1efc96e8463c7d1d8a1d" + + "7e3b35df8fe73a301fc3f804b942b2b3afa337ff105fc1462b7b1c1d75eb4566c8665e59" + + "f80393b0adbf8004ff6c3327ed34f007cb4a3348a7d55e06e3a08202ea308202e6308202" + + "e2308201caa003020102020101300d06092a864886f70d01010b05003019311730150603" + + "550403130e4f43535020526573706f6e646572301e170d3135303133303135353033335a" + + "170d3136303133303135353033335a3019311730150603550403130e4f43535020526573" + + "706f6e64657230820122300d06092a864886f70d01010105000382010f003082010a0282" + + "010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616ec5265b" + + "56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbcbec75a" + + "70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b723350f0" + + "a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b8989ad0f6" + + "3aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d285b6a" + + "04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e655b104" + + "9a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31a77dcf" + + "920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030130603" + + "551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d06092a" + + "864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab861231c15f" + + "d5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d22889064f4" + + "aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f326709dce5" + + "2c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156d67156" + + "e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff59e2005" + + "d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf966705d" + + "e17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d93a2543" + + "9a94299a65a709756c7a3e568be049d5c38839" + +const ocspMultiResponseHex = "30820ee60a0100a0820edf30820edb06092b060105050730010104820ecc30820ec83082" + + "0839a216041445ac2ecd75f53f1cf6e4c51d3de0047ad0aa7465180f3230313530363032" + + "3130303033305a3082080c3065303d300906052b0e03021a05000414f7452a0080601527" + + "72e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f0204" + + "5456656a8000180f32303135303630323039303230375aa011180f323031353036303331" + + "30303033305a3065303d300906052b0e03021a05000414f7452a008060152772e4a135e7" + + "6e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f02045456656b80" + + "00180f32303135303630323039303230375aa011180f3230313530363033313030303330" + + "5a3065303d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0" + + "f1580414edd8f2ee977252853a330b297a18f5c993853b3f02045456656c8000180f3230" + + "3135303630323039303230375aa011180f32303135303630333130303033305a3065303d" + + "300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414ed" + + "d8f2ee977252853a330b297a18f5c993853b3f02045456656d8000180f32303135303630" + + "323039303230375aa011180f32303135303630333130303033305a3065303d300906052b" + + "0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee9772" + + "52853a330b297a18f5c993853b3f02045456656e8000180f323031353036303230393032" + + "30375aa011180f32303135303630333130303033305a3065303d300906052b0e03021a05" + + "000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b" + + "297a18f5c993853b3f02045456656f8000180f32303135303630323039303230375aa011" + + "180f32303135303630333130303033305a3065303d300906052b0e03021a05000414f745" + + "2a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c9" + + "93853b3f0204545665708000180f32303135303630323039303230375aa011180f323031" + + "35303630333130303033305a3065303d300906052b0e03021a05000414f7452a00806015" + + "2772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f02" + + "04545665718000180f32303135303630323039303230375aa011180f3230313530363033" + + "3130303033305a3065303d300906052b0e03021a05000414f7452a008060152772e4a135" + + "e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f020454566572" + + "8000180f32303135303630323039303230375aa011180f32303135303630333130303033" + + "305a3065303d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fd" + + "e0f1580414edd8f2ee977252853a330b297a18f5c993853b3f0204545665738000180f32" + + "303135303630323039303230375aa011180f32303135303630333130303033305a306530" + + "3d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414" + + "edd8f2ee977252853a330b297a18f5c993853b3f0204545665748000180f323031353036" + + "30323039303230375aa011180f32303135303630333130303033305a3065303d30090605" + + "2b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee97" + + "7252853a330b297a18f5c993853b3f0204545665758000180f3230313530363032303930" + + "3230375aa011180f32303135303630333130303033305a3065303d300906052b0e03021a" + + "05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a33" + + "0b297a18f5c993853b3f0204545665768000180f32303135303630323039303230375aa0" + + "11180f32303135303630333130303033305a3065303d300906052b0e03021a05000414f7" + + "452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5" + + "c993853b3f0204545665778000180f32303135303630323039303230375aa011180f3230" + + "3135303630333130303033305a3065303d300906052b0e03021a05000414f7452a008060" + + "152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f" + + "0204545665788000180f32303135303630323039303230375aa011180f32303135303630" + + "333130303033305a3065303d300906052b0e03021a05000414f7452a008060152772e4a1" + + "35e76e9e52fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f0204545665" + + "798000180f32303135303630323039303230375aa011180f323031353036303331303030" + + "33305a3065303d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52" + + "fde0f1580414edd8f2ee977252853a330b297a18f5c993853b3f02045456657a8000180f" + + "32303135303630323039303230375aa011180f32303135303630333130303033305a3065" + + "303d300906052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f15804" + + "14edd8f2ee977252853a330b297a18f5c993853b3f02045456657b8000180f3230313530" + + "3630323039303230375aa011180f32303135303630333130303033305a3065303d300906" + + "052b0e03021a05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee" + + "977252853a330b297a18f5c993853b3f02045456657c8000180f32303135303630323039" + + "303230375aa011180f32303135303630333130303033305a3065303d300906052b0e0302" + + "1a05000414f7452a008060152772e4a135e76e9e52fde0f1580414edd8f2ee977252853a" + + "330b297a18f5c993853b3f02045456657d8000180f32303135303630323039303230375a" + + "a011180f32303135303630333130303033305a300d06092a864886f70d01010505000382" + + "01010016b73b92859979f27d15eb018cf069eed39c3d280213565f3026de11ba15bdb94d" + + "764cf2d0fdd204ef926c588d7b183483c8a2b1995079c7ed04dcefcc650c1965be4b6832" + + "a8839e832f7f60f638425eccdf9bc3a81fbe700fda426ddf4f06c29bee431bbbe81effda" + + "a60b7da5b378f199af2f3c8380be7ba6c21c8e27124f8a4d8989926aea19055700848d33" + + "799e833512945fd75364edbd2dd18b783c1e96e332266b17979a0b88c35b43f47c87c493" + + "19155056ad8dbbae5ff2afad3c0e1c69ed111206ffda49875e8e4efc0926264823bc4423" + + "c8a002f34288c4bc22516f98f54fc609943721f590ddd8d24f989457526b599b0eb75cb5" + + "a80da1ad93a621a08205733082056f3082056b30820453a0030201020204545638c4300d" + + "06092a864886f70d01010b0500308182310b300906035504061302555331183016060355" + + "040a130f552e532e20476f7665726e6d656e7431233021060355040b131a446570617274" + + "6d656e74206f662074686520547265617375727931223020060355040b13194365727469" + + "6669636174696f6e20417574686f7269746965733110300e060355040b13074f43494f20" + + "4341301e170d3135303332303131353531335a170d3135303633303034303030305a3081" + + "98310b300906035504061302555331183016060355040a130f552e532e20476f7665726e" + + "6d656e7431233021060355040b131a4465706172746d656e74206f662074686520547265" + + "617375727931223020060355040b131943657274696669636174696f6e20417574686f72" + + "69746965733110300e060355040b13074f43494f204341311430120603550403130b4f43" + + "5350205369676e657230820122300d06092a864886f70d01010105000382010f00308201" + + "0a0282010100c1b6fe1ba1ad50bb98c855811acbd67fe68057f48b8e08d3800e7f2c51b7" + + "9e20551934971fd92b9c9e6c49453097927cba83a94c0b2fea7124ba5ac442b38e37dba6" + + "7303d4962dd7d92b22a04b0e0e182e9ea67620b1c6ce09ee607c19e0e6e3adae81151db1" + + "2bb7f706149349a292e21c1eb28565b6839df055e1a838a772ff34b5a1452618e2c26042" + + "705d53f0af4b57aae6163f58216af12f3887813fe44b0321827b3a0c52b0e47d0aab94a2" + + "f768ab0ba3901d22f8bb263823090b0e37a7f8856db4b0d165c42f3aa7e94f5f6ce1855e" + + "98dc57adea0ae98ad39f67ecdec00b88685566e9e8d69f6cefb6ddced53015d0d3b862bc" + + "be21f3d72251eefcec730203010001a38201cf308201cb300e0603551d0f0101ff040403" + + "020780306b0603551d2004643062300c060a60864801650302010502300c060a60864801" + + "650302010503300c060a60864801650302010504300c060a60864801650302010507300c" + + "060a60864801650302010508300c060a6086480165030201030d300c060a608648016503" + + "020103113081e506082b060105050701010481d83081d5303006082b0601050507300286" + + "24687474703a2f2f706b692e74726561732e676f762f746f63615f65655f6169612e7037" + + "633081a006082b060105050730028681936c6461703a2f2f6c6461702e74726561732e67" + + "6f762f6f753d4f43494f25323043412c6f753d43657274696669636174696f6e25323041" + + "7574686f7269746965732c6f753d4465706172746d656e742532306f6625323074686525" + + "323054726561737572792c6f3d552e532e253230476f7665726e6d656e742c633d55533f" + + "634143657274696669636174653b62696e61727930130603551d25040c300a06082b0601" + + "0505070309300f06092b060105050730010504020500301f0603551d23041830168014a2" + + "13a8e5c607546c243d4eb72b27a2a7711ab5af301d0603551d0e0416041451f98046818a" + + "e46d953ac90c210ccfaa1a06980c300d06092a864886f70d01010b050003820101003a37" + + "0b301d14ffdeb370883639bec5ae6f572dcbddadd672af16ee2a8303316b14e1fbdca8c2" + + "8f4bad9c7b1410250e149c14e9830ca6f17370a8d13151205d956e28c141cc0500379596" + + "c5b9239fcfa3d2de8f1d4f1a2b1bf2d1851bed1c86012ee8135bdc395cd4496ce69fadd0" + + "3b682b90350ca7b4f458190b7a0ab5c33a04cf1347a77d541877a380a4c94988c5658908" + + "44fdc22637a72b9fa410333e2caf969477f9fe07f50e3681c204fb3bf073b9da01cd8d91" + + "8044c40b1159955af12a3263ab1d34119d7f59bfa6cae88ed058addc4e08250263f8f836" + + "2f5bdffd45636fea7474c60a55c535954477b2f286e1b2535f0dd12c162f1b353c370e08" + + "be67" + +const ocspMultiResponseCertHex = "308207943082067ca003020102020454566573300d06092a864886f70d01010b05003081" + + "82310b300906035504061302555331183016060355040a130f552e532e20476f7665726e" + + "6d656e7431233021060355040b131a4465706172746d656e74206f662074686520547265" + + "617375727931223020060355040b131943657274696669636174696f6e20417574686f72" + + "69746965733110300e060355040b13074f43494f204341301e170d313530343130313535" + + "3733385a170d3138303431303136323733385a30819d310b300906035504061302555331" + + "183016060355040a130f552e532e20476f7665726e6d656e7431233021060355040b131a" + + "4465706172746d656e74206f662074686520547265617375727931253023060355040b13" + + "1c427572656175206f66207468652046697363616c20536572766963653110300e060355" + + "040b130744657669636573311630140603550403130d706b692e74726561732e676f7630" + + "820122300d06092a864886f70d01010105000382010f003082010a0282010100c7273623" + + "8c49c48bf501515a2490ef6e5ae0c06e0ad2aa9a6bb77f3d0370d846b2571581ebf38fd3" + + "1948daad3dec7a4da095f1dcbe9654e65bcf7acdfd4ee802421dad9b90536c721d2bca58" + + "8413e6bfd739a72470560bb7d64f9a09284f90ff8af1d5a3c5c84d0f95a00f9c6d988dd0" + + "d87f1d0d3344580901c955139f54d09de0acdbd3322b758cb0c58881bf04913243401f44" + + "013fd9f6d8348044cc8bb0a71978ad93366b2a4687a5274b2ee07d0fb40225453eb244ed" + + "b20152251ac77c59455260ff07eeceb3cb3c60fb8121cf92afd3daa2a4650e1942ccb555" + + "de10b3d481feb299838ef05d0fd1810b146753472ae80da65dd34da25ca1f89971f10039" + + "0203010001a38203f3308203ef300e0603551d0f0101ff0404030205a030170603551d20" + + "0410300e300c060a60864801650302010503301106096086480186f84201010404030206" + + "4030130603551d25040c300a06082b060105050703013082010806082b06010505070101" + + "0481fb3081f8303006082b060105050730028624687474703a2f2f706b692e7472656173" + + "2e676f762f746f63615f65655f6169612e7037633081a006082b06010505073002868193" + + "6c6461703a2f2f6c6461702e74726561732e676f762f6f753d4f43494f25323043412c6f" + + "753d43657274696669636174696f6e253230417574686f7269746965732c6f753d446570" + + "6172746d656e742532306f6625323074686525323054726561737572792c6f3d552e532e" + + "253230476f7665726e6d656e742c633d55533f634143657274696669636174653b62696e" + + "617279302106082b060105050730018615687474703a2f2f6f6373702e74726561732e67" + + "6f76307b0603551d1104743072811c6373612d7465616d4066697363616c2e7472656173" + + "7572792e676f768210706b692e74726561737572792e676f768210706b692e64696d632e" + + "6468732e676f76820d706b692e74726561732e676f76811f6563622d686f7374696e6740" + + "66697363616c2e74726561737572792e676f76308201890603551d1f048201803082017c" + + "3027a025a0238621687474703a2f2f706b692e74726561732e676f762f4f43494f5f4341" + + "332e63726c3082014fa082014ba0820147a48197308194310b3009060355040613025553" + + "31183016060355040a130f552e532e20476f7665726e6d656e7431233021060355040b13" + + "1a4465706172746d656e74206f662074686520547265617375727931223020060355040b" + + "131943657274696669636174696f6e20417574686f7269746965733110300e060355040b" + + "13074f43494f2043413110300e0603550403130743524c313430398681aa6c6461703a2f" + + "2f6c6461702e74726561732e676f762f636e3d43524c313430392c6f753d4f43494f2532" + + "3043412c6f753d43657274696669636174696f6e253230417574686f7269746965732c6f" + + "753d4465706172746d656e742532306f6625323074686525323054726561737572792c6f" + + "3d552e532e253230476f7665726e6d656e742c633d55533f636572746966696361746552" + + "65766f636174696f6e4c6973743b62696e617279302b0603551d1004243022800f323031" + + "35303431303135353733385a810f32303138303431303136323733385a301f0603551d23" + + "041830168014a213a8e5c607546c243d4eb72b27a2a7711ab5af301d0603551d0e041604" + + "14b0869c12c293914cd460e33ed43e6c5a26e0d68f301906092a864886f67d074100040c" + + "300a1b0456382e31030203a8300d06092a864886f70d01010b050003820101004968d182" + + "8f9efdc147e747bb5dda15536a42a079b32d3d7f87e619b483aeee70b7e26bda393c6028" + + "7c733ecb468fe8b8b11bf809ff76add6b90eb25ad8d3a1052e43ee281e48a3a1ebe7efb5" + + "9e2c4a48765dedeb23f5346242145786cc988c762d230d28dd33bf4c2405d80cbb2cb1d6" + + "4c8f10ba130d50cb174f6ffb9cfc12808297a2cefba385f4fad170f39b51ebd87c12abf9" + + "3c51fc000af90d8aaba78f48923908804a5eb35f617ccf71d201e3708a559e6d16f9f13e" + + "074361eb9007e28d86bb4e0bfa13aad0e9ddd9124e84519de60e2fc6040b18d9fd602b02" + + "684b4c071c3019fc842197d00c120c41654bcbfbc4a096a1c637b79112b81ce1fa3899f9" + +const ocspRequestHex = "3051304f304d304b3049300906052b0e03021a05000414c0fe0278fc99188891b3f212e9" + + "c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b213177e6f8d157cd4f60210017f77deb3" + + "bcbb235d44ccc7dba62e72" + +const leafCertHex = "308203c830820331a0030201020210017f77deb3bcbb235d44ccc7dba62e72300d06092a" + + "864886f70d01010505003081ba311f301d060355040a1316566572695369676e20547275" + + "7374204e6574776f726b31173015060355040b130e566572695369676e2c20496e632e31" + + "333031060355040b132a566572695369676e20496e7465726e6174696f6e616c20536572" + + "766572204341202d20436c617373203331493047060355040b13407777772e7665726973" + + "69676e2e636f6d2f43505320496e636f72702e6279205265662e204c494142494c495459" + + "204c54442e286329393720566572695369676e301e170d3132303632313030303030305a" + + "170d3133313233313233353935395a3068310b3009060355040613025553311330110603" + + "550408130a43616c69666f726e6961311230100603550407130950616c6f20416c746f31" + + "173015060355040a130e46616365626f6f6b2c20496e632e311730150603550403140e2a" + + "2e66616365626f6f6b2e636f6d30819f300d06092a864886f70d010101050003818d0030" + + "818902818100ae94b171e2deccc1693e051063240102e0689ae83c39b6b3e74b97d48d7b" + + "23689100b0b496ee62f0e6d356bcf4aa0f50643402f5d1766aa972835a7564723f39bbef" + + "5290ded9bcdbf9d3d55dfad23aa03dc604c54d29cf1d4b3bdbd1a809cfae47b44c7eae17" + + "c5109bee24a9cf4a8d911bb0fd0415ae4c3f430aa12a557e2ae10203010001a382011e30" + + "82011a30090603551d130402300030440603551d20043d303b3039060b6086480186f845" + + "01071703302a302806082b06010505070201161c68747470733a2f2f7777772e76657269" + + "7369676e2e636f6d2f727061303c0603551d1f043530333031a02fa02d862b687474703a" + + "2f2f535652496e746c2d63726c2e766572697369676e2e636f6d2f535652496e746c2e63" + + "726c301d0603551d250416301406082b0601050507030106082b06010505070302300b06" + + "03551d0f0404030205a0303406082b0601050507010104283026302406082b0601050507" + + "30018618687474703a2f2f6f6373702e766572697369676e2e636f6d30270603551d1104" + + "20301e820e2a2e66616365626f6f6b2e636f6d820c66616365626f6f6b2e636f6d300d06" + + "092a864886f70d0101050500038181005b6c2b75f8ed30aa51aad36aba595e555141951f" + + "81a53b447910ac1f76ff78fc2781616b58f3122afc1c87010425e9ed43df1a7ba6498060" + + "67e2688af03db58c7df4ee03309a6afc247ccb134dc33e54c6bc1d5133a532a73273b1d7" + + "9cadc08e7e1a83116d34523340b0305427a21742827c98916698ee7eaf8c3bdd71700817" + +const issuerCertHex = "30820383308202eca003020102021046fcebbab4d02f0f926098233f93078f300d06092a" + + "864886f70d0101050500305f310b300906035504061302555331173015060355040a130e" + + "566572695369676e2c20496e632e31373035060355040b132e436c617373203320507562" + + "6c6963205072696d6172792043657274696669636174696f6e20417574686f7269747930" + + "1e170d3937303431373030303030305a170d3136313032343233353935395a3081ba311f" + + "301d060355040a1316566572695369676e205472757374204e6574776f726b3117301506" + + "0355040b130e566572695369676e2c20496e632e31333031060355040b132a5665726953" + + "69676e20496e7465726e6174696f6e616c20536572766572204341202d20436c61737320" + + "3331493047060355040b13407777772e766572697369676e2e636f6d2f43505320496e63" + + "6f72702e6279205265662e204c494142494c495459204c54442e28632939372056657269" + + "5369676e30819f300d06092a864886f70d010101050003818d0030818902818100d88280" + + "e8d619027d1f85183925a2652be1bfd405d3bce6363baaf04c6c5bb6e7aa3c734555b2f1" + + "bdea9742ed9a340a15d4a95cf54025ddd907c132b2756cc4cabba3fe56277143aa63f530" + + "3e9328e5faf1093bf3b74d4e39f75c495ab8c11dd3b28afe70309542cbfe2b518b5a3c3a" + + "f9224f90b202a7539c4f34e7ab04b27b6f0203010001a381e33081e0300f0603551d1304" + + "0830060101ff02010030440603551d20043d303b3039060b6086480186f8450107010130" + + "2a302806082b06010505070201161c68747470733a2f2f7777772e766572697369676e2e" + + "636f6d2f43505330340603551d25042d302b06082b0601050507030106082b0601050507" + + "030206096086480186f8420401060a6086480186f845010801300b0603551d0f04040302" + + "0106301106096086480186f842010104040302010630310603551d1f042a30283026a024" + + "a0228620687474703a2f2f63726c2e766572697369676e2e636f6d2f706361332e63726c" + + "300d06092a864886f70d010105050003818100408e4997968a73dd8e4def3e61b7caa062" + + "adf40e0abb753de26ed82cc7bff4b98c369bcaa2d09c724639f6a682036511c4bcbf2da6" + + "f5d93b0ab598fab378b91ef22b4c62d5fdb27a1ddf33fd73f9a5d82d8c2aead1fcb028b6" + + "e94948134b838a1b487b24f738de6f4154b8ab576b06dfc7a2d4a9f6f136628088f28b75" + + "d68071" + +// Key and certificate for the OCSP responder were not taken from the Thawte +// responder, since CreateResponse requires that we have the private key. +// Instead, they were generated randomly. +const responderPrivateKeyHex = "308204a40201000282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef" + + "1099f0f6616ec5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df" + + "1701dc6ccfbcbec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074f" + + "fde8a99d5b723350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14" + + "c9fc0f27b8989ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa7" + + "7e7332971c7d285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f" + + "1290bafd97e655b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb9" + + "6222b12ace31a77dcf920334dc94581b02030100010282010100bcf0b93d7238bda329a8" + + "72e7149f61bcb37c154330ccb3f42a85c9002c2e2bdea039d77d8581cd19bed94078794e" + + "56293d601547fc4bf6a2f9002fe5772b92b21b254403b403585e3130cc99ccf08f0ef81a" + + "575b38f597ba4660448b54f44bfbb97072b5a2bf043bfeca828cf7741d13698e3f38162b" + + "679faa646b82abd9a72c5c7d722c5fc577a76d2c2daac588accad18516d1bbad10b0dfa2" + + "05cfe246b59e28608a43942e1b71b0c80498075121de5b900d727c31c42c78cf1db5c0aa" + + "5b491e10ea4ed5c0962aaf2ae025dd81fa4ce490d9d6b4a4465411d8e542fc88617e5695" + + "1aa4fc8ea166f2b4d0eb89ef17f2b206bd5f1014bf8fe0e71fe62f2cccf102818100f2dc" + + "ddf878d553286daad68bac4070a82ffec3dc4666a2750f47879eec913f91836f1d976b60" + + "daf9356e078446dafab5bd2e489e5d64f8572ba24a4ba4f3729b5e106c4dd831cc2497a7" + + "e6c7507df05cb64aeb1bbc81c1e340d58b5964cf39cff84ea30c29ec5d3f005ee1362698" + + "07395037955955655292c3e85f6187fa1f9502818100f4a33c102630840705f8c778a47b" + + "87e8da31e68809af981ac5e5999cf1551685d761cdf0d6520361b99aebd5777a940fa64d" + + "327c09fa63746fbb3247ec73a86edf115f1fe5c83598db803881ade71c33c6e956118345" + + "497b98b5e07bb5be75971465ec78f2f9467e1b74956ca9d4c7c3e314e742a72d8b33889c" + + "6c093a466cef0281801d3df0d02124766dd0be98349b19eb36a508c4e679e793ba0a8bef" + + "4d786888c1e9947078b1ea28938716677b4ad8c5052af12eb73ac194915264a913709a0b" + + "7b9f98d4a18edd781a13d49899f91c20dbd8eb2e61d991ba19b5cdc08893f5cb9d39e5a6" + + "0629ea16d426244673b1b3ee72bd30e41fac8395acac40077403de5efd028180050731dd" + + "d71b1a2b96c8d538ba90bb6b62c8b1c74c03aae9a9f59d21a7a82b0d572ef06fa9c807bf" + + "c373d6b30d809c7871df96510c577421d9860c7383fda0919ece19996b3ca13562159193" + + "c0c246471e287f975e8e57034e5136aaf44254e2650def3d51292474c515b1588969112e" + + "0a85cc77073e9d64d2c2fc497844284b02818100d71d63eabf416cf677401ebf965f8314" + + "120b568a57dd3bd9116c629c40dc0c6948bab3a13cc544c31c7da40e76132ef5dd3f7534" + + "45a635930c74326ae3df0edd1bfb1523e3aa259873ac7cf1ac31151ec8f37b528c275622" + + "48f99b8bed59fd4da2576aa6ee20d93a684900bf907e80c66d6e2261ae15e55284b4ed9d" + + "6bdaa059" + +const responderCertHex = "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" + + "150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" + + "33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" + + "526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" + + "0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" + + "c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" + + "bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" + + "3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" + + "9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" + + "285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" + + "55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" + + "a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" + + "130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" + + "06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" + + "31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" + + "9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" + + "09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" + + "d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" + + "9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" + + "66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" + + "3a25439a94299a65a709756c7a3e568be049d5c38839" + +const errorResponseHex = "30030a0101" diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go new file mode 100644 index 0000000..592d186 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -0,0 +1,219 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +package armor // import "golang.org/x/crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// where Headers is a possibly empty sequence of Key: Value lines. +// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF { + if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. +func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor_test.go b/vendor/golang.org/x/crypto/openpgp/armor/armor_test.go new file mode 100644 index 0000000..9334e94 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor_test.go @@ -0,0 +1,95 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "bytes" + "hash/adler32" + "io/ioutil" + "testing" +) + +func TestDecodeEncode(t *testing.T) { + buf := bytes.NewBuffer([]byte(armorExample1)) + result, err := Decode(buf) + if err != nil { + t.Error(err) + } + expectedType := "PGP SIGNATURE" + if result.Type != expectedType { + t.Errorf("result.Type: got:%s want:%s", result.Type, expectedType) + } + if len(result.Header) != 1 { + t.Errorf("len(result.Header): got:%d want:1", len(result.Header)) + } + v, ok := result.Header["Version"] + if !ok || v != "GnuPG v1.4.10 (GNU/Linux)" { + t.Errorf("result.Header: got:%#v", result.Header) + } + + contents, err := ioutil.ReadAll(result.Body) + if err != nil { + t.Error(err) + } + + if adler32.Checksum(contents) != 0x27b144be { + t.Errorf("contents: got: %x", contents) + } + + buf = bytes.NewBuffer(nil) + w, err := Encode(buf, result.Type, result.Header) + if err != nil { + t.Error(err) + } + _, err = w.Write(contents) + if err != nil { + t.Error(err) + } + w.Close() + + if !bytes.Equal(buf.Bytes(), []byte(armorExample1)) { + t.Errorf("got: %s\nwant: %s", string(buf.Bytes()), armorExample1) + } +} + +func TestLongHeader(t *testing.T) { + buf := bytes.NewBuffer([]byte(armorLongLine)) + result, err := Decode(buf) + if err != nil { + t.Error(err) + return + } + value, ok := result.Header["Version"] + if !ok { + t.Errorf("missing Version header") + } + if value != longValueExpected { + t.Errorf("got: %s want: %s", value, longValueExpected) + } +} + +const armorExample1 = `-----BEGIN PGP SIGNATURE----- +Version: GnuPG v1.4.10 (GNU/Linux) + +iJwEAAECAAYFAk1Fv/0ACgkQo01+GMIMMbsYTwQAiAw+QAaNfY6WBdplZ/uMAccm +4g+81QPmTSGHnetSb6WBiY13kVzK4HQiZH8JSkmmroMLuGeJwsRTEL4wbjRyUKEt +p1xwUZDECs234F1xiG5enc5SGlRtP7foLBz9lOsjx+LEcA4sTl5/2eZR9zyFZqWW +TxRjs+fJCIFuo71xb1g= +=/teI +-----END PGP SIGNATURE-----` + +const armorLongLine = `-----BEGIN PGP SIGNATURE----- +Version: 0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz + +iQEcBAABAgAGBQJMtFESAAoJEKsQXJGvOPsVj40H/1WW6jaMXv4BW+1ueDSMDwM8 +kx1fLOXbVM5/Kn5LStZNt1jWWnpxdz7eq3uiqeCQjmqUoRde3YbB2EMnnwRbAhpp +cacnAvy9ZQ78OTxUdNW1mhX5bS6q1MTEJnl+DcyigD70HG/yNNQD7sOPMdYQw0TA +byQBwmLwmTsuZsrYqB68QyLHI+DUugn+kX6Hd2WDB62DKa2suoIUIHQQCd/ofwB3 +WfCYInXQKKOSxu2YOg2Eb4kLNhSMc1i9uKUWAH+sdgJh7NBgdoE4MaNtBFkHXRvv +okWuf3+xA9ksp1npSY/mDvgHijmjvtpRDe6iUeqfCn8N9u9CBg8geANgaG8+QA4= +=wfQG +-----END PGP SIGNATURE-----` + +const longValueExpected = "0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz" diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go new file mode 100644 index 0000000..6f07582 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/encode.go @@ -0,0 +1,160 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. +type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. +// +// It's built into a stack of io.Writers: +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go new file mode 100644 index 0000000..e601e38 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/canonical_text.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import "hash" + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + start := 0 + + for i, c := range buf { + switch cth.s { + case 0: + if c == '\r' { + cth.s = 1 + } else if c == '\n' { + cth.h.Write(buf[start:i]) + cth.h.Write(newline) + start = i + 1 + } + case 1: + cth.s = 0 + } + } + + cth.h.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text_test.go b/vendor/golang.org/x/crypto/openpgp/canonical_text_test.go new file mode 100644 index 0000000..8f3ba2a --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/canonical_text_test.go @@ -0,0 +1,52 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "bytes" + "testing" +) + +type recordingHash struct { + buf *bytes.Buffer +} + +func (r recordingHash) Write(b []byte) (n int, err error) { + return r.buf.Write(b) +} + +func (r recordingHash) Sum(in []byte) []byte { + return append(in, r.buf.Bytes()...) +} + +func (r recordingHash) Reset() { + panic("shouldn't be called") +} + +func (r recordingHash) Size() int { + panic("shouldn't be called") +} + +func (r recordingHash) BlockSize() int { + panic("shouldn't be called") +} + +func testCanonicalText(t *testing.T, input, expected string) { + r := recordingHash{bytes.NewBuffer(nil)} + c := NewCanonicalTextHash(r) + c.Write([]byte(input)) + result := c.Sum(nil) + if expected != string(result) { + t.Errorf("input: %x got: %x want: %x", input, result, expected) + } +} + +func TestCanonicalText(t *testing.T) { + testCanonicalText(t, "foo\n", "foo\r\n") + testCanonicalText(t, "foo", "foo") + testCanonicalText(t, "foo\r\n", "foo\r\n") + testCanonicalText(t, "foo\r\nbar", "foo\r\nbar") + testCanonicalText(t, "foo\r\nbar\n\n", "foo\r\nbar\r\n\r\n") +} diff --git a/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go new file mode 100644 index 0000000..def4cab --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go @@ -0,0 +1,376 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clearsign generates and processes OpenPGP, clear-signed data. See +// RFC 4880, section 7. +// +// Clearsigned messages are cryptographically signed, but the contents of the +// message are kept in plaintext so that it can be read without special tools. +package clearsign // import "golang.org/x/crypto/openpgp/clearsign" + +import ( + "bufio" + "bytes" + "crypto" + "hash" + "io" + "net/textproto" + "strconv" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// A Block represents a clearsigned message. A signature on a Block can +// be checked by passing Bytes into openpgp.CheckDetachedSignature. +type Block struct { + Headers textproto.MIMEHeader // Optional message headers + Plaintext []byte // The original message text + Bytes []byte // The signed message + ArmoredSignature *armor.Block // The signature block +} + +// start is the marker which denotes the beginning of a clearsigned message. +var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----") + +// dashEscape is prefixed to any lines that begin with a hyphen so that they +// can't be confused with endText. +var dashEscape = []byte("- ") + +// endText is a marker which denotes the end of the message and the start of +// an armored signature. +var endText = []byte("-----BEGIN PGP SIGNATURE-----") + +// end is a marker which denotes the end of the armored signature. +var end = []byte("\n-----END PGP SIGNATURE-----") + +var crlf = []byte("\r\n") +var lf = byte('\n') + +// getLine returns the first \r\n or \n delineated line from the given byte +// array. The line does not include the \r\n or \n. The remainder of the byte +// array (also not including the new line bytes) is also returned and this will +// always be smaller than the original argument. +func getLine(data []byte) (line, rest []byte) { + i := bytes.Index(data, []byte{'\n'}) + var j int + if i < 0 { + i = len(data) + j = i + } else { + j = i + 1 + if i > 0 && data[i-1] == '\r' { + i-- + } + } + return data[0:i], data[j:] +} + +// Decode finds the first clearsigned message in data and returns it, as well +// as the suffix of data which remains after the message. +func Decode(data []byte) (b *Block, rest []byte) { + // start begins with a newline. However, at the very beginning of + // the byte array, we'll accept the start string without it. + rest = data + if bytes.HasPrefix(data, start[1:]) { + rest = rest[len(start)-1:] + } else if i := bytes.Index(data, start); i >= 0 { + rest = rest[i+len(start):] + } else { + return nil, data + } + + // Consume the start line. + _, rest = getLine(rest) + + var line []byte + b = &Block{ + Headers: make(textproto.MIMEHeader), + } + + // Next come a series of header lines. + for { + // This loop terminates because getLine's second result is + // always smaller than its argument. + if len(rest) == 0 { + return nil, data + } + // An empty line marks the end of the headers. + if line, rest = getLine(rest); len(line) == 0 { + break + } + + i := bytes.Index(line, []byte{':'}) + if i == -1 { + return nil, data + } + + key, val := line[0:i], line[i+1:] + key = bytes.TrimSpace(key) + val = bytes.TrimSpace(val) + b.Headers.Add(string(key), string(val)) + } + + firstLine := true + for { + start := rest + + line, rest = getLine(rest) + if len(line) == 0 && len(rest) == 0 { + // No armored data was found, so this isn't a complete message. + return nil, data + } + if bytes.Equal(line, endText) { + // Back up to the start of the line because armor expects to see the + // header line. + rest = start + break + } + + // The final CRLF isn't included in the hash so we don't write it until + // we've seen the next line. + if firstLine { + firstLine = false + } else { + b.Bytes = append(b.Bytes, crlf...) + } + + if bytes.HasPrefix(line, dashEscape) { + line = line[2:] + } + line = bytes.TrimRight(line, " \t") + b.Bytes = append(b.Bytes, line...) + + b.Plaintext = append(b.Plaintext, line...) + b.Plaintext = append(b.Plaintext, lf) + } + + // We want to find the extent of the armored data (including any newlines at + // the end). + i := bytes.Index(rest, end) + if i == -1 { + return nil, data + } + i += len(end) + for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') { + i++ + } + armored := rest[:i] + rest = rest[i:] + + var err error + b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored)) + if err != nil { + return nil, data + } + + return b, rest +} + +// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed +// message. The clear-signed message is written to buffered and a hash, suitable +// for signing, is maintained in h. +// +// When closed, an armored signature is created and written to complete the +// message. +type dashEscaper struct { + buffered *bufio.Writer + h hash.Hash + hashType crypto.Hash + + atBeginningOfLine bool + isFirstLine bool + + whitespace []byte + byteBuf []byte // a one byte buffer to save allocations + + privateKey *packet.PrivateKey + config *packet.Config +} + +func (d *dashEscaper) Write(data []byte) (n int, err error) { + for _, b := range data { + d.byteBuf[0] = b + + if d.atBeginningOfLine { + // The final CRLF isn't included in the hash so we have to wait + // until this point (the start of the next line) before writing it. + if !d.isFirstLine { + d.h.Write(crlf) + } + d.isFirstLine = false + } + + // Any whitespace at the end of the line has to be removed so we + // buffer it until we find out whether there's more on this line. + if b == ' ' || b == '\t' || b == '\r' { + d.whitespace = append(d.whitespace, b) + d.atBeginningOfLine = false + continue + } + + if d.atBeginningOfLine { + // At the beginning of a line, hyphens have to be escaped. + if b == '-' { + // The signature isn't calculated over the dash-escaped text so + // the escape is only written to buffered. + if _, err = d.buffered.Write(dashEscape); err != nil { + return + } + d.h.Write(d.byteBuf) + d.atBeginningOfLine = false + } else if b == '\n' { + // Nothing to do because we delay writing CRLF to the hash. + } else { + d.h.Write(d.byteBuf) + d.atBeginningOfLine = false + } + if err = d.buffered.WriteByte(b); err != nil { + return + } + } else { + if b == '\n' { + // We got a raw \n. Drop any trailing whitespace and write a + // CRLF. + d.whitespace = d.whitespace[:0] + // We delay writing CRLF to the hash until the start of the + // next line. + if err = d.buffered.WriteByte(b); err != nil { + return + } + d.atBeginningOfLine = true + } else { + // Any buffered whitespace wasn't at the end of the line so + // we need to write it out. + if len(d.whitespace) > 0 { + d.h.Write(d.whitespace) + if _, err = d.buffered.Write(d.whitespace); err != nil { + return + } + d.whitespace = d.whitespace[:0] + } + d.h.Write(d.byteBuf) + if err = d.buffered.WriteByte(b); err != nil { + return + } + } + } + } + + n = len(data) + return +} + +func (d *dashEscaper) Close() (err error) { + if !d.atBeginningOfLine { + if err = d.buffered.WriteByte(lf); err != nil { + return + } + } + sig := new(packet.Signature) + sig.SigType = packet.SigTypeText + sig.PubKeyAlgo = d.privateKey.PubKeyAlgo + sig.Hash = d.hashType + sig.CreationTime = d.config.Now() + sig.IssuerKeyId = &d.privateKey.KeyId + + if err = sig.Sign(d.h, d.privateKey, d.config); err != nil { + return + } + + out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil) + if err != nil { + return + } + + if err = sig.Serialize(out); err != nil { + return + } + if err = out.Close(); err != nil { + return + } + if err = d.buffered.Flush(); err != nil { + return + } + return +} + +// Encode returns a WriteCloser which will clear-sign a message with privateKey +// and write it to w. If config is nil, sensible defaults are used. +func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + if privateKey.Encrypted { + return nil, errors.InvalidArgumentError("signing key is encrypted") + } + + hashType := config.Hash() + name := nameOfHash(hashType) + if len(name) == 0 { + return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType))) + } + + if !hashType.Available() { + return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType))) + } + h := hashType.New() + + buffered := bufio.NewWriter(w) + // start has a \n at the beginning that we don't want here. + if _, err = buffered.Write(start[1:]); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if _, err = buffered.WriteString("Hash: "); err != nil { + return + } + if _, err = buffered.WriteString(name); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + + plaintext = &dashEscaper{ + buffered: buffered, + h: h, + hashType: hashType, + + atBeginningOfLine: true, + isFirstLine: true, + + byteBuf: make([]byte, 1), + + privateKey: privateKey, + config: config, + } + + return +} + +// nameOfHash returns the OpenPGP name for the given hash, or the empty string +// if the name isn't known. See RFC 4880, section 9.4. +func nameOfHash(h crypto.Hash) string { + switch h { + case crypto.MD5: + return "MD5" + case crypto.SHA1: + return "SHA1" + case crypto.RIPEMD160: + return "RIPEMD160" + case crypto.SHA224: + return "SHA224" + case crypto.SHA256: + return "SHA256" + case crypto.SHA384: + return "SHA384" + case crypto.SHA512: + return "SHA512" + } + return "" +} diff --git a/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go new file mode 100644 index 0000000..2c09480 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go @@ -0,0 +1,210 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package clearsign + +import ( + "bytes" + "golang.org/x/crypto/openpgp" + "testing" +) + +func testParse(t *testing.T, input []byte, expected, expectedPlaintext string) { + b, rest := Decode(input) + if b == nil { + t.Fatal("failed to decode clearsign message") + } + if !bytes.Equal(rest, []byte("trailing")) { + t.Errorf("unexpected remaining bytes returned: %s", string(rest)) + } + if b.ArmoredSignature.Type != "PGP SIGNATURE" { + t.Errorf("bad armor type, got:%s, want:PGP SIGNATURE", b.ArmoredSignature.Type) + } + if !bytes.Equal(b.Bytes, []byte(expected)) { + t.Errorf("bad body, got:%x want:%x", b.Bytes, expected) + } + + if !bytes.Equal(b.Plaintext, []byte(expectedPlaintext)) { + t.Errorf("bad plaintext, got:%x want:%x", b.Plaintext, expectedPlaintext) + } + + keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(signingKey)) + if err != nil { + t.Errorf("failed to parse public key: %s", err) + } + + if _, err := openpgp.CheckDetachedSignature(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body); err != nil { + t.Errorf("failed to check signature: %s", err) + } +} + +func TestParse(t *testing.T) { + testParse(t, clearsignInput, "Hello world\r\nline 2", "Hello world\nline 2\n") + testParse(t, clearsignInput2, "\r\n\r\n(This message has a couple of blank lines at the start and end.)\r\n\r\n", "\n\n(This message has a couple of blank lines at the start and end.)\n\n\n") +} + +func TestParseInvalid(t *testing.T) { + if b, _ := Decode(clearsignInput3); b != nil { + t.Fatal("decoded a bad clearsigned message without any error") + } +} + +func TestParseWithNoNewlineAtEnd(t *testing.T) { + input := clearsignInput + input = input[:len(input)-len("trailing")-1] + b, rest := Decode(input) + if b == nil { + t.Fatal("failed to decode clearsign message") + } + if len(rest) > 0 { + t.Errorf("unexpected remaining bytes returned: %s", string(rest)) + } +} + +var signingTests = []struct { + in, signed, plaintext string +}{ + {"", "", ""}, + {"a", "a", "a\n"}, + {"a\n", "a", "a\n"}, + {"-a\n", "-a", "-a\n"}, + {"--a\nb", "--a\r\nb", "--a\nb\n"}, + // leading whitespace + {" a\n", " a", " a\n"}, + {" a\n", " a", " a\n"}, + // trailing whitespace (should be stripped) + {"a \n", "a", "a\n"}, + {"a ", "a", "a\n"}, + // whitespace-only lines (should be stripped) + {" \n", "", "\n"}, + {" ", "", "\n"}, + {"a\n \n \nb\n", "a\r\n\r\n\r\nb", "a\n\n\nb\n"}, +} + +func TestSigning(t *testing.T) { + keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(signingKey)) + if err != nil { + t.Errorf("failed to parse public key: %s", err) + } + + for i, test := range signingTests { + var buf bytes.Buffer + + plaintext, err := Encode(&buf, keyring[0].PrivateKey, nil) + if err != nil { + t.Errorf("#%d: error from Encode: %s", i, err) + continue + } + if _, err := plaintext.Write([]byte(test.in)); err != nil { + t.Errorf("#%d: error from Write: %s", i, err) + continue + } + if err := plaintext.Close(); err != nil { + t.Fatalf("#%d: error from Close: %s", i, err) + continue + } + + b, _ := Decode(buf.Bytes()) + if b == nil { + t.Errorf("#%d: failed to decode clearsign message", i) + continue + } + if !bytes.Equal(b.Bytes, []byte(test.signed)) { + t.Errorf("#%d: bad result, got:%x, want:%x", i, b.Bytes, test.signed) + continue + } + if !bytes.Equal(b.Plaintext, []byte(test.plaintext)) { + t.Errorf("#%d: bad result, got:%x, want:%x", i, b.Plaintext, test.plaintext) + continue + } + + if _, err := openpgp.CheckDetachedSignature(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body); err != nil { + t.Errorf("#%d: failed to check signature: %s", i, err) + } + } +} + +var clearsignInput = []byte(` +;lasjlkfdsa + +-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA1 + +Hello world +line 2 +-----BEGIN PGP SIGNATURE----- +Version: GnuPG v1.4.10 (GNU/Linux) + +iJwEAQECAAYFAk8kMuEACgkQO9o98PRieSpMsAQAhmY/vwmNpflrPgmfWsYhk5O8 +pjnBUzZwqTDoDeINjZEoPDSpQAHGhjFjgaDx/Gj4fAl0dM4D0wuUEBb6QOrwflog +2A2k9kfSOMOtk0IH/H5VuFN1Mie9L/erYXjTQIptv9t9J7NoRBMU0QOOaFU0JaO9 +MyTpno24AjIAGb+mH1U= +=hIJ6 +-----END PGP SIGNATURE----- +trailing`) + +var clearsignInput2 = []byte(` +asdlfkjasdlkfjsadf + +-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA256 + + + +(This message has a couple of blank lines at the start and end.) + + +-----BEGIN PGP SIGNATURE----- +Version: GnuPG v1.4.11 (GNU/Linux) + +iJwEAQEIAAYFAlPpSREACgkQO9o98PRieSpZTAP+M8QUoCt/7Rf3YbXPcdzIL32v +pt1I+cMNeopzfLy0u4ioEFi8s5VkwpL1AFmirvgViCwlf82inoRxzZRiW05JQ5LI +ESEzeCoy2LIdRCQ2hcrG8pIUPzUO4TqO5D/dMbdHwNH4h5nNmGJUAEG6FpURlPm+ +qZg6BaTvOxepqOxnhVU= +=e+C6 +-----END PGP SIGNATURE----- + +trailing`) + +var clearsignInput3 = []byte(` +-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA256 + +(This message was truncated.) +`) + +var signingKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp +idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn +vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB +AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X +0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL +IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk +VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn +gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 +TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx +q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz +dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA +CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 +ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ +eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid +AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV +bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK +/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA +A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX +TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc +lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 +rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN +oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 +QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU +nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC +AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp +BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad +AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL +VrM0m72/jnpKo04= +=zNCn +-----END PGP PRIVATE KEY BLOCK----- +` diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 0000000..73f4fe3 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,122 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. +// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +package elgamal // import "golang.org/x/crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. +func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { + pLen := (pub.P.BitLen() + 7) / 8 + if len(msg) > pLen-11 { + err = errors.New("elgamal: message too long") + return + } + + // EM = 0x02 || PS || 0x00 || M + em := make([]byte, pLen-1) + em[0] = 2 + ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, random) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + + k, err := rand.Int(random, pub.P) + if err != nil { + return + } + + c1 = new(big.Int).Exp(pub.G, k, pub.P) + s := new(big.Int).Exp(pub.Y, k, pub.P) + c2 = s.Mul(s, m) + c2.Mod(c2, pub.P) + + return +} + +// Decrypt takes two integers, resulting from an ElGamal encryption, and +// returns the plaintext of the message. An error can result only if the +// ciphertext is invalid. Users should keep in mind that this is a padding +// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can +// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// Bleichenbacher, Advances in Cryptology (Crypto '98), +func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { + s := new(big.Int).Exp(c1, priv.X, priv.P) + s.ModInverse(s, priv.P) + s.Mul(s, c2) + s.Mod(s, priv.P) + em := s.Bytes() + + firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. + var lookingForIndex, index int + lookingForIndex = 1 + + for i := 1; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { + return nil, errors.New("elgamal: decryption error") + } + return em[index+1:], nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. +func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + } + } + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go new file mode 100644 index 0000000..c4f99f5 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go @@ -0,0 +1,49 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package elgamal + +import ( + "bytes" + "crypto/rand" + "math/big" + "testing" +) + +// This is the 1024-bit MODP group from RFC 5114, section 2.1: +const primeHex = "B10B8F96A080E01DDE92DE5EAE5D54EC52C99FBCFB06A3C69A6A9DCA52D23B616073E28675A23D189838EF1E2EE652C013ECB4AEA906112324975C3CD49B83BFACCBDD7D90C4BD7098488E9C219A73724EFFD6FAE5644738FAA31A4FF55BCCC0A151AF5F0DC8B4BD45BF37DF365C1A65E68CFDA76D4DA708DF1FB2BC2E4A4371" + +const generatorHex = "A4D1CBD5C3FD34126765A442EFB99905F8104DD258AC507FD6406CFF14266D31266FEA1E5C41564B777E690F5504F213160217B4B01B886A5E91547F9E2749F4D7FBD7D3B9A92EE1909D0D2263F80A76A6A24C087A091F531DBF0A0169B6A28AD662A4D18E73AFA32D779D5918D08BC8858F4DCEF97C2A24855E6EEB22B3B2E5" + +func fromHex(hex string) *big.Int { + n, ok := new(big.Int).SetString(hex, 16) + if !ok { + panic("failed to parse hex number") + } + return n +} + +func TestEncryptDecrypt(t *testing.T) { + priv := &PrivateKey{ + PublicKey: PublicKey{ + G: fromHex(generatorHex), + P: fromHex(primeHex), + }, + X: fromHex("42"), + } + priv.Y = new(big.Int).Exp(priv.G, priv.X, priv.P) + + message := []byte("hello world") + c1, c2, err := Encrypt(rand.Reader, &priv.PublicKey, message) + if err != nil { + t.Errorf("error encrypting: %s", err) + } + message2, err := Decrypt(priv, c1, c2) + if err != nil { + t.Errorf("error decrypting: %s", err) + } + if !bytes.Equal(message2, message) { + t.Errorf("decryption failed, got: %x, want: %x", message2, message) + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go new file mode 100644 index 0000000..eb0550b --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go @@ -0,0 +1,72 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "golang.org/x/crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go new file mode 100644 index 0000000..fd582a8 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/keys.go @@ -0,0 +1,641 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto/rsa" + "io" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. +type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + // KeysById returns the set of keys that have the given key id. + KeysById(id uint64) []Key + // KeysByIdAndUsage returns the set of keys with the given id + // that also meet the key usage given by requiredUsage. + // The requiredUsage is expressed as the bitwise-OR of + // packet.KeyFlag* values. + KeysByIdUsage(id uint64, requiredUsage byte) []Key + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// primaryIdentity returns the Identity marked as primary or the first identity +// if none are so marked. +func (e *Entity) primaryIdentity() *Identity { + var firstIdentity *Identity + for _, ident := range e.Identities { + if firstIdentity == nil { + firstIdentity = ident + } + if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + return ident + } + } + return firstIdentity +} + +// encryptionKey returns the best candidate Key for encrypting a message to the +// given Entity. +func (e *Entity) encryptionKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + // Iterate the keys to find the newest key + var maxTime time.Time + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagEncryptCommunications && + subkey.PublicKey.PubKeyAlgo.CanEncrypt() && + !subkey.Sig.KeyExpired(now) && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + } + + // If we don't have any candidate subkeys for encryption and + // the primary key doesn't have any usage metadata then we + // assume that the primary key is ok. Or, if the primary key is + // marked as ok to encrypt to, then we can obviously use it. + i := e.primaryIdentity() + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && + e.PrimaryKey.PubKeyAlgo.CanEncrypt() && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + } + + // This Entity appears to be signing only. + return Key{}, false +} + +// signingKey return the best candidate Key for signing a message with this +// Entity. +func (e *Entity) signingKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagSign && + subkey.PublicKey.PubKeyAlgo.CanSign() && + !subkey.Sig.KeyExpired(now) { + candidateSubkey = i + break + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + } + + // If we have no candidate subkey then we assume that it's ok to sign + // with the primary key. + i := e.primaryIdentity() + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + } + + return Key{}, false +} + +// An EntityList contains one or more Entities. +type EntityList []*Entity + +// KeysById returns the set of keys that have the given key id. +func (el EntityList) KeysById(id uint64) (keys []Key) { + for _, e := range el { + if e.PrimaryKey.KeyId == id { + var selfSig *packet.Signature + for _, ident := range e.Identities { + if selfSig == nil { + selfSig = ident.SelfSignature + } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + selfSig = ident.SelfSignature + break + } + } + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + } + + for _, subKey := range e.Subkeys { + if subKey.PublicKey.KeyId == id { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// KeysByIdAndUsage returns the set of keys with the given id that also meet +// the key usage given by requiredUsage. The requiredUsage is expressed as +// the bitwise-OR of packet.KeyFlag* values. +func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { + for _, key := range el.KeysById(id) { + if len(key.Entity.Revocations) > 0 { + continue + } + + if key.SelfSignature.RevocationReason != nil { + continue + } + + if key.SelfSignature.FlagsValid && requiredUsage != 0 { + var usage byte + if key.SelfSignature.FlagCertify { + usage |= packet.KeyFlagCertify + } + if key.SelfSignature.FlagSign { + usage |= packet.KeyFlagSign + } + if key.SelfSignature.FlagEncryptCommunications { + usage |= packet.KeyFlagEncryptCommunications + } + if key.SelfSignature.FlagEncryptStorage { + usage |= packet.KeyFlagEncryptStorage + } + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. +func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var current *Identity + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + current = new(Identity) + current.Name = pkt.Id + current.UserId = pkt + e.Identities[pkt.Id] = current + + for { + p, err = packets.Next() + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } else if err != nil { + return nil, err + } + + sig, ok := p.(*packet.Signature) + if !ok { + return nil, errors.StructuralError("user ID packet not followed by self-signature") + } + + if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + current.SelfSignature = sig + break + } + current.Signatures = append(current.Signatures, sig) + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. to bind additional + // revocation keys). + } else if current == nil { + return nil, errors.StructuralError("signature packet found before user id packet") + } else { + current.Signatures = append(current.Signatures, pkt) + } + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + p, err := packets.Next() + if err == io.EOF { + return io.ErrUnexpectedEOF + } + if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + var ok bool + subKey.Sig, ok = p.(*packet.Signature) + if !ok { + return errors.StructuralError("subkey packet not followed by signature") + } + if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig) + if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + e.Subkeys = append(e.Subkeys, subKey) + return nil +} + +const defaultRSAKeyBits = 2048 + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. +func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + currentTime := config.Now() + + bits := defaultRSAKeyBits + if config != nil && config.RSABits != 0 { + bits = config.RSABits + } + + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return nil, errors.InvalidArgumentError("user id field contained invalid characters") + } + signingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + + e := &Entity{ + PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv), + Identities: make(map[string]*Identity), + } + isPrimaryId := true + e.Identities[uid.Id] = &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: &packet.Signature{ + CreationTime: currentTime, + SigType: packet.SigTypePositiveCert, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + IsPrimaryId: &isPrimaryId, + FlagsValid: true, + FlagSign: true, + FlagCertify: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + + // If the user passes in a DefaultHash via packet.Config, + // set the PreferredHash for the SelfSignature. + if config != nil && config.DefaultHash != 0 { + e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} + } + + // Likewise for DefaultCipher. + if config != nil && config.DefaultCipher != 0 { + e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} + } + + e.Subkeys = make([]Subkey, 1) + e.Subkeys[0] = Subkey{ + PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv), + Sig: &packet.Signature{ + CreationTime: currentTime, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + e.Subkeys[0].PublicKey.IsSubkey = true + e.Subkeys[0].PrivateKey.IsSubkey = true + + return e, nil +} + +// SerializePrivate serializes an Entity, including private key material, to +// the given Writer. For now, it must only be used on an Entity returned from +// NewEntity. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w. (No private +// key material will be output). +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. +func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/keys_test.go b/vendor/golang.org/x/crypto/openpgp/keys_test.go new file mode 100644 index 0000000..3a15506 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/keys_test.go @@ -0,0 +1,469 @@ +package openpgp + +import ( + "bytes" + "crypto" + "strings" + "testing" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +func TestKeyExpiry(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(expiringKeyHex)) + if err != nil { + t.Fatal(err) + } + entity := kring[0] + + const timeFormat = "2006-01-02" + time1, _ := time.Parse(timeFormat, "2013-07-01") + + // The expiringKeyHex key is structured as: + // + // pub 1024R/5E237D8C created: 2013-07-01 expires: 2013-07-31 usage: SC + // sub 1024R/1ABB25A0 created: 2013-07-01 23:11:07 +0200 CEST expires: 2013-07-08 usage: E + // sub 1024R/96A672F5 created: 2013-07-01 23:11:23 +0200 CEST expires: 2013-07-31 usage: E + // + // So this should select the newest, non-expired encryption key. + key, _ := entity.encryptionKey(time1) + if id := key.PublicKey.KeyIdShortString(); id != "96A672F5" { + t.Errorf("Expected key 1ABB25A0 at time %s, but got key %s", time1.Format(timeFormat), id) + } + + // Once the first encryption subkey has expired, the second should be + // selected. + time2, _ := time.Parse(timeFormat, "2013-07-09") + key, _ = entity.encryptionKey(time2) + if id := key.PublicKey.KeyIdShortString(); id != "96A672F5" { + t.Errorf("Expected key 96A672F5 at time %s, but got key %s", time2.Format(timeFormat), id) + } + + // Once all the keys have expired, nothing should be returned. + time3, _ := time.Parse(timeFormat, "2013-08-01") + if key, ok := entity.encryptionKey(time3); ok { + t.Errorf("Expected no key at time %s, but got key %s", time3.Format(timeFormat), key.PublicKey.KeyIdShortString()) + } +} + +func TestMissingCrossSignature(t *testing.T) { + // This public key has a signing subkey, but the subkey does not + // contain a cross-signature. + keys, err := ReadArmoredKeyRing(bytes.NewBufferString(missingCrossSignatureKey)) + if len(keys) != 0 { + t.Errorf("Accepted key with missing cross signature") + } + if err == nil { + t.Fatal("Failed to detect error in keyring with missing cross signature") + } + structural, ok := err.(errors.StructuralError) + if !ok { + t.Fatalf("Unexpected class of error: %T. Wanted StructuralError", err) + } + const expectedMsg = "signing subkey is missing cross-signature" + if !strings.Contains(string(structural), expectedMsg) { + t.Fatalf("Unexpected error: %q. Expected it to contain %q", err, expectedMsg) + } +} + +func TestInvalidCrossSignature(t *testing.T) { + // This public key has a signing subkey, and the subkey has an + // embedded cross-signature. However, the cross-signature does + // not correctly validate over the primary and subkey. + keys, err := ReadArmoredKeyRing(bytes.NewBufferString(invalidCrossSignatureKey)) + if len(keys) != 0 { + t.Errorf("Accepted key with invalid cross signature") + } + if err == nil { + t.Fatal("Failed to detect error in keyring with an invalid cross signature") + } + structural, ok := err.(errors.StructuralError) + if !ok { + t.Fatalf("Unexpected class of error: %T. Wanted StructuralError", err) + } + const expectedMsg = "subkey signature invalid" + if !strings.Contains(string(structural), expectedMsg) { + t.Fatalf("Unexpected error: %q. Expected it to contain %q", err, expectedMsg) + } +} + +func TestGoodCrossSignature(t *testing.T) { + // This public key has a signing subkey, and the subkey has an + // embedded cross-signature which correctly validates over the + // primary and subkey. + keys, err := ReadArmoredKeyRing(bytes.NewBufferString(goodCrossSignatureKey)) + if err != nil { + t.Fatal(err) + } + if len(keys) != 1 { + t.Errorf("Failed to accept key with good cross signature, %d", len(keys)) + } + if len(keys[0].Subkeys) != 1 { + t.Errorf("Failed to accept good subkey, %d", len(keys[0].Subkeys)) + } +} + +// TestExternallyRevokableKey attempts to load and parse a key with a third party revocation permission. +func TestExternallyRevocableKey(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(subkeyUsageHex)) + if err != nil { + t.Fatal(err) + } + + // The 0xA42704B92866382A key can be revoked by 0xBE3893CB843D0FE70C + // according to this signature that appears within the key: + // :signature packet: algo 1, keyid A42704B92866382A + // version 4, created 1396409682, md5len 0, sigclass 0x1f + // digest algo 2, begin of digest a9 84 + // hashed subpkt 2 len 4 (sig created 2014-04-02) + // hashed subpkt 12 len 22 (revocation key: c=80 a=1 f=CE094AA433F7040BB2DDF0BE3893CB843D0FE70C) + // hashed subpkt 7 len 1 (not revocable) + // subpkt 16 len 8 (issuer key ID A42704B92866382A) + // data: [1024 bits] + + id := uint64(0xA42704B92866382A) + keys := kring.KeysById(id) + if len(keys) != 1 { + t.Errorf("Expected to find key id %X, but got %d matches", id, len(keys)) + } +} + +func TestKeyRevocation(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(revokedKeyHex)) + if err != nil { + t.Fatal(err) + } + + // revokedKeyHex contains these keys: + // pub 1024R/9A34F7C0 2014-03-25 [revoked: 2014-03-25] + // sub 1024R/1BA3CD60 2014-03-25 [revoked: 2014-03-25] + ids := []uint64{0xA401D9F09A34F7C0, 0x5CD3BE0A1BA3CD60} + + for _, id := range ids { + keys := kring.KeysById(id) + if len(keys) != 1 { + t.Errorf("Expected KeysById to find revoked key %X, but got %d matches", id, len(keys)) + } + keys = kring.KeysByIdUsage(id, 0) + if len(keys) != 0 { + t.Errorf("Expected KeysByIdUsage to filter out revoked key %X, but got %d matches", id, len(keys)) + } + } +} + +func TestSubkeyRevocation(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(revokedSubkeyHex)) + if err != nil { + t.Fatal(err) + } + + // revokedSubkeyHex contains these keys: + // pub 1024R/4EF7E4BECCDE97F0 2014-03-25 + // sub 1024R/D63636E2B96AE423 2014-03-25 + // sub 1024D/DBCE4EE19529437F 2014-03-25 + // sub 1024R/677815E371C2FD23 2014-03-25 [revoked: 2014-03-25] + validKeys := []uint64{0x4EF7E4BECCDE97F0, 0xD63636E2B96AE423, 0xDBCE4EE19529437F} + revokedKey := uint64(0x677815E371C2FD23) + + for _, id := range validKeys { + keys := kring.KeysById(id) + if len(keys) != 1 { + t.Errorf("Expected KeysById to find key %X, but got %d matches", id, len(keys)) + } + keys = kring.KeysByIdUsage(id, 0) + if len(keys) != 1 { + t.Errorf("Expected KeysByIdUsage to find key %X, but got %d matches", id, len(keys)) + } + } + + keys := kring.KeysById(revokedKey) + if len(keys) != 1 { + t.Errorf("Expected KeysById to find key %X, but got %d matches", revokedKey, len(keys)) + } + + keys = kring.KeysByIdUsage(revokedKey, 0) + if len(keys) != 0 { + t.Errorf("Expected KeysByIdUsage to filter out revoked key %X, but got %d matches", revokedKey, len(keys)) + } +} + +func TestKeyUsage(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(subkeyUsageHex)) + if err != nil { + t.Fatal(err) + } + + // subkeyUsageHex contains these keys: + // pub 1024R/2866382A created: 2014-04-01 expires: never usage: SC + // sub 1024R/936C9153 created: 2014-04-01 expires: never usage: E + // sub 1024R/64D5F5BB created: 2014-04-02 expires: never usage: E + // sub 1024D/BC0BA992 created: 2014-04-02 expires: never usage: S + certifiers := []uint64{0xA42704B92866382A} + signers := []uint64{0xA42704B92866382A, 0x42CE2C64BC0BA992} + encrypters := []uint64{0x09C0C7D9936C9153, 0xC104E98664D5F5BB} + + for _, id := range certifiers { + keys := kring.KeysByIdUsage(id, packet.KeyFlagCertify) + if len(keys) == 1 { + if keys[0].PublicKey.KeyId != id { + t.Errorf("Expected to find certifier key id %X, but got %X", id, keys[0].PublicKey.KeyId) + } + } else { + t.Errorf("Expected one match for certifier key id %X, but got %d matches", id, len(keys)) + } + } + + for _, id := range signers { + keys := kring.KeysByIdUsage(id, packet.KeyFlagSign) + if len(keys) == 1 { + if keys[0].PublicKey.KeyId != id { + t.Errorf("Expected to find signing key id %X, but got %X", id, keys[0].PublicKey.KeyId) + } + } else { + t.Errorf("Expected one match for signing key id %X, but got %d matches", id, len(keys)) + } + + // This keyring contains no encryption keys that are also good for signing. + keys = kring.KeysByIdUsage(id, packet.KeyFlagEncryptStorage|packet.KeyFlagEncryptCommunications) + if len(keys) != 0 { + t.Errorf("Unexpected match for encryption key id %X", id) + } + } + + for _, id := range encrypters { + keys := kring.KeysByIdUsage(id, packet.KeyFlagEncryptStorage|packet.KeyFlagEncryptCommunications) + if len(keys) == 1 { + if keys[0].PublicKey.KeyId != id { + t.Errorf("Expected to find encryption key id %X, but got %X", id, keys[0].PublicKey.KeyId) + } + } else { + t.Errorf("Expected one match for encryption key id %X, but got %d matches", id, len(keys)) + } + + // This keyring contains no encryption keys that are also good for signing. + keys = kring.KeysByIdUsage(id, packet.KeyFlagSign) + if len(keys) != 0 { + t.Errorf("Unexpected match for signing key id %X", id) + } + } +} + +func TestIdVerification(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) + if err != nil { + t.Fatal(err) + } + if err := kring[1].PrivateKey.Decrypt([]byte("passphrase")); err != nil { + t.Fatal(err) + } + + const identity = "Test Key 1 (RSA)" + if err := kring[0].SignIdentity(identity, kring[1], nil); err != nil { + t.Fatal(err) + } + + ident, ok := kring[0].Identities[identity] + if !ok { + t.Fatal("identity missing from key after signing") + } + + checked := false + for _, sig := range ident.Signatures { + if sig.IssuerKeyId == nil || *sig.IssuerKeyId != kring[1].PrimaryKey.KeyId { + continue + } + + if err := kring[1].PrimaryKey.VerifyUserIdSignature(identity, kring[0].PrimaryKey, sig); err != nil { + t.Fatalf("error verifying new identity signature: %s", err) + } + checked = true + break + } + + if !checked { + t.Fatal("didn't find identity signature in Entity") + } +} + +func TestNewEntityWithPreferredHash(t *testing.T) { + c := &packet.Config{ + DefaultHash: crypto.SHA256, + } + entity, err := NewEntity("Golang Gopher", "Test Key", "no-reply@golang.com", c) + if err != nil { + t.Fatal(err) + } + + for _, identity := range entity.Identities { + if len(identity.SelfSignature.PreferredHash) == 0 { + t.Fatal("didn't find a preferred hash in self signature") + } + ph := hashToHashId(c.DefaultHash) + if identity.SelfSignature.PreferredHash[0] != ph { + t.Fatalf("Expected preferred hash to be %d, got %d", ph, identity.SelfSignature.PreferredHash[0]) + } + } +} + +func TestNewEntityWithoutPreferredHash(t *testing.T) { + entity, err := NewEntity("Golang Gopher", "Test Key", "no-reply@golang.com", nil) + if err != nil { + t.Fatal(err) + } + + for _, identity := range entity.Identities { + if len(identity.SelfSignature.PreferredHash) != 0 { + t.Fatalf("Expected preferred hash to be empty but got length %d", len(identity.SelfSignature.PreferredHash)) + } + } +} + +func TestNewEntityCorrectName(t *testing.T) { + entity, err := NewEntity("Golang Gopher", "Test Key", "no-reply@golang.com", nil) + if err != nil { + t.Fatal(err) + } + if len(entity.Identities) != 1 { + t.Fatalf("len(entity.Identities) = %d, want 1", len(entity.Identities)) + } + var got string + for _, i := range entity.Identities { + got = i.Name + } + want := "Golang Gopher (Test Key) " + if got != want { + t.Fatalf("Identity.Name = %q, want %q", got, want) + } +} + +func TestNewEntityWithPreferredSymmetric(t *testing.T) { + c := &packet.Config{ + DefaultCipher: packet.CipherAES256, + } + entity, err := NewEntity("Golang Gopher", "Test Key", "no-reply@golang.com", c) + if err != nil { + t.Fatal(err) + } + + for _, identity := range entity.Identities { + if len(identity.SelfSignature.PreferredSymmetric) == 0 { + t.Fatal("didn't find a preferred cipher in self signature") + } + if identity.SelfSignature.PreferredSymmetric[0] != uint8(c.DefaultCipher) { + t.Fatalf("Expected preferred cipher to be %d, got %d", uint8(c.DefaultCipher), identity.SelfSignature.PreferredSymmetric[0]) + } + } +} + +func TestNewEntityWithoutPreferredSymmetric(t *testing.T) { + entity, err := NewEntity("Golang Gopher", "Test Key", "no-reply@golang.com", nil) + if err != nil { + t.Fatal(err) + } + + for _, identity := range entity.Identities { + if len(identity.SelfSignature.PreferredSymmetric) != 0 { + t.Fatalf("Expected preferred cipher to be empty but got length %d", len(identity.SelfSignature.PreferredSymmetric)) + } + } +} + +const expiringKeyHex = "988d0451d1ec5d010400ba3385721f2dc3f4ab096b2ee867ab77213f0a27a8538441c35d2fa225b08798a1439a66a5150e6bdc3f40f5d28d588c712394c632b6299f77db8c0d48d37903fb72ebd794d61be6aa774688839e5fdecfe06b2684cc115d240c98c66cb1ef22ae84e3aa0c2b0c28665c1e7d4d044e7f270706193f5223c8d44e0d70b7b8da830011010001b40f4578706972792074657374206b657988be041301020028050251d1ec5d021b03050900278d00060b090807030206150802090a0b0416020301021e01021780000a091072589ad75e237d8c033503fd10506d72837834eb7f994117740723adc39227104b0d326a1161871c0b415d25b4aedef946ca77ea4c05af9c22b32cf98be86ab890111fced1ee3f75e87b7cc3c00dc63bbc85dfab91c0dc2ad9de2c4d13a34659333a85c6acc1a669c5e1d6cecb0cf1e56c10e72d855ae177ddc9e766f9b2dda57ccbb75f57156438bbdb4e42b88d0451d1ec5d0104009c64906559866c5cb61578f5846a94fcee142a489c9b41e67b12bb54cfe86eb9bc8566460f9a720cb00d6526fbccfd4f552071a8e3f7744b1882d01036d811ee5a3fb91a1c568055758f43ba5d2c6a9676b012f3a1a89e47bbf624f1ad571b208f3cc6224eb378f1645dd3d47584463f9eadeacfd1ce6f813064fbfdcc4b5a53001101000188a504180102000f021b0c050251d1f06b050900093e89000a091072589ad75e237d8c20e00400ab8310a41461425b37889c4da28129b5fae6084fafbc0a47dd1adc74a264c6e9c9cc125f40462ee1433072a58384daef88c961c390ed06426a81b464a53194c4e291ddd7e2e2ba3efced01537d713bd111f48437bde2363446200995e8e0d4e528dda377fd1e8f8ede9c8e2198b393bd86852ce7457a7e3daf74d510461a5b77b88d0451d1ece8010400b3a519f83ab0010307e83bca895170acce8964a044190a2b368892f7a244758d9fc193482648acb1fb9780d28cc22d171931f38bb40279389fc9bf2110876d4f3db4fcfb13f22f7083877fe56592b3b65251312c36f83ffcb6d313c6a17f197dd471f0712aad15a8537b435a92471ba2e5b0c72a6c72536c3b567c558d7b6051001101000188a504180102000f021b0c050251d1f07b050900279091000a091072589ad75e237d8ce69e03fe286026afacf7c97ee20673864d4459a2240b5655219950643c7dba0ac384b1d4359c67805b21d98211f7b09c2a0ccf6410c8c04d4ff4a51293725d8d6570d9d8bb0e10c07d22357caeb49626df99c180be02d77d1fe8ed25e7a54481237646083a9f89a11566cd20b9e995b1487c5f9e02aeb434f3a1897cd416dd0a87861838da3e9e" +const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" +const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" +const revokedSubkeyHex = "988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a0021b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" +const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv +2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR +bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL +C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP +WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y +MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA +EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ +MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N +1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm ++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N +lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW +CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF +4artDmrG +=7FfJ +-----END PGP PUBLIC KEY BLOCK-----` + +const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ +UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe +iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK +FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 +R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh ++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA +EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO +52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb +u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl +w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep +54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ +YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL +bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E +i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB +DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 +8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY +s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 +U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL +6LCg2mg= +=Dhm4 +-----END PGP PUBLIC KEY BLOCK-----` + +const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo +7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom +lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 +E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw +6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH +7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv +X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 +GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl +y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw +R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW +CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ +LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO +aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx +yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl +BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr +Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK +CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp +C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ +SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ +MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= +=vtbN +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go new file mode 100644 index 0000000..e8f0b5c --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/bzip2" + "compress/flate" + "compress/zlib" + "golang.org/x/crypto/openpgp/errors" + "io" + "strconv" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. See RFC 4880, section 5.6. +type Compressed struct { + Body io.Reader +} + +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression +) + +// CompressionConfig contains compressor configuration settings. +type CompressionConfig struct { + // Level is the compression level to use. It must be set to + // between -1 and 9, with -1 causing the compressor to use the + // default compression level, 0 causing the compressor to use + // no compression and 1 to 9 representing increasing (better, + // slower) compression levels. If Level is less than -1 or + // more then 9, a non-nil error will be returned during + // encryption. See the constants above for convenient common + // settings for Level. + Level int +} + +func (c *Compressed) parse(r io.Reader) error { + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case 1: + c.Body = flate.NewReader(r) + case 2: + c.Body, err = zlib.NewReader(r) + case 3: + c.Body = bzip2.NewReader(r) + default: + err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) + } + + return err +} + +// compressedWriterCloser represents the serialized compression stream +// header and the compressor. Its Close() method ensures that both the +// compressor and serialized stream header are closed. Its Write() +// method writes to the compressor. +type compressedWriteCloser struct { + sh io.Closer // Stream Header + c io.WriteCloser // Compressor +} + +func (cwc compressedWriteCloser) Write(p []byte) (int, error) { + return cwc.c.Write(p) +} + +func (cwc compressedWriteCloser) Close() (err error) { + err = cwc.c.Close() + if err != nil { + return err + } + + return cwc.sh.Close() +} + +// SerializeCompressed serializes a compressed data packet to w and +// returns a WriteCloser to which the literal data packets themselves +// can be written and which MUST be closed on completion. If cc is +// nil, sensible defaults will be used to configure the compression +// algorithm. +func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { + compressed, err := serializeStreamHeader(w, packetTypeCompressed) + if err != nil { + return + } + + _, err = compressed.Write([]byte{uint8(algo)}) + if err != nil { + return + } + + level := DefaultCompression + if cc != nil { + level = cc.Level + } + + var compressor io.WriteCloser + switch algo { + case CompressionZIP: + compressor, err = flate.NewWriter(compressed, level) + case CompressionZLIB: + compressor, err = zlib.NewWriterLevel(compressed, level) + default: + s := strconv.Itoa(int(algo)) + err = errors.UnsupportedError("Unsupported compression algorithm: " + s) + } + if err != nil { + return + } + + literaldata = compressedWriteCloser{compressed, compressor} + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed_test.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed_test.go new file mode 100644 index 0000000..cb2d70b --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/compressed_test.go @@ -0,0 +1,41 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "encoding/hex" + "io" + "io/ioutil" + "testing" +) + +func TestCompressed(t *testing.T) { + packet, err := Read(readerFromHex(compressedHex)) + if err != nil { + t.Errorf("failed to read Compressed: %s", err) + return + } + + c, ok := packet.(*Compressed) + if !ok { + t.Error("didn't find Compressed packet") + return + } + + contents, err := ioutil.ReadAll(c.Body) + if err != nil && err != io.EOF { + t.Error(err) + return + } + + expected, _ := hex.DecodeString(compressedExpectedHex) + if !bytes.Equal(expected, contents) { + t.Errorf("got:%x want:%x", contents, expected) + } +} + +const compressedHex = "a3013b2d90c4e02b72e25f727e5e496a5e49b11e1700" +const compressedExpectedHex = "cb1062004d14c8fe636f6e74656e74732e0a" diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go new file mode 100644 index 0000000..c76eecc --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/config.go @@ -0,0 +1,91 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/rand" + "io" + "time" +) + +// Config collects a number of parameters along with sensible defaults. +// A nil *Config is valid and results in all default values. +type Config struct { + // Rand provides the source of entropy. + // If nil, the crypto/rand Reader is used. + Rand io.Reader + // DefaultHash is the default hash function to be used. + // If zero, SHA-256 is used. + DefaultHash crypto.Hash + // DefaultCipher is the cipher to be used. + // If zero, AES-128 is used. + DefaultCipher CipherFunction + // Time returns the current time as the number of seconds since the + // epoch. If Time is nil, time.Now is used. + Time func() time.Time + // DefaultCompressionAlgo is the compression algorithm to be + // applied to the plaintext before encryption. If zero, no + // compression is done. + DefaultCompressionAlgo CompressionAlgo + // CompressionConfig configures the compression settings. + CompressionConfig *CompressionConfig + // S2KCount is only used for symmetric encryption. It + // determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 1024 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 65536 used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. When set, it is strongly encrouraged to + // use a value that is at least 65536. See RFC 4880 Section + // 3.7.1.3. + S2KCount int + // RSABits is the number of bits in new RSA keys made with NewEntity. + // If zero, then 2048 bit keys are created. + RSABits int +} + +func (c *Config) Random() io.Reader { + if c == nil || c.Rand == nil { + return rand.Reader + } + return c.Rand +} + +func (c *Config) Hash() crypto.Hash { + if c == nil || uint(c.DefaultHash) == 0 { + return crypto.SHA256 + } + return c.DefaultHash +} + +func (c *Config) Cipher() CipherFunction { + if c == nil || uint8(c.DefaultCipher) == 0 { + return CipherAES128 + } + return c.DefaultCipher +} + +func (c *Config) Now() time.Time { + if c == nil || c.Time == nil { + return time.Now() + } + return c.Time() +} + +func (c *Config) Compression() CompressionAlgo { + if c == nil { + return CompressionNone + } + return c.DefaultCompressionAlgo +} + +func (c *Config) PasswordHashIterations() int { + if c == nil || c.S2KCount == 0 { + return 0 + } + return c.S2KCount +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 0000000..02b372c --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,206 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/rsa" + "encoding/binary" + "io" + "math/big" + "strconv" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" +) + +const encryptedKeyVersion = 3 + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. +type EncryptedKey struct { + KeyId uint64 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 parsedMPI +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [10]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != encryptedKeyVersion { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + e.KeyId = binary.BigEndian.Uint64(buf[1:9]) + e.Algo = PublicKeyAlgorithm(buf[9]) + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + if err != nil { + return + } + case PubKeyAlgoElGamal: + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + if err != nil { + return + } + e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) + if err != nil { + return + } + } + _, err = consumeAll(r) + return +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +// Decrypt decrypts an encrypted session key with the given private key. The +// private key must have been decrypted first. +// If config is nil, sensible defaults will be used. +func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { + var err error + var b []byte + + // TODO(agl): use session key decryption routines here to avoid + // padding oracle attacks. + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + k := priv.PrivateKey.(*rsa.PrivateKey) + b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes)) + case PubKeyAlgoElGamal: + c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) + b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) + default: + err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + if err != nil { + return err + } + + e.CipherFunc = CipherFunction(b[0]) + e.Key = b[1 : len(b)-2] + expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) + checksum := checksumKeyMaterial(e.Key) + if checksum != expectedChecksum { + return errors.StructuralError("EncryptedKey checksum incorrect") + } + + return nil +} + +// Serialize writes the encrypted key packet, e, to w. +func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + case PubKeyAlgoElGamal: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + writeMPIs(w, e.encryptedMPI1) + case PubKeyAlgoElGamal: + writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) + default: + panic("internal error") + } + + return nil +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + return writeMPI(w, 8*uint16(len(cipherText)), cipherText) +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + err = writeBig(w, c1) + if err != nil { + return err + } + return writeBig(w, c2) +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go new file mode 100644 index 0000000..f2fcf4d --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go @@ -0,0 +1,151 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/rsa" + "encoding/hex" + "fmt" + "math/big" + "testing" +) + +func bigFromBase10(s string) *big.Int { + b, ok := new(big.Int).SetString(s, 10) + if !ok { + panic("bigFromBase10 failed") + } + return b +} + +var encryptedKeyPub = rsa.PublicKey{ + E: 65537, + N: bigFromBase10("115804063926007623305902631768113868327816898845124614648849934718568541074358183759250136204762053879858102352159854352727097033322663029387610959884180306668628526686121021235757016368038585212410610742029286439607686208110250133174279811431933746643015923132833417396844716207301518956640020862630546868823"), +} + +var encryptedKeyRSAPriv = &rsa.PrivateKey{ + PublicKey: encryptedKeyPub, + D: bigFromBase10("32355588668219869544751561565313228297765464314098552250409557267371233892496951383426602439009993875125222579159850054973310859166139474359774543943714622292329487391199285040721944491839695981199720170366763547754915493640685849961780092241140181198779299712578774460837139360803883139311171713302987058393"), +} + +var encryptedKeyPriv = &PrivateKey{ + PublicKey: PublicKey{ + PubKeyAlgo: PubKeyAlgoRSA, + }, + PrivateKey: encryptedKeyRSAPriv, +} + +func TestDecryptingEncryptedKey(t *testing.T) { + for i, encryptedKeyHex := range []string{ + "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8", + // MPI can be shorter than the length of the key. + "c18b032a67d68660df41c70103f8e520c52ae9807183c669ce26e772e482dc5d8cf60e6f59316e145be14d2e5221ee69550db1d5618a8cb002a719f1f0b9345bde21536d410ec90ba86cac37748dec7933eb7f9873873b2d61d3321d1cd44535014f6df58f7bc0c7afb5edc38e1a974428997d2f747f9a173bea9ca53079b409517d332df62d805564cffc9be6", + } { + const expectedKeyHex = "d930363f7e0308c333b9618617ea728963d8df993665ae7be1092d4926fd864b" + + p, err := Read(readerFromHex(encryptedKeyHex)) + if err != nil { + t.Errorf("#%d: error from Read: %s", i, err) + return + } + ek, ok := p.(*EncryptedKey) + if !ok { + t.Errorf("#%d: didn't parse an EncryptedKey, got %#v", i, p) + return + } + + if ek.KeyId != 0x2a67d68660df41c7 || ek.Algo != PubKeyAlgoRSA { + t.Errorf("#%d: unexpected EncryptedKey contents: %#v", i, ek) + return + } + + err = ek.Decrypt(encryptedKeyPriv, nil) + if err != nil { + t.Errorf("#%d: error from Decrypt: %s", i, err) + return + } + + if ek.CipherFunc != CipherAES256 { + t.Errorf("#%d: unexpected EncryptedKey contents: %#v", i, ek) + return + } + + keyHex := fmt.Sprintf("%x", ek.Key) + if keyHex != expectedKeyHex { + t.Errorf("#%d: bad key, got %s want %s", i, keyHex, expectedKeyHex) + } + } +} + +func TestEncryptingEncryptedKey(t *testing.T) { + key := []byte{1, 2, 3, 4} + const expectedKeyHex = "01020304" + const keyId = 42 + + pub := &PublicKey{ + PublicKey: &encryptedKeyPub, + KeyId: keyId, + PubKeyAlgo: PubKeyAlgoRSAEncryptOnly, + } + + buf := new(bytes.Buffer) + err := SerializeEncryptedKey(buf, pub, CipherAES128, key, nil) + if err != nil { + t.Errorf("error writing encrypted key packet: %s", err) + } + + p, err := Read(buf) + if err != nil { + t.Errorf("error from Read: %s", err) + return + } + ek, ok := p.(*EncryptedKey) + if !ok { + t.Errorf("didn't parse an EncryptedKey, got %#v", p) + return + } + + if ek.KeyId != keyId || ek.Algo != PubKeyAlgoRSAEncryptOnly { + t.Errorf("unexpected EncryptedKey contents: %#v", ek) + return + } + + err = ek.Decrypt(encryptedKeyPriv, nil) + if err != nil { + t.Errorf("error from Decrypt: %s", err) + return + } + + if ek.CipherFunc != CipherAES128 { + t.Errorf("unexpected EncryptedKey contents: %#v", ek) + return + } + + keyHex := fmt.Sprintf("%x", ek.Key) + if keyHex != expectedKeyHex { + t.Errorf("bad key, got %s want %s", keyHex, expectedKeyHex) + } +} + +func TestSerializingEncryptedKey(t *testing.T) { + const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8" + + p, err := Read(readerFromHex(encryptedKeyHex)) + if err != nil { + t.Fatalf("error from Read: %s", err) + } + ek, ok := p.(*EncryptedKey) + if !ok { + t.Fatalf("didn't parse an EncryptedKey, got %#v", p) + } + + var buf bytes.Buffer + ek.Serialize(&buf) + + if bufHex := hex.EncodeToString(buf.Bytes()); bufHex != encryptedKeyHex { + t.Fatalf("serialization of encrypted key differed from original. Original was %s, but reserialized as %s", encryptedKeyHex, bufHex) + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/golang.org/x/crypto/openpgp/packet/literal.go new file mode 100644 index 0000000..1a9ec6e --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/literal.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "encoding/binary" + "io" +) + +// LiteralData represents an encrypted file. See RFC 4880, section 5.9. +type LiteralData struct { + IsBinary bool + FileName string + Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. + Body io.Reader +} + +// ForEyesOnly returns whether the contents of the LiteralData have been marked +// as especially sensitive. +func (l *LiteralData) ForEyesOnly() bool { + return l.FileName == "_CONSOLE" +} + +func (l *LiteralData) parse(r io.Reader) (err error) { + var buf [256]byte + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + + l.IsBinary = buf[0] == 'b' + fileNameLen := int(buf[1]) + + _, err = readFull(r, buf[:fileNameLen]) + if err != nil { + return + } + + l.FileName = string(buf[:fileNameLen]) + + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + + l.Time = binary.BigEndian.Uint32(buf[:4]) + l.Body = r + return +} + +// SerializeLiteral serializes a literal data packet to w and returns a +// WriteCloser to which the data itself can be written and which MUST be closed +// on completion. The fileName is truncated to 255 bytes. +func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { + var buf [4]byte + buf[0] = 't' + if isBinary { + buf[0] = 'b' + } + if len(fileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + + inner, err := serializeStreamHeader(w, packetTypeLiteralData) + if err != nil { + return + } + + _, err = inner.Write(buf[:2]) + if err != nil { + return + } + _, err = inner.Write([]byte(fileName)) + if err != nil { + return + } + binary.BigEndian.PutUint32(buf[:], time) + _, err = inner.Write(buf[:]) + if err != nil { + return + } + + plaintext = inner + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go new file mode 100644 index 0000000..ce2a33a --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go @@ -0,0 +1,143 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 + +package packet + +import ( + "crypto/cipher" +) + +type ocfbEncrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// An OCFBResyncOption determines if the "resynchronization step" of OCFB is +// performed. +type OCFBResyncOption bool + +const ( + OCFBResync OCFBResyncOption = true + OCFBNoResync OCFBResyncOption = false +) + +// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block, and an initial amount of +// ciphertext. randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point. +func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. If an incorrect key is detected then nil is returned. On +// successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if prefixCopy[blockSize-2] != prefixCopy[blockSize] || + prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { + return nil + } + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb_test.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb_test.go new file mode 100644 index 0000000..91022c0 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/ocfb_test.go @@ -0,0 +1,46 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/aes" + "crypto/rand" + "testing" +) + +var commonKey128 = []byte{0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c} + +func testOCFB(t *testing.T, resync OCFBResyncOption) { + block, err := aes.NewCipher(commonKey128) + if err != nil { + t.Error(err) + return + } + + plaintext := []byte("this is the plaintext, which is long enough to span several blocks.") + randData := make([]byte, block.BlockSize()) + rand.Reader.Read(randData) + ocfb, prefix := NewOCFBEncrypter(block, randData, resync) + ciphertext := make([]byte, len(plaintext)) + ocfb.XORKeyStream(ciphertext, plaintext) + + ocfbdec := NewOCFBDecrypter(block, prefix, resync) + if ocfbdec == nil { + t.Errorf("NewOCFBDecrypter failed (resync: %t)", resync) + return + } + plaintextCopy := make([]byte, len(plaintext)) + ocfbdec.XORKeyStream(plaintextCopy, ciphertext) + + if !bytes.Equal(plaintextCopy, plaintext) { + t.Errorf("got: %x, want: %x (resync: %t)", plaintextCopy, plaintext, resync) + } +} + +func TestOCFB(t *testing.T) { + testOCFB(t, OCFBNoResync) + testOCFB(t, OCFBResync) +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 0000000..1713503 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. +type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go new file mode 100644 index 0000000..456d807 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "golang.org/x/crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. +type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. +func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque_test.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque_test.go new file mode 100644 index 0000000..f27bbfe --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/opaque_test.go @@ -0,0 +1,67 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "encoding/hex" + "io" + "testing" +) + +// Test packet.Read error handling in OpaquePacket.Parse, +// which attempts to re-read an OpaquePacket as a supported +// Packet type. +func TestOpaqueParseReason(t *testing.T) { + buf, err := hex.DecodeString(UnsupportedKeyHex) + if err != nil { + t.Fatal(err) + } + or := NewOpaqueReader(bytes.NewBuffer(buf)) + count := 0 + badPackets := 0 + var uid *UserId + for { + op, err := or.Next() + if err == io.EOF { + break + } else if err != nil { + t.Errorf("#%d: opaque read error: %v", count, err) + break + } + // try to parse opaque packet + p, err := op.Parse() + switch pkt := p.(type) { + case *UserId: + uid = pkt + case *OpaquePacket: + // If an OpaquePacket can't re-parse, packet.Read + // certainly had its reasons. + if pkt.Reason == nil { + t.Errorf("#%d: opaque packet, no reason", count) + } else { + badPackets++ + } + } + count++ + } + + const expectedBad = 3 + // Test post-conditions, make sure we actually parsed packets as expected. + if badPackets != expectedBad { + t.Errorf("unexpected # unparseable packets: %d (want %d)", badPackets, expectedBad) + } + if uid == nil { + t.Errorf("failed to find expected UID in unsupported keyring") + } else if uid.Id != "Armin M. Warda " { + t.Errorf("unexpected UID: %v", uid.Id) + } +} + +// This key material has public key and signature packet versions modified to +// an unsupported value (1), so that trying to parse the OpaquePacket to +// a typed packet will get an error. It also contains a GnuPG trust packet. +// (Created with: od -An -t x1 pubring.gpg | xargs | sed 's/ //g') +const UnsupportedKeyHex = `988d012e7a18a20000010400d6ac00d92b89c1f4396c243abb9b76d2e9673ad63483291fed88e22b82e255e441c078c6abbbf7d2d195e50b62eeaa915b85b0ec20c225ce2c64c167cacb6e711daf2e45da4a8356a059b8160e3b3628ac0dd8437b31f06d53d6e8ea4214d4a26406a6b63e1001406ef23e0bb3069fac9a99a91f77dfafd5de0f188a5da5e3c9000511b42741726d696e204d2e205761726461203c7761726461406e657068696c696d2e727568722e64653e8900950105102e8936c705d1eb399e58489901013f0e03ff5a0c4f421e34fcfa388129166420c08cd76987bcdec6f01bd0271459a85cc22048820dd4e44ac2c7d23908d540f54facf1b36b0d9c20488781ce9dca856531e76e2e846826e9951338020a03a09b57aa5faa82e9267458bd76105399885ac35af7dc1cbb6aaed7c39e1039f3b5beda2c0e916bd38560509bab81235d1a0ead83b0020000` diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go new file mode 100644 index 0000000..625bb5a --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -0,0 +1,549 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "golang.org/x/crypto/openpgp/packet" + +import ( + "bufio" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rsa" + "io" + "math/big" + + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/openpgp/errors" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. +func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + for len(p) > 0 { + for power := uint(14); power < 32; power-- { + l := 1 << power + if len(p) >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(p[:l]) + n += m + if err != nil { + return + } + p = p[l:] + break + } + } + } + return +} + +func (w *partialLengthWriter) Close() error { + w.lengthByte[0] = 0 + _, err := w.w.Write(w.lengthByte[:]) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. +func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + var buf [6]byte + var n int + + buf[0] = 0x80 | 0x40 | byte(ptype) + if length < 192 { + buf[1] = byte(length) + n = 2 + } else if length < 8384 { + length -= 192 + buf[1] = 192 + byte(length>>8) + buf[2] = byte(length) + n = 3 + } else { + buf[1] = 255 + buf[2] = byte(length >> 24) + buf[3] = byte(length >> 16) + buf[4] = byte(length >> 8) + buf[5] = byte(length) + n = 6 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedMDC packetType = 18 +) + +// peekVersion detects the version of a public key packet about to +// be read. A bufio.Reader at the original position of the io.Reader +// is returned. +func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { + bufr = bufio.NewReader(r) + var verBuf []byte + if verBuf, err = bufr.Peek(1); err != nil { + return + } + ver = verBuf[0] + return +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + var version byte + // Detect signature version + if contents, version, err = peekVersion(contents); err != nil { + return + } + if version < 4 { + p = new(SignatureV3) + } else { + p = new(Signature) + } + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + var version byte + if contents, version, err = peekVersion(contents); err != nil { + return + } + isSubkey := tag == packetTypePublicSubkey + if version < 4 { + p = &PublicKeyV3{IsSubkey: isSubkey} + } else { + p = &PublicKey{IsSubkey: isSubkey} + } + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0 + SigTypeText = 1 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. + PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. +func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. +func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction uint8 + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case Cipher3DES: + return 24 + case CipherCAST5: + return cast5.KeySize + case CipherAES128: + return 16 + case CipherAES192: + return 24 + case CipherAES256: + return 32 + } + return 0 +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + switch cipher { + case Cipher3DES: + return des.BlockSize + case CipherCAST5: + return 8 + case CipherAES128, CipherAES192, CipherAES256: + return 16 + } + return 0 +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + switch cipher { + case Cipher3DES: + block, _ = des.NewTripleDESCipher(key) + case CipherCAST5: + block, _ = cast5.NewCipher(key) + case CipherAES128, CipherAES192, CipherAES256: + block, _ = aes.NewCipher(key) + } + return +} + +// readMPI reads a big integer from r. The bit length returned is the bit +// length that was specified in r. This is preserved so that the integer can be +// reserialized exactly. +func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { + var buf [2]byte + _, err = readFull(r, buf[0:]) + if err != nil { + return + } + bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + numBytes := (int(bitLength) + 7) / 8 + mpi = make([]byte, numBytes) + _, err = readFull(r, mpi) + // According to RFC 4880 3.2. we should check that the MPI has no leading + // zeroes (at least when not an encrypted MPI?), but this implementation + // does generate leading zeroes, so we keep accepting them. + return +} + +// writeMPI serializes a big integer to w. +func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { + // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. + // Implementations seem to be tolerant of them, and stripping them would + // make it complex to guarantee matching re-serialization. + _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) + if err == nil { + _, err = w.Write(mpiBytes) + } + return +} + +// writeBig serializes a *big.Int to w. +func writeBig(w io.Writer, i *big.Int) error { + return writeMPI(w, uint16(i.BitLen()), i.Bytes()) +} + +// padToKeySize left-pads a MPI with zeroes to match the length of the +// specified RSA public. +func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { + k := (pub.N.BitLen() + 7) / 8 + if len(b) >= k { + return b + } + bb := make([]byte, k) + copy(bb[len(bb)-len(b):], b) + return bb +} + +// CompressionAlgo Represents the different compression algorithms +// supported by OpenPGP (except for BZIP2, which is not currently +// supported). See Section 9.3 of RFC 4880. +type CompressionAlgo uint8 + +const ( + CompressionNone CompressionAlgo = 0 + CompressionZIP CompressionAlgo = 1 + CompressionZLIB CompressionAlgo = 2 +) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet_test.go b/vendor/golang.org/x/crypto/openpgp/packet/packet_test.go new file mode 100644 index 0000000..1dab5c3 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet_test.go @@ -0,0 +1,255 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "encoding/hex" + "fmt" + "golang.org/x/crypto/openpgp/errors" + "io" + "io/ioutil" + "testing" +) + +func TestReadFull(t *testing.T) { + var out [4]byte + + b := bytes.NewBufferString("foo") + n, err := readFull(b, out[:3]) + if n != 3 || err != nil { + t.Errorf("full read failed n:%d err:%s", n, err) + } + + b = bytes.NewBufferString("foo") + n, err = readFull(b, out[:4]) + if n != 3 || err != io.ErrUnexpectedEOF { + t.Errorf("partial read failed n:%d err:%s", n, err) + } + + b = bytes.NewBuffer(nil) + n, err = readFull(b, out[:3]) + if n != 0 || err != io.ErrUnexpectedEOF { + t.Errorf("empty read failed n:%d err:%s", n, err) + } +} + +func readerFromHex(s string) io.Reader { + data, err := hex.DecodeString(s) + if err != nil { + panic("readerFromHex: bad input") + } + return bytes.NewBuffer(data) +} + +var readLengthTests = []struct { + hexInput string + length int64 + isPartial bool + err error +}{ + {"", 0, false, io.ErrUnexpectedEOF}, + {"1f", 31, false, nil}, + {"c0", 0, false, io.ErrUnexpectedEOF}, + {"c101", 256 + 1 + 192, false, nil}, + {"e0", 1, true, nil}, + {"e1", 2, true, nil}, + {"e2", 4, true, nil}, + {"ff", 0, false, io.ErrUnexpectedEOF}, + {"ff00", 0, false, io.ErrUnexpectedEOF}, + {"ff0000", 0, false, io.ErrUnexpectedEOF}, + {"ff000000", 0, false, io.ErrUnexpectedEOF}, + {"ff00000000", 0, false, nil}, + {"ff01020304", 16909060, false, nil}, +} + +func TestReadLength(t *testing.T) { + for i, test := range readLengthTests { + length, isPartial, err := readLength(readerFromHex(test.hexInput)) + if test.err != nil { + if err != test.err { + t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err) + } + continue + } + if err != nil { + t.Errorf("%d: unexpected error: %s", i, err) + continue + } + if length != test.length || isPartial != test.isPartial { + t.Errorf("%d: bad result got:(%d,%t) want:(%d,%t)", i, length, isPartial, test.length, test.isPartial) + } + } +} + +var partialLengthReaderTests = []struct { + hexInput string + err error + hexOutput string +}{ + {"e0", io.ErrUnexpectedEOF, ""}, + {"e001", io.ErrUnexpectedEOF, ""}, + {"e0010102", nil, "0102"}, + {"ff00000000", nil, ""}, + {"e10102e1030400", nil, "01020304"}, + {"e101", io.ErrUnexpectedEOF, ""}, +} + +func TestPartialLengthReader(t *testing.T) { + for i, test := range partialLengthReaderTests { + r := &partialLengthReader{readerFromHex(test.hexInput), 0, true} + out, err := ioutil.ReadAll(r) + if test.err != nil { + if err != test.err { + t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err) + } + continue + } + if err != nil { + t.Errorf("%d: unexpected error: %s", i, err) + continue + } + + got := fmt.Sprintf("%x", out) + if got != test.hexOutput { + t.Errorf("%d: got:%s want:%s", i, test.hexOutput, got) + } + } +} + +var readHeaderTests = []struct { + hexInput string + structuralError bool + unexpectedEOF bool + tag int + length int64 + hexOutput string +}{ + {"", false, false, 0, 0, ""}, + {"7f", true, false, 0, 0, ""}, + + // Old format headers + {"80", false, true, 0, 0, ""}, + {"8001", false, true, 0, 1, ""}, + {"800102", false, false, 0, 1, "02"}, + {"81000102", false, false, 0, 1, "02"}, + {"820000000102", false, false, 0, 1, "02"}, + {"860000000102", false, false, 1, 1, "02"}, + {"83010203", false, false, 0, -1, "010203"}, + + // New format headers + {"c0", false, true, 0, 0, ""}, + {"c000", false, false, 0, 0, ""}, + {"c00102", false, false, 0, 1, "02"}, + {"c0020203", false, false, 0, 2, "0203"}, + {"c00202", false, true, 0, 2, ""}, + {"c3020203", false, false, 3, 2, "0203"}, +} + +func TestReadHeader(t *testing.T) { + for i, test := range readHeaderTests { + tag, length, contents, err := readHeader(readerFromHex(test.hexInput)) + if test.structuralError { + if _, ok := err.(errors.StructuralError); ok { + continue + } + t.Errorf("%d: expected StructuralError, got:%s", i, err) + continue + } + if err != nil { + if len(test.hexInput) == 0 && err == io.EOF { + continue + } + if !test.unexpectedEOF || err != io.ErrUnexpectedEOF { + t.Errorf("%d: unexpected error from readHeader: %s", i, err) + } + continue + } + if int(tag) != test.tag || length != test.length { + t.Errorf("%d: got:(%d,%d) want:(%d,%d)", i, int(tag), length, test.tag, test.length) + continue + } + + body, err := ioutil.ReadAll(contents) + if err != nil { + if !test.unexpectedEOF || err != io.ErrUnexpectedEOF { + t.Errorf("%d: unexpected error from contents: %s", i, err) + } + continue + } + if test.unexpectedEOF { + t.Errorf("%d: expected ErrUnexpectedEOF from contents but got no error", i) + continue + } + got := fmt.Sprintf("%x", body) + if got != test.hexOutput { + t.Errorf("%d: got:%s want:%s", i, got, test.hexOutput) + } + } +} + +func TestSerializeHeader(t *testing.T) { + tag := packetTypePublicKey + lengths := []int{0, 1, 2, 64, 192, 193, 8000, 8384, 8385, 10000} + + for _, length := range lengths { + buf := bytes.NewBuffer(nil) + serializeHeader(buf, tag, length) + tag2, length2, _, err := readHeader(buf) + if err != nil { + t.Errorf("length %d, err: %s", length, err) + } + if tag2 != tag { + t.Errorf("length %d, tag incorrect (got %d, want %d)", length, tag2, tag) + } + if int(length2) != length { + t.Errorf("length %d, length incorrect (got %d)", length, length2) + } + } +} + +func TestPartialLengths(t *testing.T) { + buf := bytes.NewBuffer(nil) + w := new(partialLengthWriter) + w.w = noOpCloser{buf} + + const maxChunkSize = 64 + + var b [maxChunkSize]byte + var n uint8 + for l := 1; l <= maxChunkSize; l++ { + for i := 0; i < l; i++ { + b[i] = n + n++ + } + m, err := w.Write(b[:l]) + if m != l { + t.Errorf("short write got: %d want: %d", m, l) + } + if err != nil { + t.Errorf("error from write: %s", err) + } + } + w.Close() + + want := (maxChunkSize * (maxChunkSize + 1)) / 2 + copyBuf := bytes.NewBuffer(nil) + r := &partialLengthReader{buf, 0, true} + m, err := io.Copy(copyBuf, r) + if m != int64(want) { + t.Errorf("short copy got: %d want: %d", m, want) + } + if err != nil { + t.Errorf("error from copy: %s", err) + } + + copyBytes := copyBuf.Bytes() + for i := 0; i < want; i++ { + if copyBytes[i] != uint8(i) { + t.Errorf("bad pattern in copy at %d", i) + break + } + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go new file mode 100644 index 0000000..34734cc --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go @@ -0,0 +1,380 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "crypto/sha1" + "io" + "io/ioutil" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer. + sha1Checksum bool + iv []byte +} + +func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +// NewSignerPrivateKey creates a sign-only PrivateKey from a crypto.Signer that +// implements RSA or ECDSA. +func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey { + pk := new(PrivateKey) + switch pubkey := signer.Public().(type) { + case rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey) + pk.PubKeyAlgo = PubKeyAlgoRSASignOnly + case ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey) + default: + panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + + s2kType := buf[0] + + switch s2kType { + case 0: + pk.s2k = nil + pk.Encrypted = false + case 254, 255: + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.Encrypted = true + pk.s2k, err = s2k.Parse(r) + if err != nil { + return + } + if s2kType == 254 { + pk.sha1Checksum = true + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + pk.encryptedData, err = ioutil.ReadAll(r) + if err != nil { + return + } + + if !pk.Encrypted { + return pk.parsePrivateKey(pk.encryptedData) + } + + return +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + // TODO(agl): support encrypted private keys + buf := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(buf) + if err != nil { + return + } + buf.WriteByte(0 /* no encryption */) + + privateKeyBuf := bytes.NewBuffer(nil) + + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(privateKeyBuf, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(privateKeyBuf, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(privateKeyBuf, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(privateKeyBuf, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + if err != nil { + return + } + + ptype := packetTypePrivateKey + contents := buf.Bytes() + privateKeyBytes := privateKeyBuf.Bytes() + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) + if err != nil { + return + } + _, err = w.Write(contents) + if err != nil { + return + } + _, err = w.Write(privateKeyBytes) + if err != nil { + return + } + + checksum := mod64kHash(privateKeyBytes) + var checksumBytes [2]byte + checksumBytes[0] = byte(checksum >> 8) + checksumBytes[1] = byte(checksum) + _, err = w.Write(checksumBytes[:]) + + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + err := writeBig(w, priv.D) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[1]) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[0]) + if err != nil { + return err + } + return writeBig(w, priv.Precomputed.Qinv) +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + return writeBig(w, priv.D) +} + +// Decrypt decrypts an encrypted private key using a passphrase. +func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + return pk.parsePrivateKey(data) +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + p, _, err := readMPI(buf) + if err != nil { + return + } + q, _, err := readMPI(buf) + if err != nil { + return + } + + rsaPriv.D = new(big.Int).SetBytes(d) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q) + if err := rsaPriv.Validate(); err != nil { + return err + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + dsaPriv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = dsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + priv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = priv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + + pk.PrivateKey = &ecdsa.PrivateKey{ + PublicKey: *ecdsaPub, + D: new(big.Int).SetBytes(d), + } + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key_test.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key_test.go new file mode 100644 index 0000000..ac651d9 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key_test.go @@ -0,0 +1,270 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "hash" + "io" + "testing" + "time" +) + +var privateKeyTests = []struct { + privateKeyHex string + creationTime time.Time +}{ + { + privKeyRSAHex, + time.Unix(0x4cc349a8, 0), + }, + { + privKeyElGamalHex, + time.Unix(0x4df9ee1a, 0), + }, +} + +func TestPrivateKeyRead(t *testing.T) { + for i, test := range privateKeyTests { + packet, err := Read(readerFromHex(test.privateKeyHex)) + if err != nil { + t.Errorf("#%d: failed to parse: %s", i, err) + continue + } + + privKey := packet.(*PrivateKey) + + if !privKey.Encrypted { + t.Errorf("#%d: private key isn't encrypted", i) + continue + } + + err = privKey.Decrypt([]byte("wrong password")) + if err == nil { + t.Errorf("#%d: decrypted with incorrect key", i) + continue + } + + err = privKey.Decrypt([]byte("testing")) + if err != nil { + t.Errorf("#%d: failed to decrypt: %s", i, err) + continue + } + + if !privKey.CreationTime.Equal(test.creationTime) || privKey.Encrypted { + t.Errorf("#%d: bad result, got: %#v", i, privKey) + } + } +} + +func populateHash(hashFunc crypto.Hash, msg []byte) (hash.Hash, error) { + h := hashFunc.New() + if _, err := h.Write(msg); err != nil { + return nil, err + } + return h, nil +} + +func TestRSAPrivateKey(t *testing.T) { + privKeyDER, _ := hex.DecodeString(pkcs1PrivKeyHex) + rsaPriv, err := x509.ParsePKCS1PrivateKey(privKeyDER) + if err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + if err := NewRSAPrivateKey(time.Now(), rsaPriv).Serialize(&buf); err != nil { + t.Fatal(err) + } + + p, err := Read(&buf) + if err != nil { + t.Fatal(err) + } + + priv, ok := p.(*PrivateKey) + if !ok { + t.Fatal("didn't parse private key") + } + + sig := &Signature{ + PubKeyAlgo: PubKeyAlgoRSA, + Hash: crypto.SHA256, + } + msg := []byte("Hello World!") + + h, err := populateHash(sig.Hash, msg) + if err != nil { + t.Fatal(err) + } + if err := sig.Sign(h, priv, nil); err != nil { + t.Fatal(err) + } + + if h, err = populateHash(sig.Hash, msg); err != nil { + t.Fatal(err) + } + if err := priv.VerifySignature(h, sig); err != nil { + t.Fatal(err) + } +} + +func TestECDSAPrivateKey(t *testing.T) { + ecdsaPriv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + if err := NewECDSAPrivateKey(time.Now(), ecdsaPriv).Serialize(&buf); err != nil { + t.Fatal(err) + } + + p, err := Read(&buf) + if err != nil { + t.Fatal(err) + } + + priv, ok := p.(*PrivateKey) + if !ok { + t.Fatal("didn't parse private key") + } + + sig := &Signature{ + PubKeyAlgo: PubKeyAlgoECDSA, + Hash: crypto.SHA256, + } + msg := []byte("Hello World!") + + h, err := populateHash(sig.Hash, msg) + if err != nil { + t.Fatal(err) + } + if err := sig.Sign(h, priv, nil); err != nil { + t.Fatal(err) + } + + if h, err = populateHash(sig.Hash, msg); err != nil { + t.Fatal(err) + } + if err := priv.VerifySignature(h, sig); err != nil { + t.Fatal(err) + } +} + +type rsaSigner struct { + priv *rsa.PrivateKey +} + +func (s *rsaSigner) Public() crypto.PublicKey { + return s.priv.PublicKey +} + +func (s *rsaSigner) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) { + return s.priv.Sign(rand, msg, opts) +} + +func TestRSASignerPrivateKey(t *testing.T) { + rsaPriv, err := rsa.GenerateKey(rand.Reader, 1024) + if err != nil { + t.Fatal(err) + } + + priv := NewSignerPrivateKey(time.Now(), &rsaSigner{rsaPriv}) + + if priv.PubKeyAlgo != PubKeyAlgoRSASignOnly { + t.Fatal("NewSignerPrivateKey should have made a sign-only RSA private key") + } + + sig := &Signature{ + PubKeyAlgo: PubKeyAlgoRSASignOnly, + Hash: crypto.SHA256, + } + msg := []byte("Hello World!") + + h, err := populateHash(sig.Hash, msg) + if err != nil { + t.Fatal(err) + } + if err := sig.Sign(h, priv, nil); err != nil { + t.Fatal(err) + } + + if h, err = populateHash(sig.Hash, msg); err != nil { + t.Fatal(err) + } + if err := priv.VerifySignature(h, sig); err != nil { + t.Fatal(err) + } +} + +type ecdsaSigner struct { + priv *ecdsa.PrivateKey +} + +func (s *ecdsaSigner) Public() crypto.PublicKey { + return s.priv.PublicKey +} + +func (s *ecdsaSigner) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) { + return s.priv.Sign(rand, msg, opts) +} + +func TestECDSASignerPrivateKey(t *testing.T) { + ecdsaPriv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + + priv := NewSignerPrivateKey(time.Now(), &ecdsaSigner{ecdsaPriv}) + + if priv.PubKeyAlgo != PubKeyAlgoECDSA { + t.Fatal("NewSignerPrivateKey should have made an ECSDA private key") + } + + sig := &Signature{ + PubKeyAlgo: PubKeyAlgoECDSA, + Hash: crypto.SHA256, + } + msg := []byte("Hello World!") + + h, err := populateHash(sig.Hash, msg) + if err != nil { + t.Fatal(err) + } + if err := sig.Sign(h, priv, nil); err != nil { + t.Fatal(err) + } + + if h, err = populateHash(sig.Hash, msg); err != nil { + t.Fatal(err) + } + if err := priv.VerifySignature(h, sig); err != nil { + t.Fatal(err) + } +} + +func TestIssue11505(t *testing.T) { + // parsing a rsa private key with p or q == 1 used to panic due to a divide by zero + _, _ = Read(readerFromHex("9c3004303030300100000011303030000000000000010130303030303030303030303030303030303030303030303030303030303030303030303030303030303030")) +} + +// Generated with `gpg --export-secret-keys "Test Key 2"` +const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" + +// Generated by `gpg --export-secret-keys` followed by a manual extraction of +// the ElGamal subkey from the packets. +const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" + +// pkcs1PrivKeyHex is a PKCS#1, RSA private key. +// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p` +const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7" diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go new file mode 100644 index 0000000..fcd5f52 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go @@ -0,0 +1,753 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" +) + +var ( + // NIST curve P-256 + oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} + // NIST curve P-384 + oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} + // NIST curve P-521 + oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} +) + +const maxOIDLength = 8 + +// ecdsaKey stores the algorithm-specific fields for ECDSA keys. +// as defined in RFC 6637, Section 9. +type ecdsaKey struct { + // oid contains the OID byte sequence identifying the elliptic curve used + oid []byte + // p contains the elliptic curve point that represents the public key + p parsedMPI +} + +// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. +func parseOID(r io.Reader) (oid []byte, err error) { + buf := make([]byte, maxOIDLength) + if _, err = readFull(r, buf[:1]); err != nil { + return + } + oidLen := buf[0] + if int(oidLen) > len(buf) { + err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) + return + } + oid = buf[:oidLen] + _, err = readFull(r, oid) + return +} + +func (f *ecdsaKey) parse(r io.Reader) (err error) { + if f.oid, err = parseOID(r); err != nil { + return err + } + f.p.bytes, f.p.bitLength, err = readMPI(r) + return +} + +func (f *ecdsaKey) serialize(w io.Writer) (err error) { + buf := make([]byte, maxOIDLength+1) + buf[0] = byte(len(f.oid)) + copy(buf[1:], f.oid) + if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { + return + } + return writeMPIs(w, f.p) +} + +func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { + var c elliptic.Curve + if bytes.Equal(f.oid, oidCurveP256) { + c = elliptic.P256() + } else if bytes.Equal(f.oid, oidCurveP384) { + c = elliptic.P384() + } else if bytes.Equal(f.oid, oidCurveP521) { + c = elliptic.P521() + } else { + return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) + } + x, y := elliptic.Unmarshal(c, f.p.bytes) + if x == nil { + return nil, errors.UnsupportedError("failed to parse EC point") + } + return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil +} + +func (f *ecdsaKey) byteLen() int { + return 1 + len(f.oid) + 2 + len(f.p.bytes) +} + +type kdfHashFunction byte +type kdfAlgorithm byte + +// ecdhKdf stores key derivation function parameters +// used for ECDH encryption. See RFC 6637, Section 9. +type ecdhKdf struct { + KdfHash kdfHashFunction + KdfAlgo kdfAlgorithm +} + +func (f *ecdhKdf) parse(r io.Reader) (err error) { + buf := make([]byte, 1) + if _, err = readFull(r, buf); err != nil { + return + } + kdfLen := int(buf[0]) + if kdfLen < 3 { + return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + buf = make([]byte, kdfLen) + if _, err = readFull(r, buf); err != nil { + return + } + reserved := int(buf[0]) + f.KdfHash = kdfHashFunction(buf[1]) + f.KdfAlgo = kdfAlgorithm(buf[2]) + if reserved != 0x01 { + return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) + } + return +} + +func (f *ecdhKdf) serialize(w io.Writer) (err error) { + buf := make([]byte, 4) + // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. + buf[0] = byte(0x03) // Length of the following fields + buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now + buf[2] = byte(f.KdfHash) + buf[3] = byte(f.KdfAlgo) + _, err = w.Write(buf[:]) + return +} + +func (f *ecdhKdf) byteLen() int { + return 4 +} + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey + Fingerprint [20]byte + KeyId uint64 + IsSubkey bool + + n, e, p, q, g, y parsedMPI + + // RFC 6637 fields + ec *ecdsaKey + ecdh *ecdhKdf +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +func fromBig(n *big.Int) parsedMPI { + return parsedMPI{ + bytes: n.Bytes(), + bitLength: uint16(n.BitLen()), + } +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: fromBig(pub.P), + q: fromBig(pub.Q), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. +func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: fromBig(pub.P), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + ec: new(ecdsaKey), + } + + switch pub.Curve { + case elliptic.P256(): + pk.ec.oid = oidCurveP256 + case elliptic.P384(): + pk.ec.oid = oidCurveP384 + case elliptic.P521(): + pk.ec.oid = oidCurveP521 + default: + panic("unknown elliptic curve") + } + + pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + + // The bit length is 3 (for the 0x04 specifying an uncompressed key) + // plus two field elements (for x and y), which are rounded up to the + // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 + fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 + pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return err + } + pk.PublicKey, err = pk.ec.newECDSA() + case PubKeyAlgoECDH: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return + } + pk.ecdh = new(ecdhKdf) + if err = pk.ecdh.parse(r); err != nil { + return + } + // The ECDH key is stored in an ecdsa.PublicKey for convenience. + pk.PublicKey, err = pk.ec.newECDSA() + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKey) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := sha1.New() + pk.SerializeSignaturePrefix(fingerPrint) + pk.serializeWithoutHeaders(fingerPrint) + copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n.bytes, pk.n.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.e.bytes, pk.e.bitLength, err = readMPI(r) + if err != nil { + return + } + + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.bytes), + E: 0, + } + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.q.bytes, pk.q.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.bytes) + dsa.Q = new(big.Int).SetBytes(pk.q.bytes) + dsa.G = new(big.Int).SetBytes(pk.g.bytes) + dsa.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.bytes) + elgamal.G = new(big.Int).SetBytes(pk.g.bytes) + elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = elgamal + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + case PubKeyAlgoDSA: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.q.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoElGamal: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoECDSA: + pLength += uint16(pk.ec.byteLen()) + case PubKeyAlgoECDH: + pLength += uint16(pk.ec.byteLen()) + pLength += uint16(pk.ecdh.byteLen()) + default: + panic("unknown public key algorithm") + } + pLength += 6 + h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + case PubKeyAlgoDSA: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.q.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoElGamal: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoECDSA: + length += pk.ec.byteLen() + case PubKeyAlgoECDH: + length += pk.ec.byteLen() + length += pk.ecdh.byteLen() + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [6]byte + buf[0] = 4 + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + buf[5] = byte(pk.PubKeyAlgo) + + _, err = w.Write(buf[:]) + if err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + case PubKeyAlgoDSA: + return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) + case PubKeyAlgoElGamal: + return writeMPIs(w, pk.p, pk.g, pk.y) + case PubKeyAlgoECDSA: + return pk.ec.serialize(w) + case PubKeyAlgoECDH: + if err = pk.ec.serialize(w); err != nil { + return + } + return pk.ecdh.serialize(w) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + case PubKeyAlgoDSA: + dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + default: + panic("shouldn't happen") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + signed.SerializeSignaturePrefix(h) + signed.serializeWithoutHeaders(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// A parsedMPI is used to store the contents of a big integer, along with the +// bit length that was specified in the original input. This allows the MPI to +// be reserialized exactly. +type parsedMPI struct { + bytes []byte + bitLength uint16 +} + +// writeMPIs is a utility function for serializing several big integers to the +// given Writer. +func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { + for _, mpi := range mpis { + err = writeMPI(w, mpi.bitLength, mpi.bytes) + if err != nil { + return + } + } + return +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + case PubKeyAlgoDSA: + bitLength = pk.p.bitLength + case PubKeyAlgoElGamal: + bitLength = pk.p.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_test.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_test.go new file mode 100644 index 0000000..103696e --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key_test.go @@ -0,0 +1,228 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/hex" + "math/big" + "testing" + "time" +) + +var pubKeyTests = []struct { + hexData string + hexFingerprint string + creationTime time.Time + pubKeyAlgo PublicKeyAlgorithm + keyId uint64 + keyIdString string + keyIdShort string +}{ + {rsaPkDataHex, rsaFingerprintHex, time.Unix(0x4d3c5c10, 0), PubKeyAlgoRSA, 0xa34d7e18c20c31bb, "A34D7E18C20C31BB", "C20C31BB"}, + {dsaPkDataHex, dsaFingerprintHex, time.Unix(0x4d432f89, 0), PubKeyAlgoDSA, 0x8e8fbe54062f19ed, "8E8FBE54062F19ED", "062F19ED"}, + {ecdsaPkDataHex, ecdsaFingerprintHex, time.Unix(0x5071c294, 0), PubKeyAlgoECDSA, 0x43fe956c542ca00b, "43FE956C542CA00B", "542CA00B"}, +} + +func TestPublicKeyRead(t *testing.T) { + for i, test := range pubKeyTests { + packet, err := Read(readerFromHex(test.hexData)) + if err != nil { + t.Errorf("#%d: Read error: %s", i, err) + continue + } + pk, ok := packet.(*PublicKey) + if !ok { + t.Errorf("#%d: failed to parse, got: %#v", i, packet) + continue + } + if pk.PubKeyAlgo != test.pubKeyAlgo { + t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo) + } + if !pk.CreationTime.Equal(test.creationTime) { + t.Errorf("#%d: bad creation time got:%v want:%v", i, pk.CreationTime, test.creationTime) + } + expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint) + if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) { + t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint) + } + if pk.KeyId != test.keyId { + t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId) + } + if g, e := pk.KeyIdString(), test.keyIdString; g != e { + t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e) + } + if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e { + t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e) + } + } +} + +func TestPublicKeySerialize(t *testing.T) { + for i, test := range pubKeyTests { + packet, err := Read(readerFromHex(test.hexData)) + if err != nil { + t.Errorf("#%d: Read error: %s", i, err) + continue + } + pk, ok := packet.(*PublicKey) + if !ok { + t.Errorf("#%d: failed to parse, got: %#v", i, packet) + continue + } + serializeBuf := bytes.NewBuffer(nil) + err = pk.Serialize(serializeBuf) + if err != nil { + t.Errorf("#%d: failed to serialize: %s", i, err) + continue + } + + packet, err = Read(serializeBuf) + if err != nil { + t.Errorf("#%d: Read error (from serialized data): %s", i, err) + continue + } + pk, ok = packet.(*PublicKey) + if !ok { + t.Errorf("#%d: failed to parse serialized data, got: %#v", i, packet) + continue + } + } +} + +func TestEcc384Serialize(t *testing.T) { + r := readerFromHex(ecc384PubHex) + var w bytes.Buffer + for i := 0; i < 2; i++ { + // Public key + p, err := Read(r) + if err != nil { + t.Error(err) + } + pubkey := p.(*PublicKey) + if !bytes.Equal(pubkey.ec.oid, []byte{0x2b, 0x81, 0x04, 0x00, 0x22}) { + t.Errorf("Unexpected pubkey OID: %x", pubkey.ec.oid) + } + if !bytes.Equal(pubkey.ec.p.bytes[:5], []byte{0x04, 0xf6, 0xb8, 0xc5, 0xac}) { + t.Errorf("Unexpected pubkey P[:5]: %x", pubkey.ec.p.bytes) + } + if pubkey.KeyId != 0x098033880F54719F { + t.Errorf("Unexpected pubkey ID: %x", pubkey.KeyId) + } + err = pubkey.Serialize(&w) + if err != nil { + t.Error(err) + } + // User ID + p, err = Read(r) + if err != nil { + t.Error(err) + } + uid := p.(*UserId) + if uid.Id != "ec_dsa_dh_384 " { + t.Error("Unexpected UID:", uid.Id) + } + err = uid.Serialize(&w) + if err != nil { + t.Error(err) + } + // User ID Sig + p, err = Read(r) + if err != nil { + t.Error(err) + } + uidSig := p.(*Signature) + err = pubkey.VerifyUserIdSignature(uid.Id, pubkey, uidSig) + if err != nil { + t.Error(err, ": UID") + } + err = uidSig.Serialize(&w) + if err != nil { + t.Error(err) + } + // Subkey + p, err = Read(r) + if err != nil { + t.Error(err) + } + subkey := p.(*PublicKey) + if !bytes.Equal(subkey.ec.oid, []byte{0x2b, 0x81, 0x04, 0x00, 0x22}) { + t.Errorf("Unexpected subkey OID: %x", subkey.ec.oid) + } + if !bytes.Equal(subkey.ec.p.bytes[:5], []byte{0x04, 0x2f, 0xaa, 0x84, 0x02}) { + t.Errorf("Unexpected subkey P[:5]: %x", subkey.ec.p.bytes) + } + if subkey.ecdh.KdfHash != 0x09 { + t.Error("Expected KDF hash function SHA384 (0x09), got", subkey.ecdh.KdfHash) + } + if subkey.ecdh.KdfAlgo != 0x09 { + t.Error("Expected KDF symmetric alg AES256 (0x09), got", subkey.ecdh.KdfAlgo) + } + if subkey.KeyId != 0xAA8B938F9A201946 { + t.Errorf("Unexpected subkey ID: %x", subkey.KeyId) + } + err = subkey.Serialize(&w) + if err != nil { + t.Error(err) + } + // Subkey Sig + p, err = Read(r) + if err != nil { + t.Error(err) + } + subkeySig := p.(*Signature) + err = pubkey.VerifyKeySignature(subkey, subkeySig) + if err != nil { + t.Error(err) + } + err = subkeySig.Serialize(&w) + if err != nil { + t.Error(err) + } + // Now read back what we've written again + r = bytes.NewBuffer(w.Bytes()) + w.Reset() + } +} + +func TestP256KeyID(t *testing.T) { + // Confirm that key IDs are correctly calculated for ECC keys. + ecdsaPub := &ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: fromHex("81fbbc20eea9e8d1c3ceabb0a8185925b113d1ac42cd5c78403bd83da19235c6"), + Y: fromHex("5ed6db13d91db34507d0129bf88981878d29adbf8fcd1720afdb767bb3fcaaff"), + } + pub := NewECDSAPublicKey(time.Unix(1297309478, 0), ecdsaPub) + + const want = uint64(0xd01055fbcadd268e) + if pub.KeyId != want { + t.Errorf("want key ID: %x, got %x", want, pub.KeyId) + } +} + +func fromHex(hex string) *big.Int { + n, ok := new(big.Int).SetString(hex, 16) + if !ok { + panic("bad hex number: " + hex) + } + return n +} + +const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" + +const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" + +const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" + +const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" + +const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" + +const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" + +// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key +const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go new file mode 100644 index 0000000..5daf7b6 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go @@ -0,0 +1,279 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/md5" + "crypto/rsa" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" +) + +// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and +// should not be used for signing or encrypting. They are supported here only for +// parsing version 3 key material and validating signatures. +// See RFC 4880, section 5.5.2. +type PublicKeyV3 struct { + CreationTime time.Time + DaysToExpire uint16 + PubKeyAlgo PublicKeyAlgorithm + PublicKey *rsa.PublicKey + Fingerprint [16]byte + KeyId uint64 + IsSubkey bool + + n, e parsedMPI +} + +// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. +// Included here for testing purposes only. RFC 4880, section 5.5.2: +// "an implementation MUST NOT generate a V3 key, but MAY accept it." +func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { + pk := &PublicKeyV3{ + CreationTime: creationTime, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKeyV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [8]byte + if _, err = readFull(r, buf[:]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKeyV3) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := md5.New() + fingerPrint.Write(pk.n.bytes) + fingerPrint.Write(pk.e.bytes) + fingerPrint.Sum(pk.Fingerprint[:0]) + pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { + if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { + return + } + if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { + return + } + + // RFC 4880 Section 12.2 requires the low 8 bytes of the + // modulus to form the key id. + if len(pk.n.bytes) < 8 { + return errors.StructuralError("v3 public key modulus is too short") + } + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + default: + panic("unknown public key algorithm") + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { + length := 8 // 8 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + if err = serializeHeader(w, packetType, length); err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [8]byte + // Version 3 + buf[0] = 3 + // Creation time + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + // Days to expire + buf[5] = byte(pk.DaysToExpire >> 8) + buf[6] = byte(pk.DaysToExpire) + // Public key algorithm + buf[7] = byte(pk.PubKeyAlgo) + + if _, err = w.Write(buf[:]); err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKeyV3) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + default: + // V3 public keys only support RSA. + panic("shouldn't happen") + } +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// userIdSignatureV3Hash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { + if !hfn.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hfn.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + h.Write([]byte(id)) + + return +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKeyV3) KeyIdString() string { + return fmt.Sprintf("%X", pk.KeyId) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKeyV3) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go new file mode 100644 index 0000000..e064059 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go @@ -0,0 +1,82 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "encoding/hex" + "testing" + "time" +) + +var pubKeyV3Test = struct { + hexFingerprint string + creationTime time.Time + pubKeyAlgo PublicKeyAlgorithm + keyId uint64 + keyIdString string + keyIdShort string +}{ + "103BECF5BD1E837C89D19E98487767F7", + time.Unix(779753634, 0), + PubKeyAlgoRSA, + 0xDE0F188A5DA5E3C9, + "DE0F188A5DA5E3C9", + "5DA5E3C9"} + +func TestPublicKeyV3Read(t *testing.T) { + i, test := 0, pubKeyV3Test + packet, err := Read(v3KeyReader(t)) + if err != nil { + t.Fatalf("#%d: Read error: %s", i, err) + } + pk, ok := packet.(*PublicKeyV3) + if !ok { + t.Fatalf("#%d: failed to parse, got: %#v", i, packet) + } + if pk.PubKeyAlgo != test.pubKeyAlgo { + t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo) + } + if !pk.CreationTime.Equal(test.creationTime) { + t.Errorf("#%d: bad creation time got:%v want:%v", i, pk.CreationTime, test.creationTime) + } + expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint) + if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) { + t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint) + } + if pk.KeyId != test.keyId { + t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId) + } + if g, e := pk.KeyIdString(), test.keyIdString; g != e { + t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e) + } + if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e { + t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e) + } +} + +func TestPublicKeyV3Serialize(t *testing.T) { + //for i, test := range pubKeyV3Tests { + i := 0 + packet, err := Read(v3KeyReader(t)) + if err != nil { + t.Fatalf("#%d: Read error: %s", i, err) + } + pk, ok := packet.(*PublicKeyV3) + if !ok { + t.Fatalf("#%d: failed to parse, got: %#v", i, packet) + } + var serializeBuf bytes.Buffer + if err = pk.Serialize(&serializeBuf); err != nil { + t.Fatalf("#%d: failed to serialize: %s", i, err) + } + + if packet, err = Read(bytes.NewBuffer(serializeBuf.Bytes())); err != nil { + t.Fatalf("#%d: Read error (from serialized data): %s", i, err) + } + if pk, ok = packet.(*PublicKeyV3); !ok { + t.Fatalf("#%d: failed to parse serialized data, got: %#v", i, packet) + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go new file mode 100644 index 0000000..34bc7c6 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/reader.go @@ -0,0 +1,76 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go new file mode 100644 index 0000000..6ce0cbe --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature.go @@ -0,0 +1,731 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + CreationTime time.Time + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI + ECDSASigR, ECDSASigS parsedMPI + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + IssuerKeyId *uint64 + IsPrimaryId *bool + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. + RevocationReason *uint8 + RevocationReasonText string + + // MDC is set if this signature has a feature packet that indicates + // support for MDC subpackets. + MDC bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. + EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + l := 6 + hashedSubpacketsLength + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + copy(sig.HashSuffix[1:], buf[:5]) + hashedSubpackets := sig.HashSuffix[6:l] + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + // See RFC 4880, section 5.2.4 + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = uint8(l >> 24) + trailer[3] = uint8(l >> 16) + trailer[4] = uint8(l >> 8) + trailer[5] = uint8(l) + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoECDSA: + sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. +func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. In practice, the subpacket is used exclusively to + // indicate support for MDC-protected encryption. + sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. + if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. + if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired. +func (sig *Signature) KeyExpired(currentTime time.Time) bool { + if sig.KeyLifetimeSecs == nil { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix() (err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + + var ok bool + l := 6 + hashedSubpacketsLen + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + sig.HashSuffix[1] = uint8(sig.SigType) + sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) + sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) + sig.HashSuffix[5] = byte(hashedSubpacketsLen) + serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = byte(l >> 24) + trailer[3] = byte(l >> 16) + trailer[4] = byte(l >> 8) + trailer[5] = byte(l) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + err = sig.buildHashSuffix() + if err != nil { + return + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + sig.outSubpackets = sig.buildSubpackets() + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + // supports both *rsa.PrivateKey and crypto.Signer + sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err == nil { + sig.DSASigR.bytes = r.Bytes() + sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) + sig.DSASigS.bytes = s.Bytes() + sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) + } + case PubKeyAlgoECDSA: + var r, s *big.Int + if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { + // direct support, avoid asn1 wrapping/unwrapping + r, s, err = ecdsa.Sign(config.Random(), pk, digest) + } else { + var b []byte + b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil) + if err == nil { + r, s, err = unwrapECDSASig(b) + } + } + if err == nil { + sig.ECDSASigR = fromBig(r) + sig.ECDSASigS = fromBig(s) + } + default: + err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) + } + + return +} + +// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA +// signature. +func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { + var ecsdaSig struct { + R, S *big.Int + } + _, err = asn1.Unmarshal(b, &ecsdaSig) + if err != nil { + return + } + return ecsdaSig.R, ecsdaSig.S, nil +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. +func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = 2 + len(sig.RSASignature.bytes) + case PubKeyAlgoDSA: + sigLength = 2 + len(sig.DSASigR.bytes) + sigLength += 2 + len(sig.DSASigS.bytes) + case PubKeyAlgoECDSA: + sigLength = 2 + len(sig.ECDSASigR.bytes) + sigLength += 2 + len(sig.ECDSASigS.bytes) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + + _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) + if err != nil { + return + } + + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + case PubKeyAlgoECDSA: + err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. + + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + // The following subpackets may only appear in self-signatures + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_test.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_test.go new file mode 100644 index 0000000..56e7611 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature_test.go @@ -0,0 +1,78 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "encoding/hex" + "testing" +) + +func TestSignatureRead(t *testing.T) { + packet, err := Read(readerFromHex(signatureDataHex)) + if err != nil { + t.Error(err) + return + } + sig, ok := packet.(*Signature) + if !ok || sig.SigType != SigTypeBinary || sig.PubKeyAlgo != PubKeyAlgoRSA || sig.Hash != crypto.SHA1 { + t.Errorf("failed to parse, got: %#v", packet) + } +} + +func TestSignatureReserialize(t *testing.T) { + packet, _ := Read(readerFromHex(signatureDataHex)) + sig := packet.(*Signature) + out := new(bytes.Buffer) + err := sig.Serialize(out) + if err != nil { + t.Errorf("error reserializing: %s", err) + return + } + + expected, _ := hex.DecodeString(signatureDataHex) + if !bytes.Equal(expected, out.Bytes()) { + t.Errorf("output doesn't match input (got vs expected):\n%s\n%s", hex.Dump(out.Bytes()), hex.Dump(expected)) + } +} + +func TestSignUserId(t *testing.T) { + sig := &Signature{ + SigType: SigTypeGenericCert, + PubKeyAlgo: PubKeyAlgoRSA, + Hash: 0, // invalid hash function + } + + packet, err := Read(readerFromHex(rsaPkDataHex)) + if err != nil { + t.Fatalf("failed to deserialize public key: %v", err) + } + pubKey := packet.(*PublicKey) + + packet, err = Read(readerFromHex(privKeyRSAHex)) + if err != nil { + t.Fatalf("failed to deserialize private key: %v", err) + } + privKey := packet.(*PrivateKey) + + err = sig.SignUserId("", pubKey, privKey, nil) + if err == nil { + t.Errorf("did not receive an error when expected") + } + + sig.Hash = crypto.SHA256 + err = privKey.Decrypt([]byte("testing")) + if err != nil { + t.Fatalf("failed to decrypt private key: %v", err) + } + + err = sig.SignUserId("", pubKey, privKey, nil) + if err != nil { + t.Errorf("failed to sign user id: %v", err) + } +} + +const signatureDataHex = "c2c05c04000102000605024cb45112000a0910ab105c91af38fb158f8d07ff5596ea368c5efe015bed6e78348c0f033c931d5f2ce5db54ce7f2a7e4b4ad64db758d65a7a71773edeab7ba2a9e0908e6a94a1175edd86c1d843279f045b021a6971a72702fcbd650efc393c5474d5b59a15f96d2eaad4c4c426797e0dcca2803ef41c6ff234d403eec38f31d610c344c06f2401c262f0993b2e66cad8a81ebc4322c723e0d4ba09fe917e8777658307ad8329adacba821420741009dfe87f007759f0982275d028a392c6ed983a0d846f890b36148c7358bdb8a516007fac760261ecd06076813831a36d0459075d1befa245ae7f7fb103d92ca759e9498fe60ef8078a39a3beda510deea251ea9f0a7f0df6ef42060f20780360686f3e400e" diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go new file mode 100644 index 0000000..6edff88 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go @@ -0,0 +1,146 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// SignatureV3 represents older version 3 signatures. These signatures are less secure +// than version 4 and should not be used to create new signatures. They are included +// here for backwards compatibility to read and validate with older key material. +// See RFC 4880, section 5.2.2. +type SignatureV3 struct { + SigType SignatureType + CreationTime time.Time + IssuerKeyId uint64 + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + HashTag [2]byte + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI +} + +func (sig *SignatureV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.2 + var buf [8]byte + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] != 5 { + err = errors.UnsupportedError( + "invalid hashed material length " + strconv.Itoa(int(buf[0]))) + return + } + + // Read hashed material: signature type + creation time + if _, err = readFull(r, buf[:5]); err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + t := binary.BigEndian.Uint32(buf[1:5]) + sig.CreationTime = time.Unix(int64(t), 0) + + // Eight-octet Key ID of signer. + if _, err = readFull(r, buf[:8]); err != nil { + return + } + sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) + + // Public-key and hash algorithm + if _, err = readFull(r, buf[:2]); err != nil { + return + } + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + var ok bool + if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + // Two-octet field holding left 16 bits of signed hash value. + if _, err = readFull(r, sig.HashTag[:2]); err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { + return + } + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + default: + panic("unreachable") + } + return +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. +func (sig *SignatureV3) Serialize(w io.Writer) (err error) { + buf := make([]byte, 8) + + // Write the sig type and creation time + buf[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + // Write the issuer long key ID + binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) + if _, err = w.Write(buf[:8]); err != nil { + return + } + + // Write public key algorithm, hash ID, and hash value + buf[0] = byte(sig.PubKeyAlgo) + hashId, ok := s2k.HashToHashId(sig.Hash) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) + } + buf[1] = hashId + copy(buf[2:4], sig.HashTag[:]) + if _, err = w.Write(buf[:4]); err != nil { + return + } + + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + default: + panic("impossible") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3_test.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3_test.go new file mode 100644 index 0000000..ad7b62a --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3_test.go @@ -0,0 +1,92 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "encoding/hex" + "io" + "io/ioutil" + "testing" + + "golang.org/x/crypto/openpgp/armor" +) + +func TestSignatureV3Read(t *testing.T) { + r := v3KeyReader(t) + Read(r) // Skip public key + Read(r) // Skip uid + packet, err := Read(r) // Signature + if err != nil { + t.Error(err) + return + } + sig, ok := packet.(*SignatureV3) + if !ok || sig.SigType != SigTypeGenericCert || sig.PubKeyAlgo != PubKeyAlgoRSA || sig.Hash != crypto.MD5 { + t.Errorf("failed to parse, got: %#v", packet) + } +} + +func TestSignatureV3Reserialize(t *testing.T) { + r := v3KeyReader(t) + Read(r) // Skip public key + Read(r) // Skip uid + packet, err := Read(r) + if err != nil { + t.Error(err) + return + } + sig := packet.(*SignatureV3) + out := new(bytes.Buffer) + if err = sig.Serialize(out); err != nil { + t.Errorf("error reserializing: %s", err) + return + } + expected, err := ioutil.ReadAll(v3KeyReader(t)) + if err != nil { + t.Error(err) + return + } + expected = expected[4+141+4+39:] // See pgpdump offsets below, this is where the sig starts + if !bytes.Equal(expected, out.Bytes()) { + t.Errorf("output doesn't match input (got vs expected):\n%s\n%s", hex.Dump(out.Bytes()), hex.Dump(expected)) + } +} + +func v3KeyReader(t *testing.T) io.Reader { + armorBlock, err := armor.Decode(bytes.NewBufferString(keySigV3Armor)) + if err != nil { + t.Fatalf("armor Decode failed: %v", err) + } + return armorBlock.Body +} + +// keySigV3Armor is some V3 public key I found in an SKS dump. +// Old: Public Key Packet(tag 6)(141 bytes) +// Ver 4 - new +// Public key creation time - Fri Sep 16 17:13:54 CDT 1994 +// Pub alg - unknown(pub 0) +// Unknown public key(pub 0) +// Old: User ID Packet(tag 13)(39 bytes) +// User ID - Armin M. Warda +// Old: Signature Packet(tag 2)(149 bytes) +// Ver 4 - new +// Sig type - unknown(05) +// Pub alg - ElGamal Encrypt-Only(pub 16) +// Hash alg - unknown(hash 46) +// Hashed Sub: unknown(sub 81, critical)(1988 bytes) +const keySigV3Armor = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: SKS 1.0.10 + +mI0CLnoYogAAAQQA1qwA2SuJwfQ5bCQ6u5t20ulnOtY0gykf7YjiK4LiVeRBwHjGq7v30tGV +5Qti7qqRW4Ww7CDCJc4sZMFnystucR2vLkXaSoNWoFm4Fg47NiisDdhDezHwbVPW6OpCFNSi +ZAamtj4QAUBu8j4LswafrJqZqR9336/V3g8Yil2l48kABRG0J0FybWluIE0uIFdhcmRhIDx3 +YXJkYUBuZXBoaWxpbS5ydWhyLmRlPoiVAgUQLok2xwXR6zmeWEiZAQE/DgP/WgxPQh40/Po4 +gSkWZCDAjNdph7zexvAb0CcUWahcwiBIgg3U5ErCx9I5CNVA9U+s8bNrDZwgSIeBzp3KhWUx +524uhGgm6ZUTOAIKA6CbV6pfqoLpJnRYvXYQU5mIWsNa99wcu2qu18OeEDnztb7aLA6Ra9OF +YFCbq4EjXRoOrYM= +=LPjs +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 0000000..744c2d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/cipher" + "io" + "strconv" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// This is the largest session key that we'll support. Since no 512-bit cipher +// has even been seriously used, this is comfortably large. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. +type SymmetricKeyEncrypted struct { + CipherFunc CipherFunction + s2k func(out, in []byte) + encryptedKey []byte +} + +const symmetricKeyEncryptedVersion = 4 + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + // RFC 4880, section 5.3. + var buf [2]byte + if _, err := readFull(r, buf[:]); err != nil { + return err + } + if buf[0] != symmetricKeyEncryptedVersion { + return errors.UnsupportedError("SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + + if ske.CipherFunc.KeySize() == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + var err error + ske.s2k, err = s2k.Parse(r) + if err != nil { + return err + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. + n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + + "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") + } + return plaintextKey, cipherFunc, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The +// packet contains a random session key, encrypted by a key derived from the +// given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. + err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + var buf [2]byte + buf[0] = symmetricKeyEncryptedVersion + buf[1] = byte(cipherFunc) + _, err = w.Write(buf[:]) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + + key = sessionKey + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go new file mode 100644 index 0000000..e1d52c1 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go @@ -0,0 +1,117 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "encoding/hex" + "io" + "io/ioutil" + "testing" +) + +func TestSymmetricKeyEncrypted(t *testing.T) { + buf := readerFromHex(symmetricallyEncryptedHex) + packet, err := Read(buf) + if err != nil { + t.Errorf("failed to read SymmetricKeyEncrypted: %s", err) + return + } + ske, ok := packet.(*SymmetricKeyEncrypted) + if !ok { + t.Error("didn't find SymmetricKeyEncrypted packet") + return + } + key, cipherFunc, err := ske.Decrypt([]byte("password")) + if err != nil { + t.Error(err) + return + } + + packet, err = Read(buf) + if err != nil { + t.Errorf("failed to read SymmetricallyEncrypted: %s", err) + return + } + se, ok := packet.(*SymmetricallyEncrypted) + if !ok { + t.Error("didn't find SymmetricallyEncrypted packet") + return + } + r, err := se.Decrypt(cipherFunc, key) + if err != nil { + t.Error(err) + return + } + + contents, err := ioutil.ReadAll(r) + if err != nil && err != io.EOF { + t.Error(err) + return + } + + expectedContents, _ := hex.DecodeString(symmetricallyEncryptedContentsHex) + if !bytes.Equal(expectedContents, contents) { + t.Errorf("bad contents got:%x want:%x", contents, expectedContents) + } +} + +const symmetricallyEncryptedHex = "8c0d04030302371a0b38d884f02060c91cf97c9973b8e58e028e9501708ccfe618fb92afef7fa2d80ddadd93cf" +const symmetricallyEncryptedContentsHex = "cb1062004d14c4df636f6e74656e74732e0a" + +func TestSerializeSymmetricKeyEncryptedCiphers(t *testing.T) { + tests := [...]struct { + cipherFunc CipherFunction + name string + }{ + {Cipher3DES, "Cipher3DES"}, + {CipherCAST5, "CipherCAST5"}, + {CipherAES128, "CipherAES128"}, + {CipherAES192, "CipherAES192"}, + {CipherAES256, "CipherAES256"}, + } + + for _, test := range tests { + var buf bytes.Buffer + passphrase := []byte("testing") + config := &Config{ + DefaultCipher: test.cipherFunc, + } + + key, err := SerializeSymmetricKeyEncrypted(&buf, passphrase, config) + if err != nil { + t.Errorf("cipher(%s) failed to serialize: %s", test.name, err) + continue + } + + p, err := Read(&buf) + if err != nil { + t.Errorf("cipher(%s) failed to reparse: %s", test.name, err) + continue + } + + ske, ok := p.(*SymmetricKeyEncrypted) + if !ok { + t.Errorf("cipher(%s) parsed a different packet type: %#v", test.name, p) + continue + } + + if ske.CipherFunc != config.DefaultCipher { + t.Errorf("cipher(%s) SKE cipher function is %d (expected %d)", test.name, ske.CipherFunc, config.DefaultCipher) + } + parsedKey, parsedCipherFunc, err := ske.Decrypt(passphrase) + if err != nil { + t.Errorf("cipher(%s) failed to decrypt reparsed SKE: %s", test.name, err) + continue + } + if !bytes.Equal(key, parsedKey) { + t.Errorf("cipher(%s) keys don't match after Decrypt: %x (original) vs %x (parsed)", test.name, key, parsedKey) + } + if parsedCipherFunc != test.cipherFunc { + t.Errorf("cipher(%s) cipher function doesn't match after Decrypt: %d (original) vs %d (parsed)", + test.name, test.cipherFunc, parsedCipherFunc) + } + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 0000000..6126030 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,290 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "golang.org/x/crypto/openpgp/errors" + "hash" + "io" + "strconv" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted contents of the +// packet can be read. An incorrect key can, with high probability, be detected +// immediately and this will result in a KeyIncorrect error being returned. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. + ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + if s == nil { + return nil, errors.ErrKeyIncorrect + } + + plaintext := cipher.StreamReader{S: s, R: se.contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. +const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.SignatureError("error during reading") + } + + for !ser.eof { + // We haven't seen EOF so we need to read to the end + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.SignatureError("error during reading") + } + } + + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.SignatureError("MDC packet not found") + } + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.SignatureError("hash mismatch") + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintains a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. +type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go new file mode 100644 index 0000000..c5c00f7 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "golang.org/x/crypto/openpgp/errors" + "io" + "io/ioutil" + "testing" +) + +// TestReader wraps a []byte and returns reads of a specific length. +type testReader struct { + data []byte + stride int +} + +func (t *testReader) Read(buf []byte) (n int, err error) { + n = t.stride + if n > len(t.data) { + n = len(t.data) + } + if n > len(buf) { + n = len(buf) + } + copy(buf, t.data) + t.data = t.data[n:] + if len(t.data) == 0 { + err = io.EOF + } + return +} + +func testMDCReader(t *testing.T) { + mdcPlaintext, _ := hex.DecodeString(mdcPlaintextHex) + + for stride := 1; stride < len(mdcPlaintext)/2; stride++ { + r := &testReader{data: mdcPlaintext, stride: stride} + mdcReader := &seMDCReader{in: r, h: sha1.New()} + body, err := ioutil.ReadAll(mdcReader) + if err != nil { + t.Errorf("stride: %d, error: %s", stride, err) + continue + } + if !bytes.Equal(body, mdcPlaintext[:len(mdcPlaintext)-22]) { + t.Errorf("stride: %d: bad contents %x", stride, body) + continue + } + + err = mdcReader.Close() + if err != nil { + t.Errorf("stride: %d, error on Close: %s", stride, err) + } + } + + mdcPlaintext[15] ^= 80 + + r := &testReader{data: mdcPlaintext, stride: 2} + mdcReader := &seMDCReader{in: r, h: sha1.New()} + _, err := ioutil.ReadAll(mdcReader) + if err != nil { + t.Errorf("corruption test, error: %s", err) + return + } + err = mdcReader.Close() + if err == nil { + t.Error("corruption: no error") + } else if _, ok := err.(*errors.SignatureError); !ok { + t.Errorf("corruption: expected SignatureError, got: %s", err) + } +} + +const mdcPlaintextHex = "a302789c3b2d93c4e0eb9aba22283539b3203335af44a134afb800c849cb4c4de10200aff40b45d31432c80cb384299a0655966d6939dfdeed1dddf980" + +func TestSerialize(t *testing.T) { + buf := bytes.NewBuffer(nil) + c := CipherAES128 + key := make([]byte, c.KeySize()) + + w, err := SerializeSymmetricallyEncrypted(buf, c, key, nil) + if err != nil { + t.Errorf("error from SerializeSymmetricallyEncrypted: %s", err) + return + } + + contents := []byte("hello world\n") + + w.Write(contents) + w.Close() + + p, err := Read(buf) + if err != nil { + t.Errorf("error from Read: %s", err) + return + } + + se, ok := p.(*SymmetricallyEncrypted) + if !ok { + t.Errorf("didn't read a *SymmetricallyEncrypted") + return + } + + r, err := se.Decrypt(c, key) + if err != nil { + t.Errorf("error from Decrypt: %s", err) + return + } + + contentsCopy := bytes.NewBuffer(nil) + _, err = io.Copy(contentsCopy, r) + if err != nil { + t.Errorf("error from io.Copy: %s", err) + return + } + if !bytes.Equal(contentsCopy.Bytes(), contents) { + t.Errorf("contents not equal got: %x want: %x", contentsCopy.Bytes(), contents) + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go new file mode 100644 index 0000000..96a2b38 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" + "io/ioutil" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. + 0, 0, 0, 0, + 0, 0, 0, 0} + if _, err = buf.Write(data); err != nil { + return + } + if err = jpeg.Encode(&buf, photo, nil); err != nil { + return + } + uat.Contents = append(uat.Contents, &OpaqueSubpacket{ + SubType: UserAttrImageSubpacket, + Contents: buf.Bytes()}) + } + return +} + +// NewUserAttribute creates a new user attribute packet containing the given subpackets. +func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { + return &UserAttribute{Contents: contents} +} + +func (uat *UserAttribute) parse(r io.Reader) (err error) { + // RFC 4880, section 5.13 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uat.Contents, err = OpaqueSubpackets(b) + return +} + +// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including +// header. +func (uat *UserAttribute) Serialize(w io.Writer) (err error) { + var buf bytes.Buffer + for _, sp := range uat.Contents { + sp.Serialize(&buf) + } + if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { + return err + } + _, err = w.Write(buf.Bytes()) + return +} + +// ImageData returns zero or more byte slices, each containing +// JPEG File Interchange Format (JFIF), for each photo in the +// the user attribute packet. +func (uat *UserAttribute) ImageData() (imageData [][]byte) { + for _, sp := range uat.Contents { + if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { + imageData = append(imageData, sp.Contents[16:]) + } + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute_test.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute_test.go new file mode 100644 index 0000000..13ca514 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userattribute_test.go @@ -0,0 +1,109 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "encoding/base64" + "image/color" + "image/jpeg" + "testing" +) + +func TestParseUserAttribute(t *testing.T) { + r := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(userAttributePacket)) + for i := 0; i < 2; i++ { + p, err := Read(r) + if err != nil { + t.Fatal(err) + } + uat := p.(*UserAttribute) + imgs := uat.ImageData() + if len(imgs) != 1 { + t.Errorf("Unexpected number of images in user attribute packet: %d", len(imgs)) + } + if len(imgs[0]) != 3395 { + t.Errorf("Unexpected JPEG image size: %d", len(imgs[0])) + } + img, err := jpeg.Decode(bytes.NewBuffer(imgs[0])) + if err != nil { + t.Errorf("Error decoding JPEG image: %v", err) + } + // A pixel in my right eye. + pixel := color.NRGBAModel.Convert(img.At(56, 36)) + ref := color.NRGBA{R: 157, G: 128, B: 124, A: 255} + if pixel != ref { + t.Errorf("Unexpected pixel color: %v", pixel) + } + w := bytes.NewBuffer(nil) + err = uat.Serialize(w) + if err != nil { + t.Errorf("Error writing user attribute: %v", err) + } + r = bytes.NewBuffer(w.Bytes()) + } +} + +const userAttributePacket = ` +0cyWzJQBEAABAQAAAAAAAAAAAAAAAP/Y/+AAEEpGSUYAAQIAAAEAAQAA/9sAQwAFAwQEBAMFBAQE +BQUFBgcMCAcHBwcPCgsJDBEPEhIRDxEQExYcFxMUGhUQERghGBocHR8fHxMXIiQiHiQcHh8e/9sA +QwEFBQUHBgcOCAgOHhQRFB4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4e +Hh4eHh4eHh4e/8AAEQgAZABkAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYH +CAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHw +JDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6 +g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk +5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIB +AgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEX +GBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKT +lJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX2 +9/j5+v/aAAwDAQACEQMRAD8A5uGP06VehQ4pIox04q5EnHSvAep+hIIl4zVuMHGPWmRrUWtalaaN +pU2oXsgSGJSxPr6ClvoitErs0Itqjc7BQOpPAFYmrfEnwjojtHNqaXEynBjtx5hH4jj9a8B8d+Od +W8UXZjWR4LJT+7t0Jwfc+prnIdO1CWZEW2mZ3HyDactXXDB3V5s8evm1namj6r0H4weCLtxG+ova +ueP30RA/MV6not1bX0Ed1ZzxzwyDKvGwZSPqK+Ff+ES8R8t/ZV2oHUmM10Hgbxp4m8BatEfNnWBH +/eWshOxx9Kmpg4te49RUM1kn+8Wh9zQ4P1FaMC7l465rjPh14y0fxnoseoaXOpfaPOgJ+eI98j09 +67W19M15bi4uzPSqTU480WXkjZkAyAR61DPE6OCSOalWRRgZxjvTb598sfU4FBwx5uY4T4feIm8P +TeJbAgc65NIM+8cX+FFeLfF3Vr3SfiNrMFrMypJMJcDPUqP8KK+kpVFyLU+ar037SXqX4hxVpMY7 +1UhPpVlT2rybKx9smWYz3NeH/EDVLzxt40j8O6bITaQybPlbKkjq39K9O8fasdH8IahfKxWQRFIy +Ou9uB/OuE/Z/0y3j1d9TuyoZCMs5xjuea1pLli5nn46q240l13PcfhN8EvDNtpcEl/CklyVBLuMk +mvU/Dfwo0BL/AO13FjEDD/qyV7Vn+CvGPg8zRpJrVm8ikLtEg6+1ew2dxZ3EQaJgysuQPasH7eXW +1zzsbVhT92kk/PsYieEND+zlPs6c/wCyAPyryH4wfCPRtW0u6j+xRLOxLxSoADkDpXY+MPjJ4c0S +9k082d3O8ZKkxw5XI96ytK+IGk+IpFjRpod+Qq3C7QT6A1E6NenaXbqRg6rlLlqS0fRnxjpd1r/w +w8afa7GWRPKbZLGeBKmeVNfZngLxNaeKfDdprVjxHcLlkJ5Vh1H5185/tDad9h8XOsqAw3Cb0cjq +CfX61P8AsveKf7L8T3fhe5nxa3g324YniQdh9R/KuivTdSmp9TXB1/Z1nRlsfU249QBx1pWfcwI7 +Cq6u2Ovamb9rYz16V5x7Psz5q/aJhZfibcupIElvE3H+7j+lFbXx9szP45jlUfeso8/99OKK9elL +3EeNVopzZVharCtxVRGGMk02S5JyFOB69zWTieypnL/GksfB+0cr9oQt69awPhPpD69Y3Ky3DWth +CWluGU4LAdq3vibGs/g68BJygVxjrwRW5+ztoRv/AAs8EeCZnO/J/hzz/Kumi4wp3kePjlOdZKPY +ml8Mvo6WM9ppi7J0EkQYMzkb1X0wW+bJHGACa+ivg14huZPCkjXUO6SImIYOQAP6UQ2sGneHmiWF +CYoSAAuM8etXfhBpMr+EZ3SSNRcMx6ZxWdes6ytBGSwkMNFuo7pnP614Ut9Zn1C4uLySKcwObGFA +Qnm4+XcR71h+CfDHiKCQWuv2YWFtw+bBZQD8rcE8n2Ney+GbGGQSM6I7xvtI681rXdp8hKRRp6t3 +FYPE1VDlsY1nQjWdl+J8w/tOeDZZ/AMd/EGefTHyxxyYjwfyODXg3waRh8UtEcFh+8Jb8FNfZPxh +Ak8J6nbPIsiyW7LnseK+Ofh99ptPHFnf2lu0y2twGcKuSEPB/Q1WHk50miq1o14TXU+xop+On61H +NMC6Nis1LgsAcUTSt1APFcXJZn0EqmhyvxA037friTYziBV6f7Tf40Vr3k4aXLx5OMZIzRXZB2ik +efJXbPHJJcnaD9aN2R1qoGO8/WkuLlIV+YjdjpXSonQ5lTxfiTwzqCnkeQxx9BWx+zPrQsrBFYja +zEfrXL6lfie3khcjY6lSPUGud+G3iA6FrY0uQ/KJsA9gCa0jSvFpnBi6tpKSPu++nsIfDFxeXciR +qIicscY4rxTwB8RUkn1axsPEf2LTYx85kTGzqCUP8VcJ47+JOs+I0Hhq1njjt/ufIeSvq1VtE+Gs +eoaUbSHUrkHdu3WtuX5Ix81XRh7OL5jirVpV5Whdn0F8C/iX4auVn0i612T7bASoe8wjTAd89K9g +vtSt5NMa4t5lkRhgOh3Dn6V8aaz8KZrIR3OlQ6r56LySmSxxz06Vo/CHx34h0rxBP4XvJ5AjK2RP +nEbAEj6ZxjPrWM6fMmoswqJxqJ1VZnqHxn1NLPwveqWHmNC2BnnNcD8DfDkGi+CH1m+ijN1qMzNA +4GSIiAMf+hVxPxU8Tapc3c0F9MGCn5GU5BX0Pau3+HmrT3XgXSIJCBHDGdgAx1NYSpezha52Yauq +1dya2Wh2onAIwTj1p0lxxWWLkhRyCKWa5O3ORXOos9KVQluZm83j0oqi84JyWH50Vdmc7ep43d3I +t1Z2Iz2FYdxeSTsxyRnvTdVuDNcNluM9KrKcg817NOnZGNbEXdkNckjrXGeIIprPxFFdRHAlIwem +COtdmxrG8Q2cd/ZNExw45RvQ1bVjim+dWNzw7eaTD4mN3dndCQCo6hmI5zXpj/Ea/wBHjkh0kwRW +xXEfl4yTxXzXZalJDL9nuWKMmRnHcV2Hh3WreCyYXW2SWQhd5P3F6n+lS43d2cTm6d7Ox9EWPxH1 +ODQxPqWpCaSU/ukUc4z3/WvKW8UhviAdaMewYZG98gj9c1ymoa8LyWOJHwkTDaVPb0qpr+q2m6Nb +cfvNo349az9mou9iZVXNWbub3jm98/Vza2ReV7lsJg/e3dsV654UR9N0K0sZP9ZDGFbHr3rzL4P+ +H7rXfEEWr3I3W1qf3IYdW9fwqDxf4k8UeH/G95p08kscHmk25dPlZT0we9YTj7SXKjpw1aNG8mj3 +FLv5ccU959ycnmvKPDnxB82YQarGsZPAlTp+IrvIr1ZIgySKwIyCOhFYTpyg9T0qWIhVV4svzPvf +IdhgY4orPachj81FRdmtzxqdiZmJ9aQEgdqZcPtmbJ71DJcAZ5r20kkeXJtsfPIQDwPzrG1a+S3i +LyHAHvmp7y7HOD1rlNdm+1T7Acovf3o+J2RMpezjzMvrob67pX9o2ShZlYgg/wAWKxZLLWLZ/Ke3 +mVh14yK9M+BMC3dre2ko3LHKCB7EV7EngeGQJdQ7HyBkMKS0djgq1W3c+XtK03U522RwzsTwNiEk +ntXoHgf4calql9El/G8UZbLfLyfr7V9FeGvh+s+0Lbxxcglu2K1NW1nwN4Gk/wBLuI57tV5jjwzE +/QVNS+0dWYRqNvXRFv4eeCodKsY1ggVIY1G3K4z714h+1Jqul3GpwaXYeXJLbzgyyrg4b+6D+HNb +vjz436zq9m+naHF/ZdkeGfOZXH17V4Vqt2b29K+ZuOc5bnce5zWdPBShL2lTfojSeJhy+zp/NjVz +1Bwa6DSfFGq6fbJFDKrov8DjPFcu97ZxsUe4jVhwVJ5Bpp1mwQiLewJPXacVq6fNpYyjOUXdHoKf +EG8VQHsInbuVcgflRXnt5fIs2FYHgcgUVi8LG+xusdW/mN7U2KgEVkTzPt60UVfQ9eHxGHrV1MGi +iD4V25x1qvdgLAMd6KK0pbHm4x++dp8FtUubLxJ5EIjMc+A4Za+qfD8pe1JZVOBmiinW3RyRPMfi +R8QPE638+k2l6LK0Hylbddhb6nOa80mlkcmWR2kcnlnOSaKK7qCXKcNdu5narcSrAoBxvODWJIga +VckjDdqKKwq/EaQ0gUdbjQ6mr7QGBUcd6tPBC6gtGpOOuKKKie5qn7qIpEXd0HSiiimSf//Z` diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go new file mode 100644 index 0000000..d6bea7d --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userid.go @@ -0,0 +1,160 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + "io/ioutil" + "strings" +) + +// UserId contains text that is intended to represent the name and email +// address of the key holder. See RFC 4880, section 5.11. By convention, this +// takes the form "Full Name (Comment) " +type UserId struct { + Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. + + Name, Comment, Email string +} + +func hasInvalidCharacters(s string) bool { + for _, c := range s { + switch c { + case '(', ')', '<', '>', 0: + return true + } + } + return false +} + +// NewUserId returns a UserId or nil if any of the arguments contain invalid +// characters. The invalid characters are '\x00', '(', ')', '<' and '>' +func NewUserId(name, comment, email string) *UserId { + // RFC 4880 doesn't deal with the structure of userid strings; the + // name, comment and email form is just a convention. However, there's + // no convention about escaping the metacharacters and GPG just refuses + // to create user ids where, say, the name contains a '('. We mirror + // this behaviour. + + if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { + return nil + } + + uid := new(UserId) + uid.Name, uid.Comment, uid.Email = name, comment, email + uid.Id = name + if len(comment) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "(" + uid.Id += comment + uid.Id += ")" + } + if len(email) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "<" + uid.Id += email + uid.Id += ">" + } + return uid +} + +func (uid *UserId) parse(r io.Reader) (err error) { + // RFC 4880, section 5.11 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uid.Id = string(b) + uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) + return +} + +// Serialize marshals uid to w in the form of an OpenPGP packet, including +// header. +func (uid *UserId) Serialize(w io.Writer) error { + err := serializeHeader(w, packetTypeUserId, len(uid.Id)) + if err != nil { + return err + } + _, err = w.Write([]byte(uid.Id)) + return err +} + +// parseUserId extracts the name, comment and email from a user id string that +// is formatted as "Full Name (Comment) ". +func parseUserId(id string) (name, comment, email string) { + var n, c, e struct { + start, end int + } + var state int + + for offset, rune := range id { + switch state { + case 0: + // Entering name + n.start = offset + state = 1 + fallthrough + case 1: + // In name + if rune == '(' { + state = 2 + n.end = offset + } else if rune == '<' { + state = 5 + n.end = offset + } + case 2: + // Entering comment + c.start = offset + state = 3 + fallthrough + case 3: + // In comment + if rune == ')' { + state = 4 + c.end = offset + } + case 4: + // Between comment and email + if rune == '<' { + state = 5 + } + case 5: + // Entering email + e.start = offset + state = 6 + fallthrough + case 6: + // In email + if rune == '>' { + state = 7 + e.end = offset + } + default: + // After email + } + } + switch state { + case 1: + // ended in the name + n.end = len(id) + case 3: + // ended in comment + c.end = len(id) + case 6: + // ended in email + e.end = len(id) + } + + name = strings.TrimSpace(id[n.start:n.end]) + comment = strings.TrimSpace(id[c.start:c.end]) + email = strings.TrimSpace(id[e.start:e.end]) + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid_test.go b/vendor/golang.org/x/crypto/openpgp/packet/userid_test.go new file mode 100644 index 0000000..2968193 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userid_test.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "testing" +) + +var userIdTests = []struct { + id string + name, comment, email string +}{ + {"", "", "", ""}, + {"John Smith", "John Smith", "", ""}, + {"John Smith ()", "John Smith", "", ""}, + {"John Smith () <>", "John Smith", "", ""}, + {"(comment", "", "comment", ""}, + {"(comment)", "", "comment", ""}, + {" sdfk", "", "", "email"}, + {" John Smith ( Comment ) asdkflj < email > lksdfj", "John Smith", "Comment", "email"}, + {" John Smith < email > lksdfj", "John Smith", "", "email"}, + {"("}, + {"foo", "bar", "", "foo (bar)"}, + {"foo", "", "baz", "foo "}, + {"", "bar", "baz", "(bar) "}, + {"foo", "bar", "baz", "foo (bar) "}, +} + +func TestNewUserId(t *testing.T) { + for i, test := range newUserIdTests { + uid := NewUserId(test.name, test.comment, test.email) + if uid == nil { + t.Errorf("#%d: returned nil", i) + continue + } + if uid.Id != test.id { + t.Errorf("#%d: got '%s', want '%s'", i, uid.Id, test.id) + } + } +} + +var invalidNewUserIdTests = []struct { + name, comment, email string +}{ + {"foo(", "", ""}, + {"foo<", "", ""}, + {"", "bar)", ""}, + {"", "bar<", ""}, + {"", "", "baz>"}, + {"", "", "baz)"}, + {"", "", "baz\x00"}, +} + +func TestNewUserIdWithInvalidInput(t *testing.T) { + for i, test := range invalidNewUserIdTests { + if uid := NewUserId(test.name, test.comment, test.email); uid != nil { + t.Errorf("#%d: returned non-nil value: %#v", i, uid) + } + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go new file mode 100644 index 0000000..6ec664f --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/read.go @@ -0,0 +1,442 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package openpgp implements high level operations on OpenPGP messages. +package openpgp // import "golang.org/x/crypto/openpgp" + +import ( + "crypto" + _ "crypto/sha256" + "hash" + "io" + "strconv" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + SignatureError error // nil if the signature is good. + Signature *packet.Signature // the signature packet itself, if v4 (default) + SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. +func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + var se *packet.SymmetricallyEncrypted + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted: + se = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. +FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if len(pk.encryptedKey.Key) == 0 { + pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + } + if len(pk.encryptedKey.Key) == 0 { + continue + } + decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + if err == nil { + decrypted, err = se.Decrypt(cipherFunc, key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + return readSignedMessage(packets, md, keyring) +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. +func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if !p.IsLast { + return nil, errors.UnsupportedError("nested signatures") + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md = nil + return + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.SignedBy != nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (n int, err error) { + n, err = cr.md.LiteralData.Body.Read(buf) + if err == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + return +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. +type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails +} + +func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { + n, err = scr.md.LiteralData.Body.Read(buf) + scr.wrappedHash.Write(buf[:n]) + if err == io.EOF { + var p packet.Packet + p, scr.md.SignatureError = scr.packets.Next() + if scr.md.SignatureError != nil { + return + } + + var ok bool + if scr.md.Signature, ok = p.(*packet.Signature); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) + } else { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") + return + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + } + return +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + switch sig := p.(type) { + case *packet.Signature: + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + case *packet.SignatureV3: + issuerKeyId = sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + default: + return nil, errors.StructuralError("non signature packet found") + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err + } + + for _, key := range keys { + switch sig := p.(type) { + case *packet.Signature: + err = key.PublicKey.VerifySignature(h, sig) + case *packet.SignatureV3: + err = key.PublicKey.VerifySignatureV3(h, sig) + default: + panic("unreachable") + } + + if err == nil { + return key.Entity, nil + } + } + + return nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. +func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body) +} diff --git a/vendor/golang.org/x/crypto/openpgp/read_test.go b/vendor/golang.org/x/crypto/openpgp/read_test.go new file mode 100644 index 0000000..1fbfbac --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/read_test.go @@ -0,0 +1,613 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "bytes" + _ "crypto/sha512" + "encoding/hex" + "io" + "io/ioutil" + "strings" + "testing" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" +) + +func readerFromHex(s string) io.Reader { + data, err := hex.DecodeString(s) + if err != nil { + panic("readerFromHex: bad input") + } + return bytes.NewBuffer(data) +} + +func TestReadKeyRing(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(testKeys1And2Hex)) + if err != nil { + t.Error(err) + return + } + if len(kring) != 2 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB || uint32(kring[1].PrimaryKey.KeyId) != 0x1E35246B { + t.Errorf("bad keyring: %#v", kring) + } +} + +func TestRereadKeyRing(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(testKeys1And2Hex)) + if err != nil { + t.Errorf("error in initial parse: %s", err) + return + } + out := new(bytes.Buffer) + err = kring[0].Serialize(out) + if err != nil { + t.Errorf("error in serialization: %s", err) + return + } + kring, err = ReadKeyRing(out) + if err != nil { + t.Errorf("error in second parse: %s", err) + return + } + + if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB { + t.Errorf("bad keyring: %#v", kring) + } +} + +func TestReadPrivateKeyRing(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) + if err != nil { + t.Error(err) + return + } + if len(kring) != 2 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB || uint32(kring[1].PrimaryKey.KeyId) != 0x1E35246B || kring[0].PrimaryKey == nil { + t.Errorf("bad keyring: %#v", kring) + } +} + +func TestReadDSAKey(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(dsaTestKeyHex)) + if err != nil { + t.Error(err) + return + } + if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0x0CCC0360 { + t.Errorf("bad parse: %#v", kring) + } +} + +func TestReadP256Key(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(p256TestKeyHex)) + if err != nil { + t.Error(err) + return + } + if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0x5918513E { + t.Errorf("bad parse: %#v", kring) + } +} + +func TestDSAHashTruncatation(t *testing.T) { + // dsaKeyWithSHA512 was generated with GnuPG and --cert-digest-algo + // SHA512 in order to require DSA hash truncation to verify correctly. + _, err := ReadKeyRing(readerFromHex(dsaKeyWithSHA512)) + if err != nil { + t.Error(err) + } +} + +func TestGetKeyById(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) + + keys := kring.KeysById(0xa34d7e18c20c31bb) + if len(keys) != 1 || keys[0].Entity != kring[0] { + t.Errorf("bad result for 0xa34d7e18c20c31bb: %#v", keys) + } + + keys = kring.KeysById(0xfd94408d4543314f) + if len(keys) != 1 || keys[0].Entity != kring[0] { + t.Errorf("bad result for 0xa34d7e18c20c31bb: %#v", keys) + } +} + +func checkSignedMessage(t *testing.T, signedHex, expected string) { + kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) + + md, err := ReadMessage(readerFromHex(signedHex), kring, nil, nil) + if err != nil { + t.Error(err) + return + } + + if !md.IsSigned || md.SignedByKeyId != 0xa34d7e18c20c31bb || md.SignedBy == nil || md.IsEncrypted || md.IsSymmetricallyEncrypted || len(md.EncryptedToKeyIds) != 0 || md.IsSymmetricallyEncrypted { + t.Errorf("bad MessageDetails: %#v", md) + } + + contents, err := ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + t.Errorf("error reading UnverifiedBody: %s", err) + } + if string(contents) != expected { + t.Errorf("bad UnverifiedBody got:%s want:%s", string(contents), expected) + } + if md.SignatureError != nil || md.Signature == nil { + t.Errorf("failed to validate: %s", md.SignatureError) + } +} + +func TestSignedMessage(t *testing.T) { + checkSignedMessage(t, signedMessageHex, signedInput) +} + +func TestTextSignedMessage(t *testing.T) { + checkSignedMessage(t, signedTextMessageHex, signedTextInput) +} + +// The reader should detect "compressed quines", which are compressed +// packets that expand into themselves and cause an infinite recursive +// parsing loop. +// The packet in this test case comes from Taylor R. Campbell at +// http://mumble.net/~campbell/misc/pgp-quine/ +func TestCampbellQuine(t *testing.T) { + md, err := ReadMessage(readerFromHex(campbellQuine), nil, nil, nil) + if md != nil { + t.Errorf("Reading a compressed quine should not return any data: %#v", md) + } + structural, ok := err.(errors.StructuralError) + if !ok { + t.Fatalf("Unexpected class of error: %T", err) + } + if !strings.Contains(string(structural), "too many layers of packets") { + t.Fatalf("Unexpected error: %s", err) + } +} + +var signedEncryptedMessageTests = []struct { + keyRingHex string + messageHex string + signedByKeyId uint64 + encryptedToKeyId uint64 +}{ + { + testKeys1And2PrivateHex, + signedEncryptedMessageHex, + 0xa34d7e18c20c31bb, + 0x2a67d68660df41c7, + }, + { + dsaElGamalTestKeysHex, + signedEncryptedMessage2Hex, + 0x33af447ccd759b09, + 0xcf6a7abcd43e3673, + }, +} + +func TestSignedEncryptedMessage(t *testing.T) { + for i, test := range signedEncryptedMessageTests { + expected := "Signed and encrypted message\n" + kring, _ := ReadKeyRing(readerFromHex(test.keyRingHex)) + prompt := func(keys []Key, symmetric bool) ([]byte, error) { + if symmetric { + t.Errorf("prompt: message was marked as symmetrically encrypted") + return nil, errors.ErrKeyIncorrect + } + + if len(keys) == 0 { + t.Error("prompt: no keys requested") + return nil, errors.ErrKeyIncorrect + } + + err := keys[0].PrivateKey.Decrypt([]byte("passphrase")) + if err != nil { + t.Errorf("prompt: error decrypting key: %s", err) + return nil, errors.ErrKeyIncorrect + } + + return nil, nil + } + + md, err := ReadMessage(readerFromHex(test.messageHex), kring, prompt, nil) + if err != nil { + t.Errorf("#%d: error reading message: %s", i, err) + return + } + + if !md.IsSigned || md.SignedByKeyId != test.signedByKeyId || md.SignedBy == nil || !md.IsEncrypted || md.IsSymmetricallyEncrypted || len(md.EncryptedToKeyIds) == 0 || md.EncryptedToKeyIds[0] != test.encryptedToKeyId { + t.Errorf("#%d: bad MessageDetails: %#v", i, md) + } + + contents, err := ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + t.Errorf("#%d: error reading UnverifiedBody: %s", i, err) + } + if string(contents) != expected { + t.Errorf("#%d: bad UnverifiedBody got:%s want:%s", i, string(contents), expected) + } + + if md.SignatureError != nil || md.Signature == nil { + t.Errorf("#%d: failed to validate: %s", i, md.SignatureError) + } + } +} + +func TestUnspecifiedRecipient(t *testing.T) { + expected := "Recipient unspecified\n" + kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) + + md, err := ReadMessage(readerFromHex(recipientUnspecifiedHex), kring, nil, nil) + if err != nil { + t.Errorf("error reading message: %s", err) + return + } + + contents, err := ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + t.Errorf("error reading UnverifiedBody: %s", err) + } + if string(contents) != expected { + t.Errorf("bad UnverifiedBody got:%s want:%s", string(contents), expected) + } +} + +func TestSymmetricallyEncrypted(t *testing.T) { + firstTimeCalled := true + + prompt := func(keys []Key, symmetric bool) ([]byte, error) { + if len(keys) != 0 { + t.Errorf("prompt: len(keys) = %d (want 0)", len(keys)) + } + + if !symmetric { + t.Errorf("symmetric is not set") + } + + if firstTimeCalled { + firstTimeCalled = false + return []byte("wrongpassword"), nil + } + + return []byte("password"), nil + } + + md, err := ReadMessage(readerFromHex(symmetricallyEncryptedCompressedHex), nil, prompt, nil) + if err != nil { + t.Errorf("ReadMessage: %s", err) + return + } + + contents, err := ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + t.Errorf("ReadAll: %s", err) + } + + expectedCreationTime := uint32(1295992998) + if md.LiteralData.Time != expectedCreationTime { + t.Errorf("LiteralData.Time is %d, want %d", md.LiteralData.Time, expectedCreationTime) + } + + const expected = "Symmetrically encrypted.\n" + if string(contents) != expected { + t.Errorf("contents got: %s want: %s", string(contents), expected) + } +} + +func testDetachedSignature(t *testing.T, kring KeyRing, signature io.Reader, sigInput, tag string, expectedSignerKeyId uint64) { + signed := bytes.NewBufferString(sigInput) + signer, err := CheckDetachedSignature(kring, signed, signature) + if err != nil { + t.Errorf("%s: signature error: %s", tag, err) + return + } + if signer == nil { + t.Errorf("%s: signer is nil", tag) + return + } + if signer.PrimaryKey.KeyId != expectedSignerKeyId { + t.Errorf("%s: wrong signer got:%x want:%x", tag, signer.PrimaryKey.KeyId, expectedSignerKeyId) + } +} + +func TestDetachedSignature(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) + testDetachedSignature(t, kring, readerFromHex(detachedSignatureHex), signedInput, "binary", testKey1KeyId) + testDetachedSignature(t, kring, readerFromHex(detachedSignatureTextHex), signedInput, "text", testKey1KeyId) + testDetachedSignature(t, kring, readerFromHex(detachedSignatureV3TextHex), signedInput, "v3", testKey1KeyId) + + incorrectSignedInput := signedInput + "X" + _, err := CheckDetachedSignature(kring, bytes.NewBufferString(incorrectSignedInput), readerFromHex(detachedSignatureHex)) + if err == nil { + t.Fatal("CheckDetachedSignature returned without error for bad signature") + } + if err == errors.ErrUnknownIssuer { + t.Fatal("CheckDetachedSignature returned ErrUnknownIssuer when the signer was known, but the signature invalid") + } +} + +func TestDetachedSignatureDSA(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyHex)) + testDetachedSignature(t, kring, readerFromHex(detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId) +} + +func TestMultipleSignaturePacketsDSA(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyHex)) + testDetachedSignature(t, kring, readerFromHex(missingHashFunctionHex+detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId) +} + +func TestDetachedSignatureP256(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(p256TestKeyHex)) + testDetachedSignature(t, kring, readerFromHex(detachedSignatureP256Hex), signedInput, "binary", testKeyP256KeyId) +} + +func testHashFunctionError(t *testing.T, signatureHex string) { + kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) + _, err := CheckDetachedSignature(kring, nil, readerFromHex(signatureHex)) + if err == nil { + t.Fatal("Packet with bad hash type was correctly parsed") + } + unsupported, ok := err.(errors.UnsupportedError) + if !ok { + t.Fatalf("Unexpected class of error: %s", err) + } + if !strings.Contains(string(unsupported), "hash ") { + t.Fatalf("Unexpected error: %s", err) + } +} + +func TestUnknownHashFunction(t *testing.T) { + // unknownHashFunctionHex contains a signature packet with hash + // function type 153 (which isn't a real hash function id). + testHashFunctionError(t, unknownHashFunctionHex) +} + +func TestMissingHashFunction(t *testing.T) { + // missingHashFunctionHex contains a signature packet that uses + // RIPEMD160, which isn't compiled in. Since that's the only signature + // packet we don't find any suitable packets and end up with ErrUnknownIssuer + kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) + _, err := CheckDetachedSignature(kring, nil, readerFromHex(missingHashFunctionHex)) + if err == nil { + t.Fatal("Packet with missing hash type was correctly parsed") + } + if err != errors.ErrUnknownIssuer { + t.Fatalf("Unexpected class of error: %s", err) + } +} + +func TestReadingArmoredPrivateKey(t *testing.T) { + el, err := ReadArmoredKeyRing(bytes.NewBufferString(armoredPrivateKeyBlock)) + if err != nil { + t.Error(err) + } + if len(el) != 1 { + t.Errorf("got %d entities, wanted 1\n", len(el)) + } +} + +func TestReadingArmoredPublicKey(t *testing.T) { + el, err := ReadArmoredKeyRing(bytes.NewBufferString(e2ePublicKey)) + if err != nil { + t.Error(err) + } + if len(el) != 1 { + t.Errorf("didn't get a valid entity") + } +} + +func TestNoArmoredData(t *testing.T) { + _, err := ReadArmoredKeyRing(bytes.NewBufferString("foo")) + if _, ok := err.(errors.InvalidArgumentError); !ok { + t.Errorf("error was not an InvalidArgumentError: %s", err) + } +} + +func testReadMessageError(t *testing.T, messageHex string) { + buf, err := hex.DecodeString(messageHex) + if err != nil { + t.Errorf("hex.DecodeString(): %v", err) + } + + kr, err := ReadKeyRing(new(bytes.Buffer)) + if err != nil { + t.Errorf("ReadKeyring(): %v", err) + } + + _, err = ReadMessage(bytes.NewBuffer(buf), kr, + func([]Key, bool) ([]byte, error) { + return []byte("insecure"), nil + }, nil) + + if err == nil { + t.Errorf("ReadMessage(): Unexpected nil error") + } +} + +func TestIssue11503(t *testing.T) { + testReadMessageError(t, "8c040402000aa430aa8228b9248b01fc899a91197130303030") +} + +func TestIssue11504(t *testing.T) { + testReadMessageError(t, "9303000130303030303030303030983002303030303030030000000130") +} + +// TestSignatureV3Message tests the verification of V3 signature, generated +// with a modern V4-style key. Some people have their clients set to generate +// V3 signatures, so it's useful to be able to verify them. +func TestSignatureV3Message(t *testing.T) { + sig, err := armor.Decode(strings.NewReader(signedMessageV3)) + if err != nil { + t.Error(err) + return + } + key, err := ReadArmoredKeyRing(strings.NewReader(keyV4forVerifyingSignedMessageV3)) + if err != nil { + t.Error(err) + return + } + md, err := ReadMessage(sig.Body, key, nil, nil) + if err != nil { + t.Error(err) + return + } + + _, err = ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + t.Error(err) + return + } + + // We'll see a sig error here after reading in the UnverifiedBody above, + // if there was one to see. + if err = md.SignatureError; err != nil { + t.Error(err) + return + } + + if md.SignatureV3 == nil { + t.Errorf("No available signature after checking signature") + return + } + if md.Signature != nil { + t.Errorf("Did not expect a signature V4 back") + return + } + return +} + +const testKey1KeyId = 0xA34D7E18C20C31BB +const testKey3KeyId = 0x338934250CCC0360 +const testKeyP256KeyId = 0xd44a2c495918513e + +const signedInput = "Signed message\nline 2\nline 3\n" +const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" + +const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" + +const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" + +const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" + +const detachedSignatureV3TextHex = "8900950305005255c25ca34d7e18c20c31bb0102bb3f04009f6589ef8a028d6e54f6eaf25432e590d31c3a41f4710897585e10c31e5e332c7f9f409af8512adceaff24d0da1474ab07aa7bce4f674610b010fccc5b579ae5eb00a127f272fb799f988ab8e4574c141da6dbfecfef7e6b2c478d9a3d2551ba741f260ee22bec762812f0053e05380bfdd55ad0f22d8cdf71b233fe51ae8a24" + +const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" + +const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" + +const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" + +const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" + +const dsaElGamalTestKeysHex = "9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4bfb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" + +const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" + +const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" + +const signedEncryptedMessageHex = "848c032a67d68660df41c70103ff5789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8d2c03b018bd210b1d3791e1aba74b0f1034e122ab72e760492c192383cf5e20b5628bd043272d63df9b923f147eb6091cd897553204832aba48fec54aa447547bb16305a1024713b90e77fd0065f1918271947549205af3c74891af22ee0b56cd29bfec6d6e351901cd4ab3ece7c486f1e32a792d4e474aed98ee84b3f591c7dff37b64e0ecd68fd036d517e412dcadf85840ce184ad7921ad446c4ee28db80447aea1ca8d4f574db4d4e37688158ddd19e14ee2eab4873d46947d65d14a23e788d912cf9a19624ca7352469b72a83866b7c23cb5ace3deab3c7018061b0ba0f39ed2befe27163e5083cf9b8271e3e3d52cc7ad6e2a3bd81d4c3d7022f8d" + +const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" + +const symmetricallyEncryptedCompressedHex = "8c0d04030302eb4a03808145d0d260c92f714339e13de5a79881216431925bf67ee2898ea61815f07894cd0703c50d0a76ef64d482196f47a8bc729af9b80bb6" + +const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const dsaTestKeyPrivateHex = "9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp +idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn +vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB +AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X +0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL +IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk +VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn +gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 +TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx +q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz +dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA +CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 +ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ +eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid +AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV +bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK +/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA +A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX +TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc +lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 +rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN +oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 +QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU +nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC +AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp +BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad +AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL +VrM0m72/jnpKo04= +=zNCn +-----END PGP PRIVATE KEY BLOCK-----` + +const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 +sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk +Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ +AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD +24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX ++WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 +B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX +fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA +FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 +ex7En5r7rHR5xwX82Msc+Rq9dSyO +=7MrZ +-----END PGP PUBLIC KEY BLOCK-----` + +const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` + +const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` + +const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` + +const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Comment: GPGTools - https://gpgtools.org + +mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY +BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z +tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 +JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV +/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ +K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H +JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx +YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 +b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi +UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M +pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM +AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz +786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd +EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB +=RZia +-----END PGP PUBLIC KEY BLOCK----- +` + +const signedMessageV3 = `-----BEGIN PGP MESSAGE----- +Comment: GPGTools - https://gpgtools.org + +owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP +q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka +uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka +DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d +iT57d/OhWwA= +=hG7R +-----END PGP MESSAGE----- +` diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go new file mode 100644 index 0000000..4b9a44c --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -0,0 +1,273 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package s2k implements the various OpenPGP string-to-key transforms as +// specified in RFC 4800 section 3.7.1. +package s2k // import "golang.org/x/crypto/openpgp/s2k" + +import ( + "crypto" + "hash" + "io" + "strconv" + + "golang.org/x/crypto/openpgp/errors" +) + +// Config collects configuration parameters for s2k key-stretching +// transformatioms. A nil *Config is valid and results in all default +// values. Currently, Config is used only by the Serialize function in +// this package. +type Config struct { + // Hash is the default hash function to be used. If + // nil, SHA1 is used. + Hash crypto.Hash + // S2KCount is only used for symmetric encryption. It + // determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 1024 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 65536 used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. When set, it is strongly encrouraged to + // use a value that is at least 65536. See RFC 4880 Section + // 3.7.1.3. + S2KCount int +} + +func (c *Config) hash() crypto.Hash { + if c == nil || uint(c.Hash) == 0 { + // SHA1 is the historical default in this package. + return crypto.SHA1 + } + + return c.Hash +} + +func (c *Config) encodedCount() uint8 { + if c == nil || c.S2KCount == 0 { + return 96 // The common case. Correspoding to 65536 + } + + i := c.S2KCount + switch { + // Behave like GPG. Should we make 65536 the lowest value used? + case i < 1024: + i = 1024 + case i > 65011712: + i = 65011712 + } + + return encodeCount(i) +} + +// encodeCount converts an iterative "count" in the range 1024 to +// 65011712, inclusive, to an encoded count. The return value is the +// octet that is actually stored in the GPG file. encodeCount panics +// if i is not in the above range (encodedCount above takes care to +// pass i in the correct range). See RFC 4880 Section 3.7.7.1. +func encodeCount(i int) uint8 { + if i < 1024 || i > 65011712 { + panic("count arg i outside the required range") + } + + for encoded := 0; encoded < 256; encoded++ { + count := decodeCount(uint8(encoded)) + if count >= i { + return uint8(encoded) + } + } + + return 255 +} + +// decodeCount returns the s2k mode 3 iterative "count" corresponding to +// the encoded octet c. +func decodeCount(c uint8) int { + return (16 + int(c&15)) << (uint32(c>>4) + 6) +} + +// Simple writes to out the result of computing the Simple S2K function (RFC +// 4880, section 3.7.1.1) using the given hash and input passphrase. +func Simple(out []byte, h hash.Hash, in []byte) { + Salted(out, h, in, nil) +} + +var zero [1]byte + +// Salted writes to out the result of computing the Salted S2K function (RFC +// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. +func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { + done := 0 + var digest []byte + + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + h.Write(salt) + h.Write(in) + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Iterated writes to out the result of computing the Iterated and Salted S2K +// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, +// salt and iteration count. +func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + _, err = io.ReadFull(r, buf[:2]) + if err != nil { + return + } + + hash, ok := HashIdToHash(buf[1]) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) + } + if !hash.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) + } + h := hash.New() + + switch buf[0] { + case 0: + f := func(out, in []byte) { + Simple(out, h, in) + } + return f, nil + case 1: + _, err = io.ReadFull(r, buf[:8]) + if err != nil { + return + } + f := func(out, in []byte) { + Salted(out, h, in, buf[:8]) + } + return f, nil + case 3: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return + } + count := decodeCount(buf[8]) + f := func(out, in []byte) { + Iterated(out, h, in, buf[:8], count) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. +func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { + var buf [11]byte + buf[0] = 3 /* iterated and salted */ + buf[1], _ = HashToHashId(c.hash()) + salt := buf[2:10] + if _, err := io.ReadFull(rand, salt); err != nil { + return err + } + encodedCount := c.encodedCount() + count := decodeCount(encodedCount) + buf[10] = encodedCount + if _, err := w.Write(buf[:]); err != nil { + return err + } + + Iterated(key, c.hash().New(), passphrase, salt, count) + return nil +} + +// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. +var hashToHashIdMapping = []struct { + id byte + hash crypto.Hash + name string +}{ + {1, crypto.MD5, "MD5"}, + {2, crypto.SHA1, "SHA1"}, + {3, crypto.RIPEMD160, "RIPEMD160"}, + {8, crypto.SHA256, "SHA256"}, + {9, crypto.SHA384, "SHA384"}, + {10, crypto.SHA512, "SHA512"}, + {11, crypto.SHA224, "SHA224"}, +} + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + for _, m := range hashToHashIdMapping { + if m.id == id { + return m.hash, true + } + } + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + for _, m := range hashToHashIdMapping { + if m.id == id { + return m.name, true + } + } + + return "", false +} + +// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + for _, m := range hashToHashIdMapping { + if m.hash == h { + return m.id, true + } + } + return 0, false +} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k_test.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k_test.go new file mode 100644 index 0000000..183d260 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k_test.go @@ -0,0 +1,137 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2k + +import ( + "bytes" + "crypto" + _ "crypto/md5" + "crypto/rand" + "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/hex" + "testing" + + _ "golang.org/x/crypto/ripemd160" +) + +var saltedTests = []struct { + in, out string +}{ + {"hello", "10295ac1"}, + {"world", "ac587a5e"}, + {"foo", "4dda8077"}, + {"bar", "bd8aac6b9ea9cae04eae6a91c6133b58b5d9a61c14f355516ed9370456"}, + {"x", "f1d3f289"}, + {"xxxxxxxxxxxxxxxxxxxxxxx", "e00d7b45"}, +} + +func TestSalted(t *testing.T) { + h := sha1.New() + salt := [4]byte{1, 2, 3, 4} + + for i, test := range saltedTests { + expected, _ := hex.DecodeString(test.out) + out := make([]byte, len(expected)) + Salted(out, h, []byte(test.in), salt[:]) + if !bytes.Equal(expected, out) { + t.Errorf("#%d, got: %x want: %x", i, out, expected) + } + } +} + +var iteratedTests = []struct { + in, out string +}{ + {"hello", "83126105"}, + {"world", "6fa317f9"}, + {"foo", "8fbc35b9"}, + {"bar", "2af5a99b54f093789fd657f19bd245af7604d0f6ae06f66602a46a08ae"}, + {"x", "5a684dfe"}, + {"xxxxxxxxxxxxxxxxxxxxxxx", "18955174"}, +} + +func TestIterated(t *testing.T) { + h := sha1.New() + salt := [4]byte{4, 3, 2, 1} + + for i, test := range iteratedTests { + expected, _ := hex.DecodeString(test.out) + out := make([]byte, len(expected)) + Iterated(out, h, []byte(test.in), salt[:], 31) + if !bytes.Equal(expected, out) { + t.Errorf("#%d, got: %x want: %x", i, out, expected) + } + } +} + +var parseTests = []struct { + spec, in, out string +}{ + /* Simple with SHA1 */ + {"0002", "hello", "aaf4c61d"}, + /* Salted with SHA1 */ + {"01020102030405060708", "hello", "f4f7d67e"}, + /* Iterated with SHA1 */ + {"03020102030405060708f1", "hello", "f2a57b7c"}, +} + +func TestParse(t *testing.T) { + for i, test := range parseTests { + spec, _ := hex.DecodeString(test.spec) + buf := bytes.NewBuffer(spec) + f, err := Parse(buf) + if err != nil { + t.Errorf("%d: Parse returned error: %s", i, err) + continue + } + + expected, _ := hex.DecodeString(test.out) + out := make([]byte, len(expected)) + f(out, []byte(test.in)) + if !bytes.Equal(out, expected) { + t.Errorf("%d: output got: %x want: %x", i, out, expected) + } + if testing.Short() { + break + } + } +} + +func TestSerialize(t *testing.T) { + hashes := []crypto.Hash{crypto.MD5, crypto.SHA1, crypto.RIPEMD160, + crypto.SHA256, crypto.SHA384, crypto.SHA512, crypto.SHA224} + testCounts := []int{-1, 0, 1024, 65536, 4063232, 65011712} + for _, h := range hashes { + for _, c := range testCounts { + testSerializeConfig(t, &Config{Hash: h, S2KCount: c}) + } + } +} + +func testSerializeConfig(t *testing.T, c *Config) { + t.Logf("Running testSerializeConfig() with config: %+v", c) + + buf := bytes.NewBuffer(nil) + key := make([]byte, 16) + passphrase := []byte("testing") + err := Serialize(buf, key, rand.Reader, passphrase, c) + if err != nil { + t.Errorf("failed to serialize: %s", err) + return + } + + f, err := Parse(buf) + if err != nil { + t.Errorf("failed to reparse: %s", err) + return + } + key2 := make([]byte, len(key)) + f(key2, passphrase) + if !bytes.Equal(key2, key) { + t.Errorf("keys don't match: %x (serialied) vs %x (parsed)", key, key2) + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go new file mode 100644 index 0000000..65a304c --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/write.go @@ -0,0 +1,378 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "hash" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" + "golang.org/x/crypto/openpgp/s2k" +) + +// DetachSign signs message with the private key from signer (which must +// already have been decrypted) and writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// ArmoredDetachSign signs message with the private key from signer (which +// must already have been decrypted) and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &signer.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + io.Copy(wrappedHash, message) + + err = sig.Sign(h, signer.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + + literaldata := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literaldata, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. +func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.signingKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + // In the event that a recipient doesn't specify any supported ciphers + // or hash functions, these are the ones that we assume that every + // implementation supports. + defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] + defaultHashes := candidateHashes[len(candidateHashes)-1:] + + encryptKeys := make([]Key, len(to)) + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].encryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + } + + sig := to[i].primaryIdentity().SelfSignature + + preferredSymmetric := sig.PreferredSymmetric + if len(preferredSymmetric) == 0 { + preferredSymmetric = defaultCiphers + } + preferredHashes := sig.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + } + + if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := s2k.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) + if err != nil { + return + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: packet.SigTypeBinary, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(encryptedData); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := encryptedData + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. + w = noOpCloser{encryptedData} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil + } + return literalData, nil +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + h hash.Hash + signer *packet.PrivateKey + config *packet.Config +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.h.Write(data) + return s.literalData.Write(data) +} + +func (s signatureWriter) Close() error { + sig := &packet.Signature{ + SigType: packet.SigTypeBinary, + PubKeyAlgo: s.signer.PubKeyAlgo, + Hash: s.hashType, + CreationTime: s.config.Now(), + IssuerKeyId: &s.signer.KeyId, + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/write_test.go b/vendor/golang.org/x/crypto/openpgp/write_test.go new file mode 100644 index 0000000..f2d50a0 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/write_test.go @@ -0,0 +1,273 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "bytes" + "io" + "io/ioutil" + "testing" + "time" + + "golang.org/x/crypto/openpgp/packet" +) + +func TestSignDetached(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) + out := bytes.NewBuffer(nil) + message := bytes.NewBufferString(signedInput) + err := DetachSign(out, kring[0], message, nil) + if err != nil { + t.Error(err) + } + + testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId) +} + +func TestSignTextDetached(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex)) + out := bytes.NewBuffer(nil) + message := bytes.NewBufferString(signedInput) + err := DetachSignText(out, kring[0], message, nil) + if err != nil { + t.Error(err) + } + + testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId) +} + +func TestSignDetachedDSA(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyPrivateHex)) + out := bytes.NewBuffer(nil) + message := bytes.NewBufferString(signedInput) + err := DetachSign(out, kring[0], message, nil) + if err != nil { + t.Error(err) + } + + testDetachedSignature(t, kring, out, signedInput, "check", testKey3KeyId) +} + +func TestSignDetachedP256(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(p256TestKeyPrivateHex)) + kring[0].PrivateKey.Decrypt([]byte("passphrase")) + + out := bytes.NewBuffer(nil) + message := bytes.NewBufferString(signedInput) + err := DetachSign(out, kring[0], message, nil) + if err != nil { + t.Error(err) + } + + testDetachedSignature(t, kring, out, signedInput, "check", testKeyP256KeyId) +} + +func TestNewEntity(t *testing.T) { + if testing.Short() { + return + } + + // Check bit-length with no config. + e, err := NewEntity("Test User", "test", "test@example.com", nil) + if err != nil { + t.Errorf("failed to create entity: %s", err) + return + } + bl, err := e.PrimaryKey.BitLength() + if err != nil { + t.Errorf("failed to find bit length: %s", err) + } + if int(bl) != defaultRSAKeyBits { + t.Errorf("BitLength %v, expected %v", int(bl), defaultRSAKeyBits) + } + + // Check bit-length with a config. + cfg := &packet.Config{RSABits: 1024} + e, err = NewEntity("Test User", "test", "test@example.com", cfg) + if err != nil { + t.Errorf("failed to create entity: %s", err) + return + } + bl, err = e.PrimaryKey.BitLength() + if err != nil { + t.Errorf("failed to find bit length: %s", err) + } + if int(bl) != cfg.RSABits { + t.Errorf("BitLength %v, expected %v", bl, cfg.RSABits) + } + + w := bytes.NewBuffer(nil) + if err := e.SerializePrivate(w, nil); err != nil { + t.Errorf("failed to serialize entity: %s", err) + return + } + serialized := w.Bytes() + + el, err := ReadKeyRing(w) + if err != nil { + t.Errorf("failed to reparse entity: %s", err) + return + } + + if len(el) != 1 { + t.Errorf("wrong number of entities found, got %d, want 1", len(el)) + } + + w = bytes.NewBuffer(nil) + if err := e.SerializePrivate(w, nil); err != nil { + t.Errorf("failed to serialize entity second time: %s", err) + return + } + + if !bytes.Equal(w.Bytes(), serialized) { + t.Errorf("results differed") + } +} + +func TestSymmetricEncryption(t *testing.T) { + buf := new(bytes.Buffer) + plaintext, err := SymmetricallyEncrypt(buf, []byte("testing"), nil, nil) + if err != nil { + t.Errorf("error writing headers: %s", err) + return + } + message := []byte("hello world\n") + _, err = plaintext.Write(message) + if err != nil { + t.Errorf("error writing to plaintext writer: %s", err) + } + err = plaintext.Close() + if err != nil { + t.Errorf("error closing plaintext writer: %s", err) + } + + md, err := ReadMessage(buf, nil, func(keys []Key, symmetric bool) ([]byte, error) { + return []byte("testing"), nil + }, nil) + if err != nil { + t.Errorf("error rereading message: %s", err) + } + messageBuf := bytes.NewBuffer(nil) + _, err = io.Copy(messageBuf, md.UnverifiedBody) + if err != nil { + t.Errorf("error rereading message: %s", err) + } + if !bytes.Equal(message, messageBuf.Bytes()) { + t.Errorf("recovered message incorrect got '%s', want '%s'", messageBuf.Bytes(), message) + } +} + +var testEncryptionTests = []struct { + keyRingHex string + isSigned bool +}{ + { + testKeys1And2PrivateHex, + false, + }, + { + testKeys1And2PrivateHex, + true, + }, + { + dsaElGamalTestKeysHex, + false, + }, + { + dsaElGamalTestKeysHex, + true, + }, +} + +func TestEncryption(t *testing.T) { + for i, test := range testEncryptionTests { + kring, _ := ReadKeyRing(readerFromHex(test.keyRingHex)) + + passphrase := []byte("passphrase") + for _, entity := range kring { + if entity.PrivateKey != nil && entity.PrivateKey.Encrypted { + err := entity.PrivateKey.Decrypt(passphrase) + if err != nil { + t.Errorf("#%d: failed to decrypt key", i) + } + } + for _, subkey := range entity.Subkeys { + if subkey.PrivateKey != nil && subkey.PrivateKey.Encrypted { + err := subkey.PrivateKey.Decrypt(passphrase) + if err != nil { + t.Errorf("#%d: failed to decrypt subkey", i) + } + } + } + } + + var signed *Entity + if test.isSigned { + signed = kring[0] + } + + buf := new(bytes.Buffer) + w, err := Encrypt(buf, kring[:1], signed, nil /* no hints */, nil) + if err != nil { + t.Errorf("#%d: error in Encrypt: %s", i, err) + continue + } + + const message = "testing" + _, err = w.Write([]byte(message)) + if err != nil { + t.Errorf("#%d: error writing plaintext: %s", i, err) + continue + } + err = w.Close() + if err != nil { + t.Errorf("#%d: error closing WriteCloser: %s", i, err) + continue + } + + md, err := ReadMessage(buf, kring, nil /* no prompt */, nil) + if err != nil { + t.Errorf("#%d: error reading message: %s", i, err) + continue + } + + testTime, _ := time.Parse("2006-01-02", "2013-07-01") + if test.isSigned { + signKey, _ := kring[0].signingKey(testTime) + expectedKeyId := signKey.PublicKey.KeyId + if md.SignedByKeyId != expectedKeyId { + t.Errorf("#%d: message signed by wrong key id, got: %v, want: %v", i, *md.SignedBy, expectedKeyId) + } + if md.SignedBy == nil { + t.Errorf("#%d: failed to find the signing Entity", i) + } + } + + plaintext, err := ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + t.Errorf("#%d: error reading encrypted contents: %s", i, err) + continue + } + + encryptKey, _ := kring[0].encryptionKey(testTime) + expectedKeyId := encryptKey.PublicKey.KeyId + if len(md.EncryptedToKeyIds) != 1 || md.EncryptedToKeyIds[0] != expectedKeyId { + t.Errorf("#%d: expected message to be encrypted to %v, but got %#v", i, expectedKeyId, md.EncryptedToKeyIds) + } + + if string(plaintext) != message { + t.Errorf("#%d: got: %s, want: %s", i, string(plaintext), message) + } + + if test.isSigned { + if md.SignatureError != nil { + t.Errorf("#%d: signature error: %s", i, md.SignatureError) + } + if md.Signature == nil { + t.Error("signature missing") + } + } + } +} diff --git a/vendor/golang.org/x/crypto/otr/libotr_test_helper.c b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c new file mode 100644 index 0000000..b3ca072 --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c @@ -0,0 +1,197 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code can be compiled and used to test the otr package against libotr. +// See otr_test.go. + +// +build ignore + +#include +#include +#include + +#include +#include +#include + +static int g_session_established = 0; + +OtrlPolicy policy(void *opdata, ConnContext *context) { + return OTRL_POLICY_ALWAYS; +} + +int is_logged_in(void *opdata, const char *accountname, const char *protocol, + const char *recipient) { + return 1; +} + +void inject_message(void *opdata, const char *accountname, const char *protocol, + const char *recipient, const char *message) { + printf("%s\n", message); + fflush(stdout); + fprintf(stderr, "libotr helper sent: %s\n", message); +} + +void update_context_list(void *opdata) {} + +void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname, + const char *protocol, const char *username, + unsigned char fingerprint[20]) { + fprintf(stderr, "NEW FINGERPRINT\n"); + g_session_established = 1; +} + +void write_fingerprints(void *opdata) {} + +void gone_secure(void *opdata, ConnContext *context) {} + +void gone_insecure(void *opdata, ConnContext *context) {} + +void still_secure(void *opdata, ConnContext *context, int is_reply) {} + +int max_message_size(void *opdata, ConnContext *context) { return 99999; } + +const char *account_name(void *opdata, const char *account, + const char *protocol) { + return "ACCOUNT"; +} + +void account_name_free(void *opdata, const char *account_name) {} + +const char *error_message(void *opdata, ConnContext *context, + OtrlErrorCode err_code) { + return "ERR"; +} + +void error_message_free(void *opdata, const char *msg) {} + +void resent_msg_prefix_free(void *opdata, const char *prefix) {} + +void handle_smp_event(void *opdata, OtrlSMPEvent smp_event, + ConnContext *context, unsigned short progress_event, + char *question) {} + +void handle_msg_event(void *opdata, OtrlMessageEvent msg_event, + ConnContext *context, const char *message, + gcry_error_t err) { + fprintf(stderr, "msg event: %d %s\n", msg_event, message); +} + +OtrlMessageAppOps uiops = { + policy, + NULL, + is_logged_in, + inject_message, + update_context_list, + new_fingerprint, + write_fingerprints, + gone_secure, + gone_insecure, + still_secure, + max_message_size, + account_name, + account_name_free, + NULL, /* received_symkey */ + error_message, + error_message_free, + NULL, /* resent_msg_prefix */ + resent_msg_prefix_free, + handle_smp_event, + handle_msg_event, + NULL /* create_instag */, + NULL /* convert_msg */, + NULL /* convert_free */, + NULL /* timer_control */, +}; + +static const char kPrivateKeyData[] = + "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa " + "(p " + "#00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F" + "30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E" + "5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB" + "8C031D3561FECEE72EBB4A090D450A9B7A857#) (q " + "#00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g " + "#535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F" + "1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F" + "6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57" + "597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y " + "#0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF" + "2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93" + "454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A" + "3C0FF501E3DC673B76D7BABF349009B6ECF#) (x " + "#14D0345A3562C480A039E3C72764F72D79043216#)))))\n"; + +int main() { + OTRL_INIT; + + // We have to write the private key information to a file because the libotr + // API demands a filename to read from. + const char *tmpdir = "/tmp"; + if (getenv("TMP")) { + tmpdir = getenv("TMP"); + } + + char private_key_file[256]; + snprintf(private_key_file, sizeof(private_key_file), + "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir); + int fd = mkstemp(private_key_file); + if (fd == -1) { + perror("creating temp file"); + } + write(fd, kPrivateKeyData, sizeof(kPrivateKeyData) - 1); + close(fd); + + OtrlUserState userstate = otrl_userstate_create(); + otrl_privkey_read(userstate, private_key_file); + unlink(private_key_file); + + fprintf(stderr, "libotr helper started\n"); + + char buf[4096]; + + for (;;) { + char *message = fgets(buf, sizeof(buf), stdin); + if (strlen(message) == 0) { + break; + } + message[strlen(message) - 1] = 0; + fprintf(stderr, "libotr helper got: %s\n", message); + + char *newmessage = NULL; + OtrlTLV *tlvs; + int ignore_message = otrl_message_receiving( + userstate, &uiops, NULL, "account", "proto", "peer", message, + &newmessage, &tlvs, NULL, NULL, NULL); + if (tlvs) { + otrl_tlv_free(tlvs); + } + + if (newmessage != NULL) { + fprintf(stderr, "libotr got: %s\n", newmessage); + otrl_message_free(newmessage); + + gcry_error_t err; + char *newmessage = NULL; + + err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto", + "peer", 0, "test message", NULL, &newmessage, + OTRL_FRAGMENT_SEND_SKIP, NULL, NULL, NULL); + if (newmessage == NULL) { + fprintf(stderr, "libotr didn't encrypt message\n"); + return 1; + } + write(1, newmessage, strlen(newmessage)); + write(1, "\n", 1); + fprintf(stderr, "libotr sent: %s\n", newmessage); + otrl_message_free(newmessage); + + g_session_established = 0; + write(1, "?OTRv2?\n", 8); + fprintf(stderr, "libotr sent: ?OTRv2\n"); + } + } + + return 0; +} diff --git a/vendor/golang.org/x/crypto/otr/otr.go b/vendor/golang.org/x/crypto/otr/otr.go new file mode 100644 index 0000000..173b753 --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/otr.go @@ -0,0 +1,1415 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package otr implements the Off The Record protocol as specified in +// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html +package otr // import "golang.org/x/crypto/otr" + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/hmac" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/hex" + "errors" + "hash" + "io" + "math/big" + "strconv" +) + +// SecurityChange describes a change in the security state of a Conversation. +type SecurityChange int + +const ( + NoChange SecurityChange = iota + // NewKeys indicates that a key exchange has completed. This occurs + // when a conversation first becomes encrypted, and when the keys are + // renegotiated within an encrypted conversation. + NewKeys + // SMPSecretNeeded indicates that the peer has started an + // authentication and that we need to supply a secret. Call SMPQuestion + // to get the optional, human readable challenge and then Authenticate + // to supply the matching secret. + SMPSecretNeeded + // SMPComplete indicates that an authentication completed. The identity + // of the peer has now been confirmed. + SMPComplete + // SMPFailed indicates that an authentication failed. + SMPFailed + // ConversationEnded indicates that the peer ended the secure + // conversation. + ConversationEnded +) + +// QueryMessage can be sent to a peer to start an OTR conversation. +var QueryMessage = "?OTRv2?" + +// ErrorPrefix can be used to make an OTR error by appending an error message +// to it. +var ErrorPrefix = "?OTR Error:" + +var ( + fragmentPartSeparator = []byte(",") + fragmentPrefix = []byte("?OTR,") + msgPrefix = []byte("?OTR:") + queryMarker = []byte("?OTR") +) + +// isQuery attempts to parse an OTR query from msg and returns the greatest +// common version, or 0 if msg is not an OTR query. +func isQuery(msg []byte) (greatestCommonVersion int) { + pos := bytes.Index(msg, queryMarker) + if pos == -1 { + return 0 + } + for i, c := range msg[pos+len(queryMarker):] { + if i == 0 { + if c == '?' { + // Indicates support for version 1, but we don't + // implement that. + continue + } + + if c != 'v' { + // Invalid message + return 0 + } + + continue + } + + if c == '?' { + // End of message + return + } + + if c == ' ' || c == '\t' { + // Probably an invalid message + return 0 + } + + if c == '2' { + greatestCommonVersion = 2 + } + } + + return 0 +} + +const ( + statePlaintext = iota + stateEncrypted + stateFinished +) + +const ( + authStateNone = iota + authStateAwaitingDHKey + authStateAwaitingRevealSig + authStateAwaitingSig +) + +const ( + msgTypeDHCommit = 2 + msgTypeData = 3 + msgTypeDHKey = 10 + msgTypeRevealSig = 17 + msgTypeSig = 18 +) + +const ( + // If the requested fragment size is less than this, it will be ignored. + minFragmentSize = 18 + // Messages are padded to a multiple of this number of bytes. + paddingGranularity = 256 + // The number of bytes in a Diffie-Hellman private value (320-bits). + dhPrivateBytes = 40 + // The number of bytes needed to represent an element of the DSA + // subgroup (160-bits). + dsaSubgroupBytes = 20 + // The number of bytes of the MAC that are sent on the wire (160-bits). + macPrefixBytes = 20 +) + +// These are the global, common group parameters for OTR. +var ( + p *big.Int // group prime + g *big.Int // group generator + q *big.Int // group order + pMinus2 *big.Int +) + +func init() { + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", 16) + q, _ = new(big.Int).SetString("7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68948127044533E63A0105DF531D89CD9128A5043CC71A026EF7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6F71C35FDAD44CFD2D74F9208BE258FF324943328F6722D9EE1003E5C50B1DF82CC6D241B0E2AE9CD348B1FD47E9267AFC1B2AE91EE51D6CB0E3179AB1042A95DCF6A9483B84B4B36B3861AA7255E4C0278BA36046511B993FFFFFFFFFFFFFFFF", 16) + g = new(big.Int).SetInt64(2) + pMinus2 = new(big.Int).Sub(p, g) +} + +// Conversation represents a relation with a peer. The zero value is a valid +// Conversation, although PrivateKey must be set. +// +// When communicating with a peer, all inbound messages should be passed to +// Conversation.Receive and all outbound messages to Conversation.Send. The +// Conversation will take care of maintaining the encryption state and +// negotiating encryption as needed. +type Conversation struct { + // PrivateKey contains the private key to use to sign key exchanges. + PrivateKey *PrivateKey + + // Rand can be set to override the entropy source. Otherwise, + // crypto/rand will be used. + Rand io.Reader + // If FragmentSize is set, all messages produced by Receive and Send + // will be fragmented into messages of, at most, this number of bytes. + FragmentSize int + + // Once Receive has returned NewKeys once, the following fields are + // valid. + SSID [8]byte + TheirPublicKey PublicKey + + state, authState int + + r [16]byte + x, y *big.Int + gx, gy *big.Int + gxBytes []byte + digest [sha256.Size]byte + + revealKeys, sigKeys akeKeys + + myKeyId uint32 + myCurrentDHPub *big.Int + myCurrentDHPriv *big.Int + myLastDHPub *big.Int + myLastDHPriv *big.Int + + theirKeyId uint32 + theirCurrentDHPub *big.Int + theirLastDHPub *big.Int + + keySlots [4]keySlot + + myCounter [8]byte + theirLastCtr [8]byte + oldMACs []byte + + k, n int // fragment state + frag []byte + + smp smpState +} + +// A keySlot contains key material for a specific (their keyid, my keyid) pair. +type keySlot struct { + // used is true if this slot is valid. If false, it's free for reuse. + used bool + theirKeyId uint32 + myKeyId uint32 + sendAESKey, recvAESKey []byte + sendMACKey, recvMACKey []byte + theirLastCtr [8]byte +} + +// akeKeys are generated during key exchange. There's one set for the reveal +// signature message and another for the signature message. In the protocol +// spec the latter are indicated with a prime mark. +type akeKeys struct { + c [16]byte + m1, m2 [32]byte +} + +func (c *Conversation) rand() io.Reader { + if c.Rand != nil { + return c.Rand + } + return rand.Reader +} + +func (c *Conversation) randMPI(buf []byte) *big.Int { + _, err := io.ReadFull(c.rand(), buf) + if err != nil { + panic("otr: short read from random source") + } + + return new(big.Int).SetBytes(buf) +} + +// tlv represents the type-length value from the protocol. +type tlv struct { + typ, length uint16 + data []byte +} + +const ( + tlvTypePadding = 0 + tlvTypeDisconnected = 1 + tlvTypeSMP1 = 2 + tlvTypeSMP2 = 3 + tlvTypeSMP3 = 4 + tlvTypeSMP4 = 5 + tlvTypeSMPAbort = 6 + tlvTypeSMP1WithQuestion = 7 +) + +// Receive handles a message from a peer. It returns a human readable message, +// an indicator of whether that message was encrypted, a hint about the +// encryption state and zero or more messages to send back to the peer. +// These messages do not need to be passed to Send before transmission. +func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change SecurityChange, toSend [][]byte, err error) { + if bytes.HasPrefix(in, fragmentPrefix) { + in, err = c.processFragment(in) + if in == nil || err != nil { + return + } + } + + if bytes.HasPrefix(in, msgPrefix) && in[len(in)-1] == '.' { + in = in[len(msgPrefix) : len(in)-1] + } else if version := isQuery(in); version > 0 { + c.authState = authStateAwaitingDHKey + c.reset() + toSend = c.encode(c.generateDHCommit()) + return + } else { + // plaintext message + out = in + return + } + + msg := make([]byte, base64.StdEncoding.DecodedLen(len(in))) + msgLen, err := base64.StdEncoding.Decode(msg, in) + if err != nil { + err = errors.New("otr: invalid base64 encoding in message") + return + } + msg = msg[:msgLen] + + // The first two bytes are the protocol version (2) + if len(msg) < 3 || msg[0] != 0 || msg[1] != 2 { + err = errors.New("otr: invalid OTR message") + return + } + + msgType := int(msg[2]) + msg = msg[3:] + + switch msgType { + case msgTypeDHCommit: + switch c.authState { + case authStateNone: + c.authState = authStateAwaitingRevealSig + if err = c.processDHCommit(msg); err != nil { + return + } + c.reset() + toSend = c.encode(c.generateDHKey()) + return + case authStateAwaitingDHKey: + // This is a 'SYN-crossing'. The greater digest wins. + var cmp int + if cmp, err = c.compareToDHCommit(msg); err != nil { + return + } + if cmp > 0 { + // We win. Retransmit DH commit. + toSend = c.encode(c.serializeDHCommit()) + return + } else { + // They win. We forget about our DH commit. + c.authState = authStateAwaitingRevealSig + if err = c.processDHCommit(msg); err != nil { + return + } + c.reset() + toSend = c.encode(c.generateDHKey()) + return + } + case authStateAwaitingRevealSig: + if err = c.processDHCommit(msg); err != nil { + return + } + toSend = c.encode(c.serializeDHKey()) + case authStateAwaitingSig: + if err = c.processDHCommit(msg); err != nil { + return + } + c.reset() + toSend = c.encode(c.generateDHKey()) + c.authState = authStateAwaitingRevealSig + default: + panic("bad state") + } + case msgTypeDHKey: + switch c.authState { + case authStateAwaitingDHKey: + var isSame bool + if isSame, err = c.processDHKey(msg); err != nil { + return + } + if isSame { + err = errors.New("otr: unexpected duplicate DH key") + return + } + toSend = c.encode(c.generateRevealSig()) + c.authState = authStateAwaitingSig + case authStateAwaitingSig: + var isSame bool + if isSame, err = c.processDHKey(msg); err != nil { + return + } + if isSame { + toSend = c.encode(c.serializeDHKey()) + } + } + case msgTypeRevealSig: + if c.authState != authStateAwaitingRevealSig { + return + } + if err = c.processRevealSig(msg); err != nil { + return + } + toSend = c.encode(c.generateSig()) + c.authState = authStateNone + c.state = stateEncrypted + change = NewKeys + case msgTypeSig: + if c.authState != authStateAwaitingSig { + return + } + if err = c.processSig(msg); err != nil { + return + } + c.authState = authStateNone + c.state = stateEncrypted + change = NewKeys + case msgTypeData: + if c.state != stateEncrypted { + err = errors.New("otr: encrypted message received without encrypted session established") + return + } + var tlvs []tlv + out, tlvs, err = c.processData(msg) + encrypted = true + + EachTLV: + for _, inTLV := range tlvs { + switch inTLV.typ { + case tlvTypeDisconnected: + change = ConversationEnded + c.state = stateFinished + break EachTLV + case tlvTypeSMP1, tlvTypeSMP2, tlvTypeSMP3, tlvTypeSMP4, tlvTypeSMPAbort, tlvTypeSMP1WithQuestion: + var reply tlv + var complete bool + reply, complete, err = c.processSMP(inTLV) + if err == smpSecretMissingError { + err = nil + change = SMPSecretNeeded + c.smp.saved = &inTLV + return + } + if err == smpFailureError { + err = nil + change = SMPFailed + } else if complete { + change = SMPComplete + } + if reply.typ != 0 { + toSend = c.encode(c.generateData(nil, &reply)) + } + break EachTLV + default: + // skip unknown TLVs + } + } + default: + err = errors.New("otr: unknown message type " + strconv.Itoa(msgType)) + } + + return +} + +// Send takes a human readable message from the local user, possibly encrypts +// it and returns zero one or more messages to send to the peer. +func (c *Conversation) Send(msg []byte) ([][]byte, error) { + switch c.state { + case statePlaintext: + return [][]byte{msg}, nil + case stateEncrypted: + return c.encode(c.generateData(msg, nil)), nil + case stateFinished: + return nil, errors.New("otr: cannot send message because secure conversation has finished") + } + + return nil, errors.New("otr: cannot send message in current state") +} + +// SMPQuestion returns the human readable challenge question from the peer. +// It's only valid after Receive has returned SMPSecretNeeded. +func (c *Conversation) SMPQuestion() string { + return c.smp.question +} + +// Authenticate begins an authentication with the peer. Authentication involves +// an optional challenge message and a shared secret. The authentication +// proceeds until either Receive returns SMPComplete, SMPSecretNeeded (which +// indicates that a new authentication is happening and thus this one was +// aborted) or SMPFailed. +func (c *Conversation) Authenticate(question string, mutualSecret []byte) (toSend [][]byte, err error) { + if c.state != stateEncrypted { + err = errors.New("otr: can't authenticate a peer without a secure conversation established") + return + } + + if c.smp.saved != nil { + c.calcSMPSecret(mutualSecret, false /* they started it */) + + var out tlv + var complete bool + out, complete, err = c.processSMP(*c.smp.saved) + if complete { + panic("SMP completed on the first message") + } + c.smp.saved = nil + if out.typ != 0 { + toSend = c.encode(c.generateData(nil, &out)) + } + return + } + + c.calcSMPSecret(mutualSecret, true /* we started it */) + outs := c.startSMP(question) + for _, out := range outs { + toSend = append(toSend, c.encode(c.generateData(nil, &out))...) + } + return +} + +// End ends a secure conversation by generating a termination message for +// the peer and switches to unencrypted communication. +func (c *Conversation) End() (toSend [][]byte) { + switch c.state { + case statePlaintext: + return nil + case stateEncrypted: + c.state = statePlaintext + return c.encode(c.generateData(nil, &tlv{typ: tlvTypeDisconnected})) + case stateFinished: + c.state = statePlaintext + return nil + } + panic("unreachable") +} + +// IsEncrypted returns true if a message passed to Send would be encrypted +// before transmission. This result remains valid until the next call to +// Receive or End, which may change the state of the Conversation. +func (c *Conversation) IsEncrypted() bool { + return c.state == stateEncrypted +} + +var fragmentError = errors.New("otr: invalid OTR fragment") + +// processFragment processes a fragmented OTR message and possibly returns a +// complete message. Fragmented messages look like "?OTR,k,n,msg," where k is +// the fragment number (starting from 1), n is the number of fragments in this +// message and msg is a substring of the base64 encoded message. +func (c *Conversation) processFragment(in []byte) (out []byte, err error) { + in = in[len(fragmentPrefix):] // remove "?OTR," + parts := bytes.Split(in, fragmentPartSeparator) + if len(parts) != 4 || len(parts[3]) != 0 { + return nil, fragmentError + } + + k, err := strconv.Atoi(string(parts[0])) + if err != nil { + return nil, fragmentError + } + + n, err := strconv.Atoi(string(parts[1])) + if err != nil { + return nil, fragmentError + } + + if k < 1 || n < 1 || k > n { + return nil, fragmentError + } + + if k == 1 { + c.frag = append(c.frag[:0], parts[2]...) + c.k, c.n = k, n + } else if n == c.n && k == c.k+1 { + c.frag = append(c.frag, parts[2]...) + c.k++ + } else { + c.frag = c.frag[:0] + c.n, c.k = 0, 0 + } + + if c.n > 0 && c.k == c.n { + c.n, c.k = 0, 0 + return c.frag, nil + } + + return nil, nil +} + +func (c *Conversation) generateDHCommit() []byte { + _, err := io.ReadFull(c.rand(), c.r[:]) + if err != nil { + panic("otr: short read from random source") + } + + var xBytes [dhPrivateBytes]byte + c.x = c.randMPI(xBytes[:]) + c.gx = new(big.Int).Exp(g, c.x, p) + c.gy = nil + c.gxBytes = appendMPI(nil, c.gx) + + h := sha256.New() + h.Write(c.gxBytes) + h.Sum(c.digest[:0]) + + aesCipher, err := aes.NewCipher(c.r[:]) + if err != nil { + panic(err.Error()) + } + + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(c.gxBytes, c.gxBytes) + + return c.serializeDHCommit() +} + +func (c *Conversation) serializeDHCommit() []byte { + var ret []byte + ret = appendU16(ret, 2) // protocol version + ret = append(ret, msgTypeDHCommit) + ret = appendData(ret, c.gxBytes) + ret = appendData(ret, c.digest[:]) + return ret +} + +func (c *Conversation) processDHCommit(in []byte) error { + var ok1, ok2 bool + c.gxBytes, in, ok1 = getData(in) + digest, in, ok2 := getData(in) + if !ok1 || !ok2 || len(in) > 0 { + return errors.New("otr: corrupt DH commit message") + } + copy(c.digest[:], digest) + return nil +} + +func (c *Conversation) compareToDHCommit(in []byte) (int, error) { + _, in, ok1 := getData(in) + digest, in, ok2 := getData(in) + if !ok1 || !ok2 || len(in) > 0 { + return 0, errors.New("otr: corrupt DH commit message") + } + return bytes.Compare(c.digest[:], digest), nil +} + +func (c *Conversation) generateDHKey() []byte { + var yBytes [dhPrivateBytes]byte + c.y = c.randMPI(yBytes[:]) + c.gy = new(big.Int).Exp(g, c.y, p) + return c.serializeDHKey() +} + +func (c *Conversation) serializeDHKey() []byte { + var ret []byte + ret = appendU16(ret, 2) // protocol version + ret = append(ret, msgTypeDHKey) + ret = appendMPI(ret, c.gy) + return ret +} + +func (c *Conversation) processDHKey(in []byte) (isSame bool, err error) { + gy, in, ok := getMPI(in) + if !ok { + err = errors.New("otr: corrupt DH key message") + return + } + if gy.Cmp(g) < 0 || gy.Cmp(pMinus2) > 0 { + err = errors.New("otr: DH value out of range") + return + } + if c.gy != nil { + isSame = c.gy.Cmp(gy) == 0 + return + } + c.gy = gy + return +} + +func (c *Conversation) generateEncryptedSignature(keys *akeKeys, xFirst bool) ([]byte, []byte) { + var xb []byte + xb = c.PrivateKey.PublicKey.Serialize(xb) + + var verifyData []byte + if xFirst { + verifyData = appendMPI(verifyData, c.gx) + verifyData = appendMPI(verifyData, c.gy) + } else { + verifyData = appendMPI(verifyData, c.gy) + verifyData = appendMPI(verifyData, c.gx) + } + verifyData = append(verifyData, xb...) + verifyData = appendU32(verifyData, c.myKeyId) + + mac := hmac.New(sha256.New, keys.m1[:]) + mac.Write(verifyData) + mb := mac.Sum(nil) + + xb = appendU32(xb, c.myKeyId) + xb = append(xb, c.PrivateKey.Sign(c.rand(), mb)...) + + aesCipher, err := aes.NewCipher(keys.c[:]) + if err != nil { + panic(err.Error()) + } + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(xb, xb) + + mac = hmac.New(sha256.New, keys.m2[:]) + encryptedSig := appendData(nil, xb) + mac.Write(encryptedSig) + + return encryptedSig, mac.Sum(nil) +} + +func (c *Conversation) generateRevealSig() []byte { + s := new(big.Int).Exp(c.gy, c.x, p) + c.calcAKEKeys(s) + c.myKeyId++ + + encryptedSig, mac := c.generateEncryptedSignature(&c.revealKeys, true /* gx comes first */) + + c.myCurrentDHPub = c.gx + c.myCurrentDHPriv = c.x + c.rotateDHKeys() + incCounter(&c.myCounter) + + var ret []byte + ret = appendU16(ret, 2) + ret = append(ret, msgTypeRevealSig) + ret = appendData(ret, c.r[:]) + ret = append(ret, encryptedSig...) + ret = append(ret, mac[:20]...) + return ret +} + +func (c *Conversation) processEncryptedSig(encryptedSig, theirMAC []byte, keys *akeKeys, xFirst bool) error { + mac := hmac.New(sha256.New, keys.m2[:]) + mac.Write(appendData(nil, encryptedSig)) + myMAC := mac.Sum(nil)[:20] + + if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 { + return errors.New("bad signature MAC in encrypted signature") + } + + aesCipher, err := aes.NewCipher(keys.c[:]) + if err != nil { + panic(err.Error()) + } + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(encryptedSig, encryptedSig) + + sig := encryptedSig + sig, ok1 := c.TheirPublicKey.Parse(sig) + keyId, sig, ok2 := getU32(sig) + if !ok1 || !ok2 { + return errors.New("otr: corrupt encrypted signature") + } + + var verifyData []byte + if xFirst { + verifyData = appendMPI(verifyData, c.gx) + verifyData = appendMPI(verifyData, c.gy) + } else { + verifyData = appendMPI(verifyData, c.gy) + verifyData = appendMPI(verifyData, c.gx) + } + verifyData = c.TheirPublicKey.Serialize(verifyData) + verifyData = appendU32(verifyData, keyId) + + mac = hmac.New(sha256.New, keys.m1[:]) + mac.Write(verifyData) + mb := mac.Sum(nil) + + sig, ok1 = c.TheirPublicKey.Verify(mb, sig) + if !ok1 { + return errors.New("bad signature in encrypted signature") + } + if len(sig) > 0 { + return errors.New("corrupt encrypted signature") + } + + c.theirKeyId = keyId + zero(c.theirLastCtr[:]) + return nil +} + +func (c *Conversation) processRevealSig(in []byte) error { + r, in, ok1 := getData(in) + encryptedSig, in, ok2 := getData(in) + theirMAC := in + if !ok1 || !ok2 || len(theirMAC) != 20 { + return errors.New("otr: corrupt reveal signature message") + } + + aesCipher, err := aes.NewCipher(r) + if err != nil { + return errors.New("otr: cannot create AES cipher from reveal signature message: " + err.Error()) + } + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(c.gxBytes, c.gxBytes) + h := sha256.New() + h.Write(c.gxBytes) + digest := h.Sum(nil) + if len(digest) != len(c.digest) || subtle.ConstantTimeCompare(digest, c.digest[:]) == 0 { + return errors.New("otr: bad commit MAC in reveal signature message") + } + var rest []byte + c.gx, rest, ok1 = getMPI(c.gxBytes) + if !ok1 || len(rest) > 0 { + return errors.New("otr: gx corrupt after decryption") + } + if c.gx.Cmp(g) < 0 || c.gx.Cmp(pMinus2) > 0 { + return errors.New("otr: DH value out of range") + } + s := new(big.Int).Exp(c.gx, c.y, p) + c.calcAKEKeys(s) + + if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.revealKeys, true /* gx comes first */); err != nil { + return errors.New("otr: in reveal signature message: " + err.Error()) + } + + c.theirCurrentDHPub = c.gx + c.theirLastDHPub = nil + + return nil +} + +func (c *Conversation) generateSig() []byte { + c.myKeyId++ + + encryptedSig, mac := c.generateEncryptedSignature(&c.sigKeys, false /* gy comes first */) + + c.myCurrentDHPub = c.gy + c.myCurrentDHPriv = c.y + c.rotateDHKeys() + incCounter(&c.myCounter) + + var ret []byte + ret = appendU16(ret, 2) + ret = append(ret, msgTypeSig) + ret = append(ret, encryptedSig...) + ret = append(ret, mac[:macPrefixBytes]...) + return ret +} + +func (c *Conversation) processSig(in []byte) error { + encryptedSig, in, ok1 := getData(in) + theirMAC := in + if !ok1 || len(theirMAC) != macPrefixBytes { + return errors.New("otr: corrupt signature message") + } + + if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.sigKeys, false /* gy comes first */); err != nil { + return errors.New("otr: in signature message: " + err.Error()) + } + + c.theirCurrentDHPub = c.gy + c.theirLastDHPub = nil + + return nil +} + +func (c *Conversation) rotateDHKeys() { + // evict slots using our retired key id + for i := range c.keySlots { + slot := &c.keySlots[i] + if slot.used && slot.myKeyId == c.myKeyId-1 { + slot.used = false + c.oldMACs = append(c.oldMACs, slot.recvMACKey...) + } + } + + c.myLastDHPriv = c.myCurrentDHPriv + c.myLastDHPub = c.myCurrentDHPub + + var xBytes [dhPrivateBytes]byte + c.myCurrentDHPriv = c.randMPI(xBytes[:]) + c.myCurrentDHPub = new(big.Int).Exp(g, c.myCurrentDHPriv, p) + c.myKeyId++ +} + +func (c *Conversation) processData(in []byte) (out []byte, tlvs []tlv, err error) { + origIn := in + flags, in, ok1 := getU8(in) + theirKeyId, in, ok2 := getU32(in) + myKeyId, in, ok3 := getU32(in) + y, in, ok4 := getMPI(in) + counter, in, ok5 := getNBytes(in, 8) + encrypted, in, ok6 := getData(in) + macedData := origIn[:len(origIn)-len(in)] + theirMAC, in, ok7 := getNBytes(in, macPrefixBytes) + _, in, ok8 := getData(in) + if !ok1 || !ok2 || !ok3 || !ok4 || !ok5 || !ok6 || !ok7 || !ok8 || len(in) > 0 { + err = errors.New("otr: corrupt data message") + return + } + + ignoreErrors := flags&1 != 0 + + slot, err := c.calcDataKeys(myKeyId, theirKeyId) + if err != nil { + if ignoreErrors { + err = nil + } + return + } + + mac := hmac.New(sha1.New, slot.recvMACKey) + mac.Write([]byte{0, 2, 3}) + mac.Write(macedData) + myMAC := mac.Sum(nil) + if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 { + if !ignoreErrors { + err = errors.New("otr: bad MAC on data message") + } + return + } + + if bytes.Compare(counter, slot.theirLastCtr[:]) <= 0 { + err = errors.New("otr: counter regressed") + return + } + copy(slot.theirLastCtr[:], counter) + + var iv [aes.BlockSize]byte + copy(iv[:], counter) + aesCipher, err := aes.NewCipher(slot.recvAESKey) + if err != nil { + panic(err.Error()) + } + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(encrypted, encrypted) + decrypted := encrypted + + if myKeyId == c.myKeyId { + c.rotateDHKeys() + } + if theirKeyId == c.theirKeyId { + // evict slots using their retired key id + for i := range c.keySlots { + slot := &c.keySlots[i] + if slot.used && slot.theirKeyId == theirKeyId-1 { + slot.used = false + c.oldMACs = append(c.oldMACs, slot.recvMACKey...) + } + } + + c.theirLastDHPub = c.theirCurrentDHPub + c.theirKeyId++ + c.theirCurrentDHPub = y + } + + if nulPos := bytes.IndexByte(decrypted, 0); nulPos >= 0 { + out = decrypted[:nulPos] + tlvData := decrypted[nulPos+1:] + for len(tlvData) > 0 { + var t tlv + var ok1, ok2, ok3 bool + + t.typ, tlvData, ok1 = getU16(tlvData) + t.length, tlvData, ok2 = getU16(tlvData) + t.data, tlvData, ok3 = getNBytes(tlvData, int(t.length)) + if !ok1 || !ok2 || !ok3 { + err = errors.New("otr: corrupt tlv data") + return + } + tlvs = append(tlvs, t) + } + } else { + out = decrypted + } + + return +} + +func (c *Conversation) generateData(msg []byte, extra *tlv) []byte { + slot, err := c.calcDataKeys(c.myKeyId-1, c.theirKeyId) + if err != nil { + panic("otr: failed to generate sending keys: " + err.Error()) + } + + var plaintext []byte + plaintext = append(plaintext, msg...) + plaintext = append(plaintext, 0) + + padding := paddingGranularity - ((len(plaintext) + 4) % paddingGranularity) + plaintext = appendU16(plaintext, tlvTypePadding) + plaintext = appendU16(plaintext, uint16(padding)) + for i := 0; i < padding; i++ { + plaintext = append(plaintext, 0) + } + + if extra != nil { + plaintext = appendU16(plaintext, extra.typ) + plaintext = appendU16(plaintext, uint16(len(extra.data))) + plaintext = append(plaintext, extra.data...) + } + + encrypted := make([]byte, len(plaintext)) + + var iv [aes.BlockSize]byte + copy(iv[:], c.myCounter[:]) + aesCipher, err := aes.NewCipher(slot.sendAESKey) + if err != nil { + panic(err.Error()) + } + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(encrypted, plaintext) + + var ret []byte + ret = appendU16(ret, 2) + ret = append(ret, msgTypeData) + ret = append(ret, 0 /* flags */) + ret = appendU32(ret, c.myKeyId-1) + ret = appendU32(ret, c.theirKeyId) + ret = appendMPI(ret, c.myCurrentDHPub) + ret = append(ret, c.myCounter[:]...) + ret = appendData(ret, encrypted) + + mac := hmac.New(sha1.New, slot.sendMACKey) + mac.Write(ret) + ret = append(ret, mac.Sum(nil)[:macPrefixBytes]...) + ret = appendData(ret, c.oldMACs) + c.oldMACs = nil + incCounter(&c.myCounter) + + return ret +} + +func incCounter(counter *[8]byte) { + for i := 7; i >= 0; i-- { + counter[i]++ + if counter[i] > 0 { + break + } + } +} + +// calcDataKeys computes the keys used to encrypt a data message given the key +// IDs. +func (c *Conversation) calcDataKeys(myKeyId, theirKeyId uint32) (slot *keySlot, err error) { + // Check for a cache hit. + for i := range c.keySlots { + slot = &c.keySlots[i] + if slot.used && slot.theirKeyId == theirKeyId && slot.myKeyId == myKeyId { + return + } + } + + // Find an empty slot to write into. + slot = nil + for i := range c.keySlots { + if !c.keySlots[i].used { + slot = &c.keySlots[i] + break + } + } + if slot == nil { + return nil, errors.New("otr: internal error: no more key slots") + } + + var myPriv, myPub, theirPub *big.Int + + if myKeyId == c.myKeyId { + myPriv = c.myCurrentDHPriv + myPub = c.myCurrentDHPub + } else if myKeyId == c.myKeyId-1 { + myPriv = c.myLastDHPriv + myPub = c.myLastDHPub + } else { + err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when I'm on " + strconv.FormatUint(uint64(c.myKeyId), 10)) + return + } + + if theirKeyId == c.theirKeyId { + theirPub = c.theirCurrentDHPub + } else if theirKeyId == c.theirKeyId-1 && c.theirLastDHPub != nil { + theirPub = c.theirLastDHPub + } else { + err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when they're on " + strconv.FormatUint(uint64(c.myKeyId), 10)) + return + } + + var sendPrefixByte, recvPrefixByte [1]byte + + if myPub.Cmp(theirPub) > 0 { + // we're the high end + sendPrefixByte[0], recvPrefixByte[0] = 1, 2 + } else { + // we're the low end + sendPrefixByte[0], recvPrefixByte[0] = 2, 1 + } + + s := new(big.Int).Exp(theirPub, myPriv, p) + sBytes := appendMPI(nil, s) + + h := sha1.New() + h.Write(sendPrefixByte[:]) + h.Write(sBytes) + slot.sendAESKey = h.Sum(slot.sendAESKey[:0])[:16] + + h.Reset() + h.Write(slot.sendAESKey) + slot.sendMACKey = h.Sum(slot.sendMACKey[:0]) + + h.Reset() + h.Write(recvPrefixByte[:]) + h.Write(sBytes) + slot.recvAESKey = h.Sum(slot.recvAESKey[:0])[:16] + + h.Reset() + h.Write(slot.recvAESKey) + slot.recvMACKey = h.Sum(slot.recvMACKey[:0]) + + slot.theirKeyId = theirKeyId + slot.myKeyId = myKeyId + slot.used = true + + zero(slot.theirLastCtr[:]) + return +} + +func (c *Conversation) calcAKEKeys(s *big.Int) { + mpi := appendMPI(nil, s) + h := sha256.New() + + var cBytes [32]byte + hashWithPrefix(c.SSID[:], 0, mpi, h) + + hashWithPrefix(cBytes[:], 1, mpi, h) + copy(c.revealKeys.c[:], cBytes[:16]) + copy(c.sigKeys.c[:], cBytes[16:]) + + hashWithPrefix(c.revealKeys.m1[:], 2, mpi, h) + hashWithPrefix(c.revealKeys.m2[:], 3, mpi, h) + hashWithPrefix(c.sigKeys.m1[:], 4, mpi, h) + hashWithPrefix(c.sigKeys.m2[:], 5, mpi, h) +} + +func hashWithPrefix(out []byte, prefix byte, in []byte, h hash.Hash) { + h.Reset() + var p [1]byte + p[0] = prefix + h.Write(p[:]) + h.Write(in) + if len(out) == h.Size() { + h.Sum(out[:0]) + } else { + digest := h.Sum(nil) + copy(out, digest) + } +} + +func (c *Conversation) encode(msg []byte) [][]byte { + b64 := make([]byte, base64.StdEncoding.EncodedLen(len(msg))+len(msgPrefix)+1) + base64.StdEncoding.Encode(b64[len(msgPrefix):], msg) + copy(b64, msgPrefix) + b64[len(b64)-1] = '.' + + if c.FragmentSize < minFragmentSize || len(b64) <= c.FragmentSize { + // We can encode this in a single fragment. + return [][]byte{b64} + } + + // We have to fragment this message. + var ret [][]byte + bytesPerFragment := c.FragmentSize - minFragmentSize + numFragments := (len(b64) + bytesPerFragment) / bytesPerFragment + + for i := 0; i < numFragments; i++ { + frag := []byte("?OTR," + strconv.Itoa(i+1) + "," + strconv.Itoa(numFragments) + ",") + todo := bytesPerFragment + if todo > len(b64) { + todo = len(b64) + } + frag = append(frag, b64[:todo]...) + b64 = b64[todo:] + frag = append(frag, ',') + ret = append(ret, frag) + } + + return ret +} + +func (c *Conversation) reset() { + c.myKeyId = 0 + + for i := range c.keySlots { + c.keySlots[i].used = false + } +} + +type PublicKey struct { + dsa.PublicKey +} + +func (pk *PublicKey) Parse(in []byte) ([]byte, bool) { + var ok bool + var pubKeyType uint16 + + if pubKeyType, in, ok = getU16(in); !ok || pubKeyType != 0 { + return nil, false + } + if pk.P, in, ok = getMPI(in); !ok { + return nil, false + } + if pk.Q, in, ok = getMPI(in); !ok { + return nil, false + } + if pk.G, in, ok = getMPI(in); !ok { + return nil, false + } + if pk.Y, in, ok = getMPI(in); !ok { + return nil, false + } + + return in, true +} + +func (pk *PublicKey) Serialize(in []byte) []byte { + in = appendU16(in, 0) + in = appendMPI(in, pk.P) + in = appendMPI(in, pk.Q) + in = appendMPI(in, pk.G) + in = appendMPI(in, pk.Y) + return in +} + +// Fingerprint returns the 20-byte, binary fingerprint of the PublicKey. +func (pk *PublicKey) Fingerprint() []byte { + b := pk.Serialize(nil) + h := sha1.New() + h.Write(b[2:]) + return h.Sum(nil) +} + +func (pk *PublicKey) Verify(hashed, sig []byte) ([]byte, bool) { + if len(sig) != 2*dsaSubgroupBytes { + return nil, false + } + r := new(big.Int).SetBytes(sig[:dsaSubgroupBytes]) + s := new(big.Int).SetBytes(sig[dsaSubgroupBytes:]) + ok := dsa.Verify(&pk.PublicKey, hashed, r, s) + return sig[dsaSubgroupBytes*2:], ok +} + +type PrivateKey struct { + PublicKey + dsa.PrivateKey +} + +func (priv *PrivateKey) Sign(rand io.Reader, hashed []byte) []byte { + r, s, err := dsa.Sign(rand, &priv.PrivateKey, hashed) + if err != nil { + panic(err.Error()) + } + rBytes := r.Bytes() + sBytes := s.Bytes() + if len(rBytes) > dsaSubgroupBytes || len(sBytes) > dsaSubgroupBytes { + panic("DSA signature too large") + } + + out := make([]byte, 2*dsaSubgroupBytes) + copy(out[dsaSubgroupBytes-len(rBytes):], rBytes) + copy(out[len(out)-len(sBytes):], sBytes) + return out +} + +func (priv *PrivateKey) Serialize(in []byte) []byte { + in = priv.PublicKey.Serialize(in) + in = appendMPI(in, priv.PrivateKey.X) + return in +} + +func (priv *PrivateKey) Parse(in []byte) ([]byte, bool) { + in, ok := priv.PublicKey.Parse(in) + if !ok { + return in, ok + } + priv.PrivateKey.PublicKey = priv.PublicKey.PublicKey + priv.PrivateKey.X, in, ok = getMPI(in) + return in, ok +} + +func (priv *PrivateKey) Generate(rand io.Reader) { + if err := dsa.GenerateParameters(&priv.PrivateKey.PublicKey.Parameters, rand, dsa.L1024N160); err != nil { + panic(err.Error()) + } + if err := dsa.GenerateKey(&priv.PrivateKey, rand); err != nil { + panic(err.Error()) + } + priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey +} + +func notHex(r rune) bool { + if r >= '0' && r <= '9' || + r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' { + return false + } + + return true +} + +// Import parses the contents of a libotr private key file. +func (priv *PrivateKey) Import(in []byte) bool { + mpiStart := []byte(" #") + + mpis := make([]*big.Int, 5) + + for i := 0; i < len(mpis); i++ { + start := bytes.Index(in, mpiStart) + if start == -1 { + return false + } + in = in[start+len(mpiStart):] + end := bytes.IndexFunc(in, notHex) + if end == -1 { + return false + } + hexBytes := in[:end] + in = in[end:] + + if len(hexBytes)&1 != 0 { + return false + } + + mpiBytes := make([]byte, len(hexBytes)/2) + if _, err := hex.Decode(mpiBytes, hexBytes); err != nil { + return false + } + + mpis[i] = new(big.Int).SetBytes(mpiBytes) + } + + for _, mpi := range mpis { + if mpi.Sign() <= 0 { + return false + } + } + + priv.PrivateKey.P = mpis[0] + priv.PrivateKey.Q = mpis[1] + priv.PrivateKey.G = mpis[2] + priv.PrivateKey.Y = mpis[3] + priv.PrivateKey.X = mpis[4] + priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey + + a := new(big.Int).Exp(priv.PrivateKey.G, priv.PrivateKey.X, priv.PrivateKey.P) + return a.Cmp(priv.PrivateKey.Y) == 0 +} + +func getU8(in []byte) (uint8, []byte, bool) { + if len(in) < 1 { + return 0, in, false + } + return in[0], in[1:], true +} + +func getU16(in []byte) (uint16, []byte, bool) { + if len(in) < 2 { + return 0, in, false + } + r := uint16(in[0])<<8 | uint16(in[1]) + return r, in[2:], true +} + +func getU32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, in, false + } + r := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3]) + return r, in[4:], true +} + +func getMPI(in []byte) (*big.Int, []byte, bool) { + l, in, ok := getU32(in) + if !ok || uint32(len(in)) < l { + return nil, in, false + } + r := new(big.Int).SetBytes(in[:l]) + return r, in[l:], true +} + +func getData(in []byte) ([]byte, []byte, bool) { + l, in, ok := getU32(in) + if !ok || uint32(len(in)) < l { + return nil, in, false + } + return in[:l], in[l:], true +} + +func getNBytes(in []byte, n int) ([]byte, []byte, bool) { + if len(in) < n { + return nil, in, false + } + return in[:n], in[n:], true +} + +func appendU16(out []byte, v uint16) []byte { + out = append(out, byte(v>>8), byte(v)) + return out +} + +func appendU32(out []byte, v uint32) []byte { + out = append(out, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) + return out +} + +func appendData(out, v []byte) []byte { + out = appendU32(out, uint32(len(v))) + out = append(out, v...) + return out +} + +func appendMPI(out []byte, v *big.Int) []byte { + vBytes := v.Bytes() + out = appendU32(out, uint32(len(vBytes))) + out = append(out, vBytes...) + return out +} + +func appendMPIs(out []byte, mpis ...*big.Int) []byte { + for _, mpi := range mpis { + out = appendMPI(out, mpi) + } + return out +} + +func zero(b []byte) { + for i := range b { + b[i] = 0 + } +} diff --git a/vendor/golang.org/x/crypto/otr/otr_test.go b/vendor/golang.org/x/crypto/otr/otr_test.go new file mode 100644 index 0000000..cfcd062 --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/otr_test.go @@ -0,0 +1,470 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package otr + +import ( + "bufio" + "bytes" + "crypto/rand" + "encoding/hex" + "math/big" + "os" + "os/exec" + "testing" +) + +var isQueryTests = []struct { + msg string + expectedVersion int +}{ + {"foo", 0}, + {"?OtR", 0}, + {"?OtR?", 0}, + {"?OTR?", 0}, + {"?OTRv?", 0}, + {"?OTRv1?", 0}, + {"?OTR?v1?", 0}, + {"?OTR?v?", 0}, + {"?OTR?v2?", 2}, + {"?OTRv2?", 2}, + {"?OTRv23?", 2}, + {"?OTRv23 ?", 0}, +} + +func TestIsQuery(t *testing.T) { + for i, test := range isQueryTests { + version := isQuery([]byte(test.msg)) + if version != test.expectedVersion { + t.Errorf("#%d: got %d, want %d", i, version, test.expectedVersion) + } + } +} + +var alicePrivateKeyHex = "000000000080c81c2cb2eb729b7e6fd48e975a932c638b3a9055478583afa46755683e30102447f6da2d8bec9f386bbb5da6403b0040fee8650b6ab2d7f32c55ab017ae9b6aec8c324ab5844784e9a80e194830d548fb7f09a0410df2c4d5c8bc2b3e9ad484e65412be689cf0834694e0839fb2954021521ffdffb8f5c32c14dbf2020b3ce7500000014da4591d58def96de61aea7b04a8405fe1609308d000000808ddd5cb0b9d66956e3dea5a915d9aba9d8a6e7053b74dadb2fc52f9fe4e5bcc487d2305485ed95fed026ad93f06ebb8c9e8baf693b7887132c7ffdd3b0f72f4002ff4ed56583ca7c54458f8c068ca3e8a4dfa309d1dd5d34e2a4b68e6f4338835e5e0fb4317c9e4c7e4806dafda3ef459cd563775a586dd91b1319f72621bf3f00000080b8147e74d8c45e6318c37731b8b33b984a795b3653c2cd1d65cc99efe097cb7eb2fa49569bab5aab6e8a1c261a27d0f7840a5e80b317e6683042b59b6dceca2879c6ffc877a465be690c15e4a42f9a7588e79b10faac11b1ce3741fcef7aba8ce05327a2c16d279ee1b3d77eb783fb10e3356caa25635331e26dd42b8396c4d00000001420bec691fea37ecea58a5c717142f0b804452f57" + +var aliceFingerprintHex = "0bb01c360424522e94ee9c346ce877a1a4288b2f" + +var bobPrivateKeyHex = "000000000080a5138eb3d3eb9c1d85716faecadb718f87d31aaed1157671d7fee7e488f95e8e0ba60ad449ec732710a7dec5190f7182af2e2f98312d98497221dff160fd68033dd4f3a33b7c078d0d9f66e26847e76ca7447d4bab35486045090572863d9e4454777f24d6706f63e02548dfec2d0a620af37bbc1d24f884708a212c343b480d00000014e9c58f0ea21a5e4dfd9f44b6a9f7f6a9961a8fa9000000803c4d111aebd62d3c50c2889d420a32cdf1e98b70affcc1fcf44d59cca2eb019f6b774ef88153fb9b9615441a5fe25ea2d11b74ce922ca0232bd81b3c0fcac2a95b20cb6e6c0c5c1ace2e26f65dc43c751af0edbb10d669890e8ab6beea91410b8b2187af1a8347627a06ecea7e0f772c28aae9461301e83884860c9b656c722f0000008065af8625a555ea0e008cd04743671a3cda21162e83af045725db2eb2bb52712708dc0cc1a84c08b3649b88a966974bde27d8612c2861792ec9f08786a246fcadd6d8d3a81a32287745f309238f47618c2bd7612cb8b02d940571e0f30b96420bcd462ff542901b46109b1e5ad6423744448d20a57818a8cbb1647d0fea3b664e0000001440f9f2eb554cb00d45a5826b54bfa419b6980e48" + +func TestKeySerialization(t *testing.T) { + var priv PrivateKey + alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex) + rest, ok := priv.Parse(alicePrivateKey) + if !ok { + t.Error("failed to parse private key") + } + if len(rest) > 0 { + t.Error("data remaining after parsing private key") + } + + out := priv.Serialize(nil) + if !bytes.Equal(alicePrivateKey, out) { + t.Errorf("serialization (%x) is not equal to original (%x)", out, alicePrivateKey) + } + + aliceFingerprint, _ := hex.DecodeString(aliceFingerprintHex) + fingerprint := priv.PublicKey.Fingerprint() + if !bytes.Equal(aliceFingerprint, fingerprint) { + t.Errorf("fingerprint (%x) is not equal to expected value (%x)", fingerprint, aliceFingerprint) + } +} + +const libOTRPrivateKey = `(privkeys + (account +(name "foo@example.com") +(protocol prpl-jabber) +(private-key + (dsa + (p #00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB8C031D3561FECEE72EBB4A090D450A9B7A857#) + (q #00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) + (g #535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) + (y #0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A3C0FF501E3DC673B76D7BABF349009B6ECF#) + (x #14D0345A3562C480A039E3C72764F72D79043216#) + ) + ) + ) +)` + +func TestParseLibOTRPrivateKey(t *testing.T) { + var priv PrivateKey + + if !priv.Import([]byte(libOTRPrivateKey)) { + t.Fatalf("Failed to import sample private key") + } +} + +func TestSignVerify(t *testing.T) { + var priv PrivateKey + alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex) + _, ok := priv.Parse(alicePrivateKey) + if !ok { + t.Error("failed to parse private key") + } + + var msg [32]byte + rand.Reader.Read(msg[:]) + + sig := priv.Sign(rand.Reader, msg[:]) + rest, ok := priv.PublicKey.Verify(msg[:], sig) + if !ok { + t.Errorf("signature (%x) of %x failed to verify", sig, msg[:]) + } else if len(rest) > 0 { + t.Error("signature data remains after verification") + } + + sig[10] ^= 80 + _, ok = priv.PublicKey.Verify(msg[:], sig) + if ok { + t.Errorf("corrupted signature (%x) of %x verified", sig, msg[:]) + } +} + +func setupConversation(t *testing.T) (alice, bob *Conversation) { + alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex) + bobPrivateKey, _ := hex.DecodeString(bobPrivateKeyHex) + + alice, bob = new(Conversation), new(Conversation) + + alice.PrivateKey = new(PrivateKey) + bob.PrivateKey = new(PrivateKey) + alice.PrivateKey.Parse(alicePrivateKey) + bob.PrivateKey.Parse(bobPrivateKey) + alice.FragmentSize = 100 + bob.FragmentSize = 100 + + if alice.IsEncrypted() { + t.Error("Alice believes that the conversation is secure before we've started") + } + if bob.IsEncrypted() { + t.Error("Bob believes that the conversation is secure before we've started") + } + + performHandshake(t, alice, bob) + return alice, bob +} + +func performHandshake(t *testing.T, alice, bob *Conversation) { + var alicesMessage, bobsMessage [][]byte + var out []byte + var aliceChange, bobChange SecurityChange + var err error + alicesMessage = append(alicesMessage, []byte(QueryMessage)) + + for round := 0; len(alicesMessage) > 0 || len(bobsMessage) > 0; round++ { + bobsMessage = nil + for i, msg := range alicesMessage { + out, _, bobChange, bobsMessage, err = bob.Receive(msg) + if len(out) > 0 { + t.Errorf("Bob generated output during key exchange, round %d, message %d", round, i) + } + if err != nil { + t.Fatalf("Bob returned an error, round %d, message %d (%x): %s", round, i, msg, err) + } + if len(bobsMessage) > 0 && i != len(alicesMessage)-1 { + t.Errorf("Bob produced output while processing a fragment, round %d, message %d", round, i) + } + } + + alicesMessage = nil + for i, msg := range bobsMessage { + out, _, aliceChange, alicesMessage, err = alice.Receive(msg) + if len(out) > 0 { + t.Errorf("Alice generated output during key exchange, round %d, message %d", round, i) + } + if err != nil { + t.Fatalf("Alice returned an error, round %d, message %d (%x): %s", round, i, msg, err) + } + if len(alicesMessage) > 0 && i != len(bobsMessage)-1 { + t.Errorf("Alice produced output while processing a fragment, round %d, message %d", round, i) + } + } + } + + if aliceChange != NewKeys { + t.Errorf("Alice terminated without signaling new keys") + } + if bobChange != NewKeys { + t.Errorf("Bob terminated without signaling new keys") + } + + if !bytes.Equal(alice.SSID[:], bob.SSID[:]) { + t.Errorf("Session identifiers don't match. Alice has %x, Bob has %x", alice.SSID[:], bob.SSID[:]) + } + + if !alice.IsEncrypted() { + t.Error("Alice doesn't believe that the conversation is secure") + } + if !bob.IsEncrypted() { + t.Error("Bob doesn't believe that the conversation is secure") + } +} + +const ( + firstRoundTrip = iota + subsequentRoundTrip + noMACKeyCheck +) + +func roundTrip(t *testing.T, alice, bob *Conversation, message []byte, macKeyCheck int) { + alicesMessage, err := alice.Send(message) + if err != nil { + t.Errorf("Error from Alice sending message: %s", err) + } + + if len(alice.oldMACs) != 0 { + t.Errorf("Alice has not revealed all MAC keys") + } + + for i, msg := range alicesMessage { + out, encrypted, _, _, err := bob.Receive(msg) + + if err != nil { + t.Errorf("Error generated while processing test message: %s", err.Error()) + } + if len(out) > 0 { + if i != len(alicesMessage)-1 { + t.Fatal("Bob produced a message while processing a fragment of Alice's") + } + if !encrypted { + t.Errorf("Message was not marked as encrypted") + } + if !bytes.Equal(out, message) { + t.Errorf("Message corrupted: got %x, want %x", out, message) + } + } + } + + switch macKeyCheck { + case firstRoundTrip: + if len(bob.oldMACs) != 0 { + t.Errorf("Bob should not have MAC keys to reveal") + } + case subsequentRoundTrip: + if len(bob.oldMACs) != 40 { + t.Errorf("Bob has %d bytes of MAC keys to reveal, but should have 40", len(bob.oldMACs)) + } + } + + bobsMessage, err := bob.Send(message) + if err != nil { + t.Errorf("Error from Bob sending message: %s", err) + } + + if len(bob.oldMACs) != 0 { + t.Errorf("Bob has not revealed all MAC keys") + } + + for i, msg := range bobsMessage { + out, encrypted, _, _, err := alice.Receive(msg) + + if err != nil { + t.Errorf("Error generated while processing test message: %s", err.Error()) + } + if len(out) > 0 { + if i != len(bobsMessage)-1 { + t.Fatal("Alice produced a message while processing a fragment of Bob's") + } + if !encrypted { + t.Errorf("Message was not marked as encrypted") + } + if !bytes.Equal(out, message) { + t.Errorf("Message corrupted: got %x, want %x", out, message) + } + } + } + + switch macKeyCheck { + case firstRoundTrip: + if len(alice.oldMACs) != 20 { + t.Errorf("Alice has %d bytes of MAC keys to reveal, but should have 20", len(alice.oldMACs)) + } + case subsequentRoundTrip: + if len(alice.oldMACs) != 40 { + t.Errorf("Alice has %d bytes of MAC keys to reveal, but should have 40", len(alice.oldMACs)) + } + } +} + +func TestConversation(t *testing.T) { + alice, bob := setupConversation(t) + + var testMessages = [][]byte{ + []byte("hello"), []byte("bye"), + } + + roundTripType := firstRoundTrip + + for _, testMessage := range testMessages { + roundTrip(t, alice, bob, testMessage, roundTripType) + roundTripType = subsequentRoundTrip + } +} + +func TestGoodSMP(t *testing.T) { + var alice, bob Conversation + + alice.smp.secret = new(big.Int).SetInt64(42) + bob.smp.secret = alice.smp.secret + + var alicesMessages, bobsMessages []tlv + var aliceComplete, bobComplete bool + var err error + var out tlv + + alicesMessages = alice.startSMP("") + for round := 0; len(alicesMessages) > 0 || len(bobsMessages) > 0; round++ { + bobsMessages = bobsMessages[:0] + for i, msg := range alicesMessages { + out, bobComplete, err = bob.processSMP(msg) + if err != nil { + t.Errorf("Error from Bob in round %d: %s", round, err) + } + if bobComplete && i != len(alicesMessages)-1 { + t.Errorf("Bob returned a completed signal before processing all of Alice's messages in round %d", round) + } + if out.typ != 0 { + bobsMessages = append(bobsMessages, out) + } + } + + alicesMessages = alicesMessages[:0] + for i, msg := range bobsMessages { + out, aliceComplete, err = alice.processSMP(msg) + if err != nil { + t.Errorf("Error from Alice in round %d: %s", round, err) + } + if aliceComplete && i != len(bobsMessages)-1 { + t.Errorf("Alice returned a completed signal before processing all of Bob's messages in round %d", round) + } + if out.typ != 0 { + alicesMessages = append(alicesMessages, out) + } + } + } + + if !aliceComplete || !bobComplete { + t.Errorf("SMP completed without both sides reporting success: alice: %v, bob: %v\n", aliceComplete, bobComplete) + } +} + +func TestBadSMP(t *testing.T) { + var alice, bob Conversation + + alice.smp.secret = new(big.Int).SetInt64(42) + bob.smp.secret = new(big.Int).SetInt64(43) + + var alicesMessages, bobsMessages []tlv + + alicesMessages = alice.startSMP("") + for round := 0; len(alicesMessages) > 0 || len(bobsMessages) > 0; round++ { + bobsMessages = bobsMessages[:0] + for _, msg := range alicesMessages { + out, complete, _ := bob.processSMP(msg) + if complete { + t.Errorf("Bob signaled completion in round %d", round) + } + if out.typ != 0 { + bobsMessages = append(bobsMessages, out) + } + } + + alicesMessages = alicesMessages[:0] + for _, msg := range bobsMessages { + out, complete, _ := alice.processSMP(msg) + if complete { + t.Errorf("Alice signaled completion in round %d", round) + } + if out.typ != 0 { + alicesMessages = append(alicesMessages, out) + } + } + } +} + +func TestRehandshaking(t *testing.T) { + alice, bob := setupConversation(t) + roundTrip(t, alice, bob, []byte("test"), firstRoundTrip) + roundTrip(t, alice, bob, []byte("test 2"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 3"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 4"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 5"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 6"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 7"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 8"), subsequentRoundTrip) + performHandshake(t, alice, bob) + roundTrip(t, alice, bob, []byte("test"), noMACKeyCheck) + roundTrip(t, alice, bob, []byte("test 2"), noMACKeyCheck) +} + +func TestAgainstLibOTR(t *testing.T) { + // This test requires otr.c.test to be built as /tmp/a.out. + // If enabled, this tests runs forever performing OTR handshakes in a + // loop. + return + + alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex) + var alice Conversation + alice.PrivateKey = new(PrivateKey) + alice.PrivateKey.Parse(alicePrivateKey) + + cmd := exec.Command("/tmp/a.out") + cmd.Stderr = os.Stderr + + out, err := cmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + defer out.Close() + stdout, err := cmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + in := bufio.NewReader(stdout) + + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + out.Write([]byte(QueryMessage)) + out.Write([]byte("\n")) + var expectedText = []byte("test message") + + for { + line, isPrefix, err := in.ReadLine() + if isPrefix { + t.Fatal("line from subprocess too long") + } + if err != nil { + t.Fatal(err) + } + text, encrypted, change, alicesMessage, err := alice.Receive(line) + if err != nil { + t.Fatal(err) + } + for _, msg := range alicesMessage { + out.Write(msg) + out.Write([]byte("\n")) + } + if change == NewKeys { + alicesMessage, err := alice.Send([]byte("Go -> libotr test message")) + if err != nil { + t.Fatalf("error sending message: %s", err.Error()) + } else { + for _, msg := range alicesMessage { + out.Write(msg) + out.Write([]byte("\n")) + } + } + } + if len(text) > 0 { + if !bytes.Equal(text, expectedText) { + t.Fatalf("expected %x, but got %x", expectedText, text) + } + if !encrypted { + t.Fatal("message wasn't encrypted") + } + } + } +} diff --git a/vendor/golang.org/x/crypto/otr/smp.go b/vendor/golang.org/x/crypto/otr/smp.go new file mode 100644 index 0000000..dc6de4e --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/smp.go @@ -0,0 +1,572 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements the Socialist Millionaires Protocol as described in +// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html. The protocol +// specification is required in order to understand this code and, where +// possible, the variable names in the code match up with the spec. + +package otr + +import ( + "bytes" + "crypto/sha256" + "errors" + "hash" + "math/big" +) + +type smpFailure string + +func (s smpFailure) Error() string { + return string(s) +} + +var smpFailureError = smpFailure("otr: SMP protocol failed") +var smpSecretMissingError = smpFailure("otr: mutual secret needed") + +const smpVersion = 1 + +const ( + smpState1 = iota + smpState2 + smpState3 + smpState4 +) + +type smpState struct { + state int + a2, a3, b2, b3, pb, qb *big.Int + g2a, g3a *big.Int + g2, g3 *big.Int + g3b, papb, qaqb, ra *big.Int + saved *tlv + secret *big.Int + question string +} + +func (c *Conversation) startSMP(question string) (tlvs []tlv) { + if c.smp.state != smpState1 { + tlvs = append(tlvs, c.generateSMPAbort()) + } + tlvs = append(tlvs, c.generateSMP1(question)) + c.smp.question = "" + c.smp.state = smpState2 + return +} + +func (c *Conversation) resetSMP() { + c.smp.state = smpState1 + c.smp.secret = nil + c.smp.question = "" +} + +func (c *Conversation) processSMP(in tlv) (out tlv, complete bool, err error) { + data := in.data + + switch in.typ { + case tlvTypeSMPAbort: + if c.smp.state != smpState1 { + err = smpFailureError + } + c.resetSMP() + return + case tlvTypeSMP1WithQuestion: + // We preprocess this into a SMP1 message. + nulPos := bytes.IndexByte(data, 0) + if nulPos == -1 { + err = errors.New("otr: SMP message with question didn't contain a NUL byte") + return + } + c.smp.question = string(data[:nulPos]) + data = data[nulPos+1:] + } + + numMPIs, data, ok := getU32(data) + if !ok || numMPIs > 20 { + err = errors.New("otr: corrupt SMP message") + return + } + + mpis := make([]*big.Int, numMPIs) + for i := range mpis { + var ok bool + mpis[i], data, ok = getMPI(data) + if !ok { + err = errors.New("otr: corrupt SMP message") + return + } + } + + switch in.typ { + case tlvTypeSMP1, tlvTypeSMP1WithQuestion: + if c.smp.state != smpState1 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if c.smp.secret == nil { + err = smpSecretMissingError + return + } + if err = c.processSMP1(mpis); err != nil { + return + } + c.smp.state = smpState3 + out = c.generateSMP2() + case tlvTypeSMP2: + if c.smp.state != smpState2 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if out, err = c.processSMP2(mpis); err != nil { + out = c.generateSMPAbort() + return + } + c.smp.state = smpState4 + case tlvTypeSMP3: + if c.smp.state != smpState3 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if out, err = c.processSMP3(mpis); err != nil { + return + } + c.smp.state = smpState1 + c.smp.secret = nil + complete = true + case tlvTypeSMP4: + if c.smp.state != smpState4 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if err = c.processSMP4(mpis); err != nil { + out = c.generateSMPAbort() + return + } + c.smp.state = smpState1 + c.smp.secret = nil + complete = true + default: + panic("unknown SMP message") + } + + return +} + +func (c *Conversation) calcSMPSecret(mutualSecret []byte, weStarted bool) { + h := sha256.New() + h.Write([]byte{smpVersion}) + if weStarted { + h.Write(c.PrivateKey.PublicKey.Fingerprint()) + h.Write(c.TheirPublicKey.Fingerprint()) + } else { + h.Write(c.TheirPublicKey.Fingerprint()) + h.Write(c.PrivateKey.PublicKey.Fingerprint()) + } + h.Write(c.SSID[:]) + h.Write(mutualSecret) + c.smp.secret = new(big.Int).SetBytes(h.Sum(nil)) +} + +func (c *Conversation) generateSMP1(question string) tlv { + var randBuf [16]byte + c.smp.a2 = c.randMPI(randBuf[:]) + c.smp.a3 = c.randMPI(randBuf[:]) + g2a := new(big.Int).Exp(g, c.smp.a2, p) + g3a := new(big.Int).Exp(g, c.smp.a3, p) + h := sha256.New() + + r2 := c.randMPI(randBuf[:]) + r := new(big.Int).Exp(g, r2, p) + c2 := new(big.Int).SetBytes(hashMPIs(h, 1, r)) + d2 := new(big.Int).Mul(c.smp.a2, c2) + d2.Sub(r2, d2) + d2.Mod(d2, q) + if d2.Sign() < 0 { + d2.Add(d2, q) + } + + r3 := c.randMPI(randBuf[:]) + r.Exp(g, r3, p) + c3 := new(big.Int).SetBytes(hashMPIs(h, 2, r)) + d3 := new(big.Int).Mul(c.smp.a3, c3) + d3.Sub(r3, d3) + d3.Mod(d3, q) + if d3.Sign() < 0 { + d3.Add(d3, q) + } + + var ret tlv + if len(question) > 0 { + ret.typ = tlvTypeSMP1WithQuestion + ret.data = append(ret.data, question...) + ret.data = append(ret.data, 0) + } else { + ret.typ = tlvTypeSMP1 + } + ret.data = appendU32(ret.data, 6) + ret.data = appendMPIs(ret.data, g2a, c2, d2, g3a, c3, d3) + return ret +} + +func (c *Conversation) processSMP1(mpis []*big.Int) error { + if len(mpis) != 6 { + return errors.New("otr: incorrect number of arguments in SMP1 message") + } + g2a := mpis[0] + c2 := mpis[1] + d2 := mpis[2] + g3a := mpis[3] + c3 := mpis[4] + d3 := mpis[5] + h := sha256.New() + + r := new(big.Int).Exp(g, d2, p) + s := new(big.Int).Exp(g2a, c2, p) + r.Mul(r, s) + r.Mod(r, p) + t := new(big.Int).SetBytes(hashMPIs(h, 1, r)) + if c2.Cmp(t) != 0 { + return errors.New("otr: ZKP c2 incorrect in SMP1 message") + } + r.Exp(g, d3, p) + s.Exp(g3a, c3, p) + r.Mul(r, s) + r.Mod(r, p) + t.SetBytes(hashMPIs(h, 2, r)) + if c3.Cmp(t) != 0 { + return errors.New("otr: ZKP c3 incorrect in SMP1 message") + } + + c.smp.g2a = g2a + c.smp.g3a = g3a + return nil +} + +func (c *Conversation) generateSMP2() tlv { + var randBuf [16]byte + b2 := c.randMPI(randBuf[:]) + c.smp.b3 = c.randMPI(randBuf[:]) + r2 := c.randMPI(randBuf[:]) + r3 := c.randMPI(randBuf[:]) + r4 := c.randMPI(randBuf[:]) + r5 := c.randMPI(randBuf[:]) + r6 := c.randMPI(randBuf[:]) + + g2b := new(big.Int).Exp(g, b2, p) + g3b := new(big.Int).Exp(g, c.smp.b3, p) + + r := new(big.Int).Exp(g, r2, p) + h := sha256.New() + c2 := new(big.Int).SetBytes(hashMPIs(h, 3, r)) + d2 := new(big.Int).Mul(b2, c2) + d2.Sub(r2, d2) + d2.Mod(d2, q) + if d2.Sign() < 0 { + d2.Add(d2, q) + } + + r.Exp(g, r3, p) + c3 := new(big.Int).SetBytes(hashMPIs(h, 4, r)) + d3 := new(big.Int).Mul(c.smp.b3, c3) + d3.Sub(r3, d3) + d3.Mod(d3, q) + if d3.Sign() < 0 { + d3.Add(d3, q) + } + + c.smp.g2 = new(big.Int).Exp(c.smp.g2a, b2, p) + c.smp.g3 = new(big.Int).Exp(c.smp.g3a, c.smp.b3, p) + c.smp.pb = new(big.Int).Exp(c.smp.g3, r4, p) + c.smp.qb = new(big.Int).Exp(g, r4, p) + r.Exp(c.smp.g2, c.smp.secret, p) + c.smp.qb.Mul(c.smp.qb, r) + c.smp.qb.Mod(c.smp.qb, p) + + s := new(big.Int) + s.Exp(c.smp.g2, r6, p) + r.Exp(g, r5, p) + s.Mul(r, s) + s.Mod(s, p) + r.Exp(c.smp.g3, r5, p) + cp := new(big.Int).SetBytes(hashMPIs(h, 5, r, s)) + + // D5 = r5 - r4 cP mod q and D6 = r6 - y cP mod q + + s.Mul(r4, cp) + r.Sub(r5, s) + d5 := new(big.Int).Mod(r, q) + if d5.Sign() < 0 { + d5.Add(d5, q) + } + + s.Mul(c.smp.secret, cp) + r.Sub(r6, s) + d6 := new(big.Int).Mod(r, q) + if d6.Sign() < 0 { + d6.Add(d6, q) + } + + var ret tlv + ret.typ = tlvTypeSMP2 + ret.data = appendU32(ret.data, 11) + ret.data = appendMPIs(ret.data, g2b, c2, d2, g3b, c3, d3, c.smp.pb, c.smp.qb, cp, d5, d6) + return ret +} + +func (c *Conversation) processSMP2(mpis []*big.Int) (out tlv, err error) { + if len(mpis) != 11 { + err = errors.New("otr: incorrect number of arguments in SMP2 message") + return + } + g2b := mpis[0] + c2 := mpis[1] + d2 := mpis[2] + g3b := mpis[3] + c3 := mpis[4] + d3 := mpis[5] + pb := mpis[6] + qb := mpis[7] + cp := mpis[8] + d5 := mpis[9] + d6 := mpis[10] + h := sha256.New() + + r := new(big.Int).Exp(g, d2, p) + s := new(big.Int).Exp(g2b, c2, p) + r.Mul(r, s) + r.Mod(r, p) + s.SetBytes(hashMPIs(h, 3, r)) + if c2.Cmp(s) != 0 { + err = errors.New("otr: ZKP c2 failed in SMP2 message") + return + } + + r.Exp(g, d3, p) + s.Exp(g3b, c3, p) + r.Mul(r, s) + r.Mod(r, p) + s.SetBytes(hashMPIs(h, 4, r)) + if c3.Cmp(s) != 0 { + err = errors.New("otr: ZKP c3 failed in SMP2 message") + return + } + + c.smp.g2 = new(big.Int).Exp(g2b, c.smp.a2, p) + c.smp.g3 = new(big.Int).Exp(g3b, c.smp.a3, p) + + r.Exp(g, d5, p) + s.Exp(c.smp.g2, d6, p) + r.Mul(r, s) + s.Exp(qb, cp, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(c.smp.g3, d5, p) + t := new(big.Int).Exp(pb, cp, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 5, s, r)) + if cp.Cmp(t) != 0 { + err = errors.New("otr: ZKP cP failed in SMP2 message") + return + } + + var randBuf [16]byte + r4 := c.randMPI(randBuf[:]) + r5 := c.randMPI(randBuf[:]) + r6 := c.randMPI(randBuf[:]) + r7 := c.randMPI(randBuf[:]) + + pa := new(big.Int).Exp(c.smp.g3, r4, p) + r.Exp(c.smp.g2, c.smp.secret, p) + qa := new(big.Int).Exp(g, r4, p) + qa.Mul(qa, r) + qa.Mod(qa, p) + + r.Exp(g, r5, p) + s.Exp(c.smp.g2, r6, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(c.smp.g3, r5, p) + cp.SetBytes(hashMPIs(h, 6, s, r)) + + r.Mul(r4, cp) + d5 = new(big.Int).Sub(r5, r) + d5.Mod(d5, q) + if d5.Sign() < 0 { + d5.Add(d5, q) + } + + r.Mul(c.smp.secret, cp) + d6 = new(big.Int).Sub(r6, r) + d6.Mod(d6, q) + if d6.Sign() < 0 { + d6.Add(d6, q) + } + + r.ModInverse(qb, p) + qaqb := new(big.Int).Mul(qa, r) + qaqb.Mod(qaqb, p) + + ra := new(big.Int).Exp(qaqb, c.smp.a3, p) + r.Exp(qaqb, r7, p) + s.Exp(g, r7, p) + cr := new(big.Int).SetBytes(hashMPIs(h, 7, s, r)) + + r.Mul(c.smp.a3, cr) + d7 := new(big.Int).Sub(r7, r) + d7.Mod(d7, q) + if d7.Sign() < 0 { + d7.Add(d7, q) + } + + c.smp.g3b = g3b + c.smp.qaqb = qaqb + + r.ModInverse(pb, p) + c.smp.papb = new(big.Int).Mul(pa, r) + c.smp.papb.Mod(c.smp.papb, p) + c.smp.ra = ra + + out.typ = tlvTypeSMP3 + out.data = appendU32(out.data, 8) + out.data = appendMPIs(out.data, pa, qa, cp, d5, d6, ra, cr, d7) + return +} + +func (c *Conversation) processSMP3(mpis []*big.Int) (out tlv, err error) { + if len(mpis) != 8 { + err = errors.New("otr: incorrect number of arguments in SMP3 message") + return + } + pa := mpis[0] + qa := mpis[1] + cp := mpis[2] + d5 := mpis[3] + d6 := mpis[4] + ra := mpis[5] + cr := mpis[6] + d7 := mpis[7] + h := sha256.New() + + r := new(big.Int).Exp(g, d5, p) + s := new(big.Int).Exp(c.smp.g2, d6, p) + r.Mul(r, s) + s.Exp(qa, cp, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(c.smp.g3, d5, p) + t := new(big.Int).Exp(pa, cp, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 6, s, r)) + if t.Cmp(cp) != 0 { + err = errors.New("otr: ZKP cP failed in SMP3 message") + return + } + + r.ModInverse(c.smp.qb, p) + qaqb := new(big.Int).Mul(qa, r) + qaqb.Mod(qaqb, p) + + r.Exp(qaqb, d7, p) + s.Exp(ra, cr, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(g, d7, p) + t.Exp(c.smp.g3a, cr, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 7, s, r)) + if t.Cmp(cr) != 0 { + err = errors.New("otr: ZKP cR failed in SMP3 message") + return + } + + var randBuf [16]byte + r7 := c.randMPI(randBuf[:]) + rb := new(big.Int).Exp(qaqb, c.smp.b3, p) + + r.Exp(qaqb, r7, p) + s.Exp(g, r7, p) + cr = new(big.Int).SetBytes(hashMPIs(h, 8, s, r)) + + r.Mul(c.smp.b3, cr) + d7 = new(big.Int).Sub(r7, r) + d7.Mod(d7, q) + if d7.Sign() < 0 { + d7.Add(d7, q) + } + + out.typ = tlvTypeSMP4 + out.data = appendU32(out.data, 3) + out.data = appendMPIs(out.data, rb, cr, d7) + + r.ModInverse(c.smp.pb, p) + r.Mul(pa, r) + r.Mod(r, p) + s.Exp(ra, c.smp.b3, p) + if r.Cmp(s) != 0 { + err = smpFailureError + } + + return +} + +func (c *Conversation) processSMP4(mpis []*big.Int) error { + if len(mpis) != 3 { + return errors.New("otr: incorrect number of arguments in SMP4 message") + } + rb := mpis[0] + cr := mpis[1] + d7 := mpis[2] + h := sha256.New() + + r := new(big.Int).Exp(c.smp.qaqb, d7, p) + s := new(big.Int).Exp(rb, cr, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(g, d7, p) + t := new(big.Int).Exp(c.smp.g3b, cr, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 8, s, r)) + if t.Cmp(cr) != 0 { + return errors.New("otr: ZKP cR failed in SMP4 message") + } + + r.Exp(rb, c.smp.a3, p) + if r.Cmp(c.smp.papb) != 0 { + return smpFailureError + } + + return nil +} + +func (c *Conversation) generateSMPAbort() tlv { + return tlv{typ: tlvTypeSMPAbort} +} + +func hashMPIs(h hash.Hash, magic byte, mpis ...*big.Int) []byte { + if h != nil { + h.Reset() + } else { + h = sha256.New() + } + + h.Write([]byte{magic}) + for _, mpi := range mpis { + h.Write(appendMPI(nil, mpi)) + } + return h.Sum(nil) +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 0000000..593f653 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go new file mode 100644 index 0000000..f83cb69 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go @@ -0,0 +1,176 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pbkdf2 + +import ( + "bytes" + "crypto/sha1" + "crypto/sha256" + "hash" + "testing" +) + +type testVector struct { + password string + salt string + iter int + output []byte +} + +// Test vectors from RFC 6070, http://tools.ietf.org/html/rfc6070 +var sha1TestVectors = []testVector{ + { + "password", + "salt", + 1, + []byte{ + 0x0c, 0x60, 0xc8, 0x0f, 0x96, 0x1f, 0x0e, 0x71, + 0xf3, 0xa9, 0xb5, 0x24, 0xaf, 0x60, 0x12, 0x06, + 0x2f, 0xe0, 0x37, 0xa6, + }, + }, + { + "password", + "salt", + 2, + []byte{ + 0xea, 0x6c, 0x01, 0x4d, 0xc7, 0x2d, 0x6f, 0x8c, + 0xcd, 0x1e, 0xd9, 0x2a, 0xce, 0x1d, 0x41, 0xf0, + 0xd8, 0xde, 0x89, 0x57, + }, + }, + { + "password", + "salt", + 4096, + []byte{ + 0x4b, 0x00, 0x79, 0x01, 0xb7, 0x65, 0x48, 0x9a, + 0xbe, 0xad, 0x49, 0xd9, 0x26, 0xf7, 0x21, 0xd0, + 0x65, 0xa4, 0x29, 0xc1, + }, + }, + // // This one takes too long + // { + // "password", + // "salt", + // 16777216, + // []byte{ + // 0xee, 0xfe, 0x3d, 0x61, 0xcd, 0x4d, 0xa4, 0xe4, + // 0xe9, 0x94, 0x5b, 0x3d, 0x6b, 0xa2, 0x15, 0x8c, + // 0x26, 0x34, 0xe9, 0x84, + // }, + // }, + { + "passwordPASSWORDpassword", + "saltSALTsaltSALTsaltSALTsaltSALTsalt", + 4096, + []byte{ + 0x3d, 0x2e, 0xec, 0x4f, 0xe4, 0x1c, 0x84, 0x9b, + 0x80, 0xc8, 0xd8, 0x36, 0x62, 0xc0, 0xe4, 0x4a, + 0x8b, 0x29, 0x1a, 0x96, 0x4c, 0xf2, 0xf0, 0x70, + 0x38, + }, + }, + { + "pass\000word", + "sa\000lt", + 4096, + []byte{ + 0x56, 0xfa, 0x6a, 0xa7, 0x55, 0x48, 0x09, 0x9d, + 0xcc, 0x37, 0xd7, 0xf0, 0x34, 0x25, 0xe0, 0xc3, + }, + }, +} + +// Test vectors from +// http://stackoverflow.com/questions/5130513/pbkdf2-hmac-sha2-test-vectors +var sha256TestVectors = []testVector{ + { + "password", + "salt", + 1, + []byte{ + 0x12, 0x0f, 0xb6, 0xcf, 0xfc, 0xf8, 0xb3, 0x2c, + 0x43, 0xe7, 0x22, 0x52, 0x56, 0xc4, 0xf8, 0x37, + 0xa8, 0x65, 0x48, 0xc9, + }, + }, + { + "password", + "salt", + 2, + []byte{ + 0xae, 0x4d, 0x0c, 0x95, 0xaf, 0x6b, 0x46, 0xd3, + 0x2d, 0x0a, 0xdf, 0xf9, 0x28, 0xf0, 0x6d, 0xd0, + 0x2a, 0x30, 0x3f, 0x8e, + }, + }, + { + "password", + "salt", + 4096, + []byte{ + 0xc5, 0xe4, 0x78, 0xd5, 0x92, 0x88, 0xc8, 0x41, + 0xaa, 0x53, 0x0d, 0xb6, 0x84, 0x5c, 0x4c, 0x8d, + 0x96, 0x28, 0x93, 0xa0, + }, + }, + { + "passwordPASSWORDpassword", + "saltSALTsaltSALTsaltSALTsaltSALTsalt", + 4096, + []byte{ + 0x34, 0x8c, 0x89, 0xdb, 0xcb, 0xd3, 0x2b, 0x2f, + 0x32, 0xd8, 0x14, 0xb8, 0x11, 0x6e, 0x84, 0xcf, + 0x2b, 0x17, 0x34, 0x7e, 0xbc, 0x18, 0x00, 0x18, + 0x1c, + }, + }, + { + "pass\000word", + "sa\000lt", + 4096, + []byte{ + 0x89, 0xb6, 0x9d, 0x05, 0x16, 0xf8, 0x29, 0x89, + 0x3c, 0x69, 0x62, 0x26, 0x65, 0x0a, 0x86, 0x87, + }, + }, +} + +func testHash(t *testing.T, h func() hash.Hash, hashName string, vectors []testVector) { + for i, v := range vectors { + o := Key([]byte(v.password), []byte(v.salt), v.iter, len(v.output), h) + if !bytes.Equal(o, v.output) { + t.Errorf("%s %d: expected %x, got %x", hashName, i, v.output, o) + } + } +} + +func TestWithHMACSHA1(t *testing.T) { + testHash(t, sha1.New, "SHA1", sha1TestVectors) +} + +func TestWithHMACSHA256(t *testing.T) { + testHash(t, sha256.New, "SHA256", sha256TestVectors) +} + +var sink uint8 + +func benchmark(b *testing.B, h func() hash.Hash) { + password := make([]byte, h().Size()) + salt := make([]byte, 8) + for i := 0; i < b.N; i++ { + password = Key(password, salt, 4096, len(password), h) + } + sink += password[0] +} + +func BenchmarkHMACSHA1(b *testing.B) { + benchmark(b, sha1.New) +} + +func BenchmarkHMACSHA256(b *testing.B) { + benchmark(b, sha256.New) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go new file mode 100644 index 0000000..233b8b6 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. +func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. + + ret := make([]byte, 0, 2*len(s)+2) + + for _, r := range s { + if t, _ := utf16.EncodeRune(r); t != 0xfffd { + return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") + } + ret = append(ret, byte(r/256), byte(r%256)) + } + + return append(ret, 0, 0), nil +} + +func decodeBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // strip terminator if present + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string_test.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string_test.go new file mode 100644 index 0000000..7fca55f --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string_test.go @@ -0,0 +1,63 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "encoding/hex" + "testing" +) + +var bmpStringTests = []struct { + in string + expectedHex string + shouldFail bool +}{ + {"", "0000", false}, + // Example from https://tools.ietf.org/html/rfc7292#appendix-B. + {"Beavis", "0042006500610076006900730000", false}, + // Some characters from the "Letterlike Symbols Unicode block". + {"\u2115 - Double-struck N", "21150020002d00200044006f00750062006c0065002d00730074007200750063006b0020004e0000", false}, + // any character outside the BMP should trigger an error. + {"\U0001f000 East wind (Mahjong)", "", true}, +} + +func TestBMPString(t *testing.T) { + for i, test := range bmpStringTests { + expected, err := hex.DecodeString(test.expectedHex) + if err != nil { + t.Fatalf("#%d: failed to decode expectation", i) + } + + out, err := bmpString(test.in) + if err == nil && test.shouldFail { + t.Errorf("#%d: expected to fail, but produced %x", i, out) + continue + } + + if err != nil && !test.shouldFail { + t.Errorf("#%d: failed unexpectedly: %s", i, err) + continue + } + + if !test.shouldFail { + if !bytes.Equal(out, expected) { + t.Errorf("#%d: expected %s, got %x", i, test.expectedHex, out) + continue + } + + roundTrip, err := decodeBMPString(out) + if err != nil { + t.Errorf("#%d: decoding output gave an error: %s", i, err) + continue + } + + if roundTrip != test.in { + t.Errorf("#%d: decoding output resulted in %q, but it should have been %q", i, roundTrip, test.in) + continue + } + } + } +} diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go new file mode 100644 index 0000000..484ca51 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/cipher" + "crypto/des" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + "golang.org/x/crypto/pkcs12/internal/rc2" +) + +var ( + oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) +) + +// pbeCipher is an abstraction of a PKCS#12 cipher. +type pbeCipher interface { + // create returns a cipher.Block given a key. + create(key []byte) (cipher.Block, error) + // deriveKey returns a key derived from the given password and salt. + deriveKey(salt, password []byte, iterations int) []byte + // deriveKey returns an IV derived from the given password and salt. + deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { + var cipherType pbeCipher + + switch { + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts an object that contains ciphertext. +type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto_test.go b/vendor/golang.org/x/crypto/pkcs12/crypto_test.go new file mode 100644 index 0000000..eb4dae8 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/crypto_test.go @@ -0,0 +1,125 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/x509/pkix" + "encoding/asn1" + "testing" +) + +var sha1WithTripleDES = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + +func TestPbDecrypterFor(t *testing.T) { + params, _ := asn1.Marshal(pbeParams{ + Salt: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + Iterations: 2048, + }) + alg := pkix.AlgorithmIdentifier{ + Algorithm: asn1.ObjectIdentifier([]int{1, 2, 3}), + Parameters: asn1.RawValue{ + FullBytes: params, + }, + } + + pass, _ := bmpString("Sesame open") + + _, _, err := pbDecrypterFor(alg, pass) + if _, ok := err.(NotImplementedError); !ok { + t.Errorf("expected not implemented error, got: %T %s", err, err) + } + + alg.Algorithm = sha1WithTripleDES + cbc, blockSize, err := pbDecrypterFor(alg, pass) + if err != nil { + t.Errorf("unexpected error from pbDecrypterFor %v", err) + } + if blockSize != 8 { + t.Errorf("unexpected block size %d, wanted 8", blockSize) + } + + plaintext := []byte{1, 2, 3, 4, 5, 6, 7, 8} + expectedCiphertext := []byte{185, 73, 135, 249, 137, 1, 122, 247} + ciphertext := make([]byte, len(plaintext)) + cbc.CryptBlocks(ciphertext, plaintext) + + if bytes.Compare(ciphertext, expectedCiphertext) != 0 { + t.Errorf("bad ciphertext, got %x but wanted %x", ciphertext, expectedCiphertext) + } +} + +var pbDecryptTests = []struct { + in []byte + expected []byte + expectedError error +}{ + { + []byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\xa0\x9a\xdf\x5a\x58\xa0\xea\x46"), // 7 padding bytes + []byte("A secret!"), + nil, + }, + { + []byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\x96\x24\x2f\x71\x7e\x32\x3f\xe7"), // 8 padding bytes + []byte("A secret"), + nil, + }, + { + []byte("\x35\x0c\xc0\x8d\xab\xa9\x5d\x30\x7f\x9a\xec\x6a\xd8\x9b\x9c\xd9"), // 9 padding bytes, incorrect + nil, + ErrDecryption, + }, + { + []byte("\xb2\xf9\x6e\x06\x60\xae\x20\xcf\x08\xa0\x7b\xd9\x6b\x20\xef\x41"), // incorrect padding bytes: [ ... 0x04 0x02 ] + nil, + ErrDecryption, + }, +} + +func TestPbDecrypt(t *testing.T) { + for i, test := range pbDecryptTests { + decryptable := testDecryptable{ + data: test.in, + algorithm: pkix.AlgorithmIdentifier{ + Algorithm: sha1WithTripleDES, + Parameters: pbeParams{ + Salt: []byte("\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8"), + Iterations: 4096, + }.RawASN1(), + }, + } + password, _ := bmpString("sesame") + + plaintext, err := pbDecrypt(decryptable, password) + if err != test.expectedError { + t.Errorf("#%d: got error %q, but wanted %q", i, err, test.expectedError) + continue + } + + if !bytes.Equal(plaintext, test.expected) { + t.Errorf("#%d: got %x, but wanted %x", i, plaintext, test.expected) + } + } +} + +type testDecryptable struct { + data []byte + algorithm pkix.AlgorithmIdentifier +} + +func (d testDecryptable) Algorithm() pkix.AlgorithmIdentifier { return d.algorithm } +func (d testDecryptable) Data() []byte { return d.data } + +func (params pbeParams) RawASN1() (raw asn1.RawValue) { + asn1Bytes, err := asn1.Marshal(params) + if err != nil { + panic(err) + } + _, err = asn1.Unmarshal(asn1Bytes, &raw) + if err != nil { + panic(err) + } + return +} diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 0000000..7377ce6 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. + ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go new file mode 100644 index 0000000..3347f33 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go @@ -0,0 +1,27 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rc2 + +import ( + "testing" +) + +func BenchmarkEncrypt(b *testing.B) { + r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64) + b.ResetTimer() + var src [8]byte + for i := 0; i < b.N; i++ { + r.Encrypt(src[:], src[:]) + } +} + +func BenchmarkDecrypt(b *testing.B) { + r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64) + b.ResetTimer() + var src [8]byte + for i := 0; i < b.N; i++ { + r.Decrypt(src[:], src[:]) + } +} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go new file mode 100644 index 0000000..7499e3f --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -0,0 +1,271 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. +*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) + + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- { + l[i] = piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func rotl16(x uint16, b uint) uint16 { + return (x >> (16 - b)) | (x << b) +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go new file mode 100644 index 0000000..51a7efe --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go @@ -0,0 +1,92 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rc2 + +import ( + "bytes" + "encoding/hex" + "testing" +) + +func TestEncryptDecrypt(t *testing.T) { + // TODO(dgryski): add the rest of the test vectors from the RFC + var tests = []struct { + key string + plain string + cipher string + t1 int + }{ + { + "0000000000000000", + "0000000000000000", + "ebb773f993278eff", + 63, + }, + { + "ffffffffffffffff", + "ffffffffffffffff", + "278b27e42e2f0d49", + 64, + }, + { + "3000000000000000", + "1000000000000001", + "30649edf9be7d2c2", + 64, + }, + { + "88", + "0000000000000000", + "61a8a244adacccf0", + 64, + }, + { + "88bca90e90875a", + "0000000000000000", + "6ccf4308974c267f", + 64, + }, + { + "88bca90e90875a7f0f79c384627bafb2", + "0000000000000000", + "1a807d272bbe5db1", + 64, + }, + { + "88bca90e90875a7f0f79c384627bafb2", + "0000000000000000", + "2269552ab0f85ca6", + 128, + }, + { + "88bca90e90875a7f0f79c384627bafb216f80a6f85920584c42fceb0be255daf1e", + "0000000000000000", + "5b78d3a43dfff1f1", + 129, + }, + } + + for _, tt := range tests { + k, _ := hex.DecodeString(tt.key) + p, _ := hex.DecodeString(tt.plain) + c, _ := hex.DecodeString(tt.cipher) + + b, _ := New(k, tt.t1) + + var dst [8]byte + + b.Encrypt(dst[:], p) + + if !bytes.Equal(dst[:], c) { + t.Errorf("encrypt failed: got % 2x wanted % 2x\n", dst, c) + } + + b.Decrypt(dst[:], c) + + if !bytes.Equal(dst[:], p) { + t.Errorf("decrypt failed: got % 2x wanted % 2x\n", dst, p) + } + } +} diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 0000000..5f38aa7 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/mac_test.go b/vendor/golang.org/x/crypto/pkcs12/mac_test.go new file mode 100644 index 0000000..1ed4ff2 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/mac_test.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "encoding/asn1" + "testing" +) + +func TestVerifyMac(t *testing.T) { + td := macData{ + Mac: digestInfo{ + Digest: []byte{0x18, 0x20, 0x3d, 0xff, 0x1e, 0x16, 0xf4, 0x92, 0xf2, 0xaf, 0xc8, 0x91, 0xa9, 0xba, 0xd6, 0xca, 0x9d, 0xee, 0x51, 0x93}, + }, + MacSalt: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + Iterations: 2048, + } + + message := []byte{11, 12, 13, 14, 15} + password, _ := bmpString("") + + td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 2, 3}) + err := verifyMac(&td, message, password) + if _, ok := err.(NotImplementedError); !ok { + t.Errorf("err: %v", err) + } + + td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) + err = verifyMac(&td, message, password) + if err != ErrIncorrectPassword { + t.Errorf("Expected incorrect password, got err: %v", err) + } + + password, _ = bmpString("Sesame open") + err = verifyMac(&td, message, password) + if err != nil { + t.Errorf("err: %v", err) + } + +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 0000000..5c419d4 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. +func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. + + // We assume here that u and v are both multiples of 8, as are the + // lengths of the password and salt strings (which we denote by p and s, + // respectively) and the number n of pseudorandom bits required. In + // addition, u and v are of course non-zero. + + // For information on security considerations for MD5 [19], see [25] and + // [1], and on those for MD2, see [18]. + + // The following procedure can be used to produce pseudorandom bits for + // a particular "purpose" that is identified by a byte called "ID". + // This standard specifies 3 different values for the ID byte: + + // 1. If ID=1, then the pseudorandom bits being produced are to be used + // as key material for performing encryption or decryption. + + // 2. If ID=2, then the pseudorandom bits being produced are to be used + // as an IV (Initial Value) for encryption or decryption. + + // 3. If ID=3, then the pseudorandom bits being produced are to be used + // as an integrity key for MACing. + + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 + // copies of ID. + var D []byte + for i := 0; i < v; i++ { + D = append(D, ID) + } + + // 2. Concatenate copies of the salt together to create a string S of + // length v(ceiling(s/v)) bits (the final copy of the salt may be + // truncated to create S). Note that if the salt is the empty + // string, then so is S. + + S := fillWithRepeats(salt, v) + + // 3. Concatenate copies of the password together to create a string P + // of length v(ceiling(p/v)) bits (the final copy of the password + // may be truncated to create P). Note that if the password is the + // empty string, then so is P. + + P := fillWithRepeats(password, v) + + // 4. Set I=S||P to be the concatenation of S and P. + I := append(S, P...) + + // 5. Set c=ceiling(n/u). + c := (size + u - 1) / u + + // 6. For i=1, 2, ..., c, do the following: + A := make([]byte, c*20) + var IjBuf []byte + for i := 0; i < c; i++ { + // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, + // H(H(H(... H(D||I)))) + Ai := hash(append(D, I...)) + for j := 1; j < r; j++ { + Ai = hash(Ai) + } + copy(A[i*20:], Ai[:]) + + if i < c-1 { // skip on last iteration + // B. Concatenate copies of Ai to create a string B of length v + // bits (the final copy of Ai may be truncated to create B). + var B []byte + for len(B) < v { + B = append(B, Ai[:]...) + } + B = B[:v] + + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit + // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by + // setting I_j=(I_j+B+1) mod 2^v for each j. + { + Bbi := new(big.Int).SetBytes(B) + Ij := new(big.Int) + + for j := 0; j < len(I)/v; j++ { + Ij.SetBytes(I[j*v : (j+1)*v]) + Ij.Add(Ij, Bbi) + Ij.Add(Ij, one) + Ijb := Ij.Bytes() + // We expect Ijb to be exactly v bytes, + // if it is longer or shorter we must + // adjust it accordingly. + if len(Ijb) > v { + Ijb = Ijb[len(Ijb)-v:] + } + if len(Ijb) < v { + if IjBuf == nil { + IjBuf = make([]byte, v) + } + bytesShort := v - len(Ijb) + for i := 0; i < bytesShort; i++ { + IjBuf[i] = 0 + } + copy(IjBuf[bytesShort:], Ijb) + Ijb = IjBuf + } + copy(I[j*v:(j+1)*v], Ijb) + } + } + } + } + // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom + // bit string, A. + + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf_test.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf_test.go new file mode 100644 index 0000000..262037d --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf_test.go @@ -0,0 +1,34 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "testing" +) + +func TestThatPBKDFWorksCorrectlyForLongKeys(t *testing.T) { + cipherInfo := shaWithTripleDESCBC{} + + salt := []byte("\xff\xff\xff\xff\xff\xff\xff\xff") + password, _ := bmpString("sesame") + key := cipherInfo.deriveKey(salt, password, 2048) + + if expected := []byte("\x7c\xd9\xfd\x3e\x2b\x3b\xe7\x69\x1a\x44\xe3\xbe\xf0\xf9\xea\x0f\xb9\xb8\x97\xd4\xe3\x25\xd9\xd1"); bytes.Compare(key, expected) != 0 { + t.Fatalf("expected key '%x', but found '%x'", expected, key) + } +} + +func TestThatPBKDFHandlesLeadingZeros(t *testing.T) { + // This test triggers a case where I_j (in step 6C) ends up with leading zero + // byte, meaning that len(Ijb) < v (leading zeros get stripped by big.Int). + // This was previously causing bug whereby certain inputs would break the + // derivation and produce the wrong output. + key := pbkdf(sha1Sum, 20, 64, []byte("\xf3\x7e\x05\xb5\x18\x32\x4b\x4b"), []byte("\x00\x00"), 2048, 1, 24) + expected := []byte("\x00\xf7\x59\xff\x47\xd1\x4d\xd0\x36\x65\xd5\x94\x3c\xb3\xc4\xa3\x9a\x25\x55\xc0\x2a\xed\x66\xe1") + if bytes.Compare(key, expected) != 0 { + t.Fatalf("expected key '%x', but found '%x'", expected, key) + } +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 0000000..eff9ad3 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,346 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. +package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) + +// unmarshal calls asn1.Unmarshal, but also returns an error if there is any +// trailing data after unmarshaling. +func unmarshal(in []byte, out interface{}) error { + trailing, err := asn1.Unmarshal(in, out) + if err != nil { + return err + } + if len(trailing) != 0 { + return errors.New("pkcs12: trailing data found") + } + return nil +} + +// ConvertToPEM converts all "safe bags" contained in pfxData to PEM blocks. +func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, ErrIncorrectPassword + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + + if err != nil { + return nil, err + } + + blocks := make([]*pem.Block, 0, len(bags)) + for _, bag := range bags { + block, err := convertBag(&bag, encodedPassword) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { + block := &pem.Block{ + Headers: make(map[string]string), + } + + for _, attribute := range bag.Attributes { + k, v, err := convertAttribute(&attribute) + if err != nil { + return nil, err + } + block.Headers[k] = v + } + + switch { + case bag.Id.Equal(oidCertBag): + block.Type = certificateType + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, err + } + block.Bytes = certsData + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + block.Type = privateKeyType + + key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) + if err != nil { + return nil, err + } + + switch key := key.(type) { + case *rsa.PrivateKey: + block.Bytes = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + block.Bytes, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + default: + return nil, errors.New("found unknown private key type in PKCS#8 wrapping") + } + default: + return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) + } + return block, nil +} + +func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { + isString := false + + switch { + case attribute.Id.Equal(oidFriendlyName): + key = "friendlyName" + isString = true + case attribute.Id.Equal(oidLocalKeyID): + key = "localKeyId" + case attribute.Id.Equal(oidMicrosoftCSPName): + // This key is chosen to match OpenSSL. + key = "Microsoft CSP Name" + isString = true + default: + return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String()) + } + + if isString { + if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { + return "", "", err + } + if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { + return "", "", err + } + } else { + var id []byte + if err := unmarshal(attribute.Value.Bytes, &id); err != nil { + return "", "", err + } + value = hex.EncodeToString(id) + } + + return key, value, nil +} + +// Decode extracts a certificate and private key from pfxData. This function +// assumes that there is only one certificate and only one private key in the +// pfxData. +func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, nil, err + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, nil, err + } + + if len(bags) != 2 { + err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") + return + } + + for _, bag := range bags { + switch { + case bag.Id.Equal(oidCertBag): + if certificate != nil { + err = errors.New("pkcs12: expected exactly one certificate bag") + } + + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, nil, err + } + certs, err := x509.ParseCertificates(certsData) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + err = errors.New("pkcs12: expected exactly one certificate in the certBag") + return nil, nil, err + } + certificate = certs[0] + + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + if privateKey != nil { + err = errors.New("pkcs12: expected exactly one key bag") + } + + if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { + return nil, nil, err + } + } + } + + if certificate == nil { + return nil, nil, errors.New("pkcs12: certificate missing") + } + if privateKey == nil { + return nil, nil, errors.New("pkcs12: private key missing") + } + + return +} + +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { + pfx := new(pfxPdu) + if err := unmarshal(p12Data, pfx); err != nil { + return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) + } + + if pfx.Version != 3 { + return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") + } + + if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { + return nil, nil, NotImplementedError("only password-protected PFX is implemented") + } + + // unmarshal the explicit bytes in the content for type 'data' + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { + return nil, nil, err + } + + if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { + return nil, nil, errors.New("pkcs12: no MAC in data") + } + + if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { + if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { + // some implementations use an empty byte array + // for the empty string password try one more + // time with empty-empty password + password = nil + err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) + } + if err != nil { + return nil, nil, err + } + } + + var authenticatedSafe []contentInfo + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { + return nil, nil, err + } + + if len(authenticatedSafe) != 2 { + return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") + } + + for _, ci := range authenticatedSafe { + var data []byte + + switch { + case ci.ContentType.Equal(oidDataContentType): + if err := unmarshal(ci.Content.Bytes, &data); err != nil { + return nil, nil, err + } + case ci.ContentType.Equal(oidEncryptedDataContentType): + var encryptedData encryptedData + if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { + return nil, nil, err + } + if encryptedData.Version != 0 { + return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") + } + if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) + } + + return bags, password, nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12_test.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12_test.go new file mode 100644 index 0000000..14dd2a6 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12_test.go @@ -0,0 +1,138 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/rsa" + "crypto/tls" + "encoding/base64" + "encoding/pem" + "testing" +) + +func TestPfx(t *testing.T) { + for commonName, base64P12 := range testdata { + p12, _ := base64.StdEncoding.DecodeString(base64P12) + + priv, cert, err := Decode(p12, "") + if err != nil { + t.Fatal(err) + } + + if err := priv.(*rsa.PrivateKey).Validate(); err != nil { + t.Errorf("error while validating private key: %v", err) + } + + if cert.Subject.CommonName != commonName { + t.Errorf("expected common name to be %q, but found %q", commonName, cert.Subject.CommonName) + } + } +} + +func TestPEM(t *testing.T) { + for commonName, base64P12 := range testdata { + p12, _ := base64.StdEncoding.DecodeString(base64P12) + + blocks, err := ToPEM(p12, "") + if err != nil { + t.Fatalf("error while converting to PEM: %s", err) + } + + var pemData []byte + for _, b := range blocks { + pemData = append(pemData, pem.EncodeToMemory(b)...) + } + + cert, err := tls.X509KeyPair(pemData, pemData) + if err != nil { + t.Errorf("err while converting to key pair: %v", err) + } + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + config.BuildNameToCertificate() + + if _, exists := config.NameToCertificate[commonName]; !exists { + t.Errorf("did not find our cert in PEM?: %v", config.NameToCertificate) + } + } +} + +func ExampleToPEM() { + p12, _ := base64.StdEncoding.DecodeString(`MIIJzgIBAzCCCZQGCS ... CA+gwggPk==`) + + blocks, err := ToPEM(p12, "password") + if err != nil { + panic(err) + } + + var pemData []byte + for _, b := range blocks { + pemData = append(pemData, pem.EncodeToMemory(b)...) + } + + // then use PEM data for tls to construct tls certificate: + cert, err := tls.X509KeyPair(pemData, pemData) + if err != nil { + panic(err) + } + + config := &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + _ = config +} + +var testdata = map[string]string{ + // 'null' password test case + "Windows Azure Tools": `MIIKDAIBAzCCCcwGCSqGSIb3DQEHAaCCCb0Eggm5MIIJtTCCBe4GCSqGSIb3DQEHAaCCBd8EggXbMIIF1zCCBdMGCyqGSIb3DQEMCgECoIIE7jCCBOowHAYKKoZIhvcNAQwBAzAOBAhStUNnlTGV+gICB9AEggTIJ81JIossF6boFWpPtkiQRPtI6DW6e9QD4/WvHAVrM2bKdpMzSMsCML5NyuddANTKHBVq00Jc9keqGNAqJPKkjhSUebzQFyhe0E1oI9T4zY5UKr/I8JclOeccH4QQnsySzYUG2SnniXnQ+JrG3juetli7EKth9h6jLc6xbubPadY5HMB3wL/eG/kJymiXwU2KQ9Mgd4X6jbcV+NNCE/8jbZHvSTCPeYTJIjxfeX61Sj5kFKUCzERbsnpyevhY3X0eYtEDezZQarvGmXtMMdzf8HJHkWRdk9VLDLgjk8uiJif/+X4FohZ37ig0CpgC2+dP4DGugaZZ51hb8tN9GeCKIsrmWogMXDIVd0OACBp/EjJVmFB6y0kUCXxUE0TZt0XA1tjAGJcjDUpBvTntZjPsnH/4ZySy+s2d9OOhJ6pzRQBRm360TzkFdSwk9DLiLdGfv4pwMMu/vNGBlqjP/1sQtj+jprJiD1sDbCl4AdQZVoMBQHadF2uSD4/o17XG/Ci0r2h6Htc2yvZMAbEY4zMjjIn2a+vqIxD6onexaek1R3zbkS9j19D6EN9EWn8xgz80YRCyW65znZk8xaIhhvlU/mg7sTxeyuqroBZNcq6uDaQTehDpyH7bY2l4zWRpoj10a6JfH2q5shYz8Y6UZC/kOTfuGqbZDNZWro/9pYquvNNW0M847E5t9bsf9VkAAMHRGBbWoVoU9VpI0UnoXSfvpOo+aXa2DSq5sHHUTVY7A9eov3z5IqT+pligx11xcs+YhDWcU8di3BTJisohKvv5Y8WSkm/rloiZd4ig269k0jTRk1olP/vCksPli4wKG2wdsd5o42nX1yL7mFfXocOANZbB+5qMkiwdyoQSk+Vq+C8nAZx2bbKhUq2MbrORGMzOe0Hh0x2a0PeObycN1Bpyv7Mp3ZI9h5hBnONKCnqMhtyQHUj/nNvbJUnDVYNfoOEqDiEqqEwB7YqWzAKz8KW0OIqdlM8uiQ4JqZZlFllnWJUfaiDrdFM3lYSnFQBkzeVlts6GpDOOBjCYd7dcCNS6kq6pZC6p6HN60Twu0JnurZD6RT7rrPkIGE8vAenFt4iGe/yF52fahCSY8Ws4K0UTwN7bAS+4xRHVCWvE8sMRZsRCHizb5laYsVrPZJhE6+hux6OBb6w8kwPYXc+ud5v6UxawUWgt6uPwl8mlAtU9Z7Miw4Nn/wtBkiLL/ke1UI1gqJtcQXgHxx6mzsjh41+nAgTvdbsSEyU6vfOmxGj3Rwc1eOrIhJUqn5YjOWfzzsz/D5DzWKmwXIwdspt1p+u+kol1N3f2wT9fKPnd/RGCb4g/1hc3Aju4DQYgGY782l89CEEdalpQ/35bQczMFk6Fje12HykakWEXd/bGm9Unh82gH84USiRpeOfQvBDYoqEyrY3zkFZzBjhDqa+jEcAj41tcGx47oSfDq3iVYCdL7HSIjtnyEktVXd7mISZLoMt20JACFcMw+mrbjlug+eU7o2GR7T+LwtOp/p4LZqyLa7oQJDwde1BNZtm3TCK2P1mW94QDL0nDUps5KLtr1DaZXEkRbjSJub2ZE9WqDHyU3KA8G84Tq/rN1IoNu/if45jacyPje1Npj9IftUZSP22nV7HMwZtwQ4P4MYHRMBMGCSqGSIb3DQEJFTEGBAQBAAAAMFsGCSqGSIb3DQEJFDFOHkwAewBCADQAQQA0AEYARQBCADAALQBBADEAOABBAC0ANAA0AEIAQgAtAEIANQBGADIALQA0ADkAMQBFAEYAMQA1ADIAQgBBADEANgB9MF0GCSsGAQQBgjcRATFQHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAG8AZgB0AHcAYQByAGUAIABLAGUAeQAgAFMAdABvAHIAYQBnAGUAIABQAHIAbwB2AGkAZABlAHIwggO/BgkqhkiG9w0BBwagggOwMIIDrAIBADCCA6UGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEGMA4ECEBk5ZAYpu0WAgIH0ICCA3hik4mQFGpw9Ha8TQPtk+j2jwWdxfF0+sTk6S8PTsEfIhB7wPltjiCK92Uv2tCBQnodBUmatIfkpnRDEySmgmdglmOCzj204lWAMRs94PoALGn3JVBXbO1vIDCbAPOZ7Z0Hd0/1t2hmk8v3//QJGUg+qr59/4y/MuVfIg4qfkPcC2QSvYWcK3oTf6SFi5rv9B1IOWFgN5D0+C+x/9Lb/myPYX+rbOHrwtJ4W1fWKoz9g7wwmGFA9IJ2DYGuH8ifVFbDFT1Vcgsvs8arSX7oBsJVW0qrP7XkuDRe3EqCmKW7rBEwYrFznhxZcRDEpMwbFoSvgSIZ4XhFY9VKYglT+JpNH5iDceYEBOQL4vBLpxNUk3l5jKaBNxVa14AIBxq18bVHJ+STInhLhad4u10v/Xbx7wIL3f9DX1yLAkPrpBYbNHS2/ew6H/ySDJnoIDxkw2zZ4qJ+qUJZ1S0lbZVG+VT0OP5uF6tyOSpbMlcGkdl3z254n6MlCrTifcwkzscysDsgKXaYQw06rzrPW6RDub+t+hXzGny799fS9jhQMLDmOggaQ7+LA4oEZsfT89HLMWxJYDqjo3gIfjciV2mV54R684qLDS+AO09U49e6yEbwGlq8lpmO/pbXCbpGbB1b3EomcQbxdWxW2WEkkEd/VBn81K4M3obmywwXJkw+tPXDXfBmzzaqqCR+onMQ5ME1nMkY8ybnfoCc1bDIupjVWsEL2Wvq752RgI6KqzVNr1ew1IdqV5AWN2fOfek+0vi3Jd9FHF3hx8JMwjJL9dZsETV5kHtYJtE7wJ23J68BnCt2eI0GEuwXcCf5EdSKN/xXCTlIokc4Qk/gzRdIZsvcEJ6B1lGovKG54X4IohikqTjiepjbsMWj38yxDmK3mtENZ9ci8FPfbbvIEcOCZIinuY3qFUlRSbx7VUerEoV1IP3clUwexVQo4lHFee2jd7ocWsdSqSapW7OWUupBtDzRkqVhE7tGria+i1W2d6YLlJ21QTjyapWJehAMO637OdbJCCzDs1cXbodRRE7bsP492ocJy8OX66rKdhYbg8srSFNKdb3pF3UDNbN9jhI/t8iagRhNBhlQtTr1me2E/c86Q18qcRXl4bcXTt6acgCeffK6Y26LcVlrgjlD33AEYRRUeyC+rpxbT0aMjdFderlndKRIyG23mSp0HaUwNzAfMAcGBSsOAwIaBBRlviCbIyRrhIysg2dc/KbLFTc2vQQUg4rfwHMM4IKYRD/fsd1x6dda+wQ=`, + // empty string password test case + "testing@example.com": `MIIJzgIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCA/cGCSqGSIb3DQEHBqCCA+gwggPk +AgEAMIID3QYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQIIszfRGqcmPcCAggAgIIDsOZ9Eg1L +s5Wx8JhYoV3HAL4aRnkAWvTYB5NISZOgSgIQTssmt/3A7134dibTmaT/93LikkL3cTKLnQzJ4wDf +YZ1bprpVJvUqz+HFT79m27bP9zYXFrvxWBJbxjYKTSjQMgz+h8LAEpXXGajCmxMJ1oCOtdXkhhzc +LdZN6SAYgtmtyFnCdMEDskSggGuLb3fw84QEJ/Sj6FAULXunW/CPaS7Ce0TMsKmNU/jfFWj3yXXw +ro0kwjKiVLpVFlnBlHo2OoVU7hmkm59YpGhLgS7nxLD3n7nBroQ0ID1+8R01NnV9XLGoGzxMm1te +6UyTCkr5mj+kEQ8EP1Ys7g/TC411uhVWySMt/rcpkx7Vz1r9kYEAzJpONAfr6cuEVkPKrxpq4Fh0 +2fzlKBky0i/hrfIEUmngh+ERHUb/Mtv/fkv1j5w9suESbhsMLLiCXAlsP1UWMX+3bNizi3WVMEts +FM2k9byn+p8IUD/A8ULlE4kEaWeoc+2idkCNQkLGuIdGUXUFVm58se0auUkVRoRJx8x4CkMesT8j +b1H831W66YRWoEwwDQp2kK1lA2vQXxdVHWlFevMNxJeromLzj3ayiaFrfByeUXhR2S+Hpm+c0yNR +4UVU9WED2kacsZcpRm9nlEa5sr28mri5JdBrNa/K02OOhvKCxr5ZGmbOVzUQKla2z4w+Ku9k8POm +dfDNU/fGx1b5hcFWtghXe3msWVsSJrQihnN6q1ughzNiYZlJUGcHdZDRtiWwCFI0bR8h/Dmg9uO9 +4rawQQrjIRT7B8yF3UbkZyAqs8Ppb1TsMeNPHh1rxEfGVQknh/48ouJYsmtbnzugTUt3mJCXXiL+ +XcPMV6bBVAUu4aaVKSmg9+yJtY4/VKv10iw88ktv29fViIdBe3t6l/oPuvQgbQ8dqf4T8w0l/uKZ +9lS1Na9jfT1vCoS7F5TRi+tmyj1vL5kr/amEIW6xKEP6oeAMvCMtbPAzVEj38zdJ1R22FfuIBxkh +f0Zl7pdVbmzRxl/SBx9iIBJSqAvcXItiT0FIj8HxQ+0iZKqMQMiBuNWJf5pYOLWGrIyntCWwHuaQ +wrx0sTGuEL9YXLEAsBDrsvzLkx/56E4INGZFrH8G7HBdW6iGqb22IMI4GHltYSyBRKbB0gadYTyv +abPEoqww8o7/85aPSzOTJ/53ozD438Q+d0u9SyDuOb60SzCD/zPuCEd78YgtXJwBYTuUNRT27FaM +3LGMX8Hz+6yPNRnmnA2XKPn7dx/IlaqAjIs8MIIFfgYJKoZIhvcNAQcBoIIFbwSCBWswggVnMIIF +YwYLKoZIhvcNAQwKAQKgggTuMIIE6jAcBgoqhkiG9w0BDAEDMA4ECJr0cClYqOlcAgIIAASCBMhe +OQSiP2s0/46ONXcNeVAkz2ksW3u/+qorhSiskGZ0b3dFa1hhgBU2Q7JVIkc4Hf7OXaT1eVQ8oqND +uhqsNz83/kqYo70+LS8Hocj49jFgWAKrf/yQkdyP1daHa2yzlEw4mkpqOfnIORQHvYCa8nEApspZ +wVu8y6WVuLHKU67mel7db2xwstQp7PRuSAYqGjTfAylElog8ASdaqqYbYIrCXucF8iF9oVgmb/Qo +xrXshJ9aSLO4MuXlTPELmWgj07AXKSb90FKNihE+y0bWb9LPVFY1Sly3AX9PfrtkSXIZwqW3phpv +MxGxQl/R6mr1z+hlTfY9Wdpb5vlKXPKA0L0Rt8d2pOesylFi6esJoS01QgP1kJILjbrV731kvDc0 +Jsd+Oxv4BMwA7ClG8w1EAOInc/GrV1MWFGw/HeEqj3CZ/l/0jv9bwkbVeVCiIhoL6P6lVx9pXq4t +KZ0uKg/tk5TVJmG2vLcMLvezD0Yk3G2ZOMrywtmskrwoF7oAUpO9e87szoH6fEvUZlkDkPVW1NV4 +cZk3DBSQiuA3VOOg8qbo/tx/EE3H59P0axZWno2GSB0wFPWd1aj+b//tJEJHaaNR6qPRj4IWj9ru +Qbc8eRAcVWleHg8uAehSvUXlFpyMQREyrnpvMGddpiTC8N4UMrrBRhV7+UbCOWhxPCbItnInBqgl +1JpSZIP7iUtsIMdu3fEC2cdbXMTRul+4rdzUR7F9OaezV3jjvcAbDvgbK1CpyC+MJ1Mxm/iTgk9V +iUArydhlR8OniN84GyGYoYCW9O/KUwb6ASmeFOu/msx8x6kAsSQHIkKqMKv0TUR3kZnkxUvdpBGP +KTl4YCTvNGX4dYALBqrAETRDhua2KVBD/kEttDHwBNVbN2xi81+Mc7ml461aADfk0c66R/m2sjHB +2tN9+wG12OIWFQjL6wF/UfJMYamxx2zOOExiId29Opt57uYiNVLOO4ourPewHPeH0u8Gz35aero7 +lkt7cZAe1Q0038JUuE/QGlnK4lESK9UkSIQAjSaAlTsrcfwtQxB2EjoOoLhwH5mvxUEmcNGNnXUc +9xj3M5BD3zBz3Ft7G3YMMDwB1+zC2l+0UG0MGVjMVaeoy32VVNvxgX7jk22OXG1iaOB+PY9kdk+O +X+52BGSf/rD6X0EnqY7XuRPkMGgjtpZeAYxRQnFtCZgDY4wYheuxqSSpdF49yNczSPLkgB3CeCfS ++9NTKN7aC6hBbmW/8yYh6OvSiCEwY0lFS/T+7iaVxr1loE4zI1y/FFp4Pe1qfLlLttVlkygga2UU +SCunTQ8UB/M5IXWKkhMOO11dP4niWwb39Y7pCWpau7mwbXOKfRPX96cgHnQJK5uG+BesDD1oYnX0 +6frN7FOnTSHKruRIwuI8KnOQ/I+owmyz71wiv5LMQt+yM47UrEjB/EZa5X8dpEwOZvkdqL7utcyo +l0XH5kWMXdW856LL/FYftAqJIDAmtX1TXF/rbP6mPyN/IlDC0gjP84Uzd/a2UyTIWr+wk49Ek3vQ +/uDamq6QrwAxVmNh5Tset5Vhpc1e1kb7mRMZIzxSP8JcTuYd45oFKi98I8YjvueHVZce1g7OudQP +SbFQoJvdT46iBg1TTatlltpOiH2mFaxWVS0xYjAjBgkqhkiG9w0BCRUxFgQUdA9eVqvETX4an/c8 +p8SsTugkit8wOwYJKoZIhvcNAQkUMS4eLABGAHIAaQBlAG4AZABsAHkAIABuAGEAbQBlACAAZgBv +AHIAIABjAGUAcgB0MDEwITAJBgUrDgMCGgUABBRFsNz3Zd1O1GI8GTuFwCWuDOjEEwQIuBEfIcAy +HQ8CAggA`, +} diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go new file mode 100644 index 0000000..def1f7b --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/safebags.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if !bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go new file mode 100644 index 0000000..f562fa5 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go @@ -0,0 +1,33 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package poly1305 implements Poly1305 one-time message authentication code as +specified in https://cr.yp.to/mac/poly1305-20050329.pdf. + +Poly1305 is a fast, one-time authentication function. It is infeasible for an +attacker to generate an authenticator for a message without the key. However, a +key must only be used for a single message. Authenticating two different +messages with the same key allows an attacker to forge authenticators for other +messages with the same key. + +Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was +used with a fixed key in order to generate one-time keys from an nonce. +However, in this package AES isn't used and the one-time key is specified +directly. +*/ +package poly1305 // import "golang.org/x/crypto/poly1305" + +import "crypto/subtle" + +// TagSize is the size, in bytes, of a poly1305 authenticator. +const TagSize = 16 + +// Verify returns true if mac is a valid authenticator for m with the given +// key. +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + var tmp [16]byte + Sum(&tmp, m, key) + return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305_test.go b/vendor/golang.org/x/crypto/poly1305/poly1305_test.go new file mode 100644 index 0000000..017027f --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/poly1305_test.go @@ -0,0 +1,159 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package poly1305 + +import ( + "bytes" + "encoding/hex" + "flag" + "testing" + "unsafe" +) + +var stressFlag = flag.Bool("stress", false, "run slow stress tests") + +var testData = []struct { + in, k, correct []byte +}{ + { + []byte("Hello world!"), + []byte("this is 32-byte key for Poly1305"), + []byte{0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0}, + }, + { + make([]byte, 32), + []byte("this is 32-byte key for Poly1305"), + []byte{0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07}, + }, + { + make([]byte, 2007), + []byte("this is 32-byte key for Poly1305"), + []byte{0xda, 0x84, 0xbc, 0xab, 0x02, 0x67, 0x6c, 0x38, 0xcd, 0xb0, 0x15, 0x60, 0x42, 0x74, 0xc2, 0xaa}, + }, + { + make([]byte, 2007), + make([]byte, 32), + make([]byte, 16), + }, + { + // This test triggers an edge-case. See https://go-review.googlesource.com/#/c/30101/. + []byte{0x81, 0xd8, 0xb2, 0xe4, 0x6a, 0x25, 0x21, 0x3b, 0x58, 0xfe, 0xe4, 0x21, 0x3a, 0x2a, 0x28, 0xe9, 0x21, 0xc1, 0x2a, 0x96, 0x32, 0x51, 0x6d, 0x3b, 0x73, 0x27, 0x27, 0x27, 0xbe, 0xcf, 0x21, 0x29}, + []byte{0x3b, 0x3a, 0x29, 0xe9, 0x3b, 0x21, 0x3a, 0x5c, 0x5c, 0x3b, 0x3b, 0x05, 0x3a, 0x3a, 0x8c, 0x0d}, + []byte{0x6d, 0xc1, 0x8b, 0x8c, 0x34, 0x4c, 0xd7, 0x99, 0x27, 0x11, 0x8b, 0xbe, 0x84, 0xb7, 0xf3, 0x14}, + }, + { + // This test generates a result of (2^130-1) % (2^130-5). + []byte{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + []byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + []byte{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + { + // This test generates a result of (2^130-6) % (2^130-5). + []byte{ + 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + []byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + []byte{0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + }, + { + // This test generates a result of (2^130-5) % (2^130-5). + []byte{ + 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + []byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, +} + +func testSum(t *testing.T, unaligned bool) { + var out [16]byte + var key [32]byte + + for i, v := range testData { + in := v.in + if unaligned { + in = unalignBytes(in) + } + copy(key[:], v.k) + Sum(&out, in, &key) + if !bytes.Equal(out[:], v.correct) { + t.Errorf("%d: expected %x, got %x", i, v.correct, out[:]) + } + } +} + +func TestBurnin(t *testing.T) { + // This test can be used to sanity-check significant changes. It can + // take about many minutes to run, even on fast machines. It's disabled + // by default. + if !*stressFlag { + t.Skip("skipping without -stress") + } + + var key [32]byte + var input [25]byte + var output [16]byte + + for i := range key { + key[i] = 1 + } + for i := range input { + input[i] = 2 + } + + for i := uint64(0); i < 1e10; i++ { + Sum(&output, input[:], &key) + copy(key[0:], output[:]) + copy(key[16:], output[:]) + copy(input[:], output[:]) + copy(input[16:], output[:]) + } + + const expected = "5e3b866aea0b636d240c83c428f84bfa" + if got := hex.EncodeToString(output[:]); got != expected { + t.Errorf("expected %s, got %s", expected, got) + } +} + +func TestSum(t *testing.T) { testSum(t, false) } +func TestSumUnaligned(t *testing.T) { testSum(t, true) } + +func benchmark(b *testing.B, size int, unaligned bool) { + var out [16]byte + var key [32]byte + in := make([]byte, size) + if unaligned { + in = unalignBytes(in) + } + b.SetBytes(int64(len(in))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Sum(&out, in, &key) + } +} + +func Benchmark64(b *testing.B) { benchmark(b, 64, false) } +func Benchmark1K(b *testing.B) { benchmark(b, 1024, false) } +func Benchmark64Unaligned(b *testing.B) { benchmark(b, 64, true) } +func Benchmark1KUnaligned(b *testing.B) { benchmark(b, 1024, true) } + +func unalignBytes(in []byte) []byte { + out := make([]byte, len(in)+1) + if uintptr(unsafe.Pointer(&out[0]))&(unsafe.Alignof(uint32(0))-1) == 0 { + out = out[1:] + } else { + out = out[:len(in)] + } + copy(out, in) + return out +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go new file mode 100644 index 0000000..4dd72fe --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -0,0 +1,22 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package poly1305 + +// This function is implemented in sum_amd64.s +//go:noescape +func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + poly1305(out, mPtr, uint64(len(m)), key) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s new file mode 100644 index 0000000..2edae63 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -0,0 +1,125 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key) +TEXT ·poly1305(SB), $0-32 + MOVQ out+0(FP), DI + MOVQ m+8(FP), SI + MOVQ mlen+16(FP), R15 + MOVQ key+24(FP), AX + + MOVQ 0(AX), R11 + MOVQ 8(AX), R12 + ANDQ ·poly1305Mask<>(SB), R11 // r0 + ANDQ ·poly1305Mask<>+8(SB), R12 // r1 + XORQ R8, R8 // h0 + XORQ R9, R9 // h1 + XORQ R10, R10 // h2 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, AX + MOVQ R9, BX + SUBQ $0xFFFFFFFFFFFFFFFB, AX + SBBQ $0xFFFFFFFFFFFFFFFF, BX + SBBQ $3, R10 + CMOVQCS R8, AX + CMOVQCS R9, BX + MOVQ key+24(FP), R8 + ADDQ 16(R8), AX + ADCQ 24(R8), BX + + MOVQ AX, 0(DI) + MOVQ BX, 8(DI) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go new file mode 100644 index 0000000..5dc321c --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,!appengine,!nacl + +package poly1305 + +// This function is implemented in sum_arm.s +//go:noescape +func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + poly1305_auth_armv6(out, mPtr, uint32(len(m)), key) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/vendor/golang.org/x/crypto/poly1305/sum_arm.s new file mode 100644 index 0000000..f70b4ac --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.s @@ -0,0 +1,427 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,!appengine,!nacl + +#include "textflag.h" + +// This code was translated into a form compatible with 5a from the public +// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. + +DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff +DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 +DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff +DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff +DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff +GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20 + +// Warning: the linker may use R11 to synthesize certain instructions. Please +// take care and verify that no synthetic instructions use it. + +TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0 + // Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It + // might look like it's only 60 bytes of space but the final four bytes + // will be written by another function.) We need to skip over four + // bytes of stack because that's saving the value of 'g'. + ADD $4, R13, R8 + MOVM.IB [R4-R7], (R8) + MOVM.IA.W (R1), [R2-R5] + MOVW $·poly1305_init_constants_armv6<>(SB), R7 + MOVW R2, R8 + MOVW R2>>26, R9 + MOVW R3>>20, g + MOVW R4>>14, R11 + MOVW R5>>8, R12 + ORR R3<<6, R9, R9 + ORR R4<<12, g, g + ORR R5<<18, R11, R11 + MOVM.IA (R7), [R2-R6] + AND R8, R2, R2 + AND R9, R3, R3 + AND g, R4, R4 + AND R11, R5, R5 + AND R12, R6, R6 + MOVM.IA.W [R2-R6], (R0) + EOR R2, R2, R2 + EOR R3, R3, R3 + EOR R4, R4, R4 + EOR R5, R5, R5 + EOR R6, R6, R6 + MOVM.IA.W [R2-R6], (R0) + MOVM.IA.W (R1), [R2-R5] + MOVM.IA [R2-R6], (R0) + ADD $20, R13, R0 + MOVM.DA (R0), [R4-R7] + RET + +#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \ + MOVBU (offset+0)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+0)(Rdst); \ + MOVBU (offset+1)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+1)(Rdst); \ + MOVBU (offset+2)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+2)(Rdst); \ + MOVBU (offset+3)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+3)(Rdst) + +TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0 + // Needs 24 bytes of stack for saved registers and then 88 bytes of + // scratch space after that. We assume that 24 bytes at (R13) have + // already been used: four bytes for the link register saved in the + // prelude of poly1305_auth_armv6, four bytes for saving the value of g + // in that function and 16 bytes of scratch space used around + // poly1305_finish_ext_armv6_skip1. + ADD $24, R13, R12 + MOVM.IB [R4-R8, R14], (R12) + MOVW R0, 88(R13) + MOVW R1, 92(R13) + MOVW R2, 96(R13) + MOVW R1, R14 + MOVW R2, R12 + MOVW 56(R0), R8 + WORD $0xe1180008 // TST R8, R8 not working see issue 5921 + EOR R6, R6, R6 + MOVW.EQ $(1<<24), R6 + MOVW R6, 84(R13) + ADD $116, R13, g + MOVM.IA (R0), [R0-R9] + MOVM.IA [R0-R4], (g) + CMP $16, R12 + BLO poly1305_blocks_armv6_done + +poly1305_blocks_armv6_mainloop: + WORD $0xe31e0003 // TST R14, #3 not working see issue 5921 + BEQ poly1305_blocks_armv6_mainloop_aligned + ADD $100, R13, g + MOVW_UNALIGNED(R14, g, R0, 0) + MOVW_UNALIGNED(R14, g, R0, 4) + MOVW_UNALIGNED(R14, g, R0, 8) + MOVW_UNALIGNED(R14, g, R0, 12) + MOVM.IA (g), [R0-R3] + ADD $16, R14 + B poly1305_blocks_armv6_mainloop_loaded + +poly1305_blocks_armv6_mainloop_aligned: + MOVM.IA.W (R14), [R0-R3] + +poly1305_blocks_armv6_mainloop_loaded: + MOVW R0>>26, g + MOVW R1>>20, R11 + MOVW R2>>14, R12 + MOVW R14, 92(R13) + MOVW R3>>8, R4 + ORR R1<<6, g, g + ORR R2<<12, R11, R11 + ORR R3<<18, R12, R12 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, g, g + MOVW 84(R13), R3 + BIC $0xfc000000, R11, R11 + BIC $0xfc000000, R12, R12 + ADD R0, R5, R5 + ADD g, R6, R6 + ORR R3, R4, R4 + ADD R11, R7, R7 + ADD $116, R13, R14 + ADD R12, R8, R8 + ADD R4, R9, R9 + MOVM.IA (R14), [R0-R4] + MULLU R4, R5, (R11, g) + MULLU R3, R5, (R14, R12) + MULALU R3, R6, (R11, g) + MULALU R2, R6, (R14, R12) + MULALU R2, R7, (R11, g) + MULALU R1, R7, (R14, R12) + ADD R4<<2, R4, R4 + ADD R3<<2, R3, R3 + MULALU R1, R8, (R11, g) + MULALU R0, R8, (R14, R12) + MULALU R0, R9, (R11, g) + MULALU R4, R9, (R14, R12) + MOVW g, 76(R13) + MOVW R11, 80(R13) + MOVW R12, 68(R13) + MOVW R14, 72(R13) + MULLU R2, R5, (R11, g) + MULLU R1, R5, (R14, R12) + MULALU R1, R6, (R11, g) + MULALU R0, R6, (R14, R12) + MULALU R0, R7, (R11, g) + MULALU R4, R7, (R14, R12) + ADD R2<<2, R2, R2 + ADD R1<<2, R1, R1 + MULALU R4, R8, (R11, g) + MULALU R3, R8, (R14, R12) + MULALU R3, R9, (R11, g) + MULALU R2, R9, (R14, R12) + MOVW g, 60(R13) + MOVW R11, 64(R13) + MOVW R12, 52(R13) + MOVW R14, 56(R13) + MULLU R0, R5, (R11, g) + MULALU R4, R6, (R11, g) + MULALU R3, R7, (R11, g) + MULALU R2, R8, (R11, g) + MULALU R1, R9, (R11, g) + ADD $52, R13, R0 + MOVM.IA (R0), [R0-R7] + MOVW g>>26, R12 + MOVW R4>>26, R14 + ORR R11<<6, R12, R12 + ORR R5<<6, R14, R14 + BIC $0xfc000000, g, g + BIC $0xfc000000, R4, R4 + ADD.S R12, R0, R0 + ADC $0, R1, R1 + ADD.S R14, R6, R6 + ADC $0, R7, R7 + MOVW R0>>26, R12 + MOVW R6>>26, R14 + ORR R1<<6, R12, R12 + ORR R7<<6, R14, R14 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, R6, R6 + ADD R14<<2, R14, R14 + ADD.S R12, R2, R2 + ADC $0, R3, R3 + ADD R14, g, g + MOVW R2>>26, R12 + MOVW g>>26, R14 + ORR R3<<6, R12, R12 + BIC $0xfc000000, g, R5 + BIC $0xfc000000, R2, R7 + ADD R12, R4, R4 + ADD R14, R0, R0 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R8 + ADD R12, R6, R9 + MOVW 96(R13), R12 + MOVW 92(R13), R14 + MOVW R0, R6 + CMP $32, R12 + SUB $16, R12, R12 + MOVW R12, 96(R13) + BHS poly1305_blocks_armv6_mainloop + +poly1305_blocks_armv6_done: + MOVW 88(R13), R12 + MOVW R5, 20(R12) + MOVW R6, 24(R12) + MOVW R7, 28(R12) + MOVW R8, 32(R12) + MOVW R9, 36(R12) + ADD $48, R13, R0 + MOVM.DA (R0), [R4-R8, R14] + RET + +#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst); \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst) + +#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) + +// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key) +TEXT ·poly1305_auth_armv6(SB), $196-16 + // The value 196, just above, is the sum of 64 (the size of the context + // structure) and 132 (the amount of stack needed). + // + // At this point, the stack pointer (R13) has been moved down. It + // points to the saved link register and there's 196 bytes of free + // space above it. + // + // The stack for this function looks like: + // + // +--------------------- + // | + // | 64 bytes of context structure + // | + // +--------------------- + // | + // | 112 bytes for poly1305_blocks_armv6 + // | + // +--------------------- + // | 16 bytes of final block, constructed at + // | poly1305_finish_ext_armv6_skip8 + // +--------------------- + // | four bytes of saved 'g' + // +--------------------- + // | lr, saved by prelude <- R13 points here + // +--------------------- + MOVW g, 4(R13) + + MOVW out+0(FP), R4 + MOVW m+4(FP), R5 + MOVW mlen+8(FP), R6 + MOVW key+12(FP), R7 + + ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112 + MOVW R7, R1 + + // poly1305_init_ext_armv6 will write to the stack from R13+4, but + // that's ok because none of the other values have been written yet. + BL poly1305_init_ext_armv6<>(SB) + BIC.S $15, R6, R2 + BEQ poly1305_auth_armv6_noblocks + ADD $136, R13, R0 + MOVW R5, R1 + ADD R2, R5, R5 + SUB R2, R6, R6 + BL poly1305_blocks_armv6<>(SB) + +poly1305_auth_armv6_noblocks: + ADD $136, R13, R0 + MOVW R5, R1 + MOVW R6, R2 + MOVW R4, R3 + + MOVW R0, R5 + MOVW R1, R6 + MOVW R2, R7 + MOVW R3, R8 + AND.S R2, R2, R2 + BEQ poly1305_finish_ext_armv6_noremaining + EOR R0, R0 + ADD $8, R13, R9 // 8 = offset to 16 byte scratch space + MOVW R0, (R9) + MOVW R0, 4(R9) + MOVW R0, 8(R9) + MOVW R0, 12(R9) + WORD $0xe3110003 // TST R1, #3 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_aligned + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8 + MOVWP_UNALIGNED(R1, R9, g) + MOVWP_UNALIGNED(R1, R9, g) + +poly1305_finish_ext_armv6_skip8: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4 + MOVWP_UNALIGNED(R1, R9, g) + +poly1305_finish_ext_armv6_skip4: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHUP_UNALIGNED(R1, R9, g) + B poly1305_finish_ext_armv6_skip2 + +poly1305_finish_ext_armv6_aligned: + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8_aligned + MOVM.IA.W (R1), [g-R11] + MOVM.IA.W [g-R11], (R9) + +poly1305_finish_ext_armv6_skip8_aligned: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4_aligned + MOVW.P 4(R1), g + MOVW.P g, 4(R9) + +poly1305_finish_ext_armv6_skip4_aligned: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHU.P 2(R1), g + MOVH.P g, 2(R9) + +poly1305_finish_ext_armv6_skip2: + WORD $0xe3120001 // TST $1, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip1 + MOVBU.P 1(R1), g + MOVBU.P g, 1(R9) + +poly1305_finish_ext_armv6_skip1: + MOVW $1, R11 + MOVBU R11, 0(R9) + MOVW R11, 56(R5) + MOVW R5, R0 + ADD $8, R13, R1 + MOVW $16, R2 + BL poly1305_blocks_armv6<>(SB) + +poly1305_finish_ext_armv6_noremaining: + MOVW 20(R5), R0 + MOVW 24(R5), R1 + MOVW 28(R5), R2 + MOVW 32(R5), R3 + MOVW 36(R5), R4 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R4 + ADD R12<<2, R12, R12 + ADD R12, R0, R0 + MOVW R0>>26, R12 + BIC $0xfc000000, R0, R0 + ADD R12, R1, R1 + MOVW R1>>26, R12 + BIC $0xfc000000, R1, R1 + ADD R12, R2, R2 + MOVW R2>>26, R12 + BIC $0xfc000000, R2, R2 + ADD R12, R3, R3 + MOVW R3>>26, R12 + BIC $0xfc000000, R3, R3 + ADD R12, R4, R4 + ADD $5, R0, R6 + MOVW R6>>26, R12 + BIC $0xfc000000, R6, R6 + ADD R12, R1, R7 + MOVW R7>>26, R12 + BIC $0xfc000000, R7, R7 + ADD R12, R2, g + MOVW g>>26, R12 + BIC $0xfc000000, g, g + ADD R12, R3, R11 + MOVW $-(1<<26), R12 + ADD R11>>26, R12, R12 + BIC $0xfc000000, R11, R11 + ADD R12, R4, R9 + MOVW R9>>31, R12 + SUB $1, R12 + AND R12, R6, R6 + AND R12, R7, R7 + AND R12, g, g + AND R12, R11, R11 + AND R12, R9, R9 + MVN R12, R12 + AND R12, R0, R0 + AND R12, R1, R1 + AND R12, R2, R2 + AND R12, R3, R3 + AND R12, R4, R4 + ORR R6, R0, R0 + ORR R7, R1, R1 + ORR g, R2, R2 + ORR R11, R3, R3 + ORR R9, R4, R4 + ORR R1<<26, R0, R0 + MOVW R1>>6, R1 + ORR R2<<20, R1, R1 + MOVW R2>>12, R2 + ORR R3<<14, R2, R2 + MOVW R3>>18, R3 + ORR R4<<8, R3, R3 + MOVW 40(R5), R6 + MOVW 44(R5), R7 + MOVW 48(R5), g + MOVW 52(R5), R11 + ADD.S R6, R0, R0 + ADC.S R7, R1, R1 + ADC.S g, R2, R2 + ADC.S R11, R3, R3 + MOVM.IA [R0-R3], (R8) + MOVW R5, R12 + EOR R0, R0, R0 + EOR R1, R1, R1 + EOR R2, R2, R2 + EOR R3, R3, R3 + EOR R4, R4, R4 + EOR R5, R5, R5 + EOR R6, R6, R6 + EOR R7, R7, R7 + MOVM.IA.W [R0-R7], (R12) + MOVM.IA [R0-R7], (R12) + MOVW 4(R13), g + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ref.go b/vendor/golang.org/x/crypto/poly1305/sum_ref.go new file mode 100644 index 0000000..b2805a5 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ref.go @@ -0,0 +1,141 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!arm gccgo appengine nacl + +package poly1305 + +import "encoding/binary" + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) { + var ( + h0, h1, h2, h3, h4 uint32 // the hash accumulators + r0, r1, r2, r3, r4 uint64 // the r part of the key + ) + + r0 = uint64(binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff) + r1 = uint64((binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03) + r2 = uint64((binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff) + r3 = uint64((binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff) + r4 = uint64((binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff) + + R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5 + + for len(msg) >= TagSize { + // h += msg + h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff + h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff + h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff + h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff + h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | (1 << 24) + + // h *= r + d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) + d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) + d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) + d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) + d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) + + // h %= p + h0 = uint32(d0) & 0x3ffffff + h1 = uint32(d1) & 0x3ffffff + h2 = uint32(d2) & 0x3ffffff + h3 = uint32(d3) & 0x3ffffff + h4 = uint32(d4) & 0x3ffffff + + h0 += uint32(d4>>26) * 5 + h1 += h0 >> 26 + h0 = h0 & 0x3ffffff + + msg = msg[TagSize:] + } + + if len(msg) > 0 { + var block [TagSize]byte + off := copy(block[:], msg) + block[off] = 0x01 + + // h += msg + h0 += binary.LittleEndian.Uint32(block[0:]) & 0x3ffffff + h1 += (binary.LittleEndian.Uint32(block[3:]) >> 2) & 0x3ffffff + h2 += (binary.LittleEndian.Uint32(block[6:]) >> 4) & 0x3ffffff + h3 += (binary.LittleEndian.Uint32(block[9:]) >> 6) & 0x3ffffff + h4 += (binary.LittleEndian.Uint32(block[12:]) >> 8) + + // h *= r + d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) + d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) + d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) + d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) + d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) + + // h %= p + h0 = uint32(d0) & 0x3ffffff + h1 = uint32(d1) & 0x3ffffff + h2 = uint32(d2) & 0x3ffffff + h3 = uint32(d3) & 0x3ffffff + h4 = uint32(d4) & 0x3ffffff + + h0 += uint32(d4>>26) * 5 + h1 += h0 >> 26 + h0 = h0 & 0x3ffffff + } + + // h %= p reduction + h2 += h1 >> 26 + h1 &= 0x3ffffff + h3 += h2 >> 26 + h2 &= 0x3ffffff + h4 += h3 >> 26 + h3 &= 0x3ffffff + h0 += 5 * (h4 >> 26) + h4 &= 0x3ffffff + h1 += h0 >> 26 + h0 &= 0x3ffffff + + // h - p + t0 := h0 + 5 + t1 := h1 + (t0 >> 26) + t2 := h2 + (t1 >> 26) + t3 := h3 + (t2 >> 26) + t4 := h4 + (t3 >> 26) - (1 << 26) + t0 &= 0x3ffffff + t1 &= 0x3ffffff + t2 &= 0x3ffffff + t3 &= 0x3ffffff + + // select h if h < p else h - p + t_mask := (t4 >> 31) - 1 + h_mask := ^t_mask + h0 = (h0 & h_mask) | (t0 & t_mask) + h1 = (h1 & h_mask) | (t1 & t_mask) + h2 = (h2 & h_mask) | (t2 & t_mask) + h3 = (h3 & h_mask) | (t3 & t_mask) + h4 = (h4 & h_mask) | (t4 & t_mask) + + // h %= 2^128 + h0 |= h1 << 26 + h1 = ((h1 >> 6) | (h2 << 20)) + h2 = ((h2 >> 12) | (h3 << 14)) + h3 = ((h3 >> 18) | (h4 << 8)) + + // s: the s part of the key + // tag = (h + s) % (2^128) + t := uint64(h0) + uint64(binary.LittleEndian.Uint32(key[16:])) + h0 = uint32(t) + t = uint64(h1) + uint64(binary.LittleEndian.Uint32(key[20:])) + (t >> 32) + h1 = uint32(t) + t = uint64(h2) + uint64(binary.LittleEndian.Uint32(key[24:])) + (t >> 32) + h2 = uint32(t) + t = uint64(h3) + uint64(binary.LittleEndian.Uint32(key[28:])) + (t >> 32) + h3 = uint32(t) + + binary.LittleEndian.PutUint32(out[0:], h0) + binary.LittleEndian.PutUint32(out[4:], h1) + binary.LittleEndian.PutUint32(out[8:], h2) + binary.LittleEndian.PutUint32(out[12:], h3) +} diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go new file mode 100644 index 0000000..6c6e842 --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go @@ -0,0 +1,120 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ripemd160 implements the RIPEMD-160 hash algorithm. +package ripemd160 // import "golang.org/x/crypto/ripemd160" + +// RIPEMD-160 is designed by by Hans Dobbertin, Antoon Bosselaers, and Bart +// Preneel with specifications available at: +// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf. + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.RIPEMD160, New) +} + +// The size of the checksum in bytes. +const Size = 20 + +// The block size of the hash algorithm in bytes. +const BlockSize = 64 + +const ( + _s0 = 0x67452301 + _s1 = 0xefcdab89 + _s2 = 0x98badcfe + _s3 = 0x10325476 + _s4 = 0xc3d2e1f0 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + s [5]uint32 // running context + x [BlockSize]byte // temporary buffer + nx int // index into x + tc uint64 // total count of bytes processed +} + +func (d *digest) Reset() { + d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4 + d.nx = 0 + d.tc = 0 +} + +// New returns a new hash.Hash computing the checksum. +func New() hash.Hash { + result := new(digest) + result.Reset() + return result +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.tc += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > BlockSize-d.nx { + n = BlockSize - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == BlockSize { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + tc := d.tc + var tmp [64]byte + tmp[0] = 0x80 + if tc%64 < 56 { + d.Write(tmp[0 : 56-tc%64]) + } else { + d.Write(tmp[0 : 64+56-tc%64]) + } + + // Length in bits. + tc <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(tc >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + var digest [Size]byte + for i, s := range d.s { + digest[i*4] = byte(s) + digest[i*4+1] = byte(s >> 8) + digest[i*4+2] = byte(s >> 16) + digest[i*4+3] = byte(s >> 24) + } + + return append(in, digest[:]...) +} diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go new file mode 100644 index 0000000..a1fbffd --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go @@ -0,0 +1,72 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ripemd160 + +// Test vectors are from: +// http://homes.esat.kuleuven.be/~bosselae/ripemd160.html + +import ( + "fmt" + "io" + "testing" +) + +type mdTest struct { + out string + in string +} + +var vectors = [...]mdTest{ + {"9c1185a5c5e9fc54612808977ee8f548b2258d31", ""}, + {"0bdc9d2d256b3ee9daae347be6f4dc835a467ffe", "a"}, + {"8eb208f7e05d987a9b044a8e98c6b087f15a0bfc", "abc"}, + {"5d0689ef49d2fae572b881b123a85ffa21595f36", "message digest"}, + {"f71c27109c692c1b56bbdceb5b9d2865b3708dbc", "abcdefghijklmnopqrstuvwxyz"}, + {"12a053384a9c0c88e405a06c27dcf49ada62eb2b", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, + {"b0e20b6e3116640286ed3a87a5713079b21f5189", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"}, + {"9b752e45573d4b39f4dbd3323cab82bf63326bfb", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"}, +} + +func TestVectors(t *testing.T) { + for i := 0; i < len(vectors); i++ { + tv := vectors[i] + md := New() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(md, tv.in) + } else { + io.WriteString(md, tv.in[0:len(tv.in)/2]) + md.Sum(nil) + io.WriteString(md, tv.in[len(tv.in)/2:]) + } + s := fmt.Sprintf("%x", md.Sum(nil)) + if s != tv.out { + t.Fatalf("RIPEMD-160[%d](%s) = %s, expected %s", j, tv.in, s, tv.out) + } + md.Reset() + } + } +} + +func millionA() string { + md := New() + for i := 0; i < 100000; i++ { + io.WriteString(md, "aaaaaaaaaa") + } + return fmt.Sprintf("%x", md.Sum(nil)) +} + +func TestMillionA(t *testing.T) { + const out = "52783243c1697bdbe16d37f97f68f08325dc1528" + if s := millionA(); s != out { + t.Fatalf("RIPEMD-160 (1 million 'a') = %s, expected %s", s, out) + } +} + +func BenchmarkMillionA(b *testing.B) { + for i := 0; i < b.N; i++ { + millionA() + } +} diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go new file mode 100644 index 0000000..e0edc02 --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go @@ -0,0 +1,165 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// RIPEMD-160 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package ripemd160 + +import ( + "math/bits" +) + +// work buffer indices and roll amounts for one line +var _n = [80]uint{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8, + 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12, + 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2, + 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13, +} + +var _r = [80]uint{ + 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8, + 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12, + 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5, + 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12, + 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6, +} + +// same for the other parallel one +var n_ = [80]uint{ + 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12, + 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2, + 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13, + 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14, + 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11, +} + +var r_ = [80]uint{ + 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6, + 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11, + 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5, + 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8, + 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11, +} + +func _Block(md *digest, p []byte) int { + n := 0 + var x [16]uint32 + var alpha, beta uint32 + for len(p) >= BlockSize { + a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4] + aa, bb, cc, dd, ee := a, b, c, d, e + j := 0 + for i := 0; i < 16; i++ { + x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // round 1 + i := 0 + for i < 16 { + alpha = a + (b ^ c ^ d) + x[_n[i]] + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 2 + for i < 32 { + alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 3 + for i < 48 { + alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 4 + for i < 64 { + alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 5 + for i < 80 { + alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ cc ^ dd) + x[n_[i]] + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // combine results + dd += c + md.s[1] + md.s[1] = md.s[2] + d + ee + md.s[2] = md.s[3] + e + aa + md.s[3] = md.s[4] + a + bb + md.s[4] = md.s[0] + b + cc + md.s[0] = dd + + p = p[BlockSize:] + n += BlockSize + } + return n +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go new file mode 100644 index 0000000..4c96147 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package salsa provides low-level access to functions in the Salsa family. +package salsa // import "golang.org/x/crypto/salsa20/salsa" + +// Sigma is the Salsa20 constant for 256-bit keys. +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} + +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte +// key k, and 16-byte constant c, and puts the result into the 32-byte array +// out. +func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + for i := 0; i < 20; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x5) + out[5] = byte(x5 >> 8) + out[6] = byte(x5 >> 16) + out[7] = byte(x5 >> 24) + + out[8] = byte(x10) + out[9] = byte(x10 >> 8) + out[10] = byte(x10 >> 16) + out[11] = byte(x10 >> 24) + + out[12] = byte(x15) + out[13] = byte(x15 >> 8) + out[14] = byte(x15 >> 16) + out[15] = byte(x15 >> 24) + + out[16] = byte(x6) + out[17] = byte(x6 >> 8) + out[18] = byte(x6 >> 16) + out[19] = byte(x6 >> 24) + + out[20] = byte(x7) + out[21] = byte(x7 >> 8) + out[22] = byte(x7 >> 16) + out[23] = byte(x7 >> 24) + + out[24] = byte(x8) + out[25] = byte(x8 >> 8) + out[26] = byte(x8 >> 16) + out[27] = byte(x8 >> 24) + + out[28] = byte(x9) + out[29] = byte(x9 >> 8) + out[30] = byte(x9 >> 16) + out[31] = byte(x9 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s new file mode 100644 index 0000000..22afbdc --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s @@ -0,0 +1,889 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) +// This needs up to 64 bytes at 360(SP); hence the non-obvious frame size. +TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment + MOVQ out+0(FP),DI + MOVQ in+8(FP),SI + MOVQ n+16(FP),DX + MOVQ nonce+24(FP),CX + MOVQ key+32(FP),R8 + + MOVQ SP,R12 + MOVQ SP,R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ DX,R9 + MOVQ CX,DX + MOVQ R8,R10 + CMPQ R9,$0 + JBE DONE + START: + MOVL 20(R10),CX + MOVL 0(R10),R8 + MOVL 0(DX),AX + MOVL 16(R10),R11 + MOVL CX,0(SP) + MOVL R8, 4 (SP) + MOVL AX, 8 (SP) + MOVL R11, 12 (SP) + MOVL 8(DX),CX + MOVL 24(R10),R8 + MOVL 4(R10),AX + MOVL 4(DX),R11 + MOVL CX,16(SP) + MOVL R8, 20 (SP) + MOVL AX, 24 (SP) + MOVL R11, 28 (SP) + MOVL 12(DX),CX + MOVL 12(R10),DX + MOVL 28(R10),R8 + MOVL 8(R10),AX + MOVL DX,32(SP) + MOVL CX, 36 (SP) + MOVL R8, 40 (SP) + MOVL AX, 44 (SP) + MOVQ $1634760805,DX + MOVQ $857760878,CX + MOVQ $2036477234,R8 + MOVQ $1797285236,AX + MOVL DX,48(SP) + MOVL CX, 52 (SP) + MOVL R8, 56 (SP) + MOVL AX, 60 (SP) + CMPQ R9,$256 + JB BYTESBETWEEN1AND255 + MOVOA 48(SP),X0 + PSHUFL $0X55,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X3 + PSHUFL $0X00,X0,X0 + MOVOA X1,64(SP) + MOVOA X2,80(SP) + MOVOA X3,96(SP) + MOVOA X0,112(SP) + MOVOA 0(SP),X0 + PSHUFL $0XAA,X0,X1 + PSHUFL $0XFF,X0,X2 + PSHUFL $0X00,X0,X3 + PSHUFL $0X55,X0,X0 + MOVOA X1,128(SP) + MOVOA X2,144(SP) + MOVOA X3,160(SP) + MOVOA X0,176(SP) + MOVOA 16(SP),X0 + PSHUFL $0XFF,X0,X1 + PSHUFL $0X55,X0,X2 + PSHUFL $0XAA,X0,X0 + MOVOA X1,192(SP) + MOVOA X2,208(SP) + MOVOA X0,224(SP) + MOVOA 32(SP),X0 + PSHUFL $0X00,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X0 + MOVOA X1,240(SP) + MOVOA X2,256(SP) + MOVOA X0,272(SP) + BYTESATLEAST256: + MOVL 16(SP),DX + MOVL 36 (SP),CX + MOVL DX,288(SP) + MOVL CX,304(SP) + ADDQ $1,DX + SHLQ $32,CX + ADDQ CX,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 292 (SP) + MOVL CX, 308 (SP) + ADDQ $1,DX + SHLQ $32,CX + ADDQ CX,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 296 (SP) + MOVL CX, 312 (SP) + ADDQ $1,DX + SHLQ $32,CX + ADDQ CX,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 300 (SP) + MOVL CX, 316 (SP) + ADDQ $1,DX + SHLQ $32,CX + ADDQ CX,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX,16(SP) + MOVL CX, 36 (SP) + MOVQ R9,352(SP) + MOVQ $20,DX + MOVOA 64(SP),X0 + MOVOA 80(SP),X1 + MOVOA 96(SP),X2 + MOVOA 256(SP),X3 + MOVOA 272(SP),X4 + MOVOA 128(SP),X5 + MOVOA 144(SP),X6 + MOVOA 176(SP),X7 + MOVOA 192(SP),X8 + MOVOA 208(SP),X9 + MOVOA 224(SP),X10 + MOVOA 304(SP),X11 + MOVOA 112(SP),X12 + MOVOA 160(SP),X13 + MOVOA 240(SP),X14 + MOVOA 288(SP),X15 + MAINLOOP1: + MOVOA X1,320(SP) + MOVOA X2,336(SP) + MOVOA X13,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X14 + PSRLL $25,X2 + PXOR X2,X14 + MOVOA X7,X1 + PADDL X0,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X11 + PSRLL $25,X2 + PXOR X2,X11 + MOVOA X12,X1 + PADDL X14,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X15 + PSRLL $23,X2 + PXOR X2,X15 + MOVOA X0,X1 + PADDL X11,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X9 + PSRLL $23,X2 + PXOR X2,X9 + MOVOA X14,X1 + PADDL X15,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X13 + PSRLL $19,X2 + PXOR X2,X13 + MOVOA X11,X1 + PADDL X9,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X7 + PSRLL $19,X2 + PXOR X2,X7 + MOVOA X15,X1 + PADDL X13,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA 320(SP),X1 + MOVOA X12,320(SP) + MOVOA X9,X2 + PADDL X7,X2 + MOVOA X2,X12 + PSLLL $18,X2 + PXOR X2,X0 + PSRLL $14,X12 + PXOR X12,X0 + MOVOA X5,X2 + PADDL X1,X2 + MOVOA X2,X12 + PSLLL $7,X2 + PXOR X2,X3 + PSRLL $25,X12 + PXOR X12,X3 + MOVOA 336(SP),X2 + MOVOA X0,336(SP) + MOVOA X6,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X4 + PSRLL $25,X12 + PXOR X12,X4 + MOVOA X1,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X10 + PSRLL $23,X12 + PXOR X12,X10 + MOVOA X2,X0 + PADDL X4,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X8 + PSRLL $23,X12 + PXOR X12,X8 + MOVOA X3,X0 + PADDL X10,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X5 + PSRLL $19,X12 + PXOR X12,X5 + MOVOA X4,X0 + PADDL X8,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X6 + PSRLL $19,X12 + PXOR X12,X6 + MOVOA X10,X0 + PADDL X5,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA 320(SP),X0 + MOVOA X1,320(SP) + MOVOA X4,X1 + PADDL X0,X1 + MOVOA X1,X12 + PSLLL $7,X1 + PXOR X1,X7 + PSRLL $25,X12 + PXOR X12,X7 + MOVOA X8,X1 + PADDL X6,X1 + MOVOA X1,X12 + PSLLL $18,X1 + PXOR X1,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 336(SP),X12 + MOVOA X2,336(SP) + MOVOA X14,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X5 + PSRLL $25,X2 + PXOR X2,X5 + MOVOA X0,X1 + PADDL X7,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X10 + PSRLL $23,X2 + PXOR X2,X10 + MOVOA X12,X1 + PADDL X5,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X8 + PSRLL $23,X2 + PXOR X2,X8 + MOVOA X7,X1 + PADDL X10,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X4 + PSRLL $19,X2 + PXOR X2,X4 + MOVOA X5,X1 + PADDL X8,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X14 + PSRLL $19,X2 + PXOR X2,X14 + MOVOA X10,X1 + PADDL X4,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X0 + PSRLL $14,X2 + PXOR X2,X0 + MOVOA 320(SP),X1 + MOVOA X0,320(SP) + MOVOA X8,X0 + PADDL X14,X0 + MOVOA X0,X2 + PSLLL $18,X0 + PXOR X0,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA X11,X0 + PADDL X1,X0 + MOVOA X0,X2 + PSLLL $7,X0 + PXOR X0,X6 + PSRLL $25,X2 + PXOR X2,X6 + MOVOA 336(SP),X2 + MOVOA X12,336(SP) + MOVOA X3,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X13 + PSRLL $25,X12 + PXOR X12,X13 + MOVOA X1,X0 + PADDL X6,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X15 + PSRLL $23,X12 + PXOR X12,X15 + MOVOA X2,X0 + PADDL X13,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X9 + PSRLL $23,X12 + PXOR X12,X9 + MOVOA X6,X0 + PADDL X15,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X11 + PSRLL $19,X12 + PXOR X12,X11 + MOVOA X13,X0 + PADDL X9,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X3 + PSRLL $19,X12 + PXOR X12,X3 + MOVOA X15,X0 + PADDL X11,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA X9,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 320(SP),X12 + MOVOA 336(SP),X0 + SUBQ $2,DX + JA MAINLOOP1 + PADDL 112(SP),X12 + PADDL 176(SP),X7 + PADDL 224(SP),X10 + PADDL 272(SP),X4 + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 0(SI),DX + XORL 4(SI),CX + XORL 8(SI),R8 + XORL 12(SI),R9 + MOVL DX,0(DI) + MOVL CX,4(DI) + MOVL R8,8(DI) + MOVL R9,12(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 64(SI),DX + XORL 68(SI),CX + XORL 72(SI),R8 + XORL 76(SI),R9 + MOVL DX,64(DI) + MOVL CX,68(DI) + MOVL R8,72(DI) + MOVL R9,76(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 128(SI),DX + XORL 132(SI),CX + XORL 136(SI),R8 + XORL 140(SI),R9 + MOVL DX,128(DI) + MOVL CX,132(DI) + MOVL R8,136(DI) + MOVL R9,140(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + XORL 192(SI),DX + XORL 196(SI),CX + XORL 200(SI),R8 + XORL 204(SI),R9 + MOVL DX,192(DI) + MOVL CX,196(DI) + MOVL R8,200(DI) + MOVL R9,204(DI) + PADDL 240(SP),X14 + PADDL 64(SP),X0 + PADDL 128(SP),X5 + PADDL 192(SP),X8 + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 16(SI),DX + XORL 20(SI),CX + XORL 24(SI),R8 + XORL 28(SI),R9 + MOVL DX,16(DI) + MOVL CX,20(DI) + MOVL R8,24(DI) + MOVL R9,28(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 80(SI),DX + XORL 84(SI),CX + XORL 88(SI),R8 + XORL 92(SI),R9 + MOVL DX,80(DI) + MOVL CX,84(DI) + MOVL R8,88(DI) + MOVL R9,92(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 144(SI),DX + XORL 148(SI),CX + XORL 152(SI),R8 + XORL 156(SI),R9 + MOVL DX,144(DI) + MOVL CX,148(DI) + MOVL R8,152(DI) + MOVL R9,156(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + XORL 208(SI),DX + XORL 212(SI),CX + XORL 216(SI),R8 + XORL 220(SI),R9 + MOVL DX,208(DI) + MOVL CX,212(DI) + MOVL R8,216(DI) + MOVL R9,220(DI) + PADDL 288(SP),X15 + PADDL 304(SP),X11 + PADDL 80(SP),X1 + PADDL 144(SP),X6 + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 32(SI),DX + XORL 36(SI),CX + XORL 40(SI),R8 + XORL 44(SI),R9 + MOVL DX,32(DI) + MOVL CX,36(DI) + MOVL R8,40(DI) + MOVL R9,44(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 96(SI),DX + XORL 100(SI),CX + XORL 104(SI),R8 + XORL 108(SI),R9 + MOVL DX,96(DI) + MOVL CX,100(DI) + MOVL R8,104(DI) + MOVL R9,108(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 160(SI),DX + XORL 164(SI),CX + XORL 168(SI),R8 + XORL 172(SI),R9 + MOVL DX,160(DI) + MOVL CX,164(DI) + MOVL R8,168(DI) + MOVL R9,172(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + XORL 224(SI),DX + XORL 228(SI),CX + XORL 232(SI),R8 + XORL 236(SI),R9 + MOVL DX,224(DI) + MOVL CX,228(DI) + MOVL R8,232(DI) + MOVL R9,236(DI) + PADDL 160(SP),X13 + PADDL 208(SP),X9 + PADDL 256(SP),X3 + PADDL 96(SP),X2 + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 48(SI),DX + XORL 52(SI),CX + XORL 56(SI),R8 + XORL 60(SI),R9 + MOVL DX,48(DI) + MOVL CX,52(DI) + MOVL R8,56(DI) + MOVL R9,60(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 112(SI),DX + XORL 116(SI),CX + XORL 120(SI),R8 + XORL 124(SI),R9 + MOVL DX,112(DI) + MOVL CX,116(DI) + MOVL R8,120(DI) + MOVL R9,124(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 176(SI),DX + XORL 180(SI),CX + XORL 184(SI),R8 + XORL 188(SI),R9 + MOVL DX,176(DI) + MOVL CX,180(DI) + MOVL R8,184(DI) + MOVL R9,188(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + XORL 240(SI),DX + XORL 244(SI),CX + XORL 248(SI),R8 + XORL 252(SI),R9 + MOVL DX,240(DI) + MOVL CX,244(DI) + MOVL R8,248(DI) + MOVL R9,252(DI) + MOVQ 352(SP),R9 + SUBQ $256,R9 + ADDQ $256,SI + ADDQ $256,DI + CMPQ R9,$256 + JAE BYTESATLEAST256 + CMPQ R9,$0 + JBE DONE + BYTESBETWEEN1AND255: + CMPQ R9,$64 + JAE NOCOPY + MOVQ DI,DX + LEAQ 360(SP),DI + MOVQ R9,CX + REP; MOVSB + LEAQ 360(SP),DI + LEAQ 360(SP),SI + NOCOPY: + MOVQ R9,352(SP) + MOVOA 48(SP),X0 + MOVOA 0(SP),X1 + MOVOA 16(SP),X2 + MOVOA 32(SP),X3 + MOVOA X1,X4 + MOVQ $20,CX + MAINLOOP2: + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + SUBQ $4,CX + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PXOR X7,X7 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + JA MAINLOOP2 + PADDL 48(SP),X0 + PADDL 0(SP),X1 + PADDL 16(SP),X2 + PADDL 32(SP),X3 + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 0(SI),CX + XORL 48(SI),R8 + XORL 32(SI),R9 + XORL 16(SI),AX + MOVL CX,0(DI) + MOVL R8,48(DI) + MOVL R9,32(DI) + MOVL AX,16(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 20(SI),CX + XORL 4(SI),R8 + XORL 52(SI),R9 + XORL 36(SI),AX + MOVL CX,20(DI) + MOVL R8,4(DI) + MOVL R9,52(DI) + MOVL AX,36(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 40(SI),CX + XORL 24(SI),R8 + XORL 8(SI),R9 + XORL 56(SI),AX + MOVL CX,40(DI) + MOVL R8,24(DI) + MOVL R9,8(DI) + MOVL AX,56(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + XORL 60(SI),CX + XORL 44(SI),R8 + XORL 28(SI),R9 + XORL 12(SI),AX + MOVL CX,60(DI) + MOVL R8,44(DI) + MOVL R9,28(DI) + MOVL AX,12(DI) + MOVQ 352(SP),R9 + MOVL 16(SP),CX + MOVL 36 (SP),R8 + ADDQ $1,CX + SHLQ $32,R8 + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $32,R8 + MOVL CX,16(SP) + MOVL R8, 36 (SP) + CMPQ R9,$64 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI,SI + MOVQ DX,DI + MOVQ R9,CX + REP; MOVSB + BYTESATLEAST64: + DONE: + MOVQ R12,SP + RET + BYTESATLEAST65: + SUBQ $64,R9 + ADDQ $64,DI + ADDQ $64,SI + JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go new file mode 100644 index 0000000..9bfc092 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts +// the result into the 64-byte array out. The input and output may be the same array. +func Core208(out *[64]byte, in *[64]byte) { + j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 + j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 + j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 + j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 + j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 + j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 + j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 + j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 + j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 + j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 + j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 + j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go new file mode 100644 index 0000000..f9269c3 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -0,0 +1,24 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +package salsa + +// This function is implemented in salsa2020_amd64.s. + +//go:noescape + +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + if len(in) == 0 { + return + } + _ = out[len(in)-1] + salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go new file mode 100644 index 0000000..22126d1 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -0,0 +1,234 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package salsa + +const rounds = 20 + +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. +func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + var block [64]byte + var counterCopy [16]byte + copy(counterCopy[:], counter[:]) + + for len(in) >= 64 { + core(&block, &counterCopy, key, &Sigma) + for i, x := range block { + out[i] = in[i] ^ x + } + u := uint32(1) + for i := 8; i < 16; i++ { + u += uint32(counterCopy[i]) + counterCopy[i] = byte(u) + u >>= 8 + } + in = in[64:] + out = out[64:] + } + + if len(in) > 0 { + core(&block, &counterCopy, key, &Sigma) + for i, v := range in { + out[i] = v ^ block[i] + } + } +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa_test.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa_test.go new file mode 100644 index 0000000..f67e94e --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa_test.go @@ -0,0 +1,54 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +import "testing" + +func TestCore208(t *testing.T) { + in := [64]byte{ + 0x7e, 0x87, 0x9a, 0x21, 0x4f, 0x3e, 0xc9, 0x86, + 0x7c, 0xa9, 0x40, 0xe6, 0x41, 0x71, 0x8f, 0x26, + 0xba, 0xee, 0x55, 0x5b, 0x8c, 0x61, 0xc1, 0xb5, + 0x0d, 0xf8, 0x46, 0x11, 0x6d, 0xcd, 0x3b, 0x1d, + 0xee, 0x24, 0xf3, 0x19, 0xdf, 0x9b, 0x3d, 0x85, + 0x14, 0x12, 0x1e, 0x4b, 0x5a, 0xc5, 0xaa, 0x32, + 0x76, 0x02, 0x1d, 0x29, 0x09, 0xc7, 0x48, 0x29, + 0xed, 0xeb, 0xc6, 0x8d, 0xb8, 0xb8, 0xc2, 0x5e} + + out := [64]byte{ + 0xa4, 0x1f, 0x85, 0x9c, 0x66, 0x08, 0xcc, 0x99, + 0x3b, 0x81, 0xca, 0xcb, 0x02, 0x0c, 0xef, 0x05, + 0x04, 0x4b, 0x21, 0x81, 0xa2, 0xfd, 0x33, 0x7d, + 0xfd, 0x7b, 0x1c, 0x63, 0x96, 0x68, 0x2f, 0x29, + 0xb4, 0x39, 0x31, 0x68, 0xe3, 0xc9, 0xe6, 0xbc, + 0xfe, 0x6b, 0xc5, 0xb7, 0xa0, 0x6d, 0x96, 0xba, + 0xe4, 0x24, 0xcc, 0x10, 0x2c, 0x91, 0x74, 0x5c, + 0x24, 0xad, 0x67, 0x3d, 0xc7, 0x61, 0x8f, 0x81, + } + + Core208(&in, &in) + if in != out { + t.Errorf("expected %x, got %x", out, in) + } +} + +func TestOutOfBoundsWrite(t *testing.T) { + // encrypted "0123456789" + cipherText := []byte{170, 166, 196, 104, 175, 121, 68, 44, 174, 51} + var counter [16]byte + var key [32]byte + want := "abcdefghij" + plainText := []byte(want) + defer func() { + err := recover() + if err == nil { + t.Error("XORKeyStream expected to panic on len(dst) < len(src), but didn't") + } + if plainText[3] == '3' { + t.Errorf("XORKeyStream did out of bounds write, want %v, got %v", want, string(plainText)) + } + }() + XORKeyStream(plainText[:3], cipherText, &counter, &key) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa20.go new file mode 100644 index 0000000..0ee6248 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa20.go @@ -0,0 +1,54 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package salsa20 implements the Salsa20 stream cipher as specified in https://cr.yp.to/snuffle/spec.pdf. + +Salsa20 differs from many other stream ciphers in that it is message orientated +rather than byte orientated. Keystream blocks are not preserved between calls, +therefore each side must encrypt/decrypt data with the same segmentation. + +Another aspect of this difference is that part of the counter is exposed as +a nonce in each call. Encrypting two different messages with the same (key, +nonce) pair leads to trivial plaintext recovery. This is analogous to +encrypting two different messages with the same key with a traditional stream +cipher. + +This package also implements XSalsa20: a version of Salsa20 with a 24-byte +nonce as specified in https://cr.yp.to/snuffle/xsalsa-20081128.pdf. Simply +passing a 24-byte slice as the nonce triggers XSalsa20. +*/ +package salsa20 // import "golang.org/x/crypto/salsa20" + +// TODO(agl): implement XORKeyStream12 and XORKeyStream8 - the reduced round variants of Salsa20. + +import ( + "golang.org/x/crypto/salsa20/salsa" +) + +// XORKeyStream crypts bytes from in to out using the given key and nonce. +// In and out must overlap entirely or not at all. Nonce must +// be either 8 or 24 bytes long. +func XORKeyStream(out, in []byte, nonce []byte, key *[32]byte) { + if len(out) < len(in) { + in = in[:len(out)] + } + + var subNonce [16]byte + + if len(nonce) == 24 { + var subKey [32]byte + var hNonce [16]byte + copy(hNonce[:], nonce[:16]) + salsa.HSalsa20(&subKey, &hNonce, key, &salsa.Sigma) + copy(subNonce[:], nonce[16:]) + key = &subKey + } else if len(nonce) == 8 { + copy(subNonce[:], nonce[:]) + } else { + panic("salsa20: nonce must be 8 or 24 bytes") + } + + salsa.XORKeyStream(out, in, &subNonce, key) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa20_test.go b/vendor/golang.org/x/crypto/salsa20/salsa20_test.go new file mode 100644 index 0000000..0ef3328 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa20_test.go @@ -0,0 +1,139 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa20 + +import ( + "bytes" + "encoding/hex" + "testing" +) + +func fromHex(s string) []byte { + ret, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return ret +} + +// testVectors was taken from set 6 of the ECRYPT test vectors: +// http://www.ecrypt.eu.org/stream/svn/viewcvs.cgi/ecrypt/trunk/submissions/salsa20/full/verified.test-vectors?logsort=rev&rev=210&view=markup +var testVectors = []struct { + key []byte + iv []byte + numBytes int + xor []byte +}{ + { + fromHex("0053A6F94C9FF24598EB3E91E4378ADD3083D6297CCF2275C81B6EC11467BA0D"), + fromHex("0D74DB42A91077DE"), + 131072, + fromHex("C349B6A51A3EC9B712EAED3F90D8BCEE69B7628645F251A996F55260C62EF31FD6C6B0AEA94E136C9D984AD2DF3578F78E457527B03A0450580DD874F63B1AB9"), + }, + { + fromHex("0558ABFE51A4F74A9DF04396E93C8FE23588DB2E81D4277ACD2073C6196CBF12"), + fromHex("167DE44BB21980E7"), + 131072, + fromHex("C3EAAF32836BACE32D04E1124231EF47E101367D6305413A0EEB07C60698A2876E4D031870A739D6FFDDD208597AFF0A47AC17EDB0167DD67EBA84F1883D4DFD"), + }, + { + fromHex("0A5DB00356A9FC4FA2F5489BEE4194E73A8DE03386D92C7FD22578CB1E71C417"), + fromHex("1F86ED54BB2289F0"), + 131072, + fromHex("3CD23C3DC90201ACC0CF49B440B6C417F0DC8D8410A716D5314C059E14B1A8D9A9FB8EA3D9C8DAE12B21402F674AA95C67B1FC514E994C9D3F3A6E41DFF5BBA6"), + }, + { + fromHex("0F62B5085BAE0154A7FA4DA0F34699EC3F92E5388BDE3184D72A7DD02376C91C"), + fromHex("288FF65DC42B92F9"), + 131072, + fromHex("E00EBCCD70D69152725F9987982178A2E2E139C7BCBE04CA8A0E99E318D9AB76F988C8549F75ADD790BA4F81C176DA653C1A043F11A958E169B6D2319F4EEC1A"), + }, +} + +func TestSalsa20(t *testing.T) { + var inBuf, outBuf []byte + var key [32]byte + + for i, test := range testVectors { + if test.numBytes%64 != 0 { + t.Errorf("#%d: numBytes is not a multiple of 64", i) + continue + } + + if test.numBytes > len(inBuf) { + inBuf = make([]byte, test.numBytes) + outBuf = make([]byte, test.numBytes) + } + in := inBuf[:test.numBytes] + out := outBuf[:test.numBytes] + copy(key[:], test.key) + XORKeyStream(out, in, test.iv, &key) + + var xor [64]byte + for len(out) > 0 { + for i := 0; i < 64; i++ { + xor[i] ^= out[i] + } + out = out[64:] + } + + if !bytes.Equal(xor[:], test.xor) { + t.Errorf("#%d: bad result", i) + } + } +} + +var xSalsa20TestData = []struct { + in, nonce, key, out []byte +}{ + { + []byte("Hello world!"), + []byte("24-byte nonce for xsalsa"), + []byte("this is 32-byte key for xsalsa20"), + []byte{0x00, 0x2d, 0x45, 0x13, 0x84, 0x3f, 0xc2, 0x40, 0xc4, 0x01, 0xe5, 0x41}, + }, + { + make([]byte, 64), + []byte("24-byte nonce for xsalsa"), + []byte("this is 32-byte key for xsalsa20"), + []byte{0x48, 0x48, 0x29, 0x7f, 0xeb, 0x1f, 0xb5, 0x2f, 0xb6, + 0x6d, 0x81, 0x60, 0x9b, 0xd5, 0x47, 0xfa, 0xbc, 0xbe, 0x70, + 0x26, 0xed, 0xc8, 0xb5, 0xe5, 0xe4, 0x49, 0xd0, 0x88, 0xbf, + 0xa6, 0x9c, 0x08, 0x8f, 0x5d, 0x8d, 0xa1, 0xd7, 0x91, 0x26, + 0x7c, 0x2c, 0x19, 0x5a, 0x7f, 0x8c, 0xae, 0x9c, 0x4b, 0x40, + 0x50, 0xd0, 0x8c, 0xe6, 0xd3, 0xa1, 0x51, 0xec, 0x26, 0x5f, + 0x3a, 0x58, 0xe4, 0x76, 0x48}, + }, +} + +func TestXSalsa20(t *testing.T) { + var key [32]byte + + for i, test := range xSalsa20TestData { + out := make([]byte, len(test.in)) + copy(key[:], test.key) + XORKeyStream(out, test.in, test.nonce, &key) + if !bytes.Equal(out, test.out) { + t.Errorf("%d: expected %x, got %x", i, test.out, out) + } + } +} + +var ( + keyArray [32]byte + key = &keyArray + nonce [8]byte + msg = make([]byte, 1<<10) +) + +func BenchmarkXOR1K(b *testing.B) { + b.StopTimer() + out := make([]byte, 1024) + b.StartTimer() + for i := 0; i < b.N; i++ { + XORKeyStream(out, msg[:1024], nonce[:], key) + } + b.SetBytes(1024) +} diff --git a/vendor/golang.org/x/crypto/scrypt/example_test.go b/vendor/golang.org/x/crypto/scrypt/example_test.go new file mode 100644 index 0000000..6736479 --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/example_test.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package scrypt_test + +import ( + "encoding/base64" + "fmt" + "log" + + "golang.org/x/crypto/scrypt" +) + +func Example() { + // DO NOT use this salt value; generate your own random salt. 8 bytes is + // a good length. + salt := []byte{0xc8, 0x28, 0xf2, 0x58, 0xa7, 0x6a, 0xad, 0x7b} + + dk, err := scrypt.Key([]byte("some password"), salt, 1<<15, 8, 1, 32) + if err != nil { + log.Fatal(err) + } + fmt.Println(base64.StdEncoding.EncodeToString(dk)) + // Output: lGnMz8io0AUkfzn6Pls1qX20Vs7PGN6sbYQ2TQgY12M= +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 0000000..ff28aae --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,244 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). +package scrypt // import "golang.org/x/crypto/scrypt" + +import ( + "crypto/sha256" + "errors" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. +func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both both tmp and out. +func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 += w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + x := xy + y := xy[32*r:] + + j := 0 + for i := 0; i < 32*r; i++ { + x[i] = uint32(b[j]) | uint32(b[j+1])<<8 | uint32(b[j+2])<<16 | uint32(b[j+3])<<24 + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*(32*r):], x, 32*r) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*(32*r):], y, 32*r) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*(32*r):], 32*r) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, v[j*(32*r):], 32*r) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:32*r] { + b[j+0] = byte(v >> 0) + b[j+1] = byte(v >> 8) + b[j+2] = byte(v >> 16) + b[j+3] = byte(v >> 24) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning +// a byte slice of length keyLen that can be used as cryptographic key. +// +// N is a CPU/memory cost parameter, which must be a power of two greater than 1. +// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the +// limits, the function returns a nil byte slice and an error. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// dk, err := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32) +// +// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 +// and p=1. The parameters N, r, and p should be increased as memory latency and +// CPU parallelism increases; consider setting N to the highest power of 2 you +// can derive within 100 milliseconds. Remember to get a good random salt. +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { + if N <= 1 || N&(N-1) != 0 { + return nil, errors.New("scrypt: N must be > 1 and a power of 2") + } + if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { + return nil, errors.New("scrypt: parameters are too large") + } + + xy := make([]uint32, 64*r) + v := make([]uint32, 32*N*r) + b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) + + for i := 0; i < p; i++ { + smix(b[i*128*r:], r, N, v, xy) + } + + return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt_test.go b/vendor/golang.org/x/crypto/scrypt/scrypt_test.go new file mode 100644 index 0000000..766ed8d --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt_test.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package scrypt + +import ( + "bytes" + "testing" +) + +type testVector struct { + password string + salt string + N, r, p int + output []byte +} + +var good = []testVector{ + { + "password", + "salt", + 2, 10, 10, + []byte{ + 0x48, 0x2c, 0x85, 0x8e, 0x22, 0x90, 0x55, 0xe6, 0x2f, + 0x41, 0xe0, 0xec, 0x81, 0x9a, 0x5e, 0xe1, 0x8b, 0xdb, + 0x87, 0x25, 0x1a, 0x53, 0x4f, 0x75, 0xac, 0xd9, 0x5a, + 0xc5, 0xe5, 0xa, 0xa1, 0x5f, + }, + }, + { + "password", + "salt", + 16, 100, 100, + []byte{ + 0x88, 0xbd, 0x5e, 0xdb, 0x52, 0xd1, 0xdd, 0x0, 0x18, + 0x87, 0x72, 0xad, 0x36, 0x17, 0x12, 0x90, 0x22, 0x4e, + 0x74, 0x82, 0x95, 0x25, 0xb1, 0x8d, 0x73, 0x23, 0xa5, + 0x7f, 0x91, 0x96, 0x3c, 0x37, + }, + }, + { + "this is a long \000 password", + "and this is a long \000 salt", + 16384, 8, 1, + []byte{ + 0xc3, 0xf1, 0x82, 0xee, 0x2d, 0xec, 0x84, 0x6e, 0x70, + 0xa6, 0x94, 0x2f, 0xb5, 0x29, 0x98, 0x5a, 0x3a, 0x09, + 0x76, 0x5e, 0xf0, 0x4c, 0x61, 0x29, 0x23, 0xb1, 0x7f, + 0x18, 0x55, 0x5a, 0x37, 0x07, 0x6d, 0xeb, 0x2b, 0x98, + 0x30, 0xd6, 0x9d, 0xe5, 0x49, 0x26, 0x51, 0xe4, 0x50, + 0x6a, 0xe5, 0x77, 0x6d, 0x96, 0xd4, 0x0f, 0x67, 0xaa, + 0xee, 0x37, 0xe1, 0x77, 0x7b, 0x8a, 0xd5, 0xc3, 0x11, + 0x14, 0x32, 0xbb, 0x3b, 0x6f, 0x7e, 0x12, 0x64, 0x40, + 0x18, 0x79, 0xe6, 0x41, 0xae, + }, + }, + { + "p", + "s", + 2, 1, 1, + []byte{ + 0x48, 0xb0, 0xd2, 0xa8, 0xa3, 0x27, 0x26, 0x11, 0x98, + 0x4c, 0x50, 0xeb, 0xd6, 0x30, 0xaf, 0x52, + }, + }, + + { + "", + "", + 16, 1, 1, + []byte{ + 0x77, 0xd6, 0x57, 0x62, 0x38, 0x65, 0x7b, 0x20, 0x3b, + 0x19, 0xca, 0x42, 0xc1, 0x8a, 0x04, 0x97, 0xf1, 0x6b, + 0x48, 0x44, 0xe3, 0x07, 0x4a, 0xe8, 0xdf, 0xdf, 0xfa, + 0x3f, 0xed, 0xe2, 0x14, 0x42, 0xfc, 0xd0, 0x06, 0x9d, + 0xed, 0x09, 0x48, 0xf8, 0x32, 0x6a, 0x75, 0x3a, 0x0f, + 0xc8, 0x1f, 0x17, 0xe8, 0xd3, 0xe0, 0xfb, 0x2e, 0x0d, + 0x36, 0x28, 0xcf, 0x35, 0xe2, 0x0c, 0x38, 0xd1, 0x89, + 0x06, + }, + }, + { + "password", + "NaCl", + 1024, 8, 16, + []byte{ + 0xfd, 0xba, 0xbe, 0x1c, 0x9d, 0x34, 0x72, 0x00, 0x78, + 0x56, 0xe7, 0x19, 0x0d, 0x01, 0xe9, 0xfe, 0x7c, 0x6a, + 0xd7, 0xcb, 0xc8, 0x23, 0x78, 0x30, 0xe7, 0x73, 0x76, + 0x63, 0x4b, 0x37, 0x31, 0x62, 0x2e, 0xaf, 0x30, 0xd9, + 0x2e, 0x22, 0xa3, 0x88, 0x6f, 0xf1, 0x09, 0x27, 0x9d, + 0x98, 0x30, 0xda, 0xc7, 0x27, 0xaf, 0xb9, 0x4a, 0x83, + 0xee, 0x6d, 0x83, 0x60, 0xcb, 0xdf, 0xa2, 0xcc, 0x06, + 0x40, + }, + }, + { + "pleaseletmein", "SodiumChloride", + 16384, 8, 1, + []byte{ + 0x70, 0x23, 0xbd, 0xcb, 0x3a, 0xfd, 0x73, 0x48, 0x46, + 0x1c, 0x06, 0xcd, 0x81, 0xfd, 0x38, 0xeb, 0xfd, 0xa8, + 0xfb, 0xba, 0x90, 0x4f, 0x8e, 0x3e, 0xa9, 0xb5, 0x43, + 0xf6, 0x54, 0x5d, 0xa1, 0xf2, 0xd5, 0x43, 0x29, 0x55, + 0x61, 0x3f, 0x0f, 0xcf, 0x62, 0xd4, 0x97, 0x05, 0x24, + 0x2a, 0x9a, 0xf9, 0xe6, 0x1e, 0x85, 0xdc, 0x0d, 0x65, + 0x1e, 0x40, 0xdf, 0xcf, 0x01, 0x7b, 0x45, 0x57, 0x58, + 0x87, + }, + }, + /* + // Disabled: needs 1 GiB RAM and takes too long for a simple test. + { + "pleaseletmein", "SodiumChloride", + 1048576, 8, 1, + []byte{ + 0x21, 0x01, 0xcb, 0x9b, 0x6a, 0x51, 0x1a, 0xae, 0xad, + 0xdb, 0xbe, 0x09, 0xcf, 0x70, 0xf8, 0x81, 0xec, 0x56, + 0x8d, 0x57, 0x4a, 0x2f, 0xfd, 0x4d, 0xab, 0xe5, 0xee, + 0x98, 0x20, 0xad, 0xaa, 0x47, 0x8e, 0x56, 0xfd, 0x8f, + 0x4b, 0xa5, 0xd0, 0x9f, 0xfa, 0x1c, 0x6d, 0x92, 0x7c, + 0x40, 0xf4, 0xc3, 0x37, 0x30, 0x40, 0x49, 0xe8, 0xa9, + 0x52, 0xfb, 0xcb, 0xf4, 0x5c, 0x6f, 0xa7, 0x7a, 0x41, + 0xa4, + }, + }, + */ +} + +var bad = []testVector{ + {"p", "s", 0, 1, 1, nil}, // N == 0 + {"p", "s", 1, 1, 1, nil}, // N == 1 + {"p", "s", 7, 8, 1, nil}, // N is not power of 2 + {"p", "s", 16, maxInt / 2, maxInt / 2, nil}, // p * r too large +} + +func TestKey(t *testing.T) { + for i, v := range good { + k, err := Key([]byte(v.password), []byte(v.salt), v.N, v.r, v.p, len(v.output)) + if err != nil { + t.Errorf("%d: got unexpected error: %s", i, err) + } + if !bytes.Equal(k, v.output) { + t.Errorf("%d: expected %x, got %x", i, v.output, k) + } + } + for i, v := range bad { + _, err := Key([]byte(v.password), []byte(v.salt), v.N, v.r, v.p, 32) + if err == nil { + t.Errorf("%d: expected error, got nil", i) + } + } +} + +var sink []byte + +func BenchmarkKey(b *testing.B) { + for i := 0; i < b.N; i++ { + sink, _ = Key([]byte("password"), []byte("salt"), 1<<15, 8, 1, 64) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go new file mode 100644 index 0000000..a0ee3ae --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -0,0 +1,66 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. For a detailed specification see http://keccak.noekeon.org/ +// +// +// Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. +// +// +// Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// +// The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// output is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// +// Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 // import "golang.org/x/crypto/sha3" diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go new file mode 100644 index 0000000..2b51cf4 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -0,0 +1,65 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +import ( + "hash" +) + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} } + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} } + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} } + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} } + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. +func Sum384(data []byte) (digest [48]byte) { + h := New384() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go new file mode 100644 index 0000000..46d03ed --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -0,0 +1,412 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package sha3 + +// rc stores the round constants for use in the ι step. +var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. + + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[12] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[18] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[24] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[16] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[22] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[3] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[1] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[7] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[19] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[11] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[23] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[4] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[2] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[8] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[14] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[7] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[23] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[14] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[11] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[2] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[18] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[6] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[22] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[4] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[1] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[8] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[24] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[12] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[3] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[19] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[22] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[8] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[19] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[1] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[12] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[23] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[16] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[2] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[24] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[6] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[3] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[14] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[7] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[18] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[4] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[2] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[3] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[4] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[6] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[7] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[8] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[11] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[12] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[14] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[16] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[18] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[19] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[22] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[23] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[24] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go new file mode 100644 index 0000000..7886795 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +package sha3 + +// This function is implemented in keccakf_amd64.s. + +//go:noescape + +func keccakF1600(a *[25]uint64) diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s new file mode 100644 index 0000000..f88533a --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -0,0 +1,390 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +// This code was translated into a form compatible with 6a from the public +// domain sources at https://github.com/gvanas/KeccakCodePackage + +// Offsets in state +#define _ba (0*8) +#define _be (1*8) +#define _bi (2*8) +#define _bo (3*8) +#define _bu (4*8) +#define _ga (5*8) +#define _ge (6*8) +#define _gi (7*8) +#define _go (8*8) +#define _gu (9*8) +#define _ka (10*8) +#define _ke (11*8) +#define _ki (12*8) +#define _ko (13*8) +#define _ku (14*8) +#define _ma (15*8) +#define _me (16*8) +#define _mi (17*8) +#define _mo (18*8) +#define _mu (19*8) +#define _sa (20*8) +#define _se (21*8) +#define _si (22*8) +#define _so (23*8) +#define _su (24*8) + +// Temporary registers +#define rT1 AX + +// Round vars +#define rpState DI +#define rpStack SP + +#define rDa BX +#define rDe CX +#define rDi DX +#define rDo R8 +#define rDu R9 + +#define rBa R10 +#define rBe R11 +#define rBi R12 +#define rBo R13 +#define rBu R14 + +#define rCa SI +#define rCe BP +#define rCi rBi +#define rCo rBo +#define rCu R15 + +#define MOVQ_RBI_RCE MOVQ rBi, rCe +#define XORQ_RT1_RCA XORQ rT1, rCa +#define XORQ_RT1_RCE XORQ rT1, rCe +#define XORQ_RBA_RCU XORQ rBa, rCu +#define XORQ_RBE_RCU XORQ rBe, rCu +#define XORQ_RDU_RCU XORQ rDu, rCu +#define XORQ_RDA_RCA XORQ rDa, rCa +#define XORQ_RDE_RCE XORQ rDe, rCe + +#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ + /* Prepare round */ \ + MOVQ rCe, rDa; \ + ROLQ $1, rDa; \ + \ + MOVQ _bi(iState), rCi; \ + XORQ _gi(iState), rDi; \ + XORQ rCu, rDa; \ + XORQ _ki(iState), rCi; \ + XORQ _mi(iState), rDi; \ + XORQ rDi, rCi; \ + \ + MOVQ rCi, rDe; \ + ROLQ $1, rDe; \ + \ + MOVQ _bo(iState), rCo; \ + XORQ _go(iState), rDo; \ + XORQ rCa, rDe; \ + XORQ _ko(iState), rCo; \ + XORQ _mo(iState), rDo; \ + XORQ rDo, rCo; \ + \ + MOVQ rCo, rDi; \ + ROLQ $1, rDi; \ + \ + MOVQ rCu, rDo; \ + XORQ rCe, rDi; \ + ROLQ $1, rDo; \ + \ + MOVQ rCa, rDu; \ + XORQ rCi, rDo; \ + ROLQ $1, rDu; \ + \ + /* Result b */ \ + MOVQ _ba(iState), rBa; \ + MOVQ _ge(iState), rBe; \ + XORQ rCo, rDu; \ + MOVQ _ki(iState), rBi; \ + MOVQ _mo(iState), rBo; \ + MOVQ _su(iState), rBu; \ + XORQ rDe, rBe; \ + ROLQ $44, rBe; \ + XORQ rDi, rBi; \ + XORQ rDa, rBa; \ + ROLQ $43, rBi; \ + \ + MOVQ rBe, rCa; \ + MOVQ rc, rT1; \ + ORQ rBi, rCa; \ + XORQ rBa, rT1; \ + XORQ rT1, rCa; \ + MOVQ rCa, _ba(oState); \ + \ + XORQ rDu, rBu; \ + ROLQ $14, rBu; \ + MOVQ rBa, rCu; \ + ANDQ rBe, rCu; \ + XORQ rBu, rCu; \ + MOVQ rCu, _bu(oState); \ + \ + XORQ rDo, rBo; \ + ROLQ $21, rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _bi(oState); \ + \ + NOTQ rBi; \ + ORQ rBa, rBu; \ + ORQ rBo, rBi; \ + XORQ rBo, rBu; \ + XORQ rBe, rBi; \ + MOVQ rBu, _bo(oState); \ + MOVQ rBi, _be(oState); \ + B_RBI_RCE; \ + \ + /* Result g */ \ + MOVQ _gu(iState), rBe; \ + XORQ rDu, rBe; \ + MOVQ _ka(iState), rBi; \ + ROLQ $20, rBe; \ + XORQ rDa, rBi; \ + ROLQ $3, rBi; \ + MOVQ _bo(iState), rBa; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDo, rBa; \ + MOVQ _me(iState), rBo; \ + MOVQ _si(iState), rBu; \ + ROLQ $28, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ga(oState); \ + G_RT1_RCA; \ + \ + XORQ rDe, rBo; \ + ROLQ $45, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ge(oState); \ + G_RT1_RCE; \ + \ + XORQ rDi, rBu; \ + ROLQ $61, rBu; \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _go(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _gu(oState); \ + NOTQ rBu; \ + G_RBA_RCU; \ + \ + ORQ rBu, rBo; \ + XORQ rBi, rBo; \ + MOVQ rBo, _gi(oState); \ + \ + /* Result k */ \ + MOVQ _be(iState), rBa; \ + MOVQ _gi(iState), rBe; \ + MOVQ _ko(iState), rBi; \ + MOVQ _mu(iState), rBo; \ + MOVQ _sa(iState), rBu; \ + XORQ rDi, rBe; \ + ROLQ $6, rBe; \ + XORQ rDo, rBi; \ + ROLQ $25, rBi; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDe, rBa; \ + ROLQ $1, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ka(oState); \ + K_RT1_RCA; \ + \ + XORQ rDu, rBo; \ + ROLQ $8, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ke(oState); \ + K_RT1_RCE; \ + \ + XORQ rDa, rBu; \ + ROLQ $18, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _ki(oState); \ + \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _ko(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _ku(oState); \ + K_RBA_RCU; \ + \ + /* Result m */ \ + MOVQ _ga(iState), rBe; \ + XORQ rDa, rBe; \ + MOVQ _ke(iState), rBi; \ + ROLQ $36, rBe; \ + XORQ rDe, rBi; \ + MOVQ _bu(iState), rBa; \ + ROLQ $10, rBi; \ + MOVQ rBe, rT1; \ + MOVQ _mi(iState), rBo; \ + ANDQ rBi, rT1; \ + XORQ rDu, rBa; \ + MOVQ _so(iState), rBu; \ + ROLQ $27, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ma(oState); \ + M_RT1_RCA; \ + \ + XORQ rDi, rBo; \ + ROLQ $15, rBo; \ + MOVQ rBi, rT1; \ + ORQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _me(oState); \ + M_RT1_RCE; \ + \ + XORQ rDo, rBu; \ + ROLQ $56, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ORQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _mi(oState); \ + \ + ORQ rBa, rBe; \ + XORQ rBu, rBe; \ + MOVQ rBe, _mu(oState); \ + \ + ANDQ rBa, rBu; \ + XORQ rBo, rBu; \ + MOVQ rBu, _mo(oState); \ + M_RBE_RCU; \ + \ + /* Result s */ \ + MOVQ _bi(iState), rBa; \ + MOVQ _go(iState), rBe; \ + MOVQ _ku(iState), rBi; \ + XORQ rDi, rBa; \ + MOVQ _ma(iState), rBo; \ + ROLQ $62, rBa; \ + XORQ rDo, rBe; \ + MOVQ _se(iState), rBu; \ + ROLQ $55, rBe; \ + \ + XORQ rDu, rBi; \ + MOVQ rBa, rDu; \ + XORQ rDe, rBu; \ + ROLQ $2, rBu; \ + ANDQ rBe, rDu; \ + XORQ rBu, rDu; \ + MOVQ rDu, _su(oState); \ + \ + ROLQ $39, rBi; \ + S_RDU_RCU; \ + NOTQ rBe; \ + XORQ rDa, rBo; \ + MOVQ rBe, rDa; \ + ANDQ rBi, rDa; \ + XORQ rBa, rDa; \ + MOVQ rDa, _sa(oState); \ + S_RDA_RCA; \ + \ + ROLQ $41, rBo; \ + MOVQ rBi, rDe; \ + ORQ rBo, rDe; \ + XORQ rBe, rDe; \ + MOVQ rDe, _se(oState); \ + S_RDE_RCE; \ + \ + MOVQ rBo, rDi; \ + MOVQ rBu, rDo; \ + ANDQ rBu, rDi; \ + ORQ rBa, rDo; \ + XORQ rBi, rDi; \ + XORQ rBo, rDo; \ + MOVQ rDi, _si(oState); \ + MOVQ rDo, _so(oState) \ + +// func keccakF1600(state *[25]uint64) +TEXT ·keccakF1600(SB), 0, $200-8 + MOVQ state+0(FP), rpState + + // Convert the user state into an internal state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + // Execute the KeccakF permutation + MOVQ _ba(rpState), rCa + MOVQ _be(rpState), rCe + MOVQ _bu(rpState), rCu + + XORQ _ga(rpState), rCa + XORQ _ge(rpState), rCe + XORQ _gu(rpState), rCu + + XORQ _ka(rpState), rCa + XORQ _ke(rpState), rCe + XORQ _ku(rpState), rCu + + XORQ _ma(rpState), rCa + XORQ _me(rpState), rCe + XORQ _mu(rpState), rCu + + XORQ _sa(rpState), rCa + XORQ _se(rpState), rCe + MOVQ _si(rpState), rDi + MOVQ _so(rpState), rDo + XORQ _su(rpState), rCu + + mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + + // Revert the internal state to the user state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + RET diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go new file mode 100644 index 0000000..3cf6a22 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -0,0 +1,18 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.4 + +package sha3 + +import ( + "crypto" +) + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go new file mode 100644 index 0000000..b12a35c --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -0,0 +1,192 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +type state struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + buf []byte // points into storage + rate int // the number of bytes of state to use + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + storage [maxRate]byte + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.buf = d.storage[:0] +} + +func (d *state) clone() *state { + ret := *d + if ret.state == spongeAbsorbing { + ret.buf = ret.storage[:len(ret.buf)] + } else { + ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate] + } + + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. +func (d *state) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.buf) + d.buf = d.storage[:0] + keccakF1600(&d.a) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutatin before + // copying more output. + keccakF1600(&d.a) + d.buf = d.storage[:d.rate] + copyOut(d, d.buf) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute(dsbyte byte) { + if d.buf == nil { + d.buf = d.storage[:0] + } + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + d.buf = append(d.buf, dsbyte) + zerosStart := len(d.buf) + d.buf = d.storage[:d.rate] + for i := zerosStart; i < d.rate; i++ { + d.buf[i] = 0 + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + d.buf[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.buf = d.storage[:d.rate] + copyOut(d, d.buf) +} + +// Write absorbs more data into the hash's state. It produces an error +// if more data is written to the ShakeHash after writing +func (d *state) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: write to sponge after read") + } + if d.buf == nil { + d.buf = d.storage[:0] + } + written = len(p) + + for len(p) > 0 { + if len(d.buf) == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + keccakF1600(&d.a) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - len(d.buf) + if todo > len(p) { + todo = len(p) + } + d.buf = append(d.buf, p[:todo]...) + p = p[todo:] + + // If the sponge is full, apply the permutation. + if len(d.buf) == d.rate { + d.permute() + } + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute(d.dsbyte) + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + n := copy(out, d.buf) + d.buf = d.buf[n:] + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if len(d.buf) == 0 { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. +func (d *state) Sum(in []byte) []byte { + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen) + dup.Read(hash) + return append(in, hash...) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_test.go b/vendor/golang.org/x/crypto/sha3/sha3_test.go new file mode 100644 index 0000000..2c8719b --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_test.go @@ -0,0 +1,311 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// Tests include all the ShortMsgKATs provided by the Keccak team at +// https://github.com/gvanas/KeccakCodePackage +// +// They only include the zero-bit case of the bitwise testvectors +// published by NIST in the draft of FIPS-202. + +import ( + "bytes" + "compress/flate" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "os" + "strings" + "testing" +) + +const ( + testString = "brekeccakkeccak koax koax" + katFilename = "testdata/keccakKats.json.deflate" +) + +// Internal-use instances of SHAKE used to test against KATs. +func newHashShake128() hash.Hash { + return &state{rate: 168, dsbyte: 0x1f, outputLen: 512} +} +func newHashShake256() hash.Hash { + return &state{rate: 136, dsbyte: 0x1f, outputLen: 512} +} + +// testDigests contains functions returning hash.Hash instances +// with output-length equal to the KAT length for both SHA-3 and +// SHAKE instances. +var testDigests = map[string]func() hash.Hash{ + "SHA3-224": New224, + "SHA3-256": New256, + "SHA3-384": New384, + "SHA3-512": New512, + "SHAKE128": newHashShake128, + "SHAKE256": newHashShake256, +} + +// testShakes contains functions that return ShakeHash instances for +// testing the ShakeHash-specific interface. +var testShakes = map[string]func() ShakeHash{ + "SHAKE128": NewShake128, + "SHAKE256": NewShake256, +} + +// decodeHex converts a hex-encoded string into a raw byte string. +func decodeHex(s string) []byte { + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// structs used to marshal JSON test-cases. +type KeccakKats struct { + Kats map[string][]struct { + Digest string `json:"digest"` + Length int64 `json:"length"` + Message string `json:"message"` + } +} + +func testUnalignedAndGeneric(t *testing.T, testf func(impl string)) { + xorInOrig, copyOutOrig := xorIn, copyOut + xorIn, copyOut = xorInGeneric, copyOutGeneric + testf("generic") + if xorImplementationUnaligned != "generic" { + xorIn, copyOut = xorInUnaligned, copyOutUnaligned + testf("unaligned") + } + xorIn, copyOut = xorInOrig, copyOutOrig +} + +// TestKeccakKats tests the SHA-3 and Shake implementations against all the +// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage +// (The testvectors are stored in keccakKats.json.deflate due to their length.) +func TestKeccakKats(t *testing.T) { + testUnalignedAndGeneric(t, func(impl string) { + // Read the KATs. + deflated, err := os.Open(katFilename) + if err != nil { + t.Errorf("error opening %s: %s", katFilename, err) + } + file := flate.NewReader(deflated) + dec := json.NewDecoder(file) + var katSet KeccakKats + err = dec.Decode(&katSet) + if err != nil { + t.Errorf("error decoding KATs: %s", err) + } + + // Do the KATs. + for functionName, kats := range katSet.Kats { + d := testDigests[functionName]() + for _, kat := range kats { + d.Reset() + in, err := hex.DecodeString(kat.Message) + if err != nil { + t.Errorf("error decoding KAT: %s", err) + } + d.Write(in[:kat.Length/8]) + got := strings.ToUpper(hex.EncodeToString(d.Sum(nil))) + if got != kat.Digest { + t.Errorf("function=%s, implementation=%s, length=%d\nmessage:\n %s\ngot:\n %s\nwanted:\n %s", + functionName, impl, kat.Length, kat.Message, got, kat.Digest) + t.Logf("wanted %+v", kat) + t.FailNow() + } + continue + } + } + }) +} + +// TestUnalignedWrite tests that writing data in an arbitrary pattern with +// small input buffers. +func testUnalignedWrite(t *testing.T) { + testUnalignedAndGeneric(t, func(impl string) { + buf := sequentialBytes(0x10000) + for alg, df := range testDigests { + d := df() + d.Reset() + d.Write(buf) + want := d.Sum(nil) + d.Reset() + for i := 0; i < len(buf); { + // Cycle through offsets which make a 137 byte sequence. + // Because 137 is prime this sequence should exercise all corner cases. + offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1} + for _, j := range offsets { + if v := len(buf) - i; v < j { + j = v + } + d.Write(buf[i : i+j]) + i += j + } + } + got := d.Sum(nil) + if !bytes.Equal(got, want) { + t.Errorf("Unaligned writes, implementation=%s, alg=%s\ngot %q, want %q", impl, alg, got, want) + } + } + }) +} + +// TestAppend checks that appending works when reallocation is necessary. +func TestAppend(t *testing.T) { + testUnalignedAndGeneric(t, func(impl string) { + d := New224() + + for capacity := 2; capacity <= 66; capacity += 64 { + // The first time around the loop, Sum will have to reallocate. + // The second time, it will not. + buf := make([]byte, 2, capacity) + d.Reset() + d.Write([]byte{0xcc}) + buf = d.Sum(buf) + expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39" + if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected { + t.Errorf("got %s, want %s", got, expected) + } + } + }) +} + +// TestAppendNoRealloc tests that appending works when no reallocation is necessary. +func TestAppendNoRealloc(t *testing.T) { + testUnalignedAndGeneric(t, func(impl string) { + buf := make([]byte, 1, 200) + d := New224() + d.Write([]byte{0xcc}) + buf = d.Sum(buf) + expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39" + if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected { + t.Errorf("%s: got %s, want %s", impl, got, expected) + } + }) +} + +// TestSqueezing checks that squeezing the full output a single time produces +// the same output as repeatedly squeezing the instance. +func TestSqueezing(t *testing.T) { + testUnalignedAndGeneric(t, func(impl string) { + for functionName, newShakeHash := range testShakes { + d0 := newShakeHash() + d0.Write([]byte(testString)) + ref := make([]byte, 32) + d0.Read(ref) + + d1 := newShakeHash() + d1.Write([]byte(testString)) + var multiple []byte + for range ref { + one := make([]byte, 1) + d1.Read(one) + multiple = append(multiple, one...) + } + if !bytes.Equal(ref, multiple) { + t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref)) + } + } + }) +} + +// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing. +func sequentialBytes(size int) []byte { + result := make([]byte, size) + for i := range result { + result[i] = byte(i) + } + return result +} + +// BenchmarkPermutationFunction measures the speed of the permutation function +// with no input data. +func BenchmarkPermutationFunction(b *testing.B) { + b.SetBytes(int64(200)) + var lanes [25]uint64 + for i := 0; i < b.N; i++ { + keccakF1600(&lanes) + } +} + +// benchmarkHash tests the speed to hash num buffers of buflen each. +func benchmarkHash(b *testing.B, h hash.Hash, size, num int) { + b.StopTimer() + h.Reset() + data := sequentialBytes(size) + b.SetBytes(int64(size * num)) + b.StartTimer() + + var state []byte + for i := 0; i < b.N; i++ { + for j := 0; j < num; j++ { + h.Write(data) + } + state = h.Sum(state[:0]) + } + b.StopTimer() + h.Reset() +} + +// benchmarkShake is specialized to the Shake instances, which don't +// require a copy on reading output. +func benchmarkShake(b *testing.B, h ShakeHash, size, num int) { + b.StopTimer() + h.Reset() + data := sequentialBytes(size) + d := make([]byte, 32) + + b.SetBytes(int64(size * num)) + b.StartTimer() + + for i := 0; i < b.N; i++ { + h.Reset() + for j := 0; j < num; j++ { + h.Write(data) + } + h.Read(d) + } +} + +func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkHash(b, New512(), 1350, 1) } +func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkHash(b, New384(), 1350, 1) } +func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkHash(b, New256(), 1350, 1) } +func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkHash(b, New224(), 1350, 1) } + +func BenchmarkShake128_MTU(b *testing.B) { benchmarkShake(b, NewShake128(), 1350, 1) } +func BenchmarkShake256_MTU(b *testing.B) { benchmarkShake(b, NewShake256(), 1350, 1) } +func BenchmarkShake256_16x(b *testing.B) { benchmarkShake(b, NewShake256(), 16, 1024) } +func BenchmarkShake256_1MiB(b *testing.B) { benchmarkShake(b, NewShake256(), 1024, 1024) } + +func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkHash(b, New512(), 1024, 1024) } + +func Example_sum() { + buf := []byte("some data to hash") + // A hash needs to be 64 bytes long to have 256-bit collision resistance. + h := make([]byte, 64) + // Compute a 64-byte hash of buf and put it in h. + ShakeSum256(h, buf) + fmt.Printf("%x\n", h) + // Output: 0f65fe41fc353e52c55667bb9e2b27bfcc8476f2c413e9437d272ee3194a4e3146d05ec04a25d16b8f577c19b82d16b1424c3e022e783d2b4da98de3658d363d +} + +func Example_mac() { + k := []byte("this is a secret key; you should generate a strong random key that's at least 32 bytes long") + buf := []byte("and this is some data to authenticate") + // A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key. + h := make([]byte, 32) + d := NewShake256() + // Write the key into the hash. + d.Write(k) + // Now write the data. + d.Write(buf) + // Read 32 bytes of output from the hash into h. + d.Read(h) + fmt.Printf("%x\n", h) + // Output: 78de2974bd2711d5549ffd32b753ef0f5fa80a0db2556db60f0987eb8a9218ff +} diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go new file mode 100644 index 0000000..5a027d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -0,0 +1,60 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. + +import ( + "io" +) + +// ShakeHash defines the interface to hash functions that +// support arbitrary-length output. +type ShakeHash interface { + // Write absorbs more data into the hash's state. It panics if input is + // written to it after output has been read from it. + io.Writer + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash + + // Reset resets the ShakeHash to its initial state. + Reset() +} + +func (d *state) Clone() ShakeHash { + return d.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} } + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} } + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + h.Write(data) + h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. +func ShakeSum256(hash, data []byte) { + h := NewShake256() + h.Write(data) + h.Read(hash) +} diff --git a/vendor/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate b/vendor/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate new file mode 100644 index 0000000000000000000000000000000000000000..62e85ae24236b46c09e5cfa84c71c69f5cc33cf6 GIT binary patch literal 521342 zcmV(wKzy6Q^@gM*Azx~Vq{J;Fmzx?Ch{x$i(5WRo?```ch?|=FG z>3{z3AOH9N=YRii|M5Tm`SX8hy#;>6sy0=|rHk_99c*FNJPEr@%PjJ-+%s}pIW)HOadPLF*FZ1|m#XVEZQL^J z8}q2xyRg#2Gfi@x>sGO%u(G>lHS%u@0*@p+eRb&B7)x6 zN8q{HPb`wT#pgU7NDnR~FjQ-F0CtB9*D1syD{4u-^FG;N`lJK5Z?1ph0V@v4gb@;o z8qApUxUUa^^qr%q02@32@&v6X{_!F2Q<;Q^{IbPjE;~`mCci#u zMXxC(#SH=&=YW#-X->!mi}R8_Q3iQ1K4E9TUk>*v<=wGIL}ZFwdcyrvM>yu(Y|WHZ zh9&T-GeNEaN;wWM*Vf0EkE>rCJpB5oPf>5~j`ZA($87HtIsElm^4j**aT_lNKcehlYQBnAr;U<|JieS6xWwAc)abQG0+dqn7?Fg;B-D9(fn6uhft?JZ|+3lz< zvO8!_!SD0?{8Mw33Z6ySJ>&VFSc>heIicxKlXZB~XFkL}qG5E{F7WVGKX}(VU5Mv@ zf*j!2ik}Gb=c{m94B7{eXL^vfPbf47&cSx)rv~rFAG8SM5QpKG^m^pdjS^ATl^tZ> z*cJeYq1M+ozR&kzqR!JLqWGYNNFG<|280)367J7eOtW3GiA9n$nOnp!eL@#bj1V8G zswVB47O_h!eSM|SXW-R+d;0`fR`D|Lkz2j-!9(f{TlpEo#3NYQAbALYqo2mSm)NAM z^2X9WMN=(C+8ugN5_Od;TMPU?4 zFX89gLU+|B>{>nwWPJ@*tYDs62jwJQu#F;1ONDvFU*8+_`Muj!Kqhham`AS*G?d#O zDE2s)6yK#Da{u&IVg()08$Y)u0?8`5f!EAi$siCRLI9$gaAs65zGg0f&o9oK4thL1 zFCkRKlNGwj+Q*d+YK^C4-81E43M@%``3B85inwP>`lH z@nk)m-MYfhuSWgO04>B~k`dd_$s#SE#D?DmV4&wS0e(!jS0es?raoK^E~s8&BN3R~ zV<~)xob~uU+);ZpyS%P9IKKDw-QQz5oEME>-^8f*U88khA(}yy@Q^;GD=T}KnRy~T{b(Q;r-M&m%Xy_YdqpJtt={SmZP9X5ztwEVSar&5ysd1_3huoKX9U} zdm(hg&N=5Cq(2AGFm!hA2L`SEjzS<@BeLn|=0a0l>?#AeVveTYHcwZHKZzX2^u=p}x89dl| zqTF1*5>CQszHa*l=1lh8(b4RTB-FzD*_$dEi5Vh5!q&-%<~ z<}HMlD^~M^GSd@U7Tj7k&shek{Ho%6_fa%`8+RB>MciwXl#jrMSGKJckis=G5%MLZY_&M!16{9_UPlMkr#ro?jX_5l2W!?aI zXKw==^I*#SGxNlE21ZI93R<=8qTAIY*hLQz1BfJl^VM=!qcY8m|m})$_ggv8+s_ZA#;3YH#0r_#hXqRARsvTXnX_ zM>jV}E}!7295*odQoN_-`+EmCV}dl9(8{kH#oZ0cp9YT2Qlry~7>t;!ug#>)XGNbb z(C)JvL8cbvd5#>jrm~-DrR!Z#P$hGx z4+WQ{?U^dZnlVHrp?S|sWT*@JOe@TvegL?SZ&~1a`w47EArJ_!mW=2^c<92+1lYSp z_Elgf1Ttvn*Q=Y;ROMQyKeZR-k7Y%!1#W}o&-=quDH?fQw7I2k=sA*x>B1*d@6#EW z4T#wwpO@0^V|jXL{r0HN$F>LO#RXKB!Yk)!T3x9jlm|mmNtWfy5cz(y6}%QMphOjT zMv(=gn-6Ho4zPrYQ5+GAf|W9K z=@uuUI5$*ycQ^U&>KH_!QKfDt%X^_#Nc_oc`LrUyE1P0}Y6NmJeZ?;+`!=+HI$=n& z09!hk2I;l*BWxB_$d+4SPBNz>I2yiJST>)vefvg&V$zoO?zlVl5{RMExW*VKhTtlX z7%8&9a_B92f@@ZweQ=VniP9NhyVzt~`TKkh^zz+42W3U{>vkyzGk7CW`UW*oAXB(! zA09A#RcD_Lg7!GgNN_ahkqPuW#0SFE!_9nA(miK_Z(aP&OS!ljW2ioV1M`fO^;Oh` zWRc)R)zA7Y1+NPH&2EVx>)seCtP~hteu!Y?cF1mpFal0(zW zx;qyQdV5NiY(3z8A_`uWez;6;tnE;Y!Z)km^xAmz3M##bi=RCJ`$1OLD{7foZO_iVl{hb@Wn09sTuRizS zJB=*LP{pRbPK8r#pUYyi2@EeQ)Y;O+O4#JKU4lpFF>J1D7JVSxX6BL^t3l_4hcE4Z zZ{vYHm7{GNDsg%nNbq_hw)Do}k^`RO!~gi-t0kZZ&oP&RH-wc%)- zJ6n{?Ota$HkN2JJ5k=Q~%BMVZG*&~M|@RE zpYF2_7h#Bi+M1MLISzyR=d{|Z_a1`84Qaebm6_zNoxoN^yV);B#^W{AsS?IfgKrG1 zXA#cI&w1|2B$=V~n4GvtZP^6AK6~!;V6gDQWFEuNz1XNvR{7J1VEPFGN;1>PtK-uf#{yBmV8XY(|)myl9jtD-j2v#>R+bTpZNGRt|DRoh^M z59WGr_4QeG2*2q>2vK;nj8iKx?TWc_|D-9doH(T1Bc@LDe%7J zt%j$r7m-gp)*ULe#h#Oh)4C%i=}NlTU8`7}vOT(=^*%QYp6FT$E}l$*d2@3%LyE)z zRl!{x%e$ADm!I{1ded%oqk|X;bsb+c7#yZCA+zYnylRYW%7?h^J-cfuhD%GZd5SmC z%n5f}6gE?48Fr8lg`Z8SR_D)(VYW$Xnaq}K-kHA~x%0Ca#ffBy8VwfY?AjfqPd&@0 zOFh(*o{rlPtYO6(%eqE_6p&})Tj_o?>NozDW4Iu)NnY5n*5j_Va!F0@tx2DHEApaw zSFlR2OSAF!-GQOhS_e}UmB_NjIP3Jb46YYP^3^f3w@!C)%%`gL)32oE@>Gv$;hYhh zX?@bkU%nj$4@^m0fh!8p;;u}KYt>FReF}>J59I+~Q@NnGJ^?y(+r(GbA;2bKAipD(V}+;rvkwz3p$bKYFbIQf zfVT##ZB^t1<90GWQO;(~EQhbU{nNwfCK++6l8gykm_iUPSL68s6GEBJIuj~?BPQhiAiqa zhC}POq#Z3D;BKFles55Al>u6Glb#6YT8Tzz^rB$-@7lYYOXA3zubu5ehjZ{=Qjn$JiPWp7*31JQ2jQMc!b9v+06J%J1`%#>S|?11>A$Nd?;B`6An>7=nHsCjaYAB^%)lGiH0qp@M^Rz?QN6&Y z!<|^`1JH?0Q}m$4Cmk_|MNc2N^Q6VZj*yZ|Oiy}qGc8B~Pxw2`dPxlQC`-$HSbbVD z6IX{{_z1J%`YULh_j}%Nl&4@9t`~IyG|ouD?d&?NMSz=?dU^$==Jm?)tA70Sx((7? zFUZpsM$@UgZb~ip)*4f};20h>kh<4k01#bs>VM?IJK>P^pzMY1gl?A4x#NWI3c z2ajW%C5_(F<=RizlXc4uIC+>A(A8%*wy@AB)O+S5yXXuXJD#{>UkK2r>!Gd}k6*ZN zRKc5#4n0ybN9-`}eJK=L&x|^G_qH5Tg=&uQ1pYM6G(~)I#>2ryBQ}vhVP{E4Ytc8! zamm4Rm<{l=<4KJTkUSinR|wgwK#}wsEZJ{FIV*|m;LLPWmOe4~^uOyzZ@0)AJrNu5 zI9f!;%RC*V+@}(!DIe{~Qp2{f-KiHn(Q(*vSwzEk$>*DO6`;e77W%s0$(j~Lqp3^* zPhc(E^-m?lY?t@pJbPhK+?Q^bPpM3n$*scc)jkM*6iYPvg#v#%Vb=09DtT*U{Awy~ z0>zE>=_A2@M_9~P)j?yI9m&#fnThTIhuEPj^X>T+*bpWmd0E9t>BR9{Eyt7VUOnJnw zb%eW(Ulk9dKzpUPoI7lm56SHyTX2Q}ZY>iC1i9Y$KB5iAiRkdjH)m^*`<_*n_uT}s z(|DShLXyPwyBeckXk3zVLGUJDXbDXz32u_4ki49fk~hz^3oPESyT>OQpN@%KLyma5 zL!}4rjy)uMcdYh&dY)_{p#@fByo7U50%hwd8LqN*EF$a4%gE2vlXkIqB`#5prDWYpvGjfW0K?k6!Of(h-yL)GLI|;Tw#gotfRH ztu+*w4nO6bW@6hDt3VFR^!%AC5GY7Wxp;#^&K>DB*QJ>qMM^zv?~IZrX58(mAe<^a2dHOPh~|Obt!?#9Jb`jyUI7^V6`L# z2Ee?RL|ijcINb4C7XzY!N(tr_7N>UH4!tq37Ygt9^!YlriuNMIiBgHY2GMo8-$SgH z*beqNvXxihC?BXOd^&5@O)wAMSe?YnwtFmOHjOqul2X2JkSBEL^sO0Y)d;4f=cYZZ z?XH^iv@M30d|al0?d_3(vbrPPjQ zNxc`oc-l^_ovYXQ{7G?p{`Q&zn+@FSeKc9-7_m!R4EXZJi+IGENX4_n3{9V2!=Da} zG{N!Qok6lgSLLV<77M*8I^Z_O@Xbh^3Z7887Xx^U`(u&kpt?!duMrT@JMfr9l<1{A znAN?R>ZH&;yaM2dCk1%h*Z8!iNU*pjM?`ym<~q9GBZ-&mBD3#Wb-G*7QY8?*IOAyL zR@uWLr>?&6d7obFh6!US$J#uJbG>5O<5~XPn(SQ;vsNS9*67_+@seka)0d@kz?La~ zEPJSV_p;WkQt?!r^sH7LOG-x)HyvD-a-&E3AOhVy8L{L=>S!J@{B-3fZhY{ByN(@2 z6WQM~ue7PSm(|ttG>GDOJb|vC2z|Qp2GqOTd-KGq%@FTirV%lAlH_U866krION^U% zd8GBQBFGd8`+!b5SB{)2S0bjIp5hVfjhv5g%X%C-C-=6Zlt5dn zfk|TrAK)uG?(_0^o9!a+CrWDm4)KNfcu6VAj7PrZFiu2X8B~P16sdRhBtIug)la8Z zettx;RyfCH25lzqt)S|HqYGSOYrCVyl)X0HM_b{1*+r%~nlN~2B`~~PdOa4g9rg@@ zAT4X>*^v94J1@OX!nSijt>CJtSTL`tFyNsw$5Pav9qpmpZSK6Q;SrDwO&#{^QAgai@ncoZ=at13dwF(W{o*f9hxkFm6dFjvpxB+o`Fp`b#9WlicHj@pGV%x zL$c>o3Ao_?MB>$J%ua&}&(0&w}6eHA8OZ>CgFM zpQ%p0jX?zJU7cIVB_d?PiiTSQJ4kbqF%kP#lz+N6_)|Op?Yq~luS}>zjNDcdF0&e> zqwYM0_$Gjc>9s`w*YoT0^Lqt&yl&%Hh#>Df#*gct5fF^l|m} zbAM4tu)5F@8Y!-tCxPh{6?Du!kuMoTLoD%mzd-iIrIR+rNFCYP+6c)W$8E2Qwu-(^ zH6L)?j3r5v;h2KA=<`g1px4PO-Wt)vqc>$FPm6^T_NP~!9{F;ql=|zK5KvGI2Y3pK z9MC;WPdo`O3S4VHb5ndedijVxqmZ|ZSMrwpI_>fK8=vOqfnB1n^%eM887k{+jlNFY zsj0MUY2F9$qBoqX(%Tjc82BwSvOf*km^Cdyct=0Ycu0w_FT0jMUfBzjB0RkOaWPcC3V1l>0xtQ|HvkX4H9Y#D`RWAMUqgCj=UB<$E zn>__VheUI&&JbXhJ9xZ8eq!ztyRonL>ZOryJY{-8R%+7o?q&NseN#Nyw=U2)gKqD& zz25GL+c!=4zy-nY!P_hyzb91k9_6$7>GJxV*<=vunriJ7vG!H?Tu!okP(SXy6jvjy z=T%>b;de4Ok)@%P!Nvj z763W7Q_J@;XPt6+iV}a zb>Trt1Z+J?T4@Iy8<*)2a!qu2?FLnG05@;CfHR%R$ppn6V>-u3_;`7UEjQD`g49!{ zu(cOFW80dDHaShHFn7!xIMl*wERZx)A{>Y zoV(`cbu}6&7p_e}0UfL^?Sfpym@T~(1r8FHeYPNAhGG}$$nsvI!PCHZu~+&`DT(_m zU)%~MQR(@+4dzCsNEh6_buOgElyiR@VLYS=S z@wl0LE2I`Hz~P=Sk4}Oq=zQVE`sry#*FgMWF9K02$LN9@a9=}kg+!e8^^-1k*j3jq z*#eAp(5$EtS>>07kg1d*2;HDYJ-jm|9&K|}YS+U|ar)<*@8Nd3C3E|UOunnikv=LC zvhoe*D#oPC*)}2mg2^5rZH-ifA z4&;JwM2xPMLM*oDc{QhxHf_DsB3<^;maC%%hxc+fpF#T3k^bzYJ9e`kZ-TnHO8vSy z^pAAthoT4XI(jE!kor`$R`*`H#oOoowCC6ekk6d9cW~L9G|FnJtWJ>aK@chCBEdRQt-2horL*d*zFPKM zSLUO)%6zvN?u*rueG(~dV3A@O2sNny3hPpGQhH{3AQZ|m(Y>#Jc3Ah@tMv$33eA|^ zH%ayIjifE|gD1+7T4>rd#=7%~-}jUd7g?1m60{3A^`=*7Dl4GCui;^^0$I@Rn@my$ z=8*tLaVv|!$%OTyQ32ZTXi2!3Bi z*%KjIs8e$1(ifxkncMhAbDB&Ym#e+HEQ*q;Ym`(Tgv(X!!PM2ECp01!YMVJ8`8xc) zFt>tY)YmCxXc>P>EGPl(ErYJ=@-Zq(qX@o$$ef^q_ZCW$IKBIJw^SW(-S?szS@|8J*o&s;jn;$D%Lv7i znyt=5W7E0bXr(vok|8$vF5(Nh$M8Zzg`oGb0=xp!(#A6is_4gBAktMvBMjpTgJha0 zKq>fOx%uQ}uqy8xf}L%qA%9CE`cCPENXhTrcHNRbrR3w zgxwN~K4-F^PZamMI`0WKQcNvCMejkgy%^q&dDvDX06F`hr&VUBs&dL*QVL%&B{U4U zISBzS1Cx6ivp>4Dm57EGd<*6W(oWcx?}Zm&KLDnULpMt7Ah|^ACE@j8+1K2Lc#@?onxXmvV+?c z=I_0CL<^oGnd0)OgkAyl@W{w%faR@W%h*#QddU|PE4;SgPp7M1@6Y+ouk@uocI!x9 z%zjzzG%SIo#V?LldRE2X_*k>yCz7AtCEa+T)jE{}NK4fjz|S(3>CFoZ5!k3+(ASBJ zS7FIe_$=uipQ<*b(HQdcoi)Hd8nh)6ZhnK?T>5OP_vqZ>-j+M|bB3}&Htzu3GLlVc zG-A}&QP71vn|pJ1ruzbV7>X+Ao~ZwxPNs-nb)R&)&QG;!6dbR4Ny&vXGd zNRG6+180@+y{J`z*8R5R&^LWYnwl{zUWpihRf&MlICE-7nu9 zd50a%A@+htIm%P5b$4ZYyv68EIfePqx)2^RxP;3?BfzkV!Y-*KMz0aEH#kRVFAtyl z!xxoNbnodX#2h{7WfqADvGQ#PH=Rgu$pwD_RxP1e1a)%SDLyh;>^b$*GaH)bkP~mz zluB3NiWeMKUo@ws&*VetM_0;*tY3J3-<_Am9JazDa$cJW%ewEoD zd?&i5XP)b+#yOzJ$cqftcj=UZDWvkYjj)O{D`a}>rE6+l20Q6g5DVG{`7tkFr3=gx zRoHW`aMSG0OUekhBzc8wt&0zmPy-i1<01R=-`BGfW!NUCD7eb;cPJ1{`&6IpbU%3u zTEwWTilgv_==Y^s-!>3-h|2=rtqxsF!~+dGVd$g+-8*V)?Hj=$w!qrfH+XdX?I)(6{YXMy zWeHLrlZ|T0#8D_ZfWre)dO4=#0$L)no6b(6#Ps@}&{e)9WSHeel|gjIB!h{ruwHK( zrVKrSF;sx6*GcMq;TtgeLJLwABPG3+r*u3)`OUSqZTn_4fR^?XJuHGAj5WtRf+sKA z9e(D{7Y1|O=N-#1U8{YX^y!d=5rcv-}- zsfPaEgO$QTd15srCra=x1MTwpss^BTJ#V~gT-+yQuehGE#RBAGf86H^m?DA?aT`g1 zS2FwYxSA#wQP{R6IKb5H!lR%Kwj*o+IkboPjN#!=#hyZtlkzQ&Tne&I`YaALmqN`e z14v$K65SBiy)R_HFYYG3$+v0jVLS!pwYCybwf6*rjYn6f$1r5mU9KZ=Jk*pT$RwH) z0)PVP3VfkU%Jg2KQf_&lTcJ@O=U@N{&neQG6&|!OEtgzJG!5hVsQ{veB)BEZ?%Q2i z=GUw-zAHm<8saG9s*@3Od=?LWstqVUrFt12D@5u zO(lR(%So&rt5~2nJFOWGGeRIyw^za93in$c=rJ*cCaglK z_e?z2b$v_=H+tvijE?~uH|c3(&X4ctC>YpSaU25)D){ zNPlSXzzDnh=9R@}kC#fasr5>wtv!U-;gT&{-W|C$sJAt*Cncwlq2P{~qT5FixA09i zsV7~mGIzu*bA;XI!xlpl)(!J9!^Dl9#VS>ma_Xkb zEJ|(erk)SMOY#r}i6vzkL|2?ESe0t9Y)_P6_mO5YTOuRZQ@N9slHA6h7{4B&z{CUx ztF*?4zzO%XmDNJd&xaFfCDwhS~Fr}_}*op#i;8uSEAl0(Ds}ZI%Kf&ars3_ zWNoKU@N=Iue#(0fFudNNUO|C@5gjdtW)Gn5 zVO$je1LK?;-b=Qy5PhdXPP5S}56qv#k98In98)rNiCm7I?^O)>@F2;fHo%hYN2qG@by8y;SkSR;X+cS<5o}uzMQL=}}?#nE|dX0yQ1xn+_ zg?-8HHw-BFPKoHDSViqU;x=G3;bT)^R%?-iyappgb)g9J3gF%oP! zgTyh(dc2hEkeE)xULt%J5vY&ey2Ox;c|5j?A)^O%-Yhwl*W1lR^*ob6ih@IFTwH~+ zvTMY>`n0RreISBsl;}cth44EB}rSFl{7uxdK5xJwDK7od#&h=#{KCh`qX2)3eUN3E-fxMb&ojcY%yBAlU z#01@3bH0$YNs+@TGK;7(gpso|C1^szOM}m2qx5EgmYRq2mAp|{&S%W8*#fymL)bi3 zW21~_n<-BozYD&nXu5UqUO)5q)Y<@^!9YVjXiso&M{(4jPQvU~bV;1Zb-899a(KbP zl3)nCoIrL5qZ~25-CuYsd^!oP$}6)j6gg?LFid@Ou`>LcG`VxM?X(zRIA#k20pe+3 zP9}uZzDgNa4ahKk?9l>gNc{RkJ1 zutFqJ+fW*}afy0)OP`e2J5oLcRoG-8569gaZEsFiBbU;^_Fd68c49QQ-(O+xqVn4nSeF)T} zzUSd11LLHxqF~@X4Xj4b`@-Dx6LZ1TEACegZLQu4h-$J5@wP;>POcfmnBG?FGjkG9 zyg0J8jHk?5gO?JX_H4OR65y7K*F>T@CdHerH!@wJ+4%}MonKwOjl6L0?K;Ypu&#ix_^tF4=rg~2D##rm z9MMgJppF^P3m(@y53V};2)*-tq3)lZ(=!i@uGf<$v>Ktf1KS~XgTd&EKsxOZvc_q9 zociEFVfBkm?`ic4%HtB- zo_bBHWJD5!7Cwty8?Xp_+caR$-$W56yTMXsENByE(m+SwBLIhV$iUe}y`Q*KW-a9CxIOSr-Ff0ZZ}36!zSqFSB6HZnDEh|kIg1JNd;BET z92c=i7DCE+PR^-4mvaEzELc5M4L6>33oRAJ^gQJWNazR(B%dUrk|e|@P}Q(N@A=Zt zZrU;cgJx?uR*_+3uu1^?h6l_Aw`j!+sTOK?;tP2M_-v=CPa?`wv7~!DRxNil-m@ps zKFNdI+Sb-iX^*_3#NVab2Auj*16=0x!jy;To!K*Q(zJ%CO)C!X zYi9vIVc(xYy(}#1wKh3v&z>!2PP&};lczHhLvanc`QAGRvv2S}J1Z<_!Gb*FD1e^Q zbgMz?T%cF-x@1LQwI2y9G_Bi8JiwD}dV9e5T;$+H`)Y7*Mg1MSc2BkW%q)FeK5Y*x3^hSBR&lk~a@I7jH z+?Ftvk}ZWJtdO}<0V+RTC&s%^)t?6UgLd4i0nmOC{;u*ksb~WvYZXjo<`aI@Z|&2y zgw%u3Rc2k`>$ ZZsLg$&N9BRAe4vjM&U3cLIy|dQ%H^$doZq>M6$A<4IbEga)XZ z!m47)a-lu0$Zai1pKje)%*x~(rEExTnKhMJYBt0&jpdttFYN*9-8p4Hz4Sv18!k#C zEe&0sO%aC2k-f|Lq$vffnPxyl1?m?8J9x@&X|)hQW~GvHJq8cjYGF4IZtU=dI)u4# zeG)*Q{gz6ntan^hwl-!994Lg)A8AddwN6IP5V!|Z&s}9=<3oz-o-sJq*eiTNE-pS) zI4CmsX6dHXA-#Z%{DBBMC3G&C5Wnu|AXjdzm!*0PuMd>4N1mvh)ZJ4YuBTY@V2&Ka zmyJmP>$$?_i~tYm=xQ+gy@e;bu~B(EPL``KS2&J8r&ma_UJc_eqVz%lvGlc6c_3bf ztK+TFD*RCHpdCLc;Lp~JFeMO0J)M=>pp?s8`y5e|VhepaV6iGEr4y12J+FF*ALDy~ zpc2o6;c>SeDG52&*(|AuJA`Ty8^}QmiZq>cXB;)*qYWKMT2n3qb?teCB~y+x**nlW zD!_8HBEIpCw0L!9r3N`1x*aH}j2Mt>fSLS>zq%t+F*hve151B#FMl`TTCRN&4)^#< zCA>BW$Hs3GkHbzuCJek~kfh(neUk7#n{Y6{S%r?UnpAK_z*NR}`u08Xle>5VEf8}6 zy590EoLdDu!`uQ*GAp5lW1R-1ae?|dgh^L1AUYC)V&PWyJD*54$E7JPba=FY+ZTYA z`Vv;`J=?bHT1`7AaXEXyyF|nzG;Hf$^q5{*b>>H6BpLd-KVT1)5RM{4_lAw!bx84p7?JO8q z%+F@y9*>A(mm^q3$<9wAEL6jL>FxET6TU%3WIt}3)GTpIE4;*$vZY|J+p z3z*JZy6p%RPn^2^>XNQ2f=XEZUD@*?(sw{GT%pi~qvg%dn@V2B^`;RsG-YNaz3nN0 zeymw-K&+TfRqo{F!W!jmw7mdMx1b`1${x{_u$ND@nHwHIPUm=NAjT1?oE#LjxceMr zw3k-~214yN0G@`XsPete4zvK!q~0=-M`JSTKdp&SgJDs8RIc#u(Yx3z?IDj)V%(Em z9F?i17&iE%LO)xyL@q(;*+Ow!hDTnJGrfFpoh0Kb*oXHtm!0=?{5=%6Ol)|~%XRMS z$k?m+q^~@TCluEoQxVXmAF)`EW*gFoHQGu<$*x5_zYOV?E!WlcbcQraliOEtev)+rV_aq z4N1DvxvLf_rM=~$gn9TXRC^V0@qLnEpUv8A*WpzhQ%DxEl7*L1L1NJ`1x{}{a(F0< zIK%NPE+!X6VLS(mvPj(s2LnL?SA9S?jiwFaVapP!U6_^yYV5rA!q6#gCHD;2_jTwX zbCEd(+Cy6o6qb_361->QJGO5gklcn(yHzwQk>aa69t4V2UpAUG3O)AAQo{-)CAg9O*q7%!e5~ zDEfE|9fy+kAbtH!pd%f@4&mXm+B2p)PKCV(YR$&4T&lh28Jfc(OvgF}mN;{ynpG$e z`WdbTA6Zy#@{0#5NKOGN!&W7-X`z?zL~qqSuyMm;8!oKR~+DtarxG@wFE3)D#V zAcl4xKCKv&G361O3ZHl}TyQ&@4nkKy=Lvmd?r4VZ7KDt39b|PeK(tU_0BcW=K)f65 z_JI5*#6JmPnvjBu%%OBgoz(#yJXZi0kQ#4%NbhtbQMn{3XGP9g2DaH9_5=)IxJdR< zH{^jp_h7B8h!ionExfV1aAYMihc-q|40-4Fpmdjp&+trr=AoSsJf|8?@Oei>^>$-w z0OdU^W&xWKIL>wl@Kn%dNeF?4-bJi%spdo-aEzykC@1zqJ;&fu%em z#Z--IdNnYII@CsK7e1Da0Rc5(evvYV*X5AK4xP+3rFfw^S84SL1eI#T5#*uvbbfieS@;c4H#C6dnKBBlUI(T#WqzDpih8AS7{4#CpKC}{-ZKp& zAn`Vf$5&58o0TTI((4d7w*vT8rf+&(MZfb^d(;af)Gs!D!`?B6BCRuQtYtI>sQeDW zq64F)qiM8=JP48#3fO_cJi@-a#}N_HyTC(7r9>UQaz4 zr*tvq_kh)B_ibTUTZv?&fEkzGKAT!#?I+^2qqfBN+Mc*fmO#v5IV*zblPCJ9sBrr! zFFHKGCVpnEMVY`N@wSKA0`oNQbu}fTYF7Fvd5f)q9Tcn^Smy=yyjN3!vOu8=7%-y4 zJU+7sC8fY-&K|~%6OWr{hgtR~H6dptNb+=Fp1K4hJvFrI=Tx^rWFASndw@0TG~d*G zwtPah148s}C3w66fvL|rr*=XVF@^9JM&G)iA8I>z7KIs!gy&MIJER#aSX%g5b6>B+ zkl@y~x(lFOWSwnhlNq-#v&o z&lE*%*<-u)ord~8&^Ko==dAf`pbeQ>OMtvsf?+7OEbSsCFLI>sJ+vp4+ffDhdiPH9 zVWl+og3E1%LuEyu#Sw}x@Sd&mQZ)OR{~6-EGh8@%UUto>;F8Oppl}!Ev(0*eIkDA{ zQrgVkReq`x!;g|4tM|`-lLa>nE&*b1^b{G0Y2xxS?)o;(#UeHR)LH@_!6!ZMGX`uv zSm!SE;-p87+pXOLWiFhLW3s44qpx+EJuyx?AiK^q_hOUn(H)n(JsZ|7>wS0Fk;T=B z(RW%K+`voV=FBV&S&nSAQV2b*ddKIHSTn7OyJPPJUe_80Xh0Q(q#IQ%-%{_!YOD3b zDc>6#SMEbNe{ielYr#6n({KU`)Sl@uJL6iFsm~jD-Tc@mkuCs% zlsDx%uVzUsrs%Z9FAzt9N6z93=E!;IeNbLNs)d=17|IM zB`DAI6#4c&%eT;cTz&+BknNEiW+2j`Ff=k$Mm3(@hLQm+Ojxc4oDXqk;i-;zN|=Tk zWh&%|TlsUGeN`z!iTcDW%tHXTg-0G#z;nW5DyTshDR}$*`NetP#@2Ws)`pm4n#5R6 z7`-$Enp!b6)3R2kYL1GM#Q?D-QV~jtc#TuVC2ek)h$q4Zi11#^Pm=7;lROIv>>*I@ z@m_hrBY&l6#KuHPbq3gReS7^)(q~BMHuIM+kkYawhm|0+C@S5+tcSatl{Kg1cskw* z?wk-7q2eBbA7MgLxQK;g!wUCm?>wnFb86fpfT5@?#~FPJ;u`vPvaGkJYTM+-f&>)K zCaGMwsK7(qFX6mVxAlb=z80C@WtJ|^z|ru1!@*g;zS-a;=Q&%ui=lH538Vp0?XB=5 z?aU{9K8sx?7Jt&Phx2~#(*$~IvtCsuxZ?4$zC0ng7%zEivFwIfomN#;x$;SqeZ~b- zj(UoBM+QYtBpV0wdd(@`%T3t;x2=~PTcK6bT$u;!>g_^0{NPT0emKU&%|M$5&Tcd#>jt9nvS*TYB(XdsGf}rw5dc9zzQ0~Osk<<>*DYE=$#mS? zmfMiZ9k#_Gw^~DZjE5`hy9iSKXoqd=6|PA5tHXx4)x=BWJ+9Ep%^gij41q+#-|vZ{ zw1l{w?nQuk8!JO4JCRil_#P&WT0N2m@!9((%D;m{sUTk~7DHd9_`I=oxOjt4JRqPC z$DweJRvp}@72$Y4$GyfWm)gWc~YP= z$$bx9Ra&fT_f^%i%h!44_jXnEwu$Zb?N;R_fiCV*4eY*L!y4EqI(Zj3nR9qsHwH}X zILcbaJY8I8dWCO^^A1maho|~XIx3>|NPpind!c61S_QwQ^ivI zmjZU_cAY9Sf&yXhb)%QT@wtYR>Gjon z3)}LvsX6+_cLRanT`(|+*CU~)bgu2hTPStUakVVEIe`dWLW)& zTfDc%^)v4QdFcH4M}%t}73GVde6f3$g$#aAw9VTgYO>wG$RhX*6R$6k%mWxmPxW@@ zA=?x2S}buH;FcDd?0Bp=N3Qc~ING`#*mk%`t-zX}D(QPM&@BZs)JKG8`RXWXk!K0xG)_qKwIo*2)y2k4hrxy?e zv8skLx?yha{bH{I+zJi0k276|<_#vObko6v!JJiE?D){((= zqMcN<5;>JF8Y)lIQ9$k5(i}LHVqQES6k978CmpQwbtJaRGA3lCo6I*tXzyLPPwCCU zlNSRwog6&M(MFMHs2lB7`TS?V=mE4MjZ{``^O)%E+~eJ7787(#@JFg+oFO z!0<=i;w(GAjCsKT@nNGC` zD`tu*xFY*i=GvUJt`Xp(ouOrkE>h<_)7lpbo%Xngivif41*=I!qDhK${I zf8on}EqL;V0mjK^rn_RGKxcq3Df*;KiUc%JYReNb!pqBD7cR=uVZ$acZ2-yOBF)f# zQI<^(Nr)YuV5iqrhH)gYypmc;9h}Y_rX9!lZ0nOU`ivUduOPuN#CXN%xhih>Z7q|E z;YDJnBM&RPFy@libA=;rPEOCpCIR4=s<1SMi#nX$uP%BCrS(t>8?J55hKCHrR&_FZ zw}ZsbkLqH9>SYI8@Qg0SGT6?th9t}>cTK~!>XjKd@)o}6qzu$Rtlbwt{jAM_R%0K? zEjPU05+Smn6al_;iZd`WB2pzh>*t=P;SHmaDT=jmY(L*wlfMoIPx8{C=*~13KJSN> z(=*kW`8MJ?rtZ?c`6P`$Lx*75R9P?GZ9TFK?7kI3zj<)6P`30;bY4H}5m8V8uW7PD zeE`86pf_<0dpGmk0f+`1QG_4m@s;)nt5KF+NpW&@?Xz0E;UqHeTu-@CY<$dT!vbnD zL+HTvv~xPe9x@yt0RX*Levk#-)MxLA_^HP6+W?3?+KT7Iqg0F!>|W*5!^GWws1MiQ zm>F3~tUWDHHLSg0J&;-_Fp0}wwDClSMe&;_qOsWF12L^Mz>3;Hg8eYP0+8JV4)>F` z2cP!H5;4IiqPR|21DMZ8Q5bQ;^_RK!#=#IM9N}ci_59FOu!n3CtZ?hG z)0w;;>=05n=Nt$kI1wO!zy|Z;)fO7kBZAYs-oR~7I1ysJP@%`X(#y}IjLcB`UT&pL zPZIP{WJbCK0g~{=^O<-Yl9jRSLNz!FGikXp4Nr=kCjoS^E62GTnSzp0rbL7pq?LRI z0`aNDPm}bSj>CnE*GSJ&Z4@#7jKGvy+YYzw6mb+;IE&$%xbGuKZPW>A$4HutsLUYl zDOXqwsH;TV0cnHR=F3CHamlAN4*LWiY~VdqX|UIivsIBD7%{zTN21&J^%L}U=%F^x z!p=I?x^vzvP6vE00u06oAuxZ|ORgQ9xV;_}tL)OUT&0_y)^Mex%(P6fhTU)LDJ7-S zQr&_dbms)GV(pa7K$&YAd6udk*n*f2dUQ}-ToFv?i)nG4e2p4$) z+OXCuQ12#e~P%UHKqi<>u?>fQ#)&seejStZP9rckhMp09XfEuA#B!CL}+I;@&o z?83DZvr&5+qLJ;Hn`{|Zh8&5BX(MgV6(oK3io#rDYiA5cQBo#}U+5q%VpbKB7J*(u znuYD!ETV-g;Gdn*^q!eC-Qts+)SATZV$wQ&?`NL0zC^ZAZ;s7G5@vX>oK&g;cpJ5S z&`SI85VYkxPwHnlIn#2PR(pg%GWs?cLI=l?$|}#U0MJkmXqbyop{d=e8DS4O#k8<# z820?k_k8G{3VtZJ?oHw^*k<8YmY{mB6ei&C(hjA-7lYEngbc}-fMd= zUAWI7p-I$d-i~-uhrZ{a_Xy$juCS7?)SQ)aK{5LV<1&_8(<^-1I`BY2M6GvELSZfz zh`OG}yf(LcUCAx)C1wX+$o;fUV`#!yjEQ0EvQ*ADGa~D6x`0^stli0rlDMPdlRl$Q z+m!N-2BuA|9%H_1H}tEb6ySWDeulG&Xo?=E@fhC0o%*^`I);^VgqXb8qsJ*^aZUQP z<~2Z+4(@CnJ!8kPta=?mr-jo|>47H(Zjs$1sfDhA8QeCIKI|7#%F#Hv^}Juxp>&^A zjDVjrb2H7Fn`1q6fg{Cok$$z+jU>qqEpG&{Ww-`xwS*7|QNewM$5fmPP;n{62h98+ zkU%qQB!3RziVb-65)h6t27|kFSV7qYLm)Bzf`y|tnaLNg(!3Q(AY~F=O%2 z?%lEN^PIShHuvV^CV&Mro`_Ih?CK0w;v#(;1RO8-dDUx9sS3TT_Jb8Zh9wHooUA5O zIk78KTzs}&=b|ReQ@R4}iRbYcA>-|wgU)!TCM-T|xU`PT)z(~O(^ubMS4c*gfJ^8z0#;WEJFK56&YiASz*Wiq7a`1 zyf+t3db0H3G_`^qj9=&YE0v(@(BPSg3w!%b;b(Mt`-CbVA=z|?=-xX&mKpUfBDpdy zskX&!Ne{0|3Pw?0dzW`yYK-sb;r5GrX`r}V1B9IGq?N@EhllSn%M12qMsBd`1sl6? zf**qjkEBRq9tqPKT80W@>_iXI>Q|`@5fFJ~V%r(5C=zKl1QvLl7LqGdqx~SotP0mH6-tC4ZXGH|6;fYdU2)uBI7xvs- zEw$g%QUJHhj)bG@_T{3tVf%=mRB*}Jum(EC@Ok&VD%OWsF4x55ThM}CjDlz23OP(q zdqIiQr@gpeT!^1*!1=N(tl<_FK8uvzgyQqs8@KE{VWr2ix!omn?21;cElsUU8k>Vx zqp|UDUJe1$B()S__0@&%OS3G>!{eJ|Vm^2}?1$4o$M)0*^ZX$P+_ULt^XmGL;rawy zj^+hkU?a1=UI~>i8X3ONIsyJHS1s(Sp|n^>SKJ%Y9*>cjd#6C@SP%kGwRC$TQ zs^yDC?&I$S&FO)Yg-M!n^^>-O$FQCvG54OkfGj|?@?^w2e8Bg>LY#nmt904MqvNG) z_9<{Alb!aPs&-Y?X>!<{bZQ`228LV24n5b0rCg7? zUQ^V~SeRso?p%|-Y*89`_F93TC7AcVCRzVUq)Qd=g!gms@l$If>OhQ^wx{JF9i$>d+^6 zdW(eD!FjN^j9xa_0mMPPeRMQ>gcQ$X{psc%7(n{GC%pHBlXs@k?}4{jI+KZ_T~@zM zPIBf<;|JQXp-QG;G3@KhE*xpF7_VCdIWwPFYXm#rU;q@ZQO%XJ4Bjwdu~jDRxrwb`1C3aJDq4QT)D%mknfBFd;5n2g`*)_2UGnb&ApAMz(or%SH8V ztxTU0r_V*Huy#B=Xw+shk7u{Y>FE>QFk;fFo{~PkB9H6@T=hvA2@hRPMHLn=Ff4{Q zn#BX?@m{UKi;;z;D#yq^wNwB_MDSQOHDns28JF6buAM$!4uX9M0>0oTmuQ^*u-o^W%jualvS87GRlUp^)^=fTcgtao6buyLkNUt?Mel-UjSE=&yf8exp^Kl0H8fJw zc=_8vbj*8d)g~E(KV#3z2zb6V2>=YNp#QCy(%+KZBLP{t)6GO|Mue?WU0-HzOZz+ar2H4j4 zjVm|Rgn86!dJLh+ex^oEOpI^u=2`{$ku6P{25JdC59V4%+LD=leFhHiio~jXMxd*ls=NDnffa_+LOk+SYCqji=pM<{mX|_zPWjtG^ zN_BiZnc|0R#p0EUAPRf-M9d|l_NIU(L3INR-UUTXs7LQXFW01G-+K<#TRsqjP!OKu z0x(euhI7q{FHhS{5XW&j)BU}0ZLUqdr*W}B7_Wm|y+=+^pOUDB-@&67@){q%n%jxX zE)FQL;YS-m!Mbqnbpz}E5@t#*2IN`yY%BXCmwfUl22rwYh$Rh;9s%LtG>bkwImqYh z@5xk(uXa)U<}XSq=t0JYvAb7zayn};yU08o&nj(p&{WdqIrP*1q*OkmP;uOrlk?`Z z@;dq}!8yt0n}?b08Ol{qi#_0(;%eg)2(21KxW@FD74+$;$e;0oq`&oPm<1_)7~A}! zGvYl53?9G^5|ZO9p580)yw)L&XEjwfE*N~mC7=+k_#i-9I8|Lse1(%SQmjIW8Fw}u zU*z#DBYHZ<Pb2au}?=Mm#56uf_xc54g$dWwJq)Kt(w4hjM%;B9Z*yfY@Bvn5{QHJx#I{|j! z^|pw(w^OxF8{qFA5kAgfnwjZmiB?c1`qGXqBL>WMBcACaABsOKYz#}Ix-8^Jjg)Zx z%KcpXPI{LD`AhRZ?^D6yRMCE;tm zO?zb^SVY=J1QFZj&iRzexioL)0R%lSPhvX-xs?RJwefnwgpY8mQmw`yp#Wy;;#FqZ zvBb#WZX(E$Pv6gYbn>{~JmSr4S7xu$@m6%q(P>B$V-Dfou`Oq_<8N9&8MKJO4BPAh*RF!%#!!x(Q%j8K7URt!O|Gvh*Q^Y8w+5x0bB0s*lj{#_|C69sbkzl zE$1ri?Gi{vos~cTnR~4sSnO_0EZ-yu?n38ds5b|Q1EPx5nD?G0x;1?gYx-%z2rXg6 z%hNfdV>0J9kSGpojLaEj1JD(+jd3E+fQ*T2O;EfL&!KejQEQqDqq|PPvjSfcT$5E& zTnlqy)iIz~V?EKwL=&|(VppmN8b{But;!^N4C@tZCsos&4kwdeZcRl^*;`t11uaNiWpKa^_9A>6tZ+L&Lr4y&6M8dCtb-WFXy1FQkU@v7z4T z+aVp0=qphzW_>j;cEQkUCLCb~-}vP?Ox5)3ykhHCxPn_shpQ(K%R6HvruT^%d&cni z>BBs4zR^iknnt|_%xodcmp~%*h8P=a1@66rDJVnE1D;U7wS!aK0jJj@{Ypqhp5NAJ z=19x#obujU@bhSn@*9dlc)$DXQw(Ma4y)$P9*w$t2m~z7>@*v^-ESPGuimxbcb|># z@U%7WF#TP7q3nA?DX#`-G?w9&9h0JF&IAZsupwyfTk_c$&n%$9sOF`c#Zf_#SPFf@ z`=&A;8zjD|DjJ-(1Vjw98?XeYi@juowc;5+&nLdK zJgqglEzb%II#-IsYFX2e-XbV88r=QN#!yXxU~Nk>@16HL=R+x+l{xlQ!!Qj6HKj5P z`nKfJBy`sX(TeO7Zo%GIhpSWbOgwckWbB<1z{2KV)`{U zo|k49TIxg2Br*kSL20ElW=E@(HKlH~=W7e2Zq(s#jJTtU3(>Ybl+lF&(xEO@diI5k zK|byGTwFn}yLP~}sh;#+C=WBmo7ynYLe3*PEbG@J34nduBzTqRYecP!{R&Yv(CeMv z^9(P2#SM1tn)+ARz;np5n78^+N(c`7`5QGU{6%m*7k#gp$hpV?EzUX&9}655XWv*q zNqSbtr9B1qP4H)+O3579r%#iqUHUrP_<*j30S?3IvWkw-V#yhJ-!qp7@p`UZ9wV$Y zoI(Wm-pLz(qxF=DCm3=HtkEQ*SV}++V$EN5O1Lbb5s6vYm9V~8+A!k1L3Nd8ENLD= z<@abr=wQ$1Y?SnlI$LMXckE~ z$AyN!Q2|A47)b_)k>bQUbj_MaAu8_0CEgRy!(b@fOhMsiW@dl#=tb-eF+0vyPawS( zj($*=1}kXfaH0u8u=q(ae(#{Pt5$8}WtbE2=4gXhVoE{yGp|>#Yfmz4gWLDfi5MBO zm|Ro3KVckGqVsbLc1xE?heOx{GVmThd&pVrlGiMz<`rVzuyy)y4@?u2;qXk@Vq>}} z-KAzIBtUi@8c;ozG9+ve`jBR5ymz1KleU$_ho=imocWxxR#dT3S7Q&CoDG+jSeqp( z-v%{_lBSj2A)ju(+yGtdTU6da!zerx$k47?x#|#}STXG()VvO0;-5x6NXzc8+WP`c z2F-juIWrMsUwW9apO0f9nODE(pCr>~$U1Bx?=$0Ydj>XdJVz@0tj$nk7D^1==ZLvz z+Ea>gCuQnU;7pwQYUQw%kDYcjzyLyWpHnvaLA*y}&LFogX>U2!6!JOSbK0SMY%|D3+X#>H`eL@H zdEIOU?jC~XAf%8G!ypC$(@RH)H_14grO!0;(Gj#if85vD;`071g@tN{?ZJ#Dss-iJ zxE`WDtTMFzNwm7k)7P5BP_Is3C39WalNcPL$t#QnzgITd3v~BM^!pB8XyNWg&oQoY zI4^^2Sj;R`6;{)+g(iH1fk|;BUpf=h>6v)&8-YycyI6vhAf4OF>=!~Y0J;^#Ex7{E zcjUe8XSib+xN}cgnVp^91&V1Duew9LR;L0&!IfRW6@+?>?XHq{URru|5?wk-F=?@3 ztwZIP+B(Unz!)(!Se+%$)F~Z%v`^(!1d)&f$Gp0UOKM<)Xj{kY!a9W3-wb)~1w>Xd z?oj}ptvaP+=VfLsw$tPg1pH*8C6vXofma5|-OZSWHeeAgpg`aXUbMCtpt3yTsX>WKb z{E{OH#2$+5i%+uUJB-Z{3SGW?EY@xhg<{5R%{fSh$8IMOnQPf*mjE$(A;cDIY`dL3>mft!fF6!~>l;z}y5oFD^yt z?6>;{n`APjjZbBXwM0;22jUeNln*%e%21(RJt zA)$A^=y+~f!l6o($hq>g!?I>iOOzdE(f}!){Xt-tPZZP!Ku5h*z(rvNjsot~_hNB7A1C9)1lg3g@|YinaH~WHU9i zg78MmSOyJ+Wn*VZtud$~+@akf%n$(xARuHsEg##GIKs<^DIhC*hAWCZGX&rma?oOL zwUTciKtx|6>dIl#rKe89tDLU4G%_&Dc+<}TJg}dbV?t(I2Uv-YGG3?jdQs0R!5|Kr z*-l(4@Sn;o-b{;$UyodldHN;gI|0z-c~_IAWvs|6Pz+Uu_cZk&@l~S8 z4(Wu{C|MbSG#Cv~DNMnQ?TbwiZ(67%pvgO!4P1ZHXYuo$r*|Ps92CgXT|9i<3$r@Z z^#-KCSzZ%jq#Hl#{3iV~wDr?hH92I`0@D-DrILeyQE}&zxQ6Xu#{{p6ItZs3uX?=Y z`iZOO#-2-QM>$s|rKA%P;EuYuZp${=6}ec-VA4Pg1fm?#baUJ)o-9_vovzo*uGczg z&ui`Yz&VVA0+(FQtdTGwi%`+QiG3YoE6n}QCORo~jKtv1~dRuHO2M3BV*?a>%-Y)5K!uv%# z%*>!rA1EK3fRvzb`ILzmynE7NEatGy>ERi~pR~tkeDm^Aukti8rdu*{lvGdbXvt)( zhqKfEg4a|?1_zLbxwj^kFr_MFiWD_W(;Mvi9^%PqwVo}MSazCslLe$+l!v=`cC z-ex_Ccra{>!&B^wyi*h<4fo)k-npH_wbxn_$sN&EMSE%C7&Ek(KNxz|CIwm!!!SAO zpz_`bLZ0?Bmv!Q7UZ|f#niz*w;rTN}eYo-NA)kz~h*oH&^exI6gpe!geDBExK7*VV zpc}$_daSA(Rn=YWm^>~CDg37HOw0W_O(E5#Qmf$diFYE-t<`n@ol;c0vKymT$GZlwE7 zAH97ev_UoRiTLwxCr*J!mh;x@Ra$ozM5;p1f%S?obs~;W9FC7Uc82a@`j#$WkW0v?M($ z=AZNW;gfzsNF3zx)(-ovkdZ%w^P~wy!RHzguvhEE^hrK_hC8qTZ8r)lSIfF5rW?oI zc1}lBC@-(;D7b<#4hzKJq~?PrE2f3QvU^DV4$J8|tU(5`6w)>s1f(Q0nj}?`kLUC= z>Aht>bKvt{vQQ&5c08a}ORjhR((tf_$QPP&!jO$mPmPq`^`;MIoC_LU4#Oo>4GFL` z*TYmanWq-vHApC@$F6$9{_LVpeUF!}N?vuunn14Cr+Dsz5jd-h&KfDPJxrPA$;fs&woO$ZnPUFwSf?`#9m z=6%!uy_<92coiTos|h{kG|DUa@xwCf86jVDg_YUsOpAh!d_p;|oH7}-)}OF$S?_c$ zh>w-O)ZBB@0`l?%GAxa3R*(#3*^HA-$TWcCEpwJ?d5Ty;6#5vTLwIlP1um;uQ*SLe z(S~Rb>SHdTR_~X@B-DJdD3}kmUMQ1TnezhsX}yt-Lxgif6>3X;kygFa)!lkDU?f!m z+>N1?rfr%ThmaUhIEd;IT$yp#p@}L&sAl}cYul(`IXaFS3EBKM>#ga}yIPHMr@zVe zYH=MnsAZw4s>FL;k6|{`5uDOd;_Krb6>rUX=4JuY|KTx#Phazc->B$ zMbC1kM3UT{^>%&o&oZckzyn|Mn%khf2Z1Ml#q(dHK4dpU2;#Z_nT z@jP=(C>HbT2yM48DExhMv)#A3sYur9d%da=SbpGww_r5Kg$pGEww$l_TLSXlXa6rs z;&)G$wntpqL-Gj@dn9XQRz%+(jC9;};_51&DVH?9tEgwr$nEP5dt00x0?;Qn!4_EN zv1*%*bOY>5@~qnzu4769k2!H)fVI91qM0P#4ku%TVo(SddHK@(K!B8{q~uAV#Nr)J zr*0X)awC3R{;0(^uW!;H$nI1<-(%Vc4IN|&3u0+b5Z1SB*fgex0dzRd_;fB_NJDCi zjn&1j**aX9Tkew8;w_T)bnJO>zO?D8X0?5A`}R-0^=OkM5Adf5j(5ep;325n<_YV? zwvEawlM{H2@XFkR`}GBke2PFmL*Ss+<{VHK&YRYAFYhPvT5w%;&Kb(1LDyVU&P{AR zAQ&M}u>HZ_`VbTgGu~s7aX4cN341C$GfL1i`;6gA+XsfGi|WY)&AXZswT9$%t-qqx>qiHAK?5O8Nb#E>ndp7j*;x+`O62a-w#@a;nLJ1BC;Y+zk_i-NTtyQMvm zmKKoLeMkJpx)3X3>%@DY#p4KSvfstQlAp=)!doextzHGo7drlY)Gn=FAX6Kbo8O3t zEjT6mcH-4!ym|FY1|3bxp(_eKxa#yKR&>$gx;?XB!vxcrGF3{vdHpE^{oeJR%4xMk zx)=zcE>*7GiiN{0BiTw8nVc98^E=~n1xCAw>wOtKNGc zv4FNj8``xGT_XnYwQd4-8~}SEP=x!N+GdQX0?Fxk8zPCqOc;%bayzhcghXCocn@a8 z`q6_4AGR{yhja^<&w!C&tfAGyXor|rW{UxmNI{;_B_~(9{=DNIg9k%10McF+37J`2Y;d$T+EzA>iWI|}ZQI_IJ;MB4-Mk^vr%W5*1%Xr$4 zR3o#iYR&-{#osZ*lDAR#vrT*`RHJOg14qSPuSV*kEwIHMlpGN_d9x~aR@M+#I9Ah( z`Y5cA1slxkUHaLv^eNWXx#O#&=Lt9u*R{n^HXpR|8Jn<0+7^!aktxyDQ_nJgnOXT( z_@&Mb&gmtJAo9aCk{w#N3pcjzveMMK+=u$io>9TmI&?&I8x*X|I4i1)>?z{pS^?`h ze0_6*Hq<3ou`lbV6eL4m9s3qPQ-4H%uE|@t*QF2JFA%#xJ!a2)S@95q%`uR!d)S zx}K{hDoZ1|L|;V?+EYlgdP0=a=+4cf+3mWDN~*T~@NFc*%k&@+PAVKDR7n@9?D5NU z;FmHJWvKTo!Oy=96uYed+bA_l&)?e7`&OlSJrK4K&8h6rSlR`Q<%BiKV$m7Ok679ezc z;V%&Ch4=^f=erU(cr){4*>XQg(3ly-w^3M){sD9 zM`4E%^;m{tVX>ZUUGU+ff^}a4P=HaYiIdHH?sKmBp1~%O0Vq1{dkz5*uHLW7p&yTp zz7$fwGz_p83gA|pn$^zcPUlX4f8{4ELBSr0RMh6^wzLVh=jFRcHthmH&ABXxDNwtb z*v~6%(d3v>)u712`59$uD(()XIw!|26N7^iY`&Lq_8wxc^hN-KkY8lIXp5ttxuK;*}_+_TKzeVG23ufnM}vsx`N@$ z$T1edMFNm;g_)(7<;G-0RQmeN-a|=)GkQHo2D5zoY$m}I`Hgi>w6%uh`YWbjmujdz52-lZx%U!yUOj|v z; z*KxdEj=d}*^A2*J>db2d&j5}X19%u=%|*gbGF14uBh?VIumVRjh6wdNnxqcnu2Gj= z)5mpV#$-unxzR<=N|4}&I#*9=rEydqQ)i552CO5#$33`RelUv`9bL~W-|20$LaQDL zovyx}4*fZ3TG`_D8+PU$w;NQwfH-Lq)oIv}Ld$l3ha1b2^C=4a43(wG@1l7FXB+_R z!2raJgVF;>9KkGs!BLSS}Z^XBy5{&^zw&4)E2Lz(4_=fK;&}c9? z56+xO=mwd+jozCF3;T$b$fSq{uwOHK9L9`B&4u{Zk}qSvyQPvtPd8`mD}#^|Pq&Us z!YQLl(qN0CX<~oHlOfy|B&K*fOiu~2Hy_Ii)uWeY#HUFt#`aJw`Smynv0#%V7BM+# z?=9Bw8r(HXGOjg9IAM$(z+<-+MIA zE$>B-G)G{si*HgYOqQlf@_YC~C78oFo1PeWEK8t5SLaTA?QivT1_If*}lWqeIU)c{ca zRvz-|j8jMOIWwHJ4%WVEivv$g$oF#yeY%4HXpRt31TDgM_kfKus z;HNP7GibgSv3HmKsyBt~+1}v3BPip2ZsN6p((Z|OD-=;s0OD-(65o9Aap*52*v1x64H%G))H{U)ZXwTdM;p(H5hv~=Y?+RzI0&m z>^9eD1CM`6WAx=N+Kw-k%W6T`)C-X}EPB9{{1T;X{k?4;_xO~?`;45sUFA2IZ&C>j zZa|IfG;AEt`4sR@ai2MY40o9T9koXtub=47JdJ@D4wmfhl(u`-F4vJE5JDT5{>t;V zhI`Fv-lT*BkdqZ|g}c)x9;BaTMlT?zZd5;|E7kUSi9SjA02eEmMYoWSjihu6)!z{V zWOzj)#>$Z08N9qJMPDQXU$fPfj_kh)w6n4P%WMK$4@`l)rD~*N ziV2YobNChs=WW!zM-=xKw@(r-4z}`*^p=sEXAw0YY)L8!6KAWQf#SV%ylm(93>R8L zgb%@=$t?xU=z0;$CT5BJttt9pdG0tHU5(I=JQ2%#L~z*+w8YC8%KS|RV|We zYo-kY^j<+4KRS0Ux8j-UmDlbDSEOl}ekugS-?>9S1L;wO zhlnE2Jbc}VI48Xf86i~`w4_k5Y$=5kk8T6dFkd#E>bxKy%VD#710t{MUJLOM-k*6} zjNU6C@9By{6CW-PRi;OG+@(9K$(ni~Q(XOUVO+7;U+HX%S3&4NKB60WG>0aOwJ3&H zLb<9(yRu)}3(A4Vyjv(wUY;K#6&IzLnZWX14I8YdcQ%TXIELX>a^=U=b=tu;SH>ISdd*v8&6nG* zF27rh)?$niM%0}r0H6mN{;=)&eW6`>aHPN2VU?dlhbv74X5ez7xs>J|Nfj z)CXz+YUytQElAVgu}@(YE6(1oF&Yp)l5;1VJ<`FwF}rr*a&&nzAd=HT{w#FGxlLDZ z?R8|6xMe=g^mB)IbX(%A-LE(+E6X26ia9UZNJJbHJ=&7kW9YG*tVSK9sVN9kVtCf! z;c@PRZ}WiC2&oozn+7Z#GczJy%$&#e1Tb@&Rlmh>Pp7Q2w$5V;{3VinS{|jZpe-0? zo}3hk(~U)2PC$xxycW8sOOd|*6p4R^)HrJNoQ-0KHh+r-SEjs?Ng$_>fkABxt2cw^ zWU^nKBrQOhW5_x99KCDPCGE0W#%Y!Vf3If;MXf3+T)gi(?1};qn6Usyldl*hy0OA| zNMthn@$yT^2a26F6+3K459_jun_!si@jW0diG1D7E=de{&J6{L0R=|`m|cD_4-s(@ zj_xhKU<)#lh!mOzk7BtQA$|pYWu@b}HKN>!mnz)SKsYyg5(KMn%@}jZ*yp_k4w4XE4h6Rq?8BsiVQL}+-e81Zb9*nyPyV1ijOhZ~ZQKv@HiW|yQ z@<;Iu-GvYGTPB}zH5Vn0FPRksfO56E*Sl)QhJ0lwMp5`lNWYGI+z%+Dit)G@u=dk>Dcw`Sr3H?mtt3g=CUoK0HPzNXUf2@R}#%znq`;D z6%8Nn^y>%phLKhAUS!P#_ol@lnxH$#3%%9sqo96GiDz05IK#%8BPP$Kp62*lRC?NK zqt0q#HOs6`0VDGOR~y+ii^;v{I(ej z;m*i9%M8A6P#iiyYYVs6SKSJ6&;b^r! z^rsF^DlFpBhKOTacfF6XUO#w2YswR3AqttvzD3jcl90xTFXAQMs_5OD!6$;fZ>^QE zC{q+(A^>2Ko2Q2*d4fYoGsz+(>ChFXN!{pc5@3ibk$mrntFmE4P!n6-hU?)TUDZXm z=se?}191rmnISejn>~X<>m#@@s2z*O#tI>mCiB-Ez_XuX!S929TL_P1`w;jIc9=TO z^LsG^z)y5m;hB^_FxY$kteLs&iAmwL4<)*WzUPu$uKaE@Swb=slkQ1K+Gr^Xi}LI; zR%O4(?a915Ix-=e;gIUA+9YHp0=pqV0&}H-n_VL#nv1W(K-Fu}UVF^ZZ9dKwI%IsL z9PR#Yl#vtFLR-s7Z@sjd75B9x>|b&@ zvo0YMLh7JPn4BOil@oA$PP9Ba;;%h~JOU2rQ!WIba!Jj1@1`XZhltrRSMpx?*efHU z?UPvc*cZsqwz;dMYc=6vdpALVcCUL@wctH?NCkc+5qFJoA(R<6%iq@C_vMQYq)ugb+tH zAfxotn^JQ(DQ?FF;e2r7*jL;15+A%mHB2n`)B!~Tq7d{eM7?yH@#>N1zp zYGOL?tv9CAdY^636{O?sizP(xh4>cCXTY7fOnkkyQ8p`rr46R?Ad)%QMG25g))y8R zQXl%9al}Asn&FjWb7$0Wl`n;0LA3=5Nm9J6L2y=g2fdZG!$rG!%lU8s+GG9tUb1rM zPGNS5`0|L-Ap;eUbCtX9kd-MgTR1XK7DzopW5>-*t3=s$f+}g`64V)eu(>TsHXt|G z3CB{L!btekLF+Y;N%cTxauJ{%KMj*9QN5@)$aZfRLEmdinpBt5I!? zqvUlMd#nlVVlr|esqqQ+r)2mu>eBcld&~o^cr>AX>(GZAOr(++$STdHn3+5}-N12L zNjXBP>D>(12xlOxWd?#EVRrdK8{5E>%o6D_yu<4Ufw=Y_4wkH0g<$4b0p7?I7Ml&K z6iC&1I6PVoRp>k~vbYtZ7)+mXZHR~+l@%Vq?&?>M(YX5}s5tS7>uKeqlUs(o^xOw6 za}=R^!A~dUyjTu3ah9`m7VepikE=qUgTlI3RBUu=J^};ga!W^=I+6E=W1l+#!a0@p z0?7ILo!)Y@gH}@+Hp5`qOP=f@g@;Cu`7Avxg|zb}C6eLx zwTz6_N;Ztk1R;d5z+9_D*u2M0*bea)l5cM)6ss#cO~A201XvUJ;#hZ-LQ8byhyl0?4Ni(KA_8-dZQe zakEMJo_DPYzp2(&O;CIF(E1d%gHCy3*EsC=KBkh&M`iKdWDA}`oV&Z@l!HM+8H{;Q z14LpD`KNI7I|$>GOQcu!C=#!U7>O<8MWqp7b)ypvlhG`Ilsa)ZhBnFBN;e&OT-|#Q zvru<$*82*fWJ6n9RESz@W)GBrk>`xAIfGk~2bTd0ZCYAYelN5K{r+N>O%*e-kJgTWB zYy*LMM*-nfE=>~O);_gMv0kou@x0X4#$LvpG;He$L)X!+Vy_-(wI8{@xf5|V(_)Oww^-_?y>X@B_^PcXc%kNd*`3j*=f!nYbwi(m2Ya)Z!cr`^(C;&^E zmry1~-_r3<>D*jScML`45a$AWYpe7m=AMZWVYwUa@R;2BoK1p~4XG~CTSsb+vvA%g z;&lUwN43YdC0YrA1be0wXcY~?t`^VbS?-1_z#xoU5g@3Wrw>ulYD~0At!sBX`a&~%bss7}dMIZOr#RYV??~xCbfeK6EJ_O$qC98Te zhS3F2og;S{u@keVoiP>!MlFoGSW*iWut=uv`A^@uRG@<>@^iH?!;R)p1*USiq=RE# zf5$r-wU!&-;n+UIakwixVcal;Ji>F*6Wwra%W;? ziRR0@+<7mQ<{)%@q;Sih>@lKy@gf}NIbtFoiq@7Txo55fZw)@fQhJ$~)|zvGO9W6n zM&q8GJB$GBPK^6-uh}#}4@w-SJ>gpA0;KD6&-X*Uw(Qq~l#U zJkj@zE$Bf?7G9a%biODCIj~CC`YY=y_Uc^|NxS-!1JX@-1f~_3P)$99tcygg615~T z;Vb@d?lqkPGt3+B*|j=MNP17`?RKl*QSM0{fJVoOZsz)gCF0UHdx#MxAbIqQh?DZkcF9D5nfsQ~7Goe0kYiPa{C?b7b zWT!gzwBQ+_YFB?ZulNki&&mlk6Rt`|EmV)zsnHrNlSY|rL$AkqdDfU&J&t!RB3@$M z6L9J`MzZ7ckU+YmdQO@jp4nv=PCiYkDC+7Ln{(z`J??mHm?4*FbH=Bjf{~kWXEBn9 zkF3IX?T}L%K-rdD9`e&bkZ=wpyv@j3>cQ%&IH8`H+zT`=w5@17dDYd&45hXD|(0RjIT=32yMB691Ht!)J8%zQa?y6>bRk&DRTZGt)K*r%kWAE%9+Z!~N$rNUMR+phr)V(j?kvuf5dV@8% z9xCrKl}{}uvh*WFDUMe_&qb5cdD>;voN8aa_lQH(y0>4Uzj^X}bNNmagN`3lP$ex> z!jalBc9R;$mJ2cLOux(d!sHG}U|$4F^>a0Gs{Vg5qU&tM`_u%CHjp zlQ&$68o3~z&dBVIW{(y49Cy*Ut|7>tzKa?opWS$^xmlLEy346#`%^@z(a`8=NDqAc zlGr0VtGP~3AJS=<^Tx4-!a#|Bi|8{%Z}xzNVc$hXa0Kvq<9JueyeTYso-PA#flctG zI4it0w_}Q+Izq$>%a-*Fc48JGV`M@h%93?)7DZAh8|1CsGnXRHhc_;!sgtHJeMiTM z=7m_wlM#B)L>oQRVKH;w;6)P*Nz5)9;!?NY05WP^S@-K@FIGBV2-e)d@UnIG>CJbR zQt>FN__V5zrh`OfSxp;TR=0SD63#V~cxQOP0}sRAGYztM#dAp-ikK62Mr9!iRPUxR z{P>2q9@awveI&a4WPQ8^TSTTk2?Jh6!C?;UU6z?^{%M#CLj`Q(m^)eY(a$&y8gaw& zP{-Kd(cv`l>v}` z-qBB!|3H-`FEb^IdxP}m0bfoGTs4)cdWVdc!29+i`V7{9MsvnQP95{yRijya`2~?e z*9QI(SB=ZJsr?GJk)oa)J<48)L!oOWev7s1ndLpA>-Ah0rz@jD9$3?bO#1j?L!S9A z?KmZ;#Wr+-L2&48#rIj7*atbZAb}E zHgMcWgX#sjo`xaVLXRZ&ErGlhmNE;qsL5GTy%jM47gzLE(s~J(=ONX-0f+)GUo6Ot zD3Wb151t2FUdi+AoUBO%0z|y+V0`_MUwE=Yf$IZ#kwjg@ZH(D#k4Y$<$P$LsV_qu& zDyW*zRS^D?lIZ!yX(UIikVj}g!hxVvvi#%#RWd~ zdJ*;-Y}zrhMvX(JU_sp8o%76Oc?iFkHL!+=MRT#@V>&K5;Rsr+6yl_{@aC~7ii9Z0 zzIs(oubBJgZOK{{48{e-FEK^H8?o7`V0mOmEC~RiNW^mZHXdma`U3g66*u&!nBMoE z`dK%o#O+na%V?8IS8axAY3zcA^9@#<@aHc?XJjxRoT9(HTAu7UQy?Gg-0rK>z=+mf zs@D*dd~Ya4ZzY)%z{Kv2&92N92t7v3uv}2}d=`ckVya80GtZk(%pYFh!ApP~*)%ws zXOW}?)6!$UnZ?Nkh=x(N=7TYF1{06i!t=}oZL38%=z0LJ6<~axYQ#bw=uO9Jul>@E;Zp8Gv%^ZnpY(u0c zT=0f@la)m|t{kY1o62y+Q?zQ-u+VS$?a=acvOC!*-xSWfVsd#P0I#TcNx?Dw4!lQ) z3jBPnYy*rrw)+&6d54mdCW9C;6bc_HKXsu_%gA|2r$t57PW8@ zP;heHWD*)ixh0xsK%if1x;;QFK!1dJ{ggNh*wGn_CMmu0QHSn31J{@R-XiG5V?+>K zdu`T8fpsz!!m%2nR;P*Br@>TJIn3wrv`!pfgX*}TG7qL83j%NyzX*=Hc~mamgih9e z2XZ(Ng$1TEZJx+d()s2p!2zpt<`PYmxV`k*W?#McUb*CZXY2tAHlQP5`OZ05#m!wc zPWRqyi4S-Sbb^VFEZvL2v>fAwgW}HBPX?I_Lub{CG^x0kGYM<@Xm7|9AO z8o=1RS_W5JHdY7QSd2KkYQzDvU%0%Y5{-G!XqfLoRk=HbC_QObon~U{JiUBE%AhF>G?kUsMTb}=jGFREbG0CaJ*z5V6{;5$(0ABwOFc*ZDr7u! z7oys)F4dh^X^72T9N`7lH3A-vx3Vq|?5wlrfHZ+6&%MhreL`We6>$`KxxF*k%H_k_ z$L3F7VwGCj!#2y-(&7ubcR;0YvKr;FEAr+G6PSCn8-Rd^D4CH}ai@>z&1_6o6(Go4 ztkHVa(-sW*UK0b1Tc>%02E73$&M|k}rya1~ZxA4RVs{Alm#}R09k+PMfVg%`$>SoM z#Zt{+LBplBmV!HNB19{!g9dqpxBQI52n|HA|XBmU? z9)Y~{48-znbW&HDrxSd**0NOQ$Q_gdy29>yJkJ2Df`?Kaa}c8aNx-Xe#XQdgeOkI4 zQS7DoKydh-`Xf0uos%-!lIj+hW)yg0liswo+)Ei4`g48K^Iw$!=@}T8Lvo2otCITn3L9@K)lIBmZ{2gao zQ@|@Jx;i2Qpk6^j<|D!34N1GzD z`OGSLqu%Qt0dF>W#EO^~v?Bauno!K5Xaq5~@%u#JtioW=R0qWpC4#@i74W@B z_kp`vPTjYsqTriqw9gqDDISlZTlXFKy?EHy$SY5dE822ow`!>X^-(?7&U2BbHa_vj zeajp;=}V(Lp^I|kFNAZYi7^lhAV)!<$1-m+I|@#fb2YP`2*$t(2c|qUuz421Z;exP z93+<|976NJ{E&kg@CA=5&lsMwYjl!T3V7KDuHzzkqoGc7_AP_IA_81^eGZ_Cw%doL z51zf0dD1r<_lEa^Mh8SHAUp`;ji(d(l#UZ{`Xj|5fH8c=W6%Bgkv-H>4{cPH0(q-opi{dW!i#pVXnL0v@g@%*xN%Fr?cNyG4aCA1wEdPb4uc zA>cYFUI8w-jf7N_Cr%o zj-{eIkeS&z6Fi(fMH<>Sjxd>=&>ipu^&%hVsRE<03!0};tl)X(;zI{-ua>b==xoxe z@RyAqFz{IS%;Ax?Jll9_1%80pb^Vf^WOL!25M%)A(UsQ;s_Y;c>$Tys=8H5|e8#bv zyzTrzqp%(vvOrUDmb)}i>w}6jb={GR5VC#P*JGi#uSv86jnTD<6gBQhEaYuXIyS6a z#Lqy#waACG{doDbKtRW`2z9X;k>j1U$7o~_^Z+z`3XDDjy9@EH(V+3Q_l?0hwYNFw=nh<3<@qV6a z7qD2ynb*VTglGq-s$zeph5%CNlY0DcbOWxjr&y>MF89gsbBebDSjr?ci_ab$l}oNr z#LPCM(X5vaR1!uKByp82!rkLH98-Ma%pe;l4IC3L8p~#7k_K7MHaJHb<@rif??F{y ziP;mm;pRWFy-U!Fv|f5`jFxCLza-`a2zc^X#&t1jTYLu=+g2HFoo0f@4BkF`4wR_h z68ntyibjWFn7X)c6%Fz#CuHQdBkDAYUa7rmv>9_iRFoBS2tC`% z27-YixR#9er38k0?x81=(x{3e41&RKV7E`QE}lP3jkHv!7Ke4#csPU;5XpIs2NlYT zosxyYi}Vsxcn(|Kp23PcUdfwWEEs=q(0(yk@LHc#B0OvxpE@F7NF~lhQTF1ANa_c0 zXL?LRV3FrORBXqxtJ;`1@`XM@pXGh28Vd7)Htk*rmp$_^? zsFY+g-ZRECndeD1@NCrN+2NakyG)TwS&Fkf7*rgW&Fgmr6lG`~=$jS8@Z>QC)KnUZ zmpTwMRK5j^>X4FZxQ6#Kf+}9U?%^8Mfz2wJs;q!FN(*2@{gw+LaLl;+UgN9BW|O>k z=kyA@=n}A)2KQ+P#470yO>Pj}Yn$3qmTHrC*)={dYAgAO8~b5*T@UO+Bfixzwi5|! zsKarIgEqBA2cTSEjwU6=yIBf?;epD74Nzp)(7u(jWhw=@Cv%*c9Km#)HWyL~mu^T& zWO$v&f!J#$h=o68ra(qCSaFOMEfBbHX9S0X=_$CS7qY|kkY+)7K4td4<35j)Ja?8i z$FO{ElIlCx$M4TVG<`8k?P8?mxn`(S%w>a^lLt5zr4pho_C54^TmcI??T*))>j={B zzOg-oxrrUQm=Dc$8dmhUl6#B_4I_FJ#`Tx3$-rHdWMe_ceyr)?@b{(Qw(qI zz3S)bCXG>P%Aq<5TbmBBfJG-z02?!}vs0&w@eKg_+cVrG4jfbH5HN(Pmj)MvboYk@ zUQC-%FS@Ih=RiN7^QXFJ#JH9%Y6n{)5V1Y&P%G!-YI6z&&g1vIzvZi+Sl?3MO0LbmEpp&_e3dFwBZhA0Be?tzom1Eba3 zeNhBTUA|mTi2FPE%V+SXohGNd3aBtdp8?KH;KC=z$^o+6H8cX-qd~=12LR~ODmc7h zq^$wNX1tC2ToCqAq1mH?0ybNl)$$Wrfb@J*&mQ!yPJuHB#SDg`3E(ttutX+TA3j@1 z)8Z_He&Mf?mpN7)w2EcbInRX*g4Mt!j>i+OZC-G7rB)%8oXQ%mfpp4f$MaF$;W36H zo#K$<_srOu^m4!>6LMe~w2QnBvJXMeBaiLL!M4!|zfkW^dE7&t0v)gLiP~e5w=CYs zXbqNNbyTpOk39{7)h@3+Qj9o&{911NM!0-0#P<+(LBY#Tx*~{k!a#f*)C{28o;H%D z;_lB=k(f#uh`kHuhmGAYyZD_xDVeZxoY(8e-t0=Jjo(t!&k-P?$Wt@%R+s~&x_ljX zw6muXLaFS|>DfDfvRkfUrT~2g1y@%mg3DEjh9;Ux}wg+v72}y zuUVM(vXeRsND#PafLd=1CppdTp^XKTy14uAoaR@IPEYE=BOuS# zFP8L-(BxZe{^@I>@tzHuynv(3r~(ndH?>ErF0Vt*M+Ql9Nm)rv)A?exjwvC*bO5c2`( z&oj8N5^YdhfSrhP<@vp}JZL&Mx_2ec1(+9GJOTD8H~Sn5j8LB_bH10<=7jcDxVh+nKq0K=8xgZ|%J97M=v|k19n%nI{ z3)v%um!Cin)5K6bEa=Lb5}5Q&5@dXW`kJ$}x`qV%Js+A^;Fc|x5>4bXqVG^A-;S(; z?SzE{;nTZKK`y6r8!==9CCY^m84vV7e%=5?D>_a;Js%8OUUV0zXaz-D0#*sJsVyu@+|VoDFqQ7%BFdQ zAGP};bME-3;QVty&~k<0Fp|VqFTBd|iM@Km)@g1Jo~fU{*5`#LSS*|!RN2&=nmUzA$1Gy~zGId626CKw)20^@|IAWC8V7Ly~9J_2G1s>f#i&h*}F zmw?v*lJH0gc-9LTMtXU5kCe1%4LsgPiBggKK++TF%RYPwGr7|640R;|oSHctPl0WS zM;4y!;vSLnm6x)42ELYw_fW~wh?<=4p3st{Z7TR^^zP%}1S1)=P#)k%>hDP8g^nkB zQz+sSMnML7!HCyj2aI%Oz6xf{@7y-ijceC4Lo_@>w+Pu}i|XwOH@*i*V6*i^ox@jV zBCRiMy+f6knEBPyKsx958e^}|n=19-G09<)Ehv>ziVv9TwNAaE*2ExJcm-%OZZxQ6 z0lwAn&=k?F+gyaSDcOy0vpMxq6~_q+ia=hNK{I)t{q(u*1&;U74^&SCh%HP67xXl3 zP@kw%=9`!?$@8_Zr7i({4h_XEYp*ndxk;E2v_-jICjqe^m9CnstK;%Z_V9S-Qj@b1 zDl{*R$04O%YM#6s+_;+@Y<83rVBgq#Oqngs{cin1HyiZ1ibE&K~yi(pHDYt5MpOfb8+7mqh!xUVsY7sFg>9%dXV2DPmliHiH)=J$_?<4f2 z;#VVmL6+rDmpva;jiX6yEHgU-YMRJt#2m}00)Ull&QZ|qUKtC4ZJj|$4Vv>it4mna zF2cxpc|tD>H`$n9rs`u^84kOK1eJb>Th+_d^q>Ns=`dKY18qAs_KRw)?dJ_@vQ4S2 z$2+`I_Hd9N8-y5Gi^?L5Cqa1=JoUQ}eAmZVk6!dRP49{~B;^TvQDC|)bHErI1}50^ zmoj{O$Dh6EZqX{7s}w_wA&YrP2RoX@&*e;ca?25^i?d}4N577){IS&L5tQi5_~TFd|xC0rzXOAL?`jr>w4ZtKS zusFj`d6+ZyTPuL0CccJ3xoL)RxiV;mevGZ`i%C*~orh!V1p;N~QCAEnac~5;eaE95 zS^Y?)*wwF}%sB3|Eu^dmN11Ibb22!MIT6--{l;0#<@Ii40|fxE9=qBtjK91)wwh%I zZz9lE>SvGH1mYpMo3(Md70J_EkWFN(oomn}3nhCHkuV}}($B_-IG=YNW7%wH;#Pa% z6H{JRV^nV}EJJDRiP#>VDPk{Bc;PPFIl<4G9_!pOwHY42WEZ!5FP&P32iX$1Gs#x9 z{qE-i}fJm8`aCk@K5nEeK%ZGyAw8e&6yY*SIoe| zVG{jx=0RWz&E!Uo>jN(hW(ki~eE6yfvfcm^!Lrc9y^{_hDzLap2~&IRW2m?9;fg9E zloFya_I97&fL8ko;sBV-ikbUPB(}7%&^th97+S>V9W=1hVTin3R?@@pLdmkGyf|jw zs0S?U21f7&6)?+7_Y)$)mZz)l@tbHKd-C29Uj?ru4IN&g$dh`Dtkz-7Qb2+6UV9ls z4$m_WGQNE8Dtye7wYk~!W$k8Li$>!_T3$tR4?D9HvnD1Kp+KUr_%Z7M=c1pgwu$kG^t(aF zrdaL1S>;WZgPaq0>1#+He@R9AX6U`pDf3uWMuXn7XW^Mx)$_#OCRF8v=P{*E&qc_W@_gYlC(}}@mV$XC^%KB~Df(sfglRH=g4Itjt zF`EmUkHs{po~G1l73|BP?cK`#KM(%@{PVy5r#SxcZ~vP7Ur6+y%J}#HohZ}pTF>in z$5aIypNS?Yjw+ov+`ZYy_2SZL9P*XJ`Qw)#;`~%5z^~__W`8b;J#2s!JnVA7R>@?c zBvfU+=uiM9PBT#inK^b}W6|eK)cteO9>-X^Dm>Q{ojasJH*Tj`O7cz|8R<%Qx8E2( zzoa$FFHF8u^26VWshWtN?5p9kpyY>r=-FT%haDZNhLi$W7=xwC)IAS;TfW@mGdJP6 z*-va*T%y)V6uR9!kol~gU+S)sFAJ`~TLoMPi3C-vOMh%c7y?v2ezExnq!TG0hGT(>Dpy|2?2#O2k)?q@kD z6tDW>F`>#_eZ4jPl=sVb1oZp~l92xVuyEJC0BNroGTKR}a93F{4c9pA(DxJuGh-~F z)y=@Vzwk2pes`)_4_$Vmludr-*epNyVGLZtG?F50}BWW!&4rB-y2R<`$QGOCO&X zpU*&QA9uhJ1<<}*-=X23AK2swu&^hfhHaa?K<|<(W0JOJ>dlhA=A=j3tT}vvt@zb^ z;CoQEr({gzNObJjTu=A?2GP;E_PNPr*|uE7PP~%~fIb|p5FS6n#kFn#SGRCjMgDA% zNS}eW#SMEDqcu{w?S;M8#;?o!ZI?gQ9(V~8pC==o%) ze0?Q-zLQHLw^!Yu?iTBNh^cX4L_cp{ZEIaDdeiFL9%TC>hEgk+${ZanFxn}$YMn?b zcbi?Dmf^F6T?+gWo$`{{{og|v4XH2w$?x;e5H6xG18i!TZeyI8T} ziP>HYMJH*0eJOvw9;d~ieeigu2Wk6+LSx_@YkHn>t%hf&_ZHMdBrhRKyj|et zuwaSi)d6Vl!-l|VQ~LVW_xX-Y)Oor@6d$w@$>S>Bfbb$r!u|Q`2k;mPwy$Q$r}sTZ zQ`J7#cS0Okv7PLXr=F-<(?W*le0{CZXAswYd;0`fR`D|Lkz2j-!9(f{TluY7T*Z}n zLcm8>u7?zy4wc0ZgC_4$xe^x=fD~O}qi5HrOX+tHBE{hF7*J8aRrfWK2e}Dq&nC|0 zr)TBB@==%?sBKUT_V$E`)0`h=)K<`DlI<3woD#4~2EF(B#heMa>-*wp!|t)y)Bs~SXZ#uStU}J$|>!wxhQJEK%nSPD*A|d(K27Q5Z$iOZe#+aB(8lB9l+Gny2wRoF(-^JTaOz z9(_BiPZNhr)oKkgzP>x?^SigJfK1}-F^^ssXehTmQ0#FoDZWcROV^XsicRDeh7O_k}p^HsgH*9#ZOG#RkT^7vGvV+MR5@-lyZ zJ?eMHXdxDpjM#oo7HRn;HvBFC13jM!@ME&Q67lylZR#xQ%TUshd?#;QUN(wHykkjU zR@!b8J`r@XfZ&(3FrZ)G|Gi#-^P=(Vn;7-JYqahwL^FsI9@3|jeEjsv+M-;3cFr;d zMykOgi)%G{7DL#w!!K5DLk~P25L-P#c|u0mFA_g1;O|!(%FXK`cal*1oZbTv^l^Mh z=O&i|Lb#;sve^L*@Aqc-nnY&b9!A6DD~O_{%O*SmcwSErtr^rMYafiXJwCSY9S?l6 zz=^Kzh0qN<=bUqp{v15R(Al{k7_{~~3W0Eq$fo|iHp`cA*H3{pJQwmE552cvvBCoRagt>iAxj#L;zI2h2jiXu0*+hM_lZ zY8N4*U(?8JT-eH#X5&U9{GKJnZeiJbIFa6FYLD9)Bv@2;gK_f1^@c4B(<@FsE3@}i z$@g|;)(?v>^OeDaohQo8 zEERFDO;SDr8(!JARzylqqTCKONq1%Q#kW#dIq#xtC%p!z~^RMI5`kXWISU8}yp zeD4L5MM`aU#qwie`c&HY+UMDchGly*K8OZacQSsKikvJ;@UWG+4-e1PRZ>hB$XRx% z>BY~f+@HRvyPoGlRzdFTlX5~=S)}l$B?J2vl-oSBsj+%cnxB~}zOy${>QK z*SZZ-g%D1Y=kM^atbom;oI~OfhU5V=3_k zkKg-!ArXz&NyinTM5O74%3178Qa$bAX-sol)T~o)%Uw~+?crBd_~{G|wOc*3e4gwf>@db$RqJVbn?TUZ6`8N9A?+-dA%ovjn|$@tBfO8?a)7g5+5=Z5=*!o=_U`$m z^c2cAP~jP&vF1B}u?k;x(WggPRwmLmrSUVhx9>fCkPBBTG2n}>I@{x;n;RsTPjFO@ z8yI{k-qZ5^z2j^d$s@`)7uf?1qL&QgOAksdl3?DDGLXWV#hs&cI*rX|WuI=*?z0;~ zrWWOSk0`4v`^dQh-W}s&XN$KvhC`gnh=a{jBQAndt5k%hvR}19wh39}+^ML-(2Zm96?Zt~sLF^EE=O5IMD_d>0Z_>RTnHbfM8;hwMSB>S(tg`^@>8~p7(`nEirx^*320b!?euwx#n0mOGFG{-SOz^FX zzj-MaS7Qv-=Wk%1k+Qyux{xdqoT#7uo!_~FZ6h=UHC95YsPrm^fl|N4WJzCdBi`pp zLl3pt;_Y~uom3V^j`c289>1ExWiw3_Ji+qvnWFqoAx>tPPKh5i_Io5ysS`X zOA{+$liPL)9-YUqxvp9CfpD9dOJ=MFof96uv=jW@R}LlwOW$)umZ#d+T`Tm^yK#W8 zP)kZfn`MmN>4|ie&S$lsjuc54wT}(CM$7?a<0e%bj>fsOMY+s0D~|nm-`O5fbiKz6 z?phH--KVlTkogSVpV$anw&blk*A+b zebtNlo$!uIgKJ<+_98mBripUyfqRA|%cD~^0$y^iY9@7aSaTcoA@61ndqSs4l<8vJ zy>;{?jDEWD$m=>AX@^|Ign`O=8KgU1}RH!Vm$q zH7UVz90v8znYCB%Jp_px(s++5Gs#;!fvt#kvtNvi$7`rlC5)p6-xyfWBAgZa$?}0f zKFQg=*Vi_o4}mJ(tR0vj*({z1uewU8Y?D0ec^zLh^QT|I^b>SswoayWBD%HYx<}V> z9`+td^qmR|yipLm^;ej7Hw0bJ=4ogzA*H%jMQx;KVQX0FXfpqN%hg!j2B+*HK7|09 z$5udF;~{TbqBy&9kT!d#T8eN3{HyBs>0GnqTKNccy#U1Uox>IOTyD(<4#oLX;C;zk z4NqM!BA<4wJ5*?kJtq;Tbw^6lm2|PYRNozDW4Iu)NnY5n*5j_Va!F0@ ztx2DHEApawSFlR2OS5r*&I>vv_!e-1U#od3>pZArwMja*Qv>rVF}{xN<*6Ri!Z{;0)B2>7zkE9i9+;B00#_8G#a)>e*Q%Xt`V%>Q^rt2d@7CCOb_sy#`ob?%WCBej-T~A2sk7EOeyy1aE5A& z3sj3~of>2As(MbG`*x3m43C>MrO$d7q&l5Z5mcvX39Xo6&Iu9NBIb1LIl}I+a!$Xj zh+~DPKf4->w8f3CUb^o*pwh6oS=@1u7oN3+fcJ>8Wb_R_juD|x_WjfA=q4F)s*;Qe zTbM!+E?49E0TV)*&f~TBd{8|Sl-;a!@bx<#*v!yI0T5)vvl-8a7-y5x&$*jdjkp1? ze&gX1nOHjOaYrFGXX@<*njI6vzh(0-sKJVyzEACpJydgBG83#2gkq zec;ZM785%{N-i-y>CMfwAO$?(?=b5nG0>wdE%RaZX~|4n9e&{>%!cc)pmARP^aQfh z_i#5j>giIm`B>f-tOxe9Bu9dM?t2GWhI!!T-dp^I1Acnq25GJrC{~}rIveZ zjj3F43=f)T)NJf=CNj3-GB)m`p38gnrf0<>*%k%%YEML@USrpT$1%>5MsMkI?H3aL zsz8mM0ch&IPBRYlDtid86kdw+A{A(@Xp_NtJr<;+FC^&G9Z}bd$1mJBs^HB=haM@J zBX$_~z7z_rXGWd8ds_~vLN!Nt0)N_Pnj*e9C{K#@Q|6IV_`RHKvlQ_h+7)S3;~V{g>*9o=M#%hpS*tbc8jdh6R`o0qeW!A z%+o>2eJXL9^3jegHEbK(oqEv|9fvKKMKpYue7;#%0Xp1hp|9(mtZ7j+n#vUL1lF=$ z|GvCmjP^#Cii>EO_SjbP3UaVUP;)kzl_gEat1~ps~x2Wa+oeM0bEg?9i3@_WTNL2$PV!tm33};`lA{akSkvtZ`p9gh26J&SGP=L0Brt0YRsf{60+`nX=bI*8GJ_TJ6F zDUAtO%zI#<^*dS4yt`MzAVT*%FVch|_33kr9ZV@>27hM|`*89wyI8jG^SuatE2P~_ zxpT2IyvAF~sk2gEwlle&+lC?_dG4E4&kmWBWr}76L#yEzM_O(q2N z*ZY!m#H2j+3So2j1|w)^X18f;4F#sdPdTTV*!ILKki#-Pf94YHXG@!}l48*;X!iP* zftkF1s;_2IP(1|!>fCYL8mLJ83mN}(+e#_5l5I}D%caJcaWqZZGim4vU?Z6)9bEG@PD+G0}$hK78xNd{KICtv`kc+w#W)eYiFLD=pALuB2I&jrZFc02X zoy5zwdn{x&jW#}#Qoe7HCv@oatr=$32&Saxrai3fu9-r!>P--OyfpyaH=FBb-T5Zt zDM+blU|@_FFrV45zfM32Zmgf`0RlR}HGDAjZubTJ`B`4xoaXaR=3%8aHz{AAijDyl z`NZebivz$sB5t@@%Jb?TsCLaViwCmG!L>;vx%Q0vrQl`7yC|PM>&Oe1w1pW(J?Nv z{gqJVH5S}j&s2-NQt$?B4|uH_<5P^95W@LdeS4YSw&aVuDT%}yO$y3{{fSXUMU}cV zJv<-fn>v`vtll#)$r-DBAW3((mLo;HK-l($5qLiQOF~$hdI^IjKp(O+#5+)?QmjP_>D)|c~{&Z@j36AIP z43ZtXDo1s&Sm;gB0k<)RZ${!&@Px{}7{FWHAB#K()lIs7jev;WfyX4GL@(vRtnSTJ zCx!0e6#zdxDZtyl#-}w!g2gpCBHBYgJt_9j@u-NR$0QUfMF_{$vM5pFRYT#erg>S& z>u!IY2cI~-PtSJ4gt3%kZJxxrUa{=)EPrlI_AZB6tC4MM^zNy6$+O1k%ThUD%M?GB zJ=DB=S!-6Qcq&eMR;!LBr6Y-(4lYZ%(Ib5jfo`6RSn?uuG>;g5cDJ=|H(`)Xt%{~= zGpHDUH^CM9K<1)?8SJ!VmUNND==CR3pYFW@_3rlGJh5ss#JiViM2wvzd0MmtdS2%e z<0f7nX+5k6GDX5Zpp(v(Bd5xhh$*M1c*J@m=Of&*9*54!y{#xE(AMg>W;_USXD_g% zn}|aD=X|=IWpdfO?!eF^T_X$z64_)e9K95iy9qs(|DA7->jC@vUOk7j| z`n6d;az*NKc*oFu2*Y|s7%iQU$zY^e7_f4DWU*k(W41+eLonCLrv15`$0hc=9z^14X(z#*=nysjb##n8XQb z=@;W!A)m_9Pe)gNenhcWILBoMZ6@!npz4C73tVDryQ9XGy*AxPTj6}!MW#8LFnDPt zFuYuPJr=PY_6&j`Eo}IyXsNMJDRd&m(W;A=z`P1YB@`BJoNM88m^UBc-MEf(w#Eh^F1_vDHe` zW9>Qy=(Q}hXTk6Knjtsy^ry>D=CfuoLNQX!0Wuv4(!3)#I?#e1tFV_2W2sIy56Ap7 zSNW&QgFnRs(7t=!`pSel#K>(W;WDd1I_l12h;IUDm|j~1a6P{+KfhOi$NOgLIZN=6 z900JZhM(lDUFK{k%1sLlleugP&I^yXPb!nA4Qz>?y0<>?QZK|`nDG-CqxpSe_UZTP?dSfYkYIJ8BQ#Q6HBSQ5DJtlgdm>*l zhK5+;^L~Nsi%TbMijg|9v$YYDJ&xO66>SxLooYVdxEV{5Cc`lWZ_($O1VOKpSG+Z% zhevP9NS+o8CG1ZxJE29O6{BD`Z}OvDqY%pm|5MZ8GZfWF27KVPv4|dxOc&*m>0VfNalZK!`?YlL za|$H6Cnp*V(t>Vv9I^)rIBwuWqLtYYSQ!WW&1mQQ#{$XF3>oGZtu0d-tLLpH%<7!1;OvZ z+bkWwCsgtt<@yBolC7lWX%z|942i>Vkkdw;fo`Gp*QCw&uH z8d@1_EI`pGTEbz%cMnws=RFNyHvpPG#pz3`w?;?$CUPWj4sUj?9u{o@kaJ6J6+c8K zE;ZUreA0p>=<=sJIA(Ep1;iw!v#FxM$1m=UjF&mh@YCr2ja}QzTsnS`^w7L3OAz37 zC1y5HATl=t7|NzoqZw&5i0Bt~-#5rYhxx-E5pmbVUnTV`Adv7Lv6pSO58k@)pd|?OR$8I(l*3v)D+W}oigm#tsd{3KGFN^5PdAp zU32rg8V!^S*CwEV4px_TK`vs@9%hQuKVN+hx6>_|+fQWjT~&_s zQIU|9Z#Y*mCSA_93Gpuk1(gqon3tJyqL!(NpLSf|a^$64ZB%35R?xj5K#?NY{zCBk zGHSZp7osVoFV4+`9G}iI&(Vt>2UtR9IF$`JIFim7l?ONbK!J&2JJl2pk)!QMeYTQ7 zK^b>5sPOJUF8D^o=xQm%VtbxfbNXo0)=MqYWgl(1I%;ruFL(19q#qsq%*K~_upxxj z`3gnd1ovoP4?mpPW?obUsDr4W*2B~}e?$9)q0lFWISr*6yoya4N9L>XUP{t91BBx~ z0HSk_Y<-X2>aqh|RRCO?dRkm=^I65v3XMuqQKU_?!2{frQJ8@tyjZG$%MA?1D17BD zqEhgPd#{{qzZ2 zeKkMTSiI^rU$QbmGx0jJ*d z3Qc7N6!P3f>43R%5x|4|(6uB@tKWbXbfHoQ0 zmktkZIZnAMuO9Hkl6P~tF1+P<8U5$IQp4?X83%11r~to^#HRCk=yz)9g+BAzM5o(# z;GT)BH_;1A?Z!AC%e6nb+82`F7hd*6NEYgp+`06{Xnp23zR{c}Q^)0MuP%$CWa=6v zl?UN+ReLaXb?6C=h=tl_jz_)@e=p3fpcwUaN*P+lpE3(dKzqxetGaxQiqa^8FCa1} z=-|DDlBAC8=t52Cq;tF%zp%7TN7U8Fudy@X1!zpwMCrlCwD#O+x(7cVE>a-}2J`#u zGv7C19dF(Dq8eHG9irHarss{;gU`zd#gdw>&O>94iuM z{X~;>+}#8(v)}ld*OLm#DwKIJ;p4#GMkKXxLOk*~Hl;pOchD!AdtIIP1RE))7NDZ{ zpxIsw@5VfAs}X>lebCb?vr|<$97KHD)+9S4Q-h6T&c>4F<`7yUJFfuB=1|S?)PX;G>ChG$=3_=M+=)mFk zI^QF%aKcwwo7sfz4u2x%^A4_gR1nmS1=>5uL~mpVw<*lud+&%AJVi3a&Ir$qFUFD6!aZNZ;TSH0e!$vh!_bVRzl7G&GrG#MA_sCHBUsH*ICNhuHy zZF00ek@tz{XBSF0UTC#Wf-K~Z;rgfj^+@1 z!J{1Isn)u?GCkg6^roD`d}v(=4;fs-<)IN^SVdu%R1%}th}avPBea)?&;8+x$|$<` zbQEHa9`rJcM1)xRwu75aB)H^)zW}S2P%MHvIqei5nJo64`i1MVLOpsZ4QlWPpR>@y zyQhNZ!^JKQc=T=(Vn}t~j@_w#;`)6*Ulw!N3X8~jZ6+-9W=fs=nD8+Qpsd*Xfq*FmGXdC3mynK}|Fi%ur z&$+@)vpX*-BixeY6|%K1K1f0hTm+4W?9YE83&XWtu~;t`52a%_+?eWF=tkUF#F_3G zGo^YKYnvtR#TT;Q7k7QzK-eKJ3wXCWj8y{jUR#t#@EEB%Do;Io@hm}D?J35fIH2Yf zJw9n4fy|&2$YNcwxTDD`b4V(oV1vn|2m(vvC)C1oqlUR`qK?EXKV>vhn=12d1NxUZvdaPUiz}nQJx?2c-0JOvwebL}WLeokWT0^*y1hd`ZYK%Zn<5=!{7Q z6J24w-ZV@ZdIDpp09CJ()ce9WVDyC+q$);AdMi)qc!KhqYi-;1&1e8E?I(Iz1U(pQ zj(G%6UbZ{@%&jQBBW9WowB@@%9rIKk-#xvxj2;ZSw0U=>>-flSu?)Y^mG2J56Bks} zqGAv!>P9ai?*h(`pWWjEV)l!dz&lK<$E>P%;y{zH9cy}FU|kzt-n^ZrYp;h7o5G~@ zz?7pf;&m8LWaRbo#ITAh0q_eKvK&D3a_=$@|5 zzwn(|#q(N{@*-X&q{fEwbV(SXad}>aHf)B>%MChH;+>zKtKYqhalPKff)b7}4pFj0 ztxRN%=;gL>*KibG7BOt9p}+TFrEpN5SPjXE61>YmyL`T?0jOQi8}Awy_X*i6u4inq z0QuM-_qhV5h~PuqMiSta%ziwsrin!qwrvRxFm=1|C}@N22pd2S?IAv6c=*$MC5~$6 zUh3{u$h9=&qKRm)*G$WzJ6sl>x+icxts?L5Nx{B%ed3#Zo5mi-Q&3)OD-l(DPcYbc zbai?RLpI&zIs(T-O(}v*qA4K&D3Gqe7rLZO?*%I5miM_88uf7w29WTaBAr>`K?~Dz z$#q22FrJ?ZAZkd0Te9rF-IZm2%^KsoG8CsFjxw$~88OFa@qmBlTq)Q!H%L9(dN-jm zu}}eHad*}7k^uc)s}ntSI#%H0PmKM08nf1grm`2$pGpwAy2Cp-s}Y!CVsV}}e;45y z+=HF3bm#2*!f;WgXPYJpmW)e(>jfQdI6@n~Wu!y080^ zj??y-9YA$#JS=Xq6+Lr!Y~6J>-vLPO%K=#d{XMlkVvr=kbb#2oQEHpOK&a4)k{Q2A zDP+>yhkLqvVLn*!uNJn_+tsMaU$^om3S)eO=f8ayzf&Fe|YDP$%L)@#as)e+ zJF27CJzVkbeW6Y8scex(*j(6Fe~miGPx&Y)u#U8ymM-BDWkefJy*zv~(RR!`DRg5z zfYuF1Q)~Rj84tz@26W^+CcB@Dao~IBee*1Whzs*BfH4uIOA{z64OdrOZCq01MPRef zamSuFE^eeLL#<+GR;LCzG)FXESt1Qm?)A^!=pOru3n^X#is)s`YHA92Mw;>nGT-#P zU@NU6?&QS){KWgSH`4B=2A{ax+tJf=SEJ=Tw6Uv0mGSsdsDbiO3Ogf4t3 z6lG)CuzIHomQ0U#;|j6*c!}ZNn%wowXiL*lzMdzi+@Umppie-2SjF(cOC73r@0rNs zH>MH3&JUuQKr&HLrrrawsR$&-p{_?tPwbsM`ROMER1d?YX3$x>p1mv9*Cb8d8I!O> zSCwTV^y(o5u%lFbAsLmAYFBd(L|e0*CuZ8LQv6XB;IwL*;RzWDk$smsx=I8V?l7>c)FrFS5Qcf1myG(MBYlL+-)?AXy|merJpeIQ;Ux!gK^Q(jaV*fMz&G zf-Pr|I3`(-my#V4(`ndCgwG-Z_0e0G7_u>s$5t_9^q|h0C8zRwyP2q-XA($Ja43z7 zt58;Ujks5zb~U>XL~xA~UFeQH#n&XI8JR*R9-DdBhcmh*s-fM428Z*t#afwf*%V+20j(Yk88jd>Gm!0^$rXHCcW7&JXw1o!pYNmDW zSo7>&TzL``baT!5LeeHh4yVX0qRJ3P&d!ve2?;L^K97ykn*myC9?n!I%->UM19%1l4fUWs!Mz>D@n;Vmm388w-nVxZ zMK)EZdp&9FIVf~hNJ9udw5>t`J4f42 zivfmXwlEMNo(AS*LP+halyTL74AV!hEF(gCyz%sGnrq%vn@7Z|@Oy}E>3EUTBgrjC z@~WLg3sC&zsqu6pF`aIpZ(VB(_Nr}e!J~;2+wE7kfv~8NXI?z}e2{JK9r=S_$oJ~d zjE}sB*C^%gw_^l~ixf!m7;9&fX#sX2O+^~CS^N|E&u*%ur{3tPktzWwkIXDQzNq;BzK0htyc}I^s=-Fmnj^1HehhYXs$dG9SNFGL0M$z zXI~`;LvFY`gzh7fik4e&x{C>aBpN zCaVx{OEl}`nn8@|ZM8l#CjrHaBU{UO%A7TLDdA~P_obM1p3rNdZ8Z01ctPdOcElFa zi7_HE?`dbDcJ=fIl*J_iY44iVy(}yNZmD=pB${JVyxDpq(-oSXuYl9})z#a`3-{ix zqg)B=3J8ne$~VFrH`34}oXrdgY3N<62BQ}W7-SuqDo$F@-Fhl9H}ngA|Lm}yd0=$C zo;0D=2*n-P4zU{yMpp#VX@`(CPTS+u2M-FXUu+@|x|pLhY=k+^a!!@J$GzAdwj$`> zn@5D1p^nNv3zCb)M8SSft5;ASm)Q2yYf>d6k{Gn`S>)P)McCV>0ek)?iZIy?mNH{O zn=q3GI{F>~IHW@c&MxZx!oP&`TnUP!b7(X?(KNvzH#4r(YJgdqpG$IS8>iv4wDcGL z@!4yIFVJrQ3JtJE;GOquI&gL;VK#b;zN~Sg}oZ53a2f)pO)kD>A<5{=R zQc+CLQ=Wi?j-WvDNg^sqLVN;M4GZ+1Fa3N0p6GRuLaS$XC}(&3I=Lfa#TJ`S>kvXf z9Pa@?ft|9j4+aq6v+brni6~FSlJ4zTwcOEo&z?m4BoA(DTU$G&J@SeYf0t?-aOz79 za8)}`kyUjC)hAz~9*3MIH#<{tA-}qguFR03gj*82_p((Tw{#w41vU%DRt_a!a@qlvZn}p8}49i)tAkR1opr$VaP@MN3b9xy%^IXKaNrXeGmc7+d3gYzhHM=0>8SDZ=nLvUfS3 zG^JoQ(+r5HK>chBHo%yvQC4qAlchX#Op);&j^Y;5bmJOjo?=DMa{$h7B7i>oGL=qQ z@3^XLZOj%pPza$v(wa8#WSrCjFP=ZKmVTj${S7!BZmoNyhtZ*1`N{6*|IdQo$7gQyJgs+xNsz?&1lwK+FN? zddstLZWZhda|<-dtb`VhbsCVy1?uMzCSApV=tu~PgADHSL_lo&YN9&PTID}C`c9wvrDM4cw`PSPQhU3Z)dMv+Q&@V7 zVD+_Sg|*GFJU=#9=J2qTgXRE6dC}4$EnZT? zw*pp2fTVm3^4Kv1p^>VImuBjnAr82u>rP>@2qUv{(rkp52Lj3BGhhX`2iSewX^v9I z{o)rXGrYCXozOX)XOw4r^c%^kSt;) z3ooOB#G+veoZfQe@K6?UhT~UUOfHJTcn%h2k-8BM27&^v`had4O&i3+mL*cVFf9wz z*m>)Pp;OvQ?isM}>(D{wB6A9~hqfFjEG3I2c+bXnY~MT}xecFot7udr#aDMc2o$Tn zY&2^a4(wu`LZH?&nG@n6*hQe;tv~GxV|Rn92*OI=I`K%Dg=eDIMp+wcIY?yPs99vQ zWIL98(&FD|WHTJ;JsHf089XTZcnlqflJ+2d{Y{`F9l;La;j`K^raDfAy$5Q|#;;te zz2_O4!y!z^It7+EbEKM8C=mJ?t_2@iSZ?x*2P#NT0V=~*C9-Lum+wSx+(YX{Q4dBv zCX}2|YeOn}E5J0MLQD(PNcJFxb{;;h7?UyO5t<60crjdXJDLtcSM~F~oEqOl5Vp9} zGLhXZg5H8@vl4P;eYdDB(d`*vc?V`p-^BPQF-#LuP?0&5?x?dmpo8ZM-~v+PjSuOa zZX_y~B;~BgIm^H{yThJ<0Sp((KI(=%5a=GPl@*a92DgPbRu_(}MCQ=O$cZ8E+#Zzf z((oCcsn0yL6N2Yd!wEj`h^XFfObwvCXT>aFGXlq1kDj&6MtasLrO);z;BvXOSCE|) zcx|z%z1H((hlcl)8llJ3$EEcuUS#_71@Y-o8e>^NGqNC}wQR_9foDpW`E6zK+4A8% zgk=P$d?x}=U>iY>+(0P|85HP=ZV9dBj`Z|P@MYT5dx@S#ecO1&rVjf2jt+;baYg^wqYO| z=$=I<<<=|oWdnmEIG4nt&v^ymd%s?e8yz%f+R zSCu_&pfK*FDDw~>dv1bg^MjrXGjl&(v_bS)5g>Ju3nqhSGxd^r}H*28G zfMs~vw{MB0v$%*UKvHxg-huCu2UZ5ryl9@hEt9j>Fo7<$aiR`I(|%gxNpWa^Y9OH^gZuay*#XU5DkXC+<6(aZ_BvXQ%}YzU5xoXVD;I3TiDfB zBH1Wl#-+EE&Qyxuh(HnaBExL1yC-s z&Nj1hpIZ1ibxDxUN)z9E$rl&GuekLdND1p}I!~`@>3q@lqISIZRX+DHP|Ed3uTy7M zPNZQUWys)CP^Y}Br|IviqsQ%NpkwVlpJjNQOLQ|tKj$6pT2Oau&t-)Tu`TdUF>`+X z#4hI^`aqX)s9ic)66Kq3lK3+u#4EdmYV7wmUKEZg+A!8Lg08hrvDV9X58}-;MNwP! z*lvBNp}r6F%^A!&Yd#xjLuS?zATO3+7>X@RyGY549O-)x?MdZ!Q~|!;y_0-cDUH40 za$Dh0S4gyIXlXREvv%|7OTMmg^c7fzm+T{9}UjaW!J} zoz@07@DjK=GfP93BU`N$LQkvS@p&ZHOl#uq*gJvOwMGFNP(>l>MitAq)Vs0TYW;A^ z_r}JR`w-3_+$#E7uuk%HN~g{R^U*sEd6nRRi4?EL$5wSN@Au*}gY_!%+^*DItax*z zO^Gze-{dnjKW@5PNsgP6KZV`ZCsFc!6c8IJ+b3YBI+3LIOo!PS*Q!i?-oWeT$3BU4 z0SKhLDc5;5OJXrarzL)YI1)T^7FQs*ETPMn4tI2>I9RY^-~b|x=qJdUn3&UhBP0ImGf`cmbU`?-T_lL_wR>Tu9=eB7tR(ppd!8vLlABn!qXwf&?@ zKO=;*7QYgdXL^c!`<~@nXg)4Kf02U@JR|C$6IJ59n zM?57=Lya;Oa>T9tInKVS6rn_YVix8hfZM_&4=Ug};V~7|poO` z%rQ-3EGLX!8UjtNn3`!>D^oQ`Mag1-*b=D-r9`~Osp670H%!D6VFN^XujMCM7|C6{ zE`kSsRVh=7Z%O@>2Tv24)uU>|cqE3U>fI9=_$KQ!Omv(1%NIy#S(3v_kXaO!ZeZ5K zUCzpy({Vfn6J_T_NeLGp! zTT``da$`XP3TKm4E?iXLA?}xO-l*I9!V6!EOz$#F7iZvT_`c!bEMMPjaFX+!t=+}Y zxrYSOfT;FX_>p$z6F#5Ct`dtsiP*z=zxQ>W-dD(BeCHB2nwbt-!?aHKHq|SeujKKO zs@v?V=vuG&q{}{Ig(*ip#k(Veq9>A#gL%E?6z}DxY=GO=OOCD3Drv6FgLU{<>rni zC5AvE;qUhqxoU9fk|nLh)utBKN3Vxn5W&Eat@M0vd@u={!yOy_lQ91d7^Q-Itym0w zmE!Zp*5Tp}KJkEnJ{*U_Ia+mapH_qy5|uFGJmT`_Nm-EF(8t=(AIp}|PGr5RUQ5YZ zwdk;1np&3iZk#6tI+NV@&{d_yx^`bxJ-d9JXMS&2MQ@wfZr^TIUJ~fy9@W6^%QdWl zjiQrxfs;9hw{>H{#Ezq^Wz5sXb*5MNmN@V5)OUEQ&!nRwT95Q+CS;mZLa#KrdFGzB zVq|Hf-r^Mff}g7MH6ia~&f}?+tnW-1-%&&2jlDXV+4ml)%xid)6GnK)%dhK{X!7{s zEoy~6#?yXtc&0G?jzYJ>CVWI8N67WI-}IuX0pGRJ0xB=@r%q>L~3x zRb~VQ!rtpfFN5QA4JXs8UpU=XiILQm;j+ljYO>Yn3jS#)y( z5xRsFtN41cu~Ut}^UWQ(0cy5*Z;k60X+Vy~%dO2pD}$V8&yUT09={jWG9w|3^fqPZ zI2g#}R6j{0_zWGdFOkdx7)Vd`cIF}56Y*LsaT(y27MbjLtT;!m^J+NSx*XVcxJa$Q znx87^doj>01vAt~glGGj--~>;5#cg4vTAilLCj(^n@Q>>^j$Pj2*>75mGO-)Ay$9W_8C8hSEVbc zFD+E*H17qEh9+3|F=gg-?;YqGuTPv_KoG>L8p`N~xw-d?y$WzEG}u1QbRC*En4ppi zd+kB*SY_0-xGR=wJJ4@J2kP2G@ypQqfA}RJv%WJWWRdwQEar;82Qr@qAEh ztzevVu+Gr==)-tIWULY|rWS`VeL;o8=0c*sy}RVSl&J4pQes4f<$UUskr&*)Mt zgY7J9NWz?Q*EC$KUYUU-Z{dqh%0Lao+I<1k&)OVlHTHqra>MH_5h4pp5#URwI0GXi zB2~h(e(q@+-Y^=OqF5Wp_Ve8t>oR0_i%6jRLuw04OsQ2Iynw?O5Ac*x!s0y?NtesB zPxAOPoCuapmG#oy)+5Wn?pq=Bn+F#QWlPUQ=k>E55d{VCnkF062N28wdK1U6cQel& zfM~!GMfg!3Uulo98fDp)6em~LKC8tWP9pQp^^_aM#>ad%ETAScgbr*^JEv3ZA;SR@ z0MKjY2U*ZfefExspK2Vx4S?9At$0p6O2zoV?o~cLOx*2<`f&Y?nUR&m+SBq>!`chh z1F3ZaleqlZ?<8V&yIaGPUOvUNhp~ad(e_Z0@AbOdRC-L?GTEPH)VFOu__R%yhzULs z#dX3OzDzs$8a4u(MC2q#Of=ZB_(J!F$$giNhh)*ScnrAJH z8!BkNMvC53)O+j-Fw)hqaW95)+S64G8-rM}`QCjK_448gfWa6c1m@3@$+d$Mx7UMWm0envt8~-T8m@GdnU)FGu={O2rKD6^s$1}b z?wsINteuh>D059C&r;O`TM*Mhj}EGfD}w2Kkxj!PI?vc;=@*4>7!;rtcH$)rRX3U# zCPodQK~HwJ`M@JwsLG{e&F7`_Nuhj3mrm2+M3RtnG<8xm0QKi;BF~1`PoRkqF7g7j zVXar7-cvx=TZ1Q-v2L*zH*YZ2y$zC|v10qPN|@11pffST(oU zg=;5fqxLpLBil1K*)pySIT91oM%tb$Nc!v*g}KJo&KQoOq)ZaO&_P_ptSTfe0=Lk`jJ^$q(7`dJvdXh705sGC8s;KYXli$AM%Y76 zF)eHwhCM&?Js-NK0$>P{Il*VIE7rzp&pISvJ&%jj+M`jXh#lGci8FV2_seIP=0HZg z_uAe|7w&UNXcG0Aw^uk4yF5~Vg&r0nVV_W+#Ktn3mhqyi}b6lZX`)|Xn7-mEyFcnt0ja$hzjm2Jf`AY zfQm~gK49htfdraaBl&ZXCncY0GjRgXjKh8L!ie4$skaVQPi8$Qn2knFmDW>0%TFR5 zK8eJ>X-exZJ7z2%+PyoreV!AS(dOQK+ytOJDkpYjii^*->s-`?c}iEHJ@GsqBV@dtbI=*@)P%){4VTt&x!Rhm za_AkLE)=UPCBk(gM^a1ckl{`>Y==*7yS8}Vv*dpJvWBSZ36_h~&z+q}mp@B|W?D1SX|PoFdYdTUZIAO+RSqsY3qM}g)-Kg14So5TwqF|@FIv4%GDAQ6WV)!~oK4O} zq~7g@C1*ths^N)JU-OcMw_*E;pHy(k*{}vW#qfFe zyeig*ST5JZ&tu0NhOB$PlSEI4JMVfEF8?@O~R%ERNEWMV#eI_!tj zKgWN3N=KX6EPs-t^@d_-vQ+SK>M^j>!Gz3_ZfhPaA8m#&IvKvtNdf*WS1s(Sp|n^> zSKJ%Y9*>cjd#6C@SP%kGwRC$TQs^yDC?&I$S&FO)Yg-M!n^^>-O$FQCv@n=3R zAPW$!JQ?v0AMib}5GUZ?DqXhm=y)lceF_}OWT*Y6sx2uQY{E2?V;|cl!@j=k!jT4x@w!Ej zGxLeHMzHe@20-B&)m%Bt;0+TNTcv}%C@{|#o8Tzq8X4%FVkaeU$8g^bXG?<`#V<;o zS{`mn7s_BhBspPql3;IQ@!h7HX&LN(@MxCvfVETPlT!MOLVYeug|*}1L8CU4c|5yC zPEVich7pra^_2AS6?tSQ;Hpo`NOOwFsTd>}M;yNIb ztb&|FaFs?_H7bt?O6cv&vc6ouiVQdnhv(1>O%yk%CJ&;<+*#h_F)BUiTgVIE`uR@% zRar1SW#d-$@3VRpk`|N2Qv@JuAB0Q*(ZL+6D z;mTWf2)s3^`1(;`I!I~0cEU#tqAx+jstth_YIuj(Fi7a^v5dYUgk8$K7MI!pzI?bC z>Md|1BdivrbCw{>`KS+URrD@s*0_MR&kMuD8@l+3SVJQqV^U?5^a?NFrZcSk&4KrDm`>Tw<$m9V$oM#*r0XlGS805btehR znZ-{chTy%b1984AAM9Nrh!^Q&x5&^k+x)JU!Q@)yCU&yxpEru0Y|fJy=S6;c6aiV zSXm;ku1Nz$eKY`%jpwaCurf+Eb{wp|>$8jSo=ovW zwqo&0MG%EOdm`qNQF~LslAyW)2JeERCe)+%pqFb>vhO{I>Mb9LK`02%aRHbp1;e@K z#FwXSCWzy>oaz2vxHi|O-qW~PAdJ^RuHGXjs831M!tdbG3we!?U(M~rWfun&*zlu` zpkQ4%_qu^~e+e_C76bCEd$yJRkxM>#6oV+)HpG&KMvs7SaGFIQo*d-!_4i~d#aFwi zee)N!AjAz()pz^Qhp&gIHS0pMuGlYLuZU84T@USMTp0yC{G?VsBU5qQmXq`5wDLOo zE5SL*<(r3@?HS5dP>Vg_nc`~W69}yuM7YNEm=*NtsmPxpgQUOpX_y5meHh#PqBG(> z2Miv-4ib{%E1upf@VwR`jb}AgH!c`_!X=;(t@t27S~yi*OMHcsF;c8Ti5Yh`9AD(| zEF*e4#^f7Q15J4J>MafhLctDcp5mauR@YMrnMbM#HFhdk`D~!=(4v!Eo&eYctv`%; zeseYUUjJS`bTx_7!<-$qhzM=U(H&SGdwtz%_G0)s6lJ9O%6Z=Tcc0bI@RZ>^@=kyq zc)cwm?(I~q(+2pvM}&_vm}X}BS)vt`iN3UB%ZLGU-H2!U$cN(3A{)cfs4ffnQ6nW> zKXbb=KpRU%d+=iOafw-=#-2T+nyQJ1)8%w+oY_y(GhWA04L`F}yy3FY14=9>eM$IQ zZ_{2G2o{mH5kbVZxpO|HaxTrAc>qDr%ahnnL2f0%Z*9DuFySNIs#L2nNGO1rx_FgY zb}TV6xSI%a%%9HnZgYcy!$5wa=deORzLXIO5c`+r|PIZNQehI(C~-7{2o> zPwE(VQOmgsd%FaZQD^1Pe-1yrNudC4@O*W3o-L!uI|jLK#|7SiN)EQTa`)l7}hJ+PO7Fk9Zn{{+?tAz z42zdOMkul)f{EwW4s{9=_4wNK1wf%EHob>u&&?k9`CCyr#q$~3UM=nJc<;dr49mfh+7(V_TBbhiAb`4sZBr?Obc1-kZ)> zV)S66BR(ni&uBG;cnVQjX$76iFGve#grmJHC+UUSSkAl&H$AhaacH4nr#J~q@_eLJKB5`87A#jLO9#V#0H&4eS&;2XajhpC!=omXt#3RiGT>2USr zVR>ha#PmKfW6u~KKYf_z%{MxUO4F#4EdB79uw{~!f zJK*$Mq+bcC$n)D8%^Ydjom1Xh3w|EWQGP=)2=8|vcJROi*F1Is5>U5@!=Q)Q>e2lc zCQ{<~9!?cd^5era`6fBOL)O;3!}NFUg|hDnrMw!T(O8C8c1((zITIjk!G@r@Z^>t4 zJhOlXqnej)7Doj|Vkz_q@0-eaY>@b-s%UWD5)d)aZom>a6Yll6kmq>p(`o0}j5$T$ zbdHx=cMQH}q?HSeQ9Z9#Xi`ahY42@lCLz7|DDK6cbUoLxJC@GI$}ZawLyY&TyC{pF z0nDSpE%uTT){1BRJfHZ^^0e0Iwmd5==v*lht7T0?dW)dYXmIy4OPkFc@GF}eEeguy zg2ol0dsaB!#9$7Oy2Yz_<{3f7?wc=~AAH6w$xhJ(PGs{}(@Y){tnVzA(xs8ev|d!u z6y#*SBhV2bKSDk=Zx)0_#Ea?I*mz!=U1+HfIg`i~tOcc&(wH5sQr48Z)t;{{jJi>W zzcJ#DDlSCZ@=!(>21tjxRO#6lG6wmy-*a&Vx$fEl*QR>Xd!anc6mM$7KnppK=&-C` zk0b#0ZIj?tqOTFPF7_)#)j+Rzde1Yw^c6SQwQK5MVFS-0%VOT@Ln$FR?B{RPq<-IL zc}U&I)e;>ZtkapBW*@w1&S<~~w==<8YdIGLMdv*EzKQnFTrHUc`}AotwM$=T8z0cM zFu-9rT~^T%S}ZvO?|bI*AYRY4%VUI$6iW%nL9F?! zP6?Lj@Cj3vz@h+N`TP}d#=mo<~%IwowZItDQyP=jxvL&1#? zMK4E+hy6j%^>ox~o?uLoQj*;R56vR!=D5)CH!7fr4I|0mFjAaYhpt)kC`84*xWs$n zc^C|Zn<*&#q>+2dOJ1{>npcQhmxN}5)7hkUyEasza+Z&7&z4WsZ( zAVa%m<*Gw?V#TzFQ1d!~iGLb+9<)4rLJmwzN>P4Qu>2m#*^%HpT;mc|tK|mkE^6!$ z{v?||!`NX9d7l}F+cU6v<2h2{XKjWWvruB_K1a+&)1Fd{J1J9-0%zjXS1X6DeC)KN z0p2UFvscwTHfZ6C>;_RVCTlIh!`(fNMbL)!>a7Olj1y!pDb17a%Ox;dH@8rJhU%b@ zsI=Yb&EA;qEKs9dB^=1_4B+PF%t%Nqi7?w_f>yh-_+@qN&6QNUhdGZwZt%=$?RVO& zr*bH|F4&Q9fwnJMtt2OS+@2#P&T3i(_Xy{N5z{ZaqiWC6gacV#vU_G)C$LN_4HIKI zKwy;sOhB{0+4D19q4A9)IQXQSKVw-;9H(uMU_Cq)qZ=4-$7SGvxiW_coLrmYdZrl2 z&(K#d+#F%$kz&kcS_w#-ma#zVV;Zl+zO6X~jS5JXgMrsIhYSUCZEkWe-zxQ#uaIQU zSg$`4f_zTd=m+s0jX8tdx}?43SX0R7Y|m+j?y=1v7i}Xv%Ik~Sp5}G46}Wo{nuCx+ zLJWf#1WYd-A>Jh8Y?eOL%tuGi{`_%YV~flCvltet8MX&Any40(OXGTo`moB-`X}Lq z;7=^Ih8b%gciiGq0meJhL^$=mYFThUglkoQ)(9pu&jhT*M)Tmt-l%a+zW`TWZbC$F7m5u z<}w5BiBLjGL}LGG>52&9>~krFPv_ohb4|77Gpl7SCGxEa1QI~Xd)=`0AigR0Ps&aA zSO|upET2b|H;gPx?2{Js$QcCK$F+|lfv$K~?%w$gti^;XXLlO)P;@+TDhvur5o;-A z6bc(uzbbhfOWT9&5KSf58{s>xVjxczU&PefmA!e^W5?peMMwow$0upgnf!>}&8WdJ z%XJ~>$pkgW(|d0?uPc}KVYbK{Up0wirZTPdNML%ING;E4aFcgEX6FU#@p6O1V?E)x zM7FipI?Nt5C;0^I^|NX#f`WMdz*2QwWa)m9{tk|zJ)j`#!3#wc7h_?+jie%e6)zzI zk91@c-pzEhsC<$x-=S@eQ0VgAW3hI7C=@ehYtBJ3Jf<5`c}XRYlA8H=a1d=!qVE}> z6C<$b?Y<7kSOHms^P}eyD$44W5bStKPqwsqO!*js589)`XjMyiBp&F*0p=#yd2uO9 zXTRMyASaL31}5vtsC9P_r-F9Z7UL#f8h})Sq3J42mW}zTBkY0eJv#7}kb`YPW1|WW zgKlL)F`b}Z_Os@&e882HD9?vLnjdFF%yzmm2^%+;>BKPs`EB|Zlc4;f-Q`L26_AHS z^o&|ReGi6ETkjrEk%~A(-3J zPs^+7AeD{0_cDh-NQoP2Bp96ok@C4RUvU$_Td)`HmJf?Gy`IMt;TXJon6F6VDcxX1 z-S#|TdnS4!8Hl<;5S$(H1aq$_FK(Dd$4;)=&QczXKv+ZQR&#M0p*)-g4W*7yX~^)QJHD2No9O31|6p)oY!xcrI z83J$&IcTxBTFJK$Afhi3b>%SW(o-klRZiDi8X1^nyy@ov9@tOJF(I?91FS?x8Lv}% zy{KoEU=Rn*Y$q-i_)l#XZ>Gh>uSYJ&ynoK;I?4}&z$#+i6y}VDj0>etW+Rq)x4hJbT0I`F>9%xIb4ncWvq$z1qyc$m#tMs)g z%kD7h2{QV|_$+c8BxNPM#OJIoaxD8CguKjjAaOm3OP;ltk0uO6(Zk`|U^7(c&bMm8 z1CW9;O%c>(*YchtpoTeiur$i-3ylLlfS5ao!b zo8wmTWU&(NbiH17z1B&4UTen(&S4xJ@6IlLInK`3Uotnls+%-mbs;HtJdgh?L5@)Y#*O7 z&&x->%G1P{Zpp||Qa!PwC6lop&QALaUQ;0%96%oC-kMm#l&X*^Qq(X_Z?Nloh$pAj zdbUtv*=gQQ7La;T9`54V(H4a&cl1_O&4h6|PK;`09JiSC2|aW~lw&dn6057o-PSy$ zJn|K{&Ia|wq!2EN9Cf(Y^rUEcoAo5(!LTh3Pq8oZPEnLJ+=F*|=XMU)UTaAtcSKhe z?WKid%+O-~VCYqw6lggN!{n%g%6lUSdD_oh)`_!uq5StAo(rnU@o_VysuyI*nAVCR zHLx3qUcp)%HJu$czs>q-s^W6{21~~dN817S<^1)T}JJ$VwrX(z3-2IVzFjMb>YbWcd8ut>b84B|;~7Qi^Vof)SlgM_%j617gMkrwSD zn?4>h>`iToGXvYu8yhBJk7*;ZHW!q7#&GB6=IKL>+E|T2PfNS8URXlVgZ6@~dxo;q z`Ft<#$(ttN4pmVVE+YeHK|bFr*L~4~TxH_L1wyxkhYeE2+-yiO+UBrVMjEx(kt8rl zRH+*T`@m0+8WxP>Qv~!G0>J{b-6*VFE$g0`ZX9>pIUP}x!(Cp z!^0LLUuen+LpDA=HBx%ln?9IvE@*H$43|(fB*4;K4^z=(o?3+0AfcQdyXp!1vx`3U zJzlyhdDRhbmKdmF^Ttju0#S`>8T6Uuqzl*ypA{)BbQdZ%kae60MX=AM%lke4TrVQFNu zf@CPmW}Iw7rU4vpnX^>OQ^X3Q(8mBB!h35ka9PcodTYUnHbi?+A9Dey30ABAgqlP+RJYwCbI%?$)CLBdH4DZVat7ZPUy+gv5ZtK~#_6 z%8a`XO;iy=HRC5<+eQV;(Q(vB$mX|MZ%y-OEI)?$0R1%f*BI+3& zgC}X}vb>pF`4)n(e)@rjMB2W}T(=kGj?DVN5aTTchZ!Bglr^Imn1@@7SX1gq)~b&X z>>f|QKn+@4!s>B{oUK!?dW2j89(D;JB)apkL<7$Jgj}S30@d@Cs$MTmQa#bJw6#Xd zlA|l!4JeD%a6{TqHk5F_0n>-MuinPAF#%sT<|1C=d0RZZZl}$nXE{@%OUnM9ZHz*! zB9uZexT;m)3FQ!)uC>6M*fK&i*Vrxf02MC_s@e)nW)d&HGJ zB%k20N3up{MfB~#NXK0#uCDT#a!KR6ihAaZ+`ithx5e2Z0DW>3Y=LDStG3xlH^9Cm z&$@l#I;JG>m=pH}SnJCmnn~jAa56?H28D2umoLo^1W0L0N}d!-EZ*UC>Xz{E>>`vA5J*JJ&&_R~4AeQC?VSUSnO=Ef(K!@XuPv_!=G^DoJSY7Ozt;2=6 z)?j*CNFM3c zQCw@Q#KWE`2)MHzV#pR!&w2`a-IcMk14$(V_;#WB9Td4^Hn1+eMZsE+-O`>&OAE;B zz9W8PU5FL2b>cnH;&B8u+3(_D$yHHuy>lp%7RrYThFMlbgB zi$zyJ>I+Sr=P$-&PO6+y*zLxAV^Mfb&ns+joW#|uiUN3yP6)+zND)Hns`nm9 zETApXhIZ{k*N6dpt($-y2f$tk6yg4+wizR;Kyo_XhDf3?6GkJV+zzZ9A(0mt-h&yj ze)M3%hpmkFA>G2|GhiebYiPAF+9BqZ*Ha}KyD{*D=zyp6)2ZQ?_r8f7aUI4bsfHBuLCfi3Q!zfm_p)R?KeOW&xanP@k?HzLlPOI2?@};0DcMV)6cHRuNgzPX-W?_~|evg*+ z86|JVG9Ghsy}Yck6Bq299uM{kHKtvsSk0dCNGouqgoW{Tl4lvm?knrfbdf@#B9KK|$>FCc2*nyz zc580nJ;9CDJ}5-0!slDj57vtw*{*Mjt-{EReu~0BL#07JsH?poGE2-Hqtmh&Kr;;M zw>6JR)%&(V1|qV%R1lO(<_<+0ii}y+E#nobTJ+7tToH~0`AFsC$;)F2+A zmE1F?NmSeA3l3762am6#D}xxgtYkxT6Dem>pG(ZWa^2X45?>AoTrG632c8{ok>aHX zB0bq<#Z_1Z+&l5XVN;G{6RvHCt$KU2B#!o2!BmN_RUXQ%;9vncu<9VS$QxICeB4|r zM*9F*F%^sy8Q;Uo7o5&l8+nrfXL*=wXv980XANh~sx#X&UsRg)E7eyKp(6<#7-bK) zUh)C0RhW~C#(l4&gFa>PSajcudv&hwomt_#H)>s46OAXiET)7b<@jtxsZ?53Tsn{T z1=e$+NWkPbL^_W(Bv9B<*kMFHmZ4Z!tS4I+eE6tf-Io9qV3ca&Wb>Z;oNK;kut{V9 zicb5ULjZ)U_iJ+K$77=}h14$%1MGzYxD}^nwX?a?xzpcY`3XxlwK=*iZG!E2 z`R^GaJZIc8KfD6(*VMwyz5y924t$+64C;GhJX?`535hnOq9 z5x^ki7g;ab;(iXH{=%Rw7+i+HyOvl~2g5k@^F&4o^0+4WpnWdNI#$-fP`ufb?LrH@(dOb%5vwZt( zCc#RvV~>(W@SdcIKUbjVmo=-Vi#;Z%ERxBao3&hm(~dlGMVt8!4kym}jdf16wT9&S zE2dzVYN$OAsW{xZ_Y!zsJ%n!Jv`8(beaBBz)@NL>$;BdMrcR2+&~nS4F~Z_$KF#JK zO}nV~!X-~dhsru0=z7NbJ)5q#r{&z4eUW>mh+fxsw(r%R2HVz5d!Vaxl-Wh<9 zhcO@=FV?*~8XgpG_1a^T9O#8;=y@Ybt#tmv=35&6nJ-eb&KUZgbBLRuMDZA0=Z7Jhsv^CFuWYy zimK2;=jth~G>*z+>WmT1 zfOW+8xCgh(4`$J#qw9I)JH2gIXw@U3)77`rq4jefxd4Sc-na9NQL|5QrGk*Sr=OG6 zyxfaGai1Os^NQVk@F@=b449KkGs! zBLSS}Z^XBy5{&^zw&4)E2Lz(4_=fK;&}c9?56+xO=mwd+jozCF3;T$b$fSq{uwOHK z9L9`B&4u{Zk}qSvyQPvtPd8`mD}#^|Pq&Us!YQLl(qN0CX<~oHlOfy|B&K*fOiu~2 zHy_Ii)uWeY#HUFt#`aJw`Smynv0#%V7BM+#?=9Bw8r(HXGOjg9IAM$(z+U<_PR{@l8sF$)Gi?N3jSQ7=-3>T1NDCq4)&{_w=2jUTyE ziVD;$^-vAo(2Zlbek5r9M~C-FzHjIW8P8UU)_%0phAaq0*@XNHs3!P-}Cao~vw z`F;+skL*vbFtUUaumVrgp8%nL*>P;bg zwl}!%2+DY$n|N)Yw0q*+3PltYfH+$|EjogXE^b|Y8OtsDlC0sdH3vh1je&Jr>K4<6SyrB2TYFT$G%l~EssFJ9g-R#x?0NxTkp_g2CHiwUrs zdh9zMLNPj_gmfd4wFDe7wKx2To(tGx4aT0$d7)dnFCADsyUq34z~f&MDOrUCjP-|a zoYifGeUsGkyfpV4%;tNiBjO)8ah^=mq4|jq0a#rP@9((I*KX;9>={=oZqkk(5rM`a5EP46jJUSQ)ZAgO_)u z=!<0FYqq*_{E#ChA0p!ojxuYLQTjC-I2=4|Lw;Dhnk52VF9}j-O4Ao8;;8Ud8$-)0 zFKq1RRxe&n_YJ48uw>B@qXg7ZKTx`%9Pta#q=EUFL&CKtHwyWxgDhAv_Ju)KGjsS+ zNs6U*)q_+b30l&Sxo?s9r%3c2@5^3kf9Ruf+f*$>$-pyD9QYaQa>1NoKzOYsfzIX$ zjaE7_YUeS-o(r)Fy7w#VQQTYHK1sMZ*vdE3TSji4Mbvz-C8;D#oUM8Wiucm-vYp>ETxbaqJ_LUzw-hjs zR}(~XdH3m++9Nv0m+Jj$dA;N3*_~e7wI^(|hpdN2ueF_RXUe;!-Ht3q0wYDxBg&*O zdH5+4`HZVWT{OlF%oiKTA&lhU?A-khI>X+&ZL2?k5!%q=OAUY7;uAR#>@zDF#sEH! z-Y*nvVwT9?nxY?;=Z>?{)d=m#6S2HU1eXmlm;N3DqDEBM*yHn~!rWtF+o~ti9g93y zkNuJFVnDvVDel~|<6uU#I_0csTd~@DXxO%xLkd|SZVuXSkgb$aOCYPGdtvT4J13u2 zm20h;t)f+}nBP6Y@b{)zVIf0X4UhuXv?^ps56%$yG#t`-y%A*P{pz z5k;PP__`5sPI?(KLaOY~rl4TiQVJ&?-3FjxzHBw@{wEJU>P%E=n;of#tm#Hds&ZY!oMP48yDB%2xwp zx`sa4=Z|@qQ)0;NygP{attA=4!xGYEkG+C@P@3q)nWy6Gw1aJ~j5oyfnzzWBFSlJ? zen}-g!&i$FK(321ZdbhJi4DBc7>N^hzUP{^UJu%uI2|x@x~~QE^Lld5o+NMWv8NbWa^>5SK;zm!q*~N%8nAH8%!qg~a~|6hz|3h@{T9PLowCl_I*%#vXHM}rDt?M& zn-xnAA=B>#dShbd=E8Hu#u>o(5_iGQi5vY>EdCj0nes*^ft)@D z2DL4$-VC0T$$oW`v;bv}A?M(8^sY^pw99H4r&$jCy`CKuwW_3W@xJS@D+)kh#sVBo zzG9T<#tP#hk;(AK%P%1xD0bFV?64g@tjj8Hf?=}9_kgq{@^v@6Br)JQHxwiW6dVm; zcKN|PM8riny0`d(EyzS7QfL}Hisfd6_!ab(m5%4uh;k=hs&GpK;oRs+5Ud`0UEb7o z9b^HJBQx&^RAfZRi^lfzRvuH*e$GT27C3TeMDgH7%?7UV{cJ;P>6l%6EMrBl3{+m! z!>n;q^6tCHzK2KcIYVxb)jb#RTP~lmHWww1FPRksfO56E*Sl)QhJ0lwMp5`lNWYGI z+z%+Dit)G@u=dk>DH$2!6R`}uO<7zS#znRgD( zlZq)Go86ZN5?LF8PKGa>D>~-Au@ikPq4SP^=9l!Ud5(mS2$7IXxz2ar=tAb?t*nse zCSGAJqC4omxNpfAyidtIe%p+N@aLqr+c2Xw@z&w2GNuz_4@}Os08NncR7dkwpABjy zI7XrerLxgQu~fHpI*ROKtUvN`_W>G_dx(P!qP9;%PuZ>afb#p0W zqF=rD;!^VxHF=7vA0h3XaI{(<`cnrd6&CSmL&PzzyWYoGuOGaiHRTDi5QWTS-=b-J zNl0VF7x5BrRrK!7;1j{#x7JEnlqm`?5dg5r&C|n@Ji#HPnPd@?bm$7xq;B*z2{1&J zNWOQ(RoO5isEMs^!}V~FuIi#&be{3gLHdD5q2ruvryRb?LJ-T$-OfssBY>Mpn0>)w zqZoRb^YxMuqe+iV^#Kh+@8$4qazcd84jt=s!c*xBCs0*BrsPR zxY;!_qPh4g3{<@q?X|}o-R9$5p+m+;%F*ucMk!gH8=5WX;OJ!`qgNN%65cbm%Tj8y zP4?DYUhW^wvwOS#e)G!v5Kwwoamom!R`Ul=SR)Q}Zr7fHQid?R zPGArMpRytNluc^Bdp9kSI7G~jxsvz7$6gr;ZJ)%l$G$*@w#{87U8@NX+q($@w0qsN zss-=CQ|j5dVWDvpUd`7&mf%QdfZfJcg62&kdSHl109#5n)fCa07BrXQ#}X_QammSo zu~ERNI(LZ@2z-|;>tc4+kgl!u^cH!_mwG0quhOMvrh6eReNp5b&ydD9ZuKb&4-Zj0 zVhzVR!(%QYN{)VooHq6Qa>}JTp6HxSSG|MKnB)MRN~i ze)KJz&mcT;nfQ8Zqij|LOB+n(K_qjqixME0tS>Asq(1aH7gVt*xlj?!Y(>x-7sxH-s_2~06%_!R6_NtMdx|9 zr`SZd-$5Gr`ST+R;Ttl zKBdE-k(kCG*<&7P#iI%BTZcZ}U?P>oKvrok#mwZ<=?0F|O3D#RP48y7MmPgmEi(`V z3A4)=+Smr3WR^&e;T>K-2*kDbaIj>}Dg-mf3h+jzu-I%+r9i6A!{O0#s6yv?k;SbL z#bElBYePirsI2e+c2~c8jKo!m0yrRP3qnWG5R3w}B&=f!fUiL;!g zvvALBd|VX*9Te8RqGF>{^AQ*@ms>j0)QP+|9Q)h}5YDNz7eLO}@AQ_N9kiOtxT&1; z%HZ)6rC@j$=gQ)Sv~juI$v=IEZLLo;lCdD1ik`(|Dsy%fvxfz{7CDnxq`&vfEY9$-SWr7ew zSYWPIB5dB{CTxdz3(2=P6pGaqFL!_`FqaX!Cr!q)_f^|?tPQUWsz6Q3sCsz`)U~gI zFZ#PToRL&H{qE$z78zxO!|JyhYjB8!^@)z^d;KBM_H%!vJFe!Is1(zn4FSsJae|; z`;?D-#^m-5)2yfL0l{v)Tt06Fp_v#~G+=@H2?Q4og#|)ZKgSi_O*jg@aJHE10Eaw{ zH7??a!N$k6>yV2~j!L~)X{YIIPc4cdvxgnF-YT+FM?|>68zlqG%fNS$WFS2SD(-Ru z1Fp?mV!{T{?1t!7_~>|b5;xrEiZR<{jMam2pA2$flD#52=iUONi|VX~B2;y$8}m2; z;xGhyg>Ie%@*WA%#ios0SUvzubQCt>Y?>1YzLk4#IAAtZeA-9uXvbha>dt3RIllkyFNuiyBQvv$rCznXC>`^3M6EPB7#*0cL!0JXP8YZJz04a6ia13pdvz2Z-^0>P99?EG% zm))v~SzzIN{1Av=E-3Dt60^ioy=7{QI`=2=m}USwQ?^hOr`h~Cuv3=%tsILQdCklV zlPPNtnDnal*KK@9?{U0=c^>k@+*I}@KuPc!9Cd<|8Ye04xKH&SBAJno_jpuOOV|bi z^^O9QzO8*~mtwtK^Wu4_tBt*kIceC|6Nav%UBzBK&}u(&eRC(`Y^KFH=WTag zijO=+>#T+i-Rq?wan&(92j@NANtfR%K~3>@aW8xP+81TAhe;{$HW;x9!i`8;u7}-8_AWS|6L&DZjHb34jI; z*8;PGIxl;WPVZSMR>bWJo3?DJI&8->xF6IEw5U5ja5LoemH3dN64pe?v z4sDNH(}l|FT`4qI5*}Bhl(W-gOZ6wuEc&>|E-s+EeUE&g3{+SO_aXR}C|T8$F^n#F z>KwVth@F@CjHbN2j=N zGFPSFA;R!lCxlA24ZY>pF#RfN^bYEzYJ-c#xX~V!(1`ewS3PpL1lX7Qb zWr^m?yWDv%l;$9Ge57#8pX@QBd+{P1<~d>_ABxtNCAnv=1aA#K!%}*gnAVzefJ+2W zJVxW5oI8vF?M{sQaIe`kKo3eBrak1q;U>i%{*~~vo#l-x9zV`?eyxNH_OdmAla^PR z-myyYW171(rh8`DNZ+%Vf5+*}QZmtDuvCq~r&$^96JW0;QKER(1qIVF3eh_8P9^LV zMc3hG$UB25soH2t^PhD{AA?kz%cy((%z0VED;Tqw50nz2ot=%{ z&-TP6JlUV#i+K_1RO;L>L8%_0#Jjv|Wao~@mCiMLg0k0#Fh7I!^tNiW44`RYO#_gT z4?Sven@Ql}tV;!ZLOguA_ne^iQ%La{te=$=Y9?Hjj9RE3ty7~lSSF1!*@j+^^YW}Q zvw9rwT133Wx+mb&Z;WKe=OKY~N%fpGKRmO`E}VRtQc={^FE;1QwR+s~*f2vb(dLX# zK?Nf>;m%?t5g%EF@7f`!G=Q=#xjf{jfgs@=NO+r(wbX;vRdr--uk%>V7o@JRvqON{As`U_~Inx>l7kdH^ZU z5*Oey%5r@CoxkFjdx$P5Yj5@tE6}|L7cXogk(vRSw#h-6qb4VNl5V`rf3~)oGsddo zfu$_i;t3-pPD@B|S;g<#DM3@=+}pWb|DDHV^RichQhXgWw#mesVeWp#^ZDB)Z)iFbwvJn%5=J<}kI zS3H-bp@=zQXH*uVK=p14!;f!x>tQ_<&_|-nPu9m9 zkmzfwMY(0ToR>Z6VewwlU5mqAkYRL4A7wt87Jq85OERj( zR4PViPRzps2jzSwNHUS?F0XnOoUC3zmCf-?>QUn3_PWrl4&ZSfiL}6_QF46gQ4S7C z4ho{)M=u?67183>-@;uE0K21km!_f(+uq3Q1`LCI`EW3Oc&7L1PF21v>wp|l;qYP8 zdyMGO>y6F4$aI^l={;zv*vbG%zXVm`uW7XOrpd)#=7``mYN?+~opwKmL}?_?6-5=r zd1{28f}+m=4rnxIOytxt&s{Z|#g|_YDRgb%AA!}le4E;@U>hmw$uKGc+_>v+ z87Lt(9f;uNZ67=cTeX4n^^74EMAC+o@MHtWeKe?Ekn3p}f-Uq&V&4+TTVW})P>Y(J z71di218{LgUnQ-VaCsh5-5Y=?@bbli+=wFC=JMcqpyic3-_FUJL?A%K+YZLp5BY^B zD-^gskQYhRMcl@iz4n-d(upi#NImAY0-%Db`CJ9zFG&TaR9w?^%jHgr4Xx$F4WenL z5u@Ci(~;NZ&AE@Btc%L0r1&$6vzt`VgFaqJFNs!hd2pm@fPLuxLYJD*p1Bv2-cUTC z(KLC&m7px-Z@``99)Ms{hB{9Nb;zH)Z4Cti+)ebT2 zyLlP6`0!RiSno5%o6TC3Svtm;t30$vJzMM{5^e6tm}~75Raa+Qgc(aWL$qR@(xSEHu=Uc&6Jufcssn?6J*I?6*ku_=@G6f6b_U@c#Cd)(k zy{v&XL@b(%6(7@a$q7f$Vx8i~zEsb5! zaK6Ec6aM^#=!^{JgH!aESId(fXA0zlo!fnN8W_>qOZ6IplJ5UiTT3|Ja`F^Bbx?C^DL5-U|M?2H?ugo0MRhY)_gE# z&S2sZTX>$Cpl!7X2VD=~wE~RKQ>|D_OP4z4i*9)s8#Qik=0+hLQD7#|$8Ib0vAqK~ zN>TD*bO2z)r4tolm7F(!ZVf99Zw@VqXl-`eq9qFvbzF=tum^OpyRCPGjrH2qZeDC} zt=2g>t>-UU=>YS%OztqNj+~9`JK*90DHSu{-2{6DtN}x_1pzY051>H1E?aY#jT{7vSMCt%UF>)bluyQE;C5ycDemOlnS+v7?W4+s>?K1|S-^u< z{KPZHoDdv9PpWLzq@S!Gx0xeRjcthZgbUst-+}k&P=TMXm2E&$7VcO+EF3^` zA!<6WJJGaTwa|D(z0Q#_Pr(oPm#_f0s*E4PZe^=Vb3Fj%0l<4awJ~T!TTFZlZpveK zps4*REYZ(g^%iw@8i_1`D)X3Wc)){L0c%FziScD(l@VZ|_bqDSBB0>py2&IojB-mf z&wxO`)^vM-Sb+Wr^ZF@q7OM z9a*{;gK0U&3kSu+B?kf7r$mKr-V(^XFa7y~ps)mOI<{WjFKPL3zUUI8tcZ>}AVyZL z0=!Pts6=DlGaBZ5P*v_uAxcl0Ri{~;#!y2uEZ<^?*gjl&v-_M9 zA5j3v1?LU*OB+{wgOFhva|w3V6HhOnkTNLG48@dREWas*$z)Q02Vk^G8IN>(>r#m0 zO6@2by(^RQbm!GQ`jdkbXQKt6zT1{3meG~grq(IJK@+dD>^(n$Eq)sroDM+t40|Pv z^&q0%bv7~Ub>$n6lWWVRBkUfsQ^f6jN?J7F(?smFL5+Jwr+L$U(>#Pm3hxA1`e`|a z%}?LDDoMz~& z5I#}yT|J4Qq-zVfcM3kyQ|~1Nj#RvIU&qUSHz)n#Jv!tH#1?Var=b)gh#s4FO3(c; zpGDaeVxlN4hR<|&p8!fg@FTgi!!92jq^AR6Zu-D&w6aMINDBh?a=o|Zh)R#JC9)T@ zja^U5ovl+7sr2eZ_|1SQ?16NeBfQ~?$1_C)x4G?+DXT#Va88tz6-l=WEHEZ(wWxO_ zq|^G?jRa@_@6n-q)()g@q@LWsV6TmsNXK2O7UV$@)HvvYFDQy?z&K_*qM3K{WK+|N z3I!lQyWZdxu;mKj?28C}<WpnY)$EMDYa|EdLS}v;mB`xkMxGOfD;}qOeRaW ze|wjI#`y6+351@QuF7@-iX4X=^kt9pv1SA!?ue<+(IDy&5-t;qRlbwAvq5&~aTGS3 zVVCnZ5>D#j_;}3NEdh#JL{nL5U37?5!l)^qG*=5!*t4nQdc#m4?{d#Sva$T_fP(cq{Ajz|J~*4oDMN^4z-|({su6?giW-ps~iRRMy$ z#Tu;NxOJL0XwVyA;v93gecA!*{RRQDCw7N$e+f(mr_v9_Up60Wadwbw zBWII8K(0`Ip^~MfQt{}imW*$I3+yw{b0VD!cs*#Qw)cWcm5JG8`t}9-Yx6Tki(|%D z7OTvr<=N_dl7ffAuf`|}sNf9%ok-XC`gN3S80eL zE#Bd5F8G^mak#L7RCAZPb$%6d*_aEa_NX_O)C8$h@N`b{iR3}cjb>fE>`eqtW`kyV z&n3;DUimxDwx)nrQgn4h20*=ngv?d8*lz_utZs1Mn__P3snE!XC4YtfP9X6Ws3%?v zKui>cPi+u!h*6r7Wj0XO`tfo-I-@B^*NY!2FPd8Sn*n!-QV(rpnjXFA zieVx&O;EcjaPLWiCHtYNC&yCJ9mve=oCzMzo+1tH8%LN-PUsGJf_jmU^HhOR*agi~ zC|2-1bMc{rw^z$pDReezRrt$B4;Xl?d*<*+Tb^yav;sfC?7Du*PO`c1P6#pp_2|m$ z1XXsBjP=@ZS@T62D?a1cOx|{Wpix*44q2e7ILlocsP#d`nY!-CMF`nG?CY`6+t(!8 zfyU_CM2Z@BBo^|vCLJ4AF5;Katf3~Y>(S)Y0NV+q9NBV~Op^FKH8`xz0F>8m9geJ( z-$J9$;4jzf2$3u<2KykK7t!C!LualE2wZYF67MT~kKGDnFkl~J7e-~Ok#Qxoc-96o z_@cizJy5LXmHFtH<>(TG)Dq}HaT|I{JSr|;uFLQgX_CaJx;~Z)Y(!mDb-;Nkug5ep z;uyhLb|pb3mzroetR_TRPQ0I|+663@apv{#IU(8ss;bzZ$svFg`lKE|9NmCx>?sy1 zhRc02{G8&g0G2Wd&Em5MN9B?$6fv{SXf*4k1C@l41W8;ai*Wb&4aXFpI5Wt`Ndw1( zi^j59nWRCMvklIXMtQyx)q7ACSYq}>Zn*hRbng1x}%}-BLXyHmiCJHWh zJP_oytY^1jPj_Bwc681vu8sqZxDgwDOYL(6P&7IW!_>unt7wo{IUysr9Z{!I^h)hj zqs^EDqN1z-&FYDkYhfA=-{a@tM(EjAHV_OH!L?+xFC{S4a}Pa{ltxt)VGs;<1G{~a zb@BXRYNVw)wK%M^#={|;fJn}3Jg87!?364FUZj_p!gJW-_6%0s@k-w0V!`->gZ7KT zg4g<_65(On_|y>rLn?75in13^L{dM1JJVwl0*gHNp<+9hUDd|?!zIawhuyQEK!uGQ zoW(wQXd*0HT$rMv1;odZWm$6MbB}<6wK|N0E4VsPx=>;hYb+rjzOGl6n9X_WComQV z9c_$nP2g;RE>pbj=K%>@^-rP#J7$9x3H{hcJfOo_68QqsI02a{3yd)zCZg=8*jPV( zP1Uz2Vy>1@`XM@pXGh28Vd7)Htk*rmp$_^?sFY+g-ZRECndeD1@NCrN+2NakyG)Tw zS&Fkf7*rgW&Fgmr6lG`~=$jS8@Z>QC)KnUZmpTwMRK5j^>X4FZxQ6#Kf+}9U?%^8M zfz2wJs;q!FN(*2@{gw+LaLl;+UgN9BW|O>k=kyA@=n}A)2KQ+P#470yO>Pj}Yn$3q zmTHrC*)={dYAgAO8~b5*T@UO+Bfixzwi5|!sKarIgEqBA2cTSEjwU6=yIBf?;epD7 z4Nzp)(7u(jWhw=@Cv%*c9Km#)HWyL~mu^T&WO$v&f!J#$h=sr8_B;V9iC(sd_&rRE z6GFDBK&K2ogeUXZJXQjEtoFeJ?{9B=-(vxfl00{oH^;DiZj$Od*T?VAQZ#)rOYLH$ z<+*04Q_N+9n3D%M6{QlQE%rV1dRzeuIqi_wxtI^w=LU_Euz*D; zPyibXT-Rc zEoui_A`r1kv{kNM#3lq7kczFE!|(9kl1CSlQ!a02D+=w<098P$zZ>eH%9S(~C!rMX z7i9%BxAJa^HUR9E_Ax@X>QBMZgZy&=I|kIopd4AWl?yI?e9QZ10&$O6gOlGRn?EPZ zPr=FOfIvG& z?4v@nM+F6Jwl=HfC$a$P`KF#d=v|!xXAp`R3`G;bY209mOs+nBwveX9SqA;WUn4Jb ztU72F%c^sp3mF8fflC~ZCtTaS;Oa`PLMl0xHCzMfl+ljoqq@Um3`IJ{A;s^Ru{G)C zfJY|ez%pnTc^zaQf}Ten+mnNBqY-|g-ktKehdKp1Uf~n9$0ToAyphowEWzriU^^ds z8U(9dUVEe%aRB+X-1d!d`Cf?cA?$*Jmz{J)5a)z}_%^5+K({?@Bum9z{wcZ47)YF` zC=IMvt`Apzp1h%n=GXf;r9@`LB*0M1^J$-bN=`pVg@7VY&BR+_4wUNhb==X;o<<0z zvOA||@BGPbxrUhn^cfUbr7v43KHTL8%h&6Vl060TY6BQBxu#nJkJct;h4+OBi6~Tn zw+B6r2uSKAwP+EJyE$bwvz%z`*>i}VjP>cMCzw^S(P(@5*dlA_6*|>~W9PL6>4QY) zQq*`1IPhAEH*+*zYR&5g-IGbbB(8T!sZS(YQk}zs0W5|DTCn;6mI^hsU!dV*F36l4 z2Vx{sQ7DHeK4S(RWrMS5Z;GzL)y)m5RG`oj37EJ2m?mu&!ct4HBGFZ zUhLbA!X_68wWBy0aASYY_0Xz9i&FHUJv)`DLNubGc+qo-C$OYBKPfr7sD^buX*%tN3do{2d~zBx-}7l>3-Oy##piMu)0Y+4V@APl$#p#;utjrZ~S8b3d`W@k!ywntSf7Lm2nCyU2Mn+`$D2cSRi zq3v^yg^N1|DG!;|0evczykMc8C_mx`!IU=ukU6u^pr5j{&+)+s^@%d)dr56x7(j1x z`^3d0i@3=!z?qxWO{U>s1nOySM)Tuw+O(1471%ZadD4n9xw)y&ND~f3!NhcWICLDT zfUi^pTOWd(iw+1B!fL(|F)ODG&nu7Kb&1z8jjvsLTn z%@CtA9gvqNtvKf*aquZT{~RQ=TwyqjB=OY?uQGgMuimhAn%jeC>Zh;ud7%jw3ugya zHZ>>tWC7S^ zR|OMLuhC&JiCHR8*cVm(;zA!;K)n^Y=q$LUnxSAB0NmAwk(DJ+%pC@%?g)0=#P2o1 ztx4<^Gd}Et^&oM(b#z;~YB6&yTQBfXqz=}(1|G7#se!I3+5y_c4yzH<`C zE?Y=}hiG+cXdJ&+!GfEamoK+Hdq@>@^l`LENvm$#l!J-IPOC?}Vt}&*`_6YkpVCV} zXUJpsND_3W_h!2UyatejM@qo6UcfNY%d2~&q(y7s@it17irfd1oJ7Cf z2D!p3K$CH!K`jgLt%irDh;H5HBBV{pZhV`~sgJ5SPFPR`^1=+7$@A=|?*%lCoG)oX z&1@P5=&J%srAJ5Il8=J5@ZF-FE7r5JMgMBJ1n@ar6t}Fs(g@}zVMfpv<$9e2#C}w| zYO=15%P-l(MNXc zEKU{$z&e*aVpp`;MYbz;K*WGE<*%TZr+@AVc?%-a))<8h6lUPVk zC)PWN42$-;Cd}vKDcn;<+HXppU4!l|UAVx`khQw-ObjofXln)5!ENUPD~ifya9Z zG$Hn?IvHK-;kW!g#|-B@eq_;I@)&Mw8!R0|U>NR|>OFr>nzw6D^Z*P~aIvaI#Gs_x zw)KJ`8m&%hZ^l|Hbwj<6(36T^jr0XsmOowgd{8xxCb6;1>CU&hM-)VNtsXBj@D_y)4{hV}6;ck7Z>z>>3hO`XO#rFH_Tl3V5c& zV7(5s?bO&Wsk-rrM4dL@JiXkL4IryVqh&Qi!h!99))PV{90hV9#I5@bQ(mA4)HXuB@Ej*2?0Q#Hc+8hfL5;PiTS@L#o%a zGQhM!Ssn+u`1@Y*&?v~e5EeCydy4rS1%V3yQrkv}O*@Ao5BOeD!6NXd0P%C+NZave zFS=W_3g;@t5M#(<9#VORtGkXP9H`~-i*?O6jt7~3{fVP`R7ZAl` zYTj60?}P%TMc~`3wMK3ilH@QQ)JMh^^63c z3qRi%j$Cgmc919mi#MO-$YKV?oZ?8s@tQ5(G>ew9JEy`%SH(3iK+`cz&C^f`cRmeh zHKS#gQ?zR4L&ee4@JB+sNUKx|0NFx}3mw%ewM0BVH}P5kULs&#+-Sq^(c`4A-W;E` zq?sRkay>56XOXa;FMy#XvN5jU4^0Kf&X=DOr1vR7y=9obp2TE|b;=T0rjirbB55~n z8DbDuNN36IT_t^)toiIw1W~{8;=BQvL^$m<;Uo@@;I{90lq0Jji4?o~^^+OLeYS;^_24M8jb%;-r!gnO zdavI&Yq`AMjclL*0M=tyyM^(WcgI$<%-~G~+DiTGF`Ga<1b4GGF1I3idJD3NY_)R@ znq;A54jXe?D!!t$f1qv_RWjiPM zS<_>kJEk_nmhYug%kUsu0(U0asP$pWlF1`wHR!n9GWp`%Wab zw6V}TKxY_Q#OEC}u+w3Pyj)h&!|+1MvZlN^X5Od=EbInG@C6kx%S-nYBEgoYtMBof zXdZj=-Vt8~uOtl}UZKd7dW)>qVa!rMf$?5@8AA@wGY>MpeD5lJ%#*db+4N=YW?PF! z<33InS}8MV7I^0d0kaDxiT&E8p*i{|vVd~cR=0^ijEEP9q!zr!cUS!4r^=85j5_!!qtl()6MLIbl@FfBn7*QO&YV^)d|of1Cxo6hl8v}wze9ZJ z8jvz`fb=w>9rAG-z#ksY$}xSD4@qheIsIPj67MkRWww|$UqR!Ns2Yx=U6b%jp_6(X zXS9#rESuhIQRz-62AhdJ(@81ow`B`1XtYf3U=1{Ycvr`4E^Iy))1-QuQma+4FN3ys zE4ThvF8t@8|Mfq`@sEG|*W~}g{{E?qfB)ZlWGkmh`ICKT1BhVbLi?JYOADqzW@fx= z#Ryr$s@-r-%4S1$M$(s4&#trv7sN*n;LAfll?(9mdb4ZC_QJU5Rq^!0WpL!D7XHHS zbXmKCg~)Mk!3OMF7&;@z_bhWn_hcT{I*+V8cKBL3ea>9nKlAsDqe4tK1?=5eHe|@W zByd7;SkW4Rsqx1_paQj)4vB^#W|Pd;H|M&7ZTdQs7NO?jOTW|k!{12_-|0QqtMHwt z`4Yq7!K8!5TI_p1nZ30Ea`pOP&UKH{Q1Fy)A*9X)bqmz&R>6$l(wBRE<~Tez`-$xY zRgJ={8G04&A>ijCUdS6NA=v@dp6}u0MG1~(HNLw6XSoViT@ECHNBVl*_7QgZ$LD@` z{KoB@`}x63&qwe0(Xb@LHZfER_7o}t$>?E|V0LOQ3*&)0HR?;e1|un~y>sv+fJR0U zOCpE(_~7r}KzPV6TkI#sIzv0)ji>LqSfWf`o`bO0u7r_!N7ll#y2`rp!s{}5ybif> zhK~o}-eev}A$u0zeSPxxQ_~ZuvbXeeh2UxC>8gDCV!!Y<`t}Xg ztcNZ;QOYL2KAm=DjwG@^2 zxZJ5I+wq0De?H0Gu}4H?id=fa{rj@w+I+@W5LAo)uBk9ig5Y@K9mzZ)MwMKri{dD1 zqj#e;9m%8&fk_-Adzd3iGw+7<>*M|N<8ybU=WaY^d!NYR&(9BZM~-dk93KoD1}_Kg zV`p<}iu5MirD$2kM^ZF{UUYIV@al?v49rhl5RjxnU&7boAU>a|)IRQjBMP8>x4uKe zKZnY8DsPjPkq2v|^n#omuhPp03dMQs9UpAIBY?wRc_#Rx$!S5RG-O(>@*d6JP4kI- zHFx-4eArVmCUPV?c5JSv`+k$)3`0!P0I0+}w;8sH1FI5F^L8w&eD!TAw#!B}y3-(D zKNN0;TIFlildE2PRUA)3ziPnG$IC5l*rOP&k;)|xwj`D0cg(8AYK4r}t3^m7TS^@c z3x<(rIn*3LuJODX)ppJ9bjf7fA7e|-tG90@S1n()`@km?^!bi2iQHawgSuO+?;)nf zff4isY zc|^VHv8r~@U-&(d^*RI*6+S|Ww>dVn9$p}qCcB+ar%6oune=nWe9@_&wY<-FZlcc9 zC8GGCg-9M(=>~)sVG{07*Lqmi9yPDTDQyT=ELv|0y%HKpaI&&n-SXb^trsX*)$nq< z9LK`uRSO<)-5oL9b$`Bi`F!!9`}Xz;u&m-`-XphqRp*)DuC7TYwdfN* zMs2{TNgK<>n*=V4?1x;1NB&^MGc()lXc|F>=r9dY?Be`kzt1W8T}>mAB^aS>0zH7wEqdl1|%Pkgp zfl3p3?PMI3?&@pj5-+7O-bDLpK87xKEgvW8*2#K1WsmZ!Cj0L81}-#EeTKegxI2>|55o0F*$)3BvRy`-4EY|mm*7)8=c_*GZZ%!_o1A zh{xNx@126a_bw*uFvV`)y5VUkk?NqU(9PSeseM6xEk#9mUv&k2*0o&)WD;kOdGxwK zL%HpNVvlo4@m=a6xBm3ApO9?=brSHxWk2I2%ezrOm6^ zxYZ>UVc%^Mk`fq??~DCsWqH#}#9tDRnK z+%zx3)6V4-YO|f^;tIQ@undJit7AKS#c1=irg&vWPd!`{JR!WFR*cKwk-S86^9Jw> z3)1#PZgzGf>|0K8Fg=c-BmJxn^*iIW5Q|AhY(FQ9w0sg9eiwj&p3emMG1*>;`1?6c z=)<%;j&` zE(L^eN!Mkw0~+4X1kaKbv=|0pOWvzO`Ns+$t3(Ty!DG*ieazrEJZD%h5tol^C68&T z!G}1Z-dl!(n$H^Fr@sIvy1ExaH|(5q&O!Qf@C-v|=YC+&+V3a?!ZjkB`uEx-CA8kt zk?!$>AWK@dfIwV;!xQSO>xYL#0bSXH9l&jGcF!N&Acrr~t}ACxQ03Nlm7h-I?@F)) zZBARVyJUdq{KzLE3W1pB5fj`s)6{mev()cRVr8yn?!2pebit@?>t{r}1)36G0&(cC zh^|J`v4bb%?hNr{Ho%`1)AQ0f)6VJz+x@CjK0POnuG>0b76L}gt;aSDy>U~!2oe37 zMrPx}R;Dx?HyYumyHa0!qNgG1yb7QI!FcH4&BaW6Q^?eUa)AhoQO@#MWa24mN2pZO z;L-Cqe?ob7MW2l7r~AzMVew_YGI+4_M7gLo$h40(CrmsH(X3bY;uzt5*B; zA*Dlk+Z=D8+QbH)w1u~qTLLVo$iQ<<8=ANhpWxhSH7)s_<#e6yDX7BnP&ad?M6w-| z><%FFEk#DxBTsH3F)FRomPs0zr%(C?vZ!#v3k#XIlmlPY`lllmP2a{H#!?aY+9c&8 zu;G<$Yel5=B+BhjlXO=$UwkWdmGkzeH&LKMD)x~-Dz;4*bFac)r+Bjyn?tHBF^Y3M zIv2i+B0IoYn1Z7eSP#`90YsR>w?6Cr^rmMg8kX(N_#hfw-O2b_Dsr+Y!NXSOK0G{E zS4lBlAZOX3rWe1rMl#MmI|r7{Df*NMb6Ptk;7ORyMKiP7O(xWGUW@G>%;UEk)T8Ld zm!xf*HRxk*Mqd@j_iTlfIux{O+eNp}wQhq{A%xT9`MZ1lj<%Bu@?fWD9&@x7?Y@=w zv*vl2OfS(ms`@&dlZpgPAQ914tcFyOx@9^IRL3ifa$*arxNTW0%HK7tBSmTSu)nW2 z%<$<~VcbXcrVr}Ujp`xB>jJ5Ii)spq{$QLRGoS;BDTZxhEG53+@q73CLTfO_m1nlO zmM)Z7g;A-ha@!l$Q~%==X}ema*!?N$#h zpJzP5c`qUqJB%?`)q0xVCJ^*;Mdqt&NIQ#V$l&+XCSU#Zu=IPWkbWn{TlF@lqMsh# z1J1HLWok>rbGQx^cJX@tD6lwK4H-;Bgun*c7DL}O^3$)n>C?k3D-&s(()gL$+xH$m z$b~DF81Thbo$c|_%?*;vCpap{4Gg{%?`iq|zA=cSjCpp30w@!4l=V8hjIFbsJG+dn2;W+59rk+vUz4NV~y;y@=rHw_t}jgQ;YJvN0il-edJsL?~ZY? zv&GvS!y!&(#KGpN5f{O!RVqSL*-zGG0t-+DhF(1rzK3pjX#g8xBZqs@pyLRsmIbc2pTKq$0)g;q$%rn5hc3)afW2E} zUj=qTAcJ;(y}CI~RjzeEQ%YMUsJi1+FmANk8+*OP5XAu0I#NvpqW$ENVo$Y9@GT>3 z&X~wPyuvY$n~F5PN%H%u{(U+evjH(1>_Q70|NFT~cdHk(ffk?dtF0Sk%%UJ*`J} z*9W3LZvboM-NAg-FW)K6gXF=5zV^0PLsLKvIqpvoy4dsPJ@!1t>p(Km`#qAPNuK; zC1u}+_D|>Ex4_+%K~+PJP6HsPG5glA6MboC&^l@i#B!;%bbc`uq*dGg8)9Q5TX$ zf)iCgJpsvXuV?7NnNs)i$)N~d+hWgFTL=vH#A|i;djVZUE{SjhyB1cc-qQI@@sgM_ z59A96|DIC)vJy`)UcV_hG`+05bJ3u;r)0_21Kua1;8p2|%k;+D4#g;Zv-(Z1jYqGb z(u=tGeJL(UdOHPDd;YW_@%Wxk64G4LKxNxg515x@bhVsT@6MM>9+i`s#ZyBfGAvw} zqZ;}`B0k-*Hq>9O0Y7jBHty)>R4Hid5!a5DnMf*!;r6C*Sd4B4ZdD7SLjMfz=82RMt52O>?t_B_j8ww4sHVQ$OovZT7gzd5Qxg zCDEz=KCpQ3oemdesAAJzr^2bW&tTGFZC2VrrF2SSo7&g~6i#`x;Gjqv| z)u402!iQ)&$tA;OJn7jmV-K@f2QPzRxhk?X@p4q& z8L{{EdoC>(=Opxn2kLi%J}M2afic;O=-iqn%DD&b8ImlIPT2@}$+@bT)XibdZPbUn zn?39aohDJHi*fhX(UY*>w^)vu1xm&!S=YGNf{DX$_vVfJ)(X6qcPNr6m&1(u;N8?< zpo6_(8GM!PK6)&H;EB^;h|s5tZo@?wBA~V=C0LHbpguAI?bUk^LE?rq-lNJ)^43mZ zE27=(7bD~G8tPODidygdgPK5>D zC`wj9ADfi?Y4gxPA~++n2W; z$N1g`pM-ANYStCM>zxAMNpt6|x(C2smL~7wzEF9e&N@r3m5)Hz3qTCtIb31S<<@NA zP@F#n-j}@9@YMAp@@dDqLxr~3a}se{ccdg;Nf*0o6^m20NB7fHDw(!36EdSE)(DM{ z(i}Bg@EEzDS^^pdOocPGGU7II`G}Sk5nMuKfMY1_?Fjm}THdF}?p8NCh>=j&@kN8d zVHy)Mi;m2z#>l38h}+(?yOv_Ov;>={cmvIxaJNNaGi8=x2l-I=*_3K^{zOLP=4hL2 zm|sar>{XPGpf5=+oZKl}oK;vnIbI}|Z37A^j;+l)gX=t8#ht3%cnH~yAmxFE7gUf8hK zvvGfRX2IlR4vbrwguPAO$sxc4y!40K?2;Z6I%kiE9;aqjN|^=>x>ZQ$-DD50%ZZjF z*B3VW>ATW$d8)^>aL$O$v_9$NFW-)W2d1R0z!imPaaX3rwQ47uK7~bqhw=cgsa()o zp8y@YZQ`rz5MYxqke`VbN7pZ;U)V@(UOhrtu%41wST=}0QB?F(TLuiqA$YC5&urmL z03yU+>64{1bQN&qFLdnFfeqCb7pNA~IyJ`JRrQ=W_w61B86G!jN}u&ENOd}+BB)N& z5?V3CoD(9jMa=2gbA;Vt<(z(55yuKoe|GJ{;b#M4-DBixg;WPnCy3`Zdp&TeIXQW+ z3Ue&rK)0*#-g8f2i~{MWX>QNO*r3-wUzUG*G2J91PF0dIVGC0T!sTi_KVU*A(|Nr1 zo)4-=g0h>H4!(Y;1DhGzC;)EUP@-k&?CxD z-RFpI&YVg}H8gO6PcYSH%yq5<=XL~(<(@6hmZxXEiY~P>K#OkD6X9Gd(Fl!R6fFOV zk;xL@4wuA~77nv{*}56&qprF$Lm1D%q8C-#>dXFm9_nFay5a9-=b;5+0{u`zC!PQ!MF4xF)kNFT&9I|RX3Mo|wR=V)v;wbgBCaW2V1z(Z) zgJSJ606toqS{|I*Ny4UaZ`wngl;RdxJFB3-WY@(RAvrn^McYwZ>E~ zIEDw!Gio;WI1?FLaTy!;QP1VQdegIFk!*_sd$lJbQm?V=!Q&WbNu#%Px%M+LD)n>0 zVZ=1o+1Hb{v59*Jb@`M`@lN|3-b&z3Buc?_a8i|~_T99P4RI243bpIzCsOq3-l*%v z;}`B5Rq$q`Lywfq5j%`~UkZiRGow!4y)B1Sp_(H+fjNs~nj*e9r|=!w{X$I&7(Ugqf_3WG0Sz#~X6rR&^C zbeVKn5}w>!7cwXfc$IIvO{)aSgY@oVEY8TkmBpWqp0)gpO5Pe7znV&$KyhPz`bee0zQcHiSt?URH5ZI&u6K`8e9{n$~Eaxw}ME{>+*D zuvXM8L3YPclQ2|c)S>For5O8o3x+%{fDo=QEgnqN-3tgN$bHLYWN)SRS@hGxFTCET zx3gXxq{>m}3m@aCcyDy?cv7}1#nvYJp@b(D>|=-HzE{s;oA~(v%H1l7lCdD7yu3cH zSFa9Yw4c3qGjK{{0v7Wg7-;=Y=3{W?9&ahAz$D>T^eTjkdfhT!Cs6P+l(;ISB0}Jq zH~pGa%5vR9pgTSKK?&ny%Y0vm34JT9-AuW2u`|5JTgs`kQeL(*xt`mGA|QG0n^n&a znUiITW(7m5;TK0*ZY1O{sM9)LQ{&>EXxW=8m8NVK49A;d9MCq)_X{(R8ZGONoSD0Q zTIbTD<-Mwlh4nNv3d3^hX!(_a#CR3R#R=D7il;54>dGF0*NKLFVWyvcAMdQw7gHYb zYaQWk<5$JQD9~Q%E$0rK{waUwc=^3B;A<$X6n z>@=QcrjR6Y{jSF7r5i`Sd*5LP86y#&`+mpafcX%@yR> zR1992M$rr4h%v*Uy20kz(=CwU>9hpK?f|7dQ!BTj@p5#2;^vP6eXi=b?z&sZN@u8H z?rrfQz<~xC*`(Rzy_YJ_O#!*sfU#e2HhKsE@NNx)CB+wRrk}X&PIM(&BtUG_wg|Wx z&oIa{>qNBG5sg(XIeo3w`W&#AB>mCreMvfEQl5H+usM8#5wtV2+qAWY0@LBAoYPEf zdtw#HVVRyka~Z*)c59y08_Dh79Ef9p!fBigP6cO!T`1~9X+{aM5342MJEozKv)7e?B5KKFY zbSif*%Hc9{Ri4UXVR5Wk9|rZzNCwD7 zDx)VzPvA>=`hB&mx(VjN8>^Fe*>;bG%%;)CM^ei74f2EzoxU~0tQx_T^xU+EwcRyS zXjZ)mLXWowfcs{1-K;y`WIP2aH4O}m@dD;E8}`=;D8Y^O^W~n8J)1dwOh94v-nl&6 zdvv|#`TQUy5`7B$D(-Q^3njm>hN|G4%nC(A5i)x8&K{6`;`hDn0Wgn<8*Y~Jyt)Uf zU9-&Mfvj?HZPG}tJ>z~UcvO|aHinUGNK5bzZB9oriX?5R-!&&+CcjEii4 zB~*Eh1-I5S)grGHyaC$-UaQ9V6r(1DaK2XGUZ%G#`QmO$BC$r3f-+%$s*v(wA==Y- zb}!Uv9XS$4YKU(E1~H(p9>$T?3n-%~(W;DDeKBJl?xn@xz^;mm%}QSwe&15(sk~Zp zuQ)D4Y#Jay3G?)Bv1d4<4hcb-!tacY+ORjXj3n8)-}Sxm*-WwPl*7y!lMo$&+_NiWbbmAwHn#BM(>`Ampp5nzATjkwoLJ3*+b2{ zm$hbEN=I8$Hqo5$NX0h$SylNArl`7n1l8_9Cl%!V6nv5jhnD zrC>Fft0g8G+pn9W3{}=p0t5ZA$v9DhwV9vtW2~jz?TPqA^83OYQ15Q<%@eCOL%e&L zM#R`jlBY#WpyzciF>d1Jk=DbCAX6ml13KwkIdZC8iI{SFibt$Baz4T>>v8Cu+}ny$ z0&T61YsP~RclH8Hx``;Xe{WnEb_5HaJj2=az9SWcQGf9iar;K`&5M z_aeYFFk7|)0qECe`N$Qi$Kf4A^C1lD6=Ae=LMDTe zW?{g}@sY)XF^}0AsWnD4=(}W9eA(j-l+kSVP z?<=zM^COD2!Z|K8Xft_l1yvUuUEmU1+Z{Eg?6v7W+6w2(E;7y0guzQIf#K!S>#>OK zuxAhiX<0kZhTQMmdFgc$ww(iN1y@DIf_Y7a0S}cqmZI*@-s56?GGIag!%mFx(r`uG z;ohzJ^B{gx$8)@F{JJ{Vr*XV7H{@b3BY>x0KXELHnfX-b_ucswlHIJ#8grsLG+6*E zE7>Y%d-7pD1DkN_+$3=onW#fQkGz$KWY4J*aKZhF#49yq&;*i>l$O#9E=Ud`ns&3t zRx3@9wd)w5*Rs@}1;6WShTP24Ux?n^zQY23#@y8L(u#Gdgv?%sG}SE8_0092(An)- zs~z~#Sw(rM?R$2ndx4=aX-fV%*?eEB!Jpy*Xy3hVePu!&V&t}xaGBL09d+k1#5Vyn zOs_2hxSn5^pWiFM<9##roF#Zj4glCy!%uS7E^{^%<)#IO$y_!C=Y_}HCzZ+52DU^` z-CG}csTbleOrMWxK2*t+C*<4fQ4^T}n}wmrG&lXm^oUhOoJGho6ZO$*-P<{O2Mi~> zdtjFjPKG`){p?5T?dSfYkYIJ8BQ#Q6HBSQ5DJtlgdm>*lhK5+;^L~Nsi%TbMijg|9 zv$YYDJ&xO66>SxLooYVdxEV{5Cc`lWZ_($O1VOKpSG+Z%hevP9NS+o8CG2l6Z4whU z>cwM+)K#qlldIhDYg!#1QcHq6VC1akSrS^{8>zYUYATCsx7R)SvZm^#exdq(4KE+j zXB6_5@k-v3U#C4jf8*2qJg`gjwY~yBD???St5;7ialuvTiSp0M5_UDW(`OvY9+6}}cUr2( zRmbS4cn1U3khp}Ib*~V+>`gu6SM7snF*dy?EVXewNnZ|N@ID=2#Isf9(jPxsh2Gm` zEX=prQxJ4WG}r130cN>_$1CI~<}R@t`+Bcl8u`XkrWa(TCOz+7w!hOi#gl#O0*y21 z_FmiT?Vh-O(}WLP5d0pz&C>CELM87}KAT_2I*_fvdqI4@yEupbT%{+hlqJn4G2+G{ zhld}h>cg5u>n~*Sdr~ryrJS56q06DkhR`Ek*;!>l{#3wCCf-Zlmg<}?nS3pcs zI-4pAeEj0x$atC43_p$TpV(Tcqc1!Qb$eKrDM}v3 ztg$a+d=U`kW=z_{z|t?iuqF6(19X_hR76DFHSt$T{R#*qyhrS1o9%-JmKC;O0#iaHcaknV`61Oy?L0A1@EF(HKuLGg9cqP2xKrr&o1FhM9vlPIrj1nb$+wV7l&W? zvVMB5(KQf1*o#1v$}zg22He*WTpmC>RuU*E<8B5O z-W|vV--sApErnQY&+}?dA8p!tsYSZ%qb*lQ4G!<+Za#zbqa*#9$ke*{7C5=91TGhE zZPT3-a)rCrjLsmYML>=_z(ikzU%xQ0z6<3jlD4O%z;X9H_tS%gJ~7T|DAnLqY|=O~ zUyb)tlExVz9QOebopWUCd+b)19pI`0;L_C7;&PkMDuz~QRFaA!ZJG@p;GT@a3=HAL zQUzRYU@%7ED{m2%f=AqYDO(=FazhLd)$muvN3g%)q0 z_tTzZBS1cL+TOusbJ8fQrLsCfwg*9^n2QAKM78R2w3g1Qulj1)Z(W&>-YWCmVz@6> zOZG{mxPe8AWgyg~0w}CY$w}#%>48uv$3*wOnx8nY*ETe58{tATDX{grCJOo$hcst6 z&@5o{5qJyNZkkTNUrR*rVr1KWEz}5asHm#;v%b3MS^w#r{44mO=SfX_%%EX zRv-)7eUnMbz&sM*C~gH&wJ!$hMTe6Nkv}N9lZh4-xiC3DYFf*HHW}KN4i9cQPPr&bN{duNLc77@^DwV;zO_5# zVi$YYmv=P;M&T~6^rFc|)$GM1ctm)UUr5_$_hV0lWT8&Uol9Sg)@N?x8_j7lbzH9Y z>ar+Grmj&^c@QpFwFgsIhn~=gSg390c;xHw_rlx?icw#ul%Zw(DZ8Kqw6_eps>{cy zD2*cc0wQyQ4&GZRN$SXsF4TlhI>&qQyAPr%wwOe(T1P|hwzM-g-dShq%eHm^0@a<) z9e8i{#CntWv^HGKS$U|J2Tv;B^ZT6c{CfjA-n#EaHL~(MM6nl5&l{}=pO+DeB{f@} zhsLIJz0pc<*d;@3@?FFia*yGKgbG3LV+D8xq@|5#6jafVwLqk+j7Av76$Z&PQGinL z!E*D-%V1UBHv~J|Ohf*bMD(4~3y~7~nd_2XVxIbxN^_V!#$87UYBN3SVjey4cz^(# z^dM>QT#57Hfb~YORoAw6A{Zs6`Fee!9rTIzURUQm!A6Ry1*qsfXto!_yD<;jY6Ku> zAM~`!>{L}wxl2mnE2e~o0XHWhz-3@^Ph<8+m$nko(1LHl{6N|X+w#5e0_+FCv~lQ0 zi5(=DXuTx79xVHs+mM{-QJ0|>fk$`aPj!=Tyur9u0F{(I+vkAJZtZY1EN~1uaEgG& zhB4A^Z(e$K^Ml)*SIc6HXO8qRJjd(v`_?TN57#Wpb0}zg@Cxeqbll6fb2B8EZ zbl~uNo$nD>IN>X;&1}MU2TN+^^A4_gR1nmS1=>5uL~mpVw<*lud+&%AJVi3a&Ir$qFUFD6!aZNZ;TSH0ff8~8*YrSr7ClhzrN@-8sGVc1n~H*eY& z&m3%-fi1{=tl_C>RnTg1y_5;z3pw{liartl?2_rm3$50v96(yC&H#RvsZ4KPSct$z z^@6@mT)YZPhQen_@Ay=;A&th6pYN;z_R*j%k#O@H+~(3}Q@uy$7WcN?v7a-P1+sYu z;FghWN}~~@wvK`>ItIDKO@l=BKnlD!rP(4Y2uw0Qu6BdF8yg*kpti=cs4`YxvDaA@56k5Vpur{ad{ z>u{M4=QRFKfdN+(mysr}dXSLbwe;;sMBZXJme0e7V6^emv4@|!;a<< zd%>d|<*C-XyD~lAV)Uk*!hC352oD)t!sVe6U|2m=e3!z z%$q57?ql9#HA7*tzSl3sccN>0=DD6~oCA7{yvSgEmrf~|LMm_D2&*`=LZ-J~x~Ar3 zu#-*&v7l{`AM^56y1+b9g+1pAH_h(6q>OM&l2^#qy7(XoHE;Au#8gHY`ZyvsLeiB&}wc1lK<&$OBy#T}~9MKnp*y z|Lm(0@+wP^`j~7~OD2v&(E%JDkkZRBB^S^Vk==B55+$bB_k^zUB_YEsFRBcpGbR~K zbcOYL(=cV|35=lvRJ~48?+f36(HB~fsu(HhtvsdU3CeG-wQbusqXD$EpXgx`^kA$x z<`F!3+3r6l(u-$LW6F>=Nsr_V;?CRxSvM&y7Lo>bWcTG9EUF!XRzHLe&czAWZ9-hQ ziF{xWT|RT9e0Nx$xS*mI6@y4oH+l(q7jS<3>>d{ovtPUf-eFojW>vit2bz5CSknsw z>)P=0=Iu0Hdp&&E6egtyrW}P4ufupEBd?byhE-e%fM2*E*CnbBDaA6r340G!tpM!d zTjLnJt%F-`rbf#|_jGms={yo1b#JTgHDU1#KM@>0MYTX6RyC}$Nlv57m`P45T z?7j3xB1_DNO^zU|FPq>CfBo*YjO+C-7L;&=afp%~YGop8L@&36yN09ivWQ_*4gI|b zD}{sd#A--Rl;B+k+U4_A4M6RB-gwuzxKGGlaXn*;1<1$#xX%?ZMFbz>Hj)6ZWcK56 zHBBs{ux(3lfT`PsM?o8GN7w*zXb6cCoe$IibDw-A3rvE`I+nOdzUM|$+v0jVLS!pwYCybwf6*rjYn6f z$1r5mU9KZ=Jk*pT$RwH)0)PVP3VfkU%Jg2KQf_&lTcJ@O=U@N{&neQG6&|!OEtgzJ zG!5hVsQ{veB)BEZ?%Q2i=GUw-zAHm<8saG9s*@3Od=?LWz5*V6rI)KHy5^>==3)7Q zG|+KVyqi1!z=d-hO}Wxj6m^(@D2mfOTcyi^HwN&uHsaHT{Ck?U)`h0B7tfzc5W2d< zJ2D?*RV>h(oz@J8 z86l9U+pA!4h5IcJay{;SFAPo=8Z3r#Y%Ez`^q81J6IP+rdnO+1x<00b8@=;;ha;K; z7~qR;ozxp^`6hSV+&dsPs>9yiU`M`}%{mpOli||wtZgBmWs%h(^q9is^Sna!r{(f8i6Ve6AX@Vtp;nHpkQC@sBv{WitdL|zR5J%z_yg!(!K*VWJB z*&l!~7PK$xpjPe)xN!G_ad|F9jWIuU}ISm5=11%&kDMD2dOVt?gb!8{LAuT=SZCk$xuH-qJ^Du)A zppjFtcbfjiLwxpR@zIQ^)+g@tibMm|4ALJOJTSuUzIkP_+2f^>Y-+tyX=@MRb+}}U zmUl;P4eD*p>q*HeWGJ{Jrs(!j#4UW2P3lP(tIQoS%N$|1`LKo3S{4~?q-1i3Wh-qY z752&CrEO+4%Slr~!Vy~#V+b=*zx~Aoj!Ru+Qt5Ovf~<{QAZQ%mzK-zG11>Wa7!&6h zm9&Hx7JQqgi2;JHzsTSf1)Si3{w$7NpIvkgOZ#V}^+vJBwASD&^EomsynB+)X_n zf|ukW3KC1oG>EP^SFkG8VA-B1!R{l?WVQfZK%&1yMy{uFCo3hn{d+6n(wE}c$M+87 zq1G<%NH)oPs+1{Mb2zm8e4@I^`0;a_BPjHV(<^?Kle%Fr@Z$7!|M&|6%-g40rIm20lmW*z}kd+ zRGt@Wn0Q!&>gEU<^M=`P_5kW0##I3@FwUvry<`gu(RUi;G#jn*!2H<^ObSRA>ca+_ zKd&0+7ykPKZVK)P0UVwApbq1Y&BNg9*e*rXgzREs%goXq2dlBXC*$zO*%E!ICeoWH zlM@Gpzu2J9j$I;+u(`0U{u*_VpYl;qU>#{YEnUJR%7`|cdU^O}qV1S>Qs~Bb0IeI2 zrq=k4Gaif+4Cu&tOm;sPDR(FhAm|ei zA67AZ@KT5B-FqhT_>F0Vuk(XwCXh^2l&SYXY$^hYaj5H&(i3|pPku2nh3`Fj021bg zm!gAk!nIcQ0I1(ndi~ydbVP+9k(#FI9l1Xs7vfmjcXefg`_Mwn#=jWh&n`Y$h|^H; zYziJhipz=iqX|Y6QF5V2{vu8_VagHzAkCt)p1U2C$17Jxh*0{}wc1MEErmVjy#|59 zeWZ-lZ`(7D5}u**I8m~P$L`B4zoWVF3W5m zhEeIks`OEhrum(4A#zI5tXgNQ)0Q=!9~c0lM2c;DcT$+w>uLt;7&dx`K_M4&!;>k>mY=JD7nhKwH6d9&nHUT-%O)$>dODGCmyad8#O%B~Uj z>eH@f_kjqmQKAdok*D~Yq%;5@^cA@VF<&lOlK>^cm81$jZ4 zEos=bik>$Q-DXGYDL%yHSfkCignSDr#Mm!GCRhy z_j+jy4dm5K>)f&C*}b^(Bqr$Qn)8LEO^O^&ky%8QA&i`zDM1qwUK)HJ8>Ke`wA4JD zujGxwaz0~z%@)Wl8p7tO8XILa+e~@#_+9WlMboW=_xhQ?r`87W3=e3#v{JUyG{nm5(v5wR-#9->=1UgY#ha?6ptYA4YG6hC=tJl#l4ryJ;7*V=-;YMWc| zXyU|n`_*kAENbML7tcN)WSe_O{@@ogN+=C~tQNZ`@-tOn=~o$ok5|D5rSt5Og)+Rm z#8g@lRmh4gJQenOd3e0(QEMZ3_sPs>w^!0rZ}ikil>n5;=%=;7BcY<NlE zZ}cNvIKm2%L~TQ9+{Pv932r#_!MzE98s?sbB7b0H`n^lmIxz9Pp^phLG)GO{+ z4sEU83W#d53h}l?vreuV#F*Yz>oao_P`o&@wT!3CS%a4np7wNKidp9gy%yR=bAN^x zRNibyY$2T(BNFqTb{1+^Pj5h3Tq2P6u36p7!V=(?iq}M+yds(GD1^;-)^mA_U&jjF^2S(THNfTO)P~3s-5WB%(bVVSYb_iMHv^`FJ@Sw2z z#U}Egi#bZeMwsI)=Tymi+>7mDD}wI5c|@2Q>Zt6qAh}pf6zuo3dIjZiiEU54CRH*b zi9rjWMXn84guQJVu;*{02$S7lDKi$d2{UP+qwf)bLpo&O?4sV!*TeCINwNi2wAxEh z#8fDai2H%pm^VFU}BLu zY+)3AWA~iJg!w&wl4_2N*dq%eWjrV6)Sk;Z0B#nn9;${L&$@+{ieh@6@&qJw1O<{$ z5>ZJK;uEN9SfKZO=@&y1NE!&qPieUja1S!4Lv`#)8x*5#^~^(!Cw4mOC2n*^_9WUbd>^md=B$z{UY`x*avZ z7JJIWEk{+}n#O=vPfRnp5Ss zQDfm|74a1~f>s{itAszRwvxX0fC0p^)u>k}w&1uXSsf$P93%7co21W#0LxjhAkR1o zpr$VaP@MN3b9xy%^IXKaNrXeGmc7+d3gYzhHM=0>< zc0GV2Oo&$SPBqsbUfc8m$6BJ&Vq#AY_R z6Ii_0n_8$tri_77PchCOPtr0ZG(go9{yB4&3+-`5ZfimMbnC`qRwm~tWkYJqtf|aW zvmusgEZ^*VX%A5E&MEupr5{?@a8Vj*Y3TB7iZDEm>|M?$O(|H-Gy@_kP``-s8^C~I z$1Ov4;D^DA=nKs7Z)EY926ORvvgDH zkX}GW{y+qs5;~Vmh+lVfkSjOV%Thgt*9S`2BTrOL>h38H*Hf%{Fh`Ez%f=*t^;}_d zMu3NObTyd$-og{z*r+@nC(BirD;!7r*`LOy4dS3MqKgRa}K_#9C!{cr{QWA2kvsqFRcL>!aHjslB6lpr?&NynqM;khjw5D7J z>e}-NOQsxYvUi|$RDk7XMSSBOY4PgJN)2*2bURQ`88IN&05ka$e|1NuVs2Q_2bTWg zUj7afJX)3bWNbC2s^B1Eo+Tb3`@94=(gJIxKK5sLa21_E)QW#}1|VnD_{g}8FW^MI z`6TOohKXQ)vkDzyHL2i=fT@h{^zD1%CwK7#S|H{CbiL(SIJXLRhPefrWL828$2tv2 z;{x?_2$Qa2Ky)Mo#lo%ZcRrD9j!RQo=d$w)YwVHNL;&S$YcZrBc zXxQr2J<><6$fNa0EF8ipAv;UJ)0CjGi+t;C48!$9R^0wY*H!O1%z&;f6qltumrkjs zV)w4VBo8=rY!@qD$l4nPl@~EiGa|>=gSIT#kgTlb@@<3fGghp77Pxw#24@OOZxO7% zwydzW8J6eA=E@u%mU7S>z$h;|-2hehnc3s+s|D10k9<6geIMz`-8w2MvK?A(-zk>S z)GkdEgrYn2Eji9*$u4_g5W3?NSX5w|m3~aLFDB1Z*7_(<~WqY<%H}d5*Z0peWy>!Eif*32TmKr@D>zF~7SH9$} zj^fEGf-{rlrT)ZX7e5J;&wz1*v4H8krQ42B@x-aiuP*7jBB+Gb-<3TdB7Fx0!xai$ zI9lHPys6}6TyGjNLsMo((%YT_=*ODX2E>Z#ROL=yF04`BM%xSEbPFnCsO%9<348fe zo4Mif<8+RP24WnM%E>`di@VQ3MtgZ>U?9|P1K??BiYnjh>_7_uP3kQZc{C=Y{_~Ap zhmXG|^%{uIf#|V%bulbE8@4UtF6$gCVnevyu;l3?0Kl%RewoXwFEQ#pE)utIOVrP( zA(2Z^dbUv9mf?|CF(PEw6A86~YvFhE(8blu`5HGC^zbp%Mt#~_a#Ll7FNns{lZ-WlS6 zTe|KP7K<=4D<{oHXn7!zEItEPaC?B=$DQUVb=)t0k@gIqiU%}TH1N*V!KpissMv$b zNiI|J98Iz_ie_m{SJe<+uZW@c*jcEdkukX0L#$8I>@#$j?K-@QV+zS4Rm5o`@RkxWG*tNKznG*fx=RdQv6hT*_2 z)+q#PJ(D>h9)ev2>fQQ_HaM>|!Z!=c8H?QHRF@FFX->`9AasH{keLs7!VGm`zzfeV zsvfwMh}eU6<}J4t36|? z<5bvtpw?{s%B9+So}oD$!gQ=tV2LwFs#%2sp`YPe@R5b(Cck)~g5(sSGHg{Mn-+Tc zPV~k-v|beTVANwm$qBVKq@uS1Oam&!v_Oqy4`OKN;nRvS8B-pisql#x!v(ja=^%9V za~|s)8uF{@=9DqK;+)=XAx6%5IPVTGP&$*}E|NLN)X_#-fiCvj9wm_=ZPW%`!M%ZR z;{201rU@yi$Q(*{)L9+S!E*(00jcrEhxAT25|vAma#rM=Wni1#VNbvShKpn$bweHq zbPv|bibxTI+rk^G3rAKWb7*7a#E^Gx4@!4w_zcg~XCB%K!E>tN1fO?ARBtz?22kF! zVivF&f#a-4&st_9J!_QGXL}QHx!l?-$W98pw%F8O>-n-n!}~d(6$Wi(JtLI#jVs?b zqF6*wIL&Hc$p-x)GKBMH2Ru#Di8&%|Fn z&;vmZPt96;`z9r-^juc!#d_k|Q76D$r97pk9;%!5(sxBT1`W=K$mk`SLIYfmoh`df zvBg*eU|x!%<~qrvO%WUFTDIbZ zYwb%Yk7&}3TLA$zVSbS}r{_>QqJ5VxPm(O@NeZks4WeDMgx^ys z?J*aHd&O--NPOg{))}i(w@`MX+F})ZU*sWt-%rYMqjF|}^zcnc14ftuy^ckmvXmae zi;e3SIEIS)s89z>zGPx|mPrd;ao(3@0MO(Fuvr-hNi)U+Ly@ebl}_HgYQxj7=A@5Ah- z6pw)jGNn*{ejG}|CDsM|fC6jC$&6LBbYZ+Uu_wV_vavgA`h(|keSF1WXJ(gjoBdov zn)04$7y*g5Svni%4uiB$t7@>Z#=^OTrITUG~VPh?$ zDd6|Q*rU!puEvu6QNVEF2pPmwsw)ewaIh&uS=#|t&m=~npbevq%38##Ya1qHy-_KJ z@=pTkdtXxZ^03}PG#K`B=Vj2oE#qEKJsGETG3NJx)o1r@VOLv;WTSu?m)<^`T43!b z;X01h;z#{RshuH%2H1BmaC8BCp z`Y3set$`gBtQ%P81@^pGQ-QKTp$ix=qQg8svj`=nz-G=K#*Gt?n`nnw_V<4B!E#X3 zN>Q{!)0jdAXX0Y;GaB2KSOT7T=C|}(Qc2&E8|N`*%{n@nD+OS#gcw$RQ}`KO2-OY< z(Yux4@dgB@KI@#?2~orp!dn=9>w1N?2dhd3sGt=Zm%%wd1|7^0|kBQm#LG zojS8}A`SZ}Lk5?EI^|tGO@CJ%J#I$>9c%CTEW_hmqMIT5Iqz93Cro2=^u5cXAv&eT z0x+@6R~|bshUAm>W4ra8hWb9xH)k;Ctodx94VhU>fV^0OVJNmN?II;F za-{D)v?rC@Q3d#V_fGO*r8M?}%WZ{2WksLG5sEMHo~`myH2awU8S%U`TsV1NcFm~Z zlFOc;a2MsX&3b`3vDJ`L+RWZneyS3~kCGm%_s@Q~11{qXc|+r8p=9LPl#a^HfwxU9 zPU@$6_WCYNu!>?~z;M%V==$O@5Des)I62mR)A&B)%;tl2?m{n4depex+C5O_!udER zi&`}LTBq3)(!>r8VmHrXECamm}WVcoLccZVHWT#Xogr?tTiyaaB}%+iqM$W|+b z(9^1Sd>)B4)0((D_DUZNVI@P;qj7&*tf#H+q3}6{0Ehvb&FHE&9C@F-1-QRm#drMBD zSIqNB;c;}OamKUQ1!(18R9Sx4p*5X%!@=Q;WZ{M?g3(d#nM-T|v9?4+_A{`1tBSU3W zWHU=X{b@ALXNnVKgZcul_Hd=Pt3wR1aMn;PjR#_Fh&iT7jOB#UOGBWk6;m@UYh|kDs3=(s5L+S@p_GW%I8|KI z=7x!QB5Z&N@3rtxgMG_Fg+mi=$=1%O6t~ct6`MuPvJY^u+<^mb+I>%R z_8g<10&5`aq}w-{pJAul%wN7hO3RWQR)Wl;sB{Cf9`15h)|`&x>3Ao&b3#~zihBrt zgb7LEA{LGfE8MHS^Q7j?sd0}0hN7|@XY?tEYv|j_vfi4iZIc@d5>Pmsq;lb+0uOP& zg!4w-))!v*T4Z{cS-Lm_N5l6G2WR>EW`mQQ=WOjRhR!`CkOoAxx5AIKGoSGJEOwPx z{7J|j&ilPD)h74bm-Y^JuviD_XiCGb`aM9a5*zl8t$A{iUhlRf@Hr^tzDm4HR9b}9 zKb}+VHfk=D2rncmVZ?dF<JyMz1@FpjW@Q#;X*D2BD@xxox3Vn>H{pRpYVfY<|ZiP+wh(eB# z>uta3MWc5FCGW=O1n1QuTh6FxJIK>3tP|8x+I6bT2nvL~*Nt8V$LAVOrq@^REo{rv zrsn7y-wgzQcfr6QUXO&H(z&)1Z=uvZ$JMgv<^&>i2`N_b^kJr>}??a!_n5|z_!ChY6aH(R7u~9 zfo>_7p*|u!+t>VFv!>2U8Qm~9_kOWg0d9o` z+sB!%L-Pg`RB~ajJ?I^)jG7jA#Zqkt`c3FSJ)T`>BkRcEI?+xlT8W%W7Y&uC=_sIf zZD|f1N--~<4~nf7jFS%5`8pC?Wf>DP(oNK12#Cmc#Nc7|)q=*mguE-ji+sWkPPl8q;-t|7wrr!OLUPs=b6^NQ1BcY0ftzc=R@PO?@~OQ`!;WP zpEqRew)+cT-fO{=Hw-XNJ~Q1F0|hz*gh|mST~Z{Vfl^zZh!I|1?z(VMmJS;>foTIs z1{Z0D_KVukxs3>|yOfo2ZjM5{XNNS1gm5;G0m{%}&;vI!?e;mu#q%g!dSG{MSES@K zhu1*g)Y4~U(tZUAh9Sl)M$c7o!*6SuR17Z?I~{pg*@ZEeyq+r@adUEdHZ}&A`MJHvT24d~L0P1IL4zwEkKyJC=^_B>c1*Hh^rBj@Nkr9z9;aNZTG!1VU zjZ9Ijjbr=yZY}yK8PLv2gBwy9lhx35K;(Lr!Exupp*5jGt#rHBWR#`CpUXyRkxPLy z>m9#U>~BNn&+sHzHdWS3cUzAv1G{g9&~F}GER-!h6P?%3dPEcyz-yXpP#-`r2k1>4 z!`{t2cL1URM-<^ld3>ck!fKRdS5ll@UHhySZ#ap}JJ(Zg6dND&*|30`%n&-TJ?)%M zv4;!?NB}^ul^-Hr z$@TouRIrC^60C6RvD2Bn9_$cOH|HD(A~+Esf4~Ow;?))!(j$V?yxzcVPdE``yilRX zywc0hqm0Z@`(AFPO-~Z^P-I5B1Obxp#q*hX9Fmo>>_Rm-3NvZBG7V3PoF@Tvu`9>9 z8<~QVQKm$M8KnQL5d!h4#7}e9NV2K}hXoV~^5EeT84R%@HS;GLL7Yz&RZ_2zLXpQe z^%UXQ6qaAi(}EzgE|(`yzX|?6uGL1Jkamou$%x7f;+}Ga#elj>v>lK(cx}EsR2-Ll zO5?Ck;K2soLzM=5{Wx0{*?|$$yLKeHeP2I8UxyxQ^DOMFQ>{Da&Ej;x=OVyhj1U6z zXZhsX!HL`JL9xm%Ez4E9>1hpDI?7DT1Z&v+ww_W_DlOG5_(6A0@G90$$qbaarjci< z>VYkY>7Yjk)x{OTbiT-@;Sil??6UNW;*EA^zz0g98i~`=zDriC8f8dyZ`z&cDND~X zznlm~Ilk%V8Y@c5%Wd}_fw7+9Tl%J0KBHBq>2M-RNIIH2DH?$Kb2X7?!|Nx|LV{2y&M^REHiC^d-E@D;{k`{qp zLYjr`+AN}lD~+Q3SnXMd1gz(Aky?8+ z$`r99dp~jJ4)1>X4AUIQi1%LGd+EY`4hc=7KJ#|OlRET02farKuXlx&e5K~BlnaX4 zHyD?(+?rnD)7F6p3LQ&Fl+?kg8e z2e)|#yXGd%#j97@&@=tSYxF4_hDM*j&_F!8ZhOFDTz~~qPvnzkqffh*@{R_kO|2ed zzH2x1tD+R(e4BoTvx#Vm9;fjb-oc&vx>7oZm2-rcyx611DP(a?`n2XXK$H&dY#lvg z$FQt=9YLps(^2VxCkAej-6N@mu7MfcHjqB-7gEa6IJxz_U(=yL1#d48;wbhLz$qp@V1h8ee25hy25C~DheTBzVoC{EKDa8lO{2-7(GixOA7tu&k zf=$94sAetEz)Y=)JYcgTo8{nQ5`5qsDSD~qElX!zbiprcWyRebUS`SoA@BElcK9S3 z`=%+ayX=^;cxdeu9OJZi5y8StwV-8)vz5tx$WBGdC!vj?b9X{dfg&DqjW-;6_j3SR2~+g{i2GD zF}$p>V}DZ3(62yKnT4$-}Lek?QUT|{zaTvBa|+maq$l@yGky!I~dxYQWm(ZlT* z_tHRdxdsS1*GVgj9S#rQWtJE0&5Yb&)eAOu;RHVh5gtjA?y7PWo_z-IC_1TT_uyeX z^#z#`6tV7hXvk0usOo-WwtYv~sx|jqpTs<}3)hEm?wk%O)y>od2hY4JGAxnwS}x=X z?t`PCe2Om^2IRF1W)EO=|1@ykgiF2#mdyuUwUOM|dbW~}t&-qKjrGVw&T)imJ1Eh0 z?U_;_^vhHd6*eCBA|JW=Bs=u8Pp69-&e`N#MC#pcSaMcGpco=cr_Xu z59j3&AWc$B5msMa_`WpDqC7mlNhaolr^9|Y{d+yqc$iQ3LBP|JWPGN!Uba>^kwL-u z9=8loiX$5qJLDz0+KRm9vpL!)V%axwzQoY)ebLSEeJ%{}XSr%&R}H1bI=bTCkoI_t z#N0atO2>j&Xwxa)Nu|n56jm)?EOH-zCumL&oGeVzl&hb#6+DLZ6p6X_+y!I-qLn8j z-r)nj2NvQ4+*_r~HXa=>WwTF#Bbn^9-&D0FC4)_vW^(MqJQX&2d`;+csabEv(MprU z=A=^t!7?!1B6jGxJ}l*W)b*O8ZpOkSJ9Ouo>}8A6z_Zs1{4Bw|_ch7-Pr@mO*wFx} z{Qx}X5u3t^Xl$uQBE13c{K)=pN8{R=GGBiFY>L}Qk5*!?!sbJl3CX-hlz6&wIjqPdIsJ8vP!4o24_EDB5N9+vFr?&NP0Y4I8Rt3Kqk@ zzU;!028;2!MUXS|iM2+s^9=?-;TqLkIm_S;6Bb*ggS;p(&lj8EDC8O$=$&FGC2z-Y z-wkI=gBryz%7i!^oed5G3iuKNgrR4M|J|P`lO76hc2h0 z3X2yQ7Q-9O;sNw{uU6p2$ih;UV`QINDu5y)c&wTlGL6xUOYKZo(dc`jQ_vk0B3ZAM z_#N&hy`vF)&PPiS`^2Lz1mm&=>kTNb10u;P$T-o7mB%k`_sfYWez z4!zJsaf52|AZpB=}gTB@|GO}Z%rz` zeiWDvQkt)w@DYRPOAxVYL!gBk-XS&&5;}VH}L9y$hN(E@18R!tn5hE`B1`&`3?=AvyW`LMRM51- zN{KZ6&nwMZ1p^RY_u@H2^4wlZdXo7_pTsLP&K7UbZF?bp=l%WG`7^%3_(0?i#vvew zMz5@G1)XyeM6PKqW1BpGZ!-|Jw>Xk$s}z6%t*VbyL@rh7p$ocA`9T+pzVgBbty7nI zc1*52Nl?x#eiAVR?^PX$^JV#%pUb<2lu&dghL$~Fd5_ctHjldBQViD&u&wbMS8l2a z^QhPK7($W#OpThD7~kH_wF>ehTbeWt)Dn6g%(aZPB{Tc_3>@ASiD%1|(})i^nq})f z69u%plV8NUndul1O`}UtiFpgZrRx)mrweDVpUt@_(s{H>@Qpymz9$XOM|(072M1x3 zi#v=5K8b(t)8v_0%Xqd-mFoC-GQ|(sip47xK@|4viI_`9?M(qog6aksybFq&P>WzV{rew|pQ5p&&fR1z@5S4Ck5?U!Jy^Adcg5ru%#0+FYA@Pvc^NFkT0_dXJo- zJ|$5Lzk^3Fju{SCCrps49K(Y*;e*PF8Sn945DP) z5K9^wJp#hPX%>BWa*)s0-;=2nU+tpy&7aisz4r*8-Cp}j3m-b2G}+WJ_wK&PF2?uU*Tko6su5T#+?nv7kNC(h@Ory`Nq^h6CS;K zi$j4>utS=sI4H2y^;AOUk!nJXoeEYy8)!SU=p>gX05(DE4`ZI+T#dcg`-^-*7DoxN zH=)o_F~%c@ls9?hCu0FplS*unx-e>F1H3n+yD3unKnZ>MUVHo)IKB7B^|G&9rB60M+2^ranJMhuwiMm*C;J`{fz+!&Tdby>)d z8Y$uWncIy4+E^;ugBP2ROUwc__UsweR82gbE~jhb%zl!d@j8xb_?eyJ4VQ%;P+~dh zOTyQBoA$~;u!yvc2qL!4o%1P`b7|hp0|EkzQ$5-aw!rsba z;U`pdV4N?FO>Y?cC4bZZ8T0lHHhNe>x@xcJQC%bM{TDMX3WQyvQu@j6k8uv z0dJbHX0C)e;zr#|t0%m(j2Bia6_zoafWs^!BMx_8CpL-v~0V z%dDBnxM`#HnUY^bw9g{h? zfkbgwV`R=K8-T8mZHyCn24qZJYl7m1cn+nDk6P1Q7~OROo)!3t;F_$G;#!ystBwJ^ z8taKZCYq?V5xY`F&^UUIZB-`GV_2_PJE@xHbU2y(a%(C=GAv&D7@^3H2qvCeJJcyi z)Z=T@7XXEx*z_KrJvV#Y=Wj*j6wha9d$qK?hIKoi1PjMW?tBgUi^0yvZ2*m)+JplX22I}@TksJdtKuxr|Din{2yMM4EZ zIMhtRHa;cbpV4m$@f4!6(h542Uyv5g2uFKYPSOjtv7C7mZhB@-GBJi&|HHnmN+4JEy$27W_P#qx^~?9*xI*o-+v-*k?bT6YY-W~7x1jZry_+5JQajs=FwQp8?FH!7cWZ5!Q-l{5+rd&hoU@=(ap7Ea+S*602oRLwbv# z&}eY?a~3dbcn%eKOnVM7-o#nNDvk%$=@su;zF{SD5Gu|p&C+Pa!5AZWuE}>VET0;e zzpjwav+RSLqu!wju z{Tdt3OS20t^&w{xnS!;Tv{D+gqgBe9Qn%XkwS`eP>hL#4+)>4aXj>l2=)wT$P?st_ z`$EPbpZ0q$t{~T4JK)+>PkJwuhneC{Z5U`F=Mf#2_3M!Yz`kt~yh`*nqSnQJg{T_n z^-k}3hL^tL2D^4m{VQzXIb>PPTYV@c1c&|njhYnx6oTVZT@o9*lz7WAO$12_ZO?oN z#j6bVkzQx-ogBiA0wRI(#w`O)6u;hYeTan2Qf$?s+64fJX*Y@$em*VyonuIKPZXPv~K`ZwI z%RM!RPbuK{PEfmQ)iz#+IRS5uHi#vr6ofzXdiA>YB*QkieIK2Oks*u8HKqF##xW&2 zKeu4Fbcu90ggqbw@A0#ToW(AA&0=a^A?6KRrw{kQG%*Dri;>DYKB4rWY?hq z)l(@$!UmxaX@E_D~(8az*i{PH*&wdx0GN(nrS_vyi$bRXD};M< zN9a+IvZwm!stgPD0%4gR>_iDrVQXC5G;E#9TD(DaE*xGW95MCQf~|a@fkpPCFXlz2Z81Rn23A7QV=C5Cvnh))GA2 z-P2eEZD_CFYEaHNLH3f;Jn6n%0<(2<3*~314ho4%+nwI*jp@z;HOf`Of&9(@ZeGre zgv62vvrQ&wwJVEXR@dHKNws^J^Z5HS+ML#Yr_FjQhob9(9SIj``;yg4a)QV0Ia1=R zrd4o{a84L8{gOja<}^@MSV%rEU<7`#%QY4nd;=lI39Fb@GTnKRbwkAxtfQ#SfR zyhmfsAh#}QZ#mW!@;Tdc+M#=FGss2T2#@mmVz#Gw-E0N!9)ji|q>vE9AO-=`OGk(| z$vB&(&ouMV5wt&l+}GIR^8PG}g=&WF!Hg!V1?AGX9-=<1GPM3F2pH&|x-H0HK0@5g zcw0{>0GJ*>5RmTkDa#4oFeSrtq$>|F2RUWrfr4N>=^)^mO8FGT`woz3;qFGyF|Kkr zFN17Y%q&zDR@1SCCVYc|NpU1!Iuq0BnRxIUflTMSSb~%wo!iRn7eX-rx)sDNxdP93 z8H zQ~($G)iraO0rx~Gp(G-)|7;n1T6kmyz12_D#eJ1*$Y(?&#nUfRFmcG;>cNRTD{8ei z47Az^9}Kr_^9?ZTwt%c}N%*HEO!rs_hM_E1Nr1yaW+Y0;Vdh~CYp!7$5pA?V2jHOJF?Z#b_jm-b<{$QxfZiDRZRt@X%1 zdq|{~=QOyheiT-Nw|zL4)w zIY%gT`R=h;yFC<&8M8I#AQ>Lh4XM1Ol1E9+d^|XaHYm~ejL(S?SoC&Z2V|^(tik!w za|sn?^-2hKyrd^v+B~Lw48aHOQDL;IB|H)jbm9PW6YRXW6s5D@?i-MkM{5I<^<>n# zyN6RjyK9Sa6E6)wD#6fn6(-BZ{L~Tl!1W#-_)5sZHleXmg@-}6GNG7GP%rygb67s$ z%1M;xLm4p(I6sO z-X1%@B)~^gBapBT@+7?1;$fZ4ygBPWLO-DUl!boAOI{Qf7`M@_lJ=a4c_b)(ixv*S z+?IY?UR4LFY~;O{IRrvV+)yLI=p2ZY&z1R#n*iQ|y=b?5SfuIoJe~;0;N8P~MH)}( z1|#aW=Ly?0(G$r))D42*?1(3rdqsJ1!!$Z}a@BU0@?ZqQ8bY_4i_-|@;Vc*@b_1PV z!4+08*%cHLdgqId=awZLsziyLD^EKtYxcB6*2WLF51UNGc6{5J#snb4ZmB-k;kf` zJiP5E5rL905iE7m1c}rHTs`ZaPd86Gi{`OiT+g0bg`0YnLr1#3;(OZP((up7=|i)8 zP;9B}(gDWdAcY1Xb`aPDZ3)#OC=ZS_B~6M~<0)g6zBXmq9Y#GtM&B5pMQ($ntb~{N zoYh5+WuJqPmzfSEt|xKHv-a}Qgn=k}I9wZSh6>&JRxNk{Qc$KTg1YS59!_?m;bWYP z=w3ZDk3)*3Z0S>}2V=m%(nIJ{6|C(9K$GWPO_r9iBCkL(R2kmW)PuxVi6T3s6H=pO zWdzb-G(e>=1vj=YHbJ~;p^|_m?_f4?{Yjt2&%1^tnm2MC7Rl=8Odk zaY zYzI3gcvaLvIL&y~<1N=uTs=4TTuM92xhg3oornN;)WvmMw#lx@#Zm^724Wx(<%p)6 z<5uxxu@dfdy`)=7I_YsUx9VH_Oq&Mtj9&d%0fGB>-bn>1i`At`q}kO>U%jfE0e zdlHvb%Hyq7Wjwz8oEsuaq?+Llg6`Byb9Qc?*(i%a$BCf$vtwO)l~4vEV#v_jVp};l zP>jju8|d+NNtYAe&)a7|nq9@7uogJs2Yqk)oh@Tr$z@xT*2CFV9rJK_HF$ftM}er1 zI~tc7o=b;Cu};vpcZ7a=NBR7@PnsChEg3mVswZ}|WHQ#n*=c{lYbqpz1IWYNTN6u| zQWY{qiW;Wr4R(DG@#M5x&lXB7JI%Yv0#Yx^!(BW(+M-b9j^3)OnJ_NLiBYYL;}(-X zp@)u$a!lqxVs#a{+nR@zN510L*`S`76v8EuqYn3)o)j%_vz|me7`DaXDfUI)DThLbd2a+EPy3n6I&n5H)X(A7 z2vFcg68qxiqYI+Ft}^1gcz*Uebs+gTB3s_17gvKF$Ldm8G-|tvtM<`}zfL=v&*4?z zGmvTlx*@!$$Led3@UR-XPn0Ga{joo-_6Z;CsI!UD>v~?-T<2V@^IpHp;Kz_>(t`mt z%bIpU=V~XO4jce;4(KSr{>EjTkm-;ZK(i^gQhb3D*lsniMrAv#-wU%Dp0;<-OV<+X zM!Mhh(c3pd8&vb2h(F&KRnc&F&W9_h0yMf!6jslX85hzajCH;wx6}s&YLJeTlka+en-;~eo4>p)x27^ zc2Ax}aN0@htU-Cr5MwoJFx?Z9DJ&B2DT8=YoCPqBZfC}+$si$autcqsYNSQG$fl3S z40}_X;>^G{^u~rs*kjsAtjz_bo-y3Hxq12!qc&D!(9_axtQVFL^q{>U>z<)3bw1yV zd-A3UxIe+;bDVRF*h4hjCMO7=AP#ClY(_8OJ}&1 zCw*cAA>u%dXhYmxBAetF(lFk1(USD2nD3WJ-jXxjE(Ht*TB8)B2cL?0BSpL?6;b`% zz!#aHC*l?z*)5@^@(GhTTkxRwY4SurgXmfb_@cUVr(VGS~frI5DCARr}~(Ilygd_1S0N$)N5nFF8q zl7$+fvEu=)T5`SfmxhNeM843J6NYSjdTONft~Y%!<6O|-au_b5YDj>kxgMsX$vm|P zuR%gNJ$BU-_GcG;>U+F&Rr0DM-YhXtIYrBEs&BuC*OfOmsJZVp?k>CzV5ORJz8iN&Clqr9J+j(kEn zubeU&wAP=nZdvbiEr^emztr4w(gO1G1Trj*Y*vsAW!a3AO~^EW<1KTRYI%xSK@|EJ zphI|X?FBBYSyOKAx) zqh!9Ju2Ba;`4kH4r=NI8r0uKBb$dbX$gB?xG2T*en9&hTSu=`(dAPNRHKmSZt@;ST z?(y^s)S$&BtR8pB**fK_N601MVV3|xqB{>uG~mon$VJ*GP(5F%>h;nj)e{{{TWho| zIl98#fU;N(H>3?^LkZ^_FnyT&>TOIL6YynYF5)Ggx5dNjcG@g@mNO-~r0nn6#wf%p zLMilutBPiB9Zqt4664s!Te^X~eOerNhcAsbxA@-6d2=qVI(v`jnPWn+m{&(=yM;mF z?;CAuojlLNxKkKSH;PN8n~stoHh*j!d3SC`%sXta`V11#kTmQy4JH%O_ylOJ3na;W zO67e<*e*)qcTbkKM_k!M@(B)mBx__=MBg5abli2~>MEZpmo&bssAtZ|?duJDTbvyN z&?h&+7Fg!7YMYI81MExktlJl^V@d*#IdNZrwZ06ZnIzs0Cu4+SPzV=!`O^GAfRv`B zZW+IFBYs@|sKqv~Z_*#g?o>VBW7-G}9b^d$Vrfng*0*ffG^U3EbU4oV zbS_>y8+{D%cf)Vlr+aK($4?(do<2@D`hcl*-u&2T^ zqXa#(&ls+>ePC$1sGdyFysIfuYe-(#8ca_M$s@fwife6^c-S)q0e99z4B0~JSx-T) zyE1lmAgN>k-!3%2gCcj#2G*swC|K*UTiO$8X#siNcf@b33$Y@$PP_+NJdU6y`&}F? z`I#&)yp`hF>Q%6Oq2td-?b7N6GPPm3`HhI!f>WYzCtgj)n^!+$t#?WwH6l)_TAI~b zo1H&gFE(VL=WLc3J#@)EwA;^%+4>f&Dd0#0a$c-OFK1k=@TXk#d-r-Or_~baVjzIJ zRJnF577nwFWGh)@a$-Ep?~KnC7(usLgaK8-_h?2}qv+K_86wwdnnI;+^kP51Sab!X zzR<*Z{$fn#q{=YWgi@0el9+bI0mCO#CZQMTfNqhhaDBX!Xh*y0XK zjtHE*S(Q60YltfxtLa636xPRr4d(SO{p?u!6l?3;@zv4u1e}NK+F~f14_f(*P1quB z3&;G(l<4ZIXPLjutb8l{Qs)Ne^b$o7`QaML4z1gT8(Vi-Y3f|=Lw#n?sNiWGIwHCa z3f5(u71c%d6mfE`fb|@{zBxe~>XNJ2m-SOJKpo%&BbSkZU=)GtR=3-B=!|mSi2=S- zhvBaMI;BQlR&0l+W?4B(i3|IIyQ=}ieM-hYqwdXE#$!&dmzOn`oU;7HR&=(`eEpRr z6m-IR>j>9C)8sN189*zNGrxNrjEJc_~kk9OPPr>)O(iT=nYY&l}D5* zzztfjlN3+HOffu$@@m{mQ_XXbJWpMX5NzP^+PJ0D9y|#OE2f|##|^IDGR2xTB5Y0( z17F$p_l$a`GyQEJu@QGe1hPmg`OiVxB-8}(&K&oeJ4V9LqP;tJ9~)29X{M&sjvzV% z@3faa^L%#Hn+K^SWkhdCl6%H9iE6ui z!9hy%;PG{IWe@|Gm27BkBIQi#bBVcEt{b~h;>!VntA+0Mz_a5mQoQs)q$j(qxC*O) zdnZ0PY|3$L!nN(NRc~*W#L*rrm@4tL%0syo94sIQRvn}kdE;u2kDE)yXdeJ8rh<_o z<9k^7g46kGBX2U`EDv)Hjo9axY$%-FDm{72eV4T!fv!9Zt`-+NN(SzIICs`uBfMw3 zJp6KPp$#7+rq%e62vBD|eD%G53Hp@HW6^yt?$x=zcV>m}-l%nHO*Ee5vX~N%l;g7% zrBZ2Cap^qT7g*1QA_0@%5a~SDkU(KaVTTd*ScYO@v7T&Q@ZqC^bzcHdfKjT6lg)eX zbFTTG!6uObC_3$X4gnCZ-ml4_ACHZ`6jHx546qjp;8vWP)z0Ql=T3irmyZ!*=3z;b z4QU5FYCl4a7_IGi}=H`Y1P)*6!Qub6^es-gBgq~dVr-b>(l^$@y=(;~H$_8mV> zS)XyiCKrp4nK~&NL(46H#t4h2`81n{H0`3^3zs|<9V+X1pz9gy_iVb}o|bcG_C@ZM zB6?ll*}hkM8f;rL?SZb&QD#3Qady<)#2MNEN_c=*J~1U5&a-wTK0|~VyQv2oxT~E_ zfazuA$^KTAJ;0yM&6~AL({JhcXTC|%I%DW}&LM7s9*xlNiUbwJ#L{oO883`&xk-Qv zNWoW)ab3oeor7C#R#3}@;W@p!LY8H&BPvef3gkM?9xBUn!SHf)E2^F+Z)BO#JG+=} zloXpW*G^mBo~9+`>p0#n$6gkZc?UU9b>=mKX8=cx0X&Sb<|5%I87h3-k!pxpSb?J% zLxlPsO;U$(*Qm>`>EpUFW3r^P+~^`_B}i~XovWv`(l{!QsWV121J)7W;~v~DKbS>} zj;`mG@AS4=p;eEBPFLSfhyEPE5BL&zUtcuw`!n2{D%FXs%G)ay0DOE0btPk?om7Wo zJ@7cK()Q~4p=`DK;Kh^hwNLTjXE-iJeizLfIO70d4+bD+9F!h7;s|D;Jk79IUFgH_ z>7&Y&9tq$)eweSlCCbL?%Tv zfc=`;<1l74YA(dLmV6oW-7S?Idb&AdUm1j)c)E355>6Rak_KB8O%wYoo($o(ATh<; zVR}lCz4=&Hs2;sEBR)-HF}8<-+MIA zE$>B-G)G{si*HgYOqQlf@_YC~C78oFo1PeWEK8t5SLaTA?QivT1_If*}lWqeIU)c{ca zRvz-|j8jMOIWwHJ4%WVEivv$g$frMZPeZl5R4afS1HP6J69jwx_C4F11?bH4_UoJz zQ*UM{Z95F~gZEBaTEr0s%!OZCea~6SpMm+kh`qb)SG_4@&-MoQ9YGoIa}%!(ly*%R`D~Z>E?%qlmU@-x9Q;&VeLnuZkl#p&jvX+1& zruK#((Q^TNtijl`IWKfe_oV}iXScaN8+iOvLQ@!>B4)%r+-Pg#_@I*kQopsQP9IfH1X$-t@uw-|qwB4(AxsD8h5Zbu(SDv>u+-pwrCM6tzoUCvw z+?_V@ApI;edI33gqxvabskYBc^hv@8xLCm~x`lLXB&AcR{*D+R!z&UoR)*}(;N@K@ z`XU+lnys!JKjcWshsbz?qs-c5l>Udd!@ueWYTLiE#D&YPZ8-m-j}`7{?JF|wy9c%l7VNQ zIPf#p<$^iGfbd#N0-enh8m)9<)XrmuJr`mVbnjUzj_!Dw?}eDBEu}A?WQ_VcYWbkL z$5q4DL2wMbQwHYT$fcSr<8p7((HfH7r?88A;#ZKDLEXY}-loF?VQZ!#qPVxXeUfl-u$6D5w~X98i>UcvOHxUgI9v4$6z`?u zWjnuTxX=5(qh#FC0V~@{|3UiNzZL6M4cP#Q;J@!YwivjuernqyXfsl zZN+Nqp<&x%4k=`TxH)LQLAFvxErG0#?uEJI?3{d7Rj##Wwu)A@Vt)4o!{3`?g@p`l zH9!hj)2fgmJvfKgp6|%eHv6s3#!=sKRf{CrnrXuTxhOED_8{btv;S;r2wROJx{OJ- zc?v1Fyy%E5Et`n8d`Fl^1FtqKk{;jPDbwW0gqmPa)}Nus(|L5K-ir zhp!tE=cJb*Bc#fLmJ|w>Ev0bc(QN=4=F6s2ofqU|Ic#=sK;(7ZYat%O)0Jb3(R&5t zJzY^~;={$E%Jk@tyL4wYSyK;WimM+kj4KxVE1hleDhM6OM|2~P=FnuZ7RB&NC|C7p zT$4_nUV2|VT1Mb&PH(($1uD~u6#8xrfcYv zeg2q-IVFbN&bxz%-&&F(JS-ty_Sh@f2c?N#oOvp~PCMA<%6LOuuX&5C`EuLU<>!bm zrxK?TQ90hzX8kAt(??(9rIVfc^Kd~{d{8IzimQqz^bL)p=*10C>9A9^KZNw}lc^eg zM{KjOC66ehWYJ2Ix;Mw!o!V0f54WCK4LZM!i|lHCkQ8r>;^@NWI2EB5?of6rT1TcH z>U$M#j}`F2wZ0R|1U?|w_S6Sz0BY%P0WC<=;IU6(6)Vo(t}z-AK9X}MoITRPy)nCX z;c|3&G9Z%ELH;as#koyaZ|!wtlelF*&Gd7JcXV6gtlh6TD=W(%MT$8u+DJqk6Fu6J z*kkCioUBG2qp2wfQ(}15;o))agKzVI(g>**b(;n(95XW_Ud)`w_5?60@9}+rsM2;5nJ>S0_meQ05qN4n9Zk z+H^^~td?<_<-p(T*+EgON(vY6yAHde00d?%z|rI@Mu~2$Fdh<_41c`*67qp!XHCTp z+tI_itl}mZCVPAjNJ}DLce6_p1D0i!a!MOe7+Orop3F zZbpb-L0?(vcy5g-cjBcAw=@vWjh+O->Y>-=O>Ngf763Ui^Nv78Mufa*Y(H=1F(vKi zOtfKvBX>p=4_?%4;2Pi0Hur4o-Ap7#3VSfJBIx5wm$-u(D{(G}xS5I$hv^1@ye;LJ zi2%b*#xR{^DXWebHSgP-{4;jvqQvnfvtj^Hu2%PYSIyXvuk6Gq3O@g0cBJ% z9v1_4-MoC<`(Pk#W`wZ;afOrLG(Ar`Sp6laD2KrC>0Wwhvdj?9jKleW(g|~eN!Q3s zGMC?AMwReTiMeus^o@c%uIfkiOy>=!of=GVtq_MDvzr*`;zt!^b=Q`a!*6WL3NuSu?@CX)%Z<=nnEi zZ#DZUs9#g!nbrf&u(9Td$#bcvIsO)vp0?Vkvzl1VGHX-7$UMN+Mt03&vamd@BEgNK z@mrJE&z@5mEfmMwEYmlKtSUx!Bf@HQrG?dQC6S+Yk06dSdEu z$QKsjEF$&HI|t`U#T1Xt?n?uStc^e?!xzpK9rNDUi9VLldB;B`#U0L10I!W)z!POF z2xQefoR4b?D&MnRyNyeXb7K+ z?#N}Zz2=6fS=3OVZo*KDQ%MxnYh%=V-CW9;=vVK(xYWEvO`f9aM@V}o9Ie)e{?x%q zg+)Bt5OIv_uJjy7rO?iSWL?JWTw`dw)64Ds)MZCmY6}@{i_(ZVxt+f&sWs1T} z1OO~@^YpMJPjConCRv0e9lF9asT+Mw0t`_llJ6aHRW^(WYGSL~a6R0ktGehGoo5`s zMAeYv&1;(yg7D^5LxN9NyTGFzff=7Dz^rE-2dWpC!*#c_55W$%@T3$wZ5{-UI@7nP z@H3QKTL_P1`w;jIc9=TO^LsG^z)y5m;hB^_FxY$kteLs&iAmwL4<)*WzUPu$uKaE@ zSwb=slkQ1K+Gr^Xi}LI;R%O4(?a915Ix-=e;gIUA+9YHp0=pqV0&}H-n_VL#nv1W( zK-Fu}UVF^ZZ9dKwI%IsL9PR#Yl#vtF zLR-s7Z@sjd75B9x>|e6V^vMLu>T1bX48V0KT}|yz;??6EJ9p>1g;(>nk0m(L8DO`um7sZ(h#nXs62O*{O*KVyrUlKV z_^|{FMO<>SU~CjHs?J@a1OneB%et7IHKc26J-tPq@}-`M>8o_9ndx3gOJ5W@$1|ky zjaz++!ox$fJYMSAdV{>QJaFs8GU_rG72}x4CtwC^BcL%+dwZlcbdCU250NP{y z`d+ef=T2dEiTLt}(jfyCk8_o~?vRx!Fk3h>P8LW#LSx6xOshoMc7iHt)IsYtkV*AGW^xgr9X}0|DN((sH^_Ex7eU`^N}5!i-A>_0ORO)X zxs@D*yl$8_An)}=Re&EqKq{en)uQvf+fz%TvH)MWZ`L3!CL}g38m#@2)-_(+VVrgh zWkL)oI zwBpf(_N_x7ZZMHbVj!zDmttn}=yU_eX(iIFZYl=EUa)WliN(pk7?Ha@Njfes4m zUQw~psrd*Dn9D64Y3fAY8;*VM1PJF;+6y4(>vwv~%??^kW!zNGd1dhUiBd4Ui*sdh zL)y4p?&P1o<>dog0D{7)t+#38ZIjOwcNWZ0{3$HR zOFP5Ci1#V3_Zi-0F(%C2t}`3hZHZdXftz3QWDhAkG1ipXoi8bo47aakWUN-Q zVO%B%A%q3yS|!5fJ#NBwh_{e@dqbgEUGZ`Um;!Sdp?lI~JbPcYjmO&X%Ag9=q>QSU zmq1*rCB&(I=KznyDez85nEleUJ zyn)O&m;vA(C+w{MtcJ!KCFcI)Nxc`FFb#IT|P3)D{_ zxNs;e5VHC?uIO&UQRs!U#astCcvVsO=o**Q3RPi?6CD# zk)1js!VTUi8DL%pzKbLS=_ycgmlGIpZQc?SHh^X~M6bd}$E%aL;XYT4*(PJG9*p~B zkOPzK7125O78qSrXEhX|s!QFN#|aRJANQf>rZQR20AyD$%LjyK-61u%5uMzV^JfonR#I{W$gizUe*4(jqm6^ zjyEvRLtdDh%Dx0B2|k0PPHz{*sBLx?MJR}?nIo;v>4~S?XFAlk*8>#)v%#^y%Z#_ zI%enKyr(a#x#bpBavYCaY7LwO8cPI0V5OWR`j^#KAfX@?{$pH z1*=9lDUV`D2+3B&R`jclCecWRg7tr0lM?O#nDlCQj5PVCNtm?@a zMi)GFj@)I$PRyEi##j&-wJ_>pNi9^sBAL49KYh%dUgY^pJKPt8@3}_uyj#!I#e8mW z_o-D!Y&7!{g5=Ra*h3pFb72b-4w|BtE?lUfPm$SY$PagACyX10kVkkOM{giy)0p||`Rre7tE-a(yIZE&%;9LpP% z$6fL$F;kw}8#V#6qhW>gd-Ldc3K^TrvW!7VtrEdh!u4f9dk4f*NMj`&Jee5238^iN zBA&u7Z5X)9z+v@3p(w9$QtnKwEYW;bB7V2-HCA@?lqeR=s}6Yw1*rx+@#pUzY>1Q zte+y$0F>fd%mGQh5t8y$ptfv)$M7gu>MW>l*d}cf zapEmR&X_@|r!F}55OqQWyt=g>a;|%v;yxL80#RhC;;)~*d`ZWeRqWNfCX#mbCkLdP@(4^TFrk`y23Z%0S|w^pV!~Ja;oNIF1!kBx z-m`0Un2_|I(A(`+zoXofIslE1bxXw76^vQT2TF<1&d$c}XM5rjp6pNW#k>f0Ds}Fc zpj3}g;$2=fvUA7dO6QtALD}m=n4e9v5_Y*!ho?>Wbf0Q7XN0FQIWf;ixhus z%IdBwLRg~KkxKJGc18SdD>tzEQ)uxS{GXK*Y9?Hjj9RE3ty7~lSSF1!*@j+^^YW}Q zvw9rwT133Wx+mb&Z;WKe=OKY~N%fpGKRmO`E}VRtQc={^FE;1QwR+s~*f2vb(dLX# zK?Nf>;m%?t5g%EF@7f`!G=Q=#xjf{jfgs@=NO+r(wbX;vRdr--uk%>V7o@JRvqON{As`U_~Inx>l7kdH^ZU z5*Oey%5r@CoxkFjdx$P5Yj5@tE6}|L7cXogk(vRSw#h-6qb4VNl5V`re@TtJ*pTFi zxzb0-R>)tXuI_E*`WxPA-F@wzyO4z{jflx~M?Kw-kh7W83gqcDjJ@{H6^6s-2;hB( zr*L!qd9U%n%GXj=6KYj8bO-eW4CT1;f=M~G4!uYjJ5CZ+SV z%cwckzIyKwhp2ULze0cW3l}L@s!uFuJ{vNhb8A7u=w3aNXXLo4 z;wTT6ym4$ngXCG@!LZAw#<$qua~Rm{0Sm*vi;Ca~;Pb}uu9A6ESn@nw2Hpaj;7f5< zcx!IQ6hU=_h!vJC>ly6CEJDV}ghG@h>*6emq);}+I8;?<}R_ zQB?71RUb_UiORB?Hnyy8@eC!LYbNo|@PG#%hP`JRWbumUk~9=CC+v*MLKLXpO=0-) z4R1ZHhXVRYbot5pcnh|OOnVXryo`dw9N4=oGuQmnIv0it*v2t;vgo7VyMN8#h58s zweCG>X4YPvYr3;pcYi&^(}8q*aAu+nG0f8Fah$_RtYKM`E|`w)n9$yx*Wr*wZ9t;0 zsTSq-^{Y69xgvT#FwjLy?soGeJaJz3q=&_ONp~#{cR_~HA$^qjXj=TKy)Mb97E`Gh zojEZN3mla5nIOqTs=K`ERdBL;0aZ4~GpR?3kK5})vpRssc_h*Tmqy9)rAIk9BsnOE zdLO-X$W=s(TYn38IRNaA=3SbKHf(z%uNyE7^5w(9^x>J_r#n^ovaADgM1{kLP46+H zN3SC?3#gnmpl3P!{qx;LdUnKrksoou`BH zD%jaDFpwD0fGsNZUQRpeZCAlXzo%s`bwXgF{&2`En!W(*+zrp1;ia(%|qGq ztzfI37Z>={>qXdWuxZE08Z{1?f(3DVcg{1D8i~zEsb5!aK6Ec z6aM^#=!^{JgH!aESId(fXA0zlo!fnN8W_>qOZ6IplJ5UiTT3|Ja`F^Bbx?C^DL5-U|M?2H?ugo0MRhY)_gE#&S2sZ zTX>$Cpl!7X2VD=~wE~RKQ>|D_OP4z4i*9)s8#Qik=0+hLQD7#|$8Ib0vAqK~N>TD* zbO2z)r4tolm7F(!ZVxLBZw@VqXl-`eq9qFvbzF=tum^OpyRCPGjrH2qZeDC}t=2g> zt>@1?=jgt914|ce_ud*5`xM^tA{9Oj zN}x_1pzY051>H1E?aY#js9GpSMCt%UF>)bluyQE;C5ycDemOlnS+v7 z?W4+s>?K1|S-^u<{KPZHoDdv9PpWLzq@S!Gx0xeRjcthZgbUs< zZ?du|$CU%MaZ?$Nc#2ky8W#F3za3hhPIf07<(tBJS4=Js1mG1FFDW>t-+}k&P=TMX zm2E&$7VcO+EF3^`A!<6WJJGaTwa|D(z0Q#_Pr(oPXOkW0<|9(+fZKc3S_6ZYgnqCX z`aHU5{E1ZUI%I{%-R9F-6XvHoHPY_y`Rx?U%bYi#!V~?>b#GB;r;*44s4|b4h6g-| z6|iRXofuyxRv7^Xdf%cJE&>WpuA59k!zi~z^9%^|YfZNYhz016Ft48yX8}7pW6>m~ zS3c^{eP`hMvfo<-y?BfWf@`nM8Y!?&rb0MYL)7Xt5&JZlsw#*1Jf7Bx<7-eI7gXlK z6l6gFj^Y==Q8$mu#hcK{+V4OP2codRRHn@nSxP$JTqQVQbrFd~ZgQ@5BDLs9kt+pMd+28KMk^Al1O zXIG6lVD<}_S5%@g?->pAJ*X;orx2wl&8pKZPGhK{8J2G`L~I|fyxDzDiH|4%xrkAPe>V*XNF?RFP7hw!elb3zXLGZq>M+py>%(Xaiw+?joy_> zdAjrJ9{tJ1iL=oHP~UCK6U*pIYg6l#;Gl`uS@xcvz!txa3{D3idxpId#(EIZ?mC;8 z^}6zn$H}$j(h+tK*(u_7J|!(0@M$7;+Mve0qSL%-ziA#qBZYSYEd8_`!{(>&MKNo= zrV@rzX}~-_$ftc+N;)!iShNCX0TsuuJ6VlUZm!NI6r(5KB++tQFbV@Ze$TV^Ib1Bo z2ndQ#%>>)T@(?~z@m)QMprmUHxOWOZ(Npgw1ddd^a$m>Gem5um;ypU#3d9z1*QcQr zB8VQFcS_IwFrP))6k?(%EQZf?cb@=CK=32Ev%@YQ9HgfMVQ%`sZM3pU3`h$C_Hwvz+kV9m`KN6sutuy5!5*7fiEbEYQQ*V zJEEC)@?=xfiwXrGK)c@H6|m(B;p~eDedW)7$*;5DBO*J5e!()2ngSm@Ml}y;-o<+& zI_yJWYU>Xux8E2b;t}v8(+xOs1e#TTOPcrHcKvhA82^(<=!xm7Y$u?|amYbm_BbDF zMj+yjnED(Iq7EV9GO<|YJ9#@BWQQI{VZ#}AId3E3q#lls$Bf+)pr}POm6g^-hgc77Y1b69bG}r+I@0y#Xf9F?ZXi z9kAYS5FmSEcL?{F06~_ooOgW`!tIR&3ppFmQLLq+aW~f0=YDB!W z+FY}c1kXhE6@2@je-0d+Naq4x51Og%y`WNMVm6t+eS!Yk{EX4!nDLdxDzj;MwmP4r z;GyuVF^U2zcmqHu(lx$*9VOeisa2~918a{_&{r#;4AKLZi{a^o^eU^v$B=O~ zj$n)6uV!&{Ok9EtJyh~@BTwX-vwZc6>Dn_59!ho0L5TJz0k6sx^E?mqY3XuAv6tcl z!Qpr6k3>1)*tP+>cwFUL#-8>Iw^>RRFhl@%PReLYs#{!|QQ(P9dehc&16n^NsQZ-L zuB7j=7QXj_FmFwVF5g-b?_9^c&^(;BHBUg}v9P^!II3dm8-9}~%G__YdcGwXpQ8u5 z;!f|=OUXf7yu;aC@HgAyaA573*f$%B>~&ANEm zn+TlD2F>!GOPW8u@^_qVO#!c@=<0|JfO-W9nX7EE-wJ?O-Qd1A#oX3Yp^*_w{tEq_ zK;kPN zP&6*-t#tA-)4-!glwOYF!`rIq07zkajCXSka?sK8Q-lFeYTaW*YJV6Sx()zr=sASYP8Q8 z8Yv!+pB^*NY!2FPd8Sn*< zD$f|6vukvcRSJ072Cm~Gd846DbM`HRzaj!$czq6_iniN_r4OFHlzGxO8~29yf<^~K zDj+-vn!-QV(rpnjXFAieVx&O;EcjaPLWi zCHtYNC&yCJ9mve=oCzMzo+1tH8%LN-PUsGJf_jmU^HhOR*agi~C|2-1bMc{rw^z$p zDReezRrt$B4;Xl?d*<*+Tb^yav;sfC?7Du*PO`c1P6#pp_2|m$1XXsBjP=@ZS@T62 zD?a1cOx|{Wpix*44q2e7ILlocsP#d`nY!-CMF`nG?CY`6+t(!8fyU_CM2Z@BBo^|v zCLJ4AE`HuW3KFX_kS0^dVW(9vN&rhKo+!r~t4VtEG_T_tCpe)blrGVz3Xwc@h1sJap!&fWRe(Bk{h%_t>pK1_Smnc41Ve8W~qY zi)U>hgD?7f(*wn7UYU=MS&lA2NG*XL6t|(L#G~Tk<+=<{ktRues_SE^z(&+XRR^4x z@_I}oBaRV_Wmghpa;b@i!)ij5<;44Ws$IZh8E0M(pA(`TpsI@fnIr;8p-<}Z!_f`6 z#-3uKVz}HV!_O(+{xQWQG>gw39F;i$ zCk-4EE*i^bWs(M2&Netl8s+&)RPRAmV2Rlix#8wNQNByiinLyOZH$&^G{5(Q`;^VY zE1_#*td#Y(W*R6TO8|D&p4XWUud7bjh_Jjeyhd8Jgjfq6d%>t<_-?`bTZ*3}ilWhB z7^W`nTSbGs$_W{{?T9*!qE~9K8g0fL5EW$wXjV_OTnp21_#Qt8H$u<0vVmZr2(Bfg zeJO#Vo_pwtq%^9c2!mj-8`$lWtc&LlQzI?asl{QPH69M(1VnOP<3WY;Vy9$b@FKm$ z6rRHtw`Z{8j#u&~7YoK89JF5y7QEIcl?V^p#;1-57*dHdQIx%SB9i(6+?gJe5Lo27 z4;9<7?5Z~AA1+BgJnWwR1S)Lg;4JpZLla@q;=&XSEg(LQEX$H3pL+xptkq#0T*1|W z(uES6SYrwK@O8bi#B9z}KY_70=xAenYXWBjbeZCHKM!N6<^Mqux z=jdl~EAUL0NXDa-_Hiwf$c7F^S2MrdgNH$XFq^c^rx;m3eNol7Ct|LaQ2HS{hi6B} z=3(MvzO2_h!=VoPOsJG(Gu|`CGnwZ}Ht=lJ!W1=Lg;ikCVNG*rF?i|UY)YPg2?GJ+~zz3$-})q%|_nX0USH%bd&Lj9Ht zAaKmM`d;I!$7Ykfcjxp9yXX?ImU* znH<4%oHiFy3YTt3No07P$AQ>uC5VN;06=@1dw7C9fMo$1<0Jc&b2zbGr9 zxs`WQv;knRw2u+8ReuWd7Mt+#QHVLlTy)HU48W^}6|vp%mZY-YCpO}u$W+*mcYB*P z$KYi!q|0+je$B3M-?>pf2N>FEa=NR43Pbc6;LHRrd~&QDAj@4tBd|RhRBUwsfG(|q z!y8808Zd0e+qlmKVILKmJt`<*v$a_*KamAU&o}k#LGS7mID=5kU?`dZPU8kkWODW4 zvxPJ*&NAp1{u+6iW7R>cSXQ0$T*x3;4P4@QJmK2r1y@&U6;jEmtl=6+r;K(yAJrWm zV<^%o4k>=mjIBv82Rt$%2bMv*$m<~c5cE9q*q$708;$S__3o6%J=7`C@d}@)Jtld} z;*E^fU5=9(;!&w^4cTChy%#4<+g8x%lATj4`CM+yzHbaf;cA(#J54s0J`mI zBUvi${yg!$!BbLoU-gKR>z-$qSx+{!6&E#JqqFuNkaN*PpDS|rD~IklY9W1LEzm;m znZLu3@BFBrqf9`Nr)J`Yvw_L+a0s0IItkRb)6d&&L zgXQb>N6DT7d9?uym|W8>fk$f-v%>pAghUi7z}tf!M+78wl3KI~$K9N=npsXX_Ut)C zPsaN6)Dz6A*l4u9d~A_5^a`D7!m;z(g7iV6b17;(1{`=T#hW=AFSX`%gYLkYL&OY)kuXc#N@%|@$tqqJsgv zgiQr7nh!P(q42$4$`(5rqp*vPc(LzJy$psq&P@P&T?EREj;VFhbEuaPAU$Nfz%Ow( z$C^#+ffad`g zKlOkFEOv;kAntxLcSqMrPI^zANuFwlb7AYNP4?>C&LHf78Gy&M-RJQsOZyyej8LB_ zbH10<=7jcDxVh+nKq0K=8xgZ|%J97M=v|k19n%nI{3)v%um!Cin)5K6bEa=Lb5}5Q&5@dXW`kJ$}x`qV%Js+A^;Fc|x z5>4bXqVG^A-;S(;?SzE{;nTZK zK`y6r8!==9CCY^m84vV7e%=5?D>_a;Js%8OUUV0KmF1VlG~%tRy4cO zvlo@U`j8po-Ncz3h?+I`zz!?El;E!3f@t%0T^6_jyL*o!HBseLnEpBFXt~007)j!* z7hYxf#9qB&>om6q&(u#}>+?brEEdiVs%&ab^38y_Ie5g(`T!GXmAC0J2?LB1m%6?x z1H2e(OFZ9>fm{}UnL4Aii4oT0jP)vOBHyGyFwNVWv_H-7Lk1666wO^lWJ8_25YtCQ z8HjFEw0D~?Mg}AlfkrWENK;X(Zqk`WK&}cVpkAZHU=p)bps+8h`o)DlvVeLka?x3E zOEp8mG61-%4mbV9qZe==N2xC%)59X$E~tGXb3;kKH3l(3#$w z?Go@BKoTA)0nd5?!$>c$?vauft%1keC{ZeMA4qxvec6XEVJ24^o}sQJfKxMv<0-HW z@yNoHUECvbzVcEw&%oC*@g6E!8c~ze-4j}pv`qycjoy76oM0q_7Rm$sNc|m&ywLGP zZwf_x!YIffFBtI}?0}K3%vZsT`JLNlx^eA#W{8GI=oTTHY*D>E;l}p>32e5WsB`$r zOr-UNt#_#M5;MPg8c65-USsU_c~hkxJSI6zvIV75O7Q_xz1FEW)S4LN3a&s*;b-gMHa2Kl@zM$?lkIb3XTqWWL136<1#9Sw}yRDJOQ*#eun;>0$=v$tjV~}$mKeFg9c?`F; z4VI1}Fbwxf^`1W`&D*sndH{wgxLDO9Vo=g;+j_wejaDbMH)E}px}n}j=t;$|M*4y* z%bzZLKByW;lh{~hb_CQkk<*AdmQe)&E7_c*pxeDN76RKkgOVCF=XX|@u&7;xk@NC| zUKVb$F~3aJ$FedUb`1$C{SddRm#OJN1w7MXuwDn+c53Vw)mYol8`NZ*Qd^IAc%|&& zAU`$;F|ZbuMHo+l@+NrdcOm$$kFg%T=y96f6>mt&6ZWFObX(?tF*XcLu;(vj`1s1( z52Y7GS60q%Yi03DV$>dlLndgaCp5u{A=T?y8DQF=ERTa+{Mp=ZKo%S#-nEY99*wu| zd*dk^aFWERrVEz~L*>1w)ENV6NaD~D5te2U35pIgkFMg|Kk;)QO55>gFS=W_3g;@t z5M#(<9#VORtGkXP9H`~-i*?O6jt7~3{fVP`R7ZAl`YTj60?}P%TMc~`3wMK3ilH@QQ)JMh^^63c3qRi%j$Cgm zc919mi#MO-$YKV?oZ?8s@tQ5(G>ew9JEy`%SH(3iK+`cz&C^f`cRmehHKS#gQ?zR4 zL&ee4@JB+sNUKx|0NFx}3mw%ewM0BVH}P5kULs&#+-Sq^(a0}JXC&p-#YZ<)+tM1 znMzJzi=^GSWr#ssA)O_+ca`*IvgWf#5k&pUi}MCx5*1jSVW&LI8T+jjz)=%lL!sO> z!?;`-G($heR`$gtDZ$RevGoFhvh%1bhLborg4@31QI4#BBvS0^*H2~~_t_Rw)`O$W zHkLUVoW`68>%D&CtmX20H?n~O09cP*?H0yg-W^-bGJ`h}Xe;%z$7}-e5Zuk$xZH~5 z=`F}6venKtXp)7JJ%~sckvHjQV?>qMyz@2uz`w+{kf#;HAMV;jxMjUo}D28$cph7J9gM(ji0z7FQ`@YOj3^_4Ykn zQALDOLKMc{?(-YaYF|Md0CQO}bKi-?mNpi82j~n#i}<{Q26j3Ok(bL#dKg|PS=N*n z$IKh`fQ8+_2)>{KW_js;LL}JoboD)c6U}2!-aF!};FYAI!z&bdQg4yfI*eHgC@|h@ zFJs8zdFDaJm+xJLk9o2-H=Dk!-E3>oXxzuiLMvqk%>wV-AYgXEB(Yz+G&D!wBdqCG zl#9L7u6L_X9pL5m$}uc%w@wk|L~vA5L3f7=;auIz1@Mx3S`DE4OQuLZt%#;n8-*;z z#_|01WlpC($59u2$8)16V_t=>oU4uu&eGEcifKEcub2*}Om@H9{reo3I5{1opYP35 zPT;#bfJM*J>Ua1AIg4zd(LB+;6d&W-i4u3I7BN3HlVR5q*l_9>oKtlzuOhjJo!N<5 z6BCM1AW>NSn00`2(N9&|#CSyd-JoMrtajh5@+Qkc&WXG9H6)L}q@sN@^j_$cd8{g< zLGRhK@Jy`gd17x9s`A0}7}HmD&Y9Dyh0p6H^n}pUMzRq%?01L{T?0~P4v?NEv_n2_ z1Ng(kSvjU}@*znLBB$SrUE&=Ez04NV<|}Ai5>>--v}+Q6DRfegQ);ye_GQraZspeh%BBDO^S}P5 zIR5c(|C;d#g!bVhx9|-pUMXK^$a;UuG>qjlod%HO){qQ zTbXXwrCm4`$~IacrFTgyIlJ99qg5;pM6cc`vK#2Nu7iY?kb@vG6o3+TQxQePW)jFp zC7&~6_peGaA;?wofySFalQ-3-Rzg639!#WehR-sj%j>oO`QokjXn>GtfyX49hi*zI z#!ydW#cRW$YmT#?HxeEu$$;D%AC-Jh7Y~0Y%`SrJlNTcA5QvECUfaF*yfNuuK*5_+ zOiV2JceSTmdDDvnQ1gh*U#ZR6996Pwqc)R7L-0kCQeCL(BQ)`pJK-uB1V^y6! z+_eVuRTk+?m!fXHg*8%mR2auM0yP6eEyd4+7GU)_VwU9HM^W-=3W#CI-OjsvKf{y2 znA_;q3pHrU*F9*f-D?T2@s{Cwfd}_U08MCua^ZFHlbc5?2<+`#RPX%eu@BZOk#2Es ztw2Vm!&LDrBcB#M{mU=fk?J+G}DR*4j)2C#x|%?_e>@lQ}dFWUC9trQ$F&q;{3C? z+#Tt;8;{xECvy0!ypuTzp`f?H%nUVBV@j4v&1!V?@gwLVhn=-l^Z4aezYUmzCp| zW68yaX&v~*AHt`jq&+2LB1fWQ$L4yv?>C9^n(Q^t9O*c{lbh?}7pVXq0AgFT1Vr?t ztGuRRh#vH%^Ya73z{_e_gbQ**7eeef(LknMo!QqMs_ONs1ahzWzG}ozbKw>@>`{!? zNad0TTarpYZ3e*9h&7FcM6k#?efwTpkiGMMy(z+3kFO2;h_2|=RdRsZy$&Gg2pPoq zLA#_d`YGLuzy@gLE)emqKIGByx5o2TC4KtLC6U{!ZculN^*zMYI548$b)urP(VF11 zynu#2)aI(m1dW+$rRQ86G(bK(FU|z~rOb5;=k~;j%~TmrCOo_`1ds76**WdY)5CpB zlEbs4+MjiPT8>aEcot#zjOTk|DYmobg!Z#1hQ7Dl0j0J?NN+>uc6x<}xu3!zldB(u z$)Fce-$DkN@v&|0c^pBxQ+UoL&3;wNpYG$d7_<)_ z&-5T|pHOHFoP+Jo&k$-H5sw3m{9Xq8oD~ybkFpuM_!B<3!_2U{i>KjV-$*oq8Bj z5S^0J9hPtOWxqBN^G{~w(~&^;?d=m_S;fn|M{f1T2M?(;Y=3$a2KFAdW&%WER;s;P zDn6Ke=rD&kFxQdjV>57J`!Y3=XwjM*ln|QF+Fz6<~CibM9Kb4&|~KoXSG0h z^m9c2&OoFX93BHI>bL5?Ch{ORLG9VZx!_NR6~@Hv(JP}z6I|UZD2Hv5OSNVQXSBoj zRyMtwDt+`p#Oa*998NZ4VTCoJ)%DQV+R*_GKm4<|((~deS#< z)+H@x&=)M1=#+yf#y2<2M#cj-GK`p~ZG<_O3~1)7+j5qg5yonVfRkjE>T% zz%bb7g#$h-&zlZtE4`p2;w><4o%W*@PYeF4pFX_~&WpycZ(`K@ zuF<-$5X~S;cu1d8^6}FvYm0K(pZZT>dlPgQDZcOoS0OT`p{3ENia@;`L}`HOU36&z z+hYxcRtdfk==a!Oy_GsW9nQzPy*im&qJz)Idz_;?SvLC<0seNqP;Ooixs!z2=ky+c zppWB2Iybo#5W*#0m(31nc)u_(1d;)d!(dt^VURVrY@RX4_y)*%@we@vYx6P zt0$+|O!ko!6=X`sLXWoFG*i1gAWIKlQdUl_bk{{v2RbuWZ&*g5B%gY@U% z8HUcz{lK8L-%$vJYeY8vK5aC`mLY>9r3&ToU6h$ps6XW=K%1&+Ya8bP0o#-@8qnU6 z=KACc-b4>+tby)Qhps&WyQ)c%m?oEwYN10(67Bp%;nNBIT?v+;&1p+^mkbb{ANeFi zArR9%VuHJ7n%Zu5mioPEFL3LCN0?jZ7Kr+6Hs+~xO`>Y4(e|_Mc_1$hSrYf$Wi5b9 zsQIY*)`$ulH7G0t%juDaXacn|CeedKLZ!B9BmKfbK0PsxuG>0b76L}gt;aSDy>U~! z2oe37MrPx}R;Dx?HyYumA}B*zui-e_&(gKujFMtnbUe5&My|ffs52C@lWK!KC+i8D zOLTHrnc0=@Y8>TW_g<==ahfbeI15)xYl1@DcV9^8r#sI2Vew_YGI+4_M7gq+>O>rLvzg^vfX5XOG2^p62j-I+O1LOmC*Xu&_^`R63Nm&G81RO>E#vTX=i9 zCBTA;3_QoQp@}Q;3C^8X(~{p=xvp(OeV*_m&~UJp!wP*QeRmtNgRJvRRW^VYNE^`& zu*#a(ZZyzb5T$R5_J}1_s~g#0;(&X3cLq$1Cz-;#nW%kpv=P$6%VTpnT%->7CC`G%VYj@j*1W zx|8vQK*SZZ-g%D1Y=kMy8`VRM*9B7b7S$9I{lPdtWo~M+T^RB9xERG zR7p~^BdMb!-QPX3<_0Er%N0(IU@D`fCkPBBTG2n}>I@{x;n;RsTPjFO@8yI{k z-qZ3O{^>2&4+v9rb79K#__ zWyHbesSy{!sZ}aMQ`yhXOjxtC&tE6{sS${)S{EHn2T{Mrf*aD*lO}n#PsFTFVEG6b zIZjE5EOM@y2rFKKkv=@HDUs<0I>G_B*pjuUnqS!Pr|$yp<69QE-hKkxQ3wRWt0g14 z5FWZPGXeH)k$n}|34sjS`St4NG*!9Q>CZ%4qNGy6PeVO9V8b7dl(~7Lc;!w!1@(3C zeWJ#aYWso+E=!d-Mn*tqOj7))MVF+b5hU5k8Xvz_Bayxi1WXx-FLd6g12Y>Cvq3&D zrQOH!^w9e4QJs%%56+7Vs4Rt7&e62GQbQ;YhN6-z%aXsmjAkhpl)EOz%y_1-#WN0h+ z^kVr=xgI1BF7&mxy&9SVYRGYag3!gDH}A3MFHgV-ST#mAaiQ?}b_+@h7w8(~1DEY>NH; zna9cW6~CnH+tB{mxoDaMRr(lh$7ZSC(G5h%n7M>`r;FpJ1M>#b(Z08<$g_1V!qU%2 z+1Zat1|Mn#Iy6DeIw?MZSB8ogt{@ECY0#e-efx8QV$zoO?zlVl5{RMExW*VKhTtlX z7%8&9a_B92f@@ZweQ=VniP9NhyVzt~`3t2kMT7(z>r~dKe7Y<^-3Mq~0PvjJ7Jzbs z%o+!Q(Q0O^dD1R$0F9TIE<~v2X7kNpUY!YeL1<>7!wbR$8gN%x!yzIE|8FXiHDjG_Ac4a_rA)>ly%l0||O^*bN#agxVt-9LW; zEEIlz0ct2H*#BkTcBS=r2Bgyr5m^#)zYwyZ>@A=t= z-mK5#FP!|-v&Szh@dV@bo03D*%ep%k4SIV@mTW!XeIg28m43KPZ>;T5jKVjo-}Kse z^a?7yh>KrHakS;aIUvQvP&lO0)1}fwNRGX{(Rj0ilnNdKF(KP4pwsLP8MDuAVjkcW z9ts9Oc!3BS!d#vE`gscw4rms$kAyYs+Q8wVtFm;^(^!O^&&lL zA!Xl9_25L(aDvoF)v>ptJ+`FNPd6;>TKw|(FfB>Wb$c3*4sG>)VD;cTT`$T|#iqSZ zg;Q;x%VM($3@4DTu*;9gNp zjcspmAP|MCVEfOUiH`^9U7EDdQ!*Bh@-u%-^I{Jln(xa@vT0WB=rXsXSH|}+ERE=p zfe{^3FLmFkBAEwIS`nvMR0ZGK7+XG!2f<%>p?*&&N2S3vFeZBuomRJ`Gk)DOEVWp$V{PS&=PNJgV^`Nse8>#biK#XK~7Fv|D^3HZz z+p1k1jV0Fj^jgHN&Fb2)M7PxfR)b%3rX~()lg|^sp5^-90q$FKQ7rSc}i&M5o_j|K& z4}xW_1JE5`za=A_D9PO&W&;gi^L<7&6h~z5QRj^FWY6u zzzZ{vdYQpQoIVbFpSXSYfZggw2Qd=rI=*NyI80+gX3>#()fm~74{_UjcGpr2mzH4j z6mOuJ6YjPsY^KaI>>wWsKbumm&Y#G&4?Vh)J8bPiOn8u+$I=b4ERtce@MZ_c`JtQF zj?A4`)nIAd8zkx#W{7NX6Rwb<6f?rPL0W_KJ)}ik)LFQRFXX;2iVwA~9dR=VGe`i;Nk7%qrxk{33t^|))TTvC&JYtpCQio9sv6|Bkwd*Fp!_8%QmO?3I)qZ638Y(m%? zSJiXk+_!riWO&@9DSg(vAl2!Nil918OK8Omb54lB7BQz|&k=Tqm2>)KMI0+U&7U0> zcu%VYnKI_R%?)lNpu(ro)KtL+I*^P-LB#zKlS2ef;W#Qp^adW(m0X-b1S6Nbz4je7 z>ub5-iKKX#QGon?=X~FHPP$1(oT?;a!WO0wgv-@Y|tKV}LacvHSfu&hd$PLjg0Fo24Hw~*ZP=Z_` zzDi0!>Nk6RpvUeuI%Nrov+$}pqGp3eYYhWlMy6Fz?OXx@AC4F3eIochImqC%my%gB z^oX)k_c@}QGp7<#4Gmo26HK)kbDitJxgEh`xo3;B<>^_kqD!p|(4w34L^#(OMV)lv&A;1CW-ZXccY`i?>JDom(8bVkpl`EFT+T7U@QJ^g*b}&+)aDL%5 zX7%`OJhT`{-bIk$CxNhw)BS~Ed`}aGH5L!=;T*h|6lCdlBK7L2HS>VSLAYm<@KC&* zsVOF5XqZRfg)k8kPJGaCVEoh{fX-Q*L4+He)(I10`fn7uY{Kstg^GY)=DbU&2h;j! z{n0#O2HD;tbFsJVibltc`u6QrG=*RiUmK5eqt*+6tDxNSy#+1t3-s!j&v9Q9e{~Z1 zzP_GV>tC(0X^I}S_@pD|u;}Rncb>GE*b!24iRnpiZl(n(;0b?+Sucr!9%X5n535g0 zX5#Ab3m;)NTz>_P^XjK3ZV9N>nNm_7w#?} z2<)L{zJN?nzOs1KC!sP=f&lQn#smk<8jXyEc=?6n_q}$5G}jCAbcNA$>aLqo%e}S6 zR4zD%2hH=(Q}#F$8C!7~8~0Jq<-K~-vtp5KivoMKCn8d>vFpL(7-vbNw{*F`Z*{Q5 zqX3nUP|jim;91q}2+Y~&nug#T3(Xr;o!w6N8thugYomO)z-URtL{jg>oz95!x{~v( zpV*d=#M4P_;VSq-^83Pzx?Vhf;l5D?Z#FvgNXZg3(qa!3`bIl}v^ zD2{20_~ML*gNsINB7wrrl8)A*Z<6DZgXb_C;1`z8KnO~RHW|TWD}-Ym$czMD#|Ifk zscjRa>zUx&~g1KMNmPb@$C%=M$U zTV#!%hz)ogEh6J(o(@v(Q;E}*k9K6KVcXd5)Qg_zIBdBrqT##b^Ub;n(BVc4eO>Ql zO^c$@ROZhUu$Jxmr!qU^KuasK{7m~DPpz7b)GJ2dN38EXe{I$?-ZG<&33Z0nrt3Tz zMfG-WUpF#0Ub}MWdUM7;x)}n&xX#1~4fMD#G{3LNSCdD%-xM-vN9Va6=DKW$8KDP_FTudd+jiQugrh+_!c@%d@ks60Cx9_l4rcG! z-U82_5I7*DC$7IQJm2@-SuYM!<*4(8k8xDIH@bH`DO;6dYZLuY!jlU2vBPoSt7oxI z{CoiAZk0sISP)TOULV)1R|hfL&)&NkIHfTGi+K+Ww0=)EQQn4+(BWXmg(XS{U2{Ro ztZ2%b7Y|f9XpOmS_in5a&r%y&49Ga~_%<;iCA%8)g*72Ge z7xzTV-c+eHWwT&7-W212wpqTP2`S4sAx$d68g`v2nt62(*<-^A4bj!kO!6oMZRf7` zAq=qLpcg0%4t*emF933NHZG`%OnGJ#*1e&4p^opt(R$|HWQBiwEL zs(2U$+AF=~++nkPNNxw&f-?+oYnebG$o0ne5p6I|M2AnlIa`C=_pGwK?%GP7D;;DY?Yx}N*J$-184ZumsiKf65FkRzV%Q0c+DV-LyR9jiT`o+n#KXo1xj zFX0@NK1u{IHmcZB@ptNUdSbiRRLJEjB~4G)B$7$2kgarg2@I#XT@;`42`)sw%2c=S=F{IOt2Y;yTxzOaq&o6TO5W{Vo5 z+H`i7D+JSyBAv?Ji*mS(T$QJ?B9FS1z+?_va^+p+n{u#Pk^%!@-b*5`87Um@c&&>8 z(LkjH^9qYoJ8p;G7}yJipB)VBAkm-kt9(^Pc&uZE#m=X75j}Dy@o@X_j*3YRI4=wa zr|^I!ObiHuPa;hb>OkHg0owK`!XNYpidpzV&&~DAYxVo8Uv(4AgEv+u@v`k63z*tgeU{GGTOw@fb%3wQVLvyN zxj}*^wF(62XJ%H+flS-{y8TnZ_}SI;A;@;RvY<4!5H7G9`)(>AE!$w$}8{f_ZGZm~dbqm!R3pKPclZ}n+nqAool z9OLG&0Y6u0}LTn}G3R!Z$?mehOUi>K|>+PQj-&z}^x|Lk-*>1*E0 ztMi^!QrYx{8~Uxy<6<9mL|ZY7e7G}yX@ud+#f=Sw5+v0z0Cq#oB0Hn%2~D;KcOGZj z=fcYKI(7l@3uXT7bVw5%&)pd$J9Jf!>R_?Zo1z15V+`Mn#Hrv3m3uLOx41tRc@C;g5A&vZaxd)ZT!u2s$ z)~d2|`2z}7(ZFTxys)0^MB9xG8+!yelukZI8&@^5V9q!J2eEnFu3UpaJ%u}4?K$Ep zyuF`D`}gJ>>fP^+52 zOQ-k10JkCpAK&%a<5w~?`|)0qECe`N$Qi$Kf4A^C1lD6=Ae=LMDTeW?{g}@sY)XF^}0AsWnoC*eHt{z)ettx;RyfCH25lzqt)S|HqYGSOYrCVyl)X0HM_b{1*+r%~nlN~2B`~~PdOa4g z9rg@@AT4X>*^v94J1@OX!nSijt>CJtSTL`tFyNsw$5Pav31l02Ip9*|rV&BjGIgBP z$?Aq(HNPeY4}N+_G+|1$b(;7t?V@Q_9}n)>wg$<-B|-Q(z5wx!7@!>KQmofqsZZ?- zZ~N?xuaN9!W!9Jz)uG7(SXs$dIop#D>lxUDQ|Bg$tH?wh`g!E7JS2Ngm4FNGPb6Nc zA%iB6bfmPDUT{Hj2+_2gJ+@kDdaPZ?0KJx__AK~aUo+%pp8m{yHAO>dD_61=&5__126SL{5g%IwnJCe4p<8u zHI|z7;}x3qK{UaYw-UP0@QR%+ZTV%~$ZDw>h~OxHoO-R z-3eO!6LbG`6>mTH7lj0?3mu`6;;MNPm`+ha$J`V7k})*I5})@AWM5o5X;X~Uk)5rL zknC~X_Nr*B=<8JT0msc)k~A5PDR_%M&m;(XoxI|$5j{M5Q%3T%SSVqCdU?KexpRF^ zjnJBz)fOqnUFVUIL52upB1cq0QDc34+O1vTiYpK*Mvv0GpdruvE-b4X)7hO2M%#yh z=1E7T_=ER_dVF@y@)3PTA#WM4EAX>2RMy!VeVw>dQ)$=I zybs_-Z#Y$@w=Ebj@LOhNe;TqeYg&Tvj;{I%%XaNnGgp%x#i5@b**XvxT$P?E|Gt@q z<(1iRoCmyIPeF>bnfC3Vfb0h>x>&+9Aa-fuuaYq@FZQHV48pDvWiy%Ac9wMpz?nwP zwg#;E6&S9uILvT<;BN3f9cIL{RprtjKU#&}+hr`wx7kw=bVxMU>I?y9xr4_mKMFed_{^GwAkS+w1L~xP8-v4_pxZ9=y%c@q0og z?@>PceF-SKmdM9Rs@g%I6^!;Q$YoWb)S&ec@ghP**HLg`%Ll32sUvNz_h&$-5Jw#mXkbs{yA6hvoW(Jbq7_CbBfNGT2yvqEEDh!-Vf1stV3~8oq7-G<}NG zmsD?!j`U6BNZ=ga>{>l6+5#ZwmfR|Sh)i5+w3+y%1xe85PxWxj;_wQHNlIr^MS+iB z+#4A$bDH6&(ft#9NHxy$w=~3P<&bV%x9mkiZe-xao);CO%OWqErkBpF9YumCA{f>( zRkg)R!Wax`rC&yg`HP(yLrOM>2U|{W{0n=6Pq#sbSxiMl#9b4AmDI0*K*D>(Ubfjj zc^9q@Y)Tk;s9>mbOC2NlamRGJH~X5k?`^I5L<4hg$1dn zOkry;c*eFhiG?}0=QUp7?@J91KYj9)w`zi^jbZ-)Z3NGBb+9Qlmjmh7>u|izKT*R0yy%hxx5|(|oAYg`K7wX9JUZTO%z<04%`b;T_`z&AF3MJ%gz*dI7bt5as ztBCZtvu?!c6f_30g@;ytYZ-f$Iw370_j7JKvCBY7b6W1!aFj~IxJQ}PMAT!)5QmWG zkJ(@GaByTo1aoX2Uy4Yb?UAtrpFxEk{qDEwuUlu~9QidROgBtbl&Xjnx%~7dc z4>QFn7Up{ox6>_|+fQWjT~&_sQIU|9Z#Y*mCSA_93GvStAjnRgELQ~OAsN$PZ`AGR zm}T?Cp6IYxs>_}UO-i7>;UkXBSa^qzkbUAI7h%9tmAyAdr@X3MFMyp@;W?f;D}6EW zKD(~zZeNI|kiIxK6LNey%REOfdK_R0o#FoYJvfri7?lS%`#^z-VLR0n4w0knNPV`F zKtUOIGpO+HKrZ-3#OP`%#A17%S9AJk)7DEZ(q$iQxjJfacrSPJ8KfT_{hY0cGhf+j zdF%HCKwfFqRO@N;zQ?M0y$unCB4#oR59+w0XQ15VVqU;yYP>`5uDfmRESnWeiEU*v zdXOzKN+VE&{l$UMCkHtVr5e17O&Uk$tMOh+(l`Tz<30eQbB=6%kKO9B16)-AT$*}X zTyFDO#n1|kN>WjzO|!uR+>=q5fg!wDs({N448|yYEkmaMJGea;^SDz~b%me%fO3srI@DM2)E3GiBm50I!%oG#INRp!+bh7N zc6te?PfR%TDUYaikvZjcQBe~DZK}Q&jqtMoC1t(FPaeLf-nhuBRFR-vz^OOALQ`1* z1%3?=gB8evcHd-@GBA$>IEq_ARPBp_dePw|L*x&N?qs3`MJ`OvkDAsppiPGMrNe_; zj#IA6s|P%>mnzrGIpQp11g01Wb;J0m?u!Zg?9S|o zkSx?GxpV1@(fZ77e4{x{rjEd+G!5ev1=9FKe*{$7|{ zK{4vi z7jlo`g@g)0?_&jc1*D~oXB1S?kF`LgtBgh%#uWz1G*N(3@WFEP$;)6>-Zun0+e}0L zmPGWO(hHFi`uQ>%lKcsrZ8ZXr zvk!V&Wp=76r`#o_@D)=+!+@KU5a2Q}xu-Gvqf1+fXlTK=V16L&gl+j=cmehUVA?oz zqr?u9OSE1RUJsUi&230d^r*{Fi@>A1@fRQF1W5p*bM|mC?%tq)DnIu=*9%q_-iRfs zU?MmnhPrBGR<0*LIu&#=gg6GmN(E?6+lZchD_ps+HJjR21`zS;gAa$#-u;+c7#JCq zUIP%0t0#k#Jd^c-8U~>RB6Q&Jd!6qQS2*D-t<7x0b_Yvp=JO7&c~lV8jRo2}$3$;r z2e&EA-+S+f7Cc2V#pO{6y#ngtk&)8?%Ui>iv8P1zk}oD!cx}O-PFKC&UyMNb%g5xJ z+(mRHapn$-_?CD%hYRL0kQ|5GK&D>$3a^6dIovZ1dN@6x^cdSt4XMRim&s&@kE$hb zN~Z}{Y9WL_8Tstu>BbAK)~OtSzM{?mewL|BZ(dl4z()0gzD``c3QLBPb^Bdge(q~h>N9Pvzw%oCwGn56gc?aN@k!(t%5u>(_f-dCQ+?%sA z-51cqP*gehME&=)oyq_&>{P%ddh9*5aF-r=Y^krCLs9Ha_o^%^qM~M87zP{%GE@o^ z#H&$y`Uc8Nh{BROnkEP2xclXsBk!=IImBM@C`Wm!weGG=kGB}TDW@Eo;}z{)$_mVQ^N?BkU6ORo=+ROPJW+)`=L$E? z?!2Uoa7&U`$kw{}APF^a5i}mMKmWzdL+1-MiM0kLKe~1d3$k~Q5HTOu$W*bqR1q4y zUM<++L4C7*8^lHP_&$P;L_?7iTQeP_dJI~XFXxQVz)$y{*n2PQ&mq}p2`ua3 z$eyISQ0zdF_9r)=eP2ReWeHLrlZ|T0#8D_ZfWre)dO4=#0$L)no6b(6#Ps@}&{e)9 zWSHeel|gjIB!h{ruwHK(rVKrSF;sx6*GcMq;TtgeLJLwABPG3+r*u3)`OUSqZTn_4 zfR^?XJuHGAj5WtRf+sKA{pUoFMhC49jKC%+8i);6=okkTgY*R7HZ_|-7cEDSCV4(( z&9d$3nHbGb7}R!sGvcC8#_jM)OEC&N9*e5zDB5k*a}I6;}e_7cR(kiK;_Nv5arR-a}O@0DJh>IL2=4;Fg=I(K69JU7deB8%$-&@)JIq zyk2CP27E_;bsF6MwrpN)mqYRt)1s~iC>7sHG|tUsF^#_=Ab-k~lx^qdKCcXj&n=#E zCHkGpcWTz}EWo&4?_xm-M;M1F*`Zb@vPSfBTexdD3NMQoHr3GId$3YCC{L`0=oBDwpf6C?2r3g0aHZqA#NiH@JePs9#_-EA`08K1P7S9 zU3e6d7H z1cQx7SEt7?WYb-)BXB&_lp@F^ni2wl0_h5Tp-am2UZ7HLd7oRMQ6J}E013}2(wP+= zv@k7~Tt_qwjytq6yoqL+8e&-}ExmjFHpSOOUJU>}g~wck`Z@*I)z9MDAHXmcv@h$RR_+P7 zaQB08c`ileF1E>7vZ?#JFX=dKkJ$lK$Hv3rCR@=nhsV}kXY(C^M*$l2abQjC%#)`t#X``j-QwBvO2OnOt(Qv`cw*uYaNj+$5h!)1!Z`@D zFm391OpO`Exxk-1#b;&^AI*qred129NHkE*ApN1i10(G2n^zW_Jzgrwrq(N!w)PNS zhfB6-d3WU2px)NJo|K$IhJrg{if$i8+`>26q@HxK%G?pN%n^2*4_hd$Ws%WFN+x$$ zw$esYVV?|M+GbX>oHP|A9I*v4hA_E*Z2HJnIOws7;P;J z^XW6nD-$KA9#q8>)+sU3TA4~;&ria?g5_=pv zc3x^AOCu#-)Ywi?MQ{0DJQqgi2;JHzsTSf1)Si3{w$7NpIvkgOZ# zV}^+vJBwASD&^EomsynB+)X_nf|ukW3KC1oG>EP^SFkG8VA-B1!R{l?WVS>`uBUP* zD7o!mQ5>XjtDM>-KL(O9YwUtHm5j_?)7_bvl1MqQt|67@cTw&$eKA%m5V z%P&eIYdd{{pZlcoQ{H=k;q?af3JMI20QuR1fZkyYU~R%ZD$k2GOgyYXb#nxbdBf~C zdjNG0Ygb|Ml>=zaX(|{Ll@3?wSTlC#LTo?axfe4?l}7Etl)lfj zUOkj?)=#$RGf|XCBWx~gtG`AavPD_{Yh%%xLr(PbunP@xaofNt;9zg4c zqp3B1YZ(xnL$m4>S;t~M?y@*=R==eT3f z8y7dyl%ZC!GpkdB9GW8qR$@4zUD(rlov6Pfoc*X#hc=fcUVA;e(etRPWw1k;iXLBYd47L^FY8qM}T_2VzqZNQ^^W zkCdL+J9+YpF>1djdI5og)_ECoI1sx^xN}?70FxE(vB*f!+$55pvk|$NFNeyosfEOF z&+J<(wFRPwN8a+#mU+|Oasbl>`{oy8{FyQ)3vn6>o=w3cNO3vQel)>oB1$gw$X~>% zCQLcvALLnd)^oRm@_6OS2oXx3x>j4MyQQ$_yw@ObxQ~>v`fYp0QNlA+9w$on@YsEs z1z4}~P_aO1+_Y$VJrt{`y+_<;&SjbH!!Rm6Sd~8N(KNp}<6QIyl=F}{ z4|81o2}oc+J=MoJk0>c7nC9Z;MHVlKo`Z@>@fHldoi<@Kk_BelS{1)olh5SA9rg4H zG#quVFFWyhO+7L@#=;oU9g``c298Qs0M3o_ooSi8_ z6B1q;d>$L6Hv_cPJe;rOjlyz1V}8vR$SoSe=BXMRWi;DNdGh#O@I6J-t%LXanZKvj z2Jj398tOrNf_poP5#Z8ugtnogn@^2!NMBYN;?iG&`7&uCneKhx*xz9ABq^I8KsgWuHD3Q@mYk@~XMZ=FKn`h0U{11=lN4Ri= z6(Wh+hSIo=OVrC-`lP(xk@6|1!X^WGIPTVHdvmfHxs(RB?~2BIX+2125wPOcfmnBG?FGjkG9yg0J8jHk?5gO?JX_H4OR65y7K*F>T@CdHer zH!@wJ+4%}MonKwOjl6L0?K;Ypu&#ix`2F(ojj-=~u~fWuk$LSt6f5{DVW@qU4D|T) zQj+>z$jOvcmPBJ}H0nN=NDO1hDq5`rMWOd(Cmoh+rB{W3cvEa;zu5E7gyNY8M%U{} z6IzW>+=1;7yTM>|MIfDa2wCH_Jx+b_ps@PICi0+*IZDGunBy$xRLOhXi|t`6g6_R} zM3@=ssO+;KxmZjT?Dw>K1?6#xZBM->RWc%pK?|Qnt_@g(y=@w>=Wn72ligq`GZwT7 zGijis?-77QI%MGNqTWyEG^U})hsFHj0$}bjs6<6p>_nWB=CE`T_$#qdSV^g8gq%aj zF46NK^tB|ZHL(sXJE{rEnfa%-l;oJ+~*BGDBkxPm{?>ETNp*(*ga=4 zVSbOFq?+R*_Q*m=8PCZ%wdZmUfSU!YhpOSmvu>fKqL`kiJOK$EL4o9xL{yT5_ynpN z7U(@+`o&<_G%4ufJFC0H>wICMvaFrS>DV{=BnC*HP@KNW2X|}lQG+K6j`l9s(>i|m z)&UU`&Tx0%8;b`(HoE~E82iFU<%2;4_>3*7Pa?`wv7~!DRxNil-m@psKFNdI+Sb-i zX^*_3#NVab2Auj*16=0x!jy;To!K*Q(zJ%CO)C!XYi9vIVc%aQ z&PteX_P!N78}c}+z1l9$Y~Sdd=a+EC2(88=RMoj#voE?NO^5l7&dmsBV-~%Q#hwln z(3qMj9$JQif+j8A#cvWn6AUb8!Gb*FD1e^QbgMz?T%cF-x@1LQwI2y9G_Bi8JiwD} zdV9e5T;$+H`)Y7*M zg1MSc2BkW%q)FeK5Y*x3^hSBR&lk~a@I7jH+?Ftvk}ZWJtdO}<0s1q`FlTjAq^Ci= zzCx&d>9CIH0Jv4b+%T`gQ$va7L$#S7RuE=dINS|)qSj@`g9HneXZJ9NdS!y=KGL7Y%eJ||+ z>fJeIKfUxr3mYy6G=3tIF2KY=HxX5c(sn$+XtV$Qc6nVCuQ6Ol*8eQQb2J#~OQuFUZBkhYANp z2Hz~*lscprkdZ$SL8pYyB@^P;9UbJ#jrForkKy%!686Xwm6N)Aio^93YaYyzWB9T$ z31B@}*qjmIAst-}X1}-aL^n1nkH^V!)#VDu(SA`W2X*!EX(04LGXd)>&G7^(2vwrU zA~l^*gM%4*xQ_6qfGlkHHld=kY6J2;6Dwdgksv9$?&Aaic^A2U)q5tC{z)Z&MjV7G zfhg+ftkedjT;|&6h?*2z=*t0%RXHi0kYwn2)kFLk-vb1dcpeOoyX{Cx$g$35Nk!Zt zRFl|14q8y8>7+a3s0kl!=s?n%av7*=&m%0Ea-_-Lf!0w0mYWsvjd!HQt1~M#$l=iK zKtW~1fLsI2OM1j+0ibs3d!Lgh06-C!(WI&bN=BUC(b>hh~g zx~>Q+VfA-q&xc6g0l{#ELKlvfH$QJGc^TK6M$FKZnUVCirvUn~X0-vaVmej1la~u? zl(*6L0yy1*iWn+;L{q|EKGkM!c>Fk>+5OY2$)z2s-kxNi|wou%b;gMJ5OfMf? zC&{=9_TfFvW#>H|e-Fhi6B}Oha-I7+GWIGy=_?Q83B~otR0MP>dAty#B$qi(QjIYg zC9O>`Kv|P?-Qp!Rd@Epe1W3xqAdekG5E`kPcxk5I8RCFjy6zMfi!d@PC(TA^c_5H1 zJ_A;8dw|`?o#rTY+%JBS3imLJ2B8I)T-hfF%~j-3q3BF_>b>33-iPosxcgq@yps=G2;U(ot(@Gf~kJ`JW9=8aIYLeNqpO{f5xLS!;#*T!F-s(gQAbe&~Yef z57O7)1Uk|Y>=53c*PJocaVqRRP-`}Ry=&K#QG<(OcPR2kvWv^sIxktgXaq1 z0#f6R59yt5Br2CA<*dj#%fL3f!=8Wv3>V2h>V`ZJ=pL+<6_Fwaw}m%W7mln%=FrB- zi6QUY9+d9V@EM+|&pfmfg6CAj2|n+LsNQZ&4WPVd#VlYm0>@d8p0&(Ide$hV&-NzZ za=Eovkew8GZLz7n*7IeDhWCrwT3>vnCU&>N9c6MlvyoQX51u`EIO{ww74%6u=~y0S zE$&{ImAz8x)D(OxM;bQbQn$UmNWoaOIB&Zya=H<(}IJ8@F9pP*cQ)x|Xdt;adCB z6K9l)&=NJ9Af0Tn;v<@L<5oaGO_-kpPTT6+g>h0ao_Ft6ABt-_SN8GJBKzEwN_ly) z+}PwYz@{!!D?RB>vQ~SyMHp%#@g&PyrZ-}<0>#rr(6h<XE$BoLF1=7Pe zAq^N|2J|`>dCF3H2ro9SU*H%j>Z{70Hc%LMQj~d!k3Ba*wE02Lg_*gZF4`datO$_0 z$OV(ZGa9@>uUK10s$2wHn42|FX23E$?c29R(pg-@6d)N( zYnVWn+Bi{%qG>;^@uWC3KsLUDNK9RBL-G4XulS>Rlhh0`QIu(Yxduc^X9nr3PnBR) z8jTDe>c&_&-USbl_b5zbH);?9!T7^{9P9Sy!~#4|wF6$(dC$mpXT~SJ_!*-vb$94Z zs;VXtf#cJ{NMUN)j>>okaX5Rpc8%N|k+$Up*0RR+{KauS4M6 z3gB0nzUg%p{mxhIQ7?>8zu5E*d&eA#w9c@xmeCaOd!c_`^n|RW{dGnf&=j4pocRtg zaZ@lQiA>QuHxj{4J85`M#Tw8`yX`p|3_e9;#Nho#QF@l0r8w%H-XY3UJtkBBB$&SU zO;s-s>m5XcVJ~-H2JPE2?)B7@aY`3seh*lEcHb6uwUtOV3Yc-}?X#%`)_x*RJ8DaO zukDG;WC_F^ma`&=K6#>#iVC-%@}k4@YvO0tT9gSa5^sB$Eig~>URP5hs%E8+lDF6z z*g?U%fpuPB&wDi$C<_$2fB_>q%;PhQP*MtP=ImkIIPtiNc9>;BQyOmGwOyylMjK_$V$0Ao7H1B`yK zL#p&m@n^ImR68I)S~3qPkW3DQ|<;(IUo;zIZpx84IOVSP>K=`}5#FWO$z zj`zOG=N<-1x&G*N>deZCH0+}c8C(kLly~(s{atnRxE&32ti9*643Be(ZiXm+_Tezk zdDZXW`n_gp)xgfDN;dwWGR0^%3J6t=JKQl01AESq`UrzyL8v==aRh9z(x&uKZ~&Dc z=g%Zs+3EruqVe0P`7>n2E4zeh?DsZa6pks{FxE4IuC-3F*2{Mf;>|NfQCs%dZhfbr zz7O=x8O%9rJ{xF5X4VoQFP2~!iY-gKNXd&F>3a|DN#%A_0lwb7lYCeyjlJMJZ4 zq{r(0vmbN%275m_J~WF!3N3n~GLfnvf~qJJnxNEPE1WRelKR|gS%%V&`&0=Anx>Lk z6h@swj{McyIUQF}U;uMRuZ+Vd&F?dAZ9Z7%F7)E0M~&O9-2-JVoR4F&s70f%b(%df zPC6jF&NTO8lkL$Rm%Ke2)-CIOci54|)riq|S{vNJOW@|rEDc$XY_(DdJ*|4j=aE=5 zt%dLpXnMtLSUNI?2;1ojMoHNAEP` zRe}R1QoJ4?Th+O|--mmL+ug(&3KI6bB1d3>-kD5xs?1 zxTlmsgcQd*ii)EqAiJ4_psS`Pso#ye=v42bF)}5o1%^+aGk|53w4fm7zA)9cprjB2 zc7N|}?JYTpUNO%jg~!pA#u?9I7oe4YQJr^+t&3RUE0Rsfg$dO*3lD?{vt_T4DE!er z#dj}(qN4_1!j9cUW#i>1)%qF1oVECspghx4 zJzgt4*}d39(hm!&k2vIpaxx};O+D07w3H&TjPOP8)A-W5@R`G^wJP$ zYQ@w{%UYSLIVwsP1H_g{MJOfWHBJ?mw7Fp-o(LNt!h0=0$tHaWiwT3vD)HE7rYMBB zI1`+D3n-cF5+GdcjmHJCp3Ge|+U&8N_P&y7+?69fF~7yYT`$4Ia^P28Rb{0q04d*O ze}=JcGk^I4DJ@HKSP3$VqS6h_dbrD3S#vs$r{kUA&Iw@=D()fp5hf&si&!`|tZ=XP z&Xbxmr^Y=37>deroYAKsuAy%y%X(|7woPsCilFEgP3OvO963!cSTVHtLYmw<) zX6fP#91Y($9GvCrn+;BKp0l;P7&`ZmKpGI$-U>g`&V0h>v)EN)@h35RIPdqqS^#B| zk``5QIE7MWFb*h~fxg_^0 z{NPT0emKU&%|M$5&Tcd#>jt9nvS*TYB(XdsGf}rw5nel~ zyD+uaEm}aybllsP+mOl~w#6a0T0?k@hb!y52vYrMhi&W?u1NT+!-lxk#7pEouF%WP z9ZgCMfkeXJ@4LvHyl<=TjfZa5;rSb|aI&)Lwqx)lc&5+Zte`tZdSo_F3y@J?hG{{G zWAu}b-D|;eOR_=iSh)}jBt(*QH+kQ~mVXDdQbE2}EQY>H@p)tGaPbD8ctAiOjzi%b ztva|*E5ZwjN*Hk-aryJqEXZx>W9{dUWlLx$vR+lMrDUyIbXYD;Ez5d0&XWS2N$z{- zs?uUzyRWLAUB1pUzqhNRw@qxfZ?`Hh33PFfYGC)}8rHx@(aF2O$(+O6x-np4$5GZY z=IP=((<^*SoOgKYJ3Q5A(oqquM?c^AqifLVi>S8_uZmwFU_Bj8O=H!4CL#}XQ{>U} z$~~b)%gYB&abgeycK5*CaSlNbMLUf}i3$z`$D55#*AaVi>D$udJE}>%u~#QE``#m! zc@1xJ!U*qp`E{KVO&&kIMXk`sc-n6c&lHB=QRr6KgpVlX2)W+&n_e_}M^N%^Y))`q z9kS(&infD1y}~*{9i?5T%8Z~u*n8dRWpI41;beM!_1?m^JZ)-@zVY2a;CB}c4C3`j z=qa6RJMk7u-E&+mi*8OJLYI(Y6<;qlcB&D0zPTeeK+P8Kt#SP#9XdTa%E)+@{oOGi zR(6#Oh?4gkUIrE@W?@B9&UvUgDa1%LY}vIfW`t+eGEwki@!l=>>sxrX%nAFR@5zX; z_I(aw1fQYp^(B&d00Zf%-p)K^dm>(oB`yQp(jt=`j}_<0bzTieTbBdd4i~8vSo2dQ zeJ=*OrC^5oi12J*^Lvr6HX>YxMpmuvD2Q2XX0s%#>nX-@a*7r81$hF|7MYs1_Bn`p z0KDsb^Kes}2J)Fln}VEGIs1$|7w7OQkgb_iA1S%xBZr8#y9+6N4I!Jzuuu`~DNA%M zqyi^_zi2maiS(X(g9DbTRdktlmxM?o4cA#`%i^(NN?cPxloxsQ0==pGda2x@P9HvL zgXze3v*tGkfj&dMa+}c?kv$K;Y5$CG!>iI2)Rz{jbei{qM?({=`?Taw=UkRGy}zfZDaCIdCY&ym&q+wpK7sI#}oHNNkm5Ovp$#nQw&9-n($0 z(wl=PF9vQpIe3(#jUvxbH`=T6`7h%8-Utl=8O;K|VKqiD6LQ;?l`ejLk!QXtgM+tr zg@;^9Gf~dXe`cpy?=)UK^L7cpJ@fSu7Dl|raPTgZC7PU_PvY@2$oZpgaTa*Op+sY6 zxVDV0oK)`8q6w8sQ?Duc=mu#mRAJf(q?00H8q#g*O)Kp_%ZF2}6*I*YT#@}Mb8Svq z*9dUY&d{<%7pZfeY3&OI&!G`uh_!h>G(P(-#k09@^LF=nL&k2qzwqU~7Cd>w0ORB{ z(_Jx8pff<26n)YqMFJWqwdIKz;pOG73m0YSuwfIJHh^Ssk!EPWsAnE!+6UZmk-;yD zJo7nWA{9=F*m#fw@G!L47+~LFzo_hZM!kLa1olnfu(tM?6QKp{ogQ4>3kQCHudA|G zrVrwidisoX+OHtNFvNJp=(#Fx_-!qdis40Kry~z5yD;XG*K>se)EfmUN5$SpU#-V!0QpcDbVbc!=DG9pqXJnQG4rr`~v zktvF`acn=|eaLHr$m)VBca)$BK52T&z22aZgL4eR476#5aa7N$4{li^o#`T7C4G2m zxK_gFMhC93#ol&8ZkvxUlRHP$f_#$CpW#lhY^to6?zSFT26o>Hq2D~XSSVY1COWU5 z^@u1afY&tHpgw?L4$zx8hP|74?f^ssjwr&9^7u-7gw-g^uB14*y7pNu-f$9`cdn=0 zC^kOkvta=>nIUvwd)hgjVhQ;!!Hb2X?RW z>0#n-Kh%foZ_JFWB-WmmryAB?upUUQ6PU#1FZz?&KV3nX^gsBteU^wn zX^P@HVGUqDBX+H(ySG>Kaz|mr3D;ld+8YN$pm2nfCD-#qQ^6jxNwC7L$4+PRday%C z-JEkEh~Pwk`~e%xi&tA{NRJ3k^LhifJ>f)%@j`_j^GYv2k1{et?R&YEHa$tuLy;Nj z5(G%X7td$naY$CivJ2JVD9ohg$}~JFa-Iax#jYIZZe$8dMwt>3W{_6$83@Fu5w5SP*;hzgQpE%o1BM=@~U)_Qqbms)EV(pa7K$&aW^DI?8umv$4^yqkXarIz2IkMkShz=XM?E86& zu}L3Xj)4hf2H0h|X!iw!C+P(R&^fJ-Q)%C(OQD4Cd+iRd78>tJT`+_rP2R#2RtOrZ zaNfrXQ2DZ~EndZfaefkz&*;}_I+XY%BpprtDH?z}yP62w(E17Vdk7bK0ot(ED^Sb| z=z43wVj1feYjMMasqSr%#72tkY?Uyhneu{k^L)h2W&VX=BKeu(g+rS+X`gWO11m^Wx-kl%5McvAw+-|$0jL{qjDo~%>XK$%OSqpQgfkScHzl)!KhQKo|muWSA2v0_N zgCKNp3}0F0*%bg9>cJbLdZ^H^-Cq;J9#V>FVZR~R^E2P`dB-XMh7g$(bjDq=HdcGq zApz@oR8Orv8fEH{BYCqpQHOVPvSFITvqyW}#+)wH=aA4()Mt1{JYR=0JLvI4;NBI| z=PNa5rBqNvzCpNz<<^f2Sz8Af6hzc|_aqeNV&T0m));PcJMKzqVNT2rypW&w_bQd~ zU}B`+^SCLHB1w^L6QE^bO@V9LZhtaajfs=1ZH84CcJxS}xYQ+!%w2@hC-u%hH-sd`y@{R_2n_4|abk}a^SM^eWk~jSfXA{vBJx+rO_~1@`U0*te zl~aV6aO^#%lxK1Ml(mK%AW8>!w(hZ!V^~&kN4(R*>8SL;69c!%?vd0&*T4*F8%Q7W z3n`^&oZMn>?sq8NClw>$=S0->X3fp97+v6=V!23jZFS?5B!?Cr0c;tn0b4D32oF)g zeTBwUoC;8JUy2Wy`9a_V&8(5cpNAcWx%beaG%C`GKoz52r}y}RC2`f(dl=c)=U%Ly z!Pd;|>`??%qavUki@g%Wo-wYJS%SjOz9A*#VNRzfSDNFW5F9=sVBhph>n=MYESk4_ zcVx?+6PMBE-gMLtU;&LLB9s=pI)n64J>`uD6r9Um#Z4(yp?B4OutG<$?}cbiRzFiY zu`5$tWZSNDQ4^vmU4iz*^I$^A;GJ{O8Sm7DMaG6p>$qHP%~d({4oa67t1Bf!bt3nq z_N_yP`>P>4WV!9yqG7hA=4I`N^0-^1XY`#s%!-#@X|FsiLYt%N8DV%?VaNV_3f8xr zh-q73WEU%IuKAY|k-Q)8knNjb0Pp*tgs%>#w(!;Cr1)(Ud@#P(r z8X@0f+~zn=1I48pcu2Yaw6e(I@Q^RFykKu;nx#cZjIJ3^B3m9B8;J3 z;-EnEI&M539ZPfh_R~d88Q@g{yNYmuWMSBJTDqrCkBDi*-uE%+g?{FQbWy`Oo1BYC zz1s~-%8Ce7!xMdhA;94d9L8?0mfFl(3gC9xk#O(2eW@OA*zzNm3Mx4p(m;PPblyE& zMaqcfa{cJ}7PMd&q2L*~LJmFFUeHJB(_YjZ7vkp{aK7XUYq<3a*&?Mkq3FE!#w|Nf zNFS4IZggor0&f@q_iiTC$MT!KkFgXWIVsFxO1_@X7M?Tuch<8!)bgp2uehaDonS z$ZXhseC(1chWm=%!c)Zi;z00y4i0d(T(z*PhSDM(U2$(nV;Xy6?wtaqWARAdroZ5m zN)=8NQY~Ms=RVFSX#N=dS(v0LRkO4eFu{6?#2mA`fGj|?(qsf5GSD$th(Dm-DqXVi z=-`yiJ_U-TXQ$0m)s`;_Y{E2?Vj1&P$R5-6L!V2{;u%LPO$wWnP7MUhz;KJ$dF;wq zO2w~>`$gT1g-Lek&h<0S7JUP3+zP~&VBX99Wc??I=MqV%a^zhSv;0g{yec;ma+Gbt zUSIen>^3*puE~h$)WyjRM!Mq^FP}{jZo`GG6z_ymFPb3W)u zztRp0;cI=S@EV}GGt!23@YORkI;huCu$&SU6rQTGYO66&_^xcZx1P{CI1ToeJxTN>0}{9GB&u?FZiL|9hnVFo7T_@zk#NX7GZVu3joV+=QF-f&oY?0kKA5^nHz z9x(82FMGb-nCg2EAEFfMaA~UzKBIpIRz_A|iOKni|p@ zdowPzGhOvYnM0?bJ6?!naqA;K+|9@LMv$HE?SohrkGc?y%N8siP+SMZC#xXm5LBfR zR*gy{f)aYmS=N{9SCIjy;lK{P(C@_!stH5Xm^%qi9-%&lzJVU+ycU<*0J>yc4D}Yckr7ghr*oDdOUbVfY*q9w-mFmpX`dH{ zhc|Q)i&#VB>o;D`8;Fj1tX6H3F+Xc(hRSM0<6Z8OMv)3OC83MITzZi!xY~Us&*a3< z*n6k^w48}An-h^FCrU&zy&LZ$^1TOL>r~g+WPI20tO_9O`yN5`882acAaV!e5RgKn zSJt-Tol`!DT+>>@HeqMp3`FfMihOUY6o3G&s_&_YT&g}s7j&EQgDw)~^1=qKQjHdHW4I`@l9+7FZAk%wr)rqUSk}TF$STPQ;=Hb)2gbydF_O$v)oq>a_FC z-exCiRrq<&<~`l*d)L8v%L?4bpD>s|mk2%5GMX(@eRXs+nc|0JMdFo;APRfNBIc4& zds9G?pt=DDeDR_t)T8&Hmugb7kJ+Jm%Lif*3c_<-047SoaIPtl^R&$baU7R2-I>F+ zsW$ak<6_}qaL04?9yvi}eWDhA2mCmM8=1J~cH**&0t#%1-$uM3T{!o;fpzDEnNo`Z zVe6i4CFkdoEc{{+CEMnaq@g{2@NjUNL>W&Gl6{?7rc!*h>$UIC8>SIz9MnE#QCXHN zKUNhJT9JDU&bkG|j=cxZcsXfYLLQph#HmSxKA-7Dk*8LvuJ@3igBa31XznKq^_aq2 zWr_Vea>{4qEQ;Im=d?Mkw2satI48Mu!ZbKM9w<>&L_f5bP2rMgor2s-g5TPBvCu<)xK*iEV~|h)Gj+k0S#~5b zGPs)va?hvlXAFhr7u~4*gc?G~RUJt>nDnOFV$v6{Hr`rQ>lT4NlfX4%>n^6V+$;>{HlO9O-M6W!W?mTc9!tMiZ$h6PcXqSUs0gXp%%&$j9fC)i^H8P z?w4fN^x|zOgOkLNB02NO76k#oa~jDZ224UPd^gC(<%;iKmB5vzS1deA;jx*0-b|z8 zF5Esbh$2DK7~zOh*KQjNU~dDq+|{w$wJ;Y_9fQX;))Rg7-bAg9*p(`R#yxgyt1^im!{Q?C zeARDGhmxLOZcRl!~>x{b@nbOLheY)h}fbvS>%rH_esP* zqv;glDMV$Z6?7_bJT06NiuSIYd>m?HIl~ifdS?B`q2YLXug379u#=JaGm!3l98&X= z$xv^Vcb*PN^p&UTovJ({BY=EWfJ?;OJ2k&1T<9?pzr_{!lzAF1S#?x?0`)=P7r_flHx zQRtgT;;iIr_%_g~A)g^RO@W)8y*Ctrs28B+sK_X98y}{vq z@E*b24cG@x5BGXpp67Th>$FpB#+;&WIt8cJ9f7ae)5?X$s2;ABH>pp2-yUyh=EKL# zFOFkRy4ZE>j-<1(vdcEa5QDk8>tzue!0;Q~A}<*stzaYeWbvJ)X|2(1VJj@?T&X8k zOPYp{7eS%X;O=LaDbh{RDy?&vz>!ptbB4&(vT=)T21hB9R^@Ijz&dS1&tc_5Y2q0q zl(a};(bA^#9gkI@fNH}%V_SSZZDyd4Kl2E~XIz!+6iuK+GUuA6=P^ObXR$9`8s9T5 zj*2%0IhlM9bOfH?Lpn8Y5`;y>i)n6bJTJ{Iv{XjQe9siD1*P?+5jk3=tm*4kV_#bs zb$cDoV~;wjs1R+-LkV37ARX#brDr*043f2(U0gw~yLP~}U$OLFC=JmIp4u?bLdtvZ zu&lX95&-+QpWs!Zuf5m0*j$LJfnI!i>=|ClMGbcCn)?642G}9XVtAENN*)|GJCB+a z{(|D$<)`mWmVw(whV%e2V!Yeb!Vb~7$f4QsN5Nj^9L+~4&><4?AcVGN!*@(im#J>~ z>d8$SK6%nQG?@oU1VK)LHJU^eNeRe7tT|Vwgv$cjdtw%LC8QktHtf;f zpt?#EmNbnZQi)eVU3)yZteFJYF(F&kF^B8RB_!I*mb z^4T$XXx5W%iVF?rQ2|A47)b($k)p&pbj=!mAu8@gCEgS4VK6V;OhMrn6~TBYlf>EA zqi;shwnxI8R2Xf<`l(@Bv8zaV9JAM>!skOdz=4ohRnq_)!imdaDrlOXo!n$2nsu;H z_Ld0K$NG~Be!r<|SFPFxXP7^r&Cv$2#FT~qx=YQxkO0|r zXh8MUmmy(;(1&m4jrT6Avb3!vGM+B%qfGXfwW5lRx*B`9&&hD#KGG(MO5S+=M4zUW z-65TBa&CYw_AM%HpkWlA31n#3tXy>nO{|#qJk-1nphrI!5DYfv@M7QIt_wG(Q6vz* zr5FR4%3}`<^Rae{b!&Anv{B09Ox&?873-ZipV!CsfS~<%~bb-luO^x-a*E*}A!f5*w<6LZZIy zPH*zYbSHrtr7EF7;xm97&Y6*r*e60{lL=bwO5&H*wKrE%?H=Yd&eY(U)7pI6q^EKy zx-Qs}aDld*q*js>G;Zwp5@$87f_j8jQ(^+t1b5>i zhj2*eX%AcLqY%R;e|{_~2_s~#<~WA05%!FT-m_D|FxKbpHq0+&FXodAea70DILiOI zhsAg*_HJOn9hZOt=E@u*aB^*micK+)*w9xm+#F$rpJL2qS_w#-mXSa!(;KhDzO6X~ zjS5JTg8}ZELxO_2Ha9s=UVS~~D^D_Ktk?O;ga6sk=m)|4#+*QIUD9|d))bPRjQ#Dr zW3m~fdfN#6^7>-7r)k}61@0I@a}ZLVJc2=t2lO}{A$XE-HcQzwlixjPXJ_hbY;ocL z5l8ZBhKylG6V>A7zHvQ7Wvnu^{t0H&X*O~}%m7cf)$i0c-U1Of`c_%wVF>jTWCkDc z)i?xUM&*|{QZ@<*XI&_Rgfl|uBArB*R!|mvsiX1e04?u3xA1qsMGJSg$BuE8LwOm` zhQ!Q5Rbe$9Ti%3k5YST`NlvFn@AOPOh({pP=`Qv`N<5w0%4`mKF#x(1k6LmC*ms0^ zn+7ye_Om-a5}b&%J=iO2VBA zpn86FO;lz;JrVj)67iA$BBLtUPljj^%ASxMPCZSW+qV_bGXlcwfG!>-yiKCCGoYezrjt~^_U$F(xasY2PQqCxJ0rwZXF_znv-M!<7TV2 zdQcGT43?_nBKz(anlxdR_t?&!OYfWUL;#Z32*u@#h9`?R;MO&SPZu~c*6B4JB?n&5 z5_x$825ZmQY|~4>u^sFpV5!a{3J;LND*i?z-=TNzp}b2zCb4#7lovB*YfkZGXiPVx z^757N%hyClgM(;;5`AoR{s@6YZz#dPBJlAkq&B?DDXqOi{cX=2KTnC*0>CuG!ArV~dG zp5LZ#(G!$kY_1WXovOgmQ*su{GNY4Tg^N>Esg8KQcj#?N&rulKQc>oOzKpjbHk`PP z@5MSh#=NNm?H!(ly8vL=u238`Uv&Irqo47Y7KH@HZFH-oJtt!L2}<9hg+mavrJt5p z)j=xR^WMvx2lDh$LyZKXQy{)%SE4KG2fz!)(Qe6DPt%J%ScGEmjxo8O2J5@Qh`Q}z zA!8FgkqkuLco38w!GgJ0FE4KBjgFmMwVkCfj6hgJ=vGrv+CyQS1*61ncxP8|g%$Md z3JM>3Cr1anWeJ7qdx?}QtR0p$ds?F8Fp~!O(#aVDOI36r>)9Jp>P>T0x-*rYLCQiS zuV5I|&fo95)DNpW+p>3khsnx~cdX|gY`%@@@GO7$SP`cAoKYWp2@(Xb^j!7E+sjzd zmLeTpnP=ia7`+<(-pXjZtqAT8f9D$e48l!5T2Lm|CGu(+f?<0BAoi%BBnYgL1=aO1 z-!pjeo&$_{^;#-vqdnUdwg#++Y!>U`*RY~+o@=LAdpstasi74QZ?ueL&`?M=cIK%y z#;XW-XtxM6M1Thn5Hg;YOtw!Pfs-)>WM$8AMUiIa0XT*nv>2~e(k%l-^d+LM9Ok?9 z)JfpV>EeAO1G9uT%?{v!&0>xTnQa|lB|1uYozm;|dR89{;-H!B#H9lLYqj8+78Ac7 zxg7I`pHF!~L0sNSkWptyt$a6#&o|p-u=i$9mEYB~d(b7S!Tm11Quq33VMW`_qc5|y z-dR1i*>qC7o2RkH6+KrVWI(vzbo?_C`@C5?D6&*?=>Vf}kU|3xI|%H7wuI^s6oz}6 zk|sr~!Ae-A+@>VEL#QW6=o{m+NNtcWD}fW)NnPYfmL24Indv~{dOj*)YcC&77>Ig| zL$yI>sL-9fYC!{#f-?OgUYA`P<76iqGT~%I_v)E>9KP6>EqyA*Fa`u9Jr7-~g0%es zyvg&fewLQ8B3z&tsszlMdXUJKD6;c(LTdC`*#l{?H}Fbf3T|vUHbK1KLgj-uc?YwB z>nwd1KMw}nB|gq!VIHW{q;JS_7GGP!_$?K9k%3Covxoiy7kH1DAS46bt$-n%yBrGI z74ZgNqi+sbh0ueM*qNe-NXswz8;^g^9agTIoM+Mk(-Tgml7oOzap#h_hV5WS54h@e zJe(%H>hYFq7FW-WJ(tpsa;nOglK$QU+))?TZP|Wy^;|4vFyBB71ftw~(@k-!c(Pat zce-AjUEDfp?6r1u;1otd!FP7)OL2C#&Pmkds&3MN)rC)~gFz-Rz&933V2vd%tCYrD ztIBA6iJclEN~D_L4&vRZm*(u;u-PbyLC1-pINOo#<4Pz45ium_ZLzH!94Nvh^9}Ui z-KR?l{EH7s)A_;la|PcHeqH+9*x$n_e)YDxa^e*?@tWxTX^47jVjredg-ks~4b$`nyFNxdIjz>Sg%Zn7^X_K>sTbwpE}k82QK)kF zcvaO*2$$l-UagGd7CmKojE;zM^vvOj)m7wfYaYJ5=PPcV4eE*cLZ~Ei)Zw@vOTDFS z(vt{=AzK`pB44DPdQsAF419X$b`IBGYx$nsy?0g7URpRt1TE$a^SIiiK+9nWCPy7q zm`4!uwAozNA7#@*`OoFp#K~ z$uK@7(;4r6^J*$DS(LzYtf7%AT@+(XNWh*geE|~q49r@9ZV1eJq`t-v538a3MBhZC zGdXLuPxxR*olT5h7kgb(ol}txbMs{o6XconU_i~1rd_;qwG&SV3V=BWbQB=xaT$L| z@9+_TX20A@@r9Q_cB^?cD%)wz9A+~#ZG7ydYl(Fu-8_Admq*^lt6>&#_T_k9u-RV5 zrnl4$w5~6c=Z@K6>UMONeHr1NKq$LyU(?HH&+O;~9@VoaIm?QLB;p<-obIF@eL^*+ zc4A~Bg3h~%S#&}>17X_iKRZ_hhmlAwTA)P?qL8+cg7ge>Xd5qW5R^W9i9k)6-r+oN zj0-T=UI*Vx9!k*IY+ZnGo`;)DGgkC+z+X|Q0zy#bCn5Zf&LPD4*pO?umaN^ACK3GY zq;=Mya1+E>jT%h%gk%bd1hW!|=Zmud!qM%_I5io3h#MqP>rXY(qFrRu$76=PUz_60 zz&7;8hDjLH+eoa<1${kZxN~#E%7{@Lt1;+lX*bpj`w;Y?y&&t@Q1*4Q@5Qn3^aJWp z6;+|KXW%4A=X>S494$yyCSFt^@AlzggH#bU8NL|pc09~I&1sf`b$^yla4Rf*Vgn)K z@EXzPQFn=KKEIHL@qQOA`S=yl{X*2?PlZDN=k2x7!zZ%?VW}>7Ep=p_^tPWK(<;01 zVU*^Bs)VZ}c=i#z@)s?;slLsO*OfOmsJVO_ zcNbmFRF1H(*by0;n59D^1%pGY%m! zpl}e?Bd9Xtu0s=5LNPE8w-&K}sUumdKJsAqXqp2x-r_#29(Tyu zI;ARpNG0H5mjLoacNj}F;LJ~+i?mOmdUC1i^}fkhEIO99)@WIB?+SMV%3?L#@NFm= zN;utsDPt}dZ%i9Kpv%Tw#7jJH3&!jAw^@%ZXG(ORk~7=JD8wp4DfEJ?ie_#dPEvb5 z!m$b7cLRCLS`>JPoc1=i=$LbO&c#({k7?K(6N<#VIzrnm3<`hW*v~?x3^80u5A}^C zFZdX(w`XKy&eYskqeHVtcN!@a-7Y0RR}^N2I#-KFMeKD;Z!IQ}BB4uOvtB+hm&?tC z^y*Ja%%AzFUOwVu*|$C7N***NLmEbf#3&$X8KpPDt(R4SQRZ z9RkoNH$fIy=CNvlz1Rhc1a)7mR#(OjQXgmBFAryf^xCkew`GLUG zHzg%3g%XQ*IGws>#O3ygshnSnZC>A`Gsx~#vG38_2n`)135!S4lpv(MWY}*^4+Gxe zI3ep?a7aUHi;dOAuGu9DOj}kF@`5^F?dfJcrdLQr)44eY$_%>ix#IPvKDzO8DFRk8HZ@ONwOuXT%DN$=kTGtv(PYcO=dUY?Z zwN-+#X9@!DtmhGA%Tv!{1-t_krbjPaCk`4Ry=-Mi7LlIqLH1 z$TJK#1r-jNDa9E?>bnCi#BqghPWt_JeJZEb66saa9xm6Z%6avcnf4PhItxL1F=I-`mixWps@gKyKXx>^K1OLU8tO8ySgN-fBfKFm69kXDo*d4{i zlePBr;9LnB#I1fKgR?Y|=z#8^hVjfMg3c~$=NpuNf?^L`c?-jWj`R?kZj_`b5-7DS zjnL{nOv~zLFqgsF{8S^et7^^x7sdIAV4t_WaJEfklvkreI(dmUVQ0iN7AQQTj!2k_t+C~7}vGMP&OG_>5NRsB5e!D z{GRE1SFE08&Y4-sE5xaDgL8U`B8bGe_GIU++l3ohcUfubTrQ)s8QUvhtwTpdw?VbKiqDDon8LA^>LC@5C8Cc++iEEXrR%wB zqLMT|m*}g=L1Tq9E0*_C8r`XBG`n3_QJ<=9GxA0vaHhutp?rm74^`4dDtr9$9Eek9 zq73!e5*)oDsxn^MHUSGJwmUe9!<^Y%TmN8J#CB+~jEexbs4HxGB2?v7kXc#(Zj z+A-kNQ(?wUEQf6(E8Hv_c}i(8{jzdz3v(Ftu!bf|W4M`t=e>4WCHn!RNJKsil=WFn69HM;}K9<$%f`8zMM&A zmzd*n-PnZ^UkV6ZEp#si&kkOE!RdkcSawNq6;=WFPJD34l;X&QYTIF}cyIQJqcJI% z>Z5BFM!6LnEO-v2I`~?I$JLmQno7mqG5{&2f;~k-W~}6(bh_Hen+!P1!(2mq{O~*hz#H^vFZYz<3eT1AEg2 zFR~vz7ROiKmnoto?~_%$PgYE#%N)mbuFPjv$j77Br8Uubl1pOxaHJgBR`iuhtBOnK zz2(4Smlp|`#PgmGlZFHeISM(9sK*i%35oS&>w*v6D_Hk^017brYT{(`*nQ45nGH7I zGXO<@%j^&U;p)x(9GYotlvAGirD1?^D1cj0YF0a$`#bmd|0l7K#0&CBeD&HK-S%yQ z?Roj`J)3p`culz^hbd6In%L}>wrFxhsA^Ep!inu=YAWsyq&g+VEjwAkQzdINIXgFIefh%P-A3O37_Hj4;G8BqnOyd_a-iZa^bDfk)4Ih;1K+rdN^shdCa&IQ^1`PZ_qqk6_}ed zo@h{Qx}VAb>m#q);x`iJ zoI>0LJsP3;>Io`{iKX9m6J8kEQa=GIcnZF1gzGYvTrj*G-Re~j3y&-jdS@5C8-0pRm}{pkyr*gTk~@yJ%dwY4WcVP5RcE*n zJOe0V41h7hn(7I$WT?rq>xGngQ#G%+!P1w%z@N-0Cp z-9V!FOX1`8E0RWtvMaWystZg*4UkI<3zsO`+JmT9@ ziAI1U+i(co0|L=ibVGL+Xm2nm56=8Oc{fPpZI5{v7M36BdnWbX05&&~$6?H9)Le*f z?eir}zS~!F=;`K!ePs}G;_23L`Ebf!<`D-TOJaq{Ug3Cl<;dMK?a@pr~Z9^py=q4cshE;0Y4$Xct)qC;~DR#P@pV zktyjarro^&S$IgP14f|D68q3&X*FZvJI3q#ep?U5o{D~+1!;lzR@&FA2-x`3<|}ar8)21opc4=1Yai(o{)eMh=xA3Zv}DV&JhPfeK|kJI(K2EJ6mxLvuMT zdyjXah=YQ9`c6@>1d7Z?Aw(h`4TN(~DggM43e+t1Pz~DCd>B#G@10GH03=HJ6Xyp@ z$o<}{27qc_VWia=e;q;RL~znNSj*KG2cDRatUr4qsXZbA7h50`J$S3Yg86(6Q4!-= zIg(D_(Tdu%qTi7gA%Kx}m}5mdrSBJfS?=Phj*J=$Rw- z?y|XhQ=YNy4KCkbE*sq))IJp86~G$Numlyy>A5D!=f>%NxSVs=g};?m%~M zeHdUd0d`YNzT+V;Mkn;)yAjFS2NW^2H^lF;3mB6IVbA7p=$0<01Bqw1xv~u~{a%r0 zhLi~p6Ih;{3EF!ZxzgMg!~^+U53fM{(YR*(|7hUZo=_Mc9@Tv zJHr|S91fP`?k{cks$H%lLm=dBTsoKMZ4LFBzu`#<2YCLha4X#XZQ?=tS!VPCQtC!E z>$_5IA5QeihYYw_K_t3`@5r8%PN6#ABY+IANW@r~XLkmicctiy&%oDgb>)bWA|)A- z@CHYTw4c57Ycg;+U~EHTtX<6#fiBJmsWYYN3oqhcAy*qg3zru*HoFxE*L2@d3JFQp zyGJMib*~xp-Mk#}3($N6zaJ(RyVS<)ij~gH*ly>=%kW5I1m^Toxx_hS#!SYPwva&8 zrwhv#H&AI8h0`dv^H?fE!iR(;oOu1_Ngsx~JdE1+jm$rhDIYCoue3AzsND9emU&4) zGb|3oM!HlmXBZG(YxzKD^W=?I`XkiNBZ57bM<(drv#%(+gEN^!%+vOzFP&tJ`Z{X) zpt{FZ!`4Ai4DcxdQEueEnk=Jo@28_RPj*>h7x%=kATQ%}3&nYx4h@8@rKZwWMTxuZ z9QA=IJa4HQshFaNNQOCdiw}o4>X_e)JxgDt$GHE z$LV<4PRxc2Eg?dO;B0dH0)}ZdK_r)US+CUiy>sMLZ?2{Fj-Po9n0kf|HZ5p5(znUq zIT)V8gOV40-u^Z;_ywXB>BARg>M?YAFZl%pOBo+s^kCy zd@_^IxIeG!jS&IS#RgIc;d5|u>gI#aFkZK9bp{xD8(MU!;hZf#kpn?Kvyx#1;N$4c zpVn2`eAwQIN7@zc{{=)mdEeGWkbxRGZP?cM1_qpo!=|WJrc65Vwvt(q`6{p ze!hzV$$L}Oxn)N|glKh2S<|*+wZ&-IwwUu2vOwG%w0V%Mlu%0`tD}2i?kGDa*{aI5 z*34GXs#eVJSTLMU1Mj`xVy@)-hS z$Fb17q+R5A`l+0xDMn?k zaAQ(HhHK@MmkD(6T-#F_)Bx1dc>yh+rU8>rVHGLP-mVcEJme?m{&4o54(^TFwF{M_ z3(J5=PRDb$&=sdPUA;B#$bRCM$(rfs4t(#n#7VokC@U+=`9+F3E!s##9KFYHOJa|p z$8xe7b?i+|L6|-Qwhj-Eb06f*gO~R3)uL{{0Sm{>j0ldI^VpsMB1*GrUJUp2mvz?G zdGrE*zVnCUWyVlvbG&+&U_OZA=lIy?e2O%?MkUZwAfD zWOJS5TYxghkaO_4$JeI&w99G#~aa!O%0NV|ZE;$=yvZ`3Ufw8VV8v3hoVHcKN|DBH|+4 zJ6_})NJ4y-On}Z0Z$sKT1zppE~jwFMopJv)r2?Z z8ertL>>*W(wX+Ozw#i7Y9;hM1a|CMQ$A)&?@9K2i2ShX=-1I@V6f=lveslY*1-U43 zbV;Nb054aod%de>Y@V;|#3%}}gfw>?Q!~7bD#D{8z^)rkre5yHJ<{PE-TEZ9Ghkm@ zEZ|*bgH^&J+a*6WO&nDhg9QpDx033Si1k&?+`;n1z?Nhawo@b{czCI^Ib9;8G>@ns zGsC?$nkvCK>$sHk(6={o`s6h**kQojUXFnswE@^@+>w^EM@nf~i`ZJs@Ch`iAf7~= zXnSapbfA?|B}wa@L%hnfK^0%K>yYu(WZErmz#}>!H@kRdTS#$u z98nJfUy?K{#AS?g%`@IGzy1Ulp9O)+H>~BZ-)J&i+N@pgb-+>K>tI;RVibC~FLpKF zT((6NKy<|POd0s~N}_p7lkC27MMI{Y=4L1!LQ=)!$eIc2O^ZP^L3a=iz18e{@tXT3 zo@p^qhK)5vOrA@z<~T3vV{NrjCpEE}CDMKYBg25JjpUj|&%*Mw>IrVt8^1N-W@CS4 zZ=oo7vrGdOpWdi%+Nf*=jl@ZDOE$wo;KKi**FYf_Xk`SRbrj;WcKIpq(tWxm; zQ`$WVCSss*LMlIiI&?Ji@LaQ(T36Pcf@x+o$RKJcin9W=Y)&-`YABI|-N}{M<8=)R|Qz>DhUomrBYF?ryO}%P<`1VdH zTCESA)xn<%iFmXj;uzIkFB29w1IJrans^qXkeTFLZyGrtzOhFa!HKsjddD-!BG}7o ztq<#EiUKDB0M>K!^ss!M;1JUEWD)Y|yemwTx>4>YKoC_T$$XEhvSCC}6IDenQ4~Ki| zrCz5mYpzhtQXZ4vB9{bmcz4Kw;d{;sZ7n0c#c4Gw?rZn3f3btY7Fu;xBjl2!gCp1{ zt?w14>mV55Z~`0MFx-1BkX>w}w0SB1@?EX4P|_LIRcAa>hvR;27t!J5-e5ChdG}iP zlih<)cB#q7H!YDk?-4oXN*;%gaoLl%eG*F^%Yg)Ko4fjSttLEd?)E*>dE+Rwn%q8?;P}n}yN#>_&HIVyfg$1p*wSZH{USQk;?1S_kp#($xTGY(*eD=W zox4N{1iDL-bul|@NY~b4y+xYxeLWM?SLsqS)4hHar?JK36nvh3P4M-@>dQ)odCPnSIAe0Ozj(xTLIFW%1)iAN# zQwJ0uycdFgg_OpfkS)2t;Ej_G^in?0F>FdVwa_}1%yRN^UUqmp9NEC^oC}%>zC$=$ z(&UZs-a+>6aCC_;`A}v->}~G^aojW!Svrj8$4N?^JB8UL;!7j?4hde-I90jp&a*NFA`3^t$pR^UXymw=X_YA3{-8=4sRVUK z88){ipAE>(b;6Mpr4T-3bne32g#0i5tO-KzDd>D?G)m- zkCa22TAzbFuN!(BkjFhy72rn<_)1=Jwdk;SdumBk79fZFW(~f@gv5S}25Z0QDVC?n zgURBd5MVCM=x#oFN;|ngPfTrTUc%mc5=7~Ewq&8t!40M+%UNv1r5Qm9rG!j{rfJMJ zP!Qnw;S|L&6@Aj9&r0#e`56-pwBo%9?OTUF+@SYV5(7!4sT4DlMyDGnPAgxIywvn= zf@_2`kkv8+L69)Jvwuf zO%7W9%BWvC<&^;wi@qS>i*qG$!?$s{)JZ?*UO8L6c#ggfP??OK{ot1CEnfL>rOA#_igjAt)b+jy)EtqiI_P0FZxc?s0DuYxZ+9}i{ZtDNRL zIk1I>YR|a0P%od}=y;0&fL~^o$&F#&OMYD2k=7IAbUECJVe(x?<`dy_+r6l#UhJ0sOV~F!^(IWcg6k$C8AYsRiU1`p_ zmlX|{0`(|N`hMOfWJkwE&5T!-fv@S6CE#M~lWV8Y)z`AtN4T%1X1X%D7lEnbW=+>X;V*^2tv=YexHqX%;JaK#*H6l@G5VG(Cb94On>1 z0zrlI!U7?y*>Od86YhmxI9beffJ2_f8r7qS!A7Rqb;w0}j{17B(oWOKSS{*7W)C@R z@hXzPj)-sr9(@Lwmw|kd&p>(#RNUnR23(uA#Dol>$qmt~kl)ejByPCR6=Al?7^w&0 zvJ6sSKI3}toO%oFT~uc^)I(L5x-pLvcoc^4xX{h>fiORL?_$%&Ei4(~CCxD!kf}ex z8(BbWm%#^+tc>WHBr3etNpaLeQZaevzSJr~Xsk^-NH;HN$~Uo;jNM-M8E+Kyom%T`6+a~k z=n36dGp>A}0QI|YAj>5_F5?%8*Y6QNvJ8$&dw|uA{@&2DHwz%8{wN$n`^ni#_q*qD zb?>Y+jZx>$0!(iPurp-~HF28Ej{-Yosd?o{ z)SlPOaF|S4d%&buwR5-8-D9TUfng8fFgKOu1SknQgL|FePmS^^?zm6&9^x~5KHlR| z{aV5{5MJ*nAe_pjNg{78t6hrqQVj=tsjH1~#+)>4>j^>Ey77pq3bu7MEmb9LEew4V^iP5}m(%~# zvvMBg0(onz^d#nW}rpgiIGO3k;I1D5kp|b1J)D(fjUr$u^if%TGQo~6<;YdRX#kb zMkyzMOqS{_Y!-dgV;2|D-M;sHpbS(<3YQUlOY~V4%NRx%usTKV5@IK2O*>&M2#i|T z>mo@lRKR*Nb?iUqput$Bic8Pi3#@vI@Z@5;9nMLjW166?Unm|7o`P6KGcRSTC07F+ zlDX?r9}?=AoNt{9w#8gzL}v|<6M&OF!0-NRpCu&RmHc7c5QOl<^QR}ed=N{bFSPV| zQ?CX=$GtW28%&4xXLRormnU;onhz0%);b|nvTYtOal28Dg%eq1LZ||jX$N%#L5y)&b!=U4t;YFIzCdkX}j-uK` zw&5rm0ut8K-lRc&GW72{l3DspbOlaQ3P^em9F*wv#w&V`1(C*S7$kRx+(m?v;q^VU(X=v;(M(U zwNGNgSDbN<`<((243GEhS{)|jW0uFe-RgHQ_oNO$qhs9?v2_Jw7L!3=BD9l}k^9-6 zxP&J;>v0T6sJ~LD<^+Ak4<-2Ws*#*Jm@1uW_5@|G4`F^mbTWDA-hncVK`+#v%ytWO zG{`&|t(wSt3wxl?b^CThaW*7srwR!Sc$i+lYL6gz;Ku?B3Gb<*JqD(E>Rpf5uKD}^ z5ue3`t^A>8!d1zrh3dWa*Jurv`9|s4hF*`;(yS4YdK~RqL~vpq3;1gud$Qxh_&~a( zdQO@c&+M`bCs|V}>UA~8=A5}!k2@Y2X2>PloY5($VB{v;S&SqiKdbOvJEW8bP_lh4 zjKmrU5>9~+Z!@y?^13g z&t0U2Cyb_5JXnM4p~6hBd}=X~r1>FAQEOx@-4lDdum76{nRkFTpr6X%9V#@zey(A4a^9B+%a}& zs>zIogDMDsO@3|hg0s8TdrMShNFO>24^^T@E{LZS5_zK;lLDRNE*jM}#4}dDsIlj> z8{C?kWtpqHl=^IcV%(qzG98cR8D`~W@h8Uwucv#s8K-zX1Z&Z%TUpS>i5`WQEw7#I zt~x;*3JWf0_7qyo$YZP25qsV>3<|kr))n6vKg-Hy4_FxXT~q``0G&6Eca;oJVF`P> z4DbS(Ag4Gf@S5Avi+FVp5i2ZP)-%W-k;pSbdMNK@$+|d+;!`Laq^;dEmwJ?pH!h~B zlct=$qvP)lhgb^B$YVCqM$dFu%p4xP=m+y8W)}@{U$@@?5^7voH}{ek={sKt(%iuR zuj}N~n|zj1!7r-lw5sn-#}kz$HEnEJ-J+S7aITq;c7_H#@Gy+oG@b<)&E?Zj#GJ6R zR~Dl1if;}PlGO=jC$JulE~eqj0>$V^F2Zot%z^SmC*tx{Rsau{!*s4=g?h7UeKf5=U7p8j+M zbYLXdm1X1HsUP@|O*~7+PmcI~JE3geNPew*EKNk(t8@MCY}Vbm=h1ZFyFEBF(S{fz zY4kYCp?suaS(7f9j_#Py-ksOskoDT&iE_VMlv{4DIE1-+k9}aE>+N&58%cblu}J!q-eN&xtNF6#xlUMjV% zdQRZ}=tMJvriv%_rY490Camxx_PYEvsdGz8N~ZE!61PTqP(){e+VSB)m|C64zLx;F41Ks7GirZyLBdTyRK2P5DwC+x7 z+;v`pmk^r{?*Zp+A25Wi+Q9jG#yl0orwu9LNd}I~Z&1A;*V8ZrTOL1&<$WN$LQ-a- z7Bx95s<$Er;NptD`m{LV@;s!vHvmz9lVd?{MDf|?(%^ZZ<(05+=VVPHJV3vodYYo4Kozcl1`=GlZ#kASNSeksuorV-K_IU8-j@2HK8R4p}OMzd|k9;yd~z zNuO1Q+@#_$^wC0kNwkU!!#zy{>_c}B-PaF|&AssH%?pM%nkFn%2}<&u2i!@H0R;19 zsMB=3yb5wM3=DjPXh0T~dYsdadfQcS(ac)rQYQ~A)EQUY&C968hqemBdf60jHfvF4 z-x0=K<)QKG*&+{-XmdxxR0~=Ra#2+z({}6)QJ=)f)Ob>CcVm`JZ3%1QO*R@&I3Vue zY8YkDw}PyCI4+RYizAF1Y}zrhMvX$IU_spY&S_?{JcO9D2G$U<-dwEsn2t+MID!`G z3-PD5z{4bpA|VR0TwLXki>NtoOV+AjFfJf|-d(V~$3g~E{kZ}zz!Y@gU8J6qG4lys zpT|Z(xt|;@@fHWHy+jMSEax+yq_e#C@X)rN6;~Oo9^(&BG0 zy~^N>HmP*gCYYASE@(L4AjJu1=MbHd!DKi^=e$~)>^M_+KG?b4SEqpyt-Y_fAzsq) zycE5aBuW4iJ06=|nJbXTgqUHupz6sMh81F}`%Y)rn@-FbFYtg9AV)S0?hRYy(+AVi zW4@V1Nd<_8QMTrTF>?kJ{K&%d%mi(#MJV3&0A4G=_^@ilT3Wi)5nXf(V{Fv8@yv}v zIHJHzo{!yDCX?}j+e^{sL+AiNi2F`dgj8~P7_1+xH1Hf+zW3Hi}&p*X;m2n%QAgD>{Gz z?YeBuU9#sOShRA7P<*lDHBdeoOhD~KCQ{T%xibePT@O!;Zkuj{`2lNZ(%s|V-9fcM-mCAjL8DMYh^t3zel=bm;yauv5uSPvkcx$rD z>K1v7M|&r|k0d~jTr`2OsXHLq1&@22l`>an{S&6>XRhQ$ot?(_EZ|k<5xt=Sk4Fkv zGs-7Mmx)#O00X^mQ41FV1t-@{CZS=JTB2zN1e#mZjRCO$ogd~k>!U0nM<=W|`O+)- zb?EXLxND_`I++TgSPfCD(?smkpjTBnMCZX;Cyv~pIxeV0gDFUY z02D0#lhbPb4Yn)w%r$aJ>?ZVYW9!$52)7{zCMzG^tLL_!~nF&9LOf5V2)k z;n`(>iTqvw$OYxiYfc+ij~CNmZtQ<7 zd+Smj#g*EHR$&0TAAFQz#G8hcj%tA z1F0LSCpR$IYa=GoahIwEc~B2(9Q42!6h$>)9J3wK%sXkaUq6lt1$cmVy+JErOBF)d z7ZJ+kY`=IWL5)KfWf#?@Es!M*%oBhx9>WkjbtFL|c+m8=c+~xn-oO>G)WWmF$3+t) zUGA~e)tsFC0G?L8?3Y|vC@of{{K?ZlYtZ;Vq0kf4RoPBJk)x1AS7d^~3C_5q4oM8C4qy66zAgi%vEX{r`{ zVQf_;r1Fa61@s-Z+rFNA%@s19xeHP4SC{Het8X5eyEp;|)-?hij<=F74eYFw=YaGB zOPb@$F=csSu@!MI@^X7;u$9V(w2#bLII&7Cjj_$LwY2C$jt^cbPgbKaxgu?Hn7|yr z-2enUM9GY-iaTX`JhL%bRe&J8Sfj<&(-s8D+>Zdpt<$_g(ejO#-xT#gE2?J|PDCnz|EQ61MqSpvAq74tj~ zl(lpzqR2~;L2!sqou4Qr9N9KN7mcf2%NT2OxXr#)0Ye0Er+gW0`RW$;%_z{s=HqE= zxdE;HIYTV~wTWw&F-y7-o;jPRl*oYfTW~@rnOQx*^U2F5OF8$e1nb_rC2@|a!AAu; z_lTqbp0W&>-(rC~Q$Ml9s+I2*AD`6-U2&(E^-^-s7VmI27o2BX94>4i)!b!now#By z8*{@cseJ^B6-kqqgfZ6y@^0cWY8qcE@{qs<$RQF{Q|h8-qpQl0Iyf@ zAyJhrHm?AP)eSE56j57Gg+@l~b1rl~fkZA)PrMd@n5Y*%wecQ>*h|xAnGG*%{b;G! zh|!L8c%Fa^YMGcF%hQet&S#KsZdqVJC;jv?MBO4GMfx^kjo+q-Y_eG;Z4`6&2zZkT zKT^cJcq>9I(+|Zg>Wv`AR&uoAa8%sVp=ew_Ug@MIdIS9Uz4USvAKF$;2f!DmM|d|! zAO{^SKancXSwV=35)U&%iE;9DY_b}Yg|eWn?4@9mz9d+0rPm^;FNp-JMCdI;uxCc% zQo6Y|@O!yU+eF@WTw-eR09amqAr1Jx3;Dp^ETt~*sVMlS+FN#l#urRu=+=D)ejJQ_ zjkLmYRMD0ryH)!NQ27{|lodJk}c`y4d;gH z57!iC1l~``P7OLd!JuWy-|kPp0@j^QigDDtjrlMDa|l7T(aZ)apKn7(D%n=ly7@Ve_s@)N1T?!$W~IIeIfIH2YLu3n6XMh5>rc%;}3szzG}3*e4h) znD*kG%mVB(i{9$;Az$dotgRjz@6_}DhWb8>lU%POM3T4|EJHXgqVo!)6IBHS?sK>& z-dD&>ZUr(JuuRy6QJHGbs1jN{YXb><(V3?QiqyO^`5lqmy99Y^36G(u4Lv3J6%{Sj zWq6A8lSEcsA4>(Y_qtxy0p+D|k7;DY5rUEIN`g%8>-UDjYC@Fc#G5_UE?}{YGOvfu zAEF(gs*3#&Sp<+mpVTA9(G9r9o+5cgaJei)>@Ro)u#}#>S!83lS1!5oB4)M;?ag}W zK;^^82l=Q<7U7PGhhmCOoCsv&q=90>MPtdVOwu6B$p+`1MtO3H>M>LWmYA`~4K@9v zk<9_iRFoBXvwEWCT9}4I$HWe94?Ww;27=&4Q0=p~oDvx7xrd(k^o^=0!XOyz z26p=->EbzKYNVz5YjId74aRvm0g;r~cu=9V*eO{UwD>qNh32rujSW`Z@k)4dv0$9x zp#5U7fLmEA5g6OXr|vxyG-%A{Fzg$$JUx8ZYC&2$Kt}S-w3f{9P%wQWqZd0zZL|!%6n#J z=(Aw_IQ0?H-7uv9rVBH3q;)p6k3U>;DClyuSHVHwIhFKtuB!U>M9kGblx9Sy@a*W= zJWOPw%i``C3UyF6d8JP_gV_krWZ3iB0Nbbu+ab@uU8cyT?2EHJ7*rgW&1=2~FUrt5 z&^IfFfQ9J=)KnUZmpVLXsN@BU>X4FZxCZ8ocokgSJyfGQuvsNjl@-9FZvjkR^HKo> zju}-SH*ztVP14?-KQ8Qgmw-iYa9KMbR-f*?$&ClcZBtuHQfu;N|*K^wXz!H~Zqj&_LzE1}KtiXx~cN(klfxmO08y zilBFtHWyL~mu~p-J;Uof3dCM3K`i_^TdxO~JPW8zfK>ZjXY6fL-FqihXSWH+F@&Y5 zR_Y~WB~eKhXgBV?6PZ3Q7*pj$URgXZGM82zWjHG1JfPS?@BQS;-?fTHpRhX#&k-!0 zo22^A^%4J%G)-U3Qo9&wVb=`x7jxMl=FbEC)k}Sdw%Et$#Z&@hVr+RQ$YAJ&HZhr$bj0Jxr28YrD(1 zobOIZ--&j(*8?|5s=X9ogK4>}rSOmjQ+ytmH-T(fS@1;h2wV`kZmDNi?R%w!?}@`C{`V` zie%L(&*d2esewxzjVD~&a8PxnR^cl-l{H)g>6Fn9_EFuTF@kzJMIlAZW@P>Ja=;@K za$pIx>vUo_49bCww#`J`j#2X2%!4jKMyr$La~ z<+VqOJqmb!Ew_CmRK6GDdkDLr;AJOW5sz}hKztk244~Us8=s}(?#~md<9bQV^gMRN zVvsX=F^NwOrZI9Ku3JcNtZKZ;QkJriVAh<@Bv71Y<(GovDf;#bC`}#m;N&$RgO2n= z+RWBkkx#PvS+xS{d1@x!3Q?d`m#^cFcJ?$v=qtPP$HwO@yX6{Y3eaazV3odPp~$#P z3`^JR{E|Ec@@fMJFsY_n0`IL&%nB`s$P-bh0B?`S6cLcG^QlFPP~6QashQ8mQ<&(AOMSb0xej50Q(9xwK>p`i3&33#^Dh@Q&A`f7N0Q#kFvz7?pRx{ICtFa z(T>+l=)|5#$AP@{3{evMS_XCOCOCwfgx+3uzWyIz2vl$2wcZ#`Qkos3jRie*arfUj zmiTc&fX3Clfy)MPkjw$Q5nX~RN7NEF%?+s2>}}_!Vj=a6t+MM)AC*u!d4ilTB@%RX zK?`hJbJxy9>l)qn$v0a6oU@_9Y=b5oa4$2eKm?Gd_TH)scgXoDgh1VTL#@v~;eJ|g z2rHInqt!cdj$?T@3j4W0sNIW`0XK4XDn_dct(T$)?b)gHO5}k=OJp{k;#skDsFum$ zL=fKbN+^SpdG7{hraetc=O#L`jh|v zViH~QyY!hFUsROnAOJ5RQvr;ogN;KdWbS?0VkcuS?4lz$mhaTdV2I+}1hCgdpv>OU zYn}9**ZUA4J!HJVFL5`=noNtq48nj~5K7?O)@Yf|*NFYxnw(w=+uo~Mv52glK3P1n zx9Jd%=m7L*o?U~uA_2T~^b=cyCY33c?t5FFeyKVeCHD|V54ywN%h^!sP|1bE~P+62Hne(-IVZh_f?GqQ1tVjI}1DvRz zy2&&gj6kvGW;8L4zfBt{T7hi?5SCVy$<0k=dzx?{3MQu0L!smN3g}8ju$2+qTy#L7 z5LS~%#H{>fcwTw*u1j#oG{#!o-U0c$zQJdQ$z;B`y1-aS9x1fM0zLF5 z<^^LxSJsrkq;Eb!LKakR%F^nZC)mtBZ@9oMTP!8|J(m&XL!IOuSq0k(3yFuUcLjTN zG)3T7#BWZvBpxUG6TLPiWXxyo$n9@*l-R{gSll3&(y8qcWW!4ohFi4ovpt9doKggZR+ja zCdbHtgd)6AjGCvZs8u)VL?R$p1rt!*=rEX%NGeds7ghb@Lit%h@rqn@7Tms?c|kG& zxT}njl_f0Z4g*tn4|d$dkNd%`N$eF7GWNlGJaM~qbX&P@_jFYr)&9i(#&G|%>` z*4owRP(f=1NAAe=I4w(k=Om6?w(tcWqSdXT(eG125M=SoI60}7d+)tA_Iqre0LRc& zI?8UNV>>X02yhJ7Rj*Y=b@OQsB%|qzlT@5eM(m!4XQVa+epFbG1JEB|Mt#za_enPa zogk0gBcGttW1j62@EX7;G*SYd#Q{V3IIoVMKCQO~9=uVaub%rrJ{IW9KIDX%TxnoK zT}c3cO%#r&KsJv?7Fc$1?>(n0FD3H~cp%kzm2BEFn>tn)i-LDQk=cA3c3EnZ7FRcEmx^$e`dMa5Yuy zG^z|1%qRRPKkz<)&vI7Ovc{zmME!&aL0goHJ0Cpqd!?%;>*~0~Ngf_-E;TtTq4I{) zXdF`7rG|xXaN}-ru*p$QfP7<*>1DPwHQzczHyQM~ibE!~nAJ;M0l7JowuDqJoO3Kn5(U6InB#?)cr9@%*fHX= zt?zNwvuLfYe4)DT{zjcl!Lh=P)z@+d_X4&C0&@L{h4ge{@j+x*w9oa!WFJqVo~ozK zQ^Ix)y0`Db1$O4COR_31_i;b;4P^3BIHngT?0&!f?#hquvXG6HnT`?)1iO!MsJjbXh_da0+mw% z9rUo?E?_KDxd>?w?Pb1kLY2(46?JE@4r-9!3u5$>S{CBqQQXRVG;(3cH2` zm1e}P>ZR9@p#s=+2&~uPZTo9%j%uuJ_69ZCeyOcTJG4^9I7mzeAqLi>vIyh(pu8Wj z`dtX}^$`|7jvjy0yW$P`@`SuzV7e_)z!(_@CKx-X3>{s0`=RuL=*r6ZZLKU`NsJmp zIAnr$dO|-qF?_{6D+Ba4UY5r}F8%`CgFA9}dP&1VZch+O>lwc7Xx+*XCL%6aTaSKX85b?6n$PU*nR*FqynrhlHpT7K{sxSng)MEz**LmevtT+BaU_B*w{4IR45?hr=-ow*SPzLvRhtbI0drCD`kS%xw#jI zS%Ghx)+fPu=x`pPAfA}$LFsM|tw(CQ-x5J0T=qV-x~pqif`GQ0NEZ;rBc&XZO?O;- z%;#_?;_ycHEHQblJgzSt4s}j=ut0QI$yP(TV&QI3N-?x%Z0Z@bXt6y(b|LoVaHQg` z*unP_uxOJdM;0?)%qfmE9Ie^nO_OLTxl<}^bX8o_0`xn2Q^Oi6;ZCOkt!8hDFuYd2yo+zpGncCTwBH!E0cR-7CVx z6zareQ#tc(H0(o}w2>Bkj#gf0V?9>Ls714{DnWLxpg4#_vLAvam%5J3_-GDeaVCv@ z5`O$iSZ@iYuO~6-#rn$3F|G$CX&_8HE2EyeZ~-xFd{tZXJhYCvUeR}*=%RxR(m0f zDJ`iHsy7yvp)~eHY!A)UBQH?kaF^|zAhv!?I(PKi439X;#VwiBU(3)SSps$DvsG=s z(_e^Rt>OQd**DcUOrrx28tgIu|CsiCeJF+lmpA|%L`Jdy|MN#6YGDi9M08SzZmWDa z?zjR^?TOkw;8|Fs-luCAFrSF&eFq6tyAv|trc8{DD~LE#Fn-{!t5Q_wq`X0XkXj{pm;#Y17ykJ4X@n)Zus|-oK0&dN9*jYs80iG@`U&R z{lU;8vUkwHPKP1BePty*4>*)8Ys%}z43B!i!fs&1yP$&igwy?mNU()(y(a|}7|iWu4|3kHd62zJ=Bw~A&!_$RnR3={wzX(9F28(2 zD`f`#1o+$_V0OVIvAJCu`b8fT)^sb%#olQb-zuvEaBi>MgT?LEDWaT+7gbcy-JwD_ zSI4;kUQ(>p0J?v!E!N9xFr;QgK|S*5vDdV5mi2(_ctwgC;>IucVWc<75_r2v1hSKz z=X6(EQql4ZF^5i>$ErRw=sjZ#&%~-87UNB* zN(S~AQ!YAxnbWF;?8ON^A;j89HsXfOhsfv}kTP?C#G23!$*&FIR}X*sV)`Z-Noo*Z z`f=>i+j*du*<#x73L2M0)o>i`n&fc`ofN+~qh;dRXL{VC(w$BZY$o6!|4GKb z|G#kCMN=iO`#IStDzFHcg1EdLd*u-Cbg$Zd3sX1yQgx5?b>VJ?I{;Tagx6P)yEl~T zx`3CQ;_LbJMW-k3yT}z=dQ#r)_L|y167qp3g-BKcGcuug0#!gnB7Ru!+`_)|X={CrXcv=c#vhdL95+Pn;~b za~!IrI23Yb&0g->?y+^jb%v?c;!SEW##{y2#vIsVFAUYdtlScZdNh0D^1OWLotr({ z)K@^omvHR%p2Ctd26>Od4EL^g!*B;thNAnxBQj?tKjy>IaM01h*CMM zLF0;X0R=thX%$V%K#`3uWl;W(9PsG4-WjyS+kClOiZAW3aQ3|$6L35UdLT#3Lb}rs z`mhi}{8)M1;~rK50twut9X|DK3bkwJJ<5%|BlU$s1IW8pCn%wpZvqXU9L*i=g4V53 zJtQ{5;UsO~m6s;_VC|)4G-t>Jw#abE#tF_q$;S9=9a-;nc$vQhs_>M&fheE8BYQo! zCH5}IfA)xZZPS{;0PfAYt`X)tYwcb#nXJ_%BPJAfHpd;k?iC~pWDhd_$?En~5y$~#JF=Ag!$5oSS}NfHWwkCnsUqq%%`7J$FnB+qaG0H;l?;`_zdi=Fix7*Gz*yQfml8r2c1S`u5YNO-RuL!)Jj zW7Vm$a)Ic1KD;3VUq&A*6JEmQGAWg{c~MeKV=q8;t@TOBU47ctMX9Y( zxNQ$P3Gaygz0Fc2@kb_TrOrX##>~&*C*LTfZZN>)vs46uR*B;Y7IxQmH&&02j?0hUdrHW}(EbMfHD+ey8G=kUxLX{0LV*vKJb-AiPgpzWLt z9*;w;iTcBQj~mRd$+OF}B&S~8>5NKrCP(+angS6MDDQ3a#Hi?k z#_(J&6hq@^X-^T3#{ev<7nJcBZPhC^DzUt`5$!Tb$JlG2Mm?|~glc{*x>@>=e>k6) z5kJ!g{nDsh9mp%zT#VDJ7xIoGjWy;>-Px2Z- zBrds4Lq=!ftVo_Eschs~Qitn#DXdNF)@vRIN!`8DtCG4|!W~=2zNsysg32$s<{hY1-Cf zcbK-Q9D%v(j?E8A{7aJVza)X^P@$?&kI;Ow?8r+dZus!z-2i6Atv?gb-g10=y`9AE zd=k{V{F2#p-Q`_p+2rk6DQw@?jlo=>xYf&h;z_Qz&+yf;JxS*ReQ*gE%Jv~YA-YZk<+|s7!bbN1MIcAy3dzcvXNe4a+ z#B{oYiZ&LF#Ip80qb%H%qtnx5*;fTl6ghJlsD+-!XYry1AD~dSKYodz!6aDv;Z_dtXF$N15u4ad6QdS+-a?jF3DDrV&}1x=h8Md&h1PnTj+! zh)n1rIVg8ofvl9G=k}`d_|~1plC88sf*pMz13f&1UNkbrj+^lA3WpyLTxrL^9Bj!$ zC66^~&D`= z%CvP4Ka|jipd-NYhI#KKI$DK}Gl*;BHB^Xil50V7Ntj0@c}I;fMgxrN;!GJi=L>3xwi$>U~D zAa5K-4sM3bpY*vuc)Iem*Yx((BW0~dS<|j;j1K}ZUeOC!MK+1DEt?ILeE!Id#RCfW za4mPtQGFwHEZdBS?fekE-U6i6pddc1dZsq?3ZH_-3wM$p~^MJMK{NTO!I zcSQP%akF_~CsAEE6t6Nh2RJ>}C>1DMLM65-6&eQ!D!Yx-5Qj-PX}jh8K*@^FC>I~J zfCQXZU<-v#Eso5^q{nkJXw~BVTs;+_8uwJ?^0I>3pX6fLzv2Con z-VTRCRS4avnGFnTdHV$BRnWaGxZdOCSdgk}^g`UuU}{e=(;YsFS9TrcYYh)xQY7Q7 za^2QYsJRIR3>zv#26BO!^xPpsQK@!8M1q_=v5t|oi^DN5C+{A5*VZHcC=}l;*M@5p zA5Ed3SdfMEdY0W$LHfEOD1BT8f6Y_cX8LY9)=*hTia#qP^!y z1xu7AbL~}bY^7U=mikVgx`nhR>g<9%B_{g7{S=VpJ-CM8?pw$^oR9fJ&cLr?;+ z501_d-K*DKLhy*L`pGQA8Y4c$YG;e{kOU0{V1Fh^At(J@gpEgqpVnhjaPfyEsWpt9 z52ho!9#a`*=y?+1+werKs@Ug=jKx=}-k3&jaKtnj~zTwvB`{!6^& z0YOZaEP_89X^FrgQw5~8bxkLKUMy0f<4$Eu(g8-PI!RQJT@iHix=*f}TuI79YtDzu zo_GKbl7`L3OF_EJr6R{cm2tmJ#9rOflwOQ z#^ZatO?hM2BNVz6(ewZ$6dE(@9)s^}=q&mo+-!sym(;umLHc66HpY$cL5DuXEuBuM zZt_%Wz~JA1q~mZd~cK*w{PxGP^_+s zVe%PpiZ}(2Ur<l3X)ys*th{T7j)xrkra1^xuGZ$%4hPEQbtPk&>`GUfr$9^?I4cQ072@zdBrbho z#b<5HQ!vobSVnAa(xvLMAZqDZ-d7vX-{fYJ4H0)nYcyD12E|=NB)mnHF+zDcU-QZL zZjbQrakkiBr1-dLtEF_Kq%dzeZoJZK6vKCeEiz_Kz+0MU;F|Yt!v?8%hNvxbgtww& zUtk5KwMbTWzjmBKbN7%#gZD-hL1C$)?}jam4?|l6EWqZbReJ8d%Dgn-&?jl|0mf^O16L1r2N&43 zLb`;S?KH8n%m8hG-~hFhEqrC{`YDWD624(xDU%NLj_oRZ+>29-uNh-f>5--g;!=WQ4|;Bm1p2OVtqF`pbbQjKMB(bTE_HK}YR7 zH}5QC{1km)fQX2h<`~DtyULPvrH~SW-gLotjk@wEyR{W$#a;srlD@FnC?SKm?i5|L!%rx#RmdG)IfCE#$^jPWM4&Q}( zZY5V{t^_q{)b*Bf1!lcqwD=wA;!D(<1FuaJDi*6_*Ip*OL95{w4@i$ZYL}v*7VK>k z4rpwhjZv89n3ra|Ws3noC}6?#7=sa|T3t56@!Sg*CtAhjsO4hXLGH9opEfSP7C z87K7y?VT3s7b)MLV)X>7jF;l??J8TnB(Rre&qdbAMxrb5GP|>Nv7i(l%Zl zFjv2al(IZw{Klw;JiAd(Ia?qg4NY<4=~YZxn^Rs?80H6eLH*-k9dtoeE2cMPZ3e8C)qA%oss2G*x}Y|a1oxhDo=0OC0#X%db*!Yfp;5I zu_7U1-{YoX;Ci;c=@ZZyeFN>S+oi=^XYY7`J>kYh0>_QCR*7|vLD{GiWYFhsYqM-8 z0&BI0f?+#~^rC|>=I(lo@MI=lpFF1YNNo*Ncf&%C$HRvfyvP-_8G&g|K}_$Y2U6y- z@QTbX8JLacnU7o5yNbmZ?_t(G(wn7QoL4CCWy141iJGR-_uhrNA|_u{s_t!2t~W$% zUt>P4HBv{tylyB!ZSlfo8tOJsS;MRv>uuZ-w%j__lldivje1PEyIyl%b_zpHlbz7% zL&1%kfE=kxJLisV+X&U5a;7Ux;UQPZ2beLZ_r#hOq>)khemJD-@?>lzyy%%Ngwd5* zT1aszGQeS9r|>$%lQVjP=UeY8g-OXZowCYgleMnlRFalM)cQIM`swH^>b93-7Z!*X zg$d2$tb)@kR-wuWo%H&;qE2cIM)af((>h+v8VB)CUAH15zV_$v3hCY3ydmys!f(j)-bR1OT~yoj*eaoCWObBSRpq^S|} zo6TkGZaSa00d=}Q;d!-m?D$T-^s2ehJIYhU62@(eIhmf12Pp(2JO~zfUNi@`AU7-} zP-6iJg0RE+6~H9Kdw7V?5QOFd!b6wWcCQ}^d4m$AS+ldg9_$I- zKI4EhIMfZsuFF}&8E3QYJ4CZGjJGVh-AuJt3kIq-dM~ner7NFsy_fzJsIk$=VQ6Zg zSHiXrZ~GOQBg{Q-a**+I8HEVm8EStg9^S1=j~EZNxr3PbN)rruybe7wCAVXw4w>CS zOnDQ!+e`OFgyLLUSB%g4H~={8mJA?NXJ@OCug|hmKO+N{n67p>4kfi`Hers29M330 zUeU-Sh^84uLhXK;L(xXhM+=$8FoSw`*$~*YDcLWl)L#YDGqPSYx?ZZOK67luxZ)?I z<(qjp2uHZ$8bU<26qT!ZNo_l=q;RHL!jG6&o})ugKoi(|ut(9G;dYiimq4EO3+_9_ zO_V6Yx_1JXTNa9LypD=W!(>l7=80NfvJ8%wYVVb}nt!mO{KX0<9|5tmAPMPTu;7F) zuy8Fr8xOeJGSgn^AgZ}?vR{$1cCp&)<%Y04c{B_S^)5QzGQgd|>DBNIu)!Q2=mKYP zN@(c?Rdwb(6&QsI1_&huA`N`}q@(KOC}zF7!w8Li-{EZ8<+>XqI1AWfrBRYvQxlnv z*Z2-Oi+Qo;I?{uC{s@W4>&Ju(Y&wC9Sh*J2%@LBsH~7L&Pi=|d zz3S(fwK~fSqK}^6^ozuLAeX`LcAq!^KZ=O0$Asxis4^Qdi<=B+3&tXMh>c!f*}F0-*1l{O_R8Q235#2~%?5)f3pYcUXZuQn$n%OO z>?q%RDZ`M?FQRq$ByDRivIl%ro6K=5z$KaXdGlj>;HVWa|V}5 zt%)*}+X^x$e36WM8BYO+^cl~)^_2H)!>cT@x0W32;2qV>0Uxj! zeeZo#vlv}=qV$>kMGLe{$riV?znk9M*i(8EdZ9yL-a6efQL=ovC0VseZ-?33LnbEL zZ4FOJB>x@VvL()|wsukPZk6(b^vtWv|O(PA#N7j;f8@ z{@qo0>m)ASd#@4XDG8{l4uYJ4O@MSM{;1)iwn=_)E=M^`0NYozVbCBu_@2cSuR3)G$ z-0P=Tsbqf9lo!?4aSy3ijl@$Mdrwa(<&CvUGNc~T6&)&KV>BM^8s{7M6p#Q?APJb{%7)=DEJ~ zqdFU-J5aP5FfE>iVzHA;DtEk2F(Oyq>O@qapjQK@DS-6BZRo zj84mw%;0&QV<_)1S+$Dg=}G^A4?$q^(>Wh#QshKNNiqx;j> zHn#8HUNhv~Q=o7U3-uy_az=29p6QjHb|_O$$vf1$!+t}d$gS1f4g(hb{SUE-}7&J#}=jn(r&+kQ+U{6s7(LxTY1k#g(?zGmhykM9Spu4z zOI>C~&Du1W2W$%OT*UZ^+cTax$TGmEdAm5cbD=bt8xvFT^vF3PXZ^XZ`OpZTpK!YD z!I+^5q>P{-@{8@uqGZ=g>@!;rdmCo8jjlT^r_JFk8;c=8-Szsa`AAGB=0O6y^9GnE zh+XwVwm!7ll9gqI=bq5pto-s-5|_%O9(z;3QO3*LGQ}w33L(#&!cw7WuME_<*&e$; zmtxzDjd>A5xsigh_@W5LG#{+WL6nUF)`1z__MjsGUA#sFlOOQ8B6(YdG2b z>BIQeGpw4ph69QSFIh-}&m5n&dJ%^6H4MS}J*riD+l zHeh(ei%SZ8WoM$$scG<1)3wlh)N)E*;-iq9g-Q=0>I)}*5L{2GL7#X)AKxGk^vZ0m z32?OJJ=>|228UOqTN{EA3h1Ny07onlyNiFMG9`%?=4>gJ}9Yzpn9+JRp1i2J4$88!xkl> zu%>(HFu7b#%n-#-oGBCO>;My8P9tuLS~G^Ji%kkR-jVB^u-fpYxC;UMyJ= zia;7}lS_NixmJx|!z6?vSCu}w0Lk1p47e5=Gaa|!&q5;AI5JJI=cPdqn5rs$@DlpP zi`4S#fFlZ^*^VmI~K?Wog^v5$d znZy!ANp>@dtdb6*Er@B9<_sYmdI=;BU@y2a-j0EYyMoPOq_WWQN_)VqvcDH8VT@i$ zYipqWblx<_%a`g*_ktmM(Upku%5e*r!Lb!ty3Y&Ng{BoKsSF-Ikw8pqEYuz2^%`y7 z_H0o>H1!7XN|ML#R=O>ll$up3+u>~^IHgd6JUD3Qho`<5XU%PM9SuU{`}BEPl1@+D zkiUm#1zdYCL`R$BI#{Q}HlCzuy`tjTo>at_F`X5t@>~dteD(@tO5|uz{YEcO&0BZg zlYFmbQcFy(`%3wa_Nt}&NB|B%m+s9Kw?a~7Z96d_zc!0}6DIEvtokAx2{edIY=N)i zfxjJuG#hD!w|D0fl>Gu|X~4=TrA^wf^x@ppy$Cbr8+{}&v{ljqJbyS! zsSWQK-agoT<#5#g`@!~_hK^dWyhQXZYeI@8Q1jZG1P**iuCgf2BsgzM1vLeyZ?6#5 zde4Ax#o(svT&YURu~>1xfI;9dVbWO19+4x_J;#2rxMRB3g)SqAdMVCDiFM3j}8?laXfOsF~?})X&rp%RG8d=|m%5 zm(hDp#w*4EO0{p>^6umj%6hN@)RURr!c5!%Yt(x za(LL&$y-Itlp}dTow*&ti>sYT4ti>XRq2#XVI~l zUxO16wdH$6AaVXOQLpDt7R>YLTtw+$6!g(?nij>8)dW7kBGYb0&a<|HB)EkEVVG(v zOFj+sdREzwg%OGE=xj%FXOPw?XH9gHL#b&oD#CLyDHbMApGC6d+#YFex5LB^8nQT@ z4Z|$-tIIjgCzDD${KU$=S%}OLM&G?x8z2)`@r6GM_&7D19pDtXm3xRar20B%@OM` z=IPFW8?Ef}I~i(e7krGW7e!e?@cKGKlR+*X-z$Gi5go$$=0Q@o+Ov2v8OG(|%VFrQwJU^(I=((eakP5W8}tfuOu5`wFNre_+_as?ORowZYq}A8 zMl4DT`S@O;+Uel|K*Xq>lv~_-S@1K`Q+5<@c%tFb%|1a)oXi!Vg}mj2R?;Tpz5)*s zz#4n4a%v*kw|n?Jo|fZ4QU z6jAd%s~sMECA3HfTE*w-4hm&fargjA#zW2?a;}jxVP^>5`Xtwi7`%W7_o;qcfV}Prb)A;b{>CbVo}j7_XqQjIc>V zbL>&JSso!xvJ}ygjqspIB=%UnNRCu;od+_*Nn$Lt@nGbrUmt3#8t0s2EsoQ}DQzsz z=*~;s@cL4Q;7gfHA~&vXP$H95d!y;k5m=gXX`_uEEA2w;Rbf;5jwKkyz(f%oysAEtbajdCT8H+|Qm8C$8PGSI zM7&P2&y@*@Hx1;GEtr(zfG-rsG&;9nFa=d{T38S91aY^rqt*rD)p5AsgOnKcJ5YPw z-kdKWN8;7ip)$rDD{t4kHg8lYor*J&l*_b?r&8j*b^)xC7YfZ@W~dPhTSRv944&I6 zu`k*#1_2n7?^@2A;HpT%cvZuSo>wnlL>B?-_Q=ID<{=p19R^D3Lf|pqFQI1?-JB=4 zed@2UZHAP2jK!PfZuH4+xwOR+W8<-F^kl5rI-Hf8H}OWE*$|DlH8g{^XP&@`RhIg- z_DcdXF0Fp@R!3oyVf|^~i(&zv+|a2+n$8%9!~n*87G5NDLtIC-%J8HobwWm03^rYT z%N-o!)w3WEOG366nruVXj>B-rbwNm3^kOW{W4vdCCBuu^PP6Kc>-1J1_7VGY$Bp+I zS1Ac=Ti~T`D%>lyxad?Dn#@Y>9m`qSCmRZ^?~?oFqvIBbgV#gLatQlKz^=$0oa{vF zTHrx)XRtomDSRvrhLZH!ZA|1QuuxP7lMU}7>=ZRpCgRbMgZevuC^BkG+rZn@MZGI8 zc>vBV)86!G=O_+lSwBc6>oz%m!(MZdrdcfd>5P9lBa{lBMc6&#$t;#)J8Mp8`lmFb zv;wn1bqxA!*s*tNh>oVRa<^fF2QT8@9<6%eiA-2d;v0HK2t4|Nb>Eh?z}SkPWWtC& zSbnNiUE1>=1%pcnPJoa@z+2H+mgTjZ zp?XSGQi0Tjwd4fe8_?E|X?2!e#vb$Zz4e%A!+Qh(*g_z@Ik5e#xa)~_p98qh4Q=Uj zdW6vVOf9SMDZP%$&8U^_@~QH9(_!s?S{cn5&HNHzBiZyOCtgm*DG!pvo3>kQL(-wg z5Bk*Mf)9$&6LjQ?&oq3xq&*Z265hz<<_Pao;!Tka)lKCAL(aTdwA+(Oi15>VfJLz3 zE*3Z8{!lqe&al*aT~7tz?a84+p?4DcFl%d~-(w`FZjTig4^HYS;zh9PtyP^zDjt$b zw1TB*DFy^ZvGjD#oD2)gt>tcaujbx8-FhgbZUhk&?En>BoD_X)dB+c^BVZN!6tkzY z`t{g_*JX;-D7H;ly@HGVQ|8K%zDqXY%-!B3;O|h&+bcHCV+;ueUhH=3_QO zE%!nmAes-;^_!W&N7VChyL{>Cp|O*~vE)5Zu}q&q3$3Y+$VMJ?z}k=mZ)|PFGRXas z=INI-PK!ayz;C7pY5RmiW8fTYcm8}mfl~V+p&w4my-sPmd-tBR-8-_ zOFOQfrZrARllXf>(mrk1$LB)(LZrBZSdY8=T}5pxN5INB+k+8ZS@sv3^oqo5pDF-y zb0|xa`WC#piAUoWMWj&2t=+sE)?U-mK&ueXXz-dCC@>zIL{O5oP!k-U6CSzL4Ia1d z1oN&(hPuL2Ugj`dD9zj+W*t}WI75@Rrnljclb5cr8nBhfWq7uGsFMn4mGAO;J(T>E zRVfdtHeos*5#3(ed;B=r%ny!E8YYQfw6vH#mXa{TH zxY{WcI6QXP*sMayk(=m{qAnM!?UjJ%`)aLmWf(FN`iJ$k*(vcodmS)|Zc6BBUaJDu zv=Nacw{9!<9`E>5k9e8`ns)F|0&?&n$n@N-iSRI;OI7;Muh+;DNpfHtugoZ3v9FIm z+iZN~xD!xbJh9q^XFKTvVm9IEoH8_}_&=w?DmI6Lsf$6JdHx*#J zV^(KiCjfVL3FG7D8y8l2uHl<6fGuRctYQAL#zg&1mxv-m3z6`v(hUeN!X(_kw8?68 z${6x`3;fRA8jP|xabdMdGoVgyW3etsh5!NjoiFQ*=;K~6J2iC{P*MaTgsORJ9=D~u zv5>GO0JgcpL;Mg$Q!A5WT12q zqt9+H-#8ekhaOvsj{>2pvSdo<)k%}X=9x;e9fxCv+S@5I@VJK$IrXX4)q8I_<5_pV zHDE&5(2MYVq#@{`QkxKx@wn0O%)?@|cjEc^)f?^hQ{36Cn@xB=b%qjww(o+mY0&lw zzJM$^;vs*a5AkJOg{rrl)Zw!EfPh^Z>t+M4`QB zH|+69^_K;c8S)7L(O-3D`kS8FBNcCGy0$rP74R7WlExl&2!g6BMH0!*+2 zUQ*j710j}X5A-r@=P|Ej&of9iQ=vD{(ko!(<*=~IWF-}QFEm7$UzQvv^rfQlh$vDS z0`*Q=O5m2Jnb$D(y(W(VUDx7y6lXZC{c26dRu$JaN{)Mih~!El@a)mEB_XKDRNyad zgub)^UEW(3c(RI@c}#A_^raTgeN|SEnu*%Tic!ta97lzw>o6J0m8?V;fHURL>;!LqR}PYwa-|iY_eZk85haxKH347i-g*HCXoz4ORIK{ z6Y@4{YUQzTb52n+W8MWSnWR3ju^^{9Zwuu27V0g9 z_Vb)rK&8r^_BgDkF3%9&o`vNKuX<(;y;;XH1@-V3j}b-UfraK+au^WeJ9{?ZNJ~&I z(9ips8WW;fy|_}icRf&!DUhzjD^_Ys@M^8Srd4Yrt0XZCO$=n}RBBEJG44GikWAy% z1#VMa1|KL=K|$>GD$Bsi#(vzm3{rXWuD!bM zY!fj$<(Hfi_=7=X!DpU*yU(51^UE8BFK?C<3!|Qo2;!jUdSu@*P6tAqKTU2H;J3C}#~7?P zAVFmG3RZ=5r6LMpJ?b@gYOf@aVh!$b_PQeeV}yMRB8mJ(h?C4=i0;E^CH*e;|a#D1lZT%SJ4HlqXE?n#6CkaP1;Sy1xBKkywnSHmC06CS3=5o z-AnjDCP)r$YL?2s2R8lE6-ApirXudr8T(Sca#+QMu*+hsdn8)T2b7f zOHpzT!*Ux}cAqpcA;%PrOwUO8l!Rl-w%9!EQ0BgxX?Qnua_){p#GDX($H*`wv?L(l z@t#$XjPt%debk}uzHlo|uW-~-`Rq>6?CQX1Mqb)`9%l6xQ@ z0gp|Qs7iP0nDE1_Fg^oCMqA=KH9m&&F*qe|>)$|$R zD}5Fi=$e=3WQhq~a^ie!V)Vv_c8^hMK`yd~2>UVW+PV+XaI;~8Y{9gF6Kx;`6De*y zjfQ!WYLwlQ?Cm=0Pj(`z2{3_r8P;{7A+)F;uT&?2t+aGNpKI0Y9=q~Td@FA|fLJcZ zAIcyCu~IKW<}NEZ0_ILVbFnF+>>eo~-g)#;a7>8aY8UJ=&>`(ofVjpB-P0V1)9Y)o zhfuYwo#tpjILGg5x+g{n#q-jS-~pUpq7?zzd;GldwBT^?3a>rK8F^iKwrZ>7LZ2Ew zA!2c?C|}`Kj%R1!Nqt~qFPWN!nfuvHA-N3?SlgXVh^jg9vE<`M_33!S zUS}9vy*={O6_=4n zP?kzIed>Vtp6>0Gt&Yy5Q=}}*60+)XU5iMlv1i;uD%Pv0$f5uR@ab1xSB9pzo87B^ zJ9KbR&p{Ra;j#&%Nv_34L%b*wIJd=K$$^fJ}qxspI& zvyFgwZE`09nt?AS%5&CShx2xvQ$(MQCYkjzVtXq|#}E#TMiQMg z@3LQBH`C(C%pN?o*G&^?t7!0k(Mo($4@=kI<9QF4h(P2PF#FAx1UUr&HrSk+kh zq#7+m=H!p9QgpAJzCT0 z(E`y^7|7@?Cu*!RqZ6rixj~|YFe+%c^>QD3=x!T-l~%+dSI?SSk?Yx@zK1uWQ%uR(6a5S- zFTEo!Fr~MMN2r9lRZLw`ABeHx(Rhbbo>>$~B`yad*ND_eEY3}99LHwpnc)?mmfE0# zu8s;l^?ca<&KRA^(&zNR^322~i3K*=P02s}+5Zqj>eMtycKf*bU1(pF!&)Slbx@<@LifyTv(^)#Q!8X`!0Yyy12`>$L$(JrnumTJe3i^P&rUX9a%2<*(=-R^ zf}YNEVF;7AZwEUT)u_zha-cpbGQjICIDD_M=i(`s)rjh5560#;Cj2xvF5?T z+b`Hyp9;|}tFIHw?n<2t>i#-$=I=llD&q9`*iq-E-E4)(6bhn zXq$Xis1%wuv_x1Bx!6vK6L5l^<*e|YkB!izKUY{WT@Qq(l7zNZ`Iy?4G@rm-PYPd- z1)1m;jvIs?D9LEivo!;SeF7C-ffiGtRyW@I79}%@-_!DkTpDyh))6~ZCbnW!FbPmB z6F>kpW?nCJ(4WbeE%ZHF+iU@N7My@kFLAmh)K4OzcZQ3$jt>q6SH{o1DNH~q1ROfB z6LFII9x)rSEU4Px@Ree;AOmvFmDzg}Mi^en&N^bdK5=JngkRUAE_I>^9pD1b!#DR* zL+741MX6b(Ub16K5)CP0`556IV)IkXM<8d?0cFK6ip}HoNIz#KWql$Jup2lNPj$TW zHSL*j`AqSpzZZ6yyHQ2F%QxvEk9utJK)QK^OnAE_rnNeszKM0-7WGNVJ!+X&qygm}7*= zo(86!wg56v3#}(}?qO^xjwP(dGlQNFfK(}c@bKB|zGyIo0`PQ@wP&i(OI}M;d~=-) zb+hoq6s5c%ij*rd`t4j9T&PtEK|!Rc3U6oJoaSK^{V;y)Ybj=UuisMEu9!MTVoSK= zYWWpTVIlGu>jw1d#c4ximo#g^Q+Zz#4*`72lQ$j2Z{Bc1sEFrN=q76)S30P*Hzn(y zDe{*+0tnRg;;ttA9!K2C3uHIwT4sh$FCE3QC|hO63V!1&kAiJLQx-UN+gCzDoLIp9 z(!ms<@B!@bEL+k#z&KLnq&(jzIN8cG5BRN}iWDOfV+TCkAyXmceMYS(!B4_H^zHeB za25gzQyhNF^C-G%fStrJaAY$)oLFVNR7pLd%dlQ14}>uKCB5n0m1w4HcfP#^UnC^i*Y2%C?O`feSVkR#n)~{= zJsDqiikdV!zql6=)jk=tW7tL*=`lT3Z^81V4h21 zsM8X!zYWaLb=9ha8lwirxnaJ0ESLV8xbfwJddxD@3}CyU#cJHdg5Ao>^W)MCo->_xs#FdiOSV@QO0`4GOJoxbQCFOj&D;DAW0;REssp<$Sg>%qABe9 zY#Q{+J%_q?s08&|ZG2m%hiwbF!F=x!UxrXwo0YFWn(HcAb6*Oz*$%t7!Y(Q7gTg=c*`(A)&d$VF-ivbl^bx3# zBy+Am9;X6;y#;=r{0O&kOJ^UxfGQ5zv;(!_d^b^x_QDZNtCoZOXansX=R@&no{bBJ z8g5_HrgPVp;RZ|c)lRG@$^EcV-lHerQ%IvPz;{d>L%LF=u)IUvhA@^3wbU)wZ=!gZMYC0EF$`8*-nq3{ZIj1F z$YsXep=K6rr|hmj3=bRP)lol+iu8DvFyV%lqxcM#q~V~{!+038PNw=bNRl>})Nm`D z3>_UHshX6zbW_4W4HoS+)eW)*F?2>`y@ z-H?7Wi{U77np8c4y6G_N=ezpEA?w1zK^|m@$2Fn2fowqupo~_~Fb+?+sABDa_2BZ8 zkj9J-paT!nBU(Tw>7kA=NE(Ps$X4&iGy* zMsTr=&6y>8h(@6iH2Mpml(cn3C3@4Z2X396G|7iZ)gKQOMBd?JMI?CR27Fs~_=~)5 zhpQi-K9}$sd2l)~nHI9?(iFne=$>SWoY}RsX+3i;^`_%!P7zJy0g~lC3Eq5xt`~{^ z*e^z5hU(E2fvvpolbFiKB8U?fuJ}gvp|{-=;N*u1UA{5#r4QFoaQQR{YgZvs$C^4uvx?ldC z+cMFwv8s!UzV({JP?@K8jxVBS zI+h`HpgK@;@Qg-?UP$W$9boGhQH9NJR^w!TLlLC?$~d!ykvA6N=>ksfA}F@>5Wjv7 zIS#}UVKlF4jx7u$MYty(dirHPK+V)$5KCUM?k){aF4$(odpL*($^`P;d_LcAc$3eaF;#_b?3&!+5MCkW6 zHS8Kg1j#zEW|&}Bwp1NLH5L5`rPYmy{Q^bWKNw_lZ1!T_XTh?~c(nh`IKShItkn$Rl3BP(v_ zjh1zkb5mI@7J8;#xGDQkunE^Zt}9V@KptqzJ38d1zS}$#-`aCGQkyY+y zMdpk-+CY5^$GD-87`+65^^!0w*^YZq*5(<|l0u_<-&~DIAHPSn+Epla>8msVZvu$l zGmH|{mY9d<7f*Tz4iI)#*O#5yucGdyi_}hZEAX7*OZ5No_y8ooeHELD9?@@jNpTAwH6u`D1M!$60G4PX zk`ZY?K}$d}CkPu8ruW9*JbISECVcORgsu4zUZcsy`dhUq*fvp~U>dVi-ed=JAg1nw zO6BxsY}?=!lb%?GCW>WKX|IlWz@Cm@sg*fso z703|H3gc){fJ5$H@p>jOa*JPMVpwcNJiorytd|#7k=EY9gVav=We~&X6!a2Z-3y@` zcFteUK{`8lhM_-y&0x^ld=vuV8j(%?mqJUFZMQLnfow%nBd((@{w!tY&Cz4sRGe&+ zSMUtUcp4EDDfdl1fz8`3k1E7p1;#l+0*d$z%tU1ZSSUOgGN%L7w=R!7Armx=(;nff zDhD}Ec?&25JcO*bgv9T-satxyo@QG=X?pTNRtv?Y-ZN(p8>ggsc&a?1FDDOZLw|=iu)|1RbPJ2LrNxS9+CsT?qu>qDHWn5EhFK3L-|LJ_(s-1+3nMN8 zQq?YR9Wss8Krhc~1v;Sds?nDM1cb~ss-iBVO>ILc)tfI~0`n;?sIFo{&I7Ev3c_}D z4XD#W)Yh)GIW$gNu_dD52hRyO*u?6Qa5e)2ryZvbUMTTO-%B%iK}BY-JgG;cPjoF5Y?PH6JoPA0xH{+fZgdH@%K#SLSLoG?2m7Qw0`hVQ z8Se}ODW|}u0Y<9K$F4;JSj|x?<5+w7q;nnzLW;4vjh|gMokBex?pLWpGH(UZ%LS{; zUSlBur_wv0X-^oi9KvEb;n(SFAoW~%u=y5<6JtlNK$xZy`o%4z)A%FRiALg8oGAnw zU&s>cFz8!xi^)tw10Z7Xvy3Ei3~g!4R^+bmZZaJ_*&d6>mW&hj6cS@q)G?(@*1YX3 za;Ap?Cw10l=M~N!p0PS;F1+K=P6KRb_D_ZUOCjeg!4kANZOQJE0iyFGpM)p`dNfRr z;I1D{ZTIshMe&zI*RwfJ_oC-|FQT!sN{utpmpEI3m9-xZ5G40dHHmQEm^UxuJ9FSm zH1LXJ0Jw35ePD33SQ)$f9OMaQj*1i)q>&$uTsJ@B=OooIz3+7zs*+BdZV-%-m@Mhc z7$OW@1ZE%sl;F!5Y|O~ujob8^Lz**jp$w}@t9YW%HBu=X9_WhDu*)N%Y@Qqsh}@Y<4x<~sqZ#ncD_PXtiQC7#>%-6e zriq^5>GQ{cR`O!s9rC*uTvEmZdMX>5mT!Rk2KlrN0JkiMrmv5Sm@O0SCFS}~iDOCC zc&sO@-$FhYFnLg6A(O9qTW7XqWji7f zUIzBPhoKAJPSYx@o;&nBt%+DS5wYXFBRm)c26&3wb9#-MBe2Er&DOC zeIqL{*ItY3nqk|!G44xMlrQhtu0}`IWE}Sv3_M87G3z0x5MLV$1fbGTw$W>Tqze~| zdNq|)xDm)zyK@&>Uk-_WITT0NZ5=QR0i)&CV;hFvxT#%)h<;5YvvFZ7Q<{w%jqs;L zkv#gwsdX=<@GhW81!4O@TV56RD`T=!ZpK->gjWql4lW%=8=Q7^EB8VVnthF1P z1g7;C&p2n-iN*%;0HYcopO`1fv5YVS>Dw8|7>~OFz)tn{{|%!GYs- zild#*8!m-gZ3o5fMCCOZeUW)4W&K{uJOyFP$E}XY>*>Lx;>$<2ukqPpQ4&0g*MJfx zxy6z7PzITR*wp;#-ZIYgF|l|{2|dqQum_Z|)8r}3r>=t+{)`eWre|pH%mX?1l7ku~ z@8G3$zD4&_#BAaq>L$;2KoFy2(1snXO6O4J5Xz9ib}_S7h3j&`8&s!pgqn;Ls#`(b0xC)aZ>7%y=dc z2wuLAsux@n5d;jLgT_U$*-V_t-sJd>EB5nNTypob%D0$J&yoyY^(Y@k?QIZ}#JLZV zRFtsdfdPdjd4T7WF|ScYmG(|~v@PDslNj#%VvwGRyDG+ekDwjuL8Ct;O#~tuZJMoA zNRcfS<2(d`EKVhPjI+$e`K#$gdR;%VkfQ&W8uxz zE)8tSL6agJflK(fAk@cQ-EC_k2aaoB4MTqAjhS|}s4W3h(T3$~rRQC5e}SZIl) zT5>^088liK$}6v*k+r~0FnVK84d}svAl%-|+Ogv9hw^ycWoH1c@S!T&D8E<45u%Ht zQfsur{&redHlWy`ixxakU|zj4xVuMW_;6t^k&=kLL3ytI8PGK=T@k8u-qhZM*g32+ zmz=3ZYh9f+ph~VqqDafK-_HlS4|IbDW(^3gu}joXHcJkYkf8uU+-7UW8F(hCd323D z`7lM$X#w3l`WBu7JV0d}nWbs*+s?_zSY(pPIy`_)z}FyqZOV3#aHu<`KWbS7MZB6i1MQ1ZuOwO34|WJYpIG9jZ6^GEB|+nxQoL z;Cj}Wh~KR7O)?oAM8m@OvTuoZ;-A+KIiEaickc`_fV@lkDRcA6pi<)EO1!m z+^zQkK?Qh$t+3>CC4Y-{;h-o~(CbSi@yGoxKaxj?{&cm1~z+{?(7LfUA_paHU2HWj3AeBhu%JaC{WtpA6 z<-WpU8HrsPiqSV z?DkrAB@t(zw#r`4cdpPj2M??auHG9*<=c0gkv&xR*txQIuZo9w%xcy|`%ICm>3HDT zq6WVO=Vux+xdhw^9!9m!?<7XSA4%KjC>K6Oc6Td|Yr~usD#UdV)61?jhH;O0(`Iu~ zma{FKI(ZK{im+l}0^w-)s9$8g2Yj@mqI1f+B3L=NTvVB_tnSUlxRDvXCl3+n&1{H>P0W6ravh`$46$tQ?ytNU~m)T zksLn6l$q&Q0{TXl@>mjkfV;>mbmyEo^I2P;?(K`GFLOl~R~uKJ_4$f7!APE1Ca7DO zuWz!()7cwMy~nOu6^jR4isXiHCJ%W|*+4PiII}zbGL9Dk{JE?$9>Zk~kyp$x;XXj; z2r+`;K%{G?K$XnW$tmb7y>MMbR~>vllb%wnq6JE0udl1U-0ca(ZITl#3aC-8HQ;8S zOo++07Ea&ciL5qFCvUG@aq8i=)qqD6BgY{=%?m&ef-n<|djVG#6t~VqKC)fs*CP~Y zfqizRokrkKpl@U>O|i>Mg?Ludh1F)435XsMdau@t6#>};Vxi(|NVk5}QPELeCs!jT z^6;GqdSJ^SF6oU}sU*|vz1jk6_d&ECRDVy>jHAK|Zwk0==M80I(iE=&k=`3Gx9rA9 zyvL_d*VqG_=*le_ft`eH)TnQuv{L-N@x8XpoRw;w_hgk^C}*;s`@>A&X(qO5TkMpD zqRH59dE)6Fn?pxESMq!U;IdaCQy!5ci!xtxCe=QDDzc;9%{KJ9GhsIYeo`*A8f0;{ zOSR!(=UHL{ZM`<&(&2@ zOc%(X>`)U2{VCEQse|uOcCh$40QI3!sJ2fY5U>dPPW8FnJd}I5X^w4AEM}rs^8vuw z=t?s{7z~%%!wVRbPQVq}S$;5{ERo@DIMIt*ef=KSCHDHFr$GwrD+?({xP4@=V#{WI zq6|$gtRG#0n3|7c6yoFT2NrgEXQf2F?25`y@2sD*JfN7dda}yi$k|4+#Q|!wbTHRD z)a8+_;ogbnYcwzz+s%%~UW>L~nORdgRn7xNN6FE=3Jn10+od?vLo)7+C^k{LW84O9 z7I=E@y^1BbaO5*X#hWWjX*hQr5@0v(uX}`O6K6yyFr~=2_`SEKqyi&S(5twOKZ9sw z^FIb;}cK-(d6%+@tHHesq&ETk4w;cXC#$+k8_^=jjy5^#mz}fI01~ zMNJ+((+R^|0aid~Y==M%k@rY-kP|tZHSHNZBMj*(t|p{CqeMRorQKC~zK5wArTXeU z?RvyJ4&E>CmhOch*Y?P$u%Wcey_KGt0E)&Fj~*D&ml@uA8DScZ5=5Pr5AFx7^P35w z6C>`PD)A(ZNgGg2VHbv`cbvvBP8UF|gYlXup$8PO2_7u?%0Wh!6|2vJJUD?u(SAO4 z;DlEC%1FzF$WkuH>jG8Qw1Bz*`fTP3-o!j_cUs3)v0IVQQe87!HM(Qed?+%!w6&Wp z#gU%-Trdk?<(EjIUm^)o>QK!c6o*W zG|BbNuso*4JjPR}#^<{}d7%%yFznSchThs}iilwhD3CUNu+wfA!E_H3JPfIh%0Le9 zjXznE6dzziw;+L{8d|V8*d*2G4O08;1;D^t|inQyo z8ay1Q3Rjq>_EhL8Mdg|55*=>k-j;nO`P6+}2s(TOwcYH5*-Lw_F>#Jr z0K9k21Qj5Zp6xZ+&7(!7@F%6JPXU!JbCyFA(Hck=UOS9W(><&z>lq$ufwpj42YA`Q zM7pvdxIPpQE?y(QBL|+Ddi*4?7x7l4b{Kxxf{|~Di3Sg!QrTJn?n?m42O5eXm_Qhp z983DvW!;M0p*K1@=h?~h0pJ{|TroSu>jU4FpjI8KnP&ku%0UIPN4%j0KFKSR`I^j-~53;f3=Q z8ZACddca>Mg}pv1o<68cH!4O9?gFXeMKy&)XBdC{9-sq>DTZxhEG2Th@#CvMCnc0B znAIe=XY*8^yp018+9@HUR6BpJ?=hy8g?9#qB?!Wca9g6)yO4XhN3|>xqGv4ze8~uY z3NS@%4rE9>q<&9|EXyMY5S3WW-?Y_63F0m%JomJ{NP4`8il-3>^BUUJo=OC?FRunOI6oJ;9>_9DHdI(QSazcqMfj&${XO@yV|+e>LydKFhJH`a$D8&ms(EJ z#BjitRC-#$T(1S6(hN-}>(J{Gpfpm;o{dOf)71yAthn0-sz@xf#BgD5uaody+-e%F z>IBxjQyAd0N$Ua$5_gk>VR0llaDbZYt+`vuT60DZ2JNWQcW)i6*1e5-H-%G#n2R0V zWSZKZjz>&nkoN&Km7wW(wv=*J+5|_3@hZM!Ea}Bkw+fuv?Jd>ZO7C@xsK?F|Br?Jx zkyC&Z3WhKke(5cVzISZ<-mgZ~&vl%&CPh~!);idHM)bA7IK#9m4}xq4l~SvUE_Kht z5ZSfn=OkK0Ch&B&=s7g_kPd0%>hrVBMLJoR7iTP}hmT>>3N?A_Jg4KG2Hd@<7p+Lc zbUxfQrd)K^+k->`Lgom~0R?xSF9P=k@#Z$XQ>%vMF>eU84S!6R}r7x9` zzEpCk-Rhxz=NT+G%n_m3VGnavt*41MfuQ$QWO7wQ+F9&_jK{1t$pwG86up9xc+ZO3 zp!Jlo!Su{+J&P1w_p-LM7k$guq#%8F7WBq^9_d!S0*+jE9?i4D$<486D$md?G#}66 zR9^sGOB6+z=nLQJ1$`|lOdu^cq@HP*`l6rgli}H|E1ym@buIDK7YCC9i)rz^*pgKT z-mC#*2?pKd!Bb%?JED=KcV3#!zSe4G&nOS}iTBwXx(uSM*X#6By+W#A3cpHvx{NPh zH`PPOIi9B+Y)tqP&XL`p(u2z493v{qcg?K*Lc*-;xk-aIz{7R0Ca$&3C?FyB!;EMS zW}`kvONZA#?9SbxVV<%eq3YQ_zVK%ytr`yjEqU^-z6c*U1{z5dLTU#lL(1hmE)Iab z)sDq&$xg6qda%0Yk5kfBjwTIvJ3yGpf+YiA%VQDH-AAjrx4ik5-7CRZWWqbR1kBfD z=>Z?D%S_C0E&2hy!EDW13F7v(a>Cg6D+6zFnU^-|E%nP}s4!8@c~u@L9Hf=!PB6B< zEMn*Qd?$|i<;6#WU6P)%En&P&n$Z$<^;Q8)A+eo=+qp3Gn7)Oyf;-MBc zgEt=SJF1($_LY1Tr6!D?)$9<{bCj?7TrDdW8gj{l_<92z$WU&CiP!u!f4M~P<&tG( zB5hOJV^ia0W@N~PE0q}V#a5l|@zKoiaq?P&eH$-B51#Vg8$5dS_Espk#siFZ#jprlERBi{4`Igaj=DHJ9`K1Qd1Q{KJjm}x3z9#|) zrocpI{YX%#UG3ew(GG{vn&r*n2k85VW}dQ5qQ5}s*NrN%ZhRJ4lr7sJ|Bvj2sH&$3#F`05raSi*%TrFZ@l(K`H@SJls6-3bRLLoWVUS6}KrCiT0*T_fsbSlBwNiH-bzp z%JY~gt1J7+Uj^{p!^O@PZ*vTXIF%6x8&;!N1gBQ12u)>w*;LCjl@qm*)lTnJQrQ9_ z6O>d+*b3-zW1c2bxB!iKXs8` zP#M=%E4cU?Myl1#R^t;RnGMA~OUJB$UdA8M19Kb+A~9o#ZB3e2{p+ zx{_|mM?F`?lb8FRHQtoeOw86JG~gDO(MMfn3H+SN(zW|cOkA|&Nzn%7bEBh@c^xc` zdQaGj&0@vEtDW56x`4Q(T`mD6TdChwu5h8z^RcdXNgdBPn1r4orV*<{Z1&1XO*le2Hgij$U7_1wEBMSWyA5f^7t#w%?nx@J3&-5O)@BE?1`Z=bO- zj$g;zqk2^e=a;pKn;AkL#c%M|kW!%R)v0$5Y9>kIOvpGS92$dS0!|#dK!^vr54zS= z1ZGDvGQG+R6effn?Sk-8XV!=V%jdqkrt4GbfMj|)lxd*?D)&0*WhpDcBraMqvjtp~ z)g#R(78NO*a@!BzQ1FFaJL?!>u$1P^29_!d-_cPz_9ez?v9#cT)jFZxTH}IH5(jY6 zHf;pOL+*)5b{42DzuWQV2uE!&&n-$Up#v)kwAqHV+HER-XXbT<(#!Zdk{?)iS^I0Z zaN>c!?&f;A-GcE*@3Nk}eby3H5qGu4oFTP^lSAVCYP$u$Yf4w1Krh4^gO%M=efEC9SW= z#e_E|G{Z<5DTf}HyWPl!c8`#NRg3aTEG?wpdL%PvK4EIA2Q*l5es35LI}{dOE%{b? zMhq0Wvvyxy|71qa#PYHK#Ax8u%MvxP!yWFhP!O*0unOONxL3pyP#;P zIff6B9FO=sFugl>oLF^~$#ulF1jaxGS|+zM>X78HrfIeglCr`wuf6xUP@br!7i+ek zc7v%rfDMB}$~j+{%kI>afFKVXdpjaCN}R;X0&yjZP)oU>p%6BrEL|NNL1l;c0zh^X zav{A$auSbxn^Krbm6cZWRfZ)1Y#NaPY5;(3+708?JAZX9Ba4xR3`cjXGX{qdWf2l# zyUnk{vpQidfS76Ekk^)r+|%Fr^2x7Sequ>;Avj7k6%~DRZ^#dxan-!ytPmmQTxMQ+ z;ckR0^_owfro&YkBF~z}h?@d*ki3|nenm>yY5~d`uO>WR?Yw^S^5n4@SgE)M(rE8U zz{@JesU9h=Qrn|^DCc!U5A2Ppp{#2h8_(%Uhwp1#6+|m^=7Q%>&@Htn7lb!g(86X7 zxNW%Ilf?H5Y4AqTCpM%x_t_?#@ZK$a0|Fcg(!S-uY<`S0mTwT0w$p*C9-jg`K&Dbr3eIpl{i z*^L9x;Ax8n6ZTZ~d2rmqEeSFEgmG*m2!gCvu|FaP)5{>>i)3**6xhs}cvvzXvNL}4 zE9Zlo2t;F0KI@5B@c3~PrLXjm^({up_;C$`WDGhjk6k1cK(zWETX?dDDlG%6rpfp z&Ef48mr0k0F@I4n-j(*t7?}4u_aKz4{=E-$ScSyW6 ztld!p1%1PYX>ls64CPIb`0xsaUsT+aQII#4tVKuaM2;I`WCS%YSUOwbt>QXWFClYc zOyiL-S}Y(Agx16rb)H#G3Pqir3M2Z+6!y@Cy%X8CLd;UswTB^MLO1@{483oP`gBW0 zu$iM%0Vx7iXKWF+lG*0GDK?a!mipsErLl0EIFUm7@=55+C-op% z0ts1k*~yR+PAH(!%5gt^0?|^A+Ve~r$Z_zN;t(Ht&6DN4g`N|vXzSi27jwHbP9ry; zd2TK$3VQ4w^H@0W6-<|9H5VQD2TP~AUg}2GF zcxs60O-X;!a! z4W(f(O@i9{00QT+q4D~J@QQ`ZQ)r&W0du)!DZ@#3R*4R;><1t#HSUha0#Bj@1tBNo zP!D{&mAk^34$e0LHs}<`}D^J7k+m5ia0xgviU@OLi7bTZ4>z zf%=*jJdU)3?Y$X&n~g)&Y2bt`40K~ks?zU_Okw&CUnj4_2{5uFGYFRT#=em8a-~5@%j{sN1LK`%p71dycD@TBQ@K!;i!Q`Ff*P zX+5IzRWf9FiskhieFX&-R2nxh2P`;tk10&(WyFYo38nBQ)JZ7L4He$qP4Zpc15s#H zsoTl&UZ@okXPGUZRs`U(spqUlAScsTkCU=*LmU1w>WV1utaMQ(BBikP>(e@EEtH+ z2;zYEEMD*lD!|)kvQSlUD9bHL?6#9b-G5GaagdWTUDQlNP*UOH`iU_Qc&)JWkSLkV`_*l6z0SVH|6P+Z|5fkV|YEZKGYV4JvK1bRc7P@Qh z8DCMv2wi&JkmAHGxtvuJDU-_}`a*^Ml`20gT$879r&$lPt{#hH_W*R7_8H~bcB(jW zvL^eLdwX_*cD2kim#LR!UWI7du+kmduGJ{8SDWZi*g*|qA6Q*?Ra2Ba<< z@}h%K0QbG;O{sHMawJv1n^8MLd#K#gH9mQO58R^DpHjDWlAk;r$5Aj6FVzu`&5RyN zq8sCYiUjP^h7fOZf;)e-6@0MJ84?w5I#)FLN+=u;;?t<_@8b!INn6^xYbMhu#tvT(bi0gOh|!l+FO##U|Uzzm(cCeiFHlxk&fPd%?D(y;tiH z@ub&!VI*v@8A*ict`5x~2rXPdGSHa2cc)xvZ;Tta0u(KfR`OcxFxkDyP-d^)V*_lm z&WEmG0Vulh3Mu25f+GEV!VEiKl|=ReG~1)_7h%)c2&^l{Td~rCl*9WR9{{#%2bMmU z4Vv4aJLwBhA$52``QB+_JWI2=sMsSBRelc~D5j<4mR@$0sI9i+zy?H;#flTk5aFX8 zf756DK&7Dzb;~LO&zX>fge%){97_2JWGvz|{NUzn6QhG3Kd}~no=~SbgQko+^&W8- zSP4HA?dMH*1#<{|``kIT{NNR=3D$Gs8wk6aCt@uX9ZDNl6lgMd1QtJ0CY;}5N+}nu z#_fwUYZ}aV^K#17XwVFA)HGVyOyXAOB2@7gy?&)WF_h9-BfMqM;&ki^U z=i_>VT5h-JKyE>uLA|O3AvC7S#-iG|p>t|g`i$v4fNqFD zs7T%m7>=OyJHO0ViqCTZ*ZQ@GDs1ShA|_G}@tBi0f{tv_Ba$M8MGm%u1ealE*>~nb zBuJ5k4zx{d%|69dP0prX(t8*)#p4gY@|=QqAnzh1ShKga!Lc?y)5a+Ba~gGo2#iB zeI>77qys_f)`kLPCru<}Uwe4MWSfN!>*_sV5lHvojKvnz#6&W38{RYCNQO6$bg~w& zg?%s+AHAk*hInq^V{Q}*i%9JBWGM4R-`jCd^z)=^vS2ix8g9JuR@Zu_jBP#BqN4r| z2JMB=P~$Y|j+~T(Hj*K7Ab3z=B@}FdH9_tHYPP>LZxkwS-e&9`595iq>Uu(*uA-AV zF10m_(9CQALgxBFLY~EmwNNzg2A3}*Kvp~;(24F{cf`x4!xAPbJurr=JUVBCjBqJ4 zS22M{+M{qTa?E6>Ta`)HbCAHycVu2olp@tB7m(sbHWsw2>k+=ej!|l_j07w?8=oC2=R(F1Ftr?%~LyiqaBkLJ!<^XjIs7&biV93Q8BvIz%BvpTt+(v z0r#p_CPDO?YUqdyGa#U7G#lT3;d70V;khs0x#nUz66LgnP@xBsMzcC$qbL^Pa)Qea_#TBB`TWplSB%VXW-=-B2vC0t>C0d)#Y(R28~I$plS_RFQcQc$XR z$O(Fx>*vgl`7}zI9&c|3^R`~SzI;gHfHqUlUW=r1!^Ilkms8GPPK{qy;t2*fPsyR_ zW!;^N2JxPf<+C2}J`n}4N%kc|n=ACYvy*F;R^;_?Cj-^Aw{HvMBOm98WHL-gl!EH#{~n zo^8bsc%Sps!-7Zt0vn?<<~gqFFgofgTXw15LrO-skcLsY$cxi(#yyOc6<@Wp#2&IO(RN?7ht2v2R>S8{pcT{{W$HG!G; zV%WW;Q{H$P-Lo*nG z>MP+BQC2=)w-)MshTXpeH>8bFMF2uvG?LAk1-Q*;zGqa&&4A_|A2Q=`0XMZp^b- zP|J*Z*DtjPrp5-Z^)+V`qF&Uz9)k)_d1V-Gjdm&{YnCHw+>QDvRnV7IwV}?n29LoN zp1qD{r%FLvkGOWM%tTsg9W$~p863t+uRU|`Dd}Sc?PzQ}#oUp&u=&fXmlJSe*LPJ6 zVA>V#8iG*Etmmkh%lGWW;>9^Q_w*F6^0eJ^xIHiRJ~B3k)Reo2g(LQYX$_4HjA=cv zzP22g3-rJL(K+3#m z2RR%z)?;)d)`O*9232H}lQOB`1S`P!%8I{~s9f&T=6wF-Re%SlL zA|p+a+lE`IGC77fds*ReCey%3??m)P#Gt@qe2lcHqlTf80~40&;DsTU#*Xu-x@&gZ z93lfAzg>xT?$I24EEK&h%ajkDUv=kWj+vLVU=rcx?ue;?`BZUiC=jVUEFY5fbyfw zeGkQlvrz>0S?p~=V7tEr1Uj4C+NMn2>>;zXWtoQ8?Q$DJ^GN#%VcmtpqE-Pg=@358 zxJMGN2cO86bTBX2edWOP&Ol& z2LWLnPI>o6TR4l}T&oJuOTc;&u$mqbYw)arL6SQw&`qmU;d0lrAKX;bhgA&lWtA<; zP{pQkr^2bW&t*Nc2@EeQ)Y;O+O4xjDy97+Pg3gml33Q9Hr2q9+3%qXU*HkWMO5Q!MM<@s9A&=L_q2%I50|2q z<1w}-hr|jfWW;HNVV=jTN&*VmRy3KbvQ#PidFyP8l`=}SC z?cPEOOIV6TO|Mm7mR}Fa7A&;Q0l8DB+R_29lB$Te-P0RLsvVXnflC_q*HNz7z6|Lb zbPN%lRlBivgXDCe^+p0btR`<<`58q)6q_(tzI(akv)6BSUyq7wJqCYrFX!>-qJq4o z?BJ5F;p?hi8Xy)X%mc#Jg%)Xf;@ll*6XEE?2jY^WDgb$fudOej=)D1)n`wLAk7Nl- zU!PMxuw!oFS1TUIh)|Q;$B)%JfS6+H3Rpazo?4eg^D?893_OHbcNPqf?j3wOl>tB| z)TiJ~uTzNq6_dOnUMUvl2dJ}4W3sjtvy76+-uwcDlrS6TELNdQT#^gC(|}ytEI2;3 z@-M9-38R*u;j0mIK-suS)rO<-*V&@4%rqI68E~(trpC56ULX*K zt6;nS<<-El3barS-YGYol)1e14A3zaE48joq(@XTI8t5SZ|gX^b#DEN(L?ZIh^k_n z(NG1O~C>gHi^Qw!Okv_8bc2$-g5fpk2lCieBoj*~9m!1UlbP z4QQOj%T`xob`5(W-GXfFcyY=R{N5-r3D7345^1`FIy^cRz4j4^0mj3DFy9(J+Innj zV&X|bZYQw@o>C(nwy8?iDvji%(j?qHb*e|LlZFD$NFa61&z>0Lf<1i|T_}?~1@8&f+$r?| zyohe5NKcYZ>^7C$L9{Xov?A#tqv&J3pz(Q!_%JU@>rjal8M?%3r9-=DIiJQ5>7`R0 z%)MJm-hKA?aZ9JVIyJNem-$O5x>N<;drmG0i~ST*(ZoI5vC;<{z3JtHA(qMM!B&qq z&ml&)9>~h`doi6~UZH+@)qYeOuLj0sFQRj6nkeTO+%qJfFr7XlcuW4O`jNUhthtT) zkas^1W1-U|%5<^Uy>-MAM){XmxVbVX%mK=F2QOSy++0ekksU^6?rpuqTjdaf1#3X# zo5PunbU}9ue{YmC;vR{JI?-F2fsRzG%$g&jrhLfpv`0+LUkJGm!wj&ETFyCn5Lj!g z5zt$blv5q5%&Da?+I%FaSthUZS&ke(&EURKLZ`A(--WdV)OKNThQZCLmnc5&8EYMI zB6#a!jYcZj5>=_ecEsZJFgU=`Dk3E?%sYc+c;M)9#LUaIN2p~@zB?uD_ZnQ#B}OZw zfTz`NiYu4LUZpUssywZ|rMoJb%nR%-CF^6?cw{Z}ntCk^z(Ur-B)#>b!77AQ6nTzi z1vWiqF7D(g3k8%So>gGj3ct<=;JQwcSF<9-(+J8rp?AgOZ!@shI^Fp-7iJid8-Z9s zcJzv^b=VMfgWlm8t9T}P-3#B@ICRa^WtUL~@8o1rRuA--BeV_ooCJEtpN%>vE?+Lv z107I3I?NtX3p<>pj9G_}8CI?oA1NB~i-&Y*7F$%5b7=O?=_;FFp>Ik-cl_#I6GM4( zia`4GIa*mrk_5b`yXYd%c_~TT7G>Q)bC0pr@MwG~VUr3{#JirPLkKxZJBynXWq_^o z3tP%JNOH>k@HNWxK~JkbL(>$$(}vmy55@JhauOz9B3mA8?=;a(T}GD&U7-zWvqIVS zb{M`5NLG%he$YeqOC}Dvd?$SNL&0}#}*?t#%rb-PaF}m z>?A1nNO?{Thbcj#wHuKMky_Mk6q-IO$i2vDZ`FUQv)c2f^DT|g>7K55`L)2wfSpYL}9)qcGSjB<8{L8FIUuMBH3p&1MosZIq=+^SpFHQSs zNvyjDQvq*PXZyRg2jyJ(it7QuKL-z8 zS65Q#o?sVq0}jaa+@+i6Os$k8iePC8dTt)PYcPafimJP%R(ggwOxP@U)ZGDZ2G!pn zZOK*zD{n)+4(d|c)UJCbfu*=wi*mgMjrOpZ#^;HfDWU!j^iJtcY{ZX}h^^=Nb(!dC zl3Xu<^B7mAz4TtBWej-@uPNg$nAKC`_lJ_xa!ESW7xqZoW<+TN&iTa}L1Q))lfAT= zE<G>RA&j8ax*wC-Gc+R5PLg(os*a@%x~~%EJSBc7>Ge~?;GDN*6!Z7 z>~nIYt=`5ifk>`1h4nNBA6Rg>$}7Tsm3EZ@U;YzzrIdMth?E*LeOVa<5BY#sZ5QLQ^9r+KtFKxH(c}ezTpq%)$`i(;l+Iioa^~m`jK|ok zFJ2_@dl^&nz<^*KN+rvRvR-5~#h}{sHHZ?sLJ1V~yMEzXKv3)_*9XuqwZ`viTk~wQMNtB| zfsrw+P%1RGhi@|V#PxN(a7#d$ouRPv=QR4F#}K{Kg+vA~JnQgef|Vmt5uC>8Lo*j+;rQ%i;H` zBTbXg8}V^tmmIu%XU}bk1w2v;y+h|75@JdCBji%Pdy5lLO^=gK_G}|w`|-+U)PB0< zpD)C%ZgdbMp{{!u4F-p4kMKRwk>P5LY)VGl_MY9f6vL$@*gVA>==T!twkT|-%rfjC z8RfA}saEHoa%J|jySUU-29Dw}VzG9s-FMgYfH`a7aFFl?F<>d|yKV3o&V7#|6a%HI zEv7Txv*rZajmU~e^;n+?YJk{!wLQE{4kqdo3PmC|6o*{?-Dv}k*G1de)8sA{%9MRV zSaZG9i})U`JbJpzr}&B-pvhMPY^Wb0pMs$xa_BioFvgp1h`ipnS!fz5moyhyi<$)( zCXrw$JcRw8$}~aESjToc0Sg3e3WJoWD^{h(yGP`PHKV+=jNxJ?JC^SdLfUw1I#DXN z;;3Bdai~qXdV*6TYw`q}l1I-p23xPt#Orn8dblQ#oMcn_=wU`~ zsm{D+97ZBE&}1j>%huE>jeUb+#7w~zLsAlCTzGR46z=y9y#jB{O>Mj936*uvdvc-k z>IJ?4YIkWiXgV7zfD9rHgK&LoHt&izu^qa^JNhO<7h11e?p4VHuaNcPCnDX$B_2*O z$xKyU2Pth8MZ9m;SvS`Cy~E^uq*-lw{!*&?Hi~PUoso+~p~dTM-;!+2!&Vnk@iuAf#F)S9h?{sqEYNlPQ8H{xYkHJBVrHS%UaW5 za_~`>AhZKbn=iRUzvN=nlAey+5UhEMHI{Xa1SueF;#=u{GwL_a`(j=}WRq~%JS~1* zYvq!fueTzVlUYBO${$0SdNd-T z3$qEm_7T$~3K7<>*G#zaf{2{V1JbWW-{~}qlnrzqaa&<6Gl|YB%hnA!9_i}<&@)es z^?2bB@;r@KS=Xd_==Ff^sh6Uyw#?IZdF;>L^>rUc=Bv?I^iDN^z@ZW-Xv&wFOL(<+ z9-QbQNXNT}M&fzEK41?mjs@naQphQUsO_e|E=^i0M}yN?doji4#*0TB*&!j!B!+_k zz4NG1aED*SNp6R^2{LWtdKr7hrS#ib5%zMERY)S9@CS^s^xk!(dmmgp6V)y9kibA~ z9F(Y0ujg>)T=P~FV7SdigYWt68^5|)0euyC`NHx#!5&r^1(U{}qgctq0$-GFp=@7S zQ(v8V0a6u<#Y*g$m*K&TeP9l~S{S5b7!1SPPC}Uc*dnN&nVjH_ z(aR@mbaA0l$?t(^h`kg~0K&z|f;^&4c#Cz5>%K)p6}IAH7Vp+d-!r@?8f}(l`yK}I z^krN*7#(lkOYwMym4&sFvm7^%-HsT##gNF>V|FUn=Wl~}Sc*ILgx2(qo+eBtcAU;*syn9# za^3Fdh3~42IOEnHb-kB5@+QROT|I&3FT0e#?2?wtQ@uwE=MS+Ttxq~RC+{eDU`pBw zTv3SD>&mpaR_%PItgr~cC=7T_<$~VI0(9uMiCotq@Jzx${?ZHE+m&D8r4|gK4dDut z3m}vczt?%DY+kd1);9fiDy%)ni79C3)UcY}7?>+G5u!?*(8X7n zAz(eI-fni;i@YaWAk!;Y`0xdHgb}eFHjK@CGUydgL?Eg*xOuh;8Vg;K``)D^Mj2e6 zl*B$$d&LHwh5UfVL8>1;p?VBghG*-k$X>5;`fJ$~@w2GWv(`@pL1%A`ARiUqcW2>m_7(TahXcb#Z>gMh}__paF{P!wYt26SB1 zn%CWAfKX?nEgRPcO?*Z|?2$cp^|0sE>uV-Lp*Vs&P;Pgf&udEUaniemj%0XQ#kXT` zdIXp}))qNH6QPhW#VWX|p9#~Zzt&fl%( zK?uuE;K9DQv_}L2B+0QT&+lw9cchGV+ufC>f$$|D(Tmv{;_CwY(#z;eFGID(1**lg zPK_~lRXwMd%e(i24DU5*N}u&ENOk%{MNpllCA4}E=A01WS;U-(32 zHUIQ$^7$MSW=_3H4LT0DMMK25DD%{13IT1aZosP4r|4XB6I=5Yy25^@MyFGd*d%PUu%Oaq#(;BaErwLCLMN9C3c+9~7---PK$ z`4;Lnc?~z(PdrUf%n87Z5*Jtj`~jf|-A*!TZfiW&g`0<$i(VdFYSYihR=8BD7X;H; zUgH$Oi9krB*@^p8l3}Rk;s}*yZ%#z_5N?6FT&9;u4`p^2ZtDqqW|$B^>QNWVJf=Da zHbV0OLmo53Cmw!=A|6_>#%PDH zi*efc9i<%7lXvLCyfjKLlPS*yCHVOfTV}8YQA*~8*N9TCutbFluAD=#o8yRhvQA<@xHR%+x7Lhh<<0s}?Q$C?gAP^5*tji*I zCG2H>`Q`BC7v1DToT?;yge{Lk5H44P{ooNonGU}-W*=0K1buE+I>^nZ1DhGzC;)=b z@NCAD5#wx9n*DXZRioE{S2lkMrV5p8co~e`;gW2H!RWvgl9TIRjg`nUd&OWms)zAv z8F!Z#&T!D=bZ}Ifi^$R=7KWU%4S5Cj0Z`_-h895D(^Tl_zTiPw)+u!&uSDa?MKQtW z^DeebyHKwnQXusVhzWE!fyiuVpyVBT^!T!@%W=cyT+)*ceHp3G_>KGT&)OAH$99j{ zy?6nl*v{%C60jzyp>)k~-JDA;A$KRVC{_~-5z5sInGvL18nGL&#Uni)(+rC)3MUMu zKEJ&!cp%!ZjX8v=m*@Oo{8@7C__L^Oc4pf zBjd~oE#hQLxxpwRXI&Pjv5MWR(<-#m4tO{^i;B_V9a>dz*8!I@E;5X(>+bZ1@D8RS z)vS5+!vi`aJXTv&>$g$xMBP3G^Dn_d2H7|zvt)>gvQw8G(aqmWC8Qb}xIh+6wHb4r z>%h4k!FqCRi?ikFSzOVjRt9L%O?o0P*Ge=(qZb9s|1>NH$WHii>*+R1k=DIYyocjB ztPOWeI!q7?H}4|f0uJO!Z^56(Y z(3~1yT4doswTd9p7z^+V#F7jlUfp&_qMpe!pniPm&aiGSX~0@)!K?Iy)JcmCd*YP( zA`fU^c!OEnxq{mRG0!1N=AL{p=ZApZM+ibKbd3~hGuLK~^2R|usi4f?Grbipoa~8& zdh4bv_^7twUQbeAoCR#oODN6j@SU#W~e-V&d=N@D@6g)vW4M| zazNM?0})6sS0U~B(;F`9y))qRq{>bieU1Wb@eW&yZBXWt<%}cA+pbYtLlSCFgP-NR z^Y(d@@?29VOh_oUdYt=$Mj4Yk(L~U!&K9*3nU@TfF>sT#u8>*Cg>W9tr9X_@XFWi= zat3yev+MlGdZ3IOq)$WmZLqWVD(W?&pFILCd~`r!K~g^PrW7u9;qj2A$|e+Tp@V2Q zYe22}L`@gZmg22|M~mlp5BL;iM^aOPBkmbKh$-}k6FUfx5=m_y3N7c<5io7CgT{bh zTuZ@Ac_v#=q`DuOsTS80341ofn>{rrMSe*5LvnW`y%ARRh`JrN8FGz!3yPeu$_S-9 zR#{4AO%+VD%m>&ECkaac!|l9s)TMQI89GiuK_>NK&zE7LUxu||jRoT|&H-~$kfq;= z6c?-YdjlQ^;hstIhJtgZrXI$_E?<(bk5=oBHZA#P97nq|E<^y z!4l%|drkX*5mU#bMIJ-i1J!z3m@6{TA;@%zyZCm!j6%V9oR)b^XqUo36m*=PL;N(B zq=$4KM9VF32V)dgu6*6E#_=JJ#*m7khS_Vz^s|FiS};9n0Fg;xnpv=MLZiCdt15VS zWhlbh$rkWDWV+bFDw>}R>IF1omig0^$HEI9NfLs>a!@bQIqp&9hN$OLh68^T1x(~% zStzV{+G>l5E9Ly+ctG*B6sPwhWaE^?B=t?>`K5U)tb5vO!x6jZZ7ph5r#kGaRLNt) zrD&(;We0=Ltra#~V7vI`@g0~50Y=pBLuKsb_wLCkRp>37^#djq0AdC}UrufnRSWdk zPJMFA5MadSe4>b#8MhWKoz) zSM8Djpmxq+C$1$_a+rBxh6*Ixv=l_3i!KH^0$qX&MrjFZj$5%tC-@S!2|4g!b+0T)-36m@wZIhWEzUmnfTpUFe#t!9QBbf)g~i>x4<_y6ozSSL zA#EYe^*v|@vUe-#$5a84NgazD?Sv7Gp5@|;))0fgs&N+F# zComW0Y_maj6uSE2y=yeydH~TP4t@Kg%<7&*y2|W?`@AZBNTRd4Wlvk5X}oE%aHimJ zg-9j9){V{{GpE@#t5?HDUim0t(mjz0sfK_lsC4%R+P2Y=%K&Kny1>I^IHQ#i;DQ;r zZys$O2ZL2|tA_Qcj5R|D_L>>ftEn=DGlZwK2 z3Lo{CEssXid2s%qa*H}UU*%4sUmz_{b>T@*U&e{jzFI0kC*3{igK?NdR8v_PR(nFE+@8@$ zOlLgd^W@CfsFUZM9WvL7M9YdAjn@bn32~Qp>%y##Am|%o{Y%sM*+i znfS03_hI8Q^;{koPtS@)K3f#nt345s;>Iopelh+ejdFtV40`#7$9Bt2!ywSeRSz!SgbMHjGk`HU4x%mS#)$0fd$?Xx zm#F%fdW?e8=qb36+oNiiv23vi*3Skk8{R|!L=y4W4sm@h21LS zcPd7WKu0()gU$n-9}C9#S=ba?U&X81NKwm5$GC(ir@WMg&S5u>66$B*8lal%Q}K`w zKw0&w<*7n-O1Da>xb~Q=IFn6Vy|clzgA@lg6MEb}5MV6om*>`6e5C+3nrnyy;_>@L(Z5LLdX!$Hs>mJ92B0YFrWYlmtkRkMt4h8pW7Io0( z6&+QDQ5-W*L19I_)E?Rld&3VXaLBZaEyfS5;ueVRIUB#N*C_E0lp=V@<>j`L+N!q^ zUr6kPPs}s~#>tx+;U%vs@?1ICtm>!fxFiXL#*O^k)*ChC!e&+5xRRjTw{c76P2M0| zdT?eA7r~q*=vI`PIxD{C>Ibx0hCr)hn_iV_UNBXMr=z1eaLJF6(oRjNnT$)b)Df7w#KX zz_ZbzM@r_19rk)Tg+hzXsPpaKmP4vg%@Ll!oW(twqIYrj=EW--v55o-xh~g?DBX*7n^w+69~={HeZ{u2uaqV`#mo!)+w2>tOo&lmJb&S9?YZzw3+G_ z`@(S{H5>PnC(IZw$M8&dD&m?Fs;Dv}C$~8oPi|f+@UBnEKnv#C&QLs1b**P1DsO2k zYiI+}%RF2??VRvRs%5#)9X@LEpsfseF%o z)>GBl9;~sU@nb175L)lvFqN#FIIEY<>rE(fDS60O+W=GqHMGlUr=7Cp6#{PkcvGl- za5{wLofZ)QVLJP&X_d(i76%CV_$?ixkD+-M*?PQr zudplQs$Rsr__WMFUzg3qyG7RMiP(VmqD5r9%+o>2eJXL9l4(bl8n*4(oq7?Aj>DGA zA{z4LJKwCU0G-!pq1?qMYg!bIrZNR@0&AaL|E1Y5QwixV%tBh2WdmQd6YagH(L=qH z-5^<8ZHVLt4N2!CO`_U8*S%}@@@A;vJacI!N;E=_Fft0X^!ZHSKvT94Vottp4onFb z5;4>^DK@^kcCqg8sMU%u}+;8rfCrmZv?~- zM5zFDoe11!e8}uQPhAmd;gqLC;SWc}t=K}*BK26>V>;Gl=7Czy0l$6S0UP=PM*&%; z9@d(f&nW|qIry#a&WjAGOAF$PGhcLk&sGHTZW;PEq75Q~ zx8fWtd68RHpS{Osk6`zuQiz1f%Dp4mdOLZE%b?tan-kT8!MDu9&RA$zAQ% ztwNi*Z=;$0c^X{L>yS|65tI-I5VlJin=OgkTqxSuFfT-% z-QCgm|k17{o8pChk!RuwH8=#!f-5KK|6~>6d0%``D<2*T^2%RN4fJ+f!C1 z!R8~Z_pYjg#x6VZN%Ou(bO$)}9J(@j&#%CSFbT=aDo#o#?(yQg7j1V%YOr|b_7xyf!D`jllw zUcE6L7o>8Fp~2W^6R%+7iJ`(>0~|q5=rv{vL4g~b%d1C>rEbD8O@oIR7~Xfe&l#wf zyUjA}^>G|V3hfy^+7~7Iywzct-(yo!u@~06F|Bg>tS>I{0w{|D&U?AHowAZe(t=A& zYZ-WlxJIBNIp_^=zr*Nqqy-qAm`;EU&~3QK3Wq$ed$n&9+kBk?8m{pvCZ~cU)CuP0 z>m)4nwmG@Esu{mVH5|g;V&IFJX8_I^J-1hfBX*W$s#3W(0?^M>vVeEkmg|vdK>^Bu zAgUMzF#5>ccJFW|+a+>F!iK5dLwfB{DmM+*wL|R%Chw>ScFAxMhx~Ryy=GqXF-O>A zeT{a6r;xG|wS4vRalnpSCK^O4DZf>@b2IB1BF;V11LL3rpiF3`W&gWE5 zFASPJRH50qH_HVQA0S>PSjQW>ruAjEjIe^bCRft{H1tUIv00FwISt*4cjXpzBt>D7 zJCmE1@kBgo*-6&lM%4rAm%8rbuPO($QY*?KUMsf}Ikm80 z$nGk(Cz5S?1m{Q^Xwo!VuCyMi6q~8YfEuZd`!hZ@u0=ZWd@||gkF|#V38KlSQ9_q2 z!7-+$tqTRt1%hGyCJd=5WGF2?skEXS6ZK#UKlXMkft_P_PvtSZS@-yKtbZeNqcyLapAAq z{2iK-E0M1RsOPp4V-Y(zgZ3z#+DKxSfxj^q4(G%(cu+P5Ff#1B*IEm?$=VJ?BP#1cTIU z8M44mNj*$L`FQd0Rab9CLn5tVB-xTl+$jp>>mBQgA>=M5JxXozS6B=b99A>v_8pGxns^{Vzr!g-xQ+`JuHZK$!DcU2&> zC!TO5Pa^CdOS>wt;96IYzgD81f${WvF#gP{9@frY32TQ3J#2Z*R_18g@0@5Pw4{x7 z#9xed`{}SWJ2^hFb$dwt-YIR1`V{A$9R|;;h2y9dm8t}(i|Fq2xscoD>c^0;D&)nM zBEPzsMyjFr>h9zgjNjy&_BgQ_lt?xi9e1b7S=Hr^HyQ#vK-$I@>gPN=>g0TKKpAJk zQSL{~H%*H5xJI>)?j$uxL?ywiJ>}fV%_JqCAX|&|J+kKL+hhyt#s`DznPg6)*R_nw z1Xrg1RtHiFSvAFV!yB28d*q7D8dst)K1CDyp3vIO^mQ(FhSzvYIdxVFXZs`9bK6h^ zB+unp_3V&2*+faWk11iys3u++GfdrnI;WD zENBV_J#_RATNb4VGf=U%lDN`v!lQOPz?^noY`NGzlDs61oPu|Uxo1PVE5xI{PB_%g z)cgi6kF6AFMan|q3NrNh>ZFs=WJ&pyIZtNWgF1M@Bajm9R!jsA7-$+zSOcDq^0-M~ zL5yPJ8`vf-9UOg0dnv_N(ZhtiXdk| z3)-d<^a`Ohq2xaA=67VG6YpWc`{|U?dwx|nPeC)~YH6hAAtR=X)oWjiq&qBteZR|X zl7aCT;-x%24I#wn&-T5y;>(lD_ex6affuR-adFETh?-dt`yMku?OTV_h^tq+4E?ln zn(9h`tIf>btBjIRiE~vPv3kO-`r^T@dJ2jrgLx<76otaY_({^4;2>#^7$F= z@^f&Xkhf?ldYD>ZFcdqJMF4}nUNoLfyB8ScGAyDWA9dcj*yNoDZM zG=(qIg3mgAF@@=I>&WZ2$5n403ba>x%elj5`H|e3@wD;Y4)! zB+uCziQF`KV;Pl*q% zs^#$ks~yCeNlle!S7&>@Y2CgIx+hy>6p7UV^z_7X9^DmdgGk3?qn1;_HF6?>+6W9>RZp$Vb41dz!q zcanE|V(PdGH;FEo=-YTN~w3{T?`K za1i2nZ*JPNEw{Q@9S(}|S8hvg?QZ+1R4)&Op_E&m#H(Jgm$_VZhIrGKr)yns8_$Q% zP5E8(WO3Rs3IobIF;OEcz%no60LYX>S8-F=d$J3wsQYMaqf)A1+^-6xc*G*+gq4x}s_5b=kv+yW1;LSy7P1GUI$$yf89TcpSH$%mQww-DqEcxE zs^-YgE~kp^)N@T}-GW?`1PIO-;Ko)dPd`=jFV*nXkRzV%Q0W2RJrBv=9jiT`9+oX6 zw7_Z%PB;f8(7NkaL5@xJzzfqTdI20UW*(?+JoD`77Rc~)S^|6S0Hr-sE4QJ+IXZv2 z)_IBUTjM?Vg0_6s%yjg`_>2@Kt4b!HM1+8`!WGUFi{~eVRp=Ta2=nC4E2cf8HQ@ba z1SoOW_Nltzkr&BXxRG?e8)Q41q~drK*OCCsu8Fb1DrHWEXj;y!lA#CwLhRm%2-0cq zah|LPB6zX-BW(o1T@;Ka1i>foa5E|AMLt@|xp@Gj!zG%;;SZw83SMH!ouqY=t)TNh zqKYzq&D^};Os5)B<1wP_wGb6#5itG2VeK6K1w996SZsa>n`&8SG*K zzr57a9?fTzdG~Z$oPePouL~i$>_)25lL6Gu>tOS87^^Vz&XY-A8+4VLtAIB9}TToI|3FHx=WU+iv=JG-#Q{!5Xp`f-hnU>@ESDCQZ-=sr^CN8z%7= zMzjqrNpleI0Jq{Kb!fq)bJN>0HL5*=^$n~@J4$GP9TuEp<>uPsBEH-=5#ctH2kQ>1lt-h9Ef!fDWkd;gWkkk;zVDQbcWQ$a4jS z#8jcY$E~B4EWj(Rj>qvbnX6nQ!uo)i;oXLyB2wFI8j-Z6%6hSmWdo=>LZsGp46{?JVbsi#(Z5G)1n3Qy;AzP47b7erRNCQ$rP+(%T@HkSl;GMZ6p;L4$?Z3hi#UM z;3+&GeiQm&or52i#{oz()d=Fq?!BHoL{A^`TMN8| zYaUMObfv(@05>8a_y)SSIoVAJx8>D)!&gHir-tv~@LuC;l7}=emFTzzaOdpX1J2_f zytkfZZaV94@I5rx7wG!p1l+~*JsBqE_DT16BbYYYIg-SPQ~4@n_f^QfqHUd0Ay-dF z@w*&*ff3Etk0z1>EeEt`3_0xix~LayX;}gZEegtWavdbf($rK>t;$Lc@dhzclC@)h zR&!x6rIEpJr5#o-dq0cLRpPIvy{(0ymwX zuc_BDt2riBE=Oyht$5rz6t&(HCC?DEZoB|GP+a~IKSVq<-g3hYdf3*RMpY;8!bwSI zkidJN$u6J7+ntMdMu6B^PL9BlrrUS{LO${`X+b>d0+=Dd3J$0R6B8$ro9p4tJ@XXy z=27Y8ty;6<3)#q}$kz*Klspdo3`Rq(j%;R%0#rn`s2_pcCN8<*sgcf&UrB9XL_b_J zMX3XYeD7Y)MPj=Sfi!XGTx#$7IV$meN83Ay zCh!hVAd*f^pol43-%!ePd*fa6^1&epm>=YHy1{gSHuPE7B8D}-hE}<8Hmnxf0UL*a z_SbjCU$zB(*#>(_nk{M%)uywvTp=FqDAK9iag@VlVf*?YJG{F)$7V{!6z6U|OXodB^-f>8&spO0QFl^>+ zR6Qc;;ex{^W#i{4eOC7@-rMQt{SsN83+*c+Xc-}Wqt;Got8|Sq#2xjh5z z>4#}azTqbhPtmaI`Yd`8T(KMa9zQ2~5yH9%(DJB~RsbK8hUo&enLmuT&97~rT+`fX z7P7yyl9dqYz_YWgE}_In3D2)Lo?Ej%S>sCBQJ4A6nx(K02Moj7&Nbni2FpkkQ zHNW+1!3&>C)|whiZrvL*SzPI?;P!3@sL)8! z=C!8BIVb|1$#dfEJ?`m#YmZ{u&pO@&UwEZ-2~PPb%1E$jJ*rv)M4Z{OY^})lKGi;} z_k4+@W+#V}S`M0m(ZP4EhK5#R(g2}XRrItFZzV_sd?iVBq4mH*-s`YyxDt_AP>Yuv zb#>cb$MX=`{AHgGtU+WVGC$G*A4XZr>vS-Z*(UafZ^LQ!LpKHZoZRXr-W$BJItk9U z<0oV`jW#}#QocO+CUoeO*9^021XI#;(->>JYo^ewdJ}~BZ4JQdo6U8z?&Qf}1t~QR z42o>xRhyiMK6n~tQy$td-Uj%PO0 z*qL?q9zz81TZph@)~U+PLQVlAg5IZlh=Ak^Su<}^FMNpcEgRE1jdAdMEi9XrAtjszTgFgnb6W72~VC%CcC!_xOW#uWmL=O`)(Hk%Z zkN_2NWZPBr^Hyhg_3XV&eQ(a}_>7+l?aFJ?OXVTKhG@^}h}?ano%lLiT9fN{9kFyP zxQOGjpwMNy@1c?pa4?}>(K8RUfk+c^Qkn(G$1;`+q}6Tly!utci;WgYrSnV6EXieF zuSmR3D3#gw#nUIUEs#~|iPK^ZH47ULAvK*RN0>_*4$AO&ZCyXWTCZ zFDv+>eD5a}(0#3{TgZl>Zp5$EnT35AjzIaRw% z;YpY5l-n8!@Vm9dtpUl2<+=3s@d+xsU{!6zm}SF9x_D}DQ*9?@RNLXn0$Ub1iY*d( zaX~zY!$=_Ty|r32_og#@Y^}`2FF4*i&3uc{Sdrm8NT~y@zT{vodx>~x55-}$4Lf{v z^}2dabVg*{TqUPo$Xei=9NkFXj^~MEOE2&Wu}=Z)IvQxx;FwEor;tFWT^nFv z{BGhEJU)NWAm?J33hLvQl+C0;v#oUe(9dsm?rA`?!?NEpXt{J;Po9Wp1H&B(AeJ^} zqrcoqD}WmIMJ)?62(JuADmExvI|fvONT7ogm!@bDF6r~EG+$d;LuLz#m2hCKT&jK) z;;WK&mPwHmP1|0NR|spK0-lM~FzuW|!2rvP;W~ZW4H5hn8p#z7T2#ft(izr;L!0-_ z0oT)Bze=W=6p7IX)zsU@5f8H>w3Hz;w) zYvx3E(358qTJN6FD&`!yIKn(KadIAtT!lM!7LT_Bws~X( zh!#6BV$9BO9dV*jsZR{>rCmm54CKQ%%&oO{2O0tjYK~U7We+#+oS+@aR22DMW4@QQ zATT{EdLylujmb|?mQFga3Ls6U=u!|_GJ=7n{0vhgeD#*Cht-5*g?*c^_%*#XlVFyr zr^(D$(1BC$A{`HbeF^&>RljAjRD_syzJX0I0PP*~0k5(aHQ!~xEh>8lJ*3Zz+VJtt z*-uj4qBo*v%e8|!ca3W#I$~!jxh!-ZH4s`* zn#B24fL*dI6`4MNr}9lOs}bjuajhpLz+-XDn^u?}r2T>baPtVzhn+zH zRs?{JvOpx>z1#&7XtJ%Tw_6e-GdA{S(vc?I?^wQ!BmOdOScPlJz2aUOV$%QtN|>j2 zi?QK|IwS;T%Hy+V)P}v8WhD9hHD4c(&t{5U=j#R0dq|5%;Aft z?bO=2dX4NX_1g0<%Dlz^mLro4SHT{u-sVrL8p-Au2ia)$s1Umc3#eI8$oq#npoSH z539f)-oPHaA9gOKcZz#4IjAqh>eJ8zDZ{#{@i<IEU~t!s5N zKVLV}8|%qK@y2`dUP3-H^XMeQ(~?~@#GG10Le=dvt%2&mx)2*Kg>hzv+U(A*UTQLv z98BV8clpYuZ>cz;YnHqqmivK@<9pi~LXI#*jgM+t9s1hVxK4s&ZRF7;ai(Dr6^m&n z`!-&Y@}O8kZ#zD-oJNwPD6+l-WmxE0k9lrV#<7f)0vQ+yF0QFmZtl(5o7qjy)MgQb zBpcD1)Od;l-2(512~GlFS|ExYC56l5mEY6CTQU$zY1KzOwz>X7Yzps`Ou)J;M+^cGNPrzoxvt<*h2J+MqLi&(Pzzdt*%nB6wo|n*n*JQtMu^EO&e2pNlR5# z?)9x+$T74lhVp$X_vlMGq=^^o?hhn8bXAV(V6o7fq62PY40%T4RPcn#y&eEnK&roh zx482YVF%Ssy5>eeMDM_RB%(x|!Z53QGu26*QoB(S1D)UtKR7L=Frs9}{OUZz~d{r}2A@Pj;Wf)X8LxL)ANL%xfCRv~f zk6G%iw5P-L8+(M6)4QTp76PfRg&c4Nos4+kH<_R&uhiJ=)Dk;0pC5!gxD`j|P7#{z zh7~R@0DEh8$4~I)TFzQm59P&^#uakkF0pXz1x9U=dD5!n8D3&%JGPAM6+SPV zdBzuB;a!r*6FNa02Nyj|E}CR=L16M86b9+cLQNggORw!JEv;u^Gai+^*+#xvq8gee zlwiAs3E>FC0!8X{q!H7KXm!PM`#`hGJ;KzV&tTm{3uK^P;v)|^1Acrtn8VYq2lpU$ z11>MSZH*er(8yynWElW1r$IasW1_icBOA_(rE}QZyoS^jK_-$+9lD$xKp+B{K);-0 z_;St-6UI{RX}?LF>lMo$&vJHaGQOPmv>Kmnjoz_}m#{TXUzW?oV8|^ zil^eFXSM2BQaX~j>EN=I8$Hqo5$NXm5KA~xNAu{xU(&hv3p_UWsmiC%B{=L1E%I-+v`lhz%-sB;?A-<44sf6SSZuLpj2jCzHo#*{D`4P(611pZq(6G|m+TG^kle1IhMuI!C z_L)v1;@)I?Z;A#JoBX`tLDGX4H6X}2>fjD?C5U)}oCtf{d`z;~fd#v7>dv71^s%lU zE5nqd^CbC=w>}gtaw6rx#vm$JB6vry-5_b)x8zPz8a5=z&DcU4g;DF3pk~ha1OzxYmbdhC#t>9$DXzFvTXlo?w zG^a!+@Q{rqD8+8xySo{Grov6sp-h|*sg!cadR@JEl=CT_e@V9i#dmu=ELLrXc*mJW z^stj8Pm7j7&+A-b+{DWxt>^u?Et2O0I_a-+d^6u7HC74lC*x=OVyB*hx?*+_r{{nW9VMGQye;I#GC`E z(veFjY^j=xf{I-B2$+zME3rMLm$_lEB7tb?c5Zo`;n;R`xD1I4c$6<2)?=`hc1*CM z&>TgM(&z|f1(zdv#pRfZ+QiNSa~Oax6@5~LD_<|SrYUj|S2Sdm7q#|mly@Id;yy^i ze9!1bP|m(!ViIxzc`_-t#|LaZ5x#9XZqw#GX=5UF6(grT9lJu6D1BvzOpz$(2d#GD zqs!dYUWqy|H>qQE)tJojDEFGiQ_&qc#Kz0m7dl~H34~O;xs)~m$XiYVe*7Nl7+=Rv zS~JPw*Ztld6(9Gyfa|bT4?b!rSPXs41=y$SHTf2FNzq3XxJrP5jTzsSuxeBpvjLAs zOM%XAiVrWv>0ES>ruHUMSg1{_ia!;;nRm(@@LqB#Ml6@!vxTdNwF1WWOpz!A#RNiF z4G;iywNG?V+R?Ki|+M#dGKbt z2>+$s!SbQ=ytiI*o>`6d3V0RkiV5~{4$;2#?1!EMIzj6tb8}p9YojbICuHp) z6BqK7wE9WV>ma&@SQ`qqc}#VIE4=skO2W;6&j$KwN5*5jmk~g=7!+WSsYo8y;3TNd z8i^XD0Vz^wGY#MncW1>h>8@hjVaDa@4A6R90c=y=8a2UfK~W zcA`aO#gbL#mrJwKolHvNqh5eQAi)*FmWEuA&n)MKACp8p@w0o+MOOKc&V6DjAXZ;k z_C3x%3w@L~D~9Oej$F>iMq54bwmQ`CUbu9`bXL&qFwBPKYbKfJQ_a5fxC!W);m##r zxW~h?%F>%rq~R=`elh%3b&Ma_re{2f9D56)vhI=-7>Bgl%F-|$LDH*oa&HSCwLw75 z9n*s@)Y1-7%T$^P3XVL4)LYeVfuiU`kEs$uHp_@uy`?e=oBYkBx0NQOliw;^D*NQqo|VxLeP9KC{oKw99ckP|JedG9$Y;)K22$9CQH zsOt{kOFQK+?ewu9QLGity)uI~6Xq3EU2t@POKfd-)R;1E(`DKUf0tcknxhGWmsSGv z_LW|bMQn$$K@g;6?XV5G-?_gf?j&qG2h@sJ6%`BSH5CSLsLZhxb^r1XJ>H#jK;@pX zy@2d&lwsK3P@?Awl47wY0qocb8_l^lRA7H~#&5pno?XwVqy`+%Vx z{Agk;>G>MR!>+u%3D$rllflHsrZ~g$>F(_24dHhO5&(=gwydpu#VlNJrYxZvuv`*y zyXJz7cCRMgaVTreaykwzNn2o6dG;`e5+W_OO*FHy+cQvAMURI;g%R3gho7 zn#HqYopZ~kxVW0#z?ktNR=UP1gC(m7+9!w4lqIfF%JimR>qNiPiQswwOz%**^osnP zCG$6B=bA{c$+XtSshAy7H3cH--;dgf}U+M{Dlm;Y8c>fFfHI z7$2oN<_RsePbSWh8IPV~)Lx<~HkbZ7MC9T$?_ ztjyYbiR#c~0Z*Tjt-fr}cUWv-6Hc9*B(5S8b?E1jx57xqPL+TQ?ko~qYWP4CNIFtl zN-thPz7V2mH{)lu(!@`@?g8{#mKs|;zP^5tn|b<|c%mlWtH5Ob`e4wLn)&KN9;k?6 z_4ON!*=_QJQBf}RBC^uSty~qK0hMFq<-0r+9Rw%1ecp;`YISr2O-!3_Be=U&P_IwD z#st~lTTo1u7c^+nrCV;#c$P`+!Sb46TYwm7m5l9s$``EV35u*F7z4F>Mg>H`f=8{b zE4_q9N{`<1wd2$sYD_2aW4CcXpeku1eti89_$j;;WO&MDO?_x_hmRP02Ldst_d)Ym z*={~w2yWLBqD}V$Bn^JowC}vJ9ZgfNgYqLo3|5Z1S+d+6Hf`6`6IPs4Ch| zQ!A2`+nvUh+Zz(LWAK9w9F?{lSbchJd{ix7+K|bt zJI$777yxyYRlrGB^59y<$Z};#OgHngTP=bMx9QNXZi$i~t_7AoTq*2(FUVj6>K#d@ zd6Dl;1!xwHh{A+vrr&UiLQ>2kHCsLfd{`xi=QoD=C7#2Vc;Kvf0NQultz0J5Ax3U1 z371(7(ouIFL+>Vl<`K6=z$^Ca^7G>Yyth14>`&qi$pOG~)$o&?wac6hMY(B#d1Nk| zg2Um#%Tk$d+Q62G)xDL$OT7^PGH(P-Z$YP77#NRn%;r!XLcEK&{2>Ozh<7E8b~l#T zo{j3j1Y3c6kS~yPQqbrnlXMH8GYm&u!L)=16yA^6eV5 zEkU_ivRK%xKNWUW2GV>E_dpGV9_>6kMqor9aZo92V`kint^+6sdyk|TT(Fqoy-Qvd zWJkq8$5WtiQ}VE#A(VnI4VV z(cOfB-66v7`Zz9_=n&(*(eBlm!%y@4^HuBZ=gv_`u)5F@8Y!-tCxPh{6?E_QL~=e1 z4Y5S_=D_ERODAoLkvg)owGom%j@w=pZ54f;YChn&8B3BT^I{5k(T7cfpx4POc#Vkh zh^LH%wVqJI@?Yu&u(?i`q~B|<<>}BqRBx5Nd9x~48#FhegOm}7{j$u zh>_mY3)L4CqD8HUMC>>=`HWO);Ha2ti;3LLs$BBrl~Aiky-5>&Rzl@ifliyE;jq+@ zH!x(%JkaA>W^nb6??}<_0N~D?6-1b0F}t1-9=wEAxqfDc9!|WVg>O^S!lrFKJr9(@ zB!h|5AXv03m6~4(9wW=>35%kHnis?!W>^)*aWbYqEm(yBMN6uRimbBXQZr+ZJM4bC zdgtU-WJNloNG$nI{P~OHy~JjDqazzd^@448Nd`b?*|h4}2*|Y@WJ6Zid5M;bTlyVs zDfT;m5{?AL8XH|L*YNU|&S)13(azDgyx!}P=Xp5}*kv+Ybb?|&N}gzdbD|X$Rfmud zX2Ii??&}<0Qf_gI!j^tgFR@&r(w3tsI^Kn^#@5z(K%6`Y(djd_7F^0}4usbrl-Zas z1}Gq&zigl^CxYd0aic(;(b>${cVcXk?-k6p#*yk_MZlb0oXci9T@vZkR4@fqV827+ z@=ARDvK9h-g-UJH@Zl=v)e%s^PKb}*RoJdA-b`)uHgr$_mF_P~M=4^B> zzly%O&! zxsWHW`eZN$A2H9DdZAzHm5(SJh44PS5?=D_G=AqiKKW7&ibFp=vUQ+Wa8-Ju zum5!K>L&|<$m_)8a=_~{c~b?;lJ^c3j3Ys=Dd+qSPr#-hFd>h*v@BRy)9IPonC9Ch z>RK2?J~{74Rp*l~AQ12z7ZsEn01q9EJO-x3wIR}F&&ck@tD!x-ke)^}TYOghW)+4> zT24k?hTq6(AhGyOzT@oeX#(OdTRnaZ*Qf8jE{aNYz)XjR^Cb2pd?1xdqqOQVA`>86 z@SeK)>w0vik|Og!(TqDD^Sbnt^9p7rI5se-#~tet+T=JMuo5^@(tGe&-?*+jAqLvo zl%3S@Jr$K27ulOBm*}u2(2;xo)EVxc>#KN+mWe@*1eB-SP*^hE)&LXJ;7!xrxs1DX z9kCE4V;rlGl?)lH#MAvwm;n2!ztJf{enxEBJ@d??Gjxa>D#j|UO%219Kj{L~%? z=>ssHdygOY)9ttgwe_`N3&y~EB26R8mt3WR1~e^lyxRT-o87XC91n=qkrTI8Ne|>e z2O^(gUhx{M%5$UhS4;vSzy)GcRtK6l9r#WyHWx2>f~Dd$SAng;u^+P4sCpq`fuxzG z=(D~hPO3(c4y$+%>g_9MKlMm}EWALyM|j&Hrf;ToRP?Q7=IeYA+1pj%&a*9!BI4tB zM(YG0?iujs3wOk`Rprw8F|9)H?LI6_-s~v|IwYEF^#=iFxr6ss$WP2&VmFq1uQ-i- zgY^*yS*b}6A7?wC^3i?*R#cH>GoH>PEI7jM5vfG6V5&Y;u+o$wu|%X-tk%T3RG z)hT>CiNQ6lM1>sQNdU2&AA=*@*1UH|V!`{u#5clu*_3oQJ&TZ~hn5uF-mnmxM7l`P zB($ePYd@7WU9V?ZT#-8APHr$wbc*>Huj3I4)k-_(Da)4Os^Ke~C~@LIm6!fhYUSaG zN|B+&MN8Y|b^_nxDrsX!+S=W7CsitNE+?xDT~{fScWZB7JYBZEINU@*H$OGZ6Evb^ z69L9h*rv3m=ANhA9LT4<0o8d|d&su&B(>n7DyRtPr5!;USPq-v9gvPy!IZ5r-}dM; zr|FuQG_f~k=d_x_tc4PCw&w`WotqoDLUgIX`3w{(wT_iX+-Q4dj+I=J!N&Z_67lnO zTBS}nyg4f-Sd@!tP|PP7<@Y>*s0MVS9_Hh7;e#`|if(zH(~yf3j#KK5wt6oEM_Xa9 zGM`v-Tn(~k*)C1!W_m!F(;8=ZhzAVNt+y9MdB#!tq7#6Vw4jGJQ%mGUPP}op9G=59v!j zG?AsDmBGdW6lKwp7bfqHQB`o5HGJIwX!;bVFR6Hqj+7^IBybLIcC8qTwgAXqOK#O; z#7D2xXfu(e1xe5afsf$c69+D!M^ZYQDhlr&$MMK`nbXW;jqX3~TQb(3_u@SYe9KR7 zT8fZR>czk#-4J&=3UrRzdoQoGts;SrpEs=@qsgq?0ha}dma`DP3bcBhfQXKqCtjfK zC3!K#<~isGNY7ry!Go-_sIj`|(A2aC28wGkR*CqWr5%=Em7zZ~dj!hm@LsGxR8X(J zd6u=Bo4S*)#j7qP%`5^%>cNegDIgt$@5NC}1>z!Brdm8pp1sC;vN%Xm?0WGi#4|_JDM3p&ZYGIjj z`ja@@W_n!!8-Y@-ugFf<#2KhWEn=)6#4Ai9x+`xuaI%*=f+$1X#uOFL$@Dqc^@UZ+ zuG5*HKdHV{QI?#iCpr?c0~*9)T;e#|tWX66(W4=b_Gm8#XUz}F5h8WC$?zx}3@y^R z?rH8LFC1{CQz_26yN2b`M7vpXf57y(o0u&tGl|}yPD$qa0_mV-;Jwnvtg}}Y z-42i5Y7x!t#>DdT>XSv@ndX3r>1{e`eKL;{OIvxcZ@{LFHiSEDic;5U%0x`i=rjw4 z1p~#;x&7(AvSNSG3ohEv{KbroU6y8K0bEQq5ahD7vOtqqS%WI}n4F;O{_^k22a1sD zG#p(#7z~jQEfYpnZQm+hHBA87aY`LuVZtC2`a9>ly{(|l(e0xAw2$EPwTlk3n2Lyq z`^#D-H5U*_c#jxon=J#c%NvwLz}Azbm3F|fahVPw*F*ipOxZ&x7m+6n3Oi!ADekNxW`SIDx$n_+AX*X;3b}i?ZZAE$4F?tLN*|LPBcSv+I%A- zE7ED<;F5LZ`N5Q5MD?k$)Q;^?8W>Rq1!nTCMwIG$ok-csCMk* zId_T_zkF&2>x!eouLQC+`9)TP5lOH6@XT~gkpB|}{6 zA>riHYm<-a*aDq@C$gPHiY4h-wQ z&L+a*+i>)F20=A1TL|Oe##Bo29c|e@O*!`K+Sl8yCJYzkG=f5Y14TkLKbi0+lOAMWk2Y2tu z+s6-|2j?3~NY{evGks`Jl3Mjum*GgTH5Y9dGlo-HVDb@Ext?9YdxWrjD?YtOq7UMx zw#UZq-5w3j*#fcMh2t<}lct%G3547O?RIzi_M zzk}b1v2#LXks2PW_0EdFLOJId4Y##|o}_KN0j{gsDBf~KFTu__8Nb(oCxm-c6j~^Kr{arFq=z<#$ZB$88}5l* zaR_|zUX{A!%b1!B%_z5{&08Q(YZZL}j;ulbrVn7^!20efo&wfU;$_FC_?Ylh;ODes zeNSdB$DRnWJ#-6YRdBq@1i8snQ59yN#ap#3Q6PaNe76cGpc}hu1ed0iXG2MkgCwl( z*#f9V7=+x7K6a?fJGBPqot9c{sJjb|I?yHAFaMr?`3FYVK#yTB0#Pc*=zxglPK zM4a|DOV@MQRo5=r0*rOgtf&!LeVm1msgxlI-JnK2Z)Zw8+UBU#uID|9Q!Gqo#_e>= z_u5ZnlCLU9`lv|A$~XK~JxscsZ4-Kb3b@U@vUp1IWII`agL6i6&!(`k{mDCg!p$8| zbz?=-M`{M>`Kw6aN)ED+kcSnhp`bnCR9Z$sHnjZS1i-ywps|zc42yckw6xSX=fjKo zl>CY1*0T;q#q$i(@JG|+$;;udSdbwltt0v2yM0kbjc6>dX{EPc!|+HWY7{W-!)P-@ zYCyw%?A8)+8%6ZWP90C1i#4YMsj~7NE(Ua|PC`Ae_dK#XJKykTHz}}o8#dS5MIqR9 zPAvDZw0%*rH!ApG)=z^I(($-Af)kE!u_u^cjEZ2VmxiPq(Y7EEv@J1ISzi?Hv*50h6BC&b?VMR z)NGIF+&!aNUs{|e-6yiPj@dF}Xw7jPRD(rK@8OmzxgElyj8$vQZuC2ZM~zAa&I0~+ zRNg#4GB7qlIH%=zu(`T5*Lf5=Pa(ona((T|^_u4{y+>!Jdg(7z|soLZ#&e6;22sKJ3bcasg$kB;jO3glPAtx8wS=IWK8Fc!NEhD>EvQ9z#Ym^b6A&p@RE0hV$u(I% z*XZ&Gi}KbPMh}%|vo=6QLr&W-oT|!(vg>+mHr_grXNUR1nD@H*ymNe6hl?fXpb#mvAO;7y{2Jt21 zTdKG`McroPDu^4^wk=V5V8_~EEP$KF=-0}D zm>mpFz9vh^N)Hz**yHU@!R&;wxrMIVOn~0q20eP}J)rh3LUpYkQZpZ^ay$`czopCF z#PF2UEf!!HQBYs0@SkJBPG*a@0*ZUr1>~h@>`Q}yUDWw7ntdN*L$DkY5-y~EZ~ zFlRciAcaJh>;*DJffY0&bu#nP25DX273Nw2kkF|=ygQDd(N{@`EI@ORk3F)*kM*@* zB-ZPi*X~b`JM#gSy{F^TLm~~P)F?~*WuVZffjJGO8gCVwG>*(ygE=K>`~igHG62!} zi)>|nZgtrKt||a7O+76xx5-v9v_hkjR1|5`&wv5MSZ zz1}M)8#yF+4=7^*g&v0|Pa!ejOq$Wixp%UV$GzDyuS3#?$y~;nBy(kGe2g<7kTW0DF!=#Ftst|^?m3h-CNstI zIzpSgS6h*7)jxI_moQ7<2Pznf0TGX2!?&j9=NRa~fp{*XAlm z@DWUV))TnwtS3fVBC3|v)7I1IVhW}u*O{fHtZu_$^Jp6eQkv=Z+#{Iro}QIakUUa% zSuC}Y7^}9~Cv@)2$K0>B2jj~!Qs}p?25JZZ(NU%;+t1d@t=}(obulD(^>w8DM`rh4QxGz>q_DQ6;fkldC zAk?G+D6C7#N$HvCfl%m+iSFf^zZ@)o!xwU!*S3OS>jIk6q zYU905ReTmKc$x+2o2}LChA)Bdp=Yv8DzRR9PqeFEUPrRGthNrw2;cxL_}TCySXm0b zPAcUWGq(9^Po=MqL^qmOFMw*|H20G79=ru0ADDr%CDnVlgh}s&^_qJtYEzQo@w}O{ zI(;Rq=ZRzQ;zSqQptNLhyPB9)@DTwCxMsXJ!GlNGQ3;RZ?eih;nR|lAvmMd)SUkE} z)OmIzvn}qaRWOV$W{KrZ-Z9a|D6GF5O>h_mQX7m}RnqI81(VRQY3ttB=UkR;8&A^? zY4LG`ymIK9ey$M6QDjze>W&;DN?WX25yQiY<q@ZIb@m0L_nF}Uu(g=BIJ!CHw9$?tJ%*ed0*+!lq zRxOa2cWFH%?}5pQE4Dlzjal&H07Weo$dZH`G%Eo(kOjotmkrv)4vuOD1G{GdZi;!} z4`{Bl!?_mftwP@Q2{~TtlIQLNCS07_Q#0v|>LQ01!VR?oI3gnhiQVQ-S#n5jCHeM5 zxdQD;MW@Ti8p0bulcG+5&9+jq%9Z_e(7znixX7whk)U0`DV|=TsjPqkzXry53S>dM zJei~q-Wv&U6t{w?T8@F@=x~xDa)zQinP@?g3zPGsrnL-clcD8wV7Ps8`l|AZ;Y}=g z_bb;0-WQzFu?{LVuU#(Vpv?mn;4cX!9j_eEyOixUa6G7KX~^gd9ZmsAtCS2><3O>O zX=sjsqkOCxk6js5k+b4n*qRq$_;t8C7CDU-WiTS>Jg+8gz20nn#5z zyQEogPD*-=4;qW1N0&OLM%8RpnG<(30d;B;UzDp~(^&`Ut7CbxzTL#rN?Ld?>$t#| zD7pts=eozEeP`Ni;|@ruX8Xz18B!mlR8?AWTNzMSJb9a3`34r|*hgD*paeTfqhp!8 zh(b_A+Iv+>47mFp?>tuyHg0a>(347ZQHn_BuDv-L~uEaB)?SPy1Wx&)u*ARB&Dl1&?ZKlI))J7-1N1hj5 zDBO9&9ZQcrfeJ44dAlg7NRRBhrkWjPo>JU|<{7f}vS_OnI+1>aic!*DJE<@LGjH-W z#&8p9&e2G7stS5A95G{c-lYMzLjVYppw^NhJV3EHp!ec=&=uKkOtoRo<2_OpTzs!y z6OIRRm2NQWA(H!In)1Vb5uvW`I7(j-xp(TUmdL6qgr!5;H7bv9Ibq!@2H^UFf#>{b(y0YpFTcBF^(qoM(aWLGD5MW zepct9vFWdPw9*@P$q<`-7m-7bA8<&h5cKj>04^XcZ9Jo(isq*UB3)%P@?czHkW3Q= zC*g0-cW9ilclT$i8a>QJ{!Jj@L;0s zt3}kLeeR*i$=S#1 z6eD<(u2?=jN=%%(Oh$>-N1o;8mthc7{Kf$`R`t zhu2yaGIbcoVMbB)peSydtNBELUTA3aX^7SOr5#%G0jJ)qAt@Ie7Q^1njVQP}h)yO# zYWHeLrl+GUKP2ohTyD;HEvs8(XfP}d6i32G^P7SJ#o>``}n#fLtT z@H&Ohcg19 zv8_e`a+aZ|Rc5EEa>`v&3b`I7Gz_>o2?6edNA79wIn$-BL^QM@FPI-lJ7HTghZkTo zz@v>rH%javxkT$FdF%0HxnCQS6ESreY7uyJH~!_}?L(HNjTJ6;_r@!?G+>}DR)B(} z(Wi4o?_5Lt1%NQ00#!^ulG2Rzh71f+pe@-I$9)C0DjVoiRUCVOc4*u$w~(4(@C`w~ zXRz4U82Z>F{Jji(Ci>9uO;HG^?|Ys8Vpk5y;I$*CJ`Szo=NK{`dD63ZL_41M7|N*h zCPWpg9;Yyl-NNUf~8gzF9V#O!DgJQo{3k16rJK-$e@TO+d&~2~%W#B&2}~ zXXO&xVvxu){6MUZvtAEvAHEI3D<`DmXVDV%9$liNJq2Ss@zl3)`s7lmCH7$`giuM&aceuieGsS}*^hUDbY**1#cAPqCz=ND2ZbLe3>b2FWc@Jjm5yx2`(>`27Q&oC8 zYZO8LK;OI>?RFo4&?k|&{w&0q4Y|5w+$Z%aK?hM3kz47mWFp1MIF}7an8!8@aC@?% z^c8LM?G_l4o;UV+b*|G}t3uBZRVC{SSsl-_+F&w?M@sSGp#yw*$nUj0c=%9>8-Q?J zJs&s;o2(DiJP=ACLWdWA+{sL@!U?&wHnR!a9W1HeJMVZkOa(#RSfIUgkBCQhaGS!M znfZtoJVi3ag{g$NfO_7@$Z3G(tzpX;D-m(Ni;0!Dw&1MORowfRh@3#!8pE=EL3(Qv zlnoMs<9gW@CJ@CE11!;#Hhom!T!HN(ka3hj%>hjYodL}V=*>qUEBRibI9~}@;i0~K zwf^RyjgX!!@h~Hty&`pJ@F(RAQ`}6dl>1)KMAS@> zsts4WF+w4W2E`K?&<;!yPfD~L=q-lqfQz4}( zrSINdEP&%F4m`nK5yJ=ohfu!Hus*RVm*t4uidC6;GPOWJqJmTDVzWHmh+(-y!)lK`V8; zRtz#pEa*eoY{#HCjIwx7s{7cFy-r0|%k4a&M3OMu6l%)JE1%%pr-=R~V!FMBR_oLk zKw7H)0Q@XdAMtQlh`>hmf^sJ=UWMg@LbfEncdFWuMq~Kcch&&=Xwa5OUOyh(ex+<`KUpLqx1mXT~qqtQcc9R*#;vpJr#GhGhoc~De2$D%Ix=c{=D{Cr2S zHC+RtA8hAQDKnDNShwm5*NckG>fO$x4NWLH>`HokEW}jf7n)*~8QwV$$dNl_L9Ne^ zttff*N~>GiAT3ScOxTGxE)14ic=+}C@xt(X6%XgAw6+j#rO1y`OS+eMptTrz+V?dm z*SsRFbXsQRdfd;RY&_Y1WtmS`^8hFzGiSrKAW`yQ<1kywd+u!jQI`(YMxWG!Y+tI9 zHPsVx2fEIB{rWM;kvz5Ay+%50n+QWY+Ef|?z+z{yct!3rv1UdF3VzEVt_qUp9d|+l zw~UDElim*{#5Fi;pKH6MYvvI=pHp%i@3FT881@W!9`f8-O26V!;g^qF?^+Zwr0bPA zHD)(iyh=@zFhqo9{=hiTu zRH7%(v$vdG_PW#Dt>9id7u8kCW5_GF+AP~=;Otz*o3vRyi~z=aTW>@VUqIqHUOP+2Qe3|{qydbCSyH+TCT*m*?M-d(EK0Ji}9gUo%w5ZZoi8mxjK_*8~>0}K#wJ4zi~k2d{0%gPZqJtrQIbd*vnB%A&D zcD7`#r&gIjZx@3oAA=XNOjd`b$X1*8-G$uvD1ojt!d^$$K4nghCIASlv^ezw$9S!Q zyns4c;ZM;(hOE4R^}ub;W+J?SsvWA(o*6kmWof>6;83usVp|YEc{4>G`sx>loVwSf8OeB}CXi}I6%(k-p)(h2 z(10I7rEqqqY^w_opTCXg=kjEa+;T`ShHZeYjqh}M`#mS3YD?V5x?KWkBu4YjGL&!z z#5f}nA}k{4&^5pW2A-r!Q5-zCTk{PJ_VP{Ne3)Q-$spMehWYaKq1Ltzb4iN@PSZ23 z!Nwpo0rBQeDO{NF7+0NGFkL61OnEKkF_w8NW%3LJ2xjaoI^H}yQ_opEVGb>&)3HL* zBoSXO%2zgbc))sb(^f?R^gI?DE;$z9cs*!Lu1XTUh|cwujad4jhZMf-2w*C&MdSxw z_8_O5%2%3Jr?74ANq4NH!kkF$PzU%+sZV^6>Hu% zss?Z@L>RSL0^pDN=9RALATW!|YjWhV%`rHJ}z^Vlc2fVrx+{oT|$>wZCMFTAF9s}s3bajJdvN_89 zp^14BeFaDkW~$8$2?(p3px#us&;3Xzq^2;~Y7X})4+*uSKglximn%I#NOv(FpsrL) z1~dr;W6V**BA?#`z;K0N7zh!z!IYtSIXYTFZD;pxnA8hheb3*)yLJfc)!j%%8hKkS zM)thWn5n(xXnd{Jz+UMsF|N)4%o9~c5!xLX>_-WakBQ#lyoCpn&(&lXR7)R`uk+p%*m+^IRFH$Y9tmGo}taFY6(7@>0n-NU3xSmgw$-b+5lv7)KdSBBs+ zv%6bbQ3D<4am>UgGL!7U>$+6o4<8gO!@F54gBAe2P2j%w1a1_G+ny-C>21@{>vtx- z<@9_Y28$WiB=Qm|%sm5}-<>hN18LpBN2kmz-s?|e#*jN>*%{}mC@Prv2e?1GtHYBp z&_LO0w&&_;=tu6B+3r5ncH7r@zS1`(^NQNA4|^FPp|jA#sWKVMnNbpwBS{uL{a8$j zHHG;QEcoHdfdxthjaLfNV>nuVr5>}Zh;FV)j47Or8wSl=|y?Z||uY$|LLJ<`mzFuIU+eJ=Hh|^cdmD zc*>VfDVRbkylsS4FSA0Xw>Vu>^D>^3P6a(d+aSO9_Fbh542vr4`Kxf#?9NNd2)88R z!e_0E3`wYgi=e^yoc%8uyIhzgv0IZbbzI`rz#~vsvuEdH978pRPjVK<5|2=?g0iLA z!+M{HKj8q4NH{}DdSVFB;El`JA%!nJ_LcOGbs4C==z;rIq$ zXaTIsqJ)5qQNj(h?C}o~|QPHY8*QVnN7zKq@zUg>Ro% zs#!fA4TiYD(_@}fppJ8XN~SirSmFl4ch*_q`Fgl|=wXog0Wwdz^8;0d$yp%?O@ttC z$#Ve5*f$<9)l+FbhjqylL0~)zJzDu*hxgLNIL9dLq(88N6Np@MLa7XZkrcyvo27Fo zCSyb8olOU?xkPA%*fMYFah&WHWTvQ+ZShFI*FVGXRz?DENSr(cksHqIm99lpY2@?` zJW*{6iNPtt(%Z6FAOIZN{5j)Er4et8)ExCqv2m~^$Wvp* z7!(K8oFaZF?IVyGbOKpVS1j&mvdSEiN+_PeWYX_i`>JPNMv}m6tw=+wIH7aeGM5y3Tuiyo6aa|40}qcqDgeta&equ3(q39gJC}|zIa5&=Pb(*8qbiT4Au+)2J@j=08|uf* zNuEnA^PY9Rso^C67;ENXjQ#hB;9_rILal3NysFv%7$%M+S1v{ z5SXFa8&xE8D@rCx9)uiwtZzqQ;0Z@pD8G$rV|Yc(2^%Tt`TWLQ!dVqNNN+*{(V8f> z=7~Amxja)peUy-taA@$dWbuIz=X>WQ_Rtt|pup#X&hK>B05w3$zh2`i)*VKvN{o$ ziB#oF9w>;*IJ-ykxHHqpkf3SeS??menAMjldE)TS607J4Ub{_}OLo$#r+Y`6I5+YS zd4#xFpson+vG!QE3e(g&8BOBF{iK9?-00#zA z;v7?Q0WA^PO@B_J^oaXd=qfo0AI!c*l|l4}Nd^;LVZGiok23TG#!vyOxRccT!Z%=) zLkm(BBPG4{P3d@oKAvlB+xE?904;46JuHGAj5WtR0v678hre`$$*2n^l$#%8`Se$M za5(l`hz=6ubmq15l@wkkim+O=h0THX&NE4};xWS0H~Wq=9_bo`W@q~)@Mw!*k!KB2+p0Ht}M?E^mK4|ua4XWcH6SANI*++acwVCER+w&Th zxb}7394Z`3k6WL;Rcz(AtY)<5tu&d77z==K<17z=WV;u`i^}y%*LU4do9(pN5LQh2 z7-Y>w)qM{htXw%4aj{d-90e&4cmL>4yEi~+VUtYCKIxE`&qO%(zp8iy%4VKx|Rcit8U zDit6~@xCG5dKFivN3Jm+KOjOY{*h`QtvBJ!6dP(w<3!HUqJ%i%T)OA}Z z9jd)f^VNj1mDG~;z_U8e?jph`(c$$VuublU#6h-M=!^u%jUZPrn?ng&bDrtpOVlKd zU~;C*9+IY0Pp)_=X}z)Yyp3Fn%q;KG0~_ab?v{&ZWg(y*eP24t_Xg%oTu@Pqib15P z8*xJ31)SewJAMW9o?mdn+j+El@2TpYIM5`wV@)p%tZM`34evBvdoeOLg-Pjw>5IY$ z?yxtJk=Oes=Bc<6z~gYiSC^*1_%TM~#+=?&<39FCV2E zWf{Rq#t_>G0~I;2~7dSujrl@0uBg{!jfP5^Hy6w z6)BWQVY-4eWkN~8ICXkgeGPd#5%VmD*R{MKJ+e~-sWKT;jik}2v7~00dxg&@}l z*ch=GpRn`bq>_} zmif8Ga2@Ssgh5gh^2+ljaLu-Awn+p1MY5Z_T1W1=W$deLaKPtBWB6J-GKUs=E#rC4 zab3PXb?;hRq8l-=-J>zE6#(uFn0AMKdGwN>B&pLyBU){4`ALeM5E}TN(R2Th{^r{=3Vq^m}t}9R6kvWk_mBKrq?%QLU5!Sx5n8&w^@1fNKY}R_~osJO=uy2{z zT0U=;PE0Fe^avh#y$%6L)WI2#MR=h4x#$ApKvJB`3t|o8Gksh_Dh5vA#Vs-2;QZyI ze(z!)t`}b{DB;M%AUM!CXye%t zHh>%&BeFeU{7c9ok89zV=U7uR6{dn(K4!nZSgtC1Px393 zX}t%{gdSYk&FoVUXJmpyn*wexV=e&NOh)#pSWJ`pgMQ|Mbu0=lL4qz7*nN>t>gmc8 zZ|>&9cCaBACd8mxAn&q~c~fech*_%ooRl$Z_8n>f2h75VXhD(y>PQ(H=8{TKNwdN- zQfLDb(SUJz%gZ=>xlPgfR(KCa{SsHK6*%em0hQ%}6v2?M7*PRHY?kRwPvOwnitpBP zUAI&5a_PwG(DWIL3#kyc8BAv>rZYPBzy@WqOE%4Oe_*ga6<2bgTe60O&}gzS zwyhCtgI9N+F7}k3zo&jejq&#S&G|dYD4iL3V^8x5zhQmuMb4s+8sI=p4W-u`GvA{S z6b{+VbA^n&>CguSx>3?Q-PR?BXmL;M$p8(yj1|0{WSmG|j{FQvpN`NpQ<2 zyS%%y?{Pnk@m(2;(-21)SDlQQduPG$=M-7qgtK@^&|=I_Xv=VO*KuRk)BAMyExllV z4)`Vt$IQXuH95NOdz_Dp`_M!n_ey-$T=t0%WojzcNYoY<9S3(un2s8_5W*XI7c%nU?uQ-<-jopn5 z?FEloyd>!Zx{%UcrM>Yb4QHgs2L_BI{eq#b=I)ugmakFMdrV?}2~#{xcsvbnp1dd* zJYmKXuAT^dM0;@EE{!~UQEOCQOu>7PQSCXJy;%dn}@+_ z+*3~$o)$}IKWUaN+R>Ug&#qvg+E}sd23W|_mU^^X6|UyyP4AdYrYYToANlN{EP>`e zEQQ+5BRgA4F>Iy?X5=T*E?u9HCx?F->F{MFTkAqo83#M71fi=t@WEM)z|5m3 z{-(|OB0S^uU?;ckIS=EsV^cHSt^on!alXK? zF^$wbeW*}7;V@<3?xdR6Wz6ZfjE6OmSgKx}p{f0vZu4!UC`%ANcHtu3hX^!SA%{qa z-XXyznR+zW4`83i4Jh8Is!cuZM<5b_3b7|qFMXCTszFA`SlWDjP{f|!gRPgouN3QP z&g0ZNYl)I*ys%es!5uq_X{_g>2n7o3HNf4Wfooy0H7gtbUhW>@#q@@rViFFOyYhkE z^ys{!sy9W^+ofu-E|DBcDYt+Y^t9HPbfcpvp~42Sv}1uIE3-IQ@F6)Ho_ip#fVQRp z=njP(KV^g-;^H3MmQ3ihR9%WW?WgoS+A_)>E*Jp=Gz7gGZ_e;k!BWcf%*m9?VaEke zw4{3HnnUkSA3WAKsSlvo*x#9khSI#3hqGia`oUvWl~d?|%O_>rO$_t=VFT`XSK#I2 z6zAD|=B$03Q2a^;n^eMzGO@#6g25rdfwy_awT}?rHXZ^QAH3wW_4Ve)n_`s2EcD{W z16z!z=Xkevl#rN?vUC*h@nPXQP^>LdS~$9O7r|-c<^>jVDHh#wKNqT20}83097gZ7 z>wD1}mvPZ6oQJj=ayW`yUBhv^FP_lzUWti)=J!@Y>oIa5$0ARZ<3SZf)10raKH{fa zBP3N*D&|DNFeHYr>{-&>^c)nQ&R7tn53a`MS()-cvPL)d>TDUzjK+Q{>7Q?H4M=>H z#aiS$m&z98Z2lc}L63#FxZ+`K8iCc-rU#0vag5h`~IuB)E~+YdMx z3tG-PsFh;@m)FfOF6>fN?qZt{OEz`6`;zXZ?Y-v!s$=6}ag(j+ne)chU1yUIKyo<; zWCc|2FDJtvX)T@MW6|x<4#rC;@qLI*q-@M?XKs(%2Lw59mLJ7K)X{$L@@=cz;0Cyf zQLD2>E`r0I7t!-Z%uRd~2?S~NZg)km=~5$**<%;6(7Iw?9&~*vhs0+EG$4pLVSCUm)KX4_&tEEuHj^o> zux5{H;A}X;HId;2`Qu59pgx9&72X4Zv4HbrX4;;Q<^yS61^|u>Tjabt1}746e>C(^ zrL*%;Q;_#YhrlxA!umbveDAd*G)C^_BHha*weGmig0eSMwsNlktj9rQpjlNqTHdvz zr@ZZnhDkHstrx%yv7@ivvon!yGk=rbW%x?4jiAX%4e{>jgd4r~0%&%5yMy(X&dbMm z`*Oe{IqsRN%nCd_x;o)WJ-o8ez*jN7REY$-Od|K9A96m<-2^-gm44cJ9!Y0cv3a)9 zVM}G8PqPko08V0XCWj%i-vg1=GeO%>s+K3ab6vSj@GkIbsPHvu5?nOV(-t`{0d31o zvDkSD1pG4+Xi$6{@Wr#3rZ)*%FngO|v3`vs7^Wt+kt|{u%~BpJA(0e6GRjsbL$zFi zVu&sUoRNbcyeutL1$e}Jfu46Uhl)gen0BOPb5GffjCi;(uM)AO5n1$IOp*d84VFT6 zzed!Qjh4@8GW5$y@zIQ^Ru*^SBGEuKgLFm%1|#h58!n5@9xs)Arq(N!w#EqD;gT&{ z-W|C$sJAuTlaf>Tpx}-kMYoS4ZXr)LsV7~mzSj}6??u>cGPY1!%i=>DDVf}PvXwTH z3d=HhX`5O7H)h`}%i`UP z2GxmyA<)WZk@X@4>{4oy96umA=$XmsynB z+)X_n0#5P}1&JknG>EP^SFkG8VA-B1@!UuHk=YU%xt_|Mtd!(7{$-_ts3dV2v?=TB zX?bBa&|p?(-3EQ{A$h`6P3s4!xP<-a=zv%dGZX|EMk5v(;UR9n^$?2Z9XR%K3>{wW zJIhj7%52NPQMv}Bpy28%csMf>q-Q3VE<@5Ti(UOV=-I{KGQcV~N?KT>3SR9`^B8fh zGQq_%m42EG0N&Axl%8tMku=;W04PdhXBgItV40?X%KR?8NQC;+s5kGuGQLO&T@#m9WzA!8v?l-sLWFNy!Po=xkRw%uGYCOl zJ(c@~*zs$(2XpoCDKKoSd(I84#isFsou)Fniavfryvz#ta>f+IiSaN_dYFzx&o{g+ z<*cH)_nizzU#>=;&${|uiG$57G^b29G6aRSl;@B&E|%Yo7*sQ!;SuIphrXy*0TRT! z812(nx3r#Xfh8bhWAPq$xlL4OVEB7uNUwa%E_A>Uw59~|HCMR8%3@F5a&h!9=o@IZ z(7K~r6!=wbS39)^1D^M28KNUWlWf8aRt^ZD&w)>=<)OMeG&xV6&AgXZhfeI3wib2h zrQUCmRe50XaSV(1+9G*G^rY+>=nebj_yBu!dW)g=y`e@GeQ2Kh9>kZGNMBa^iX-!7 zpv9;wn=4T-3$(G5LWc}i@+-e6iLCAPiO24f_E_H@1BMq5>J=0i7y&-E1p&Ro7{J>oooNJY{grJWFe?d8+OG~vh#d6&kd2|Gn zRElwDM%yF}(4?_;hp_#;;zh)aS6jU)hOTvku3wxX)tS=h!n`u_)U1yWL`*5GuzgaY zPi*4}HG4r#13bK0Ph>40cbwsY0jZHeE}{0wDNVAzNvb!d0-GvOWiqbDo<=mb58y4r z@*9s_!VMImhay_`o}8Fw5E1((0mH1aCk)5^21}Z& z*OhBX&nDFO`T~QV#dE%)nG}S|k$P}|E(FT)Jkk1jDXOBuEy_WhvUD56^Jgg2PdfZ` z$)2;ldBkn4r*BP>*oj5BVDm8&T=tNYwjHIR4LRDlVP$0>9w>G;uxu*Ujt^&M;xHze ztw2L4XrZ~*sw`&hA)HR@kZ(DG&f8a>qSyY6oC293yG}v-aZ$Z>q35{wi_uNyV_cBh z4VaEBLwob3CBf%pDbfg=3)|}4sDqF79R&r}k+##)B`{G&wBZ!zAH}(e5 zy5VSQ?eX}-gK>fZ9XXH5Zg#O3$b8;6Y!O7SFz*5$CW3Tn0!5|a>WZt4ONwv=_Vc;d zvFDA88)^EWRB)MGwKmJIU>&Q@%wp>;c?1d~vSx45}+NjZg@x5#>F2 z;U}VjV@XK0PJCpU2oFT)oi!pU4Q-MTj8Pu$<8-&G;`F%))tgNddxBePb2t|lb7Ny3 zG_P;)pgdoq<_L(41n|rRz;en32jiuMuHT~sUI*J*3~>w`aS?pxDF$w-bnN?b$%VGz zsc;uE4V4SFM93o^lpwn|nVE&|Go`As;1XJf6HR3diE_T?2h^HQ&=&oOUWSjw5Xmz+ zeJ0z4DTJCafCKdWibY*)ZB!9Er)kcaC!+N^^hvDMG^zInj=gsaup5yA*;hN^tJ!EX zp3iKT^Wq+QA9;^<+7y)cy5n1 zQ!LjuAI#!FC!oJe0!-*aPN66p%ZAlERj_=-Z#S+GtM4s6;9HZso*8XvT1xI=Iei^U z0|?3j;`3At88~&Q_?S(EACGB-uRn(9MM>v0Cz$ya~nvN6^t8sJT-hEX_q!Q5qri$8w-|l0 zhuaA@&mGEPA00p$f+$Mw0|6C7eW*kT80p+eLvbl4D!nQ6f}P7Zj3E~c*lXZSO2Ek) zpNGYDsA2J{a|q2vs^dc2jw9flfD+8!IN_sh#}lv@fvd7OW0XQAp*?x4Eu|9#BJb^? z3_eS|-84~dc>5A#sp1$E;0{wYiP4_z*=15}`LlS{41>$wlYK9qHb;~g<<`;c3AjGt zIh$K`s%UaN?o(%w0a8cx6AlS{7Y|AS;)%K13(s!35FC+l;euuwT0*kUwwD8Cf-ew} zkxxfceF1yQf;V-Acv(^@ox)&P*=J`;TEhMva(OP<|qLsd2sCu)G1G#q&32X`0?ho{3T!mU!q=gPTst=@{J;X(x*v1RbA|)b} z5nTA@E<%g-;vUq_*i>SR1E%`mJPSgMvD>0ZWnXXs>IBwjc@<0sWFxULNlz-1M}e~F zj>M|uDml}lW5{JAuK6LW3pxTp(?(-o5e=>t=1ep^OFPH11$=U$94(;Khi%^JnTSe(2&m#yB&5ChJ8?SXGP61ccU;Yogf1 zgzrq{0Rbc9p0xEsnzpz#5j}J~Ihqr%l=0Xaf@lCfNkWmX@z6vwz`_{mkr9!j4CT7o zBO9VCCZ9sZhOCGIhCB@2D^rHO7AfoW#maU)e(I-4JB0*GX`SF)akA%=kcjxSO=0?& zNjt-jK#+l>cIjgER5P!cNcw8{g;mv6x>2E1LE+G^W#NqixNEWnMPUQ#WLYA+^J z_|4^Lae2B|xz%$|4dbi;6J38gE99=RN%nY6#xuux4^yG$8DfBg^V5+weUx-S*01~4 z?@e$)lhT!CMPK4M_ry(0mymZb>|HTe^t(50V-#BJivs7g4idbN^DThy zF-$1iF6rWO-;7VIy`yaTGL!ksOs0)UI)~ha1wgV$;>TwX7jPaYb749H8fg$VNI*Zh zhXh;xK;jg1NKB_;FA=gu1S%7+OAOhV2S2MAGGeInev(spz1@$f*fR;FC|)Ry zi>pvpc8$1KpLX?gABcE0N_3$+!iwA^r5Tw*CLWu4*M~N9_LrLBLzbj@+x+&NpkQ7sL6Y0=IFRZPm>$pfL>c&5!a>D8C>o3f?^L7q5GQ_k5BfNv*JIsMpKQ~Dn98IAAW9kVJ}Gm%49FH1H`{7Y zQPiyne-6W&*jh;D%*jONQBf}$CwGJ4O!~s4SlON`>Nt$Yb89r5z;NSx&hqk@cTnA( zkg~~Q*aNys)QE(~bmS_C7%6FYQo_Ma#0%F$$ggmdTpqmbW9L`sJ9&x^2A?t6o1Kl# zywTNTc46Dl%;!Q+0z(0o>jt=+YQ|&L6>_o_*WNyB(Pt3%(2ZZ@1F6{bS>F<&R1`yE z!NExmNnfA}doL5u%6wKE4*E2oMM&(`rKfq9ZoD*c_ZTM~+c#~DiBMM}!W{9`U(xf& z81qi*DhuBhZi1MskYCRr+)hCQzSq9ymoT%8?MXp61zcTzS$X=zcZl3rU+4IWI+K5mkmTa(1Q!O-SC- zAbV_--VD%Ezj?_eJPP}g?LF>ifv-hFo_VUqMj6faqdeiq7w=fnbnAe**_>Ie4S)>> z8j7Jk!Mz>DQUBhUhqQMd&_u{&ww(3^=XLy$0>I)@#^va#vK1S4Y6~+?CT2<(*9F$m7)(xaJkP$%N^-Fhlbx%ccou)YOHOEJ=>0 zSVpdfY(Sz>@RXodkY)6p9`IMBOi>AmDNmfYZi}x0pXq7oQhSyZ(rTpwRr^Qx2@ zr-kOZtPKP{z9r;de3EUTBl%j6gsYuI3sC%o)!uX? zJv!Y$-@4Wo>{Z*`0;Y)*+ikAfKv>ksGp{$xKKN{okDTEzIZY+mmt>b;HtK^`sS|vD zaux(3i%#rs*4^#kM6>1^gfyjM*MgGdJ%+eOQFY|#Nta`21fR`pih&XAOz>)p4xx-wKpzm^4z)!v|^1^M^`(N zPyviHlx6{M>$0+2*S*iqh`YGxAY5tm^>rVPJ4}eKtvttL7WG-JoP0s==pZ`RVMtW5 zf*%rKW1!FBGSiV7!b2XfckUZ3K&u;q+_bmbmX5kkf@W%U7Dl6wqDnXi>(rd7nd5}p z`*GLj_r;a5u}@E{3)k@$*H8L-sq{3Dgh{w z(X6%bMnXlyk0$%gnnn4~qi7~vIKm2%L~TQ9+{Pv9eOt;>UVNnQ6jWjJ0eLv?)@Xab zd}`!U8rZ%o8q8@uNN5qZIMIYkQ1Y~QYfWZ}`x$;PK zB z0c&<663~H%oFJyQ9`z>H(0@%pdcw%ARjdEP!`TR8}s{}uI$S(&lMyd;h z1bA;~d$I=Y;S|v5C1=OtJa|?nWhs5<26VoYcGT4)kaPn zdT%leU1N6GX0EIzVl84<;pTNetl@egI(a$1Y(aqWZUKx% z9P+H?O0uGi8;4#VwZb46HwLfu%ruxv+pV?UFvfd=9!(?8{Cu8Uw7~FN?IIa5(_#m5 zjF#E9F2b;Sj}_(1PSa021yiqHb2+rNdMhBR$tnbIiDsQ#KM;GwTdi#7B%pY4WNR6$ z?_~{MN_ZOUa{e-YLfk^zXzpxyLFN7I=vhc7#)!l)YiFT$_4Edm#U%o1@0!(d7M8$k zso*9O%`vIB*?J?>6`Gw~@X{aG6>sE)dvDiKu7q_3gvH;JrwXN(+rXMZKvZQC8Xa2? zNLhlF*u*y53*G2WhMh94h{}cZ%S%BUq#dP_4axBkg?pPl#F&&vT!jZzlX{Sned%(1 zu6<`x8*3@QL_2vvzsN3~b&nz5dq#rZnm|42KX}_hRJz7n< z*`rq{$ZS*|404NQk13O{n0;Y)JXuSRB)}esiDAJ@o}=JOcPr0j<(+ZlKSZ+6&9FsS`BlU6Qk7R?ioCOAExBo4C<_|AzOFQdMu zs%Taq87F!54wQm)(9Y!<`QBb3jK-?7R_hxuy>r7?`b~j5t+=plZkjiDnw%jnarbfns>G)p7Y-1zE-A(?!iry z5yvm*=)zWBCA*3#FiE6WHj{e1cUWOHY*!C1B!`^OAIs!EDH7M;P&t!&k+jW0!rf^H=TI^Pp_E2BqGm+JHASI1Ocw8r z>NqtI+U04_RZl)W2qnxH^oqCQCD}4tXb(1;fj=pK>oMdzUwS%!>FJpVM%U{}6IzW> zuLIj5cH@E36@hfxA!Loy_BizcLt!F(^xC~`P^VBGZwT7Gijis zj|spb9WrosQSa~R6E$BFMvVqk6tF-y)bWAgYu4rlGUoF$oF&ByMhP+(Rq;1G1KcC_ z)}qC<@ksn}2hPjWxR(zw_fk;7$u;iv7F?iaz?%R=})#kXMx83$>E!s-pLbCGtvgvzLA4 ziS%2`=XNpOks@Y~Ob5AL%6Dtu5aK0x*NkU2tLOpwHEQZ8_{-3!(Jdi14H-Y36>NB# z#&2lK*kLvqo5=6x@{pqXU|{4-nHl8H^UORaQs0*FDJ=@xCii6q9UaNa0bq(!Y4MQP z)eCRZIbZiC_uP@PymfWWhaVgpJvD2q_fGebq>DU&Rh<%v znd%-9m<1XO8!ht0UBF9xsdPqI-C~y7@99f;J7M|)H#D9_<*U-Fiy*^$D|0zBbx`$a zDz-<(4<^a=-TJZjQsR>rp2XK_60)K5uyYSW<3tOIjH8V&5xg1aaR51a5uGE~ zJ)y*s6542$GdX&+Uw#IC`6=%L{RW`W0BZ#Jyl2zlWp@&0qqit$1vDkvqy)8%hPt1M zgP6P!5nS(iZ{ha9J9US}eRzi&j~a_G62OlHmBaTTtdx91CNJqqN*XK_0&B$Bu*F^#o$JEEi2u5WY3~%n~!lINr;y&v|#1N(yRPT@%s`Ae0I#$BSW}lYE z>lD9z#<}aW*D3~2uHCrI1-D^#mW*pbc}TpWUL-vqa(#7Pwl#3J)bEPYc2XT+vS{`c zFa2cI169aEn>PBywF-pe^z7Ap#>r!?kLAwZ)kR#9bh0_UNMq zE_7QlDX84b6IBUn!4j&f1F+-cvOt)DHeh1VQ(W0AUEs}Z(*;Xg*VcV023r1J3)Rdy z@**i3@r6xBnY+D+vYWm_euU$Rs{R_M6=l=go*&dHm@bHmoWuAezk?4!5dgjjO=XEF zPsNh%?O3(k(O@=~XrFw8+uGLFPH9YDQQ~~5wgIQU)Bsnt!-`K;S5SSD6ZKxmS-yV$ zs9xdYx{j{QkfDTI5<2E=RmUwIhOEHG0dl$>HNX~Qee+t5s=PJr0ba3~X11es70`K?UR zrOd$|zujQ1yNAb8qNF0yHx_gvtdbLVAY2BDxhH{Dw<)zK+E!$C?mpwQGh)BOa!Vq>n?v$Pg!JA{0Fv&sCah?$&t+)61kr@=~bJ5~q zJ$opJI|d-q)y-|*($K?&alIofn*aw(8zz)Q#w@d~BT#SbWetsEM`mluTLhm3Zl>Wt z63%-e&Y-@Jd+8WRuk8b7{AAPcWnjo)h=H85K599SY#qnB$F zCqi==jZCSYR<`Y41v07s*{TgCx$LJz{DOUnN{S)U`aZdm8cpFh~%^LBnW=ETkgo zUbvdpJv-X-8-66NZzN(7Zk)%*Z15XV|3Vb@Wx;~5y(oa5(sZjq>Rcc$;VxMbSZyYG z3Qg;_5)62-2sGvt1Eom5mFBZr)hYtUd&?a= zb9UfPdS20p`U-lz9-2m`Q5Ls^*Q>%FvB1>o2ZMMZfdiXakH_Kgu{|COs&3s>%#PCvu@H;boj2(tf(P|BA6OXVU94u58H`-toF}bQ=Ga0|60X_i-wg>HE2ekUI zu^gx7y?L7?FD`PoGn}bjhg@QLVZR4KdMmB@LJ3)()qoa9SqSFAMK185_&sG4OQ9@0 z!eSKhkf30Pam#utX(3RLaO4IW{r27ID};G{W1Pog#sZM@E+>3ah8RZi)pK1n8!4z3JuPBYQG<4r=iag*K*}E@Uno_X((F};FK>Y=&ENHw{o?0~rk$gA~lP>9) zSSefBr7yUwFq3kc${TN&k8$pv(i5QTejLL^wN;G|agPG`*p35Qukq0XlM;+}n;NsO z;3j-q!cOL?jabl+u!HexD~zc$Aj8buWIXW(l&qxpsYU8kx0k#zleL!}7lOud{p9Vg z5kY4$9G^Aq=0og(5(wiwbl(hP5{PCbyar2yYw`nquS6@WSWtv(en6-MmssJs&n4Il zpEp~U+*J;|Nt{%sJx*Pd459Vxdf|aj%hQ5Z@4X_Pq;6g&!l2#CLqbukc`tP==2Xz? z9RRKx-;1j7iA-1&B7Gi(rXp)a7CI(73;ZBqZNT6~`94u5F*s$h^T(VLQ!|3(XMB<6 zuLp&u(83KcGNH&y*Fv!O)Jlv8M4t4PYyrJ|QDP5E^H#gc>vrJ`9b+u8$-Qo4Yt=x_ z{8lL)a*wDh8iF963(!4Cf-6pTqJf5C8i7%})HK}-j1HI92YQ?K^3^c{=)tW*!vvKB zf{wSLRMibW`e(UO-09Ac9T_ zol7S4xH~$?^)(h}son$b110Pc7L}8_W5s#JiZu*#T(CO=!$bg4DPbkUdXFwqcZ^1RC1)g_ahw(3^iGPmeQ;|mHxjOIH1y0M0NO1Y|N zqjm9G98-`u?Xi;dIAUql0u;e}7->&;6D%*xFksBAkyvF>k*`y9%24>CS@a~r1C&O= zBgT3xNak=cn|6Dmz~F43LxGuq$31xE$|JqN(IRNJj|rJy3HZ+LWa|S9wd6;Lgu6s! z&9+bMGN_F4vR*)lbx*u!#Z-;H8jd%y*FlUoLtHt~YDR=w-CaRVo1OLYA@u?Qq0V%m z$-Q%Z{FZd6*pAw)9eX1e+A#EW?@eUz>l^FPYgO5- zHdqBEe3Q?5Y)dbgQd1QCft2@nMGcRV%*xnwmk7`6W!2G5Z=5(pyyOinlDCOiJiD)! z(AyoH89+xFP$l|FQ&PN1`lR&H7o`YO0#Ve{S*Z<5x$o7oBWhA?q3;V=tjbC0gd{`H zs~&p%kQoqEf;}Ga>$W2$A;&tKB^7apP)&LUa?pYzO()$MM@`7Ip#w>4`pQ6Edzi3f z%8@2}2UI5AfB%d*m$6bw{RRZdlLvzR@T zr>LE6)%;YmSPp8i#4%6T9arBq8XWTtoR8b+6)L1Tge7}uEr<-D8h}l7r6Np%xA$XO zuAb~|J*cV&2!0ev7;G8iOoFiLS7-v}6`=f-fpvu$=ym4?vo{#o`VHxOaHN+CdmhLh z<$ak-4_u#7Lo_^l9d~2~N< zfOR1&!b1__XbTDL2^KMJ-lM1Z;s|!LCD;V$A-XC#%L{{h?JTq`T>FN{VIu_IJ%L)> zJ2&$keD}tk+|-v&b>TI!7GFtpQ}0O6km@`F3`qrSUJmkcZl#{_T-tnM%jx%+=v|@b zC#DQvn8tfNtI!cvlM1c~nEH@Ud6~se?t%qcAm#vcy@f6OwF-8IxdobJRzk~*bsCVy z1!{H(lm3Me9SK3Pa4Vb7C-Rx&(v%iDFfHKr1)!yJ!iq85wq4h1+Bu2K8N=HpqBlbG zthkOznOyOW7L!;wgi%6vmVl=zL1P#B7H`ahYsROz{THX^K(AS*!-esXn{bYiJsq|O z@PxKDx6hh<4TLflOA+362QE+jvs9&r8#JhkghxsB204beLlpP;E9tq!@O2nO^f_q6KT*p_03eILU z7rbxyt;nH!jhB}cg)M^~l{47g?5yo<5ZR9Am@kC?+!rT}ChcxG3ikt@1yI2>6lJi^ zF>&XeIngwcof#H*klg&zX+#uY{3ZuQ1|J~zke!p|gHw38fcBEsCnNnP&Lm(v<(?Va zN>+C?By5TY6V-*X-Mb8rSNRmRpEt^m_Tj}grE)21zLwP%SGw7Ix|GZrh@^3xvwl_q zWVBg=H~Dm;2g-fgLlE4zU~JTDtKOp_Y9@x0nJ3(>rUHG-Dlt#km~v8`Jgw)_>C6t> zS)U@f4Kms@lVfgIz!WuOa+qK|5>1cYasd#n=j;$hsnswZF9|_foU8k7=$oVIgnCg< zPVXd{6t7~Kk+QPV%;h@uNjg`V*y3}LhbMdCdN0jzIoMSmninzTqjNPXFBLhc_DARP znm~t6*WOf3lWI(=scNw83UoSkC(tX&*X)upgi3Eb-}pDD{>ACKXMw8+YPga^b2?>9Gz}}s>^1^uls5NwV27r!=5jbp4_dYlH#*N z%k4YGGMd_@X@XF6hrT7pxh&azUKoV#$O4NBk7i|gL~?xiIu*hQ3pA**Eovn}7tLZN445bBT2zs z@P!gTnKwC6I3>congjy$Jc;Rr-RT-*h(^}yrETOEX2;RTCpvhKhUH~$(JKOT0m6Xx z!9L+-wZ0LYuGV$B5Dd$~)1C+D9A(-)D-3<-3~w;D%YDi*S+4OZg#!!#Tua}afCKXI zBfiriMwTZldymmYq#QF}cN^ceL+kOOLB5%Jz=I=u*(buNys&BEVl-9n#XEgz6%DF6 z>U|`6Fi#m^nJi^0>h^2bPdEk5c#y>Q0VdOGM@X#KTQH@^k@&to4p}!B|`z z4;`O{GR76-y&6a*c!gY5~kx%}*)JxwY@{LY4^8=*ApZ#=hZ_ny+B)Ai@PLG+yiwmLsG?t>-e4?X`rFcNN2p+N>hh~gx~>Q+dFp&+ z>_en{KrpXDp$kXL`>{8baK`nf(RbBJZ178OgPY-6>i^ZM&ia) zWX`^N;I8(ab5Z zjYR;{6ETPMEm28oES*8VlPAH#P*i~m{Xi@qt-jZadO9`dqq`XKwAZ)RtE{RKeT-n+ z{hmC%9E2;S=YVYl1NKyqU5%b|O$WRIwZkHFVQOk7rP_FmEB=U(>6lGmP<1XN_h1nx&Wy+o6bl%9rzJtmI6 zT=Z3zmBDtqTzfk&Cq5hn-9{Vr<1vOip<}$0AoPAD?R$D=6a-^OvW^Yg}8{-QV}D{ zwCuzXnU_V?{$4#am(Uh!9G0J+~;pk$C**wGO>aC z?W=QN$A`U&EPdr+Z$fdM9~A*zO5R(DQIg9XC#lAmjFQ$S7@(|4x^D53ns+N;bp%NI z?t$=g3_)n5YI;jE#b<~EZt1#HSWo2Pd-{@QBeXmaNEX@PDY!9U_q|SYl)Bd(f1#>t zL{>u zc0RMUhfg^knc7OdTn5c)Q1T}nqkMI5H?PJ5l$l^>ggKBe+2V~CV_1uGZM5pkuwcKOLBq4B#1d9(yvyrY#*sJ?#wfbbe6!4WnM?< zCU%2js$K&*PJa^^}wbGKltV2uDn z?GN~ACN3LePFotk0C~W&`{cP3bCOeh(&5oxon2KFmI>Q^J6)`IkwWRdYrguK=i#9Z7EdLV5c6l$cN8L+ko1Xv1pGr!tI2^ zK2bILLe*^7fh+D&NESUM3ooOB#G-i=IKAcg!b6{kKe)&B>XD10JUj=BvPj(s2LnL? zS7o4^M$-oIuzeD#U6_^yYR}&mhoMv2O70mvU+&Ps_lnFZ&=_rBpgbvAEWu+N-?8Ok zAh`{ncB^PqB1NvdHwYA~zRzgZFdUwXbqax6&ty)BhhP_hdbj??s?`%0DdSPIES;LF z+8sZyomb#0C9Jq)u2x6cYW7ssU38nCiMbmZ_ips1JnZMdYR~SLvm(8T;k1=BSX$vOLIgo);70zU8JaZ^INt17Kp7mAk3Mp38|4 zlY)0_G-;`+KZ;j){?M@%YRrVA#q_AQe1+?6OD)}Id(UI&L*Rnsn7Cr+lIIj!5BI@} zoTW42%A!`FYY_6#+3~cxJza=93P(Y|^LNA}xN>KX!b7&&vqyC=74{g^`q}TotG$N}&EXKHW1Rv^{Jlsu zt56{HGhB;zWMR4aI2crroB~wlS(V79g9@-v~{GPjJjDxE)Ogp{x1}R;h<=lpzzFoTXb=R3w0hyv<1T5)c;Y zmAb3Um4V`UH^6Dsx~^zG(|l$d!t4siT*9x|+f%ePNrW}i#9mWORz?|X2JbrMdrksu z4H&yQv6dDi@RUwDvIqCb=DcRJfI=k`8hX_p62G?~h=q|dgl{j5VAVYxx~8oXTL2yz zpqkcjW?87oxtwVki{D!vqt{9fROmqmO~r!Gktrwk9>tj9s@toD@Gfrw>}QYs>Fxxo zltvCiuB0+eIJ8wezf03nkSldJniYC2fxQq%7^Yclcgkn>O2)Q?pK3M0yZMpN(p<=U zHJClO?fgOn)RDl2-?9c7C*Dk<4kJkdZXlS8AV=;@*%DItjQ4}c!p0f_J(_0*Ohd$CK30Gfm zUUEx3^C`LAoL$|=Gyp8&W^v$Kk@T<47Db>9?eonjQtU6C~*VpDrSbO&5Mk=uYL8VMQ zU8kn@v2#Uq9VWk*l`V@W$Co6V@3#25Tp+9LR2bC|U5wKn^cHT)aK1gU$QDP99-!GP z550o*1`C_WN7|UFp<9@X0$a4bQ5;5ctxYa@K1l(-JMl^jP>{-D;nlS9Mhdy}%O0w@ z*?L0spc-r9;DWcLn0CaxbhW2k_VeGn#Lx1BTxEq-5_r@ zdjhH7yHvw<+q-4p(UImBx<74O?`X9$)k-*t%~Nt6&^w?6>sJW(#xR#O{4Sx%>SL7Xj zvPC;MTu+s8-puUKIH@;60FZMzvOWn7J|2ie)7=t*GNo9}c)C!%&9${dp5m$RY|4=3wM?v_2IO{)MOj3Y5XY3$RwObR3CXJ<;#9SsGr7N^e3 z#}D5#ah8e_;(E?>2{R}P^+{{!7p=gIun)m0*~GaR^g!^1r)I73@}xwSp37=+tS6ow zbpp&)3M)1BP~EJT@)hA2H26EjhhCy7H1Nu?vt_p_wjS01n3tlcxlX>(@qQ1&Tbiv( zEn9gC8CCLBidmS)L1$Ku-A10hwhIetir7%svK1#>YhPmVhf)z*qGl7MlPy;7h$h`$ zDgC8F5eJV=XvJj&e^6^7KY*lF4+qB7u$^PVYL6C}yFWP)P3X}Uo1YuPjo z)`7QAN>$@M&17qFiLE=Q_g=#@#+rh7@J#wKnH&{$Zu#pQ@=%#XI)zl$U7=@W_GW<< zU%r`LQeA?DY+x%0QdlUBBM1J5%9R`(0U*)1^{i)ysT#&-%i^^pRW0qwaXEC(U}Pqx59#etDQc z;LBrYGU?VQ{mvfsgF=xd5}~2mrclcRAoVwguBX-uM59?R60Sm*JgHF6cPF`YDOQ8=wbDyw9r(nQMKV{ zHxKrjn-!W4gz=D~h!)*hBVW_7 z#u1l1Ab`8vp>EmnxhfYhiz(@oe`W7(fkm=yIA^r;#c zVWxOxN+Oot`WLPU{DrHR<3{Do0_h=7NCQTg0ddFTo3fN1!i$Y-4je;8<*Mvy1BG!X zMHxoq=eY@@{TO;K%*_3C(FW0HMS#>rE*=@M(clew#o9VjxlJq4@p9 zE5pMwYfAU5$cOpFRH$SdonCA*-JaA7K+&-jNNgM@O(6;^)TnlFBPoDX!}E78di^ZR zn|)=Fmdr%lLl5I-1)ev&49RCockFYyJjz@F>@3rjh%Df|TJ}({ON{9rK9UC>OxF*` zj#W;ZM_p^f+-IL%iKTY6T6e!V2!EJeSU3&{B)s_W&JqM39z0xqCVjQT40SN7XF!-i zmO~shug#Yvbh>%@uEht=-ox6F>LZSJ2=Ygb)&|%rHHp*F0v>Fn1+0oESA1JyFfz~V z?R!Jh6Jm{qJNj@rThfoIo|B(>B|Opx5<27Mtdw*lD?mjuiN-b#7dm-&Pj$^LA45VO zzufW(8Br;$@v-wMzT`GNIcK8PCWiN--`x7};9HzBmT^L=zNJT*(t=UjeaiJ5ToA(K zE!_$rUu>?L;^-*ACP*LT>hJ?-Q-e!%j`v=by(oTZz2XtReE#@BYzT;p?f}7-6Nnhj zyRLf~^H7u1OZVjqYaoCy>kL^J&fF&{;@IxY9;%3R*MQ`wX(h851{mWHS~14J;g!)m z^fA8#wW-cgRu{YRPAR>s9$VNqK&69k6R4se>jD5YvHNxE`nvkfu6f`kQbC9-6k=Hm zOR&m7l%~~lpQzKFf{I|*^bU^cB7WHId-&X-3ME7fjX`D**h)vZ+jwAjr{A15JD(-5ugdRn;USaAYlvlt)e5Q5k#?hqH%k*ZBHHB>OUEH>G$CM35PW|hMdfvik2>n*CzHPIOntHj+)NEF4y<29@v@LrQBw-Ye-Wbn}!jPc$)>k zD;CjyN)uh_bqFuF0v=bUZ+cxt^ZBYV^}+}>$EI%>-+Q4*>kl^8GMWNZ{sQ)438rz` z3o7KLo8Tq(wJS3Qd3vSm-YMV%7@*qPbN(uiQ0aIEO=@h-AL(JrXwA!*C~@Ov;|5ox zoqq9Tm$*jxCFIN66(wzc8&TKxHufy_eiq1v*H* zS;K`xu80cSxxjmdYxQj1v8fR}7M#?3Sr&#Q^VInjOQyh4UDx?71zwS{lo@o5iFZM| zwqNoKk2mp3R1FZ*x9+iA3P>C%O&SfX^kcNDd97J@LA<5a#-}q-59-*R9nsdDAUiy0 z8-1>@L`{*D?4n6r9@C%-qYoi=+9fCEs36WOb$QW8Q*_!dxe-LnMbH+6JhiU?w1gdg zxP}!x*E2!(FgI>V&X*6y$CJ!bQ^F7iD&1gC=qMpjbEm)!LSGX|9Y<>t32p{0T@+_j z%!OC238hSv_==?0bKyD+w|5xe8&dLYmJJ?8?^E`wl!WJl*lxvFG1sx(iM8_r8;0FU z3W)I06zSy~v{49`kpV?~@-DRCHJA9qtEh%%#KC}g@k-bpmqelLk|u@_KW}#(#g@w) z#MoLF)64)q^sB{a;m8_;11hWypU1Bkz^O|CWJ)kK5wc_j(qotTo&aTGj8X&Zv+R-P zm^D91fl%XI-`oJzcsGs+3u6MOC`tqF3*|PG7kX_by#a7kiHxp8D+a7Iw9jNHz+Xz0zB@sfDM_B2GJMOYgXi z#bvStVh+onB8alEC{t14HtSn-V85ovX01h;@I->Q=RFI|(>(5KN<`JH^ilE_TLU{N zSU0fF3+!R8sX$qv&;<+_(RputW)VtC;hFjKu-7=juZecvll_y}Zo8pO9rh=DyHdN}vWp`eKrcjA!~jU$GEK#a924?*2XnDDE2r#RaCO1m$Jx3lDCVxVBQ zfSFfmLZC}i-Rsz(<;8MDjtYx@em!++%=~beIuAvFd_#K?LeE9lLB{w=7ETJ0fivPavHnFYq1pi1j9SwA>G5hQTez`<9Lll1@%gFptCKoUL5@H#5G|K48ua#5j z9N*iD<{EjUoz{;{fIV+B=Emu2Rut0VQ4G8+IUc1a zzAE!{a~Xo&VI5(BNGQ^SB|B=JL^|8RuldPCNHaz!A+gFDgLSU;1c>NVHQ~l5Z2A}u z`8hosYa;81Pu@H?U$sORb@f)2KHyl?%c4t7dZ-}JkoxU9xG4p}wIkQN_w>#?oVAM| z6S4RizwCbD7eu?srcSSWBeRlyP@+X@diCkh!$Uugjxw;|tHY`0GaR=HwCxzo5n)MY zLkV(!qwfOy@@$nI^oVrb<@xGu_=o1<9Sq;79i%zZk6=NT$=)F#sacQqy_=@a=BLGQVZ3%Rd9VP zZYZa@PHHuO!SneQh@@T;)gj|0%j%pfg~Y|NWnj%3N?xi!gxEU-rT5<54nH^O?aO3( zH$;f%^gL=D4i4TczC;RTW_Q*V$)!rx{2@WqL_Q{o^*zTv{hVXD>=LT6nKw8J#}sWI z78^m=TBlg+B_BiZ{Hm(9&ttolPeXkfl;;fQ{AsccwBdVCOMtwd#Dk&Ovb2koyvUJ0 zW;B+{?Wh9e-o2A#tdz!HaJj8;sI2I-I70D-w`Z$wDf;=Cqo(ul87`c#mt8+paLIk1 zpl}!Ev;Fh}b7HF@rL^DkuJTirn8%dFPrdIiWY>^k~30iWcy2CKEgZE}f($ z&8lR{_R+3w;Q?*JJob`jw0jE4lFsEkERC)qWN>{Ac5gdm0g~=E09QJ8-{dF&*~^2m zcej0c$k?y+-Micj(DwpuNx{W>uz3f=uZB(DIwcd5-_Kj+l4MBLMWt7c4qWpNO^GDD z?!|SAjkgahEp4+T@8L@-Eyx~Sb@NK8+#L(uSC7M=OOCC(m~MhEvTJy)v(y`7f6uV^ zT}C|_b4dX5O6P+18%HFHC({tFv7j<+6GCqDepKstb$Xo)Pv@(h!pt$)d?ne#-qJv~ z^Pt#38-b@$Vly-h*N>Ul1+dDTW|{y+Lx~zMd_CSd!5{}JUo@*UV@CL0iDF-m)!RpQ z=Ao!hF=!OUCmqtlP2Ow3rkUpuNR8Qt{2r9c#_I(+qUSl#m>>~S`Y@}buht+tMP(zg zXP9{2`ONK1!@PMYOCnd{Tc4yL@)gVN!vHOCZXRh~c#HS%#2@aS@!96~#3- zzwO5)hcw;YaG4=RtLNM6%_xe*9dmV;W=g!0oFVcKQB9n=UmmQZ_FnhfO@1)e_Mi`+ z+-W{uVOoJBxvr^u34-^c3&@Y4eDqw1uxxr3U~E~+TqyLpfM?Nam5;laP^Weni>#t3 zH{E5cuXs?+&Q;EJy90U#l|(q0_}eJ29Hj>u?6*So!us5YV3@(f!DOEr-$1a&;7wK0 z6EMLeT=rVyqXJ$_WGB}fk_?EDk_<7_QOgq5GJnCwI_dmK%U|EI(>cvWdDq;Vpa>c` z5Hgu(dGC6-mk=}9VRDS6w5!=XIshe?-eA8Z8BpuUI9o-`0VyBmV8~ITX153AWFF}+ z^5PBH6C>%`jcKNb>t?TXavq{N$OGgbggTtIt&fO=86NY$w;njym+Lb_ZUiJ5~f+xYBvqT!!XC z(NKn+*+b>UVtWa#hTGkn%F}B1QeG0SU|Dcpvpme@WAyMPfp$=$MS4YcNxo$VHe7tG zZ%kbEYQ`Rva0Z2G*a_nmQ-E_0ziEx`wb$BcB(K>q0Vv0I5pTgvgp;dSvXeK)%iGr( z%vBo@%Q*T?E_yyufypglG0=az>y6@8Aj?NSZ3swvqK%^19<*mG) z(gz}>IMz{AFKPm^`;icI)zl<4-?)oT^)A}OM@edd;gjbNz%oi&P!MxDOtmd2DTLs; zGxOHol9TAwd&8u_FS^oR23za`w7$Qn6*x8Tr7&pcO~k3mdlB*^HE1q`)E+~=U_~{B zN~g+kl?#G+G`%VVhftD-^~}$-YC(FueNk78V1?|FUfnZEIqr?ZL6%5HMBa5jelatk ztN;wO^vs`FSaGe|UTkAV~a(UR04A05A z(YPLmxeFA~vWz)G^KB9FzFBCGUX22s+l^kzTF)zjSPBfzE^Q&*Yz`R75cH9>#m2Ng zd)z@{F{pIpN?Qv?v^2f)=5C!+C|RPSaUj(!84C_GSh6H+Yh0M~rZ6w88}|X(Ho46y zZQ?p^U>`q{A)uKorG;39;LCbsJ$*}+dZy5HXJZkR?@CP??98U5ax_4+YF-K?B=MYb z+RN(K`x*})uC2D|!4-j>j!p@;50W3Xml`|)1sW?zA(Y2L-9fv2=yEIu46?#qA$_gQan^|2=hD^*=)?E$0G5$Z87Us6zh}XD1cr|``9XmZ;O>}n z(^XywXoy|K*`ja7O@ST=GDdaVsAW&Z%Q$nPm?(M7xv%-U@0vlMfU_-;*AjHRA~Jvpo2SVTWrU#ANclf~AqC_h!$nQeL{F1((C#?zu?K6CmAX$7~byu5} zut_o-7jk*;G@s-Q(M!b{V3v4F(cq-;oUM+^4lEf_rm@}f;LL?Z-xm5^`V#L#EJfJ! zyWzvN0F3M9WpOIdw~hCdnF?Ia>8>J%A&O6f(ziA;`$#+yrKdRaleG~M%`8pFXR@P-hEHHUIvbaFVBmYwXnejTw(_x6JYI-O2*n+3`C; zR<0QG-txkzdwlR3I|irC(r?+M9tR1oWmliP! ziqz!`Hhol#+r_(4EvwO0#=0a(;W{trwH=FGtsHt_8v@qN%e3b$?Oct)CT~Iw%kS;3 zdV^pJS@46@`a_2~`(wO50Q?`fdEm6CbYLhKx8uhkE*t-ahkl;2|MxzUWQ8;AyH z&*amQ#PX2LMBPqB;C51XdDLFFXaObDal9?JA(cDN7Khww4S^pXuB`7ONHx>WvoS7Q zk#MfVhPc(lOMH8;LN7OWG$}Cz5()pFZ!taOskm_(96ew+FMN+H6JpQ96BO(o@neKN z;6hI4fW~-d0Uk>iBjyFFbwy2<$BRKxQMN4-EMo~^LFXzE zxMu*prR0Lh0@03YbB#pxUeB8|#e0@d?4`*`qh``Gtw#n)Dnzb$gi+;Bqx(;SXLAIA5YECWm4G`pMR9KdeQUgJ3!Ci<2bGY@B(A_u48hK|q6)PoMfO z&#NWQ8SRxa*hfBh3l99mtzQtn8P!w|xXTgOi^A6jKFjQn$$Wt)=>lg#NNgdmnM*38 z?ohh*Z1;h(lL61dl|#kLS(Rr`yhvz3p!V1+l2|)UMyYp$fQ%}d3-sp2P5R{UV7~&) zZW#fih+&=?cszs-w0POEKxu6~1#rsOX2D7F3Tqme`U#_ME9ZMbHxiKlD_bn0G+6GmjM`3azckqKf@%IQhmG*nS_3s5)so=X-tOtFS;=^O>aKVEt9uUxn z<4`z9s}AndiohXJ2_ybSTsFi!3vwI!o;Eu_*%I1`EUxOcl&n>Y4$GyfeX`z-!&0C# z`T7`LRa&fTm#d2Hk~`1*cvnSlo1WeBZdG0q=;9vLz%J(+*1$&5318r3&f#s{7(9B8 zqfg7;o32-9dWF37@(xdZ=S}sQbW}v^k^Vj7UMo^O$@CX!QA4Wsgg7^=B`#HS_zVL< zoph8&bQcIPREAZ;&4L`<1}uVoxZd4rmq-;Mtphn&LdEo2YbSMr^%Dzz9{4LU%_Omx z-|xhFy}q*y+o}+grwd3H*zy*k008!0`5Xo~<=Z@IXec3e)Kaao0`JmNA+Uk5cIHQU z2q{8yOmbax87fA;M-|3b5}>kMg&>A8B#%>^pQzv%zYe^_kVhhCW_@}=^Bzl9-`Y`P}Ux60xYcL)(?y%}5c~E8kUJeU}gfG!Nf1^=wLx_d4jI z#HipXss$z{)okFVJbSvS#R*}xYjCg^{AP$1o%eDR&?{z_nlGo~t!2DrAbTb8T-j0+ zVIC$t;n@aHc=4=bCEM_Xrr-ol2UJm`;jQ~)(>+1juBAbU5kN_-Q-w`u;-{dH-T7#p zRgu4J?yhW{0et6nEhi3}&_$YKMY{Ewra)exOo2M6z_y|h)#^wZv>+o65sAxL~>lM}s z>L~3xRb~VQ!X9^{m%+Vr4JXsrk zuLJOTt~_|}(UiRg3wm5^EPg$EpY?Y?*#q)KLNy~*Es<+p7 zy1I(#M;32`RV5Q2i?Td&YMZj{K5L;GsUA#AAGmEu8I5VfEA#a`@U|Rdf`(3^c33i= zs8)LNaG%L7BR?$$rM)&nVHK07FR%iKUvJx96h;Lg`YQuh-K1tl1;TtU%E^YFl0Jtq z%Jh7rTEmMNMMJ`yOnO)h-J(1N3bKG=_5y4sl-bY_*0vr$Y3;TUtkoPQuw!5sjdx<( z#W-rl=?u#uoj5(Uc!+e0=ZjAciRB6cBxZ6MNWhiaqV`(&Q3;fnC-;H_=R-0LtGV|c z&g+~_9F{mySvbnZ_vkSyoFk&C&cpO*|(*m-`aQJm3N8sou^oK4THD z#S)i+*U}=B9sCsM$aT2pMO&A{vz=F@R$%>Dm6SOKx}{)-`iSssxgT>R*G7cP(8#LQ z9Rgv|MJ<^UF*32}3Pc5 ztjOf0f)?cIAh-pdJ|uLN(gEdOq-EBIGK_T5LS>ugN_>heAQSa6kkRDXLLtI{s|h@k zY)=g3@_brZft88KIK8ffYKgl%nShg8Z`)1sLZ6a)PlU{b!(1twHdNP)^8;4O6s@aG z4SX=ePZ^fpO!GvsMhaodd(#=~iDCGlUxvVY@ixTMtfww6-gRmalD`h!ecrYk)OKKw zxuMkR2?ofct@QG}=ZT3ys6mqaC?gElPM>N{8Q@;?Fas~p91C5fR^;}gXDYU6XOTED z#PS+rK`&bCojdM3fjD`3e(Ff-YnYq*Oik@%IOZO`XcaTTh*EFo=P@{(Oqz`_Ou%}h zxeGMBaO{<|*W2e0A6QU?I^ZgoqlFQ2(oRYdg^EDp+vjVO_w?c@tvniMAVVZzsmvIq zUp}eWEsxrW_udo15%MRuPbYBb2;`LX8eTVFJlExMl+*G;@{Gop$M9U`dET(23hcS- zyqU5BGa{j~w{gs4PShR2BDaX%ItcJ?iSGfIztuKn3?V!0ZCU9QI}1b+9u$-W(iM9X z!*z0J@{~<-kZa00p!dmJ+_^4zKGozH!#np#M*FeFQNIFOwQqL)i`{vv(iPN~7OHfb z$MHr(6Ri6lW#)A69q8IypE$jMAc$2pl+g`yb8n8l3UDhl*uIzPIy5|(pppxF?LmA` zWz@7@S1i?bpx=ZJ6u;T^XJj22uTHd+idG`0(nUjsH5~=it}V^sg;ER$`=Hob!8qw) zov$N3t1Nqj59ub8M+lAi!hK3_4p=w_ZaQDQ(HCtLd4{^tUX|>B!Fwn#?~KFy=^NlX zuPqGR7wkf1al-1jKJX5>9#+FM1gh)dwK0f%Zt2lBgT&W>K_#wdIc6gLG<{^QOJ`g} z4JjNz_nyU2R}E6PGzhvHTE1|R;KR7$Y^7Fvy{+?%5t$6OAFVe8Yd?6Lo88yX%UH(5 zEzp@Fq;?7-+03g)fHM^PW*>nYd5I0NW>rT_>!YL>nkn@-Z20hkuc!l35>Ifh?=mYD zp^dEVd_5^Kt9$gxu*PTS9zl{EWehH`xqFT7VzobZ<41Qn50l zY$xqgrB0jczI~2jhf+_~i3m@n^is$VsR;y`E;&s1V9%`sbu~%KF zYt3wr@)_Lh%j0Pl8laz4wQ%yn5odO(^D-vSgizzQopzh=3B;pk2RqoQy-JBhnAdIq z1O=UhG4NKhZ;vEsP6G8d!%%pY*d?FuCC?GOT?a2D4Fcs>h@;qq)=&_QMIa=~Hr|Mg zpnKMs5RegMvsn@T;4K@wEd_!pq>cioy*3|J!k04CKDAu)Ghu5 zSY9a6*cqS@JHJ&IS6 z&6T+}C#`D)xM*i+S)z;7Ic!?Xq2M_*0uN$s*oVev`BFUl^=;nnvNvSxwmXOK+iSrS zo(C{aJ~Q1F0|hz*gh|mST~Z{Vfl^zZh!I|1?z(VMmJS;>foTIs1{Z0D_7}awjL(OJ zF>ZRRjNd~ZQk$1W{-Wmr?G)%N>}^T;v%?tcuJyzt3L<`X&07*!MQ@iR51<;W-nAaT z5#o_@X3I;q2~}n|B^Z`|y;kgW-DB#@Pjh#rNjCbp-;E;&M=2IV3U|7ilT=cQtoBkKv}{z* zM(w()lV&}~@AnE2fsr1owo={)WVHg z)8<)t=_xQetXm*94OhUv$yd9)l9(zFAG%1wo1*J}aT-3zLw*ze6dj$^mWZ-C-M8S8 z9;?wbK9D>xS|}4Wme5pG2AqqOH_RjNQYg}zyw^t3u*2NJNJcQl6IAdiJf41IpodS{ zQ$34Sf5{jbxP&|A!THs%nd>FKA$!iAuS&$Lx+LNO7n& zsj)7Pt6Ommr-3S4^MQw3`w$dV#Li#3T_G5h`U{R&o~N)hPZf*>+6b~djZQx4CH|sU zn+p;QL+q^>v8&>S-_|}-F}z6ZbbRylxjf7zFLs5a*DqgsHZ}>s<5Y#EF3D26{(=@zcG%`i8Htw1K zi7&hzdl9SGOq{392W}|E5S7C8tgpJc$dV?^5fA(F{MHlKsEj%q`WNi+n3z|r?6#9H zo9#_Mk|j2~8{~U7$+`MGsgM`pEoH~j1*g|?6!{b*^U|KD+GC_5gJ zOd|YJFc7NimC!~(%8^v20pyzna~iHs`F*|Gdb!9;_&oYnXK8~&7?&H9!Q*q1p><(+HmbyWr0*gsdN(1XG%o3qLZw8 z>BeASho;|AhL>dTbbd#e2V2-yNKZ> ztfYe=cOXUa9(dZM#|uA!vZn)#`0n|>Er(Zmpa&d7V2h>uIxz4>rDpJ6m~x=tBP&vl zvs~&pH}j17HL^No7Zj4UYLoLl#*xbD z>*x51@1rk#1-V4y1xCg&% zu{!|KfFp`Lro4BhJ@V8j%dVtezUo@GTD*BlWZt=+a--PD@14yPP?H%#hi9yv(<#RI z-~b5#h+7{+7Iage@ew^%jeEQS5PP&0?DR&d9x~W*CF^10ZZj(5I**x=mBbord8%RU z#nS_+bpn&P{EJ^{a)eha)a2r^t>v5Yw%9Z>1N+L;PuwJx0BiZ+T}NVP`ifH`-nDkz za96&QeMcx9g4}h^2BJQfw7o=(j^!###ODXrnVjH()2Xn#W8F9OQpp~AP~zUB5foIZ zZg!Ul%Nh5R7(n8Zx5#RC-h6jqu@T{Ex|RhOI4dafVt@@ngrK_$SXQhs;y(T zbUVg#eH*uUWpv5)4#)R}*udQ6-#f+an%Q}jH*4caEb5zXBu&+r{CzivlutuL= zlW7+K0EF~0W97Rf_*-q?nbx#V5w2>Z**7?FJ% zq9S+e7DINYeS7f8O?lkTTu-Zwdl-tn1!|DVt5-5Zlg&1P%3#pmdv|Dhv<9VHRQ1a*wKo@SfwLlM(Xqh_NPvh=cNQtg!P4Gt(I(*OAPy20D%qvr?A5ig}nuF?9tx z35s%c%i$UmKA-$DeDUi~A|~F6D6SLM045tf*J`@sy&BFPg%KxQ=gc)82ScE6gp(!L z^Fvd?90(sE@QSc?2j0_b8_j=yev z6qFBTN<`iRX(gY5KxCD$zW|0ze2TiU3(fuHmMRDlz15JVoW)nsf*-CU%cN5z?TwWC z!vZyiVE-l5RmyubG~}wNusIj63y_7OoayB1Z8A)sHR>IO6pFd)qd4Nq^DUX58UffF1 zx;{g&?p~}nv|J6(GE2DL`P_ zlHxAgGhx}XXK@En${bJ6x5G>}4$%`sd~I3Xnn0L%IUhE7rDL2rnF{X?vEXSEF`Z*( zy^P$sG~{P1+2mf89RgBd`+%ya5aOm|sZ1U}%2ol&cX_cbTsMimFG3Ung$UvG2rT5!0L*Ri$d0HZ*n-ME9AKgWy5BR0J6em zm#V%-eDx0H*5@2lJL+Cobk&>c59F_$bHt-l#8jPD?wkke{hnU{ABhl^4&4RuTJ=Eq|*jcAqcm6i(r312y zzyo^-Au#Ybm0UYs;`Vw_tUi~PeO0>YX$@C8`X22gSi^4KVx^>1TB=*{gYKMot5`cF zGf?K5MxLdr2eu%lgB~4J7gq$+$&pR-LUh=k%hF#2FIi!~O0sG8BdAc2=DS-sMYs!y zVy7W|{kS*|t_wJW?4YoDs5MJFs#@2kH1up4FVRf~JXMx(fF;mixjIDFRzfoY+fN9| z*2UFrx^!We;b8Yjx0!&Eiys?>IWN|k3C$3!sp@ZS2=8dxC> zGl8$NCNtg|69sJCnLlGLI8n0R#R{C|hjY_oxzjzhz`Hp*t$VT20{HOuS*d~ToF@?0 zo2V{0rL6bx__afE+?$acw*KWVsaR4FZu^PX5Mt5QftF5hIpxEE8ptAV>%6{7=0^zB z`9O^T<-JiJ=IQm+M@_)p+3H2?2RA*!weL7Sjk4{`ozo?+Ji`F>Y*8L z?P#4d`W_^bfL9etF;2tLuA~P9N>0(h>5}gd*shn#Q$Q)xmkNm)+7T?d#fqx<61MV{ zTLWF;Bwl2q#vZ41PpGXU*7+MU@^Y_|K1#W51p<85s=mudxVbuH5(mOrC=wUMxJ#wd z_cQ~{2-2o|RpjzYRvvH8qhw>rV5`*TA|Qqtyk__$SoDkFPSbgbBq8Z&>ZE7@>g;MF zZ1dJnpotJJ@&dGBtyiF!70~t8fW<1o>)sK zjcve7fUNUWbL+Xh+KJhyy$#XG_RP&^8CQlc5);!#+SnB&efEmNTw`lz3`bE?Ch2kL zATDB76_OT#UP79M?b2vA_OgoaWw*WHS&U3e^6r{_)`tW*o94w$hh+1@vSg>gQ$WtB4N6hXbHQ=(#hK#N zh%wGV&j62P07^rugvr_z#6(0~nLXmCx<^!{@5oFq()=aq>wPHx23+A@1*R&&L&{Eg zn@#<^QmLck5(*TO&%5DzIv0nLAHAbdj`WRl@GGFH3(`rtdD_a`-M|5{^uUK2o?3#& zjPlal+>WJWFb1M zV4GJ^9QK5D-K8QL_HAO15uaC|=->bhX1?-?Vz*FUv-cjT%!NV2pd57%EyHJRcSlNt@t!i7P&B|G(sb!>&Cf){2L}06@ zw9C(T^pe#Wyc2lYTiz46LL{g47{+|ER}b5v(5sG@ym!US!Yhn4$$(hy$z{r$B$=*Q zW=Urn=Ql}s7$fMauSPT^qVbRmYf*H^T{QCn^~sp3FO@alEMVS zt6FbFz{VWW>@hlwspy!gSyC$k!?fj!ItF_*41(6LQY{?7N`j~_zvp1(FNDvuT&C5S z5J*OO<3Z@)7*bi~*%bg9>H*DrMX1o!?$i%q4_}IDVbeU=^E2P`p<@*QLx{|Ycg9_@ zHdcGqApz@ouSl&u8fA)})*@uUuAcF0tG{ZZz^PWN*z5h1`= zi?-N~{YbkP?@snn5r<3C5)u{|1yBuLYcB|!X4lb>VIX-H_LGaZ=R^T1DohE?MIlMr zF3XkY<-+W!2bIkxZy2Pb7btHR>LHbEdk#{9)x1Ho<7&&>(}kd+*EY;@UW8K;9jWUW zd67387+E5)n5qn+HWWHlrb;Cvn&p&2S*O1fYq{IR!3nL4EehoBv4=|O zrC2V6IDn8>p0~5U8Ortw95#H*k`4garEYa`ZFOm;F#y-B6{cRQG@Yziu2DfJSC%eq zGn~@Bx2|%)5`L<;$nfG8Ud!CGco>Y28ShOgj!~?L`VG z=9D7=zls{b+bCY)sNL8H96tpQWLUCyhS4hO*%2}$o=Gv;(nc?G>2&gCLAA#Nz%EM= zibf_}DI?6N3wSuF+}eskSI?U$+J)G-D`zx_yFD3Dm8Q_%*mYb3kavKHu>)Gola*S(H6!r@EmH7j#S2#=?O$DKSGLFw3ad!3YnLH3_vny&2sIyo)Yxd!wl_(iw z3r~cQi5hr&#yx1biZb(@g(Ff}>ojsmUt>8-ft1widh`T)0k;q;&M8( zA)MF(N=!gq(pxP`*gR?*Mlb6eR`~fv?ZBi@i`ly(Z0@6ni>R+2jGCr;CGc5m-AZPX zgQZY9FAkE6Y+X^!G56yfXQg;!xUWD4t#KHEM%v8e9t^&73TThHbJxN_QhzL{H!r)L zy~2Hmn-5s)MJLqJp(VI!$)-7>!fc%+8AQ$VYdEcYgG;Vu`8Xx==D18Ga*N)B2^Q08 zggO}{w%tN17C~(yw^4fz4x(`zXi@%>>;MexF?riV%?{(K3157+W_WGKJ6LAXuhoo` z@@YzE4VJ~GeuM)Np;%q?`&blgBILKI)Xf^NVIZSFh-%)@atb~GHY5(~cc zhuIyR#6qUg{s>;tIZIx+wPV!)Wd-rcih&xy-ubMJSr319*3O+@Hh?COlC z^oo=>2wre5dlmOfsS3TT_Jfso50)rIbF!LD<;1Q`aglAi&P7e$o6;3%PdpEP2pPO{ z4m#tVny|>&aA_TvtF5^zhu-nhg<^H3L|&c9k<`*UWVln!vqP5KuB|uBmalnPn^1_m zMS4c*gfJ^8z0#;WEJB;3itJ%{Sz*WiMKM^9FB}HykV+bR`es)<7O)HeUn1~j;yx3q z$is(E?G8Ha^i8p{L#Z``Y@=>b5Uql$0eVH&1YR!?`&WVwRV4TRmT#-QQS?jrgcs~?+Pbff*dO3XI2j%k(5AUapWDioJZyo&8y8FWNLAF zsER_46Y$4})vEdnzVYBRuL;reR z(ky(vV>inniL)bdE(+@N6eBl-%wv5Y)~Zc49RuCW_S?4ta+Uc)t4rrvTyZ&UPfTwa zQ1*;pHIKy8^Aut$Q?-1AiYzZ`c&=_N?o|h@b}u-uXKF`IqctrW@bc1@TTJn2TB;Iu z80`x8iqk!Y*QRyvRw3t*hj5giBkOjM+AAJ1bCSbKL21{=(oGIGstKU^+}OQ( zBPqfvja3TMM-uM5(T{)^JQH@`ozBkK6FGSM9=#g| z*Tn#SQ+T#x7kPOd>SYh#ts02h>wfs$;>iUyX&y2RWma32>4oWbQR?Xo4y?*dgqc} zRXpFI6x)#3<&cG9q-Oe@+IVV8E_YM(n_~Z>n3shrnUHL{Lv)YN@5zjM7m-}qE2*}{ zZAlNWN{WY~Z;dbSUa39gBgSox<1|p*R|AAE*GVhuIUFAHWtJE0&5Yb&700vZ@)G8>hAd9!T5N6|?&y9bQH>I*U>C}Q31(2$`RP}R+2wtYv~sx`;1EHO-W;mQc- z{?Z|(x*s*c0h?Dvh9#0-%Z0GuG8_dZD{?#-5N;RD9>D0nzc5Da%m8X&CLXy;=6X4k zk00soozmzN-nSNXyf9v&Zg@QdJjuHyPs}lX}M@G+H(Uo zd1*Tj&xvaNEid>{zI!u5!!_p+FgXDUC>!XUkATA1!$wGsUh?J*rl^!DBaWB(oUrFm z?LL2SnfcaphNfoOf}ZBk9Fj9Ck(+Jtu7@P#g+35k!^b0x_^2G4r%Tx6d;2DKe@zeS>!CJRGY-?Pl6G-*3Cubff2 zMXkt%_L7wH3gS3cpnQtVcL-%~@!9<@e%p3${ zT_O?8uKX7FTAA9~&iFCTa`=y_a3O1(uC z;vKpi*Rpr-*sG-isqZ1VHitiuGVAuU5x&PS;<9mgeCeWwb2d2_ zk$SfqmM<$JPz_I%0z-hq9XO2LTrIVkwG_bZvLoT>x_w^}Z`d+DmWo$$HctbcV%~Z8 zaMe>rESGC~HvG#(}OP}_7&2b@qt^w!!Twx8jsE{pEdK2oM*WS2g z=gCvzC!5<{Lib$Js9ERkS3|62&=Cyd^ydsC=c)5V&IO$12h$2p=tX>0Ntd`(wSrY(7xGP+it#=gGHvV^dRiZ0b)m^BMh1wnCK zEqJk}$E=SG-BatasEj>7%pje7wWxi%HF{@f?l+ay5Q-@N2={T564>@)BGK9#xlog& z>!M{hyaslYtEs5pHEz{St>KLFtJEr5>OR)N#<`f*1~Xer45!yPBB|`wt!aNCYf<$V1Jk&gp-b6%XYRz^h z;PTb&d#=#-gJepWk*0!dj>bcWJkm`9pIOk`sXiLYCE#4$bSccpp{69Gn94W0dRa;u zv zO;}{BXKI$g7r3?WkY6d+0sv4$J3Z zm9& z+=A1Kpo_F7F~UXgu-g~bG;2=YB)+HMPna_c+b$)q%Zlv!*(%T6I+^>sgy&Jd=Axz@ z^g{bBHZCeInW;SQTP|G!m3LyG2akhd6M%xWp{{hIAG%mo?Fn{5@MUURM{7Nv2|}kO z%w!9C9pIM$igvMquv>Rj`!uw)YS?3p1i={&OqRGEPhSd|QF+Nd3T-=skm4PCbTZw0 z`Y4|rFvc_AITf5EH$aT!!!77g%(bAgDZFj>N~Bt3AyD@0*XCmn_<@wkyuwUnr)raC|eaY@Pdr#{@K4p?-ihaYC>$vTNC0@FVEmzSo z@giO~_G1h}3$w6!(Jl1mQMhfvm^7>aozNcM)tg303{!1FG$aCqOrv?5w{~-plj^7U@c4#AFy8)vSScG|ADA+qhNQ zA5^RN({sC6F$beuwNz+&sabG93!fk(ea^=^(p1{DRxrAJBG_QFIyVaDE=aO5U5Td*&1n++ec8N zZjif!_G%iTfJq+~F7)lxqgjby+j_!4SDS@mpj55vvCx(4UagJ+C3uI&vP0t~u~!NW&oY)K{9YbLbdfJ_YQ5TuS%);B z_A+RAQIE<-UwcsM6qepf#*#i?f6U95&_^f)Pn+k6SE9jV-wp1k<_aMsOnhe%Uem;9;z*SFyfh%p)ssU!og)unvy+qKY zN|ShS4^F|NrT|5KS@}t));aX%y@PS{6on2A(nmn)n;`&Y_%u4i?b*0gzq;anER<~B zk(E3eVvrk2%+c88+{t!C%7#quAWK+KS-k1(!#OyIi+o$p3f6eChKsBQXG%Z?I_L&4 zCujKuvm^))>$_K=zHlDU?@e4(aX(CyU;zh1>u%^NLyRcvQ?R@_*-w%QzDVwKQ7Wt* z4-Xo(naq2$Tja#bq8mn!bgHMM?_H5cb^@;Yq>O}z?n_0LCthG!4|p`|4WI{etpLZ! z!cvuEWS?3pfFdGzteP4=8lxGP+L^ARQRdJo=ne{zEN-R8hr3C9G=l8!XbECjJnBL) zE?cm8Kye)qNmfD5Azqb6ST*V!5tPtd&a%E-zlsbv4F`7Ug(iv{R1=1%G504tc@LEs z`WEuyZT*EZ#i2+HX1|q_(R=DbtCyg;Mo{tM-Y)VT)f4WhU`##*fB0@tg^XR7r={W1 z0GU&mg>^|_Io!j5Z8*FHKerT$Euti1Tf+mn;=&z9vRcAed*xzukxTeQ$D3HLUJlQ` zm&Q+?&bZK#8A-@c=g7JQc_~KtgH7|%0U})+>gy+Yj9VoEgtClEfGIu^kj{=I#Fm); zF!aHc-q^_<_Yp^2@L>Zm=5?5IQ_WGqWoKBujPnz4R06XrLgC08fskxC8e;~P3aIE# zb5sVxS1$;PoLh5s!_A&*Hfu;>RNJiky;a0_s~ouI4~l!vc;2GH*bA5_V0b~Vy_5~N zWc0<70nh8FaJ1S$6L zBm-ONuu=p>1A8VTd2oBkX1A#{&tDVS(iHbdl*_WDW3ni%3F9<5^3e}UAn){qjYfX1 zsqBx{!>4AS(M~i(G9=~cLbfp0LRMut7tLtWO?GTnmABi8&xBN^O@%VNiW(y0r^YYL z1TiLUtGt@^Dk%lpgoXFH_A`!wr-7#ivIaSZ3L#w&9%Zlpy@wO^UUTvz zHGhMhxmbK)ps;sgzHF>@(6$eH6oH{ew#itF!u4(0A-t_gMQ*0Rbdb{Ic0#5HqMRUN z)rLR|HE)NWVUW-nKN;mAgk8$K7MI$9cgeUI>Md|1BTp?z=PW_?B~u^Rs_0$Nti6J# zeO?$I-q7_}#2Ol@X}p{_5FPVat=c4G@Gq9z-uPvf3>1$l)Cf2&bHC0GTSaBBg#<1{ zHR9FNQZaQ_RUO|Qqd}5cg0I=7^z+#J~y~B8kK>Ktp!h)vYZ6rfm zM0Ey|Qm`+Yf`H1^>GLawM6qJ&vp`IfH!uV;=WM7chDfzp%%=o+XOcM!l~*ai#?rU+ zmRDib_C-7;${l-&Gn%YY!}4ZcOQj9<+&i2X&otGV-ipXc_K{RCuE;$w5~|2-YRDJu zE^W(v1l;9>4f8CCW?vfKOALrYEJMd;s*N(wgn>k1=6eqSOoXei8^&+kM)~1YyteM= zrj{b}V4kpuLd|oS^5%w}j2Ch0=fcWNs*PnKUSzN)Wr2?~0oSGvS8qxua%F(J*L!{0 zflW3Pt(z$W8+;;9R}io~Tl{fOQLBdAF{LS|A=hrU`aBUkHnENkD@!lF8yt z)%hhvE7>GSE?->7>;^@H4nQz!Lr&v_TaS_?L3Sl~yPeS~f~)mQ%OKB}@GR7H{g%WK zEQ?MW9@G#!>>G4_qO^De7C;W~y`31Dk(B;q`RNzSFg_5ugK-G>LZesKwt~)I5=5?P z?ZY-igVw3bu-zlqog^rKPy8fe z2$-u5#NTE4*e`OokP?di=t0Y#uP~FEz~)gmFU7o?foE%v$MrSUgn1M<@qg&9w?XCR>^`4b&21kN0XH(w59D_Zc|w6^UodmD7k1IGSbav55lO-N|1tbAlZ0 zVdlskKTxLFhQk16FuPK&TbyozGwyf5vXRkmmh+W_zT7jz0uDqhD^`XjD6DkevAo?! zB8J)b*!CgbdyT_{M>whvRGKGeYS3I-CL2bCd#5m2^3jqb%Z4Av?2=2pH;dfw=^%u_ zc0UN!vRGGqcUxsZ*A0-^Oae2{C9C9FlJf?F4{GUwA1}f?YS~z6x9AcGk;f>wuOlDo zfKIQxvI8t8o@Y{`e5?LqaPR<Rfa|!nZn2UVD@5$T6## z$Eh%^D7B+Dh!%Q^_WDg9zJO=-aJM6!f#<*^teRBkwM}eAT4$YcTrn@h80)~sTuFP? z7RkOtMH+bXwxCgXmWvF$`8+*LY0>bES)|dwRQxuA(H!OL9d&+;zS{2 zUpI#Gv*+YC2_WQj@7n3&1Txj}=tOSOAv%^`zLZ$OruE=6^k;N&iK1L=S?~BA7+a$G zi;ef5MyZ*DyhT$!p_Y3X`(AsIsQDSaNBdrPhPC=(EQqf`KV*bCp0x!>jZp|KE8b)$ z!!32b-MID=#eTO-)9Y^_sF10)kFK z^B2th=xN!TEmNhscW*Mq51-W&uT%t4*fSO}myFt*f+q>88(_c}6g8n9y$8LoCMElr z9jdo{AO@izJjVrKq7)40`XzFnwwWM~YTIC=1P5)_jlj#(prxRE|K!2XAow*V{~5A12RIl zb&-l=5SO=Fz8G;}c(84bwLaGr8;{(N=oH`qu}Lqg;Jhw1OwcEl`6l*!p9bCUaR7&F^5^i@GE_JC)K ztBp?}v}zFH+9Q5XL0L~l-e4z4I~U#i2kb z*dfhR92D5VkWPWW1;m+^za&8F%Uk*rVD_vFcNlOiNN1vER;3RP_pp)I(G!Teo zS*!wMx_5-5_VV#YZUklBalW^AtJu_*Jp-lkOsvCg8c$5h&~YscsGah86wIum&#df* zC8nS}l>xrT`0DOqzl$THx=I4Yh}R}cnP!S;aw9nGwRs!7sM(LvqHonL@GeK+6L*&; zkA}gKj+kaDn_cmV9SP|Z6dxV|kuKtZUyw62AkjNmVr(4|%RTeikk)ffs{z`e*Oa|8 zwGUJ8(X;MMGaor?!_Lq(TErlDW^FFH8S^g(SsW@3s7$CyR$3)04Em>MbJ zn$7LT0BtN4je%p6Ux`_u#-6cJP1OYBbU9ty%WRhP4DL9pd2DuyH?J)8fD+3|ISIM- zHjT?bu!yvc2qL!4{Us}vztX%J1_)v=PkMF=aw`dbYvaZ82$^uJQmw`yp#Wy;f-AG^ zp2W!DZX(E$Phb7TbAVs#yWLpCVP31=>}pgbjSP)=gMg%RPExLoV6E6Uh+FykrXc)P zN99Y$nHm?gp?&xwEW4IH*>aD)UNxrZZqsFxLx)b}l!z7bd)(S0a?d#P;zAT0Wy{EX zsvXmO-!vn0xdmeMLr5Zjt|Z0-B0~k^BabkeE!LY(C;Opmf+3tPCE$U%%tw{c%HYIZ-g-M;x#?jD}+_2tt7X5CRU!@dlK%NnXqyO9?s>J zZ8sjchpC`Xi)xmyud_ll2xN^EuZ=tn@G$mSV9E0;%=JZnk}hZKXBCpkGGvlx1~+l% zqNl3bF5ybrCCtEA`J{y2iEM}A87#s=To$DJzUag?bSQQTmv!3gaqJpuSCjJ^Mh)z% zd_m;Hwy!)x}b5WvYU= zY4S94CCm{w>Nu@f-p)R}uwqR)*b_`JOsW^?bEt(vf|2{o^x|+Qi<|P9^`oE-W$-64 zPw_eP$kvMo0OW5Z=P_VD^g_BpHZE7Bqbh+bO|Mv>m%?K+OWyoO$6dI6Vh}~*Nn?Z~ zPF=fgEP&AlY`Lprw+V$IpI>=W$GD4H&Q%!i5=cg!m9zf^G&uAN+67J+-diBJ)Fz;( zYmF2D?saw>@%4 zu7d*!v;p#v!jJd?JEyA60un9^)C*FJoIv%u;)4N#wQbC80Ih-b6u~OngEGMZFgZnO zAT#OkcPxaod@p!S9VX6^g!y4z#hTF`bqXuM(BbhE(&!XNRpcDJO6@>eNUzHxXgHoX zsU{iVWF|!Ip`5)ShtCIt%98fRZ&lwF7qRPmAtIUBPbSgFJpwu2i^FU-5djA!>z8{b zlkunu^xeG4E?oeOwwV?sGgA_P_g0T>OX5L*T{j+I^QL$AX*b-j?AhhLfcSxh?veF>9u=z_l*kp#{OC-ewxg@mB(SuYYvRb>$8_yKD0F z9iua6)g>#SCS2VdjR#gZJ<)n7(`XY{N`kfz^<})w14*QBpiRGkE`*jadi$nxM#p5% zZ6Hw`)*il>Q8oZwA=?-y!UklIUabj=7vedTE;6;Ixjb~&31BPm6~Q%GCB?Ncm!~=g z#5L9veUE6O)<*0~6+z>O9owo*qQ|hfo_10-&FQ>+0^WU|43JybaQ;-B;J?wc@^Qb`(R1 zwH~wRg>w2A(JtrId6;i>%GX+ZI9kJ|A98M{#e=x*%qF~J)fch*L#2e-&p7-`vEa2kOz~*2q!rL01t=rNh{Y~bcG-_#4XFgD0zd4J#!2as^>;rCHWZV0ZZ{Emsc871>2Tt zMg|fldwE~^E5;5;OEpTas6HCZN2J<*__kT+cpRQ{BU*~0!wVlG^>%>h6W6RNW1&F2 zvDeX$IRH2jbBD1+EZ-Y*l4cT|Op^hfvu=ee zxTSQsVqq-rjFFgL7PIFW^TtmZ^Ss}UPNLE@>NQ|y3t4gki4+e#Y^W7D=7T9H!AO ze)(pGD6`sfiq)b0E*AfWyd|}g73a3h@})j9dfP{c72-2aX5}&Oj@Y1Q zS-@=8dOJ=+af*P!JbZOqfK7*T5kzIyjytE5_} zA~GJ2klipF@T>|y90z@KB+K`%JI_e4 z#w)fO@-=H46-YKuF|P}#JK`P)95B`5+O{)J``H$yO$;@{)>C*FLi@nTqfsBXO{}u_ z(=xe-n}KaeGFvS*?+nhmGu{Js&MHZJLJ#mw3)tedsE4zUhRfM=)g9#Dg&tvb_&lj5 zCeh{##D2rQ{LI4n;i<1}#wqw(P59WH2Z>{mi6Ur9n*^Npdx%=rq%oWi5Dp1*3kpG1 z^JcaNJumLWkQjGgEFY~oP`Al!jCN9p6jBt>MFTzV<>{ zW}y_W0UC{cz-9MHQ8Q-(CTK z4le=G1Nx}Zbr)qlHh^Io+ma-4?dOg3gs9 zvD&9;NW2INjRtr57t>hzR@AA45Sj$uQ|k-52CCHCOKpCAMl!LGk`{WwQKujs3^+

;TQ8)#7l>4Snz*J)FMZ0Q-2>;ACWq)?04 z{KkU8ZD7LUQgu>@A=|8$d%ND$tibIvY5|s7dN*s`P+G1D05)v14kM8?)#m=Hiq5o* z*S6%XcTi;6bBrp89z{SixoY1t-IiN&1J6i8< z9fusdj-pOLv_X5RCj-6eb(|kZ0?Q0_^Qc3mCy^)}PRsM>s$vA&3d{Sd%M_4HF?NaX z7>it?UM2H$A)7Esd_e1W81xA7o~pfqS}*ZBXuG|F9)Tp!=h~~2WBs7~G+*+wb!Fs- z{8pb|D;p=Jrz(Rh-ElJq-i*-pfwJiOj5Pqq2}h`9!9eu7V5QqV|XUTTg%?mS91 zDjJ#2=M?>nc3Ros5$RTS=gmW1-h3I=k?Au`8HQ3622_=iOOM(dbfuwza#LM_j!PU~ zXpvR$bICWowY`tU$-kIpfG?&cJ4F*-;xp%(e&jLnl+R)*T^fl@i=%?3ASaWLKu3W5 z2;ZrBKS5YTyqM<3#`DtbLQ7?QnM9^wEhw#&_MW3v%9>KQ8vEM9s2g=SkJ0O>dWC3P z9{SMb0i;7+s`M;}j6t$Cvx_Upb=MBKHWf?ng}!-@f~PhNwD9E-9hNosNCIHrHVIxO z`WjK|Vsjy?272-7v1fQG*K4qA*VNy^2G}9XVtAENN(c^{okvXy|AN}i2eKY0l%!_g z6QcB&jmeK@VrxdJcBK=!TlltNUX~7ScE6|aj*$DwdXs3?+CgF?LaL;$ootQTdrx4x zDqWBa6E@X3Aur+4o;(o5lAECSBIe?W+C&xZw(;mCXA29C>s!psyVua4( zCUMI1jTCc>czd@&01`&7X`h@nYg-{74=m?2hC$Z2Q??speJks-${Iw9xqkFDISP(cc|^L+xKsr(_N+ zD{DS#m%h%ncR<(j01m_HvWkw-V#y!8Wi}Uv;GSz2eh4cKrx5Xa?}W#Bv{)a#i3d3a z)@Tw@Pf9=zV$HcaC0rKJh{P=HN}h5oZ5X}1L3NdWSki9<@s)TL)U^k}Wz8hGj>)rC z9fKGUs6ig+P;h&PqL(A}<~c*p^>ox~o?uLoQj#5mhh~vWFj6nE z4qdZ`DMZD+UWxYvdl(Fbn<*&#MRhC5w!PH}!7YziS1AFc9V4O$@go;Vi!LX^93uAg zQG;|a5{z7(JMXKrByfEE!>saWjLO@x@sql@D4MXbe0I7oj_uj{Z1 zM<PHNJ=wK;{#D=i+aw2^>`#K}Bs_@NORA#8FmS4y_$EhQI)C0NL16-tmV` zD6(h`1^$Y5sd`IHqIG39&!dw#dqr7C8P1OUAozttC5BDu zs-@dKb1JKeNL{jXI(%=cb&RN_XCcV!h{4qGj=2%h7D?q~J1#3BgSFIVNWEW$)na%J zm7vQO*fYPH?4{xgYoDbCLDL>|Ey#IiBRpvasKQ$~cu4>l*qfmP2h^^eNoKb#vDon- z>`DfPp91V-crDMu^SwRk&UzyZvN9zGN!l-=rXKJkzn-W2$Vw)}0$*lN^ThKS05J=F zBbSdKLL3{$I0h9G8O>b2=Zch>JyqNlxJa?gfQy=HZ}hfMDfqEm4{TJn*Yty}?S9od zeRCu^)x~6HsdwBUCE+NBd7kf0tj%M2e10A$B8e$tidRcBsuk}&aDsVT@MtVWrz|q9 z`x!lk3V9I5@&w|Dp)Rs@dSDKG2p8d#YSb^PLAz?zHaNqa;BAgJh$W^JgtK{Zao3(? z*v4yLrV}wTd}4A<>CWS89*}|e_!;BNdM}r0D-nJ8NSk)M0_A)?`Wb9 z-t`pc^uv}Xy7W*S@?z2~p1lNE?iAw3*UtbB-}eQon#gA`sL)gdnp z_qi+ND=y);>X-XeQp*12|& zfwnXX`3Q^Z*1_@FX(i<3X;z-p`D`j!X}h+0tZhD`fQMBYt59hyCyE4)fK$t6oVl-i zZ1Sx(D@lPd*L$lP)(=(SE*L^u`pON6FI$ktvFC-1=YwleT3<&SnKjr5T&|aO5GaAB zvH6M#!;+6@n@#TZU9lO!`lBl_DmH)9#|+5IG!YLDB57Z9I442v7%mU(V9Tuu6r95w z_P|7PdwA?~&&Kw_=Hs&+M{U7-v!fJk#}B&R;f}E)ERV{QS&%JV6N17%Re6*2bUK#L zyVGk+L-QzKqcqd%xi#(L2H;z}eYhTYdZFS+O9sjGP}qsPM{AA4Q}4<*wRxpHwGaEf zhurW!u_pe)`mlw(&+LU88`$u8j#T(ro1w-mlo-0t(R)SHo>C8YQl^*!XX4aXE9Y7H z*l9-tFxRUyuIe{7XyJ>`4WeL7)>`7t>+WeRf;KcRUJc3_C&*q>8kX+MB`{kzx6sFi z>Y$LQwB70byfNLMK#jgCd4Z450B$&EMnYmqQ#lk}7wky5KwHkIR+1BM+}M#4XEm+j^~lQ!qep*n4R##w9i=sed&QVMVrbl| z&F34Ei9jLX6k5emHTSAf*mu7gVuI5;v~XVIia~3LsYT zLiQ1&KErzU%tJcjDrBgI$tz=e5Oh!J)P&je(Ie(kn0%M$rgJHkBs$YNtvrlB?ACEiFj{|j8{B1(sF`Lz6>_ZJqo3? z#V#)Lz=jeQVM}D~B+q#p-I{e8V{_$J6y8Zf&&}PJpgXH$V~3qdDPL*gx!7s|ya!op z%)+U(G&{Hb^3H0>m!DvBHrR7`RE`aINO{FwSixtAk$RNHDAr|7m&NE7!7sst-s?S2 zWubQl`&v)6W%rHTR0I(f<=cX|%j#@bTPPaP5b1JQ+gvm?_fM`LeQ_NV_tLgUuozFp z=mrMdaUZ;ZxiW_coLrlF#ikhe*w9xm+#CQwK)%0Wg-J2yGOYxpP0OA@D?b{q!@jLK z1dR&FCkF%EHHQxh=GxrkIC+(N%2!A-XROzmgdo`|8~q@dY0MwUtxFp3i#3I0f5uKb zbo^`vU(q%KQ(j-p_VimfTY)=9&>Vym5_&L*LGXyv5rQWl&Soi_W-=W?J3GI=#ugWr zU56)BGtU@iG*K-mm&WxFm9fgu`cJTJ-&GDVN711&sS0iqpE6x}S1}ZZ#?>LTr1o1Y z%%Nd?jpu3JJ#w4{m5{(fv0LwqN<}vj9`poHO0SlVHM5a7^xoFkrG$aK^V%D&OnD@M z-dLIa%4cnGxr|~_F81J@USmX}AMqN4L^aea0w*@;3TbkmLUbN+G22q(u z9=fk|YD(ams+tF1(>{|!!|`pUW_pRF#WJt)mG0Y%zKZf2lDafbw0K*!i0TGXmZW<~ zO-#I@9ux5@y@4!X{q>^iOj-N$SyVRW%0e}M^iVXErI0xv*X&CMLps1|pk-5J7_iDW z%_lhN!A8dXJZPK!IllogA6-%A)wTSPU0 ztAKv9@5Nlj=2NAo>|hmmHiRG}0gqEp92E>onu|CmS4bM`Xh?073m#Z7Nb=K3CgpB{ zqI!@&M0to319;jVV&@CXvln5H6MD+x6KwuoFrtOK8?j?t<-EKMvUy@=p{lT&jx99d z8xI~SjwGi)dUSdw9*;*L)8AbzK}wL$ZDlrxPz-=>1-+JB0rnkX-e$uc!@!+mefpk1 zC%!;2je4u@5ZvliKq$Df3%G(%{IK0s^3F?3k4~aX2Pq~kHmr51{8C#d-zhwd7#gh3 z5;k>8#~$rdITb-9e1T(L-SkRoV1sB|$Lqp6gw}b6Jof@3D-WkGUQdJ) zN+QzpzsN3ijYIS}q0Q@tC+{q8%E7dy7xXR7G4Q>?hnzes_JlFWHjY~@MY!5&!}=wi zwpz3!bxf)-sUs^^9zfjNK^PlqF(mh7+&jItH+P=-x){j4ojZ1lyLl zp;SS)zUNL};kYH0ezp<_1{YQ@MGf6^xLM@31Gk$xoMHj=MVM%|4Ka__VlkU05lAH7>ExMQmT; zL*4N^tPaM98y2?LkTel?K1tN8v+%@`JTLD!bV^y35YpA@V#qa1^#QL%Zx9-1^$avm z3HZTls$FQe(|$266%W1MT+6!62nJmYu6`L-sWLHBDnO2+0IhMl>G*mP3X1YR|5 z)tvDhco6E5Y27mu>ifdw2w-As?&7~&!yBc}F>kH8u%Zqieh z4J&OF-dB|`>j@*uvugJT+jSwY$_aTcWc5lit9rrD2hc*?*#q?!xZcpnX-ow@CW6WL z-1j|~?h3%*kN`4>m+{Vk{hsVgDfkhP7UTp)O#RpZuGY>LpaDj+=5k4AI;4{=jqtq- z3*u3uz}EXF+ds)R-FreX56Y4~qP$^bpTx4XphwOi!1AkQiUhiPvvSAhH?S5HsxQ0K zsE4A1#i=kTC`GKL@S#xHp!!t_Z!B#OK8I*3x!wrhX%z!uS>%YRwJYOc>#=*{^oo!Q zr0$)hMQ1)H;+s)}c~7njK~E;AU$7qYyu7YlTE=V<9$z(ydymSr)+6B&XCk$*)8OXY z^_U%wr}y?XIPlYx7nk^Kja%nEkD8NY0pn(?wjwA9b_Pq;agn9_3+>%GXXNb47Z1Bj znz4Xo4LnG@)m!jAqoS0K^V%|PP+-NLnq}8>3?bAgRsls}+!uyHofTBdJ(3GxT-9Qhn_7= zSP`|d@(2O_J#krME|uMU2>hPi<=rui;QNoQCZnq zecTR8tioJjn~fG7kM>!xSIJ5{Pd#FRdli?$?QsL|R@@cK*Hhd3jS)y$sw7YaEHF}Y zyzJ`tTox0<=FFvl_LvH4t48?YuEg9!At?8KQ1eWyd5vHU4;wmix#?<=baGZt)s}jl zq!SVSq@0e@^+swOl<-n-6WP@F27FAGn^A9&SPB|U)aRIpdFo6RdEp))s52of$AGK% z%Q|cia25nrs_DLXgr<`hFJ1v8-&`PWdnNKZ7AX4)l9rWr<6H+&${VOO0&WtWRl6!7 zIc*wlo&sm*MCOO}1j@8vW;cR3ePO0~wPG#>DJvdzZBQLeo!F4Qf&$UhnWLzDns$!QUx2IR#CDrjwSJ;4-%H0iLZM4O zeq!y$C=@ehYyN^{-k5Gk( zYwO`AUK)T@f}!auOqPxLsUz%x>oFbpO31-Bp*^Dt4})%fgkm~Dz0c43g(btQoJ3(C z0_n#u8)CN8^^rV#{mOLW9s&7n`qm>s`HOAE(uKf#>9zOVm7VQ@Qsg6%SGZ?;>FSZI z4G2mnAlt{j&I+Z*5x8C<;zvB#KrQyLP-Gf+MmL;W`UI=xM=Ut! zdbC?M<%tEg&O(_od+vG55qqcJppckBdoe#wQxWpVeaMc^FJoU42qO5rDJ>_Jv!^e_ z*(a&RL`r~+wML_zR$JH>;Wx8JDRy9!Y>GsqL}6vvv@5`wUJp5e8#+Noq2k7)SLUcg-B zTh?o7P<`p1sA zif$eXK5RGuA7ESUrpNMRg6%+E*vSnUh9A7A5&^4J>@4}#dID4pUNnK!VENN#Vkkm_ z5}re9RvIdVyQNVq7Ci{qOVXCNc!EO|G!MULZRIbv-=aK$aU0z#Y0rrmCPC?2v~Y;` z+R{(UtLh;28F}wz4uO!;Yp9WU=r0f{+4a4v*95=|#?fxcSfuI29xU==@QyLLNQ0Gb zFrsdISe~(oo=66wZV<%Fj$pyuE6R(TN26mWS8Zo03?mTM5W3Z`UK*h=&Vs$fZlJR( zxWbA@b_Iom-pSFyZdvj|l_>G$3TuaD&7PL%bC^j3r1a+ufu$-skVQuG^!27W>bo-St80@Wx_rX)rn|nzO49t!4aNY$a zkaTLi>nw>0*rBAE_l`~dM1QHKb(_cQNAX;#i4(BTq8S+&qvcA*-l)Tj=w;*G&WSxQTpHBxI?X!*6cZpTblhAlFk* z_m&+?!`&@_x9%_U8J3UhS&2_Rc{^9+fpwD7U7L+7M?2fdOg9##`LJA;4QNh%=Oqa6 zlCnpkG&H7AF~6TZYIs9n52(0uD&rU{j4Kr_Hc<_iRiWEY^2r&dS5)Rsv)COz(pR7~ zr66&|WHRIr3B7=emiTaAy=m^G4lc$9k)7n;mfxAS#iZMZ41*^y!eOmL|g3%>fN+tX8duycI|4t#e*oiwYriDDIMY_Qq30Vp_;o&mDiFF3{;NH+E4vhCa&cQod1%uEE zb4$T4Q|EDDxK=fU);BIG2s1bHP60$gkU`gu&<@Gf9ukIWz^-q;w;E3#Xqo0XD)74= zArcAG3k~dtdG!<($z4sn)^h`q7>PxTUkIS(hUObY-zT_BgLajn_414~3yby+@cNY( zPcJJb3mvSw%Pz;WdR#U0#L{1?XUHJcMbBbOR*pfh<%2Sg*-K_!>OwqlB7H7tIPp5w`n0=!%a@&*N5D7XNtQ(3SuB{|oeT7En1vrlwZ6B< zSddIF)NztLU-cE;<;w^Ph=l}XH^;|aPabl;Lwmzs{fJ|O zRmsj74625yNTC6U9R&73TS9dR3d516q)ENiV0~Dn+@?=<=b@h9L*E#m z#n%Q&SqYrT{?tY8$+ClxmzfSEt|z?`w)XPTgn=kxoL3vq3>CVQS1sNEq@YYw1a;Z9 zF-~@(AwQgq=w3ZDk3)*3Z0S=ehB0{Xq=(R@DxS6z08O5EHCbB5ig1Bqs6JrU)PqE> zM3EiR38_)CG6HEZ8lX~`f*V_oO%QKds3f4tJD3ezXX&%}bM7wf&Yai-DHSXACn{p> zAh_?5K=Y*n)Z>R63i1kaG-uDoSGsqSL?^VX&+siqcwwBwdv)-rqbNlmL_IW-Dfg5v zMAb-+JdTJK!R=*Qjj865SCB;pLLsRfPkB9;L)zk%y=*?oOu)2MJr<_ql%mKaRTQ81 zEV7E5R6@Jo8is5JA_-VlLSxnVUW&PpusSYccAZLw8Ac2Yd-U)V>p8`P$ydq{;&*S` zI!kt--x|rb@}PIM*A#?>+V#G?NJBdboZ-mp=U2u2EKtS@6;~ag-+04jmSW2e$haXE zz?yhaW(#U8Q-x#`SdxXKI;SORMcV?LWku4AGd~N4G}Ne$@DyFhhZ^s(WXn62inIZF zl-&*s@szmbF+QZVqqVz}0z+CV#5is}>BJTBhgelBe>|xi}MIiyCv)PB15lOZ+jo@22W`f?2seHqOdYOswg3I0EjqMpN z={iAFli=lvmoF^!WTzAh1R57uhv!Ze^>f49FiP%+b9tSN5aANQli69=NW3Z`N@YfF zr4v*3@_CZxz#3~AUV`gsChrRZgLnztQ>_v4nh2zCr+FTs2G6@Jf7D|X+a!Q{G|zPl z4H6J5bKm+)+Z#lVdICEV$HadvU*q_Nl9y#s$?FJADSUHZN_J6q>`ub->BNds0F zlD-ZGnZN+wSSW!tmbk3aH{M!R_Qv*CXZs4~3LFX(d`kzcxSw9Fy(ac6*Nu2-$(&3ADDldT5S;SVpY9c^ zfOD42;A>LGm&p&_V4%OQgAme<(UX7(XiG(ecTWBGt$3=v^GzIpcG(+O8y>qzSPIdn z@g6j^R;^ZWGfR zaGCb>^V-elyL@kXYzee(ry@4)by^X!m5S3LIgq@aVnQx3XMYFQmtn6sqz`XEw0B?WMZZ{z5n~b*+uZ|3w&29shmcZsD1`wB? znO)^87RKlG@NhxI(R&q@<6w9|O@-Jp%5Tf(lkd>a*}Htyt2|BY(JdJ{N~$Mzv}8Uk z#@T7-cxx&og9G^Hz22Hw!j!6zDN@uhO>eO4W5kowYCT&hvFtSOCJRWtC=Yk>>}ZQZ zl{?~9RWo_GFHVeVWgNF2DGM<=BFa572NJ8R$lcaFq&)H!x6TIj#H5f{5;^K{+{99} zZ~N&<1j9UAoHzA+@$D2vNy9Pl>7Cm-Tzjo0k=zkoRkW8Dj=cvh<_tqzZBn4+Fb^h2 z9aNY{5c0IyT-NDjzlHL@0LKjw0zsRKyP$M`GpEMAF zLEhk;0cV<-;J8!d#yNW<<`(soqx;evh6o={m*66Io<*<^hYci`_p1eZBG~HVKK=G- zmXAFrxN(Pjp&ZJWbqaL>5J7e%^R{~Tv9)l$wTcX}gC2w&l!1(-$?R1_(f2YKVA7r5 zvvmPUen^6KiS-ad0eE%c7!KgHpGU@x%v7DN9qA!S49QWWX>l1IG{9^Hy4=CCfZq%( zh~@F|(A-OuxfDjt>O62AK=fM7G-ylYNyZ9Aa^!wdx;C{H$tX9xmhJZzSPyDRs$oD9 z{UzUBb2tJt;Q+odq_Hb@Rb%1g6t33ZCxT$4w#1?)0F>=-0g^OL_vh((E1?qfvxIkb zHLYhg?kN+Ywdsf#XY!KRm6TRl=sYybkD;h0>_XuAl=|&(3Sn@?x;V`lML3UC+22eLttfaI-16vj`~+T3L`>n3W`mHF1EcreiIj`g`uNu&6i zyET~9$FZHH>o2Av;!GObh((lN1`w+@v7v#+Ue3<*#^MoMpwrC*mF=obFFM`sCG^+KD|I5p>>7%z7tp zXCO?QUH&3m&>iTe<5DOo4Q}cv4~yT z3{Oh8NX$23uUQjeLu%lb94J<*Jr5pGj4>Y-7*tuUNv>=BA;LB$p$>CLN+K zRHdX$URQ{Af!7%w;Cn8wl91Ak5kUgcCA8fwq{Ha^PDH`#oXQ)?vt)pAT}RBhp6u+R z!d(yOLl%T-&Wnm`!H5`xjznONmQpHlR%By>N418q0>zY_!PM6F%yhE^3vTb(3A3&D z5{8rN+70eAAL*i_(7ZBJfKpboOMF#@G0CLleoSv$6|Dv%&HTLs?37aHnwH9q7im#S z@QUd8itM;HXzSjy3zXOM#(WY&8rHPK%@WgLD3|A~S2X?H^d2qZw~MRH;?=u)3^Ig3hn28}rkRyA?QJSLDsRMEOoN)#j)@-!Rt^JRe5D(;7{_f98~1XUlUP$*(8%RBFve5QX-YL7)kUYsvN^)a2~}Cy z9u+wgP4lfd^YQ1bnD81?2mNW0&7=-#?5LFO6M_c}nB7jQ$9R>z^mvG5_fg#;G3ePT zLDysFQ4`yHSsL4;CK)6^0QSU5OCOZcDj(s#aV~t4PZp1J*|TUJn79#YwaPNh&*!28 z<=JIb;1rKIlb_BiJ!J|)#*MVQop^^jI8Xca7Kg%GmtT)p;NF=OkfDUQPCsA}6wyTn zOl=WK2UgT9GO^K!y25?Nq~Wh$+~YZIywnEgn~v7^N;BWXmK7kcL6_vMdt1=S-7jp| z@fesHj~3r*vk{#vw;*CoUjvt8-tzdcbs%k3ay0 zsFV;V zjHit*b|}j(x@=H%-}ZAjWUo4Zm_e2U^j@G zO0Oikd2_FBPtZm>pF^8@h}bx#CcMap~>peLSfl4QuARsvBMf< z(33*iCWC;Kd=E{Ms>sK4noW9d-#c@7=e=a1MrhCRfL1ME@j0jAVGEHjH06Zhvv+!G zq{P>oKA3SXXmB|XE}?2j;7Pw?OhuDnwFs|4LOH$XswdCcF8b8>-qKYGS4Z$HJ)m-m zmfcj}X2$Ev8ynPIzKy#JuLD@ArX1m!z`M9fo-r$J6VAX88NA9I_>1wEz^3g>?4WjV zp$906J#e}rRS$2+U?@k9oDLoU^G^s(fp%d zqQD^rz~*FbeV)0J9>CJbxTqyc*XZ%?MIT31>Y(jaUvgk1w!$d%6h7)bP*0F_yp(>% zWbgK=9(N|HyS3yXxz~Vd8RO97JAIiOrfW*r^24;w{oQ1cN6LD9hKJ$+WxD%qw)FNBES%M{#t zoRnzX@lxY8x@}m!y5#nbwi!W6<0IWO-d*B($0P^cAP4zUA2{e$DDbn>ouYnNfbiCAHK%$4>G1+fSDYSFNYfu-%4iM+LM#{G+Nao^wy z5YB2sk2#I<%Eynf%z8%1*IZ#`_Bzv|pd(o*hs!DRfz~?9(=F?rt_AV2a!$=XCoLc^ zPawn6$bJfvp-(pBWD_zC;NF(`lWJi_tRM>I2hbsJZ;b=@saaEREjZDJXb&pCS3s@a zob*Vj-^HR}GHP+?Be61n3(rrBM>-A>&J9(lEtMm!dZ(+q^=QCIssdg&hE|%kX=WTk zVnE>_sz=vsZv0#LH^o&(BD#(po@*J){<$r>!B_d9IN%{3YT*y|d* zn0&XuM7b4-P>uz*-Mi347Hn%XvQ%W24u%dWb<)Wl^)_YOI$OG?EtIJnO4y7dAw=6= zbchbb=M76qFb1@X_re%2S%gC#q-qj=L!Gb(gvXPpmQ7(NGlT z8z3P_$GjNE2w30_QzZpwT-Q*Uk|Sd+*-t%Qb)2@eS~25-ZTel(BcwSk2~aSoxUn2d?nyvmjFVd zJB%e7aONlEBJC5Xo?NPWy);S1qGM@mjg}=xSGXHc7OUZgw4u*X!ru*;GUjse#!8^~MMdVzPy zX|%cZjyZ?tTwHbb_zjz5LOn6Bj?i`sgThCDQO+@T`BL95D&ofWUb>&dT<7>$j>c+w zL=NyEkh|m}W1MfSdf4G;JVUd^8t+8lv?j9h>B(Vx14-3LjY;HIGOe0$4n9E5LN!+> z*?j!!If9gA9IaY%YdhY`OgHAd%~6t~fQL77xQBk6xhT8cRC<#9p03os8=DsfBS4CG zdJ(jeaE@O4m?{v8)Y^@q?B2r#rB`C{hLLq@yz=SOK?>x#C*4=Ot5o+E;Juk4MdV$B zNM+m+Zoy*w+BGpQ(_*%Z3HM0Zk>t-cxTr}`Air&=FQ1=pRTRA&W@ zSJrFfqdrbuZpH_BOd>z7i|T2_&bi5JW4Rcgrk7X890_s@a^3!}^k+#*twn*Z*t}(u zhdbT)3UkY}R=9W0RfS*J!`@|N?wyA9?2BE>BNo#b;z=dNed>KM5OCobs^HEh?8tbFQx9HE;WNWM3^hIs-Px= zY9;x5+Ao$&9jaVwn<8+W_KKlzHC%#u=N0-=cH0V85f2O2hRmrRt1HsD@ z?%m9SanAQ*#u>qbLiMG<9=+YkOW@7RpS*WEMIB3$7M+F0NpVrZ$2D4VJ^nx)ZX1EHHZi3}JHyO5; z9XXfAv=r-p0t=EnU3efgamTsyyk&7z8l+#_lxL(iKrYoQmfPv{eh@|dG}0;*JoBjl zN*>0$_aJD%xhO4L`nmx3s5q@y)#xUSD? zBR<0{f3RoVEo+ZUw6y`>6KEu)0DMGsUC=Xg_#QLc+ZO`;;<4)7l*HY|iql3}oW%12 zHl0P1>!)|d4X>}%r3<3&xaskc(L`aTCW*oWXXW0fGYwnn`f`}c1~>o$8zhaTQ;yv0 z!ArOSuRE(GQYmeuB5{T%Hre*1Hz!uXZJuWs6ZY^53;I|?AB4Iqls@)BdDxXdjD%_wr^DZozfcIe$1lt+**5`Mc_we|MjKkTZkT6zxGou7OvuqEpw0&S`x~NztX!vSM z)Ee@wYYnESh2)W59mTb_N-*|JLBO5$(1UCt^(#mHQ9Y`t}K;DIZd{E?$+3<9U z7X@oQc1wFAEiE9g`;H!ubs<*7)`|B(i^mbvWb?(rlAp=G1zxE)TfGXF96HWEYL`|V z_^1ub&2L1+7Mv1&JMn5Vc)0$Y%`Gwe7@qb^r6fK}T12)JqnL8n_oA)WGE&zRUzI|$ zi4l+3TjgaSI|>DQYf7C2=MGug_K<1b;5==N0I5o3duZ-36NHc^H`XEqrbmpykCwGv zd^O6>+UOb83c`UMVVKRPk_qzZcGcsV9_^EHEx_70$a|JNV$-yVod3g!+2h8qtwi=4?RMq9!dOwfV?A)Jx5-%-d0{C+W(VExT#hWi^P# zX)#Yi+Z3*w#>QW2atSfOzME4{N?V?oXo+|r7Q=$7=(iCTg&<1xRg7^Dx!)<$D;r!FgzIf@ z8L7vd*n>-5!3!sl!b{1H$#Y6`S1T2%7Z%j&D(JNjh*K_1HY|bgl&?(yS)V+^j_LLg zIx1mcadMHn0uE8Uw*-K1joTCCh93(mp?B-DzEcvEV&Qt?t%yefR56H^!_SpCOdicS z&)Q2M5N3(8=nB!*xg(18tM+aYA5~ohRVcTL0)IGX6g_T9borhKPV^RaTkjU$LsftZ ze)L!euL-digEd(1y-JDEl6OJi=ZBa6l!Tbs``pq6I~Q!AdtFz8OE$X?iVxwuc^Ekx zg9D;(g|yr4x)+~o+lf3IgeP?J$+^N8=YA@u)e`AqAb`45xppfS4zmx*RCr7F_|UFEnx3ImTp8s{Elmw;S_~ zMR{v_UTNFf?i`MA5?5Rm1;7uT5bD_>MF^>@9y3TRpe@mcb}gf8!~k;ZCSb<_JTC-_ zaObJ*hY?jEIUT$qk|^&HMkAu!4y+s@krx;q!;DxnF--V8D|^dGw{Xb@4++K^S}lxr z=)INMdH_kJAZ&E`@>RP21v(vQ`y1(MGwoPf4q&GVsv4h11y%^;3#{TagBF5~uji7i zK_$rvC&KPo@8G=C*zxq;ev%@Mj8Wuf-~g$b-rXSYQ7aeZLr6T%h}}r^oyXJ-FFi0` zl|JW4Js|g@C~!ak*oJ+fc;m9#$L{d(fSewc-_EL&wKefozn7%xmQhLw01VGtZMP5h zVletm`z^Iic{0bLdWQEk1^~YsUAlX!-A_amgZ9OsDr4|y@VnTQVl^27K?GRju*NWG z3NisLDMK5Yy6cWpO2~fCA47Yyy?S94?nu>U1e9t_nqB^m6XB^FlT4h&dq`q%r(NS= z_Fk{OhR?>A= zt_QldGMixx;ni!XZqJ+Lz#_?HquRZ|p}drsaM;82_W;PBC!s{c#ZB%D_9$If$(ekc zI_;|_%aSZEhKNa99Jep0M`$mXkxO@(9y2gTJ%oLICU_i{vlBJ1w*!rm-d(%-Ox!8F zx7pF$YZ_J=yQffUs?u9o&9A;e_fOE-16OEaSkUniLcbe*QZEu-YFXMtD?9XN3J9G1RTb7Z84NhhSqoXOrAyB7LNImDbW?HXPI+m zR`SZ@)VaYqy+jej$GAqaL+f_o_N=?CG=$3u8XB3xmX z$Qq#PR5CX%$5>l~JMcPRsm-E$g0OKslgBD!iYzDR3OhsR_ZlK+6JPXO5X)9(w@})* z`+V^#j-pDgkH#S4FS+1 zy=XXEAV|FB;nQxi24maKXT@ifh)}QGQ1(rMO=VWWibL}e%yilIDinnfVwUxui=(`$ zd*=g7c1kFM_0Z(;hB}DpC`C(e^n7pqTouK+>KOfP2SHr}^6) zym|pyRR^Ut-Dc7m*^d&tYE)|+CM8g)!W@OyujeJr-3vvua{T$WJK3G1yRuLhbX z_hFF%v?BR?eEh<(_7&kP97{wWiMG{Jj+d_Is)_ofkzAs$A_t8X(yUmBQX1V~ztQY= zT}35T+h*jAMBq#h0(nV=V}vT{B9%RUc@B?LW}*!B*b*GQA*!_Uh%yCUgBEv^;)$3k zhR0A|jeBXTc@C22sjCr!4IEzEYw0uwmY}eD6jbE6@v65>v1W}3`=yA1uWUQBQO|Uy z^Y+m*dfgC#Po$L`{+!cY6dt9$h-w;69)4rfK%Du2VFwMv378fJlR;mp!V}Uho=ujU z0SYRp>7)Qs(88mUxQ^=&r56M#C_um>@{|o>7gQ&yysy7dVoG zJcYJJueyxK43JzC#S2~rS>M2G2BmFa#q*AS!{xIu2E8p?fc{JtpOg7c>^#*G*7498 zcbSBKg0CgZ_LjEO*^3N%vft3HE%Lotb@p4pQ66VJ3~!fSu|+M=JI1?V;nG$VjWQg0 z-YZ5iEh(8eJU4PU@3=eRnAiZ85iSb#71OD0$E5DFx2A;4&mvzF6+1a25xv|Y z4_0&TQ!DL_2=s+ULK8qGmWbuG*Z@Gy8d6!p4I~gJF1xCbjh%>SfeUCc_Vc%fn^mgQ z)To`MuR*!WyZ=%4zOkZjJJo@U;a4;e^cg%n3Ys;#7fML@8>F z%^2a^ZeECLnAb5!T0)AovYR6+wV||b$&f+-2`-BCc5H~9AMR~ijKw1cOB++dXSnPYTX76WL8Va;2^k5s*H8)P6NyGsQ@ zsbua@w4un}Q+3PWB2|mNnV3AwA>j9XCYlEsk>S0RCXyP&BearZdo+n^yYJ!!DGdX^ z>*&g$2d}JTLvs@;XHwZE=D1upcA>=g1q7}Zx)+0I2QN}^dLRCNpfwcWCTW44O~;Lzkd3Aj?e z@Yqz;%LI9EIr0^_c-+yPmnT`}k|e#$c!mIvj%K!b-vvmgXFuyooERc0I;F>NMi`y^ zP1)!&*`7R8fnYXzGh=f*`hqJ_7>aIFMW5Jc^a zs2~SEORmLMhU0D56w$)annOP6_mt766W@cbMRT0%nrqS$B_QdT3SdPQZ$1pR#KLDh zjs&QlnB@b#X+m}&@D8I{L0DfeKJKi=HgeT_wn>K|!<_F39A=m%1!8*&H@zW;UpT>V z^{!Dsf!KW4Pr+q^V-`dX#&_Oxhi?NHxUoUJb7? zGuXXi#j^FbI|w}@i%=ScHS@)ON-4*-oSh4z7|gXu+P?FgD`gGOo8CaH3msJW!lACE|&~`}pJqK5cakB)xrUSJhw{)JcM=I}fl6 z(;4LswVQm9ovzB#U=H_aBvT~88BxXT0d8iGd)*1X7ziAw$`#p%D{yt>A+!dD5`5!W zN_xo{JYix4;zwvZU{GN@Oyg}6(`yfg3=*);*@EH@mN0->$LZotG}S$b^QVqDi^~Qu zQE5-rs2FVJ1!3spdlaJE{mFXJC+qw~mpP8>T$#_TkdH^LOKYO>B=?Ca;Yc~MttgdB ztBOnK(Q;t13q=Cv;~~=Fry+sz9OXHTsK-92Cr_*=TNixZQNg+|0Vu#I)x^o>vHP5B zG8=3X8GxeGGCKr7xO#JwL-QLO5f1#qjEn$`Zyoz9(pKOf7Jpm-jMRMh6^ zwzLVh=jFRcHthmH{c@iiraC@8M_U~I3H!#_imVrj6Zow*wm>fj)HxdaY*8@-IzSWC+hB{lSB=H} zj5v~}0S*bKWeJgU7P8W~$5%N5;o;^Tb zrDwrj{`h4vx}hFygyEDXS`;FgWf#q9X-UA_x`%i)!G+K-2Wsair;S~MqP_X`!H%lE zR#xxRD2~^$*_5-8^}-D;#K2vXyuk;i+g|}Hr4LJ}!_5#AH8n#r@4FQl$Rh5|Sq6O_ zzZ_d|K|7P!TUu+bJ6{5vcTsQsI3@sHGpl2!6%jUkA0sH*B=KFPq^HJ=zx- zGkPeKPsxs{lt?y@>(B*L3>G48U-{Y?Yd;oyX>82Hgiqe1$?V8@j4R=7$d^|OLT76% zl{ie?wa|s}C}TNn2obX->}&-P(pA)2kkDCh`wg<<;Lr%4tebGVa1W0{7*#{C6F6tN z%G@O2U|TM?an#k!U{v*ltfxGH5DHeW=4P7!lthe*el|mm9Idk25gO4N{rZwYT4iAc z^NGtm*&GABE%C!_qcL8$p;oy9qed)xa@VsjL?T(v9jA+eI(I_qW$D7Q`ne`OSsH%^ z4$Nb+rk-?f3!l(?&rNZvpQkS%gV5BlCV+A-kVxfKYC`+f6+;)}^ki2HtD2iWuK>(p zp|KH2Xhi_PtI|W#02;cHFW42n7p46ydu0ZUUHLIqJJtn75B=c=< z*1i&)c6<|8w3&QxIC1`XtaGBRH6+)$9>sI1hT8Lxio=~_PI$w`2;KD3BDIv3@3E$= z&$wWdi$%yxofM6s<(9KOgvHZj&3;3gc2UgXlBc3WWgQH-x@?x%M=kZOya?y84SU`-}EEq3kprV(b!)d9g*~sxerkObtEdg^cUgudt@@2uK+d z6(HxiJ7{H`ObMj*-Gz10+YPgZc^+|$d?(E|>fsD0v9>lvI}?<5@9OCdhOgDhI9`&) z8NZv-iU!y)1{DVic+(8sZwd9z$kOOYO^H4D5V@7s3IQi3U*NtZkhbEWvQ6^wr`f7v zq0m*!{XFfR+a{hTilkCQ%1EUXPUPG$RUcE=Ljh9D&0GOX28c?ZK7+)Ux6?&PDDe7j z&GiBA%M6Es>#+`S2EC!4=vS=!NXQ*(iEN_&ATRa&($J2NsWxx;+fr`dbPX~ZKsRc0LxO-7~7mAdI7K5t}g?xEv>|f!BlpS&>rJm=)pkG2`G&h#tU?5w#!=K@HA&7 z8Z4TGsRNx{Jyj&o3*VX|u`0UecHPsp)_u5UqV0Wa&Cb-5;pU?xwlj7FoB)xrm>hJH z^@}LG#G{9g*vQXQ5Sr}-b?KXS|Dyd;w9XzhpT7_{L61ggz9K;dF|qX9?uQrlY+sYW zDW04m-H&|)oJ!nS(Xcim!n%z^|0{BzK7n~ z_2@=Pu^;BzX$$XZT2gYyz3p=BeIhb^ki)7o+z6flFJcUUF~a&4$z#b-dG|U}4KWKV za5Q6xP?>3xI_!0gy6l?rs~fXNmUNaIUF1&*65LSdij`IxM};4C#)xLXIwJGy!R_*c zS+wZrVz1=W+kOhIdL(qZ$~zref8h?k#zkb|a_(h~?wn{bP|(~7ttUjRyZY06Lhd-*Af|I2heQdISDr* zQ!$SO8?Y--t!VTiEC@1K(&jNr(z1E2ynexw26VtnV)(XcX>kNcpSpoEodUh&cLOew zuUI`pfYSH252G$DKo=t#LReV=3d#}-9m*L|ytTszUq8dMEKv^KMDDwX2$?bOxdvz? zUJy#^O+;@H>F!CAQ30@%%`7Yy)^R~YSE)hgp%ywr>$0%c%IvfPHbqQ z4D9AQ(OI#BJUMZ0bGXsZ7!bx|I9*Y@r%v4#X{_h!xp!5$m+0-j+^LqQ0Pe|gjYW$y zBIpYXa02@}h}t_9AQ{i7!sUSWQQ}47?c!okoEYofmfJStfxF`7V|`f_4zGK?oy1(; zO+!J0*)WMr4qnE}xJrj(B?>)t^0j4>lgVx_dp?W4+UlttBxTgd`+zA>5p4szG#(;2 zF0Zo`gj#fhf>q^p<6|uHS2@Be`X1O@ncN)}#=;v2dA5^uFqDFQdiLa>z6Sy)?7ZvT(iQ++CvHWIcqdq&u~mdDpQ} zv@;83s2)ZHStg1YCX3Iegk+XkWxI+d20w8}{ldKzA7Av_z!?VsV;F##aZqA##1YIw zVa+hEF7$cK%2b)sBLOeZdBnG+672zwY{MaR4+un8y_CR?=Rk8y(NT&D|;g9fJr29xTeiAIS&`< zV)rq-GL*fMywRc2%bv!9zLa;YDH`yeEP{{h9lBBjH6~P@Y!S86j?h{p$NGKw!G8w&vQM1lp_TQq;b4m<(D+A}z%MA3k3??vL7Tx~08zIr!3 zGeJ)_wdNhF=HR3ACZc6Y2N_g7zirWB_oOUUd$y0BuNq@42CK?U4v!P^3r?QIT!@8j z-j*VWMhD?_lhyX!0yb3p`qBU#}8! z0VU1!AduRAX-lL|58q&T;pKbRMj_Af3A?z7=W6@5UiKrF)@9*Ay`&RtM%Bv_oFgL_ zKN^K+2F(y|r_4;lZh0I%(k}vgU3`;LVX`z;@-ZWaO1u~LvWdmOW1j>nl=bX1(@`u! z1_q(IoR$&sF4W_A@p}4BQLqH+nT=u=blsmaEc1lEcH;0x2H)MQLl;4rbPe} zrJTf>U>|Z5Q8fTm^9tizopI`jcm5unv<}vCwZ(xaCM4@$z$?ntF^-}09_*ZDL4ie? zCa2X=A*4mp-TDwM9i*3%gGkhk@$#v!X&@=56suq^?6d%%W`mk7^7ELWz^4jpqNIwV zL^CX2*15cOILT)@1!kBIQB%wt=x3vjhPb)5<7_O6YxnXF;vp(kU2T~33(i&N*PFw_ z39u{}ixk8j;x`i~3HqDOAo^V|t6iCfZ&}ez(g#v;DitfRn0<6P02vh{|Yh@zbD9?2= zd#$x0PC@XlcC~ZoC_*4clP1OB5p*geY`8oz$;WiDk!!kSiv{gLMIDdEA%`k3eU!#Q z?q)G`8V@ywDn{_gFIR=t-O#B5DZH%cX4$Cj#`QVxVwo40sPMh%=eNuNy(jRVAC5(_ zd8;{YC*gy>m#z~?+z(ASJd)^uNX7e7$=~adBS|1Ec#c52^29a%>F0%3zjDIFACqwj@o`(jGlF4Wj0 zB1CE|?F2cHls|th(z>$)Rk8FIW(k*pN-Kno#Q{JOGcw)28^Sn!M zRqlNIwI|?KPBz5VhQi8-oC&#kDtNtB+jItjHxi{vQcPlS#%fqhYQ!2LC1OIsLr}UJ zZ-8D%wmb?JyZ|ZWvAc6kX23IF?Y8pgpp9`4^0ZCN?L>*F;!f?I=tET5@gtbThib=j z$t{mf$Ck#<^mtQe%)sdONV8wIoysOPAL8?{;RD!TMbfiSsj&K?;F!h`C)3QiUkFM9 z@4FSd;_b{OJq5qignfXjCy7M#8X>1d9p2-&@qEj@{l#8f;I$=G%y~r4n||b|+Mf5` ztc$|VhHpQ^B#P%G#E&k;47C}=%X+SG#!kSkhh=l64wL)k!fTA~5{GRhnj7I6LeS)u zR5lSW;z5}Qy>y)*YVSM(0p7ME85~)ZHO;CG#)Fg&_70L5-k6#!n&4?%WOy3J5yG5! zKh;AtlW3CNOEJSX-n;E=2}4}0kx-jM#|ND+_$Gm*@;xqcvBaJ20-#FK>E6Bo<`oO% zH_cWH4~!@T7zTkdqZ|g}c)x9;BaTMlaw?-Kb`zE7kVlM4u#Nz{QI9M7NOc8A<6B zs`JqU$nc6pjFlm~GvK@{MPDQXU$fPfdyFqqk`W)?;OKkWWR!mW3>*#^+wd{gu4aiq z7bijLOlkT8MI04!wLNIz^1{Yux8mTM?wgnLBy>^y_GrYmT&cQLF75h%X!7v`i+V zRx6yk&DqowCcw#w9%p2=CR;tonv= z#4RIVy^%+8KF2MxuJ>;Hfw_+_z?_D5H*1RW(KC!kOPDOe_)y0C;q1Ib#f6iQJ+||! zpVL@k^6c{IAjn}3tU?Fc0Zz30B>=(W=uR)dG0VH9*U2R&S%=GHq(z3eW_NJwn4Sa; zn(b2>kx7eqAlERkEoUz>AX>skz9GfNaj_F&f z;RuS=_V%}JrnTXoF64z*PoYl6sz-(IBC&6AYvl9@L-Ka z8PqK=&f9d}K-gMp>f5R)akrg&eP9aYEmb2GQ;!gzVa~fn!r_fNW}-M=+&)RTIM_-a z=`ABS&m!t~uqCM^k6yOw87Lm7<7N9}He6^4k#`8rCbtwY{8kf0a^EiNl^PSBBd2$ML~&oj#oq0aH|S$U1wn=+Bqfzg_eQDMd^yrLPS5cGN+2pS4wEnd08I1lgd_5OKo-RK{B2OEte@pKr*;$Zin`67r|bfb=Q@@bYU0NXC97t zJ667s{BBVClB@7R#J$c6vB2{JIRP>?+H9H^cC*-H3wk*M+KGEjloMU`geSaKET9+& z+wxZTNQUSMf-29pzIlS;5z*1pE%@5oHXfR!n&ktM=pu*ge12`|5mu@%DYhnju%Dbw z<7C#T8Pv@y=fj4FylRKMb{Xl8O;`;pO?e}Sg}lc*7dg-AY-|BM9`Bh_F`CnKZj>^! zXXV-($=B@w!NnKhuPbO2`-#YkHw>|nu7kaP(T^Rjs4~vgqGDaq`w}<^jj`UYQIe^s zgZg{Cir~?kWY!1sc=Nplf{@zx2xwbl=piHdOpd+-z`5rTm1(js#R%&0dRyvS&`HKf zcJyVh2jc~~w8S7FucOyAo27+zHc#mpDI9t*96SId=~d%eW1gl2Bc(*vu2&}^C9%a3 z{9=O7@yV?HN^=8Y%v)7qHQWe|<^g;{ZR}V)4!){yW~%Pd6KC$tH}&XM^){MlHM2wW z?LZ?h^1M&xMZcII>Y}mt;Ju3td?63X!JofwKIjbNb=y{FfDzizdY2l`+2Rv95YJ~; zGVB5PIC^s^*u*T6^O~X`mgkN?qpK0x5f-sLCW6a`m`i7VfT$4_HpcJ#s4(}QJliUk z>5j!WSNxpGcQGJ&Z|Ze!*}ZrV(dzVNP1}mq7NcR?Vh$-}fw(zn^Wd}6hgt$z9o-9a z_p)=6t*TsW&1@B|YQ_AH1;d%Ap29+gwi+M>tZ7xqkRF@^we2{KzagM?9Qunmibo^7CyIsqA1Ht*mX;kxXq``VtZ7PnhxaF^yh zA}lFDkD+^!!cKQtosH*h3rUfM78O>StJ}i0k(A`qem)BYMEsKMB zaaLUN=F?)riR8wf_$60Hji=u%>y8xkZPax&LCs`6bEHe=g2aqS zJ%m13t%--Fc}tW>;q32W>1>NvLFhm-(Ty<8p~+$`is6+|uIkaaCY?HQdS7N&Hm7k=4)A-sg~Gyleh;Z$ zQHq%f?Axn(2J7jajp8JZVR)6Vl51d0*U%^XoZp-GQew#MygP^`omb9yrUD(gzocbM6hOw5|Ct_d&sK>C1~=x_ZgrQHD4|s z_+~uwxLiz6MH9Jhv=eJ@-^WbgNf8l}uiuW6s)-KTTp6s|kttznUhDmse3CF^74y5zCO50GzJ@ds=!v9K0El{Bw9*0 zRS$1Cy!nN>P17E2EBA1V=yX{7daYCr+{!I8EGtc-USV6IXV{5Pa>-`ox&h={8hj*M zvoVzucA0~3%60tsxwz-xhU#+KWDQhuz5tur8Y+?qLoau+2h}*nfeA~Oto~E(ZLO>b zF7)0Io{OlDrdhe5>@fxSQz)^{u_ypwBc4GtAg}7poD$?=8Pn@EyR1v)My$xD>hcF? z))v>LmWHDmbv8%$%1$mWFEhlj@hIVW&}y}qYJ`Kzi#))_bcWGFb#-kyaO0|cJIhh1 zXc$3OvX7PzBb-L(Q3A_zvjcAl+Um2+NAJv2b=R?1C~RLjCg7g(b+GW&PNOuwd~urX zRgutJ$mvFX4FiL@F{RmN3FC*3`Xg!P3(>ZsZju@9yk`qKs$yIfNhIr)Le&B0b@Y+xN zwe%v+!H7OFhUc6_7r@p+=PimE&VGQvs5V{;$<=ICp327PP}Tc(H4xd#cv|e{M472b7(IwDafSf;cb~;Coktb+^v$!LvPmppoxmoneM&$e_heN>Z*>xJ1gYW>r>(VIvX7z=>=b9%? zMGrRisg-$ip-~}I+a5n4YFFJbIYT5jjOl7ped9r`&ExK?+G-PPYI#ERu;NX|j(JFN z0m{TUE4_XI?uHfeoVLyVN}PzAXWSoyaBHv;5&Lo4K$`qC-x3s*vC<$=WNt-XmegP zxH5eknFMmm4<4v(VfALbIhky(lcWVGa|}5LpCi6DUD7VAeK`H(z?pk?P}HiD!o~Zp z!>%X*;XN$C(R^1ACAzV~-jK-T&#=b{$)MO-Q?c{xh_NoKUK7kC<97_CC6V0S&n4*r zp1+2I#DIdM0n9Ex7)C@~grnm{j%Pt85|KjF;89O*M(A-tUs>sRZjI>c#7mXe(m*&j zdJ+VyhhCRAwOt2U0Oa`I+YzY9h!Bp(_Vd;^rligOh&C*6Yobiau&AjF=nYIJb6RuB(E&ZDJpi;4ZU*l$AiH`XR-1*BSSYZ<7+kYk zZl|)jrzV`^3$V@gE_{bpxm@xESUq{8uE>Itlr)HtxQ{r<^_eki%X>l2_4;kq`8eZn zbO1OReb%#d2(hgbH?M`sR;!8C)3EQf#n|);(*@=pu(C9)@`XkUqasrHVqsj! zrjp0ArkoqmcljXv`7Rq=vLie+G#kYo=cu?%VMb^-D{N^ACCQbCyiTvHao&tYHI{Pt z5}ggoBi|AkaC(^-2SHTJqbDroNhG?9jJq}HH>k`{J(p(|ft}V(eA!j5(OJ3#P~^mT z(JWsTS$I64jF_r`OUq5 zai5D4_b%U43;@d2>R#`v85{DIoft)VEFsMu$FCVEqw3+kdVpOwocwyZiF>5OIl7f3 zwliQUEf(;uvcW20k?oR6O%q4e#bDuuKDUzUk%;wG&D_EA#K4wh6Sh-)_JHtGXLGtl zp1z^?nwS}m+TK(N##zUuq=(Yp$SDamFxX+h++L1>-D?A|(YSkB&K@bHWi4WBF~cX& zpn`Z3aiZ;^^`rx>^i@7-y>kevJR4N;HMmS;>G6Jp`m)R$`H!R_g;Cw3MJKBIKW58)cbNyrza z6QV7H2Eln)b|@bZWF#d*(-XNl5ZcN_04r#~#ZHU~DJ+nNhiXAD$g)ZIy&X-b*G7XN zN%aI=g0yDGS0XvO##%uUwN4Z+8=rF9iYsJbpxQb!xQ?<{*OVNxcg}7tb<$8j-l$m= zneOO|m?AQl@smfE$5v&6%P)It~kP0yC8ncDq835hTG1~Eoyd!&WDEKa*m16he@}OQ!M>lV3 z2@F4f8n-Q=RgEt5Y$jTc%4nfp@Mf6? zDw5u)Z`!DA#T$u};+AZNvwrn#?Vbi71rs-Mp0^+DN%X|j;gByZ!dXP>nRgBjOU2Y1 zn_W%=iL8x4CqoW@6&=GocB1?wbokz1Pqr$7AYgQ?bVy2C3Y-a>iQL0&39hYo^j%B1L*H zOQQ4X4(X85ZogJj(U1ATqfR^9H zRMkA;eLc_p2zRz4dFx@cscz+4To;vs?!Bks60{iL$VLF1x6g09&{hXi@^*xFnIk~L zd(jqRc2q(opTj;D>Rug?cSMKlJ;Z6+#;2(v0{yi3c|hCJ3l%S$)Jao!jd>IL-m5hx z%Ce@C2`K6p_C{5U_eCVN93YG8zRJMJ!B9?QpQL%hriEq^<+LfBrJYcj5I$g!!Le{jd-&Z|Zg%X0#@F9nLCaIx+UZ zb6ctkzI^+Ca)$MLUz#|xr}GruOVs{H590u zFx28y5=Hge9_n%TD}9*gSIiujnwO~grl^_;Y47AktM#F?Iyk935sx-R9D8-w%MXj2 zfrHlcO^}5sWajfNnnq4S8l!g+oOr9EcRYhEg1x-fN?4RB3Y-W4SmfsEVM(6g5Ymri z5t4N13e%)+l$!(|M3qP~AHAw<7!lOOR=0V@xJOrY(JeY`FaAP*GkDT&=eUSMo!*w% z!HpxUAG7J|1v~+M=Uf>=fRg9F>hvCAEYHJnh>XIQ25(n4qUpin!m}~6o7Up$edl;b zYl%`ZIj+Z!I+$~C(vMen>9$FqJZ9c&MysqvZI+sau2-O0gK>MTdRl8LRHyfQup=5wdX3Oo(8B zVSB3KW?rS0o(Jb6V~J?fItXP4ynJ2?=o-Tfyc^e=Z7W=yIxa+kbaL+~t+i{L;PP-$ z`MFO#Y9R&F$6`Y!B;;NTu|1cG=|L5d1Cw!$U2hZ$%Vj#`^+r82#9Xa%O1#iMVha?h ze)Y&5-r(km?!o~Py*gC8dT(_N1CB9~a8{z?HY}k1s+tPREN7x0>X@V*PfZC;($2)t z6K%~jOYH5E_aLjEQyP1F7+sC|Lz$uEoVoX8&__B<*p%_KB&)gQP?c0h<8hn0MM`n! z3|V^G#W`Ab;R8$eFc^xNpcpB5X*g1@FC(m-2e8DaQeJeQYP?bab|fP!5*{LUs1ayc zutTLOiETt$oW65>)ueg`{KN<-2aH}ldo{;Vxci2ec!FtY{X&-UjzcSsPo5}XI6T!$ zi&7;4>pb_&UbG$M@YR`8X^Ys@@V4^gsAC7hz8?^wMAH1&v8?eEeWWk+tu5q@WBU;J z4R)A14*PM;06Z3*RbZ2H27^6zwtnxGv6vKI`%t25D6>nxa(#T8`6MJWG3i)B(nd>B zp6Hw1hgI3kuRY(}9UYkv&2UKdr`jZBB?7x4Kmv26fty_;Bbtj`VW8@@XxtuibesHg zg$^Gw>5F#f8>M{euc4m>9UO5MKJ@A$Tf$>|c3Dbow)wpJSCV5qn=IwVcUx;HR3}xI zC24;0SchM?F?Q)0m6eCXk$S1uDP_$SidhQr^IPPSKo0K?IWQ!0R%mM(=`BvHS#e)G z!v2eWd#gsvT4X#A!Xxge9bNM{g&!o>$ zn_j}qC1FlSnD()yhS&yjWfstK)3if044vAoNo>%ViS&lbCKm(CC>kv$v~d)3xP@Fl zlT7tsL{F?GCroqmAvp=t_j*^vwBDdBy-pidHi(=g*w>9t^8mOIsHRUuHBX3I1Q9rH zyU$@bwF$?%bFheKW@Zv53!3g27DNZqQ1ao-6ScVE{G>Uh2qwirm)bo*+M4 zbV+yLlw2c>3M5>9zWush7`|5~??EA>=Yh*wF=wmq22q{BoWqgG7rS9d z{*IEMPoG&U`^LCP#Pzb1hLztIp)-2n9)v|aSvdcdfabY?hKM{$sF?I@TvJ9Rn$9@d zhsu=6IAz!N9=QaR21^)Ygl7eeg;KnTsPrU6oFgF7p<97$EvUYcol$I35TxxVw$3b} zdx!AJKEWsZsmaGTEs;1x?>Xj59*2){83}El#6FMZzz1!cyGpuN6CSpA69j0--Lt9% z%)m-LJ2y{g9ObPhw~r+_(ivd4Ju5-;CJ{X_L?nPMC7WuB=u8WmOYwUWPblK@Jg{52;}$BTRe51_ZecD5u0K+M>b*>OMdN@%jvGRuUil`Ohx&9uEqyxYcrh>x1= zTHoP@MzJwJC!)65_e|;JnKg#ZY~*DGQUKhMJbQcn@(s{i2z&wqLmJg}r@Z%MX>qY1 z03r8N!+6x)Io57?d)%Q>kNb4Jz#W^;47gb25Pe_Gvox;tc*5iDbA8Hj3C{p?(aJ#5 zRoZ%E$8q`U8E+{+3Vx!y!(LA;9!hmR>eaS;Ks!=B>PP4g5%%m(A@{PiuAGFbRU3|h zbo$xW@`U;8G?sY58x-Z~@5Z0`#6nnJ7o?%SLtPPz^%G(B@a7UiQtnIut#{pXtamw$ zL1IFCY?BY}36JWTzRYD!sH^94SBvtLQ8ZSTi0&DmFX8Un^I=kb$<4$LZ!J`h4iYzw zUV?T|9=&+&muiUPA;?WoiU!lk>^EfJ@cS416PJnHTN`DwB3RmBDh!eD#dA>td?o7( ziwmiYK4&jtAT`bK%CTQ()VwNR3c-SE3lfr~;H^RMr|u4VD{F^~cEkIUaRAz5{rWhc zzRsP(>=N;PBT9!4sNVRia@QTQGKKdn93M^=NHL*3$IVQuMA>$NDrtNrs58p2xh+XH zAUD?u_oQCRLqb*uEp8x_>VeFBMSymXHB6>N^`dz2+3_xdGB+hns?KhwJf@|m9MZ3q z9E7}X9&JD#_e52I-(x^3q2g-MVej_TlBg^|4)@I(q{W29rbUCbKk1j+?tDH0?`Yim zDkB|Qa5%xzB$kJ5e)$Qy3u2dQ1paDs@8|Dw!{5rad0G<&`LSfWJ06 zQ;r@9w_}bARqs{ZAr#~WSv!am8t0pRu%S=hu+IU3Gs=|UrVJ9Kq0@A*c(Wmrk-Y2# z8KLV&FMFNXFQxj9=anX7vw)Mnu!(w}E$#aN+mgX-r>8Vo>xBi9A3-5FoSFJg>K^+xu+z_Bg)q8jsK)NYDM)2;MV*~J}g5D#MdmM?8xs$Pp%lDdPGg|l`?5IanYy$r=JT#lqrz2e=b6QL)O4U|Xi1xU)_+j_>&{b5s&8{T+6P2MDO z3o$67)>mp(3fs7ha;(Cq`8Fl{jOQ3{A!c9r6TI17j_3)W@~B6U;=4-cqOk~*-0Bsj zs014$n2+Cp_7*#L%A>sWJ7i4W@y#Z3gE2Z;1Q!G|$Dw7Sdb^!K?2TGnu{OW>%Iu8m&oFJTFG>B`yekpm!V~jB2+IP>!h65lS56MeOWpS$7bW>su1X) zukIbXli+t<%QtEucYmA||);K!mA5BTC- zpSU4yT<+_9f6o2emJkWNO)x9F>Q5bq3C#@UTtsP30L{7K zR&q0m(IJ1~F1SOKm*!PmFB#kP3_#CZ+-+#!0h90O_StOL64XHy<|C|7MZFk2-tK^E zx*hBXjb54bSmeP2<3q?wsVND)S0&YWnI7ViL75>_7#=8%xBZx4G&yB092}5l4U4hc z(kv^~2}^2lohU(9!x3<8c@|gJx-TGJ1T~ZrW27o9X7lYr;Xt!JVbn$!0}UgBoT^Sy z@@Qg{Ma$Mpj}6Vt^pQj3(TRLJ#i_N+rJw_49v4iB=OdMA;(2;HSG5xAdZ+25Bo8Pa z_T%;m@T!6q(#Yu$L%9Hwc+LsuURs%WlPNV~C$~knP@#?lnLwZyh(m}?W7f?oRl4Xe z8pyCul_HGH?BEM82YH? zo-yn$OJ=7~jK71r*Q27(+1Yc%tXJX9!!l@w6qyo3S8qHi%gWd1^15J0ooY6e`+?xv zIu5gNfX-s5a^Pa;8Q6m-r2Y`AvFAyEiWWY?@lkgfl!iKIsA6ci3V zoy=?_M1KC3<#jLsHNTT_+wf4b^XQXO5S!I;9ZIL2aDpd-acmCOtF1WG6R2J{Glyx$tV8IU--*nOqjb}XEr>yC2Bo~*N^i}_K*Uj5x;kqo|Zz|$w`T1xP2`n zW3`eE<1#@AAx~hgRU&MdUlX=N@It=Z8w$nh3eFvP6qx%Ex+hJ>vzM!FJl5u|45~m) z%BXsI3DmW(f-gEB&&x=voaQ?@u!V+d&p28rO41u0ZxI0S%gi#lG0dZ6;@Xb1o?qf} zxP9YcAc~5n8```QdZ~Wr08HW)5wP==9ylvHaOK;Vwn5`Zce&DZl_HgrugMyc&alQ1 zhi=g#nsVx45db`4$BkWS&N<48hD(8>7bbmw@~<*aZ7H)21G3) zMgjMhwx^*8Y{;vH?0xp=VJMGcd8C-+iBqbMRUtEOzKD@XYjGJ<;`c`Rm|NaVl;8S2 zu_T>VUEo5gxEr{(#^N2Yskht?`Gib?W>6~&&v#tX2|@J2ZYhu$@vKj4D!?GZI(eiT zwV+5*;_O~uwviAqHdyF;Jx*@jj92fhOVQ=JcX1!9ZGgE~Z1C%N+SpErvLz*ndGA^a z5I7_@d||8Lj4pvL#aNz02&Fb{L6&pxK2gPc+y}F~8ZMQ4mnSU4o^zmb_$tTPaDjNHNN`SELAK z2QLpE3&YEkt!yx*!5s*-TzE9~VD8%SQ7|2glD4ft)j7;umIK0DrdJ1$EhZSu9<@M9 zTsx@EgEu_@hy$&Bd0M5nMYwRF@(yJDN~6yMAKTnPzQCtdGCTwb*!6Y;paeYe{bD%- zcfz+<-P)kBnEakGjUZAamWX!sl&7ufG{2JkS4CY z*i(;(%QE-^lZ=b#{Ph+XT~uc^6rrk1-I&J-&Zp8PPMJsPJ0ni+lY{%J;l$O&(9Ray3Ei6{GbjYzLjbiCuf~=f@u^ z9jRxNtdt-Stix>2WRqt*9>n&8D{Z?}KegPl(#0T5hp@_OF}z+&rR z#IqNI$)M>cHanBvQ85J1*W9{=OQ~zZdYIUD;=Ng{QNL6J6gR*F_trI+^_g{p2U-rZ zokLU2klu7a`hX)SC^6oP<1~425Xn>H6M0zJf|sYk2^;!cqDT|5*8}pc zG(1PCZ0c@)neCv#SbGW3t<`IECvghxQUakhwlQ1^LoY#KUh#V`NejBPG*;=}TenrY zMp0TBe9p%&#?UetmXZQJEP3l)Fut(OyFT8w=hMn>jf}8Fl}^KJMu2uOohFE@Vay?c z<7`5uN?Zm`jT7`1-a(<~ik9W3fjn|BTFz%&R$(aaRAa9nZ-k0&gS}FDO$wgCU0+>?=|Jfy`7 z$1r#S$D$!G{cg^vF2VP9D;M@F*%QtI@=F|vru#ovN0zJ+J#hp`nPhzQfA2mjuI}7|G1K63eg_=11%1{4SZz+& zMUHsxjsc+0vh7jqX5rz&qnRcQE@CtnWk68aJiy4|iOE-OKs5%nauX9B<$4Uv*~};k z>u$M(`}*y@1KH2Fvt#Vuw{wgZ9ic4m-K@D*)Apm5TwPqU|5KU9GmM&qTu5h=IffM!M%a4 z-ROxx0KD$f3|bDbc%_HR_PYigjf-9mAZT3QS1hlmF`dK5TM){J@DhMtfct_<8B+_A zn5iDdqSXs~O0yj-?*txs%Ock~zza>tmzW_|6kXVU2@*B?q9SBi0-!{f+I#4Dy|PX8 z3CupHc!RghPuXLl=()mT&XXlZ93X_5!G7ZrtWSgpv4F$OK7#Wz(|=-TMhHBz35-mc@Spy7F>5oV-UmPgtzY|a(j zz;k3&hyhR^i%HTU7^LjkkLL|+L+9fBZUO$;0&Xs+JBFfi=;gxm)>i3B%smq$!g4p- zd1G?tb2bS|Hl(^lypGiGC(T)pLv}=K0A{DXn5gTU{+A)Wen-WY^7Kcw=2)IWlPmzJNAM5LCrvmx7!|fI(>-3EB&|WwxU@6Er%j{ZZ2X8S# zJ&mfG6@X#!i}QT(-n{CZ>UP-#$=>ToOS2_i@^zX*v2E<4j%YHv_X>;DMc(TP_o*pe z7pQq{xF8;hD%bQhRSFNMOd#J);|ogDHA7~NVq1doV9m2p--|*;2sA%cdIY18FwSqU zTMAdSPscC4lw+pJS3LY>Wm{1%U}VCA)pHxKWbJvE$5ui#rW%Ho3a_2jxe9{!`8aKL zA$Tu5dHehllE*0ctgP~k<=(>6ZJ>vz`HO|zSueH)ExSb(^Ky=gp@mR^Lv7~b4Rm>~ z+v?$i3#AfY;fQd9KeXb1`K3M-_ob%^l*{v9VF>%u?k zV6>Sj27hoKi_$I|kFSj-bKEMQOrw^iQ+HU9a&UFyZSfBOTo&}~CK>{t_t^yOa|s;o z`kXLs9)vLAIq8WmAH+UU3N0n?*Q@cM<7iF%2GgOP>>Zur@?@?`^C7~#wN40?Y#ZWz z+%V0Rzsf?LRBgO^;&M-TO!##PQ(~sD8qYHUv!i(m>Bqx#ZwepwE6cJ6N@|q|rV_54 z0qq?StdRDUaPVYe-c3ktVHCY7?9zsTs|=i{9w-#$HBS0E6DvzJIq!0ZIh5ugbbO?6 z%USjwqIyA=*9r)O{+ikf!l4+gf{N;oC29?5@EN(^x;9m zx3O7yjEAUb7y~L}!DRyHT>B^=JuV)3$_gVwpzbMYk!Z=49&XpASZ9;zC2V!l*!#3Z z(;kUU)nR3o@)1W(Z~bn!-TfJ4=$jiYP0Cs7y+Q6rLS4_qh2Wr7j)hpA%!ngcUoPfj zbO7|1sj!zIh@RvGXv}AoP>?UkXRLwr-H>uwj}4A|c$S7!T()(~i>FVJ}v&^lUez z@go{LBUESPcwppS%)TuA9-9T@Ev<7gwD-9>og*; zaBEI8khH8d;<8JG4v2d}!W;((qqqc?Ga-ZlhS|GQQ8B&qC}WYHD2{^Du;yMa*e=%K zBU9P8g0EEtu%25M?Ls7QuD&I4a6!x4d<%l8(KmE=-s0j*6H3p+I@SeEvL#(#a1Z38 zgj7mD8SU;mjyIY7&ipA=bu#3fy?1W7&;SuWQ8(%iDyphIR?D~HL9IAF(nBw0nap6uTKL%p{CgdoSxP244=hzMj=`! z-l>G0qUbu@40%UCH1#K_JkKU=5^;hTB4^B?)KeE6dx$!r0l03hhy2y?OL3nJJb@^( zRB>)LPENYF3x_AlY|nxiQnK*M?52~W9>{@Jy4LSlSFu-oO(gB=EC-~U!UU!jm{3hU zgHIQUS|w^pV!~IPagLi#;XN20@7c9FOh{rD;@xibJIXz&1JLMLw?u4R!I;HlP)daM z=g*$|*`BzB=X2KM7>-b z4#?}i%p}KGN+80`(GU9wJe2zqJ>QlX@$gnX+h;GlHwduvy_VAqhB#8nYuP#ogIZAZ zZClYsX?#ToWRN8p^7L}HoVRG=afQbd9=7YjsVjz?Pw*MW<;Z@?B>{jd1E#bR|tQf=9h^4u?C4*3R?_#6~%8s&}gr}gU;}nOoy+eCfhUXvenVJZBd?uHZhPzL3SGkiVxoMnQy;g+w}5ziS<&QDGYA-MMFU7 zdcmU=!Kt}{J=aN)XUA)F=1;vIM(LD2$-7MleedXeD~7bauODGsKi0F)Mi8HiVYYHY z&4jCxQ47_hb!xN*%cKBGK()WoN4BBY<8R-rz4z4P-mXOiC)TlmQ}YD` z^kY1;%PyQ`O{pmAYL3l0bFChCyl0ppmuPeLPC*4DH{s4=BoUdc!guZPr8Iy(TXJE1 ztbrimFOcvyBWtM#tE=kB+PL$cnlDIQVF!nYZ33R?UGn7Bx-u)Gy0oI@Bha=qc)FbJ zs1|V=Z}Qd-;~u5kS8D%*>&gWc@a}Uu4WsPSKu>##| zaPh(>5~&~Hqiyg@W^S5#{SOD9~?w4P4*c)SGeHk2OH zW~3_ao`xwMEPUNycy;aX5V?)!m2g+ziV;8bAi=Z-1+UIThA859q(Xht&Cj)uMQ|uY z?ios8Ljrf*B-zsx8=g{p%N zbtwhBt2}O`Y|cbEK!C^KAcnOY>-rkIJh}kORryWfU}_U_JWh)V4MpvK7md%U9Jl+Q<9zwXaFJ}p zv8(EZIn{~>P>>ZgOb~HA5;+HH={C>M+;!do*UWh5S7+oj=938(jiiKH6BL`Ayz87?9hSx(wLQ<2CTBKrM*s& z+bYt><4R*im`mBWFQwkJ?jRTz+*{eNR^aftBKE$+Q@fSd5bmK9`1EQ9+pj${N8S0Mlf;5*rBN=Ga3%6AOQCBYYPg_ z?pE(DQI&a0=qx<15;bx`JpJK&-e|^8;hp0y+N)~_GFHB*G4k0BZq3cI%+=kOO1A!Y zGk_V>6=;0Lhy%P6;|S&lD;3u=7eeo>DR+y}J0JN18yLFKwQSRcHsO2CnbztL^ll8& zoH*cu9BF~`J$@Ae)W~Iy$qtYnTH6%su2MCqOe5_B z%$Vxc6-%)5f$$<{-rf!uoi|?SF@Rc+o3j)%9|qQ2ZYt)}>fbf3LcA0e7J6`fIT%S@ zNSt*-tRBawDo5))2uZ(t>13qRA%`Y)*kDcVAd)G)uynnAjFQeHboUYxW!ief?0G@a zH1dNQ(JI3%k6i>WsVM_(3yhI%wPv@8_PNxd_g+g7I|AQ($BjxUD4H)-HQ4v1PU0Jw z?`H71EZ*z^3&XyPig*$5&Kt+ON`|Mfggspbc;T5Kr}$IgHMe^dL3M8Z*UHpk6DU=Pqt=%)1qL+*}E~crIrkuW`<3z(DmclYZ%qH6CnGTDY!-E%1 zFeEX%XoySQegpVWYsO(eI#+KEs zH$w^Mnn`bG-hc-lhB2E4S#Z6%Bn?H(2|J^*5Ctl}DGa}N^R^g^Q9zkQ_pz*xw_uCN zv?pNzXA~Ufz}{t#y{;GV7(nk~?Rz}u zt&?C`-nuIErnBOyt(LJVz?#b@)I4@bu(2F`z<|+k02fe{Dj*RB#i%Nhi-qHtUA@l- zCQDL(nPm$oI9qSU;tZ#-Hs96j!~ntRQz+iT(`$*jYN^4Z9j$1iv6AWc)(4-cR;vNN zZbRhF(KjU9or9q%P2#ik#%aBaD%Iq5R7-dYUl4W%u?Zy0lUKtm@lzycNU#)`#qxFD zn^eb!tK+wP&yjO;z%`AM%`K2`z^jccH9AFEvkjtRVNOc4{myBp_BB01PaJtpx%0eU zo$%JB0!lyk_6-`b?h3ngG+Z3wr|=L*cH{|53FF!kcXvih3BheA3EPe?x<`U+B{ulO z@ybA@dd`95R;*P$8#RRCyOVMQu95zPpW&;a;BkJ2hw*6N)-%`#gm6<$F7L>m(Hw-F zVLra(DpEH3q6%qteS8OLq{cH#Lxi+Sk%^S(oy2sM*J?+)GraPN; zckZD#9Z0taXC~SZ^PV(%+{<}MPs6e%T`(QpF`>OXufrjW+JHp4sTSpyn=1}su87zN z2D)g;-ELUI6Nj@WJuH}$?phq~f()ZW`Y4lWTAbCmOERj(R4PViPRzps2jzSwNHUS? zF0XnOoUAyY%I4lo>QN%U_PWrl4tV2lB+>$xM#+)WqZ}NP927*ok2oE27183>dEqVx z;JKrDm!_i4v%L}S1`LDbWE@PNH`B|yQf(45~p!fLsT#*S+unGgVaQwgngf*)ry8gRt&NAwqRDl;vn&37AXk zbH`c~4Gw`ISFPU2eemiTMj2CHB`p#YlsdLYE{|Ssjhnm$Yr}j z#;4}p{G4Ph+L1IaVz8We0^fOdB~R7~GbTmTnnp2KZG>h=Ygo4U!%LB;#M(;+!#K7sssT?YnQ`4kPSN={jObI5H)=ShtXem$Gs>99JZddJ=;xv*M#1oJ9~x! zNkW07`d-?--DN^HAJyKcc5{=OkqX)QiriaSspgJg>df=aP^`VdeCzjaoH(DN8qqG4 zTgJY4ogOH$>8LTC2!6hFTTeQk(e|rAq?N}zXYakRGI0Sx5=&7oXKt`5nLbg5w-TGB zCu$NS&}?Qg69CT9!PAr3m?sGA5MCFOP_dKr=0FJYO^s_pN%Ss(6YWjKRO*mXT90&x zaQHq3P?8s)4S_xv$bd$3_K2K1hTT=8pZGowB89FE`~h5z`)*U43$~G>SdN&o7vfOp zTIum(?RsW;kLY@_>*91}Gzfz=UC5-5A2#Hf@6wJ_zO>keE-;7}x~r5q<$RvJ>uKGc zuW{FTA5cPUIuHTpZ67d%t=ho(dd83nB56ZPcs>KiWg1j3$n`V~!4_hYSY86*l_zBu zYEhH3qIxT004}cRtE9yVm**kXy#a^8b1L zh=+naG7V+PgxVSv?ebJXqITY<^v?4{T^q*hXg5e_M% zszdAE9SJ*9vopPW{+7r1u4HE{=W*-8C|9i=3%Mu9$!ugJ-$ScmS6V!hea{mfguQ5= z88UxSp7jd2;;L7rH?vq_vYZbD7P8}>=}o&!DEREde|Ur z))Ogns7fc-?k@w2X~%N1R=aRlg>PdOwHSH>p7gv5OppbEE)sENNr)FMPm}guN&Ay$ z6#aJTc}u%bnKa0h7TwnV5V&~OX*qm(9FhoKuV2NQaEd`)${M}$hHeBq*ts+91xxCC z+1p&jyojyu)zo>c2;e9H66mm4C}q-#bu`_(p20HBUJ5euBTUVi92UVaD3nJvZ&@c& zP@ynI`nVST5+&zg3Ph@kFl!>E;HufUcc|Rl?9p{q(L**liu-H{?{k&>+@yjS`riH! zh*oi7IMOu0K6K~Mr6x2s_d?Pe3I-ZY6P8yA`h=VZ+@Bl+2qtByzv-a7is#QTFpwUi z!Lz8;u1zmre0zvleBR?qSST9vV~6*7FdF zHg|lOYVj81xu_~W({}6)QJ=)f)F3IgyD>|CZ3%1Q{cJQyI3VueY8YkDx8hm#a9kj( z7e^R3*tBD0joJ&Df(3EoJAX5iD$QuJ@Yrwq&ge2IB(aucn}`=*LB_u8+s>=CQv^&Wt92;&a_P zQbo$Ao7$8F;SQdFPGhH=Mm?hg8`wu;u2f5)0x!|Flmm&R}DfUREsex zWw;ddb%Z(5f;~eMmDY=5Gh1{`sL?Sxm8#dBwoVFoB&v1^=t|&c;jV~jTFrx~SudDJ zEzHTU<@(TaE+Zx+P5|E(32M^3X%GS5X!0z%STKWuDZ0AodF@#NM?*fy0nl~b1qwMdoohbydLI0snw^^Z=C{GkHX4s>Pgi| z9Mml;vi7}{G@-D&kpWVdez6lTIOpcIwIk=cZmH0qP+4v`)FZ#QsOSFLTIW=aHgQuI zycGpxRa_{s7IC)Ab^N)4#iFgzo?kvMjcp8Sr~oBGft2Pa&izoLPZ2%vIe4hNTxEGf ze5^d788y&1d)C-7LBqN(JjxtSHFdC3YyEuZSR>~(Y~>2aBdQ}q3E1bksEL;~$&sUV zUP$OS0Y*&%WLXc*t>T^w1&SiRbV{Egt0C%3!>E@9^N_+4W+N{Ellg(zJnm^kV;dFI zG6fnNofB^gN{wRS*e(?G1o~_W{JB&%wr)&`+p7%DXp>4;?FZA+*aZ#e8&7e<**QdK zWH1>{(K)a7O?I3qkPmil_tj}&L~AbiSLOeA^9d%qKN#tS^)1jvz1gQH=KBqf-Z9`nttm#+ZPFv`|^FlNqRg6Ua!o|&L+ zwa5#)9>8k_7#~)xSW8Qny7w-+g)ug2+<4|jAskU)CeO!iE0dq`fg7bL`8;$0c!*0U zD)Llvco?h+RvLH?Es1DtcH5#Q3lVi(j4rSTbg{dwcjVdAYgfDB*xp*Lb8uSEKi>-Z zdQuE9gnc#D$D?O!E>y%Ov$H8$_FNuKC(@&b14KG7c1plB2GvB;Rk-5;G`?DEQdG|b zI0D%42JpC5boI2%La|L=n!^O%!jtez8aahBqk0_ZQ~q?Qhs>`Si|C~e9!Uo#S9mE` zzO6@dDbuppEtSY>M{Xn98_wpnyMf>~*@;WHkkk3x{dJ^0jmQqqoRa1OH^bOUGbzX_ zELh1dK8u4hnJ!vel1`!qEmVqa>-(bq2$edt+EiOb#8|EBB+^$16Z`TiSE1=3fhJQEjfmqsEdlSwDye8 ztrs7kIWnTy0EZu`buiLfRM6pG%=Iy6>x(OTIhO#Or+P%;__jiWE~)?_A$ApU3XKwggU$!CKk0oOvBc-K;CA zEa;Dtk{_Bb=+tm*%(F<1)dL5|Q;0GGYw3A`%(4iBa7PufFly8oT@xpFt(~knAy32J zQ%_?{OFDX)vyRDcIfgd6cit&?5;q|Ay?mg}uzgRR-$SeQp>TLVna^--gD6nAp!vHk ze6E+p^1|@Cb32TmOw)Gk0aH}(w3f0N#U`q|c zBYa*mB$Wkk(CV>x#+Va=1L#SW&6<=nRJw36z40msTm#&QX}--IiE3;^B$ije^WNsu zC;H;bf!bbE8IE|0R*f1K`t9Q#+BZLU$VT5y;qcWX7X|^~qIydTj_G&cJvvn2=WAsf zkd%cxmW+i1NG?Q8=XEEVcB>W|Ow{WyGKLlW;QiGYXtuEwlfo>8@9_0rgI{dxA=Wyq zos?N~_mKv%K(A}Zh;52@J%BN1f2`@;nmVp~TWF={?fW1UCa3bv zOu53b4jFpe18b2|$vp(83SS00(bD%4Rm_FFoDfO)QuCg39_&7?WIiIa(D?s)6 zlUSevg9diEXEZk&bJ6c@&tqpb2RCqQn;hjug?g0mj2?R4p{k!G6ub_Ge$MnFf=^g) zH+bI;TzNK00MJ16Lj(uCD^dO-+NMX|eoj6Gx&0!=UIx76Xy`bz0|BmR@7hj(rSsDA zi1p#waz$(u(=Gw_BxK`VS!5I66Vx%)f-_v)+w;|~ecr_5TK=B<=)9(j8qEtXPtnqn z9(j2T{_M6}*zqC{Zwb|fnj9C@4?HlyOZ+)Yi6?h4Y9t(G7WLB`bS{*B%K*O57oeeZ zibTt!xu3oDM?U!08Ae4xnmHHH3f#0yvKF&>q7yk^eKrQs&%FXK>g+TUSpZe$y+`u~ zJm@K4%_yJPyG*Py0u1!NMJ-$e6r5Z)nS_SX*Ao3^K%lub-53xH(3vo=S?OiLbM%Kr zlayY`)S=5~;L6#|i=Y?$5J7N_+pLiS>trhA#cGIJohD+R#-pmrdG9<}>%@^8RL2GN zy}=ZGf&ee-am0(dVJa7ILMLm#134Us!U9v7Hcxy~(#dm`;DFURbBQKOuf3FQv#*$$ z%O#o5o(Cw{fR2DApTBr2Ztkjay2rC6KHx3T2_`zSbR2_eIrbI~>dh-T2*^GqDs=Of z!1tEZ*%t)mNzkTa>(%|$8m0-Jx8h4Ve_Hd#lHIYSUfn&QH)%8^Z?S}+I!91x(NSGg zX&6YTcEf4w5pt%%%t3~<48p_NFh^1?TnMAOJC#Xx8&n_V)j7G zm%2_}U;E>S-P|5NlL8&QHykAV=sKUKK41&DI=;Q!U9xDv^3wFdCX;<#yk3Vk?hx>< z^2q8^C28N3FS)pR28YP$HS&QZT#|@JaJeE?S#ogkjMwYK>R7Jx_lE20+`M^SP5ZUM zkdBq@ zGSfP5DK15~6zxbhwy;#smUTzCark9}vEe*vvGTT8t$w0NJHS+Tvg-Oc`O0x=4<_jD za1oc^jZS1dQS+gj0bWp0oP3ow%P1AEC4CB2ULW`@y6Q>4d9U_SzVe~6suvr^(z@<# zPDcd&)Gk<@yrO~xW)0U9dE42lcAE;5U3(SnVeb(Z=SzL^n%Y22;__19=TK=@(<_!E z-!*$Wrzeql&qu-?g^i5+B9HpouW>u)9J7q}c~F|q z;B<9L!Bpdy8(7*%4Zg@K`PmwjKR+e4qx5s$T{v>zUb5~Tp#)TTNLFCc0QStQWpK4+ zV|B3YiP6ih8gamE4i_#e(HLfLsVHtA?cGeTDmn@_Y^vw*#lwa)QDTT>=q|OImv`HCEy1jKN^x{hG zC>p&hlk#+j>zIC=JH2eQ093whVX=&^v^KR)i5E1%on_2^0$YzaGG00W85{OW*wcfE zcGua&EbdAkkCSW5r6cSfvQxzEWF;*c@M)swv_Xw~MW=bwe$#IVjTHCRAag zzG4OJY_tUGYIVFIo4wqbg45KvxqgvlPuO5x6X2XSr-K1Kbtx&JBnqx2MrZcBZjW_p^7qVfa za7A~3P`5+T(O#jxu6HjU3b6*MH;5J{0gKw!+EY8%SBWhBs-D{AYrqxJQMzh>j$N-k zqjmyp*s&qrA(z2m&BM%Oyfk^e53ePRA=&CA9L^K=(PpP7yNgl244eW>l79RiblIdW zT-l&7_Pf{5EcDH?D5Q@#FXBdPVg=d-SU)~O>@g2uHpGbQdoYX+=e|}m4KE18m^r5~ z)aQH8V4ut9r5FK0@u``3HnDFASyX&iPa-Jk+5(PG!6$l(IU#VQg3Em!FPm>pnuD1R zUj<@|xGQTYg$Sa@=A9C|A0}IrO(7HIYiMPUP_nh{7I7r#S-8tKc_NL~xti9+|Ql zlmO>MNm-F}tMCNIWUUtUj)Zht`MHq*4d5{yx@YY`>PG6x4Gi|$h>3LErD{PQ6hV!H z9{7Tys0NH)%w+}3Sl@~%x!p- z1H1SYk44LYlCoU(4ExS0ggtA!Ya-pJ;#MzA;^<9kU-0ATOM!|-KWyz`njzW*PEJdE z*}X<@c_rAdyTiqH>xeo`G0XGL-g6x`$j2mX0@Z6f?VhYBoTYrCRGY4k-OpLoP~ffX z1G(3&NJtwpAmW0_!x@Pzb3${K&O4RI;dcg~2ijpqD`Q zEo(oS<`N)~Mnt!5$L*BbSF{0%1|x4(p9LEm5J6GuPNg>t>=Tk&7G<>6O}GiK6Ham9 z%+k6t@*+wZz{>jt*q#%fESMxLmnp|Sc!Nfv;kZ}sk8GqcUwWqzQdC&XRo^tr_8Y7l zz(H{2Ba+rSh)_D8v z)FPV7O6#IStP(~|-$}n}K?-B5DtRhY92C%Z*ltTbN6i&7p1BKA?N^uT{#I$|nY%aw z2i7$L9*(y@-8Zna{yYby2`t|nUydmYg~e9HQRL+K{7WzfN|?I zZ_pqfc=U42-Ilcj)|&?bvL|+jaR1ypD5s5KrLUUV%a$zYeTsUBY>#ngw-OW0)`g;A z_8|IY!~_DxnDx6j7Xi;To<*a3;ypvvyw_f2b>y(9RPYjhEv0rTP) z1G-5Tgob!*Iitz~Qp=B7qt|>+!qPn8IbQ8n?^@h!@GdbV)teiIqIZm>Dc3KoxOim= zJznQk*0yq;yi<7m_)z`2RL#lP;+PACbi++9wj-RBy`uFRb5zL2ZYXm=naC#6WiRZ# zqGD$PT7dJQSagr5ORb=OnO-0ln?o`Ov_Ng^P0FRV4VJ~0HX#9Ws?e=Ql%3>+lDoq1 z%0l3PfXzAY%${-i&*M~!BT6_?iG2EYktox&rrmLid}JS+GZl3^-T||fdU&icFSg<@%{#*P7WGv009GdTBWZ=@!DN@u8FbsdhscU;jm zuNWT-P&`EQ7Qenrhb|7iae*hKkI&u!&^4zTiL>{TFuGE`7E#SWar0HESKPQ~2gnEa zh*>Gz1M~SieWKpZF-J+@YqGh|cKo|Rd@iJ4BAp9(G4!K0=Acr2^q$G|EeATc`PoB@ zdk?uRR^Kx%&sKjYDR?N4YmB0R3ix+Wk*@Le>nPdAO|4o@7+B+ng1%bGGDr+hF6K=y zq*qxT@`H?1v4!HzID##LHrEtK$HXPb&_g9ZH}b?+bCz6OkFGt_K)9RTG1o3J@=~R> z-!9h6*@r=SOdy<|fmptcPUcH>WW=%Yh+IWO7^(DJt z^|Z1#Z9S|J2xm^wvhn~kT3}eUd6W#{d(}O#qeD`Z6}vZ3DqCeb!Z2{#+hmpMlHA8JAVNSIHEcsD|T_;I9}oMLoenT67PX+%Zc{RY-hir^0f8 zOY*^UAKQc3`zP)-7 zhwpNvliG=!uIF(c&Zexwa(YkDTB*DTXoT@>;cYkNh%)61qTx|hW;cmYY#o2lT`Z%j zr6wH&*~G4e03|7yrV>YNPa5rA+#FCL117v3sb<-O@U@7wd`+=MO2gotkvi1qE&B9c zJ@eIf-iB=e;$gb(FrIMq7LE=(7vOk7ZxqyCAB1|+zLp6~3zevLJk=Ms8in4vJb158 zo;Y&yD7xLdsIfDFDBALf)E#yqZB;1)O(8v4+2!}9>y@yInuvN0D-jj1=5 z)C8$h@N`a+Me?BKMzbzBdlTX1dj|c4*(J?cubl5?TT=j+6kQ#W0Z^|X;d@oK*t`ND zRyVlJQ}4C)RA^+xl5?T+2_$lXdg8SJ#6(f})CSQDF-lXi%m&I@zqha09;4mU;UNJT z)G{$U7SfIhPBKWETNW75Nk6>|QMX7)k-m*sW7-svO*X518^zo`0^ZMr=_z7f(26{k zX+klJq7lT{N{%)hj*8oNC>oc745>`Nj*w!(I4iNn#diF2EJ>4D(Rxid7ydnz?%hXaA*&jf%B-y^!0t>{QPY}336 z49q}xxA>wR_4T>2!h@m}tD-ax6_hw;=Y7e|xOUD_w{A4`ift=!xntF)A+q4g{boJG zQ7Usf5$Sn_%>gHztcbx(yAAs4zV}|tu;UPP87rHkQV+6nP)sjg1!$?TjI)jh_69!j zicO$6a@?L4u`bit_QuQ1LHHfC98AH8MQZltBhyY6U%zYKXO^A9{$4-a0Cm-k*|yEi zGtLEjFUBC}F_{;bb_!$f@eIPLUr{vL!2=L7pt5os7duvBgazW*jn3PsWR)u?}T1PFTc!0%D*I8@KP(l2Y2L zc#PFBk!kls)`K*1e7G@>wE`z`x3u0q-jrN)c%e%eC3mIghU_~&T-k(c3a(fjNBsGY z2>4u5gAd%zzSQMC6$Rf^qh){4NWpIm-Ma6<4)50K-H>YFh|R8@P^(Xn2*uNyF{7Nd&9FQu_jEVG zfMR*7d{@fXeA%xN!Sgi{9lb&-Q+&Z#_>4K&u#%l^GdWRfOCS{PPUfm;O*Ed@^Nc4D z)hibODX|(?@IVq{S>Z0%eT>t{+UeTxdAU!4?Gzf-3sIdRI#?GvL&=MI{SLrmV=ImB z6!bl@*+^qPg2l`FEHkn4`I*5>))BYeU> z2Ps+w3%F9h;$jEjrnd#I1?q}gwEOV5bYXUr4wRSJXG9I;ultRX(u>iA?gR{lG zM>i2DD7eQOiv6=TES>*!wC9wHo7uN>5pqm2U zgG6;`wRs6sSlQhm!+@FT*Qyif0Uld6Q&|aenLgw1QkZX6JuO~79TzsCBf7-*-fKmn zvqRr84`)w&j|>dQWn`Z=UsGV86j!{!^CiJIT&hq=0Tz%YFi2(ZIter0d4!J`Xv4F9 zZlg-q1hr3yd|Xy)04xpd)qt*al`SLa%Vp_Oo$NIAzt~?`fIo_3`qGu-rVb<2SZVoj{ z1zCx&QE^~x3!Y|?Z4@|z)d*k_J2g2(*ycuaiQ$g!i^Mm^V~I;7-gQs>75UopE95LH zh1J7F_mDTC8BcM@sa$57_}yw-w=!1}yn5tU%?seZ4CJpgm5>Nj5;0ME1~);D35$0W zj(Izr^yvY~%jMP0N3fIh1m|XfTn2YsAq6#J4Jbh{3i=@iD^*9=o9$6mHmA3XFQ#ta zE|R34aqgS|Gd+JE3kAvZ@Mb zDDa#tvRX;@4hPPjSkc8Us@tNc&ppssoWYw}TT2e-)re%n>0u6N7Gto&2}z0av|6=R zQQv^~lpvtu874(;hqT(GM-olQ-7gq$+24lKu8L{eA*dI)KlVUPX|};BhT7aZJg~n3iM|PvW1EepK zO86>oCLH+sMj^MUfVqP(&5EQ99`&gUo1DNKfQj@ed#_6orLl6bovpE|<}s}j8;s6b z6Cy@FL5rN%cIrLET7K^Ym-NYGv%-|W>z0NY;7BU_Y!vjlxR&d6gvci@2Fnos7SVZy z(ch~I2wZYF67MTyer^Ra7_j`X3!^gC$X+G1c-96!@I_~y9;m10mC1DP$iSqJJR|C&s)Lu8!ab&u5%&=6$*v^G%K0jjE)Ki5D2DfCIb$2htH*Vt1}s2*G{%RF`pUI8q9Bs7a`3`gaX zD-4sR^jqAwMpV}GT((YB*X&g7E(n!cKZ`^iAsA~ z6jhI+(5Yn7@5ahS2%sxQ`IhxPv?hEJwl#;2Q1@&BM)XW9@R_t+I!|9d+s4o;nt83- zIPbyZZ1!rq%|p2`ps_(*Hl%E~c)SG4(lzgds?iyT zUPxZ~gk9voQ-iK`=+(g`(!}u`q!=#qFcGc;dNVnR#OfVw0d;3vTKU_@*=Zxf{oa9e zzH?Lku)$I6{Gil~UiO$u-QY84G=6Rd^}WC9L{jqY7ATfOx}cbSxgc zKu~RQa;?o)veE5?C_bI*Zo^*jD4fLgfLv%v=#z&NjYIE2wZE`~IAXsDj0kz8S3$@Y z68%v)RW!(}oRE>*j;PZp z;!@*kv>9_iRFoBx_Hi*8fmFcEe`8XgK-EaAoArk9#rUC?365ww@93r^5(F`jSW`Z@k)4dv0$9x zp#5U7fLmEA5g6OXr;Z37q|(bo(dYFhBB>eR&h#D$fkmGCP(8aRyQ+=(t4os4o9CX* z0u?rLa2CtLXd+LvUSW!c7SOvFS(YV7KF0(UPpiXTyoy&HC|xMAi8YpxjNHX#iP}uL+zD&}E9(3!MPyr;XQE!c1>@2Lj+?35O+;2ex%uql?$@Pt~;FQYkY(Kus4-q zeb~mmWdyXvF!F6C8)^<;1!xMm)1N#Ve$10)8?Jd82dzOP%&=gcQi7PP(G_0p&xgTK0aWo;<1ROl9L%uNEqu zI?6&`m%g&Kb*0d97qQxBN#q;lE$SRqgO{FJH{Wi4_}Z7(AjXG`jdr}%r_%NaBYN6%6~ zM;?qaKT(gxDT}kbNf7L7P zG7&``+>7S$mo)o=~W89^0X+&!;GbzrkfrYb9dM`-~}sCi!j1diFOK5pdVXEyov z?wq)=i!K4{(crRnK&+DP(BuZeaog0EKB+e0%dYX^sI7cQudx}s>v~`p8j)ASo}EZo zLmiGw9JHw|IsoPRzGzZXyql#Um^V;)umOtCHMDP~Y#)^Z9LwCx%op+KUfNtpDO|cC zC6VEE-V4NDD?u#$t6j7eY$705Ee+zFKY0V1nj>gB*>B&27p;V;PZp7Q=ZHyi+$%+B zv=koazLcwJd$Rl-K{-h3M!RI+n!F)vOS9v;dGnqMvEm6sbW$`ZWJln=&VZz-I5h!E zA-0gi2g(m_Efo1d@xzTS`-xbK+i!4`8Fo{dL+?SpWJaT+7B!fpo6uXV1^pCK0k|GByj;0?ENNaM`nZUxYr#^l@LiS>?uBW+#dEsj zDbREM9pt1;T_Ac4K-;k9QOw3U_?%tm$=Kj6IfVup?8t1TukgA^6SEh|)zS4$*i)Vmz`RE;UkP$;7cE?b5a8!&4V zOCOQPkzS9IOL*f&X%?pR(+kq13avatWo|HfscH6RkXtp);1Fw^HeFg3yqyqVDBwkf z)1$g3Y|B@|3uj5G%e~+2g1^_=ZLhG!I=})Joj?I>%y4I? zPS-;o0Ce6n+@u#crqCf^2veK}7ld^8s{~$5n@}&htCiwrBKwMRe%A zcAk+=3l@PZnFPJ1$`(^4xlTxgUhJ%gp6lSCL9s1+2=_?oSYb-%#po2;?Nhd(Z0eH& zwIhY6dXw~c7s3=>JYUPsrH!{&*@k+Zc@T9DC7D9#f}--Em{S?ANmkdR!Ex)BN-T8G zHM5dSF_^r5@!Bq-(^TmRze`E;>D-AL3 zs>+`NxhYflHaOip?HZd$>DaIy*=!q@9sc9Bgmyvf@H{QwQu#vdb> zr#u?=OEHb*^<-U?7^#aOZB;gZDr9w%MbB9`pWh?gwFfV8^70frY!oC4;UOQyy^*Gygr6*kHM7Vc0M-(Z+OsEN1c!cJ8w)7!NWX8c}`D+xIqOt!QphrXld%x$}t zt?fP#UXC6`#>0WZMuzC4YtO^Qxzca5G1wLgvdeMWN*oUKgi?&(`Pw?Dp0;TT;G5K7 zFyhAo-sC_FpAADk7u>YdK1q$f|YG4!rZfjtz`hZ8%!AE+}}}Nmm5DoG=jI1~midHr7V6 zRNQ@kwT#?JTj64E-o|P2d>~m5dOEHOYl7Y}PAZ6ac6-08$K$ex@@9J;1h~RJe<-*$ z01nJWPy_|7ggU63M3^%NP!V&fLA@5Mk?rzs3|a@wT%m>U%(6v7sSgI!-FqQJX4`sA zVSV8#=f;z6?+vBOD@B`HmSoj9IB88YwJYeTX_wc|J$rISG%5XXP|(pxQ2c_meHjUWF(^q>pz%&9jc&^LXn{_qzMfh&?$oUF%$_r{>Qk~9 z&axsw9CK>IDXS#m*F^Thpc}pM=@^fSKXbZaW}>7Sz{(2Cqhx~f!&!xFI5x5h3?uRg zekIfOh6o-^ywn|VeTzXb?WyWM*Oztuws$4++UjwflolwiL?Fu&F*V5O6V(7sh6+C) za=3>>;tm5-kO6rF2yQ6bal3N2C|H$@E5RgOwt0#z#mo>~4@?jfEn;ZHp8(v3@|$T= zp|Uc&B%5L^firg^uI1vh+2L2B`D$!SND0)Vc!*qL=Pj5aKScFGV&IY%eQcL^6^rj( zi#^yi&CiyhpR4YGB2Uf4TX`=~s>|1LM>~5OA(YDQoY?rBWw%_zOab}~3arxiStv5@ zK8AhQ>rDAP1@dYG9$>zjZV5bEo0yfi93mv5PyyZ^#4jQssgu;AMPA&^=~FYyiN>Bi zhv>;zpPpjDtcs0B+e>~HSwmdtR1=P!*A}DiJe+)p0uWhZqOkRZIG0cyQ5oP23^j5ZdJ)WzNZ z++)P;z2n>4i&uig)6g*QdMK`C^wi-5vDK-DabMVO2C^~ZYWQ+7K_4Zm6iVh(52^Ls zK;Dse9U*Y2BMf%}!VO~&&0~-h0@LJL+Gbyqv-!F%NLwm!ZvapF*&aNcjtMaaEEE|; zFqV7B(vM9zS;}rNc!1>1%G0`p9a$A2eybjL0Wmww!Jh9$k4Smg07@OldH_*CuD<{V z4G1Q_CaoJ;U&CB-OI>PoK)a5xs%A`^7l}UW_2A?lzG03Q2zkhB3<9`>fDX2K@12AW zmE8*&c)AKF9Khhn59VZNPQd;&{B17Xm~9865&LEkGUo@yQ&CAGC?7s)F*p5!8}Gd7oO+^~6T zJ{yJ>_JSRgO+z;C3n*m~p$=|% z><^_TaIrSOn}&bBGeU#e22D8NC^M=+1dylpXw`)~Ow2*Po3qsC0+DKp`Q9bd zrO(v(qM}5{FV*r)1u*&@Y#c%%b1!9!os3b~MMrQf->H|uycg#tfW0mPWk&a?b<%UF zmk=O5WW2yHaW}{MnHGZ?gu!b;D1mcZd&}>9?XjO*KYx_MwntSf7Lm2nCyV!tHXVZA zI{^KwZLCt?G8bwjC|>T|YOkt+Xf=??;1Q+hyg3rNt3noj?<)IEHB^Kr0|1tT1X)ev zCYvdRgSd8r`zTjdLv{_jw;XtR6Tyun%C=fCyF99jpr_DYs9NKDC2>@;5O|hZ9OY~&yVa`l9S<1Kq0Xz2+`(o;-!vQC z=L*gMo9?YAcL~m$y9B!1J|qgMfZ{gO;=tn?NXzBS<95__2>Q7D(Q#1K^`qi7*4=j- z8yy92(q@@40n4hR&&YVwGfl~ac2l}GQo1b0kh zti|meJg4g$Bs(mJms}C7$Jvqm%okS|7z>|A%G<{RJ&z`ag0Y}0Yf50!H%agz3o7@^ z(&`!#Y-S%CE^x~hONl0O8BspeN#2oFu${1wAY{EO*n9V;9{AN`nv*RF;$$b$Ytx5} zN#>5+PNSobUCe~V4RT-lYoiC*K#9U|ixz&i2eI!-x=KR&3VVAgsKt2)=fO4!OiNhj z&oj8%Cww&@T>68nFyle)ou_~#lfiP#LS7Mb1R*WxYNo@Fle36`(tCvY-H|>$MMNU6 z!;_Ypt3D-j3Agvmv`Rea!5BYe@K@oltC~%hI}Rc=0cRGX{V`5*({l=XSNA9}%lFwv zqE9|QVH9rW%W}_g7u%LQr>@10As)?C(=flaLqBX1czGI*B4`L2Fcm<@nkiY&MYk$_ ztChL!y%=5c-J;ymkwf(bX6xGI7u)S&{Oab?{X$=Ai5)qLok$;Ri8Z~@d}8U#ydBD& zk6Q3jrqLP~PS4?z+J|(34_-g&F@2}q2ZwrrGDfExD1pYMAy0;gqCJJ*Mo<>4Yn9;S zGv9jV2}i!ek$z%F#0#&Zk=PILy)npR$+m+bYjNQ2hz+Nt6o0v*N=oJT`>ZjbwUTA{#o-iW?mUlirgG zl;?}8esQ5p7Ert*7o7#SR5KJ$1^{=JF|x9R#oS?F>W*N?P5ihCZcSpZ-b2PdSPv4n zTSvE*s}?iYvh@NFMe2Au*YJjHuWGGb?Hwv;jo`@Lb3IPWQr|g=doEi@frn^yYiKXx zuht>`dMx*527Dpz@k=LC6sdldAravekvrVW>(XlC{+_y|_)3=p&?TGucB#E}gbuGc z!$dzznz??0AUdEgsukdpsRmx*L+R!YWk?^zVaIB_0JDz0+0^S7@giPtyR+kUB`mjK z^i_!!tK8tec~8W~C(RyV7vYNYs@c6%o#!3PF_9rGqvf2J&xGx;w}_f{Vtm3hUTSP6 z4az;!)mgKGU2U5tzrx1qVybf!WPnX#gK|3ze~`NQ-XsLlv5S>egel$Qc@8k?6l zLvEvKS#+!626X1i;5Nt#j;!s)c=6q5Xdwx+iNeTg`d!(kIH76nww zH0(FVoeDnvXifWt;(~!Ps+Wh~q!iyaf(YLuCY^EgvLm!0#3M9fOA8so5UUfe3n zD>8mc&7$Qu^)g8x=swUrtUEUl&5gK$?x@mCPMFD+1~$}{1aRv2!toTI4ZV>CmR;N<@^|H>&pZRUWr7)%ERCqi>5he#ByCgS z7sXwE@e+(=&_dt9V^ZfM5e^+s^rlcmmWP55gySJzgB?7iEAv$_dymg;Gu>Y8dS-}* zN9Y#eGufizJ$a4HfCM&MEb1?OWhT=4!qz)f-_m=Ji#3qWA9G{u^?6gJ9`GYMOtJ-~ zQcCdwQ@z$H9%@Ytas@6xlX0U#Eer6i21Zjvw{CM0(x!ZF?>76TGF81ed4eL47iQ3p zJkS1ocla9_5TY$Ea%5NQ0S2DrkFe@!aG0kp z12I6m8H7h4LeDgdy{+pwBQkin3!6JH9U&9_sSTZ;XtB9Fo|1dmlA2%ki!)7QVTDNx ze9yQe&yi6i$OGZY#Ut0c1DdvHTi`b3x3*@9?oFVb+s^(dkwDlc)DSE=@|cE+cI_!o zNvQ;(OM^|@RntILQv&ttUI2rpcQkni?bnK;ugiL2<3X9Ufo3^6RBsP2HJeKiS}_^M z-eWYCJ8Oxi7mX&AuhZTF{|@Y>m^|Tg!jAWfl3ucfjjFkJUoh;1jyl>X8lAC|60@g< zGp|H%sJ))Zd?&dJVhTPyrwmMEp74U;lHROUtGFYp)Wc>1KK0S)nZDeLl+@$gY!#z9 z0(BsUH>j~CMWxZV@8)_IqcHrrC`h|<$pO`LR-Ll81dB(UkU!|xlL5i}I&@B_k4L!NJRojSvQ%oZ4(8|S`5L+9#E1|)UgaH^Ew6J& zw01&lJ*5D*!!`iHcf@Q$vP(~iAYy&IWT?{vy<5!Ww&Wh)3Nm(gylpf>b$y=pw6{PQ z6AS(6*SmuNK9}S5TGqHU;=Lw$4?$b>6?YQQ^P|#LlXZ36$N4-w*j#FIRzih_)806w zv`Y;O-{8jG zLHCv}TwrHNU6NI?iv#gcWw`Y1k(Ap&hR&Ni%>omTh|D460juG|_fqZg%`(d^8yjM2 zWcb!-ZHM6PGT$gk*d0h(=i0CzOOuU{zz`f%H|r{rzUS@}z# z?7GQ$@Mw%$Vdd^I<;F8@h0caxbC@_ty}s4Qp^4Y3knb%b;vF;AlmgVVe2+x)$+~tC zORvvR2(A{lb!fXN64)@%Ain@QaKS)u-hHut#YUlWf~_l}eZ09MEetumvlVNm5sixJ zJ_82vNO0;H-dd7w>Rr5|l~!D7kmubLqcQ8C6CoW{5+xKUsaP?Y`HJxRk>uLqNYEQw z>M1_MjmC}HdJhBjAc`m-cN*LfFv9K>5u%TRx&gXZRX6~%6h6J%dt>sV41ss&>AqEc zC8(C790fN7_?Q^wRZyJ}I7=8cvYs!B8q_lM(H^e7rMC=xX!88MtSv-*Wa~-++}0Ty zCr(g|t?ZoFtIxQNl%TO-&ZvQ3W9@0Zw}qC<1y{>H>Z@=8eW@CRe4z$mW)(^1rE(Y+ zqL1Z;05}<~cE6e9H}|w&EC+hnqt~18%6;g{uj1m6Fg65_dsHZsys-yfK8CRbltaYQhV*MQpdG`v>~J05rI($>z~C#TGPcrW?!P$6x;_u|2O z@;snkbBjFwZXcg(a({V`$)dZ2AKcb9SUQHlJUA}ZV`nGL+cg$F0K*hqtZET4DCxFs zy?79fRwuPLW383Cq25R6N%go!`hqOWS@(G|RE?uaZ0vi_5m3`aP9x@?j4A+F$>tmd z-R_k=A+W78D5*j7$7gj3i`qpPIh-fNS-AP^Jvf=Qr^e=}#@c3YP?K#+ZN0biR>~L$A3uW-18Y%Pgz+ROZvs}o3qiiVhsDIv<21c1 z-jI|h&x-=nZQlzRdxn7t#?C48?p=BNq4a|2%F6j|tt?(ij2c5YWP)~jLKB=AQgP49 zz@rVy@;J!FKldJ1^gBWG3n`lA3{cI-QYb+w4azZW+x~(aHB}m3iJ{%?5R;wY+kQN6 zN^&If6c@H-E>t=3h*uyVwl=a_gt4&7zQhOZTtYhyj0~BB>3LI4G>-aSjN~9`D{6^6 zpx!k0HRjf%ljlq1BF{B!2a6UpCh9#Y#o&@KlP-)xUP`F~6E={=$FwB7=)`Tp7W2@0 zoq5~3me?cEmEP^80)!bf;sc3R_5y(~m%JX6i18*=EAwh6C-V%HIVyY&HJplw8z*0G`OL(hCl-#7sQr6V1&(zlADBTtG5s~)j zrKb9nMR4TZyuCmZNA;wTl*hCP%|J&*V4BkeuM)u$%|?mCWepqBSI){SFh(^ylX z;2D!XZC1X1qd3e8r0usp3C8mdhaL*ziHRPR?&iGpNKNiX@2fVP`R7ZAmJ`f^M*-EobW&*4tQfkyT$F?p>#t}h)9bxwG&Ky+81 zt%h>N!rkDH8Lb(cB7+t!HWFl)$G#knuXrnVkSGD`ZL;LZVg|*W;z+~2HCw#tCtCX4 zUn*>LRb0OXXu3yJ!x}2#{!Rm0&1m0~Q?%;$gJtwIoJnXGX_ZO=AY16+LPxbqEzujF zo8T4zP6W)08*RA1KVi+_xikW_9Y{%JB&4}GmI&kaYBH$NL>|43bI6!?9r{=o3+_!n zh$=+bcX6z;;LmX4DJqFyOP|~$<Eye!&JIkHl#! zB-JT<%bGxr^L2;DTjn>`Rk%2NOaY=q*|10h!mk$1wee-(83&7AEUi!KMV(7%kho~JU)Rn!+#%?Sw#j|~xA69yuq@dZ8-(Y>+WNs9NjqFi&Nk_bcR z7+9SnUi#W?-|C9fO1@-6GkGibYLAs>+zINQi@D^do^p$cgAm=~0Y#*ezJAAPzVIHvGn!|SoRK1Mm%Ec>^`IUFKS?jz z*GMMn#a@^utaM^Jc{8VFY_%N3l*>0{f`s1hz=p?X?U&5LZZllH0pV z%9&4-jVXetxx8N908FC76MwK%9_A05*9zdM>0Lvi-1LKSxiaVn%@13jFD6L|b{>wc z7YLM{M_oNQiGw4!?YlSnBCDB1>bd$g%Z%f)Z6Reb9A&mWnUle3%!#nxYaV}EF5J73 z4HN+I^q#BT!Z_#MJ*(OGz?%rPm70y;Gl5_PceA!vZbia+3$lsNYUdg>$wJ8(A`(V~ zC;e=U=p}pCJuI8;Ox$WOWHEjF)E=rg7M7tj_C##Yn<;u;pupiS+d1*rn)vD5J!&(& z$N5~`k~y7P<_$hec%4bMs_l3BR}(>fMDqZo_B6Ez%o^X}OPET;&Q?MN8S|yiw^d-z zwIRFd4HTi+Lwbuf{Ia|8kfypHyKzaQmkvCu_hLgli~^|gp>c*k+{}8yuL7{NkN2c& zuop`qJ79xP7o}}-@MUv(-eo;xe5AP`=0cN2eubi(_Dl%aL5wX~8a8xkfmIRj3Tvix zbx#^w+bn<#(64a;o+P^p&GJDtJnw*(;%l1FC=9__VE}YqZBuMR3Paq!cZ{q}<)ca+ znh!Y83@3+hVM5i;9ul&RyuxP|5eH;E^dcu=Y03Gi>Gn2^khe$5YV#%{X7|lz3_{I2 zi$_ZSQXvkPI&OxM;Hgau!y9?20|j&a@);?FZrf=udL+fs19@k$XmMn+kEZa6+9G)B zYlHX5wfR(^=4D_%+eaxyb$}6q?_CaJhE@hqFV_hMCCJm`qvscWkefrKl56ko1wD0A z<}OAq-=K|_Eg#l>v8*Zig2$wi1~Ac!9>i=%#KW6tiWx<)SM(BNrz^YDCx~qQmfk#k z!Iq^H?O^Tws(sM|#S52`3(kLgl`^Wr*Pb?nlF!(srk;Z`+$6UD)1@Bh6IJ z%G*>4(T=7(qnx6)l4{)RYv1ELPZY{6|7;?UzCUE4+MPTD?w5(NarGWNd0`UGI>Qi{ zLNmFM%H1%iS}R2S@VWwwiN=UVI9&T#kvCODP8OR*sm270)(B<*jnUcIA`mvU+>Kw0{P zNSU>qtNm8a4%}ZBLMT?`!#jo9MMa5f4X3>V7nX3YGrS#a?s04g zm<#XvkCdSE@a38%+SA6hNDa zn!;0>kkU;$?g7smOu-RKs`%bJTp4{)dQokiN|k!vaXxDXq>#64#|!6l2!!EVNp`}I zgtC2R{4JJGK?Y{4=zBLvDFRy?dhs&VfOjN7njT;31i=f0t^t}w)#iSW{mOCA$~hNV z zB&4f$zO<4m!@b8O%L=z!6v%Oe2C9lrL(WA&6N0r*JuD757wqLZkcVj?8-LxrSEvJ1cl)Mf?u9#doV1?YRkEbIQQeE_Zs;C_s#0td~%R;;x2s+`Nlb^XnBU1L#NDRRUaDkp0R~z zVpR`|@g`Iy1AB}q7oES%Y1KmZ;)I?MVr?WFal__AWONNknK?jWO=ySY*9P#bhd+HW zeUpqNHHa_$ICkmhtX^h|X}>FIToP5oakOia$0>AD{Njw3iD#ebaf?cKIz6zN*fX7! zvgR#Ya6zMe((;Rv7$ z1TaReBb9MMV#$5eU_`HdgW_53IakAG zxsuC!9NQ+sdg52sWG4Nbm0AE{oVC%VQ4nz@M2J$W2er@j-XIY|>?^#P;8&X4te!MV znAV`J8Dgze!jspp)=kt#&FJ9^IYl;#SL(f4ma)*{PuB21ovXD2V4REwN3`k8s6 z+z|V`7dvY%S9X$^nW6LWZFYM=JZ-es(*u{~PR(K>xAk%-blB`J^aa#Kst!K&BJd?V zYlH#LH#NLZLEl@M;!+?5Y~@mw62Xy;2E~CO7 zqCfLPJYcg~5J|_3ofJz>>MLk}K`{>@kj^N!wJ)ExW^0=GGvo&>V}ac+dnZL0kB*)z zRv*0-Dw$NCvH~IlZ3QLMa+)ED7wMw_MVnb2JTZAq>6joii)bj6!h*Xx-v{WxFE_m zGehSV1kYR;ne~Y1oy3cWHCPF>6kxrNftBz`D*y&05iv5B<8fM#qt66AW0pDrHc3F- z1TZ&u2OPq=?XsjQ3SJJ;xilQ_W+Mi9Omz-OaEqQIIPRlaO+5Y%%<76{YgUd6o|=!E z?JDc^J`Nqh=f5nNQ>P?A=H2V!Dz~={M;ni3jGYbM+m-_(;e9|>kc~8Dqw_AH^Sl(C{(m*cNVc)!6_dLTYRnUU6z}agWQ)*YnDt{QaXpe9!8Eng#HDZG5 zN!D1LRHO@ocf3)g_ilUjInZ;HOrVOL%5GyTA3Exh z&r^_~MN$Ea3raa>Zm!czfe9ZWBe1abj-cb$rfKgcon&>I;9jz^T2lFmVD?|#P)@ui?@ziGu2Wb^Nfjs5cYzy zF{?32S!V8wseoXqoaYV@J7OrU&Q-h|j=TAYw@+1^bHFE#tTlQ16h#7EtlFOPdRpUC zRm_#Ub@H~yGqNcOhGe?VX5~V%KxM@&6WFc$Y{ZRWnhG>(b%NIBLK?G#%Jj?>b00tQ zP}*!m(Pf2t4Sctq+ipZDy7ugm%Q6jjws83Q*oe`Vyc6m6< zFC$^%WdJR^V+k@uJw4I4oep`KQ+8gM%#$4z82smByVXlu$L~ ze(7Yx92ULa-I{4D4}2??voG%M1m|frGvGO*4Oi%6Xr)2Wf>dTXz;L^0Zb&9r%&6Rw z;OfGfx|Jvem94<8EfQjTZ7#l@Y_iyn>rB$N)M%j1%~|zwb6>niY_UCia~ZD{wi&=u zuw@3~m5`A#iX0Kp0uF({AsfTg(+(PcvAStP{EKVz(+7t&pi9M4)G&}^P?L!G^ig^5 z3XzH?Qzg%DuE@L=>Wv&Infk@T7G_63Enbj)IoD|~9D6HI7uXUVLpRNxbt28Fbg_2{ zp1SH(r*SCr8t+N6JRi<0G|+kqXOA8E;hSIFTS;rzvf(z4%!a*Mz)`cq_@&}LIxo^r zJ)U;FwR<9-Nrx`3{g=VpJ-CcSpm zC;~5B_m#6e80SRYQ+|(9H^ue|ZybSeT~nre-~|`o4ATvfb#T3Rk;BsW!iip-<5g2v zwmHNBE;a>&y&N5K!bbAf(oA-YH>@nOXT_T^I1y7X^3a*z>O-QL+aU?O z)&L>9u!IPLU@Rbhk@qT}7TS%tFS=b<;~hd|amMzw8V2bebVobwf^MrxESY$^OO#*6 z%(0F(a46xOc-*@a?O=@2V^8%`zr6wVtZiZ^9=k>XGHc=&b>@RI5>uJfg|jFhd7wl6nNYCEevwE44<(tlAKAlPh8hA2;Zxc}!(wDBJ$19EV z2{kaU5yqOpQIygJ?~r<`14RXw^1b0BjF)z;vGX`3RE!M|xH%NTd8@=Rt2;(upBksl zR*}oK5lWpZ=oG%7v_r+H$Cf-`Lk}jw9Pwr;x0WRh(7ewXTQSa%_DE}YyBYl@0t1+? zz3bD=q7L&Q%zYsOp|*_Y zY+k<}-c|?jcj7F^iH!mVD;5?FSRQj$tISU}+0m*YhhBE4cDeZ(K0KfcLRlJVq`p@? z#6_V5?SUzr?X|jZw!vSRyUoI;YdNN{M1}fQE_!jnBc8Rj#@xL&30LM`yz`3>PQEv~ zjoUZ(_hY9#o!JDfGCJ+@@peQy0Pqoc7CEtzGj6?kX-Xs=Q)_m%3<;`cI*-~o7zya& zIFsKXwUTmPO6ZGbUc{3{K3vfd0-rL!XMHaljaXw=tsDS(Fu6iKMbAYl?@5wL`Lgxd+(=zIN8ADafg~ElpAzNOGHO?sMYESI|9@d=<2R%cv#xS<`AZ{|R z9a*#DdYFXStUxI1fmVshcB&h>zLMj2K-yIM$YWh>1eozX*akYy;?_q|==1U*aVMq> z?lGWlLgyVG?15M6kc#S@c5ft$Ee60VkAF; z2_RV`Ida5B-PDuoV+X;cHXY|C=5QGyg8bh&^k2K#{<`q0>^0fD!xzhSmo_C^D zkGPYz_QcE54?GATnzs++Oi7{x5eoMKLK{(&MXo|-I7`^J3ezrTO=qr;2+JMI+K+fc zmR!n*#9yF%@Ae1}A7_jGMT!OkZXFJ{Z3;lG!XBvkn=XJze8tz}^%z%EkJ)&pp zYN$u*_8Og_`somyj;wM<%j#1Xc_3Z=j%zt1KuXTy$~4k~DPr@|*YPd|t9TTM9SWP_ zVmE3R&_>2iXVu%&VmBxWDki1o4PdpX^vqi^*J~d3GmHl8(RrQMBLK%|>I$&xfe3Wh z2!}6tUos@t2tP+NsbYX#U4jZ>g6pDot-8MFP#v;+o$poqtkYY3#RZ&}i#Jc7y9>@O zGE7!CF@#j;F%Oh}&x6LUM>{OBjh!`T(%DWtr`PvPX%L$a<<2YYqINoLF+Ep=8!A?{ z5{JF|a$rO+uvC_$;H4~kA5XzlvszU(E<7`S5twaMTqH3nfG^3wx13~dS_R@IAD?VE zlg(Ny;B!_R4}hHO#Uo>IeLk%kVnfzt##MO-wX+MSBCoo#!-$h$eFu8A=uCO}U>uf( z(A-nH-TTec!w8aWIs)m_En6LR_uU()<*4?$@pPzIkZBWqwUt&YG%xC+hKi;K-c9@Y z+6AP)$)X&W6C7Zugu{l6>E|01DKKRhk)-}qkyZg)mmllOddMDkUyr7AKQp^Vyj0G} zCL8KWwWa7nPuqy4POt~9U!;70iq#XSGG2;5A3i{$*ht+osMZ-06DHbi5UgjnRaadCf9G5d*omvA!guK>bS(@^!po@=12y}KDxwdBX~6a_~pGF}&{ zB0vS_0CPxKp#;3}#MIYJ%JE!^`!b!pJ>c2Gh1E_v3#7UbAV2rWpeAu;QROYMt_18? zh>suoA|^Wf1k%7zS8k0&8kP`;s9_7c5FG=);9K@7ju35lz}$H?49{>+#SSxFmqUXk zs%i*lQ}m@EJ0%;k?YXd9O7~RDqiV{nex$O7H9n%pLf8}U38%e5lj}aotUWUISsGs# z9KI&JgGWxmh*PBM55VPB%6S#uP4Bytz_1$h_lB~rydRL&(;^IwvwhmH^xkD@7@pq6R&9K`WSJN79HPlXlGr-(*w3ap95bJ<-HV;94o4tr?s<>p`5SMN4vMQ& z1=0&te(|g1(i#mbqk(5lJS!oR6HOpbi_1zYpzo`4R}F#3#R?#(Pe1t z;d#VcNWRiWld$or=QEZ^68nfuup1J)lXf{6jyFYn11*BFk2RD;dw{cC9;pt}+N#Af z8{wsR1iWk<)P}r3d-^ zdfkv|#$S;ih+J9*A)Nr)GfvxVsWK;S6b1>KeOw#c9=ut7CRjHz3lxc+leQ8u{9a`P zM`TWzlUNKn(MuUrQr@c**}`XF>}!+FB}yW!{!1Qby#56|P6H>B6TIXl$|+tZlKMW+ z+~Qa7dg`DO;4Q(kEur>}csLcUaMw6^y=$s(5YV!8y0L=TDBbgosOL%E#e>|O$~8fg z*L8H^_MXocW;)R=xw*S4*n1eN#Lr7RPiuTEW|417=fPR{J26BwGnKKC#l2F5%j=l6 z_d2HD4ITCqCLD^c&~r_G;3=dJ;m%kS_q7^`SU3hbaR_Qw!3Q&m71Tn=DP}`zIAOy@N|nxB?06*DT>|9j2#~(O7$8O`ASb$NQ0c9 zJy>6|jakDHrwqw?Yre`A=Ho^cnC`7;vi&Y-B(ssL_XbYKN0t_2mepDw+^lQn>80cX z32y?9fh&-C6W7{V3#mr#ZVR=7-m3Cg`nuSbaedy55%J&FO{`m1;%B zalU9SF;Abz8lZ!mY50bURHvj|at8!myNev6o>$fDxQHfKw0PGB6GkO?gb`}I_IOxt zVSP(Y%q%;WOU%<|dHPDSe*$a)sL`_u7Z4a#cR82d#|W%e()o-tjtRzlWs;NQ@vS+i z2xH3|Fl_gOJAAa8vsaTX8%}~PVez1+9*u>HZZTg-n^#RZ$Za&y3sq_xaXA#zp%Hp@2TZHU%`W9t)O8`bwE$Y zIGO+>-i(uL!DD8CXJQ59R~LH1_$V|ps$3qKy)EyuzIJ$GetAORG+gC*3i<#qgDiuu zl+xbP#_dyEb9#d4BB+gynQFWk4=|q(K9fZiPjsBRAPaj9L|~Qu5=y0^HGtPKBwwot z48Fc$ZFHA#BYo@CE6Oe=QFlekc1)loN5%e{OyCNN4J-+e@5Bm|>6z5CV&(!z>>cik zH_|L3II`G2M^N1p{H$#G1&Yo@4o`r*czN0Q*s>@r9I*#$v#nXUxeQgyrEZ1pv^hXp zWc)Qszl=0fsL8nI3fSh%y6AeUknXS6@R_}r$Yx^1c5JY){Hg{Gp+!+Sgn%?E*gN3+|CcowcfCGOy0ZQbtvlP z49YFI^Yw;c_Q^3lxdI%Gu&u%uW0x}L4RuIW3wcz!Pq-O8?ooLQ1?&67#XoV$-I1QV z@tEypk;A`u0k|h5Xq`1;H@j;$9$7I5)#O@!CRR-o1(9jR{)}Vc+{LLcy5F#sCuH~-WLvyPdM#Tj z?;XHyKd#Gqf_}#vUKZk{giX~}W!avW>7y|B4veldPmnL|Cia#Jujh4zrkz2$@zWp@ z=Zd#yTm_GWQFm_}d2djFA3fE^#+9?+XcBupi<-pE6zCloSrhK(AxL1zac=L-hb-QW zGTp1U@qn@1MasKRDd{w+e0zD?fcrEaXjGfWu)#IQ^Bj63B{KwrZy!FEBGfn&SdHeu zw{I@-aE0>$M*4fNRM}M`HEH9y(>0&x0zJM#4x~5!@Q|JGotUqO;tW{tTyTJ8<(%z} zvjD6B){(gHD->B9Vn>+_ah8z1p=&)f^;5ydErbQ25P5ta=`$PitU=C}_hOTtmZF?f z-GHr~-WImfT+i@Hc+J)8D!6_mu@gE%xhPr$2d7&F9!S;~e!x=Gg*4^dw9DoIy-%|c zE}ZD$t+CFdYXKkwSW?bRAiTMUl9^?J&qEAI^w=eTC1 zKVY#Xd+k|X?|twR`o)XX^6P*j3ZUg%-=TSb0fT+0;Rgn!@}4^D%h){xS{L^lSwlRo zMBQiMV{P^zdwR81HlK6!bjE=5v)Y9?^AUPs9W-zN*&QPrEI|0~J*!9)4O_penNGFt zDmyRgEF_2*mb{Z#8Ch7&Pj&e$VQ7kNgdK@6-#D93~pQ$|2RFL z1KwbjJsmdpQ<>V^E;dDVY4tZH+8_f*dT0R%U5{SVwO1tb3?~}Z5|OtSB$!2kK3grB z1vWF4utd8J9@osBKunw&vh!0kgzof~doI8!aDWfEM1nC6wB&%_+=g@UY|@lMKx6vT zBW*BpU~d=?1~j4yF%`2zG=Qp>)T+a#*XmZq=7=;2Gbz-#qeKHl_MFaiCs%Na?vNY8 zE7yl4DdkPLfKP^ZL~hTNZO-p6xr}v$DN*m1>&>ViT%i!t-UHFAGh?OUJGF&+%(Xz> z2k$EUbVg3+fhIQQRH3wl5Hio(9P1crP|QSQAbtj@MV2FOXirG?#(P z#jS*U0kf5X_7UmMJT7IfE(uHkO7O0P2d9NW_5ln6e+iSuO7@5xiS9Y}E7s+|hZx3( z4|4{lk_wWY`OSL~9-5<4C&x@W^f;HGV`%D4%w;(;;Fu9PQ;I*N^JiKv<_f@9Ah{KYFyvi7SHT72Hkrwc0}{@aqUM-OGLLu#ydJ=6fhonV)X?mE^Qu;FrZi%o^dr70 zA?(@feK3Gxkn7~|3Q9PwF^`J{30X0noy{8Y|F=(^xBlSYq6% zD+KAA%BTO36fQ7X|Z_vruj4sI^@e3 zq%UJ$iyQVRMr)+L5{4~FCHYGky3Kom!%|ePk2Vn;be{~X9G$66_hvj`0ZQ(RzHvG0 zVXcFeo}+%4{8}?-3!2qmy<6c0rUqmd8xW@SYzU!fjE6Vay9K+aEr~;Zc7tmXF3-li zVtRArK|vOYE!pEo^Tr5dS+Mt71C#}4rnO$dBn}?Uy#szP)Wz2nqdb{lpo@C0YWXcj z4OOeb(#fP?VmUINUBab>JiiWzdFX7-s`A>=8Ic8~1)HOtc^f@fTKCKe7+}C9>N&yf zXaT*!d@Rybql;-nf*K`WWtdI~=JAefBe)W{EruX=!uClFP(|P}P#+BPmf~>ZPCP6Y z($TlEA!wH=)&7#65GuiyRBU8p&#}nUJmD3!lZonw7&`P?yrB)^1&8HZ#7kk$XAxM^ zpygvHHV?$sU%$;m6WVJQ}ep@4ZZvDcMJ8-cS3M>rZOBsSMWiE-_xVk~zE!OuCQ{%vh{&GePF`5PJ!XyVa zHu(@$S1w8d>PQVUw-^T>47C_kUW-LR#WuTi+tY+}_NsQ*u1t%&9#HF5u2+m;j9Gx# z!b~LU;K3kuxIQznuX=AW4c|ICd`(Puz zU0|@*N4^jU?^1iQcslM1YK@*w*2@}-HQ<4MLM_CXjl4)#ia{~qvlI5l*3Bh7`-JjD z8?aX?C!Q*Fyi$MHMBqO5fX--Oa)m`+LBG{WYYwlim&v-u5@wNDuy}fSw;r$=UbiA11+@2c=95qm@ZvBD6UxvOQRy(fe)j#frcZ#=!(iQgdkCM8#Tgbpe^ zK@4uldEveT*|MY9cw(CGbs#s@)-LSUin#ew@k_xK1QG=GeGO-A6oejPZ%gySq9?b~ z%lApFmOiSiynRA$_IidBU9l{S zjDwF)y4AMNFG4sP0y|a1?g$pVPIIHQ*0|d2NT&pb0i>g9i>2Q3{$~Lh$?l*gWw}Q&~zP(34h^b=eA2#az*l- zFK*vi8fD2WVb?43r)N*m9~d!>ym;1X-EvRUMCbx-3Mu(1+tUJ9!-u@p^X_r0!9o?N z&Re&j>?L{%ev&A(T^HBI#B@82ONRXX$w7lY0yJLR-7SUFlJx1w+_3HB;IXJTb2GJp z^C&or`_=0je2kkMX!hO}zC@{fT&xEp;n*?FVu|#cQOHaerJlJ#6Yn(CM3=q3l%W87 z(P-q+H2cypD`75sUzyGs_LQ22Jwh{jV&b;1>0va=xLTT?wAD!`ozNqdVMN~?yU?l! zkK?5I*&A8DEC$ow(kySp?nx~pBP2}3130y@k=%#tx|7A?)w96V;``=3^=Wpg$r^@y zr)3n}@VtEI0qGiScWm1zCP!bFGU zpMJ&Hy@*F{+bxkocYNVlh)s;Ry*5NeJW9rFBTV$_ofV zaAnIoyNAQgoe2V4m;qThm<^{^af}F{$z?jy#l=2^!joa)8rFPuQ%YsjqyZobg!O_ zdc9;s=Hf7X$p&?b1Dzl%1k9%1^CivGFKL_>gO-8cOb^ob35CYMIoR&}`Fa9T41+qr zWdyJ0@JTMe07$re=4LRN8wPJt)x+I5(FwDFAB}b4KpKFqu;R=Ej_1~lsE=%L-hzqr zEI__ysFLfeS;>L4WmSP(>v}d#2g@g*Q_059cDLi4UiNt0+uSp?+|LM87>a)v@+nV<8_UTHM&f z)YuoDwh0R?RiYja!z!^uaL}tR@RT5fcWk8N{ptt~GBjxehg_2NDxN+&GwfJ>10|NZ z`{d=F8cArgDs;Vgjv0WU0KM@rtXmV5F!Gssc@&)H0=$xo+B5Aw9T6-SS};vDPvbRN z245T=Eer*AzauO?E$atPAQ!JDCb4xAXT!iA7$ekJF3}zF5?9^iI>+6TsutG#lmYLH zfqQ2f-hAP2xPo@cONz^D!uz4+UKitv&WJodiAinedP68~L%YUs!U=7cqm=q8JkQwZ zS*f3SzUWGp&~r?p_tfUGh^c!$;*$5IFH>#J(;=jT#=wD+GEI?Ub~P$+(4?jz*(Jq0 zu*>%zX;J&&I6ac)tmT9=ndtdI;j~1!aKo%?0YC$ z$<#%pwQqxuDBi}xRb$kg7@EGfXeGg%KmyV&uK*v(;FyI9g=}hR2`o}Dn%Wt;R~)Z) z-#bxD+kmmLSMI_3njnt|F(ToFx_}Q}iL~E) zArF$D)l@BTGnE!msH*8>fD?|l4?D9!G1apB7(=)z6VfgL+>*=56Z5K-Dj_y7UncV< zr?CpTQ`z0~jz$ft^LLe*X!mG#bw~vL5{!`by~mextpfrJT^O=#h60_4;akgtv7L*B zj}YKFy*l9Wi(yx-e1NYb+3uj^U1}Sg*bu?EuGg($Dn8+Z2x0+Xb}N;&W?2Z(2diiA zrbUbPR_n1w&BLMhoO?L^K^p1^v}NE7M6#sCv5mc|M}u6oE`23hmDy|)_EPU%T+`bb zGtP%4?_J+dH%b#XY6)R;hp+{9mM?9DzO(^d-dh%UvWk~^Om4;FgNM`^w(^%Zb-cUs zD!22+k~^D2;sIC*Xsl(xVO9;&o>`kE!}Cs2g&3iV+C_Zwbo{n$NckuP!RT`8XkJ1Q zYQT=Lc?v@VO~9+CCgv6t+002btrAwWMz@DZB{gd~;sWYE$| z@FZsI;utuxHlKCQd*w&)DEUUN^^h2tuZ}X7jR|kGk=^IqDwz5JFRtzIojGw^Lu8(g zy*S?IruF;@>$W=0X6osCs}B%RS!<9*yNS{&TL1%ueOI*Q3J}F1YPE2zPuM+4Rbe3= zrqg3jMKbiQ1HM%3C7Yy2`~dw8V@F3`Xd|z)=FB~HHm4=9Ky`u>Ti{mMhssPoc+0z{0Qyf^m7gc{`Gmp2Mu z-YhA`3y%R6^;>mc6Jf|rP-B}o7yL_{)UbQ%t_0p!Bhs%Mo5A{BnCC+=Uam+BFw|AR z)?LVICd`x-b}zA|c%9>U44~4e$Y2B0b6tZ*cWVHpFt(TE(QG32-laTwDpLXAw7XD_ z#7z6ts)t$dQ0RJu)oqq@3C^w+uIo(?He>9ZF$r!vv*A2h-sHFH93cppo!QeczI}2p zoWTV|tPN_B&5{n)yEg1LT{h8Of|F}X{O!dIwUj!U@!HzLFOg{U`T`Z6#cM-cnpAO( z+Ly$`jt!5})wpq?CooCv0d<3O$!q7lH;^_@ph_v}UImaX9?5IsVaI%&<>Q6QXD z9n$z%xSDg_T9y|9%Lcf$-vO*vb8%G=-HUch&T)PQyhw7=*EgQHz8*{udd8;4Y8Wy) zD{D(CH@Z2dR5UwaYnStCpNAEkr7}oVy@$!B7XBFC+|v#WlgQD!d=^0;5MMr%L!U}^EQbvY>uKzML0LTkA`BKOYL1IB($=c+^gvYBlq2bH{sAv zmj^*TW@Lrr9?zK(V=XA=l6!qag*o3v(_vP3(p zm?Lb8@x9@oYD1?XU@&AVfYrL>suxrTul?jqSZi#~XKSQ&39ogYGowmdr6W=5_8 z%)zLgBxplM5;WZ=pp5g2KzkAvOfO~*p6O)?rl7*Xwe+B_mCY3dzaAk5WdyFl?hVVw z1Wzv6@OhKX#1+7)kTb+0)Ha|Byh2mARy|4pFe)XMZBUf(Nin#m6}NuBy1w=6dYza) zf$ug8J)k{z*2q?0E4Y+2hOSf9xlw?&BQtFO2$do-Bo{?(L&P?qP~Jz6vLCPYaE_da zddEH}#}|wZ#H!_JT`2TM4Q<7Xx4U|bGw7{@j#J(fdLc(7qzjsc;!=j9v2OhaGjF|r zxgcO*>Y`=bAVgaxskiwa||8H|!Z3Ud?4PNn{G z8&;`cFxf!`fN{%u`ffTDFzd;iz_3mf1%86pL|P-p{Ytg4mB`BaRLtf8g@BMV6bBgI zmpP_i=7@pIjV&^vW+Bk{Lo4Wd&(mB-t{;uxU+SE@C7_2(D`&kCg{UPYTzh9(2E!Rp z$EFG^Ps(VnA#sji8~Pc!Qq>H$@sw%84pLRaBl4Fimx_}22o@XVp=_ez77?Sf1`fV? zzV~9bE=eioe$20NbBrQ6P5IOfboPV7C-1=wABi%Ty;~>KR(&Lu;@*bl^I~&e`;^p` zrrd**0Ca5U)^cwqOwL)M(z!AoJRGMj0=+s{^9f<5DW@Rx*nAxG-jnv^5?lA?3oN7_ z$ajU{56quyWVd@|@XH>3*)x}}k8j7{3niuZ6nU@r#VE|sc;RWan?*F2ZB^MYSc?*_ zXcw)|3!w!Y5d~pBM<}Z5-+!ks!5(Im8mRSPdvge0QI4`IgioS%2wS!ILuJjjS2dt3 zp}Es7=D-EGPr zU@&aa?U7fBjzDo0c5%=#hsCvUPympGzR#h{IT^BMESA*7eD_)OD>Y7%e6Rt;00nia zsnKOwp{i((YkeT8MYpl=o@!FR<&R^;z2m{Z_REcTWQlnl{%wD!< zJy94%(o6Wu9hzD!y=Q*dqM2QCk%o!1^#nDTj)tX&m3(P*Dl@s0oJJWOw1ceU)tlDA z4jm!CrmN||F|$`wOf_(w9#oo=f+7W~)v1_4@zy|g z24HrSfevY4^14sKU*Hj|82Lk>ez1mSR#tH@4m2vDm{Zqz+*Dz(`a;~2WJ}^4x$n}u zdTng8pA^};ft|`pnO~vSTBA9ViYRO(JKPZfb0IA_9^buFGl=1V!XA;SYc@(dSxuqWI zXsZu-O1|qo%q8jFem<@`#x_b_QF7k(ynqb_G3bu@Zn2(}4ETmT=7p#+4DM_mRU_)y zr*f~Mtk`cMiQ+Ar7q7OjmUX;i)Mr(2gL2cvtISVAF0-{C)B5aL9N^r9t`et*a6xW7 z34A8ympZnSgkqk8xv-@%VY8OkczkJLmpX_eO{y=wAXJ6*nE?P0$CM;JdauskJZ(#q zdFyKLgr*ym42oF6btMOsou0P(UL9~g#EgW1HcCwkMBS?7%D%N@Ezt)@))7Zy`C9aq zE601~*H4Y|bJ_{|bZ5H?$Ry7Az0vCe4Sj74iaq{H>fNOtzSh6wkzkTrDU#S6TVQ%5 z{90v8vId%`^kU&PItTf>nS)fUZKLRTV-4(*Enmh%e^iX-I61Fv=+&k=wlN77ZVP4r zm_5;XEuEBP7B#EKQmZ*laH}&=%^vkem)~sjo$h-{+)7NC1m(t1iHomBnQdvbdndaz zD4EsU!_QIX+{^PV5JUAS50Tk8iz-tWQk*NCc$ zc4%+?;Z_E1DnNAz3e{V>NpgE9>;R3@M53k+5tWjiV-ogQ-djH|X`?8n>+HTw7;P(= z&GD4Pfk701I{Mgj>vg~*xp&v>9m=Os6k>~n69UK}+b2CIZB7Olb(}$rR=CW~%;ROq zyDlhPIm${8O;Gx<)FE@abKbqu7y~x}f=e?xrGH0Yl4F(gV`&o%3`1;-fe zT>~A%1A8=sxhDwPDmmV&H&3AP9SsmY8!E2Ielsbw&kS#c?KNAQ)j7`u>^(=|aj~Cy zr)9ZX6lCBB6>1sbspF^06piiJD-F;2xmhl^=po~1g) z8}pR(<*dEprpPo~X^?8j9EJ{H1WkN4M1z!42-@Z-P1?Ogc>8!vVofYT9(S zB$$&9SiUyPcm@v?g>I-*TN&YMgrY7wiHh6rhM9Ty@aZ0uBAG5z!2lBQQdV!O&xiIo+<9kn|Enj@QqRLVv(Jr7=-nxY^CioDfdAt<~@MzA6&bvX=e z)e{}I$SOiO9Zc1hhZu;bUn-yeC8<0Y7$`cL0C>ej{1b|Q2Rt%hNFOEfDJjCY~ zSj;&TPG|sd@FFALy926-EY%dlp^-%uu<4B56D|ydCEa_9?j}g1tX( zdVL3&Zbx$;$RZ0{=L|Z;GZ7Y7!iIo%t65EvgU0A~Ffv*XxpO7?LR*%Qh*t{`3P+)zq;3@C1Q)jDx``DDAX)O=t-;4GlV`?z%T^()oaV|J|o z+$8qMN%TkwWRr2vlIeAu*gDo&HG_xyWU;J|J}T9i&S7zxK7H)%Ekn~Pn4FCcdM`z$ zRxVd!1ePj}vYa#6E7no;Q+b3Xl2Hw8$_6HRZ*y+Qn^>&OulBbOJ*t zP#}yQg^zAL1=w6=wjSD9PE#QT*q>JC#6A&bDd9B}3Ay2Hf-J z-X6mWBMh_}>xJD5m|uo9R-3U4Anv+j@3meOO{TKWfL-qk&ro>PL@$#l57aas4&RIn zHE8S6@jM)M;WJw1P2|`?qdFAo8Sr^8Pu$JRm(+Wi*NOEhHiHE-0!u|l8Z5O0DU z@1tGHU5DU2#h0ab&hv~iIbn-R{qCZkC-)>8NTs^RZ?< zxmqmdeyS*u=N5J~6s-j$9$c!V`W}I~D-8Hcakp2#GfVaWiqY)2Fb(!d#1!E$if(+4 zHRw{<6VMZoPox6IZ0CuETf%!gzWx?0Wn`YRE0POuA@GAQe~7>Qf%BsA>zf$$zH7AZ zD?~Ghk~gHRl=1}ek3Fc z({YgFtY=+KkfS9*o*uPc6U2zPI_7MB5fqGo+>BJVYC+gy#tAr|{ag^AZ0jlNig}tyx|}Ua3wx&nm2@g!wf2P5+_g3>%DFF?zb`J z$%1do-JwE`!cCivE&corc(_GTJb~%b&SDNr-$KqyXXi%`p$Xp@zXH{*ZYURuxC55HHPFm~NDr8>AX!6k9>D_v?d&_tMG_3^aQMekK_-aPEWim~9lU@;16 ziim0S_e>th9 z+lTj_+r51ZF)v(?mg;PVUihPqt1p4x_gCl8*Ka-KP7-R_i5Wo9$B~iFO)dq5a7ou? zvjZCV@68wMm>1Oqs&b#3gz-?{6y%aEz~#>Ndj>6C{*Yp8LALpMLITkv1?=~7lpZRT zSE0hm_(^+ERkDH(#9d_Zu!f!Noe4R1MX`idUU=~40Z{bbQx8Ufwa|F? z)`BIpHzKFcq- zRJ^E*gI|&#kR}CzcquvBb!_2_8PI2Cdui`6yEvHVj^4&gJ)*@$uAp`6eqGXhV`#wm znuB9yKxc;eHXxg~Hka|GRc7H0RYc_-GcAg2@D|6I?9EIxRFyVvYqoMIvb2eq``nhP zb^?jcMXPvOwJJFHxyi;pueTH?{DwNUej3 zP9y386$m|)EDotCa@%l!1=33Piu=)Z;-Kr$96O%y6PbK@Z*8+U8X45Zy5b~Yt+0Gk z$`V?p%5Ka&`AQ6Gsmq=mVW&jodBBs6iDOP#VWQa`q7SL{G0#V-0BoWT&JLD$W$1fJ- zWM=ARhw7I>44+fbOLTQFgl^b5e>n%~?BE%O{`@tAL2L6-2!v}yHuYZ$)tlzWl|$zH zBV6Yy8yU5Z?dqAbWb10#^4wS~E6DDn3JcjmSAW+fdV#P+&w%fc01i+#uV@z(9XB=$ zE77WDoU4hFMj9VSU z-cy-$fZ$_AFzCB{oUU^QQf}R3HVzotuB||ibbViDFV@-tK5Yp1%S24N3(O5hi`eGb z(2Ya2WpHmNs>*KGxqM`EMhG4CFyrij=9)YdO4o~p?Sgl+WsK54Om!5m(~jr0neOV< zY5=+d1zopxd0eri`YEdHQ+JtRP{ZA4ME)eOI-sdIg^8RBU+o%hRq`TP*qp>In{p&s z9->ahF)kdGRw$j3!d%jcMuyd~F-Y<@r?NhE*cb4+tzReIi;bsxy`2sZCq0(g>e;<= zFpZlA=AgjK!z#b%?VZ{!UPziQ@)71a1SS>PNnKB!-KQyFjuV41U{0e4Tv$N2^_hRKT15R49Y1k*Po(-Ty0Ps=?}NM#&4$=_xn zh?Z6-#2baWCHH+5K(UhfLOSm43w*;xR5?ioNlthEI`nPW;hFHu3@SOs2z$S)gqIeB zFQ^%I#Yp8Q&m_z`4mP~0cV_IYg^H0Vc?FFeI)rU5;;tTO(9VmXxWsRT{7WI{E5RSD zr!CoCGC*{GK|5DA{UduRzk=KWHF z2+QYcDsRAd>DhIY(RN{*8Wm&tY}*J|xEFV)N*{$Sj_{;PqUjUJW{+Td5{tNxv)=Nk zF>el<$y`8<)Pf<^2=B2@lZChmS(vioX!*rJ!!2?lJR=d}uAzpRh&#)p@;zRm(cqW3oo&?ls-2`pahf}KF_9MJ?L9;k7Mertu8hGrSsW>=yvql z!V+Y?6kn0@5b%3kD?yYxJ9koHZff*iws?@V8XX@Ol-VOqDe~8abA(ni zH&=(Wd|S!1^cr1F7OoCRV`=0i5Eql{Wq`Ugjj!6iRH%h9FzT1%wQ-!!gq1Y~9;K(1uHn7!U zi^^F9$=gTyTApufjx#anL>-cKi{@4{Q*rrK$)vPNEY?C3AhA{`JmLhmdifyU9$3$@ zOrSI+7u*?)a-$+PS}R3uxKKBP=Pe;!KX{Fy$3zJ(X{64LzzR#jR|>sxG_sTuv3$7i zvRsYf6nANQ@@%nvyqdy~p(0+_K>AMA!j>CKX^Ic^_7Gm{vjSR>$}!njz6VV;g<|A> zrthFIfYkxD(KUkw9*NN!hR`FK2Sd}dl!1a8EjZB3uFPXD*(zRefRE~Rhq`k!&TLMU z7?a#I;2TiWu!ft}q?w0#`>?w&#-&&v$!PD4Lm6xv8kvtl^J6wKa1YQFv!*S0aGpbj zo|qUqORDUFA__y~XFwrZWWc(kZ&RyW-&t7CB4Pp+M(T0Yv~3o_0z*8n2(yvLH3K*p zg<_8Mq=_SmxBZbBq^S^`G_0Itx312a*v(qZT8=kAAQzN!QsjY22WNm5v)LQqDbX{&n_A|Yjo$_RjT1(9XiSg-YZxbBxk%3l)A`eD7xt7bJN75ub*PJ*vbniW`rXoVuGcFXxu^7+U@NL2XfL9R~GI@K8E=cyfk@vPEy8)S| zGT$9eumf!Y*yzh}MFKt*zlWAxyN58F{kC z4TOFSFH32Jmpl_u??QWFbRA5Yz*(pJLOL@0vR<+)U%Zleu8}r5DOKkf#)zScszt{q zv=0St9jHT_EpIgAl+Q%?_Ki?k)MSr#2m^Mc`5{X1V_UN`vh0xseAwsV&G&SwKy4Rf z`FZJO8jP12!B()5GXt^}N{CsP^mX4x#`Yrw$6<;)_vWkxYO75ugi>4~bdQzpreK3< zRD8r9SNtHTv}3PB(7LCgz=<Pq#N_^twPOE82{y7F`z8aN>L?a{yKXM=3Ggd<73-nLrxrMF0V#av3wr*gk;9ip zMbo!&hp|-jdTo-D32ffVwzVQsdJ^S!s7bo(GhgJDy2|1G%cHCve`jRxUR&Br0}z!% z&Fv~!cthqAoD`Ssub!(%E=L+MVwYa0j6U5;Z&B!m=fyVZ#!OLbEs#MdB@lkWh_J`| z{M^T&>mF+t=g9)sdRPnbZg)E+8PSQw!HU}oZc)`3mgz5cSK(+ z7OoYysX4i^6Kb6nvWhn%w@_KiYoUDSRZoEZ@hgm$!nj1dJ{fb9CP(;W9&(|>zxVhW9E1lhdEWrEd1oqyj=tIV-it_LQ zEhBlwAhwYj1EfiIGRIzZbh}gvj~Irzg5}+@OA#SCIVkp)aL+Va`z${gT329c3n+T+ z%n_|sz$jRoEY_ar(+3cIq4895@kroNE(mc`LX^fC*1SE|bvMkMSK=L1&85LLGHmJT z(CL#~sp7mcQaoCshVArL&wRYpQjLY4)~U&oCtA~aXBU?Yszu#JbUvNcHU&dsd~C2OqxXBAqVfP zL93^+X*g@I57xF`APLH{urSw}qxlr$#Z_~yy}`})1{4l0aDVZbC-fZ{tmNL*QPhBF znKf0oljgp#?F9pzPS0c^mLnZQpE%YIxW_rZXAnl&tpIhhoAK$9e|f}qqG8$I4;iAt z)t&6IrQ*vcO2F9qULPKwtE;4#E|5Rjp(YOcQ=}O7p4kA1cjqu|&RV&B+%|e{YVb0d zfonnFvDDen#ofDjz4&OKZ*2l*X<%i?dGPCcFLTY^Hq75bB5oxOzDPxx^Hkd9tXQ$? z#j{I{e)Uc>-AmXGY9t$$=vfIevY@?OpWV_^(9T0)={RN@=XnYam4a*%`x*x4se8Wh zvL>H#2F{d)$ZW`ajch?<&W}wAH99k%EalE4<-~>rkbV;NO1w%gDpFUrSJ3rhnzTbG zDZEUZ1D_9ph1YdCzX9zd$ixu_M64cx)_F14^!6-0(>I=wYCyY!E4@UBLJ@9fIFwa` zY3}#-)i|0nx%ImZzw24eJ(-jSc)kw@5&OM&>^Z8Bw~MV=2;b4mh8A<{3RRg`54S1d zE?UBppV1ukJ|5^({A=6865$0Phc?li4?A{ERH_Mz!^tLVU{nELcc^3q|~9HRogDQW!Jh5QiYJ0 zChUC2%}3iw1$nU3GyGn(7VYv1|7j9q55GL(S8E62kWmbpDPe0A;zPVFCzQ0964mmg z32d*cW4u&S)H*>XS%Afx?nb=RJ6{1RT9(dXB=>Ws;7l|gBkzhgI!@L2WIO2axxw^^ z=oB+yW2@+Mc)KfGQ+>$wax8X=7nH)F^6ISvj?8j)9tfo_SOGmL=u7UI#TD+C(A&1o zfXVZGkucs`*QgS{vbKV9U?qaPymj?`*cc=9mEZB#rz zOc7FA>n5q$pog-LV*C_o&QLjaAv0h2yW(&zT|_c;-;8_1AOYjTC$(ZA{BmY|--D_o z;J_FQ#1%E5y-Ihqc;L2u(wkwD?o$SO1)k}NP$_3L^nj)?0qan%`xQus#xnqc&K5l3 z2pF-6d}aY9OFG~sIt!VXid*zt-G*Oikw@I=kiZ2M&QNykR5vyx23G=1viU%jN=yI( z#GS-DO+Hl!IyHJ85Fomku#4>@*z=snz-xSc&3NF?-XNKb$+dXKL&o=3 zsgW0i)wabcLd``TZ7t7b8q#0hTQ>+Jgf$#{B*qRzOM=0Iw89mIu3`Pkph7!+#K;wg z1kJlLUdP46!!Y4c>16&-KZEbxC^9;7u6IJonid( zdw>okrWm%3v6RU1#*eT5oRpx1V&(}poVC2D%a(i(=w#gkG$~e9Chvh5GF!jtwd&b) zpnb}vQV*U5yq2AsBt}|r?&{$aOa|gkT@;=)Xg0^`YxAcEy0WaGgF3WWJ1`1YPs{nl zY4Zw&FtE>o2MG^O;}N51*?UuMvG_zPiU*B!U%V!2r#jqqc}9J}DZzl=`Vwg8s>p+g z&`pkD+TMGx)4Q-rU1CF({4Q}l&vFjM(!v$5*mpQO41rt6V<-R=iDCq6E-RI~%$mc~ z$MBTw84)uJ3CHjmYh*7j#rBI;SQmZs?kYRQfk#!Ltx+W0;tn0S&y{n1uzioji?Ilc zodI%=U!-Y6%_v<}lY))QF@~-XkqP>cK<=Ikr&2`pp2U?P6O4^CCdln-UESx>`(v;r?I^t77)+_WLnCUmHZPYfn zVatq6BxzbAiBUv4l4e&}GW?|y(w9mOwOc*3?>vJAhdCk?JM3Yus`WJSCJ^+#icGF* zNIQ#tknxz+Cb|Cp#=R9zNhW;DIvC?!=UO}xJjlg_?O89E*_4=V5 zZyi`O(i<`3E}n2r2i6=2X6YrKCiB2Dg_(z)Jf;xOscu|FN{(XPRb_&Gd_?c9=eXFD z*bLftweOrU5Xt<#(V9DVz^Ihca5`pvq;6Jko+_DI>joC2UJSW=ZPJS(IV??LPwmM@ zGYQiWzYHbbxwP2_0$uhLXov@&>1!Ib1k?v!$1QK7tvExngh@Dpxr~#x9-O)tUI#ue zu`AQ8N04nb>_*ndy|w$5%A{ThH73E9dPHzmdTm{bcDL2Vz#14QOvlv_OMSIcJrS3Nm7XGNh%FOWs#X5 zf?jLCy*gr{HUX9q^L|c5i%e`&OqU9mIsvhe+Ymc<7tqaEk1~wJkeBf;lMkV%)W!Pv@H4W$y)BrzJbVPwLCCsPI*kSI|>J<_qo3apwW}{l* zVQqDjJI-!tvB=YtUoH`Rxnxm9qF=Y&K&MSk3yXz&k*FCW#LE(AW0#Rko?w;NTp*L$8N=~wnw16jAENv z+Ij<@bVwk#cBt#|OVK_RN}VB-PEGfxf@i*dU5@JOGk#+Z_T<2g;O$~QC`o)Hr)9nQ zxV!V!yr)IrlPzw(E_KLOP`n1?cJ^k0-Kz6SccLEvEzOA@p_F+W5F}Q|@q4IPY1hHU zy-zK6Lu4GDJ|-~rj<$WR8oI&TU696pjfclTTic)x=B{+OG}qpv*Ys`?1Xplw->B3x zi=K5SuB-GceYeNPqwKewe$n7>7gqcHPECFAgzw=qkmGz~qK-imxN*SX>sy6@T^p7Y zcczK=*zQp1SSE@fw`|tB3D}lRpTMJ#cP-u0%QC9jLyOpEmjjbNeJFQG@2O13J(&&i z=V))MYceKLE}slSDX&!-$_oP@cTi+W^q{L@G%ebQhk{(&Xg4W6)Ot zK~K%%DYLvsK&escuV}k4a_3E<_zUYGntmBj%IQubjFmDId1L1^)e7%WwBi&a{k7qv zgz+3h0%?DloI<5UPrx|d!o_8UJTRrifR1Ykb?8tm@V=+BrIwLwbQe#D)I^1n`>2#D&Yi; ziCv}2+o;zTI3t*Z5!}1*E}vbdjHsZemWmiP)=RWKp;M-|qTU4+a=JC9m5Yu_0eE$& zrdeS8qPy?T#>#ZgdEZ1_CTSe7NtbNe6v@2$Wlk$pAD6g&2@-IRlsyQk(gbvGVWxKEs{mqM4I0GxL-Aq%gSZLb^X;apUd9o1T8z1i1WepTPAt3v5Rr5Xf6C8PSFC(1n=^Fuuj-tMHr<$e{gkuWn9Lm1~{; zr4w{{7Oo=4F%AV9P8Bm3+-tDgn-!ht8bpC>en0PH;bf-;&STTMVN*Mn9zAW%Y@@38 z2vabx>(ND)41(YeF)yVLqwz!6ryLpqW?-?XZuRs`IzmR%yxcN2I8rpJuewosuQRo& zOYlG#j1r`guF|y-=E1ACx7q0Df>PHrg=jRQf8q7QRksT>_(6tM)B z;~UXY#SCBunQ`RI<@Y#!_pC2jq$9ZBKCBS1Ygn2ENKDA%sqX5jd+}aiOK!)$;(6^W z=N98@a{9o0)itqZWc7)+I9RI2d0fEoRy{0xp$#ZBGIgpl9rLSZT+@ z^Wi?%S&`>c0&+E20R$b+$x;0x)Kg=m-cm(IDUO=y$m1A(kNW{r!$V8T$Mkr;OSwFT zVqbdcx)5mk$ z=IS{@TP=bkeGOz*DCNS6d^y7Yo*eg$(pmfgdp2Z8OOH$9AsrEC`ys#j1+je^$$-Vuvv+9?qtHZS!Z zA8s98KJ328E2A)c&n>O@uJjb|9F+=tg5LxRZ`V^=-ELh{jWgClCJNwu(-ZIETZV`9 zymSlEP_>@%rRO>`yk$m*4=2}Ta@eT z4>}t{UO(O;uMPM0h@g&x_G|GQS$nKgbi`F9rl1d~KnTV%SNI+W%_%ShY94SsI6Gp8 zBC2@VY}YbN-LplOY)t|z8j>6LR``kxgu29$G0+iYK?zaxjw?s=$(&SY!d3dHB|1Jh zL`#UffO1nv12RRf6^$!3m8gU1!Fy$ckBIU;zgV%SBa@@>2t5`#hG;}xv5Zm_GbY7k zjNnB3skF4$0E;wwS=pOQR-wibgM80Z3Wt&gAo;l|zCo-^F;qywcU;O7XfGBX?HEjxGjH8gkrO5W1e{4fFH7hu48*qWAW;wH}rDRzJU{)CznO zn*R5v#W3_04HAN@+QSw`sv_KAtJbH%%e$p*cuR&GgfdgR!@IhX^ zIjKfSJB+j)noeX2Bgf$gp#imm;ABwecx!VJ3=j0>09dd0nl7d70G$Cn;kW!Y`nES% z0iQ(zd)iZd83kIv20RisrL&bB7BO!fgc`FRKcJ+f7gX|6u0q(9e232u#io2Lq-WFK zMGz8&2~q-+M#0E2`jUmzt?9;#^XfRlXUI57(gz_&Wx}TeedAm&05jk*OIaE!lDs%R zdpg%RJc$+Qed%^6`*=7VpM!VbQb$G_LSlspp=yNgtOzV}iA;`Av}Z(B+>*NYQ6B{p zi-Dmu$L0k$Y(m`;*$%VtlYL5h4|`Pb&g&+0=7ZBaoBn ztH(*%x1s&_=I&HpBq9I?>E2tIy8>hq_u*yV;UpxJhvdy2%-t&RR&E#(#owr1-Eh$k zjT5tNxXi%o;>qY9-#!x1Sm$D6Ol~pJQHmdMCJG4Q6QTJ`=K7)N$cZlbjo+(bi>H7J zuR0>?;;F>wyH_DOkcTIW#xW-t)*-nC(&}&eo^9Myn!*`nF<;+_#_ofBIq_DboGu%H zvx?ZGz#cbuwiISO)b0nzoXr%-xf-hBt`yH_V5;8urKY^WGZ>OYlRiht3BaM4ezgqC zhmrEYyURk3`Dpto2kS(f#LzTO6SmBtQdxVCRIML(`YC9_o)jy_X1pTv>1o>F$0CR3 zJ4%C%q|Eb;~^fNvjYVCYo~Oo#f(d|U5&w6iOdHw89`?dM;(cK_1;uF zf~O5p&hvOY6e0OiMtpk8m!AweL`>96x#%qP3WkftqN%73Q!gEgCI}~cwQtL8z@~NC zUA=e5g`Jaig7Z08?-(@&-DpJil@?CIgUz}V7QLR!T!VGNN)BY|6!HlifL>%b@e)3u zc&jb1O&Cgq+#gW^N-3z#pf+?FV(SZoZq&-la7gbrJ}GxZPsb-K1d}drDXxu3EGL2? zj?tGvT>NS#TniXgtoFr_b zbOzWiHrZDGrBpp6XhB^|gBNAzL+P%?W_%foTn=exh4TOb1%G|hkMgf^G1mb zL2uy|C&LlmJ77cxNlu3iy4MXXZK2j>d35i&#q`CbtmT~JgTSpPwjB*K5LT{!OJgXF z#5s`6OQn-@QcvOXG>weeN5;xDi`r0yUqURM$%p~MOWzQWT=4-6;OtA76JEVLz#StY z)w30<2)cTV%%YC=KxG*;al^-F7Tsf@jEn)X6X1ze57=;@KYEAoMxBa`GW><4LWa;Y zb~=EW5hCFCDA#E_#I`=bl6~Gb|N9IX)eDQBUxyFud+5%u94u)N&PjYx@ZC z6{ak12_+6nZPjZI2Fp=$+9eTmiHknBf$@W^r7$<4?_HF1&zT^vi}P^G#nsq@>T@0# zHqxhDQ5TX$f)iDLIdu~8-o?f>8$YJ9>hj_lt+WEllbxYPCoUqZ8cI=eBQ*z>#~b`M zHA)O4U~^uMOb%#gG}MLNCf;*%CpM=g<}qWied}-Jge>u-ZoG}{8oUJ!mQTA7IivgW zhGvtAo6}=v!Mf>!0NNJHxOM_J4Cz6@NM9*<%c03+N;ln=x-Irb8jRruYS@It;|qWE za3_>0p*87(FuXX_Ak)sY(NONSgR-xJR#=~bOK4V%&$JD^m`yyMzA)t(65^KrX}d9>jrv2n0!)nX%Yf&{Gk z1kqLMZn3YK3LqOsgWWQ#TiI*I3GgW;zg&zAF_=A*>bukdfKb|anDc0#W+3_)x0zj} zj?3fF4SoI|^;;b)NI7^=RE*&r^)Q`!5si@=nFD#vd7@yd%kw>gJEG>}V@DWqm$tb# zaC;AmkI0A~ka;VBKgRc(7cp0*I+sA>rmshZsi%+MVznmJsti%%xZl01u)62R$7Y*V)CCg_$;C&(rUX^~hk9e%@ zP>jMit9g2DJbDF{IO5{Zci)t#m4~3RlHkK~;4t!pRIQ$&BAaJUp2zs)%}HJ&%*Azg zHzLgB1H*jsF!ltTun#S?q#wrc#z?CHV$?aN~J()9Bjn8v6{szz|60ywO+oc7(%24!WM74Fzdf1Pj3|BAhuD z^s&_ySl?B5>agjsdJ$PY20iAkd#!sK1V!;KTO-J(iw5OAfwuNa8s}leeVqn76OXVP zo?Gf%ITK-^K=AHDlF^acG$J1QvbK&>k5+iRM~Wrs4!3hdFN@Afdmo2+>}*&!JRVqJ z@O}HrWI}Lk4qh05QuN~Qn6zX@n-1;jI6CE~YN>DE-4kEqEEYpLsKz!n5u6D~%CdRSMdA#I<8(Celjln309a z;4oHt?U{Q|Ngpd{M`PP5=8nXL&EK!TF*L7suMxAB2}!j=1Yv2qhZR-PsHdXoOApdV`C3}@nQ|tam>OD6 zO84Z0%I?WCG=_&72;Jc8WK?QGNI|z~96)+GvuXUEI1_FbAcET<3={a}`p78QE{sQ* zR*yv@9q)Ky%K&;5EbT}^z0h_kGSgR6Cp4lJw1CnF4s(38?eN0qYV1A1xECn%4uJ+h zb(mx?ZZY%awTpB6@knr1y5i0~dWU8b{rN{bEysgZ+Bw`9Qq1h$Gdj2`I2* z#n{KR&+`V&UD*_}5Tc8NV+H+zlv7TR`mJsd4Q6H0;G;oU$gIgTKjukS+Uj{o}leXE|n|UZO@+J-w9)Lb13e=lat!%;8=*^Y)&Jsl-!%7%=2WThs#{^A@MVkLFfV{l!TEm+o|`J@RKm@4{uVJD52ecn*rHriyuq=fxZy zvyIVR1(a>Hda^!>9evP~8*E?(%=9%?F~FBqwkSgto5r0Ar`kT3^~@$PysS`XOA{+$ z^R?{~FrCLdb6vCO19@#`E}5|!bWV84X(#xXR*x}L+D*3j9m4u7xq!I1tGK}`WhCFR z)P-3E+dwT^(R3M)xWkn^)Ru;l#kNV}@%A=a=}FFDkU$!wZF#7QxW$m`oB_|^o!UG{ ztFo7EP0bv`qhrsy7POH$2`*yp8Y%q{crKRNGj#U(w2qy>M||~?l(agKL3Sn3?wQKO z5xJY05oxVci1X;esWAV}KyvZoI`C;FEi{at?#l6I*lav|LK93*M5;DneC!Oj3ud5V zlPg}8Q*vxu(eCu<%BourZzoL^;F470Z2cip)Z6Ex*{Nf`FxKMaLIN+<+|kX66Hv4SqhW;Tsy zgT{W8y^z>~lmjNn`>+f8#`vu?g}&FMtU2gTXj+M~*6mDoNl-)~2%vKcCdwy^EY>z0qnBN1wN(S63s;RN-mN=74B0re!ZJx#l2+RnH#yrmscCB_ zGHJ#n>}6r9RGP#XykNbw@PKabSPfTu!srpBy+b}pg<*cDsMJTJ`IZ*aj#8^~vbW{9 z_zh={LjXJ@3a4w!bHZFkeuMOKQ_IFhKo}=u40s$T7Rd>=;jJ@2SKUm9l*cLIPaF>? zukDaHP=@IuiQSFBn&?r(jczOy z+d=P`fW+j%$1CzsMK~HAfXN~&p?P!U9#c06yqH|<=*#=^3iZpY_M_5xH83W75uIDp zL^;Rco+0^!>GT=FTk==ckJQa!&27|&y!&|=3!NrWri;Dqts|B&%D=>F3Pe+H7K?pu zTAFo`oDYh&oLi(ZC}wcoQ0N;ULarE;bT=U-@Ul&ROh15A+1UftYf*iQ|j3*18vdZ zMqF*1eFmiObwsmG9WQtuzhqCrBvFFKL#?{yUQD4>D4tPxLOlCCyQx-%7*24Xw#s|s zl2&j9dXvipY3tmq%oY4rx;dL&K>C1bd;O|7E1*!z@?OrqB|bNtq;Yk8i2&`iN*5gc z#NZ0i7%a;$RiTQ>IT5+=)X|i1_m)_@0W~_UA#PQjGYq0i)>;}OpA?TQLM*;F^m^|u zmY>%up_pa(L~#_{17ld*2hZE94{Y7D%hWECUnxvJw^FpxyYuvMa~6D~9D6U3DJvVQ z$5G<=N@a>zQHkV=ul4C@;{;Ec*eY8*t~Bdlt+D}xTxs(KK`~a9fUe1arw<%LIcm=i zh715QP_$@3raZC(GVpltC}b#f%>1po(PeiTbaf9g*D1jWM?5DcZ0~wbu_n4&jFR`7 z^g>8p402ViXv5~!-!~rqoIzBRCD0=u(=C)v#!3xBK{I<8!o~S z0kt(L!Ezi1mB|FOSC1J%;)XPssWOwiwG*Bd(QY=!$aru=oho4*HTcHBdKTfV(BEd| zwY+YbT2>7^<&n~QR))SnToNcTmxl=yU)7e?P#QeKXNRE+qT1x#gXlzi@w_q6 z^wri2+T3@emt(eRB(!T};VZ=tJrwuURzm6)YT;?KdApZG3-mjy0X9%Q?pk=5uBOaF zkyo_})_Q5CK-qhd`egPMUX6CBmEwEAFqoUHWV&ApnL6}=l50aiWshC!~adafsx(@6~D?0cn}P4ix)a3*GVNt?U^ z$Ai){r{}&_PHpyp@4><=nOD~fhb1VUJKI@o9F6usrjH&b@FwxoV|pj)=*@~7Fe*E# zG1D?(Rv1UR+%e_GM+%hhq*k)Lr44$j#a8M}44#?#Vkiw{YnZZ>gg5Kxi?)QBhh^PY z?#sqfM`kJ@54_25roj2s6uq1WSTKt+S@;hY?x>{_y=JLgI0cmpDd-`gRL)PmGzdpfLEyD=Oxj9hFo@r2R1o^F_^Nc)#r zkG{-;X%=*R&pIEa6Va{Zt7E#3^Dt(T=sT4sz@s2|>s*+2Hw0a5!y4L4NU5$>Q5)%5 z*cw(kn(zJjHjFhn87||y8e;AVQJr%|AuX8=ah^vGm6Gbb*)hx=*f6~!|euII?ZpwJz<0UW%w&`blsfJs@Y&$#RDdsSo4wy(}%mv}f_ zP6zRp_;zZqp4;$F9p~aP#Ht;FVI12F3-x8t6fBsm;*A%0d+;vKs@X6ep%BP(4dlz& zXMnbI1#{~ySlmw03N;fClpw1&= zC&t$rd~ukwDy4S5IFFV~M)2Ty(`AguO;I&E`@XdjZtmd$!sGVJJ4H9~h_cw@L{=|g zei|5#m(?KyreIJt;sJZPi@~YvyyE>sb@I_!%!I;HK}^a7?5~Vw{9pd3Z}1 zjvV#IJ6@q)E*^lfW?JzvY&70@q%FC)40SbL^K7a&o(3o(aTnXDzbRr#KJqvaIuA!; zhFaPoF9zVqm}--HR|xQGJ+UIRmYEFF-!wr!AElNs9EbtEn||ls$)MEHjYnEIqnLyVIKn(dDuCV8FYc_Bw&RO9t=i6#vb-jpW?O1oH&=zAS z5vO%WO45~dvAb5WIDN)+f4Q|qyj|MS-IKUK_mWiomA>TH>B>tl)6_9J$nvhnb9S8! zxQOc!j&?vKc3l_C%LZo=yLHYYSzvaT9S6aLttu9qaKeqoNA%#1tog`7kRMGl$^?u= zpCP+wbR|&Uqx9o41&wzUG*l77%)FxMG}?|2qbFiyrs#la zgP4e!MMfr0qBkbzpw=8X_@=YkD0g4j;p-X|9bzpdhQubU99=-t&Q0ui5SomA%3cjS zmLRzBrb}?C*>fMYJG$#*AWSahoaUm=NI~@BQVn0X#(l$0sv4o^CQM9(e6PS9w0*O^ zOJ80uB)n1{eZ!g~@8Gdazl>WE7=9Zo8s|-wRn$N-&hKnQeRj3ll;#yrIh5x^jOoHi zv_82-ebM>wT^n!qX30fm(+X=pow25%3W=nS&hQ@O*WyIg!Dumi?+vmhDg>%~F9y}Y zOWXIj2Hu-e%{RiFev3AA()H<P^>a?l}PR_c%GtfIOsUtaQmLY0P-1ZJQriywR9ad*ta_o%~ z*DXRN^ge|!!xbn&0IpF23X*sL``#OL4a+wJ3_j0VmQ1=mxwR$71sI-6y=1Z@th+c= zg~_oPe$g^H>LP363}RNA=|rc<#ndMzifORxgnH3E&Wk-^R;gh zJ*<;^7?>RDxa-IAjK?v}c;mcvS6HP)t-2T6Y@IyXh!GRpPZ%gbf%TX=Z{15cWxegH zBU=RRa9h10t zU%{yD=z}vJy<=UmyE1w@A_gx-axrR2PseQt);z@;%eqE_6c9G? zt#rQ`^&97XF|Q!9NjPkt7Qe2wa!JkCTa!M;E5gxlSFlR2OS5tRvP=I@MSJMRbSU8TV-o zz@YJA7b7h;w{XRDc^plEHsL`UH0yH@W&3UMY72O|0MNCqjye8*0m*d+9rCW&cAS81 zgIxI_zbV?&@>gyyAM76j1?5yY!SkxU}Ofn z>Qdljrkh@yjgA=BF+B6Zig*a`krn1=(Cg7E6U=u9M!KXxPYT1(-A_ zw-Q?}X>!Xn7O<~!*0do<7ZreEWO?o->4teJ;$YcF5~6;A7c-Ae)vYtz%{K6DgZi4Yb# zz?{Pose7sB1KD@XE#N{I43mMG@6uDMz1wKc@(}Z2F&foQ;o-D7%(?hG3IK_dtZfan zvpwPWLY^}*J!WA5uH_fr1D)D~YdG+Xj)+16&y)?B8edC+hdJoI&)1^zmtE3wd8+ql z;rt=?qxDHA=j0s)4@^m0fh!8pdR>_o*Q%Y*lob{M7=;0^sa()oS%41RHj(Q(1fEG4 z$e)uJ@sj2|1@(PSvUm~Jx8peo7+j%iy2m6nkBx-uFsfW>hB+3{X`_4z+Pj1q_HmcJ zz<5%6G933FMuzh~d>O|(hG5Na?AZZo4Z+y4aH;`Dh-znrHxJ0cJS=j=WZz1j1B`fD z(7NHTpNLK04HG?gm$a_LXWEm4QcbsqYXbOsUUs_PESI7l)$K#ThnceLq$dR|I6T*I zur-@qPt!}YLdV9jC<)INqGbd8I{VV}%?>}srHO4u8l;EAsv6)n;rZ!#b1-=^$yeNyAxdyYRZ*i3z;bNTHDWNlNSSm=Z z&?3C)%GQxEr1J_A+#hzbx!3~iqW~j#Rxd-R!yIl(=vl0x!w#&os}7S{jK_&yaQe75 zD8K_s2ogDezL!-4erffXwtC&IYc|OE<IL(IxlUHac5_r$R!;m3T*pS3M&wBIS z(7=E;JndXQ({h&+sCSE)FYW<%7*L%Y@r)z^=*F$)(97+R@O@ldz^@>va|a&vyJUKx zpzI)P?-rgYWbdnSn=o#_3YmmfU)bg%mUfP+0j=PC7z826239A`hWK`0A?J01rq(_1 zsfCyRrI*o{UWRIm3sj3~of>2As(MZ@mv`?48QyEsls@ZSkm~e@il918OK9~T%sC;# zvxqqzV@KGXr!S{DE8?C4YyRcezy@D=nq@=~ zW9w4~OA!%RXm7by7zhO|gD;M|ZuOe0UcW6tLw-US+b|BbV*3K1dgGA|!rb&4RL`SE ziW?cJZsF$YFgi6+vQSpNsBU`^Q_jpyyB@kd>ArBWV-(>MK?gMkw)3_(43ya+69-|q z3Qg~S2(x$83Zf7Uz+3WtwDh6-UsB-K11SQRbn4HCXEdE+rk8Lm!`)0Mkf9nf`5wGe zQ*_i9-`;fKa; z1eife)8gU)41C!KV+A1Hu2yZHJKHK@Kv?|J>=n@Iy?w+YE~ShIZ|8a0)|1z#p=@uP z_n7Adhg3+?)lVUfM@ND8wmg(s#O#_2aMDOD)d4F83?9xiP17|wB9sd*J9vbsEIbd< z;_cC*7%vd^@lN!0t)}CuOxB|qrK4kr+P&wM{Omci zt0!xb6=Z4aPoXuhI)!ZVWTZID1-H8AY)DUH@}zJ5?bDR@n7wcZlVuu1JM3-&hf3Iv%TFwSGGptbo6#3!38Bals zdz0GY+|6r`xB;);{3qBk&U)v(m(0>E4imgw3@$Q<+bohkB0WRz6+pz110Zp1!K9X& z-84HNEb4n_Z)d0)4sq7r@xGLa*J>TdRd8K0yfWbTjz^o}YG}fwNA%d7>XpO`g8{_!6H6`dEe>KL z=RIb+mj&ZDH9y1&0G-jTmN9Mb-swpXy+*3qoq~s&u(o)%tWWv&9YBhWdjnIh8fU7T@ z5h8%6>SVx$C5`-*&zWgWT>*!_-h8>TdkU6?_wta{x5W3|U59}+nXUB%a6f&Imezek zMO|kD5HU$%*XSB)p0>7n*K6R825ZXf*s0qBN+fu>dXYiHG`*(eiTp@+YM;Ji2$4vE z4JJ2sA8&|@x;lx~E2nS4{7bOVL3xszSvn*{*=Z||=+Bwc2&sk!E>INH-pq5G>%h4k z#bP~qi?ikFSyItuuO8rwZn9J5+*fK57QLug{;y%l6WiMfoCs*?DwgxKU$G+4XjTCb zUg=TcDVGKs$sU|FMx4o1ACoS>lVn3S?=v{A$vDL7(>vP2l!gKoD_svKkWF1de5V-q z=;(A70bfz&!Xuu*6o#VW#_)GDBzG`y*s~B3Z)4isr#XBKBZ(#PtH`0WZmijxZV&)#Js0Oc{TT14NMEedIsR8GG{p* zhNckcVa=JKADrX6jlgYC?M``8hKqStYj?(BSs)M1A%wZw^PaK=NUVW2HUN3T@PmCl z(72@!ulPkhed{%ZnTAbEM+E~@L-RPhXa?M05SI(Ui|Qw5ZDCG`UQ|3cd>b;H&$hhA zye~t5We!RO13~O*#p$Y@w=L`)(nJv~J!MUZjY|Y};4*5ndAShh%aM8tdR!Mg^mZ2u zO~o!=9vE7U1hJ4N9^59HDzl`Lw@!+MDNoM&t$8tnza;zhvM5{d7@V;Cf`8Q_XF zX%0vRucqf=3{gv4y@UC3*5{$W^|On7ro`7yu;1Un!oCdahczz*PjC)Il7e3PooY#m zy=ETpI0*MlQXUG)nVMo!hK3OWFNBHEaN>i8gU4bn05uQ#)vA0=sn)Qm0p0~Uq z*+qJan$#hcp_+gR;k8#9WC%`2%+7>x>qOxCq(YzZbs93|6|aeiVc|SKZeNgbgDVK7 zi)7BXYK^&#J=__{oklmTSAAzx08HYt11~FD5n4UYC``wmvD5Zj)n$#-x_n5ID&^cH z`bcwLSoqtM4Rq@#>xHG<2lSMg1?hP~S{dQIYx7dIgW|;uWu%iEmWO*-$IY$V>tsG}gKrmez}O z2g?5BL}SgtsMQp zc|$bJll9y#E^CEaXe3_R;*bXcUiHX*MX?pn?x6|_Pd^_$ZP^hZ!k1izV%F=>okaTP zy{U>Q1B;usp3ab{K6+8SGOI7lXJF5{FEW%Yd6d|ERT z*M=p0l=iZ{(N;s|3JgD*~FYk(iV8^$ya=|$>MwE(k4c6aTf znypGOwx{}fU1G~Ejys#(0Y+T#)K_ak~@xwz5?ve#Ff+-$VpXaP_8)R0ufXl&W; z-4MhgiLwr=RQN>%j>|1(KI2wo9>MJ<=3oeY(_7EO@*dn{O*(Dd7vhjN3L#vrfKq*s zk5w|?J!CX4)x;I78PKcBXq=+90O6h3O}YHx0o^6((}H$6lFUK~>a=jqWRvDj7-E~u1xwq9k(+iH_!RHw@8+)9IjBU7# zjVsi1c~Ua{^Ede|D(uyss%lANmw<4Lv*sgNy4?4lWKI-yZLIk$#?90?tllHO606E^ z^M`cwn%@i3PAgU~=fH&=Z-&W-*M9KOszS31CIQ;;IA68TA)`Lj4bsD{t1F#HUAz!} z(14N->z*RSIFlGMm%gma8fUPn^HLWE*ChRBDdc2_mN%S2Cd=82+KMl)iy|}|U!Fa) zNRq3Ej84qyH0>2E?G{`-_gqq|SbC$2Z}vrA*QHs*8zi6(m34{9X=og+d{LKA0;93* z?xxCF$X~mX5~XP_57RQgraH9d(j^+v_f8A+u>29aGJ#-)1r=z%*tG)i0ZBJNH`cQRi#J^s~eoi($f{Y+QrT;=G55 zrx3LOMh~9hYokeKwq|f^j-u&`$#=xHk74mV z#-j3`1l;m_q}yFSa`Fl#T?>Zoc5_A&2qt+ZCB;2-CR4O@&n04m-+i^nhAH>yBpqr;Aj zo+EZV?v+#uD|wGPdH1#)(v)hB@D%1=9Mcr>#XTMlE*gCk2^8;M($T)?o8-9U;5p0& z_|GykBwKou-e>@2KColDaq?Qw^fHB~WeY3fZfBZ#4VRz*3k}-k2^3u1Qrsk7>E4lJ zOn8I5o`F737!GrjRY?}pR|5^K+E4qv)#iyD0^Wz$^jy<EYPHnbi6;v270!HUMzk zuu;13G@E0gSyz+5lh8#E;1vr2c6hf&d~D+PA`bm(4DRkd(19l=>wpWnBS!}&RFTyB z>r?=hM&TqzLh#eZM#iUQ=nz=$P6_)6bk{h8Ft)2NLpTwJCQ_ljp>%``fY06@j8aZr zoM}`UUD^WJ^C}+^2^0568L`__#nbFqiV-&o5J-6$+{S4|JOBeJXkvbo6`VU0h)Sll zoFW1<0u*=e+$Wx@5$}5!!cf67xk^zRX~pmgi-NK=(6>kmMZ;C4q}#|sF!}*XNWHGq z;;}gE=}~mN0bY3G1HQPBsV&<^EzjR;1Xq9DfVM}nZ$&2`4o}4taiMbePDVzSc8|A; zS?^Ie0)%foTXB2v)Bw*pX{aYLniwzAJo9D~^XPfc z@5Mghg(|&ZT^rGh=hH>GgEZIEDHf`xYKw8CBOMwY{#>u*;YbbI)8TS?LeFfpQtFP9 zFOWVh^Uv313(0QLYxG2Hz~g9D9k1uo#X6z~Mr^1J>|v%TB}js`)xUO&~deeaQbY@-ht$Q3#WOylZv z*03HYC?QOk&Q$XDG%9>ageJmZk0xy#eQ8xS)Axm$rk$TY!j#2#>$j7tnB9SBho`Wi z&T)4n^rmsby?`v79f4Hd$cQ!Kkc!vxxho=$@-Z0ZlX%o>`l8LT32JoYr8H$IarL4K znBN>b^67}CkBB|>CL}{yPFshqzCFhk0hWcM0lFT@)wM#HCfS6jkg1mEkDqzNwDnO0 zR>*rIt^z^QrEgfB=9RA~Z-})%q#)N)@#}WR;$_6XyJR#BLGxJdF^T4rH(|ifK?d4V zF1;ZNG2dvip0#9olu18UR2HJ-Rf@%^$a(j=S?mHNkQUP#!n=34Hl8Z-OR|91H}76< zVd~?Ihb^hkpi?=bYfb0?!6aTL4x7yMWF0?kl~*%mfOur3CIj6k3#!xL7S-gyfvXJ! z44@N4V$1JMtHGchXQ(@v%i}`1eamstqzJxAFD87BFo4!zMz(;-Z2j?YL6gidgINT! zTMHwuI>tMaJcmnE0CR!ogiTCu1AqloVV{!~og!qLjuhbrvl&8&CaflG`ykU;2%{a9 zCt){<60{M>9W4SElUw6}%x#Lh^bQr)3kzvdG8L@2+Vdtvq+vh)Vs>voq=9 zum|{D%jDS`IehudcQKse<&)Jx!*};2nTZ?X2L;fJWyL0eSLGf(lKapLA}b%_EoY)%EOAr9U*lb$r!hOz$|~<(p;AvxE?zKAxRw z&xb0#t`xw0_O4CsLT}`i!Ah8;hi)x6$(+LZCWu2RY%!UTaJttX#p%ga0#do9&UHAb zdr9kv-e_5ewgmy@E0XPNf z-J8-)tR^+lJuVf?N1k`mNzisq?ssq}^`_3*7n8SDjI-Rko!$WF6Thk;dGRuQ7^1Qv z_A#N>dF=~tR2(K6Lxd)WaV^p3I<7Lwkq&3#@dCZkqUJ4LD-8fgM1xD6MOoO|JbCLH zGc>ksm&hE+p{_3PRZEayG0^ps1$0kJM|OqBj6vM2FH3fZ&*UxxelZF}1CA66qPR#S zc<>^C4B~CS(|nlk3b0fjuLJ9_bHx$c(PhGdO%<|caO_1FV;(LeoxHY=T0DhGwlB{_ zzC2^QI7ri@P6?mKQ6Vz6cRZYRN^{LEhkOc8O9(njxx#qD;9j-+^o}w1V?mTAFIs{F(>FnC zS%8IkJCjjyt~U$=vs~d*>{$V`1|aHeeTCG5t2Md$D)^Q0Ys4LS_hw0GEAG)SDYuS0 z7ZEcbI^N|NqzX=g8{iAPhVzLN$&gaT!t@vQ?3(HYW6}u&4icuUx#4NM+;stRT=OII z(({z&8wqfTK9Mu8#x>Joi;;OU12vlucR*2^U&=$0UK9&@^khQ8UONEWP8_BdBtq`P zb$$skwKC1>b7KBDXd;TT*K2b!OwHa?^g@HzzR2}X6angbFL7kqE2=2AhzDOqx7j@~ z*0ff}+UW9tTxafiy{r=69p~8kOo9Wsa{MV%J;dX4_s1JM;^|yg)C#;1mdxiQwE zfYzz^0Inw3@1pi>`5YD{wpf@QEphnv9e_R-4C*dTFPJyNOs|wxRr>B19x1FIZ(;R<-$v|pkreQ#*(?-_S4c81q@YdLjRs$@Hp+xgot1SHRu zS@Z1BIbEh`HhB1ISmMaaA1S#6b-oU1YFyk?t0z-s+LX`Q{AXGQfO7gh z_B3zwE`L{<&JSu(=z$;&07~nu1Nc=vS;Ii51QBW^ytgD0sRbC*(5vz4aNh8m45CYj zjVOVqn>^xt+>J{@_t2jswi2B?WS=sqjkDIE!gakVLpB~Fc)}8QD#@#Nqz~y)Amn4_ z&=~KgIEzw&)$LA&x%VQ7joFuiR81`ymdIWtr@Agvxi>vgs(o6skDo`08u41_)f4b( zMGqW>=IzbPUt4h%}@^&Bv&#yN`5W8H)>qc=e@S#hZwOTPI z2IK;j+ZpvF-8^}?4GbHyQ)-POK$=(?^@37Sr|JY~n5#?_ophDbU%hf_k=|u8iwip* zpUl=EMtY@~rL8-{s~Id(Os&Zeltp*LyIV=4W0-}y5ylMY$&8U7HZ?Hb#tu0tz-V(g z^^0dla3zKTtOT426z_1XoH}G|J8`pbw!M zpDVJTyXu^4R3B!l-riR-f zpCjTz5Jr3rhKK!j`w_QxT{@#I7W$@j88=YH*d-UepePxk!+EZbe0z%|ai;Z1M`ks0 z;bF>I3S=f(u%67`T=FyM@UC^xRG6(|KwJO=)T?y?=qoHM9zr8P8GAY>B%fQF!IxFprD;EV^jwM?K8r6~(czOa_x3@q z$g3>xy9r{a@qF(YBxzjB)jax7HF|Y~o`6WIC{q-XV13 zKyvWqQ$7XZ*B%V0LPM{S8u*m&-oqZ4lVU%CGI(XLPcVRBSQ`DbhHHdmI8NRmdb^}@ z8tdX?({s557N})H0<}if8=^N{N*A?H^%qqpf2FhAu=@D5i%V7|)LOUjgxcXt!9K_7 z)qT!n#@q+NWD5XN_~;pfM{4j~RbxUJB-#eR2JX9h|hRA@r(cyfl#7Tvo((_Zj&G6on~@ia%kXieW8vwb&u2jL8k z>?MonZc3gRJ4}X;g9^?TnKQziCug#m+}Q!V_BN)?;StmcF|NCdn?d`YB*kM88{Y@x zyFM=Qa>w(D7o%&wSHxaw3LOEV;}3`+8k+GI-vbc3<(ejSI=yniElpzSl8!QdSJFBY z7eJ&_(nga)a6IUtSF{DXoILjI-Q$~TuaFT>Rqd6YRS>OHCaveR&?I!lJ@h7wGxh*w zrcw$F{K)8CXBF+7K1hii<@XA!9|li!cN3ouYtxj*fX?|y`YMW1c-hwCENLZz$Mh51 z+q3E=D&eb$-K?3+XKE%&ry`r_+AFG2Y0im6z0ZOk34y{DvDG95ibEDw6}hhDpQ`znYREO@h-W)gb|81`q1n6hYR{)fbPEYzU^Rv$ zoP!Zq-8EN`V^cABVLpmp07r}&2D1%*o;}?P8JQVHY_B^=3m#Ymr&U# zm3Uq~yK=TIQ8#-K(dyNT^LFiQw!%ZolKJVgg2{_?FmZ1lVt+>UlJ*r+W)F^qDZbr7 zp-i3DgeUL7SC=D6+~(;?ysohpj~F|xqs;6;kF>0|ef|n2TFJRxU=_VB3plvXfcdcn zDb59J0?mRLPhHCNG|mRFTt^e$RZPumadw$IMnJ03(~O>*ddSh-tfvNnJSIF-k30|r z`H<|@VI8Vl>#oJ*kd-p31j{oTX@_PRMXVW54m{@}l8f_mF2l(Lu~wn#=ZDJ4xkGP} z%t8Gult_K($sx8+7)xr+$Xc|)^x=8T5wUO1?Q`x*Q%Y4qw~2Lz*Y7=kM1vsF?5Ik1 z>TF0=<7ch@2Gq;pYSMSc*K9Khp2HyUgn&D&*O^z0qwe*goyNkrdLHww4Eakc zU&FSoBU`j(mEt|)_hc6^Q7o`fVjK?;(o~PZH_SQ$7HhA|R!I3yxK1AD4>@rR@vIP^*1(r*&put- zo#;w@kpQtx+p6I2@eG5?d!2}HZPa4TO3$RN+Qb2SNz;X-SCVY>X?fZe!shS|M$pd8 zZqwF2RG1Dw%84+EYe`zVKft_nk3wwGRxU|wN%Tk8<~k-aTRKZDpom8?V06|8 z$Kk-$%nTwu>x{stniBWkorhSycLhO%efUrhh~n+YSwRYuiW{3?OOwcyzguxj-g-X6 zrLTLz9#MDIDQ|bXQ97v^X%?^=-^ydSC++tf_|+`GQT9nE$_*4Ohw${$H&3oN6CORW zsU2@A;tMRE^@@7T1J->>*XU2426SWtW?b6S@h(LLh4AwTZVAz#CN(@ix?L`^H-7bA zVwNillPg?6c`FV05+??crQw2lxX|{NIB1+7){_`DUTRaS2Q6e3SYEIgdsK{ZG;gj+ z59T4|v(8|q3#wx8^O&7*psJUnrc%WEn6wM~?6BjCP4)zLy&%2v^e)!@`Knb)?dGye z%>l(qAkO)^oR&U#AfsIN#;P#)_{FngKLMS{O?z!=^BzA~el^d5#Y8j81k@g}*)$l| zvC9^0>|-&z(*g93N~IYxnvNF_x}-cy?76%qo`fp|Jk3|Jm5SMG4T!M=+%Hk`I_+8Q zhy>A2S9bM_vMFM0gXxj1s!M3(hAbX)$nAW-6?FhPK*qmt4|~ns^3IweG$dn=2GD|9 zjZD)vNUhndrIpJXuG!1>(09D`;e_ncTUQwAFWFqaWUJJ&SGvu~ce&IUGmg)a_MSHM z1n?sr(T=VM5E%ds1)VP`>}6=KIa#Dlor_t&G8ai41J5?YdwU0RizSu3c%@_L95pXd=Ril7RjKJIw#BQaR;H!m^kAvT z9uHT0C<;@iV<$3Ew)EoB)q=uWK2v_X!y8wqF0o_STy(Y!=}J838(Npk8C=C+EKEp; zP40-SAwo~1U7OdOBJo87pIeI1RMts^0L7c-qv(vYUDDl?sOVSxrbQw$25q>QCMk;p zvvIA`j>DcZU81i-;VO*{Z;{B`CFm86op>-0mw6nLV1xM}RzmVA52^+vMWH_Cs}3f7 zC9l|A5g?Os6ey<9Zr)OGE4J#YEUM_c2!POlJ?~eq77;|b9+#L>(M5xJ%{$s84H)L_ zRt2vys^x@0Z+tQlt-*1-(c&7^*F zIbd_%DiHze7mgqm$3$E(r?4F+W-39ut8QJhqc5KxzGsvazy@-T1UES0n78xBa#Q2> z!A$RCPr*|@L#&{;%`=?!)+3>yID|Ko^2sGN5QNM0mcDmZye~Jrs~nGe;yJ9+h}7&| zo|xTazZ9XU_MlJ#DY&X=PomT#pmk+ap(c^8V*qIA#;$^vMNf+EAd6S9>RFikUM@o6 z@OIFa#bc9B3wqJAyT>u~8f=Xx`x%hWz0`~+=N&Ik`(-kgR_Wo%6s>XbNXMB=AO*Pe zdpR3Vjmo}k3;MDR_L3}H?J?S$&dzd!VA?UHGrf~2hs(${71LD}>N0|-bJ&`za+NaW z@M=j59su)R5^>FF;c$nvEe1pbl@W{-7N>RG4#_-t5(e_GZt8bv`_Oi!4DzFu10(TB?$6%ge%kTu9s)6jKiMMuEBEw_6rYdDcu|3d~?j~=WwAo+S zLw{098f{s{SdtC_wW*nJtUymqUgzfIt7I+F+ueO4)v(XTH?3T{1%ZdvF{rW&$sC|d z(-u)4#JeJ^*2Jc53?~ZR|8HC48WD&zE}cMfY(Q-+XK~@uqdrv z&Mp;`+X~ndt{tvUNWD~%MM89TZx6kKR&1fVv9T3YfO~$;R^W!H#=* zb5PeAf;k{DA&e4=DeU>IAl5tEsy>^VwX}x_PBX=IKR~LhISA}KfWA9xEgh0iBP=oDU2C0?hW%9c~&j^NCS@}(dpG;};s}i0x zFsHl00qnu8iw%f}w`llm?k3gkp~98((Zdx1+EHqpdwx`>^r4#pd~R-a6U>7*uTDd< z-w7+dH;r$6G_`zXkSA>DO!hs@s!>c$&&?-T+g&q-?^SPtlJM35aNlgMzt^2I8DgMi z&jJHuyny-M8+Pdg)ZoVY&o>}EuztvXW~@ZjBOE?BKH_&lN<8zt5unG_uk{8`0v&}!-XfBtJ5Dr_ zIXJ*%KN|}W5m6uIWgRjuT5vph(6O%Xsp;N3Z)LdWxktc}P^6sXOR?4rH)MAg_THKz zVJSX!qJa<^n5v9>CW>+uY;18)m}LDzp{^T)k|78WM3qQ%_eIwwV}S9>Y$Nj`Y+C^V z(52y4lY6;<%sNh_B455+o1!+XmYlS?nUazy?cBwA-KUjy0?CmVv`I!pt}t?P8Svs> zu&INIKzW#PKhPw3M$x|H``!({;m9t2HA%DgTwWv1J9J2U)zAHfj^zgWTg#n8lO?Cd zea1xT$L?xv6DkJr-mRbIGdKxE6acZeZ(%bv=WVoJB)$@(ZZFYe@UFD?u(O2a1YW0g zCqGQ<$H<)xs);<>FEDg!LL%il8lN@U+UWMNaACyC^CX=~)!UXEL0fv@c>q}D+58^w zbCRlTY%sM@4wpiNIK0p)$ee{P<=Xewz0*0})Od0I0pdRND>k%BUX`S}n7e0~Lc6#Q zF@q_L{nD94NDLzZX&4`kDW>!*$6)-&#x zikB5~Q9gUGBU_zPr+PviUtYbz5t^PG;}MQU;iiE zR0At?XYRhUMb~F5tew}d+jOfPi{2z1db)egE{34j*~wrrxUV`%of!jpS^%q@WGLr8 z?&-CH&~(-w(H(tC^Jpz1`+!j?<}r&C`ex~jUHI73Wp;Gtj?~t&W-zq8tF!%h=%tD37(P&oU%|%6sfN{dgH?8oSZ(PH$K>)r zVY68j7nK{JCd+xe_ZY)NrqE{`4iRb9V2PDyqiM?OM#!07W~GTVp#1MD zp?wKwf}q>!#){em`}!&q63QI{#L(pU$yi`djVX9$R-B2$GJd0Cp zyR0QPpJNOJuWLdgvv!Z13LA*oz2-LuXWFZDuQ;qEh9fyM6KI&+>t>!bfNUET*erxb6-{=)e5z=NxKdn*>Hi0W*dO% z#k%$bcdmU0R>996zjJ2f+1k!*K18tsh4W&AG?cP_v>lkdB86cO`ShtAlEm5cZsn$f z#2wQyxFTOPG_i7rqcp6&&M`?mW-<~9NRBzbsJ=98M(rat21cP!d&~mthXZc4P?1tO zJ<*cj0xVu5&l(_K&yBm$I&4Kca}8CgjeD2s@PG-Em0yC(1szY*c+XYxa>DXf{Oa6W zP~+;CPcQPZrN8t@)HFvzgxjF3)AD<7)4hq{DZcwMj^xX@5mTup_lo0sh)n|osPR0# zTc11}HHUSE{{|nEN@58`ds%>d?gW(50?Z<{|F zybhE)uL5Gm7I@^twaSR`ETq>&W(LIv_EZ|)8?`b8OUV!{@Rx?eqpv(y*-0jSybmk} z`*coh83MuQ$uat{-(r{_szS~S!ii@)n$-rOymAANlXKrCKTiZ*xaR6E?q#t^W!p=> zSZ&vU!`Oc74?Hg@qm#9B8=vqIxlA&wA?Fo8RCwElt@`9Wta-ww&Vo9=B4B$QsIUrM zOU(Hk*WI3R%t&D2DHFb%#cQo#d)QbOj0-Q!W*_LF(V^^dhpxRA$ifB}WQU6kG;Jj! zdGEI3Z6volg3RmPDE>{(61ER8d`P`w862F;$&TrE@hzpsH35iqI_fQiYNvZTUa95l zfa)MZwk#+O-m4Kr*P z0aE#e&d%8@^HGMl#O#=IH8mVtM>9)_%vqUz7>g;T7`eSrBO#b<60}e+y58Pi&dE+& zvPesT2_(LQefMAk_Pv=ENGH)ZJ?f^;XeAZ=ybjb0G?!zbpMnKdP!$bY=?N3S>^XL~ zVNsHGZv@r_hq8>J1~GYErFo`zoZ@kUh}qMaH&|izJgzHUcafmY(vO?L*AkVP2v(B4 z31>2;=vwhg;xyW&_)9saFXfOXIKL5B=vH@jczC z9vwZVIKUNEpfZclzKYa?C%&;*T`W?@;g75G#SreXm3L!}C$YZqxjJs9N^59Z54Ni? zXOHngc5i};(?@mDd%+&qCOr12-BfGrZ8r~WhbXlTCOnTE?#EI=hVCng#bYjq(vUi_ zk9sH(TKm##r=e1hv#k^rlGKhWMAa=E&mm~R=~2j)i-p^;yB>SBU5HiyY?X>Z$WmPP zY!=ou&b!67c3w3ahy`birRv1vt&w!9*sk%lVh^x!!=;4>vMZ-LJF+jI9J&+2u5h69 zI?ScN(8POsus0GZ*ZcG9&>tIER3{KUzEh!`D%)*jBtM)d?Qg8DL?781ZR*>RV8U9v zRJ6iOVu|8*O}%*7vZxQWayD;hJqh0+x+6uOM5^4lI`CGp;XFPVAILYVZ(#AYk8{(T z>6OO;p4b=L2j}3r@7*L%d$f~5q{&;-yY!)JHw-Dt7u>IyO4@@Fb+aNL2)Kg03nEPl zVrxV91AI9WkIb-NSTd>aNJy|U*IuMX(wnY6L&?oNg4#p195Sp+;;UU6mwA>4`v@OL z9?;xF7$E3!SP!iuFxTN+!`V)!VIeS9^aq($y~I7qYHu3zZZ8<43X_mVsWK;Mz#^Hc z1_*@UhGp%_E*=7PzG5Gd>hd_Y4hy_=YvbOg&KK(A)nmTuw8Ie{;lj0yta=YBS_>ke#$%L z(TTZK30zKlIJv>Ym97qaq%}gA! zp%ylqd@J}Io8?j<`TA`jQ}aRJ-I2Ye>i2Mlg3OlMBr@dqP_O!7s$rNfTVKi_Y?qO=P*ti)E7MQ8xg*Sxq6*!qs5dxFq#v zZn4AROlLH_nL|jNg#P_VZU)m(Y$T+Vr z5@bJS25tc?^TEz%MM;n+cW^859G0|%U121&GBESen_64)viI8i&1BtHU*_2>-yW~U z5{;TMFQM31q-6yOG(&Zez1P~+x7q`*9c7u!fZpaBQggt2?FjqK(jVoEF|YPp-?;?o zrFLM_rrQE8ZD$u!Hz<;{Dw@E$)U$^`$!FQaGuFZUYL+*|B94oNah!th1-l!u6M4;? z6#EgyEs12UJS?=t>V91HD%UdjxzEC|MAy?tF+!AAOr6`kcP&(}1}vmdh1^bhQWT%E z3^ZN73g1V$2TSGS3sshS>9plQO-~U?t&x?AU}!Z4hR66$x0&t1CB06Ip;I>}8&2GW z@Sf<-AjW-Cfg2HflSpTL>F_K?Jfs9%;HFxJ^cT;2pVIl4bQ@4|we(3{vEAtTD7UW1p>uL?8)_-^ zYjfQ9co5?5NnmL=5rwt>XWhHTd)M@c_?fkzFar2XZk5#ZOfcLc7`BHYBwS_GZvr5~ zT&j-EyR7P=1GQ1dF{wvTL&po%XGE!2sGYD1N#=q3_yuh8oVbQll$<&DJ8hbRTk!3B zRxz?3FUvt0CJqNlsD$uAV|YJTQnR_7Y<&V_m`W|~bYe@9x73eWTN6>=Y6Tm@K>CVO zjLT0BmAAJry~uqRxL$F=4fhGfD-DJ%`=hGr!orDj;C>j?_RU7 zko)l zsgtAOZNDZKPtmwpROVth2|E`BVtcg?F^~mq>n2aOXkKzkFGaTxY+v~+F{7J#9vJ~F zY0_E(_lyU^-TjJ*#kNeO$#1O2s*Dy!Gbiu%pbl!v0n;Jw*}Nzh14C!9R|;xM-gn1Y zDF@py9b|WXP5d6PwwE5l=%;mRpVmF{S)+OCY6{TO@1>9%QjdciL-QdF+ZAE7c0wk@ zBg?{o)#IZV3y*o;+eh{qUoE{MG=+?m!DTv=FnY#5m6P#+`gJleS?r(yO3`tjmxpY& ztIB`cVNHv0`Qcj1YkfZlyaBdjEkL}MC>!kcG!ipDOpASw%NV1eZh;&RI;&(7?`f+u z6J%H#-TKTS3^d>Khd``G_})6bTPXDe8ur$d0It{L1)UCnMC|-%T4(YBrjWmZhnG|o z@8LaA@m}HVf}_=xVC3s(TY2mCbdm6YLRRoG@pe>aT;8SQ?NH`>nn^=#g7a#PWYZ06 zv`M;evhi*%TZ~?FxTfH)Zs}X1SWK*7q_&$B3N+9ms}up;L$JBh+RHav^c3I_6T^tdM!nk@oM#9! zJQA1hu=ARfj#loNnmdJ)IOOflNV#vC!0L|%?ShO&B&2P;Dw66hjsnXLh{shk^g^GT z*~*{=1r#u0YZ1mPOMM#GedB%U1X&5H=XeMVE^u6X?y^Z%MFav!W zEOykRJoTlW$(MG<;ztzw3g@`qgMLp%R?%$1(G@PSt=-XL>PbIap{;Pf?4oBmmN0nv zN?~}p?0R0rc073y1o^Udv{}))lbXvhj zJ>DnSIXuF+9GlRV?kXZcrJWOZJl!X515(t$MI8)O|NcCu(aoyLpMM9A1fG(3m83x!X!P zqT}m(M;O7<`bMW|0G4-&rSwHv+0rPB4g0g6;-_RJYbq@3KB|l4Lh(A+NFubb_}Rnq zXnpI?uGnzLvyDY9c${js79gA7tQkJyAkf@Xvqr->0Xyl!j`v<3HN3OnWnCeU?3%#V zII!0WBw|O{i3T(XT{zDNeY!xnrDVb45k9Xh=>RZdp*uFQ-gYEN?Lk?Lhk9n)c7`zt zJ9XGGQ90>wP(=s_AZ}J6{AJ}VNl=mJUil;$| zK&REd#1K9e;isc8qEl3P%prmShaqD35)b6wijGEwiZ9^i*~-``Y4JY0=si%U*RXOIqM7=Zbg#B?P_#9D5>iHN9JKi?`>s&2KwS15@ja*+f5F-_3nD_#y1h)+rTo2Q9LKkzVSH?a zd283JZ$d`lJ-omk$iiyFnRwL)yw%mzv~B+CfhJYA4$Kxo*P3XU$ed?<4Z&i?a9Ru{fEPL{T<@%Z-H}mX2@dRaa+MMYE6Qte)SToNQ ze``Diii%6IhY!8q#7W!EV7nJ+ARO^{x=MxgL$ww{B%?<=^*C)&ofWoUa)iSq2BWdq z;4wqQ%$SlLkmsQhY{oL@BB603vUnf{yB$zXQJtj=@Zu z^L<>|*6S|ziXon%g0Z2Pk^y67$C=s7e5hy_t@mIRXx^Z^`PmF>j1Q0{2;^~RDf6ag z=*;UP08^zG*1EVw%_TN9aU_n1CFFfGNd72dUq7TMes4Kca$z^@Lbcujs~AGLn9>7H z(2xev=AIfXHK}dko6;b!00rX0O2HEG}>-ho)6KXfRE!^g>z( z4&LCRpS-S+ql8CpaeXofl1n(6nDZs9FzTU|U z3<&p=mxP420WXdFg$_|rZ;*`&9y4~`WgMEKjY>q!bp#?K^r^}C;Q>@oOAJ1YEv_9@`{`rRVG(QJwd$#(RXpA$)1~Vamof zsShxsA5@EBwAm^h>tW^)KOd)>^qhJMjQl`Hkc|82W-BpA7f7$4O&dW^bD7KbERgd> zzOhIb<5EA#ey4>?$EOaR?*!eC>4D87sudMpUkIUMv@l?a?`y2Xmw4b}cmURS(rr>t zsY4(6TTQskYS50l^BCfr02(Ig7Xe)2*X8FY1$ewNQ{t?_LwW$fZWex;v+puz!%%Np zVVIulrs7C=$ck1bPd|7|B<9{G;ALKj|CtAUkqHO#T!iNWYUi3nA3z+hdV%c~(cqEO zq$Dwc5hp2(x*WZw5xoaKY4$Rzn`~Jdqg4wnBRDrEJjCya&BhZl!2)$xAJQPd5gYMV zwy8+cnsdL13_){V7AjT2TyZR{v~-Vp*|m|T!L7^^jrGQ^{o2gvC0*Wg#D;^Em;fQM z3f(9(U#}t^B|HdF7pzy5ZBn$XaV367F)taVaA@BG77QVTo0&98Og}^x6N&5hDtBd0 zg+;p2)6sIG!=VNy2CyYZ_Mjh7-}rM-1P+9C?BTOM8RS6@->14N50*H)(@GIKHSQ8h zi8JQiz0`5N$}L{aXv7J5*?N-=Qc&-$4iF_`BRsekdrgtk# zgZ)}q2<`1RPSIPHJK}y#_QGp7 za;(?=n1KQeQ{5NetH!P(+2UYvJT5~+`!vr#U$x$T?h=IrYYQD=k?Ll78kkN|LC4$^ zDajZX`Vz%k0@)XrPWu@~+UU;KMrigpZhKYqtLW>r=L3$Lv84HQIHo`redK8n^g4Nk z>?0C9lBq{3R;-k;@}GKCek#N%_$JEfO?WmA;nr*HIbrc5zXLBjF!osKCPr+1gbI=B z?sLHRPAX)Cy3CHtT{pduAy^WDln9>1V6)Oa8h(D9#=VLAjIuZNiCJ?Vy1z&MsL{qf z4y+C`nGhdbW2Ot0AlF@7GF{_>!MSOny00pn$IOXzZZ!c& zFcB0UP-^damu`o?dN0O0vE$6uv$te}-Xciiv_|U&6yr(S(lg=`8-bBWV z2vhT!<+5j)RwM&EqjhH8y_qdVwB4gf+hMq*2qBaAGIER{*#o?fS6XHM3<=&P}m9 zv=MiTHKN8fJUST9&FZ!BIqV&>PSGtEzR|fud7)uomz{7+v|88hBYXz>rUJJh2U;bt z%!;<72sQziP){I;PH;zdObjgy=#k+&rZGBlk5>uYnr`|EXC$W%&R1E2B0PvvUKP#r zcnWlG{#s1i^BF35fDwYv12nPs7IE>617FG!|5$#H#m*h-m2VH2z#teAGCsyWk!MDE zTUEr+0aWDUBHDVq35>Zf59G9nhI3!9dRv!l3#w12l35@4L2PHwMX;R>PU=Y6fqsP2Q4x$R6S4pjaaIMgyw^O*0;`L z^qA|^i}LmJe4BU!YKSxUrC!*VdgY@gk3wY`uT+-&I-l^lj8C(8V3(S-NrB(1hsHKr zW73H`HI?sLn^yoNdc&zIz2CwE29{+;cCpZn*=MB)a&*X#XYAXK z_oQXnEY!i=8WL!`C!F46(-1i6+SC~>sCKz-k!$Q5 z2s1b%n7nRc0*WMXw^XH}=xzpuNDe`lMr6 zh;A(K;S=~tRz;R zUrCSe20euj_a4aS3wPAB&GgcRh1Q_=b{Pv(HhT(!4vFU4oFU-7+`;2j^3&(8u^TJB zSCWr>LySp+UfI(gImvc8lPR9;TUThDL4P9sq_=zGR;CFBToL>PWV3AiM5$DSQr`S0 zAG}vIhfL_E%&q;0{JT9-;Y>tDo zP*Kr3gBjIcx-3!k*`BwUngZo7DR@{pWYQ$%3@p?LnaS>C+$E9^<2mMKOf}@dLhYy` zn$XjcYeZwuqYr#z)gy* zfC3W!L@Y6QatT_BT?QtC%uy;c-IxhNtm0|(tHWv!ICm$T)L0-t6YQINL_9@MEM@Fb zAl_B3EeR{@GOev7I5fGLP8$%kLuT>t23?2C4C5ghG-yi+Rr=!kGTy3tLuEQ9)e+z; zT&Rd@Hn==wM(uU)HRUF#>dQ>u0pu-@OP%DnmY9g;28A^uB&xR@TocUosFi~XP1iu# zt86J|8B#+YD32sDuxIWS@|0J~6&I#y$&(3L=?m8)0jeYFd{>Wndd9iZpU!a`u!6gF zmL=8zT^7^fbVB#|i(*9#TRg9BR>o{oHjrQ{i+yZbUWTjs)U{qZ8^0JJ3Kuym9cnqo zv`XreBz3Ka%54BT2ue29D1sgZPhQum?;M@nbvoQdq}06j?&a!iwoDzEH%spak% zp7RiddNQ~*_eiXdulA)G-x^ed^!5XB=%#UQm&Xt9l;vgcsDMV0m6e}TujLJ`8h^@% z^d%oY)uo};!Nvkq6SY!~r+g=9rZ^%NzWxAc`V6Npsbn7=nM~wp;2hrUz7j0j0wDiY zSH*(J#AS~*6GdB*6kQSI2##4Dq=1;TY&J~|_*mj(biAI^42wngzxG)bdW+URRB#d8 zbyP6ic}^!)ntjT_)luc7*DpE;NZTeJjG>;8E4o@(FOh}C=z=Yp@OfqMso~2j2+KOL zS=xQWRF%oBM&5#lcMLA*s@95YlMKdDsgrDp63^zQ0a+E#n2`AqXNIbl7h)Y~i0Dfz zaxXx14SFdqNPw~GM(ALd8Rng2Y*y~lLT>Pe#%^OhNY=*ZS>zj<4ZzSWn_cm+tZYV% z8`NDxQoPQ>VOLjrOKXee+n{xB8k;P-iqSbc^IojLkb+m+X4{oA&Z73cV|kuJ?sKX( zQr0okdRPVzZ|At>BEz82&lwO*O3v8`js(b1+5k2kuBb?=qP*UFRmp&mjaO@!i(YpZ z3Up?OSl)>XbM~+qG3^j7-i$Yiwp56fPXUiLygif_?Yn03wMGi@5#%@Kz+$lx);KQB zjZ@`)y&6RLP<7fb7FX4~)GdBh=sP_}eSTn2#EJPdYaI}-m0d}uu~db}<@CsVRx%Z$ zJb^`tj?VCWIn8-6ng9J?*ip1;nw#2$^TKtY#n+%!oZ82*U+LzgoLp zVP17(Rama*dP&DB2gr?AdBG))XI4~gRl*~gN=xd8N1pRQuh!}&)Nds?Ti!fKY$>jJ z?5!B8J{M<(3~la=`R0k&D|P~6M{v`g2#?3Qr08Y=m*{VON$S1&vX9{NwTlk3n1+am zyH8yzsilBG!+Z2e_IoQJ>&kW?&7e zV!vzAa>y%R(iR3uS8`ZG5)a(RaUFBMELjs|G+n|BO$|r1&qZGGr9FEuyYYB@oozQm za#P08kjWG}$lXDjyHgJvki$q8!%cXUS||)a>8koX@&HEN05UXQ4N`I>b8hbz8ty7R z4zCH+tTUk}w$fpm?`@#%1LzFUt_MVvoL=5K%h~rdBLPg!QQ{rF+_l0J0or_sy;dX| zRBimux$**Oco7!zEpTrz$`zTc-c7TbFg!MLeodZNOC3_=4lGYh8j* zP@)e;T@WN1=glOp;+!87T4y3g#xhZKCKv(77Mh-n&YEA>vmK_x#XvNxb?!2DxouH$ zbstYg_l(I`jiB1tK(4d{5C} z!rz?J3DeW-SDp;s##67!Ln9}~*M?qds^Xe!nQ=T3(}_?0{7b(+R_Cs{k*>xE#)bPP zpnwk6mUcleV%}SlRRa!E)|I!Q@E+>BFh^FA#0O%5aL%E!$vSXO!o@B)oT z+_5>Tw=-2d6^P&R`5eK8n<~ZgoQ#$&T76(N`%xYtnBvs<;BDxCx5g=Nk>+;~1>sB7vTD)&abu=;_P#9Megx0E$X zBrj+?ts%w`n-N<@s+g&$V^CyBLg6^+xxR{v9GsH5irh1;UTPxQjoWyx8r&V-PX?Y< z>9jsA*)+Y>NSo_5mOW>9)KyunnW>z6+9gAck%l|~Dm%%cQv22k{2aB=K!Gn z%IMrxpX5f1RKRFo1xG+<310I?81=w8$m_)1LXNo1Ya6@nGb=ivz6xdXi0;KqUnklP z2~T^?@krp!ZFpE*%tbGgr&J_PE}CNH6;m!notb_4$M)qPJh}#AfxQSst(->})PO4u z!4(>D*4Lt4?67NYUAhGr>tI#~4-ib1I&Olsyjga3oYfxxxK&TdN2nCn2VP>}*hFBo} z80cxj$=HZr__#rE1Y1W54(91RibCvQaAmRaXVEBFj<*ws@5Z$O%@7lz*_+gsMzQyj7jgz5AsGe-4 z8lt%35L(<2Og>dg{Y<0ZqOpRgzc3nqscTThEpFahO(4X6310E?9?nMAz7vQX7z$Bc z?9~uvbKTp_?XCqBqrzu!OIq10(wDYa+T_y-@B$`msO)n2yT{v03IhlIU@O zC3J=}-GGB5?Tj%MxLE-MCWh^_XK;ud{f^9MD+vsYaW{hs?+)~WZ&V*$Erq_=9`R~U zA8Xo@v?5#fvDT}j1_zPcO?i-hbY%Y-Xh9E`!T9wm8%IVusMGnz1TG{-9yV#pGFnP@ zU5m2JE1OcSnoWL3O19bw0&gHW25c#>OxYVri#Z_OV1b#Ri-2I<1sY3r3hj2}IIQZL zjxfq&brb{kj>@ep2XD+Mnip0_RGFzVK@?6%`w(!|1{0Dfy82?lUtn8}m~2HnG*KWC zZHnN+p|orvp}`XbZ_1Ua6TcSVD?e6l?c|+?XdQ`Q5D%llnUW_#Ubjnc&horAx5#8? zAf_Y~vUQQv@F~`CRtt8d<5_DnM`qk3vo1;UU9zLlhxtxAiWTBXZ9!&^2~7mc#@@ZR zgKTw9nl1{06%BZx8dVtnZtOY~nP?@YWLKeIzHVVA;Xnf(^QghdNtJL~;G`NPLr91k zc-GK-Hnz~o;)pOlt&^$kLKvQ{+d=sPB62qICSeYWb=@EG<6@*SR22@K>R4>O0C*k4 zFQQ4NCBZ`$pFXg;Hy~Ry786ie`VilOlsR6Jgy?Z4&%DDx!iTCRCJHceRRJylo-a0A zVZ2TQjq@sf;c)ew7L`;v{ER%HLkSb~p(7K*b(cChdfMQVmyAv{rP@+fhng7htlk() z_~Uk8w;F!~z5wb;NYA0&0<}{$Rvou_`i<6QMXLat;X#PY1^8UigqP{L3SaRdP*dwb zc@*lZ=|+Jpyn86PtO!(7pyq%GP-yUFpwg#-ISpkNyoOB{N6*(nBsFQA0m5+wfashf z+eFx3U3P$*DS&HJPpj)~%BvV!r7=w^ihR@jKmhl23}#>mFP19casz`g3Z=YNO$&s$ z_sZ#w9uhDEWelJaOL)o@0zdgrL3#}K^z>xv*|5;&y7#$S@q=)p_m(8_LKhxxL6eV} z>V)F!N_#WQRB0*6M8hvfQg$yO4VG>>)m-q^J{6|Z7baCW^HLK{o8;I}z_nXcu~}P2Zdakqt28tV7N&x`c0PLR1ZeY?g{_HQ2J>b4Tj_o3sp*_@W9*D$ z=f1KIgFU2gM@rCdu}Z=LWU$wJvUgS}Qu)BP-Wy2El2^OAi*#FC@J3jcEFXrxRXsT- zmr6KCeY1SnzOdp?resZ5NjRM9MqkE081z~WaF&eb7r2&XEciGwj5EX*CmmBJiXTyB z6C(z0CLphnG*y^}z1(mc1tFVoh7FR41qZ*e&7kx|P`TiQb7{mn!8Z$-ub@&r03TIn zR`F0bu`4@K7_6D8dQpjm9(xj%K70Ba64OQa?JJ`f9x0$)D50{Q5OYL?V{PkhD!WPf z1n0PFY>(1gjOWE8qJ1>$E7IO}bXy|JLyYjt21n-cEwvO?$2=5jpuoDgj`t9sr??0{ z0-An={>%Xu5LWpx9xWR~>GT4jvErIPRm!8wN{dKYUT9_4ahPU%H!$!>eMTd#Qim4q zat%@!TAYMmNFaCiT%!yD0QN*eyo_#r!^i-(P#&T=8!yn2%6P+*0=ocWJz}o=)m)tV zv74PWbMAW&9UN2fJ%VPL+`UwKCitbG!Iy%IcbDO0-RtF6{S)Ygtayv{96ti&drsRs zxcreW)cIz@g0L1dVV1nWe#=6bX*oz0~BYT0kyn2%(Q`ED`X7ppb=ir>(_{lzVU@Yh*ZHzdeZ3DZ8nDIe+( zzhc2`DrjwsXrszfMOB7~^(0jEymNr|9ibALea&zbYk`fz+WV->9vCpZ!NO+8pkJ4B zcnCHYDZN7}&J$S@y0d-u7ks`39r0UQ-oaVTxrBPnBuubmL`;b?oDYo?Bpmwcz+Y{X zEUV3p=gMpp7(v|)-UZ027YDc!Wg`!c3Z5BFD1aB9x6-6Nr>NIM;MGx{To&|JpVrmA zr8w*yXPOwm9l=#a%3i^!pnG@;eR?mJ8PG--WA{!CJH+X!ixDIGCfl25UQH78I_MfB zB5v=$gIePxu-`_J@g@7gX4MOpfD4cIjq+ci!Hbrv4_G}VlJlIK}vV}0HJo`jb60qE{cXPQd z$Z|+V$2yqSaJyc{K|c>Pfd3@q_CONYi8DG;Q!i~CUYYme>5BFhLW?SL(A$g&#Nrhc zlxf&a_VzS4&sfMKxUkB0=Eli;I8e7)Cs^KO4TutYKHDMlD7&<^Bdf6a(RsZWwNJ3! zU>{M!xprpr5Vmz}tznyZ+^IU|oSH(Jpe?L>x<{u$w+VvM7~n`C=&L=pl4&ljXO#!D zvX8`5V4~JWHQ#!ZnX!`(xP(qIUr2VpeJhbK4Ud%^ZM>yniH}VM1U=WL0phZlJ4D-7 ze8-?#%;foV2OuJtj8ruc(lj*61kM|M0KIj&!Z8Htder4)-R>wMaMVY-EKg;84tQ3? zZ*VY+y{f5D->brEj>%Vn_15^Fx1MnN(-wMFh_Env2X6r+JH`=)nHeDIJx?&UpTp*o z?Uq{|Z(c=1h)}bxS&&x2N`tf(4@CJKl(+v@x&+)2LpImxilN2i{ zb_|RQ#WNQ3t^-0T8r%-a;>&efef`9dNEzEuYigt?)v}H;#y0OBdrdxbu9Tj6{`iI9 zbhccJF2ATTMcOPIl$PVsC!X9V`jB7HFv>X|4xVQ3*h|RB1TtYhCQt@rTD?a|RL0o{ zcvRh!Q6ty-Vr{08AI^2%V|n!@;j=FZ*%KjIsZ(?3(ifwD2_D~Q&Zkqy<@Tg5i=uSu z8a0gv;d0G-Fl}u}lt%T1z0Dktlnxh(=dYj`O**w6z6?H+Ehq)+tp{Dz<@2a0A4Tv5 zMCJq?h%A&Qb!0~uT0$qC<4OGcItEn3!ZAP0t6&I!@70^oRjU_(Atws@ic8_ZH$8-H zRVq7DH$-c&s}!@Z3dV4iW3YHVEU_AetnpI7O9yQpnCNTP<*~=_Ws&1Udg)4&VtflW zv^PTvXi9h(<)fU)wgdBZS_Jk)Tp zHJC2u$pDA6*3+=@uFUpliM_0nh=VJUBilrd<;O5Qm6?o>=e7yIb5$GZN&$<8U}*{OHF<8-*rP3&x} zW=~^|W_U+DO}b2s5JHI8Zm-~=CI;2z>P}N>n@f#k+EEC6#iew>qMoO|x7`i+Zl`LH zqmfIYI5Y>XTNQ|YyeU~R@Qirf+||r3RJu5(I~^X!5}?LRG?H8 zSZ+R*3^wDHA=uew8gf}uH8~>*)l<L!s0_?Z&BWwYw)yIwekd;Y$p_(ImWyb2b2#5KD?q#@SkP4z=r!bE7KF9}X`I@t)$f z&+93;j5f463w$AML)MLno}w=AHlG2$(Kk_Jb%U$DDIH`W+3g0TGy|vXZY@55{y?SX z7K0;cZM5}4IyD2NsdDY=BD`J35Y~en^a@g?ohZ4xkw@L)H217*GFOFZ@HNR(5vh9C z^Ma9kP>i5)s95|>V3Vf2oE5Rf*~`~9)U=(9#t3!@JE`x*rZwUDyyyMvr2*dRiCR6G z0q?gx>vtMaWGY}Jl|{xVqD=BKL=8#K9M-%mu$<&AtR`3wo2?kA9^*rru5>I7xJs7x zHH`M-z2O)ti|)4OL6;j)&!`{L}wxoc{n6jMUOfIlZCz-3@^&&TXSmv1$qp%rD}`GK|*_DhlQ0&D?b+Boz_ zi5;Yu_Kf(2yxx;|!YRA`z9mrYGaV=yL*&A+OdJT4ahrAXlG|KcE_V$|*QJ_v>d{sT z^L9OkT9nhgeW1#9_&-Lc1ULf@;C(7R_*kEXp~RHgU9 z?Ks|W?x>I{DG3IN5T_I(2M%YUHITlSvy2QF2B|Ncx%o~nrePb*n>&bDDrOlJOPv{V z{0&suJ&+=V#nwc7<^rU7@dBT|E|>(xx03X*J#u%qRw)cZDav9al^cyQ06{>$zsn;p zdElniX@hw6h=K#So~AStBEG8aqeYZj1)cTIHt@#TY`bJ`Q=dnoa;?qJB|IEBonzE= z5?t8!0UTHRTtp3vX_qvvyW153t1L35QCx>fAv}}=uy~Gy8;{>S$oHln-@WGyOLN6J zm09g<`oikx zb2@nfy?OLdH{LeAn^as`>v}9ec{$+robat!!4f%tc<2CM9tv|S10$o6Gyvhac``Vu zJY65CVNgmSLI(~%=@cQZa6&1o&1}MUhnLjM=N()lR1nmS1=c&qL^8UA+Z4}5M2={| zQ#Dgvg-S^ZsE0>K&jMcFK5U&QMnsZ)F|o?~7F=w)N_zhjab*IMgDUI!3>&vUd97~1 z%IXJ-yB-kFsCgnwiVo{-DU>{RA#Ocwk~V^)t1YT`|M}Syqed6+Bl?`L}TOk?BTp8_(X%v)Uj3lfoZE|Zf-V)xcjpm7+pO&K1g@~ zrT_!H=iR87WhFN1Z|*!8vVEu7MQcdI*k7tSX#%h<-rhb~O68QsC|%J!%fS|75ZYbj zC=$u{6eqJsI$vuiJRK_IxU$@3)tQmoWBvhr6DFh`zi7gam6)flpBRl2uAsWjYZt#|1XhhPEp|^BIF~zXNpVK6WH;#5qZSMc z3&tLg6MW2-^_Z^DRk37;Jo$zWXHbS}^Z|%p))LU~DYd}h{=kLDqZ@9YEYqrtaC%FQyk&1LeYdXLUu zoNT#ci!;;<88y_)R+ZfnF-kX!Tch6P=dKii-=S0o*{xdO)jnd3sGYYTz zl#VVsg@~vLA6^mCWnOQZT1iL8q(T_PzKY0#YV3EiZRi781 zcxF9q+&pMXPr+rzANQL(+zOyQXNWSaE1Og(OXs^*5!(6q=IN4SbD}6iv=NlqLtju| zeE0-RV(R3%t(S9Sr1rqAy)s|EC514f3G)*|B9aob0a@|q{h;It#OjsW9i<+VWZ1rl z?rSsTBAP6|pYrQ~Jibcid8M@q5!;PD{ zH!B^y+RJrZ0Yhp+dqz)SH!qdDgw=X9e4|f*IeHw;ZnM#HpYL-tFIDQ5vFq5ohRf8hguXJ%h39M2K1HI@Nbll7@r(7BWUo0P zb{!fEj`u4Gi56z2vW?JpD2zDxwLz*i<0 z({ng9{@T;bYwT+0RnDEnSPx~36{~0GHQdA_x^o5j}F0; zeIqKsBRp`u&zrLxmud_!f%z30XVKCkcFVy&u7(xN+1xJer2-+^jW;vsZc%-6QPP*u zbEh*mt6ZRCTDn6DfmqJ9Ua{(?YOd+S+>e|rajWr2B!xLc1d3$(+5#u?`7!Z}8n2p@ z^Pg#xuM_*8T+4cM!()kACk}^+Z6}K47OqXbEvWJ%H3X!$gO~UOFQAWfZXYx1hyb`Z zEAhl=Es`+quV1{UryE_U4-b5XB;S@0cIK1jC-=IiXSQTVAH^dgLb#M*aQBYPJ+Ni# z=Pg_p80O+#@D?Wo`(bolS)Qi?%1*{y6oF0$Xp!&PQ7kn@pXg(J3s}XHJ-#e$@5H=M z@KMw*E0Z%1%IlR?LwD6954PcyEn8qGkT8_gSt!tAd?b*z5ZHU3Wc?PI92RIW(u@_D zjd`ztuSZ`l+4DAzBBKK|lh=m5Iy-=hq1bN=FLAkQ(7_2Id5E$y+*#Ay=Gc&OP1%Of zVV0&8fmdp=Xr82tlPJ_-#mQx$8!G7O<2aLf?OooQoOEu2h8HEFBAR$+z7^@V(4>Pk z&t`q>f&xmnIrFbM{|h1;8Bh$X0`5aOu}1?-jq|Ag0Blfkij)v1s?&%t0?T!N@DaH z)hB~K2;0foNIL?dvI*^b zY%~krhj@MRnk9MEjvk6~jnO^P=Lc5U9O|_;{+Mwb$>BkhNC1@0EDCXv8Y&2?T{Vb4 z)y2|4VhMxA6uDIC?7|nCjvgKllUP7s4fWFy~3W76804->NBpQ%?!AD0n zrGt+R{0hpN+xfJi09m3{eFs)uv18Uyd&^vsJKBs9Lw0I2>@{5MvPBjxwTj1!$jJ{g9~_ zKtuH84okZpST2-KMz8T^2$$#h6K=?p&c3wmNUAH*J#%1JHP7r;^ru=csbxpt=-HCi zqR2Z$#sQPmRynRIhmo~KW=C*RET@pJmEl9D;}fIeaL%fCq$LyCp1gG2og5Fvx4Kbf z_D!XxI6>Qcmor@oRF?MgU>XdlXSFh%`_%%mw%5*Kpq-*Hk3){X5=U)oj+YT&MFUUk z8t#!WP9hvg<{GMK8nMYS+fyNN1Iq%V7ZL7j1@o|J>H^*pVux|E6Eo9T)`=(#BZ9-1 zi%efGF6(pHiWiY1{hqRp%#=CzG4HXxhsyN&q$SmNqHB6a+|De{0X;?~GT7wODHT)6 zRJI>s6=zoH^p>PsT3!Y_*;EiKz76tYUcO3K7*R9WbFT1b*`1e^5&n`?3i-7y3M8Qg zE`o(1yZC=HmWc-N87(DnNWBS9CF`}uN1DJVKkX{$<;XQTW}jHq^aE(u-&JDCGv3`voz>>dM2vdr?0 zWVh2Br8_!IDS^WceUHK*&WH2%h%;njd9N;c&8l%nq=jtg?Z{0+NmXSVC`>;bFqtK` z83!0x5_I2$LXXI@fo!yJ1C}amPr`mV@4#Xo*kcfd1F_MI;s&#}t_yEP#nloWD>&ZX z1AZ@eF~&IrJ4eJN2Y$_dlO)2i%kEDloZ$7%kL2Voug(JXeo*30uZifn*#oldUeok5 z;hJPfPP?trtykd~#(nV4ZjK;#{8_YbvkGYoo9%9rWs%nj@4IO)xo4Er*9v6 z6fU`ECs3YDBG`3@75TWJZ?ZsjII~EV`jN;{9 zXqyRTiyQnUqrjJpCfh*Rp{@&fw>FGb3M0}l$|86k*>g0Wk|!ZALD`-d#-KW&E>0KylK`H7jb2girLoY=Q9gJ^zIQ!-`as?7#%z0A_$62H3{kM;=J6)pi4t7 zOBXnS?$z!uq8>xmvB^teQ#Hz-B^DOo zoldrr%<6*{S1f`sqUZaZ_g&Y;%jg3%U*9zWYA>TVtJN}hnQe1CdIDuK<@LgJvgg6B|p`oG6qQMcrMiM;4 z_o8P@ZpPK?@$sDKfu3-+`GE zwh#{a25*K=N!qYCj=Xk&>*8~?Wox&w;?=P##%9uf3DYUVp>FSl?clw(hgcOhT^VV! zZH#^umE{z<0(B2s6~)Tm#N|XfNAHOdNz}ANIS>Wea+}GRX(%xgr|pJAS=6RK;m1Ui zIAg?`PtZ8b8zLkdnh@3G**G7STfD?lgcmx&Hg!#XYi~yYdmnf|WjrjIeY&n-Q{Z$H z#YJL|gPnx#PL82AUg!As(79LXNxbw!6)_FM4zZ{QSBWP&&pJ|o$TN8CIa}B3TRPIz zQ)@y47bO&zd_yus#Zg5`puF-OTRxJh2UFEH$9#7YACX zFhIzXMWQ}e3({$qOh|wIuFwoy`$>T#iu>@08=$OX!BEs=QAn@{Y3D>d7g{Q$0mWwu z){Pz#SEj*-sY(kbC5!qj=jH1opoQ`a*t`IgHzIpzz&IQb0+TvVnl;IyBL#fR9K2UKKnaL+2H+ILL&itBZprwXJfH*ULTU91bf!j;*WHbuu>CI`wI!w0F3 znKT24?oX*7kAR7x2Mr7aMC(r!g4&(xSLmAgIoUS#;6-A^psu9Jh3h?Lku#HOTH%5vcwC&jH$0xa@;FUfpkC$_>(Kx6}^cTSSFE7lEP%SzV1E{Om|6mEvP z$HVcQZG|YQdx$70!hn-kHaDY&n#UwzySV!`RP(sKenE^?)!wutgtXIMqz%&gHj;O} z5jb}x$MS#_&>9opBlnZUcT148git@evW$nXWX<0WS5h?^AHEK-H}D#rKnC){Qn2T= zHrmQVf#&rgIj@r-6r%F$4n`4%XJgRwrK5gdV0hw!idsz{M5_5CNyxi^^J94@TtLiz zAql+0w06v9cBc+BDeYL(3j^!cAbBG@P1jz6f=%(X?BE$kVT5!%p2*1S<%wZaHv(V@ zSLC`xvmv8cMwxgbX!Z)=3Ccc>dAD_N%gxmIGSNNVod5Ij7v;>Bch8ByPa%&l-rGDs z8jEWn4%o$4htF`k9uI>15L+sXvO^ig%fXu-MSO#n+=7t12|*JclpETw;=N!n)qdLd zoTU-)EQXdjG4Z8%EC{qt@j0gw+K#^G!-2*y_ylwpGVJD+vxxXDR2%lT0!mt56r8Km zbEbV_;T8q{Na6vr6AcT-2=j~FvSt*pyg`h}%I)K9UsAD($9CSb9MNv!Wl!d`&uSg} z@hVMcyci}&0;rhtiKwzR%~y6tu(-0O>>21C(X-aYj0y)HR`pZ}%gHpengknYg4bA{ zH7j!1YPn6DC1^j%rEPzkIQVo>6DjA=FJ4eQnB|IP@PKg-V+cnbeYwQ$p_y-=%~*;7 z#sTPIa6f(}O2cFl>BmvH1CZrmn6?FS7O`o>`syWn3vp$P5GnNh~tkMZdF=wAIKn3a=dRM0o5NM6=OeZ~rrtzK#;vwklu+DnH zNI|JQRS{S+Vpzo}Gsdv6wX8vhJ7ttzbQvysj>qgb!^^?ysweFwK!_@vxAr8SMhxU~ za4MQT&UnyQQ4iFeY~H~&#qkcy4&#ZwfHYpseu19S+$AAjKHB#!hH<^*V!;SU8HXs@ zp;jldM(uK2xobEIFN+v9?V*bZtQHQc=&K<;H3GQ|e3#;D7J$|z-gwuzxT5r=xa9GR z1xR5R?sEf7RYgJEMpEF_%oZNEXNg4=Z~G-Uz|`%6P|yayBkW(GpCHO(5d2Tbdsf2M zqziR4PZQ<Sj9{+fV#12 z>n7+m_HcloF6TL@ZMjpfVBk3|7%|7Ff+|^-P@VOt91I&N=JP#z($z0sdDx>)BI$|> zSvC_dHe&XXaCE|gV)q5_jYrC*>;0Z>6z3-MT(hH=21T+1R5`2g2rV2lHklNL>DT=fn&4UU-53@>W$-w88WB@FH1}`YYy>_Q80Psxb zQ-5&`6Pk>X0_x$2+JW#lE)Vkfk>@+DWU_kC zSuJFl?CFYe8%U#1$;#+|66-aK?0v&-(mc4z%?m#pVqm^voMSSt(ski%OB*v3Hj(t z$fv%^xB1xP@f4KTzSW4TJy8sPJhnDH4?{QI^*RE_!=6zDJ&mS>0H8p+0$=FTdUg_M zlw02CukcYH=U@OS&l%FaS9s9Mv|M@}(KL+bX9|cGn&2;4cV%~VnWfocd{>9!EYwlQ z%_bw}_+AM7yG6D?=)NZx884jP;=r9RPIhq8!XYG7zLZ%Ibmc&udgIonfG{N}gainr z!(w`eWN%aF+Bcx5WPJYZw$(}vX%?YtXc1o)kv)rq(?wc#SPu}7mfmGlE9t|LOS)BEf+?_ZnJTz&=43WG z6LC~~{RAWw-;~eOGlM7g_I6-Ap5^rGcVx;BQJDxCIgL`VU=4Xp8nP{<5b@DlcN@ajLn+FmJrHVHYS92I~eAvzL`E?)j)PlG_!TU&zRwLZzhwHEmHn z5mT&(r4dgEC>`9NL(f*{p=WugujU3hL+nlIXd1`7cCI~ttC=Mu!1pqSXlJmiMmZTzvlGh$mdCFxUA3mj9s_a0M8c#OZRoNjyW zwy1b74X*v2N@0tZ<`u=>Kn#$&UwfMDbpv6u#;hUfPxs7JY4`xS_rx z=c9XUutjRUx})G!!49Eww9PP^O%c1PWFBJOB#;Yz4drS|todH;NU5vAA>ySPLxklh zf@xZYzIeeRP1bPL;liX?ChZYfiBqdcl7wj>`@m8`o=^D(A93^`j&jkVd4|}=i`)wz zOYY4Dria`lSdPyQk15|F)AsiAwLXS8JD3iCR>AaI8n}h>$u^-aOf8VjpuOFYTQ2~n zi%MYL;kDZH>6tEa@743clBotvty8>tqy$FJ{eXNNkP{yqOiLEROA+eO0z%1$!!sjCGZgl$cZ?^3)8n7>K1-8IHP-n zDm=tKpe=16OE!el-QND3MK<02kRCrG^b|!b2VP#=1n!w4eX_>3CD2JiSBw@;0b9|4 zZ7Q?H3rJ+j5-akZOOqGmJSHN`58Ffo z!Sg1Xb!v!pp{(pqS~k`9sY(q1iGeT|p-HFWy7|43_X7^*1+8Qq)ar?XD|ZWwtGE`G zyS`1v(*5PGFX=en9e`OB3& zPSm28vf`1F37BiN#Fv27x9n90SS$>orIQ5RRN}1EMWU??mUJ0seZ?eYvo&doOoSM( zNVagB)MQENNnj}ld2F5r2a>3dvo4>X*n)bG6_@LPGtXLJ;)3_s6WUi4`eu4yL0tXD zB>c5mS>@$Ra5|<@R9D(yOm=!fS#r1dieo&``woD*02{h+@rJ3&3urD(o{|!;$(;3* zauIqM&GV+8_Qv$8Jjt;mR&wYXJRtMKSPNANK-&<{hk|!vK+KR3*7hu-C}yh4&kW!_ z=o(lKFF+2a0#ULA)l!_GgIrhEd>a^Sb%ePO*nA-Xv`~(3V%b=aH4MgMk!7P$! zlN6~1+Ix^L_(0$hcK40ci_IP{)8uEbS1aE>L68pDY_;<4$gM%Ut&yJ8oI-|zJ7S7% zA4A+inQSsox>#fGh*{={cbkH(l&^J>@r{;F?yzp<8%={19lU&-S4(%i?39ew;%{$Z?NF>e)o85*oaL6F*=Qy7Hded+;m*()vY`a+8uT974tvQY zh(pO-nw{>$5jh0Z05k}8!3Am-0Ca%Xp3H%Z9!f@$Km$?g*#cfa zXxtw4GcNZKL8k+j8h8Z*gj@AQ-kHIgRW+m&_H(5{=W?(ESIxzcae&9Rb{n8`i=?J8JV$zs(AX%5 zL}W_PTNfiQ-_-~J<%mHTy5#3WHbzm>E9A#dE0=FmSKA82gM2#( zB95E)L&^a8lCt))^a+Q}J1={nOQWS;)Ywi?MY4QSxx9YW!!~t~Wfx_2EGr)6&aes) ztC+2my27rsB5lSC&AMSeW|+8n_hOBz$~bM)br!WYchk;?K$1MvKx4_62GI@Y3f8n5 zUbZJ{u={8;y|+Y<+|Kk)S4(pn|Fx3Wf{WC(nS#unI$mv}!H8wAW*uD0%K(?_nMkV%9`p3C!OkcU{ytuOR=3-Z;0aOZ-piwJLN#58FX_u>hrn}Ad6aU=j@FP&%<5Oeo;xrnT`Rpi02LjTCb|7Qstf*D(n-f!KIG3CMBtm_@Y9f z0rTbFCh-zYvj~r=F5_KZ+PhZON3VT=ny+~FjWx2cpD}TH)a!V9traj1wZ1okRlNac zY@L92wVcI~2`4ND$2^x3^w3ifAL7QoVc)D!GgU@8)?q^e;#ZsEyF0BCqy_;qyPhR= zWERNSR~pc!RlR`TyUD#Qk>2IJtmwUdth@#90^k`y6Ij2qvcgG{!zAMaj`w&^FXD1- zqNo@RFyioyM2^KM)|myWD4y{4oP1e{^kt>5I*MEdz8G_p=SI{k3jM^%phE{Mh08BW zBWpW-g2jFEu^8_OV0g)(T|vQvM}RDELBZ}Y2Cz2agv#?`A0{5wp!stIi;-dWn>~O! z!MG{_2F5vic#{0$g_@iNIn73!3OtuL1Ct7xg(lcQvndDTEaCS58~pik>63K9cHbi` zOM*lIBW0G8+to13A~Z2KRLqLo0jEyMK-7fNgNW7{sg*qhr%`4?s&bY>+OvSVj0`bIpygEzex1cfp4;+;#+w9?c_l%yElU?OD?n>a{P zKy4O79`LhH2p^HvLBT;|JA=n+an$<=`Ap#L@-uT-AA1%;gqX2cPi-%zQ$U|&4uQ+% zLK(h!^yY|7#A2=5m2Ai@vz z$;72Q`n2Mh5D82TG=i)L2NyLvh6sa?lRpAAdB($ayF^%*5fjfI;KMgUDBVlfq|dmq zNcZfKTh&{Mr@@5@?*-Cw-m?f!S%Q)EoO!t?_fS_4+iT3%19@T`^ohKq$Jc`QAb|o> zw{CjH2m%6t$pMdgPoy1C*G=c?)96=(_}bRDtn_hsMNq-QxVuXqP2WR6bl=b$jEQsuF?CKF1w<-nh7t&lqYAJF_}1$e}r+ z@#-bgAmv{F=Otq6%a1>$M(79C&l?zCKiwjvf}I?6Q2;Vr04F{n z9+zsG9KLI7%Ce_P(tUF_5;o&H3zoCY1DO({`!idh6G7a{7`#?y%>y93^ z$=guAiidIOnbC}K-J&a(15at6pk2%X7$2tB2`{55bAwpR(LPd1pxe!@>5(~%0&Mj1 zxosQ@ys-zBHo{VZTL$LIrtuh@5MI{UTp8U6c}1b~;`MczN?N~Et-u9CHFc%9lrwyO zDW|>16#NV=@=&;tUqv|jBf6J=lJc$$oeU%HoES`V6!~sz+hYVJ&D7@ilTK zt8re5Z2bNgXDj&*O_BeBnTdZZRal)C_ygKi^ab*ruMP)ec zJrIyzSyVg1SafgrSgi{TgfZV~VOd{|K8VMb0={Rxhb5|;PcZNxZuf{GF@eGy-g*|Z z0B0<3qAB!H-lv!TeFBlOYi4ZB5?WkAi zpu7}&Qk}*zPo3aygB2nR{A%A;K2VViW^ArxmRC?B3%6_+^&-NUkQ610)*d7CrcU%R zI^Jc!dm&HbAzR!_K=;FB^Y?;C-SG8O!amxL7bG~o!J-ueO2UafX+eU1?y8lBrLUj% zdcL9o40jl6wKklF<<3s%F{X9Ln9jRkHVI!GecWhdF*IrlQQJHEq8*wKVg*gXaq|U2 zl@Ia(KfZLJlkhwY*gBu5rWu^?VP3bl;M){~wRe)69^Q|)d$sUw_1W%u+AEA*s8Q4W5Sussc#7I=PTo>TYQk4o2OV*-ZhG?_~obC*|InoQ;v@+f#WPanl$4 zgaHz}QTefiR3jmj&uD2cNMS%aiJE2PN&?OGgFByN3{>(A+<21{5A$-oeEM3znQD+i8Oow?%WO5g_dVs9QzsGhPRuFrEy@_rPjIE+tRVdcg!g~=bp ztmA{hs#lthSj`JJU`KGky{PuOr_}@PagVQQ$Rj}AwN!oMQ3$d``+!kP-~pCIKdS@` zJM#=u-90kmRmOQdsF?4VP#|i?(YZ748oh1CxN$4x9lX-w5s)SV=EwUZbjZeIZv3>8 z?g5yX!pq}6XDM~F}+=2rd6++Qktj!1*T;R+dJwQPIFQGz@) z9w$on@Yt2i0xW4fG%PS0e_Ys??0&<5igHFog8C|IPl)?H=X#m#!!V{jSd~8J(KP>> zDY`K8t~YftR;Su}fJeQlvZWk*9is9L^wTc4H)-3`(OaGPURE^I_idYb;#0n*;N9M!a%z;bqF86 z#+uk|GZKPbq3qGtLnq=2$|j3qf@%z4RcIX5db#Q#n_G7FTA%Vm!#x*+6}3qrjBhmg19RqoczJemQr>DWxURHb6;X$lTqV`51PsGDS@94kuDVmc_ama zW~liI*-cHpDi9)Z@S-DJS%;-VKiL;xDhp#PZY&@sWtZX|dJ!oIwxMas&qcl5*i@|_ zz`VE?wRjDYE<@Xb)i-KzkSK3^O#loYa=IzncAF4~Em|5AaQ7lQ#J?ZnM}~$e=uUuPk9})GElOe( z2h|xV@^V)$ki1nL85KP=L2PBTWZ$5L7L2@V)3^6tD8fVWLel|pITq_^iS*e;f6I+W zo;Lcfx!$A%2o*n4m;tH|hj)v4`bLw{OgdncAF5E1hv_;fk(lCzIUCAbV1T9!cH`JD zGey446xxWSbI4s-0Hha5!g7ys0f!|;3eyqrkp*Fc1~kJl5^Onx#xd!7NNRRyOy|R1 zB9s>qXhO2CG4$qn2)~ME8JS8?JvQ^M58uqa|J2;W6ex6zY=V~9#baEya%ID4-!PcO)M__-()tJ* zj#SqnqDS=6KwN710c+N-UMD`%3-?e_>yd>)AKrfLRm?3V{Z-&4}xWgN!j; z%2tMMRlVNfi^u7wUTZdw^Kx}oTT3jBKo|8q-)Zr7uW7jM)UCAJursU)b;OG7v1Y24 zvsl=)Vi+q4jHLxg3kua03S60pTTAaGboC=dABSBFf?DFI}h!)IVF6<$w zqcbEn;XMV>L5kP0C*@Bz0A!@j?dClwC&aldbQfxS|iz-nHY>Q`5s^ zex63qkrAbk$AGM{;lsE&z%pIHEOny8y`2jgx^i$Z1_i$qzRZJdY}v$l14g5ToEBx? zt5@uF_m(sXqu;If;5;vRX22L{n;?Q754O1U(%$oN*&5;Z+KZ>in7Rzj7^j^2%)2Fl zQ{4-yNoMYS44a9T!~;+x;EVXw>R@A<$uw(6%h`mYS2UD2xiT4CB^ z>qeLR`qc}4*l#*tjJ|k~3jw0??7d>>nKuv#@i^CXYyplh2(%K$!TNZnG`p@e8)uI* z{h6%isc-oh$oK$I)+TZ`Y?1l<)uAdViJ{U2VlI(gyl&ov&ooyR3PB-lWAux z5QlYm0iKg@J&D?pMKgUH!|e7J7-MauauN=$#q7AST!Ctu)TO%SA$i8Lw)Q$2RUY;% z@(L#h8eAi5d8NDH%5X=N;Y!(v+-fkhXTf;3hN8`@q^E0Sv2U(N8Wdlnxl5a4Va@ZA z6jb04j%p2gZQ9LRqqJ{n;1=4wsDnCy6Q-VyRyGfhr6v=1?!v0J;d)ggBIg^=9VlP~ zf}Yn3ELPO9du!MPjgl4edg>JqD&wQkW2zy0CG%cB1M4dV2T(t?RqTy>yl=61uD5ds^d00dzcrk62(6V$9XAWecqZh=I;v#H z8)EmSxkje?c|@!!iy-<-$BUdEO>R9>sdiE;K=o6x$J33(bhd%Mb#1NKYj1N4geFek zZcE(;!lFgq^Ws_YL4I>`y zAEvRx-ng(_Y83FxLT?uL5|9Q?arTV0HYk1_ES^c-ybuRyg$M6;d?cZQ>n8B$1VBD8 z_eb$EFc)dD*wq=7E#32SCLKl!}GCs8c&ha8%k7S4}B=sPe809}f{4kYFn7u(0|Kl!^)TT`Y(P)Ese5 z>hya+0=HG~`QSiFaff%lydg!>u&p_u^Tl25CzWK$x+qT1<&0bgMlbPowS_7;ko8C$ z!#LflWAO7?WK?iaRTZ%Et+J}}**euEFFj<1J4Sn|&##_7Qf@CZ@c*xJrTD3R9tLBnlpFK;GS*IiR z=W15sWPyT>uOEYqky0q3Iy^6NycBxSG-Hf;`vVAl@NiDJ!UYe(9}btpiaAV`N3XZ^ z+5WM|&!X_2M@sFBfPwZNlcYT-wd&9m=C`Ol+h zAzV1Z3e`mGhuXM}OVrC-CR$!{WPAo@u*pCkj=MG1-kfZYT+0I6chy29Uk?(#h_^V= zgho)xR#T(19;9~;UlHr7O)T9D zNv%-?05^~A$6AykXdkwkNv*-eb|PsbEB7p5Ksk|jB4b*1?`!wG^`E@v@)u?xdU7C1KiA@g^L0}QeXKN0U`by?7esXVqrdC>(4&zR7|_# zmU8IV=52szPuC!1OD*g4nn53vY_-XIP6~z>M{i$-7<1O(rH1DdTSkR3$0pd0%ZEA}eW^~J3%0Np+< zhns$!L(q?20dN^Prq^D*dtIcWC!}s!{-WbfiN~_tM>OORRD12|=btJ(%8N>1@CssK~pvSqL$cD`q5_?T4TH74V>uLMaZ37neRtNDc z33kE}6T4I4Epwl|+a#N}_AdQb7Ks)n$@poj%j5=AE$)(Tx9%DaBO)db?$!~3nQjZ< zl2^G_@EB^DY!2KWtmQq@PTh1-DSepFWiK>=fD~_uEtCcsENC1bP@$bqA8?OTI)uMh z;O~jCrntspD%wU1vR(-)0O1S9q3dRw=t&zC*iFfG-VE^N88g%yKjgdn;KpkzIeb-&o+4g z?>Ls0sN+SJ`kcev)YB4DKZ*R()8$J~&pdc^y`D5-)deHJtqi;05$#Hv?Og-hS| zlr)*qBhnar;j_rK0gJG=O#@F{CWX2F@>! zQ7FQx*Vbg5m5!0!Me*v9^n|u5Ib6MywNwN^$D6SQgfMBNYsH2u6?h>%T-jrx2~I@- zXp6OYe&ez4WlqAAmzQJNMHj|Z^6an_wHpkiSMwFRg4J#L(>ifRk0EnXMX}3z^_(Ed z=5FKl!vNfK)XOmSoR~3TBjxycGucDA^J4%g>-M zKb0@gZvYA(V2y&D_iQ?Fb|+yrdaFrRKvUwI)S!N2VeV(@piene71w*17j6%{({@DN zM+OCoSEPZ7Rp+pkQBCHZIO|hp5f)8*j*Hl%3!(LRPS2@5mvaF8yAY`8g43u?;Ncf0r5Mbcs)|I;_WX_vWgt-nNb1L8NTso>8b;hJ&0^dBzmr+ z+JYDl(hzc1OrCbb^26+9k>c9LNAq~&Qu=Pt!QjEZKJj`))7$XWwr~?P*2Qu5h-)q+ z0Tg=4#q$VDqBNLb^knDis4-2M!D<UneYJB9153U0%>AoX($!Q2gR z$lPZWqz2a#{u(;H>)`cFRM_H?tn+ESm(*(L55{ifDZQ`&=oZYBqaw9%7ho{WXa&1` z-Uu zHOr#HHrdM9_~=1vvQd(E_>L6^YC^EiOX62Wl8HWoQ%VnOQRUyIP;|jxT+m7WHVbu^GQjxB=w_^{m^@;It>rs`ryp<)L=>o;+`|d<{{XuQRqyX+GDn zncThRv^+}B?djl45{oNHN2z;wO-S?5Xvg8f%S=V-(K4qBtq@Pom>0X;ixoK{!)t3> zqS}YyK@T2{A9*NbE|lJsk?$xuSm?fVHq#DC;;lhrWqa{qjtj}IG< z-P=+0+w9m)yzq_Xc=o`Kwu*%z&!p?QCy8M_wh8oSH{$qf8f%Gq>FWjvjq1ih(V}|K z=9L!Kzz(X+l>y!J_ECGskbNvH*uiSG?r++4Lr@K9F{VIy!_)wKb;`{eGj`S+DIHzm zFvWTrRS-!H5|_ON5>OTq0z>bJK_yg9j5D3|ZZY}2;Vzt_7elMn?>0fSkBiI^AM^>0 zG$?v?tYY)I3Mi5BFt&o*6Eam=>)cy@WG~+%O%a1z%6BxOIMb(^W?34um}dEIP|>1p z_c4c%GkAl%SMZIfe<2FXS+Jn;I0~SrEdA9WbuN&UN|&xGthJD`LDTwM4FNpeCfNf< zan*xUYk3wrYWc2E@L6ykCGIE%9^G!}J>4CZ#KNX^B(keJJL8;Hmq6@?VK05_K`>YI z>7Z5zmNuElgrW^Mr#GtXiZ7zqpa?Yx_e+>c&DO$EHppD90MmbW(M5?2HCTh0XOj)D z$Mg{iaJGr@17=gMF#zS`P#>7XJ)GQP;X4y|^>-2>ni%rJ2{L@n#ih4>*}&8L4XTEm z;pMr?yMjs+&Pu)wp~b*4TuPTz@sxVDfFkv9!EgXqOUx*y)paKcEv)Pw+V;E&UzJ-4 z3o5;P`(y_dom)~Kd0NU%yI!h|grP2=V4E7stm1ptk5g%-yq*^?FtggzAYDqkdN&>Xe3*ca!wz(=2a+)O zE_pIkwnW6OL1o@}H4%mw639C`Z@p0=R*3<& zV4{07Zk(w?E*#^lJ4{4JEVc~#G)m0L7C7$GlM4EwyrxVMFd!qldQo5zJE%L+sq!<4 zH!_rTir7=pANW3h5Tm^d$BAJjou!1~JDTbp{+{l3&GPmmB$TO_;nF@Vj;L zu888@t#y2xWd!cP%rQOz+Aew>EaLAJ?|Mqsd3>4QWLHn?JGZy>y^ErK@1k_Ap(PNu z#%!y6J=Z?ijZf#{WXC*(G-MuPjM&U3cY+rpy=jFu^o%i3OAOW@U1YQ8uKt&OXywYJQ0IES7IpB%c7<-8p58UHajRH(ZpDtSoGK zHdPsfBYT%qv?&#=+jLoWaj;Cfkd~ z--!yO@9S(2rp{*scy4N#GZ5p5fQ6ow**N<83(VJe zLgxmuUO+Zttjje;uM@7w_bI+QGpdTn<>a>T9LS^t67YSNdcj%O!CYv8>9NaOo#d{Y z#yoJFGSvfIpk=;rGGq@$ZW=s%RtylGDd(3b$#*aw%snGcoho(zIIP>JAnPgdG_cZ7>8W4PLQ1D)aJ=ZjQ{9cJ#8=tY7VA8-BfT_rS5V>&ZMkFm z1auaCp9dDVs~|nmSdUv3p-vlZA}$a^d3CcZtbMYVd-Fi~@wC4ss5gxLhF`!NhVo(p zm8#fIhk?AT(rpFh2^qzAsJn(?nJ9hR zlL{}nGmR5x`;nERcgfA{jwQ<{q$FREQrXn&9XHdhjoAVR3MF(QtI2$AlaVt7{!8}U zRVFqHQdIZMgJX-mLJ4wp^`XK+)j^r1n=*&=0(#^Es_2x^x%8A+x}$^KxUnS5>=>jE zjIc*VO;77i42Me$YXs)VF?{(kDPV~kY|bd~kd3Vcv!5)8+QvrZ@i@I)bG^cGeE(5u zb`?@aCVF8vs+(lE;2E4*`K z#6o$ajBmIbVq{ph8moxdjEg0P&+I^F_NAGj?K_WxYIBX`Ef=72M-M2%cu{L-&^Hxl zO*g9#=0#@=2}C%-3iC{at~AlZEwTuHT@~gW30(Q|9$C&2*Sn;1WAO&@Scp`Of!n(4 zI_7z=cgRyX4q|C9^Nlgf56`Ei?#R*>!kl|>Zl(>o@wO>%C75lOdK(^z3#ADX_d8+p z7~FOD$LEom8;_wdDV}*PtVH@NrN+}7@Oc1xQTmeLbh=*?d+Mt7CMWFTMa`R$5j0m&2B~OMMyxNRxA&q@C_mFiZW1NZQVqVsAUEz_*5~sZr zH3(aJrvg_sItS}BrD-Xu+Y81cqvTEMRO^R`i}(5+ft1q-oIz8bLB!PDn9R9)L?tCq zlKF$>+fHkE8<8V?!9zGrPZ#qB7ukkR0)0%t1KiBP=FUFvm_^Q>le%X*br zEf=_hkXSpoS`^JeZh_ZGlWHzHMW#`1NN9QtvCjlA)tn9rYOJ^-TsxIM5+DX@<524% zR2EDewuh`n+$I7cJuX_{S^d(xMvlwu#QEvmL)B`{=eQR1Xd8=jWw|eLd^qZ$r0|nc zrY}knrWB&6r?b);jB=T4#Stwjw$PUY7OQenJ0a3cA%ho#DH7_%;cgjbw|(i`NM)fcE`0mQY0-ST5@VlDJr`VdDV5_aih15{Zm0%5 z6D&Pt{Dgv;l;>4Bl_>W)#W7bz`KncbwHq?ln4Ep@r2=1=2D8i>bi}Jk12+UrW0W&l z5%tr%5QSEVIRIO4l^4!!ikL{*TjG7xunu;#8Wa&Taww;K@)w1?UvB8gTMyfy@kM;$mn$96IW~;$N*4js?n}$N+sJA16Cb@;%ZT5{obkP z(^pTMYjlt9oau6bU>iFYqY?2X!aGokO|t`oml81r%@k%CI__i<71k0Gfy54|aG@B1 zx$l{;CK=gFPvOpIxzW0FjPqLQs31DFP#62;oof-)sCLS`Z4fV?^N0kWTH0v3M|E>t z<-mCerHNHNuVgV$7b2R7;h_p3wY}uHeD4v=1F&*S+>wLOG84&*N4+D8qFg+yukdob zgdDPs7Iz?)F#K@^)s=#Yv2cs^a0{)=M!y3+%+$P5=QGzNxCpD)V9o$!fj#6SD)*3C zn%Li53lGy8a{yRG`?W47pGin3RAty6LcL>qGmJ(&(+kLy?kB)1XKKm856D9=LyEP0^3b!c^E$y!?kjDDayXE73j%m z(diG+Y~{Tt+CG{0k~P zAbDuz8#)N`oRi-3AZ0RS0DhQqLtuakd^YH>cM$C5b|IFe zlqTa%%}G*WWi;#-G}XSxfX{D*H9#B7A#b9GUUeH|>q~^=IzDo(a+qj2><2px5f*}j zJ4DKE^-kVFK;Ix2Gbw}~#Sn>+-1_{c0h8aY`q2uoyJ#JY;`#tlbX>>tV8_10jG#A$8}A zt||~`TboGKeV)c%;4kh8urNoJTyg9Uw2zrf+MDqx0xU5=xP08JSGnu&#$%+k1rB7Q zw_Ooxng^b8v=cDi#$$1^KDH{sVi2-nN(3S$h$bmxOn7)(lu^#&2^bU6 zo~5bNCfh`mSdbsjvcrrdd{EX|xg=sdb}LdW1x1FtvcxyhRY@DY3iv!g&b zuPnmGN(E=1&fwE^f=3SgN*R^V6+52r4)FQpQILxjZ370en<}a8hQUHOPQ5ZswxCit zBVfLHkrXG%*7U0OCD*ILb>!ouoy6WMig+=BkDz;ybfAUjfqPIj`@m21?mmP0;cnpZ977Rs8g3;UD9<$P$`?s)e|2glLLa`3WF{jD{mHWDwT}u%}318 zl$ntv+fx87Y+3z)*f5={+{w$8Ez0}R_5wKFf~h_<_K2p0y%e)%ZV(nu=Ma3Lk0a7J zIVfszR~&S#msbY{!rpBFh=rw^@kwU~z5uYK-g>G+^K{JrBUN3GUY@<%B@uh}3ba?i z=v9G@01Lx}q|w8eTym2hk$vgA$pRHVIiLjNjn-I&S!&d9)3pT<-$ovF_M3 z&x@5F3%YpdSGrmBAgP{Y8F49LNrCGF=Inr8?I{Yw)T@Ih4nnV~CjsHX={{ecohJ|d$xML`g-Bh2h(Dvq@CECZ4wqbSy=S2$ zK7hT8I8QVBor?CkRX)-3995HJE=aFO`#ep|BfRylI+ndqeD;JD;_*Ix z5lM>sC|POf0Z?E!A7+j~P6sH|UbrJZ*z93~mkq6tm)Ofx0V?86Z?U`zxgG_JKH)ml z+&$ElM%Ed^5)dl_JV8|C+zg|raDPMC)56dE8XF5~Q#wUMbHwm(gF#XdZ-Tz1~GaS>F1nb;uB%XRMS$k=Nr+E*TrCk)qxsR-y&^LQb~ zNH24ov>IbFO52)XfU+j(y46c-_*UW75g-{Kg99$i?tjfr2 zoHQF@<$*xDC=b}cp8$3rcRok0dphT z=BlIoNLeY9Cz1xBlNP1lyEDV$Op#@%9v&$~NbfDvMTNNugxC%7yF=^&Jj?I~1Q=rC zQ(7iorj6TIl{O-XyUX480Es5GZrdwpA0fs_VHYME*)(*$xRR9)_LO{``NF6`5|N@L zA)`g#9sBw)zW|YyGA$=&dk_^WfIZM@(g>$RltgCF6E|h4RTKhTv!(@eH|H7l3$bHH zfvop*4DS^H9tsY?DM5f^AzoAV>I4d3bv*=pR^6Vvqyy+AhS``RcAO#4y;zaUeP&Ll zpmwi|8GA^T1ufaYPNcMtjnVs&0$vipyJwXOwyg*XUi8L-`ewEco)*U{V1 z2b)&vd7BC4WI{ld)WXorg92f}*-DCWA_ah1m(i-B4UcRFci7tN_0UmVW*{vY@?a}2 z4YZvXy{n!vZX#_QMi!qLPRYcJi*bocs%W6a*^g$GP}#p1rj9PoSkx?p+R#=HDR5(Qjr z`I*DBVWFFYTIoX8F~2%i?oe!c%Z1@W&(86_^d%a-m*qo7v{?Xd+fX=@z5#XYVJK? zU+J(x=A!2m_ypf_pt6=Omg319-+3z|KzbWK?XRNIhzzCfcn~PoeEHGr!*F2N*Qo^d zdM0y1JruhL%)9MBR;>*}b-FywxNQ2J?!5(zPQT^Jh%}os4GYpyeYX>M@*q&)jomC0 z5SPBCU1`gXSMZ*ZTsG<1BA;NOg_S_c+|x7cQ6-#kY8!}g7HWLSX4}SM%Sv8keHW;; zAwm^f-3)hhmNT$Ul>yR^A3%3R_MJF%9*Do*Rq0B8%4MlDb}+Z54-XJ^{qZY!2#i2n zRt59IO`4AxjrKWN^e*xB%B#SVn+v>fF-aW4eG!B2o;6lEyxBE-1x1fRrIx}eq6Vp6 zI$W_zFRu+|U$_YSi&T~Evr4uKkNger=A2B$$u5Vix$@pGy`|Z(St!yU|wXHQ}-eF}#k-F#t} za%wm3VI@)2gHghSniKZ=Ar;9gFb$Xz(*iZp6NsT5L9vQ?I?s5NrotyAh6`@T(n0B} z{)4rfHWOs`Twe6#x|(hIT_OUcz>Cnjh{%n%vr1-;(<}jocH|;71d~BX(hH92UXBu& zJ7?ON2Sab2QVJFaJB!$vnf$PPQW)A3{F$X2Ke%-9d{G+VQ`~n0`YLR3X&3g0Vmpz! zQC0lHIRXfTAH#}ND>cXEG|ueigkA*45kV&dn4t>Hy`70Xex5p#7)2Gl*3VC(S($|s zN)IuRsg#ihK%8*^pG7j-&?(lLbQeJqHub%1zp}Fpcwk6YlM9A8gB(Wdr98H|Wc><; zRB`f*#5cy&@v&2Z3^4}{g0BpV@T}(aqsRRS)6bb;`_}QjY_Qkqsel$~`xpSWdKy=D zjXAE}Uh}-%EEg2x@OhQ9NH`cCm!gkZ)nw!8dyt#}q|pifz!01aGig|60nR)7jp(qh zW*To=EDk2!n=B51QCz4tW$`3c6@UjEsnpyI5e|c|a|jMmjS-Tghl8I%->RRT=d2Ji zk~=vh>*AC@aCEM~zC3k$8&+`dNsB^o3BJ6G854Y~5he*(Lup8NeU<15hrv@6uOh7* zhaOD4%=Po+v?8<6EHhzj0$Su4;~7qp_uQWK-trO_(6+3f_KlhPOQaHk=hwrryCPM4 zk|B}C4RfR3M_1IN2S7^&gh6I~4UCRY+-4t?kZXEAI3D&i97ffN$UJFM4w%O#n#Jqm zaPSSQf5OT%p#>G0L+Oq-YXdfTZUC;JHQp#la<-9ZT$+@#s^_c&+w2Z|3J+koNLQ#E z@<5?`uvS-9ix~VZ$h^96WFvYG{TMkhRL-A3*)9#=!+Yj4g71_-oN741M~;X}c4KM) zRpb@3!kbYz&JvQ>IveR(W0XGIn}EyZuf2laX@S=kKYP-4zUDGL)q_cXX3M&*-WP-muY?Pq^^Ntqf+vA@jGYRSqAn0?hjP4>Hk*RYQ*%()krKxiLNdU`su5ElkH?F z^0w2K(yBxXz%Ur*)ZC=``2s^c99Jk3)d6nu*wTS~Z)85o+}! zYC-O*K7J`e_)`|tkEGc~j9!$eZvYObgAoG&37AjBcw>28+3~77*X?m*?x=ZZhvc0CDQ6pIwn7no2lz6(4HfbD2JkY*+ ztqtw)FieTCrC#*x8J*Riqw8Y~O?y!>{o(ao&2A8eAw*4si#-1i2K7xWxls8EjO=;chv+vQGLm8u2w zEAOOtH`pXZPak#Dd-sTywJQOcmo*6As1n&TaYe_h+M8;|b~HJBF7K1purFFc1YsG$ zDdnk43D|)mho@!jla)!0Dm&M;l2}i?chm_mSE*vu)I)W%UM5$B^Ps``5E;GHQfYw8 zv9oozDYh7E0G^kj+H;%avGHbx@Ya@BWiQ)g3I$E`OiM&X#=&M*PyCJY<9-(}s3~H@ z-0H15;lB1IQD@YOuu^+(f^4$&6(7;0KW+mAw1oK&*OxT6V8wZZ zfH9PJ;)Amy>NBl7G(W1eWpc2!eG|iGqnwOUP3`5G{rVi$k=*bR#XQT^ker7N3o2M{ z@H~y?;Zg*xs_xp=W`K3ys^9cOz%beM<^~;T>NlQ%^%jky`izLVLHsS-Vw1{~MT&ZK zcu{#{3->y1)@MX!8fU`HU4TMbi=zcR6Kjw|2TzjNd}44F%^^6AACrpSYDh8d2A)Sz zyyav*<=RM00269_lMf6VVNgkr1sw*YpI6pdVY^v+HV(!$z9u>3P64Jdg_(TfO9?SN z@sf~ql-3?JPDTKwv; zLyHm)jGsaDs0Aog>G4AZlAaB!+B@vW9cArP`LHOH*fYsW0A4)bD;68R1z>Cr%m>Ft zEv>gWNyGhkrCKQkU^T+cV7!rDi*>&n^_Zd#rJ;lORsgW5ti*P8)lx*Cmy}OqUNSdF zNbl>FC>cjb425SE>~#iuhWX^86|_y7@~Hvt+vW@@BGkv=RU?k7oOR~!^OU1m5L)6RSerR8>SWuzB!UOC2zMar}v`tHi6GG^cbf>Ulev7 zJ}=^;XRC%Dg3q83-wzjOe$s(4HbPwbw6#T}G;=IzzHmj5FI>GGH!5cq$PQ&f8!+NM zAn91-sh8SAc(HLUf%DMNq$+#*fx) zd*ehMhR<58#*^ak0rKN3h{UwzeyD!`@p_eF&XN|P!JIjBuBCAXTU_sz=CD^2rW?VF z@gbtu6~(>0y&eo9At$I$APBB=-e>Do^9QQ8XuP6S7S%deAT!8dJvLA-Dn^ zVkgbT^j?Cg%j;O~o<2mLZJn+?%}i{qz{Xh6-7-5{3&-d63g{M~Jf=Hb$u41EQ5(ry zYoZP3T% zYP&Adkm-yY1C2Ez5H##HEk3L#hI*nlw1~Lxc6APb@RdZB1U;y|)tc)?acktZ3|Yn` zXdHg81%XK#Jn=OR$V2EPd+VCijf*cQ*&dk6U~Dk?4kQ{2u5M(NL(S}Ao!*wtY^nh%D^r@mTUsO+huI}?&Qoc7 z8xOm!)Yv)Iq@ZeO9H{LwU$v1@XlUUZkmg*1dt9s;xiYLa0?48Z38VO=TJ(|HRE>*V zq}27ojQFv(o@b!0VG?^p3tSqf;=v#qEE*Zq$s`6)9$?#Bk9b9yw-_D+$S&3wu4vN0 zf*uTOcmsAZ3M;;O&nS$=kHbj0^mV}sP+$!?y~n0jwlH3s*puLr{Ma2WT|iu~ zkFOZsz4tElHe1|6oATsYJOWbh_d@teRIS-)qAR@)fpaT>r80ff>nd8#*Pc)>j8aQ% z`o@!E4ntaJ*w~lR6kz%vuu=x00FL3Woth34t70oN^3fux>RSq{-tbU0Y|GH#hg+B( zi5w^=f>Dx?2;#*fM5RoI1=(a<;Yu69Op1#t5jNXK82P|>{1JKj{CK$yN!X|2+J3}S z#Dcp|I4;j$VzEYV-UoRMiLQj4CYh33cYs__GienW?W5!glLC7OBWe2Rl12?*hdFj9#KuhsCeljhBfT}Wo> zeH?*0av;`@B1m&emUAaZ?1**PmZZXu)>rHRz|WY;_=SVu1L4Z@Yr^G+S`Rr(YH+4_ z6s}@)EVnM?Zc=f$MwYalib5-=-}xbSK7Z2*qO(*=`GUx@Dn!rXF(nR6ax#&R$oL66 z`FNF7iEBrBrGoe!Mfy*DxdsWb~g zvZvZl*#gbp8_JT)^ut)6-gpga0s7z$5MMJTp8_=9I=&oJoTKF?KV7#6&S&t}2NdFo zs(a3099BJX0)MICgl{rGfhG6?HmW2KOAeypVJ~-H2CZy8?)9|OaZ1<6ECOskyRwB{ z{Yo_(72e~rTX|CpY%QuzJNB0Nq@SqkWGTcPma`$KiHe#~QQ@{2FFJ@{6U%#TMVY{= zA=|_J0`n|Sx|$NvEGvDCy!GvacTlkY;B{VLk4Q}g>H>u=V8Eyi^Z3jnl$HU%=j`#g zaYDF>c9?blBUZbrgKvkxyY?V&T&tVD99o?@& zF+*et$d*;3JCzD(8&BDBq`5m3OfRJ;o0zRQSg6+FK^B$9RMzx+tMCYjmiHkXuwJJVg<3%+Wx`G{DzeegzM8hGwm}YJ2D=rFmmLO{sLU zWS}@Y&s>n5y$i#D1oJdftiAL)1ysp`dneVzjpl?}o!%BdWW?JGV!Xv7-hp?Irc4m< zk;dD+(t-DK2hB0w5i}QIH@8xTs8~Isk}NesEO89F0N=MSUp*j5KjQ}p^>{&SnGolg zm1YY^VPdgi;Wr?btkR;3IK3u;&4WbD(qr-kjxhPOssr4*afH9U#z$2Y z&XxgKd~$pn_f4#S5ld+AfDpag2!uBvFil?P)J~`&rV`%5n5--Mp|yi&QM^a0;kgv% z4*4D%ynNyJn)`Ykh7`B9&0PWYBJ18}R<76!Kc_AYvRQ576G^_fP?qAh6VOsNX*y4@ zY3-EgCs8}zE0y9N4~%kMNIGq1^;8>HC_@LAf;v^M#M0%eqsQOTK*v52pJfovCAt}+ z_z&66=zEg0W0+D+Pzlf1LHPkyV~R-cS!98FMZ>8Zxizx)0cEmvSuAvQE1jqs@zAT# zcpYrMsgv76&-dw%=c^uZm^t6$C>{g{9|DpGgO2D)sLq~n)+%q9A_sANUy$5r?azX^xgvxdX~KQ#y1OA5}Fo6 z>?*Ow>@Pt)j z8ZtrPt+un63Kt$W$>4&@Q>K^VPT1H8>aNh}@SbgsS%l4K0=ek}T3C6Y>r4AahV(#P z$+N>@%NfV&!M<*=on(}8SQO@>G~R6itzF74^v)*stT|GdsoF;-(B1}d4+f;)8>$Tq zxaaLF;=r0gU@q9IeSu23datwRg=bqBMW|xxi0&YAhxE7+#@j{o&XHH%;B|6|ZS)`w zXKj>y41AHS;jXs&oyZ7!mA_Ses|r1n1WhgPsLxmh5j}=scBU%e>7^Yg;ZVyRtc(c) zSfB1D5@Y%sATSd)+e&RUgfiaM(o|?_@3eVa-qSVHEV%=|^A>i6-x8}xj5PK^sG_Vo zF1IJ@=1uNl+-xz{#unL-D4b{bhOy)kbnWZZ*LEo<5Hj)%)!wqlcAK1q`3jiKJ(zR0 zlsC{1nb}H#yjX)_D7Id{i`2Zx(LND;qLtfG1t`6Hrzu#ejlJM{Tj5Yy(f8sA)fag0 zt?^Pc`#eWWmyvt8a4KH!n$f^Dmp#GYF3M+{?E-URt0A?lnZ2w0%ru6Dl7!9s{$n<- zo?U?To}~B7=$U0wSsPKu>3gC@7NOVGNW2g2;Kf2h#SgrmnASY^kfMp_%%S(HK__$x z1fx;s$!=Z|mmO9Ly^O-eHBeXvMYI0(G9Bb zKwOi;V12a@;~_pg19^^aY9s|un;YROK?vvo*aC!FVCFL;Ta~-0`7pLN9+39fk4zn9kWOH9bu6lDzbrS4O2RA>VP&rx( zV{!r)SI*P+V@OP4W`0tVX6Qcq_}=3?OMRA*Fvo2H-5N^B&pW4(xW*nQ$mJE;RE#N) zLHav#7IU6>r(;WeN7+j`kb0f{unT|*ICcS(nwMYkVxwfQNNU;&7cM4TwdWPIBX%Gj zxpKQ6!hn(TP_WT}E3vcC<1k!)Emg2Naxa#7-rE`&7k&_hgy*FYO~PR+F@*l+lsdw8 zGaR%fx#ixY0N{Bw?=EkQUEYhaGBnP4ZA>ZPL8S1|eUoU+bwS%*aWmT1NGHT_4=m43 zHu$1MMGk;O$YfgINd^YNZ37apzEWY3?ST}X@ic$G{+-zoQCrv6y5YM9D{==tap<}Y z7@=ez4966F7OwAUchGXCG4;u4N)!QZEYPT`m=YPINVn`Tdp(+vt;w`OyB+y6m3inQle z7!8*t;Bye^y?5}|Nu#H`4jK&l_QtP=L$P*3s+$2oC&vMZ=w*wNq_5F~FeEcO6*q$? zNAzf|JBibMG(l&yM5a#=7x|rtiK4$c;PFn3DQAr*y^ONs$A?`k@VdoKAq~@6>5`?T z8D9Ccup3KL&<6U$)M?fCwDJj*FM?~MRKFJqWI*mE-VRw()O%;5*Ycj?*xWF1kXY)z z^TIK^Tv)1y_^3%nBYM=?E)Sv9URODbgEZP8VyTUX;kLbAo9XZ<$c8J9r|LLd^8kb& zDKnaDg~k%)nL>QRd}{8aKA=8(FJsnA@<9R&aIoVTPFn-xOPDpv{DfBN z3tH4ST5m<+o!LZ^J$W|F&bY7YnU4%eHw*hD(gh%p@uptq)hy|YDLO0l3&hdj(R*

nJLYnt<$PQi86UnxvK+ceSbB#mC5$ zWG^s$@|*!)MrkW5Vy?t9>lc(1O2F=7OwM9Ez3;B^G$%LKjia8*hkv>G{9|`&2@y2 zcg(5qltuZ$yTx}f>@koo@s`xpY=gn57?+iOC)5IaA4B0BorE7CBsI-~Ok21hB#)Q{ zQ>?iBA~)KAx5Hrna0^NfV7j#D0AlXMnYu-F_|#O<~kRV*yO8#m#AH@>gzW}hpDnu0p;W-8UPc;CK7jVFx<`-@>xbl8uMpSQZSFToUq>6g*A-byKQeb01+k3$`JuBIvtV zcoNh;K|CtVc6^X2B1A33OH(i!PH67&l*ysM5m@k@Fq-<|){;Hq=Rk{F*Xc$;FDY^> z=X@v9fWvvArfOY^CsNOivg&lvS5>KDc%QykVS$FkB?^*xgz(OU!CtxNECrty-|$(e zc2DHIMV5}!U4yg9=fISM?K%&knlLy391#r&O`pJP6a zc{%Y&@*&XF>a+K(UK>+2M@8vkfW9R%RZ5MJ#;M}ce*Q2KPn91af=KJX*qW(t(QQRO zjyrrLx2RKdor@V^IZsHIAt}ej=DR3WBl&6;1Y5={+|PY*{{^ z>^___NXXfT5oaFl2MyBgVvK%J-YkVgRpIP*;stBL_=0`zSx4I_P+czGB)^$T1ka15 zf!DXCQ1xP*{6^DWNAr7lmkF<5%DXhjL{>M%%4Y>HPHs^qD-woNR)?|$PY(GBW86hK zPCQUVF*%)#+!$k|ORqel>+PhH+Q#&8e<#lo5BFs^y36d!6Ud^orIPbTY{z~pP>XQ^ zNACt7QC4VW5=se#l@wxPMyYbk_B@Z=i6gpR^;`_&G+I%Zr_u%DZN7W3Z>T9zi~%x$ zT5ZP%C91U9)U3;s=hD?ljZ!@5&1{R-K7}(5ckF8B;Sz5opFjjZO4UbF(~3rAO5MPt zr-2)GQYDc3aIuI^LDH&hd^}!0b{EjMIJAX23&ZMKjnXrh*ULFNmTow&&#Ck<@3|Pi zjHnkEsN^m2>=7JGaw1hiO0`Qo?OjHDBm0PgHYn666nIQ1DKM;pWgoV&kmfuq$UcoFD#&3)}AZ zTv7rlUzX;u5o8ucr9XJDhr67OEvMsnI^-00P6><9aPOB3Oh^h>ec{-!%DvV*qBZxN z8utibC@RZw#>7C~!el4w_108vKfQTD3I_Kksa(0Jz(d_H;k?ndO~MPMRi}5EwTm-w zEPQ1+ILp^J8=Uk!XKQycY)+6s8xZZuDhpX>it_m^c9p*P5&r~7+WzCV?~TLkxbFj) z9YrrX00{xjO$)WaASu}cBhVRgiG2wWVDg%iSm5=@)@Z3yGnJ`IE7%HJ#UKZ8a*hSD zIN7o-x4Kc~UepBdu_%(-^JUr^ty~DhU?rx}T9xcac=PlY(Ub*K@Wvg6IA)Q+R37H8 z^~HlOOE{pM*>uC(y9-=vT5;NpYEr!d=w7__UY?W}TjbCDSgmQ#`2-;0pw~4F?AVQx z^~D;*toUK?aJyw}GLRU}M$#)dQhebT1KCBi)(-*kU`DhM*Oys^lj&)Zl!-|=q?X-f z(MCI{_9R-ZwZWC4r5~@?(@>?PIV#X{vD#ep1f0&o4N}p9;%=HIMHYSmgfnlNLTcjW zd)oLUL5|Lz2D;#a1B18vTLK>}iMK>S1kElMA+jfUhH28y3zotG)yL?oyppy-C>O$m zyP>ZhuB*dBqH|k;21y^2L%c^ll@DN(Ekry^?Kpy6N6N<)yM1gOfDeJSZ68MmF7V;? zgc)T=EI%sIYj8s#(^ZC3zI$Y+5zXubSz^9cO|6OqLI(8kR;(c@o+f&A-}p=AqI|nc z5Sr*vkXrr5Wc1znUK%+|EI_*yJ)F5k-U0~rR|u-d1^Y_XdMzJ&#k4yV(Y`P<6i;iJ zctxG+=q7g}08ip6R2Fbv5R;?U5GJOHfjBE@vm7~%h8tI$1SZD-`RFIN&%U^Q#!+H; zcl4l0RI_m~ulG5HNN(x|_}h}?*b1xC=E^)+H*Z(6VF9@i(0#G*)n)5ldDLO(Dw8L+n6s~Hx)L}#1TH+=09#`1q=8h#LhC(9Y|NHGtN!OmcD#=5* zu)`7$h)7?(INhV_?j|lv!>D$=n7T8r$To-?orKGmmkn#LDlAoglP{?dUPuW_t-Irh z?nu0u(&tZDPJtyEI-k8BPsXyKXTsVMZF1adZ!3@6;;Oe*po^~TVJAE5_7Hi$ekLIZ z2yZxIXUwqLL*59FR*PU3?Q!H~+jFn($yPBMweT}AG(<^fu?Nq<1{JWKMdSfv-)0b| zYqmRK$WyrTl_Ao{N$)5C>uH<&8*oi4c}umUdC#?@3?|vD;|!m|p%pDyXJYqzokJDc zI_f;ISUqnM3dS#e@-4(uc!TJVITX#7HL$W=aZh^Q;(_+K0Om^ACoCo987yn(&=~cyoa&C&FtuGE;c0XJg;Zg+7d9g%QzHL zN~*79yBcrvxKu=?s~-iWDLHi%%xgA*0-hTs*J9O(F-^C2p8`#e}KziUbEo^3UXx1)FtgjyTkWW~ad z4Z}{`JWY$ioweP9$pH-AR9M-W9_4!+Z^o{fB#L_RWwGGPj+r#tebU08;QIH0tW1%w z4U3_#Qhj9JI$X%0s0Re};W!k|(VBz%v>`}nRKhR7>bjxNvmm#jkFCXpbxUa{vZSi_ zrDmH80&>@a9(IiD;d+g z1s+m0b5cE(hG`^|-SAk_8Q6R;-BqifY$BK)0d_<*)EW;uzB%S8BRLh=Ix%i3akW6U z^}t4ON`7lOV?os5w~91DY8>7Clr5R$*cF1Q6%hN9IqEk?^}*HiPHwM8l>bn$E+HRj8Mjg7~k8WN$( z549lQ&X%pVw#q=8s4zQS0Xap3kUGf)kp4h^}uQU0*++0mcjI~KR ze2gT$1DR?{nJmUsi`cj2*W8g%(xUl-x$L1ZWt!RL-P#K^nn&-RTNoHb1`fodW-Y16 zp*J~cNwNUy(S=*F^1xD_wbnfe2%Hms&=+xMIki|op`cYcj+&#eHGM<`9{jG-Aw@YO zIwb()fhBiyhW2H>gQgo=V8eI(;^xXTP=ub!tdWDhcsV6Kd2{vB;;5iErj;RM+xrQw z2R(H~YRPN@SCe3Ufe*PPDl}t`P;chR1=#s!zCupeOc9>vDNbyXRqpKgO>TUj$~0u2 z)alHM2$_yF$mGN$yyIo*IwP7q7Q98PFkw7vnZtVq!*Ue16>q{v4RVy+Zd;}oAITAv z%FUZo9H~QoIb&+=pkh~8r)Xoe>r|N$6bO6Ljb0Cq&n=vuU0;bTY^zvPbM%ex1_H}n zFfgc>kdhdkTRZU%tM<(;;Xqu5t_^ zZXl~Ys8cy{fUfh6r4hJDY3tf2{tTc_6rXeQb5*aTN9L*UBoB3MIt1aU~@hD59s+?ek z(dOB*$0LM_JK@Q$(zp%_Zoc%!yLu{M>R3Sy;T9h#zrfgjje+_C#TRVec+~P~ySFeb z`wiPT!IWkQn!|#Z4!XuB(`%eIekwfdT5&=^;!nb!cGkvK`8&0CQzk7ZYZ;~`FI=fZ_c$7W zN`HHRk6<3}UNyUucRmApZEDL;eP*!tF)U?f1FLL4+!v$VFsx)dehnm5-eXU*DByO- zdM5*uA|IYMlFQ@mkX?>n>l0l9_(E6u z66tvW1KF9~&Is}oRj!*w*X8hwV3ECo*DPjaA~Ddd z1vAt~g!fjOMIxm(B3y??*IwOG5VO9S&C;wcF^uEn)K}COMwmfI0VII`i$ltY6!=v$3Q(oPtEG@U4?& z`m&yZy$+!MNmrJ6)dk2uRHGL zyU;go4Mf!L9xek#=Q0(!+?qNdw}sOX7KgvaP~|E;MSUwYL$KLF`~TB;+gsj zam^|oHGLB8J-P~$(W3Z{F{_gOa9+aY!jPkNLM6`Ks>PfT>JX|un}zFB&8L@oZ$08b zG^Gp_XM$CMX|mO>)mp+mrNQaSwVtH3r@-sQ1Zx&4T4yH5PLrl1zVVEUuvX)ZJbyyS z&+s*@SSy+@l(4@iVhc}=XN%{V%ETtO=kjc;w?5@#x!Ez$lpr={J1AD&(AneKe986{ z-eH_9q|E^DFa$KY-ZO0ROe zr&0CIu79yRyrx}2efh$S&hjL9EG*%5A5&&d_uhf+!hO9$fNCB9uqRsO{R>f5(vp5`mJH@uJ!#^7zghd|0cs;o`H%%M!7jO-EGB}bh1 zAWj>B*NpbaVnegN-d-SQ>%4cEERsi zBd>8__f>~-JJO;*Wlb=RqRzEidE_D(Poyo=tYVe#S;Ym6BYE$=w^wl{NLz@!C#Rlo-K46cIdvXD?T<3gWe! zJ_2-YjFcxU&GW$0*j5M>lK7c5-Ek7OY|Okoqm#Ft_VDEKQi;Db&8q8s(mPiOs(IYSrLx#$px(0HsmTN7E*5lH z;Fo4Bk))DKnSPGl?@ASApG_p-g>lGo0*C-di_-SN&1re+OG-6>EXKSWT+|U0I!u1E zov>}N_mp3}95b&saa0X7OXB@77;)9Y7SY^iX71t9VT3SaS6@}!5ed7K8UZjrmZo=c z`{hTg6N<1y^KCW$v5PwtWP+`0nA$Qd^|Xk!Q6M2A)Hsz!3XJd}w@D zF4eQSZ}WCnydh(^-6ec^uN6;a7+{=yX1Xf|3Ty@llcG<%v`E1MwYEGFBfPxaZQ-I` zI&8cN%r}7a;3D6{_aD9f$TPH5Cmubl2Uu7~P2n$#L3B87A)a@Ly%rpi1~75!<4(l# zh9PRhNMA9f+**6Lar1_i^%-;fB9@!hNUQ3@c_wQYv=pTsY+&?Qi89o87rKa1*CVmC zMfVvOiWZ_-%ER|uHiB33rA1t}v1{nd43xTdwe{{cn&Ci=gG(Jc=_e^2i^qv|Am{VA zQj?LHgyXRq<*h$y0cHfZeHjll`&qAxQ@sKTdfsW+_oOs;$RTZCXn?gepOUtAvD2#N zLAG{m-n0@FWIs8S1p+=Kh+8wk!HI|cj2{rlHA?kZIuS2Jr(?Z|4LR0nWn(e1By95a z$CdX4ALxo3Wm$-taxxNM{@h9 zo6%tO%#YX~%PV!*5Iq4bC?HM3jd!~ZBhxKNLmE#(#G|;IV7cA7Vcd^Q1G8Mlu%wwu>Nw1fd7v<>TVYf|FwQ0~?Apvrz zQqz7-feZ9@fOofV2keE#C%q(J^jb?n!o$$VtB=IZaKmqFnM@yEBz8LTu(2y+E_sO? z9C34UdNwvG085&|(mY(W;p~>W=q1#)LoIB$e*50=&_jJ|HW|I!LF(s6b+JGt*}*S( z#+G6k-p;axCd?UkO~bX>)qCK`TPV><8EAo6yAnVxub%_0#R}*xH%M=(61|{Q1-^8O zd+_KHks0B=7WaG>-Y^!KYFHb`@87+*V3!66^)NJ8hJxQH@vtA}q4G5fHOT6ii?e9h zaRmek279Uz#0W|a=0cb77L`~tN@GGoq3Q@z$ZJM7p<8V&xv_zjCd`=DkSPLXcqO=Z zgMxyhva32dYWBLHXbg~Zrz47?ki+%d~*djgf)?|Mwi+1Wh(4yJEO z3r6gAp4v# zl&<8C>OIV)C*;2apGB-5g}mwT4n4I+N^uv) z2OFLHfKLIilqQ85vSY}#J&|)B0fLaoVKhUrcaIV5xhM-@C){qempYJEwXHSvbN2{; zIGmbnJc{EHSlXrFdY+@C`EbLvc=i)trZ0RI>rK_`rMoSmmx0~4O6WHN7Yp^481`;P+yRIN98r~p^7zVnlRNfVc*9BbymLME z#;{SC?+q*1(-}eseqx=|sZWsM00{s{+E^e9x~cEU5wV!Xv1|au9&Lp<@fa1Oz&oiF zJ51bdK@(h;dGFCHjeTO}X%A~J*bcPT37*E~fBa%B$2}2ANmy(EHbx;%BWzF|Sa=7E zxTm$~Z1vCrx?mR7)LfcXcjIVnO~bhy0ZS3!I8fhAdHV$QV>1v7&T2IYZ1oQjDSJU0yjM8SepV zh1@zQ9KVEWlvUsDXDY>P8Ou3@$ zS_*&xm$lG;7`oF}&MG<(HKz z=BCNWFznkcVXNZlTSKnC4Uqa4bs1h`vJSFpdBxk9?bS5Sd0ic^7>+t-Emq>`RCY5i z$f2e;(CQHH04iAS?HZ&=%o4Pw!Xbe}yqeiL8|q>Kw#f%xsXTYD#u8T$^et$WzT$go z7%#RgT7bo8?~&J~v(ss{PZ?h|_?m*{I8-n6!|M=W+s(p;_DE{a69B3~HGIyJ0eF`} znIv&<^8u{gNfF|`0;L_T*(eg~@F&~HQH-AakPoLB%!D*+pS9%3JK^oyX}1(z++Y<6 zkKUKF`Bu8PFA(IlTx`zk@Z2!|5flET1b~+B@RMJGFMgdhVuDZAaGkIQFy#@uRnwj9 z)ky9bj5^`EWbTu3cnB1ZaJuAne)vqWhu$Q-!YyH^dn!HHp=AD?b0CP|RDoQ8AB=?5 z7Cs~)g0sBdz->=BRbsp_C1GCeW$~y-@1gZcZsnVvB%%7Mm__9D5mlL1=#cG-C{|ur#yJi#o;n;-j!PkZRi` zn4nLVo^y&JgdZ6zGkc5zmyWAK`x)HWZ78=Iqp_AnfUumm`T0gRKfq!GXc~vpLK+l8 z7smh@Jk)&8`W7#rC<0f6UUd5UQYRR-CLi9ki`Ny*O3K^KNyJv{C)|VDOm#sx@Q#Ga z8QjdkK#Ss5w6zcW(l%qFIK7k+#SXQXGH;6`g=z z-g*?p%<0VA2Sr}nl6ZVu^QilFoekF6Z=Sz_Ov>X_pfOQl&MoXadFyM>!ddztDP1dX zBRxu(fvVc1-Lvdg9?~&7Ca4ABSvEPgKU&@*)PPo~Ks|PR=t7z{L_&>RwSjwpmm0{s!2;0H)fgQ?icHd^&16 zgScm0@nXQ-H2NKoHF#}G9vY5oiqSZ%COVp2*RaIRiZ}B)<1t zTF&4>`vvHQiCfX=E%K}N(T55rulr3m0OC!=vChfFq8?oEdq(XKm9z1g=#D*gEcT^h z@rZo$$TGGY`)C!zcQo&ik5nWxF~OqOlcCFOvBCv_hTxt>!-aI{MO>f^lK?%z(UvjB zim_G0VH(V-!NVB~s`5DTwrV-|GK*WOp&;tDWOOl5P-db~T14*fVg_~{YbjHD46Me} za}ZHY2y}rub6gAIvF+7cK+$U$PqGD*Y;5|SvVf)>XCOms_vSr3CojHt>WFVko~0NW z3-Ot6Hw(Rq3~375do7J)ubB82^(;JOj*V8OM6gmjWO$*}>OIOUy{_|KBa=6G^Stt% z`0Na==*x}UHBbj|Nzh~TfHzkGm$?-u6O9I>k#?@cdGEFNcpi(K;EW=!SX)H;MQXZk zcItZ$0Gh|h57Y_9L2;ggf(=kY6o@?F@PQmA&Z$fDBv`ZcJ>@7XeF7{ce5!js!w>G!t9YLTt9=pN z`D{3mG&CKbIT=0xb8&mB@`l$>p@|SK@&c@3U#~)m7@*tj1JReU{$gL;$Y7d#KS-9x zhVAkyVa76*f^GAB)e~#!w6Pz^5}??yS#Gf_*G|kv?QN(Tk(iiow4baV#)lZpol@w zptt6|SwvEPd`b+;nguJBfY^pAXG&S^j~{q_t3$=_R%9a4JDz}`tfr6RiV>KtE{ z3C1JMTER_Jin5tD34$5tuza^%WT4Uz%LrmxXPWs+t>ynfFq6N_aXYLB)^eW8Yl zL=oZ@%!xAP!?$#Bjb@H-DPu{J(brO@481I!of2y>4It1v!MCc3BscOkcDKj#r*Voj zFXOEY9w26IOD`DSg0v31C3P0 z;}mrfi%z%i$zGvk+ZBGb+bWAoF!UpFd)vLUB1%MCjfQGpU4VwBF8-zXN)MtfF z!?5S~e9wnYOaUH3WKQrs>56^xYVUPuzM#)pJ0S$=U1cL*t+`jm1;y+ejLTU5nxs&yZ9u?4)n4zOhQVAc z5Os;gNI!okUCFH?iP?b{djByT93#TVnfDxmg^IQfAY(A)Ewo^0$Kr(c5?yFlcxF5} zxfzOl+c7H-6PtxboM8y&TC*<0&+A5d?kO!m-J#;VPLa{7ec9+4+l{D?Rf<403%oG* z6@wV@4-6Z5c*N<{wAl6zag<)T5_qeGAiJYB_ZiVkOS|ljrNre?FQ%n1L0xr^RbQNT z(^tSGHMrRZcRuB8PwJ^Tv-nuV^8>MXiG#yyLoq-Q()3;*u#6M%w9EsY^UZo7*5Zhb zezYB6^;qi$n=+ZfE}Q2H|om%dWoJ3H^J$H$v^Ci&#}Ot^%yicQMLm?D0VS zN%88`+CG`ULxet08QtuDsTgM_9Y%$i;tNE5rXy3bvI-W8iagF{(a-JXT=GTkP0mw6 zN$YmnblKv>$Ls!vgBXmltebMkTD60{t)0;E^f|$;`)r2gCKbUlLsK+Fnkcf!qryT4 zE0E z3j=Iz-&NN`NnB2srQ1l%h=fmuAAK=QTHevXv}x7znD5#j`c+XXaLT6N!`Vc9h8|}j z403R1zHXF`@ya=-Yrq$B98a5^eI@brPd$nMd! zLbt#S?l+J=>=#fo0Zc;(rj|!a6M5nk^WDIy&oE)bdFH&iT2%Ry6w%|~Fu4<|Y!vIVMHWvF<7QA23yE!}vHF6h>0)8|+ zcW*FB$-!tCC*I(@cish&y@2TNA#6-jj|+8sLnI8|vIq$|U+>d35itYr+!xk(#;zA0 z*TKbjxYKMk)V#;_a8QmE#EX_8_2>mU-0=;RGk9n4$>2V^ccT3mIm;Mz@SrgKe6-{} zp$*xoVK!loXNDY@PR{g1_98lqf{(0yqu^q!qn&DZ7g9>rc6g8Wap>&{5gpIH0;WxJ zVUXty>?=BK{9By@4CC(&gF` zq9^E{@Z5r1Ee%N)1mMAYn6e51Y;)LABunTT;3>Rv=a|!P5JvTR0i;dEm0EAwC~81q zPe%2wTh}-ltG>*Gxm0E_fJ`%2sk8Hyh@r=tHSQ8{a}@kVTnR5C#&h1p1!MR zxF~PC&efhWPuVK8C!U8eLWb;|gUxuSB`gXyT)vLWwYFT-L+{{prC3`nRjyM#(pp-F z40o#GcPM(>wZ$Xyl3P}+36-Q-b{@_wGEn0xs()teLm?-7Sb8E&lTJK5^GnCi#+P#GuY585< zjiC#$9vowi!@VVaSF|49u{xQW1O}Y2DR{XW`^?3}(#cvB@E~7$8&RJoz2UbJz7r}3 zbCYc8)~nH;KF5=$llpWHR4~Xw*bk=R1g}`YWW}pxZ?}W1(m_k7OrKGvZ>F@HJo^RLL7#MT}|LH{i+@Z)fJ29-a9D@(w>Vm!Q;A1J``v`p@B8* zS{|GOD?rm1`NT`DJw_|t#cb-udSG9^dZ0`xV?Oo=)shJR^ELwq+>O zBApQz>d;Pte&=qB77BJ=wWH>jo3EEtk;V4<9^7a%l_i5ds(RG^+NeB&c8aHH zk#&x__B`_xEbm!r?e=Psvd6vJB4_LZ9kHtKxI{a)gTk?IikDy)1oDtGXijiF!QUHD zjk7zQnChfcPFYGXd4%4V6P=dJ9(05W6Aib4DF^hbNy9bd{1@+%KvUkKZo) zk;m)W$jT7*?v05@+hjDX-posDN9QO9OJ4FInsyMQ6f~qZ{4}I{sNSd@@HfT&MX{_X zRSF@!=?=9$IX~7J^DZL2dR$ul7PmD$yecUe)p(y=-f`Julq11yiIaR_xLgB-oa>db)m@nvzqU!5zhlUKp zfU0hp_qOi{+pOlqO*BU6UAPItxpO+CRyR`<9LV#k$asll*LtC%xB^E(DTWdZ11jx` z*#j8e_aDZ2a3{2@Pz-PcWa3v#)8IlJB0{0&tfCL@`KvHxOaRe-h8`=*Y{8CQuOO9Q z#WrXrk-gZkel@RC^##xLxFMjV3C~ zO?j{~#L5a{SMCV9Z{`}DIW}TEpF)}7I&Nl(C3@%bUZw=UfQ${H-LeSSf<>topX=mQ0tLi7gLqk7kxf_JkGhVzkJ+RY^A8I88I62PJ?jll5&<33EE zrPs^Dq1IU&ZK>5`ui=(-s%A?cK{@ez&r!GOz0!~ll!Mpd;Us-f2nIVaQoOxnFW53(OThBmN#?GC?QHDWz_OX?Cw zwWe;v)c1~Y3G#7t7ik$aaO0HbKBNW0sOmz)(5Xb(?~0z*N<9VDLkb9-qmVr2mSE;|yA zuG^Q3WaF(6i&k*yyC;}^5*O;{ z7I41o2K#V}3gty=Z$j~T?Tx?eJY^$c-Q4a{I(EZXZLLjhOBS1hS7WguIFdtvG)b*R zSbcTjEBRg)<>B#7GBF>B4g2A=|HpCY5ZcHUVh2E%vpu<?VXfGCES1O!z8V`Hfr zPkFKSA1@3jo< zCuqOW(;J>%injou~d<-7FTe6&DrDkQI1}@;V{c@i}#> z*CvtJtGIoAdJtOtta5H*tDz=$svDeX9CCZc0wart(ZnW%tXpgnS2=t}o^Hs^RfZ!) zd&J9-^i^3uC#r<2*$(%`PzN=T{{OKrHVRtm7EMOVhQ>*je| z%X+T{{1EwN7gF0eKkN2={G8rl{K_sK&&=S*65SxhZDN7JIE{P=+uHh0MiV*Nm&{NY zsEL>1$a5jgq9pI41jgpFOtLq8U6ZP0MYlP46`W5fh8az~e(vSny1&yOTb&B|C&0<*#ll$1su8lZr;KR4^c+9KJYakS~rmz~$xIi+9yfR%~Od?hW~b=aD}5PKC00 zL9Ddt6mnW=Dv8Qw<%>n`<8q46NxL1@lb#k?=L4&!uI_jN>ayjyI>B76>l` z!(YS>iJM>;Hn>)S85`vMWHyWA zOPdkagmj<;WAeL-}2fcBFfv6PUJcq%%x#&(HD`kUhZ#s~T zf@V-BLo8nh*YXXwA#Y}=zIB*BL=qcTk(1olrn+LLGOegcLVfwl2wEtfzQR#@@bGp$ zpdWA6VGSC?v{s1`?nrCf-8;dP>Kz%zBGehGwbW3m0h^o_N>)81H?*nv^)z~0Nb0&g z_(J@UO>$o?-U~rae{so=_G&cF=$ie-RRSAPU&tG{p#~Cz!FjAJxV@N^6%Q`sxlG$T zxYv}NkSYd;#RwEvrh^%24pDNHOL3g$-PF%v000hNK!i8k)@5oWrdsbr+*2}_Je%uv zrU0x6N40*u0I%Px6TrU7lC^4eD(jK=uDc3VqW!^*t&uTTzzzcMl)QNo1JWEr)0sRY zFF5q=4bnWX$EQBR;O|7unS}{SmXSsx-iw}GW2RjncR76+1Mj)zYu5}Dx^2TUbCwHl z(uH)O;&YiaGioUi8cD}&?DBMRiSI#v}*dHK`EoOjV^#vTc-N z-s!GcZ)Lecv6UT?q00`L1bi4TXSVf$1C(#6XYRz-c*2;Wk8qUOIV0LbRrA!)H#8Np z(A3ixPB?D)FvHr2Dz$&Epzu?3+lN1C<@z+ArcLSj7Cl*;;8IFTMTCZWFL4Q1BX%IG zo@a2XSUh=YgV(aWT)BO|$E*$qFseNlNU~JBQ|>73r=$3=&ope7HLo|Mw@Ghx%-=hK z$(Ev_n@rg7Yw#)Ho4n?fqzoTeH|>aeW~a&$W$p?#uO494@6XO*tTudxpVYVzVjCjT z3{)iPUJ_n7$UG8MamN75SLdilU?-V~Pg$IhT1DAw*B+OeIw@e91{fyl6Ork4X(9`@ zjaWrek{?BLTR=zCW?J4YM%2J$@%-}TOCNkVBHFu(kxb+G2rptzDGa3!Vuc~L4wND&h_)C{m3dguutRPKopJn`_5NbFG$$~~PfDu@vyK!tF5_(DO$ zA<4y7eWr*u;}WiGIVa{WGlfoWPcmkl10}Rl9wco!j7HGp^Xc~W;KoYofar4qPxNNo z%I;5+3BE|~b2Tcg9S;v0do!8Gvs>gOCTbgxm~5)2w2!aIBRc`td{Rfl!=)GA1fE-Y0!M)uiD129AckF{qHndZ@qYwb)|(U?fs6m$oLYL>JS%i(U4 z9F3wlA1y(ws7GBW#$_v(3>dBhBIz3FIRsZ(#H&W*QAG{Am7hwveia#T77pUj3r!R^ zXio*A=DD-V0n#$3Md;H!ztlLWcEFx#0Z9~;|d_#|p za}4Oz_x!;Cl5L{r4IV1JU~DE#ADhHF12Y|XF9&tcP}L<#V*B3kBX8IkzJ{pt;j4-v zgG;$Xt{Rz2b5;!)dz*E671YHA!g^%;w#oF~61*p3i|TNLPu}{&c{#Wd)R+OWVBX2+ zVSI156kt7*f>9n%bG6t#>?hIW zxIkrZ{GfOMX;wn0u(%D9G<03GQ&fOlJOrN$O{BFvFd=DeNO)W~;IMj5(A~Qc-EzZ^ z3aVP6>7Ui|aX0zAWZkdPYK~6qXP? z)is+GBnrc#<%m{p*D47mrFuyBjk15COxeqrRgkny_CjRno-V3JxK2G*Y5S3eKJD^A zh_92O9y3l9b4v=7J?FvBTr4?wV6b=beC3JNLBD0_F$94c-KHm26mGor4uQ8O4W)$w z(?QEp+6jdi)FeUmRX+q;sNo&rhe1mBgmp}Y5Oyi^T3u@c_)>5&%v<3`N7-JG&AkL& zPN6<{Yif5vvyThd`n)hayrGLl^))n7(|EaTAUfuWz3QhKga5L;_BO5x)Z7fVQhcx< z;5;?ui8qlU`yMo_E*+oEM7`#oeh+V*w*A%KN@fvOzBZ4`K1B`gqUZP0Q^v8P_l0-U zRVtw9Zow?PI6z~go0UK#=z`=X>(bFHh;Z2F{GPPmP{()xY!w2}>$sMT06&~66EI_E+>@|f)4XmwCiP+%`*6uvV&Bg@MJea7H2r> zBiF0!D0I%0*hb&*jS?870-_TrLN>uJTJ&WGUg?16lg=h)Q@*sWVYI3Q8+T@X5f`@4 zA_&RRkc5#}D~;t{#NpCRt6ExYaGk9QwtP`m|M7P-B{%5 z^DR4gvP-*Hwbn`l>xCe83^GfdXX27g@|b?MIfpX_OHm%vbp!=~eW->oQFGWSQ-YWf&ib+`%{m zOpqV*O>5^a?MFyO1|BU9B&GZJ(~e^Y+Y#cEPs*kE<$ zI`WRmZ6^uJnbl7thCrn1K%Fnk=lQwHEu@B_GcmO8`Kk!nQ`kJ}mZcc38Q`~%W!$)_ zCZ0!0lQ4uK`#pQq#Kb6jH`k`fLT_pES)i4Ycre#8(w5Gw^gVEpD^kzaE2mK(aC|S@ zPM#XzyF2|4=0_msrgZ!UN$V^~ManCo-8E`+%~lE|C)HfrN}3zXlby6E$Eu4?MYyEN zb50+-U*Cyir%?n;XPrM{m6c=;p?d7yBa0zugi5j=ita<%9eZst`w|~i$CDO@3TIf- zxiE-V{9KdH%I!E-L!T^8G*#g45HXGn=3b3X@AB$p3dy687#NI64~M!odbWZyV*oSv ze4RLWcQqpy+Mhm$hhz52-8QS%p1xGd3nx3%eVq` znD)j4O{|{GcxIq2JggJvJpv@vHFG3#<%6nLW zC)VbtZ`ug8LZ+b#_&lIyfo24Ib>RHOS8j51@E}1aBSme}p`1=+FO{dlBTlH2v9~AY z^;+sIb3K?bp*NgSjP0DWi@Wf6!M26iZ&x+}DE!&2ZKuvYTN$P@Rg=BWWU@R~jGYwQ z;uYa^-RJ2`HiU)hOrC0a0tGJz@hn zO_)dTK`+;&W}k>d^_CCBAQXh>xB^U+!Na-cM9H&$PZ7s)In!Mv+?s3CPAo1K2tzu^ z&3p6|O^noD_#FsIs5A;o&F#cx7Y7u)VWEwnU|Tr%x`B0(wz%(L-l`XFpjcy=z{QAH)&W2v}PM_ZCd z!iC3l+KEJ25+?5^Eufrp;dVuhv?R&E+Lz6G6c`ga*acp92%(_#iZRN2%_9<`4+5=9 zunn$3vyn7;9a-#YOegf#%rh=S366O6o}@a-(;+fEyI*9e75l*~A`(BfEvr!9)g!o) zln@G5pC`OhM)NE{hWJJ&JGSc%#Z~I)&7@-#pOO$VMS(u+=7`OHEc5s#s{B!)`LZr0 zOTfIkOpaAv6&4VExh<{jCk?k7%mKxxpsn<*J!-SLh+~#q+@2wJ@{E(hdcT`fqJ?$Yl$V@fsxo(H^(K*7 zBK6ohMV>^o=%lm+&NU)?kUc73Q+RirxNqT=@Kxw1%}QT17sqWmId4uIucJ#T&S@^+ z2xhit7*|0p_JC)q+Z&%iXtN-~eN4h^U}9&gJlF}EF6+}U3tA=^+bq!;A>x3+1K2@I zdVIyxdj+1?He?~Mrs~EOgHO2>45AGM0<@LW%&pW{IT<7MRTwej&W58z9?vqOr(;aY zJbPdXLQ=9g6bJ=7WO;^z3R_)b8ahH|2{rFbu~ObZzrz=ux@GME~J!q$Q~`t%7BMY-sqDOpDlXOO*bzl^`Q@pX4mt4$MBpCUnVVZ z#fd#OBUnXB%am1Jsu1OuPgvh-;~JsWRP(9d3pLImvG*yjp`2Vtz9W35z&St z$6P;->!(qxFO0{9-azRP1o!C)vAnbiXIi|i5`7k|m*^WjQv~b1M@L)9+-8TXPY@qg znY6?qdA@>-gV1C>O@fTD zKguJb+u%r=r#9C62(aalzj>Q-a0ZEzPQMG z__AFb11JD&9*_90YA15?aO_ZU4&=3Owx5q+JR~81@40Uyx$UqX7&DmjEi(wEk*c%y zsAR%xKr>b-SG@3}97?d8R4a5i#0o>JQrrnox*e^ii`{!5mh4ZZ0#$fxsSY&-2OIdIXwT#Cd z?^eEXHu%Ch!+BIrfgO0gUqqbjRISqnxSSB7a0b)NOv_7r1@+V<-|@?cf#?joUR{dwrF~WbR5;N zymy8-Trcc^63fXX38n2epVR}zBC>u&5V8H-ImM`)%kpLfAV|DCiQgIMttR;G8!u5N z6vC}ado>Rd3Sg!#q%!LrYm5%=CW0RM^wobn^HX-SYZ?#*jmsbvq~SZbK~i}nv(3H$ zhY*&>HjvMN0~kC4;6nSN`nI>FH&N=1Ll={@+8U@u=ETB@4(B5~e`|-X8V_*Sa$#S( z5tZn4TLdQFq0U-8uj&fBcm^Ivmh5{NunTPvQz|FIr-y*@jE>pPXiM*Py+WXkxUBJa zY`Md%u?182IOb%xi)jedR z2bT-O5poWORD~315uEw>h%VQJp36L4?9%M1Zi3`26dyZ2d?-L1h=f%Fa(?=3_d2o% z1Gp}7s7JPtzz>C|T}*hD^uY*NCFA*Y9FudA1)h_&+J_t~|o7j9Kr~&74rko@9usZ5@ zCNT4S-coj&EtX-MP!;gzQ?|_2c#gPHC;3X0cb4(um00S*o?^noq~buILn~BLj9h-t zE)I9Hq$&BmW(wLc0cVY2Lw1qTEe-|%avsfL0Zc|Oq#Kkc<%V=rrEq2G6$`|vGH+(d zTX<~TRoW*8Q8ic_BOGz++TX?kJlcRQcXRB1O7T$6uRN(^+|^#rRXo`x(2TlQF8=@i z?3jtmtRUQYj}6A8AMo`V20VFbXsrtO{55mx0YKIg97EeR}O+RDUYDF>tCWBNq1Ov0S|7G2vUdN+~g#X^8AHE5{BdFXWE4$@%H z0gN(?m~yG@#r64cbw9Waf0c{^Y13lKA z9kd z_pMz**hO#%?)BY4v8xFeHi{_Jcm_Mi1Fvq|G8)H4HhjjI3imyp;0gojGFH*I%nim7 zIFvRr#FU(C4f;vOUV|L}&aj&dsFu03?_(D4mkvmAd4RDJC<(c8VHjXP$a$I=i z6=iJfGWU!6Wup5lBRU4@j$AwqzPyp5qacVvmn^Z=!Be^J@=dsGY}r?mI=xoix8FO8A;embS@c3Z`*(*XO>^h9KnH!I&R~^&4TBTI0@ysM@hRH8 z+iIQ*ogEj^?IwdtF;r{!QbJ#Ky4~1nIr477hrxEQwe|pfUe2?8P|H1h@Bm-n14`nD z1}~h5cdu@DUNwSm^b775x?py!iqVUQz!FiD#)iPasS575hj6>h&~mH;DFesW*a_AH zE@4wJV5(eWNG?uD$}tOL;i(MMu?S1OtV)7le^&#%_?ylGxs_eh5e=LFu0<%$z6E&Dl94Y z^g{@GGx=O*w^d$?2qic=h+5dmNTn>yqZs5*qFug-osH}VioyHPmg)_?0-j$Oi zVQ*f}$b>(;_nO9`;beNR<{_bq^J8^-pxsFlT0<%P&~8n3$Obg}O3jMdq()*F9=@6h zN0>nwzZ{3DnwHKhe%%IFa7*cMB`R3n8KW`1qVKV143D1)=6UmtO`_2>+BINiE4`Ei zQY{%`Y^YT@k%K9yhnxpIVSf7#&Tt2uq*Yr=$yCK}YbX@PDb0#q-Jk=EZ>_vih7T(vv_xcMTV)GcMPo& z&Z1AUxL6*%@Gb?*SJaJ#-_tNRtv&7b$|l_KJMeZbt9UNr_l#VPt4W?7Snk|AN;j`yPD zNOeS_SN1fo6!kq0)F`Ixu8e-YlziJCBmpUv$6@mNMmfln8^;Hl)BIC-qm9k^g(ElK-Sl0seB zct_gX!Ct%@1R5iPS#*>&Q-MJEWoBQp*l;1)^(da|s*$EuXV#O0wW!2v)Gp3-oBi}!S#%2A?%4-vE$xlVBj4!yTW=lp@agoieK zZZPW|;VJ&w6XT)8zVD0nku zu$d-MyBu5l6Y2Oq5^jwgrpvV#>WV0(N)6ChEQ8cLCPT}d2~d8)55aO}$!B9cvw#Mp znwM@C#}rj#DNK}CrZR*dB+6734UQ}U5d-Z8EP*rOUXLqzj>n2kJI7|s8TzJkNLt-7 z_`XM0xzHHZBehDCM(Rs@vSFEoBq9_iv8P?)Ht&wLv$3-4ZRkS`k?JnWpJgyYgInyS zqihxOSUg32XL;IcY+L0O7Idx_>8oYSha`(&(D>jk|7AKgLV>h-CWN26VAvf*Lj{!< zlwxTNGjes$?=?*07z&Gus5R_udJC|VC_axuWgGdnz!6e5x=0}TOjcJV-TO>VbX~bO@ z=xBWughSV+v)O^i3wtCNuT#+n?Hz@RN}dyV7O4slc}5o24eUVsK;-I+dWgmz6_shb z>~2WM#Dj}QZ_w~tEatSPR`D5IjSXXCQ4S~$i7VvY8jAq$0dz# zLHa6Us@Lj4*Ax>XT&PGOh{S{8%qyc(Bt9zP<8pf1LxaEq=5CT9iDj%z$2hd66^-#0 z86(ck*d2WEO~>*CO13RL?^%Pyy_kv%S+%A(b`|u@($mpyCOIfvs<`M5(ujxG`jTqU z%?(5sBsrfn8(8_y($Y#S6(3-4Nr|GZg|d4tGxj#ReK9QnUrcLuswHqDKbM+j@|a+g zd$E+RjYQ8%qJpL%CsU5VMuGeY`RsYKA}k_aOiN?qd0BShOB3WwqG#}0QQIgVv*WAO zeMa5(#P=@{Le3*Ptk=?`34nduqkbbL8tgH?I_)Gx6S~)K zBdwtz?Q7$EI!gWC1JmfhTtvdn>J>&G^&=CRQycVv*pBz+>ChX}p}_42mK@1j$SYxi zXU*u|3{XyQIS0?q!JW}3<=Mb9vs}TT1|P|K@m5(xiK92g^?@9aSxaQk+p#w!laHD6 zUPK8L17?o9^8-F$w=nYaLq#@XSf8BYD(X6e>?3HOUCyM%rVFK6wQC0LLp&*9?{z7p=P!QqB)rwMA?b6w5+;j8uXG@T$dR{nPlB3-l2jyWl ziby`-7M|l*WN#QvfH+)FPggvN-$@y}@LPCtDSghsFsUxKCJUUzJG6_mD`aAW@U;8_ z%MHgS)2pW7U5#4yqg%J9z}VOWMwEAR9YpVBy2^x5?EDP{L%9Yz_G!)=%j@=66`(on zFWbxWO+M%~vaaPtUPRSL&tcf7SB^6+Lru_y5TfEKMEL_GE46$J}85~B76YJ0|YlK2g-HS`TC&c5yQ22WW2LDpM{YKHM=-KY8H4mh9 zyPk);;G0rinj#fSurBw6*%8rTHJ?zOuZS?k$<-3gqXVFG5{(&UpVwpp$Pwl4B*tA` zN1`tuIGk@cP=X%ot30doS;lb5synkBaO#TCM2C5zT62|`>0D2GLdCtuPy2e&wu&YW zd{ru0kVQ>=;9B(okodc`g8@NzbqnvzxDkBVee4d`kg-p(uGFkD;4O01^TgOU(5=E3 z^%%IH)Pz2is8&#AC=yE_L1PEvaxRzJ?H3u?LpU5x^;VxcJ%7{Q(cABlArJ}Kdx=BpSsviVyJ?{iyn0oV;(Lu9B0|X>w4v~Q2CCxgcHupM^ z1a1}F@i|`~UBtwx#qjbm9dsVe8v}iJW#~NgBrqyQO1id?v;%R?B;2<)GUVfKOeHe!u-Wl^p9tl{h5~tfulfNo8JmblhHzOz z52TlyAK|U+E9I=?Y&H(rA6%K;;Nj2IR)nHw<_BJHr zIRS5uHRwxBEeMzACBKoJX1on&6)~rz}8`PvmmapC&^4X^32H0ZXqVWbkjKX^g z8P+wcR~^a|tIv7}Ew2NZ`2PKf5;2A(@JJ7Q0|0ld|>Yf3L(UEq7|;XNLT z<&DJU=w4H_2E!!5CmYf%6?dEf9{fUc0m#7+6gC<-m9w9}=P#LQM*8V(H`dF>wMmAx zNU`aV_Q&Q;sIXi7@Tt@2sw-Bhg?5*qt&EG&fru~6LoA`gvP+oC5!kS{vRHdn81PCA zrWvFB05#C<^Zzhak3`W*Hiu^xe62ldqVhg@y&T*;Ir!y{J>@ zrL64=>1tOiALL<-t3L(&I&cgRmy=Z_W*7rXGTL~NtO9cCRnu_ zt6x^v-dt(5dpzfH;Rer~wwAMIJJUn8b;XW^3w$fd_DXYt$A6JXoV9!v+@qWmkC^`B z`rS}WJP42SgUL~H>+#-qx-YmK_WX<#?4BlKTyb`B%p>cmwUa2f&^Vm8=uiaf$>%VYg6*hHHz8L~B1 zN{m36h)jofYNfr^OUH8)U2W*nu^kub=xxqgs2`y0@Flm4q$v|_p136KQ{iDFqj%KR zlFov9$(@E)(epOF2Or4^Spyp83idiaJ<#x5$=tx?LmF#0jRNC4ZIjzk@g^{)IB1=e z+^d`iGH8iqFWJZ8t;qo=XH2e?nDlcoDitk|Kv?L@(Y44UZJfCYFJzGV{NhewN)|{Gfk3DjIqbyHFRu~O< zAHMCEZ}4?^zGkw9lynZ&e!GnPqDZjmC~JYei1iI;ObCoWJxVo}E_tLn9dw77^(I@! zo-Q)7G|Am7P;a`34p$c-e_ad^W!*eaGj(4yMVrsv#Dp$C4Jre+l-_M!mfiN*_w&0P z4ib4c%0>jE!;(GVb%<2(v}pZ3YmipP%f&^~cFL5R>iXV6>M ze6k$-3{sq*IPK60zZvABZ3LmbzL@Q4UVm>D?gYVd5K>8rVGx6WNzxHQCK+e5OrB*5 z9YMRea9?AqtLUx+E7c4?ff-A*7nDomc8Dfe_3-t-VDBXbM`=v^vC`}3%Pxg|Nc|RF z%wIxt;!WIDym<5Ikmr0cqA1YPDxGd#2Fe3Af4dJ}+xc@*5t{APIPk^M*%PmB;VMj% zCo(bv1np)F(Ff%WmlAOZkp4XQnrJ;9U~Uo%<8(jZEDRY^8f zoc-eTJ%SVr$W=`>mONYGo-2Q%u0d8Kq2XBfTIcC9VAs@)Z=Au7tCHBE}}23q6k_{!qowHJlQS+x@&pNsOb8LOP>-x zemz&c5i{j@?&H`GTMsB(ryNlDjo9QdRt1B@lO&uBKl6w)a#)dQEY;rP1yP~_c7~T( z8z%J#vY0XPQ7U)u;gfVa5IZ3!7{rh_Sd6xF9MEo?wL}8G0N}@A3Hy#mu_D}i=&oAA zEpA~OIGH^$eId=5#FF1^BI2x)1co6>gdq?SkCA6RxO%5tE>s4`PiO4Cm=Lnu%3YOb zMrC2NYzPqT+|zqDUTuunjn818OBeSU36j38MY#!8*mg$y{d5ipi0UEUi+kSFC9W$i z!1i$U2=;BBEHoI4jLM?E7kG`Oj+qTDdm%5o%Y^oMUwwit-v>r~;qH&bF|KkrFN5B& zzW2h+U@aY6X~H)cm<&f#(wUgf?x_dMC}cX{#S)|j+1yrUODM$v*j5m?<|>HqsK~ZF z+<6$db7E}F&Q5ZH`Yejq+##gRsen*%br*0Ip@gyBP4mvn%8pK>%LXYXUu?X#q4G;> zo8&WKj2IfM&Qf{il#V^tXL_cJNXUWnyt;`?YhZ(D+s5nSbttXN40-MaL{>8HQ~($G z)iray2iy~3gqn!N{vX-#f@qx{8kf7pL_Sl=6=m8ayYNsWMI}Hwlvj)E=yMKa9urn# z(O$3t2m?Yq8IWbWL+z{y*eT6!Z45~??plg?NH1cuH^T0snzmd~Co`N*0#Z@iS(>>^iS7PNX|CgrKl ziO!ajk=qbD@N0W)0kp#!d9Qm@i8fZ9Qq2MXdqfnaaa1BRpvFv~pkPGx;8G84H81>+D*8@>d(%czVr7=la!McY!Y z(uCDCjRpb_eMlh`wMMi|^?|d!*XT|oe&%lzHr)!en@^V+W~93Dt(F{h%Tb%W$2-R$ zdMVz@;20F!$G9#Tpz1c9SBwO}c=t4GyeH@Nt{J=K%ZHu;6}}q}+$7Q7rFhj#zUMjJ zgrEzzMNd!)%UYvft=?0JRUI{KCMCZ1#zr#oT@#u}i+jTWC6T|z_}@g zCF>!3m9K?9B!}CRuy3;clk8`EtOUbQFU2Fu8%CEkRRGKv0+VDS zwTjc=ChvO8js)BBa)X1gopM|vzkSj+%pNtTDGE%#;~*}^mDQzfZu7Cy?h3?d{w)1-K>(DBrBB*fVu9yx1YlIcd7`q{madNn~~ zzE2M(T~1zyMZZJpQ+$B<>h11$y|x7 z&(leI#3%*O*FDPKBx43ttK;(tkQSz}mEn_k@1Pq2;#Tqa6`jY6jyKsI)IP9qEt&ul zqe)I-88= z-J4c^R*Fge+5}kR5ncvoDCA2#c*kZ1*Q5PH9_|vqK@7Hf#x8)5AHtRQt>7g2JHtV~ zDtuC+cnp)6cm)U4R4nF^kF@aIGK}P{ow&zaC0UmVtnN7Q**x^vOP4%KrpYG`IT=o? zFVdOrN8jzJt`Cf4K7>szsQ01|%-(iwoI1T5$T_>T37@@bNIqVd(OmMeXT1-2@xWlE za;0H{@m%s*(|d0o7SO@m@C<-~v~3lW31w_;42uC?JuIy+ zSei@m?0_V0GTwB9rQhqVDW)WsC_@y=x2ip;cZ%tRL(Ij^I&l^wFRN^*RM8?g2{G-- z(;M;OD0AaITgl1wP=c`R_72u$s=QpIo#Vg~~}hLs_b zTL8DP@}qZ_L59uH`d*0(V9^Hbs58{CH?p7&#zon7*b(c!1brBWhu(0U@!k{IIPH#J zqqES9jC=8tIq8bzrBSu9ip0v@799zN*Ay!4dymNAVn5k7`C^+Fl@-R{*j8zKPQ(Z) zYTx1uhhXlPepX&p2WkAsdoObcl#IBcMuX8g5Glot`HGtWWZ_A)zZ5L8^b!wI1JO1Jg0mw;G53n{;)ZE#-sx4pd#M7W5U(L@tGPHIp$g8z zZhQ$p=X3>;i#hrZ$j8kA{tV z^En#dJ=2~+&cerDAuwp2`ybra!EX#8w4PB}qy?hQEIv{!yMBiFhVb#i0}}AM@g&p) zeMdTv;Icvx>NvK6dCa^0>7#zvUfe;@kDwu0?!^2d9@A4^UvuP%@vLUZ=G09-Q-~y5 zz+?}OxP$;DF=e#c2MSmZdaDUbd-dJKFY#o0g@DAgq2#ebEG3)`Ribs}Ng*Elng!t` zO@O@f)ojHpria1FaNR108P2*|C-D}lX31jt04qo0W%EJ^X}y)v(PYtzo+dSOZl1@F zsH~VzN$2JW5lo-#aAL7XKtrPEW-1zC+ZgbuXD$_v$LU)KTC?+fNK_{+jdSdGQ7Er! zWWZ$+gTgyn^03-CAhQW-a7x#FGanloUhPZ_PNI5?!P2XFgDl~Sp=PxWD97;(pY?-&l1oA=@BMT!e9p7Od2VaN;M zj;T`bxu=~&@t$fukhQ=#(ybF#g5I-vcq5oO^j>CBRl8NhD@)dv#`N+AS?l)%*S#0R z(-d4V;*q~q?juRM368_udyyS5{ro+TTqqVPQ?HZYvq{_2K%~8AHyr5;eJ>s_0A0Uq zQSFwS@CIP$*U#|z;Lczc(y|19!~N(B?q(k=sHd+>m1+e-V0!@|_PC%l7+#|&n%iL# zJ)j_RfKjjBm&Vrk=y`-^xF5r6;!LdWx^@LN(xkQ7V>UEO=QYREG~0FI#ttxwh}`BnfT`V!Gr50frE zZ4#t%x@2i|V3zTw#Q{99MW16rX1@-2r8dTRo!aX~J!=GmIQZV%iAw`k!Pyrw)B4n} zM=$4j!+&(+vvw5#%MJtpBk{xDyFRE+6hN&P90g#y8tLpu$Q3(cZJzvIHY36ecrL4t zG~q*gI7-{? z0VS#T);c^`(6%18GWk!P=|xn94XX#Ja*$w3h@tiBtjHk8-nib-aOH~$D{hye%?o1% z7P?~YI$fDYns=|PH51M$m)s=Tfvk*!W=lDY-!O4&lGs+g^tb&$e6F`~I>U#F<4z4U zJezS~A5rW=vei5Vs0gf_)_e4{#uR3sz~s6RF6JW!4rz5R5?p|T` zY)#8rX^53?b$C#S^RXZhOVXV&bwO+pnc6im*Mh=TmP8B}xQTGwi&C2l=Wct~Vo*RY z2c>)w={)>BR8cUTIVDq?LCFLbC&@$+M#WnYnmQ+#s0#iYC&z4@3? z6K70yBdtm!x$K=D>ItT=IFEPuV-7T9A6?(8O>eDA0)U~JV}{M+DcnedN2^sgqc;S2 zp2!Zc?_qq>?eImn56$vH@k?XZ4tN|6TKE9;9R&73zl7>gRDmN)&8Nj{A;wr`($83T zhtW=v(Kp6tk^4bXR)Qpov$^WAt~e-py=Mc7>q%THuf2RMVIYbGhiil1Lxb*=wHG`9 zsi@NwL0#|q2~Kw6Lt&hZ=w7?`JPs+AvbE2&1m*z)YY(AoRj{=a08O5EHNAWpt11PC zVa6b0X$Of?iK;tf6Ix^R>JezeqX8;|DY$tnu?gx;3zY;kc?YwB>!N)x{=@q;IxVXe zW$x{2UR0o`vTEm==1ga3JGj}-qG`w}%9|aPs0#GAa*E12yn9V>8V`7J?@77kwCMRP zlPSR~O)eqg=6bM^Q6vZ6RCuW8>d%-R+v6w;CAUrAzLoBrip>ocFEUZs=&XxUNi`3C zVMQYil1at&1jo*GBfZP63T6WDDsZFl#i#{cw0X2JYW6ZW?F*KXK<88jEk6ZNB%yhn zMB1uvp1;a=4;c-A?Ij(kKuw`HTO~o{s)po7Mw2g}r@sU9g;&ybPAsx^mE2wr@^x+i zaTS0xdWzVb3sZY&D&ur!`2<3%5u0MNQOgprB%>Svs-zH38c0`Wsg=ORF$t}oVRvL? zNe42By61(1qC2i1H+_QcmtdXsQE+o8oMd_G1*O9xQypw*zRsBB_Jfe> z;HTyZEN)wk^s-K5P$AoZyX$9H=340TaMc@JQdDkL@mgBPce zL*_Y%JaL(xYm3sRrmJKGdb9>M*jI)F&zHsx4inUp7u@nL+mKc5Dmv3dJfo84{$sPg(u!$*^i#bzE#?JUB_1a%Eb;!hLZ@>DPBtLvqV!*RC&d8%5sC; zVpfL^+{geLeBR1kstm?7Sq+?BY%y>B^^D=>o3hbxX!8Te(IG#D(vpbe$CHapj@UYR zRXd%;xK9j$9##+RV@{3H9ZTDNFH3e|Ou7KF-?qBtFdQD&DVUMDVh}{eu7DRz6|U{; zyfUuN_k>dO_49ebTns(Ea9#?uY-U#pO`TcR1x{vy`J{nXFXq6|8UX3B_!ECkTNb?T z=t+)U>VvRwfq3aVsk6tuNyvur_C?}5^;K&jqvM(xe-kwVnS{@%h3`S2X;|=3osL%n z8`I`Mn?u>wDsRs17(aR_d@Ijf&^;%O1NU~yQsX_C6tDsa&_)t-C@$hrX&}|G*Ien3 zXy(A=5ROSX9yO%gX<;S4^9qu;G>(_Ja&Nf?O!i^&$)UUYfB?`d(XOl9*KG&VNLp^M z`&f}%-~ha-!w?8c;+mL553 znkRO&W-^xG?6gbpK2y@c0pwxst%)T}tqPqYLyKqW4e$B{@${_P?k$wQ?0nu$FCgur zJlxf@qhA!7-jS@Cy{C-Jar&rM$8n3vL`l#QQIF|4kiNQ#+-=K4$|GO(*V$m6m=wyT zkz)=gO`=81+ia&10>f`{c#3_IcZ#C4;RNLD&fhs)d+kdiy(7A+XfG?A#|*8{1%{;h zX@Ql)Fieg)n2L-dRh0UN{JQ?uD^Jooj9n2fVr=2c+A zt9y+t>QlJTu1)5;4%X_-Y^O$WB6qABmXCD!73MpXD2Cm&oud}p^?P?%ilbT85StUZ z_XORq685&7=G>Y=>J;<}=nk~-#ml$FDzO#&q|RIm_0~|U-({&q()wmAvR%C8;CKsx z7jIr8j<1z*Ko~Y8CiqZ39e^%-(f+_+G)YPhT*>aCzcrwEW^OD8R_|!S%eAL>+l8i> zu=~|>;_+jqJI#76GI4HHhQmUfon{;qyGbRlqo1D#j6%u{2)r1%_cDrsuR1+lX|!vh zAm-lm5I%1g1%7Y_z0dNt`-V%3#rlOY3di^hgkXJ_B{|E_nMPny3LSNP~D zTV|_I%$v?%&Q=k*XBA?p~u(;6keWJN6A z>rC>TL@Ga`sM3&iAVW<;9>Ts!2&~68nNa8x``$)QxCkC`kexNyY^PCG;F&P6(z?D( z6!&s+f@348W!RTB^jz0B#*(`dGRSg;l#~|*U(SPbXzW{wehiC6{`TbEnv!bf9twDy zoe}k5nYi3G+S9{)lGn`=d-B9e#C)<&-}?ewLB0UD0`!LW9!%Yi&5Exe94Ul}zwK+Lq(|=^p8|2wBh6VcEM!&pP?dCN>*!OidG<~p zzfnb(-NYclj>!I;!yBE20Tt2x@Nl+~^U0>1vax*n1lD=J=$)6*dj_Xg4_l?Z@iBB zX8KVh4p*2<&xwU??P9uLaAd@DpxhO!+LyHus4a8mExZMmC`fGqJ9~aZcp-zfEpLR- zycx=qo6_I1I4cnV2=w-|c+NUwmcF22Qz4u^ROv#JZ-B8i2bib>IWr2erEwv$`@BN% zCaL!>n~%Ll__lcM4FIi==tM){kiXQkmU@utNa6Nc><%}41c-o|-h%t2f-&k_sFi%} zWb9i;9P(0QPz$d*O!KiwJjC1GQ^GQAxsZB!1*qs+{o29^;1HTj-xBkga6=K~qg{Rb z7T>mm6_g~BUmToO9#V9VQ(;{9qcgfh5Vfx5$M|Yiq7ruF##uF%i9pz2SLu*}3DOhs zp`!=X#jm+jOCmh_RRFt6z+22GeiN%63Aj6HSNsH@_v&i{dd$KR`8=!xuaeK3wgcbM z&2k8h!41FKn=}cCWH}wL+1<9;$qDw5jkfd<<5!Ns9vY8ZXIw~_tTgBmH<&t~Ig?{% zK=T#nQ_1$+Gn8qDX#>w8+sqOw@d)I1GD0gTbioho+}k_6$_jNMf}G{KCcV4vxF+D2 zoLo}eWR$ZS%V`h2U!*s*(J;CL6P0NK?l4m`<$CnMS&`58%55cD zk*l71ae>k;;bDVTF*iS?80&UC%stC#(SmiSm(FmjqJ8=XLe+s9@eOfzscw>AXv28Z zMQf5!G2efP>u9dVdyj(j__fY+e72|@RHJ-lS`?&p>M?WiCTLfXj$B^$VrfbeDQX8o zLOywsBYmeK-ja`P4+sy8T!Ec8-*fh;(_?OBeZ4q{gE&jCX5iT)u%O3p)%_J*TluBf zkv%m9c;w|#vza9)?0 z5iRm%?^eGo7HjB-iI)j;WEv9=xsrV#jE`TGEft}gg>Y?;`Y|m`y+GJCNQYvAY zao_MZWTBzg1Y}galq^%VEr>~ORm|q2hf*tD5Rxq_2eUO8CfkRK=x6VVNgE(|`djx< z5F?8zLNs62ZJ9en@wdSxk$$=ByViUM${~XT9l(IZU00#cIsmm9E+#>ku_F`#Pt;@R z^_t1hSdg~^@7j*3mKh$o9g;){y@KMA#z=qAZD6Y810znCi-gaNo@NiyV=ScCqB8iZ zhUwmmnj4KFBJ2+0XOciX;)xJP8ci(|guJYRm~Z8P8~yp}vx___mkj4K*+Ej>eVvhF zVbACRhC;_XesTma`viy^R>&0F6q*^flJdRBdCr`Xudss+YWv+h_bKB%=dy^!$_A8l z%Z+D;>JTe)?ORTyv-&z-cMq8J4%C*tdP-?Ex`*Tw)t1A;1(KWV{cK*6ECcBX+xEl( z3sVFp#$cfd<>@El1Yd~53$XS_@yhLGo#?ZTN065(DaIF zp|IWwQp;gEiNij~Al5?KrUwD3$&62vs>sK4TAudaGT(FH^Im#kkMObM0j<5{l5@$2 zhpj}u(3BGo`SID=BO|%q^udgC#Rr$ea0yjI3arf~n1)YB>_vDD66)Epo1L=DyXZ6D zT2=avOISUI(yRO*twvfp>9}GV@+pKa~s&)xoRG zf&UmUmJRcoL0j{FxMM*;$=<4R&PJE)bU5369?DiQbeFHsA8Bvn(7Zf-W-4K2!s`U4 zX6OAQr-i0W#qlXHM%D3~m_6hHg`&w;UNp{T({;>T;F=OBdjrpHXRoQ4<8~iDo1G5m zphC+>wuZ?oSy~iN&@EEd??`2swT(b|H?B)7han2F0&QobQU^7pAglTw=SyNNK@i|K z2+btS7ctG6tOo8EK;czUg9_J9N`SAn4-63}ce5X8bsyI9rCrhF6qJ7} zTjBlKE+db0lLw&lCPB9RKt)QuC?<7;J;MT}6Gz@rJ_;m1gH<~T3&f!$VzN%Z24>YIX4t(b;o6cS(gg-{-ydnmFtgy%LOLs{oy*i9H;8bsbWo z=4|=RxPLLO?i*48Dp^hFF=tU;$-;v5Ue74`J~vpMz0Q15(2=5)BjuFIV6`sFw)J|a zYe9WpxuoTulNFGcC(y&P$Yul0FxJgD-Gs~saJ==LwOYjxD~Q5`0XmfT_DSHfz1P%R z3r@75)`KR@1=Qv(NlZ%37mJE1XeD7x`sz6^u%DHTb{ry{KQyI&X%hLWce=XUjs_lS zRRDKmSmm>RmKleT7*IHf<`G<(ao1sqrb=k-~}Ox8i+ej7+=$YD#eFAJep_ANu!yD%V`ZNUz zxpCi7T1!1bnlXGG1t~b<_M|K^+PE=`k9yx>!li|Dx&S61A-s3w*rQ{qLXwTciI^Gh z+@JF!TJx+KHkW{R#=9Uo?|2bZ^q9+~83-ZxezI7AL}6)>J)Hr}Val$vM+(+U9I$xokf3tIDwo7UXnSz;2ICC;2? zuA4B32Q(?~nL8$)y6@smxB*=`WBa(iuE&!K9g$ru(=P^&>9Use_DW@HBm_{BWLvdl zWP2}0>ScI(&4MkHtQFPNzS><&(Un^d@nJ?agn)YddQpafI=XP)F7NY!dOc}3NPWP5 zUO~%xuf`V5B=zK;?yAMvYrzy2W|(WR^R|AdM&OB9V-LKDjuCZrWFk&Q6esLeyFUGX^l0>nEJ7=lVjHYqG{^WV?g7pgM5VwK+*6PJ$$KJ#*v1qJdtS zP+0{phrAM<=R1~ad>Mn#a~CXaIL}Kgx1LJhZ684HfH?=74yM$b=vG%sxu<%7kGOp~ z3_IwUAcePy$0?}gM3`-#3rWhF!aS*#w+*`KV?k;_U=`EE2<^cd&JR}Sbb_Knq%2!s>B{ zoUJpi5<)Hok9P^6q_!hiY618Blw7ra0?kv(%&wOvsYGpFzI}}^OOLK_H=tgug+HVX zvq~Kl9w~3wq)!gZ(|H%4WSHr!Ohe%w+<({ zJ&AGNge=`a-ij3m-l629%`HBW9GP=A+Fv{PCQ6Y31KI=LM_N00JW-7_K^oA+sH_er1;bf?R&Dr zR*&P<-w?n=K94&!6#~O7nFX){e-h1D*`RMGFV(T_o?12E5m&WvBZ$|KixY^tx5rvD zMr^ZB$4M1J^|q!Cv1O4X^2!R2Qp~!L9HMujq+S74KEg^dg}zHxGj;+LUx1|ahVqjp zn5rdc6k92!ki^#!O>=M4=mOzokKS$pZcl^BQ6Q$6RU?oT<}JeU%yZ$IEjK6T$k2NU zYp6+R?UDP|)7*p1UCybRQv7w1g|?|r*@JCzTjRXaB9Ty#c(u&n*qv~Lk`n5+; zw4715ogt35JH5+N*2u?zeR=MLpp9|__M&v(3K=&A$2+5f5DBPRwXVV+VXEL53ps-{ z=^Ctgl+obIZ-;?pID)wx0Drp%KYO<83o{3>8v zw9r}^ePEEbwU;5&xICXqXYhd}F9UL7ykwd2JTcCfx#FYPZ+E9fOKap}wH9pT^^3qz z+gIb{vrCxLk`~R?wG}8=!Om3c8Ey&hU2{eN11eMRltwpj!d|W3xeM~a6g+VpWU2QE zU_n{130pYdHS7@_siX+|#Yr?dvTeOP9g}rO8bN+tEC@y7G)x}Z4eyh3kuS=-D2e4n zm$paU*h5nkhdq)lGAp8Q4@NugI&p1{?-`f&uMs8B8M%GEVQ-7GLjn5qCin%`^H_VE zjdTO-ODeD17p`MU1CKdzCE&G52GL9sZ->)kgkmrVSCy1}exN``Q(7uosIhp5(`j3e zrTh^KmkYJN&FhPJ4Je^a(mcTb_i|;K4h3v0^b0dW@}(c( zZ@l`Qy~iP1-f7Bp!50Ouu5w%sL%VPDSnoF8B0><-OrAc#Iy%RERElYKjT)dVPK=L& zUNvvBP=?J#L7#?_1vp6abCk;A6m*GsnA!V~LC`rx-Wx_Xm41v}kdepH*`B^q=FWMq z46xoqO)Wy98gDnRFENGj36aE2C+m+*jOIGNr*G zbcQ06FEX2tdz3GrUKvYg*^3=|EZEM;7qkA169$}HHf4*;8$H6_T0Fq#y=2KpBRrT! zD?>Jh#zy+4P&7Su_sW|+Gsfb;7%HOVE-yGVEq9`g_j$ha-aK)a6*Jz~u^jjKJ>b*9 zeq6LJcBy&5O5X2qke_#Czg{4Xpy#4`X^Zu0=R|pX%ppC}`@O42FU>t$AE$1YiW2(i zOcsO8)pH{ZQ51WUN=FAyV#&a=x57i$+AK(>S#Ii32*!DuAEfi zgks(kxu^=_L~a~A)VQz;7~qG6tm$XMiqqaP*O12!;lt_bJd+;J5x7Vtv@VMZMOMb? zJ!!C(?yUva)C!bo+}pki^4fS!yf)eWVvibhw84+McssT48QCFc z=ur?ywx8&mUROu0bUuhEoZc77)V3hxP~Lm%+l0KtPgQw{Q05bJr7z3}tv2U?vT$VD z&b_=vRa$Y~Y|a_VqrvvMrJg^1>jA+Cd5Y}sEv5`9kx^u8!i`+B5`vrXb+%^$YlY$)aMd$8K3q|xV9D?4@`9`u z&sML&O9>knAA6UtB#@~emOsBy)wkf3=-Y``lOZGZALlZ#s`BV^C*Q_F(6*!+uCp!~ z25Y6R4We$0v;g5F-RF$0^$vlK1|_5b<8Gr)mDGt^;<@v|DuxskgvZomJ4@H~%`j5P zo^X;t!9%>czJ|=UwAFLRqFhAcdundR0;i|MlGc!aX4I|_mmyCQ0cTQlt%?% z8ZM*h6?IstCTJ_$kaOsJ&Q-zf#|wnXFb0Yp&M_^X4;twiqDVM#fgQ| z^@YPMBfZrudV2bJnB^Xy8!&=xvnm6cf$!0btwoX4LOn#TvwQ}Xw$Y0%ezE8VNPS_6 zBQ7yd=d{WhmHpj3-&j=MXXlk~+uB{i5l-Vus-ggd(FvjW9a4mnx!H+;#sa=2+OVz_ zbc+~3Y1;(sH~{uSpa^%F+GdQX0?p}=4b?RsgW6JGWUpkc7_+jbC|2^0Xn zFfh{fBLr!-=xyloVVBqB0}2kbTgt6@;PYS`C|&@@16XvbhxE*F&Mh2Xqa8zRfge9> zZH5B$I4mh9A+7}j0czZi!hLxpOqbUXD|`2*?Nn088eT}BfMg6E#&{`50 z-#tiP64PoGzJiyl=wUi^o+G15cGbxOgOAv`c|EzjbKLnghNp^)M5Jrt%7fq^D=@N(W#? z#hwxifu@I(&{1e3gZDn#05j?<D0L*BfFrnWtq*lmppEbGQ+|)^fc8RKpEVz$!ht}VPKYrcy%F^a?1x?6=r0LtT0mD_Q@OeyZx4LwzQMZ?%({F}4z*W&26JiWKF+ zd5t3LWq#?KiB_)6B3T`vyB$!hWsWzVl1gF_5w&Y19xe7zaZ?-SweL|yXxxYlJ{)Dp zk<@1i{j5+P(hEYzS7`4_7!>9mohqI0dpnATSpWe~BeEQ>RjgfOY8vFIlb1c822T&s zw#Pnh2@+K5H&{xJ7J(|sOp0L}Zy!%mkm4Csn?Epj6(ay@BvX6~FhG-*#08xp2wQ>H z7`OdwHZtIey!2^2XOP`0TlN79al&0-H%+ab1-1pud6LEP@@mi8sCAkUQ(pD-%yx+v zzB*;LJ7=7Fd(rz$xX4vOMnchWWZk6fcI^%9<&e8KNO}YvV60M_G2J8OxG|-N>!|$l zYGdCEZBx&L=At&)VyPKeJ&aAlSGFBWa6KzxO$4tacSF)L@8s|}GBD^(-Za1JbbJgf zsdA&Nb-;uaq4AUijsZx-CUFq4;o^1Ba_EFD5>%+433ti1Qrl3Y_*z0rJSJ9C1;|2t zLfDef2MMavUNYqG%&zb4WU?KyRz=^N&2%d(r@Nv2y=2tUe>(i)Me4P zYO{6E*QH)UK_{%YjdBe%O)g{61875XW;x+-tmPu)!g-14qtS1zOoG$x+$_;p8_6~L zDsu3NLB3a_M5&GL+&q@uZfj^{_O=COBM~IigFrc{a2}y)x@u*QU!MQ+HWT&GPF{ke zH$;_H9#N(OH&{t08J_BUhT$=kSL0rm_B;p4^UUoL!W%fee%#vm1Vn@46;n`=;|5o6 zJ;R!PMA)3F4}5joMIQA`XS!@3@gwer2xQeZa`+E*S0Y{Zm=hIscg*MXo~sWR(zYtS z+Jyl=!M#cndwAF+y2o7Yy?xwQZ_|m4_x$u^sbPS5aL#gA54UrRR^W(s;-m)#E3WWP z^O|=IRN~OtmPvEW1Xn~PkTY_#j}7eQTid)`IURiLCna=>u6fLIyGzb~xoHFpOvkOw zL*&K{^qdFPXpsu~@f<^VZxeX6tJb+>G%*5UjvijsDkkAlm07i_8j=Dy(Fsqfh762> zGSwqnPGM-WiPh>JedpYRy_}kiGu?!9~+#r1-aFWO${U}syMTeDmdz05txQfzT(QG|}e6XRY+>_Vw82L!Gax|e`whb&S^ zdLWYME*q}F8sOfk4-P-$IDX2t?XXp{H%sdHgbkh<@wKU--YO0jkOP|yQme|iJ>lc# zGJUiHz=mn?$dFM4D!iv6k5kg>bEw`qX^16RAC$vqmKiUd%HdsM&Y22B z)#>@mgM4>n`n=<1DWG^+p*}=LyxlNLuV^03$}#mPG5DOTRuxxD;8fy>I8fp zg2Kgf_>NTPax@0|3xax)Re7$~%bwRyE#Et;8*NVDXmD|; zY{xOd^9t1>Dzg&RYA}8?5{ZP~vflZ)rp(J-VhF_>2>WTB%Y$JF9=U>I+$rAiq-;nOzRwCIv-eT!v*KL zR@CjLfPy4X6$iAX;3MF8VgB;9Tij>ZhBQvDY+)^_~N}S zxCT(RMg`E7vx%dy$O(Idb^Fp%oFw_7#vXXVwLzOJ`i=CetN?Qh=B#xQde58OeP%@U z5EBgo&8rbLN6rXi^Qt9iS!-Iv*3E>upNJ`#1%<2+I=v(XI9>)1ZWJe&CRw)P62%SC zsGH6;Lh}bke_j<5?JC&k$}lEL^i?}8mR23>@aE`%TTPWuP56UeE6tf z-Io9iV2oHL#GvS2!L?&mL`W5o;N0`WPbTD;7J&Ozv8s4c7E=3 z?)3Y~qO3u|9*I=c&(Yt~CcHf_-#vQMt^m}W%X*j!wQGqjUilWE9y6LfD0<;6k1{O{ zcL&m(lVg{O!NCYNMKbP*Am(as1TZN1MV3UrIQkEEW`W|DJ3|L;b|lVO(`Fs7L_w9F z660aN3{vw~A=ZJcRZlaE260V{;c;Sm@4%*W3O%-5*TISy9NLY{E4zx5eOcMXQ5|Qa z;m#R@1#@mq8XS_KE@Uq2Df;*@xwpi&S7Yuy(nu&EEoW@%hRG!jRcyDZHw!H+*VU{{ z?g#`Jz!l^`Jv!MJX(u^dpxe4&6v>wnHAkskla%M!H7M*(n2aIdV{bJ=S*R|mWYV>! zPq;N{bD~Qw$y8{wL1g-Y>$}6kzEB`56O#%{vExMc>mn;9T}z02xC6TSa!!&j%Mg1U zEEdpi=T#gXO0y%7A&r>>?}4in^|R;e4{~3v^$PzOd@Iiyas=MNU~VpwsS=)Wj&&a% zEws}rhh|+1E_h}MTRIpxFNt-wC@FHCZ*;7@S^I@37hNXY2Zpc%cr|i?o`DyJ-z|F&-A4R3V&8axPo|#bN}r8?%~=m$(4--uO*Cv_9cY z+?uLu3YRM^dLtrvTr_~`CP>Ky0V&F^8A!(={P@5S^4@TRjjPG!We%5nKftiW**TJz zrKqdd?Gljgaq(o4#%J_q=6qIF0ebQb?x-H@8?q8d_Gluen_c1Dh$2D{zIr`n?hPhx z`Y<6XGD?RgTOlgZY@qdYe6!h@Nr=}iU-&9kn~T#t<~N*GPp9K< z-Qb~Q0p0R2sZ*JCdDNZ}`#1(C(92`!Zv&`$9=+_pS>rza?uGLU`9$Im@b0P^m zQi9M;oWI~1tsIM`uFtq)lk1DnnK~_+hcCBW9wRKCrC2r(S-y)B3D-PR8>-hKVCxy{ zCvUoB&&s*;r_f#*qSy7^Tantc;I}o?9_Z#A_1=H9Uq`ElK!7ja^UQ#CcYo2b7r;HD zFcq>|yD*-8ma?LpOlb&h1Tk@%BHLMGEF(G~rxRk;!|qEVJODYK)?!W*t|vM#ENj#C z1YSbxKXF)w&Sl^j=0Y$bfP8r&dDH)`3Mm#xGcYXmb zx6D_vCSU`;=%HcN-kN%bNjc26920Ve?vt9ha?~9fyg>!yu}EFAc5@!BO|s)WW~bR! z1!>9cUZ~itNIXl&LxVm(40MG+5odm;9h#c~nQVtWloUE)?Sj;*3(a zCy__S55f068C}7#V!z1;a7idyK&*>X30&<{$KSO37wwm7b&sLtoJ0L7dNe}I6)76% z)0ckR&3N(nEjKA}0U7w(V_esH$xhbzxB|J(vWMzrxng)Z z`YWm)Q5juk^v*7(8zaMJ%zbCAvS;~{QaX;e%dwYLb>tvN%x0t!JOeml9)MuPYc5h2 z%`oNTj!2kRv!+*@Rd@@c35jsbidOU9zVX)*0DRbukIQf*D_2o*G#_!(nm#P32OQTo`KVFi zi73+OCBc;UW6KBE06~a1wJ?uFTbpdgpZ6+FK7-7F5lvOVy?kQtm1{n>A~*17MJyzP z-SW%;1U4{ccJb`jg$ba<(Z*?udQiFP0i_nmMD19FFw>dbnbrLsta)KoYJoS;vuE$2 zfd@&t^38itlsn@iRJgCxxR?v;AUvj(z#@ z4!#$jisfLY4P?_)mAsoJ+!a7_X9rUJu;B@DFAWqer2`ZpPuya#0N=VfI;h3g0FVZT zM!ET0h;!^HDxZD!Xz=m**1!wN9)?IViXF&&i&E;wim0jHo1?Yh!w9;xrb01z&xvb% zvLe7`ag3|l`0et`5u>;I7Il`jQ|ZbD-ZQA7+(CO~o2CW*sG^ixJ<$djlIZmW9mb(l zYK~O_q`s#s34!xmB!^~2x?300={XaMKAjQX}#;$y(kZ8(JP0fp!)zTvwId^8xG2lt#v z=?0m-ABl{>!V0kwJuRXEY-whX!tlcC%%NT1>DFo_XlZwl)wm5}6n#AivYkDs8vWa)8IV#Ov+EMju9-dn8U zeQ?*P$+)jU!wF;F0SFs~459~?lfSB_uI% z_2;lCoVqamMocia@U7^gKSH-T2Srv+%X`3$_9}?kkao;Bl$AtgBGQnrgpb~9XIA%n zNjXQGgPM~xw)48vvY*=%tS8F5W1#)cH#vPN(?`vH9_?J1G`I>0(X4um08OGV!oKaQ zL8e5wWv6_bkRctb{f@C;%>nv}RG9^Dn-rNX<{lnoz-^i0YHX#SDKsTfSuQa1u=~1LrNbjcdq!=pSFXfQXaZ8`bp)QnloKyYScIqN++jiMZqHQZGgLX-$*n(!4 zr8q~AT>WSa_&sQW@OLI6G~zE$qDPw}u-DZ$DGese(ll8FB}{`k9%qy21CM15G^p3J z(?UnF2pJfJ&-JVxk?caT1PAx@ouOhW6u&nHp&CIv5H3Aw0N@lAXj$g*1LZ6UBZ`~o z-n1$}qL!1o5GC^pBB}ShJN4DfC_R4qP>?yaoVmNz2$o+#njlh`kIc)yOHQ)|k!D+BHa<*< z-4)-mhkTX2NwH&`2U8(CR7KPcSRMn)fsNJfBDG$JpducmImC}gY*_GJ5P;bFN?t-E zrh~qc6uOko+t#4-rt`!7rP7ct+2@*vZ=U(yV|)(8Li=nV48s$>%4?_rczfqhx|p8f zJb-pzdQ&^4uj_Wx+!qea0bO1rh;^O3rAFF@6hIqYC-y*_bbqs1PI_T_!PYPGF`Af` zcv5+dxv+LVXTm+HtSz~m+f|dP*z?k(y=Y>qq25I|Hg2-& z#cg*Pv1lzvzCn20m9MqMj?j3jaU%@__KaW*l*?V+_Hm)FqY+p>I*PX!V(rC1Y0K9^ z$@?-g{ViFen2};HRt ziG{}Vp^Y3Rg}bvJl)!tH4N>o9oe)V})fmr~21ep_Sjv)3#XJut$WP#zzJM2r*t^S? z>P@95Z*Oqr2@SeaAy7#wLuAZd9|DfFq{%hJ{F6;R#zX_H2%XZP`jXuzGesH+ciX{}DgFr?KL< z@d}hG0e|P|3yTf&&zFMOPrE}G{?rQ8`Q z_u;$*(13gzE9IjkNYW<`)!XK1*Snq7vB<1;5jjbW(YyuSv0SAfJJa`uokI~Tp#&)( zHK(`4y)a2R+MQR9@`zIWEpOVgyq+d?g#ctInO4^3%kTx)LwJ|ML{w;RYmTbMG5`jVeN>Yzd zfFbVdcQOi9Lq#zd4K#{@K3Z$*YIWn&+nYt3aZabT%Bydr0J8a^uOC%4opN)ZA2VS3rnT?o0L7!DhdOCjM5_Dz0m54W-w4X_{4~5fiYOfg%B!E{vX$HSskfRcbsm zQ9UMOOrF4Px|i-WH?W1=QmzF_yd>|H^1`hyJ~>b)^RpzWXXO7V4#*cY% zl0=0m3pix6L3f;lyhYGwHUV06^(8)xenSJ2&+&W!{q<{R#Fu=h^0-DD2~nP1$(Bj( ztY$1t-7bJV+?UaPd(TJ~&)n<1u70W&TDHxB;~O(jrhO-i*F>jg2c_Y4Z^!nNc!4kC zce~1OE@jdPAKZXFdS~&*@tn^9?+jO-Bj|9~Q=p^vsN?lh+ZnMKkZ^d(?oRo3uiEuG zG895->sIgLzeH~=}l!mV(3+QftOdzsM-$Y~qZVsvHJK9cB@gaWu&!K}80 zbo@xmrqEoD7$Czd5;0bX-rWPqyE61edf@xs+Ul_&M`{Wp;|-26ThpWTYkuHxAlMIC zuy!p=6}lt|(q_ui7bxPWP^uq8tCSZuw)iUvsp-DqR94n3I%3p-I%)x<8_H3?081K} z|Bw$CeUOgWWu;8W2yKJ+bp9WkSlL`qYneJ(UaL#j~tU zYeO5?+zPKoI0}3(Sp(`egim=$k(hXOUyRAcA!CfZu}YVIoBTMa29-!AqzY{!78dt` z(N*)#yml1yakTZPR~_?&g(Su!4S=fNd&~Cj1z}Xb%TD0M3UOz6vl)I2Lh4uD%B`N_ zoOa#MCnkz~BBuC3EK}od;4OpRqXNNTZtq+>^ERR|_0hoIC`5p5RN;#2+9*DOuEseJ-O5NYiaUVPb^46-+iYX>Uewf3zNI0@lCqfh_i`%CO z7YAD@qrLUWpJx#@A8ctFDHCU#-2=mubi8b5k%tSdp~{Eg^7NJhMtChjHJ5kAuIvfX zIZB$h)be`Af6P!l>Mnyv$MM?u41-Tut3st}R1cX%xX6S;exqnZ#gORUC*a z`fd*Odc)vt^?~wRxyYmkJVcaKdTT)E@2ph1Uxv_gc!{uTwOSO_JQH|3z(hxZqk}&YxR^?7*vCiwRVIOhY6pvS^s z9$kyUQ_f0YlB0P&j~|^NkEFyt!_HYb4a7me*AM9odoi`+;O$U>&}h4rOvPKQmzDaK zj;6x9OF%Hub_o|edkI;P+QzVlTc}FMCu?45hUl##9j!@oq2rsVRu>psw0o;d*=*`s z=Lj!n2AV9&6=7}>iD(vUk-M8xD_z;#Um_;tW8QL{;72jyPaIA4l(9}S*GK|g03qHs zgyOwxW$hYQ3tU*(-E2v9@iff!u`whZzUoRA&3dXW>ms=GT=`C{GsPj`k>|_ikg#R| zs?3P#fCGuWm2c+NzL+2CqIt}~e6fKX%192*&fRj*8Bf;Vwz&X|(uNgZTDWAZPvk(b z&#Y!V2Jmt8mN0k|vqUcY4E?Y?cbp$xjna;a>dO-%xNL~IbP)zbi>P?>gwKx(bB~qZ zR*6n`Eb?3lyO8fLR$eSRm3;UZIPu#jPG50C=(Sykwe9h`%-ryMs=KF-VyN@m3<7TlhxJ<4{Xn+8A->*%pNmRTLAZ>*;Tyj z;?0g>7{f(QoXx5rnzU7WI_>eSM}%zWUcP+sX2*kVA$cF+thP+3t&_i&0v#lTs$uJe zWvh3Tj3IiRR@vB@#Vt0{PNK`J9#bZ6%hUD7o(ON!VG4?c&V7@IF7SdL96)@mw_?0#0^p>*{GQ9E=q{E)(>8TFa0w zC~(>05iMIj)8{YYo*&T?KL=azhdmkbY!ddZP)p9^ONrr&CVe~DZOtAIRz=NJc<+hK zbShUWD)Bp1_tVSQ#>NI^F4QT`!=)5#Op*Q)0{wTsj)sSZdr5h3Fw3i_xwCV*d85AtP)WQj& z+W>r+l1*ne66Eu8_`Q<>Rp~luB_6`F)#DdqCk5m^T~YYdhl|6EN$8HdY-cUm(hhWr zs|6Rv4U1jMW?Q`q!Uj@^Zd7OvpDxy-KD<)OH9H!&q%$W;@5}7UmV6SF1H!ypsG^eR z$4JFRsqa07<-Hnyu%6!8s7~q_53iCdr3U8NKJ>{x7v^D3jUj*M-9f~%FX=mp)S)vzbo~f_X4)$|nydf@W-lErhx$WlqAN9d;l2W?-%AShCb1GO@l^RQO!fglb zg?A&T$<^#=Nep_#%6ECF?!3<+c{agh4%NBubjLa4rO{jrKFM?l^?fEG23O(^b4fV! z8sPYGW*i`F4bHyB%*UsAE^}-(G`&{*<|QIhTU+eaW9eSXLQ;H}%YJaIK`9nYid}Mo zd2gNIaP5StE6%-~u1{YSrQX0sYH^dRP}XEdo8lfOXtFCPno}{claV~l-BR|Da(c4I zuZZ}}lBr>MPW1RNP*AR6-JdUyx+|6R)HvA?TRuSOlxSfHeQrH#58=Vi@()cpmbuH` zU2+7W&EuisSmh>7!{dUYd9lKv6^XCqJ%3N$aCeHWd+iG`skuR`64`#^MhXn?4X-S> zY9%IKPZGXSK5e38J$Jqu*yonVPS1)3AM-iYZnYr#h+8uy?@KXQl2ELxeFmzwY~kpj zB~Z?<6y>scosfgyb^*FbFh&mFV#78@DbV@?M{xM@c|S40Gt8)DAUssw5zUBdRLFF< zItVC0mPyj0C$Oui*0;4_1}JFa-|YLXuVBR}QKzPF6gb9)UYNAM``VQ46DM1sd@F#450@o&|lnH!5ZtZCT z_5kdq%L2Y2%Ll?fg;i`gd%KVE0ilqdJK^k+4erf**REWSuA&2~IUVHk!d9L8+1l-s zj%-qYnPQoK?jT3ErOwtZ#aUTdE)=QHdGU=##4(Z3mc|}K&&$bbw0Sf&1@Vj+ zoco|`9#B3)szuwTfftUM84(gQ=dnEj%$)C4%VM~vQ`WtHoyQdTANJhOn`nS|nebS?&V@%Hs9pQOGGie42J%^={Fn-MCu41k6xn1$AmfEb6N=U9x(j z!Lc|a&oO~aw>3v<@@8>UAV)#=3O0^qQ5Ti0&>tTu4s(;kMdv)M{$UxUdC@#I()Pk@ zIM_%5U+ZuIo7q)!?$Wbdv&f^3MaZQwaFTaCz5XnHlWW&@0kC=WT;6q1QbrHl%;tzT zUXY*^@goAz%=3H+s4;={-buoW%vME0*o*fH1Rfc4iMr3hSVa3pz!=+DdMW`Rx?;Fr z47*qZ0+cjjY&tp^CAWnDvPfUZNn}N6liP{lI0>FXK6Yax&+%OBeh!-vy>~E;o;VPC)$)ZntcAq%n-X9y(f!x;x_?@#ON$p;8^k%qeeGYJRVY=3>RLOgcLCBtf|=HcO+PsRon!_^n_1yU5h~Hxe%{9OjI70(XyXNr-aVoakf_vd+`-~)3-C^Yr310Z~f7fTSVf%zUYtf^fq4-#>8h*%>IQ`qAY zbW7K1gFqB$c;Ld%VDC~b&`(e@+KvXp_YE}0iR54muE&zHY3~VmK=>O{O+!dL zagBpT-@&u?>O;#1>nfA?s#m=kPqk!V!yJp_rZEN{aK6wc7C>wD7q0I}(`57D0kk=; z29ynjfimJ@O>N41MpajURENOB($rbXqr4bKmF5cdUX1s_r&IU%KARb0Yyg*YQ)V@|SztQT6T)P-G zB~`yCjWezmM$B2Ux2L_kPY;sC8rsrv!Yx18=ENmi$;gi?}58_;iI66@^vP^iz&v&Qzu|1T>og^8NPnICt z*u2cJ5}hP3C|ry8UFf^$VC{#hj1M@iycCg>O-`xb+-nv0h?ji#&>!Dq7p9$La~WeY z3s$prJ)v_1C~~0Ku@<>5!3{1ue3)QTaL7gHT=)W*VfKiU{k2l>y+a#Z4oBinAjWzo zA@jhyEhNd89E)WKjq3#?BTc5-Qv!#UFgrJPWHE=Xu$FVwYG3<(U zJRV)IQjGgyy7@gBcn574Z5INkJnoC`LA%zsdK-&rx$3MYfvV8TSZby*pwp~8ZuUE?UqO7U@B8L(uFo{0iZNEh2W=k3uQ(&x`=ba|h% zBls1UTbZh#SBc8uKD(?%KQ6OQ6!FFank=jyaoq>e=>VhjFq8~vqp(I@&oJ1=XY6?g zfWY&hr z1rQxEJyQ?-dNuKRYqRcBx#2_Mot73T8Adk4lgOG0?#&m2_!QkiCG6I+kAhm767N|F zaK@Y095H#WC6?o|Xe8G5#+>cxt664i3K$&$Tx(>vET$JP&#FjqqiFp0skA(Csz(dO zA)ECq(2(rLeDjUzR`6(?mb7LI+-oj=`;IN-D1@X*^1S_EPopQM4u^bU5$;8#-Sf`D z5v`cwd9y3|KqG4-(CMLs|3Dd$d8a0W8-|n+l?+jcE4u>_}##`GcSyvOW z97iA;sXa&EdrP9(%@$dun=wA2_;TNs0P|i|b=89#>&8_@f0>^2`spg7ZKc>Owe$0c zry%LX0c!=WDcX9uO4p3C{%rE)y8t5Vo#B=ZSNC^lq{uas&BW{a_#KDJwE7@zxK*JG z3~YfI0yQ8zPEnwSV-e5x%8zI)!BQKBW=-Tt!nUC-sCuDzAPy34(}eWy9dk?Y8+t;o zUp+9|dpjE4kSr+wsGsv7<40!B$SeRw;#%Xr~e8;V9WMOVIs#K$Asjg4~KlfnU}TyTTErT2(%H zg%f=gvBR5$A?!|O@mSAdNRI^0-*K_@ypV!6RyIcf%WTbg+9y_RVxlkVv>s~q6YRlm zu%1p^AANCj=NwDM9ir~tL*|9UPgm!(uDxD*+&t>5i#ip6y**`gD+P%+Ar!{D(_{IB z?71BKxr4XDxxFE3giDz>J0+*Aa*JCiE*)p098aB`{ji_IGY@QCT#?Wy?I%k1jk79pfrATQLNSdIvrJaG1i5=nrH~c)pqnU_(^j^ z)GS&UP&Z-Ni!)7Bv+KubC*54enCMp`5|@^jXvtGlErhgp%F*`v(8U~_Oj*^VA0p1< zy6Y9jk`|DleZ~{?LJcyLeT$}1l91*RUxXyyn%bSrpr~T6>}w+|%2WeM1OP1Z=jrj1 zJi#HfnRF47Z0HK}Y2BDKDKJEpYKk0jRW^(&T4I~O;S$`VtGeh`8+jc6qTfLB3IjVx zm~~vZ91W%6FfAhsbIO|Bf{5ddHx?NYBx@u0L&asii(*qwaGPX~O*~L{JVDu)Xl+<1 z%H;2Yq{TObIYI#-qoZJJbiMYdgPfrIqRP79#5A*!q;y}%O?4ZnOM!NC<$#S~U(D$6 zaJ^gXJ!6?w^)eG$4XmcK$l6@PJ)NXiJ4b;-DF{|P8DdxZn0%dH`^YXfn`z#Q3+twU z3lbJ@1_GU@1y}`|8FS6}`et_PEk6_WXaHWy#F;Zl?(m#hQG6>f;C&7zMxfhqZ5zpp zWm2>`Y8d*Ommb30J%XCI>Vo1pv6vt}*w2y|6TA)(guMNR(vGH6ZuYhJX|$8A&5>Bo zzBlT`Gq}+3H1eF))?-H+n4TrSt;)yK7`87^OJ8NqBIiqbmYE`IOS~c~BSHofn=?;* zWZ$CeD;t_<2Fx==rw6W)-NFXkFOts2!u{cmvdiQN7uxO0_9BYys4RApZ|2d2Fc>=< zL|u`M8Ot01hA#|;0ubur^LdXc?kPx~!?e|-xA|TmgI=-UOkwZ6GqRM(wqjNvMo=EHqCt)4vcQth1< z#>FDv^z^VC&lb`oqvEaOF933Sy+{sPVB(?n@SyqG%hTi`;DOLu1{+DTPxO(#(0~0x zc^un^z;CeQnd69`#0&t7+N^;*Ef;vOCoZp(LwxiLyM=Z5ANbZ{hD$mrEYZwXHxzsoY}_cqz9e=Rw|dy{3-$hWl* zmFl$WWobS?Ww8u7ID zzM56{eMi{;vd^bG=i0TQ(Q~y~wUl9rj)W}RG2C5`#(-nwdiJ37RXxh*RL{b= z5cpom?TBkT_3ph}xD_*92qFQ?#q(iVz4z)G^1P(ou`cdeKTl%z@IiubtPMMRgRGBO z;O&r-Fa>$PBXziP0DvyVVEZoous zK#F|#Mjn!x?-;Wfr6sUsY*S0VmCPO)qq&&G+t8Sb?13vgzz%vV}f~M&!v$%$l0wb%M zO%6}9=niXyY8T_^VvLQUvt4&PnIa+4-&+^IqBOy0f|&2RX~uZ06GN1qL)FS=5qQDn zE7Bu8e6`s3S_L&huCvTvy^%^f)3+R>dyVhd5)+nZ#SotRb_;jygG!od=B$%u))1Ic z_P{HfUV<6m$o^!X;FJBdl#`p*NE{+&$6U>m@Oe^?l(tV}*<&S;q2K1Nk?pGqkGFRd z1o%$6XUz&kK#Y2JZdhp?mDf_*=OsAOJ;3hAuN0p*soKFqL;~0{dQ(jio%w?1QvFzi zl_D-VSur*W7|rG`Q3HYRl676*yVsCzZ6$V#JmpJ06Vq4eS~JtVkS~2v^&IaZjc?qV z7%C4BQ9EJ{$2o&A7ZLJ|7z2i2X;APDB=b@k=hTD}M>Qa$w%ASCbAMXgjtj~uaO&7s z+a!qsQfLno%ROztkbo!@{R%msbV9b~{s(^?1J8DE23rU~D|0U`8aaCCTl6z+(6pxl z(&xtSrt}??;!Tr_{4%{0uLlSPZJ)`)BMnEejfpox0RF(=^yzu_A;`;DqUdYRtJyY( zdsz9lUkdsmExXOtOG(|C@bJ!6857N73=XFgHlDNxhgbbF6EP%#tDbRSKA+sTa(9n8 z?JziaqorH7_*8+%3wv+39%ysD{ldsIWF>N@ZKnun7F&{sm|SzSwAvqu{}-hGFJD&bZe)<5Q&8jo-j9zP8sIv zoG!w(=Pz{y4@Ne0spPE?UEgZW+iKHfAeI-*u;Ed>)6N7t?dDfVGzZV+K>y zVK_|6_C0;;a`+yCHPvNnbqgo*v_PfjXyd*|tv(0GbO(pP;^x)|W^{B6BUmY3NVu3N z)(_T@ago8H$ICeDg7)bB(|ASF!-*%}0Ab9b;?-FLY7Ze%$H2cSLH zuTPSVJ9i4ROVyW0jSd;8c$}-;ZHHbx1LhZwjFSagLipHmGxJrVZaYDhHgYN2j0tRR zYtkFgo9l#QEly=56mzhW26|dOkeOTr_>RTm>6DsXlnnAa*+npsrld);+3i#oT4IyX z<~DLr^15N#fIR7mssKM0Kq{q@YPAvX_O#NdEIfJRN5{$9SZ$JNH;&~P~QOfOD*wrKAN zMG!AE=QX$BS>E;wUjQ7&Tq0pgmCEU-c<&x^NwNi#y;|-WPMldxU1LY#q|4PvO4eA6 z*C(rs6U>tn7x5k+n&9K*b@cFe7=uKa^+@wbBwiM$0Udj7&+_|AipFX6(cq;^z!4Uq)q+gZcFlo zkU5uIcG?xrd%V#>jM(z4RmH3~pbxOkaGNH`cVSqQt`GJw{awDH@F#CYrZS}i+EiJ* zyy-nWetwk)4`?sCq=b63$N+8p^s=#Dq{KN8B|03)dAOH)s1pguuvh{cQR|rPhzeOqRFv5q2U+62*vEU;A{3c% zERxgvCi015PP~B*Q?5WOplFGQYftSe-_ygurzmL61?sQqT0!{Ohsp@lL0aA zyu-{Bn&<~{gtZw`-uYPIs=UZ()>>Y@m{lUm+kJ%{Av;QzBcmpWY^Ovowd#|8wlDf= zT<8h&KpP%SSl>GI;RX|#CI+%;a~WnPk4-mloK;eeQd)X9!!^P^(6xFG6n_F;N?2n* z@TB(=Nf_ktS|AYjiQr)AzE`1`Ij;b3bP9{#8#FD@s`GFVS`IVlJTH22t3)xFKIPUB z5j&=NRQo{JS`y!|~QPl0MQt0%SAul~wpmmNS%r01LQqPO^&=O}k%Vy!^z437~ z2y`%5_lk;*PD>$pz+C>)k)}>nWH?sb2@uYytQSDf*YE6>n;op0>Ty#!=hXvYQKMjx zi*sZ3hqQ6I+{yp${Wf7kBmg#7>$^?vK{r(A?sqDzfJf)KT2@>39{&2|ZVy+A9b_!@ zG29Gvf!vKhE?5CX;JI8sK)EZ}WG7}`=5QSZegX^>2W>)3jLz9^<^gZ^1=bYQJw&>p z_DUVDV5Y~V2u_aBPf`0hKAx=Let9ikkk3ZNy`FW#>Ld|anMKlGsvRVOV7H_JAr+OJ z6ws3>LUc9faP#hl4Av>U$Dn(x1|-+dU#JC?Mb{?aHk4+1aZ5H0Ix0RZePA>#PiXei z92}33U9SBpbDTi9uIp?a=f<8xU*{9!rxIi+x4O?VY!ys^Z>dkKOE}k7*5a)j@mnXY zDKzl6#WXzCeUwLKB-tey1`E$|Uz=M&aeBf^ZQ%vT!nhOwus`<%>NTrOjXiW-vgaBO z$ZbI2tG%UA53pv)LXyP!-E#{@pqC9}l0+c@rO&yJB8y`#a(f55y<+S(m1cQ^2=r;m zah@&Bp=uJ0zdJtNo(AF9_tMkE6KbO!54KtKRi%!GI7&3%ksyMyN@SDFU7MCll}8ah ziC9)5yNShg-8!977FPEcms-8!Uv!?<^SZ zpetDshHIkl`RTow`NUu33x8dViRb>VGaK0761ANJH%szl4;ctP66W6&UMg9qBqh?r z?Q0#KSF71Dt`me1$_jIB8u3Pio3I^17LspoC=9DBBzJ%*FqaX!C!daIuT;PB*f+d7 zXbLr{W9H?hP}jZ&zUXo?oRL&HEq8kG78+GwbQi!#qkMsqM(u z6XSe2(!RkMh-zx-58rSpahl&bfRK7c1njU8gNvbqRPu7Z4H`eX%hjf_RayrJ|q$0TI=QJ=k*^S4hB<#xt~ev-L#>Q3#`|k(@F5 zBGtYvOJ{qb;0u9pW$T(Q5q@yBU13hfl3Q*OYx z4+{oBdp-QH@AY#!g!0}|e$Yp+p0qhpAe1Rr%`kbcrUaopPk~3}{o12MY(d}Iv{(?j zbv8#I_;T|>hv>T#nS5OCy~vojRMM8jIwK}4@4W(x;mFvfRDW;bt(!%`7=j`rPyw7T z<=ZBN7T}}h@Vz5i3{EqCHZb5>49K}Dqu{&lJHv)IshP6)Dm}uOx*iej%xEz|p1HNd z(R*(~E-6{aQ(o?40ZZ@OVj`SjB6)M0_xc`x52a53x5}tH_8=4gpMc6e31VK=2S*fpgpORRMQb zq%9DJJ+P~Cwk8+CyoD@!_8y_a4cL3x`%HDy2;~fi(mwfD`r^NR<5`v%dqA+iUM?S5 zMQA35RSQ_47KPx#p?HCi)#A8ny9q~sg_%3b1zQS6^B#?E-=PnkMGxi9y)OqkEBGm> ze5-s5U7;6F7IPiokf*Um^(bPnk*Rhaa*>{+zFw@f({wUci+YgRLk?TKisY{&BHVyS zp8@7&AYbG&ke&h+cR7Ip*QPB1ML@d0F(CtJazpegi5u>7MVM_eM(RPhEQ1u7 z&$!+@r``g47u8t}^-$HNZp`BZ9)%%1E_CyJAk0tRyV$gG3rj|LNpp+_Wa>}wMi$W8 zW$*zcD#;Xz} z&fu+3*psWtm3jn$Vmx;d-eH}D%Lv6t50LV5g$Yb6J25=gN+!`(g;$l^MH66bmT8Of zwUjThZBn6U@Cb1Ov#a6lYf=DW5jbSIK+G=d$C44ZWSywC<+*St8v~_sPh=j?@}sD9 z>$hGehVkG*Q9d6bSDY8^EW^wBP=R;e$h0pvg7vToptF@g?_+sMs4jr7B{&wklmOh( zJTs-EU^A717XU(g@}|wviH@w5cVCE1K^!&}nOed@Ja;8g6Qq&)&EVrzO^Zzl7@69W z^^%DY+KIEL2o1w0o1<354>!ONkbEJkP(osEBbUMRGP3VFF=oK2<^+8FD&tHN{q()a zaDiD;6-r2oa5Klvr89Csxm}pa~>$`JqFi zv2IajWiU^r)KRboSfUqB@Ekja+sntZJCl@WN#>TNMI(pL2b#LGGzXfP4695hlO8VV z>0OZAh?fMo_<3*%*F}WQc^L&oVB`_MQ3*Q2Gi`c;S>*Wk*#R77E;I;9de=p3#yr@k zs~Dpmv7Mf*@^tKhB;7j-!&X?P*URL`(64I`?&a7MD)bzorW!v4|33ccx9~-lOMG0$ zFA}fcBYb2T9F_I}s~i2jp=WOvKuY~lIEMCkPNv~?>Zlk-$ zOu+-g9>QU6D$5B_5_AUlI>DbB z6zioL4)#)48{>>QY1q~ig06eJioJTE)qc;F=T5}QOp9>N+wQs)`FV=gSq&Mw7pLH( zsv~j=%46M0m(@S-|4`3{WpM+WM7<*52y}RW$jPBOAGa_~S%BntKd5NdNQ)IXyETvo znm5>Yv9C$47gzTI$o z7XZ2o4+O@ggq@fhqT5nio8fw;MTmW|wj7ZCRT_+TOFk|t=tz3v1wq`Dj8_M?C%_8( zYEr-qF@)Rct^)gBJ%2WG-8#7jbK?Hgqs8b@BBNL|n8gRKJOuy>A!^grQQt%ho9>-fL+yRrw=^p?MiI2;==T?q5EL^v_ruQW{b8hpb%<)GP*^bt#?hl z?3^U{tQ4fPE+q^fGB#g=GhE4o#DVlFt`|O!XJ6W5z2^(0C5Tgj7Q7^7GP%`7X(x;Yfl&*4T_mZ63Rq93j{VO)K!bqQj8N>sh^vCg!+kvT zjAbqh{LQO=l!PkH7*5z2te)ULVJ}Ao6wo+hPOljYvM}GBy-c;)}K$9VqhUuXKamKh{4_3}>D;RdpA+%py;-hQWa@@}2 zMlBzCsf~9_1cS(9*qgJ@tAO8gkxX1$C(j^xL?|_IPR^v5;_@QjQy$e(aZr_Lj{Rin&ewTYspn~FxDEMHV}^sE0l%XdojeLwalp5MHgH7j+Xd*l{B>wac($! z>vhb6U{&7#1T02_F99fbpb`YVd*vpA=J;Tjwk(9+)$QLG41n! zYR@frV`zT4^=>gCF}P41PwBmJw7h$>HkqNvX-~FlyyH6BQ3ldCE}Ng+Ge3L?&N<x%Oc%7~~t0j9wx*Cs=PVmxaluO?9yC!N;bz`&+ ze(Ip6$doJr>I|p>bMcTCU4cP-jRCO`@sJUO@M?#isO4D#VU#4Np`ADk>cj>yuK5|- zK6~QnoUf;gz&@j zrzg665KE#jwDfsXuLeQKy*2S0Oo#Sobng_GCv#Pr4-tmeIw4fDZ5}Ui!!%dE(L1P< zstqaX1BcWDixe^2-e14VB`Tr^nd6#{yJRLo61Dv!tT#9U8|$TW%rsf-G)Nd#%Q;Sv*50Aot7 z9V{!ShsC3~Ma9#FuLA=?QV!fA&bpGy=hoh=GdwF|?80CjP0j!ty{kqkO#lbU+9$xq zrWG+lshPFWCSFv>Y&y5M_m)%@Es?=os9@lk&+)r50IEVon0z_%_`QVfZevH^DxQ1F z134A?sA3md$Z+FejmB;W^A+R8q<4e}A)xAYuGU7T$85Nc z75jcEUk4%y$JKNqa(jYoRaQ}BV)oqpX8LZ#?_NQq;wY#!9C7$wm0XvRI?`GAJ|u*< zY)LbUsdW)k&xxm$YHw2a1#E^;dWBw+slc2dfWH^)f$i$?9*PSjaUSl1N@y?>PAX|f zO}lVb__c-h^BR5jI$Gv%Frqh3q)JT8b*o2RaI(YVapo1m ztTD1Lv{bh*d$+2*htEo7 zMmScR9zI^^6cKT~P6s9ktH`B^>hp+8&|Miwv~9?@Dz;>OGMplP+z}gxpn(dH@y)Y~ z@+oQVr_9&>-Xc0t(L?oYk9#A5#yVjj>d!5h#T({Qqoa&fT+$RH1AnIAFI5!(7=R4Yk z!xLpTvUm(BS$Ji3)5%c;a$uFN^?TM;>=j?XC++Gi2TwPJADC8PLiOtzBwc*3RigGu zO!$g3&T+p}AcEoXo?WZMgnZ2Mc(+^q?&Y4;0cdorTOziuV9a7N=u3olax!v1+Y^`Y zBxgO2;Ry9t>eQT|ulS(^UtTqmQwLL}Q_Y^B?DZkcpJG_6W?Bk`v$EX%cmev=HF`dP zDV-hP$}!kppg|aX)bfT)@Nv)0zL9jG^4joL)_rlx_i9Fj;|XCsV{8S6Yr(TQbOfEq zTz!W@P+3oJ1l2}Ro0s#3fMhgpW;aFAc+R+oc{*IoZQZ03Cy;{gkb@g$GEszpL%O{#7|&mE z5h9=Ff=Ii(f-v9CR73FG-om%$O%!0*3F4QWLM-AQF@`&&{ZM%pI-cS?us&9~cnyi+ zcEC#O?FguNI}%w6p-;XZkLK`-=H9+qXT_U z!4R9dk>28wD&eDTD99%?Yg0J=5}X~y@RTc?t|vCz$ZCpTCJ)7%xezvdih(m-@7z-i zq_(5h+Q)rE9`0FPm=KDb9;He4a7PD{xpdt*n&PC-7?pp5AU?$~TlquHgsYNK3)Oq; zuhAMT^NrH84ZR+xrCB2)^*GwKh~UIJ7Vy_R_GHJ0@qu(n^_(;@p4nv=PO_#{)az=F z%{gP9yB%A^t-ezR&>%r=( zx@T?Nd8FnGQdii);bEJACwiAWd9|+0il{EFXh{#WEe%MQvmMnUPJpRVjN6kiwR@0GCmg(;}8_f#fyQ zGsq1d7zR4a^I*;#5)Xw^O<41L%Z+i5x&3v)Ak^R-M_+mL+BY%I<(5_pssKc0psHh_JmkGm_+U zCcg9#*soccZZ1zaSHv%hVT*if1@(!pPnZG1d!@d35Kf6aHnmq0lPzEb`et3jYb!c< zav?cx1XC3+HPiVP!F`JtE|fz^pQMKuPy}t)W8WKDRdP_Zemm%LE$X@Ai&BzeSvSt1 z3c(OhuV$M^7MX~uw z$2cjIWOOua?R#;_!6nB}Xfj?K`O6~r<^y?wxN-Nqn~71{(;uiX(Rfc*puBD*3_P?K zn;{xhT(#9yK=;D;w8$tDjyQz{!|!BX@r#$~qY3;BS!pf=Svq3dG*HbHHK4;tF`QFX z3P$>&M$YWKFBCX@Dq`Y(9Xj;XCHMkxs%=F5q z786ODAEFcm7d&>+eCagpGHU*6xtRG;h+6kH7dj6M`{vS}CdNBrdhsgX(o49fc7)we z4P(pYu?(YJd06(FWTM@`jNr!|V~3`i%xE~Mf&kd$*A_20yIZ}tL{*0Lp|kK%C2Hh? zcse1GH<~di&^hj+QC&kkW95q)dp^6tt+`p2xw=cK&-PCk#0$aAw1*)?m_4H;_HGg1 z!W*gf-x8hPTc_oR4SW6ai+iJuB)ue`&P zsS-j>(yo1@mn_0S>36^mDRTgiLd%z#D*z-T!-fFm#mVKKS;IAOQvtR$2iRd)JPY7z z(aCo%kYH*X*ivTW$=G>aGtzD9`yQ+_n=1tM@{{VKI_;y|=2e;{-0|81i)=W2 zr|zU_INmWveo-swg!m$M`{lZ3neo6lv9w-V661T6OjG5OBhT1D>rqnG5uQq%`_c9sYTB=tk#7=q|b5oUHPwtFu3wJW8mPWBZ&s%*ir2$d> zazlIc9i=?-aC;W_K>JD1&8y>)R3>&#L6N(A0Z&K;$gQoWq3FgqwlJN~643(kdWNp; zT`pA>wupp^bJ=5EvjM{=`f~FQ`!^*TJ{6fHkEc10Gg&cOF1l(NGx6>vR1UsF+dk`n z)vJE6^~)C}lD)@L#~e$$;sz=xZuV?Qs&yhL(tNrlhn>Uhiw@t%y7jIM}o+!1;@*0>C)RbzN z6N#a;>{kgFzmai2#NqY zZyfI`8J@xt_H-HG1u{WSaZ=zlx1$&F>K-CiShlQZkUt`kXN2@n-pi78aT3L+P&Pd{DOj9RKIekaR-y06G6qb?4Y@&^x>9CkNJb2L$=1I&h8sffgzX2rFxUz2U zB`?x`nU8jc20ZXEjM+4v z1sBcb(@?~mu(MYdqVS4u3d4_XXp6BJ1(cuY63hB{3$}<%dp-={jDo`)*t;w<*UWyC zR2V8?8%NYhqWtVn93WV=@I~&6n&+>{`nVQl-cEB#KDZulB-97Js~z+-?}>-jJQ0Cz zD%QrR5+8q(hN8)t4Y~G~TmdsRbEkQx#e9$@*Aw!1l^~Fd<9aBXF&-zzj;_3I6d0_z zx_yRLQLLl2QKMvTiic{ANVH2Bju)klqsIHxiW0EkSmYw1UCUOhXlJM2jBERzp4J&@ zW%M=YgH?EDanP+#7mftU!P=`0g>?oORvtoX*3{Ht+oBgQ z({~+tPR4b#@R)MUZT)5GU<39s9~C`$Pa(PmL+vu=FBvSKl&P(V%Yk|{09%QyAb z%bZ^t9WF?MoTUxMwfl)_0mr$mi<4~f)DALcBRwp(+^L)AQp)aSiu6F2okSwNz@7|2 zU;-b4k#1rX6-x6`0XC@e;9Zl@Vzv+N*Q@?rhfGx#!Vz;JZCIGtq_^B5CwE z%AtIuVOf(dn2zq4(B7Tb;gI#(;E8g-T9jLEt~i9bdXIfzpzG~(w;Pu5#Nq5o4-4je zcP$Qg@eHFw`d%i#X>nHLF3G4CQ>hr8IWZ3l9F+5!c#?^)?((Wv!JiceRM{NOq~1$p zYOf2;>Hr$2kw^>NH~JhoJ<7o$pM!#^_dQOBTt&3FbzZp30U&oY@6uGXA=?|_Zon`| zPR7CXp_yLRovP$4>wp|lq3|KoV?y-k^~NSzq<5RE={;zv*h&ES{sIELd02kCTqRLF z6yrV@SZZSWlp%tLk^pM#NhEF9>^LFfn`cCE%_q@zXp^-&gs>Zc<1g}I5I<}le?Etg zJ=IUM;`y#y-q^F5ex^DLh6q&13=9{0?@2s~NVwGA>32PmW?Xx&Z{7&D?-TOD*#p_) zmbJ=jZvcHGd*gxPzRH9sxblX+v>SoP-75YxGqh$>_u+Qgz?j{tVE2>lr_k7p#{+Ze zFO>4_?m;%R;-d+Bn(!zzLGi6uY7d8dBfYiG!V(BWLl6p`ddy*D!~{(`k83#i$(kZvo(dYuf^J=;~=|qFCdDr@g7i-ru%X>uEi(MC| zE2BXetlx!9`uJf(p84+E@t2eq+t39DL7}_)GN+u+lXpF>yOSDsotNMx#HPc0zT&Gz`I($4_E;9|*6Ilv$`nP0otyt%w1*xT3E< zEl#*R52@}AKosEQSdbf0e73nXcphkZCG6WdS(6A45b?HykeiV>JV~Lzl|eZ2y{<=X zgxPCMPhR?amM~8}hFbwpLDhV&;^ALNFb#}SnCB^GXxkWY9TBkf;K3@B`tu1G&v%@b z^YGA2<35Xm!z%<~n>L9LrZmx@$ilgN;m8ag9RuDJXJBD9E~gU@Or7Dq>iGcbur0dRc&3zyy#Ei=F|; z%hY9j-Rm0@Lfnw#P%NDj^js14=(0EMPB7wdd4Tw#QlW-o!G$mDx}xO19^;9(Z;l{o z4xp;?hbiBL@axx4Ie1vE~7L|IO(~f%ERdCVF zTINzG4=mIfSKQ6ZsKtl23c`BX6mK?bQD)x}#$4s0@$1;ukkyMLj2mp)F|tODLZ)Cr z-1yFEX0kkln6n1f5V77|toWFYOHMd~7U>J|r?tStB#I&-3bI^W<&TS~Id4nWs$eiK zApV5HOJbUPjV=rzDzvyA?H3#`FZS%Rk9E#o zZ$L((#XyOZTzPI2CEg2T%}~qdYzN0n%|12taO;u*7QOCylJ!3tNU!UwGI{DHi8DC?IYNU1UDKLa(9e)H`N=I zy!ZeqMUUsb6K5@6;2M;T+>n4apk2!))^dfx;zGv^rN-$Y&0&{JcJ38oBsY(KZk#3V zUIzQjmq3WK=YGlL$-HQQ6UC5g*qr3A=3dVs^H zNV10>yDwk~`pNV=rLyrpjE%97*m|dEK+bQWJvP~m87ms?QXL?bnh_6&?zl+~*Q3fG zj6yx>!W_d@aiV}Vp&&Mi6joCQ1}W?)T?!nXX%>XL#~dmWw@-l^$W!K2$#3k;9TuFe z+@VpQA6xBlsK-DPHoIEo>2-qNriX**bfKYk2r*_SsZ~!@k-ZF>cfG+@(pd(!#(XGO zBel>>Q#qGMEgf=$uzelO1OY&j&lV z`|30>qP6!GH^fUio|mGxl0*q$V#i~%D{}?%m=H587gRmj!mvV2b>Ha>d((+I;{_gY z0_4c1!M$ONeEMKoddxSoD5(I^Fv`|^FlNqRf*)CUo|&L+wFt$#9>8k_7#~)xSW8Qn zI--kiVT_F$H=ems2uBo{$@8(>%49M=aC<5Gde z?6yTq79#4n*t@_U(8cby-Vrj=YgfDB*xp*LbMUvGe_2sj68J_e}3ya^seiH(5?oamOsz^m8!2f2a+dqy7m#8gwbHvLV}mDn>qwr2F2k$cdNtZnh8%a>nU)ANd%!k z6*KKBj8>^^o#s@h5N(i`E4>NtaCuV$E>?!>C_&Sn9_*D)mXLr6-KFI;n{DK>aLb}! zjPlMNpxEF6UYgfEM^-=Nv1kegnO9Q3RNX~XBW!v7m>#<~OVWxCT|~Hu5|U%d9(6{gwevH|fUJAbwmJUpt+mdot&zqwyel{yeN{e>1qf2-=>`FtqM@)I zQANj8kMKL-4lBrKK9Bd+?It{kOQ%udyj9tbwG#ocVC7o# z3gv{`9z#CvL##tJg7Q_<3w8n?+N>j#4JmtcSF<`nxP4Tstufy1?+6EvBdT&dCqspm zU=3aBv~k?ui~*b-7G~9v^NC0Bh;*zIW~Z~2p9&Wk96x?)>ViO3M3LOXG#Bi_8~92& zyHGM)<%UD~@y)@jwG3TU?9v1vj>o_OC__|(f{d0}|nxgEwN({DTW zfL~Paw3f2ji%nE_@zmwWMlf6hJx)nw=|o;{GuBZNM17$CIzZdYbvpo$W_B3WiVmPa zyDnREm+UzR7OmVN6kqIk4U|s?6Hq&mi4=8G?#w|6SNo{aL-LY&QdvNQR>a~NWBw2v zKu@Y{)}*AN(uIp3jaNC~8sJ7u^KIrxRAU?BW1#{bqD@i~rMPlO4V;VXJB3yd|9|-$yhjm&*i;-=XEEV zcB>W|{9dn9WDG0#0sVbMdX$@}JCN;pkJ19M7Ep%yf{y4#4mQA}sz_aSWwVJsA+REG zc~-|3H<@g7tI*Rc(L~LIQ$YY?Oy1eW06K5A=~>Yg5Y>|N9oQB_GUxJ8Y=#B*i;klV zJhj+k4QNJwuP`WHsPF&`^MGD&6v@i%ru2hucsB@~#-@B!b@zkbFMt?@VKem2*1hSFYOQ+Tfw8K_H-&kFrK{Pw~a7f?H)i;~Mv_{65( zy(*>Wkx8DGaa*(Wz+<`CVxkY7&jy>(OWFI@R>9(I8oNpEy(L_IV8BLN%(u9dYSwwD zcoU-Tw0he0%%5|D)V<=KD5p33K>V^u6y&u?0nKBn%n>SAMrKApFtx1B2e?n~87Rjl zNda4=YEA`Hv{;(0bD413((~oq0Yx`{Jsi0Qd5FsTj#BC|F)cD)3!YZok!ccyN1@em zXH38Bt>Jrbe6zsc>btv$z82|}wdQhFBoaOB1Ib8Qzp=naR~Y&%O&nn;kNF|ZO-jIH zxq7eI*SF=wcX0%3cX_lHYR=XNJ-5grWE9@wCSE@o7`9O!PxpF8m)@-$4QQ!!#ElGz zy#1FVo7saCiXOCx$O7fEodMQGr3%l)?KX;Hlw<&#n>56uVm!b`IHQbi*6w;I zZkVaiJ>I>z*&8cbLo4|NgXpJM;6C$q z7Xbw)*G(p&VU${;X$AzEThomJu>hSP<~8f1EFecGtT*}6EBSTk@)@{tHuECr1rs6& zu5p_+zQ8(}3ZYmHQLEEL?9-rERXIfG!CEJd+@Lxxs6>M)NP++qMI1pnKn-(De2_7N^rpHoVi33ebipcw%J$A%;l2IXXF72HlQP5$>$WL;^wXz zr+YkG;sf3SonWFP`;KEUEyrl#plGP%ARznny+Su{2_#xhXI~H$l6aertylNw&hUCH z_l3y5etpwT#yJtodM~aQ?CooCSvYvlF*#k*-^<63g_RLl^+j~209H!6Ha>8n2lECH z6+?RnS8dUxE-N7RSvYI^-6_y&m|r$9#0G|kGTo)iWg zO@jhEB6>J3kY!)vP}=2TC}z)sf}Gmw5&Pp=a8Ve^ELK0VcjA{v5z zqA^BQd$wcX4`01govFypW{E`XaD82(#_pmR^B5{?$)V~ViSp?L>i}g6^AdY7CA&A= zyhlYkB?Ws`E6s~fsOe4{#4|(EA)d%15A0&HMb8P9@j#(7IoPY^3>p;7}9Ug>U3nWl_Ggvw4>jlz)c_SLp z282|6ZzvW$Ya@KuXqKG7rz<<3m7jJM5lE4Ac}6jg&0gL^3BA`*A^7eW>wbD}YIllEnyt2- zH2Y+v{j)T?>>SjF~iNqHsV*_!v-J{k|o)OVyh(Me@h`H`h zG$?(hDzKK&l~+jxz8^-lAq{ibOi+Ed^YAZgZe3>*3r4#wpM+?{y*h>c%LdyW>8 zulR^Q#}1#6Gp%b#$R&uO>Gw>SE^^lp+XWt$$32Mwc<6q-CNwk6#4w%D%9&a662KBy z;3Wo7x|ldwXca!5xlu0Zz|!c6dIa}uE@j?JhyuG#99Nh=LhbifY?8#|3Jz@h)Nr~` zAdq@pK_{b^BE`kZ$)5{>Xi5;=lO<&XlrpO~$01C}hv$WD#N-y**Rx{G zUck&e#FqsMCk1Fq(1f0tG$)O+dY(z_yL8lO*t}lhU7RmOFCW9!^AN?gPJ-Rc=(A9# zDa%)I&x;D{2x=ttb?A0`jq%{ULBbazVXf*;o<39$dNFyI2N7si*s>pjaBd z?`deJs{4Xh8_1@e0fIV@0cbTOpjBlSjv_{3iAT}X!ys*c%?6Lezq?7PG6ZF^hi*8-v+|e~fb#$nL9C<5jsWCWO4zPNDa9dFG`^r`*`8V73n-dc)1+a=qo}`ZBx6dma)K z!_1gMwFP>zVxewll>)kAiSO*{nrG9U35Y~kR(eB=xp`Jf+9w?BQ$FvD5fBugnhCOr zrFqDr;=6hh@$y|;!0{>gL{Bj%1ddd2xv%48^UX=` z*+#A>izO2Z1t3U!{vQ~?F z_vGocGPym0H-N|Q&^>DhQa4ghZeXz2MogsRE>#QipdQpX=z%XNifX_(W;>#pchY3P zejF7F@Br<4gI2(nDul8xB9zP7{=x$u*c}HV*=!sxMKmOV3p61ND!i6` z57!kBtwI=9>4ZyB^!i0;y?LD)^mL)il2c5vC~m<6(CEFoM`a~5mB<0Y=uCA6B;Y54 z57Efr2|f0EbOh6V&p@&K92+5^+jm^`DP0NV=uyJ6y_(E1Q`o%ThIn4dfJQ0MiV|8T znLPHwHNCPc6(?sBDad%xoXJR#w0#;Dx!+b-#}*=_`s*@Fdusu$zKG7wgv@wn&{Zu+ z8ABdJ9I2E!n!%p@W-Rbpq!w>Gd zO_XkB%Bn|^%6aEFLz2(u#ZuNnQ?#dP>6J_gPC?|uWFOksV6LT!M1qELKAlhYo_n(@ zVLcgnNyAT06P+5_C3r=o%PC{OIG9+ za}-6gt7G!eF2!E*J#xKL9}z-qrSILrKQ;9DFTz7lOjl()0Y#2N4*Igk=}0pI5qHGY z=iVUdJbbwHNUV}icxU6;p~t7=Pz z@P)Bem5|CSju+5(*lzoJ?lo7)c;+rdwO?JTJFUKXWbWb!99Y)~csSllx-_t}PM!nO z4=ibpFUOSSg~e9Hy~xY$oxxTrAJRTDXW_&uwKT>y%huAO3pqY`r94@U!sLpy$zcL> z{B{En@DL?4vMTPB>G90QWK{uz@M4V?S5I3IBy&Fk7`IOI293u9^eD&NZCN{Dy?GEI zdt!GU?q7iLDqMVc?Xd|*W3lOv#!WZVxU~DSNMmi;oVt3*0Nv!UR-)j%*!zjw8UU8A z?Vvy0ryQWM&JR$W>WRLUV9Ap{Bi?t=3IWsp97Cf!q+M}4ce}7B&DU=cqZA6Hh1t2X zL$J)8ci_pDo9~N<5~wZiS}ygt>dC{~t!*kd)k@@Bwq01q$x*LI9w20vH#H*5NXm&1 z9fx(^K6_P%SI3x6{bI+R6q$rV6*(Un!8oh)&`Iih`H1c?N5tvUz@`9vX?G;UP#?zD zkr-ZKZ#ciKAUPzm;&7V)*N$dJb& zXV5!q&#cP1Ev5BQhtU>MU>Qio#+rKUQ$9DR&|~tZpe>Fy z7|mA&sTuDaZu<2*gS@JjIEUM;#MRI@9fOYo{mu2=5J2c6UUSl`V(t@!WFC@j>bfT3 z1^0y#3VH*xJm-|5GyyLRDP9>*NVCa2MQ4#Xy*Rl*ftO%_SOEs-8?|yWxDuhnOVv9j znjG2I^^7R=EPL*XYH^uyeS8PReIN))$VjJgV%SWGJ41D;ZGcbV`EMg`& zsFh8v(t3Xb;!{Xd;yV}cVrY78%<)Q<9+AoPEeATc`5B?b5h0hwDv@b#`J2G*ER&{r#21|I{-#nALZdX?276J(r zdy9O|h1Eb;*Y+k~G>zHjF4+F2C)F+9 z`exxmg+70a6H-_)qy;gWZ((Q1-m`*vaj_h5IEtcky(q$~ksM{JFweca^l~qf;Z!sO z(w`()%Ptr}JndcNyQjfPH3%~9(eCM;IM2|Y&A@fT%wXr+4UvJ@=RIi`O9@a+`dTAf z3FQ&7Ox92+3Fo*z7B|wt+H0;R*<&*~cn4^eZ!7LD-isHH>1BxmRX!kotIviU{4ogJ zDfXk1HIKQsPI4pKDz`NNxT~Si=CAbG1*C!7?&G1CE(2o@>46w5M~2bTyA^Hlc)Ac@ z!}W~4M+OWn8_Jzxy8`SSOMuc3pqV=i0_#$`uWu*F|JZX(uAuMFQMc!ee)=$N(c996e@W)jOEe1ZOb@FG-T(YgBS_G zxUSMQwBd3w5D!xY;WJ3#8%N2^*RJU)#rAl znhdf1BwzMM8@7ZNwVCO%d5-vr5`1=I#;jCKG<7hI$HjO#HKtb>rF6$_QPBrTWN<4L57hG zhfzOv2U}q9gh87DXEDLRZM5`}qOoLgGj0~b#{!YNu8K}`ou;H?Or<3j-aPt3aBNDH zVepvM#tmq}8Sxn$anNJTJ@P9PJAi{-P1QDfhvY99OlO}QIO6(ZTq1q-WqLu128c}Et8F&jE(Rl=dp#)J{RfM$gNdyz84QbL0*yY6dw?oWS0ss z_q)lhFzfVnB=mWkAx+`fYdRAzo|;$7fso+5;^bt6*;Wgfz6{8+iSw~r!@|UUudZni z^d7c#!~rfMC}d6Gdoy&GdeE;J-k`J&ub<1~lw3W0?4@pw2Is|T6VfgPEN-B>#DJpX zP!HC?PuRc#k5i5{jc1pL-WB8=u)Xe)c=cG{=7vVLAj20V0zM@*_`uyPr7rKODEOw@ zTXurR7ffU5)_n(l9E^R9w8C;!(Uv2-Rr?B1`4zi%o{RKrqZ4mjUZTKB-#1DVx+pi! zA(Siq2m`SIaufu5Eb}I_qu^92S2K%6Fa}O2Fon^;hAn`2jZ$+QB$p%{Li51_6lR;VP&{P2r*A!;u{?ye;o-gz9I5mJsgCviy+~^AV%=S#Ry6HG-036ec zl+^*qCxzUpPmem#MFJ89m&tqEP`SpCq)s8P%rl`G*>9^9v;`8FgA_tuV~sYeumdAJ zc}OVBVtI3kE}}A_rS%Asi*#Y?9!gH;k&Rrj zF=AT3vn{=~Ft)&*Lw3a@NPgazaS6V(&CQ3cEqyN%bj*w;a=Xc*4sAVBxJ>A}j-1s= zdN(eexa$Syl16w?PTQ=~gdU!CDHz4tsJ)Ca0P~9gy59`uIBkow@#C#2kBS5{eeQOY z-0NlO6bE;+`_SK;xp--`E{FK$r7k>beE^~xw!|dejrP{m6GJKm4jy?9B^%#Nj&6@s zz|)5YGw{@(o<{e}UL2dqtc*i$qt-*@9a%XOwNQ()eB15F^r|Z(sClt7N2hEez)N(I zfp-NQlredp;;=i5B~IGN;z87HOg&oa;o3*#m(Cpoh#@{=;+~-6b4U`Pn1D!#FNCyrK&0hDDE3Manjo`j$# z*}(F$b#NDYRQ4gVt2F1Fd4D?@CJaN)d5Ryy8e=3j6@G>zb4N$>su|Jjejldskj3sIr64NUsf- zG&#~pk&R+AY1@fGqp%nbNuXa*mb)~(R)&f*b={GRkY~%-*JGi#+)uOvjnK8DaJx5r2y7H-(nLfQW|~R50RnibfImFZCY zXVJCC^d3B3HG2v3%hmKL!;l!0zCE}HUESkyC|ME;4c(J+$dz#LLfqsfaPV*m?CVx* z5ysHE2E|iBW@vSMeL@&YaY1ux{KbDETzsX~6Nn{NP;5>C0EbjPmefGj;!j&N&I za@#kAlV&@Oc^OJ?%y!@Kj&Q`Q5#)d*o-wBBZBd&Iu@MoF!SNZUS|W&hlB@Vk9#Bmf z(-AzYIN~CnG6C*4c7vG_6{Ks=`C^~Exwk|TH5+3hv@!XBdwZri|DM)W&ad18~gK>~NcTiFPb=hLzezO;@0R zGF}Oz>GQsD9l0P3Bci%387oa|b0q^S+Xi||C#|nj6JC%L5;Cklws3yOed;Nk$hnY3 zBINKS9vqaOf{2xl8_=%W?nWB#1q=a zM5z}V=j;(mHncBgM0Rl9L@|XmUs_`TfL9;Y_9NOFoDq5a#x9#u-fIQa)u#rhx>NcB z^40;BU|)4-6$>m6b4HDy(r^K9+;hm65caa(3la>_lcP6l$gZ}2!1GMm{a&W5z9}yy zer^n$M)oMHY*8M9rA{gwwC!bv(3F8vctSYI%FaYk(3B+VyQLN77fvD9levCrYZ>5J zzQ1m6#4^#sFw@iNx3NTIzG)bH<}d^#9tWx9j3pyuysi7Hz^Fu?TPkBzYo19A(s{d^ zG}&u>L-EGWLBSc{R%E<1S75J;)B_sCWbM6Bvs zPxLX{@!{cYDV?%_Z!HG9Owm@jf*rNnY${wP5fZ``SBeqzGCW?e;zHu*%J!mnS z)IrgM@j`{79OwM?pzI7keV|858Ov@Rms3?S6)@@d#%P0~1tqQcF2R^-NIDbRV>eb1 zN3~!3?K`;pM#ZPXRJQ32!mj-8`$lWq>JZ_sgaiIuf<`VG#KaM z1VmC^<3WYeVy9$b(Bk976q>^pH#S&t$1CB<#e#8$gZ7KT0&ZogL||+ipSt&eAoWou zijo&iM80N#JJX{l4=nQB=M~wJ?5Z~AuP*s~XvjUA1uAUh;4GGf(L_kJs4zuC3mzRs zmSxF3pW_EFNUOsrsDi2kr3)oCvBnaTk-NAoF`M$#Phcz#I@$<%P2gaACW5CtS`y%BB>r6BxbbO{uRxD8%C^u|$jU0QFu?$knkehfk=3Ec$h|y}>MbbK7z!U13fambGe&t5# zetF88drIn@RTNd>nG&95YS}y&+H*=cAOO{h)BfJQ79LYP=y z%7CJ0DEXBF5=E_Y)g%|4fkzm?)uhNEk)15jNde4-t#&^2dHA->aQpUIJ(cub&yI;B zuDoZ_97X6`SIJ?$W{;?|IRjbuWX>u`?WG7MZL?0^df5ftBSOWiz?16qnBtLp&F>Qy zq@R0*{^lcMuJ)lcBRYj=N5|%2A`@K}ch69$gR;pheX<$MMtCN}p3er@Moribc?Rw> zMJ{DuoaMow;<#*H^F4S`hSq_;Suq4GOz)RUX((Rm@SvfR7c8nnN~+-+m^0#4aB=rg zjq1Q=l}uGu0FS-}FnP^O1rRu9RDImY#bh=~dw2f0ur2s3pW@x@iw8pk zl?NN3NUouMD`iWs6yR9qC^IR7-cj0INGV*p;mh|7uk$Dnd#wbq@K0Y6kjT+oR;e8B zegXDy<1Vn9#A6=>7g#`qL`S0B0YWQ+KR!#29O`_o3?3VlV_?!4sz8(_vhkGMaArh< zX+-@NTi~d-%mcxZ+pOPvyrL6G^SY+l zH`M@UVSzx4Ba+W35#G9nR; zi>JdUnmObfcvtcw*&p0f7?YPlW3^({?@S)?rJ`ip%iA`^Sh%NPIG)r2hga;%VIz64 z%W@L~#hI{R53pk8p4l1=`_9#w%z4vcDoGlV1S(2Xyk0URcfonN&epmWSu%msleQrQ zjNRG??_gDd*K@^0XXUxF+-Ac1Vaw@33uTqPa~`0uq)5A@w)JXz^~e^+%drD@&==wv zbv`Il8sajGDvJqeGLFnNpjn|CyS^gDeM5@`>X%>&4;#s-H)1AX=7Mlu?F@t@UC1&f z6hw;-;1jwRB2Mh1=Zpa!&Px!QmN2QaW+_wnIFtG9GzH_+ey3JgoG`nexZrQCO`}iP zorLEImd;I5edqd!VVzXd7qiqZMq1c4L;b~EHi-H20DtvTAEGVxF?umoz(P*DQ8@?*s~9V}?6Bb-D<70ML2QaPv{%m_mnuAxv=^Ts(Yt zze?c6w8`s5ceU~yX!bd0)jfNJYuTc9uq6T!tM6@&N`| zb>;F&E4!-JwOz`j+gig6JL-!!H=^~bi=2w?!Vq1`9y7s(gdPb*o7$p;8!2+(@HWe( zi|h8Ihh>4HU@j|5y+}E-z9<4oF91!l?~QrSpfzkhkR5BJ(7KogXGZl(O4Z^nzYiLXn)pNRM0;1a*jmVJD)f%z z!m$aTQS>s&Sq=@D%k(Jwz7z^9WqFcrE=LQ>tgY3!5Os;s)+lX7ZiS$@wNAIXG z3=szBd-&?5oBA5D0lyN3Q!n24mfwz_V3U){#U)E4Q3ed39$JsoVNAy&kj$B%Er1y# z)(za`qg9h2_q=9dm%3-zep7^+^vc7VMyDH$yeJcxvIm|0%35z-)0wYLIfRDdiH1B_ z^Gn3)(Skgx^|k5(06kH6bdR3dXKrw79cw@*R@${NO7`y2d-Vwn`4rr5r=P#O3aBtd zp8?8DphA`-r2tv#nl}R5qw$KY4gk=lRd8s-p0)-Inb9^byC5vTLNk5^1!T51tK}!M z0O`q7Yz)1tQ{V*hVg^Ie1aKNRNFtM}57`#dv?$AX+(7}Z)YD`afNxYHJ8Z5!;Ucq)Uc^U+%U0!>n*rR~w*K*r8 zLgjlQzK5_23SM^774ax148*rV%>cTMweeXh?$Te#7;ZcY2p@b@Fyk)3cbdhIm+)lB zp6=DT3P7(BRIb~$8g|&3`h^6wGh&G?ffjU8@DY`(a!}PZ9e7;wx`~Tl^*ZuwRe)UD z<2ajIBRe^@Cw->#QlJQ9LhE`&7ASS&Q2_$b>PVQTYNkAaCbEv^W_T4w)PDNZJgy$y zAuw@p8im3O>kLGgN3(}xuN3gK`Sm#kIRF(Ux%SKsr$IdqlMZnS^WYoZ*Nm^Hb?I%| zQTXF`&cnwwtTDThgHVRHmHG4@JHE%qh9nQ;5k8A1bu|-AHqBGW<~%gx)u8Kb$Q^VNQS zbL7(=5Zs2b8UmhYS-U*QKnsTy#IE#`#lSP`*)Rj0jFd|{X4~7@*vIULS8ih1ouq^{ z85aZ7pa1{`!~$1s6P{;$P>`NjHM;}Y+)e-mvLJnyXa2YztEz4ZA;$G(4z*?O-oi6Z zk*o&HI!JQ#Sk@lu6+K9v@{Qu~-Bw?qDZA-Fvqv!3_KqbP(?I$79ko15Sz)7kr#{$T z845L2EMEI+?=|V8B2U$)y4jo59n=uVeWFUw)_j|ySA=odk>Bl+L!O}GPPCFezCJF&qshM~yM1fLWzK%QE z+0zK2uk6kr8=tf6mTQJBI7PGEM2ejOY#)Rs|_H)q?&FCytg(nE3_OU zPeh>tygeRML_ogIrxq?_pN=0HOx zD#)B0he!BKMWGy6e8vns$`Y%(V{N(O+;OuLc6xSgR&^>XRZi&ODAiWb|deG^jCQRm~;qYeU7`8 zxEocVnWK2L@cKBV%IYERW3ruxOm?~EWPZ;J8J){!Z5`HGoGjU?p>@vp5vki59$Xm9 zhzlbg0+DD!v+>0vXP{*Wg;Ppmbc~=Uo4UR^N!Y<>-gj6|`Sy~R16US%iBnn;o=^lm z@3FH;T<*4b%w`}PYgd4F(Hb;1i9A*tRrF?JBh_Bu0ZrKD+lqWv7PSLdTQ<9l zF40J!K`|l)OBge6VFB|Tsz#Bq#c)# zlbpSP`mD${cVCX~u#qm_gM{#z+*W+Ki}r#EX%dx%#saX<;o4q=z6YAa{-6NX_h2Hz z#hl&~T`6;3Pmj5oAwOM|S#V5$i;*uuhS9#E;h%d(XfWHL2?yNEj4BWTcSmz zJ_;dFx86|evro96)*Hf#<=JTUj-2CI-i^Y3E)Z(>;$*;$oSll%szU3f=s|mSD!meU zAkh+;O{aKPEFG$4aySu$cf1nHpk&^=fthJflhV0~j%=f+R-cv_OG1Hya(eoRu2AM7 zym>SebC7g%mP#%Vsiv4jm;5e$rp6Z)B{~SeOUP6Jqv>Gd5DJ-lU$)rE*bBSp2#)1D z^)eWuI5z?8brC4Dcl25(J?Hg41V|4VFYrs;&9NraVlaa+pcaG@IJY%grt>voKer~Q zm%_I9s#Yu_Yo|{ZkL+za#3MQY{qsDfh$y-5Mwp+Q4fs-|*qjfi9HNa_KXq5M^hxCl z_IHwwwlFlej2V4LgvTRGk71I0naav7p4^hT;OlWhd)L(cC^S8L>I4uv#Z-aBvCign zOkVIKgD8S2M}1Ins!)>YZ&N#5MJJyX8^ol`kn%R5^xMpPnX{9xfuV9P3-zpDF2$rK zY9NRaU|G|X=gvK`3l^*HVxvQ4$LAuV$S?)O4-aC)%ORcL6M!U+l7{iSdcs7>_^?d< zoeP=(1i{KZQ*Cu~RuSqr(^>Lg$l;j|<$D@hA!b}7TgGn&e z@FE8Xb17WQP_+ZWv*yCLnvdX?v6Bbv9qIFVVE%e+mNz_K%z@MG3DC0cUYgr^!A9X) zyK@D;7iL1Q#_&oBA2fmniVEsHm0J>tLJyS<^@^%dA$E3EXIs;!Z=kz&{R#*JFk0^0^4EUhS$o14n^G~qxLOiZVTLdWqH(3Of{Dm*9?RjJ3GE1M+u$gU=4jfs!kN^*B3{$$W8jfw7Q0QfP?< zdgx8e3&w)3tSNy>-+Y3EEU4U+rPVc0u$g_{aDiL4SW5JJE+fi^I>|e-3bqp#5)WDL z3ijw|iomak-<)hoJWlo}dTmO`n9tmi+u!IYv5T3oxIr$ZQ`;lRhLhhAvo&c@)`S;r{FQ0z&@n(tG~cORY!1n@fbUn_SyDpw5-ROq(&A&hVr~CJawW1 zSA;}J9Ry^oMQrZLIA92?6A(U>0o3%gVUL;ecJ~kU~}@KgA2NmfrSZB|?*q zNB{|#sc@UOKu_2vqBXGSHNG*5XZ%uf*#@X2!Bd!py=t4KLL6qXx63LHG~E?>G7)v` z0ZjWk?onGy+AqM6XkMz`)g{TIs^3mD0?VY?Ts%=e1#lZQI>RzKV5rt^>*xm zgxa+q_CsImtQ7JWq{m$~XUCZN@=;VD=`D(vjTzSV{UT;EvkA|bCpainA9Ycc38TlA ztockZ8aCEw4u#N6>hNR_mDrFKmeFOwyv~G@qSJgGM~{yyr~_XFuE4q5tA3^|7#eqE zk-68@hA@kY&yh422A}#MXjSja>?E#u{ExVFQ>_Ap$tVE{*@Vyly&$GR@TE9qK1Qo$D%e}gb zsg2l?tit!e`pD0&qS9>4^b+Q%F0((u;h!R0%N2&go+NT{c$FcGaq*CKni~U~`YE@v z7n)#^Pwyxo)idr!+Vp)(u&JS-~o%GxvPj|sFN3B%J01lM7OE8cbgm|0}_hxMlouh zrlMBeq!WpNTop_}aihavJ|d|=A>Ut2#D(&+fZ`Ro=q$K>HS>aG0B~0sBP&Z-%pC@% z?jG#8i68faTa(x;B4q4?^?2fT>*%&})nevawqD?&_&P}E8fc#FRjswF(V>FY2#(y5 z>v39^`p!ulxoqJJJVdKoL!*d4(edu8LFon7ybiJHX;Y;)gVhUD*V)4Hp3gHI$I`6I z03IeH*mH1PR35+1lLw2h$h{d*{e`5`kV=WAY4C6hIKGN#*9B5UO|S!0E(3QVQ}^M@ z!-eZ<=hjJ|c;>;Xsi23uPkN0NFIDD%Yqt7vQgUTe!7J8iUzZb_V8^L_%+qS+)$81C zXo5(?WAzwG0XLAL%;8O38NMLjXaVBhO%p0>vj(rm1{y)!f#MMWtI8(dH7zwMqug_S z)EV6)7W=$%Z8W6z$<>q^9Wh{TU?)7L&%3xys{qPmeuP-y;JC$w?YgcMY5J0~RqOZ9#jIo$!{+^fY7Lr*-6w zq>_VHCDy3hWwyLGR#K2Y3P#UKB|vn|nI8Cin`fY^=#X<$`P8I%bM&;{oha4L4u4@yYnB*W`ydfna8Q)~})M&9P^(kW$XCul24 z1jKt0G6hu;$=O_X^s60c93g=!cy*2RQjS_wJ+z0ZpKwtJDC(PtZ0~_i}0{Cm9a6ARFc{H-XvWt7~IbC@vnP(uk zOfaK9OM9=MzdM$foUYJ4C^E~@=@2Gr*y^U4Hx`~lz=^8g})I}C(;IlU;1k(`_8-j3M z@N!QKi(1y6__$6x?37R$Dl;NftiOH~cW8U^$Qf4icqD8bd3a^}ZH6xkHVC0Jypg)o z)fFRA$x?;|sMGT%Wi2{YQDAF@9VQCOvwP!n9w-T?$2=Oh7&Ie_c1ux{O1?ZZX@&)B zhJ>C%IomaL2R7QbnZW5}K-{_sYtVlEmhB@l?;L>@XiVoTV^G&$u(5DigsHJiu4*(R0ARekFawT zg71Z%Nvw<4S!TTOddFucO{mt$vW-uh;dEuX$F2Rb2Ls%BMOhTavONh^^xdJW)>{#l zDte|nhR})6llomoIpL*VQsGl_Xs+COG=Z*{?F1fp;q5pKm?}t%6sN*^pt7E)iy0E~ z&=a1Fzj$bRp52_CyGq~i?Uz7%+lQ^rAfxT@wdo38Dg81Wvzk$$LV00oqFhj zC9G1`teqK(ue-oaNTav8`eGt#NfszqtevhM0w`~vY~P~+)IJT5osJ*&$BKGNI8!*I99l^`daSbUclBsK(0Trke*H~K8Os9 z_PKtT?BglaQ}wiYO4zPJ_x4@5z|K5%Nmj)!4#Y#1;ZoXrQf>noIy85h1tuO5ne&hb ztcDMXQjPd#ndO#^4Y4#b{A}N!_+ZCA(|tRbxMd6i8%i&!dO48hjiBxmbkuw!VxWXP z%1#v`_Xf_8gaZ+N7J6h4-3NpA2xkwgW&@STdBYbjS>RCrgWI zfj+pi@6v9%D)-TyNlv{QcX?L5dhl+~2u;|{9Pepc+tdv(GL^w7=|jsp+tVQB^0MZj zR%YTE>@In<($($w5L5Z~GLG?XihN>xWFcb(Yssg32yN zMWE>zzSoOsO2m>7m|Na5=UHSuJfTgGV13is8s~Yq4pI1QI^1jareX$_$JN6wFbz*1 z78_*S+%vK_A^x6)tmhlY%ly;RJ;34iqS0Zxw-dSF8L?#tA(}LB|_{D8FJfueBA)AV; z_U$3`?gyqGrkeCjk$1EJs~{y`AMi;D?@U;&y_NG|dcNzL*fH;}R61_pHo4MO1~&On zXHTscM~5GTG`6!lnI_Xy3cV#}XJ->ep?bp^+{~|Ltxs*eB7!%>hq}>53T`a<9uPd< ze$iXUP{MI9NV~N$G|Oq*o5f|GFX7PkNtHy_<9gj1;47F>JyO+|@fzML7I>#EW#*1i zvp_(!SJV{0*PR9F+LF@L2&u${M)bb%@u|t3@`#^BcL@{R);3r=hQJUUm+G;zKh4`U z7CivN6kM!o5iuy~wr#y2h(@cE+MBV~O5ITJd+7O!xJLToS(dXdc`{Uuqe*Nm5jg^C zn#gIy9LcBxfR${{y?D2KWh4)5>kLY2(46?JE@4r-9!3u5$>S{CBqQQXRVG;(3cH2` zm1e}P>ZR9@p#s=+2&~uPZTo9%j%uuJ_69ZCeyOcTJG4^9I7mzeAqLi>vIyh(pu8Wj z`dtX}^$`|7jvjy0yW$P`@`SuzV7e_)z!(_@CKx-X3>{s0`=RuL=*r6ZZLKU`NsJmp zIAnr$dO|-qF?_{6D+Ba4UY5r}F8&D+ix+JJ%~N)#pX4QHZGoDEgM7x9=j z+XDem24Py!pS(01uK;L9#=d*3*bLqn73PB_z8c^t4GmTSh-Arfz>-zt!q2l$kP(n% z>lr@ZR(k^aRAq2AwR|gw%5g}|pr8ktfGVEyJek`?u4fj$?-gsB=)F3I3i{v$JkDF#gR~=aIui-A^Fr8d70;}GP;VF3hjP|v zU)q{4_ck>|P7-)LwpQ~;E)E`;ldLcblP@fJ%VsLo2GRE68Y@<>^|gr+$MdB7kRG6upcYQo!Q#6?;)@)Y za^bw`UgXA>H&BPWxbhO;%_u3oi8(CuHIFDDc{b@1hL3Zp-NII#9+C@qmIr9mX}pY; zL3o`H94xs;fM~4FI?yA0XynynC})nrR}630PM{v*N!VcxMQJ3@T_mhdEwN=Ik`*c8 z49bN)w3NDitzuD4&3tYiv-4IJ#T33g;@t5M!RjFuw8(S9cvp zI8e(Yj&240KM{2s?5AiAq$tD#)6a5pHW7+Nzn^$c3H*q$J}5c_gCQt?*o;Cl&Jw8@eqiy1HG z6h|74)@<>nNwk#QDHS%lDz0e(`W?NgVGWgVr_+E|v$sTYidIc@s5p8W&QIPhzSUO> z0NL^g7dom{YTu*rxe0Co;6%W@xY36Ddj|o`=V8~W{!)Wu+%G7$FPM;FJNaF}xXzm< zz`f1i3mPcGM_s44@i7q}v{afJ-;9nP|DK_6Otg!Y#R6R z05{h<$spq0#pH=1)(jD&2%55vwlwM0cyog2p#)B4=B`4a0_ZG@Z+T|cACMGKYBOnA zPrbsH=B>vA!_L`-Aj0tiDy~P`i+2f!w}fB54ka63Vs$I@tT%n}(x1B7#i6NgS2HcS z#j;~_X-))->mh=p=Y!G zpn25>!pR9$?}c0D>r{KI(XWiCUEyB5=6mQOR#x$_nbb-V7sqZ1`dE0k4k7|tvd4-k z(KzERl#ips+%Uvbe8zVWCjv-mC;s@&3nJjBH9WrOq_RBg${Nk2*KpHRF21|8FZ%81 zslZcBE8=+xD?3cS3LZEY?^Y`WCZsCj0*Luq_M{$y#si6sbBScrq`c!QHb0q32Ys=R zG;hfGEy=M0Yt5m&Cjp|boyeNvhP?d5dS^H={C;l>K_a;3ftnLWB9uDV$zHCmnE?DN={(wNxMKLrGjXfEkj0di)CkoZ3(HU%dm^@nX6lg_C~&yTc1{pm zKPH_!dToYBoaEw`%;~RXXpk&{I`i48w%_Sr5b-t9g4jj1n5z)Wt(Fq$?qo*J%@Kbu z5EK{^Q~cP}=P@R@MpD|SznLJrLLzymB-=D%PR<&YniZ5W?Yc)2enJb(w;4`=VVDp7 zCEAmwS10n~dVy7?<6h%48Fm(?1@aB$agoId45v-vvr%1)eO7uB^=Lx_`UNsQ3qrM& z^^9W9t=O(i0obRg+4S&Ba%a@OWrYx#7l&tJl*3PNU0ORf5N|9yt26 z#R0m|?K&2*JcOQmq1=>D5`uVpwUFRN$uG9ep~odzSC~WS`9WnWfq7A|qdjISv2ig> ze{E>V6O?)rIZ26o&3L3?3NY}lOGI`LM^Il8>v+7)YK13Qz#bOIvE(8*k{t3N6$T=9 z-bqTkDZ5GX=Htg;E?P28L6x|MH^41~j$$t-U;&+jc&t8lnSpl;GB4? z;_1b$+^2?O*`P)!QL!&$GbXt8idJeuX|6Meo%=v*O_uIaB0jhBSFMU(x|60bF9ydM zpc%KR_~qkmTrj9!*lnBE*2CDBT4|SA_GTQ9(2*0pH=NrtS0_3%9e~go{Y2fcy>Hyi zFViN6_^ER&LIL9_25d14Bb=Npn}WJn+)j&Yd_siY_aO__?t~1uDHCJkiU>$3Orlw5 z7y?t?Om5`3GI(haNqDRxBUeAj;sJaQmgF(+opi{11r}E+VQSnyhI-44E2@Z4`VfV% zxBL7CwAxn?2f$ob%-nY(v89dV@c}x)yhUX1cmq2fhDb|gB|QWj`Ydb8i(-aHJz!xs zFoG^#0g-UJpS&m7!n(>#Jkd19!hDaef>!d)JG?>>mf}TH>o6iIc!9y(UPh2Z!{+gf zE}5^w$2>`!noK!sH``h?8kZ?a-b$H4lK`I^1k5g&BsRB8LsRte!zh;6Q=`YtP?o)w-)<};FgGz8#&mp~Oe1+PL$*dQwofjnGBIs2?89CfLr?zw3f;(PQGwwO&0=Ppj#kS6Yi zyUQb26zrD72i7MH%%qC6-d~8>l#t~~HyHXlM z2v1gNfSaT(ygKpe8&RW-J_v%69FH1yk%(z@mgkdf0SE3|g9fD}6XK}!=G_HMS*S|y zFadbvA$8ot(_yA`bWlFD+>VVF76ZUbpVqv}sU;*oavzU@)qC$$)$obbLoneTrE#2I zlU_gcC?Q@QEfR`M!F96cHy3jfZMcetJsj|Ev8Ii#&`e6&gcEmo(~c12T9|f$(I@AL z9-SikrWqn9YNL=y@pH4psh2hfiz2x=t_E4XO)PM{Z@0tA&qMgpp3*!;B(Kbqi9_Hn z$q>s;(1wcgHZK@vw9siFftk%J^CT)gyjb;}r9lU2E8EvM!V8z9?_j)BEzM+_g*7iA z^xjUrhg1BVX;kQ$D?WeI!J1GBn24^F^#cf)d4Je zmR9p23sTmzfkyL0_fllSwG(~RrCN`O)l7z5OJKuab5KszwQxmJ4>^%PB27%FhXRR0 z;zy(dl#70<+9pOL(tLxC{bIHIW|cNc4suT1rLQ4rob%ONo_WlnQ|7U%ga*B5Y~h($ z)x%=E3029!9%IV&&M9+RwUE6yp(o_AHa;71!{$R|bPY(EIl#x7&<@Ge2JowglTu9I zB;%7BL`px7-A6kFdYLVzO;^ykPgD)Z(XO8mr@WJ5inF);c$Q3$TU5H!AA!xpp6R5N zHE-F13))+H?qCgX0P(Jl$Xv*LET*68X-chD!E(mi-mTpFzZd!U|KI=o^Y8aRCpfy) literal 0 HcmV?d00001 diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go new file mode 100644 index 0000000..46a0d63 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!386,!ppc64le appengine + +package sha3 + +var ( + xorIn = xorInGeneric + copyOut = copyOutGeneric + xorInUnaligned = xorInGeneric + copyOutUnaligned = copyOutGeneric +) + +const xorImplementationUnaligned = "generic" diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go new file mode 100644 index 0000000..fd35f02 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +import "encoding/binary" + +// xorInGeneric xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. +func xorInGeneric(d *state, buf []byte) { + n := len(buf) / 8 + + for i := 0; i < n; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } +} + +// copyOutGeneric copies ulint64s to a byte buffer. +func copyOutGeneric(d *state, b []byte) { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } +} diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go new file mode 100644 index 0000000..929a486 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -0,0 +1,58 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 386 ppc64le +// +build !appengine + +package sha3 + +import "unsafe" + +func xorInUnaligned(d *state, buf []byte) { + bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0])) + n := len(buf) + if n >= 72 { + d.a[0] ^= bw[0] + d.a[1] ^= bw[1] + d.a[2] ^= bw[2] + d.a[3] ^= bw[3] + d.a[4] ^= bw[4] + d.a[5] ^= bw[5] + d.a[6] ^= bw[6] + d.a[7] ^= bw[7] + d.a[8] ^= bw[8] + } + if n >= 104 { + d.a[9] ^= bw[9] + d.a[10] ^= bw[10] + d.a[11] ^= bw[11] + d.a[12] ^= bw[12] + } + if n >= 136 { + d.a[13] ^= bw[13] + d.a[14] ^= bw[14] + d.a[15] ^= bw[15] + d.a[16] ^= bw[16] + } + if n >= 144 { + d.a[17] ^= bw[17] + } + if n >= 168 { + d.a[18] ^= bw[18] + d.a[19] ^= bw[19] + d.a[20] ^= bw[20] + } +} + +func copyOutUnaligned(d *state, buf []byte) { + ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) + copy(buf, ab[:]) +} + +var ( + xorIn = xorInUnaligned + copyOut = copyOutUnaligned +) + +const xorImplementationUnaligned = "unaligned" diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go new file mode 100644 index 0000000..acb5ad8 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -0,0 +1,683 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package agent implements the ssh-agent protocol, and provides both +// a client and a server. The client can talk to a standard ssh-agent +// that uses UNIX sockets, and one could implement an alternative +// ssh-agent process using the sample server. +// +// References: +// [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD +package agent // import "golang.org/x/crypto/ssh/agent" + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "sync" + + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh" +) + +// Agent represents the capabilities of an ssh-agent. +type Agent interface { + // List returns the identities known to the agent. + List() ([]*Key, error) + + // Sign has the agent sign the data using a protocol 2 key as defined + // in [PROTOCOL.agent] section 2.6.2. + Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) + + // Add adds a private key to the agent. + Add(key AddedKey) error + + // Remove removes all identities with the given public key. + Remove(key ssh.PublicKey) error + + // RemoveAll removes all identities. + RemoveAll() error + + // Lock locks the agent. Sign and Remove will fail, and List will empty an empty list. + Lock(passphrase []byte) error + + // Unlock undoes the effect of Lock + Unlock(passphrase []byte) error + + // Signers returns signers for all the known keys. + Signers() ([]ssh.Signer, error) +} + +// ConstraintExtension describes an optional constraint defined by users. +type ConstraintExtension struct { + // ExtensionName consist of a UTF-8 string suffixed by the + // implementation domain following the naming scheme defined + // in Section 4.2 of [RFC4251], e.g. "foo@example.com". + ExtensionName string + // ExtensionDetails contains the actual content of the extended + // constraint. + ExtensionDetails []byte +} + +// AddedKey describes an SSH key to be added to an Agent. +type AddedKey struct { + // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or + // *ecdsa.PrivateKey, which will be inserted into the agent. + PrivateKey interface{} + // Certificate, if not nil, is communicated to the agent and will be + // stored with the key. + Certificate *ssh.Certificate + // Comment is an optional, free-form string. + Comment string + // LifetimeSecs, if not zero, is the number of seconds that the + // agent will store the key for. + LifetimeSecs uint32 + // ConfirmBeforeUse, if true, requests that the agent confirm with the + // user before each use of this key. + ConfirmBeforeUse bool + // ConstraintExtensions are the experimental or private-use constraints + // defined by users. + ConstraintExtensions []ConstraintExtension +} + +// See [PROTOCOL.agent], section 3. +const ( + agentRequestV1Identities = 1 + agentRemoveAllV1Identities = 9 + + // 3.2 Requests from client to agent for protocol 2 key operations + agentAddIdentity = 17 + agentRemoveIdentity = 18 + agentRemoveAllIdentities = 19 + agentAddIDConstrained = 25 + + // 3.3 Key-type independent requests from client to agent + agentAddSmartcardKey = 20 + agentRemoveSmartcardKey = 21 + agentLock = 22 + agentUnlock = 23 + agentAddSmartcardKeyConstrained = 26 + + // 3.7 Key constraint identifiers + agentConstrainLifetime = 1 + agentConstrainConfirm = 2 + agentConstrainExtension = 3 +) + +// maxAgentResponseBytes is the maximum agent reply size that is accepted. This +// is a sanity check, not a limit in the spec. +const maxAgentResponseBytes = 16 << 20 + +// Agent messages: +// These structures mirror the wire format of the corresponding ssh agent +// messages found in [PROTOCOL.agent]. + +// 3.4 Generic replies from agent to client +const agentFailure = 5 + +type failureAgentMsg struct{} + +const agentSuccess = 6 + +type successAgentMsg struct{} + +// See [PROTOCOL.agent], section 2.5.2. +const agentRequestIdentities = 11 + +type requestIdentitiesAgentMsg struct{} + +// See [PROTOCOL.agent], section 2.5.2. +const agentIdentitiesAnswer = 12 + +type identitiesAnswerAgentMsg struct { + NumKeys uint32 `sshtype:"12"` + Keys []byte `ssh:"rest"` +} + +// See [PROTOCOL.agent], section 2.6.2. +const agentSignRequest = 13 + +type signRequestAgentMsg struct { + KeyBlob []byte `sshtype:"13"` + Data []byte + Flags uint32 +} + +// See [PROTOCOL.agent], section 2.6.2. + +// 3.6 Replies from agent to client for protocol 2 key operations +const agentSignResponse = 14 + +type signResponseAgentMsg struct { + SigBlob []byte `sshtype:"14"` +} + +type publicKey struct { + Format string + Rest []byte `ssh:"rest"` +} + +// 3.7 Key constraint identifiers +type constrainLifetimeAgentMsg struct { + LifetimeSecs uint32 `sshtype:"1"` +} + +type constrainExtensionAgentMsg struct { + ExtensionName string `sshtype:"3"` + ExtensionDetails []byte + + // Rest is a field used for parsing, not part of message + Rest []byte `ssh:"rest"` +} + +// Key represents a protocol 2 public key as defined in +// [PROTOCOL.agent], section 2.5.2. +type Key struct { + Format string + Blob []byte + Comment string +} + +func clientErr(err error) error { + return fmt.Errorf("agent: client error: %v", err) +} + +// String returns the storage form of an agent key with the format, base64 +// encoded serialized key, and the comment if it is not empty. +func (k *Key) String() string { + s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob) + + if k.Comment != "" { + s += " " + k.Comment + } + + return s +} + +// Type returns the public key type. +func (k *Key) Type() string { + return k.Format +} + +// Marshal returns key blob to satisfy the ssh.PublicKey interface. +func (k *Key) Marshal() []byte { + return k.Blob +} + +// Verify satisfies the ssh.PublicKey interface. +func (k *Key) Verify(data []byte, sig *ssh.Signature) error { + pubKey, err := ssh.ParsePublicKey(k.Blob) + if err != nil { + return fmt.Errorf("agent: bad public key: %v", err) + } + return pubKey.Verify(data, sig) +} + +type wireKey struct { + Format string + Rest []byte `ssh:"rest"` +} + +func parseKey(in []byte) (out *Key, rest []byte, err error) { + var record struct { + Blob []byte + Comment string + Rest []byte `ssh:"rest"` + } + + if err := ssh.Unmarshal(in, &record); err != nil { + return nil, nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(record.Blob, &wk); err != nil { + return nil, nil, err + } + + return &Key{ + Format: wk.Format, + Blob: record.Blob, + Comment: record.Comment, + }, record.Rest, nil +} + +// client is a client for an ssh-agent process. +type client struct { + // conn is typically a *net.UnixConn + conn io.ReadWriter + // mu is used to prevent concurrent access to the agent + mu sync.Mutex +} + +// NewClient returns an Agent that talks to an ssh-agent process over +// the given connection. +func NewClient(rw io.ReadWriter) Agent { + return &client{conn: rw} +} + +// call sends an RPC to the agent. On success, the reply is +// unmarshaled into reply and replyType is set to the first byte of +// the reply, which contains the type of the message. +func (c *client) call(req []byte) (reply interface{}, err error) { + c.mu.Lock() + defer c.mu.Unlock() + + msg := make([]byte, 4+len(req)) + binary.BigEndian.PutUint32(msg, uint32(len(req))) + copy(msg[4:], req) + if _, err = c.conn.Write(msg); err != nil { + return nil, clientErr(err) + } + + var respSizeBuf [4]byte + if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil { + return nil, clientErr(err) + } + respSize := binary.BigEndian.Uint32(respSizeBuf[:]) + if respSize > maxAgentResponseBytes { + return nil, clientErr(err) + } + + buf := make([]byte, respSize) + if _, err = io.ReadFull(c.conn, buf); err != nil { + return nil, clientErr(err) + } + reply, err = unmarshal(buf) + if err != nil { + return nil, clientErr(err) + } + return reply, err +} + +func (c *client) simpleCall(req []byte) error { + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +func (c *client) RemoveAll() error { + return c.simpleCall([]byte{agentRemoveAllIdentities}) +} + +func (c *client) Remove(key ssh.PublicKey) error { + req := ssh.Marshal(&agentRemoveIdentityMsg{ + KeyBlob: key.Marshal(), + }) + return c.simpleCall(req) +} + +func (c *client) Lock(passphrase []byte) error { + req := ssh.Marshal(&agentLockMsg{ + Passphrase: passphrase, + }) + return c.simpleCall(req) +} + +func (c *client) Unlock(passphrase []byte) error { + req := ssh.Marshal(&agentUnlockMsg{ + Passphrase: passphrase, + }) + return c.simpleCall(req) +} + +// List returns the identities known to the agent. +func (c *client) List() ([]*Key, error) { + // see [PROTOCOL.agent] section 2.5.2. + req := []byte{agentRequestIdentities} + + msg, err := c.call(req) + if err != nil { + return nil, err + } + + switch msg := msg.(type) { + case *identitiesAnswerAgentMsg: + if msg.NumKeys > maxAgentResponseBytes/8 { + return nil, errors.New("agent: too many keys in agent reply") + } + keys := make([]*Key, msg.NumKeys) + data := msg.Keys + for i := uint32(0); i < msg.NumKeys; i++ { + var key *Key + var err error + if key, data, err = parseKey(data); err != nil { + return nil, err + } + keys[i] = key + } + return keys, nil + case *failureAgentMsg: + return nil, errors.New("agent: failed to list keys") + } + panic("unreachable") +} + +// Sign has the agent sign the data using a protocol 2 key as defined +// in [PROTOCOL.agent] section 2.6.2. +func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { + req := ssh.Marshal(signRequestAgentMsg{ + KeyBlob: key.Marshal(), + Data: data, + }) + + msg, err := c.call(req) + if err != nil { + return nil, err + } + + switch msg := msg.(type) { + case *signResponseAgentMsg: + var sig ssh.Signature + if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil { + return nil, err + } + + return &sig, nil + case *failureAgentMsg: + return nil, errors.New("agent: failed to sign challenge") + } + panic("unreachable") +} + +// unmarshal parses an agent message in packet, returning the parsed +// form and the message type of packet. +func unmarshal(packet []byte) (interface{}, error) { + if len(packet) < 1 { + return nil, errors.New("agent: empty packet") + } + var msg interface{} + switch packet[0] { + case agentFailure: + return new(failureAgentMsg), nil + case agentSuccess: + return new(successAgentMsg), nil + case agentIdentitiesAnswer: + msg = new(identitiesAnswerAgentMsg) + case agentSignResponse: + msg = new(signResponseAgentMsg) + case agentV1IdentitiesAnswer: + msg = new(agentV1IdentityMsg) + default: + return nil, fmt.Errorf("agent: unknown type tag %d", packet[0]) + } + if err := ssh.Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} + +type rsaKeyMsg struct { + Type string `sshtype:"17|25"` + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type dsaKeyMsg struct { + Type string `sshtype:"17|25"` + P *big.Int + Q *big.Int + G *big.Int + Y *big.Int + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ecdsaKeyMsg struct { + Type string `sshtype:"17|25"` + Curve string + KeyBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ed25519KeyMsg struct { + Type string `sshtype:"17|25"` + Pub []byte + Priv []byte + Comments string + Constraints []byte `ssh:"rest"` +} + +// Insert adds a private key to the agent. +func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { + var req []byte + switch k := s.(type) { + case *rsa.PrivateKey: + if len(k.Primes) != 2 { + return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) + } + k.Precompute() + req = ssh.Marshal(rsaKeyMsg{ + Type: ssh.KeyAlgoRSA, + N: k.N, + E: big.NewInt(int64(k.E)), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, + }) + case *dsa.PrivateKey: + req = ssh.Marshal(dsaKeyMsg{ + Type: ssh.KeyAlgoDSA, + P: k.P, + Q: k.Q, + G: k.G, + Y: k.Y, + X: k.X, + Comments: comment, + Constraints: constraints, + }) + case *ecdsa.PrivateKey: + nistID := fmt.Sprintf("nistp%d", k.Params().BitSize) + req = ssh.Marshal(ecdsaKeyMsg{ + Type: "ecdsa-sha2-" + nistID, + Curve: nistID, + KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), + D: k.D, + Comments: comment, + Constraints: constraints, + }) + case *ed25519.PrivateKey: + req = ssh.Marshal(ed25519KeyMsg{ + Type: ssh.KeyAlgoED25519, + Pub: []byte(*k)[32:], + Priv: []byte(*k), + Comments: comment, + Constraints: constraints, + }) + default: + return fmt.Errorf("agent: unsupported key type %T", s) + } + + // if constraints are present then the message type needs to be changed. + if len(constraints) != 0 { + req[0] = agentAddIDConstrained + } + + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +type rsaCertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type dsaCertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ecdsaCertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ed25519CertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + Pub []byte + Priv []byte + Comments string + Constraints []byte `ssh:"rest"` +} + +// Add adds a private key to the agent. If a certificate is given, +// that certificate is added instead as public key. +func (c *client) Add(key AddedKey) error { + var constraints []byte + + if secs := key.LifetimeSecs; secs != 0 { + constraints = append(constraints, ssh.Marshal(constrainLifetimeAgentMsg{secs})...) + } + + if key.ConfirmBeforeUse { + constraints = append(constraints, agentConstrainConfirm) + } + + cert := key.Certificate + if cert == nil { + return c.insertKey(key.PrivateKey, key.Comment, constraints) + } + return c.insertCert(key.PrivateKey, cert, key.Comment, constraints) +} + +func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error { + var req []byte + switch k := s.(type) { + case *rsa.PrivateKey: + if len(k.Primes) != 2 { + return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) + } + k.Precompute() + req = ssh.Marshal(rsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, + }) + case *dsa.PrivateKey: + req = ssh.Marshal(dsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + X: k.X, + Comments: comment, + Constraints: constraints, + }) + case *ecdsa.PrivateKey: + req = ssh.Marshal(ecdsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Comments: comment, + Constraints: constraints, + }) + case *ed25519.PrivateKey: + req = ssh.Marshal(ed25519CertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + Pub: []byte(*k)[32:], + Priv: []byte(*k), + Comments: comment, + Constraints: constraints, + }) + default: + return fmt.Errorf("agent: unsupported key type %T", s) + } + + // if constraints are present then the message type needs to be changed. + if len(constraints) != 0 { + req[0] = agentAddIDConstrained + } + + signer, err := ssh.NewSignerFromKey(s) + if err != nil { + return err + } + if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + return errors.New("agent: signer and cert have different public key") + } + + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +// Signers provides a callback for client authentication. +func (c *client) Signers() ([]ssh.Signer, error) { + keys, err := c.List() + if err != nil { + return nil, err + } + + var result []ssh.Signer + for _, k := range keys { + result = append(result, &agentKeyringSigner{c, k}) + } + return result, nil +} + +type agentKeyringSigner struct { + agent *client + pub ssh.PublicKey +} + +func (s *agentKeyringSigner) PublicKey() ssh.PublicKey { + return s.pub +} + +func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) { + // The agent has its own entropy source, so the rand argument is ignored. + return s.agent.Sign(s.pub, data) +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/client_test.go b/vendor/golang.org/x/crypto/ssh/agent/client_test.go new file mode 100644 index 0000000..266fd6d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/client_test.go @@ -0,0 +1,379 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "bytes" + "crypto/rand" + "errors" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "testing" + "time" + + "golang.org/x/crypto/ssh" +) + +// startOpenSSHAgent executes ssh-agent, and returns an Agent interface to it. +func startOpenSSHAgent(t *testing.T) (client Agent, socket string, cleanup func()) { + if testing.Short() { + // ssh-agent is not always available, and the key + // types supported vary by platform. + t.Skip("skipping test due to -short") + } + + bin, err := exec.LookPath("ssh-agent") + if err != nil { + t.Skip("could not find ssh-agent") + } + + cmd := exec.Command(bin, "-s") + out, err := cmd.Output() + if err != nil { + t.Fatalf("cmd.Output: %v", err) + } + + /* Output looks like: + + SSH_AUTH_SOCK=/tmp/ssh-P65gpcqArqvH/agent.15541; export SSH_AUTH_SOCK; + SSH_AGENT_PID=15542; export SSH_AGENT_PID; + echo Agent pid 15542; + */ + fields := bytes.Split(out, []byte(";")) + line := bytes.SplitN(fields[0], []byte("="), 2) + line[0] = bytes.TrimLeft(line[0], "\n") + if string(line[0]) != "SSH_AUTH_SOCK" { + t.Fatalf("could not find key SSH_AUTH_SOCK in %q", fields[0]) + } + socket = string(line[1]) + + line = bytes.SplitN(fields[2], []byte("="), 2) + line[0] = bytes.TrimLeft(line[0], "\n") + if string(line[0]) != "SSH_AGENT_PID" { + t.Fatalf("could not find key SSH_AGENT_PID in %q", fields[2]) + } + pidStr := line[1] + pid, err := strconv.Atoi(string(pidStr)) + if err != nil { + t.Fatalf("Atoi(%q): %v", pidStr, err) + } + + conn, err := net.Dial("unix", string(socket)) + if err != nil { + t.Fatalf("net.Dial: %v", err) + } + + ac := NewClient(conn) + return ac, socket, func() { + proc, _ := os.FindProcess(pid) + if proc != nil { + proc.Kill() + } + conn.Close() + os.RemoveAll(filepath.Dir(socket)) + } +} + +// startKeyringAgent uses Keyring to simulate a ssh-agent Server and returns a client. +func startKeyringAgent(t *testing.T) (client Agent, cleanup func()) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + go ServeAgent(NewKeyring(), c2) + + return NewClient(c1), func() { + c1.Close() + c2.Close() + } +} + +func testOpenSSHAgent(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) { + agent, _, cleanup := startOpenSSHAgent(t) + defer cleanup() + + testAgentInterface(t, agent, key, cert, lifetimeSecs) +} + +func testKeyringAgent(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) { + agent, cleanup := startKeyringAgent(t) + defer cleanup() + + testAgentInterface(t, agent, key, cert, lifetimeSecs) +} + +func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) { + signer, err := ssh.NewSignerFromKey(key) + if err != nil { + t.Fatalf("NewSignerFromKey(%T): %v", key, err) + } + // The agent should start up empty. + if keys, err := agent.List(); err != nil { + t.Fatalf("RequestIdentities: %v", err) + } else if len(keys) > 0 { + t.Fatalf("got %d keys, want 0: %v", len(keys), keys) + } + + // Attempt to insert the key, with certificate if specified. + var pubKey ssh.PublicKey + if cert != nil { + err = agent.Add(AddedKey{ + PrivateKey: key, + Certificate: cert, + Comment: "comment", + LifetimeSecs: lifetimeSecs, + }) + pubKey = cert + } else { + err = agent.Add(AddedKey{PrivateKey: key, Comment: "comment", LifetimeSecs: lifetimeSecs}) + pubKey = signer.PublicKey() + } + if err != nil { + t.Fatalf("insert(%T): %v", key, err) + } + + // Did the key get inserted successfully? + if keys, err := agent.List(); err != nil { + t.Fatalf("List: %v", err) + } else if len(keys) != 1 { + t.Fatalf("got %v, want 1 key", keys) + } else if keys[0].Comment != "comment" { + t.Fatalf("key comment: got %v, want %v", keys[0].Comment, "comment") + } else if !bytes.Equal(keys[0].Blob, pubKey.Marshal()) { + t.Fatalf("key mismatch") + } + + // Can the agent make a valid signature? + data := []byte("hello") + sig, err := agent.Sign(pubKey, data) + if err != nil { + t.Fatalf("Sign(%s): %v", pubKey.Type(), err) + } + + if err := pubKey.Verify(data, sig); err != nil { + t.Fatalf("Verify(%s): %v", pubKey.Type(), err) + } + + // If the key has a lifetime, is it removed when it should be? + if lifetimeSecs > 0 { + time.Sleep(time.Second*time.Duration(lifetimeSecs) + 100*time.Millisecond) + keys, err := agent.List() + if err != nil { + t.Fatalf("List: %v", err) + } + if len(keys) > 0 { + t.Fatalf("key not expired") + } + } + +} + +func TestAgent(t *testing.T) { + for _, keyType := range []string{"rsa", "dsa", "ecdsa", "ed25519"} { + testOpenSSHAgent(t, testPrivateKeys[keyType], nil, 0) + testKeyringAgent(t, testPrivateKeys[keyType], nil, 0) + } +} + +func TestCert(t *testing.T) { + cert := &ssh.Certificate{ + Key: testPublicKeys["rsa"], + ValidBefore: ssh.CertTimeInfinity, + CertType: ssh.UserCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + + testOpenSSHAgent(t, testPrivateKeys["rsa"], cert, 0) + testKeyringAgent(t, testPrivateKeys["rsa"], cert, 0) +} + +// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and +// therefore is buffered (net.Pipe deadlocks if both sides start with +// a write.) +func netPipe() (net.Conn, net.Conn, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + listener, err = net.Listen("tcp", "[::1]:0") + if err != nil { + return nil, nil, err + } + } + defer listener.Close() + c1, err := net.Dial("tcp", listener.Addr().String()) + if err != nil { + return nil, nil, err + } + + c2, err := listener.Accept() + if err != nil { + c1.Close() + return nil, nil, err + } + + return c1, c2, nil +} + +func TestAuth(t *testing.T) { + agent, _, cleanup := startOpenSSHAgent(t) + defer cleanup() + + a, b, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + defer a.Close() + defer b.Close() + + if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment"}); err != nil { + t.Errorf("Add: %v", err) + } + + serverConf := ssh.ServerConfig{} + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.PublicKeyCallback = func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) { + if bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) { + return nil, nil + } + + return nil, errors.New("pubkey rejected") + } + + go func() { + conn, _, _, err := ssh.NewServerConn(a, &serverConf) + if err != nil { + t.Fatalf("Server: %v", err) + } + conn.Close() + }() + + conf := ssh.ClientConfig{ + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + conf.Auth = append(conf.Auth, ssh.PublicKeysCallback(agent.Signers)) + conn, _, _, err := ssh.NewClientConn(b, "", &conf) + if err != nil { + t.Fatalf("NewClientConn: %v", err) + } + conn.Close() +} + +func TestLockOpenSSHAgent(t *testing.T) { + agent, _, cleanup := startOpenSSHAgent(t) + defer cleanup() + testLockAgent(agent, t) +} + +func TestLockKeyringAgent(t *testing.T) { + agent, cleanup := startKeyringAgent(t) + defer cleanup() + testLockAgent(agent, t) +} + +func testLockAgent(agent Agent, t *testing.T) { + if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment 1"}); err != nil { + t.Errorf("Add: %v", err) + } + if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["dsa"], Comment: "comment dsa"}); err != nil { + t.Errorf("Add: %v", err) + } + if keys, err := agent.List(); err != nil { + t.Errorf("List: %v", err) + } else if len(keys) != 2 { + t.Errorf("Want 2 keys, got %v", keys) + } + + passphrase := []byte("secret") + if err := agent.Lock(passphrase); err != nil { + t.Errorf("Lock: %v", err) + } + + if keys, err := agent.List(); err != nil { + t.Errorf("List: %v", err) + } else if len(keys) != 0 { + t.Errorf("Want 0 keys, got %v", keys) + } + + signer, _ := ssh.NewSignerFromKey(testPrivateKeys["rsa"]) + if _, err := agent.Sign(signer.PublicKey(), []byte("hello")); err == nil { + t.Fatalf("Sign did not fail") + } + + if err := agent.Remove(signer.PublicKey()); err == nil { + t.Fatalf("Remove did not fail") + } + + if err := agent.RemoveAll(); err == nil { + t.Fatalf("RemoveAll did not fail") + } + + if err := agent.Unlock(nil); err == nil { + t.Errorf("Unlock with wrong passphrase succeeded") + } + if err := agent.Unlock(passphrase); err != nil { + t.Errorf("Unlock: %v", err) + } + + if err := agent.Remove(signer.PublicKey()); err != nil { + t.Fatalf("Remove: %v", err) + } + + if keys, err := agent.List(); err != nil { + t.Errorf("List: %v", err) + } else if len(keys) != 1 { + t.Errorf("Want 1 keys, got %v", keys) + } +} + +func testOpenSSHAgentLifetime(t *testing.T) { + agent, _, cleanup := startOpenSSHAgent(t) + defer cleanup() + testAgentLifetime(t, agent) +} + +func testKeyringAgentLifetime(t *testing.T) { + agent, cleanup := startKeyringAgent(t) + defer cleanup() + testAgentLifetime(t, agent) +} + +func testAgentLifetime(t *testing.T, agent Agent) { + for _, keyType := range []string{"rsa", "dsa", "ecdsa"} { + // Add private keys to the agent. + err := agent.Add(AddedKey{ + PrivateKey: testPrivateKeys[keyType], + Comment: "comment", + LifetimeSecs: 1, + }) + if err != nil { + t.Fatalf("add: %v", err) + } + // Add certs to the agent. + cert := &ssh.Certificate{ + Key: testPublicKeys[keyType], + ValidBefore: ssh.CertTimeInfinity, + CertType: ssh.UserCert, + } + cert.SignCert(rand.Reader, testSigners[keyType]) + err = agent.Add(AddedKey{ + PrivateKey: testPrivateKeys[keyType], + Certificate: cert, + Comment: "comment", + LifetimeSecs: 1, + }) + if err != nil { + t.Fatalf("add: %v", err) + } + } + time.Sleep(1100 * time.Millisecond) + if keys, err := agent.List(); err != nil { + t.Errorf("List: %v", err) + } else if len(keys) != 0 { + t.Errorf("Want 0 keys, got %v", len(keys)) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/example_test.go b/vendor/golang.org/x/crypto/ssh/agent/example_test.go new file mode 100644 index 0000000..8556225 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/example_test.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent_test + +import ( + "log" + "net" + "os" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +func ExampleClientAgent() { + // ssh-agent has a UNIX socket under $SSH_AUTH_SOCK + socket := os.Getenv("SSH_AUTH_SOCK") + conn, err := net.Dial("unix", socket) + if err != nil { + log.Fatalf("net.Dial: %v", err) + } + agentClient := agent.NewClient(conn) + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + // Use a callback rather than PublicKeys + // so we only consult the agent once the remote server + // wants it. + ssh.PublicKeysCallback(agentClient.Signers), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + + sshc, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatalf("Dial: %v", err) + } + // .. use sshc + sshc.Close() +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go new file mode 100644 index 0000000..fd24ba9 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/forward.go @@ -0,0 +1,103 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "errors" + "io" + "net" + "sync" + + "golang.org/x/crypto/ssh" +) + +// RequestAgentForwarding sets up agent forwarding for the session. +// ForwardToAgent or ForwardToRemote should be called to route +// the authentication requests. +func RequestAgentForwarding(session *ssh.Session) error { + ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil) + if err != nil { + return err + } + if !ok { + return errors.New("forwarding request denied") + } + return nil +} + +// ForwardToAgent routes authentication requests to the given keyring. +func ForwardToAgent(client *ssh.Client, keyring Agent) error { + channels := client.HandleChannelOpen(channelType) + if channels == nil { + return errors.New("agent: already have handler for " + channelType) + } + + go func() { + for ch := range channels { + channel, reqs, err := ch.Accept() + if err != nil { + continue + } + go ssh.DiscardRequests(reqs) + go func() { + ServeAgent(keyring, channel) + channel.Close() + }() + } + }() + return nil +} + +const channelType = "auth-agent@openssh.com" + +// ForwardToRemote routes authentication requests to the ssh-agent +// process serving on the given unix socket. +func ForwardToRemote(client *ssh.Client, addr string) error { + channels := client.HandleChannelOpen(channelType) + if channels == nil { + return errors.New("agent: already have handler for " + channelType) + } + conn, err := net.Dial("unix", addr) + if err != nil { + return err + } + conn.Close() + + go func() { + for ch := range channels { + channel, reqs, err := ch.Accept() + if err != nil { + continue + } + go ssh.DiscardRequests(reqs) + go forwardUnixSocket(channel, addr) + } + }() + return nil +} + +func forwardUnixSocket(channel ssh.Channel, addr string) { + conn, err := net.Dial("unix", addr) + if err != nil { + return + } + + var wg sync.WaitGroup + wg.Add(2) + go func() { + io.Copy(conn, channel) + conn.(*net.UnixConn).CloseWrite() + wg.Done() + }() + go func() { + io.Copy(channel, conn) + channel.CloseWrite() + wg.Done() + }() + + wg.Wait() + conn.Close() + channel.Close() +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go new file mode 100644 index 0000000..a6ba06a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -0,0 +1,215 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "bytes" + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "sync" + "time" + + "golang.org/x/crypto/ssh" +) + +type privKey struct { + signer ssh.Signer + comment string + expire *time.Time +} + +type keyring struct { + mu sync.Mutex + keys []privKey + + locked bool + passphrase []byte +} + +var errLocked = errors.New("agent: locked") + +// NewKeyring returns an Agent that holds keys in memory. It is safe +// for concurrent use by multiple goroutines. +func NewKeyring() Agent { + return &keyring{} +} + +// RemoveAll removes all identities. +func (r *keyring) RemoveAll() error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + r.keys = nil + return nil +} + +// removeLocked does the actual key removal. The caller must already be holding the +// keyring mutex. +func (r *keyring) removeLocked(want []byte) error { + found := false + for i := 0; i < len(r.keys); { + if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) { + found = true + r.keys[i] = r.keys[len(r.keys)-1] + r.keys = r.keys[:len(r.keys)-1] + continue + } else { + i++ + } + } + + if !found { + return errors.New("agent: key not found") + } + return nil +} + +// Remove removes all identities with the given public key. +func (r *keyring) Remove(key ssh.PublicKey) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + return r.removeLocked(key.Marshal()) +} + +// Lock locks the agent. Sign and Remove will fail, and List will return an empty list. +func (r *keyring) Lock(passphrase []byte) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + r.locked = true + r.passphrase = passphrase + return nil +} + +// Unlock undoes the effect of Lock +func (r *keyring) Unlock(passphrase []byte) error { + r.mu.Lock() + defer r.mu.Unlock() + if !r.locked { + return errors.New("agent: not locked") + } + if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) { + return fmt.Errorf("agent: incorrect passphrase") + } + + r.locked = false + r.passphrase = nil + return nil +} + +// expireKeysLocked removes expired keys from the keyring. If a key was added +// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have +// ellapsed, it is removed. The caller *must* be holding the keyring mutex. +func (r *keyring) expireKeysLocked() { + for _, k := range r.keys { + if k.expire != nil && time.Now().After(*k.expire) { + r.removeLocked(k.signer.PublicKey().Marshal()) + } + } +} + +// List returns the identities known to the agent. +func (r *keyring) List() ([]*Key, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + // section 2.7: locked agents return empty. + return nil, nil + } + + r.expireKeysLocked() + var ids []*Key + for _, k := range r.keys { + pub := k.signer.PublicKey() + ids = append(ids, &Key{ + Format: pub.Type(), + Blob: pub.Marshal(), + Comment: k.comment}) + } + return ids, nil +} + +// Insert adds a private key to the keyring. If a certificate +// is given, that certificate is added as public key. Note that +// any constraints given are ignored. +func (r *keyring) Add(key AddedKey) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + signer, err := ssh.NewSignerFromKey(key.PrivateKey) + + if err != nil { + return err + } + + if cert := key.Certificate; cert != nil { + signer, err = ssh.NewCertSigner(cert, signer) + if err != nil { + return err + } + } + + p := privKey{ + signer: signer, + comment: key.Comment, + } + + if key.LifetimeSecs > 0 { + t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second) + p.expire = &t + } + + r.keys = append(r.keys, p) + + return nil +} + +// Sign returns a signature for the data. +func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return nil, errLocked + } + + r.expireKeysLocked() + wanted := key.Marshal() + for _, k := range r.keys { + if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) { + return k.signer.Sign(rand.Reader, data) + } + } + return nil, errors.New("not found") +} + +// Signers returns signers for all the known keys. +func (r *keyring) Signers() ([]ssh.Signer, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return nil, errLocked + } + + r.expireKeysLocked() + s := make([]ssh.Signer, 0, len(r.keys)) + for _, k := range r.keys { + s = append(s, k.signer) + } + return s, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go b/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go new file mode 100644 index 0000000..e5d50e7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go @@ -0,0 +1,76 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import "testing" + +func addTestKey(t *testing.T, a Agent, keyName string) { + err := a.Add(AddedKey{ + PrivateKey: testPrivateKeys[keyName], + Comment: keyName, + }) + if err != nil { + t.Fatalf("failed to add key %q: %v", keyName, err) + } +} + +func removeTestKey(t *testing.T, a Agent, keyName string) { + err := a.Remove(testPublicKeys[keyName]) + if err != nil { + t.Fatalf("failed to remove key %q: %v", keyName, err) + } +} + +func validateListedKeys(t *testing.T, a Agent, expectedKeys []string) { + listedKeys, err := a.List() + if err != nil { + t.Fatalf("failed to list keys: %v", err) + return + } + actualKeys := make(map[string]bool) + for _, key := range listedKeys { + actualKeys[key.Comment] = true + } + + matchedKeys := make(map[string]bool) + for _, expectedKey := range expectedKeys { + if !actualKeys[expectedKey] { + t.Fatalf("expected key %q, but was not found", expectedKey) + } else { + matchedKeys[expectedKey] = true + } + } + + for actualKey := range actualKeys { + if !matchedKeys[actualKey] { + t.Fatalf("key %q was found, but was not expected", actualKey) + } + } +} + +func TestKeyringAddingAndRemoving(t *testing.T) { + keyNames := []string{"dsa", "ecdsa", "rsa", "user"} + + // add all test private keys + k := NewKeyring() + for _, keyName := range keyNames { + addTestKey(t, k, keyName) + } + validateListedKeys(t, k, keyNames) + + // remove a key in the middle + keyToRemove := keyNames[1] + keyNames = append(keyNames[:1], keyNames[2:]...) + + removeTestKey(t, k, keyToRemove) + validateListedKeys(t, k, keyNames) + + // remove all keys + err := k.RemoveAll() + if err != nil { + t.Fatalf("failed to remove all keys: %v", err) + } + validateListedKeys(t, k, []string{}) +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go new file mode 100644 index 0000000..2e4692c --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/server.go @@ -0,0 +1,523 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "math/big" + + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh" +) + +// Server wraps an Agent and uses it to implement the agent side of +// the SSH-agent, wire protocol. +type server struct { + agent Agent +} + +func (s *server) processRequestBytes(reqData []byte) []byte { + rep, err := s.processRequest(reqData) + if err != nil { + if err != errLocked { + // TODO(hanwen): provide better logging interface? + log.Printf("agent %d: %v", reqData[0], err) + } + return []byte{agentFailure} + } + + if err == nil && rep == nil { + return []byte{agentSuccess} + } + + return ssh.Marshal(rep) +} + +func marshalKey(k *Key) []byte { + var record struct { + Blob []byte + Comment string + } + record.Blob = k.Marshal() + record.Comment = k.Comment + + return ssh.Marshal(&record) +} + +// See [PROTOCOL.agent], section 2.5.1. +const agentV1IdentitiesAnswer = 2 + +type agentV1IdentityMsg struct { + Numkeys uint32 `sshtype:"2"` +} + +type agentRemoveIdentityMsg struct { + KeyBlob []byte `sshtype:"18"` +} + +type agentLockMsg struct { + Passphrase []byte `sshtype:"22"` +} + +type agentUnlockMsg struct { + Passphrase []byte `sshtype:"23"` +} + +func (s *server) processRequest(data []byte) (interface{}, error) { + switch data[0] { + case agentRequestV1Identities: + return &agentV1IdentityMsg{0}, nil + + case agentRemoveAllV1Identities: + return nil, nil + + case agentRemoveIdentity: + var req agentRemoveIdentityMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { + return nil, err + } + + return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob}) + + case agentRemoveAllIdentities: + return nil, s.agent.RemoveAll() + + case agentLock: + var req agentLockMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + return nil, s.agent.Lock(req.Passphrase) + + case agentUnlock: + var req agentUnlockMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + return nil, s.agent.Unlock(req.Passphrase) + + case agentSignRequest: + var req signRequestAgentMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { + return nil, err + } + + k := &Key{ + Format: wk.Format, + Blob: req.KeyBlob, + } + + sig, err := s.agent.Sign(k, req.Data) // TODO(hanwen): flags. + if err != nil { + return nil, err + } + return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil + + case agentRequestIdentities: + keys, err := s.agent.List() + if err != nil { + return nil, err + } + + rep := identitiesAnswerAgentMsg{ + NumKeys: uint32(len(keys)), + } + for _, k := range keys { + rep.Keys = append(rep.Keys, marshalKey(k)...) + } + return rep, nil + + case agentAddIDConstrained, agentAddIdentity: + return nil, s.insertIdentity(data) + } + + return nil, fmt.Errorf("unknown opcode %d", data[0]) +} + +func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse bool, extensions []ConstraintExtension, err error) { + for len(constraints) != 0 { + switch constraints[0] { + case agentConstrainLifetime: + lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5]) + constraints = constraints[5:] + case agentConstrainConfirm: + confirmBeforeUse = true + constraints = constraints[1:] + case agentConstrainExtension: + var msg constrainExtensionAgentMsg + if err = ssh.Unmarshal(constraints, &msg); err != nil { + return 0, false, nil, err + } + extensions = append(extensions, ConstraintExtension{ + ExtensionName: msg.ExtensionName, + ExtensionDetails: msg.ExtensionDetails, + }) + constraints = msg.Rest + default: + return 0, false, nil, fmt.Errorf("unknown constraint type: %d", constraints[0]) + } + } + return +} + +func setConstraints(key *AddedKey, constraintBytes []byte) error { + lifetimeSecs, confirmBeforeUse, constraintExtensions, err := parseConstraints(constraintBytes) + if err != nil { + return err + } + + key.LifetimeSecs = lifetimeSecs + key.ConfirmBeforeUse = confirmBeforeUse + key.ConstraintExtensions = constraintExtensions + return nil +} + +func parseRSAKey(req []byte) (*AddedKey, error) { + var k rsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + if k.E.BitLen() > 30 { + return nil, errors.New("agent: RSA public exponent too large") + } + priv := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + E: int(k.E.Int64()), + N: k.N, + }, + D: k.D, + Primes: []*big.Int{k.P, k.Q}, + } + priv.Precompute() + + addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseEd25519Key(req []byte) (*AddedKey, error) { + var k ed25519KeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + priv := ed25519.PrivateKey(k.Priv) + + addedKey := &AddedKey{PrivateKey: &priv, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseDSAKey(req []byte) (*AddedKey, error) { + var k dsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + + addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) { + priv = &ecdsa.PrivateKey{ + D: privScalar, + } + + switch curveName { + case "nistp256": + priv.Curve = elliptic.P256() + case "nistp384": + priv.Curve = elliptic.P384() + case "nistp521": + priv.Curve = elliptic.P521() + default: + return nil, fmt.Errorf("agent: unknown curve %q", curveName) + } + + priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes) + if priv.X == nil || priv.Y == nil { + return nil, errors.New("agent: point not on curve") + } + + return priv, nil +} + +func parseEd25519Cert(req []byte) (*AddedKey, error) { + var k ed25519CertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + priv := ed25519.PrivateKey(k.Priv) + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad ED25519 certificate") + } + + addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseECDSAKey(req []byte) (*AddedKey, error) { + var k ecdsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D) + if err != nil { + return nil, err + } + + addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseRSACert(req []byte) (*AddedKey, error) { + var k rsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad RSA certificate") + } + + // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go + var rsaPub struct { + Name string + E *big.Int + N *big.Int + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil { + return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) + } + + if rsaPub.E.BitLen() > 30 { + return nil, errors.New("agent: RSA public exponent too large") + } + + priv := rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + E: int(rsaPub.E.Int64()), + N: rsaPub.N, + }, + D: k.D, + Primes: []*big.Int{k.Q, k.P}, + } + priv.Precompute() + + addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseDSACert(req []byte) (*AddedKey, error) { + var k dsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad DSA certificate") + } + + // A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go + var w struct { + Name string + P, Q, G, Y *big.Int + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil { + return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) + } + + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + }, + Y: w.Y, + }, + X: k.X, + } + + addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseECDSACert(req []byte) (*AddedKey, error) { + var k ecdsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad ECDSA certificate") + } + + // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go + var ecdsaPub struct { + Name string + ID string + Key []byte + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil { + return nil, err + } + + priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D) + if err != nil { + return nil, err + } + + addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func (s *server) insertIdentity(req []byte) error { + var record struct { + Type string `sshtype:"17|25"` + Rest []byte `ssh:"rest"` + } + + if err := ssh.Unmarshal(req, &record); err != nil { + return err + } + + var addedKey *AddedKey + var err error + + switch record.Type { + case ssh.KeyAlgoRSA: + addedKey, err = parseRSAKey(req) + case ssh.KeyAlgoDSA: + addedKey, err = parseDSAKey(req) + case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: + addedKey, err = parseECDSAKey(req) + case ssh.KeyAlgoED25519: + addedKey, err = parseEd25519Key(req) + case ssh.CertAlgoRSAv01: + addedKey, err = parseRSACert(req) + case ssh.CertAlgoDSAv01: + addedKey, err = parseDSACert(req) + case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01: + addedKey, err = parseECDSACert(req) + case ssh.CertAlgoED25519v01: + addedKey, err = parseEd25519Cert(req) + default: + return fmt.Errorf("agent: not implemented: %q", record.Type) + } + + if err != nil { + return err + } + return s.agent.Add(*addedKey) +} + +// ServeAgent serves the agent protocol on the given connection. It +// returns when an I/O error occurs. +func ServeAgent(agent Agent, c io.ReadWriter) error { + s := &server{agent} + + var length [4]byte + for { + if _, err := io.ReadFull(c, length[:]); err != nil { + return err + } + l := binary.BigEndian.Uint32(length[:]) + if l > maxAgentResponseBytes { + // We also cap requests. + return fmt.Errorf("agent: request too large: %d", l) + } + + req := make([]byte, l) + if _, err := io.ReadFull(c, req); err != nil { + return err + } + + repData := s.processRequestBytes(req) + if len(repData) > maxAgentResponseBytes { + return fmt.Errorf("agent: reply too large: %d bytes", len(repData)) + } + + binary.BigEndian.PutUint32(length[:], uint32(len(repData))) + if _, err := c.Write(length[:]); err != nil { + return err + } + if _, err := c.Write(repData); err != nil { + return err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/server_test.go b/vendor/golang.org/x/crypto/ssh/agent/server_test.go new file mode 100644 index 0000000..038018e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/server_test.go @@ -0,0 +1,259 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "crypto" + "crypto/rand" + "fmt" + pseudorand "math/rand" + "reflect" + "strings" + "testing" + + "golang.org/x/crypto/ssh" +) + +func TestServer(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + client := NewClient(c1) + + go ServeAgent(NewKeyring(), c2) + + testAgentInterface(t, client, testPrivateKeys["rsa"], nil, 0) +} + +func TestLockServer(t *testing.T) { + testLockAgent(NewKeyring(), t) +} + +func TestSetupForwardAgent(t *testing.T) { + a, b, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + defer a.Close() + defer b.Close() + + _, socket, cleanup := startOpenSSHAgent(t) + defer cleanup() + + serverConf := ssh.ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + incoming := make(chan *ssh.ServerConn, 1) + go func() { + conn, _, _, err := ssh.NewServerConn(a, &serverConf) + if err != nil { + t.Fatalf("Server: %v", err) + } + incoming <- conn + }() + + conf := ssh.ClientConfig{ + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + conn, chans, reqs, err := ssh.NewClientConn(b, "", &conf) + if err != nil { + t.Fatalf("NewClientConn: %v", err) + } + client := ssh.NewClient(conn, chans, reqs) + + if err := ForwardToRemote(client, socket); err != nil { + t.Fatalf("SetupForwardAgent: %v", err) + } + + server := <-incoming + ch, reqs, err := server.OpenChannel(channelType, nil) + if err != nil { + t.Fatalf("OpenChannel(%q): %v", channelType, err) + } + go ssh.DiscardRequests(reqs) + + agentClient := NewClient(ch) + testAgentInterface(t, agentClient, testPrivateKeys["rsa"], nil, 0) + conn.Close() +} + +func TestV1ProtocolMessages(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + c := NewClient(c1) + + go ServeAgent(NewKeyring(), c2) + + testV1ProtocolMessages(t, c.(*client)) +} + +func testV1ProtocolMessages(t *testing.T, c *client) { + reply, err := c.call([]byte{agentRequestV1Identities}) + if err != nil { + t.Fatalf("v1 request all failed: %v", err) + } + if msg, ok := reply.(*agentV1IdentityMsg); !ok || msg.Numkeys != 0 { + t.Fatalf("invalid request all response: %#v", reply) + } + + reply, err = c.call([]byte{agentRemoveAllV1Identities}) + if err != nil { + t.Fatalf("v1 remove all failed: %v", err) + } + if _, ok := reply.(*successAgentMsg); !ok { + t.Fatalf("invalid remove all response: %#v", reply) + } +} + +func verifyKey(sshAgent Agent) error { + keys, err := sshAgent.List() + if err != nil { + return fmt.Errorf("listing keys: %v", err) + } + + if len(keys) != 1 { + return fmt.Errorf("bad number of keys found. expected 1, got %d", len(keys)) + } + + buf := make([]byte, 128) + if _, err := rand.Read(buf); err != nil { + return fmt.Errorf("rand: %v", err) + } + + sig, err := sshAgent.Sign(keys[0], buf) + if err != nil { + return fmt.Errorf("sign: %v", err) + } + + if err := keys[0].Verify(buf, sig); err != nil { + return fmt.Errorf("verify: %v", err) + } + return nil +} + +func addKeyToAgent(key crypto.PrivateKey) error { + sshAgent := NewKeyring() + if err := sshAgent.Add(AddedKey{PrivateKey: key}); err != nil { + return fmt.Errorf("add: %v", err) + } + return verifyKey(sshAgent) +} + +func TestKeyTypes(t *testing.T) { + for k, v := range testPrivateKeys { + if err := addKeyToAgent(v); err != nil { + t.Errorf("error adding key type %s, %v", k, err) + } + if err := addCertToAgentSock(v, nil); err != nil { + t.Errorf("error adding key type %s, %v", k, err) + } + } +} + +func addCertToAgentSock(key crypto.PrivateKey, cert *ssh.Certificate) error { + a, b, err := netPipe() + if err != nil { + return err + } + agentServer := NewKeyring() + go ServeAgent(agentServer, a) + + agentClient := NewClient(b) + if err := agentClient.Add(AddedKey{PrivateKey: key, Certificate: cert}); err != nil { + return fmt.Errorf("add: %v", err) + } + return verifyKey(agentClient) +} + +func addCertToAgent(key crypto.PrivateKey, cert *ssh.Certificate) error { + sshAgent := NewKeyring() + if err := sshAgent.Add(AddedKey{PrivateKey: key, Certificate: cert}); err != nil { + return fmt.Errorf("add: %v", err) + } + return verifyKey(sshAgent) +} + +func TestCertTypes(t *testing.T) { + for keyType, key := range testPublicKeys { + cert := &ssh.Certificate{ + ValidPrincipals: []string{"gopher1"}, + ValidAfter: 0, + ValidBefore: ssh.CertTimeInfinity, + Key: key, + Serial: 1, + CertType: ssh.UserCert, + SignatureKey: testPublicKeys["rsa"], + Permissions: ssh.Permissions{ + CriticalOptions: map[string]string{}, + Extensions: map[string]string{}, + }, + } + if err := cert.SignCert(rand.Reader, testSigners["rsa"]); err != nil { + t.Fatalf("signcert: %v", err) + } + if err := addCertToAgent(testPrivateKeys[keyType], cert); err != nil { + t.Fatalf("%v", err) + } + if err := addCertToAgentSock(testPrivateKeys[keyType], cert); err != nil { + t.Fatalf("%v", err) + } + } +} + +func TestParseConstraints(t *testing.T) { + // Test LifetimeSecs + var msg = constrainLifetimeAgentMsg{pseudorand.Uint32()} + lifetimeSecs, _, _, err := parseConstraints(ssh.Marshal(msg)) + if err != nil { + t.Fatalf("parseConstraints: %v", err) + } + if lifetimeSecs != msg.LifetimeSecs { + t.Errorf("got lifetime %v, want %v", lifetimeSecs, msg.LifetimeSecs) + } + + // Test ConfirmBeforeUse + _, confirmBeforeUse, _, err := parseConstraints([]byte{agentConstrainConfirm}) + if err != nil { + t.Fatalf("%v", err) + } + if !confirmBeforeUse { + t.Error("got comfirmBeforeUse == false") + } + + // Test ConstraintExtensions + var data []byte + var expect []ConstraintExtension + for i := 0; i < 10; i++ { + var ext = ConstraintExtension{ + ExtensionName: fmt.Sprintf("name%d", i), + ExtensionDetails: []byte(fmt.Sprintf("details: %d", i)), + } + expect = append(expect, ext) + data = append(data, agentConstrainExtension) + data = append(data, ssh.Marshal(ext)...) + } + _, _, extensions, err := parseConstraints(data) + if err != nil { + t.Fatalf("%v", err) + } + if !reflect.DeepEqual(expect, extensions) { + t.Errorf("got extension %v, want %v", extensions, expect) + } + + // Test Unknown Constraint + _, _, _, err = parseConstraints([]byte{128}) + if err == nil || !strings.Contains(err.Error(), "unknown constraint") { + t.Errorf("unexpected error: %v", err) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go b/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go new file mode 100644 index 0000000..cc42a87 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go @@ -0,0 +1,64 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three +// instances. + +package agent + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/testdata" +) + +var ( + testPrivateKeys map[string]interface{} + testSigners map[string]ssh.Signer + testPublicKeys map[string]ssh.PublicKey +) + +func init() { + var err error + + n := len(testdata.PEMBytes) + testPrivateKeys = make(map[string]interface{}, n) + testSigners = make(map[string]ssh.Signer, n) + testPublicKeys = make(map[string]ssh.PublicKey, n) + for t, k := range testdata.PEMBytes { + testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k) + if err != nil { + panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err)) + } + testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t]) + if err != nil { + panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err)) + } + testPublicKeys[t] = testSigners[t].PublicKey() + } + + // Create a cert and sign it for use in tests. + testCert := &ssh.Certificate{ + Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage + ValidAfter: 0, // unix epoch + ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time. + Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + Key: testPublicKeys["ecdsa"], + SignatureKey: testPublicKeys["rsa"], + Permissions: ssh.Permissions{ + CriticalOptions: map[string]string{}, + Extensions: map[string]string{}, + }, + } + testCert.SignCert(rand.Reader, testSigners["rsa"]) + testPrivateKeys["cert"] = testPrivateKeys["ecdsa"] + testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"]) + if err != nil { + panic(fmt.Sprintf("Unable to create certificate signer: %v", err)) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/benchmark_test.go b/vendor/golang.org/x/crypto/ssh/benchmark_test.go new file mode 100644 index 0000000..20c3307 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/benchmark_test.go @@ -0,0 +1,123 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "io" + "net" + "testing" +) + +type server struct { + *ServerConn + chans <-chan NewChannel +} + +func newServer(c net.Conn, conf *ServerConfig) (*server, error) { + sconn, chans, reqs, err := NewServerConn(c, conf) + if err != nil { + return nil, err + } + go DiscardRequests(reqs) + return &server{sconn, chans}, nil +} + +func (s *server) Accept() (NewChannel, error) { + n, ok := <-s.chans + if !ok { + return nil, io.EOF + } + return n, nil +} + +func sshPipe() (Conn, *server, error) { + c1, c2, err := netPipe() + if err != nil { + return nil, nil, err + } + + clientConf := ClientConfig{ + User: "user", + HostKeyCallback: InsecureIgnoreHostKey(), + } + serverConf := ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["ecdsa"]) + done := make(chan *server, 1) + go func() { + server, err := newServer(c2, &serverConf) + if err != nil { + done <- nil + } + done <- server + }() + + client, _, reqs, err := NewClientConn(c1, "", &clientConf) + if err != nil { + return nil, nil, err + } + + server := <-done + if server == nil { + return nil, nil, errors.New("server handshake failed.") + } + go DiscardRequests(reqs) + + return client, server, nil +} + +func BenchmarkEndToEnd(b *testing.B) { + b.StopTimer() + + client, server, err := sshPipe() + if err != nil { + b.Fatalf("sshPipe: %v", err) + } + + defer client.Close() + defer server.Close() + + size := (1 << 20) + input := make([]byte, size) + output := make([]byte, size) + b.SetBytes(int64(size)) + done := make(chan int, 1) + + go func() { + newCh, err := server.Accept() + if err != nil { + b.Fatalf("Client: %v", err) + } + ch, incoming, err := newCh.Accept() + go DiscardRequests(incoming) + for i := 0; i < b.N; i++ { + if _, err := io.ReadFull(ch, output); err != nil { + b.Fatalf("ReadFull: %v", err) + } + } + ch.Close() + done <- 1 + }() + + ch, in, err := client.OpenChannel("speed", nil) + if err != nil { + b.Fatalf("OpenChannel: %v", err) + } + go DiscardRequests(in) + + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + if _, err := ch.Write(input); err != nil { + b.Fatalf("WriteFull: %v", err) + } + } + ch.Close() + b.StopTimer() + + <-done +} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go new file mode 100644 index 0000000..1ab07d0 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/buffer.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" +) + +// buffer provides a linked list buffer for data exchange +// between producer and consumer. Theoretically the buffer is +// of unlimited capacity as it does no allocation of its own. +type buffer struct { + // protects concurrent access to head, tail and closed + *sync.Cond + + head *element // the buffer that will be read first + tail *element // the buffer that will be read last + + closed bool +} + +// An element represents a single link in a linked list. +type element struct { + buf []byte + next *element +} + +// newBuffer returns an empty buffer that is not closed. +func newBuffer() *buffer { + e := new(element) + b := &buffer{ + Cond: newCond(), + head: e, + tail: e, + } + return b +} + +// write makes buf available for Read to receive. +// buf must not be modified after the call to write. +func (b *buffer) write(buf []byte) { + b.Cond.L.Lock() + e := &element{buf: buf} + b.tail.next = e + b.tail = e + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// eof closes the buffer. Reads from the buffer once all +// the data has been consumed will receive io.EOF. +func (b *buffer) eof() { + b.Cond.L.Lock() + b.closed = true + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// Read reads data from the internal buffer in buf. Reads will block +// if no data is available, or until the buffer is closed. +func (b *buffer) Read(buf []byte) (n int, err error) { + b.Cond.L.Lock() + defer b.Cond.L.Unlock() + + for len(buf) > 0 { + // if there is data in b.head, copy it + if len(b.head.buf) > 0 { + r := copy(buf, b.head.buf) + buf, b.head.buf = buf[r:], b.head.buf[r:] + n += r + continue + } + // if there is a next buffer, make it the head + if len(b.head.buf) == 0 && b.head != b.tail { + b.head = b.head.next + continue + } + + // if at least one byte has been copied, return + if n > 0 { + break + } + + // if nothing was read, and there is nothing outstanding + // check to see if the buffer is closed. + if b.closed { + err = io.EOF + break + } + // out of buffers, wait for producer + b.Cond.Wait() + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/buffer_test.go b/vendor/golang.org/x/crypto/ssh/buffer_test.go new file mode 100644 index 0000000..d5781cb --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/buffer_test.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "testing" +) + +var alphabet = []byte("abcdefghijklmnopqrstuvwxyz") + +func TestBufferReadwrite(t *testing.T) { + b := newBuffer() + b.write(alphabet[:10]) + r, _ := b.Read(make([]byte, 10)) + if r != 10 { + t.Fatalf("Expected written == read == 10, written: 10, read %d", r) + } + + b = newBuffer() + b.write(alphabet[:5]) + r, _ = b.Read(make([]byte, 10)) + if r != 5 { + t.Fatalf("Expected written == read == 5, written: 5, read %d", r) + } + + b = newBuffer() + b.write(alphabet[:10]) + r, _ = b.Read(make([]byte, 5)) + if r != 5 { + t.Fatalf("Expected written == 10, read == 5, written: 10, read %d", r) + } + + b = newBuffer() + b.write(alphabet[:5]) + b.write(alphabet[5:15]) + r, _ = b.Read(make([]byte, 10)) + r2, _ := b.Read(make([]byte, 10)) + if r != 10 || r2 != 5 || 15 != r+r2 { + t.Fatal("Expected written == read == 15") + } +} + +func TestBufferClose(t *testing.T) { + b := newBuffer() + b.write(alphabet[:10]) + b.eof() + _, err := b.Read(make([]byte, 5)) + if err != nil { + t.Fatal("expected read of 5 to not return EOF") + } + b = newBuffer() + b.write(alphabet[:10]) + b.eof() + r, err := b.Read(make([]byte, 5)) + r2, err2 := b.Read(make([]byte, 10)) + if r != 5 || r2 != 5 || err != nil || err2 != nil { + t.Fatal("expected reads of 5 and 5") + } + + b = newBuffer() + b.write(alphabet[:10]) + b.eof() + r, err = b.Read(make([]byte, 5)) + r2, err2 = b.Read(make([]byte, 10)) + r3, err3 := b.Read(make([]byte, 10)) + if r != 5 || r2 != 5 || r3 != 0 || err != nil || err2 != nil || err3 != io.EOF { + t.Fatal("expected reads of 5 and 5 and 0, with EOF") + } + + b = newBuffer() + b.write(make([]byte, 5)) + b.write(make([]byte, 10)) + b.eof() + r, err = b.Read(make([]byte, 9)) + r2, err2 = b.Read(make([]byte, 3)) + r3, err3 = b.Read(make([]byte, 3)) + r4, err4 := b.Read(make([]byte, 10)) + if err != nil || err2 != nil || err3 != nil || err4 != io.EOF { + t.Fatalf("Expected EOF on forth read only, err=%v, err2=%v, err3=%v, err4=%v", err, err2, err3, err4) + } + if r != 9 || r2 != 3 || r3 != 3 || r4 != 0 { + t.Fatal("Expected written == read == 15", r, r2, r3, r4) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go new file mode 100644 index 0000000..42106f3 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -0,0 +1,521 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "sort" + "time" +) + +// These constants from [PROTOCOL.certkeys] represent the algorithm names +// for certificate types supported by this package. +const ( + CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" + CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" + CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" + CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" + CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" +) + +// Certificate types distinguish between host and user +// certificates. The values can be set in the CertType field of +// Certificate. +const ( + UserCert = 1 + HostCert = 2 +) + +// Signature represents a cryptographic signature. +type Signature struct { + Format string + Blob []byte +} + +// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that +// a certificate does not expire. +const CertTimeInfinity = 1<<64 - 1 + +// An Certificate represents an OpenSSH certificate as defined in +// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the +// PublicKey interface, so it can be unmarshaled using +// ParsePublicKey. +type Certificate struct { + Nonce []byte + Key PublicKey + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []string + ValidAfter uint64 + ValidBefore uint64 + Permissions + Reserved []byte + SignatureKey PublicKey + Signature *Signature +} + +// genericCertData holds the key-independent part of the certificate data. +// Overall, certificates contain an nonce, public key fields and +// key-independent fields. +type genericCertData struct { + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []byte + ValidAfter uint64 + ValidBefore uint64 + CriticalOptions []byte + Extensions []byte + Reserved []byte + SignatureKey []byte + Signature []byte +} + +func marshalStringList(namelist []string) []byte { + var to []byte + for _, name := range namelist { + s := struct{ N string }{name} + to = append(to, Marshal(&s)...) + } + return to +} + +type optionsTuple struct { + Key string + Value []byte +} + +type optionsTupleValue struct { + Value string +} + +// serialize a map of critical options or extensions +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty string value +func marshalTuples(tups map[string]string) []byte { + keys := make([]string, 0, len(tups)) + for key := range tups { + keys = append(keys, key) + } + sort.Strings(keys) + + var ret []byte + for _, key := range keys { + s := optionsTuple{Key: key} + if value := tups[key]; len(value) > 0 { + s.Value = Marshal(&optionsTupleValue{value}) + } + ret = append(ret, Marshal(&s)...) + } + return ret +} + +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty option value +func parseTuples(in []byte) (map[string]string, error) { + tups := map[string]string{} + var lastKey string + var haveLastKey bool + + for len(in) > 0 { + var key, val, extra []byte + var ok bool + + if key, in, ok = parseString(in); !ok { + return nil, errShortRead + } + keyStr := string(key) + // according to [PROTOCOL.certkeys], the names must be in + // lexical order. + if haveLastKey && keyStr <= lastKey { + return nil, fmt.Errorf("ssh: certificate options are not in lexical order") + } + lastKey, haveLastKey = keyStr, true + // the next field is a data field, which if non-empty has a string embedded + if val, in, ok = parseString(in); !ok { + return nil, errShortRead + } + if len(val) > 0 { + val, extra, ok = parseString(val) + if !ok { + return nil, errShortRead + } + if len(extra) > 0 { + return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") + } + tups[keyStr] = string(val) + } else { + tups[keyStr] = "" + } + } + return tups, nil +} + +func parseCert(in []byte, privAlgo string) (*Certificate, error) { + nonce, rest, ok := parseString(in) + if !ok { + return nil, errShortRead + } + + key, rest, err := parsePubKey(rest, privAlgo) + if err != nil { + return nil, err + } + + var g genericCertData + if err := Unmarshal(rest, &g); err != nil { + return nil, err + } + + c := &Certificate{ + Nonce: nonce, + Key: key, + Serial: g.Serial, + CertType: g.CertType, + KeyId: g.KeyId, + ValidAfter: g.ValidAfter, + ValidBefore: g.ValidBefore, + } + + for principals := g.ValidPrincipals; len(principals) > 0; { + principal, rest, ok := parseString(principals) + if !ok { + return nil, errShortRead + } + c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) + principals = rest + } + + c.CriticalOptions, err = parseTuples(g.CriticalOptions) + if err != nil { + return nil, err + } + c.Extensions, err = parseTuples(g.Extensions) + if err != nil { + return nil, err + } + c.Reserved = g.Reserved + k, err := ParsePublicKey(g.SignatureKey) + if err != nil { + return nil, err + } + + c.SignatureKey = k + c.Signature, rest, ok = parseSignatureBody(g.Signature) + if !ok || len(rest) > 0 { + return nil, errors.New("ssh: signature parse error") + } + + return c, nil +} + +type openSSHCertSigner struct { + pub *Certificate + signer Signer +} + +// NewCertSigner returns a Signer that signs with the given Certificate, whose +// private key is held by signer. It returns an error if the public key in cert +// doesn't match the key used by signer. +func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { + if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + return nil, errors.New("ssh: signer and cert have different public key") + } + + return &openSSHCertSigner{cert, signer}, nil +} + +func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.signer.Sign(rand, data) +} + +func (s *openSSHCertSigner) PublicKey() PublicKey { + return s.pub +} + +const sourceAddressCriticalOption = "source-address" + +// CertChecker does the work of verifying a certificate. Its methods +// can be plugged into ClientConfig.HostKeyCallback and +// ServerConfig.PublicKeyCallback. For the CertChecker to work, +// minimally, the IsAuthority callback should be set. +type CertChecker struct { + // SupportedCriticalOptions lists the CriticalOptions that the + // server application layer understands. These are only used + // for user certificates. + SupportedCriticalOptions []string + + // IsUserAuthority should return true if the key is recognized as an + // authority for the given user certificate. This allows for + // certificates to be signed by other certificates. This must be set + // if this CertChecker will be checking user certificates. + IsUserAuthority func(auth PublicKey) bool + + // IsHostAuthority should report whether the key is recognized as + // an authority for this host. This allows for certificates to be + // signed by other keys, and for those other keys to only be valid + // signers for particular hostnames. This must be set if this + // CertChecker will be checking host certificates. + IsHostAuthority func(auth PublicKey, address string) bool + + // Clock is used for verifying time stamps. If nil, time.Now + // is used. + Clock func() time.Time + + // UserKeyFallback is called when CertChecker.Authenticate encounters a + // public key that is not a certificate. It must implement validation + // of user keys or else, if nil, all such keys are rejected. + UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // HostKeyFallback is called when CertChecker.CheckHostKey encounters a + // public key that is not a certificate. It must implement host key + // validation or else, if nil, all such keys are rejected. + HostKeyFallback HostKeyCallback + + // IsRevoked is called for each certificate so that revocation checking + // can be implemented. It should return true if the given certificate + // is revoked and false otherwise. If nil, no certificates are + // considered to have been revoked. + IsRevoked func(cert *Certificate) bool +} + +// CheckHostKey checks a host key certificate. This method can be +// plugged into ClientConfig.HostKeyCallback. +func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { + cert, ok := key.(*Certificate) + if !ok { + if c.HostKeyFallback != nil { + return c.HostKeyFallback(addr, remote, key) + } + return errors.New("ssh: non-certificate host key") + } + if cert.CertType != HostCert { + return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) + } + if !c.IsHostAuthority(cert.SignatureKey, addr) { + return fmt.Errorf("ssh: no authorities for hostname: %v", addr) + } + + hostname, _, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + // Pass hostname only as principal for host certificates (consistent with OpenSSH) + return c.CheckCert(hostname, cert) +} + +// Authenticate checks a user certificate. Authenticate can be used as +// a value for ServerConfig.PublicKeyCallback. +func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { + cert, ok := pubKey.(*Certificate) + if !ok { + if c.UserKeyFallback != nil { + return c.UserKeyFallback(conn, pubKey) + } + return nil, errors.New("ssh: normal key pairs not accepted") + } + + if cert.CertType != UserCert { + return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) + } + if !c.IsUserAuthority(cert.SignatureKey) { + return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") + } + + if err := c.CheckCert(conn.User(), cert); err != nil { + return nil, err + } + + return &cert.Permissions, nil +} + +// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and +// the signature of the certificate. +func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { + if c.IsRevoked != nil && c.IsRevoked(cert) { + return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) + } + + for opt := range cert.CriticalOptions { + // sourceAddressCriticalOption will be enforced by + // serverAuthenticate + if opt == sourceAddressCriticalOption { + continue + } + + found := false + for _, supp := range c.SupportedCriticalOptions { + if supp == opt { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) + } + } + + if len(cert.ValidPrincipals) > 0 { + // By default, certs are valid for all users/hosts. + found := false + for _, p := range cert.ValidPrincipals { + if p == principal { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) + } + } + + clock := c.Clock + if clock == nil { + clock = time.Now + } + + unixNow := clock().Unix() + if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { + return fmt.Errorf("ssh: cert is not yet valid") + } + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { + return fmt.Errorf("ssh: cert has expired") + } + if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { + return fmt.Errorf("ssh: certificate signature does not verify") + } + + return nil +} + +// SignCert sets c.SignatureKey to the authority's public key and stores a +// Signature, by authority, in the certificate. +func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { + c.Nonce = make([]byte, 32) + if _, err := io.ReadFull(rand, c.Nonce); err != nil { + return err + } + c.SignatureKey = authority.PublicKey() + + sig, err := authority.Sign(rand, c.bytesForSigning()) + if err != nil { + return err + } + c.Signature = sig + return nil +} + +var certAlgoNames = map[string]string{ + KeyAlgoRSA: CertAlgoRSAv01, + KeyAlgoDSA: CertAlgoDSAv01, + KeyAlgoECDSA256: CertAlgoECDSA256v01, + KeyAlgoECDSA384: CertAlgoECDSA384v01, + KeyAlgoECDSA521: CertAlgoECDSA521v01, + KeyAlgoED25519: CertAlgoED25519v01, +} + +// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. +// Panics if a non-certificate algorithm is passed. +func certToPrivAlgo(algo string) string { + for privAlgo, pubAlgo := range certAlgoNames { + if pubAlgo == algo { + return privAlgo + } + } + panic("unknown cert algorithm") +} + +func (cert *Certificate) bytesForSigning() []byte { + c2 := *cert + c2.Signature = nil + out := c2.Marshal() + // Drop trailing signature length. + return out[:len(out)-4] +} + +// Marshal serializes c into OpenSSH's wire format. It is part of the +// PublicKey interface. +func (c *Certificate) Marshal() []byte { + generic := genericCertData{ + Serial: c.Serial, + CertType: c.CertType, + KeyId: c.KeyId, + ValidPrincipals: marshalStringList(c.ValidPrincipals), + ValidAfter: uint64(c.ValidAfter), + ValidBefore: uint64(c.ValidBefore), + CriticalOptions: marshalTuples(c.CriticalOptions), + Extensions: marshalTuples(c.Extensions), + Reserved: c.Reserved, + SignatureKey: c.SignatureKey.Marshal(), + } + if c.Signature != nil { + generic.Signature = Marshal(c.Signature) + } + genericBytes := Marshal(&generic) + keyBytes := c.Key.Marshal() + _, keyBytes, _ = parseString(keyBytes) + prefix := Marshal(&struct { + Name string + Nonce []byte + Key []byte `ssh:"rest"` + }{c.Type(), c.Nonce, keyBytes}) + + result := make([]byte, 0, len(prefix)+len(genericBytes)) + result = append(result, prefix...) + result = append(result, genericBytes...) + return result +} + +// Type returns the key name. It is part of the PublicKey interface. +func (c *Certificate) Type() string { + algo, ok := certAlgoNames[c.Key.Type()] + if !ok { + panic("unknown cert key type " + c.Key.Type()) + } + return algo +} + +// Verify verifies a signature against the certificate's public +// key. It is part of the PublicKey interface. +func (c *Certificate) Verify(data []byte, sig *Signature) error { + return c.Key.Verify(data, sig) +} + +func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { + format, in, ok := parseString(in) + if !ok { + return + } + + out = &Signature{ + Format: string(format), + } + + if out.Blob, in, ok = parseString(in); !ok { + return + } + + return out, in, ok +} + +func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { + sigBytes, rest, ok := parseString(in) + if !ok { + return + } + + out, trailing, ok := parseSignatureBody(sigBytes) + if !ok || len(trailing) > 0 { + return nil, nil, false + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/certs_test.go b/vendor/golang.org/x/crypto/ssh/certs_test.go new file mode 100644 index 0000000..c8e7cf5 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/certs_test.go @@ -0,0 +1,335 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "net" + "reflect" + "testing" + "time" + + "golang.org/x/crypto/ssh/testdata" +) + +// Cert generated by ssh-keygen 6.0p1 Debian-4. +// % ssh-keygen -s ca-key -I test user-key +const exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=` + +func TestParseCert(t *testing.T) { + authKeyBytes := []byte(exampleSSHCert) + + key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + if len(rest) > 0 { + t.Errorf("rest: got %q, want empty", rest) + } + + if _, ok := key.(*Certificate); !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + + marshaled := MarshalAuthorizedKey(key) + // Before comparison, remove the trailing newline that + // MarshalAuthorizedKey adds. + marshaled = marshaled[:len(marshaled)-1] + if !bytes.Equal(authKeyBytes, marshaled) { + t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes) + } +} + +// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3 +// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub +// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN +// Critical Options: +// force-command /bin/sleep +// source-address 192.168.1.0/24 +// Extensions: +// permit-X11-forwarding +// permit-agent-forwarding +// permit-port-forwarding +// permit-pty +// permit-user-rc +const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ` + +func TestParseCertWithOptions(t *testing.T) { + opts := map[string]string{ + "source-address": "192.168.1.0/24", + "force-command": "/bin/sleep", + } + exts := map[string]string{ + "permit-X11-forwarding": "", + "permit-agent-forwarding": "", + "permit-port-forwarding": "", + "permit-pty": "", + "permit-user-rc": "", + } + authKeyBytes := []byte(exampleSSHCertWithOptions) + + key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + if len(rest) > 0 { + t.Errorf("rest: got %q, want empty", rest) + } + cert, ok := key.(*Certificate) + if !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + if !reflect.DeepEqual(cert.CriticalOptions, opts) { + t.Errorf("unexpected critical options - got %v, want %v", cert.CriticalOptions, opts) + } + if !reflect.DeepEqual(cert.Extensions, exts) { + t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts) + } + marshaled := MarshalAuthorizedKey(key) + // Before comparison, remove the trailing newline that + // MarshalAuthorizedKey adds. + marshaled = marshaled[:len(marshaled)-1] + if !bytes.Equal(authKeyBytes, marshaled) { + t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes) + } +} + +func TestValidateCert(t *testing.T) { + key, _, _, _, err := ParseAuthorizedKey([]byte(exampleSSHCert)) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + validCert, ok := key.(*Certificate) + if !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + checker := CertChecker{} + checker.IsUserAuthority = func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), validCert.SignatureKey.Marshal()) + } + + if err := checker.CheckCert("user", validCert); err != nil { + t.Errorf("Unable to validate certificate: %v", err) + } + invalidCert := &Certificate{ + Key: testPublicKeys["rsa"], + SignatureKey: testPublicKeys["ecdsa"], + ValidBefore: CertTimeInfinity, + Signature: &Signature{}, + } + if err := checker.CheckCert("user", invalidCert); err == nil { + t.Error("Invalid cert signature passed validation") + } +} + +func TestValidateCertTime(t *testing.T) { + cert := Certificate{ + ValidPrincipals: []string{"user"}, + Key: testPublicKeys["rsa"], + ValidAfter: 50, + ValidBefore: 100, + } + + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + + for ts, ok := range map[int64]bool{ + 25: false, + 50: true, + 99: true, + 100: false, + 125: false, + } { + checker := CertChecker{ + Clock: func() time.Time { return time.Unix(ts, 0) }, + } + checker.IsUserAuthority = func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), + testPublicKeys["ecdsa"].Marshal()) + } + + if v := checker.CheckCert("user", &cert); (v == nil) != ok { + t.Errorf("Authenticate(%d): %v", ts, v) + } + } +} + +// TODO(hanwen): tests for +// +// host keys: +// * fallbacks + +func TestHostKeyCert(t *testing.T) { + cert := &Certificate{ + ValidPrincipals: []string{"hostname", "hostname.domain", "otherhost"}, + Key: testPublicKeys["rsa"], + ValidBefore: CertTimeInfinity, + CertType: HostCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + + checker := &CertChecker{ + IsHostAuthority: func(p PublicKey, addr string) bool { + return addr == "hostname:22" && bytes.Equal(testPublicKeys["ecdsa"].Marshal(), p.Marshal()) + }, + } + + certSigner, err := NewCertSigner(cert, testSigners["rsa"]) + if err != nil { + t.Errorf("NewCertSigner: %v", err) + } + + for _, test := range []struct { + addr string + succeed bool + }{ + {addr: "hostname:22", succeed: true}, + {addr: "otherhost:22", succeed: false}, // The certificate is valid for 'otherhost' as hostname, but we only recognize the authority of the signer for the address 'hostname:22' + {addr: "lasthost:22", succeed: false}, + } { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + errc := make(chan error) + + go func() { + conf := ServerConfig{ + NoClientAuth: true, + } + conf.AddHostKey(certSigner) + _, _, _, err := NewServerConn(c1, &conf) + errc <- err + }() + + config := &ClientConfig{ + User: "user", + HostKeyCallback: checker.CheckHostKey, + } + _, _, _, err = NewClientConn(c2, test.addr, config) + + if (err == nil) != test.succeed { + t.Fatalf("NewClientConn(%q): %v", test.addr, err) + } + + err = <-errc + if (err == nil) != test.succeed { + t.Fatalf("NewServerConn(%q): %v", test.addr, err) + } + } +} + +func TestCertTypes(t *testing.T) { + var testVars = []struct { + name string + keys func() Signer + }{ + { + name: CertAlgoECDSA256v01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["ecdsap256"]) + return s + }, + }, + { + name: CertAlgoECDSA384v01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["ecdsap384"]) + return s + }, + }, + { + name: CertAlgoECDSA521v01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["ecdsap521"]) + return s + }, + }, + { + name: CertAlgoED25519v01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["ed25519"]) + return s + }, + }, + { + name: CertAlgoRSAv01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["rsa"]) + return s + }, + }, + { + name: CertAlgoDSAv01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["dsa"]) + return s + }, + }, + } + + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("error generating host key: %v", err) + } + + signer, err := NewSignerFromKey(k) + if err != nil { + t.Fatalf("error generating signer for ssh listener: %v", err) + } + + conf := &ServerConfig{ + PublicKeyCallback: func(c ConnMetadata, k PublicKey) (*Permissions, error) { + return new(Permissions), nil + }, + } + conf.AddHostKey(signer) + + for _, m := range testVars { + t.Run(m.name, func(t *testing.T) { + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, conf) + + priv := m.keys() + if err != nil { + t.Fatalf("error generating ssh pubkey: %v", err) + } + + cert := &Certificate{ + CertType: UserCert, + Key: priv.PublicKey(), + } + cert.SignCert(rand.Reader, priv) + + certSigner, err := NewCertSigner(cert, priv) + if err != nil { + t.Fatalf("error generating cert signer: %v", err) + } + + config := &ClientConfig{ + User: "user", + HostKeyCallback: func(h string, r net.Addr, k PublicKey) error { return nil }, + Auth: []AuthMethod{PublicKeys(certSigner)}, + } + + _, _, _, err = NewClientConn(c2, "", config) + if err != nil { + t.Fatalf("error connecting: %v", err) + } + }) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go new file mode 100644 index 0000000..c0834c0 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -0,0 +1,633 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "sync" +) + +const ( + minPacketLength = 9 + // channelMaxPacket contains the maximum number of bytes that will be + // sent in a single packet. As per RFC 4253, section 6.1, 32k is also + // the minimum. + channelMaxPacket = 1 << 15 + // We follow OpenSSH here. + channelWindowSize = 64 * channelMaxPacket +) + +// NewChannel represents an incoming request to a channel. It must either be +// accepted for use by calling Accept, or rejected by calling Reject. +type NewChannel interface { + // Accept accepts the channel creation request. It returns the Channel + // and a Go channel containing SSH requests. The Go channel must be + // serviced otherwise the Channel will hang. + Accept() (Channel, <-chan *Request, error) + + // Reject rejects the channel creation request. After calling + // this, no other methods on the Channel may be called. + Reject(reason RejectionReason, message string) error + + // ChannelType returns the type of the channel, as supplied by the + // client. + ChannelType() string + + // ExtraData returns the arbitrary payload for this channel, as supplied + // by the client. This data is specific to the channel type. + ExtraData() []byte +} + +// A Channel is an ordered, reliable, flow-controlled, duplex stream +// that is multiplexed over an SSH connection. +type Channel interface { + // Read reads up to len(data) bytes from the channel. + Read(data []byte) (int, error) + + // Write writes len(data) bytes to the channel. + Write(data []byte) (int, error) + + // Close signals end of channel use. No data may be sent after this + // call. + Close() error + + // CloseWrite signals the end of sending in-band + // data. Requests may still be sent, and the other side may + // still send data + CloseWrite() error + + // SendRequest sends a channel request. If wantReply is true, + // it will wait for a reply and return the result as a + // boolean, otherwise the return value will be false. Channel + // requests are out-of-band messages so they may be sent even + // if the data stream is closed or blocked by flow control. + // If the channel is closed before a reply is returned, io.EOF + // is returned. + SendRequest(name string, wantReply bool, payload []byte) (bool, error) + + // Stderr returns an io.ReadWriter that writes to this channel + // with the extended data type set to stderr. Stderr may + // safely be read and written from a different goroutine than + // Read and Write respectively. + Stderr() io.ReadWriter +} + +// Request is a request sent outside of the normal stream of +// data. Requests can either be specific to an SSH channel, or they +// can be global. +type Request struct { + Type string + WantReply bool + Payload []byte + + ch *channel + mux *mux +} + +// Reply sends a response to a request. It must be called for all requests +// where WantReply is true and is a no-op otherwise. The payload argument is +// ignored for replies to channel-specific requests. +func (r *Request) Reply(ok bool, payload []byte) error { + if !r.WantReply { + return nil + } + + if r.ch == nil { + return r.mux.ackRequest(ok, payload) + } + + return r.ch.ackRequest(ok) +} + +// RejectionReason is an enumeration used when rejecting channel creation +// requests. See RFC 4254, section 5.1. +type RejectionReason uint32 + +const ( + Prohibited RejectionReason = iota + 1 + ConnectionFailed + UnknownChannelType + ResourceShortage +) + +// String converts the rejection reason to human readable form. +func (r RejectionReason) String() string { + switch r { + case Prohibited: + return "administratively prohibited" + case ConnectionFailed: + return "connect failed" + case UnknownChannelType: + return "unknown channel type" + case ResourceShortage: + return "resource shortage" + } + return fmt.Sprintf("unknown reason %d", int(r)) +} + +func min(a uint32, b int) uint32 { + if a < uint32(b) { + return a + } + return uint32(b) +} + +type channelDirection uint8 + +const ( + channelInbound channelDirection = iota + channelOutbound +) + +// channel is an implementation of the Channel interface that works +// with the mux class. +type channel struct { + // R/O after creation + chanType string + extraData []byte + localId, remoteId uint32 + + // maxIncomingPayload and maxRemotePayload are the maximum + // payload sizes of normal and extended data packets for + // receiving and sending, respectively. The wire packet will + // be 9 or 13 bytes larger (excluding encryption overhead). + maxIncomingPayload uint32 + maxRemotePayload uint32 + + mux *mux + + // decided is set to true if an accept or reject message has been sent + // (for outbound channels) or received (for inbound channels). + decided bool + + // direction contains either channelOutbound, for channels created + // locally, or channelInbound, for channels created by the peer. + direction channelDirection + + // Pending internal channel messages. + msg chan interface{} + + // Since requests have no ID, there can be only one request + // with WantReply=true outstanding. This lock is held by a + // goroutine that has such an outgoing request pending. + sentRequestMu sync.Mutex + + incomingRequests chan *Request + + sentEOF bool + + // thread-safe data + remoteWin window + pending *buffer + extPending *buffer + + // windowMu protects myWindow, the flow-control window. + windowMu sync.Mutex + myWindow uint32 + + // writeMu serializes calls to mux.conn.writePacket() and + // protects sentClose and packetPool. This mutex must be + // different from windowMu, as writePacket can block if there + // is a key exchange pending. + writeMu sync.Mutex + sentClose bool + + // packetPool has a buffer for each extended channel ID to + // save allocations during writes. + packetPool map[uint32][]byte +} + +// writePacket sends a packet. If the packet is a channel close, it updates +// sentClose. This method takes the lock c.writeMu. +func (ch *channel) writePacket(packet []byte) error { + ch.writeMu.Lock() + if ch.sentClose { + ch.writeMu.Unlock() + return io.EOF + } + ch.sentClose = (packet[0] == msgChannelClose) + err := ch.mux.conn.writePacket(packet) + ch.writeMu.Unlock() + return err +} + +func (ch *channel) sendMessage(msg interface{}) error { + if debugMux { + log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) + } + + p := Marshal(msg) + binary.BigEndian.PutUint32(p[1:], ch.remoteId) + return ch.writePacket(p) +} + +// WriteExtended writes data to a specific extended stream. These streams are +// used, for example, for stderr. +func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { + if ch.sentEOF { + return 0, io.EOF + } + // 1 byte message type, 4 bytes remoteId, 4 bytes data length + opCode := byte(msgChannelData) + headerLength := uint32(9) + if extendedCode > 0 { + headerLength += 4 + opCode = msgChannelExtendedData + } + + ch.writeMu.Lock() + packet := ch.packetPool[extendedCode] + // We don't remove the buffer from packetPool, so + // WriteExtended calls from different goroutines will be + // flagged as errors by the race detector. + ch.writeMu.Unlock() + + for len(data) > 0 { + space := min(ch.maxRemotePayload, len(data)) + if space, err = ch.remoteWin.reserve(space); err != nil { + return n, err + } + if want := headerLength + space; uint32(cap(packet)) < want { + packet = make([]byte, want) + } else { + packet = packet[:want] + } + + todo := data[:space] + + packet[0] = opCode + binary.BigEndian.PutUint32(packet[1:], ch.remoteId) + if extendedCode > 0 { + binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) + } + binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) + copy(packet[headerLength:], todo) + if err = ch.writePacket(packet); err != nil { + return n, err + } + + n += len(todo) + data = data[len(todo):] + } + + ch.writeMu.Lock() + ch.packetPool[extendedCode] = packet + ch.writeMu.Unlock() + + return n, err +} + +func (ch *channel) handleData(packet []byte) error { + headerLen := 9 + isExtendedData := packet[0] == msgChannelExtendedData + if isExtendedData { + headerLen = 13 + } + if len(packet) < headerLen { + // malformed data packet + return parseError(packet[0]) + } + + var extended uint32 + if isExtendedData { + extended = binary.BigEndian.Uint32(packet[5:]) + } + + length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) + if length == 0 { + return nil + } + if length > ch.maxIncomingPayload { + // TODO(hanwen): should send Disconnect? + return errors.New("ssh: incoming packet exceeds maximum payload size") + } + + data := packet[headerLen:] + if length != uint32(len(data)) { + return errors.New("ssh: wrong packet length") + } + + ch.windowMu.Lock() + if ch.myWindow < length { + ch.windowMu.Unlock() + // TODO(hanwen): should send Disconnect with reason? + return errors.New("ssh: remote side wrote too much") + } + ch.myWindow -= length + ch.windowMu.Unlock() + + if extended == 1 { + ch.extPending.write(data) + } else if extended > 0 { + // discard other extended data. + } else { + ch.pending.write(data) + } + return nil +} + +func (c *channel) adjustWindow(n uint32) error { + c.windowMu.Lock() + // Since myWindow is managed on our side, and can never exceed + // the initial window setting, we don't worry about overflow. + c.myWindow += uint32(n) + c.windowMu.Unlock() + return c.sendMessage(windowAdjustMsg{ + AdditionalBytes: uint32(n), + }) +} + +func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { + switch extended { + case 1: + n, err = c.extPending.Read(data) + case 0: + n, err = c.pending.Read(data) + default: + return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) + } + + if n > 0 { + err = c.adjustWindow(uint32(n)) + // sendWindowAdjust can return io.EOF if the remote + // peer has closed the connection, however we want to + // defer forwarding io.EOF to the caller of Read until + // the buffer has been drained. + if n > 0 && err == io.EOF { + err = nil + } + } + + return n, err +} + +func (c *channel) close() { + c.pending.eof() + c.extPending.eof() + close(c.msg) + close(c.incomingRequests) + c.writeMu.Lock() + // This is not necessary for a normal channel teardown, but if + // there was another error, it is. + c.sentClose = true + c.writeMu.Unlock() + // Unblock writers. + c.remoteWin.close() +} + +// responseMessageReceived is called when a success or failure message is +// received on a channel to check that such a message is reasonable for the +// given channel. +func (ch *channel) responseMessageReceived() error { + if ch.direction == channelInbound { + return errors.New("ssh: channel response message received on inbound channel") + } + if ch.decided { + return errors.New("ssh: duplicate response received for channel") + } + ch.decided = true + return nil +} + +func (ch *channel) handlePacket(packet []byte) error { + switch packet[0] { + case msgChannelData, msgChannelExtendedData: + return ch.handleData(packet) + case msgChannelClose: + ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) + ch.mux.chanList.remove(ch.localId) + ch.close() + return nil + case msgChannelEOF: + // RFC 4254 is mute on how EOF affects dataExt messages but + // it is logical to signal EOF at the same time. + ch.extPending.eof() + ch.pending.eof() + return nil + } + + decoded, err := decode(packet) + if err != nil { + return err + } + + switch msg := decoded.(type) { + case *channelOpenFailureMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + ch.mux.chanList.remove(msg.PeersID) + ch.msg <- msg + case *channelOpenConfirmMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) + } + ch.remoteId = msg.MyID + ch.maxRemotePayload = msg.MaxPacketSize + ch.remoteWin.add(msg.MyWindow) + ch.msg <- msg + case *windowAdjustMsg: + if !ch.remoteWin.add(msg.AdditionalBytes) { + return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) + } + case *channelRequestMsg: + req := Request{ + Type: msg.Request, + WantReply: msg.WantReply, + Payload: msg.RequestSpecificData, + ch: ch, + } + + ch.incomingRequests <- &req + default: + ch.msg <- msg + } + return nil +} + +func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { + ch := &channel{ + remoteWin: window{Cond: newCond()}, + myWindow: channelWindowSize, + pending: newBuffer(), + extPending: newBuffer(), + direction: direction, + incomingRequests: make(chan *Request, chanSize), + msg: make(chan interface{}, chanSize), + chanType: chanType, + extraData: extraData, + mux: m, + packetPool: make(map[uint32][]byte), + } + ch.localId = m.chanList.add(ch) + return ch +} + +var errUndecided = errors.New("ssh: must Accept or Reject channel") +var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") + +type extChannel struct { + code uint32 + ch *channel +} + +func (e *extChannel) Write(data []byte) (n int, err error) { + return e.ch.WriteExtended(data, e.code) +} + +func (e *extChannel) Read(data []byte) (n int, err error) { + return e.ch.ReadExtended(data, e.code) +} + +func (ch *channel) Accept() (Channel, <-chan *Request, error) { + if ch.decided { + return nil, nil, errDecidedAlready + } + ch.maxIncomingPayload = channelMaxPacket + confirm := channelOpenConfirmMsg{ + PeersID: ch.remoteId, + MyID: ch.localId, + MyWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + } + ch.decided = true + if err := ch.sendMessage(confirm); err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (ch *channel) Reject(reason RejectionReason, message string) error { + if ch.decided { + return errDecidedAlready + } + reject := channelOpenFailureMsg{ + PeersID: ch.remoteId, + Reason: reason, + Message: message, + Language: "en", + } + ch.decided = true + return ch.sendMessage(reject) +} + +func (ch *channel) Read(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.ReadExtended(data, 0) +} + +func (ch *channel) Write(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.WriteExtended(data, 0) +} + +func (ch *channel) CloseWrite() error { + if !ch.decided { + return errUndecided + } + ch.sentEOF = true + return ch.sendMessage(channelEOFMsg{ + PeersID: ch.remoteId}) +} + +func (ch *channel) Close() error { + if !ch.decided { + return errUndecided + } + + return ch.sendMessage(channelCloseMsg{ + PeersID: ch.remoteId}) +} + +// Extended returns an io.ReadWriter that sends and receives data on the given, +// SSH extended stream. Such streams are used, for example, for stderr. +func (ch *channel) Extended(code uint32) io.ReadWriter { + if !ch.decided { + return nil + } + return &extChannel{code, ch} +} + +func (ch *channel) Stderr() io.ReadWriter { + return ch.Extended(1) +} + +func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + if !ch.decided { + return false, errUndecided + } + + if wantReply { + ch.sentRequestMu.Lock() + defer ch.sentRequestMu.Unlock() + } + + msg := channelRequestMsg{ + PeersID: ch.remoteId, + Request: name, + WantReply: wantReply, + RequestSpecificData: payload, + } + + if err := ch.sendMessage(msg); err != nil { + return false, err + } + + if wantReply { + m, ok := (<-ch.msg) + if !ok { + return false, io.EOF + } + switch m.(type) { + case *channelRequestFailureMsg: + return false, nil + case *channelRequestSuccessMsg: + return true, nil + default: + return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) + } + } + + return false, nil +} + +// ackRequest either sends an ack or nack to the channel request. +func (ch *channel) ackRequest(ok bool) error { + if !ch.decided { + return errUndecided + } + + var msg interface{} + if !ok { + msg = channelRequestFailureMsg{ + PeersID: ch.remoteId, + } + } else { + msg = channelRequestSuccessMsg{ + PeersID: ch.remoteId, + } + } + return ch.sendMessage(msg) +} + +func (ch *channel) ChannelType() string { + return ch.chanType +} + +func (ch *channel) ExtraData() []byte { + return ch.extraData +} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go new file mode 100644 index 0000000..30a49fd --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -0,0 +1,771 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rc4" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + + "golang.org/x/crypto/internal/chacha20" + "golang.org/x/crypto/poly1305" +) + +const ( + packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. + + // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations + // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC + // indicates implementations SHOULD be able to handle larger packet sizes, but then + // waffles on about reasonable limits. + // + // OpenSSH caps their maxPacket at 256kB so we choose to do + // the same. maxPacket is also used to ensure that uint32 + // length fields do not overflow, so it should remain well + // below 4G. + maxPacket = 256 * 1024 +) + +// noneCipher implements cipher.Stream and provides no encryption. It is used +// by the transport before the first key-exchange. +type noneCipher struct{} + +func (c noneCipher) XORKeyStream(dst, src []byte) { + copy(dst, src) +} + +func newAESCTR(key, iv []byte) (cipher.Stream, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + return cipher.NewCTR(c, iv), nil +} + +func newRC4(key, iv []byte) (cipher.Stream, error) { + return rc4.NewCipher(key) +} + +type cipherMode struct { + keySize int + ivSize int + create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) +} + +func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + stream, err := createFunc(key, iv) + if err != nil { + return nil, err + } + + var streamDump []byte + if skip > 0 { + streamDump = make([]byte, 512) + } + + for remainingToDump := skip; remainingToDump > 0; { + dumpThisTime := remainingToDump + if dumpThisTime > len(streamDump) { + dumpThisTime = len(streamDump) + } + stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) + remainingToDump -= dumpThisTime + } + + mac := macModes[algs.MAC].new(macKey) + return &streamPacketCipher{ + mac: mac, + etm: macModes[algs.MAC].etm, + macResult: make([]byte, mac.Size()), + cipher: stream, + }, nil + } +} + +// cipherModes documents properties of supported ciphers. Ciphers not included +// are not supported and will not be negotiated, even if explicitly requested in +// ClientConfig.Crypto.Ciphers. +var cipherModes = map[string]*cipherMode{ + // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms + // are defined in the order specified in the RFC. + "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + + // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. + // They are defined in the order specified in the RFC. + "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, + "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, + + // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. + // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and + // RC4) has problems with weak keys, and should be used with caution." + // RFC4345 introduces improved versions of Arcfour. + "arcfour": {16, 0, streamCipherMode(0, newRC4)}, + + // AEAD ciphers + gcmCipherID: {16, 12, newGCMCipher}, + chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, + + // CBC mode is insecure and so is not included in the default config. + // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely + // needed, it's possible to specify a custom Config to enable it. + // You should expect that an active attacker can recover plaintext if + // you do. + aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, + + // 3des-cbc is insecure and is not included in the default + // config. + tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, +} + +// prefixLen is the length of the packet prefix that contains the packet length +// and number of padding bytes. +const prefixLen = 5 + +// streamPacketCipher is a packetCipher using a stream cipher. +type streamPacketCipher struct { + mac hash.Hash + cipher cipher.Stream + etm bool + + // The following members are to avoid per-packet allocations. + prefix [prefixLen]byte + seqNumBytes [4]byte + padding [2 * packetSizeMultiple]byte + packetData []byte + macResult []byte +} + +// readPacket reads and decrypt a single packet from the reader argument. +func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, s.prefix[:]); err != nil { + return nil, err + } + + var encryptedPaddingLength [1]byte + if s.mac != nil && s.etm { + copy(encryptedPaddingLength[:], s.prefix[4:5]) + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } else { + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + length := binary.BigEndian.Uint32(s.prefix[0:4]) + paddingLength := uint32(s.prefix[4]) + + var macSize uint32 + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + if s.etm { + s.mac.Write(s.prefix[:4]) + s.mac.Write(encryptedPaddingLength[:]) + } else { + s.mac.Write(s.prefix[:]) + } + macSize = uint32(s.mac.Size()) + } + + if length <= paddingLength+1 { + return nil, errors.New("ssh: invalid packet length, packet too small") + } + + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + // the maxPacket check above ensures that length-1+macSize + // does not overflow. + if uint32(cap(s.packetData)) < length-1+macSize { + s.packetData = make([]byte, length-1+macSize) + } else { + s.packetData = s.packetData[:length-1+macSize] + } + + if _, err := io.ReadFull(r, s.packetData); err != nil { + return nil, err + } + mac := s.packetData[length-1:] + data := s.packetData[:length-1] + + if s.mac != nil && s.etm { + s.mac.Write(data) + } + + s.cipher.XORKeyStream(data, data) + + if s.mac != nil { + if !s.etm { + s.mac.Write(data) + } + s.macResult = s.mac.Sum(s.macResult[:0]) + if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { + return nil, errors.New("ssh: MAC failure") + } + } + + return s.packetData[:length-paddingLength-1], nil +} + +// writePacket encrypts and sends a packet of data to the writer argument +func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + if len(packet) > maxPacket { + return errors.New("ssh: packet too large") + } + + aadlen := 0 + if s.mac != nil && s.etm { + // packet length is not encrypted for EtM modes + aadlen = 4 + } + + paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple + if paddingLength < 4 { + paddingLength += packetSizeMultiple + } + + length := len(packet) + 1 + paddingLength + binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) + s.prefix[4] = byte(paddingLength) + padding := s.padding[:paddingLength] + if _, err := io.ReadFull(rand, padding); err != nil { + return err + } + + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + + if s.etm { + // For EtM algorithms, the packet length must stay unencrypted, + // but the following data (padding length) must be encrypted + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } + + s.mac.Write(s.prefix[:]) + + if !s.etm { + // For non-EtM algorithms, the algorithm is applied on unencrypted data + s.mac.Write(packet) + s.mac.Write(padding) + } + } + + if !(s.mac != nil && s.etm) { + // For EtM algorithms, the padding length has already been encrypted + // and the packet length must remain unencrypted + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + s.cipher.XORKeyStream(packet, packet) + s.cipher.XORKeyStream(padding, padding) + + if s.mac != nil && s.etm { + // For EtM algorithms, packet and padding must be encrypted + s.mac.Write(packet) + s.mac.Write(padding) + } + + if _, err := w.Write(s.prefix[:]); err != nil { + return err + } + if _, err := w.Write(packet); err != nil { + return err + } + if _, err := w.Write(padding); err != nil { + return err + } + + if s.mac != nil { + s.macResult = s.mac.Sum(s.macResult[:0]) + if _, err := w.Write(s.macResult); err != nil { + return err + } + } + + return nil +} + +type gcmCipher struct { + aead cipher.AEAD + prefix [4]byte + iv []byte + buf []byte +} + +func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aead, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + return &gcmCipher{ + aead: aead, + iv: iv, + }, nil +} + +const gcmTagSize = 16 + +func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + // Pad out to multiple of 16 bytes. This is different from the + // stream cipher because that encrypts the length too. + padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) + if padding < 4 { + padding += packetSizeMultiple + } + + length := uint32(len(packet) + int(padding) + 1) + binary.BigEndian.PutUint32(c.prefix[:], length) + if _, err := w.Write(c.prefix[:]); err != nil { + return err + } + + if cap(c.buf) < int(length) { + c.buf = make([]byte, length) + } else { + c.buf = c.buf[:length] + } + + c.buf[0] = padding + copy(c.buf[1:], packet) + if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { + return err + } + c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if _, err := w.Write(c.buf); err != nil { + return err + } + c.incIV() + + return nil +} + +func (c *gcmCipher) incIV() { + for i := 4 + 7; i >= 4; i-- { + c.iv[i]++ + if c.iv[i] != 0 { + break + } + } +} + +func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, c.prefix[:]); err != nil { + return nil, err + } + length := binary.BigEndian.Uint32(c.prefix[:]) + if length > maxPacket { + return nil, errors.New("ssh: max packet length exceeded") + } + + if cap(c.buf) < int(length+gcmTagSize) { + c.buf = make([]byte, length+gcmTagSize) + } else { + c.buf = c.buf[:length+gcmTagSize] + } + + if _, err := io.ReadFull(r, c.buf); err != nil { + return nil, err + } + + plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if err != nil { + return nil, err + } + c.incIV() + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding+1) >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + plain = plain[1 : length-uint32(padding)] + return plain, nil +} + +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 +type cbcCipher struct { + mac hash.Hash + macSize uint32 + decrypter cipher.BlockMode + encrypter cipher.BlockMode + + // The following members are to avoid per-packet allocations. + seqNumBytes [4]byte + packetData []byte + macResult []byte + + // Amount of data we should still read to hide which + // verification error triggered. + oracleCamouflage uint32 +} + +func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + cbc := &cbcCipher{ + mac: macModes[algs.MAC].new(macKey), + decrypter: cipher.NewCBCDecrypter(c, iv), + encrypter: cipher.NewCBCEncrypter(c, iv), + packetData: make([]byte, 1024), + } + if cbc.mac != nil { + cbc.macSize = uint32(cbc.mac.Size()) + } + + return cbc, nil +} + +func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func maxUInt32(a, b int) uint32 { + if a > b { + return uint32(a) + } + return uint32(b) +} + +const ( + cbcMinPacketSizeMultiple = 8 + cbcMinPacketSize = 16 + cbcMinPaddingSize = 4 +) + +// cbcError represents a verification error that may leak information. +type cbcError string + +func (e cbcError) Error() string { return string(e) } + +func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + p, err := c.readPacketLeaky(seqNum, r) + if err != nil { + if _, ok := err.(cbcError); ok { + // Verification error: read a fixed amount of + // data, to make distinguishing between + // failing MAC and failing length check more + // difficult. + io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. + firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. + if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. + if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) + if err != nil { + return nil, err + } + c.oracleCamouflage -= uint32(n) + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. + // Space for the MAC is reserved in the capacity but not the slice length. + bufferSize := encLength + c.macSize + if uint32(cap(c.packetData)) < bufferSize { + c.packetData = make([]byte, encLength, bufferSize) + } else { + c.packetData = c.packetData[:encLength] + } + + p := c.packetData + + // Packet header. + binary.BigEndian.PutUint32(p, length) + p = p[4:] + p[0] = byte(paddingLength) + + // Payload. + p = p[1:] + copy(p, packet) + + // Padding. + p = p[len(packet):] + if _, err := io.ReadFull(rand, p); err != nil { + return err + } + + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData) + // The MAC is now appended into the capacity reserved for it earlier. + c.packetData = c.mac.Sum(c.packetData) + } + + c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) + + if _, err := w.Write(c.packetData); err != nil { + return err + } + + return nil +} + +const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" + +// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com +// AEAD, which is described here: +// +// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 +// +// the methods here also implement padding, which RFC4253 Section 6 +// also requires of stream ciphers. +type chacha20Poly1305Cipher struct { + lengthKey [32]byte + contentKey [32]byte + buf []byte +} + +func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + if len(key) != 64 { + panic(len(key)) + } + + c := &chacha20Poly1305Cipher{ + buf: make([]byte, 256), + } + + copy(c.contentKey[:], key[:32]) + copy(c.lengthKey[:], key[32:]) + return c, nil +} + +// The Poly1305 key is obtained by encrypting 32 0-bytes. +var chacha20PolyKeyInput [32]byte + +func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + var counter [16]byte + binary.BigEndian.PutUint64(counter[8:], uint64(seqNum)) + + var polyKey [32]byte + chacha20.XORKeyStream(polyKey[:], chacha20PolyKeyInput[:], &counter, &c.contentKey) + + encryptedLength := c.buf[:4] + if _, err := io.ReadFull(r, encryptedLength); err != nil { + return nil, err + } + + var lenBytes [4]byte + chacha20.XORKeyStream(lenBytes[:], encryptedLength, &counter, &c.lengthKey) + + length := binary.BigEndian.Uint32(lenBytes[:]) + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + contentEnd := 4 + length + packetEnd := contentEnd + poly1305.TagSize + if uint32(cap(c.buf)) < packetEnd { + c.buf = make([]byte, packetEnd) + copy(c.buf[:], encryptedLength) + } else { + c.buf = c.buf[:packetEnd] + } + + if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { + return nil, err + } + + var mac [poly1305.TagSize]byte + copy(mac[:], c.buf[contentEnd:packetEnd]) + if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { + return nil, errors.New("ssh: MAC failure") + } + + counter[0] = 1 + + plain := c.buf[4:contentEnd] + chacha20.XORKeyStream(plain, plain, &counter, &c.contentKey) + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding)+1 >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + + plain = plain[1 : len(plain)-int(padding)] + + return plain, nil +} + +func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { + var counter [16]byte + binary.BigEndian.PutUint64(counter[8:], uint64(seqNum)) + + var polyKey [32]byte + chacha20.XORKeyStream(polyKey[:], chacha20PolyKeyInput[:], &counter, &c.contentKey) + + // There is no blocksize, so fall back to multiple of 8 byte + // padding, as described in RFC 4253, Sec 6. + const packetSizeMultiple = 8 + + padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple + if padding < 4 { + padding += packetSizeMultiple + } + + // size (4 bytes), padding (1), payload, padding, tag. + totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize + if cap(c.buf) < totalLength { + c.buf = make([]byte, totalLength) + } else { + c.buf = c.buf[:totalLength] + } + + binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) + chacha20.XORKeyStream(c.buf, c.buf[:4], &counter, &c.lengthKey) + c.buf[4] = byte(padding) + copy(c.buf[5:], payload) + packetEnd := 5 + len(payload) + padding + if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { + return err + } + + counter[0] = 1 + chacha20.XORKeyStream(c.buf[4:], c.buf[4:packetEnd], &counter, &c.contentKey) + + var mac [poly1305.TagSize]byte + poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) + + copy(c.buf[packetEnd:], mac[:]) + + if _, err := w.Write(c.buf); err != nil { + return err + } + return nil +} diff --git a/vendor/golang.org/x/crypto/ssh/cipher_test.go b/vendor/golang.org/x/crypto/ssh/cipher_test.go new file mode 100644 index 0000000..a52d6e4 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/cipher_test.go @@ -0,0 +1,131 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto" + "crypto/rand" + "testing" +) + +func TestDefaultCiphersExist(t *testing.T) { + for _, cipherAlgo := range supportedCiphers { + if _, ok := cipherModes[cipherAlgo]; !ok { + t.Errorf("supported cipher %q is unknown", cipherAlgo) + } + } + for _, cipherAlgo := range preferredCiphers { + if _, ok := cipherModes[cipherAlgo]; !ok { + t.Errorf("preferred cipher %q is unknown", cipherAlgo) + } + } +} + +func TestPacketCiphers(t *testing.T) { + defaultMac := "hmac-sha2-256" + defaultCipher := "aes128-ctr" + for cipher := range cipherModes { + t.Run("cipher="+cipher, + func(t *testing.T) { testPacketCipher(t, cipher, defaultMac) }) + } + for mac := range macModes { + t.Run("mac="+mac, + func(t *testing.T) { testPacketCipher(t, defaultCipher, mac) }) + } +} + +func testPacketCipher(t *testing.T, cipher, mac string) { + kr := &kexResult{Hash: crypto.SHA1} + algs := directionAlgorithms{ + Cipher: cipher, + MAC: mac, + Compression: "none", + } + client, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client, %q, %q): %v", cipher, mac, err) + } + server, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client, %q, %q): %v", cipher, mac, err) + } + + want := "bla bla" + input := []byte(want) + buf := &bytes.Buffer{} + if err := client.writePacket(0, buf, rand.Reader, input); err != nil { + t.Fatalf("writePacket(%q, %q): %v", cipher, mac, err) + } + + packet, err := server.readPacket(0, buf) + if err != nil { + t.Fatalf("readPacket(%q, %q): %v", cipher, mac, err) + } + + if string(packet) != want { + t.Errorf("roundtrip(%q, %q): got %q, want %q", cipher, mac, packet, want) + } +} + +func TestCBCOracleCounterMeasure(t *testing.T) { + kr := &kexResult{Hash: crypto.SHA1} + algs := directionAlgorithms{ + Cipher: aes128cbcID, + MAC: "hmac-sha1", + Compression: "none", + } + client, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client): %v", err) + } + + want := "bla bla" + input := []byte(want) + buf := &bytes.Buffer{} + if err := client.writePacket(0, buf, rand.Reader, input); err != nil { + t.Errorf("writePacket: %v", err) + } + + packetSize := buf.Len() + buf.Write(make([]byte, 2*maxPacket)) + + // We corrupt each byte, but this usually will only test the + // 'packet too large' or 'MAC failure' cases. + lastRead := -1 + for i := 0; i < packetSize; i++ { + server, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client): %v", err) + } + + fresh := &bytes.Buffer{} + fresh.Write(buf.Bytes()) + fresh.Bytes()[i] ^= 0x01 + + before := fresh.Len() + _, err = server.readPacket(0, fresh) + if err == nil { + t.Errorf("corrupt byte %d: readPacket succeeded ", i) + continue + } + if _, ok := err.(cbcError); !ok { + t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err) + continue + } + + after := fresh.Len() + bytesRead := before - after + if bytesRead < maxPacket { + t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket) + continue + } + + if i > 0 && bytesRead != lastRead { + t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead) + } + lastRead = bytesRead + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go new file mode 100644 index 0000000..6fd1994 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -0,0 +1,278 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "net" + "os" + "sync" + "time" +) + +// Client implements a traditional SSH client that supports shells, +// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. +type Client struct { + Conn + + forwards forwardList // forwarded tcpip connections from the remote side + mu sync.Mutex + channelHandlers map[string]chan NewChannel +} + +// HandleChannelOpen returns a channel on which NewChannel requests +// for the given type are sent. If the type already is being handled, +// nil is returned. The channel is closed when the connection is closed. +func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { + c.mu.Lock() + defer c.mu.Unlock() + if c.channelHandlers == nil { + // The SSH channel has been closed. + c := make(chan NewChannel) + close(c) + return c + } + + ch := c.channelHandlers[channelType] + if ch != nil { + return nil + } + + ch = make(chan NewChannel, chanSize) + c.channelHandlers[channelType] = ch + return ch +} + +// NewClient creates a Client on top of the given connection. +func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { + conn := &Client{ + Conn: c, + channelHandlers: make(map[string]chan NewChannel, 1), + } + + go conn.handleGlobalRequests(reqs) + go conn.handleChannelOpens(chans) + go func() { + conn.Wait() + conn.forwards.closeAll() + }() + go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip")) + go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-streamlocal@openssh.com")) + return conn +} + +// NewClientConn establishes an authenticated SSH connection using c +// as the underlying transport. The Request and NewChannel channels +// must be serviced or the connection will hang. +func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.HostKeyCallback == nil { + c.Close() + return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") + } + + conn := &connection{ + sshConn: sshConn{conn: c}, + } + + if err := conn.clientHandshake(addr, &fullConf); err != nil { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) + } + conn.mux = newMux(conn.transport) + return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil +} + +// clientHandshake performs the client side key exchange. See RFC 4253 Section +// 7. +func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { + if config.ClientVersion != "" { + c.clientVersion = []byte(config.ClientVersion) + } else { + c.clientVersion = []byte(packageVersion) + } + var err error + c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) + if err != nil { + return err + } + + c.transport = newClientTransport( + newTransport(c.sshConn.conn, config.Rand, true /* is client */), + c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) + if err := c.transport.waitSession(); err != nil { + return err + } + + c.sessionID = c.transport.getSessionID() + return c.clientAuthenticate(config) +} + +// verifyHostKeySignature verifies the host key obtained in the key +// exchange. +func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { + sig, rest, ok := parseSignatureBody(result.Signature) + if len(rest) > 0 || !ok { + return errors.New("ssh: signature parse error") + } + + return hostKey.Verify(result.H, sig) +} + +// NewSession opens a new Session for this client. (A session is a remote +// execution of a program.) +func (c *Client) NewSession() (*Session, error) { + ch, in, err := c.OpenChannel("session", nil) + if err != nil { + return nil, err + } + return newSession(ch, in) +} + +func (c *Client) handleGlobalRequests(incoming <-chan *Request) { + for r := range incoming { + // This handles keepalive messages and matches + // the behaviour of OpenSSH. + r.Reply(false, nil) + } +} + +// handleChannelOpens channel open messages from the remote side. +func (c *Client) handleChannelOpens(in <-chan NewChannel) { + for ch := range in { + c.mu.Lock() + handler := c.channelHandlers[ch.ChannelType()] + c.mu.Unlock() + + if handler != nil { + handler <- ch + } else { + ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) + } + } + + c.mu.Lock() + for _, ch := range c.channelHandlers { + close(ch) + } + c.channelHandlers = nil + c.mu.Unlock() +} + +// Dial starts a client connection to the given SSH server. It is a +// convenience function that connects to the given network address, +// initiates the SSH handshake, and then sets up a Client. For access +// to incoming channels and requests, use net.Dial with NewClientConn +// instead. +func Dial(network, addr string, config *ClientConfig) (*Client, error) { + conn, err := net.DialTimeout(network, addr, config.Timeout) + if err != nil { + return nil, err + } + c, chans, reqs, err := NewClientConn(conn, addr, config) + if err != nil { + return nil, err + } + return NewClient(c, chans, reqs), nil +} + +// HostKeyCallback is the function type used for verifying server +// keys. A HostKeyCallback must return nil if the host key is OK, or +// an error to reject it. It receives the hostname as passed to Dial +// or NewClientConn. The remote address is the RemoteAddr of the +// net.Conn underlying the the SSH connection. +type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error + +// BannerCallback is the function type used for treat the banner sent by +// the server. A BannerCallback receives the message sent by the remote server. +type BannerCallback func(message string) error + +// A ClientConfig structure is used to configure a Client. It must not be +// modified after having been passed to an SSH function. +type ClientConfig struct { + // Config contains configuration that is shared between clients and + // servers. + Config + + // User contains the username to authenticate as. + User string + + // Auth contains possible authentication methods to use with the + // server. Only the first instance of a particular RFC 4252 method will + // be used during authentication. + Auth []AuthMethod + + // HostKeyCallback is called during the cryptographic + // handshake to validate the server's host key. The client + // configuration must supply this callback for the connection + // to succeed. The functions InsecureIgnoreHostKey or + // FixedHostKey can be used for simplistic host key checks. + HostKeyCallback HostKeyCallback + + // BannerCallback is called during the SSH dance to display a custom + // server's message. The client configuration can supply this callback to + // handle it as wished. The function BannerDisplayStderr can be used for + // simplistic display on Stderr. + BannerCallback BannerCallback + + // ClientVersion contains the version identification string that will + // be used for the connection. If empty, a reasonable default is used. + ClientVersion string + + // HostKeyAlgorithms lists the key types that the client will + // accept from the server as host key, in order of + // preference. If empty, a reasonable default is used. Any + // string returned from PublicKey.Type method may be used, or + // any of the CertAlgoXxxx and KeyAlgoXxxx constants. + HostKeyAlgorithms []string + + // Timeout is the maximum amount of time for the TCP connection to establish. + // + // A Timeout of zero means no timeout. + Timeout time.Duration +} + +// InsecureIgnoreHostKey returns a function that can be used for +// ClientConfig.HostKeyCallback to accept any host key. It should +// not be used for production code. +func InsecureIgnoreHostKey() HostKeyCallback { + return func(hostname string, remote net.Addr, key PublicKey) error { + return nil + } +} + +type fixedHostKey struct { + key PublicKey +} + +func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { + if f.key == nil { + return fmt.Errorf("ssh: required host key was nil") + } + if !bytes.Equal(key.Marshal(), f.key.Marshal()) { + return fmt.Errorf("ssh: host key mismatch") + } + return nil +} + +// FixedHostKey returns a function for use in +// ClientConfig.HostKeyCallback to accept only a specific host key. +func FixedHostKey(key PublicKey) HostKeyCallback { + hk := &fixedHostKey{key} + return hk.check +} + +// BannerDisplayStderr returns a function that can be used for +// ClientConfig.BannerCallback to display banners on os.Stderr. +func BannerDisplayStderr() BannerCallback { + return func(banner string) error { + _, err := os.Stderr.WriteString(banner) + + return err + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go new file mode 100644 index 0000000..5f44b77 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -0,0 +1,525 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +type authResult int + +const ( + authFailure authResult = iota + authPartialSuccess + authSuccess +) + +// clientAuthenticate authenticates with the remote server. See RFC 4252. +func (c *connection) clientAuthenticate(config *ClientConfig) error { + // initiate user auth session + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + return err + } + packet, err := c.transport.readPacket() + if err != nil { + return err + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + return err + } + + // during the authentication phase the client first attempts the "none" method + // then any untried methods suggested by the server. + tried := make(map[string]bool) + var lastMethods []string + + sessionID := c.transport.getSessionID() + for auth := AuthMethod(new(noneAuth)); auth != nil; { + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + if err != nil { + return err + } + if ok == authSuccess { + // success + return nil + } else if ok == authFailure { + tried[auth.method()] = true + } + if methods == nil { + methods = lastMethods + } + lastMethods = methods + + auth = nil + + findNext: + for _, a := range config.Auth { + candidateMethod := a.method() + if tried[candidateMethod] { + continue + } + for _, meth := range methods { + if meth == candidateMethod { + auth = a + break findNext + } + } + } + } + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) +} + +func keys(m map[string]bool) []string { + s := make([]string, 0, len(m)) + + for key := range m { + s = append(s, key) + } + return s +} + +// An AuthMethod represents an instance of an RFC 4252 authentication method. +type AuthMethod interface { + // auth authenticates user over transport t. + // Returns true if authentication is successful. + // If authentication is not successful, a []string of alternative + // method names is returned. If the slice is nil, it will be ignored + // and the previous set of possible methods will be reused. + auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) + + // method returns the RFC 4252 method name. + method() string +} + +// "none" authentication, RFC 4252 section 5.2. +type noneAuth int + +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + if err := c.writePacket(Marshal(&userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: "none", + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (n *noneAuth) method() string { + return "none" +} + +// passwordCallback is an AuthMethod that fetches the password through +// a function call, e.g. by prompting the user. +type passwordCallback func() (password string, err error) + +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + type passwordAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + Reply bool + Password string + } + + pw, err := cb() + // REVIEW NOTE: is there a need to support skipping a password attempt? + // The program may only find out that the user doesn't have a password + // when prompting. + if err != nil { + return authFailure, nil, err + } + + if err := c.writePacket(Marshal(&passwordAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + Reply: false, + Password: pw, + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (cb passwordCallback) method() string { + return "password" +} + +// Password returns an AuthMethod using the given password. +func Password(secret string) AuthMethod { + return passwordCallback(func() (string, error) { return secret, nil }) +} + +// PasswordCallback returns an AuthMethod that uses a callback for +// fetching a password. +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { + return passwordCallback(prompt) +} + +type publickeyAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + // HasSig indicates to the receiver packet that the auth request is signed and + // should be used for authentication of the request. + HasSig bool + Algoname string + PubKey []byte + // Sig is tagged with "rest" so Marshal will exclude it during + // validateKey + Sig []byte `ssh:"rest"` +} + +// publicKeyCallback is an AuthMethod that uses a set of key +// pairs for authentication. +type publicKeyCallback func() ([]Signer, error) + +func (cb publicKeyCallback) method() string { + return "publickey" +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + // Authentication is performed by sending an enquiry to test if a key is + // acceptable to the remote. If the key is acceptable, the client will + // attempt to authenticate with the valid key. If not the client will repeat + // the process with the remaining keys. + + signers, err := cb() + if err != nil { + return authFailure, nil, err + } + var methods []string + for _, signer := range signers { + ok, err := validateKey(signer.PublicKey(), user, c) + if err != nil { + return authFailure, nil, err + } + if !ok { + continue + } + + pub := signer.PublicKey() + pubKey := pub.Marshal() + sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, []byte(pub.Type()), pubKey)) + if err != nil { + return authFailure, nil, err + } + + // manually wrap the serialized signature in a string + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: pub.Type(), + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return authFailure, nil, err + } + var success authResult + success, methods, err = handleAuthResponse(c) + if err != nil { + return authFailure, nil, err + } + + // If authentication succeeds or the list of available methods does not + // contain the "publickey" method, do not attempt to authenticate with any + // other keys. According to RFC 4252 Section 7, the latter can occur when + // additional authentication methods are required. + if success == authSuccess || !containsMethod(methods, cb.method()) { + return success, methods, err + } + } + + return authFailure, methods, nil +} + +func containsMethod(methods []string, method string) bool { + for _, m := range methods { + if m == method { + return true + } + } + + return false +} + +// validateKey validates the key provided is acceptable to the server. +func validateKey(key PublicKey, user string, c packetConn) (bool, error) { + pubKey := key.Marshal() + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: "publickey", + HasSig: false, + Algoname: key.Type(), + PubKey: pubKey, + } + if err := c.writePacket(Marshal(&msg)); err != nil { + return false, err + } + + return confirmKeyAck(key, c) +} + +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { + pubKey := key.Marshal() + algoname := key.Type() + + for { + packet, err := c.readPacket() + if err != nil { + return false, err + } + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return false, err + } + case msgUserAuthPubKeyOk: + var msg userAuthPubKeyOkMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, err + } + if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { + return false, nil + } + return true, nil + case msgUserAuthFailure: + return false, nil + default: + return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +// PublicKeys returns an AuthMethod that uses the given key +// pairs. +func PublicKeys(signers ...Signer) AuthMethod { + return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) +} + +// PublicKeysCallback returns an AuthMethod that runs the given +// function to obtain a list of key pairs. +func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { + return publicKeyCallback(getSigners) +} + +// handleAuthResponse returns whether the preceding authentication request succeeded +// along with a list of remaining authentication methods to try next and +// an error if an unexpected response was received. +func handleAuthResponse(c packetConn) (authResult, []string, error) { + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +func handleBannerResponse(c packetConn, packet []byte) error { + var msg userAuthBannerMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + transport, ok := c.(*handshakeTransport) + if !ok { + return nil + } + + if transport.bannerCallback != nil { + return transport.bannerCallback(msg.Message) + } + + return nil +} + +// KeyboardInteractiveChallenge should print questions, optionally +// disabling echoing (e.g. for passwords), and return all the answers. +// Challenge may be called multiple times in a single session. After +// successful authentication, the server may send a challenge with no +// questions, for which the user and instruction messages should be +// printed. RFC 4256 section 3.3 details how the UI should behave for +// both CLI and GUI environments. +type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) + +// KeyboardInteractive returns an AuthMethod using a prompt/response +// sequence controlled by the server. +func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { + return challenge +} + +func (cb KeyboardInteractiveChallenge) method() string { + return "keyboard-interactive" +} + +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + type initiateMsg struct { + User string `sshtype:"50"` + Service string + Method string + Language string + Submethods string + } + + if err := c.writePacket(Marshal(&initiateMsg{ + User: user, + Service: serviceSSH, + Method: "keyboard-interactive", + })); err != nil { + return authFailure, nil, err + } + + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + // like handleAuthResponse, but with less options. + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + continue + case msgUserAuthInfoRequest: + // OK + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + + var msg userAuthInfoRequestMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + + // Manually unpack the prompt/echo pairs. + rest := msg.Prompts + var prompts []string + var echos []bool + for i := 0; i < int(msg.NumPrompts); i++ { + prompt, r, ok := parseString(rest) + if !ok || len(r) == 0 { + return authFailure, nil, errors.New("ssh: prompt format error") + } + prompts = append(prompts, string(prompt)) + echos = append(echos, r[0] != 0) + rest = r[1:] + } + + if len(rest) != 0 { + return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") + } + + answers, err := cb(msg.User, msg.Instruction, prompts, echos) + if err != nil { + return authFailure, nil, err + } + + if len(answers) != len(prompts) { + return authFailure, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") + } + responseLength := 1 + 4 + for _, a := range answers { + responseLength += stringLength(len(a)) + } + serialized := make([]byte, responseLength) + p := serialized + p[0] = msgUserAuthInfoResponse + p = p[1:] + p = marshalUint32(p, uint32(len(answers))) + for _, a := range answers { + p = marshalString(p, []byte(a)) + } + + if err := c.writePacket(serialized); err != nil { + return authFailure, nil, err + } + } +} + +type retryableAuthMethod struct { + authMethod AuthMethod + maxTries int +} + +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { + for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { + ok, methods, err = r.authMethod.auth(session, user, c, rand) + if ok != authFailure || err != nil { // either success, partial success or error terminate + return ok, methods, err + } + } + return ok, methods, err +} + +func (r *retryableAuthMethod) method() string { + return r.authMethod.method() +} + +// RetryableAuthMethod is a decorator for other auth methods enabling them to +// be retried up to maxTries before considering that AuthMethod itself failed. +// If maxTries is <= 0, will retry indefinitely +// +// This is useful for interactive clients using challenge/response type +// authentication (e.g. Keyboard-Interactive, Password, etc) where the user +// could mistype their response resulting in the server issuing a +// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 +// [keyboard-interactive]); Without this decorator, the non-retryable +// AuthMethod would be removed from future consideration, and never tried again +// (and so the user would never be able to retry their entry). +func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { + return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} +} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth_test.go b/vendor/golang.org/x/crypto/ssh/client_auth_test.go new file mode 100644 index 0000000..5fbb20d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_auth_test.go @@ -0,0 +1,628 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "os" + "strings" + "testing" +) + +type keyboardInteractive map[string]string + +func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) { + var answers []string + for _, q := range questions { + answers = append(answers, cr[q]) + } + return answers, nil +} + +// reused internally by tests +var clientPassword = "tiger" + +// tryAuth runs a handshake with a given config against an SSH server +// with config serverConfig +func tryAuth(t *testing.T, config *ClientConfig) error { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + certChecker := CertChecker{ + IsUserAuthority: func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal()) + }, + UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) { + return nil, nil + } + + return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User()) + }, + IsRevoked: func(c *Certificate) bool { + return c.Serial == 666 + }, + } + + serverConfig := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) { + if conn.User() == "testuser" && string(pass) == clientPassword { + return nil, nil + } + return nil, errors.New("password auth failed") + }, + PublicKeyCallback: certChecker.Authenticate, + KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) { + ans, err := challenge("user", + "instruction", + []string{"question1", "question2"}, + []bool{true, true}) + if err != nil { + return nil, err + } + ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2" + if ok { + challenge("user", "motd", nil, nil) + return nil, nil + } + return nil, errors.New("keyboard-interactive failed") + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + go newServer(c1, serverConfig) + _, _, _, err = NewClientConn(c2, "", config) + return err +} + +func TestClientAuthPublicKey(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodPassword(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodFallback(t *testing.T) { + var passwordCalled bool + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + PasswordCallback( + func() (string, error) { + passwordCalled = true + return "WRONG", nil + }), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + + if passwordCalled { + t.Errorf("password auth tried before public-key auth.") + } +} + +func TestAuthMethodWrongPassword(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + Password("wrong"), + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodKeyboardInteractive(t *testing.T) { + answers := keyboardInteractive(map[string]string{ + "question1": "answer1", + "question2": "answer2", + }) + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + KeyboardInteractive(answers.Challenge), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodWrongKeyboardInteractive(t *testing.T) { + answers := keyboardInteractive(map[string]string{ + "question1": "answer1", + "question2": "WRONG", + }) + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + KeyboardInteractive(answers.Challenge), + }, + } + + if err := tryAuth(t, config); err == nil { + t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive") + } +} + +// the mock server will only authenticate ssh-rsa keys +func TestAuthMethodInvalidPublicKey(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["dsa"]), + }, + } + + if err := tryAuth(t, config); err == nil { + t.Fatalf("dsa private key should not have authenticated with rsa public key") + } +} + +// the client should authenticate with the second key +func TestAuthMethodRSAandDSA(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["dsa"], testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("client could not authenticate with rsa key: %v", err) + } +} + +func TestClientHMAC(t *testing.T) { + for _, mac := range supportedMACs { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + Config: Config{ + MACs: []string{mac}, + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err) + } + } +} + +// issue 4285. +func TestClientUnsupportedCipher(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(), + }, + Config: Config{ + Ciphers: []string{"aes128-cbc"}, // not currently supported + }, + } + if err := tryAuth(t, config); err == nil { + t.Errorf("expected no ciphers in common") + } +} + +func TestClientUnsupportedKex(t *testing.T) { + if os.Getenv("GO_BUILDER_NAME") != "" { + t.Skip("skipping known-flaky test on the Go build dashboard; see golang.org/issue/15198") + } + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(), + }, + Config: Config{ + KeyExchanges: []string{"diffie-hellman-group-exchange-sha256"}, // not currently supported + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") { + t.Errorf("got %v, expected 'common algorithm'", err) + } +} + +func TestClientLoginCert(t *testing.T) { + cert := &Certificate{ + Key: testPublicKeys["rsa"], + ValidBefore: CertTimeInfinity, + CertType: UserCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + certSigner, err := NewCertSigner(cert, testSigners["rsa"]) + if err != nil { + t.Fatalf("NewCertSigner: %v", err) + } + + clientConfig := &ClientConfig{ + User: "user", + HostKeyCallback: InsecureIgnoreHostKey(), + } + clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner)) + + // should succeed + if err := tryAuth(t, clientConfig); err != nil { + t.Errorf("cert login failed: %v", err) + } + + // corrupted signature + cert.Signature.Blob[0]++ + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with corrupted sig") + } + + // revoked + cert.Serial = 666 + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("revoked cert login succeeded") + } + cert.Serial = 1 + + // sign with wrong key + cert.SignCert(rand.Reader, testSigners["dsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with non-authoritative key") + } + + // host cert + cert.CertType = HostCert + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with wrong type") + } + cert.CertType = UserCert + + // principal specified + cert.ValidPrincipals = []string{"user"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err != nil { + t.Errorf("cert login failed: %v", err) + } + + // wrong principal specified + cert.ValidPrincipals = []string{"fred"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with wrong principal") + } + cert.ValidPrincipals = nil + + // added critical option + cert.CriticalOptions = map[string]string{"root-access": "yes"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with unrecognized critical option") + } + + // allowed source address + cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24,::42/120"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err != nil { + t.Errorf("cert login with source-address failed: %v", err) + } + + // disallowed source address + cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42,::42"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login with source-address succeeded") + } +} + +func testPermissionsPassing(withPermissions bool, t *testing.T) { + serverConfig := &ServerConfig{ + PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if conn.User() == "nopermissions" { + return nil, nil + } + return &Permissions{}, nil + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + clientConfig := &ClientConfig{ + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if withPermissions { + clientConfig.User = "permissions" + } else { + clientConfig.User = "nopermissions" + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + serverConn, err := newServer(c1, serverConfig) + if err != nil { + t.Fatal(err) + } + if p := serverConn.Permissions; (p != nil) != withPermissions { + t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p) + } +} + +func TestPermissionsPassing(t *testing.T) { + testPermissionsPassing(true, t) +} + +func TestNoPermissionsPassing(t *testing.T) { + testPermissionsPassing(false, t) +} + +func TestRetryableAuth(t *testing.T) { + n := 0 + passwords := []string{"WRONG1", "WRONG2"} + + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + RetryableAuthMethod(PasswordCallback(func() (string, error) { + p := passwords[n] + n++ + return p, nil + }), 2), + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + if n != 2 { + t.Fatalf("Did not try all passwords") + } +} + +func ExampleRetryableAuthMethod(t *testing.T) { + user := "testuser" + NumberOfPrompts := 3 + + // Normally this would be a callback that prompts the user to answer the + // provided questions + Cb := func(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + return []string{"answer1", "answer2"}, nil + } + + config := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + User: user, + Auth: []AuthMethod{ + RetryableAuthMethod(KeyboardInteractiveChallenge(Cb), NumberOfPrompts), + }, + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +// Test if username is received on server side when NoClientAuth is used +func TestClientAuthNone(t *testing.T) { + user := "testuser" + serverConfig := &ServerConfig{ + NoClientAuth: true, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + clientConfig := &ClientConfig{ + User: user, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + serverConn, err := newServer(c1, serverConfig) + if err != nil { + t.Fatalf("newServer: %v", err) + } + if serverConn.User() != user { + t.Fatalf("server: got %q, want %q", serverConn.User(), user) + } +} + +// Test if authentication attempts are limited on server when MaxAuthTries is set +func TestClientAuthMaxAuthTries(t *testing.T) { + user := "testuser" + + serverConfig := &ServerConfig{ + MaxAuthTries: 2, + PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) { + if conn.User() == "testuser" && string(pass) == "right" { + return nil, nil + } + return nil, errors.New("password auth failed") + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + }) + + for tries := 2; tries < 4; tries++ { + n := tries + clientConfig := &ClientConfig{ + User: user, + Auth: []AuthMethod{ + RetryableAuthMethod(PasswordCallback(func() (string, error) { + n-- + if n == 0 { + return "right", nil + } + return "wrong", nil + }), tries), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go newServer(c1, serverConfig) + _, _, _, err = NewClientConn(c2, "", clientConfig) + if tries > 2 { + if err == nil { + t.Fatalf("client: got no error, want %s", expectedErr) + } else if err.Error() != expectedErr.Error() { + t.Fatalf("client: got %s, want %s", err, expectedErr) + } + } else { + if err != nil { + t.Fatalf("client: got %s, want no error", err) + } + } + } +} + +// Test if authentication attempts are correctly limited on server +// when more public keys are provided then MaxAuthTries +func TestClientAuthMaxAuthTriesPublicKey(t *testing.T) { + signers := []Signer{} + for i := 0; i < 6; i++ { + signers = append(signers, testSigners["dsa"]) + } + + validConfig := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(append([]Signer{testSigners["rsa"]}, signers...)...), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, validConfig); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + + expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + }) + invalidConfig := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(append(signers, testSigners["rsa"])...), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, invalidConfig); err == nil { + t.Fatalf("client: got no error, want %s", expectedErr) + } else if err.Error() != expectedErr.Error() { + t.Fatalf("client: got %s, want %s", err, expectedErr) + } +} + +// Test whether authentication errors are being properly logged if all +// authentication methods have been exhausted +func TestClientAuthErrorList(t *testing.T) { + publicKeyErr := errors.New("This is an error from PublicKeyCallback") + + clientConfig := &ClientConfig{ + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + serverConfig := &ServerConfig{ + PublicKeyCallback: func(_ ConnMetadata, _ PublicKey) (*Permissions, error) { + return nil, publicKeyErr + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + _, err = newServer(c1, serverConfig) + if err == nil { + t.Fatal("newServer: got nil, expected errors") + } + + authErrs, ok := err.(*ServerAuthError) + if !ok { + t.Fatalf("errors: got %T, want *ssh.ServerAuthError", err) + } + for i, e := range authErrs.Errors { + switch i { + case 0: + if e != ErrNoAuth { + t.Fatalf("errors: got error %v, want ErrNoAuth", e) + } + case 1: + if e != publicKeyErr { + t.Fatalf("errors: got %v, want %v", e, publicKeyErr) + } + default: + t.Fatalf("errors: got %v, expected 2 errors", authErrs.Errors) + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client_test.go b/vendor/golang.org/x/crypto/ssh/client_test.go new file mode 100644 index 0000000..81f9599 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_test.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "strings" + "testing" +) + +func TestClientVersion(t *testing.T) { + for _, tt := range []struct { + name string + version string + multiLine string + wantErr bool + }{ + { + name: "default version", + version: packageVersion, + }, + { + name: "custom version", + version: "SSH-2.0-CustomClientVersionString", + }, + { + name: "good multi line version", + version: packageVersion, + multiLine: strings.Repeat("ignored\r\n", 20), + }, + { + name: "bad multi line version", + version: packageVersion, + multiLine: "bad multi line version", + wantErr: true, + }, + { + name: "long multi line version", + version: packageVersion, + multiLine: strings.Repeat("long multi line version\r\n", 50)[:256], + wantErr: true, + }, + } { + t.Run(tt.name, func(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + go func() { + if tt.multiLine != "" { + c1.Write([]byte(tt.multiLine)) + } + NewClientConn(c1, "", &ClientConfig{ + ClientVersion: tt.version, + HostKeyCallback: InsecureIgnoreHostKey(), + }) + c1.Close() + }() + conf := &ServerConfig{NoClientAuth: true} + conf.AddHostKey(testSigners["rsa"]) + conn, _, _, err := NewServerConn(c2, conf) + if err == nil == tt.wantErr { + t.Fatalf("got err %v; wantErr %t", err, tt.wantErr) + } + if tt.wantErr { + // Don't verify the version on an expected error. + return + } + if got := string(conn.ClientVersion()); got != tt.version { + t.Fatalf("got %q; want %q", got, tt.version) + } + }) + } +} + +func TestHostKeyCheck(t *testing.T) { + for _, tt := range []struct { + name string + wantError string + key PublicKey + }{ + {"no callback", "must specify HostKeyCallback", nil}, + {"correct key", "", testSigners["rsa"].PublicKey()}, + {"mismatch", "mismatch", testSigners["ecdsa"].PublicKey()}, + } { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + + go NewServerConn(c1, serverConf) + clientConf := ClientConfig{ + User: "user", + } + if tt.key != nil { + clientConf.HostKeyCallback = FixedHostKey(tt.key) + } + + _, _, _, err = NewClientConn(c2, "", &clientConf) + if err != nil { + if tt.wantError == "" || !strings.Contains(err.Error(), tt.wantError) { + t.Errorf("%s: got error %q, missing %q", tt.name, err.Error(), tt.wantError) + } + } else if tt.wantError != "" { + t.Errorf("%s: succeeded, but want error string %q", tt.name, tt.wantError) + } + } +} + +func TestBannerCallback(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverConf := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { + return &Permissions{}, nil + }, + BannerCallback: func(conn ConnMetadata) string { + return "Hello World" + }, + } + serverConf.AddHostKey(testSigners["rsa"]) + go NewServerConn(c1, serverConf) + + var receivedBanner string + var bannerCount int + clientConf := ClientConfig{ + Auth: []AuthMethod{ + Password("123"), + }, + User: "user", + HostKeyCallback: InsecureIgnoreHostKey(), + BannerCallback: func(message string) error { + bannerCount++ + receivedBanner = message + return nil + }, + } + + _, _, _, err = NewClientConn(c2, "", &clientConf) + if err != nil { + t.Fatal(err) + } + + if bannerCount != 1 { + t.Errorf("got %d banners; want 1", bannerCount) + } + + expected := "Hello World" + if receivedBanner != expected { + t.Fatalf("got %s; want %s", receivedBanner, expected) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go new file mode 100644 index 0000000..04f3620 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -0,0 +1,383 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" + "math" + "sync" + + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +// These are string constants in the SSH protocol. +const ( + compressionNone = "none" + serviceUserAuth = "ssh-userauth" + serviceSSH = "ssh-connection" +) + +// supportedCiphers lists ciphers we support but might not recommend. +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", + chacha20Poly1305ID, + "arcfour256", "arcfour128", "arcfour", + aes128cbcID, + tripledescbcID, +} + +// preferredCiphers specifies the default preference for ciphers. +var preferredCiphers = []string{ + "aes128-gcm@openssh.com", + chacha20Poly1305ID, + "aes128-ctr", "aes192-ctr", "aes256-ctr", +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA1, kexAlgoDH1SHA1, +} + +// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods +// of authenticating servers) in preference order. +var supportedHostKeyAlgos = []string{ + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, + + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSA, KeyAlgoDSA, + + KeyAlgoED25519, +} + +// supportedMACs specifies a default set of MAC algorithms in preference order. +// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed +// because they have reached the end of their useful life. +var supportedMACs = []string{ + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", +} + +var supportedCompressions = []string{compressionNone} + +// hashFuncs keeps the mapping of supported algorithms to their respective +// hashes needed for signature verification. +var hashFuncs = map[string]crypto.Hash{ + KeyAlgoRSA: crypto.SHA1, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + CertAlgoRSAv01: crypto.SHA1, + CertAlgoDSAv01: crypto.SHA1, + CertAlgoECDSA256v01: crypto.SHA256, + CertAlgoECDSA384v01: crypto.SHA384, + CertAlgoECDSA521v01: crypto.SHA512, +} + +// unexpectedMessageError results when the SSH message that we received didn't +// match what we wanted. +func unexpectedMessageError(expected, got uint8) error { + return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) +} + +// parseError results from a malformed SSH message. +func parseError(tag uint8) error { + return fmt.Errorf("ssh: parse error in message type %d", tag) +} + +func findCommon(what string, client []string, server []string) (common string, err error) { + for _, c := range client { + for _, s := range server { + if c == s { + return c, nil + } + } + } + return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) +} + +type directionAlgorithms struct { + Cipher string + MAC string + Compression string +} + +// rekeyBytes returns a rekeying intervals in bytes. +func (a *directionAlgorithms) rekeyBytes() int64 { + // According to RFC4344 block ciphers should rekey after + // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is + // 128. + switch a.Cipher { + case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: + return 16 * (1 << 32) + + } + + // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. + return 1 << 30 +} + +type algorithms struct { + kex string + hostKey string + w directionAlgorithms + r directionAlgorithms +} + +func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { + result := &algorithms{} + + result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) + if err != nil { + return + } + + result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) + if err != nil { + return + } + + result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) + if err != nil { + return + } + + result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) + if err != nil { + return + } + + result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } + + result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } + + result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) + if err != nil { + return + } + + result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) + if err != nil { + return + } + + return result, nil +} + +// If rekeythreshold is too small, we can't make any progress sending +// stuff. +const minRekeyThreshold uint64 = 256 + +// Config contains configuration data common to both ServerConfig and +// ClientConfig. +type Config struct { + // Rand provides the source of entropy for cryptographic + // primitives. If Rand is nil, the cryptographic random reader + // in package crypto/rand will be used. + Rand io.Reader + + // The maximum number of bytes sent or received after which a + // new key is negotiated. It must be at least 256. If + // unspecified, a size suitable for the chosen cipher is used. + RekeyThreshold uint64 + + // The allowed key exchanges algorithms. If unspecified then a + // default set of algorithms is used. + KeyExchanges []string + + // The allowed cipher algorithms. If unspecified then a sensible + // default is used. + Ciphers []string + + // The allowed MAC algorithms. If unspecified then a sensible default + // is used. + MACs []string +} + +// SetDefaults sets sensible values for unset fields in config. This is +// exported for testing: Configs passed to SSH functions are copied and have +// default values set automatically. +func (c *Config) SetDefaults() { + if c.Rand == nil { + c.Rand = rand.Reader + } + if c.Ciphers == nil { + c.Ciphers = preferredCiphers + } + var ciphers []string + for _, c := range c.Ciphers { + if cipherModes[c] != nil { + // reject the cipher if we have no cipherModes definition + ciphers = append(ciphers, c) + } + } + c.Ciphers = ciphers + + if c.KeyExchanges == nil { + c.KeyExchanges = supportedKexAlgos + } + + if c.MACs == nil { + c.MACs = supportedMACs + } + + if c.RekeyThreshold == 0 { + // cipher specific default + } else if c.RekeyThreshold < minRekeyThreshold { + c.RekeyThreshold = minRekeyThreshold + } else if c.RekeyThreshold >= math.MaxInt64 { + // Avoid weirdness if somebody uses -1 as a threshold. + c.RekeyThreshold = math.MaxInt64 + } +} + +// buildDataSignedForAuth returns the data that is signed in order to prove +// possession of a private key. See RFC 4252, section 7. +func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { + data := struct { + Session []byte + Type byte + User string + Service string + Method string + Sign bool + Algo []byte + PubKey []byte + }{ + sessionID, + msgUserAuthRequest, + req.User, + req.Service, + req.Method, + true, + algo, + pubKey, + } + return Marshal(data) +} + +func appendU16(buf []byte, n uint16) []byte { + return append(buf, byte(n>>8), byte(n)) +} + +func appendU32(buf []byte, n uint32) []byte { + return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendU64(buf []byte, n uint64) []byte { + return append(buf, + byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), + byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendInt(buf []byte, n int) []byte { + return appendU32(buf, uint32(n)) +} + +func appendString(buf []byte, s string) []byte { + buf = appendU32(buf, uint32(len(s))) + buf = append(buf, s...) + return buf +} + +func appendBool(buf []byte, b bool) []byte { + if b { + return append(buf, 1) + } + return append(buf, 0) +} + +// newCond is a helper to hide the fact that there is no usable zero +// value for sync.Cond. +func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } + +// window represents the buffer available to clients +// wishing to write to a channel. +type window struct { + *sync.Cond + win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 + writeWaiters int + closed bool +} + +// add adds win to the amount of window available +// for consumers. +func (w *window) add(win uint32) bool { + // a zero sized window adjust is a noop. + if win == 0 { + return true + } + w.L.Lock() + if w.win+win < win { + w.L.Unlock() + return false + } + w.win += win + // It is unusual that multiple goroutines would be attempting to reserve + // window space, but not guaranteed. Use broadcast to notify all waiters + // that additional window is available. + w.Broadcast() + w.L.Unlock() + return true +} + +// close sets the window to closed, so all reservations fail +// immediately. +func (w *window) close() { + w.L.Lock() + w.closed = true + w.Broadcast() + w.L.Unlock() +} + +// reserve reserves win from the available window capacity. +// If no capacity remains, reserve will block. reserve may +// return less than requested. +func (w *window) reserve(win uint32) (uint32, error) { + var err error + w.L.Lock() + w.writeWaiters++ + w.Broadcast() + for w.win == 0 && !w.closed { + w.Wait() + } + w.writeWaiters-- + if w.win < win { + win = w.win + } + w.win -= win + if w.closed { + err = io.EOF + } + w.L.Unlock() + return win, err +} + +// waitWriterBlocked waits until some goroutine is blocked for further +// writes. It is used in tests only. +func (w *window) waitWriterBlocked() { + w.Cond.L.Lock() + for w.writeWaiters == 0 { + w.Cond.Wait() + } + w.Cond.L.Unlock() +} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go new file mode 100644 index 0000000..fd6b068 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/connection.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "fmt" + "net" +) + +// OpenChannelError is returned if the other side rejects an +// OpenChannel request. +type OpenChannelError struct { + Reason RejectionReason + Message string +} + +func (e *OpenChannelError) Error() string { + return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) +} + +// ConnMetadata holds metadata for the connection. +type ConnMetadata interface { + // User returns the user ID for this connection. + User() string + + // SessionID returns the session hash, also denoted by H. + SessionID() []byte + + // ClientVersion returns the client's version string as hashed + // into the session ID. + ClientVersion() []byte + + // ServerVersion returns the server's version string as hashed + // into the session ID. + ServerVersion() []byte + + // RemoteAddr returns the remote address for this connection. + RemoteAddr() net.Addr + + // LocalAddr returns the local address for this connection. + LocalAddr() net.Addr +} + +// Conn represents an SSH connection for both server and client roles. +// Conn is the basis for implementing an application layer, such +// as ClientConn, which implements the traditional shell access for +// clients. +type Conn interface { + ConnMetadata + + // SendRequest sends a global request, and returns the + // reply. If wantReply is true, it returns the response status + // and payload. See also RFC4254, section 4. + SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) + + // OpenChannel tries to open an channel. If the request is + // rejected, it returns *OpenChannelError. On success it returns + // the SSH Channel and a Go channel for incoming, out-of-band + // requests. The Go channel must be serviced, or the + // connection will hang. + OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) + + // Close closes the underlying network connection + Close() error + + // Wait blocks until the connection has shut down, and returns the + // error causing the shutdown. + Wait() error + + // TODO(hanwen): consider exposing: + // RequestKeyChange + // Disconnect +} + +// DiscardRequests consumes and rejects all requests from the +// passed-in channel. +func DiscardRequests(in <-chan *Request) { + for req := range in { + if req.WantReply { + req.Reply(false, nil) + } + } +} + +// A connection represents an incoming connection. +type connection struct { + transport *handshakeTransport + sshConn + + // The connection protocol. + *mux +} + +func (c *connection) Close() error { + return c.sshConn.conn.Close() +} + +// sshconn provides net.Conn metadata, but disallows direct reads and +// writes. +type sshConn struct { + conn net.Conn + + user string + sessionID []byte + clientVersion []byte + serverVersion []byte +} + +func dup(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} + +func (c *sshConn) User() string { + return c.user +} + +func (c *sshConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *sshConn) Close() error { + return c.conn.Close() +} + +func (c *sshConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *sshConn) SessionID() []byte { + return dup(c.sessionID) +} + +func (c *sshConn) ClientVersion() []byte { + return dup(c.clientVersion) +} + +func (c *sshConn) ServerVersion() []byte { + return dup(c.serverVersion) +} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go new file mode 100644 index 0000000..67b7322 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -0,0 +1,21 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package ssh implements an SSH client and server. + +SSH is a transport security protocol, an authentication protocol and a +family of application protocols. The most typical application level +protocol is a remote shell and this is specifically implemented. However, +the multiplexed nature of SSH is exposed to users that wish to support +others. + +References: + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 + +This package does not fall under the stability promise of the Go language itself, +so its API may be changed when pressing needs arise. +*/ +package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/example_test.go b/vendor/golang.org/x/crypto/ssh/example_test.go new file mode 100644 index 0000000..b910c7b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/example_test.go @@ -0,0 +1,320 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh_test + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "path/filepath" + "strings" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/terminal" +) + +func ExampleNewServerConn() { + // Public key authentication is done by comparing + // the public key of a received connection + // with the entries in the authorized_keys file. + authorizedKeysBytes, err := ioutil.ReadFile("authorized_keys") + if err != nil { + log.Fatalf("Failed to load authorized_keys, err: %v", err) + } + + authorizedKeysMap := map[string]bool{} + for len(authorizedKeysBytes) > 0 { + pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes) + if err != nil { + log.Fatal(err) + } + + authorizedKeysMap[string(pubKey.Marshal())] = true + authorizedKeysBytes = rest + } + + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. + config := &ssh.ServerConfig{ + // Remove to disable password auth. + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. + if c.User() == "testuser" && string(pass) == "tiger" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + + // Remove to disable public key auth. + PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) { + if authorizedKeysMap[string(pubKey.Marshal())] { + return &ssh.Permissions{ + // Record the public key used for authentication. + Extensions: map[string]string{ + "pubkey-fp": ssh.FingerprintSHA256(pubKey), + }, + }, nil + } + return nil, fmt.Errorf("unknown public key for %q", c.User()) + }, + } + + privateBytes, err := ioutil.ReadFile("id_rsa") + if err != nil { + log.Fatal("Failed to load private key: ", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key: ", err) + } + + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + log.Fatal("failed to listen for connection: ", err) + } + nConn, err := listener.Accept() + if err != nil { + log.Fatal("failed to accept incoming connection: ", err) + } + + // Before use, a handshake must be performed on the incoming + // net.Conn. + conn, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + log.Fatal("failed to handshake: ", err) + } + log.Printf("logged in with key %s", conn.Permissions.Extensions["pubkey-fp"]) + + // The incoming Request channel must be serviced. + go ssh.DiscardRequests(reqs) + + // Service the incoming Channel channel. + for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of a shell, the type is + // "session" and ServerShell may be used to present a simple + // terminal interface. + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + log.Fatalf("Could not accept channel: %v", err) + } + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "shell" request. + go func(in <-chan *ssh.Request) { + for req := range in { + req.Reply(req.Type == "shell", nil) + } + }(requests) + + term := terminal.NewTerminal(channel, "> ") + + go func() { + defer channel.Close() + for { + line, err := term.ReadLine() + if err != nil { + break + } + fmt.Println(line) + } + }() + } +} + +func ExampleHostKeyCheck() { + // Every client must provide a host key check. Here is a + // simple-minded parse of OpenSSH's known_hosts file + host := "hostname" + file, err := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts")) + if err != nil { + log.Fatal(err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + var hostKey ssh.PublicKey + for scanner.Scan() { + fields := strings.Split(scanner.Text(), " ") + if len(fields) != 3 { + continue + } + if strings.Contains(fields[0], host) { + var err error + hostKey, _, _, _, err = ssh.ParseAuthorizedKey(scanner.Bytes()) + if err != nil { + log.Fatalf("error parsing %q: %v", fields[2], err) + } + break + } + } + + if hostKey == nil { + log.Fatalf("no hostkey for %s", host) + } + + config := ssh.ClientConfig{ + User: os.Getenv("USER"), + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + + _, err = ssh.Dial("tcp", host+":22", &config) + log.Println(err) +} + +func ExampleDial() { + var hostKey ssh.PublicKey + // An SSH client is represented with a ClientConn. + // + // To authenticate with the remote server you must pass at least one + // implementation of AuthMethod via the Auth field in ClientConfig, + // and provide a HostKeyCallback. + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("yourpassword"), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + client, err := ssh.Dial("tcp", "yourserver.com:22", config) + if err != nil { + log.Fatal("Failed to dial: ", err) + } + + // Each ClientConn can support multiple interactive sessions, + // represented by a Session. + session, err := client.NewSession() + if err != nil { + log.Fatal("Failed to create session: ", err) + } + defer session.Close() + + // Once a Session is created, you can execute a single command on + // the remote side using the Run method. + var b bytes.Buffer + session.Stdout = &b + if err := session.Run("/usr/bin/whoami"); err != nil { + log.Fatal("Failed to run: " + err.Error()) + } + fmt.Println(b.String()) +} + +func ExamplePublicKeys() { + var hostKey ssh.PublicKey + // A public key may be used to authenticate against the remote + // server by using an unencrypted PEM-encoded private key file. + // + // If you have an encrypted private key, the crypto/x509 package + // can be used to decrypt it. + key, err := ioutil.ReadFile("/home/user/.ssh/id_rsa") + if err != nil { + log.Fatalf("unable to read private key: %v", err) + } + + // Create the Signer for this private key. + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + log.Fatalf("unable to parse private key: %v", err) + } + + config := &ssh.ClientConfig{ + User: "user", + Auth: []ssh.AuthMethod{ + // Use the PublicKeys method for remote authentication. + ssh.PublicKeys(signer), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + + // Connect to the remote server and perform the SSH handshake. + client, err := ssh.Dial("tcp", "host.com:22", config) + if err != nil { + log.Fatalf("unable to connect: %v", err) + } + defer client.Close() +} + +func ExampleClient_Listen() { + var hostKey ssh.PublicKey + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("password"), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + // Dial your ssh server. + conn, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatal("unable to connect: ", err) + } + defer conn.Close() + + // Request the remote side to open port 8080 on all interfaces. + l, err := conn.Listen("tcp", "0.0.0.0:8080") + if err != nil { + log.Fatal("unable to register tcp forward: ", err) + } + defer l.Close() + + // Serve HTTP with your SSH server acting as a reverse proxy. + http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + fmt.Fprintf(resp, "Hello world!\n") + })) +} + +func ExampleSession_RequestPty() { + var hostKey ssh.PublicKey + // Create client config + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("password"), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + // Connect to ssh server + conn, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatal("unable to connect: ", err) + } + defer conn.Close() + // Create a session + session, err := conn.NewSession() + if err != nil { + log.Fatal("unable to create session: ", err) + } + defer session.Close() + // Set up terminal modes + modes := ssh.TerminalModes{ + ssh.ECHO: 0, // disable echoing + ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud + ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud + } + // Request pseudo terminal + if err := session.RequestPty("xterm", 40, 80, modes); err != nil { + log.Fatal("request for pseudo terminal failed: ", err) + } + // Start remote shell + if err := session.Shell(); err != nil { + log.Fatal("failed to start shell: ", err) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go new file mode 100644 index 0000000..4f7912e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -0,0 +1,646 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "log" + "net" + "sync" +) + +// debugHandshake, if set, prints messages sent and received. Key +// exchange messages are printed as if DH were used, so the debug +// messages are wrong when using ECDH. +const debugHandshake = false + +// chanSize sets the amount of buffering SSH connections. This is +// primarily for testing: setting chanSize=0 uncovers deadlocks more +// quickly. +const chanSize = 16 + +// keyingTransport is a packet based transport that supports key +// changes. It need not be thread-safe. It should pass through +// msgNewKeys in both directions. +type keyingTransport interface { + packetConn + + // prepareKeyChange sets up a key change. The key change for a + // direction will be effected if a msgNewKeys message is sent + // or received. + prepareKeyChange(*algorithms, *kexResult) error +} + +// handshakeTransport implements rekeying on top of a keyingTransport +// and offers a thread-safe writePacket() interface. +type handshakeTransport struct { + conn keyingTransport + config *Config + + serverVersion []byte + clientVersion []byte + + // hostKeys is non-empty if we are the server. In that case, + // it contains all host keys that can be used to sign the + // connection. + hostKeys []Signer + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string + + // On read error, incoming is closed, and readError is set. + incoming chan []byte + readError error + + mu sync.Mutex + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + pendingPackets [][]byte // Used when a key exchange is in progress. + + // If the read loop wants to schedule a kex, it pings this + // channel, and the write loop will send out a kex + // message. + requestKex chan struct{} + + // If the other side requests or confirms a kex, its kexInit + // packet is sent here for the write loop to find it. + startKex chan *pendingKex + + // data for host key checking + hostKeyCallback HostKeyCallback + dialAddress string + remoteAddr net.Addr + + // bannerCallback is non-empty if we are the client and it has been set in + // ClientConfig. In that case it is called during the user authentication + // dance to handle a custom server's message. + bannerCallback BannerCallback + + // Algorithms agreed in the last key exchange. + algorithms *algorithms + + readPacketsLeft uint32 + readBytesLeft int64 + + writePacketsLeft uint32 + writeBytesLeft int64 + + // The session ID or nil if first kex did not complete yet. + sessionID []byte +} + +type pendingKex struct { + otherInit []byte + done chan error +} + +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { + t := &handshakeTransport{ + conn: conn, + serverVersion: serverVersion, + clientVersion: clientVersion, + incoming: make(chan []byte, chanSize), + requestKex: make(chan struct{}, 1), + startKex: make(chan *pendingKex, 1), + + config: config, + } + t.resetReadThresholds() + t.resetWriteThresholds() + + // We always start with a mandatory key exchange. + t.requestKex <- struct{}{} + return t +} + +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.dialAddress = dialAddr + t.remoteAddr = addr + t.hostKeyCallback = config.HostKeyCallback + t.bannerCallback = config.BannerCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } + go t.readLoop() + go t.kexLoop() + return t +} + +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.hostKeys = config.hostKeys + go t.readLoop() + go t.kexLoop() + return t +} + +func (t *handshakeTransport) getSessionID() []byte { + return t.sessionID +} + +// waitSession waits for the session to be established. This should be +// the first thing to call after instantiating handshakeTransport. +func (t *handshakeTransport) waitSession() error { + p, err := t.readPacket() + if err != nil { + return err + } + if p[0] != msgNewKeys { + return fmt.Errorf("ssh: first packet should be msgNewKeys") + } + + return nil +} + +func (t *handshakeTransport) id() string { + if len(t.hostKeys) > 0 { + return "server" + } + return "client" +} + +func (t *handshakeTransport) printPacket(p []byte, write bool) { + action := "got" + if write { + action = "sent" + } + + if p[0] == msgChannelData || p[0] == msgChannelExtendedData { + log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) + } else { + msg, err := decode(p) + log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) + } +} + +func (t *handshakeTransport) readPacket() ([]byte, error) { + p, ok := <-t.incoming + if !ok { + return nil, t.readError + } + return p, nil +} + +func (t *handshakeTransport) readLoop() { + first := true + for { + p, err := t.readOnePacket(first) + first = false + if err != nil { + t.readError = err + close(t.incoming) + break + } + if p[0] == msgIgnore || p[0] == msgDebug { + continue + } + t.incoming <- p + } + + // Stop writers too. + t.recordWriteError(t.readError) + + // Unblock the writer should it wait for this. + close(t.startKex) + + // Don't close t.requestKex; it's also written to from writePacket. +} + +func (t *handshakeTransport) pushPacket(p []byte) error { + if debugHandshake { + t.printPacket(p, true) + } + return t.conn.writePacket(p) +} + +func (t *handshakeTransport) getWriteError() error { + t.mu.Lock() + defer t.mu.Unlock() + return t.writeError +} + +func (t *handshakeTransport) recordWriteError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil && err != nil { + t.writeError = err + } +} + +func (t *handshakeTransport) requestKeyExchange() { + select { + case t.requestKex <- struct{}{}: + default: + // something already requested a kex, so do nothing. + } +} + +func (t *handshakeTransport) resetWriteThresholds() { + t.writePacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.writeBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.writeBytesLeft = t.algorithms.w.rekeyBytes() + } else { + t.writeBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) kexLoop() { + +write: + for t.getWriteError() == nil { + var request *pendingKex + var sent bool + + for request == nil || !sent { + var ok bool + select { + case request, ok = <-t.startKex: + if !ok { + break write + } + case <-t.requestKex: + break + } + + if !sent { + if err := t.sendKexInit(); err != nil { + t.recordWriteError(err) + break + } + sent = true + } + } + + if err := t.getWriteError(); err != nil { + if request != nil { + request.done <- err + } + break + } + + // We're not servicing t.requestKex, but that is OK: + // we never block on sending to t.requestKex. + + // We're not servicing t.startKex, but the remote end + // has just sent us a kexInitMsg, so it can't send + // another key change request, until we close the done + // channel on the pendingKex request. + + err := t.enterKeyExchange(request.otherInit) + + t.mu.Lock() + t.writeError = err + t.sentInitPacket = nil + t.sentInitMsg = nil + + t.resetWriteThresholds() + + // we have completed the key exchange. Since the + // reader is still blocked, it is safe to clear out + // the requestKex channel. This avoids the situation + // where: 1) we consumed our own request for the + // initial kex, and 2) the kex from the remote side + // caused another send on the requestKex channel, + clear: + for { + select { + case <-t.requestKex: + // + default: + break clear + } + } + + request.done <- t.writeError + + // kex finished. Push packets that we received while + // the kex was in progress. Don't look at t.startKex + // and don't increment writtenSinceKex: if we trigger + // another kex while we are still busy with the last + // one, things will become very confusing. + for _, p := range t.pendingPackets { + t.writeError = t.pushPacket(p) + if t.writeError != nil { + break + } + } + t.pendingPackets = t.pendingPackets[:0] + t.mu.Unlock() + } + + // drain startKex channel. We don't service t.requestKex + // because nobody does blocking sends there. + go func() { + for init := range t.startKex { + init.done <- t.writeError + } + }() + + // Unblock reader. + t.conn.Close() +} + +// The protocol uses uint32 for packet counters, so we can't let them +// reach 1<<32. We will actually read and write more packets than +// this, though: the other side may send more packets, and after we +// hit this limit on writing we will send a few more packets for the +// key exchange itself. +const packetRekeyThreshold = (1 << 31) + +func (t *handshakeTransport) resetReadThresholds() { + t.readPacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.readBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.readBytesLeft = t.algorithms.r.rekeyBytes() + } else { + t.readBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { + p, err := t.conn.readPacket() + if err != nil { + return nil, err + } + + if t.readPacketsLeft > 0 { + t.readPacketsLeft-- + } else { + t.requestKeyExchange() + } + + if t.readBytesLeft > 0 { + t.readBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if debugHandshake { + t.printPacket(p, false) + } + + if first && p[0] != msgKexInit { + return nil, fmt.Errorf("ssh: first packet should be msgKexInit") + } + + if p[0] != msgKexInit { + return p, nil + } + + firstKex := t.sessionID == nil + + kex := pendingKex{ + done: make(chan error, 1), + otherInit: p, + } + t.startKex <- &kex + err = <-kex.done + + if debugHandshake { + log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) + } + + if err != nil { + return nil, err + } + + t.resetReadThresholds() + + // By default, a key exchange is hidden from higher layers by + // translating it into msgIgnore. + successPacket := []byte{msgIgnore} + if firstKex { + // sendKexInit() for the first kex waits for + // msgNewKeys so the authentication process is + // guaranteed to happen over an encrypted transport. + successPacket = []byte{msgNewKeys} + } + + return successPacket, nil +} + +// sendKexInit sends a key change message. +func (t *handshakeTransport) sendKexInit() error { + t.mu.Lock() + defer t.mu.Unlock() + if t.sentInitMsg != nil { + // kexInits may be sent either in response to the other side, + // or because our side wants to initiate a key change, so we + // may have already sent a kexInit. In that case, don't send a + // second kexInit. + return nil + } + + msg := &kexInitMsg{ + KexAlgos: t.config.KeyExchanges, + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, + MACsServerClient: t.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + + if len(t.hostKeys) > 0 { + for _, k := range t.hostKeys { + msg.ServerHostKeyAlgos = append( + msg.ServerHostKeyAlgos, k.PublicKey().Type()) + } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + } + packet := Marshal(msg) + + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + + if err := t.pushPacket(packetCopy); err != nil { + return err + } + + t.sentInitMsg = msg + t.sentInitPacket = packet + + return nil +} + +func (t *handshakeTransport) writePacket(p []byte) error { + switch p[0] { + case msgKexInit: + return errors.New("ssh: only handshakeTransport can send kexInit") + case msgNewKeys: + return errors.New("ssh: only handshakeTransport can send newKeys") + } + + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError != nil { + return t.writeError + } + + if t.sentInitMsg != nil { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + + if t.writeBytesLeft > 0 { + t.writeBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if t.writePacketsLeft > 0 { + t.writePacketsLeft-- + } else { + t.requestKeyExchange() + } + + if err := t.pushPacket(p); err != nil { + t.writeError = err + } + + return nil +} + +func (t *handshakeTransport) Close() error { + return t.conn.Close() +} + +func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + if debugHandshake { + log.Printf("%s entered key exchange", t.id()) + } + + otherInit := &kexInitMsg{} + if err := Unmarshal(otherInitPacket, otherInit); err != nil { + return err + } + + magics := handshakeMagics{ + clientVersion: t.clientVersion, + serverVersion: t.serverVersion, + clientKexInit: otherInitPacket, + serverKexInit: t.sentInitPacket, + } + + clientInit := otherInit + serverInit := t.sentInitMsg + if len(t.hostKeys) == 0 { + clientInit, serverInit = serverInit, clientInit + + magics.clientKexInit = t.sentInitPacket + magics.serverKexInit = otherInitPacket + } + + var err error + t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit) + if err != nil { + return err + } + + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for + // first_kex_packet_follows. It states that the guessed packet + // should be ignored if the "kex algorithm and/or the host + // key algorithm is guessed wrong (server and client have + // different preferred algorithm), or if any of the other + // algorithms cannot be agreed upon". The other algorithms have + // already been checked above so the kex algorithm and host key + // algorithm are checked here. + if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { + // other side sent a kex message for the wrong algorithm, + // which we have to ignore. + if _, err := t.conn.readPacket(); err != nil { + return err + } + } + + kex, ok := kexAlgoMap[t.algorithms.kex] + if !ok { + return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) + } + + var result *kexResult + if len(t.hostKeys) > 0 { + result, err = t.server(kex, t.algorithms, &magics) + } else { + result, err = t.client(kex, t.algorithms, &magics) + } + + if err != nil { + return err + } + + if t.sessionID == nil { + t.sessionID = result.H + } + result.SessionID = t.sessionID + + if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { + return err + } + if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { + return err + } + if packet, err := t.conn.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + var hostKey Signer + for _, k := range t.hostKeys { + if algs.hostKey == k.PublicKey().Type() { + hostKey = k + } + } + + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + return r, err +} + +func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + result, err := kex.Client(t.conn, t.config.Rand, magics) + if err != nil { + return nil, err + } + + hostKey, err := ParsePublicKey(result.HostKey) + if err != nil { + return nil, err + } + + if err := verifyHostKeySignature(hostKey, result); err != nil { + return nil, err + } + + err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) + if err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/handshake_test.go b/vendor/golang.org/x/crypto/ssh/handshake_test.go new file mode 100644 index 0000000..91d4935 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/handshake_test.go @@ -0,0 +1,559 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "io" + "net" + "reflect" + "runtime" + "strings" + "sync" + "testing" +) + +type testChecker struct { + calls []string +} + +func (t *testChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { + if dialAddr == "bad" { + return fmt.Errorf("dialAddr is bad") + } + + if tcpAddr, ok := addr.(*net.TCPAddr); !ok || tcpAddr == nil { + return fmt.Errorf("testChecker: got %T want *net.TCPAddr", addr) + } + + t.calls = append(t.calls, fmt.Sprintf("%s %v %s %x", dialAddr, addr, key.Type(), key.Marshal())) + + return nil +} + +// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and +// therefore is buffered (net.Pipe deadlocks if both sides start with +// a write.) +func netPipe() (net.Conn, net.Conn, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + listener, err = net.Listen("tcp", "[::1]:0") + if err != nil { + return nil, nil, err + } + } + defer listener.Close() + c1, err := net.Dial("tcp", listener.Addr().String()) + if err != nil { + return nil, nil, err + } + + c2, err := listener.Accept() + if err != nil { + c1.Close() + return nil, nil, err + } + + return c1, c2, nil +} + +// noiseTransport inserts ignore messages to check that the read loop +// and the key exchange filters out these messages. +type noiseTransport struct { + keyingTransport +} + +func (t *noiseTransport) writePacket(p []byte) error { + ignore := []byte{msgIgnore} + if err := t.keyingTransport.writePacket(ignore); err != nil { + return err + } + debug := []byte{msgDebug, 1, 2, 3} + if err := t.keyingTransport.writePacket(debug); err != nil { + return err + } + + return t.keyingTransport.writePacket(p) +} + +func addNoiseTransport(t keyingTransport) keyingTransport { + return &noiseTransport{t} +} + +// handshakePair creates two handshakeTransports connected with each +// other. If the noise argument is true, both transports will try to +// confuse the other side by sending ignore and debug messages. +func handshakePair(clientConf *ClientConfig, addr string, noise bool) (client *handshakeTransport, server *handshakeTransport, err error) { + a, b, err := netPipe() + if err != nil { + return nil, nil, err + } + + var trC, trS keyingTransport + + trC = newTransport(a, rand.Reader, true) + trS = newTransport(b, rand.Reader, false) + if noise { + trC = addNoiseTransport(trC) + trS = addNoiseTransport(trS) + } + clientConf.SetDefaults() + + v := []byte("version") + client = newClientTransport(trC, v, v, clientConf, addr, a.RemoteAddr()) + + serverConf := &ServerConfig{} + serverConf.AddHostKey(testSigners["ecdsa"]) + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.SetDefaults() + server = newServerTransport(trS, v, v, serverConf) + + if err := server.waitSession(); err != nil { + return nil, nil, fmt.Errorf("server.waitSession: %v", err) + } + if err := client.waitSession(); err != nil { + return nil, nil, fmt.Errorf("client.waitSession: %v", err) + } + + return client, server, nil +} + +func TestHandshakeBasic(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } + + checker := &syncChecker{ + waitCall: make(chan int, 10), + called: make(chan int, 10), + } + + checker.waitCall <- 1 + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + // Let first kex complete normally. + <-checker.called + + clientDone := make(chan int, 0) + gotHalf := make(chan int, 0) + const N = 20 + + go func() { + defer close(clientDone) + // Client writes a bunch of stuff, and does a key + // change in the middle. This should not confuse the + // handshake in progress. We do this twice, so we test + // that the packet buffer is reset correctly. + for i := 0; i < N; i++ { + p := []byte{msgRequestSuccess, byte(i)} + if err := trC.writePacket(p); err != nil { + t.Fatalf("sendPacket: %v", err) + } + if (i % 10) == 5 { + <-gotHalf + // halfway through, we request a key change. + trC.requestKeyExchange() + + // Wait until we can be sure the key + // change has really started before we + // write more. + <-checker.called + } + if (i % 10) == 7 { + // write some packets until the kex + // completes, to test buffering of + // packets. + checker.waitCall <- 1 + } + } + }() + + // Server checks that client messages come in cleanly + i := 0 + err = nil + for ; i < N; i++ { + var p []byte + p, err = trS.readPacket() + if err != nil { + break + } + if (i % 10) == 5 { + gotHalf <- 1 + } + + want := []byte{msgRequestSuccess, byte(i)} + if bytes.Compare(p, want) != 0 { + t.Errorf("message %d: got %v, want %v", i, p, want) + } + } + <-clientDone + if err != nil && err != io.EOF { + t.Fatalf("server error: %v", err) + } + if i != N { + t.Errorf("received %d messages, want 10.", i) + } + + close(checker.called) + if _, ok := <-checker.called; ok { + // If all went well, we registered exactly 2 key changes: one + // that establishes the session, and one that we requested + // additionally. + t.Fatalf("got another host key checks after 2 handshakes") + } +} + +func TestForceFirstKex(t *testing.T) { + // like handshakePair, but must access the keyingTransport. + checker := &testChecker{} + clientConf := &ClientConfig{HostKeyCallback: checker.Check} + a, b, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + var trC, trS keyingTransport + + trC = newTransport(a, rand.Reader, true) + + // This is the disallowed packet: + trC.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})) + + // Rest of the setup. + trS = newTransport(b, rand.Reader, false) + clientConf.SetDefaults() + + v := []byte("version") + client := newClientTransport(trC, v, v, clientConf, "addr", a.RemoteAddr()) + + serverConf := &ServerConfig{} + serverConf.AddHostKey(testSigners["ecdsa"]) + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.SetDefaults() + server := newServerTransport(trS, v, v, serverConf) + + defer client.Close() + defer server.Close() + + // We setup the initial key exchange, but the remote side + // tries to send serviceRequestMsg in cleartext, which is + // disallowed. + + if err := server.waitSession(); err == nil { + t.Errorf("server first kex init should reject unexpected packet") + } +} + +func TestHandshakeAutoRekeyWrite(t *testing.T) { + checker := &syncChecker{ + called: make(chan int, 10), + waitCall: nil, + } + clientConf := &ClientConfig{HostKeyCallback: checker.Check} + clientConf.RekeyThreshold = 500 + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + input := make([]byte, 251) + input[0] = msgRequestSuccess + + done := make(chan int, 1) + const numPacket = 5 + go func() { + defer close(done) + j := 0 + for ; j < numPacket; j++ { + if p, err := trS.readPacket(); err != nil { + break + } else if !bytes.Equal(input, p) { + t.Errorf("got packet type %d, want %d", p[0], input[0]) + } + } + + if j != numPacket { + t.Errorf("got %d, want 5 messages", j) + } + }() + + <-checker.called + + for i := 0; i < numPacket; i++ { + p := make([]byte, len(input)) + copy(p, input) + if err := trC.writePacket(p); err != nil { + t.Errorf("writePacket: %v", err) + } + if i == 2 { + // Make sure the kex is in progress. + <-checker.called + } + + } + <-done +} + +type syncChecker struct { + waitCall chan int + called chan int +} + +func (c *syncChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { + c.called <- 1 + if c.waitCall != nil { + <-c.waitCall + } + return nil +} + +func TestHandshakeAutoRekeyRead(t *testing.T) { + sync := &syncChecker{ + called: make(chan int, 2), + waitCall: nil, + } + clientConf := &ClientConfig{ + HostKeyCallback: sync.Check, + } + clientConf.RekeyThreshold = 500 + + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + packet := make([]byte, 501) + packet[0] = msgRequestSuccess + if err := trS.writePacket(packet); err != nil { + t.Fatalf("writePacket: %v", err) + } + + // While we read out the packet, a key change will be + // initiated. + done := make(chan int, 1) + go func() { + defer close(done) + if _, err := trC.readPacket(); err != nil { + t.Fatalf("readPacket(client): %v", err) + } + + }() + + <-done + <-sync.called +} + +// errorKeyingTransport generates errors after a given number of +// read/write operations. +type errorKeyingTransport struct { + packetConn + readLeft, writeLeft int +} + +func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error { + return nil +} + +func (n *errorKeyingTransport) getSessionID() []byte { + return nil +} + +func (n *errorKeyingTransport) writePacket(packet []byte) error { + if n.writeLeft == 0 { + n.Close() + return errors.New("barf") + } + + n.writeLeft-- + return n.packetConn.writePacket(packet) +} + +func (n *errorKeyingTransport) readPacket() ([]byte, error) { + if n.readLeft == 0 { + n.Close() + return nil, errors.New("barf") + } + + n.readLeft-- + return n.packetConn.readPacket() +} + +func TestHandshakeErrorHandlingRead(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, i, -1, false) + } +} + +func TestHandshakeErrorHandlingWrite(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, -1, i, false) + } +} + +func TestHandshakeErrorHandlingReadCoupled(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, i, -1, true) + } +} + +func TestHandshakeErrorHandlingWriteCoupled(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, -1, i, true) + } +} + +// testHandshakeErrorHandlingN runs handshakes, injecting errors. If +// handshakeTransport deadlocks, the go runtime will detect it and +// panic. +func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int, coupled bool) { + msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)}) + + a, b := memPipe() + defer a.Close() + defer b.Close() + + key := testSigners["ecdsa"] + serverConf := Config{RekeyThreshold: minRekeyThreshold} + serverConf.SetDefaults() + serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'}) + serverConn.hostKeys = []Signer{key} + go serverConn.readLoop() + go serverConn.kexLoop() + + clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold} + clientConf.SetDefaults() + clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'}) + clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()} + clientConn.hostKeyCallback = InsecureIgnoreHostKey() + go clientConn.readLoop() + go clientConn.kexLoop() + + var wg sync.WaitGroup + + for _, hs := range []packetConn{serverConn, clientConn} { + if !coupled { + wg.Add(2) + go func(c packetConn) { + for i := 0; ; i++ { + str := fmt.Sprintf("%08x", i) + strings.Repeat("x", int(minRekeyThreshold)/4-8) + err := c.writePacket(Marshal(&serviceRequestMsg{str})) + if err != nil { + break + } + } + wg.Done() + c.Close() + }(hs) + go func(c packetConn) { + for { + _, err := c.readPacket() + if err != nil { + break + } + } + wg.Done() + }(hs) + } else { + wg.Add(1) + go func(c packetConn) { + for { + _, err := c.readPacket() + if err != nil { + break + } + if err := c.writePacket(msg); err != nil { + break + } + + } + wg.Done() + }(hs) + } + } + wg.Wait() +} + +func TestDisconnect(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } + checker := &testChecker{} + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + errMsg := &disconnectMsg{ + Reason: 42, + Message: "such is life", + } + trC.writePacket(Marshal(errMsg)) + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + + packet, err := trS.readPacket() + if err != nil { + t.Fatalf("readPacket 1: %v", err) + } + if packet[0] != msgRequestSuccess { + t.Errorf("got packet %v, want packet type %d", packet, msgRequestSuccess) + } + + _, err = trS.readPacket() + if err == nil { + t.Errorf("readPacket 2 succeeded") + } else if !reflect.DeepEqual(err, errMsg) { + t.Errorf("got error %#v, want %#v", err, errMsg) + } + + _, err = trS.readPacket() + if err == nil { + t.Errorf("readPacket 3 succeeded") + } +} + +func TestHandshakeRekeyDefault(t *testing.T) { + clientConf := &ClientConfig{ + Config: Config{ + Ciphers: []string{"aes128-ctr"}, + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + trC.Close() + + rgb := (1024 + trC.readBytesLeft) >> 30 + wgb := (1024 + trC.writeBytesLeft) >> 30 + + if rgb != 64 { + t.Errorf("got rekey after %dG read, want 64G", rgb) + } + if wgb != 64 { + t.Errorf("got rekey after %dG write, want 64G", wgb) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go new file mode 100644 index 0000000..f34bcc0 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -0,0 +1,540 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" + + "golang.org/x/crypto/curve25519" +) + +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" +) + +// kexResult captures the outcome of a key exchange. +type kexResult struct { + // Session hash. See also RFC 4253, section 8. + H []byte + + // Shared secret. See also RFC 4253, section 8. + K []byte + + // Host key as hashed into H. + HostKey []byte + + // Signature of H. + Signature []byte + + // A cryptographic hash function that matches the security + // level of the key exchange algorithm. It is used for + // calculating H, and for deriving keys from H and K. + Hash crypto.Hash + + // The session ID, which is the first H computed. This is used + // to derive key material inside the transport. + SessionID []byte +} + +// handshakeMagics contains data that is always included in the +// session hash. +type handshakeMagics struct { + clientVersion, serverVersion []byte + clientKexInit, serverKexInit []byte +} + +func (m *handshakeMagics) write(w io.Writer) { + writeString(w, m.clientVersion) + writeString(w, m.serverVersion) + writeString(w, m.clientKexInit) + writeString(w, m.serverKexInit) +} + +// kexAlgorithm abstracts different key exchange algorithms. +type kexAlgorithm interface { + // Server runs server-side key agreement, signing the result + // with a hostkey. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + + // Client runs the client-side key agreement. Caller is + // responsible for verifying the host key signature. + Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) +} + +// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. +type dhGroup struct { + g, p, pMinus1 *big.Int +} + +func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil +} + +func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + hashFunc := crypto.SHA1 + + var x *big.Int + for { + var err error + if x, err = rand.Int(randSource, group.pMinus1); err != nil { + return nil, err + } + if x.Sign() > 0 { + break + } + } + + X := new(big.Int).Exp(group.g, x, group.p) + kexDHInit := kexDHInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHInit)); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHReply kexDHReplyMsg + if err = Unmarshal(packet, &kexDHReply); err != nil { + return nil, err + } + + ki, err := group.diffieHellman(kexDHReply.Y, x) + if err != nil { + return nil, err + } + + h := hashFunc.New() + magics.write(h) + writeString(h, kexDHReply.HostKey) + writeInt(h, X) + writeInt(h, kexDHReply.Y) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHReply.HostKey, + Signature: kexDHReply.Signature, + Hash: crypto.SHA1, + }, nil +} + +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + hashFunc := crypto.SHA1 + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHInit kexDHInitMsg + if err = Unmarshal(packet, &kexDHInit); err != nil { + return + } + + var y *big.Int + for { + if y, err = rand.Int(randSource, group.pMinus1); err != nil { + return + } + if y.Sign() > 0 { + break + } + } + + Y := new(big.Int).Exp(group.g, y, group.p) + ki, err := group.diffieHellman(kexDHInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeInt(h, kexDHInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, randSource, H) + if err != nil { + return nil, err + } + + kexDHReply := kexDHReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHReply) + + err = c.writePacket(packet) + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA1, + }, nil +} + +// ecdh performs Elliptic Curve Diffie-Hellman key exchange as +// described in RFC 5656, section 4. +type ecdh struct { + curve elliptic.Curve +} + +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + kexInit := kexECDHInitMsg{ + ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), + } + + serialized := Marshal(&kexInit) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) + if err != nil { + return nil, err + } + + // generate shared secret + secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kexInit.ClientPubKey) + writeString(h, reply.EphemeralPubKey) + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: ecHash(kex.curve), + }, nil +} + +// unmarshalECKey parses and checks an EC key. +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { + x, y = elliptic.Unmarshal(curve, pubkey) + if x == nil { + return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") + } + if !validateECPublicKey(curve, x, y) { + return nil, nil, errors.New("ssh: public key not on curve") + } + return x, y, nil +} + +// validateECPublicKey checks that the point is a valid public key for +// the given curve. See [SEC1], 3.2.2 +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { + if x.Sign() == 0 && y.Sign() == 0 { + return false + } + + if x.Cmp(curve.Params().P) >= 0 { + return false + } + + if y.Cmp(curve.Params().P) >= 0 { + return false + } + + if !curve.IsOnCurve(x, y) { + return false + } + + // We don't check if N * PubKey == 0, since + // + // - the NIST curves have cofactor = 1, so this is implicit. + // (We don't foresee an implementation that supports non NIST + // curves) + // + // - for ephemeral keys, we don't need to worry about small + // subgroup attacks. + return true +} + +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexECDHInit kexECDHInitMsg + if err = Unmarshal(packet, &kexECDHInit); err != nil { + return nil, err + } + + clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) + if err != nil { + return nil, err + } + + // We could cache this key across multiple users/multiple + // connection attempts, but the benefit is small. OpenSSH + // generates a new key for each incoming connection. + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) + + // generate shared secret + secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexECDHInit.ClientPubKey) + writeString(h, serializedEphKey) + + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: serializedEphKey, + HostKey: hostKeyBytes, + Signature: sig, + } + + serialized := Marshal(&reply) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + return &kexResult{ + H: H, + K: K, + HostKey: reply.HostKey, + Signature: sig, + Hash: ecHash(kex.curve), + }, nil +} + +var kexAlgoMap = map[string]kexAlgorithm{} + +func init() { + // This is the group called diffie-hellman-group1-sha1 in RFC + // 4253 and Oakley Group 2 in RFC 2409. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) + kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} + kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} + kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} +} + +// curve25519sha256 implements the curve25519-sha256@libssh.org key +// agreement protocol, as described in +// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. +var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/kex_test.go b/vendor/golang.org/x/crypto/ssh/kex_test.go new file mode 100644 index 0000000..12ca0ac --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex_test.go @@ -0,0 +1,50 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Key exchange tests. + +import ( + "crypto/rand" + "reflect" + "testing" +) + +func TestKexes(t *testing.T) { + type kexResultErr struct { + result *kexResult + err error + } + + for name, kex := range kexAlgoMap { + a, b := memPipe() + + s := make(chan kexResultErr, 1) + c := make(chan kexResultErr, 1) + var magics handshakeMagics + go func() { + r, e := kex.Client(a, rand.Reader, &magics) + a.Close() + c <- kexResultErr{r, e} + }() + go func() { + r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"]) + b.Close() + s <- kexResultErr{r, e} + }() + + clientRes := <-c + serverRes := <-s + if clientRes.err != nil { + t.Errorf("client: %v", clientRes.err) + } + if serverRes.err != nil { + t.Errorf("server: %v", serverRes.err) + } + if !reflect.DeepEqual(clientRes.result, serverRes.result) { + t.Errorf("kex %q: mismatch %#v, %#v", name, clientRes.result, serverRes.result) + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go new file mode 100644 index 0000000..73697de --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -0,0 +1,1032 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/md5" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "strings" + + "golang.org/x/crypto/ed25519" +) + +// These constants represent the algorithm names for key types supported by this +// package. +const ( + KeyAlgoRSA = "ssh-rsa" + KeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" +) + +// parsePubKey parses a public key of the given algorithm. +// Use ParsePublicKey for keys with prepended algorithm. +func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { + switch algo { + case KeyAlgoRSA: + return parseRSA(in) + case KeyAlgoDSA: + return parseDSA(in) + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + return parseECDSA(in) + case KeyAlgoED25519: + return parseED25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + cert, err := parseCert(in, certToPrivAlgo(algo)) + if err != nil { + return nil, nil, err + } + return cert, nil, nil + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) +} + +// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format +// (see sshd(8) manual page) once the options and key type fields have been +// removed. +func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { + in = bytes.TrimSpace(in) + + i := bytes.IndexAny(in, " \t") + if i == -1 { + i = len(in) + } + base64Key := in[:i] + + key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) + n, err := base64.StdEncoding.Decode(key, base64Key) + if err != nil { + return nil, "", err + } + key = key[:n] + out, err = ParsePublicKey(key) + if err != nil { + return nil, "", err + } + comment = string(bytes.TrimSpace(in[i:])) + return out, comment, nil +} + +// ParseKnownHosts parses an entry in the format of the known_hosts file. +// +// The known_hosts format is documented in the sshd(8) manual page. This +// function will parse a single entry from in. On successful return, marker +// will contain the optional marker value (i.e. "cert-authority" or "revoked") +// or else be empty, hosts will contain the hosts that this entry matches, +// pubKey will contain the public key and comment will contain any trailing +// comment at the end of the line. See the sshd(8) manual page for the various +// forms that a host string can take. +// +// The unparsed remainder of the input will be returned in rest. This function +// can be called repeatedly to parse multiple entries. +// +// If no entries were found in the input then err will be io.EOF. Otherwise a +// non-nil err value indicates a parse error. +func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + // Strip out the beginning of the known_host key. + // This is either an optional marker or a (set of) hostname(s). + keyFields := bytes.Fields(in) + if len(keyFields) < 3 || len(keyFields) > 5 { + return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") + } + + // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated + // list of hosts + marker := "" + if keyFields[0][0] == '@' { + marker = string(keyFields[0][1:]) + keyFields = keyFields[1:] + } + + hosts := string(keyFields[0]) + // keyFields[1] contains the key type (e.g. “ssh-rsa”). + // However, that information is duplicated inside the + // base64-encoded key and so is ignored here. + + key := bytes.Join(keyFields[2:], []byte(" ")) + if pubKey, comment, err = parseAuthorizedKey(key); err != nil { + return "", nil, nil, "", nil, err + } + + return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil + } + + return "", nil, nil, "", nil, io.EOF +} + +// ParseAuthorizedKeys parses a public key from an authorized_keys +// file used in OpenSSH according to the sshd(8) manual page. +func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + return out, comment, options, rest, nil + } + + // No key type recognised. Maybe there's an options field at + // the beginning. + var b byte + inQuote := false + var candidateOptions []string + optionStart := 0 + for i, b = range in { + isEnd := !inQuote && (b == ' ' || b == '\t') + if (b == ',' && !inQuote) || isEnd { + if i-optionStart > 0 { + candidateOptions = append(candidateOptions, string(in[optionStart:i])) + } + optionStart = i + 1 + } + if isEnd { + break + } + if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { + inQuote = !inQuote + } + } + for i < len(in) && (in[i] == ' ' || in[i] == '\t') { + i++ + } + if i == len(in) { + // Invalid line: unmatched quote + in = rest + continue + } + + in = in[i:] + i = bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + options = candidateOptions + return out, comment, options, rest, nil + } + + in = rest + continue + } + + return nil, "", nil, nil, errors.New("ssh: no key found") +} + +// ParsePublicKey parses an SSH public key formatted for use in +// the SSH wire protocol according to RFC 4253, section 6.6. +func ParsePublicKey(in []byte) (out PublicKey, err error) { + algo, in, ok := parseString(in) + if !ok { + return nil, errShortRead + } + var rest []byte + out, rest, err = parsePubKey(in, string(algo)) + if len(rest) > 0 { + return nil, errors.New("ssh: trailing junk in public key") + } + + return out, err +} + +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH +// authorized_keys file. The return value ends with newline. +func MarshalAuthorizedKey(key PublicKey) []byte { + b := &bytes.Buffer{} + b.WriteString(key.Type()) + b.WriteByte(' ') + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(key.Marshal()) + e.Close() + b.WriteByte('\n') + return b.Bytes() +} + +// PublicKey is an abstraction of different types of public keys. +type PublicKey interface { + // Type returns the key's type, e.g. "ssh-rsa". + Type() string + + // Marshal returns the serialized key data in SSH wire format, + // with the name prefix. To unmarshal the returned data, use + // the ParsePublicKey function. + Marshal() []byte + + // Verify that sig is a signature on the given data using this + // key. This function will hash the data appropriately first. + Verify(data []byte, sig *Signature) error +} + +// CryptoPublicKey, if implemented by a PublicKey, +// returns the underlying crypto.PublicKey form of the key. +type CryptoPublicKey interface { + CryptoPublicKey() crypto.PublicKey +} + +// A Signer can create signatures that verify against a public key. +type Signer interface { + // PublicKey returns an associated PublicKey instance. + PublicKey() PublicKey + + // Sign returns raw signature for the given data. This method + // will apply the hash specified for the keytype to the data. + Sign(rand io.Reader, data []byte) (*Signature, error) +} + +type rsaPublicKey rsa.PublicKey + +func (r *rsaPublicKey) Type() string { + return "ssh-rsa" +} + +// parseRSA parses an RSA key according to RFC 4253, section 6.6. +func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + E *big.Int + N *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if w.E.BitLen() > 24 { + return nil, nil, errors.New("ssh: exponent too large") + } + e := w.E.Int64() + if e < 3 || e&1 == 0 { + return nil, nil, errors.New("ssh: incorrect exponent") + } + + var key rsa.PublicKey + key.E = int(e) + key.N = w.N + return (*rsaPublicKey)(&key), w.Rest, nil +} + +func (r *rsaPublicKey) Marshal() []byte { + e := new(big.Int).SetInt64(int64(r.E)) + // RSA publickey struct layout should match the struct used by + // parseRSACert in the x/crypto/ssh/agent package. + wirekey := struct { + Name string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + e, + r.N, + } + return Marshal(&wirekey) +} + +func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != r.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) + } + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob) +} + +func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*rsa.PublicKey)(r) +} + +type dsaPublicKey dsa.PublicKey + +func (k *dsaPublicKey) Type() string { + return "ssh-dss" +} + +func checkDSAParams(param *dsa.Parameters) error { + // SSH specifies FIPS 186-2, which only provided a single size + // (1024 bits) DSA key. FIPS 186-3 allows for larger key + // sizes, which would confuse SSH. + if l := param.P.BitLen(); l != 1024 { + return fmt.Errorf("ssh: unsupported DSA key size %d", l) + } + + return nil +} + +// parseDSA parses an DSA key according to RFC 4253, section 6.6. +func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + P, Q, G, Y *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + param := dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + } + if err := checkDSAParams(¶m); err != nil { + return nil, nil, err + } + + key := &dsaPublicKey{ + Parameters: param, + Y: w.Y, + } + return key, w.Rest, nil +} + +func (k *dsaPublicKey) Marshal() []byte { + // DSA publickey struct layout should match the struct used by + // parseDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + P, Q, G, Y *big.Int + }{ + k.Type(), + k.P, + k.Q, + k.G, + k.Y, + } + + return Marshal(&w) +} + +func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 4253, section 6.6, + // The value for 'dss_signature_blob' is encoded as a string containing + // r, followed by s (which are 160-bit integers, without lengths or + // padding, unsigned, and in network byte order). + // For DSS purposes, sig.Blob should be exactly 40 bytes in length. + if len(sig.Blob) != 40 { + return errors.New("ssh: DSA signature parse error") + } + r := new(big.Int).SetBytes(sig.Blob[:20]) + s := new(big.Int).SetBytes(sig.Blob[20:]) + if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*dsa.PublicKey)(k) +} + +type dsaPrivateKey struct { + *dsa.PrivateKey +} + +func (k *dsaPrivateKey) PublicKey() PublicKey { + return (*dsaPublicKey)(&k.PrivateKey.PublicKey) +} + +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + r, s, err := dsa.Sign(rand, k.PrivateKey, digest) + if err != nil { + return nil, err + } + + sig := make([]byte, 40) + rb := r.Bytes() + sb := s.Bytes() + + copy(sig[20-len(rb):20], rb) + copy(sig[40-len(sb):], sb) + + return &Signature{ + Format: k.PublicKey().Type(), + Blob: sig, + }, nil +} + +type ecdsaPublicKey ecdsa.PublicKey + +func (k *ecdsaPublicKey) Type() string { + return "ecdsa-sha2-" + k.nistID() +} + +func (k *ecdsaPublicKey) nistID() string { + switch k.Params().BitSize { + case 256: + return "nistp256" + case 384: + return "nistp384" + case 521: + return "nistp521" + } + panic("ssh: unsupported ecdsa key size") +} + +type ed25519PublicKey ed25519.PublicKey + +func (k ed25519PublicKey) Type() string { + return KeyAlgoED25519 +} + +func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := ed25519.PublicKey(w.KeyBytes) + + return (ed25519PublicKey)(key), w.Rest, nil +} + +func (k ed25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + }{ + KeyAlgoED25519, + []byte(k), + } + return Marshal(&w) +} + +func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + edKey := (ed25519.PublicKey)(k) + if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return ed25519.PublicKey(k) +} + +func supportedEllipticCurve(curve elliptic.Curve) bool { + return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() +} + +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + +// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. +func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(ecdsa.PublicKey) + + switch w.Curve { + case "nistp256": + key.Curve = elliptic.P256() + case "nistp384": + key.Curve = elliptic.P384() + case "nistp521": + key.Curve = elliptic.P521() + default: + return nil, nil, errors.New("ssh: unsupported curve") + } + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + return (*ecdsaPublicKey)(key), w.Rest, nil +} + +func (k *ecdsaPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + // ECDSA publickey struct layout should match the struct used by + // parseECDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + ID string + Key []byte + }{ + k.Type(), + k.nistID(), + keyBytes, + } + + return Marshal(&w) +} + +func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := ecHash(k.Curve).New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 5656, section 3.1.2, + // The ecdsa_signature_blob value has the following specific encoding: + // mpint r + // mpint s + var ecSig struct { + R *big.Int + S *big.Int + } + + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*ecdsa.PublicKey)(k) +} + +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, +// *ecdsa.PrivateKey or any other crypto.Signer and returns a +// corresponding Signer instance. ECDSA keys must use P-256, P-384 or +// P-521. DSA keys must use parameter size L1024N160. +func NewSignerFromKey(key interface{}) (Signer, error) { + switch key := key.(type) { + case crypto.Signer: + return NewSignerFromSigner(key) + case *dsa.PrivateKey: + return newDSAPrivateKey(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { + if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { + return nil, err + } + + return &dsaPrivateKey{key}, nil +} + +type wrappedSigner struct { + signer crypto.Signer + pubKey PublicKey +} + +// NewSignerFromSigner takes any crypto.Signer implementation and +// returns a corresponding Signer interface. This can be used, for +// example, with keys kept in hardware modules. +func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { + pubKey, err := NewPublicKey(signer.Public()) + if err != nil { + return nil, err + } + + return &wrappedSigner{signer, pubKey}, nil +} + +func (s *wrappedSigner) PublicKey() PublicKey { + return s.pubKey +} + +func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + var hashFunc crypto.Hash + + switch key := s.pubKey.(type) { + case *rsaPublicKey, *dsaPublicKey: + hashFunc = crypto.SHA1 + case *ecdsaPublicKey: + hashFunc = ecHash(key.Curve) + case ed25519PublicKey: + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } + + var digest []byte + if hashFunc != 0 { + h := hashFunc.New() + h.Write(data) + digest = h.Sum(nil) + } else { + digest = data + } + + signature, err := s.signer.Sign(rand, digest, hashFunc) + if err != nil { + return nil, err + } + + // crypto.Signer.Sign is expected to return an ASN.1-encoded signature + // for ECDSA and DSA, but that's not the encoding expected by SSH, so + // re-encode. + switch s.pubKey.(type) { + case *ecdsaPublicKey, *dsaPublicKey: + type asn1Signature struct { + R, S *big.Int + } + asn1Sig := new(asn1Signature) + _, err := asn1.Unmarshal(signature, asn1Sig) + if err != nil { + return nil, err + } + + switch s.pubKey.(type) { + case *ecdsaPublicKey: + signature = Marshal(asn1Sig) + + case *dsaPublicKey: + signature = make([]byte, 40) + r := asn1Sig.R.Bytes() + s := asn1Sig.S.Bytes() + copy(signature[20-len(r):20], r) + copy(signature[40-len(s):40], s) + } + } + + return &Signature{ + Format: s.pubKey.Type(), + Blob: signature, + }, nil +} + +// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, +// or ed25519.PublicKey returns a corresponding PublicKey instance. +// ECDSA keys must use P-256, P-384 or P-521. +func NewPublicKey(key interface{}) (PublicKey, error) { + switch key := key.(type) { + case *rsa.PublicKey: + return (*rsaPublicKey)(key), nil + case *ecdsa.PublicKey: + if !supportedEllipticCurve(key.Curve) { + return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") + } + return (*ecdsaPublicKey)(key), nil + case *dsa.PublicKey: + return (*dsaPublicKey)(key), nil + case ed25519.PublicKey: + return (ed25519PublicKey)(key), nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports +// the same keys as ParseRawPrivateKey. +func ParsePrivateKey(pemBytes []byte) (Signer, error) { + key, err := ParseRawPrivateKey(pemBytes) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private +// key and passphrase. It supports the same keys as +// ParseRawPrivateKeyWithPassphrase. +func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) { + key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// encryptedBlock tells whether a private key is +// encrypted by examining its Proc-Type header +// for a mention of ENCRYPTED +// according to RFC 1421 Section 4.6.1.1. +func encryptedBlock(block *pem.Block) bool { + return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") +} + +// ParseRawPrivateKey returns a private key from a PEM encoded private key. It +// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys. +func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + + if encryptedBlock(block) { + return nil, errors.New("ssh: cannot decode encrypted private keys") + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(block.Bytes) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(block.Bytes) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with +// passphrase from a PEM encoded private key. If wrong passphrase, return +// x509.IncorrectPasswordError. +func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + buf := block.Bytes + + if encryptedBlock(block) { + if x509.IsEncryptedPEMBlock(block) { + var err error + buf, err = x509.DecryptPEMBlock(block, passPhrase) + if err != nil { + if err == x509.IncorrectPasswordError { + return nil, err + } + return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) + } + } + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(buf) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(buf) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(buf) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(buf) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as +// specified by the OpenSSL DSA man page. +func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { + var k struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int + } + rest, err := asn1.Unmarshal(der, &k) + if err != nil { + return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) + } + if len(rest) > 0 { + return nil, errors.New("ssh: garbage after DSA key") + } + + return &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Pub, + }, + X: k.Priv, + }, nil +} + +// Implemented based on the documentation at +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key +func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { + magic := append([]byte("openssh-key-v1"), 0) + if !bytes.Equal(magic, key[0:len(magic)]) { + return nil, errors.New("ssh: invalid openssh private key format") + } + remaining := key[len(magic):] + + var w struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte + } + + if err := Unmarshal(remaining, &w); err != nil { + return nil, err + } + + if w.KdfName != "none" || w.CipherName != "none" { + return nil, errors.New("ssh: cannot decode encrypted private keys") + } + + pk1 := struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` + }{} + + if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { + return nil, err + } + + if pk1.Check1 != pk1.Check2 { + return nil, errors.New("ssh: checkint mismatch") + } + + // we only handle ed25519 and rsa keys currently + switch pk1.Keytype { + case KeyAlgoRSA: + // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 + key := struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + for i, b := range key.Pad { + if int(b) != i+1 { + return nil, errors.New("ssh: padding not as expected") + } + } + + pk := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: key.N, + E: int(key.E.Int64()), + }, + D: key.D, + Primes: []*big.Int{key.P, key.Q}, + } + + if err := pk.Validate(); err != nil { + return nil, err + } + + pk.Precompute() + + return pk, nil + case KeyAlgoED25519: + key := struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if len(key.Priv) != ed25519.PrivateKeySize { + return nil, errors.New("ssh: private key unexpected length") + } + + for i, b := range key.Pad { + if int(b) != i+1 { + return nil, errors.New("ssh: padding not as expected") + } + } + + pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) + copy(pk, key.Priv) + return &pk, nil + default: + return nil, errors.New("ssh: unhandled key type") + } +} + +// FingerprintLegacyMD5 returns the user presentation of the key's +// fingerprint as described by RFC 4716 section 4. +func FingerprintLegacyMD5(pubKey PublicKey) string { + md5sum := md5.Sum(pubKey.Marshal()) + hexarray := make([]string, len(md5sum)) + for i, c := range md5sum { + hexarray[i] = hex.EncodeToString([]byte{c}) + } + return strings.Join(hexarray, ":") +} + +// FingerprintSHA256 returns the user presentation of the key's +// fingerprint as unpadded base64 encoded sha256 hash. +// This format was introduced from OpenSSH 6.8. +// https://www.openssh.com/txt/release-6.8 +// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) +func FingerprintSHA256(pubKey PublicKey) string { + sha256sum := sha256.Sum256(pubKey.Marshal()) + hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) + return "SHA256:" + hash +} diff --git a/vendor/golang.org/x/crypto/ssh/keys_test.go b/vendor/golang.org/x/crypto/ssh/keys_test.go new file mode 100644 index 0000000..9a90abc --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/keys_test.go @@ -0,0 +1,500 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "fmt" + "reflect" + "strings" + "testing" + + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh/testdata" +) + +func rawKey(pub PublicKey) interface{} { + switch k := pub.(type) { + case *rsaPublicKey: + return (*rsa.PublicKey)(k) + case *dsaPublicKey: + return (*dsa.PublicKey)(k) + case *ecdsaPublicKey: + return (*ecdsa.PublicKey)(k) + case ed25519PublicKey: + return (ed25519.PublicKey)(k) + case *Certificate: + return k + } + panic("unknown key type") +} + +func TestKeyMarshalParse(t *testing.T) { + for _, priv := range testSigners { + pub := priv.PublicKey() + roundtrip, err := ParsePublicKey(pub.Marshal()) + if err != nil { + t.Errorf("ParsePublicKey(%T): %v", pub, err) + } + + k1 := rawKey(pub) + k2 := rawKey(roundtrip) + + if !reflect.DeepEqual(k1, k2) { + t.Errorf("got %#v in roundtrip, want %#v", k2, k1) + } + } +} + +func TestUnsupportedCurves(t *testing.T) { + raw, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + if err != nil { + t.Fatalf("GenerateKey: %v", err) + } + + if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err) + } + + if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err) + } +} + +func TestNewPublicKey(t *testing.T) { + for _, k := range testSigners { + raw := rawKey(k.PublicKey()) + // Skip certificates, as NewPublicKey does not support them. + if _, ok := raw.(*Certificate); ok { + continue + } + pub, err := NewPublicKey(raw) + if err != nil { + t.Errorf("NewPublicKey(%#v): %v", raw, err) + } + if !reflect.DeepEqual(k.PublicKey(), pub) { + t.Errorf("NewPublicKey(%#v) = %#v, want %#v", raw, pub, k.PublicKey()) + } + } +} + +func TestKeySignVerify(t *testing.T) { + for _, priv := range testSigners { + pub := priv.PublicKey() + + data := []byte("sign me") + sig, err := priv.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("Sign(%T): %v", priv, err) + } + + if err := pub.Verify(data, sig); err != nil { + t.Errorf("publicKey.Verify(%T): %v", priv, err) + } + sig.Blob[5]++ + if err := pub.Verify(data, sig); err == nil { + t.Errorf("publicKey.Verify on broken sig did not fail") + } + } +} + +func TestParseRSAPrivateKey(t *testing.T) { + key := testPrivateKeys["rsa"] + + rsa, ok := key.(*rsa.PrivateKey) + if !ok { + t.Fatalf("got %T, want *rsa.PrivateKey", rsa) + } + + if err := rsa.Validate(); err != nil { + t.Errorf("Validate: %v", err) + } +} + +func TestParseECPrivateKey(t *testing.T) { + key := testPrivateKeys["ecdsa"] + + ecKey, ok := key.(*ecdsa.PrivateKey) + if !ok { + t.Fatalf("got %T, want *ecdsa.PrivateKey", ecKey) + } + + if !validateECPublicKey(ecKey.Curve, ecKey.X, ecKey.Y) { + t.Fatalf("public key does not validate.") + } +} + +// See Issue https://github.com/golang/go/issues/6650. +func TestParseEncryptedPrivateKeysFails(t *testing.T) { + const wantSubstring = "encrypted" + for i, tt := range testdata.PEMEncryptedKeys { + _, err := ParsePrivateKey(tt.PEMBytes) + if err == nil { + t.Errorf("#%d key %s: ParsePrivateKey successfully parsed, expected an error", i, tt.Name) + continue + } + + if !strings.Contains(err.Error(), wantSubstring) { + t.Errorf("#%d key %s: got error %q, want substring %q", i, tt.Name, err, wantSubstring) + } + } +} + +// Parse encrypted private keys with passphrase +func TestParseEncryptedPrivateKeysWithPassphrase(t *testing.T) { + data := []byte("sign me") + for _, tt := range testdata.PEMEncryptedKeys { + s, err := ParsePrivateKeyWithPassphrase(tt.PEMBytes, []byte(tt.EncryptionKey)) + if err != nil { + t.Fatalf("ParsePrivateKeyWithPassphrase returned error: %s", err) + continue + } + sig, err := s.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("dsa.Sign: %v", err) + } + if err := s.PublicKey().Verify(data, sig); err != nil { + t.Errorf("Verify failed: %v", err) + } + } + + tt := testdata.PEMEncryptedKeys[0] + _, err := ParsePrivateKeyWithPassphrase(tt.PEMBytes, []byte("incorrect")) + if err != x509.IncorrectPasswordError { + t.Fatalf("got %v want IncorrectPasswordError", err) + } +} + +func TestParseDSA(t *testing.T) { + // We actually exercise the ParsePrivateKey codepath here, as opposed to + // using the ParseRawPrivateKey+NewSignerFromKey path that testdata_test.go + // uses. + s, err := ParsePrivateKey(testdata.PEMBytes["dsa"]) + if err != nil { + t.Fatalf("ParsePrivateKey returned error: %s", err) + } + + data := []byte("sign me") + sig, err := s.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("dsa.Sign: %v", err) + } + + if err := s.PublicKey().Verify(data, sig); err != nil { + t.Errorf("Verify failed: %v", err) + } +} + +// Tests for authorized_keys parsing. + +// getTestKey returns a public key, and its base64 encoding. +func getTestKey() (PublicKey, string) { + k := testPublicKeys["rsa"] + + b := &bytes.Buffer{} + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(k.Marshal()) + e.Close() + + return k, b.String() +} + +func TestMarshalParsePublicKey(t *testing.T) { + pub, pubSerialized := getTestKey() + line := fmt.Sprintf("%s %s user@host", pub.Type(), pubSerialized) + + authKeys := MarshalAuthorizedKey(pub) + actualFields := strings.Fields(string(authKeys)) + if len(actualFields) == 0 { + t.Fatalf("failed authKeys: %v", authKeys) + } + + // drop the comment + expectedFields := strings.Fields(line)[0:2] + + if !reflect.DeepEqual(actualFields, expectedFields) { + t.Errorf("got %v, expected %v", actualFields, expectedFields) + } + + actPub, _, _, _, err := ParseAuthorizedKey([]byte(line)) + if err != nil { + t.Fatalf("cannot parse %v: %v", line, err) + } + if !reflect.DeepEqual(actPub, pub) { + t.Errorf("got %v, expected %v", actPub, pub) + } +} + +type testAuthResult struct { + pubKey PublicKey + options []string + comments string + rest string + ok bool +} + +func testAuthorizedKeys(t *testing.T, authKeys []byte, expected []testAuthResult) { + rest := authKeys + var values []testAuthResult + for len(rest) > 0 { + var r testAuthResult + var err error + r.pubKey, r.comments, r.options, rest, err = ParseAuthorizedKey(rest) + r.ok = (err == nil) + t.Log(err) + r.rest = string(rest) + values = append(values, r) + } + + if !reflect.DeepEqual(values, expected) { + t.Errorf("got %#v, expected %#v", values, expected) + } +} + +func TestAuthorizedKeyBasic(t *testing.T) { + pub, pubSerialized := getTestKey() + line := "ssh-rsa " + pubSerialized + " user@host" + testAuthorizedKeys(t, []byte(line), + []testAuthResult{ + {pub, nil, "user@host", "", true}, + }) +} + +func TestAuth(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithOptions := []string{ + `# comments to ignore before any keys...`, + ``, + `env="HOME=/home/root",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`, + `# comments to ignore, along with a blank line`, + ``, + `env="HOME=/home/root2" ssh-rsa ` + pubSerialized + ` user2@host2`, + ``, + `# more comments, plus a invalid entry`, + `ssh-rsa data-that-will-not-parse user@host3`, + } + for _, eol := range []string{"\n", "\r\n"} { + authOptions := strings.Join(authWithOptions, eol) + rest2 := strings.Join(authWithOptions[3:], eol) + rest3 := strings.Join(authWithOptions[6:], eol) + testAuthorizedKeys(t, []byte(authOptions), []testAuthResult{ + {pub, []string{`env="HOME=/home/root"`, "no-port-forwarding"}, "user@host", rest2, true}, + {pub, []string{`env="HOME=/home/root2"`}, "user2@host2", rest3, true}, + {nil, nil, "", "", false}, + }) + } +} + +func TestAuthWithQuotedSpaceInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedSpaceInEnv := []byte(`env="HOME=/home/root dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedSpaceInEnv), []testAuthResult{ + {pub, []string{`env="HOME=/home/root dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) +} + +func TestAuthWithQuotedCommaInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedCommaInEnv := []byte(`env="HOME=/home/root,dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedCommaInEnv), []testAuthResult{ + {pub, []string{`env="HOME=/home/root,dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) +} + +func TestAuthWithQuotedQuoteInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedQuoteInEnv := []byte(`env="HOME=/home/\"root dir",no-port-forwarding` + "\t" + `ssh-rsa` + "\t" + pubSerialized + ` user@host`) + authWithDoubleQuotedQuote := []byte(`no-port-forwarding,env="HOME=/home/ \"root dir\"" ssh-rsa ` + pubSerialized + "\t" + `user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedQuoteInEnv), []testAuthResult{ + {pub, []string{`env="HOME=/home/\"root dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) + + testAuthorizedKeys(t, []byte(authWithDoubleQuotedQuote), []testAuthResult{ + {pub, []string{"no-port-forwarding", `env="HOME=/home/ \"root dir\""`}, "user@host", "", true}, + }) +} + +func TestAuthWithInvalidSpace(t *testing.T) { + _, pubSerialized := getTestKey() + authWithInvalidSpace := []byte(`env="HOME=/home/root dir", no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host +#more to follow but still no valid keys`) + testAuthorizedKeys(t, []byte(authWithInvalidSpace), []testAuthResult{ + {nil, nil, "", "", false}, + }) +} + +func TestAuthWithMissingQuote(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithMissingQuote := []byte(`env="HOME=/home/root,no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host +env="HOME=/home/root",shared-control ssh-rsa ` + pubSerialized + ` user@host`) + + testAuthorizedKeys(t, []byte(authWithMissingQuote), []testAuthResult{ + {pub, []string{`env="HOME=/home/root"`, `shared-control`}, "user@host", "", true}, + }) +} + +func TestInvalidEntry(t *testing.T) { + authInvalid := []byte(`ssh-rsa`) + _, _, _, _, err := ParseAuthorizedKey(authInvalid) + if err == nil { + t.Errorf("got valid entry for %q", authInvalid) + } +} + +var knownHostsParseTests = []struct { + input string + err string + + marker string + comment string + hosts []string + rest string +}{ + { + "", + "EOF", + + "", "", nil, "", + }, + { + "# Just a comment", + "EOF", + + "", "", nil, "", + }, + { + " \t ", + "EOF", + + "", "", nil, "", + }, + { + "localhost ssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\n", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\n", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\nnext line", + "", + + "", "comment comment", []string{"localhost"}, "next line", + }, + { + "localhost,[host2:123]\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost", "[host2:123]"}, "", + }, + { + "@marker \tlocalhost,[host2:123]\tssh-rsa {RSAPUB}", + "", + + "marker", "", []string{"localhost", "[host2:123]"}, "", + }, + { + "@marker \tlocalhost,[host2:123]\tssh-rsa aabbccdd", + "short read", + + "", "", nil, "", + }, +} + +func TestKnownHostsParsing(t *testing.T) { + rsaPub, rsaPubSerialized := getTestKey() + + for i, test := range knownHostsParseTests { + var expectedKey PublicKey + const rsaKeyToken = "{RSAPUB}" + + input := test.input + if strings.Contains(input, rsaKeyToken) { + expectedKey = rsaPub + input = strings.Replace(test.input, rsaKeyToken, rsaPubSerialized, -1) + } + + marker, hosts, pubKey, comment, rest, err := ParseKnownHosts([]byte(input)) + if err != nil { + if len(test.err) == 0 { + t.Errorf("#%d: unexpectedly failed with %q", i, err) + } else if !strings.Contains(err.Error(), test.err) { + t.Errorf("#%d: expected error containing %q, but got %q", i, test.err, err) + } + continue + } else if len(test.err) != 0 { + t.Errorf("#%d: succeeded but expected error including %q", i, test.err) + continue + } + + if !reflect.DeepEqual(expectedKey, pubKey) { + t.Errorf("#%d: expected key %#v, but got %#v", i, expectedKey, pubKey) + } + + if marker != test.marker { + t.Errorf("#%d: expected marker %q, but got %q", i, test.marker, marker) + } + + if comment != test.comment { + t.Errorf("#%d: expected comment %q, but got %q", i, test.comment, comment) + } + + if !reflect.DeepEqual(test.hosts, hosts) { + t.Errorf("#%d: expected hosts %#v, but got %#v", i, test.hosts, hosts) + } + + if rest := string(rest); rest != test.rest { + t.Errorf("#%d: expected remaining input to be %q, but got %q", i, test.rest, rest) + } + } +} + +func TestFingerprintLegacyMD5(t *testing.T) { + pub, _ := getTestKey() + fingerprint := FingerprintLegacyMD5(pub) + want := "fb:61:6d:1a:e3:f0:95:45:3c:a0:79:be:4a:93:63:66" // ssh-keygen -lf -E md5 rsa + if fingerprint != want { + t.Errorf("got fingerprint %q want %q", fingerprint, want) + } +} + +func TestFingerprintSHA256(t *testing.T) { + pub, _ := getTestKey() + fingerprint := FingerprintSHA256(pub) + want := "SHA256:Anr3LjZK8YVpjrxu79myrW9Hrb/wpcMNpVvTq/RcBm8" // ssh-keygen -lf rsa + if fingerprint != want { + t.Errorf("got fingerprint %q want %q", fingerprint, want) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go new file mode 100644 index 0000000..46dad14 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go @@ -0,0 +1,546 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package knownhosts implements a parser for the OpenSSH +// known_hosts host key database. +package knownhosts + +import ( + "bufio" + "bytes" + "crypto/hmac" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "os" + "strings" + + "golang.org/x/crypto/ssh" +) + +// See the sshd manpage +// (http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT) for +// background. + +type addr struct{ host, port string } + +func (a *addr) String() string { + h := a.host + if strings.Contains(h, ":") { + h = "[" + h + "]" + } + return h + ":" + a.port +} + +type matcher interface { + match([]addr) bool +} + +type hostPattern struct { + negate bool + addr addr +} + +func (p *hostPattern) String() string { + n := "" + if p.negate { + n = "!" + } + + return n + p.addr.String() +} + +type hostPatterns []hostPattern + +func (ps hostPatterns) match(addrs []addr) bool { + matched := false + for _, p := range ps { + for _, a := range addrs { + m := p.match(a) + if !m { + continue + } + if p.negate { + return false + } + matched = true + } + } + return matched +} + +// See +// https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/addrmatch.c +// The matching of * has no regard for separators, unlike filesystem globs +func wildcardMatch(pat []byte, str []byte) bool { + for { + if len(pat) == 0 { + return len(str) == 0 + } + if len(str) == 0 { + return false + } + + if pat[0] == '*' { + if len(pat) == 1 { + return true + } + + for j := range str { + if wildcardMatch(pat[1:], str[j:]) { + return true + } + } + return false + } + + if pat[0] == '?' || pat[0] == str[0] { + pat = pat[1:] + str = str[1:] + } else { + return false + } + } +} + +func (p *hostPattern) match(a addr) bool { + return wildcardMatch([]byte(p.addr.host), []byte(a.host)) && p.addr.port == a.port +} + +type keyDBLine struct { + cert bool + matcher matcher + knownKey KnownKey +} + +func serialize(k ssh.PublicKey) string { + return k.Type() + " " + base64.StdEncoding.EncodeToString(k.Marshal()) +} + +func (l *keyDBLine) match(addrs []addr) bool { + return l.matcher.match(addrs) +} + +type hostKeyDB struct { + // Serialized version of revoked keys + revoked map[string]*KnownKey + lines []keyDBLine +} + +func newHostKeyDB() *hostKeyDB { + db := &hostKeyDB{ + revoked: make(map[string]*KnownKey), + } + + return db +} + +func keyEq(a, b ssh.PublicKey) bool { + return bytes.Equal(a.Marshal(), b.Marshal()) +} + +// IsAuthorityForHost can be used as a callback in ssh.CertChecker +func (db *hostKeyDB) IsHostAuthority(remote ssh.PublicKey, address string) bool { + h, p, err := net.SplitHostPort(address) + if err != nil { + return false + } + a := addr{host: h, port: p} + + for _, l := range db.lines { + if l.cert && keyEq(l.knownKey.Key, remote) && l.match([]addr{a}) { + return true + } + } + return false +} + +// IsRevoked can be used as a callback in ssh.CertChecker +func (db *hostKeyDB) IsRevoked(key *ssh.Certificate) bool { + _, ok := db.revoked[string(key.Marshal())] + return ok +} + +const markerCert = "@cert-authority" +const markerRevoked = "@revoked" + +func nextWord(line []byte) (string, []byte) { + i := bytes.IndexAny(line, "\t ") + if i == -1 { + return string(line), nil + } + + return string(line[:i]), bytes.TrimSpace(line[i:]) +} + +func parseLine(line []byte) (marker, host string, key ssh.PublicKey, err error) { + if w, next := nextWord(line); w == markerCert || w == markerRevoked { + marker = w + line = next + } + + host, line = nextWord(line) + if len(line) == 0 { + return "", "", nil, errors.New("knownhosts: missing host pattern") + } + + // ignore the keytype as it's in the key blob anyway. + _, line = nextWord(line) + if len(line) == 0 { + return "", "", nil, errors.New("knownhosts: missing key type pattern") + } + + keyBlob, _ := nextWord(line) + + keyBytes, err := base64.StdEncoding.DecodeString(keyBlob) + if err != nil { + return "", "", nil, err + } + key, err = ssh.ParsePublicKey(keyBytes) + if err != nil { + return "", "", nil, err + } + + return marker, host, key, nil +} + +func (db *hostKeyDB) parseLine(line []byte, filename string, linenum int) error { + marker, pattern, key, err := parseLine(line) + if err != nil { + return err + } + + if marker == markerRevoked { + db.revoked[string(key.Marshal())] = &KnownKey{ + Key: key, + Filename: filename, + Line: linenum, + } + + return nil + } + + entry := keyDBLine{ + cert: marker == markerCert, + knownKey: KnownKey{ + Filename: filename, + Line: linenum, + Key: key, + }, + } + + if pattern[0] == '|' { + entry.matcher, err = newHashedHost(pattern) + } else { + entry.matcher, err = newHostnameMatcher(pattern) + } + + if err != nil { + return err + } + + db.lines = append(db.lines, entry) + return nil +} + +func newHostnameMatcher(pattern string) (matcher, error) { + var hps hostPatterns + for _, p := range strings.Split(pattern, ",") { + if len(p) == 0 { + continue + } + + var a addr + var negate bool + if p[0] == '!' { + negate = true + p = p[1:] + } + + if len(p) == 0 { + return nil, errors.New("knownhosts: negation without following hostname") + } + + var err error + if p[0] == '[' { + a.host, a.port, err = net.SplitHostPort(p) + if err != nil { + return nil, err + } + } else { + a.host, a.port, err = net.SplitHostPort(p) + if err != nil { + a.host = p + a.port = "22" + } + } + hps = append(hps, hostPattern{ + negate: negate, + addr: a, + }) + } + return hps, nil +} + +// KnownKey represents a key declared in a known_hosts file. +type KnownKey struct { + Key ssh.PublicKey + Filename string + Line int +} + +func (k *KnownKey) String() string { + return fmt.Sprintf("%s:%d: %s", k.Filename, k.Line, serialize(k.Key)) +} + +// KeyError is returned if we did not find the key in the host key +// database, or there was a mismatch. Typically, in batch +// applications, this should be interpreted as failure. Interactive +// applications can offer an interactive prompt to the user. +type KeyError struct { + // Want holds the accepted host keys. For each key algorithm, + // there can be one hostkey. If Want is empty, the host is + // unknown. If Want is non-empty, there was a mismatch, which + // can signify a MITM attack. + Want []KnownKey +} + +func (u *KeyError) Error() string { + if len(u.Want) == 0 { + return "knownhosts: key is unknown" + } + return "knownhosts: key mismatch" +} + +// RevokedError is returned if we found a key that was revoked. +type RevokedError struct { + Revoked KnownKey +} + +func (r *RevokedError) Error() string { + return "knownhosts: key is revoked" +} + +// check checks a key against the host database. This should not be +// used for verifying certificates. +func (db *hostKeyDB) check(address string, remote net.Addr, remoteKey ssh.PublicKey) error { + if revoked := db.revoked[string(remoteKey.Marshal())]; revoked != nil { + return &RevokedError{Revoked: *revoked} + } + + host, port, err := net.SplitHostPort(remote.String()) + if err != nil { + return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", remote, err) + } + + addrs := []addr{ + {host, port}, + } + + if address != "" { + host, port, err := net.SplitHostPort(address) + if err != nil { + return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", address, err) + } + + addrs = append(addrs, addr{host, port}) + } + + return db.checkAddrs(addrs, remoteKey) +} + +// checkAddrs checks if we can find the given public key for any of +// the given addresses. If we only find an entry for the IP address, +// or only the hostname, then this still succeeds. +func (db *hostKeyDB) checkAddrs(addrs []addr, remoteKey ssh.PublicKey) error { + // TODO(hanwen): are these the right semantics? What if there + // is just a key for the IP address, but not for the + // hostname? + + // Algorithm => key. + knownKeys := map[string]KnownKey{} + for _, l := range db.lines { + if l.match(addrs) { + typ := l.knownKey.Key.Type() + if _, ok := knownKeys[typ]; !ok { + knownKeys[typ] = l.knownKey + } + } + } + + keyErr := &KeyError{} + for _, v := range knownKeys { + keyErr.Want = append(keyErr.Want, v) + } + + // Unknown remote host. + if len(knownKeys) == 0 { + return keyErr + } + + // If the remote host starts using a different, unknown key type, we + // also interpret that as a mismatch. + if known, ok := knownKeys[remoteKey.Type()]; !ok || !keyEq(known.Key, remoteKey) { + return keyErr + } + + return nil +} + +// The Read function parses file contents. +func (db *hostKeyDB) Read(r io.Reader, filename string) error { + scanner := bufio.NewScanner(r) + + lineNum := 0 + for scanner.Scan() { + lineNum++ + line := scanner.Bytes() + line = bytes.TrimSpace(line) + if len(line) == 0 || line[0] == '#' { + continue + } + + if err := db.parseLine(line, filename, lineNum); err != nil { + return fmt.Errorf("knownhosts: %s:%d: %v", filename, lineNum, err) + } + } + return scanner.Err() +} + +// New creates a host key callback from the given OpenSSH host key +// files. The returned callback is for use in +// ssh.ClientConfig.HostKeyCallback. +func New(files ...string) (ssh.HostKeyCallback, error) { + db := newHostKeyDB() + for _, fn := range files { + f, err := os.Open(fn) + if err != nil { + return nil, err + } + defer f.Close() + if err := db.Read(f, fn); err != nil { + return nil, err + } + } + + var certChecker ssh.CertChecker + certChecker.IsHostAuthority = db.IsHostAuthority + certChecker.IsRevoked = db.IsRevoked + certChecker.HostKeyFallback = db.check + + return certChecker.CheckHostKey, nil +} + +// Normalize normalizes an address into the form used in known_hosts +func Normalize(address string) string { + host, port, err := net.SplitHostPort(address) + if err != nil { + host = address + port = "22" + } + entry := host + if port != "22" { + entry = "[" + entry + "]:" + port + } else if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") { + entry = "[" + entry + "]" + } + return entry +} + +// Line returns a line to add append to the known_hosts files. +func Line(addresses []string, key ssh.PublicKey) string { + var trimmed []string + for _, a := range addresses { + trimmed = append(trimmed, Normalize(a)) + } + + return strings.Join(trimmed, ",") + " " + serialize(key) +} + +// HashHostname hashes the given hostname. The hostname is not +// normalized before hashing. +func HashHostname(hostname string) string { + // TODO(hanwen): check if we can safely normalize this always. + salt := make([]byte, sha1.Size) + + _, err := rand.Read(salt) + if err != nil { + panic(fmt.Sprintf("crypto/rand failure %v", err)) + } + + hash := hashHost(hostname, salt) + return encodeHash(sha1HashType, salt, hash) +} + +func decodeHash(encoded string) (hashType string, salt, hash []byte, err error) { + if len(encoded) == 0 || encoded[0] != '|' { + err = errors.New("knownhosts: hashed host must start with '|'") + return + } + components := strings.Split(encoded, "|") + if len(components) != 4 { + err = fmt.Errorf("knownhosts: got %d components, want 3", len(components)) + return + } + + hashType = components[1] + if salt, err = base64.StdEncoding.DecodeString(components[2]); err != nil { + return + } + if hash, err = base64.StdEncoding.DecodeString(components[3]); err != nil { + return + } + return +} + +func encodeHash(typ string, salt []byte, hash []byte) string { + return strings.Join([]string{"", + typ, + base64.StdEncoding.EncodeToString(salt), + base64.StdEncoding.EncodeToString(hash), + }, "|") +} + +// See https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120 +func hashHost(hostname string, salt []byte) []byte { + mac := hmac.New(sha1.New, salt) + mac.Write([]byte(hostname)) + return mac.Sum(nil) +} + +type hashedHost struct { + salt []byte + hash []byte +} + +const sha1HashType = "1" + +func newHashedHost(encoded string) (*hashedHost, error) { + typ, salt, hash, err := decodeHash(encoded) + if err != nil { + return nil, err + } + + // The type field seems for future algorithm agility, but it's + // actually hardcoded in openssh currently, see + // https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120 + if typ != sha1HashType { + return nil, fmt.Errorf("knownhosts: got hash type %s, must be '1'", typ) + } + + return &hashedHost{salt: salt, hash: hash}, nil +} + +func (h *hashedHost) match(addrs []addr) bool { + for _, a := range addrs { + if bytes.Equal(hashHost(Normalize(a.String()), h.salt), h.hash) { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts_test.go b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts_test.go new file mode 100644 index 0000000..be7cc0e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts_test.go @@ -0,0 +1,329 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package knownhosts + +import ( + "bytes" + "fmt" + "net" + "reflect" + "testing" + + "golang.org/x/crypto/ssh" +) + +const edKeyStr = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGBAarftlLeoyf+v+nVchEZII/vna2PCV8FaX4vsF5BX" +const alternateEdKeyStr = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIXffBYeYL+WVzVru8npl5JHt2cjlr4ornFTWzoij9sx" +const ecKeyStr = "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNLCu01+wpXe3xB5olXCN4SqU2rQu0qjSRKJO4Bg+JRCPU+ENcgdA5srTU8xYDz/GEa4dzK5ldPw4J/gZgSXCMs=" + +var ecKey, alternateEdKey, edKey ssh.PublicKey +var testAddr = &net.TCPAddr{ + IP: net.IP{198, 41, 30, 196}, + Port: 22, +} + +var testAddr6 = &net.TCPAddr{ + IP: net.IP{198, 41, 30, 196, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + }, + Port: 22, +} + +func init() { + var err error + ecKey, _, _, _, err = ssh.ParseAuthorizedKey([]byte(ecKeyStr)) + if err != nil { + panic(err) + } + edKey, _, _, _, err = ssh.ParseAuthorizedKey([]byte(edKeyStr)) + if err != nil { + panic(err) + } + alternateEdKey, _, _, _, err = ssh.ParseAuthorizedKey([]byte(alternateEdKeyStr)) + if err != nil { + panic(err) + } +} + +func testDB(t *testing.T, s string) *hostKeyDB { + db := newHostKeyDB() + if err := db.Read(bytes.NewBufferString(s), "testdb"); err != nil { + t.Fatalf("Read: %v", err) + } + + return db +} + +func TestRevoked(t *testing.T) { + db := testDB(t, "\n\n@revoked * "+edKeyStr+"\n") + want := &RevokedError{ + Revoked: KnownKey{ + Key: edKey, + Filename: "testdb", + Line: 3, + }, + } + if err := db.check("", &net.TCPAddr{ + Port: 42, + }, edKey); err == nil { + t.Fatal("no error for revoked key") + } else if !reflect.DeepEqual(want, err) { + t.Fatalf("got %#v, want %#v", want, err) + } +} + +func TestHostAuthority(t *testing.T) { + for _, m := range []struct { + authorityFor string + address string + + good bool + }{ + {authorityFor: "localhost", address: "localhost:22", good: true}, + {authorityFor: "localhost", address: "localhost", good: false}, + {authorityFor: "localhost", address: "localhost:1234", good: false}, + {authorityFor: "[localhost]:1234", address: "localhost:1234", good: true}, + {authorityFor: "[localhost]:1234", address: "localhost:22", good: false}, + {authorityFor: "[localhost]:1234", address: "localhost", good: false}, + } { + db := testDB(t, `@cert-authority `+m.authorityFor+` `+edKeyStr) + if ok := db.IsHostAuthority(db.lines[0].knownKey.Key, m.address); ok != m.good { + t.Errorf("IsHostAuthority: authority %s, address %s, wanted good = %v, got good = %v", + m.authorityFor, m.address, m.good, ok) + } + } +} + +func TestBracket(t *testing.T) { + db := testDB(t, `[git.eclipse.org]:29418,[198.41.30.196]:29418 `+edKeyStr) + + if err := db.check("git.eclipse.org:29418", &net.TCPAddr{ + IP: net.IP{198, 41, 30, 196}, + Port: 29418, + }, edKey); err != nil { + t.Errorf("got error %v, want none", err) + } + + if err := db.check("git.eclipse.org:29419", &net.TCPAddr{ + Port: 42, + }, edKey); err == nil { + t.Fatalf("no error for unknown address") + } else if ke, ok := err.(*KeyError); !ok { + t.Fatalf("got type %T, want *KeyError", err) + } else if len(ke.Want) > 0 { + t.Fatalf("got Want %v, want []", ke.Want) + } +} + +func TestNewKeyType(t *testing.T) { + str := fmt.Sprintf("%s %s", testAddr, edKeyStr) + db := testDB(t, str) + if err := db.check("", testAddr, ecKey); err == nil { + t.Fatalf("no error for unknown address") + } else if ke, ok := err.(*KeyError); !ok { + t.Fatalf("got type %T, want *KeyError", err) + } else if len(ke.Want) == 0 { + t.Fatalf("got empty KeyError.Want") + } +} + +func TestSameKeyType(t *testing.T) { + str := fmt.Sprintf("%s %s", testAddr, edKeyStr) + db := testDB(t, str) + if err := db.check("", testAddr, alternateEdKey); err == nil { + t.Fatalf("no error for unknown address") + } else if ke, ok := err.(*KeyError); !ok { + t.Fatalf("got type %T, want *KeyError", err) + } else if len(ke.Want) == 0 { + t.Fatalf("got empty KeyError.Want") + } else if got, want := ke.Want[0].Key.Marshal(), edKey.Marshal(); !bytes.Equal(got, want) { + t.Fatalf("got key %q, want %q", got, want) + } +} + +func TestIPAddress(t *testing.T) { + str := fmt.Sprintf("%s %s", testAddr, edKeyStr) + db := testDB(t, str) + if err := db.check("", testAddr, edKey); err != nil { + t.Errorf("got error %q, want none", err) + } +} + +func TestIPv6Address(t *testing.T) { + str := fmt.Sprintf("%s %s", testAddr6, edKeyStr) + db := testDB(t, str) + + if err := db.check("", testAddr6, edKey); err != nil { + t.Errorf("got error %q, want none", err) + } +} + +func TestBasic(t *testing.T) { + str := fmt.Sprintf("#comment\n\nserver.org,%s %s\notherhost %s", testAddr, edKeyStr, ecKeyStr) + db := testDB(t, str) + if err := db.check("server.org:22", testAddr, edKey); err != nil { + t.Errorf("got error %q, want none", err) + } + + want := KnownKey{ + Key: edKey, + Filename: "testdb", + Line: 3, + } + if err := db.check("server.org:22", testAddr, ecKey); err == nil { + t.Errorf("succeeded, want KeyError") + } else if ke, ok := err.(*KeyError); !ok { + t.Errorf("got %T, want *KeyError", err) + } else if len(ke.Want) != 1 { + t.Errorf("got %v, want 1 entry", ke) + } else if !reflect.DeepEqual(ke.Want[0], want) { + t.Errorf("got %v, want %v", ke.Want[0], want) + } +} + +func TestNegate(t *testing.T) { + str := fmt.Sprintf("%s,!server.org %s", testAddr, edKeyStr) + db := testDB(t, str) + if err := db.check("server.org:22", testAddr, ecKey); err == nil { + t.Errorf("succeeded") + } else if ke, ok := err.(*KeyError); !ok { + t.Errorf("got error type %T, want *KeyError", err) + } else if len(ke.Want) != 0 { + t.Errorf("got expected keys %d (first of type %s), want []", len(ke.Want), ke.Want[0].Key.Type()) + } +} + +func TestWildcard(t *testing.T) { + str := fmt.Sprintf("server*.domain %s", edKeyStr) + db := testDB(t, str) + + want := &KeyError{ + Want: []KnownKey{{ + Filename: "testdb", + Line: 1, + Key: edKey, + }}, + } + + got := db.check("server.domain:22", &net.TCPAddr{}, ecKey) + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s, want %s", got, want) + } +} + +func TestLine(t *testing.T) { + for in, want := range map[string]string{ + "server.org": "server.org " + edKeyStr, + "server.org:22": "server.org " + edKeyStr, + "server.org:23": "[server.org]:23 " + edKeyStr, + "[c629:1ec4:102:304:102:304:102:304]:22": "[c629:1ec4:102:304:102:304:102:304] " + edKeyStr, + "[c629:1ec4:102:304:102:304:102:304]:23": "[c629:1ec4:102:304:102:304:102:304]:23 " + edKeyStr, + } { + if got := Line([]string{in}, edKey); got != want { + t.Errorf("Line(%q) = %q, want %q", in, got, want) + } + } +} + +func TestWildcardMatch(t *testing.T) { + for _, c := range []struct { + pat, str string + want bool + }{ + {"a?b", "abb", true}, + {"ab", "abc", false}, + {"abc", "ab", false}, + {"a*b", "axxxb", true}, + {"a*b", "axbxb", true}, + {"a*b", "axbxbc", false}, + {"a*?", "axbxc", true}, + {"a*b*", "axxbxxxxxx", true}, + {"a*b*c", "axxbxxxxxxc", true}, + {"a*b*?", "axxbxxxxxxc", true}, + {"a*b*z", "axxbxxbxxxz", true}, + {"a*b*z", "axxbxxzxxxz", true}, + {"a*b*z", "axxbxxzxxx", false}, + } { + got := wildcardMatch([]byte(c.pat), []byte(c.str)) + if got != c.want { + t.Errorf("wildcardMatch(%q, %q) = %v, want %v", c.pat, c.str, got, c.want) + } + + } +} + +// TODO(hanwen): test coverage for certificates. + +const testHostname = "hostname" + +// generated with keygen -H -f +const encodedTestHostnameHash = "|1|IHXZvQMvTcZTUU29+2vXFgx8Frs=|UGccIWfRVDwilMBnA3WJoRAC75Y=" + +func TestHostHash(t *testing.T) { + testHostHash(t, testHostname, encodedTestHostnameHash) +} + +func TestHashList(t *testing.T) { + encoded := HashHostname(testHostname) + testHostHash(t, testHostname, encoded) +} + +func testHostHash(t *testing.T, hostname, encoded string) { + typ, salt, hash, err := decodeHash(encoded) + if err != nil { + t.Fatalf("decodeHash: %v", err) + } + + if got := encodeHash(typ, salt, hash); got != encoded { + t.Errorf("got encoding %s want %s", got, encoded) + } + + if typ != sha1HashType { + t.Fatalf("got hash type %q, want %q", typ, sha1HashType) + } + + got := hashHost(hostname, salt) + if !bytes.Equal(got, hash) { + t.Errorf("got hash %x want %x", got, hash) + } +} + +func TestNormalize(t *testing.T) { + for in, want := range map[string]string{ + "127.0.0.1:22": "127.0.0.1", + "[127.0.0.1]:22": "127.0.0.1", + "[127.0.0.1]:23": "[127.0.0.1]:23", + "127.0.0.1:23": "[127.0.0.1]:23", + "[a.b.c]:22": "a.b.c", + "[abcd:abcd:abcd:abcd]": "[abcd:abcd:abcd:abcd]", + "[abcd:abcd:abcd:abcd]:22": "[abcd:abcd:abcd:abcd]", + "[abcd:abcd:abcd:abcd]:23": "[abcd:abcd:abcd:abcd]:23", + } { + got := Normalize(in) + if got != want { + t.Errorf("Normalize(%q) = %q, want %q", in, got, want) + } + } +} + +func TestHashedHostkeyCheck(t *testing.T) { + str := fmt.Sprintf("%s %s", HashHostname(testHostname), edKeyStr) + db := testDB(t, str) + if err := db.check(testHostname+":22", testAddr, edKey); err != nil { + t.Errorf("check(%s): %v", testHostname, err) + } + want := &KeyError{ + Want: []KnownKey{{ + Filename: "testdb", + Line: 1, + Key: edKey, + }}, + } + if got := db.check(testHostname+":22", testAddr, alternateEdKey); !reflect.DeepEqual(got, want) { + t.Errorf("got error %v, want %v", got, want) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go new file mode 100644 index 0000000..c07a062 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Message authentication support + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "hash" +) + +type macMode struct { + keySize int + etm bool + new func(key []byte) hash.Hash +} + +// truncatingMAC wraps around a hash.Hash and truncates the output digest to +// a given size. +type truncatingMAC struct { + length int + hmac hash.Hash +} + +func (t truncatingMAC) Write(data []byte) (int, error) { + return t.hmac.Write(data) +} + +func (t truncatingMAC) Sum(in []byte) []byte { + out := t.hmac.Sum(in) + return out[:len(in)+t.length] +} + +func (t truncatingMAC) Reset() { + t.hmac.Reset() +} + +func (t truncatingMAC) Size() int { + return t.length +} + +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } + +var macModes = map[string]*macMode{ + "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha1": {20, false, func(key []byte) hash.Hash { + return hmac.New(sha1.New, key) + }}, + "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { + return truncatingMAC{12, hmac.New(sha1.New, key)} + }}, +} diff --git a/vendor/golang.org/x/crypto/ssh/mempipe_test.go b/vendor/golang.org/x/crypto/ssh/mempipe_test.go new file mode 100644 index 0000000..8697cd6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mempipe_test.go @@ -0,0 +1,110 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" + "testing" +) + +// An in-memory packetConn. It is safe to call Close and writePacket +// from different goroutines. +type memTransport struct { + eof bool + pending [][]byte + write *memTransport + sync.Mutex + *sync.Cond +} + +func (t *memTransport) readPacket() ([]byte, error) { + t.Lock() + defer t.Unlock() + for { + if len(t.pending) > 0 { + r := t.pending[0] + t.pending = t.pending[1:] + return r, nil + } + if t.eof { + return nil, io.EOF + } + t.Cond.Wait() + } +} + +func (t *memTransport) closeSelf() error { + t.Lock() + defer t.Unlock() + if t.eof { + return io.EOF + } + t.eof = true + t.Cond.Broadcast() + return nil +} + +func (t *memTransport) Close() error { + err := t.write.closeSelf() + t.closeSelf() + return err +} + +func (t *memTransport) writePacket(p []byte) error { + t.write.Lock() + defer t.write.Unlock() + if t.write.eof { + return io.EOF + } + c := make([]byte, len(p)) + copy(c, p) + t.write.pending = append(t.write.pending, c) + t.write.Cond.Signal() + return nil +} + +func memPipe() (a, b packetConn) { + t1 := memTransport{} + t2 := memTransport{} + t1.write = &t2 + t2.write = &t1 + t1.Cond = sync.NewCond(&t1.Mutex) + t2.Cond = sync.NewCond(&t2.Mutex) + return &t1, &t2 +} + +func TestMemPipe(t *testing.T) { + a, b := memPipe() + if err := a.writePacket([]byte{42}); err != nil { + t.Fatalf("writePacket: %v", err) + } + if err := a.Close(); err != nil { + t.Fatal("Close: ", err) + } + p, err := b.readPacket() + if err != nil { + t.Fatal("readPacket: ", err) + } + if len(p) != 1 || p[0] != 42 { + t.Fatalf("got %v, want {42}", p) + } + p, err = b.readPacket() + if err != io.EOF { + t.Fatalf("got %v, %v, want EOF", p, err) + } +} + +func TestDoubleClose(t *testing.T) { + a, _ := memPipe() + err := a.Close() + if err != nil { + t.Errorf("Close: %v", err) + } + err = a.Close() + if err != io.EOF { + t.Errorf("expect EOF on double close.") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go new file mode 100644 index 0000000..08d2811 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -0,0 +1,766 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strconv" + "strings" +) + +// These are SSH message type numbers. They are scattered around several +// documents but many were taken from [SSH-PARAMETERS]. +const ( + msgIgnore = 2 + msgUnimplemented = 3 + msgDebug = 4 + msgNewKeys = 21 +) + +// SSH messages: +// +// These structures mirror the wire format of the corresponding SSH messages. +// They are marshaled using reflection with the marshal and unmarshal functions +// in this file. The only wrinkle is that a final member of type []byte with a +// ssh tag of "rest" receives the remainder of a packet when unmarshaling. + +// See RFC 4253, section 11.1. +const msgDisconnect = 1 + +// disconnectMsg is the message that signals a disconnect. It is also +// the error type returned from mux.Wait() +type disconnectMsg struct { + Reason uint32 `sshtype:"1"` + Message string + Language string +} + +func (d *disconnectMsg) Error() string { + return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) +} + +// See RFC 4253, section 7.1. +const msgKexInit = 20 + +type kexInitMsg struct { + Cookie [16]byte `sshtype:"20"` + KexAlgos []string + ServerHostKeyAlgos []string + CiphersClientServer []string + CiphersServerClient []string + MACsClientServer []string + MACsServerClient []string + CompressionClientServer []string + CompressionServerClient []string + LanguagesClientServer []string + LanguagesServerClient []string + FirstKexFollows bool + Reserved uint32 +} + +// See RFC 4253, section 8. + +// Diffie-Helman +const msgKexDHInit = 30 + +type kexDHInitMsg struct { + X *big.Int `sshtype:"30"` +} + +const msgKexECDHInit = 30 + +type kexECDHInitMsg struct { + ClientPubKey []byte `sshtype:"30"` +} + +const msgKexECDHReply = 31 + +type kexECDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + EphemeralPubKey []byte + Signature []byte +} + +const msgKexDHReply = 31 + +type kexDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + Y *big.Int + Signature []byte +} + +// See RFC 4253, section 10. +const msgServiceRequest = 5 + +type serviceRequestMsg struct { + Service string `sshtype:"5"` +} + +// See RFC 4253, section 10. +const msgServiceAccept = 6 + +type serviceAcceptMsg struct { + Service string `sshtype:"6"` +} + +// See RFC 4252, section 5. +const msgUserAuthRequest = 50 + +type userAuthRequestMsg struct { + User string `sshtype:"50"` + Service string + Method string + Payload []byte `ssh:"rest"` +} + +// Used for debug printouts of packets. +type userAuthSuccessMsg struct { +} + +// See RFC 4252, section 5.1 +const msgUserAuthFailure = 51 + +type userAuthFailureMsg struct { + Methods []string `sshtype:"51"` + PartialSuccess bool +} + +// See RFC 4252, section 5.1 +const msgUserAuthSuccess = 52 + +// See RFC 4252, section 5.4 +const msgUserAuthBanner = 53 + +type userAuthBannerMsg struct { + Message string `sshtype:"53"` + // unused, but required to allow message parsing + Language string +} + +// See RFC 4256, section 3.2 +const msgUserAuthInfoRequest = 60 +const msgUserAuthInfoResponse = 61 + +type userAuthInfoRequestMsg struct { + User string `sshtype:"60"` + Instruction string + DeprecatedLanguage string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpen = 90 + +type channelOpenMsg struct { + ChanType string `sshtype:"90"` + PeersID uint32 + PeersWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +const msgChannelExtendedData = 95 +const msgChannelData = 94 + +// Used for debug print outs of packets. +type channelDataMsg struct { + PeersID uint32 `sshtype:"94"` + Length uint32 + Rest []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenConfirm = 91 + +type channelOpenConfirmMsg struct { + PeersID uint32 `sshtype:"91"` + MyID uint32 + MyWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenFailure = 92 + +type channelOpenFailureMsg struct { + PeersID uint32 `sshtype:"92"` + Reason RejectionReason + Message string + Language string +} + +const msgChannelRequest = 98 + +type channelRequestMsg struct { + PeersID uint32 `sshtype:"98"` + Request string + WantReply bool + RequestSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.4. +const msgChannelSuccess = 99 + +type channelRequestSuccessMsg struct { + PeersID uint32 `sshtype:"99"` +} + +// See RFC 4254, section 5.4. +const msgChannelFailure = 100 + +type channelRequestFailureMsg struct { + PeersID uint32 `sshtype:"100"` +} + +// See RFC 4254, section 5.3 +const msgChannelClose = 97 + +type channelCloseMsg struct { + PeersID uint32 `sshtype:"97"` +} + +// See RFC 4254, section 5.3 +const msgChannelEOF = 96 + +type channelEOFMsg struct { + PeersID uint32 `sshtype:"96"` +} + +// See RFC 4254, section 4 +const msgGlobalRequest = 80 + +type globalRequestMsg struct { + Type string `sshtype:"80"` + WantReply bool + Data []byte `ssh:"rest"` +} + +// See RFC 4254, section 4 +const msgRequestSuccess = 81 + +type globalRequestSuccessMsg struct { + Data []byte `ssh:"rest" sshtype:"81"` +} + +// See RFC 4254, section 4 +const msgRequestFailure = 82 + +type globalRequestFailureMsg struct { + Data []byte `ssh:"rest" sshtype:"82"` +} + +// See RFC 4254, section 5.2 +const msgChannelWindowAdjust = 93 + +type windowAdjustMsg struct { + PeersID uint32 `sshtype:"93"` + AdditionalBytes uint32 +} + +// See RFC 4252, section 7 +const msgUserAuthPubKeyOk = 60 + +type userAuthPubKeyOkMsg struct { + Algo string `sshtype:"60"` + PubKey []byte +} + +// typeTags returns the possible type bytes for the given reflect.Type, which +// should be a struct. The possible values are separated by a '|' character. +func typeTags(structType reflect.Type) (tags []byte) { + tagStr := structType.Field(0).Tag.Get("sshtype") + + for _, tag := range strings.Split(tagStr, "|") { + i, err := strconv.Atoi(tag) + if err == nil { + tags = append(tags, byte(i)) + } + } + + return tags +} + +func fieldError(t reflect.Type, field int, problem string) error { + if problem != "" { + problem = ": " + problem + } + return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) +} + +var errShortRead = errors.New("ssh: short read") + +// Unmarshal parses data in SSH wire format into a structure. The out +// argument should be a pointer to struct. If the first member of the +// struct has the "sshtype" tag set to a '|'-separated set of numbers +// in decimal, the packet must start with one of those numbers. In +// case of error, Unmarshal returns a ParseError or +// UnexpectedMessageError. +func Unmarshal(data []byte, out interface{}) error { + v := reflect.ValueOf(out).Elem() + structType := v.Type() + expectedTypes := typeTags(structType) + + var expectedType byte + if len(expectedTypes) > 0 { + expectedType = expectedTypes[0] + } + + if len(data) == 0 { + return parseError(expectedType) + } + + if len(expectedTypes) > 0 { + goodType := false + for _, e := range expectedTypes { + if e > 0 && data[0] == e { + goodType = true + break + } + } + if !goodType { + return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) + } + data = data[1:] + } + + var ok bool + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + t := field.Type() + switch t.Kind() { + case reflect.Bool: + if len(data) < 1 { + return errShortRead + } + field.SetBool(data[0] != 0) + data = data[1:] + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + return fieldError(structType, i, "array of unsupported type") + } + if len(data) < t.Len() { + return errShortRead + } + for j, n := 0, t.Len(); j < n; j++ { + field.Index(j).Set(reflect.ValueOf(data[j])) + } + data = data[t.Len():] + case reflect.Uint64: + var u64 uint64 + if u64, data, ok = parseUint64(data); !ok { + return errShortRead + } + field.SetUint(u64) + case reflect.Uint32: + var u32 uint32 + if u32, data, ok = parseUint32(data); !ok { + return errShortRead + } + field.SetUint(uint64(u32)) + case reflect.Uint8: + if len(data) < 1 { + return errShortRead + } + field.SetUint(uint64(data[0])) + data = data[1:] + case reflect.String: + var s []byte + if s, data, ok = parseString(data); !ok { + return fieldError(structType, i, "") + } + field.SetString(string(s)) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if structType.Field(i).Tag.Get("ssh") == "rest" { + field.Set(reflect.ValueOf(data)) + data = nil + } else { + var s []byte + if s, data, ok = parseString(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(s)) + } + case reflect.String: + var nl []string + if nl, data, ok = parseNameList(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(nl)) + default: + return fieldError(structType, i, "slice of unsupported type") + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + if n, data, ok = parseInt(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(n)) + } else { + return fieldError(structType, i, "pointer to unsupported type") + } + default: + return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) + } + } + + if len(data) != 0 { + return parseError(expectedType) + } + + return nil +} + +// Marshal serializes the message in msg to SSH wire format. The msg +// argument should be a struct or pointer to struct. If the first +// member has the "sshtype" tag set to a number in decimal, that +// number is prepended to the result. If the last of member has the +// "ssh" tag set to "rest", its contents are appended to the output. +func Marshal(msg interface{}) []byte { + out := make([]byte, 0, 64) + return marshalStruct(out, msg) +} + +func marshalStruct(out []byte, msg interface{}) []byte { + v := reflect.Indirect(reflect.ValueOf(msg)) + msgTypes := typeTags(v.Type()) + if len(msgTypes) > 0 { + out = append(out, msgTypes[0]) + } + + for i, n := 0, v.NumField(); i < n; i++ { + field := v.Field(i) + switch t := field.Type(); t.Kind() { + case reflect.Bool: + var v uint8 + if field.Bool() { + v = 1 + } + out = append(out, v) + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) + } + for j, l := 0, t.Len(); j < l; j++ { + out = append(out, uint8(field.Index(j).Uint())) + } + case reflect.Uint32: + out = appendU32(out, uint32(field.Uint())) + case reflect.Uint64: + out = appendU64(out, uint64(field.Uint())) + case reflect.Uint8: + out = append(out, uint8(field.Uint())) + case reflect.String: + s := field.String() + out = appendInt(out, len(s)) + out = append(out, s...) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if v.Type().Field(i).Tag.Get("ssh") != "rest" { + out = appendInt(out, field.Len()) + } + out = append(out, field.Bytes()...) + case reflect.String: + offset := len(out) + out = appendU32(out, 0) + if n := field.Len(); n > 0 { + for j := 0; j < n; j++ { + f := field.Index(j) + if j != 0 { + out = append(out, ',') + } + out = append(out, f.String()...) + } + // overwrite length value + binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) + } + default: + panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + nValue := reflect.ValueOf(&n) + nValue.Elem().Set(field) + needed := intLength(n) + oldLength := len(out) + + if cap(out)-len(out) < needed { + newOut := make([]byte, len(out), 2*(len(out)+needed)) + copy(newOut, out) + out = newOut + } + out = out[:oldLength+needed] + marshalInt(out[oldLength:], n) + } else { + panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) + } + } + } + + return out +} + +var bigOne = big.NewInt(1) + +func parseString(in []byte) (out, rest []byte, ok bool) { + if len(in) < 4 { + return + } + length := binary.BigEndian.Uint32(in) + in = in[4:] + if uint32(len(in)) < length { + return + } + out = in[:length] + rest = in[length:] + ok = true + return +} + +var ( + comma = []byte{','} + emptyNameList = []string{} +) + +func parseNameList(in []byte) (out []string, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + if len(contents) == 0 { + out = emptyNameList + return + } + parts := bytes.Split(contents, comma) + out = make([]string, len(parts)) + for i, part := range parts { + out[i] = string(part) + } + return +} + +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + out = new(big.Int) + + if len(contents) > 0 && contents[0]&0x80 == 0x80 { + // This is a negative number + notBytes := make([]byte, len(contents)) + for i := range notBytes { + notBytes[i] = ^contents[i] + } + out.SetBytes(notBytes) + out.Add(out, bigOne) + out.Neg(out) + } else { + // Positive number + out.SetBytes(contents) + } + ok = true + return +} + +func parseUint32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, nil, false + } + return binary.BigEndian.Uint32(in), in[4:], true +} + +func parseUint64(in []byte) (uint64, []byte, bool) { + if len(in) < 8 { + return 0, nil, false + } + return binary.BigEndian.Uint64(in), in[8:], true +} + +func intLength(n *big.Int) int { + length := 4 /* length bytes */ + if n.Sign() < 0 { + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bitLen := nMinus1.BitLen() + if bitLen%8 == 0 { + // The number will need 0xff padding + length++ + } + length += (bitLen + 7) / 8 + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bitLen := n.BitLen() + if bitLen%8 == 0 { + // The number will need 0x00 padding + length++ + } + length += (bitLen + 7) / 8 + } + + return length +} + +func marshalUint32(to []byte, n uint32) []byte { + binary.BigEndian.PutUint32(to, n) + return to[4:] +} + +func marshalUint64(to []byte, n uint64) []byte { + binary.BigEndian.PutUint64(to, n) + return to[8:] +} + +func marshalInt(to []byte, n *big.Int) []byte { + lengthBytes := to + to = to[4:] + length := 0 + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + to[0] = 0xff + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with a 0x00 in order to + // stop it looking like a negative number. + to[0] = 0 + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } + + lengthBytes[0] = byte(length >> 24) + lengthBytes[1] = byte(length >> 16) + lengthBytes[2] = byte(length >> 8) + lengthBytes[3] = byte(length) + return to +} + +func writeInt(w io.Writer, n *big.Int) { + length := intLength(n) + buf := make([]byte, length) + marshalInt(buf, n) + w.Write(buf) +} + +func writeString(w io.Writer, s []byte) { + var lengthBytes [4]byte + lengthBytes[0] = byte(len(s) >> 24) + lengthBytes[1] = byte(len(s) >> 16) + lengthBytes[2] = byte(len(s) >> 8) + lengthBytes[3] = byte(len(s)) + w.Write(lengthBytes[:]) + w.Write(s) +} + +func stringLength(n int) int { + return 4 + n +} + +func marshalString(to []byte, s []byte) []byte { + to[0] = byte(len(s) >> 24) + to[1] = byte(len(s) >> 16) + to[2] = byte(len(s) >> 8) + to[3] = byte(len(s)) + to = to[4:] + copy(to, s) + return to[len(s):] +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +// Decode a packet into its corresponding message. +func decode(packet []byte) (interface{}, error) { + var msg interface{} + switch packet[0] { + case msgDisconnect: + msg = new(disconnectMsg) + case msgServiceRequest: + msg = new(serviceRequestMsg) + case msgServiceAccept: + msg = new(serviceAcceptMsg) + case msgKexInit: + msg = new(kexInitMsg) + case msgKexDHInit: + msg = new(kexDHInitMsg) + case msgKexDHReply: + msg = new(kexDHReplyMsg) + case msgUserAuthRequest: + msg = new(userAuthRequestMsg) + case msgUserAuthSuccess: + return new(userAuthSuccessMsg), nil + case msgUserAuthFailure: + msg = new(userAuthFailureMsg) + case msgUserAuthPubKeyOk: + msg = new(userAuthPubKeyOkMsg) + case msgGlobalRequest: + msg = new(globalRequestMsg) + case msgRequestSuccess: + msg = new(globalRequestSuccessMsg) + case msgRequestFailure: + msg = new(globalRequestFailureMsg) + case msgChannelOpen: + msg = new(channelOpenMsg) + case msgChannelData: + msg = new(channelDataMsg) + case msgChannelOpenConfirm: + msg = new(channelOpenConfirmMsg) + case msgChannelOpenFailure: + msg = new(channelOpenFailureMsg) + case msgChannelWindowAdjust: + msg = new(windowAdjustMsg) + case msgChannelEOF: + msg = new(channelEOFMsg) + case msgChannelClose: + msg = new(channelCloseMsg) + case msgChannelRequest: + msg = new(channelRequestMsg) + case msgChannelSuccess: + msg = new(channelRequestSuccessMsg) + case msgChannelFailure: + msg = new(channelRequestFailureMsg) + default: + return nil, unexpectedMessageError(0, packet[0]) + } + if err := Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/messages_test.go b/vendor/golang.org/x/crypto/ssh/messages_test.go new file mode 100644 index 0000000..e790764 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages_test.go @@ -0,0 +1,288 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "math/big" + "math/rand" + "reflect" + "testing" + "testing/quick" +) + +var intLengthTests = []struct { + val, length int +}{ + {0, 4 + 0}, + {1, 4 + 1}, + {127, 4 + 1}, + {128, 4 + 2}, + {-1, 4 + 1}, +} + +func TestIntLength(t *testing.T) { + for _, test := range intLengthTests { + v := new(big.Int).SetInt64(int64(test.val)) + length := intLength(v) + if length != test.length { + t.Errorf("For %d, got length %d but expected %d", test.val, length, test.length) + } + } +} + +type msgAllTypes struct { + Bool bool `sshtype:"21"` + Array [16]byte + Uint64 uint64 + Uint32 uint32 + Uint8 uint8 + String string + Strings []string + Bytes []byte + Int *big.Int + Rest []byte `ssh:"rest"` +} + +func (t *msgAllTypes) Generate(rand *rand.Rand, size int) reflect.Value { + m := &msgAllTypes{} + m.Bool = rand.Intn(2) == 1 + randomBytes(m.Array[:], rand) + m.Uint64 = uint64(rand.Int63n(1<<63 - 1)) + m.Uint32 = uint32(rand.Intn((1 << 31) - 1)) + m.Uint8 = uint8(rand.Intn(1 << 8)) + m.String = string(m.Array[:]) + m.Strings = randomNameList(rand) + m.Bytes = m.Array[:] + m.Int = randomInt(rand) + m.Rest = m.Array[:] + return reflect.ValueOf(m) +} + +func TestMarshalUnmarshal(t *testing.T) { + rand := rand.New(rand.NewSource(0)) + iface := &msgAllTypes{} + ty := reflect.ValueOf(iface).Type() + + n := 100 + if testing.Short() { + n = 5 + } + for j := 0; j < n; j++ { + v, ok := quick.Value(ty, rand) + if !ok { + t.Errorf("failed to create value") + break + } + + m1 := v.Elem().Interface() + m2 := iface + + marshaled := Marshal(m1) + if err := Unmarshal(marshaled, m2); err != nil { + t.Errorf("Unmarshal %#v: %s", m1, err) + break + } + + if !reflect.DeepEqual(v.Interface(), m2) { + t.Errorf("got: %#v\nwant:%#v\n%x", m2, m1, marshaled) + break + } + } +} + +func TestUnmarshalEmptyPacket(t *testing.T) { + var b []byte + var m channelRequestSuccessMsg + if err := Unmarshal(b, &m); err == nil { + t.Fatalf("unmarshal of empty slice succeeded") + } +} + +func TestUnmarshalUnexpectedPacket(t *testing.T) { + type S struct { + I uint32 `sshtype:"43"` + S string + B bool + } + + s := S{11, "hello", true} + packet := Marshal(s) + packet[0] = 42 + roundtrip := S{} + err := Unmarshal(packet, &roundtrip) + if err == nil { + t.Fatal("expected error, not nil") + } +} + +func TestMarshalPtr(t *testing.T) { + s := struct { + S string + }{"hello"} + + m1 := Marshal(s) + m2 := Marshal(&s) + if !bytes.Equal(m1, m2) { + t.Errorf("got %q, want %q for marshaled pointer", m2, m1) + } +} + +func TestBareMarshalUnmarshal(t *testing.T) { + type S struct { + I uint32 + S string + B bool + } + + s := S{42, "hello", true} + packet := Marshal(s) + roundtrip := S{} + Unmarshal(packet, &roundtrip) + + if !reflect.DeepEqual(s, roundtrip) { + t.Errorf("got %#v, want %#v", roundtrip, s) + } +} + +func TestBareMarshal(t *testing.T) { + type S2 struct { + I uint32 + } + s := S2{42} + packet := Marshal(s) + i, rest, ok := parseUint32(packet) + if len(rest) > 0 || !ok { + t.Errorf("parseInt(%q): parse error", packet) + } + if i != s.I { + t.Errorf("got %d, want %d", i, s.I) + } +} + +func TestUnmarshalShortKexInitPacket(t *testing.T) { + // This used to panic. + // Issue 11348 + packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff} + kim := &kexInitMsg{} + if err := Unmarshal(packet, kim); err == nil { + t.Error("truncated packet unmarshaled without error") + } +} + +func TestMarshalMultiTag(t *testing.T) { + var res struct { + A uint32 `sshtype:"1|2"` + } + + good1 := struct { + A uint32 `sshtype:"1"` + }{ + 1, + } + good2 := struct { + A uint32 `sshtype:"2"` + }{ + 1, + } + + if e := Unmarshal(Marshal(good1), &res); e != nil { + t.Errorf("error unmarshaling multipart tag: %v", e) + } + + if e := Unmarshal(Marshal(good2), &res); e != nil { + t.Errorf("error unmarshaling multipart tag: %v", e) + } + + bad1 := struct { + A uint32 `sshtype:"3"` + }{ + 1, + } + if e := Unmarshal(Marshal(bad1), &res); e == nil { + t.Errorf("bad struct unmarshaled without error") + } +} + +func randomBytes(out []byte, rand *rand.Rand) { + for i := 0; i < len(out); i++ { + out[i] = byte(rand.Int31()) + } +} + +func randomNameList(rand *rand.Rand) []string { + ret := make([]string, rand.Int31()&15) + for i := range ret { + s := make([]byte, 1+(rand.Int31()&15)) + for j := range s { + s[j] = 'a' + uint8(rand.Int31()&15) + } + ret[i] = string(s) + } + return ret +} + +func randomInt(rand *rand.Rand) *big.Int { + return new(big.Int).SetInt64(int64(int32(rand.Uint32()))) +} + +func (*kexInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { + ki := &kexInitMsg{} + randomBytes(ki.Cookie[:], rand) + ki.KexAlgos = randomNameList(rand) + ki.ServerHostKeyAlgos = randomNameList(rand) + ki.CiphersClientServer = randomNameList(rand) + ki.CiphersServerClient = randomNameList(rand) + ki.MACsClientServer = randomNameList(rand) + ki.MACsServerClient = randomNameList(rand) + ki.CompressionClientServer = randomNameList(rand) + ki.CompressionServerClient = randomNameList(rand) + ki.LanguagesClientServer = randomNameList(rand) + ki.LanguagesServerClient = randomNameList(rand) + if rand.Int31()&1 == 1 { + ki.FirstKexFollows = true + } + return reflect.ValueOf(ki) +} + +func (*kexDHInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { + dhi := &kexDHInitMsg{} + dhi.X = randomInt(rand) + return reflect.ValueOf(dhi) +} + +var ( + _kexInitMsg = new(kexInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() + _kexDHInitMsg = new(kexDHInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() + + _kexInit = Marshal(_kexInitMsg) + _kexDHInit = Marshal(_kexDHInitMsg) +) + +func BenchmarkMarshalKexInitMsg(b *testing.B) { + for i := 0; i < b.N; i++ { + Marshal(_kexInitMsg) + } +} + +func BenchmarkUnmarshalKexInitMsg(b *testing.B) { + m := new(kexInitMsg) + for i := 0; i < b.N; i++ { + Unmarshal(_kexInit, m) + } +} + +func BenchmarkMarshalKexDHInitMsg(b *testing.B) { + for i := 0; i < b.N; i++ { + Marshal(_kexDHInitMsg) + } +} + +func BenchmarkUnmarshalKexDHInitMsg(b *testing.B) { + m := new(kexDHInitMsg) + for i := 0; i < b.N; i++ { + Unmarshal(_kexDHInit, m) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go new file mode 100644 index 0000000..f190162 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -0,0 +1,330 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" +) + +// debugMux, if set, causes messages in the connection protocol to be +// logged. +const debugMux = false + +// chanList is a thread safe channel list. +type chanList struct { + // protects concurrent access to chans + sync.Mutex + + // chans are indexed by the local id of the channel, which the + // other side should send in the PeersId field. + chans []*channel + + // This is a debugging aid: it offsets all IDs by this + // amount. This helps distinguish otherwise identical + // server/client muxes + offset uint32 +} + +// Assigns a channel ID to the given channel. +func (c *chanList) add(ch *channel) uint32 { + c.Lock() + defer c.Unlock() + for i := range c.chans { + if c.chans[i] == nil { + c.chans[i] = ch + return uint32(i) + c.offset + } + } + c.chans = append(c.chans, ch) + return uint32(len(c.chans)-1) + c.offset +} + +// getChan returns the channel for the given ID. +func (c *chanList) getChan(id uint32) *channel { + id -= c.offset + + c.Lock() + defer c.Unlock() + if id < uint32(len(c.chans)) { + return c.chans[id] + } + return nil +} + +func (c *chanList) remove(id uint32) { + id -= c.offset + c.Lock() + if id < uint32(len(c.chans)) { + c.chans[id] = nil + } + c.Unlock() +} + +// dropAll forgets all channels it knows, returning them in a slice. +func (c *chanList) dropAll() []*channel { + c.Lock() + defer c.Unlock() + var r []*channel + + for _, ch := range c.chans { + if ch == nil { + continue + } + r = append(r, ch) + } + c.chans = nil + return r +} + +// mux represents the state for the SSH connection protocol, which +// multiplexes many channels onto a single packet transport. +type mux struct { + conn packetConn + chanList chanList + + incomingChannels chan NewChannel + + globalSentMu sync.Mutex + globalResponses chan interface{} + incomingRequests chan *Request + + errCond *sync.Cond + err error +} + +// When debugging, each new chanList instantiation has a different +// offset. +var globalOff uint32 + +func (m *mux) Wait() error { + m.errCond.L.Lock() + defer m.errCond.L.Unlock() + for m.err == nil { + m.errCond.Wait() + } + return m.err +} + +// newMux returns a mux that runs over the given connection. +func newMux(p packetConn) *mux { + m := &mux{ + conn: p, + incomingChannels: make(chan NewChannel, chanSize), + globalResponses: make(chan interface{}, 1), + incomingRequests: make(chan *Request, chanSize), + errCond: newCond(), + } + if debugMux { + m.chanList.offset = atomic.AddUint32(&globalOff, 1) + } + + go m.loop() + return m +} + +func (m *mux) sendMessage(msg interface{}) error { + p := Marshal(msg) + if debugMux { + log.Printf("send global(%d): %#v", m.chanList.offset, msg) + } + return m.conn.writePacket(p) +} + +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { + if wantReply { + m.globalSentMu.Lock() + defer m.globalSentMu.Unlock() + } + + if err := m.sendMessage(globalRequestMsg{ + Type: name, + WantReply: wantReply, + Data: payload, + }); err != nil { + return false, nil, err + } + + if !wantReply { + return false, nil, nil + } + + msg, ok := <-m.globalResponses + if !ok { + return false, nil, io.EOF + } + switch msg := msg.(type) { + case *globalRequestFailureMsg: + return false, msg.Data, nil + case *globalRequestSuccessMsg: + return true, msg.Data, nil + default: + return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) + } +} + +// ackRequest must be called after processing a global request that +// has WantReply set. +func (m *mux) ackRequest(ok bool, data []byte) error { + if ok { + return m.sendMessage(globalRequestSuccessMsg{Data: data}) + } + return m.sendMessage(globalRequestFailureMsg{Data: data}) +} + +func (m *mux) Close() error { + return m.conn.Close() +} + +// loop runs the connection machine. It will process packets until an +// error is encountered. To synchronize on loop exit, use mux.Wait. +func (m *mux) loop() { + var err error + for err == nil { + err = m.onePacket() + } + + for _, ch := range m.chanList.dropAll() { + ch.close() + } + + close(m.incomingChannels) + close(m.incomingRequests) + close(m.globalResponses) + + m.conn.Close() + + m.errCond.L.Lock() + m.err = err + m.errCond.Broadcast() + m.errCond.L.Unlock() + + if debugMux { + log.Println("loop exit", err) + } +} + +// onePacket reads and processes one packet. +func (m *mux) onePacket() error { + packet, err := m.conn.readPacket() + if err != nil { + return err + } + + if debugMux { + if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { + log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) + } else { + p, _ := decode(packet) + log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) + } + } + + switch packet[0] { + case msgChannelOpen: + return m.handleChannelOpen(packet) + case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: + return m.handleGlobalPacket(packet) + } + + // assume a channel packet. + if len(packet) < 5 { + return parseError(packet[0]) + } + id := binary.BigEndian.Uint32(packet[1:]) + ch := m.chanList.getChan(id) + if ch == nil { + return fmt.Errorf("ssh: invalid channel %d", id) + } + + return ch.handlePacket(packet) +} + +func (m *mux) handleGlobalPacket(packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + case *globalRequestMsg: + m.incomingRequests <- &Request{ + Type: msg.Type, + WantReply: msg.WantReply, + Payload: msg.Data, + mux: m, + } + case *globalRequestSuccessMsg, *globalRequestFailureMsg: + m.globalResponses <- msg + default: + panic(fmt.Sprintf("not a global message %#v", msg)) + } + + return nil +} + +// handleChannelOpen schedules a channel to be Accept()ed. +func (m *mux) handleChannelOpen(packet []byte) error { + var msg channelOpenMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + failMsg := channelOpenFailureMsg{ + PeersID: msg.PeersID, + Reason: ConnectionFailed, + Message: "invalid request", + Language: "en_US.UTF-8", + } + return m.sendMessage(failMsg) + } + + c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) + c.remoteId = msg.PeersID + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.PeersWindow) + m.incomingChannels <- c + return nil +} + +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { + ch, err := m.openChannel(chanType, extra) + if err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { + ch := m.newChannel(chanType, channelOutbound, extra) + + ch.maxIncomingPayload = channelMaxPacket + + open := channelOpenMsg{ + ChanType: chanType, + PeersWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + TypeSpecificData: extra, + PeersID: ch.localId, + } + if err := m.sendMessage(open); err != nil { + return nil, err + } + + switch msg := (<-ch.msg).(type) { + case *channelOpenConfirmMsg: + return ch, nil + case *channelOpenFailureMsg: + return nil, &OpenChannelError{msg.Reason, msg.Message} + default: + return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/mux_test.go b/vendor/golang.org/x/crypto/ssh/mux_test.go new file mode 100644 index 0000000..25d2181 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux_test.go @@ -0,0 +1,505 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "io/ioutil" + "sync" + "testing" +) + +func muxPair() (*mux, *mux) { + a, b := memPipe() + + s := newMux(a) + c := newMux(b) + + return s, c +} + +// Returns both ends of a channel, and the mux for the the 2nd +// channel. +func channelPair(t *testing.T) (*channel, *channel, *mux) { + c, s := muxPair() + + res := make(chan *channel, 1) + go func() { + newCh, ok := <-s.incomingChannels + if !ok { + t.Fatalf("No incoming channel") + } + if newCh.ChannelType() != "chan" { + t.Fatalf("got type %q want chan", newCh.ChannelType()) + } + ch, _, err := newCh.Accept() + if err != nil { + t.Fatalf("Accept %v", err) + } + res <- ch.(*channel) + }() + + ch, err := c.openChannel("chan", nil) + if err != nil { + t.Fatalf("OpenChannel: %v", err) + } + + return <-res, ch, c +} + +// Test that stderr and stdout can be addressed from different +// goroutines. This is intended for use with the race detector. +func TestMuxChannelExtendedThreadSafety(t *testing.T) { + writer, reader, mux := channelPair(t) + defer writer.Close() + defer reader.Close() + defer mux.Close() + + var wr, rd sync.WaitGroup + magic := "hello world" + + wr.Add(2) + go func() { + io.WriteString(writer, magic) + wr.Done() + }() + go func() { + io.WriteString(writer.Stderr(), magic) + wr.Done() + }() + + rd.Add(2) + go func() { + c, err := ioutil.ReadAll(reader) + if string(c) != magic { + t.Fatalf("stdout read got %q, want %q (error %s)", c, magic, err) + } + rd.Done() + }() + go func() { + c, err := ioutil.ReadAll(reader.Stderr()) + if string(c) != magic { + t.Fatalf("stderr read got %q, want %q (error %s)", c, magic, err) + } + rd.Done() + }() + + wr.Wait() + writer.CloseWrite() + rd.Wait() +} + +func TestMuxReadWrite(t *testing.T) { + s, c, mux := channelPair(t) + defer s.Close() + defer c.Close() + defer mux.Close() + + magic := "hello world" + magicExt := "hello stderr" + go func() { + _, err := s.Write([]byte(magic)) + if err != nil { + t.Fatalf("Write: %v", err) + } + _, err = s.Extended(1).Write([]byte(magicExt)) + if err != nil { + t.Fatalf("Write: %v", err) + } + err = s.Close() + if err != nil { + t.Fatalf("Close: %v", err) + } + }() + + var buf [1024]byte + n, err := c.Read(buf[:]) + if err != nil { + t.Fatalf("server Read: %v", err) + } + got := string(buf[:n]) + if got != magic { + t.Fatalf("server: got %q want %q", got, magic) + } + + n, err = c.Extended(1).Read(buf[:]) + if err != nil { + t.Fatalf("server Read: %v", err) + } + + got = string(buf[:n]) + if got != magicExt { + t.Fatalf("server: got %q want %q", got, magic) + } +} + +func TestMuxChannelOverflow(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + wDone := make(chan int, 1) + go func() { + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + writer.Write(make([]byte, 1)) + wDone <- 1 + }() + writer.remoteWin.waitWriterBlocked() + + // Send 1 byte. + packet := make([]byte, 1+4+4+1) + packet[0] = msgChannelData + marshalUint32(packet[1:], writer.remoteId) + marshalUint32(packet[5:], uint32(1)) + packet[9] = 42 + + if err := writer.mux.conn.writePacket(packet); err != nil { + t.Errorf("could not send packet") + } + if _, err := reader.SendRequest("hello", true, nil); err == nil { + t.Errorf("SendRequest succeeded.") + } + <-wDone +} + +func TestMuxChannelCloseWriteUnblock(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + wDone := make(chan int, 1) + go func() { + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + if _, err := writer.Write(make([]byte, 1)); err != io.EOF { + t.Errorf("got %v, want EOF for unblock write", err) + } + wDone <- 1 + }() + + writer.remoteWin.waitWriterBlocked() + reader.Close() + <-wDone +} + +func TestMuxConnectionCloseWriteUnblock(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + wDone := make(chan int, 1) + go func() { + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + if _, err := writer.Write(make([]byte, 1)); err != io.EOF { + t.Errorf("got %v, want EOF for unblock write", err) + } + wDone <- 1 + }() + + writer.remoteWin.waitWriterBlocked() + mux.Close() + <-wDone +} + +func TestMuxReject(t *testing.T) { + client, server := muxPair() + defer server.Close() + defer client.Close() + + go func() { + ch, ok := <-server.incomingChannels + if !ok { + t.Fatalf("Accept") + } + if ch.ChannelType() != "ch" || string(ch.ExtraData()) != "extra" { + t.Fatalf("unexpected channel: %q, %q", ch.ChannelType(), ch.ExtraData()) + } + ch.Reject(RejectionReason(42), "message") + }() + + ch, err := client.openChannel("ch", []byte("extra")) + if ch != nil { + t.Fatal("openChannel not rejected") + } + + ocf, ok := err.(*OpenChannelError) + if !ok { + t.Errorf("got %#v want *OpenChannelError", err) + } else if ocf.Reason != 42 || ocf.Message != "message" { + t.Errorf("got %#v, want {Reason: 42, Message: %q}", ocf, "message") + } + + want := "ssh: rejected: unknown reason 42 (message)" + if err.Error() != want { + t.Errorf("got %q, want %q", err.Error(), want) + } +} + +func TestMuxChannelRequest(t *testing.T) { + client, server, mux := channelPair(t) + defer server.Close() + defer client.Close() + defer mux.Close() + + var received int + var wg sync.WaitGroup + wg.Add(1) + go func() { + for r := range server.incomingRequests { + received++ + r.Reply(r.Type == "yes", nil) + } + wg.Done() + }() + _, err := client.SendRequest("yes", false, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + ok, err := client.SendRequest("yes", true, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + + if !ok { + t.Errorf("SendRequest(yes): %v", ok) + + } + + ok, err = client.SendRequest("no", true, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + if ok { + t.Errorf("SendRequest(no): %v", ok) + + } + + client.Close() + wg.Wait() + + if received != 3 { + t.Errorf("got %d requests, want %d", received, 3) + } +} + +func TestMuxGlobalRequest(t *testing.T) { + clientMux, serverMux := muxPair() + defer serverMux.Close() + defer clientMux.Close() + + var seen bool + go func() { + for r := range serverMux.incomingRequests { + seen = seen || r.Type == "peek" + if r.WantReply { + err := r.Reply(r.Type == "yes", + append([]byte(r.Type), r.Payload...)) + if err != nil { + t.Errorf("AckRequest: %v", err) + } + } + } + }() + + _, _, err := clientMux.SendRequest("peek", false, nil) + if err != nil { + t.Errorf("SendRequest: %v", err) + } + + ok, data, err := clientMux.SendRequest("yes", true, []byte("a")) + if !ok || string(data) != "yesa" || err != nil { + t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v", + ok, data, err) + } + if ok, data, err := clientMux.SendRequest("yes", true, []byte("a")); !ok || string(data) != "yesa" || err != nil { + t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v", + ok, data, err) + } + + if ok, data, err := clientMux.SendRequest("no", true, []byte("a")); ok || string(data) != "noa" || err != nil { + t.Errorf("SendRequest(\"no\", true, \"a\"): %v %v %v", + ok, data, err) + } + + if !seen { + t.Errorf("never saw 'peek' request") + } +} + +func TestMuxGlobalRequestUnblock(t *testing.T) { + clientMux, serverMux := muxPair() + defer serverMux.Close() + defer clientMux.Close() + + result := make(chan error, 1) + go func() { + _, _, err := clientMux.SendRequest("hello", true, nil) + result <- err + }() + + <-serverMux.incomingRequests + serverMux.conn.Close() + err := <-result + + if err != io.EOF { + t.Errorf("want EOF, got %v", io.EOF) + } +} + +func TestMuxChannelRequestUnblock(t *testing.T) { + a, b, connB := channelPair(t) + defer a.Close() + defer b.Close() + defer connB.Close() + + result := make(chan error, 1) + go func() { + _, err := a.SendRequest("hello", true, nil) + result <- err + }() + + <-b.incomingRequests + connB.conn.Close() + err := <-result + + if err != io.EOF { + t.Errorf("want EOF, got %v", err) + } +} + +func TestMuxCloseChannel(t *testing.T) { + r, w, mux := channelPair(t) + defer mux.Close() + defer r.Close() + defer w.Close() + + result := make(chan error, 1) + go func() { + var b [1024]byte + _, err := r.Read(b[:]) + result <- err + }() + if err := w.Close(); err != nil { + t.Errorf("w.Close: %v", err) + } + + if _, err := w.Write([]byte("hello")); err != io.EOF { + t.Errorf("got err %v, want io.EOF after Close", err) + } + + if err := <-result; err != io.EOF { + t.Errorf("got %v (%T), want io.EOF", err, err) + } +} + +func TestMuxCloseWriteChannel(t *testing.T) { + r, w, mux := channelPair(t) + defer mux.Close() + + result := make(chan error, 1) + go func() { + var b [1024]byte + _, err := r.Read(b[:]) + result <- err + }() + if err := w.CloseWrite(); err != nil { + t.Errorf("w.CloseWrite: %v", err) + } + + if _, err := w.Write([]byte("hello")); err != io.EOF { + t.Errorf("got err %v, want io.EOF after CloseWrite", err) + } + + if err := <-result; err != io.EOF { + t.Errorf("got %v (%T), want io.EOF", err, err) + } +} + +func TestMuxInvalidRecord(t *testing.T) { + a, b := muxPair() + defer a.Close() + defer b.Close() + + packet := make([]byte, 1+4+4+1) + packet[0] = msgChannelData + marshalUint32(packet[1:], 29348723 /* invalid channel id */) + marshalUint32(packet[5:], 1) + packet[9] = 42 + + a.conn.writePacket(packet) + go a.SendRequest("hello", false, nil) + // 'a' wrote an invalid packet, so 'b' has exited. + req, ok := <-b.incomingRequests + if ok { + t.Errorf("got request %#v after receiving invalid packet", req) + } +} + +func TestZeroWindowAdjust(t *testing.T) { + a, b, mux := channelPair(t) + defer a.Close() + defer b.Close() + defer mux.Close() + + go func() { + io.WriteString(a, "hello") + // bogus adjust. + a.sendMessage(windowAdjustMsg{}) + io.WriteString(a, "world") + a.Close() + }() + + want := "helloworld" + c, _ := ioutil.ReadAll(b) + if string(c) != want { + t.Errorf("got %q want %q", c, want) + } +} + +func TestMuxMaxPacketSize(t *testing.T) { + a, b, mux := channelPair(t) + defer a.Close() + defer b.Close() + defer mux.Close() + + large := make([]byte, a.maxRemotePayload+1) + packet := make([]byte, 1+4+4+1+len(large)) + packet[0] = msgChannelData + marshalUint32(packet[1:], a.remoteId) + marshalUint32(packet[5:], uint32(len(large))) + packet[9] = 42 + + if err := a.mux.conn.writePacket(packet); err != nil { + t.Errorf("could not send packet") + } + + go a.SendRequest("hello", false, nil) + + _, ok := <-b.incomingRequests + if ok { + t.Errorf("connection still alive after receiving large packet.") + } +} + +// Don't ship code with debug=true. +func TestDebug(t *testing.T) { + if debugMux { + t.Error("mux debug switched on") + } + if debugHandshake { + t.Error("handshake debug switched on") + } + if debugTransport { + t.Error("transport debug switched on") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go new file mode 100644 index 0000000..d0f4825 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -0,0 +1,593 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "strings" +) + +// The Permissions type holds fine-grained permissions that are +// specific to a user or a specific authentication method for a user. +// The Permissions value for a successful authentication attempt is +// available in ServerConn, so it can be used to pass information from +// the user-authentication phase to the application layer. +type Permissions struct { + // CriticalOptions indicate restrictions to the default + // permissions, and are typically used in conjunction with + // user certificates. The standard for SSH certificates + // defines "force-command" (only allow the given command to + // execute) and "source-address" (only allow connections from + // the given address). The SSH package currently only enforces + // the "source-address" critical option. It is up to server + // implementations to enforce other critical options, such as + // "force-command", by checking them after the SSH handshake + // is successful. In general, SSH servers should reject + // connections that specify critical options that are unknown + // or not supported. + CriticalOptions map[string]string + + // Extensions are extra functionality that the server may + // offer on authenticated connections. Lack of support for an + // extension does not preclude authenticating a user. Common + // extensions are "permit-agent-forwarding", + // "permit-X11-forwarding". The Go SSH library currently does + // not act on any extension, and it is up to server + // implementations to honor them. Extensions can be used to + // pass data from the authentication callbacks to the server + // application layer. + Extensions map[string]string +} + +// ServerConfig holds server specific configuration data. +type ServerConfig struct { + // Config contains configuration shared between client and server. + Config + + hostKeys []Signer + + // NoClientAuth is true if clients are allowed to connect without + // authenticating. + NoClientAuth bool + + // MaxAuthTries specifies the maximum number of authentication attempts + // permitted per connection. If set to a negative number, the number of + // attempts are unlimited. If set to zero, the number of attempts are limited + // to 6. + MaxAuthTries int + + // PasswordCallback, if non-nil, is called when a user + // attempts to authenticate using a password. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback, if non-nil, is called when a client + // offers a public key for authentication. It must return a nil error + // if the given public key can be used to authenticate the + // given user. For example, see CertChecker.Authenticate. A + // call to this function does not guarantee that the key + // offered is in fact used to authenticate. To record any data + // depending on the public key, store it inside a + // Permissions.Extensions entry. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback, if non-nil, is called when + // keyboard-interactive authentication is selected (RFC + // 4256). The client object's Challenge function should be + // used to query the user. The callback may offer multiple + // Challenge rounds. To avoid information leaks, the client + // should be presented a challenge even if the user is + // unknown. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // AuthLogCallback, if non-nil, is called to log all authentication + // attempts. + AuthLogCallback func(conn ConnMetadata, method string, err error) + + // ServerVersion is the version identification string to announce in + // the public handshake. + // If empty, a reasonable default is used. + // Note that RFC 4253 section 4.2 requires that this string start with + // "SSH-2.0-". + ServerVersion string + + // BannerCallback, if present, is called and the return string is sent to + // the client after key exchange completed but before authentication. + BannerCallback func(conn ConnMetadata) string +} + +// AddHostKey adds a private key as a host key. If an existing host +// key exists with the same algorithm, it is overwritten. Each server +// config must have at least one host key. +func (s *ServerConfig) AddHostKey(key Signer) { + for i, k := range s.hostKeys { + if k.PublicKey().Type() == key.PublicKey().Type() { + s.hostKeys[i] = key + return + } + } + + s.hostKeys = append(s.hostKeys, key) +} + +// cachedPubKey contains the results of querying whether a public key is +// acceptable for a user. +type cachedPubKey struct { + user string + pubKeyData []byte + result error + perms *Permissions +} + +const maxCachedPubKeys = 16 + +// pubKeyCache caches tests for public keys. Since SSH clients +// will query whether a public key is acceptable before attempting to +// authenticate with it, we end up with duplicate queries for public +// key validity. The cache only applies to a single ServerConn. +type pubKeyCache struct { + keys []cachedPubKey +} + +// get returns the result for a given user/algo/key tuple. +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { + for _, k := range c.keys { + if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { + return k, true + } + } + return cachedPubKey{}, false +} + +// add adds the given tuple to the cache. +func (c *pubKeyCache) add(candidate cachedPubKey) { + if len(c.keys) < maxCachedPubKeys { + c.keys = append(c.keys, candidate) + } +} + +// ServerConn is an authenticated SSH connection, as seen from the +// server +type ServerConn struct { + Conn + + // If the succeeding authentication callback returned a + // non-nil Permissions pointer, it is stored here. + Permissions *Permissions +} + +// NewServerConn starts a new SSH server with c as the underlying +// transport. It starts with a handshake and, if the handshake is +// unsuccessful, it closes the connection and returns an error. The +// Request and NewChannel channels must be serviced, or the connection +// will hang. +// +// The returned error may be of type *ServerAuthError for +// authentication errors. +func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.MaxAuthTries == 0 { + fullConf.MaxAuthTries = 6 + } + + s := &connection{ + sshConn: sshConn{conn: c}, + } + perms, err := s.serverHandshake(&fullConf) + if err != nil { + c.Close() + return nil, nil, nil, err + } + return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil +} + +// signAndMarshal signs the data with the appropriate algorithm, +// and serializes the result in SSH wire format. +func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { + sig, err := k.Sign(rand, data) + if err != nil { + return nil, err + } + + return Marshal(sig), nil +} + +// handshake performs key exchange and user authentication. +func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { + if len(config.hostKeys) == 0 { + return nil, errors.New("ssh: server has no host keys") + } + + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if config.ServerVersion != "" { + s.serverVersion = []byte(config.ServerVersion) + } else { + s.serverVersion = []byte(packageVersion) + } + var err error + s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) + if err != nil { + return nil, err + } + + tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) + s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) + + if err := s.transport.waitSession(); err != nil { + return nil, err + } + + // We just did the key change, so the session ID is established. + s.sessionID = s.transport.getSessionID() + + var packet []byte + if packet, err = s.transport.readPacket(); err != nil { + return nil, err + } + + var serviceRequest serviceRequestMsg + if err = Unmarshal(packet, &serviceRequest); err != nil { + return nil, err + } + if serviceRequest.Service != serviceUserAuth { + return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") + } + serviceAccept := serviceAcceptMsg{ + Service: serviceUserAuth, + } + if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { + return nil, err + } + + perms, err := s.serverAuthenticate(config) + if err != nil { + return nil, err + } + s.mux = newMux(s.transport) + return perms, err +} + +func isAcceptableAlgo(algo string) bool { + switch algo { + case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + return true + } + return false +} + +func checkSourceAddress(addr net.Addr, sourceAddrs string) error { + if addr == nil { + return errors.New("ssh: no address known for client, but source-address match required") + } + + tcpAddr, ok := addr.(*net.TCPAddr) + if !ok { + return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) + } + + for _, sourceAddr := range strings.Split(sourceAddrs, ",") { + if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { + if allowedIP.Equal(tcpAddr.IP) { + return nil + } + } else { + _, ipNet, err := net.ParseCIDR(sourceAddr) + if err != nil { + return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) + } + + if ipNet.Contains(tcpAddr.IP) { + return nil + } + } + } + + return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) +} + +// ServerAuthError represents server authentication errors and is +// sometimes returned by NewServerConn. It appends any authentication +// errors that may occur, and is returned if all of the authentication +// methods provided by the user failed to authenticate. +type ServerAuthError struct { + // Errors contains authentication errors returned by the authentication + // callback methods. The first entry is typically ErrNoAuth. + Errors []error +} + +func (l ServerAuthError) Error() string { + var errs []string + for _, err := range l.Errors { + errs = append(errs, err.Error()) + } + return "[" + strings.Join(errs, ", ") + "]" +} + +// ErrNoAuth is the error value returned if no +// authentication method has been passed yet. This happens as a normal +// part of the authentication loop, since the client first tries +// 'none' authentication to discover available methods. +// It is returned in ServerAuthError.Errors from NewServerConn. +var ErrNoAuth = errors.New("ssh: no auth passed yet") + +func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + sessionID := s.transport.getSessionID() + var cache pubKeyCache + var perms *Permissions + + authFailures := 0 + var authErrs []error + var displayedBanner bool + +userAuthLoop: + for { + if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { + discMsg := &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + } + + if err := s.transport.writePacket(Marshal(discMsg)); err != nil { + return nil, err + } + + return nil, discMsg + } + + var userAuthReq userAuthRequestMsg + if packet, err := s.transport.readPacket(); err != nil { + if err == io.EOF { + return nil, &ServerAuthError{Errors: authErrs} + } + return nil, err + } else if err = Unmarshal(packet, &userAuthReq); err != nil { + return nil, err + } + + if userAuthReq.Service != serviceSSH { + return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) + } + + s.user = userAuthReq.User + + if !displayedBanner && config.BannerCallback != nil { + displayedBanner = true + msg := config.BannerCallback(s) + if msg != "" { + bannerMsg := &userAuthBannerMsg{ + Message: msg, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + return nil, err + } + } + } + + perms = nil + authErr := ErrNoAuth + + switch userAuthReq.Method { + case "none": + if config.NoClientAuth { + authErr = nil + } + + // allow initial attempt of 'none' without penalty + if authFailures == 0 { + authFailures-- + } + case "password": + if config.PasswordCallback == nil { + authErr = errors.New("ssh: password auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 || payload[0] != 0 { + return nil, parseError(msgUserAuthRequest) + } + payload = payload[1:] + password, payload, ok := parseString(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + perms, authErr = config.PasswordCallback(s, password) + case "keyboard-interactive": + if config.KeyboardInteractiveCallback == nil { + authErr = errors.New("ssh: keyboard-interactive auth not configubred") + break + } + + prompter := &sshClientKeyboardInteractive{s} + perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + case "publickey": + if config.PublicKeyCallback == nil { + authErr = errors.New("ssh: publickey auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 { + return nil, parseError(msgUserAuthRequest) + } + isQuery := payload[0] == 0 + payload = payload[1:] + algoBytes, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + algo := string(algoBytes) + if !isAcceptableAlgo(algo) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) + break + } + + pubKeyData, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + + pubKey, err := ParsePublicKey(pubKeyData) + if err != nil { + return nil, err + } + + candidate, ok := cache.get(s.user, pubKeyData) + if !ok { + candidate.user = s.user + candidate.pubKeyData = pubKeyData + candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) + if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + candidate.result = checkSourceAddress( + s.RemoteAddr(), + candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + } + cache.add(candidate) + } + + if isQuery { + // The client can query if the given public key + // would be okay. + + if len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + if candidate.result == nil { + okMsg := userAuthPubKeyOkMsg{ + Algo: algo, + PubKey: pubKeyData, + } + if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { + return nil, err + } + continue userAuthLoop + } + authErr = candidate.result + } else { + sig, payload, ok := parseSignature(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + // Ensure the public key algo and signature algo + // are supported. Compare the private key + // algorithm name that corresponds to algo with + // sig.Format. This is usually the same, but + // for certs, the names differ. + if !isAcceptableAlgo(sig.Format) { + break + } + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + + if err := pubKey.Verify(signedData, sig); err != nil { + return nil, err + } + + authErr = candidate.result + perms = candidate.perms + } + default: + authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) + } + + authErrs = append(authErrs, authErr) + + if config.AuthLogCallback != nil { + config.AuthLogCallback(s, userAuthReq.Method, authErr) + } + + if authErr == nil { + break userAuthLoop + } + + authFailures++ + + var failureMsg userAuthFailureMsg + if config.PasswordCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "password") + } + if config.PublicKeyCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "publickey") + } + if config.KeyboardInteractiveCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") + } + + if len(failureMsg.Methods) == 0 { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { + return nil, err + } + } + + if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { + return nil, err + } + return perms, nil +} + +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by +// asking the client on the other side of a ServerConn. +type sshClientKeyboardInteractive struct { + *connection +} + +func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) != len(echos) { + return nil, errors.New("ssh: echos and questions must have equal length") + } + + var prompts []byte + for i := range questions { + prompts = appendString(prompts, questions[i]) + prompts = appendBool(prompts, echos[i]) + } + + if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Instruction: instruction, + NumPrompts: uint32(len(questions)), + Prompts: prompts, + })); err != nil { + return nil, err + } + + packet, err := c.transport.readPacket() + if err != nil { + return nil, err + } + if packet[0] != msgUserAuthInfoResponse { + return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) + } + packet = packet[1:] + + n, packet, ok := parseUint32(packet) + if !ok || int(n) != len(questions) { + return nil, parseError(msgUserAuthInfoResponse) + } + + for i := uint32(0); i < n; i++ { + ans, rest, ok := parseString(packet) + if !ok { + return nil, parseError(msgUserAuthInfoResponse) + } + + answers = append(answers, string(ans)) + packet = rest + } + if len(packet) != 0 { + return nil, errors.New("ssh: junk at end of message") + } + + return answers, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go new file mode 100644 index 0000000..d3321f6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -0,0 +1,647 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session implements an interactive session described in +// "RFC 4254, section 6". + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "sync" +) + +type Signal string + +// POSIX signals as listed in RFC 4254 Section 6.10. +const ( + SIGABRT Signal = "ABRT" + SIGALRM Signal = "ALRM" + SIGFPE Signal = "FPE" + SIGHUP Signal = "HUP" + SIGILL Signal = "ILL" + SIGINT Signal = "INT" + SIGKILL Signal = "KILL" + SIGPIPE Signal = "PIPE" + SIGQUIT Signal = "QUIT" + SIGSEGV Signal = "SEGV" + SIGTERM Signal = "TERM" + SIGUSR1 Signal = "USR1" + SIGUSR2 Signal = "USR2" +) + +var signals = map[Signal]int{ + SIGABRT: 6, + SIGALRM: 14, + SIGFPE: 8, + SIGHUP: 1, + SIGILL: 4, + SIGINT: 2, + SIGKILL: 9, + SIGPIPE: 13, + SIGQUIT: 3, + SIGSEGV: 11, + SIGTERM: 15, +} + +type TerminalModes map[uint8]uint32 + +// POSIX terminal mode flags as listed in RFC 4254 Section 8. +const ( + tty_OP_END = 0 + VINTR = 1 + VQUIT = 2 + VERASE = 3 + VKILL = 4 + VEOF = 5 + VEOL = 6 + VEOL2 = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VDSUSP = 11 + VREPRINT = 12 + VWERASE = 13 + VLNEXT = 14 + VFLUSH = 15 + VSWTCH = 16 + VSTATUS = 17 + VDISCARD = 18 + IGNPAR = 30 + PARMRK = 31 + INPCK = 32 + ISTRIP = 33 + INLCR = 34 + IGNCR = 35 + ICRNL = 36 + IUCLC = 37 + IXON = 38 + IXANY = 39 + IXOFF = 40 + IMAXBEL = 41 + ISIG = 50 + ICANON = 51 + XCASE = 52 + ECHO = 53 + ECHOE = 54 + ECHOK = 55 + ECHONL = 56 + NOFLSH = 57 + TOSTOP = 58 + IEXTEN = 59 + ECHOCTL = 60 + ECHOKE = 61 + PENDIN = 62 + OPOST = 70 + OLCUC = 71 + ONLCR = 72 + OCRNL = 73 + ONOCR = 74 + ONLRET = 75 + CS7 = 90 + CS8 = 91 + PARENB = 92 + PARODD = 93 + TTY_OP_ISPEED = 128 + TTY_OP_OSPEED = 129 +) + +// A Session represents a connection to a remote command or shell. +type Session struct { + // Stdin specifies the remote process's standard input. + // If Stdin is nil, the remote process reads from an empty + // bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr specify the remote process's standard + // output and error. + // + // If either is nil, Run connects the corresponding file + // descriptor to an instance of ioutil.Discard. There is a + // fixed amount of buffering that is shared for the two streams. + // If either blocks it may eventually cause the remote + // command to block. + Stdout io.Writer + Stderr io.Writer + + ch Channel // the channel backing this session + started bool // true once Start, Run or Shell is invoked. + copyFuncs []func() error + errors chan error // one send per copyFunc + + // true if pipe method is active + stdinpipe, stdoutpipe, stderrpipe bool + + // stdinPipeWriter is non-nil if StdinPipe has not been called + // and Stdin was specified by the user; it is the write end of + // a pipe connecting Session.Stdin to the stdin channel. + stdinPipeWriter io.WriteCloser + + exitStatus chan error +} + +// SendRequest sends an out-of-band channel request on the SSH channel +// underlying the session. +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + return s.ch.SendRequest(name, wantReply, payload) +} + +func (s *Session) Close() error { + return s.ch.Close() +} + +// RFC 4254 Section 6.4. +type setenvRequest struct { + Name string + Value string +} + +// Setenv sets an environment variable that will be applied to any +// command executed by Shell or Run. +func (s *Session) Setenv(name, value string) error { + msg := setenvRequest{ + Name: name, + Value: value, + } + ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: setenv failed") + } + return err +} + +// RFC 4254 Section 6.2. +type ptyRequestMsg struct { + Term string + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 + Modelist string +} + +// RequestPty requests the association of a pty with the session on the remote host. +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { + var tm []byte + for k, v := range termmodes { + kv := struct { + Key byte + Val uint32 + }{k, v} + + tm = append(tm, Marshal(&kv)...) + } + tm = append(tm, tty_OP_END) + req := ptyRequestMsg{ + Term: term, + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + Modelist: string(tm), + } + ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) + if err == nil && !ok { + err = errors.New("ssh: pty-req failed") + } + return err +} + +// RFC 4254 Section 6.5. +type subsystemRequestMsg struct { + Subsystem string +} + +// RequestSubsystem requests the association of a subsystem with the session on the remote host. +// A subsystem is a predefined command that runs in the background when the ssh session is initiated +func (s *Session) RequestSubsystem(subsystem string) error { + msg := subsystemRequestMsg{ + Subsystem: subsystem, + } + ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: subsystem request failed") + } + return err +} + +// RFC 4254 Section 6.7. +type ptyWindowChangeMsg struct { + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 +} + +// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. +func (s *Session) WindowChange(h, w int) error { + req := ptyWindowChangeMsg{ + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + } + _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) + return err +} + +// RFC 4254 Section 6.9. +type signalMsg struct { + Signal string +} + +// Signal sends the given signal to the remote process. +// sig is one of the SIG* constants. +func (s *Session) Signal(sig Signal) error { + msg := signalMsg{ + Signal: string(sig), + } + + _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) + return err +} + +// RFC 4254 Section 6.5. +type execMsg struct { + Command string +} + +// Start runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start or Shell. +func (s *Session) Start(cmd string) error { + if s.started { + return errors.New("ssh: session already started") + } + req := execMsg{ + Command: cmd, + } + + ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) + if err == nil && !ok { + err = fmt.Errorf("ssh: command %v failed", cmd) + } + if err != nil { + return err + } + return s.start() +} + +// Run runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start, Shell, Output, +// or CombinedOutput. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Run(cmd string) error { + err := s.Start(cmd) + if err != nil { + return err + } + return s.Wait() +} + +// Output runs cmd on the remote host and returns its standard output. +func (s *Session) Output(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + s.Stdout = &b + err := s.Run(cmd) + return b.Bytes(), err +} + +type singleWriter struct { + b bytes.Buffer + mu sync.Mutex +} + +func (w *singleWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.b.Write(p) +} + +// CombinedOutput runs cmd on the remote host and returns its combined +// standard output and standard error. +func (s *Session) CombinedOutput(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b singleWriter + s.Stdout = &b + s.Stderr = &b + err := s.Run(cmd) + return b.b.Bytes(), err +} + +// Shell starts a login shell on the remote host. A Session only +// accepts one call to Run, Start, Shell, Output, or CombinedOutput. +func (s *Session) Shell() error { + if s.started { + return errors.New("ssh: session already started") + } + + ok, err := s.ch.SendRequest("shell", true, nil) + if err == nil && !ok { + return errors.New("ssh: could not start shell") + } + if err != nil { + return err + } + return s.start() +} + +func (s *Session) start() error { + s.started = true + + type F func(*Session) + for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { + setupFd(s) + } + + s.errors = make(chan error, len(s.copyFuncs)) + for _, fn := range s.copyFuncs { + go func(fn func() error) { + s.errors <- fn() + }(fn) + } + return nil +} + +// Wait waits for the remote command to exit. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Wait() error { + if !s.started { + return errors.New("ssh: session not started") + } + waitErr := <-s.exitStatus + + if s.stdinPipeWriter != nil { + s.stdinPipeWriter.Close() + } + var copyError error + for range s.copyFuncs { + if err := <-s.errors; err != nil && copyError == nil { + copyError = err + } + } + if waitErr != nil { + return waitErr + } + return copyError +} + +func (s *Session) wait(reqs <-chan *Request) error { + wm := Waitmsg{status: -1} + // Wait for msg channel to be closed before returning. + for msg := range reqs { + switch msg.Type { + case "exit-status": + wm.status = int(binary.BigEndian.Uint32(msg.Payload)) + case "exit-signal": + var sigval struct { + Signal string + CoreDumped bool + Error string + Lang string + } + if err := Unmarshal(msg.Payload, &sigval); err != nil { + return err + } + + // Must sanitize strings? + wm.signal = sigval.Signal + wm.msg = sigval.Error + wm.lang = sigval.Lang + default: + // This handles keepalives and matches + // OpenSSH's behaviour. + if msg.WantReply { + msg.Reply(false, nil) + } + } + } + if wm.status == 0 { + return nil + } + if wm.status == -1 { + // exit-status was never sent from server + if wm.signal == "" { + // signal was not sent either. RFC 4254 + // section 6.10 recommends against this + // behavior, but it is allowed, so we let + // clients handle it. + return &ExitMissingError{} + } + wm.status = 128 + if _, ok := signals[Signal(wm.signal)]; ok { + wm.status += signals[Signal(wm.signal)] + } + } + + return &ExitError{wm} +} + +// ExitMissingError is returned if a session is torn down cleanly, but +// the server sends no confirmation of the exit status. +type ExitMissingError struct{} + +func (e *ExitMissingError) Error() string { + return "wait: remote command exited without exit status or exit signal" +} + +func (s *Session) stdin() { + if s.stdinpipe { + return + } + var stdin io.Reader + if s.Stdin == nil { + stdin = new(bytes.Buffer) + } else { + r, w := io.Pipe() + go func() { + _, err := io.Copy(w, s.Stdin) + w.CloseWithError(err) + }() + stdin, s.stdinPipeWriter = r, w + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.ch, stdin) + if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { + err = err1 + } + return err + }) +} + +func (s *Session) stdout() { + if s.stdoutpipe { + return + } + if s.Stdout == nil { + s.Stdout = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stdout, s.ch) + return err + }) +} + +func (s *Session) stderr() { + if s.stderrpipe { + return + } + if s.Stderr == nil { + s.Stderr = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stderr, s.ch.Stderr()) + return err + }) +} + +// sessionStdin reroutes Close to CloseWrite. +type sessionStdin struct { + io.Writer + ch Channel +} + +func (s *sessionStdin) Close() error { + return s.ch.CloseWrite() +} + +// StdinPipe returns a pipe that will be connected to the +// remote command's standard input when the command starts. +func (s *Session) StdinPipe() (io.WriteCloser, error) { + if s.Stdin != nil { + return nil, errors.New("ssh: Stdin already set") + } + if s.started { + return nil, errors.New("ssh: StdinPipe after process started") + } + s.stdinpipe = true + return &sessionStdin{s.ch, s.ch}, nil +} + +// StdoutPipe returns a pipe that will be connected to the +// remote command's standard output when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StdoutPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StdoutPipe() (io.Reader, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.started { + return nil, errors.New("ssh: StdoutPipe after process started") + } + s.stdoutpipe = true + return s.ch, nil +} + +// StderrPipe returns a pipe that will be connected to the +// remote command's standard error when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StderrPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StderrPipe() (io.Reader, error) { + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + if s.started { + return nil, errors.New("ssh: StderrPipe after process started") + } + s.stderrpipe = true + return s.ch.Stderr(), nil +} + +// newSession returns a new interactive session on the remote host. +func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { + s := &Session{ + ch: ch, + } + s.exitStatus = make(chan error, 1) + go func() { + s.exitStatus <- s.wait(reqs) + }() + + return s, nil +} + +// An ExitError reports unsuccessful completion of a remote command. +type ExitError struct { + Waitmsg +} + +func (e *ExitError) Error() string { + return e.Waitmsg.String() +} + +// Waitmsg stores the information about an exited remote command +// as reported by Wait. +type Waitmsg struct { + status int + signal string + msg string + lang string +} + +// ExitStatus returns the exit status of the remote command. +func (w Waitmsg) ExitStatus() int { + return w.status +} + +// Signal returns the exit signal of the remote command if +// it was terminated violently. +func (w Waitmsg) Signal() string { + return w.signal +} + +// Msg returns the exit message given by the remote command +func (w Waitmsg) Msg() string { + return w.msg +} + +// Lang returns the language tag. See RFC 3066 +func (w Waitmsg) Lang() string { + return w.lang +} + +func (w Waitmsg) String() string { + str := fmt.Sprintf("Process exited with status %v", w.status) + if w.signal != "" { + str += fmt.Sprintf(" from signal %v", w.signal) + } + if w.msg != "" { + str += fmt.Sprintf(". Reason was: %v", w.msg) + } + return str +} diff --git a/vendor/golang.org/x/crypto/ssh/session_test.go b/vendor/golang.org/x/crypto/ssh/session_test.go new file mode 100644 index 0000000..7dce6dd --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session_test.go @@ -0,0 +1,774 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session tests. + +import ( + "bytes" + crypto_rand "crypto/rand" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "testing" + + "golang.org/x/crypto/ssh/terminal" +) + +type serverType func(Channel, <-chan *Request, *testing.T) + +// dial constructs a new test server and returns a *ClientConn. +func dial(handler serverType, t *testing.T) *Client { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + go func() { + defer c1.Close() + conf := ServerConfig{ + NoClientAuth: true, + } + conf.AddHostKey(testSigners["rsa"]) + + _, chans, reqs, err := NewServerConn(c1, &conf) + if err != nil { + t.Fatalf("Unable to handshake: %v", err) + } + go DiscardRequests(reqs) + + for newCh := range chans { + if newCh.ChannelType() != "session" { + newCh.Reject(UnknownChannelType, "unknown channel type") + continue + } + + ch, inReqs, err := newCh.Accept() + if err != nil { + t.Errorf("Accept: %v", err) + continue + } + go func() { + handler(ch, inReqs, t) + }() + } + }() + + config := &ClientConfig{ + User: "testuser", + HostKeyCallback: InsecureIgnoreHostKey(), + } + + conn, chans, reqs, err := NewClientConn(c2, "", config) + if err != nil { + t.Fatalf("unable to dial remote side: %v", err) + } + + return NewClient(conn, chans, reqs) +} + +// Test a simple string is returned to session.Stdout. +func TestSessionShell(t *testing.T) { + conn := dial(shellHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + stdout := new(bytes.Buffer) + session.Stdout = stdout + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %s", err) + } + if err := session.Wait(); err != nil { + t.Fatalf("Remote command did not exit cleanly: %v", err) + } + actual := stdout.String() + if actual != "golang" { + t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) + } +} + +// TODO(dfc) add support for Std{in,err}Pipe when the Server supports it. + +// Test a simple string is returned via StdoutPipe. +func TestSessionStdoutPipe(t *testing.T) { + conn := dial(shellHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("Unable to request StdoutPipe(): %v", err) + } + var buf bytes.Buffer + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + done := make(chan bool, 1) + go func() { + if _, err := io.Copy(&buf, stdout); err != nil { + t.Errorf("Copy of stdout failed: %v", err) + } + done <- true + }() + if err := session.Wait(); err != nil { + t.Fatalf("Remote command did not exit cleanly: %v", err) + } + <-done + actual := buf.String() + if actual != "golang" { + t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) + } +} + +// Test that a simple string is returned via the Output helper, +// and that stderr is discarded. +func TestSessionOutput(t *testing.T) { + conn := dial(fixedOutputHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + buf, err := session.Output("") // cmd is ignored by fixedOutputHandler + if err != nil { + t.Error("Remote command did not exit cleanly:", err) + } + w := "this-is-stdout." + g := string(buf) + if g != w { + t.Error("Remote command did not return expected string:") + t.Logf("want %q", w) + t.Logf("got %q", g) + } +} + +// Test that both stdout and stderr are returned +// via the CombinedOutput helper. +func TestSessionCombinedOutput(t *testing.T) { + conn := dial(fixedOutputHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + buf, err := session.CombinedOutput("") // cmd is ignored by fixedOutputHandler + if err != nil { + t.Error("Remote command did not exit cleanly:", err) + } + const stdout = "this-is-stdout." + const stderr = "this-is-stderr." + g := string(buf) + if g != stdout+stderr && g != stderr+stdout { + t.Error("Remote command did not return expected string:") + t.Logf("want %q, or %q", stdout+stderr, stderr+stdout) + t.Logf("got %q", g) + } +} + +// Test non-0 exit status is returned correctly. +func TestExitStatusNonZero(t *testing.T) { + conn := dial(exitStatusNonZeroHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.ExitStatus() != 15 { + t.Fatalf("expected command to exit with 15 but got %v", e.ExitStatus()) + } +} + +// Test 0 exit status is returned correctly. +func TestExitStatusZero(t *testing.T) { + conn := dial(exitStatusZeroHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err != nil { + t.Fatalf("expected nil but got %v", err) + } +} + +// Test exit signal and status are both returned correctly. +func TestExitSignalAndStatus(t *testing.T) { + conn := dial(exitSignalAndStatusHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "TERM" || e.ExitStatus() != 15 { + t.Fatalf("expected command to exit with signal TERM and status 15 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +// Test exit signal and status are both returned correctly. +func TestKnownExitSignalOnly(t *testing.T) { + conn := dial(exitSignalHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "TERM" || e.ExitStatus() != 143 { + t.Fatalf("expected command to exit with signal TERM and status 143 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +// Test exit signal and status are both returned correctly. +func TestUnknownExitSignal(t *testing.T) { + conn := dial(exitSignalUnknownHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "SYS" || e.ExitStatus() != 128 { + t.Fatalf("expected command to exit with signal SYS and status 128 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +func TestExitWithoutStatusOrSignal(t *testing.T) { + conn := dial(exitWithoutSignalOrStatus, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + if _, ok := err.(*ExitMissingError); !ok { + t.Fatalf("got %T want *ExitMissingError", err) + } +} + +// windowTestBytes is the number of bytes that we'll send to the SSH server. +const windowTestBytes = 16000 * 200 + +// TestServerWindow writes random data to the server. The server is expected to echo +// the same data back, which is compared against the original. +func TestServerWindow(t *testing.T) { + origBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) + io.CopyN(origBuf, crypto_rand.Reader, windowTestBytes) + origBytes := origBuf.Bytes() + + conn := dial(echoHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + result := make(chan []byte) + + go func() { + defer close(result) + echoedBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) + serverStdout, err := session.StdoutPipe() + if err != nil { + t.Errorf("StdoutPipe failed: %v", err) + return + } + n, err := copyNRandomly("stdout", echoedBuf, serverStdout, windowTestBytes) + if err != nil && err != io.EOF { + t.Errorf("Read only %d bytes from server, expected %d: %v", n, windowTestBytes, err) + } + result <- echoedBuf.Bytes() + }() + + serverStdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe failed: %v", err) + } + written, err := copyNRandomly("stdin", serverStdin, origBuf, windowTestBytes) + if err != nil { + t.Fatalf("failed to copy origBuf to serverStdin: %v", err) + } + if written != windowTestBytes { + t.Fatalf("Wrote only %d of %d bytes to server", written, windowTestBytes) + } + + echoedBytes := <-result + + if !bytes.Equal(origBytes, echoedBytes) { + t.Fatalf("Echoed buffer differed from original, orig %d, echoed %d", len(origBytes), len(echoedBytes)) + } +} + +// Verify the client can handle a keepalive packet from the server. +func TestClientHandlesKeepalives(t *testing.T) { + conn := dial(channelKeepaliveSender, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err != nil { + t.Fatalf("expected nil but got: %v", err) + } +} + +type exitStatusMsg struct { + Status uint32 +} + +type exitSignalMsg struct { + Signal string + CoreDumped bool + Errmsg string + Lang string +} + +func handleTerminalRequests(in <-chan *Request) { + for req := range in { + ok := false + switch req.Type { + case "shell": + ok = true + if len(req.Payload) > 0 { + // We don't accept any commands, only the default shell. + ok = false + } + case "env": + ok = true + } + req.Reply(ok, nil) + } +} + +func newServerShell(ch Channel, in <-chan *Request, prompt string) *terminal.Terminal { + term := terminal.NewTerminal(ch, prompt) + go handleTerminalRequests(in) + return term +} + +func exitStatusZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + // this string is returned to stdout + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(0, ch, t) +} + +func exitStatusNonZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(15, ch, t) +} + +func exitSignalAndStatusHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(15, ch, t) + sendSignal("TERM", ch, t) +} + +func exitSignalHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendSignal("TERM", ch, t) +} + +func exitSignalUnknownHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendSignal("SYS", ch, t) +} + +func exitWithoutSignalOrStatus(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) +} + +func shellHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + // this string is returned to stdout + shell := newServerShell(ch, in, "golang") + readLine(shell, t) + sendStatus(0, ch, t) +} + +// Ignores the command, writes fixed strings to stderr and stdout. +// Strings are "this-is-stdout." and "this-is-stderr.". +func fixedOutputHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + _, err := ch.Read(nil) + + req, ok := <-in + if !ok { + t.Fatalf("error: expected channel request, got: %#v", err) + return + } + + // ignore request, always send some text + req.Reply(true, nil) + + _, err = io.WriteString(ch, "this-is-stdout.") + if err != nil { + t.Fatalf("error writing on server: %v", err) + } + _, err = io.WriteString(ch.Stderr(), "this-is-stderr.") + if err != nil { + t.Fatalf("error writing on server: %v", err) + } + sendStatus(0, ch, t) +} + +func readLine(shell *terminal.Terminal, t *testing.T) { + if _, err := shell.ReadLine(); err != nil && err != io.EOF { + t.Errorf("unable to read line: %v", err) + } +} + +func sendStatus(status uint32, ch Channel, t *testing.T) { + msg := exitStatusMsg{ + Status: status, + } + if _, err := ch.SendRequest("exit-status", false, Marshal(&msg)); err != nil { + t.Errorf("unable to send status: %v", err) + } +} + +func sendSignal(signal string, ch Channel, t *testing.T) { + sig := exitSignalMsg{ + Signal: signal, + CoreDumped: false, + Errmsg: "Process terminated", + Lang: "en-GB-oed", + } + if _, err := ch.SendRequest("exit-signal", false, Marshal(&sig)); err != nil { + t.Errorf("unable to send signal: %v", err) + } +} + +func discardHandler(ch Channel, t *testing.T) { + defer ch.Close() + io.Copy(ioutil.Discard, ch) +} + +func echoHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + if n, err := copyNRandomly("echohandler", ch, ch, windowTestBytes); err != nil { + t.Errorf("short write, wrote %d, expected %d: %v ", n, windowTestBytes, err) + } +} + +// copyNRandomly copies n bytes from src to dst. It uses a variable, and random, +// buffer size to exercise more code paths. +func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) { + var ( + buf = make([]byte, 32*1024) + written int + remaining = n + ) + for remaining > 0 { + l := rand.Intn(1 << 15) + if remaining < l { + l = remaining + } + nr, er := src.Read(buf[:l]) + nw, ew := dst.Write(buf[:nr]) + remaining -= nw + written += nw + if ew != nil { + return written, ew + } + if nr != nw { + return written, io.ErrShortWrite + } + if er != nil && er != io.EOF { + return written, er + } + } + return written, nil +} + +func channelKeepaliveSender(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + if _, err := ch.SendRequest("keepalive@openssh.com", true, nil); err != nil { + t.Errorf("unable to send channel keepalive request: %v", err) + } + sendStatus(0, ch, t) +} + +func TestClientWriteEOF(t *testing.T) { + conn := dial(simpleEchoHandler, t) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + stdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe failed: %v", err) + } + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("StdoutPipe failed: %v", err) + } + + data := []byte(`0000`) + _, err = stdin.Write(data) + if err != nil { + t.Fatalf("Write failed: %v", err) + } + stdin.Close() + + res, err := ioutil.ReadAll(stdout) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + + if !bytes.Equal(data, res) { + t.Fatalf("Read differed from write, wrote: %v, read: %v", data, res) + } +} + +func simpleEchoHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + data, err := ioutil.ReadAll(ch) + if err != nil { + t.Errorf("handler read error: %v", err) + } + _, err = ch.Write(data) + if err != nil { + t.Errorf("handler write error: %v", err) + } +} + +func TestSessionID(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverID := make(chan []byte, 1) + clientID := make(chan []byte, 1) + + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["ecdsa"]) + clientConf := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + User: "user", + } + + go func() { + conn, chans, reqs, err := NewServerConn(c1, serverConf) + if err != nil { + t.Fatalf("server handshake: %v", err) + } + serverID <- conn.SessionID() + go DiscardRequests(reqs) + for ch := range chans { + ch.Reject(Prohibited, "") + } + }() + + go func() { + conn, chans, reqs, err := NewClientConn(c2, "", clientConf) + if err != nil { + t.Fatalf("client handshake: %v", err) + } + clientID <- conn.SessionID() + go DiscardRequests(reqs) + for ch := range chans { + ch.Reject(Prohibited, "") + } + }() + + s := <-serverID + c := <-clientID + if bytes.Compare(s, c) != 0 { + t.Errorf("server session ID (%x) != client session ID (%x)", s, c) + } else if len(s) == 0 { + t.Errorf("client and server SessionID were empty.") + } +} + +type noReadConn struct { + readSeen bool + net.Conn +} + +func (c *noReadConn) Close() error { + return nil +} + +func (c *noReadConn) Read(b []byte) (int, error) { + c.readSeen = true + return 0, errors.New("noReadConn error") +} + +func TestInvalidServerConfiguration(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serveConn := noReadConn{Conn: c1} + serverConf := &ServerConfig{} + + NewServerConn(&serveConn, serverConf) + if serveConn.readSeen { + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key") + } + + serverConf.AddHostKey(testSigners["ecdsa"]) + + NewServerConn(&serveConn, serverConf) + if serveConn.readSeen { + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method") + } +} + +func TestHostKeyAlgorithms(t *testing.T) { + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.AddHostKey(testSigners["ecdsa"]) + + connect := func(clientConf *ClientConfig, want string) { + var alg string + clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error { + alg = key.Type() + return nil + } + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, serverConf) + _, _, _, err = NewClientConn(c2, "", clientConf) + if err != nil { + t.Fatalf("NewClientConn: %v", err) + } + if alg != want { + t.Errorf("selected key algorithm %s, want %s", alg, want) + } + } + + // By default, we get the preferred algorithm, which is ECDSA 256. + + clientConf := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + } + connect(clientConf, KeyAlgoECDSA256) + + // Client asks for RSA explicitly. + clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA} + connect(clientConf, KeyAlgoRSA) + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, serverConf) + clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"} + _, _, _, err = NewClientConn(c2, "", clientConf) + if err == nil { + t.Fatal("succeeded connecting with unknown hostkey algorithm") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go new file mode 100644 index 0000000..a2dccc6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go @@ -0,0 +1,115 @@ +package ssh + +import ( + "errors" + "io" + "net" +) + +// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "direct-streamlocal@openssh.com" string. +// +// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 +type streamLocalChannelOpenDirectMsg struct { + socketPath string + reserved0 string + reserved1 uint32 +} + +// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "forwarded-streamlocal@openssh.com" string. +type forwardedStreamLocalPayload struct { + SocketPath string + Reserved0 string +} + +// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message +// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. +type streamLocalChannelForwardMsg struct { + socketPath string +} + +// ListenUnix is similar to ListenTCP but uses a Unix domain socket. +func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { + m := streamLocalChannelForwardMsg{ + socketPath, + } + // send message + ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") + } + ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) + + return &unixListener{socketPath, c, ch}, nil +} + +func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { + msg := streamLocalChannelOpenDirectMsg{ + socketPath: socketPath, + } + ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type unixListener struct { + socketPath string + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *unixListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + }, nil +} + +// Close closes the listener. +func (l *unixListener) Close() error { + // this also closes the listener. + l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) + m := streamLocalChannelForwardMsg{ + l.socketPath, + } + ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *unixListener) Addr() net.Addr { + return &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + } +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go new file mode 100644 index 0000000..acf1717 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -0,0 +1,465 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Listen requests the remote peer open a listening socket on +// addr. Incoming connections will be available by calling Accept on +// the returned net.Listener. The listener must be serviced, or the +// SSH connection may hang. +// N must be "tcp", "tcp4", "tcp6", or "unix". +func (c *Client) Listen(n, addr string) (net.Listener, error) { + switch n { + case "tcp", "tcp4", "tcp6": + laddr, err := net.ResolveTCPAddr(n, addr) + if err != nil { + return nil, err + } + return c.ListenTCP(laddr) + case "unix": + return c.ListenUnix(addr) + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// Automatic port allocation is broken with OpenSSH before 6.0. See +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, +// rather than the actual port number. This means you can never open +// two different listeners with auto allocated ports. We work around +// this by trying explicit ports until we succeed. + +const openSSHPrefix = "OpenSSH_" + +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) + +// isBrokenOpenSSHVersion returns true if the given version string +// specifies a version of OpenSSH that is known to have a bug in port +// forwarding. +func isBrokenOpenSSHVersion(versionStr string) bool { + i := strings.Index(versionStr, openSSHPrefix) + if i < 0 { + return false + } + i += len(openSSHPrefix) + j := i + for ; j < len(versionStr); j++ { + if versionStr[j] < '0' || versionStr[j] > '9' { + break + } + } + version, _ := strconv.Atoi(versionStr[i:j]) + return version < 6 +} + +// autoPortListenWorkaround simulates automatic port allocation by +// trying random ports repeatedly. +func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { + var sshListener net.Listener + var err error + const tries = 10 + for i := 0; i < tries; i++ { + addr := *laddr + addr.Port = 1024 + portRandomizer.Intn(60000) + sshListener, err = c.ListenTCP(&addr) + if err == nil { + laddr.Port = addr.Port + return sshListener, err + } + } + return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) +} + +// RFC 4254 7.1 +type channelForwardMsg struct { + addr string + rport uint32 +} + +// ListenTCP requests the remote peer open a listening socket +// on laddr. Incoming connections will be available by calling +// Accept on the returned net.Listener. +func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { + return c.autoPortListenWorkaround(laddr) + } + + m := channelForwardMsg{ + laddr.IP.String(), + uint32(laddr.Port), + } + // send message + ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: tcpip-forward request denied by peer") + } + + // If the original port was 0, then the remote side will + // supply a real port number in the response. + if laddr.Port == 0 { + var p struct { + Port uint32 + } + if err := Unmarshal(resp, &p); err != nil { + return nil, err + } + laddr.Port = int(p.Port) + } + + // Register this forward, using the port number we obtained. + ch := c.forwards.add(laddr) + + return &tcpListener{laddr, c, ch}, nil +} + +// forwardList stores a mapping between remote +// forward requests and the tcpListeners. +type forwardList struct { + sync.Mutex + entries []forwardEntry +} + +// forwardEntry represents an established mapping of a laddr on a +// remote ssh server to a channel connected to a tcpListener. +type forwardEntry struct { + laddr net.Addr + c chan forward +} + +// forward represents an incoming forwarded tcpip connection. The +// arguments to add/remove/lookup should be address as specified in +// the original forward-request. +type forward struct { + newCh NewChannel // the ssh client channel underlying this forward + raddr net.Addr // the raddr of the incoming connection +} + +func (l *forwardList) add(addr net.Addr) chan forward { + l.Lock() + defer l.Unlock() + f := forwardEntry{ + laddr: addr, + c: make(chan forward, 1), + } + l.entries = append(l.entries, f) + return f.c +} + +// See RFC 4254, section 7.2 +type forwardedTCPPayload struct { + Addr string + Port uint32 + OriginAddr string + OriginPort uint32 +} + +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. +func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { + if port == 0 || port > 65535 { + return nil, fmt.Errorf("ssh: port number out of range: %d", port) + } + ip := net.ParseIP(string(addr)) + if ip == nil { + return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) + } + return &net.TCPAddr{IP: ip, Port: int(port)}, nil +} + +func (l *forwardList) handleChannels(in <-chan NewChannel) { + for ch := range in { + var ( + laddr net.Addr + raddr net.Addr + err error + ) + switch channelType := ch.ChannelType(); channelType { + case "forwarded-tcpip": + var payload forwardedTCPPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) + continue + } + + // RFC 4254 section 7.2 specifies that incoming + // addresses should list the address, in string + // format. It is implied that this should be an IP + // address, as it would be impossible to connect to it + // otherwise. + laddr, err = parseTCPAddr(payload.Addr, payload.Port) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + + case "forwarded-streamlocal@openssh.com": + var payload forwardedStreamLocalPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) + continue + } + laddr = &net.UnixAddr{ + Name: payload.SocketPath, + Net: "unix", + } + raddr = &net.UnixAddr{ + Name: "@", + Net: "unix", + } + default: + panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) + } + if ok := l.forward(laddr, raddr, ch); !ok { + // Section 7.2, implementations MUST reject spurious incoming + // connections. + ch.Reject(Prohibited, "no forward for address") + continue + } + + } +} + +// remove removes the forward entry, and the channel feeding its +// listener. +func (l *forwardList) remove(addr net.Addr) { + l.Lock() + defer l.Unlock() + for i, f := range l.entries { + if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { + l.entries = append(l.entries[:i], l.entries[i+1:]...) + close(f.c) + return + } + } +} + +// closeAll closes and clears all forwards. +func (l *forwardList) closeAll() { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + close(f.c) + } + l.entries = nil +} + +func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { + f.c <- forward{newCh: ch, raddr: raddr} + return true + } + } + return false +} + +type tcpListener struct { + laddr *net.TCPAddr + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *tcpListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: l.laddr, + raddr: s.raddr, + }, nil +} + +// Close closes the listener. +func (l *tcpListener) Close() error { + m := channelForwardMsg{ + l.laddr.IP.String(), + uint32(l.laddr.Port), + } + + // this also closes the listener. + l.conn.forwards.remove(l.laddr) + ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-tcpip-forward failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *tcpListener) Addr() net.Addr { + return l.laddr +} + +// Dial initiates a connection to the addr from the remote host. +// The resulting connection has a zero LocalAddr() and RemoteAddr(). +func (c *Client) Dial(n, addr string) (net.Conn, error) { + var ch Channel + switch n { + case "tcp", "tcp4", "tcp6": + // Parse the address into host and numeric port. + host, portString, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return nil, err + } + ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) + if err != nil { + return nil, err + } + // Use a zero address for local and remote address. + zeroAddr := &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + return &chanConn{ + Channel: ch, + laddr: zeroAddr, + raddr: zeroAddr, + }, nil + case "unix": + var err error + ch, err = c.dialStreamLocal(addr) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: addr, + Net: "unix", + }, + }, nil + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// DialTCP connects to the remote address raddr on the network net, +// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used +// as the local address for the connection. +func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { + if laddr == nil { + laddr = &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + } + ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: laddr, + raddr: raddr, + }, nil +} + +// RFC 4254 7.2 +type channelOpenDirectMsg struct { + raddr string + rport uint32 + laddr string + lport uint32 +} + +func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { + msg := channelOpenDirectMsg{ + raddr: raddr, + rport: uint32(rport), + laddr: laddr, + lport: uint32(lport), + } + ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type tcpChan struct { + Channel // the backing channel +} + +// chanConn fulfills the net.Conn interface without +// the tcpChan having to hold laddr or raddr directly. +type chanConn struct { + Channel + laddr, raddr net.Addr +} + +// LocalAddr returns the local network address. +func (t *chanConn) LocalAddr() net.Addr { + return t.laddr +} + +// RemoteAddr returns the remote network address. +func (t *chanConn) RemoteAddr() net.Addr { + return t.raddr +} + +// SetDeadline sets the read and write deadlines associated +// with the connection. +func (t *chanConn) SetDeadline(deadline time.Time) error { + if err := t.SetReadDeadline(deadline); err != nil { + return err + } + return t.SetWriteDeadline(deadline) +} + +// SetReadDeadline sets the read deadline. +// A zero value for t means Read will not time out. +// After the deadline, the error from Read will implement net.Error +// with Timeout() == true. +func (t *chanConn) SetReadDeadline(deadline time.Time) error { + // for compatibility with previous version, + // the error message contains "tcpChan" + return errors.New("ssh: tcpChan: deadline not supported") +} + +// SetWriteDeadline exists to satisfy the net.Conn interface +// but is not implemented by this type. It always returns an error. +func (t *chanConn) SetWriteDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip_test.go b/vendor/golang.org/x/crypto/ssh/tcpip_test.go new file mode 100644 index 0000000..f1265cb --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip_test.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "testing" +) + +func TestAutoPortListenBroken(t *testing.T) { + broken := "SSH-2.0-OpenSSH_5.9hh11" + works := "SSH-2.0-OpenSSH_6.1" + if !isBrokenOpenSSHVersion(broken) { + t.Errorf("version %q not marked as broken", broken) + } + if isBrokenOpenSSHVersion(works) { + t.Errorf("version %q marked as broken", works) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 0000000..9a88759 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,951 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. +type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. + historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). +func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var ( + crlf = []byte{'\r', '\n'} + pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} + pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} +) + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. +func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) +} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. +func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + movement := make([]rune, 3*(up+down+left+right)) + m := movement + for i := 0; i < up; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'A' + m = m[3:] + } + for i := 0; i < down; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'B' + m = m[3:] + } + for i := 0; i < left; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'D' + m = m[3:] + } + for i := 0; i < right; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'C' + m = m[3:] + } + + t.queue(movement) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. + // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\r', '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. +func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. +func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n. +func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { + for len(buf) > 0 { + i := bytes.IndexByte(buf, '\n') + todo := len(buf) + if i >= 0 { + todo = i + } + + var nn int + nn, err = w.Write(buf[:todo]) + n += nn + if err != nil { + return n, err + } + buf = buf[todo:] + + if i >= 0 { + if _, err = w.Write(crlf); err != nil { + return n, err + } + n++ + buf = buf[1:] + } + } + + return n, nil +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return writeWithCRLF(t.c, buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = writeWithCRLF(t.c, buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. +func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else { + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. +func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. + t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. + if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} + +// readPasswordLine reads from reader until it finds \n or io.EOF. +// The slice returned does not include the \n. +// readPasswordLine also ignores any \r it finds. +func readPasswordLine(reader io.Reader) ([]byte, error) { + var buf [1]byte + var ret []byte + + for { + n, err := reader.Read(buf[:]) + if n > 0 { + switch buf[0] { + case '\n': + return ret, nil + case '\r': + // remove \r from passwords on Windows + default: + ret = append(ret, buf[0]) + } + continue + } + if err != nil { + if err == io.EOF && len(ret) > 0 { + return ret, nil + } + return ret, err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go new file mode 100644 index 0000000..901c72a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go @@ -0,0 +1,350 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "os" + "testing" +) + +type MockTerminal struct { + toSend []byte + bytesPerRead int + received []byte +} + +func (c *MockTerminal) Read(data []byte) (n int, err error) { + n = len(data) + if n == 0 { + return + } + if n > len(c.toSend) { + n = len(c.toSend) + } + if n == 0 { + return 0, io.EOF + } + if c.bytesPerRead > 0 && n > c.bytesPerRead { + n = c.bytesPerRead + } + copy(data, c.toSend[:n]) + c.toSend = c.toSend[n:] + return +} + +func (c *MockTerminal) Write(data []byte) (n int, err error) { + c.received = append(c.received, data...) + return len(data), nil +} + +func TestClose(t *testing.T) { + c := &MockTerminal{} + ss := NewTerminal(c, "> ") + line, err := ss.ReadLine() + if line != "" { + t.Errorf("Expected empty line but got: %s", line) + } + if err != io.EOF { + t.Errorf("Error should have been EOF but got: %s", err) + } +} + +var keyPressTests = []struct { + in string + line string + err error + throwAwayLines int +}{ + { + err: io.EOF, + }, + { + in: "\r", + line: "", + }, + { + in: "foo\r", + line: "foo", + }, + { + in: "a\x1b[Cb\r", // right + line: "ab", + }, + { + in: "a\x1b[Db\r", // left + line: "ba", + }, + { + in: "a\177b\r", // backspace + line: "b", + }, + { + in: "\x1b[A\r", // up + }, + { + in: "\x1b[B\r", // down + }, + { + in: "line\x1b[A\x1b[B\r", // up then down + line: "line", + }, + { + in: "line1\rline2\x1b[A\r", // recall previous line. + line: "line1", + throwAwayLines: 1, + }, + { + // recall two previous lines and append. + in: "line1\rline2\rline3\x1b[A\x1b[Axxx\r", + line: "line1xxx", + throwAwayLines: 2, + }, + { + // Ctrl-A to move to beginning of line followed by ^K to kill + // line. + in: "a b \001\013\r", + line: "", + }, + { + // Ctrl-A to move to beginning of line, Ctrl-E to move to end, + // finally ^K to kill nothing. + in: "a b \001\005\013\r", + line: "a b ", + }, + { + in: "\027\r", + line: "", + }, + { + in: "a\027\r", + line: "", + }, + { + in: "a \027\r", + line: "", + }, + { + in: "a b\027\r", + line: "a ", + }, + { + in: "a b \027\r", + line: "a ", + }, + { + in: "one two thr\x1b[D\027\r", + line: "one two r", + }, + { + in: "\013\r", + line: "", + }, + { + in: "a\013\r", + line: "a", + }, + { + in: "ab\x1b[D\013\r", + line: "a", + }, + { + in: "Ξεσκεπάζω\r", + line: "Ξεσκεπάζω", + }, + { + in: "£\r\x1b[A\177\r", // non-ASCII char, enter, up, backspace. + line: "", + throwAwayLines: 1, + }, + { + in: "£\r££\x1b[A\x1b[B\177\r", // non-ASCII char, enter, 2x non-ASCII, up, down, backspace, enter. + line: "£", + throwAwayLines: 1, + }, + { + // Ctrl-D at the end of the line should be ignored. + in: "a\004\r", + line: "a", + }, + { + // a, b, left, Ctrl-D should erase the b. + in: "ab\x1b[D\004\r", + line: "a", + }, + { + // a, b, c, d, left, left, ^U should erase to the beginning of + // the line. + in: "abcd\x1b[D\x1b[D\025\r", + line: "cd", + }, + { + // Bracketed paste mode: control sequences should be returned + // verbatim in paste mode. + in: "abc\x1b[200~de\177f\x1b[201~\177\r", + line: "abcde\177", + }, + { + // Enter in bracketed paste mode should still work. + in: "abc\x1b[200~d\refg\x1b[201~h\r", + line: "efgh", + throwAwayLines: 1, + }, + { + // Lines consisting entirely of pasted data should be indicated as such. + in: "\x1b[200~a\r", + line: "a", + err: ErrPasteIndicator, + }, +} + +func TestKeyPresses(t *testing.T) { + for i, test := range keyPressTests { + for j := 1; j < len(test.in); j++ { + c := &MockTerminal{ + toSend: []byte(test.in), + bytesPerRead: j, + } + ss := NewTerminal(c, "> ") + for k := 0; k < test.throwAwayLines; k++ { + _, err := ss.ReadLine() + if err != nil { + t.Errorf("Throwaway line %d from test %d resulted in error: %s", k, i, err) + } + } + line, err := ss.ReadLine() + if line != test.line { + t.Errorf("Line resulting from test %d (%d bytes per read) was '%s', expected '%s'", i, j, line, test.line) + break + } + if err != test.err { + t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err) + break + } + } + } +} + +func TestPasswordNotSaved(t *testing.T) { + c := &MockTerminal{ + toSend: []byte("password\r\x1b[A\r"), + bytesPerRead: 1, + } + ss := NewTerminal(c, "> ") + pw, _ := ss.ReadPassword("> ") + if pw != "password" { + t.Fatalf("failed to read password, got %s", pw) + } + line, _ := ss.ReadLine() + if len(line) > 0 { + t.Fatalf("password was saved in history") + } +} + +var setSizeTests = []struct { + width, height int +}{ + {40, 13}, + {80, 24}, + {132, 43}, +} + +func TestTerminalSetSize(t *testing.T) { + for _, setSize := range setSizeTests { + c := &MockTerminal{ + toSend: []byte("password\r\x1b[A\r"), + bytesPerRead: 1, + } + ss := NewTerminal(c, "> ") + ss.SetSize(setSize.width, setSize.height) + pw, _ := ss.ReadPassword("Password: ") + if pw != "password" { + t.Fatalf("failed to read password, got %s", pw) + } + if string(c.received) != "Password: \r\n" { + t.Errorf("failed to set the temporary prompt expected %q, got %q", "Password: ", c.received) + } + } +} + +func TestReadPasswordLineEnd(t *testing.T) { + var tests = []struct { + input string + want string + }{ + {"\n", ""}, + {"\r\n", ""}, + {"test\r\n", "test"}, + {"testtesttesttes\n", "testtesttesttes"}, + {"testtesttesttes\r\n", "testtesttesttes"}, + {"testtesttesttesttest\n", "testtesttesttesttest"}, + {"testtesttesttesttest\r\n", "testtesttesttesttest"}, + } + for _, test := range tests { + buf := new(bytes.Buffer) + if _, err := buf.WriteString(test.input); err != nil { + t.Fatal(err) + } + + have, err := readPasswordLine(buf) + if err != nil { + t.Errorf("readPasswordLine(%q) failed: %v", test.input, err) + continue + } + if string(have) != test.want { + t.Errorf("readPasswordLine(%q) returns %q, but %q is expected", test.input, string(have), test.want) + continue + } + + if _, err = buf.WriteString(test.input); err != nil { + t.Fatal(err) + } + have, err = readPasswordLine(buf) + if err != nil { + t.Errorf("readPasswordLine(%q) failed: %v", test.input, err) + continue + } + if string(have) != test.want { + t.Errorf("readPasswordLine(%q) returns %q, but %q is expected", test.input, string(have), test.want) + continue + } + } +} + +func TestMakeRawState(t *testing.T) { + fd := int(os.Stdout.Fd()) + if !IsTerminal(fd) { + t.Skip("stdout is not a terminal; skipping test") + } + + st, err := GetState(fd) + if err != nil { + t.Fatalf("failed to get terminal state from GetState: %s", err) + } + defer Restore(fd, st) + raw, err := MakeRaw(fd) + if err != nil { + t.Fatalf("failed to get terminal state from MakeRaw: %s", err) + } + + if *st != *raw { + t.Errorf("states do not match; was %v, expected %v", raw, st) + } +} + +func TestOutputNewlines(t *testing.T) { + // \n should be changed to \r\n in terminal output. + buf := new(bytes.Buffer) + term := NewTerminal(buf, ">") + + term.Write([]byte("1\n2\n")) + output := string(buf.Bytes()) + const expected = "1\r\n2\r\n" + + if output != expected { + t.Errorf("incorrect output: was %q, expected %q", output, expected) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 0000000..731c89a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,114 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return -1, -1, err + } + return int(ws.Col), int(ws.Row), nil +} + +// passwordReader is an io.Reader that reads from a specific file descriptor. +type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return unix.Read(int(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + newState := *termios + newState.Lflag &^= unix.ECHO + newState.Lflag |= unix.ICANON | unix.ISIG + newState.Iflag |= unix.ICRNL + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) + + return readPasswordLine(passwordReader(fd)) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 0000000..cb23a59 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA +const ioctlWriteTermios = unix.TIOCSETA diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 0000000..5fadfe8 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,10 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go new file mode 100644 index 0000000..799f049 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go new file mode 100644 index 0000000..9e41b9f --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -0,0 +1,124 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +// see http://cr.illumos.org/~webrev/andy_js/1060/ +func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, oldState *State) error { + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 0000000..8618955 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,103 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "os" + + "golang.org/x/sys/windows" +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &st) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { + return nil, err + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return windows.SetConsoleMode(windows.Handle(fd), state.mode) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + return 0, 0, err + } + return int(info.Size.X), int(info.Size.Y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + old := st + + st &^= (windows.ENABLE_ECHO_INPUT) + st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil { + return nil, err + } + + defer windows.SetConsoleMode(windows.Handle(fd), old) + + var h windows.Handle + p, _ := windows.GetCurrentProcess() + if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil { + return nil, err + } + + f := os.NewFile(uintptr(h), "stdin") + defer f.Close() + return readPasswordLine(f) +} diff --git a/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go new file mode 100644 index 0000000..f481253 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package test + +import ( + "bytes" + "testing" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +func TestAgentForward(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + keyring := agent.NewKeyring() + if err := keyring.Add(agent.AddedKey{PrivateKey: testPrivateKeys["dsa"]}); err != nil { + t.Fatalf("Error adding key: %s", err) + } + if err := keyring.Add(agent.AddedKey{ + PrivateKey: testPrivateKeys["dsa"], + ConfirmBeforeUse: true, + LifetimeSecs: 3600, + }); err != nil { + t.Fatalf("Error adding key with constraints: %s", err) + } + pub := testPublicKeys["dsa"] + + sess, err := conn.NewSession() + if err != nil { + t.Fatalf("NewSession: %v", err) + } + if err := agent.RequestAgentForwarding(sess); err != nil { + t.Fatalf("RequestAgentForwarding: %v", err) + } + + if err := agent.ForwardToAgent(conn, keyring); err != nil { + t.Fatalf("SetupForwardKeyring: %v", err) + } + out, err := sess.CombinedOutput("ssh-add -L") + if err != nil { + t.Fatalf("running ssh-add: %v, out %s", err, out) + } + key, _, _, _, err := ssh.ParseAuthorizedKey(out) + if err != nil { + t.Fatalf("ParseAuthorizedKey(%q): %v", out, err) + } + + if !bytes.Equal(key.Marshal(), pub.Marshal()) { + t.Fatalf("got key %s, want %s", ssh.MarshalAuthorizedKey(key), ssh.MarshalAuthorizedKey(pub)) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/test/banner_test.go b/vendor/golang.org/x/crypto/ssh/test/banner_test.go new file mode 100644 index 0000000..d3b21ac --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/banner_test.go @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package test + +import ( + "testing" +) + +func TestBannerCallbackAgainstOpenSSH(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + + clientConf := clientConfig() + + var receivedBanner string + clientConf.BannerCallback = func(message string) error { + receivedBanner = message + return nil + } + + conn := server.Dial(clientConf) + defer conn.Close() + + expected := "Server Banner" + if receivedBanner != expected { + t.Fatalf("got %v; want %v", receivedBanner, expected) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/test/cert_test.go b/vendor/golang.org/x/crypto/ssh/test/cert_test.go new file mode 100644 index 0000000..b231dd8 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/cert_test.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package test + +import ( + "bytes" + "crypto/rand" + "testing" + + "golang.org/x/crypto/ssh" +) + +// Test both logging in with a cert, and also that the certificate presented by an OpenSSH host can be validated correctly +func TestCertLogin(t *testing.T) { + s := newServer(t) + defer s.Shutdown() + + // Use a key different from the default. + clientKey := testSigners["dsa"] + caAuthKey := testSigners["ecdsa"] + cert := &ssh.Certificate{ + Key: clientKey.PublicKey(), + ValidPrincipals: []string{username()}, + CertType: ssh.UserCert, + ValidBefore: ssh.CertTimeInfinity, + } + if err := cert.SignCert(rand.Reader, caAuthKey); err != nil { + t.Fatalf("SetSignature: %v", err) + } + + certSigner, err := ssh.NewCertSigner(cert, clientKey) + if err != nil { + t.Fatalf("NewCertSigner: %v", err) + } + + conf := &ssh.ClientConfig{ + User: username(), + HostKeyCallback: (&ssh.CertChecker{ + IsHostAuthority: func(pk ssh.PublicKey, addr string) bool { + return bytes.Equal(pk.Marshal(), testPublicKeys["ca"].Marshal()) + }, + }).CheckHostKey, + } + conf.Auth = append(conf.Auth, ssh.PublicKeys(certSigner)) + + for _, test := range []struct { + addr string + succeed bool + }{ + {addr: "host.example.com:22", succeed: true}, + {addr: "host.example.com:10000", succeed: true}, // non-standard port must be OK + {addr: "host.example.com", succeed: false}, // port must be specified + {addr: "host.ex4mple.com:22", succeed: false}, // wrong host + } { + client, err := s.TryDialWithAddr(conf, test.addr) + + // Always close client if opened successfully + if err == nil { + client.Close() + } + + // Now evaluate whether the test failed or passed + if test.succeed { + if err != nil { + t.Fatalf("TryDialWithAddr: %v", err) + } + } else { + if err == nil { + t.Fatalf("TryDialWithAddr, unexpected success") + } + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/test/dial_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/dial_unix_test.go new file mode 100644 index 0000000..091e48c --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/dial_unix_test.go @@ -0,0 +1,128 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !windows + +package test + +// direct-tcpip and direct-streamlocal functional tests + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "strings" + "testing" +) + +type dialTester interface { + TestServerConn(t *testing.T, c net.Conn) + TestClientConn(t *testing.T, c net.Conn) +} + +func testDial(t *testing.T, n, listenAddr string, x dialTester) { + server := newServer(t) + defer server.Shutdown() + sshConn := server.Dial(clientConfig()) + defer sshConn.Close() + + l, err := net.Listen(n, listenAddr) + if err != nil { + t.Fatalf("Listen: %v", err) + } + defer l.Close() + + testData := fmt.Sprintf("hello from %s, %s", n, listenAddr) + go func() { + for { + c, err := l.Accept() + if err != nil { + break + } + x.TestServerConn(t, c) + + io.WriteString(c, testData) + c.Close() + } + }() + + conn, err := sshConn.Dial(n, l.Addr().String()) + if err != nil { + t.Fatalf("Dial: %v", err) + } + x.TestClientConn(t, conn) + defer conn.Close() + b, err := ioutil.ReadAll(conn) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + t.Logf("got %q", string(b)) + if string(b) != testData { + t.Fatalf("expected %q, got %q", testData, string(b)) + } +} + +type tcpDialTester struct { + listenAddr string +} + +func (x *tcpDialTester) TestServerConn(t *testing.T, c net.Conn) { + host := strings.Split(x.listenAddr, ":")[0] + prefix := host + ":" + if !strings.HasPrefix(c.LocalAddr().String(), prefix) { + t.Fatalf("expected to start with %q, got %q", prefix, c.LocalAddr().String()) + } + if !strings.HasPrefix(c.RemoteAddr().String(), prefix) { + t.Fatalf("expected to start with %q, got %q", prefix, c.RemoteAddr().String()) + } +} + +func (x *tcpDialTester) TestClientConn(t *testing.T, c net.Conn) { + // we use zero addresses. see *Client.Dial. + if c.LocalAddr().String() != "0.0.0.0:0" { + t.Fatalf("expected \"0.0.0.0:0\", got %q", c.LocalAddr().String()) + } + if c.RemoteAddr().String() != "0.0.0.0:0" { + t.Fatalf("expected \"0.0.0.0:0\", got %q", c.RemoteAddr().String()) + } +} + +func TestDialTCP(t *testing.T) { + x := &tcpDialTester{ + listenAddr: "127.0.0.1:0", + } + testDial(t, "tcp", x.listenAddr, x) +} + +type unixDialTester struct { + listenAddr string +} + +func (x *unixDialTester) TestServerConn(t *testing.T, c net.Conn) { + if c.LocalAddr().String() != x.listenAddr { + t.Fatalf("expected %q, got %q", x.listenAddr, c.LocalAddr().String()) + } + if c.RemoteAddr().String() != "@" { + t.Fatalf("expected \"@\", got %q", c.RemoteAddr().String()) + } +} + +func (x *unixDialTester) TestClientConn(t *testing.T, c net.Conn) { + if c.RemoteAddr().String() != x.listenAddr { + t.Fatalf("expected %q, got %q", x.listenAddr, c.RemoteAddr().String()) + } + if c.LocalAddr().String() != "@" { + t.Fatalf("expected \"@\", got %q", c.LocalAddr().String()) + } +} + +func TestDialUnix(t *testing.T) { + addr, cleanup := newTempSocket(t) + defer cleanup() + x := &unixDialTester{ + listenAddr: addr, + } + testDial(t, "unix", x.listenAddr, x) +} diff --git a/vendor/golang.org/x/crypto/ssh/test/doc.go b/vendor/golang.org/x/crypto/ssh/test/doc.go new file mode 100644 index 0000000..198f0ca --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/doc.go @@ -0,0 +1,7 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package test contains integration tests for the +// golang.org/x/crypto/ssh package. +package test // import "golang.org/x/crypto/ssh/test" diff --git a/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go new file mode 100644 index 0000000..ea81937 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go @@ -0,0 +1,194 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package test + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "net" + "testing" + "time" +) + +type closeWriter interface { + CloseWrite() error +} + +func testPortForward(t *testing.T, n, listenAddr string) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + sshListener, err := conn.Listen(n, listenAddr) + if err != nil { + t.Fatal(err) + } + + go func() { + sshConn, err := sshListener.Accept() + if err != nil { + t.Fatalf("listen.Accept failed: %v", err) + } + + _, err = io.Copy(sshConn, sshConn) + if err != nil && err != io.EOF { + t.Fatalf("ssh client copy: %v", err) + } + sshConn.Close() + }() + + forwardedAddr := sshListener.Addr().String() + netConn, err := net.Dial(n, forwardedAddr) + if err != nil { + t.Fatalf("net dial failed: %v", err) + } + + readChan := make(chan []byte) + go func() { + data, _ := ioutil.ReadAll(netConn) + readChan <- data + }() + + // Invent some data. + data := make([]byte, 100*1000) + for i := range data { + data[i] = byte(i % 255) + } + + var sent []byte + for len(sent) < 1000*1000 { + // Send random sized chunks + m := rand.Intn(len(data)) + n, err := netConn.Write(data[:m]) + if err != nil { + break + } + sent = append(sent, data[:n]...) + } + if err := netConn.(closeWriter).CloseWrite(); err != nil { + t.Errorf("netConn.CloseWrite: %v", err) + } + + read := <-readChan + + if len(sent) != len(read) { + t.Fatalf("got %d bytes, want %d", len(read), len(sent)) + } + if bytes.Compare(sent, read) != 0 { + t.Fatalf("read back data does not match") + } + + if err := sshListener.Close(); err != nil { + t.Fatalf("sshListener.Close: %v", err) + } + + // Check that the forward disappeared. + netConn, err = net.Dial(n, forwardedAddr) + if err == nil { + netConn.Close() + t.Errorf("still listening to %s after closing", forwardedAddr) + } +} + +func TestPortForwardTCP(t *testing.T) { + testPortForward(t, "tcp", "localhost:0") +} + +func TestPortForwardUnix(t *testing.T) { + addr, cleanup := newTempSocket(t) + defer cleanup() + testPortForward(t, "unix", addr) +} + +func testAcceptClose(t *testing.T, n, listenAddr string) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + + sshListener, err := conn.Listen(n, listenAddr) + if err != nil { + t.Fatal(err) + } + + quit := make(chan error, 1) + go func() { + for { + c, err := sshListener.Accept() + if err != nil { + quit <- err + break + } + c.Close() + } + }() + sshListener.Close() + + select { + case <-time.After(1 * time.Second): + t.Errorf("timeout: listener did not close.") + case err := <-quit: + t.Logf("quit as expected (error %v)", err) + } +} + +func TestAcceptCloseTCP(t *testing.T) { + testAcceptClose(t, "tcp", "localhost:0") +} + +func TestAcceptCloseUnix(t *testing.T) { + addr, cleanup := newTempSocket(t) + defer cleanup() + testAcceptClose(t, "unix", addr) +} + +// Check that listeners exit if the underlying client transport dies. +func testPortForwardConnectionClose(t *testing.T, n, listenAddr string) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + + sshListener, err := conn.Listen(n, listenAddr) + if err != nil { + t.Fatal(err) + } + + quit := make(chan error, 1) + go func() { + for { + c, err := sshListener.Accept() + if err != nil { + quit <- err + break + } + c.Close() + } + }() + + // It would be even nicer if we closed the server side, but it + // is more involved as the fd for that side is dup()ed. + server.clientConn.Close() + + select { + case <-time.After(1 * time.Second): + t.Errorf("timeout: listener did not close.") + case err := <-quit: + t.Logf("quit as expected (error %v)", err) + } +} + +func TestPortForwardConnectionCloseTCP(t *testing.T) { + testPortForwardConnectionClose(t, "tcp", "localhost:0") +} + +func TestPortForwardConnectionCloseUnix(t *testing.T) { + addr, cleanup := newTempSocket(t) + defer cleanup() + testPortForwardConnectionClose(t, "unix", addr) +} diff --git a/vendor/golang.org/x/crypto/ssh/test/multi_auth_test.go b/vendor/golang.org/x/crypto/ssh/test/multi_auth_test.go new file mode 100644 index 0000000..f594d36 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/multi_auth_test.go @@ -0,0 +1,144 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests for ssh client multi-auth +// +// These tests run a simple go ssh client against OpenSSH server +// over unix domain sockets. The tests use multiple combinations +// of password, keyboard-interactive and publickey authentication +// methods. +// +// A wrapper library for making sshd PAM authentication use test +// passwords is required in ./sshd_test_pw.so. If the library does +// not exist these tests will be skipped. See compile instructions +// (for linux) in file ./sshd_test_pw.c. + +// +build linux + +package test + +import ( + "fmt" + "strings" + "testing" + + "golang.org/x/crypto/ssh" +) + +// test cases +type multiAuthTestCase struct { + authMethods []string + expectedPasswordCbs int + expectedKbdIntCbs int +} + +// test context +type multiAuthTestCtx struct { + password string + numPasswordCbs int + numKbdIntCbs int +} + +// create test context +func newMultiAuthTestCtx(t *testing.T) *multiAuthTestCtx { + password, err := randomPassword() + if err != nil { + t.Fatalf("Failed to generate random test password: %s", err.Error()) + } + + return &multiAuthTestCtx{ + password: password, + } +} + +// password callback +func (ctx *multiAuthTestCtx) passwordCb() (secret string, err error) { + ctx.numPasswordCbs++ + return ctx.password, nil +} + +// keyboard-interactive callback +func (ctx *multiAuthTestCtx) kbdIntCb(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) == 0 { + return nil, nil + } + + ctx.numKbdIntCbs++ + if len(questions) == 1 { + return []string{ctx.password}, nil + } + + return nil, fmt.Errorf("unsupported keyboard-interactive flow") +} + +// TestMultiAuth runs several subtests for different combinations of password, keyboard-interactive and publickey authentication methods +func TestMultiAuth(t *testing.T) { + testCases := []multiAuthTestCase{ + // Test password,publickey authentication, assert that password callback is called 1 time + multiAuthTestCase{ + authMethods: []string{"password", "publickey"}, + expectedPasswordCbs: 1, + }, + // Test keyboard-interactive,publickey authentication, assert that keyboard-interactive callback is called 1 time + multiAuthTestCase{ + authMethods: []string{"keyboard-interactive", "publickey"}, + expectedKbdIntCbs: 1, + }, + // Test publickey,password authentication, assert that password callback is called 1 time + multiAuthTestCase{ + authMethods: []string{"publickey", "password"}, + expectedPasswordCbs: 1, + }, + // Test publickey,keyboard-interactive authentication, assert that keyboard-interactive callback is called 1 time + multiAuthTestCase{ + authMethods: []string{"publickey", "keyboard-interactive"}, + expectedKbdIntCbs: 1, + }, + // Test password,password authentication, assert that password callback is called 2 times + multiAuthTestCase{ + authMethods: []string{"password", "password"}, + expectedPasswordCbs: 2, + }, + } + + for _, testCase := range testCases { + t.Run(strings.Join(testCase.authMethods, ","), func(t *testing.T) { + ctx := newMultiAuthTestCtx(t) + + server := newServerForConfig(t, "MultiAuth", map[string]string{"AuthMethods": strings.Join(testCase.authMethods, ",")}) + defer server.Shutdown() + + clientConfig := clientConfig() + server.setTestPassword(clientConfig.User, ctx.password) + + publicKeyAuthMethod := clientConfig.Auth[0] + clientConfig.Auth = nil + for _, authMethod := range testCase.authMethods { + switch authMethod { + case "publickey": + clientConfig.Auth = append(clientConfig.Auth, publicKeyAuthMethod) + case "password": + clientConfig.Auth = append(clientConfig.Auth, + ssh.RetryableAuthMethod(ssh.PasswordCallback(ctx.passwordCb), 5)) + case "keyboard-interactive": + clientConfig.Auth = append(clientConfig.Auth, + ssh.RetryableAuthMethod(ssh.KeyboardInteractive(ctx.kbdIntCb), 5)) + default: + t.Fatalf("Unknown authentication method %s", authMethod) + } + } + + conn := server.Dial(clientConfig) + defer conn.Close() + + if ctx.numPasswordCbs != testCase.expectedPasswordCbs { + t.Fatalf("passwordCallback was called %d times, expected %d times", ctx.numPasswordCbs, testCase.expectedPasswordCbs) + } + + if ctx.numKbdIntCbs != testCase.expectedKbdIntCbs { + t.Fatalf("keyboardInteractiveCallback was called %d times, expected %d times", ctx.numKbdIntCbs, testCase.expectedKbdIntCbs) + } + }) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/test/session_test.go b/vendor/golang.org/x/crypto/ssh/test/session_test.go new file mode 100644 index 0000000..4eb7afd --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/session_test.go @@ -0,0 +1,443 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !windows + +package test + +// Session functional tests. + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" + "testing" + + "golang.org/x/crypto/ssh" +) + +func TestRunCommandSuccess(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + err = session.Run("true") + if err != nil { + t.Fatalf("session failed: %v", err) + } +} + +func TestHostKeyCheck(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + + conf := clientConfig() + hostDB := hostKeyDB() + conf.HostKeyCallback = hostDB.Check + + // change the keys. + hostDB.keys[ssh.KeyAlgoRSA][25]++ + hostDB.keys[ssh.KeyAlgoDSA][25]++ + hostDB.keys[ssh.KeyAlgoECDSA256][25]++ + + conn, err := server.TryDial(conf) + if err == nil { + conn.Close() + t.Fatalf("dial should have failed.") + } else if !strings.Contains(err.Error(), "host key mismatch") { + t.Fatalf("'host key mismatch' not found in %v", err) + } +} + +func TestRunCommandStdin(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + + r, w := io.Pipe() + defer r.Close() + defer w.Close() + session.Stdin = r + + err = session.Run("true") + if err != nil { + t.Fatalf("session failed: %v", err) + } +} + +func TestRunCommandStdinError(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + + r, w := io.Pipe() + defer r.Close() + session.Stdin = r + pipeErr := errors.New("closing write end of pipe") + w.CloseWithError(pipeErr) + + err = session.Run("true") + if err != pipeErr { + t.Fatalf("expected %v, found %v", pipeErr, err) + } +} + +func TestRunCommandFailed(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + err = session.Run(`bash -c "kill -9 $$"`) + if err == nil { + t.Fatalf("session succeeded: %v", err) + } +} + +func TestRunCommandWeClosed(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + err = session.Shell() + if err != nil { + t.Fatalf("shell failed: %v", err) + } + err = session.Close() + if err != nil { + t.Fatalf("shell failed: %v", err) + } +} + +func TestFuncLargeRead(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("unable to create new session: %s", err) + } + + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("unable to acquire stdout pipe: %s", err) + } + + err = session.Start("dd if=/dev/urandom bs=2048 count=1024") + if err != nil { + t.Fatalf("unable to execute remote command: %s", err) + } + + buf := new(bytes.Buffer) + n, err := io.Copy(buf, stdout) + if err != nil { + t.Fatalf("error reading from remote stdout: %s", err) + } + + if n != 2048*1024 { + t.Fatalf("Expected %d bytes but read only %d from remote command", 2048, n) + } +} + +func TestKeyChange(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conf := clientConfig() + hostDB := hostKeyDB() + conf.HostKeyCallback = hostDB.Check + conf.RekeyThreshold = 1024 + conn := server.Dial(conf) + defer conn.Close() + + for i := 0; i < 4; i++ { + session, err := conn.NewSession() + if err != nil { + t.Fatalf("unable to create new session: %s", err) + } + + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("unable to acquire stdout pipe: %s", err) + } + + err = session.Start("dd if=/dev/urandom bs=1024 count=1") + if err != nil { + t.Fatalf("unable to execute remote command: %s", err) + } + buf := new(bytes.Buffer) + n, err := io.Copy(buf, stdout) + if err != nil { + t.Fatalf("error reading from remote stdout: %s", err) + } + + want := int64(1024) + if n != want { + t.Fatalf("Expected %d bytes but read only %d from remote command", want, n) + } + } + + if changes := hostDB.checkCount; changes < 4 { + t.Errorf("got %d key changes, want 4", changes) + } +} + +func TestInvalidTerminalMode(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + + if err = session.RequestPty("vt100", 80, 40, ssh.TerminalModes{255: 1984}); err == nil { + t.Fatalf("req-pty failed: successful request with invalid mode") + } +} + +func TestValidTerminalMode(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("unable to acquire stdout pipe: %s", err) + } + + stdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("unable to acquire stdin pipe: %s", err) + } + + tm := ssh.TerminalModes{ssh.ECHO: 0} + if err = session.RequestPty("xterm", 80, 40, tm); err != nil { + t.Fatalf("req-pty failed: %s", err) + } + + err = session.Shell() + if err != nil { + t.Fatalf("session failed: %s", err) + } + + stdin.Write([]byte("stty -a && exit\n")) + + var buf bytes.Buffer + if _, err := io.Copy(&buf, stdout); err != nil { + t.Fatalf("reading failed: %s", err) + } + + if sttyOutput := buf.String(); !strings.Contains(sttyOutput, "-echo ") { + t.Fatalf("terminal mode failure: expected -echo in stty output, got %s", sttyOutput) + } +} + +func TestWindowChange(t *testing.T) { + server := newServer(t) + defer server.Shutdown() + conn := server.Dial(clientConfig()) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatalf("session failed: %v", err) + } + defer session.Close() + + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("unable to acquire stdout pipe: %s", err) + } + + stdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("unable to acquire stdin pipe: %s", err) + } + + tm := ssh.TerminalModes{ssh.ECHO: 0} + if err = session.RequestPty("xterm", 80, 40, tm); err != nil { + t.Fatalf("req-pty failed: %s", err) + } + + if err := session.WindowChange(100, 100); err != nil { + t.Fatalf("window-change failed: %s", err) + } + + err = session.Shell() + if err != nil { + t.Fatalf("session failed: %s", err) + } + + stdin.Write([]byte("stty size && exit\n")) + + var buf bytes.Buffer + if _, err := io.Copy(&buf, stdout); err != nil { + t.Fatalf("reading failed: %s", err) + } + + if sttyOutput := buf.String(); !strings.Contains(sttyOutput, "100 100") { + t.Fatalf("terminal WindowChange failure: expected \"100 100\" stty output, got %s", sttyOutput) + } +} + +func testOneCipher(t *testing.T, cipher string, cipherOrder []string) { + server := newServer(t) + defer server.Shutdown() + conf := clientConfig() + conf.Ciphers = []string{cipher} + // Don't fail if sshd doesn't have the cipher. + conf.Ciphers = append(conf.Ciphers, cipherOrder...) + conn, err := server.TryDial(conf) + if err != nil { + t.Fatalf("TryDial: %v", err) + } + defer conn.Close() + + numBytes := 4096 + + // Exercise sending data to the server + if _, _, err := conn.Conn.SendRequest("drop-me", false, make([]byte, numBytes)); err != nil { + t.Fatalf("SendRequest: %v", err) + } + + // Exercise receiving data from the server + session, err := conn.NewSession() + if err != nil { + t.Fatalf("NewSession: %v", err) + } + + out, err := session.Output(fmt.Sprintf("dd if=/dev/zero of=/dev/stdout bs=%d count=1", numBytes)) + if err != nil { + t.Fatalf("Output: %v", err) + } + + if len(out) != numBytes { + t.Fatalf("got %d bytes, want %d bytes", len(out), numBytes) + } +} + +var deprecatedCiphers = []string{ + "aes128-cbc", "3des-cbc", + "arcfour128", "arcfour256", +} + +func TestCiphers(t *testing.T) { + var config ssh.Config + config.SetDefaults() + cipherOrder := append(config.Ciphers, deprecatedCiphers...) + + for _, ciph := range cipherOrder { + t.Run(ciph, func(t *testing.T) { + testOneCipher(t, ciph, cipherOrder) + }) + } +} + +func TestMACs(t *testing.T) { + var config ssh.Config + config.SetDefaults() + macOrder := config.MACs + + for _, mac := range macOrder { + server := newServer(t) + defer server.Shutdown() + conf := clientConfig() + conf.MACs = []string{mac} + // Don't fail if sshd doesn't have the MAC. + conf.MACs = append(conf.MACs, macOrder...) + if conn, err := server.TryDial(conf); err == nil { + conn.Close() + } else { + t.Fatalf("failed for MAC %q", mac) + } + } +} + +func TestKeyExchanges(t *testing.T) { + var config ssh.Config + config.SetDefaults() + kexOrder := config.KeyExchanges + for _, kex := range kexOrder { + server := newServer(t) + defer server.Shutdown() + conf := clientConfig() + // Don't fail if sshd doesn't have the kex. + conf.KeyExchanges = append([]string{kex}, kexOrder...) + conn, err := server.TryDial(conf) + if err == nil { + conn.Close() + } else { + t.Errorf("failed for kex %q", kex) + } + } +} + +func TestClientAuthAlgorithms(t *testing.T) { + for _, key := range []string{ + "rsa", + "dsa", + "ecdsa", + "ed25519", + } { + server := newServer(t) + conf := clientConfig() + conf.SetDefaults() + conf.Auth = []ssh.AuthMethod{ + ssh.PublicKeys(testSigners[key]), + } + + conn, err := server.TryDial(conf) + if err == nil { + conn.Close() + } else { + t.Errorf("failed for key %q", key) + } + + server.Shutdown() + } +} diff --git a/vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c b/vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c new file mode 100644 index 0000000..2794a56 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c @@ -0,0 +1,173 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// sshd_test_pw.c +// Wrapper to inject test password data for sshd PAM authentication +// +// This wrapper implements custom versions of getpwnam, getpwnam_r, +// getspnam and getspnam_r. These functions first call their real +// libc versions, then check if the requested user matches test user +// specified in env variable TEST_USER and if so replace the password +// with crypted() value of TEST_PASSWD env variable. +// +// Compile: +// gcc -Wall -shared -o sshd_test_pw.so -fPIC sshd_test_pw.c +// +// Compile with debug: +// gcc -DVERBOSE -Wall -shared -o sshd_test_pw.so -fPIC sshd_test_pw.c +// +// Run sshd: +// LD_PRELOAD="sshd_test_pw.so" TEST_USER="..." TEST_PASSWD="..." sshd ... + +// +build ignore + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#ifdef VERBOSE +#define DEBUG(X...) fprintf(stderr, X) +#else +#define DEBUG(X...) while (0) { } +#endif + +/* crypt() password */ +static char * +pwhash(char *passwd) { + return strdup(crypt(passwd, "$6$")); +} + +/* Pointers to real functions in libc */ +static struct passwd * (*real_getpwnam)(const char *) = NULL; +static int (*real_getpwnam_r)(const char *, struct passwd *, char *, size_t, struct passwd **) = NULL; +static struct spwd * (*real_getspnam)(const char *) = NULL; +static int (*real_getspnam_r)(const char *, struct spwd *, char *, size_t, struct spwd **) = NULL; + +/* Cached test user and test password */ +static char *test_user = NULL; +static char *test_passwd_hash = NULL; + +static void +init(void) { + /* Fetch real libc function pointers */ + real_getpwnam = dlsym(RTLD_NEXT, "getpwnam"); + real_getpwnam_r = dlsym(RTLD_NEXT, "getpwnam_r"); + real_getspnam = dlsym(RTLD_NEXT, "getspnam"); + real_getspnam_r = dlsym(RTLD_NEXT, "getspnam_r"); + + /* abort if env variables are not defined */ + if (getenv("TEST_USER") == NULL || getenv("TEST_PASSWD") == NULL) { + fprintf(stderr, "env variables TEST_USER and TEST_PASSWD are missing\n"); + abort(); + } + + /* Fetch test user and test password from env */ + test_user = strdup(getenv("TEST_USER")); + test_passwd_hash = pwhash(getenv("TEST_PASSWD")); + + DEBUG("sshd_test_pw init():\n"); + DEBUG("\treal_getpwnam: %p\n", real_getpwnam); + DEBUG("\treal_getpwnam_r: %p\n", real_getpwnam_r); + DEBUG("\treal_getspnam: %p\n", real_getspnam); + DEBUG("\treal_getspnam_r: %p\n", real_getspnam_r); + DEBUG("\tTEST_USER: '%s'\n", test_user); + DEBUG("\tTEST_PASSWD: '%s'\n", getenv("TEST_PASSWD")); + DEBUG("\tTEST_PASSWD_HASH: '%s'\n", test_passwd_hash); +} + +static int +is_test_user(const char *name) { + if (test_user != NULL && strcmp(test_user, name) == 0) + return 1; + return 0; +} + +/* getpwnam */ + +struct passwd * +getpwnam(const char *name) { + struct passwd *pw; + + DEBUG("sshd_test_pw getpwnam(%s)\n", name); + + if (real_getpwnam == NULL) + init(); + if ((pw = real_getpwnam(name)) == NULL) + return NULL; + + if (is_test_user(name)) + pw->pw_passwd = strdup(test_passwd_hash); + + return pw; +} + +/* getpwnam_r */ + +int +getpwnam_r(const char *name, + struct passwd *pwd, + char *buf, + size_t buflen, + struct passwd **result) { + int r; + + DEBUG("sshd_test_pw getpwnam_r(%s)\n", name); + + if (real_getpwnam_r == NULL) + init(); + if ((r = real_getpwnam_r(name, pwd, buf, buflen, result)) != 0 || *result == NULL) + return r; + + if (is_test_user(name)) + pwd->pw_passwd = strdup(test_passwd_hash); + + return 0; +} + +/* getspnam */ + +struct spwd * +getspnam(const char *name) { + struct spwd *sp; + + DEBUG("sshd_test_pw getspnam(%s)\n", name); + + if (real_getspnam == NULL) + init(); + if ((sp = real_getspnam(name)) == NULL) + return NULL; + + if (is_test_user(name)) + sp->sp_pwdp = strdup(test_passwd_hash); + + return sp; +} + +/* getspnam_r */ + +int +getspnam_r(const char *name, + struct spwd *spbuf, + char *buf, + size_t buflen, + struct spwd **spbufp) { + int r; + + DEBUG("sshd_test_pw getspnam_r(%s)\n", name); + + if (real_getspnam_r == NULL) + init(); + if ((r = real_getspnam_r(name, spbuf, buf, buflen, spbufp)) != 0) + return r; + + if (is_test_user(name)) + spbuf->sp_pwdp = strdup(test_passwd_hash); + + return r; +} diff --git a/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go new file mode 100644 index 0000000..3960786 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go @@ -0,0 +1,361 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd plan9 + +package test + +// functional test harness for unix. + +import ( + "bytes" + "crypto/rand" + "encoding/base64" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "os/user" + "path/filepath" + "testing" + "text/template" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/testdata" +) + +const ( + defaultSshdConfig = ` +Protocol 2 +Banner {{.Dir}}/banner +HostKey {{.Dir}}/id_rsa +HostKey {{.Dir}}/id_dsa +HostKey {{.Dir}}/id_ecdsa +HostCertificate {{.Dir}}/id_rsa-cert.pub +Pidfile {{.Dir}}/sshd.pid +#UsePrivilegeSeparation no +KeyRegenerationInterval 3600 +ServerKeyBits 768 +SyslogFacility AUTH +LogLevel DEBUG2 +LoginGraceTime 120 +PermitRootLogin no +StrictModes no +RSAAuthentication yes +PubkeyAuthentication yes +AuthorizedKeysFile {{.Dir}}/authorized_keys +TrustedUserCAKeys {{.Dir}}/id_ecdsa.pub +IgnoreRhosts yes +RhostsRSAAuthentication no +HostbasedAuthentication no +PubkeyAcceptedKeyTypes=* +` + multiAuthSshdConfigTail = ` +UsePAM yes +PasswordAuthentication yes +ChallengeResponseAuthentication yes +AuthenticationMethods {{.AuthMethods}} +` +) + +var configTmpl = map[string]*template.Template{ + "default": template.Must(template.New("").Parse(defaultSshdConfig)), + "MultiAuth": template.Must(template.New("").Parse(defaultSshdConfig + multiAuthSshdConfigTail))} + +type server struct { + t *testing.T + cleanup func() // executed during Shutdown + configfile string + cmd *exec.Cmd + output bytes.Buffer // holds stderr from sshd process + + testUser string // test username for sshd + testPasswd string // test password for sshd + sshdTestPwSo string // dynamic library to inject a custom password into sshd + + // Client half of the network connection. + clientConn net.Conn +} + +func username() string { + var username string + if user, err := user.Current(); err == nil { + username = user.Username + } else { + // user.Current() currently requires cgo. If an error is + // returned attempt to get the username from the environment. + log.Printf("user.Current: %v; falling back on $USER", err) + username = os.Getenv("USER") + } + if username == "" { + panic("Unable to get username") + } + return username +} + +type storedHostKey struct { + // keys map from an algorithm string to binary key data. + keys map[string][]byte + + // checkCount counts the Check calls. Used for testing + // rekeying. + checkCount int +} + +func (k *storedHostKey) Add(key ssh.PublicKey) { + if k.keys == nil { + k.keys = map[string][]byte{} + } + k.keys[key.Type()] = key.Marshal() +} + +func (k *storedHostKey) Check(addr string, remote net.Addr, key ssh.PublicKey) error { + k.checkCount++ + algo := key.Type() + + if k.keys == nil || bytes.Compare(key.Marshal(), k.keys[algo]) != 0 { + return fmt.Errorf("host key mismatch. Got %q, want %q", key, k.keys[algo]) + } + return nil +} + +func hostKeyDB() *storedHostKey { + keyChecker := &storedHostKey{} + keyChecker.Add(testPublicKeys["ecdsa"]) + keyChecker.Add(testPublicKeys["rsa"]) + keyChecker.Add(testPublicKeys["dsa"]) + return keyChecker +} + +func clientConfig() *ssh.ClientConfig { + config := &ssh.ClientConfig{ + User: username(), + Auth: []ssh.AuthMethod{ + ssh.PublicKeys(testSigners["user"]), + }, + HostKeyCallback: hostKeyDB().Check, + HostKeyAlgorithms: []string{ // by default, don't allow certs as this affects the hostKeyDB checker + ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521, + ssh.KeyAlgoRSA, ssh.KeyAlgoDSA, + ssh.KeyAlgoED25519, + }, + } + return config +} + +// unixConnection creates two halves of a connected net.UnixConn. It +// is used for connecting the Go SSH client with sshd without opening +// ports. +func unixConnection() (*net.UnixConn, *net.UnixConn, error) { + dir, err := ioutil.TempDir("", "unixConnection") + if err != nil { + return nil, nil, err + } + defer os.Remove(dir) + + addr := filepath.Join(dir, "ssh") + listener, err := net.Listen("unix", addr) + if err != nil { + return nil, nil, err + } + defer listener.Close() + c1, err := net.Dial("unix", addr) + if err != nil { + return nil, nil, err + } + + c2, err := listener.Accept() + if err != nil { + c1.Close() + return nil, nil, err + } + + return c1.(*net.UnixConn), c2.(*net.UnixConn), nil +} + +func (s *server) TryDial(config *ssh.ClientConfig) (*ssh.Client, error) { + return s.TryDialWithAddr(config, "") +} + +// addr is the user specified host:port. While we don't actually dial it, +// we need to know this for host key matching +func (s *server) TryDialWithAddr(config *ssh.ClientConfig, addr string) (*ssh.Client, error) { + sshd, err := exec.LookPath("sshd") + if err != nil { + s.t.Skipf("skipping test: %v", err) + } + + c1, c2, err := unixConnection() + if err != nil { + s.t.Fatalf("unixConnection: %v", err) + } + + s.cmd = exec.Command(sshd, "-f", s.configfile, "-i", "-e") + f, err := c2.File() + if err != nil { + s.t.Fatalf("UnixConn.File: %v", err) + } + defer f.Close() + s.cmd.Stdin = f + s.cmd.Stdout = f + s.cmd.Stderr = &s.output + + if s.sshdTestPwSo != "" { + if s.testUser == "" { + s.t.Fatal("user missing from sshd_test_pw.so config") + } + if s.testPasswd == "" { + s.t.Fatal("password missing from sshd_test_pw.so config") + } + s.cmd.Env = append(os.Environ(), + fmt.Sprintf("LD_PRELOAD=%s", s.sshdTestPwSo), + fmt.Sprintf("TEST_USER=%s", s.testUser), + fmt.Sprintf("TEST_PASSWD=%s", s.testPasswd)) + } + + if err := s.cmd.Start(); err != nil { + s.t.Fail() + s.Shutdown() + s.t.Fatalf("s.cmd.Start: %v", err) + } + s.clientConn = c1 + conn, chans, reqs, err := ssh.NewClientConn(c1, addr, config) + if err != nil { + return nil, err + } + return ssh.NewClient(conn, chans, reqs), nil +} + +func (s *server) Dial(config *ssh.ClientConfig) *ssh.Client { + conn, err := s.TryDial(config) + if err != nil { + s.t.Fail() + s.Shutdown() + s.t.Fatalf("ssh.Client: %v", err) + } + return conn +} + +func (s *server) Shutdown() { + if s.cmd != nil && s.cmd.Process != nil { + // Don't check for errors; if it fails it's most + // likely "os: process already finished", and we don't + // care about that. Use os.Interrupt, so child + // processes are killed too. + s.cmd.Process.Signal(os.Interrupt) + s.cmd.Wait() + } + if s.t.Failed() { + // log any output from sshd process + s.t.Logf("sshd: %s", s.output.String()) + } + s.cleanup() +} + +func writeFile(path string, contents []byte) { + f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600) + if err != nil { + panic(err) + } + defer f.Close() + if _, err := f.Write(contents); err != nil { + panic(err) + } +} + +// generate random password +func randomPassword() (string, error) { + b := make([]byte, 12) + _, err := rand.Read(b) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// setTestPassword is used for setting user and password data for sshd_test_pw.so +// This function also checks that ./sshd_test_pw.so exists and if not calls s.t.Skip() +func (s *server) setTestPassword(user, passwd string) error { + wd, _ := os.Getwd() + wrapper := filepath.Join(wd, "sshd_test_pw.so") + if _, err := os.Stat(wrapper); err != nil { + s.t.Skip(fmt.Errorf("sshd_test_pw.so is not available")) + return err + } + + s.sshdTestPwSo = wrapper + s.testUser = user + s.testPasswd = passwd + return nil +} + +// newServer returns a new mock ssh server. +func newServer(t *testing.T) *server { + return newServerForConfig(t, "default", map[string]string{}) +} + +// newServerForConfig returns a new mock ssh server. +func newServerForConfig(t *testing.T, config string, configVars map[string]string) *server { + if testing.Short() { + t.Skip("skipping test due to -short") + } + dir, err := ioutil.TempDir("", "sshtest") + if err != nil { + t.Fatal(err) + } + f, err := os.Create(filepath.Join(dir, "sshd_config")) + if err != nil { + t.Fatal(err) + } + if _, ok := configTmpl[config]; ok == false { + t.Fatal(fmt.Errorf("Invalid server config '%s'", config)) + } + configVars["Dir"] = dir + err = configTmpl[config].Execute(f, configVars) + if err != nil { + t.Fatal(err) + } + f.Close() + + writeFile(filepath.Join(dir, "banner"), []byte("Server Banner")) + + for k, v := range testdata.PEMBytes { + filename := "id_" + k + writeFile(filepath.Join(dir, filename), v) + writeFile(filepath.Join(dir, filename+".pub"), ssh.MarshalAuthorizedKey(testPublicKeys[k])) + } + + for k, v := range testdata.SSHCertificates { + filename := "id_" + k + "-cert.pub" + writeFile(filepath.Join(dir, filename), v) + } + + var authkeys bytes.Buffer + for k := range testdata.PEMBytes { + authkeys.Write(ssh.MarshalAuthorizedKey(testPublicKeys[k])) + } + writeFile(filepath.Join(dir, "authorized_keys"), authkeys.Bytes()) + + return &server{ + t: t, + configfile: f.Name(), + cleanup: func() { + if err := os.RemoveAll(dir); err != nil { + t.Error(err) + } + }, + } +} + +func newTempSocket(t *testing.T) (string, func()) { + dir, err := ioutil.TempDir("", "socket") + if err != nil { + t.Fatal(err) + } + deferFunc := func() { os.RemoveAll(dir) } + addr := filepath.Join(dir, "sock") + return addr, deferFunc +} diff --git a/vendor/golang.org/x/crypto/ssh/test/testdata_test.go b/vendor/golang.org/x/crypto/ssh/test/testdata_test.go new file mode 100644 index 0000000..a053f67 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/testdata_test.go @@ -0,0 +1,64 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three +// instances. + +package test + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/testdata" +) + +var ( + testPrivateKeys map[string]interface{} + testSigners map[string]ssh.Signer + testPublicKeys map[string]ssh.PublicKey +) + +func init() { + var err error + + n := len(testdata.PEMBytes) + testPrivateKeys = make(map[string]interface{}, n) + testSigners = make(map[string]ssh.Signer, n) + testPublicKeys = make(map[string]ssh.PublicKey, n) + for t, k := range testdata.PEMBytes { + testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k) + if err != nil { + panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err)) + } + testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t]) + if err != nil { + panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err)) + } + testPublicKeys[t] = testSigners[t].PublicKey() + } + + // Create a cert and sign it for use in tests. + testCert := &ssh.Certificate{ + Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage + ValidAfter: 0, // unix epoch + ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time. + Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + Key: testPublicKeys["ecdsa"], + SignatureKey: testPublicKeys["rsa"], + Permissions: ssh.Permissions{ + CriticalOptions: map[string]string{}, + Extensions: map[string]string{}, + }, + } + testCert.SignCert(rand.Reader, testSigners["rsa"]) + testPrivateKeys["cert"] = testPrivateKeys["ecdsa"] + testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"]) + if err != nil { + panic(fmt.Sprintf("Unable to create certificate signer: %v", err)) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/testdata/doc.go b/vendor/golang.org/x/crypto/ssh/testdata/doc.go new file mode 100644 index 0000000..fcae47c --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/testdata/doc.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This package contains test data shared between the various subpackages of +// the golang.org/x/crypto/ssh package. Under no circumstance should +// this data be used for production code. +package testdata // import "golang.org/x/crypto/ssh/testdata" diff --git a/vendor/golang.org/x/crypto/ssh/testdata/keys.go b/vendor/golang.org/x/crypto/ssh/testdata/keys.go new file mode 100644 index 0000000..521b6be --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/testdata/keys.go @@ -0,0 +1,198 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testdata + +var PEMBytes = map[string][]byte{ + "dsa": []byte(`-----BEGIN DSA PRIVATE KEY----- +MIIBuwIBAAKBgQD6PDSEyXiI9jfNs97WuM46MSDCYlOqWw80ajN16AohtBncs1YB +lHk//dQOvCYOsYaE+gNix2jtoRjwXhDsc25/IqQbU1ahb7mB8/rsaILRGIbA5WH3 +EgFtJmXFovDz3if6F6TzvhFpHgJRmLYVR8cqsezL3hEZOvvs2iH7MorkxwIVAJHD +nD82+lxh2fb4PMsIiaXudAsBAoGAQRf7Q/iaPRn43ZquUhd6WwvirqUj+tkIu6eV +2nZWYmXLlqFQKEy4Tejl7Wkyzr2OSYvbXLzo7TNxLKoWor6ips0phYPPMyXld14r +juhT24CrhOzuLMhDduMDi032wDIZG4Y+K7ElU8Oufn8Sj5Wge8r6ANmmVgmFfynr +FhdYCngCgYEA3ucGJ93/Mx4q4eKRDxcWD3QzWyqpbRVRRV1Vmih9Ha/qC994nJFz +DQIdjxDIT2Rk2AGzMqFEB68Zc3O+Wcsmz5eWWzEwFxaTwOGWTyDqsDRLm3fD+QYj +nOwuxb0Kce+gWI8voWcqC9cyRm09jGzu2Ab3Bhtpg8JJ8L7gS3MRZK4CFEx4UAfY +Fmsr0W6fHB9nhS4/UXM8 +-----END DSA PRIVATE KEY----- +`), + "ecdsa": []byte(`-----BEGIN EC PRIVATE KEY----- +MHcCAQEEINGWx0zo6fhJ/0EAfrPzVFyFC9s18lBt3cRoEDhS3ARooAoGCCqGSM49 +AwEHoUQDQgAEi9Hdw6KvZcWxfg2IDhA7UkpDtzzt6ZqJXSsFdLd+Kx4S3Sx4cVO+ +6/ZOXRnPmNAlLUqjShUsUBBngG0u2fqEqA== +-----END EC PRIVATE KEY----- +`), + "ecdsap256": []byte(`-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIAPCE25zK0PQSnsgVcEbM1mbKTASH4pqb5QJajplDwDZoAoGCCqGSM49 +AwEHoUQDQgAEWy8TxGcIHRh5XGpO4dFVfDjeNY+VkgubQrf/eyFJZHxAn1SKraXU +qJUjTKj1z622OxYtJ5P7s9CfAEVsTzLCzg== +-----END EC PRIVATE KEY----- +`), + "ecdsap384": []byte(`-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDBWfSnMuNKq8J9rQLzzEkx3KAoEohSXqhE/4CdjEYtoU2i22HW80DDS +qQhYNHRAduygBwYFK4EEACKhZANiAAQWaDMAd0HUd8ZiXCX7mYDDnC54gwH/nG43 +VhCUEYmF7HMZm/B9Yn3GjFk3qYEDEvuF/52+NvUKBKKaLbh32AWxMv0ibcoba4cz +hL9+hWYhUD9XIUlzMWiZ2y6eBE9PdRI= +-----END EC PRIVATE KEY----- +`), + "ecdsap521": []byte(`-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIBrkYpQcy8KTVHNiAkjlFZwee90224Bu6wz94R4OBo+Ts0eoAQG7SF +iaygEDMUbx6kTgXTBcKZ0jrWPKakayNZ/kigBwYFK4EEACOhgYkDgYYABADFuvLV +UoaCDGHcw5uNfdRIsvaLKuWSpLsl48eWGZAwdNG432GDVKduO+pceuE+8XzcyJb+ +uMv+D2b11Q/LQUcHJwE6fqbm8m3EtDKPsoKs0u/XUJb0JsH4J8lkZzbUTjvGYamn +FFlRjzoB3Oxu8UQgb+MWPedtH9XYBbg9biz4jJLkXQ== +-----END EC PRIVATE KEY----- +`), + "rsa": []byte(`-----BEGIN RSA PRIVATE KEY----- +MIICXAIBAAKBgQC8A6FGHDiWCSREAXCq6yBfNVr0xCVG2CzvktFNRpue+RXrGs/2 +a6ySEJQb3IYquw7HlJgu6fg3WIWhOmHCjfpG0PrL4CRwbqQ2LaPPXhJErWYejcD8 +Di00cF3677+G10KMZk9RXbmHtuBFZT98wxg8j+ZsBMqGM1+7yrWUvynswQIDAQAB +AoGAJMCk5vqfSRzyXOTXLGIYCuR4Kj6pdsbNSeuuRGfYBeR1F2c/XdFAg7D/8s5R +38p/Ih52/Ty5S8BfJtwtvgVY9ecf/JlU/rl/QzhG8/8KC0NG7KsyXklbQ7gJT8UT +Ojmw5QpMk+rKv17ipDVkQQmPaj+gJXYNAHqImke5mm/K/h0CQQDciPmviQ+DOhOq +2ZBqUfH8oXHgFmp7/6pXw80DpMIxgV3CwkxxIVx6a8lVH9bT/AFySJ6vXq4zTuV9 +6QmZcZzDAkEA2j/UXJPIs1fQ8z/6sONOkU/BjtoePFIWJlRxdN35cZjXnBraX5UR +fFHkePv4YwqmXNqrBOvSu+w2WdSDci+IKwJAcsPRc/jWmsrJW1q3Ha0hSf/WG/Bu +X7MPuXaKpP/DkzGoUmb8ks7yqj6XWnYkPNLjCc8izU5vRwIiyWBRf4mxMwJBAILa +NDvRS0rjwt6lJGv7zPZoqDc65VfrK2aNyHx2PgFyzwrEOtuF57bu7pnvEIxpLTeM +z26i6XVMeYXAWZMTloMCQBbpGgEERQpeUknLBqUHhg/wXF6+lFA+vEGnkY+Dwab2 +KCXFGd+SQ5GdUcEMe9isUH6DYj/6/yCDoFrXXmpQb+M= +-----END RSA PRIVATE KEY----- +`), + "ed25519": []byte(`-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACA+3f7hS7g5UWwXOGVTrMfhmxyrjqz7Sxxbx7I1j8DvvwAAAJhAFfkOQBX5 +DgAAAAtzc2gtZWQyNTUxOQAAACA+3f7hS7g5UWwXOGVTrMfhmxyrjqz7Sxxbx7I1j8Dvvw +AAAEAaYmXltfW6nhRo3iWGglRB48lYq0z0Q3I3KyrdutEr6j7d/uFLuDlRbBc4ZVOsx+Gb +HKuOrPtLHFvHsjWPwO+/AAAAE2dhcnRvbm1AZ2FydG9ubS14cHMBAg== +-----END OPENSSH PRIVATE KEY----- +`), + "rsa-openssh-format": []byte(`-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAlwAAAAdzc2gtcn +NhAAAAAwEAAQAAAIEAwa48yfWFi3uIdqzuf9X7C2Zxfea/Iaaw0zIwHudpF8U92WVIiC5l +oEuW1+OaVi3UWfIEjWMV1tHGysrHOwtwc34BPCJqJknUQO/KtDTBTJ4Pryhw1bWPC999Lz +a+yrCTdNQYBzoROXKExZgPFh9pTMi5wqpHDuOQ2qZFIEI3lT0AAAIQWL0H31i9B98AAAAH +c3NoLXJzYQAAAIEAwa48yfWFi3uIdqzuf9X7C2Zxfea/Iaaw0zIwHudpF8U92WVIiC5loE +uW1+OaVi3UWfIEjWMV1tHGysrHOwtwc34BPCJqJknUQO/KtDTBTJ4Pryhw1bWPC999Lza+ +yrCTdNQYBzoROXKExZgPFh9pTMi5wqpHDuOQ2qZFIEI3lT0AAAADAQABAAAAgCThyTGsT4 +IARDxVMhWl6eiB2ZrgFgWSeJm/NOqtppWgOebsIqPMMg4UVuVFsl422/lE3RkPhVkjGXgE +pWvZAdCnmLmApK8wK12vF334lZhZT7t3Z9EzJps88PWEHo7kguf285HcnUM7FlFeissJdk +kXly34y7/3X/a6Tclm+iABAAAAQE0xR/KxZ39slwfMv64Rz7WKk1PPskaryI29aHE3mKHk +pY2QA+P3QlrKxT/VWUMjHUbNNdYfJm48xu0SGNMRdKMAAABBAORh2NP/06JUV3J9W/2Hju +X1ViJuqqcQnJPVzpgSL826EC2xwOECTqoY8uvFpUdD7CtpksIxNVqRIhuNOlz0lqEAAABB +ANkaHTTaPojClO0dKJ/Zjs7pWOCGliebBYprQ/Y4r9QLBkC/XaWMS26gFIrjgC7D2Rv+rZ +wSD0v0RcmkITP1ZR0AAAAYcHF1ZXJuYUBMdWNreUh5ZHJvLmxvY2FsAQID +-----END OPENSSH PRIVATE KEY-----`), + "user": []byte(`-----BEGIN EC PRIVATE KEY----- +MHcCAQEEILYCAeq8f7V4vSSypRw7pxy8yz3V5W4qg8kSC3zJhqpQoAoGCCqGSM49 +AwEHoUQDQgAEYcO2xNKiRUYOLEHM7VYAp57HNyKbOdYtHD83Z4hzNPVC4tM5mdGD +PLL8IEwvYu2wq+lpXfGQnNMbzYf9gspG0w== +-----END EC PRIVATE KEY----- +`), + "ca": []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvg9dQ9IRG59lYJb+GESfKWTch4yBpr7Ydw1jkK6vvtrx9jLo +5hkA8X6+ElRPRqTAZSlN5cBm6YCAcQIOsmXDUn6Oj1lVPQAoOjTBTvsjM3NjGhvv +52kHTY0nsMsBeY9q5DTtlzmlYkVUq2a6Htgf2mNi01dIw5fJ7uTTo8EbNf7O0i3u +c9a8P19HaZl5NKiWN4EIZkfB2WdXYRJCVBsGgQj3dE/GrEmH9QINq1A+GkNvK96u +vZm8H1jjmuqzHplWa7lFeXcx8FTVTbVb/iJrZ2Lc/JvIPitKZWhqbR59yrGjpwEp +Id7bo4WhO5L3OB0fSIJYvfu+o4WYnt4f3UzecwIDAQABAoIBABRD9yHgKErVuC2Q +bA+SYZY8VvdtF/X7q4EmQFORDNRA7EPgMc03JU6awRGbQ8i4kHs46EFzPoXvWcKz +AXYsO6N0Myc900Tp22A5d9NAHATEbPC/wdje7hRq1KyZONMJY9BphFv3nZbY5apR +Dc90JBFZP5RhXjTc3n9GjvqLAKfFEKVmPRCvqxCOZunw6XR+SgIQLJo36nsIsbhW +QUXIVaCI6cXMN8bRPm8EITdBNZu06Fpu4ZHm6VaxlXN9smERCDkgBSNXNWHKxmmA +c3Glo2DByUr2/JFBOrLEe9fkYgr24KNCQkHVcSaFxEcZvTggr7StjKISVHlCNEaB +7Q+kPoECgYEA3zE9FmvFGoQCU4g4Nl3dpQHs6kaAW8vJlrmq3xsireIuaJoa2HMe +wYdIvgCnK9DIjyxd5OWnE4jXtAEYPsyGD32B5rSLQrRO96lgb3f4bESCLUb3Bsn/ +sdgeE3p1xZMA0B59htqCrvVgN9k8WxyevBxYl3/gSBm/p8OVH1RTW/ECgYEA2f9Z +95OLj0KQHQtxQXf+I3VjhCw3LkLW39QZOXVI0QrCJfqqP7uxsJXH9NYX0l0GFTcR +kRrlyoaSU1EGQosZh+n1MvplGBTkTSV47/bPsTzFpgK2NfEZuFm9RoWgltS+nYeH +Y2k4mnAN3PhReCMwuprmJz8GRLsO3Cs2s2YylKMCgYEA2UX+uO/q7jgqZ5UJW+ue +1H5+W0aMuFA3i7JtZEnvRaUVFqFGlwXin/WJ2+WY1++k/rPrJ+Rk9IBXtBUIvEGw +FC5TIfsKQsJyyWgqx/jbbtJ2g4s8+W/1qfTAuqeRNOg5d2DnRDs90wJuS4//0JaY +9HkHyVwkQyxFxhSA/AHEMJECgYA2MvyFR1O9bIk0D3I7GsA+xKLXa77Ua53MzIjw +9i4CezBGDQpjCiFli/fI8am+jY5DnAtsDknvjoG24UAzLy5L0mk6IXMdB6SzYYut +7ak5oahqW+Y9hxIj+XvLmtGQbphtxhJtLu35x75KoBpxSh6FZpmuTEccs31AVCYn +eFM/DQKBgQDOPUwbLKqVi6ddFGgrV9MrWw+SWsDa43bPuyvYppMM3oqesvyaX1Dt +qDvN7owaNxNM4OnfKcZr91z8YPVCFo4RbBif3DXRzjNNBlxEjHBtuMOikwvsmucN +vIrbeEpjTiUMTEAr6PoTiVHjsfS8WAM6MDlF5M+2PNswDsBpa2yLgA== +-----END RSA PRIVATE KEY----- +`), +} + +var SSHCertificates = map[string][]byte{ + // The following are corresponding certificates for the private keys above, signed by the CA key + // Generated by the following commands: + // + // 1. Assumes "rsa" key above in file named "rsa", write out the public key to "rsa.pub": + // ssh-keygen -y -f rsa > rsa.pu + // + // 2. Assumes "ca" key above in file named "ca", sign a cert for "rsa.pub": + // ssh-keygen -s ca -h -n host.example.com -V +500w -I host.example.com-key rsa.pub + "rsa": []byte(`ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgLjYqmmuTSEmjVhSfLQphBSTJMLwIZhRgmpn8FHKLiEIAAAADAQABAAAAgQC8A6FGHDiWCSREAXCq6yBfNVr0xCVG2CzvktFNRpue+RXrGs/2a6ySEJQb3IYquw7HlJgu6fg3WIWhOmHCjfpG0PrL4CRwbqQ2LaPPXhJErWYejcD8Di00cF3677+G10KMZk9RXbmHtuBFZT98wxg8j+ZsBMqGM1+7yrWUvynswQAAAAAAAAAAAAAAAgAAABRob3N0LmV4YW1wbGUuY29tLWtleQAAABQAAAAQaG9zdC5leGFtcGxlLmNvbQAAAABZHN8UAAAAAGsjIYUAAAAAAAAAAAAAAAAAAAEXAAAAB3NzaC1yc2EAAAADAQABAAABAQC+D11D0hEbn2Vglv4YRJ8pZNyHjIGmvth3DWOQrq++2vH2MujmGQDxfr4SVE9GpMBlKU3lwGbpgIBxAg6yZcNSfo6PWVU9ACg6NMFO+yMzc2MaG+/naQdNjSewywF5j2rkNO2XOaViRVSrZroe2B/aY2LTV0jDl8nu5NOjwRs1/s7SLe5z1rw/X0dpmXk0qJY3gQhmR8HZZ1dhEkJUGwaBCPd0T8asSYf1Ag2rUD4aQ28r3q69mbwfWOOa6rMemVZruUV5dzHwVNVNtVv+ImtnYtz8m8g+K0plaGptHn3KsaOnASkh3tujhaE7kvc4HR9Igli9+76jhZie3h/dTN5zAAABDwAAAAdzc2gtcnNhAAABALeDea+60H6xJGhktAyosHaSY7AYzLocaqd8hJQjEIDifBwzoTlnBmcK9CxGhKuaoJFThdCLdaevCeOSuquh8HTkf+2ebZZc/G5T+2thPvPqmcuEcmMosWo+SIjYhbP3S6KD49aLC1X0kz8IBQeauFvURhkZ5ZjhA1L4aQYt9NjL73nqOl8PplRui+Ov5w8b4ldul4zOvYAFrzfcP6wnnXk3c1Zzwwf5wynD5jakO8GpYKBuhM7Z4crzkKSQjU3hla7xqgfomC5Gz4XbR2TNjcQiRrJQ0UlKtX3X3ObRCEhuvG0Kzjklhv+Ddw6txrhKjMjiSi/Yyius/AE8TmC1p4U= host.example.com +`), +} + +var PEMEncryptedKeys = []struct { + Name string + EncryptionKey string + PEMBytes []byte +}{ + 0: { + Name: "rsa-encrypted", + EncryptionKey: "r54-G0pher_t3st$", + PEMBytes: []byte(`-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,3E1714DE130BC5E81327F36564B05462 + +MqW88sud4fnWk/Jk3fkjh7ydu51ZkHLN5qlQgA4SkAXORPPMj2XvqZOv1v2LOgUV +dUevUn8PZK7a9zbZg4QShUSzwE5k6wdB7XKPyBgI39mJ79GBd2U4W3h6KT6jIdWA +goQpluxkrzr2/X602IaxLEre97FT9mpKC6zxKCLvyFWVIP9n3OSFS47cTTXyFr+l +7PdRhe60nn6jSBgUNk/Q1lAvEQ9fufdPwDYY93F1wyJ6lOr0F1+mzRrMbH67NyKs +rG8J1Fa7cIIre7ueKIAXTIne7OAWqpU9UDgQatDtZTbvA7ciqGsSFgiwwW13N+Rr +hN8MkODKs9cjtONxSKi05s206A3NDU6STtZ3KuPDjFE1gMJODotOuqSM+cxKfyFq +wxpk/CHYCDdMAVBSwxb/vraOHamylL4uCHpJdBHypzf2HABt+lS8Su23uAmL87DR +yvyCS/lmpuNTndef6qHPRkoW2EV3xqD3ovosGf7kgwGJUk2ZpCLVteqmYehKlZDK +r/Jy+J26ooI2jIg9bjvD1PZq+Mv+2dQ1RlDrPG3PB+rEixw6vBaL9x3jatCd4ej7 +XG7lb3qO9xFpLsx89tkEcvpGR+broSpUJ6Mu5LBCVmrvqHjvnDhrZVz1brMiQtU9 +iMZbgXqDLXHd6ERWygk7OTU03u+l1gs+KGMfmS0h0ZYw6KGVLgMnsoxqd6cFSKNB +8Ohk9ZTZGCiovlXBUepyu8wKat1k8YlHSfIHoRUJRhhcd7DrmojC+bcbMIZBU22T +Pl2ftVRGtcQY23lYd0NNKfebF7ncjuLWQGy+vZW+7cgfI6wPIbfYfP6g7QAutk6W +KQx0AoX5woZ6cNxtpIrymaVjSMRRBkKQrJKmRp3pC/lul5E5P2cueMs1fj4OHTbJ +lAUv88ywr+R+mRgYQlFW/XQ653f6DT4t6+njfO9oBcPrQDASZel3LjXLpjjYG/N5 ++BWnVexuJX9ika8HJiFl55oqaKb+WknfNhk5cPY+x7SDV9ywQeMiDZpr0ffeYAEP +LlwwiWRDYpO+uwXHSFF3+JjWwjhs8m8g99iFb7U93yKgBB12dCEPPa2ZeH9wUHMJ +sreYhNuq6f4iWWSXpzN45inQqtTi8jrJhuNLTT543ErW7DtntBO2rWMhff3aiXbn +Uy3qzZM1nPbuCGuBmP9L2dJ3Z5ifDWB4JmOyWY4swTZGt9AVmUxMIKdZpRONx8vz +I9u9nbVPGZBcou50Pa0qTLbkWsSL94MNXrARBxzhHC9Zs6XNEtwN7mOuii7uMkVc +adrxgknBH1J1N+NX/eTKzUwJuPvDtA+Z5ILWNN9wpZT/7ed8zEnKHPNUexyeT5g3 +uw9z9jH7ffGxFYlx87oiVPHGOrCXYZYW5uoZE31SCBkbtNuffNRJRKIFeipmpJ3P +7bpAG+kGHMelQH6b+5K1Qgsv4tpuSyKeTKpPFH9Av5nN4P1ZBm9N80tzbNWqjSJm +S7rYdHnuNEVnUGnRmEUMmVuYZnNBEVN/fP2m2SEwXcP3Uh7TiYlcWw10ygaGmOr7 +MvMLGkYgQ4Utwnd98mtqa0jr0hK2TcOSFir3AqVvXN3XJj4cVULkrXe4Im1laWgp +-----END RSA PRIVATE KEY----- +`), + }, + + 1: { + Name: "dsa-encrypted", + EncryptionKey: "qG0pher-dsa_t3st$", + PEMBytes: []byte(`-----BEGIN DSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,7CE7A6E4A647DC01AF860210B15ADE3E + +hvnBpI99Hceq/55pYRdOzBLntIEis02JFNXuLEydWL+RJBFDn7tA+vXec0ERJd6J +G8JXlSOAhmC2H4uK3q2xR8/Y3yL95n6OIcjvCBiLsV+o3jj1MYJmErxP6zRtq4w3 +JjIjGHWmaYFSxPKQ6e8fs74HEqaeMV9ONUoTtB+aISmgaBL15Fcoayg245dkBvVl +h5Kqspe7yvOBmzA3zjRuxmSCqKJmasXM7mqs3vIrMxZE3XPo1/fWKcPuExgpVQoT +HkJZEoIEIIPnPMwT2uYbFJSGgPJVMDT84xz7yvjCdhLmqrsXgs5Qw7Pw0i0c0BUJ +b7fDJ2UhdiwSckWGmIhTLlJZzr8K+JpjCDlP+REYBI5meB7kosBnlvCEHdw2EJkH +0QDc/2F4xlVrHOLbPRFyu1Oi2Gvbeoo9EsM/DThpd1hKAlb0sF5Y0y0d+owv0PnE +R/4X3HWfIdOHsDUvJ8xVWZ4BZk9Zk9qol045DcFCehpr/3hslCrKSZHakLt9GI58 +vVQJ4L0aYp5nloLfzhViZtKJXRLkySMKdzYkIlNmW1oVGl7tce5UCNI8Nok4j6yn +IiHM7GBn+0nJoKTXsOGMIBe3ulKlKVxLjEuk9yivh/8= +-----END DSA PRIVATE KEY----- +`), + }, +} diff --git a/vendor/golang.org/x/crypto/ssh/testdata_test.go b/vendor/golang.org/x/crypto/ssh/testdata_test.go new file mode 100644 index 0000000..2da8c79 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/testdata_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three +// instances. + +package ssh + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/ssh/testdata" +) + +var ( + testPrivateKeys map[string]interface{} + testSigners map[string]Signer + testPublicKeys map[string]PublicKey +) + +func init() { + var err error + + n := len(testdata.PEMBytes) + testPrivateKeys = make(map[string]interface{}, n) + testSigners = make(map[string]Signer, n) + testPublicKeys = make(map[string]PublicKey, n) + for t, k := range testdata.PEMBytes { + testPrivateKeys[t], err = ParseRawPrivateKey(k) + if err != nil { + panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err)) + } + testSigners[t], err = NewSignerFromKey(testPrivateKeys[t]) + if err != nil { + panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err)) + } + testPublicKeys[t] = testSigners[t].PublicKey() + } + + // Create a cert and sign it for use in tests. + testCert := &Certificate{ + Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage + ValidAfter: 0, // unix epoch + ValidBefore: CertTimeInfinity, // The end of currently representable time. + Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + Key: testPublicKeys["ecdsa"], + SignatureKey: testPublicKeys["rsa"], + Permissions: Permissions{ + CriticalOptions: map[string]string{}, + Extensions: map[string]string{}, + }, + } + testCert.SignCert(rand.Reader, testSigners["rsa"]) + testPrivateKeys["cert"] = testPrivateKeys["ecdsa"] + testSigners["cert"], err = NewCertSigner(testCert, testSigners["ecdsa"]) + if err != nil { + panic(fmt.Sprintf("Unable to create certificate signer: %v", err)) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go new file mode 100644 index 0000000..f6fae1d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -0,0 +1,353 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" +) + +// debugTransport if set, will print packet types as they go over the +// wire. No message decoding is done, to minimize the impact on timing. +const debugTransport = false + +const ( + gcmCipherID = "aes128-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" +) + +// packetConn represents a transport that implements packet based +// operations. +type packetConn interface { + // Encrypt and send a packet of data to the remote peer. + writePacket(packet []byte) error + + // Read a packet from the connection. The read is blocking, + // i.e. if error is nil, then the returned byte slice is + // always non-empty. + readPacket() ([]byte, error) + + // Close closes the write-side of the connection. + Close() error +} + +// transport is the keyingTransport that implements the SSH packet +// protocol. +type transport struct { + reader connectionState + writer connectionState + + bufReader *bufio.Reader + bufWriter *bufio.Writer + rand io.Reader + isClient bool + io.Closer +} + +// packetCipher represents a combination of SSH encryption/MAC +// protocol. A single instance should be used for one direction only. +type packetCipher interface { + // writePacket encrypts the packet and writes it to w. The + // contents of the packet are generally scrambled. + writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error + + // readPacket reads and decrypts a packet of data. The + // returned packet may be overwritten by future calls of + // readPacket. + readPacket(seqnum uint32, r io.Reader) ([]byte, error) +} + +// connectionState represents one side (read or write) of the +// connection. This is necessary because each direction has its own +// keys, and can even have its own algorithms +type connectionState struct { + packetCipher + seqNum uint32 + dir direction + pendingKeyChange chan packetCipher +} + +// prepareKeyChange sets up key material for a keychange. The key changes in +// both directions are triggered by reading and writing a msgNewKey packet +// respectively. +func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { + ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) + if err != nil { + return err + } + t.reader.pendingKeyChange <- ciph + + ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) + if err != nil { + return err + } + t.writer.pendingKeyChange <- ciph + + return nil +} + +func (t *transport) printPacket(p []byte, write bool) { + if len(p) == 0 { + return + } + who := "server" + if t.isClient { + who = "client" + } + what := "read" + if write { + what = "write" + } + + log.Println(what, who, p[0]) +} + +// Read and decrypt next packet. +func (t *transport) readPacket() (p []byte, err error) { + for { + p, err = t.reader.readPacket(t.bufReader) + if err != nil { + break + } + if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } + if debugTransport { + t.printPacket(p, false) + } + + return p, err +} + +func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { + packet, err := s.packetCipher.readPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { + err = errors.New("ssh: zero length packet") + } + + if len(packet) > 0 { + switch packet[0] { + case msgNewKeys: + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + return nil, errors.New("ssh: got bogus newkeys message") + } + + case msgDisconnect: + // Transform a disconnect message into an + // error. Since this is lowest level at which + // we interpret message types, doing it here + // ensures that we don't have to handle it + // elsewhere. + var msg disconnectMsg + if err := Unmarshal(packet, &msg); err != nil { + return nil, err + } + return nil, &msg + } + } + + // The packet may point to an internal buffer, so copy the + // packet out here. + fresh := make([]byte, len(packet)) + copy(fresh, packet) + + return fresh, err +} + +func (t *transport) writePacket(packet []byte) error { + if debugTransport { + t.printPacket(packet, true) + } + return t.writer.writePacket(t.bufWriter, t.rand, packet) +} + +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { + changeKeys := len(packet) > 0 && packet[0] == msgNewKeys + + err := s.packetCipher.writePacket(s.seqNum, w, rand, packet) + if err != nil { + return err + } + if err = w.Flush(); err != nil { + return err + } + s.seqNum++ + if changeKeys { + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + panic("ssh: no key material for msgNewKeys") + } + } + return err +} + +func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { + t := &transport{ + bufReader: bufio.NewReader(rwc), + bufWriter: bufio.NewWriter(rwc), + rand: rand, + reader: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + writer: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + Closer: rwc, + } + t.isClient = isClient + + if isClient { + t.reader.dir = serverKeys + t.writer.dir = clientKeys + } else { + t.reader.dir = clientKeys + t.writer.dir = serverKeys + } + + return t +} + +type direction struct { + ivTag []byte + keyTag []byte + macKeyTag []byte +} + +var ( + serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} + clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} +) + +// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as +// described in RFC 4253, section 6.4. direction should either be serverKeys +// (to setup server->client keys) or clientKeys (for client->server keys). +func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { + cipherMode := cipherModes[algs.Cipher] + macMode := macModes[algs.MAC] + + iv := make([]byte, cipherMode.ivSize) + key := make([]byte, cipherMode.keySize) + macKey := make([]byte, macMode.keySize) + + generateKeyMaterial(iv, d.ivTag, kex) + generateKeyMaterial(key, d.keyTag, kex) + generateKeyMaterial(macKey, d.macKeyTag, kex) + + return cipherModes[algs.Cipher].create(key, iv, macKey, algs) +} + +// generateKeyMaterial fills out with key material generated from tag, K, H +// and sessionId, as specified in RFC 4253, section 7.2. +func generateKeyMaterial(out, tag []byte, r *kexResult) { + var digestsSoFar []byte + + h := r.Hash.New() + for len(out) > 0 { + h.Reset() + h.Write(r.K) + h.Write(r.H) + + if len(digestsSoFar) == 0 { + h.Write(tag) + h.Write(r.SessionID) + } else { + h.Write(digestsSoFar) + } + + digest := h.Sum(nil) + n := copy(out, digest) + out = out[n:] + if len(out) > 0 { + digestsSoFar = append(digestsSoFar, digest...) + } + } +} + +const packageVersion = "SSH-2.0-Go" + +// Sends and receives a version line. The versionLine string should +// be US ASCII, start with "SSH-2.0-", and should not include a +// newline. exchangeVersions returns the other side's version line. +func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { + // Contrary to the RFC, we do not ignore lines that don't + // start with "SSH-2.0-" to make the library usable with + // nonconforming servers. + for _, c := range versionLine { + // The spec disallows non US-ASCII chars, and + // specifically forbids null chars. + if c < 32 { + return nil, errors.New("ssh: junk character in version line") + } + } + if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { + return + } + + them, err = readVersion(rw) + return them, err +} + +// maxVersionStringBytes is the maximum number of bytes that we'll +// accept as a version string. RFC 4253 section 4.2 limits this at 255 +// chars +const maxVersionStringBytes = 255 + +// Read version string as specified by RFC 4253, section 4.2. +func readVersion(r io.Reader) ([]byte, error) { + versionString := make([]byte, 0, 64) + var ok bool + var buf [1]byte + + for length := 0; length < maxVersionStringBytes; length++ { + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return nil, err + } + // The RFC says that the version should be terminated with \r\n + // but several SSH servers actually only send a \n. + if buf[0] == '\n' { + if !bytes.HasPrefix(versionString, []byte("SSH-")) { + // RFC 4253 says we need to ignore all version string lines + // except the one containing the SSH version (provided that + // all the lines do not exceed 255 bytes in total). + versionString = versionString[:0] + continue + } + ok = true + break + } + + // non ASCII chars are disallowed, but we are lenient, + // since Go doesn't use null-terminated strings. + + // The RFC allows a comment after a space, however, + // all of it (version and comments) goes into the + // session hash. + versionString = append(versionString, buf[0]) + } + + if !ok { + return nil, errors.New("ssh: overflow reading version string") + } + + // There might be a '\r' on the end which we should remove. + if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { + versionString = versionString[:len(versionString)-1] + } + return versionString, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/transport_test.go b/vendor/golang.org/x/crypto/ssh/transport_test.go new file mode 100644 index 0000000..8445e1e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport_test.go @@ -0,0 +1,113 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "strings" + "testing" +) + +func TestReadVersion(t *testing.T) { + longVersion := strings.Repeat("SSH-2.0-bla", 50)[:253] + multiLineVersion := strings.Repeat("ignored\r\n", 20) + "SSH-2.0-bla\r\n" + cases := map[string]string{ + "SSH-2.0-bla\r\n": "SSH-2.0-bla", + "SSH-2.0-bla\n": "SSH-2.0-bla", + multiLineVersion: "SSH-2.0-bla", + longVersion + "\r\n": longVersion, + } + + for in, want := range cases { + result, err := readVersion(bytes.NewBufferString(in)) + if err != nil { + t.Errorf("readVersion(%q): %s", in, err) + } + got := string(result) + if got != want { + t.Errorf("got %q, want %q", got, want) + } + } +} + +func TestReadVersionError(t *testing.T) { + longVersion := strings.Repeat("SSH-2.0-bla", 50)[:253] + multiLineVersion := strings.Repeat("ignored\r\n", 50) + "SSH-2.0-bla\r\n" + cases := []string{ + longVersion + "too-long\r\n", + multiLineVersion, + } + for _, in := range cases { + if _, err := readVersion(bytes.NewBufferString(in)); err == nil { + t.Errorf("readVersion(%q) should have failed", in) + } + } +} + +func TestExchangeVersionsBasic(t *testing.T) { + v := "SSH-2.0-bla" + buf := bytes.NewBufferString(v + "\r\n") + them, err := exchangeVersions(buf, []byte("xyz")) + if err != nil { + t.Errorf("exchangeVersions: %v", err) + } + + if want := "SSH-2.0-bla"; string(them) != want { + t.Errorf("got %q want %q for our version", them, want) + } +} + +func TestExchangeVersions(t *testing.T) { + cases := []string{ + "not\x000allowed", + "not allowed\x01\r\n", + } + for _, c := range cases { + buf := bytes.NewBufferString("SSH-2.0-bla\r\n") + if _, err := exchangeVersions(buf, []byte(c)); err == nil { + t.Errorf("exchangeVersions(%q): should have failed", c) + } + } +} + +type closerBuffer struct { + bytes.Buffer +} + +func (b *closerBuffer) Close() error { + return nil +} + +func TestTransportMaxPacketWrite(t *testing.T) { + buf := &closerBuffer{} + tr := newTransport(buf, rand.Reader, true) + huge := make([]byte, maxPacket+1) + err := tr.writePacket(huge) + if err == nil { + t.Errorf("transport accepted write for a huge packet.") + } +} + +func TestTransportMaxPacketReader(t *testing.T) { + var header [5]byte + huge := make([]byte, maxPacket+128) + binary.BigEndian.PutUint32(header[0:], uint32(len(huge))) + // padding. + header[4] = 0 + + buf := &closerBuffer{} + buf.Write(header[:]) + buf.Write(huge) + + tr := newTransport(buf, rand.Reader, true) + _, err := tr.readPacket() + if err == nil { + t.Errorf("transport succeeded reading huge packet.") + } else if !strings.Contains(err.Error(), "large") { + t.Errorf("got %q, should mention %q", err.Error(), "large") + } +} diff --git a/vendor/golang.org/x/crypto/tea/cipher.go b/vendor/golang.org/x/crypto/tea/cipher.go new file mode 100644 index 0000000..ce223b2 --- /dev/null +++ b/vendor/golang.org/x/crypto/tea/cipher.go @@ -0,0 +1,108 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tea implements the TEA algorithm, as defined in Needham and +// Wheeler's 1994 technical report, “TEA, a Tiny Encryption Algorithm”. See +// http://www.cix.co.uk/~klockstone/tea.pdf for details. +package tea + +import ( + "crypto/cipher" + "encoding/binary" + "errors" +) + +const ( + // BlockSize is the size of a TEA block, in bytes. + BlockSize = 8 + + // KeySize is the size of a TEA key, in bytes. + KeySize = 16 + + // delta is the TEA key schedule constant. + delta = 0x9e3779b9 + + // numRounds is the standard number of rounds in TEA. + numRounds = 64 +) + +// tea is an instance of the TEA cipher with a particular key. +type tea struct { + key [16]byte + rounds int +} + +// NewCipher returns an instance of the TEA cipher with the standard number of +// rounds. The key argument must be 16 bytes long. +func NewCipher(key []byte) (cipher.Block, error) { + return NewCipherWithRounds(key, numRounds) +} + +// NewCipherWithRounds returns an instance of the TEA cipher with a given +// number of rounds, which must be even. The key argument must be 16 bytes +// long. +func NewCipherWithRounds(key []byte, rounds int) (cipher.Block, error) { + if len(key) != 16 { + return nil, errors.New("tea: incorrect key size") + } + + if rounds&1 != 0 { + return nil, errors.New("tea: odd number of rounds specified") + } + + c := &tea{ + rounds: rounds, + } + copy(c.key[:], key) + + return c, nil +} + +// BlockSize returns the TEA block size, which is eight bytes. It is necessary +// to satisfy the Block interface in the package "crypto/cipher". +func (*tea) BlockSize() int { + return BlockSize +} + +// Encrypt encrypts the 8 byte buffer src using the key in t and stores the +// result in dst. Note that for amounts of data larger than a block, it is not +// safe to just call Encrypt on successive blocks; instead, use an encryption +// mode like CBC (see crypto/cipher/cbc.go). +func (t *tea) Encrypt(dst, src []byte) { + e := binary.BigEndian + v0, v1 := e.Uint32(src), e.Uint32(src[4:]) + k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:]) + + sum := uint32(0) + delta := uint32(delta) + + for i := 0; i < t.rounds/2; i++ { + sum += delta + v0 += ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1) + v1 += ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3) + } + + e.PutUint32(dst, v0) + e.PutUint32(dst[4:], v1) +} + +// Decrypt decrypts the 8 byte buffer src using the key in t and stores the +// result in dst. +func (t *tea) Decrypt(dst, src []byte) { + e := binary.BigEndian + v0, v1 := e.Uint32(src), e.Uint32(src[4:]) + k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:]) + + delta := uint32(delta) + sum := delta * uint32(t.rounds/2) // in general, sum = delta * n + + for i := 0; i < t.rounds/2; i++ { + v1 -= ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3) + v0 -= ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1) + sum -= delta + } + + e.PutUint32(dst, v0) + e.PutUint32(dst[4:], v1) +} diff --git a/vendor/golang.org/x/crypto/tea/tea_test.go b/vendor/golang.org/x/crypto/tea/tea_test.go new file mode 100644 index 0000000..eb98d1e --- /dev/null +++ b/vendor/golang.org/x/crypto/tea/tea_test.go @@ -0,0 +1,93 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tea + +import ( + "bytes" + "testing" +) + +// A sample test key for when we just want to initialize a cipher +var testKey = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF} + +// Test that the block size for tea is correct +func TestBlocksize(t *testing.T) { + c, err := NewCipher(testKey) + if err != nil { + t.Fatalf("NewCipher returned error: %s", err) + } + + if result := c.BlockSize(); result != BlockSize { + t.Errorf("cipher.BlockSize returned %d, but expected %d", result, BlockSize) + } +} + +// Test that invalid key sizes return an error +func TestInvalidKeySize(t *testing.T) { + var key [KeySize + 1]byte + + if _, err := NewCipher(key[:]); err == nil { + t.Errorf("invalid key size %d didn't result in an error.", len(key)) + } + + if _, err := NewCipher(key[:KeySize-1]); err == nil { + t.Errorf("invalid key size %d didn't result in an error.", KeySize-1) + } +} + +// Test Vectors +type teaTest struct { + rounds int + key []byte + plaintext []byte + ciphertext []byte +} + +var teaTests = []teaTest{ + // These were sourced from https://github.com/froydnj/ironclad/blob/master/testing/test-vectors/tea.testvec + { + numRounds, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x41, 0xea, 0x3a, 0x0a, 0x94, 0xba, 0xa9, 0x40}, + }, + { + numRounds, + []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + []byte{0x31, 0x9b, 0xbe, 0xfb, 0x01, 0x6a, 0xbd, 0xb2}, + }, + { + 16, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0xed, 0x28, 0x5d, 0xa1, 0x45, 0x5b, 0x33, 0xc1}, + }, +} + +// Test encryption +func TestCipherEncrypt(t *testing.T) { + // Test encryption with standard 64 rounds + for i, test := range teaTests { + c, err := NewCipherWithRounds(test.key, test.rounds) + if err != nil { + t.Fatalf("#%d: NewCipher returned error: %s", i, err) + } + + var ciphertext [BlockSize]byte + c.Encrypt(ciphertext[:], test.plaintext) + + if !bytes.Equal(ciphertext[:], test.ciphertext) { + t.Errorf("#%d: incorrect ciphertext. Got %x, wanted %x", i, ciphertext, test.ciphertext) + } + + var plaintext2 [BlockSize]byte + c.Decrypt(plaintext2[:], ciphertext[:]) + + if !bytes.Equal(plaintext2[:], test.plaintext) { + t.Errorf("#%d: incorrect plaintext. Got %x, wanted %x", i, plaintext2, test.plaintext) + } + } +} diff --git a/vendor/golang.org/x/crypto/twofish/twofish.go b/vendor/golang.org/x/crypto/twofish/twofish.go new file mode 100644 index 0000000..6db01fc --- /dev/null +++ b/vendor/golang.org/x/crypto/twofish/twofish.go @@ -0,0 +1,342 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package twofish implements Bruce Schneier's Twofish encryption algorithm. +package twofish // import "golang.org/x/crypto/twofish" + +// Twofish is defined in https://www.schneier.com/paper-twofish-paper.pdf [TWOFISH] + +// This code is a port of the LibTom C implementation. +// See http://libtom.org/?page=features&newsitems=5&whatfile=crypt. +// LibTomCrypt is free for all purposes under the public domain. +// It was heavily inspired by the go blowfish package. + +import "strconv" + +// BlockSize is the constant block size of Twofish. +const BlockSize = 16 + +const mdsPolynomial = 0x169 // x^8 + x^6 + x^5 + x^3 + 1, see [TWOFISH] 4.2 +const rsPolynomial = 0x14d // x^8 + x^6 + x^3 + x^2 + 1, see [TWOFISH] 4.3 + +// A Cipher is an instance of Twofish encryption using a particular key. +type Cipher struct { + s [4][256]uint32 + k [40]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/twofish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Twofish key, 16, 24 or 32 bytes. +func NewCipher(key []byte) (*Cipher, error) { + keylen := len(key) + + if keylen != 16 && keylen != 24 && keylen != 32 { + return nil, KeySizeError(keylen) + } + + // k is the number of 64 bit words in key + k := keylen / 8 + + // Create the S[..] words + var S [4 * 4]byte + for i := 0; i < k; i++ { + // Computes [y0 y1 y2 y3] = rs . [x0 x1 x2 x3 x4 x5 x6 x7] + for j, rsRow := range rs { + for k, rsVal := range rsRow { + S[4*i+j] ^= gfMult(key[8*i+k], rsVal, rsPolynomial) + } + } + } + + // Calculate subkeys + c := new(Cipher) + var tmp [4]byte + for i := byte(0); i < 20; i++ { + // A = h(p * 2x, Me) + for j := range tmp { + tmp[j] = 2 * i + } + A := h(tmp[:], key, 0) + + // B = rolc(h(p * (2x + 1), Mo), 8) + for j := range tmp { + tmp[j] = 2*i + 1 + } + B := h(tmp[:], key, 1) + B = rol(B, 8) + + c.k[2*i] = A + B + + // K[2i+1] = (A + 2B) <<< 9 + c.k[2*i+1] = rol(2*B+A, 9) + } + + // Calculate sboxes + switch k { + case 2: + for i := range c.s[0] { + c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][byte(i)]^S[0]]^S[4]], 0) + c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][byte(i)]^S[1]]^S[5]], 1) + c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][byte(i)]^S[2]]^S[6]], 2) + c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][byte(i)]^S[3]]^S[7]], 3) + } + case 3: + for i := range c.s[0] { + c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]], 0) + c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[1]]^S[5]]^S[9]], 1) + c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]], 2) + c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[3]]^S[7]]^S[11]], 3) + } + default: + for i := range c.s[0] { + c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]]^S[12]], 0) + c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[1]]^S[5]]^S[9]]^S[13]], 1) + c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]]^S[14]], 2) + c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][sbox[1][byte(i)]^S[3]]^S[7]]^S[11]]^S[15]], 3) + } + } + + return c, nil +} + +// BlockSize returns the Twofish block size, 16 bytes. +func (c *Cipher) BlockSize() int { return BlockSize } + +// store32l stores src in dst in little-endian form. +func store32l(dst []byte, src uint32) { + dst[0] = byte(src) + dst[1] = byte(src >> 8) + dst[2] = byte(src >> 16) + dst[3] = byte(src >> 24) + return +} + +// load32l reads a little-endian uint32 from src. +func load32l(src []byte) uint32 { + return uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24 +} + +// rol returns x after a left circular rotation of y bits. +func rol(x, y uint32) uint32 { + return (x << (y & 31)) | (x >> (32 - (y & 31))) +} + +// ror returns x after a right circular rotation of y bits. +func ror(x, y uint32) uint32 { + return (x >> (y & 31)) | (x << (32 - (y & 31))) +} + +// The RS matrix. See [TWOFISH] 4.3 +var rs = [4][8]byte{ + {0x01, 0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E}, + {0xA4, 0x56, 0x82, 0xF3, 0x1E, 0xC6, 0x68, 0xE5}, + {0x02, 0xA1, 0xFC, 0xC1, 0x47, 0xAE, 0x3D, 0x19}, + {0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E, 0x03}, +} + +// sbox tables +var sbox = [2][256]byte{ + { + 0xa9, 0x67, 0xb3, 0xe8, 0x04, 0xfd, 0xa3, 0x76, 0x9a, 0x92, 0x80, 0x78, 0xe4, 0xdd, 0xd1, 0x38, + 0x0d, 0xc6, 0x35, 0x98, 0x18, 0xf7, 0xec, 0x6c, 0x43, 0x75, 0x37, 0x26, 0xfa, 0x13, 0x94, 0x48, + 0xf2, 0xd0, 0x8b, 0x30, 0x84, 0x54, 0xdf, 0x23, 0x19, 0x5b, 0x3d, 0x59, 0xf3, 0xae, 0xa2, 0x82, + 0x63, 0x01, 0x83, 0x2e, 0xd9, 0x51, 0x9b, 0x7c, 0xa6, 0xeb, 0xa5, 0xbe, 0x16, 0x0c, 0xe3, 0x61, + 0xc0, 0x8c, 0x3a, 0xf5, 0x73, 0x2c, 0x25, 0x0b, 0xbb, 0x4e, 0x89, 0x6b, 0x53, 0x6a, 0xb4, 0xf1, + 0xe1, 0xe6, 0xbd, 0x45, 0xe2, 0xf4, 0xb6, 0x66, 0xcc, 0x95, 0x03, 0x56, 0xd4, 0x1c, 0x1e, 0xd7, + 0xfb, 0xc3, 0x8e, 0xb5, 0xe9, 0xcf, 0xbf, 0xba, 0xea, 0x77, 0x39, 0xaf, 0x33, 0xc9, 0x62, 0x71, + 0x81, 0x79, 0x09, 0xad, 0x24, 0xcd, 0xf9, 0xd8, 0xe5, 0xc5, 0xb9, 0x4d, 0x44, 0x08, 0x86, 0xe7, + 0xa1, 0x1d, 0xaa, 0xed, 0x06, 0x70, 0xb2, 0xd2, 0x41, 0x7b, 0xa0, 0x11, 0x31, 0xc2, 0x27, 0x90, + 0x20, 0xf6, 0x60, 0xff, 0x96, 0x5c, 0xb1, 0xab, 0x9e, 0x9c, 0x52, 0x1b, 0x5f, 0x93, 0x0a, 0xef, + 0x91, 0x85, 0x49, 0xee, 0x2d, 0x4f, 0x8f, 0x3b, 0x47, 0x87, 0x6d, 0x46, 0xd6, 0x3e, 0x69, 0x64, + 0x2a, 0xce, 0xcb, 0x2f, 0xfc, 0x97, 0x05, 0x7a, 0xac, 0x7f, 0xd5, 0x1a, 0x4b, 0x0e, 0xa7, 0x5a, + 0x28, 0x14, 0x3f, 0x29, 0x88, 0x3c, 0x4c, 0x02, 0xb8, 0xda, 0xb0, 0x17, 0x55, 0x1f, 0x8a, 0x7d, + 0x57, 0xc7, 0x8d, 0x74, 0xb7, 0xc4, 0x9f, 0x72, 0x7e, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, + 0x6e, 0x50, 0xde, 0x68, 0x65, 0xbc, 0xdb, 0xf8, 0xc8, 0xa8, 0x2b, 0x40, 0xdc, 0xfe, 0x32, 0xa4, + 0xca, 0x10, 0x21, 0xf0, 0xd3, 0x5d, 0x0f, 0x00, 0x6f, 0x9d, 0x36, 0x42, 0x4a, 0x5e, 0xc1, 0xe0, + }, + { + 0x75, 0xf3, 0xc6, 0xf4, 0xdb, 0x7b, 0xfb, 0xc8, 0x4a, 0xd3, 0xe6, 0x6b, 0x45, 0x7d, 0xe8, 0x4b, + 0xd6, 0x32, 0xd8, 0xfd, 0x37, 0x71, 0xf1, 0xe1, 0x30, 0x0f, 0xf8, 0x1b, 0x87, 0xfa, 0x06, 0x3f, + 0x5e, 0xba, 0xae, 0x5b, 0x8a, 0x00, 0xbc, 0x9d, 0x6d, 0xc1, 0xb1, 0x0e, 0x80, 0x5d, 0xd2, 0xd5, + 0xa0, 0x84, 0x07, 0x14, 0xb5, 0x90, 0x2c, 0xa3, 0xb2, 0x73, 0x4c, 0x54, 0x92, 0x74, 0x36, 0x51, + 0x38, 0xb0, 0xbd, 0x5a, 0xfc, 0x60, 0x62, 0x96, 0x6c, 0x42, 0xf7, 0x10, 0x7c, 0x28, 0x27, 0x8c, + 0x13, 0x95, 0x9c, 0xc7, 0x24, 0x46, 0x3b, 0x70, 0xca, 0xe3, 0x85, 0xcb, 0x11, 0xd0, 0x93, 0xb8, + 0xa6, 0x83, 0x20, 0xff, 0x9f, 0x77, 0xc3, 0xcc, 0x03, 0x6f, 0x08, 0xbf, 0x40, 0xe7, 0x2b, 0xe2, + 0x79, 0x0c, 0xaa, 0x82, 0x41, 0x3a, 0xea, 0xb9, 0xe4, 0x9a, 0xa4, 0x97, 0x7e, 0xda, 0x7a, 0x17, + 0x66, 0x94, 0xa1, 0x1d, 0x3d, 0xf0, 0xde, 0xb3, 0x0b, 0x72, 0xa7, 0x1c, 0xef, 0xd1, 0x53, 0x3e, + 0x8f, 0x33, 0x26, 0x5f, 0xec, 0x76, 0x2a, 0x49, 0x81, 0x88, 0xee, 0x21, 0xc4, 0x1a, 0xeb, 0xd9, + 0xc5, 0x39, 0x99, 0xcd, 0xad, 0x31, 0x8b, 0x01, 0x18, 0x23, 0xdd, 0x1f, 0x4e, 0x2d, 0xf9, 0x48, + 0x4f, 0xf2, 0x65, 0x8e, 0x78, 0x5c, 0x58, 0x19, 0x8d, 0xe5, 0x98, 0x57, 0x67, 0x7f, 0x05, 0x64, + 0xaf, 0x63, 0xb6, 0xfe, 0xf5, 0xb7, 0x3c, 0xa5, 0xce, 0xe9, 0x68, 0x44, 0xe0, 0x4d, 0x43, 0x69, + 0x29, 0x2e, 0xac, 0x15, 0x59, 0xa8, 0x0a, 0x9e, 0x6e, 0x47, 0xdf, 0x34, 0x35, 0x6a, 0xcf, 0xdc, + 0x22, 0xc9, 0xc0, 0x9b, 0x89, 0xd4, 0xed, 0xab, 0x12, 0xa2, 0x0d, 0x52, 0xbb, 0x02, 0x2f, 0xa9, + 0xd7, 0x61, 0x1e, 0xb4, 0x50, 0x04, 0xf6, 0xc2, 0x16, 0x25, 0x86, 0x56, 0x55, 0x09, 0xbe, 0x91, + }, +} + +// gfMult returns a·b in GF(2^8)/p +func gfMult(a, b byte, p uint32) byte { + B := [2]uint32{0, uint32(b)} + P := [2]uint32{0, p} + var result uint32 + + // branchless GF multiplier + for i := 0; i < 7; i++ { + result ^= B[a&1] + a >>= 1 + B[1] = P[B[1]>>7] ^ (B[1] << 1) + } + result ^= B[a&1] + return byte(result) +} + +// mdsColumnMult calculates y{col} where [y0 y1 y2 y3] = MDS · [x0] +func mdsColumnMult(in byte, col int) uint32 { + mul01 := in + mul5B := gfMult(in, 0x5B, mdsPolynomial) + mulEF := gfMult(in, 0xEF, mdsPolynomial) + + switch col { + case 0: + return uint32(mul01) | uint32(mul5B)<<8 | uint32(mulEF)<<16 | uint32(mulEF)<<24 + case 1: + return uint32(mulEF) | uint32(mulEF)<<8 | uint32(mul5B)<<16 | uint32(mul01)<<24 + case 2: + return uint32(mul5B) | uint32(mulEF)<<8 | uint32(mul01)<<16 | uint32(mulEF)<<24 + case 3: + return uint32(mul5B) | uint32(mul01)<<8 | uint32(mulEF)<<16 | uint32(mul5B)<<24 + } + + panic("unreachable") +} + +// h implements the S-box generation function. See [TWOFISH] 4.3.5 +func h(in, key []byte, offset int) uint32 { + var y [4]byte + for x := range y { + y[x] = in[x] + } + switch len(key) / 8 { + case 4: + y[0] = sbox[1][y[0]] ^ key[4*(6+offset)+0] + y[1] = sbox[0][y[1]] ^ key[4*(6+offset)+1] + y[2] = sbox[0][y[2]] ^ key[4*(6+offset)+2] + y[3] = sbox[1][y[3]] ^ key[4*(6+offset)+3] + fallthrough + case 3: + y[0] = sbox[1][y[0]] ^ key[4*(4+offset)+0] + y[1] = sbox[1][y[1]] ^ key[4*(4+offset)+1] + y[2] = sbox[0][y[2]] ^ key[4*(4+offset)+2] + y[3] = sbox[0][y[3]] ^ key[4*(4+offset)+3] + fallthrough + case 2: + y[0] = sbox[1][sbox[0][sbox[0][y[0]]^key[4*(2+offset)+0]]^key[4*(0+offset)+0]] + y[1] = sbox[0][sbox[0][sbox[1][y[1]]^key[4*(2+offset)+1]]^key[4*(0+offset)+1]] + y[2] = sbox[1][sbox[1][sbox[0][y[2]]^key[4*(2+offset)+2]]^key[4*(0+offset)+2]] + y[3] = sbox[0][sbox[1][sbox[1][y[3]]^key[4*(2+offset)+3]]^key[4*(0+offset)+3]] + } + // [y0 y1 y2 y3] = MDS . [x0 x1 x2 x3] + var mdsMult uint32 + for i := range y { + mdsMult ^= mdsColumnMult(y[i], i) + } + return mdsMult +} + +// Encrypt encrypts a 16-byte block from src to dst, which may overlap. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + S1 := c.s[0] + S2 := c.s[1] + S3 := c.s[2] + S4 := c.s[3] + + // Load input + ia := load32l(src[0:4]) + ib := load32l(src[4:8]) + ic := load32l(src[8:12]) + id := load32l(src[12:16]) + + // Pre-whitening + ia ^= c.k[0] + ib ^= c.k[1] + ic ^= c.k[2] + id ^= c.k[3] + + for i := 0; i < 8; i++ { + k := c.k[8+i*4 : 12+i*4] + t2 := S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)] + t1 := S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2 + ic = ror(ic^(t1+k[0]), 1) + id = rol(id, 1) ^ (t2 + t1 + k[1]) + + t2 = S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)] + t1 = S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2 + ia = ror(ia^(t1+k[2]), 1) + ib = rol(ib, 1) ^ (t2 + t1 + k[3]) + } + + // Output with "undo last swap" + ta := ic ^ c.k[4] + tb := id ^ c.k[5] + tc := ia ^ c.k[6] + td := ib ^ c.k[7] + + store32l(dst[0:4], ta) + store32l(dst[4:8], tb) + store32l(dst[8:12], tc) + store32l(dst[12:16], td) +} + +// Decrypt decrypts a 16-byte block from src to dst, which may overlap. +func (c *Cipher) Decrypt(dst, src []byte) { + S1 := c.s[0] + S2 := c.s[1] + S3 := c.s[2] + S4 := c.s[3] + + // Load input + ta := load32l(src[0:4]) + tb := load32l(src[4:8]) + tc := load32l(src[8:12]) + td := load32l(src[12:16]) + + // Undo undo final swap + ia := tc ^ c.k[6] + ib := td ^ c.k[7] + ic := ta ^ c.k[4] + id := tb ^ c.k[5] + + for i := 8; i > 0; i-- { + k := c.k[4+i*4 : 8+i*4] + t2 := S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)] + t1 := S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2 + ia = rol(ia, 1) ^ (t1 + k[2]) + ib = ror(ib^(t2+t1+k[3]), 1) + + t2 = S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)] + t1 = S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2 + ic = rol(ic, 1) ^ (t1 + k[0]) + id = ror(id^(t2+t1+k[1]), 1) + } + + // Undo pre-whitening + ia ^= c.k[0] + ib ^= c.k[1] + ic ^= c.k[2] + id ^= c.k[3] + + store32l(dst[0:4], ia) + store32l(dst[4:8], ib) + store32l(dst[8:12], ic) + store32l(dst[12:16], id) +} diff --git a/vendor/golang.org/x/crypto/twofish/twofish_test.go b/vendor/golang.org/x/crypto/twofish/twofish_test.go new file mode 100644 index 0000000..ed6a1a8 --- /dev/null +++ b/vendor/golang.org/x/crypto/twofish/twofish_test.go @@ -0,0 +1,129 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package twofish + +import ( + "bytes" + "testing" +) + +var qbox = [2][4][16]byte{ + { + {0x8, 0x1, 0x7, 0xD, 0x6, 0xF, 0x3, 0x2, 0x0, 0xB, 0x5, 0x9, 0xE, 0xC, 0xA, 0x4}, + {0xE, 0xC, 0xB, 0x8, 0x1, 0x2, 0x3, 0x5, 0xF, 0x4, 0xA, 0x6, 0x7, 0x0, 0x9, 0xD}, + {0xB, 0xA, 0x5, 0xE, 0x6, 0xD, 0x9, 0x0, 0xC, 0x8, 0xF, 0x3, 0x2, 0x4, 0x7, 0x1}, + {0xD, 0x7, 0xF, 0x4, 0x1, 0x2, 0x6, 0xE, 0x9, 0xB, 0x3, 0x0, 0x8, 0x5, 0xC, 0xA}, + }, + { + {0x2, 0x8, 0xB, 0xD, 0xF, 0x7, 0x6, 0xE, 0x3, 0x1, 0x9, 0x4, 0x0, 0xA, 0xC, 0x5}, + {0x1, 0xE, 0x2, 0xB, 0x4, 0xC, 0x3, 0x7, 0x6, 0xD, 0xA, 0x5, 0xF, 0x9, 0x0, 0x8}, + {0x4, 0xC, 0x7, 0x5, 0x1, 0x6, 0x9, 0xA, 0x0, 0xE, 0xD, 0x8, 0x2, 0xB, 0x3, 0xF}, + {0xB, 0x9, 0x5, 0x1, 0xC, 0x3, 0xD, 0xE, 0x6, 0x4, 0x7, 0xF, 0x2, 0x0, 0x8, 0xA}, + }, +} + +// genSbox generates the variable sbox +func genSbox(qi int, x byte) byte { + a0, b0 := x/16, x%16 + for i := 0; i < 2; i++ { + a1 := a0 ^ b0 + b1 := (a0 ^ ((b0 << 3) | (b0 >> 1)) ^ (a0 << 3)) & 15 + a0 = qbox[qi][2*i][a1] + b0 = qbox[qi][2*i+1][b1] + } + return (b0 << 4) + a0 +} + +func TestSbox(t *testing.T) { + for n := range sbox { + for m := range sbox[n] { + if genSbox(n, byte(m)) != sbox[n][m] { + t.Errorf("#%d|%d: sbox value = %d want %d", n, m, sbox[n][m], genSbox(n, byte(m))) + } + } + } +} + +var testVectors = []struct { + key []byte + dec []byte + enc []byte +}{ + // These tests are extracted from LibTom + { + []byte{0x9F, 0x58, 0x9F, 0x5C, 0xF6, 0x12, 0x2C, 0x32, 0xB6, 0xBF, 0xEC, 0x2F, 0x2A, 0xE8, 0xC3, 0x5A}, + []byte{0xD4, 0x91, 0xDB, 0x16, 0xE7, 0xB1, 0xC3, 0x9E, 0x86, 0xCB, 0x08, 0x6B, 0x78, 0x9F, 0x54, 0x19}, + []byte{0x01, 0x9F, 0x98, 0x09, 0xDE, 0x17, 0x11, 0x85, 0x8F, 0xAA, 0xC3, 0xA3, 0xBA, 0x20, 0xFB, 0xC3}, + }, + { + []byte{0x88, 0xB2, 0xB2, 0x70, 0x6B, 0x10, 0x5E, 0x36, 0xB4, 0x46, 0xBB, 0x6D, 0x73, 0x1A, 0x1E, 0x88, + 0xEF, 0xA7, 0x1F, 0x78, 0x89, 0x65, 0xBD, 0x44}, + []byte{0x39, 0xDA, 0x69, 0xD6, 0xBA, 0x49, 0x97, 0xD5, 0x85, 0xB6, 0xDC, 0x07, 0x3C, 0xA3, 0x41, 0xB2}, + []byte{0x18, 0x2B, 0x02, 0xD8, 0x14, 0x97, 0xEA, 0x45, 0xF9, 0xDA, 0xAC, 0xDC, 0x29, 0x19, 0x3A, 0x65}, + }, + { + []byte{0xD4, 0x3B, 0xB7, 0x55, 0x6E, 0xA3, 0x2E, 0x46, 0xF2, 0xA2, 0x82, 0xB7, 0xD4, 0x5B, 0x4E, 0x0D, + 0x57, 0xFF, 0x73, 0x9D, 0x4D, 0xC9, 0x2C, 0x1B, 0xD7, 0xFC, 0x01, 0x70, 0x0C, 0xC8, 0x21, 0x6F}, + []byte{0x90, 0xAF, 0xE9, 0x1B, 0xB2, 0x88, 0x54, 0x4F, 0x2C, 0x32, 0xDC, 0x23, 0x9B, 0x26, 0x35, 0xE6}, + []byte{0x6C, 0xB4, 0x56, 0x1C, 0x40, 0xBF, 0x0A, 0x97, 0x05, 0x93, 0x1C, 0xB6, 0xD4, 0x08, 0xE7, 0xFA}, + }, + // These tests are derived from https://www.schneier.com/code/ecb_ival.txt + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x9F, 0x58, 0x9F, 0x5C, 0xF6, 0x12, 0x2C, 0x32, 0xB6, 0xBF, 0xEC, 0x2F, 0x2A, 0xE8, 0xC3, 0x5A}, + }, + { + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, + }, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0xCF, 0xD1, 0xD2, 0xE5, 0xA9, 0xBE, 0x9C, 0xDF, 0x50, 0x1F, 0x13, 0xB8, 0x92, 0xBD, 0x22, 0x48}, + }, + { + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, + }, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x37, 0x52, 0x7B, 0xE0, 0x05, 0x23, 0x34, 0xB8, 0x9F, 0x0C, 0xFC, 0xCA, 0xE8, 0x7C, 0xFA, 0x20}, + }, +} + +func TestCipher(t *testing.T) { + for n, tt := range testVectors { + // Test if the plaintext (dec) is encrypts to the given + // ciphertext (enc) using the given key. Test also if enc can + // be decrypted again into dec. + c, err := NewCipher(tt.key) + if err != nil { + t.Errorf("#%d: NewCipher: %v", n, err) + return + } + + buf := make([]byte, 16) + c.Encrypt(buf, tt.dec) + if !bytes.Equal(buf, tt.enc) { + t.Errorf("#%d: encrypt = %x want %x", n, buf, tt.enc) + } + c.Decrypt(buf, tt.enc) + if !bytes.Equal(buf, tt.dec) { + t.Errorf("#%d: decrypt = %x want %x", n, buf, tt.dec) + } + + // Test that 16 zero bytes, encrypted 1000 times then decrypted + // 1000 times results in zero bytes again. + zero := make([]byte, 16) + buf = make([]byte, 16) + for i := 0; i < 1000; i++ { + c.Encrypt(buf, buf) + } + for i := 0; i < 1000; i++ { + c.Decrypt(buf, buf) + } + if !bytes.Equal(buf, zero) { + t.Errorf("#%d: encrypt/decrypt 1000: have %x want %x", n, buf, zero) + } + } +} diff --git a/vendor/golang.org/x/crypto/xtea/block.go b/vendor/golang.org/x/crypto/xtea/block.go new file mode 100644 index 0000000..bf5d245 --- /dev/null +++ b/vendor/golang.org/x/crypto/xtea/block.go @@ -0,0 +1,66 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + Implementation adapted from Needham and Wheeler's paper: + http://www.cix.co.uk/~klockstone/xtea.pdf + + A precalculated look up table is used during encryption/decryption for values that are based purely on the key. +*/ + +package xtea + +// XTEA is based on 64 rounds. +const numRounds = 64 + +// blockToUint32 reads an 8 byte slice into two uint32s. +// The block is treated as big endian. +func blockToUint32(src []byte) (uint32, uint32) { + r0 := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r1 := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + return r0, r1 +} + +// uint32ToBlock writes two uint32s into an 8 byte data block. +// Values are written as big endian. +func uint32ToBlock(v0, v1 uint32, dst []byte) { + dst[0] = byte(v0 >> 24) + dst[1] = byte(v0 >> 16) + dst[2] = byte(v0 >> 8) + dst[3] = byte(v0) + dst[4] = byte(v1 >> 24) + dst[5] = byte(v1 >> 16) + dst[6] = byte(v1 >> 8) + dst[7] = byte(v1 >> 0) +} + +// encryptBlock encrypts a single 8 byte block using XTEA. +func encryptBlock(c *Cipher, dst, src []byte) { + v0, v1 := blockToUint32(src) + + // Two rounds of XTEA applied per loop + for i := 0; i < numRounds; { + v0 += ((v1<<4 ^ v1>>5) + v1) ^ c.table[i] + i++ + v1 += ((v0<<4 ^ v0>>5) + v0) ^ c.table[i] + i++ + } + + uint32ToBlock(v0, v1, dst) +} + +// decryptBlock decrypt a single 8 byte block using XTEA. +func decryptBlock(c *Cipher, dst, src []byte) { + v0, v1 := blockToUint32(src) + + // Two rounds of XTEA applied per loop + for i := numRounds; i > 0; { + i-- + v1 -= ((v0<<4 ^ v0>>5) + v0) ^ c.table[i] + i-- + v0 -= ((v1<<4 ^ v1>>5) + v1) ^ c.table[i] + } + + uint32ToBlock(v0, v1, dst) +} diff --git a/vendor/golang.org/x/crypto/xtea/cipher.go b/vendor/golang.org/x/crypto/xtea/cipher.go new file mode 100644 index 0000000..66ea0df --- /dev/null +++ b/vendor/golang.org/x/crypto/xtea/cipher.go @@ -0,0 +1,82 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xtea implements XTEA encryption, as defined in Needham and Wheeler's +// 1997 technical report, "Tea extensions." +package xtea // import "golang.org/x/crypto/xtea" + +// For details, see http://www.cix.co.uk/~klockstone/xtea.pdf + +import "strconv" + +// The XTEA block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of an XTEA cipher using a particular key. +// table contains a series of precalculated values that are used each round. +type Cipher struct { + table [64]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/xtea: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a new Cipher. +// The key argument should be the XTEA key. +// XTEA only supports 128 bit (16 byte) keys. +func NewCipher(key []byte) (*Cipher, error) { + k := len(key) + switch k { + default: + return nil, KeySizeError(k) + case 16: + break + } + + c := new(Cipher) + initCipher(c, key) + + return c, nil +} + +// BlockSize returns the XTEA block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8 byte buffer src using the key and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { encryptBlock(c, dst, src) } + +// Decrypt decrypts the 8 byte buffer src using the key k and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { decryptBlock(c, dst, src) } + +// initCipher initializes the cipher context by creating a look up table +// of precalculated values that are based on the key. +func initCipher(c *Cipher, key []byte) { + // Load the key into four uint32s + var k [4]uint32 + for i := 0; i < len(k); i++ { + j := i << 2 // Multiply by 4 + k[i] = uint32(key[j+0])<<24 | uint32(key[j+1])<<16 | uint32(key[j+2])<<8 | uint32(key[j+3]) + } + + // Precalculate the table + const delta = 0x9E3779B9 + var sum uint32 + + // Two rounds of XTEA applied per loop + for i := 0; i < numRounds; { + c.table[i] = sum + k[sum&3] + i++ + sum += delta + c.table[i] = sum + k[(sum>>11)&3] + i++ + } +} diff --git a/vendor/golang.org/x/crypto/xtea/xtea_test.go b/vendor/golang.org/x/crypto/xtea/xtea_test.go new file mode 100644 index 0000000..be711bf --- /dev/null +++ b/vendor/golang.org/x/crypto/xtea/xtea_test.go @@ -0,0 +1,229 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xtea + +import ( + "testing" +) + +// A sample test key for when we just want to initialize a cipher +var testKey = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF} + +// Test that the block size for XTEA is correct +func TestBlocksize(t *testing.T) { + if BlockSize != 8 { + t.Errorf("BlockSize constant - expected 8, got %d", BlockSize) + return + } + + c, err := NewCipher(testKey) + if err != nil { + t.Errorf("NewCipher(%d bytes) = %s", len(testKey), err) + return + } + + result := c.BlockSize() + if result != 8 { + t.Errorf("BlockSize function - expected 8, got %d", result) + return + } +} + +// A series of test values to confirm that the Cipher.table array was initialized correctly +var testTable = []uint32{ + 0x00112233, 0x6B1568B8, 0xE28CE030, 0xC5089E2D, 0xC5089E2D, 0x1EFBD3A2, 0xA7845C2A, 0x78EF0917, + 0x78EF0917, 0x172682D0, 0x5B6AC714, 0x822AC955, 0x3DE68511, 0xDC1DFECA, 0x2062430E, 0x3611343F, + 0xF1CCEFFB, 0x900469B4, 0xD448ADF8, 0x2E3BE36D, 0xB6C46BF5, 0x994029F2, 0x994029F2, 0xF3335F67, + 0x6AAAD6DF, 0x4D2694DC, 0x4D2694DC, 0xEB5E0E95, 0x2FA252D9, 0x4551440A, 0x121E10D6, 0xB0558A8F, + 0xE388BDC3, 0x0A48C004, 0xC6047BC0, 0x643BF579, 0xA88039BD, 0x02736F32, 0x8AFBF7BA, 0x5C66A4A7, + 0x5C66A4A7, 0xC76AEB2C, 0x3EE262A4, 0x215E20A1, 0x215E20A1, 0x7B515616, 0x03D9DE9E, 0x1988CFCF, + 0xD5448B8B, 0x737C0544, 0xB7C04988, 0xDE804BC9, 0x9A3C0785, 0x3873813E, 0x7CB7C582, 0xD6AAFAF7, + 0x4E22726F, 0x309E306C, 0x309E306C, 0x8A9165E1, 0x1319EE69, 0xF595AC66, 0xF595AC66, 0x4F88E1DB, +} + +// Test that the cipher context is initialized correctly +func TestCipherInit(t *testing.T) { + c, err := NewCipher(testKey) + if err != nil { + t.Errorf("NewCipher(%d bytes) = %s", len(testKey), err) + return + } + + for i := 0; i < len(c.table); i++ { + if c.table[i] != testTable[i] { + t.Errorf("NewCipher() failed to initialize Cipher.table[%d] correctly. Expected %08X, got %08X", i, testTable[i], c.table[i]) + break + } + } +} + +// Test that invalid key sizes return an error +func TestInvalidKeySize(t *testing.T) { + // Test a long key + key := []byte{ + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, + } + + _, err := NewCipher(key) + if err == nil { + t.Errorf("Invalid key size %d didn't result in an error.", len(key)) + } + + // Test a short key + key = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77} + + _, err = NewCipher(key) + if err == nil { + t.Errorf("Invalid key size %d didn't result in an error.", len(key)) + } +} + +// Test that we can correctly decode some bytes we have encoded +func TestEncodeDecode(t *testing.T) { + original := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF} + input := original + output := make([]byte, BlockSize) + + c, err := NewCipher(testKey) + if err != nil { + t.Errorf("NewCipher(%d bytes) = %s", len(testKey), err) + return + } + + // Encrypt the input block + c.Encrypt(output, input) + + // Check that the output does not match the input + differs := false + for i := 0; i < len(input); i++ { + if output[i] != input[i] { + differs = true + break + } + } + if differs == false { + t.Error("Cipher.Encrypt: Failed to encrypt the input block.") + return + } + + // Decrypt the block we just encrypted + input = output + output = make([]byte, BlockSize) + c.Decrypt(output, input) + + // Check that the output from decrypt matches our initial input + for i := 0; i < len(input); i++ { + if output[i] != original[i] { + t.Errorf("Decrypted byte %d differed. Expected %02X, got %02X\n", i, original[i], output[i]) + return + } + } +} + +// Test Vectors +type CryptTest struct { + key []byte + plainText []byte + cipherText []byte +} + +var CryptTests = []CryptTest{ + // These were sourced from http://www.freemedialibrary.com/index.php/XTEA_test_vectors + { + []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}, + []byte{0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48}, + []byte{0x49, 0x7d, 0xf3, 0xd0, 0x72, 0x61, 0x2c, 0xb5}, + }, + { + []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}, + []byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41}, + []byte{0xe7, 0x8f, 0x2d, 0x13, 0x74, 0x43, 0x41, 0xd8}, + }, + { + []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}, + []byte{0x5a, 0x5b, 0x6e, 0x27, 0x89, 0x48, 0xd7, 0x7f}, + []byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41}, + }, + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48}, + []byte{0xa0, 0x39, 0x05, 0x89, 0xf8, 0xb8, 0xef, 0xa5}, + }, + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41}, + []byte{0xed, 0x23, 0x37, 0x5a, 0x82, 0x1a, 0x8c, 0x2d}, + }, + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x70, 0xe1, 0x22, 0x5d, 0x6e, 0x4e, 0x76, 0x55}, + []byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41}, + }, + + // These vectors are from http://wiki.secondlife.com/wiki/XTEA_Strong_Encryption_Implementation#Bouncy_Castle_C.23_API + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0xDE, 0xE9, 0xD4, 0xD8, 0xF7, 0x13, 0x1E, 0xD9}, + }, + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, + []byte{0x06, 0x5C, 0x1B, 0x89, 0x75, 0xC6, 0xA8, 0x16}, + }, + { + []byte{0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78, 0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x1F, 0xF9, 0xA0, 0x26, 0x1A, 0xC6, 0x42, 0x64}, + }, + { + []byte{0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78, 0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A}, + []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, + []byte{0x8C, 0x67, 0x15, 0x5B, 0x2E, 0xF9, 0x1E, 0xAD}, + }, +} + +// Test encryption +func TestCipherEncrypt(t *testing.T) { + for i, tt := range CryptTests { + c, err := NewCipher(tt.key) + if err != nil { + t.Errorf("NewCipher(%d bytes), vector %d = %s", len(tt.key), i, err) + continue + } + + out := make([]byte, len(tt.plainText)) + c.Encrypt(out, tt.plainText) + + for j := 0; j < len(out); j++ { + if out[j] != tt.cipherText[j] { + t.Errorf("Cipher.Encrypt %d: out[%d] = %02X, expected %02X", i, j, out[j], tt.cipherText[j]) + break + } + } + } +} + +// Test decryption +func TestCipherDecrypt(t *testing.T) { + for i, tt := range CryptTests { + c, err := NewCipher(tt.key) + if err != nil { + t.Errorf("NewCipher(%d bytes), vector %d = %s", len(tt.key), i, err) + continue + } + + out := make([]byte, len(tt.cipherText)) + c.Decrypt(out, tt.cipherText) + + for j := 0; j < len(out); j++ { + if out[j] != tt.plainText[j] { + t.Errorf("Cipher.Decrypt %d: out[%d] = %02X, expected %02X", i, j, out[j], tt.plainText[j]) + break + } + } + } +} diff --git a/vendor/golang.org/x/crypto/xts/xts.go b/vendor/golang.org/x/crypto/xts/xts.go new file mode 100644 index 0000000..92cbce9 --- /dev/null +++ b/vendor/golang.org/x/crypto/xts/xts.go @@ -0,0 +1,137 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xts implements the XTS cipher mode as specified in IEEE P1619/D16. +// +// XTS mode is typically used for disk encryption, which presents a number of +// novel problems that make more common modes inapplicable. The disk is +// conceptually an array of sectors and we must be able to encrypt and decrypt +// a sector in isolation. However, an attacker must not be able to transpose +// two sectors of plaintext by transposing their ciphertext. +// +// XTS wraps a block cipher with Rogaway's XEX mode in order to build a +// tweakable block cipher. This allows each sector to have a unique tweak and +// effectively create a unique key for each sector. +// +// XTS does not provide any authentication. An attacker can manipulate the +// ciphertext and randomise a block (16 bytes) of the plaintext. +// +// (Note: this package does not implement ciphertext-stealing so sectors must +// be a multiple of 16 bytes.) +package xts // import "golang.org/x/crypto/xts" + +import ( + "crypto/cipher" + "encoding/binary" + "errors" +) + +// Cipher contains an expanded key structure. It doesn't contain mutable state +// and therefore can be used concurrently. +type Cipher struct { + k1, k2 cipher.Block +} + +// blockSize is the block size that the underlying cipher must have. XTS is +// only defined for 16-byte ciphers. +const blockSize = 16 + +// NewCipher creates a Cipher given a function for creating the underlying +// block cipher (which must have a block size of 16 bytes). The key must be +// twice the length of the underlying cipher's key. +func NewCipher(cipherFunc func([]byte) (cipher.Block, error), key []byte) (c *Cipher, err error) { + c = new(Cipher) + if c.k1, err = cipherFunc(key[:len(key)/2]); err != nil { + return + } + c.k2, err = cipherFunc(key[len(key)/2:]) + + if c.k1.BlockSize() != blockSize { + err = errors.New("xts: cipher does not have a block size of 16") + } + + return +} + +// Encrypt encrypts a sector of plaintext and puts the result into ciphertext. +// Plaintext and ciphertext must overlap entirely or not at all. +// Sectors must be a multiple of 16 bytes and less than 2²⁴ bytes. +func (c *Cipher) Encrypt(ciphertext, plaintext []byte, sectorNum uint64) { + if len(ciphertext) < len(plaintext) { + panic("xts: ciphertext is smaller than plaintext") + } + if len(plaintext)%blockSize != 0 { + panic("xts: plaintext is not a multiple of the block size") + } + + var tweak [blockSize]byte + binary.LittleEndian.PutUint64(tweak[:8], sectorNum) + + c.k2.Encrypt(tweak[:], tweak[:]) + + for len(plaintext) > 0 { + for j := range tweak { + ciphertext[j] = plaintext[j] ^ tweak[j] + } + c.k1.Encrypt(ciphertext, ciphertext) + for j := range tweak { + ciphertext[j] ^= tweak[j] + } + plaintext = plaintext[blockSize:] + ciphertext = ciphertext[blockSize:] + + mul2(&tweak) + } +} + +// Decrypt decrypts a sector of ciphertext and puts the result into plaintext. +// Plaintext and ciphertext must overlap entirely or not at all. +// Sectors must be a multiple of 16 bytes and less than 2²⁴ bytes. +func (c *Cipher) Decrypt(plaintext, ciphertext []byte, sectorNum uint64) { + if len(plaintext) < len(ciphertext) { + panic("xts: plaintext is smaller than ciphertext") + } + if len(ciphertext)%blockSize != 0 { + panic("xts: ciphertext is not a multiple of the block size") + } + + var tweak [blockSize]byte + binary.LittleEndian.PutUint64(tweak[:8], sectorNum) + + c.k2.Encrypt(tweak[:], tweak[:]) + + for len(ciphertext) > 0 { + for j := range tweak { + plaintext[j] = ciphertext[j] ^ tweak[j] + } + c.k1.Decrypt(plaintext, plaintext) + for j := range tweak { + plaintext[j] ^= tweak[j] + } + plaintext = plaintext[blockSize:] + ciphertext = ciphertext[blockSize:] + + mul2(&tweak) + } +} + +// mul2 multiplies tweak by 2 in GF(2¹²⁸) with an irreducible polynomial of +// x¹²⁸ + x⁷ + x² + x + 1. +func mul2(tweak *[blockSize]byte) { + var carryIn byte + for j := range tweak { + carryOut := tweak[j] >> 7 + tweak[j] = (tweak[j] << 1) + carryIn + carryIn = carryOut + } + if carryIn != 0 { + // If we have a carry bit then we need to subtract a multiple + // of the irreducible polynomial (x¹²⁸ + x⁷ + x² + x + 1). + // By dropping the carry bit, we're subtracting the x^128 term + // so all that remains is to subtract x⁷ + x² + x + 1. + // Subtraction (and addition) in this representation is just + // XOR. + tweak[0] ^= 1<<7 | 1<<2 | 1<<1 | 1 + } +} diff --git a/vendor/golang.org/x/crypto/xts/xts_test.go b/vendor/golang.org/x/crypto/xts/xts_test.go new file mode 100644 index 0000000..96d3b6c --- /dev/null +++ b/vendor/golang.org/x/crypto/xts/xts_test.go @@ -0,0 +1,105 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xts + +import ( + "bytes" + "crypto/aes" + "encoding/hex" + "testing" +) + +// These test vectors have been taken from IEEE P1619/D16, Annex B. +var xtsTestVectors = []struct { + key string + sector uint64 + plaintext string + ciphertext string +}{ + { + "0000000000000000000000000000000000000000000000000000000000000000", + 0, + "0000000000000000000000000000000000000000000000000000000000000000", + "917cf69ebd68b2ec9b9fe9a3eadda692cd43d2f59598ed858c02c2652fbf922e", + }, { + "1111111111111111111111111111111122222222222222222222222222222222", + 0x3333333333, + "4444444444444444444444444444444444444444444444444444444444444444", + "c454185e6a16936e39334038acef838bfb186fff7480adc4289382ecd6d394f0", + }, { + "fffefdfcfbfaf9f8f7f6f5f4f3f2f1f022222222222222222222222222222222", + 0x3333333333, + "4444444444444444444444444444444444444444444444444444444444444444", + "af85336b597afc1a900b2eb21ec949d292df4c047e0b21532186a5971a227a89", + }, { + "2718281828459045235360287471352631415926535897932384626433832795", + 0, + "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", + "27a7479befa1d476489f308cd4cfa6e2a96e4bbe3208ff25287dd3819616e89cc78cf7f5e543445f8333d8fa7f56000005279fa5d8b5e4ad40e736ddb4d35412328063fd2aab53e5ea1e0a9f332500a5df9487d07a5c92cc512c8866c7e860ce93fdf166a24912b422976146ae20ce846bb7dc9ba94a767aaef20c0d61ad02655ea92dc4c4e41a8952c651d33174be51a10c421110e6d81588ede82103a252d8a750e8768defffed9122810aaeb99f9172af82b604dc4b8e51bcb08235a6f4341332e4ca60482a4ba1a03b3e65008fc5da76b70bf1690db4eae29c5f1badd03c5ccf2a55d705ddcd86d449511ceb7ec30bf12b1fa35b913f9f747a8afd1b130e94bff94effd01a91735ca1726acd0b197c4e5b03393697e126826fb6bbde8ecc1e08298516e2c9ed03ff3c1b7860f6de76d4cecd94c8119855ef5297ca67e9f3e7ff72b1e99785ca0a7e7720c5b36dc6d72cac9574c8cbbc2f801e23e56fd344b07f22154beba0f08ce8891e643ed995c94d9a69c9f1b5f499027a78572aeebd74d20cc39881c213ee770b1010e4bea718846977ae119f7a023ab58cca0ad752afe656bb3c17256a9f6e9bf19fdd5a38fc82bbe872c5539edb609ef4f79c203ebb140f2e583cb2ad15b4aa5b655016a8449277dbd477ef2c8d6c017db738b18deb4a427d1923ce3ff262735779a418f20a282df920147beabe421ee5319d0568", + }, { + "2718281828459045235360287471352631415926535897932384626433832795", + 1, + "27a7479befa1d476489f308cd4cfa6e2a96e4bbe3208ff25287dd3819616e89cc78cf7f5e543445f8333d8fa7f56000005279fa5d8b5e4ad40e736ddb4d35412328063fd2aab53e5ea1e0a9f332500a5df9487d07a5c92cc512c8866c7e860ce93fdf166a24912b422976146ae20ce846bb7dc9ba94a767aaef20c0d61ad02655ea92dc4c4e41a8952c651d33174be51a10c421110e6d81588ede82103a252d8a750e8768defffed9122810aaeb99f9172af82b604dc4b8e51bcb08235a6f4341332e4ca60482a4ba1a03b3e65008fc5da76b70bf1690db4eae29c5f1badd03c5ccf2a55d705ddcd86d449511ceb7ec30bf12b1fa35b913f9f747a8afd1b130e94bff94effd01a91735ca1726acd0b197c4e5b03393697e126826fb6bbde8ecc1e08298516e2c9ed03ff3c1b7860f6de76d4cecd94c8119855ef5297ca67e9f3e7ff72b1e99785ca0a7e7720c5b36dc6d72cac9574c8cbbc2f801e23e56fd344b07f22154beba0f08ce8891e643ed995c94d9a69c9f1b5f499027a78572aeebd74d20cc39881c213ee770b1010e4bea718846977ae119f7a023ab58cca0ad752afe656bb3c17256a9f6e9bf19fdd5a38fc82bbe872c5539edb609ef4f79c203ebb140f2e583cb2ad15b4aa5b655016a8449277dbd477ef2c8d6c017db738b18deb4a427d1923ce3ff262735779a418f20a282df920147beabe421ee5319d0568", + "264d3ca8512194fec312c8c9891f279fefdd608d0c027b60483a3fa811d65ee59d52d9e40ec5672d81532b38b6b089ce951f0f9c35590b8b978d175213f329bb1c2fd30f2f7f30492a61a532a79f51d36f5e31a7c9a12c286082ff7d2394d18f783e1a8e72c722caaaa52d8f065657d2631fd25bfd8e5baad6e527d763517501c68c5edc3cdd55435c532d7125c8614deed9adaa3acade5888b87bef641c4c994c8091b5bcd387f3963fb5bc37aa922fbfe3df4e5b915e6eb514717bdd2a74079a5073f5c4bfd46adf7d282e7a393a52579d11a028da4d9cd9c77124f9648ee383b1ac763930e7162a8d37f350b2f74b8472cf09902063c6b32e8c2d9290cefbd7346d1c779a0df50edcde4531da07b099c638e83a755944df2aef1aa31752fd323dcb710fb4bfbb9d22b925bc3577e1b8949e729a90bbafeacf7f7879e7b1147e28ba0bae940db795a61b15ecf4df8db07b824bb062802cc98a9545bb2aaeed77cb3fc6db15dcd7d80d7d5bc406c4970a3478ada8899b329198eb61c193fb6275aa8ca340344a75a862aebe92eee1ce032fd950b47d7704a3876923b4ad62844bf4a09c4dbe8b4397184b7471360c9564880aedddb9baa4af2e75394b08cd32ff479c57a07d3eab5d54de5f9738b8d27f27a9f0ab11799d7b7ffefb2704c95c6ad12c39f1e867a4b7b1d7818a4b753dfd2a89ccb45e001a03a867b187f225dd", + }, { + "27182818284590452353602874713526624977572470936999595749669676273141592653589793238462643383279502884197169399375105820974944592", + 0xff, + "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", + "1c3b3a102f770386e4836c99e370cf9bea00803f5e482357a4ae12d414a3e63b5d31e276f8fe4a8d66b317f9ac683f44680a86ac35adfc3345befecb4bb188fd5776926c49a3095eb108fd1098baec70aaa66999a72a82f27d848b21d4a741b0c5cd4d5fff9dac89aeba122961d03a757123e9870f8acf1000020887891429ca2a3e7a7d7df7b10355165c8b9a6d0a7de8b062c4500dc4cd120c0f7418dae3d0b5781c34803fa75421c790dfe1de1834f280d7667b327f6c8cd7557e12ac3a0f93ec05c52e0493ef31a12d3d9260f79a289d6a379bc70c50841473d1a8cc81ec583e9645e07b8d9670655ba5bbcfecc6dc3966380ad8fecb17b6ba02469a020a84e18e8f84252070c13e9f1f289be54fbc481457778f616015e1327a02b140f1505eb309326d68378f8374595c849d84f4c333ec4423885143cb47bd71c5edae9be69a2ffeceb1bec9de244fbe15992b11b77c040f12bd8f6a975a44a0f90c29a9abc3d4d893927284c58754cce294529f8614dcd2aba991925fedc4ae74ffac6e333b93eb4aff0479da9a410e4450e0dd7ae4c6e2910900575da401fc07059f645e8b7e9bfdef33943054ff84011493c27b3429eaedb4ed5376441a77ed43851ad77f16f541dfd269d50d6a5f14fb0aab1cbb4c1550be97f7ab4066193c4caa773dad38014bd2092fa755c824bb5e54c4f36ffda9fcea70b9c6e693e148c151", + }, +} + +func fromHex(s string) []byte { + ret, err := hex.DecodeString(s) + if err != nil { + panic("xts: invalid hex in test") + } + return ret +} + +func TestXTS(t *testing.T) { + for i, test := range xtsTestVectors { + c, err := NewCipher(aes.NewCipher, fromHex(test.key)) + if err != nil { + t.Errorf("#%d: failed to create cipher: %s", i, err) + continue + } + plaintext := fromHex(test.plaintext) + ciphertext := make([]byte, len(plaintext)) + c.Encrypt(ciphertext, plaintext, test.sector) + + expectedCiphertext := fromHex(test.ciphertext) + if !bytes.Equal(ciphertext, expectedCiphertext) { + t.Errorf("#%d: encrypted failed, got: %x, want: %x", i, ciphertext, expectedCiphertext) + continue + } + + decrypted := make([]byte, len(ciphertext)) + c.Decrypt(decrypted, ciphertext, test.sector) + if !bytes.Equal(decrypted, plaintext) { + t.Errorf("#%d: decryption failed, got: %x, want: %x", i, decrypted, plaintext) + } + } +} + +func TestShorterCiphertext(t *testing.T) { + // Decrypt used to panic if the input was shorter than the output. See + // https://go-review.googlesource.com/c/39954/ + c, err := NewCipher(aes.NewCipher, make([]byte, 32)) + if err != nil { + t.Fatalf("NewCipher failed: %s", err) + } + + plaintext := make([]byte, 32) + encrypted := make([]byte, 48) + decrypted := make([]byte, 48) + + c.Encrypt(encrypted, plaintext, 0) + c.Decrypt(decrypted, encrypted[:len(plaintext)], 0) + + if !bytes.Equal(plaintext, decrypted[:len(plaintext)]) { + t.Errorf("En/Decryption is not inverse") + } +} diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes new file mode 100644 index 0000000..d2f212e --- /dev/null +++ b/vendor/golang.org/x/net/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore new file mode 100644 index 0000000..8339fd6 --- /dev/null +++ b/vendor/golang.org/x/net/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md new file mode 100644 index 0000000..d0485e8 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/README.md b/vendor/golang.org/x/net/README.md new file mode 100644 index 0000000..00a9b6e --- /dev/null +++ b/vendor/golang.org/x/net/README.md @@ -0,0 +1,16 @@ +# Go Networking + +This repository holds supplementary Go networking libraries. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/net`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/net`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit +changes to this repository, see https://golang.org/doc/contribute.html. +The main issue tracker for the net repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/net:" in the +subject line, so it is easy to find. diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go new file mode 100644 index 0000000..15e21b1 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/asm.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// Assemble converts insts into raw instructions suitable for loading +// into a BPF virtual machine. +// +// Currently, no optimization is attempted, the assembled program flow +// is exactly as provided. +func Assemble(insts []Instruction) ([]RawInstruction, error) { + ret := make([]RawInstruction, len(insts)) + var err error + for i, inst := range insts { + ret[i], err = inst.Assemble() + if err != nil { + return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) + } + } + return ret, nil +} + +// Disassemble attempts to parse raw back into +// Instructions. Unrecognized RawInstructions are assumed to be an +// extension not implemented by this package, and are passed through +// unchanged to the output. The allDecoded value reports whether insts +// contains no RawInstructions. +func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { + insts = make([]Instruction, len(raw)) + allDecoded = true + for i, r := range raw { + insts[i] = r.Disassemble() + if _, ok := insts[i].(RawInstruction); ok { + allDecoded = false + } + } + return insts, allDecoded +} diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go new file mode 100644 index 0000000..b89ca35 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/constants.go @@ -0,0 +1,218 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Register is a register of the BPF virtual machine. +type Register uint16 + +const ( + // RegA is the accumulator register. RegA is always the + // destination register of ALU operations. + RegA Register = iota + // RegX is the indirection register, used by LoadIndirect + // operations. + RegX +) + +// An ALUOp is an arithmetic or logic operation. +type ALUOp uint16 + +// ALU binary operation types. +const ( + ALUOpAdd ALUOp = iota << 4 + ALUOpSub + ALUOpMul + ALUOpDiv + ALUOpOr + ALUOpAnd + ALUOpShiftLeft + ALUOpShiftRight + aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. + ALUOpMod + ALUOpXor +) + +// A JumpTest is a comparison operator used in conditional jumps. +type JumpTest uint16 + +// Supported operators for conditional jumps. +const ( + // K == A + JumpEqual JumpTest = iota + // K != A + JumpNotEqual + // K > A + JumpGreaterThan + // K < A + JumpLessThan + // K >= A + JumpGreaterOrEqual + // K <= A + JumpLessOrEqual + // K & A != 0 + JumpBitsSet + // K & A == 0 + JumpBitsNotSet +) + +// An Extension is a function call provided by the kernel that +// performs advanced operations that are expensive or impossible +// within the BPF virtual machine. +// +// Extensions are only implemented by the Linux kernel. +// +// TODO: should we prune this list? Some of these extensions seem +// either broken or near-impossible to use correctly, whereas other +// (len, random, ifindex) are quite useful. +type Extension int + +// Extension functions available in the Linux kernel. +const ( + // extOffset is the negative maximum number of instructions used + // to load instructions by overloading the K argument. + extOffset = -0x1000 + // ExtLen returns the length of the packet. + ExtLen Extension = 1 + // ExtProto returns the packet's L3 protocol type. + ExtProto Extension = 0 + // ExtType returns the packet's type (skb->pkt_type in the kernel) + // + // TODO: better documentation. How nice an API do we want to + // provide for these esoteric extensions? + ExtType Extension = 4 + // ExtPayloadOffset returns the offset of the packet payload, or + // the first protocol header that the kernel does not know how to + // parse. + ExtPayloadOffset Extension = 52 + // ExtInterfaceIndex returns the index of the interface on which + // the packet was received. + ExtInterfaceIndex Extension = 8 + // ExtNetlinkAttr returns the netlink attribute of type X at + // offset A. + ExtNetlinkAttr Extension = 12 + // ExtNetlinkAttrNested returns the nested netlink attribute of + // type X at offset A. + ExtNetlinkAttrNested Extension = 16 + // ExtMark returns the packet's mark value. + ExtMark Extension = 20 + // ExtQueue returns the packet's assigned hardware queue. + ExtQueue Extension = 24 + // ExtLinkLayerType returns the packet's hardware address type + // (e.g. Ethernet, Infiniband). + ExtLinkLayerType Extension = 28 + // ExtRXHash returns the packets receive hash. + // + // TODO: figure out what this rxhash actually is. + ExtRXHash Extension = 32 + // ExtCPUID returns the ID of the CPU processing the current + // packet. + ExtCPUID Extension = 36 + // ExtVLANTag returns the packet's VLAN tag. + ExtVLANTag Extension = 44 + // ExtVLANTagPresent returns non-zero if the packet has a VLAN + // tag. + // + // TODO: I think this might be a lie: it reads bit 0x1000 of the + // VLAN header, which changed meaning in recent revisions of the + // spec - this extension may now return meaningless information. + ExtVLANTagPresent Extension = 48 + // ExtVLANProto returns 0x8100 if the frame has a VLAN header, + // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some + // other value if no VLAN information is present. + ExtVLANProto Extension = 60 + // ExtRand returns a uniformly random uint32. + ExtRand Extension = 56 +) + +// The following gives names to various bit patterns used in opcode construction. + +const ( + opMaskCls uint16 = 0x7 + // opClsLoad masks + opMaskLoadDest = 0x01 + opMaskLoadWidth = 0x18 + opMaskLoadMode = 0xe0 + // opClsALU + opMaskOperandSrc = 0x08 + opMaskOperator = 0xf0 + // opClsJump + opMaskJumpConst = 0x0f + opMaskJumpCond = 0xf0 +) + +const ( + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsLoadA uint16 = iota + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | + // +---------------+-----------------+---+---+---+ + opClsLoadX + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | + // +---+---+---+---+---+---+---+---+ + opClsStoreA + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | + // +---+---+---+---+---+---+---+---+ + opClsStoreX + // +---------------+-----------------+---+---+---+ + // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsALU + // +-----------------------------+---+---+---+---+ + // | TestOperator (4b) | 0 | 1 | 0 | 1 | + // +-----------------------------+---+---+---+---+ + opClsJump + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | + // +---+-------------------------+---+---+---+---+ + opClsReturn + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | + // +---+-------------------------+---+---+---+---+ + opClsMisc +) + +const ( + opAddrModeImmediate uint16 = iota << 5 + opAddrModeAbsolute + opAddrModeIndirect + opAddrModeScratch + opAddrModePacketLen // actually an extension, not an addressing mode. + opAddrModeMemShift +) + +const ( + opLoadWidth4 uint16 = iota << 3 + opLoadWidth2 + opLoadWidth1 +) + +// Operator defined by ALUOp* + +const ( + opALUSrcConstant uint16 = iota << 3 + opALUSrcX +) + +const ( + opJumpAlways = iota << 4 + opJumpEqual + opJumpGT + opJumpGE + opJumpSet +) + +const ( + opRetSrcConstant uint16 = iota << 4 + opRetSrcA +) + +const ( + opMiscTAX = 0x00 + opMiscTXA = 0x80 +) diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go new file mode 100644 index 0000000..ae62feb --- /dev/null +++ b/vendor/golang.org/x/net/bpf/doc.go @@ -0,0 +1,82 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package bpf implements marshaling and unmarshaling of programs for the +Berkeley Packet Filter virtual machine, and provides a Go implementation +of the virtual machine. + +BPF's main use is to specify a packet filter for network taps, so that +the kernel doesn't have to expensively copy every packet it sees to +userspace. However, it's been repurposed to other areas where running +user code in-kernel is needed. For example, Linux's seccomp uses BPF +to apply security policies to system calls. For simplicity, this +documentation refers only to packets, but other uses of BPF have their +own data payloads. + +BPF programs run in a restricted virtual machine. It has almost no +access to kernel functions, and while conditional branches are +allowed, they can only jump forwards, to guarantee that there are no +infinite loops. + +The virtual machine + +The BPF VM is an accumulator machine. Its main register, called +register A, is an implicit source and destination in all arithmetic +and logic operations. The machine also has 16 scratch registers for +temporary storage, and an indirection register (register X) for +indirect memory access. All registers are 32 bits wide. + +Each run of a BPF program is given one packet, which is placed in the +VM's read-only "main memory". LoadAbsolute and LoadIndirect +instructions can fetch up to 32 bits at a time into register A for +examination. + +The goal of a BPF program is to produce and return a verdict (uint32), +which tells the kernel what to do with the packet. In the context of +packet filtering, the returned value is the number of bytes of the +packet to forward to userspace, or 0 to ignore the packet. Other +contexts like seccomp define their own return values. + +In order to simplify programs, attempts to read past the end of the +packet terminate the program execution with a verdict of 0 (ignore +packet). This means that the vast majority of BPF programs don't need +to do any explicit bounds checking. + +In addition to the bytes of the packet, some BPF programs have access +to extensions, which are essentially calls to kernel utility +functions. Currently, the only extensions supported by this package +are the Linux packet filter extensions. + +Examples + +This packet filter selects all ARP packets. + + bpf.Assemble([]bpf.Instruction{ + // Load "EtherType" field from the ethernet header. + bpf.LoadAbsolute{Off: 12, Size: 2}, + // Skip over the next instruction if EtherType is not ARP. + bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, + // Verdict is "send up to 4k of the packet to userspace." + bpf.RetConstant{Val: 4096}, + // Verdict is "ignore packet." + bpf.RetConstant{Val: 0}, + }) + +This packet filter captures a random 1% sample of traffic. + + bpf.Assemble([]bpf.Instruction{ + // Get a 32-bit random number from the Linux kernel. + bpf.LoadExtension{Num: bpf.ExtRand}, + // 1% dice roll? + bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1}, + // Capture. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + +*/ +package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go new file mode 100644 index 0000000..f9dc0e8 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/instructions.go @@ -0,0 +1,704 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// An Instruction is one instruction executed by the BPF virtual +// machine. +type Instruction interface { + // Assemble assembles the Instruction into a RawInstruction. + Assemble() (RawInstruction, error) +} + +// A RawInstruction is a raw BPF virtual machine instruction. +type RawInstruction struct { + // Operation to execute. + Op uint16 + // For conditional jump instructions, the number of instructions + // to skip if the condition is true/false. + Jt uint8 + Jf uint8 + // Constant parameter. The meaning depends on the Op. + K uint32 +} + +// Assemble implements the Instruction Assemble method. +func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil } + +// Disassemble parses ri into an Instruction and returns it. If ri is +// not recognized by this package, ri itself is returned. +func (ri RawInstruction) Disassemble() Instruction { + switch ri.Op & opMaskCls { + case opClsLoadA, opClsLoadX: + reg := Register(ri.Op & opMaskLoadDest) + sz := 0 + switch ri.Op & opMaskLoadWidth { + case opLoadWidth4: + sz = 4 + case opLoadWidth2: + sz = 2 + case opLoadWidth1: + sz = 1 + default: + return ri + } + switch ri.Op & opMaskLoadMode { + case opAddrModeImmediate: + if sz != 4 { + return ri + } + return LoadConstant{Dst: reg, Val: ri.K} + case opAddrModeScratch: + if sz != 4 || ri.K > 15 { + return ri + } + return LoadScratch{Dst: reg, N: int(ri.K)} + case opAddrModeAbsolute: + if ri.K > extOffset+0xffffffff { + return LoadExtension{Num: Extension(-extOffset + ri.K)} + } + return LoadAbsolute{Size: sz, Off: ri.K} + case opAddrModeIndirect: + return LoadIndirect{Size: sz, Off: ri.K} + case opAddrModePacketLen: + if sz != 4 { + return ri + } + return LoadExtension{Num: ExtLen} + case opAddrModeMemShift: + return LoadMemShift{Off: ri.K} + default: + return ri + } + + case opClsStoreA: + if ri.Op != opClsStoreA || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegA, N: int(ri.K)} + + case opClsStoreX: + if ri.Op != opClsStoreX || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegX, N: int(ri.K)} + + case opClsALU: + switch op := ALUOp(ri.Op & opMaskOperator); op { + case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: + if ri.Op&opMaskOperandSrc != 0 { + return ALUOpX{Op: op} + } + return ALUOpConstant{Op: op, Val: ri.K} + case aluOpNeg: + return NegateA{} + default: + return ri + } + + case opClsJump: + if ri.Op&opMaskJumpConst != opClsJump { + return ri + } + switch ri.Op & opMaskJumpCond { + case opJumpAlways: + return Jump{Skip: ri.K} + case opJumpEqual: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpNotEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGT: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessOrEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpGreaterThan, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGE: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessThan, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpGreaterOrEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpSet: + return JumpIf{ + Cond: JumpBitsSet, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + default: + return ri + } + + case opClsReturn: + switch ri.Op { + case opClsReturn | opRetSrcA: + return RetA{} + case opClsReturn | opRetSrcConstant: + return RetConstant{Val: ri.K} + default: + return ri + } + + case opClsMisc: + switch ri.Op { + case opClsMisc | opMiscTAX: + return TAX{} + case opClsMisc | opMiscTXA: + return TXA{} + default: + return ri + } + + default: + panic("unreachable") // switch is exhaustive on the bit pattern + } +} + +// LoadConstant loads Val into register Dst. +type LoadConstant struct { + Dst Register + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadConstant) Assemble() (RawInstruction, error) { + return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) +} + +// String returns the instruction in assembler notation. +func (a LoadConstant) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld #%d", a.Val) + case RegX: + return fmt.Sprintf("ldx #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadScratch loads scratch[N] into register Dst. +type LoadScratch struct { + Dst Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) +} + +// String returns the instruction in assembler notation. +func (a LoadScratch) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld M[%d]", a.N) + case RegX: + return fmt.Sprintf("ldx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadAbsolute loads packet[Off:Off+Size] as an integer value into +// register A. +type LoadAbsolute struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadAbsolute) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadAbsolute) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [%d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [%d]", a.Off) + case 4: // word + if a.Off > extOffset+0xffffffff { + return LoadExtension{Num: Extension(a.Off + 0x1000)}.String() + } + return fmt.Sprintf("ld [%d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value +// into register A. +type LoadIndirect struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadIndirect) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadIndirect) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [x + %d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [x + %d]", a.Off) + case 4: // word + return fmt.Sprintf("ld [x + %d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] +// by 4 and stores the result in register X. +// +// This instruction is mainly useful to load into X the length of an +// IPv4 packet header in a single instruction, rather than have to do +// the arithmetic on the header's first byte by hand. +type LoadMemShift struct { + Off uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadMemShift) Assemble() (RawInstruction, error) { + return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadMemShift) String() string { + return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) +} + +// LoadExtension invokes a linux-specific extension and stores the +// result in register A. +type LoadExtension struct { + Num Extension +} + +// Assemble implements the Instruction Assemble method. +func (a LoadExtension) Assemble() (RawInstruction, error) { + if a.Num == ExtLen { + return assembleLoad(RegA, 4, opAddrModePacketLen, 0) + } + return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) +} + +// String returns the instruction in assembler notation. +func (a LoadExtension) String() string { + switch a.Num { + case ExtLen: + return "ld #len" + case ExtProto: + return "ld #proto" + case ExtType: + return "ld #type" + case ExtPayloadOffset: + return "ld #poff" + case ExtInterfaceIndex: + return "ld #ifidx" + case ExtNetlinkAttr: + return "ld #nla" + case ExtNetlinkAttrNested: + return "ld #nlan" + case ExtMark: + return "ld #mark" + case ExtQueue: + return "ld #queue" + case ExtLinkLayerType: + return "ld #hatype" + case ExtRXHash: + return "ld #rxhash" + case ExtCPUID: + return "ld #cpu" + case ExtVLANTag: + return "ld #vlan_tci" + case ExtVLANTagPresent: + return "ld #vlan_avail" + case ExtVLANProto: + return "ld #vlan_tpid" + case ExtRand: + return "ld #rand" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// StoreScratch stores register Src into scratch[N]. +type StoreScratch struct { + Src Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a StoreScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + var op uint16 + switch a.Src { + case RegA: + op = opClsStoreA + case RegX: + op = opClsStoreX + default: + return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src) + } + + return RawInstruction{ + Op: op, + K: uint32(a.N), + }, nil +} + +// String returns the instruction in assembler notation. +func (a StoreScratch) String() string { + switch a.Src { + case RegA: + return fmt.Sprintf("st M[%d]", a.N) + case RegX: + return fmt.Sprintf("stx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpConstant executes A = A Val. +type ALUOpConstant struct { + Op ALUOp + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | opALUSrcConstant | uint16(a.Op), + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a ALUOpConstant) String() string { + switch a.Op { + case ALUOpAdd: + return fmt.Sprintf("add #%d", a.Val) + case ALUOpSub: + return fmt.Sprintf("sub #%d", a.Val) + case ALUOpMul: + return fmt.Sprintf("mul #%d", a.Val) + case ALUOpDiv: + return fmt.Sprintf("div #%d", a.Val) + case ALUOpMod: + return fmt.Sprintf("mod #%d", a.Val) + case ALUOpAnd: + return fmt.Sprintf("and #%d", a.Val) + case ALUOpOr: + return fmt.Sprintf("or #%d", a.Val) + case ALUOpXor: + return fmt.Sprintf("xor #%d", a.Val) + case ALUOpShiftLeft: + return fmt.Sprintf("lsh #%d", a.Val) + case ALUOpShiftRight: + return fmt.Sprintf("rsh #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpX executes A = A X +type ALUOpX struct { + Op ALUOp +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | opALUSrcX | uint16(a.Op), + }, nil +} + +// String returns the instruction in assembler notation. +func (a ALUOpX) String() string { + switch a.Op { + case ALUOpAdd: + return "add x" + case ALUOpSub: + return "sub x" + case ALUOpMul: + return "mul x" + case ALUOpDiv: + return "div x" + case ALUOpMod: + return "mod x" + case ALUOpAnd: + return "and x" + case ALUOpOr: + return "or x" + case ALUOpXor: + return "xor x" + case ALUOpShiftLeft: + return "lsh x" + case ALUOpShiftRight: + return "rsh x" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// NegateA executes A = -A. +type NegateA struct{} + +// Assemble implements the Instruction Assemble method. +func (a NegateA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(aluOpNeg), + }, nil +} + +// String returns the instruction in assembler notation. +func (a NegateA) String() string { + return fmt.Sprintf("neg") +} + +// Jump skips the following Skip instructions in the program. +type Jump struct { + Skip uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a Jump) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsJump | opJumpAlways, + K: a.Skip, + }, nil +} + +// String returns the instruction in assembler notation. +func (a Jump) String() string { + return fmt.Sprintf("ja %d", a.Skip) +} + +// JumpIf skips the following Skip instructions in the program if A +// Val is true. +type JumpIf struct { + Cond JumpTest + Val uint32 + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIf) Assemble() (RawInstruction, error) { + var ( + cond uint16 + flip bool + ) + switch a.Cond { + case JumpEqual: + cond = opJumpEqual + case JumpNotEqual: + cond, flip = opJumpEqual, true + case JumpGreaterThan: + cond = opJumpGT + case JumpLessThan: + cond, flip = opJumpGE, true + case JumpGreaterOrEqual: + cond = opJumpGE + case JumpLessOrEqual: + cond, flip = opJumpGT, true + case JumpBitsSet: + cond = opJumpSet + case JumpBitsNotSet: + cond, flip = opJumpSet, true + default: + return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond) + } + jt, jf := a.SkipTrue, a.SkipFalse + if flip { + jt, jf = jf, jt + } + return RawInstruction{ + Op: opClsJump | cond, + Jt: jt, + Jf: jf, + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a JumpIf) String() string { + switch a.Cond { + // K == A + case JumpEqual: + return conditionalJump(a, "jeq", "jneq") + // K != A + case JumpNotEqual: + return fmt.Sprintf("jneq #%d,%d", a.Val, a.SkipTrue) + // K > A + case JumpGreaterThan: + return conditionalJump(a, "jgt", "jle") + // K < A + case JumpLessThan: + return fmt.Sprintf("jlt #%d,%d", a.Val, a.SkipTrue) + // K >= A + case JumpGreaterOrEqual: + return conditionalJump(a, "jge", "jlt") + // K <= A + case JumpLessOrEqual: + return fmt.Sprintf("jle #%d,%d", a.Val, a.SkipTrue) + // K & A != 0 + case JumpBitsSet: + if a.SkipFalse > 0 { + return fmt.Sprintf("jset #%d,%d,%d", a.Val, a.SkipTrue, a.SkipFalse) + } + return fmt.Sprintf("jset #%d,%d", a.Val, a.SkipTrue) + // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips + case JumpBitsNotSet: + return JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String() + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +func conditionalJump(inst JumpIf, positiveJump, negativeJump string) string { + if inst.SkipTrue > 0 { + if inst.SkipFalse > 0 { + return fmt.Sprintf("%s #%d,%d,%d", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse) + } + return fmt.Sprintf("%s #%d,%d", positiveJump, inst.Val, inst.SkipTrue) + } + return fmt.Sprintf("%s #%d,%d", negativeJump, inst.Val, inst.SkipFalse) +} + +// RetA exits the BPF program, returning the value of register A. +type RetA struct{} + +// Assemble implements the Instruction Assemble method. +func (a RetA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetA) String() string { + return fmt.Sprintf("ret a") +} + +// RetConstant exits the BPF program, returning a constant value. +type RetConstant struct { + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a RetConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcConstant, + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetConstant) String() string { + return fmt.Sprintf("ret #%d", a.Val) +} + +// TXA copies the value of register X to register A. +type TXA struct{} + +// Assemble implements the Instruction Assemble method. +func (a TXA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTXA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a TXA) String() string { + return fmt.Sprintf("txa") +} + +// TAX copies the value of register A to register X. +type TAX struct{} + +// Assemble implements the Instruction Assemble method. +func (a TAX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTAX, + }, nil +} + +// String returns the instruction in assembler notation. +func (a TAX) String() string { + return fmt.Sprintf("tax") +} + +func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { + var ( + cls uint16 + sz uint16 + ) + switch dst { + case RegA: + cls = opClsLoadA + case RegX: + cls = opClsLoadX + default: + return RawInstruction{}, fmt.Errorf("invalid target register %v", dst) + } + switch loadSize { + case 1: + sz = opLoadWidth1 + case 2: + sz = opLoadWidth2 + case 4: + sz = opLoadWidth4 + default: + return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz) + } + return RawInstruction{ + Op: cls | sz | mode, + K: k, + }, nil +} diff --git a/vendor/golang.org/x/net/bpf/instructions_test.go b/vendor/golang.org/x/net/bpf/instructions_test.go new file mode 100644 index 0000000..dde474a --- /dev/null +++ b/vendor/golang.org/x/net/bpf/instructions_test.go @@ -0,0 +1,525 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "fmt" + "io/ioutil" + "reflect" + "strconv" + "strings" + "testing" +) + +// This is a direct translation of the program in +// testdata/all_instructions.txt. +var allInstructions = []Instruction{ + LoadConstant{Dst: RegA, Val: 42}, + LoadConstant{Dst: RegX, Val: 42}, + + LoadScratch{Dst: RegA, N: 3}, + LoadScratch{Dst: RegX, N: 3}, + + LoadAbsolute{Off: 42, Size: 1}, + LoadAbsolute{Off: 42, Size: 2}, + LoadAbsolute{Off: 42, Size: 4}, + + LoadIndirect{Off: 42, Size: 1}, + LoadIndirect{Off: 42, Size: 2}, + LoadIndirect{Off: 42, Size: 4}, + + LoadMemShift{Off: 42}, + + LoadExtension{Num: ExtLen}, + LoadExtension{Num: ExtProto}, + LoadExtension{Num: ExtType}, + LoadExtension{Num: ExtRand}, + + StoreScratch{Src: RegA, N: 3}, + StoreScratch{Src: RegX, N: 3}, + + ALUOpConstant{Op: ALUOpAdd, Val: 42}, + ALUOpConstant{Op: ALUOpSub, Val: 42}, + ALUOpConstant{Op: ALUOpMul, Val: 42}, + ALUOpConstant{Op: ALUOpDiv, Val: 42}, + ALUOpConstant{Op: ALUOpOr, Val: 42}, + ALUOpConstant{Op: ALUOpAnd, Val: 42}, + ALUOpConstant{Op: ALUOpShiftLeft, Val: 42}, + ALUOpConstant{Op: ALUOpShiftRight, Val: 42}, + ALUOpConstant{Op: ALUOpMod, Val: 42}, + ALUOpConstant{Op: ALUOpXor, Val: 42}, + + ALUOpX{Op: ALUOpAdd}, + ALUOpX{Op: ALUOpSub}, + ALUOpX{Op: ALUOpMul}, + ALUOpX{Op: ALUOpDiv}, + ALUOpX{Op: ALUOpOr}, + ALUOpX{Op: ALUOpAnd}, + ALUOpX{Op: ALUOpShiftLeft}, + ALUOpX{Op: ALUOpShiftRight}, + ALUOpX{Op: ALUOpMod}, + ALUOpX{Op: ALUOpXor}, + + NegateA{}, + + Jump{Skip: 10}, + JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}, + JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8}, + JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7}, + JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6}, + JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5}, + JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4}, + JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, + + TAX{}, + TXA{}, + + RetA{}, + RetConstant{Val: 42}, +} +var allInstructionsExpected = "testdata/all_instructions.bpf" + +// Check that we produce the same output as the canonical bpf_asm +// linux kernel tool. +func TestInterop(t *testing.T) { + out, err := Assemble(allInstructions) + if err != nil { + t.Fatalf("assembly of allInstructions program failed: %s", err) + } + t.Logf("Assembled program is %d instructions long", len(out)) + + bs, err := ioutil.ReadFile(allInstructionsExpected) + if err != nil { + t.Fatalf("reading %s: %s", allInstructionsExpected, err) + } + // First statement is the number of statements, last statement is + // empty. We just ignore both and rely on slice length. + stmts := strings.Split(string(bs), ",") + if len(stmts)-2 != len(out) { + t.Fatalf("test program lengths don't match: %s has %d, Go implementation has %d", allInstructionsExpected, len(stmts)-2, len(allInstructions)) + } + + for i, stmt := range stmts[1 : len(stmts)-2] { + nums := strings.Split(stmt, " ") + if len(nums) != 4 { + t.Fatalf("malformed instruction %d in %s: %s", i+1, allInstructionsExpected, stmt) + } + + actual := out[i] + + op, err := strconv.ParseUint(nums[0], 10, 16) + if err != nil { + t.Fatalf("malformed opcode %s in instruction %d of %s", nums[0], i+1, allInstructionsExpected) + } + if actual.Op != uint16(op) { + t.Errorf("opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x", i+1, allInstructions[i], actual.Op, op) + } + + jt, err := strconv.ParseUint(nums[1], 10, 8) + if err != nil { + t.Fatalf("malformed jt offset %s in instruction %d of %s", nums[1], i+1, allInstructionsExpected) + } + if actual.Jt != uint8(jt) { + t.Errorf("jt mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jt, jt) + } + + jf, err := strconv.ParseUint(nums[2], 10, 8) + if err != nil { + t.Fatalf("malformed jf offset %s in instruction %d of %s", nums[2], i+1, allInstructionsExpected) + } + if actual.Jf != uint8(jf) { + t.Errorf("jf mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jf, jf) + } + + k, err := strconv.ParseUint(nums[3], 10, 32) + if err != nil { + t.Fatalf("malformed constant %s in instruction %d of %s", nums[3], i+1, allInstructionsExpected) + } + if actual.K != uint32(k) { + t.Errorf("constant mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.K, k) + } + } +} + +// Check that assembly and disassembly match each other. +func TestAsmDisasm(t *testing.T) { + prog1, err := Assemble(allInstructions) + if err != nil { + t.Fatalf("assembly of allInstructions program failed: %s", err) + } + t.Logf("Assembled program is %d instructions long", len(prog1)) + + got, allDecoded := Disassemble(prog1) + if !allDecoded { + t.Errorf("Disassemble(Assemble(allInstructions)) produced unrecognized instructions:") + for i, inst := range got { + if r, ok := inst.(RawInstruction); ok { + t.Logf(" insn %d, %#v --> %#v", i+1, allInstructions[i], r) + } + } + } + + if len(allInstructions) != len(got) { + t.Fatalf("disassembly changed program size: %d insns before, %d insns after", len(allInstructions), len(got)) + } + if !reflect.DeepEqual(allInstructions, got) { + t.Errorf("program mutated by disassembly:") + for i := range got { + if !reflect.DeepEqual(allInstructions[i], got[i]) { + t.Logf(" insn %d, s: %#v, p1: %#v, got: %#v", i+1, allInstructions[i], prog1[i], got[i]) + } + } + } +} + +type InvalidInstruction struct{} + +func (a InvalidInstruction) Assemble() (RawInstruction, error) { + return RawInstruction{}, fmt.Errorf("Invalid Instruction") +} + +func (a InvalidInstruction) String() string { + return fmt.Sprintf("unknown instruction: %#v", a) +} + +func TestString(t *testing.T) { + testCases := []struct { + instruction Instruction + assembler string + }{ + { + instruction: LoadConstant{Dst: RegA, Val: 42}, + assembler: "ld #42", + }, + { + instruction: LoadConstant{Dst: RegX, Val: 42}, + assembler: "ldx #42", + }, + { + instruction: LoadConstant{Dst: 0xffff, Val: 42}, + assembler: "unknown instruction: bpf.LoadConstant{Dst:0xffff, Val:0x2a}", + }, + { + instruction: LoadScratch{Dst: RegA, N: 3}, + assembler: "ld M[3]", + }, + { + instruction: LoadScratch{Dst: RegX, N: 3}, + assembler: "ldx M[3]", + }, + { + instruction: LoadScratch{Dst: 0xffff, N: 3}, + assembler: "unknown instruction: bpf.LoadScratch{Dst:0xffff, N:3}", + }, + { + instruction: LoadAbsolute{Off: 42, Size: 1}, + assembler: "ldb [42]", + }, + { + instruction: LoadAbsolute{Off: 42, Size: 2}, + assembler: "ldh [42]", + }, + { + instruction: LoadAbsolute{Off: 42, Size: 4}, + assembler: "ld [42]", + }, + { + instruction: LoadAbsolute{Off: 42, Size: -1}, + assembler: "unknown instruction: bpf.LoadAbsolute{Off:0x2a, Size:-1}", + }, + { + instruction: LoadIndirect{Off: 42, Size: 1}, + assembler: "ldb [x + 42]", + }, + { + instruction: LoadIndirect{Off: 42, Size: 2}, + assembler: "ldh [x + 42]", + }, + { + instruction: LoadIndirect{Off: 42, Size: 4}, + assembler: "ld [x + 42]", + }, + { + instruction: LoadIndirect{Off: 42, Size: -1}, + assembler: "unknown instruction: bpf.LoadIndirect{Off:0x2a, Size:-1}", + }, + { + instruction: LoadMemShift{Off: 42}, + assembler: "ldx 4*([42]&0xf)", + }, + { + instruction: LoadExtension{Num: ExtLen}, + assembler: "ld #len", + }, + { + instruction: LoadExtension{Num: ExtProto}, + assembler: "ld #proto", + }, + { + instruction: LoadExtension{Num: ExtType}, + assembler: "ld #type", + }, + { + instruction: LoadExtension{Num: ExtPayloadOffset}, + assembler: "ld #poff", + }, + { + instruction: LoadExtension{Num: ExtInterfaceIndex}, + assembler: "ld #ifidx", + }, + { + instruction: LoadExtension{Num: ExtNetlinkAttr}, + assembler: "ld #nla", + }, + { + instruction: LoadExtension{Num: ExtNetlinkAttrNested}, + assembler: "ld #nlan", + }, + { + instruction: LoadExtension{Num: ExtMark}, + assembler: "ld #mark", + }, + { + instruction: LoadExtension{Num: ExtQueue}, + assembler: "ld #queue", + }, + { + instruction: LoadExtension{Num: ExtLinkLayerType}, + assembler: "ld #hatype", + }, + { + instruction: LoadExtension{Num: ExtRXHash}, + assembler: "ld #rxhash", + }, + { + instruction: LoadExtension{Num: ExtCPUID}, + assembler: "ld #cpu", + }, + { + instruction: LoadExtension{Num: ExtVLANTag}, + assembler: "ld #vlan_tci", + }, + { + instruction: LoadExtension{Num: ExtVLANTagPresent}, + assembler: "ld #vlan_avail", + }, + { + instruction: LoadExtension{Num: ExtVLANProto}, + assembler: "ld #vlan_tpid", + }, + { + instruction: LoadExtension{Num: ExtRand}, + assembler: "ld #rand", + }, + { + instruction: LoadAbsolute{Off: 0xfffff038, Size: 4}, + assembler: "ld #rand", + }, + { + instruction: LoadExtension{Num: 0xfff}, + assembler: "unknown instruction: bpf.LoadExtension{Num:4095}", + }, + { + instruction: StoreScratch{Src: RegA, N: 3}, + assembler: "st M[3]", + }, + { + instruction: StoreScratch{Src: RegX, N: 3}, + assembler: "stx M[3]", + }, + { + instruction: StoreScratch{Src: 0xffff, N: 3}, + assembler: "unknown instruction: bpf.StoreScratch{Src:0xffff, N:3}", + }, + { + instruction: ALUOpConstant{Op: ALUOpAdd, Val: 42}, + assembler: "add #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpSub, Val: 42}, + assembler: "sub #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpMul, Val: 42}, + assembler: "mul #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpDiv, Val: 42}, + assembler: "div #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpOr, Val: 42}, + assembler: "or #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpAnd, Val: 42}, + assembler: "and #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpShiftLeft, Val: 42}, + assembler: "lsh #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpShiftRight, Val: 42}, + assembler: "rsh #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpMod, Val: 42}, + assembler: "mod #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpXor, Val: 42}, + assembler: "xor #42", + }, + { + instruction: ALUOpConstant{Op: 0xffff, Val: 42}, + assembler: "unknown instruction: bpf.ALUOpConstant{Op:0xffff, Val:0x2a}", + }, + { + instruction: ALUOpX{Op: ALUOpAdd}, + assembler: "add x", + }, + { + instruction: ALUOpX{Op: ALUOpSub}, + assembler: "sub x", + }, + { + instruction: ALUOpX{Op: ALUOpMul}, + assembler: "mul x", + }, + { + instruction: ALUOpX{Op: ALUOpDiv}, + assembler: "div x", + }, + { + instruction: ALUOpX{Op: ALUOpOr}, + assembler: "or x", + }, + { + instruction: ALUOpX{Op: ALUOpAnd}, + assembler: "and x", + }, + { + instruction: ALUOpX{Op: ALUOpShiftLeft}, + assembler: "lsh x", + }, + { + instruction: ALUOpX{Op: ALUOpShiftRight}, + assembler: "rsh x", + }, + { + instruction: ALUOpX{Op: ALUOpMod}, + assembler: "mod x", + }, + { + instruction: ALUOpX{Op: ALUOpXor}, + assembler: "xor x", + }, + { + instruction: ALUOpX{Op: 0xffff}, + assembler: "unknown instruction: bpf.ALUOpX{Op:0xffff}", + }, + { + instruction: NegateA{}, + assembler: "neg", + }, + { + instruction: Jump{Skip: 10}, + assembler: "ja 10", + }, + { + instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}, + assembler: "jeq #42,8,9", + }, + { + instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8}, + assembler: "jeq #42,8", + }, + { + instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipFalse: 8}, + assembler: "jneq #42,8", + }, + { + instruction: JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8}, + assembler: "jneq #42,8", + }, + { + instruction: JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7}, + assembler: "jlt #42,7", + }, + { + instruction: JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6}, + assembler: "jle #42,6", + }, + { + instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5}, + assembler: "jgt #42,4,5", + }, + { + instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4}, + assembler: "jgt #42,4", + }, + { + instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4}, + assembler: "jge #42,3,4", + }, + { + instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3}, + assembler: "jge #42,3", + }, + { + instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, + assembler: "jset #42,2,3", + }, + { + instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2}, + assembler: "jset #42,2", + }, + { + instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, + assembler: "jset #42,3,2", + }, + { + instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2}, + assembler: "jset #42,0,2", + }, + { + instruction: JumpIf{Cond: 0xffff, Val: 42, SkipTrue: 1, SkipFalse: 2}, + assembler: "unknown instruction: bpf.JumpIf{Cond:0xffff, Val:0x2a, SkipTrue:0x1, SkipFalse:0x2}", + }, + { + instruction: TAX{}, + assembler: "tax", + }, + { + instruction: TXA{}, + assembler: "txa", + }, + { + instruction: RetA{}, + assembler: "ret a", + }, + { + instruction: RetConstant{Val: 42}, + assembler: "ret #42", + }, + // Invalid instruction + { + instruction: InvalidInstruction{}, + assembler: "unknown instruction: bpf.InvalidInstruction{}", + }, + } + + for _, testCase := range testCases { + if input, ok := testCase.instruction.(fmt.Stringer); ok { + got := input.String() + if got != testCase.assembler { + t.Errorf("String did not return expected assembler notation, expected: %s, got: %s", testCase.assembler, got) + } + } else { + t.Errorf("Instruction %#v is not a fmt.Stringer", testCase.instruction) + } + } +} diff --git a/vendor/golang.org/x/net/bpf/setter.go b/vendor/golang.org/x/net/bpf/setter.go new file mode 100644 index 0000000..43e35f0 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/setter.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Setter is a type which can attach a compiled BPF filter to itself. +type Setter interface { + SetBPF(filter []RawInstruction) error +} diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf new file mode 100644 index 0000000..f871440 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf @@ -0,0 +1 @@ +50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 0, diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt new file mode 100644 index 0000000..3045501 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt @@ -0,0 +1,79 @@ +# This filter is compiled to all_instructions.bpf by the `bpf_asm` +# tool, which can be found in the linux kernel source tree under +# tools/net. + +# Load immediate +ld #42 +ldx #42 + +# Load scratch +ld M[3] +ldx M[3] + +# Load absolute +ldb [42] +ldh [42] +ld [42] + +# Load indirect +ldb [x + 42] +ldh [x + 42] +ld [x + 42] + +# Load IPv4 header length +ldx 4*([42]&0xf) + +# Run extension function +ld #len +ld #proto +ld #type +ld #rand + +# Store scratch +st M[3] +stx M[3] + +# A constant +add #42 +sub #42 +mul #42 +div #42 +or #42 +and #42 +lsh #42 +rsh #42 +mod #42 +xor #42 + +# A X +add x +sub x +mul x +div x +or x +and x +lsh x +rsh x +mod x +xor x + +# !A +neg + +# Jumps +ja end +jeq #42,prev,end +jne #42,end +jlt #42,end +jle #42,end +jgt #42,prev,end +jge #42,prev,end +jset #42,prev,end + +# Register transfers +tax +txa + +# Returns +prev: ret a +end: ret #42 diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go new file mode 100644 index 0000000..4c656f1 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm.go @@ -0,0 +1,140 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "errors" + "fmt" +) + +// A VM is an emulated BPF virtual machine. +type VM struct { + filter []Instruction +} + +// NewVM returns a new VM using the input BPF program. +func NewVM(filter []Instruction) (*VM, error) { + if len(filter) == 0 { + return nil, errors.New("one or more Instructions must be specified") + } + + for i, ins := range filter { + check := len(filter) - (i + 1) + switch ins := ins.(type) { + // Check for out-of-bounds jumps in instructions + case Jump: + if check <= int(ins.Skip) { + return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip) + } + case JumpIf: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + // Check for division or modulus by zero + case ALUOpConstant: + if ins.Val != 0 { + break + } + + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return nil, errors.New("cannot divide by zero using ALUOpConstant") + } + // Check for unknown extensions + case LoadExtension: + switch ins.Num { + case ExtLen: + default: + return nil, fmt.Errorf("extension %d not implemented", ins.Num) + } + } + } + + // Make sure last instruction is a return instruction + switch filter[len(filter)-1].(type) { + case RetA, RetConstant: + default: + return nil, errors.New("BPF program must end with RetA or RetConstant") + } + + // Though our VM works using disassembled instructions, we + // attempt to assemble the input filter anyway to ensure it is compatible + // with an operating system VM. + _, err := Assemble(filter) + + return &VM{ + filter: filter, + }, err +} + +// Run runs the VM's BPF program against the input bytes. +// Run returns the number of bytes accepted by the BPF program, and any errors +// which occurred while processing the program. +func (v *VM) Run(in []byte) (int, error) { + var ( + // Registers of the virtual machine + regA uint32 + regX uint32 + regScratch [16]uint32 + + // OK is true if the program should continue processing the next + // instruction, or false if not, causing the loop to break + ok = true + ) + + // TODO(mdlayher): implement: + // - NegateA: + // - would require a change from uint32 registers to int32 + // registers + + // TODO(mdlayher): add interop tests that check signedness of ALU + // operations against kernel implementation, and make sure Go + // implementation matches behavior + + for i := 0; i < len(v.filter) && ok; i++ { + ins := v.filter[i] + + switch ins := ins.(type) { + case ALUOpConstant: + regA = aluOpConstant(ins, regA) + case ALUOpX: + regA, ok = aluOpX(ins, regA, regX) + case Jump: + i += int(ins.Skip) + case JumpIf: + jump := jumpIf(ins, regA) + i += jump + case LoadAbsolute: + regA, ok = loadAbsolute(ins, in) + case LoadConstant: + regA, regX = loadConstant(ins, regA, regX) + case LoadExtension: + regA = loadExtension(ins, in) + case LoadIndirect: + regA, ok = loadIndirect(ins, in, regX) + case LoadMemShift: + regX, ok = loadMemShift(ins, in) + case LoadScratch: + regA, regX = loadScratch(ins, regScratch, regA, regX) + case RetA: + return int(regA), nil + case RetConstant: + return int(ins.Val), nil + case StoreScratch: + regScratch = storeScratch(ins, regScratch, regA, regX) + case TAX: + regX = regA + case TXA: + regA = regX + default: + return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) + } + } + + return 0, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_aluop_test.go b/vendor/golang.org/x/net/bpf/vm_aluop_test.go new file mode 100644 index 0000000..1667824 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_aluop_test.go @@ -0,0 +1,512 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMALUOpAdd(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAdd, + Val: 3, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 8, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 3, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpSub(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + bpf.ALUOpX{ + Op: bpf.ALUOpSub, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpMul(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMul, + Val: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 6, 2, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpDiv(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpDiv, + Val: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 20, 2, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpDivByZeroALUOpConstant(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.ALUOpConstant{ + Op: bpf.ALUOpDiv, + Val: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot divide by zero using ALUOpConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMALUOpDivByZeroALUOpX(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 0 into X + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + // Load byte 1 into A + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Attempt to perform 1/0 + bpf.ALUOpX{ + Op: bpf.ALUOpDiv, + }, + // Return 4 bytes if program does not terminate + bpf.LoadConstant{ + Val: 12, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpOr(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpOr, + Val: 0x01, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x00, 0x10, 0x03, 0x04, + 0x05, 0x06, 0x07, 0x08, + 0x09, 0xff, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 9, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpAnd(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAnd, + Val: 0x0019, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xaa, 0x09, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpShiftLeft(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpShiftLeft, + Val: 0x01, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x02, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0xaa, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpShiftRight(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpShiftRight, + Val: 0x01, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x04, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x08, 0xff, 0xff, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpMod(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMod, + Val: 20, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 30, 0, 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpModByZeroALUOpConstant(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMod, + Val: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot divide by zero using ALUOpConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMALUOpModByZeroALUOpX(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 0 into X + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + // Load byte 1 into A + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Attempt to perform 1%0 + bpf.ALUOpX{ + Op: bpf.ALUOpMod, + }, + // Return 4 bytes if program does not terminate + bpf.LoadConstant{ + Val: 12, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpXor(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpXor, + Val: 0x0a, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x01, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x00, 0x00, 0x00, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpUnknown(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAdd, + Val: 1, + }, + // Verify that an unknown operation is a no-op + bpf.ALUOpConstant{ + Op: 100, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x02, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_bpf_test.go b/vendor/golang.org/x/net/bpf/vm_bpf_test.go new file mode 100644 index 0000000..77fa8fe --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_bpf_test.go @@ -0,0 +1,192 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +// A virtualMachine is a BPF virtual machine which can process an +// input packet against a BPF program and render a verdict. +type virtualMachine interface { + Run(in []byte) (int, error) +} + +// canUseOSVM indicates if the OS BPF VM is available on this platform. +func canUseOSVM() bool { + // OS BPF VM can only be used on platforms where x/net/ipv4 supports + // attaching a BPF program to a socket. + switch runtime.GOOS { + case "linux": + return true + } + + return false +} + +// All BPF tests against both the Go VM and OS VM are assumed to +// be used with a UDP socket. As a result, the entire contents +// of a UDP datagram is sent through the BPF program, but only +// the body after the UDP header will ever be returned in output. + +// testVM sets up a Go BPF VM, and if available, a native OS BPF VM +// for integration testing. +func testVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func(), error) { + goVM, err := bpf.NewVM(filter) + if err != nil { + // Some tests expect an error, so this error must be returned + // instead of fatally exiting the test + return nil, nil, err + } + + mvm := &multiVirtualMachine{ + goVM: goVM, + + t: t, + } + + // If available, add the OS VM for tests which verify that both the Go + // VM and OS VM have exactly the same output for the same input program + // and packet. + done := func() {} + if canUseOSVM() { + osVM, osVMDone := testOSVM(t, filter) + done = func() { osVMDone() } + mvm.osVM = osVM + } + + return mvm, done, nil +} + +// udpHeaderLen is the length of a UDP header. +const udpHeaderLen = 8 + +// A multiVirtualMachine is a virtualMachine which can call out to both the Go VM +// and the native OS VM, if the OS VM is available. +type multiVirtualMachine struct { + goVM virtualMachine + osVM virtualMachine + + t *testing.T +} + +func (mvm *multiVirtualMachine) Run(in []byte) (int, error) { + if len(in) < udpHeaderLen { + mvm.t.Fatalf("input must be at least length of UDP header (%d), got: %d", + udpHeaderLen, len(in)) + } + + // All tests have a UDP header as part of input, because the OS VM + // packets always will. For the Go VM, this output is trimmed before + // being sent back to tests. + goOut, goErr := mvm.goVM.Run(in) + if goOut >= udpHeaderLen { + goOut -= udpHeaderLen + } + + // If Go output is larger than the size of the packet, packet filtering + // interop tests must trim the output bytes to the length of the packet. + // The BPF VM should not do this on its own, as other uses of it do + // not trim the output byte count. + trim := len(in) - udpHeaderLen + if goOut > trim { + goOut = trim + } + + // When the OS VM is not available, process using the Go VM alone + if mvm.osVM == nil { + return goOut, goErr + } + + // The OS VM will apply its own UDP header, so remove the pseudo header + // that the Go VM needs. + osOut, err := mvm.osVM.Run(in[udpHeaderLen:]) + if err != nil { + mvm.t.Fatalf("error while running OS VM: %v", err) + } + + // Verify both VMs return same number of bytes + var mismatch bool + if goOut != osOut { + mismatch = true + mvm.t.Logf("output byte count does not match:\n- go: %v\n- os: %v", goOut, osOut) + } + + if mismatch { + mvm.t.Fatal("Go BPF and OS BPF packet outputs do not match") + } + + return goOut, goErr +} + +// An osVirtualMachine is a virtualMachine which uses the OS's BPF VM for +// processing BPF programs. +type osVirtualMachine struct { + l net.PacketConn + s net.Conn +} + +// testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting +// packets into a UDP listener with a BPF program attached to it. +func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) { + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to open OS VM UDP listener: %v", err) + } + + prog, err := bpf.Assemble(filter) + if err != nil { + t.Fatalf("failed to compile BPF program: %v", err) + } + + p := ipv4.NewPacketConn(l) + if err = p.SetBPF(prog); err != nil { + t.Fatalf("failed to attach BPF program to listener: %v", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatalf("failed to dial connection to listener: %v", err) + } + + done := func() { + _ = s.Close() + _ = l.Close() + } + + return &osVirtualMachine{ + l: l, + s: s, + }, done +} + +// Run sends the input bytes into the OS's BPF VM and returns its verdict. +func (vm *osVirtualMachine) Run(in []byte) (int, error) { + go func() { + _, _ = vm.s.Write(in) + }() + + vm.l.SetDeadline(time.Now().Add(50 * time.Millisecond)) + + var b [512]byte + n, _, err := vm.l.ReadFrom(b[:]) + if err != nil { + // A timeout indicates that BPF filtered out the packet, and thus, + // no input should be returned. + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + return n, nil + } + + return n, err + } + + return n, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_extension_test.go b/vendor/golang.org/x/net/bpf/vm_extension_test.go new file mode 100644 index 0000000..7a48c82 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_extension_test.go @@ -0,0 +1,49 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMLoadExtensionNotImplemented(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadExtension{ + Num: 100, + }, + bpf.RetA{}, + }) + if errStr(err) != "extension 100 not implemented" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadExtensionExtLen(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadExtension{ + Num: bpf.ExtLen, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go new file mode 100644 index 0000000..516f946 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -0,0 +1,174 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "encoding/binary" + "fmt" +) + +func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 { + return aluOpCommon(ins.Op, regA, ins.Val) +} + +func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) { + // Guard against division or modulus by zero by terminating + // the program, as the OS BPF VM does + if regX == 0 { + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return 0, false + } + } + + return aluOpCommon(ins.Op, regA, regX), true +} + +func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { + switch op { + case ALUOpAdd: + return regA + value + case ALUOpSub: + return regA - value + case ALUOpMul: + return regA * value + case ALUOpDiv: + // Division by zero not permitted by NewVM and aluOpX checks + return regA / value + case ALUOpOr: + return regA | value + case ALUOpAnd: + return regA & value + case ALUOpShiftLeft: + return regA << value + case ALUOpShiftRight: + return regA >> value + case ALUOpMod: + // Modulus by zero not permitted by NewVM and aluOpX checks + return regA % value + case ALUOpXor: + return regA ^ value + default: + return regA + } +} + +func jumpIf(ins JumpIf, value uint32) int { + var ok bool + inV := uint32(ins.Val) + + switch ins.Cond { + case JumpEqual: + ok = value == inV + case JumpNotEqual: + ok = value != inV + case JumpGreaterThan: + ok = value > inV + case JumpLessThan: + ok = value < inV + case JumpGreaterOrEqual: + ok = value >= inV + case JumpLessOrEqual: + ok = value <= inV + case JumpBitsSet: + ok = (value & inV) != 0 + case JumpBitsNotSet: + ok = (value & inV) == 0 + } + + if ok { + return int(ins.SkipTrue) + } + + return int(ins.SkipFalse) +} + +func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { + offset := int(ins.Off) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = ins.Val + case RegX: + regX = ins.Val + } + + return regA, regX +} + +func loadExtension(ins LoadExtension, in []byte) uint32 { + switch ins.Num { + case ExtLen: + return uint32(len(in)) + default: + panic(fmt.Sprintf("unimplemented extension: %d", ins.Num)) + } +} + +func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { + offset := int(ins.Off) + int(regX) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { + offset := int(ins.Off) + + if !inBounds(len(in), offset, 0) { + return 0, false + } + + // Mask off high 4 bits and multiply low 4 bits by 4 + return uint32(in[offset]&0x0f) * 4, true +} + +func inBounds(inLen int, offset int, size int) bool { + return offset+size <= inLen +} + +func loadCommon(in []byte, offset int, size int) (uint32, bool) { + if !inBounds(len(in), offset, size) { + return 0, false + } + + switch size { + case 1: + return uint32(in[offset]), true + case 2: + return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true + case 4: + return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true + default: + panic(fmt.Sprintf("invalid load size: %d", size)) + } +} + +func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = regScratch[ins.N] + case RegX: + regX = regScratch[ins.N] + } + + return regA, regX +} + +func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 { + switch ins.Src { + case RegA: + regScratch[ins.N] = regA + case RegX: + regScratch[ins.N] = regX + } + + return regScratch +} diff --git a/vendor/golang.org/x/net/bpf/vm_jump_test.go b/vendor/golang.org/x/net/bpf/vm_jump_test.go new file mode 100644 index 0000000..e0a3a98 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_jump_test.go @@ -0,0 +1,380 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMJumpOne(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.Jump{ + Skip: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.Jump{ + Skip: 1, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 1 instructions; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfTrueOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.JumpIf{ + Cond: bpf.JumpEqual, + SkipTrue: 2, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 2 instructions in true case; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfFalseOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.JumpIf{ + Cond: bpf.JumpEqual, + SkipFalse: 3, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 3 instructions in false case; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 1, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfNotEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.JumpIf{ + Cond: bpf.JumpNotEqual, + Val: 1, + SkipFalse: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfGreaterThan(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpGreaterThan, + Val: 0x00010202, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfLessThan(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpLessThan, + Val: 0xff010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfGreaterOrEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpGreaterOrEqual, + Val: 0x00010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfLessOrEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpLessOrEqual, + Val: 0xff010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfBitsSet(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.JumpIf{ + Cond: bpf.JumpBitsSet, + Val: 0x1122, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0x02, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfBitsNotSet(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.JumpIf{ + Cond: bpf.JumpBitsNotSet, + Val: 0x1221, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0x02, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_load_test.go b/vendor/golang.org/x/net/bpf/vm_load_test.go new file mode 100644 index 0000000..04578b6 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_load_test.go @@ -0,0 +1,246 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "net" + "testing" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 100, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadAbsoluteBadInstructionSize(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Size: 5, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid load byte length 0" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadConstantOK(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegX, + Val: 9, + }, + bpf.TXA{}, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadIndirectOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadIndirect{ + Off: 100, + Size: 1, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadMemShiftOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadMemShift{ + Off: 100, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +const ( + dhcp4Port = 53 +) + +func TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) { + vm, in, done := testDHCPv4(t) + defer done() + + // Append mostly empty UDP header with incorrect DHCPv4 port + in = append(in, []byte{ + 0, 0, + 0, dhcp4Port + 1, + 0, 0, + 0, 0, + }...) + + out, err := vm.Run(in) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadMemShiftLoadIndirectOK(t *testing.T) { + vm, in, done := testDHCPv4(t) + defer done() + + // Append mostly empty UDP header with correct DHCPv4 port + in = append(in, []byte{ + 0, 0, + 0, dhcp4Port, + 0, 0, + 0, 0, + }...) + + out, err := vm.Run(in) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := len(in)-8, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) { + // DHCPv4 test data courtesy of David Anderson: + // https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70 + vm, done, err := testVM(t, []bpf.Instruction{ + // Load IPv4 packet length + bpf.LoadMemShift{Off: 8}, + // Get UDP dport + bpf.LoadIndirect{Off: 8 + 2, Size: 2}, + // Correct dport? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1}, + // Accept + bpf.RetConstant{Val: 1500}, + // Ignore + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + + // Minimal requirements to make a valid IPv4 header + h := &ipv4.Header{ + Len: ipv4.HeaderLen, + Src: net.IPv4(192, 168, 1, 1), + Dst: net.IPv4(192, 168, 1, 2), + } + hb, err := h.Marshal() + if err != nil { + t.Fatalf("failed to marshal IPv4 header: %v", err) + } + + hb = append([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + }, hb...) + + return vm, hb, done +} diff --git a/vendor/golang.org/x/net/bpf/vm_ret_test.go b/vendor/golang.org/x/net/bpf/vm_ret_test.go new file mode 100644 index 0000000..2d86eae --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_ret_test.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMRetA(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 9, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetALargerThanInput(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 255, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetConstant(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetConstantLargerThanInput(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.RetConstant{ + Val: 16, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_scratch_test.go b/vendor/golang.org/x/net/bpf/vm_scratch_test.go new file mode 100644 index 0000000..e600e3c --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_scratch_test.go @@ -0,0 +1,247 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMStoreScratchInvalidScratchRegisterTooSmall(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: bpf.RegA, + N: -1, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot -1" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchInvalidScratchRegisterTooLarge(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: bpf.RegA, + N: 16, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchUnknownSourceRegister(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: 100, + N: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid source register 100" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchInvalidScratchRegisterTooSmall(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: bpf.RegX, + N: -1, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot -1" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchInvalidScratchRegisterTooLarge(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: bpf.RegX, + N: 16, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchUnknownDestinationRegister(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: 100, + N: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid target register 100" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchLoadScratchOneValue(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 255 + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + // Copy to X and store in scratch[0] + bpf.TAX{}, + bpf.StoreScratch{ + Src: bpf.RegX, + N: 0, + }, + // Load byte 1 + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Overwrite 1 with 255 from scratch[0] + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 0, + }, + // Return 255 + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 255, 1, 2, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 3, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMStoreScratchLoadScratchMultipleValues(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 10 + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + // Store in scratch[0] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 0, + }, + // Load byte 20 + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Store in scratch[1] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 1, + }, + // Load byte 30 + bpf.LoadAbsolute{ + Off: 10, + Size: 1, + }, + // Store in scratch[2] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 2, + }, + // Load byte 1 + bpf.LoadAbsolute{ + Off: 11, + Size: 1, + }, + // Store in scratch[3] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 3, + }, + // Load in byte 10 to X + bpf.LoadScratch{ + Dst: bpf.RegX, + N: 0, + }, + // Copy X -> A + bpf.TXA{}, + // Verify value is 10 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 10, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Load in byte 20 to A + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 1, + }, + // Verify value is 20 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 20, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Load in byte 30 to A + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 2, + }, + // Verify value is 30 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 30, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Return first two bytes on success + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 10, 20, 30, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_test.go b/vendor/golang.org/x/net/bpf/vm_test.go new file mode 100644 index 0000000..6bd4dd5 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_test.go @@ -0,0 +1,144 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "fmt" + "testing" + + "golang.org/x/net/bpf" +) + +var _ bpf.Instruction = unknown{} + +type unknown struct{} + +func (unknown) Assemble() (bpf.RawInstruction, error) { + return bpf.RawInstruction{}, nil +} + +func TestVMUnknownInstruction(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegA, + Val: 100, + }, + // Should terminate the program with an error immediately + unknown{}, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer done() + + _, err = vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, + }) + if errStr(err) != "unknown Instruction at index 1: bpf_test.unknown" { + t.Fatalf("unexpected error while running program: %v", err) + } +} + +func TestVMNoReturnInstruction(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegA, + Val: 1, + }, + }) + if errStr(err) != "BPF program must end with RetA or RetConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMNoInputInstructions(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{}) + if errStr(err) != "one or more Instructions must be specified" { + t.Fatalf("unexpected error: %v", err) + } +} + +// ExampleNewVM demonstrates usage of a VM, using an Ethernet frame +// as input and checking its EtherType to determine if it should be accepted. +func ExampleNewVM() { + // Offset | Length | Comment + // ------------------------- + // 00 | 06 | Ethernet destination MAC address + // 06 | 06 | Ethernet source MAC address + // 12 | 02 | Ethernet EtherType + const ( + etOff = 12 + etLen = 2 + + etARP = 0x0806 + ) + + // Set up a VM to filter traffic based on if its EtherType + // matches the ARP EtherType. + vm, err := bpf.NewVM([]bpf.Instruction{ + // Load EtherType value from Ethernet header + bpf.LoadAbsolute{ + Off: etOff, + Size: etLen, + }, + // If EtherType is equal to the ARP EtherType, jump to allow + // packet to be accepted + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: etARP, + SkipTrue: 1, + }, + // EtherType does not match the ARP EtherType + bpf.RetConstant{ + Val: 0, + }, + // EtherType matches the ARP EtherType, accept up to 1500 + // bytes of packet + bpf.RetConstant{ + Val: 1500, + }, + }) + if err != nil { + panic(fmt.Sprintf("failed to load BPF program: %v", err)) + } + + // Create an Ethernet frame with the ARP EtherType for testing + frame := []byte{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, + 0x08, 0x06, + // Payload omitted for brevity + } + + // Run our VM's BPF program using the Ethernet frame as input + out, err := vm.Run(frame) + if err != nil { + panic(fmt.Sprintf("failed to accept Ethernet frame: %v", err)) + } + + // BPF VM can return a byte count greater than the number of input + // bytes, so trim the output to match the input byte length + if out > len(frame) { + out = len(frame) + } + + fmt.Printf("out: %d bytes", out) + + // Output: + // out: 14 bytes +} + +// errStr returns the string representation of an error, or +// "" if it is nil. +func errStr(err error) string { + if err == nil { + return "" + } + + return err.Error() +} diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg new file mode 100644 index 0000000..3f8b14b --- /dev/null +++ b/vendor/golang.org/x/net/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 0000000..a3c021d --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go new file mode 100644 index 0000000..6284413 --- /dev/null +++ b/vendor/golang.org/x/net/context/context_test.go @@ -0,0 +1,583 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. +type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. + check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(3*timeUnit)) + testDeadline(c, 2*timeUnit, t) +} + +func TestTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 1*timeUnit) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o = otherContext{c} + c, _ = WithTimeout(o, 3*timeUnit) + testDeadline(c, 2*timeUnit, t) +} + +func TestCanceledTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 2*timeUnit) + o := otherContext{c} + c, cancel := WithTimeout(o, 4*timeUnit) + cancel() + time.Sleep(1 * timeUnit) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := otherContext{WithValue(Background(), k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func TestAllocs(t *testing.T) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", + f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 3, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 16, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 100*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 100*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TODO(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + if n := testing.AllocsPerRun(100, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func TestSimultaneousCancels(t *testing.T) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func TestInterlockedCancels(t *testing.T) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func TestLayersCancel(t *testing.T) { + testLayers(t, time.Now().UnixNano(), false) +} + +func TestLayersTimeout(t *testing.T) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t *testing.T, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) + } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 0000000..606cf1f --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go new file mode 100644 index 0000000..72411b1 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,go1.7 + +package ctxhttp + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "context" +) + +func TestGo17Context(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + })) + defer ts.Close() + ctx := context.Background() + resp, err := Get(ctx, http.DefaultClient, ts.URL) + if resp == nil || err != nil { + t.Fatalf("error received from client: %v %v", err, resp) + } + resp.Body.Close() +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go new file mode 100644 index 0000000..926870c --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go @@ -0,0 +1,147 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // TODO(djd): Respect any existing value of req.Cancel. + cancel := make(chan struct{}) + req.Cancel = cancel + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + close(cancel) + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + close(cancel) + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go new file mode 100644 index 0000000..9159cf0 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go @@ -0,0 +1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!go1.7 + +package ctxhttp + +import ( + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "golang.org/x/net/context" +) + +// golang.org/issue/14065 +func TestClosesResponseBodyOnCancel(t *testing.T) { + defer func() { testHookContextDoneBeforeHeaders = nop }() + defer func() { testHookDoReturned = nop }() + defer func() { testHookDidBodyClose = nop }() + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // closed when Do enters select case <-ctx.Done() + enteredDonePath := make(chan struct{}) + + testHookContextDoneBeforeHeaders = func() { + close(enteredDonePath) + } + + testHookDoReturned = func() { + // We now have the result (the Flush'd headers) at least, + // so we can cancel the request. + cancel() + + // But block the client.Do goroutine from sending + // until Do enters into the <-ctx.Done() path, since + // otherwise if both channels are readable, select + // picks a random one. + <-enteredDonePath + } + + sawBodyClose := make(chan struct{}) + testHookDidBodyClose = func() { close(sawBodyClose) } + + tr := &http.Transport{} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", ts.URL, nil) + _, doErr := Do(ctx, c, req) + + select { + case <-sawBodyClose: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for body to close") + } + + if doErr != ctx.Err() { + t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) + } +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go new file mode 100644 index 0000000..1e41551 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go @@ -0,0 +1,105 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +package ctxhttp + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "golang.org/x/net/context" +) + +const ( + requestDuration = 100 * time.Millisecond + requestBody = "ok" +) + +func okHandler(w http.ResponseWriter, r *http.Request) { + time.Sleep(requestDuration) + io.WriteString(w, requestBody) +} + +func TestNoTimeout(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(okHandler)) + defer ts.Close() + + ctx := context.Background() + res, err := Get(ctx, nil, ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != requestBody { + t.Errorf("body = %q; want %q", slurp, requestBody) + } +} + +func TestCancelBeforeHeaders(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + blockServer := make(chan struct{}) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cancel() + <-blockServer + io.WriteString(w, requestBody) + })) + defer ts.Close() + defer close(blockServer) + + res, err := Get(ctx, nil, ts.URL) + if err == nil { + res.Body.Close() + t.Fatal("Get returned unexpected nil error") + } + if err != context.Canceled { + t.Errorf("err = %v; want %v", err, context.Canceled) + } +} + +func TestCancelAfterHangingRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + <-w.(http.CloseNotifier).CloseNotify() + })) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + resp, err := Get(ctx, nil, ts.URL) + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + + // Cancel befer reading the body. + // Reading Request.Body should fail, since the request was + // canceled before anything was written. + cancel() + + done := make(chan struct{}) + + go func() { + b, err := ioutil.ReadAll(resp.Body) + if len(b) != 0 || err == nil { + t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) + } + close(done) + }() + + select { + case <-time.After(1 * time.Second): + t.Errorf("Test timed out") + case <-done: + } +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 0000000..d20f52b --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 0000000..d88bd1d --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 0000000..0f35592 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 0000000..b105f80 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go new file mode 100644 index 0000000..e6f5669 --- /dev/null +++ b/vendor/golang.org/x/net/context/withtimeout_test.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "fmt" + "time" + + "golang.org/x/net/context" +) + +// This example passes a context with a timeout to tell a blocking function that +// it should abandon its work after the timeout elapses. +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + select { + case <-time.After(1 * time.Second): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) // prints "context deadline exceeded" + } + + // Output: + // context deadline exceeded +} diff --git a/vendor/golang.org/x/net/dict/dict.go b/vendor/golang.org/x/net/dict/dict.go new file mode 100644 index 0000000..93e65c0 --- /dev/null +++ b/vendor/golang.org/x/net/dict/dict.go @@ -0,0 +1,210 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dict implements the Dictionary Server Protocol +// as defined in RFC 2229. +package dict // import "golang.org/x/net/dict" + +import ( + "net/textproto" + "strconv" + "strings" +) + +// A Client represents a client connection to a dictionary server. +type Client struct { + text *textproto.Conn +} + +// Dial returns a new client connected to a dictionary server at +// addr on the given network. +func Dial(network, addr string) (*Client, error) { + text, err := textproto.Dial(network, addr) + if err != nil { + return nil, err + } + _, _, err = text.ReadCodeLine(220) + if err != nil { + text.Close() + return nil, err + } + return &Client{text: text}, nil +} + +// Close closes the connection to the dictionary server. +func (c *Client) Close() error { + return c.text.Close() +} + +// A Dict represents a dictionary available on the server. +type Dict struct { + Name string // short name of dictionary + Desc string // long description +} + +// Dicts returns a list of the dictionaries available on the server. +func (c *Client) Dicts() ([]Dict, error) { + id, err := c.text.Cmd("SHOW DB") + if err != nil { + return nil, err + } + + c.text.StartResponse(id) + defer c.text.EndResponse(id) + + _, _, err = c.text.ReadCodeLine(110) + if err != nil { + return nil, err + } + lines, err := c.text.ReadDotLines() + if err != nil { + return nil, err + } + _, _, err = c.text.ReadCodeLine(250) + + dicts := make([]Dict, len(lines)) + for i := range dicts { + d := &dicts[i] + a, _ := fields(lines[i]) + if len(a) < 2 { + return nil, textproto.ProtocolError("invalid dictionary: " + lines[i]) + } + d.Name = a[0] + d.Desc = a[1] + } + return dicts, err +} + +// A Defn represents a definition. +type Defn struct { + Dict Dict // Dict where definition was found + Word string // Word being defined + Text []byte // Definition text, typically multiple lines +} + +// Define requests the definition of the given word. +// The argument dict names the dictionary to use, +// the Name field of a Dict returned by Dicts. +// +// The special dictionary name "*" means to look in all the +// server's dictionaries. +// The special dictionary name "!" means to look in all the +// server's dictionaries in turn, stopping after finding the word +// in one of them. +func (c *Client) Define(dict, word string) ([]*Defn, error) { + id, err := c.text.Cmd("DEFINE %s %q", dict, word) + if err != nil { + return nil, err + } + + c.text.StartResponse(id) + defer c.text.EndResponse(id) + + _, line, err := c.text.ReadCodeLine(150) + if err != nil { + return nil, err + } + a, _ := fields(line) + if len(a) < 1 { + return nil, textproto.ProtocolError("malformed response: " + line) + } + n, err := strconv.Atoi(a[0]) + if err != nil { + return nil, textproto.ProtocolError("invalid definition count: " + a[0]) + } + def := make([]*Defn, n) + for i := 0; i < n; i++ { + _, line, err = c.text.ReadCodeLine(151) + if err != nil { + return nil, err + } + a, _ := fields(line) + if len(a) < 3 { + // skip it, to keep protocol in sync + i-- + n-- + def = def[0:n] + continue + } + d := &Defn{Word: a[0], Dict: Dict{a[1], a[2]}} + d.Text, err = c.text.ReadDotBytes() + if err != nil { + return nil, err + } + def[i] = d + } + _, _, err = c.text.ReadCodeLine(250) + return def, err +} + +// Fields returns the fields in s. +// Fields are space separated unquoted words +// or quoted with single or double quote. +func fields(s string) ([]string, error) { + var v []string + i := 0 + for { + for i < len(s) && (s[i] == ' ' || s[i] == '\t') { + i++ + } + if i >= len(s) { + break + } + if s[i] == '"' || s[i] == '\'' { + q := s[i] + // quoted string + var j int + for j = i + 1; ; j++ { + if j >= len(s) { + return nil, textproto.ProtocolError("malformed quoted string") + } + if s[j] == '\\' { + j++ + continue + } + if s[j] == q { + j++ + break + } + } + v = append(v, unquote(s[i+1:j-1])) + i = j + } else { + // atom + var j int + for j = i; j < len(s); j++ { + if s[j] == ' ' || s[j] == '\t' || s[j] == '\\' || s[j] == '"' || s[j] == '\'' { + break + } + } + v = append(v, s[i:j]) + i = j + } + if i < len(s) { + c := s[i] + if c != ' ' && c != '\t' { + return nil, textproto.ProtocolError("quotes not on word boundaries") + } + } + } + return v, nil +} + +func unquote(s string) string { + if strings.Index(s, "\\") < 0 { + return s + } + b := []byte(s) + w := 0 + for r := 0; r < len(b); r++ { + c := b[r] + if c == '\\' { + r++ + c = b[r] + } + b[w] = c + w++ + } + return string(b[0:w]) +} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/example_test.go b/vendor/golang.org/x/net/dns/dnsmessage/example_test.go new file mode 100644 index 0000000..8600a6b --- /dev/null +++ b/vendor/golang.org/x/net/dns/dnsmessage/example_test.go @@ -0,0 +1,132 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dnsmessage_test + +import ( + "fmt" + "net" + "strings" + + "golang.org/x/net/dns/dnsmessage" +) + +func mustNewName(name string) dnsmessage.Name { + n, err := dnsmessage.NewName(name) + if err != nil { + panic(err) + } + return n +} + +func ExampleParser() { + msg := dnsmessage.Message{ + Header: dnsmessage.Header{Response: true, Authoritative: true}, + Questions: []dnsmessage.Question{ + { + Name: mustNewName("foo.bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + { + Name: mustNewName("bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + Answers: []dnsmessage.Resource{ + { + Header: dnsmessage.ResourceHeader{ + Name: mustNewName("foo.bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 1}}, + }, + { + Header: dnsmessage.ResourceHeader{ + Name: mustNewName("bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 2}}, + }, + }, + } + + buf, err := msg.Pack() + if err != nil { + panic(err) + } + + wantName := "bar.example.com." + + var p dnsmessage.Parser + if _, err := p.Start(buf); err != nil { + panic(err) + } + + for { + q, err := p.Question() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + panic(err) + } + + if q.Name.String() != wantName { + continue + } + + fmt.Println("Found question for name", wantName) + if err := p.SkipAllQuestions(); err != nil { + panic(err) + } + break + } + + var gotIPs []net.IP + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + panic(err) + } + + if (h.Type != dnsmessage.TypeA && h.Type != dnsmessage.TypeAAAA) || h.Class != dnsmessage.ClassINET { + continue + } + + if !strings.EqualFold(h.Name.String(), wantName) { + if err := p.SkipAnswer(); err != nil { + panic(err) + } + continue + } + + switch h.Type { + case dnsmessage.TypeA: + r, err := p.AResource() + if err != nil { + panic(err) + } + gotIPs = append(gotIPs, r.A[:]) + case dnsmessage.TypeAAAA: + r, err := p.AAAAResource() + if err != nil { + panic(err) + } + gotIPs = append(gotIPs, r.AAAA[:]) + } + } + + fmt.Printf("Found A/AAAA records for name %s: %v\n", wantName, gotIPs) + + // Output: + // Found question for name bar.example.com. + // Found A/AAAA records for name bar.example.com.: [127.0.0.2] +} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message.go b/vendor/golang.org/x/net/dns/dnsmessage/message.go new file mode 100644 index 0000000..d8d3b03 --- /dev/null +++ b/vendor/golang.org/x/net/dns/dnsmessage/message.go @@ -0,0 +1,2103 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dnsmessage provides a mostly RFC 1035 compliant implementation of +// DNS message packing and unpacking. +// +// This implementation is designed to minimize heap allocations and avoid +// unnecessary packing and unpacking as much as possible. +package dnsmessage + +import ( + "errors" +) + +// Message formats + +// A Type is a type of DNS request and response. +type Type uint16 + +// A Class is a type of network. +type Class uint16 + +// An OpCode is a DNS operation code. +type OpCode uint16 + +// An RCode is a DNS response status code. +type RCode uint16 + +// Wire constants. +const ( + // ResourceHeader.Type and Question.Type + TypeA Type = 1 + TypeNS Type = 2 + TypeCNAME Type = 5 + TypeSOA Type = 6 + TypePTR Type = 12 + TypeMX Type = 15 + TypeTXT Type = 16 + TypeAAAA Type = 28 + TypeSRV Type = 33 + + // Question.Type + TypeWKS Type = 11 + TypeHINFO Type = 13 + TypeMINFO Type = 14 + TypeAXFR Type = 252 + TypeALL Type = 255 + + // ResourceHeader.Class and Question.Class + ClassINET Class = 1 + ClassCSNET Class = 2 + ClassCHAOS Class = 3 + ClassHESIOD Class = 4 + + // Question.Class + ClassANY Class = 255 + + // Message.Rcode + RCodeSuccess RCode = 0 + RCodeFormatError RCode = 1 + RCodeServerFailure RCode = 2 + RCodeNameError RCode = 3 + RCodeNotImplemented RCode = 4 + RCodeRefused RCode = 5 +) + +var ( + // ErrNotStarted indicates that the prerequisite information isn't + // available yet because the previous records haven't been appropriately + // parsed, skipped or finished. + ErrNotStarted = errors.New("parsing/packing of this type isn't available yet") + + // ErrSectionDone indicated that all records in the section have been + // parsed or finished. + ErrSectionDone = errors.New("parsing/packing of this section has completed") + + errBaseLen = errors.New("insufficient data for base length type") + errCalcLen = errors.New("insufficient data for calculated length type") + errReserved = errors.New("segment prefix is reserved") + errTooManyPtr = errors.New("too many pointers (>10)") + errInvalidPtr = errors.New("invalid pointer") + errNilResouceBody = errors.New("nil resource body") + errResourceLen = errors.New("insufficient data for resource body length") + errSegTooLong = errors.New("segment length too long") + errZeroSegLen = errors.New("zero length segment") + errResTooLong = errors.New("resource length too long") + errTooManyQuestions = errors.New("too many Questions to pack (>65535)") + errTooManyAnswers = errors.New("too many Answers to pack (>65535)") + errTooManyAuthorities = errors.New("too many Authorities to pack (>65535)") + errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)") + errNonCanonicalName = errors.New("name is not in canonical format (it must end with a .)") + errStringTooLong = errors.New("character string exceeds maximum length (255)") + errCompressedSRV = errors.New("compressed name in SRV resource data") +) + +// Internal constants. +const ( + // packStartingCap is the default initial buffer size allocated during + // packing. + // + // The starting capacity doesn't matter too much, but most DNS responses + // Will be <= 512 bytes as it is the limit for DNS over UDP. + packStartingCap = 512 + + // uint16Len is the length (in bytes) of a uint16. + uint16Len = 2 + + // uint32Len is the length (in bytes) of a uint32. + uint32Len = 4 + + // headerLen is the length (in bytes) of a DNS header. + // + // A header is comprised of 6 uint16s and no padding. + headerLen = 6 * uint16Len +) + +type nestedError struct { + // s is the current level's error message. + s string + + // err is the nested error. + err error +} + +// nestedError implements error.Error. +func (e *nestedError) Error() string { + return e.s + ": " + e.err.Error() +} + +// Header is a representation of a DNS message header. +type Header struct { + ID uint16 + Response bool + OpCode OpCode + Authoritative bool + Truncated bool + RecursionDesired bool + RecursionAvailable bool + RCode RCode +} + +func (m *Header) pack() (id uint16, bits uint16) { + id = m.ID + bits = uint16(m.OpCode)<<11 | uint16(m.RCode) + if m.RecursionAvailable { + bits |= headerBitRA + } + if m.RecursionDesired { + bits |= headerBitRD + } + if m.Truncated { + bits |= headerBitTC + } + if m.Authoritative { + bits |= headerBitAA + } + if m.Response { + bits |= headerBitQR + } + return +} + +// Message is a representation of a DNS message. +type Message struct { + Header + Questions []Question + Answers []Resource + Authorities []Resource + Additionals []Resource +} + +type section uint8 + +const ( + sectionNotStarted section = iota + sectionHeader + sectionQuestions + sectionAnswers + sectionAuthorities + sectionAdditionals + sectionDone + + headerBitQR = 1 << 15 // query/response (response=1) + headerBitAA = 1 << 10 // authoritative + headerBitTC = 1 << 9 // truncated + headerBitRD = 1 << 8 // recursion desired + headerBitRA = 1 << 7 // recursion available +) + +var sectionNames = map[section]string{ + sectionHeader: "header", + sectionQuestions: "Question", + sectionAnswers: "Answer", + sectionAuthorities: "Authority", + sectionAdditionals: "Additional", +} + +// header is the wire format for a DNS message header. +type header struct { + id uint16 + bits uint16 + questions uint16 + answers uint16 + authorities uint16 + additionals uint16 +} + +func (h *header) count(sec section) uint16 { + switch sec { + case sectionQuestions: + return h.questions + case sectionAnswers: + return h.answers + case sectionAuthorities: + return h.authorities + case sectionAdditionals: + return h.additionals + } + return 0 +} + +// pack appends the wire format of the header to msg. +func (h *header) pack(msg []byte) []byte { + msg = packUint16(msg, h.id) + msg = packUint16(msg, h.bits) + msg = packUint16(msg, h.questions) + msg = packUint16(msg, h.answers) + msg = packUint16(msg, h.authorities) + return packUint16(msg, h.additionals) +} + +func (h *header) unpack(msg []byte, off int) (int, error) { + newOff := off + var err error + if h.id, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"id", err} + } + if h.bits, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"bits", err} + } + if h.questions, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"questions", err} + } + if h.answers, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"answers", err} + } + if h.authorities, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"authorities", err} + } + if h.additionals, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"additionals", err} + } + return newOff, nil +} + +func (h *header) header() Header { + return Header{ + ID: h.id, + Response: (h.bits & headerBitQR) != 0, + OpCode: OpCode(h.bits>>11) & 0xF, + Authoritative: (h.bits & headerBitAA) != 0, + Truncated: (h.bits & headerBitTC) != 0, + RecursionDesired: (h.bits & headerBitRD) != 0, + RecursionAvailable: (h.bits & headerBitRA) != 0, + RCode: RCode(h.bits & 0xF), + } +} + +// A Resource is a DNS resource record. +type Resource struct { + Header ResourceHeader + Body ResourceBody +} + +// A ResourceBody is a DNS resource record minus the header. +type ResourceBody interface { + // pack packs a Resource except for its header. + pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) + + // realType returns the actual type of the Resource. This is used to + // fill in the header Type field. + realType() Type +} + +// pack appends the wire format of the Resource to msg. +func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + if r.Body == nil { + return msg, errNilResouceBody + } + oldMsg := msg + r.Header.Type = r.Body.realType() + msg, length, err := r.Header.pack(msg, compression, compressionOff) + if err != nil { + return msg, &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + msg, err = r.Body.pack(msg, compression, compressionOff) + if err != nil { + return msg, &nestedError{"content", err} + } + if err := r.Header.fixLen(msg, length, preLen); err != nil { + return oldMsg, err + } + return msg, nil +} + +// A Parser allows incrementally parsing a DNS message. +// +// When parsing is started, the Header is parsed. Next, each Question can be +// either parsed or skipped. Alternatively, all Questions can be skipped at +// once. When all Questions have been parsed, attempting to parse Questions +// will return (nil, nil) and attempting to skip Questions will return +// (true, nil). After all Questions have been either parsed or skipped, all +// Answers, Authorities and Additionals can be either parsed or skipped in the +// same way, and each type of Resource must be fully parsed or skipped before +// proceeding to the next type of Resource. +// +// Note that there is no requirement to fully skip or parse the message. +type Parser struct { + msg []byte + header header + + section section + off int + index int + resHeaderValid bool + resHeader ResourceHeader +} + +// Start parses the header and enables the parsing of Questions. +func (p *Parser) Start(msg []byte) (Header, error) { + if p.msg != nil { + *p = Parser{} + } + p.msg = msg + var err error + if p.off, err = p.header.unpack(msg, 0); err != nil { + return Header{}, &nestedError{"unpacking header", err} + } + p.section = sectionQuestions + return p.header.header(), nil +} + +func (p *Parser) checkAdvance(sec section) error { + if p.section < sec { + return ErrNotStarted + } + if p.section > sec { + return ErrSectionDone + } + p.resHeaderValid = false + if p.index == int(p.header.count(sec)) { + p.index = 0 + p.section++ + return ErrSectionDone + } + return nil +} + +func (p *Parser) resource(sec section) (Resource, error) { + var r Resource + var err error + r.Header, err = p.resourceHeader(sec) + if err != nil { + return r, err + } + p.resHeaderValid = false + r.Body, p.off, err = unpackResourceBody(p.msg, p.off, r.Header) + if err != nil { + return Resource{}, &nestedError{"unpacking " + sectionNames[sec], err} + } + p.index++ + return r, nil +} + +func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) { + if p.resHeaderValid { + return p.resHeader, nil + } + if err := p.checkAdvance(sec); err != nil { + return ResourceHeader{}, err + } + var hdr ResourceHeader + off, err := hdr.unpack(p.msg, p.off) + if err != nil { + return ResourceHeader{}, err + } + p.resHeaderValid = true + p.resHeader = hdr + p.off = off + return hdr, nil +} + +func (p *Parser) skipResource(sec section) error { + if p.resHeaderValid { + newOff := p.off + int(p.resHeader.Length) + if newOff > len(p.msg) { + return errResourceLen + } + p.off = newOff + p.resHeaderValid = false + p.index++ + return nil + } + if err := p.checkAdvance(sec); err != nil { + return err + } + var err error + p.off, err = skipResource(p.msg, p.off) + if err != nil { + return &nestedError{"skipping: " + sectionNames[sec], err} + } + p.index++ + return nil +} + +// Question parses a single Question. +func (p *Parser) Question() (Question, error) { + if err := p.checkAdvance(sectionQuestions); err != nil { + return Question{}, err + } + var name Name + off, err := name.unpack(p.msg, p.off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Name", err} + } + typ, off, err := unpackType(p.msg, off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Type", err} + } + class, off, err := unpackClass(p.msg, off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Class", err} + } + p.off = off + p.index++ + return Question{name, typ, class}, nil +} + +// AllQuestions parses all Questions. +func (p *Parser) AllQuestions() ([]Question, error) { + // Multiple questions are valid according to the spec, + // but servers don't actually support them. There will + // be at most one question here. + // + // Do not pre-allocate based on info in p.header, since + // the data is untrusted. + qs := []Question{} + for { + q, err := p.Question() + if err == ErrSectionDone { + return qs, nil + } + if err != nil { + return nil, err + } + qs = append(qs, q) + } +} + +// SkipQuestion skips a single Question. +func (p *Parser) SkipQuestion() error { + if err := p.checkAdvance(sectionQuestions); err != nil { + return err + } + off, err := skipName(p.msg, p.off) + if err != nil { + return &nestedError{"skipping Question Name", err} + } + if off, err = skipType(p.msg, off); err != nil { + return &nestedError{"skipping Question Type", err} + } + if off, err = skipClass(p.msg, off); err != nil { + return &nestedError{"skipping Question Class", err} + } + p.off = off + p.index++ + return nil +} + +// SkipAllQuestions skips all Questions. +func (p *Parser) SkipAllQuestions() error { + for { + if err := p.SkipQuestion(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AnswerHeader parses a single Answer ResourceHeader. +func (p *Parser) AnswerHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAnswers) +} + +// Answer parses a single Answer Resource. +func (p *Parser) Answer() (Resource, error) { + return p.resource(sectionAnswers) +} + +// AllAnswers parses all Answer Resources. +func (p *Parser) AllAnswers() ([]Resource, error) { + // The most common query is for A/AAAA, which usually returns + // a handful of IPs. + // + // Pre-allocate up to a certain limit, since p.header is + // untrusted data. + n := int(p.header.answers) + if n > 20 { + n = 20 + } + as := make([]Resource, 0, n) + for { + a, err := p.Answer() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAnswer skips a single Answer Resource. +func (p *Parser) SkipAnswer() error { + return p.skipResource(sectionAnswers) +} + +// SkipAllAnswers skips all Answer Resources. +func (p *Parser) SkipAllAnswers() error { + for { + if err := p.SkipAnswer(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AuthorityHeader parses a single Authority ResourceHeader. +func (p *Parser) AuthorityHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAuthorities) +} + +// Authority parses a single Authority Resource. +func (p *Parser) Authority() (Resource, error) { + return p.resource(sectionAuthorities) +} + +// AllAuthorities parses all Authority Resources. +func (p *Parser) AllAuthorities() ([]Resource, error) { + // Authorities contains SOA in case of NXDOMAIN and friends, + // otherwise it is empty. + // + // Pre-allocate up to a certain limit, since p.header is + // untrusted data. + n := int(p.header.authorities) + if n > 10 { + n = 10 + } + as := make([]Resource, 0, n) + for { + a, err := p.Authority() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAuthority skips a single Authority Resource. +func (p *Parser) SkipAuthority() error { + return p.skipResource(sectionAuthorities) +} + +// SkipAllAuthorities skips all Authority Resources. +func (p *Parser) SkipAllAuthorities() error { + for { + if err := p.SkipAuthority(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AdditionalHeader parses a single Additional ResourceHeader. +func (p *Parser) AdditionalHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAdditionals) +} + +// Additional parses a single Additional Resource. +func (p *Parser) Additional() (Resource, error) { + return p.resource(sectionAdditionals) +} + +// AllAdditionals parses all Additional Resources. +func (p *Parser) AllAdditionals() ([]Resource, error) { + // Additionals usually contain OPT, and sometimes A/AAAA + // glue records. + // + // Pre-allocate up to a certain limit, since p.header is + // untrusted data. + n := int(p.header.additionals) + if n > 10 { + n = 10 + } + as := make([]Resource, 0, n) + for { + a, err := p.Additional() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAdditional skips a single Additional Resource. +func (p *Parser) SkipAdditional() error { + return p.skipResource(sectionAdditionals) +} + +// SkipAllAdditionals skips all Additional Resources. +func (p *Parser) SkipAllAdditionals() error { + for { + if err := p.SkipAdditional(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// CNAMEResource parses a single CNAMEResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) CNAMEResource() (CNAMEResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeCNAME { + return CNAMEResource{}, ErrNotStarted + } + r, err := unpackCNAMEResource(p.msg, p.off) + if err != nil { + return CNAMEResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// MXResource parses a single MXResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) MXResource() (MXResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeMX { + return MXResource{}, ErrNotStarted + } + r, err := unpackMXResource(p.msg, p.off) + if err != nil { + return MXResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// NSResource parses a single NSResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) NSResource() (NSResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeNS { + return NSResource{}, ErrNotStarted + } + r, err := unpackNSResource(p.msg, p.off) + if err != nil { + return NSResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// PTRResource parses a single PTRResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) PTRResource() (PTRResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypePTR { + return PTRResource{}, ErrNotStarted + } + r, err := unpackPTRResource(p.msg, p.off) + if err != nil { + return PTRResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// SOAResource parses a single SOAResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) SOAResource() (SOAResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeSOA { + return SOAResource{}, ErrNotStarted + } + r, err := unpackSOAResource(p.msg, p.off) + if err != nil { + return SOAResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// TXTResource parses a single TXTResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) TXTResource() (TXTResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeTXT { + return TXTResource{}, ErrNotStarted + } + r, err := unpackTXTResource(p.msg, p.off, p.resHeader.Length) + if err != nil { + return TXTResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// SRVResource parses a single SRVResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) SRVResource() (SRVResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeSRV { + return SRVResource{}, ErrNotStarted + } + r, err := unpackSRVResource(p.msg, p.off) + if err != nil { + return SRVResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// AResource parses a single AResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) AResource() (AResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeA { + return AResource{}, ErrNotStarted + } + r, err := unpackAResource(p.msg, p.off) + if err != nil { + return AResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// AAAAResource parses a single AAAAResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) AAAAResource() (AAAAResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeAAAA { + return AAAAResource{}, ErrNotStarted + } + r, err := unpackAAAAResource(p.msg, p.off) + if err != nil { + return AAAAResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// Unpack parses a full Message. +func (m *Message) Unpack(msg []byte) error { + var p Parser + var err error + if m.Header, err = p.Start(msg); err != nil { + return err + } + if m.Questions, err = p.AllQuestions(); err != nil { + return err + } + if m.Answers, err = p.AllAnswers(); err != nil { + return err + } + if m.Authorities, err = p.AllAuthorities(); err != nil { + return err + } + if m.Additionals, err = p.AllAdditionals(); err != nil { + return err + } + return nil +} + +// Pack packs a full Message. +func (m *Message) Pack() ([]byte, error) { + return m.AppendPack(make([]byte, 0, packStartingCap)) +} + +// AppendPack is like Pack but appends the full Message to b and returns the +// extended buffer. +func (m *Message) AppendPack(b []byte) ([]byte, error) { + // Validate the lengths. It is very unlikely that anyone will try to + // pack more than 65535 of any particular type, but it is possible and + // we should fail gracefully. + if len(m.Questions) > int(^uint16(0)) { + return nil, errTooManyQuestions + } + if len(m.Answers) > int(^uint16(0)) { + return nil, errTooManyAnswers + } + if len(m.Authorities) > int(^uint16(0)) { + return nil, errTooManyAuthorities + } + if len(m.Additionals) > int(^uint16(0)) { + return nil, errTooManyAdditionals + } + + var h header + h.id, h.bits = m.Header.pack() + + h.questions = uint16(len(m.Questions)) + h.answers = uint16(len(m.Answers)) + h.authorities = uint16(len(m.Authorities)) + h.additionals = uint16(len(m.Additionals)) + + compressionOff := len(b) + msg := h.pack(b) + + // RFC 1035 allows (but does not require) compression for packing. RFC + // 1035 requires unpacking implementations to support compression, so + // unconditionally enabling it is fine. + // + // DNS lookups are typically done over UDP, and RFC 1035 states that UDP + // DNS messages can be a maximum of 512 bytes long. Without compression, + // many DNS response messages are over this limit, so enabling + // compression will help ensure compliance. + compression := map[string]int{} + + for i := range m.Questions { + var err error + if msg, err = m.Questions[i].pack(msg, compression, compressionOff); err != nil { + return nil, &nestedError{"packing Question", err} + } + } + for i := range m.Answers { + var err error + if msg, err = m.Answers[i].pack(msg, compression, compressionOff); err != nil { + return nil, &nestedError{"packing Answer", err} + } + } + for i := range m.Authorities { + var err error + if msg, err = m.Authorities[i].pack(msg, compression, compressionOff); err != nil { + return nil, &nestedError{"packing Authority", err} + } + } + for i := range m.Additionals { + var err error + if msg, err = m.Additionals[i].pack(msg, compression, compressionOff); err != nil { + return nil, &nestedError{"packing Additional", err} + } + } + + return msg, nil +} + +// A Builder allows incrementally packing a DNS message. +// +// Example usage: +// buf := make([]byte, 2, 514) +// b := NewBuilder(buf, Header{...}) +// b.EnableCompression() +// // Optionally start a section and add things to that section. +// // Repeat adding sections as necessary. +// buf, err := b.Finish() +// // If err is nil, buf[2:] will contain the built bytes. +type Builder struct { + // msg is the storage for the message being built. + msg []byte + + // section keeps track of the current section being built. + section section + + // header keeps track of what should go in the header when Finish is + // called. + header header + + // start is the starting index of the bytes allocated in msg for header. + start int + + // compression is a mapping from name suffixes to their starting index + // in msg. + compression map[string]int +} + +// NewBuilder creates a new builder with compression disabled. +// +// Note: Most users will want to immediately enable compression with the +// EnableCompression method. See that method's comment for why you may or may +// not want to enable compression. +// +// The DNS message is appended to the provided initial buffer buf (which may be +// nil) as it is built. The final message is returned by the (*Builder).Finish +// method, which may return the same underlying array if there was sufficient +// capacity in the slice. +func NewBuilder(buf []byte, h Header) Builder { + if buf == nil { + buf = make([]byte, 0, packStartingCap) + } + b := Builder{msg: buf, start: len(buf)} + b.header.id, b.header.bits = h.pack() + var hb [headerLen]byte + b.msg = append(b.msg, hb[:]...) + b.section = sectionHeader + return b +} + +// EnableCompression enables compression in the Builder. +// +// Leaving compression disabled avoids compression related allocations, but can +// result in larger message sizes. Be careful with this mode as it can cause +// messages to exceed the UDP size limit. +// +// According to RFC 1035, section 4.1.4, the use of compression is optional, but +// all implementations must accept both compressed and uncompressed DNS +// messages. +// +// Compression should be enabled before any sections are added for best results. +func (b *Builder) EnableCompression() { + b.compression = map[string]int{} +} + +func (b *Builder) startCheck(s section) error { + if b.section <= sectionNotStarted { + return ErrNotStarted + } + if b.section > s { + return ErrSectionDone + } + return nil +} + +// StartQuestions prepares the builder for packing Questions. +func (b *Builder) StartQuestions() error { + if err := b.startCheck(sectionQuestions); err != nil { + return err + } + b.section = sectionQuestions + return nil +} + +// StartAnswers prepares the builder for packing Answers. +func (b *Builder) StartAnswers() error { + if err := b.startCheck(sectionAnswers); err != nil { + return err + } + b.section = sectionAnswers + return nil +} + +// StartAuthorities prepares the builder for packing Authorities. +func (b *Builder) StartAuthorities() error { + if err := b.startCheck(sectionAuthorities); err != nil { + return err + } + b.section = sectionAuthorities + return nil +} + +// StartAdditionals prepares the builder for packing Additionals. +func (b *Builder) StartAdditionals() error { + if err := b.startCheck(sectionAdditionals); err != nil { + return err + } + b.section = sectionAdditionals + return nil +} + +func (b *Builder) incrementSectionCount() error { + var count *uint16 + var err error + switch b.section { + case sectionQuestions: + count = &b.header.questions + err = errTooManyQuestions + case sectionAnswers: + count = &b.header.answers + err = errTooManyAnswers + case sectionAuthorities: + count = &b.header.authorities + err = errTooManyAuthorities + case sectionAdditionals: + count = &b.header.additionals + err = errTooManyAdditionals + } + if *count == ^uint16(0) { + return err + } + *count++ + return nil +} + +// Question adds a single Question. +func (b *Builder) Question(q Question) error { + if b.section < sectionQuestions { + return ErrNotStarted + } + if b.section > sectionQuestions { + return ErrSectionDone + } + msg, err := q.pack(b.msg, b.compression, b.start) + if err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +func (b *Builder) checkResourceSection() error { + if b.section < sectionAnswers { + return ErrNotStarted + } + if b.section > sectionAdditionals { + return ErrSectionDone + } + return nil +} + +// CNAMEResource adds a single CNAMEResource. +func (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"CNAMEResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// MXResource adds a single MXResource. +func (b *Builder) MXResource(h ResourceHeader, r MXResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"MXResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// NSResource adds a single NSResource. +func (b *Builder) NSResource(h ResourceHeader, r NSResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"NSResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// PTRResource adds a single PTRResource. +func (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"PTRResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// SOAResource adds a single SOAResource. +func (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"SOAResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// TXTResource adds a single TXTResource. +func (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"TXTResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// SRVResource adds a single SRVResource. +func (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"SRVResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// AResource adds a single AResource. +func (b *Builder) AResource(h ResourceHeader, r AResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"AResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// AAAAResource adds a single AAAAResource. +func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"AAAAResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// Finish ends message building and generates a binary message. +func (b *Builder) Finish() ([]byte, error) { + if b.section < sectionHeader { + return nil, ErrNotStarted + } + b.section = sectionDone + // Space for the header was allocated in NewBuilder. + b.header.pack(b.msg[b.start:b.start]) + return b.msg, nil +} + +// A ResourceHeader is the header of a DNS resource record. There are +// many types of DNS resource records, but they all share the same header. +type ResourceHeader struct { + // Name is the domain name for which this resource record pertains. + Name Name + + // Type is the type of DNS resource record. + // + // This field will be set automatically during packing. + Type Type + + // Class is the class of network to which this DNS resource record + // pertains. + Class Class + + // TTL is the length of time (measured in seconds) which this resource + // record is valid for (time to live). All Resources in a set should + // have the same TTL (RFC 2181 Section 5.2). + TTL uint32 + + // Length is the length of data in the resource record after the header. + // + // This field will be set automatically during packing. + Length uint16 +} + +// pack appends the wire format of the ResourceHeader to oldMsg. +// +// The bytes where length was packed are returned as a slice so they can be +// updated after the rest of the Resource has been packed. +func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, length []byte, err error) { + msg = oldMsg + if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil { + return oldMsg, nil, &nestedError{"Name", err} + } + msg = packType(msg, h.Type) + msg = packClass(msg, h.Class) + msg = packUint32(msg, h.TTL) + lenBegin := len(msg) + msg = packUint16(msg, h.Length) + return msg, msg[lenBegin : lenBegin+uint16Len], nil +} + +func (h *ResourceHeader) unpack(msg []byte, off int) (int, error) { + newOff := off + var err error + if newOff, err = h.Name.unpack(msg, newOff); err != nil { + return off, &nestedError{"Name", err} + } + if h.Type, newOff, err = unpackType(msg, newOff); err != nil { + return off, &nestedError{"Type", err} + } + if h.Class, newOff, err = unpackClass(msg, newOff); err != nil { + return off, &nestedError{"Class", err} + } + if h.TTL, newOff, err = unpackUint32(msg, newOff); err != nil { + return off, &nestedError{"TTL", err} + } + if h.Length, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"Length", err} + } + return newOff, nil +} + +func (h *ResourceHeader) fixLen(msg []byte, length []byte, preLen int) error { + conLen := len(msg) - preLen + if conLen > int(^uint16(0)) { + return errResTooLong + } + + // Fill in the length now that we know how long the content is. + packUint16(length[:0], uint16(conLen)) + h.Length = uint16(conLen) + + return nil +} + +func skipResource(msg []byte, off int) (int, error) { + newOff, err := skipName(msg, off) + if err != nil { + return off, &nestedError{"Name", err} + } + if newOff, err = skipType(msg, newOff); err != nil { + return off, &nestedError{"Type", err} + } + if newOff, err = skipClass(msg, newOff); err != nil { + return off, &nestedError{"Class", err} + } + if newOff, err = skipUint32(msg, newOff); err != nil { + return off, &nestedError{"TTL", err} + } + length, newOff, err := unpackUint16(msg, newOff) + if err != nil { + return off, &nestedError{"Length", err} + } + if newOff += int(length); newOff > len(msg) { + return off, errResourceLen + } + return newOff, nil +} + +// packUint16 appends the wire format of field to msg. +func packUint16(msg []byte, field uint16) []byte { + return append(msg, byte(field>>8), byte(field)) +} + +func unpackUint16(msg []byte, off int) (uint16, int, error) { + if off+uint16Len > len(msg) { + return 0, off, errBaseLen + } + return uint16(msg[off])<<8 | uint16(msg[off+1]), off + uint16Len, nil +} + +func skipUint16(msg []byte, off int) (int, error) { + if off+uint16Len > len(msg) { + return off, errBaseLen + } + return off + uint16Len, nil +} + +// packType appends the wire format of field to msg. +func packType(msg []byte, field Type) []byte { + return packUint16(msg, uint16(field)) +} + +func unpackType(msg []byte, off int) (Type, int, error) { + t, o, err := unpackUint16(msg, off) + return Type(t), o, err +} + +func skipType(msg []byte, off int) (int, error) { + return skipUint16(msg, off) +} + +// packClass appends the wire format of field to msg. +func packClass(msg []byte, field Class) []byte { + return packUint16(msg, uint16(field)) +} + +func unpackClass(msg []byte, off int) (Class, int, error) { + c, o, err := unpackUint16(msg, off) + return Class(c), o, err +} + +func skipClass(msg []byte, off int) (int, error) { + return skipUint16(msg, off) +} + +// packUint32 appends the wire format of field to msg. +func packUint32(msg []byte, field uint32) []byte { + return append( + msg, + byte(field>>24), + byte(field>>16), + byte(field>>8), + byte(field), + ) +} + +func unpackUint32(msg []byte, off int) (uint32, int, error) { + if off+uint32Len > len(msg) { + return 0, off, errBaseLen + } + v := uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3]) + return v, off + uint32Len, nil +} + +func skipUint32(msg []byte, off int) (int, error) { + if off+uint32Len > len(msg) { + return off, errBaseLen + } + return off + uint32Len, nil +} + +// packText appends the wire format of field to msg. +func packText(msg []byte, field string) ([]byte, error) { + l := len(field) + if l > 255 { + return nil, errStringTooLong + } + msg = append(msg, byte(l)) + msg = append(msg, field...) + + return msg, nil +} + +func unpackText(msg []byte, off int) (string, int, error) { + if off >= len(msg) { + return "", off, errBaseLen + } + beginOff := off + 1 + endOff := beginOff + int(msg[off]) + if endOff > len(msg) { + return "", off, errCalcLen + } + return string(msg[beginOff:endOff]), endOff, nil +} + +func skipText(msg []byte, off int) (int, error) { + if off >= len(msg) { + return off, errBaseLen + } + endOff := off + 1 + int(msg[off]) + if endOff > len(msg) { + return off, errCalcLen + } + return endOff, nil +} + +// packBytes appends the wire format of field to msg. +func packBytes(msg []byte, field []byte) []byte { + return append(msg, field...) +} + +func unpackBytes(msg []byte, off int, field []byte) (int, error) { + newOff := off + len(field) + if newOff > len(msg) { + return off, errBaseLen + } + copy(field, msg[off:newOff]) + return newOff, nil +} + +func skipBytes(msg []byte, off int, field []byte) (int, error) { + newOff := off + len(field) + if newOff > len(msg) { + return off, errBaseLen + } + return newOff, nil +} + +const nameLen = 255 + +// A Name is a non-encoded domain name. It is used instead of strings to avoid +// allocations. +type Name struct { + Data [nameLen]byte + Length uint8 +} + +// NewName creates a new Name from a string. +func NewName(name string) (Name, error) { + if len([]byte(name)) > nameLen { + return Name{}, errCalcLen + } + n := Name{Length: uint8(len(name))} + copy(n.Data[:], []byte(name)) + return n, nil +} + +func (n Name) String() string { + return string(n.Data[:n.Length]) +} + +// pack appends the wire format of the Name to msg. +// +// Domain names are a sequence of counted strings split at the dots. They end +// with a zero-length string. Compression can be used to reuse domain suffixes. +// +// The compression map will be updated with new domain suffixes. If compression +// is nil, compression will not be used. +func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + oldMsg := msg + + // Add a trailing dot to canonicalize name. + if n.Length == 0 || n.Data[n.Length-1] != '.' { + return oldMsg, errNonCanonicalName + } + + // Allow root domain. + if n.Data[0] == '.' && n.Length == 1 { + return append(msg, 0), nil + } + + // Emit sequence of counted strings, chopping at dots. + for i, begin := 0, 0; i < int(n.Length); i++ { + // Check for the end of the segment. + if n.Data[i] == '.' { + // The two most significant bits have special meaning. + // It isn't allowed for segments to be long enough to + // need them. + if i-begin >= 1<<6 { + return oldMsg, errSegTooLong + } + + // Segments must have a non-zero length. + if i-begin == 0 { + return oldMsg, errZeroSegLen + } + + msg = append(msg, byte(i-begin)) + + for j := begin; j < i; j++ { + msg = append(msg, n.Data[j]) + } + + begin = i + 1 + continue + } + + // We can only compress domain suffixes starting with a new + // segment. A pointer is two bytes with the two most significant + // bits set to 1 to indicate that it is a pointer. + if (i == 0 || n.Data[i-1] == '.') && compression != nil { + if ptr, ok := compression[string(n.Data[i:])]; ok { + // Hit. Emit a pointer instead of the rest of + // the domain. + return append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil + } + + // Miss. Add the suffix to the compression table if the + // offset can be stored in the available 14 bytes. + if len(msg) <= int(^uint16(0)>>2) { + compression[string(n.Data[i:])] = len(msg) - compressionOff + } + } + } + return append(msg, 0), nil +} + +// unpack unpacks a domain name. +func (n *Name) unpack(msg []byte, off int) (int, error) { + return n.unpackCompressed(msg, off, true /* allowCompression */) +} + +func (n *Name) unpackCompressed(msg []byte, off int, allowCompression bool) (int, error) { + // currOff is the current working offset. + currOff := off + + // newOff is the offset where the next record will start. Pointers lead + // to data that belongs to other names and thus doesn't count towards to + // the usage of this name. + newOff := off + + // ptr is the number of pointers followed. + var ptr int + + // Name is a slice representation of the name data. + name := n.Data[:0] + +Loop: + for { + if currOff >= len(msg) { + return off, errBaseLen + } + c := int(msg[currOff]) + currOff++ + switch c & 0xC0 { + case 0x00: // String segment + if c == 0x00 { + // A zero length signals the end of the name. + break Loop + } + endOff := currOff + c + if endOff > len(msg) { + return off, errCalcLen + } + name = append(name, msg[currOff:endOff]...) + name = append(name, '.') + currOff = endOff + case 0xC0: // Pointer + if !allowCompression { + return off, errCompressedSRV + } + if currOff >= len(msg) { + return off, errInvalidPtr + } + c1 := msg[currOff] + currOff++ + if ptr == 0 { + newOff = currOff + } + // Don't follow too many pointers, maybe there's a loop. + if ptr++; ptr > 10 { + return off, errTooManyPtr + } + currOff = (c^0xC0)<<8 | int(c1) + default: + // Prefixes 0x80 and 0x40 are reserved. + return off, errReserved + } + } + if len(name) == 0 { + name = append(name, '.') + } + if len(name) > len(n.Data) { + return off, errCalcLen + } + n.Length = uint8(len(name)) + if ptr == 0 { + newOff = currOff + } + return newOff, nil +} + +func skipName(msg []byte, off int) (int, error) { + // newOff is the offset where the next record will start. Pointers lead + // to data that belongs to other names and thus doesn't count towards to + // the usage of this name. + newOff := off + +Loop: + for { + if newOff >= len(msg) { + return off, errBaseLen + } + c := int(msg[newOff]) + newOff++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // A zero length signals the end of the name. + break Loop + } + // literal string + newOff += c + if newOff > len(msg) { + return off, errCalcLen + } + case 0xC0: + // Pointer to somewhere else in msg. + + // Pointers are two bytes. + newOff++ + + // Don't follow the pointer as the data here has ended. + break Loop + default: + // Prefixes 0x80 and 0x40 are reserved. + return off, errReserved + } + } + + return newOff, nil +} + +// A Question is a DNS query. +type Question struct { + Name Name + Type Type + Class Class +} + +// pack appends the wire format of the Question to msg. +func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + msg, err := q.Name.pack(msg, compression, compressionOff) + if err != nil { + return msg, &nestedError{"Name", err} + } + msg = packType(msg, q.Type) + return packClass(msg, q.Class), nil +} + +func unpackResourceBody(msg []byte, off int, hdr ResourceHeader) (ResourceBody, int, error) { + var ( + r ResourceBody + err error + name string + ) + switch hdr.Type { + case TypeA: + var rb AResource + rb, err = unpackAResource(msg, off) + r = &rb + name = "A" + case TypeNS: + var rb NSResource + rb, err = unpackNSResource(msg, off) + r = &rb + name = "NS" + case TypeCNAME: + var rb CNAMEResource + rb, err = unpackCNAMEResource(msg, off) + r = &rb + name = "CNAME" + case TypeSOA: + var rb SOAResource + rb, err = unpackSOAResource(msg, off) + r = &rb + name = "SOA" + case TypePTR: + var rb PTRResource + rb, err = unpackPTRResource(msg, off) + r = &rb + name = "PTR" + case TypeMX: + var rb MXResource + rb, err = unpackMXResource(msg, off) + r = &rb + name = "MX" + case TypeTXT: + var rb TXTResource + rb, err = unpackTXTResource(msg, off, hdr.Length) + r = &rb + name = "TXT" + case TypeAAAA: + var rb AAAAResource + rb, err = unpackAAAAResource(msg, off) + r = &rb + name = "AAAA" + case TypeSRV: + var rb SRVResource + rb, err = unpackSRVResource(msg, off) + r = &rb + name = "SRV" + } + if err != nil { + return nil, off, &nestedError{name + " record", err} + } + if r == nil { + return nil, off, errors.New("invalid resource type: " + string(hdr.Type+'0')) + } + return r, off + int(hdr.Length), nil +} + +// A CNAMEResource is a CNAME Resource record. +type CNAMEResource struct { + CNAME Name +} + +func (r *CNAMEResource) realType() Type { + return TypeCNAME +} + +// pack appends the wire format of the CNAMEResource to msg. +func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.CNAME.pack(msg, compression, compressionOff) +} + +func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) { + var cname Name + if _, err := cname.unpack(msg, off); err != nil { + return CNAMEResource{}, err + } + return CNAMEResource{cname}, nil +} + +// An MXResource is an MX Resource record. +type MXResource struct { + Pref uint16 + MX Name +} + +func (r *MXResource) realType() Type { + return TypeMX +} + +// pack appends the wire format of the MXResource to msg. +func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + oldMsg := msg + msg = packUint16(msg, r.Pref) + msg, err := r.MX.pack(msg, compression, compressionOff) + if err != nil { + return oldMsg, &nestedError{"MXResource.MX", err} + } + return msg, nil +} + +func unpackMXResource(msg []byte, off int) (MXResource, error) { + pref, off, err := unpackUint16(msg, off) + if err != nil { + return MXResource{}, &nestedError{"Pref", err} + } + var mx Name + if _, err := mx.unpack(msg, off); err != nil { + return MXResource{}, &nestedError{"MX", err} + } + return MXResource{pref, mx}, nil +} + +// An NSResource is an NS Resource record. +type NSResource struct { + NS Name +} + +func (r *NSResource) realType() Type { + return TypeNS +} + +// pack appends the wire format of the NSResource to msg. +func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.NS.pack(msg, compression, compressionOff) +} + +func unpackNSResource(msg []byte, off int) (NSResource, error) { + var ns Name + if _, err := ns.unpack(msg, off); err != nil { + return NSResource{}, err + } + return NSResource{ns}, nil +} + +// A PTRResource is a PTR Resource record. +type PTRResource struct { + PTR Name +} + +func (r *PTRResource) realType() Type { + return TypePTR +} + +// pack appends the wire format of the PTRResource to msg. +func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.PTR.pack(msg, compression, compressionOff) +} + +func unpackPTRResource(msg []byte, off int) (PTRResource, error) { + var ptr Name + if _, err := ptr.unpack(msg, off); err != nil { + return PTRResource{}, err + } + return PTRResource{ptr}, nil +} + +// An SOAResource is an SOA Resource record. +type SOAResource struct { + NS Name + MBox Name + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + + // MinTTL the is the default TTL of Resources records which did not + // contain a TTL value and the TTL of negative responses. (RFC 2308 + // Section 4) + MinTTL uint32 +} + +func (r *SOAResource) realType() Type { + return TypeSOA +} + +// pack appends the wire format of the SOAResource to msg. +func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + oldMsg := msg + msg, err := r.NS.pack(msg, compression, compressionOff) + if err != nil { + return oldMsg, &nestedError{"SOAResource.NS", err} + } + msg, err = r.MBox.pack(msg, compression, compressionOff) + if err != nil { + return oldMsg, &nestedError{"SOAResource.MBox", err} + } + msg = packUint32(msg, r.Serial) + msg = packUint32(msg, r.Refresh) + msg = packUint32(msg, r.Retry) + msg = packUint32(msg, r.Expire) + return packUint32(msg, r.MinTTL), nil +} + +func unpackSOAResource(msg []byte, off int) (SOAResource, error) { + var ns Name + off, err := ns.unpack(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"NS", err} + } + var mbox Name + if off, err = mbox.unpack(msg, off); err != nil { + return SOAResource{}, &nestedError{"MBox", err} + } + serial, off, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"Serial", err} + } + refresh, off, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"Refresh", err} + } + retry, off, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"Retry", err} + } + expire, off, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"Expire", err} + } + minTTL, _, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"MinTTL", err} + } + return SOAResource{ns, mbox, serial, refresh, retry, expire, minTTL}, nil +} + +// A TXTResource is a TXT Resource record. +type TXTResource struct { + TXT []string +} + +func (r *TXTResource) realType() Type { + return TypeTXT +} + +// pack appends the wire format of the TXTResource to msg. +func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + oldMsg := msg + for _, s := range r.TXT { + var err error + msg, err = packText(msg, s) + if err != nil { + return oldMsg, err + } + } + return msg, nil +} + +func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) { + txts := make([]string, 0, 1) + for n := uint16(0); n < length; { + var t string + var err error + if t, off, err = unpackText(msg, off); err != nil { + return TXTResource{}, &nestedError{"text", err} + } + // Check if we got too many bytes. + if length-n < uint16(len(t))+1 { + return TXTResource{}, errCalcLen + } + n += uint16(len(t)) + 1 + txts = append(txts, t) + } + return TXTResource{txts}, nil +} + +// An SRVResource is an SRV Resource record. +type SRVResource struct { + Priority uint16 + Weight uint16 + Port uint16 + Target Name // Not compressed as per RFC 2782. +} + +func (r *SRVResource) realType() Type { + return TypeSRV +} + +// pack appends the wire format of the SRVResource to msg. +func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + oldMsg := msg + msg = packUint16(msg, r.Priority) + msg = packUint16(msg, r.Weight) + msg = packUint16(msg, r.Port) + msg, err := r.Target.pack(msg, nil, compressionOff) + if err != nil { + return oldMsg, &nestedError{"SRVResource.Target", err} + } + return msg, nil +} + +func unpackSRVResource(msg []byte, off int) (SRVResource, error) { + priority, off, err := unpackUint16(msg, off) + if err != nil { + return SRVResource{}, &nestedError{"Priority", err} + } + weight, off, err := unpackUint16(msg, off) + if err != nil { + return SRVResource{}, &nestedError{"Weight", err} + } + port, off, err := unpackUint16(msg, off) + if err != nil { + return SRVResource{}, &nestedError{"Port", err} + } + var target Name + if _, err := target.unpackCompressed(msg, off, false /* allowCompression */); err != nil { + return SRVResource{}, &nestedError{"Target", err} + } + return SRVResource{priority, weight, port, target}, nil +} + +// An AResource is an A Resource record. +type AResource struct { + A [4]byte +} + +func (r *AResource) realType() Type { + return TypeA +} + +// pack appends the wire format of the AResource to msg. +func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return packBytes(msg, r.A[:]), nil +} + +func unpackAResource(msg []byte, off int) (AResource, error) { + var a [4]byte + if _, err := unpackBytes(msg, off, a[:]); err != nil { + return AResource{}, err + } + return AResource{a}, nil +} + +// An AAAAResource is an AAAA Resource record. +type AAAAResource struct { + AAAA [16]byte +} + +func (r *AAAAResource) realType() Type { + return TypeAAAA +} + +// pack appends the wire format of the AAAAResource to msg. +func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return packBytes(msg, r.AAAA[:]), nil +} + +func unpackAAAAResource(msg []byte, off int) (AAAAResource, error) { + var aaaa [16]byte + if _, err := unpackBytes(msg, off, aaaa[:]); err != nil { + return AAAAResource{}, err + } + return AAAAResource{aaaa}, nil +} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go new file mode 100644 index 0000000..052897f --- /dev/null +++ b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go @@ -0,0 +1,1137 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dnsmessage + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "testing" +) + +func mustNewName(name string) Name { + n, err := NewName(name) + if err != nil { + panic(err) + } + return n +} + +func (m *Message) String() string { + s := fmt.Sprintf("Message: %#v\n", &m.Header) + if len(m.Questions) > 0 { + s += "-- Questions\n" + for _, q := range m.Questions { + s += fmt.Sprintf("%#v\n", q) + } + } + if len(m.Answers) > 0 { + s += "-- Answers\n" + for _, a := range m.Answers { + s += fmt.Sprintf("%#v\n", a) + } + } + if len(m.Authorities) > 0 { + s += "-- Authorities\n" + for _, ns := range m.Authorities { + s += fmt.Sprintf("%#v\n", ns) + } + } + if len(m.Additionals) > 0 { + s += "-- Additionals\n" + for _, e := range m.Additionals { + s += fmt.Sprintf("%#v\n", e) + } + } + return s +} + +func TestNameString(t *testing.T) { + want := "foo" + name := mustNewName(want) + if got := fmt.Sprint(name); got != want { + t.Errorf("got fmt.Sprint(%#v) = %s, want = %s", name, got, want) + } +} + +func TestQuestionPackUnpack(t *testing.T) { + want := Question{ + Name: mustNewName("."), + Type: TypeA, + Class: ClassINET, + } + buf, err := want.pack(make([]byte, 1, 50), map[string]int{}, 1) + if err != nil { + t.Fatal("Packing failed:", err) + } + var p Parser + p.msg = buf + p.header.questions = 1 + p.section = sectionQuestions + p.off = 1 + got, err := p.Question() + if err != nil { + t.Fatalf("Unpacking failed: %v\n%s", err, string(buf[1:])) + } + if p.off != len(buf) { + t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", p.off, len(buf)) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Got = %+v, want = %+v", got, want) + } +} + +func TestName(t *testing.T) { + tests := []string{ + "", + ".", + "google..com", + "google.com", + "google..com.", + "google.com.", + ".google.com.", + "www..google.com.", + "www.google.com.", + } + + for _, test := range tests { + n, err := NewName(test) + if err != nil { + t.Errorf("Creating name for %q: %v", test, err) + continue + } + if ns := n.String(); ns != test { + t.Errorf("Got %#v.String() = %q, want = %q", n, ns, test) + continue + } + } +} + +func TestNamePackUnpack(t *testing.T) { + tests := []struct { + in string + want string + err error + }{ + {"", "", errNonCanonicalName}, + {".", ".", nil}, + {"google..com", "", errNonCanonicalName}, + {"google.com", "", errNonCanonicalName}, + {"google..com.", "", errZeroSegLen}, + {"google.com.", "google.com.", nil}, + {".google.com.", "", errZeroSegLen}, + {"www..google.com.", "", errZeroSegLen}, + {"www.google.com.", "www.google.com.", nil}, + } + + for _, test := range tests { + in := mustNewName(test.in) + want := mustNewName(test.want) + buf, err := in.pack(make([]byte, 0, 30), map[string]int{}, 0) + if err != test.err { + t.Errorf("Packing of %q: got err = %v, want err = %v", test.in, err, test.err) + continue + } + if test.err != nil { + continue + } + var got Name + n, err := got.unpack(buf, 0) + if err != nil { + t.Errorf("Unpacking for %q failed: %v", test.in, err) + continue + } + if n != len(buf) { + t.Errorf( + "Unpacked different amount than packed for %q: got n = %d, want = %d", + test.in, + n, + len(buf), + ) + } + if got != want { + t.Errorf("Unpacking packing of %q: got = %#v, want = %#v", test.in, got, want) + } + } +} + +func TestIncompressibleName(t *testing.T) { + name := mustNewName("example.com.") + compression := map[string]int{} + buf, err := name.pack(make([]byte, 0, 100), compression, 0) + if err != nil { + t.Fatal("First packing failed:", err) + } + buf, err = name.pack(buf, compression, 0) + if err != nil { + t.Fatal("Second packing failed:", err) + } + var n1 Name + off, err := n1.unpackCompressed(buf, 0, false /* allowCompression */) + if err != nil { + t.Fatal("Unpacking incompressible name without pointers failed:", err) + } + var n2 Name + if _, err := n2.unpackCompressed(buf, off, false /* allowCompression */); err != errCompressedSRV { + t.Errorf("Unpacking compressed incompressible name with pointers: got err = %v, want = %v", err, errCompressedSRV) + } +} + +func checkErrorPrefix(err error, prefix string) bool { + e, ok := err.(*nestedError) + return ok && e.s == prefix +} + +func TestHeaderUnpackError(t *testing.T) { + wants := []string{ + "id", + "bits", + "questions", + "answers", + "authorities", + "additionals", + } + var buf []byte + var h header + for _, want := range wants { + n, err := h.unpack(buf, 0) + if n != 0 || !checkErrorPrefix(err, want) { + t.Errorf("got h.unpack([%d]byte, 0) = %d, %v, want = 0, %s", len(buf), n, err, want) + } + buf = append(buf, 0, 0) + } +} + +func TestParserStart(t *testing.T) { + const want = "unpacking header" + var p Parser + for i := 0; i <= 1; i++ { + _, err := p.Start([]byte{}) + if !checkErrorPrefix(err, want) { + t.Errorf("got p.Start(nil) = _, %v, want = _, %s", err, want) + } + } +} + +func TestResourceNotStarted(t *testing.T) { + tests := []struct { + name string + fn func(*Parser) error + }{ + {"CNAMEResource", func(p *Parser) error { _, err := p.CNAMEResource(); return err }}, + {"MXResource", func(p *Parser) error { _, err := p.MXResource(); return err }}, + {"NSResource", func(p *Parser) error { _, err := p.NSResource(); return err }}, + {"PTRResource", func(p *Parser) error { _, err := p.PTRResource(); return err }}, + {"SOAResource", func(p *Parser) error { _, err := p.SOAResource(); return err }}, + {"TXTResource", func(p *Parser) error { _, err := p.TXTResource(); return err }}, + {"SRVResource", func(p *Parser) error { _, err := p.SRVResource(); return err }}, + {"AResource", func(p *Parser) error { _, err := p.AResource(); return err }}, + {"AAAAResource", func(p *Parser) error { _, err := p.AAAAResource(); return err }}, + } + + for _, test := range tests { + if err := test.fn(&Parser{}); err != ErrNotStarted { + t.Errorf("got _, %v = p.%s(), want = _, %v", err, test.name, ErrNotStarted) + } + } +} + +func TestDNSPackUnpack(t *testing.T) { + wants := []Message{ + { + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{}, + Authorities: []Resource{}, + Additionals: []Resource{}, + }, + largeTestMsg(), + } + for i, want := range wants { + b, err := want.Pack() + if err != nil { + t.Fatalf("%d: packing failed: %v", i, err) + } + var got Message + err = got.Unpack(b) + if err != nil { + t.Fatalf("%d: unpacking failed: %v", i, err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: got = %+v, want = %+v", i, &got, &want) + } + } +} + +func TestDNSAppendPackUnpack(t *testing.T) { + wants := []Message{ + { + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{}, + Authorities: []Resource{}, + Additionals: []Resource{}, + }, + largeTestMsg(), + } + for i, want := range wants { + b := make([]byte, 2, 514) + b, err := want.AppendPack(b) + if err != nil { + t.Fatalf("%d: packing failed: %v", i, err) + } + b = b[2:] + var got Message + err = got.Unpack(b) + if err != nil { + t.Fatalf("%d: unpacking failed: %v", i, err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: got = %+v, want = %+v", i, &got, &want) + } + } +} + +func TestSkipAll(t *testing.T) { + msg := largeTestMsg() + buf, err := msg.Pack() + if err != nil { + t.Fatal("Packing large test message:", err) + } + var p Parser + if _, err := p.Start(buf); err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + f func() error + }{ + {"SkipAllQuestions", p.SkipAllQuestions}, + {"SkipAllAnswers", p.SkipAllAnswers}, + {"SkipAllAuthorities", p.SkipAllAuthorities}, + {"SkipAllAdditionals", p.SkipAllAdditionals}, + } + for _, test := range tests { + for i := 1; i <= 3; i++ { + if err := test.f(); err != nil { + t.Errorf("Call #%d to %s(): %v", i, test.name, err) + } + } + } +} + +func TestSkipEach(t *testing.T) { + msg := smallTestMsg() + + buf, err := msg.Pack() + if err != nil { + t.Fatal("Packing test message:", err) + } + var p Parser + if _, err := p.Start(buf); err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + f func() error + }{ + {"SkipQuestion", p.SkipQuestion}, + {"SkipAnswer", p.SkipAnswer}, + {"SkipAuthority", p.SkipAuthority}, + {"SkipAdditional", p.SkipAdditional}, + } + for _, test := range tests { + if err := test.f(); err != nil { + t.Errorf("First call: got %s() = %v, want = %v", test.name, err, nil) + } + if err := test.f(); err != ErrSectionDone { + t.Errorf("Second call: got %s() = %v, want = %v", test.name, err, ErrSectionDone) + } + } +} + +func TestSkipAfterRead(t *testing.T) { + msg := smallTestMsg() + + buf, err := msg.Pack() + if err != nil { + t.Fatal("Packing test message:", err) + } + var p Parser + if _, err := p.Start(buf); err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + skip func() error + read func() error + }{ + {"Question", p.SkipQuestion, func() error { _, err := p.Question(); return err }}, + {"Answer", p.SkipAnswer, func() error { _, err := p.Answer(); return err }}, + {"Authority", p.SkipAuthority, func() error { _, err := p.Authority(); return err }}, + {"Additional", p.SkipAdditional, func() error { _, err := p.Additional(); return err }}, + } + for _, test := range tests { + if err := test.read(); err != nil { + t.Errorf("Got %s() = _, %v, want = _, %v", test.name, err, nil) + } + if err := test.skip(); err != ErrSectionDone { + t.Errorf("Got Skip%s() = %v, want = %v", test.name, err, ErrSectionDone) + } + } +} + +func TestSkipNotStarted(t *testing.T) { + var p Parser + + tests := []struct { + name string + f func() error + }{ + {"SkipAllQuestions", p.SkipAllQuestions}, + {"SkipAllAnswers", p.SkipAllAnswers}, + {"SkipAllAuthorities", p.SkipAllAuthorities}, + {"SkipAllAdditionals", p.SkipAllAdditionals}, + } + for _, test := range tests { + if err := test.f(); err != ErrNotStarted { + t.Errorf("Got %s() = %v, want = %v", test.name, err, ErrNotStarted) + } + } +} + +func TestTooManyRecords(t *testing.T) { + const recs = int(^uint16(0)) + 1 + tests := []struct { + name string + msg Message + want error + }{ + { + "Questions", + Message{ + Questions: make([]Question, recs), + }, + errTooManyQuestions, + }, + { + "Answers", + Message{ + Answers: make([]Resource, recs), + }, + errTooManyAnswers, + }, + { + "Authorities", + Message{ + Authorities: make([]Resource, recs), + }, + errTooManyAuthorities, + }, + { + "Additionals", + Message{ + Additionals: make([]Resource, recs), + }, + errTooManyAdditionals, + }, + } + + for _, test := range tests { + if _, got := test.msg.Pack(); got != test.want { + t.Errorf("Packing %d %s: got = %v, want = %v", recs, test.name, got, test.want) + } + } +} + +func TestVeryLongTxt(t *testing.T) { + want := Resource{ + ResourceHeader{ + Name: mustNewName("foo.bar.example.com."), + Type: TypeTXT, + Class: ClassINET, + }, + &TXTResource{[]string{ + "", + "", + "foo bar", + "", + "www.example.com", + "www.example.com.", + strings.Repeat(".", 255), + }}, + } + buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}, 0) + if err != nil { + t.Fatal("Packing failed:", err) + } + var got Resource + off, err := got.Header.unpack(buf, 0) + if err != nil { + t.Fatal("Unpacking ResourceHeader failed:", err) + } + body, n, err := unpackResourceBody(buf, off, got.Header) + if err != nil { + t.Fatal("Unpacking failed:", err) + } + got.Body = body + if n != len(buf) { + t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", n, len(buf)) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Got = %#v, want = %#v", got, want) + } +} + +func TestTooLongTxt(t *testing.T) { + rb := TXTResource{[]string{strings.Repeat(".", 256)}} + if _, err := rb.pack(make([]byte, 0, 8000), map[string]int{}, 0); err != errStringTooLong { + t.Errorf("Packing TXTRecord with 256 character string: got err = %v, want = %v", err, errStringTooLong) + } +} + +func TestStartAppends(t *testing.T) { + buf := make([]byte, 2, 514) + wantBuf := []byte{4, 44} + copy(buf, wantBuf) + + b := NewBuilder(buf, Header{}) + b.EnableCompression() + + buf, err := b.Finish() + if err != nil { + t.Fatal("Building failed:", err) + } + if got, want := len(buf), headerLen+2; got != want { + t.Errorf("Got len(buf} = %d, want = %d", got, want) + } + if string(buf[:2]) != string(wantBuf) { + t.Errorf("Original data not preserved, got = %v, want = %v", buf[:2], wantBuf) + } +} + +func TestStartError(t *testing.T) { + tests := []struct { + name string + fn func(*Builder) error + }{ + {"Questions", func(b *Builder) error { return b.StartQuestions() }}, + {"Answers", func(b *Builder) error { return b.StartAnswers() }}, + {"Authorities", func(b *Builder) error { return b.StartAuthorities() }}, + {"Additionals", func(b *Builder) error { return b.StartAdditionals() }}, + } + + envs := []struct { + name string + fn func() *Builder + want error + }{ + {"sectionNotStarted", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted}, + {"sectionDone", func() *Builder { return &Builder{section: sectionDone} }, ErrSectionDone}, + } + + for _, env := range envs { + for _, test := range tests { + if got := test.fn(env.fn()); got != env.want { + t.Errorf("got Builder{%s}.Start%s = %v, want = %v", env.name, test.name, got, env.want) + } + } + } +} + +func TestBuilderResourceError(t *testing.T) { + tests := []struct { + name string + fn func(*Builder) error + }{ + {"CNAMEResource", func(b *Builder) error { return b.CNAMEResource(ResourceHeader{}, CNAMEResource{}) }}, + {"MXResource", func(b *Builder) error { return b.MXResource(ResourceHeader{}, MXResource{}) }}, + {"NSResource", func(b *Builder) error { return b.NSResource(ResourceHeader{}, NSResource{}) }}, + {"PTRResource", func(b *Builder) error { return b.PTRResource(ResourceHeader{}, PTRResource{}) }}, + {"SOAResource", func(b *Builder) error { return b.SOAResource(ResourceHeader{}, SOAResource{}) }}, + {"TXTResource", func(b *Builder) error { return b.TXTResource(ResourceHeader{}, TXTResource{}) }}, + {"SRVResource", func(b *Builder) error { return b.SRVResource(ResourceHeader{}, SRVResource{}) }}, + {"AResource", func(b *Builder) error { return b.AResource(ResourceHeader{}, AResource{}) }}, + {"AAAAResource", func(b *Builder) error { return b.AAAAResource(ResourceHeader{}, AAAAResource{}) }}, + } + + envs := []struct { + name string + fn func() *Builder + want error + }{ + {"sectionNotStarted", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted}, + {"sectionHeader", func() *Builder { return &Builder{section: sectionHeader} }, ErrNotStarted}, + {"sectionQuestions", func() *Builder { return &Builder{section: sectionQuestions} }, ErrNotStarted}, + {"sectionDone", func() *Builder { return &Builder{section: sectionDone} }, ErrSectionDone}, + } + + for _, env := range envs { + for _, test := range tests { + if got := test.fn(env.fn()); got != env.want { + t.Errorf("got Builder{%s}.%s = %v, want = %v", env.name, test.name, got, env.want) + } + } + } +} + +func TestFinishError(t *testing.T) { + var b Builder + want := ErrNotStarted + if _, got := b.Finish(); got != want { + t.Errorf("got Builder{}.Finish() = %v, want = %v", got, want) + } +} + +func TestBuilder(t *testing.T) { + msg := largeTestMsg() + want, err := msg.Pack() + if err != nil { + t.Fatal("Packing without builder:", err) + } + + b := NewBuilder(nil, msg.Header) + b.EnableCompression() + + if err := b.StartQuestions(); err != nil { + t.Fatal("b.StartQuestions():", err) + } + for _, q := range msg.Questions { + if err := b.Question(q); err != nil { + t.Fatalf("b.Question(%#v): %v", q, err) + } + } + + if err := b.StartAnswers(); err != nil { + t.Fatal("b.StartAnswers():", err) + } + for _, a := range msg.Answers { + switch a.Header.Type { + case TypeA: + if err := b.AResource(a.Header, *a.Body.(*AResource)); err != nil { + t.Fatalf("b.AResource(%#v): %v", a, err) + } + case TypeNS: + if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { + t.Fatalf("b.NSResource(%#v): %v", a, err) + } + case TypeCNAME: + if err := b.CNAMEResource(a.Header, *a.Body.(*CNAMEResource)); err != nil { + t.Fatalf("b.CNAMEResource(%#v): %v", a, err) + } + case TypeSOA: + if err := b.SOAResource(a.Header, *a.Body.(*SOAResource)); err != nil { + t.Fatalf("b.SOAResource(%#v): %v", a, err) + } + case TypePTR: + if err := b.PTRResource(a.Header, *a.Body.(*PTRResource)); err != nil { + t.Fatalf("b.PTRResource(%#v): %v", a, err) + } + case TypeMX: + if err := b.MXResource(a.Header, *a.Body.(*MXResource)); err != nil { + t.Fatalf("b.MXResource(%#v): %v", a, err) + } + case TypeTXT: + if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { + t.Fatalf("b.TXTResource(%#v): %v", a, err) + } + case TypeAAAA: + if err := b.AAAAResource(a.Header, *a.Body.(*AAAAResource)); err != nil { + t.Fatalf("b.AAAAResource(%#v): %v", a, err) + } + case TypeSRV: + if err := b.SRVResource(a.Header, *a.Body.(*SRVResource)); err != nil { + t.Fatalf("b.SRVResource(%#v): %v", a, err) + } + } + } + + if err := b.StartAuthorities(); err != nil { + t.Fatal("b.StartAuthorities():", err) + } + for _, a := range msg.Authorities { + if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { + t.Fatalf("b.NSResource(%#v): %v", a, err) + } + } + + if err := b.StartAdditionals(); err != nil { + t.Fatal("b.StartAdditionals():", err) + } + for _, a := range msg.Additionals { + if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { + t.Fatalf("b.TXTResource(%#v): %v", a, err) + } + } + + got, err := b.Finish() + if err != nil { + t.Fatal("b.Finish():", err) + } + if !bytes.Equal(got, want) { + t.Fatalf("Got from Builder: %#v\nwant = %#v", got, want) + } +} + +func TestResourcePack(t *testing.T) { + for _, tt := range []struct { + m Message + err error + }{ + { + Message{ + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{{ResourceHeader{}, nil}}, + }, + &nestedError{"packing Answer", errNilResouceBody}, + }, + { + Message{ + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Authorities: []Resource{{ResourceHeader{}, (*NSResource)(nil)}}, + }, + &nestedError{"packing Authority", + &nestedError{"ResourceHeader", + &nestedError{"Name", errNonCanonicalName}, + }, + }, + }, + { + Message{ + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeA, + Class: ClassINET, + }, + }, + Additionals: []Resource{{ResourceHeader{}, nil}}, + }, + &nestedError{"packing Additional", errNilResouceBody}, + }, + } { + _, err := tt.m.Pack() + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("got %v for %v; want %v", err, tt.m, tt.err) + } + } +} + +func benchmarkParsingSetup() ([]byte, error) { + name := mustNewName("foo.bar.example.com.") + msg := Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: name, + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &AResource{[4]byte{}}, + }, + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &AAAAResource{[16]byte{}}, + }, + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &CNAMEResource{name}, + }, + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &NSResource{name}, + }, + }, + } + + buf, err := msg.Pack() + if err != nil { + return nil, fmt.Errorf("msg.Pack(): %v", err) + } + return buf, nil +} + +func benchmarkParsing(tb testing.TB, buf []byte) { + var p Parser + if _, err := p.Start(buf); err != nil { + tb.Fatal("p.Start(buf):", err) + } + + for { + _, err := p.Question() + if err == ErrSectionDone { + break + } + if err != nil { + tb.Fatal("p.Question():", err) + } + } + + for { + h, err := p.AnswerHeader() + if err == ErrSectionDone { + break + } + if err != nil { + panic(err) + } + + switch h.Type { + case TypeA: + if _, err := p.AResource(); err != nil { + tb.Fatal("p.AResource():", err) + } + case TypeAAAA: + if _, err := p.AAAAResource(); err != nil { + tb.Fatal("p.AAAAResource():", err) + } + case TypeCNAME: + if _, err := p.CNAMEResource(); err != nil { + tb.Fatal("p.CNAMEResource():", err) + } + case TypeNS: + if _, err := p.NSResource(); err != nil { + tb.Fatal("p.NSResource():", err) + } + default: + tb.Fatalf("unknown type: %T", h) + } + } +} + +func BenchmarkParsing(b *testing.B) { + buf, err := benchmarkParsingSetup() + if err != nil { + b.Fatal(err) + } + + b.ReportAllocs() + for i := 0; i < b.N; i++ { + benchmarkParsing(b, buf) + } +} + +func TestParsingAllocs(t *testing.T) { + buf, err := benchmarkParsingSetup() + if err != nil { + t.Fatal(err) + } + + if allocs := testing.AllocsPerRun(100, func() { benchmarkParsing(t, buf) }); allocs > 0.5 { + t.Errorf("Allocations during parsing: got = %f, want ~0", allocs) + } +} + +func benchmarkBuildingSetup() (Name, []byte) { + name := mustNewName("foo.bar.example.com.") + buf := make([]byte, 0, packStartingCap) + return name, buf +} + +func benchmarkBuilding(tb testing.TB, name Name, buf []byte) { + bld := NewBuilder(buf, Header{Response: true, Authoritative: true}) + + if err := bld.StartQuestions(); err != nil { + tb.Fatal("bld.StartQuestions():", err) + } + q := Question{ + Name: name, + Type: TypeA, + Class: ClassINET, + } + if err := bld.Question(q); err != nil { + tb.Fatalf("bld.Question(%+v): %v", q, err) + } + + hdr := ResourceHeader{ + Name: name, + Class: ClassINET, + } + if err := bld.StartAnswers(); err != nil { + tb.Fatal("bld.StartQuestions():", err) + } + + ar := AResource{[4]byte{}} + if err := bld.AResource(hdr, ar); err != nil { + tb.Fatalf("bld.AResource(%+v, %+v): %v", hdr, ar, err) + } + + aaar := AAAAResource{[16]byte{}} + if err := bld.AAAAResource(hdr, aaar); err != nil { + tb.Fatalf("bld.AAAAResource(%+v, %+v): %v", hdr, aaar, err) + } + + cnr := CNAMEResource{name} + if err := bld.CNAMEResource(hdr, cnr); err != nil { + tb.Fatalf("bld.CNAMEResource(%+v, %+v): %v", hdr, cnr, err) + } + + nsr := NSResource{name} + if err := bld.NSResource(hdr, nsr); err != nil { + tb.Fatalf("bld.NSResource(%+v, %+v): %v", hdr, nsr, err) + } + + if _, err := bld.Finish(); err != nil { + tb.Fatal("bld.Finish():", err) + } +} + +func BenchmarkBuilding(b *testing.B) { + name, buf := benchmarkBuildingSetup() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + benchmarkBuilding(b, name, buf) + } +} + +func TestBuildingAllocs(t *testing.T) { + name, buf := benchmarkBuildingSetup() + if allocs := testing.AllocsPerRun(100, func() { benchmarkBuilding(t, name, buf) }); allocs > 0.5 { + t.Errorf("Allocations during building: got = %f, want ~0", allocs) + } +} + +func smallTestMsg() Message { + name := mustNewName("example.com.") + return Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: name, + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + }, + Authorities: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + }, + Additionals: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + }, + } +} + +func BenchmarkPack(b *testing.B) { + msg := largeTestMsg() + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + if _, err := msg.Pack(); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkAppendPack(b *testing.B) { + msg := largeTestMsg() + buf := make([]byte, 0, packStartingCap) + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + if _, err := msg.AppendPack(buf[:0]); err != nil { + b.Fatal(err) + } + } +} + +func largeTestMsg() Message { + name := mustNewName("foo.bar.example.com.") + return Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: name, + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 2}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeAAAA, + Class: ClassINET, + }, + &AAAAResource{[16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeCNAME, + Class: ClassINET, + }, + &CNAMEResource{mustNewName("alias.example.com.")}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeSOA, + Class: ClassINET, + }, + &SOAResource{ + NS: mustNewName("ns1.example.com."), + MBox: mustNewName("mb.example.com."), + Serial: 1, + Refresh: 2, + Retry: 3, + Expire: 4, + MinTTL: 5, + }, + }, + { + ResourceHeader{ + Name: name, + Type: TypePTR, + Class: ClassINET, + }, + &PTRResource{mustNewName("ptr.example.com.")}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeMX, + Class: ClassINET, + }, + &MXResource{ + 7, + mustNewName("mx.example.com."), + }, + }, + { + ResourceHeader{ + Name: name, + Type: TypeSRV, + Class: ClassINET, + }, + &SRVResource{ + 8, + 9, + 11, + mustNewName("srv.example.com."), + }, + }, + }, + Authorities: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeNS, + Class: ClassINET, + }, + &NSResource{mustNewName("ns1.example.com.")}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeNS, + Class: ClassINET, + }, + &NSResource{mustNewName("ns2.example.com.")}, + }, + }, + Additionals: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeTXT, + Class: ClassINET, + }, + &TXTResource{[]string{"So Long, and Thanks for All the Fish"}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeTXT, + Class: ClassINET, + }, + &TXTResource{[]string{"Hamster Huey and the Gooey Kablooie"}}, + }, + }, + } +} diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 0000000..cd0a8ac --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". +// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/atom_test.go b/vendor/golang.org/x/net/html/atom/atom_test.go new file mode 100644 index 0000000..6e33704 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom_test.go @@ -0,0 +1,109 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atom + +import ( + "sort" + "testing" +) + +func TestKnown(t *testing.T) { + for _, s := range testAtomList { + if atom := Lookup([]byte(s)); atom.String() != s { + t.Errorf("Lookup(%q) = %#x (%q)", s, uint32(atom), atom.String()) + } + } +} + +func TestHits(t *testing.T) { + for _, a := range table { + if a == 0 { + continue + } + got := Lookup([]byte(a.String())) + if got != a { + t.Errorf("Lookup(%q) = %#x, want %#x", a.String(), uint32(got), uint32(a)) + } + } +} + +func TestMisses(t *testing.T) { + testCases := []string{ + "", + "\x00", + "\xff", + "A", + "DIV", + "Div", + "dIV", + "aa", + "a\x00", + "ab", + "abb", + "abbr0", + "abbr ", + " abbr", + " a", + "acceptcharset", + "acceptCharset", + "accept_charset", + "h0", + "h1h2", + "h7", + "onClick", + "λ", + // The following string has the same hash (0xa1d7fab7) as "onmouseover". + "\x00\x00\x00\x00\x00\x50\x18\xae\x38\xd0\xb7", + } + for _, tc := range testCases { + got := Lookup([]byte(tc)) + if got != 0 { + t.Errorf("Lookup(%q): got %d, want 0", tc, got) + } + } +} + +func TestForeignObject(t *testing.T) { + const ( + afo = Foreignobject + afO = ForeignObject + sfo = "foreignobject" + sfO = "foreignObject" + ) + if got := Lookup([]byte(sfo)); got != afo { + t.Errorf("Lookup(%q): got %#v, want %#v", sfo, got, afo) + } + if got := Lookup([]byte(sfO)); got != afO { + t.Errorf("Lookup(%q): got %#v, want %#v", sfO, got, afO) + } + if got := afo.String(); got != sfo { + t.Errorf("Atom(%#v).String(): got %q, want %q", afo, got, sfo) + } + if got := afO.String(); got != sfO { + t.Errorf("Atom(%#v).String(): got %q, want %q", afO, got, sfO) + } +} + +func BenchmarkLookup(b *testing.B) { + sortedTable := make([]string, 0, len(table)) + for _, a := range table { + if a != 0 { + sortedTable = append(sortedTable, a.String()) + } + } + sort.Strings(sortedTable) + + x := make([][]byte, 1000) + for i := range x { + x[i] = []byte(sortedTable[i%len(sortedTable)]) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, s := range x { + Lookup(s) + } + } +} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go new file mode 100644 index 0000000..56cd842 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/gen.go @@ -0,0 +1,710 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go +//go:generate go run gen.go -test + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "math/rand" + "os" + "sort" + "strings" +) + +// identifier converts s to a Go exported identifier. +// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". +func identifier(s string) string { + b := make([]byte, 0, len(s)) + cap := true + for _, c := range s { + if c == '-' { + cap = true + continue + } + if cap && 'a' <= c && c <= 'z' { + c -= 'a' - 'A' + } + cap = false + b = append(b, byte(c)) + } + return string(b) +} + +var test = flag.Bool("test", false, "generate table_test.go") + +func genFile(name string, buf *bytes.Buffer) { + b, err := format.Source(buf.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile(name, b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main() { + flag.Parse() + + var all []string + all = append(all, elements...) + all = append(all, attributes...) + all = append(all, eventHandlers...) + all = append(all, extra...) + sort.Strings(all) + + // uniq - lists have dups + w := 0 + for _, s := range all { + if w == 0 || all[w-1] != s { + all[w] = s + w++ + } + } + all = all[:w] + + if *test { + var buf bytes.Buffer + fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") + fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") + fmt.Fprintln(&buf, "package atom\n") + fmt.Fprintln(&buf, "var testAtomList = []string{") + for _, s := range all { + fmt.Fprintf(&buf, "\t%q,\n", s) + } + fmt.Fprintln(&buf, "}") + + genFile("table_test.go", &buf) + return + } + + // Find hash that minimizes table size. + var best *table + for i := 0; i < 1000000; i++ { + if best != nil && 1<<(best.k-1) < len(all) { + break + } + h := rand.Uint32() + for k := uint(0); k <= 16; k++ { + if best != nil && k >= best.k { + break + } + var t table + if t.init(h, k, all) { + best = &t + break + } + } + } + if best == nil { + fmt.Fprintf(os.Stderr, "failed to construct string table\n") + os.Exit(1) + } + + // Lay out strings, using overlaps when possible. + layout := append([]string{}, all...) + + // Remove strings that are substrings of other strings + for changed := true; changed; { + changed = false + for i, s := range layout { + if s == "" { + continue + } + for j, t := range layout { + if i != j && t != "" && strings.Contains(s, t) { + changed = true + layout[j] = "" + } + } + } + } + + // Join strings where one suffix matches another prefix. + for { + // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], + // maximizing overlap length k. + besti := -1 + bestj := -1 + bestk := 0 + for i, s := range layout { + if s == "" { + continue + } + for j, t := range layout { + if i == j { + continue + } + for k := bestk + 1; k <= len(s) && k <= len(t); k++ { + if s[len(s)-k:] == t[:k] { + besti = i + bestj = j + bestk = k + } + } + } + } + if bestk > 0 { + layout[besti] += layout[bestj][bestk:] + layout[bestj] = "" + continue + } + break + } + + text := strings.Join(layout, "") + + atom := map[string]uint32{} + for _, s := range all { + off := strings.Index(text, s) + if off < 0 { + panic("lost string " + s) + } + atom[s] = uint32(off<<8 | len(s)) + } + + var buf bytes.Buffer + // Generate the Go code. + fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") + fmt.Fprintln(&buf, "//go:generate go run gen.go\n") + fmt.Fprintln(&buf, "package atom\n\nconst (") + + // compute max len + maxLen := 0 + for _, s := range all { + if maxLen < len(s) { + maxLen = len(s) + } + fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) + } + fmt.Fprintln(&buf, ")\n") + + fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) + fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) + + fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) + for i, s := range best.tab { + if s == "" { + continue + } + fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) + } + fmt.Fprintf(&buf, "}\n") + datasize := (1 << best.k) * 4 + + fmt.Fprintln(&buf, "const atomText =") + textsize := len(text) + for len(text) > 60 { + fmt.Fprintf(&buf, "\t%q +\n", text[:60]) + text = text[60:] + } + fmt.Fprintf(&buf, "\t%q\n\n", text) + + genFile("table.go", &buf) + + fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) +} + +type byLen []string + +func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } +func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byLen) Len() int { return len(x) } + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s string) uint32 { + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// A table represents an attempt at constructing the lookup table. +// The lookup table uses cuckoo hashing, meaning that each string +// can be found in one of two positions. +type table struct { + h0 uint32 + k uint + mask uint32 + tab []string +} + +// hash returns the two hashes for s. +func (t *table) hash(s string) (h1, h2 uint32) { + h := fnv(t.h0, s) + h1 = h & t.mask + h2 = (h >> 16) & t.mask + return +} + +// init initializes the table with the given parameters. +// h0 is the initial hash value, +// k is the number of bits of hash value to use, and +// x is the list of strings to store in the table. +// init returns false if the table cannot be constructed. +func (t *table) init(h0 uint32, k uint, x []string) bool { + t.h0 = h0 + t.k = k + t.tab = make([]string, 1< len(t.tab) { + return false + } + s := t.tab[i] + h1, h2 := t.hash(s) + j := h1 + h2 - i + if t.tab[j] != "" && !t.push(j, depth+1) { + return false + } + t.tab[j] = s + return true +} + +// The lists of element names and attribute keys were taken from +// https://html.spec.whatwg.org/multipage/indices.html#index +// as of the "HTML Living Standard - Last Updated 18 September 2017" version. + +// "command", "keygen" and "menuitem" have been removed from the spec, +// but are kept here for backwards compatibility. +var elements = []string{ + "a", + "abbr", + "address", + "area", + "article", + "aside", + "audio", + "b", + "base", + "bdi", + "bdo", + "blockquote", + "body", + "br", + "button", + "canvas", + "caption", + "cite", + "code", + "col", + "colgroup", + "command", + "data", + "datalist", + "dd", + "del", + "details", + "dfn", + "dialog", + "div", + "dl", + "dt", + "em", + "embed", + "fieldset", + "figcaption", + "figure", + "footer", + "form", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "hgroup", + "hr", + "html", + "i", + "iframe", + "img", + "input", + "ins", + "kbd", + "keygen", + "label", + "legend", + "li", + "link", + "main", + "map", + "mark", + "menu", + "menuitem", + "meta", + "meter", + "nav", + "noscript", + "object", + "ol", + "optgroup", + "option", + "output", + "p", + "param", + "picture", + "pre", + "progress", + "q", + "rp", + "rt", + "ruby", + "s", + "samp", + "script", + "section", + "select", + "slot", + "small", + "source", + "span", + "strong", + "style", + "sub", + "summary", + "sup", + "table", + "tbody", + "td", + "template", + "textarea", + "tfoot", + "th", + "thead", + "time", + "title", + "tr", + "track", + "u", + "ul", + "var", + "video", + "wbr", +} + +// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 +// +// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", +// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, +// but are kept here for backwards compatibility. +var attributes = []string{ + "abbr", + "accept", + "accept-charset", + "accesskey", + "action", + "allowfullscreen", + "allowpaymentrequest", + "allowusermedia", + "alt", + "as", + "async", + "autocomplete", + "autofocus", + "autoplay", + "challenge", + "charset", + "checked", + "cite", + "class", + "color", + "cols", + "colspan", + "command", + "content", + "contenteditable", + "contextmenu", + "controls", + "coords", + "crossorigin", + "data", + "datetime", + "default", + "defer", + "dir", + "dirname", + "disabled", + "download", + "draggable", + "dropzone", + "enctype", + "for", + "form", + "formaction", + "formenctype", + "formmethod", + "formnovalidate", + "formtarget", + "headers", + "height", + "hidden", + "high", + "href", + "hreflang", + "http-equiv", + "icon", + "id", + "inputmode", + "integrity", + "is", + "ismap", + "itemid", + "itemprop", + "itemref", + "itemscope", + "itemtype", + "keytype", + "kind", + "label", + "lang", + "list", + "loop", + "low", + "manifest", + "max", + "maxlength", + "media", + "mediagroup", + "method", + "min", + "minlength", + "multiple", + "muted", + "name", + "nomodule", + "nonce", + "novalidate", + "open", + "optimum", + "pattern", + "ping", + "placeholder", + "playsinline", + "poster", + "preload", + "radiogroup", + "readonly", + "referrerpolicy", + "rel", + "required", + "reversed", + "rows", + "rowspan", + "sandbox", + "spellcheck", + "scope", + "scoped", + "seamless", + "selected", + "shape", + "size", + "sizes", + "sortable", + "sorted", + "slot", + "span", + "spellcheck", + "src", + "srcdoc", + "srclang", + "srcset", + "start", + "step", + "style", + "tabindex", + "target", + "title", + "translate", + "type", + "typemustmatch", + "updateviacache", + "usemap", + "value", + "width", + "workertype", + "wrap", +} + +// "onautocomplete", "onautocompleteerror", "onmousewheel", +// "onshow" and "onsort" have been removed from the spec, +// but are kept here for backwards compatibility. +var eventHandlers = []string{ + "onabort", + "onautocomplete", + "onautocompleteerror", + "onauxclick", + "onafterprint", + "onbeforeprint", + "onbeforeunload", + "onblur", + "oncancel", + "oncanplay", + "oncanplaythrough", + "onchange", + "onclick", + "onclose", + "oncontextmenu", + "oncopy", + "oncuechange", + "oncut", + "ondblclick", + "ondrag", + "ondragend", + "ondragenter", + "ondragexit", + "ondragleave", + "ondragover", + "ondragstart", + "ondrop", + "ondurationchange", + "onemptied", + "onended", + "onerror", + "onfocus", + "onhashchange", + "oninput", + "oninvalid", + "onkeydown", + "onkeypress", + "onkeyup", + "onlanguagechange", + "onload", + "onloadeddata", + "onloadedmetadata", + "onloadend", + "onloadstart", + "onmessage", + "onmessageerror", + "onmousedown", + "onmouseenter", + "onmouseleave", + "onmousemove", + "onmouseout", + "onmouseover", + "onmouseup", + "onmousewheel", + "onwheel", + "onoffline", + "ononline", + "onpagehide", + "onpageshow", + "onpaste", + "onpause", + "onplay", + "onplaying", + "onpopstate", + "onprogress", + "onratechange", + "onreset", + "onresize", + "onrejectionhandled", + "onscroll", + "onsecuritypolicyviolation", + "onseeked", + "onseeking", + "onselect", + "onshow", + "onsort", + "onstalled", + "onstorage", + "onsubmit", + "onsuspend", + "ontimeupdate", + "ontoggle", + "onunhandledrejection", + "onunload", + "onvolumechange", + "onwaiting", +} + +// extra are ad-hoc values not covered by any of the lists above. +var extra = []string{ + "acronym", + "align", + "annotation", + "annotation-xml", + "applet", + "basefont", + "bgsound", + "big", + "blink", + "center", + "color", + "desc", + "face", + "font", + "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. + "foreignobject", + "frame", + "frameset", + "image", + "isindex", + "listing", + "malignmark", + "marquee", + "math", + "mglyph", + "mi", + "mn", + "mo", + "ms", + "mtext", + "nobr", + "noembed", + "noframes", + "plaintext", + "prompt", + "public", + "spacer", + "strike", + "svg", + "system", + "tt", + "xmp", +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 0000000..a91bd64 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,779 @@ +// Code generated by go generate gen.go; DO NOT EDIT. + +//go:generate go run gen.go + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Acronym Atom = 0x6907 + Action Atom = 0x26a06 + Address Atom = 0x6f307 + Align Atom = 0x7005 + Allowfullscreen Atom = 0x2000f + Allowpaymentrequest Atom = 0x8013 + Allowusermedia Atom = 0x9c0e + Alt Atom = 0xc703 + Annotation Atom = 0x1c90a + AnnotationXml Atom = 0x1c90e + Applet Atom = 0x31106 + Area Atom = 0x34e04 + Article Atom = 0x3f407 + As Atom = 0xd002 + Aside Atom = 0xd805 + Async Atom = 0xd005 + Audio Atom = 0xe605 + Autocomplete Atom = 0x2700c + Autofocus Atom = 0x10209 + Autoplay Atom = 0x11d08 + B Atom = 0x101 + Base Atom = 0x12c04 + Basefont Atom = 0x12c08 + Bdi Atom = 0x7903 + Bdo Atom = 0x14b03 + Bgsound Atom = 0x15e07 + Big Atom = 0x17003 + Blink Atom = 0x17305 + Blockquote Atom = 0x1870a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x19106 + Canvas Atom = 0xd406 + Caption Atom = 0x22907 + Center Atom = 0x21806 + Challenge Atom = 0x29309 + Charset Atom = 0x2107 + Checked Atom = 0x47107 + Cite Atom = 0x55c04 + Class Atom = 0x5bd05 + Code Atom = 0x1a004 + Col Atom = 0x1a703 + Colgroup Atom = 0x1a708 + Color Atom = 0x1bf05 + Cols Atom = 0x1c404 + Colspan Atom = 0x1c407 + Command Atom = 0x1d707 + Content Atom = 0x58307 + Contenteditable Atom = 0x5830f + Contextmenu Atom = 0x3780b + Controls Atom = 0x1de08 + Coords Atom = 0x1ea06 + Crossorigin Atom = 0x1f30b + Data Atom = 0x49d04 + Datalist Atom = 0x49d08 + Datetime Atom = 0x2b008 + Dd Atom = 0x2cf02 + Default Atom = 0xdb07 + Defer Atom = 0x1a205 + Del Atom = 0x44a03 + Desc Atom = 0x55904 + Details Atom = 0x4607 + Dfn Atom = 0x5f03 + Dialog Atom = 0x7a06 + Dir Atom = 0xba03 + Dirname Atom = 0xba07 + Disabled Atom = 0x16408 + Div Atom = 0x16b03 + Dl Atom = 0x5e602 + Download Atom = 0x45b08 + Draggable Atom = 0x17a09 + Dropzone Atom = 0x3fd08 + Dt Atom = 0x64b02 + Em Atom = 0x4202 + Embed Atom = 0x4205 + Enctype Atom = 0x28507 + Face Atom = 0x21604 + Fieldset Atom = 0x21e08 + Figcaption Atom = 0x2260a + Figure Atom = 0x24006 + Font Atom = 0x13004 + Footer Atom = 0xca06 + For Atom = 0x24c03 + ForeignObject Atom = 0x24c0d + Foreignobject Atom = 0x2590d + Form Atom = 0x26604 + Formaction Atom = 0x2660a + Formenctype Atom = 0x2810b + Formmethod Atom = 0x29c0a + Formnovalidate Atom = 0x2a60e + Formtarget Atom = 0x2b80a + Frame Atom = 0x5705 + Frameset Atom = 0x5708 + H1 Atom = 0x15c02 + H2 Atom = 0x2d602 + H3 Atom = 0x30502 + H4 Atom = 0x33d02 + H5 Atom = 0x34702 + H6 Atom = 0x64d02 + Head Atom = 0x32904 + Header Atom = 0x32906 + Headers Atom = 0x32907 + Height Atom = 0x14306 + Hgroup Atom = 0x2c206 + Hidden Atom = 0x2cd06 + High Atom = 0x2d304 + Hr Atom = 0x15702 + Href Atom = 0x2d804 + Hreflang Atom = 0x2d808 + Html Atom = 0x14704 + HttpEquiv Atom = 0x2e00a + I Atom = 0x601 + Icon Atom = 0x58204 + Id Atom = 0xda02 + Iframe Atom = 0x2f406 + Image Atom = 0x2fa05 + Img Atom = 0x2ff03 + Input Atom = 0x44305 + Inputmode Atom = 0x44309 + Ins Atom = 0x1fc03 + Integrity Atom = 0x23709 + Is Atom = 0x16502 + Isindex Atom = 0x30707 + Ismap Atom = 0x30e05 + Itemid Atom = 0x38306 + Itemprop Atom = 0x55d08 + Itemref Atom = 0x3c507 + Itemscope Atom = 0x67109 + Itemtype Atom = 0x31708 + Kbd Atom = 0x7803 + Keygen Atom = 0x3206 + Keytype Atom = 0x9507 + Kind Atom = 0x17704 + Label Atom = 0xf105 + Lang Atom = 0x2dc04 + Legend Atom = 0x18106 + Li Atom = 0x7102 + Link Atom = 0x17404 + List Atom = 0x4a104 + Listing Atom = 0x4a107 + Loop Atom = 0xf504 + Low Atom = 0x8203 + Main Atom = 0x1004 + Malignmark Atom = 0x6f0a + Manifest Atom = 0x6d708 + Map Atom = 0x31003 + Mark Atom = 0x7504 + Marquee Atom = 0x31f07 + Math Atom = 0x32604 + Max Atom = 0x33503 + Maxlength Atom = 0x33509 + Media Atom = 0xa505 + Mediagroup Atom = 0xa50a + Menu Atom = 0x37f04 + Menuitem Atom = 0x37f08 + Meta Atom = 0x4b004 + Meter Atom = 0xbf05 + Method Atom = 0x2a006 + Mglyph Atom = 0x30006 + Mi Atom = 0x33f02 + Min Atom = 0x33f03 + Minlength Atom = 0x33f09 + Mn Atom = 0x2a902 + Mo Atom = 0x6302 + Ms Atom = 0x67402 + Mtext Atom = 0x34905 + Multiple Atom = 0x35708 + Muted Atom = 0x35f05 + Name Atom = 0xbd04 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x4007 + Noframes Atom = 0x5508 + Nomodule Atom = 0x6108 + Nonce Atom = 0x56605 + Noscript Atom = 0x20e08 + Novalidate Atom = 0x2aa0a + Object Atom = 0x26006 + Ol Atom = 0x11802 + Onabort Atom = 0x19507 + Onafterprint Atom = 0x22e0c + Onautocomplete Atom = 0x26e0e + Onautocompleteerror Atom = 0x26e13 + Onauxclick Atom = 0x61f0a + Onbeforeprint Atom = 0x69e0d + Onbeforeunload Atom = 0x6e70e + Onblur Atom = 0x5c606 + Oncancel Atom = 0xea08 + Oncanplay Atom = 0x14d09 + Oncanplaythrough Atom = 0x14d10 + Onchange Atom = 0x41308 + Onclick Atom = 0x2ed07 + Onclose Atom = 0x36407 + Oncontextmenu Atom = 0x3760d + Oncopy Atom = 0x38906 + Oncuechange Atom = 0x38f0b + Oncut Atom = 0x39a05 + Ondblclick Atom = 0x39f0a + Ondrag Atom = 0x3a906 + Ondragend Atom = 0x3a909 + Ondragenter Atom = 0x3b20b + Ondragexit Atom = 0x3bd0a + Ondragleave Atom = 0x3d70b + Ondragover Atom = 0x3e20a + Ondragstart Atom = 0x3ec0b + Ondrop Atom = 0x3fb06 + Ondurationchange Atom = 0x40b10 + Onemptied Atom = 0x40209 + Onended Atom = 0x41b07 + Onerror Atom = 0x42207 + Onfocus Atom = 0x42907 + Onhashchange Atom = 0x4350c + Oninput Atom = 0x44107 + Oninvalid Atom = 0x44d09 + Onkeydown Atom = 0x45609 + Onkeypress Atom = 0x4630a + Onkeyup Atom = 0x47807 + Onlanguagechange Atom = 0x48510 + Onload Atom = 0x49506 + Onloadeddata Atom = 0x4950c + Onloadedmetadata Atom = 0x4a810 + Onloadend Atom = 0x4be09 + Onloadstart Atom = 0x4c70b + Onmessage Atom = 0x4d209 + Onmessageerror Atom = 0x4d20e + Onmousedown Atom = 0x4e00b + Onmouseenter Atom = 0x4eb0c + Onmouseleave Atom = 0x4f70c + Onmousemove Atom = 0x5030b + Onmouseout Atom = 0x50e0a + Onmouseover Atom = 0x51b0b + Onmouseup Atom = 0x52609 + Onmousewheel Atom = 0x5340c + Onoffline Atom = 0x54009 + Ononline Atom = 0x54908 + Onpagehide Atom = 0x5510a + Onpageshow Atom = 0x56b0a + Onpaste Atom = 0x57707 + Onpause Atom = 0x59207 + Onplay Atom = 0x59c06 + Onplaying Atom = 0x59c09 + Onpopstate Atom = 0x5a50a + Onprogress Atom = 0x5af0a + Onratechange Atom = 0x5cc0c + Onrejectionhandled Atom = 0x5d812 + Onreset Atom = 0x5ea07 + Onresize Atom = 0x5f108 + Onscroll Atom = 0x60008 + Onsecuritypolicyviolation Atom = 0x60819 + Onseeked Atom = 0x62908 + Onseeking Atom = 0x63109 + Onselect Atom = 0x63a08 + Onshow Atom = 0x64406 + Onsort Atom = 0x64f06 + Onstalled Atom = 0x65909 + Onstorage Atom = 0x66209 + Onsubmit Atom = 0x66b08 + Onsuspend Atom = 0x67b09 + Ontimeupdate Atom = 0x1310c + Ontoggle Atom = 0x68408 + Onunhandledrejection Atom = 0x68c14 + Onunload Atom = 0x6ab08 + Onvolumechange Atom = 0x6b30e + Onwaiting Atom = 0x6c109 + Onwheel Atom = 0x6ca07 + Open Atom = 0x56304 + Optgroup Atom = 0xf708 + Optimum Atom = 0x6d107 + Option Atom = 0x6e306 + Output Atom = 0x51506 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x4f07 + Picture Atom = 0xae07 + Ping Atom = 0xfe04 + Placeholder Atom = 0x1120b + Plaintext Atom = 0x1ae09 + Playsinline Atom = 0x1210b + Poster Atom = 0x2c706 + Pre Atom = 0x46803 + Preload Atom = 0x47e07 + Progress Atom = 0x5b108 + Prompt Atom = 0x52e06 + Public Atom = 0x57e06 + Q Atom = 0x8e01 + Radiogroup Atom = 0x30a + Readonly Atom = 0x34f08 + Referrerpolicy Atom = 0x3c90e + Rel Atom = 0x47f03 + Required Atom = 0x24408 + Reversed Atom = 0xb308 + Rows Atom = 0x3a04 + Rowspan Atom = 0x3a07 + Rp Atom = 0x23402 + Rt Atom = 0x19a02 + Ruby Atom = 0xc304 + S Atom = 0x2501 + Samp Atom = 0x4c04 + Sandbox Atom = 0x10a07 + Scope Atom = 0x67505 + Scoped Atom = 0x67506 + Script Atom = 0x21006 + Seamless Atom = 0x36908 + Section Atom = 0x5c107 + Select Atom = 0x63c06 + Selected Atom = 0x63c08 + Shape Atom = 0x1e505 + Size Atom = 0x5f504 + Sizes Atom = 0x5f505 + Slot Atom = 0x1ef04 + Small Atom = 0x1fe05 + Sortable Atom = 0x65108 + Sorted Atom = 0x32f06 + Source Atom = 0x37006 + Spacer Atom = 0x42f06 + Span Atom = 0x3d04 + Spellcheck Atom = 0x46c0a + Src Atom = 0x5b803 + Srcdoc Atom = 0x5b806 + Srclang Atom = 0x5f907 + Srcset Atom = 0x6f906 + Start Atom = 0x3f205 + Step Atom = 0x57b04 + Strike Atom = 0x9106 + Strong Atom = 0x6dd06 + Style Atom = 0x6ff05 + Sub Atom = 0x66d03 + Summary Atom = 0x70407 + Sup Atom = 0x70b03 + Svg Atom = 0x70e03 + System Atom = 0x71106 + Tabindex Atom = 0x4b608 + Table Atom = 0x58d05 + Target Atom = 0x2bc06 + Tbody Atom = 0x2705 + Td Atom = 0x5e02 + Template Atom = 0x71408 + Textarea Atom = 0x34a08 + Tfoot Atom = 0xc905 + Th Atom = 0x15602 + Thead Atom = 0x32805 + Time Atom = 0x13304 + Title Atom = 0xe105 + Tr Atom = 0x8b02 + Track Atom = 0x19b05 + Translate Atom = 0x1b609 + Tt Atom = 0x5102 + Type Atom = 0x9804 + Typemustmatch Atom = 0x2880d + U Atom = 0xb01 + Ul Atom = 0x6602 + Updateviacache Atom = 0x1370e + Usemap Atom = 0x59606 + Value Atom = 0x1505 + Var Atom = 0x16d03 + Video Atom = 0x2e905 + Wbr Atom = 0x57403 + Width Atom = 0x64905 + Workertype Atom = 0x71c0a + Wrap Atom = 0x72604 + Xmp Atom = 0x11003 +) + +const hash0 = 0x81cdf10e + +const maxAtomLen = 25 + +var table = [1 << 9]Atom{ + 0x1: 0xa50a, // mediagroup + 0x2: 0x2dc04, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x5708, // frameset + 0x7: 0x63a08, // onselect + 0x8: 0x71106, // system + 0xa: 0x64905, // width + 0xc: 0x2810b, // formenctype + 0xd: 0x11802, // ol + 0xe: 0x38f0b, // oncuechange + 0x10: 0x14b03, // bdo + 0x11: 0xe605, // audio + 0x12: 0x17a09, // draggable + 0x14: 0x2e905, // video + 0x15: 0x2a902, // mn + 0x16: 0x37f04, // menu + 0x17: 0x2c706, // poster + 0x19: 0xca06, // footer + 0x1a: 0x2a006, // method + 0x1b: 0x2b008, // datetime + 0x1c: 0x19507, // onabort + 0x1d: 0x1370e, // updateviacache + 0x1e: 0xd005, // async + 0x1f: 0x49506, // onload + 0x21: 0xea08, // oncancel + 0x22: 0x62908, // onseeked + 0x23: 0x2fa05, // image + 0x24: 0x5d812, // onrejectionhandled + 0x26: 0x17404, // link + 0x27: 0x51506, // output + 0x28: 0x32904, // head + 0x29: 0x4f70c, // onmouseleave + 0x2a: 0x57707, // onpaste + 0x2b: 0x59c09, // onplaying + 0x2c: 0x1c407, // colspan + 0x2f: 0x1bf05, // color + 0x30: 0x5f504, // size + 0x31: 0x2e00a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x5510a, // onpagehide + 0x35: 0x68c14, // onunhandledrejection + 0x37: 0x42207, // onerror + 0x3a: 0x12c08, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x17704, // kind + 0x41: 0x34f08, // readonly + 0x42: 0x30006, // mglyph + 0x44: 0x7102, // li + 0x46: 0x2cd06, // hidden + 0x47: 0x70e03, // svg + 0x48: 0x57b04, // step + 0x49: 0x23709, // integrity + 0x4a: 0x57e06, // public + 0x4c: 0x1a703, // col + 0x4d: 0x1870a, // blockquote + 0x4e: 0x34702, // h5 + 0x50: 0x5b108, // progress + 0x51: 0x5f505, // sizes + 0x52: 0x33d02, // h4 + 0x56: 0x32805, // thead + 0x57: 0x9507, // keytype + 0x58: 0x5af0a, // onprogress + 0x59: 0x44309, // inputmode + 0x5a: 0x3a909, // ondragend + 0x5d: 0x39a05, // oncut + 0x5e: 0x42f06, // spacer + 0x5f: 0x1a708, // colgroup + 0x62: 0x16502, // is + 0x65: 0xd002, // as + 0x66: 0x54009, // onoffline + 0x67: 0x32f06, // sorted + 0x69: 0x48510, // onlanguagechange + 0x6c: 0x4350c, // onhashchange + 0x6d: 0xbd04, // name + 0x6e: 0xc905, // tfoot + 0x6f: 0x55904, // desc + 0x70: 0x33503, // max + 0x72: 0x1ea06, // coords + 0x73: 0x30502, // h3 + 0x74: 0x6e70e, // onbeforeunload + 0x75: 0x3a04, // rows + 0x76: 0x63c06, // select + 0x77: 0xbf05, // meter + 0x78: 0x38306, // itemid + 0x79: 0x5340c, // onmousewheel + 0x7a: 0x5b806, // srcdoc + 0x7d: 0x19b05, // track + 0x7f: 0x31708, // itemtype + 0x82: 0x6302, // mo + 0x83: 0x41308, // onchange + 0x84: 0x32907, // headers + 0x85: 0x5cc0c, // onratechange + 0x86: 0x60819, // onsecuritypolicyviolation + 0x88: 0x49d08, // datalist + 0x89: 0x4e00b, // onmousedown + 0x8a: 0x1ef04, // slot + 0x8b: 0x4a810, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x26006, // object + 0x91: 0x6b30e, // onvolumechange + 0x92: 0x2107, // charset + 0x93: 0x26e13, // onautocompleteerror + 0x94: 0x8013, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0xdb07, // default + 0x97: 0x63c08, // selected + 0x98: 0x21604, // face + 0x99: 0x1e505, // shape + 0x9b: 0x68408, // ontoggle + 0x9e: 0x64b02, // dt + 0x9f: 0x7504, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6ab08, // onunload + 0xa5: 0xf504, // loop + 0xa6: 0x16408, // disabled + 0xaa: 0x41b07, // onended + 0xab: 0x6f0a, // malignmark + 0xad: 0x67b09, // onsuspend + 0xae: 0x34905, // mtext + 0xaf: 0x64f06, // onsort + 0xb0: 0x55d08, // itemprop + 0xb3: 0x67109, // itemscope + 0xb4: 0x17305, // blink + 0xb6: 0x3a906, // ondrag + 0xb7: 0x6602, // ul + 0xb8: 0x26604, // form + 0xb9: 0x10a07, // sandbox + 0xba: 0x5705, // frame + 0xbb: 0x1505, // value + 0xbc: 0x66209, // onstorage + 0xbf: 0x6907, // acronym + 0xc0: 0x19a02, // rt + 0xc2: 0x202, // br + 0xc3: 0x21e08, // fieldset + 0xc4: 0x2880d, // typemustmatch + 0xc5: 0x6108, // nomodule + 0xc6: 0x4007, // noembed + 0xc7: 0x69e0d, // onbeforeprint + 0xc8: 0x19106, // button + 0xc9: 0x2ed07, // onclick + 0xca: 0x70407, // summary + 0xcd: 0xc304, // ruby + 0xce: 0x5bd05, // class + 0xcf: 0x3ec0b, // ondragstart + 0xd0: 0x22907, // caption + 0xd4: 0x9c0e, // allowusermedia + 0xd5: 0x4c70b, // onloadstart + 0xd9: 0x16b03, // div + 0xda: 0x4a104, // list + 0xdb: 0x32604, // math + 0xdc: 0x44305, // input + 0xdf: 0x3e20a, // ondragover + 0xe0: 0x2d602, // h2 + 0xe2: 0x1ae09, // plaintext + 0xe4: 0x4eb0c, // onmouseenter + 0xe7: 0x47107, // checked + 0xe8: 0x46803, // pre + 0xea: 0x35708, // multiple + 0xeb: 0x7903, // bdi + 0xec: 0x33509, // maxlength + 0xed: 0x8e01, // q + 0xee: 0x61f0a, // onauxclick + 0xf0: 0x57403, // wbr + 0xf2: 0x12c04, // base + 0xf3: 0x6e306, // option + 0xf5: 0x40b10, // ondurationchange + 0xf7: 0x5508, // noframes + 0xf9: 0x3fd08, // dropzone + 0xfb: 0x67505, // scope + 0xfc: 0xb308, // reversed + 0xfd: 0x3b20b, // ondragenter + 0xfe: 0x3f205, // start + 0xff: 0x11003, // xmp + 0x100: 0x5f907, // srclang + 0x101: 0x2ff03, // img + 0x104: 0x101, // b + 0x105: 0x24c03, // for + 0x106: 0xd805, // aside + 0x107: 0x44107, // oninput + 0x108: 0x34e04, // area + 0x109: 0x29c0a, // formmethod + 0x10a: 0x72604, // wrap + 0x10c: 0x23402, // rp + 0x10d: 0x4630a, // onkeypress + 0x10e: 0x5102, // tt + 0x110: 0x33f02, // mi + 0x111: 0x35f05, // muted + 0x112: 0xc703, // alt + 0x113: 0x1a004, // code + 0x114: 0x4202, // em + 0x115: 0x3bd0a, // ondragexit + 0x117: 0x3d04, // span + 0x119: 0x6d708, // manifest + 0x11a: 0x37f08, // menuitem + 0x11b: 0x58307, // content + 0x11d: 0x6c109, // onwaiting + 0x11f: 0x4be09, // onloadend + 0x121: 0x3760d, // oncontextmenu + 0x123: 0x5c606, // onblur + 0x124: 0x3f407, // article + 0x125: 0xba03, // dir + 0x126: 0xfe04, // ping + 0x127: 0x24408, // required + 0x128: 0x44d09, // oninvalid + 0x129: 0x7005, // align + 0x12b: 0x58204, // icon + 0x12c: 0x64d02, // h6 + 0x12d: 0x1c404, // cols + 0x12e: 0x2260a, // figcaption + 0x12f: 0x45609, // onkeydown + 0x130: 0x66b08, // onsubmit + 0x131: 0x14d09, // oncanplay + 0x132: 0x70b03, // sup + 0x133: 0xc01, // p + 0x135: 0x40209, // onemptied + 0x136: 0x38906, // oncopy + 0x137: 0x55c04, // cite + 0x138: 0x39f0a, // ondblclick + 0x13a: 0x5030b, // onmousemove + 0x13c: 0x66d03, // sub + 0x13d: 0x47f03, // rel + 0x13e: 0xf708, // optgroup + 0x142: 0x3a07, // rowspan + 0x143: 0x37006, // source + 0x144: 0x20e08, // noscript + 0x145: 0x56304, // open + 0x146: 0x1fc03, // ins + 0x147: 0x24c0d, // foreignObject + 0x148: 0x5a50a, // onpopstate + 0x14a: 0x28507, // enctype + 0x14b: 0x26e0e, // onautocomplete + 0x14c: 0x34a08, // textarea + 0x14e: 0x2700c, // autocomplete + 0x14f: 0x15702, // hr + 0x150: 0x1de08, // controls + 0x151: 0xda02, // id + 0x153: 0x22e0c, // onafterprint + 0x155: 0x2590d, // foreignobject + 0x156: 0x31f07, // marquee + 0x157: 0x59207, // onpause + 0x158: 0x5e602, // dl + 0x159: 0x14306, // height + 0x15a: 0x33f03, // min + 0x15b: 0xba07, // dirname + 0x15c: 0x1b609, // translate + 0x15d: 0x14704, // html + 0x15e: 0x33f09, // minlength + 0x15f: 0x47e07, // preload + 0x160: 0x71408, // template + 0x161: 0x3d70b, // ondragleave + 0x164: 0x5b803, // src + 0x165: 0x6dd06, // strong + 0x167: 0x4c04, // samp + 0x168: 0x6f307, // address + 0x169: 0x54908, // ononline + 0x16b: 0x1120b, // placeholder + 0x16c: 0x2bc06, // target + 0x16d: 0x1fe05, // small + 0x16e: 0x6ca07, // onwheel + 0x16f: 0x1c90a, // annotation + 0x170: 0x46c0a, // spellcheck + 0x171: 0x4607, // details + 0x172: 0xd406, // canvas + 0x173: 0x10209, // autofocus + 0x174: 0xc05, // param + 0x176: 0x45b08, // download + 0x177: 0x44a03, // del + 0x178: 0x36407, // onclose + 0x179: 0x7803, // kbd + 0x17a: 0x31106, // applet + 0x17b: 0x2d804, // href + 0x17c: 0x5f108, // onresize + 0x17e: 0x4950c, // onloadeddata + 0x180: 0x8b02, // tr + 0x181: 0x2b80a, // formtarget + 0x182: 0xe105, // title + 0x183: 0x6ff05, // style + 0x184: 0x9106, // strike + 0x185: 0x59606, // usemap + 0x186: 0x2f406, // iframe + 0x187: 0x1004, // main + 0x189: 0xae07, // picture + 0x18c: 0x30e05, // ismap + 0x18e: 0x49d04, // data + 0x18f: 0xf105, // label + 0x191: 0x3c90e, // referrerpolicy + 0x192: 0x15602, // th + 0x194: 0x52e06, // prompt + 0x195: 0x5c107, // section + 0x197: 0x6d107, // optimum + 0x198: 0x2d304, // high + 0x199: 0x15c02, // h1 + 0x19a: 0x65909, // onstalled + 0x19b: 0x16d03, // var + 0x19c: 0x13304, // time + 0x19e: 0x67402, // ms + 0x19f: 0x32906, // header + 0x1a0: 0x4d209, // onmessage + 0x1a1: 0x56605, // nonce + 0x1a2: 0x2660a, // formaction + 0x1a3: 0x21806, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x58d05, // table + 0x1a6: 0x4a107, // listing + 0x1a7: 0x18106, // legend + 0x1a9: 0x29309, // challenge + 0x1aa: 0x24006, // figure + 0x1ab: 0xa505, // media + 0x1ae: 0x9804, // type + 0x1af: 0x13004, // font + 0x1b0: 0x4d20e, // onmessageerror + 0x1b1: 0x36908, // seamless + 0x1b2: 0x5f03, // dfn + 0x1b3: 0x1a205, // defer + 0x1b4: 0x8203, // low + 0x1b5: 0x63109, // onseeking + 0x1b6: 0x51b0b, // onmouseover + 0x1b7: 0x2aa0a, // novalidate + 0x1b8: 0x71c0a, // workertype + 0x1ba: 0x3c507, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x31003, // map + 0x1bf: 0x1310c, // ontimeupdate + 0x1c0: 0x15e07, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64406, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x4f07, // pattern + 0x1cc: 0x14d10, // oncanplaythrough + 0x1ce: 0x2cf02, // dd + 0x1cf: 0x6f906, // srcset + 0x1d0: 0x17003, // big + 0x1d2: 0x65108, // sortable + 0x1d3: 0x47807, // onkeyup + 0x1d5: 0x59c06, // onplay + 0x1d7: 0x4b004, // meta + 0x1d8: 0x3fb06, // ondrop + 0x1da: 0x60008, // onscroll + 0x1db: 0x1f30b, // crossorigin + 0x1dc: 0x56b0a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x5e02, // td + 0x1df: 0x5830f, // contenteditable + 0x1e0: 0x26a06, // action + 0x1e1: 0x1210b, // playsinline + 0x1e2: 0x42907, // onfocus + 0x1e3: 0x2d808, // hreflang + 0x1e5: 0x50e0a, // onmouseout + 0x1e6: 0x5ea07, // onreset + 0x1e7: 0x11d08, // autoplay + 0x1ea: 0x67506, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3780b, // contextmenu + 0x1ef: 0x52609, // onmouseup + 0x1f1: 0x2c206, // hgroup + 0x1f2: 0x2000f, // allowfullscreen + 0x1f3: 0x4b608, // tabindex + 0x1f6: 0x30707, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2a60e, // formnovalidate + 0x1fb: 0x1c90e, // annotation-xml + 0x1fc: 0x4205, // embed + 0x1fd: 0x21006, // script + 0x1fe: 0x7a06, // dialog + 0x1ff: 0x1d707, // command +} + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobro" + + "wspanoembedetailsampatternoframesetdfnomoduleacronymalignmar" + + "kbdialogallowpaymentrequestrikeytypeallowusermediagroupictur" + + "eversedirnameterubyaltfooterasyncanvasidefaultitleaudioncanc" + + "elabelooptgroupingautofocusandboxmplaceholderautoplaysinline" + + "basefontimeupdateviacacheightmlbdoncanplaythrough1bgsoundisa" + + "bledivarbigblinkindraggablegendblockquotebuttonabortrackcode" + + "fercolgrouplaintextranslatecolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotcrossoriginsmallowfullscreenoscriptfacent" + + "erfieldsetfigcaptionafterprintegrityfigurequiredforeignObjec" + + "tforeignobjectformactionautocompleteerrorformenctypemustmatc" + + "hallengeformmethodformnovalidatetimeformtargethgrouposterhid" + + "denhigh2hreflanghttp-equivideonclickiframeimageimglyph3isind" + + "exismappletitemtypemarqueematheadersortedmaxlength4minlength" + + "5mtextareadonlymultiplemutedoncloseamlessourceoncontextmenui" + + "temidoncopyoncuechangeoncutondblclickondragendondragenterond" + + "ragexitemreferrerpolicyondragleaveondragoverondragstarticleo" + + "ndropzonemptiedondurationchangeonendedonerroronfocuspaceronh" + + "ashchangeoninputmodeloninvalidonkeydownloadonkeypresspellche" + + "ckedonkeyupreloadonlanguagechangeonloadeddatalistingonloaded" + + "metadatabindexonloadendonloadstartonmessageerroronmousedowno" + + "nmouseenteronmouseleaveonmousemoveonmouseoutputonmouseoveron" + + "mouseupromptonmousewheelonofflineononlineonpagehidescitempro" + + "penonceonpageshowbronpastepublicontenteditableonpausemaponpl" + + "ayingonpopstateonprogressrcdoclassectionbluronratechangeonre" + + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + + "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + + "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + + "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + + "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + + "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/atom/table_test.go b/vendor/golang.org/x/net/html/atom/table_test.go new file mode 100644 index 0000000..46d9d70 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table_test.go @@ -0,0 +1,374 @@ +// Code generated by go generate gen.go; DO NOT EDIT. + +//go:generate go run gen.go -test + +package atom + +var testAtomList = []string{ + "a", + "abbr", + "accept", + "accept-charset", + "accesskey", + "acronym", + "action", + "address", + "align", + "allowfullscreen", + "allowpaymentrequest", + "allowusermedia", + "alt", + "annotation", + "annotation-xml", + "applet", + "area", + "article", + "as", + "aside", + "async", + "audio", + "autocomplete", + "autofocus", + "autoplay", + "b", + "base", + "basefont", + "bdi", + "bdo", + "bgsound", + "big", + "blink", + "blockquote", + "body", + "br", + "button", + "canvas", + "caption", + "center", + "challenge", + "charset", + "checked", + "cite", + "class", + "code", + "col", + "colgroup", + "color", + "cols", + "colspan", + "command", + "content", + "contenteditable", + "contextmenu", + "controls", + "coords", + "crossorigin", + "data", + "datalist", + "datetime", + "dd", + "default", + "defer", + "del", + "desc", + "details", + "dfn", + "dialog", + "dir", + "dirname", + "disabled", + "div", + "dl", + "download", + "draggable", + "dropzone", + "dt", + "em", + "embed", + "enctype", + "face", + "fieldset", + "figcaption", + "figure", + "font", + "footer", + "for", + "foreignObject", + "foreignobject", + "form", + "formaction", + "formenctype", + "formmethod", + "formnovalidate", + "formtarget", + "frame", + "frameset", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "headers", + "height", + "hgroup", + "hidden", + "high", + "hr", + "href", + "hreflang", + "html", + "http-equiv", + "i", + "icon", + "id", + "iframe", + "image", + "img", + "input", + "inputmode", + "ins", + "integrity", + "is", + "isindex", + "ismap", + "itemid", + "itemprop", + "itemref", + "itemscope", + "itemtype", + "kbd", + "keygen", + "keytype", + "kind", + "label", + "lang", + "legend", + "li", + "link", + "list", + "listing", + "loop", + "low", + "main", + "malignmark", + "manifest", + "map", + "mark", + "marquee", + "math", + "max", + "maxlength", + "media", + "mediagroup", + "menu", + "menuitem", + "meta", + "meter", + "method", + "mglyph", + "mi", + "min", + "minlength", + "mn", + "mo", + "ms", + "mtext", + "multiple", + "muted", + "name", + "nav", + "nobr", + "noembed", + "noframes", + "nomodule", + "nonce", + "noscript", + "novalidate", + "object", + "ol", + "onabort", + "onafterprint", + "onautocomplete", + "onautocompleteerror", + "onauxclick", + "onbeforeprint", + "onbeforeunload", + "onblur", + "oncancel", + "oncanplay", + "oncanplaythrough", + "onchange", + "onclick", + "onclose", + "oncontextmenu", + "oncopy", + "oncuechange", + "oncut", + "ondblclick", + "ondrag", + "ondragend", + "ondragenter", + "ondragexit", + "ondragleave", + "ondragover", + "ondragstart", + "ondrop", + "ondurationchange", + "onemptied", + "onended", + "onerror", + "onfocus", + "onhashchange", + "oninput", + "oninvalid", + "onkeydown", + "onkeypress", + "onkeyup", + "onlanguagechange", + "onload", + "onloadeddata", + "onloadedmetadata", + "onloadend", + "onloadstart", + "onmessage", + "onmessageerror", + "onmousedown", + "onmouseenter", + "onmouseleave", + "onmousemove", + "onmouseout", + "onmouseover", + "onmouseup", + "onmousewheel", + "onoffline", + "ononline", + "onpagehide", + "onpageshow", + "onpaste", + "onpause", + "onplay", + "onplaying", + "onpopstate", + "onprogress", + "onratechange", + "onrejectionhandled", + "onreset", + "onresize", + "onscroll", + "onsecuritypolicyviolation", + "onseeked", + "onseeking", + "onselect", + "onshow", + "onsort", + "onstalled", + "onstorage", + "onsubmit", + "onsuspend", + "ontimeupdate", + "ontoggle", + "onunhandledrejection", + "onunload", + "onvolumechange", + "onwaiting", + "onwheel", + "open", + "optgroup", + "optimum", + "option", + "output", + "p", + "param", + "pattern", + "picture", + "ping", + "placeholder", + "plaintext", + "playsinline", + "poster", + "pre", + "preload", + "progress", + "prompt", + "public", + "q", + "radiogroup", + "readonly", + "referrerpolicy", + "rel", + "required", + "reversed", + "rows", + "rowspan", + "rp", + "rt", + "ruby", + "s", + "samp", + "sandbox", + "scope", + "scoped", + "script", + "seamless", + "section", + "select", + "selected", + "shape", + "size", + "sizes", + "slot", + "small", + "sortable", + "sorted", + "source", + "spacer", + "span", + "spellcheck", + "src", + "srcdoc", + "srclang", + "srcset", + "start", + "step", + "strike", + "strong", + "style", + "sub", + "summary", + "sup", + "svg", + "system", + "tabindex", + "table", + "target", + "tbody", + "td", + "template", + "textarea", + "tfoot", + "th", + "thead", + "time", + "title", + "tr", + "track", + "translate", + "tt", + "type", + "typemustmatch", + "u", + "ul", + "updateviacache", + "usemap", + "value", + "var", + "video", + "wbr", + "width", + "workertype", + "wrap", + "xmp", +} diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go new file mode 100644 index 0000000..13bed15 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset.go @@ -0,0 +1,257 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package charset provides common text encodings for HTML documents. +// +// The mapping from encoding labels to encodings is defined at +// https://encoding.spec.whatwg.org/. +package charset // import "golang.org/x/net/html/charset" + +import ( + "bytes" + "fmt" + "io" + "mime" + "strings" + "unicode/utf8" + + "golang.org/x/net/html" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/htmlindex" + "golang.org/x/text/transform" +) + +// Lookup returns the encoding with the specified label, and its canonical +// name. It returns nil and the empty string if label is not one of the +// standard encodings for HTML. Matching is case-insensitive and ignores +// leading and trailing whitespace. Encoders will use HTML escape sequences for +// runes that are not supported by the character set. +func Lookup(label string) (e encoding.Encoding, name string) { + e, err := htmlindex.Get(label) + if err != nil { + return nil, "" + } + name, _ = htmlindex.Name(e) + return &htmlEncoding{e}, name +} + +type htmlEncoding struct{ encoding.Encoding } + +func (h *htmlEncoding) NewEncoder() *encoding.Encoder { + // HTML requires a non-terminating legacy encoder. We use HTML escapes to + // substitute unsupported code points. + return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder()) +} + +// DetermineEncoding determines the encoding of an HTML document by examining +// up to the first 1024 bytes of content and the declared Content-Type. +// +// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding +func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) { + if len(content) > 1024 { + content = content[:1024] + } + + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + e, name = Lookup(b.enc) + return e, name, true + } + } + + if _, params, err := mime.ParseMediaType(contentType); err == nil { + if cs, ok := params["charset"]; ok { + if e, name = Lookup(cs); e != nil { + return e, name, true + } + } + } + + if len(content) > 0 { + e, name = prescan(content) + if e != nil { + return e, name, false + } + } + + // Try to detect UTF-8. + // First eliminate any partial rune at the end. + for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { + b := content[i] + if b < 0x80 { + break + } + if utf8.RuneStart(b) { + content = content[:i] + break + } + } + hasHighBit := false + for _, c := range content { + if c >= 0x80 { + hasHighBit = true + break + } + } + if hasHighBit && utf8.Valid(content) { + return encoding.Nop, "utf-8", false + } + + // TODO: change default depending on user's locale? + return charmap.Windows1252, "windows-1252", false +} + +// NewReader returns an io.Reader that converts the content of r to UTF-8. +// It calls DetermineEncoding to find out what r's encoding is. +func NewReader(r io.Reader, contentType string) (io.Reader, error) { + preview := make([]byte, 1024) + n, err := io.ReadFull(r, preview) + switch { + case err == io.ErrUnexpectedEOF: + preview = preview[:n] + r = bytes.NewReader(preview) + case err != nil: + return nil, err + default: + r = io.MultiReader(bytes.NewReader(preview), r) + } + + if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop { + r = transform.NewReader(r, e.NewDecoder()) + } + return r, nil +} + +// NewReaderLabel returns a reader that converts from the specified charset to +// UTF-8. It uses Lookup to find the encoding that corresponds to label, and +// returns an error if Lookup returns nil. It is suitable for use as +// encoding/xml.Decoder's CharsetReader function. +func NewReaderLabel(label string, input io.Reader) (io.Reader, error) { + e, _ := Lookup(label) + if e == nil { + return nil, fmt.Errorf("unsupported charset: %q", label) + } + return transform.NewReader(input, e.NewDecoder()), nil +} + +func prescan(content []byte) (e encoding.Encoding, name string) { + z := html.NewTokenizer(bytes.NewReader(content)) + for { + switch z.Next() { + case html.ErrorToken: + return nil, "" + + case html.StartTagToken, html.SelfClosingTagToken: + tagName, hasAttr := z.TagName() + if !bytes.Equal(tagName, []byte("meta")) { + continue + } + attrList := make(map[string]bool) + gotPragma := false + + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + needPragma := dontKnow + + name = "" + e = nil + for hasAttr { + var key, val []byte + key, val, hasAttr = z.TagAttr() + ks := string(key) + if attrList[ks] { + continue + } + attrList[ks] = true + for i, c := range val { + if 'A' <= c && c <= 'Z' { + val[i] = c + 0x20 + } + } + + switch ks { + case "http-equiv": + if bytes.Equal(val, []byte("content-type")) { + gotPragma = true + } + + case "content": + if e == nil { + name = fromMetaElement(string(val)) + if name != "" { + e, name = Lookup(name) + if e != nil { + needPragma = doNeedPragma + } + } + } + + case "charset": + e, name = Lookup(string(val)) + needPragma = doNotNeedPragma + } + } + + if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { + continue + } + + if strings.HasPrefix(name, "utf-16") { + name = "utf-8" + e = encoding.Nop + } + + if e != nil { + return e, name + } + } + } +} + +func fromMetaElement(s string) string { + for s != "" { + csLoc := strings.Index(s, "charset") + if csLoc == -1 { + return "" + } + s = s[csLoc+len("charset"):] + s = strings.TrimLeft(s, " \t\n\f\r") + if !strings.HasPrefix(s, "=") { + continue + } + s = s[1:] + s = strings.TrimLeft(s, " \t\n\f\r") + if s == "" { + return "" + } + if q := s[0]; q == '"' || q == '\'' { + s = s[1:] + closeQuote := strings.IndexRune(s, rune(q)) + if closeQuote == -1 { + return "" + } + return s[:closeQuote] + } + + end := strings.IndexAny(s, "; \t\n\f\r") + if end == -1 { + end = len(s) + } + return s[:end] + } + return "" +} + +var boms = []struct { + bom []byte + enc string +}{ + {[]byte{0xfe, 0xff}, "utf-16be"}, + {[]byte{0xff, 0xfe}, "utf-16le"}, + {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, +} diff --git a/vendor/golang.org/x/net/html/charset/charset_test.go b/vendor/golang.org/x/net/html/charset/charset_test.go new file mode 100644 index 0000000..e4e7d86 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset_test.go @@ -0,0 +1,237 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package charset + +import ( + "bytes" + "encoding/xml" + "io/ioutil" + "runtime" + "strings" + "testing" + + "golang.org/x/text/transform" +) + +func transformString(t transform.Transformer, s string) (string, error) { + r := transform.NewReader(strings.NewReader(s), t) + b, err := ioutil.ReadAll(r) + return string(b), err +} + +type testCase struct { + utf8, other, otherEncoding string +} + +// testCases for encoding and decoding. +var testCases = []testCase{ + {"Résumé", "Résumé", "utf8"}, + {"Résumé", "R\xe9sum\xe9", "latin1"}, + {"これは漢字です。", "S0\x8c0o0\"oW[g0Y0\x020", "UTF-16LE"}, + {"これは漢字です。", "0S0\x8c0oo\"[W0g0Y0\x02", "UTF-16BE"}, + {"Hello, world", "Hello, world", "ASCII"}, + {"Gdańsk", "Gda\xf1sk", "ISO-8859-2"}, + {"Ââ Čč Đđ Ŋŋ Õõ Šš Žž Åå Ää", "\xc2\xe2 \xc8\xe8 \xa9\xb9 \xaf\xbf \xd5\xf5 \xaa\xba \xac\xbc \xc5\xe5 \xc4\xe4", "ISO-8859-10"}, + {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "ISO-8859-11"}, + {"latviešu", "latvie\xf0u", "ISO-8859-13"}, + {"Seònaid", "Se\xf2naid", "ISO-8859-14"}, + {"€1 is cheap", "\xa41 is cheap", "ISO-8859-15"}, + {"românește", "rom\xe2ne\xbate", "ISO-8859-16"}, + {"nutraĵo", "nutra\xbco", "ISO-8859-3"}, + {"Kalâdlit", "Kal\xe2dlit", "ISO-8859-4"}, + {"русский", "\xe0\xe3\xe1\xe1\xda\xd8\xd9", "ISO-8859-5"}, + {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "ISO-8859-7"}, + {"Kağan", "Ka\xf0an", "ISO-8859-9"}, + {"Résumé", "R\x8esum\x8e", "macintosh"}, + {"Gdańsk", "Gda\xf1sk", "windows-1250"}, + {"русский", "\xf0\xf3\xf1\xf1\xea\xe8\xe9", "windows-1251"}, + {"Résumé", "R\xe9sum\xe9", "windows-1252"}, + {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "windows-1253"}, + {"Kağan", "Ka\xf0an", "windows-1254"}, + {"עִבְרִית", "\xf2\xc4\xe1\xc0\xf8\xc4\xe9\xfa", "windows-1255"}, + {"العربية", "\xc7\xe1\xda\xd1\xc8\xed\xc9", "windows-1256"}, + {"latviešu", "latvie\xf0u", "windows-1257"}, + {"Việt", "Vi\xea\xf2t", "windows-1258"}, + {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "windows-874"}, + {"русский", "\xd2\xd5\xd3\xd3\xcb\xc9\xca", "KOI8-R"}, + {"українська", "\xd5\xcb\xd2\xc1\xa7\xce\xd3\xd8\xcb\xc1", "KOI8-U"}, + {"Hello 常用國字標準字體表", "Hello \xb1`\xa5\u03b0\xea\xa6r\xbc\u0437\u01e6r\xc5\xe9\xaa\xed", "big5"}, + {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gbk"}, + {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gb18030"}, + {"עִבְרִית", "\x81\x30\xfb\x30\x81\x30\xf6\x34\x81\x30\xf9\x33\x81\x30\xf6\x30\x81\x30\xfb\x36\x81\x30\xf6\x34\x81\x30\xfa\x31\x81\x30\xfb\x38", "gb18030"}, + {"㧯", "\x82\x31\x89\x38", "gb18030"}, + {"これは漢字です。", "\x82\xb1\x82\xea\x82\xcd\x8a\xbf\x8e\x9a\x82\xc5\x82\xb7\x81B", "SJIS"}, + {"Hello, 世界!", "Hello, \x90\xa2\x8aE!", "SJIS"}, + {"イウエオカ", "\xb2\xb3\xb4\xb5\xb6", "SJIS"}, + {"これは漢字です。", "\xa4\xb3\xa4\xec\xa4\u03f4\xc1\xbb\xfa\xa4\u01e4\xb9\xa1\xa3", "EUC-JP"}, + {"Hello, 世界!", "Hello, \x1b$B@$3&\x1b(B!", "ISO-2022-JP"}, + {"다음과 같은 조건을 따라야 합니다: 저작자표시", "\xb4\xd9\xc0\xbd\xb0\xfa \xb0\xb0\xc0\xba \xc1\xb6\xb0\xc7\xc0\xbb \xb5\xfb\xb6\xf3\xbe\xdf \xc7մϴ\xd9: \xc0\xfa\xc0\xdb\xc0\xdaǥ\xbd\xc3", "EUC-KR"}, +} + +func TestDecode(t *testing.T) { + testCases := append(testCases, []testCase{ + // Replace multi-byte maximum subpart of ill-formed subsequence with + // single replacement character (WhatWG requirement). + {"Rés\ufffdumé", "Rés\xe1\x80umé", "utf8"}, + }...) + for _, tc := range testCases { + e, _ := Lookup(tc.otherEncoding) + if e == nil { + t.Errorf("%s: not found", tc.otherEncoding) + continue + } + s, err := transformString(e.NewDecoder(), tc.other) + if err != nil { + t.Errorf("%s: decode %q: %v", tc.otherEncoding, tc.other, err) + continue + } + if s != tc.utf8 { + t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.utf8) + } + } +} + +func TestEncode(t *testing.T) { + testCases := append(testCases, []testCase{ + // Use Go-style replacement. + {"Rés\xe1\x80umé", "Rés\ufffd\ufffdumé", "utf8"}, + // U+0144 LATIN SMALL LETTER N WITH ACUTE not supported by encoding. + {"Gdańsk", "Gdańsk", "ISO-8859-11"}, + {"\ufffd", "�", "ISO-8859-11"}, + {"a\xe1\x80b", "a��b", "ISO-8859-11"}, + }...) + for _, tc := range testCases { + e, _ := Lookup(tc.otherEncoding) + if e == nil { + t.Errorf("%s: not found", tc.otherEncoding) + continue + } + s, err := transformString(e.NewEncoder(), tc.utf8) + if err != nil { + t.Errorf("%s: encode %q: %s", tc.otherEncoding, tc.utf8, err) + continue + } + if s != tc.other { + t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.other) + } + } +} + +var sniffTestCases = []struct { + filename, declared, want string +}{ + {"HTTP-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"}, + {"UTF-16LE-BOM.html", "", "utf-16le"}, + {"UTF-16BE-BOM.html", "", "utf-16be"}, + {"meta-content-attribute.html", "text/html", "iso-8859-15"}, + {"meta-charset-attribute.html", "text/html", "iso-8859-15"}, + {"No-encoding-declaration.html", "text/html", "utf-8"}, + {"HTTP-vs-UTF-8-BOM.html", "text/html; charset=iso-8859-15", "utf-8"}, + {"HTTP-vs-meta-content.html", "text/html; charset=iso-8859-15", "iso-8859-15"}, + {"HTTP-vs-meta-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"}, + {"UTF-8-BOM-vs-meta-content.html", "text/html", "utf-8"}, + {"UTF-8-BOM-vs-meta-charset.html", "text/html", "utf-8"}, +} + +func TestSniff(t *testing.T) { + switch runtime.GOOS { + case "nacl": // platforms that don't permit direct file system access + t.Skipf("not supported on %q", runtime.GOOS) + } + + for _, tc := range sniffTestCases { + content, err := ioutil.ReadFile("testdata/" + tc.filename) + if err != nil { + t.Errorf("%s: error reading file: %v", tc.filename, err) + continue + } + + _, name, _ := DetermineEncoding(content, tc.declared) + if name != tc.want { + t.Errorf("%s: got %q, want %q", tc.filename, name, tc.want) + continue + } + } +} + +func TestReader(t *testing.T) { + switch runtime.GOOS { + case "nacl": // platforms that don't permit direct file system access + t.Skipf("not supported on %q", runtime.GOOS) + } + + for _, tc := range sniffTestCases { + content, err := ioutil.ReadFile("testdata/" + tc.filename) + if err != nil { + t.Errorf("%s: error reading file: %v", tc.filename, err) + continue + } + + r, err := NewReader(bytes.NewReader(content), tc.declared) + if err != nil { + t.Errorf("%s: error creating reader: %v", tc.filename, err) + continue + } + + got, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("%s: error reading from charset.NewReader: %v", tc.filename, err) + continue + } + + e, _ := Lookup(tc.want) + want, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder())) + if err != nil { + t.Errorf("%s: error decoding with hard-coded charset name: %v", tc.filename, err) + continue + } + + if !bytes.Equal(got, want) { + t.Errorf("%s: got %q, want %q", tc.filename, got, want) + continue + } + } +} + +var metaTestCases = []struct { + meta, want string +}{ + {"", ""}, + {"text/html", ""}, + {"text/html; charset utf-8", ""}, + {"text/html; charset=latin-2", "latin-2"}, + {"text/html; charset; charset = utf-8", "utf-8"}, + {`charset="big5"`, "big5"}, + {"charset='shift_jis'", "shift_jis"}, +} + +func TestFromMeta(t *testing.T) { + for _, tc := range metaTestCases { + got := fromMetaElement(tc.meta) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.meta, got, tc.want) + } + } +} + +func TestXML(t *testing.T) { + const s = "r\xe9sum\xe9" + + d := xml.NewDecoder(strings.NewReader(s)) + d.CharsetReader = NewReaderLabel + + var a struct { + Word string + } + err := d.Decode(&a) + if err != nil { + t.Fatalf("Decode: %v", err) + } + + want := "résumé" + if a.Word != want { + t.Errorf("got %q, want %q", a.Word, want) + } +} diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html new file mode 100644 index 0000000..9915fa0 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html @@ -0,0 +1,48 @@ + + + + HTTP charset + + + + + + + + + + + +

HTTP charset

+ + +
+ + +
 
+ + + + + +
+

The character encoding of a page can be set using the HTTP header charset declaration.

+

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.

+
+
+
HTML5
+

the-input-byte-stream-001
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html new file mode 100644 index 0000000..26e5d8b --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html @@ -0,0 +1,48 @@ + + + + HTTP vs UTF-8 BOM + + + + + + + + + + + +

HTTP vs UTF-8 BOM

+ + +
+ + +
 
+ + + + + +
+

A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.

+

The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

If the test is unsuccessful, the characters  should appear at the top of the page. These represent the bytes that make up the UTF-8 signature when encountered in the ISO 8859-15 encoding.

+
+
+
HTML5
+

the-input-byte-stream-034
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html new file mode 100644 index 0000000..2f07e95 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html @@ -0,0 +1,49 @@ + + + + HTTP vs meta charset + + + + + + + + + + + +

HTTP vs meta charset

+ + +
+ + +
 
+ + + + + +
+

The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.

+

The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-1.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-018
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html new file mode 100644 index 0000000..6853cdd --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html @@ -0,0 +1,49 @@ + + + + HTTP vs meta content + + + + + + + + + + + +

HTTP vs meta content

+ + +
+ + +
 
+ + + + + +
+

The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.

+

The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-1.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-016
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html new file mode 100644 index 0000000..612e26c --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html @@ -0,0 +1,47 @@ + + + + No encoding declaration + + + + + + + + + + + +

No encoding declaration

+ + +
+ + +
 
+ + + + + +
+

A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.

+

The test on this page contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-015
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/README b/vendor/golang.org/x/net/html/charset/testdata/README new file mode 100644 index 0000000..38ef0f9 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/README @@ -0,0 +1,9 @@ +These test cases come from +http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics + +Distributed under both the W3C Test Suite License +(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license) +and the W3C 3-clause BSD License +(http://www.w3.org/Consortium/Legal/2008/03-bsd-license). +To contribute to a W3C Test Suite, see the policies and contribution +forms (http://www.w3.org/2004/10/27-testcases). diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html new file mode 100644 index 0000000000000000000000000000000000000000..3abf7a9343c20518e57dfea58b374fb0f4fb58a1 GIT binary patch literal 2670 zcmcJR?QRoS5Qc}JAoU&=BQ-(7b^;2j8i*i3RV1JlO@;VXIsPurV!WHiDdLW}i`*CO z^UnC>tih=KsVr;H&Y7?C&O3AV(?534uG?e##U9y_y|!QNi4``n+D>d{2lky^LnFNx z?9HrarH$>rwQR_$g)Hk0*&STI*EYq|47~&U9sfUB+ji})9eR{QqCUra7oDsZ5obtB zdxP%<)-$4Q;rSHJiM>U(#ZI=;?n^BC?Dp6lu=~_1-lnX3u03&2BlmQIY>L+!Uq7XoytKw^Q#oZSM?3*J?)&ojG&yzQRkC!Ml5JE?ax;lp_NYEcdUht`ZswOviB~L5hmJ|pXI71nn20w;>vG! zQGB$EE9&wC``&J#_Ym~PgRu-Bd>1!pOp0||k`kr=VJ zfH6I6rmRaeHA7U-A^OTsT+|d2a^i(>DePzZ{)ibXoCBvJnuYrd-3kkN$uy{qQK;=*Y;S87ro12aTgu^i*%f8zC3>a}9DIe4cfxOzsCw&(cqvP9{ud{N6f` z#TNDY(B6@Gpr|uN+%&x^XZjBHdc@2vsM(Tyc2=vshHQ5w+obmp>tuWT(t4BTUGAQw zxeI$UGSLUBg=WFbF;4f@4=^P2AgY@CFn8A`bcC=_&~)fiDe)#cUARRBzJ^k|%X)69 z+{Cb`wq}Rsg%B62CC_tK!AV(W{(MV?#mndR46CU#BUN<{8e?*oT+!pE5wF#O#TR#a z$9qRT)tpbw8zAI~QQJg2C3|6$I%(T(;`zOMy6SO+&;pG=c#2P|P-WZn$$DpWJlC3U z3*nvmz zwP{u~r$L?-m3uqp9I1+#3yE|3M$(s-BEtih=LQ>`qYoiktOop(wi%!;yh%+Rm z{e|xntY<{q!1F1Z6MKtngPm-p-4|H&+3m4AVE3_AyiHm6Tzlf4M(*ht*%YrezJ6kr zHGj45pc?64*$Cm%-zseWMA`x;)v*~jA=i}szqts9xmQkS`M11|(H7bTXAycsXU53+ zJ?120SRZeyiFjW7enPN`bxk$IaWV3o48oJF7D&2ysoY;6(s6%6vVfaYd&mC=erK!) zNGI^7upQgN)53OHe_VE<@J+G8*Y|p*)zB2Thdi}+YR<5QWHm!|a_*AoZXuv7)$xe| zm3Q$D7{|#}{m4X&UY!6(ZhyYi2(5JLzGE$H)W6BQklnjPMwn<Yvv7Z*TVWwD*=E3QpH37* z#lqXJA0A~J9T_<^W5smspmDg2p6ac5Bjn+~LAoow%1TCdZ*$K8`O zw_$HaCi+0N&@7la#_7KL5r$+QL{)Pi=I&aDjt~|Knht#`CEi4*3%97i_fSfASlwUz0=3V0GCxY}z81UC-nP=CGt2OqYV$ zoRCo+qM9YX*3FFORLC=E3B~S@+KROyk4r5 yX7?DaslDfIebqXgC!KKp4IYy+W~X?ddE6o=`A+x#x0AK&6MF#W&AXxbRrv+SX}PNa literal 0 HcmV?d00001 diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html new file mode 100644 index 0000000..83de433 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html @@ -0,0 +1,49 @@ + + + + UTF-8 BOM vs meta charset + + + + + + + + + + + +

UTF-8 BOM vs meta charset

+ + +
+ + +
 
+ + + + + +
+

A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.

+

The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-038
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html new file mode 100644 index 0000000..501aac2 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html @@ -0,0 +1,48 @@ + + + + UTF-8 BOM vs meta content + + + + + + + + + + + +

UTF-8 BOM vs meta content

+ + +
+ + +
 
+ + + + + +
+

A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.

+

The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-037
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html new file mode 100644 index 0000000..2d7d25a --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html @@ -0,0 +1,48 @@ + + + + meta charset attribute + + + + + + + + + + + +

meta charset attribute

+ + +
+ + +
 
+ + + + + +
+

The character encoding of the page can be set by a meta element with charset attribute.

+

The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-009
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html new file mode 100644 index 0000000..1c3f228 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html @@ -0,0 +1,48 @@ + + + + meta content attribute + + + + + + + + + + + +

meta content attribute

+ + +
+ + +
 
+ + + + + +
+

The character encoding of the page can be set by a meta element with http-equiv and content attributes.

+

The only character encoding declaration for this HTML file is in the content attribute of the meta element, which declares the encoding to be ISO 8859-15.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-007
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 0000000..5eb7c5a --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,104 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility. + "keygen": true, + "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "svg": + return element.Data == "foreignObject" + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 0000000..822ed42 --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token. +Entities (such as "<") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. + return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case html.ErrorToken: + return z.Err() + case html.TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case html.StartTagToken, html.EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == html.StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization +*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 0000000..c484e5a --- /dev/null +++ b/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. + space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. + if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. +var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 0000000..a50c04c --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. +// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + "Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + "DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', + "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', + "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": '\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + "Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": '\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": '\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": '\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + "angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', + "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": '\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": '\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + "diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + "emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": '\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + "harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + "isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + "ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": '\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + "mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + "nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": '\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + "piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', + "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": '\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": '\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": '\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": '\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + "upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + "xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + "REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', + "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. + // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": {'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/entity_test.go b/vendor/golang.org/x/net/html/entity_test.go new file mode 100644 index 0000000..b53f866 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity_test.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "testing" + "unicode/utf8" +) + +func TestEntityLength(t *testing.T) { + // We verify that the length of UTF-8 encoding of each value is <= 1 + len(key). + // The +1 comes from the leading "&". This property implies that the length of + // unescaped text is <= the length of escaped text. + for k, v := range entity { + if 1+len(k) < utf8.RuneLen(v) { + t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v)) + } + if len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' { + t.Errorf("entity name %s is %d characters, but longestEntityWithoutSemicolon=%d", k, len(k), longestEntityWithoutSemicolon) + } + } + for k, v := range entity2 { + if 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) { + t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v[0]) + string(v[1])) + } + } +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 0000000..d856139 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,258 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "<" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". + b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. + b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. + } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { + // No-op. + } else if x := entity[entityName]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + i + } else if x := entity2[entityName]; x[0] != 0 { + dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) + return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i + } else if !attribute { + maxLen := len(entityName) - 1 + if maxLen > longestEntityWithoutSemicolon { + maxLen = longestEntityWithoutSemicolon + } + for j := maxLen; j > 1; j-- { + if x := entity[entityName[:j]]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 + } + } + } + + dst1, src1 = dst+i, src+i + copy(b[dst:dst1], b[src:src1]) + return dst1, src1 +} + +// unescape unescapes b's entities in-place, so that "a<b" becomes "a': + esc = ">" + case '"': + // """ is shorter than """. + esc = """ + case '\r': + esc = " " + default: + panic("unrecognized escape character") + } + s = s[i+1:] + if _, err := w.WriteString(esc); err != nil { + return err + } + i = strings.IndexAny(s, escapedChars) + } + _, err := w.WriteString(s) + return err +} + +// EscapeString escapes special characters like "<" to become "<". It +// escapes only five such characters: <, >, &, ' and ". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. +func EscapeString(s string) string { + if strings.IndexAny(s, escapedChars) == -1 { + return s + } + var buf bytes.Buffer + escape(&buf, s) + return buf.String() +} + +// UnescapeString unescapes entities like "<" to become "<". It unescapes a +// larger range of entities than EscapeString escapes. For example, "á" +// unescapes to "á", as does "á" and "&xE1;". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. +func UnescapeString(s string) string { + for _, c := range s { + if c == '&' { + return string(unescape([]byte(s), false)) + } + } + return s +} diff --git a/vendor/golang.org/x/net/html/escape_test.go b/vendor/golang.org/x/net/html/escape_test.go new file mode 100644 index 0000000..b405d4b --- /dev/null +++ b/vendor/golang.org/x/net/html/escape_test.go @@ -0,0 +1,97 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import "testing" + +type unescapeTest struct { + // A short description of the test case. + desc string + // The HTML text. + html string + // The unescaped text. + unescaped string +} + +var unescapeTests = []unescapeTest{ + // Handle no entities. + { + "copy", + "A\ttext\nstring", + "A\ttext\nstring", + }, + // Handle simple named entities. + { + "simple", + "& > <", + "& > <", + }, + // Handle hitting the end of the string. + { + "stringEnd", + "& &", + "& &", + }, + // Handle entities with two codepoints. + { + "multiCodepoint", + "text ⋛︀ blah", + "text \u22db\ufe00 blah", + }, + // Handle decimal numeric entities. + { + "decimalEntity", + "Delta = Δ ", + "Delta = Δ ", + }, + // Handle hexadecimal numeric entities. + { + "hexadecimalEntity", + "Lambda = λ = λ ", + "Lambda = λ = λ ", + }, + // Handle numeric early termination. + { + "numericEnds", + "&# &#x €43 © = ©f = ©", + "&# &#x €43 © = ©f = ©", + }, + // Handle numeric ISO-8859-1 entity replacements. + { + "numericReplacements", + "Footnote‡", + "Footnote‡", + }, +} + +func TestUnescape(t *testing.T) { + for _, tt := range unescapeTests { + unescaped := UnescapeString(tt.html) + if unescaped != tt.unescaped { + t.Errorf("TestUnescape %s: want %q, got %q", tt.desc, tt.unescaped, unescaped) + } + } +} + +func TestUnescapeEscape(t *testing.T) { + ss := []string{ + ``, + `abc def`, + `a & b`, + `a&b`, + `a & b`, + `"`, + `"`, + `"<&>"`, + `"<&>"`, + `3&5==1 && 0<1, "0<1", a+acute=á`, + `The special characters are: <, >, &, ' and "`, + } + for _, s := range ss { + if got := UnescapeString(EscapeString(s)); got != s { + t.Errorf("got %q want %q", got, s) + } + } +} diff --git a/vendor/golang.org/x/net/html/example_test.go b/vendor/golang.org/x/net/html/example_test.go new file mode 100644 index 0000000..0b06ed7 --- /dev/null +++ b/vendor/golang.org/x/net/html/example_test.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This example demonstrates parsing HTML data and walking the resulting tree. +package html_test + +import ( + "fmt" + "log" + "strings" + + "golang.org/x/net/html" +) + +func ExampleParse() { + s := `

Links:

` + doc, err := html.Parse(strings.NewReader(s)) + if err != nil { + log.Fatal(err) + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + for _, a := range n.Attr { + if a.Key == "href" { + fmt.Println(a.Val) + break + } + } + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + // Output: + // foo + // /bar/baz +} diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go new file mode 100644 index 0000000..01477a9 --- /dev/null +++ b/vendor/golang.org/x/net/html/foreign.go @@ -0,0 +1,226 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { + for i := range aa { + if newName, ok := nameMap[aa[i].Key]; ok { + aa[i].Key = newName + } + } +} + +func adjustForeignAttributes(aa []Attribute) { + for i, a := range aa { + if a.Key == "" || a.Key[0] != 'x' { + continue + } + switch a.Key { + case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", + "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": + j := strings.Index(a.Key, ":") + aa[i].Namespace = a.Key[:j] + aa[i].Key = a.Key[j+1:] + } + } +} + +func htmlIntegrationPoint(n *Node) bool { + if n.Type != ElementNode { + return false + } + switch n.Namespace { + case "math": + if n.Data == "annotation-xml" { + for _, a := range n.Attr { + if a.Key == "encoding" { + val := strings.ToLower(a.Val) + if val == "text/html" || val == "application/xhtml+xml" { + return true + } + } + } + } + case "svg": + switch n.Data { + case "desc", "foreignObject", "title": + return true + } + } + return false +} + +func mathMLTextIntegrationPoint(n *Node) bool { + if n.Namespace != "math" { + return false + } + switch n.Data { + case "mi", "mo", "mn", "ms", "mtext": + return true + } + return false +} + +// Section 12.2.6.5. +var breakout = map[string]bool{ + "b": true, + "big": true, + "blockquote": true, + "body": true, + "br": true, + "center": true, + "code": true, + "dd": true, + "div": true, + "dl": true, + "dt": true, + "em": true, + "embed": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "hr": true, + "i": true, + "img": true, + "li": true, + "listing": true, + "menu": true, + "meta": true, + "nobr": true, + "ol": true, + "p": true, + "pre": true, + "ruby": true, + "s": true, + "small": true, + "span": true, + "strong": true, + "strike": true, + "sub": true, + "sup": true, + "table": true, + "tt": true, + "u": true, + "ul": true, + "var": true, +} + +// Section 12.2.6.5. +var svgTagNameAdjustments = map[string]string{ + "altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath", +} + +// Section 12.2.6.1 +var mathMLAttributeAdjustments = map[string]string{ + "definitionurl": "definitionURL", +} + +var svgAttributeAdjustments = map[string]string{ + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", +} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go new file mode 100644 index 0000000..6f136c4 --- /dev/null +++ b/vendor/golang.org/x/net/html/node.go @@ -0,0 +1,194 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "golang.org/x/net/html/atom" +) + +// A NodeType is the type of a Node. +type NodeType uint32 + +const ( + ErrorNode NodeType = iota + TextNode + DocumentNode + ElementNode + CommentNode + DoctypeNode + scopeMarkerNode +) + +// Section 12.2.4.3 says "The markers are inserted when entering applet, +// object, marquee, template, td, th, and caption elements, and are used +// to prevent formatting from "leaking" into applet, object, marquee, +// template, td, th, and caption elements". +var scopeMarker = Node{Type: scopeMarkerNode} + +// A Node consists of a NodeType and some Data (tag name for element nodes, +// content for text) and are part of a tree of Nodes. Element nodes may also +// have a Namespace and contain a slice of Attributes. Data is unescaped, so +// that it looks like "a 0 { + return (*s)[i-1] + } + return nil +} + +// index returns the index of the top-most occurrence of n in the stack, or -1 +// if n is not present. +func (s *nodeStack) index(n *Node) int { + for i := len(*s) - 1; i >= 0; i-- { + if (*s)[i] == n { + return i + } + } + return -1 +} + +// insert inserts a node at the given index. +func (s *nodeStack) insert(i int, n *Node) { + (*s) = append(*s, nil) + copy((*s)[i+1:], (*s)[i:]) + (*s)[i] = n +} + +// remove removes a node from the stack. It is a no-op if n is not present. +func (s *nodeStack) remove(n *Node) { + i := s.index(n) + if i == -1 { + return + } + copy((*s)[i:], (*s)[i+1:]) + j := len(*s) - 1 + (*s)[j] = nil + *s = (*s)[:j] +} diff --git a/vendor/golang.org/x/net/html/node_test.go b/vendor/golang.org/x/net/html/node_test.go new file mode 100644 index 0000000..471102f --- /dev/null +++ b/vendor/golang.org/x/net/html/node_test.go @@ -0,0 +1,146 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "fmt" +) + +// checkTreeConsistency checks that a node and its descendants are all +// consistent in their parent/child/sibling relationships. +func checkTreeConsistency(n *Node) error { + return checkTreeConsistency1(n, 0) +} + +func checkTreeConsistency1(n *Node, depth int) error { + if depth == 1e4 { + return fmt.Errorf("html: tree looks like it contains a cycle") + } + if err := checkNodeConsistency(n); err != nil { + return err + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + if err := checkTreeConsistency1(c, depth+1); err != nil { + return err + } + } + return nil +} + +// checkNodeConsistency checks that a node's parent/child/sibling relationships +// are consistent. +func checkNodeConsistency(n *Node) error { + if n == nil { + return nil + } + + nParent := 0 + for p := n.Parent; p != nil; p = p.Parent { + nParent++ + if nParent == 1e4 { + return fmt.Errorf("html: parent list looks like an infinite loop") + } + } + + nForward := 0 + for c := n.FirstChild; c != nil; c = c.NextSibling { + nForward++ + if nForward == 1e6 { + return fmt.Errorf("html: forward list of children looks like an infinite loop") + } + if c.Parent != n { + return fmt.Errorf("html: inconsistent child/parent relationship") + } + } + + nBackward := 0 + for c := n.LastChild; c != nil; c = c.PrevSibling { + nBackward++ + if nBackward == 1e6 { + return fmt.Errorf("html: backward list of children looks like an infinite loop") + } + if c.Parent != n { + return fmt.Errorf("html: inconsistent child/parent relationship") + } + } + + if n.Parent != nil { + if n.Parent == n { + return fmt.Errorf("html: inconsistent parent relationship") + } + if n.Parent == n.FirstChild { + return fmt.Errorf("html: inconsistent parent/first relationship") + } + if n.Parent == n.LastChild { + return fmt.Errorf("html: inconsistent parent/last relationship") + } + if n.Parent == n.PrevSibling { + return fmt.Errorf("html: inconsistent parent/prev relationship") + } + if n.Parent == n.NextSibling { + return fmt.Errorf("html: inconsistent parent/next relationship") + } + + parentHasNAsAChild := false + for c := n.Parent.FirstChild; c != nil; c = c.NextSibling { + if c == n { + parentHasNAsAChild = true + break + } + } + if !parentHasNAsAChild { + return fmt.Errorf("html: inconsistent parent/child relationship") + } + } + + if n.PrevSibling != nil && n.PrevSibling.NextSibling != n { + return fmt.Errorf("html: inconsistent prev/next relationship") + } + if n.NextSibling != nil && n.NextSibling.PrevSibling != n { + return fmt.Errorf("html: inconsistent next/prev relationship") + } + + if (n.FirstChild == nil) != (n.LastChild == nil) { + return fmt.Errorf("html: inconsistent first/last relationship") + } + if n.FirstChild != nil && n.FirstChild == n.LastChild { + // We have a sole child. + if n.FirstChild.PrevSibling != nil || n.FirstChild.NextSibling != nil { + return fmt.Errorf("html: inconsistent sole child's sibling relationship") + } + } + + seen := map[*Node]bool{} + + var last *Node + for c := n.FirstChild; c != nil; c = c.NextSibling { + if seen[c] { + return fmt.Errorf("html: inconsistent repeated child") + } + seen[c] = true + last = c + } + if last != n.LastChild { + return fmt.Errorf("html: inconsistent last relationship") + } + + var first *Node + for c := n.LastChild; c != nil; c = c.PrevSibling { + if !seen[c] { + return fmt.Errorf("html: inconsistent missing child") + } + delete(seen, c) + first = c + } + if first != n.FirstChild { + return fmt.Errorf("html: inconsistent first relationship") + } + + if len(seen) != 0 { + return fmt.Errorf("html: inconsistent forwards/backwards child list") + } + + return nil +} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go new file mode 100644 index 0000000..2a5abdd --- /dev/null +++ b/vendor/golang.org/x/net/html/parse.go @@ -0,0 +1,2094 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "errors" + "fmt" + "io" + "strings" + + a "golang.org/x/net/html/atom" +) + +// A parser implements the HTML5 parsing algorithm: +// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction +type parser struct { + // tokenizer provides the tokens for the parser. + tokenizer *Tokenizer + // tok is the most recently read token. + tok Token + // Self-closing tags like
are treated as start tags, except that + // hasSelfClosingToken is set while they are being processed. + hasSelfClosingToken bool + // doc is the document root element. + doc *Node + // The stack of open elements (section 12.2.4.2) and active formatting + // elements (section 12.2.4.3). + oe, afe nodeStack + // Element pointers (section 12.2.4.4). + head, form *Node + // Other parsing state flags (section 12.2.4.5). + scripting, framesetOK bool + // im is the current insertion mode. + im insertionMode + // originalIM is the insertion mode to go back to after completing a text + // or inTableText insertion mode. + originalIM insertionMode + // fosterParenting is whether new elements should be inserted according to + // the foster parenting rules (section 12.2.6.1). + fosterParenting bool + // quirks is whether the parser is operating in "quirks mode." + quirks bool + // fragment is whether the parser is parsing an HTML fragment. + fragment bool + // context is the context element when parsing an HTML fragment + // (section 12.4). + context *Node +} + +func (p *parser) top() *Node { + if n := p.oe.top(); n != nil { + return n + } + return p.doc +} + +// Stop tags for use in popUntil. These come from section 12.2.4.2. +var ( + defaultScopeStopTags = map[string][]a.Atom{ + "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, + "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, + "svg": {a.Desc, a.ForeignObject, a.Title}, + } +) + +type scope int + +const ( + defaultScope scope = iota + listItemScope + buttonScope + tableScope + tableRowScope + tableBodyScope + selectScope +) + +// popUntil pops the stack of open elements at the highest element whose tag +// is in matchTags, provided there is no higher element in the scope's stop +// tags (as defined in section 12.2.4.2). It returns whether or not there was +// such an element. If there was not, popUntil leaves the stack unchanged. +// +// For example, the set of stop tags for table scope is: "html", "table". If +// the stack was: +// ["html", "body", "font", "table", "b", "i", "u"] +// then popUntil(tableScope, "font") would return false, but +// popUntil(tableScope, "i") would return true and the stack would become: +// ["html", "body", "font", "table", "b"] +// +// If an element's tag is in both the stop tags and matchTags, then the stack +// will be popped and the function returns true (provided, of course, there was +// no higher element in the stack that was also in the stop tags). For example, +// popUntil(tableScope, "table") returns true and leaves: +// ["html", "body", "font"] +func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { + if i := p.indexOfElementInScope(s, matchTags...); i != -1 { + p.oe = p.oe[:i] + return true + } + return false +} + +// indexOfElementInScope returns the index in p.oe of the highest element whose +// tag is in matchTags that is in scope. If no matching element is in scope, it +// returns -1. +func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + if p.oe[i].Namespace == "" { + for _, t := range matchTags { + if t == tagAtom { + return i + } + } + switch s { + case defaultScope: + // No-op. + case listItemScope: + if tagAtom == a.Ol || tagAtom == a.Ul { + return -1 + } + case buttonScope: + if tagAtom == a.Button { + return -1 + } + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + return -1 + } + case selectScope: + if tagAtom != a.Optgroup && tagAtom != a.Option { + return -1 + } + default: + panic("unreachable") + } + } + switch s { + case defaultScope, listItemScope, buttonScope: + for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { + if t == tagAtom { + return -1 + } + } + } + } + return -1 +} + +// elementInScope is like popUntil, except that it doesn't modify the stack of +// open elements. +func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { + return p.indexOfElementInScope(s, matchTags...) != -1 +} + +// clearStackToContext pops elements off the stack of open elements until a +// scope-defined element is found. +func (p *parser) clearStackToContext(s scope) { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + switch s { + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + p.oe = p.oe[:i+1] + return + } + case tableRowScope: + if tagAtom == a.Html || tagAtom == a.Tr { + p.oe = p.oe[:i+1] + return + } + case tableBodyScope: + if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { + p.oe = p.oe[:i+1] + return + } + default: + panic("unreachable") + } + } +} + +// generateImpliedEndTags pops nodes off the stack of open elements as long as +// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. +// If exceptions are specified, nodes with that name will not be popped off. +func (p *parser) generateImpliedEndTags(exceptions ...string) { + var i int +loop: + for i = len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Type == ElementNode { + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: + for _, except := range exceptions { + if n.Data == except { + break loop + } + } + continue + } + } + break + } + + p.oe = p.oe[:i+1] +} + +// addChild adds a child node n to the top element, and pushes n onto the stack +// of open elements if it is an element node. +func (p *parser) addChild(n *Node) { + if p.shouldFosterParent() { + p.fosterParent(n) + } else { + p.top().AppendChild(n) + } + + if n.Type == ElementNode { + p.oe = append(p.oe, n) + } +} + +// shouldFosterParent returns whether the next node to be added should be +// foster parented. +func (p *parser) shouldFosterParent() bool { + if p.fosterParenting { + switch p.top().DataAtom { + case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: + return true + } + } + return false +} + +// fosterParent adds a child node according to the foster parenting rules. +// Section 12.2.6.1, "foster parenting". +func (p *parser) fosterParent(n *Node) { + var table, parent, prev *Node + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + if p.oe[i].DataAtom == a.Table { + table = p.oe[i] + break + } + } + + if table == nil { + // The foster parent is the html element. + parent = p.oe[0] + } else { + parent = table.Parent + } + if parent == nil { + parent = p.oe[i-1] + } + + if table != nil { + prev = table.PrevSibling + } else { + prev = parent.LastChild + } + if prev != nil && prev.Type == TextNode && n.Type == TextNode { + prev.Data += n.Data + return + } + + parent.InsertBefore(n, table) +} + +// addText adds text to the preceding node if it is a text node, or else it +// calls addChild with a new text node. +func (p *parser) addText(text string) { + if text == "" { + return + } + + if p.shouldFosterParent() { + p.fosterParent(&Node{ + Type: TextNode, + Data: text, + }) + return + } + + t := p.top() + if n := t.LastChild; n != nil && n.Type == TextNode { + n.Data += text + return + } + p.addChild(&Node{ + Type: TextNode, + Data: text, + }) +} + +// addElement adds a child element based on the current token. +func (p *parser) addElement() { + p.addChild(&Node{ + Type: ElementNode, + DataAtom: p.tok.DataAtom, + Data: p.tok.Data, + Attr: p.tok.Attr, + }) +} + +// Section 12.2.4.3. +func (p *parser) addFormattingElement() { + tagAtom, attr := p.tok.DataAtom, p.tok.Attr + p.addElement() + + // Implement the Noah's Ark clause, but with three per family instead of two. + identicalElements := 0 +findIdenticalElements: + for i := len(p.afe) - 1; i >= 0; i-- { + n := p.afe[i] + if n.Type == scopeMarkerNode { + break + } + if n.Type != ElementNode { + continue + } + if n.Namespace != "" { + continue + } + if n.DataAtom != tagAtom { + continue + } + if len(n.Attr) != len(attr) { + continue + } + compareAttributes: + for _, t0 := range n.Attr { + for _, t1 := range attr { + if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { + // Found a match for this attribute, continue with the next attribute. + continue compareAttributes + } + } + // If we get here, there is no attribute that matches a. + // Therefore the element is not identical to the new one. + continue findIdenticalElements + } + + identicalElements++ + if identicalElements >= 3 { + p.afe.remove(n) + } + } + + p.afe = append(p.afe, p.top()) +} + +// Section 12.2.4.3. +func (p *parser) clearActiveFormattingElements() { + for { + n := p.afe.pop() + if len(p.afe) == 0 || n.Type == scopeMarkerNode { + return + } + } +} + +// Section 12.2.4.3. +func (p *parser) reconstructActiveFormattingElements() { + n := p.afe.top() + if n == nil { + return + } + if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { + return + } + i := len(p.afe) - 1 + for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { + if i == 0 { + i = -1 + break + } + i-- + n = p.afe[i] + } + for { + i++ + clone := p.afe[i].clone() + p.addChild(clone) + p.afe[i] = clone + if i == len(p.afe)-1 { + break + } + } +} + +// Section 12.2.5. +func (p *parser) acknowledgeSelfClosingTag() { + p.hasSelfClosingToken = false +} + +// An insertion mode (section 12.2.4.1) is the state transition function from +// a particular state in the HTML5 parser's state machine. It updates the +// parser's fields depending on parser.tok (where ErrorToken means EOF). +// It returns whether the token was consumed. +type insertionMode func(*parser) bool + +// setOriginalIM sets the insertion mode to return to after completing a text or +// inTableText insertion mode. +// Section 12.2.4.1, "using the rules for". +func (p *parser) setOriginalIM() { + if p.originalIM != nil { + panic("html: bad parser state: originalIM was set twice") + } + p.originalIM = p.im +} + +// Section 12.2.4.1, "reset the insertion mode". +func (p *parser) resetInsertionMode() { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if i == 0 && p.context != nil { + n = p.context + } + + switch n.DataAtom { + case a.Select: + p.im = inSelectIM + case a.Td, a.Th: + p.im = inCellIM + case a.Tr: + p.im = inRowIM + case a.Tbody, a.Thead, a.Tfoot: + p.im = inTableBodyIM + case a.Caption: + p.im = inCaptionIM + case a.Colgroup: + p.im = inColumnGroupIM + case a.Table: + p.im = inTableIM + case a.Head: + p.im = inBodyIM + case a.Body: + p.im = inBodyIM + case a.Frameset: + p.im = inFramesetIM + case a.Html: + p.im = beforeHeadIM + default: + continue + } + return + } + p.im = inBodyIM +} + +const whitespace = " \t\r\n\f" + +// Section 12.2.6.4.1. +func initialIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + n, quirks := parseDoctype(p.tok.Data) + p.doc.AppendChild(n) + p.quirks = quirks + p.im = beforeHTMLIM + return true + } + p.quirks = true + p.im = beforeHTMLIM + return false +} + +// Section 12.2.6.4.2. +func beforeHTMLIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + if p.tok.DataAtom == a.Html { + p.addElement() + p.im = beforeHeadIM + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + } + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false +} + +// Section 12.2.6.4.3. +func beforeHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Head: + p.addElement() + p.head = p.top() + p.im = inHeadIM + return true + case a.Html: + return inBodyIM(p) + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.4. +func inHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: + p.addElement() + p.oe.pop() + p.acknowledgeSelfClosingTag() + return true + case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: + p.addElement() + p.setOriginalIM() + p.im = textIM + return true + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head: + n := p.oe.pop() + if n.DataAtom != a.Head { + panic("html: bad parser state: element not found, in the in-head insertion mode") + } + p.im = afterHeadIM + return true + case a.Body, a.Html, a.Br: + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.6. +func afterHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Body: + p.addElement() + p.framesetOK = false + p.im = inBodyIM + return true + case a.Frameset: + p.addElement() + p.im = inFramesetIM + return true + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: + p.oe = append(p.oe, p.head) + defer p.oe.remove(p.head) + return inHeadIM(p) + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Body, a.Html, a.Br: + // Drop down to creating an implied tag. + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) + p.framesetOK = true + return false +} + +// copyAttributes copies attributes of src not found on dst to dst. +func copyAttributes(dst *Node, src Token) { + if len(src.Attr) == 0 { + return + } + attr := map[string]string{} + for _, t := range dst.Attr { + attr[t.Key] = t.Val + } + for _, t := range src.Attr { + if _, ok := attr[t.Key]; !ok { + dst.Attr = append(dst.Attr, t) + attr[t.Key] = t.Val + } + } +} + +// Section 12.2.6.4.7. +func inBodyIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + d := p.tok.Data + switch n := p.oe.top(); n.DataAtom { + case a.Pre, a.Listing: + if n.FirstChild == nil { + // Ignore a newline at the start of a
 block.
+				if d != "" && d[0] == '\r' {
+					d = d[1:]
+				}
+				if d != "" && d[0] == '\n' {
+					d = d[1:]
+				}
+			}
+		}
+		d = strings.Replace(d, "\x00", "", -1)
+		if d == "" {
+			return true
+		}
+		p.reconstructActiveFormattingElements()
+		p.addText(d)
+		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+			// There were non-whitespace characters inserted.
+			p.framesetOK = false
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			copyAttributes(p.oe[0], p.tok)
+		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+			return inHeadIM(p)
+		case a.Body:
+			if len(p.oe) >= 2 {
+				body := p.oe[1]
+				if body.Type == ElementNode && body.DataAtom == a.Body {
+					p.framesetOK = false
+					copyAttributes(body, p.tok)
+				}
+			}
+		case a.Frameset:
+			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+				// Ignore the token.
+				return true
+			}
+			body := p.oe[1]
+			if body.Parent != nil {
+				body.Parent.RemoveChild(body)
+			}
+			p.oe = p.oe[:1]
+			p.addElement()
+			p.im = inFramesetIM
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(buttonScope, a.P)
+			switch n := p.top(); n.DataAtom {
+			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Pre, a.Listing:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			// The newline, if any, will be dealt with by the TextToken case.
+			p.framesetOK = false
+		case a.Form:
+			if p.form == nil {
+				p.popUntil(buttonScope, a.P)
+				p.addElement()
+				p.form = p.top()
+			}
+		case a.Li:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Li:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Dd, a.Dt:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Dd, a.Dt:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Plaintext:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Button:
+			p.popUntil(defaultScope, a.Button)
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+		case a.A:
+			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+					p.inBodyEndTagFormatting(a.A)
+					p.oe.remove(n)
+					p.afe.remove(n)
+					break
+				}
+			}
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.Nobr:
+			p.reconstructActiveFormattingElements()
+			if p.elementInScope(defaultScope, a.Nobr) {
+				p.inBodyEndTagFormatting(a.Nobr)
+				p.reconstructActiveFormattingElements()
+			}
+			p.addFormattingElement()
+		case a.Applet, a.Marquee, a.Object:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.afe = append(p.afe, &scopeMarker)
+			p.framesetOK = false
+		case a.Table:
+			if !p.quirks {
+				p.popUntil(buttonScope, a.P)
+			}
+			p.addElement()
+			p.framesetOK = false
+			p.im = inTableIM
+			return true
+		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			if p.tok.DataAtom == a.Input {
+				for _, t := range p.tok.Attr {
+					if t.Key == "type" {
+						if strings.ToLower(t.Val) == "hidden" {
+							// Skip setting framesetOK = false
+							return true
+						}
+					}
+				}
+			}
+			p.framesetOK = false
+		case a.Param, a.Source, a.Track:
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		case a.Hr:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			p.framesetOK = false
+		case a.Image:
+			p.tok.DataAtom = a.Img
+			p.tok.Data = a.Img.String()
+			return false
+		case a.Isindex:
+			if p.form != nil {
+				// Ignore the token.
+				return true
+			}
+			action := ""
+			prompt := "This is a searchable index. Enter search keywords: "
+			attr := []Attribute{{Key: "name", Val: "isindex"}}
+			for _, t := range p.tok.Attr {
+				switch t.Key {
+				case "action":
+					action = t.Val
+				case "name":
+					// Ignore the attribute.
+				case "prompt":
+					prompt = t.Val
+				default:
+					attr = append(attr, t)
+				}
+			}
+			p.acknowledgeSelfClosingTag()
+			p.popUntil(buttonScope, a.P)
+			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+			if action != "" {
+				p.form.Attr = []Attribute{{Key: "action", Val: action}}
+			}
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
+			p.addText(prompt)
+			p.addChild(&Node{
+				Type:     ElementNode,
+				DataAtom: a.Input,
+				Data:     a.Input.String(),
+				Attr:     attr,
+			})
+			p.oe.pop()
+			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
+		case a.Textarea:
+			p.addElement()
+			p.setOriginalIM()
+			p.framesetOK = false
+			p.im = textIM
+		case a.Xmp:
+			p.popUntil(buttonScope, a.P)
+			p.reconstructActiveFormattingElements()
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Iframe:
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Noembed, a.Noscript:
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectIM
+			return true
+		case a.Optgroup, a.Option:
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		case a.Rp, a.Rt:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags()
+			}
+			p.addElement()
+		case a.Math, a.Svg:
+			p.reconstructActiveFormattingElements()
+			if p.tok.DataAtom == a.Math {
+				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+			} else {
+				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+			}
+			adjustForeignAttributes(p.tok.Attr)
+			p.addElement()
+			p.top().Namespace = p.tok.Data
+			if p.hasSelfClosingToken {
+				p.oe.pop()
+				p.acknowledgeSelfClosingTag()
+			}
+			return true
+		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+		default:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Body:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.im = afterBodyIM
+			}
+		case a.Html:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+				return false
+			}
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.Form:
+			node := p.form
+			p.form = nil
+			i := p.indexOfElementInScope(defaultScope, a.Form)
+			if node == nil || i == -1 || p.oe[i] != node {
+				// Ignore the token.
+				return true
+			}
+			p.generateImpliedEndTags()
+			p.oe.remove(node)
+		case a.P:
+			if !p.elementInScope(buttonScope, a.P) {
+				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+			}
+			p.popUntil(buttonScope, a.P)
+		case a.Li:
+			p.popUntil(listItemScope, a.Li)
+		case a.Dd, a.Dt:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.inBodyEndTagFormatting(p.tok.DataAtom)
+		case a.Applet, a.Marquee, a.Object:
+			if p.popUntil(defaultScope, p.tok.DataAtom) {
+				p.clearActiveFormattingElements()
+			}
+		case a.Br:
+			p.tok.Type = StartTagToken
+			return false
+		default:
+			p.inBodyEndTagOther(p.tok.DataAtom)
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	}
+
+	return true
+}
+
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+	// This is the "adoption agency" algorithm, described at
+	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+	// TODO: this is a fairly literal line-by-line translation of that algorithm.
+	// Once the code successfully parses the comprehensive test suite, we should
+	// refactor this code to be more idiomatic.
+
+	// Steps 1-4. The outer loop.
+	for i := 0; i < 8; i++ {
+		// Step 5. Find the formatting element.
+		var formattingElement *Node
+		for j := len(p.afe) - 1; j >= 0; j-- {
+			if p.afe[j].Type == scopeMarkerNode {
+				break
+			}
+			if p.afe[j].DataAtom == tagAtom {
+				formattingElement = p.afe[j]
+				break
+			}
+		}
+		if formattingElement == nil {
+			p.inBodyEndTagOther(tagAtom)
+			return
+		}
+		feIndex := p.oe.index(formattingElement)
+		if feIndex == -1 {
+			p.afe.remove(formattingElement)
+			return
+		}
+		if !p.elementInScope(defaultScope, tagAtom) {
+			// Ignore the tag.
+			return
+		}
+
+		// Steps 9-10. Find the furthest block.
+		var furthestBlock *Node
+		for _, e := range p.oe[feIndex:] {
+			if isSpecialElement(e) {
+				furthestBlock = e
+				break
+			}
+		}
+		if furthestBlock == nil {
+			e := p.oe.pop()
+			for e != formattingElement {
+				e = p.oe.pop()
+			}
+			p.afe.remove(e)
+			return
+		}
+
+		// Steps 11-12. Find the common ancestor and bookmark node.
+		commonAncestor := p.oe[feIndex-1]
+		bookmark := p.afe.index(formattingElement)
+
+		// Step 13. The inner loop. Find the lastNode to reparent.
+		lastNode := furthestBlock
+		node := furthestBlock
+		x := p.oe.index(node)
+		// Steps 13.1-13.2
+		for j := 0; j < 3; j++ {
+			// Step 13.3.
+			x--
+			node = p.oe[x]
+			// Step 13.4 - 13.5.
+			if p.afe.index(node) == -1 {
+				p.oe.remove(node)
+				continue
+			}
+			// Step 13.6.
+			if node == formattingElement {
+				break
+			}
+			// Step 13.7.
+			clone := node.clone()
+			p.afe[p.afe.index(node)] = clone
+			p.oe[p.oe.index(node)] = clone
+			node = clone
+			// Step 13.8.
+			if lastNode == furthestBlock {
+				bookmark = p.afe.index(node) + 1
+			}
+			// Step 13.9.
+			if lastNode.Parent != nil {
+				lastNode.Parent.RemoveChild(lastNode)
+			}
+			node.AppendChild(lastNode)
+			// Step 13.10.
+			lastNode = node
+		}
+
+		// Step 14. Reparent lastNode to the common ancestor,
+		// or for misnested table nodes, to the foster parent.
+		if lastNode.Parent != nil {
+			lastNode.Parent.RemoveChild(lastNode)
+		}
+		switch commonAncestor.DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			p.fosterParent(lastNode)
+		default:
+			commonAncestor.AppendChild(lastNode)
+		}
+
+		// Steps 15-17. Reparent nodes from the furthest block's children
+		// to a clone of the formatting element.
+		clone := formattingElement.clone()
+		reparentChildren(clone, furthestBlock)
+		furthestBlock.AppendChild(clone)
+
+		// Step 18. Fix up the list of active formatting elements.
+		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+			// Move the bookmark with the rest of the list.
+			bookmark--
+		}
+		p.afe.remove(formattingElement)
+		p.afe.insert(bookmark, clone)
+
+		// Step 19. Fix up the stack of open elements.
+		p.oe.remove(formattingElement)
+		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+	}
+}
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		if p.oe[i].DataAtom == tagAtom {
+			p.oe = p.oe[:i]
+			break
+		}
+		if isSpecialElement(p.oe[i]) {
+			break
+		}
+	}
+}
+
+// Section 12.2.6.4.8.
+func textIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		p.oe.pop()
+	case TextToken:
+		d := p.tok.Data
+		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+			// Ignore a newline at the start of a -->
+#errors
+#document
+| 
+|   
+|   
+|     -->
+#errors
+#document
+| 
+|   
+|   
+|     
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+#document
+| 
+|   
+|   
+|     
+#errors
+Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element.
+Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element.
+Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element.
+Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element.
+Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element.
+Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element.
+Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element.
+Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element.
+Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element.
+Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element.
+Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element.
+Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element.
+Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element.
+Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element.
+Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element.
+Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element.
+Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element.
+Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element.
+Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element.
+Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element.
+Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element.
+Line: 1 Col: 130 Unexpected end tag (br). Treated as br element.
+Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 140 This element (img) has no end tag.
+Line: 1 Col: 148 Unexpected end tag (title). Ignored.
+Line: 1 Col: 155 Unexpected end tag (span). Ignored.
+Line: 1 Col: 163 Unexpected end tag (style). Ignored.
+Line: 1 Col: 172 Unexpected end tag (script). Ignored.
+Line: 1 Col: 180 Unexpected end tag (table). Ignored.
+Line: 1 Col: 185 Unexpected end tag (th). Ignored.
+Line: 1 Col: 190 Unexpected end tag (td). Ignored.
+Line: 1 Col: 195 Unexpected end tag (tr). Ignored.
+Line: 1 Col: 203 This element (frame) has no end tag.
+Line: 1 Col: 210 This element (area) has no end tag.
+Line: 1 Col: 217 Unexpected end tag (link). Ignored.
+Line: 1 Col: 225 This element (param) has no end tag.
+Line: 1 Col: 230 This element (hr) has no end tag.
+Line: 1 Col: 238 This element (input) has no end tag.
+Line: 1 Col: 244 Unexpected end tag (col). Ignored.
+Line: 1 Col: 251 Unexpected end tag (base). Ignored.
+Line: 1 Col: 258 Unexpected end tag (meta). Ignored.
+Line: 1 Col: 269 This element (basefont) has no end tag.
+Line: 1 Col: 279 This element (bgsound) has no end tag.
+Line: 1 Col: 287 This element (embed) has no end tag.
+Line: 1 Col: 296 This element (spacer) has no end tag.
+Line: 1 Col: 300 Unexpected end tag (p). Ignored.
+Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag.
+Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag.
+Line: 1 Col: 320 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored.
+Line: 1 Col: 339 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 355 Unexpected end tag (thead). Ignored.
+Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag.
+Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag.
+Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag.
+Line: 1 Col: 393 Unexpected end tag (dir). Ignored.
+Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag.
+Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag.
+Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag.
+Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag.
+Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag.
+Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag.
+Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag.
+Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag.
+Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 460 This element (wbr) has no end tag.
+Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag.
+Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag.
+Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag.
+Line: 1 Col: 513 Unexpected end tag (html). Ignored.
+Line: 1 Col: 513 Unexpected end tag (frameset). Ignored.
+Line: 1 Col: 520 Unexpected end tag (head). Ignored.
+Line: 1 Col: 529 Unexpected end tag (iframe). Ignored.
+Line: 1 Col: 537 This element (image) has no end tag.
+Line: 1 Col: 547 This element (isindex) has no end tag.
+Line: 1 Col: 557 Unexpected end tag (noembed). Ignored.
+Line: 1 Col: 568 Unexpected end tag (noframes). Ignored.
+Line: 1 Col: 579 Unexpected end tag (noscript). Ignored.
+Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored.
+Line: 1 Col: 599 Unexpected end tag (option). Ignored.
+Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored.
+Line: 1 Col: 622 Unexpected end tag (textarea). Ignored.
+#document
+| 
+|   
+|   
+|     
+|

+ +#data +

+#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode. +Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode. +Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode. +Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode. +Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode. +Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode. +Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode. +Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode. +Line: 1 Col: 58 Unexpected end tag (blink). Ignored. +Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode. +Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode. +Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag. +Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode. +Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode. +Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode. +Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode. +Line: 1 Col: 99 Unexpected end tag (select). Ignored. +Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode. +Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag. +Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode. +Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag. +Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode. +Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag. +Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode. +Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag. +Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode. +Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag. +Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode. +Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag. +Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored. +Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode. +Line: 1 Col: 141 Unexpected end tag (br). Treated as br element. +Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode. +Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode. +Line: 1 Col: 151 This element (img) has no end tag. +Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode. +Line: 1 Col: 159 Unexpected end tag (title). Ignored. +Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode. +Line: 1 Col: 166 Unexpected end tag (span). Ignored. +Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode. +Line: 1 Col: 174 Unexpected end tag (style). Ignored. +Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode. +Line: 1 Col: 183 Unexpected end tag (script). Ignored. +Line: 1 Col: 196 Unexpected end tag (th). Ignored. +Line: 1 Col: 201 Unexpected end tag (td). Ignored. +Line: 1 Col: 206 Unexpected end tag (tr). Ignored. +Line: 1 Col: 214 This element (frame) has no end tag. +Line: 1 Col: 221 This element (area) has no end tag. +Line: 1 Col: 228 Unexpected end tag (link). Ignored. +Line: 1 Col: 236 This element (param) has no end tag. +Line: 1 Col: 241 This element (hr) has no end tag. +Line: 1 Col: 249 This element (input) has no end tag. +Line: 1 Col: 255 Unexpected end tag (col). Ignored. +Line: 1 Col: 262 Unexpected end tag (base). Ignored. +Line: 1 Col: 269 Unexpected end tag (meta). Ignored. +Line: 1 Col: 280 This element (basefont) has no end tag. +Line: 1 Col: 290 This element (bgsound) has no end tag. +Line: 1 Col: 298 This element (embed) has no end tag. +Line: 1 Col: 307 This element (spacer) has no end tag. +Line: 1 Col: 311 Unexpected end tag (p). Ignored. +Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag. +Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag. +Line: 1 Col: 331 Unexpected end tag (caption). Ignored. +Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored. +Line: 1 Col: 350 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 366 Unexpected end tag (thead). Ignored. +Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag. +Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag. +Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag. +Line: 1 Col: 404 Unexpected end tag (dir). Ignored. +Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag. +Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag. +Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag. +Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag. +Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag. +Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag. +Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag. +Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag. +Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 471 This element (wbr) has no end tag. +Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag. +Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag. +Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag. +Line: 1 Col: 524 Unexpected end tag (html). Ignored. +Line: 1 Col: 524 Unexpected end tag (frameset). Ignored. +Line: 1 Col: 531 Unexpected end tag (head). Ignored. +Line: 1 Col: 540 Unexpected end tag (iframe). Ignored. +Line: 1 Col: 548 This element (image) has no end tag. +Line: 1 Col: 558 This element (isindex) has no end tag. +Line: 1 Col: 568 Unexpected end tag (noembed). Ignored. +Line: 1 Col: 579 Unexpected end tag (noframes). Ignored. +Line: 1 Col: 590 Unexpected end tag (noscript). Ignored. +Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored. +Line: 1 Col: 610 Unexpected end tag (option). Ignored. +Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored. +Line: 1 Col: 633 Unexpected end tag (textarea). Ignored. +#document +| +| +| +|
+| +| +| +|

+ +#data + +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 1 Col: 10 Expected closing tag. Unexpected end of file. +#document +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat new file mode 100644 index 0000000..4f8df86 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat @@ -0,0 +1,799 @@ +#data + +#errors +#document +| +| +| +| +| + +#data +a +#errors +29: Bogus comment +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +35: Stray “svg” start tag. +42: Stray end tag “svg” +#document +| +| +| +| +| +#errors +43: Stray “svg” start tag. +50: Stray end tag “svg” +#document +| +| +| +| +|

+#errors +34: Start tag “svg” seen in “table”. +41: Stray end tag “svg”. +#document +| +| +| +| +| +| + +#data +
foo
+#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +53: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| + +#data +
foobar
+#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +58: Stray end tag “g”. +65: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| + +#data +
foobar
+#errors +41: Start tag “svg” seen in “table”. +53: Stray end tag “g”. +65: Stray end tag “g”. +72: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| + +#data +
foobar
+#errors +45: Start tag “svg” seen in “table”. +57: Stray end tag “g”. +69: Stray end tag “g”. +76: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| +| + +#data +
foobar
+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" + +#data +
foobar

baz

+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +
foobar

baz

+#errors +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +
foobar

baz

quux +#errors +70: HTML start tag “p” in a foreign namespace context. +81: “table” closed but “caption” was still open. +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" +|

+| "quux" + +#data +
foobarbaz

quux +#errors +78: “table” closed but “caption” was still open. +78: Unclosed elements on stack. +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +| "baz" +|

+| "quux" + +#data +foobar

baz

quux +#errors +44: Start tag “svg” seen in “table”. +56: Stray end tag “g”. +68: Stray end tag “g”. +71: HTML start tag “p” in a foreign namespace context. +71: Start tag “p” seen in “table”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" +| +| +|

+| "quux" + +#data +

quux +#errors +50: Stray “svg” start tag. +54: Stray “g” start tag. +62: Stray end tag “g” +66: Stray “g” start tag. +74: Stray end tag “g” +77: Stray “p” start tag. +88: “table” end tag with “select” open. +#document +| +| +| +| +| +| +| +|
+|

quux +#errors +36: Start tag “select” seen in “table”. +42: Stray “svg” start tag. +46: Stray “g” start tag. +54: Stray end tag “g” +58: Stray “g” start tag. +66: Stray end tag “g” +69: Stray “p” start tag. +80: “table” end tag with “select” open. +#document +| +| +| +| +| +|

+| "quux" + +#data +foobar

baz +#errors +41: Stray “svg” start tag. +68: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +foobar

baz +#errors +34: Stray “svg” start tag. +61: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +

+#errors +31: Stray “svg” start tag. +35: Stray “g” start tag. +40: Stray end tag “g” +44: Stray “g” start tag. +49: Stray end tag “g” +52: Stray “p” start tag. +58: Stray “span” start tag. +58: End of file seen and there were open elements. +#document +| +| +| +| + +#data +

+#errors +42: Stray “svg” start tag. +46: Stray “g” start tag. +51: Stray end tag “g” +55: Stray “g” start tag. +60: Stray end tag “g” +63: Stray “p” start tag. +69: Stray “span” start tag. +#document +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| +| xlink href="foo" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data +bar +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" +| "bar" + +#data + +#errors +#document +| +| +| +| + +#data +

a +#errors +#document +| +| +| +|
+| +| "a" + +#data +
a +#errors +#document +| +| +| +|
+| +| +| "a" + +#data +
+#errors +#document +| +| +| +|
+| +| +| + +#data +
a +#errors +#document +| +| +| +|
+| +| +| +| +| "a" + +#data +

a +#errors +#document +| +| +| +|

+| +| +| +|

+| "a" + +#data +
    a +#errors +40: HTML start tag “ul” in a foreign namespace context. +41: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +|
    +| +|
      +| "a" + +#data +
        a +#errors +35: HTML start tag “ul” in a foreign namespace context. +36: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +| +|
          +| "a" + +#data +

          +#errors +#document +| +| +| +| +|

          +| +| +|

          + +#data +

          +#errors +#document +| +| +| +| +|

          +| +| +|

          + +#data +

          +#errors +#document +| +| +| +|

          +| +| +| +|

          +|

          + +#data +
          +#errors +#document +| +| +| +| +| +|
          +| +|
          +| +| + +#data +
          +#errors +#document +| +| +| +| +| +| +| +|
          +|
          +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data +

+#errors +#document +| +| +| +| +|
+| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| + +#data +
+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat new file mode 100644 index 0000000..638cde4 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat @@ -0,0 +1,482 @@ +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributename="" +| attributetype="" +| basefrequency="" +| baseprofile="" +| calcmode="" +| clippathunits="" +| contentscripttype="" +| contentstyletype="" +| diffuseconstant="" +| edgemode="" +| externalresourcesrequired="" +| filterres="" +| filterunits="" +| glyphref="" +| gradienttransform="" +| gradientunits="" +| kernelmatrix="" +| kernelunitlength="" +| keypoints="" +| keysplines="" +| keytimes="" +| lengthadjust="" +| limitingconeangle="" +| markerheight="" +| markerunits="" +| markerwidth="" +| maskcontentunits="" +| maskunits="" +| numoctaves="" +| pathlength="" +| patterncontentunits="" +| patterntransform="" +| patternunits="" +| pointsatx="" +| pointsaty="" +| pointsatz="" +| preservealpha="" +| preserveaspectratio="" +| primitiveunits="" +| refx="" +| refy="" +| repeatcount="" +| repeatdur="" +| requiredextensions="" +| requiredfeatures="" +| specularconstant="" +| specularexponent="" +| spreadmethod="" +| startoffset="" +| stddeviation="" +| stitchtiles="" +| surfacescale="" +| systemlanguage="" +| tablevalues="" +| targetx="" +| targety="" +| textlength="" +| viewbox="" +| viewtarget="" +| xchannelselector="" +| ychannelselector="" +| zoomandpan="" + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat new file mode 100644 index 0000000..63107d2 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat @@ -0,0 +1,62 @@ +#data +

foobazeggs

spam

quuxbar +#errors +#document +| +| +| +| +|

+| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

+| "spam" +| +| +| +|
+| +| +| "quux" +| "bar" + +#data +foobazeggs

spam
quuxbar +#errors +#document +| +| +| +| +| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

+| "spam" +| +| +| +|
+| +| +| "quux" +| "bar" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat new file mode 100644 index 0000000..b8713f8 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat @@ -0,0 +1,74 @@ +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| abc:def="gh" +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| xml:lang="bar" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| 789="012" +| +| + +#data + +#errors +#document +| +| +| +| +| 789="012" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat new file mode 100644 index 0000000..6ce1c0d --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat @@ -0,0 +1,208 @@ +#data +

X +#errors +Line: 1 Col: 31 Unexpected end tag (p). Ignored. +Line: 1 Col: 36 Expected closing tag. Unexpected end of file. +#document +| +| +| +| +|

+| +| +| +| +| +| +| " " +|

+| "X" + +#data +

+

X +#errors +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end tag (p). Ignored. +Line: 2 Col: 4 Expected closing tag. Unexpected end of file. +#document +| +| +| +|

+| +| +| +| +| +| +| " +" +|

+| "X" + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| +| " " + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| + +#data + +#errors +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| + +#data +X +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| +| "X" + +#data +<!doctype html><table> X<meta></table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected start tag (meta) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " X" +| <meta> +| <table> + +#data +<!doctype html><table> x</table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> + +#data +<!doctype html><table> x </table> +#errors +Line: 1 Col: 25 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x " +| <table> + +#data +<!doctype html><table><tr> x</table> +#errors +Line: 1 Col: 28 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table>X<style> <tr>x </style> </table> +#errors +Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" +| <table> +| <style> +| " <tr>x " +| " " + +#data +<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div> +#errors +Line: 1 Col: 30 Unexpected start tag (a) in table context caused voodoo mode. +Line: 1 Col: 37 Unexpected end tag (a) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <a> +| "foo" +| <table> +| " " +| <tbody> +| <tr> +| <td> +| "bar" +| " " + +#data +<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes> +#errors +6: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”. +13: Stray start tag “frame”. +21: Stray end tag “frame”. +29: Stray end tag “frame”. +39: “frameset” start tag after “body” already open. +105: End of file seen inside an [R]CDATA element. +105: End of file seen and there were open elements. +XXX: These errors are wrong, please fix me! +#document +| <html> +| <head> +| <frameset> +| <frame> +| <frameset> +| <frame> +| <noframes> +| "</frameset><noframes>" + +#data +<!DOCTYPE html><object></html> +#errors +1: Expected closing tag. Unexpected end of file +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat new file mode 100644 index 0000000..c8ef66f --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat @@ -0,0 +1,2299 @@ +#data +<!doctype html><script> +#errors +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script>a +#errors +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<!doctype html><script>< +#errors +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<!doctype html><script></ +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<!doctype html><script></S +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<!doctype html><script></SC +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<!doctype html><script></SCR +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<!doctype html><script></SCRI +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<!doctype html><script></SCRIP +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script></s +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<!doctype html><script></sc +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<!doctype html><script></scr +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<!doctype html><script></scri +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<!doctype html><script></scrip +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script><! +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<!doctype html><script><!a +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<!doctype html><script><!- +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<!doctype html><script><!-a +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<!doctype html><script><!-- +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--a +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<!doctype html><script><!--< +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<!doctype html><script><!--<a +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<!doctype html><script><!--</ +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--<s +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<!doctype html><script><!--<script < +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<!doctype html><script><!--<script <a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<!doctype html><script><!--<script </ +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<!doctype html><script><!--<script </s +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 43 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<!doctype html><script><!--<script </scripta +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script> +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<!doctype html><script><!--<script </script/ +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<!doctype html><script><!--<script </script < +#errors +Line: 1 Col: 45 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<!doctype html><script><!--<script </script <a +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<!doctype html><script><!--<script </script </ +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 52 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script/ +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script - +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<!doctype html><script><!--<script -a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<!doctype html><script><!--<script -< +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -<" +| <body> + +#data +<!doctype html><script><!--<script -- +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<!doctype html><script><!--<script --a +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<!doctype html><script><!--<script --< +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --<" +| <body> + +#data +<!doctype html><script><!--<script --> +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script -->< +#errors +Line: 1 Col: 39 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<!doctype html><script><!--<script --></ +#errors +Line: 1 Col: 40 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script/ +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script><\/script>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<!doctype html><script><!--<script></scr'+'ipt>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>--><!--</script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-- ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- -></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- - ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<!doctype html><script><!--<script>--!></script>X +#errors +Line: 1 Col: 49 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<!doctype html><script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 59 Unexpected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<!doctype html><script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 57 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt></script>X" +| <body> + +#data +<!doctype html><style><!--<style></style>--></style> +#errors +Line: 1 Col: 52 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--<style>" +| <body> +| "-->" + +#data +<!doctype html><style><!--</style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--" +| <body> +| "X" + +#data +<!doctype html><style><!--...</style>...--></style> +#errors +Line: 1 Col: 51 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--..." +| <body> +| "...-->" + +#data +<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" +| <body> +| "X" + +#data +<!doctype html><style><!--...<style><!--...--!></style>--></style> +#errors +Line: 1 Col: 66 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--...<style><!--...--!>" +| <body> +| "-->" + +#data +<!doctype html><style><!--...</style><!-- --><style>@import ...</style> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--..." +| <!-- --> +| <style> +| "@import ..." +| <body> + +#data +<!doctype html><style>...<style><!--...</style><!-- --></style> +#errors +Line: 1 Col: 63 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "...<style><!--..." +| <!-- --> +| <body> + +#data +<!doctype html><style>...<!--[if IE]><style>...</style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "...<!--[if IE]><style>..." +| <body> +| "X" + +#data +<!doctype html><title><!--<title>--> +#errors +Line: 1 Col: 52 Unexpected end tag (title). +#document +| +| +| +| +| "<!--<title>" +| <body> +| "-->" + +#data +<!doctype html><title></title> +#errors +#document +| +| +| +| +| "" +| + +#data +foo/title><link></head><body>X +#errors +Line: 1 Col: 52 Unexpected end of file. Expected end tag (title). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <title> +| "foo/title><link></head><body>X" +| <body> + +#data +<!doctype html><noscript><!--<noscript></noscript>--></noscript> +#errors +Line: 1 Col: 64 Unexpected end tag (noscript). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<!--<noscript>" +| <body> +| "-->" + +#data +<!doctype html><noscript><!--</noscript>X<noscript>--></noscript> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<!--" +| <body> +| "X" +| <noscript> +| "-->" + +#data +<!doctype html><noscript><iframe></noscript>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<iframe>" +| <body> +| "X" + +#data +<!doctype html><noframes><!--<noframes></noframes>--></noframes> +#errors +Line: 1 Col: 64 Unexpected end tag (noframes). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noframes> +| "<!--<noframes>" +| <body> +| "-->" + +#data +<!doctype html><noframes><body><script><!--...</script></body></noframes></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noframes> +| "<body><script><!--...</script></body>" +| <body> + +#data +<!doctype html><textarea><!--<textarea></textarea>--></textarea> +#errors +Line: 1 Col: 64 Unexpected end tag (textarea). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "<!--<textarea>" +| "-->" + +#data +<!doctype html><textarea></textarea></textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "</textarea>" + +#data +<!doctype html><textarea><</textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "<" + +#data +<!doctype html><textarea>a<b</textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "a<b" + +#data +<!doctype html><iframe><!--<iframe></iframe>--></iframe> +#errors +Line: 1 Col: 56 Unexpected end tag (iframe). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "<!--<iframe>" +| "-->" + +#data +<!doctype html><iframe>...<!--X->...<!--/X->...</iframe> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "...<!--X->...<!--/X->..." + +#data +<!doctype html><xmp><!--<xmp></xmp>--></xmp> +#errors +Line: 1 Col: 44 Unexpected end tag (xmp). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> +| "<!--<xmp>" +| "-->" + +#data +<!doctype html><noembed><!--<noembed></noembed>--></noembed> +#errors +Line: 1 Col: 60 Unexpected end tag (noembed). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <noembed> +| "<!--<noembed>" +| "-->" + +#data +<script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script>a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<script>< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<script></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<script></S +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<script></SC +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<script></SCR +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<script></SCRI +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<script></SCRIP +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script></s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<script></sc +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<script></scr +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<script></scri +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<script></scrip +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script><! +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<script><!a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<script><!- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<script><!-a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<script><!-- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<script><!--< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<script><!--<a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<script><!--</ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--<s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 19 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<script><!--<script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<script><!--<script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<script><!--<script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<script><!--<script </s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<script><!--<script </scripta +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<script><!--<script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<script><!--<script </script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<script><!--<script </script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<script><!--<script </script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script - +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<script><!--<script -a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<script><!--<script -- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<script><!--<script --a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<script><!--<script --> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script -->< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<script><!--<script --></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script><\/script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<script><!--<script></scr'+'ipt>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<script><!--<script></script><script></script></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<script><!--<script></script><script></script>--><!--</script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<script><!--<script></script><script></script>-- ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<script><!--<script></script><script></script>- -></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<script><!--<script></script><script></script>- - ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<script><!--<script></script><script></script>-></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<script><!--<script>--!></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 44 Unexpected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 42 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt></script>X" +| <body> + +#data +<style><!--<style></style>--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--<style>" +| <body> +| "-->" + +#data +<style><!--</style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--" +| <body> +| "X" + +#data +<style><!--...</style>...--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 36 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--..." +| <body> +| "...-->" + +#data +<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" +| <body> +| "X" + +#data +<style><!--...<style><!--...--!></style>--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 51 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--...<style><!--...--!>" +| <body> +| "-->" + +#data +<style><!--...</style><!-- --><style>@import ...</style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--..." +| <!-- --> +| <style> +| "@import ..." +| <body> + +#data +<style>...<style><!--...</style><!-- --></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 48 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "...<style><!--..." +| <!-- --> +| <body> + +#data +<style>...<!--[if IE]><style>...</style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "...<!--[if IE]><style>..." +| <body> +| "X" + +#data +<title><!--<title>--> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end tag (title). +#document +| +| +| +| "<!--<title>" +| <body> +| "-->" + +#data +<title></title> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +#document +| +| +| +| "" +| + +#data +foo/title><link></head><body>X +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end of file. Expected end tag (title). +#document +| <html> +| <head> +| <title> +| "foo/title><link></head><body>X" +| <body> + +#data +<noscript><!--<noscript></noscript>--></noscript> +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (noscript). +#document +| <html> +| <head> +| <noscript> +| "<!--<noscript>" +| <body> +| "-->" + +#data +<noscript><!--</noscript>X<noscript>--></noscript> +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +#document +| <html> +| <head> +| <noscript> +| "<!--" +| <body> +| "X" +| <noscript> +| "-->" + +#data +<noscript><iframe></noscript>X +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +#document +| <html> +| <head> +| <noscript> +| "<iframe>" +| <body> +| "X" + +#data +<noframes><!--<noframes></noframes>--></noframes> +#errors +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (noframes). +#document +| <html> +| <head> +| <noframes> +| "<!--<noframes>" +| <body> +| "-->" + +#data +<noframes><body><script><!--...</script></body></noframes></html> +#errors +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. +#document +| <html> +| <head> +| <noframes> +| "<body><script><!--...</script></body>" +| <body> + +#data +<textarea><!--<textarea></textarea>--></textarea> +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (textarea). +#document +| <html> +| <head> +| <body> +| <textarea> +| "<!--<textarea>" +| "-->" + +#data +<textarea></textarea></textarea> +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| <textarea> +| "</textarea>" + +#data +<iframe><!--<iframe></iframe>--></iframe> +#errors +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. +Line: 1 Col: 41 Unexpected end tag (iframe). +#document +| <html> +| <head> +| <body> +| <iframe> +| "<!--<iframe>" +| "-->" + +#data +<iframe>...<!--X->...<!--/X->...</iframe> +#errors +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| <iframe> +| "...<!--X->...<!--/X->..." + +#data +<xmp><!--<xmp></xmp>--></xmp> +#errors +Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end tag (xmp). +#document +| <html> +| <head> +| <body> +| <xmp> +| "<!--<xmp>" +| "-->" + +#data +<noembed><!--<noembed></noembed>--></noembed> +#errors +Line: 1 Col: 9 Unexpected start tag (noembed). Expected DOCTYPE. +Line: 1 Col: 45 Unexpected end tag (noembed). +#document +| <html> +| <head> +| <body> +| <noembed> +| "<!--<noembed>" +| "-->" + +#data +<!doctype html><table> + +#errors +Line 2 Col 0 Unexpected end of file. Expected table content. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " +" + +#data +<!doctype html><table><td><span><font></span><span> +#errors +Line 1 Col 26 Unexpected table cell start tag (td) in the table body phase. +Line 1 Col 45 Unexpected end tag (span). +Line 1 Col 51 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <span> +| <font> +| <font> +| <span> + +#data +<!doctype html><form><table></form><form></table></form> +#errors +35: Stray end tag “form”. +41: Start tag “form” seen in “table”. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <table> +| <form> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat new file mode 100644 index 0000000..7b555f8 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat @@ -0,0 +1,153 @@ +#data +<!doctype html><table><tbody><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tr><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<!doctype html><table><tr><td><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <select> +| <td> + +#data +<!doctype html><table><tr><th><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <th> +| <select> +| <td> + +#data +<!doctype html><table><caption><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <select> +| <tbody> +| <tr> + +#data +<!doctype html><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><th> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tbody> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><thead> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><caption> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><table><tr></table>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| "a" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat new file mode 100644 index 0000000..680e1f0 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat @@ -0,0 +1,269 @@ +#data +<!doctype html><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> + +#data +<!doctype html><table><tbody><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><caption><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><tr><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <style> +| "</script>" + +#data +<!doctype html><table><tr><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <script> +| "</style>" + +#data +<!doctype html><table><caption><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><table><td><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" + +#data +<!doctype html><table><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> + +#data +<!doctype html><table><tr><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><frameset></frameset><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><frameset></frameset></html><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><table><tr></tbody><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <tfoot> + +#data +<!doctype html><table><td><svg></svg>abc<td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <svg svg> +| "abc" +| <td> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat new file mode 100644 index 0000000..0d62f5a --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat @@ -0,0 +1,1237 @@ +#data +<!doctype html><math><mn DefinitionUrl="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mn> +| definitionURL="foo" + +#data +<!doctype html><html></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <!-- foo --> +| <head> +| <body> + +#data +<!doctype html><head></head></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- foo --> +| <body> + +#data +<!doctype html><body><p><pre> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <pre> + +#data +<!doctype html><body><p><listing> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <listing> + +#data +<!doctype html><p><plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <plaintext> + +#data +<!doctype html><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <h1> + +#data +<!doctype html><form><isindex> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> + +#data +<!doctype html><isindex action="POST"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| action="POST" +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex prompt="this is isindex"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "this is isindex" +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex type="hidden"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| type="hidden" +| <hr> + +#data +<!doctype html><isindex name="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><ruby><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rp> + +#data +<!doctype html><ruby><div><span><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rp> + +#data +<!doctype html><ruby><div><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rp> + +#data +<!doctype html><ruby><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rt> + +#data +<!doctype html><ruby><div><span><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rt> + +#data +<!doctype html><ruby><div><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rt> + +#data +<!doctype html><math/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <foo> + +#data +<!doctype html><svg/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| <foo> + +#data +<!doctype html><div></body><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <!-- foo --> + +#data +<!doctype html><h1><div><h3><span></h1>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h1> +| <div> +| <h3> +| <span> +| "foo" + +#data +<!doctype html><p></h3>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "foo" + +#data +<!doctype html><h3><li>abc</h2>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h3> +| <li> +| "abc" +| "foo" + +#data +<!doctype html><table>abc<!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <!-- foo --> + +#data +<!doctype html><table> <!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " " +| <!-- foo --> + +#data +<!doctype html><table> b <!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " b " +| <table> +| <!-- foo --> + +#data +<!doctype html><select><option><option> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><p><math><mi><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mi> +| <p> +| <h1> + +#data +<!doctype html><p><math><mo><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mo> +| <p> +| <h1> + +#data +<!doctype html><p><math><mn><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <p> +| <h1> + +#data +<!doctype html><p><math><ms><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math ms> +| <p> +| <h1> + +#data +<!doctype html><p><math><mtext><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mtext> +| <p> +| <h1> + +#data +<!doctype html><frameset></noframes> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html c=d><body></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><html c=d><frameset></frameset></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <!-- foo --> + +#data +<!doctype html><html><frameset></frameset></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| " " + +#data +<!doctype html><html><frameset></frameset></html>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html></p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<html><frameset></frameset></html><!doctype html> +#errors +#document +| <html> +| <head> +| <frameset> + +#data +<!doctype html><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!doctype html><p><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><p>a<frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "a" + +#data +<!doctype html><p> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><pre><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <pre> + +#data +<!doctype html><listing><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <listing> + +#data +<!doctype html><li><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <li> + +#data +<!doctype html><dd><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dd> + +#data +<!doctype html><dt><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> + +#data +<!doctype html><button><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <button> + +#data +<!doctype html><applet><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <applet> + +#data +<!doctype html><marquee><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <marquee> + +#data +<!doctype html><object><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> + +#data +<!doctype html><table><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> + +#data +<!doctype html><area><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <area> + +#data +<!doctype html><basefont><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <basefont> +| <frameset> + +#data +<!doctype html><bgsound><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <bgsound> +| <frameset> + +#data +<!doctype html><br><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <br> + +#data +<!doctype html><embed><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <embed> + +#data +<!doctype html><img><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html><input><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <input> + +#data +<!doctype html><keygen><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <keygen> + +#data +<!doctype html><wbr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <wbr> + +#data +<!doctype html><hr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <hr> + +#data +<!doctype html><textarea></textarea><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> + +#data +<!doctype html><xmp></xmp><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> + +#data +<!doctype html><iframe></iframe><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> + +#data +<!doctype html><select></select><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><svg></svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><math></math><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg><foreignObject><div> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg>a</svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| "a" + +#data +<!doctype html><svg> </svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<html>aaa<frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "aaa" + +#data +<html> a <frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "a " + +#data +<!doctype html><div><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><div><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> + +#data +<!doctype html><p><math></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| "a" + +#data +<!doctype html><p><math><mn><span></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <span> +| <p> +| "a" + +#data +<!doctype html><math></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> + +#data +<!doctype html><meta charset="ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| charset="ascii" +| <body> + +#data +<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| content="text/html;charset=ascii" +| http-equiv="content-type" +| <body> + +#data +<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa --> +| <meta> +| charset="utf8" +| <body> + +#data +<!doctype html><html a=b><head></head><html c=d> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><image/> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html>a<i>b<table>c<b>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "a" +| <i> +| "bc" +| <b> +| "de" +| "f" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" + +#data +<!doctype html><table><i>a<b>b<div>c</i> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <i> +| "c" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><table><i>a<div>b<tr>c<b>d</i>e +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <div> +| "b" +| <i> +| "c" +| <b> +| "d" +| <b> +| "e" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><table><i>a<div>b<b>c</i>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <i> +| "a" +| <div> +| <i> +| "b" +| <b> +| "c" +| <b> +| "d" +| <table> + +#data +<!doctype html><body><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <bgsound> + +#data +<!doctype html><body><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <basefont> + +#data +<!doctype html><a><b></a><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <basefont> + +#data +<!doctype html><a><b></a><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <bgsound> + +#data +<!doctype html><figcaption><article></figcaption>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <figcaption> +| <article> +| "a" + +#data +<!doctype html><summary><article></summary>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <summary> +| <article> +| "a" + +#data +<!doctype html><p><a><plaintext>b +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <a> +| <plaintext> +| <a> +| "b" + +#data +<!DOCTYPE html><div>a<a></div>b<p>c</p>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| "a" +| <a> +| <a> +| "b" +| <p> +| "c" +| "d" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat new file mode 100644 index 0000000..60d8592 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat @@ -0,0 +1,763 @@ +#data +<!DOCTYPE html>Test +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "Test" + +#data +<textarea>test</div>test +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +Line: 1 Col: 24 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <textarea> +| "test</div>test" + +#data +<table><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 11 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<table><td>test</tbody></table> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| "test" + +#data +<frame>test +#errors +Line: 1 Col: 7 Unexpected start tag (frame). Expected DOCTYPE. +Line: 1 Col: 7 Unexpected start tag frame. Ignored. +#document +| <html> +| <head> +| <body> +| "test" + +#data +<!DOCTYPE html><frameset>test +#errors +Line: 1 Col: 29 Unepxected characters in the frameset phase. Characters ignored. +Line: 1 Col: 29 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><frameset><!DOCTYPE html> +#errors +Line: 1 Col: 40 Unexpected DOCTYPE. Ignored. +Line: 1 Col: 40 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><font><p><b>test</font> +#errors +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <p> +| <font> +| <b> +| "test" + +#data +<!DOCTYPE html><dt><div><dd> +#errors +Line: 1 Col: 28 Missing end tag (div, dt). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> +| <div> +| <dd> + +#data +<script></x +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</x" +| <body> + +#data +<table><plaintext><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 18 Unexpected start tag (plaintext) in table context caused voodoo mode. +Line: 1 Col: 22 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "<td>" +| <table> + +#data +<plaintext></plaintext> +#errors +Line: 1 Col: 11 Unexpected start tag (plaintext). Expected DOCTYPE. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!DOCTYPE html><table><tr>TEST +#errors +Line: 1 Col: 30 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected end of file. Expected table content. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "TEST" +| <table> +| <tbody> +| <tr> + +#data +<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4> +#errors +Line: 1 Col: 37 Unexpected start tag (body). +Line: 1 Col: 53 Unexpected start tag (body). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| t1="1" +| t2="2" +| t3="3" +| t4="4" + +#data +</b test +#errors +Line: 1 Col: 8 Unexpected end of file in attribute name. +Line: 1 Col: 8 End tag contains unexpected attributes. +Line: 1 Col: 8 Unexpected end tag (b). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end tag (b) after the (implied) root element. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html></b test<b &=&>X +#errors +Line: 1 Col: 32 Named entity didn't end with ';'. +Line: 1 Col: 33 End tag contains unexpected attributes. +Line: 1 Col: 33 Unexpected end tag (b) after the (implied) root element. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" + +#data +<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 54 Unexpected end of file in the tag name. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| type="text/x-foobar;baz" +| "X</SCRipt" +| <body> + +#data +& +#errors +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&# +#errors +Line: 1 Col: 1 Numeric entity expected. Got end of file instead. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#" + +#data +&#X +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#X" + +#data +&#x +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#x" + +#data +- +#errors +Line: 1 Col: 4 Numeric entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "-" + +#data +&x-test +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&x-test" + +#data +<!doctypehtml><p><li> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <li> + +#data +<!doctypehtml><p><dt> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dt> + +#data +<!doctypehtml><p><dd> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dd> + +#data +<!doctypehtml><p><form> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <form> + +#data +<!DOCTYPE html><p></P>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "X" + +#data +& +#errors +Line: 1 Col: 4 Named entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&AMp; +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&AMp;" + +#data +<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY> +#errors +Line: 1 Col: 110 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly> + +#data +<!DOCTYPE html>X</body>X +#errors +Line: 1 Col: 24 Unexpected non-space characters in the after body phase. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "XX" + +#data +<!DOCTYPE html><!-- X +#errors +Line: 1 Col: 21 Unexpected end of file in comment. +#document +| <!DOCTYPE html> +| <!-- X --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><table><caption>test TEST</caption><td>test +#errors +Line: 1 Col: 54 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 58 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| "test TEST" +| <tbody> +| <tr> +| <td> +| "test" + +#data +<!DOCTYPE html><select><option><optgroup> +#errors +Line: 1 Col: 41 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option> +#errors +Line: 1 Col: 68 Unexpected select start tag in the select phase treated as select end tag. +Line: 1 Col: 76 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <option> +| <option> + +#data +<!DOCTYPE html><select><optgroup><option><optgroup> +#errors +Line: 1 Col: 51 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><datalist><option>foo</datalist>bar +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <datalist> +| <option> +| "foo" +| "bar" + +#data +<!DOCTYPE html><font><input><input></font> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <input> +| <input> + +#data +<!DOCTYPE html><!-- XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX +#errors +Line: 1 Col: 29 Unexpected end of file in comment (-) +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<isindex test=x name=x> +#errors +Line: 1 Col: 23 Unexpected start tag (isindex). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected start tag isindex. Don't use it! +#document +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| test="x" +| <hr> + +#data +test +test +#errors +Line: 2 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "test +test" + +#data +<!DOCTYPE html><body><title>test</body> +#errors +#document +| +| +| +| +| +| "test</body>" + +#data +<!DOCTYPE html><body><title>X +#errors +#document +| +| +| +| +| +| "X" +| <meta> +| name="z" +| <link> +| rel="foo" +| <style> +| " +x { content:"</style" } " + +#data +<!DOCTYPE html><select><optgroup></optgroup></select> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> + +#data + + +#errors +Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html> <html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><script> +</script> <title>x +#errors +#document +| +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected start tag (script) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected start tag (style) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +#document +| +| +| +| +| "x" +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (style). +#document +| +| +| --> x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +|

+#errors +#document +| +| +| +| +| +| ddd +#errors +#document +| +| +| +#errors +#document +| +| +| +| +|
  • +| +| ", + " +
    << Back to Go HTTP/2 demo server`) + }) +} + +func httpsHost() string { + if *hostHTTPS != "" { + return *hostHTTPS + } + if v := *httpsAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func httpHost() string { + if *hostHTTP != "" { + return *hostHTTP + } + if v := *httpAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func serveProdTLS(autocertManager *autocert.Manager) error { + srv := &http.Server{ + TLSConfig: &tls.Config{ + GetCertificate: autocertManager.GetCertificate, + }, + } + http2.ConfigureServer(srv, &http2.Server{ + NewWriteScheduler: func() http2.WriteScheduler { + return http2.NewPriorityWriteScheduler(nil) + }, + }) + ln, err := net.Listen("tcp", ":443") + if err != nil { + return err + } + return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)) +} + +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +func serveProd() error { + log.Printf("running in production mode") + + storageClient, err := storage.NewClient(context.Background()) + if err != nil { + log.Fatalf("storage.NewClient: %v", err) + } + autocertManager := &autocert.Manager{ + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("http2.golang.org"), + Cache: autocertcache.NewGoogleCloudStorageCache(storageClient, "golang-h2demo-autocert"), + } + + errc := make(chan error, 2) + go func() { errc <- http.ListenAndServe(":80", autocertManager.HTTPHandler(http.DefaultServeMux)) }() + go func() { errc <- serveProdTLS(autocertManager) }() + return <-errc +} + +const idleTimeout = 5 * time.Minute +const activeTimeout = 10 * time.Minute + +// TODO: put this into the standard library and actually send +// PING frames and GOAWAY, etc: golang.org/issue/14204 +func idleTimeoutHook() func(net.Conn, http.ConnState) { + var mu sync.Mutex + m := map[net.Conn]*time.Timer{} + return func(c net.Conn, cs http.ConnState) { + mu.Lock() + defer mu.Unlock() + if t, ok := m[c]; ok { + delete(m, c) + t.Stop() + } + var d time.Duration + switch cs { + case http.StateNew, http.StateIdle: + d = idleTimeout + case http.StateActive: + d = activeTimeout + default: + return + } + m[c] = time.AfterFunc(d, func() { + log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d) + go c.Close() + }) + } +} + +func main() { + var srv http.Server + flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.") + flag.Parse() + srv.Addr = *httpsAddr + srv.ConnState = idleTimeoutHook() + + registerHandlers() + + if *prod { + *hostHTTP = "http2.golang.org" + *hostHTTPS = "http2.golang.org" + log.Fatal(serveProd()) + } + + url := "https://" + httpsHost() + "/" + log.Printf("Listening on " + url) + http2.ConfigureServer(&srv, &http2.Server{}) + + if *httpAddr != "" { + go func() { + log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)") + log.Fatal(http.ListenAndServe(*httpAddr, nil)) + }() + } + + go func() { + log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key")) + }() + select {} +} diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go new file mode 100644 index 0000000..df0866a --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/launch.go @@ -0,0 +1,302 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +var ( + proj = flag.String("project", "symbolic-datum-552", "name of Project") + zone = flag.String("zone", "us-central1-a", "GCE zone") + mach = flag.String("machinetype", "n1-standard-1", "Machine type") + instName = flag.String("instance_name", "http2-demo", "Name of VM instance.") + sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.") + staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.") + + writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.") + publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.") +) + +func readFile(v string) string { + slurp, err := ioutil.ReadFile(v) + if err != nil { + log.Fatalf("Error reading %s: %v", v, err) + } + return strings.TrimSpace(string(slurp)) +} + +var config = &oauth2.Config{ + // The client-id and secret should be for an "Installed Application" when using + // the CLI. Later we'll use a web application with a callback. + ClientID: readFile("client-id.dat"), + ClientSecret: readFile("client-secret.dat"), + Endpoint: google.Endpoint, + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + "https://www.googleapis.com/auth/sqlservice", + "https://www.googleapis.com/auth/sqlservice.admin", + }, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", +} + +const baseConfig = `#cloud-config +coreos: + units: + - name: h2demo.service + command: start + content: | + [Unit] + Description=HTTP2 Demo + + [Service] + ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo' + ExecStart=/opt/bin/h2demo --prod + RestartSec=5s + Restart=always + Type=simple + + [Install] + WantedBy=multi-user.target +` + +func main() { + flag.Parse() + if *proj == "" { + log.Fatalf("Missing --project flag") + } + prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj + machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach + + const tokenFileName = "token.dat" + tokenFile := tokenCacheFile(tokenFileName) + tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) + token, err := tokenSource.Token() + if err != nil { + if *writeObject != "" { + log.Fatalf("Can't use --write_object without a valid token.dat file already cached.") + } + log.Printf("Error getting token from %s: %v", tokenFileName, err) + log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) + fmt.Print("\nEnter auth code: ") + sc := bufio.NewScanner(os.Stdin) + sc.Scan() + authCode := strings.TrimSpace(sc.Text()) + token, err = config.Exchange(oauth2.NoContext, authCode) + if err != nil { + log.Fatalf("Error exchanging auth code for a token: %v", err) + } + if err := tokenFile.WriteToken(token); err != nil { + log.Fatalf("Error writing to %s: %v", tokenFileName, err) + } + tokenSource = oauth2.ReuseTokenSource(token, nil) + } + + oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource) + + if *writeObject != "" { + writeCloudStorageObject(oauthClient) + return + } + + computeService, _ := compute.New(oauthClient) + + natIP := *staticIP + if natIP == "" { + // Try to find it by name. + aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do() + if err != nil { + log.Fatal(err) + } + // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList + IPLoop: + for _, asl := range aggAddrList.Items { + for _, addr := range asl.Addresses { + if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" { + natIP = addr.Address + break IPLoop + } + } + } + } + + cloudConfig := baseConfig + if *sshPub != "" { + key := strings.TrimSpace(readFile(*sshPub)) + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key) + } + if os.Getenv("USER") == "bradfitz" { + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com") + } + const maxCloudConfig = 32 << 10 // per compute API docs + if len(cloudConfig) > maxCloudConfig { + log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig) + } + + instance := &compute.Instance{ + Name: *instName, + Description: "Go Builder", + MachineType: machType, + Disks: []*compute.AttachedDisk{instanceDisk(computeService)}, + Tags: &compute.Tags{ + Items: []string{"http-server", "https-server"}, + }, + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{ + { + Key: "user-data", + Value: &cloudConfig, + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + { + AccessConfigs: []*compute.AccessConfig{ + { + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + NatIP: natIP, + }, + }, + Network: prefix + "/global/networks/default", + }, + }, + ServiceAccounts: []*compute.ServiceAccount{ + { + Email: "default", + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + }, + }, + }, + } + + log.Printf("Creating instance...") + op, err := computeService.Instances.Insert(*proj, *zone, instance).Do() + if err != nil { + log.Fatalf("Failed to create instance: %v", err) + } + opName := op.Name + log.Printf("Created. Waiting on operation %v", opName) +OpLoop: + for { + time.Sleep(2 * time.Second) + op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do() + if err != nil { + log.Fatalf("Failed to get op %s: %v", opName, err) + } + switch op.Status { + case "PENDING", "RUNNING": + log.Printf("Waiting on operation %v", opName) + continue + case "DONE": + if op.Error != nil { + for _, operr := range op.Error.Errors { + log.Printf("Error: %+v", operr) + } + log.Fatalf("Failed to start.") + } + log.Printf("Success. %+v", op) + break OpLoop + default: + log.Fatalf("Unknown status %q: %+v", op.Status, op) + } + } + + inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do() + if err != nil { + log.Fatalf("Error getting instance after creation: %v", err) + } + ij, _ := json.MarshalIndent(inst, "", " ") + log.Printf("Instance: %s", ij) +} + +func instanceDisk(svc *compute.Service) *compute.AttachedDisk { + const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" + diskName := *instName + "-disk" + + return &compute.AttachedDisk{ + AutoDelete: true, + Boot: true, + Type: "PERSISTENT", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskName: diskName, + SourceImage: imageURL, + DiskSizeGb: 50, + }, + } +} + +func writeCloudStorageObject(httpClient *http.Client) { + content := os.Stdin + const maxSlurp = 1 << 20 + var buf bytes.Buffer + n, err := io.CopyN(&buf, content, maxSlurp) + if err != nil && err != io.EOF { + log.Fatalf("Error reading from stdin: %v, %v", n, err) + } + contentType := http.DetectContentType(buf.Bytes()) + + req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content)) + if err != nil { + log.Fatal(err) + } + req.Header.Set("x-goog-api-version", "2") + if *publicObject { + req.Header.Set("x-goog-acl", "public-read") + } + req.Header.Set("Content-Type", contentType) + res, err := httpClient.Do(req) + if err != nil { + log.Fatal(err) + } + if res.StatusCode != 200 { + res.Write(os.Stderr) + log.Fatalf("Failed.") + } + log.Printf("Success.") + os.Exit(0) +} + +type tokenCacheFile string + +func (f tokenCacheFile) Token() (*oauth2.Token, error) { + slurp, err := ioutil.ReadFile(string(f)) + if err != nil { + return nil, err + } + t := new(oauth2.Token) + if err := json.Unmarshal(slurp, t); err != nil { + return nil, err + } + return t, nil +} + +func (f tokenCacheFile) WriteToken(t *oauth2.Token) error { + jt, err := json.Marshal(t) + if err != nil { + return err + } + return ioutil.WriteFile(string(f), jt, 0600) +} diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key new file mode 100644 index 0000000..a15a6ab --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q +62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby +XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV +mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ +JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ +SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA +nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e +/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx +qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser +hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j +NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E +LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7 +8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c +0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws +K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd +bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo +QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt +Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1 +nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy +b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7 +gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev +WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr +C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj +x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA +hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem new file mode 100644 index 0000000..3a323e7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG +A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 +DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 +NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG +cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv +c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS +R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT +ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk +JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 +mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW +caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G +A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt +hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB +MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES +MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv +bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h +U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao +eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 +UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD +58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n +sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF +kPe6XoSbiLm/kxk32T0= +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl new file mode 100644 index 0000000..6db3891 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl @@ -0,0 +1 @@ +E2CE26BF3285059C diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt new file mode 100644 index 0000000..c59059b --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV +UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT +C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW +DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow +RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE +ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l +gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2 +dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL +A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws +/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88 +F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB +AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R +rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD +EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19 +KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI +dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU +90p6/CbU71bGbfpM2PHot2fm +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key new file mode 100644 index 0000000..f329c14 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi +fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm +J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef +b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55 +mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/ +fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p +3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3 +qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4 +NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80 +LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN +a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+ +Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL +W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO +gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm +S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS +Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp +V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4 +KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4 +yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5 +drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e +ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R +48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5 +c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY +nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl +IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/service.yaml b/vendor/golang.org/x/net/http2/h2demo/service.yaml new file mode 100644 index 0000000..2b7d541 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: h2demo +spec: + externalTrafficPolicy: Local + ports: + - port: 80 + targetPort: 80 + name: http + - port: 443 + targetPort: 443 + name: https + selector: + app: h2demo + type: LoadBalancer + loadBalancerIP: 130.211.116.44 diff --git a/vendor/golang.org/x/net/http2/h2demo/tmpl.go b/vendor/golang.org/x/net/http2/h2demo/tmpl.go new file mode 100644 index 0000000..504d6a7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/tmpl.go @@ -0,0 +1,1991 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build h2demo + +package main + +import "html/template" + +var pushTmpl = template.Must(template.New("serverpush").Parse(` + + + + + + + + + HTTP/2 Server Push Demo + + + + + + + + + +
    +Note: This page exists for demonstration purposes. For the actual cmd/go docs, go to golang.org/cmd/go. +
    + + + +
    +... +
    + + + + +
    +
    +
    +
    + Run + Format + + + +
    +
    + + +
    +
    + + +

    Command go

    + + + + + + + + + + + + + + +

    +Go is a tool for managing Go source code. +

    +

    +Usage: +

    +
    go command [arguments]
    +
    +

    +The commands are: +

    +
    build       compile packages and dependencies
    +clean       remove object files
    +doc         show documentation for package or symbol
    +env         print Go environment information
    +bug         start a bug report
    +fix         run go tool fix on packages
    +fmt         run gofmt on package sources
    +generate    generate Go files by processing source
    +get         download and install packages and dependencies
    +install     compile and install packages and dependencies
    +list        list packages
    +run         compile and run Go program
    +test        test packages
    +tool        run specified go tool
    +version     print Go version
    +vet         run go tool vet on packages
    +
    +

    +Use "go help [command]" for more information about a command. +

    +

    +Additional help topics: +

    +
    c           calling between Go and C
    +buildmode   description of build modes
    +filetype    file types
    +gopath      GOPATH environment variable
    +environment environment variables
    +importpath  import path syntax
    +packages    description of package lists
    +testflag    description of testing flags
    +testfunc    description of testing functions
    +
    +

    +Use "go help [topic]" for more information about that topic. +

    +

    Compile packages and dependencies

    +

    +Usage: +

    +
    go build [-o output] [-i] [build flags] [packages]
    +
    +

    +Build compiles the packages named by the import paths, +along with their dependencies, but it does not install the results. +

    +

    +If the arguments to build are a list of .go files, build treats +them as a list of source files specifying a single package. +

    +

    +When compiling a single main package, build writes +the resulting executable to an output file named after +the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe') +or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe'). +The '.exe' suffix is added when writing a Windows executable. +

    +

    +When compiling multiple packages or a single non-main package, +build compiles the packages but discards the resulting object, +serving only as a check that the packages can be built. +

    +

    +When compiling packages, build ignores files that end in '_test.go'. +

    +

    +The -o flag, only allowed when compiling a single package, +forces build to write the resulting executable or object +to the named output file, instead of the default behavior described +in the last two paragraphs. +

    +

    +The -i flag installs the packages that are dependencies of the target. +

    +

    +The build flags are shared by the build, clean, get, install, list, run, +and test commands: +

    +
    -a
    +	force rebuilding of packages that are already up-to-date.
    +-n
    +	print the commands but do not run them.
    +-p n
    +	the number of programs, such as build commands or
    +	test binaries, that can be run in parallel.
    +	The default is the number of CPUs available.
    +-race
    +	enable data race detection.
    +	Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
    +-msan
    +	enable interoperation with memory sanitizer.
    +	Supported only on linux/amd64,
    +	and only with Clang/LLVM as the host C compiler.
    +-v
    +	print the names of packages as they are compiled.
    +-work
    +	print the name of the temporary work directory and
    +	do not delete it when exiting.
    +-x
    +	print the commands.
    +
    +-asmflags 'flag list'
    +	arguments to pass on each go tool asm invocation.
    +-buildmode mode
    +	build mode to use. See 'go help buildmode' for more.
    +-compiler name
    +	name of compiler to use, as in runtime.Compiler (gccgo or gc).
    +-gccgoflags 'arg list'
    +	arguments to pass on each gccgo compiler/linker invocation.
    +-gcflags 'arg list'
    +	arguments to pass on each go tool compile invocation.
    +-installsuffix suffix
    +	a suffix to use in the name of the package installation directory,
    +	in order to keep output separate from default builds.
    +	If using the -race flag, the install suffix is automatically set to race
    +	or, if set explicitly, has _race appended to it.  Likewise for the -msan
    +	flag.  Using a -buildmode option that requires non-default compile flags
    +	has a similar effect.
    +-ldflags 'flag list'
    +	arguments to pass on each go tool link invocation.
    +-linkshared
    +	link against shared libraries previously created with
    +	-buildmode=shared.
    +-pkgdir dir
    +	install and load all packages from dir instead of the usual locations.
    +	For example, when building with a non-standard configuration,
    +	use -pkgdir to keep generated packages in a separate location.
    +-tags 'tag list'
    +	a list of build tags to consider satisfied during the build.
    +	For more information about build tags, see the description of
    +	build constraints in the documentation for the go/build package.
    +-toolexec 'cmd args'
    +	a program to use to invoke toolchain programs like vet and asm.
    +	For example, instead of running asm, the go command will run
    +	'cmd args /path/to/asm <arguments for asm>'.
    +
    +

    +The list flags accept a space-separated list of strings. To embed spaces +in an element in the list, surround it with either single or double quotes. +

    +

    +For more about specifying packages, see 'go help packages'. +For more about where packages and binaries are installed, +run 'go help gopath'. +For more about calling between Go and C/C++, run 'go help c'. +

    +

    +Note: Build adheres to certain conventions such as those described +by 'go help gopath'. Not all projects can follow these conventions, +however. Installations that have their own conventions or that use +a separate software build system may choose to use lower-level +invocations such as 'go tool compile' and 'go tool link' to avoid +some of the overheads and design decisions of the build tool. +

    +

    +See also: go install, go get, go clean. +

    +

    Remove object files

    +

    +Usage: +

    +
    go clean [-i] [-r] [-n] [-x] [build flags] [packages]
    +
    +

    +Clean removes object files from package source directories. +The go command builds most objects in a temporary directory, +so go clean is mainly concerned with object files left by other +tools or by manual invocations of go build. +

    +

    +Specifically, clean removes the following files from each of the +source directories corresponding to the import paths: +

    +
    _obj/            old object directory, left from Makefiles
    +_test/           old test directory, left from Makefiles
    +_testmain.go     old gotest file, left from Makefiles
    +test.out         old test log, left from Makefiles
    +build.out        old test log, left from Makefiles
    +*.[568ao]        object files, left from Makefiles
    +
    +DIR(.exe)        from go build
    +DIR.test(.exe)   from go test -c
    +MAINFILE(.exe)   from go build MAINFILE.go
    +*.so             from SWIG
    +
    +

    +In the list, DIR represents the final path element of the +directory, and MAINFILE is the base name of any Go source +file in the directory that is not included when building +the package. +

    +

    +The -i flag causes clean to remove the corresponding installed +archive or binary (what 'go install' would create). +

    +

    +The -n flag causes clean to print the remove commands it would execute, +but not run them. +

    +

    +The -r flag causes clean to be applied recursively to all the +dependencies of the packages named by the import paths. +

    +

    +The -x flag causes clean to print remove commands as it executes them. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Show documentation for package or symbol

    +

    +Usage: +

    +
    go doc [-u] [-c] [package|[package.]symbol[.method]]
    +
    +

    +Doc prints the documentation comments associated with the item identified by its +arguments (a package, const, func, type, var, or method) followed by a one-line +summary of each of the first-level items "under" that item (package-level +declarations for a package, methods for a type, etc.). +

    +

    +Doc accepts zero, one, or two arguments. +

    +

    +Given no arguments, that is, when run as +

    +
    go doc
    +
    +

    +it prints the package documentation for the package in the current directory. +If the package is a command (package main), the exported symbols of the package +are elided from the presentation unless the -cmd flag is provided. +

    +

    +When run with one argument, the argument is treated as a Go-syntax-like +representation of the item to be documented. What the argument selects depends +on what is installed in GOROOT and GOPATH, as well as the form of the argument, +which is schematically one of these: +

    +
    go doc <pkg>
    +go doc <sym>[.<method>]
    +go doc [<pkg>.]<sym>[.<method>]
    +go doc [<pkg>.][<sym>.]<method>
    +
    +

    +The first item in this list matched by the argument is the one whose documentation +is printed. (See the examples below.) However, if the argument starts with a capital +letter it is assumed to identify a symbol or method in the current directory. +

    +

    +For packages, the order of scanning is determined lexically in breadth-first order. +That is, the package presented is the one that matches the search and is nearest +the root and lexically first at its level of the hierarchy. The GOROOT tree is +always scanned in its entirety before GOPATH. +

    +

    +If there is no package specified or matched, the package in the current +directory is selected, so "go doc Foo" shows the documentation for symbol Foo in +the current package. +

    +

    +The package path must be either a qualified path or a proper suffix of a +path. The go tool's usual package mechanism does not apply: package path +elements like . and ... are not implemented by go doc. +

    +

    +When run with two arguments, the first must be a full package path (not just a +suffix), and the second is a symbol or symbol and method; this is similar to the +syntax accepted by godoc: +

    +
    go doc <pkg> <sym>[.<method>]
    +
    +

    +In all forms, when matching symbols, lower-case letters in the argument match +either case but upper-case letters match exactly. This means that there may be +multiple matches of a lower-case argument in a package if different symbols have +different cases. If this occurs, documentation for all matches is printed. +

    +

    +Examples: +

    +
    go doc
    +	Show documentation for current package.
    +go doc Foo
    +	Show documentation for Foo in the current package.
    +	(Foo starts with a capital letter so it cannot match
    +	a package path.)
    +go doc encoding/json
    +	Show documentation for the encoding/json package.
    +go doc json
    +	Shorthand for encoding/json.
    +go doc json.Number (or go doc json.number)
    +	Show documentation and method summary for json.Number.
    +go doc json.Number.Int64 (or go doc json.number.int64)
    +	Show documentation for json.Number's Int64 method.
    +go doc cmd/doc
    +	Show package docs for the doc command.
    +go doc -cmd cmd/doc
    +	Show package docs and exported symbols within the doc command.
    +go doc template.new
    +	Show documentation for html/template's New function.
    +	(html/template is lexically before text/template)
    +go doc text/template.new # One argument
    +	Show documentation for text/template's New function.
    +go doc text/template new # Two arguments
    +	Show documentation for text/template's New function.
    +
    +At least in the current tree, these invocations all print the
    +documentation for json.Decoder's Decode method:
    +
    +go doc json.Decoder.Decode
    +go doc json.decoder.decode
    +go doc json.decode
    +cd go/src/encoding/json; go doc decode
    +
    +

    +Flags: +

    +
    -c
    +	Respect case when matching symbols.
    +-cmd
    +	Treat a command (package main) like a regular package.
    +	Otherwise package main's exported symbols are hidden
    +	when showing the package's top-level documentation.
    +-u
    +	Show documentation for unexported as well as exported
    +	symbols and methods.
    +
    +

    Print Go environment information

    +

    +Usage: +

    +
    go env [var ...]
    +
    +

    +Env prints Go environment information. +

    +

    +By default env prints information as a shell script +(on Windows, a batch file). If one or more variable +names is given as arguments, env prints the value of +each named variable on its own line. +

    +

    Start a bug report

    +

    +Usage: +

    +
    go bug
    +
    +

    +Bug opens the default browser and starts a new bug report. +The report includes useful system information. +

    +

    Run go tool fix on packages

    +

    +Usage: +

    +
    go fix [packages]
    +
    +

    +Fix runs the Go fix command on the packages named by the import paths. +

    +

    +For more about fix, see 'go doc cmd/fix'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run fix with specific options, run 'go tool fix'. +

    +

    +See also: go fmt, go vet. +

    +

    Run gofmt on package sources

    +

    +Usage: +

    +
    go fmt [-n] [-x] [packages]
    +
    +

    +Fmt runs the command 'gofmt -l -w' on the packages named +by the import paths. It prints the names of the files that are modified. +

    +

    +For more about gofmt, see 'go doc cmd/gofmt'. +For more about specifying packages, see 'go help packages'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +To run gofmt with specific options, run gofmt itself. +

    +

    +See also: go fix, go vet. +

    +

    Generate Go files by processing source

    +

    +Usage: +

    +
    go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
    +
    +

    +Generate runs commands described by directives within existing +files. Those commands can run any process but the intent is to +create or update Go source files. +

    +

    +Go generate is never run automatically by go build, go get, go test, +and so on. It must be run explicitly. +

    +

    +Go generate scans the file for directives, which are lines of +the form, +

    +
    //go:generate command argument...
    +
    +

    +(note: no leading spaces and no space in "//go") where command +is the generator to be run, corresponding to an executable file +that can be run locally. It must either be in the shell path +(gofmt), a fully qualified path (/usr/you/bin/mytool), or a +command alias, described below. +

    +

    +Note that go generate does not parse the file, so lines that look +like directives in comments or multiline strings will be treated +as directives. +

    +

    +The arguments to the directive are space-separated tokens or +double-quoted strings passed to the generator as individual +arguments when it is run. +

    +

    +Quoted strings use Go syntax and are evaluated before execution; a +quoted string appears as a single argument to the generator. +

    +

    +Go generate sets several variables when it runs the generator: +

    +
    $GOARCH
    +	The execution architecture (arm, amd64, etc.)
    +$GOOS
    +	The execution operating system (linux, windows, etc.)
    +$GOFILE
    +	The base name of the file.
    +$GOLINE
    +	The line number of the directive in the source file.
    +$GOPACKAGE
    +	The name of the package of the file containing the directive.
    +$DOLLAR
    +	A dollar sign.
    +
    +

    +Other than variable substitution and quoted-string evaluation, no +special processing such as "globbing" is performed on the command +line. +

    +

    +As a last step before running the command, any invocations of any +environment variables with alphanumeric names, such as $GOFILE or +$HOME, are expanded throughout the command line. The syntax for +variable expansion is $NAME on all operating systems. Due to the +order of evaluation, variables are expanded even inside quoted +strings. If the variable NAME is not set, $NAME expands to the +empty string. +

    +

    +A directive of the form, +

    +
    //go:generate -command xxx args...
    +
    +

    +specifies, for the remainder of this source file only, that the +string xxx represents the command identified by the arguments. This +can be used to create aliases or to handle multiword generators. +For example, +

    +
    //go:generate -command foo go tool foo
    +
    +

    +specifies that the command "foo" represents the generator +"go tool foo". +

    +

    +Generate processes packages in the order given on the command line, +one at a time. If the command line lists .go files, they are treated +as a single package. Within a package, generate processes the +source files in a package in file name order, one at a time. Within +a source file, generate runs generators in the order they appear +in the file, one at a time. +

    +

    +If any generator returns an error exit status, "go generate" skips +all further processing for that package. +

    +

    +The generator is run in the package's source directory. +

    +

    +Go generate accepts one specific flag: +

    +
    -run=""
    +	if non-empty, specifies a regular expression to select
    +	directives whose full original source text (excluding
    +	any trailing spaces and final newline) matches the
    +	expression.
    +
    +

    +It also accepts the standard build flags including -v, -n, and -x. +The -v flag prints the names of packages and files as they are +processed. +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Download and install packages and dependencies

    +

    +Usage: +

    +
    go get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]
    +
    +

    +Get downloads the packages named by the import paths, along with their +dependencies. It then installs the named packages, like 'go install'. +

    +

    +The -d flag instructs get to stop after downloading the packages; that is, +it instructs get not to install the packages. +

    +

    +The -f flag, valid only when -u is set, forces get -u not to verify that +each package has been checked out from the source control repository +implied by its import path. This can be useful if the source is a local fork +of the original. +

    +

    +The -fix flag instructs get to run the fix tool on the downloaded packages +before resolving dependencies or building the code. +

    +

    +The -insecure flag permits fetching from repositories and resolving +custom domains using insecure schemes such as HTTP. Use with caution. +

    +

    +The -t flag instructs get to also download the packages required to build +the tests for the specified packages. +

    +

    +The -u flag instructs get to use the network to update the named packages +and their dependencies. By default, get uses the network to check out +missing packages but does not use it to look for updates to existing packages. +

    +

    +The -v flag enables verbose progress and debug output. +

    +

    +Get also accepts build flags to control the installation. See 'go help build'. +

    +

    +When checking out a new package, get creates the target directory +GOPATH/src/<import-path>. If the GOPATH contains multiple entries, +get uses the first one. For more details see: 'go help gopath'. +

    +

    +When checking out or updating a package, get looks for a branch or tag +that matches the locally installed version of Go. The most important +rule is that if the local installation is running version "go1", get +searches for a branch or tag named "go1". If no such version exists it +retrieves the most recent version of the package. +

    +

    +When go get checks out or updates a Git repository, +it also updates any git submodules referenced by the repository. +

    +

    +Get never checks out or updates code stored in vendor directories. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    +For more about how 'go get' finds source code to +download, see 'go help importpath'. +

    +

    +See also: go build, go install, go clean. +

    +

    Compile and install packages and dependencies

    +

    +Usage: +

    +
    go install [build flags] [packages]
    +
    +

    +Install compiles and installs the packages named by the import paths, +along with their dependencies. +

    +

    +For more about the build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go get, go clean. +

    +

    List packages

    +

    +Usage: +

    +
    go list [-e] [-f format] [-json] [build flags] [packages]
    +
    +

    +List lists the packages named by the import paths, one per line. +

    +

    +The default output shows the package import path: +

    +
    bytes
    +encoding/json
    +github.com/gorilla/mux
    +golang.org/x/net/html
    +
    +

    +The -f flag specifies an alternate format for the list, using the +syntax of package template. The default output is equivalent to -f +''. The struct being passed to the template is: +

    +
    type Package struct {
    +    Dir           string // directory containing package sources
    +    ImportPath    string // import path of package in dir
    +    ImportComment string // path in import comment on package statement
    +    Name          string // package name
    +    Doc           string // package documentation string
    +    Target        string // install path
    +    Shlib         string // the shared library that contains this package (only set when -linkshared)
    +    Goroot        bool   // is this package in the Go root?
    +    Standard      bool   // is this package part of the standard Go library?
    +    Stale         bool   // would 'go install' do anything for this package?
    +    StaleReason   string // explanation for Stale==true
    +    Root          string // Go root or Go path dir containing this package
    +    ConflictDir   string // this directory shadows Dir in $GOPATH
    +    BinaryOnly    bool   // binary-only package: cannot be recompiled from sources
    +
    +    // Source files
    +    GoFiles        []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
    +    CgoFiles       []string // .go sources files that import "C"
    +    IgnoredGoFiles []string // .go sources ignored due to build constraints
    +    CFiles         []string // .c source files
    +    CXXFiles       []string // .cc, .cxx and .cpp source files
    +    MFiles         []string // .m source files
    +    HFiles         []string // .h, .hh, .hpp and .hxx source files
    +    FFiles         []string // .f, .F, .for and .f90 Fortran source files
    +    SFiles         []string // .s source files
    +    SwigFiles      []string // .swig files
    +    SwigCXXFiles   []string // .swigcxx files
    +    SysoFiles      []string // .syso object files to add to archive
    +    TestGoFiles    []string // _test.go files in package
    +    XTestGoFiles   []string // _test.go files outside package
    +
    +    // Cgo directives
    +    CgoCFLAGS    []string // cgo: flags for C compiler
    +    CgoCPPFLAGS  []string // cgo: flags for C preprocessor
    +    CgoCXXFLAGS  []string // cgo: flags for C++ compiler
    +    CgoFFLAGS    []string // cgo: flags for Fortran compiler
    +    CgoLDFLAGS   []string // cgo: flags for linker
    +    CgoPkgConfig []string // cgo: pkg-config names
    +
    +    // Dependency information
    +    Imports      []string // import paths used by this package
    +    Deps         []string // all (recursively) imported dependencies
    +    TestImports  []string // imports from TestGoFiles
    +    XTestImports []string // imports from XTestGoFiles
    +
    +    // Error information
    +    Incomplete bool            // this package or a dependency has an error
    +    Error      *PackageError   // error loading package
    +    DepsErrors []*PackageError // errors loading dependencies
    +}
    +
    +

    +Packages stored in vendor directories report an ImportPath that includes the +path to the vendor directory (for example, "d/vendor/p" instead of "p"), +so that the ImportPath uniquely identifies a given copy of a package. +The Imports, Deps, TestImports, and XTestImports lists also contain these +expanded imports paths. See golang.org/s/go15vendor for more about vendoring. +

    +

    +The error information, if any, is +

    +
    type PackageError struct {
    +    ImportStack   []string // shortest path from package named on command line to this one
    +    Pos           string   // position of error (if present, file:line:col)
    +    Err           string   // the error itself
    +}
    +
    +

    +The template function "join" calls strings.Join. +

    +

    +The template function "context" returns the build context, defined as: +

    +
    type Context struct {
    +	GOARCH        string   // target architecture
    +	GOOS          string   // target operating system
    +	GOROOT        string   // Go root
    +	GOPATH        string   // Go path
    +	CgoEnabled    bool     // whether cgo can be used
    +	UseAllFiles   bool     // use files regardless of +build lines, file names
    +	Compiler      string   // compiler to assume when computing target paths
    +	BuildTags     []string // build constraints to match in +build lines
    +	ReleaseTags   []string // releases the current release is compatible with
    +	InstallSuffix string   // suffix to use in the name of the install dir
    +}
    +
    +

    +For more information about the meaning of these fields see the documentation +for the go/build package's Context type. +

    +

    +The -json flag causes the package data to be printed in JSON format +instead of using the template format. +

    +

    +The -e flag changes the handling of erroneous packages, those that +cannot be found or are malformed. By default, the list command +prints an error to standard error for each erroneous package and +omits the packages from consideration during the usual printing. +With the -e flag, the list command never prints errors to standard +error and instead processes the erroneous packages with the usual +printing. Erroneous packages will have a non-empty ImportPath and +a non-nil Error field; other information may or may not be missing +(zeroed). +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Compile and run Go program

    +

    +Usage: +

    +
    go run [build flags] [-exec xprog] gofiles... [arguments...]
    +
    +

    +Run compiles and runs the main package comprising the named Go source files. +A Go source file is defined to be a file ending in a literal ".go" suffix. +

    +

    +By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. +If the -exec flag is given, 'go run' invokes the binary using xprog: +

    +
    'xprog a.out arguments...'.
    +
    +

    +If the -exec flag is not given, GOOS or GOARCH is different from the system +default, and a program named go_$GOOS_$GOARCH_exec can be found +on the current search path, 'go run' invokes the binary using that program, +for example 'go_nacl_386_exec a.out arguments...'. This allows execution of +cross-compiled programs when a simulator or other execution method is +available. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go build. +

    +

    Test packages

    +

    +Usage: +

    +
    go test [build/test flags] [packages] [build/test flags & test binary flags]
    +
    +

    +'Go test' automates testing the packages named by the import paths. +It prints a summary of the test results in the format: +

    +
    ok   archive/tar   0.011s
    +FAIL archive/zip   0.022s
    +ok   compress/gzip 0.033s
    +...
    +
    +

    +followed by detailed output for each failed package. +

    +

    +'Go test' recompiles each package along with any files with names matching +the file pattern "*_test.go". +Files whose names begin with "_" (including "_test.go") or "." are ignored. +These additional files can contain test functions, benchmark functions, and +example functions. See 'go help testfunc' for more. +Each listed package causes the execution of a separate test binary. +

    +

    +Test files that declare a package with the suffix "_test" will be compiled as a +separate package, and then linked and run with the main test binary. +

    +

    +The go tool will ignore a directory named "testdata", making it available +to hold ancillary data needed by the tests. +

    +

    +By default, go test needs no arguments. It compiles and tests the package +with source in the current directory, including tests, and runs the tests. +

    +

    +The package is built in a temporary directory so it does not interfere with the +non-test installation. +

    +

    +In addition to the build flags, the flags handled by 'go test' itself are: +

    +
    -args
    +    Pass the remainder of the command line (everything after -args)
    +    to the test binary, uninterpreted and unchanged.
    +    Because this flag consumes the remainder of the command line,
    +    the package list (if present) must appear before this flag.
    +
    +-c
    +    Compile the test binary to pkg.test but do not run it
    +    (where pkg is the last element of the package's import path).
    +    The file name can be changed with the -o flag.
    +
    +-exec xprog
    +    Run the test binary using xprog. The behavior is the same as
    +    in 'go run'. See 'go help run' for details.
    +
    +-i
    +    Install packages that are dependencies of the test.
    +    Do not run the test.
    +
    +-o file
    +    Compile the test binary to the named file.
    +    The test still runs (unless -c or -i is specified).
    +
    +

    +The test binary also accepts flags that control execution of the test; these +flags are also accessible by 'go test'. See 'go help testflag' for details. +

    +

    +For more about build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go vet. +

    +

    Run specified go tool

    +

    +Usage: +

    +
    go tool [-n] command [args...]
    +
    +

    +Tool runs the go tool command identified by the arguments. +With no arguments it prints the list of known tools. +

    +

    +The -n flag causes tool to print the command that would be +executed but not execute it. +

    +

    +For more about each tool command, see 'go tool command -h'. +

    +

    Print Go version

    +

    +Usage: +

    +
    go version
    +
    +

    +Version prints the Go version, as reported by runtime.Version. +

    +

    Run go tool vet on packages

    +

    +Usage: +

    +
    go vet [-n] [-x] [build flags] [packages]
    +
    +

    +Vet runs the Go vet command on the packages named by the import paths. +

    +

    +For more about vet, see 'go doc cmd/vet'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run the vet tool with specific options, run 'go tool vet'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go fmt, go fix. +

    +

    Calling between Go and C

    +

    +There are two different ways to call between Go and C/C++ code. +

    +

    +The first is the cgo tool, which is part of the Go distribution. For +information on how to use it see the cgo documentation (go doc cmd/cgo). +

    +

    +The second is the SWIG program, which is a general tool for +interfacing between languages. For information on SWIG see +http://swig.org/. When running go build, any file with a .swig +extension will be passed to SWIG. Any file with a .swigcxx extension +will be passed to SWIG with the -c++ option. +

    +

    +When either cgo or SWIG is used, go build will pass any .c, .m, .s, +or .S files to the C compiler, and any .cc, .cpp, .cxx files to the C++ +compiler. The CC or CXX environment variables may be set to determine +the C or C++ compiler, respectively, to use. +

    +

    Description of build modes

    +

    +The 'go build' and 'go install' commands take a -buildmode argument which +indicates which kind of object file is to be built. Currently supported values +are: +

    +
    -buildmode=archive
    +	Build the listed non-main packages into .a files. Packages named
    +	main are ignored.
    +
    +-buildmode=c-archive
    +	Build the listed main package, plus all packages it imports,
    +	into a C archive file. The only callable symbols will be those
    +	functions exported using a cgo //export comment. Requires
    +	exactly one main package to be listed.
    +
    +-buildmode=c-shared
    +	Build the listed main packages, plus all packages that they
    +	import, into C shared libraries. The only callable symbols will
    +	be those functions exported using a cgo //export comment.
    +	Non-main packages are ignored.
    +
    +-buildmode=default
    +	Listed main packages are built into executables and listed
    +	non-main packages are built into .a files (the default
    +	behavior).
    +
    +-buildmode=shared
    +	Combine all the listed non-main packages into a single shared
    +	library that will be used when building with the -linkshared
    +	option. Packages named main are ignored.
    +
    +-buildmode=exe
    +	Build the listed main packages and everything they import into
    +	executables. Packages not named main are ignored.
    +
    +-buildmode=pie
    +	Build the listed main packages and everything they import into
    +	position independent executables (PIE). Packages not named
    +	main are ignored.
    +
    +-buildmode=plugin
    +	Build the listed main packages, plus all packages that they
    +	import, into a Go plugin. Packages not named main are ignored.
    +
    +

    File types

    +

    +The go command examines the contents of a restricted set of files +in each directory. It identifies which files to examine based on +the extension of the file name. These extensions are: +

    +
    .go
    +	Go source files.
    +.c, .h
    +	C source files.
    +	If the package uses cgo or SWIG, these will be compiled with the
    +	OS-native compiler (typically gcc); otherwise they will
    +	trigger an error.
    +.cc, .cpp, .cxx, .hh, .hpp, .hxx
    +	C++ source files. Only useful with cgo or SWIG, and always
    +	compiled with the OS-native compiler.
    +.m
    +	Objective-C source files. Only useful with cgo, and always
    +	compiled with the OS-native compiler.
    +.s, .S
    +	Assembler source files.
    +	If the package uses cgo or SWIG, these will be assembled with the
    +	OS-native assembler (typically gcc (sic)); otherwise they
    +	will be assembled with the Go assembler.
    +.swig, .swigcxx
    +	SWIG definition files.
    +.syso
    +	System object files.
    +
    +

    +Files of each of these types except .syso may contain build +constraints, but the go command stops scanning for build constraints +at the first item in the file that is not a blank line or //-style +line comment. See the go/build package documentation for +more details. +

    +

    +Non-test Go source files can also include a //go:binary-only-package +comment, indicating that the package sources are included +for documentation only and must not be used to build the +package binary. This enables distribution of Go packages in +their compiled form alone. See the go/build package documentation +for more details. +

    +

    GOPATH environment variable

    +

    +The Go path is used to resolve import statements. +It is implemented by and documented in the go/build package. +

    +

    +The GOPATH environment variable lists places to look for Go code. +On Unix, the value is a colon-separated string. +On Windows, the value is a semicolon-separated string. +On Plan 9, the value is a list. +

    +

    +If the environment variable is unset, GOPATH defaults +to a subdirectory named "go" in the user's home directory +($HOME/go on Unix, %USERPROFILE%\go on Windows), +unless that directory holds a Go distribution. +Run "go env GOPATH" to see the current GOPATH. +

    +

    +See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. +

    +

    +Each directory listed in GOPATH must have a prescribed structure: +

    +

    +The src directory holds source code. The path below src +determines the import path or executable name. +

    +

    +The pkg directory holds installed package objects. +As in the Go tree, each target operating system and +architecture pair has its own subdirectory of pkg +(pkg/GOOS_GOARCH). +

    +

    +If DIR is a directory listed in the GOPATH, a package with +source in DIR/src/foo/bar can be imported as "foo/bar" and +has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". +

    +

    +The bin directory holds compiled commands. +Each command is named for its source directory, but only +the final element, not the entire path. That is, the +command with source in DIR/src/foo/quux is installed into +DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped +so that you can add DIR/bin to your PATH to get at the +installed commands. If the GOBIN environment variable is +set, commands are installed to the directory it names instead +of DIR/bin. GOBIN must be an absolute path. +

    +

    +Here's an example directory layout: +

    +
    GOPATH=/home/user/go
    +
    +/home/user/go/
    +    src/
    +        foo/
    +            bar/               (go code in package bar)
    +                x.go
    +            quux/              (go code in package main)
    +                y.go
    +    bin/
    +        quux                   (installed command)
    +    pkg/
    +        linux_amd64/
    +            foo/
    +                bar.a          (installed package object)
    +
    +

    +Go searches each directory listed in GOPATH to find source code, +but new packages are always downloaded into the first directory +in the list. +

    +

    +See https://golang.org/doc/code.html for an example. +

    +

    Internal Directories

    +

    +Code in or below a directory named "internal" is importable only +by code in the directory tree rooted at the parent of "internal". +Here's an extended version of the directory layout above: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            internal/
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The code in z.go is imported as "foo/internal/baz", but that +import statement can only appear in source files in the subtree +rooted at foo. The source files foo/f.go, foo/bar/x.go, and +foo/quux/y.go can all import "foo/internal/baz", but the source file +crash/bang/b.go cannot. +

    +

    +See https://golang.org/s/go14internal for details. +

    +

    Vendor Directories

    +

    +Go 1.6 includes support for using local copies of external dependencies +to satisfy imports of those dependencies, often referred to as vendoring. +

    +

    +Code below a directory named "vendor" is importable only +by code in the directory tree rooted at the parent of "vendor", +and only using an import path that omits the prefix up to and +including the vendor element. +

    +

    +Here's the example from the previous section, +but with the "internal" directory renamed to "vendor" +and a new foo/vendor/crash/bang directory added: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            vendor/
    +                crash/
    +                    bang/      (go code in package bang)
    +                        b.go
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The same visibility rules apply as for internal, but the code +in z.go is imported as "baz", not as "foo/vendor/baz". +

    +

    +Code in vendor directories deeper in the source tree shadows +code in higher directories. Within the subtree rooted at foo, an import +of "crash/bang" resolves to "foo/vendor/crash/bang", not the +top-level "crash/bang". +

    +

    +Code in vendor directories is not subject to import path +checking (see 'go help importpath'). +

    +

    +When 'go get' checks out or updates a git repository, it now also +updates submodules. +

    +

    +Vendor directories do not affect the placement of new repositories +being checked out for the first time by 'go get': those are always +placed in the main GOPATH, never in a vendor subtree. +

    +

    +See https://golang.org/s/go15vendor for details. +

    +

    Environment variables

    +

    +The go command, and the tools it invokes, examine a few different +environment variables. For many of these, you can see the default +value of on your system by running 'go env NAME', where NAME is the +name of the variable. +

    +

    +General-purpose environment variables: +

    +
    GCCGO
    +	The gccgo command to run for 'go build -compiler=gccgo'.
    +GOARCH
    +	The architecture, or processor, for which to compile code.
    +	Examples are amd64, 386, arm, ppc64.
    +GOBIN
    +	The directory where 'go install' will install a command.
    +GOOS
    +	The operating system for which to compile code.
    +	Examples are linux, darwin, windows, netbsd.
    +GOPATH
    +	For more details see: 'go help gopath'.
    +GORACE
    +	Options for the race detector.
    +	See https://golang.org/doc/articles/race_detector.html.
    +GOROOT
    +	The root of the go tree.
    +
    +

    +Environment variables for use with cgo: +

    +
    CC
    +	The command to use to compile C code.
    +CGO_ENABLED
    +	Whether the cgo command is supported.  Either 0 or 1.
    +CGO_CFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C code.
    +CGO_CPPFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C or C++ code.
    +CGO_CXXFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C++ code.
    +CGO_FFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	Fortran code.
    +CGO_LDFLAGS
    +	Flags that cgo will pass to the compiler when linking.
    +CXX
    +	The command to use to compile C++ code.
    +PKG_CONFIG
    +	Path to pkg-config tool.
    +
    +

    +Architecture-specific environment variables: +

    +
    GOARM
    +	For GOARCH=arm, the ARM architecture for which to compile.
    +	Valid values are 5, 6, 7.
    +GO386
    +	For GOARCH=386, the floating point instruction set.
    +	Valid values are 387, sse2.
    +
    +

    +Special-purpose environment variables: +

    +
    GOROOT_FINAL
    +	The root of the installed Go tree, when it is
    +	installed in a location other than where it is built.
    +	File names in stack traces are rewritten from GOROOT to
    +	GOROOT_FINAL.
    +GO_EXTLINK_ENABLED
    +	Whether the linker should use external linking mode
    +	when using -linkmode=auto with code that uses cgo.
    +	Set to 0 to disable external linking mode, 1 to enable it.
    +GIT_ALLOW_PROTOCOL
    +	Defined by Git. A colon-separated list of schemes that are allowed to be used
    +	with git fetch/clone. If set, any scheme not explicitly mentioned will be
    +	considered insecure by 'go get'.
    +
    +

    Import path syntax

    +

    +An import path (see 'go help packages') denotes a package stored in the local +file system. In general, an import path denotes either a standard package (such +as "unicode/utf8") or a package found in one of the work spaces (For more +details see: 'go help gopath'). +

    +

    Relative import paths

    +

    +An import path beginning with ./ or ../ is called a relative path. +The toolchain supports relative import paths as a shortcut in two ways. +

    +

    +First, a relative path can be used as a shorthand on the command line. +If you are working in the directory containing the code imported as +"unicode" and want to run the tests for "unicode/utf8", you can type +"go test ./utf8" instead of needing to specify the full path. +Similarly, in the reverse situation, "go test .." will test "unicode" from +the "unicode/utf8" directory. Relative patterns are also allowed, like +"go test ./..." to test all subdirectories. See 'go help packages' for details +on the pattern syntax. +

    +

    +Second, if you are compiling a Go program not in a work space, +you can use a relative path in an import statement in that program +to refer to nearby code also not in a work space. +This makes it easy to experiment with small multipackage programs +outside of the usual work spaces, but such programs cannot be +installed with "go install" (there is no work space in which to install them), +so they are rebuilt from scratch each time they are built. +To avoid ambiguity, Go programs cannot use relative import paths +within a work space. +

    +

    Remote import paths

    +

    +Certain import paths also +describe how to obtain the source code for the package using +a revision control system. +

    +

    +A few common code hosting sites have special syntax: +

    +
    Bitbucket (Git, Mercurial)
    +
    +	import "bitbucket.org/user/project"
    +	import "bitbucket.org/user/project/sub/directory"
    +
    +GitHub (Git)
    +
    +	import "github.com/user/project"
    +	import "github.com/user/project/sub/directory"
    +
    +Launchpad (Bazaar)
    +
    +	import "launchpad.net/project"
    +	import "launchpad.net/project/series"
    +	import "launchpad.net/project/series/sub/directory"
    +
    +	import "launchpad.net/~user/project/branch"
    +	import "launchpad.net/~user/project/branch/sub/directory"
    +
    +IBM DevOps Services (Git)
    +
    +	import "hub.jazz.net/git/user/project"
    +	import "hub.jazz.net/git/user/project/sub/directory"
    +
    +

    +For code hosted on other servers, import paths may either be qualified +with the version control type, or the go tool can dynamically fetch +the import path over https/http and discover where the code resides +from a <meta> tag in the HTML. +

    +

    +To declare the code location, an import path of the form +

    +
    repository.vcs/path
    +
    +

    +specifies the given repository, with or without the .vcs suffix, +using the named version control system, and then the path inside +that repository. The supported version control systems are: +

    +
    Bazaar      .bzr
    +Git         .git
    +Mercurial   .hg
    +Subversion  .svn
    +
    +

    +For example, +

    +
    import "example.org/user/foo.hg"
    +
    +

    +denotes the root directory of the Mercurial repository at +example.org/user/foo or foo.hg, and +

    +
    import "example.org/repo.git/foo/bar"
    +
    +

    +denotes the foo/bar directory of the Git repository at +example.org/repo or repo.git. +

    +

    +When a version control system supports multiple protocols, +each is tried in turn when downloading. For example, a Git +download tries https://, then git+ssh://. +

    +

    +By default, downloads are restricted to known secure protocols +(e.g. https, ssh). To override this setting for Git downloads, the +GIT_ALLOW_PROTOCOL environment variable can be set (For more details see: +'go help environment'). +

    +

    +If the import path is not a known code hosting site and also lacks a +version control qualifier, the go tool attempts to fetch the import +over https/http and looks for a <meta> tag in the document's HTML +<head>. +

    +

    +The meta tag has the form: +

    +
    <meta name="go-import" content="import-prefix vcs repo-root">
    +
    +

    +The import-prefix is the import path corresponding to the repository +root. It must be a prefix or an exact match of the package being +fetched with "go get". If it's not an exact match, another http +request is made at the prefix to verify the <meta> tags match. +

    +

    +The meta tag should appear as early in the file as possible. +In particular, it should appear before any raw JavaScript or CSS, +to avoid confusing the go command's restricted parser. +

    +

    +The vcs is one of "git", "hg", "svn", etc, +

    +

    +The repo-root is the root of the version control system +containing a scheme and not containing a .vcs qualifier. +

    +

    +For example, +

    +
    import "example.org/pkg/foo"
    +
    +

    +will result in the following requests: +

    +
    https://example.org/pkg/foo?go-get=1 (preferred)
    +http://example.org/pkg/foo?go-get=1  (fallback, only with -insecure)
    +
    +

    +If that page contains the meta tag +

    +
    <meta name="go-import" content="example.org git https://code.org/r/p/exproj">
    +
    +

    +the go tool will verify that https://example.org/?go-get=1 contains the +same meta tag and then git clone https://code.org/r/p/exproj into +GOPATH/src/example.org. +

    +

    +New downloaded packages are written to the first directory listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +The go command attempts to download the version of the +package appropriate for the Go release being used. +Run 'go help get' for more. +

    +

    Import path checking

    +

    +When the custom import path feature described above redirects to a +known code hosting site, each of the resulting packages has two possible +import paths, using the custom domain or the known hosting site. +

    +

    +A package statement is said to have an "import comment" if it is immediately +followed (before the next newline) by a comment of one of these two forms: +

    +
    package math // import "path"
    +package math /* import "path" */
    +
    +

    +The go command will refuse to install a package with an import comment +unless it is being referred to by that import path. In this way, import comments +let package authors make sure the custom import path is used and not a +direct path to the underlying code hosting site. +

    +

    +Import path checking is disabled for code found within vendor trees. +This makes it possible to copy code into alternate locations in vendor trees +without needing to update import comments. +

    +

    +See https://golang.org/s/go14customimport for details. +

    +

    Description of package lists

    +

    +Many commands apply to a set of packages: +

    +
    go action [packages]
    +
    +

    +Usually, [packages] is a list of import paths. +

    +

    +An import path that is a rooted path or that begins with +a . or .. element is interpreted as a file system path and +denotes the package in that directory. +

    +

    +Otherwise, the import path P denotes the package found in +the directory DIR/src/P for some DIR listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +If no import paths are given, the action applies to the +package in the current directory. +

    +

    +There are four reserved names for paths that should not be used +for packages to be built with the go tool: +

    +

    +- "main" denotes the top-level package in a stand-alone executable. +

    +

    +- "all" expands to all package directories found in all the GOPATH +trees. For example, 'go list all' lists all the packages on the local +system. +

    +

    +- "std" is like all but expands to just the packages in the standard +Go library. +

    +

    +- "cmd" expands to the Go repository's commands and their +internal libraries. +

    +

    +Import paths beginning with "cmd/" only match source code in +the Go repository. +

    +

    +An import path is a pattern if it includes one or more "..." wildcards, +each of which can match any string, including the empty string and +strings containing slashes. Such a pattern expands to all package +directories found in the GOPATH trees with names matching the +patterns. As a special case, x/... matches x as well as x's subdirectories. +For example, net/... expands to net and packages in its subdirectories. +

    +

    +An import path can also name a package to be downloaded from +a remote repository. Run 'go help importpath' for details. +

    +

    +Every package in a program must have a unique import path. +By convention, this is arranged by starting each path with a +unique prefix that belongs to you. For example, paths used +internally at Google all begin with 'google', and paths +denoting remote repositories begin with the path to the code, +such as 'github.com/user/repo'. +

    +

    +Packages in a program need not have unique package names, +but there are two reserved package names with special meaning. +The name main indicates a command, not a library. +Commands are built into binaries and cannot be imported. +The name documentation indicates documentation for +a non-Go program in the directory. Files in package documentation +are ignored by the go command. +

    +

    +As a special case, if the package list is a list of .go files from a +single directory, the command is applied to a single synthesized +package made up of exactly those files, ignoring any build constraints +in those files and ignoring any other files in the directory. +

    +

    +Directory and file names that begin with "." or "_" are ignored +by the go tool, as are directories named "testdata". +

    +

    Description of testing flags

    +

    +The 'go test' command takes both flags that apply to 'go test' itself +and flags that apply to the resulting test binary. +

    +

    +Several of the flags control profiling and write an execution profile +suitable for "go tool pprof"; run "go tool pprof -h" for more +information. The --alloc_space, --alloc_objects, and --show_bytes +options of pprof control how the information is presented. +

    +

    +The following flags are recognized by the 'go test' command and +control the execution of any test: +

    +
    -bench regexp
    +    Run (sub)benchmarks matching a regular expression.
    +    The given regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    benchmark's identifier.
    +    By default, no benchmarks run. To run all benchmarks,
    +    use '-bench .' or '-bench=.'.
    +
    +-benchtime t
    +    Run enough iterations of each benchmark to take t, specified
    +    as a time.Duration (for example, -benchtime 1h30s).
    +    The default is 1 second (1s).
    +
    +-count n
    +    Run each test and benchmark n times (default 1).
    +    If -cpu is set, run n times for each GOMAXPROCS value.
    +    Examples are always run once.
    +
    +-cover
    +    Enable coverage analysis.
    +
    +-covermode set,count,atomic
    +    Set the mode for coverage analysis for the package[s]
    +    being tested. The default is "set" unless -race is enabled,
    +    in which case it is "atomic".
    +    The values:
    +	set: bool: does this statement run?
    +	count: int: how many times does this statement run?
    +	atomic: int: count, but correct in multithreaded tests;
    +		significantly more expensive.
    +    Sets -cover.
    +
    +-coverpkg pkg1,pkg2,pkg3
    +    Apply coverage analysis in each test to the given list of packages.
    +    The default is for each test to analyze only the package being tested.
    +    Packages are specified as import paths.
    +    Sets -cover.
    +
    +-cpu 1,2,4
    +    Specify a list of GOMAXPROCS values for which the tests or
    +    benchmarks should be executed.  The default is the current value
    +    of GOMAXPROCS.
    +
    +-parallel n
    +    Allow parallel execution of test functions that call t.Parallel.
    +    The value of this flag is the maximum number of tests to run
    +    simultaneously; by default, it is set to the value of GOMAXPROCS.
    +    Note that -parallel only applies within a single test binary.
    +    The 'go test' command may run tests for different packages
    +    in parallel as well, according to the setting of the -p flag
    +    (see 'go help build').
    +
    +-run regexp
    +    Run only those tests and examples matching the regular expression.
    +    For tests the regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    test's identifier.
    +
    +-short
    +    Tell long-running tests to shorten their run time.
    +    It is off by default but set during all.bash so that installing
    +    the Go tree can run a sanity check but not spend time running
    +    exhaustive tests.
    +
    +-timeout t
    +    If a test runs longer than t, panic.
    +    The default is 10 minutes (10m).
    +
    +-v
    +    Verbose output: log all tests as they are run. Also print all
    +    text from Log and Logf calls even if the test succeeds.
    +
    +

    +The following flags are also recognized by 'go test' and can be used to +profile the tests during execution: +

    +
    -benchmem
    +    Print memory allocation statistics for benchmarks.
    +
    +-blockprofile block.out
    +    Write a goroutine blocking profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-blockprofilerate n
    +    Control the detail provided in goroutine blocking profiles by
    +    calling runtime.SetBlockProfileRate with n.
    +    See 'go doc runtime.SetBlockProfileRate'.
    +    The profiler aims to sample, on average, one blocking event every
    +    n nanoseconds the program spends blocked.  By default,
    +    if -test.blockprofile is set without this flag, all blocking events
    +    are recorded, equivalent to -test.blockprofilerate=1.
    +
    +-coverprofile cover.out
    +    Write a coverage profile to the file after all tests have passed.
    +    Sets -cover.
    +
    +-cpuprofile cpu.out
    +    Write a CPU profile to the specified file before exiting.
    +    Writes test binary as -c would.
    +
    +-memprofile mem.out
    +    Write a memory profile to the file after all tests have passed.
    +    Writes test binary as -c would.
    +
    +-memprofilerate n
    +    Enable more precise (and expensive) memory profiles by setting
    +    runtime.MemProfileRate.  See 'go doc runtime.MemProfileRate'.
    +    To profile all memory allocations, use -test.memprofilerate=1
    +    and pass --alloc_space flag to the pprof tool.
    +
    +-mutexprofile mutex.out
    +    Write a mutex contention profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-mutexprofilefraction n
    +    Sample 1 in n stack traces of goroutines holding a
    +    contended mutex.
    +
    +-outputdir directory
    +    Place output files from profiling in the specified directory,
    +    by default the directory in which "go test" is running.
    +
    +-trace trace.out
    +    Write an execution trace to the specified file before exiting.
    +
    +

    +Each of these flags is also recognized with an optional 'test.' prefix, +as in -test.v. When invoking the generated test binary (the result of +'go test -c') directly, however, the prefix is mandatory. +

    +

    +The 'go test' command rewrites or removes recognized flags, +as appropriate, both before and after the optional package list, +before invoking the test binary. +

    +

    +For instance, the command +

    +
    go test -v -myflag testdata -cpuprofile=prof.out -x
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
    +
    +

    +(The -x flag is removed because it applies only to the go command's +execution, not to the test itself.) +

    +

    +The test flags that generate profiles (other than for coverage) also +leave the test binary in pkg.test for use when analyzing the profiles. +

    +

    +When 'go test' runs a test binary, it does so from within the +corresponding package's source code directory. Depending on the test, +it may be necessary to do the same when invoking a generated test +binary directly. +

    +

    +The command-line package list, if present, must appear before any +flag not known to the go test command. Continuing the example above, +the package list would have to appear before -myflag, but could appear +on either side of -v. +

    +

    +To keep an argument for a test binary from being interpreted as a +known flag or a package name, use -args (see 'go help test') which +passes the remainder of the command line through to the test binary +uninterpreted and unaltered. +

    +

    +For instance, the command +

    +
    go test -v -args -x -v
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -x -v
    +
    +

    +Similarly, +

    +
    go test -args math
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test math
    +
    +

    +In the first example, the -x and the second -v are passed through to the +test binary unchanged and with no effect on the go command itself. +In the second example, the argument math is passed through to the test +binary, instead of being interpreted as the package list. +

    +

    Description of testing functions

    +

    +The 'go test' command expects to find test, benchmark, and example functions +in the "*_test.go" files corresponding to the package under test. +

    +

    +A test function is one named TestXXX (where XXX is any alphanumeric string +not starting with a lower case letter) and should have the signature, +

    +
    func TestXXX(t *testing.T) { ... }
    +
    +

    +A benchmark function is one named BenchmarkXXX and should have the signature, +

    +
    func BenchmarkXXX(b *testing.B) { ... }
    +
    +

    +An example function is similar to a test function but, instead of using +*testing.T to report success or failure, prints output to os.Stdout. +If the last comment in the function starts with "Output:" then the output +is compared exactly against the comment (see examples below). If the last +comment begins with "Unordered output:" then the output is compared to the +comment, however the order of the lines is ignored. An example with no such +comment is compiled but not executed. An example with no text after +"Output:" is compiled, executed, and expected to produce no output. +

    +

    +Godoc displays the body of ExampleXXX to demonstrate the use +of the function, constant, or variable XXX. An example of a method M with +receiver type T or *T is named ExampleT_M. There may be multiple examples +for a given function, constant, or variable, distinguished by a trailing _xxx, +where xxx is a suffix not beginning with an upper case letter. +

    +

    +Here is an example of an example: +

    +
    func ExamplePrintln() {
    +	Println("The output of\nthis example.")
    +	// Output: The output of
    +	// this example.
    +}
    +
    +

    +Here is another example where the ordering of the output is ignored: +

    +
    func ExamplePerm() {
    +	for _, value := range Perm(4) {
    +		fmt.Println(value)
    +	}
    +
    +	// Unordered output: 4
    +	// 2
    +	// 1
    +	// 3
    +	// 0
    +}
    +
    +

    +The entire test file is presented as the example when it contains a single +example function, at least one other function, type, variable, or constant +declaration, and no test or benchmark functions. +

    +

    +See the documentation of the testing package for more information. +

    + + + +
    +
    + + + + + + + + +`)) diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md new file mode 100644 index 0000000..fb5c5ef --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/README.md @@ -0,0 +1,97 @@ +# h2i + +**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' +days of telnetting to your HTTP/1.n servers? We're bringing you +back. + +Features: +- send raw HTTP/2 frames + - PING + - SETTINGS + - HEADERS + - etc +- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2 +- pretty print all received HTTP/2 frames from the peer (including HPACK decoding) +- tab completion of commands, options + +Not yet features, but soon: +- unnecessary CONTINUATION frames on short boundaries, to test peer implementations +- request bodies (DATA frames) +- send invalid frames for testing server implementations (supported by underlying Framer) + +Later: +- act like a server + +## Installation + +``` +$ go get golang.org/x/net/http2/h2i +$ h2i +``` + +## Demo + +``` +$ h2i +Usage: h2i + + -insecure + Whether to skip TLS cert validation + -nextproto string + Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14") + +$ h2i google.com +Connecting to google.com:443 ... +Connected to 74.125.224.41:443 +Negotiated protocol "h2-14" +[FrameHeader SETTINGS len=18] + [MAX_CONCURRENT_STREAMS = 100] + [INITIAL_WINDOW_SIZE = 1048576] + [MAX_FRAME_SIZE = 16384] +[FrameHeader WINDOW_UPDATE len=4] + Window-Increment = 983041 + +h2i> PING h2iSayHI +[FrameHeader PING flags=ACK len=8] + Data = "h2iSayHI" +h2i> headers +(as HTTP/1.1)> GET / HTTP/1.1 +(as HTTP/1.1)> Host: ip.appspot.com +(as HTTP/1.1)> User-Agent: h2i/brad-n-blake +(as HTTP/1.1)> +Opening Stream-ID 1: + :authority = ip.appspot.com + :method = GET + :path = / + :scheme = https + user-agent = h2i/brad-n-blake +[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77] + :status = "200" + alternate-protocol = "443:quic,p=1" + content-length = "15" + content-type = "text/html" + date = "Fri, 01 May 2015 23:06:56 GMT" + server = "Google Frontend" +[FrameHeader DATA flags=END_STREAM stream=1 len=15] + "173.164.155.78\n" +[FrameHeader PING len=8] + Data = "\x00\x00\x00\x00\x00\x00\x00\x00" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader GOAWAY len=22] + Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1) + +ReadFrame: EOF +``` + +## Status + +Quick few hour hack. So much yet to do. Feel free to file issues for +bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/) +and I aren't yet accepting pull requests until things settle down. + diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go new file mode 100644 index 0000000..62e5752 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/h2i.go @@ -0,0 +1,522 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!solaris + +/* +The h2i command is an interactive HTTP/2 console. + +Usage: + $ h2i [flags] + +Interactive commands in the console: (all parts case-insensitive) + + ping [data] + settings ack + settings FOO=n BAR=z + headers (open a new stream by typing HTTP/1.1) +*/ +package main + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "regexp" + "strconv" + "strings" + + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// Flags +var ( + flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.") + flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation") + flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.") + flagDial = flag.String("dial", "", "optional ip:port to dial, to connect to a host:port but use a different SNI name (including a SNI name without DNS)") +) + +type command struct { + run func(*h2i, []string) error // required + + // complete optionally specifies tokens (case-insensitive) which are + // valid for this subcommand. + complete func() []string +} + +var commands = map[string]command{ + "ping": {run: (*h2i).cmdPing}, + "settings": { + run: (*h2i).cmdSettings, + complete: func() []string { + return []string{ + "ACK", + http2.SettingHeaderTableSize.String(), + http2.SettingEnablePush.String(), + http2.SettingMaxConcurrentStreams.String(), + http2.SettingInitialWindowSize.String(), + http2.SettingMaxFrameSize.String(), + http2.SettingMaxHeaderListSize.String(), + } + }, + }, + "quit": {run: (*h2i).cmdQuit}, + "headers": {run: (*h2i).cmdHeaders}, +} + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") + flag.PrintDefaults() +} + +// withPort adds ":443" if another port isn't already present. +func withPort(host string) string { + if _, _, err := net.SplitHostPort(host); err != nil { + return net.JoinHostPort(host, "443") + } + return host +} + +// withoutPort strips the port from addr if present. +func withoutPort(addr string) string { + if h, _, err := net.SplitHostPort(addr); err == nil { + return h + } + return addr +} + +// h2i is the app's state. +type h2i struct { + host string + tc *tls.Conn + framer *http2.Framer + term *terminal.Terminal + + // owned by the command loop: + streamID uint32 + hbuf bytes.Buffer + henc *hpack.Encoder + + // owned by the readFrames loop: + peerSetting map[http2.SettingID]uint32 + hdec *hpack.Decoder +} + +func main() { + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + os.Exit(2) + } + log.SetFlags(0) + + host := flag.Arg(0) + app := &h2i{ + host: host, + peerSetting: make(map[http2.SettingID]uint32), + } + app.henc = hpack.NewEncoder(&app.hbuf) + + if err := app.Main(); err != nil { + if app.term != nil { + app.logf("%v\n", err) + } else { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + } + fmt.Fprintf(os.Stdout, "\n") +} + +func (app *h2i) Main() error { + cfg := &tls.Config{ + ServerName: withoutPort(app.host), + NextProtos: strings.Split(*flagNextProto, ","), + InsecureSkipVerify: *flagInsecure, + } + + hostAndPort := *flagDial + if hostAndPort == "" { + hostAndPort = withPort(app.host) + } + log.Printf("Connecting to %s ...", hostAndPort) + tc, err := tls.Dial("tcp", hostAndPort, cfg) + if err != nil { + return fmt.Errorf("Error dialing %s: %v", hostAndPort, err) + } + log.Printf("Connected to %v", tc.RemoteAddr()) + defer tc.Close() + + if err := tc.Handshake(); err != nil { + return fmt.Errorf("TLS handshake: %v", err) + } + if !*flagInsecure { + if err := tc.VerifyHostname(app.host); err != nil { + return fmt.Errorf("VerifyHostname: %v", err) + } + } + state := tc.ConnectionState() + log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) + if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { + return fmt.Errorf("Could not negotiate protocol mutually") + } + + if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { + return err + } + + app.framer = http2.NewFramer(tc, tc) + + oldState, err := terminal.MakeRaw(int(os.Stdin.Fd())) + if err != nil { + return err + } + defer terminal.Restore(0, oldState) + + var screen = struct { + io.Reader + io.Writer + }{os.Stdin, os.Stdout} + + app.term = terminal.NewTerminal(screen, "h2i> ") + lastWord := regexp.MustCompile(`.+\W(\w+)$`) + app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { + if key != '\t' { + return + } + if pos != len(line) { + // TODO: we're being lazy for now, only supporting tab completion at the end. + return + } + // Auto-complete for the command itself. + if !strings.Contains(line, " ") { + var name string + name, _, ok = lookupCommand(line) + if !ok { + return + } + return name, len(name), true + } + _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) + if !ok || c.complete == nil { + return + } + if strings.HasSuffix(line, " ") { + app.logf("%s", strings.Join(c.complete(), " ")) + return line, pos, true + } + m := lastWord.FindStringSubmatch(line) + if m == nil { + return line, len(line), true + } + soFar := m[1] + var match []string + for _, cand := range c.complete() { + if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { + continue + } + match = append(match, cand) + } + if len(match) == 0 { + return + } + if len(match) > 1 { + // TODO: auto-complete any common prefix + app.logf("%s", strings.Join(match, " ")) + return line, pos, true + } + newLine = line[:len(line)-len(soFar)] + match[0] + return newLine, len(newLine), true + + } + + errc := make(chan error, 2) + go func() { errc <- app.readFrames() }() + go func() { errc <- app.readConsole() }() + return <-errc +} + +func (app *h2i) logf(format string, args ...interface{}) { + fmt.Fprintf(app.term, format+"\r\n", args...) +} + +func (app *h2i) readConsole() error { + if s := *flagSettings; s != "omit" { + var args []string + if s != "empty" { + args = strings.Split(s, ",") + } + _, c, ok := lookupCommand("settings") + if !ok { + panic("settings command not found") + } + c.run(app, args) + } + + for { + line, err := app.term.ReadLine() + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("terminal.ReadLine: %v", err) + } + f := strings.Fields(line) + if len(f) == 0 { + continue + } + cmd, args := f[0], f[1:] + if _, c, ok := lookupCommand(cmd); ok { + err = c.run(app, args) + } else { + app.logf("Unknown command %q", line) + } + if err == errExitApp { + return nil + } + if err != nil { + return err + } + } +} + +func lookupCommand(prefix string) (name string, c command, ok bool) { + prefix = strings.ToLower(prefix) + if c, ok = commands[prefix]; ok { + return prefix, c, ok + } + + for full, candidate := range commands { + if strings.HasPrefix(full, prefix) { + if c.run != nil { + return "", command{}, false // ambiguous + } + c = candidate + name = full + } + } + return name, c, c.run != nil +} + +var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") + +func (a *h2i) cmdQuit(args []string) error { + if len(args) > 0 { + a.logf("the QUIT command takes no argument") + return nil + } + return errExitApp +} + +func (a *h2i) cmdSettings(args []string) error { + if len(args) == 1 && strings.EqualFold(args[0], "ACK") { + return a.framer.WriteSettingsAck() + } + var settings []http2.Setting + for _, arg := range args { + if strings.EqualFold(arg, "ACK") { + a.logf("Error: ACK must be only argument with the SETTINGS command") + return nil + } + eq := strings.Index(arg, "=") + if eq == -1 { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + sid, ok := settingByName(arg[:eq]) + if !ok { + a.logf("Error: unknown setting name %q", arg[:eq]) + return nil + } + val, err := strconv.ParseUint(arg[eq+1:], 10, 32) + if err != nil { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + settings = append(settings, http2.Setting{ + ID: sid, + Val: uint32(val), + }) + } + a.logf("Sending: %v", settings) + return a.framer.WriteSettings(settings...) +} + +func settingByName(name string) (http2.SettingID, bool) { + for _, sid := range [...]http2.SettingID{ + http2.SettingHeaderTableSize, + http2.SettingEnablePush, + http2.SettingMaxConcurrentStreams, + http2.SettingInitialWindowSize, + http2.SettingMaxFrameSize, + http2.SettingMaxHeaderListSize, + } { + if strings.EqualFold(sid.String(), name) { + return sid, true + } + } + return 0, false +} + +func (app *h2i) cmdPing(args []string) error { + if len(args) > 1 { + app.logf("invalid PING usage: only accepts 0 or 1 args") + return nil // nil means don't end the program + } + var data [8]byte + if len(args) == 1 { + copy(data[:], args[0]) + } else { + copy(data[:], "h2i_ping") + } + return app.framer.WritePing(false, data) +} + +func (app *h2i) cmdHeaders(args []string) error { + if len(args) > 0 { + app.logf("Error: HEADERS doesn't yet take arguments.") + // TODO: flags for restricting window size, to force CONTINUATION + // frames. + return nil + } + var h1req bytes.Buffer + app.term.SetPrompt("(as HTTP/1.1)> ") + defer app.term.SetPrompt("h2i> ") + for { + line, err := app.term.ReadLine() + if err != nil { + return err + } + h1req.WriteString(line) + h1req.WriteString("\r\n") + if line == "" { + break + } + } + req, err := http.ReadRequest(bufio.NewReader(&h1req)) + if err != nil { + app.logf("Invalid HTTP/1.1 request: %v", err) + return nil + } + if app.streamID == 0 { + app.streamID = 1 + } else { + app.streamID += 2 + } + app.logf("Opening Stream-ID %d:", app.streamID) + hbf := app.encodeHeaders(req) + if len(hbf) > 16<<10 { + app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go") + return nil + } + return app.framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: app.streamID, + BlockFragment: hbf, + EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now + EndHeaders: true, // for now + }) +} + +func (app *h2i) readFrames() error { + for { + f, err := app.framer.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame: %v", err) + } + app.logf("%v", f) + switch f := f.(type) { + case *http2.PingFrame: + app.logf(" Data = %q", f.Data) + case *http2.SettingsFrame: + f.ForeachSetting(func(s http2.Setting) error { + app.logf(" %v", s) + app.peerSetting[s.ID] = s.Val + return nil + }) + case *http2.WindowUpdateFrame: + app.logf(" Window-Increment = %v", f.Increment) + case *http2.GoAwayFrame: + app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)", f.LastStreamID, f.ErrCode, f.ErrCode) + case *http2.DataFrame: + app.logf(" %q", f.Data()) + case *http2.HeadersFrame: + if f.HasPriority() { + app.logf(" PRIORITY = %v", f.Priority) + } + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + case *http2.PushPromiseFrame: + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + } + } +} + +// called from readLoop +func (app *h2i) onNewHeaderField(f hpack.HeaderField) { + if f.Sensitive { + app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value) + } + app.logf(" %s = %q", f.Name, f.Value) +} + +func (app *h2i) encodeHeaders(req *http.Request) []byte { + app.hbuf.Reset() + + // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go + host := req.Host + if host == "" { + host = req.URL.Host + } + + path := req.RequestURI + if path == "" { + path = "/" + } + + app.writeHeader(":authority", host) // probably not right for all sites + app.writeHeader(":method", req.Method) + app.writeHeader(":path", path) + app.writeHeader(":scheme", "https") + + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + if lowKey == "host" { + continue + } + for _, v := range vv { + app.writeHeader(lowKey, v) + } + } + return app.hbuf.Bytes() +} + +func (app *h2i) writeHeader(name, value string) { + app.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) + app.logf(" %s = %s", name, value) +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go new file mode 100644 index 0000000..c2805f6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -0,0 +1,78 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "net/http" + "strings" +) + +var ( + commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case + commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case +) + +func init() { + for _, v := range []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 0000000..1565cf2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,240 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This will protect the encoder from too large + // size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.table.init() + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. +func (e *Encoder) WriteField(f HeaderField) error { + e.buf = e.buf[:0] + + if e.tableSizeUpdate { + e.tableSizeUpdate = false + if e.minSize < e.dynTab.maxSize { + e.buf = appendTableSize(e.buf, e.minSize) + } + e.minSize = uint32Max + e.buf = appendTableSize(e.buf, e.dynTab.maxSize) + } + + idx, nameValueMatch := e.searchTable(f) + if nameValueMatch { + e.buf = appendIndexed(e.buf, idx) + } else { + indexing := e.shouldIndex(f) + if indexing { + e.dynTab.add(f) + } + + if idx == 0 { + e.buf = appendNewName(e.buf, f, indexing) + } else { + e.buf = appendIndexedName(e.buf, f, idx, indexing) + } + } + n, err := e.w.Write(e.buf) + if err == nil && n != len(e.buf) { + err = io.ErrShortWrite + } + return err +} + +// searchTable searches f in both stable and dynamic header tables. +// The static header table is searched first. Only when there is no +// exact match for both name and value, the dynamic header table is +// then searched. If there is no match, i is 0. If both name and value +// match, i is the matched index and nameValueMatch becomes true. If +// only name matches, i points to that index and nameValueMatch +// becomes false. +func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true + } + + j, nameValueMatch := e.dynTab.table.search(f) + if nameValueMatch || (i == 0 && j != 0) { + return j + uint64(staticTable.len()), nameValueMatch + } + + return i, false +} + +// SetMaxDynamicTableSize changes the dynamic header table size to v. +// The actual size is bounded by the value passed to +// SetMaxDynamicTableSizeLimit. +func (e *Encoder) SetMaxDynamicTableSize(v uint32) { + if v > e.maxSizeLimit { + v = e.maxSizeLimit + } + if v < e.minSize { + e.minSize = v + } + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) +} + +// SetMaxDynamicTableSizeLimit changes the maximum value that can be +// specified in SetMaxDynamicTableSize to v. By default, it is set to +// 4096, which is the same size of the default dynamic header table +// size described in HPACK specification. If the current maximum +// dynamic header table size is strictly greater than v, "Header Table +// Size Update" will be done in the next WriteField call and the +// maximum dynamic header table size is truncated to v. +func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { + e.maxSizeLimit = v + if e.dynTab.maxSize > v { + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) + } +} + +// shouldIndex reports whether f should be indexed. +func (e *Encoder) shouldIndex(f HeaderField) bool { + return !f.Sensitive && f.Size() <= e.dynTab.maxSize +} + +// appendIndexed appends index i, as encoded in "Indexed Header Field" +// representation, to dst and returns the extended buffer. +func appendIndexed(dst []byte, i uint64) []byte { + first := len(dst) + dst = appendVarInt(dst, 7, i) + dst[first] |= 0x80 + return dst +} + +// appendNewName appends f, as encoded in one of "Literal Header field +// - New Name" representation variants, to dst and returns the +// extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Inremental Indexing" +// representation is used. +func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { + dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) + dst = appendHpackString(dst, f.Name) + return appendHpackString(dst, f.Value) +} + +// appendIndexedName appends f and index i referring indexed name +// entry, as encoded in one of "Literal Header field - Indexed Name" +// representation variants, to dst and returns the extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { + first := len(dst) + var n byte + if indexing { + n = 6 + } else { + n = 4 + } + dst = appendVarInt(dst, n, i) + dst[first] |= encodeTypeByte(indexing, f.Sensitive) + return appendHpackString(dst, f.Value) +} + +// appendTableSize appends v, as encoded in "Header Table Size Update" +// representation, to dst and returns the extended buffer. +func appendTableSize(dst []byte, v uint32) []byte { + first := len(dst) + dst = appendVarInt(dst, 5, uint64(v)) + dst[first] |= 0x20 + return dst +} + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// appendHpackString appends s, as encoded in "String Literal" +// representation, to dst and returns the extended buffer. +// +// s will be encoded in Huffman codes only when it produces strictly +// shorter byte string. +func appendHpackString(dst []byte, s string) []byte { + huffmanLength := HuffmanEncodeLength(s) + if huffmanLength < uint64(len(s)) { + first := len(dst) + dst = appendVarInt(dst, 7, huffmanLength) + dst = AppendHuffmanString(dst, s) + dst[first] |= 0x80 + } else { + dst = appendVarInt(dst, 7, uint64(len(s))) + dst = append(dst, s...) + } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. +func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go new file mode 100644 index 0000000..05f12db --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode_test.go @@ -0,0 +1,386 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" +) + +func TestEncoderTableSizeUpdate(t *testing.T) { + tests := []struct { + size1, size2 uint32 + wantHex string + }{ + // Should emit 2 table size updates (2048 and 4096) + {2048, 4096, "3fe10f 3fe11f 82"}, + + // Should emit 1 table size update (2048) + {16384, 2048, "3fe10f 82"}, + } + for _, tt := range tests { + var buf bytes.Buffer + e := NewEncoder(&buf) + e.SetMaxDynamicTableSize(tt.size1) + e.SetMaxDynamicTableSize(tt.size2) + if err := e.WriteField(pair(":method", "GET")); err != nil { + t.Fatal(err) + } + want := removeSpace(tt.wantHex) + if got := hex.EncodeToString(buf.Bytes()); got != want { + t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want) + } + } +} + +func TestEncoderWriteField(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + var got []HeaderField + d := NewDecoder(4<<10, func(f HeaderField) { + got = append(got, f) + }) + + tests := []struct { + hdrs []HeaderField + }{ + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }}, + } + for i, tt := range tests { + buf.Reset() + got = got[:0] + for _, hf := range tt.hdrs { + if err := e.WriteField(hf); err != nil { + t.Fatal(err) + } + } + _, err := d.Write(buf.Bytes()) + if err != nil { + t.Errorf("%d. Decoder Write = %v", i, err) + } + if !reflect.DeepEqual(got, tt.hdrs) { + t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs) + } + } +} + +func TestEncoderSearchTable(t *testing.T) { + e := NewEncoder(nil) + + e.dynTab.add(pair("foo", "bar")) + e.dynTab.add(pair("blake", "miz")) + e.dynTab.add(pair(":method", "GET")) + + tests := []struct { + hf HeaderField + wantI uint64 + wantMatch bool + }{ + // Name and Value match + {pair("foo", "bar"), uint64(staticTable.len()) + 3, true}, + {pair("blake", "miz"), uint64(staticTable.len()) + 2, true}, + {pair(":method", "GET"), 2, true}, + + // Only name match because Sensitive == true. This is allowed to match + // any ":method" entry. The current implementation uses the last entry + // added in newStaticTable. + {HeaderField{":method", "GET", true}, 3, false}, + + // Only Name matches + {pair("foo", "..."), uint64(staticTable.len()) + 3, false}, + {pair("blake", "..."), uint64(staticTable.len()) + 2, false}, + // As before, this is allowed to match any ":method" entry. + {pair(":method", "..."), 3, false}, + + // None match + {pair("foo-", "bar"), 0, false}, + } + for _, tt := range tests { + if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { + t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) + } + } +} + +func TestAppendVarInt(t *testing.T) { + tests := []struct { + n byte + i uint64 + want []byte + }{ + // Fits in a byte: + {1, 0, []byte{0}}, + {2, 2, []byte{2}}, + {3, 6, []byte{6}}, + {4, 14, []byte{14}}, + {5, 30, []byte{30}}, + {6, 62, []byte{62}}, + {7, 126, []byte{126}}, + {8, 254, []byte{254}}, + + // Multiple bytes: + {5, 1337, []byte{31, 154, 10}}, + } + for _, tt := range tests { + got := appendVarInt(nil, tt.n, tt.i) + if !bytes.Equal(got, tt.want) { + t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want) + } + } +} + +func TestAppendHpackString(t *testing.T) { + tests := []struct { + s, wantHex string + }{ + // Huffman encoded + {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + + // Not Huffman encoded + {"a", "01 61"}, + + // zero length + {"", "00"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendHpackString(nil, tt.s) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want) + } + } +} + +func TestAppendIndexed(t *testing.T) { + tests := []struct { + i uint64 + wantHex string + }{ + // 1 byte + {1, "81"}, + {126, "fe"}, + + // 2 bytes + {127, "ff00"}, + {128, "ff01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexed(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestAppendNewName(t *testing.T) { + tests := []struct { + f HeaderField + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Without indexing + {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Never indexed + {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendNewName(nil, tt.f, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendIndexedName(t *testing.T) { + tests := []struct { + f HeaderField + i uint64 + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{":status", "302", false}, 8, true, "48 82 6402"}, + + // Without indexing + {HeaderField{":status", "302", false}, 8, false, "08 82 6402"}, + + // Never indexed + {HeaderField{":status", "302", true}, 8, true, "18 82 6402"}, + {HeaderField{":status", "302", true}, 8, false, "18 82 6402"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendTableSize(t *testing.T) { + tests := []struct { + i uint32 + wantHex string + }{ + // Fits into 1 byte + {30, "3e"}, + + // Extra byte + {31, "3f00"}, + {32, "3f01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendTableSize(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestEncoderSetMaxDynamicTableSize(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + tests := []struct { + v uint32 + wantUpdate bool + wantMinSize uint32 + wantMaxSize uint32 + }{ + // Set new table size to 2048 + {2048, true, 2048, 2048}, + + // Set new table size to 16384, but still limited to + // 4096 + {16384, true, 2048, 4096}, + } + for _, tt := range tests { + e.SetMaxDynamicTableSize(tt.v) + if got := e.tableSizeUpdate; tt.wantUpdate != got { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate) + } + if got := e.minSize; tt.wantMinSize != got { + t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize) + } + if got := e.dynTab.maxSize; tt.wantMaxSize != got { + t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize) + } + } +} + +func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) { + e := NewEncoder(nil) + // 4095 < initialHeaderTableSize means maxSize is truncated to + // 4095. + e.SetMaxDynamicTableSizeLimit(4095) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(4095); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } + if got, want := e.tableSizeUpdate, true; got != want { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, want) + } + // maxSize will be truncated to maxSizeLimit + e.SetMaxDynamicTableSize(16384) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + // 8192 > current maxSizeLimit, so maxSize does not change. + e.SetMaxDynamicTableSizeLimit(8192) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(8192); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } +} + +func removeSpace(s string) string { + return strings.Replace(s, " ", "", -1) +} + +func BenchmarkEncoderSearchTable(b *testing.B) { + e := NewEncoder(nil) + + // A sample of possible header fields. + // This is not based on any actual data from HTTP/2 traces. + var possible []HeaderField + for _, f := range staticTable.ents { + if f.Value == "" { + possible = append(possible, f) + continue + } + // Generate 5 random values, except for cookie and set-cookie, + // which we know can have many values in practice. + num := 5 + if f.Name == "cookie" || f.Name == "set-cookie" { + num = 25 + } + for i := 0; i < num; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + for k := 0; k < 10; k++ { + f := HeaderField{ + Name: fmt.Sprintf("x-header-%d", k), + Sensitive: rand.Int()%2 == 0, + } + for i := 0; i < 5; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + + // Add a random sample to the dynamic table. This very loosely simulates + // a history of 100 requests with 20 header fields per request. + for r := 0; r < 100*20; r++ { + f := possible[rand.Int31n(int32(len(possible)))] + // Skip if this is in the staticTable verbatim. + if _, has := staticTable.search(f); !has { + e.dynTab.add(f) + } + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + for _, f := range possible { + e.searchTable(f) + } + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 0000000..176644a --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,490 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. +// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. +type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. + saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + table headerFieldTable + size uint32 // in bytes + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +func (dt *dynamicTable) add(f HeaderField) { + dt.table.addEntry(f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff. +func (dt *dynamicTable) evict() { + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ + } + dt.table.evictOldest(n) +} + +func (d *Decoder) maxTableIndex() int { + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + // See Section 2.3.3. + if i == 0 { + return + } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } + if i > uint64(d.maxTableIndex()) { + return + } + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + return d.parseDynamicTableSizeUpdate() + } + + return DecodingError{errors.New("invalid encoding")} +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldIndexed() error { + buf := d.buf + idx, buf, err := readVarInt(7, buf) + if err != nil { + return err + } + hf, ok := d.at(idx) + if !ok { + return DecodingError{InvalidIndexError(idx)} + } + d.buf = buf + return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { + buf := d.buf + nameIdx, buf, err := readVarInt(n, buf) + if err != nil { + return err + } + + var hf HeaderField + wantStr := d.emitEnabled || it.indexed() + if nameIdx > 0 { + ihf, ok := d.at(nameIdx) + if !ok { + return DecodingError{InvalidIndexError(nameIdx)} + } + hf.Name = ihf.Name + } else { + hf.Name, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + } + hf.Value, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + d.buf = buf + if it.indexed() { + d.dynTab.add(hf) + } + hf.Sensitive = it.sensitive() + return d.callEmit(hf) +} + +func (d *Decoder) callEmit(hf HeaderField) error { + if d.maxStrLen != 0 { + if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { + return ErrStringLength + } + } + if d.emitEnabled { + d.emit(hf) + } + return nil +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseDynamicTableSizeUpdate() error { + buf := d.buf + size, buf, err := readVarInt(5, buf) + if err != nil { + return err + } + if size > uint64(d.dynTab.allowedMaxSize) { + return DecodingError{errors.New("dynamic table size update too large")} + } + d.dynTab.setMaxSize(uint32(size)) + d.buf = buf + return nil +} + +var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} + +// readVarInt reads an unsigned variable length integer off the +// beginning of p. n is the parameter as described in +// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// +// n must always be between 1 and 8. +// +// The returned remain buffer is either a smaller suffix of p, or err != nil. +// The error is errNeedMore if p doesn't contain a complete integer. +func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { + if n < 1 || n > 8 { + panic("bad n") + } + if len(p) == 0 { + return 0, p, errNeedMore + } + i = uint64(p[0]) + if n < 8 { + i &= (1 << uint64(n)) - 1 + } + if i < (1< 0 { + b := p[0] + p = p[1:] + i += uint64(b&127) << m + if b&128 == 0 { + return i, p, nil + } + m += 7 + if m >= 63 { // TODO: proper overflow check. making this up. + return 0, origP, errVarintOverflow + } + } + return 0, origP, errNeedMore +} + +// readString decodes an hpack string from p. +// +// wantStr is whether s will be used. If false, decompression and +// []byte->string garbage are skipped if s will be ignored +// anyway. This does mean that huffman decoding errors for non-indexed +// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server +// is returning an error anyway, and because they're not indexed, the error +// won't affect the decoding state. +func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { + if len(p) == 0 { + return "", p, errNeedMore + } + isHuff := p[0]&128 != 0 + strLen, p, err := readVarInt(7, p) + if err != nil { + return "", p, err + } + if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { + return "", nil, ErrStringLength + } + if uint64(len(p)) < strLen { + return "", p, errNeedMore + } + if !isHuff { + if wantStr { + s = string(p[:strLen]) + } + return s, p[strLen:], nil + } + + if wantStr { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + defer bufPool.Put(buf) + if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { + buf.Reset() + return "", nil, err + } + s = buf.String() + buf.Reset() // be nice to GC + } + return s, p[strLen:], nil +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go new file mode 100644 index 0000000..bc7f476 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack_test.go @@ -0,0 +1,722 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" + "time" +) + +func (d *Decoder) mustAt(idx int) HeaderField { + if hf, ok := d.at(uint64(idx)); !ok { + panic(fmt.Sprintf("bogus index %d", idx)) + } else { + return hf + } +} + +func TestDynamicTableAt(t *testing.T) { + d := NewDecoder(4096, nil) + at := d.mustAt + if got, want := at(2), (pair(":method", "GET")); got != want { + t.Errorf("at(2) = %v; want %v", got, want) + } + d.dynTab.add(pair("foo", "bar")) + d.dynTab.add(pair("blake", "miz")) + if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 2) = %v; want %v", got, want) + } + if got, want := at(3), (pair(":method", "POST")); got != want { + t.Errorf("at(3) = %v; want %v", got, want) + } +} + +func TestDynamicTableSizeEvict(t *testing.T) { + d := NewDecoder(4096, nil) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("size = %d; want %d", d.dynTab.size, want) + } + add := d.dynTab.add + add(pair("blake", "eats pizza")) + if want := uint32(15 + 32); d.dynTab.size != want { + t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want) + } + add(pair("foo", "bar")) + if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want { + t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want) + } + d.dynTab.setMaxSize(15 + 32 + 1 /* slop */) + if want := uint32(6 + 32); d.dynTab.size != want { + t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) + } + if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + add(pair("long", strings.Repeat("x", 500))) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want) + } +} + +func TestDecoderDecode(t *testing.T) { + tests := []struct { + name string + in []byte + want []HeaderField + wantDynTab []HeaderField // newest entry first + }{ + // C.2.1 Literal Header Field with Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1 + {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"), + []HeaderField{pair("custom-key", "custom-header")}, + []HeaderField{pair("custom-key", "custom-header")}, + }, + + // C.2.2 Literal Header Field without Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2 + {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"), + []HeaderField{pair(":path", "/sample/path")}, + []HeaderField{}}, + + // C.2.3 Literal Header Field never Indexed + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3 + {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"), + []HeaderField{{"password", "secret", true}}, + []HeaderField{}}, + + // C.2.4 Indexed Header Field + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4 + {"C.2.4", []byte("\x82"), + []HeaderField{pair(":method", "GET")}, + []HeaderField{}}, + } + for _, tt := range tests { + d := NewDecoder(4096, nil) + hf, err := d.DecodeFull(tt.in) + if err != nil { + t.Errorf("%s: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(hf, tt.want) { + t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) { + t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab) + } + } +} + +func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { + hf = make([]HeaderField, len(dt.table.ents)) + for i := range hf { + hf[i] = dt.table.ents[len(dt.table.ents)-1-i] + } + return +} + +type encAndWant struct { + enc []byte + want []HeaderField + wantDynTab []HeaderField + wantDynSize uint32 +} + +// C.3 Request Examples without Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3 +func TestDecodeC3_NoHuffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5808 6e6f 2d63 6163 6865"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// C.4 Request Examples with Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4 +func TestDecodeC4_Huffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5886 a8eb 1064 9cbf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5 +// "This section shows several consecutive header lists, corresponding +// to HTTP responses, on the same connection. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur." +func TestDecodeC5_ResponsesNoHuff(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4803 3330 3258 0770 7269 7661 7465 611d +4d6f 6e2c 2032 3120 4f63 7420 3230 3133 +2032 303a 3133 3a32 3120 474d 546e 1768 +7474 7073 3a2f 2f77 7777 2e65 7861 6d70 +6c65 2e63 6f6d +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4803 3330 37c1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 611d 4d6f 6e2c 2032 3120 4f63 7420 +3230 3133 2032 303a 3133 3a32 3220 474d +54c0 5a04 677a 6970 7738 666f 6f3d 4153 +444a 4b48 514b 425a 584f 5157 454f 5049 +5541 5851 5745 4f49 553b 206d 6178 2d61 +6765 3d33 3630 303b 2076 6572 7369 6f6e +3d31 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6 +// "This section shows the same examples as the previous section, but +// using Huffman encoding for the literal values. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur. The eviction mechanism +// uses the length of the decoded literal values, so the same +// evictions occurs as in the previous section." +func TestDecodeC6_ResponsesHuffman(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4882 6402 5885 aec3 771a 4b61 96d0 7abe +9410 54d4 44a8 2005 9504 0b81 66e0 82a6 +2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8 +e9ae 82ae 43d3 +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4883 640e ffc1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 6196 d07a be94 1054 d444 a820 0595 +040b 8166 e084 a62d 1bff c05a 839b d9ab +77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b +3960 d5af 2708 7f36 72c1 ab27 0fb5 291f +9587 3160 65c0 03ed 4ee5 b106 3d50 07 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) { + d := NewDecoder(size, nil) + for i, step := range steps { + hf, err := d.DecodeFull(step.enc) + if err != nil { + t.Fatalf("Error at step index %d: %v", i, err) + } + if !reflect.DeepEqual(hf, step.want) { + t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, step.wantDynTab) { + t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab) + } + if d.dynTab.size != step.wantDynSize { + t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize) + } + } +} + +func TestHuffmanDecodeExcessPadding(t *testing.T) { + tests := [][]byte{ + {0xff}, // Padding Exceeds 7 bits + {0x1f, 0xff}, // {"a", 1 byte excess padding} + {0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding} + {0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding} + {0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding} + {'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol. + } + for i, in := range tests { + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err) + } + } +} + +func TestHuffmanDecodeEOS(t *testing.T) { + in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + +func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) { + in := []byte{0x00, 0x01} // {"0", "0", "0"} + var buf bytes.Buffer + if err := huffmanDecode(&buf, 2, in); err != ErrStringLength { + t.Errorf("error = %v; want ErrStringLength", err) + } +} + +func TestHuffmanDecodeCorruptPadding(t *testing.T) { + in := []byte{0x00} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + +func TestHuffmanDecode(t *testing.T) { + tests := []struct { + inHex, want string + }{ + {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"}, + {"a8eb 1064 9cbf", "no-cache"}, + {"25a8 49e9 5ba9 7d7f", "custom-key"}, + {"25a8 49e9 5bb8 e8b4 bf", "custom-value"}, + {"6402", "302"}, + {"aec3 771a 4b", "private"}, + {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"}, + {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"}, + {"9bd9 ab", "gzip"}, + {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07", + "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"}, + } + for i, tt := range tests { + var buf bytes.Buffer + in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1)) + if err != nil { + t.Errorf("%d. hex input error: %v", i, err) + continue + } + if _, err := HuffmanDecode(&buf, in); err != nil { + t.Errorf("%d. decode error: %v", i, err) + continue + } + if got := buf.String(); tt.want != got { + t.Errorf("%d. decode = %q; want %q", i, got, tt.want) + } + } +} + +func TestAppendHuffmanString(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + {"no-cache", "a8eb 1064 9cbf"}, + {"custom-key", "25a8 49e9 5ba9 7d7f"}, + {"custom-value", "25a8 49e9 5bb8 e8b4 bf"}, + {"302", "6402"}, + {"private", "aec3 771a 4b"}, + {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"}, + {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"}, + {"gzip", "9bd9 ab"}, + {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", + "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"}, + } + for i, tt := range tests { + buf := []byte{} + want := strings.Replace(tt.want, " ", "", -1) + buf = AppendHuffmanString(buf, tt.in) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("%d. encode = %q; want %q", i, got, want) + } + } +} + +func TestHuffmanMaxStrLen(t *testing.T) { + const msg = "Some string" + huff := AppendHuffmanString(nil, msg) + + testGood := func(max int) { + var out bytes.Buffer + if err := huffmanDecode(&out, max, huff); err != nil { + t.Errorf("For maxLen=%d, unexpected error: %v", max, err) + } + if out.String() != msg { + t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg) + } + } + testGood(0) + testGood(len(msg)) + testGood(len(msg) + 1) + + var out bytes.Buffer + if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength { + t.Errorf("err = %v; want ErrStringLength", err) + } +} + +func TestHuffmanRoundtripStress(t *testing.T) { + const Len = 50 // of uncompressed string + input := make([]byte, Len) + var output bytes.Buffer + var huff []byte + + n := 5000 + if testing.Short() { + n = 100 + } + seed := time.Now().UnixNano() + t.Logf("Seed = %v", seed) + src := rand.New(rand.NewSource(seed)) + var encSize int64 + for i := 0; i < n; i++ { + for l := range input { + input[l] = byte(src.Intn(256)) + } + huff = AppendHuffmanString(huff[:0], string(input)) + encSize += int64(len(huff)) + output.Reset() + if err := huffmanDecode(&output, 0, huff); err != nil { + t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err) + continue + } + if !bytes.Equal(output.Bytes(), input) { + t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes()) + } + } + t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize) +} + +func TestHuffmanDecodeFuzz(t *testing.T) { + const Len = 50 // of compressed + var buf, zbuf bytes.Buffer + + n := 5000 + if testing.Short() { + n = 100 + } + seed := time.Now().UnixNano() + t.Logf("Seed = %v", seed) + src := rand.New(rand.NewSource(seed)) + numFail := 0 + for i := 0; i < n; i++ { + zbuf.Reset() + if i == 0 { + // Start with at least one invalid one. + zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8") + } else { + for l := 0; l < Len; l++ { + zbuf.WriteByte(byte(src.Intn(256))) + } + } + + buf.Reset() + if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil { + if err == ErrInvalidHuffman { + numFail++ + continue + } + t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err) + continue + } + } + t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n) + if numFail < 1 { + t.Error("expected at least one invalid huffman encoding (test starts with one)") + } +} + +func TestReadVarInt(t *testing.T) { + type res struct { + i uint64 + consumed int + err error + } + tests := []struct { + n byte + p []byte + want res + }{ + // Fits in a byte: + {1, []byte{0}, res{0, 1, nil}}, + {2, []byte{2}, res{2, 1, nil}}, + {3, []byte{6}, res{6, 1, nil}}, + {4, []byte{14}, res{14, 1, nil}}, + {5, []byte{30}, res{30, 1, nil}}, + {6, []byte{62}, res{62, 1, nil}}, + {7, []byte{126}, res{126, 1, nil}}, + {8, []byte{254}, res{254, 1, nil}}, + + // Doesn't fit in a byte: + {1, []byte{1}, res{0, 0, errNeedMore}}, + {2, []byte{3}, res{0, 0, errNeedMore}}, + {3, []byte{7}, res{0, 0, errNeedMore}}, + {4, []byte{15}, res{0, 0, errNeedMore}}, + {5, []byte{31}, res{0, 0, errNeedMore}}, + {6, []byte{63}, res{0, 0, errNeedMore}}, + {7, []byte{127}, res{0, 0, errNeedMore}}, + {8, []byte{255}, res{0, 0, errNeedMore}}, + + // Ignoring top bits: + {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111 + {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100 + {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101 + + // Extra byte: + {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte + + // Short a byte: + {5, []byte{191, 154}, res{0, 0, errNeedMore}}, + + // integer overflow: + {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}}, + } + for _, tt := range tests { + i, remain, err := readVarInt(tt.n, tt.p) + consumed := len(tt.p) - len(remain) + got := res{i, consumed, err} + if got != tt.want { + t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want) + } + } +} + +// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56 +func TestHuffmanFuzzCrash(t *testing.T) { + got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8")) + if got != "" { + t.Errorf("Got %q; want empty string", got) + } + if err != ErrInvalidHuffman { + t.Errorf("Err = %v; want ErrInvalidHuffman", err) + } +} + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +func dehex(s string) []byte { + s = strings.Replace(s, " ", "", -1) + s = strings.Replace(s, "\n", "", -1) + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +func TestEmitEnabled(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + + numCallback := 0 + var dec *Decoder + dec = NewDecoder(8<<20, func(HeaderField) { + numCallback++ + dec.SetEmitEnabled(false) + }) + if !dec.EmitEnabled() { + t.Errorf("initial emit enabled = false; want true") + } + if _, err := dec.Write(buf.Bytes()); err != nil { + t.Error(err) + } + if numCallback != 1 { + t.Errorf("num callbacks = %d; want 1", numCallback) + } + if dec.EmitEnabled() { + t.Errorf("emit enabled = true; want false") + } +} + +func TestSaveBufLimit(t *testing.T) { + const maxStr = 1 << 10 + var got []HeaderField + dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) { + got = append(got, hf) + }) + dec.SetMaxStringLength(maxStr) + var frag []byte + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "foo"...) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "bar"...) + + if _, err := dec.Write(frag); err != nil { + t.Fatal(err) + } + + want := []HeaderField{{Name: "foo", Value: "bar"}} + if !reflect.DeepEqual(got, want) { + t.Errorf("After small writes, got %v; want %v", got, want) + } + + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, maxStr*3) + frag = append(frag, make([]byte, maxStr*3)...) + + _, err := dec.Write(frag) + if err != ErrStringLength { + t.Fatalf("Write error = %v; want ErrStringLength", err) + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 0000000..8850e39 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. +func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + n := rootHuffmanNode + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. + cur, cbits, sbits := uint(0), uint8(0), uint8(0) + for _, b := range v { + cur = cur<<8 | uint(b) + cbits += 8 + sbits += 8 + for cbits >= 8 { + idx := byte(cur >> (cbits - 8)) + n = n.children[idx] + if n == nil { + return ErrInvalidHuffman + } + if n.children == nil { + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } + buf.WriteByte(n.sym) + cbits -= n.codeLen + n = rootHuffmanNode + sbits = cbits + } else { + cbits -= 8 + } + } + } + for cbits > 0 { + n = n.children[byte(cur<<(8-cbits))] + if n == nil { + return ErrInvalidHuffman + } + if n.children != nil || n.codeLen > cbits { + break + } + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } + buf.WriteByte(n.sym) + cbits -= n.codeLen + n = rootHuffmanNode + sbits = cbits + } + if sbits > 7 { + // Either there was an incomplete symbol, or overlong padding. + // Both are decoding errors per RFC 7541 section 5.2. + return ErrInvalidHuffman + } + if mask := uint(1< 8 { + codeLen -= 8 + i := uint8(code >> codeLen) + if cur.children[i] == nil { + cur.children[i] = newInternalNode() + } + cur = cur.children[i] + } + shift := 8 - codeLen + start, end := int(uint8(code<> (nbits - rembits)) + dst[len(dst)-1] |= t + } + + return dst +} + +// HuffmanEncodeLength returns the number of bytes required to encode +// s in Huffman codes. The result is round up to byte boundary. +func HuffmanEncodeLength(s string) uint64 { + n := uint64(0) + for i := 0; i < len(s); i++ { + n += uint64(huffmanCodeLen[s[i]]) + } + return (n + 7) / 8 +} + +// appendByteToHuffmanCode appends Huffman code for c to dst and +// returns the extended buffer and the remaining bits in the last +// element. The appending is not byte aligned and the remaining bits +// in the last element of dst is given in rembits. +func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { + code := huffmanCodes[c] + nbits := huffmanCodeLen[c] + + for { + if rembits > nbits { + t := uint8(code << (rembits - nbits)) + dst[len(dst)-1] |= t + rembits -= nbits + break + } + + t := uint8(code >> (nbits - rembits)) + dst[len(dst)-1] |= t + + nbits -= rembits + rembits = 8 + + if nbits == 0 { + break + } + + dst = append(dst, 0) + } + + return dst, rembits +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go new file mode 100644 index 0000000..a66cfbe --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -0,0 +1,479 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "fmt" +) + +// headerFieldTable implements a list of HeaderFields. +// This is used to implement the static and dynamic tables. +type headerFieldTable struct { + // For static tables, entries are never evicted. + // + // For dynamic tables, entries are evicted from ents[0] and added to the end. + // Each entry has a unique id that starts at one and increments for each + // entry that is added. This unique id is stable across evictions, meaning + // it can be used as a pointer to a specific entry. As in hpack, unique ids + // are 1-based. The unique id for ents[k] is k + evictCount + 1. + // + // Zero is not a valid unique id. + // + // evictCount should not overflow in any remotely practical situation. In + // practice, we will have one dynamic table per HTTP/2 connection. If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. +func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables_test.go b/vendor/golang.org/x/net/http2/hpack/tables_test.go new file mode 100644 index 0000000..d963f36 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/tables_test.go @@ -0,0 +1,214 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bufio" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestHeaderFieldTable(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // Tests will be run twice: once before evicting anything, and + // again after evicting the three oldest entries. + tests := []struct { + f HeaderField + beforeWantStaticI uint64 + beforeWantMatch bool + afterWantStaticI uint64 + afterWantMatch bool + }{ + {HeaderField{"key1", "value1-1", false}, 1, true, 0, false}, + {HeaderField{"key1", "value1-2", false}, 3, true, 0, false}, + {HeaderField{"key1", "value1-3", false}, 3, false, 0, false}, + {HeaderField{"key2", "value2-1", false}, 2, true, 3, false}, + {HeaderField{"key2", "value2-2", false}, 6, true, 3, true}, + {HeaderField{"key2", "value2-3", false}, 6, false, 3, false}, + {HeaderField{"key4", "value4-1", false}, 5, true, 2, true}, + // Name match only, because sensitive. + {HeaderField{"key4", "value4-1", true}, 5, false, 2, false}, + // Key not found. + {HeaderField{"key5", "value5-x", false}, 0, false, 0, false}, + } + + staticToDynamic := func(i uint64) uint64 { + if i == 0 { + return 0 + } + return uint64(table.len()) - i + 1 // dynamic is the reversed table + } + + searchStatic := func(f HeaderField) (uint64, bool) { + old := staticTable + staticTable = table + defer func() { staticTable = old }() + return staticTable.search(f) + } + + searchDynamic := func(f HeaderField) (uint64, bool) { + return table.search(f) + } + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.beforeWantStaticI) + if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } + + table.evictOldest(3) + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.afterWantStaticI) + if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } +} + +func TestHeaderFieldTable_LookupMapEviction(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // evict all pairs + table.evictOldest(table.len()) + + if l := table.len(); l > 0 { + t.Errorf("table.len() = %d, want 0", l) + } + + if l := len(table.byName); l > 0 { + t.Errorf("len(table.byName) = %d, want 0", l) + } + + if l := len(table.byNameValue); l > 0 { + t.Errorf("len(table.byNameValue) = %d, want 0", l) + } +} + +func TestStaticTable(t *testing.T) { + fromSpec := ` + +-------+-----------------------------+---------------+ + | 1 | :authority | | + | 2 | :method | GET | + | 3 | :method | POST | + | 4 | :path | / | + | 5 | :path | /index.html | + | 6 | :scheme | http | + | 7 | :scheme | https | + | 8 | :status | 200 | + | 9 | :status | 204 | + | 10 | :status | 206 | + | 11 | :status | 304 | + | 12 | :status | 400 | + | 13 | :status | 404 | + | 14 | :status | 500 | + | 15 | accept-charset | | + | 16 | accept-encoding | gzip, deflate | + | 17 | accept-language | | + | 18 | accept-ranges | | + | 19 | accept | | + | 20 | access-control-allow-origin | | + | 21 | age | | + | 22 | allow | | + | 23 | authorization | | + | 24 | cache-control | | + | 25 | content-disposition | | + | 26 | content-encoding | | + | 27 | content-language | | + | 28 | content-length | | + | 29 | content-location | | + | 30 | content-range | | + | 31 | content-type | | + | 32 | cookie | | + | 33 | date | | + | 34 | etag | | + | 35 | expect | | + | 36 | expires | | + | 37 | from | | + | 38 | host | | + | 39 | if-match | | + | 40 | if-modified-since | | + | 41 | if-none-match | | + | 42 | if-range | | + | 43 | if-unmodified-since | | + | 44 | last-modified | | + | 45 | link | | + | 46 | location | | + | 47 | max-forwards | | + | 48 | proxy-authenticate | | + | 49 | proxy-authorization | | + | 50 | range | | + | 51 | referer | | + | 52 | refresh | | + | 53 | retry-after | | + | 54 | server | | + | 55 | set-cookie | | + | 56 | strict-transport-security | | + | 57 | transfer-encoding | | + | 58 | user-agent | | + | 59 | vary | | + | 60 | via | | + | 61 | www-authenticate | | + +-------+-----------------------------+---------------+ +` + bs := bufio.NewScanner(strings.NewReader(fromSpec)) + re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) + for bs.Scan() { + l := bs.Text() + if !strings.Contains(l, "|") { + continue + } + m := re.FindStringSubmatch(l) + if m == nil { + continue + } + i, err := strconv.Atoi(m[1]) + if err != nil { + t.Errorf("Bogus integer on line %q", l) + continue + } + if i < 1 || i > staticTable.len() { + t.Errorf("Bogus index %d on line %q", i, l) + continue + } + if got, want := staticTable.ents[i-1].Name, m[2]; got != want { + t.Errorf("header index %d name = %q; want %q", i, got, want) + } + if got, want := staticTable.ents[i-1].Value, m[3]; got != want { + t.Errorf("header index %d value = %q; want %q", i, got, want) + } + } + if err := bs.Err(); err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 0000000..71db28a --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,391 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. +// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/lex/httplex" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. +func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httplex.ValidHeaderName for the base rules. +// +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httplex.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) + +func init() { + for i := 100; i <= 999; i++ { + if v := http.StatusText(i); v != "" { + httpCodeStringCommon[i] = strconv.Itoa(i) + } + } +} + +func httpCodeString(code int) string { + if s, ok := httpCodeStringCommon[code]; ok { + return s + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. +type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) + }, +} + +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} diff --git a/vendor/golang.org/x/net/http2/http2_test.go b/vendor/golang.org/x/net/http2/http2_test.go new file mode 100644 index 0000000..5248776 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2_test.go @@ -0,0 +1,199 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "errors" + "flag" + "fmt" + "net/http" + "os/exec" + "strconv" + "strings" + "testing" + + "golang.org/x/net/http2/hpack" +) + +var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.") + +func condSkipFailingTest(t *testing.T) { + if !*knownFailing { + t.Skip("Skipping known-failing test without --known_failing") + } +} + +func init() { + inTests = true + DebugGoroutines = true + flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging") +} + +func TestSettingString(t *testing.T) { + tests := []struct { + s Setting + want string + }{ + {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"}, + {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"}, + } + for i, tt := range tests { + got := fmt.Sprint(tt.s) + if got != tt.want { + t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want) + } + } +} + +type twriter struct { + t testing.TB + st *serverTester // optional +} + +func (w twriter) Write(p []byte) (n int, err error) { + if w.st != nil { + ps := string(p) + for _, phrase := range w.st.logFilter { + if strings.Contains(ps, phrase) { + return len(p), nil // no logging + } + } + } + w.t.Logf("%s", p) + return len(p), nil +} + +// like encodeHeader, but don't add implicit pseudo headers. +func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + } + return buf.Bytes() +} + +// Verify that curl has http2. +func requireCurl(t *testing.T) { + out, err := dockerLogs(curl(t, "--version")) + if err != nil { + t.Skipf("failed to determine curl features; skipping test") + } + if !strings.Contains(string(out), "HTTP2") { + t.Skip("curl doesn't support HTTP2; skipping test") + } +} + +func curl(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run curl in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +// Verify that h2load exists. +func requireH2load(t *testing.T) { + out, err := dockerLogs(h2load(t, "--version")) + if err != nil { + t.Skipf("failed to probe h2load; skipping test: %s", out) + } + if !strings.Contains(string(out), "h2load nghttp2/") { + t.Skipf("h2load not present; skipping test. (Output=%q)", out) + } +} + +func h2load(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run h2load in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +type puppetCommand struct { + fn func(w http.ResponseWriter, r *http.Request) + done chan<- bool +} + +type handlerPuppet struct { + ch chan puppetCommand +} + +func newHandlerPuppet() *handlerPuppet { + return &handlerPuppet{ + ch: make(chan puppetCommand), + } +} + +func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) { + for cmd := range p.ch { + cmd.fn(w, r) + cmd.done <- true + } +} + +func (p *handlerPuppet) done() { close(p.ch) } +func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) { + done := make(chan bool) + p.ch <- puppetCommand{fn, done} + <-done +} +func dockerLogs(container string) ([]byte, error) { + out, err := exec.Command("docker", "wait", container).CombinedOutput() + if err != nil { + return out, err + } + exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out))) + if err != nil { + return out, errors.New("unexpected exit status from docker wait") + } + out, err = exec.Command("docker", "logs", container).CombinedOutput() + exec.Command("docker", "rm", container).Run() + if err == nil && exitStatus != 0 { + err = fmt.Errorf("exit status %d: %s", exitStatus, out) + } + return out, err +} + +func kill(container string) { + exec.Command("docker", "kill", container).Run() + exec.Command("docker", "rm", container).Run() +} + +func cleanDate(res *http.Response) { + if d := res.Header["Date"]; len(d) == 1 { + d[0] = "XXX" + } +} + +func TestSorterPoolAllocs(t *testing.T) { + ss := []string{"a", "b", "c"} + h := http.Header{ + "a": nil, + "b": nil, + "c": nil, + } + sorter := new(sorter) + + if allocs := testing.AllocsPerRun(100, func() { + sorter.SortStrings(ss) + }); allocs >= 1 { + t.Logf("SortStrings allocs = %v; want <1", allocs) + } + + if allocs := testing.AllocsPerRun(5, func() { + if len(sorter.Keys(h)) != 3 { + t.Fatal("wrong result") + } + }); allocs > 0 { + t.Logf("Keys allocs = %v; want <1", allocs) + } +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 0000000..508cebc --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,21 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import ( + "net/http" + "time" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 0000000..140434a --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package http2 + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type contextContext interface { + Done() <-chan struct{} + Err() error +} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. +} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} + +// temporary copy of Go 1.6's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} + +func (cc *ClientConn) Ping(ctx contextContext) error { + return cc.ping(ctx) +} + +func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 0000000..6f8d3f8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go18.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package http2 + +import ( + "io" + "net/http" +) + +func configureServer18(h1 *http.Server, h2 *Server) error { + // No IdleTimeout to sync prior to Go 1.8. + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return nil +} + +func reqBodyIsNoBody(io.ReadCloser) bool { return false } + +func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go new file mode 100644 index 0000000..5ae0772 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go19.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + // not supported prior to go1.9 + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 0000000..a614009 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,163 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return 0 + } + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b != nil && p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + p.b = nil + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/pipe_test.go b/vendor/golang.org/x/net/http2/pipe_test.go new file mode 100644 index 0000000..1bf351f --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe_test.go @@ -0,0 +1,130 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "testing" +) + +func TestPipeClose(t *testing.T) { + var p pipe + p.b = new(bytes.Buffer) + a := errors.New("a") + b := errors.New("b") + p.CloseWithError(a) + p.CloseWithError(b) + _, err := p.Read(make([]byte, 1)) + if err != a { + t.Errorf("err = %v want %v", err, a) + } +} + +func TestPipeDoneChan(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.CloseWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_ErrFirst(t *testing.T) { + var p pipe + p.CloseWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.BreakWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break_ErrFirst(t *testing.T) { + var p pipe + p.BreakWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeCloseWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + const body = "foo" + io.WriteString(p, body) + a := errors.New("test error") + p.CloseWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != body { + t.Errorf("read bytes = %q; want %q", all, body) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } + // Read and Write should fail. + if n, err := p.Write([]byte("abc")); err != errClosedPipeWrite || n != 0 { + t.Errorf("Write(abc) after close\ngot %v, %v\nwant 0, %v", n, err, errClosedPipeWrite) + } + if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { + t.Errorf("Read() after close\ngot %v, nil\nwant 0, %v", n, errClosedPipeWrite) + } +} + +func TestPipeBreakWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + io.WriteString(p, "foo") + a := errors.New("test err") + p.BreakWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != "" { + t.Errorf("read bytes = %q; want empty string", all) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } + if p.b != nil { + t.Errorf("buffer should be nil after BreakWithError") + } + // Write should succeed silently. + if n, err := p.Write([]byte("abc")); err != nil || n != 3 { + t.Errorf("Write(abc) after break\ngot %v, %v\nwant 0, nil", n, err) + } + if p.b != nil { + t.Errorf("buffer should be nil after Write") + } + // Read should fail. + if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { + t.Errorf("Read() after close\ngot %v, nil\nwant 0, not nil", n) + } +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 0000000..39ed755 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2888 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. + state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. +func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if err := configureServer18(s, conf); err != nil { + return err + } + if err := configureServer19(s, conf); err != nil { + return err + } + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. + } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx contextContext + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? + clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. +type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. + if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer != nil { + settingsTimer.Stop() + settingsTimer = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. + sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errPrefaceTimeout + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. +// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. (We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) + } + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// the main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.writingFrameAsync = false + + wr := res.wr + + if writeEndsStream(wr.write) { + st := wr.stream + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } + } + + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. +func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. +func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests uses the +// loopback interface making the expected RTT very small. +// +// TODO: configurable? +var goAwayTimeout = 1 * time.Second + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAway(ErrCodeNo) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." + return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return streamError(id, ErrCodeStreamClosed) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. + return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." + // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = requestWithContext(req, st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. +func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if shouldLogPanic(e) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "

    HTTP Error 431

    Request Header Field(s) Too Large

    ") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. +type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !ValidTrailerHeader(k) { + // Forbidden by RFC 7230, section 4.1.2. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. +func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + rws.dirty = true + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + if err != nil { + rws.dirty = true + } + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 7230 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + +func (w *responseWriter) Flush() { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.bw.Buffered() > 0 { + if err := rws.bw.Flush(); err != nil { + // Ignore the error. The frame writer already knows. + return + } + } else { + // The bufio.Writer won't call chunkWriter.Write + // (writeChunk with zero bytes, so we have to do it + // ourselves to force the HTTP response header and/or + // final DATA frame (with END_STREAM) to be sent. + rws.writeChunk(nil) + } +} + +func (w *responseWriter) CloseNotify() <-chan bool { + rws := w.rws + if rws == nil { + panic("CloseNotify called after Handler finished") + } + rws.closeNotifierMu.Lock() + ch := rws.closeNotifierCh + if ch == nil { + ch = make(chan bool, 1) + rws.closeNotifierCh = ch + cw := rws.stream.cw + go func() { + cw.Wait() // wait for close + ch <- true + }() + } + rws.closeNotifierMu.Unlock() + return ch +} + +func (w *responseWriter) Header() http.Header { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.handlerHeader == nil { + rws.handlerHeader = make(http.Header) + } + return rws.handlerHeader +} + +// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. +func checkWriteHeaderCode(code int) { + // Issue 22880: require valid WriteHeader status codes. + // For now we only enforce that it's three digits. + // In the future we might block things over 599 (600 and above aren't defined + // at http://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). + // But for now any three digits. + // + // We used to send "HTTP/1.1 000 0" on the wire in responses but there's + // no equivalent bogus thing we can realistically send in HTTP/2, + // so we'll consistently panic instead and help people find their bugs + // early. (We can't return an error from WriteHeader even if we wanted to.) + if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + checkWriteHeaderCode(code) + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler might call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. +func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +// pushOptions is the internal version of http.PushOptions, which we +// cannot include here because it's only defined in Go 1.8 and later. +type pushOptions struct { + Method string + Header http.Header +} + +func (w *responseWriter) push(target string, opts pushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. + u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. + switch strings.ToLower(k) { + case "content-length", "content-encoding", "trailer", "te", "expect", "host": + return fmt.Errorf("promised request headers cannot include %q", k) + } + } + if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { + return err + } + + // The RFC effectively limits promised requests to GET and HEAD: + // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" + // http://tools.ietf.org/html/rfc7540#section-8.2 + if opts.Method != "GET" && opts.Method != "HEAD" { + return fmt.Errorf("method %q must be GET or HEAD", opts.Method) + } + + msg := &startPushRequest{ + parent: st, + method: opts.Method, + url: u, + header: cloneHeader(opts.Header), + done: errChanPool.Get().(chan error), + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case sc.serveMsgCh <- msg: + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case err := <-msg.done: + errChanPool.Put(msg.done) + return err + } +} + +type startPushRequest struct { + parent *stream + method string + url *url.URL + header http.Header + done chan error +} + +func (sc *serverConn) startPush(msg *startPushRequest) { + sc.serveG.check() + + // http://tools.ietf.org/html/rfc7540#section-6.6. + // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that + // is in either the "open" or "half-closed (remote)" state. + if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { + // responseWriter.Push checks that the stream is peer-initiaed. + msg.done <- errStreamClosed + return + } + + // http://tools.ietf.org/html/rfc7540#section-6.6. + if !sc.pushEnabled { + msg.done <- http.ErrNotSupported + return + } + + // PUSH_PROMISE frames must be sent in increasing order by stream ID, so + // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE + // is written. Once the ID is allocated, we start the request handler. + allocatePromisedID := func() (uint32, error) { + sc.serveG.check() + + // Check this again, just in case. Technically, we might have received + // an updated SETTINGS by the time we got around to writing this frame. + if !sc.pushEnabled { + return 0, http.ErrNotSupported + } + // http://tools.ietf.org/html/rfc7540#section-6.5.2. + if sc.curPushedStreams+1 > sc.clientMaxStreams { + return 0, ErrPushLimitReached + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.1. + // Streams initiated by the server MUST use even-numbered identifiers. + // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. + promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. +// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/server_push_test.go b/vendor/golang.org/x/net/http2/server_push_test.go new file mode 100644 index 0000000..918fd30 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server_push_test.go @@ -0,0 +1,521 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "sync" + "testing" + "time" +) + +func TestServer_Push_Success(t *testing.T) { + const ( + mainBody = "index page" + pushedBody = "pushed page" + userAgent = "testagent" + cookie = "testcookie" + ) + + var stURL string + checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error { + if got, want := r.Method, wantMethod; got != want { + return fmt.Errorf("promised Req.Method=%q, want %q", got, want) + } + if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) { + return fmt.Errorf("promised Req.Header=%q, want %q", got, want) + } + if got, want := "https://"+r.Host, stURL; got != want { + return fmt.Errorf("promised Req.Host=%q, want %q", got, want) + } + if r.Body == nil { + return fmt.Errorf("nil Body") + } + if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 { + return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err) + } + return nil + } + + errc := make(chan error, 3) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + // Push "/pushed?get" as a GET request, using an absolute URL. + opt := &http.PushOptions{ + Header: http.Header{ + "User-Agent": {userAgent}, + }, + } + if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?get: %v", err) + return + } + // Push "/pushed?head" as a HEAD request, using a path. + opt = &http.PushOptions{ + Method: "HEAD", + Header: http.Header{ + "User-Agent": {userAgent}, + "Cookie": {cookie}, + }, + } + if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?head: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(mainBody))) + w.WriteHeader(200) + io.WriteString(w, mainBody) + errc <- nil + + case "/pushed?get": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + if err := checkPromisedReq(r, "GET", wantH); err != nil { + errc <- fmt.Errorf("/pushed?get: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody))) + w.WriteHeader(200) + io.WriteString(w, pushedBody) + errc <- nil + + case "/pushed?head": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + wantH.Set("Cookie", cookie) + if err := checkPromisedReq(r, "HEAD", wantH); err != nil { + errc <- fmt.Errorf("/pushed?head: %v", err) + return + } + w.WriteHeader(204) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + stURL = st.ts.URL + + // Send one request, which should push two responses. + st.greet() + getSlash(st) + for k := 0; k < 3; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } + + checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error { + pp, ok := f.(*PushPromiseFrame) + if !ok { + return fmt.Errorf("got a %T; want *PushPromiseFrame", f) + } + if !pp.HeadersEnded() { + return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame") + } + if got, want := pp.PromiseID, promiseID; got != want { + return fmt.Errorf("got PromiseID %v; want %v", got, want) + } + gotH := st.decodeHeader(pp.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got promised headers %v; want %v", gotH, wantH) + } + return nil + } + checkHeaders := func(f Frame, wantH [][2]string) error { + hf, ok := f.(*HeadersFrame) + if !ok { + return fmt.Errorf("got a %T; want *HeadersFrame", f) + } + gotH := st.decodeHeader(hf.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got response headers %v; want %v", gotH, wantH) + } + return nil + } + checkData := func(f Frame, wantData string) error { + df, ok := f.(*DataFrame) + if !ok { + return fmt.Errorf("got a %T; want *DataFrame", f) + } + if gotData := string(df.Data()); gotData != wantData { + return fmt.Errorf("got response data %q; want %q", gotData, wantData) + } + return nil + } + + // Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA + // Stream 2 has HEADERS + DATA + // Stream 4 has HEADERS + expected := map[uint32][]func(Frame) error{ + 1: { + func(f Frame) error { + return checkPushPromise(f, 2, [][2]string{ + {":method", "GET"}, + {":scheme", "https"}, + {":authority", st.ts.Listener.Addr().String()}, + {":path", "/pushed?get"}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkPushPromise(f, 4, [][2]string{ + {":method", "HEAD"}, + {":scheme", "https"}, + {":authority", st.ts.Listener.Addr().String()}, + {":path", "/pushed?head"}, + {"cookie", cookie}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(mainBody))}, + }) + }, + func(f Frame) error { + return checkData(f, mainBody) + }, + }, + 2: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(pushedBody))}, + }) + }, + func(f Frame) error { + return checkData(f, pushedBody) + }, + }, + 4: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "204"}, + }) + }, + }, + } + + consumed := map[uint32]int{} + for k := 0; len(expected) > 0; k++ { + f, err := st.readFrame() + if err != nil { + for id, left := range expected { + t.Errorf("stream %d: missing %d frames", id, len(left)) + } + t.Fatalf("readFrame %d: %v", k, err) + } + id := f.Header().StreamID + label := fmt.Sprintf("stream %d, frame %d", id, consumed[id]) + if len(expected[id]) == 0 { + t.Fatalf("%s: unexpected frame %#+v", label, f) + } + check := expected[id][0] + expected[id] = expected[id][1:] + if len(expected[id]) == 0 { + delete(expected, id) + } + if err := check(f); err != nil { + t.Fatalf("%s: %v", label, err) + } + consumed[id]++ + } +} + +func TestServer_Push_SuccessNoRace(t *testing.T) { + // Regression test for issue #18326. Ensure the request handler can mutate + // pushed request headers without racing with the PUSH_PROMISE write. + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + opt := &http.PushOptions{ + Header: http.Header{"User-Agent": {"testagent"}}, + } + if err := w.(http.Pusher).Push("/pushed", opt); err != nil { + errc <- fmt.Errorf("error pushing: %v", err) + return + } + w.WriteHeader(200) + errc <- nil + + case "/pushed": + // Update request header, ensure there is no race. + r.Header.Set("User-Agent", "newagent") + r.Header.Set("Cookie", "cookie") + w.WriteHeader(200) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + + // Send one request, which should push one response. + st.greet() + getSlash(st) + for k := 0; k < 2; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestServer_Push_RejectRecursivePush(t *testing.T) { + // Expect two requests, but might get three if there's a bug and the second push succeeds. + errc := make(chan error, 3) + handler := func(w http.ResponseWriter, r *http.Request) error { + baseURL := "https://" + r.Host + switch r.URL.Path { + case "/": + if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil { + return fmt.Errorf("first Push()=%v, want nil", err) + } + return nil + + case "/push1": + if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + + default: + return fmt.Errorf("unexpected path: %q", r.URL.Path) + } + } + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- handler(w, r) + }) + defer st.Close() + st.greet() + getSlash(st) + if err := <-errc; err != nil { + t.Errorf("First request failed: %v", err) + } + if err := <-errc; err != nil { + t.Errorf("Second request failed: %v", err) + } +} + +func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) { + // Expect one request, but might get two if there's a bug and the push succeeds. + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- doPush(w.(http.Pusher), r) + }) + defer st.Close() + st.greet() + if err := st.fr.WriteSettings(settings...); err != nil { + st.t.Fatalf("WriteSettings: %v", err) + } + st.wantSettingsAck() + getSlash(st) + if err := <-errc; err != nil { + t.Error(err) + } + // Should not get a PUSH_PROMISE frame. + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("stream should end after headers") + } +} + +func TestServer_Push_RejectIfDisabled(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingEnablePush, 0}) +} + +func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingMaxConcurrentStreams, 0}) +} + +func TestServer_Push_RejectWrongScheme(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL is http)") + } + return nil + }) +} + +func TestServer_Push_RejectMissingHost(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https:pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL missing host)") + } + return nil + }) +} + +func TestServer_Push_RejectRelativePath(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("../test", nil); err == nil { + return errors.New("Push() should have failed (push target is a relative path)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenMethod(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil { + return errors.New("Push() should have failed (cannot promise a POST)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenHeader(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + header := http.Header{ + "Content-Length": {"10"}, + "Content-Encoding": {"gzip"}, + "Trailer": {"Foo"}, + "Te": {"trailers"}, + "Host": {"test.com"}, + ":authority": {"test.com"}, + } + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil { + return errors.New("Push() should have failed (forbidden headers)") + } + return nil + }) +} + +func TestServer_Push_StateTransitions(t *testing.T) { + const body = "foo" + + gotPromise := make(chan bool) + finishedPush := make(chan bool) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + if err := w.(http.Pusher).Push("/pushed", nil); err != nil { + t.Errorf("Push error: %v", err) + } + // Don't finish this request until the push finishes so we don't + // nondeterministically interleave output frames with the push. + <-finishedPush + case "/pushed": + <-gotPromise + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(body))) + w.WriteHeader(200) + io.WriteString(w, body) + }) + defer st.Close() + + st.greet() + if st.stream(2) != nil { + t.Fatal("stream 2 should be empty") + } + if got, want := st.streamState(2), stateIdle; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + getSlash(st) + // After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote. + st.wantPushPromise() + if got, want := st.streamState(2), stateHalfClosedRemote; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + // We stall the HTTP handler for "/pushed" until the above check. If we don't + // stall the handler, then the handler might write HEADERS and DATA and finish + // the stream before we check st.streamState(2) -- should that happen, we'll + // see stateClosed and fail the above check. + close(gotPromise) + st.wantHeaders() + if df := st.wantData(); !df.StreamEnded() { + t.Fatal("expected END_STREAM flag on DATA") + } + if got, want := st.streamState(2), stateClosed; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + close(finishedPush) +} + +func TestServer_Push_RejectAfterGoAway(t *testing.T) { + var readyOnce sync.Once + ready := make(chan struct{}) + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + select { + case <-ready: + case <-time.After(5 * time.Second): + errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed") + } + if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + errc <- fmt.Errorf("Push()=%v, want %v", got, want) + } + errc <- nil + }) + defer st.Close() + st.greet() + getSlash(st) + + // Send GOAWAY and wait for it to be processed. + st.fr.WriteGoAway(1, ErrCodeNo, nil) + go func() { + for { + select { + case <-ready: + return + default: + } + st.sc.serveMsgCh <- func(loopNum int) { + if !st.sc.pushEnabled { + readyOnce.Do(func() { close(ready) }) + } + } + } + }() + if err := <-errc; err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go new file mode 100644 index 0000000..c5d8459 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server_test.go @@ -0,0 +1,3725 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered") + +func stderrv() io.Writer { + if *stderrVerbose { + return os.Stderr + } + + return ioutil.Discard +} + +type serverTester struct { + cc net.Conn // client conn + t testing.TB + ts *httptest.Server + fr *Framer + serverLogBuf bytes.Buffer // logger for httptest.Server + logFilter []string // substrings to filter out + scMu sync.Mutex // guards sc + sc *serverConn + hpackDec *hpack.Decoder + decodedHeaders [][2]string + + // If http2debug!=2, then we capture Frame debug logs that will be written + // to t.Log after a test fails. The read and write logs use separate locks + // and buffers so we don't accidentally introduce synchronization between + // the read and write goroutines, which may hide data races. + frameReadLogMu sync.Mutex + frameReadLogBuf bytes.Buffer + frameWriteLogMu sync.Mutex + frameWriteLogBuf bytes.Buffer + + // writing headers: + headerBuf bytes.Buffer + hpackEnc *hpack.Encoder +} + +func init() { + testHookOnPanicMu = new(sync.Mutex) + goAwayTimeout = 25 * time.Millisecond +} + +func resetHooks() { + testHookOnPanicMu.Lock() + testHookOnPanic = nil + testHookOnPanicMu.Unlock() +} + +type serverTesterOpt string + +var optOnlyServer = serverTesterOpt("only_server") +var optQuiet = serverTesterOpt("quiet_logging") +var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames") + +func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { + resetHooks() + + ts := httptest.NewUnstartedServer(handler) + + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{NextProtoTLS}, + } + + var onlyServer, quiet, framerReuseFrames bool + h2server := new(Server) + for _, opt := range opts { + switch v := opt.(type) { + case func(*tls.Config): + v(tlsConfig) + case func(*httptest.Server): + v(ts) + case func(*Server): + v(h2server) + case serverTesterOpt: + switch v { + case optOnlyServer: + onlyServer = true + case optQuiet: + quiet = true + case optFramerReuseFrames: + framerReuseFrames = true + } + case func(net.Conn, http.ConnState): + ts.Config.ConnState = v + default: + t.Fatalf("unknown newServerTester option type %T", v) + } + } + + ConfigureServer(ts.Config, h2server) + + st := &serverTester{ + t: t, + ts: ts, + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField) + + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + if quiet { + ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + } else { + ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags) + } + ts.StartTLS() + + if VerboseLogs { + t.Logf("Running test server at: %s", ts.URL) + } + testHookGetServerConn = func(v *serverConn) { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc = v + } + log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st})) + if !onlyServer { + cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) + if err != nil { + t.Fatal(err) + } + st.cc = cc + st.fr = NewFramer(cc, cc) + if framerReuseFrames { + st.fr.SetReuseFrames() + } + if !logFrameReads && !logFrameWrites { + st.fr.debugReadLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameReadLogMu.Lock() + fmt.Fprintf(&st.frameReadLogBuf, m, v...) + st.frameReadLogMu.Unlock() + } + st.fr.debugWriteLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameWriteLogMu.Lock() + fmt.Fprintf(&st.frameWriteLogBuf, m, v...) + st.frameWriteLogMu.Unlock() + } + st.fr.logReads = true + st.fr.logWrites = true + } + } + return st +} + +func (st *serverTester) closeConn() { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc.conn.Close() +} + +func (st *serverTester) addLogFilter(phrase string) { + st.logFilter = append(st.logFilter, phrase) +} + +func (st *serverTester) stream(id uint32) *stream { + ch := make(chan *stream, 1) + st.sc.serveMsgCh <- func(int) { + ch <- st.sc.streams[id] + } + return <-ch +} + +func (st *serverTester) streamState(id uint32) streamState { + ch := make(chan streamState, 1) + st.sc.serveMsgCh <- func(int) { + state, _ := st.sc.state(id) + ch <- state + } + return <-ch +} + +// loopNum reports how many times this conn's select loop has gone around. +func (st *serverTester) loopNum() int { + lastc := make(chan int, 1) + st.sc.serveMsgCh <- func(loopNum int) { + lastc <- loopNum + } + return <-lastc +} + +// awaitIdle heuristically awaits for the server conn's select loop to be idle. +// The heuristic is that the server connection's serve loop must schedule +// 50 times in a row without any channel sends or receives occurring. +func (st *serverTester) awaitIdle() { + remain := 50 + last := st.loopNum() + for remain > 0 { + n := st.loopNum() + if n == last+1 { + remain-- + } else { + remain = 50 + } + last = n + } +} + +func (st *serverTester) Close() { + if st.t.Failed() { + st.frameReadLogMu.Lock() + if st.frameReadLogBuf.Len() > 0 { + st.t.Logf("Framer read log:\n%s", st.frameReadLogBuf.String()) + } + st.frameReadLogMu.Unlock() + + st.frameWriteLogMu.Lock() + if st.frameWriteLogBuf.Len() > 0 { + st.t.Logf("Framer write log:\n%s", st.frameWriteLogBuf.String()) + } + st.frameWriteLogMu.Unlock() + + // If we failed already (and are likely in a Fatal, + // unwindowing), force close the connection, so the + // httptest.Server doesn't wait forever for the conn + // to close. + if st.cc != nil { + st.cc.Close() + } + } + st.ts.Close() + if st.cc != nil { + st.cc.Close() + } + log.SetOutput(os.Stderr) +} + +// greet initiates the client's HTTP/2 connection into a state where +// frames may be sent. +func (st *serverTester) greet() { + st.greetAndCheckSettings(func(Setting) error { return nil }) +} + +func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) { + st.writePreface() + st.writeInitialSettings() + st.wantSettings().ForeachSetting(checkSetting) + st.writeSettingsAck() + + // The initial WINDOW_UPDATE and SETTINGS ACK can come in any order. + var gotSettingsAck bool + var gotWindowUpdate bool + + for i := 0; i < 2; i++ { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + switch f := f.(type) { + case *SettingsFrame: + if !f.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } + gotSettingsAck = true + + case *WindowUpdateFrame: + if f.FrameHeader.StreamID != 0 { + st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID) + } + incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize) + if f.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr) + } + gotWindowUpdate = true + + default: + st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f) + } + } + + if !gotSettingsAck { + st.t.Fatalf("Didn't get a settings ACK") + } + if !gotWindowUpdate { + st.t.Fatalf("Didn't get a window update") + } +} + +func (st *serverTester) writePreface() { + n, err := st.cc.Write(clientPreface) + if err != nil { + st.t.Fatalf("Error writing client preface: %v", err) + } + if n != len(clientPreface) { + st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface)) + } +} + +func (st *serverTester) writeInitialSettings() { + if err := st.fr.WriteSettings(); err != nil { + st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (st *serverTester) writeSettingsAck() { + if err := st.fr.WriteSettingsAck(); err != nil { + st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) + } +} + +func (st *serverTester) writeHeaders(p HeadersFrameParam) { + if err := st.fr.WriteHeaders(p); err != nil { + st.t.Fatalf("Error writing HEADERS: %v", err) + } +} + +func (st *serverTester) writePriority(id uint32, p PriorityParam) { + if err := st.fr.WritePriority(id, p); err != nil { + st.t.Fatalf("Error writing PRIORITY: %v", err) + } +} + +func (st *serverTester) encodeHeaderField(k, v string) { + err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } +} + +// encodeHeaderRaw is the magic-free version of encodeHeader. +// It takes 0 or more (k, v) pairs and encodes them. +func (st *serverTester) encodeHeaderRaw(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + st.headerBuf.Reset() + for len(headers) > 0 { + k, v := headers[0], headers[1] + st.encodeHeaderField(k, v) + headers = headers[2:] + } + return st.headerBuf.Bytes() +} + +// encodeHeader encodes headers and returns their HPACK bytes. headers +// must contain an even number of key/value pairs. There may be +// multiple pairs for keys (e.g. "cookie"). The :method, :path, and +// :scheme headers default to GET, / and https. The :authority header +// defaults to st.ts.Listener.Addr(). +func (st *serverTester) encodeHeader(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + + st.headerBuf.Reset() + defaultAuthority := st.ts.Listener.Addr().String() + + if len(headers) == 0 { + // Fast path, mostly for benchmarks, so test code doesn't pollute + // profiles when we're looking to improve server allocations. + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") + return st.headerBuf.Bytes() + } + + if len(headers) == 2 && headers[0] == ":method" { + // Another fast path for benchmarks. + st.encodeHeaderField(":method", headers[1]) + st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") + return st.headerBuf.Bytes() + } + + pseudoCount := map[string]int{} + keys := []string{":method", ":scheme", ":authority", ":path"} + vals := map[string][]string{ + ":method": {"GET"}, + ":scheme": {"https"}, + ":authority": {defaultAuthority}, + ":path": {"/"}, + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if _, ok := vals[k]; !ok { + keys = append(keys, k) + } + if strings.HasPrefix(k, ":") { + pseudoCount[k]++ + if pseudoCount[k] == 1 { + vals[k] = []string{v} + } else { + // Allows testing of invalid headers w/ dup pseudo fields. + vals[k] = append(vals[k], v) + } + } else { + vals[k] = append(vals[k], v) + } + } + for _, k := range keys { + for _, v := range vals[k] { + st.encodeHeaderField(k, v) + } + } + return st.headerBuf.Bytes() +} + +// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set. +func (st *serverTester) bodylessReq1(headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) +} + +func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { + if err := st.fr.WriteData(streamID, endStream, data); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) { + if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) { + ch := make(chan interface{}, 1) + go func() { + fr, err := fr.ReadFrame() + if err != nil { + ch <- err + } else { + ch <- fr + } + }() + t := time.NewTimer(wait) + select { + case v := <-ch: + t.Stop() + if fr, ok := v.(Frame); ok { + return fr, nil + } + return nil, v.(error) + case <-t.C: + return nil, errors.New("timeout waiting for frame") + } +} + +func (st *serverTester) readFrame() (Frame, error) { + return readFrameTimeout(st.fr, 2*time.Second) +} + +func (st *serverTester) wantHeaders() *HeadersFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a HEADERS frame: %v", err) + } + hf, ok := f.(*HeadersFrame) + if !ok { + st.t.Fatalf("got a %T; want *HeadersFrame", f) + } + return hf +} + +func (st *serverTester) wantContinuation() *ContinuationFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err) + } + cf, ok := f.(*ContinuationFrame) + if !ok { + st.t.Fatalf("got a %T; want *ContinuationFrame", f) + } + return cf +} + +func (st *serverTester) wantData() *DataFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a DATA frame: %v", err) + } + df, ok := f.(*DataFrame) + if !ok { + st.t.Fatalf("got a %T; want *DataFrame", f) + } + return df +} + +func (st *serverTester) wantSettings() *SettingsFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("got a %T; want *SettingsFrame", f) + } + return sf +} + +func (st *serverTester) wantPing() *PingFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a PING frame: %v", err) + } + pf, ok := f.(*PingFrame) + if !ok { + st.t.Fatalf("got a %T; want *PingFrame", f) + } + return pf +} + +func (st *serverTester) wantGoAway() *GoAwayFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err) + } + gf, ok := f.(*GoAwayFrame) + if !ok { + st.t.Fatalf("got a %T; want *GoAwayFrame", f) + } + return gf +} + +func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting an RSTStream frame: %v", err) + } + rs, ok := f.(*RSTStreamFrame) + if !ok { + st.t.Fatalf("got a %T; want *RSTStreamFrame", f) + } + if rs.FrameHeader.StreamID != streamID { + st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID) + } + if rs.ErrCode != errCode { + st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode) + } +} + +func (st *serverTester) wantWindowUpdate(streamID, incr uint32) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err) + } + wu, ok := f.(*WindowUpdateFrame) + if !ok { + st.t.Fatalf("got a %T; want *WindowUpdateFrame", f) + } + if wu.FrameHeader.StreamID != streamID { + st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID) + } + if wu.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) + } +} + +func (st *serverTester) wantSettingsAck() { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("Wanting a settings ACK, received a %T", f) + } + if !sf.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } +} + +func (st *serverTester) wantPushPromise() *PushPromiseFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + ppf, ok := f.(*PushPromiseFrame) + if !ok { + st.t.Fatalf("Wanted PushPromise, received %T", ppf) + } + return ppf +} + +func TestServer(t *testing.T) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + gotReq <- true + }) + defer st.Close() + + covers("3.5", ` + The server connection preface consists of a potentially empty + SETTINGS frame ([SETTINGS]) that MUST be the first frame the + server sends in the HTTP/2 connection. + `) + + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func TestServer_Request_Get(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("foo-bar", "some-value"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "GET" { + t.Errorf("Method = %q; want GET", r.Method) + } + if r.URL.Path != "/" { + t.Errorf("URL.Path = %q; want /", r.URL.Path) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if r.Close { + t.Error("Close = true; want false") + } + if !strings.Contains(r.RemoteAddr, ":") { + t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr) + } + if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 { + t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor) + } + wantHeader := http.Header{ + "Foo-Bar": []string{"some-value"}, + } + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Get_PathSlashes(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":path", "/%2f/"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.RequestURI != "/%2f/" { + t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI) + } + if r.URL.Path != "///" { + t.Errorf("URL.Path = %q; want ///", r.URL.Path) + } + }) +} + +// TODO: add a test with EndStream=true on the HEADERS but setting a +// Content-Length anyway. Should we just omit it and force it to +// zero? + +func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) { + testBodyContents(t, -1, "", func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, nil) // just kidding. empty body. + }) +} + +func TestServer_Request_Post_Body_OneData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_TwoData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, false, []byte(content[:5])) + st.writeData(1, true, []byte(content[5:])) + }) +} + +func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) { + const content = "Some content" + testBodyContents(t, int64(len(content)), content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", strconv.Itoa(len(content)), + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) { + testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "3", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12")) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) { + testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "4", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12345")) + }) +} + +func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(all) != wantBody { + t.Errorf("Read = %q; want %q", all, wantBody) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err == nil { + t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.", + wantReadError, all) + } + if !strings.Contains(err.Error(), wantReadError) { + t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +// Using a Host header, instead of :authority +func TestServer_Request_Get_Host(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", "", "host", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +// Using an :authority pseudo-header, instead of Host +func TestServer_Request_Get_Authority(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +func TestServer_Request_WithContinuation(t *testing.T) { + wantHeader := http.Header{ + "Foo-One": []string{"value-one"}, + "Foo-Two": []string{"value-two"}, + "Foo-Three": []string{"value-three"}, + } + testServerRequest(t, func(st *serverTester) { + fullHeaders := st.encodeHeader( + "foo-one", "value-one", + "foo-two", "value-two", + "foo-three", "value-three", + ) + remain := fullHeaders + chunks := 0 + for len(remain) > 0 { + const maxChunkSize = 5 + chunk := remain + if len(chunk) > maxChunkSize { + chunk = chunk[:maxChunkSize] + } + remain = remain[len(chunk):] + + if chunks == 0 { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: chunk, + EndStream: true, // no DATA frames + EndHeaders: false, // we'll have continuation frames + }) + } else { + err := st.fr.WriteContinuation(1, len(remain) == 0, chunk) + if err != nil { + t.Fatal(err) + } + } + chunks++ + } + if chunks < 2 { + t.Fatal("too few chunks") + } + }, func(r *http.Request) { + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + }) +} + +// Concatenated cookie headers. ("8.1.2.5 Compressing the Cookie Header Field") +func TestServer_Request_CookieConcat(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.bodylessReq1( + ":authority", host, + "cookie", "a=b", + "cookie", "c=d", + "cookie", "e=f", + ) + }, func(r *http.Request) { + const want = "a=b; c=d; e=f" + if got := r.Header.Get("Cookie"); got != want { + t.Errorf("Cookie = %q; want %q", got, want) + } + }) +} + +func TestServer_Request_Reject_CapitalHeader(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") }) +} + +func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") }) +} + +func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") }) +} + +func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid value" ... + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("duplicate pseudo-header") + st.bodylessReq1(":method", "GET", ":method", "POST") + }) +} + +func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All pseudo-header fields MUST appear in the header block + // before regular header fields. Any request or response that + // contains a pseudo-header field that appears in a header + // block after a regular header field MUST be treated as + // malformed (Section 8.1.2.6)." + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("pseudo-header after regular header") + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}) + enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"}) + enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"}) + enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"}) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") }) +} + +func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") }) +} + +func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter(`invalid pseudo-header ":unknown_thing"`) + st.bodylessReq1(":unknown_thing", "") + }) +} + +func testRejectRequest(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("server request made it to handler; should've been rejected") + }) + defer st.Close() + + st.greet() + send(st) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("server request made it to handler; should've been rejected") + }, optQuiet) + defer st.Close() + + st.greet() + send(st) + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeProtocol { + t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol) + } +} + +// Section 5.1, on idle connections: "Receiving any frame other than +// HEADERS or PRIORITY on a stream in this state MUST be treated as a +// connection error (Section 5.4.1) of type PROTOCOL_ERROR." +func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) { + testRejectRequestWithProtocolError(t, func(st *serverTester) { + st.fr.WriteWindowUpdate(123, 456) + }) +} +func TestRejectFrameOnIdle_Data(t *testing.T) { + testRejectRequestWithProtocolError(t, func(st *serverTester) { + st.fr.WriteData(123, true, nil) + }) +} +func TestRejectFrameOnIdle_RSTStream(t *testing.T) { + testRejectRequestWithProtocolError(t, func(st *serverTester) { + st.fr.WriteRSTStream(123, ErrCodeCancel) + }) +} + +func TestServer_Request_Connect(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeaderRaw( + ":method", "CONNECT", + ":authority", "example.com:123", + ), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if g, w := r.Method, "CONNECT"; g != w { + t.Errorf("Method = %q; want %q", g, w) + } + if g, w := r.RequestURI, "example.com:123"; g != w { + t.Errorf("RequestURI = %q; want %q", g, w) + } + if g, w := r.URL.Host, "example.com:123"; g != w { + t.Errorf("URL.Host = %q; want %q", g, w) + } + }) +} + +func TestServer_Request_Connect_InvalidPath(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeaderRaw( + ":method", "CONNECT", + ":authority", "example.com:123", + ":path", "/bogus", + ), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Request_Connect_InvalidScheme(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeaderRaw( + ":method", "CONNECT", + ":authority", "example.com:123", + ":scheme", "https", + ), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Ping(t *testing.T) { + st := newServerTester(t, nil) + defer st.Close() + st.greet() + + // Server should ignore this one, since it has ACK set. + ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128} + if err := st.fr.WritePing(true, ackPingData); err != nil { + t.Fatal(err) + } + + // But the server should reply to this one, since ACK is false. + pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8} + if err := st.fr.WritePing(false, pingData); err != nil { + t.Fatal(err) + } + + pf := st.wantPing() + if !pf.Flags.Has(FlagPingAck) { + t.Error("response ping doesn't have ACK set") + } + if pf.Data != pingData { + t.Errorf("response ping has data %q; want %q", pf.Data, pingData) + } +} + +func TestServer_RejectsLargeFrames(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("see golang.org/issue/13434") + } + + st := newServerTester(t, nil) + defer st.Close() + st.greet() + + // Write too large of a frame (too large by one byte) + // We ignore the return value because it's expected that the server + // will only read the first 9 bytes (the headre) and then disconnect. + st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1)) + + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeFrameSize { + t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize) + } + if st.serverLogBuf.Len() != 0 { + // Previously we spun here for a bit until the GOAWAY disconnect + // timer fired, logging while we fired. + t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes()) + } +} + +func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // data coming + EndHeaders: true, + }) + st.writeData(1, false, []byte("abcdef")) + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + st.writeData(1, true, []byte("ghijkl")) // END_STREAM here + puppet.do(readBodyHandler(t, "ghi")) + puppet.do(readBodyHandler(t, "jkl")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM +} + +// the version of the TestServer_Handler_Sends_WindowUpdate with padding. +// See golang.org/issue/16556 +func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + st.writeDataPadded(1, false, []byte("abcdef"), []byte{0, 0, 0, 0}) + + // Expect to immediately get our 5 bytes of padding back for + // both the connection and stream (4 bytes of padding + 1 byte of length) + st.wantWindowUpdate(0, 5) + st.wantWindowUpdate(1, 5) + + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) +} + +func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { + st := newServerTester(t, nil) + defer st.Close() + st.greet() + if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil { + t.Fatal(err) + } + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeFlowControl { + t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl) + } + if gf.LastStreamID != 0 { + t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0) + } +} + +func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) { + inHandler := make(chan bool) + blockHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-blockHandler + }) + defer st.Close() + defer close(blockHandler) + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + // Send a bogus window update: + if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil { + t.Fatal(err) + } + st.wantRSTStream(1, ErrCodeFlowControl) +} + +// testServerPostUnblock sends a hanging POST with unsent data to handler, +// then runs fn once in the handler, and verifies that the error returned from +// handler is acceptable. It fails if takes over 5 seconds for handler to exit. +func testServerPostUnblock(t *testing.T, + handler func(http.ResponseWriter, *http.Request) error, + fn func(*serverTester), + checkErr func(error), + otherHeaders ...string) { + inHandler := make(chan bool) + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + errc <- handler(w, r) + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + fn(st) + select { + case err := <-errc: + if checkErr != nil { + checkErr(err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Handler to return") + } +} + +func TestServer_RSTStream_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, + func(err error) { + want := StreamError{StreamID: 0x1, Code: 0x8} + if !reflect.DeepEqual(err, want) { + t.Errorf("Read error = %v; want %v", err, want) + } + }, + ) +} + +func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + // Run this test a bunch, because it doesn't always + // deadlock. But with a bunch, it did. + n := 50 + if testing.Short() { + n = 5 + } + for i := 0; i < n; i++ { + testServer_RSTStream_Unblocks_Header_Write(t) + } +} + +func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + inHandler := make(chan bool, 1) + unblockHandler := make(chan bool, 1) + headerWritten := make(chan bool, 1) + wroteRST := make(chan bool, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-wroteRST + w.Header().Set("foo", "bar") + w.WriteHeader(200) + w.(http.Flusher).Flush() + headerWritten <- true + <-unblockHandler + }) + defer st.Close() + + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + wroteRST <- true + st.awaitIdle() + select { + case <-headerWritten: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for header write") + } + unblockHandler <- true +} + +func TestServer_DeadConn_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { st.cc.Close() }, + func(err error) { + if err == nil { + t.Error("unexpected nil error from Request.Body.Read") + } + }, + ) +} + +var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error { + <-w.(http.CloseNotifier).CloseNotify() + return nil +} + +func TestServer_CloseNotify_After_RSTStream(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, nil) +} + +func TestServer_CloseNotify_After_ConnClose(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil) +} + +// that CloseNotify unblocks after a stream error due to the client's +// problem that's unrelated to them explicitly canceling it (which is +// TestServer_CloseNotify_After_RSTStream above) +func TestServer_CloseNotify_After_StreamError(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + // data longer than declared Content-Length => stream error + st.writeData(1, true, []byte("1234")) + }, nil, "content-length", "3") +} + +func TestServer_StateTransitions(t *testing.T) { + var st *serverTester + inHandler := make(chan bool) + writeData := make(chan bool) + leaveHandler := make(chan bool) + st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + if st.stream(1) == nil { + t.Errorf("nil stream 1 in handler") + } + if got, want := st.streamState(1), stateOpen; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + writeData <- true + if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF { + t.Errorf("body read = %d, %v; want 0, EOF", n, err) + } + if got, want := st.streamState(1), stateHalfClosedRemote; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + + <-leaveHandler + }) + st.greet() + if st.stream(1) != nil { + t.Fatal("stream 1 should be empty") + } + if got := st.streamState(1); got != stateIdle { + t.Fatalf("stream 1 should be idle; got %v", got) + } + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + <-writeData + st.writeData(1, true, nil) + + leaveHandler <- true + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("expected END_STREAM flag") + } + + if got, want := st.streamState(1), stateClosed; got != want { + t.Errorf("at end, state is %v; want %v", got, want) + } + if st.stream(1) != nil { + t.Fatal("at end, stream 1 should be gone") + } +} + +// test HEADERS w/o EndHeaders + another HEADERS (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + st.writeHeaders(HeadersFrameParam{ // Not a continuation. + StreamID: 3, // different stream. + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// test HEADERS w/o EndHeaders + PING (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WritePing(false, [8]byte{}); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected) +func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID +func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// No HEADERS on stream 0. +func TestServer_Rejects_Headers0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 0, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// No CONTINUATION on stream 0. +func TestServer_Rejects_Continuation0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil { + t.Fatal(err) + } + }) +} + +// No PRIORITY on stream 0. +func TestServer_Rejects_Priority0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writePriority(0, PriorityParam{StreamDep: 1}) + }) +} + +// No HEADERS frame with a self-dependence. +func TestServer_Rejects_HeadersSelfDependence(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + Priority: PriorityParam{StreamDep: 1}, + }) + }) +} + +// No PRIORTY frame with a self-dependence. +func TestServer_Rejects_PrioritySelfDependence(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writePriority(1, PriorityParam{StreamDep: 1}) + }) +} + +func TestServer_Rejects_PushPromise(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + pp := PushPromiseParam{ + StreamID: 1, + PromiseID: 3, + } + if err := st.fr.WritePushPromise(pp); err != nil { + t.Fatal(err) + } + }) +} + +// testServerRejectsConn tests that the server hangs up with a GOAWAY +// frame and a server close after the client does something +// deserving a CONNECTION_ERROR. +func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + st.addLogFilter("connection error: PROTOCOL_ERROR") + defer st.Close() + st.greet() + writeReq(st) + + st.wantGoAway() + errc := make(chan error, 1) + go func() { + fr, err := st.fr.ReadFrame() + if err == nil { + err = fmt.Errorf("got frame of type %T", fr) + } + errc <- err + }() + select { + case err := <-errc: + if err != io.EOF { + t.Errorf("ReadFrame = %v; want io.EOF", err) + } + case <-time.After(2 * time.Second): + t.Error("timeout waiting for disconnect") + } +} + +// testServerRejectsStream tests that the server sends a RST_STREAM with the provided +// error code after a client sends a bogus request. +func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + defer st.Close() + st.greet() + writeReq(st) + st.wantRSTStream(1, code) +} + +// testServerRequest sets up an idle HTTP/2 connection and lets you +// write a single request with writeReq, and then verify that the +// *http.Request is built correctly in checkReq. +func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + checkReq(r) + gotReq <- true + }) + defer st.Close() + + st.greet() + writeReq(st) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func getSlash(st *serverTester) { st.bodylessReq1() } + +func TestServer_Response_NoData(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // Nothing. + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + }) +} + +func TestServer_Response_NoData_Header_FooBar(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Foo-Bar", "some-value") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo-bar", "some-value"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Content-Type", "foo/bar") + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("don't want END_STREAM, expecting data") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "foo/bar"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + df := st.wantData() + if !df.StreamEnded() { + t.Error("expected DATA to have END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + }) +} + +func TestServer_Response_TransferEncoding_chunked(t *testing.T) { + const msg = "hi" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Transfer-Encoding", "chunked") // should be stripped + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +// Header accessed only after the initial write. +func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + w.Header().Set("foo", "should be ignored") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +// Header accessed before the initial write and later mutated. +func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("foo", "proper value") + io.WriteString(w, msg) + w.Header().Set("foo", "should be ignored") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo", "proper value"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +func TestServer_Response_Data_SniffLenType(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("don't want END_STREAM, expecting data") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + df := st.wantData() + if !df.StreamEnded() { + t.Error("expected DATA to have END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + }) +} + +func TestServer_Response_Header_Flush_MidWrite(t *testing.T) { + const msg = "this is HTML" + const msg2 = ", and this is the next chunk" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + io.WriteString(w, msg2) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, // sniffed + // and no content-length + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + { + df := st.wantData() + if df.StreamEnded() { + t.Error("unexpected END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + } + { + df := st.wantData() + if !df.StreamEnded() { + t.Error("wanted END_STREAM flag on last data chunk") + } + if got := string(df.Data()); got != msg2 { + t.Errorf("got DATA %q; want %q", got, msg2) + } + } + }) +} + +func TestServer_Response_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + n, err := w.Write(bytes.Repeat([]byte("a"), size)) + if err != nil { + return fmt.Errorf("Write error: %v", err) + } + if n != size { + return fmt.Errorf("wrong size %d from Write", n) + } + return nil + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + // Give the handler quota to write: + if err := st.fr.WriteWindowUpdate(1, size); err != nil { + t.Fatal(err) + } + // Give the handler quota to write to connection-level + // window as well + if err := st.fr.WriteWindowUpdate(0, size); err != nil { + t.Fatal(err) + } + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, // sniffed + // and no content-length + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + var bytes, frames int + for { + df := st.wantData() + bytes += len(df.Data()) + frames++ + for _, b := range df.Data() { + if b != 'a' { + t.Fatal("non-'a' byte seen in DATA") + } + } + if df.StreamEnded() { + break + } + } + if bytes != size { + t.Errorf("Got %d bytes; want %d", bytes, size) + } + if want := int(size / maxFrameSize); frames < want || frames > want*2 { + t.Errorf("Got %d frames; want %d", frames, size) + } + }) +} + +// Test that the handler can't write more than the client allows +func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { + // Make these reads. Before each read, the client adds exactly enough + // flow-control to satisfy the read. Numbers chosen arbitrarily. + reads := []int{123, 1, 13, 127} + size := 0 + for _, n := range reads { + size += n + } + + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + n, err := w.Write(bytes.Repeat([]byte("a"), size)) + if err != nil { + return fmt.Errorf("Write error: %v", err) + } + if n != size { + return fmt.Errorf("wrong size %d from Write", n) + } + return nil + }, func(st *serverTester) { + // Set the window size to something explicit for this test. + // It's also how much initial data we expect. + if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != reads[0] { + t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got) + } + + for _, quota := range reads[1:] { + if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil { + t.Fatal(err) + } + df := st.wantData() + if int(quota) != len(df.Data()) { + t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota) + } + } + }) +} + +// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM. +func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + errc := make(chan error, 1) + go func() { + _, err := w.Write(bytes.Repeat([]byte("a"), size)) + errc <- err + }() + select { + case err := <-errc: + if err == nil { + return errors.New("unexpected nil error from Write in handler") + } + return nil + case <-time.After(2 * time.Second): + return errors.New("timeout waiting for Write in handler") + } + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }) +} + +func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + // Nothing; send empty DATA + return nil + }, func(st *serverTester) { + // Handler gets no data quota: + if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != 0 { + t.Fatalf("unexpected %d DATA bytes; want 0", got) + } + if !df.StreamEnded() { + t.Fatal("DATA didn't have END_STREAM") + } + }) +} + +func TestServer_Response_Automatic100Continue(t *testing.T) { + const msg = "foo" + const reply = "bar" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + if v := r.Header.Get("Expect"); v != "" { + t.Errorf("Expect header = %q; want empty", v) + } + buf := make([]byte, len(msg)) + // This read should trigger the 100-continue being sent. + if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg { + return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg) + } + _, err := io.WriteString(w, reply) + return err + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "100"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Fatalf("Got headers %v; want %v", goth, wanth) + } + + // Okay, they sent status 100, so we can send our + // gigantic and/or sensitive "foo" payload now. + st.writeData(1, true, []byte(msg)) + + st.wantWindowUpdate(0, uint32(len(msg))) + + hf = st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("expected data to follow") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth = st.decodeHeader(hf.HeaderBlockFragment()) + wanth = [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(reply))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + + df := st.wantData() + if string(df.Data()) != reply { + t.Errorf("Client read %q; want %q", df.Data(), reply) + } + if !df.StreamEnded() { + t.Errorf("expect data stream end") + } + }) +} + +func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) { + errc := make(chan error, 1) + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + p := []byte("some data.\n") + for { + _, err := w.Write(p) + if err != nil { + errc <- err + return nil + } + } + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + // Close the connection and wait for the handler to (hopefully) notice. + st.cc.Close() + select { + case <-errc: + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_Too_Many_Streams(t *testing.T) { + const testPath = "/some/path" + + inHandler := make(chan uint32) + leaveHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + id := w.(*responseWriter).rws.stream.id + inHandler <- id + if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath { + t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath) + } + <-leaveHandler + }) + defer st.Close() + st.greet() + nextStreamID := uint32(1) + streamID := func() uint32 { + defer func() { nextStreamID += 2 }() + return nextStreamID + } + sendReq := func(id uint32, headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) + } + for i := 0; i < defaultMaxStreams; i++ { + sendReq(streamID()) + <-inHandler + } + defer func() { + for i := 0; i < defaultMaxStreams; i++ { + leaveHandler <- true + } + }() + + // And this one should cross the limit: + // (It's also sent as a CONTINUATION, to verify we still track the decoder context, + // even if we're rejecting it) + rejectID := streamID() + headerBlock := st.encodeHeader(":path", testPath) + frag1, frag2 := headerBlock[:3], headerBlock[3:] + st.writeHeaders(HeadersFrameParam{ + StreamID: rejectID, + BlockFragment: frag1, + EndStream: true, + EndHeaders: false, // CONTINUATION coming + }) + if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil { + t.Fatal(err) + } + st.wantRSTStream(rejectID, ErrCodeProtocol) + + // But let a handler finish: + leaveHandler <- true + st.wantHeaders() + + // And now another stream should be able to start: + goodID := streamID() + sendReq(goodID, ":path", testPath) + select { + case got := <-inHandler: + if got != goodID { + t.Errorf("Got stream %d; want %d", got, goodID) + } + case <-time.After(3 * time.Second): + t.Error("timeout waiting for handler") + } +} + +// So many response headers that the server needs to use CONTINUATION frames: +func TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + h := w.Header() + for i := 0; i < 5000; i++ { + h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i)) + } + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.HeadersEnded() { + t.Fatal("got unwanted END_HEADERS flag") + } + n := 0 + for { + n++ + cf := st.wantContinuation() + if cf.HeadersEnded() { + break + } + } + if n < 5 { + t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n) + } + }) +} + +// This previously crashed (reported by Mathieu Lonjaret as observed +// while using Camlistore) because we got a DATA frame from the client +// after the handler exited and our logic at the time was wrong, +// keeping a stream in the map in stateClosed, which tickled an +// invariant check later when we tried to remove that stream (via +// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop +// ended. +func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // nothing + return nil + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, // DATA is coming + EndHeaders: true, + }) + hf := st.wantHeaders() + if !hf.HeadersEnded() || !hf.StreamEnded() { + t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf) + } + + // Sent when the a Handler closes while a client has + // indicated it's still sending DATA: + st.wantRSTStream(1, ErrCodeNo) + + // Now the handler has ended, so it's ended its + // stream, but the client hasn't closed its side + // (stateClosedLocal). So send more data and verify + // it doesn't crash with an internal invariant panic, like + // it did before. + st.writeData(1, true, []byte("foo")) + + // Get our flow control bytes back, since the handler didn't get them. + st.wantWindowUpdate(0, uint32(len("foo"))) + + // Sent after a peer sends data anyway (admittedly the + // previous RST_STREAM might've still been in-flight), + // but they'll get the more friendly 'cancel' code + // first. + st.wantRSTStream(1, ErrCodeStreamClosed) + + // Set up a bunch of machinery to record the panic we saw + // previously. + var ( + panMu sync.Mutex + panicVal interface{} + ) + + testHookOnPanicMu.Lock() + testHookOnPanic = func(sc *serverConn, pv interface{}) bool { + panMu.Lock() + panicVal = pv + panMu.Unlock() + return true + } + testHookOnPanicMu.Unlock() + + // Now force the serve loop to end, via closing the connection. + st.cc.Close() + select { + case <-st.sc.doneServing: + // Loop has exited. + panMu.Lock() + got := panicVal + panMu.Unlock() + if got != nil { + t.Errorf("Got panic: %v", got) + } + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) } +func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) } + +func testRejectTLS(t *testing.T, max uint16) { + st := newServerTester(t, nil, func(c *tls.Config) { + c.MaxVersion = max + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Rejects_TLSBadCipher(t *testing.T) { + st := newServerTester(t, nil, func(c *tls.Config) { + // Only list bad ones: + c.CipherSuites = []uint16{ + tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + } + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Advertises_Common_Cipher(t *testing.T) { + const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + st := newServerTester(t, nil, func(c *tls.Config) { + // Have the client only support the one required by the spec. + c.CipherSuites = []uint16{requiredSuite} + }, func(ts *httptest.Server) { + var srv *http.Server = ts.Config + // Have the server configured with no specific cipher suites. + // This tests that Go's defaults include the required one. + srv.TLSConfig = nil + }) + defer st.Close() + st.greet() +} + +func (st *serverTester) onHeaderField(f hpack.HeaderField) { + if f.Name == "date" { + return + } + st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value}) +} + +func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) { + st.decodedHeaders = nil + if _, err := st.hpackDec.Write(headerBlock); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + if err := st.hpackDec.Close(); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + return st.decodedHeaders +} + +// testServerResponse sets up an idle HTTP/2 connection. The client function should +// write a single request that must be handled by the handler. This waits up to 5s +// for client to return, then up to an additional 2s for the handler to return. +func testServerResponse(t testing.TB, + handler func(http.ResponseWriter, *http.Request) error, + client func(*serverTester), +) { + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + errc <- handler(w, r) + }) + defer st.Close() + + donec := make(chan bool) + go func() { + defer close(donec) + st.greet() + client(st) + }() + + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout in client") + } + + select { + case err := <-errc: + if err != nil { + t.Fatalf("Error in handler: %v", err) + } + case <-time.After(2 * time.Second): + t.Fatal("timeout in handler") + } +} + +// readBodyHandler returns an http Handler func that reads len(want) +// bytes from r.Body and fails t if the contents read were not +// the value of want. +func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + buf := make([]byte, len(want)) + _, err := io.ReadFull(r.Body, buf) + if err != nil { + t.Error(err) + return + } + if string(buf) != want { + t.Errorf("read %q; want %q", buf, want) + } + } +} + +// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See: +// https://github.com/tatsuhiro-t/nghttp2/issues/140 & +// http://sourceforge.net/p/curl/bugs/1472/ +func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) } +func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) } + +func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) { + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + if testing.Short() { + t.Skip("skipping curl test in short mode") + } + requireCurl(t) + var gotConn int32 + testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) } + + const msg = "Hello from curl!\n" + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + w.Header().Set("Client-Proto", r.Proto) + io.WriteString(w, msg) + })) + ConfigureServer(ts.Config, &Server{ + PermitProhibitedCipherSuites: permitProhibitedCipherSuites, + }) + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + ts.StartTLS() + defer ts.Close() + + t.Logf("Running test server for curl to hit at: %s", ts.URL) + container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL) + defer kill(container) + resc := make(chan interface{}, 1) + go func() { + res, err := dockerLogs(container) + if err != nil { + resc <- err + } else { + resc <- res + } + }() + select { + case res := <-resc: + if err, ok := res.(error); ok { + t.Fatal(err) + } + body := string(res.([]byte)) + // Search for both "key: value" and "key:value", since curl changed their format + // Our Dockerfile contains the latest version (no space), but just in case people + // didn't rebuild, check both. + if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") { + t.Errorf("didn't see foo: Bar header") + t.Logf("Got: %s", body) + } + if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") { + t.Errorf("didn't see client-proto: HTTP/2 header") + t.Logf("Got: %s", res) + } + if !strings.Contains(string(res.([]byte)), msg) { + t.Errorf("didn't see %q content", msg) + t.Logf("Got: %s", res) + } + case <-time.After(3 * time.Second): + t.Errorf("timeout waiting for curl") + } + + if atomic.LoadInt32(&gotConn) == 0 { + t.Error("never saw an http2 connection") + } +} + +var doh2load = flag.Bool("h2load", false, "Run h2load test") + +func TestServerWithH2Load(t *testing.T) { + if !*doh2load { + t.Skip("Skipping without --h2load flag.") + } + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + requireH2load(t) + + msg := strings.Repeat("Hello, h2load!\n", 5000) + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + io.WriteString(w, msg) + })) + ts.StartTLS() + defer ts.Close() + + cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl", + "-n100000", "-c100", "-m100", ts.URL) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + t.Fatal(err) + } +} + +// Issue 12843 +func TestServerDoS_MaxHeaderListSize(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + defer st.Close() + + // shake hands + frameSize := defaultMaxReadFrameSize + var advHeaderListSize *uint32 + st.greetAndCheckSettings(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + if s.Val < minMaxFrameSize { + frameSize = minMaxFrameSize + } else if s.Val > maxFrameSize { + frameSize = maxFrameSize + } else { + frameSize = int(s.Val) + } + case SettingMaxHeaderListSize: + advHeaderListSize = &s.Val + } + return nil + }) + + if advHeaderListSize == nil { + t.Errorf("server didn't advertise a max header list size") + } else if *advHeaderListSize == 0 { + t.Errorf("server advertised a max header list size of 0") + } + + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + cookie := strings.Repeat("*", 4058) + st.encodeHeaderField("cookie", cookie) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.headerBuf.Bytes(), + EndStream: true, + EndHeaders: false, + }) + + // Capture the short encoding of a duplicate ~4K cookie, now + // that we've already sent it once. + st.headerBuf.Reset() + st.encodeHeaderField("cookie", cookie) + + // Now send 1MB of it. + const size = 1 << 20 + b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len()) + for len(b) > 0 { + chunk := b + if len(chunk) > frameSize { + chunk = chunk[:frameSize] + } + b = b[len(chunk):] + st.fr.WriteContinuation(1, len(b) == 0, chunk) + } + + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +func TestCompressionErrorOnWrite(t *testing.T) { + const maxStrLen = 8 << 10 + var serverConfig *http.Server + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }, func(ts *httptest.Server) { + serverConfig = ts.Config + serverConfig.MaxHeaderBytes = maxStrLen + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + maxAllowed := st.sc.framer.maxHeaderStringLen() + + // Crank this up, now that we have a conn connected with the + // hpack.Decoder's max string length set has been initialized + // from the earlier low ~8K value. We want this higher so don't + // hit the max header list size. We only want to test hitting + // the max string size. + serverConfig.MaxHeaderBytes = 1 << 20 + + // First a request with a header that's exactly the max allowed size + // for the hpack compression. It's still too long for the header list + // size, so we'll get the 431 error, but that keeps the compression + // context still valid. + hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed)) + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } + df := st.wantData() + if !strings.Contains(string(df.Data()), "HTTP Error 431") { + t.Errorf("Unexpected data body: %q", df.Data()) + } + if !df.StreamEnded() { + t.Fatalf("expect data stream end") + } + + // And now send one that's just one byte too big. + hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1)) + st.writeHeaders(HeadersFrameParam{ + StreamID: 3, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeCompression { + t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) + } +} + +func TestCompressionErrorOnClose(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + hbf := st.encodeHeader("foo", "bar") + hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails. + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeCompression { + t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) + } +} + +// test that a server handler can read trailers from a client +func TestServerReadsTrailers(t *testing.T) { + const testBody = "some test body" + writeReq := func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"), + EndStream: false, + EndHeaders: true, + }) + st.writeData(1, false, []byte(testBody)) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeaderRaw( + "foo", "foov", + "bar", "barv", + "baz", "bazv", + "surprise", "wasn't declared; shouldn't show up", + ), + EndStream: true, + EndHeaders: true, + }) + } + checkReq := func(r *http.Request) { + wantTrailer := http.Header{ + "Foo": nil, + "Bar": nil, + "Baz": nil, + } + if !reflect.DeepEqual(r.Trailer, wantTrailer) { + t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer) + } + slurp, err := ioutil.ReadAll(r.Body) + if string(slurp) != testBody { + t.Errorf("read body %q; want %q", slurp, testBody) + } + if err != nil { + t.Fatalf("Body slurp: %v", err) + } + wantTrailerAfter := http.Header{ + "Foo": {"foov"}, + "Bar": {"barv"}, + "Baz": {"bazv"}, + } + if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) { + t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter) + } + } + testServerRequest(t, writeReq, checkReq) +} + +// test that a server handler can send trailers +func TestServerWritesTrailers_WithFlush(t *testing.T) { testServerWritesTrailers(t, true) } +func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) } + +func testServerWritesTrailers(t *testing.T, withFlush bool) { + // See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B") + w.Header().Add("Trailer", "Server-Trailer-C") + w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered + + // Regular headers: + w.Header().Set("Foo", "Bar") + w.Header().Set("Content-Length", "5") // len("Hello") + + io.WriteString(w, "Hello") + if withFlush { + w.(http.Flusher).Flush() + } + w.Header().Set("Server-Trailer-A", "valuea") + w.Header().Set("Server-Trailer-C", "valuec") // skipping B + // After a flush, random keys like Server-Surprise shouldn't show up: + w.Header().Set("Server-Surpise", "surprise! this isn't predeclared!") + // But we do permit promoting keys to trailers after a + // flush if they start with the magic + // otherwise-invalid "Trailer:" prefix: + w.Header().Set("Trailer:Post-Header-Trailer", "hi1") + w.Header().Set("Trailer:post-header-trailer2", "hi2") + w.Header().Set("Trailer:Range", "invalid") + w.Header().Set("Trailer:Foo\x01Bogus", "invalid") + w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 7230 4.1.2") + w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 7230 4.1.2") + w.Header().Set("Trailer", "should not be included; Forbidden by RFC 7230 4.1.2") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("response HEADERS had END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo", "Bar"}, + {"trailer", "Server-Trailer-A, Server-Trailer-B"}, + {"trailer", "Server-Trailer-C"}, + {"trailer", "Transfer-Encoding, Content-Length, Trailer"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "5"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + df := st.wantData() + if string(df.Data()) != "Hello" { + t.Fatalf("Client read %q; want Hello", df.Data()) + } + if df.StreamEnded() { + t.Fatalf("data frame had STREAM_ENDED") + } + tf := st.wantHeaders() // for the trailers + if !tf.StreamEnded() { + t.Fatalf("trailers HEADERS lacked END_STREAM") + } + if !tf.HeadersEnded() { + t.Fatalf("trailers HEADERS lacked END_HEADERS") + } + wanth = [][2]string{ + {"post-header-trailer", "hi1"}, + {"post-header-trailer2", "hi2"}, + {"server-trailer-a", "valuea"}, + {"server-trailer-c", "valuec"}, + } + goth = st.decodeHeader(tf.HeaderBlockFragment()) + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +// validate transmitted header field names & values +// golang.org/issue/14048 +func TestServerDoesntWriteInvalidHeaders(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Add("OK1", "x") + w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key + w.Header().Add("Bad1\x00", "x") // null in key + w.Header().Add("Bad2", "x\x00y") // null in value + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("response HEADERS lacked END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"ok1", "x"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +func BenchmarkServerGets(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply. (plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + id := 1 + uint32(i)*2 + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } + } +} + +func BenchmarkServerPosts(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + // Consume the (empty) body from th peer before replying, otherwise + // the server will sometimes (depending on scheduling) send the peer a + // a RST_STREAM with the CANCEL error code. + if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { + b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply. (plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + id := 1 + uint32(i)*2 + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + st.writeData(id, true, nil) + st.wantHeaders() + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } + } +} + +// Send a stream of messages from server to client in separate data frames. +// Brings up performance issues seen in long streams. +// Created to show problem in go issue #18502 +func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) { + benchmarkServerToClientStream(b) +} + +// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8 +// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer. +func BenchmarkServerToClientStreamReuseFrames(b *testing.B) { + benchmarkServerToClientStream(b, optFramerReuseFrames) +} + +func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msgLen = 1 + // default window size + const windowSize = 1<<16 - 1 + + // next message to send from the server and for the client to expect + nextMsg := func(i int) []byte { + msg := make([]byte, msgLen) + msg[0] = byte(i) + if len(msg) != msgLen { + panic("invalid test setup msg length") + } + return msg + } + + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + // Consume the (empty) body from th peer before replying, otherwise + // the server will sometimes (depending on scheduling) send the peer a + // a RST_STREAM with the CANCEL error code. + if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { + b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) + } + for i := 0; i < b.N; i += 1 { + w.Write(nextMsg(i)) + w.(http.Flusher).Flush() + } + }, newServerOpts...) + defer st.Close() + st.greet() + + const id = uint32(1) + + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + + st.writeData(id, true, nil) + st.wantHeaders() + + var pendingWindowUpdate = uint32(0) + + for i := 0; i < b.N; i += 1 { + expected := nextMsg(i) + df := st.wantData() + if bytes.Compare(expected, df.data) != 0 { + b.Fatalf("Bad message received; want %v; got %v", expected, df.data) + } + // try to send infrequent but large window updates so they don't overwhelm the test + pendingWindowUpdate += uint32(len(df.data)) + if pendingWindowUpdate >= windowSize/2 { + if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + pendingWindowUpdate = 0 + } + } + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } +} + +// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53 +// Verify we don't hang. +func TestIssue53(t *testing.T) { + const data = "PRI * HTTP/2.0\r\n\r\nSM" + + "\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad" + s := &http.Server{ + ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags), + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("hello")) + }), + } + s2 := &Server{ + MaxReadFrameSize: 1 << 16, + PermitProhibitedCipherSuites: true, + } + c := &issue53Conn{[]byte(data), false, false} + s2.ServeConn(c, &ServeConnOpts{BaseConfig: s}) + if !c.closed { + t.Fatal("connection is not closed") + } +} + +type issue53Conn struct { + data []byte + closed bool + written bool +} + +func (c *issue53Conn) Read(b []byte) (n int, err error) { + if len(c.data) == 0 { + return 0, io.EOF + } + n = copy(b, c.data) + c.data = c.data[n:] + return +} + +func (c *issue53Conn) Write(b []byte) (n int, err error) { + c.written = true + return len(b), nil +} + +func (c *issue53Conn) Close() error { + c.closed = true + return nil +} + +func (c *issue53Conn) LocalAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} +func (c *issue53Conn) RemoteAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} +func (c *issue53Conn) SetDeadline(t time.Time) error { return nil } +func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil } +func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil } + +// golang.org/issue/12895 +func TestConfigureServer(t *testing.T) { + tests := []struct { + name string + tlsConfig *tls.Config + wantErr string + }{ + { + name: "empty server", + }, + { + name: "just the required cipher suite", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + }, + }, + { + name: "just the alternative required cipher suite", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + }, + }, + { + name: "missing required cipher suite", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}, + }, + wantErr: "is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.", + }, + { + name: "required after bad", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + }, + wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after", + }, + { + name: "bad after required", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA}, + }, + }, + } + for _, tt := range tests { + srv := &http.Server{TLSConfig: tt.tlsConfig} + err := ConfigureServer(srv, nil) + if (err != nil) != (tt.wantErr != "") { + if tt.wantErr != "" { + t.Errorf("%s: success, but want error", tt.name) + } else { + t.Errorf("%s: unexpected error: %v", tt.name, err) + } + } + if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr) + } + if err == nil && !srv.TLSConfig.PreferServerCipherSuites { + t.Errorf("%s: PreferServerCipherSuite is false; want true", tt.name) + } + } +} + +func TestServerRejectHeadWithBody(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "HEAD"), + EndStream: false, // what we're testing, a bogus HEAD request with body + EndHeaders: true, + }) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func TestServerNoAutoContentLengthOnHead(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. (or smaller than one frame) + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "HEAD"), + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "200"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +// golang.org/issue/13495 +func TestServerNoDuplicateContentType(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header()["Content-Type"] = []string{""} + fmt.Fprintf(w, "hi") + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "200"}, + {"content-type", ""}, + {"content-length", "41"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +func disableGoroutineTracking() (restore func()) { + old := DebugGoroutines + DebugGoroutines = false + return func() { DebugGoroutines = old } +} + +func BenchmarkServer_GetRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + + st.greet() + // Give the server quota to reply. (plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "GET") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + st.wantData() + } +} + +func BenchmarkServer_PostRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + // Give the server quota to reply. (plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "POST") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: false, + EndHeaders: true, + }) + st.writeData(streamID, true, nil) + st.wantHeaders() + st.wantData() + } +} + +type connStateConn struct { + net.Conn + cs tls.ConnectionState +} + +func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs } + +// golang.org/issue/12737 -- handle any net.Conn, not just +// *tls.Conn. +func TestServerHandleCustomConn(t *testing.T) { + var s Server + c1, c2 := net.Pipe() + clientDone := make(chan struct{}) + handlerDone := make(chan struct{}) + var req *http.Request + go func() { + defer close(clientDone) + defer c2.Close() + fr := NewFramer(c2, c2) + io.WriteString(c2, ClientPreface) + fr.WriteSettings() + fr.WriteSettingsAck() + f, err := fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() { + t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f)) + return + } + f, err = fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() { + t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f)) + return + } + var henc hpackEncoder + fr.WriteHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"), + EndStream: true, + EndHeaders: true, + }) + go io.Copy(ioutil.Discard, c2) + <-handlerDone + }() + const testString = "my custom ConnectionState" + fakeConnState := tls.ConnectionState{ + ServerName: testString, + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } + go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{ + BaseConfig: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer close(handlerDone) + req = r + }), + }}) + select { + case <-clientDone: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for handler") + } + if req.TLS == nil { + t.Fatalf("Request.TLS is nil. Got: %#v", req) + } + if req.TLS.ServerName != testString { + t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString) + } +} + +// golang.org/issue/14214 +func TestServer_Rejects_ConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("should not get to Handler") + }) + defer st.Close() + st.greet() + st.bodylessReq1("connection", "foo") + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "400"}, + {"content-type", "text/plain; charset=utf-8"}, + {"x-content-type-options", "nosniff"}, + {"content-length", "51"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } +} + +type hpackEncoder struct { + enc *hpack.Encoder + buf bytes.Buffer +} + +func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + he.buf.Reset() + if he.enc == nil { + he.enc = hpack.NewEncoder(&he.buf) + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + headers = headers[2:] + } + return he.buf.Bytes() +} + +func TestCheckValidHTTP2Request(t *testing.T) { + tests := []struct { + h http.Header + want error + }{ + { + h: http.Header{"Te": {"trailers"}}, + want: nil, + }, + { + h: http.Header{"Te": {"trailers", "bogus"}}, + want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`), + }, + { + h: http.Header{"Foo": {""}}, + want: nil, + }, + { + h: http.Header{"Connection": {""}}, + want: errors.New(`request header "Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Proxy-Connection": {""}}, + want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Keep-Alive": {""}}, + want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`), + }, + { + h: http.Header{"Upgrade": {""}}, + want: errors.New(`request header "Upgrade" is not valid in HTTP/2`), + }, + } + for i, tt := range tests { + got := checkValidHTTP2RequestHeaders(tt.h) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want) + } + } +} + +// golang.org/issue/14030 +func TestExpect100ContinueAfterHandlerWrites(t *testing.T) { + const msg = "Hello" + const msg2 = "World" + + doRead := make(chan bool, 1) + defer close(doRead) // fallback cleanup + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + + // Do a read, which might force a 100-continue status to be sent. + <-doRead + r.Body.Read(make([]byte, 10)) + + io.WriteString(w, msg2) + + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20)) + req.Header.Set("Expect", "100-continue") + + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + buf := make([]byte, len(msg)) + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg { + t.Fatalf("msg = %q; want %q", buf, msg) + } + + doRead <- true + + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg2 { + t.Fatalf("second msg = %q; want %q", buf, msg2) + } +} + +type funcReader func([]byte) (n int, err error) + +func (f funcReader) Read(p []byte) (n int, err error) { return f(p) } + +// golang.org/issue/16481 -- return flow control when streams close with unread data. +// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport) +func TestUnreadFlowControlReturned_Server(t *testing.T) { + unblock := make(chan bool, 1) + defer close(unblock) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Don't read the 16KB request body. Wait until the client's + // done sending it and then return. This should cause the Server + // to then return those 16KB of flow control to the client. + <-unblock + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + // This previously hung on the 4th iteration. + for i := 0; i < 6; i++ { + body := io.MultiReader( + io.LimitReader(neverEnding('A'), 16<<10), + funcReader(func([]byte) (n int, err error) { + unblock <- true + return 0, io.EOF + }), + ) + req, _ := http.NewRequest("POST", st.ts.URL, body) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } + +} + +func TestServerIdleTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + }, func(h2s *Server) { + h2s.IdleTimeout = 500 * time.Millisecond + }) + defer st.Close() + + st.greet() + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +func TestServerIdleTimeout_AfterRequest(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + const timeout = 250 * time.Millisecond + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + time.Sleep(timeout * 2) + }, func(h2s *Server) { + h2s.IdleTimeout = timeout + }) + defer st.Close() + + st.greet() + + // Send a request which takes twice the timeout. Verifies the + // idle timeout doesn't fire while we're in a request: + st.bodylessReq1() + st.wantHeaders() + + // But the idle timeout should be rearmed after the request + // is done: + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +// grpc-go closes the Request.Body currently with a Read. +// Verify that it doesn't race. +// See https://github.com/grpc/grpc-go/pull/938 +func TestRequestBodyReadCloseRace(t *testing.T) { + for i := 0; i < 100; i++ { + body := &requestBody{ + pipe: &pipe{ + b: new(bytes.Buffer), + }, + } + body.pipe.CloseWithError(io.EOF) + + done := make(chan bool, 1) + buf := make([]byte, 10) + go func() { + time.Sleep(1 * time.Millisecond) + body.Close() + done <- true + }() + body.Read(buf) + <-done + } +} + +func TestIssue20704Race(t *testing.T) { + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + t.Skip("skipping in short mode") + } + const ( + itemSize = 1 << 10 + itemCount = 100 + ) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < itemCount; i++ { + _, err := w.Write(make([]byte, itemSize)) + if err != nil { + return + } + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cl := &http.Client{Transport: tr} + + for i := 0; i < 1000; i++ { + resp, err := cl.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + // Force a RST stream to the server by closing without + // reading the body: + resp.Body.Close() + } +} diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml new file mode 100644 index 0000000..31a84be --- /dev/null +++ b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml @@ -0,0 +1,5021 @@ + + + + + + + + + + + + + + + + + + + Hypertext Transfer Protocol version 2 + + + Twist +
    + mbelshe@chromium.org +
    +
    + + + Google, Inc +
    + fenix@google.com +
    +
    + + + Mozilla +
    + + 331 E Evelyn Street + Mountain View + CA + 94041 + US + + martin.thomson@gmail.com +
    +
    + + + Applications + HTTPbis + HTTP + SPDY + Web + + + + This specification describes an optimized expression of the semantics of the Hypertext + Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a + reduced perception of latency by introducing header field compression and allowing multiple + concurrent messages on the same connection. It also introduces unsolicited push of + representations from servers to clients. + + + This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. + HTTP's existing semantics remain unchanged. + + + + + + Discussion of this draft takes place on the HTTPBIS working group mailing list + (ietf-http-wg@w3.org), which is archived at . + + + Working Group information can be found at ; that specific to HTTP/2 are at . + + + The changes in this draft are summarized in . + + + +
    + + +
    + + + The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the + HTTP/1.1 message format () has + several characteristics that have a negative overall effect on application performance + today. + + + In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given + TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed + request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 + clients that need to make many requests typically use multiple connections to a server in + order to achieve concurrency and thereby reduce latency. + + + Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary + network traffic, as well as causing the initial TCP congestion + window to quickly fill. This can result in excessive latency when multiple requests are + made on a new TCP connection. + + + HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an + underlying connection. Specifically, it allows interleaving of request and response + messages on the same connection and uses an efficient coding for HTTP header fields. It + also allows prioritization of requests, letting more important requests complete more + quickly, further improving performance. + + + The resulting protocol is more friendly to the network, because fewer TCP connections can + be used in comparison to HTTP/1.x. This means less competition with other flows, and + longer-lived connections, which in turn leads to better utilization of available network + capacity. + + + Finally, HTTP/2 also enables more efficient processing of messages through use of binary + message framing. + +
    + +
    + + HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core + features of HTTP/1.1, but aims to be more efficient in several ways. + + + The basic protocol unit in HTTP/2 is a frame. Each frame + type serves a different purpose. For example, HEADERS and + DATA frames form the basis of HTTP requests and + responses; other frame types like SETTINGS, + WINDOW_UPDATE, and PUSH_PROMISE are used in support of other + HTTP/2 features. + + + Multiplexing of requests is achieved by having each HTTP request-response exchange + associated with its own stream. Streams are largely + independent of each other, so a blocked or stalled request or response does not prevent + progress on other streams. + + + Flow control and prioritization ensure that it is possible to efficiently use multiplexed + streams. Flow control helps to ensure that only data that + can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed + to the most important streams first. + + + HTTP/2 adds a new interaction mode, whereby a server can push + responses to a client. Server push allows a server to speculatively send a client + data that the server anticipates the client will need, trading off some network usage + against a potential latency gain. The server does this by synthesizing a request, which it + sends as a PUSH_PROMISE frame. The server is then able to send a response to + the synthetic request on a separate stream. + + + Frames that contain HTTP header fields are compressed. + HTTP requests can be highly redundant, so compression can reduce the size of requests and + responses significantly. + + +
    + + The HTTP/2 specification is split into four parts: + + + Starting HTTP/2 covers how an HTTP/2 connection is + initiated. + + + The framing and streams layers describe the way HTTP/2 frames are + structured and formed into multiplexed streams. + + + Frame and error + definitions include details of the frame and error types used in HTTP/2. + + + HTTP mappings and additional + requirements describe how HTTP semantics are expressed using frames and + streams. + + + + + While some of the frame and stream layer concepts are isolated from HTTP, this + specification does not define a completely generic framing layer. The framing and streams + layers are tailored to the needs of the HTTP protocol and server push. + +
    + +
    + + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD + NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as + described in RFC 2119. + + + All numeric values are in network byte order. Values are unsigned unless otherwise + indicated. Literal values are provided in decimal or hexadecimal as appropriate. + Hexadecimal literals are prefixed with 0x to distinguish them + from decimal literals. + + + The following terms are used: + + + The endpoint initiating the HTTP/2 connection. + + + A transport-layer connection between two endpoints. + + + An error that affects the entire HTTP/2 connection. + + + Either the client or server of the connection. + + + The smallest unit of communication within an HTTP/2 connection, consisting of a header + and a variable-length sequence of octets structured according to the frame type. + + + An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint + that is remote to the primary subject of discussion. + + + An endpoint that is receiving frames. + + + An endpoint that is transmitting frames. + + + The endpoint which did not initiate the HTTP/2 connection. + + + A bi-directional flow of frames across a virtual channel within the HTTP/2 connection. + + + An error on the individual HTTP/2 stream. + + + + + Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined + in . + +
    +
    + +
    + + An HTTP/2 connection is an application layer protocol running on top of a TCP connection + (). The client is the TCP connection initiator. + + + HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same + default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, + implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the + upstream server (the immediate peer to which the client wishes to establish a connection) + supports HTTP/2. + + + + The means by which support for HTTP/2 is determined is different for "http" and "https" + URIs. Discovery for "http" URIs is described in . Discovery + for "https" URIs is described in . + + +
    + + The protocol defined in this document has two identifiers. + + + + The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) + field and any place that HTTP/2 over TLS is identified. + + + The "h2" string is serialized into an ALPN protocol identifier as the two octet + sequence: 0x68, 0x32. + + + + + The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. + This identifier is used in the HTTP/1.1 Upgrade header field and any place that + HTTP/2 over TCP is identified. + + + + + + Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message + semantics described in this document. + + + RFC Editor's Note: please remove the remainder of this section prior to the + publication of a final version of this document. + + + Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". + Until such an RFC exists, implementations MUST NOT identify themselves using these + strings. + + + Examples and text throughout the rest of this document use "h2" as a matter of + editorial convenience only. Implementations of draft versions MUST NOT identify using + this string. + + + Implementations of draft versions of the protocol MUST add the string "-" and the + corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 + over TLS is identified using the string "h2-11". + + + Non-compatible experiments that are based on these draft versions MUST append the string + "-" and an experiment name to the identifier. For example, an experimental implementation + of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself + as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in + . Experimenters are + encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. + +
    + +
    + + A client that makes a request for an "http" URI without prior knowledge about support for + HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade + header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include + exactly one HTTP2-Settings header field. + +
    + For example: + + +]]> +
    + + Requests that contain an entity body MUST be sent in their entirety before the client can + send HTTP/2 frames. This means that a large request entity can block the use of the + connection until it is completely sent. + + + If concurrency of an initial request with subsequent requests is important, an OPTIONS + request can be used to perform the upgrade to HTTP/2, at the cost of an additional + round-trip. + + + A server that does not support HTTP/2 can respond to the request as though the Upgrade + header field were absent: + +
    + +HTTP/1.1 200 OK +Content-Length: 243 +Content-Type: text/html + +... + +
    + + A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with + "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . + + + A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) + response. After the empty line that terminates the 101 response, the server can begin + sending HTTP/2 frames. These frames MUST include a response to the request that initiated + the Upgrade. + + +
    + + For example: + + +HTTP/1.1 101 Switching Protocols +Connection: Upgrade +Upgrade: h2c + +[ HTTP/2 connection ... + +
    + + The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a + SETTINGS frame. + + + The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is + assigned default priority values. Stream 1 is + implicitly half closed from the client toward the server, since the request is completed + as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the + response. + + +
    + + A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field + that includes parameters that govern the HTTP/2 connection, provided in anticipation of + the server accepting the request to upgrade. + +
    + +
    + + A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, + or if more than one is present. A server MUST NOT send this header field. + + + + The content of the HTTP2-Settings header field is the + payload of a SETTINGS frame (), encoded as a + base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The + ABNF production for token68 is + defined in . + + + Since the upgrade is only intended to apply to the immediate connection, a client + sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded + downstream. + + + A server decodes and interprets these values as it would any other + SETTINGS frame. Acknowledgement of the + SETTINGS parameters is not necessary, since a 101 response serves as implicit + acknowledgment. Providing these values in the Upgrade request gives a client an + opportunity to provide parameters prior to receiving any frames from the server. + +
    +
    + +
    + + A client that makes a request to an "https" URI uses TLS + with the application layer protocol negotiation extension. + + + HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a + client or selected by a server. + + + Once TLS negotiation is complete, both the client and the server send a connection preface. + +
    + +
    + + A client can learn that a particular server supports HTTP/2 by other means. For example, + describes a mechanism for advertising this capability. + + + A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, + after the connection preface; a server can + identify such a connection by the presence of the connection preface. This only affects + the establishment of HTTP/2 connections over cleartext TCP; implementations that support + HTTP/2 over TLS MUST use protocol negotiation in TLS. + + + Without additional information, prior support for HTTP/2 is not a strong signal that a + given server will support HTTP/2 for future connections. For example, it is possible for + server configurations to change, for configurations to differ between instances in + clustered servers, or for network conditions to change. + +
    + +
    + + Upon establishment of a TCP connection and determination that HTTP/2 will be used by both + peers, each endpoint MUST send a connection preface as a final confirmation and to + establish the initial SETTINGS parameters for the HTTP/2 connection. The client and + server each send a different connection preface. + + + The client connection preface starts with a sequence of 24 octets, which in hex notation + are: + +
    + +
    + + (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence + is followed by a SETTINGS frame (). The + SETTINGS frame MAY be empty. The client sends the client connection + preface immediately upon receipt of a 101 Switching Protocols response (indicating a + successful upgrade), or as the first application data octets of a TLS connection. If + starting an HTTP/2 connection with prior knowledge of server support for the protocol, the + client connection preface is sent upon connection establishment. + + + + + The client connection preface is selected so that a large proportion of HTTP/1.1 or + HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note + that this does not address the concerns raised in . + + + + + The server connection preface consists of a potentially empty SETTINGS + frame () that MUST be the first frame the server sends in the + HTTP/2 connection. + + + The SETTINGS frames received from a peer as part of the connection preface + MUST be acknowledged (see ) after sending the connection + preface. + + + To avoid unnecessary latency, clients are permitted to send additional frames to the + server immediately after sending the client connection preface, without waiting to receive + the server connection preface. It is important to note, however, that the server + connection preface SETTINGS frame might include parameters that necessarily + alter how a client is expected to communicate with the server. Upon receiving the + SETTINGS frame, the client is expected to honor any parameters established. + In some configurations, it is possible for the server to transmit SETTINGS + before the client sends additional frames, providing an opportunity to avoid this issue. + + + Clients and servers MUST treat an invalid connection preface as a connection error of type + PROTOCOL_ERROR. A GOAWAY frame () + MAY be omitted in this case, since an invalid preface indicates that the peer is not using + HTTP/2. + +
    +
    + +
    + + Once the HTTP/2 connection is established, endpoints can begin exchanging frames. + + +
    + + All frames begin with a fixed 9-octet header followed by a variable-length payload. + +
    + +
    + + The fields of the frame header are defined as: + + + + The length of the frame payload expressed as an unsigned 24-bit integer. Values + greater than 214 (16,384) MUST NOT be sent unless the receiver has + set a larger value for SETTINGS_MAX_FRAME_SIZE. + + + The 9 octets of the frame header are not included in this value. + + + + + The 8-bit type of the frame. The frame type determines the format and semantics of + the frame. Implementations MUST ignore and discard any frame that has a type that + is unknown. + + + + + An 8-bit field reserved for frame-type specific boolean flags. + + + Flags are assigned semantics specific to the indicated frame type. Flags that have + no defined semantics for a particular frame type MUST be ignored, and MUST be left + unset (0) when sending. + + + + + A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST + remain unset (0) when sending and MUST be ignored when receiving. + + + + + A 31-bit stream identifier (see ). The value 0 is + reserved for frames that are associated with the connection as a whole as opposed to + an individual stream. + + + + + + The structure and content of the frame payload is dependent entirely on the frame type. + +
    + +
    + + The size of a frame payload is limited by the maximum size that a receiver advertises in + the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value + between 214 (16,384) and 224-1 (16,777,215) octets, + inclusive. + + + All implementations MUST be capable of receiving and minimally processing frames up to + 214 octets in length, plus the 9 octet frame + header. The size of the frame header is not included when describing frame sizes. + + + Certain frame types, such as PING, impose additional limits + on the amount of payload data allowed. + + + + + If a frame size exceeds any defined limit, or is too small to contain mandatory frame + data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error + in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying + a header block (that is, HEADERS, + PUSH_PROMISE, and CONTINUATION), SETTINGS, + and any WINDOW_UPDATE frame with a stream identifier of 0. + + + Endpoints are not obligated to use all available space in a frame. Responsiveness can be + improved by using frames that are smaller than the permitted maximum size. Sending large + frames can result in delays in sending time-sensitive frames (such + RST_STREAM, WINDOW_UPDATE, or PRIORITY) + which if blocked by the transmission of a large frame, could affect performance. + +
    + +
    + + Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values. + They are used within HTTP request and response messages as well as server push operations + (see ). + + + Header lists are collections of zero or more header fields. When transmitted over a + connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then + divided into one or more octet sequences, called header block fragments, and transmitted + within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames. + + + The Cookie header field is treated specially by the HTTP + mapping (see ). + + + A receiving endpoint reassembles the header block by concatenating its fragments, then + decompresses the block to reconstruct the header list. + + + A complete header block consists of either: + + + a single HEADERS or PUSH_PROMISE frame, + with the END_HEADERS flag set, or + + + a HEADERS or PUSH_PROMISE frame with the END_HEADERS + flag cleared and one or more CONTINUATION frames, + where the last CONTINUATION frame has the END_HEADERS flag set. + + + + + Header compression is stateful. One compression context and one decompression context is + used for the entire connection. Each header block is processed as a discrete unit. + Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved + frames of any other type or from any other stream. The last frame in a sequence of + HEADERS or CONTINUATION frames MUST have the END_HEADERS + flag set. The last frame in a sequence of PUSH_PROMISE or + CONTINUATION frames MUST have the END_HEADERS flag set. This allows a + header block to be logically equivalent to a single frame. + + + Header block fragments can only be sent as the payload of HEADERS, + PUSH_PROMISE or CONTINUATION frames, because these frames + carry data that can modify the compression context maintained by a receiver. An endpoint + receiving HEADERS, PUSH_PROMISE or + CONTINUATION frames MUST reassemble header blocks and perform decompression + even if the frames are to be discarded. A receiver MUST terminate the connection with a + connection error of type + COMPRESSION_ERROR if it does not decompress a header block. + +
    +
    + +
    + + A "stream" is an independent, bi-directional sequence of frames exchanged between the client + and server within an HTTP/2 connection. Streams have several important characteristics: + + + A single HTTP/2 connection can contain multiple concurrently open streams, with either + endpoint interleaving frames from multiple streams. + + + Streams can be established and used unilaterally or shared by either the client or + server. + + + Streams can be closed by either endpoint. + + + The order in which frames are sent on a stream is significant. Recipients process frames + in the order they are received. In particular, the order of HEADERS, + and DATA frames is semantically significant. + + + Streams are identified by an integer. Stream identifiers are assigned to streams by the + endpoint initiating the stream. + + + + +
    + + The lifecycle of a stream is shown in . + + +
    + + | |<-----------' | + | R | closed | R | + `-------------------->| |<--------------------' + +--------+ + + H: HEADERS frame (with implied CONTINUATIONs) + PP: PUSH_PROMISE frame (with implied CONTINUATIONs) + ES: END_STREAM flag + R: RST_STREAM frame +]]> + +
    + + + Note that this diagram shows stream state transitions and the frames and flags that affect + those transitions only. In this regard, CONTINUATION frames do not result + in state transitions; they are effectively part of the HEADERS or + PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is + processed as a separate event to the frame that bears it; a HEADERS frame + with the END_STREAM flag set can cause two state transitions. + + + Both endpoints have a subjective view of the state of a stream that could be different + when frames are in transit. Endpoints do not coordinate the creation of streams; they are + created unilaterally by either endpoint. The negative consequences of a mismatch in + states are limited to the "closed" state after sending RST_STREAM, where + frames might be received for some time after closing. + + + Streams have the following states: + + + + + + All streams start in the "idle" state. In this state, no frames have been + exchanged. + + + The following transitions are valid from this state: + + + Sending or receiving a HEADERS frame causes the stream to become + "open". The stream identifier is selected as described in . The same HEADERS frame can also + cause a stream to immediately become "half closed". + + + Sending a PUSH_PROMISE frame marks the associated stream for + later use. The stream state for the reserved stream transitions to "reserved + (local)". + + + Receiving a PUSH_PROMISE frame marks the associated stream as + reserved by the remote peer. The state of the stream becomes "reserved + (remote)". + + + + + Receiving any frames other than HEADERS or + PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (local)" state is one that has been promised by sending a + PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an + idle stream by associating the stream with an open stream that was initiated by the + remote peer (see ). + + + In this state, only the following transitions are possible: + + + The endpoint can send a HEADERS frame. This causes the stream to + open in a "half closed (remote)" state. + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MUST NOT send any type of frame other than HEADERS or + RST_STREAM in this state. + + + A PRIORITY frame MAY be received in this state. Receiving any type + of frame other than RST_STREAM or PRIORITY on a stream + in this state MUST be treated as a connection + error of type PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (remote)" state has been reserved by a remote peer. + + + In this state, only the following transitions are possible: + + + Receiving a HEADERS frame causes the stream to transition to + "half closed (local)". + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MAY send a PRIORITY frame in this state to reprioritize + the reserved stream. An endpoint MUST NOT send any type of frame other than + RST_STREAM, WINDOW_UPDATE, or PRIORITY + in this state. + + + Receiving any type of frame other than HEADERS or + RST_STREAM on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "open" state may be used by both peers to send frames of any type. + In this state, sending peers observe advertised stream + level flow control limits. + + + From this state either endpoint can send a frame with an END_STREAM flag set, which + causes the stream to transition into one of the "half closed" states: an endpoint + sending an END_STREAM flag causes the stream state to become "half closed (local)"; + an endpoint receiving an END_STREAM flag causes the stream state to become "half + closed (remote)". + + + Either endpoint can send a RST_STREAM frame from this state, causing + it to transition immediately to "closed". + + + + + + + A stream that is in the "half closed (local)" state cannot be used for sending + frames. Only WINDOW_UPDATE, PRIORITY and + RST_STREAM frames can be sent in this state. + + + A stream transitions from this state to "closed" when a frame that contains an + END_STREAM flag is received, or when either peer sends a RST_STREAM + frame. + + + A receiver can ignore WINDOW_UPDATE frames in this state, which might + arrive for a short period after a frame bearing the END_STREAM flag is sent. + + + PRIORITY frames received in this state are used to reprioritize + streams that depend on the current stream. + + + + + + + A stream that is "half closed (remote)" is no longer being used by the peer to send + frames. In this state, an endpoint is no longer obligated to maintain a receiver + flow control window if it performs flow control. + + + If an endpoint receives additional frames for a stream that is in this state, other + than WINDOW_UPDATE, PRIORITY or + RST_STREAM, it MUST respond with a stream error of type + STREAM_CLOSED. + + + A stream that is "half closed (remote)" can be used by the endpoint to send frames + of any type. In this state, the endpoint continues to observe advertised stream level flow control limits. + + + A stream can transition from this state to "closed" by sending a frame that contains + an END_STREAM flag, or when either peer sends a RST_STREAM frame. + + + + + + + The "closed" state is the terminal state. + + + An endpoint MUST NOT send frames other than PRIORITY on a closed + stream. An endpoint that receives any frame other than PRIORITY + after receiving a RST_STREAM MUST treat that as a stream error of type + STREAM_CLOSED. Similarly, an endpoint that receives any frames after + receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type + STREAM_CLOSED, unless the frame is permitted as described below. + + + WINDOW_UPDATE or RST_STREAM frames can be received in + this state for a short period after a DATA or HEADERS + frame containing an END_STREAM flag is sent. Until the remote peer receives and + processes RST_STREAM or the frame bearing the END_STREAM flag, it + might send frames of these types. Endpoints MUST ignore + WINDOW_UPDATE or RST_STREAM frames received in this + state, though endpoints MAY choose to treat frames that arrive a significant time + after sending END_STREAM as a connection + error of type PROTOCOL_ERROR. + + + PRIORITY frames can be sent on closed streams to prioritize streams + that are dependent on the closed stream. Endpoints SHOULD process + PRIORITY frame, though they can be ignored if the stream has been + removed from the dependency tree (see ). + + + If this state is reached as a result of sending a RST_STREAM frame, + the peer that receives the RST_STREAM might have already sent - or + enqueued for sending - frames on the stream that cannot be withdrawn. An endpoint + MUST ignore frames that it receives on closed streams after it has sent a + RST_STREAM frame. An endpoint MAY choose to limit the period over + which it ignores frames and treat frames that arrive after this time as being in + error. + + + Flow controlled frames (i.e., DATA) received after sending + RST_STREAM are counted toward the connection flow control window. + Even though these frames might be ignored, because they are sent before the sender + receives the RST_STREAM, the sender will consider the frames to count + against the flow control window. + + + An endpoint might receive a PUSH_PROMISE frame after it sends + RST_STREAM. PUSH_PROMISE causes a stream to become + "reserved" even if the associated stream has been reset. Therefore, a + RST_STREAM is needed to close an unwanted promised stream. + + + + + + In the absence of more specific guidance elsewhere in this document, implementations + SHOULD treat the receipt of a frame that is not expressly permitted in the description of + a state as a connection error of type + PROTOCOL_ERROR. Frame of unknown types are ignored. + + + An example of the state transitions for an HTTP request/response exchange can be found in + . An example of the state transitions for server push can be + found in and . + + +
    + + Streams are identified with an unsigned 31-bit integer. Streams initiated by a client + MUST use odd-numbered stream identifiers; those initiated by the server MUST use + even-numbered stream identifiers. A stream identifier of zero (0x0) is used for + connection control messages; the stream identifier zero cannot be used to establish a + new stream. + + + HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are + responded to with a stream identifier of one (0x1). After the upgrade + completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 + cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. + + + The identifier of a newly established stream MUST be numerically greater than all + streams that the initiating endpoint has opened or reserved. This governs streams that + are opened using a HEADERS frame and streams that are reserved using + PUSH_PROMISE. An endpoint that receives an unexpected stream identifier + MUST respond with a connection error of + type PROTOCOL_ERROR. + + + The first use of a new stream identifier implicitly closes all streams in the "idle" + state that might have been initiated by that peer with a lower-valued stream identifier. + For example, if a client sends a HEADERS frame on stream 7 without ever + sending a frame on stream 5, then stream 5 transitions to the "closed" state when the + first frame for stream 7 is sent or received. + + + Stream identifiers cannot be reused. Long-lived connections can result in an endpoint + exhausting the available range of stream identifiers. A client that is unable to + establish a new stream identifier can establish a new connection for new streams. A + server that is unable to establish a new stream identifier can send a + GOAWAY frame so that the client is forced to open a new connection for + new streams. + +
    + +
    + + A peer can limit the number of concurrently active streams using the + SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent + streams setting is specific to each endpoint and applies only to the peer that receives + the setting. That is, clients specify the maximum number of concurrent streams the + server can initiate, and servers specify the maximum number of concurrent streams the + client can initiate. + + + Streams that are in the "open" state, or either of the "half closed" states count toward + the maximum number of streams that an endpoint is permitted to open. Streams in any of + these three states count toward the limit advertised in the + SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the + "reserved" states do not count toward the stream limit. + + + Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a + HEADERS frame that causes their advertised concurrent stream limit to be + exceeded MUST treat this as a stream error. An + endpoint that wishes to reduce the value of + SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current + number of open streams can either close streams that exceed the new value or allow + streams to complete. + +
    +
    + +
    + + Using streams for multiplexing introduces contention over use of the TCP connection, + resulting in blocked streams. A flow control scheme ensures that streams on the same + connection do not destructively interfere with each other. Flow control is used for both + individual streams and for the connection as a whole. + + + HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. + + +
    + + HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be + used without requiring protocol changes. Flow control in HTTP/2 has the following + characteristics: + + + Flow control is specific to a connection; i.e., it is "hop-by-hop", not + "end-to-end". + + + Flow control is based on window update frames. Receivers advertise how many octets + they are prepared to receive on a stream and for the entire connection. This is a + credit-based scheme. + + + Flow control is directional with overall control provided by the receiver. A + receiver MAY choose to set any window size that it desires for each stream and for + the entire connection. A sender MUST respect flow control limits imposed by a + receiver. Clients, servers and intermediaries all independently advertise their + flow control window as a receiver and abide by the flow control limits set by + their peer when sending. + + + The initial value for the flow control window is 65,535 octets for both new streams + and the overall connection. + + + The frame type determines whether flow control applies to a frame. Of the frames + specified in this document, only DATA frames are subject to flow + control; all other frame types do not consume space in the advertised flow control + window. This ensures that important control frames are not blocked by flow control. + + + Flow control cannot be disabled. + + + HTTP/2 defines only the format and semantics of the WINDOW_UPDATE + frame (). This document does not stipulate how a + receiver decides when to send this frame or the value that it sends, nor does it + specify how a sender chooses to send packets. Implementations are able to select + any algorithm that suits their needs. + + + + + Implementations are also responsible for managing how requests and responses are sent + based on priority; choosing how to avoid head of line blocking for requests; and + managing the creation of new streams. Algorithm choices for these could interact with + any flow control algorithm. + +
    + +
    + + Flow control is defined to protect endpoints that are operating under resource + constraints. For example, a proxy needs to share memory between many connections, and + also might have a slow upstream connection and a fast downstream one. Flow control + addresses cases where the receiver is unable process data on one stream, yet wants to + continue to process other streams in the same connection. + + + Deployments that do not require this capability can advertise a flow control window of + the maximum size, incrementing the available space when new data is received. This + effectively disables flow control for that receiver. Conversely, a sender is always + subject to the flow control window advertised by the receiver. + + + Deployments with constrained resources (for example, memory) can employ flow control to + limit the amount of memory a peer can consume. Note, however, that this can lead to + suboptimal use of available network resources if flow control is enabled without + knowledge of the bandwidth-delay product (see ). + + + Even with full awareness of the current bandwidth-delay product, implementation of flow + control can be difficult. When using flow control, the receiver MUST read from the TCP + receive buffer in a timely fashion. Failure to do so could lead to a deadlock when + critical frames, such as WINDOW_UPDATE, are not read and acted upon. + +
    +
    + +
    + + A client can assign a priority for a new stream by including prioritization information in + the HEADERS frame that opens the stream. For an existing + stream, the PRIORITY frame can be used to change the + priority. + + + The purpose of prioritization is to allow an endpoint to express how it would prefer its + peer allocate resources when managing concurrent streams. Most importantly, priority can + be used to select streams for transmitting frames when there is limited capacity for + sending. + + + Streams can be prioritized by marking them as dependent on the completion of other streams + (). Each dependency is assigned a relative weight, a number + that is used to determine the relative proportion of available resources that are assigned + to streams dependent on the same stream. + + + + Explicitly setting the priority for a stream is input to a prioritization process. It + does not guarantee any particular processing or transmission order for the stream relative + to any other stream. An endpoint cannot force a peer to process concurrent streams in a + particular order using priority. Expressing priority is therefore only ever a suggestion. + + + Providing prioritization information is optional, so default values are used if no + explicit indicator is provided (). + + +
    + + Each stream can be given an explicit dependency on another stream. Including a + dependency expresses a preference to allocate resources to the identified stream rather + than to the dependent stream. + + + A stream that is not dependent on any other stream is given a stream dependency of 0x0. + In other words, the non-existent stream 0 forms the root of the tree. + + + A stream that depends on another stream is a dependent stream. The stream upon which a + stream is dependent is a parent stream. A dependency on a stream that is not currently + in the tree - such as a stream in the "idle" state - results in that stream being given + a default priority. + + + When assigning a dependency on another stream, the stream is added as a new dependency + of the parent stream. Dependent streams that share the same parent are not ordered with + respect to each other. For example, if streams B and C are dependent on stream A, and + if stream D is created with a dependency on stream A, this results in a dependency order + of A followed by B, C, and D in any order. + +
    + /|\ + B C B D C +]]> +
    + + An exclusive flag allows for the insertion of a new level of dependencies. The + exclusive flag causes the stream to become the sole dependency of its parent stream, + causing other dependencies to become dependent on the exclusive stream. In the + previous example, if stream D is created with an exclusive dependency on stream A, this + results in D becoming the dependency parent of B and C. + +
    + D + B C / \ + B C +]]> +
    + + Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all + of the streams that it depends on (the chain of parent streams up to 0x0) are either + closed, or it is not possible to make progress on them. + + + A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. + +
    + +
    + + All dependent streams are allocated an integer weight between 1 and 256 (inclusive). + + + Streams with the same parent SHOULD be allocated resources proportionally based on their + weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A + with weight 12, and if no progress can be made on A, stream B ideally receives one third + of the resources allocated to stream C. + +
    + +
    + + Stream priorities are changed using the PRIORITY frame. Setting a + dependency causes a stream to become dependent on the identified parent stream. + + + Dependent streams move with their parent stream if the parent is reprioritized. Setting + a dependency with the exclusive flag for a reprioritized stream moves all the + dependencies of the new parent stream to become dependent on the reprioritized stream. + + + If a stream is made dependent on one of its own dependencies, the formerly dependent + stream is first moved to be dependent on the reprioritized stream's previous parent. + The moved dependency retains its weight. + +
    + + For example, consider an original dependency tree where B and C depend on A, D and E + depend on C, and F depends on D. If A is made dependent on D, then D takes the place + of A. All other dependency relationships stay the same, except for F, which becomes + dependent on A if the reprioritization is exclusive. + + F B C ==> F A OR A + / \ | / \ /|\ + D E E B C B C F + | | | + F E E + (intermediate) (non-exclusive) (exclusive) +]]> +
    +
    + +
    + + When a stream is removed from the dependency tree, its dependencies can be moved to + become dependent on the parent of the closed stream. The weights of new dependencies + are recalculated by distributing the weight of the dependency of the closed stream + proportionally based on the weights of its dependencies. + + + Streams that are removed from the dependency tree cause some prioritization information + to be lost. Resources are shared between streams with the same parent stream, which + means that if a stream in that set closes or becomes blocked, any spare capacity + allocated to a stream is distributed to the immediate neighbors of the stream. However, + if the common dependency is removed from the tree, those streams share resources with + streams at the next highest level. + + + For example, assume streams A and B share a parent, and streams C and D both depend on + stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, + then stream C receives all the resources dedicated to stream A. If stream A is removed + from the tree, the weight of stream A is divided between streams C and D. If stream D + is still unable to proceed, this results in stream C receiving a reduced proportion of + resources. For equal starting weights, C receives one third, rather than one half, of + available resources. + + + It is possible for a stream to become closed while prioritization information that + creates a dependency on that stream is in transit. If a stream identified in a + dependency has no associated priority information, then the dependent stream is instead + assigned a default priority. This potentially creates + suboptimal prioritization, since the stream could be given a priority that is different + to what is intended. + + + To avoid these problems, an endpoint SHOULD retain stream prioritization state for a + period after streams become closed. The longer state is retained, the lower the chance + that streams are assigned incorrect or default priority values. + + + This could create a large state burden for an endpoint, so this state MAY be limited. + An endpoint MAY apply a fixed upper limit on the number of closed streams for which + prioritization state is tracked to limit state exposure. The amount of additional state + an endpoint maintains could be dependent on load; under high load, prioritization state + can be discarded to limit resource commitments. In extreme cases, an endpoint could + even discard prioritization state for active or reserved streams. If a fixed limit is + applied, endpoints SHOULD maintain state for at least as many streams as allowed by + their setting for SETTINGS_MAX_CONCURRENT_STREAMS. + + + An endpoint receiving a PRIORITY frame that changes the priority of a + closed stream SHOULD alter the dependencies of the streams that depend on it, if it has + retained enough state to do so. + +
    + +
    + + Providing priority information is optional. Streams are assigned a non-exclusive + dependency on stream 0x0 by default. Pushed streams + initially depend on their associated stream. In both cases, streams are assigned a + default weight of 16. + +
    +
    + +
    + + HTTP/2 framing permits two classes of error: + + + An error condition that renders the entire connection unusable is a connection error. + + + An error in an individual stream is a stream error. + + + + + A list of error codes is included in . + + +
    + + A connection error is any error which prevents further processing of the framing layer, + or which corrupts any connection state. + + + An endpoint that encounters a connection error SHOULD first send a GOAWAY + frame () with the stream identifier of the last stream that it + successfully received from its peer. The GOAWAY frame includes an error + code that indicates why the connection is terminating. After sending the + GOAWAY frame, the endpoint MUST close the TCP connection. + + + It is possible that the GOAWAY will not be reliably received by the + receiving endpoint (see ). In the event of a connection error, + GOAWAY only provides a best effort attempt to communicate with the peer + about why the connection is being terminated. + + + An endpoint can end a connection at any time. In particular, an endpoint MAY choose to + treat a stream error as a connection error. Endpoints SHOULD send a + GOAWAY frame when ending a connection, providing that circumstances + permit it. + +
    + +
    + + A stream error is an error related to a specific stream that does not affect processing + of other streams. + + + An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error + occurred. The RST_STREAM frame includes an error code that indicates the + type of error. + + + A RST_STREAM is the last frame that an endpoint can send on a stream. + The peer that sends the RST_STREAM frame MUST be prepared to receive any + frames that were sent or enqueued for sending by the remote peer. These frames can be + ignored, except where they modify connection state (such as the state maintained for + header compression, or flow control). + + + Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for + any stream. However, an endpoint MAY send additional RST_STREAM frames if + it receives frames on a closed stream after more than a round-trip time. This behavior + is permitted to deal with misbehaving implementations. + + + An endpoint MUST NOT send a RST_STREAM in response to an + RST_STREAM frame, to avoid looping. + +
    + +
    + + If the TCP connection is closed or reset while streams remain in open or half closed + states, then the endpoint MUST assume that those streams were abnormally interrupted and + could be incomplete. + +
    +
    + +
    + + HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide + additional services or alter any aspect of the protocol, within the limitations described + in this section. Extensions are effective only within the scope of a single HTTP/2 + connection. + + + Extensions are permitted to use new frame types, new + settings, or new error + codes. Registries are established for managing these extension points: frame types, settings and + error codes. + + + Implementations MUST ignore unknown or unsupported values in all extensible protocol + elements. Implementations MUST discard frames that have unknown or unsupported types. + This means that any of these extension points can be safely used by extensions without + prior arrangement or negotiation. However, extension frames that appear in the middle of + a header block are not permitted; these MUST be treated + as a connection error of type + PROTOCOL_ERROR. + + + However, extensions that could change the semantics of existing protocol components MUST + be negotiated before being used. For example, an extension that changes the layout of the + HEADERS frame cannot be used until the peer has given a positive signal + that this is acceptable. In this case, it could also be necessary to coordinate when the + revised layout comes into effect. Note that treating any frame other than + DATA frames as flow controlled is such a change in semantics, and can only + be done through negotiation. + + + This document doesn't mandate a specific method for negotiating the use of an extension, + but notes that a setting could be used for that + purpose. If both peers set a value that indicates willingness to use the extension, then + the extension can be used. If a setting is used for extension negotiation, the initial + value MUST be defined so that the extension is initially disabled. + +
    +
    + +
    + + This specification defines a number of frame types, each identified by a unique 8-bit type + code. Each frame type serves a distinct purpose either in the establishment and management + of the connection as a whole, or of individual streams. + + + The transmission of specific frame types can alter the state of a connection. If endpoints + fail to maintain a synchronized view of the connection state, successful communication + within the connection will no longer be possible. Therefore, it is important that endpoints + have a shared comprehension of how the state is affected by the use any given frame. + + +
    + + DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated + with a stream. One or more DATA frames are used, for instance, to carry HTTP request or + response payloads. + + + DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to + obscure the size of messages. + +
    + +
    + + The DATA frame contains the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is optional and is only present if the PADDED flag is set. + + + Application data. The amount of data is the remainder of the frame payload after + subtracting the length of the other fields that are present. + + + Padding octets that contain no application semantic value. Padding octets MUST be set + to zero when sending and ignored when receiving. + + + + + + The DATA frame defines the following flags: + + + Bit 1 being set indicates that this frame is the last that the endpoint will send for + the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state. + + + Bit 4 being set indicates that the Pad Length field and any padding that it describes + is present. + + + + + DATA frames MUST be associated with a stream. If a DATA frame is received whose stream + identifier field is 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + DATA frames are subject to flow control and can only be sent when a stream is in the + "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow + control, including Pad Length and Padding fields if present. If a DATA frame is received + whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond + with a stream error of type + STREAM_CLOSED. + + + The total number of padding octets is determined by the value of the Pad Length field. If + the length of the padding is greater than the length of the frame payload, the recipient + MUST treat this as a connection error of + type PROTOCOL_ERROR. + + + A frame can be increased in size by one octet by including a Pad Length field with a + value of zero. + + + + + Padding is a security feature; see . + +
    + +
    + + The HEADERS frame (type=0x1) is used to open a stream, + and additionally carries a header block fragment. HEADERS frames can be sent on a stream + in the "open" or "half closed (remote)" states. + +
    + +
    + + The HEADERS frame payload has the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is only present if the PADDED flag is set. + + + A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set. + + + A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set. + + + An 8-bit weight for the stream, see . Add one to the + value to obtain a weight between 1 and 256. This field is only present if the + PRIORITY flag is set. + + + A header block fragment. + + + Padding octets that contain no application semantic value. Padding octets MUST be set + to zero when sending and ignored when receiving. + + + + + + The HEADERS frame defines the following flags: + + + + Bit 1 being set indicates that the header block is + the last that the endpoint will send for the identified stream. Setting this flag + causes the stream to enter one of "half closed" + states. + + + A HEADERS frame carries the END_STREAM flag that signals the end of a stream. + However, a HEADERS frame with the END_STREAM flag set can be followed by + CONTINUATION frames on the same stream. Logically, the + CONTINUATION frames are part of the HEADERS frame. + + + + + Bit 3 being set indicates that this frame contains an entire header block and is not followed by any + CONTINUATION frames. + + + A HEADERS frame without the END_HEADERS flag set MUST be followed by a + CONTINUATION frame for the same stream. A receiver MUST treat the + receipt of any other type of frame or a frame on a different stream as a connection error of type + PROTOCOL_ERROR. + + + + + Bit 4 being set indicates that the Pad Length field and any padding that it + describes is present. + + + + + Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight + fields are present; see . + + + + + + + The payload of a HEADERS frame contains a header block + fragment. A header block that does not fit within a HEADERS frame is continued in + a CONTINUATION frame. + + + + HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose + stream identifier field is 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + + The HEADERS frame changes the connection state as described in . + + + + The HEADERS frame includes optional padding. Padding fields and flags are identical to + those defined for DATA frames. + + + Prioritization information in a HEADERS frame is logically equivalent to a separate + PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in + stream prioritization when new streams are created. Priorization fields in HEADERS frames + subsequent to the first on a stream reprioritize the + stream. + +
    + +
    + + The PRIORITY frame (type=0x2) specifies the sender-advised + priority of a stream. It can be sent at any time for an existing stream, including + closed streams. This enables reprioritization of existing streams. + +
    + +
    + + The payload of a PRIORITY frame contains the following fields: + + + A single bit flag indicates that the stream dependency is exclusive, see . + + + A 31-bit stream identifier for the stream that this stream depends on, see . + + + An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256. + + + + + + The PRIORITY frame does not define any flags. + + + + The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received + with a stream identifier of 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open", + "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be + sent between consecutive frames that comprise a single header + block. Note that this frame could arrive after processing or frame sending has + completed, which would cause it to have no effect on the current stream. For a stream + that is in the "half closed (remote)" or "closed" - state, this frame can only affect + processing of the current stream and not frame transmission. + + + The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state. + This allows for the reprioritization of a group of dependent streams by altering the + priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a + closed stream risks being ignored due to the peer having discarded priority state + information for that stream. + +
    + +
    + + The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by + the initiator of a stream, it indicates that they wish to cancel the stream or that an + error condition has occurred. When sent by the receiver of a stream, it indicates that + either the receiver is rejecting the stream, requesting that the stream be cancelled, or + that an error condition has occurred. + +
    + +
    + + + The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being + terminated. + + + + The RST_STREAM frame does not define any flags. + + + + The RST_STREAM frame fully terminates the referenced stream and causes it to enter the + closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send + additional frames for that stream, with the exception of PRIORITY. However, + after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process + additional frames sent on the stream that might have been sent by the peer prior to the + arrival of the RST_STREAM. + + + + RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received + with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + + + RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM + frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + +
    + +
    + + The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints + communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is + also used to acknowledge the receipt of those parameters. Individually, a SETTINGS + parameter can also be referred to as a "setting". + + + SETTINGS parameters are not negotiated; they describe characteristics of the sending peer, + which are used by the receiving peer. Different values for the same parameter can be + advertised by each peer. For example, a client might set a high initial flow control + window, whereas a server might set a lower value to conserve resources. + + + + A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be + sent at any other time by either endpoint over the lifetime of the connection. + Implementations MUST support all of the parameters defined by this specification. + + + + Each parameter in a SETTINGS frame replaces any existing value for that parameter. + Parameters are processed in the order in which they appear, and a receiver of a SETTINGS + frame does not need to maintain any state other than the current value of its + parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by + a receiver. + + + SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS + frame defines the following flag: + + + Bit 1 being set indicates that this frame acknowledges receipt and application of the + peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST + be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value + other than 0 MUST be treated as a connection + error of type FRAME_SIZE_ERROR. For more info, see Settings Synchronization. + + + + + SETTINGS frames always apply to a connection, never a single stream. The stream + identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS + frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond + with a connection error of type + PROTOCOL_ERROR. + + + The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame + MUST be treated as a connection error of type + PROTOCOL_ERROR. + + +
    + + The payload of a SETTINGS frame consists of zero or more parameters, each consisting of + an unsigned 16-bit setting identifier and an unsigned 32-bit value. + + +
    + +
    +
    + +
    + + The following parameters are defined: + + + + Allows the sender to inform the remote endpoint of the maximum size of the header + compression table used to decode header blocks, in octets. The encoder can select + any size equal to or less than this value by using signaling specific to the + header compression format inside a header block. The initial value is 4,096 + octets. + + + + + This setting can be use to disable server + push. An endpoint MUST NOT send a PUSH_PROMISE frame if it + receives this parameter set to a value of 0. An endpoint that has both set this + parameter to 0 and had it acknowledged MUST treat the receipt of a + PUSH_PROMISE frame as a connection error of type + PROTOCOL_ERROR. + + + The initial value is 1, which indicates that server push is permitted. Any value + other than 0 or 1 MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + Indicates the maximum number of concurrent streams that the sender will allow. + This limit is directional: it applies to the number of streams that the sender + permits the receiver to create. Initially there is no limit to this value. It is + recommended that this value be no smaller than 100, so as to not unnecessarily + limit parallelism. + + + A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special + by endpoints. A zero value does prevent the creation of new streams, however this + can also happen for any limit that is exhausted with active streams. Servers + SHOULD only set a zero value for short durations; if a server does not wish to + accept requests, closing the connection could be preferable. + + + + + Indicates the sender's initial window size (in octets) for stream level flow + control. The initial value is 216-1 (65,535) octets. + + + This setting affects the window size of all streams, including existing streams, + see . + + + Values above the maximum flow control window size of 231-1 MUST + be treated as a connection error of + type FLOW_CONTROL_ERROR. + + + + + Indicates the size of the largest frame payload that the sender is willing to + receive, in octets. + + + The initial value is 214 (16,384) octets. The value advertised by + an endpoint MUST be between this initial value and the maximum allowed frame size + (224-1 or 16,777,215 octets), inclusive. Values outside this range + MUST be treated as a connection error + of type PROTOCOL_ERROR. + + + + + This advisory setting informs a peer of the maximum size of header list that the + sender is prepared to accept, in octets. The value is based on the uncompressed + size of header fields, including the length of the name and value in octets plus + an overhead of 32 octets for each header field. + + + For any given request, a lower limit than what is advertised MAY be enforced. The + initial value of this setting is unlimited. + + + + + + An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier + MUST ignore that setting. + +
    + +
    + + Most values in SETTINGS benefit from or require an understanding of when the peer has + received and applied the changed parameter values. In order to provide + such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag + is not set MUST apply the updated parameters as soon as possible upon receipt. + + + The values in the SETTINGS frame MUST be processed in the order they appear, with no + other frame processing between values. Unsupported parameters MUST be ignored. Once + all values have been processed, the recipient MUST immediately emit a SETTINGS frame + with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender + of the altered parameters can rely on the setting having been applied. + + + If the sender of a SETTINGS frame does not receive an acknowledgement within a + reasonable amount of time, it MAY issue a connection error of type + SETTINGS_TIMEOUT. + +
    +
    + +
    + + The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of + streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned + 31-bit identifier of the stream the endpoint plans to create along with a set of headers + that provide additional context for the stream. contains a + thorough description of the use of PUSH_PROMISE frames. + + +
    + +
    + + The PUSH_PROMISE frame payload has the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is only present if the PADDED flag is set. + + + A single reserved bit. + + + An unsigned 31-bit integer that identifies the stream that is reserved by the + PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next + stream sent by the sender (see new stream + identifier). + + + A header block fragment containing request header + fields. + + + Padding octets. + + + + + + The PUSH_PROMISE frame defines the following flags: + + + + Bit 3 being set indicates that this frame contains an entire header block and is not followed by any + CONTINUATION frames. + + + A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a + CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any + other type of frame or a frame on a different stream as a connection error of type + PROTOCOL_ERROR. + + + + + Bit 4 being set indicates that the Pad Length field and any padding that it + describes is present. + + + + + + + PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream + identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the + stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + + Promised streams are not required to be used in the order they are promised. The + PUSH_PROMISE only reserves stream identifiers for later use. + + + + PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the + peer endpoint is set to 0. An endpoint that has set this setting and has received + acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type + PROTOCOL_ERROR. + + + Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a + RST_STREAM referencing the promised stream identifier back to the sender of + the PUSH_PROMISE. + + + + A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for + header compression. PUSH_PROMISE also reserves a stream for later use, causing the + promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a + stream unless that stream is either "open" or "half closed (remote)"; the sender MUST + ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST + be in the "idle" state). + + + Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream + state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a + stream that is neither "open" nor "half closed (local)" as a connection error of type + PROTOCOL_ERROR. However, an endpoint that has sent + RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that + might have been created before the RST_STREAM frame is received and + processed. + + + A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a + stream that is not currently in the "idle" state) as a connection error of type + PROTOCOL_ERROR. + + + + The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical + to those defined for DATA frames. + +
    + +
    + + The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the + sender, as well as determining whether an idle connection is still functional. PING + frames can be sent from any endpoint. + +
    + +
    + + + In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. + A sender can include any value it chooses and use those bytes in any fashion. + + + Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with + the ACK flag set in response, with an identical payload. PING responses SHOULD be given + higher priority than any other frame. + + + + The PING frame defines the following flags: + + + Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST + set this flag in PING responses. An endpoint MUST NOT respond to PING frames + containing this flag. + + + + + PING frames are not associated with any individual stream. If a PING frame is received + with a stream identifier field value other than 0x0, the recipient MUST respond with a + connection error of type + PROTOCOL_ERROR. + + + Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type + FRAME_SIZE_ERROR. + + +
    + +
    + + The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this + connection. GOAWAY can be sent by either the client or the server. Once sent, the sender + will ignore frames sent on any new streams with identifiers higher than the included last + stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the + connection, although a new connection can be established for new streams. + + + The purpose of this frame is to allow an endpoint to gracefully stop accepting new + streams, while still finishing processing of previously established streams. This enables + administrative actions, like server maintainance. + + + There is an inherent race condition between an endpoint starting new streams and the + remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream + identifier of the last peer-initiated stream which was or might be processed on the + sending endpoint in this connection. For instance, if the server sends a GOAWAY frame, + the identified stream is the highest numbered stream initiated by the client. + + + If the receiver of the GOAWAY has sent data on streams with a higher stream identifier + than what is indicated in the GOAWAY frame, those streams are not or will not be + processed. The receiver of the GOAWAY frame can treat the streams as though they had + never been created at all, thereby allowing those streams to be retried later on a new + connection. + + + Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote + can know whether a stream has been partially processed or not. For example, if an HTTP + client sends a POST at the same time that a server closes a connection, the client cannot + know if the server started to process that POST request if the server does not send a + GOAWAY frame to indicate what streams it might have acted on. + + + An endpoint might choose to close a connection without sending GOAWAY for misbehaving + peers. + + +
    + +
    + + The GOAWAY frame does not define any flags. + + + The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat + a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type + PROTOCOL_ERROR. + + + The last stream identifier in the GOAWAY frame contains the highest numbered stream + identifier for which the sender of the GOAWAY frame might have taken some action on, or + might yet take action on. All streams up to and including the identified stream might + have been processed in some way. The last stream identifier can be set to 0 if no streams + were processed. + + + In this context, "processed" means that some data from the stream was passed to some + higher layer of software that might have taken some action as a result. + + + If a connection terminates without a GOAWAY frame, the last stream identifier is + effectively the highest possible stream identifier. + + + On streams with lower or equal numbered identifiers that were not closed completely prior + to the connection being closed, re-attempting requests, transactions, or any protocol + activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or + DELETE. Any protocol activity that uses higher numbered streams can be safely retried + using a new connection. + + + Activity on streams numbered lower or equal to the last stream identifier might still + complete successfully. The sender of a GOAWAY frame might gracefully shut down a + connection by sending a GOAWAY frame, maintaining the connection in an open state until + all in-progress streams complete. + + + An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an + endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could + subsequently encounter an condition that requires immediate termination of the connection. + The last stream identifier from the last GOAWAY frame received indicates which streams + could have been acted upon. Endpoints MUST NOT increase the value they send in the last + stream identifier, since the peers might already have retried unprocessed requests on + another connection. + + + A client that is unable to retry requests loses all requests that are in flight when the + server closes the connection. This is especially true for intermediaries that might + not be serving clients using HTTP/2. A server that is attempting to gracefully shut down + a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to + 231-1 and a NO_ERROR code. This signals to the client that + a shutdown is imminent and that no further requests can be initiated. After waiting at + least one round trip time, the server can send another GOAWAY frame with an updated last + stream identifier. This ensures that a connection can be cleanly shut down without losing + requests. + + + + After sending a GOAWAY frame, the sender can discard frames for streams with identifiers + higher than the identified last stream. However, any frames that alter connection state + cannot be completely ignored. For instance, HEADERS, + PUSH_PROMISE and CONTINUATION frames MUST be minimally + processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow + control window. Failure to process these frames can cause flow control or header + compression state to become unsynchronized. + + + + The GOAWAY frame also contains a 32-bit error code that + contains the reason for closing the connection. + + + Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug + data is intended for diagnostic purposes only and carries no semantic value. Debug + information could contain security- or privacy-sensitive data. Logged or otherwise + persistently stored debug data MUST have adequate safeguards to prevent unauthorized + access. + +
    + +
    + + The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview. + + + Flow control operates at two levels: on each individual stream and on the entire + connection. + + + Both types of flow control are hop-by-hop; that is, only between the two endpoints. + Intermediaries do not forward WINDOW_UPDATE frames between dependent connections. + However, throttling of data transfer by any receiver can indirectly cause the propagation + of flow control information toward the original sender. + + + Flow control only applies to frames that are identified as being subject to flow control. + Of the frame types defined in this document, this includes only DATA frames. + Frames that are exempt from flow control MUST be accepted and processed, unless the + receiver is unable to assign resources to handling the frame. A receiver MAY respond with + a stream error or connection error of type + FLOW_CONTROL_ERROR if it is unable to accept a frame. + +
    + +
    + + The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer + indicating the number of octets that the sender can transmit in addition to the existing + flow control window. The legal range for the increment to the flow control window is 1 to + 231-1 (0x7fffffff) octets. + + + The WINDOW_UPDATE frame does not define any flags. + + + The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the + former case, the frame's stream identifier indicates the affected stream; in the latter, + the value "0" indicates that the entire connection is the subject of the frame. + + + A receiver MUST treat the receipt of a WINDOW_UPDATE frame with an flow control window + increment of 0 as a stream error of type + PROTOCOL_ERROR; errors on the connection flow control window MUST be + treated as a connection error. + + + WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag. + This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)" + or "closed" stream. A receiver MUST NOT treat this as an error, see . + + + A receiver that receives a flow controlled frame MUST always account for its contribution + against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the + frame is in error. Since the sender counts the frame toward the flow control window, if + the receiver does not, the flow control window at sender and receiver can become + different. + + +
    + + Flow control in HTTP/2 is implemented using a window kept by each sender on every + stream. The flow control window is a simple integer value that indicates how many octets + of data the sender is permitted to transmit; as such, its size is a measure of the + buffering capacity of the receiver. + + + Two flow control windows are applicable: the stream flow control window and the + connection flow control window. The sender MUST NOT send a flow controlled frame with a + length that exceeds the space available in either of the flow control windows advertised + by the receiver. Frames with zero length with the END_STREAM flag set (that is, an + empty DATA frame) MAY be sent if there is no available space in either + flow control window. + + + For flow control calculations, the 9 octet frame header is not counted. + + + After sending a flow controlled frame, the sender reduces the space available in both + windows by the length of the transmitted frame. + + + The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up + space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream + and connection level flow control windows. + + + A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the + amount specified in the frame. + + + A sender MUST NOT allow a flow control window to exceed 231-1 octets. + If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this + maximum it MUST terminate either the stream or the connection, as appropriate. For + streams, the sender sends a RST_STREAM with the error code of + FLOW_CONTROL_ERROR code; for the connection, a GOAWAY + frame with a FLOW_CONTROL_ERROR code. + + + Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are + completely asynchronous with respect to each other. This property allows a receiver to + aggressively update the window size kept by the sender to prevent streams from stalling. + +
    + +
    + + When an HTTP/2 connection is first established, new streams are created with an initial + flow control window size of 65,535 octets. The connection flow control window is 65,535 + octets. Both endpoints can adjust the initial window size for new streams by including + a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS + frame that forms part of the connection preface. The connection flow control window can + only be changed using WINDOW_UPDATE frames. + + + Prior to receiving a SETTINGS frame that sets a value for + SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default + initial window size when sending flow controlled frames. Similarly, the connection flow + control window is set to the default initial window size until a WINDOW_UPDATE frame is + received. + + + A SETTINGS frame can alter the initial flow control window size for all + current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, + a receiver MUST adjust the size of all stream flow control windows that it maintains by + the difference between the new value and the old value. + + + A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in + a flow control window to become negative. A sender MUST track the negative flow control + window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE + frames that cause the flow control window to become positive. + + + For example, if the client sends 60KB immediately on connection establishment, and the + server sets the initial window size to be 16KB, the client will recalculate the + available flow control window to be -44KB on receipt of the SETTINGS + frame. The client retains a negative flow control window until WINDOW_UPDATE frames + restore the window to being positive, after which the client can resume sending. + + + A SETTINGS frame cannot alter the connection flow control window. + + + An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that + causes any flow control window to exceed the maximum size as a connection error of type + FLOW_CONTROL_ERROR. + +
    + +
    + + A receiver that wishes to use a smaller flow control window than the current size can + send a new SETTINGS frame. However, the receiver MUST be prepared to + receive data that exceeds this window size, since the sender might send data that + exceeds the lower limit prior to processing the SETTINGS frame. + + + After sending a SETTINGS frame that reduces the initial flow control window size, a + receiver has two options for handling streams that exceed flow control limits: + + + The receiver can immediately send RST_STREAM with + FLOW_CONTROL_ERROR error code for the affected streams. + + + The receiver can accept the streams and tolerate the resulting head of line + blocking, sending WINDOW_UPDATE frames as it consumes data. + + + +
    +
    + +
    + + The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can + be sent on an existing stream, as long as the preceding frame is on the same stream and is + a HEADERS, PUSH_PROMISE or CONTINUATION frame without the + END_HEADERS flag set. + + +
    + +
    + + The CONTINUATION frame payload contains a header block + fragment. + + + + The CONTINUATION frame defines the following flag: + + + + Bit 3 being set indicates that this frame ends a header + block. + + + If the END_HEADERS bit is not set, this frame MUST be followed by another + CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or + a frame on a different stream as a connection + error of type PROTOCOL_ERROR. + + + + + + + The CONTINUATION frame changes the connection state as defined in . + + + + CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received + whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. + + + + A CONTINUATION frame MUST be preceded by a HEADERS, + PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A + recipient that observes violation of this rule MUST respond with a connection error of type + PROTOCOL_ERROR. + +
    +
    + +
    + + Error codes are 32-bit fields that are used in RST_STREAM and + GOAWAY frames to convey the reasons for the stream or connection error. + + + + Error codes share a common code space. Some error codes apply only to either streams or the + entire connection and have no defined semantics in the other context. + + + + The following error codes are defined: + + + The associated condition is not as a result of an error. For example, a + GOAWAY might include this code to indicate graceful shutdown of a + connection. + + + The endpoint detected an unspecific protocol error. This error is for use when a more + specific error code is not available. + + + The endpoint encountered an unexpected internal error. + + + The endpoint detected that its peer violated the flow control protocol. + + + The endpoint sent a SETTINGS frame, but did not receive a response in a + timely manner. See Settings Synchronization. + + + The endpoint received a frame after a stream was half closed. + + + The endpoint received a frame with an invalid size. + + + The endpoint refuses the stream prior to performing any application processing, see + for details. + + + Used by the endpoint to indicate that the stream is no longer needed. + + + The endpoint is unable to maintain the header compression context for the connection. + + + The connection established in response to a CONNECT + request was reset or abnormally closed. + + + The endpoint detected that its peer is exhibiting a behavior that might be generating + excessive load. + + + The underlying transport has properties that do not meet minimum security + requirements (see ). + + + + + Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be + treated by an implementation as being equivalent to INTERNAL_ERROR. + +
    + +
    + + HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means + that, from the application perspective, the features of the protocol are largely + unchanged. To achieve this, all request and response semantics are preserved, although the + syntax of conveying those semantics has changed. + + + Thus, the specification and requirements of HTTP/1.1 Semantics and Content , Conditional Requests , Range Requests , Caching and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax + and Routing , such as the HTTP and HTTPS URI schemes, are also + applicable in HTTP/2, but the expression of those semantics for this protocol are defined + in the sections below. + + +
    + + A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on + the same stream as the request. + + + An HTTP message (request or response) consists of: + + + for a response only, zero or more HEADERS frames (each followed by zero + or more CONTINUATION frames) containing the message headers of + informational (1xx) HTTP responses (see and ), + and + + + one HEADERS frame (followed by zero or more CONTINUATION + frames) containing the message headers (see ), and + + + zero or more DATA frames containing the message payload (see ), and + + + optionally, one HEADERS frame, followed by zero or more + CONTINUATION frames containing the trailer-part, if present (see ). + + + The last frame in the sequence bears an END_STREAM flag, noting that a + HEADERS frame bearing the END_STREAM flag can be followed by + CONTINUATION frames that carry any remaining portions of the header block. + + + Other frames (from any stream) MUST NOT occur between either HEADERS frame + and any CONTINUATION frames that might follow. + + + + Trailing header fields are carried in a header block that also terminates the stream. + That is, a sequence starting with a HEADERS frame, followed by zero or more + CONTINUATION frames, where the HEADERS frame bears an + END_STREAM flag. Header blocks after the first that do not terminate the stream are not + part of an HTTP request or response. + + + A HEADERS frame (and associated CONTINUATION frames) can + only appear at the start or end of a stream. An endpoint that receives a + HEADERS frame without the END_STREAM flag set after receiving a final + (non-informational) status code MUST treat the corresponding request or response as malformed. + + + + An HTTP request/response exchange fully consumes a single stream. A request starts with + the HEADERS frame that puts the stream into an "open" state. The request + ends with a frame bearing END_STREAM, which causes the stream to become "half closed + (local)" for the client and "half closed (remote)" for the server. A response starts with + a HEADERS frame and ends with a frame bearing END_STREAM, which places the + stream in the "closed" state. + + + +
    + + HTTP/2 removes support for the 101 (Switching Protocols) informational status code + (). + + + The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol. + Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate + their use (see ). + +
    + +
    + + HTTP header fields carry information as a series of key-value pairs. For a listing of + registered HTTP headers, see the Message Header Field Registry maintained at . + + +
    + + While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the + status code for the response, HTTP/2 uses special pseudo-header fields beginning with + ':' character (ASCII 0x3a) for this purpose. + + + Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate + pseudo-header fields other than those defined in this document. + + + Pseudo-header fields are only valid in the context in which they are defined. + Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header + fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST + NOT appear in trailers. Endpoints MUST treat a request or response that contains + undefined or invalid pseudo-header fields as malformed. + + + Just as in HTTP/1.x, header field names are strings of ASCII characters that are + compared in a case-insensitive fashion. However, header field names MUST be converted + to lowercase prior to their encoding in HTTP/2. A request or response containing + uppercase header field names MUST be treated as malformed. + + + All pseudo-header fields MUST appear in the header block before regular header fields. + Any request or response that contains a pseudo-header field that appears in a header + block after a regular header field MUST be treated as malformed. + +
    + +
    + + HTTP/2 does not use the Connection header field to + indicate connection-specific header fields; in this protocol, connection-specific + metadata is conveyed by other means. An endpoint MUST NOT generate a HTTP/2 message + containing connection-specific header fields; any message containing + connection-specific header fields MUST be treated as malformed. + + + This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need + to remove any header fields nominated by the Connection header field, along with the + Connection header field itself. Such intermediaries SHOULD also remove other + connection-specific header fields, such as Keep-Alive, Proxy-Connection, + Transfer-Encoding and Upgrade, even if they are not nominated by Connection. + + + One exception to this is the TE header field, which MAY be present in an HTTP/2 + request, but when it is MUST NOT contain any value other than "trailers". + + + + + HTTP/2 purposefully does not support upgrade to another protocol. The handshake + methods described in are believed sufficient to + negotiate the use of alternative protocols. + + + +
    + +
    + + The following pseudo-header fields are defined for HTTP/2 requests: + + + + The :method pseudo-header field includes the HTTP + method (). + + + + + The :scheme pseudo-header field includes the scheme + portion of the target URI (). + + + :scheme is not restricted to http and https schemed URIs. A + proxy or gateway can translate requests for non-HTTP schemes, enabling the use + of HTTP to interact with non-HTTP services. + + + + + The :authority pseudo-header field includes the + authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http + or https schemed URIs. + + + To ensure that the HTTP/1.1 request line can be reproduced accurately, this + pseudo-header field MUST be omitted when translating from an HTTP/1.1 request + that has a request target in origin or asterisk form (see ). Clients that generate + HTTP/2 requests directly SHOULD use the :authority pseudo-header + field instead of the Host header field. An + intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by + copying the value of the :authority pseudo-header + field. + + + + + The :path pseudo-header field includes the path and + query parts of the target URI (the path-absolute + production from and optionally a '?' character + followed by the query production, see and ). A request in asterisk form includes the value '*' for the + :path pseudo-header field. + + + This pseudo-header field MUST NOT be empty for http + or https URIs; http or + https URIs that do not contain a path component + MUST include a value of '/'. The exception to this rule is an OPTIONS request + for an http or https + URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ). + + + + + + All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory + pseudo-header fields is malformed. + + + HTTP/2 does not define a way to carry the version identifier that is included in the + HTTP/1.1 request line. + +
    + +
    + + For HTTP/2 responses, a single :status pseudo-header + field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all + responses, otherwise the response is malformed. + + + HTTP/2 does not define a way to carry the version or reason phrase that is included in + an HTTP/1.1 status line. + +
    + +
    + + The Cookie header field can carry a significant amount of + redundant data. + + + The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). + This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from + being separated into different name-value pairs. This can significantly reduce + compression efficiency as individual cookie-pairs are updated. + + + To allow for better compression efficiency, the Cookie header field MAY be split into + separate header fields, each with one or more cookie-pairs. If there are multiple + Cookie header fields after decompression, these MUST be concatenated into a single + octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") + before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a + generic HTTP server application. + +
    + + Therefore, the following two lists of Cookie header fields are semantically + equivalent. + + +
    +
    + +
    + + A malformed request or response is one that is an otherwise valid sequence of HTTP/2 + frames, but is otherwise invalid due to the presence of extraneous frames, prohibited + header fields, the absence of mandatory header fields, or the inclusion of uppercase + header field names. + + + A request or response that includes an entity body can include a content-length header field. A request or response is also + malformed if the value of a content-length header field + does not equal the sum of the DATA frame payload lengths that form the + body. A response that is defined to have no payload, as described in , can have a non-zero + content-length header field, even though no content is + included in DATA frames. + + + Intermediaries that process HTTP requests or responses (i.e., any intermediary not + acting as a tunnel) MUST NOT forward a malformed request or response. Malformed + requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR. + + + For malformed requests, a server MAY send an HTTP response prior to closing or + resetting the stream. Clients MUST NOT accept a malformed response. Note that these + requirements are intended to protect against several types of common attacks against + HTTP; they are deliberately strict, because being permissive can expose + implementations to these vulnerabilities. + +
    +
    + +
    + + This section shows HTTP/1.1 requests and responses, with illustrations of equivalent + HTTP/2 requests and responses. + + + An HTTP GET request includes request header fields and no body and is therefore + transmitted as a single HEADERS frame, followed by zero or more + CONTINUATION frames containing the serialized block of request header + fields. The HEADERS frame in the following has both the END_HEADERS and + END_STREAM flags set; no CONTINUATION frames are sent: + + +
    + + END_STREAM + Accept: image/jpeg + END_HEADERS + :method = GET + :scheme = https + :path = /resource + host = example.org + accept = image/jpeg +]]> +
    + + + Similarly, a response that includes only response header fields is transmitted as a + HEADERS frame (again, followed by zero or more + CONTINUATION frames) containing the serialized block of response header + fields. + + +
    + + END_STREAM + Expires: Thu, 23 Jan ... + END_HEADERS + :status = 304 + etag = "xyzzy" + expires = Thu, 23 Jan ... +]]> +
    + + + An HTTP POST request that includes request header fields and payload data is transmitted + as one HEADERS frame, followed by zero or more + CONTINUATION frames containing the request header fields, followed by one + or more DATA frames, with the last CONTINUATION (or + HEADERS) frame having the END_HEADERS flag set and the final + DATA frame having the END_STREAM flag set: + + +
    + - END_STREAM + Content-Type: image/jpeg - END_HEADERS + Content-Length: 123 :method = POST + :path = /resource + {binary data} :scheme = https + + CONTINUATION + + END_HEADERS + content-type = image/jpeg + host = example.org + content-length = 123 + + DATA + + END_STREAM + {binary data} +]]> + + Note that data contributing to any given header field could be spread between header + block fragments. The allocation of header fields to frames in this example is + illustrative only. + +
    + + + A response that includes header fields and payload data is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames, followed by one or more DATA frames, with the last + DATA frame in the sequence having the END_STREAM flag set: + + +
    + - END_STREAM + Content-Length: 123 + END_HEADERS + :status = 200 + {binary data} content-type = image/jpeg + content-length = 123 + + DATA + + END_STREAM + {binary data} +]]> +
    + + + Trailing header fields are sent as a header block after both the request or response + header block and all the DATA frames have been sent. The + HEADERS frame starting the trailers header block has the END_STREAM flag + set. + + +
    + - END_STREAM + Transfer-Encoding: chunked + END_HEADERS + Trailer: Foo :status = 200 + content-length = 123 + 123 content-type = image/jpeg + {binary data} trailer = Foo + 0 + Foo: bar DATA + - END_STREAM + {binary data} + + HEADERS + + END_STREAM + + END_HEADERS + foo = bar +]]> +
    + + +
    + + An informational response using a 1xx status code other than 101 is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames: + + - END_STREAM + + END_HEADERS + :status = 103 + extension-field = bar +]]> +
    +
    + +
    + + In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error + occurs, because there is no means to determine the nature of the error. It is possible + that some server processing occurred prior to the error, which could result in + undesirable effects if the request were reattempted. + + + HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has + not been processed: + + + The GOAWAY frame indicates the highest stream number that might have + been processed. Requests on streams with higher numbers are therefore guaranteed to + be safe to retry. + + + The REFUSED_STREAM error code can be included in a + RST_STREAM frame to indicate that the stream is being closed prior to + any processing having occurred. Any request that was sent on the reset stream can + be safely retried. + + + + + Requests that have not been processed have not failed; clients MAY automatically retry + them, even those with non-idempotent methods. + + + A server MUST NOT indicate that a stream has not been processed unless it can guarantee + that fact. If frames that are on a stream are passed to the application layer for any + stream, then REFUSED_STREAM MUST NOT be used for that stream, and a + GOAWAY frame MUST include a stream identifier that is greater than or + equal to the given stream identifier. + + + In addition to these mechanisms, the PING frame provides a way for a + client to easily test a connection. Connections that remain idle can become broken as + some middleboxes (for instance, network address translators, or load balancers) silently + discard connection bindings. The PING frame allows a client to safely + test whether a connection is still active without sending a request. + +
    +
    + +
    + + HTTP/2 allows a server to pre-emptively send (or "push") responses (along with + corresponding "promised" requests) to a client in association with a previous + client-initiated request. This can be useful when the server knows the client will need + to have those responses available in order to fully process the response to the original + request. + + + + Pushing additional message exchanges in this fashion is optional, and is negotiated + between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set + to 0 to indicate that server push is disabled. + + + Promised requests MUST be cacheable (see ), MUST be safe (see ) and MUST NOT include a request body. Clients that receive a + promised request that is not cacheable, unsafe or that includes a request body MUST + reset the stream with a stream error of type + PROTOCOL_ERROR. + + + Pushed responses that are cacheable (see ) can be stored by the client, if it implements a HTTP + cache. Pushed responses are considered successfully validated on the origin server (e.g., + if the "no-cache" cache response directive is present) while the stream identified by the + promised stream ID is still open. + + + Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY + be made available to the application separately. + + + An intermediary can receive pushes from the server and choose not to forward them on to + the client. In other words, how to make use of the pushed information is up to that + intermediary. Equally, the intermediary might choose to make additional pushes to the + client, without any action taken by the server. + + + A client cannot push. Thus, servers MUST treat the receipt of a + PUSH_PROMISE frame as a connection + error of type PROTOCOL_ERROR. Clients MUST reject any attempt to + change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating + the message as a connection error of type + PROTOCOL_ERROR. + + +
    + + Server push is semantically equivalent to a server responding to a request; however, in + this case that request is also sent by the server, as a PUSH_PROMISE + frame. + + + The PUSH_PROMISE frame includes a header block that contains a complete + set of request header fields that the server attributes to the request. It is not + possible to push a response to a request that includes a request body. + + + + Pushed responses are always associated with an explicit request from the client. The + PUSH_PROMISE frames sent by the server are sent on that explicit + request's stream. The PUSH_PROMISE frame also includes a promised stream + identifier, chosen from the stream identifiers available to the server (see ). + + + + The header fields in PUSH_PROMISE and any subsequent + CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in + the :method header field that is safe and cacheable. If a + client receives a PUSH_PROMISE that does not include a complete and valid + set of header fields, or the :method header field identifies + a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR. + + + + The server SHOULD send PUSH_PROMISE () + frames prior to sending any frames that reference the promised responses. This avoids a + race where clients issue requests prior to receiving any PUSH_PROMISE + frames. + + + For example, if the server receives a request for a document containing embedded links + to multiple image files, and the server chooses to push those additional images to the + client, sending push promises before the DATA frames that contain the + image links ensures that the client is able to see the promises before discovering + embedded links. Similarly, if the server pushes responses referenced by the header block + (for instance, in Link header fields), sending the push promises before sending the + header block ensures that clients do not request them. + + + + PUSH_PROMISE frames MUST NOT be sent by the client. + + + PUSH_PROMISE frames can be sent by the server in response to any + client-initiated stream, but the stream MUST be in either the "open" or "half closed + (remote)" state with respect to the server. PUSH_PROMISE frames are + interspersed with the frames that comprise a response, though they cannot be + interspersed with HEADERS and CONTINUATION frames that + comprise a single header block. + + + Sending a PUSH_PROMISE frame creates a new stream and puts the stream + into the “reserved (local)” state for the server and the “reserved (remote)” state for + the client. + +
    + +
    + + After sending the PUSH_PROMISE frame, the server can begin delivering the + pushed response as a response on a server-initiated + stream that uses the promised stream identifier. The server uses this stream to + transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" + to the client after the initial HEADERS frame is sent. + + + + Once a client receives a PUSH_PROMISE frame and chooses to accept the + pushed response, the client SHOULD NOT issue any requests for the promised response + until after the promised stream has closed. + + + + If the client determines, for any reason, that it does not wish to receive the pushed + response from the server, or if the server takes too long to begin sending the promised + response, the client can send an RST_STREAM frame, using either the + CANCEL or REFUSED_STREAM codes, and referencing the pushed + stream's identifier. + + + A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the + number of responses that can be concurrently pushed by a server. Advertising a + SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by + preventing the server from creating the necessary streams. This does not prohibit a + server from sending PUSH_PROMISE frames; clients need to reset any + promised streams that are not wanted. + + + + Clients receiving a pushed response MUST validate that either the server is + authoritative (see ), or the proxy that provided the pushed + response is configured for the corresponding request. For example, a server that offers + a certificate for only the example.com DNS-ID or Common Name + is not permitted to push a response for https://www.example.org/doc. + + + The response for a PUSH_PROMISE stream begins with a + HEADERS frame, which immediately puts the stream into the “half closed + (remote)” state for the server and “half closed (local)” state for the client, and ends + with a frame bearing END_STREAM, which places the stream in the "closed" state. + + + The client never sends a frame with the END_STREAM flag for a server push. + + + +
    + +
    + +
    + + In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. + CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin + server for the purposes of interacting with https resources. + + + In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to + a remote host, for similar purposes. The HTTP header field mapping works as defined in + Request Header Fields, with a few + differences. Specifically: + + + The :method header field is set to CONNECT. + + + The :scheme and :path header + fields MUST be omitted. + + + The :authority header field contains the host and port to + connect to (equivalent to the authority-form of the request-target of CONNECT + requests, see ). + + + + + A proxy that supports CONNECT establishes a TCP connection to + the server identified in the :authority header field. Once + this connection is successfully established, the proxy sends a HEADERS + frame containing a 2xx series status code to the client, as defined in . + + + After the initial HEADERS frame sent by each peer, all subsequent + DATA frames correspond to data sent on the TCP connection. The payload of + any DATA frames sent by the client is transmitted by the proxy to the TCP + server; data received from the TCP server is assembled into DATA frames by + the proxy. Frame types other than DATA or stream management frames + (RST_STREAM, WINDOW_UPDATE, and PRIORITY) + MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. + + + The TCP connection can be closed by either peer. The END_STREAM flag on a + DATA frame is treated as being equivalent to the TCP FIN bit. A client is + expected to send a DATA frame with the END_STREAM flag set after receiving + a frame bearing the END_STREAM flag. A proxy that receives a DATA frame + with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP + segment. A proxy that receives a TCP segment with the FIN bit set sends a + DATA frame with the END_STREAM flag set. Note that the final TCP segment + or DATA frame could be empty. + + + A TCP connection error is signaled with RST_STREAM. A proxy treats any + error in the TCP connection, which includes receiving a TCP segment with the RST bit set, + as a stream error of type + CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the + RST bit set if it detects an error with the stream or the HTTP/2 connection. + +
    +
    + +
    + + This section outlines attributes of the HTTP protocol that improve interoperability, reduce + exposure to known security vulnerabilities, or reduce the potential for implementation + variation. + + +
    + + HTTP/2 connections are persistent. For best performance, it is expected clients will not + close connections until it is determined that no further communication with a server is + necessary (for example, when a user navigates away from a particular web page), or until + the server closes the connection. + + + Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair, + where host is derived from a URI, a selected alternative + service, or a configured proxy. + + + A client can create additional connections as replacements, either to replace connections + that are near to exhausting the available stream + identifier space, to refresh the keying material for a TLS connection, or to + replace connections that have encountered errors. + + + A client MAY open multiple connections to the same IP address and TCP port using different + Server Name Indication values or to provide different TLS + client certificates, but SHOULD avoid creating multiple connections with the same + configuration. + + + Servers are encouraged to maintain open connections for as long as possible, but are + permitted to terminate idle connections if necessary. When either endpoint chooses to + close the transport-layer TCP connection, the terminating endpoint SHOULD first send a + GOAWAY () frame so that both endpoints can reliably + determine whether previously sent frames have been processed and gracefully complete or + terminate any necessary remaining tasks. + + +
    + + Connections that are made to an origin servers, either directly or through a tunnel + created using the CONNECT method MAY be reused for + requests with multiple different URI authority components. A connection can be reused + as long as the origin server is authoritative. For + http resources, this depends on the host having resolved to + the same IP address. + + + For https resources, connection reuse additionally depends + on having a certificate that is valid for the host in the URI. An origin server might + offer a certificate with multiple subjectAltName attributes, + or names with wildcards, one of which is valid for the authority in the URI. For + example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for + requests to URIs starting with https://a.example.com/ and + https://b.example.com/. + + + In some deployments, reusing a connection for multiple origins can result in requests + being directed to the wrong origin server. For example, TLS termination might be + performed by a middlebox that uses the TLS Server Name Indication + (SNI) extension to select an origin server. This means that it is possible + for clients to send confidential information to servers that might not be the intended + target for the request, even though the server is otherwise authoritative. + + + A server that does not wish clients to reuse connections can indicate that it is not + authoritative for a request by sending a 421 (Misdirected Request) status code in response + to the request (see ). + + + A client that is configured to use a proxy over HTTP/2 directs requests to that proxy + through a single connection. That is, all requests sent via a proxy reuse the + connection to the proxy. + +
    + +
    + + The 421 (Misdirected Request) status code indicates that the request was directed at a + server that is not able to produce a response. This can be sent by a server that is not + configured to produce responses for the combination of scheme and authority that are + included in the request URI. + + + Clients receiving a 421 (Misdirected Request) response from a server MAY retry the + request - whether the request method is idempotent or not - over a different connection. + This is possible if a connection is reused () or if an alternative + service is selected (). + + + This status code MUST NOT be generated by proxies. + + + A 421 response is cacheable by default; i.e., unless otherwise indicated by the method + definition or explicit cache controls (see ). + +
    +
    + +
    + + Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over + TLS. The general TLS usage guidance in SHOULD be followed, with + some additional restrictions that are specific to HTTP/2. + + + + An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on + feature set and cipher suite described in this section. Due to implementation + limitations, it might not be possible to fail TLS negotiation. An endpoint MUST + immediately terminate an HTTP/2 connection that does not meet these minimum requirements + with a connection error of type + INADEQUATE_SECURITY. + + +
    + + The TLS implementation MUST support the Server Name Indication + (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when + negotiating TLS. + + + The TLS implementation MUST disable compression. TLS compression can lead to the + exposure of information that would not otherwise be revealed . + Generic compression is unnecessary since HTTP/2 provides compression features that are + more aware of context and therefore likely to be more appropriate for use for + performance, security or other reasons. + + + The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS + renegotiation as a connection error of type + PROTOCOL_ERROR. Note that disabling renegotiation can result in + long-lived connections becoming unusable due to limits on the number of messages the + underlying cipher suite can encipher. + + + A client MAY use renegotiation to provide confidentiality protection for client + credentials offered in the handshake, but any renegotiation MUST occur prior to sending + the connection preface. A server SHOULD request a client certificate if it sees a + renegotiation request immediately after establishing a connection. + + + This effectively prevents the use of renegotiation in response to a request for a + specific protected resource. A future specification might provide a way to support this + use case. + +
    + +
    + + The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST + only be used with cipher suites that have ephemeral key exchange, such as the ephemeral Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST + have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE. + Clients MUST accept DHE sizes of up to 4096 bits. HTTP MUST NOT be used with cipher + suites that use stream or block ciphers. Authenticated Encryption with Additional Data + (AEAD) modes, such as the Galois Counter Model (GCM) mode for + AES are acceptable. + + + The effect of these restrictions is that TLS 1.2 implementations could have + non-intersecting sets of available cipher suites, since these prevent the use of the + cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of + HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P256 . + + + Clients MAY advertise support of cipher suites that are prohibited by the above + restrictions in order to allow for connection to servers that do not support HTTP/2. + This enables a fallback to protocols without these constraints without the additional + latency imposed by using a separate connection for fallback. + +
    +
    +
    + +
    +
    + + HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is + authoritative in providing a given response, see . This relies on local name resolution for the "http" + URI scheme, and the authenticated server identity for the "https" scheme (see ). + +
    + +
    + + In a cross-protocol attack, an attacker causes a client to initiate a transaction in one + protocol toward a server that understands a different protocol. An attacker might be able + to cause the transaction to appear as valid transaction in the second protocol. In + combination with the capabilities of the web context, this can be used to interact with + poorly protected servers in private networks. + + + Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient + protection against cross protocol attacks. ALPN provides a positive indication that a + server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based + protocols. + + + The encryption in TLS makes it difficult for attackers to control the data which could be + used in a cross-protocol attack on a cleartext protocol. + + + The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks. + The connection preface contains a string that is + designed to confuse HTTP/1.1 servers, but no special protection is offered for other + protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an + Upgrade header field in addition to the client connection preface could be exposed to a + cross-protocol attack. + +
    + +
    + + HTTP/2 header field names and values are encoded as sequences of octets with a length + prefix. This enables HTTP/2 to carry any string of octets as the name or value of a + header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1 + directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might + exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal + header fields, extra header fields, or even new messages that are entirely falsified. + + + Header field names or values that contain characters not permitted by HTTP/1.1, including + carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an + intermediary, as stipulated in . + + + Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker. + Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values. + +
    + +
    + + Pushed responses do not have an explicit request from the client; the request + is provided by the server in the PUSH_PROMISE frame. + + + Caching responses that are pushed is possible based on the guidance provided by the origin + server in the Cache-Control header field. However, this can cause issues if a single + server hosts more than one tenant. For example, a server might offer multiple users each + a small portion of its URI space. + + + Where multiple tenants share space on the same server, that server MUST ensure that + tenants are not able to push representations of resources that they do not have authority + over. Failure to enforce this would allow a tenant to provide a representation that would + be served out of cache, overriding the actual representation that the authoritative tenant + provides. + + + Pushed responses for which an origin server is not authoritative (see + ) are never cached or used. + +
    + +
    + + An HTTP/2 connection can demand a greater commitment of resources to operate than a + HTTP/1.1 connection. The use of header compression and flow control depend on a + commitment of resources for storing a greater amount of state. Settings for these + features ensure that memory commitments for these features are strictly bounded. + + + The number of PUSH_PROMISE frames is not constrained in the same fashion. + A client that accepts server push SHOULD limit the number of streams it allows to be in + the "reserved (remote)" state. Excessive number of server push streams can be treated as + a stream error of type + ENHANCE_YOUR_CALM. + + + Processing capacity cannot be guarded as effectively as state capacity. + + + The SETTINGS frame can be abused to cause a peer to expend additional + processing time. This might be done by pointlessly changing SETTINGS parameters, setting + multiple undefined parameters, or changing the same setting multiple times in the same + frame. WINDOW_UPDATE or PRIORITY frames can be abused to + cause an unnecessary waste of resources. + + + Large numbers of small or empty frames can be abused to cause a peer to expend time + processing frame headers. Note however that some uses are entirely legitimate, such as + the sending of an empty DATA frame to end a stream. + + + Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses. + + + Limits in SETTINGS parameters cannot be reduced instantaneously, which + leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In + particular, immediately after establishing a connection, limits set by a server are not + known to clients and could be exceeded without being an obvious protocol violation. + + + All these features - i.e., SETTINGS changes, small frames, header + compression - have legitimate uses. These features become a burden only when they are + used unnecessarily or to excess. + + + An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of + service attack. Implementations SHOULD track the use of these features and set limits on + their use. An endpoint MAY treat activity that is suspicious as a connection error of type + ENHANCE_YOUR_CALM. + + +
    + + A large header block can cause an implementation to + commit a large amount of state. Header fields that are critical for routing can appear + toward the end of a header block, which prevents streaming of header fields to their + ultimate destination. For this an other reasons, such as ensuring cache correctness, + means that an endpoint might need to buffer the entire header block. Since there is no + hard limit to the size of a header block, some endpoints could be forced commit a large + amount of available memory for header fields. + + + An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE to advise peers of + limits that might apply on the size of header blocks. This setting is only advisory, so + endpoints MAY choose to send header blocks that exceed this limit and risk having the + request or response being treated as malformed. This setting specific to a connection, + so any request or response could encounter a hop with a lower, unknown limit. An + intermediary can attempt to avoid this problem by passing on values presented by + different peers, but they are not obligated to do so. + + + A server that receives a larger header block than it is willing to handle can send an + HTTP 431 (Request Header Fields Too Large) status code . A + client can discard responses that it cannot process. The header block MUST be processed + to ensure a consistent connection state, unless the connection is closed. + +
    +
    + +
    + + HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover + secret data when it is compressed in the same context as data under attacker control. + + + There are demonstrable attacks on compression that exploit the characteristics of the web + (e.g., ). The attacker induces multiple requests containing + varying plaintext, observing the length of the resulting ciphertext in each, which + reveals a shorter length when a guess about the secret is correct. + + + Implementations communicating on a secure channel MUST NOT compress content that includes + both confidential and attacker-controlled data unless separate compression dictionaries + are used for each source of data. Compression MUST NOT be used if the source of data + cannot be reliably determined. Generic stream compression, such as that provided by TLS + MUST NOT be used with HTTP/2 (). + + + Further considerations regarding the compression of header fields are described in . + +
    + +
    + + Padding within HTTP/2 is not intended as a replacement for general purpose padding, such + as might be provided by TLS. Redundant padding could even be + counterproductive. Correct application can depend on having specific knowledge of the + data that is being padded. + + + To mitigate attacks that rely on compression, disabling or limiting compression might be + preferable to padding as a countermeasure. + + + Padding can be used to obscure the exact size of frame content, and is provided to + mitigate specific attacks within HTTP. For example, attacks where compressed content + includes both attacker-controlled plaintext and secret data (see for example, ). + + + Use of padding can result in less protection than might seem immediately obvious. At + best, padding only makes it more difficult for an attacker to infer length information by + increasing the number of frames an attacker has to observe. Incorrectly implemented + padding schemes can be easily defeated. In particular, randomized padding with a + predictable distribution provides very little protection; similarly, padding payloads to a + fixed size exposes information as payload sizes cross the fixed size boundary, which could + be possible if an attacker can control plaintext. + + + Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding + for HEADERS and PUSH_PROMISE frames. A valid reason for an + intermediary to change the amount of padding of frames is to improve the protections that + padding provides. + +
    + +
    + + Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions + of a single client or server over time. This includes the value of settings, the manner + in which flow control windows are managed, the way priorities are allocated to streams, + timing of reactions to stimulus, and handling of any optional features. + + + As far as this creates observable differences in behavior, they could be used as a basis + for fingerprinting a specific client, as defined in . + +
    +
    + +
    + + A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation + (ALPN) Protocol IDs" registry established in . + + + This document establishes a registry for frame types, settings, and error codes. These new + registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section. + + + This document registers the HTTP2-Settings header field for + use in HTTP; and the 421 (Misdirected Request) status code. + + + This document registers the PRI method for use in HTTP, to avoid + collisions with the connection preface. + + +
    + + This document creates two registrations for the identification of HTTP/2 in the + "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in . + + + The "h2" string identifies HTTP/2 when used over TLS: + + HTTP/2 over TLS + 0x68 0x32 ("h2") + This document + + + + The "h2c" string identifies HTTP/2 when used over cleartext TCP: + + HTTP/2 over TCP + 0x68 0x32 0x63 ("h2c") + This document + + +
    + +
    + + This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame + Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under + either of the "IETF Review" or "IESG Approval" policies for + values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for + experimental use. + + + New entries in this registry require the following information: + + + A name or label for the frame type. + + + The 8-bit code assigned to the frame type. + + + A reference to a specification that includes a description of the frame layout, + it's semantics and flags that the frame type uses, including any parts of the frame + that are conditionally present based on the value of flags. + + + + + The entries in the following table are registered by this document. + + + Frame Type + Code + Section + DATA0x0 + HEADERS0x1 + PRIORITY0x2 + RST_STREAM0x3 + SETTINGS0x4 + PUSH_PROMISE0x5 + PING0x6 + GOAWAY0x7 + WINDOW_UPDATE0x8 + CONTINUATION0x9 + +
    + +
    + + This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry + manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to + 0xefff, with values between and 0xf000 and 0xffff being reserved for experimental use. + + + New registrations are advised to provide the following information: + + + A symbolic name for the setting. Specifying a setting name is optional. + + + The 16-bit code assigned to the setting. + + + An initial value for the setting. + + + An optional reference to a specification that describes the use of the setting. + + + + + An initial set of setting registrations can be found in . + + + Name + Code + Initial Value + Specification + HEADER_TABLE_SIZE + 0x14096 + ENABLE_PUSH + 0x21 + MAX_CONCURRENT_STREAMS + 0x3(infinite) + INITIAL_WINDOW_SIZE + 0x465535 + MAX_FRAME_SIZE + 0x516384 + MAX_HEADER_LIST_SIZE + 0x6(infinite) + + +
    + +
    + + This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code" + registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the + "Expert Review" policy. + + + Registrations for error codes are required to include a description of the error code. An + expert reviewer is advised to examine new registrations for possible duplication with + existing error codes. Use of existing registrations is to be encouraged, but not + mandated. + + + New registrations are advised to provide the following information: + + + A name for the error code. Specifying an error code name is optional. + + + The 32-bit error code value. + + + A brief description of the error code semantics, longer if no detailed specification + is provided. + + + An optional reference for a specification that defines the error code. + + + + + The entries in the following table are registered by this document. + + + Name + Code + Description + Specification + NO_ERROR0x0 + Graceful shutdown + + PROTOCOL_ERROR0x1 + Protocol error detected + + INTERNAL_ERROR0x2 + Implementation fault + + FLOW_CONTROL_ERROR0x3 + Flow control limits exceeded + + SETTINGS_TIMEOUT0x4 + Settings not acknowledged + + STREAM_CLOSED0x5 + Frame received for closed stream + + FRAME_SIZE_ERROR0x6 + Frame size incorrect + + REFUSED_STREAM0x7 + Stream not processed + + CANCEL0x8 + Stream cancelled + + COMPRESSION_ERROR0x9 + Compression state not updated + + CONNECT_ERROR0xa + TCP connection error for CONNECT method + + ENHANCE_YOUR_CALM0xb + Processing capacity exceeded + + INADEQUATE_SECURITY0xc + Negotiated TLS parameters not acceptable + + + +
    + +
    + + This section registers the HTTP2-Settings header field in the + Permanent Message Header Field Registry. + + + HTTP2-Settings + + + http + + + standard + + + IETF + + + of this document + + + This header field is only used by an HTTP/2 client for Upgrade-based negotiation. + + + +
    + +
    + + This section registers the PRI method in the HTTP Method + Registry (). + + + PRI + + + No + + + No + + + of this document + + + This method is never used by an actual client. This method will appear to be used + when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection + preface. + + + +
    + +
    + + This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext + Transfer Protocol (HTTP) Status Code Registry (). + + + + + 421 + + + Misdirected Request + + + of this document + + + +
    + +
    + +
    + + This document includes substantial input from the following individuals: + + + Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin + Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin + Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY + contributors). + + + Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). + + + William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto + Peon, Rob Trace (Flow control). + + + Mike Bishop (Extensibility). + + + Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan + (Substantial editorial contributions). + + + Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. + + + Alexey Melnikov was an editor of this document during 2013. + + + A substantial proportion of Martin's contribution was supported by Microsoft during his + employment there. + + + +
    +
    + + + + + + HPACK - Header Compression for HTTP/2 + + + + + + + + + + + + Transmission Control Protocol + + + University of Southern California (USC)/Information Sciences + Institute + + + + + + + + + + + Key words for use in RFCs to Indicate Requirement Levels + + + Harvard University +
    sob@harvard.edu
    +
    + +
    + + +
    + + + + + HTTP Over TLS + + + + + + + + + + Uniform Resource Identifier (URI): Generic + Syntax + + + + + + + + + + + + The Base16, Base32, and Base64 Data Encodings + + + + + + + + + Guidelines for Writing an IANA Considerations Section in RFCs + + + + + + + + + + + Augmented BNF for Syntax Specifications: ABNF + + + + + + + + + + + The Transport Layer Security (TLS) Protocol Version 1.2 + + + + + + + + + + + Transport Layer Security (TLS) Extensions: Extension Definitions + + + + + + + + + + Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension + + + + + + + + + + + + + TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois + Counter Mode (GCM) + + + + + + + + + + + Digital Signature Standard (DSS) + + NIST + + + + + + + + + Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + + Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Range Requests + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + World Wide Web Consortium +
    ylafon@w3.org
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Caching + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + Akamai +
    mnot@mnot.net
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Authentication + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + + HTTP State Management Mechanism + + + + + +
    + + + + + + TCP Extensions for High Performance + + + + + + + + + + + + Transport Layer Security Protocol Compression Methods + + + + + + + + + Additional HTTP Status Codes + + + + + + + + + + + Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS) + + + + + + + + + + + + + + + AES Galois Counter Mode (GCM) Cipher Suites for TLS + + + + + + + + + + + + HTML5 + + + + + + + + + + + Latest version available at + . + + + + + + + Talking to Yourself for Fun and Profit + + + + + + + + + + + + + + BREACH: Reviving the CRIME Attack + + + + + + + + + + + Registration Procedures for Message Header Fields + + Nine by Nine +
    GK-IETF@ninebynine.org
    +
    + + BEA Systems +
    mnot@pobox.com
    +
    + + HP Labs +
    JeffMogul@acm.org
    +
    + +
    + + +
    + + + + Recommendations for Secure Use of TLS and DTLS + + + + + + + + + + + + + + + + + + HTTP Alternative Services + + + Akamai + + + Mozilla + + + greenbytes + + + + + + +
    + +
    + + This section is to be removed by RFC Editor before publication. + + +
    + + Renamed Not Authoritative status code to Misdirected Request. + +
    + +
    + + Pseudo-header fields are now required to appear strictly before regular ones. + + + Restored 1xx series status codes, except 101. + + + Changed frame length field 24-bits. Expanded frame header to 9 octets. Added a setting + to limit the damage. + + + Added a setting to advise peers of header set size limits. + + + Removed segments. + + + Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping. + +
    + +
    + + Restored extensibility options. + + + Restricting TLS cipher suites to AEAD only. + + + Removing Content-Encoding requirements. + + + Permitting the use of PRIORITY after stream close. + + + Removed ALTSVC frame. + + + Removed BLOCKED frame. + + + Reducing the maximum padding size to 256 octets; removing padding from + CONTINUATION frames. + + + Removed per-frame GZIP compression. + +
    + +
    + + Added BLOCKED frame (at risk). + + + Simplified priority scheme. + + + Added DATA per-frame GZIP compression. + +
    + +
    + + Changed "connection header" to "connection preface" to avoid confusion. + + + Added dependency-based stream prioritization. + + + Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. + + + Adding missing padding to PUSH_PROMISE. + + + Integrate ALTSVC frame and supporting text. + + + Dropping requirement on "deflate" Content-Encoding. + + + Improving security considerations around use of compression. + +
    + +
    + + Adding padding for data frames. + + + Renumbering frame types, error codes, and settings. + + + Adding INADEQUATE_SECURITY error code. + + + Updating TLS usage requirements to 1.2; forbidding TLS compression. + + + Removing extensibility for frames and settings. + + + Changing setting identifier size. + + + Removing the ability to disable flow control. + + + Changing the protocol identification token to "h2". + + + Changing the use of :authority to make it optional and to allow userinfo in non-HTTP + cases. + + + Allowing split on 0x0 for Cookie. + + + Reserved PRI method in HTTP/1.1 to avoid possible future collisions. + +
    + +
    + + Added cookie crumbling for more efficient header compression. + + + Added header field ordering with the value-concatenation mechanism. + +
    + +
    + + Marked draft for implementation. + +
    + +
    + + Adding definition for CONNECT method. + + + Constraining the use of push to safe, cacheable methods with no request body. + + + Changing from :host to :authority to remove any potential confusion. + + + Adding setting for header compression table size. + + + Adding settings acknowledgement. + + + Removing unnecessary and potentially problematic flags from CONTINUATION. + + + Added denial of service considerations. + +
    +
    + + Marking the draft ready for implementation. + + + Renumbering END_PUSH_PROMISE flag. + + + Editorial clarifications and changes. + +
    + +
    + + Added CONTINUATION frame for HEADERS and PUSH_PROMISE. + + + PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is + zero. + + + Push expanded to allow all safe methods without a request body. + + + Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 + hop-by-hop header fields. + + + Requiring that intermediaries not forward requests with missing or illegal routing + :-headers. + + + Clarified requirements around handling different frames after stream close, stream reset + and GOAWAY. + + + Added more specific prohibitions for sending of different frame types in various stream + states. + + + Making the last received setting value the effective value. + + + Clarified requirements on TLS version, extension and ciphers. + +
    + +
    + + Committed major restructuring atrocities. + + + Added reference to first header compression draft. + + + Added more formal description of frame lifecycle. + + + Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. + + + Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. + + + Added PRIORITY frame. + +
    + +
    + + Added continuations to frames carrying header blocks. + + + Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful + concepts, like cookies. + + + Removed "message". + + + Switched to TLS ALPN from NPN. + + + Editorial changes. + +
    + +
    + + Added IANA considerations section for frame types, error codes and settings. + + + Removed data frame compression. + + + Added PUSH_PROMISE. + + + Added globally applicable flags to framing. + + + Removed zlib-based header compression mechanism. + + + Updated references. + + + Clarified stream identifier reuse. + + + Removed CREDENTIALS frame and associated mechanisms. + + + Added advice against naive implementation of flow control. + + + Added session header section. + + + Restructured frame header. Removed distinction between data and control frames. + + + Altered flow control properties to include session-level limits. + + + Added note on cacheability of pushed resources and multiple tenant servers. + + + Changed protocol label form based on discussions. + +
    + +
    + + Changed title throughout. + + + Removed section on Incompatibilities with SPDY draft#2. + + + Changed INTERNAL_ERROR on GOAWAY to have a value of 2 . + + + Replaced abstract and introduction. + + + Added section on starting HTTP/2.0, including upgrade mechanism. + + + Removed unused references. + + + Added flow control principles based on . + +
    + +
    + + Adopted as base for draft-ietf-httpbis-http2. + + + Updated authors/editors list. + + + Added status note. + +
    +
    + +
    +
    + diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 0000000..e6b321f --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2303 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math" + mathrand "math/rand" + "net" + "net/http" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" + "golang.org/x/net/lex/httplex" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. + AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allowed. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. +type clientStream struct { + cc *ClientConn + req *http.Request + trace *clientTrace // or nil + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu + requestedGzip bool + on100 func() // optional code to run if get a 100 continue response + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel waits for the user to cancel a request or for the done +// channel to be signaled. A non-nil error is returned only if the request was +// canceled. +func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { + ctx := reqContext(req) + if req.Cancel == nil && ctx.Done() == nil { + return nil + } + select { + case <-req.Cancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} + +// awaitRequestCancel waits for the user to cancel a request, its context to +// expire, or for the request to be done (any way it might be removed from the +// cc.streams map: peer reset, successful completion, TCP connection breakage, +// etc). If the request is canceled, then cs will be canceled and closed. +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + if err := awaitRequestCancel(req, cs.done); err != nil { + cs.cancelStream() + cs.bufPipe.CloseWithError(err) + } +} + +func (cs *clientStream) cancelStream() { + cc := cs.cc + cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cc.mu.Unlock() + + if !didReset { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc.forgetStreamID(cs.ID) + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. +func (cs *clientStream) checkResetOrDone() error { + select { + case <-cs.peerReset: + return cs.resetErr + case <-cs.done: + return errStreamClosed + default: + return nil + } +} + +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, as it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + for retry := 0; ; retry++ { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + traceGotConn(req, cc) + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + if err != nil && retry <= 6 { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + // After the first retry, do exponential backoff with 10% jitter. + if retry == 0 { + continue + } + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + select { + case <-time.After(time.Second * time.Duration(backoff)): + continue + case <-reqContext(req).Done(): + return nil, reqContext(req).Err() + } + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { + if !canRetryError(err) { + return nil, err + } + if !afterBodyWrite { + return req, nil + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return req, nil + } + // Otherwise we depend on the Request having its GetBody + // func defined. + getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody + if getBody == nil { + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) + } + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *cloneTLSConfig(t.TLSClientConfig) + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return transportExpectContinueTimeout(t.t1) +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } +} + +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + if cc.singleUse && cc.nextStreamID > 1 { + return false + } + return cc.goAway == nil && !cc.closed && + int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + nextID := cc.nextStreamID + // TODO: do clients send GOAWAY too? maybe? Just Close: + cc.mu.Unlock() + + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) + } + cc.tconn.Close() +} + +const maxAllocFrameSize = 512 << 10 + +// frameBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// bufers. +func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. +var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. +func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { + if err := checkConnHeaders(req); err != nil { + return nil, false, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, false, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + if err := cc.awaitOpenSlotForRequest(req); err != nil { + cc.mu.Unlock() + return nil, false, err + } + + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, false, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, false, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := reqContext(req) + + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, false, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errTimeout + case <-ctx.Done(): + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), ctx.Err() + case <-req.Cancel: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.getStartedWrite(), cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + return nil, cs.getStartedWrite(), err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + return errClientConnUnusable + } + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. + if waitingForConn == nil { + waitingForConn = make(chan struct{}) + go func() { + if err := awaitRequestCancel(req, waitingForConn); err != nil { + cc.mu.Lock() + waitingForConnErr = err + cc.cond.Broadcast() + cc.mu.Unlock() + } + }() + } + cc.pendingRequests++ + cc.cond.Wait() + cc.pendingRequests-- + if waitingForConnErr != nil { + return waitingForConnErr + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. + errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + traceWroteRequest(cs.trace, err) + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf) + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagel-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } + } + + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httplex.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + f(":method", req.Method) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + cc.writeHeader(strings.ToLower(name), value) + }) + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filtered at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. +func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } + close(cs.done) + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{cc: cc} + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. + err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, false); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. + if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("malformed response from server: missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + setResponseUncompressed(res) + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. + v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. + didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + cs.bufPipe.closeWithErrorAndCode(err, code) + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. + cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := streamError(cs.ID, f.ErrCode) + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + } + return nil +} + +// Ping sends a PING frame to the server and waits for the ack. +// Public implementation is in go17.go and not_go17.go +func (cc *ClientConn) ping(ctx contextContext) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + cc.wmu.Lock() + if err := cc.fr.WritePing(false, p); err != nil { + cc.wmu.Unlock() + return err + } + if err := cc.bw.Flush(); err != nil { + cc.wmu.Unlock() + return err + } + cc.wmu.Unlock() + select { + case <-c: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). + cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. + const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go new file mode 100644 index 0000000..fe04bd2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport_test.go @@ -0,0 +1,3847 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var ( + extNet = flag.Bool("extnet", false, "do external network tests") + transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport") + insecure = flag.Bool("insecure", false, "insecure TLS dials") // TODO: dead code. remove? +) + +var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true} + +type testContext struct{} + +func (testContext) Done() <-chan struct{} { return make(chan struct{}) } +func (testContext) Err() error { panic("should not be called") } +func (testContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } +func (testContext) Value(key interface{}) interface{} { return nil } + +func TestTransportExternal(t *testing.T) { + if !*extNet { + t.Skip("skipping external network test") + } + req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil) + rt := &Transport{TLSClientConfig: tlsConfigInsecure} + res, err := rt.RoundTrip(req) + if err != nil { + t.Fatalf("%v", err) + } + res.Write(os.Stdout) +} + +type fakeTLSConn struct { + net.Conn +} + +func (c *fakeTLSConn) ConnectionState() tls.ConnectionState { + return tls.ConnectionState{ + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } +} + +func startH2cServer(t *testing.T) net.Listener { + h2Server := &Server{} + l := newLocalListener(t) + go func() { + conn, err := l.Accept() + if err != nil { + t.Error(err) + return + } + h2Server.ServeConn(&fakeTLSConn{conn}, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, %v, http: %v", r.URL.Path, r.TLS == nil) + })}) + }() + return l +} + +func TestTransportH2c(t *testing.T) { + l := startH2cServer(t) + defer l.Close() + req, err := http.NewRequest("GET", "http://"+l.Addr().String()+"/foobar", nil) + if err != nil { + t.Fatal(err) + } + tr := &Transport{ + AllowHTTP: true, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + return net.Dial(network, addr) + }, + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.ProtoMajor != 2 { + t.Fatal("proto not h2c") + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(body), "Hello, /foobar, http: true"; got != want { + t.Fatalf("response got %v, want %v", got, want) + } +} + +func TestTransport(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, body) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + t.Logf("Got res: %+v", res) + if g, w := res.StatusCode, 200; g != w { + t.Errorf("StatusCode = %v; want %v", g, w) + } + if g, w := res.Status, "200 OK"; g != w { + t.Errorf("Status = %q; want %q", g, w) + } + wantHeader := http.Header{ + "Content-Length": []string{"3"}, + "Content-Type": []string{"text/plain; charset=utf-8"}, + "Date": []string{"XXX"}, // see cleanDate + } + cleanDate(res) + if !reflect.DeepEqual(res.Header, wantHeader) { + t.Errorf("res Header = %v; want %v", res.Header, wantHeader) + } + if res.Request != req { + t.Errorf("Response.Request = %p; want %p", res.Request, req) + } + if res.TLS == nil { + t.Error("Response.TLS = nil; want non-nil") + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } else if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func onSameConn(t *testing.T, modReq func(*http.Request)) bool { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer, func(c net.Conn, st http.ConnState) { + t.Logf("conn %v is now state %v", c.RemoteAddr(), st) + }) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + get := func() string { + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + modReq(req) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Body read: %v", err) + } + addr := strings.TrimSpace(string(slurp)) + if addr == "" { + t.Fatalf("didn't get an addr in response") + } + return addr + } + first := get() + second := get() + return first == second +} + +func TestTransportReusesConns(t *testing.T) { + if !onSameConn(t, func(*http.Request) {}) { + t.Errorf("first and second responses were on different connections") + } +} + +func TestTransportReusesConn_RequestClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Close = true }) { + t.Errorf("first and second responses were not on different connections") + } +} + +func TestTransportReusesConn_ConnClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) { + t.Errorf("first and second responses were not on different connections") + } +} + +// Tests that the Transport only keeps one pending dial open per destination address. +// https://golang.org/issue/13397 +func TestTransportGroupsPendingDials(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer) + defer st.Close() + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + defer tr.CloseIdleConnections() + var ( + mu sync.Mutex + dials = map[string]int{} + ) + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Error(err) + return + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Error(err) + return + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } + addr := strings.TrimSpace(string(slurp)) + if addr == "" { + t.Errorf("didn't get an addr in response") + } + mu.Lock() + dials[addr]++ + mu.Unlock() + }() + } + wg.Wait() + if len(dials) != 1 { + t.Errorf("saw %d dials; want 1: %v", len(dials), dials) + } + tr.CloseIdleConnections() + if err := retry(50, 10*time.Millisecond, func() error { + cp, ok := tr.connPool().(*clientConnPool) + if !ok { + return fmt.Errorf("Conn pool is %T; want *clientConnPool", tr.connPool()) + } + cp.mu.Lock() + defer cp.mu.Unlock() + if len(cp.dialing) != 0 { + return fmt.Errorf("dialing map = %v; want empty", cp.dialing) + } + if len(cp.conns) != 0 { + return fmt.Errorf("conns = %v; want empty", cp.conns) + } + if len(cp.keys) != 0 { + return fmt.Errorf("keys = %v; want empty", cp.keys) + } + return nil + }); err != nil { + t.Errorf("State of pool after CloseIdleConnections: %v", err) + } +} + +func retry(tries int, delay time.Duration, fn func() error) error { + var err error + for i := 0; i < tries; i++ { + err = fn() + if err == nil { + return nil + } + time.Sleep(delay) + } + return err +} + +func TestTransportAbortClosesPipes(t *testing.T) { + shutdown := make(chan struct{}) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + <-shutdown + }, + optOnlyServer, + ) + defer st.Close() + defer close(shutdown) // we must shutdown before st.Close() to avoid hanging + + done := make(chan struct{}) + requestMade := make(chan struct{}) + go func() { + defer close(done) + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + close(requestMade) + _, err = ioutil.ReadAll(res.Body) + if err == nil { + t.Error("expected error from res.Body.Read") + } + }() + + <-requestMade + // Now force the serve loop to end, via closing the connection. + st.closeConn() + // deadlock? that's a bug. + select { + case <-done: + case <-time.After(3 * time.Second): + t.Fatal("timeout") + } +} + +// TODO: merge this with TestTransportBody to make TestTransportRequest? This +// could be a table-driven test with extra goodies. +func TestTransportPath(t *testing.T) { + gotc := make(chan *url.URL, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + gotc <- r.URL + }, + optOnlyServer, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + const ( + path = "/testpath" + query = "q=1" + ) + surl := st.ts.URL + path + "?" + query + req, err := http.NewRequest("POST", surl, nil) + if err != nil { + t.Fatal(err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + got := <-gotc + if got.Path != path { + t.Errorf("Read Path = %q; want %q", got.Path, path) + } + if got.RawQuery != query { + t.Errorf("Read RawQuery = %q; want %q", got.RawQuery, query) + } +} + +func randString(n int) string { + rnd := rand.New(rand.NewSource(int64(n))) + b := make([]byte, n) + for i := range b { + b[i] = byte(rnd.Intn(256)) + } + return string(b) +} + +type panicReader struct{} + +func (panicReader) Read([]byte) (int, error) { panic("unexpected Read") } +func (panicReader) Close() error { panic("unexpected Close") } + +func TestActualContentLength(t *testing.T) { + tests := []struct { + req *http.Request + want int64 + }{ + // Verify we don't read from Body: + 0: { + req: &http.Request{Body: panicReader{}}, + want: -1, + }, + // nil Body means 0, regardless of ContentLength: + 1: { + req: &http.Request{Body: nil, ContentLength: 5}, + want: 0, + }, + // ContentLength is used if set. + 2: { + req: &http.Request{Body: panicReader{}, ContentLength: 5}, + want: 5, + }, + // http.NoBody means 0, not -1. + 3: { + req: &http.Request{Body: go18httpNoBody()}, + want: 0, + }, + } + for i, tt := range tests { + got := actualContentLength(tt.req) + if got != tt.want { + t.Errorf("test[%d]: got %d; want %d", i, got, tt.want) + } + } +} + +func TestTransportBody(t *testing.T) { + bodyTests := []struct { + body string + noContentLen bool + }{ + {body: "some message"}, + {body: "some message", noContentLen: true}, + {body: strings.Repeat("a", 1<<20), noContentLen: true}, + {body: strings.Repeat("a", 1<<20)}, + {body: randString(16<<10 - 1)}, + {body: randString(16 << 10)}, + {body: randString(16<<10 + 1)}, + {body: randString(512<<10 - 1)}, + {body: randString(512 << 10)}, + {body: randString(512<<10 + 1)}, + {body: randString(1<<20 - 1)}, + {body: randString(1 << 20)}, + {body: randString(1<<20 + 2)}, + } + + type reqInfo struct { + req *http.Request + slurp []byte + err error + } + gotc := make(chan reqInfo, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + slurp, err := ioutil.ReadAll(r.Body) + if err != nil { + gotc <- reqInfo{err: err} + } else { + gotc <- reqInfo{req: r, slurp: slurp} + } + }, + optOnlyServer, + ) + defer st.Close() + + for i, tt := range bodyTests { + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + var body io.Reader = strings.NewReader(tt.body) + if tt.noContentLen { + body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods + } + req, err := http.NewRequest("POST", st.ts.URL, body) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + defer res.Body.Close() + ri := <-gotc + if ri.err != nil { + t.Errorf("#%d: read error: %v", i, ri.err) + continue + } + if got := string(ri.slurp); got != tt.body { + t.Errorf("#%d: Read body mismatch.\n got: %q (len %d)\nwant: %q (len %d)", i, shortString(got), len(got), shortString(tt.body), len(tt.body)) + } + wantLen := int64(len(tt.body)) + if tt.noContentLen && tt.body != "" { + wantLen = -1 + } + if ri.req.ContentLength != wantLen { + t.Errorf("#%d. handler got ContentLength = %v; want %v", i, ri.req.ContentLength, wantLen) + } + } +} + +func shortString(v string) string { + const maxLen = 100 + if len(v) <= maxLen { + return v + } + return fmt.Sprintf("%v[...%d bytes omitted...]%v", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:]) +} + +func TestTransportDialTLS(t *testing.T) { + var mu sync.Mutex // guards following + var gotReq, didDial bool + + ts := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + gotReq = true + mu.Unlock() + }, + optOnlyServer, + ) + defer ts.Close() + tr := &Transport{ + DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { + mu.Lock() + didDial = true + mu.Unlock() + cfg.InsecureSkipVerify = true + c, err := tls.Dial(netw, addr, cfg) + if err != nil { + return nil, err + } + return c, c.Handshake() + }, + } + defer tr.CloseIdleConnections() + client := &http.Client{Transport: tr} + res, err := client.Get(ts.ts.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + mu.Lock() + if !gotReq { + t.Error("didn't get request") + } + if !didDial { + t.Error("didn't use dial hook") + } +} + +func TestConfigureTransport(t *testing.T) { + t1 := &http.Transport{} + err := ConfigureTransport(t1) + if err == errTransportVersion { + t.Skip(err) + } + if err != nil { + t.Fatal(err) + } + if got := fmt.Sprintf("%#v", t1); !strings.Contains(got, `"h2"`) { + // Laziness, to avoid buildtags. + t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got) + } + wantNextProtos := []string{"h2", "http/1.1"} + if t1.TLSClientConfig == nil { + t.Errorf("nil t1.TLSClientConfig") + } else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) { + t.Errorf("TLSClientConfig.NextProtos = %q; want %q", t1.TLSClientConfig.NextProtos, wantNextProtos) + } + if err := ConfigureTransport(t1); err == nil { + t.Error("unexpected success on second call to ConfigureTransport") + } + + // And does it work? + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.Proto) + }, optOnlyServer) + defer st.Close() + + t1.TLSClientConfig.InsecureSkipVerify = true + c := &http.Client{Transport: t1} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(slurp), "HTTP/2.0"; got != want { + t.Errorf("body = %q; want %q", got, want) + } +} + +type capitalizeReader struct { + r io.Reader +} + +func (cr capitalizeReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + for i, b := range p[:n] { + if b >= 'a' && b <= 'z' { + p[i] = b - ('a' - 'A') + } + } + return +} + +type flushWriter struct { + w io.Writer +} + +func (fw flushWriter) Write(p []byte) (n int, err error) { + n, err = fw.w.Write(p) + if f, ok := fw.w.(http.Flusher); ok { + f.Flush() + } + return +} + +type clientTester struct { + t *testing.T + tr *Transport + sc, cc net.Conn // server and client conn + fr *Framer // server's framer + client func() error + server func() error +} + +func newClientTester(t *testing.T) *clientTester { + var dialOnce struct { + sync.Mutex + dialed bool + } + ct := &clientTester{ + t: t, + } + ct.tr = &Transport{ + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialOnce.Lock() + defer dialOnce.Unlock() + if dialOnce.dialed { + return nil, errors.New("only one dial allowed in test mode") + } + dialOnce.dialed = true + return ct.cc, nil + }, + } + + ln := newLocalListener(t) + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + + } + sc, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + ln.Close() + ct.cc = cc + ct.sc = sc + ct.fr = NewFramer(sc, sc) + return ct +} + +func newLocalListener(t *testing.T) net.Listener { + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err == nil { + return ln + } + ln, err = net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + return ln +} + +func (ct *clientTester) greet(settings ...Setting) { + buf := make([]byte, len(ClientPreface)) + _, err := io.ReadFull(ct.sc, buf) + if err != nil { + ct.t.Fatalf("reading client preface: %v", err) + } + f, err := ct.fr.ReadFrame() + if err != nil { + ct.t.Fatalf("Reading client settings frame: %v", err) + } + if sf, ok := f.(*SettingsFrame); !ok { + ct.t.Fatalf("Wanted client settings frame; got %v", f) + _ = sf // stash it away? + } + if err := ct.fr.WriteSettings(settings...); err != nil { + ct.t.Fatal(err) + } + if err := ct.fr.WriteSettingsAck(); err != nil { + ct.t.Fatal(err) + } +} + +func (ct *clientTester) readNonSettingsFrame() (Frame, error) { + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return nil, err + } + if _, ok := f.(*SettingsFrame); ok { + continue + } + return f, nil + } +} + +func (ct *clientTester) cleanup() { + ct.tr.CloseIdleConnections() +} + +func (ct *clientTester) run() { + errc := make(chan error, 2) + ct.start("client", errc, ct.client) + ct.start("server", errc, ct.server) + defer ct.cleanup() + for i := 0; i < 2; i++ { + if err := <-errc; err != nil { + ct.t.Error(err) + return + } + } +} + +func (ct *clientTester) start(which string, errc chan<- error, fn func() error) { + go func() { + finished := false + var err error + defer func() { + if !finished { + err = fmt.Errorf("%s goroutine didn't finish.", which) + } else if err != nil { + err = fmt.Errorf("%s: %v", which, err) + } + errc <- err + }() + err = fn() + finished = true + }() +} + +func (ct *clientTester) readFrame() (Frame, error) { + return readFrameTimeout(ct.fr, 2*time.Second) +} + +func (ct *clientTester) firstHeaders() (*HeadersFrame, error) { + for { + f, err := ct.readFrame() + if err != nil { + return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + hf, ok := f.(*HeadersFrame) + if !ok { + return nil, fmt.Errorf("Got %T; want HeadersFrame", f) + } + return hf, nil + } +} + +type countingReader struct { + n *int64 +} + +func (r countingReader) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(i) + } + atomic.AddInt64(r.n, int64(len(p))) + return len(p), err +} + +func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) } +func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) } + +func testTransportReqBodyAfterResponse(t *testing.T, status int) { + const bodySize = 10 << 20 + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + + var n int64 // atomic + req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize)) + if err != nil { + return err + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != status { + return fmt.Errorf("status code = %v; want %v", res.StatusCode, status) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("unexpected body: %q", slurp) + } + if status == 200 { + if got := atomic.LoadInt64(&n); got != bodySize { + return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize) + } + } else { + if got := atomic.LoadInt64(&n); got == 0 || got >= bodySize { + return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + var dataRecv int64 + var closed bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. + return nil + default: + return err + } + } + //println(fmt.Sprintf("server got frame: %v", f)) + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + if f.StreamEnded() { + return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f) + } + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if dataRecv == 0 { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + dataRecv += int64(dataLen) + + if !closed && ((status != 200 && dataRecv > 0) || + (status == 200 && dataRecv == bodySize)) { + closed = true + if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil { + return err + } + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +// See golang.org/issue/13444 +func TestTransportFullDuplex(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) // redundant but for clarity + w.(http.Flusher).Flush() + io.Copy(flushWriter{w}, capitalizeReader{r.Body}) + fmt.Fprintf(w, "bye.\n") + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + pr, pw := io.Pipe() + req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr)) + if err != nil { + t.Fatal(err) + } + req.ContentLength = -1 + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + t.Fatalf("StatusCode = %v; want %v", res.StatusCode, 200) + } + bs := bufio.NewScanner(res.Body) + want := func(v string) { + if !bs.Scan() { + t.Fatalf("wanted to read %q but Scan() = false, err = %v", v, bs.Err()) + } + } + write := func(v string) { + _, err := io.WriteString(pw, v) + if err != nil { + t.Fatalf("pipe write: %v", err) + } + } + write("foo\n") + want("FOO") + write("bar\n") + want("BAR") + pw.Close() + want("bye.") + if err := bs.Err(); err != nil { + t.Fatal(err) + } +} + +func TestTransportConnectRequest(t *testing.T) { + gotc := make(chan *http.Request, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + gotc <- r + }, optOnlyServer) + defer st.Close() + + u, err := url.Parse(st.ts.URL) + if err != nil { + t.Fatal(err) + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + tests := []struct { + req *http.Request + want string + }{ + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + }, + want: u.Host, + }, + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + Host: "example.com:123", + }, + want: "example.com:123", + }, + } + + for i, tt := range tests { + res, err := c.Do(tt.req) + if err != nil { + t.Errorf("%d. RoundTrip = %v", i, err) + continue + } + res.Body.Close() + req := <-gotc + if req.Method != "CONNECT" { + t.Errorf("method = %q; want CONNECT", req.Method) + } + if req.Host != tt.want { + t.Errorf("Host = %q; want %q", req.Host, tt.want) + } + if req.URL.Host != tt.want { + t.Errorf("URL.Host = %q; want %q", req.URL.Host, tt.want) + } + } +} + +type headerType int + +const ( + noHeader headerType = iota // omitted + oneHeader + splitHeader // broken into continuation on purpose +) + +const ( + f0 = noHeader + f1 = oneHeader + f2 = splitHeader + d0 = false + d1 = true +) + +// Test all 36 combinations of response frame orders: +// (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers):func TestTransportResponsePattern_00f0(t *testing.T) { testTransportResponsePattern(h0, h1, false, h0) } +// Generated by http://play.golang.org/p/SScqYKJYXd +func TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) } +func TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) } +func TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) } +func TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) } +func TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) } +func TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) } +func TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) } +func TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) } +func TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) } +func TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) } +func TestTransportResPattern_c0h2d1t1(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f1) } +func TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) } +func TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) } +func TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) } +func TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) } +func TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) } +func TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) } +func TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) } +func TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) } +func TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) } +func TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) } +func TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) } +func TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) } +func TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) } +func TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) } +func TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) } +func TestTransportResPattern_c2h1d0t2(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f2) } +func TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) } +func TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) } +func TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) } +func TestTransportResPattern_c2h2d0t0(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f0) } +func TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) } +func TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) } +func TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) } +func TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) } +func TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) } + +func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) { + const reqBody = "some request body" + const resBody = "some response body" + + if resHeader == noHeader { + // TODO: test 100-continue followed by immediate + // server stream reset, without headers in the middle? + panic("invalid combination") + } + + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody)) + if expect100Continue != noHeader { + req.Header.Set("Expect", "100-continue") + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + wantBody := resBody + if !withData { + wantBody = "" + } + if string(slurp) != wantBody { + return fmt.Errorf("body = %q; want %q", slurp, wantBody) + } + if trailers == noHeader { + if len(res.Trailer) > 0 { + t.Errorf("Trailer = %v; want none", res.Trailer) + } + } else { + want := http.Header{"Some-Trailer": {"some-value"}} + if !reflect.DeepEqual(res.Trailer, want) { + t.Errorf("Trailer = %v; want %v", res.Trailer, want) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + endStream := false + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *DataFrame: + if !f.StreamEnded() { + // No need to send flow control tokens. The test request body is tiny. + continue + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"}) + enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"}) + if trailers != noHeader { + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"}) + } + endStream = withData == false && trailers == noHeader + send(resHeader) + } + if withData { + endStream = trailers == noHeader + ct.fr.WriteData(f.StreamID, endStream, []byte(resBody)) + } + if trailers != noHeader { + endStream = true + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"}) + send(trailers) + } + if endStream { + return nil + } + case *HeadersFrame: + if expect100Continue != noHeader { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) + send(expect100Continue) + } + } + } + } + ct.run() +} + +func TestTransportReceiveUndeclaredTrailer(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + if _, ok := res.Trailer["Some-Trailer"]; !ok { + return fmt.Errorf("expected Some-Trailer") + } + return nil + } + ct.server = func() error { + ct.greet() + + var n int + var hf *HeadersFrame + for hf == nil && n < 10 { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + hf, _ = f.(*HeadersFrame) + n++ + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + // send headers without Trailer header + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + // send trailers + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + return nil + } + ct.run() +} + +func TestTransportInvalidTrailer_Pseudo1(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, oneHeader) +} +func TestTransportInvalidTrailer_Pseudo2(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, splitHeader) +} +func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + }) +} + +func TestTransportInvalidTrailer_Capital1(t *testing.T) { + testTransportInvalidTrailer_Capital(t, oneHeader) +} +func TestTransportInvalidTrailer_Capital2(t *testing.T) { + testTransportInvalidTrailer_Capital(t, splitHeader) +} +func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"}) + }) +} + +func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + se, ok := err.(StreamError) + if !ok || se.Cause != wantErr { + return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + var endStream bool + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"}) + endStream = false + send(oneHeader) + } + // Trailers: + { + endStream = true + buf.Reset() + writeTrailer(enc) + send(trailers) + } + return nil + } + } + } + ct.run() +} + +// headerListSize returns the HTTP2 header list size of h. +// http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE +// http://httpwg.org/specs/rfc7540.html#MaxHeaderBlock +func headerListSize(h http.Header) (size uint32) { + for k, vv := range h { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + size += hf.Size() + } + } + return size +} + +// padHeaders adds data to an http.Header until headerListSize(h) == +// limit. Due to the way header list sizes are calculated, padHeaders +// cannot add fewer than len("Pad-Headers") + 32 bytes to h, and will +// call t.Fatal if asked to do so. PadHeaders first reserves enough +// space for an empty "Pad-Headers" key, then adds as many copies of +// filler as possible. Any remaining bytes necessary to push the +// header list size up to limit are added to h["Pad-Headers"]. +func padHeaders(t *testing.T, h http.Header, limit uint64, filler string) { + if limit > 0xffffffff { + t.Fatalf("padHeaders: refusing to pad to more than 2^32-1 bytes. limit = %v", limit) + } + hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} + minPadding := uint64(hf.Size()) + size := uint64(headerListSize(h)) + + minlimit := size + minPadding + if limit < minlimit { + t.Fatalf("padHeaders: limit %v < %v", limit, minlimit) + } + + // Use a fixed-width format for name so that fieldSize + // remains constant. + nameFmt := "Pad-Headers-%06d" + hf = hpack.HeaderField{Name: fmt.Sprintf(nameFmt, 1), Value: filler} + fieldSize := uint64(hf.Size()) + + // Add as many complete filler values as possible, leaving + // room for at least one empty "Pad-Headers" key. + limit = limit - minPadding + for i := 0; size+fieldSize < limit; i++ { + name := fmt.Sprintf(nameFmt, i) + h.Add(name, filler) + size += fieldSize + } + + // Add enough bytes to reach limit. + remain := limit - size + lastValue := strings.Repeat("*", int(remain)) + h.Add("Pad-Headers", lastValue) +} + +func TestPadHeaders(t *testing.T) { + check := func(h http.Header, limit uint32, fillerLen int) { + if h == nil { + h = make(http.Header) + } + filler := strings.Repeat("f", fillerLen) + padHeaders(t, h, uint64(limit), filler) + gotSize := headerListSize(h) + if gotSize != limit { + t.Errorf("Got size = %v; want %v", gotSize, limit) + } + } + // Try all possible combinations for small fillerLen and limit. + hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} + minLimit := hf.Size() + for limit := minLimit; limit <= 128; limit++ { + for fillerLen := 0; uint32(fillerLen) <= limit; fillerLen++ { + check(nil, limit, fillerLen) + } + } + + // Try a few tests with larger limits, plus cumulative + // tests. Since these tests are cumulative, tests[i+1].limit + // must be >= tests[i].limit + minLimit. See the comment on + // padHeaders for more info on why the limit arg has this + // restriction. + tests := []struct { + fillerLen int + limit uint32 + }{ + { + fillerLen: 64, + limit: 1024, + }, + { + fillerLen: 1024, + limit: 1286, + }, + { + fillerLen: 256, + limit: 2048, + }, + { + fillerLen: 1024, + limit: 10 * 1024, + }, + { + fillerLen: 1023, + limit: 11 * 1024, + }, + } + h := make(http.Header) + for _, tc := range tests { + check(nil, tc.limit, tc.fillerLen) + check(h, tc.limit, tc.fillerLen) + } +} + +func TestTransportChecksRequestHeaderListSize(t *testing.T) { + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + // Consume body & force client to send + // trailers before writing response. + // ioutil.ReadAll returns non-nil err for + // requests that attempt to send greater than + // maxHeaderListSize bytes of trailers, since + // those requests generate a stream reset. + ioutil.ReadAll(r.Body) + r.Body.Close() + }, + func(ts *httptest.Server) { + ts.Config.MaxHeaderBytes = 16 << 10 + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + checkRoundTrip := func(req *http.Request, wantErr error, desc string) { + res, err := tr.RoundTrip(req) + if err != wantErr { + if res != nil { + res.Body.Close() + } + t.Errorf("%v: RoundTrip err = %v; want %v", desc, err, wantErr) + return + } + if err == nil { + if res == nil { + t.Errorf("%v: response nil; want non-nil.", desc) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + t.Errorf("%v: response status = %v; want %v", desc, res.StatusCode, http.StatusOK) + } + return + } + if res != nil { + t.Errorf("%v: RoundTrip err = %v but response non-nil", desc, err) + } + } + headerListSizeForRequest := func(req *http.Request) (size uint64) { + contentLen := actualContentLength(req) + trailers, err := commaSeparatedTrailers(req) + if err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} + cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.mu.Lock() + hdrs, err := cc.encodeHeaders(req, true, trailers, contentLen) + cc.mu.Unlock() + if err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(hf hpack.HeaderField) { + size += uint64(hf.Size()) + }) + if len(hdrs) > 0 { + if _, err := hpackDec.Write(hdrs); err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + } + return size + } + // Create a new Request for each test, rather than reusing the + // same Request, to avoid a race when modifying req.Headers. + // See https://github.com/golang/go/issues/21316 + newRequest := func() *http.Request { + // Body must be non-nil to enable writing trailers. + body := strings.NewReader("hello") + req, err := http.NewRequest("POST", st.ts.URL, body) + if err != nil { + t.Fatalf("newRequest: NewRequest: %v", err) + } + return req + } + + // Make an arbitrary request to ensure we get the server's + // settings frame and initialize peerMaxHeaderListSize. + req := newRequest() + checkRoundTrip(req, nil, "Initial request") + + // Get the ClientConn associated with the request and validate + // peerMaxHeaderListSize. + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + cc, err := tr.connPool().GetClientConn(req, addr) + if err != nil { + t.Fatalf("GetClientConn: %v", err) + } + cc.mu.Lock() + peerSize := cc.peerMaxHeaderListSize + cc.mu.Unlock() + st.scMu.Lock() + wantSize := uint64(st.sc.maxHeaderListSize()) + st.scMu.Unlock() + if peerSize != wantSize { + t.Errorf("peerMaxHeaderListSize = %v; want %v", peerSize, wantSize) + } + + // Sanity check peerSize. (*serverConn) maxHeaderListSize adds + // 320 bytes of padding. + wantHeaderBytes := uint64(st.ts.Config.MaxHeaderBytes) + 320 + if peerSize != wantHeaderBytes { + t.Errorf("peerMaxHeaderListSize = %v; want %v.", peerSize, wantHeaderBytes) + } + + // Pad headers & trailers, but stay under peerSize. + req = newRequest() + req.Header = make(http.Header) + req.Trailer = make(http.Header) + filler := strings.Repeat("*", 1024) + padHeaders(t, req.Trailer, peerSize, filler) + // cc.encodeHeaders adds some default headers to the request, + // so we need to leave room for those. + defaultBytes := headerListSizeForRequest(req) + padHeaders(t, req.Header, peerSize-defaultBytes, filler) + checkRoundTrip(req, nil, "Headers & Trailers under limit") + + // Add enough header bytes to push us over peerSize. + req = newRequest() + req.Header = make(http.Header) + padHeaders(t, req.Header, peerSize, filler) + checkRoundTrip(req, errRequestHeaderListSize, "Headers over limit") + + // Push trailers over the limit. + req = newRequest() + req.Trailer = make(http.Header) + padHeaders(t, req.Trailer, peerSize+1, filler) + checkRoundTrip(req, errRequestHeaderListSize, "Trailers over limit") + + // Send headers with a single large value. + req = newRequest() + filler = strings.Repeat("*", int(peerSize)) + req.Header = make(http.Header) + req.Header.Set("Big", filler) + checkRoundTrip(req, errRequestHeaderListSize, "Single large header") + + // Send trailers with a single large value. + req = newRequest() + req.Trailer = make(http.Header) + req.Trailer.Set("Big", filler) + checkRoundTrip(req, errRequestHeaderListSize, "Single large trailer") +} + +func TestTransportChecksResponseHeaderListSize(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != errResponseHeaderListSize { + if res != nil { + res.Body.Close() + } + size := int64(0) + for k, vv := range res.Header { + for _, v := range vv { + size += int64(len(k)) + int64(len(v)) + 32 + } + } + return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + large := strings.Repeat("a", 1<<10) + for i := 0; i < 5042; i++ { + enc.WriteField(hpack.HeaderField{Name: large, Value: large}) + } + if size, want := buf.Len(), 6329; size != want { + // Note: this number might change if + // our hpack implementation + // changes. That's fine. This is + // just a sanity check that our + // response can fit in a single + // header block fragment frame. + return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want) + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + return nil + } + } + } + ct.run() +} + +// Test that the Transport returns a typed error from Response.Body.Read calls +// when the server sends an error. (here we use a panic, since that should generate +// a stream error, but others like cancel should be similar) +func TestTransportBodyReadErrorType(t *testing.T) { + doPanic := make(chan bool, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() // force headers out + <-doPanic + panic("boom") + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + doPanic <- true + buf := make([]byte, 100) + n, err := res.Body.Read(buf) + want := StreamError{StreamID: 0x1, Code: 0x2} + if !reflect.DeepEqual(want, err) { + t.Errorf("Read = %v, %#v; want error %#v", n, err, want) + } +} + +// golang.org/issue/13924 +// This used to fail after many iterations, especially with -race: +// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race +func TestTransportDoubleCloseOnWriteError(t *testing.T) { + var ( + mu sync.Mutex + conn net.Conn // to close if set + ) + + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() + if conn != nil { + conn.Close() + } + }, + optOnlyServer, + ) + defer st.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + mu.Lock() + defer mu.Unlock() + conn = tc + return tc, nil + }, + } + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + c.Get(st.ts.URL) +} + +// Test that the http1 Transport.DisableKeepAlives option is respected +// and connections are closed as soon as idle. +// See golang.org/issue/14008 +func TestTransportDisableKeepAlives(t *testing.T) { + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hi") + }, + optOnlyServer, + ) + defer st.Close() + + connClosed := make(chan struct{}) // closed on tls.Conn.Close + tr := &Transport{ + t1: &http.Transport{ + DisableKeepAlives: true, + }, + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + return ¬eCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil + }, + } + c := &http.Client{Transport: tr} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.ReadAll(res.Body); err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + select { + case <-connClosed: + case <-time.After(1 * time.Second): + t.Errorf("timeout") + } + +} + +// Test concurrent requests with Transport.DisableKeepAlives. We can share connections, +// but when things are totally idle, it still needs to close. +func TestTransportDisableKeepAlives_Concurrency(t *testing.T) { + const D = 25 * time.Millisecond + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + time.Sleep(D) + io.WriteString(w, "hi") + }, + optOnlyServer, + ) + defer st.Close() + + var dials int32 + var conns sync.WaitGroup + tr := &Transport{ + t1: &http.Transport{ + DisableKeepAlives: true, + }, + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + atomic.AddInt32(&dials, 1) + conns.Add(1) + return ¬eCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil + }, + } + c := &http.Client{Transport: tr} + var reqs sync.WaitGroup + const N = 20 + for i := 0; i < N; i++ { + reqs.Add(1) + if i == N-1 { + // For the final request, try to make all the + // others close. This isn't verified in the + // count, other than the Log statement, since + // it's so timing dependent. This test is + // really to make sure we don't interrupt a + // valid request. + time.Sleep(D * 2) + } + go func() { + defer reqs.Done() + res, err := c.Get(st.ts.URL) + if err != nil { + t.Error(err) + return + } + if _, err := ioutil.ReadAll(res.Body); err != nil { + t.Error(err) + return + } + res.Body.Close() + }() + } + reqs.Wait() + conns.Wait() + t.Logf("did %d dials, %d requests", atomic.LoadInt32(&dials), N) +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} + +func isTimeout(err error) bool { + switch err := err.(type) { + case nil: + return false + case *url.Error: + return isTimeout(err.Err) + case net.Error: + return err.Timeout() + } + return false +} + +// Test that the http1 Transport.ResponseHeaderTimeout option and cancel is sent. +func TestTransportResponseHeaderTimeout_NoBody(t *testing.T) { + testTransportResponseHeaderTimeout(t, false) +} +func TestTransportResponseHeaderTimeout_Body(t *testing.T) { + testTransportResponseHeaderTimeout(t, true) +} + +func testTransportResponseHeaderTimeout(t *testing.T, body bool) { + ct := newClientTester(t) + ct.tr.t1 = &http.Transport{ + ResponseHeaderTimeout: 5 * time.Millisecond, + } + ct.client = func() error { + c := &http.Client{Transport: ct.tr} + var err error + var n int64 + const bodySize = 4 << 20 + if body { + _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize)) + } else { + _, err = c.Get("https://dummy.tld/") + } + if !isTimeout(err) { + t.Errorf("client expected timeout error; got %#v", err) + } + if body && n != bodySize { + t.Errorf("only read %d bytes of body; want %d", n, bodySize) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + switch f := f.(type) { + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + case *RSTStreamFrame: + if f.StreamID == 1 && f.ErrCode == ErrCodeCancel { + return nil + } + } + } + } + ct.run() +} + +func TestTransportDisableCompression(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + want := http.Header{ + "User-Agent": []string{"Go-http-client/2.0"}, + } + if !reflect.DeepEqual(r.Header, want) { + t.Errorf("request headers = %v; want %v", r.Header, want) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + t1: &http.Transport{ + DisableCompression: true, + }, + } + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() +} + +// RFC 7540 section 8.1.2.2 +func TestTransportRejectsConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + tests := []struct { + key string + value []string + want string + }{ + { + key: "Upgrade", + value: []string{"anything"}, + want: "ERROR: http2: invalid Upgrade request header: [\"anything\"]", + }, + { + key: "Connection", + value: []string{"foo"}, + want: "ERROR: http2: invalid Connection request header: [\"foo\"]", + }, + { + key: "Connection", + value: []string{"close"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Connection", + value: []string{"close", "something-else"}, + want: "ERROR: http2: invalid Connection request header: [\"close\" \"something-else\"]", + }, + { + key: "Connection", + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Proxy-Connection", // just deleted and ignored + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{""}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{"foo"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"foo\"]", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked", "other"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"chunked\" \"other\"]", + }, + { + key: "Content-Length", + value: []string{"123"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Keep-Alive", + value: []string{"doop"}, + want: "Accept-Encoding,User-Agent", + }, + } + + for _, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header[tt.key] = tt.value + res, err := tr.RoundTrip(req) + var got string + if err != nil { + got = fmt.Sprintf("ERROR: %v", err) + } else { + got = res.Header.Get("Got-Header") + res.Body.Close() + } + if got != tt.want { + t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want) + } + } +} + +// golang.org/issue/14048 +func TestTransportFailsOnInvalidHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tests := [...]struct { + h http.Header + wantErr string + }{ + 0: { + h: http.Header{"with space": {"foo"}}, + wantErr: `invalid HTTP header name "with space"`, + }, + 1: { + h: http.Header{"name": {"Брэд"}}, + wantErr: "", // okay + }, + 2: { + h: http.Header{"имя": {"Brad"}}, + wantErr: `invalid HTTP header name "имя"`, + }, + 3: { + h: http.Header{"foo": {"foo\x01bar"}}, + wantErr: `invalid HTTP header value "foo\x01bar" for header "foo"`, + }, + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header = tt.h + res, err := tr.RoundTrip(req) + var bad bool + if tt.wantErr == "" { + if err != nil { + bad = true + t.Errorf("case %d: error = %v; want no error", i, err) + } + } else { + if !strings.Contains(fmt.Sprint(err), tt.wantErr) { + bad = true + t.Errorf("case %d: error = %v; want error %q", i, err, tt.wantErr) + } + } + if err == nil { + if bad { + t.Logf("case %d: server got headers %q", i, res.Header.Get("Got-Header")) + } + res.Body.Close() + } + } +} + +// Tests that gzipReader doesn't crash on a second Read call following +// the first Read call's gzip.NewReader returning an error. +func TestGzipReader_DoubleReadCrash(t *testing.T) { + gz := &gzipReader{ + body: ioutil.NopCloser(strings.NewReader("0123456789")), + } + var buf [1]byte + n, err1 := gz.Read(buf[:]) + if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") { + t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1) + } + n, err2 := gz.Read(buf[:]) + if n != 0 || err2 != err1 { + t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1) + } +} + +func TestTransportNewTLSConfig(t *testing.T) { + tests := [...]struct { + conf *tls.Config + host string + want *tls.Config + }{ + // Normal case. + 0: { + conf: nil, + host: "foo.com", + want: &tls.Config{ + ServerName: "foo.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // User-provided name (bar.com) takes precedence: + 1: { + conf: &tls.Config{ + ServerName: "bar.com", + }, + host: "foo.com", + want: &tls.Config{ + ServerName: "bar.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // NextProto is prepended: + 2: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar"}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{NextProtoTLS, "foo", "bar"}, + }, + }, + + // NextProto is not duplicated: + 3: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + }, + } + for i, tt := range tests { + // Ignore the session ticket keys part, which ends up populating + // unexported fields in the Config: + if tt.conf != nil { + tt.conf.SessionTicketsDisabled = true + } + + tr := &Transport{TLSClientConfig: tt.conf} + got := tr.newTLSConfig(tt.host) + + got.SessionTicketsDisabled = false + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. got %#v; want %#v", i, got, tt.want) + } + } +} + +// The Google GFE responds to HEAD requests with a HEADERS frame +// without END_STREAM, followed by a 0-length DATA frame with +// END_STREAM. Make sure we don't get confused by that. (We did.) +func TestTransportReadHeadResponse(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + if res.ContentLength != 123 { + return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("ReadAll: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, // as the GFE does + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(hf.StreamID, true, nil) + + <-clientDone + return nil + } + } + ct.run() +} + +func TestTransportReadHeadResponseWithBody(t *testing.T) { + // This test use not valid response format. + // Discarding logger output to not spam tests output. + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + response := "redirecting to /elsewhere" + ct := newClientTester(t) + clientDone := make(chan struct{}) + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + if res.ContentLength != int64(len(response)) { + return fmt.Errorf("Content-Length = %d; want %d", res.ContentLength, len(response)) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("ReadAll: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: strconv.Itoa(len(response))}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(hf.StreamID, true, []byte(response)) + + <-clientDone + return nil + } + } + ct.run() +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (int, error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// golang.org/issue/15425: test that a handler closing the request +// body doesn't terminate the stream to the peer. (It just stops +// readability from the handler's side, and eventually the client +// runs out of flow control tokens) +func TestTransportHandlerBodyClose(t *testing.T) { + const bodySize = 10 << 20 + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + r.Body.Close() + io.Copy(w, io.LimitReader(neverEnding('A'), bodySize)) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + g0 := runtime.NumGoroutine() + + const numReq = 10 + for i := 0; i < numReq; i++ { + req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + if n != bodySize || err != nil { + t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize) + } + } + tr.CloseIdleConnections() + + gd := runtime.NumGoroutine() - g0 + if gd > numReq/2 { + t.Errorf("appeared to leak goroutines") + } + +} + +// https://golang.org/issue/15930 +func TestTransportFlowControl(t *testing.T) { + const bufLen = 64 << 10 + var total int64 = 100 << 20 // 100MB + if testing.Short() { + total = 10 << 20 + } + + var wrote int64 // updated atomically + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + b := make([]byte, bufLen) + for wrote < total { + n, err := w.Write(b) + atomic.AddInt64(&wrote, int64(n)) + if err != nil { + t.Errorf("ResponseWriter.Write error: %v", err) + break + } + w.(http.Flusher).Flush() + } + }, optOnlyServer) + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal("NewRequest error:", err) + } + resp, err := tr.RoundTrip(req) + if err != nil { + t.Fatal("RoundTrip error:", err) + } + defer resp.Body.Close() + + var read int64 + b := make([]byte, bufLen) + for { + n, err := resp.Body.Read(b) + if err == io.EOF { + break + } + if err != nil { + t.Fatal("Read error:", err) + } + read += int64(n) + + const max = transportDefaultStreamFlow + if w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max { + t.Fatalf("Too much data inflight: server wrote %v bytes but client only received %v", w, read) + } + + // Let the server get ahead of the client. + time.Sleep(1 * time.Millisecond) + } +} + +// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make +// the Transport remember it and return it back to users (via +// RoundTrip or request body reads) if needed (e.g. if the server +// proceeds to close the TCP connection before the client gets its +// response) +func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) { + testTransportUsesGoAwayDebugError(t, false) +} + +func TestTransportUsesGoAwayDebugError_Body(t *testing.T) { + testTransportUsesGoAwayDebugError(t, true) +} + +func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const goAwayErrCode = ErrCodeHTTP11Required // arbitrary + const goAwayDebugData = "some debug data" + + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if failMidBody { + if err != nil { + return fmt.Errorf("unexpected client RoundTrip error: %v", err) + } + _, err = io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + } + want := GoAwayError{ + LastStreamID: 5, + ErrCode: goAwayErrCode, + DebugData: goAwayDebugData, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + if failMidBody { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + // Write two GOAWAY frames, to test that the Transport takes + // the interesting parts of both. + ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData)) + ct.fr.WriteGoAway(5, goAwayErrCode, nil) + ct.sc.(*net.TCPConn).CloseWrite() + <-clientDone + return nil + } + } + ct.run() +} + +func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) { + ct := newClientTester(t) + + clientClosed := make(chan struct{}) + serverWroteFirstByte := make(chan struct{}) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + <-serverWroteFirstByte + + if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 { + return fmt.Errorf("body read = %v, %v; want 1, nil", n, err) + } + res.Body.Close() // leaving 4999 bytes unread + close(clientClosed) + + return nil + } + ct.server = func() error { + ct.greet() + + var hf *HeadersFrame + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + var ok bool + hf, ok = f.(*HeadersFrame) + if !ok { + return fmt.Errorf("Got %T; want HeadersFrame", f) + } + break + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + // Two cases: + // - Send one DATA frame with 5000 bytes. + // - Send two DATA frames with 1 and 4999 bytes each. + // + // In both cases, the client should consume one byte of data, + // refund that byte, then refund the following 4999 bytes. + // + // In the second case, the server waits for the client connection to + // close before seconding the second DATA frame. This tests the case + // where the client receives a DATA frame after it has reset the stream. + if oneDataFrame { + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000)) + close(serverWroteFirstByte) + <-clientClosed + } else { + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1)) + close(serverWroteFirstByte) + <-clientClosed + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999)) + } + + waitingFor := "RSTStreamFrame" + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for %s: %v", waitingFor, err) + } + if _, ok := f.(*SettingsFrame); ok { + continue + } + switch waitingFor { + case "RSTStreamFrame": + if rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel { + return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f)) + } + waitingFor = "WindowUpdateFrame" + case "WindowUpdateFrame": + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 { + return fmt.Errorf("Expected WindowUpdateFrame for 4999 bytes; got %v", summarizeFrame(f)) + } + return nil + } + } + } + ct.run() +} + +// See golang.org/issue/16481 +func TestTransportReturnsUnusedFlowControlSingleWrite(t *testing.T) { + testTransportReturnsUnusedFlowControl(t, true) +} + +// See golang.org/issue/20469 +func TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) { + testTransportReturnsUnusedFlowControl(t, false) +} + +// Issue 16612: adjust flow control on open streams when transport +// receives SETTINGS with INITIAL_WINDOW_SIZE from server. +func TestTransportAdjustsFlowControl(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const bodySize = 1 << 20 + + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + + req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + res.Body.Close() + return nil + } + ct.server = func() error { + _, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface))) + if err != nil { + return fmt.Errorf("reading client preface: %v", err) + } + + var gotBytes int64 + var sentSettings bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + return nil + default: + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + } + switch f := f.(type) { + case *DataFrame: + gotBytes += int64(len(f.Data())) + // After we've got half the client's + // initial flow control window's worth + // of request body data, give it just + // enough flow control to finish. + if gotBytes >= initialWindowSize/2 && !sentSettings { + sentSettings = true + + ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize}) + ct.fr.WriteWindowUpdate(0, bodySize) + ct.fr.WriteSettingsAck() + } + + if f.StreamEnded() { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + } + } + } + ct.run() +} + +// See golang.org/issue/16556 +func TestTransportReturnsDataPaddingFlowControl(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool, 1) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + defer res.Body.Close() + <-unblockClient + return nil + } + ct.server = func() error { + ct.greet() + + var hf *HeadersFrame + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + var ok bool + hf, ok = f.(*HeadersFrame) + if !ok { + return fmt.Errorf("Got %T; want HeadersFrame", f) + } + break + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + pad := make([]byte, 5) + ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream + + f, err := ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for first WindowUpdateFrame: %v", err) + } + wantBack := uint32(len(pad)) + 1 // one byte for the length of the padding + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID != 0 { + return fmt.Errorf("Expected conn WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + + f, err = ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for second WindowUpdateFrame: %v", err) + } + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID == 0 { + return fmt.Errorf("Expected stream WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + unblockClient <- true + return nil + } + ct.run() +} + +// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a +// StreamError as a result of the response HEADERS +func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) { + ct := newClientTester(t) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err == nil { + res.Body.Close() + return errors.New("unexpected successful GET") + } + want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")} + if !reflect.DeepEqual(want, err) { + t.Errorf("RoundTrip error = %#v; want %#v", err, want) + } + return nil + } + ct.server = func() error { + ct.greet() + + hf, err := ct.firstHeaders() + if err != nil { + return err + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: " content-type", Value: "bogus"}) // bogus spaces + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + for { + fr, err := ct.readFrame() + if err != nil { + return fmt.Errorf("error waiting for RST_STREAM from client: %v", err) + } + if _, ok := fr.(*SettingsFrame); ok { + continue + } + if rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol { + t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr)) + } + break + } + + return nil + } + ct.run() +} + +// byteAndEOFReader returns is in an io.Reader which reads one byte +// (the underlying byte) and io.EOF at once in its Read call. +type byteAndEOFReader byte + +func (b byteAndEOFReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + panic("unexpected useless call") + } + p[0] = byte(b) + return 1, io.EOF +} + +// Issue 16788: the Transport had a regression where it started +// sending a spurious DATA frame with a duplicate END_STREAM bit after +// the request body writer goroutine had already read an EOF from the +// Request.Body and included the END_STREAM on a data-carrying DATA +// frame. +// +// Notably, to trigger this, the requests need to use a Request.Body +// which returns (non-0, io.EOF) and also needs to set the ContentLength +// explicitly. +func TestTransportBodyDoubleEndStream(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Nothing. + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i := 0; i < 2; i++ { + req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a')) + req.ContentLength = 1 + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatalf("failure on req %d: %v", i+1, err) + } + defer res.Body.Close() + } +} + +// golang.org/issue/16847, golang.org/issue/19103 +func TestTransportRequestPathPseudo(t *testing.T) { + type result struct { + path string + err string + } + tests := []struct { + req *http.Request + want result + }{ + 0: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "/foo", + }, + }, + want: result{path: "/foo"}, + }, + // In Go 1.7, we accepted paths of "//foo". + // In Go 1.8, we rejected it (issue 16847). + // In Go 1.9, we accepted it again (issue 19103). + 1: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "//foo", + }, + }, + want: result{path: "//foo"}, + }, + + // Opaque with //$Matching_Hostname/path + 2: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//foo.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque with some other Request.Host instead: + 3: { + req: &http.Request{ + Method: "GET", + Host: "bar.com", + URL: &url.URL{ + Scheme: "https", + Opaque: "//bar.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque without the leading "//": + 4: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Opaque: "/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque we can't handle: + 5: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//unknown_host/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{err: `invalid request :path "https://unknown_host/path" from URL.Opaque = "//unknown_host/path"`}, + }, + + // A CONNECT request: + 6: { + req: &http.Request{ + Method: "CONNECT", + URL: &url.URL{ + Host: "foo.com", + }, + }, + want: result{}, + }, + } + for i, tt := range tests { + cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} + cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.mu.Lock() + hdrs, err := cc.encodeHeaders(tt.req, false, "", -1) + cc.mu.Unlock() + var got result + hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) { + if f.Name == ":path" { + got.path = f.Value + } + }) + if err != nil { + got.err = err.Error() + } else if len(hdrs) > 0 { + if _, err := hpackDec.Write(hdrs); err != nil { + t.Errorf("%d. bogus hpack: %v", i, err) + continue + } + } + if got != tt.want { + t.Errorf("%d. got %+v; want %+v", i, got, tt.want) + } + + } + +} + +// golang.org/issue/17071 -- don't sniff the first byte of the request body +// before we've determined that the ClientConn is usable. +func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) { + const body = "foo" + req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body))) + cc := &ClientConn{ + closed: true, + } + _, err := cc.RoundTrip(req) + if err != errClientConnUnusable { + t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err) + } + slurp, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ReadAll = %v", err) + } + if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func TestClientConnPing(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cc, err := tr.dialClientConn(st.ts.Listener.Addr().String(), false) + if err != nil { + t.Fatal(err) + } + if err = cc.Ping(testContext{}); err != nil { + t.Fatal(err) + } +} + +// Issue 16974: if the server sent a DATA frame after the user +// canceled the Transport's Request, the Transport previously wrote to a +// closed pipe, got an error, and ended up closing the whole TCP +// connection. +func TestTransportCancelDataResponseRace(t *testing.T) { + cancel := make(chan struct{}) + clientGotError := make(chan bool, 1) + + const msg = "Hello." + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/hello") { + time.Sleep(50 * time.Millisecond) + io.WriteString(w, msg) + return + } + for i := 0; i < 50; i++ { + io.WriteString(w, "Some data.") + w.(http.Flusher).Flush() + if i == 2 { + close(cancel) + <-clientGotError + } + time.Sleep(10 * time.Millisecond) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Cancel = cancel + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(ioutil.Discard, res.Body); err == nil { + t.Fatal("unexpected success") + } + clientGotError <- true + + res, err = c.Get(st.ts.URL + "/hello") + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != msg { + t.Errorf("Got = %q; want %q", slurp, msg) + } +} + +// Issue 21316: It should be safe to reuse an http.Request after the +// request has completed. +func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + io.WriteString(w, "body") + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("GET", st.ts.URL, nil) + resp, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil { + t.Fatalf("error reading response body: %v", err) + } + if err := resp.Body.Close(); err != nil { + t.Fatalf("error closing response body: %v", err) + } + + // This access of req.Header should not race with code in the transport. + req.Header = http.Header{} +} + +func TestTransportRetryAfterGOAWAY(t *testing.T) { + var dialer struct { + sync.Mutex + count int + } + ct1 := make(chan *clientTester) + ct2 := make(chan *clientTester) + + ln := newLocalListener(t) + defer ln.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + tr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialer.Lock() + defer dialer.Unlock() + dialer.count++ + if dialer.count == 3 { + return nil, errors.New("unexpected number of dials") + } + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + return nil, fmt.Errorf("dial error: %v", err) + } + sc, err := ln.Accept() + if err != nil { + return nil, fmt.Errorf("accept error: %v", err) + } + ct := &clientTester{ + t: t, + tr: tr, + cc: cc, + sc: sc, + fr: NewFramer(sc, sc), + } + switch dialer.count { + case 1: + ct1 <- ct + case 2: + ct2 <- ct + } + return cc, nil + } + + errs := make(chan error, 3) + done := make(chan struct{}) + defer close(done) + + // Client. + go func() { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := tr.RoundTrip(req) + if res != nil { + res.Body.Close() + if got := res.Header.Get("Foo"); got != "bar" { + err = fmt.Errorf("foo header = %q; want bar", got) + } + } + if err != nil { + err = fmt.Errorf("RoundTrip: %v", err) + } + errs <- err + }() + + connToClose := make(chan io.Closer, 2) + + // Server for the first request. + go func() { + var ct *clientTester + select { + case ct = <-ct1: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server1 failed reading HEADERS: %v", err) + return + } + t.Logf("server1 got %v", hf) + if err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil { + errs <- fmt.Errorf("server1 failed writing GOAWAY: %v", err) + return + } + errs <- nil + }() + + // Server for the second request. + go func() { + var ct *clientTester + select { + case ct = <-ct2: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server2 failed reading HEADERS: %v", err) + return + } + t.Logf("server2 got %v", hf) + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + err = ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + if err != nil { + errs <- fmt.Errorf("server2 failed writing response HEADERS: %v", err) + } else { + errs <- nil + } + }() + + for k := 0; k < 3; k++ { + select { + case err := <-errs: + if err != nil { + t.Error(err) + } + case <-time.After(1 * time.Second): + t.Errorf("timed out") + } + } + + for { + select { + case c := <-connToClose: + c.Close() + default: + return + } + } +} + +func TestTransportRetryAfterRefusedStream(t *testing.T) { + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + resp, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + resp.Body.Close() + if resp.StatusCode != 204 { + return fmt.Errorf("Status = %v; want 204", resp.StatusCode) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + nreq := 0 + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + nreq++ + if nreq == 1 { + ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) + } else { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +func TestTransportRetryHasLimit(t *testing.T) { + // Skip in short mode because the total expected delay is 1s+2s+4s+8s+16s=29s. + if testing.Short() { + t.Skip("skipping long test in short mode") + } + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + resp, err := ct.tr.RoundTrip(req) + if err == nil { + return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) + } + t.Logf("expected error, got: %v", err) + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +func TestTransportResponseDataBeforeHeaders(t *testing.T) { + // This test use not valid response format. + // Discarding logger output to not spam tests output. + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + req := httptest.NewRequest("GET", "https://dummy.tld/", nil) + // First request is normal to ensure the check is per stream and not per connection. + _, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip expected no error, got: %v", err) + } + // Second request returns a DATA frame with no HEADERS. + resp, err := ct.tr.RoundTrip(req) + if err == nil { + return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) + } + if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol { + return fmt.Errorf("expected stream PROTOCOL_ERROR, got: %v", err) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + switch f.StreamID { + case 1: + // Send a valid response to first request. + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + case 3: + ct.fr.WriteData(f.StreamID, true, []byte("payload")) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} +func TestTransportRequestsStallAtServerLimit(t *testing.T) { + const maxConcurrent = 2 + + greet := make(chan struct{}) // server sends initial SETTINGS frame + gotRequest := make(chan struct{}) // server received a request + clientDone := make(chan struct{}) + + // Collect errors from goroutines. + var wg sync.WaitGroup + errs := make(chan error, 100) + defer func() { + wg.Wait() + close(errs) + for err := range errs { + t.Error(err) + } + }() + + // We will send maxConcurrent+2 requests. This checker goroutine waits for the + // following stages: + // 1. The first maxConcurrent requests are received by the server. + // 2. The client will cancel the next request + // 3. The server is unblocked so it can service the first maxConcurrent requests + // 4. The client will send the final request + wg.Add(1) + unblockClient := make(chan struct{}) + clientRequestCancelled := make(chan struct{}) + unblockServer := make(chan struct{}) + go func() { + defer wg.Done() + // Stage 1. + for k := 0; k < maxConcurrent; k++ { + <-gotRequest + } + // Stage 2. + close(unblockClient) + <-clientRequestCancelled + // Stage 3: give some time for the final RoundTrip call to be scheduled and + // verify that the final request is not sent. + time.Sleep(50 * time.Millisecond) + select { + case <-gotRequest: + errs <- errors.New("last request did not stall") + close(unblockServer) + return + default: + } + close(unblockServer) + // Stage 4. + <-gotRequest + }() + + ct := newClientTester(t) + ct.client = func() error { + var wg sync.WaitGroup + defer func() { + wg.Wait() + close(clientDone) + ct.cc.(*net.TCPConn).CloseWrite() + }() + for k := 0; k < maxConcurrent+2; k++ { + wg.Add(1) + go func(k int) { + defer wg.Done() + // Don't send the second request until after receiving SETTINGS from the server + // to avoid a race where we use the default SettingMaxConcurrentStreams, which + // is much larger than maxConcurrent. We have to send the first request before + // waiting because the first request triggers the dial and greet. + if k > 0 { + <-greet + } + // Block until maxConcurrent requests are sent before sending any more. + if k >= maxConcurrent { + <-unblockClient + } + req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), nil) + if k == maxConcurrent { + // This request will be canceled. + cancel := make(chan struct{}) + req.Cancel = cancel + close(cancel) + _, err := ct.tr.RoundTrip(req) + close(clientRequestCancelled) + if err == nil { + errs <- fmt.Errorf("RoundTrip(%d) should have failed due to cancel", k) + return + } + } else { + resp, err := ct.tr.RoundTrip(req) + if err != nil { + errs <- fmt.Errorf("RoundTrip(%d): %v", k, err) + return + } + ioutil.ReadAll(resp.Body) + resp.Body.Close() + if resp.StatusCode != 204 { + errs <- fmt.Errorf("Status = %v; want 204", resp.StatusCode) + return + } + } + }(k) + } + return nil + } + + ct.server = func() error { + var wg sync.WaitGroup + defer wg.Wait() + + ct.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent}) + + // Server write loop. + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + writeResp := make(chan uint32, maxConcurrent+1) + + wg.Add(1) + go func() { + defer wg.Done() + <-unblockServer + for id := range writeResp { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: id, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + }() + + // Server read loop. + var nreq int + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it will have reported any errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame: + case *SettingsFrame: + // Wait for the client SETTINGS ack until ending the greet. + close(greet) + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + gotRequest <- struct{}{} + nreq++ + writeResp <- f.StreamID + if nreq == maxConcurrent+1 { + close(writeResp) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + + ct.run() +} + +func TestAuthorityAddr(t *testing.T) { + tests := []struct { + scheme, authority string + want string + }{ + {"http", "foo.com", "foo.com:80"}, + {"https", "foo.com", "foo.com:443"}, + {"https", "foo.com:1234", "foo.com:1234"}, + {"https", "1.2.3.4:1234", "1.2.3.4:1234"}, + {"https", "1.2.3.4", "1.2.3.4:443"}, + {"https", "[::1]:1234", "[::1]:1234"}, + {"https", "[::1]", "[::1]:443"}, + } + for _, tt := range tests { + got := authorityAddr(tt.scheme, tt.authority) + if got != tt.want { + t.Errorf("authorityAddr(%q, %q) = %q; want %q", tt.scheme, tt.authority, got, tt.want) + } + } +} + +// Issue 20448: stop allocating for DATA frames' payload after +// Response.Body.Close is called. +func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) { + megabyteZero := make([]byte, 1<<20) + + writeErr := make(chan error, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + var sum int64 + for i := 0; i < 100; i++ { + n, err := w.Write(megabyteZero) + sum += int64(n) + if err != nil { + writeErr <- err + return + } + } + t.Logf("wrote all %d bytes", sum) + writeErr <- nil + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + var buf [1]byte + if _, err := res.Body.Read(buf[:]); err != nil { + t.Error(err) + } + if err := res.Body.Close(); err != nil { + t.Error(err) + } + + trb, ok := res.Body.(transportResponseBody) + if !ok { + t.Fatalf("res.Body = %T; want transportResponseBody", res.Body) + } + if trb.cs.bufPipe.b != nil { + t.Errorf("response body pipe is still open") + } + + gotErr := <-writeErr + if gotErr == nil { + t.Errorf("Handler unexpectedly managed to write its entire response without getting an error") + } else if gotErr != errStreamClosed { + t.Errorf("Handler Write err = %v; want errStreamClosed", gotErr) + } +} + +// Issue 18891: make sure Request.Body == NoBody means no DATA frame +// is ever sent, even if empty. +func TestTransportNoBodyMeansNoDATA(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", go18httpNoBody()) + ct.tr.RoundTrip(req) + <-unblockClient + return nil + } + ct.server = func() error { + defer close(unblockClient) + defer ct.cc.(*net.TCPConn).Close() + ct.greet() + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f := f.(type) { + default: + return fmt.Errorf("Got %T; want HeadersFrame", f) + case *WindowUpdateFrame, *SettingsFrame: + continue + case *HeadersFrame: + if !f.StreamEnded() { + return fmt.Errorf("got headers frame without END_STREAM") + } + return nil + } + } + } + ct.run() +} + +func benchSimpleRoundTrip(b *testing.B, nHeaders int) { + defer disableGoroutineTracking()() + b.ReportAllocs() + st := newServerTester(b, + func(w http.ResponseWriter, r *http.Request) { + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < nHeaders; i++ { + name := fmt.Sprint("A-", i) + req.Header.Set(name, "*") + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + res, err := tr.RoundTrip(req) + if err != nil { + if res != nil { + res.Body.Close() + } + b.Fatalf("RoundTrip err = %v; want nil", err) + } + res.Body.Close() + if res.StatusCode != http.StatusOK { + b.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK) + } + } +} + +type infiniteReader struct{} + +func (r infiniteReader) Read(b []byte) (int, error) { + return len(b), nil +} + +// Issue 20521: it is not an error to receive a response and end stream +// from the server without the body being consumed. +func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + // The request body needs to be big enough to trigger flow control. + req, _ := http.NewRequest("PUT", st.ts.URL, infiniteReader{}) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != http.StatusOK { + t.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK) + } +} + +// Verify transport doesn't crash when receiving bogus response lacking a :status header. +// Issue 22880. +func TestTransportHandlesInvalidStatuslessResponse(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + _, err := ct.tr.RoundTrip(req) + const substr = "malformed response from server: missing status pseudo header" + if !strings.Contains(fmt.Sprint(err), substr) { + return fmt.Errorf("RoundTrip error = %v; want substring %q", err, substr) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"}) // no :status header + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, // we'll send some DATA to try to crash the transport + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(f.StreamID, true, []byte("payload")) + return nil + } + } + } + ct.run() +} + +func BenchmarkClientRequestHeaders(b *testing.B) { + b.Run(" 0 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 0) }) + b.Run(" 10 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 10) }) + b.Run(" 100 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 100) }) + b.Run("1000 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 1000) }) +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 0000000..54ab4a8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,365 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. + panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + ctx.Flush() // ignore error: we're hanging up on them anyway + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. + const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. + endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // uppper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. + allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only only if k is in keys. +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 0000000..4fe3073 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,242 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. + // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. +func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. +func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 0000000..848fed6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. +func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this funcion returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. + w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." + parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority_test.go b/vendor/golang.org/x/net/http2/writesched_priority_test.go new file mode 100644 index 0000000..f2b535a --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_test.go @@ -0,0 +1,541 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "sort" + "testing" +) + +func defaultPriorityWriteScheduler() *priorityWriteScheduler { + return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler) +} + +func checkPriorityWellFormed(ws *priorityWriteScheduler) error { + for id, n := range ws.nodes { + if id != n.id { + return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id) + } + if n.parent == nil { + if n.next != nil || n.prev != nil { + return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id) + } + continue + } + found := false + for k := n.parent.kids; k != nil; k = k.next { + if k.id == id { + found = true + break + } + } + if !found { + return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id) + } + } + return nil +} + +func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string { + var ids []int + for _, n := range ws.nodes { + ids = append(ids, int(n.id)) + } + sort.Ints(ids) + + var buf bytes.Buffer + for _, id := range ids { + if buf.Len() != 0 { + buf.WriteString(" ") + } + if id == 0 { + buf.WriteString(fmtNode(&ws.root)) + } else { + buf.WriteString(fmtNode(ws.nodes[uint32(id)])) + } + } + return buf.String() +} + +func fmtNodeParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{parent:nil}", n.id) + default: + return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id) + } +} + +func fmtNodeWeightParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight) + default: + return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id) + } +} + +func TestPriorityTwoStreams(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + // Move 1's parent to 2. + ws.AdjustStream(1, PriorityParam{ + StreamDep: 2, + Weight: 32, + Exclusive: false, + }) + want = "1{weight:32,parent:2} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustExclusiveZero(t *testing.T) { + // 1, 2, and 3 are all children of the 0 stream. + // Exclusive reprioritization to any of the streams should bring + // the rest of the streams under the reprioritized stream. + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.OpenStream(3, OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + ws.AdjustStream(2, PriorityParam{ + StreamDep: 0, + Weight: 20, + Exclusive: true, + }) + want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustOwnParent(t *testing.T) { + // Assigning a node as its own parent should have no effect. + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.AdjustStream(2, PriorityParam{ + StreamDep: 2, + Weight: 20, + Exclusive: true, + }) + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + // Close the first three streams. We lose 1, but keep 2 and 3. + ws.CloseStream(1) + ws.CloseStream(2) + ws.CloseStream(3) + + want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } + + // Adding a stream as an exclusive child of 1 gives it default + // priorities, since 1 is gone. + ws.OpenStream(5, OpenStreamOptions{}) + ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true}) + + // Adding a stream as an exclusive child of 2 should work, since 2 is not gone. + ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true}) + + want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After add streams\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + + // Close the first two streams. We keep only 3. + ws.CloseStream(1) + ws.CloseStream(2) + + want := "3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + ws.OpenStream(5, OpenStreamOptions{}) + ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15}) + ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15}) + ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15}) + + want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + + want := "4{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. + // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. + // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func makeSection533Tree() *priorityWriteScheduler { + // Initial tree from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + return ws +} + +func TestPrioritySection533NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection533Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func checkPopAll(ws WriteScheduler, order []uint32) error { + for k, id := range order { + wr, ok := ws.Pop() + if !ok { + return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order) + } + if got := wr.StreamID(); got != id { + return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order) + } + } + wr, ok := ws.Pop() + if ok { + return fmt.Errorf("Pop[%d]: got %v, want ok=false (order=%v)", len(order), wr.StreamID(), order) + } + return nil +} + +func TestPriorityPopFrom533Tree(t *testing.T) { + ws := makeSection533Tree() + + ws.Push(makeWriteHeadersRequest(3 /*C*/)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteHeadersRequest(5 /*E*/)) + ws.Push(makeWriteHeadersRequest(1 /*A*/)) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil { + t.Error(err) + } +} + +func TestPriorityPopFromLinearTree(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, []uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil { + t.Error(err) + } +} + +func TestPriorityFlowControl(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 16} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil}) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // No flow-control bytes available. + if wr, ok := ws.Pop(); ok { + t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr) + } + + // Add enough flow-control bytes to write st2 in two Pop calls. + // Should write data from st2 even though it's lower priority than st1. + for i := 1; i <= 2; i++ { + st2.flow.add(8) + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(%d)=false, want true", i) + } + if got, want := wr.DataSize(), 8; got != want { + t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want) + } + } +} + +func TestPriorityThrottleOutOfOrderWrites(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 4096} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + st1.flow.add(4096) + st2.flow.add(4096) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // We have enough flow-control bytes to write st2 in a single Pop call. + // However, due to out-of-order write throttling, the first call should + // only write 1KB. + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(st2.first)=false, want true") + } + if got, want := wr.StreamID(), uint32(2); got != want { + t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 1024; got != want { + t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want) + } + + // Now add data on st1. This should take precedence. + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil}) + wr, ok = ws.Pop() + if !ok { + t.Fatalf("Pop(st1)=false, want true") + } + if got, want := wr.StreamID(), uint32(1); got != want { + t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 4096; got != want { + t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want) + } + + // Should go back to writing 1KB from st2. + wr, ok = ws.Pop() + if !ok { + t.Fatalf("Pop(st2.last)=false, want true") + } + if got, want := wr.StreamID(), uint32(2); got != want { + t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 1024; got != want { + t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want) + } +} + +func TestPriorityWeights(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + + sc := &serverConn{maxFrameSize: 8} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + st1.flow.add(40) + st2.flow.add(40) + + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil}) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil}) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34}) + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9}) + + // st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)). + // The maximum frame size is 8 bytes. The write sequence should be: + // st1, total bytes so far is (st1=8, st=0) + // st2, total bytes so far is (st1=8, st=8) + // st1, total bytes so far is (st1=16, st=8) + // st1, total bytes so far is (st1=24, st=8) // 3x bandwidth + // st1, total bytes so far is (st1=32, st=8) // 4x bandwidth + // st2, total bytes so far is (st1=32, st=16) // 2x bandwidth + // st1, total bytes so far is (st1=40, st=16) + // st2, total bytes so far is (st1=40, st=24) + // st2, total bytes so far is (st1=40, st=32) + // st2, total bytes so far is (st1=40, st=40) + if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil { + t.Error(err) + } +} + +func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 0, + MaxIdleNodesInTree: 0, + }) + ws.OpenStream(1, OpenStreamOptions{}) + ws.CloseStream(1) + ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)}) + ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)}) + + if err := checkPopAll(ws, []uint32{1, 2}); err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 0000000..36d7919 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle or closed, it's deleted from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. + for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/http2/writesched_random_test.go b/vendor/golang.org/x/net/http2/writesched_random_test.go new file mode 100644 index 0000000..3bf4aa3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random_test.go @@ -0,0 +1,44 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "testing" + +func TestRandomScheduler(t *testing.T) { + ws := NewRandomWriteScheduler() + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + + // Pop all frames. Should get the non-stream requests first, + // followed by the stream requests in any order. + var order []FrameWriteRequest + for { + wr, ok := ws.Pop() + if !ok { + break + } + order = append(order, wr) + } + t.Logf("got frames: %v", order) + if len(order) != 6 { + t.Fatalf("got %d frames, expected 6", len(order)) + } + if order[0].StreamID() != 0 || order[1].StreamID() != 0 { + t.Fatal("expected non-stream frames first", order[0], order[1]) + } + got := make(map[uint32]bool) + for _, wr := range order[2:] { + got[wr.StreamID()] = true + } + for id := uint32(1); id <= 4; id++ { + if !got[id] { + t.Errorf("frame not found for stream %d", id) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched_test.go b/vendor/golang.org/x/net/http2/writesched_test.go new file mode 100644 index 0000000..0807056 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_test.go @@ -0,0 +1,125 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "reflect" + "testing" +) + +func makeWriteNonStreamRequest() FrameWriteRequest { + return FrameWriteRequest{writeSettingsAck{}, nil, nil} +} + +func makeWriteHeadersRequest(streamID uint32) FrameWriteRequest { + st := &stream{id: streamID} + return FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil} +} + +func checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error { + consumed, rest, n := wr.Consume(nbytes) + var wantConsumed, wantRest FrameWriteRequest + switch len(want) { + case 0: + case 1: + wantConsumed = want[0] + case 2: + wantConsumed = want[0] + wantRest = want[1] + } + if !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) { + return fmt.Errorf("got %v, %v, %v\nwant %v, %v, %v", consumed, rest, n, wantConsumed, wantRest, len(want)) + } + return nil +} + +func TestFrameWriteRequestNonData(t *testing.T) { + wr := makeWriteNonStreamRequest() + if got, want := wr.DataSize(), 0; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // Non-DATA frames are always consumed whole. + if err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil { + t.Errorf("Consume:\n%v", err) + } +} + +func TestFrameWriteRequestData(t *testing.T) { + st := &stream{ + id: 1, + sc: &serverConn{maxFrameSize: 16}, + } + const size = 32 + wr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)} + if got, want := wr.DataSize(), size; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // No flow-control bytes available: cannot consume anything. + if err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); err != nil { + t.Errorf("Consume(limited by flow control):\n%v", err) + } + + // Add enough flow-control bytes to consume the entire frame, + // but we're now restricted by st.sc.maxFrameSize. + st.flow.add(size) + want := []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, st.sc.maxFrameSize), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(wr, math.MaxInt32, want); err != nil { + t.Errorf("Consume(limited by maxFrameSize):\n%v", err) + } + rest := want[1] + + // Consume 8 bytes from the remaining frame. + want = []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, 8), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(rest, 8, want); err != nil { + t.Errorf("Consume(8):\n%v", err) + } + rest = want[1] + + // Consume all remaining bytes. + want = []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(rest, math.MaxInt32, want); err != nil { + t.Errorf("Consume(remainder):\n%v", err) + } +} + +func TestFrameWriteRequest_StreamID(t *testing.T) { + const streamID = 123 + wr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)} + if got := wr.StreamID(); got != streamID { + t.Errorf("FrameWriteRequest(StreamError) = %v; want %v", got, streamID) + } +} diff --git a/vendor/golang.org/x/net/http2/z_spec_test.go b/vendor/golang.org/x/net/http2/z_spec_test.go new file mode 100644 index 0000000..610b2cd --- /dev/null +++ b/vendor/golang.org/x/net/http2/z_spec_test.go @@ -0,0 +1,356 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/xml" + "flag" + "fmt" + "io" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "testing" +) + +var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests") + +// The global map of sentence coverage for the http2 spec. +var defaultSpecCoverage specCoverage + +var loadSpecOnce sync.Once + +func loadSpec() { + if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil { + panic(err) + } else { + defaultSpecCoverage = readSpecCov(f) + f.Close() + } +} + +// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not +// "covered" will be included in report outputted by TestSpecCoverage. +func covers(sec, sentences string) { + loadSpecOnce.Do(loadSpec) + defaultSpecCoverage.cover(sec, sentences) +} + +type specPart struct { + section string + sentence string +} + +func (ss specPart) Less(oo specPart) bool { + atoi := func(s string) int { + n, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + return n + } + a := strings.Split(ss.section, ".") + b := strings.Split(oo.section, ".") + for len(a) > 0 { + if len(b) == 0 { + return false + } + x, y := atoi(a[0]), atoi(b[0]) + if x == y { + a, b = a[1:], b[1:] + continue + } + return x < y + } + if len(b) > 0 { + return true + } + return false +} + +type bySpecSection []specPart + +func (a bySpecSection) Len() int { return len(a) } +func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) } +func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type specCoverage struct { + coverage map[specPart]bool + d *xml.Decoder +} + +func joinSection(sec []int) string { + s := fmt.Sprintf("%d", sec[0]) + for _, n := range sec[1:] { + s = fmt.Sprintf("%s.%d", s, n) + } + return s +} + +func (sc specCoverage) readSection(sec []int) { + var ( + buf = new(bytes.Buffer) + sub = 0 + ) + for { + tk, err := sc.d.Token() + if err != nil { + if err == io.EOF { + return + } + panic(err) + } + switch v := tk.(type) { + case xml.StartElement: + if skipElement(v) { + if err := sc.d.Skip(); err != nil { + panic(err) + } + if v.Name.Local == "section" { + sub++ + } + break + } + switch v.Name.Local { + case "section": + sub++ + sc.readSection(append(sec, sub)) + case "xref": + buf.Write(sc.readXRef(v)) + } + case xml.CharData: + if len(sec) == 0 { + break + } + buf.Write(v) + case xml.EndElement: + if v.Name.Local == "section" { + sc.addSentences(joinSection(sec), buf.String()) + return + } + } + } +} + +func (sc specCoverage) readXRef(se xml.StartElement) []byte { + var b []byte + for { + tk, err := sc.d.Token() + if err != nil { + panic(err) + } + switch v := tk.(type) { + case xml.CharData: + if b != nil { + panic("unexpected CharData") + } + b = []byte(string(v)) + case xml.EndElement: + if v.Name.Local != "xref" { + panic("expected ") + } + if b != nil { + return b + } + sig := attrSig(se) + switch sig { + case "target": + return []byte(fmt.Sprintf("[%s]", attrValue(se, "target"))) + case "fmt-of,rel,target", "fmt-,,rel,target": + return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel"))) + case "fmt-of,sec,target", "fmt-,,sec,target": + return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target"))) + case "fmt-of,rel,sec,target": + return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel"))) + default: + panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se))) + } + default: + panic(fmt.Sprintf("unexpected tag %q", v)) + } + } +} + +var skipAnchor = map[string]bool{ + "intro": true, + "Overview": true, +} + +var skipTitle = map[string]bool{ + "Acknowledgements": true, + "Change Log": true, + "Document Organization": true, + "Conventions and Terminology": true, +} + +func skipElement(s xml.StartElement) bool { + switch s.Name.Local { + case "artwork": + return true + case "section": + for _, attr := range s.Attr { + switch attr.Name.Local { + case "anchor": + if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") { + return true + } + case "title": + if skipTitle[attr.Value] { + return true + } + } + } + } + return false +} + +func readSpecCov(r io.Reader) specCoverage { + sc := specCoverage{ + coverage: map[specPart]bool{}, + d: xml.NewDecoder(r)} + sc.readSection(nil) + return sc +} + +func (sc specCoverage) addSentences(sec string, sentence string) { + for _, s := range parseSentences(sentence) { + sc.coverage[specPart{sec, s}] = false + } +} + +func (sc specCoverage) cover(sec string, sentence string) { + for _, s := range parseSentences(sentence) { + p := specPart{sec, s} + if _, ok := sc.coverage[p]; !ok { + panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s)) + } + sc.coverage[specPart{sec, s}] = true + } + +} + +var whitespaceRx = regexp.MustCompile(`\s+`) + +func parseSentences(sens string) []string { + sens = strings.TrimSpace(sens) + if sens == "" { + return nil + } + ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ") + for i, s := range ss { + s = strings.TrimSpace(s) + if !strings.HasSuffix(s, ".") { + s += "." + } + ss[i] = s + } + return ss +} + +func TestSpecParseSentences(t *testing.T) { + tests := []struct { + ss string + want []string + }{ + {"Sentence 1. Sentence 2.", + []string{ + "Sentence 1.", + "Sentence 2.", + }}, + {"Sentence 1. \nSentence 2.\tSentence 3.", + []string{ + "Sentence 1.", + "Sentence 2.", + "Sentence 3.", + }}, + } + + for i, tt := range tests { + got := parseSentences(tt.ss) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d: got = %q, want %q", i, got, tt.want) + } + } +} + +func TestSpecCoverage(t *testing.T) { + if !*coverSpec { + t.Skip() + } + + loadSpecOnce.Do(loadSpec) + + var ( + list []specPart + cv = defaultSpecCoverage.coverage + total = len(cv) + complete = 0 + ) + + for sp, touched := range defaultSpecCoverage.coverage { + if touched { + complete++ + } else { + list = append(list, sp) + } + } + sort.Stable(bySpecSection(list)) + + if testing.Short() && len(list) > 5 { + list = list[:5] + } + + for _, p := range list { + t.Errorf("\tSECTION %s: %s", p.section, p.sentence) + } + + t.Logf("%d/%d (%d%%) sentences covered", complete, total, (complete/total)*100) +} + +func attrSig(se xml.StartElement) string { + var names []string + for _, attr := range se.Attr { + if attr.Name.Local == "fmt" { + names = append(names, "fmt-"+attr.Value) + } else { + names = append(names, attr.Name.Local) + } + } + sort.Strings(names) + return strings.Join(names, ",") +} + +func attrValue(se xml.StartElement, attr string) string { + for _, a := range se.Attr { + if a.Name.Local == attr { + return a.Value + } + } + panic("unknown attribute " + attr) +} + +func TestSpecPartLess(t *testing.T) { + tests := []struct { + sec1, sec2 string + want bool + }{ + {"6.2.1", "6.2", false}, + {"6.2", "6.2.1", true}, + {"6.10", "6.10.1", true}, + {"6.10", "6.1.1", false}, // 10, not 1 + {"6.1", "6.1", false}, // equal, so not less + } + for _, tt := range tests { + got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"}) + if got != tt.want { + t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go new file mode 100644 index 0000000..75db991 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/dstunreach.go @@ -0,0 +1,41 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A DstUnreach represents an ICMP destination unreachable message +// body. +type DstUnreach struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *DstUnreach) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DstUnreach) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseDstUnreach parses b as an ICMP destination unreachable message +// body. +func parseDstUnreach(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &DstUnreach{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go new file mode 100644 index 0000000..e6f15ef --- /dev/null +++ b/vendor/golang.org/x/net/icmp/echo.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Echo represents an ICMP echo request or reply message body. +type Echo struct { + ID int // identifier + Seq int // sequence number + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *Echo) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *Echo) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) + copy(b[4:], p.Data) + return b, nil +} + +// parseEcho parses b as an ICMP echo request or reply message body. +func parseEcho(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/endpoint.go b/vendor/golang.org/x/net/icmp/endpoint.go new file mode 100644 index 0000000..a68bfb0 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/endpoint.go @@ -0,0 +1,113 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + "runtime" + "syscall" + "time" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var _ net.PacketConn = &PacketConn{} + +// A PacketConn represents a packet network endpoint that uses either +// ICMPv4 or ICMPv6. +type PacketConn struct { + c net.PacketConn + p4 *ipv4.PacketConn + p6 *ipv6.PacketConn +} + +func (c *PacketConn) ok() bool { return c != nil && c.c != nil } + +// IPv4PacketConn returns the ipv4.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv4. +func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn { + if !c.ok() { + return nil + } + return c.p4 +} + +// IPv6PacketConn returns the ipv6.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv6. +func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn { + if !c.ok() { + return nil + } + return c.p6 +} + +// ReadFrom reads an ICMP message from the connection. +func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + // Please be informed that ipv4.NewPacketConn enables + // IP_STRIPHDR option by default on Darwin. + // See golang.org/issue/9395 for further information. + if runtime.GOOS == "darwin" && c.p4 != nil { + n, _, peer, err := c.p4.ReadFrom(b) + return n, peer, err + } + return c.c.ReadFrom(b) +} + +// WriteTo writes the ICMP message b to dst. +// Dst must be net.UDPAddr when c is a non-privileged +// datagram-oriented ICMP endpoint. Otherwise it must be net.IPAddr. +func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.c.WriteTo(b, dst) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.Close() +} + +// LocalAddr returns the local network address. +func (c *PacketConn) LocalAddr() net.Addr { + if !c.ok() { + return nil + } + return c.c.LocalAddr() +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetWriteDeadline(t) +} diff --git a/vendor/golang.org/x/net/icmp/example_test.go b/vendor/golang.org/x/net/icmp/example_test.go new file mode 100644 index 0000000..1df4cec --- /dev/null +++ b/vendor/golang.org/x/net/icmp/example_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "log" + "net" + "os" + "runtime" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExamplePacketConn_nonPrivilegedPing() { + switch runtime.GOOS { + case "darwin": + case "linux": + log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + log.Println("not supported on", runtime.GOOS) + return + } + + c, err := icmp.ListenPacket("udp6", "fe80::1%en0") + if err != nil { + log.Fatal(err) + } + defer c.Close() + + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1, + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("ff02::1"), Zone: "en0"}); err != nil { + log.Fatal(err) + } + + rb := make([]byte, 1500) + n, peer, err := c.ReadFrom(rb) + if err != nil { + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + switch rm.Type { + case ipv6.ICMPTypeEchoReply: + log.Printf("got reflection from %v", peer) + default: + log.Printf("got %+v; want echo reply", rm) + } +} diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go new file mode 100644 index 0000000..402a751 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Extension represents an ICMP extension. +type Extension interface { + // Len returns the length of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +const extensionVersion = 2 + +func validExtensionHeader(b []byte) bool { + v := int(b[0]&0xf0) >> 4 + s := binary.BigEndian.Uint16(b[2:4]) + if s != 0 { + s = checksum(b) + } + if v != extensionVersion || s != 0 { + return false + } + return true +} + +// parseExtensions parses b as a list of ICMP extensions. +// The length attribute l must be the length attribute field in +// received icmp messages. +// +// It will return a list of ICMP extensions and an adjusted length +// attribute that represents the length of the padded original +// datagram field. Otherwise, it returns an error. +func parseExtensions(b []byte, l int) ([]Extension, int, error) { + // Still a lot of non-RFC 4884 compliant implementations are + // out there. Set the length attribute l to 128 when it looks + // inappropriate for backwards compatibility. + // + // A minimal extension at least requires 8 octets; 4 octets + // for an extension header, and 4 octets for a single object + // header. + // + // See RFC 4884 for further information. + if 128 > l || l+8 > len(b) { + l = 128 + } + if l+8 > len(b) { + return nil, -1, errNoExtension + } + if !validExtensionHeader(b[l:]) { + if l == 128 { + return nil, -1, errNoExtension + } + l = 128 + if !validExtensionHeader(b[l:]) { + return nil, -1, errNoExtension + } + } + var exts []Extension + for b = b[l+4:]; len(b) >= 4; { + ol := int(binary.BigEndian.Uint16(b[:2])) + if 4 > ol || ol > len(b) { + break + } + switch b[2] { + case classMPLSLabelStack: + ext, err := parseMPLSLabelStack(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + case classInterfaceInfo: + ext, err := parseInterfaceInfo(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + } + b = b[ol:] + } + return exts, l, nil +} diff --git a/vendor/golang.org/x/net/icmp/extension_test.go b/vendor/golang.org/x/net/icmp/extension_test.go new file mode 100644 index 0000000..0b3f7b9 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension_test.go @@ -0,0 +1,259 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/internal/iana" +) + +var marshalAndParseExtensionTests = []struct { + proto int + hdr []byte + obj []byte + exts []Extension +}{ + // MPLS label stack with no label + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x01, 0x01, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + }, + }, + }, + // MPLS label stack with a single label + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x08, 0x01, 0x01, + 0x03, 0xe8, 0xe9, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // MPLS label stack with multiple labels + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x01, 0x01, + 0x03, 0xe8, 0xde, 0xfe, + 0x03, 0xe8, 0xe1, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16013, + TC: 0x7, + S: false, + TTL: 254, + }, + { + Label: 16014, + TC: 0, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // Interface information with no attribute + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x02, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + }, + }, + }, + // Interface information with ifIndex and name + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x10, 0x02, 0x0a, + 0x00, 0x00, 0x00, 0x10, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0a, + Interface: &net.Interface{ + Index: 16, + Name: "en101", + }, + }, + }, + }, + // Interface information with ifIndex, IPAddr, name and MTU + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x28, 0x02, 0x0f, + 0x00, 0x00, 0x00, 0x0f, + 0x00, 0x02, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, +} + +func TestMarshalAndParseExtension(t *testing.T) { + for i, tt := range marshalAndParseExtensionTests { + for j, ext := range tt.exts { + var err error + var b []byte + switch ext := ext.(type) { + case *MPLSLabelStack: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + case *InterfaceInfo: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + } + if !reflect.DeepEqual(b, tt.obj) { + t.Errorf("#%v/%v: got %#v; want %#v", i, j, b, tt.obj) + continue + } + } + + for j, wire := range []struct { + data []byte // original datagram + inlattr int // length of padded original datagram, a hint + outlattr int // length of padded original datagram, a want + err error + }{ + {nil, 0, -1, errNoExtension}, + {make([]byte, 127), 128, -1, errNoExtension}, + + {make([]byte, 128), 127, -1, errNoExtension}, + {make([]byte, 128), 128, -1, errNoExtension}, + {make([]byte, 128), 129, -1, errNoExtension}, + + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 127, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 128, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 129, 128, nil}, + + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 511, -1, errNoExtension}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 512, 512, nil}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 513, -1, errNoExtension}, + } { + exts, l, err := parseExtensions(wire.data, wire.inlattr) + if err != wire.err { + t.Errorf("#%v/%v: got %v; want %v", i, j, err, wire.err) + continue + } + if wire.err != nil { + continue + } + if l != wire.outlattr { + t.Errorf("#%v/%v: got %v; want %v", i, j, l, wire.outlattr) + } + if !reflect.DeepEqual(exts, tt.exts) { + for j, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + want := tt.exts[j].(*MPLSLabelStack) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + case *InterfaceInfo: + want := tt.exts[j].(*InterfaceInfo) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + } + } + continue + } + } + } +} + +var parseInterfaceNameTests = []struct { + b []byte + error +}{ + {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, + {[]byte{4, 'e', 'n', '0'}, nil}, + {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, + {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, +} + +func TestParseInterfaceName(t *testing.T) { + ifi := InterfaceInfo{Interface: &net.Interface{}} + for i, tt := range parseInterfaceNameTests { + if _, err := ifi.parseName(tt.b); err != tt.error { + t.Errorf("#%d: got %v; want %v", i, err, tt.error) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go new file mode 100644 index 0000000..398fd38 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/helper_posix.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "strconv" + "syscall" +) + +func sockaddr(family int, address string) (syscall.Sockaddr, error) { + switch family { + case syscall.AF_INET: + a, err := net.ResolveIPAddr("ip4", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv4zero + } + if a.IP = a.IP.To4(); a.IP == nil { + return nil, net.InvalidAddrError("non-ipv4 address") + } + sa := &syscall.SockaddrInet4{} + copy(sa.Addr[:], a.IP) + return sa, nil + case syscall.AF_INET6: + a, err := net.ResolveIPAddr("ip6", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv6unspecified + } + if a.IP.Equal(net.IPv4zero) { + a.IP = net.IPv6unspecified + } + if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil { + return nil, net.InvalidAddrError("non-ipv6 address") + } + sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)} + copy(sa.Addr[:], a.IP) + return sa, nil + default: + return nil, net.InvalidAddrError("unexpected family") + } +} + +func zoneToUint32(zone string) uint32 { + if zone == "" { + return 0 + } + if ifi, err := net.InterfaceByName(zone); err == nil { + return uint32(ifi.Index) + } + n, err := strconv.Atoi(zone) + if err != nil { + return 0 + } + return uint32(n) +} + +func last(s string, b byte) int { + i := len(s) + for i--; i >= 0; i-- { + if s[i] == b { + break + } + } + return i +} diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go new file mode 100644 index 0000000..78b5b98 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/interface.go @@ -0,0 +1,236 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "strings" + + "golang.org/x/net/internal/iana" +) + +const ( + classInterfaceInfo = 2 + + afiIPv4 = 1 + afiIPv6 = 2 +) + +const ( + attrMTU = 1 << iota + attrName + attrIPAddr + attrIfIndex +) + +// An InterfaceInfo represents interface and next-hop identification. +type InterfaceInfo struct { + Class int // extension object class number + Type int // extension object sub-type + Interface *net.Interface + Addr *net.IPAddr +} + +func (ifi *InterfaceInfo) nameLen() int { + if len(ifi.Interface.Name) > 63 { + return 64 + } + l := 1 + len(ifi.Interface.Name) + return (l + 3) &^ 3 +} + +func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) { + l = 4 + if ifi.Interface != nil && ifi.Interface.Index > 0 { + attrs |= attrIfIndex + l += 4 + if len(ifi.Interface.Name) > 0 { + attrs |= attrName + l += ifi.nameLen() + } + if ifi.Interface.MTU > 0 { + attrs |= attrMTU + l += 4 + } + } + if ifi.Addr != nil { + switch proto { + case iana.ProtocolICMP: + if ifi.Addr.IP.To4() != nil { + attrs |= attrIPAddr + l += 4 + net.IPv4len + } + case iana.ProtocolIPv6ICMP: + if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + attrs |= attrIPAddr + l += 4 + net.IPv6len + } + } + } + return +} + +// Len implements the Len method of Extension interface. +func (ifi *InterfaceInfo) Len(proto int) int { + _, l := ifi.attrsAndLen(proto) + return l +} + +// Marshal implements the Marshal method of Extension interface. +func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) { + attrs, l := ifi.attrsAndLen(proto) + b := make([]byte, l) + if err := ifi.marshal(proto, b, attrs, l); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceInfo, byte(ifi.Type) + for b = b[4:]; len(b) > 0 && attrs != 0; { + switch { + case attrs&attrIfIndex != 0: + b = ifi.marshalIfIndex(proto, b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b = ifi.marshalIPAddr(proto, b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b = ifi.marshalName(proto, b) + attrs &^= attrName + case attrs&attrMTU != 0: + b = ifi.marshalMTU(proto, b) + attrs &^= attrMTU + } + } + return nil +} + +func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { + switch proto { + case iana.ProtocolICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4)) + copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) + b = b[4+net.IPv4len:] + case iana.ProtocolIPv6ICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6)) + copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) + b = b[4+net.IPv6len:] + } + return b +} + +func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + afi := int(binary.BigEndian.Uint16(b[:2])) + b = b[4:] + switch afi { + case afiIPv4: + if len(b) < net.IPv4len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv4len) + copy(ifi.Addr.IP, b[:net.IPv4len]) + b = b[net.IPv4len:] + case afiIPv6: + if len(b) < net.IPv6len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv6len) + copy(ifi.Addr.IP, b[:net.IPv6len]) + b = b[net.IPv6len:] + } + return b, nil +} + +func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte { + l := byte(ifi.nameLen()) + b[0] = l + copy(b[1:], []byte(ifi.Interface.Name)) + return b[l:] +} + +func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) { + if 4 > len(b) || len(b) < int(b[0]) { + return nil, errMessageTooShort + } + l := int(b[0]) + if l%4 != 0 || 4 > l || l > 64 { + return nil, errInvalidExtension + } + var name [63]byte + copy(name[:], b[1:l]) + ifi.Interface.Name = strings.Trim(string(name[:]), "\000") + return b[l:], nil +} + +func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func parseInterfaceInfo(b []byte) (Extension, error) { + ifi := &InterfaceInfo{ + Class: int(b[2]), + Type: int(b[3]), + } + if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 { + ifi.Interface = &net.Interface{} + } + if ifi.Type&attrIPAddr != 0 { + ifi.Addr = &net.IPAddr{} + } + attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU) + for b = b[4:]; len(b) > 0 && attrs != 0; { + var err error + switch { + case attrs&attrIfIndex != 0: + b, err = ifi.parseIfIndex(b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b, err = ifi.parseIPAddr(b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b, err = ifi.parseName(b) + attrs &^= attrName + case attrs&attrMTU != 0: + b, err = ifi.parseMTU(b) + attrs &^= attrMTU + } + if err != nil { + return nil, err + } + } + if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + ifi.Addr.Zone = ifi.Interface.Name + } + return ifi, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/golang.org/x/net/icmp/ipv4.go new file mode 100644 index 0000000..ffc66ed --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "runtime" + + "golang.org/x/net/internal/socket" + "golang.org/x/net/ipv4" +) + +// freebsdVersion is set in sys_freebsd.go. +// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. +var freebsdVersion uint32 + +// ParseIPv4Header parses b as an IPv4 header of ICMP error message +// invoking packet, which is contained in ICMP error message. +func ParseIPv4Header(b []byte) (*ipv4.Header, error) { + if len(b) < ipv4.HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return nil, errBufferTooShort + } + h := &ipv4.Header{ + Version: int(b[0] >> 4), + Len: hdrlen, + TOS: int(b[1]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + FragOff: int(binary.BigEndian.Uint16(b[6:8])), + TTL: int(b[8]), + Protocol: int(b[9]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), + Src: net.IPv4(b[12], b[13], b[14], b[15]), + Dst: net.IPv4(b[16], b[17], b[18], b[19]), + } + switch runtime.GOOS { + case "darwin": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + case "freebsd": + if freebsdVersion >= 1000000 { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } else { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } + h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + if hdrlen-ipv4.HeaderLen > 0 { + h.Options = make([]byte, hdrlen-ipv4.HeaderLen) + copy(h.Options, b[ipv4.HeaderLen:]) + } + return h, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4_test.go b/vendor/golang.org/x/net/icmp/ipv4_test.go new file mode 100644 index 0000000..058953f --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4_test.go @@ -0,0 +1,83 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/socket" + "golang.org/x/net/ipv4" +) + +type ipv4HeaderTest struct { + wireHeaderFromKernel [ipv4.HeaderLen]byte + wireHeaderFromTradBSDKernel [ipv4.HeaderLen]byte + Header *ipv4.Header +} + +var ipv4HeaderLittleEndianTest = ipv4HeaderTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. + wireHeaderFromKernel: [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: ipv4.DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, +} + +func TestParseIPv4Header(t *testing.T) { + tt := &ipv4HeaderLittleEndianTest + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + + var wh []byte + switch runtime.GOOS { + case "darwin": + wh = tt.wireHeaderFromTradBSDKernel[:] + case "freebsd": + if freebsdVersion >= 1000000 { + wh = tt.wireHeaderFromKernel[:] + } else { + wh = tt.wireHeaderFromTradBSDKernel[:] + } + default: + wh = tt.wireHeaderFromKernel[:] + } + h, err := ParseIPv4Header(wh) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } +} diff --git a/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/golang.org/x/net/icmp/ipv6.go new file mode 100644 index 0000000..2e8cfeb --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv6.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + + "golang.org/x/net/internal/iana" +) + +const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 + +// IPv6PseudoHeader returns an IPv6 pseudo header for checksum +// calculation. +func IPv6PseudoHeader(src, dst net.IP) []byte { + b := make([]byte, ipv6PseudoHeaderLen) + copy(b, src.To16()) + copy(b[net.IPv6len:], dst.To16()) + b[len(b)-1] = byte(iana.ProtocolIPv6ICMP) + return b +} diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go new file mode 100644 index 0000000..7fac4f9 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_posix.go @@ -0,0 +1,100 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "os" + "runtime" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. +// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + var family, proto int + switch network { + case "udp4": + family, proto = syscall.AF_INET, iana.ProtocolICMP + case "udp6": + family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP + default: + i := last(network, ':') + switch network[:i] { + case "ip4": + proto = iana.ProtocolICMP + case "ip6": + proto = iana.ProtocolIPv6ICMP + } + } + var cerr error + var c net.PacketConn + switch family { + case syscall.AF_INET, syscall.AF_INET6: + s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + if runtime.GOOS == "darwin" && family == syscall.AF_INET { + if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("setsockopt", err) + } + } + sa, err := sockaddr(family, address) + if err != nil { + syscall.Close(s) + return nil, err + } + if err := syscall.Bind(s, sa); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("bind", err) + } + f := os.NewFile(uintptr(s), "datagram-oriented icmp") + c, cerr = net.FilePacketConn(f) + f.Close() + default: + c, cerr = net.ListenPacket(network, address) + } + if cerr != nil { + return nil, cerr + } + switch proto { + case iana.ProtocolICMP: + return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil + case iana.ProtocolIPv6ICMP: + return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil + default: + return &PacketConn{c: c}, nil + } +} diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go new file mode 100644 index 0000000..668728d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_stub.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package icmp + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. +// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + return nil, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go new file mode 100644 index 0000000..81140b0 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message.go @@ -0,0 +1,152 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package icmp provides basic functions for the manipulation of +// messages used in the Internet Control Message Protocols, +// ICMPv4 and ICMPv6. +// +// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443. +// Multi-part message support for ICMP is defined in RFC 4884. +// ICMP extensions for MPLS are defined in RFC 4950. +// ICMP extensions for interface and next-hop identification are +// defined in RFC 5837. +package icmp // import "golang.org/x/net/icmp" + +import ( + "encoding/binary" + "errors" + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. + +var ( + errMessageTooShort = errors.New("message too short") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errOpNoSupport = errors.New("operation not supported") + errNoExtension = errors.New("no extension") + errInvalidExtension = errors.New("invalid extension") +) + +func checksum(b []byte) uint16 { + csumcv := len(b) - 1 // checksum coverage + s := uint32(0) + for i := 0; i < csumcv; i += 2 { + s += uint32(b[i+1])<<8 | uint32(b[i]) + } + if csumcv&1 == 0 { + s += uint32(b[csumcv]) + } + s = s>>16 + s&0xffff + s = s + s>>16 + return ^uint16(s) +} + +// A Type represents an ICMP message type. +type Type interface { + Protocol() int +} + +// A Message represents an ICMP message. +type Message struct { + Type Type // type, either ipv4.ICMPType or ipv6.ICMPType + Code int // code + Checksum int // checksum + Body MessageBody // body +} + +// Marshal returns the binary encoding of the ICMP message m. +// +// For an ICMPv4 message, the returned message always contains the +// calculated checksum field. +// +// For an ICMPv6 message, the returned message contains the calculated +// checksum field when psh is not nil, otherwise the kernel will +// compute the checksum field during the message transmission. +// When psh is not nil, it must be the pseudo header for IPv6. +func (m *Message) Marshal(psh []byte) ([]byte, error) { + var mtype int + switch typ := m.Type.(type) { + case ipv4.ICMPType: + mtype = int(typ) + case ipv6.ICMPType: + mtype = int(typ) + default: + return nil, syscall.EINVAL + } + b := []byte{byte(mtype), byte(m.Code), 0, 0} + if m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil { + b = append(psh, b...) + } + if m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 { + mb, err := m.Body.Marshal(m.Type.Protocol()) + if err != nil { + return nil, err + } + b = append(b, mb...) + } + if m.Type.Protocol() == iana.ProtocolIPv6ICMP { + if psh == nil { // cannot calculate checksum here + return b, nil + } + off, l := 2*net.IPv6len, len(b)-len(psh) + binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) + } + s := checksum(b) + // Place checksum back in header; using ^= avoids the + // assumption the checksum bytes are zero. + b[len(psh)+2] ^= byte(s) + b[len(psh)+3] ^= byte(s >> 8) + return b[len(psh):], nil +} + +var parseFns = map[Type]func(int, []byte) (MessageBody, error){ + ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv4.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv4.ICMPTypeParameterProblem: parseParamProb, + + ipv4.ICMPTypeEcho: parseEcho, + ipv4.ICMPTypeEchoReply: parseEcho, + + ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv6.ICMPTypePacketTooBig: parsePacketTooBig, + ipv6.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv6.ICMPTypeParameterProblem: parseParamProb, + + ipv6.ICMPTypeEchoRequest: parseEcho, + ipv6.ICMPTypeEchoReply: parseEcho, +} + +// ParseMessage parses b as an ICMP message. +// Proto must be either the ICMPv4 or ICMPv6 protocol number. +func ParseMessage(proto int, b []byte) (*Message, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + var err error + m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} + switch proto { + case iana.ProtocolICMP: + m.Type = ipv4.ICMPType(b[0]) + case iana.ProtocolIPv6ICMP: + m.Type = ipv6.ICMPType(b[0]) + default: + return nil, syscall.EINVAL + } + if fn, ok := parseFns[m.Type]; !ok { + m.Body, err = parseDefaultMessageBody(proto, b[4:]) + } else { + m.Body, err = fn(proto, b[4:]) + } + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/icmp/message_test.go b/vendor/golang.org/x/net/icmp/message_test.go new file mode 100644 index 0000000..5d2605f --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message_test.go @@ -0,0 +1,134 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv4.ICMPTypePhoturis, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMessageForIPv4Tests { + b, err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } +} + +var marshalAndParseMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypePacketTooBig, Code: 0, + Body: &icmp.PacketTooBig{ + MTU: 1<<16 - 1, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv6.ICMPTypeDuplicateAddressConfirmation, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } + } +} diff --git a/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/golang.org/x/net/icmp/messagebody.go new file mode 100644 index 0000000..2463730 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/messagebody.go @@ -0,0 +1,41 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A MessageBody represents an ICMP message body. +type MessageBody interface { + // Len returns the length of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +// A DefaultMessageBody represents the default message body. +type DefaultMessageBody struct { + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *DefaultMessageBody) Len(proto int) int { + if p == nil { + return 0 + } + return len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) { + return p.Data, nil +} + +// parseDefaultMessageBody parses b as an ICMP message body. +func parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) { + p := &DefaultMessageBody{Data: make([]byte, len(b))} + copy(p.Data, b) + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/mpls.go b/vendor/golang.org/x/net/icmp/mpls.go new file mode 100644 index 0000000..c314917 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/mpls.go @@ -0,0 +1,77 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A MPLSLabel represents a MPLS label stack entry. +type MPLSLabel struct { + Label int // label value + TC int // traffic class; formerly experimental use + S bool // bottom of stack + TTL int // time to live +} + +const ( + classMPLSLabelStack = 1 + typeIncomingMPLSLabelStack = 1 +) + +// A MPLSLabelStack represents a MPLS label stack. +type MPLSLabelStack struct { + Class int // extension object class number + Type int // extension object sub-type + Labels []MPLSLabel +} + +// Len implements the Len method of Extension interface. +func (ls *MPLSLabelStack) Len(proto int) int { + return 4 + (4 * len(ls.Labels)) +} + +// Marshal implements the Marshal method of Extension interface. +func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) { + b := make([]byte, ls.Len(proto)) + if err := ls.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { + l := ls.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack + off := 4 + for _, ll := range ls.Labels { + b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0) + b[off+2] |= byte(ll.TC << 1 & 0x0e) + if ll.S { + b[off+2] |= 0x1 + } + b[off+3] = byte(ll.TTL) + off += 4 + } + return nil +} + +func parseMPLSLabelStack(b []byte) (Extension, error) { + ls := &MPLSLabelStack{ + Class: int(b[2]), + Type: int(b[3]), + } + for b = b[4:]; len(b) >= 4; b = b[4:] { + ll := MPLSLabel{ + Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4, + TC: int(b[2]&0x0e) >> 1, + TTL: int(b[3]), + } + if b[2]&0x1 != 0 { + ll.S = true + } + ls.Labels = append(ls.Labels, ll) + } + return ls, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go new file mode 100644 index 0000000..f271356 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart.go @@ -0,0 +1,109 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "golang.org/x/net/internal/iana" + +// multipartMessageBodyDataLen takes b as an original datagram and +// exts as extensions, and returns a required length for message body +// and a required length for a padded original datagram in wire +// format. +func multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) { + for _, ext := range exts { + bodyLen += ext.Len(proto) + } + if bodyLen > 0 { + dataLen = multipartMessageOrigDatagramLen(proto, b) + bodyLen += 4 // length of extension header + } else { + dataLen = len(b) + } + bodyLen += dataLen + return bodyLen, dataLen +} + +// multipartMessageOrigDatagramLen takes b as an original datagram, +// and returns a required length for a padded orignal datagram in wire +// format. +func multipartMessageOrigDatagramLen(proto int, b []byte) int { + roundup := func(b []byte, align int) int { + // According to RFC 4884, the padded original datagram + // field must contain at least 128 octets. + if len(b) < 128 { + return 128 + } + r := len(b) + return (r + align - 1) & ^(align - 1) + } + switch proto { + case iana.ProtocolICMP: + return roundup(b, 4) + case iana.ProtocolIPv6ICMP: + return roundup(b, 8) + default: + return len(b) + } +} + +// marshalMultipartMessageBody takes data as an original datagram and +// exts as extesnsions, and returns a binary encoding of message body. +// It can be used for non-multipart message bodies when exts is nil. +func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) { + bodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts) + b := make([]byte, 4+bodyLen) + copy(b[4:], data) + off := dataLen + 4 + if len(exts) > 0 { + b[dataLen+4] = byte(extensionVersion << 4) + off += 4 // length of object header + for _, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) + case *InterfaceInfo: + attrs, l := ext.attrsAndLen(proto) + if err := ext.marshal(proto, b[off:], attrs, l); err != nil { + return nil, err + } + off += ext.Len(proto) + } + } + s := checksum(b[dataLen+4:]) + b[dataLen+4+2] ^= byte(s) + b[dataLen+4+3] ^= byte(s >> 8) + switch proto { + case iana.ProtocolICMP: + b[1] = byte(dataLen / 4) + case iana.ProtocolIPv6ICMP: + b[0] = byte(dataLen / 8) + } + } + return b, nil +} + +// parseMultipartMessageBody parses b as either a non-multipart +// message body or a multipart message body. +func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) { + var l int + switch proto { + case iana.ProtocolICMP: + l = 4 * int(b[1]) + case iana.ProtocolIPv6ICMP: + l = 8 * int(b[0]) + } + if len(b) == 4 { + return nil, nil, nil + } + exts, l, err := parseExtensions(b[4:], l) + if err != nil { + l = len(b) - 4 + } + data := make([]byte, l) + copy(data, b[4:]) + return data, exts, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart_test.go b/vendor/golang.org/x/net/icmp/multipart_test.go new file mode 100644 index 0000000..966ccb8 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart_test.go @@ -0,0 +1,442 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "fmt" + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 2).To4(), + }, + }, + }, + }, + }, +} + +func TestMarshalAndParseMultipartMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMultipartMessageForIPv4Tests { + b, err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + if b[5] != 32 { + t.Errorf("#%v: got %v; want 32", i, b[5]) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv4.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeParameterProblem: + got, want := m.Body.(*icmp.ParamProb), tt.Body.(*icmp.ParamProb) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } +} + +var marshalAndParseMultipartMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en102", + }, + }, + }, + }, + }, +} + +func TestMarshalAndParseMultipartMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMultipartMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + if b[4] != 16 { + t.Errorf("#%v: got %v; want 16", i, b[4]) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv6.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv6.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } + } +} + +func dumpExtensions(i int, gotExts, wantExts []icmp.Extension) string { + var s string + for j, got := range gotExts { + switch got := got.(type) { + case *icmp.MPLSLabelStack: + want := wantExts[j].(*icmp.MPLSLabelStack) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v; want %#v\n", i, j, got, want) + } + case *icmp.InterfaceInfo: + want := wantExts[j].(*icmp.InterfaceInfo) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, j, got, got.Interface, got.Addr, want, want.Interface, want.Addr) + } + } + } + return s[:len(s)-1] +} + +var multipartMessageBodyLenTests = []struct { + proto int + in icmp.MessageBody + out int +}{ + { + iana.ProtocolICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // [pointer, unused] and original datagram + }, + + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.PacketTooBig{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // mtu and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // pointer and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 127), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram + }, +} + +func TestMultipartMessageBodyLen(t *testing.T) { + for i, tt := range multipartMessageBodyLenTests { + if out := tt.in.Len(tt.proto); out != tt.out { + t.Errorf("#%d: got %d; want %d", i, out, tt.out) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go new file mode 100644 index 0000000..a1c9df7 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/packettoobig.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A PacketTooBig represents an ICMP packet too big message body. +type PacketTooBig struct { + MTU int // maximum transmission unit of the nexthop link + Data []byte // data, known as original datagram field +} + +// Len implements the Len method of MessageBody interface. +func (p *PacketTooBig) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) + copy(b[4:], p.Data) + return b, nil +} + +// parsePacketTooBig parses b as an ICMP packet too big message body. +func parsePacketTooBig(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go new file mode 100644 index 0000000..0a2548d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/paramprob.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "golang.org/x/net/internal/iana" +) + +// A ParamProb represents an ICMP parameter problem message body. +type ParamProb struct { + Pointer uintptr // offset within the data where the error was detected + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *ParamProb) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ParamProb) Marshal(proto int) ([]byte, error) { + if proto == iana.ProtocolIPv6ICMP { + b := make([]byte, p.Len(proto)) + binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) + copy(b[4:], p.Data) + return b, nil + } + b, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions) + if err != nil { + return nil, err + } + b[0] = byte(p.Pointer) + return b, nil +} + +// parseParamProb parses b as an ICMP parameter problem message body. +func parseParamProb(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ParamProb{} + if proto == iana.ProtocolIPv6ICMP { + p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) + p.Data = make([]byte, len(b)-4) + copy(p.Data, b[4:]) + return p, nil + } + p.Pointer = uintptr(b[0]) + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/ping_test.go b/vendor/golang.org/x/net/icmp/ping_test.go new file mode 100644 index 0000000..3171dad --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ping_test.go @@ -0,0 +1,200 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "errors" + "fmt" + "net" + "os" + "runtime" + "sync" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + return nil, err + } + netaddr := func(ip net.IP) (net.Addr, error) { + switch c.LocalAddr().(type) { + case *net.UDPAddr: + return &net.UDPAddr{IP: ip}, nil + case *net.IPAddr: + return &net.IPAddr{IP: ip}, nil + default: + return nil, errors.New("neither UDPAddr nor IPAddr") + } + } + for _, ip := range ips { + switch protocol { + case iana.ProtocolICMP: + if ip.To4() != nil { + return netaddr(ip) + } + case iana.ProtocolIPv6ICMP: + if ip.To16() != nil && ip.To4() == nil { + return netaddr(ip) + } + } + } + return nil, errors.New("no A or AAAA record") +} + +type pingTest struct { + network, address string + protocol int + mtype icmp.Type +} + +var nonPrivilegedPingTests = []pingTest{ + {"udp4", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"udp6", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestNonPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + for i, tt := range nonPrivilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +var privilegedPingTests = []pingTest{ + {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + for i, tt := range privilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +func doPing(tt pingTest, seq int) error { + c, err := icmp.ListenPacket(tt.network, tt.address) + if err != nil { + return err + } + defer c.Close() + + dst, err := googleAddr(c, tt.protocol) + if err != nil { + return err + } + + if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP { + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeDestinationUnreachable) + f.Accept(ipv6.ICMPTypePacketTooBig) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeParameterProblem) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { + return err + } + } + + wm := icmp.Message{ + Type: tt.mtype, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1 << uint(seq), + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + return err + } + if n, err := c.WriteTo(wb, dst); err != nil { + return err + } else if n != len(wb) { + return fmt.Errorf("got %v; want %v", n, len(wb)) + } + + rb := make([]byte, 1500) + if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + return err + } + n, peer, err := c.ReadFrom(rb) + if err != nil { + return err + } + rm, err := icmp.ParseMessage(tt.protocol, rb[:n]) + if err != nil { + return err + } + switch rm.Type { + case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply: + return nil + default: + return fmt.Errorf("got %+v from %v; want echo reply", rm, peer) + } +} + +func TestConcurrentNonPrivilegedListenPacket(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + network, address := "udp4", "127.0.0.1" + if !nettest.SupportsIPv4() { + network, address = "udp6", "::1" + } + const N = 1000 + var wg sync.WaitGroup + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + defer wg.Done() + c, err := icmp.ListenPacket(network, address) + if err != nil { + t.Error(err) + return + } + c.Close() + }() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/icmp/sys_freebsd.go b/vendor/golang.org/x/net/icmp/sys_freebsd.go new file mode 100644 index 0000000..c75f3dd --- /dev/null +++ b/vendor/golang.org/x/net/icmp/sys_freebsd.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "syscall" + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") +} diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go new file mode 100644 index 0000000..344e158 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/timeexceeded.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A TimeExceeded represents an ICMP time exceeded message body. +type TimeExceeded struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *TimeExceeded) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *TimeExceeded) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseTimeExceeded parses b as an ICMP time exceeded message body. +func parseTimeExceeded(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &TimeExceeded{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/idna/example_test.go b/vendor/golang.org/x/net/idna/example_test.go new file mode 100644 index 0000000..948f6eb --- /dev/null +++ b/vendor/golang.org/x/net/idna/example_test.go @@ -0,0 +1,70 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna_test + +import ( + "fmt" + + "golang.org/x/net/idna" +) + +func ExampleProfile() { + // Raw Punycode has no restrictions and does no mappings. + fmt.Println(idna.ToASCII("")) + fmt.Println(idna.ToASCII("*.faß.com")) + fmt.Println(idna.Punycode.ToASCII("*.faß.com")) + + // Rewrite IDN for lookup. This (currently) uses transitional mappings to + // find a balance between IDNA2003 and IDNA2008 compatibility. + fmt.Println(idna.Lookup.ToASCII("")) + fmt.Println(idna.Lookup.ToASCII("www.faß.com")) + + // Convert an IDN to ASCII for registration purposes. This changes the + // encoding, but reports an error if the input was illformed. + fmt.Println(idna.Registration.ToASCII("")) + fmt.Println(idna.Registration.ToASCII("www.faß.com")) + + // Output: + // + // *.xn--fa-hia.com + // *.xn--fa-hia.com + // + // www.fass.com + // idna: invalid label "" + // www.xn--fa-hia.com +} + +func ExampleNew() { + var p *idna.Profile + + // Raw Punycode has no restrictions and does no mappings. + p = idna.New() + fmt.Println(p.ToASCII("*.faß.com")) + + // Do mappings. Note that star is not allowed in a DNS lookup. + p = idna.New( + idna.MapForLookup(), + idna.Transitional(true)) // Map ß -> ss + fmt.Println(p.ToASCII("*.faß.com")) + + // Lookup for registration. Also does not allow '*'. + p = idna.New(idna.ValidateForRegistration()) + fmt.Println(p.ToUnicode("*.faß.com")) + + // Set up a profile maps for lookup, but allows wild cards. + p = idna.New( + idna.MapForLookup(), + idna.Transitional(true), // Map ß -> ss + idna.StrictDomainName(false)) // Set more permissive ASCII rules. + fmt.Println(p.ToASCII("*.faß.com")) + + // Output: + // *.xn--fa-hia.com + // *.fass.com idna: disallowed rune U+002A + // *.faß.com idna: disallowed rune U+002A + // *.fass.com +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 0000000..346fe44 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,732 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in http://www.unicode.org/reports/tr46. +// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissible ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of an IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see http://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + var isBidi bool + if p.mapping != nil { + s, isBidi, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // TODO: allow for a quick check of the tables data. + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. + mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil +} + +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. + if !norm.NFC.IsNormalString(s) { + return s, false, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return s, bidi, runeError(utf8.RuneError) + } + bidi = bidi || info(v).isBidi(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, bidi, runeError(r) + } + i += sz + } + return s, bidi, nil +} + +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { + var ( + b []byte + k int + ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) + k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, bidi, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + // TODO: detect whether string may have to be normalized in the following + // loop. + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) (err error) { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. + if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/idna_test.go b/vendor/golang.org/x/net/idna/idna_test.go new file mode 100644 index 0000000..0b067ca --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna_test.go @@ -0,0 +1,108 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "testing" +) + +var idnaTestCases = [...]struct { + ascii, unicode string +}{ + // Labels. + {"books", "books"}, + {"xn--bcher-kva", "bücher"}, + + // Domains. + {"foo--xn--bar.org", "foo--xn--bar.org"}, + {"golang.org", "golang.org"}, + {"example.xn--p1ai", "example.рф"}, + {"xn--czrw28b.tw", "商業.tw"}, + {"www.xn--mller-kva.de", "www.müller.de"}, +} + +func TestIDNA(t *testing.T) { + for _, tc := range idnaTestCases { + if a, err := ToASCII(tc.unicode); err != nil { + t.Errorf("ToASCII(%q): %v", tc.unicode, err) + } else if a != tc.ascii { + t.Errorf("ToASCII(%q): got %q, want %q", tc.unicode, a, tc.ascii) + } + + if u, err := ToUnicode(tc.ascii); err != nil { + t.Errorf("ToUnicode(%q): %v", tc.ascii, err) + } else if u != tc.unicode { + t.Errorf("ToUnicode(%q): got %q, want %q", tc.ascii, u, tc.unicode) + } + } +} + +func TestIDNASeparators(t *testing.T) { + type subCase struct { + unicode string + wantASCII string + wantErr bool + } + + testCases := []struct { + name string + profile *Profile + subCases []subCase + }{ + { + name: "Punycode", profile: Punycode, + subCases: []subCase{ + {"example\u3002jp", "xn--examplejp-ck3h", false}, + {"東京\uFF0Ejp", "xn--jp-l92cn98g071o", false}, + {"大阪\uFF61jp", "xn--jp-ku9cz72u463f", false}, + }, + }, + { + name: "Lookup", profile: Lookup, + subCases: []subCase{ + {"example\u3002jp", "example.jp", false}, + {"東京\uFF0Ejp", "xn--1lqs71d.jp", false}, + {"大阪\uFF61jp", "xn--pssu33l.jp", false}, + }, + }, + { + name: "Display", profile: Display, + subCases: []subCase{ + {"example\u3002jp", "example.jp", false}, + {"東京\uFF0Ejp", "xn--1lqs71d.jp", false}, + {"大阪\uFF61jp", "xn--pssu33l.jp", false}, + }, + }, + { + name: "Registration", profile: Registration, + subCases: []subCase{ + {"example\u3002jp", "", true}, + {"東京\uFF0Ejp", "", true}, + {"大阪\uFF61jp", "", true}, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for _, c := range tc.subCases { + gotA, err := tc.profile.ToASCII(c.unicode) + if c.wantErr { + if err == nil { + t.Errorf("ToASCII(%q): got no error, but an error expected", c.unicode) + } + } else { + if err != nil { + t.Errorf("ToASCII(%q): got err=%v, but no error expected", c.unicode, err) + } else if gotA != c.wantASCII { + t.Errorf("ToASCII(%q): got %q, want %q", c.unicode, gotA, c.wantASCII) + } + } + } + }) + } +} + +// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode +// return errors. diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 0000000..02c7d59 --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. +func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. +func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", punyError(s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", punyError(s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. +func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/punycode_test.go b/vendor/golang.org/x/net/idna/punycode_test.go new file mode 100644 index 0000000..bfec81d --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode_test.go @@ -0,0 +1,198 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "strings" + "testing" +) + +var punycodeTestCases = [...]struct { + s, encoded string +}{ + {"", ""}, + {"-", "--"}, + {"-a", "-a-"}, + {"-a-", "-a--"}, + {"a", "a-"}, + {"a-", "a--"}, + {"a-b", "a-b-"}, + {"books", "books-"}, + {"bücher", "bcher-kva"}, + {"Hello世界", "Hello-ck1hg65u"}, + {"ü", "tda"}, + {"üý", "tdac"}, + + // The test cases below come from RFC 3492 section 7.1 with Errata 3026. + { + // (A) Arabic (Egyptian). + "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" + + "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", + "egbpdaj6bu4bxfgehfvwxn", + }, + { + // (B) Chinese (simplified). + "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", + "ihqwcrb4cv8a8dqg056pqjye", + }, + { + // (C) Chinese (traditional). + "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", + "ihqwctvzc91f659drss3x8bo0yb", + }, + { + // (D) Czech. + "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" + + "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" + + "\u0065\u0073\u006B\u0079", + "Proprostnemluvesky-uyb24dma41a", + }, + { + // (E) Hebrew. + "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + + "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + + "\u05D1\u05E8\u05D9\u05EA", + "4dbcagdahymbxekheh6e0a7fei0b", + }, + { + // (F) Hindi (Devanagari). + "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + + "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + + "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + + "\u0939\u0948\u0902", + "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", + }, + { + // (G) Japanese (kanji and hiragana). + "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + + "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", + "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", + }, + { + // (H) Korean (Hangul syllables). + "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + + "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + + "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", + "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + + "psd879ccm6fea98c", + }, + { + // (I) Russian (Cyrillic). + "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + + "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + + "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + + "\u0438", + "b1abfaaepdrnnbgefbadotcwatmq2g4l", + }, + { + // (J) Spanish. + "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + + "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + + "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + + "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + + "\u0061\u00F1\u006F\u006C", + "PorqunopuedensimplementehablarenEspaol-fmd56a", + }, + { + // (K) Vietnamese. + "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + + "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + + "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + + "\u0056\u0069\u1EC7\u0074", + "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", + }, + { + // (L) 3B. + "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", + "3B-ww4c5e180e575a65lsy2b", + }, + { + // (M) -with-SUPER-MONKEYS. + "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + + "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + + "\u004F\u004E\u004B\u0045\u0059\u0053", + "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", + }, + { + // (N) Hello-Another-Way-. + "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + + "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + + "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", + "Hello-Another-Way--fc4qua05auwb3674vfr0b", + }, + { + // (O) 2. + "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", + "2-u9tlzr9756bt3uc0v", + }, + { + // (P) MajiKoi5 + "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + + "\u308B\u0035\u79D2\u524D", + "MajiKoi5-783gue6qz075azm5e", + }, + { + // (Q) de + "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", + "de-jg4avhby1noc0d", + }, + { + // (R) + "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", + "d9juau41awczczp", + }, + { + // (S) -> $1.00 <- + "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + + "\u003C\u002D", + "-> $1.00 <--", + }, +} + +func TestPunycode(t *testing.T) { + for _, tc := range punycodeTestCases { + if got, err := decode(tc.encoded); err != nil { + t.Errorf("decode(%q): %v", tc.encoded, err) + } else if got != tc.s { + t.Errorf("decode(%q): got %q, want %q", tc.encoded, got, tc.s) + } + + if got, err := encode("", tc.s); err != nil { + t.Errorf(`encode("", %q): %v`, tc.s, err) + } else if got != tc.encoded { + t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) + } + } +} + +var punycodeErrorTestCases = [...]string{ + "decode -", // A sole '-' is invalid. + "decode foo\x00bar", // '\x00' is not in [0-9A-Za-z]. + "decode foo#bar", // '#' is not in [0-9A-Za-z]. + "decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z]. + "decode 9", // "9a" decodes to codepoint \u00A3; "9" is truncated. + "decode 99999a", // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF. + "decode 9999999999a", // "9999999999a" overflows the int32 calculation. + + "encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow. +} + +func TestPunycodeErrors(t *testing.T) { + for _, tc := range punycodeErrorTestCases { + var err error + switch { + case strings.HasPrefix(tc, "decode "): + _, err = decode(tc[7:]) + case strings.HasPrefix(tc, "encode "): + _, err = encode("", tc[7:]) + } + if err == nil { + if len(tc) > 256 { + tc = tc[:100] + "..." + tc[len(tc)-100:] + } + t.Errorf("no error for %s", tc) + } + } +} diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go new file mode 100644 index 0000000..f910b26 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables.go @@ -0,0 +1,4557 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "10.0.0" + +var mappings string = "" + // Size: 8176 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" + + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." + + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" + + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. +var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} + +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0x93 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa3 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xc9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xda + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x17, offset 0xeb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11b + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x138 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0x142 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x151 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x168 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x170 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x176 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x181 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x186 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x193 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a4 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1ae + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c5 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1d2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1dd + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f6 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x205 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x239 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x259 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25e + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x268 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x279 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x295 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2bd + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x2c3 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d0 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d8 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x307 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x30a + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x30c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x319 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x31c + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x345 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x34b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x351 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x366 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x379 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37e + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x386 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x39b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a3 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b4 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3cd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3da + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e4 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fa + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3ff + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x401 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x405 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x407 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x40b + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x414 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x41a + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41e + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x438 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x89, offset 0x440 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x446 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44d + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x452 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x456 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x46a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x483 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x48a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 0x493 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x496 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x49b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4b2 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f5 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f8 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x500 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x51b + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x522 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x52b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x533 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x548 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x555 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x562 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x56b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x590 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5b0 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5ba + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5bd + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5cc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc6, offset 0x5f6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc8, offset 0x609 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcc, offset 0x62d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xcd, offset 0x632 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xce, offset 0x635 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcf, offset 0x638 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd0, offset 0x63b + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd1, offset 0x642 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd2, offset 0x649 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x64d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x658 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd5, offset 0x65b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x661 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd7, offset 0x666 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd9, offset 0x66d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xda, offset 0x670 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xde, offset 0x67e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xdf, offset 0x688 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x68b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe1, offset 0x68f + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x6aa + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xe4, offset 0x6ae + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe5, offset 0x6b3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6c1 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6ca + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xea, offset 0x6d5 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xec, offset 0x6e3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x6e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xee, offset 0x6eb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xef, offset 0x6f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf0, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xf1, offset 0x6fc + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf5, offset 0x719 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf6, offset 0x71c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x720 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf8, offset 0x726 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf9, offset 0x72b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xfe, offset 0x740 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xff, offset 0x743 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x75e + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x105, offset 0x774 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x106, offset 0x776 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 0000000..c4ef847 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. +func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 0000000..7a8cf88 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,119 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. +// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. + + joinShift = 8 + joinMask = 0x07 + + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 + modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not currently defined in unicode. + mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(attributesMask|catSmallMask) == viramaModifier +} diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go new file mode 100644 index 0000000..c9df24d --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -0,0 +1,180 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). +package iana // import "golang.org/x/net/internal/iana" + +// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12 +const ( + DiffServCS0 = 0x0 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEF = 0xb8 // EF + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT +) + +// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06 +const ( + NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x3 // CE (Congestion Experienced) +) + +// Protocol Numbers, Updated: 2016-06-22 +const ( + ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number + ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option + ProtocolICMP = 1 // Internet Control Message + ProtocolIGMP = 2 // Internet Group Management + ProtocolGGP = 3 // Gateway-to-Gateway + ProtocolIPv4 = 4 // IPv4 encapsulation + ProtocolST = 5 // Stream + ProtocolTCP = 6 // Transmission Control + ProtocolCBT = 7 // CBT + ProtocolEGP = 8 // Exterior Gateway Protocol + ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP) + ProtocolBBNRCCMON = 10 // BBN RCC Monitoring + ProtocolNVPII = 11 // Network Voice Protocol + ProtocolPUP = 12 // PUP + ProtocolEMCON = 14 // EMCON + ProtocolXNET = 15 // Cross Net Debugger + ProtocolCHAOS = 16 // Chaos + ProtocolUDP = 17 // User Datagram + ProtocolMUX = 18 // Multiplexing + ProtocolDCNMEAS = 19 // DCN Measurement Subsystems + ProtocolHMP = 20 // Host Monitoring + ProtocolPRM = 21 // Packet Radio Measurement + ProtocolXNSIDP = 22 // XEROX NS IDP + ProtocolTRUNK1 = 23 // Trunk-1 + ProtocolTRUNK2 = 24 // Trunk-2 + ProtocolLEAF1 = 25 // Leaf-1 + ProtocolLEAF2 = 26 // Leaf-2 + ProtocolRDP = 27 // Reliable Data Protocol + ProtocolIRTP = 28 // Internet Reliable Transaction + ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4 + ProtocolNETBLT = 30 // Bulk Data Transfer Protocol + ProtocolMFENSP = 31 // MFE Network Services Protocol + ProtocolMERITINP = 32 // MERIT Internodal Protocol + ProtocolDCCP = 33 // Datagram Congestion Control Protocol + Protocol3PC = 34 // Third Party Connect Protocol + ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol + ProtocolXTP = 36 // XTP + ProtocolDDP = 37 // Datagram Delivery Protocol + ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto + ProtocolTPPP = 39 // TP++ Transport Protocol + ProtocolIL = 40 // IL Transport Protocol + ProtocolIPv6 = 41 // IPv6 encapsulation + ProtocolSDRP = 42 // Source Demand Routing Protocol + ProtocolIPv6Route = 43 // Routing Header for IPv6 + ProtocolIPv6Frag = 44 // Fragment Header for IPv6 + ProtocolIDRP = 45 // Inter-Domain Routing Protocol + ProtocolRSVP = 46 // Reservation Protocol + ProtocolGRE = 47 // Generic Routing Encapsulation + ProtocolDSR = 48 // Dynamic Source Routing Protocol + ProtocolBNA = 49 // BNA + ProtocolESP = 50 // Encap Security Payload + ProtocolAH = 51 // Authentication Header + ProtocolINLSP = 52 // Integrated Net Layer Security TUBA + ProtocolNARP = 54 // NBMA Address Resolution Protocol + ProtocolMOBILE = 55 // IP Mobility + ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management + ProtocolSKIP = 57 // SKIP + ProtocolIPv6ICMP = 58 // ICMP for IPv6 + ProtocolIPv6NoNxt = 59 // No Next Header for IPv6 + ProtocolIPv6Opts = 60 // Destination Options for IPv6 + ProtocolCFTP = 62 // CFTP + ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK + ProtocolKRYPTOLAN = 65 // Kryptolan + ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol + ProtocolIPPC = 67 // Internet Pluribus Packet Core + ProtocolSATMON = 69 // SATNET Monitoring + ProtocolVISA = 70 // VISA Protocol + ProtocolIPCV = 71 // Internet Packet Core Utility + ProtocolCPNX = 72 // Computer Protocol Network Executive + ProtocolCPHB = 73 // Computer Protocol Heart Beat + ProtocolWSN = 74 // Wang Span Network + ProtocolPVP = 75 // Packet Video Protocol + ProtocolBRSATMON = 76 // Backroom SATNET Monitoring + ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary + ProtocolWBMON = 78 // WIDEBAND Monitoring + ProtocolWBEXPAK = 79 // WIDEBAND EXPAK + ProtocolISOIP = 80 // ISO Internet Protocol + ProtocolVMTP = 81 // VMTP + ProtocolSECUREVMTP = 82 // SECURE-VMTP + ProtocolVINES = 83 // VINES + ProtocolTTP = 84 // Transaction Transport Protocol + ProtocolIPTM = 84 // Internet Protocol Traffic Manager + ProtocolNSFNETIGP = 85 // NSFNET-IGP + ProtocolDGP = 86 // Dissimilar Gateway Protocol + ProtocolTCF = 87 // TCF + ProtocolEIGRP = 88 // EIGRP + ProtocolOSPFIGP = 89 // OSPFIGP + ProtocolSpriteRPC = 90 // Sprite RPC Protocol + ProtocolLARP = 91 // Locus Address Resolution Protocol + ProtocolMTP = 92 // Multicast Transport Protocol + ProtocolAX25 = 93 // AX.25 Frames + ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol + ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro. + ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation + ProtocolENCAP = 98 // Encapsulation Header + ProtocolGMTP = 100 // GMTP + ProtocolIFMP = 101 // Ipsilon Flow Management Protocol + ProtocolPNNI = 102 // PNNI over IP + ProtocolPIM = 103 // Protocol Independent Multicast + ProtocolARIS = 104 // ARIS + ProtocolSCPS = 105 // SCPS + ProtocolQNX = 106 // QNX + ProtocolAN = 107 // Active Networks + ProtocolIPComp = 108 // IP Payload Compression Protocol + ProtocolSNP = 109 // Sitara Networks Protocol + ProtocolCompaqPeer = 110 // Compaq Peer Protocol + ProtocolIPXinIP = 111 // IPX in IP + ProtocolVRRP = 112 // Virtual Router Redundancy Protocol + ProtocolPGM = 113 // PGM Reliable Transport Protocol + ProtocolL2TP = 115 // Layer Two Tunneling Protocol + ProtocolDDX = 116 // D-II Data Exchange (DDX) + ProtocolIATP = 117 // Interactive Agent Transfer Protocol + ProtocolSTP = 118 // Schedule Transfer Protocol + ProtocolSRP = 119 // SpectraLink Radio Protocol + ProtocolUTI = 120 // UTI + ProtocolSMP = 121 // Simple Message Protocol + ProtocolPTP = 123 // Performance Transparency Protocol + ProtocolISIS = 124 // ISIS over IPv4 + ProtocolFIRE = 125 // FIRE + ProtocolCRTP = 126 // Combat Radio Transport Protocol + ProtocolCRUDP = 127 // Combat Radio User Datagram + ProtocolSSCOPMCE = 128 // SSCOPMCE + ProtocolIPLT = 129 // IPLT + ProtocolSPS = 130 // Secure Packet Shield + ProtocolPIPE = 131 // Private IP Encapsulation within IP + ProtocolSCTP = 132 // Stream Control Transmission Protocol + ProtocolFC = 133 // Fibre Channel + ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE + ProtocolMobilityHeader = 135 // Mobility Header + ProtocolUDPLite = 136 // UDPLite + ProtocolMPLSinIP = 137 // MPLS-in-IP + ProtocolMANET = 138 // MANET Protocols + ProtocolHIP = 139 // Host Identity Protocol + ProtocolShim6 = 140 // Shim6 Protocol + ProtocolWESP = 141 // Wrapped Encapsulating Security Payload + ProtocolROHC = 142 // Robust Header Compression + ProtocolReserved = 255 // Reserved +) diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go new file mode 100644 index 0000000..2a5c310 --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/gen.go @@ -0,0 +1,293 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates internet protocol constants and tables by +// reading IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" +) + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/dscp-registry/dscp-registry.xml", + parseDSCPRegistry, + }, + { + "https://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", + parseTOSTCByte, + }, + { + "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", + parseProtocolNumbers, + }, +} + +func main() { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") + fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) + os.Exit(1) + } + if err := r.parse(&bb, resp.Body); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile("const.go", b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func parseDSCPRegistry(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var dr dscpRegistry + if err := dec.Decode(&dr); err != nil { + return err + } + drs := dr.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) + fmt.Fprintf(w, "const (\n") + for _, dr := range drs { + fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value) + fmt.Fprintf(w, "// %s\n", dr.OrigName) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type dscpRegistry struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + PoolRecords []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>record"` + Records []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>registry>record"` +} + +type canonDSCPRecord struct { + OrigName string + Name string + Value int +} + +func (drr *dscpRegistry) escape() []canonDSCPRecord { + drs := make([]canonDSCPRecord, len(drr.Records)) + sr := strings.NewReplacer( + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, dr := range drr.Records { + s := strings.TrimSpace(dr.Name) + drs[i].OrigName = s + drs[i].Name = sr.Replace(s) + n, err := strconv.ParseUint(dr.Space, 2, 8) + if err != nil { + continue + } + drs[i].Value = int(n) << 2 + } + return drs +} + +func parseTOSTCByte(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var ttb tosTCByte + if err := dec.Decode(&ttb); err != nil { + return err + } + trs := ttb.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated) + fmt.Fprintf(w, "const (\n") + for _, tr := range trs { + fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value) + fmt.Fprintf(w, "// %s\n", tr.OrigKeyword) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type tosTCByte struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + Records []struct { + Binary string `xml:"binary"` + Keyword string `xml:"keyword"` + } `xml:"registry>record"` +} + +type canonTOSTCByteRecord struct { + OrigKeyword string + Keyword string + Value int +} + +func (ttb *tosTCByte) escape() []canonTOSTCByteRecord { + trs := make([]canonTOSTCByteRecord, len(ttb.Records)) + sr := strings.NewReplacer( + "Capable", "", + "(", "", + ")", "", + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, tr := range ttb.Records { + s := strings.TrimSpace(tr.Keyword) + trs[i].OrigKeyword = s + ss := strings.Split(s, " ") + if len(ss) > 1 { + trs[i].Keyword = strings.Join(ss[1:], " ") + } else { + trs[i].Keyword = ss[0] + } + trs[i].Keyword = sr.Replace(trs[i].Keyword) + n, err := strconv.ParseUint(tr.Binary, 2, 8) + if err != nil { + continue + } + trs[i].Value = int(n) + } + return trs +} + +func parseProtocolNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var pn protocolNumbers + if err := dec.Decode(&pn); err != nil { + return err + } + prs := pn.escape() + prs = append([]canonProtocolRecord{{ + Name: "IP", + Descr: "IPv4 encapsulation, pseudo protocol number", + Value: 0, + }}, prs...) + fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) + s := pr.Descr + if s == "" { + s = pr.OrigName + } + fmt.Fprintf(w, "// %s\n", s) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type protocolNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonProtocolRecord struct { + OrigName string + Name string + Descr string + Value int +} + +func (pn *protocolNumbers) escape() []canonProtocolRecord { + prs := make([]canonProtocolRecord, len(pn.Records)) + sr := strings.NewReplacer( + "-in-", "in", + "-within-", "within", + "-over-", "over", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range pn.Records { + if strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "deprecated") { + continue + } + prs[i].OrigName = pr.Name + s := strings.TrimSpace(pr.Name) + switch pr.Name { + case "ISIS over IPv4": + prs[i].Name = "ISIS" + case "manet": + prs[i].Name = "MANET" + default: + prs[i].Name = sr.Replace(s) + } + ss := strings.Split(pr.Descr, "\n") + for i := range ss { + ss[i] = strings.TrimSpace(ss[i]) + } + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_bsd.go b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go new file mode 100644 index 0000000..a6e433b --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go @@ -0,0 +1,53 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package nettest + +import ( + "runtime" + "strconv" + "strings" + "syscall" +) + +var darwinVersion int + +func init() { + if runtime.GOOS == "darwin" { + // See http://support.apple.com/kb/HT1633. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + darwinVersion, _ = strconv.Atoi(ss[0]) + } +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + switch runtime.GOOS { + case "freebsd": + // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. + // Even after the fix, it looks like the latest + // kernels don't deliver link-local scoped multicast + // packets correctly. + return false + case "darwin": + return !causesIPv6Crash() + default: + return true + } +} + +func causesIPv6Crash() bool { + // We see some kernel crash when running IPv6 with IP-level + // options on Darwin kernel version 12 or below. + // See golang.org/issues/17015. + return darwinVersion < 13 +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go new file mode 100644 index 0000000..bc7da5e --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux solaris + +package nettest + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_posix.go b/vendor/golang.org/x/net/internal/nettest/helper_posix.go new file mode 100644 index 0000000..963ed99 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_posix.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package nettest + +import ( + "os" + "syscall" +) + +func protocolNotSupported(err error) bool { + switch err := err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + case *os.SyscallError: + switch err := err.Err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + } + } + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_stub.go b/vendor/golang.org/x/net/internal/nettest/helper_stub.go new file mode 100644 index 0000000..ea61b6f --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_stub.go @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package nettest + +import ( + "fmt" + "runtime" +) + +func maxOpenFiles() int { + return defaultMaxOpenFiles +} + +func supportsRawIPSocket() (string, bool) { + return fmt.Sprintf("not supported on %s", runtime.GOOS), false +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return false +} + +func causesIPv6Crash() bool { + return false +} + +func protocolNotSupported(err error) bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_unix.go b/vendor/golang.org/x/net/internal/nettest/helper_unix.go new file mode 100644 index 0000000..ed13e44 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_unix.go @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package nettest + +import ( + "fmt" + "os" + "runtime" + "syscall" +) + +func maxOpenFiles() int { + var rlim syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil { + return defaultMaxOpenFiles + } + return int(rlim.Cur) +} + +func supportsRawIPSocket() (string, bool) { + if os.Getuid() != 0 { + return fmt.Sprintf("must be root on %s", runtime.GOOS), false + } + return "", true +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_windows.go b/vendor/golang.org/x/net/internal/nettest/helper_windows.go new file mode 100644 index 0000000..3dcb727 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_windows.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import ( + "fmt" + "runtime" + "syscall" +) + +func maxOpenFiles() int { + return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ +} + +func supportsRawIPSocket() (string, bool) { + // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx: + // Note: To use a socket of type SOCK_RAW requires administrative privileges. + // Users running Winsock applications that use raw sockets must be a member of + // the Administrators group on the local computer, otherwise raw socket calls + // will fail with an error code of WSAEACCES. On Windows Vista and later, access + // for raw sockets is enforced at socket creation. In earlier versions of Windows, + // access for raw sockets is enforced during other socket operations. + s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, 0) + if err == syscall.WSAEACCES { + return fmt.Sprintf("no access to raw socket allowed on %s", runtime.GOOS), false + } + if err != nil { + return err.Error(), false + } + syscall.Closesocket(s) + return "", true +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/interface.go b/vendor/golang.org/x/net/internal/nettest/interface.go new file mode 100644 index 0000000..8e6333a --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/interface.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import "net" + +// IsMulticastCapable reports whether ifi is an IP multicast-capable +// network interface. Network must be "ip", "ip4" or "ip6". +func IsMulticastCapable(network string, ifi *net.Interface) (net.IP, bool) { + switch network { + case "ip", "ip4", "ip6": + default: + return nil, false + } + if ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 { + return nil, false + } + return hasRoutableIP(network, ifi) +} + +// RoutedInterface returns a network interface that can route IP +// traffic and satisfies flags. It returns nil when an appropriate +// network interface is not found. Network must be "ip", "ip4" or +// "ip6". +func RoutedInterface(network string, flags net.Flags) *net.Interface { + switch network { + case "ip", "ip4", "ip6": + default: + return nil + } + ift, err := net.Interfaces() + if err != nil { + return nil + } + for _, ifi := range ift { + if ifi.Flags&flags != flags { + continue + } + if _, ok := hasRoutableIP(network, &ifi); !ok { + continue + } + return &ifi + } + return nil +} + +func hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) { + ifat, err := ifi.Addrs() + if err != nil { + return nil, false + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + case *net.IPNet: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + } + } + return nil, false +} + +func routableIP(network string, ip net.IP) net.IP { + if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() { + return nil + } + switch network { + case "ip4": + if ip := ip.To4(); ip != nil { + return ip + } + case "ip6": + if ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation + return nil + } + if ip := ip.To16(); ip != nil && ip.To4() == nil { + return ip + } + default: + if ip := ip.To4(); ip != nil { + return ip + } + if ip := ip.To16(); ip != nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit.go b/vendor/golang.org/x/net/internal/nettest/rlimit.go new file mode 100644 index 0000000..bb34aec --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +const defaultMaxOpenFiles = 256 + +// MaxOpenFiles returns the maximum number of open files for the +// caller's process. +func MaxOpenFiles() int { return maxOpenFiles() } diff --git a/vendor/golang.org/x/net/internal/nettest/stack.go b/vendor/golang.org/x/net/internal/nettest/stack.go new file mode 100644 index 0000000..06f4e09 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack.go @@ -0,0 +1,152 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. +package nettest // import "golang.org/x/net/internal/nettest" + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "runtime" +) + +var ( + supportsIPv4 bool + supportsIPv6 bool +) + +func init() { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + ln.Close() + supportsIPv4 = true + } + if ln, err := net.Listen("tcp6", "[::1]:0"); err == nil { + ln.Close() + supportsIPv6 = true + } +} + +// SupportsIPv4 reports whether the platform supports IPv4 networking +// functionality. +func SupportsIPv4() bool { return supportsIPv4 } + +// SupportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +func SupportsIPv6() bool { return supportsIPv6 } + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + return supportsRawIPSocket() +} + +// SupportsIPv6MulticastDeliveryOnLoopback reports whether the +// platform supports IPv6 multicast packet delivery on software +// loopback interface. +func SupportsIPv6MulticastDeliveryOnLoopback() bool { + return supportsIPv6MulticastDeliveryOnLoopback() +} + +// ProtocolNotSupported reports whether err is a protocol not +// supported error. +func ProtocolNotSupported(err error) bool { + return protocolNotSupported(err) +} + +// TestableNetwork reports whether network is testable on the current +// platform configuration. +func TestableNetwork(network string) bool { + // This is based on logic from standard library's + // net/platform_test.go. + switch network { + case "unix", "unixgram": + switch runtime.GOOS { + case "android", "nacl", "plan9", "windows": + return false + } + if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { + return false + } + case "unixpacket": + switch runtime.GOOS { + case "android", "darwin", "freebsd", "nacl", "plan9", "windows": + return false + case "netbsd": + // It passes on amd64 at least. 386 fails (Issue 22927). arm is unknown. + if runtime.GOARCH == "386" { + return false + } + } + } + return true +} + +// NewLocalListener returns a listener which listens to a loopback IP +// address or local file system path. +// Network must be "tcp", "tcp4", "tcp6", "unix" or "unixpacket". +func NewLocalListener(network string) (net.Listener, error) { + switch network { + case "tcp": + if supportsIPv4 { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + return ln, nil + } + } + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "tcp4": + if supportsIPv4 { + return net.Listen("tcp4", "127.0.0.1:0") + } + case "tcp6": + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "unix", "unixpacket": + return net.Listen(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +// NewLocalPacketListener returns a packet listener which listens to a +// loopback IP address or local file system path. +// Network must be "udp", "udp4", "udp6" or "unixgram". +func NewLocalPacketListener(network string) (net.PacketConn, error) { + switch network { + case "udp": + if supportsIPv4 { + if c, err := net.ListenPacket("udp4", "127.0.0.1:0"); err == nil { + return c, nil + } + } + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "udp4": + if supportsIPv4 { + return net.ListenPacket("udp4", "127.0.0.1:0") + } + case "udp6": + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "unixgram": + return net.ListenPacket(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +func localPath() string { + f, err := ioutil.TempFile("", "nettest") + if err != nil { + panic(err) + } + path := f.Name() + f.Close() + os.Remove(path) + return path +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go new file mode 100644 index 0000000..1eb07d2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +func (h *cmsghdr) len() int { return int(h.Len) } +func (h *cmsghdr) lvl() int { return int(h.Level) } +func (h *cmsghdr) typ() int { return int(h.Type) } diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go new file mode 100644 index 0000000..d1d0c2d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go new file mode 100644 index 0000000..bac6681 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go new file mode 100644 index 0000000..63f0534 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint64(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go new file mode 100644 index 0000000..7dedd43 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go new file mode 100644 index 0000000..a4e7122 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type cmsghdr struct{} + +const sizeofCmsghdr = 0 + +func (h *cmsghdr) len() int { return 0 } +func (h *cmsghdr) lvl() int { return 0 } +func (h *cmsghdr) typ() int { return 0 } + +func (h *cmsghdr) set(l, lvl, typ int) {} diff --git a/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/vendor/golang.org/x/net/internal/socket/defs_darwin.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_darwin.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_linux.go b/vendor/golang.org/x/net/internal/socket/defs_linux.go new file mode 100644 index 0000000..ce9ec2f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_linux.go @@ -0,0 +1,49 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include +#include + +#define _GNU_SOURCE +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go new file mode 100644 index 0000000..3f84335 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go @@ -0,0 +1,47 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/vendor/golang.org/x/net/internal/socket/defs_solaris.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_solaris.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go new file mode 100644 index 0000000..93dff91 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "syscall" + +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/error_windows.go b/vendor/golang.org/x/net/internal/socket/error_windows.go new file mode 100644 index 0000000..6a6379a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "syscall" + +var ( + errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING + errEINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.ERROR_IO_PENDING: + return errERROR_IO_PENDING + case syscall.EINVAL: + return errEINVAL + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go new file mode 100644 index 0000000..05d6082 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go new file mode 100644 index 0000000..afb34ad --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go new file mode 100644 index 0000000..8d17a40 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*int8)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go new file mode 100644 index 0000000..c87d2a9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type iovec struct{} + +func (v *iovec) set(b []byte) {} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go new file mode 100644 index 0000000..2e80a9c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,!netbsd + +package socket + +import "net" + +type mmsghdr struct{} + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go new file mode 100644 index 0000000..3c42ea7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux netbsd + +package socket + +import "net" + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + for i := range hs { + vs := make([]iovec, len(ms[i].Buffers)) + var sa []byte + if parseFn != nil { + sa = make([]byte, sizeofSockaddrInet6) + } + if marshalFn != nil { + sa = marshalFn(ms[i].Addr) + } + hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa) + } + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + for i := range hs { + ms[i].N = int(hs[i].Len) + ms[i].NN = hs[i].Hdr.controllen() + ms[i].Flags = hs[i].Hdr.flags() + if parseFn != nil { + var err error + ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go new file mode 100644 index 0000000..5567afc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.Control = (*byte)(unsafe.Pointer(&oob[0])) + h.Controllen = uint32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go new file mode 100644 index 0000000..b8c87b7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = int32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go new file mode 100644 index 0000000..5a38798 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.setControl(oob) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go new file mode 100644 index 0000000..a7a5987 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint32(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go new file mode 100644 index 0000000..610fc4f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint64(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint64(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go new file mode 100644 index 0000000..71a69e2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go new file mode 100644 index 0000000..6465b20 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + if len(vs) > 0 { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + } + if len(oob) > 0 { + h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + h.Accrightslen = int32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) controllen() int { + return int(h.Accrightslen) +} + +func (h *msghdr) flags() int { + return int(NativeEndian.Uint32(h.Pad_cgo_2[:])) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go new file mode 100644 index 0000000..64e8173 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type msghdr struct{} + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {} +func (h *msghdr) name() []byte { return nil } +func (h *msghdr) controllen() int { return 0 } +func (h *msghdr) flags() int { return 0 } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn.go b/vendor/golang.org/x/net/internal/socket/rawconn.go new file mode 100644 index 0000000..d6871d5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -0,0 +1,66 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "syscall" +) + +// A Conn represents a raw connection. +type Conn struct { + network string + c syscall.RawConn +} + +// NewConn returns a new raw connection. +func NewConn(c net.Conn) (*Conn, error) { + var err error + var cc Conn + switch c := c.(type) { + case *net.TCPConn: + cc.network = "tcp" + cc.c, err = c.SyscallConn() + case *net.UDPConn: + cc.network = "udp" + cc.c, err = c.SyscallConn() + case *net.IPConn: + cc.network = "ip" + cc.c, err = c.SyscallConn() + default: + return nil, errors.New("unknown connection type") + } + if err != nil { + return nil, err + } + return &cc, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + var operr error + var n int + fn := func(s uintptr) { + n, operr = getsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return 0, err + } + return n, os.NewSyscallError("getsockopt", operr) +} + +func (o *Option) set(c *Conn, b []byte) error { + var operr error + fn := func(s uintptr) { + operr = setsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return err + } + return os.NewSyscallError("setsockopt", operr) +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go new file mode 100644 index 0000000..499164a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -0,0 +1,74 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build linux + +package socket + +import ( + "net" + "os" + "syscall" +) + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var parseFn func([]byte, string) (net.Addr, error) + if c.network != "tcp" { + parseFn = parseInetAddr + } + if err := hs.pack(ms, parseFn, nil); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("recvmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { + return n, err + } + return n, nil +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var marshalFn func(net.Addr) []byte + if c.network != "tcp" { + marshalFn = marshalInetAddr + } + if err := hs.pack(ms, nil, marshalFn); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("sendmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { + return n, err + } + return n, nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go new file mode 100644 index 0000000..b21d2e6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "os" + "syscall" +) + +func (c *Conn) recvMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if c.network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("recvmsg", operr) + } + if c.network != "tcp" { + var err error + m.Addr, err = parseInetAddr(sa[:], c.network) + if err != nil { + return err + } + } + m.N = n + m.NN = h.controllen() + m.Flags = h.flags() + return nil +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if m.Addr != nil { + sa = marshalInetAddr(m.Addr) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("sendmsg", operr) + } + m.N = n + m.NN = len(m.OOB) + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go new file mode 100644 index 0000000..f78832a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !linux + +package socket + +import "errors" + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go new file mode 100644 index 0000000..96733cb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_stub.go b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go new file mode 100644 index 0000000..d2add1a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/reflect.go b/vendor/golang.org/x/net/internal/socket/reflect.go new file mode 100644 index 0000000..bb179f1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/reflect.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "reflect" + "runtime" +) + +// A Conn represents a raw connection. +type Conn struct { + c net.Conn +} + +// NewConn returns a new raw connection. +func NewConn(c net.Conn) (*Conn, error) { + return &Conn{c: c}, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + s, err := socketOf(c.c) + if err != nil { + return 0, err + } + n, err := getsockopt(s, o.Level, o.Name, b) + return n, os.NewSyscallError("getsockopt", err) +} + +func (o *Option) set(c *Conn, b []byte) error { + s, err := socketOf(c.c) + if err != nil { + return err + } + return os.NewSyscallError("setsockopt", setsockopt(s, o.Level, o.Name, b)) +} + +func socketOf(c net.Conn) (uintptr, error) { + switch c.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + v := reflect.ValueOf(c) + switch e := v.Elem(); e.Kind() { + case reflect.Struct: + fd := e.FieldByName("conn").FieldByName("fd") + switch e := fd.Elem(); e.Kind() { + case reflect.Struct: + sysfd := e.FieldByName("sysfd") + if runtime.GOOS == "windows" { + return uintptr(sysfd.Uint()), nil + } + return uintptr(sysfd.Int()), nil + } + } + } + return 0, errors.New("invalid type") +} diff --git a/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/golang.org/x/net/internal/socket/socket.go new file mode 100644 index 0000000..5f9730e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socket provides a portable interface for socket system +// calls. +package socket // import "golang.org/x/net/internal/socket" + +import ( + "errors" + "net" + "unsafe" +) + +// An Option represents a sticky socket option. +type Option struct { + Level int // level + Name int // name; must be equal or greater than 1 + Len int // length of value in bytes; must be equal or greater than 1 +} + +// Get reads a value for the option from the kernel. +// It returns the number of bytes written into b. +func (o *Option) Get(c *Conn, b []byte) (int, error) { + if o.Name < 1 || o.Len < 1 { + return 0, errors.New("invalid option") + } + if len(b) < o.Len { + return 0, errors.New("short buffer") + } + return o.get(c, b) +} + +// GetInt returns an integer value for the option. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) GetInt(c *Conn) (int, error) { + if o.Len != 1 && o.Len != 4 { + return 0, errors.New("invalid option") + } + var b []byte + var bb [4]byte + if o.Len == 1 { + b = bb[:1] + } else { + b = bb[:4] + } + n, err := o.get(c, b) + if err != nil { + return 0, err + } + if n != o.Len { + return 0, errors.New("invalid option length") + } + if o.Len == 1 { + return int(b[0]), nil + } + return int(NativeEndian.Uint32(b[:4])), nil +} + +// Set writes the option and value to the kernel. +func (o *Option) Set(c *Conn, b []byte) error { + if o.Name < 1 || o.Len < 1 { + return errors.New("invalid option") + } + if len(b) < o.Len { + return errors.New("short buffer") + } + return o.set(c, b) +} + +// SetInt writes the option and value to the kernel. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) SetInt(c *Conn, v int) error { + if o.Len != 1 && o.Len != 4 { + return errors.New("invalid option") + } + var b []byte + if o.Len == 1 { + b = []byte{byte(v)} + } else { + var bb [4]byte + NativeEndian.PutUint32(bb[:o.Len], uint32(v)) + b = bb[:4] + } + return o.set(c, b) +} + +func controlHeaderLen() int { + return roundup(sizeofCmsghdr) +} + +func controlMessageLen(dataLen int) int { + return roundup(sizeofCmsghdr) + dataLen +} + +// ControlMessageSpace returns the whole length of control message. +func ControlMessageSpace(dataLen int) int { + return roundup(sizeofCmsghdr) + roundup(dataLen) +} + +// A ControlMessage represents the head message in a stream of control +// messages. +// +// A control message comprises of a header, data and a few padding +// fields to conform to the interface to the kernel. +// +// See RFC 3542 for further information. +type ControlMessage []byte + +// Data returns the data field of the control message at the head on +// m. +func (m ControlMessage) Data(dataLen int) []byte { + l := controlHeaderLen() + if len(m) < l || len(m) < l+dataLen { + return nil + } + return m[l : l+dataLen] +} + +// Next returns the control message at the next on m. +// +// Next works only for standard control messages. +func (m ControlMessage) Next(dataLen int) ControlMessage { + l := ControlMessageSpace(dataLen) + if len(m) < l { + return nil + } + return m[l:] +} + +// MarshalHeader marshals the header fields of the control message at +// the head on m. +func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { + if len(m) < controlHeaderLen() { + return errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(dataLen), lvl, typ) + return nil +} + +// ParseHeader parses and returns the header fields of the control +// message at the head on m. +func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { + l := controlHeaderLen() + if len(m) < l { + return 0, 0, 0, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil +} + +// Marshal marshals the control message at the head on m, and returns +// the next control message. +func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { + l := len(data) + if len(m) < ControlMessageSpace(l) { + return nil, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(l), lvl, typ) + if l > 0 { + copy(m.Data(l), data) + } + return m.Next(l), nil +} + +// Parse parses m as a single or multiple control messages. +// +// Parse works for both standard and compatible messages. +func (m ControlMessage) Parse() ([]ControlMessage, error) { + var ms []ControlMessage + for len(m) >= controlHeaderLen() { + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + l := h.len() + if l <= 0 { + return nil, errors.New("invalid header length") + } + if uint64(l) < uint64(controlHeaderLen()) { + return nil, errors.New("invalid message length") + } + if uint64(l) > uint64(len(m)) { + return nil, errors.New("short buffer") + } + // On message reception: + // + // |<- ControlMessageSpace --------------->| + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. + // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. + // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. +func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go b/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go new file mode 100644 index 0000000..c4edd4a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go @@ -0,0 +1,259 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +type mockControl struct { + Level int + Type int + Data []byte +} + +func TestControlMessage(t *testing.T) { + for _, tt := range []struct { + cs []mockControl + }{ + { + []mockControl{ + {Level: 1, Type: 1}, + }, + }, + { + []mockControl{ + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + { + []mockControl{ + {Level: 3, Type: 3, Data: []byte{0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + } { + var w []byte + var tailPadLen int + mm := socket.NewControlMessage([]int{0}) + for i, c := range tt.cs { + m := socket.NewControlMessage([]int{len(c.Data)}) + l := len(m) - len(mm) + if i == len(tt.cs)-1 && l > len(c.Data) { + tailPadLen = l - len(c.Data) + } + w = append(w, m...) + } + + var err error + ww := make([]byte, len(w)) + copy(ww, w) + m := socket.ControlMessage(ww) + for _, c := range tt.cs { + if err = m.MarshalHeader(c.Level, c.Type, len(c.Data)); err != nil { + t.Fatalf("(%v).MarshalHeader() = %v", tt.cs, err) + } + copy(m.Data(len(c.Data)), c.Data) + m = m.Next(len(c.Data)) + } + m = socket.ControlMessage(w) + for _, c := range tt.cs { + m, err = m.Marshal(c.Level, c.Type, c.Data) + if err != nil { + t.Fatalf("(%v).Marshal() = %v", tt.cs, err) + } + } + if !bytes.Equal(ww, w) { + t.Fatalf("got %#v; want %#v", ww, w) + } + + ws := [][]byte{w} + if tailPadLen > 0 { + // Test a message with no tail padding. + nopad := w[:len(w)-tailPadLen] + ws = append(ws, [][]byte{nopad}...) + } + for _, w := range ws { + ms, err := socket.ControlMessage(w).Parse() + if err != nil { + t.Fatalf("(%v).Parse() = %v", tt.cs, err) + } + for i, m := range ms { + lvl, typ, dataLen, err := m.ParseHeader() + if err != nil { + t.Fatalf("(%v).ParseHeader() = %v", tt.cs, err) + } + if lvl != tt.cs[i].Level || typ != tt.cs[i].Type || dataLen != len(tt.cs[i].Data) { + t.Fatalf("%v: got %d, %d, %d; want %d, %d, %d", tt.cs[i], lvl, typ, dataLen, tt.cs[i].Level, tt.cs[i].Type, len(tt.cs[i].Data)) + } + } + } + } +} + +func TestUDP(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + t.Fatal(err) + } + + t.Run("Message", func(t *testing.T) { + data := []byte("HELLO-R-U-THERE") + wm := socket.Message{ + Buffers: bytes.SplitAfter(data, []byte("-")), + Addr: c.LocalAddr(), + } + if err := cc.SendMsg(&wm, 0); err != nil { + t.Fatal(err) + } + b := make([]byte, 32) + rm := socket.Message{ + Buffers: [][]byte{b[:1], b[1:3], b[3:7], b[7:11], b[11:]}, + } + if err := cc.RecvMsg(&rm, 0); err != nil { + t.Fatal(err) + } + if !bytes.Equal(b[:rm.N], data) { + t.Fatalf("got %#v; want %#v", b[:rm.N], data) + } + }) + switch runtime.GOOS { + case "android", "linux": + t.Run("Messages", func(t *testing.T) { + data := []byte("HELLO-R-U-THERE") + wmbs := bytes.SplitAfter(data, []byte("-")) + wms := []socket.Message{ + {Buffers: wmbs[:1], Addr: c.LocalAddr()}, + {Buffers: wmbs[1:], Addr: c.LocalAddr()}, + } + n, err := cc.SendMsgs(wms, 0) + if err != nil { + t.Fatal(err) + } + if n != len(wms) { + t.Fatalf("got %d; want %d", n, len(wms)) + } + b := make([]byte, 32) + rmbs := [][][]byte{{b[:len(wmbs[0])]}, {b[len(wmbs[0]):]}} + rms := []socket.Message{ + {Buffers: rmbs[0]}, + {Buffers: rmbs[1]}, + } + n, err = cc.RecvMsgs(rms, 0) + if err != nil { + t.Fatal(err) + } + if n != len(rms) { + t.Fatalf("got %d; want %d", n, len(rms)) + } + nn := 0 + for i := 0; i < n; i++ { + nn += rms[i].N + } + if !bytes.Equal(b[:nn], data) { + t.Fatalf("got %#v; want %#v", b[:nn], data) + } + }) + } + + // The behavior of transmission for zero byte paylaod depends + // on each platform implementation. Some may transmit only + // protocol header and options, other may transmit nothing. + // We test only that SendMsg and SendMsgs will not crash with + // empty buffers. + wm := socket.Message{ + Buffers: [][]byte{{}}, + Addr: c.LocalAddr(), + } + cc.SendMsg(&wm, 0) + wms := []socket.Message{ + {Buffers: [][]byte{{}}, Addr: c.LocalAddr()}, + } + cc.SendMsgs(wms, 0) +} + +func BenchmarkUDP(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + b.Fatal(err) + } + data := []byte("HELLO-R-U-THERE") + wm := socket.Message{ + Buffers: [][]byte{data}, + Addr: c.LocalAddr(), + } + rm := socket.Message{ + Buffers: [][]byte{make([]byte, 128)}, + OOB: make([]byte, 128), + } + + for M := 1; M <= 1<<9; M = M << 1 { + b.Run(fmt.Sprintf("Iter-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + for j := 0; j < M; j++ { + if err := cc.SendMsg(&wm, 0); err != nil { + b.Fatal(err) + } + if err := cc.RecvMsg(&rm, 0); err != nil { + b.Fatal(err) + } + } + } + }) + switch runtime.GOOS { + case "android", "linux": + wms := make([]socket.Message, M) + for i := range wms { + wms[i].Buffers = [][]byte{data} + wms[i].Addr = c.LocalAddr() + } + rms := make([]socket.Message, M) + for i := range rms { + rms[i].Buffers = [][]byte{make([]byte, 128)} + rms[i].OOB = make([]byte, 128) + } + b.Run(fmt.Sprintf("Batch-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := cc.SendMsgs(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := cc.RecvMsgs(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + } + } +} diff --git a/vendor/golang.org/x/net/internal/socket/socket_test.go b/vendor/golang.org/x/net/internal/socket/socket_test.go new file mode 100644 index 0000000..bf3751b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket_test.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket_test + +import ( + "net" + "runtime" + "syscall" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +func TestSocket(t *testing.T) { + t.Run("Option", func(t *testing.T) { + testSocketOption(t, &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4}) + }) +} + +func testSocketOption(t *testing.T, so *socket.Option) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + t.Fatal(err) + } + const N = 2048 + if err := so.SetInt(cc, N); err != nil { + t.Fatal(err) + } + n, err := so.GetInt(cc) + if err != nil { + t.Fatal(err) + } + if n < N { + t.Fatalf("got %d; want greater than or equal to %d", n, N) + } +} diff --git a/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 0000000..4f0eead --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. + NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 0000000..f13e14f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package socket + +import "errors" + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 0000000..f723fa3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd netbsd openbsd + +package socket + +import "unsafe" + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 0000000..b17d223 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 0000000..b17d223 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 0000000..1559521 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 0000000..235b2cc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.s b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s new file mode 100644 index 0000000..93e7d75 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 0000000..9decee2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 0000000..d753b43 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 0000000..b670894 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 0000000..9c0d740 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 0000000..071a4ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 0000000..071a4ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 0000000..9c0d740 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 0000000..21c1e3f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 0000000..21c1e3f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 0000000..327979e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 0000000..06d7562 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 0000000..431851c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 0000000..dc130c2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,168 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An ipv6ZoneCache represents a cache holding partial network +// interface information. It is used for reducing the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing the index are managed by first-come +// first-served basis for consistency. +type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +func (zc *ipv6ZoneCache) update(ift []net.Interface) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if zc.lastFetched.After(now.Add(-60 * time.Second)) { + return + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } +} + +func (zc *ipv6ZoneCache) name(zone int) string { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + name, ok := zoneCache.toName[zone] + if !ok { + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + index, ok := zoneCache.toIndex[zone] + if !ok { + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 0000000..cced74e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "errors" + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 0000000..a18ac5e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 0000000..d9f06d0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "errors" + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errors.New("not implemented") +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errors.New("not implemented") +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errors.New("not implemented") +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 0000000..18eba30 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 0000000..54a470e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "errors" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x17 + + sysSOCK_RAW = 0x3 +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 0000000..26f8fef --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 0000000..e2987f7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 0000000..26f8fef --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 0000000..e2987f7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 0000000..c582abd --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 0000000..04a2488 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 0000000..35c7cb9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 0000000..04a2488 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 0000000..db60491 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 0000000..2a1a799 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,68 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 0000000..db60491 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 0000000..1c83636 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 0000000..a6c0bf4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 0000000..1c83636 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 0000000..327c632 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,60 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_RAW = 0x4 +) + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 0000000..685f0e7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go new file mode 100644 index 0000000..66325a9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package timeseries + +import ( + "math" + "testing" + "time" +) + +func isNear(x *Float, y float64, tolerance float64) bool { + return math.Abs(x.Value()-y) < tolerance +} + +func isApproximate(x *Float, y float64) bool { + return isNear(x, y, 1e-2) +} + +func checkApproximate(t *testing.T, o Observable, y float64) { + x := o.(*Float) + if !isApproximate(x, y) { + t.Errorf("Wanted %g, got %g", y, x.Value()) + } +} + +func checkNear(t *testing.T, o Observable, y, tolerance float64) { + x := o.(*Float) + if !isNear(x, y, tolerance) { + t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value()) + } +} + +var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC) + +func tu(s int64) time.Time { + return baseTime.Add(time.Duration(s) * time.Second) +} + +func tu2(s int64, ns int64) time.Time { + return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond) +} + +func TestBasicTimeSeries(t *testing.T) { + ts := NewTimeSeries(NewFloat) + fo := new(Float) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(1)), 40) + checkApproximate(t, ts.Total(), 40) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 40) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 70) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 60) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 90) + *fo = Float(100) + ts.AddWithTime(fo, tu(100)) + checkApproximate(t, ts.Range(tu(99), tu(100)), 100) + checkApproximate(t, ts.Range(tu(0), tu(4)), 36) + checkApproximate(t, ts.Total(), 190) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(4)), 44) + checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Total(), 210) + + for i, l := range ts.ComputeRange(tu(36), tu(100), 64) { + if i == 63 { + checkApproximate(t, l, 100) + } else { + checkApproximate(t, l, 0) + } + } + + checkApproximate(t, ts.Range(tu(0), tu(100)), 210) + checkApproximate(t, ts.Range(tu(10), tu(100)), 100) + + for i, l := range ts.ComputeRange(tu(0), tu(100), 100) { + if i < 10 { + checkApproximate(t, l, 11) + } else if i >= 90 { + checkApproximate(t, l, 10) + } else { + checkApproximate(t, l, 0) + } + } +} + +func TestFloat(t *testing.T) { + f := Float(1) + if g, w := f.String(), "1"; g != w { + t.Errorf("Float(1).String = %q; want %q", g, w) + } + f2 := Float(2) + var o Observable = &f2 + f.Add(o) + if g, w := f.Value(), 3.0; g != w { + t.Errorf("Float post-add = %v; want %v", g, w) + } + f.Multiply(2) + if g, w := f.Value(), 6.0; g != w { + t.Errorf("Float post-multiply = %v; want %v", g, w) + } + f.Clear() + if g, w := f.Value(), 0.0; g != w { + t.Errorf("Float post-clear = %v; want %v", g, w) + } + f.CopyFrom(&f2) + if g, w := f.Value(), 2.0; g != w { + t.Errorf("Float post-CopyFrom = %v; want %v", g, w) + } +} + +type mockClock struct { + time time.Time +} + +func (m *mockClock) Time() time.Time { return m.time } +func (m *mockClock) Set(t time.Time) { m.time = t } + +const buckets = 6 + +var testResolutions = []time.Duration{ + 10 * time.Second, // level holds one minute of observations + 100 * time.Second, // level holds ten minutes of observations + 10 * time.Minute, // level holds one hour of observations +} + +// TestTimeSeries uses a small number of buckets to force a higher +// error rate on approximations from the timeseries. +type TestTimeSeries struct { + timeSeries +} + +func TestExpectedErrorRate(t *testing.T) { + ts := new(TestTimeSeries) + fake := new(mockClock) + fake.Set(time.Now()) + ts.timeSeries.init(testResolutions, NewFloat, buckets, fake) + for i := 1; i <= 61*61; i++ { + fake.Set(fake.Time().Add(1 * time.Second)) + ob := Float(1) + ts.AddWithTime(&ob, fake.Time()) + + // The results should be accurate within one missing bucket (1/6) of the observations recorded. + checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10) + checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100) + checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600) + } +} + +func min(a, b float64) float64 { + if a < b { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 0000000..b445499 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,191 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv4/bpf_test.go b/vendor/golang.org/x/net/ipv4/bpf_test.go new file mode 100644 index 0000000..b44da90 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/bpf_test.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv4.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 0000000..a2b02ca --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per packet basis IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn or RawConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn or RawConn allows to send the options + // to the protocol stack. + // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. +func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 0000000..77e7ad5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) +} + +func parseDst(cm *ControlMessage, b []byte) { + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) +} + +func parseInterface(cm *ControlMessage, b []byte) { + sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 0000000..425338f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) + if cm != nil { + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInetPktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 0000000..5a2f7d8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/control_test.go b/vendor/golang.org/x/net/ipv4/control_test.go new file mode 100644 index 0000000..f87fe12 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "testing" + + "golang.org/x/net/ipv4" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv4.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00", + "\f\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 0000000..e1ae816 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if so, ok := sockOpts[ssoPacketInfo]; ok { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 0000000..ce55c66 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go new file mode 100644 index 0000000..c8f2e05 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_darwin.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_STRIPHDR = C.IP_STRIPHDR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go new file mode 100644 index 0000000..f30544e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go new file mode 100644 index 0000000..4dd57d8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_freebsd.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_SENDSRCADDR = C.IP_SENDSRCADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_ONESBCAST = C.IP_ONESBCAST + sysIP_BINDANY = C.IP_BINDANY + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_MINTTL = C.IP_MINTTL + sysIP_DONTFRAG = C.IP_DONTFRAG + sysIP_RECVTOS = C.IP_RECVTOS + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go new file mode 100644 index 0000000..beb1107 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_linux.go @@ -0,0 +1,122 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_PKTOPTIONS = C.IP_PKTOPTIONS + sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER + sysIP_RECVERR = C.IP_RECVERR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_RECVTOS = C.IP_RECVTOS + sysIP_MTU = C.IP_MTU + sysIP_FREEBIND = C.IP_FREEBIND + sysIP_TRANSPARENT = C.IP_TRANSPARENT + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR + sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR + sysIP_MINTTL = C.IP_MINTTL + sysIP_NODEFRAG = C.IP_NODEFRAG + sysIP_UNICAST_IF = C.IP_UNICAST_IF + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_MSFILTER = C.IP_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL + + //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT + //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT + //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO + //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE + //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE + //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT + + sysICMP_FILTER = C.ICMP_FILTER + + sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE + sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL + sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP + sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 + sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS + sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING + + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPFilter = C.sizeof_struct_icmp_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type sockExtendedErr C.struct_sock_extended_err + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpFilter C.struct_icmp_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go new file mode 100644 index 0000000..8f8af1b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_netbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go new file mode 100644 index 0000000..8f8af1b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_openbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go new file mode 100644 index 0000000..aeb33e9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_solaris.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVSLLA = C.IP_RECVSLLA + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_NEXTHOP = C.IP_NEXTHOP + + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + sysIP_DONTFRAG = C.IP_DONTFRAG + + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC + sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL + sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF + + sysIP_REUSEADDR = C.IP_REUSEADDR + sysIP_DONTROUTE = C.IP_DONTROUTE + sysIP_BROADCAST = C.IP_BROADCAST + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go new file mode 100644 index 0000000..54d77d5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/bpf" +) + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, ttl) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 0000000..b43935a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,244 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. +// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv4 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the type-of-service field on the +// IPv4 header for each packet. +// +// ln, err := net.Listen("tcp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPconn which are created as network connections that use the +// IPv4 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.IPv4(224, 0, 0, 250) +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv4 and Ethernet. +// +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if cm.Dst.IsMulticast() { +// if cm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTOS(0x0) +// p.SetTTL(16) +// if _, err := p.WriteTo(data, nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// for _, ifi := range []*net.Interface{en0, en1} { +// if err := p.SetMulticastInterface(ifi); err != nil { +// // error handling +// } +// p.SetMulticastTTL(2) +// if _, err := p.WriteTo(data, nil, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn or RawConn may join multiple +// multicast groups. For example, a UDP listener with port 1024 might +// join two different groups across over two different network +// interfaces by using: +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv4.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// p2 := ipv4.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn or RawConn on IGMPv3 supported +// platform is able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}) +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on IGMPv3 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// IGMPv1 or IGMPv2 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv4 // import "golang.org/x/net/ipv4" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go new file mode 100644 index 0000000..2ab8773 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -0,0 +1,187 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn and RawConn are +// not implemented. + +// A Conn represents a network endpoint that uses the IPv4 transport. +// It is used to control basic IP-level socket options such as TOS and +// TTL. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses the +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. It also provides datagram based +// network I/O methods specific to the IPv4 and higher layer protocols +// such as UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage sets the per packet IP-level socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + p := &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv4 and higher layer +// protocols that handle IPv4 datagram directly such as OSPF, GRE. +type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + return nil, err + } + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errOpNoSupport + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/example_test.go b/vendor/golang.org/x/net/ipv4/example_test.go new file mode 100644 index 0000000..ddc7577 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/example_test.go @@ -0,0 +1,224 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "fmt" + "log" + "net" + "os" + "runtime" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "0.0.0.0:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To4() != nil { + p := ipv4.NewConn(c) + if err := p.SetTOS(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetTTL(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp4", "0.0.0.0:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { + log.Fatal(err) + } + + b := make([]byte, 1500) + for { + _, cm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !cm.Dst.IsMulticast() || !cm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, nil, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. + + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To4() != nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no A record found") + } + + c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if err := p.SetTTL(i); err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. + begin := time.Now() + if _, err := p.WriteTo(wb, nil, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, cm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(1, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv4.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + case ipv4.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + return + default: + log.Printf("unknown ICMP message: %+v\n", rm) + } + } +} + +func ExampleRawConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + log.Fatal(err) + } + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.IPv4(224, 0, 0, 5)} + if err := r.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer r.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 24) // fake ospf header, you need to implement this + ospf[0] = 2 // version 2 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) + iph := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 0xc0, // DSCP CS6 + TotalLen: ipv4.HeaderLen + len(ospf), + TTL: 1, + Protocol: 89, + Dst: allSPFRouters.IP.To4(), + } + + var cm *ipv4.ControlMessage + switch runtime.GOOS { + case "darwin", "linux": + cm = &ipv4.ControlMessage{IfIndex: en0.Index} + default: + if err := r.SetMulticastInterface(en0); err != nil { + log.Fatal(err) + } + } + if err := r.WriteTo(iph, ospf, cm); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go new file mode 100644 index 0000000..9d490fa --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", + parseICMPv4Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv4\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv4Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv4Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigDescr) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv4Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv4ParamRecord struct { + OrigDescr string + Descr string + Value int +} + +func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Descr, "Reserved") || + strings.Contains(pr.Descr, "Unassigned") || + strings.Contains(pr.Descr, "Deprecated") || + strings.Contains(pr.Descr, "Experiment") || + strings.Contains(pr.Descr, "experiment") { + continue + } + ss := strings.Split(pr.Descr, "\n") + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + s := strings.TrimSpace(prs[i].Descr) + prs[i].OrigDescr = s + prs[i].Descr = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go new file mode 100644 index 0000000..119bf84 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -0,0 +1,57 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "syscall" + +// TOS returns the type-of-service field value for outgoing packets. +func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 0000000..8bb0f0f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,159 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers + maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. +type Header struct { + Version int // protocol version + Len int // header length + TOS int // type-of-service + TotalLen int // packet total length + ID int // identification + Flags HeaderFlags // flags + FragOff int // fragment offset + TTL int // time-to-live + Protocol int // next protocol + Checksum int // checksum + Src net.IP // source address + Dst net.IP // destination address + Options []byte // options, extension headers +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) +} + +// Marshal returns the binary encoding of h. +func (h *Header) Marshal() ([]byte, error) { + if h == nil { + return nil, syscall.EINVAL + } + if h.Len < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := HeaderLen + len(h.Options) + b := make([]byte, hdrlen) + b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f)) + b[1] = byte(h.TOS) + flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "freebsd": + if freebsdVersion < 1100000 { + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } else { + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + default: + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) + b[8] = byte(h.TTL) + b[9] = byte(h.Protocol) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) + if ip := h.Src.To4(); ip != nil { + copy(b[12:16], ip[:net.IPv4len]) + } + if ip := h.Dst.To4(); ip != nil { + copy(b[16:20], ip[:net.IPv4len]) + } else { + return nil, errMissingAddress + } + if len(h.Options) > 0 { + copy(b[HeaderLen:], h.Options) + } + return b, nil +} + +// Parse parses b as an IPv4 header and sotres the result in h. +func (h *Header) Parse(b []byte) error { + if h == nil || len(b) < HeaderLen { + return errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return errBufferTooShort + } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + case "freebsd": + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/header_test.go b/vendor/golang.org/x/net/ipv4/header_test.go new file mode 100644 index 0000000..a246aee --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header_test.go @@ -0,0 +1,228 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "bytes" + "encoding/binary" + "net" + "reflect" + "runtime" + "strings" + "testing" + + "golang.org/x/net/internal/socket" +) + +type headerTest struct { + wireHeaderFromKernel []byte + wireHeaderToKernel []byte + wireHeaderFromTradBSDKernel []byte + wireHeaderToTradBSDKernel []byte + wireHeaderFromFreeBSD10Kernel []byte + wireHeaderToFreeBSD10Kernel []byte + *Header +} + +var headerLittleEndianTests = []headerTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. + { + wireHeaderFromKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x45, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, + }, + + // with option headers + { + wireHeaderFromKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x46, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen + 4, + TOS: 1, + TotalLen: 0xbef3, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + Options: []byte{0xff, 0xfe, 0xfe, 0xff}, + }, + }, +} + +func TestMarshalHeader(t *testing.T) { + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + + for _, tt := range headerLittleEndianTests { + b, err := tt.Header.Marshal() + if err != nil { + t.Fatal(err) + } + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderToTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderToTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderToFreeBSD10Kernel + default: + wh = tt.wireHeaderToKernel + } + default: + wh = tt.wireHeaderToKernel + } + if !bytes.Equal(b, wh) { + t.Fatalf("got %#v; want %#v", b, wh) + } + } +} + +func TestParseHeader(t *testing.T) { + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for big endian machine yet") + } + + for _, tt := range headerLittleEndianTests { + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderFromTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderFromTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderFromFreeBSD10Kernel + default: + wh = tt.wireHeaderFromKernel + } + default: + wh = tt.wireHeaderFromKernel + } + h, err := ParseHeader(wh) + if err != nil { + t.Fatal(err) + } + if err := h.Parse(wh); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 0000000..a5052e3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,63 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errMissingHeader = errors.New("missing header") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") + errNoSuchMulticastInterface = errors.New("no such multicast interface") + + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 0000000..be10c94 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,34 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv4 + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +const ( + ICMPTypeEchoReply ICMPType = 0 // Echo Reply + ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable + ICMPTypeRedirect ICMPType = 5 // Redirect + ICMPTypeEcho ICMPType = 8 // Echo + ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement + ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation + ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem + ICMPTypeTimestamp ICMPType = 13 // Timestamp + ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply + ICMPTypePhoturis ICMPType = 40 // Photuris +) + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +var icmpTypes = map[ICMPType]string{ + 0: "echo reply", + 3: "destination unreachable", + 5: "redirect", + 8: "echo", + 9: "router advertisement", + 10: "router solicitation", + 11: "time exceeded", + 12: "parameter problem", + 13: "timestamp", + 14: "timestamp reply", + 40: "photuris", +} diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go new file mode 100644 index 0000000..9902bb3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv4 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model and it works not +// only for IPv6 but IPv4. A node means a device that implements IP. +// A router means a node that forwards IP packets not explicitly +// addressed to itself, and a host means a node that is not a router. +type ICMPFilter struct { + icmpFilter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go new file mode 100644 index 0000000..6e1c5c8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +func (f *icmpFilter) accept(typ ICMPType) { + f.Data &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) block(typ ICMPType) { + f.Data |= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) setAll(block bool) { + if block { + f.Data = 1<<32 - 1 + } else { + f.Data = 0 + } +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return f.Data&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go new file mode 100644 index 0000000..21bb29a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +const sizeofICMPFilter = 0x0 + +type icmpFilter struct { +} + +func (f *icmpFilter) accept(typ ICMPType) { +} + +func (f *icmpFilter) block(typ ICMPType) { +} + +func (f *icmpFilter) setAll(block bool) { +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_test.go b/vendor/golang.org/x/net/ipv4/icmp_test.go new file mode 100644 index 0000000..3324b54 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_test.go @@ -0,0 +1,95 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var icmpStringTests = []struct { + in ipv4.ICMPType + out string +}{ + {ipv4.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, ""}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv4.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv4.ICMPType{ + ipv4.ICMPTypeDestinationUnreachable, + ipv4.ICMPTypeEchoReply, + ipv4.ICMPTypeTimeExceeded, + ipv4.ICMPTypeParameterProblem, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + + var f ipv4.ICMPFilter + f.SetAll(true) + f.Accept(ipv4.ICMPTypeEcho) + f.Accept(ipv4.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicast_test.go b/vendor/golang.org/x/net/ipv4/multicast_test.go new file mode 100644 index 0000000..bcf4973 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicast_test.go @@ -0,0 +1,334 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {"232.0.1.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp4", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv4.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagDst | ipv4.FlagInterface + if runtime.GOOS != "solaris" { + // Solaris never allows to modify ICMP properties. + cf |= ipv4.FlagTTL + } + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + switch { + case m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} + +var rawConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnReadWriteMulticastICMP(t *testing.T) { + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + if tt.src == nil { + if err := r.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer r.LeaveGroup(ifi, tt.grp) + } else { + if err := r.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := r.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := r.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := r.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := r.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + Protocol: 1, + Dst: tt.grp.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + r.SetMulticastTTL(i + 1) + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + if rh, b, _, err := r.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + switch { + case (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastlistener_test.go b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go new file mode 100644 index 0000000..e43fbbe --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, // see RFC 4727 + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}, + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp4", "0.0.0.0:0") // wildcard address with no reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp4", "224.0.0.0:0") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + _, port, err := net.SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c2, err := net.ListenPacket("udp4", net.JoinHostPort("224.0.0.0", port)) // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var ps [2]*ipv4.PacketConn + ps[0] = ipv4.NewPacketConn(c1) + ps[1] = ipv4.NewPacketConn(c2) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + port := "0" + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp4", net.JoinHostPort(ip.String(), port)) // unicast address with non-reusable port + if err != nil { + // The listen may fail when the serivce is + // already in use, but it's fine because the + // purpose of this is not to test the + // bookkeeping of IP control block inside the + // kernel. + t.Log(err) + continue + } + defer c.Close() + if port == "0" { + _, port, err = net.SplitHostPort(c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + } + p := ipv4.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := r.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.RawConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip4:253", ip.String()) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{r, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go new file mode 100644 index 0000000..f7efac2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go @@ -0,0 +1,195 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp4", "", "224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, nil}, // see RFC 4727 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {"udp4", "", "232.0.0.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +var rawConnMulticastSocketOptionTests = []struct { + grp, src net.Addr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnMulticastSocketOptionTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, r, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, r, ifi, tt.grp, tt.src) + } + } +} + +type testIPv4MulticastConn interface { + MulticastTTL() (int, error) + SetMulticastTTL(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp net.Addr) { + const ttl = 255 + if err := c.SetMulticastTTL(ttl); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastTTL(); err != nil { + t.Error(err) + return + } else if v != ttl { + t.Errorf("got %v; want %v", v, ttl) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 0000000..f00f5b0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,69 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + *net.IPConn + *socket.Conn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. +func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. The cm +// may be nil if control of the outgoing datagram is not required. +// +// The IPv4 header h must contain appropriate fields that include: +// +// Version = +// Len = +// TOS = +// TotalLen = +// ID = platform sets an appropriate value if ID is zero +// FragOff = +// TTL = +// Protocol = +// Checksum = platform sets an appropriate value if Checksum is zero +// Src = platform sets an appropriate value if Src is nil +// Dst = +// Options = optional +func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { + if !c.ok() { + return syscall.EINVAL + } + return c.writeTo(h, p, cm) +} diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_8.go b/vendor/golang.org/x/net/ipv4/packet_go1_8.go new file mode 100644 index 0000000..b47d186 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet_go1_8.go @@ -0,0 +1,56 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4 + +import "net" + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + n, nn, _, src, err := c.ReadMsgIP(b, oob) + if err != nil { + return nil, nil, nil, err + } + var hs []byte + if hs, p, err = slicePacket(b[:n]); err != nil { + return nil, nil, nil, err + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, err + } + if nn > 0 { + cm = new(ControlMessage) + if err := cm.Parse(oob[:nn]); err != nil { + return nil, nil, nil, err + } + } + if src != nil && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + oob := cm.Marshal() + wh, err := h.Marshal() + if err != nil { + return err + } + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + wh = append(wh, p...) + _, _, err = c.WriteMsgIP(wh, oob, dst) + return err +} diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_9.go b/vendor/golang.org/x/net/ipv4/packet_go1_9.go new file mode 100644 index 0000000..082c36d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 0000000..f95f811 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv4 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 0000000..3f06d76 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go new file mode 100644 index 0000000..d26ccd9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + nb := make([]byte, maxHeaderLen+len(b)) + if n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { + return 0, nil, nil, err + } + hdrlen := int(nb[0]&0x0f) << 2 + copy(b, nb[hdrlen:]) + n -= hdrlen + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP4(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go new file mode 100644 index 0000000..2f19311 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 0000000..3926de7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go new file mode 100644 index 0000000..1cd926e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go @@ -0,0 +1,248 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go new file mode 100644 index 0000000..365de02 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go @@ -0,0 +1,388 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv4.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv4.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + batchReader := func() { + defer wg.Done() + ms := []ipv4.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv4.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv4.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + var b []byte + if _, ok := dst.(*net.IPAddr); ok { + var h ipv4.Header + if err := h.Parse(ms[0].Buffers[0][:ms[0].N]); err != nil { + t.Error(err) + return + } + b = ms[0].Buffers[0][h.Len:ms[0].N] + } else { + b = ms[0].Buffers[0][:ms[0].N] + } + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv4.Message{ + { + Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_test.go b/vendor/golang.org/x/net/ipv4/readwrite_test.go new file mode 100644 index 0000000..3896a8a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_test.go @@ -0,0 +1,140 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + + dst := c.LocalAddr() + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv4UDP", func(b *testing.B) { + p := ipv4.NewPacketConn(c) + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv4.ControlMessage{TTL: 1} + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %d; want %d", n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go new file mode 100644 index 0000000..22e90c0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTOS = iota // header field for unicast packet + ssoTTL // header field for unicast packet + ssoMulticastTTL // header field for multicast packet + ssoMulticastInterface // outbound interface for multicast packet + ssoMulticastLoopback // loopback for multicast packet + ssoReceiveTTL // header field on received packet + ssoReceiveDst // header field on received packet + ssoReceiveInterface // inbound interface on received packet + ssoPacketInfo // incbound or outbound packet path + ssoHeaderPrepend // ipv4 header prepend + ssoStripHeader // strip ipv4 header + ssoICMPFilter // icmp filter + ssoJoinGroup // any-source multicast + ssoLeaveGroup // any-source multicast + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeIPMreqn + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go new file mode 100644 index 0000000..e96955b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index 0000000..23249b7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go new file mode 100644 index 0000000..0388cba --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -0,0 +1,119 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + +func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 0000000..f391920 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go new file mode 100644 index 0000000..1f24f69 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err + } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go new file mode 100644 index 0000000..0711d3d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 0000000..9f30b73 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 0000000..9a21320 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 0000000..58256dd --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 0000000..e8fb191 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. + if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO + ctlOpts[ctlPacketInfo].length = sizeofInetPktinfo + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlPacketInfo].parse = parsePacketInfo + sockOpts[ssoPacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}} + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 0000000..859764f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 0000000..b800324 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 0000000..60defe1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_solaris.go b/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 0000000..832fef1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 4, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go new file mode 100644 index 0000000..ae5704e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 0000000..e6b7623 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 0000000..4f07647 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 0000000..b0913d5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,67 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc +) + +type inetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type ipMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type ipMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/unicast_test.go b/vendor/golang.org/x/net/ipv4/unicast_test.go new file mode 100644 index 0000000..02c089f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicast_test.go @@ -0,0 +1,247 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagDst | ipv4.FlagInterface + if runtime.GOOS != "solaris" { + // Solaris never allows to modify ICMP properties. + cf |= ipv4.FlagTTL + } + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + loop: + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} + +func TestRawConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + TTL: i + 1, + Protocol: 1, + Dst: dst.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + loop: + if err := r.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if _, b, _, err := r.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go new file mode 100644 index 0000000..db5213b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go @@ -0,0 +1,148 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() + + c, err := net.Dial("tcp4", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewConn(c)) + + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp4", "", "127.0.0.1:0"}, + {"ip4", ":icmp", "127.0.0.1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewPacketConn(c)) + } +} + +func TestRawConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + + testUnicastSocketOptions(t, r) +} + +type testIPv4UnicastConn interface { + TOS() (int, error) + SetTOS(int) error + TTL() (int, error) + SetTTL(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) { + tos := iana.DiffServCS0 | iana.NotECNTransport + switch runtime.GOOS { + case "windows": + // IP_TOS option is supported on Windows 8 and beyond. + t.Skipf("not supported on %s", runtime.GOOS) + } + + if err := c.SetTOS(tos); err != nil { + t.Fatal(err) + } + if v, err := c.TOS(); err != nil { + t.Fatal(err) + } else if v != tos { + t.Fatalf("got %v; want %v", v, tos) + } + const ttl = 255 + if err := c.SetTTL(ttl); err != nil { + t.Fatal(err) + } + if v, err := c.TTL(); err != nil { + t.Fatal(err) + } else if v != ttl { + t.Fatalf("got %v; want %v", v, ttl) + } +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 0000000..c07cc88 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 0000000..c4365e9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,31 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 0000000..8c4aec9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 0000000..4b10b7c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 0000000..4b10b7c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 0000000..f65bd9a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 0000000..fd3624d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 0000000..12f36be --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 0000000..0a3875c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,100 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 + + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 0000000..4f5fe68 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,119 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv6 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv6/bpf_test.go b/vendor/golang.org/x/net/ipv6/bpf_test.go new file mode 100644 index 0000000..8253e1f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/bpf_test.go @@ -0,0 +1,96 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv6" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + l, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv6.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp6", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 0000000..2da6444 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. + // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. +func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 0000000..9fd9eb1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 0000000..eec529c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) + } + return m.Next(4) +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 0000000..a045f28 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/control_test.go b/vendor/golang.org/x/net/ipv6/control_test.go new file mode 100644 index 0000000..c186ca9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "testing" + + "golang.org/x/net/ipv6" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv6.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00)\x00\x00\x00.\x00\x00\x00", + "\f\x00\x00\x00)\x00\x00\x00,\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 0000000..6651506 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 0000000..ef2563b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go new file mode 100644 index 0000000..55ddc11 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_darwin.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#define __APPLE_USE_RFC_3542 +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go new file mode 100644 index 0000000..a4c383a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go new file mode 100644 index 0000000..53e6253 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_freebsd.go @@ -0,0 +1,105 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_BINDANY = C.IPV6_BINDANY + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go new file mode 100644 index 0000000..3308cb2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_linux.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + sysIPV6_ADDRFORM = C.IPV6_ADDRFORM + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_FLOWINFO = C.IPV6_FLOWINFO + + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP + sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT + sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER + sysIPV6_MTU = C.IPV6_MTU + sysIPV6_RECVERR = C.IPV6_RECVERR + sysIPV6_V6ONLY = C.IPV6_V6ONLY + sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST + sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST + + //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT + //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT + //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO + //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE + //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE + //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT + + sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR + sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES + + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + + sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT + + sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR + sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR + sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT + sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF + + sysICMPV6_FILTER = C.ICMPV6_FILTER + + sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK + sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS + sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS + sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY + + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6FlowlabelReq C.struct_in6_flowlabel_req + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go new file mode 100644 index 0000000..be9ceb9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_netbsd.go @@ -0,0 +1,80 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go new file mode 100644 index 0000000..177ddf8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_openbsd.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL + sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL + sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL + sysIPSEC6_OUTSA = C.IPSEC6_OUTSA + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_PIPEX = C.IPV6_PIPEX + + sysIPV6_RTABLE = C.IPV6_RTABLE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go new file mode 100644 index 0000000..0f8ce2b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_solaris.go @@ -0,0 +1,114 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + + sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_SEC_OPT = C.IPV6_SEC_OPT + sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + + sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK + sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT + sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK + sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT + sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK + sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT + + sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK + + sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC + + sysICMP6_FILTER = C.ICMP6_FILTER + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go new file mode 100644 index 0000000..703dafe --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -0,0 +1,302 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/bpf" +) + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, hoplim) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + if !c.ok() { + return false, 0, syscall.EINVAL + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return false, 0, errOpNoSupport + } + offset, err = so.GetInt(c.Conn) + if err != nil { + return false, 0, err + } + if offset < 0 { + return false, 0, nil + } + return true, offset, nil +} + +// SetChecksum enables the kernel checksum processing. If on is ture, +// the offset should be an offset in bytes into the data of where the +// checksum field is located. +func (c *dgramOpt) SetChecksum(on bool, offset int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return errOpNoSupport + } + if !on { + offset = -1 + } + return so.SetInt(c.Conn, offset) +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go new file mode 100644 index 0000000..664a97d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -0,0 +1,243 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 implements IP-level socket options for the Internet +// Protocol version 6. +// +// The package provides IP-level socket options that allow +// manipulation of IPv6 facilities. +// +// The IPv6 protocol is defined in RFC 8200. +// Socket interface extensions are defined in RFC 3493, RFC 3542 and +// RFC 3678. +// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. +// Source-specific multicast is defined in RFC 4607. +// +// On Darwin, this package requires OS X Mavericks version 10.9 or +// above, or equivalent. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv6 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the traffic class field on the IPv6 +// header for each packet. +// +// ln, err := net.Listen("tcp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPconn which are created as network connections that use the +// IPv6 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. +// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. For example, a UDP listener with port 1024 might join two +// different groups across over two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 0000000..0624c17 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,128 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errOpNoSupport + } + _, mtu, err := so.getMTUInfo(c.Conn) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + return &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/example_test.go b/vendor/golang.org/x/net/ipv6/example_test.go new file mode 100644 index 0000000..e761aa2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/example_test.go @@ -0,0 +1,216 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "fmt" + "log" + "net" + "os" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "[::]:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To16() != nil && c.RemoteAddr().(*net.TCPAddr).IP.To4() == nil { + p := ipv6.NewConn(c) + if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetHopLimit(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp6", "[::]:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.ParseIP("ff02::fb")} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + b := make([]byte, 1500) + for { + _, rcm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !rcm.Dst.IsMulticast() || !rcm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + wcm.IfIndex = rcm.IfIndex + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, &wcm, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. + + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To16() != nil && ip.To4() == nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no AAAA record found") + } + + c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. + begin := time.Now() + wcm.HopLimit = i + if _, err := p.WriteTo(wb, &wcm, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, rcm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMesage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv6.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + case ipv6.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + return + } + } +} + +func ExamplePacketConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.ParseIP("ff02::5")} + if err := p.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 16) // fake ospf header, you need to implement this + ospf[0] = 3 // version 3 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) + if err := p.SetChecksum(true, 12); err != nil { + log.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: 0xc0, // DSCP CS6 + HopLimit: 1, + IfIndex: en0.Index, + } + if _, err := p.WriteTo(ospf, &cm, &allSPFRouters); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go new file mode 100644 index 0000000..47b7e9f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", + parseICMPv6Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv6\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv6Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv6Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigName) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv6Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv6ParamRecord struct { + OrigName string + Name string + Value int +} + +func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Name, "Reserved") || + strings.Contains(pr.Name, "Unassigned") || + strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "Experiment") || + strings.Contains(pr.Name, "experiment") { + continue + } + ss := strings.Split(pr.Name, "\n") + if len(ss) > 1 { + prs[i].Name = strings.Join(ss, " ") + } else { + prs[i].Name = ss[0] + } + s := strings.TrimSpace(prs[i].Name) + prs[i].OrigName = s + prs[i].Name = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go new file mode 100644 index 0000000..e9dbc2e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -0,0 +1,58 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "syscall" + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, tclass) +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. +func (c *genericOpt) SetHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, hoplim) +} diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go new file mode 100644 index 0000000..e05cb08 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "fmt" + "net" +) + +const ( + Version = 6 // protocol version + HeaderLen = 40 // header length +) + +// A Header represents an IPv6 base header. +type Header struct { + Version int // protocol version + TrafficClass int // traffic class + FlowLabel int // flow label + PayloadLen int // payload length + NextHeader int // next header + HopLimit int // hop limit + Src net.IP // source address + Dst net.IP // destination address +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst) +} + +// ParseHeader parses b as an IPv6 base header. +func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/header_test.go b/vendor/golang.org/x/net/ipv6/header_test.go new file mode 100644 index 0000000..ca11dc2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header_test.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "strings" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv6" +) + +var ( + wireHeaderFromKernel = [ipv6.HeaderLen]byte{ + 0x69, 0x8b, 0xee, 0xf1, + 0xca, 0xfe, 0x2c, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + } + + testHeader = &ipv6.Header{ + Version: ipv6.Version, + TrafficClass: iana.DiffServAF43, + FlowLabel: 0xbeef1, + PayloadLen: 0xcafe, + NextHeader: iana.ProtocolIPv6Frag, + HopLimit: 1, + Src: net.ParseIP("2001:db8:1::1"), + Dst: net.ParseIP("2001:db8:2::1"), + } +) + +func TestParseHeader(t *testing.T) { + h, err := ipv6.ParseHeader(wireHeaderFromKernel[:]) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, testHeader) { + t.Fatalf("got %#v; want %#v", h, testHeader) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } +} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 0000000..2597401 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 0000000..3c6214f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,82 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 153 // Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 140: "icmp node information response", + 141: "inverse neighbor discovery solicitation message", + 142: "inverse neighbor discovery advertisement message", + 143: "version 2 multicast listener report", + 144: "home agent address discovery request message", + 145: "home agent address discovery reply message", + 146: "mobile prefix solicitation", + 147: "mobile prefix advertisement", + 148: "certification path solicitation message", + 149: "certification path advertisement message", + 151: "multicast router advertisement", + 152: "multicast router solicitation", + 153: "multicast router termination", + 154: "fmipv6 messages", + 155: "rpl control message", + 156: "ilnpv6 locator update message", + 157: "duplicate address request", + 158: "duplicate address confirmation", + 159: "mpl control message", +} diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go new file mode 100644 index 0000000..b7f48e2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/iana" + +// BUG(mikio): On Windows, methods related to ICMPFilter are not +// implemented. + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv6 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolIPv6ICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model. A node means a +// device that implements IP. A router means a node that forwards IP +// packets not explicitly addressed to itself, and a host means a node +// that is not a router. +type ICMPFilter struct { + icmpv6Filter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go new file mode 100644 index 0000000..e1a791d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 0000000..647f6b4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 0000000..7c23bb1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 0000000..c4b9be6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +type icmpv6Filter struct { +} + +func (f *icmpv6Filter) accept(typ ICMPType) { +} + +func (f *icmpv6Filter) block(typ ICMPType) { +} + +func (f *icmpv6Filter) setAll(block bool) { +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_test.go b/vendor/golang.org/x/net/ipv6/icmp_test.go new file mode 100644 index 0000000..d8e9675 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_test.go @@ -0,0 +1,96 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var icmpStringTests = []struct { + in ipv6.ICMPType + out string +}{ + {ipv6.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, ""}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv6.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv6.ICMPType{ + ipv6.ICMPTypeDestinationUnreachable, + ipv6.ICMPTypeEchoReply, + ipv6.ICMPTypeNeighborSolicitation, + ipv6.ICMPTypeDuplicateAddressConfirmation, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoRequest) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go new file mode 100644 index 0000000..443cd07 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/mocktransponder_test.go b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go new file mode 100644 index 0000000..6efe56c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "testing" +) + +func connector(t *testing.T, network, addr string, done chan<- bool) { + defer func() { done <- true }() + + c, err := net.Dial(network, addr) + if err != nil { + t.Error(err) + return + } + c.Close() +} + +func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { + defer func() { done <- true }() + + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + c.Close() +} diff --git a/vendor/golang.org/x/net/ipv6/multicast_test.go b/vendor/golang.org/x/net/ipv6/multicast_test.go new file mode 100644 index 0000000..69a21cd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicast_test.go @@ -0,0 +1,264 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {"[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { + t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp6", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatal(err) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { + t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, tt.grp.IP) + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := p.SetChecksum(true, 2); err != nil { + // Solaris never allows to + // modify ICMP properties. + if runtime.GOOS != "solaris" { + t.Fatal(err) + } + } + } else { + psh = pshicmp + // Some platforms never allow to + // disable the kernel checksum + // processing. + p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastlistener_test.go b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go new file mode 100644 index 0000000..b27713e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go @@ -0,0 +1,261 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.ParseIP("ff02::114")}, // see RFC 4727 + &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}, + &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp6", "[::]:0") // wildcard address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp6", "[ff02::]:0") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + _, port, err := net.SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c2, err := net.ListenPacket("udp6", net.JoinHostPort("ff02::", port)) // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var ps [2]*ipv6.PacketConn + ps[0] = ipv6.NewPacketConn(c1) + ps[1] = ipv6.NewPacketConn(c2) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + port := "0" + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp6", net.JoinHostPort(ip.String()+"%"+ifi.Name, port)) // unicast address with non-reusable port + if err != nil { + // The listen may fail when the serivce is + // already in use, but it's fine because the + // purpose of this is not to test the + // bookkeeping of IP control block inside the + // kernel. + t.Log(err) + continue + } + defer c.Close() + if port == "0" { + _, port, err = net.SplitHostPort(c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + } + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "darwin", "dragonfly", "openbsd": // platforms that return fe80::1%lo0: bind: can't assign requested address + t.Skipf("not supported on %s", runtime.GOOS) + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip6:ipv6-icmp", ip.String()+"%"+ifi.Name) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go new file mode 100644 index 0000000..9e6b902 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go @@ -0,0 +1,157 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp6", "", "[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff02::115")}, nil}, // see RFC 4727 + + {"udp6", "", "[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff30::8000:2")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +type testIPv6MulticastConn interface { + MulticastHopLimit() (int, error) + SetMulticastHopLimit(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp net.Addr) { + const hoplim = 255 + if err := c.SetMulticastHopLimit(hoplim); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastHopLimit(); err != nil { + t.Error(err) + return + } else if v != hoplim { + t.Errorf("got %v; want %v", v, hoplim) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 0000000..a8197f1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 0000000..4ee4b06 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,35 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go new file mode 100644 index 0000000..fdc6c39 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP16(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go new file mode 100644 index 0000000..8f6d02e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go @@ -0,0 +1,57 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 0000000..99a4354 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,41 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go new file mode 100644 index 0000000..c11d92a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go @@ -0,0 +1,242 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go new file mode 100644 index 0000000..e2fd733 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go @@ -0,0 +1,373 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + batchReader := func() { + defer wg.Done() + ms := []ipv6.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv6.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv6.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + b := ms[0].Buffers[0][:ms[0].N] + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv6.Message{ + { + Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_test.go b/vendor/golang.org/x/net/ipv6/readwrite_test.go new file mode 100644 index 0000000..206b915 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_test.go @@ -0,0 +1,148 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + + dst := c.LocalAddr() + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv6UDP", func(b *testing.B) { + p := ipv6.NewPacketConn(c) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %d; want %d", n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 0000000..cc3907d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTrafficClass = iota // header field for unicast packet, RFC 3542 + ssoHopLimit // header field for unicast packet, RFC 3493 + ssoMulticastInterface // outbound interface for multicast packet, RFC 3493 + ssoMulticastHopLimit // header field for multicast packet, RFC 3493 + ssoMulticastLoopback // loopback for multicast packet, RFC 3493 + ssoReceiveTrafficClass // header field on received packet, RFC 3542 + ssoReceiveHopLimit // header field on received packet, RFC 2292 or 3542 + ssoReceivePacketInfo // incbound or outbound packet path, RFC 2292 or 3542 + ssoReceivePathMTU // path mtu, RFC 3542 + ssoPathMTU // path mtu, RFC 3542 + ssoChecksum // packet checksum, RFC 2292 or 3542 + ssoICMPFilter // icmp filter, RFC 2292 or 3542 + ssoJoinGroup // any-source multicast, RFC 3493 + ssoLeaveGroup // any-source multicast, RFC 3493 + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go new file mode 100644 index 0000000..0eac86e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + n, err := so.GetInt(c) + if err != nil { + return nil, err + } + return net.InterfaceByIndex(n) +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + var n int + if ifi != nil { + n = ifi.Index + } + return so.SetInt(c, n) +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPv6Filter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter] + return so.Set(c, b) +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, 0, err + } + if n != sizeofIPv6Mtuinfo { + return nil, 0, errOpNoSupport + } + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if mi.Addr.Scope_id == 0 { + return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 0000000..1f4a273 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + return nil, 0, errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_test.go b/vendor/golang.org/x/net/ipv6/sockopt_test.go new file mode 100644 index 0000000..774338d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_test.go @@ -0,0 +1,133 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var supportsIPv6 bool = nettest.SupportsIPv6() + +func TestConnInitiatorPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestConnResponderPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go connector(t, "tcp6", ln.Addr().String(), done) + + c, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestPacketConnChecksum(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6 + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + offset := 12 // see RFC 5340 + + for _, toggle := range []bool{false, true} { + if err := p.SetChecksum(toggle, offset); err != nil { + if toggle { + t.Fatalf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } else { + // Some platforms never allow to disable the kernel + // checksum processing. + t.Logf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } + } + if on, offset, err := p.Checksum(); err != nil { + t.Fatal(err) + } else { + t.Logf("kernel checksum processing enabled=%v, offset=%v", on, offset) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go new file mode 100644 index 0000000..b0510c0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreq ipv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go new file mode 100644 index 0000000..eece961 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go new file mode 100644 index 0000000..b2dbcb2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go new file mode 100644 index 0000000..676bea5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 0000000..e416eaa --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 0000000..e3d0443 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,106 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_2292PKTINFO, sizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292HOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292PKTINFO, Len: 4}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. + if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass} + ctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit} + ctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo} + ctlOpts[ctlNextHop] = ctlOpt{sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop} + ctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU} + sockOpts[ssoTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}} + sockOpts[ssoReceiveTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}} + sockOpts[ssoReceiveHopLimit] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}} + sockOpts[ssoReceivePacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}} + sockOpts[ssoReceivePathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}} + sockOpts[ssoPathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go new file mode 100644 index 0000000..e9349dc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -0,0 +1,92 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 0000000..bc21810 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,74 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_solaris.go b/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 0000000..d348b5f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 0000000..add8ccc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 0000000..581ee49 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 0000000..b845388 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 0000000..fc36b01 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sizeofSockaddrInet6 = 0x1c + + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 +) + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/unicast_test.go b/vendor/golang.org/x/net/ipv6/unicast_test.go new file mode 100644 index 0000000..a0b7d95 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicast_test.go @@ -0,0 +1,184 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveIPAddr("ip6", "::1") + if err != nil { + t.Fatal(err) + } + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, dst.IP) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := p.SetChecksum(true, 2); err != nil { + // Solaris never allows to modify + // ICMP properties. + if runtime.GOOS != "solaris" { + t.Fatal(err) + } + } + } else { + psh = pshicmp + // Some platforms never allow to disable the + // kernel checksum processing. + p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go new file mode 100644 index 0000000..e175dcc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewConn(c)) + + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp6", "", "[::1]:0"}, + {"ip6", ":ipv6-icmp", "::1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewPacketConn(c)) + } +} + +type testIPv6UnicastConn interface { + TrafficClass() (int, error) + SetTrafficClass(int) error + HopLimit() (int, error) + SetHopLimit(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv6UnicastConn) { + tclass := iana.DiffServCS0 | iana.NotECNTransport + if err := c.SetTrafficClass(tclass); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_TCLASS option + t.Logf("not supported on %s", runtime.GOOS) + goto next + } + t.Fatal(err) + } + if v, err := c.TrafficClass(); err != nil { + t.Fatal(err) + } else if v != tclass { + t.Fatalf("got %v; want %v", v, tclass) + } + +next: + hoplim := 255 + if err := c.SetHopLimit(hoplim); err != nil { + t.Fatal(err) + } + if v, err := c.HopLimit(); err != nil { + t.Fatal(err) + } else if v != hoplim { + t.Fatalf("got %v; want %v", v, hoplim) + } +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 0000000..6aab1df --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 0000000..d2de804 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,88 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 0000000..919e572 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 0000000..cb8141f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 0000000..cb8141f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 0000000..c9bf6a8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 index 0000000..bcada13 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 0000000..86cf3c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 0000000..cf1837d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go new file mode 100644 index 0000000..20f2b89 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex.go @@ -0,0 +1,351 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httplex contains rules around lexical matters of various +// HTTP-related specifications. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. +package httplex + +import ( + "net" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. +func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. + for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. +func headerValueContainsToken(v string, token string) bool { + v = trimOWS(v) + if comma := strings.IndexByte(v, ','); comma != -1 { + return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + } + return tokenEqual(v, token) +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. + return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. +var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. + return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex_test.go b/vendor/golang.org/x/net/lex/httplex/httplex_test.go new file mode 100644 index 0000000..f47adc9 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex_test.go @@ -0,0 +1,119 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httplex + +import ( + "testing" +) + +func isChar(c rune) bool { return c <= 127 } + +func isCtl(c rune) bool { return c <= 31 || c == 127 } + +func isSeparator(c rune) bool { + switch c { + case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': + return true + } + return false +} + +func TestIsToken(t *testing.T) { + for i := 0; i <= 130; i++ { + r := rune(i) + expected := isChar(r) && !isCtl(r) && !isSeparator(r) + if IsTokenRune(r) != expected { + t.Errorf("isToken(0x%x) = %v", r, !expected) + } + } +} + +func TestHeaderValuesContainsToken(t *testing.T) { + tests := []struct { + vals []string + token string + want bool + }{ + { + vals: []string{"foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"bar", "foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo"}, + token: "bar", + want: false, + }, + { + vals: []string{" foo "}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar,foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo ,bar "}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar, foo ,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + } + for _, tt := range tests { + got := HeaderValuesContainsToken(tt.vals, tt.token) + if got != tt.want { + t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want) + } + } +} + +func TestPunycodeHostPort(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.google.com", "www.google.com"}, + {"гофер.рф", "xn--c1ae0ajs.xn--p1ai"}, + {"bücher.de", "xn--bcher-kva.de"}, + {"bücher.de:8080", "xn--bcher-kva.de:8080"}, + {"[1::6]:8080", "[1::6]:8080"}, + } + for _, tt := range tests { + got, err := PunycodeHostPort(tt.in) + if tt.want != got || err != nil { + t.Errorf("PunycodeHostPort(%q) = %q, %v, want %q, nil", tt.in, got, err, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/lif/address.go b/vendor/golang.org/x/net/lif/address.go new file mode 100644 index 0000000..afb957f --- /dev/null +++ b/vendor/golang.org/x/net/lif/address.go @@ -0,0 +1,105 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "errors" + "unsafe" +) + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// An Inet4Addr represents an internet address for IPv4. +type Inet4Addr struct { + IP [4]byte // IP address + PrefixLen int // address prefix length +} + +// Family implements the Family method of Addr interface. +func (a *Inet4Addr) Family() int { return sysAF_INET } + +// An Inet6Addr represents an internet address for IPv6. +type Inet6Addr struct { + IP [16]byte // IP address + PrefixLen int // address prefix length + ZoneID int // zone identifier +} + +// Family implements the Family method of Addr interface. +func (a *Inet6Addr) Family() int { return sysAF_INET6 } + +// Addrs returns a list of interface addresses. +// +// The provided af must be an address family and name must be a data +// link name. The zero value of af or name means a wildcard. +func Addrs(af int, name string) ([]Addr, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + lls, err := links(eps, name) + if len(lls) == 0 { + return nil, err + } + var as []Addr + for _, ll := range lls { + var lifr lifreq + for i := 0; i < len(ll.Name); i++ { + lifr.Name[i] = int8(ll.Name[i]) + } + for _, ep := range eps { + ioc := int64(sysSIOCGLIFADDR) + err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifr)) + if err != nil { + continue + } + sa := (*sockaddrStorage)(unsafe.Pointer(&lifr.Lifru[0])) + l := int(nativeEndian.Uint32(lifr.Lifru1[:4])) + if l == 0 { + continue + } + switch sa.Family { + case sysAF_INET: + a := &Inet4Addr{PrefixLen: l} + copy(a.IP[:], lifr.Lifru[4:8]) + as = append(as, a) + case sysAF_INET6: + a := &Inet6Addr{PrefixLen: l, ZoneID: int(nativeEndian.Uint32(lifr.Lifru[24:28]))} + copy(a.IP[:], lifr.Lifru[8:24]) + as = append(as, a) + } + } + } + return as, nil +} + +func parseLinkAddr(b []byte) ([]byte, error) { + nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) + l := 4 + nlen + alen + slen + if len(b) < l { + return nil, errors.New("invalid address") + } + b = b[4:] + var addr []byte + if nlen > 0 { + b = b[nlen:] + } + if alen > 0 { + addr = make([]byte, alen) + copy(addr, b[:alen]) + } + return addr, nil +} diff --git a/vendor/golang.org/x/net/lif/address_test.go b/vendor/golang.org/x/net/lif/address_test.go new file mode 100644 index 0000000..a25f10b --- /dev/null +++ b/vendor/golang.org/x/net/lif/address_test.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +type addrFamily int + +func (af addrFamily) String() string { + switch af { + case sysAF_UNSPEC: + return "unspec" + case sysAF_INET: + return "inet4" + case sysAF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +const hexDigit = "0123456789abcdef" + +type llAddr []byte + +func (a llAddr) String() string { + if len(a) == 0 { + return "" + } + buf := make([]byte, 0, len(a)*3-1) + for i, b := range a { + if i > 0 { + buf = append(buf, ':') + } + buf = append(buf, hexDigit[b>>4]) + buf = append(buf, hexDigit[b&0xF]) + } + return string(buf) +} + +type ipAddr []byte + +func (a ipAddr) String() string { + if len(a) == 0 { + return "" + } + if len(a) == 4 { + return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) + } + if len(a) == 16 { + return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) + } + s := make([]byte, len(a)*2) + for i, tn := range a { + s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] + } + return string(s) +} + +func (a *Inet4Addr) String() string { + return fmt.Sprintf("(%s %s %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen) +} + +func (a *Inet6Addr) String() string { + return fmt.Sprintf("(%s %s %d %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen, a.ZoneID) +} + +type addrPack struct { + af int + as []Addr +} + +func addrPacks() ([]addrPack, error) { + var lastErr error + var aps []addrPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + as, err := Addrs(af, "") + if err != nil { + lastErr = err + continue + } + aps = append(aps, addrPack{af: af, as: as}) + } + return aps, lastErr +} + +func TestAddrs(t *testing.T) { + aps, err := addrPacks() + if len(aps) == 0 && err != nil { + t.Fatal(err) + } + lps, err := linkPacks() + if len(lps) == 0 && err != nil { + t.Fatal(err) + } + for _, lp := range lps { + n := 0 + for _, ll := range lp.lls { + as, err := Addrs(lp.af, ll.Name) + if err != nil { + t.Fatal(lp.af, ll.Name, err) + } + t.Logf("af=%s name=%s %v", addrFamily(lp.af), ll.Name, as) + n += len(as) + } + for _, ap := range aps { + if ap.af != lp.af { + continue + } + if n != len(ap.as) { + t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(ap.as)) + continue + } + } + } +} diff --git a/vendor/golang.org/x/net/lif/binary.go b/vendor/golang.org/x/net/lif/binary.go new file mode 100644 index 0000000..738a94f --- /dev/null +++ b/vendor/golang.org/x/net/lif/binary.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +// This file contains duplicates of encoding/binary package. +// +// This package is supposed to be used by the net package of standard +// library. Therefore the package set used in the package must be the +// same as net package. + +var ( + littleEndian binaryLittleEndian + bigEndian binaryBigEndian +) + +type binaryByteOrder interface { + Uint16([]byte) uint16 + Uint32([]byte) uint32 + Uint64([]byte) uint64 + PutUint16([]byte, uint16) + PutUint32([]byte, uint32) + PutUint64([]byte, uint64) +} + +type binaryLittleEndian struct{} + +func (binaryLittleEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[0]) | uint16(b[1])<<8 +} + +func (binaryLittleEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) +} + +func (binaryLittleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (binaryLittleEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (binaryLittleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (binaryLittleEndian) PutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + b[4] = byte(v >> 32) + b[5] = byte(v >> 40) + b[6] = byte(v >> 48) + b[7] = byte(v >> 56) +} + +type binaryBigEndian struct{} + +func (binaryBigEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[1]) | uint16(b[0])<<8 +} + +func (binaryBigEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (binaryBigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (binaryBigEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (binaryBigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +func (binaryBigEndian) PutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} diff --git a/vendor/golang.org/x/net/lif/defs_solaris.go b/vendor/golang.org/x/net/lif/defs_solaris.go new file mode 100644 index 0000000..02c1998 --- /dev/null +++ b/vendor/golang.org/x/net/lif/defs_solaris.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package lif + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_DGRAM = C.SOCK_DGRAM +) + +type sockaddrStorage C.struct_sockaddr_storage + +const ( + sysLIFC_NOXMIT = C.LIFC_NOXMIT + sysLIFC_EXTERNAL_SOURCE = C.LIFC_EXTERNAL_SOURCE + sysLIFC_TEMPORARY = C.LIFC_TEMPORARY + sysLIFC_ALLZONES = C.LIFC_ALLZONES + sysLIFC_UNDER_IPMP = C.LIFC_UNDER_IPMP + sysLIFC_ENABLED = C.LIFC_ENABLED + + sysSIOCGLIFADDR = C.SIOCGLIFADDR + sysSIOCGLIFDSTADDR = C.SIOCGLIFDSTADDR + sysSIOCGLIFFLAGS = C.SIOCGLIFFLAGS + sysSIOCGLIFMTU = C.SIOCGLIFMTU + sysSIOCGLIFNETMASK = C.SIOCGLIFNETMASK + sysSIOCGLIFMETRIC = C.SIOCGLIFMETRIC + sysSIOCGLIFNUM = C.SIOCGLIFNUM + sysSIOCGLIFINDEX = C.SIOCGLIFINDEX + sysSIOCGLIFSUBNET = C.SIOCGLIFSUBNET + sysSIOCGLIFLNKINFO = C.SIOCGLIFLNKINFO + sysSIOCGLIFCONF = C.SIOCGLIFCONF + sysSIOCGLIFHWADDR = C.SIOCGLIFHWADDR +) + +const ( + sysIFF_UP = C.IFF_UP + sysIFF_BROADCAST = C.IFF_BROADCAST + sysIFF_DEBUG = C.IFF_DEBUG + sysIFF_LOOPBACK = C.IFF_LOOPBACK + sysIFF_POINTOPOINT = C.IFF_POINTOPOINT + sysIFF_NOTRAILERS = C.IFF_NOTRAILERS + sysIFF_RUNNING = C.IFF_RUNNING + sysIFF_NOARP = C.IFF_NOARP + sysIFF_PROMISC = C.IFF_PROMISC + sysIFF_ALLMULTI = C.IFF_ALLMULTI + sysIFF_INTELLIGENT = C.IFF_INTELLIGENT + sysIFF_MULTICAST = C.IFF_MULTICAST + sysIFF_MULTI_BCAST = C.IFF_MULTI_BCAST + sysIFF_UNNUMBERED = C.IFF_UNNUMBERED + sysIFF_PRIVATE = C.IFF_PRIVATE +) + +const ( + sizeofLifnum = C.sizeof_struct_lifnum + sizeofLifreq = C.sizeof_struct_lifreq + sizeofLifconf = C.sizeof_struct_lifconf + sizeofLifIfinfoReq = C.sizeof_struct_lif_ifinfo_req +) + +type lifnum C.struct_lifnum + +type lifreq C.struct_lifreq + +type lifconf C.struct_lifconf + +type lifIfinfoReq C.struct_lif_ifinfo_req + +const ( + sysIFT_IPV4 = C.IFT_IPV4 + sysIFT_IPV6 = C.IFT_IPV6 + sysIFT_6TO4 = C.IFT_6TO4 +) diff --git a/vendor/golang.org/x/net/lif/lif.go b/vendor/golang.org/x/net/lif/lif.go new file mode 100644 index 0000000..6e81f81 --- /dev/null +++ b/vendor/golang.org/x/net/lif/lif.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +// Package lif provides basic functions for the manipulation of +// logical network interfaces and interface addresses on Solaris. +// +// The package supports Solaris 11 or above. +package lif + +import "syscall" + +type endpoint struct { + af int + s uintptr +} + +func (ep *endpoint) close() error { + return syscall.Close(int(ep.s)) +} + +func newEndpoints(af int) ([]endpoint, error) { + var lastErr error + var eps []endpoint + afs := []int{sysAF_INET, sysAF_INET6} + if af != sysAF_UNSPEC { + afs = []int{af} + } + for _, af := range afs { + s, err := syscall.Socket(af, sysSOCK_DGRAM, 0) + if err != nil { + lastErr = err + continue + } + eps = append(eps, endpoint{af: af, s: uintptr(s)}) + } + if len(eps) == 0 { + return nil, lastErr + } + return eps, nil +} diff --git a/vendor/golang.org/x/net/lif/link.go b/vendor/golang.org/x/net/lif/link.go new file mode 100644 index 0000000..913a53e --- /dev/null +++ b/vendor/golang.org/x/net/lif/link.go @@ -0,0 +1,126 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import "unsafe" + +// A Link represents logical data link information. +// +// It also represents base information for logical network interface. +// On Solaris, each logical network interface represents network layer +// adjacency information and the interface has a only single network +// address or address pair for tunneling. It's usual that multiple +// logical network interfaces share the same logical data link. +type Link struct { + Name string // name, equivalent to IP interface name + Index int // index, equivalent to IP interface index + Type int // type + Flags int // flags + MTU int // maximum transmission unit, basically link MTU but may differ between IP address families + Addr []byte // address +} + +func (ll *Link) fetch(s uintptr) { + var lifr lifreq + for i := 0; i < len(ll.Name); i++ { + lifr.Name[i] = int8(ll.Name[i]) + } + ioc := int64(sysSIOCGLIFINDEX) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Index = int(nativeEndian.Uint32(lifr.Lifru[:4])) + } + ioc = int64(sysSIOCGLIFFLAGS) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Flags = int(nativeEndian.Uint64(lifr.Lifru[:8])) + } + ioc = int64(sysSIOCGLIFMTU) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.MTU = int(nativeEndian.Uint32(lifr.Lifru[:4])) + } + switch ll.Type { + case sysIFT_IPV4, sysIFT_IPV6, sysIFT_6TO4: + default: + ioc = int64(sysSIOCGLIFHWADDR) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Addr, _ = parseLinkAddr(lifr.Lifru[4:]) + } + } +} + +// Links returns a list of logical data links. +// +// The provided af must be an address family and name must be a data +// link name. The zero value of af or name means a wildcard. +func Links(af int, name string) ([]Link, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + return links(eps, name) +} + +func links(eps []endpoint, name string) ([]Link, error) { + var lls []Link + lifn := lifnum{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + lifc := lifconf{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + for _, ep := range eps { + lifn.Family = uint16(ep.af) + ioc := int64(sysSIOCGLIFNUM) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifn)); err != nil { + continue + } + if lifn.Count == 0 { + continue + } + b := make([]byte, lifn.Count*sizeofLifreq) + lifc.Family = uint16(ep.af) + lifc.Len = lifn.Count * sizeofLifreq + if len(lifc.Lifcu) == 8 { + nativeEndian.PutUint64(lifc.Lifcu[:], uint64(uintptr(unsafe.Pointer(&b[0])))) + } else { + nativeEndian.PutUint32(lifc.Lifcu[:], uint32(uintptr(unsafe.Pointer(&b[0])))) + } + ioc = int64(sysSIOCGLIFCONF) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifc)); err != nil { + continue + } + nb := make([]byte, 32) // see LIFNAMSIZ in net/if.h + for i := 0; i < int(lifn.Count); i++ { + lifr := (*lifreq)(unsafe.Pointer(&b[i*sizeofLifreq])) + for i := 0; i < 32; i++ { + if lifr.Name[i] == 0 { + nb = nb[:i] + break + } + nb[i] = byte(lifr.Name[i]) + } + llname := string(nb) + nb = nb[:32] + if isDupLink(lls, llname) || name != "" && name != llname { + continue + } + ll := Link{Name: llname, Type: int(lifr.Type)} + ll.fetch(ep.s) + lls = append(lls, ll) + } + } + return lls, nil +} + +func isDupLink(lls []Link, name string) bool { + for _, ll := range lls { + if ll.Name == name { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/lif/link_test.go b/vendor/golang.org/x/net/lif/link_test.go new file mode 100644 index 0000000..0cb9b95 --- /dev/null +++ b/vendor/golang.org/x/net/lif/link_test.go @@ -0,0 +1,63 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +func (ll *Link) String() string { + return fmt.Sprintf("name=%s index=%d type=%d flags=%#x mtu=%d addr=%v", ll.Name, ll.Index, ll.Type, ll.Flags, ll.MTU, llAddr(ll.Addr)) +} + +type linkPack struct { + af int + lls []Link +} + +func linkPacks() ([]linkPack, error) { + var lastErr error + var lps []linkPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + lls, err := Links(af, "") + if err != nil { + lastErr = err + continue + } + lps = append(lps, linkPack{af: af, lls: lls}) + } + return lps, lastErr +} + +func TestLinks(t *testing.T) { + lps, err := linkPacks() + if len(lps) == 0 && err != nil { + t.Fatal(err) + } + for _, lp := range lps { + n := 0 + for _, sll := range lp.lls { + lls, err := Links(lp.af, sll.Name) + if err != nil { + t.Fatal(lp.af, sll.Name, err) + } + for _, ll := range lls { + if ll.Name != sll.Name || ll.Index != sll.Index { + t.Errorf("af=%s got %v; want %v", addrFamily(lp.af), &ll, &sll) + continue + } + t.Logf("af=%s name=%s %v", addrFamily(lp.af), sll.Name, &ll) + n++ + } + } + if n != len(lp.lls) { + t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(lp.lls)) + continue + } + } +} diff --git a/vendor/golang.org/x/net/lif/sys.go b/vendor/golang.org/x/net/lif/sys.go new file mode 100644 index 0000000..c896041 --- /dev/null +++ b/vendor/golang.org/x/net/lif/sys.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import "unsafe" + +var nativeEndian binaryByteOrder + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } +} diff --git a/vendor/golang.org/x/net/lif/sys_solaris_amd64.s b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s new file mode 100644 index 0000000..39d76af --- /dev/null +++ b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s @@ -0,0 +1,8 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) diff --git a/vendor/golang.org/x/net/lif/syscall.go b/vendor/golang.org/x/net/lif/syscall.go new file mode 100644 index 0000000..aadab2e --- /dev/null +++ b/vendor/golang.org/x/net/lif/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +//go:linkname procIoctl libc_ioctl + +var procIoctl uintptr + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func ioctl(s, ioc uintptr, arg unsafe.Pointer) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procIoctl)), 3, s, ioc, uintptr(arg), 0, 0, 0) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go b/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go new file mode 100644 index 0000000..b5e999b --- /dev/null +++ b/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go @@ -0,0 +1,103 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package lif + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_DGRAM = 0x1 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +const ( + sysLIFC_NOXMIT = 0x1 + sysLIFC_EXTERNAL_SOURCE = 0x2 + sysLIFC_TEMPORARY = 0x4 + sysLIFC_ALLZONES = 0x8 + sysLIFC_UNDER_IPMP = 0x10 + sysLIFC_ENABLED = 0x20 + + sysSIOCGLIFADDR = -0x3f87968f + sysSIOCGLIFDSTADDR = -0x3f87968d + sysSIOCGLIFFLAGS = -0x3f87968b + sysSIOCGLIFMTU = -0x3f879686 + sysSIOCGLIFNETMASK = -0x3f879683 + sysSIOCGLIFMETRIC = -0x3f879681 + sysSIOCGLIFNUM = -0x3ff3967e + sysSIOCGLIFINDEX = -0x3f87967b + sysSIOCGLIFSUBNET = -0x3f879676 + sysSIOCGLIFLNKINFO = -0x3f879674 + sysSIOCGLIFCONF = -0x3fef965b + sysSIOCGLIFHWADDR = -0x3f879640 +) + +const ( + sysIFF_UP = 0x1 + sysIFF_BROADCAST = 0x2 + sysIFF_DEBUG = 0x4 + sysIFF_LOOPBACK = 0x8 + sysIFF_POINTOPOINT = 0x10 + sysIFF_NOTRAILERS = 0x20 + sysIFF_RUNNING = 0x40 + sysIFF_NOARP = 0x80 + sysIFF_PROMISC = 0x100 + sysIFF_ALLMULTI = 0x200 + sysIFF_INTELLIGENT = 0x400 + sysIFF_MULTICAST = 0x800 + sysIFF_MULTI_BCAST = 0x1000 + sysIFF_UNNUMBERED = 0x2000 + sysIFF_PRIVATE = 0x8000 +) + +const ( + sizeofLifnum = 0xc + sizeofLifreq = 0x178 + sizeofLifconf = 0x18 + sizeofLifIfinfoReq = 0x10 +) + +type lifnum struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Count int32 +} + +type lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} + +type lifconf struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Len int32 + Pad_cgo_1 [4]byte + Lifcu [8]byte +} + +type lifIfinfoReq struct { + Maxhops uint8 + Pad_cgo_0 [3]byte + Reachtime uint32 + Reachretrans uint32 + Maxmtu uint32 +} + +const ( + sysIFT_IPV4 = 0xc8 + sysIFT_IPV6 = 0xc9 + sysIFT_6TO4 = 0xca +) diff --git a/vendor/golang.org/x/net/nettest/conntest.go b/vendor/golang.org/x/net/nettest/conntest.go new file mode 100644 index 0000000..5bd3a8c --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest.go @@ -0,0 +1,456 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. +package nettest + +import ( + "bytes" + "encoding/binary" + "io" + "io/ioutil" + "math/rand" + "net" + "runtime" + "sync" + "testing" + "time" +) + +var ( + aLongTimeAgo = time.Unix(233431200, 0) + neverTimeout = time.Time{} +) + +// MakePipe creates a connection between two endpoints and returns the pair +// as c1 and c2, such that anything written to c1 is read by c2 and vice-versa. +// The stop function closes all resources, including c1, c2, and the underlying +// net.Listener (if there is one), and should not be nil. +type MakePipe func() (c1, c2 net.Conn, stop func(), err error) + +// TestConn tests that a net.Conn implementation properly satisfies the interface. +// The tests should not produce any false positives, but may experience +// false negatives. Thus, some issues may only be detected when the test is +// run multiple times. For maximal effectiveness, run the tests under the +// race detector. +func TestConn(t *testing.T, mp MakePipe) { + testConn(t, mp) +} + +type connTester func(t *testing.T, c1, c2 net.Conn) + +func timeoutWrapper(t *testing.T, mp MakePipe, f connTester) { + c1, c2, stop, err := mp() + if err != nil { + t.Fatalf("unable to make pipe: %v", err) + } + var once sync.Once + defer once.Do(func() { stop() }) + timer := time.AfterFunc(time.Minute, func() { + once.Do(func() { + t.Error("test timed out; terminating pipe") + stop() + }) + }) + defer timer.Stop() + f(t, c1, c2) +} + +// testBasicIO tests that the data sent on c1 is properly received on c2. +func testBasicIO(t *testing.T, c1, c2 net.Conn) { + want := make([]byte, 1<<20) + rand.New(rand.NewSource(0)).Read(want) + + dataCh := make(chan []byte) + go func() { + rd := bytes.NewReader(want) + if err := chunkedCopy(c1, rd); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } + if err := c1.Close(); err != nil { + t.Errorf("unexpected c1.Close error: %v", err) + } + }() + + go func() { + wr := new(bytes.Buffer) + if err := chunkedCopy(wr, c2); err != nil { + t.Errorf("unexpected c2.Read error: %v", err) + } + if err := c2.Close(); err != nil { + t.Errorf("unexpected c2.Close error: %v", err) + } + dataCh <- wr.Bytes() + }() + + if got := <-dataCh; !bytes.Equal(got, want) { + t.Errorf("transmitted data differs") + } +} + +// testPingPong tests that the two endpoints can synchronously send data to +// each other in a typical request-response pattern. +func testPingPong(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + + pingPonger := func(c net.Conn) { + defer wg.Done() + buf := make([]byte, 8) + var prev uint64 + for { + if _, err := io.ReadFull(c, buf); err != nil { + if err == io.EOF { + break + } + t.Errorf("unexpected Read error: %v", err) + } + + v := binary.LittleEndian.Uint64(buf) + binary.LittleEndian.PutUint64(buf, v+1) + if prev != 0 && prev+2 != v { + t.Errorf("mismatching value: got %d, want %d", v, prev+2) + } + prev = v + if v == 1000 { + break + } + + if _, err := c.Write(buf); err != nil { + t.Errorf("unexpected Write error: %v", err) + break + } + } + if err := c.Close(); err != nil { + t.Errorf("unexpected Close error: %v", err) + } + } + + wg.Add(2) + go pingPonger(c1) + go pingPonger(c2) + + // Start off the chain reaction. + if _, err := c1.Write(make([]byte, 8)); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } +} + +// testRacyRead tests that it is safe to mutate the input Read buffer +// immediately after cancelation has occurred. +func testRacyRead(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Read(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testRacyWrite tests that it is safe to mutate the input Write buffer +// immediately after cancelation has occurred. +func testRacyWrite(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Write(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testReadTimeout tests that Read timeouts do not affect Write. +func testReadTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + c1.SetReadDeadline(aLongTimeAgo) + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Write(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// testWriteTimeout tests that Write timeouts do not affect Read. +func testWriteTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + c1.SetWriteDeadline(aLongTimeAgo) + _, err := c1.Write(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Read(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Read error: %v", err) + } +} + +// testPastTimeout tests that a deadline set in the past immediately times out +// Read and Write requests. +func testPastTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + testRoundtrip(t, c1) + + c1.SetDeadline(aLongTimeAgo) + n, err := c1.Write(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Write count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + n, err = c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + + testRoundtrip(t, c1) +} + +// testPresentTimeout tests that a deadline set while there are pending +// Read and Write operations immediately times out those operations. +func testPresentTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + deadlineSet := make(chan bool, 1) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + deadlineSet <- true + c1.SetReadDeadline(aLongTimeAgo) + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + n, err := c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Read timed out before deadline is set") + } + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Write timed out before deadline is set") + } + }() +} + +// testFutureTimeout tests that a future deadline will eventually time out +// Read and Write operations. +func testFutureTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + wg.Add(2) + + c1.SetDeadline(time.Now().Add(100 * time.Millisecond)) + go func() { + defer wg.Done() + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + }() + wg.Wait() + + go chunkedCopy(c2, c2) + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// testCloseTimeout tests that calling Close immediately times out pending +// Read and Write operations. +func testCloseTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + // Test for cancelation upon connection closure. + c1.SetDeadline(neverTimeout) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + c1.Close() + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Read(buf) + } + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Write(buf) + } + }() +} + +// testConcurrentMethods tests that the methods of net.Conn can safely +// be called concurrently. +func testConcurrentMethods(t *testing.T, c1, c2 net.Conn) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; see https://golang.org/issue/20489") + } + go chunkedCopy(c2, c2) + + // The results of the calls may be nonsensical, but this should + // not trigger a race detector warning. + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(7) + go func() { + defer wg.Done() + c1.Read(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.Write(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.SetDeadline(time.Now().Add(10 * time.Millisecond)) + }() + go func() { + defer wg.Done() + c1.SetReadDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.LocalAddr() + }() + go func() { + defer wg.Done() + c1.RemoteAddr() + }() + } + wg.Wait() // At worst, the deadline is set 10ms into the future + + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// checkForTimeoutError checks that the error satisfies the Error interface +// and that Timeout returns true. +func checkForTimeoutError(t *testing.T, err error) { + if nerr, ok := err.(net.Error); ok { + if !nerr.Timeout() { + t.Errorf("err.Timeout() = false, want true") + } + } else { + t.Errorf("got %T, want net.Error", err) + } +} + +// testRoundtrip writes something into c and reads it back. +// It assumes that everything written into c is echoed back to itself. +func testRoundtrip(t *testing.T, c net.Conn) { + if err := c.SetDeadline(neverTimeout); err != nil { + t.Errorf("roundtrip SetDeadline error: %v", err) + } + + const s = "Hello, world!" + buf := []byte(s) + if _, err := c.Write(buf); err != nil { + t.Errorf("roundtrip Write error: %v", err) + } + if _, err := io.ReadFull(c, buf); err != nil { + t.Errorf("roundtrip Read error: %v", err) + } + if string(buf) != s { + t.Errorf("roundtrip data mismatch: got %q, want %q", buf, s) + } +} + +// resyncConn resynchronizes the connection into a sane state. +// It assumes that everything written into c is echoed back to itself. +// It assumes that 0xff is not currently on the wire or in the read buffer. +func resyncConn(t *testing.T, c net.Conn) { + c.SetDeadline(neverTimeout) + errCh := make(chan error) + go func() { + _, err := c.Write([]byte{0xff}) + errCh <- err + }() + buf := make([]byte, 1024) + for { + n, err := c.Read(buf) + if n > 0 && bytes.IndexByte(buf[:n], 0xff) == n-1 { + break + } + if err != nil { + t.Errorf("unexpected Read error: %v", err) + break + } + } + if err := <-errCh; err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// chunkedCopy copies from r to w in fixed-width chunks to avoid +// causing a Write that exceeds the maximum packet size for packet-based +// connections like "unixpacket". +// We assume that the maximum packet size is at least 1024. +func chunkedCopy(w io.Writer, r io.Reader) error { + b := make([]byte, 1024) + _, err := io.CopyBuffer(struct{ io.Writer }{w}, struct{ io.Reader }{r}, b) + return err +} diff --git a/vendor/golang.org/x/net/nettest/conntest_go16.go b/vendor/golang.org/x/net/nettest/conntest_go16.go new file mode 100644 index 0000000..4cbf48e --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_go16.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Avoid using subtests on Go 1.6 and below. + timeoutWrapper(t, mp, testBasicIO) + timeoutWrapper(t, mp, testPingPong) + timeoutWrapper(t, mp, testRacyRead) + timeoutWrapper(t, mp, testRacyWrite) + timeoutWrapper(t, mp, testReadTimeout) + timeoutWrapper(t, mp, testWriteTimeout) + timeoutWrapper(t, mp, testPastTimeout) + timeoutWrapper(t, mp, testPresentTimeout) + timeoutWrapper(t, mp, testFutureTimeout) + timeoutWrapper(t, mp, testCloseTimeout) + timeoutWrapper(t, mp, testConcurrentMethods) +} diff --git a/vendor/golang.org/x/net/nettest/conntest_go17.go b/vendor/golang.org/x/net/nettest/conntest_go17.go new file mode 100644 index 0000000..fa039f0 --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_go17.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Use subtests on Go 1.7 and above since it is better organized. + t.Run("BasicIO", func(t *testing.T) { timeoutWrapper(t, mp, testBasicIO) }) + t.Run("PingPong", func(t *testing.T) { timeoutWrapper(t, mp, testPingPong) }) + t.Run("RacyRead", func(t *testing.T) { timeoutWrapper(t, mp, testRacyRead) }) + t.Run("RacyWrite", func(t *testing.T) { timeoutWrapper(t, mp, testRacyWrite) }) + t.Run("ReadTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testReadTimeout) }) + t.Run("WriteTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testWriteTimeout) }) + t.Run("PastTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPastTimeout) }) + t.Run("PresentTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPresentTimeout) }) + t.Run("FutureTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testFutureTimeout) }) + t.Run("CloseTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testCloseTimeout) }) + t.Run("ConcurrentMethods", func(t *testing.T) { timeoutWrapper(t, mp, testConcurrentMethods) }) +} diff --git a/vendor/golang.org/x/net/nettest/conntest_test.go b/vendor/golang.org/x/net/nettest/conntest_test.go new file mode 100644 index 0000000..9f9453f --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_test.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package nettest + +import ( + "net" + "os" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" +) + +func TestTestConn(t *testing.T) { + tests := []struct{ name, network string }{ + {"TCP", "tcp"}, + {"UnixPipe", "unix"}, + {"UnixPacketPipe", "unixpacket"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if !nettest.TestableNetwork(tt.network) { + t.Skipf("not supported on %s", runtime.GOOS) + } + + mp := func() (c1, c2 net.Conn, stop func(), err error) { + ln, err := nettest.NewLocalListener(tt.network) + if err != nil { + return nil, nil, nil, err + } + + // Start a connection between two endpoints. + var err1, err2 error + done := make(chan bool) + go func() { + c2, err2 = ln.Accept() + close(done) + }() + c1, err1 = net.Dial(ln.Addr().Network(), ln.Addr().String()) + <-done + + stop = func() { + if err1 == nil { + c1.Close() + } + if err2 == nil { + c2.Close() + } + ln.Close() + switch tt.network { + case "unix", "unixpacket": + os.Remove(ln.Addr().String()) + } + } + + switch { + case err1 != nil: + stop() + return nil, nil, nil, err1 + case err2 != nil: + stop() + return nil, nil, nil, err2 + default: + return c1, c2, stop, nil + } + } + + TestConn(t, mp) + }) + } +} diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go new file mode 100644 index 0000000..56f43bf --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package netutil provides network utility functions, complementing the more +// common ones in the net package. +package netutil // import "golang.org/x/net/netutil" + +import ( + "net" + "sync" +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. +func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/vendor/golang.org/x/net/netutil/listen_test.go b/vendor/golang.org/x/net/netutil/listen_test.go new file mode 100644 index 0000000..5e07d7b --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen_test.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package netutil + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/internal/nettest" +) + +func TestLimitListener(t *testing.T) { + const max = 5 + attempts := (nettest.MaxOpenFiles() - max) / 2 + if attempts > 256 { // maximum length of accept queue is 128 by default + attempts = 256 + } + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + l = LimitListener(l, max) + + var open int32 + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > max { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + })) + + var wg sync.WaitGroup + var failed int32 + for i := 0; i < attempts; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + t.Log(err) + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }() + } + wg.Wait() + + // We expect some Gets to fail as the kernel's accept queue is filled, + // but most should succeed. + if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) + } +} + +type errorListener struct { + net.Listener +} + +func (errorListener) Accept() (net.Conn, error) { + return nil, errFake +} + +var errFake = errors.New("fake error from errorListener") + +// This used to hang. +func TestLimitListenerError(t *testing.T) { + donec := make(chan bool, 1) + go func() { + const n = 2 + ll := LimitListener(errorListener{}, n) + for i := 0; i < n+1; i++ { + _, err := ll.Accept() + if err != errFake { + t.Fatalf("Accept error = %v; want errFake", err) + } + } + donec <- true + }() + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout. deadlock?") + } +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 0000000..4c5ad88 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" +) + +type direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var Direct = direct{} + +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 0000000..0689bb6 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,140 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go new file mode 100644 index 0000000..a7d8095 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host_test.go @@ -0,0 +1,55 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "net" + "reflect" + "testing" +) + +type recordingProxy struct { + addrs []string +} + +func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) { + r.addrs = append(r.addrs, addr) + return nil, errors.New("recordingProxy") +} + +func TestPerHost(t *testing.T) { + var def, bypass recordingProxy + perHost := NewPerHost(&def, &bypass) + perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16") + + expectedDef := []string{ + "example.com:123", + "1.2.3.4:123", + "[1001::]:123", + } + expectedBypass := []string{ + "localhost:123", + "zone:123", + "foo.zone:123", + "127.0.0.1:123", + "10.1.2.3:123", + "[1000::]:123", + } + + for _, addr := range expectedDef { + perHost.Dial("tcp", addr) + } + for _, addr := range expectedBypass { + perHost.Dial("tcp", addr) + } + + if !reflect.DeepEqual(expectedDef, def.addrs) { + t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef) + } + if !reflect.DeepEqual(expectedBypass, bypass.addrs) { + t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass) + } +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 0000000..553ead7 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,134 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func FromEnvironment() Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return Direct + } + proxy, err := FromURL(proxyURL, Direct) + if err != nil { + return Direct + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go new file mode 100644 index 0000000..0f31e21 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy_test.go @@ -0,0 +1,215 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "bytes" + "fmt" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" + "testing" +) + +type proxyFromEnvTest struct { + allProxyEnv string + noProxyEnv string + wantTypeOf Dialer +} + +func (t proxyFromEnvTest) String() string { + var buf bytes.Buffer + space := func() { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + } + if t.allProxyEnv != "" { + fmt.Fprintf(&buf, "all_proxy=%q", t.allProxyEnv) + } + if t.noProxyEnv != "" { + space() + fmt.Fprintf(&buf, "no_proxy=%q", t.noProxyEnv) + } + return strings.TrimSpace(buf.String()) +} + +func TestFromEnvironment(t *testing.T) { + ResetProxyEnv() + + type dummyDialer struct { + direct + } + + RegisterDialerType("irc", func(_ *url.URL, _ Dialer) (Dialer, error) { + return dummyDialer{}, nil + }) + + proxyFromEnvTests := []proxyFromEnvTest{ + {allProxyEnv: "127.0.0.1:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "ftp://example.com:8000", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "socks5://example.com:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: &PerHost{}}, + {allProxyEnv: "irc://example.com:8000", wantTypeOf: dummyDialer{}}, + {noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {wantTypeOf: direct{}}, + } + + for _, tt := range proxyFromEnvTests { + os.Setenv("ALL_PROXY", tt.allProxyEnv) + os.Setenv("NO_PROXY", tt.noProxyEnv) + ResetCachedEnvironment() + + d := FromEnvironment() + if got, want := fmt.Sprintf("%T", d), fmt.Sprintf("%T", tt.wantTypeOf); got != want { + t.Errorf("%v: got type = %T, want %T", tt, d, tt.wantTypeOf) + } + } +} + +func TestFromURL(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5Domain, &wg) + + url, err := url.Parse("socks5://user:password@" + gateway.Addr().String()) + if err != nil { + t.Fatalf("url.Parse failed: %v", err) + } + proxy, err := FromURL(url, Direct) + if err != nil { + t.Fatalf("FromURL failed: %v", err) + } + _, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Fatalf("net.SplitHostPort failed: %v", err) + } + if c, err := proxy.Dial("tcp", "localhost:"+port); err != nil { + t.Fatalf("FromURL.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func TestSOCKS5(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5IP4, &wg) + + proxy, err := SOCKS5("tcp", gateway.Addr().String(), nil, Direct) + if err != nil { + t.Fatalf("SOCKS5 failed: %v", err) + } + if c, err := proxy.Dial("tcp", endSystem.Addr().String()); err != nil { + t.Fatalf("SOCKS5.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) { + defer wg.Done() + + c, err := gateway.Accept() + if err != nil { + t.Errorf("net.Listener.Accept failed: %v", err) + return + } + defer c.Close() + + b := make([]byte, 32) + var n int + if typ == socks5Domain { + n = 4 + } else { + n = 3 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } + if typ == socks5Domain { + n = 16 + } else { + n = 10 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ { + t.Errorf("got an unexpected packet: %#02x %#02x %#02x %#02x", b[0], b[1], b[2], b[3]) + return + } + if typ == socks5Domain { + copy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9}) + b = append(b, []byte("localhost")...) + } else { + copy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4}) + } + host, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Errorf("net.SplitHostPort failed: %v", err) + return + } + b = append(b, []byte(net.ParseIP(host).To4())...) + p, err := strconv.Atoi(port) + if err != nil { + t.Errorf("strconv.Atoi failed: %v", err) + return + } + b = append(b, []byte{byte(p >> 8), byte(p)}...) + if _, err := c.Write(b); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } +} + +func ResetProxyEnv() { + for _, env := range []*envOnce{allProxyEnv, noProxyEnv} { + for _, v := range env.names { + os.Setenv(v, "") + } + } + ResetCachedEnvironment() +} + +func ResetCachedEnvironment() { + allProxyEnv.reset() + noProxyEnv.reset() +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 0000000..3fed38e --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,214 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "io" + "net" + "strconv" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. +func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { + s := &socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type socks5 struct { + user, password string + network, addr string + forward Dialer +} + +const socks5Version = 5 + +const ( + socks5AuthNone = 0 + socks5AuthPassword = 2 +) + +const socks5Connect = 1 + +const ( + socks5IP4 = 1 + socks5Domain = 3 + socks5IP6 = 4 +) + +var socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, socks5IP4) + ip = ip4 + } else { + buf = append(buf, socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(socks5Errors) { + failure = socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case socks5IP4: + bytesToDiscard = net.IPv4len + case socks5IP6: + bytesToDiscard = net.IPv6len + case socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 0000000..f85a3c3 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,713 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat +// +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +const ( + defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" + gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" +) + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + + // validSuffixRE is used to check that the entries in the public suffix + // list are in canonical form (after Punycode encoding). Specifically, + // capital letters are not allowed. + validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) + dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) + + comments = flag.Bool("comments", false, "generate table.go comments, for debugging") + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + if *url != defaultURL { + return fmt.Errorf("-version was not specified, and the -url is not the default one") + } + sha, date, err := gitCommit() + if err != nil { + return err + } + *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann = false + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffixRE.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): + case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". + default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + if err := generate(printReal, &root, "table.go"); err != nil { + return err + } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} + +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. +const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<= 1< 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + if *comments { + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } else { + fmt.Fprintf(w, "0x%x,\n", c) + } + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. +func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 0000000..8bbf3bc --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,135 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// http://publicsuffix.org/. A public suffix is one under which Internet users +// can directly register names. +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is privately +// managed. For example, foo.org and foo.co.uk are ICANN domains, +// foo.dyndns.org and foo.blogspot.co.uk are private domains. +// +// Use cases for distinguishing ICANN domains like foo.com from private +// domains like foo.appspot.com can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, wildcard := domain, len(domain), false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + icann = u&(1<>= nodesBitsICANN + u = children[u&(1<>= childrenBitsLo + hi = u & (1<>= childrenBitsHi + switch u & (1<>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1< len(b[j]) +} + +// eTLDPlusOneTestCases come from +// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt +var eTLDPlusOneTestCases = []struct { + domain, want string +}{ + // Empty input. + {"", ""}, + // Unlisted TLD. + {"example", ""}, + {"example.example", "example.example"}, + {"b.example.example", "example.example"}, + {"a.b.example.example", "example.example"}, + // TLD with only 1 rule. + {"biz", ""}, + {"domain.biz", "domain.biz"}, + {"b.domain.biz", "domain.biz"}, + {"a.b.domain.biz", "domain.biz"}, + // TLD with some 2-level rules. + {"com", ""}, + {"example.com", "example.com"}, + {"b.example.com", "example.com"}, + {"a.b.example.com", "example.com"}, + {"uk.com", ""}, + {"example.uk.com", "example.uk.com"}, + {"b.example.uk.com", "example.uk.com"}, + {"a.b.example.uk.com", "example.uk.com"}, + {"test.ac", "test.ac"}, + // TLD with only 1 (wildcard) rule. + {"mm", ""}, + {"c.mm", ""}, + {"b.c.mm", "b.c.mm"}, + {"a.b.c.mm", "b.c.mm"}, + // More complex TLD. + {"jp", ""}, + {"test.jp", "test.jp"}, + {"www.test.jp", "test.jp"}, + {"ac.jp", ""}, + {"test.ac.jp", "test.ac.jp"}, + {"www.test.ac.jp", "test.ac.jp"}, + {"kyoto.jp", ""}, + {"test.kyoto.jp", "test.kyoto.jp"}, + {"ide.kyoto.jp", ""}, + {"b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"a.b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"c.kobe.jp", ""}, + {"b.c.kobe.jp", "b.c.kobe.jp"}, + {"a.b.c.kobe.jp", "b.c.kobe.jp"}, + {"city.kobe.jp", "city.kobe.jp"}, + {"www.city.kobe.jp", "city.kobe.jp"}, + // TLD with a wildcard rule and exceptions. + {"ck", ""}, + {"test.ck", ""}, + {"b.test.ck", "b.test.ck"}, + {"a.b.test.ck", "b.test.ck"}, + {"www.ck", "www.ck"}, + {"www.www.ck", "www.ck"}, + // US K12. + {"us", ""}, + {"test.us", "test.us"}, + {"www.test.us", "test.us"}, + {"ak.us", ""}, + {"test.ak.us", "test.ak.us"}, + {"www.test.ak.us", "test.ak.us"}, + {"k12.ak.us", ""}, + {"test.k12.ak.us", "test.k12.ak.us"}, + {"www.test.k12.ak.us", "test.k12.ak.us"}, + // Punycoded IDN labels + {"xn--85x722f.com.cn", "xn--85x722f.com.cn"}, + {"xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"www.xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn"}, + {"xn--55qx5d.cn", ""}, + {"xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"www.xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"shishi.xn--fiqs8s", "shishi.xn--fiqs8s"}, + {"xn--fiqs8s", ""}, +} + +func TestEffectiveTLDPlusOne(t *testing.T) { + for _, tc := range eTLDPlusOneTestCases { + got, _ := EffectiveTLDPlusOne(tc.domain) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) + } + } +} diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go new file mode 100644 index 0000000..a870b36 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -0,0 +1,9534 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = "publicsuffix.org's public_suffix_list.dat, git revision 0f3b07d9aab6d6c9fe74990af98316468d40f488 (2018-01-25T09:22:16Z)" + +const ( + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 +) + +// numTLD is the number of top level domains. +const numTLD = 1551 + +// Text is the combined text of all labels. +const text = "0emmafann-arboretumbriamallamaceiobihirosakikamijimatsuzaki234li" + + "ma-cityeatselinogradult3l3p0rtargets-itargivestbytomaritimekeepi" + + "ng120009guacuiababia-goracleaningroks-theatreeastcoastaldefencea" + + "tonsbergjemnes3-ap-northeast-1337bilbaogashimadachicagoboats3-we" + + "bsite-us-east-1billustrationikonanporovnopocznoppdalindesnes3-we" + + "bsite-us-west-1biobirdartcenterprisesakimobetsuitainairforcechir" + + "ealminamiechizeninohekinannestadiybirkenesoddtangenovaranzanpach" + + "igasakievennodesaarlandnpanasonicateringebuilderschmidtre-gaulda" + + "livornobirthplacebitballooningladefinimakanegasakinkobayashikaoi" + + "rminamifuranobjarkoybjerkreimbananarepublicasadelamonedatingjesd" + + "alimitediscountysvardolls3-eu-west-3utilitiesquare7bjugninomiyak" + + "onojorpelandrangedalombardiamonds3-website-us-west-2blancomedica" + + "ltanissettaipeiheijinuyamashinatsukigatakasagotpantheonsitebloom" + + "bergbauernuorochesterbloxcms5ybluedancebmoattachmentsakyotanabel" + + "lunord-aurdalvdalcesalangenirasakinvestmentsalondonetskarmoybmsa" + + "ltdalombardynamisches-dnsaludray-dnsupdaternopilawawebspacebmwed" + + "dinglassassinationalheritagebnpparibaselburgleezebnrwedeployboml" + + "oansalvadordalibabalsanagochihayaakasakawaharaholtalenvironmenta" + + "lconservationishiazainzais-a-candidatebondrayddnsfreebox-osascol" + + "i-picenordre-landraydnsalzburgliwicebonnishigobookinglobalashovh" + + "achinohedmarkarpaczeladzparaglidingloboavistaprintelligencebooml" + + "adbrokesamegawabootsamnangerboschaefflerdalwaysdatabaseballangen" + + "oamishirasatochigiessensiositelekommunikationishiharabostikaruiz" + + "awabostonakijinsekikogentinglogowegroweibolognagasukebotanicalga" + + "rdenishiizunazukis-a-catererbotanicgardenishikatakayamatsushigeb" + + "otanybouncemerckmsdnipropetrovskjervoyagebounty-fullensakerrypro" + + "pertiesampagespeedmobilizeroboutiquebecatholicaxiascolipicenodum" + + "inamiiselectjomemorialomzaporizhzheguris-a-celticsfanishikatsura" + + "git-repostfoldnavybozentsujiiebplacedekagaminord-odalondrinaples" + + "amsclubindalorenskogloppenzaolbia-tempio-olbiatempioolbialystokk" + + "embuchikumagayagawakuyabukihokumakogenglandrivelandrobaknoluokta" + + "chikawakkanaibetsubamericanfamilydscloudcontrolappspotagerbrandy" + + "winevalleybrasiliabrindisibenikebristoloseyouripirangapartmentsa" + + "msungmbhartiffanybritishcolumbialowiezachpomorskienishikawazukam" + + "itsuebroadcastlefrakkestadrudunsandvikcoromantovalle-d-aostathel" + + "lebroadwaybroke-itjxjavald-aostaplesanfranciscofreakunemurorange" + + "iseiyoichippubetsubetsugarugbyengerdalaskanittedallasalleasingle" + + "surancertmgretagajobojis-a-chefarmsteadupontariodejaneirodoybrok" + + "erbronnoysundurbanamexnetlifyis-a-conservativefsnillfjordurhambu" + + "rgminakamichiharabrothermesaverdeatnurembergmodellingmxn--0trq7p" + + "7nnishimerabrowsersafetymarketsangobrumunddalotenkawabrunelastic" + + "beanstalkarumaifarsundyndns-at-workinggrouparisor-fronishinomiya" + + "shironobrusselsanjotkmaxxn--11b4c3dyndns-blogdnsannanishinoomote" + + "gobruxellesannohelplfinancialottebryanskleppgafanquannefrankfurt" + + "ksatxn--12c1fe0bradescorporationishinoshimatsuurabrynewjerseybus" + + "kerudinewportlligatmparliamentoyosatoyonakagyokutoyokawabuzenish" + + "iokoppegardyndns-freeboxoslodingenishitosashimizunaminamibosognd" + + "alottokorozawabuzzweirbwfashionishiwakis-a-cpadualstackspace-to-" + + "rentalstomakomaibarabzhitomirumalatvuopmicrolightingrimstadyndns" + + "-homednsanokasaokaminokawanishiaizubangecommunitysnesardegnaroyc" + + "omobaracomparemarkerryhotelsardiniacompute-1computerhistoryofsci" + + "ence-fictioncomsecuritytacticsarlutskashiwazakiyosemitecondoshic" + + "hinohealth-carereformitakeharaconferenceconstructionconsuladohar" + + "uovatrani-andria-barletta-trani-andriaconsultanthropologyconsult" + + "ingvolluxembourgruecontactraniandriabarlettatraniandriacontagema" + + "tsubaracontemporaryarteducationalchikugojomedio-campidano-medioc" + + "ampidanomediocontractorskenconventureshinodearthdfcbankasukabedz" + + "in-the-bandaioiraseeklogest-mon-blogueurovisionionjukudoyamainte" + + "nancebetsuikidsmynasushiobarackmazerbaijan-mayenebakkeshibechamb" + + "agriculturennebudapest-a-la-masionthewifiat-band-campaniacooking" + + "channelsdvrdnsdojoetsuwanouchikujogaszczytnordlandyndns-weberlin" + + "colncoolkuszkolahppiacenzagancooperativano-frankivskodjeffersonc" + + "openhagencyclopedichernivtsiciliacorsicagliaribeiraokinawashiros" + + "atochiokinoshimaizuruhrcorvettemasekasumigaurawa-mazowszextraspa" + + "cekitagatajirissagamiharacosenzakopanerairguardiannakadomarinebr" + + "askaunjargalsaceocosidnsfor-better-thanawatchesarpsborguitarsaru" + + "futsunomiyawakasaikaitakoelncostumedizinhistorischesasayamacouch" + + "potatofriesasebofagecounciluxurycouponsaskatchewancoursesassaris" + + "-a-doctoraycq-acranbrookuwanalyticsaudacreditcardyndns-wikiracre" + + "ditunioncremonashgabadaddjaguarqhachiojiyahoooshikamaishimodatec" + + "rewhoswhokksundyndns-workisboringujoinvillewismillercricketrzync" + + "rimeast-kazakhstanangercrotonexus-3crownprovidercrsvparsauherady" + + "ndns1cruisesavannahgacryptonomichigangwoncuisinellair-traffic-co" + + "ntrolleyculturalcentertainmentranoycuneocupcakecuritibaghdadynns" + + "aves-the-whalessandria-trani-barletta-andriatranibarlettaandriac" + + "xn--12cfi8ixb8luzerncyberlevagangaviikanonjis-a-financialadvisor" + + "-aurdalvivanovodkamisatokashikiwakunigamiharufcfancymrussiacyona" + + "barulsandoycyoutheworkpccwiiheyakagefgushikamifuranorth-kazakhst" + + "anfhvalerfidonnakanotoddenfieldynvpnchernovtsykkylvenetogakushim" + + "otoganewyorkshirecipesaro-urbino-pesarourbinopesaromasvuotnakaiw" + + "amizawassamukawataricohdatsunanjoburgriwataraidyndns-iparmattele" + + "fonicapitalonewspaperfigueresinstagingxn--1ctwolominamatakkokami" + + "noyamaxunusualpersonfilateliafilegearfilminamimakis-a-geekaszuby" + + "finalfinancefineartscholarshipschoolfinlandyroyrvikingulenfinnoy" + + "firebaseappartis-a-greenfirenzefirestonefirmdaleirvikatowicefish" + + "ingolffanschulefitjarfitnessettlementransurlfjalerflesbergflickr" + + "agerotikakamigaharaflightschwarzgwangjuniperflirflogintohmalvika" + + "tsushikabeeldengeluidfloraflorencefloridavvesiidazaifudaigokasel" + + "jordfloripaderbornfloristanohatakahamamurogawaflorogerschweizflo" + + "wersciencecentersciencehistoryflynnhosting-clusterflynnhubarclay" + + "s3-sa-east-1fndfor-ourfor-someeresistancefor-theaterforexrothach" + + "irogatakamoriokalmykiaforgotdnscientistockholmestrandforli-cesen" + + "a-forlicesenaforlikescandynamic-dnscjohnsonforsaleitungsenforsan" + + "dasuoloftrapaniizafortalfortmissoulancashireggio-calabriafortwor" + + "thadanorthwesternmutualforuminamiminowafosnescotlandfotaruis-a-g" + + "urufoxfordebianfozorafredrikstadtvscrapper-sitefreeddnsgeekgalax" + + "yfreemasonryfreesitevadsochildrensgardenfreetlscrappingfreiburgf" + + "reightravelchannelfreseniuscountryestateofdelawarezzoologyfribou" + + "rgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-g" + + "iuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-gi" + + "uliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriuliv" + + "giuliafrlfroganscrysechirurgiens-dentistes-en-francefrognfroland" + + "from-akrehamnfrom-alfrom-arfrom-azfrom-canonoichinomiyakefrom-co" + + "dynaliasdaburfrom-ctravelersinsurancefrom-dchiryukyuragifuchungb" + + "ukharafrom-dedyn-ip24from-flanderservegame-serversicherungfrom-g" + + "ausdalfrom-higashiagatsumagoianiafrom-iafrom-idfrom-ilfrom-inche" + + "onfrom-kservehalflifestylefrom-kyowariasahikawafrom-lancasterfro" + + "m-mangonohejis-a-hard-workerfrom-mdfrom-meethnologyfrom-mifunefr" + + "om-mnfrom-modalenfrom-mservehttpartnerservehumourfrom-mtnfrom-nc" + + "hitachinakagawatchandclockashibatakashimarumorimachidafrom-ndfro" + + "m-nefrom-nh-servebbserveirchitosetogitsuliguriafrom-njaworznotog" + + "awafrom-nminamiogunicomcastresindeviceserveminecraftrdfrom-nv-in" + + "foodnetworkshoppingfrom-nyfrom-ohtawaramotoineppuboliviajessheim" + + "periafrom-oketohnoshooguyfrom-orfrom-padovaksdalfrom-pratohobby-" + + "sitexashorokanaiefrom-rivnefrom-schoenbrunnfrom-sdfrom-tnfrom-tx" + + "n--1lqs03nfrom-utazuerichardlillehammerfeste-ipartservemp3from-v" + + "al-daostavalleyfrom-vtrentino-a-adigefrom-wafrom-wielunnerfrom-w" + + "valled-aostatoilfrom-wyfrosinonefrostalowa-wolawafroyahikobeardu" + + "baiduckdnservep2partyfstavernfujiiderafujikawaguchikonefujiminok" + + "amoenairtelecitychyattorneyagawakeisenbahnfujinomiyadafujiokayam" + + "angyshlakasamatsudontexistmein-vigorgefujisatoshonairtrafficplex" + + "us-1fujisawafujishiroishidakabiratoridefensells-for-lesservepics" + + "ervequakefujitsurugashimaringatlantakaharufujixeroxn--1lqs71dfuj" + + "iyoshidafukayabeatservesarcasmatartanddesignfukuchiyamadafukudom" + + "inichocolatelevisionissedalouvreisenisshingugefukuis-a-hunterfuk" + + "umitsubishigakirovogradoyfukuokazakiryuohadselfipasadenaritakura" + + "shikis-a-knightpointtokamachintaifun-dnsaliasiafukuroishikarikat" + + "urindalfukusakisarazurewebsiteshikagamiishibukawafukuyamagatakah" + + "ashimamakishiwadafunabashiriuchinadafunagatakahatakaishimogosenf" + + "unahashikamiamakusatsumasendaisennangoodyearfundaciofuoiskujukur" + + "iyamaniwakuratextileksvikatsuyamarylandfuosskoczowildlifedorainf" + + "racloudcontrolledogawarabikomaezakirunore-og-uvdalfurnitureggio-" + + "emilia-romagnakatombetsumitakagiizefurubirafurudonostiaarpassage" + + "nservicesettsurgeonshalloffameloyalistjordalshalsenfurukawais-a-" + + "landscaperfusodegaurafussaikisofukushimannorfolkebiblelveruminam" + + "isanrikubetsupportrentino-aadigefutabayamaguchinomigawafutboldly" + + "goingnowhere-for-morenakatsugawafuttsurugiminamitanefuturecmseva" + + "stopolefuturehostingfuturemailingfvgfylkesbiblackfridayfyresdalh" + + "angoutsystemscloudfunctionsevenassisicilyhannanmokuizumodenakaya" + + "mapassenger-associationhannosegawahanyuzenhapmirhareidsbergenhar" + + "stadharvestcelebrationhasamarburghasaminami-alpssells-itrentino-" + + "altoadigehashbanghasudahasura-appatriahasvikazohatogayaitakanabe" + + "autysfjordhatoyamazakitakamiizumisanofidelityhatsukaichikaiseis-" + + "a-linux-useranishiaritabashijonawatehattfjelldalhayashimamotobun" + + "gotakadapliernewmexicoalhazuminobusellsyourhomegoodsewilliamhill" + + "hbodoes-itvedestrandhelsinkitakatakanezawahembygdsforbundhemnesh" + + "aris-a-llamarriottrentino-s-tirollagrigentomologyeonggiehtavuoat" + + "nagaivuotnagaokakyotambabydgoszczecinemaceratabusebastopologyeon" + + "gnamegawakayamadridhemsedalhepforgeherokussldheroyhgtvalledaosta" + + "vangerhigashichichibunkyonanaoshimageandsoundandvisionhigashihir" + + "oshimanehigashiizumozakitakyushuaiahigashikagawahigashikagurasoe" + + "dahigashikawakitaaikitamihamadahigashikurumeguromskoghigashimats" + + "ushimarcheapaviancargodaddyn-vpnplus-2higashimatsuyamakitaakitad" + + "aitoigawahigashimurayamamotorcyclesharpfizerhigashinarusembokuki" + + "tamotosumy-routerhigashinehigashiomihachimanaustdalhigashiosakas" + + "ayamanakakogawahigashishirakawamatakaokaluganskydivinghigashisum" + + "iyoshikawaminamiaikitanakagusukumodernhigashitsunoshiroomurahiga" + + "shiurausukitashiobarahigashiyamatokoriyamanashifteditchyouripgfo" + + "ggiahigashiyodogawahigashiyoshinogaris-a-musicianhiraizumisatoka" + + "izukamakurazakitaurayasudahirakatashinagawahiranais-a-nascarfanh" + + "irarahiratsukagawahirayaizuwakamatsubushikusakadogawahistorichou" + + "seshawaiijimaritimoduminamiyamashirokawanabelembetsukubankazunow" + + "tvallee-aosteroyhitachiomiyagildeskaliszhitachiotagoperauniteroi" + + "zumizakisosakitagawahitraeumtgeradellogliastradinghjartdalhjelme" + + "landholeckobierzyceholidayhomeipharmacienshellaspeziahomelinkddi" + + "elddanuorrikuzentakataiwanairlinedre-eikerhomelinuxn--1qqw23ahom" + + "eofficehomesecuritymacaparecidahomesecuritypchofunatoriginsurecr" + + "eationiyodogawahomesenseminehomeunixn--2m4a15ehondahoneywellbein" + + "gzonehongotembaixadahonjyoitakarazukameokameyamatotakadahorninda" + + "lhorseoullensvanguardhortendofinternet-dnshimojis-a-nurservebeer" + + "hospitalhoteleshimokawahotmailhoyangerhoylandetroitskypehumaniti" + + "eshimokitayamahurdalhurumajis-a-painteractivegarsheis-a-patsfanh" + + "yllestadhyogoris-a-personaltrainerhyugawarahyundaiwafunejewelryj" + + "ewishartgalleryjfkharkovanylvenicejgorajlcube-serverrankoshigaya" + + "kumoldelmenhorstalbanshinichinanjlljmphilatelyjnjcphiladelphiaar" + + "eadmyblogsitejoyentrentino-sued-tiroljoyokaichibalatinoipifonymi" + + "nanojpmorganjpnjprshinjournalismailillesandefjordjurkoshunantank" + + "hmelnitskiyamarylhurstjohnkosugekotohiradomainshinjukumanokotour" + + "akouhokutamakis-a-techietis-a-photographerokuappharmacyshimonita" + + "yanagithubusercontentrentino-stirolkounosupplieshinkamigotoyohas" + + "himotottoris-a-therapistoiakouyamashikekouzushimashikis-an-accou" + + "ntantshimonosekikawakozagawakozakis-an-actorkozowinbarrel-of-kno" + + "wledgeologyonagoyaustrheimatunduhrennesoyolasitebizenakasatsunai" + + "rportland-4-salernoboribetsucks3-eu-central-1kpnkppspdnshinshino" + + "tsurgerykrasnodarkredstonekristiansandcatshinshirokristiansundkr" + + "odsheradkrokstadelvaldaostarnbergkrymincommbankhmelnytskyivaokum" + + "atorinokumejimasoykumenantokonamegatakatoris-an-actresshimosuwal" + + "kis-a-playerkunisakis-an-anarchistoricalsocietykunitachiarailway" + + "kunitomigusukumamotoyamashikokuchuokunneppugliakunstsammlungkuns" + + "tunddesignkuokgrouphoenixn--30rr7ykurehabmerkurgankurobelaudible" + + "borkangerkurogiminamiashigarakuroisoftwarendalenugkuromatsunais-" + + "an-artisteinkjerusalembroiderykurotakikawasakis-an-engineeringku" + + "shirogawakustanais-an-entertainerkusupplykutchanelkutnokuzumakis" + + "-bykvafjordkvalsundkvamfamberkeleykvanangenkvinesdalkvinnheradkv" + + "iteseidskogkvitsoykwpspiegelkzmitoyoakemiuramiyazumiyotamanomjon" + + "dalenmlbfanmonstermontrealestatefarmequipmentrentinoa-adigemonza" + + "-brianzaporizhzhiamonza-e-della-brianzapposhintomikasaharamonzab" + + "rianzaptokyotangotsukitahatakamatsukawamonzaebrianzaramonzaedell" + + "abrianzamoonscalemoparachutingmordoviamoriyamatsumotofukemoriyos" + + "himinamiawajikis-into-animeiwamarshallstatebankfhappoumormonmout" + + "hagakhanamigawamoroyamatsunomortgagemoscowindmillmoseushistorymo" + + "sjoenmoskeneshinyoshitomiokamogawamosshiojirishirifujiedamosvikn" + + "x-serveronamsskoganeis-a-rockstarachowicemoteginowaniihamatamaka" + + "wajimansionshioyanaizumoviemovimientolgamovistargardmtpchoyodoba" + + "shichikashukujitawaramtranbymuenstermuginozawaonsenmuikamisunaga" + + "wamukodairamulhouserveblogspotrentinoaadigemunakatanemuncienciam" + + "uosattemuphonefosshirahamatonbetsurnadalmurmanskolobrzegersundmu" + + "rotorcraftrentinoalto-adigemusashimurayamatsusakahoginankokubunj" + + "is-into-carshimotsukemusashinoharamuseetrentinoaltoadigemuseumve" + + "renigingmusicarbonia-iglesias-carboniaiglesiascarboniamutsuzawam" + + "y-vigorlicemy-wanggouvicenzamyactivedirectorymyasustor-elvdalmyc" + + "dn77-securecifedexhibitionmyddnskingmydissentrentinos-tirolmydro" + + "boehringerikemydshirakofuefukihaborokunohealthcareershiranukanag" + + "awamyeffectrentinostirolmyfirewallonieruchomoscienceandindustryn" + + "myfritzmyftpaccesshiraois-into-cartoonshimotsumamyhome-serversai" + + "lleshiraokananiimihoboleslawiechristiansburgrondarmykolaivaporcl" + + "oudmymailermymediapchristmasakinderoymyokohamamatsudamypephotogr" + + "aphysiomypetshiratakahagitlabormyphotoshibalestrandabergamoareke" + + "ymachinewhampshirebungoonombresciamypsxn--32vp30hagebostadmysecu" + + "ritycamerakermyshopblockshishikuis-into-gamessinazawamytis-a-boo" + + "kkeeperugiamytuleapiagetmyipictetrentinosud-tirolmyvnchromedicin" + + "akamagayachtsantabarbaramywireitrentinosudtirolpinkomaganepionee" + + "rpippulawypiszpittsburghofauskedsmokorsetagayasells-for-unzenpiw" + + "atepixolinopizzapkomakiyosunndalplanetariuminnesotaketakatsukis-" + + "certifieducatorahimeshimamateramobilyplantationplantshitaramapla" + + "tformshangrilanshizukuishimofusaitamatsukuris-lostre-toteneis-a-" + + "republicancerresearchaeologicaliforniaplaystationplazaplchungnam" + + "dalseidfjordyndns-mailucaniaplumbingoplurinacionalpmnpodzonepohl" + + "poivronpokerpokrovskomatsushimasfjordenpoliticarrierpolitiendapo" + + "lkowicepoltavalle-aostarostwodzislawindowshizuokanazawapomorzesz" + + "owinnershoujis-not-certifiedunetbankhakassiapordenonepornporsang" + + "erporsanguidell-ogliastraderporsgrunnanyokoshibahikariwanumatake" + + "tomisatoshimapoznanpraxis-a-bruinsfanprdpreservationpresidioprgm" + + "rprimeldalprincipeprivatizehealthinsuranceprochowiceproductionsh" + + "owaprofesionalprogressivegaskvolloabathsbchurchaseljeepsongdalen" + + "viknaharimalopolskanlandyndns-office-on-the-webcampinashikiminoh" + + "kurapromombetsurfbsbxn--12co0c3b4evalleaostaticsavonarusawaprope" + + "rtyprotectionprotonetrentinosued-tirolprudentialpruszkowioshowti" + + "memergencyahabahcavuotnagarahkkeravjuegoshikikonaikawachinaganoh" + + "aramcoachampionshiphoptobishimagentositecnologiaprzeworskogptplu" + + "sgardenpupictureshisognepvhaibarakitahiroshimaoris-a-lawyerpvtre" + + "ntinosuedtirolpwciprianiigataishinomakindlegnicafederationpzqldq" + + "ponqslgbtrentoyonezawaquicksyteshriramlidlugolekafjordquipelemen" + + "tsienarutomobellevuelosangelesjabbottrevisohughesigdalqvcirclego" + + "doesntexisteingeekashiharasrtroandinosaurepaircraftrogstadsrvare" + + "servecounterstrikestoragestordalstoregontrailroadstorfjordstorjd" + + "evcloudfrontdoorstpetersburgstreamsterdamnserverbaniastudiostudy" + + "ndns-at-homedepotenzamamidsundstuff-4-salestufftoread-booksnesir" + + "dalstuttgartromsakakinokiasusakis-savedsusonosuzakaniepcesuzukan" + + "makiwiensuzukis-slickharkivalleeaosteigensvalbardunloppacificirc" + + "ustomersveiosvelvikomvuxn--2scrj9choshibuyachiyodavvenjargaulard" + + "alowiczest-le-patronsvizzerasvn-reposjcbnlswedenswidnicartoonart" + + "decologiaswiebodzindianapolis-a-bloggerswiftcoverswinoujsciencea" + + "ndhistoryswisshikis-uberleetrentino-sud-tirolsynology-dslingtush" + + "uissier-justicetuvalle-daostatic-accessnoasaitotaltuxfamilytwmai" + + "lvenneslaskerrylogisticsokaneyamazoevestfoldvestnesokndalvestre-" + + "slidrepbodynathomebuiltrusteevestre-totennishiawakuravestvagoyve" + + "velstadvibo-valentiavibovalentiavideovillasnesoddenmarkhangelskj" + + "akdnepropetrovskiervaapsteiermarkongsvingervinnicasacamdvrcampin" + + "agrandebugattipschlesischesolarssonvinnytsiavipsinaappiemontevir" + + "giniavirtualvirtueeldomeindianmarketingvirtuelvisakegawaviterbok" + + "nowsitallvivoldavixn--3bst00misakis-foundationvlaanderenvladikav" + + "kazimierz-dolnyvladimirvlogoipilotshisuifuelblagdenesnaaseraling" + + "enkainanaejrietisalatinabenonichryslervolkswagentsolognevologdan" + + "skoninjambylvolvolkenkundenvolyngdalvossevangenvotevotingvotoyon" + + "owiwatsukiyonoticiaskimitsubatamibudejjuedischesapeakebayernrtrv" + + "arggatromsojamisonwloclawekonsulatrobeepilepsydneywmflabsolundbe" + + "ckommuneworldworse-thandawowitdkonskowolayangrouphilipsynology-d" + + "iskstationwpdevcloudwritesthisblogsytewroclawithgoogleapisa-hock" + + "eynutsiracusakatakinouewtcmisasaguris-gonewtfbx-ostrowwlkpmgunma" + + "nxn--1ck2e1barclaycards3-fips-us-gov-west-1wuozuwwwithyoutubenev" + + "entoeidsvollwzmiuwajimaxn--42c2d9axn--45br5cylxn--45brj9citadeli" + + "veryxn--45q11citichernigovernmentoyotaris-a-cubicle-slavellinota" + + "irestaurantoyotomiyazakis-a-democratoyotsukaidoxn--4gbriminingxn" + + "--4it168dxn--4it797kooris-a-soxfanxn--4pvxs4allxn--54b7fta0ccivi" + + "laviationxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilisationx" + + "n--5rtq34kopervikhersonxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--" + + "6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civilizationxn--80adxh" + + "ksolutionsilkomforbargainstitutelemarkarateu-1xn--80ao21axn--80a" + + "qecdr1axn--80asehdbarsyonlinewhollandiscoveryonaguniversityoriik" + + "aratsuginamikatagamilitaryoshiokaracoldwarmiastageu-2xn--80aswgx" + + "n--80audnedalnxn--8ltr62koryokamikawanehonbetsurutaharaxn--8pvr4" + + "uxn--8y0a063axn--90a3academiamicaaarborteaches-yogasawaracingxn-" + + "-90aeroportalaheadjudaicable-modemocraciaxn--90aishobarakawagoex" + + "n--90azhytomyrxn--9dbhblg6dietcimdbashkiriauthordalandeportenrig" + + "htathomeftpalmaseratibigawastronomy-gatewayokosukanzakiyosatokig" + + "awagrocerybnikahokutobamagazineat-url-o-g-i-natuurwetenschappena" + + "umburgjerdrumeteorappalermomahachijolstereportarumizusawaetnagah" + + "amaroygardendoftheinternetflixilovecollegefantasyleaguernseybolt" + + "arnobrzegyptianaturhistorisches3-ap-northeast-2ixboxenapponazure" + + "-mobile12hpaleobirabogadocscbgdyniabruzzoologicalvinklein-addram" + + "menuernberggfarmerseine164xn--9dbq2axn--9et52uxn--9krt00axn--and" + + "y-iraxn--aroport-byandexn--3ds443gxn--asky-iraxn--aurskog-hland-" + + "jnbasilicataniautomotiveconomiasakuchinotsuchiurakawalmartataran" + + "toyakokonoehimejibmdgcahcesuolocalhostrodawaraumalborkdalaziocea" + + "nographics3-eu-west-1xn--avery-yuasakuhokkaidoomdnsiskinkyotobet" + + "sumidatlanticivilwarmanagementoyouraxn--b-5gaxn--b4w605ferdxn--b" + + "ck1b9a5dre4claimsantacruzsantafedjejuifminamiizukamishihoronobea" + + "uxartsandcraftsantamariakexn--bdddj-mrabdxn--bearalvhki-y4axn--b" + + "erlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachika" + + "tsuuraxn--bievt-0qa2xn--bjarky-fyaotsurreyxn--bjddar-ptamayufuet" + + "tertdasnetzxn--blt-elabourxn--bmlo-graingerxn--bod-2natalxn--brn" + + "ny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigat" + + "ion-aptibleaseating-organicbcn-north-1xn--brum-voagatrysiljanxn-" + + "-btsfjord-9zaxn--c1avgxn--c2br7gxn--c3s14misawaxn--cck2b3basketb" + + "allyngenhktatsunoddautoscanadaejeonbukarasjohkamikoaniikappueblo" + + "ckbustermezgoraugustowadaegubambleclerc66xn--cg4bkis-very-badajo" + + "zxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumisconfusedxn--comuni" + + "caes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr694batodayu" + + "kindustriaveroykeniwaizumiotsukumiyamazonawsadodgemologicallilly" + + "ombolzanord-frontiereviewskrakowebhostingjerstadotsuruokakegawau" + + "kraanghkepnogifts3-ap-southeast-2xn--czrs0tulanxesslupskommunalf" + + "orbundxn--czru2dxn--czrw28batsfjordishakotanhlfanhs3-us-gov-west" + + "-1xn--d1acj3bauhausposts-and-telecommunicationsncfdisrechtranaka" + + "muratajimidoriopretogoldpoint2thisamitsukeu-3xn--d1alfaromeoxn--" + + "d1atuneslzxn--d5qv7z876clanbibaidarmeniaxn--davvenjrga-y4axn--dj" + + "rs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluwerxn--drbak-w" + + "uaxn--dyry-iraxn--e1a4cldmailuccapetownnews-stagingrongaxn--eckv" + + "dtc9dxn--efvn9somaxn--efvy88hair-surveillancexn--ehqz56nxn--elqq" + + "16hakatanortonxn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct429k" + + "osakaerodromegallupinbarreauctionflfanfshostrowiecaseihichisobet" + + "suldalimoliserniaustraliaisondriobranconagawalesundemoneyokozebi" + + "nordreisa-geekaragandamusementashkentatamotors3-ap-southeast-1pa" + + "sswordd-dnshome-webservercellikes-piedmonticellocus-4xn--fhbeiar" + + "nxn--finny-yuaxn--fiq228c5hsomnarviikamitondabayashiogamagorizia" + + "xn--fiq64bbcasertairavennagatorockartuzyukuhashimoichinosekigaha" + + "ravocatanzarowebredirectmetacentrumetlifeinsurancempresashibetsu" + + "kuiitatebayashiibajddarchitecturealtydalipayomitanoceanographiqu" + + "emrevistanbulminamidaitomandalimanowarudaurskog-holandroverhalla" + + "-speziajudygarlanddnss3-ap-south-1kappchizippodhaleangaviikadena" + + "amesjevuemielno-ip6xn--fiqs8sooxn--fiqz9sopotritonxn--fjord-lrax" + + "n--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn--frde" + + "-grandrapidsor-odalxn--frna-woaraisaijosoyrorosor-varangerxn--fr" + + "ya-hraxn--fzc2c9e2clickashiwaraxn--fzys8d69uvgmailxn--g2xx48clin" + + "ichernihivguccieszynissandnessjoenissayokkaichiropracticheltenha" + + "m-radio-opencraftrainingripescaravantaaxn--gckr3f0fbxosaxoxn--ge" + + "crj9cliniquenoharaxn--ggaviika-8ya47hakodatexn--gildeskl-g0axn--" + + "givuotna-8yasakaiminatoyookannamilanotteroyxn--gjvik-wuaxn--gk3a" + + "t1exn--gls-elacaixaxn--gmq050is-very-evillagexn--gmqw5axn--h-2fa" + + "ilxn--h1aeghakonexn--h2breg3evenesorfoldxn--h2brj9c8clintonoshoe" + + "santoandreamhostersanukis-a-designerimarnardalucernexn--h3cuzk1d" + + "igitalxn--hbmer-xqaxn--hcesuolo-7ya35bbtattoolsztynsettlers3-us-" + + "west-1xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta-s4accident-pre" + + "vention-webhopenairbusantiquest-a-la-maisondre-landroidvagsoyeri" + + "cssonyoursidealerimo-i-ranadexeterxn--hnefoss-q1axn--hobl-iraxn-" + + "-holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-" + + "54axn--i1b6b1a6a2exn--imr513nxn--indery-fyasugivingxn--io0a7is-v" + + "ery-goodhandsonxn--j1aefedorapeopleikangerxn--j1amhakubahccavuot" + + "nagareyamakeupowiathletajimabaridagawalbrzycharternidxn--j6w193g" + + "xn--jlq61u9w7bbvacationswatch-and-clockerhcloudns3-us-west-2xn--" + + "jlster-byasuokanraxn--jrpeland-54axn--jvr189mishimasudaxn--k7yn9" + + "5exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-w" + + "oaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--3e0b707exn--koluok" + + "ta-7ya57hakuis-a-liberalxn--kprw13dxn--kpry57dxn--kpu716fedorapr" + + "ojectransportexn--kput3is-very-nicexn--krager-gyatomitamamuraxn-" + + "-kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49j" + + "dfastlylbarcelonagasakikuchikuseikarugamvikarasjokarasuyamarugam" + + "e-hostrolekamiminers3-external-1xn--ksnes-uuaxn--kvfjord-nxaxn--" + + "kvitsy-fyatsukanumazuryxn--kvnangen-k0axn--l-1fairwindsorocabals" + + "fjordxn--l1accentureklamborghinikis-very-sweetpepperxn--laheadju" + + "-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagavi" + + "ika-52bentleyurihonjournalistgoryusuharavoues3-eu-west-2xn--lesu" + + "nd-huaxn--lgbbat1ad8jelenia-goraxn--lgrd-poacctunkongsbergxn--lh" + + "ppi-xqaxn--linds-pramericanarturystykanoyaltakasakiyokawaraxn--l" + + "ns-qlapyatigorskoseis-a-studentalxn--loabt-0qaxn--lrdal-sraxn--l" + + "renskog-54axn--lt-liaclothingdustkagoshimalselvendrellukowhaling" + + "rossetouchijiwadegreexn--lten-granexn--lury-iraxn--m3ch0j3axn--m" + + "ely-iraxn--merker-kuaxn--mgb2ddesorreisahayakawakamiichikawamisa" + + "toursimple-urlxn--mgb9awbfeiraquarellebesbyglandynulvikasuyanaga" + + "waxn--mgba3a3ejtuscanyxn--mgba3a4f16axn--mgba3a4franamizuholding" + + "smilevangerxn--mgba7c0bbn0axn--mgbaakc7dvfermochizukirkenesbscho" + + "koladenxn--mgbaam7a8hakusandiegooglecodespotrentino-alto-adigexn" + + "--mgbab2bdxn--mgbai9a5eva00beppublishproxyzjampagefrontappalmspr" + + "ingsakerxn--mgbai9azgqp6jeonnamerikawauexn--mgbayh7gpalacexn--mg" + + "bb9fbpobanazawaxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mg" + + "berp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--" + + "mgbpl2fhskoleirfjordxn--mgbqly7c0a67fbcngroundhandlingroznyxn--m" + + "gbqly7cvafranziskanerdpolicexn--mgbt3dhdxn--mgbtf8flatangerxn--m" + + "gbtx2beskidyn-o-saurlandes3-website-ap-northeast-1xn--mgbx4cd0ab" + + "bvieeexn--mix082ferraraxn--mix891ferrarittoguraxn--mjndalen-64ax" + + "n--mk0axindigenaklodzkochikushinonsenergyxn--mk1bu44cnsaobernard" + + "ownloadyndns-picsaogoncartierxn--mkru45is-with-thebandovre-eiker" + + "xn--mlatvuopmi-s4axn--mli-tlaquilanciaxn--mlselv-iuaxn--moreke-j" + + "uaxn--mori-qsakuragawaxn--mosjen-eyawaraxn--mot-tlarvikosherbroo" + + "kegawaxn--mre-og-romsdal-qqbestbuyshouses3-website-ap-southeast-" + + "1xn--msy-ula0haldenxn--mtta-vrjjat-k7afamilycompanycntoystre-sli" + + "drettozawaxn--muost-0qaxn--mxtq1missilezajsklabudhabikinokawabar" + + "thaebaruminamiuonumassa-carrara-massacarraramassabusinessebykleg" + + "allocalhistoryggeelvinckaufenxn--ngbc5azdxn--ngbe9e0axn--ngbrxn-" + + "-3hcrj9cistrondheimmobilienxn--nit225koshimizumakizunokunimimata" + + "kasugais-a-teacherkassymantechnologyxn--nmesjevuemie-tcbaltimore" + + "-og-romsdalpha-myqnapcloudaccesscambridgestoneuesortlandxn--nnx3" + + "88axn--nodessakuraisleofmanchesterxn--nqv7fs00emaxn--nry-yla5gxn" + + "--ntso0iqx3axn--ntsq17gxn--nttery-byaeserveexchangexn--nvuotna-h" + + "waxn--nyqy26axn--o1achattanooganordkappimientakazakis-leetnedalx" + + "n--o3cw4halsaintlouis-a-anarchistoireggiocalabriaxn--o3cyx2axn--" + + "od0algxn--od0aq3betainaboxfusejnynysagaeroclubmedecincinnationwi" + + "dealstahaugesunderseaportsinfolldalabamagasakishimabaraogakibich" + + "uomutashinaindustriesteambulanceu-4xn--ogbpf8flekkefjordxn--oppe" + + "grd-ixaxn--ostery-fyawatahamaxn--osyro-wuaxn--p1acferreroticampo" + + "bassociatestinguovdageaidnuslivinghistoryxn--p1aissmarterthanyou" + + "xn--pbt977coguchikuzenxn--pgbs0dhlxn--porsgu-sta26fetsundynv6xn-" + + "-pssu33lxn--pssy2uxn--q9jyb4collectionxn--qcka1pmckinseyxn--qqqt" + + "11misugitokuyamatsumaebashikshacknetrentino-suedtirolxn--qxamune" + + "ustarhubsoruminternationalfirearmshintokushimaxn--rady-iraxn--rd" + + "al-poaxn--rde-ulavagiskexn--rdy-0nabariwchonanbuildingroks-thisa" + + "yamanobeokakudamatsuexn--rennesy-v1axn--rhkkervju-01aflakstadaok" + + "agakicks-assedicolognextdirectozsdeloittemp-dnsaotomelhusdecorat" + + "iveartsapodlasiellaktyubinskiptveterinairealtorlandyndns-remotew" + + "dyndns-serverdaluroyxn--rholt-mragowoodsideltaitogliattiresouthc" + + "arolinarvikomonoxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa" + + "-5nativeamericanantiquesouthwestfalenxn--risr-iraxn--rland-uuaxn" + + "--rlingen-mxaxn--rmskog-byaxn--rny31hammarfeastafricapebretonami" + + "crosoftbankautokeinowruzhgorodeoxn--rovu88bhzcasinorddalindaskoy" + + "abearalvahkijobserverisignieznogataijinfinitintuitaxihuanikkoebe" + + "nhavnikolaevents3-website-ap-southeast-2xn--rros-granvindafjordx" + + "n--rskog-uuaxn--rst-0naturalhistorymuseumcenterxn--rsta-francais" + + "eharaxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithruhere" + + "dumbrellajollamericanexpressexyxn--s9brj9colonialwilliamsburgrpa" + + "rocherkasyno-dsapporoxn--sandnessjen-ogbizxn--sandy-yuaxn--seral" + + "-lraxn--ses554gxn--sgne-gratangenxn--skierv-utazassnasabaerobati" + + "cketsowaxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxax" + + "n--slat-5naturalsciencesnaturellespjelkavikomorotsukamiokamikita" + + "yamatsuris-a-socialistcgrouphdxn--slt-elabcgxn--smla-hraxn--smna" + + "-gratis-a-bulls-fanxn--snase-nraxn--sndre-land-0cbremangerxn--sn" + + "es-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1" + + "axn--sr-varanger-ggbieigersundivtasvuodnakaniikawatanaguraxauste" + + "vollavangenaval-d-aosta-valleyokotebinagisoccertificationavigati" + + "onavoibestadds3-ca-central-1xn--srfold-byaxn--srreisa-q1axn--sru" + + "m-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbielawal" + + "terxn--stre-toten-zcbspreadbettingxn--t60b56axn--tckweatherchann" + + "elxn--tiq49xqyjetztrentino-sudtirolxn--tjme-hraxn--tn0agrinet-fr" + + "eakspydebergxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trgstad-r" + + "1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvaroyxn--uc0ay4" + + "axn--uist22hamurakamigoris-a-libertarianxn--uisz3gxn--unjrga-rta" + + "obaomoriguchiharagusartsrlxn--unup4yxn--uuwu58axn--vads-jraxn--v" + + "ard-jraxn--vegrshei-c0axn--vermgensberater-ctbiellaakesvuemielec" + + "ceverbankareliancevje-og-hornnes3-website-eu-west-1xn--vermgensb" + + "eratung-pwbieszczadygeyachimataikikugawarszawashingtondclkariyam" + + "elbournexn--vestvgy-ixa6oxn--vg-yiabkhaziaxn--vgan-qoaxn--vgsy-q" + + "oa0jevnakershuscultureggioemiliaromagnamsosnowiechoseiroumuenche" + + "nxn--vgu402coloradoplateaudioxn--vhquvbarrell-of-knowledgeometre" + + "-experts-comptables3-us-east-2xn--vler-qoaxn--vre-eiker-k8axn--v" + + "rggt-xqadxn--vry-yla5gxn--vuq861bievatmallorcadaques3-website-sa" + + "-east-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1columb" + + "usheyxn--wgbl6axn--xhq521bifukagawashtenawdev-myqnapcloudapplebt" + + "imnetzlgjovikarlsoyusuisserveftpanamatta-varjjatjeldsundivttasvu" + + "otnakanojohanamakinoharaxn--xkc2al3hye2axn--xkc2dl3a5ee0hangglid" + + "ingxn--y9a3aquariumitourismolangevagrarchaeologyeongbukmpspbaref" + + "ootballfinanzgorzeleccoffeedbackplaneapplinziiyamanouchikuhokury" + + "ugasakitchenayorovigovtateshinanomachimkentateyamaustinnavuotnar" + + "ashinobninsk12xn--yer-znaturbruksgymnxn--yfro4i67oxn--ygarden-p1" + + "axn--ygbi2ammxn--3oq18vl8pn36axn--ystre-slidre-ujbihorologyuucon" + + "nectjmaxxxfinityuzawaxn--zbx025dxn--zf0ao64axn--zf0avxn--3pxu8ko" + + "nyvelolxn--zfr164bikedagestangeorgeorgiaxperiaxz" + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [ 0 bits] unused +// [10 bits] children index +// [ 1 bits] ICANN bit +// [15 bits] text index +// [ 6 bits] text length +var nodes = [...]uint32{ + 0x31a803, + 0x284d84, + 0x382f06, + 0x2f37c3, + 0x2f37c6, + 0x37af86, + 0x3a7a03, + 0x31b604, + 0x322487, + 0x382b48, + 0x1a00742, + 0x32e147, + 0x3672c9, + 0x2b4eca, + 0x2b4ecb, + 0x232183, + 0x2ab9c6, + 0x238485, + 0x1e01482, + 0x203b44, + 0x260543, + 0x201485, + 0x2215842, + 0x332603, + 0x271b0c4, + 0x31fe05, + 0x2a00102, + 0x38194e, + 0x256483, + 0x39cbc6, + 0x2e03d02, + 0x2c8047, + 0x23e146, + 0x3205c42, + 0x257dc3, + 0x257dc4, + 0x357406, + 0x205d08, + 0x277146, + 0x302004, + 0x3600602, + 0x33acc9, + 0x211307, + 0x347986, + 0x3c1109, + 0x2c78c8, + 0x331004, + 0x241286, + 0x230106, + 0x3a00582, + 0x3a234f, + 0x21f4ce, + 0x226484, + 0x2c1545, + 0x31a705, + 0x2f6809, + 0x244689, + 0x357c07, + 0x22bbc6, + 0x206dc3, + 0x3e03942, + 0x21d6c3, + 0x220d4a, + 0x21fbc3, + 0x3bde45, + 0x2f2542, + 0x370749, + 0x4200282, + 0x216c84, + 0x2ef006, + 0x2bb6c5, + 0x2d7c04, + 0x4a14344, + 0x205583, + 0x2374c4, + 0x4e02b82, + 0x267184, + 0x527eac4, + 0x39004a, + 0x5600cc2, + 0x35c447, + 0x2774c8, + 0x6207ec2, + 0x340687, + 0x2bde44, + 0x2bde47, + 0x3b9605, + 0x339407, + 0x31ca86, + 0x325384, + 0x3314c5, + 0x298307, + 0x720fc02, + 0x335a43, + 0x21ab82, + 0x3aae43, + 0x7612442, + 0x27f485, + 0x7a023c2, + 0x293584, + 0x276005, + 0x2263c7, + 0x20974e, + 0x2391c4, + 0x238cc4, + 0x20b583, + 0x364209, + 0x30e2cb, + 0x259e48, + 0x3c0ec8, + 0x316488, + 0x215cc8, + 0x330e4a, + 0x339307, + 0x309d86, + 0x7e6e442, + 0x345243, + 0x355943, + 0x35d344, + 0x3a7a43, + 0x32f6c3, + 0x172a782, + 0x8203102, + 0x27b385, + 0x28df86, + 0x2a9f04, + 0x369187, + 0x23ce86, + 0x3806c4, + 0x3806c7, + 0x205a83, + 0x86c31c2, + 0x8b14902, + 0x8e21182, + 0x221186, + 0x9200882, + 0x286c45, + 0x32bcc3, + 0x3c6444, + 0x2e3804, + 0x2e3805, + 0x2053c3, + 0x96b6c03, + 0x9a09342, + 0x289b05, + 0x289b0b, + 0x20bd06, + 0x331f4b, + 0x22aa44, + 0x20cec9, + 0x20d784, + 0x9e0d9c2, + 0x20ef03, + 0x20fec3, + 0x1610702, + 0x2fb9c3, + 0x21070a, + 0xa200302, + 0x203dc5, + 0x2d400a, + 0x243384, + 0x210f03, + 0x212984, + 0x213b83, + 0x213b84, + 0x213b87, + 0x2153c5, + 0x215705, + 0x216d46, + 0x2170c6, + 0x217d43, + 0x21a708, + 0x212d43, + 0xa6004c2, + 0x22c3c8, + 0x3878cb, + 0x223088, + 0x225f06, + 0x227447, + 0x22a1c8, + 0xb604002, + 0xbaf21c2, + 0x23b388, + 0x3031c7, + 0x207a45, + 0x207a48, + 0x383c48, + 0x2fa9c3, + 0x22f384, + 0x35d382, + 0xbe2f582, + 0xc201bc2, + 0xca30502, + 0x230503, + 0xce03cc2, + 0x31b5c3, + 0x2f1b84, + 0x20bf83, + 0x335e04, + 0x322b8b, + 0x237c03, + 0x2db106, + 0x237c04, + 0x2e21ce, + 0x2669c5, + 0x33d7c8, + 0x251107, + 0x25110a, + 0x2342c3, + 0x34f747, + 0x30e485, + 0x2342c4, + 0x2d4b86, + 0x2d4b87, + 0x2d0204, + 0x37d587, + 0x209a84, + 0x340c44, + 0x340c46, + 0x25d944, + 0x39db46, + 0x207803, + 0x207808, + 0x21a988, + 0x238c83, + 0x2fb983, + 0x3a8c04, + 0x3ae4c3, + 0xd24d5c2, + 0xd6d2fc2, + 0x2083c3, + 0x205646, + 0x241383, + 0x354bc4, + 0xda4b182, + 0x24cb83, + 0x339c03, + 0x218882, + 0xde03c02, + 0x2c0b06, + 0x23c007, + 0x2eab45, + 0x38a504, + 0x2981c5, + 0x27e687, + 0x2d84c9, + 0x2dcd46, + 0x307788, + 0x2eaa46, + 0xe2010c2, + 0x2f1408, + 0x2f3e06, + 0x223a85, + 0x30fe07, + 0x310344, + 0x310345, + 0x2010c4, + 0x2010c8, + 0xe619382, + 0xea02642, + 0x3292c6, + 0x202648, + 0x34d485, + 0x34df06, + 0x350108, + 0x36d548, + 0xee1f8c5, + 0xf21d0c4, + 0x38ca87, + 0xf60d642, + 0xfaefa02, + 0x10e02c42, + 0x2ef105, + 0x373905, + 0x3c1546, + 0x3208c7, + 0x3973c7, + 0x1160be03, + 0x26f507, + 0x2b99c8, + 0x231a09, + 0x381b07, + 0x2321c7, + 0x232b08, + 0x233306, + 0x233dc6, + 0x234a0c, + 0x235e4a, + 0x2364c7, + 0x23834b, + 0x23be47, + 0x23be4e, + 0x1a23d104, + 0x23d744, + 0x23e847, + 0x2616c7, + 0x243806, + 0x243807, + 0x243c87, + 0x1a630a42, + 0x2449c6, + 0x2449ca, + 0x244f4b, + 0x246d07, + 0x2478c5, + 0x247c03, + 0x248146, + 0x248147, + 0x322643, + 0x1aa022c2, + 0x248a4a, + 0x1af68802, + 0x1b24d602, + 0x1b64afc2, + 0x1ba3e242, + 0x24cc85, + 0x24d2c4, + 0x1c204ac2, + 0x267205, + 0x245543, + 0x20d885, + 0x215bc4, + 0x20f984, + 0x209d86, + 0x2505c6, + 0x289d03, + 0x3b6d84, + 0x3ac2c3, + 0x1ca02e02, + 0x3582c4, + 0x3582c6, + 0x38d005, + 0x36e3c6, + 0x30ff08, + 0x227b84, + 0x397848, + 0x399a45, + 0x311708, + 0x36c6c6, + 0x265847, + 0x27b984, + 0x27b986, + 0x26f803, + 0x3917c3, + 0x20b648, + 0x31c684, + 0x354fc7, + 0x2d2906, + 0x2d2909, + 0x20a1c8, + 0x317908, + 0x338884, + 0x2067c3, + 0x23dd42, + 0x1da4c3c2, + 0x1de14202, + 0x207583, + 0x1e20a502, + 0x3225c4, + 0x2440c6, + 0x335b45, + 0x283403, + 0x234ec4, + 0x2b1a07, + 0x336bc3, + 0x37cfc8, + 0x21ea85, + 0x25f7c3, + 0x275f85, + 0x2760c4, + 0x2f9c06, + 0x222704, + 0x225986, + 0x226306, + 0x357d84, + 0x23c203, + 0x1e614582, + 0x238ac5, + 0x2011c3, + 0x1ea05ec2, + 0x2319c3, + 0x21c8c5, + 0x237583, + 0x237589, + 0x1ee01f02, + 0x1f608ac2, + 0x289645, + 0x219286, + 0x37c8c6, + 0x2bfcc8, + 0x2bfccb, + 0x20568b, + 0x21c145, + 0x2ead45, + 0x2c3909, + 0x1603142, + 0x357f48, + 0x23e504, + 0x1fe01b02, + 0x20aac3, + 0x20661886, + 0x224fc8, + 0x20a003c2, + 0x307348, + 0x20e0a6c2, + 0x23994a, + 0x212c8d03, + 0x39f286, + 0x3b5048, + 0x389ac8, + 0x3ba046, + 0x377d47, + 0x3a2547, + 0x23fe0a, + 0x243404, + 0x352f84, + 0x366b89, + 0x21ba1d45, + 0x21f6c6, + 0x200143, + 0x255184, + 0x21e25784, + 0x323307, + 0x22f607, + 0x364044, + 0x2d3345, + 0x3c1608, + 0x37b847, + 0x38fc87, + 0x22208882, + 0x23b9c4, + 0x28e948, + 0x24e244, + 0x252944, + 0x253005, + 0x253147, + 0x22b509, + 0x254004, + 0x2547c9, + 0x254a08, + 0x254f04, + 0x254f07, + 0x226553c3, + 0x255547, + 0x1626d02, + 0x16ad402, + 0x255e86, + 0x2564c7, + 0x256b04, + 0x258487, + 0x258f47, + 0x259783, + 0x329982, + 0x205dc2, + 0x270003, + 0x270004, + 0x27000b, + 0x3c0fc8, + 0x25f184, + 0x25ad05, + 0x25cac7, + 0x25e5c5, + 0x30590a, + 0x25f0c3, + 0x22a12c42, + 0x212c44, + 0x261489, + 0x265183, + 0x265247, + 0x2f61c9, + 0x336308, + 0x25d1c3, + 0x27a247, + 0x27aa89, + 0x26be83, + 0x281b04, + 0x283c89, + 0x287dc6, + 0x2266c3, + 0x2039c2, + 0x241243, + 0x2ad207, + 0x383fc5, + 0x340346, + 0x268984, + 0x2dba05, + 0x220d03, + 0x217f86, + 0x20d0c2, + 0x3a3984, + 0x22e2ab02, + 0x22ab03, + 0x23201802, + 0x252843, + 0x217544, + 0x217547, + 0x3c6746, + 0x255e42, + 0x23629942, + 0x384384, + 0x23a30b82, + 0x23e01a42, + 0x337304, + 0x337305, + 0x201a45, + 0x35ab46, + 0x24208742, + 0x208745, + 0x2100c5, + 0x210ac3, + 0x213d06, + 0x214885, + 0x221102, + 0x34db45, + 0x221104, + 0x227ac3, + 0x227d03, + 0x2460ad82, + 0x298507, + 0x33a504, + 0x33a509, + 0x255084, + 0x281903, + 0x35b109, + 0x281908, + 0x24b0cc04, + 0x30cc06, + 0x2a2c83, + 0x20cb03, + 0x30e843, + 0x24eefe82, + 0x375502, + 0x25201402, + 0x32d8c8, + 0x327088, + 0x3a8046, + 0x2544c5, + 0x34f5c5, + 0x31e0c7, + 0x229985, + 0x25cd82, + 0x25694cc2, + 0x1602202, + 0x240a88, + 0x34e285, + 0x27ca84, + 0x2e7205, + 0x241d87, + 0x25efc4, + 0x248942, + 0x25a2dac2, + 0x33e704, + 0x226ec7, + 0x289fc7, + 0x3393c4, + 0x291003, + 0x238bc4, + 0x238bc8, + 0x234106, + 0x2d4a0a, + 0x22b3c4, + 0x291508, + 0x288204, + 0x227546, + 0x294c84, + 0x2ef406, + 0x33a7c9, + 0x26d007, + 0x34e1c3, + 0x25eebfc2, + 0x331203, + 0x207c82, + 0x2625c982, + 0x30cf06, + 0x371e48, + 0x2a44c7, + 0x2f7209, + 0x290ac9, + 0x2a61c5, + 0x2a73c9, + 0x2a7b85, + 0x2a7cc9, + 0x2a9045, + 0x2aa008, + 0x266598c4, + 0x26a598c7, + 0x232583, + 0x2aa207, + 0x232586, + 0x2aa5c7, + 0x2a0f45, + 0x2ca8c3, + 0x26e35c02, + 0x2ea984, + 0x27230bc2, + 0x276552c2, + 0x2f3ac6, + 0x277445, + 0x2acac7, + 0x326403, + 0x32f644, + 0x2130c3, + 0x23b0c3, + 0x27a07d02, + 0x28206202, + 0x37b084, + 0x329943, + 0x24b905, + 0x28603882, + 0x28e00c42, + 0x2e0586, + 0x31c7c4, + 0x385444, + 0x38544a, + 0x29601342, + 0x38e2ca, + 0x39e948, + 0x29a6ff84, + 0x201fc3, + 0x208c43, + 0x3165c9, + 0x267709, + 0x2a6e06, + 0x29e14bc3, + 0x214bc5, + 0x39434d, + 0x39eb06, + 0x20e84b, + 0x2a200802, + 0x220b88, + 0x2ca1a802, + 0x2ce00942, + 0x2c9a85, + 0x2d205842, + 0x21b147, + 0x2b0747, + 0x214a43, + 0x348148, + 0x2d601102, + 0x29f384, + 0x291203, + 0x325545, + 0x395983, + 0x245646, + 0x223504, + 0x2fb943, + 0x2aec03, + 0x2da03202, + 0x2eacc4, + 0x3af385, + 0x2ace07, + 0x277e03, + 0x2ad9c3, + 0x2ae803, + 0x16ae8c2, + 0x2ae8c3, + 0x2aeb83, + 0x2de0b0c2, + 0x39e304, + 0x2507c6, + 0x22a443, + 0x2af343, + 0x2e2b0102, + 0x2b0108, + 0x2b03c4, + 0x2ee8c6, + 0x256947, + 0x3845c6, + 0x2a4f04, + 0x3be01ec2, + 0x23244b, + 0x2ff28e, + 0x219e0f, + 0x2c7b83, + 0x3c65fe82, + 0x1647302, + 0x3ca00a82, + 0x25b4c3, + 0x205983, + 0x2d8746, + 0x2f1946, + 0x3c2147, + 0x2f9084, + 0x3ce193c2, + 0x3d21edc2, + 0x2425c5, + 0x2e44c7, + 0x37fd86, + 0x3d64d542, + 0x30de04, + 0x2b7b43, + 0x3da09602, + 0x3df63443, + 0x2b8444, + 0x2bd289, + 0x16c2482, + 0x3e20dd82, + 0x327e05, + 0x3e6c2702, + 0x3ea00682, + 0x352307, + 0x214fc9, + 0x36754b, + 0x3a2305, + 0x26ad09, + 0x37e806, + 0x20bd47, + 0x3ee074c4, + 0x348c89, + 0x337b07, + 0x224c87, + 0x230803, + 0x2afc46, + 0x30a7c7, + 0x20fbc3, + 0x2f0d46, + 0x3f6038c2, + 0x3fa0e402, + 0x3bec83, + 0x32f245, + 0x332807, + 0x222386, + 0x383f45, + 0x2f3f04, + 0x278f45, + 0x2f2144, + 0x3fe00f82, + 0x341587, + 0x2f2984, + 0x26a444, + 0x34694d, + 0x26a449, + 0x230b08, + 0x25c404, + 0x335ec5, + 0x20a047, + 0x341144, + 0x23cf47, + 0x204cc5, + 0x402a4e44, + 0x30bcc5, + 0x263e44, + 0x390706, + 0x3206c5, + 0x406291c2, + 0x210fc4, + 0x210fc5, + 0x35d8c6, + 0x343b85, + 0x25d144, + 0x3c6103, + 0x20eb46, + 0x22b705, + 0x22f045, + 0x3207c4, + 0x22b443, + 0x22b44c, + 0x40aacf02, + 0x40e0a5c2, + 0x41201542, + 0x20f003, + 0x20f004, + 0x41604482, + 0x30ae88, + 0x340405, + 0x236184, + 0x243686, + 0x41a0e302, + 0x41e1de42, + 0x422000c2, + 0x2b2cc5, + 0x294346, + 0x229304, + 0x357946, + 0x35c206, + 0x222a83, + 0x4272850a, + 0x26b085, + 0x28b003, + 0x228606, + 0x304789, + 0x228607, + 0x292288, + 0x2c7789, + 0x31d348, + 0x250e46, + 0x209703, + 0x42a6f582, + 0x392c08, + 0x42e54ac2, + 0x43201e42, + 0x20be83, + 0x2d8345, + 0x26ba04, + 0x3b6fc9, + 0x2ee004, + 0x21b388, + 0x20dc03, + 0x323004, + 0x2a5fc3, + 0x2192c8, + 0x346887, + 0x43a25242, + 0x290ec2, + 0x31a685, + 0x39cf89, + 0x21f743, + 0x27bfc4, + 0x394304, + 0x20a0c3, + 0x27d04a, + 0x43f7c0c2, + 0x44210f82, + 0x2c3143, + 0x37ea83, + 0x1600082, + 0x200083, + 0x44603282, + 0x44a05a02, + 0x44e1a484, + 0x322046, + 0x2e07c6, + 0x245e44, + 0x277043, + 0x345c03, + 0x2ec1c3, + 0x2452c6, + 0x341d05, + 0x2c32c7, + 0x2c6445, + 0x2c7d86, + 0x2c8708, + 0x2c8906, + 0x24efc4, + 0x29960b, + 0x2cb583, + 0x2cb585, + 0x2cba08, + 0x21a202, + 0x352602, + 0x4524cd02, + 0x4560d682, + 0x219403, + 0x45a6cd82, + 0x26cd83, + 0x2cbd04, + 0x2cc543, + 0x462168c2, + 0x466d0e06, + 0x25e446, + 0x46ad0f42, + 0x46e0ff02, + 0x47227d42, + 0x4763a3c2, + 0x47a1b5c2, + 0x47e047c2, + 0x20dec3, + 0x358645, + 0x2b6306, + 0x48226444, + 0x38ce0a, + 0x3a0546, + 0x21c384, + 0x277943, + 0x48e02f02, + 0x2032c2, + 0x26fb43, + 0x4920ec83, + 0x2e6747, + 0x3205c7, + 0x4aa70107, + 0x393f87, + 0x22cd03, + 0x3176ca, + 0x251304, + 0x397504, + 0x39750a, + 0x247705, + 0x4ae1f682, + 0x258443, + 0x4b202002, + 0x228803, + 0x3311c3, + 0x4ba02742, + 0x26f484, + 0x220704, + 0x2046c5, + 0x3080c5, + 0x34e4c6, + 0x34e846, + 0x4be53982, + 0x4c201382, + 0x2f8545, + 0x25e152, + 0x33f206, + 0x25e8c3, + 0x39d486, + 0x2a1f45, + 0x1604842, + 0x54610c82, + 0x35e3c3, + 0x210c83, + 0x27e483, + 0x54a0c502, + 0x381c43, + 0x54e06e02, + 0x200843, + 0x39e348, + 0x285603, + 0x2a6046, + 0x23ecc7, + 0x30b986, + 0x30b98b, + 0x21c2c7, + 0x2ea784, + 0x55601c82, + 0x340285, + 0x55a09cc3, + 0x292c83, + 0x239b45, + 0x3175c3, + 0x55f175c6, + 0x3580ca, + 0x245ac3, + 0x23e004, + 0x202586, + 0x223e86, + 0x56241d03, + 0x32f507, + 0x2a6d07, + 0x29ae85, + 0x311986, + 0x22b743, + 0x58e13f43, + 0x59206f02, + 0x21a244, + 0x207609, + 0x240887, + 0x229a85, + 0x247d04, + 0x26c7c8, + 0x273b85, + 0x59676405, + 0x284e49, + 0x347a43, + 0x24d584, + 0x59a0b182, + 0x219603, + 0x59e94742, + 0x299986, + 0x162bac2, + 0x5a2a47c2, + 0x2b2bc8, + 0x3a76c3, + 0x30bc07, + 0x2ce245, + 0x2b2785, + 0x2d8e4b, + 0x2d9846, + 0x2d9046, + 0x2dc486, + 0x279f04, + 0x2dc6c6, + 0x5a6f0248, + 0x237cc3, + 0x201f83, + 0x201f84, + 0x2ddbc4, + 0x2dde87, + 0x2df2c5, + 0x5aadf402, + 0x5ae08302, + 0x208305, + 0x2bb184, + 0x2e298b, + 0x2e3708, + 0x298804, + 0x230982, + 0x5b64e882, + 0x24e883, + 0x2e3f04, + 0x2e41c5, + 0x2e4d07, + 0x2e6d44, + 0x21c184, + 0x5ba057c2, + 0x36b449, + 0x2e84c5, + 0x3a25c5, + 0x2e9045, + 0x5be19543, + 0x2e9d04, + 0x2e9d0b, + 0x2ea0c4, + 0x2ea38b, + 0x2ec105, + 0x219f4a, + 0x2ecec8, + 0x2ed0ca, + 0x2ed983, + 0x2ed98a, + 0x5c21fc42, + 0x5c648602, + 0x209943, + 0x5caf1382, + 0x2f1383, + 0x5cf6c182, + 0x5d32c442, + 0x2f1fc4, + 0x21a846, + 0x357685, + 0x2f3d83, + 0x31adc6, + 0x34f085, + 0x250ac4, + 0x5d600382, + 0x2aefc4, + 0x2c358a, + 0x398a07, + 0x3477c6, + 0x24f3c7, + 0x244a03, + 0x2b8488, + 0x3a1f8b, + 0x2bd905, + 0x27c785, + 0x27c786, + 0x2dd704, + 0x3b5288, + 0x21d343, + 0x230004, + 0x230007, + 0x2f4a06, + 0x31fa06, + 0x2e200a, + 0x254844, + 0x31104a, + 0x5db364c6, + 0x3364c7, + 0x25ad87, + 0x273544, + 0x273549, + 0x250485, + 0x31cf4b, + 0x2e1283, + 0x225b43, + 0x5de20b43, + 0x2344c4, + 0x5e200982, + 0x3a3006, + 0x5e6ca645, + 0x39d6c5, + 0x258c46, + 0x29cd44, + 0x5ea07bc2, + 0x247c44, + 0x5ee16f02, + 0x224645, + 0x23f4c4, + 0x228d83, + 0x5f610cc2, + 0x210cc3, + 0x267b86, + 0x5fa00a02, + 0x2073c8, + 0x228484, + 0x228486, + 0x37f306, + 0x25cb84, + 0x20eac5, + 0x21cfc8, + 0x220f87, + 0x2227c7, + 0x2227cf, + 0x28e846, + 0x309bc3, + 0x398184, + 0x233744, + 0x2101c3, + 0x227684, + 0x34fd84, + 0x5fe030c2, + 0x289a43, + 0x372e83, + 0x60207c02, + 0x25c503, + 0x322683, + 0x21578a, + 0x207c07, + 0x2534cc, + 0x253786, + 0x255246, + 0x256647, + 0x60632f47, + 0x25c889, + 0x22c504, + 0x25eb04, + 0x60a09f82, + 0x60e01282, + 0x2e23c6, + 0x32f304, + 0x2d3146, + 0x2333c8, + 0x239444, + 0x21b186, + 0x37c885, + 0x285048, + 0x205883, + 0x28a685, + 0x290cc3, + 0x3a26c3, + 0x3a26c4, + 0x212c03, + 0x6125fd82, + 0x61603a42, + 0x2e1149, + 0x299885, + 0x2a1084, + 0x2a4a45, + 0x212384, + 0x393607, + 0x353f05, + 0x2702c4, + 0x2702c8, + 0x2e61c6, + 0x2e9f84, + 0x2ede88, + 0x2f27c7, + 0x61a04042, + 0x316244, + 0x210284, + 0x224e87, + 0x61e04044, + 0x2c9002, + 0x6220ed42, + 0x221b83, + 0x2d37c4, + 0x29bb43, + 0x2aacc5, + 0x6262e642, + 0x2fddc5, + 0x23a382, + 0x390c85, + 0x372005, + 0x62a04e02, + 0x339b84, + 0x62e06a42, + 0x3aba46, + 0x3a7346, + 0x39d0c8, + 0x2be888, + 0x2f3a44, + 0x303685, + 0x316049, + 0x2eadc4, + 0x358084, + 0x2b6983, + 0x6320fd85, + 0x2c2547, + 0x21fc84, + 0x3ae54d, + 0x2f4682, + 0x3b35c3, + 0x2f4683, + 0x63601b42, + 0x396e45, + 0x223747, + 0x2b9604, + 0x394047, + 0x2c7989, + 0x2c36c9, + 0x275247, + 0x202bc3, + 0x3a7508, + 0x25b949, + 0x2f5487, + 0x2f5805, + 0x2f6706, + 0x2f6d46, + 0x2f6ec5, + 0x26a545, + 0x63a00d42, + 0x2b7685, + 0x2b3a08, + 0x2c08c6, + 0x63e872c7, + 0x2ec344, + 0x2b8047, + 0x2f9206, + 0x6420a402, + 0x35d5c6, + 0x2fc9ca, + 0x2fd245, + 0x646da942, + 0x64a4eb02, + 0x30ab06, + 0x386548, + 0x64e8a187, + 0x6523c902, + 0x215c43, + 0x20c246, + 0x229144, + 0x3b2f86, + 0x201746, + 0x34290a, + 0x325e45, + 0x3559c6, + 0x39ec43, + 0x39ec44, + 0x202c02, + 0x31c743, + 0x6560f042, + 0x30b743, + 0x38e544, + 0x2b2484, + 0x38668a, + 0x214c43, + 0x277208, + 0x250f0a, + 0x23f747, + 0x300986, + 0x260484, + 0x21c242, + 0x2a3702, + 0x65a02982, + 0x238b83, + 0x25ab47, + 0x202987, + 0x284d04, + 0x3a4f87, + 0x2e4e06, + 0x221287, + 0x303304, + 0x399d85, + 0x292705, + 0x65e1b2c2, + 0x3c50c6, + 0x223443, + 0x22a4c2, + 0x22a4c6, + 0x66222342, + 0x6660e982, + 0x3bb945, + 0x66a27882, + 0x66e01c42, + 0x334e85, + 0x2c51c5, + 0x355a85, + 0x289043, + 0x244185, + 0x2d9907, + 0x2feb45, + 0x370005, + 0x33d8c4, + 0x310806, + 0x381d44, + 0x67202a82, + 0x67ee7585, + 0x2a3ac7, + 0x34f408, + 0x261146, + 0x26114d, + 0x2674c9, + 0x2674d2, + 0x300585, + 0x309f43, + 0x6820c202, + 0x30ee44, + 0x39eb83, + 0x33b0c5, + 0x2fdf05, + 0x68630882, + 0x25f803, + 0x68a51b02, + 0x692d6142, + 0x69602242, + 0x2a1d45, + 0x394183, + 0x3c4f08, + 0x69a0ad42, + 0x69e0c842, + 0x26f446, + 0x317c4a, + 0x20e043, + 0x25d0c3, + 0x337d03, + 0x6aa04182, + 0x78e0c542, + 0x79600d82, + 0x206d02, + 0x35d3c9, + 0x2c18c4, + 0x2a9348, + 0x79af3dc2, + 0x79e01ac2, + 0x2ea5c5, + 0x238788, + 0x39e488, + 0x268b8c, + 0x23f683, + 0x7a263802, + 0x7a611d82, + 0x270d46, + 0x301805, + 0x2787c3, + 0x253c06, + 0x301946, + 0x29bc83, + 0x303b03, + 0x303f46, + 0x304b84, + 0x239a46, + 0x214a05, + 0x214a0a, + 0x24c1c4, + 0x305244, + 0x305b8a, + 0x7aa04982, + 0x24c345, + 0x30798a, + 0x308305, + 0x308bc4, + 0x308cc6, + 0x308e44, + 0x2198c6, + 0x7ae308c2, + 0x2f3446, + 0x341ac5, + 0x325cc7, + 0x3adf46, + 0x256844, + 0x2d2387, + 0x328446, + 0x241a05, + 0x241a07, + 0x3aed07, + 0x3aed0e, + 0x2ebb06, + 0x23ce05, + 0x203f87, + 0x20ff43, + 0x20ff47, + 0x217945, + 0x22f484, + 0x22f5c2, + 0x246087, + 0x2f9104, + 0x244dc4, + 0x290d4b, + 0x220003, + 0x2e58c7, + 0x220004, + 0x2e6047, + 0x2903c3, + 0x33ca0d, + 0x3998c8, + 0x2297c4, + 0x2701c5, + 0x30b205, + 0x30b643, + 0x7b228382, + 0x30d5c3, + 0x30da83, + 0x321c04, + 0x27ab85, + 0x3c5247, + 0x39ecc6, + 0x37c1c3, + 0x22a60b, + 0x30e04b, + 0x2a5c8b, + 0x2fa5cb, + 0x2bd60a, + 0x30548b, + 0x3245cb, + 0x360a8c, + 0x384f4b, + 0x3c4351, + 0x3c5d4a, + 0x30f5cb, + 0x30f88c, + 0x30fb8b, + 0x31010a, + 0x311bca, + 0x312bce, + 0x31324b, + 0x31350a, + 0x3145d1, + 0x314a0a, + 0x314f0b, + 0x31544e, + 0x315d8c, + 0x316b8b, + 0x316e4e, + 0x3171cc, + 0x318d4a, + 0x31a04c, + 0x7b71a34a, + 0x31af48, + 0x31ba49, + 0x32368a, + 0x32390a, + 0x323b8b, + 0x328b4e, + 0x328ed1, + 0x330349, + 0x33058a, + 0x330bcb, + 0x332a4a, + 0x333296, + 0x334b8b, + 0x33784a, + 0x33818a, + 0x33908b, + 0x33ab49, + 0x33d389, + 0x33de0d, + 0x33e48b, + 0x33f38b, + 0x33fd4b, + 0x343d49, + 0x34438e, + 0x34500a, + 0x34a4ca, + 0x34a7ca, + 0x34afcb, + 0x34b80b, + 0x34bacd, + 0x34d18d, + 0x34d7d0, + 0x34dc8b, + 0x34f9cc, + 0x34fe8b, + 0x351e0b, + 0x35344e, + 0x353a0b, + 0x353a0d, + 0x35964b, + 0x35a0cf, + 0x35a48b, + 0x35acca, + 0x35b3c9, + 0x35ba89, + 0x35cd4b, + 0x35d00e, + 0x35e88b, + 0x35f64f, + 0x36160b, + 0x3618cb, + 0x361b8b, + 0x36238a, + 0x367149, + 0x36a18f, + 0x36f54c, + 0x37038c, + 0x37108e, + 0x37158f, + 0x37194e, + 0x3722d0, + 0x3726cf, + 0x3731ce, + 0x373f8c, + 0x374292, + 0x375211, + 0x375a0e, + 0x375e8e, + 0x3763ce, + 0x37674f, + 0x376b0e, + 0x376e93, + 0x377351, + 0x37778c, + 0x377a8e, + 0x377f0c, + 0x378513, + 0x378ed0, + 0x37970c, + 0x379a0c, + 0x379ecb, + 0x37ac8e, + 0x37b18b, + 0x37b5cb, + 0x37ca4c, + 0x3825ca, + 0x38474c, + 0x384a4c, + 0x384d49, + 0x387e0b, + 0x3880c8, + 0x388889, + 0x38888f, + 0x38a08b, + 0x7bb8afca, + 0x38e8cc, + 0x38fa89, + 0x390a48, + 0x39100b, + 0x39158b, + 0x39220a, + 0x39248b, + 0x39298c, + 0x393d48, + 0x39a40b, + 0x39d80b, + 0x3a114e, + 0x3a27cb, + 0x3a410b, + 0x3ae88b, + 0x3aeb49, + 0x3af08d, + 0x3b368a, + 0x3b45d7, + 0x3b5cd8, + 0x3b9749, + 0x3bb58b, + 0x3bc1d4, + 0x3bc6cb, + 0x3bcc4a, + 0x3bd14a, + 0x3bd3cb, + 0x3bf610, + 0x3bfa11, + 0x3c00ca, + 0x3c394d, + 0x3c404d, + 0x3c61cb, + 0x3c6a06, + 0x3c51c3, + 0x7bf74a03, + 0x2dd1c6, + 0x245a05, + 0x252087, + 0x324486, + 0x1601182, + 0x2cbe89, + 0x31abc4, + 0x2d8988, + 0x220a83, + 0x30ed87, + 0x201c02, + 0x2acb03, + 0x7c200dc2, + 0x2c4946, + 0x2c5c84, + 0x21a604, + 0x349a43, + 0x349a45, + 0x7cac2742, + 0x7cea8044, + 0x273487, + 0x7d22f442, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x20ae43, + 0x200742, + 0xcd588, + 0x202c42, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x207c03, + 0x32eb56, + 0x356d13, + 0x3a4e09, + 0x38c988, + 0x340109, + 0x307b06, + 0x33e750, + 0x248c93, + 0x2f4ac8, + 0x2a5687, + 0x2b6f87, + 0x278c8a, + 0x38e5c9, + 0x342549, + 0x28b30b, + 0x31ca86, + 0x20850a, + 0x225f06, + 0x31a7c3, + 0x298445, + 0x207808, + 0x3abb0d, + 0x2ef1cc, + 0x35cac7, + 0x312f0d, + 0x21d0c4, + 0x23478a, + 0x23598a, + 0x235e4a, + 0x21fa07, + 0x243507, + 0x245fc4, + 0x27b986, + 0x3264c4, + 0x2e01c8, + 0x2ee049, + 0x2bfcc6, + 0x2bfcc8, + 0x24944d, + 0x2c3909, + 0x389ac8, + 0x3a2547, + 0x2f1c0a, + 0x2564c6, + 0x260fc7, + 0x306a04, + 0x214707, + 0x3105ca, + 0x378a0e, + 0x229985, + 0x3bfe0b, + 0x300389, + 0x267709, + 0x2b0587, + 0x3694ca, + 0x224dc7, + 0x2ff3c9, + 0x31e5c8, + 0x239e8b, + 0x2d8345, + 0x2309ca, + 0x227b09, + 0x3abe0a, + 0x2c64cb, + 0x21460b, + 0x28b095, + 0x306745, + 0x3a25c5, + 0x2e9d0a, + 0x2a6f0a, + 0x300107, + 0x2388c3, + 0x2e2348, + 0x2cf00a, + 0x228486, + 0x25b789, + 0x285048, + 0x2e9f84, + 0x29bb49, + 0x2be888, + 0x36c607, + 0x2e7586, + 0x2a3ac7, + 0x2ac6c7, + 0x2450c5, + 0x2297cc, + 0x2701c5, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x20be03, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x20be03, + 0x20ec83, + 0xae43, + 0x285603, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0xcd588, + 0x202c42, + 0x209d42, + 0x236082, + 0x201102, + 0x2013c2, + 0x2db482, + 0x460be03, + 0x237583, + 0x203d43, + 0x30e843, + 0x214bc3, + 0x21f743, + 0x2d1206, + 0x20ec83, + 0x241d03, + 0x238843, + 0xcd588, + 0x323584, + 0x322dc7, + 0x34a403, + 0x2402c4, + 0x21b903, + 0x283cc3, + 0x30e843, + 0x15da87, + 0x1221c4, + 0x121b83, + 0xf45, + 0x200742, + 0xb6c03, + 0x5a02c42, + 0x1488d09, + 0x891cd, + 0x8950d, + 0x236082, + 0x6ff84, + 0xf89, + 0x200342, + 0x5f8d588, + 0xe9484, + 0xcd588, + 0x1426502, + 0x1508546, + 0x233603, + 0x2b8283, + 0x660be03, + 0x234784, + 0x6a37583, + 0x6f0e843, + 0x207d02, + 0x26ff84, + 0x20ec83, + 0x2fbbc3, + 0x2056c2, + 0x241d03, + 0x21c4c2, + 0x2f1f03, + 0x200a02, + 0x29d2c3, + 0x26f883, + 0x20fc42, + 0xcd588, + 0x233603, + 0x2fbbc3, + 0x2056c2, + 0x2f1f03, + 0x200a02, + 0x29d2c3, + 0x26f883, + 0x20fc42, + 0x2f1f03, + 0x200a02, + 0x29d2c3, + 0x26f883, + 0x20fc42, + 0x20be03, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x204bc2, + 0x219543, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x2f5805, + 0x230882, + 0x200742, + 0xcd588, + 0x1455908, + 0x1367ca, + 0x30e843, + 0x200001, + 0x202081, + 0x200ec1, + 0x200f01, + 0x200f41, + 0x20d701, + 0x312181, + 0x203801, + 0x24b241, + 0x2021c1, + 0x200101, + 0x200301, + 0x117485, + 0xcd588, + 0x200781, + 0x2014c1, + 0x200041, + 0x200141, + 0x201401, + 0x200901, + 0x200541, + 0x200c01, + 0x200a81, + 0x200641, + 0x200081, + 0x2001c1, + 0x200341, + 0x201681, + 0x20ab41, + 0x2002c1, + 0x200a01, + 0x200401, + 0x200441, + 0x201ac1, + 0x203f81, + 0x20d601, + 0x201181, + 0x200dc1, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x200342, + 0x241d03, + 0x15da87, + 0x1f847, + 0x29546, + 0x4160a, + 0x88348, + 0x5a588, + 0x5aa47, + 0x86, + 0xd61c5, + 0x14a345, + 0x7dac6, + 0x157206, + 0x28b304, + 0x340547, + 0xcd588, + 0x2d2484, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x31a548, + 0x31e084, + 0x2374c4, + 0x22aa44, + 0x270c47, + 0x2cde07, + 0x20be03, + 0x23d74b, + 0x31b7ca, + 0x31cd47, + 0x2fc048, + 0x3255c8, + 0x237583, + 0x346c47, + 0x203d43, + 0x37c208, + 0x335049, + 0x26ff84, + 0x214bc3, + 0x2dce48, + 0x21f743, + 0x2cb6ca, + 0x2d1206, + 0x3a0547, + 0x20ec83, + 0x2da606, + 0x309308, + 0x241d03, + 0x28d806, + 0x2e394d, + 0x2e49c8, + 0x2ea0cb, + 0x331e86, + 0x348087, + 0x20f605, + 0x2ef98a, + 0x22bfc5, + 0x36210a, + 0x230882, + 0x203f83, + 0x244dc4, + 0x2021c6, + 0x3a7a03, + 0x2af043, + 0x24be43, + 0x23b003, + 0x349183, + 0x200582, + 0x2d7285, + 0x2a6589, + 0x245743, + 0x205583, + 0x202fc3, + 0x200301, + 0x2a1a85, + 0x39da83, + 0x2053c3, + 0x22aa44, + 0x326443, + 0x214948, + 0x2ec443, + 0x302e8d, + 0x2ebbc8, + 0x21ab46, + 0x31c783, + 0x378983, + 0x381cc3, + 0xaa0be03, + 0x236dc8, + 0x23d744, + 0x246d03, + 0x2022c6, + 0x249bc8, + 0x202e03, + 0x2ef9c3, + 0x2319c3, + 0x237583, + 0x21d8c3, + 0x21e903, + 0x21a303, + 0x31c703, + 0x2b25c3, + 0x225783, + 0x370645, + 0x256c04, + 0x258107, + 0x329982, + 0x25a303, + 0x25d486, + 0x25ed03, + 0x25f3c3, + 0x276543, + 0x202043, + 0x323283, + 0x269687, + 0xaf0e843, + 0x2363c3, + 0x2096c3, + 0x204d03, + 0x26ff83, + 0x2f3783, + 0x374ac5, + 0x363fc3, + 0x246889, + 0x20b0c3, + 0x2fe203, + 0xb2527c3, + 0x286d03, + 0x21cd08, + 0x2a64c6, + 0x200706, + 0x29aa46, + 0x27a5c7, + 0x200c83, + 0x20be83, + 0x21f743, + 0x288446, + 0x21a202, + 0x29ea43, + 0x32dd05, + 0x20ec83, + 0x2a2e47, + 0x160ae43, + 0x24e483, + 0x21fa83, + 0x225e03, + 0x241d03, + 0x212e46, + 0x31d286, + 0x36aa43, + 0x22ba83, + 0x219543, + 0x253743, + 0x303b83, + 0x2f0603, + 0x2f20c3, + 0x34f085, + 0x24f3c3, + 0x2d3246, + 0x23eb08, + 0x225b43, + 0x341789, + 0x33a308, + 0x2110c8, + 0x21a185, + 0x32a38a, + 0x35400a, + 0x37cd8b, + 0x37d408, + 0x2fb903, + 0x2f2103, + 0x33b1c3, + 0x366d88, + 0x2f4e83, + 0x39ec44, + 0x261983, + 0x202983, + 0x22d483, + 0x26fcc3, + 0x238843, + 0x230882, + 0x22d0c3, + 0x23f683, + 0x305403, + 0x3065c4, + 0x244dc4, + 0x3be143, + 0xcd588, + 0x200742, + 0x200602, + 0x200582, + 0x203402, + 0x2023c2, + 0x200782, + 0x238c02, + 0x201b02, + 0x202542, + 0x2000c2, + 0x225242, + 0x20d682, + 0x26cd82, + 0x206f02, + 0x2db482, + 0x20b182, + 0x201f82, + 0x2057c2, + 0x2f5f42, + 0x208102, + 0x200982, + 0x219e82, + 0x207bc2, + 0x207c02, + 0x201282, + 0x20fd82, + 0x201c42, + 0x742, + 0x602, + 0x582, + 0x3402, + 0x23c2, + 0x782, + 0x38c02, + 0x1b02, + 0x2542, + 0xc2, + 0x25242, + 0xd682, + 0x6cd82, + 0x6f02, + 0xdb482, + 0xb182, + 0x1f82, + 0x57c2, + 0xf5f42, + 0x8102, + 0x982, + 0x19e82, + 0x7bc2, + 0x7c02, + 0x1282, + 0xfd82, + 0x1c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x3f82, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x241d03, + 0xc60be03, + 0x30e843, + 0x21f743, + 0xaff03, + 0x223b82, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0xaff03, + 0x241d03, + 0xdc2, + 0x142f49, + 0x202382, + 0x15bda05, + 0x2eaa02, + 0xcd588, + 0x2c42, + 0x23bfc2, + 0x200482, + 0x244482, + 0x21f682, + 0x253982, + 0x14a345, + 0x203082, + 0x2056c2, + 0x20c502, + 0x203042, + 0x20b182, + 0x392a82, + 0x20ed42, + 0x24eb42, + 0x15da87, + 0x120a8d, + 0xd6249, + 0x6898b, + 0xd97c8, + 0x60b89, + 0xfeec6, + 0x30e843, + 0xcd588, + 0x1221c4, + 0x121b83, + 0xf45, + 0xcd588, + 0x5b646, + 0xf89, + 0xab07, + 0x200742, + 0x28b304, + 0x202c42, + 0x20be03, + 0x209d42, + 0x237583, + 0x202542, + 0x2d2484, + 0x214bc3, + 0x254ac2, + 0x20ec83, + 0x200342, + 0x241d03, + 0x3a25c6, + 0x32414f, + 0x70ec03, + 0xcd588, + 0x202c42, + 0x203d43, + 0x30e843, + 0x21f743, + 0xae43, + 0x14ef74b, + 0x141650a, + 0x14eca47, + 0x78d4b, + 0xd7e45, + 0x15da87, + 0x202c42, + 0x20be03, + 0x30e843, + 0x20ec83, + 0x200742, + 0x211a42, + 0x209342, + 0xfe0be03, + 0x2442c2, + 0x237583, + 0x226d02, + 0x22ab02, + 0x30e843, + 0x25cd82, + 0x251942, + 0x2a8002, + 0x211742, + 0x28d302, + 0x2029c2, + 0x200902, + 0x2ebfc2, + 0x278142, + 0x25c982, + 0x2ad9c2, + 0x2fcdc2, + 0x223482, + 0x23d082, + 0x21f743, + 0x205a02, + 0x20ec83, + 0x211e82, + 0x2c9fc2, + 0x241d03, + 0x2457c2, + 0x207c02, + 0x209f82, + 0x203a42, + 0x204e02, + 0x2da942, + 0x21b2c2, + 0x251b02, + 0x2234c2, + 0x31350a, + 0x35acca, + 0x38bf0a, + 0x3c6b82, + 0x20f2c2, + 0x374a82, + 0x103358c9, + 0x1072f70a, + 0x14328c7, + 0x10a03fc2, + 0x1410983, + 0x3342, + 0x12f70a, + 0x253404, + 0x1120be03, + 0x237583, + 0x254a04, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x1aec5, + 0x20ae43, + 0x241d03, + 0x24f3c3, + 0x203f83, + 0xcd588, + 0x1400004, + 0x149845, + 0x142f49, + 0xa8ca, + 0x119fc2, + 0x19cbc6, + 0x187251, + 0x11b358c9, + 0x1498c8, + 0x1c1948, + 0x1fbc7, + 0x282, + 0x11748b, + 0x18c40a, + 0x844a, + 0x2aa47, + 0xcd588, + 0x10c788, + 0xd547, + 0x18419a4b, + 0x1c787, + 0x4c2, + 0x5e87, + 0x23a8a, + 0x1f8cf, + 0x8308f, + 0xefa02, + 0x2c42, + 0x173908, + 0xf698a, + 0x12b48, + 0x5fcc8, + 0xd3708, + 0x2e02, + 0x1bda8f, + 0x9dc8b, + 0x7e948, + 0x3c2c7, + 0x127c0a, + 0xf400b, + 0x78449, + 0x127b07, + 0x12a48, + 0x1541cc, + 0x3a347, + 0x17a28a, + 0x67008, + 0xf6f8e, + 0x2954e, + 0x2a88b, + 0x2e28b, + 0x30e8b, + 0x50b89, + 0xe32cb, + 0xeb5cd, + 0x17d18b, + 0x198c8d, + 0x19900d, + 0x3cc4a, + 0x44c0b, + 0x4638b, + 0x49ec5, + 0x18828e50, + 0x15770f, + 0x3b4cf, + 0xfb1cd, + 0x39610, + 0xa6c2, + 0x18e071c8, + 0x1f6c8, + 0x192ec405, + 0x5400b, + 0x12e350, + 0x59c88, + 0x12c4a, + 0x2e449, + 0x66007, + 0x66347, + 0x66507, + 0x66887, + 0x67347, + 0x67947, + 0x68187, + 0x68547, + 0x68e87, + 0x69187, + 0x69847, + 0x69a07, + 0x69bc7, + 0x69d87, + 0x6a087, + 0x6a687, + 0x6af47, + 0x6b707, + 0x6bcc7, + 0x6bf87, + 0x6c147, + 0x6c447, + 0x6cc47, + 0x6ce47, + 0x6dd87, + 0x6df47, + 0x6e107, + 0x6ebc7, + 0x6f0c7, + 0x6fd87, + 0x70687, + 0x71147, + 0x71647, + 0x71807, + 0x71c07, + 0x72447, + 0x726c7, + 0x72ac7, + 0x72c87, + 0x72e47, + 0x73287, + 0x73e87, + 0x743c7, + 0x74947, + 0x74b07, + 0x74e87, + 0x75407, + 0xd0c2, + 0x5fdca, + 0xdc547, + 0x84785, + 0xb3111, + 0x10ac6, + 0x10cc0a, + 0x17378a, + 0x5b646, + 0xcb0b, + 0x1402, + 0x34111, + 0xb29c9, + 0x948c9, + 0xebfc2, + 0x71e8a, + 0xa5a89, + 0xa61cf, + 0xa67ce, + 0xa7708, + 0x552c2, + 0x549, + 0x18b4ce, + 0xfc6cc, + 0xdbe0f, + 0x1a814e, + 0x1840c, + 0x25589, + 0x26751, + 0x2f988, + 0x1109d2, + 0x1115cd, + 0x1545cd, + 0x43f8b, + 0x4bad5, + 0x52c49, + 0x5438a, + 0x5ee89, + 0x6b310, + 0x7cc8b, + 0x85ecf, + 0xf0c0b, + 0x16130c, + 0x1b2610, + 0x9208a, + 0x9e90d, + 0x9fc4e, + 0xa9bca, + 0xab6cc, + 0xac394, + 0xb2651, + 0xbb04b, + 0xe1ecf, + 0xca50d, + 0x1a720e, + 0x16c4cc, + 0x18618c, + 0xb234b, + 0xb428e, + 0xb4d50, + 0xb584b, + 0xbaa8d, + 0xbb4cf, + 0xbef4c, + 0xbfb4e, + 0xc0411, + 0xdff4c, + 0x10d8c7, + 0xc738d, + 0xd000c, + 0xd65d0, + 0xdb80d, + 0x18acc7, + 0xe6310, + 0xf9348, + 0xfd44b, + 0x17d9cf, + 0x142188, + 0x10ce0d, + 0x190c10, + 0xf5f89, + 0x196af346, + 0xb0303, + 0xb5b05, + 0x9602, + 0x143709, + 0x5c40a, + 0x106606, + 0x2098a, + 0x1991f309, + 0x264c3, + 0xd2711, + 0xd2b49, + 0xd3ec7, + 0x1873cb, + 0xdae90, + 0xdb34c, + 0xdc2c8, + 0xdcc45, + 0x11e748, + 0x1afe8a, + 0x26587, + 0x140947, + 0x1382, + 0x12f04a, + 0x3b809, + 0x71505, + 0xa2cca, + 0x8a0cf, + 0x4794b, + 0x174b8c, + 0x1a252, + 0x9df05, + 0xdf0c8, + 0x13a60a, + 0x19ee8f05, + 0x17478c, + 0x12c443, + 0x192a82, + 0xf258a, + 0x14f2d8c, + 0x3a6c8, + 0x198e48, + 0x15da07, + 0x16f02, + 0xa02, + 0x49fd0, + 0x653c7, + 0x1282, + 0x333cf, + 0x7dac6, + 0x79a8e, + 0xdeb8b, + 0x6e308, + 0xa9dc9, + 0xf5012, + 0x18998d, + 0x1be608, + 0x68849, + 0x6a20d, + 0x6c5c9, + 0x6c98b, + 0x6e4c8, + 0x73c88, + 0x76248, + 0x79dc9, + 0x79fca, + 0x7b48c, + 0x17010a, + 0x103bc7, + 0x2fdcd, + 0xf7a8b, + 0x11a9cc, + 0x1979c8, + 0x4d3c9, + 0x13d8d0, + 0xc842, + 0x521cd, + 0x4182, + 0xc542, + 0x103b0a, + 0x10cb0a, + 0x10ec8b, + 0x4654c, + 0x10c28a, + 0x10c50e, + 0x121ccd, + 0xb6a08, + 0xdc2, + 0x11e0340e, + 0x1272184e, + 0x12f4960a, + 0x13742c0e, + 0x13f374ce, + 0x147ac40c, + 0x14328c7, + 0x14328c9, + 0x1410983, + 0x14eb784c, + 0x15727309, + 0x15f69bc9, + 0x1660a6c9, + 0x3342, + 0x3351, + 0x121791, + 0x14954d, + 0x142b51, + 0x137411, + 0x1ac34f, + 0xb778f, + 0x12724c, + 0x169b0c, + 0xa60c, + 0x1654cd, + 0x10e595, + 0x5a00c, + 0x1ba48c, + 0x138c90, + 0x155e8c, + 0x15dc0c, + 0x17a659, + 0x180a19, + 0x19f3d9, + 0x1b57d4, + 0x1bbcd4, + 0x3ed4, + 0x4ed4, + 0xb814, + 0x16e5a0c9, + 0x17404189, + 0x17fba549, + 0x1222fb89, + 0x3342, + 0x12a2fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x1322fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x13a2fb89, + 0x3342, + 0x1422fb89, + 0x3342, + 0x14a2fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x1522fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x15a2fb89, + 0x3342, + 0x1622fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x16a2fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x1722fb89, + 0x3342, + 0x17a2fb89, + 0x3342, + 0x1822fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x187245, + 0x18c404, + 0x340e, + 0x12184e, + 0x14960a, + 0x142c0e, + 0x1374ce, + 0x1ac40c, + 0xb784c, + 0x127309, + 0x169bc9, + 0xa6c9, + 0x5a0c9, + 0x4189, + 0x1ba549, + 0x10e78d, + 0x5189, + 0xbac9, + 0x116a84, + 0x118c44, + 0x13aa44, + 0x18e7c4, + 0x79004, + 0x98884, + 0x477c4, + 0x143c44, + 0x1fbc4, + 0x157cd03, + 0xa6c2, + 0x121cc3, + 0x2e02, + 0x200742, + 0x202c42, + 0x209d42, + 0x208882, + 0x202542, + 0x200342, + 0x200a02, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff83, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x20ec83, + 0x241d03, + 0x1a9c3, + 0x30e843, + 0x6ff84, + 0x200742, + 0x2b6c03, + 0x1be0be03, + 0x2394c7, + 0x30e843, + 0x20f003, + 0x226444, + 0x20ec83, + 0x241d03, + 0x22d50a, + 0x3a25c5, + 0x219543, + 0x20e982, + 0xcd588, + 0xcd588, + 0x2c42, + 0x129242, + 0x1c74660b, + 0x5fc5, + 0x1f8c5, + 0xf9fc6, + 0x1221c4, + 0x121b83, + 0xf45, + 0x117485, + 0xcd588, + 0x1c787, + 0xbe03, + 0x1ce41447, + 0x143146, + 0x1d149445, + 0x143207, + 0xf84a, + 0xf708, + 0x13407, + 0x68348, + 0x98647, + 0xf28f, + 0x47f87, + 0x4e786, + 0x12e350, + 0x12cf0f, + 0x1c009, + 0x106684, + 0x1d5432ce, + 0xa978c, + 0xf420a, + 0x785c7, + 0xd9f8a, + 0x11e909, + 0xada0c, + 0x1bdf0a, + 0x5cc0a, + 0xf89, + 0x106606, + 0x7868a, + 0x11d84a, + 0x9a209, + 0xd1fc8, + 0xd22c6, + 0xd6c0d, + 0xb7cc5, + 0xab07, + 0xfb709, + 0x1a3207, + 0x10bd94, + 0xfdb4b, + 0x7e78a, + 0xa358d, + 0xf283, + 0xf283, + 0x29546, + 0xf283, + 0xb6c03, + 0xcd588, + 0x2c42, + 0x54a04, + 0x5da83, + 0xf5805, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x205583, + 0x20be03, + 0x237583, + 0x203d43, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x294a83, + 0x203f83, + 0x205583, + 0x28b304, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x235cc3, + 0x20be03, + 0x237583, + 0x208883, + 0x203d43, + 0x30e843, + 0x26ff84, + 0x3c32c3, + 0x20be83, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x219543, + 0x20c283, + 0x1f20be03, + 0x237583, + 0x24e683, + 0x30e843, + 0x211343, + 0x20be83, + 0x241d03, + 0x2057c3, + 0x317f04, + 0xcd588, + 0x1fa0be03, + 0x237583, + 0x2a77c3, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x232f43, + 0xcd588, + 0x2020be03, + 0x237583, + 0x203d43, + 0x20ae43, + 0x241d03, + 0xcd588, + 0x14328c7, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x226444, + 0x20ec83, + 0x241d03, + 0x142f49, + 0x117485, + 0x15da87, + 0x10bfcb, + 0xd2f44, + 0xb7cc5, + 0x1455908, + 0xa7e0d, + 0x21676405, + 0x8f204, + 0x10ec3, + 0xf5e85, + 0x31cc45, + 0xcd588, + 0xf282, + 0x3a283, + 0xefec6, + 0x31b0c8, + 0x397247, + 0x28b304, + 0x346046, + 0x3699c6, + 0xcd588, + 0x312ec3, + 0x23aec9, + 0x265555, + 0x6555f, + 0x20be03, + 0x3ba052, + 0x10db06, + 0x14fc85, + 0x12c4a, + 0x2e449, + 0x3b9e0f, + 0x2d2484, + 0x225285, + 0x2fdfd0, + 0x38cb87, + 0x20ae43, + 0x310f08, + 0x157146, + 0x2a47ca, + 0x22d244, + 0x2e8943, + 0x3a25c6, + 0x20e982, + 0x3987cb, + 0xae43, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2f0ec3, + 0x202c42, + 0xee203, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20f003, + 0x227b03, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x20ec83, + 0xae43, + 0x241d03, + 0x200742, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x1f8c5, + 0x28b304, + 0x20be03, + 0x237583, + 0x21a484, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x20be03, + 0x237583, + 0x203d43, + 0x204d03, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x357d43, + 0x3cf83, + 0xf003, + 0x20ec83, + 0x241d03, + 0x31350a, + 0x333049, + 0x3524cb, + 0x352b4a, + 0x35acca, + 0x3686cb, + 0x37bfca, + 0x3825ca, + 0x38bf0a, + 0x38c18b, + 0x3afbc9, + 0x3b1a0a, + 0x3b1d8b, + 0x3bc98b, + 0x3c5b0a, + 0x20be03, + 0x237583, + 0x203d43, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x18754b, + 0x60308, + 0x14f209, + 0xcd588, + 0x20be03, + 0x266004, + 0x206302, + 0x226444, + 0x201485, + 0x205583, + 0x28b304, + 0x20be03, + 0x23d744, + 0x237583, + 0x254a04, + 0x2d2484, + 0x26ff84, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x252385, + 0x235cc3, + 0x219543, + 0x2b5d83, + 0x2702c4, + 0x2020c4, + 0x3c0885, + 0xcd588, + 0x320f04, + 0x39db46, + 0x2010c4, + 0x202c42, + 0x38fd87, + 0x256087, + 0x252944, + 0x25e5c5, + 0x2dba05, + 0x232585, + 0x26ff84, + 0x27a688, + 0x23c806, + 0x3c5f88, + 0x278185, + 0x2d8345, + 0x251304, + 0x241d03, + 0x2e9484, + 0x367486, + 0x3a26c3, + 0x2702c4, + 0x362205, + 0x26e984, + 0x23fd84, + 0x20e982, + 0x397746, + 0x3a4b06, + 0x301805, + 0x200742, + 0x2b6c03, + 0x27e02c42, + 0x207344, + 0x202542, + 0x21f743, + 0x23a3c2, + 0x20ec83, + 0x200342, + 0x207c03, + 0x203f83, + 0xcd588, + 0xcd588, + 0x30e843, + 0x200742, + 0x28a02c42, + 0x30e843, + 0x2574c3, + 0x3c32c3, + 0x2168c4, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x200742, + 0x29202c42, + 0x20be03, + 0x20ec83, + 0xae43, + 0x241d03, + 0x982, + 0x20c202, + 0x230882, + 0x20f003, + 0x2e2d83, + 0x200742, + 0x117485, + 0xcd588, + 0x15da87, + 0x202c42, + 0x237583, + 0x254a04, + 0x206c03, + 0x30e843, + 0x204d03, + 0x21f743, + 0x20ec83, + 0x207783, + 0x241d03, + 0x2388c3, + 0xb5cd3, + 0x1b9994, + 0x15da87, + 0x102dc6, + 0x5c60b, + 0x29546, + 0x5a3c7, + 0x2809, + 0x195d4a, + 0x8820d, + 0x12078c, + 0x104fca, + 0x14a345, + 0xf888, + 0x7dac6, + 0x6ff06, + 0x157206, + 0x20a6c2, + 0x1c170c, + 0x18c5c7, + 0x282d1, + 0x20be03, + 0x682c5, + 0x8808, + 0x22644, + 0x2a507646, + 0xb3106, + 0xd95c6, + 0x8d5ca, + 0x19dac3, + 0x2aa48c44, + 0x27c5, + 0x15cc83, + 0x2ae38a07, + 0x1aec5, + 0xcbcc, + 0xed348, + 0x6f6cb, + 0x2b25168c, + 0x140d6c3, + 0xb8888, + 0x9db09, + 0x3ff48, + 0x14208c6, + 0x2b76d609, + 0xd7e4a, + 0x10d08, + 0xf9fc8, + 0x1fbc4, + 0x118b45, + 0x6f807, + 0x2ba6f803, + 0x2bf39c86, + 0x2c2e9d04, + 0x2c790207, + 0xf9fc4, + 0xf9fc4, + 0xf9fc4, + 0xf9fc4, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x200742, + 0x202c42, + 0x30e843, + 0x207d02, + 0x20ec83, + 0x241d03, + 0x207c03, + 0x37158f, + 0x37194e, + 0xcd588, + 0x20be03, + 0x49a07, + 0x237583, + 0x30e843, + 0x214bc3, + 0x20ec83, + 0x241d03, + 0x220443, + 0x322887, + 0x203d02, + 0x292889, + 0x200602, + 0x24a2cb, + 0x2cf44a, + 0x28d009, + 0x200182, + 0x3418c6, + 0x235295, + 0x24a415, + 0x236793, + 0x24a993, + 0x203942, + 0x222dc5, + 0x3ab48c, + 0x27410b, + 0x2a2205, + 0x203402, + 0x2f2542, + 0x37e706, + 0x200282, + 0x261bc6, + 0x212ecd, + 0x21ac4c, + 0x228ec4, + 0x200cc2, + 0x2149c2, + 0x310d88, + 0x2023c2, + 0x211446, + 0x35c704, + 0x235455, + 0x236913, + 0x2108c3, + 0x32508a, + 0x20df47, + 0x30eec9, + 0x2d9d07, + 0x314902, + 0x200882, + 0x3b4b46, + 0x2099c2, + 0xcd588, + 0x210702, + 0x200302, + 0x217a07, + 0x336087, + 0x21c485, + 0x2004c2, + 0x2da6c7, + 0x220488, + 0x204002, + 0x2f21c2, + 0x230502, + 0x203cc2, + 0x23e988, + 0x20bf83, + 0x25dc48, + 0x20bf8d, + 0x237c03, + 0x23bc48, + 0x237c0f, + 0x237fce, + 0x38feca, + 0x2d1311, + 0x2d1790, + 0x38360d, + 0x38394c, + 0x3452c7, + 0x325207, + 0x346109, + 0x228fc2, + 0x200782, + 0x25becc, + 0x25c1cb, + 0x203c02, + 0x2b2506, + 0x2010c2, + 0x202642, + 0x2efa02, + 0x202c42, + 0x231fc4, + 0x240647, + 0x230a42, + 0x245207, + 0x2475c7, + 0x21bc02, + 0x21b282, + 0x2498c5, + 0x204ac2, + 0x2e72ce, + 0x2a384d, + 0x237583, + 0x28400e, + 0x3b868d, + 0x348003, + 0x202ec2, + 0x2817c4, + 0x238c42, + 0x202e82, + 0x372a45, + 0x37b407, + 0x24d902, + 0x208882, + 0x254607, + 0x257688, + 0x329982, + 0x29df86, + 0x25bd4c, + 0x25c08b, + 0x212c42, + 0x26208f, + 0x262450, + 0x26284f, + 0x262c15, + 0x263154, + 0x26364e, + 0x2639ce, + 0x263d4f, + 0x26410e, + 0x264494, + 0x264993, + 0x264e4d, + 0x2755c9, + 0x289843, + 0x201802, + 0x215f45, + 0x206c06, + 0x202542, + 0x344e47, + 0x30e843, + 0x201402, + 0x36dfc8, + 0x2d1551, + 0x2d1990, + 0x200c42, + 0x270f87, + 0x205842, + 0x341287, + 0x209602, + 0x348f89, + 0x37e6c7, + 0x2a4b48, + 0x307486, + 0x2e2c83, + 0x326e05, + 0x20e402, + 0x202682, + 0x3b4f45, + 0x3c1485, + 0x200f82, + 0x214d03, + 0x26ea07, + 0x208007, + 0x2085c2, + 0x22e684, + 0x20b4c3, + 0x20b4c9, + 0x20f108, + 0x201542, + 0x204482, + 0x2e3547, + 0x33d705, + 0x293988, + 0x222a87, + 0x201cc3, + 0x298106, + 0x38348d, + 0x38380c, + 0x2e0646, + 0x200482, + 0x26f582, + 0x201e42, + 0x237a8f, + 0x237e8e, + 0x2dba87, + 0x200b82, + 0x3517c5, + 0x3517c6, + 0x203282, + 0x205a02, + 0x28ad86, + 0x292ac3, + 0x3411c6, + 0x2c3ec5, + 0x2c3ecd, + 0x2c4495, + 0x2c4e8c, + 0x2c59cd, + 0x2c5d92, + 0x20d682, + 0x26cd82, + 0x2047c2, + 0x21ce86, + 0x2fc586, + 0x201382, + 0x206c86, + 0x20c502, + 0x20d245, + 0x2013c2, + 0x2a3949, + 0x21d70c, + 0x21da4b, + 0x200342, + 0x258508, + 0x20cb42, + 0x206f02, + 0x271946, + 0x22fb05, + 0x31f507, + 0x250d85, + 0x2982c5, + 0x249a82, + 0x204c02, + 0x20b182, + 0x2dc107, + 0x24f4cd, + 0x24f84c, + 0x34f687, + 0x22bac2, + 0x201f82, + 0x23d488, + 0x343888, + 0x303d48, + 0x30cdc4, + 0x2b4507, + 0x2e3c83, + 0x24e882, + 0x204882, + 0x2e6b09, + 0x2f7387, + 0x2057c2, + 0x271d45, + 0x248602, + 0x209942, + 0x2bca43, + 0x2bca46, + 0x2f0602, + 0x2f1e82, + 0x201442, + 0x3b33c6, + 0x3454c7, + 0x205e42, + 0x200382, + 0x25da8f, + 0x283e4d, + 0x38b8ce, + 0x3b850c, + 0x2017c2, + 0x200502, + 0x3072c5, + 0x311d86, + 0x209002, + 0x208102, + 0x200982, + 0x222a04, + 0x2dcdc4, + 0x3c23c6, + 0x200a02, + 0x2b7307, + 0x231d03, + 0x231d08, + 0x2326c8, + 0x243e07, + 0x2ecbc6, + 0x204042, + 0x23e683, + 0x23e687, + 0x28a8c6, + 0x2f3045, + 0x30d148, + 0x206a42, + 0x341687, + 0x20fd82, + 0x2f4682, + 0x20c142, + 0x2f1149, + 0x20a402, + 0x201742, + 0x24adc3, + 0x325ec7, + 0x2040c2, + 0x21d88c, + 0x21db8b, + 0x2e06c6, + 0x35cbc5, + 0x227882, + 0x201c42, + 0x2ba046, + 0x22e983, + 0x331547, + 0x20cb82, + 0x202a82, + 0x235115, + 0x24a5d5, + 0x236653, + 0x24ab13, + 0x25d207, + 0x274548, + 0x274550, + 0x28744f, + 0x373ad3, + 0x28cdd2, + 0x292450, + 0x2b350f, + 0x2fd6d2, + 0x3af491, + 0x2af493, + 0x3938d2, + 0x2c3b0f, + 0x2cd74e, + 0x2cf252, + 0x2d09d1, + 0x2d3b0f, + 0x2d528e, + 0x2dc811, + 0x2dd7d0, + 0x2ed512, + 0x2f0f51, + 0x2f2206, + 0x2f3907, + 0x38e407, + 0x200d02, + 0x27efc5, + 0x3713c7, + 0x230882, + 0x20f6c2, + 0x22d0c5, + 0x200443, + 0x200446, + 0x24f68d, + 0x24f9cc, + 0x206d02, + 0x3ab30b, + 0x273fca, + 0x22358a, + 0x2b9489, + 0x2e530b, + 0x222bcd, + 0x2fe44c, + 0x2ec88a, + 0x27500c, + 0x294d4b, + 0x2a204c, + 0x2f968b, + 0x2b9e83, + 0x2f4f06, + 0x3b9942, + 0x2f3dc2, + 0x20e343, + 0x201ac2, + 0x207203, + 0x24ec86, + 0x262dc7, + 0x2ad706, + 0x2f6b48, + 0x343588, + 0x2ca146, + 0x211d82, + 0x3011cd, + 0x30150c, + 0x2d2547, + 0x304e07, + 0x23c242, + 0x219742, + 0x23e602, + 0x257a42, + 0x202c42, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x207c03, + 0x200742, + 0x201482, + 0x2e68ecc5, + 0x2ea8e4c5, + 0x2efb3086, + 0xcd588, + 0x2f2afb05, + 0x202c42, + 0x209d42, + 0x2f726285, + 0x2fa7cb85, + 0x2fe7d647, + 0x302867c9, + 0x30667d84, + 0x202542, + 0x201402, + 0x30b0dec5, + 0x30e95f49, + 0x31327988, + 0x316ac205, + 0x31af0707, + 0x31e21cc8, + 0x322def85, + 0x3266d246, + 0x32b6d849, + 0x32ed4ec8, + 0x332bf988, + 0x3369658a, + 0x33a75e04, + 0x33f7c545, + 0x342bc308, + 0x34727e05, + 0x217f42, + 0x34a061c3, + 0x34ea2606, + 0x35311408, + 0x356eee46, + 0x35b643c8, + 0x35eb6306, + 0x363c2f44, + 0x2032c2, + 0x366f1587, + 0x36aa8644, + 0x36e77e87, + 0x3723ecc7, + 0x200342, + 0x3769ae85, + 0x37a403c4, + 0x37ee1787, + 0x383a3387, + 0x38681606, + 0x38a7d205, + 0x38e96047, + 0x392e5a48, + 0x396162c7, + 0x39b94949, + 0x39ec51c5, + 0x3a2b4107, + 0x3a68e306, + 0x3aa941c8, + 0x227d8d, + 0x251989, + 0x272fcb, + 0x27ac8b, + 0x2a78cb, + 0x2da98b, + 0x311f8b, + 0x31224b, + 0x312889, + 0x31378b, + 0x313a4b, + 0x313fcb, + 0x314c8a, + 0x3151ca, + 0x3157cc, + 0x31938b, + 0x319dca, + 0x33080a, + 0x33b28e, + 0x33be8e, + 0x33c20a, + 0x33e14a, + 0x33eb4b, + 0x33ee0b, + 0x33fa8b, + 0x35edcb, + 0x35f3ca, + 0x36008b, + 0x36034a, + 0x3605ca, + 0x36084a, + 0x37d74b, + 0x3856cb, + 0x388f8e, + 0x38930b, + 0x391f4b, + 0x392e0b, + 0x39a6ca, + 0x39a949, + 0x39ab8a, + 0x39c6ca, + 0x3b06cb, + 0x3b204b, + 0x3b2a0a, + 0x3b390b, + 0x3b904b, + 0x3c554b, + 0x3ae7fd48, + 0x3b287989, + 0x3b69d989, + 0x3bad8988, + 0x34c805, + 0x200583, + 0x22a3c4, + 0x217c05, + 0x267ac6, + 0x26cfc5, + 0x286284, + 0x344d48, + 0x30b505, + 0x290604, + 0x2064c7, + 0x29cf0a, + 0x266b4a, + 0x2dbb87, + 0x20c4c7, + 0x2fd2c7, + 0x282187, + 0x2f8c45, + 0x3b6e46, + 0x386007, + 0x244e44, + 0x2df546, + 0x2df446, + 0x204745, + 0x3389c4, + 0x2975c6, + 0x29bfc7, + 0x22df06, + 0x27c8c7, + 0x250803, + 0x3912c6, + 0x234f05, + 0x27d747, + 0x26a84a, + 0x26e7c4, + 0x21bd88, + 0x2b8a49, + 0x2e0d07, + 0x319c46, + 0x258788, + 0x2ef589, + 0x30f084, + 0x33a484, + 0x29ef05, + 0x2ba648, + 0x2c2807, + 0x2b3e49, + 0x22dc08, + 0x2f2306, + 0x310806, + 0x297f88, + 0x362bc6, + 0x28e4c5, + 0x2816c6, + 0x278988, + 0x237986, + 0x25af0b, + 0x2c7c06, + 0x299b0d, + 0x369405, + 0x2a8506, + 0x21f085, + 0x331b49, + 0x3a6cc7, + 0x318308, + 0x2a1e46, + 0x298d89, + 0x33ffc6, + 0x26a7c5, + 0x24c486, + 0x288b86, + 0x2c6e49, + 0x31e2c6, + 0x29cc07, + 0x245e85, + 0x203983, + 0x25b085, + 0x299dc7, + 0x3ab746, + 0x369309, + 0x3b3086, + 0x26b146, + 0x213fc9, + 0x2810c9, + 0x29fac7, + 0x200908, + 0x2b2f49, + 0x27ec48, + 0x330a46, + 0x2d1d85, + 0x240c8a, + 0x26b1c6, + 0x239346, + 0x2cac05, + 0x2d4888, + 0x22b287, + 0x233f0a, + 0x254f86, + 0x251dc5, + 0x3324c6, + 0x224507, + 0x319b07, + 0x2835c5, + 0x26a985, + 0x395a06, + 0x3b8c06, + 0x2fa846, + 0x2bc7c4, + 0x280449, + 0x288806, + 0x2c814a, + 0x227248, + 0x36fd08, + 0x266b4a, + 0x212505, + 0x29bf05, + 0x2dd048, + 0x2c9688, + 0x233907, + 0x2ba946, + 0x32bf88, + 0x309507, + 0x27f348, + 0x2b5706, + 0x281e48, + 0x295586, + 0x278307, + 0x33a206, + 0x2975c6, + 0x22ecca, + 0x232046, + 0x2d1d89, + 0x2ee586, + 0x35c00a, + 0x3c2f49, + 0x27dd86, + 0x2b8304, + 0x21600d, + 0x287c07, + 0x239c06, + 0x2bf845, + 0x340045, + 0x37f306, + 0x2e15c9, + 0x2d4407, + 0x279406, + 0x306886, + 0x286309, + 0x2a3204, + 0x242544, + 0x3c2a88, + 0x24f046, + 0x271348, + 0x2e8008, + 0x29f447, + 0x3b6589, + 0x2faa47, + 0x2af9ca, + 0x2e79cf, + 0x31194a, + 0x3070c5, + 0x278bc5, + 0x218b05, + 0x35c647, + 0x2240c3, + 0x200b08, + 0x21e646, + 0x21e749, + 0x2d8646, + 0x2c8547, + 0x298b49, + 0x318208, + 0x2cacc7, + 0x30eb43, + 0x34c885, + 0x224045, + 0x2bc60b, + 0x327ec4, + 0x2d6884, + 0x276bc6, + 0x30f407, + 0x38f4ca, + 0x206247, + 0x20c347, + 0x27cb85, + 0x3c6485, + 0x282609, + 0x2975c6, + 0x2060cd, + 0x31e505, + 0x2b18c3, + 0x20b003, + 0x3a4d45, + 0x351305, + 0x258788, + 0x27a347, + 0x2422c6, + 0x29d606, + 0x22de05, + 0x237847, + 0x3c1d47, + 0x23c6c7, + 0x37c5ca, + 0x391388, + 0x2bc7c4, + 0x257bc7, + 0x27bb07, + 0x33f086, + 0x2692c7, + 0x2a1808, + 0x395f08, + 0x329b06, + 0x20c708, + 0x2cfbc4, + 0x386006, + 0x370d86, + 0x36bd46, + 0x277806, + 0x29b244, + 0x282246, + 0x2be246, + 0x297986, + 0x2060c6, + 0x20aec6, + 0x2a1646, + 0x2421c8, + 0x385a88, + 0x2cdac8, + 0x26d1c8, + 0x2dcfc6, + 0x212305, + 0x39e746, + 0x2ac285, + 0x396f87, + 0x22dcc5, + 0x213c03, + 0x38e045, + 0x33dd04, + 0x20b005, + 0x247643, + 0x33c4c7, + 0x30d708, + 0x27c986, + 0x2c930d, + 0x278b86, + 0x296f45, + 0x222083, + 0x2bbcc9, + 0x2a3386, + 0x291706, + 0x271e04, + 0x3118c7, + 0x23a1c6, + 0x2d46c5, + 0x21af83, + 0x3be4c4, + 0x27bcc6, + 0x3b6f44, + 0x370e88, + 0x3459c9, + 0x2317c9, + 0x29ed0a, + 0x2a05cd, + 0x2118c7, + 0x2391c6, + 0x20f984, + 0x2867c9, + 0x284ac8, + 0x287806, + 0x241906, + 0x2692c7, + 0x2d9346, + 0x22a046, + 0x347086, + 0x23ed4a, + 0x221cc8, + 0x22f885, + 0x2a2fc9, + 0x27f84a, + 0x2ff648, + 0x29b6c8, + 0x291688, + 0x29d24c, + 0x3124c5, + 0x29d888, + 0x385d86, + 0x24c9c6, + 0x35eb07, + 0x206145, + 0x281845, + 0x231689, + 0x2139c7, + 0x21e705, + 0x22aec7, + 0x20b003, + 0x2c2d45, + 0x2151c8, + 0x280d47, + 0x29b589, + 0x2e9f85, + 0x33e384, + 0x2a0288, + 0x2f16c7, + 0x2cae88, + 0x3aac88, + 0x2e1dc5, + 0x21e546, + 0x29d706, + 0x3a7009, + 0x2cb3c7, + 0x2ac8c6, + 0x206e87, + 0x239fc3, + 0x267d84, + 0x2cfcc5, + 0x2f3f84, + 0x246804, + 0x27ffc7, + 0x340d87, + 0x26dc84, + 0x29b3d0, + 0x31d507, + 0x3c6485, + 0x2561cc, + 0x224a04, + 0x2c4c88, + 0x278209, + 0x375886, + 0x240088, + 0x21ca84, + 0x276ec8, + 0x234506, + 0x22eb48, + 0x29a086, + 0x28854b, + 0x38ddc5, + 0x2cfb48, + 0x2173c4, + 0x345e0a, + 0x29b589, + 0x33a106, + 0x218bc8, + 0x25ed85, + 0x31dec4, + 0x2c4b86, + 0x23c588, + 0x27fd48, + 0x32c806, + 0x3c2344, + 0x240c06, + 0x2faac7, + 0x277d87, + 0x2692cf, + 0x205847, + 0x27de47, + 0x351685, + 0x35e345, + 0x29f789, + 0x382e46, + 0x27d885, + 0x2813c7, + 0x3934c8, + 0x2c7645, + 0x33a206, + 0x227088, + 0x2eee4a, + 0x3bf088, + 0x28ab07, + 0x2e7e06, + 0x2a2f86, + 0x202583, + 0x20de03, + 0x27fa09, + 0x2b2dc9, + 0x2c4a86, + 0x2e9f85, + 0x36bac8, + 0x218bc8, + 0x362d48, + 0x34710b, + 0x2c9547, + 0x309149, + 0x269548, + 0x350284, + 0x318648, + 0x28c889, + 0x2acbc5, + 0x35c547, + 0x267e05, + 0x27fc48, + 0x28eb4b, + 0x295d90, + 0x2a8145, + 0x21730c, + 0x242485, + 0x27cc03, + 0x2b1d06, + 0x2bd884, + 0x2404c6, + 0x29bfc7, + 0x227104, + 0x248688, + 0x2009cd, + 0x2dfc05, + 0x211904, + 0x28f244, + 0x28f249, + 0x2ae548, + 0x31bc47, + 0x234588, + 0x280508, + 0x279705, + 0x21f2c7, + 0x279687, + 0x23ac87, + 0x26a989, + 0x346dc9, + 0x272146, + 0x383b46, + 0x269506, + 0x33b6c5, + 0x3aa4c4, + 0x3bd646, + 0x3c4c46, + 0x279748, + 0x2241cb, + 0x26e687, + 0x20f984, + 0x23a106, + 0x2a1b47, + 0x335405, + 0x3583c5, + 0x223884, + 0x346d46, + 0x3bd6c8, + 0x2867c9, + 0x2091c6, + 0x2848c8, + 0x2d4786, + 0x350908, + 0x2ce58c, + 0x2795c6, + 0x296c0d, + 0x29708b, + 0x29ccc5, + 0x3c1e87, + 0x31e3c6, + 0x3199c8, + 0x2721c9, + 0x329dc8, + 0x3c6485, + 0x208947, + 0x27ed48, + 0x24ff89, + 0x2a5586, + 0x24da8a, + 0x319748, + 0x329c0b, + 0x2ccd8c, + 0x276fc8, + 0x27b286, + 0x21dfc8, + 0x2eeac7, + 0x205989, + 0x2f084d, + 0x2974c6, + 0x31dd48, + 0x385949, + 0x2bc8c8, + 0x281f48, + 0x2bec8c, + 0x2bff87, + 0x2c0a47, + 0x26a7c5, + 0x2b4807, + 0x393388, + 0x2c4c06, + 0x20904c, + 0x2ec1c8, + 0x2c8c48, + 0x250286, + 0x223dc7, + 0x272344, + 0x26d1c8, + 0x2b6d0c, + 0x28430c, + 0x307145, + 0x2047c7, + 0x3c22c6, + 0x223d46, + 0x331d08, + 0x367784, + 0x22df0b, + 0x2b744b, + 0x2e7e06, + 0x200847, + 0x322385, + 0x271285, + 0x22e046, + 0x25ed45, + 0x327e85, + 0x2c6c87, + 0x270a09, + 0x3b8dc4, + 0x25f405, + 0x2de045, + 0x2add08, + 0x2da405, + 0x287109, + 0x2c9ac7, + 0x2c9acb, + 0x24fbc6, + 0x241f09, + 0x338908, + 0x291f85, + 0x23ad88, + 0x346e08, + 0x2570c7, + 0x208e47, + 0x280049, + 0x22ea87, + 0x2aa389, + 0x2b7dcc, + 0x394848, + 0x2d4d09, + 0x2d6447, + 0x2805c9, + 0x340ec7, + 0x2cce88, + 0x3b6745, + 0x385f86, + 0x2bf888, + 0x30d3c8, + 0x27f709, + 0x327ec7, + 0x256d85, + 0x2301c9, + 0x201c46, + 0x28e304, + 0x326006, + 0x311288, + 0x328747, + 0x2243c8, + 0x20c7c9, + 0x325b87, + 0x29d0c6, + 0x3c1f44, + 0x38e0c9, + 0x21f148, + 0x250147, + 0x2adf86, + 0x224106, + 0x2392c4, + 0x26d846, + 0x20af83, + 0x38d949, + 0x38dd86, + 0x20ca45, + 0x29d606, + 0x2c7205, + 0x27f1c8, + 0x2ee987, + 0x2eb146, + 0x3262c6, + 0x36fd08, + 0x29f907, + 0x297505, + 0x29b1c8, + 0x3b2448, + 0x319748, + 0x242345, + 0x386006, + 0x231589, + 0x3a6e84, + 0x2c708b, + 0x229d4b, + 0x22f789, + 0x20b003, + 0x25cf45, + 0x22abc6, + 0x242cc8, + 0x34e904, + 0x27c986, + 0x37c709, + 0x2f0405, + 0x2c6bc6, + 0x2f16c6, + 0x20c984, + 0x2a86ca, + 0x20c988, + 0x30d3c6, + 0x2934c5, + 0x331287, + 0x351547, + 0x21e544, + 0x229f87, + 0x22dc84, + 0x22dc86, + 0x200b43, + 0x26a985, + 0x37dc85, + 0x364648, + 0x257d85, + 0x279309, + 0x26d007, + 0x26d00b, + 0x2a240c, + 0x2a2a0a, + 0x2f0707, + 0x205c83, + 0x2ebcc8, + 0x242505, + 0x2c76c5, + 0x34c944, + 0x2ccd86, + 0x278206, + 0x26d887, + 0x23f8cb, + 0x29b244, + 0x2d7404, + 0x2c2784, + 0x2c6986, + 0x227104, + 0x2ba748, + 0x34c745, + 0x258a45, + 0x362c87, + 0x3c1f89, + 0x351305, + 0x37f30a, + 0x245d89, + 0x2d698a, + 0x23ee89, + 0x3a5104, + 0x306945, + 0x2d9448, + 0x2e184b, + 0x29ef05, + 0x2f3206, + 0x247684, + 0x279846, + 0x325a09, + 0x2a1c47, + 0x3b3248, + 0x2a0946, + 0x2faa47, + 0x27fd48, + 0x37f886, + 0x334f84, + 0x371c87, + 0x361205, + 0x373507, + 0x21c984, + 0x31e346, + 0x2e5bc8, + 0x297248, + 0x2e44c7, + 0x24e388, + 0x295645, + 0x20ae44, + 0x266a48, + 0x24e484, + 0x208e45, + 0x2f8e44, + 0x309607, + 0x2888c7, + 0x280708, + 0x2cb006, + 0x257d05, + 0x279108, + 0x3bf288, + 0x29ec49, + 0x22a046, + 0x233f88, + 0x345c8a, + 0x335488, + 0x2def85, + 0x225446, + 0x245c48, + 0x208a0a, + 0x229207, + 0x285dc5, + 0x28e508, + 0x2cc2c4, + 0x2d4906, + 0x2c0dc8, + 0x20aec6, + 0x31fc48, + 0x25b247, + 0x2063c6, + 0x2b8304, + 0x2a6b87, + 0x2b0d44, + 0x3259c7, + 0x2a524d, + 0x22f805, + 0x2e13cb, + 0x284586, + 0x258608, + 0x248644, + 0x2ee246, + 0x27bcc6, + 0x21e307, + 0x2968cd, + 0x24b947, + 0x2b1808, + 0x286985, + 0x364e48, + 0x2c2786, + 0x2956c8, + 0x354486, + 0x336b47, + 0x2c5689, + 0x353e07, + 0x287ac8, + 0x2733c5, + 0x21c508, + 0x223c85, + 0x2f7505, + 0x23f105, + 0x24c4c3, + 0x277884, + 0x28e705, + 0x36d849, + 0x36b906, + 0x2a1908, + 0x208c05, + 0x2b46c7, + 0x29f14a, + 0x2c6b09, + 0x288a8a, + 0x2cdb48, + 0x22ad0c, + 0x28144d, + 0x34a703, + 0x31fb48, + 0x3be485, + 0x2eec06, + 0x318086, + 0x2deac5, + 0x206f89, + 0x3ab885, + 0x279108, + 0x25e046, + 0x3532c6, + 0x2a0149, + 0x3a0f87, + 0x28ee06, + 0x29f0c8, + 0x36bc48, + 0x2d8b87, + 0x2be3ce, + 0x2c29c5, + 0x24fe85, + 0x20adc8, + 0x3269c7, + 0x208f82, + 0x2be804, + 0x2403ca, + 0x250208, + 0x346f46, + 0x298c88, + 0x29d706, + 0x31da88, + 0x2ac8c8, + 0x2f74c4, + 0x2b4a85, + 0x6010c4, + 0x6010c4, + 0x6010c4, + 0x200a43, + 0x223f86, + 0x2795c6, + 0x29c98c, + 0x201343, + 0x21c986, + 0x200b04, + 0x2a3308, + 0x37c545, + 0x2404c6, + 0x2bc408, + 0x2cef86, + 0x2eb0c6, + 0x339f08, + 0x2cfd47, + 0x22e849, + 0x2a714a, + 0x211644, + 0x22dcc5, + 0x2b3e05, + 0x2c5406, + 0x211906, + 0x29c706, + 0x2f8686, + 0x22e984, + 0x22e98b, + 0x22d744, + 0x242085, + 0x2ab5c5, + 0x29f506, + 0x369808, + 0x281307, + 0x38dd04, + 0x2076c3, + 0x2cbdc5, + 0x22dac7, + 0x28120b, + 0x364547, + 0x2bc308, + 0x2b4bc7, + 0x26be06, + 0x251c48, + 0x26f24b, + 0x217b46, + 0x216b09, + 0x26f3c5, + 0x30eb43, + 0x2c6bc6, + 0x25b148, + 0x20c843, + 0x22dbc3, + 0x27fd46, + 0x29d706, + 0x36808a, + 0x27b2c5, + 0x27bb0b, + 0x29d54b, + 0x247b03, + 0x220043, + 0x2af944, + 0x2a88c7, + 0x25b1c4, + 0x240084, + 0x385c04, + 0x335788, + 0x293408, + 0x20dd89, + 0x2c5248, + 0x23f387, + 0x2060c6, + 0x2a154f, + 0x2c2b06, + 0x2cd084, + 0x29324a, + 0x22d9c7, + 0x2b0e46, + 0x28e349, + 0x20dd05, + 0x364785, + 0x20de46, + 0x21c643, + 0x2cc309, + 0x221e46, + 0x20c589, + 0x38f4c6, + 0x26a985, + 0x307545, + 0x205843, + 0x2a8a08, + 0x31be07, + 0x21e644, + 0x2a3188, + 0x24c744, + 0x39a286, + 0x2b1d06, + 0x2445c6, + 0x2cfa09, + 0x2c7645, + 0x2975c6, + 0x21afc9, + 0x393086, + 0x2a1646, + 0x395846, + 0x203a45, + 0x2f8e46, + 0x336b44, + 0x3b6745, + 0x2bf884, + 0x2b2206, + 0x31e4c4, + 0x200d03, + 0x284b85, + 0x238888, + 0x2509c7, + 0x34e989, + 0x285cc8, + 0x297d51, + 0x2f174a, + 0x2e7d47, + 0x396246, + 0x200b04, + 0x2bf988, + 0x26d9c8, + 0x297f0a, + 0x286ecd, + 0x24c486, + 0x33a006, + 0x2a6c46, + 0x283447, + 0x2b18c5, + 0x341987, + 0x2009c5, + 0x2c9c04, + 0x2a7586, + 0x26d6c7, + 0x2cc00d, + 0x245b87, + 0x344c48, + 0x279409, + 0x225346, + 0x2a5505, + 0x2fa084, + 0x311386, + 0x21e446, + 0x250386, + 0x299508, + 0x21d683, + 0x208d83, + 0x341f45, + 0x257e46, + 0x2ac885, + 0x2a0b48, + 0x29c18a, + 0x39e284, + 0x2a3308, + 0x291688, + 0x29f347, + 0x208cc9, + 0x2bc008, + 0x286847, + 0x385e86, + 0x20aeca, + 0x311408, + 0x3a6b09, + 0x2ae608, + 0x228089, + 0x396107, + 0x2fea85, + 0x347306, + 0x2c4a88, + 0x27a888, + 0x28de08, + 0x38ab08, + 0x242085, + 0x203bc4, + 0x236ec8, + 0x209784, + 0x23ec84, + 0x26a985, + 0x290647, + 0x3c1d49, + 0x21e107, + 0x214045, + 0x276dc6, + 0x35bc86, + 0x211a84, + 0x2a0486, + 0x257b44, + 0x2a11c6, + 0x3c1b06, + 0x2181c6, + 0x3c6485, + 0x2a0a07, + 0x205c83, + 0x216e89, + 0x36fb08, + 0x2866c4, + 0x2866cd, + 0x297348, + 0x2ddc88, + 0x3a6a86, + 0x2c5789, + 0x2c6b09, + 0x325705, + 0x29c28a, + 0x252a0a, + 0x25e6cc, + 0x25e846, + 0x277c06, + 0x2c2c86, + 0x372b09, + 0x2eee46, + 0x29f946, + 0x3ab946, + 0x26d1c8, + 0x24e386, + 0x2cca0b, + 0x2907c5, + 0x258a45, + 0x277e85, + 0x3c2806, + 0x20ae83, + 0x244546, + 0x245b07, + 0x2bf845, + 0x3108c5, + 0x340045, + 0x2f83c6, + 0x3257c4, + 0x327886, + 0x2bad89, + 0x3c268c, + 0x2c9948, + 0x23c504, + 0x2f8b46, + 0x284686, + 0x25b148, + 0x218bc8, + 0x3c2589, + 0x331287, + 0x24ed89, + 0x37ba46, + 0x230604, + 0x20d804, + 0x280344, + 0x27fd48, + 0x3c1b8a, + 0x351286, + 0x35e207, + 0x36e207, + 0x242005, + 0x2b3dc4, + 0x28c846, + 0x2b1906, + 0x23a283, + 0x36f947, + 0x3aab88, + 0x32584a, + 0x22ca48, + 0x3643c8, + 0x31e505, + 0x29cdc5, + 0x26e785, + 0x2423c6, + 0x243286, + 0x340cc5, + 0x38db89, + 0x2b3bcc, + 0x26e847, + 0x297f88, + 0x2dee05, + 0x6010c4, + 0x24d184, + 0x280e84, + 0x21b846, + 0x29e4ce, + 0x364807, + 0x283645, + 0x3a6e0c, + 0x2f8f47, + 0x26d647, + 0x2f4449, + 0x21be49, + 0x285dc5, + 0x36fb08, + 0x231589, + 0x319605, + 0x2bf788, + 0x221fc6, + 0x266cc6, + 0x3c2f44, + 0x28b688, + 0x225503, + 0x3875c4, + 0x2cbe45, + 0x388307, + 0x228785, + 0x345b49, + 0x2ab04d, + 0x2b0486, + 0x207704, + 0x2ba8c8, + 0x27084a, + 0x228b87, + 0x31ce85, + 0x23b3c3, + 0x29d70e, + 0x2a8b0c, + 0x2ff747, + 0x29e687, + 0x217b83, + 0x2eee85, + 0x280e85, + 0x299048, + 0x2963c9, + 0x23c406, + 0x25b1c4, + 0x2e7c86, + 0x23390b, + 0x38320c, + 0x33a8c7, + 0x2cccc5, + 0x3b2348, + 0x2d8945, + 0x293247, + 0x2f1587, + 0x245945, + 0x20ae83, + 0x335ac4, + 0x22a385, + 0x3b8cc5, + 0x3b8cc6, + 0x2b5308, + 0x26d6c7, + 0x318386, + 0x205c06, + 0x23f046, + 0x27e509, + 0x21f3c7, + 0x250646, + 0x383386, + 0x275d06, + 0x2a8605, + 0x3c53c6, + 0x3746c5, + 0x2da488, + 0x28ff4b, + 0x28c586, + 0x36e244, + 0x2e0409, + 0x26d004, + 0x221f48, + 0x326107, + 0x281e44, + 0x2bb308, + 0x2c0844, + 0x2a8644, + 0x286605, + 0x2dfc46, + 0x3356c7, + 0x27f283, + 0x29d185, + 0x32ce84, + 0x24fec6, + 0x325788, + 0x2b6c05, + 0x28fc09, + 0x2303c5, + 0x21c988, + 0x2312c7, + 0x38de88, + 0x2ba487, + 0x27df09, + 0x2820c6, + 0x305706, + 0x2b3084, + 0x2d7345, + 0x300a4c, + 0x277e87, + 0x278a87, + 0x36e0c8, + 0x2b0486, + 0x271484, + 0x30a244, + 0x27fec9, + 0x2c2d86, + 0x282687, + 0x277784, + 0x24d786, + 0x317bc5, + 0x2cab47, + 0x2cc986, + 0x24d949, + 0x383047, + 0x2692c7, + 0x29ffc6, + 0x24d6c5, + 0x27d1c8, + 0x221cc8, + 0x348546, + 0x2b6c45, + 0x349e86, + 0x206543, + 0x298ec9, + 0x29c48e, + 0x2ba1c8, + 0x24c848, + 0x34834b, + 0x28fe46, + 0x211584, + 0x281044, + 0x29c58a, + 0x217207, + 0x250705, + 0x216b09, + 0x2be305, + 0x23ecc7, + 0x24e304, + 0x2a9a47, + 0x2e7f08, + 0x2e0dc6, + 0x24c589, + 0x2bc10a, + 0x217186, + 0x296e86, + 0x2ab545, + 0x3898c5, + 0x347ac7, + 0x24cf08, + 0x317b08, + 0x2f74c6, + 0x3075c5, + 0x21168e, + 0x2bc7c4, + 0x298fc5, + 0x276749, + 0x382c48, + 0x28aa46, + 0x29accc, + 0x29bd90, + 0x29e10f, + 0x29f688, + 0x2f0707, + 0x3c6485, + 0x28e705, + 0x335549, + 0x28e709, + 0x240d06, + 0x29ef87, + 0x2d7245, + 0x34d589, + 0x33f106, + 0x2eec8d, + 0x280209, + 0x240084, + 0x2b9f48, + 0x236f89, + 0x351446, + 0x2ebec5, + 0x305706, + 0x3b3109, + 0x277608, + 0x212305, + 0x28b684, + 0x29ae8b, + 0x351305, + 0x242d46, + 0x281786, + 0x285206, + 0x28f64b, + 0x28fd09, + 0x205b45, + 0x396e87, + 0x2f16c6, + 0x240206, + 0x280c08, + 0x2dfd49, + 0x344a0c, + 0x22d8c8, + 0x308ec6, + 0x32c803, + 0x32a506, + 0x27ddc5, + 0x27be48, + 0x306fc6, + 0x2cad88, + 0x2062c5, + 0x27a585, + 0x365288, + 0x31dc07, + 0x317fc7, + 0x26d887, + 0x240088, + 0x2c5508, + 0x2b1206, + 0x2b2047, + 0x267c47, + 0x28f34a, + 0x256c83, + 0x3c2806, + 0x23c645, + 0x2403c4, + 0x279409, + 0x27de84, + 0x250a44, + 0x29a104, + 0x29e68b, + 0x31bd47, + 0x2118c5, + 0x295348, + 0x276dc6, + 0x276dc8, + 0x27b206, + 0x28b5c5, + 0x28b885, + 0x28d446, + 0x28dbc8, + 0x28e288, + 0x2795c6, + 0x29518f, + 0x298990, + 0x369405, + 0x205c83, + 0x2306c5, + 0x309088, + 0x28e609, + 0x319748, + 0x24c408, + 0x238d88, + 0x31be07, + 0x276a89, + 0x2caf88, + 0x28dac4, + 0x299f88, + 0x2addc9, + 0x2b38c7, + 0x299f04, + 0x21e1c8, + 0x2a07ca, + 0x2aff86, + 0x24c486, + 0x229f09, + 0x29bfc7, + 0x2c83c8, + 0x345608, + 0x294048, + 0x25d345, + 0x38a705, + 0x258a45, + 0x280e45, + 0x37ffc7, + 0x20ae85, + 0x2bf845, + 0x206d86, + 0x319687, + 0x2e1787, + 0x2a0ac6, + 0x2ce085, + 0x242d46, + 0x24c685, + 0x2d70c8, + 0x2ff5c4, + 0x393106, + 0x334e84, + 0x31dec8, + 0x22f10a, + 0x27a34c, + 0x23fac5, + 0x283506, + 0x344bc6, + 0x341e06, + 0x308f44, + 0x317e85, + 0x27b047, + 0x29c049, + 0x2c6f47, + 0x6010c4, + 0x6010c4, + 0x31bbc5, + 0x2cb984, + 0x29a68a, + 0x276c46, + 0x251e84, + 0x204745, + 0x36c3c5, + 0x2b1804, + 0x2813c7, + 0x230347, + 0x2c6988, + 0x31fec8, + 0x212309, + 0x26eec8, + 0x29a84b, + 0x2b7fc4, + 0x37b985, + 0x27d905, + 0x26d809, + 0x2dfd49, + 0x2e0308, + 0x22d748, + 0x29f504, + 0x2846c5, + 0x200583, + 0x2c53c5, + 0x297646, + 0x29620c, + 0x21f046, + 0x2ebdc6, + 0x28acc5, + 0x2f8448, + 0x35ec46, + 0x3963c6, + 0x24c486, + 0x22c7cc, + 0x250544, + 0x23f18a, + 0x28ac08, + 0x296047, + 0x32cd86, + 0x23c4c7, + 0x2e7885, + 0x2adf86, + 0x35aa46, + 0x366207, + 0x250a84, + 0x309705, + 0x276744, + 0x2c9c87, + 0x276988, + 0x277a8a, + 0x27ebc7, + 0x2a8207, + 0x2f0687, + 0x2d8a89, + 0x29620a, + 0x22e943, + 0x250985, + 0x218203, + 0x385c49, + 0x336dc8, + 0x351687, + 0x319849, + 0x221dc6, + 0x3b6808, + 0x33c445, + 0x3bf38a, + 0x200c89, + 0x3299c9, + 0x35eb07, + 0x26dac9, + 0x2180c8, + 0x3663c6, + 0x2836c8, + 0x203a47, + 0x22ea87, + 0x245d87, + 0x2e5a48, + 0x2f89c6, + 0x2a0585, + 0x27b047, + 0x296988, + 0x334e04, + 0x2c8004, + 0x28ed07, + 0x2acc47, + 0x23140a, + 0x366346, + 0x364c4a, + 0x2be747, + 0x2bc587, + 0x3097c4, + 0x2aa444, + 0x2caa46, + 0x23a444, + 0x23a44c, + 0x39ee05, + 0x218a09, + 0x337284, + 0x2b18c5, + 0x2707c8, + 0x239dc5, + 0x37f306, + 0x2311c4, + 0x2d02ca, + 0x2cb2c6, + 0x29180a, + 0x2162c7, + 0x224505, + 0x21c645, + 0x24204a, + 0x28dd45, + 0x29ed06, + 0x209784, + 0x2afac6, + 0x347b85, + 0x307086, + 0x2e44cc, + 0x218d4a, + 0x252b04, + 0x2060c6, + 0x29bfc7, + 0x2cc904, + 0x26d1c8, + 0x2f3106, + 0x211509, + 0x2db609, + 0x394949, + 0x2c7246, + 0x203b46, + 0x283807, + 0x38dac8, + 0x203949, + 0x31bd47, + 0x2954c6, + 0x2faac7, + 0x2a6b05, + 0x2bc7c4, + 0x2833c7, + 0x267e05, + 0x286545, + 0x31f747, + 0x245808, + 0x3b22c6, + 0x2977cd, + 0x29924f, + 0x29d54d, + 0x214084, + 0x238986, + 0x2d0688, + 0x3ab905, + 0x28f508, + 0x256f8a, + 0x240084, + 0x233b46, + 0x2cd107, + 0x2ca387, + 0x2cfe09, + 0x283685, + 0x2b1804, + 0x2b49ca, + 0x2bbbc9, + 0x26dbc7, + 0x297a86, + 0x351446, + 0x284606, + 0x371d46, + 0x2cf6cf, + 0x2d0549, + 0x24e386, + 0x354846, + 0x31ac09, + 0x2b2147, + 0x20be43, + 0x22c946, + 0x20de03, + 0x2de988, + 0x2fa907, + 0x29f889, + 0x2b1b88, + 0x318108, + 0x328006, + 0x21ef89, + 0x399b05, + 0x2b2204, + 0x2e8187, + 0x372b85, + 0x214084, + 0x211988, + 0x2174c4, + 0x2b1e87, + 0x30d686, + 0x395ac5, + 0x2ae608, + 0x35130b, + 0x2b4107, + 0x2422c6, + 0x2c2b84, + 0x2b6286, + 0x26a985, + 0x267e05, + 0x27cf49, + 0x280fc9, + 0x22eac4, + 0x22eb05, + 0x206105, + 0x3bf206, + 0x36fc08, + 0x2bdc86, + 0x3aa9cb, + 0x37570a, + 0x2ba585, + 0x28b906, + 0x39df85, + 0x345405, + 0x29b847, + 0x3c2a88, + 0x24ed84, + 0x39ce86, + 0x28e306, + 0x218287, + 0x30eb04, + 0x27bcc6, + 0x35c745, + 0x35c749, + 0x203d44, + 0x2b3f49, + 0x2795c6, + 0x2c0048, + 0x206105, + 0x36e305, + 0x307086, + 0x344909, + 0x21be49, + 0x2ebe46, + 0x382d48, + 0x2ab188, + 0x39df44, + 0x2b5504, + 0x2b5508, + 0x239d08, + 0x24ee89, + 0x2975c6, + 0x24c486, + 0x32be4d, + 0x27c986, + 0x2ce449, + 0x39e845, + 0x20de46, + 0x2941c8, + 0x3277c5, + 0x267c84, + 0x26a985, + 0x280908, + 0x29a449, + 0x276804, + 0x31e346, + 0x251f0a, + 0x2ff648, + 0x231589, + 0x25890a, + 0x3197c6, + 0x299408, + 0x293005, + 0x28ae88, + 0x2e7905, + 0x221c89, + 0x376189, + 0x23c442, + 0x26f3c5, + 0x2ebf86, + 0x279507, + 0x38c8c5, + 0x30d2c6, + 0x304c08, + 0x2b0486, + 0x2d9309, + 0x278b86, + 0x280a88, + 0x2a9605, + 0x382106, + 0x336c48, + 0x27fd48, + 0x396008, + 0x2f2388, + 0x3c53c4, + 0x21e583, + 0x2d9544, + 0x27edc6, + 0x2a6b44, + 0x24c787, + 0x3962c9, + 0x3c0485, + 0x345606, + 0x22c946, + 0x2b514b, + 0x2b0d86, + 0x293b06, + 0x393208, + 0x310806, + 0x224303, + 0x3c4e83, + 0x2bc7c4, + 0x233e85, + 0x2d45c7, + 0x276988, + 0x27698f, + 0x27af4b, + 0x36fa08, + 0x31e3c6, + 0x36fd0e, + 0x242483, + 0x2d4544, + 0x2b0d05, + 0x2b1686, + 0x28c94b, + 0x290706, + 0x227109, + 0x395ac5, + 0x2eccc8, + 0x20e688, + 0x21bd0c, + 0x29e6c6, + 0x2c5406, + 0x2e9f85, + 0x287888, + 0x27a345, + 0x350288, + 0x29b04a, + 0x29d989, + 0x6010c4, + 0x200742, + 0x3c202c42, + 0x202542, + 0x26ff84, + 0x201e42, + 0x21a484, + 0x2032c2, + 0x200342, + 0x207c02, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x210143, + 0x28b304, + 0x20be03, + 0x23d744, + 0x237583, + 0x2d2484, + 0x30e843, + 0x38cb87, + 0x21f743, + 0x20ae43, + 0x310f08, + 0x241d03, + 0x2a47cb, + 0x2e8943, + 0x3a25c6, + 0x20e982, + 0x3987cb, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x241d03, + 0x2098c3, + 0x204543, + 0x200742, + 0xcd588, + 0x3574c5, + 0x267e88, + 0x2e2e08, + 0x202c42, + 0x3325c5, + 0x331707, + 0x201b02, + 0x248887, + 0x202542, + 0x256807, + 0x3c0b89, + 0x292bc8, + 0x293ec9, + 0x247342, + 0x269ec7, + 0x329844, + 0x3317c7, + 0x375607, + 0x25fe82, + 0x21f743, + 0x20d682, + 0x2032c2, + 0x200342, + 0x20b182, + 0x200382, + 0x207c02, + 0x2a9105, + 0x24d0c5, + 0x2c42, + 0x37583, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x10c43, + 0x781, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x21a003, + 0x3f0eca46, + 0x6f803, + 0x7f685, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x9482, + 0xcd588, + 0xae43, + 0xaff03, + 0x4cec4, + 0xd8d45, + 0x200742, + 0x3a4c04, + 0x20be03, + 0x237583, + 0x30e843, + 0x3a2f03, + 0x232585, + 0x214bc3, + 0x20f003, + 0x20ec83, + 0x228803, + 0x241d03, + 0x207c03, + 0x2605c3, + 0x203f83, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x241d03, + 0xcd588, + 0x30e843, + 0xaff03, + 0xcd588, + 0xaff03, + 0x2b8283, + 0x20be03, + 0x234784, + 0x237583, + 0x30e843, + 0x207d02, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x207d02, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x2e2d83, + 0x207c03, + 0x200742, + 0x202c42, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x3a25c5, + 0x9a2c6, + 0x28b304, + 0x20e982, + 0xcd588, + 0x200742, + 0x20288, + 0x132983, + 0x202c42, + 0x43490186, + 0x12b44, + 0x10bfcb, + 0x41546, + 0x1f847, + 0x237583, + 0x52748, + 0x30e843, + 0xef4c5, + 0xe84, + 0x222003, + 0x56c47, + 0xd4344, + 0x20ec83, + 0xafd44, + 0xaff03, + 0x241d03, + 0x2e9484, + 0xfdb48, + 0x157206, + 0x10d08, + 0x135fc5, + 0x126749, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ae43, + 0x241d03, + 0x2e8943, + 0x20e982, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff83, + 0x226444, + 0x20ec83, + 0xae43, + 0x241d03, + 0x20be03, + 0x237583, + 0x2d2484, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x3a25c6, + 0x237583, + 0x30e843, + 0x181c43, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x1f847, + 0xcd588, + 0x30e843, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x45e0be03, + 0x237583, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x200742, + 0x202c42, + 0x20be03, + 0x30e843, + 0x20ec83, + 0x200342, + 0x241d03, + 0x32e147, + 0x23b00b, + 0x205d03, + 0x2409c8, + 0x38d847, + 0x224906, + 0x2c1605, + 0x38e5c9, + 0x21f4c8, + 0x36b4c9, + 0x39b210, + 0x36b4cb, + 0x2f6809, + 0x207503, + 0x22bcc9, + 0x235b46, + 0x235b4c, + 0x357588, + 0x3c02c8, + 0x200289, + 0x2e244e, + 0x3c094b, + 0x34754c, + 0x205583, + 0x27b80c, + 0x205589, + 0x2fbf47, + 0x2374cc, + 0x2ad14a, + 0x253404, + 0x32a08d, + 0x27b6c8, + 0x21014d, + 0x28a7c6, + 0x28b30b, + 0x31d689, + 0x27a747, + 0x3c2c46, + 0x341409, + 0x32538a, + 0x313048, + 0x2e8544, + 0x332e87, + 0x249d47, + 0x277984, + 0x2d9a44, + 0x386dc9, + 0x364209, + 0x215cc8, + 0x2108c5, + 0x2ea8c5, + 0x20d106, + 0x329f49, + 0x25720d, + 0x2f3308, + 0x20d007, + 0x2c1688, + 0x23ce86, + 0x37ce44, + 0x286c45, + 0x203846, + 0x2043c4, + 0x205487, + 0x2081ca, + 0x213904, + 0x2170c6, + 0x217d49, + 0x217d4f, + 0x21870d, + 0x218fc6, + 0x21fe90, + 0x220286, + 0x220807, + 0x221447, + 0x22144f, + 0x222149, + 0x228d46, + 0x22a1c7, + 0x22a1c8, + 0x22b089, + 0x395b88, + 0x2de4c7, + 0x22cf43, + 0x3bcfc6, + 0x3bbb08, + 0x2e270a, + 0x387809, + 0x212743, + 0x331606, + 0x39ccca, + 0x2e4b47, + 0x2fbd8a, + 0x209a8e, + 0x222286, + 0x26f5c7, + 0x21bac6, + 0x205646, + 0x38a50b, + 0x34edca, + 0x309b0d, + 0x203c07, + 0x260648, + 0x260649, + 0x26064f, + 0x34eb0c, + 0x27c0c9, + 0x2d778e, + 0x38cc8a, + 0x293886, + 0x2f9f06, + 0x313ccc, + 0x315a8c, + 0x328308, + 0x353d07, + 0x26d545, + 0x290504, + 0x202c4e, + 0x266f84, + 0x3188c7, + 0x39270a, + 0x3a2a54, + 0x3b92cf, + 0x221608, + 0x3bce88, + 0x33984d, + 0x33984e, + 0x231a09, + 0x232b08, + 0x232b0f, + 0x2371cc, + 0x2371cf, + 0x2386c7, + 0x23dfca, + 0x22c54b, + 0x23f5c8, + 0x242707, + 0x2616cd, + 0x20a286, + 0x32a246, + 0x2443c9, + 0x2a6f88, + 0x249208, + 0x24920e, + 0x23b107, + 0x24b505, + 0x24cc85, + 0x204c44, + 0x224bc6, + 0x215bc8, + 0x322ec3, + 0x397e4e, + 0x261a88, + 0x2ae14b, + 0x301c07, + 0x2f7305, + 0x27b986, + 0x2aab07, + 0x2f4848, + 0x317909, + 0x20a505, + 0x284bc8, + 0x226e06, + 0x39caca, + 0x202b49, + 0x237589, + 0x23758b, + 0x3211c8, + 0x277849, + 0x210986, + 0x36db0a, + 0x2bf50a, + 0x23e1cc, + 0x21e907, + 0x2929ca, + 0x211d0b, + 0x211d19, + 0x30a988, + 0x3a2645, + 0x261886, + 0x26ba89, + 0x358706, + 0x2d340a, + 0x21f6c6, + 0x225784, + 0x2c380d, + 0x323307, + 0x225789, + 0x24e685, + 0x251548, + 0x252509, + 0x252944, + 0x253307, + 0x253308, + 0x253907, + 0x268688, + 0x257887, + 0x205dc5, + 0x25d60c, + 0x25de49, + 0x30590a, + 0x3a0e09, + 0x22bdc9, + 0x37924c, + 0x26004b, + 0x260dc8, + 0x261e88, + 0x265244, + 0x281b08, + 0x283c89, + 0x2ad207, + 0x217f86, + 0x31d907, + 0x3843c9, + 0x335c0b, + 0x2b6107, + 0x3c6847, + 0x216407, + 0x2100c4, + 0x2100c5, + 0x278845, + 0x34c04b, + 0x3ad084, + 0x320d08, + 0x28550a, + 0x226ec7, + 0x35b207, + 0x28c112, + 0x2a10c6, + 0x234106, + 0x2b658e, + 0x2a4a86, + 0x291508, + 0x291a8f, + 0x210508, + 0x38b748, + 0x2bb78a, + 0x2bb791, + 0x2a0d4e, + 0x242a0a, + 0x242a0c, + 0x232d07, + 0x232d10, + 0x3c4cc8, + 0x2a0f45, + 0x2aae0a, + 0x20440c, + 0x29580d, + 0x2fc446, + 0x2fc447, + 0x2fc44c, + 0x30460c, + 0x214bcc, + 0x2ab88b, + 0x3706c4, + 0x211dc4, + 0x388489, + 0x30a2c7, + 0x23dd89, + 0x2bf349, + 0x2ace07, + 0x2acfc6, + 0x2acfc9, + 0x2ad3c3, + 0x2b058a, + 0x31b487, + 0x3491cb, + 0x30998a, + 0x3298c4, + 0x316946, + 0x27ee49, + 0x23a2c4, + 0x39eeca, + 0x2425c5, + 0x2bcbc5, + 0x2bcbcd, + 0x2bcf0e, + 0x2d9685, + 0x32d506, + 0x3a21c7, + 0x25d88a, + 0x37a506, + 0x2e1304, + 0x303707, + 0x246a4b, + 0x23cf47, + 0x3c1a04, + 0x390706, + 0x39070d, + 0x38408c, + 0x20eb46, + 0x2f350a, + 0x27c686, + 0x285788, + 0x354b47, + 0x23618a, + 0x24b386, + 0x203b03, + 0x294346, + 0x3bb988, + 0x38860a, + 0x2cb107, + 0x2cb108, + 0x30df84, + 0x28c687, + 0x201cc8, + 0x2a12c8, + 0x2827c8, + 0x2b130a, + 0x2d8345, + 0x20be87, + 0x242853, + 0x25a706, + 0x21b388, + 0x227609, + 0x248748, + 0x32808b, + 0x318488, + 0x246b84, + 0x365386, + 0x311e06, + 0x2dfa89, + 0x382807, + 0x25d708, + 0x29c806, + 0x31f644, + 0x341d05, + 0x2c7e48, + 0x34398a, + 0x2c3488, + 0x2c8906, + 0x29960a, + 0x3b8e48, + 0x2cc708, + 0x2cd2c8, + 0x2cdd46, + 0x2d0886, + 0x3a08cc, + 0x2d0e10, + 0x29ea45, + 0x210308, + 0x394490, + 0x210310, + 0x39b08e, + 0x3a054e, + 0x3a0554, + 0x3a624f, + 0x3a6606, + 0x321391, + 0x31eb13, + 0x31ef88, + 0x3ab285, + 0x240f08, + 0x387b05, + 0x2da18c, + 0x22cd09, + 0x290349, + 0x22d187, + 0x251309, + 0x24f147, + 0x2f8cc6, + 0x286a47, + 0x2034c5, + 0x210c83, + 0x323089, + 0x2278c9, + 0x381c43, + 0x38c7c4, + 0x326f0d, + 0x347c8f, + 0x31f685, + 0x3175c6, + 0x225a47, + 0x357307, + 0x2f5886, + 0x2f588b, + 0x2a2bc5, + 0x25f106, + 0x2f9d87, + 0x258249, + 0x375d06, + 0x322285, + 0x374e4b, + 0x3be7c6, + 0x229a85, + 0x27dc08, + 0x2b2bc8, + 0x2aec8c, + 0x2aec90, + 0x2ae949, + 0x2bd487, + 0x2d8e4b, + 0x306746, + 0x2de38a, + 0x2df80b, + 0x2e094a, + 0x2e0bc6, + 0x2e2c45, + 0x31b1c6, + 0x2b7048, + 0x22d24a, + 0x3394dc, + 0x2e8a0c, + 0x2e8d08, + 0x3a25c5, + 0x361f87, + 0x209946, + 0x270bc5, + 0x21a846, + 0x2f5a48, + 0x2bbe47, + 0x2e2348, + 0x25a7ca, + 0x225b4c, + 0x20d309, + 0x345787, + 0x222a04, + 0x24cd46, + 0x38b2ca, + 0x2bf445, + 0x2110cc, + 0x213588, + 0x373608, + 0x2238cc, + 0x2dd30c, + 0x329409, + 0x329647, + 0x24398c, + 0x22c044, + 0x2482ca, + 0x3033cc, + 0x27280b, + 0x372f0b, + 0x253786, + 0x258d87, + 0x232f47, + 0x232f4f, + 0x2fce11, + 0x2d5b52, + 0x2590cd, + 0x2590ce, + 0x25940e, + 0x3a6408, + 0x3a6412, + 0x25eb08, + 0x38d487, + 0x2556ca, + 0x355cc8, + 0x2a4a45, + 0x37fe0a, + 0x220607, + 0x316244, + 0x221b83, + 0x378205, + 0x2bba07, + 0x307c47, + 0x295a0e, + 0x399e8d, + 0x39b5c9, + 0x20fd85, + 0x3b00c3, + 0x20ab06, + 0x25f705, + 0x2ae388, + 0x2b9609, + 0x2618c5, + 0x2618cf, + 0x2e2a87, + 0x38e505, + 0x3025ca, + 0x2b1506, + 0x25b949, + 0x2f640c, + 0x2f80c9, + 0x3be506, + 0x28530c, + 0x32c906, + 0x2fb508, + 0x2fba86, + 0x30ab06, + 0x2b0f04, + 0x30d603, + 0x38668a, + 0x216711, + 0x27c28a, + 0x272085, + 0x282347, + 0x25ab47, + 0x201dc4, + 0x201dcb, + 0x293d48, + 0x2ba046, + 0x36e145, + 0x33d8c4, + 0x362109, + 0x202a84, + 0x249047, + 0x300585, + 0x300587, + 0x2b67c5, + 0x3482c3, + 0x38d348, + 0x317c4a, + 0x27f283, + 0x35750a, + 0x3b3486, + 0x26164f, + 0x3b8349, + 0x397dd0, + 0x2effc8, + 0x2c8d49, + 0x296707, + 0x39068f, + 0x319c04, + 0x2d2504, + 0x220106, + 0x34f846, + 0x2d6e8a, + 0x253c06, + 0x352987, + 0x303f48, + 0x304147, + 0x3049c7, + 0x305b8a, + 0x3083cb, + 0x341ac5, + 0x2d5788, + 0x256703, + 0x3b6b0c, + 0x35d60f, + 0x26d34d, + 0x25e287, + 0x39b709, + 0x36de47, + 0x282c48, + 0x3a2c4c, + 0x2c8a48, + 0x2701c8, + 0x31c38e, + 0x333d94, + 0x3342a4, + 0x35308a, + 0x36becb, + 0x24f204, + 0x24f209, + 0x233bc8, + 0x24d305, + 0x3229ca, + 0x261cc7, + 0x31b0c4, + 0x2b6c03, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x2d0e06, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x214bc3, + 0x2d0e06, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x203d43, + 0x20ec83, + 0xaff03, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x200742, + 0x24be43, + 0x202c42, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x203cc2, + 0x24d5c2, + 0x202c42, + 0x20be03, + 0x20b2c2, + 0x201342, + 0x26ff84, + 0x21a484, + 0x227d42, + 0x226444, + 0x200342, + 0x241d03, + 0x219543, + 0x253786, + 0x230882, + 0x204182, + 0x228382, + 0x48610503, + 0x48a32d03, + 0x5b586, + 0x5b586, + 0x28b304, + 0x20ae43, + 0x15a4a, + 0x3ba4c, + 0x121ecc, + 0x7f48d, + 0x117485, + 0x2aa47, + 0x14ec6, + 0x19148, + 0x1c787, + 0x23288, + 0x1807ca, + 0x102c07, + 0x496d2fc5, + 0x133789, + 0x3c00b, + 0x18754b, + 0x1bdd08, + 0xf608a, + 0x8a34e, + 0x144854b, + 0x12b44, + 0x5f246, + 0x8808, + 0x7e948, + 0x3c2c7, + 0x910c7, + 0x78449, + 0x3a347, + 0x67008, + 0x100249, + 0x170bc4, + 0x191e05, + 0x12f34e, + 0xa964d, + 0x1f6c8, + 0x49b64046, + 0x4a564048, + 0x739c8, + 0x12e350, + 0x5978c, + 0x666c7, + 0x66e47, + 0x6abc7, + 0x704c7, + 0xd0c2, + 0x1807, + 0x14ef8c, + 0x11d107, + 0xa4686, + 0xa5a89, + 0xa7708, + 0x552c2, + 0x1342, + 0x3900b, + 0xafdc7, + 0x25589, + 0x52c49, + 0x142188, + 0xb0102, + 0x1970c9, + 0xc9f8a, + 0xc6209, + 0xd3909, + 0xd50c8, + 0xd6007, + 0xd82c9, + 0xda885, + 0xdae90, + 0x138ac6, + 0x14a345, + 0xeb78d, + 0x2bac6, + 0xe3d47, + 0xe9498, + 0x3a6c8, + 0x14640a, + 0x16f02, + 0x5f88d, + 0x1282, + 0x7dac6, + 0x8cc08, + 0x6e308, + 0xcd449, + 0x1be608, + 0x6f98e, + 0xab07, + 0xfe68d, + 0xf26c5, + 0x1588, + 0x1a1e08, + 0xfeec6, + 0xc842, + 0x157206, + 0xdc2, + 0x2c1, + 0x60a07, + 0x8b003, + 0x49ee9d04, + 0x4a294a43, + 0x101, + 0x13d06, + 0x101, + 0x301, + 0x13d06, + 0x8b003, + 0x140e3c5, + 0x253404, + 0x20be03, + 0x254a04, + 0x26ff84, + 0x20ec83, + 0x2274c5, + 0x21a003, + 0x24f3c3, + 0x2f5805, + 0x203f83, + 0x4b60be03, + 0x237583, + 0x30e843, + 0x200541, + 0x21f743, + 0x21a484, + 0x226444, + 0x20ec83, + 0x241d03, + 0x207c03, + 0xcd588, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x203d43, + 0x201342, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x203f83, + 0xcd588, + 0x35d382, + 0x1851c7, + 0x2c42, + 0x141c85, + 0x598cf, + 0x1455908, + 0x10430e, + 0x4c607402, + 0x31a848, + 0x307206, + 0x2c1146, + 0x306b87, + 0x4ca11a42, + 0x4cfb81c8, + 0x21ed8a, + 0x266148, + 0x200602, + 0x31b2c9, + 0x341b07, + 0x217f06, + 0x38d089, + 0x20bfc4, + 0x20e2c6, + 0x2f2904, + 0x270984, + 0x25cf89, + 0x3030c6, + 0x24d185, + 0x2faf45, + 0x2322c7, + 0x2be9c7, + 0x354984, + 0x306dc6, + 0x2ff205, + 0x309485, + 0x39dec5, + 0x2ea687, + 0x301a45, + 0x319149, + 0x336f85, + 0x2f4984, + 0x37a447, + 0x348a0e, + 0x3aaec9, + 0x2b6449, + 0x335246, + 0x2454c8, + 0x2ee34b, + 0x35bd8c, + 0x33b746, + 0x347407, + 0x2afbc5, + 0x2d9a4a, + 0x215dc9, + 0x366f09, + 0x332706, + 0x2f9b45, + 0x383105, + 0x338689, + 0x39e04b, + 0x275e86, + 0x343ec6, + 0x203104, + 0x28bdc6, + 0x24b588, + 0x3bb806, + 0x21d186, + 0x206888, + 0x209347, + 0x209509, + 0x20acc5, + 0xcd588, + 0x291044, + 0x304f44, + 0x210f05, + 0x3a8c49, + 0x226087, + 0x22608b, + 0x2288ca, + 0x22cc45, + 0x4d207a42, + 0x309847, + 0x4d62cf48, + 0x370a47, + 0x383e85, + 0x32654a, + 0x2c42, + 0x2fac0b, + 0x2579ca, + 0x2277c6, + 0x210d83, + 0x2a4f8d, + 0x3aa74c, + 0x3bedcd, + 0x24e2c5, + 0x37dd45, + 0x322f07, + 0x20b2c9, + 0x21ec86, + 0x253a85, + 0x2ced88, + 0x28bcc3, + 0x2e3108, + 0x28bcc8, + 0x2c2107, + 0x30f108, + 0x3aa549, + 0x286d47, + 0x23ab87, + 0x224788, + 0x38ae04, + 0x38ae07, + 0x28a6c8, + 0x353706, + 0x3b544f, + 0x2293c7, + 0x2de646, + 0x329785, + 0x228503, + 0x391c87, + 0x378183, + 0x253e86, + 0x2553c6, + 0x255b06, + 0x28fa05, + 0x268683, + 0x396d48, + 0x379c89, + 0x38eb4b, + 0x255c88, + 0x257545, + 0x258b85, + 0x4db29982, + 0x286b09, + 0x38d707, + 0x25f185, + 0x25ce87, + 0x25e9c6, + 0x371c05, + 0x25f54b, + 0x260dc4, + 0x265d05, + 0x265e47, + 0x275806, + 0x275c45, + 0x281d07, + 0x2829c7, + 0x2e1704, + 0x28990a, + 0x289dc8, + 0x293089, + 0x241245, + 0x364946, + 0x24b74a, + 0x2fae46, + 0x268fc7, + 0x292d4d, + 0x2a2709, + 0x3954c5, + 0x2031c7, + 0x31f148, + 0x336a08, + 0x209ec7, + 0x3be1c6, + 0x21d4c7, + 0x255083, + 0x303044, + 0x36e785, + 0x39fc47, + 0x3a4609, + 0x230cc8, + 0x33dc85, + 0x2363c4, + 0x253d45, + 0x39038d, + 0x211742, + 0x2bd986, + 0x27da06, + 0x2dac0a, + 0x381346, + 0x38b205, + 0x31ffc5, + 0x31ffc7, + 0x39c90c, + 0x27384a, + 0x28ba86, + 0x2c4d85, + 0x28bc06, + 0x28bf47, + 0x28d986, + 0x28f90c, + 0x38d1c9, + 0x4de14187, + 0x291e45, + 0x291e46, + 0x2944c8, + 0x247e85, + 0x2a3505, + 0x2a3c88, + 0x2a3e8a, + 0x4e278142, + 0x4e607c82, + 0x2d7485, + 0x2a6b43, + 0x2461c8, + 0x211b83, + 0x2a4104, + 0x25ba8b, + 0x211b88, + 0x2ce288, + 0x4eb1cb49, + 0x2a8e09, + 0x2a9546, + 0x2aa788, + 0x2aa989, + 0x2ab386, + 0x2ab505, + 0x24e0c6, + 0x2abfc9, + 0x3ac147, + 0x381fc6, + 0x2d8787, + 0x21eb07, + 0x34e204, + 0x4ee3a9c9, + 0x270e08, + 0x3b80c8, + 0x31f887, + 0x2c2f46, + 0x20b0c9, + 0x2f2bc7, + 0x33194a, + 0x364a88, + 0x3be307, + 0x20ed86, + 0x39d28a, + 0x372cc8, + 0x382ac5, + 0x22b9c5, + 0x30b047, + 0x36ac49, + 0x30280b, + 0x314248, + 0x337009, + 0x255f87, + 0x2b868c, + 0x2b8c8c, + 0x2b8f8a, + 0x2b920c, + 0x2c10c8, + 0x2c12c8, + 0x2c14c4, + 0x2c1889, + 0x2c1ac9, + 0x2c1d0a, + 0x2c1f89, + 0x2c22c7, + 0x3b4c4c, + 0x23d386, + 0x3c0708, + 0x2faf06, + 0x37fc46, + 0x3953c7, + 0x3ab0c8, + 0x349c4b, + 0x370907, + 0x35b849, + 0x3782c9, + 0x254b87, + 0x2f2b44, + 0x282487, + 0x2eaf46, + 0x215946, + 0x2f36c5, + 0x3720c8, + 0x290244, + 0x290246, + 0x27370b, + 0x2b0889, + 0x39ddc6, + 0x204cc9, + 0x2ea806, + 0x22e688, + 0x20b4c3, + 0x2f9cc5, + 0x21d2c9, + 0x228b05, + 0x30ae84, + 0x274d06, + 0x3993c5, + 0x259b06, + 0x308747, + 0x331086, + 0x23078b, + 0x36da07, + 0x256e46, + 0x348606, + 0x232386, + 0x354949, + 0x2e474a, + 0x2ba345, + 0x3be8cd, + 0x2a3f86, + 0x2e9106, + 0x397cc6, + 0x285705, + 0x2db187, + 0x2f75c7, + 0x207cce, + 0x21f743, + 0x2c2f09, + 0x358489, + 0x2d9e47, + 0x26c287, + 0x2a1445, + 0x2ae085, + 0x4f386f0f, + 0x2c8f87, + 0x2c9148, + 0x2c9884, + 0x2c9e46, + 0x4f64cd02, + 0x2cdfc6, + 0x2d0e06, + 0x349f8e, + 0x2e2f4a, + 0x3b8946, + 0x2ca24a, + 0x2065c9, + 0x231e85, + 0x344788, + 0x39a146, + 0x29aac8, + 0x3c2dc8, + 0x2a57cb, + 0x306c85, + 0x301ac8, + 0x2069cc, + 0x383d47, + 0x255606, + 0x27c4c8, + 0x224a88, + 0x4fa53982, + 0x20e08b, + 0x3361c9, + 0x21cb09, + 0x39dc47, + 0x38a7c8, + 0x4fe3ca88, + 0x21318b, + 0x342009, + 0x28394d, + 0x24e488, + 0x3518c8, + 0x502056c2, + 0x331404, + 0x50623b82, + 0x2f7e06, + 0x50a0a542, + 0x24fc8a, + 0x204b86, + 0x22e0c8, + 0x2be048, + 0x326cc6, + 0x398b46, + 0x2efd46, + 0x2ae305, + 0x240684, + 0x50e2e604, + 0x34c986, + 0x2a2247, + 0x5121c1c7, + 0x2e1bcb, + 0x348dc9, + 0x37dd8a, + 0x357ec4, + 0x320108, + 0x381d8d, + 0x2e6e49, + 0x2e7088, + 0x2e7709, + 0x2e9484, + 0x22c404, + 0x27d505, + 0x2ee68b, + 0x211b06, + 0x34c7c5, + 0x222449, + 0x306e88, + 0x29fb44, + 0x2d9bc9, + 0x326b05, + 0x2bea08, + 0x23b247, + 0x2b6848, + 0x27f046, + 0x207907, + 0x2d4109, + 0x374fc9, + 0x229b05, + 0x240305, + 0x51607482, + 0x2f4744, + 0x225dc5, + 0x292786, + 0x2f8305, + 0x297b87, + 0x34ca85, + 0x275844, + 0x335306, + 0x253b07, + 0x234fc6, + 0x384305, + 0x20e4c8, + 0x307405, + 0x20ef87, + 0x2154c9, + 0x2b09ca, + 0x34e587, + 0x34e58c, + 0x24d146, + 0x241b89, + 0x244885, + 0x247dc8, + 0x201283, + 0x210945, + 0x2eac05, + 0x257f47, + 0x51a12c02, + 0x398347, + 0x2f3c06, + 0x32fdc6, + 0x2f7f46, + 0x2249c6, + 0x2eb408, + 0x241045, + 0x2de707, + 0x2de70d, + 0x221b83, + 0x221b85, + 0x302387, + 0x398688, + 0x301f45, + 0x219788, + 0x23dc86, + 0x333947, + 0x3c0645, + 0x306d06, + 0x3a4c85, + 0x226bca, + 0x2fe986, + 0x22eec7, + 0x2f04c5, + 0x2ffc87, + 0x303684, + 0x30ae06, + 0x3446c5, + 0x357a0b, + 0x2eadc9, + 0x24bf4a, + 0x229b88, + 0x34c2c8, + 0x34cb8c, + 0x353847, + 0x36f808, + 0x387c08, + 0x394205, + 0x3a684a, + 0x3b00c9, + 0x51e01b42, + 0x3c6646, + 0x222e44, + 0x222e49, + 0x294f49, + 0x276587, + 0x2f9887, + 0x2bf1c9, + 0x285908, + 0x28590f, + 0x21dec6, + 0x2d2c8b, + 0x2f5645, + 0x2f5647, + 0x2f5c49, + 0x25bbc6, + 0x2d9b47, + 0x2d5ec5, + 0x234dc4, + 0x341006, + 0x226244, + 0x2df647, + 0x2ce808, + 0x522f9a48, + 0x2fa1c5, + 0x2fa307, + 0x24eb09, + 0x20de44, + 0x2473c8, + 0x52716788, + 0x201dc4, + 0x235fc8, + 0x3c2d04, + 0x3bebc9, + 0x21b2c5, + 0x52a0e982, + 0x21df05, + 0x2cb8c5, + 0x203008, + 0x238507, + 0x52e02a82, + 0x339e45, + 0x2cc586, + 0x249746, + 0x2f4708, + 0x2f4c88, + 0x2f82c6, + 0x30a146, + 0x385289, + 0x32fd06, + 0x29124b, + 0x3478c5, + 0x355c06, + 0x28e088, + 0x231bc6, + 0x20a386, + 0x219c4a, + 0x2a91ca, + 0x370c85, + 0x241107, + 0x30d0c6, + 0x53206d02, + 0x3024c7, + 0x260505, + 0x24b6c4, + 0x24b6c5, + 0x357dc6, + 0x271a47, + 0x220105, + 0x295004, + 0x2ad5c8, + 0x20a445, + 0x309fc7, + 0x3b1c45, + 0x226b05, + 0x268904, + 0x2abac9, + 0x2ff048, + 0x399286, + 0x2adc46, + 0x201ac6, + 0x536ff908, + 0x2ffb07, + 0x2ffe4d, + 0x30074c, + 0x300d49, + 0x300f89, + 0x53b65c82, + 0x3b7e83, + 0x2228c3, + 0x2eb005, + 0x39fd4a, + 0x32fbc6, + 0x3052c5, + 0x308904, + 0x30890b, + 0x323e4c, + 0x32488c, + 0x324b95, + 0x32754d, + 0x32a68f, + 0x32aa52, + 0x32aecf, + 0x32b292, + 0x32b713, + 0x32bbcd, + 0x32c18d, + 0x32c50e, + 0x32ca8e, + 0x32d2cc, + 0x32d68c, + 0x32dacb, + 0x32de4e, + 0x32e752, + 0x32f98c, + 0x32ff50, + 0x33ba12, + 0x33c68c, + 0x33cd4d, + 0x33d08c, + 0x33f651, + 0x34404d, + 0x34ac8d, + 0x34b28a, + 0x34b50c, + 0x34be0c, + 0x34c4cc, + 0x34ce8c, + 0x350493, + 0x350b10, + 0x350f10, + 0x351acd, + 0x3520cc, + 0x352dc9, + 0x35518d, + 0x3554d3, + 0x356491, + 0x3568d3, + 0x35888f, + 0x358c4c, + 0x358f4f, + 0x35930d, + 0x35990f, + 0x359cd0, + 0x35a74e, + 0x35df0e, + 0x35e490, + 0x35f08d, + 0x35fa0e, + 0x35fd8c, + 0x360d93, + 0x3628ce, + 0x362f50, + 0x363351, + 0x36378f, + 0x363b53, + 0x36580d, + 0x365b4f, + 0x365f0e, + 0x3665d0, + 0x3669c9, + 0x367d10, + 0x36830f, + 0x36898f, + 0x368d52, + 0x369e0e, + 0x36a80d, + 0x36ae8d, + 0x36b1cd, + 0x36c84d, + 0x36cb8d, + 0x36ced0, + 0x36d2cb, + 0x36e54c, + 0x36e8cc, + 0x36eecc, + 0x36f1ce, + 0x37bbd0, + 0x37e012, + 0x37e48b, + 0x37e98e, + 0x37ed0e, + 0x37f58e, + 0x37fa0b, + 0x53f80196, + 0x38104d, + 0x3814d4, + 0x38228d, + 0x386915, + 0x388c4d, + 0x3895cf, + 0x389ccf, + 0x38ee0f, + 0x38f1ce, + 0x38f74d, + 0x391891, + 0x394b8c, + 0x394e8c, + 0x39518b, + 0x39560c, + 0x39654f, + 0x396912, + 0x39950d, + 0x39ae0c, + 0x39b94c, + 0x39bc4d, + 0x39bf8f, + 0x39c34e, + 0x39fa0c, + 0x39ffcd, + 0x3a030b, + 0x3a0bcc, + 0x3a14cd, + 0x3a180e, + 0x3a1b89, + 0x3a3553, + 0x3a3a8d, + 0x3a3dcd, + 0x3a43cc, + 0x3a484e, + 0x3a520f, + 0x3a55cc, + 0x3a58cd, + 0x3a5c0f, + 0x3a5fcc, + 0x3a778c, + 0x3a7b0c, + 0x3a7e0c, + 0x3a84cd, + 0x3a8812, + 0x3a8e8c, + 0x3a918c, + 0x3a9491, + 0x3a98cf, + 0x3a9c8f, + 0x3aa053, + 0x3ac70e, + 0x3aca8f, + 0x3ace4c, + 0x543ad18e, + 0x3ad50f, + 0x3ad8d6, + 0x3ae0d2, + 0x3af8cc, + 0x3b030f, + 0x3b098d, + 0x3b0ccf, + 0x3b108c, + 0x3b138d, + 0x3b16cd, + 0x3b2c8e, + 0x3b3bcc, + 0x3b3ecc, + 0x3b41d0, + 0x3b7211, + 0x3b764b, + 0x3b7a8c, + 0x3b7d8e, + 0x3baa91, + 0x3baece, + 0x3bb24d, + 0x3c338b, + 0x3c3c8f, + 0x3c4794, + 0x25cd82, + 0x25cd82, + 0x2032c3, + 0x25cd82, + 0x2032c3, + 0x25cd82, + 0x2009c2, + 0x24e105, + 0x3ba78c, + 0x25cd82, + 0x25cd82, + 0x2009c2, + 0x25cd82, + 0x294b45, + 0x2b09c5, + 0x25cd82, + 0x25cd82, + 0x200302, + 0x294b45, + 0x328909, + 0x35618c, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x24e105, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x200302, + 0x328909, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x2b09c5, + 0x25cd82, + 0x2b09c5, + 0x35618c, + 0x3ba78c, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x1233c8, + 0x6fac4, + 0xae43, + 0x193708, + 0x200742, + 0x55202c42, + 0x246d03, + 0x252d84, + 0x206c03, + 0x3c24c4, + 0x234106, + 0x2137c3, + 0x2f9084, + 0x2f0b05, + 0x21f743, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x22d50a, + 0x253786, + 0x37f08c, + 0xcd588, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20be83, + 0x2d0e06, + 0x20ec83, + 0x241d03, + 0x219543, + 0xa4d48, + 0x117485, + 0x187689, + 0x15842, + 0x56793ec5, + 0x2aa47, + 0xaf148, + 0xd9ce, + 0x87e92, + 0x11b5cb, + 0x102d06, + 0x56ad2fc5, + 0x56ed2fcc, + 0x25147, + 0x15da87, + 0x1208ca, + 0x42ed0, + 0x149445, + 0x10bfcb, + 0x7e948, + 0x3c2c7, + 0xf400b, + 0x78449, + 0x127b07, + 0x3a347, + 0x760c7, + 0x3c206, + 0x67008, + 0x57429546, + 0xa964d, + 0x120290, + 0x5780a6c2, + 0x1f6c8, + 0x82dd0, + 0x15b5cc, + 0x57f61e0d, + 0x5fbc8, + 0x6b8c7, + 0x164f49, + 0x5b646, + 0x946c8, + 0xebfc2, + 0x71e8a, + 0x31047, + 0x11d107, + 0xa5a89, + 0xa7708, + 0xef4c5, + 0xe85ce, + 0x1260e, + 0x1b98f, + 0x25589, + 0x52c49, + 0x7e10b, + 0x8ef4f, + 0xabccc, + 0x1125cb, + 0x105848, + 0xe1ac7, + 0xf87c8, + 0x132c8b, + 0x15274c, + 0x15af0c, + 0x1625cc, + 0x16784d, + 0x142188, + 0xfcdc2, + 0x1970c9, + 0x133acb, + 0xc3146, + 0x12e28b, + 0xd560a, + 0xd61c5, + 0xdae90, + 0xdd606, + 0x140806, + 0x14a345, + 0x18a988, + 0xe3d47, + 0xe4007, + 0x1fcc7, + 0xf7c4a, + 0xaefca, + 0x7dac6, + 0x9088d, + 0x6e308, + 0x1be608, + 0x68849, + 0xb7cc5, + 0xf778c, + 0x167a4b, + 0x16ab84, + 0xfec89, + 0xfeec6, + 0x4cb06, + 0x4182, + 0x157206, + 0x14634b, + 0x10ac87, + 0xdc2, + 0xc5105, + 0x16704, + 0x781, + 0x7bc3, + 0x572a6d06, + 0x94a43, + 0x2542, + 0x31044, + 0x602, + 0x8b304, + 0xcc2, + 0x7ec2, + 0x3102, + 0x114902, + 0x3cc2, + 0xd2fc2, + 0x3c02, + 0xefa02, + 0x3e242, + 0x4ac2, + 0x2e02, + 0x4c3c2, + 0x37583, + 0x1f02, + 0x1b02, + 0x8882, + 0x12c42, + 0x1402, + 0x35c02, + 0x552c2, + 0x6202, + 0x3882, + 0x1342, + 0x14bc3, + 0x5842, + 0x1102, + 0xb0102, + 0x9602, + 0x1542, + 0x4482, + 0xe302, + 0x6f582, + 0x1e42, + 0x17c0c2, + 0x6cd82, + 0x3a3c2, + 0xec83, + 0x2002, + 0x53982, + 0x1382, + 0x6e02, + 0x29a85, + 0x8302, + 0x48602, + 0x44303, + 0x982, + 0x16f02, + 0x1282, + 0x4042, + 0xed42, + 0x2a82, + 0xc842, + 0x4182, + 0x1f8c5, + 0x582009c2, + 0x587696c3, + 0x1fbc3, + 0x58a009c2, + 0x1fbc3, + 0x179487, + 0x215383, + 0x200742, + 0x20be03, + 0x237583, + 0x203d43, + 0x201fc3, + 0x20be83, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x294a83, + 0x10ec3, + 0xcd588, + 0x20be03, + 0x237583, + 0x203d43, + 0x21f743, + 0x20ec83, + 0x20ae43, + 0xaff03, + 0x241d03, + 0x20be03, + 0x237583, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x200541, + 0x21f743, + 0x20ec83, + 0x228803, + 0x241d03, + 0x3744, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x21f6c3, + 0x203d43, + 0x257e43, + 0x26b143, + 0x2a2c83, + 0x280e83, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x203f83, + 0x31e084, + 0x250b03, + 0x5583, + 0x22d443, + 0x332388, + 0x325384, + 0x2023ca, + 0x238f06, + 0x10a904, + 0x37a147, + 0x22174a, + 0x21dd89, + 0x3ade07, + 0x3b628a, + 0x2b6c03, + 0x2d750b, + 0x293609, + 0x201bc5, + 0x34e347, + 0x2c42, + 0x20be03, + 0x214447, + 0x2fb145, + 0x2f2a09, + 0x237583, + 0x306a86, + 0x2c0c03, + 0xeae83, + 0x107f06, + 0x122746, + 0x13747, + 0x2176c6, + 0x227045, + 0x39e607, + 0x2d2107, + 0x5b30e843, + 0x33c8c7, + 0x371fc3, + 0x20fb85, + 0x26ff84, + 0x26ed48, + 0x36a50c, + 0x2ad885, + 0x2a2886, + 0x214307, + 0x345847, + 0x252e47, + 0x254d08, + 0x30600f, + 0x3371c5, + 0x246e07, + 0x37c407, + 0x2a424a, + 0x2cebc9, + 0x308045, + 0x30b7ca, + 0x136686, + 0x2c0c85, + 0x36c104, + 0x2bdf86, + 0x2f1a47, + 0x382947, + 0x348748, + 0x21b545, + 0x2fb046, + 0x21d105, + 0x36dd45, + 0x289684, + 0x326bc7, + 0x2eb24a, + 0x23fc48, + 0x366446, + 0xbe83, + 0x2d8345, + 0x318a86, + 0x3b4e86, + 0x34a246, + 0x21f743, + 0x399787, + 0x37c385, + 0x20ec83, + 0x2d58cd, + 0x20ae43, + 0x348848, + 0x38c844, + 0x275b05, + 0x2a4146, + 0x23d186, + 0x355b07, + 0x204a07, + 0x289085, + 0x241d03, + 0x3268c7, + 0x3650c9, + 0x340a49, + 0x30dc0a, + 0x249a82, + 0x20fb44, + 0x2de284, + 0x349b07, + 0x398208, + 0x2e4f89, + 0x221a49, + 0x2e5dc7, + 0x35c346, + 0xe8346, + 0x2e9484, + 0x2e9a8a, + 0x2edc08, + 0x2efc09, + 0x2de106, + 0x2b1985, + 0x23fb08, + 0x2c358a, + 0x2b5d83, + 0x31e206, + 0x2e5ec7, + 0x2311c5, + 0x38c705, + 0x3a26c3, + 0x2702c4, + 0x22b985, + 0x282ac7, + 0x2ff185, + 0x337c86, + 0x14aa05, + 0x2a3203, + 0x3b8a09, + 0x2758cc, + 0x2ca74c, + 0x2cbb08, + 0x2baec7, + 0x2fbc08, + 0x2fc24a, + 0x2fcc4b, + 0x293748, + 0x23c908, + 0x23d286, + 0x201985, + 0x320fca, + 0x369705, + 0x20e982, + 0x3c0507, + 0x261146, + 0x367405, + 0x36b749, + 0x277385, + 0x36d785, + 0x35c909, + 0x3189c6, + 0x3b6988, + 0x20fc43, + 0x217806, + 0x274c46, + 0x30a485, + 0x30a489, + 0x2e56c9, + 0x251b47, + 0x10c984, + 0x30c987, + 0x221949, + 0x23d605, + 0x40788, + 0x346245, + 0x332285, + 0x3c1309, + 0x203402, + 0x250904, + 0x203c82, + 0x205842, + 0x3c0d85, + 0x30a688, + 0x2b7c05, + 0x2c2483, + 0x2c2485, + 0x2ce1c3, + 0x20ff02, + 0x208f84, + 0x2c7d03, + 0x206f02, + 0x340484, + 0x2def43, + 0x204882, + 0x21fc43, + 0x28cb84, + 0x2f01c3, + 0x256784, + 0x200a02, + 0x219443, + 0x21d403, + 0x206a42, + 0x2f4682, + 0x2e5509, + 0x20ad42, + 0x2889c4, + 0x200442, + 0x23f984, + 0x35c304, + 0x287304, + 0x204182, + 0x23c542, + 0x3295c3, + 0x23b943, + 0x24d644, + 0x2b5c04, + 0x2eddc4, + 0x30b6c4, + 0x309043, + 0x335a83, + 0x336604, + 0x30eac4, + 0x30f306, + 0x2145c2, + 0x202c42, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x200742, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x201b03, + 0x30e843, + 0x26ff84, + 0x2e57c4, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x2ea0c4, + 0x31a803, + 0x2a6503, + 0x36aac4, + 0x346046, + 0x20b583, + 0x15da87, + 0x22fac3, + 0x21e903, + 0x2b0c43, + 0x20fbc3, + 0x20be83, + 0x339d45, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x210e03, + 0x2333c3, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x214bc3, + 0x20ec83, + 0x238184, + 0xaff03, + 0x241d03, + 0x209944, + 0x2bdd85, + 0x15da87, + 0x202c42, + 0x209d42, + 0x202542, + 0x2032c2, + 0xae43, + 0x200342, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0xae43, + 0x241d03, + 0x207c03, + 0x28b304, + 0xcd588, + 0x20be03, + 0x20ae43, + 0x10ec3, + 0x13b5c4, + 0x253404, + 0xcd588, + 0x20be03, + 0x254a04, + 0x26ff84, + 0x20ae43, + 0x2056c2, + 0x241d03, + 0x24f3c3, + 0x702c4, + 0x2f5805, + 0x20e982, + 0x30ec03, + 0xf89, + 0xd3686, + 0xfcc8, + 0x200742, + 0xcd588, + 0x202c42, + 0x237583, + 0x30e843, + 0x201342, + 0xae43, + 0x241d03, + 0x200742, + 0x1b6447, + 0x11c889, + 0x5483, + 0xcd588, + 0x1226c3, + 0x5f33d587, + 0xbe03, + 0x1c6548, + 0x237583, + 0x30e843, + 0x178d46, + 0x214bc3, + 0x5b388, + 0xc0248, + 0x40e06, + 0x21f743, + 0xc6788, + 0x97c03, + 0xdbd45, + 0x37787, + 0xec83, + 0x6c83, + 0x41d03, + 0x4bc2, + 0x16c18a, + 0x1c0e43, + 0x30c5c4, + 0x105e0b, + 0x1063c8, + 0x8d302, + 0x200742, + 0x202c42, + 0x20be03, + 0x237583, + 0x2d2484, + 0x30e843, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x20be03, + 0x237583, + 0x30e843, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x209943, + 0x207c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x10ec3, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x230882, + 0x200101, + 0x200742, + 0x200301, + 0x32a782, + 0xcd588, + 0x21fe85, + 0x200781, + 0xbe03, + 0x2014c1, + 0x200041, + 0x200141, + 0x24e082, + 0x378184, + 0x24e083, + 0x201401, + 0x200901, + 0x200541, + 0x200a81, + 0x316307, + 0x337dcf, + 0x2fa486, + 0x200641, + 0x33b606, + 0x200081, + 0x2001c1, + 0x3c35ce, + 0x200341, + 0x241d03, + 0x201681, + 0x254285, + 0x204bc2, + 0x3a25c5, + 0x2002c1, + 0x200a01, + 0x200401, + 0x20e982, + 0x200441, + 0x203f81, + 0x20d601, + 0x201181, + 0x200dc1, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x21a003, + 0x20be03, + 0x30e843, + 0x8d248, + 0x21f743, + 0x20ec83, + 0x5e8c3, + 0x241d03, + 0x14e0f48, + 0x10d08, + 0xcd588, + 0xae43, + 0x24704, + 0x4cec4, + 0x14e0f4a, + 0xcd588, + 0xaff03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x205583, + 0xcd588, + 0x20be03, + 0x237583, + 0x2d2484, + 0x241d03, + 0x252385, + 0x317c44, + 0x20be03, + 0x20ec83, + 0x241d03, + 0x2fc8a, + 0xfd504, + 0x112a46, + 0x202c42, + 0x20be03, + 0x234d09, + 0x237583, + 0x2a82c9, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2e9288, + 0x397b87, + 0x2f5805, + 0x1b7888, + 0x1b6447, + 0x19848a, + 0x101d0b, + 0x13b847, + 0x45388, + 0x3a80a, + 0x13dc8, + 0x11c889, + 0x2b847, + 0x67fc7, + 0x1c28c8, + 0x1c6548, + 0x470cf, + 0x26505, + 0x1c6847, + 0x178d46, + 0x4c207, + 0x108186, + 0x5b388, + 0x9b986, + 0x1187c7, + 0x142349, + 0x1b5207, + 0xe68c9, + 0xb8209, + 0xbdb06, + 0xc0248, + 0xbeb45, + 0x77fca, + 0xc6788, + 0x97c03, + 0xcea08, + 0x37787, + 0x1ac045, + 0x4dc90, + 0x6c83, + 0xaff03, + 0x1c3147, + 0x1d5c5, + 0xe4308, + 0x605c5, + 0x1c0e43, + 0x142748, + 0x132146, + 0x199bc9, + 0xaab87, + 0x124b, + 0x137a84, + 0xfe3c4, + 0x105e0b, + 0x1063c8, + 0x107e07, + 0x117485, + 0x20be03, + 0x237583, + 0x203d43, + 0x241d03, + 0x244a03, + 0x30e843, + 0xaff03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x7e24b, + 0x200742, + 0x202c42, + 0x241d03, + 0xcd588, + 0x200742, + 0x202c42, + 0x202542, + 0x201342, + 0x200b82, + 0x20ec83, + 0x200342, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x202542, + 0x30e843, + 0x214bc3, + 0x21f743, + 0x226444, + 0x20ec83, + 0x207783, + 0x241d03, + 0x30c5c4, + 0x203f83, + 0x30e843, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x3afd87, + 0x20be03, + 0x279947, + 0x2e6686, + 0x216543, + 0x208883, + 0x30e843, + 0x204d03, + 0x26ff84, + 0x38b344, + 0x2b9906, + 0x20c743, + 0x20ec83, + 0x241d03, + 0x252385, + 0x309e84, + 0x320dc3, + 0x20d203, + 0x3c0507, + 0x23b1c5, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x98747, + 0x2149c2, + 0x26e443, + 0x20df43, + 0x2b6c03, + 0x6760be03, + 0x20b2c2, + 0x237583, + 0x206c03, + 0x30e843, + 0x26ff84, + 0x3c32c3, + 0x3371c3, + 0x21f743, + 0x226444, + 0x67a02f02, + 0x20ec83, + 0x241d03, + 0x235cc3, + 0x214c43, + 0x230882, + 0x203f83, + 0xcd588, + 0x30e843, + 0x10ec3, + 0x31b0c4, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x39e304, + 0x21a484, + 0x2d0e06, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x261146, + 0x4170b, + 0x29546, + 0xeb94a, + 0x10b34a, + 0xcd588, + 0x21d0c4, + 0x68e0be03, + 0x2b6bc4, + 0x237583, + 0x268984, + 0x30e843, + 0x357d43, + 0x21f743, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x55a43, + 0x33840b, + 0x3b1a0a, + 0x3c580c, + 0xd80c8, + 0x200742, + 0x202c42, + 0x202542, + 0x232585, + 0x26ff84, + 0x201e42, + 0x21f743, + 0x21a484, + 0x2032c2, + 0x200342, + 0x207c02, + 0x230882, + 0xb6c03, + 0x4d5c2, + 0x386389, + 0x3a3088, + 0x310449, + 0x34e049, + 0x23e48a, + 0x24e90a, + 0x219382, + 0x2efa02, + 0x2c42, + 0x20be03, + 0x230a42, + 0x246fc6, + 0x368802, + 0x207582, + 0x30208e, + 0x21948e, + 0x27bf47, + 0x20ec07, + 0x2e89c2, + 0x237583, + 0x30e843, + 0x209182, + 0x201342, + 0x6ff83, + 0x23d94f, + 0x247302, + 0x2f9507, + 0x2ad447, + 0x314407, + 0x2b0fcc, + 0x2b9b8c, + 0x207144, + 0x27d34a, + 0x2193c2, + 0x209602, + 0x2b9844, + 0x2028c2, + 0x2c10c2, + 0x2b9dc4, + 0x217f42, + 0x201542, + 0xf003, + 0x29ba07, + 0x233805, + 0x20e302, + 0x24c184, + 0x37c0c2, + 0x2d7c88, + 0x20ec83, + 0x39f108, + 0x206a82, + 0x207305, + 0x388206, + 0x241d03, + 0x208302, + 0x2e51c7, + 0x4bc2, + 0x272585, + 0x204905, + 0x212182, + 0x2030c2, + 0x293c0a, + 0x288f0a, + 0x23a382, + 0x29a184, + 0x2040c2, + 0x20fa08, + 0x200d82, + 0x39d588, + 0x302ac7, + 0x3038c9, + 0x204982, + 0x3086c5, + 0x36ba05, + 0x21b60b, + 0x2c418c, + 0x230548, + 0x31c188, + 0x2145c2, + 0x355bc2, + 0x200742, + 0xcd588, + 0x202c42, + 0x20be03, + 0x202542, + 0x2032c2, + 0xae43, + 0x200342, + 0x241d03, + 0x207c02, + 0x200742, + 0x6a202c42, + 0x6a70e843, + 0x20f003, + 0x201e42, + 0x20ec83, + 0x338c03, + 0x241d03, + 0x2e2d83, + 0x379586, + 0x1607c03, + 0xcd588, + 0x6e247, + 0x14a345, + 0xa7e0d, + 0xa5f4a, + 0x85047, + 0x6ae00a42, + 0x6b200602, + 0x6b600282, + 0x6ba02b82, + 0x6be12442, + 0x6c203cc2, + 0x15da87, + 0x6c602c42, + 0x6ca1b282, + 0x6ce1f9c2, + 0x6d202e02, + 0x219483, + 0x22644, + 0x282dc3, + 0x6d615902, + 0x6da039c2, + 0x55087, + 0x6de02202, + 0x6e200902, + 0x6e600542, + 0x6ea07d02, + 0x6ee03882, + 0x6f201342, + 0xc0f85, + 0x24c4c3, + 0x23a2c4, + 0x6f6028c2, + 0x6fa0dd82, + 0x6fe00682, + 0xb714b, + 0x702000c2, + 0x70a54ac2, + 0x70e01e42, + 0x71200b82, + 0x71603282, + 0x71a05a02, + 0x71e0d682, + 0x7226cd82, + 0x72602f02, + 0x72a04d42, + 0x72e032c2, + 0x7323e0c2, + 0x7362a402, + 0x73a11e82, + 0xafd44, + 0x339b43, + 0x73e0e882, + 0x742190c2, + 0x74606482, + 0x74a02882, + 0x74e00342, + 0x75206f02, + 0x7e3c7, + 0x756057c2, + 0x75a00502, + 0x75e07c02, + 0x76209f82, + 0xf778c, + 0x76627882, + 0x76a2c0c2, + 0x76e0a902, + 0x77206d02, + 0x77611d82, + 0x77a3e602, + 0x77e0fc42, + 0x78213802, + 0x78674fc2, + 0x78a4f1c2, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x11343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x707c32c3, + 0x211343, + 0x339dc4, + 0x3a2f86, + 0x2f0ec3, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x200189, + 0x24d5c2, + 0x391283, + 0x2b8503, + 0x202f85, + 0x206c03, + 0x3c32c3, + 0x211343, + 0x29ea43, + 0x233d43, + 0x3bd849, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x7920be03, + 0x237583, + 0x332683, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0xcd588, + 0x202c42, + 0x20be03, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x253404, + 0x202c42, + 0x20be03, + 0x322183, + 0x237583, + 0x254a04, + 0x203d43, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x24f3c3, + 0x2f5805, + 0x233d43, + 0x203f83, + 0xae43, + 0x202c42, + 0x20be03, + 0x3c32c3, + 0x20ec83, + 0x241d03, + 0x200742, + 0x2b6c03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x234106, + 0x26ff84, + 0x214bc3, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x20be03, + 0x237583, + 0x20ec83, + 0x241d03, + 0x144be47, + 0x20be03, + 0x29546, + 0x237583, + 0x30e843, + 0xd91c6, + 0x20ec83, + 0x241d03, + 0x318fc8, + 0x31bfc9, + 0x330349, + 0x33af08, + 0x38a348, + 0x38a349, + 0x22c10d, + 0x24b00f, + 0x2ec510, + 0x354d0d, + 0x36ebcc, + 0x38bc4b, + 0xaf148, + 0xc7bc5, + 0x200742, + 0x23b005, + 0x20bfc3, + 0x7c602c42, + 0x237583, + 0x30e843, + 0x2d7ac7, + 0x20fbc3, + 0x21f743, + 0x20ec83, + 0x228803, + 0x20c0c3, + 0x20ae43, + 0x241d03, + 0x253786, + 0x20e982, + 0x203f83, + 0xcd588, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x207c03, + 0xf84, + 0x154ab06, + 0x200742, + 0x202c42, + 0x30e843, + 0x21f743, + 0x241d03, +} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [ 1 bits] unused +// [ 1 bits] wildcard bit +// [ 2 bits] node type +// [14 bits] high nodes index (exclusive) of children +// [14 bits] low nodes index (inclusive) of children +var children = [...]uint32{ + 0x0, + 0x10000000, + 0x20000000, + 0x40000000, + 0x50000000, + 0x60000000, + 0x185460f, + 0x1858615, + 0x187c616, + 0x19d861f, + 0x19ec676, + 0x1a0067b, + 0x1a14680, + 0x1a34685, + 0x1a3868d, + 0x1a5068e, + 0x1a78694, + 0x1a7c69e, + 0x1a9469f, + 0x1a986a5, + 0x1a9c6a6, + 0x1ad86a7, + 0x1adc6b6, + 0x21ae46b7, + 0x1b2c6b9, + 0x1b306cb, + 0x1b506cc, + 0x1b646d4, + 0x1b686d9, + 0x1b986da, + 0x1bb46e6, + 0x1bdc6ed, + 0x1bec6f7, + 0x1bf06fb, + 0x1c886fc, + 0x1c9c722, + 0x1cb0727, + 0x1ce072c, + 0x1cf0738, + 0x1d0473c, + 0x1da8741, + 0x1fa076a, + 0x1fa47e8, + 0x20107e9, + 0x207c804, + 0x209481f, + 0x20a8825, + 0x20b082a, + 0x20c482c, + 0x20c8831, + 0x20e4832, + 0x2134839, + 0x215084d, + 0x2154854, + 0x2158855, + 0x2174856, + 0x21b085d, + 0x621b486c, + 0x21cc86d, + 0x21e0873, + 0x21e4878, + 0x21f4879, + 0x22a487d, + 0x22a88a9, + 0x222b88aa, + 0x222bc8ae, + 0x222c08af, + 0x22f88b0, + 0x22fc8be, + 0x278c8bf, + 0x228349e3, + 0x22838a0d, + 0x2283ca0e, + 0x22848a0f, + 0x2284ca12, + 0x22858a13, + 0x2285ca16, + 0x22860a17, + 0x22864a18, + 0x22868a19, + 0x2286ca1a, + 0x22878a1b, + 0x2287ca1e, + 0x22888a1f, + 0x2288ca22, + 0x22890a23, + 0x22894a24, + 0x228a0a25, + 0x228a4a28, + 0x228b0a29, + 0x228b4a2c, + 0x228b8a2d, + 0x228bca2e, + 0x28c0a2f, + 0x228c4a30, + 0x228d0a31, + 0x228d4a34, + 0x28dca35, + 0x291ca37, + 0x2293ca47, + 0x22940a4f, + 0x22944a50, + 0x2948a51, + 0x2294ca52, + 0x2950a53, + 0x296ca54, + 0x2984a5b, + 0x2988a61, + 0x2998a62, + 0x29a4a66, + 0x29d8a69, + 0x29dca76, + 0x29f0a77, + 0x229f8a7c, + 0x2ab8a7e, + 0x22abcaae, + 0x2ac4aaf, + 0x2ac8ab1, + 0x2ae0ab2, + 0x2af4ab8, + 0x2b1cabd, + 0x2b3cac7, + 0x2b6cacf, + 0x2b94adb, + 0x2b98ae5, + 0x2bbcae6, + 0x2bc0aef, + 0x2bd4af0, + 0x2bd8af5, + 0x2bdcaf6, + 0x2bfcaf7, + 0x2c1caff, + 0x2c20b07, + 0x22c24b08, + 0x2c28b09, + 0x2c2cb0a, + 0x2c3cb0b, + 0x2c40b0f, + 0x2cb8b10, + 0x2cbcb2e, + 0x2cd8b2f, + 0x2ce8b36, + 0x2cfcb3a, + 0x2d14b3f, + 0x2d2cb45, + 0x2d44b4b, + 0x2d48b51, + 0x2d60b52, + 0x2d7cb58, + 0x2d9cb5f, + 0x2db4b67, + 0x2e14b6d, + 0x2e30b85, + 0x2e38b8c, + 0x2e3cb8e, + 0x2e50b8f, + 0x2e94b94, + 0x2f14ba5, + 0x2f40bc5, + 0x2f44bd0, + 0x2f4cbd1, + 0x2f6cbd3, + 0x2f70bdb, + 0x2f94bdc, + 0x2f9cbe5, + 0x2fd8be7, + 0x301cbf6, + 0x3020c07, + 0x3094c08, + 0x3098c25, + 0x2309cc26, + 0x230a0c27, + 0x230a4c28, + 0x230b4c29, + 0x230b8c2d, + 0x230bcc2e, + 0x230c0c2f, + 0x230c4c30, + 0x30dcc31, + 0x3100c37, + 0x3120c40, + 0x36e4c48, + 0x36f0db9, + 0x3710dbc, + 0x38ccdc4, + 0x399ce33, + 0x3a0ce67, + 0x3a64e83, + 0x3b4ce99, + 0x3ba4ed3, + 0x3be0ee9, + 0x3cdcef8, + 0x3da8f37, + 0x3e40f6a, + 0x3ed0f90, + 0x3f34fb4, + 0x416cfcd, + 0x422505b, + 0x42f1089, + 0x433d0bc, + 0x43c50cf, + 0x44010f1, + 0x4451100, + 0x44c9114, + 0x644cd132, + 0x644d1133, + 0x644d5134, + 0x4551135, + 0x45ad154, + 0x462916b, + 0x46a118a, + 0x47211a8, + 0x478d1c8, + 0x48b91e3, + 0x491122e, + 0x64915244, + 0x49ad245, + 0x4a3526b, + 0x4a8128d, + 0x4ae92a0, + 0x4b912ba, + 0x4c592e4, + 0x4cc1316, + 0x4dd5330, + 0x64dd9375, + 0x64ddd376, + 0x4e39377, + 0x4e9538e, + 0x4f253a5, + 0x4fa13c9, + 0x4fe53e8, + 0x50c93f9, + 0x50fd432, + 0x515d43f, + 0x51d1457, + 0x5259474, + 0x5299496, + 0x53094a6, + 0x6530d4c2, + 0x53314c3, + 0x53354cc, + 0x534d4cd, + 0x53694d3, + 0x53ad4da, + 0x53bd4eb, + 0x53d54ef, + 0x544d4f5, + 0x5455513, + 0x5469515, + 0x548551a, + 0x54b1521, + 0x54b552c, + 0x54bd52d, + 0x54d152f, + 0x54ed534, + 0x54f953b, + 0x550153e, + 0x553d540, + 0x555154f, + 0x5559554, + 0x5565556, + 0x556d559, + 0x559155b, + 0x55b5564, + 0x55cd56d, + 0x55d1573, + 0x55d9574, + 0x55dd576, + 0x5645577, + 0x5649591, + 0x566d592, + 0x569159b, + 0x56ad5a4, + 0x56bd5ab, + 0x56d15af, + 0x56d55b4, + 0x56dd5b5, + 0x56f15b7, + 0x57015bc, + 0x57055c0, + 0x57215c1, + 0x5fb15c8, + 0x5fe97ec, + 0x60157fa, + 0x6031805, + 0x605180c, + 0x6071814, + 0x60b581c, + 0x60bd82d, + 0x260c182f, + 0x260c5830, + 0x60cd831, + 0x6245833, + 0x26249891, + 0x26259892, + 0x26261896, + 0x2626d898, + 0x627189b, + 0x627589c, + 0x629d89d, + 0x62c58a7, + 0x62c98b1, + 0x63018b2, + 0x63218c0, + 0x6e798c8, + 0x6e7db9e, + 0x6e81b9f, + 0x26e85ba0, + 0x6e89ba1, + 0x26e8dba2, + 0x6e91ba3, + 0x26e9dba4, + 0x6ea1ba7, + 0x6ea5ba8, + 0x26ea9ba9, + 0x6eadbaa, + 0x26eb5bab, + 0x6eb9bad, + 0x6ebdbae, + 0x26ecdbaf, + 0x6ed1bb3, + 0x6ed5bb4, + 0x6ed9bb5, + 0x6eddbb6, + 0x26ee1bb7, + 0x6ee5bb8, + 0x6ee9bb9, + 0x6eedbba, + 0x6ef1bbb, + 0x26ef9bbc, + 0x6efdbbe, + 0x6f01bbf, + 0x6f05bc0, + 0x26f09bc1, + 0x6f0dbc2, + 0x26f15bc3, + 0x26f19bc5, + 0x6f35bc6, + 0x6f45bcd, + 0x6f89bd1, + 0x6f8dbe2, + 0x6fb1be3, + 0x6fb5bec, + 0x6fb9bed, + 0x7145bee, + 0x27149c51, + 0x27151c52, + 0x27155c54, + 0x27159c55, + 0x7161c56, + 0x723dc58, + 0x27249c8f, + 0x2724dc92, + 0x27251c93, + 0x27255c94, + 0x7259c95, + 0x7285c96, + 0x7289ca1, + 0x72adca2, + 0x72b9cab, + 0x72d9cae, + 0x72ddcb6, + 0x7315cb7, + 0x75adcc5, + 0x7669d6b, + 0x767dd9a, + 0x76b1d9f, + 0x76e1dac, + 0x76fddb8, + 0x7725dbf, + 0x7745dc9, + 0x7761dd1, + 0x7789dd8, + 0x7799de2, + 0x779dde6, + 0x77a1de7, + 0x77d5de8, + 0x77e1df5, + 0x7801df8, + 0x7879e00, + 0x2787de1e, + 0x78a1e1f, + 0x78c1e28, + 0x78d5e30, + 0x78e9e35, + 0x78ede3a, + 0x790de3b, + 0x79b1e43, + 0x79cde6c, + 0x79f1e73, + 0x79f9e7c, + 0x7a05e7e, + 0x7a0de81, + 0x7a21e83, + 0x7a41e88, + 0x7a4de90, + 0x7a59e93, + 0x7a89e96, + 0x7b5dea2, + 0x7b61ed7, + 0x7b75ed8, + 0x7b7dedd, + 0x7b95edf, + 0x7b99ee5, + 0x7ba5ee6, + 0x7ba9ee9, + 0x7bc5eea, + 0x7c01ef1, + 0x7c05f00, + 0x7c25f01, + 0x7c75f09, + 0x7c91f1d, + 0x7ce5f24, + 0x7ce9f39, + 0x7cedf3a, + 0x7cf1f3b, + 0x7d35f3c, + 0x7d45f4d, + 0x7d85f51, + 0x7d89f61, + 0x7db9f62, + 0x7f01f6e, + 0x7f29fc0, + 0x7f55fca, + 0x7f65fd5, + 0x7f6dfd9, + 0x807dfdb, + 0x808a01f, + 0x8096022, + 0x80a2025, + 0x80ae028, + 0x80ba02b, + 0x80c602e, + 0x80d2031, + 0x80de034, + 0x80ea037, + 0x80f603a, + 0x810203d, + 0x810e040, + 0x811a043, + 0x8122046, + 0x812e048, + 0x813a04b, + 0x814604e, + 0x8152051, + 0x815e054, + 0x816a057, + 0x817605a, + 0x818205d, + 0x818e060, + 0x819a063, + 0x81a6066, + 0x81d2069, + 0x81de074, + 0x81ea077, + 0x81f607a, + 0x820207d, + 0x820e080, + 0x8216083, + 0x8222085, + 0x822e088, + 0x823a08b, + 0x824608e, + 0x8252091, + 0x825e094, + 0x826a097, + 0x827609a, + 0x828209d, + 0x828e0a0, + 0x829a0a3, + 0x82a60a6, + 0x82b20a9, + 0x82ba0ac, + 0x82c60ae, + 0x82d20b1, + 0x82de0b4, + 0x82ea0b7, + 0x82f60ba, + 0x83020bd, + 0x830e0c0, + 0x831a0c3, + 0x831e0c6, + 0x832a0c7, + 0x83460ca, + 0x834a0d1, + 0x835a0d2, + 0x83760d6, + 0x83ba0dd, + 0x83be0ee, + 0x83d20ef, + 0x84060f4, + 0x8416101, + 0x8436105, + 0x844e10d, + 0x8466113, + 0x846e119, + 0x284b211b, + 0x84b612c, + 0x84e212d, + 0x84ea138, + 0x84fe13a, +} + +// max children 500 (capacity 1023) +// max text offset 29102 (capacity 32767) +// max text length 36 (capacity 63) +// max hi 8511 (capacity 16383) +// max lo 8506 (capacity 16383) diff --git a/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/golang.org/x/net/publicsuffix/table_test.go new file mode 100644 index 0000000..228010c --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table_test.go @@ -0,0 +1,16959 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +var rules = [...]string{ + "ac", + "com.ac", + "edu.ac", + "gov.ac", + "net.ac", + "mil.ac", + "org.ac", + "ad", + "nom.ad", + "ae", + "co.ae", + "net.ae", + "org.ae", + "sch.ae", + "ac.ae", + "gov.ae", + "mil.ae", + "aero", + "accident-investigation.aero", + "accident-prevention.aero", + "aerobatic.aero", + "aeroclub.aero", + "aerodrome.aero", + "agents.aero", + "aircraft.aero", + "airline.aero", + "airport.aero", + "air-surveillance.aero", + "airtraffic.aero", + "air-traffic-control.aero", + "ambulance.aero", + "amusement.aero", + "association.aero", + "author.aero", + "ballooning.aero", + "broker.aero", + "caa.aero", + "cargo.aero", + "catering.aero", + "certification.aero", + "championship.aero", + "charter.aero", + "civilaviation.aero", + "club.aero", + "conference.aero", + "consultant.aero", + "consulting.aero", + "control.aero", + "council.aero", + "crew.aero", + "design.aero", + "dgca.aero", + "educator.aero", + "emergency.aero", + "engine.aero", + "engineer.aero", + "entertainment.aero", + "equipment.aero", + "exchange.aero", + "express.aero", + "federation.aero", + "flight.aero", + "freight.aero", + "fuel.aero", + "gliding.aero", + "government.aero", + "groundhandling.aero", + "group.aero", + "hanggliding.aero", + "homebuilt.aero", + "insurance.aero", + "journal.aero", + "journalist.aero", + "leasing.aero", + "logistics.aero", + "magazine.aero", + "maintenance.aero", + "media.aero", + "microlight.aero", + "modelling.aero", + "navigation.aero", + "parachuting.aero", + "paragliding.aero", + "passenger-association.aero", + "pilot.aero", + "press.aero", + "production.aero", + "recreation.aero", + "repbody.aero", + "res.aero", + "research.aero", + "rotorcraft.aero", + "safety.aero", + "scientist.aero", + "services.aero", + "show.aero", + "skydiving.aero", + "software.aero", + "student.aero", + "trader.aero", + "trading.aero", + "trainer.aero", + "union.aero", + "workinggroup.aero", + "works.aero", + "af", + "gov.af", + "com.af", + "org.af", + "net.af", + "edu.af", + "ag", + "com.ag", + "org.ag", + "net.ag", + "co.ag", + "nom.ag", + "ai", + "off.ai", + "com.ai", + "net.ai", + "org.ai", + "al", + "com.al", + "edu.al", + "gov.al", + "mil.al", + "net.al", + "org.al", + "am", + "ao", + "ed.ao", + "gv.ao", + "og.ao", + "co.ao", + "pb.ao", + "it.ao", + "aq", + "ar", + "com.ar", + "edu.ar", + "gob.ar", + "gov.ar", + "int.ar", + "mil.ar", + "musica.ar", + "net.ar", + "org.ar", + "tur.ar", + "arpa", + "e164.arpa", + "in-addr.arpa", + "ip6.arpa", + "iris.arpa", + "uri.arpa", + "urn.arpa", + "as", + "gov.as", + "asia", + "at", + "ac.at", + "co.at", + "gv.at", + "or.at", + "au", + "com.au", + "net.au", + "org.au", + "edu.au", + "gov.au", + "asn.au", + "id.au", + "info.au", + "conf.au", + "oz.au", + "act.au", + "nsw.au", + "nt.au", + "qld.au", + "sa.au", + "tas.au", + "vic.au", + "wa.au", + "act.edu.au", + "nsw.edu.au", + "nt.edu.au", + "qld.edu.au", + "sa.edu.au", + "tas.edu.au", + "vic.edu.au", + "wa.edu.au", + "qld.gov.au", + "sa.gov.au", + "tas.gov.au", + "vic.gov.au", + "wa.gov.au", + "aw", + "com.aw", + "ax", + "az", + "com.az", + "net.az", + "int.az", + "gov.az", + "org.az", + "edu.az", + "info.az", + "pp.az", + "mil.az", + "name.az", + "pro.az", + "biz.az", + "ba", + "com.ba", + "edu.ba", + "gov.ba", + "mil.ba", + "net.ba", + "org.ba", + "bb", + "biz.bb", + "co.bb", + "com.bb", + "edu.bb", + "gov.bb", + "info.bb", + "net.bb", + "org.bb", + "store.bb", + "tv.bb", + "*.bd", + "be", + "ac.be", + "bf", + "gov.bf", + "bg", + "a.bg", + "b.bg", + "c.bg", + "d.bg", + "e.bg", + "f.bg", + "g.bg", + "h.bg", + "i.bg", + "j.bg", + "k.bg", + "l.bg", + "m.bg", + "n.bg", + "o.bg", + "p.bg", + "q.bg", + "r.bg", + "s.bg", + "t.bg", + "u.bg", + "v.bg", + "w.bg", + "x.bg", + "y.bg", + "z.bg", + "0.bg", + "1.bg", + "2.bg", + "3.bg", + "4.bg", + "5.bg", + "6.bg", + "7.bg", + "8.bg", + "9.bg", + "bh", + "com.bh", + "edu.bh", + "net.bh", + "org.bh", + "gov.bh", + "bi", + "co.bi", + "com.bi", + "edu.bi", + "or.bi", + "org.bi", + "biz", + "bj", + "asso.bj", + "barreau.bj", + "gouv.bj", + "bm", + "com.bm", + "edu.bm", + "gov.bm", + "net.bm", + "org.bm", + "*.bn", + "bo", + "com.bo", + "edu.bo", + "gob.bo", + "int.bo", + "org.bo", + "net.bo", + "mil.bo", + "tv.bo", + "web.bo", + "academia.bo", + "agro.bo", + "arte.bo", + "blog.bo", + "bolivia.bo", + "ciencia.bo", + "cooperativa.bo", + "democracia.bo", + "deporte.bo", + "ecologia.bo", + "economia.bo", + "empresa.bo", + "indigena.bo", + "industria.bo", + "info.bo", + "medicina.bo", + "movimiento.bo", + "musica.bo", + "natural.bo", + "nombre.bo", + "noticias.bo", + "patria.bo", + "politica.bo", + "profesional.bo", + "plurinacional.bo", + "pueblo.bo", + "revista.bo", + "salud.bo", + "tecnologia.bo", + "tksat.bo", + "transporte.bo", + "wiki.bo", + "br", + "9guacu.br", + "abc.br", + "adm.br", + "adv.br", + "agr.br", + "aju.br", + "am.br", + "anani.br", + "aparecida.br", + "arq.br", + "art.br", + "ato.br", + "b.br", + "belem.br", + "bhz.br", + "bio.br", + "blog.br", + "bmd.br", + "boavista.br", + "bsb.br", + "campinagrande.br", + "campinas.br", + "caxias.br", + "cim.br", + "cng.br", + "cnt.br", + "com.br", + "contagem.br", + "coop.br", + "cri.br", + "cuiaba.br", + "curitiba.br", + "def.br", + "ecn.br", + "eco.br", + "edu.br", + "emp.br", + "eng.br", + "esp.br", + "etc.br", + "eti.br", + "far.br", + "feira.br", + "flog.br", + "floripa.br", + "fm.br", + "fnd.br", + "fortal.br", + "fot.br", + "foz.br", + "fst.br", + "g12.br", + "ggf.br", + "goiania.br", + "gov.br", + "ac.gov.br", + "al.gov.br", + "am.gov.br", + "ap.gov.br", + "ba.gov.br", + "ce.gov.br", + "df.gov.br", + "es.gov.br", + "go.gov.br", + "ma.gov.br", + "mg.gov.br", + "ms.gov.br", + "mt.gov.br", + "pa.gov.br", + "pb.gov.br", + "pe.gov.br", + "pi.gov.br", + "pr.gov.br", + "rj.gov.br", + "rn.gov.br", + "ro.gov.br", + "rr.gov.br", + "rs.gov.br", + "sc.gov.br", + "se.gov.br", + "sp.gov.br", + "to.gov.br", + "gru.br", + "imb.br", + "ind.br", + "inf.br", + "jab.br", + "jampa.br", + "jdf.br", + "joinville.br", + "jor.br", + "jus.br", + "leg.br", + "lel.br", + "londrina.br", + "macapa.br", + "maceio.br", + "manaus.br", + "maringa.br", + "mat.br", + "med.br", + "mil.br", + "morena.br", + "mp.br", + "mus.br", + "natal.br", + "net.br", + "niteroi.br", + "*.nom.br", + "not.br", + "ntr.br", + "odo.br", + "org.br", + "osasco.br", + "palmas.br", + "poa.br", + "ppg.br", + "pro.br", + "psc.br", + "psi.br", + "pvh.br", + "qsl.br", + "radio.br", + "rec.br", + "recife.br", + "ribeirao.br", + "rio.br", + "riobranco.br", + "riopreto.br", + "salvador.br", + "sampa.br", + "santamaria.br", + "santoandre.br", + "saobernardo.br", + "saogonca.br", + "sjc.br", + "slg.br", + "slz.br", + "sorocaba.br", + "srv.br", + "taxi.br", + "teo.br", + "the.br", + "tmp.br", + "trd.br", + "tur.br", + "tv.br", + "udi.br", + "vet.br", + "vix.br", + "vlog.br", + "wiki.br", + "zlg.br", + "bs", + "com.bs", + "net.bs", + "org.bs", + "edu.bs", + "gov.bs", + "bt", + "com.bt", + "edu.bt", + "gov.bt", + "net.bt", + "org.bt", + "bv", + "bw", + "co.bw", + "org.bw", + "by", + "gov.by", + "mil.by", + "com.by", + "of.by", + "bz", + "com.bz", + "net.bz", + "org.bz", + "edu.bz", + "gov.bz", + "ca", + "ab.ca", + "bc.ca", + "mb.ca", + "nb.ca", + "nf.ca", + "nl.ca", + "ns.ca", + "nt.ca", + "nu.ca", + "on.ca", + "pe.ca", + "qc.ca", + "sk.ca", + "yk.ca", + "gc.ca", + "cat", + "cc", + "cd", + "gov.cd", + "cf", + "cg", + "ch", + "ci", + "org.ci", + "or.ci", + "com.ci", + "co.ci", + "edu.ci", + "ed.ci", + "ac.ci", + "net.ci", + "go.ci", + "asso.ci", + "xn--aroport-bya.ci", + "int.ci", + "presse.ci", + "md.ci", + "gouv.ci", + "*.ck", + "!www.ck", + "cl", + "gov.cl", + "gob.cl", + "co.cl", + "mil.cl", + "cm", + "co.cm", + "com.cm", + "gov.cm", + "net.cm", + "cn", + "ac.cn", + "com.cn", + "edu.cn", + "gov.cn", + "net.cn", + "org.cn", + "mil.cn", + "xn--55qx5d.cn", + "xn--io0a7i.cn", + "xn--od0alg.cn", + "ah.cn", + "bj.cn", + "cq.cn", + "fj.cn", + "gd.cn", + "gs.cn", + "gz.cn", + "gx.cn", + "ha.cn", + "hb.cn", + "he.cn", + "hi.cn", + "hl.cn", + "hn.cn", + "jl.cn", + "js.cn", + "jx.cn", + "ln.cn", + "nm.cn", + "nx.cn", + "qh.cn", + "sc.cn", + "sd.cn", + "sh.cn", + "sn.cn", + "sx.cn", + "tj.cn", + "xj.cn", + "xz.cn", + "yn.cn", + "zj.cn", + "hk.cn", + "mo.cn", + "tw.cn", + "co", + "arts.co", + "com.co", + "edu.co", + "firm.co", + "gov.co", + "info.co", + "int.co", + "mil.co", + "net.co", + "nom.co", + "org.co", + "rec.co", + "web.co", + "com", + "coop", + "cr", + "ac.cr", + "co.cr", + "ed.cr", + "fi.cr", + "go.cr", + "or.cr", + "sa.cr", + "cu", + "com.cu", + "edu.cu", + "org.cu", + "net.cu", + "gov.cu", + "inf.cu", + "cv", + "cw", + "com.cw", + "edu.cw", + "net.cw", + "org.cw", + "cx", + "gov.cx", + "cy", + "ac.cy", + "biz.cy", + "com.cy", + "ekloges.cy", + "gov.cy", + "ltd.cy", + "name.cy", + "net.cy", + "org.cy", + "parliament.cy", + "press.cy", + "pro.cy", + "tm.cy", + "cz", + "de", + "dj", + "dk", + "dm", + "com.dm", + "net.dm", + "org.dm", + "edu.dm", + "gov.dm", + "do", + "art.do", + "com.do", + "edu.do", + "gob.do", + "gov.do", + "mil.do", + "net.do", + "org.do", + "sld.do", + "web.do", + "dz", + "com.dz", + "org.dz", + "net.dz", + "gov.dz", + "edu.dz", + "asso.dz", + "pol.dz", + "art.dz", + "ec", + "com.ec", + "info.ec", + "net.ec", + "fin.ec", + "k12.ec", + "med.ec", + "pro.ec", + "org.ec", + "edu.ec", + "gov.ec", + "gob.ec", + "mil.ec", + "edu", + "ee", + "edu.ee", + "gov.ee", + "riik.ee", + "lib.ee", + "med.ee", + "com.ee", + "pri.ee", + "aip.ee", + "org.ee", + "fie.ee", + "eg", + "com.eg", + "edu.eg", + "eun.eg", + "gov.eg", + "mil.eg", + "name.eg", + "net.eg", + "org.eg", + "sci.eg", + "*.er", + "es", + "com.es", + "nom.es", + "org.es", + "gob.es", + "edu.es", + "et", + "com.et", + "gov.et", + "org.et", + "edu.et", + "biz.et", + "name.et", + "info.et", + "net.et", + "eu", + "fi", + "aland.fi", + "*.fj", + "*.fk", + "fm", + "fo", + "fr", + "com.fr", + "asso.fr", + "nom.fr", + "prd.fr", + "presse.fr", + "tm.fr", + "aeroport.fr", + "assedic.fr", + "avocat.fr", + "avoues.fr", + "cci.fr", + "chambagri.fr", + "chirurgiens-dentistes.fr", + "experts-comptables.fr", + "geometre-expert.fr", + "gouv.fr", + "greta.fr", + "huissier-justice.fr", + "medecin.fr", + "notaires.fr", + "pharmacien.fr", + "port.fr", + "veterinaire.fr", + "ga", + "gb", + "gd", + "ge", + "com.ge", + "edu.ge", + "gov.ge", + "org.ge", + "mil.ge", + "net.ge", + "pvt.ge", + "gf", + "gg", + "co.gg", + "net.gg", + "org.gg", + "gh", + "com.gh", + "edu.gh", + "gov.gh", + "org.gh", + "mil.gh", + "gi", + "com.gi", + "ltd.gi", + "gov.gi", + "mod.gi", + "edu.gi", + "org.gi", + "gl", + "co.gl", + "com.gl", + "edu.gl", + "net.gl", + "org.gl", + "gm", + "gn", + "ac.gn", + "com.gn", + "edu.gn", + "gov.gn", + "org.gn", + "net.gn", + "gov", + "gp", + "com.gp", + "net.gp", + "mobi.gp", + "edu.gp", + "org.gp", + "asso.gp", + "gq", + "gr", + "com.gr", + "edu.gr", + "net.gr", + "org.gr", + "gov.gr", + "gs", + "gt", + "com.gt", + "edu.gt", + "gob.gt", + "ind.gt", + "mil.gt", + "net.gt", + "org.gt", + "*.gu", + "gw", + "gy", + "co.gy", + "com.gy", + "edu.gy", + "gov.gy", + "net.gy", + "org.gy", + "hk", + "com.hk", + "edu.hk", + "gov.hk", + "idv.hk", + "net.hk", + "org.hk", + "xn--55qx5d.hk", + "xn--wcvs22d.hk", + "xn--lcvr32d.hk", + "xn--mxtq1m.hk", + "xn--gmqw5a.hk", + "xn--ciqpn.hk", + "xn--gmq050i.hk", + "xn--zf0avx.hk", + "xn--io0a7i.hk", + "xn--mk0axi.hk", + "xn--od0alg.hk", + "xn--od0aq3b.hk", + "xn--tn0ag.hk", + "xn--uc0atv.hk", + "xn--uc0ay4a.hk", + "hm", + "hn", + "com.hn", + "edu.hn", + "org.hn", + "net.hn", + "mil.hn", + "gob.hn", + "hr", + "iz.hr", + "from.hr", + "name.hr", + "com.hr", + "ht", + "com.ht", + "shop.ht", + "firm.ht", + "info.ht", + "adult.ht", + "net.ht", + "pro.ht", + "org.ht", + "med.ht", + "art.ht", + "coop.ht", + "pol.ht", + "asso.ht", + "edu.ht", + "rel.ht", + "gouv.ht", + "perso.ht", + "hu", + "co.hu", + "info.hu", + "org.hu", + "priv.hu", + "sport.hu", + "tm.hu", + "2000.hu", + "agrar.hu", + "bolt.hu", + "casino.hu", + "city.hu", + "erotica.hu", + "erotika.hu", + "film.hu", + "forum.hu", + "games.hu", + "hotel.hu", + "ingatlan.hu", + "jogasz.hu", + "konyvelo.hu", + "lakas.hu", + "media.hu", + "news.hu", + "reklam.hu", + "sex.hu", + "shop.hu", + "suli.hu", + "szex.hu", + "tozsde.hu", + "utazas.hu", + "video.hu", + "id", + "ac.id", + "biz.id", + "co.id", + "desa.id", + "go.id", + "mil.id", + "my.id", + "net.id", + "or.id", + "sch.id", + "web.id", + "ie", + "gov.ie", + "il", + "ac.il", + "co.il", + "gov.il", + "idf.il", + "k12.il", + "muni.il", + "net.il", + "org.il", + "im", + "ac.im", + "co.im", + "com.im", + "ltd.co.im", + "net.im", + "org.im", + "plc.co.im", + "tt.im", + "tv.im", + "in", + "co.in", + "firm.in", + "net.in", + "org.in", + "gen.in", + "ind.in", + "nic.in", + "ac.in", + "edu.in", + "res.in", + "gov.in", + "mil.in", + "info", + "int", + "eu.int", + "io", + "com.io", + "iq", + "gov.iq", + "edu.iq", + "mil.iq", + "com.iq", + "org.iq", + "net.iq", + "ir", + "ac.ir", + "co.ir", + "gov.ir", + "id.ir", + "net.ir", + "org.ir", + "sch.ir", + "xn--mgba3a4f16a.ir", + "xn--mgba3a4fra.ir", + "is", + "net.is", + "com.is", + "edu.is", + "gov.is", + "org.is", + "int.is", + "it", + "gov.it", + "edu.it", + "abr.it", + "abruzzo.it", + "aosta-valley.it", + "aostavalley.it", + "bas.it", + "basilicata.it", + "cal.it", + "calabria.it", + "cam.it", + "campania.it", + "emilia-romagna.it", + "emiliaromagna.it", + "emr.it", + "friuli-v-giulia.it", + "friuli-ve-giulia.it", + "friuli-vegiulia.it", + "friuli-venezia-giulia.it", + "friuli-veneziagiulia.it", + "friuli-vgiulia.it", + "friuliv-giulia.it", + "friulive-giulia.it", + "friulivegiulia.it", + "friulivenezia-giulia.it", + "friuliveneziagiulia.it", + "friulivgiulia.it", + "fvg.it", + "laz.it", + "lazio.it", + "lig.it", + "liguria.it", + "lom.it", + "lombardia.it", + "lombardy.it", + "lucania.it", + "mar.it", + "marche.it", + "mol.it", + "molise.it", + "piedmont.it", + "piemonte.it", + "pmn.it", + "pug.it", + "puglia.it", + "sar.it", + "sardegna.it", + "sardinia.it", + "sic.it", + "sicilia.it", + "sicily.it", + "taa.it", + "tos.it", + "toscana.it", + "trentino-a-adige.it", + "trentino-aadige.it", + "trentino-alto-adige.it", + "trentino-altoadige.it", + "trentino-s-tirol.it", + "trentino-stirol.it", + "trentino-sud-tirol.it", + "trentino-sudtirol.it", + "trentino-sued-tirol.it", + "trentino-suedtirol.it", + "trentinoa-adige.it", + "trentinoaadige.it", + "trentinoalto-adige.it", + "trentinoaltoadige.it", + "trentinos-tirol.it", + "trentinostirol.it", + "trentinosud-tirol.it", + "trentinosudtirol.it", + "trentinosued-tirol.it", + "trentinosuedtirol.it", + "tuscany.it", + "umb.it", + "umbria.it", + "val-d-aosta.it", + "val-daosta.it", + "vald-aosta.it", + "valdaosta.it", + "valle-aosta.it", + "valle-d-aosta.it", + "valle-daosta.it", + "valleaosta.it", + "valled-aosta.it", + "valledaosta.it", + "vallee-aoste.it", + "valleeaoste.it", + "vao.it", + "vda.it", + "ven.it", + "veneto.it", + "ag.it", + "agrigento.it", + "al.it", + "alessandria.it", + "alto-adige.it", + "altoadige.it", + "an.it", + "ancona.it", + "andria-barletta-trani.it", + "andria-trani-barletta.it", + "andriabarlettatrani.it", + "andriatranibarletta.it", + "ao.it", + "aosta.it", + "aoste.it", + "ap.it", + "aq.it", + "aquila.it", + "ar.it", + "arezzo.it", + "ascoli-piceno.it", + "ascolipiceno.it", + "asti.it", + "at.it", + "av.it", + "avellino.it", + "ba.it", + "balsan.it", + "bari.it", + "barletta-trani-andria.it", + "barlettatraniandria.it", + "belluno.it", + "benevento.it", + "bergamo.it", + "bg.it", + "bi.it", + "biella.it", + "bl.it", + "bn.it", + "bo.it", + "bologna.it", + "bolzano.it", + "bozen.it", + "br.it", + "brescia.it", + "brindisi.it", + "bs.it", + "bt.it", + "bz.it", + "ca.it", + "cagliari.it", + "caltanissetta.it", + "campidano-medio.it", + "campidanomedio.it", + "campobasso.it", + "carbonia-iglesias.it", + "carboniaiglesias.it", + "carrara-massa.it", + "carraramassa.it", + "caserta.it", + "catania.it", + "catanzaro.it", + "cb.it", + "ce.it", + "cesena-forli.it", + "cesenaforli.it", + "ch.it", + "chieti.it", + "ci.it", + "cl.it", + "cn.it", + "co.it", + "como.it", + "cosenza.it", + "cr.it", + "cremona.it", + "crotone.it", + "cs.it", + "ct.it", + "cuneo.it", + "cz.it", + "dell-ogliastra.it", + "dellogliastra.it", + "en.it", + "enna.it", + "fc.it", + "fe.it", + "fermo.it", + "ferrara.it", + "fg.it", + "fi.it", + "firenze.it", + "florence.it", + "fm.it", + "foggia.it", + "forli-cesena.it", + "forlicesena.it", + "fr.it", + "frosinone.it", + "ge.it", + "genoa.it", + "genova.it", + "go.it", + "gorizia.it", + "gr.it", + "grosseto.it", + "iglesias-carbonia.it", + "iglesiascarbonia.it", + "im.it", + "imperia.it", + "is.it", + "isernia.it", + "kr.it", + "la-spezia.it", + "laquila.it", + "laspezia.it", + "latina.it", + "lc.it", + "le.it", + "lecce.it", + "lecco.it", + "li.it", + "livorno.it", + "lo.it", + "lodi.it", + "lt.it", + "lu.it", + "lucca.it", + "macerata.it", + "mantova.it", + "massa-carrara.it", + "massacarrara.it", + "matera.it", + "mb.it", + "mc.it", + "me.it", + "medio-campidano.it", + "mediocampidano.it", + "messina.it", + "mi.it", + "milan.it", + "milano.it", + "mn.it", + "mo.it", + "modena.it", + "monza-brianza.it", + "monza-e-della-brianza.it", + "monza.it", + "monzabrianza.it", + "monzaebrianza.it", + "monzaedellabrianza.it", + "ms.it", + "mt.it", + "na.it", + "naples.it", + "napoli.it", + "no.it", + "novara.it", + "nu.it", + "nuoro.it", + "og.it", + "ogliastra.it", + "olbia-tempio.it", + "olbiatempio.it", + "or.it", + "oristano.it", + "ot.it", + "pa.it", + "padova.it", + "padua.it", + "palermo.it", + "parma.it", + "pavia.it", + "pc.it", + "pd.it", + "pe.it", + "perugia.it", + "pesaro-urbino.it", + "pesarourbino.it", + "pescara.it", + "pg.it", + "pi.it", + "piacenza.it", + "pisa.it", + "pistoia.it", + "pn.it", + "po.it", + "pordenone.it", + "potenza.it", + "pr.it", + "prato.it", + "pt.it", + "pu.it", + "pv.it", + "pz.it", + "ra.it", + "ragusa.it", + "ravenna.it", + "rc.it", + "re.it", + "reggio-calabria.it", + "reggio-emilia.it", + "reggiocalabria.it", + "reggioemilia.it", + "rg.it", + "ri.it", + "rieti.it", + "rimini.it", + "rm.it", + "rn.it", + "ro.it", + "roma.it", + "rome.it", + "rovigo.it", + "sa.it", + "salerno.it", + "sassari.it", + "savona.it", + "si.it", + "siena.it", + "siracusa.it", + "so.it", + "sondrio.it", + "sp.it", + "sr.it", + "ss.it", + "suedtirol.it", + "sv.it", + "ta.it", + "taranto.it", + "te.it", + "tempio-olbia.it", + "tempioolbia.it", + "teramo.it", + "terni.it", + "tn.it", + "to.it", + "torino.it", + "tp.it", + "tr.it", + "trani-andria-barletta.it", + "trani-barletta-andria.it", + "traniandriabarletta.it", + "tranibarlettaandria.it", + "trapani.it", + "trentino.it", + "trento.it", + "treviso.it", + "trieste.it", + "ts.it", + "turin.it", + "tv.it", + "ud.it", + "udine.it", + "urbino-pesaro.it", + "urbinopesaro.it", + "va.it", + "varese.it", + "vb.it", + "vc.it", + "ve.it", + "venezia.it", + "venice.it", + "verbania.it", + "vercelli.it", + "verona.it", + "vi.it", + "vibo-valentia.it", + "vibovalentia.it", + "vicenza.it", + "viterbo.it", + "vr.it", + "vs.it", + "vt.it", + "vv.it", + "je", + "co.je", + "net.je", + "org.je", + "*.jm", + "jo", + "com.jo", + "org.jo", + "net.jo", + "edu.jo", + "sch.jo", + "gov.jo", + "mil.jo", + "name.jo", + "jobs", + "jp", + "ac.jp", + "ad.jp", + "co.jp", + "ed.jp", + "go.jp", + "gr.jp", + "lg.jp", + "ne.jp", + "or.jp", + "aichi.jp", + "akita.jp", + "aomori.jp", + "chiba.jp", + "ehime.jp", + "fukui.jp", + "fukuoka.jp", + "fukushima.jp", + "gifu.jp", + "gunma.jp", + "hiroshima.jp", + "hokkaido.jp", + "hyogo.jp", + "ibaraki.jp", + "ishikawa.jp", + "iwate.jp", + "kagawa.jp", + "kagoshima.jp", + "kanagawa.jp", + "kochi.jp", + "kumamoto.jp", + "kyoto.jp", + "mie.jp", + "miyagi.jp", + "miyazaki.jp", + "nagano.jp", + "nagasaki.jp", + "nara.jp", + "niigata.jp", + "oita.jp", + "okayama.jp", + "okinawa.jp", + "osaka.jp", + "saga.jp", + "saitama.jp", + "shiga.jp", + "shimane.jp", + "shizuoka.jp", + "tochigi.jp", + "tokushima.jp", + "tokyo.jp", + "tottori.jp", + "toyama.jp", + "wakayama.jp", + "yamagata.jp", + "yamaguchi.jp", + "yamanashi.jp", + "xn--4pvxs.jp", + "xn--vgu402c.jp", + "xn--c3s14m.jp", + "xn--f6qx53a.jp", + "xn--8pvr4u.jp", + "xn--uist22h.jp", + "xn--djrs72d6uy.jp", + "xn--mkru45i.jp", + "xn--0trq7p7nn.jp", + "xn--8ltr62k.jp", + "xn--2m4a15e.jp", + "xn--efvn9s.jp", + "xn--32vp30h.jp", + "xn--4it797k.jp", + "xn--1lqs71d.jp", + "xn--5rtp49c.jp", + "xn--5js045d.jp", + "xn--ehqz56n.jp", + "xn--1lqs03n.jp", + "xn--qqqt11m.jp", + "xn--kbrq7o.jp", + "xn--pssu33l.jp", + "xn--ntsq17g.jp", + "xn--uisz3g.jp", + "xn--6btw5a.jp", + "xn--1ctwo.jp", + "xn--6orx2r.jp", + "xn--rht61e.jp", + "xn--rht27z.jp", + "xn--djty4k.jp", + "xn--nit225k.jp", + "xn--rht3d.jp", + "xn--klty5x.jp", + "xn--kltx9a.jp", + "xn--kltp7d.jp", + "xn--uuwu58a.jp", + "xn--zbx025d.jp", + "xn--ntso0iqx3a.jp", + "xn--elqq16h.jp", + "xn--4it168d.jp", + "xn--klt787d.jp", + "xn--rny31h.jp", + "xn--7t0a264c.jp", + "xn--5rtq34k.jp", + "xn--k7yn95e.jp", + "xn--tor131o.jp", + "xn--d5qv7z876c.jp", + "*.kawasaki.jp", + "*.kitakyushu.jp", + "*.kobe.jp", + "*.nagoya.jp", + "*.sapporo.jp", + "*.sendai.jp", + "*.yokohama.jp", + "!city.kawasaki.jp", + "!city.kitakyushu.jp", + "!city.kobe.jp", + "!city.nagoya.jp", + "!city.sapporo.jp", + "!city.sendai.jp", + "!city.yokohama.jp", + "aisai.aichi.jp", + "ama.aichi.jp", + "anjo.aichi.jp", + "asuke.aichi.jp", + "chiryu.aichi.jp", + "chita.aichi.jp", + "fuso.aichi.jp", + "gamagori.aichi.jp", + "handa.aichi.jp", + "hazu.aichi.jp", + "hekinan.aichi.jp", + "higashiura.aichi.jp", + "ichinomiya.aichi.jp", + "inazawa.aichi.jp", + "inuyama.aichi.jp", + "isshiki.aichi.jp", + "iwakura.aichi.jp", + "kanie.aichi.jp", + "kariya.aichi.jp", + "kasugai.aichi.jp", + "kira.aichi.jp", + "kiyosu.aichi.jp", + "komaki.aichi.jp", + "konan.aichi.jp", + "kota.aichi.jp", + "mihama.aichi.jp", + "miyoshi.aichi.jp", + "nishio.aichi.jp", + "nisshin.aichi.jp", + "obu.aichi.jp", + "oguchi.aichi.jp", + "oharu.aichi.jp", + "okazaki.aichi.jp", + "owariasahi.aichi.jp", + "seto.aichi.jp", + "shikatsu.aichi.jp", + "shinshiro.aichi.jp", + "shitara.aichi.jp", + "tahara.aichi.jp", + "takahama.aichi.jp", + "tobishima.aichi.jp", + "toei.aichi.jp", + "togo.aichi.jp", + "tokai.aichi.jp", + "tokoname.aichi.jp", + "toyoake.aichi.jp", + "toyohashi.aichi.jp", + "toyokawa.aichi.jp", + "toyone.aichi.jp", + "toyota.aichi.jp", + "tsushima.aichi.jp", + "yatomi.aichi.jp", + "akita.akita.jp", + "daisen.akita.jp", + "fujisato.akita.jp", + "gojome.akita.jp", + "hachirogata.akita.jp", + "happou.akita.jp", + "higashinaruse.akita.jp", + "honjo.akita.jp", + "honjyo.akita.jp", + "ikawa.akita.jp", + "kamikoani.akita.jp", + "kamioka.akita.jp", + "katagami.akita.jp", + "kazuno.akita.jp", + "kitaakita.akita.jp", + "kosaka.akita.jp", + "kyowa.akita.jp", + "misato.akita.jp", + "mitane.akita.jp", + "moriyoshi.akita.jp", + "nikaho.akita.jp", + "noshiro.akita.jp", + "odate.akita.jp", + "oga.akita.jp", + "ogata.akita.jp", + "semboku.akita.jp", + "yokote.akita.jp", + "yurihonjo.akita.jp", + "aomori.aomori.jp", + "gonohe.aomori.jp", + "hachinohe.aomori.jp", + "hashikami.aomori.jp", + "hiranai.aomori.jp", + "hirosaki.aomori.jp", + "itayanagi.aomori.jp", + "kuroishi.aomori.jp", + "misawa.aomori.jp", + "mutsu.aomori.jp", + "nakadomari.aomori.jp", + "noheji.aomori.jp", + "oirase.aomori.jp", + "owani.aomori.jp", + "rokunohe.aomori.jp", + "sannohe.aomori.jp", + "shichinohe.aomori.jp", + "shingo.aomori.jp", + "takko.aomori.jp", + "towada.aomori.jp", + "tsugaru.aomori.jp", + "tsuruta.aomori.jp", + "abiko.chiba.jp", + "asahi.chiba.jp", + "chonan.chiba.jp", + "chosei.chiba.jp", + "choshi.chiba.jp", + "chuo.chiba.jp", + "funabashi.chiba.jp", + "futtsu.chiba.jp", + "hanamigawa.chiba.jp", + "ichihara.chiba.jp", + "ichikawa.chiba.jp", + "ichinomiya.chiba.jp", + "inzai.chiba.jp", + "isumi.chiba.jp", + "kamagaya.chiba.jp", + "kamogawa.chiba.jp", + "kashiwa.chiba.jp", + "katori.chiba.jp", + "katsuura.chiba.jp", + "kimitsu.chiba.jp", + "kisarazu.chiba.jp", + "kozaki.chiba.jp", + "kujukuri.chiba.jp", + "kyonan.chiba.jp", + "matsudo.chiba.jp", + "midori.chiba.jp", + "mihama.chiba.jp", + "minamiboso.chiba.jp", + "mobara.chiba.jp", + "mutsuzawa.chiba.jp", + "nagara.chiba.jp", + "nagareyama.chiba.jp", + "narashino.chiba.jp", + "narita.chiba.jp", + "noda.chiba.jp", + "oamishirasato.chiba.jp", + "omigawa.chiba.jp", + "onjuku.chiba.jp", + "otaki.chiba.jp", + "sakae.chiba.jp", + "sakura.chiba.jp", + "shimofusa.chiba.jp", + "shirako.chiba.jp", + "shiroi.chiba.jp", + "shisui.chiba.jp", + "sodegaura.chiba.jp", + "sosa.chiba.jp", + "tako.chiba.jp", + "tateyama.chiba.jp", + "togane.chiba.jp", + "tohnosho.chiba.jp", + "tomisato.chiba.jp", + "urayasu.chiba.jp", + "yachimata.chiba.jp", + "yachiyo.chiba.jp", + "yokaichiba.chiba.jp", + "yokoshibahikari.chiba.jp", + "yotsukaido.chiba.jp", + "ainan.ehime.jp", + "honai.ehime.jp", + "ikata.ehime.jp", + "imabari.ehime.jp", + "iyo.ehime.jp", + "kamijima.ehime.jp", + "kihoku.ehime.jp", + "kumakogen.ehime.jp", + "masaki.ehime.jp", + "matsuno.ehime.jp", + "matsuyama.ehime.jp", + "namikata.ehime.jp", + "niihama.ehime.jp", + "ozu.ehime.jp", + "saijo.ehime.jp", + "seiyo.ehime.jp", + "shikokuchuo.ehime.jp", + "tobe.ehime.jp", + "toon.ehime.jp", + "uchiko.ehime.jp", + "uwajima.ehime.jp", + "yawatahama.ehime.jp", + "echizen.fukui.jp", + "eiheiji.fukui.jp", + "fukui.fukui.jp", + "ikeda.fukui.jp", + "katsuyama.fukui.jp", + "mihama.fukui.jp", + "minamiechizen.fukui.jp", + "obama.fukui.jp", + "ohi.fukui.jp", + "ono.fukui.jp", + "sabae.fukui.jp", + "sakai.fukui.jp", + "takahama.fukui.jp", + "tsuruga.fukui.jp", + "wakasa.fukui.jp", + "ashiya.fukuoka.jp", + "buzen.fukuoka.jp", + "chikugo.fukuoka.jp", + "chikuho.fukuoka.jp", + "chikujo.fukuoka.jp", + "chikushino.fukuoka.jp", + "chikuzen.fukuoka.jp", + "chuo.fukuoka.jp", + "dazaifu.fukuoka.jp", + "fukuchi.fukuoka.jp", + "hakata.fukuoka.jp", + "higashi.fukuoka.jp", + "hirokawa.fukuoka.jp", + "hisayama.fukuoka.jp", + "iizuka.fukuoka.jp", + "inatsuki.fukuoka.jp", + "kaho.fukuoka.jp", + "kasuga.fukuoka.jp", + "kasuya.fukuoka.jp", + "kawara.fukuoka.jp", + "keisen.fukuoka.jp", + "koga.fukuoka.jp", + "kurate.fukuoka.jp", + "kurogi.fukuoka.jp", + "kurume.fukuoka.jp", + "minami.fukuoka.jp", + "miyako.fukuoka.jp", + "miyama.fukuoka.jp", + "miyawaka.fukuoka.jp", + "mizumaki.fukuoka.jp", + "munakata.fukuoka.jp", + "nakagawa.fukuoka.jp", + "nakama.fukuoka.jp", + "nishi.fukuoka.jp", + "nogata.fukuoka.jp", + "ogori.fukuoka.jp", + "okagaki.fukuoka.jp", + "okawa.fukuoka.jp", + "oki.fukuoka.jp", + "omuta.fukuoka.jp", + "onga.fukuoka.jp", + "onojo.fukuoka.jp", + "oto.fukuoka.jp", + "saigawa.fukuoka.jp", + "sasaguri.fukuoka.jp", + "shingu.fukuoka.jp", + "shinyoshitomi.fukuoka.jp", + "shonai.fukuoka.jp", + "soeda.fukuoka.jp", + "sue.fukuoka.jp", + "tachiarai.fukuoka.jp", + "tagawa.fukuoka.jp", + "takata.fukuoka.jp", + "toho.fukuoka.jp", + "toyotsu.fukuoka.jp", + "tsuiki.fukuoka.jp", + "ukiha.fukuoka.jp", + "umi.fukuoka.jp", + "usui.fukuoka.jp", + "yamada.fukuoka.jp", + "yame.fukuoka.jp", + "yanagawa.fukuoka.jp", + "yukuhashi.fukuoka.jp", + "aizubange.fukushima.jp", + "aizumisato.fukushima.jp", + "aizuwakamatsu.fukushima.jp", + "asakawa.fukushima.jp", + "bandai.fukushima.jp", + "date.fukushima.jp", + "fukushima.fukushima.jp", + "furudono.fukushima.jp", + "futaba.fukushima.jp", + "hanawa.fukushima.jp", + "higashi.fukushima.jp", + "hirata.fukushima.jp", + "hirono.fukushima.jp", + "iitate.fukushima.jp", + "inawashiro.fukushima.jp", + "ishikawa.fukushima.jp", + "iwaki.fukushima.jp", + "izumizaki.fukushima.jp", + "kagamiishi.fukushima.jp", + "kaneyama.fukushima.jp", + "kawamata.fukushima.jp", + "kitakata.fukushima.jp", + "kitashiobara.fukushima.jp", + "koori.fukushima.jp", + "koriyama.fukushima.jp", + "kunimi.fukushima.jp", + "miharu.fukushima.jp", + "mishima.fukushima.jp", + "namie.fukushima.jp", + "nango.fukushima.jp", + "nishiaizu.fukushima.jp", + "nishigo.fukushima.jp", + "okuma.fukushima.jp", + "omotego.fukushima.jp", + "ono.fukushima.jp", + "otama.fukushima.jp", + "samegawa.fukushima.jp", + "shimogo.fukushima.jp", + "shirakawa.fukushima.jp", + "showa.fukushima.jp", + "soma.fukushima.jp", + "sukagawa.fukushima.jp", + "taishin.fukushima.jp", + "tamakawa.fukushima.jp", + "tanagura.fukushima.jp", + "tenei.fukushima.jp", + "yabuki.fukushima.jp", + "yamato.fukushima.jp", + "yamatsuri.fukushima.jp", + "yanaizu.fukushima.jp", + "yugawa.fukushima.jp", + "anpachi.gifu.jp", + "ena.gifu.jp", + "gifu.gifu.jp", + "ginan.gifu.jp", + "godo.gifu.jp", + "gujo.gifu.jp", + "hashima.gifu.jp", + "hichiso.gifu.jp", + "hida.gifu.jp", + "higashishirakawa.gifu.jp", + "ibigawa.gifu.jp", + "ikeda.gifu.jp", + "kakamigahara.gifu.jp", + "kani.gifu.jp", + "kasahara.gifu.jp", + "kasamatsu.gifu.jp", + "kawaue.gifu.jp", + "kitagata.gifu.jp", + "mino.gifu.jp", + "minokamo.gifu.jp", + "mitake.gifu.jp", + "mizunami.gifu.jp", + "motosu.gifu.jp", + "nakatsugawa.gifu.jp", + "ogaki.gifu.jp", + "sakahogi.gifu.jp", + "seki.gifu.jp", + "sekigahara.gifu.jp", + "shirakawa.gifu.jp", + "tajimi.gifu.jp", + "takayama.gifu.jp", + "tarui.gifu.jp", + "toki.gifu.jp", + "tomika.gifu.jp", + "wanouchi.gifu.jp", + "yamagata.gifu.jp", + "yaotsu.gifu.jp", + "yoro.gifu.jp", + "annaka.gunma.jp", + "chiyoda.gunma.jp", + "fujioka.gunma.jp", + "higashiagatsuma.gunma.jp", + "isesaki.gunma.jp", + "itakura.gunma.jp", + "kanna.gunma.jp", + "kanra.gunma.jp", + "katashina.gunma.jp", + "kawaba.gunma.jp", + "kiryu.gunma.jp", + "kusatsu.gunma.jp", + "maebashi.gunma.jp", + "meiwa.gunma.jp", + "midori.gunma.jp", + "minakami.gunma.jp", + "naganohara.gunma.jp", + "nakanojo.gunma.jp", + "nanmoku.gunma.jp", + "numata.gunma.jp", + "oizumi.gunma.jp", + "ora.gunma.jp", + "ota.gunma.jp", + "shibukawa.gunma.jp", + "shimonita.gunma.jp", + "shinto.gunma.jp", + "showa.gunma.jp", + "takasaki.gunma.jp", + "takayama.gunma.jp", + "tamamura.gunma.jp", + "tatebayashi.gunma.jp", + "tomioka.gunma.jp", + "tsukiyono.gunma.jp", + "tsumagoi.gunma.jp", + "ueno.gunma.jp", + "yoshioka.gunma.jp", + "asaminami.hiroshima.jp", + "daiwa.hiroshima.jp", + "etajima.hiroshima.jp", + "fuchu.hiroshima.jp", + "fukuyama.hiroshima.jp", + "hatsukaichi.hiroshima.jp", + "higashihiroshima.hiroshima.jp", + "hongo.hiroshima.jp", + "jinsekikogen.hiroshima.jp", + "kaita.hiroshima.jp", + "kui.hiroshima.jp", + "kumano.hiroshima.jp", + "kure.hiroshima.jp", + "mihara.hiroshima.jp", + "miyoshi.hiroshima.jp", + "naka.hiroshima.jp", + "onomichi.hiroshima.jp", + "osakikamijima.hiroshima.jp", + "otake.hiroshima.jp", + "saka.hiroshima.jp", + "sera.hiroshima.jp", + "seranishi.hiroshima.jp", + "shinichi.hiroshima.jp", + "shobara.hiroshima.jp", + "takehara.hiroshima.jp", + "abashiri.hokkaido.jp", + "abira.hokkaido.jp", + "aibetsu.hokkaido.jp", + "akabira.hokkaido.jp", + "akkeshi.hokkaido.jp", + "asahikawa.hokkaido.jp", + "ashibetsu.hokkaido.jp", + "ashoro.hokkaido.jp", + "assabu.hokkaido.jp", + "atsuma.hokkaido.jp", + "bibai.hokkaido.jp", + "biei.hokkaido.jp", + "bifuka.hokkaido.jp", + "bihoro.hokkaido.jp", + "biratori.hokkaido.jp", + "chippubetsu.hokkaido.jp", + "chitose.hokkaido.jp", + "date.hokkaido.jp", + "ebetsu.hokkaido.jp", + "embetsu.hokkaido.jp", + "eniwa.hokkaido.jp", + "erimo.hokkaido.jp", + "esan.hokkaido.jp", + "esashi.hokkaido.jp", + "fukagawa.hokkaido.jp", + "fukushima.hokkaido.jp", + "furano.hokkaido.jp", + "furubira.hokkaido.jp", + "haboro.hokkaido.jp", + "hakodate.hokkaido.jp", + "hamatonbetsu.hokkaido.jp", + "hidaka.hokkaido.jp", + "higashikagura.hokkaido.jp", + "higashikawa.hokkaido.jp", + "hiroo.hokkaido.jp", + "hokuryu.hokkaido.jp", + "hokuto.hokkaido.jp", + "honbetsu.hokkaido.jp", + "horokanai.hokkaido.jp", + "horonobe.hokkaido.jp", + "ikeda.hokkaido.jp", + "imakane.hokkaido.jp", + "ishikari.hokkaido.jp", + "iwamizawa.hokkaido.jp", + "iwanai.hokkaido.jp", + "kamifurano.hokkaido.jp", + "kamikawa.hokkaido.jp", + "kamishihoro.hokkaido.jp", + "kamisunagawa.hokkaido.jp", + "kamoenai.hokkaido.jp", + "kayabe.hokkaido.jp", + "kembuchi.hokkaido.jp", + "kikonai.hokkaido.jp", + "kimobetsu.hokkaido.jp", + "kitahiroshima.hokkaido.jp", + "kitami.hokkaido.jp", + "kiyosato.hokkaido.jp", + "koshimizu.hokkaido.jp", + "kunneppu.hokkaido.jp", + "kuriyama.hokkaido.jp", + "kuromatsunai.hokkaido.jp", + "kushiro.hokkaido.jp", + "kutchan.hokkaido.jp", + "kyowa.hokkaido.jp", + "mashike.hokkaido.jp", + "matsumae.hokkaido.jp", + "mikasa.hokkaido.jp", + "minamifurano.hokkaido.jp", + "mombetsu.hokkaido.jp", + "moseushi.hokkaido.jp", + "mukawa.hokkaido.jp", + "muroran.hokkaido.jp", + "naie.hokkaido.jp", + "nakagawa.hokkaido.jp", + "nakasatsunai.hokkaido.jp", + "nakatombetsu.hokkaido.jp", + "nanae.hokkaido.jp", + "nanporo.hokkaido.jp", + "nayoro.hokkaido.jp", + "nemuro.hokkaido.jp", + "niikappu.hokkaido.jp", + "niki.hokkaido.jp", + "nishiokoppe.hokkaido.jp", + "noboribetsu.hokkaido.jp", + "numata.hokkaido.jp", + "obihiro.hokkaido.jp", + "obira.hokkaido.jp", + "oketo.hokkaido.jp", + "okoppe.hokkaido.jp", + "otaru.hokkaido.jp", + "otobe.hokkaido.jp", + "otofuke.hokkaido.jp", + "otoineppu.hokkaido.jp", + "oumu.hokkaido.jp", + "ozora.hokkaido.jp", + "pippu.hokkaido.jp", + "rankoshi.hokkaido.jp", + "rebun.hokkaido.jp", + "rikubetsu.hokkaido.jp", + "rishiri.hokkaido.jp", + "rishirifuji.hokkaido.jp", + "saroma.hokkaido.jp", + "sarufutsu.hokkaido.jp", + "shakotan.hokkaido.jp", + "shari.hokkaido.jp", + "shibecha.hokkaido.jp", + "shibetsu.hokkaido.jp", + "shikabe.hokkaido.jp", + "shikaoi.hokkaido.jp", + "shimamaki.hokkaido.jp", + "shimizu.hokkaido.jp", + "shimokawa.hokkaido.jp", + "shinshinotsu.hokkaido.jp", + "shintoku.hokkaido.jp", + "shiranuka.hokkaido.jp", + "shiraoi.hokkaido.jp", + "shiriuchi.hokkaido.jp", + "sobetsu.hokkaido.jp", + "sunagawa.hokkaido.jp", + "taiki.hokkaido.jp", + "takasu.hokkaido.jp", + "takikawa.hokkaido.jp", + "takinoue.hokkaido.jp", + "teshikaga.hokkaido.jp", + "tobetsu.hokkaido.jp", + "tohma.hokkaido.jp", + "tomakomai.hokkaido.jp", + "tomari.hokkaido.jp", + "toya.hokkaido.jp", + "toyako.hokkaido.jp", + "toyotomi.hokkaido.jp", + "toyoura.hokkaido.jp", + "tsubetsu.hokkaido.jp", + "tsukigata.hokkaido.jp", + "urakawa.hokkaido.jp", + "urausu.hokkaido.jp", + "uryu.hokkaido.jp", + "utashinai.hokkaido.jp", + "wakkanai.hokkaido.jp", + "wassamu.hokkaido.jp", + "yakumo.hokkaido.jp", + "yoichi.hokkaido.jp", + "aioi.hyogo.jp", + "akashi.hyogo.jp", + "ako.hyogo.jp", + "amagasaki.hyogo.jp", + "aogaki.hyogo.jp", + "asago.hyogo.jp", + "ashiya.hyogo.jp", + "awaji.hyogo.jp", + "fukusaki.hyogo.jp", + "goshiki.hyogo.jp", + "harima.hyogo.jp", + "himeji.hyogo.jp", + "ichikawa.hyogo.jp", + "inagawa.hyogo.jp", + "itami.hyogo.jp", + "kakogawa.hyogo.jp", + "kamigori.hyogo.jp", + "kamikawa.hyogo.jp", + "kasai.hyogo.jp", + "kasuga.hyogo.jp", + "kawanishi.hyogo.jp", + "miki.hyogo.jp", + "minamiawaji.hyogo.jp", + "nishinomiya.hyogo.jp", + "nishiwaki.hyogo.jp", + "ono.hyogo.jp", + "sanda.hyogo.jp", + "sannan.hyogo.jp", + "sasayama.hyogo.jp", + "sayo.hyogo.jp", + "shingu.hyogo.jp", + "shinonsen.hyogo.jp", + "shiso.hyogo.jp", + "sumoto.hyogo.jp", + "taishi.hyogo.jp", + "taka.hyogo.jp", + "takarazuka.hyogo.jp", + "takasago.hyogo.jp", + "takino.hyogo.jp", + "tamba.hyogo.jp", + "tatsuno.hyogo.jp", + "toyooka.hyogo.jp", + "yabu.hyogo.jp", + "yashiro.hyogo.jp", + "yoka.hyogo.jp", + "yokawa.hyogo.jp", + "ami.ibaraki.jp", + "asahi.ibaraki.jp", + "bando.ibaraki.jp", + "chikusei.ibaraki.jp", + "daigo.ibaraki.jp", + "fujishiro.ibaraki.jp", + "hitachi.ibaraki.jp", + "hitachinaka.ibaraki.jp", + "hitachiomiya.ibaraki.jp", + "hitachiota.ibaraki.jp", + "ibaraki.ibaraki.jp", + "ina.ibaraki.jp", + "inashiki.ibaraki.jp", + "itako.ibaraki.jp", + "iwama.ibaraki.jp", + "joso.ibaraki.jp", + "kamisu.ibaraki.jp", + "kasama.ibaraki.jp", + "kashima.ibaraki.jp", + "kasumigaura.ibaraki.jp", + "koga.ibaraki.jp", + "miho.ibaraki.jp", + "mito.ibaraki.jp", + "moriya.ibaraki.jp", + "naka.ibaraki.jp", + "namegata.ibaraki.jp", + "oarai.ibaraki.jp", + "ogawa.ibaraki.jp", + "omitama.ibaraki.jp", + "ryugasaki.ibaraki.jp", + "sakai.ibaraki.jp", + "sakuragawa.ibaraki.jp", + "shimodate.ibaraki.jp", + "shimotsuma.ibaraki.jp", + "shirosato.ibaraki.jp", + "sowa.ibaraki.jp", + "suifu.ibaraki.jp", + "takahagi.ibaraki.jp", + "tamatsukuri.ibaraki.jp", + "tokai.ibaraki.jp", + "tomobe.ibaraki.jp", + "tone.ibaraki.jp", + "toride.ibaraki.jp", + "tsuchiura.ibaraki.jp", + "tsukuba.ibaraki.jp", + "uchihara.ibaraki.jp", + "ushiku.ibaraki.jp", + "yachiyo.ibaraki.jp", + "yamagata.ibaraki.jp", + "yawara.ibaraki.jp", + "yuki.ibaraki.jp", + "anamizu.ishikawa.jp", + "hakui.ishikawa.jp", + "hakusan.ishikawa.jp", + "kaga.ishikawa.jp", + "kahoku.ishikawa.jp", + "kanazawa.ishikawa.jp", + "kawakita.ishikawa.jp", + "komatsu.ishikawa.jp", + "nakanoto.ishikawa.jp", + "nanao.ishikawa.jp", + "nomi.ishikawa.jp", + "nonoichi.ishikawa.jp", + "noto.ishikawa.jp", + "shika.ishikawa.jp", + "suzu.ishikawa.jp", + "tsubata.ishikawa.jp", + "tsurugi.ishikawa.jp", + "uchinada.ishikawa.jp", + "wajima.ishikawa.jp", + "fudai.iwate.jp", + "fujisawa.iwate.jp", + "hanamaki.iwate.jp", + "hiraizumi.iwate.jp", + "hirono.iwate.jp", + "ichinohe.iwate.jp", + "ichinoseki.iwate.jp", + "iwaizumi.iwate.jp", + "iwate.iwate.jp", + "joboji.iwate.jp", + "kamaishi.iwate.jp", + "kanegasaki.iwate.jp", + "karumai.iwate.jp", + "kawai.iwate.jp", + "kitakami.iwate.jp", + "kuji.iwate.jp", + "kunohe.iwate.jp", + "kuzumaki.iwate.jp", + "miyako.iwate.jp", + "mizusawa.iwate.jp", + "morioka.iwate.jp", + "ninohe.iwate.jp", + "noda.iwate.jp", + "ofunato.iwate.jp", + "oshu.iwate.jp", + "otsuchi.iwate.jp", + "rikuzentakata.iwate.jp", + "shiwa.iwate.jp", + "shizukuishi.iwate.jp", + "sumita.iwate.jp", + "tanohata.iwate.jp", + "tono.iwate.jp", + "yahaba.iwate.jp", + "yamada.iwate.jp", + "ayagawa.kagawa.jp", + "higashikagawa.kagawa.jp", + "kanonji.kagawa.jp", + "kotohira.kagawa.jp", + "manno.kagawa.jp", + "marugame.kagawa.jp", + "mitoyo.kagawa.jp", + "naoshima.kagawa.jp", + "sanuki.kagawa.jp", + "tadotsu.kagawa.jp", + "takamatsu.kagawa.jp", + "tonosho.kagawa.jp", + "uchinomi.kagawa.jp", + "utazu.kagawa.jp", + "zentsuji.kagawa.jp", + "akune.kagoshima.jp", + "amami.kagoshima.jp", + "hioki.kagoshima.jp", + "isa.kagoshima.jp", + "isen.kagoshima.jp", + "izumi.kagoshima.jp", + "kagoshima.kagoshima.jp", + "kanoya.kagoshima.jp", + "kawanabe.kagoshima.jp", + "kinko.kagoshima.jp", + "kouyama.kagoshima.jp", + "makurazaki.kagoshima.jp", + "matsumoto.kagoshima.jp", + "minamitane.kagoshima.jp", + "nakatane.kagoshima.jp", + "nishinoomote.kagoshima.jp", + "satsumasendai.kagoshima.jp", + "soo.kagoshima.jp", + "tarumizu.kagoshima.jp", + "yusui.kagoshima.jp", + "aikawa.kanagawa.jp", + "atsugi.kanagawa.jp", + "ayase.kanagawa.jp", + "chigasaki.kanagawa.jp", + "ebina.kanagawa.jp", + "fujisawa.kanagawa.jp", + "hadano.kanagawa.jp", + "hakone.kanagawa.jp", + "hiratsuka.kanagawa.jp", + "isehara.kanagawa.jp", + "kaisei.kanagawa.jp", + "kamakura.kanagawa.jp", + "kiyokawa.kanagawa.jp", + "matsuda.kanagawa.jp", + "minamiashigara.kanagawa.jp", + "miura.kanagawa.jp", + "nakai.kanagawa.jp", + "ninomiya.kanagawa.jp", + "odawara.kanagawa.jp", + "oi.kanagawa.jp", + "oiso.kanagawa.jp", + "sagamihara.kanagawa.jp", + "samukawa.kanagawa.jp", + "tsukui.kanagawa.jp", + "yamakita.kanagawa.jp", + "yamato.kanagawa.jp", + "yokosuka.kanagawa.jp", + "yugawara.kanagawa.jp", + "zama.kanagawa.jp", + "zushi.kanagawa.jp", + "aki.kochi.jp", + "geisei.kochi.jp", + "hidaka.kochi.jp", + "higashitsuno.kochi.jp", + "ino.kochi.jp", + "kagami.kochi.jp", + "kami.kochi.jp", + "kitagawa.kochi.jp", + "kochi.kochi.jp", + "mihara.kochi.jp", + "motoyama.kochi.jp", + "muroto.kochi.jp", + "nahari.kochi.jp", + "nakamura.kochi.jp", + "nankoku.kochi.jp", + "nishitosa.kochi.jp", + "niyodogawa.kochi.jp", + "ochi.kochi.jp", + "okawa.kochi.jp", + "otoyo.kochi.jp", + "otsuki.kochi.jp", + "sakawa.kochi.jp", + "sukumo.kochi.jp", + "susaki.kochi.jp", + "tosa.kochi.jp", + "tosashimizu.kochi.jp", + "toyo.kochi.jp", + "tsuno.kochi.jp", + "umaji.kochi.jp", + "yasuda.kochi.jp", + "yusuhara.kochi.jp", + "amakusa.kumamoto.jp", + "arao.kumamoto.jp", + "aso.kumamoto.jp", + "choyo.kumamoto.jp", + "gyokuto.kumamoto.jp", + "kamiamakusa.kumamoto.jp", + "kikuchi.kumamoto.jp", + "kumamoto.kumamoto.jp", + "mashiki.kumamoto.jp", + "mifune.kumamoto.jp", + "minamata.kumamoto.jp", + "minamioguni.kumamoto.jp", + "nagasu.kumamoto.jp", + "nishihara.kumamoto.jp", + "oguni.kumamoto.jp", + "ozu.kumamoto.jp", + "sumoto.kumamoto.jp", + "takamori.kumamoto.jp", + "uki.kumamoto.jp", + "uto.kumamoto.jp", + "yamaga.kumamoto.jp", + "yamato.kumamoto.jp", + "yatsushiro.kumamoto.jp", + "ayabe.kyoto.jp", + "fukuchiyama.kyoto.jp", + "higashiyama.kyoto.jp", + "ide.kyoto.jp", + "ine.kyoto.jp", + "joyo.kyoto.jp", + "kameoka.kyoto.jp", + "kamo.kyoto.jp", + "kita.kyoto.jp", + "kizu.kyoto.jp", + "kumiyama.kyoto.jp", + "kyotamba.kyoto.jp", + "kyotanabe.kyoto.jp", + "kyotango.kyoto.jp", + "maizuru.kyoto.jp", + "minami.kyoto.jp", + "minamiyamashiro.kyoto.jp", + "miyazu.kyoto.jp", + "muko.kyoto.jp", + "nagaokakyo.kyoto.jp", + "nakagyo.kyoto.jp", + "nantan.kyoto.jp", + "oyamazaki.kyoto.jp", + "sakyo.kyoto.jp", + "seika.kyoto.jp", + "tanabe.kyoto.jp", + "uji.kyoto.jp", + "ujitawara.kyoto.jp", + "wazuka.kyoto.jp", + "yamashina.kyoto.jp", + "yawata.kyoto.jp", + "asahi.mie.jp", + "inabe.mie.jp", + "ise.mie.jp", + "kameyama.mie.jp", + "kawagoe.mie.jp", + "kiho.mie.jp", + "kisosaki.mie.jp", + "kiwa.mie.jp", + "komono.mie.jp", + "kumano.mie.jp", + "kuwana.mie.jp", + "matsusaka.mie.jp", + "meiwa.mie.jp", + "mihama.mie.jp", + "minamiise.mie.jp", + "misugi.mie.jp", + "miyama.mie.jp", + "nabari.mie.jp", + "shima.mie.jp", + "suzuka.mie.jp", + "tado.mie.jp", + "taiki.mie.jp", + "taki.mie.jp", + "tamaki.mie.jp", + "toba.mie.jp", + "tsu.mie.jp", + "udono.mie.jp", + "ureshino.mie.jp", + "watarai.mie.jp", + "yokkaichi.mie.jp", + "furukawa.miyagi.jp", + "higashimatsushima.miyagi.jp", + "ishinomaki.miyagi.jp", + "iwanuma.miyagi.jp", + "kakuda.miyagi.jp", + "kami.miyagi.jp", + "kawasaki.miyagi.jp", + "marumori.miyagi.jp", + "matsushima.miyagi.jp", + "minamisanriku.miyagi.jp", + "misato.miyagi.jp", + "murata.miyagi.jp", + "natori.miyagi.jp", + "ogawara.miyagi.jp", + "ohira.miyagi.jp", + "onagawa.miyagi.jp", + "osaki.miyagi.jp", + "rifu.miyagi.jp", + "semine.miyagi.jp", + "shibata.miyagi.jp", + "shichikashuku.miyagi.jp", + "shikama.miyagi.jp", + "shiogama.miyagi.jp", + "shiroishi.miyagi.jp", + "tagajo.miyagi.jp", + "taiwa.miyagi.jp", + "tome.miyagi.jp", + "tomiya.miyagi.jp", + "wakuya.miyagi.jp", + "watari.miyagi.jp", + "yamamoto.miyagi.jp", + "zao.miyagi.jp", + "aya.miyazaki.jp", + "ebino.miyazaki.jp", + "gokase.miyazaki.jp", + "hyuga.miyazaki.jp", + "kadogawa.miyazaki.jp", + "kawaminami.miyazaki.jp", + "kijo.miyazaki.jp", + "kitagawa.miyazaki.jp", + "kitakata.miyazaki.jp", + "kitaura.miyazaki.jp", + "kobayashi.miyazaki.jp", + "kunitomi.miyazaki.jp", + "kushima.miyazaki.jp", + "mimata.miyazaki.jp", + "miyakonojo.miyazaki.jp", + "miyazaki.miyazaki.jp", + "morotsuka.miyazaki.jp", + "nichinan.miyazaki.jp", + "nishimera.miyazaki.jp", + "nobeoka.miyazaki.jp", + "saito.miyazaki.jp", + "shiiba.miyazaki.jp", + "shintomi.miyazaki.jp", + "takaharu.miyazaki.jp", + "takanabe.miyazaki.jp", + "takazaki.miyazaki.jp", + "tsuno.miyazaki.jp", + "achi.nagano.jp", + "agematsu.nagano.jp", + "anan.nagano.jp", + "aoki.nagano.jp", + "asahi.nagano.jp", + "azumino.nagano.jp", + "chikuhoku.nagano.jp", + "chikuma.nagano.jp", + "chino.nagano.jp", + "fujimi.nagano.jp", + "hakuba.nagano.jp", + "hara.nagano.jp", + "hiraya.nagano.jp", + "iida.nagano.jp", + "iijima.nagano.jp", + "iiyama.nagano.jp", + "iizuna.nagano.jp", + "ikeda.nagano.jp", + "ikusaka.nagano.jp", + "ina.nagano.jp", + "karuizawa.nagano.jp", + "kawakami.nagano.jp", + "kiso.nagano.jp", + "kisofukushima.nagano.jp", + "kitaaiki.nagano.jp", + "komagane.nagano.jp", + "komoro.nagano.jp", + "matsukawa.nagano.jp", + "matsumoto.nagano.jp", + "miasa.nagano.jp", + "minamiaiki.nagano.jp", + "minamimaki.nagano.jp", + "minamiminowa.nagano.jp", + "minowa.nagano.jp", + "miyada.nagano.jp", + "miyota.nagano.jp", + "mochizuki.nagano.jp", + "nagano.nagano.jp", + "nagawa.nagano.jp", + "nagiso.nagano.jp", + "nakagawa.nagano.jp", + "nakano.nagano.jp", + "nozawaonsen.nagano.jp", + "obuse.nagano.jp", + "ogawa.nagano.jp", + "okaya.nagano.jp", + "omachi.nagano.jp", + "omi.nagano.jp", + "ookuwa.nagano.jp", + "ooshika.nagano.jp", + "otaki.nagano.jp", + "otari.nagano.jp", + "sakae.nagano.jp", + "sakaki.nagano.jp", + "saku.nagano.jp", + "sakuho.nagano.jp", + "shimosuwa.nagano.jp", + "shinanomachi.nagano.jp", + "shiojiri.nagano.jp", + "suwa.nagano.jp", + "suzaka.nagano.jp", + "takagi.nagano.jp", + "takamori.nagano.jp", + "takayama.nagano.jp", + "tateshina.nagano.jp", + "tatsuno.nagano.jp", + "togakushi.nagano.jp", + "togura.nagano.jp", + "tomi.nagano.jp", + "ueda.nagano.jp", + "wada.nagano.jp", + "yamagata.nagano.jp", + "yamanouchi.nagano.jp", + "yasaka.nagano.jp", + "yasuoka.nagano.jp", + "chijiwa.nagasaki.jp", + "futsu.nagasaki.jp", + "goto.nagasaki.jp", + "hasami.nagasaki.jp", + "hirado.nagasaki.jp", + "iki.nagasaki.jp", + "isahaya.nagasaki.jp", + "kawatana.nagasaki.jp", + "kuchinotsu.nagasaki.jp", + "matsuura.nagasaki.jp", + "nagasaki.nagasaki.jp", + "obama.nagasaki.jp", + "omura.nagasaki.jp", + "oseto.nagasaki.jp", + "saikai.nagasaki.jp", + "sasebo.nagasaki.jp", + "seihi.nagasaki.jp", + "shimabara.nagasaki.jp", + "shinkamigoto.nagasaki.jp", + "togitsu.nagasaki.jp", + "tsushima.nagasaki.jp", + "unzen.nagasaki.jp", + "ando.nara.jp", + "gose.nara.jp", + "heguri.nara.jp", + "higashiyoshino.nara.jp", + "ikaruga.nara.jp", + "ikoma.nara.jp", + "kamikitayama.nara.jp", + "kanmaki.nara.jp", + "kashiba.nara.jp", + "kashihara.nara.jp", + "katsuragi.nara.jp", + "kawai.nara.jp", + "kawakami.nara.jp", + "kawanishi.nara.jp", + "koryo.nara.jp", + "kurotaki.nara.jp", + "mitsue.nara.jp", + "miyake.nara.jp", + "nara.nara.jp", + "nosegawa.nara.jp", + "oji.nara.jp", + "ouda.nara.jp", + "oyodo.nara.jp", + "sakurai.nara.jp", + "sango.nara.jp", + "shimoichi.nara.jp", + "shimokitayama.nara.jp", + "shinjo.nara.jp", + "soni.nara.jp", + "takatori.nara.jp", + "tawaramoto.nara.jp", + "tenkawa.nara.jp", + "tenri.nara.jp", + "uda.nara.jp", + "yamatokoriyama.nara.jp", + "yamatotakada.nara.jp", + "yamazoe.nara.jp", + "yoshino.nara.jp", + "aga.niigata.jp", + "agano.niigata.jp", + "gosen.niigata.jp", + "itoigawa.niigata.jp", + "izumozaki.niigata.jp", + "joetsu.niigata.jp", + "kamo.niigata.jp", + "kariwa.niigata.jp", + "kashiwazaki.niigata.jp", + "minamiuonuma.niigata.jp", + "mitsuke.niigata.jp", + "muika.niigata.jp", + "murakami.niigata.jp", + "myoko.niigata.jp", + "nagaoka.niigata.jp", + "niigata.niigata.jp", + "ojiya.niigata.jp", + "omi.niigata.jp", + "sado.niigata.jp", + "sanjo.niigata.jp", + "seiro.niigata.jp", + "seirou.niigata.jp", + "sekikawa.niigata.jp", + "shibata.niigata.jp", + "tagami.niigata.jp", + "tainai.niigata.jp", + "tochio.niigata.jp", + "tokamachi.niigata.jp", + "tsubame.niigata.jp", + "tsunan.niigata.jp", + "uonuma.niigata.jp", + "yahiko.niigata.jp", + "yoita.niigata.jp", + "yuzawa.niigata.jp", + "beppu.oita.jp", + "bungoono.oita.jp", + "bungotakada.oita.jp", + "hasama.oita.jp", + "hiji.oita.jp", + "himeshima.oita.jp", + "hita.oita.jp", + "kamitsue.oita.jp", + "kokonoe.oita.jp", + "kuju.oita.jp", + "kunisaki.oita.jp", + "kusu.oita.jp", + "oita.oita.jp", + "saiki.oita.jp", + "taketa.oita.jp", + "tsukumi.oita.jp", + "usa.oita.jp", + "usuki.oita.jp", + "yufu.oita.jp", + "akaiwa.okayama.jp", + "asakuchi.okayama.jp", + "bizen.okayama.jp", + "hayashima.okayama.jp", + "ibara.okayama.jp", + "kagamino.okayama.jp", + "kasaoka.okayama.jp", + "kibichuo.okayama.jp", + "kumenan.okayama.jp", + "kurashiki.okayama.jp", + "maniwa.okayama.jp", + "misaki.okayama.jp", + "nagi.okayama.jp", + "niimi.okayama.jp", + "nishiawakura.okayama.jp", + "okayama.okayama.jp", + "satosho.okayama.jp", + "setouchi.okayama.jp", + "shinjo.okayama.jp", + "shoo.okayama.jp", + "soja.okayama.jp", + "takahashi.okayama.jp", + "tamano.okayama.jp", + "tsuyama.okayama.jp", + "wake.okayama.jp", + "yakage.okayama.jp", + "aguni.okinawa.jp", + "ginowan.okinawa.jp", + "ginoza.okinawa.jp", + "gushikami.okinawa.jp", + "haebaru.okinawa.jp", + "higashi.okinawa.jp", + "hirara.okinawa.jp", + "iheya.okinawa.jp", + "ishigaki.okinawa.jp", + "ishikawa.okinawa.jp", + "itoman.okinawa.jp", + "izena.okinawa.jp", + "kadena.okinawa.jp", + "kin.okinawa.jp", + "kitadaito.okinawa.jp", + "kitanakagusuku.okinawa.jp", + "kumejima.okinawa.jp", + "kunigami.okinawa.jp", + "minamidaito.okinawa.jp", + "motobu.okinawa.jp", + "nago.okinawa.jp", + "naha.okinawa.jp", + "nakagusuku.okinawa.jp", + "nakijin.okinawa.jp", + "nanjo.okinawa.jp", + "nishihara.okinawa.jp", + "ogimi.okinawa.jp", + "okinawa.okinawa.jp", + "onna.okinawa.jp", + "shimoji.okinawa.jp", + "taketomi.okinawa.jp", + "tarama.okinawa.jp", + "tokashiki.okinawa.jp", + "tomigusuku.okinawa.jp", + "tonaki.okinawa.jp", + "urasoe.okinawa.jp", + "uruma.okinawa.jp", + "yaese.okinawa.jp", + "yomitan.okinawa.jp", + "yonabaru.okinawa.jp", + "yonaguni.okinawa.jp", + "zamami.okinawa.jp", + "abeno.osaka.jp", + "chihayaakasaka.osaka.jp", + "chuo.osaka.jp", + "daito.osaka.jp", + "fujiidera.osaka.jp", + "habikino.osaka.jp", + "hannan.osaka.jp", + "higashiosaka.osaka.jp", + "higashisumiyoshi.osaka.jp", + "higashiyodogawa.osaka.jp", + "hirakata.osaka.jp", + "ibaraki.osaka.jp", + "ikeda.osaka.jp", + "izumi.osaka.jp", + "izumiotsu.osaka.jp", + "izumisano.osaka.jp", + "kadoma.osaka.jp", + "kaizuka.osaka.jp", + "kanan.osaka.jp", + "kashiwara.osaka.jp", + "katano.osaka.jp", + "kawachinagano.osaka.jp", + "kishiwada.osaka.jp", + "kita.osaka.jp", + "kumatori.osaka.jp", + "matsubara.osaka.jp", + "minato.osaka.jp", + "minoh.osaka.jp", + "misaki.osaka.jp", + "moriguchi.osaka.jp", + "neyagawa.osaka.jp", + "nishi.osaka.jp", + "nose.osaka.jp", + "osakasayama.osaka.jp", + "sakai.osaka.jp", + "sayama.osaka.jp", + "sennan.osaka.jp", + "settsu.osaka.jp", + "shijonawate.osaka.jp", + "shimamoto.osaka.jp", + "suita.osaka.jp", + "tadaoka.osaka.jp", + "taishi.osaka.jp", + "tajiri.osaka.jp", + "takaishi.osaka.jp", + "takatsuki.osaka.jp", + "tondabayashi.osaka.jp", + "toyonaka.osaka.jp", + "toyono.osaka.jp", + "yao.osaka.jp", + "ariake.saga.jp", + "arita.saga.jp", + "fukudomi.saga.jp", + "genkai.saga.jp", + "hamatama.saga.jp", + "hizen.saga.jp", + "imari.saga.jp", + "kamimine.saga.jp", + "kanzaki.saga.jp", + "karatsu.saga.jp", + "kashima.saga.jp", + "kitagata.saga.jp", + "kitahata.saga.jp", + "kiyama.saga.jp", + "kouhoku.saga.jp", + "kyuragi.saga.jp", + "nishiarita.saga.jp", + "ogi.saga.jp", + "omachi.saga.jp", + "ouchi.saga.jp", + "saga.saga.jp", + "shiroishi.saga.jp", + "taku.saga.jp", + "tara.saga.jp", + "tosu.saga.jp", + "yoshinogari.saga.jp", + "arakawa.saitama.jp", + "asaka.saitama.jp", + "chichibu.saitama.jp", + "fujimi.saitama.jp", + "fujimino.saitama.jp", + "fukaya.saitama.jp", + "hanno.saitama.jp", + "hanyu.saitama.jp", + "hasuda.saitama.jp", + "hatogaya.saitama.jp", + "hatoyama.saitama.jp", + "hidaka.saitama.jp", + "higashichichibu.saitama.jp", + "higashimatsuyama.saitama.jp", + "honjo.saitama.jp", + "ina.saitama.jp", + "iruma.saitama.jp", + "iwatsuki.saitama.jp", + "kamiizumi.saitama.jp", + "kamikawa.saitama.jp", + "kamisato.saitama.jp", + "kasukabe.saitama.jp", + "kawagoe.saitama.jp", + "kawaguchi.saitama.jp", + "kawajima.saitama.jp", + "kazo.saitama.jp", + "kitamoto.saitama.jp", + "koshigaya.saitama.jp", + "kounosu.saitama.jp", + "kuki.saitama.jp", + "kumagaya.saitama.jp", + "matsubushi.saitama.jp", + "minano.saitama.jp", + "misato.saitama.jp", + "miyashiro.saitama.jp", + "miyoshi.saitama.jp", + "moroyama.saitama.jp", + "nagatoro.saitama.jp", + "namegawa.saitama.jp", + "niiza.saitama.jp", + "ogano.saitama.jp", + "ogawa.saitama.jp", + "ogose.saitama.jp", + "okegawa.saitama.jp", + "omiya.saitama.jp", + "otaki.saitama.jp", + "ranzan.saitama.jp", + "ryokami.saitama.jp", + "saitama.saitama.jp", + "sakado.saitama.jp", + "satte.saitama.jp", + "sayama.saitama.jp", + "shiki.saitama.jp", + "shiraoka.saitama.jp", + "soka.saitama.jp", + "sugito.saitama.jp", + "toda.saitama.jp", + "tokigawa.saitama.jp", + "tokorozawa.saitama.jp", + "tsurugashima.saitama.jp", + "urawa.saitama.jp", + "warabi.saitama.jp", + "yashio.saitama.jp", + "yokoze.saitama.jp", + "yono.saitama.jp", + "yorii.saitama.jp", + "yoshida.saitama.jp", + "yoshikawa.saitama.jp", + "yoshimi.saitama.jp", + "aisho.shiga.jp", + "gamo.shiga.jp", + "higashiomi.shiga.jp", + "hikone.shiga.jp", + "koka.shiga.jp", + "konan.shiga.jp", + "kosei.shiga.jp", + "koto.shiga.jp", + "kusatsu.shiga.jp", + "maibara.shiga.jp", + "moriyama.shiga.jp", + "nagahama.shiga.jp", + "nishiazai.shiga.jp", + "notogawa.shiga.jp", + "omihachiman.shiga.jp", + "otsu.shiga.jp", + "ritto.shiga.jp", + "ryuoh.shiga.jp", + "takashima.shiga.jp", + "takatsuki.shiga.jp", + "torahime.shiga.jp", + "toyosato.shiga.jp", + "yasu.shiga.jp", + "akagi.shimane.jp", + "ama.shimane.jp", + "gotsu.shimane.jp", + "hamada.shimane.jp", + "higashiizumo.shimane.jp", + "hikawa.shimane.jp", + "hikimi.shimane.jp", + "izumo.shimane.jp", + "kakinoki.shimane.jp", + "masuda.shimane.jp", + "matsue.shimane.jp", + "misato.shimane.jp", + "nishinoshima.shimane.jp", + "ohda.shimane.jp", + "okinoshima.shimane.jp", + "okuizumo.shimane.jp", + "shimane.shimane.jp", + "tamayu.shimane.jp", + "tsuwano.shimane.jp", + "unnan.shimane.jp", + "yakumo.shimane.jp", + "yasugi.shimane.jp", + "yatsuka.shimane.jp", + "arai.shizuoka.jp", + "atami.shizuoka.jp", + "fuji.shizuoka.jp", + "fujieda.shizuoka.jp", + "fujikawa.shizuoka.jp", + "fujinomiya.shizuoka.jp", + "fukuroi.shizuoka.jp", + "gotemba.shizuoka.jp", + "haibara.shizuoka.jp", + "hamamatsu.shizuoka.jp", + "higashiizu.shizuoka.jp", + "ito.shizuoka.jp", + "iwata.shizuoka.jp", + "izu.shizuoka.jp", + "izunokuni.shizuoka.jp", + "kakegawa.shizuoka.jp", + "kannami.shizuoka.jp", + "kawanehon.shizuoka.jp", + "kawazu.shizuoka.jp", + "kikugawa.shizuoka.jp", + "kosai.shizuoka.jp", + "makinohara.shizuoka.jp", + "matsuzaki.shizuoka.jp", + "minamiizu.shizuoka.jp", + "mishima.shizuoka.jp", + "morimachi.shizuoka.jp", + "nishiizu.shizuoka.jp", + "numazu.shizuoka.jp", + "omaezaki.shizuoka.jp", + "shimada.shizuoka.jp", + "shimizu.shizuoka.jp", + "shimoda.shizuoka.jp", + "shizuoka.shizuoka.jp", + "susono.shizuoka.jp", + "yaizu.shizuoka.jp", + "yoshida.shizuoka.jp", + "ashikaga.tochigi.jp", + "bato.tochigi.jp", + "haga.tochigi.jp", + "ichikai.tochigi.jp", + "iwafune.tochigi.jp", + "kaminokawa.tochigi.jp", + "kanuma.tochigi.jp", + "karasuyama.tochigi.jp", + "kuroiso.tochigi.jp", + "mashiko.tochigi.jp", + "mibu.tochigi.jp", + "moka.tochigi.jp", + "motegi.tochigi.jp", + "nasu.tochigi.jp", + "nasushiobara.tochigi.jp", + "nikko.tochigi.jp", + "nishikata.tochigi.jp", + "nogi.tochigi.jp", + "ohira.tochigi.jp", + "ohtawara.tochigi.jp", + "oyama.tochigi.jp", + "sakura.tochigi.jp", + "sano.tochigi.jp", + "shimotsuke.tochigi.jp", + "shioya.tochigi.jp", + "takanezawa.tochigi.jp", + "tochigi.tochigi.jp", + "tsuga.tochigi.jp", + "ujiie.tochigi.jp", + "utsunomiya.tochigi.jp", + "yaita.tochigi.jp", + "aizumi.tokushima.jp", + "anan.tokushima.jp", + "ichiba.tokushima.jp", + "itano.tokushima.jp", + "kainan.tokushima.jp", + "komatsushima.tokushima.jp", + "matsushige.tokushima.jp", + "mima.tokushima.jp", + "minami.tokushima.jp", + "miyoshi.tokushima.jp", + "mugi.tokushima.jp", + "nakagawa.tokushima.jp", + "naruto.tokushima.jp", + "sanagochi.tokushima.jp", + "shishikui.tokushima.jp", + "tokushima.tokushima.jp", + "wajiki.tokushima.jp", + "adachi.tokyo.jp", + "akiruno.tokyo.jp", + "akishima.tokyo.jp", + "aogashima.tokyo.jp", + "arakawa.tokyo.jp", + "bunkyo.tokyo.jp", + "chiyoda.tokyo.jp", + "chofu.tokyo.jp", + "chuo.tokyo.jp", + "edogawa.tokyo.jp", + "fuchu.tokyo.jp", + "fussa.tokyo.jp", + "hachijo.tokyo.jp", + "hachioji.tokyo.jp", + "hamura.tokyo.jp", + "higashikurume.tokyo.jp", + "higashimurayama.tokyo.jp", + "higashiyamato.tokyo.jp", + "hino.tokyo.jp", + "hinode.tokyo.jp", + "hinohara.tokyo.jp", + "inagi.tokyo.jp", + "itabashi.tokyo.jp", + "katsushika.tokyo.jp", + "kita.tokyo.jp", + "kiyose.tokyo.jp", + "kodaira.tokyo.jp", + "koganei.tokyo.jp", + "kokubunji.tokyo.jp", + "komae.tokyo.jp", + "koto.tokyo.jp", + "kouzushima.tokyo.jp", + "kunitachi.tokyo.jp", + "machida.tokyo.jp", + "meguro.tokyo.jp", + "minato.tokyo.jp", + "mitaka.tokyo.jp", + "mizuho.tokyo.jp", + "musashimurayama.tokyo.jp", + "musashino.tokyo.jp", + "nakano.tokyo.jp", + "nerima.tokyo.jp", + "ogasawara.tokyo.jp", + "okutama.tokyo.jp", + "ome.tokyo.jp", + "oshima.tokyo.jp", + "ota.tokyo.jp", + "setagaya.tokyo.jp", + "shibuya.tokyo.jp", + "shinagawa.tokyo.jp", + "shinjuku.tokyo.jp", + "suginami.tokyo.jp", + "sumida.tokyo.jp", + "tachikawa.tokyo.jp", + "taito.tokyo.jp", + "tama.tokyo.jp", + "toshima.tokyo.jp", + "chizu.tottori.jp", + "hino.tottori.jp", + "kawahara.tottori.jp", + "koge.tottori.jp", + "kotoura.tottori.jp", + "misasa.tottori.jp", + "nanbu.tottori.jp", + "nichinan.tottori.jp", + "sakaiminato.tottori.jp", + "tottori.tottori.jp", + "wakasa.tottori.jp", + "yazu.tottori.jp", + "yonago.tottori.jp", + "asahi.toyama.jp", + "fuchu.toyama.jp", + "fukumitsu.toyama.jp", + "funahashi.toyama.jp", + "himi.toyama.jp", + "imizu.toyama.jp", + "inami.toyama.jp", + "johana.toyama.jp", + "kamiichi.toyama.jp", + "kurobe.toyama.jp", + "nakaniikawa.toyama.jp", + "namerikawa.toyama.jp", + "nanto.toyama.jp", + "nyuzen.toyama.jp", + "oyabe.toyama.jp", + "taira.toyama.jp", + "takaoka.toyama.jp", + "tateyama.toyama.jp", + "toga.toyama.jp", + "tonami.toyama.jp", + "toyama.toyama.jp", + "unazuki.toyama.jp", + "uozu.toyama.jp", + "yamada.toyama.jp", + "arida.wakayama.jp", + "aridagawa.wakayama.jp", + "gobo.wakayama.jp", + "hashimoto.wakayama.jp", + "hidaka.wakayama.jp", + "hirogawa.wakayama.jp", + "inami.wakayama.jp", + "iwade.wakayama.jp", + "kainan.wakayama.jp", + "kamitonda.wakayama.jp", + "katsuragi.wakayama.jp", + "kimino.wakayama.jp", + "kinokawa.wakayama.jp", + "kitayama.wakayama.jp", + "koya.wakayama.jp", + "koza.wakayama.jp", + "kozagawa.wakayama.jp", + "kudoyama.wakayama.jp", + "kushimoto.wakayama.jp", + "mihama.wakayama.jp", + "misato.wakayama.jp", + "nachikatsuura.wakayama.jp", + "shingu.wakayama.jp", + "shirahama.wakayama.jp", + "taiji.wakayama.jp", + "tanabe.wakayama.jp", + "wakayama.wakayama.jp", + "yuasa.wakayama.jp", + "yura.wakayama.jp", + "asahi.yamagata.jp", + "funagata.yamagata.jp", + "higashine.yamagata.jp", + "iide.yamagata.jp", + "kahoku.yamagata.jp", + "kaminoyama.yamagata.jp", + "kaneyama.yamagata.jp", + "kawanishi.yamagata.jp", + "mamurogawa.yamagata.jp", + "mikawa.yamagata.jp", + "murayama.yamagata.jp", + "nagai.yamagata.jp", + "nakayama.yamagata.jp", + "nanyo.yamagata.jp", + "nishikawa.yamagata.jp", + "obanazawa.yamagata.jp", + "oe.yamagata.jp", + "oguni.yamagata.jp", + "ohkura.yamagata.jp", + "oishida.yamagata.jp", + "sagae.yamagata.jp", + "sakata.yamagata.jp", + "sakegawa.yamagata.jp", + "shinjo.yamagata.jp", + "shirataka.yamagata.jp", + "shonai.yamagata.jp", + "takahata.yamagata.jp", + "tendo.yamagata.jp", + "tozawa.yamagata.jp", + "tsuruoka.yamagata.jp", + "yamagata.yamagata.jp", + "yamanobe.yamagata.jp", + "yonezawa.yamagata.jp", + "yuza.yamagata.jp", + "abu.yamaguchi.jp", + "hagi.yamaguchi.jp", + "hikari.yamaguchi.jp", + "hofu.yamaguchi.jp", + "iwakuni.yamaguchi.jp", + "kudamatsu.yamaguchi.jp", + "mitou.yamaguchi.jp", + "nagato.yamaguchi.jp", + "oshima.yamaguchi.jp", + "shimonoseki.yamaguchi.jp", + "shunan.yamaguchi.jp", + "tabuse.yamaguchi.jp", + "tokuyama.yamaguchi.jp", + "toyota.yamaguchi.jp", + "ube.yamaguchi.jp", + "yuu.yamaguchi.jp", + "chuo.yamanashi.jp", + "doshi.yamanashi.jp", + "fuefuki.yamanashi.jp", + "fujikawa.yamanashi.jp", + "fujikawaguchiko.yamanashi.jp", + "fujiyoshida.yamanashi.jp", + "hayakawa.yamanashi.jp", + "hokuto.yamanashi.jp", + "ichikawamisato.yamanashi.jp", + "kai.yamanashi.jp", + "kofu.yamanashi.jp", + "koshu.yamanashi.jp", + "kosuge.yamanashi.jp", + "minami-alps.yamanashi.jp", + "minobu.yamanashi.jp", + "nakamichi.yamanashi.jp", + "nanbu.yamanashi.jp", + "narusawa.yamanashi.jp", + "nirasaki.yamanashi.jp", + "nishikatsura.yamanashi.jp", + "oshino.yamanashi.jp", + "otsuki.yamanashi.jp", + "showa.yamanashi.jp", + "tabayama.yamanashi.jp", + "tsuru.yamanashi.jp", + "uenohara.yamanashi.jp", + "yamanakako.yamanashi.jp", + "yamanashi.yamanashi.jp", + "ke", + "ac.ke", + "co.ke", + "go.ke", + "info.ke", + "me.ke", + "mobi.ke", + "ne.ke", + "or.ke", + "sc.ke", + "kg", + "org.kg", + "net.kg", + "com.kg", + "edu.kg", + "gov.kg", + "mil.kg", + "*.kh", + "ki", + "edu.ki", + "biz.ki", + "net.ki", + "org.ki", + "gov.ki", + "info.ki", + "com.ki", + "km", + "org.km", + "nom.km", + "gov.km", + "prd.km", + "tm.km", + "edu.km", + "mil.km", + "ass.km", + "com.km", + "coop.km", + "asso.km", + "presse.km", + "medecin.km", + "notaires.km", + "pharmaciens.km", + "veterinaire.km", + "gouv.km", + "kn", + "net.kn", + "org.kn", + "edu.kn", + "gov.kn", + "kp", + "com.kp", + "edu.kp", + "gov.kp", + "org.kp", + "rep.kp", + "tra.kp", + "kr", + "ac.kr", + "co.kr", + "es.kr", + "go.kr", + "hs.kr", + "kg.kr", + "mil.kr", + "ms.kr", + "ne.kr", + "or.kr", + "pe.kr", + "re.kr", + "sc.kr", + "busan.kr", + "chungbuk.kr", + "chungnam.kr", + "daegu.kr", + "daejeon.kr", + "gangwon.kr", + "gwangju.kr", + "gyeongbuk.kr", + "gyeonggi.kr", + "gyeongnam.kr", + "incheon.kr", + "jeju.kr", + "jeonbuk.kr", + "jeonnam.kr", + "seoul.kr", + "ulsan.kr", + "*.kw", + "ky", + "edu.ky", + "gov.ky", + "com.ky", + "org.ky", + "net.ky", + "kz", + "org.kz", + "edu.kz", + "net.kz", + "gov.kz", + "mil.kz", + "com.kz", + "la", + "int.la", + "net.la", + "info.la", + "edu.la", + "gov.la", + "per.la", + "com.la", + "org.la", + "lb", + "com.lb", + "edu.lb", + "gov.lb", + "net.lb", + "org.lb", + "lc", + "com.lc", + "net.lc", + "co.lc", + "org.lc", + "edu.lc", + "gov.lc", + "li", + "lk", + "gov.lk", + "sch.lk", + "net.lk", + "int.lk", + "com.lk", + "org.lk", + "edu.lk", + "ngo.lk", + "soc.lk", + "web.lk", + "ltd.lk", + "assn.lk", + "grp.lk", + "hotel.lk", + "ac.lk", + "lr", + "com.lr", + "edu.lr", + "gov.lr", + "org.lr", + "net.lr", + "ls", + "co.ls", + "org.ls", + "lt", + "gov.lt", + "lu", + "lv", + "com.lv", + "edu.lv", + "gov.lv", + "org.lv", + "mil.lv", + "id.lv", + "net.lv", + "asn.lv", + "conf.lv", + "ly", + "com.ly", + "net.ly", + "gov.ly", + "plc.ly", + "edu.ly", + "sch.ly", + "med.ly", + "org.ly", + "id.ly", + "ma", + "co.ma", + "net.ma", + "gov.ma", + "org.ma", + "ac.ma", + "press.ma", + "mc", + "tm.mc", + "asso.mc", + "md", + "me", + "co.me", + "net.me", + "org.me", + "edu.me", + "ac.me", + "gov.me", + "its.me", + "priv.me", + "mg", + "org.mg", + "nom.mg", + "gov.mg", + "prd.mg", + "tm.mg", + "edu.mg", + "mil.mg", + "com.mg", + "co.mg", + "mh", + "mil", + "mk", + "com.mk", + "org.mk", + "net.mk", + "edu.mk", + "gov.mk", + "inf.mk", + "name.mk", + "ml", + "com.ml", + "edu.ml", + "gouv.ml", + "gov.ml", + "net.ml", + "org.ml", + "presse.ml", + "*.mm", + "mn", + "gov.mn", + "edu.mn", + "org.mn", + "mo", + "com.mo", + "net.mo", + "org.mo", + "edu.mo", + "gov.mo", + "mobi", + "mp", + "mq", + "mr", + "gov.mr", + "ms", + "com.ms", + "edu.ms", + "gov.ms", + "net.ms", + "org.ms", + "mt", + "com.mt", + "edu.mt", + "net.mt", + "org.mt", + "mu", + "com.mu", + "net.mu", + "org.mu", + "gov.mu", + "ac.mu", + "co.mu", + "or.mu", + "museum", + "academy.museum", + "agriculture.museum", + "air.museum", + "airguard.museum", + "alabama.museum", + "alaska.museum", + "amber.museum", + "ambulance.museum", + "american.museum", + "americana.museum", + "americanantiques.museum", + "americanart.museum", + "amsterdam.museum", + "and.museum", + "annefrank.museum", + "anthro.museum", + "anthropology.museum", + "antiques.museum", + "aquarium.museum", + "arboretum.museum", + "archaeological.museum", + "archaeology.museum", + "architecture.museum", + "art.museum", + "artanddesign.museum", + "artcenter.museum", + "artdeco.museum", + "arteducation.museum", + "artgallery.museum", + "arts.museum", + "artsandcrafts.museum", + "asmatart.museum", + "assassination.museum", + "assisi.museum", + "association.museum", + "astronomy.museum", + "atlanta.museum", + "austin.museum", + "australia.museum", + "automotive.museum", + "aviation.museum", + "axis.museum", + "badajoz.museum", + "baghdad.museum", + "bahn.museum", + "bale.museum", + "baltimore.museum", + "barcelona.museum", + "baseball.museum", + "basel.museum", + "baths.museum", + "bauern.museum", + "beauxarts.museum", + "beeldengeluid.museum", + "bellevue.museum", + "bergbau.museum", + "berkeley.museum", + "berlin.museum", + "bern.museum", + "bible.museum", + "bilbao.museum", + "bill.museum", + "birdart.museum", + "birthplace.museum", + "bonn.museum", + "boston.museum", + "botanical.museum", + "botanicalgarden.museum", + "botanicgarden.museum", + "botany.museum", + "brandywinevalley.museum", + "brasil.museum", + "bristol.museum", + "british.museum", + "britishcolumbia.museum", + "broadcast.museum", + "brunel.museum", + "brussel.museum", + "brussels.museum", + "bruxelles.museum", + "building.museum", + "burghof.museum", + "bus.museum", + "bushey.museum", + "cadaques.museum", + "california.museum", + "cambridge.museum", + "can.museum", + "canada.museum", + "capebreton.museum", + "carrier.museum", + "cartoonart.museum", + "casadelamoneda.museum", + "castle.museum", + "castres.museum", + "celtic.museum", + "center.museum", + "chattanooga.museum", + "cheltenham.museum", + "chesapeakebay.museum", + "chicago.museum", + "children.museum", + "childrens.museum", + "childrensgarden.museum", + "chiropractic.museum", + "chocolate.museum", + "christiansburg.museum", + "cincinnati.museum", + "cinema.museum", + "circus.museum", + "civilisation.museum", + "civilization.museum", + "civilwar.museum", + "clinton.museum", + "clock.museum", + "coal.museum", + "coastaldefence.museum", + "cody.museum", + "coldwar.museum", + "collection.museum", + "colonialwilliamsburg.museum", + "coloradoplateau.museum", + "columbia.museum", + "columbus.museum", + "communication.museum", + "communications.museum", + "community.museum", + "computer.museum", + "computerhistory.museum", + "xn--comunicaes-v6a2o.museum", + "contemporary.museum", + "contemporaryart.museum", + "convent.museum", + "copenhagen.museum", + "corporation.museum", + "xn--correios-e-telecomunicaes-ghc29a.museum", + "corvette.museum", + "costume.museum", + "countryestate.museum", + "county.museum", + "crafts.museum", + "cranbrook.museum", + "creation.museum", + "cultural.museum", + "culturalcenter.museum", + "culture.museum", + "cyber.museum", + "cymru.museum", + "dali.museum", + "dallas.museum", + "database.museum", + "ddr.museum", + "decorativearts.museum", + "delaware.museum", + "delmenhorst.museum", + "denmark.museum", + "depot.museum", + "design.museum", + "detroit.museum", + "dinosaur.museum", + "discovery.museum", + "dolls.museum", + "donostia.museum", + "durham.museum", + "eastafrica.museum", + "eastcoast.museum", + "education.museum", + "educational.museum", + "egyptian.museum", + "eisenbahn.museum", + "elburg.museum", + "elvendrell.museum", + "embroidery.museum", + "encyclopedic.museum", + "england.museum", + "entomology.museum", + "environment.museum", + "environmentalconservation.museum", + "epilepsy.museum", + "essex.museum", + "estate.museum", + "ethnology.museum", + "exeter.museum", + "exhibition.museum", + "family.museum", + "farm.museum", + "farmequipment.museum", + "farmers.museum", + "farmstead.museum", + "field.museum", + "figueres.museum", + "filatelia.museum", + "film.museum", + "fineart.museum", + "finearts.museum", + "finland.museum", + "flanders.museum", + "florida.museum", + "force.museum", + "fortmissoula.museum", + "fortworth.museum", + "foundation.museum", + "francaise.museum", + "frankfurt.museum", + "franziskaner.museum", + "freemasonry.museum", + "freiburg.museum", + "fribourg.museum", + "frog.museum", + "fundacio.museum", + "furniture.museum", + "gallery.museum", + "garden.museum", + "gateway.museum", + "geelvinck.museum", + "gemological.museum", + "geology.museum", + "georgia.museum", + "giessen.museum", + "glas.museum", + "glass.museum", + "gorge.museum", + "grandrapids.museum", + "graz.museum", + "guernsey.museum", + "halloffame.museum", + "hamburg.museum", + "handson.museum", + "harvestcelebration.museum", + "hawaii.museum", + "health.museum", + "heimatunduhren.museum", + "hellas.museum", + "helsinki.museum", + "hembygdsforbund.museum", + "heritage.museum", + "histoire.museum", + "historical.museum", + "historicalsociety.museum", + "historichouses.museum", + "historisch.museum", + "historisches.museum", + "history.museum", + "historyofscience.museum", + "horology.museum", + "house.museum", + "humanities.museum", + "illustration.museum", + "imageandsound.museum", + "indian.museum", + "indiana.museum", + "indianapolis.museum", + "indianmarket.museum", + "intelligence.museum", + "interactive.museum", + "iraq.museum", + "iron.museum", + "isleofman.museum", + "jamison.museum", + "jefferson.museum", + "jerusalem.museum", + "jewelry.museum", + "jewish.museum", + "jewishart.museum", + "jfk.museum", + "journalism.museum", + "judaica.museum", + "judygarland.museum", + "juedisches.museum", + "juif.museum", + "karate.museum", + "karikatur.museum", + "kids.museum", + "koebenhavn.museum", + "koeln.museum", + "kunst.museum", + "kunstsammlung.museum", + "kunstunddesign.museum", + "labor.museum", + "labour.museum", + "lajolla.museum", + "lancashire.museum", + "landes.museum", + "lans.museum", + "xn--lns-qla.museum", + "larsson.museum", + "lewismiller.museum", + "lincoln.museum", + "linz.museum", + "living.museum", + "livinghistory.museum", + "localhistory.museum", + "london.museum", + "losangeles.museum", + "louvre.museum", + "loyalist.museum", + "lucerne.museum", + "luxembourg.museum", + "luzern.museum", + "mad.museum", + "madrid.museum", + "mallorca.museum", + "manchester.museum", + "mansion.museum", + "mansions.museum", + "manx.museum", + "marburg.museum", + "maritime.museum", + "maritimo.museum", + "maryland.museum", + "marylhurst.museum", + "media.museum", + "medical.museum", + "medizinhistorisches.museum", + "meeres.museum", + "memorial.museum", + "mesaverde.museum", + "michigan.museum", + "midatlantic.museum", + "military.museum", + "mill.museum", + "miners.museum", + "mining.museum", + "minnesota.museum", + "missile.museum", + "missoula.museum", + "modern.museum", + "moma.museum", + "money.museum", + "monmouth.museum", + "monticello.museum", + "montreal.museum", + "moscow.museum", + "motorcycle.museum", + "muenchen.museum", + "muenster.museum", + "mulhouse.museum", + "muncie.museum", + "museet.museum", + "museumcenter.museum", + "museumvereniging.museum", + "music.museum", + "national.museum", + "nationalfirearms.museum", + "nationalheritage.museum", + "nativeamerican.museum", + "naturalhistory.museum", + "naturalhistorymuseum.museum", + "naturalsciences.museum", + "nature.museum", + "naturhistorisches.museum", + "natuurwetenschappen.museum", + "naumburg.museum", + "naval.museum", + "nebraska.museum", + "neues.museum", + "newhampshire.museum", + "newjersey.museum", + "newmexico.museum", + "newport.museum", + "newspaper.museum", + "newyork.museum", + "niepce.museum", + "norfolk.museum", + "north.museum", + "nrw.museum", + "nuernberg.museum", + "nuremberg.museum", + "nyc.museum", + "nyny.museum", + "oceanographic.museum", + "oceanographique.museum", + "omaha.museum", + "online.museum", + "ontario.museum", + "openair.museum", + "oregon.museum", + "oregontrail.museum", + "otago.museum", + "oxford.museum", + "pacific.museum", + "paderborn.museum", + "palace.museum", + "paleo.museum", + "palmsprings.museum", + "panama.museum", + "paris.museum", + "pasadena.museum", + "pharmacy.museum", + "philadelphia.museum", + "philadelphiaarea.museum", + "philately.museum", + "phoenix.museum", + "photography.museum", + "pilots.museum", + "pittsburgh.museum", + "planetarium.museum", + "plantation.museum", + "plants.museum", + "plaza.museum", + "portal.museum", + "portland.museum", + "portlligat.museum", + "posts-and-telecommunications.museum", + "preservation.museum", + "presidio.museum", + "press.museum", + "project.museum", + "public.museum", + "pubol.museum", + "quebec.museum", + "railroad.museum", + "railway.museum", + "research.museum", + "resistance.museum", + "riodejaneiro.museum", + "rochester.museum", + "rockart.museum", + "roma.museum", + "russia.museum", + "saintlouis.museum", + "salem.museum", + "salvadordali.museum", + "salzburg.museum", + "sandiego.museum", + "sanfrancisco.museum", + "santabarbara.museum", + "santacruz.museum", + "santafe.museum", + "saskatchewan.museum", + "satx.museum", + "savannahga.museum", + "schlesisches.museum", + "schoenbrunn.museum", + "schokoladen.museum", + "school.museum", + "schweiz.museum", + "science.museum", + "scienceandhistory.museum", + "scienceandindustry.museum", + "sciencecenter.museum", + "sciencecenters.museum", + "science-fiction.museum", + "sciencehistory.museum", + "sciences.museum", + "sciencesnaturelles.museum", + "scotland.museum", + "seaport.museum", + "settlement.museum", + "settlers.museum", + "shell.museum", + "sherbrooke.museum", + "sibenik.museum", + "silk.museum", + "ski.museum", + "skole.museum", + "society.museum", + "sologne.museum", + "soundandvision.museum", + "southcarolina.museum", + "southwest.museum", + "space.museum", + "spy.museum", + "square.museum", + "stadt.museum", + "stalbans.museum", + "starnberg.museum", + "state.museum", + "stateofdelaware.museum", + "station.museum", + "steam.museum", + "steiermark.museum", + "stjohn.museum", + "stockholm.museum", + "stpetersburg.museum", + "stuttgart.museum", + "suisse.museum", + "surgeonshall.museum", + "surrey.museum", + "svizzera.museum", + "sweden.museum", + "sydney.museum", + "tank.museum", + "tcm.museum", + "technology.museum", + "telekommunikation.museum", + "television.museum", + "texas.museum", + "textile.museum", + "theater.museum", + "time.museum", + "timekeeping.museum", + "topology.museum", + "torino.museum", + "touch.museum", + "town.museum", + "transport.museum", + "tree.museum", + "trolley.museum", + "trust.museum", + "trustee.museum", + "uhren.museum", + "ulm.museum", + "undersea.museum", + "university.museum", + "usa.museum", + "usantiques.museum", + "usarts.museum", + "uscountryestate.museum", + "usculture.museum", + "usdecorativearts.museum", + "usgarden.museum", + "ushistory.museum", + "ushuaia.museum", + "uslivinghistory.museum", + "utah.museum", + "uvic.museum", + "valley.museum", + "vantaa.museum", + "versailles.museum", + "viking.museum", + "village.museum", + "virginia.museum", + "virtual.museum", + "virtuel.museum", + "vlaanderen.museum", + "volkenkunde.museum", + "wales.museum", + "wallonie.museum", + "war.museum", + "washingtondc.museum", + "watchandclock.museum", + "watch-and-clock.museum", + "western.museum", + "westfalen.museum", + "whaling.museum", + "wildlife.museum", + "williamsburg.museum", + "windmill.museum", + "workshop.museum", + "york.museum", + "yorkshire.museum", + "yosemite.museum", + "youth.museum", + "zoological.museum", + "zoology.museum", + "xn--9dbhblg6di.museum", + "xn--h1aegh.museum", + "mv", + "aero.mv", + "biz.mv", + "com.mv", + "coop.mv", + "edu.mv", + "gov.mv", + "info.mv", + "int.mv", + "mil.mv", + "museum.mv", + "name.mv", + "net.mv", + "org.mv", + "pro.mv", + "mw", + "ac.mw", + "biz.mw", + "co.mw", + "com.mw", + "coop.mw", + "edu.mw", + "gov.mw", + "int.mw", + "museum.mw", + "net.mw", + "org.mw", + "mx", + "com.mx", + "org.mx", + "gob.mx", + "edu.mx", + "net.mx", + "my", + "com.my", + "net.my", + "org.my", + "gov.my", + "edu.my", + "mil.my", + "name.my", + "mz", + "ac.mz", + "adv.mz", + "co.mz", + "edu.mz", + "gov.mz", + "mil.mz", + "net.mz", + "org.mz", + "na", + "info.na", + "pro.na", + "name.na", + "school.na", + "or.na", + "dr.na", + "us.na", + "mx.na", + "ca.na", + "in.na", + "cc.na", + "tv.na", + "ws.na", + "mobi.na", + "co.na", + "com.na", + "org.na", + "name", + "nc", + "asso.nc", + "nom.nc", + "ne", + "net", + "nf", + "com.nf", + "net.nf", + "per.nf", + "rec.nf", + "web.nf", + "arts.nf", + "firm.nf", + "info.nf", + "other.nf", + "store.nf", + "ng", + "com.ng", + "edu.ng", + "gov.ng", + "i.ng", + "mil.ng", + "mobi.ng", + "name.ng", + "net.ng", + "org.ng", + "sch.ng", + "ni", + "ac.ni", + "biz.ni", + "co.ni", + "com.ni", + "edu.ni", + "gob.ni", + "in.ni", + "info.ni", + "int.ni", + "mil.ni", + "net.ni", + "nom.ni", + "org.ni", + "web.ni", + "nl", + "bv.nl", + "no", + "fhs.no", + "vgs.no", + "fylkesbibl.no", + "folkebibl.no", + "museum.no", + "idrett.no", + "priv.no", + "mil.no", + "stat.no", + "dep.no", + "kommune.no", + "herad.no", + "aa.no", + "ah.no", + "bu.no", + "fm.no", + "hl.no", + "hm.no", + "jan-mayen.no", + "mr.no", + "nl.no", + "nt.no", + "of.no", + "ol.no", + "oslo.no", + "rl.no", + "sf.no", + "st.no", + "svalbard.no", + "tm.no", + "tr.no", + "va.no", + "vf.no", + "gs.aa.no", + "gs.ah.no", + "gs.bu.no", + "gs.fm.no", + "gs.hl.no", + "gs.hm.no", + "gs.jan-mayen.no", + "gs.mr.no", + "gs.nl.no", + "gs.nt.no", + "gs.of.no", + "gs.ol.no", + "gs.oslo.no", + "gs.rl.no", + "gs.sf.no", + "gs.st.no", + "gs.svalbard.no", + "gs.tm.no", + "gs.tr.no", + "gs.va.no", + "gs.vf.no", + "akrehamn.no", + "xn--krehamn-dxa.no", + "algard.no", + "xn--lgrd-poac.no", + "arna.no", + "brumunddal.no", + "bryne.no", + "bronnoysund.no", + "xn--brnnysund-m8ac.no", + "drobak.no", + "xn--drbak-wua.no", + "egersund.no", + "fetsund.no", + "floro.no", + "xn--flor-jra.no", + "fredrikstad.no", + "hokksund.no", + "honefoss.no", + "xn--hnefoss-q1a.no", + "jessheim.no", + "jorpeland.no", + "xn--jrpeland-54a.no", + "kirkenes.no", + "kopervik.no", + "krokstadelva.no", + "langevag.no", + "xn--langevg-jxa.no", + "leirvik.no", + "mjondalen.no", + "xn--mjndalen-64a.no", + "mo-i-rana.no", + "mosjoen.no", + "xn--mosjen-eya.no", + "nesoddtangen.no", + "orkanger.no", + "osoyro.no", + "xn--osyro-wua.no", + "raholt.no", + "xn--rholt-mra.no", + "sandnessjoen.no", + "xn--sandnessjen-ogb.no", + "skedsmokorset.no", + "slattum.no", + "spjelkavik.no", + "stathelle.no", + "stavern.no", + "stjordalshalsen.no", + "xn--stjrdalshalsen-sqb.no", + "tananger.no", + "tranby.no", + "vossevangen.no", + "afjord.no", + "xn--fjord-lra.no", + "agdenes.no", + "al.no", + "xn--l-1fa.no", + "alesund.no", + "xn--lesund-hua.no", + "alstahaug.no", + "alta.no", + "xn--lt-liac.no", + "alaheadju.no", + "xn--laheadju-7ya.no", + "alvdal.no", + "amli.no", + "xn--mli-tla.no", + "amot.no", + "xn--mot-tla.no", + "andebu.no", + "andoy.no", + "xn--andy-ira.no", + "andasuolo.no", + "ardal.no", + "xn--rdal-poa.no", + "aremark.no", + "arendal.no", + "xn--s-1fa.no", + "aseral.no", + "xn--seral-lra.no", + "asker.no", + "askim.no", + "askvoll.no", + "askoy.no", + "xn--asky-ira.no", + "asnes.no", + "xn--snes-poa.no", + "audnedaln.no", + "aukra.no", + "aure.no", + "aurland.no", + "aurskog-holand.no", + "xn--aurskog-hland-jnb.no", + "austevoll.no", + "austrheim.no", + "averoy.no", + "xn--avery-yua.no", + "balestrand.no", + "ballangen.no", + "balat.no", + "xn--blt-elab.no", + "balsfjord.no", + "bahccavuotna.no", + "xn--bhccavuotna-k7a.no", + "bamble.no", + "bardu.no", + "beardu.no", + "beiarn.no", + "bajddar.no", + "xn--bjddar-pta.no", + "baidar.no", + "xn--bidr-5nac.no", + "berg.no", + "bergen.no", + "berlevag.no", + "xn--berlevg-jxa.no", + "bearalvahki.no", + "xn--bearalvhki-y4a.no", + "bindal.no", + "birkenes.no", + "bjarkoy.no", + "xn--bjarky-fya.no", + "bjerkreim.no", + "bjugn.no", + "bodo.no", + "xn--bod-2na.no", + "badaddja.no", + "xn--bdddj-mrabd.no", + "budejju.no", + "bokn.no", + "bremanger.no", + "bronnoy.no", + "xn--brnny-wuac.no", + "bygland.no", + "bykle.no", + "barum.no", + "xn--brum-voa.no", + "bo.telemark.no", + "xn--b-5ga.telemark.no", + "bo.nordland.no", + "xn--b-5ga.nordland.no", + "bievat.no", + "xn--bievt-0qa.no", + "bomlo.no", + "xn--bmlo-gra.no", + "batsfjord.no", + "xn--btsfjord-9za.no", + "bahcavuotna.no", + "xn--bhcavuotna-s4a.no", + "dovre.no", + "drammen.no", + "drangedal.no", + "dyroy.no", + "xn--dyry-ira.no", + "donna.no", + "xn--dnna-gra.no", + "eid.no", + "eidfjord.no", + "eidsberg.no", + "eidskog.no", + "eidsvoll.no", + "eigersund.no", + "elverum.no", + "enebakk.no", + "engerdal.no", + "etne.no", + "etnedal.no", + "evenes.no", + "evenassi.no", + "xn--eveni-0qa01ga.no", + "evje-og-hornnes.no", + "farsund.no", + "fauske.no", + "fuossko.no", + "fuoisku.no", + "fedje.no", + "fet.no", + "finnoy.no", + "xn--finny-yua.no", + "fitjar.no", + "fjaler.no", + "fjell.no", + "flakstad.no", + "flatanger.no", + "flekkefjord.no", + "flesberg.no", + "flora.no", + "fla.no", + "xn--fl-zia.no", + "folldal.no", + "forsand.no", + "fosnes.no", + "frei.no", + "frogn.no", + "froland.no", + "frosta.no", + "frana.no", + "xn--frna-woa.no", + "froya.no", + "xn--frya-hra.no", + "fusa.no", + "fyresdal.no", + "forde.no", + "xn--frde-gra.no", + "gamvik.no", + "gangaviika.no", + "xn--ggaviika-8ya47h.no", + "gaular.no", + "gausdal.no", + "gildeskal.no", + "xn--gildeskl-g0a.no", + "giske.no", + "gjemnes.no", + "gjerdrum.no", + "gjerstad.no", + "gjesdal.no", + "gjovik.no", + "xn--gjvik-wua.no", + "gloppen.no", + "gol.no", + "gran.no", + "grane.no", + "granvin.no", + "gratangen.no", + "grimstad.no", + "grong.no", + "kraanghke.no", + "xn--kranghke-b0a.no", + "grue.no", + "gulen.no", + "hadsel.no", + "halden.no", + "halsa.no", + "hamar.no", + "hamaroy.no", + "habmer.no", + "xn--hbmer-xqa.no", + "hapmir.no", + "xn--hpmir-xqa.no", + "hammerfest.no", + "hammarfeasta.no", + "xn--hmmrfeasta-s4ac.no", + "haram.no", + "hareid.no", + "harstad.no", + "hasvik.no", + "aknoluokta.no", + "xn--koluokta-7ya57h.no", + "hattfjelldal.no", + "aarborte.no", + "haugesund.no", + "hemne.no", + "hemnes.no", + "hemsedal.no", + "heroy.more-og-romsdal.no", + "xn--hery-ira.xn--mre-og-romsdal-qqb.no", + "heroy.nordland.no", + "xn--hery-ira.nordland.no", + "hitra.no", + "hjartdal.no", + "hjelmeland.no", + "hobol.no", + "xn--hobl-ira.no", + "hof.no", + "hol.no", + "hole.no", + "holmestrand.no", + "holtalen.no", + "xn--holtlen-hxa.no", + "hornindal.no", + "horten.no", + "hurdal.no", + "hurum.no", + "hvaler.no", + "hyllestad.no", + "hagebostad.no", + "xn--hgebostad-g3a.no", + "hoyanger.no", + "xn--hyanger-q1a.no", + "hoylandet.no", + "xn--hylandet-54a.no", + "ha.no", + "xn--h-2fa.no", + "ibestad.no", + "inderoy.no", + "xn--indery-fya.no", + "iveland.no", + "jevnaker.no", + "jondal.no", + "jolster.no", + "xn--jlster-bya.no", + "karasjok.no", + "karasjohka.no", + "xn--krjohka-hwab49j.no", + "karlsoy.no", + "galsa.no", + "xn--gls-elac.no", + "karmoy.no", + "xn--karmy-yua.no", + "kautokeino.no", + "guovdageaidnu.no", + "klepp.no", + "klabu.no", + "xn--klbu-woa.no", + "kongsberg.no", + "kongsvinger.no", + "kragero.no", + "xn--krager-gya.no", + "kristiansand.no", + "kristiansund.no", + "krodsherad.no", + "xn--krdsherad-m8a.no", + "kvalsund.no", + "rahkkeravju.no", + "xn--rhkkervju-01af.no", + "kvam.no", + "kvinesdal.no", + "kvinnherad.no", + "kviteseid.no", + "kvitsoy.no", + "xn--kvitsy-fya.no", + "kvafjord.no", + "xn--kvfjord-nxa.no", + "giehtavuoatna.no", + "kvanangen.no", + "xn--kvnangen-k0a.no", + "navuotna.no", + "xn--nvuotna-hwa.no", + "kafjord.no", + "xn--kfjord-iua.no", + "gaivuotna.no", + "xn--givuotna-8ya.no", + "larvik.no", + "lavangen.no", + "lavagis.no", + "loabat.no", + "xn--loabt-0qa.no", + "lebesby.no", + "davvesiida.no", + "leikanger.no", + "leirfjord.no", + "leka.no", + "leksvik.no", + "lenvik.no", + "leangaviika.no", + "xn--leagaviika-52b.no", + "lesja.no", + "levanger.no", + "lier.no", + "lierne.no", + "lillehammer.no", + "lillesand.no", + "lindesnes.no", + "lindas.no", + "xn--linds-pra.no", + "lom.no", + "loppa.no", + "lahppi.no", + "xn--lhppi-xqa.no", + "lund.no", + "lunner.no", + "luroy.no", + "xn--lury-ira.no", + "luster.no", + "lyngdal.no", + "lyngen.no", + "ivgu.no", + "lardal.no", + "lerdal.no", + "xn--lrdal-sra.no", + "lodingen.no", + "xn--ldingen-q1a.no", + "lorenskog.no", + "xn--lrenskog-54a.no", + "loten.no", + "xn--lten-gra.no", + "malvik.no", + "masoy.no", + "xn--msy-ula0h.no", + "muosat.no", + "xn--muost-0qa.no", + "mandal.no", + "marker.no", + "marnardal.no", + "masfjorden.no", + "meland.no", + "meldal.no", + "melhus.no", + "meloy.no", + "xn--mely-ira.no", + "meraker.no", + "xn--merker-kua.no", + "moareke.no", + "xn--moreke-jua.no", + "midsund.no", + "midtre-gauldal.no", + "modalen.no", + "modum.no", + "molde.no", + "moskenes.no", + "moss.no", + "mosvik.no", + "malselv.no", + "xn--mlselv-iua.no", + "malatvuopmi.no", + "xn--mlatvuopmi-s4a.no", + "namdalseid.no", + "aejrie.no", + "namsos.no", + "namsskogan.no", + "naamesjevuemie.no", + "xn--nmesjevuemie-tcba.no", + "laakesvuemie.no", + "nannestad.no", + "narvik.no", + "narviika.no", + "naustdal.no", + "nedre-eiker.no", + "nes.akershus.no", + "nes.buskerud.no", + "nesna.no", + "nesodden.no", + "nesseby.no", + "unjarga.no", + "xn--unjrga-rta.no", + "nesset.no", + "nissedal.no", + "nittedal.no", + "nord-aurdal.no", + "nord-fron.no", + "nord-odal.no", + "norddal.no", + "nordkapp.no", + "davvenjarga.no", + "xn--davvenjrga-y4a.no", + "nordre-land.no", + "nordreisa.no", + "raisa.no", + "xn--risa-5na.no", + "nore-og-uvdal.no", + "notodden.no", + "naroy.no", + "xn--nry-yla5g.no", + "notteroy.no", + "xn--nttery-byae.no", + "odda.no", + "oksnes.no", + "xn--ksnes-uua.no", + "oppdal.no", + "oppegard.no", + "xn--oppegrd-ixa.no", + "orkdal.no", + "orland.no", + "xn--rland-uua.no", + "orskog.no", + "xn--rskog-uua.no", + "orsta.no", + "xn--rsta-fra.no", + "os.hedmark.no", + "os.hordaland.no", + "osen.no", + "osteroy.no", + "xn--ostery-fya.no", + "ostre-toten.no", + "xn--stre-toten-zcb.no", + "overhalla.no", + "ovre-eiker.no", + "xn--vre-eiker-k8a.no", + "oyer.no", + "xn--yer-zna.no", + "oygarden.no", + "xn--ygarden-p1a.no", + "oystre-slidre.no", + "xn--ystre-slidre-ujb.no", + "porsanger.no", + "porsangu.no", + "xn--porsgu-sta26f.no", + "porsgrunn.no", + "radoy.no", + "xn--rady-ira.no", + "rakkestad.no", + "rana.no", + "ruovat.no", + "randaberg.no", + "rauma.no", + "rendalen.no", + "rennebu.no", + "rennesoy.no", + "xn--rennesy-v1a.no", + "rindal.no", + "ringebu.no", + "ringerike.no", + "ringsaker.no", + "rissa.no", + "risor.no", + "xn--risr-ira.no", + "roan.no", + "rollag.no", + "rygge.no", + "ralingen.no", + "xn--rlingen-mxa.no", + "rodoy.no", + "xn--rdy-0nab.no", + "romskog.no", + "xn--rmskog-bya.no", + "roros.no", + "xn--rros-gra.no", + "rost.no", + "xn--rst-0na.no", + "royken.no", + "xn--ryken-vua.no", + "royrvik.no", + "xn--ryrvik-bya.no", + "rade.no", + "xn--rde-ula.no", + "salangen.no", + "siellak.no", + "saltdal.no", + "salat.no", + "xn--slt-elab.no", + "xn--slat-5na.no", + "samnanger.no", + "sande.more-og-romsdal.no", + "sande.xn--mre-og-romsdal-qqb.no", + "sande.vestfold.no", + "sandefjord.no", + "sandnes.no", + "sandoy.no", + "xn--sandy-yua.no", + "sarpsborg.no", + "sauda.no", + "sauherad.no", + "sel.no", + "selbu.no", + "selje.no", + "seljord.no", + "sigdal.no", + "siljan.no", + "sirdal.no", + "skaun.no", + "skedsmo.no", + "ski.no", + "skien.no", + "skiptvet.no", + "skjervoy.no", + "xn--skjervy-v1a.no", + "skierva.no", + "xn--skierv-uta.no", + "skjak.no", + "xn--skjk-soa.no", + "skodje.no", + "skanland.no", + "xn--sknland-fxa.no", + "skanit.no", + "xn--sknit-yqa.no", + "smola.no", + "xn--smla-hra.no", + "snillfjord.no", + "snasa.no", + "xn--snsa-roa.no", + "snoasa.no", + "snaase.no", + "xn--snase-nra.no", + "sogndal.no", + "sokndal.no", + "sola.no", + "solund.no", + "songdalen.no", + "sortland.no", + "spydeberg.no", + "stange.no", + "stavanger.no", + "steigen.no", + "steinkjer.no", + "stjordal.no", + "xn--stjrdal-s1a.no", + "stokke.no", + "stor-elvdal.no", + "stord.no", + "stordal.no", + "storfjord.no", + "omasvuotna.no", + "strand.no", + "stranda.no", + "stryn.no", + "sula.no", + "suldal.no", + "sund.no", + "sunndal.no", + "surnadal.no", + "sveio.no", + "svelvik.no", + "sykkylven.no", + "sogne.no", + "xn--sgne-gra.no", + "somna.no", + "xn--smna-gra.no", + "sondre-land.no", + "xn--sndre-land-0cb.no", + "sor-aurdal.no", + "xn--sr-aurdal-l8a.no", + "sor-fron.no", + "xn--sr-fron-q1a.no", + "sor-odal.no", + "xn--sr-odal-q1a.no", + "sor-varanger.no", + "xn--sr-varanger-ggb.no", + "matta-varjjat.no", + "xn--mtta-vrjjat-k7af.no", + "sorfold.no", + "xn--srfold-bya.no", + "sorreisa.no", + "xn--srreisa-q1a.no", + "sorum.no", + "xn--srum-gra.no", + "tana.no", + "deatnu.no", + "time.no", + "tingvoll.no", + "tinn.no", + "tjeldsund.no", + "dielddanuorri.no", + "tjome.no", + "xn--tjme-hra.no", + "tokke.no", + "tolga.no", + "torsken.no", + "tranoy.no", + "xn--trany-yua.no", + "tromso.no", + "xn--troms-zua.no", + "tromsa.no", + "romsa.no", + "trondheim.no", + "troandin.no", + "trysil.no", + "trana.no", + "xn--trna-woa.no", + "trogstad.no", + "xn--trgstad-r1a.no", + "tvedestrand.no", + "tydal.no", + "tynset.no", + "tysfjord.no", + "divtasvuodna.no", + "divttasvuotna.no", + "tysnes.no", + "tysvar.no", + "xn--tysvr-vra.no", + "tonsberg.no", + "xn--tnsberg-q1a.no", + "ullensaker.no", + "ullensvang.no", + "ulvik.no", + "utsira.no", + "vadso.no", + "xn--vads-jra.no", + "cahcesuolo.no", + "xn--hcesuolo-7ya35b.no", + "vaksdal.no", + "valle.no", + "vang.no", + "vanylven.no", + "vardo.no", + "xn--vard-jra.no", + "varggat.no", + "xn--vrggt-xqad.no", + "vefsn.no", + "vaapste.no", + "vega.no", + "vegarshei.no", + "xn--vegrshei-c0a.no", + "vennesla.no", + "verdal.no", + "verran.no", + "vestby.no", + "vestnes.no", + "vestre-slidre.no", + "vestre-toten.no", + "vestvagoy.no", + "xn--vestvgy-ixa6o.no", + "vevelstad.no", + "vik.no", + "vikna.no", + "vindafjord.no", + "volda.no", + "voss.no", + "varoy.no", + "xn--vry-yla5g.no", + "vagan.no", + "xn--vgan-qoa.no", + "voagat.no", + "vagsoy.no", + "xn--vgsy-qoa0j.no", + "vaga.no", + "xn--vg-yiab.no", + "valer.ostfold.no", + "xn--vler-qoa.xn--stfold-9xa.no", + "valer.hedmark.no", + "xn--vler-qoa.hedmark.no", + "*.np", + "nr", + "biz.nr", + "info.nr", + "gov.nr", + "edu.nr", + "org.nr", + "net.nr", + "com.nr", + "nu", + "nz", + "ac.nz", + "co.nz", + "cri.nz", + "geek.nz", + "gen.nz", + "govt.nz", + "health.nz", + "iwi.nz", + "kiwi.nz", + "maori.nz", + "mil.nz", + "xn--mori-qsa.nz", + "net.nz", + "org.nz", + "parliament.nz", + "school.nz", + "om", + "co.om", + "com.om", + "edu.om", + "gov.om", + "med.om", + "museum.om", + "net.om", + "org.om", + "pro.om", + "onion", + "org", + "pa", + "ac.pa", + "gob.pa", + "com.pa", + "org.pa", + "sld.pa", + "edu.pa", + "net.pa", + "ing.pa", + "abo.pa", + "med.pa", + "nom.pa", + "pe", + "edu.pe", + "gob.pe", + "nom.pe", + "mil.pe", + "org.pe", + "com.pe", + "net.pe", + "pf", + "com.pf", + "org.pf", + "edu.pf", + "*.pg", + "ph", + "com.ph", + "net.ph", + "org.ph", + "gov.ph", + "edu.ph", + "ngo.ph", + "mil.ph", + "i.ph", + "pk", + "com.pk", + "net.pk", + "edu.pk", + "org.pk", + "fam.pk", + "biz.pk", + "web.pk", + "gov.pk", + "gob.pk", + "gok.pk", + "gon.pk", + "gop.pk", + "gos.pk", + "info.pk", + "pl", + "com.pl", + "net.pl", + "org.pl", + "aid.pl", + "agro.pl", + "atm.pl", + "auto.pl", + "biz.pl", + "edu.pl", + "gmina.pl", + "gsm.pl", + "info.pl", + "mail.pl", + "miasta.pl", + "media.pl", + "mil.pl", + "nieruchomosci.pl", + "nom.pl", + "pc.pl", + "powiat.pl", + "priv.pl", + "realestate.pl", + "rel.pl", + "sex.pl", + "shop.pl", + "sklep.pl", + "sos.pl", + "szkola.pl", + "targi.pl", + "tm.pl", + "tourism.pl", + "travel.pl", + "turystyka.pl", + "gov.pl", + "ap.gov.pl", + "ic.gov.pl", + "is.gov.pl", + "us.gov.pl", + "kmpsp.gov.pl", + "kppsp.gov.pl", + "kwpsp.gov.pl", + "psp.gov.pl", + "wskr.gov.pl", + "kwp.gov.pl", + "mw.gov.pl", + "ug.gov.pl", + "um.gov.pl", + "umig.gov.pl", + "ugim.gov.pl", + "upow.gov.pl", + "uw.gov.pl", + "starostwo.gov.pl", + "pa.gov.pl", + "po.gov.pl", + "psse.gov.pl", + "pup.gov.pl", + "rzgw.gov.pl", + "sa.gov.pl", + "so.gov.pl", + "sr.gov.pl", + "wsa.gov.pl", + "sko.gov.pl", + "uzs.gov.pl", + "wiih.gov.pl", + "winb.gov.pl", + "pinb.gov.pl", + "wios.gov.pl", + "witd.gov.pl", + "wzmiuw.gov.pl", + "piw.gov.pl", + "wiw.gov.pl", + "griw.gov.pl", + "wif.gov.pl", + "oum.gov.pl", + "sdn.gov.pl", + "zp.gov.pl", + "uppo.gov.pl", + "mup.gov.pl", + "wuoz.gov.pl", + "konsulat.gov.pl", + "oirm.gov.pl", + "augustow.pl", + "babia-gora.pl", + "bedzin.pl", + "beskidy.pl", + "bialowieza.pl", + "bialystok.pl", + "bielawa.pl", + "bieszczady.pl", + "boleslawiec.pl", + "bydgoszcz.pl", + "bytom.pl", + "cieszyn.pl", + "czeladz.pl", + "czest.pl", + "dlugoleka.pl", + "elblag.pl", + "elk.pl", + "glogow.pl", + "gniezno.pl", + "gorlice.pl", + "grajewo.pl", + "ilawa.pl", + "jaworzno.pl", + "jelenia-gora.pl", + "jgora.pl", + "kalisz.pl", + "kazimierz-dolny.pl", + "karpacz.pl", + "kartuzy.pl", + "kaszuby.pl", + "katowice.pl", + "kepno.pl", + "ketrzyn.pl", + "klodzko.pl", + "kobierzyce.pl", + "kolobrzeg.pl", + "konin.pl", + "konskowola.pl", + "kutno.pl", + "lapy.pl", + "lebork.pl", + "legnica.pl", + "lezajsk.pl", + "limanowa.pl", + "lomza.pl", + "lowicz.pl", + "lubin.pl", + "lukow.pl", + "malbork.pl", + "malopolska.pl", + "mazowsze.pl", + "mazury.pl", + "mielec.pl", + "mielno.pl", + "mragowo.pl", + "naklo.pl", + "nowaruda.pl", + "nysa.pl", + "olawa.pl", + "olecko.pl", + "olkusz.pl", + "olsztyn.pl", + "opoczno.pl", + "opole.pl", + "ostroda.pl", + "ostroleka.pl", + "ostrowiec.pl", + "ostrowwlkp.pl", + "pila.pl", + "pisz.pl", + "podhale.pl", + "podlasie.pl", + "polkowice.pl", + "pomorze.pl", + "pomorskie.pl", + "prochowice.pl", + "pruszkow.pl", + "przeworsk.pl", + "pulawy.pl", + "radom.pl", + "rawa-maz.pl", + "rybnik.pl", + "rzeszow.pl", + "sanok.pl", + "sejny.pl", + "slask.pl", + "slupsk.pl", + "sosnowiec.pl", + "stalowa-wola.pl", + "skoczow.pl", + "starachowice.pl", + "stargard.pl", + "suwalki.pl", + "swidnica.pl", + "swiebodzin.pl", + "swinoujscie.pl", + "szczecin.pl", + "szczytno.pl", + "tarnobrzeg.pl", + "tgory.pl", + "turek.pl", + "tychy.pl", + "ustka.pl", + "walbrzych.pl", + "warmia.pl", + "warszawa.pl", + "waw.pl", + "wegrow.pl", + "wielun.pl", + "wlocl.pl", + "wloclawek.pl", + "wodzislaw.pl", + "wolomin.pl", + "wroclaw.pl", + "zachpomor.pl", + "zagan.pl", + "zarow.pl", + "zgora.pl", + "zgorzelec.pl", + "pm", + "pn", + "gov.pn", + "co.pn", + "org.pn", + "edu.pn", + "net.pn", + "post", + "pr", + "com.pr", + "net.pr", + "org.pr", + "gov.pr", + "edu.pr", + "isla.pr", + "pro.pr", + "biz.pr", + "info.pr", + "name.pr", + "est.pr", + "prof.pr", + "ac.pr", + "pro", + "aaa.pro", + "aca.pro", + "acct.pro", + "avocat.pro", + "bar.pro", + "cpa.pro", + "eng.pro", + "jur.pro", + "law.pro", + "med.pro", + "recht.pro", + "ps", + "edu.ps", + "gov.ps", + "sec.ps", + "plo.ps", + "com.ps", + "org.ps", + "net.ps", + "pt", + "net.pt", + "gov.pt", + "org.pt", + "edu.pt", + "int.pt", + "publ.pt", + "com.pt", + "nome.pt", + "pw", + "co.pw", + "ne.pw", + "or.pw", + "ed.pw", + "go.pw", + "belau.pw", + "py", + "com.py", + "coop.py", + "edu.py", + "gov.py", + "mil.py", + "net.py", + "org.py", + "qa", + "com.qa", + "edu.qa", + "gov.qa", + "mil.qa", + "name.qa", + "net.qa", + "org.qa", + "sch.qa", + "re", + "asso.re", + "com.re", + "nom.re", + "ro", + "arts.ro", + "com.ro", + "firm.ro", + "info.ro", + "nom.ro", + "nt.ro", + "org.ro", + "rec.ro", + "store.ro", + "tm.ro", + "www.ro", + "rs", + "ac.rs", + "co.rs", + "edu.rs", + "gov.rs", + "in.rs", + "org.rs", + "ru", + "ac.ru", + "edu.ru", + "gov.ru", + "int.ru", + "mil.ru", + "test.ru", + "rw", + "gov.rw", + "net.rw", + "edu.rw", + "ac.rw", + "com.rw", + "co.rw", + "int.rw", + "mil.rw", + "gouv.rw", + "sa", + "com.sa", + "net.sa", + "org.sa", + "gov.sa", + "med.sa", + "pub.sa", + "edu.sa", + "sch.sa", + "sb", + "com.sb", + "edu.sb", + "gov.sb", + "net.sb", + "org.sb", + "sc", + "com.sc", + "gov.sc", + "net.sc", + "org.sc", + "edu.sc", + "sd", + "com.sd", + "net.sd", + "org.sd", + "edu.sd", + "med.sd", + "tv.sd", + "gov.sd", + "info.sd", + "se", + "a.se", + "ac.se", + "b.se", + "bd.se", + "brand.se", + "c.se", + "d.se", + "e.se", + "f.se", + "fh.se", + "fhsk.se", + "fhv.se", + "g.se", + "h.se", + "i.se", + "k.se", + "komforb.se", + "kommunalforbund.se", + "komvux.se", + "l.se", + "lanbib.se", + "m.se", + "n.se", + "naturbruksgymn.se", + "o.se", + "org.se", + "p.se", + "parti.se", + "pp.se", + "press.se", + "r.se", + "s.se", + "t.se", + "tm.se", + "u.se", + "w.se", + "x.se", + "y.se", + "z.se", + "sg", + "com.sg", + "net.sg", + "org.sg", + "gov.sg", + "edu.sg", + "per.sg", + "sh", + "com.sh", + "net.sh", + "gov.sh", + "org.sh", + "mil.sh", + "si", + "sj", + "sk", + "sl", + "com.sl", + "net.sl", + "edu.sl", + "gov.sl", + "org.sl", + "sm", + "sn", + "art.sn", + "com.sn", + "edu.sn", + "gouv.sn", + "org.sn", + "perso.sn", + "univ.sn", + "so", + "com.so", + "net.so", + "org.so", + "sr", + "st", + "co.st", + "com.st", + "consulado.st", + "edu.st", + "embaixada.st", + "gov.st", + "mil.st", + "net.st", + "org.st", + "principe.st", + "saotome.st", + "store.st", + "su", + "sv", + "com.sv", + "edu.sv", + "gob.sv", + "org.sv", + "red.sv", + "sx", + "gov.sx", + "sy", + "edu.sy", + "gov.sy", + "net.sy", + "mil.sy", + "com.sy", + "org.sy", + "sz", + "co.sz", + "ac.sz", + "org.sz", + "tc", + "td", + "tel", + "tf", + "tg", + "th", + "ac.th", + "co.th", + "go.th", + "in.th", + "mi.th", + "net.th", + "or.th", + "tj", + "ac.tj", + "biz.tj", + "co.tj", + "com.tj", + "edu.tj", + "go.tj", + "gov.tj", + "int.tj", + "mil.tj", + "name.tj", + "net.tj", + "nic.tj", + "org.tj", + "test.tj", + "web.tj", + "tk", + "tl", + "gov.tl", + "tm", + "com.tm", + "co.tm", + "org.tm", + "net.tm", + "nom.tm", + "gov.tm", + "mil.tm", + "edu.tm", + "tn", + "com.tn", + "ens.tn", + "fin.tn", + "gov.tn", + "ind.tn", + "intl.tn", + "nat.tn", + "net.tn", + "org.tn", + "info.tn", + "perso.tn", + "tourism.tn", + "edunet.tn", + "rnrt.tn", + "rns.tn", + "rnu.tn", + "mincom.tn", + "agrinet.tn", + "defense.tn", + "turen.tn", + "to", + "com.to", + "gov.to", + "net.to", + "org.to", + "edu.to", + "mil.to", + "tr", + "com.tr", + "info.tr", + "biz.tr", + "net.tr", + "org.tr", + "web.tr", + "gen.tr", + "tv.tr", + "av.tr", + "dr.tr", + "bbs.tr", + "name.tr", + "tel.tr", + "gov.tr", + "bel.tr", + "pol.tr", + "mil.tr", + "k12.tr", + "edu.tr", + "kep.tr", + "nc.tr", + "gov.nc.tr", + "travel", + "tt", + "co.tt", + "com.tt", + "org.tt", + "net.tt", + "biz.tt", + "info.tt", + "pro.tt", + "int.tt", + "coop.tt", + "jobs.tt", + "mobi.tt", + "travel.tt", + "museum.tt", + "aero.tt", + "name.tt", + "gov.tt", + "edu.tt", + "tv", + "tw", + "edu.tw", + "gov.tw", + "mil.tw", + "com.tw", + "net.tw", + "org.tw", + "idv.tw", + "game.tw", + "ebiz.tw", + "club.tw", + "xn--zf0ao64a.tw", + "xn--uc0atv.tw", + "xn--czrw28b.tw", + "tz", + "ac.tz", + "co.tz", + "go.tz", + "hotel.tz", + "info.tz", + "me.tz", + "mil.tz", + "mobi.tz", + "ne.tz", + "or.tz", + "sc.tz", + "tv.tz", + "ua", + "com.ua", + "edu.ua", + "gov.ua", + "in.ua", + "net.ua", + "org.ua", + "cherkassy.ua", + "cherkasy.ua", + "chernigov.ua", + "chernihiv.ua", + "chernivtsi.ua", + "chernovtsy.ua", + "ck.ua", + "cn.ua", + "cr.ua", + "crimea.ua", + "cv.ua", + "dn.ua", + "dnepropetrovsk.ua", + "dnipropetrovsk.ua", + "dominic.ua", + "donetsk.ua", + "dp.ua", + "if.ua", + "ivano-frankivsk.ua", + "kh.ua", + "kharkiv.ua", + "kharkov.ua", + "kherson.ua", + "khmelnitskiy.ua", + "khmelnytskyi.ua", + "kiev.ua", + "kirovograd.ua", + "km.ua", + "kr.ua", + "krym.ua", + "ks.ua", + "kv.ua", + "kyiv.ua", + "lg.ua", + "lt.ua", + "lugansk.ua", + "lutsk.ua", + "lv.ua", + "lviv.ua", + "mk.ua", + "mykolaiv.ua", + "nikolaev.ua", + "od.ua", + "odesa.ua", + "odessa.ua", + "pl.ua", + "poltava.ua", + "rivne.ua", + "rovno.ua", + "rv.ua", + "sb.ua", + "sebastopol.ua", + "sevastopol.ua", + "sm.ua", + "sumy.ua", + "te.ua", + "ternopil.ua", + "uz.ua", + "uzhgorod.ua", + "vinnica.ua", + "vinnytsia.ua", + "vn.ua", + "volyn.ua", + "yalta.ua", + "zaporizhzhe.ua", + "zaporizhzhia.ua", + "zhitomir.ua", + "zhytomyr.ua", + "zp.ua", + "zt.ua", + "ug", + "co.ug", + "or.ug", + "ac.ug", + "sc.ug", + "go.ug", + "ne.ug", + "com.ug", + "org.ug", + "uk", + "ac.uk", + "co.uk", + "gov.uk", + "ltd.uk", + "me.uk", + "net.uk", + "nhs.uk", + "org.uk", + "plc.uk", + "police.uk", + "*.sch.uk", + "us", + "dni.us", + "fed.us", + "isa.us", + "kids.us", + "nsn.us", + "ak.us", + "al.us", + "ar.us", + "as.us", + "az.us", + "ca.us", + "co.us", + "ct.us", + "dc.us", + "de.us", + "fl.us", + "ga.us", + "gu.us", + "hi.us", + "ia.us", + "id.us", + "il.us", + "in.us", + "ks.us", + "ky.us", + "la.us", + "ma.us", + "md.us", + "me.us", + "mi.us", + "mn.us", + "mo.us", + "ms.us", + "mt.us", + "nc.us", + "nd.us", + "ne.us", + "nh.us", + "nj.us", + "nm.us", + "nv.us", + "ny.us", + "oh.us", + "ok.us", + "or.us", + "pa.us", + "pr.us", + "ri.us", + "sc.us", + "sd.us", + "tn.us", + "tx.us", + "ut.us", + "vi.us", + "vt.us", + "va.us", + "wa.us", + "wi.us", + "wv.us", + "wy.us", + "k12.ak.us", + "k12.al.us", + "k12.ar.us", + "k12.as.us", + "k12.az.us", + "k12.ca.us", + "k12.co.us", + "k12.ct.us", + "k12.dc.us", + "k12.de.us", + "k12.fl.us", + "k12.ga.us", + "k12.gu.us", + "k12.ia.us", + "k12.id.us", + "k12.il.us", + "k12.in.us", + "k12.ks.us", + "k12.ky.us", + "k12.la.us", + "k12.ma.us", + "k12.md.us", + "k12.me.us", + "k12.mi.us", + "k12.mn.us", + "k12.mo.us", + "k12.ms.us", + "k12.mt.us", + "k12.nc.us", + "k12.ne.us", + "k12.nh.us", + "k12.nj.us", + "k12.nm.us", + "k12.nv.us", + "k12.ny.us", + "k12.oh.us", + "k12.ok.us", + "k12.or.us", + "k12.pa.us", + "k12.pr.us", + "k12.ri.us", + "k12.sc.us", + "k12.tn.us", + "k12.tx.us", + "k12.ut.us", + "k12.vi.us", + "k12.vt.us", + "k12.va.us", + "k12.wa.us", + "k12.wi.us", + "k12.wy.us", + "cc.ak.us", + "cc.al.us", + "cc.ar.us", + "cc.as.us", + "cc.az.us", + "cc.ca.us", + "cc.co.us", + "cc.ct.us", + "cc.dc.us", + "cc.de.us", + "cc.fl.us", + "cc.ga.us", + "cc.gu.us", + "cc.hi.us", + "cc.ia.us", + "cc.id.us", + "cc.il.us", + "cc.in.us", + "cc.ks.us", + "cc.ky.us", + "cc.la.us", + "cc.ma.us", + "cc.md.us", + "cc.me.us", + "cc.mi.us", + "cc.mn.us", + "cc.mo.us", + "cc.ms.us", + "cc.mt.us", + "cc.nc.us", + "cc.nd.us", + "cc.ne.us", + "cc.nh.us", + "cc.nj.us", + "cc.nm.us", + "cc.nv.us", + "cc.ny.us", + "cc.oh.us", + "cc.ok.us", + "cc.or.us", + "cc.pa.us", + "cc.pr.us", + "cc.ri.us", + "cc.sc.us", + "cc.sd.us", + "cc.tn.us", + "cc.tx.us", + "cc.ut.us", + "cc.vi.us", + "cc.vt.us", + "cc.va.us", + "cc.wa.us", + "cc.wi.us", + "cc.wv.us", + "cc.wy.us", + "lib.ak.us", + "lib.al.us", + "lib.ar.us", + "lib.as.us", + "lib.az.us", + "lib.ca.us", + "lib.co.us", + "lib.ct.us", + "lib.dc.us", + "lib.fl.us", + "lib.ga.us", + "lib.gu.us", + "lib.hi.us", + "lib.ia.us", + "lib.id.us", + "lib.il.us", + "lib.in.us", + "lib.ks.us", + "lib.ky.us", + "lib.la.us", + "lib.ma.us", + "lib.md.us", + "lib.me.us", + "lib.mi.us", + "lib.mn.us", + "lib.mo.us", + "lib.ms.us", + "lib.mt.us", + "lib.nc.us", + "lib.nd.us", + "lib.ne.us", + "lib.nh.us", + "lib.nj.us", + "lib.nm.us", + "lib.nv.us", + "lib.ny.us", + "lib.oh.us", + "lib.ok.us", + "lib.or.us", + "lib.pa.us", + "lib.pr.us", + "lib.ri.us", + "lib.sc.us", + "lib.sd.us", + "lib.tn.us", + "lib.tx.us", + "lib.ut.us", + "lib.vi.us", + "lib.vt.us", + "lib.va.us", + "lib.wa.us", + "lib.wi.us", + "lib.wy.us", + "pvt.k12.ma.us", + "chtr.k12.ma.us", + "paroch.k12.ma.us", + "ann-arbor.mi.us", + "cog.mi.us", + "dst.mi.us", + "eaton.mi.us", + "gen.mi.us", + "mus.mi.us", + "tec.mi.us", + "washtenaw.mi.us", + "uy", + "com.uy", + "edu.uy", + "gub.uy", + "mil.uy", + "net.uy", + "org.uy", + "uz", + "co.uz", + "com.uz", + "net.uz", + "org.uz", + "va", + "vc", + "com.vc", + "net.vc", + "org.vc", + "gov.vc", + "mil.vc", + "edu.vc", + "ve", + "arts.ve", + "co.ve", + "com.ve", + "e12.ve", + "edu.ve", + "firm.ve", + "gob.ve", + "gov.ve", + "info.ve", + "int.ve", + "mil.ve", + "net.ve", + "org.ve", + "rec.ve", + "store.ve", + "tec.ve", + "web.ve", + "vg", + "vi", + "co.vi", + "com.vi", + "k12.vi", + "net.vi", + "org.vi", + "vn", + "com.vn", + "net.vn", + "org.vn", + "edu.vn", + "gov.vn", + "int.vn", + "ac.vn", + "biz.vn", + "info.vn", + "name.vn", + "pro.vn", + "health.vn", + "vu", + "com.vu", + "edu.vu", + "net.vu", + "org.vu", + "wf", + "ws", + "com.ws", + "net.ws", + "org.ws", + "gov.ws", + "edu.ws", + "yt", + "xn--mgbaam7a8h", + "xn--y9a3aq", + "xn--54b7fta0cc", + "xn--90ae", + "xn--90ais", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--lgbbat1ad8j", + "xn--wgbh1c", + "xn--e1a4c", + "xn--node", + "xn--qxam", + "xn--j6w193g", + "xn--2scrj9c", + "xn--3hcrj9c", + "xn--45br5cyl", + "xn--h2breg3eve", + "xn--h2brj9c8c", + "xn--mgbgu82a", + "xn--rvc1e0am3e", + "xn--h2brj9c", + "xn--mgbbh1a71e", + "xn--fpcrj9c3d", + "xn--gecrj9c", + "xn--s9brj9c", + "xn--45brj9c", + "xn--xkc2dl3a5ee0h", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgbtx2b", + "xn--mgbayh7gpa", + "xn--3e0b707e", + "xn--80ao21a", + "xn--fzc2c9e2c", + "xn--xkc2al3hye2a", + "xn--mgbc0a9azcg", + "xn--d1alf", + "xn--l1acc", + "xn--mix891f", + "xn--mix082f", + "xn--mgbx4cd0ab", + "xn--mgb9awbf", + "xn--mgbai9azgqp6j", + "xn--mgbai9a5eva00b", + "xn--ygbi2ammx", + "xn--90a3ac", + "xn--o1ac.xn--90a3ac", + "xn--c1avg.xn--90a3ac", + "xn--90azh.xn--90a3ac", + "xn--d1at.xn--90a3ac", + "xn--o1ach.xn--90a3ac", + "xn--80au.xn--90a3ac", + "xn--p1ai", + "xn--wgbl6a", + "xn--mgberp4a5d4ar", + "xn--mgberp4a5d4a87g", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbpl2fh", + "xn--yfro4i67o", + "xn--clchc0ea0b2g2a9gcd", + "xn--ogbpf8fl", + "xn--mgbtf8fl", + "xn--o3cw4h", + "xn--12c1fe0br.xn--o3cw4h", + "xn--12co0c3b4eva.xn--o3cw4h", + "xn--h3cuzk1di.xn--o3cw4h", + "xn--o3cyx2a.xn--o3cw4h", + "xn--m3ch0j3a.xn--o3cw4h", + "xn--12cfi8ixb8l.xn--o3cw4h", + "xn--pgbs0dh", + "xn--kpry57d", + "xn--kprw13d", + "xn--nnx388a", + "xn--j1amh", + "xn--mgb2ddes", + "xxx", + "*.ye", + "ac.za", + "agric.za", + "alt.za", + "co.za", + "edu.za", + "gov.za", + "grondar.za", + "law.za", + "mil.za", + "net.za", + "ngo.za", + "nis.za", + "nom.za", + "org.za", + "school.za", + "tm.za", + "web.za", + "zm", + "ac.zm", + "biz.zm", + "co.zm", + "com.zm", + "edu.zm", + "gov.zm", + "info.zm", + "mil.zm", + "net.zm", + "org.zm", + "sch.zm", + "zw", + "ac.zw", + "co.zw", + "gov.zw", + "mil.zw", + "org.zw", + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "adac", + "ads", + "adult", + "aeg", + "aetna", + "afamilycompany", + "afl", + "africa", + "agakhan", + "agency", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "aol", + "apartments", + "app", + "apple", + "aquarelle", + "arab", + "aramco", + "archi", + "army", + "art", + "arte", + "asda", + "associates", + "athleta", + "attorney", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aws", + "axa", + "azure", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bharti", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bms", + "bmw", + "bnl", + "bnpparibas", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bzh", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "ceb", + "center", + "ceo", + "cern", + "cfa", + "cfd", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "christmas", + "chrome", + "chrysler", + "church", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "coach", + "codes", + "coffee", + "college", + "cologne", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cuisinella", + "cymru", + "cyou", + "dabur", + "dad", + "dance", + "data", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dnp", + "docs", + "doctor", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dvr", + "earth", + "eat", + "eco", + "edeka", + "education", + "email", + "emerck", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "ericsson", + "erni", + "esq", + "estate", + "esurance", + "etisalat", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "fly", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gbiz", + "gdn", + "gea", + "gent", + "genting", + "george", + "ggee", + "gift", + "gifts", + "gives", + "giving", + "glade", + "glass", + "gle", + "global", + "globo", + "gmail", + "gmbh", + "gmo", + "gmx", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "grocery", + "group", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hkt", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "hospital", + "host", + "hosting", + "hot", + "hoteles", + "hotels", + "hotmail", + "house", + "how", + "hsbc", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "ieee", + "ifm", + "ikano", + "imamat", + "imdb", + "immo", + "immobilien", + "industries", + "infiniti", + "ing", + "ink", + "institute", + "insurance", + "insure", + "intel", + "international", + "intuit", + "investments", + "ipiranga", + "irish", + "iselect", + "ismaili", + "ist", + "istanbul", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jmp", + "jnj", + "joburg", + "jot", + "joy", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "koeln", + "komatsu", + "kosher", + "kpmg", + "kpn", + "krd", + "kred", + "kuokgroup", + "kyoto", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "ltd", + "ltda", + "lundbeck", + "lupin", + "luxe", + "luxury", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "map", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mckinsey", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "merckmsd", + "metlife", + "miami", + "microsoft", + "mini", + "mint", + "mit", + "mitsubishi", + "mlb", + "mls", + "mma", + "mobile", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "msd", + "mtn", + "mtpc", + "mtr", + "mutual", + "nab", + "nadex", + "nagoya", + "nationwide", + "natura", + "navy", + "nba", + "nec", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nfl", + "ngo", + "nhk", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "nra", + "nrw", + "ntt", + "nyc", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "omega", + "one", + "ong", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "organic", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "page", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "pccw", + "pet", + "pfizer", + "pharmacy", + "phd", + "philips", + "phone", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "pramerica", + "praxi", + "press", + "prime", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "pub", + "pwc", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "radio", + "raid", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rsvp", + "rugby", + "ruhr", + "run", + "rwe", + "ryukyu", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sbi", + "sbs", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "search", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "shangrila", + "sharp", + "shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shop", + "shopping", + "shouji", + "show", + "showtime", + "shriram", + "silk", + "sina", + "singles", + "site", + "ski", + "skin", + "sky", + "skype", + "sling", + "smart", + "smile", + "sncf", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "srl", + "srt", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "sucks", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "swatch", + "swiftcover", + "swiss", + "sydney", + "symantec", + "systems", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tci", + "tdk", + "team", + "tech", + "technology", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "thd", + "theater", + "theatre", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tjmaxx", + "tjx", + "tkmaxx", + "tmall", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "trade", + "trading", + "training", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tube", + "tui", + "tunes", + "tushu", + "tvs", + "ubank", + "ubs", + "uconnect", + "unicom", + "university", + "uno", + "uol", + "ups", + "vacations", + "vana", + "vanguard", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45q11c", + "xn--4gbrim", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fjq720a", + "xn--flw351e", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gk3at1e", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kpu716f", + "xn--kput3i", + "xn--mgba3a3ejt", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbab2bd", + "xn--mgbb9fbpob", + "xn--mgbca7dzdo", + "xn--mgbi4ecexp", + "xn--mgbt3dhd", + "xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--p1acf", + "xn--pbt977c", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--rhqv96g", + "xn--rovu88b", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--xhq521b", + "xn--zfr164b", + "xperia", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yun", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zone", + "zuerich", + "cc.ua", + "inf.ua", + "ltd.ua", + "1password.ca", + "1password.com", + "1password.eu", + "beep.pl", + "*.compute.estate", + "*.alces.network", + "alwaysdata.net", + "cloudfront.net", + "*.compute.amazonaws.com", + "*.compute-1.amazonaws.com", + "*.compute.amazonaws.com.cn", + "us-east-1.amazonaws.com", + "cn-north-1.eb.amazonaws.com.cn", + "elasticbeanstalk.com", + "ap-northeast-1.elasticbeanstalk.com", + "ap-northeast-2.elasticbeanstalk.com", + "ap-south-1.elasticbeanstalk.com", + "ap-southeast-1.elasticbeanstalk.com", + "ap-southeast-2.elasticbeanstalk.com", + "ca-central-1.elasticbeanstalk.com", + "eu-central-1.elasticbeanstalk.com", + "eu-west-1.elasticbeanstalk.com", + "eu-west-2.elasticbeanstalk.com", + "eu-west-3.elasticbeanstalk.com", + "sa-east-1.elasticbeanstalk.com", + "us-east-1.elasticbeanstalk.com", + "us-east-2.elasticbeanstalk.com", + "us-gov-west-1.elasticbeanstalk.com", + "us-west-1.elasticbeanstalk.com", + "us-west-2.elasticbeanstalk.com", + "*.elb.amazonaws.com", + "*.elb.amazonaws.com.cn", + "s3.amazonaws.com", + "s3-ap-northeast-1.amazonaws.com", + "s3-ap-northeast-2.amazonaws.com", + "s3-ap-south-1.amazonaws.com", + "s3-ap-southeast-1.amazonaws.com", + "s3-ap-southeast-2.amazonaws.com", + "s3-ca-central-1.amazonaws.com", + "s3-eu-central-1.amazonaws.com", + "s3-eu-west-1.amazonaws.com", + "s3-eu-west-2.amazonaws.com", + "s3-eu-west-3.amazonaws.com", + "s3-external-1.amazonaws.com", + "s3-fips-us-gov-west-1.amazonaws.com", + "s3-sa-east-1.amazonaws.com", + "s3-us-gov-west-1.amazonaws.com", + "s3-us-east-2.amazonaws.com", + "s3-us-west-1.amazonaws.com", + "s3-us-west-2.amazonaws.com", + "s3.ap-northeast-2.amazonaws.com", + "s3.ap-south-1.amazonaws.com", + "s3.cn-north-1.amazonaws.com.cn", + "s3.ca-central-1.amazonaws.com", + "s3.eu-central-1.amazonaws.com", + "s3.eu-west-2.amazonaws.com", + "s3.eu-west-3.amazonaws.com", + "s3.us-east-2.amazonaws.com", + "s3.dualstack.ap-northeast-1.amazonaws.com", + "s3.dualstack.ap-northeast-2.amazonaws.com", + "s3.dualstack.ap-south-1.amazonaws.com", + "s3.dualstack.ap-southeast-1.amazonaws.com", + "s3.dualstack.ap-southeast-2.amazonaws.com", + "s3.dualstack.ca-central-1.amazonaws.com", + "s3.dualstack.eu-central-1.amazonaws.com", + "s3.dualstack.eu-west-1.amazonaws.com", + "s3.dualstack.eu-west-2.amazonaws.com", + "s3.dualstack.eu-west-3.amazonaws.com", + "s3.dualstack.sa-east-1.amazonaws.com", + "s3.dualstack.us-east-1.amazonaws.com", + "s3.dualstack.us-east-2.amazonaws.com", + "s3-website-us-east-1.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ca-central-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website.eu-west-2.amazonaws.com", + "s3-website.eu-west-3.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "t3l3p0rt.net", + "tele.amune.org", + "on-aptible.com", + "user.party.eus", + "pimienta.org", + "poivron.org", + "potager.org", + "sweetpepper.org", + "myasustor.com", + "myfritz.net", + "*.awdev.ca", + "*.advisor.ws", + "backplaneapp.io", + "betainabox.com", + "bnr.la", + "boomla.net", + "boxfuse.io", + "square7.ch", + "bplaced.com", + "bplaced.de", + "square7.de", + "bplaced.net", + "square7.net", + "browsersafetymark.io", + "mycd.eu", + "ae.org", + "ar.com", + "br.com", + "cn.com", + "com.de", + "com.se", + "de.com", + "eu.com", + "gb.com", + "gb.net", + "hu.com", + "hu.net", + "jp.net", + "jpn.com", + "kr.com", + "mex.com", + "no.com", + "qc.com", + "ru.com", + "sa.com", + "se.com", + "se.net", + "uk.com", + "uk.net", + "us.com", + "uy.com", + "za.bz", + "za.com", + "africa.com", + "gr.com", + "in.net", + "us.org", + "co.com", + "c.la", + "certmgr.org", + "xenapponazure.com", + "virtueeldomein.nl", + "c66.me", + "jdevcloud.com", + "wpdevcloud.com", + "cloudaccess.host", + "freesite.host", + "cloudaccess.net", + "cloudcontrolled.com", + "cloudcontrolapp.com", + "co.ca", + "co.cz", + "c.cdn77.org", + "cdn77-ssl.net", + "r.cdn77.net", + "rsc.cdn77.org", + "ssl.origin.cdn77-secure.org", + "cloudns.asia", + "cloudns.biz", + "cloudns.club", + "cloudns.cc", + "cloudns.eu", + "cloudns.in", + "cloudns.info", + "cloudns.org", + "cloudns.pro", + "cloudns.pw", + "cloudns.us", + "co.nl", + "co.no", + "webhosting.be", + "hosting-cluster.nl", + "dyn.cosidns.de", + "dynamisches-dns.de", + "dnsupdater.de", + "internet-dns.de", + "l-o-g-i-n.de", + "dynamic-dns.info", + "feste-ip.net", + "knx-server.net", + "static-access.net", + "realm.cz", + "*.cryptonomic.net", + "cupcake.is", + "cyon.link", + "cyon.site", + "daplie.me", + "localhost.daplie.me", + "biz.dk", + "co.dk", + "firm.dk", + "reg.dk", + "store.dk", + "debian.net", + "dedyn.io", + "dnshome.de", + "drayddns.com", + "dreamhosters.com", + "mydrobo.com", + "drud.io", + "drud.us", + "duckdns.org", + "dy.fi", + "tunk.org", + "dyndns-at-home.com", + "dyndns-at-work.com", + "dyndns-blog.com", + "dyndns-free.com", + "dyndns-home.com", + "dyndns-ip.com", + "dyndns-mail.com", + "dyndns-office.com", + "dyndns-pics.com", + "dyndns-remote.com", + "dyndns-server.com", + "dyndns-web.com", + "dyndns-wiki.com", + "dyndns-work.com", + "dyndns.biz", + "dyndns.info", + "dyndns.org", + "dyndns.tv", + "at-band-camp.net", + "ath.cx", + "barrel-of-knowledge.info", + "barrell-of-knowledge.info", + "better-than.tv", + "blogdns.com", + "blogdns.net", + "blogdns.org", + "blogsite.org", + "boldlygoingnowhere.org", + "broke-it.net", + "buyshouses.net", + "cechire.com", + "dnsalias.com", + "dnsalias.net", + "dnsalias.org", + "dnsdojo.com", + "dnsdojo.net", + "dnsdojo.org", + "does-it.net", + "doesntexist.com", + "doesntexist.org", + "dontexist.com", + "dontexist.net", + "dontexist.org", + "doomdns.com", + "doomdns.org", + "dvrdns.org", + "dyn-o-saur.com", + "dynalias.com", + "dynalias.net", + "dynalias.org", + "dynathome.net", + "dyndns.ws", + "endofinternet.net", + "endofinternet.org", + "endoftheinternet.org", + "est-a-la-maison.com", + "est-a-la-masion.com", + "est-le-patron.com", + "est-mon-blogueur.com", + "for-better.biz", + "for-more.biz", + "for-our.info", + "for-some.biz", + "for-the.biz", + "forgot.her.name", + "forgot.his.name", + "from-ak.com", + "from-al.com", + "from-ar.com", + "from-az.net", + "from-ca.com", + "from-co.net", + "from-ct.com", + "from-dc.com", + "from-de.com", + "from-fl.com", + "from-ga.com", + "from-hi.com", + "from-ia.com", + "from-id.com", + "from-il.com", + "from-in.com", + "from-ks.com", + "from-ky.com", + "from-la.net", + "from-ma.com", + "from-md.com", + "from-me.org", + "from-mi.com", + "from-mn.com", + "from-mo.com", + "from-ms.com", + "from-mt.com", + "from-nc.com", + "from-nd.com", + "from-ne.com", + "from-nh.com", + "from-nj.com", + "from-nm.com", + "from-nv.com", + "from-ny.net", + "from-oh.com", + "from-ok.com", + "from-or.com", + "from-pa.com", + "from-pr.com", + "from-ri.com", + "from-sc.com", + "from-sd.com", + "from-tn.com", + "from-tx.com", + "from-ut.com", + "from-va.com", + "from-vt.com", + "from-wa.com", + "from-wi.com", + "from-wv.com", + "from-wy.com", + "ftpaccess.cc", + "fuettertdasnetz.de", + "game-host.org", + "game-server.cc", + "getmyip.com", + "gets-it.net", + "go.dyndns.org", + "gotdns.com", + "gotdns.org", + "groks-the.info", + "groks-this.info", + "ham-radio-op.net", + "here-for-more.info", + "hobby-site.com", + "hobby-site.org", + "home.dyndns.org", + "homedns.org", + "homeftp.net", + "homeftp.org", + "homeip.net", + "homelinux.com", + "homelinux.net", + "homelinux.org", + "homeunix.com", + "homeunix.net", + "homeunix.org", + "iamallama.com", + "in-the-band.net", + "is-a-anarchist.com", + "is-a-blogger.com", + "is-a-bookkeeper.com", + "is-a-bruinsfan.org", + "is-a-bulls-fan.com", + "is-a-candidate.org", + "is-a-caterer.com", + "is-a-celticsfan.org", + "is-a-chef.com", + "is-a-chef.net", + "is-a-chef.org", + "is-a-conservative.com", + "is-a-cpa.com", + "is-a-cubicle-slave.com", + "is-a-democrat.com", + "is-a-designer.com", + "is-a-doctor.com", + "is-a-financialadvisor.com", + "is-a-geek.com", + "is-a-geek.net", + "is-a-geek.org", + "is-a-green.com", + "is-a-guru.com", + "is-a-hard-worker.com", + "is-a-hunter.com", + "is-a-knight.org", + "is-a-landscaper.com", + "is-a-lawyer.com", + "is-a-liberal.com", + "is-a-libertarian.com", + "is-a-linux-user.org", + "is-a-llama.com", + "is-a-musician.com", + "is-a-nascarfan.com", + "is-a-nurse.com", + "is-a-painter.com", + "is-a-patsfan.org", + "is-a-personaltrainer.com", + "is-a-photographer.com", + "is-a-player.com", + "is-a-republican.com", + "is-a-rockstar.com", + "is-a-socialist.com", + "is-a-soxfan.org", + "is-a-student.com", + "is-a-teacher.com", + "is-a-techie.com", + "is-a-therapist.com", + "is-an-accountant.com", + "is-an-actor.com", + "is-an-actress.com", + "is-an-anarchist.com", + "is-an-artist.com", + "is-an-engineer.com", + "is-an-entertainer.com", + "is-by.us", + "is-certified.com", + "is-found.org", + "is-gone.com", + "is-into-anime.com", + "is-into-cars.com", + "is-into-cartoons.com", + "is-into-games.com", + "is-leet.com", + "is-lost.org", + "is-not-certified.com", + "is-saved.org", + "is-slick.com", + "is-uberleet.com", + "is-very-bad.org", + "is-very-evil.org", + "is-very-good.org", + "is-very-nice.org", + "is-very-sweet.org", + "is-with-theband.com", + "isa-geek.com", + "isa-geek.net", + "isa-geek.org", + "isa-hockeynut.com", + "issmarterthanyou.com", + "isteingeek.de", + "istmein.de", + "kicks-ass.net", + "kicks-ass.org", + "knowsitall.info", + "land-4-sale.us", + "lebtimnetz.de", + "leitungsen.de", + "likes-pie.com", + "likescandy.com", + "merseine.nu", + "mine.nu", + "misconfused.org", + "mypets.ws", + "myphotos.cc", + "neat-url.com", + "office-on-the.net", + "on-the-web.tv", + "podzone.net", + "podzone.org", + "readmyblog.org", + "saves-the-whales.com", + "scrapper-site.net", + "scrapping.cc", + "selfip.biz", + "selfip.com", + "selfip.info", + "selfip.net", + "selfip.org", + "sells-for-less.com", + "sells-for-u.com", + "sells-it.net", + "sellsyourhome.org", + "servebbs.com", + "servebbs.net", + "servebbs.org", + "serveftp.net", + "serveftp.org", + "servegame.org", + "shacknet.nu", + "simple-url.com", + "space-to-rent.com", + "stuff-4-sale.org", + "stuff-4-sale.us", + "teaches-yoga.com", + "thruhere.net", + "traeumtgerade.de", + "webhop.biz", + "webhop.info", + "webhop.net", + "webhop.org", + "worse-than.tv", + "writesthisblog.com", + "ddnss.de", + "dyn.ddnss.de", + "dyndns.ddnss.de", + "dyndns1.de", + "dyn-ip24.de", + "home-webserver.de", + "dyn.home-webserver.de", + "myhome-server.de", + "ddnss.org", + "definima.net", + "definima.io", + "ddnsfree.com", + "ddnsgeek.com", + "giize.com", + "gleeze.com", + "kozow.com", + "loseyourip.com", + "ooguy.com", + "theworkpc.com", + "casacam.net", + "dynu.net", + "accesscam.org", + "camdvr.org", + "freeddns.org", + "mywire.org", + "webredirect.org", + "myddns.rocks", + "blogsite.xyz", + "dynv6.net", + "e4.cz", + "mytuleap.com", + "enonic.io", + "customer.enonic.io", + "eu.org", + "al.eu.org", + "asso.eu.org", + "at.eu.org", + "au.eu.org", + "be.eu.org", + "bg.eu.org", + "ca.eu.org", + "cd.eu.org", + "ch.eu.org", + "cn.eu.org", + "cy.eu.org", + "cz.eu.org", + "de.eu.org", + "dk.eu.org", + "edu.eu.org", + "ee.eu.org", + "es.eu.org", + "fi.eu.org", + "fr.eu.org", + "gr.eu.org", + "hr.eu.org", + "hu.eu.org", + "ie.eu.org", + "il.eu.org", + "in.eu.org", + "int.eu.org", + "is.eu.org", + "it.eu.org", + "jp.eu.org", + "kr.eu.org", + "lt.eu.org", + "lu.eu.org", + "lv.eu.org", + "mc.eu.org", + "me.eu.org", + "mk.eu.org", + "mt.eu.org", + "my.eu.org", + "net.eu.org", + "ng.eu.org", + "nl.eu.org", + "no.eu.org", + "nz.eu.org", + "paris.eu.org", + "pl.eu.org", + "pt.eu.org", + "q-a.eu.org", + "ro.eu.org", + "ru.eu.org", + "se.eu.org", + "si.eu.org", + "sk.eu.org", + "tr.eu.org", + "uk.eu.org", + "us.eu.org", + "eu-1.evennode.com", + "eu-2.evennode.com", + "eu-3.evennode.com", + "eu-4.evennode.com", + "us-1.evennode.com", + "us-2.evennode.com", + "us-3.evennode.com", + "us-4.evennode.com", + "twmail.cc", + "twmail.net", + "twmail.org", + "mymailer.com.tw", + "url.tw", + "apps.fbsbx.com", + "ru.net", + "adygeya.ru", + "bashkiria.ru", + "bir.ru", + "cbg.ru", + "com.ru", + "dagestan.ru", + "grozny.ru", + "kalmykia.ru", + "kustanai.ru", + "marine.ru", + "mordovia.ru", + "msk.ru", + "mytis.ru", + "nalchik.ru", + "nov.ru", + "pyatigorsk.ru", + "spb.ru", + "vladikavkaz.ru", + "vladimir.ru", + "abkhazia.su", + "adygeya.su", + "aktyubinsk.su", + "arkhangelsk.su", + "armenia.su", + "ashgabad.su", + "azerbaijan.su", + "balashov.su", + "bashkiria.su", + "bryansk.su", + "bukhara.su", + "chimkent.su", + "dagestan.su", + "east-kazakhstan.su", + "exnet.su", + "georgia.su", + "grozny.su", + "ivanovo.su", + "jambyl.su", + "kalmykia.su", + "kaluga.su", + "karacol.su", + "karaganda.su", + "karelia.su", + "khakassia.su", + "krasnodar.su", + "kurgan.su", + "kustanai.su", + "lenug.su", + "mangyshlak.su", + "mordovia.su", + "msk.su", + "murmansk.su", + "nalchik.su", + "navoi.su", + "north-kazakhstan.su", + "nov.su", + "obninsk.su", + "penza.su", + "pokrovsk.su", + "sochi.su", + "spb.su", + "tashkent.su", + "termez.su", + "togliatti.su", + "troitsk.su", + "tselinograd.su", + "tula.su", + "tuva.su", + "vladikavkaz.su", + "vladimir.su", + "vologda.su", + "channelsdvr.net", + "fastlylb.net", + "map.fastlylb.net", + "freetls.fastly.net", + "map.fastly.net", + "a.prod.fastly.net", + "global.prod.fastly.net", + "a.ssl.fastly.net", + "b.ssl.fastly.net", + "global.ssl.fastly.net", + "fhapp.xyz", + "fedorainfracloud.org", + "fedorapeople.org", + "cloud.fedoraproject.org", + "app.os.fedoraproject.org", + "app.os.stg.fedoraproject.org", + "filegear.me", + "firebaseapp.com", + "flynnhub.com", + "flynnhosting.net", + "freebox-os.com", + "freeboxos.com", + "fbx-os.fr", + "fbxos.fr", + "freebox-os.fr", + "freeboxos.fr", + "*.futurecms.at", + "futurehosting.at", + "futuremailing.at", + "*.ex.ortsinfo.at", + "*.kunden.ortsinfo.at", + "*.statics.cloud", + "service.gov.uk", + "github.io", + "githubusercontent.com", + "gitlab.io", + "homeoffice.gov.uk", + "ro.im", + "shop.ro", + "goip.de", + "*.0emm.com", + "appspot.com", + "blogspot.ae", + "blogspot.al", + "blogspot.am", + "blogspot.ba", + "blogspot.be", + "blogspot.bg", + "blogspot.bj", + "blogspot.ca", + "blogspot.cf", + "blogspot.ch", + "blogspot.cl", + "blogspot.co.at", + "blogspot.co.id", + "blogspot.co.il", + "blogspot.co.ke", + "blogspot.co.nz", + "blogspot.co.uk", + "blogspot.co.za", + "blogspot.com", + "blogspot.com.ar", + "blogspot.com.au", + "blogspot.com.br", + "blogspot.com.by", + "blogspot.com.co", + "blogspot.com.cy", + "blogspot.com.ee", + "blogspot.com.eg", + "blogspot.com.es", + "blogspot.com.mt", + "blogspot.com.ng", + "blogspot.com.tr", + "blogspot.com.uy", + "blogspot.cv", + "blogspot.cz", + "blogspot.de", + "blogspot.dk", + "blogspot.fi", + "blogspot.fr", + "blogspot.gr", + "blogspot.hk", + "blogspot.hr", + "blogspot.hu", + "blogspot.ie", + "blogspot.in", + "blogspot.is", + "blogspot.it", + "blogspot.jp", + "blogspot.kr", + "blogspot.li", + "blogspot.lt", + "blogspot.lu", + "blogspot.md", + "blogspot.mk", + "blogspot.mr", + "blogspot.mx", + "blogspot.my", + "blogspot.nl", + "blogspot.no", + "blogspot.pe", + "blogspot.pt", + "blogspot.qa", + "blogspot.re", + "blogspot.ro", + "blogspot.rs", + "blogspot.ru", + "blogspot.se", + "blogspot.sg", + "blogspot.si", + "blogspot.sk", + "blogspot.sn", + "blogspot.td", + "blogspot.tw", + "blogspot.ug", + "blogspot.vn", + "cloudfunctions.net", + "cloud.goog", + "codespot.com", + "googleapis.com", + "googlecode.com", + "pagespeedmobilizer.com", + "publishproxy.com", + "withgoogle.com", + "withyoutube.com", + "hashbang.sh", + "hasura-app.io", + "hepforge.org", + "herokuapp.com", + "herokussl.com", + "moonscale.net", + "iki.fi", + "biz.at", + "info.at", + "info.cx", + "ac.leg.br", + "al.leg.br", + "am.leg.br", + "ap.leg.br", + "ba.leg.br", + "ce.leg.br", + "df.leg.br", + "es.leg.br", + "go.leg.br", + "ma.leg.br", + "mg.leg.br", + "ms.leg.br", + "mt.leg.br", + "pa.leg.br", + "pb.leg.br", + "pe.leg.br", + "pi.leg.br", + "pr.leg.br", + "rj.leg.br", + "rn.leg.br", + "ro.leg.br", + "rr.leg.br", + "rs.leg.br", + "sc.leg.br", + "se.leg.br", + "sp.leg.br", + "to.leg.br", + "pixolino.com", + "ipifony.net", + "*.triton.zone", + "*.cns.joyent.com", + "js.org", + "keymachine.de", + "knightpoint.systems", + "co.krd", + "edu.krd", + "git-repos.de", + "lcube-server.de", + "svn-repos.de", + "we.bs", + "barsy.bg", + "barsyonline.com", + "barsy.de", + "barsy.eu", + "barsy.in", + "barsy.net", + "barsy.online", + "barsy.support", + "*.magentosite.cloud", + "hb.cldmail.ru", + "cloud.metacentrum.cz", + "custom.metacentrum.cz", + "meteorapp.com", + "eu.meteorapp.com", + "co.pl", + "azurewebsites.net", + "azure-mobile.net", + "cloudapp.net", + "bmoattachments.org", + "net.ru", + "org.ru", + "pp.ru", + "bitballoon.com", + "netlify.com", + "4u.com", + "ngrok.io", + "nh-serv.co.uk", + "nfshost.com", + "nsupdate.info", + "nerdpol.ovh", + "blogsyte.com", + "brasilia.me", + "cable-modem.org", + "ciscofreak.com", + "collegefan.org", + "couchpotatofries.org", + "damnserver.com", + "ddns.me", + "ditchyourip.com", + "dnsfor.me", + "dnsiskinky.com", + "dvrcam.info", + "dynns.com", + "eating-organic.net", + "fantasyleague.cc", + "geekgalaxy.com", + "golffan.us", + "health-carereform.com", + "homesecuritymac.com", + "homesecuritypc.com", + "hopto.me", + "ilovecollege.info", + "loginto.me", + "mlbfan.org", + "mmafan.biz", + "myactivedirectory.com", + "mydissent.net", + "myeffect.net", + "mymediapc.net", + "mypsx.net", + "mysecuritycamera.com", + "mysecuritycamera.net", + "mysecuritycamera.org", + "net-freaks.com", + "nflfan.org", + "nhlfan.net", + "no-ip.ca", + "no-ip.co.uk", + "no-ip.net", + "noip.us", + "onthewifi.com", + "pgafan.net", + "point2this.com", + "pointto.us", + "privatizehealthinsurance.net", + "quicksytes.com", + "read-books.org", + "securitytactics.com", + "serveexchange.com", + "servehumour.com", + "servep2p.com", + "servesarcasm.com", + "stufftoread.com", + "ufcfan.org", + "unusualperson.com", + "workisboring.com", + "3utilities.com", + "bounceme.net", + "ddns.net", + "ddnsking.com", + "gotdns.ch", + "hopto.org", + "myftp.biz", + "myftp.org", + "myvnc.com", + "no-ip.biz", + "no-ip.info", + "no-ip.org", + "noip.me", + "redirectme.net", + "servebeer.com", + "serveblog.net", + "servecounterstrike.com", + "serveftp.com", + "servegame.com", + "servehalflife.com", + "servehttp.com", + "serveirc.com", + "serveminecraft.net", + "servemp3.com", + "servepics.com", + "servequake.com", + "sytes.net", + "webhop.me", + "zapto.org", + "stage.nodeart.io", + "nodum.co", + "nodum.io", + "nyc.mn", + "nom.ae", + "nom.ai", + "nom.al", + "nym.by", + "nym.bz", + "nom.cl", + "nom.gd", + "nom.gl", + "nym.gr", + "nom.gt", + "nom.hn", + "nom.im", + "nym.kz", + "nym.la", + "nom.li", + "nym.li", + "nym.lt", + "nym.lu", + "nym.me", + "nom.mk", + "nym.mx", + "nom.nu", + "nym.nz", + "nym.pe", + "nym.pt", + "nom.pw", + "nom.qa", + "nom.rs", + "nom.si", + "nym.sk", + "nym.su", + "nym.sx", + "nym.tw", + "nom.ug", + "nom.uy", + "nom.vc", + "nom.vg", + "cya.gg", + "nid.io", + "opencraft.hosting", + "operaunite.com", + "outsystemscloud.com", + "ownprovider.com", + "oy.lc", + "pgfog.com", + "pagefrontapp.com", + "art.pl", + "gliwice.pl", + "krakow.pl", + "poznan.pl", + "wroc.pl", + "zakopane.pl", + "pantheonsite.io", + "gotpantheon.com", + "mypep.link", + "on-web.fr", + "*.platform.sh", + "*.platformsh.site", + "xen.prgmr.com", + "priv.at", + "protonet.io", + "chirurgiens-dentistes-en-france.fr", + "byen.site", + "qa2.com", + "dev-myqnapcloud.com", + "alpha-myqnapcloud.com", + "myqnapcloud.com", + "*.quipelements.com", + "vapor.cloud", + "vaporcloud.io", + "rackmaze.com", + "rackmaze.net", + "rhcloud.com", + "resindevice.io", + "devices.resinstaging.io", + "hzc.io", + "wellbeingzone.eu", + "ptplus.fit", + "wellbeingzone.co.uk", + "sandcats.io", + "logoip.de", + "logoip.com", + "scrysec.com", + "firewall-gateway.com", + "firewall-gateway.de", + "my-gateway.de", + "my-router.de", + "spdns.de", + "spdns.eu", + "firewall-gateway.net", + "my-firewall.org", + "myfirewall.org", + "spdns.org", + "*.s5y.io", + "*.sensiosite.cloud", + "biz.ua", + "co.ua", + "pp.ua", + "shiftedit.io", + "myshopblocks.com", + "1kapp.com", + "appchizi.com", + "applinzi.com", + "sinaapp.com", + "vipsinaapp.com", + "bounty-full.com", + "alpha.bounty-full.com", + "beta.bounty-full.com", + "static.land", + "dev.static.land", + "sites.static.land", + "apps.lair.io", + "*.stolos.io", + "spacekit.io", + "stackspace.space", + "storj.farm", + "temp-dns.com", + "diskstation.me", + "dscloud.biz", + "dscloud.me", + "dscloud.mobi", + "dsmynas.com", + "dsmynas.net", + "dsmynas.org", + "familyds.com", + "familyds.net", + "familyds.org", + "i234.me", + "myds.me", + "synology.me", + "vpnplus.to", + "taifun-dns.de", + "gda.pl", + "gdansk.pl", + "gdynia.pl", + "med.pl", + "sopot.pl", + "cust.dev.thingdust.io", + "cust.disrec.thingdust.io", + "cust.prod.thingdust.io", + "cust.testing.thingdust.io", + "bloxcms.com", + "townnews-staging.com", + "12hp.at", + "2ix.at", + "4lima.at", + "lima-city.at", + "12hp.ch", + "2ix.ch", + "4lima.ch", + "lima-city.ch", + "trafficplex.cloud", + "de.cool", + "12hp.de", + "2ix.de", + "4lima.de", + "lima-city.de", + "1337.pictures", + "clan.rip", + "lima-city.rocks", + "webspace.rocks", + "lima.zone", + "*.transurl.be", + "*.transurl.eu", + "*.transurl.nl", + "tuxfamily.org", + "dd-dns.de", + "diskstation.eu", + "diskstation.org", + "dray-dns.de", + "draydns.de", + "dyn-vpn.de", + "dynvpn.de", + "mein-vigor.de", + "my-vigor.de", + "my-wan.de", + "syno-ds.de", + "synology-diskstation.de", + "synology-ds.de", + "uber.space", + "hk.com", + "hk.org", + "ltd.hk", + "inc.hk", + "lib.de.us", + "router.management", + "v-info.info", + "wedeploy.io", + "wedeploy.me", + "wedeploy.sh", + "remotewd.com", + "wmflabs.org", + "cistron.nl", + "demon.nl", + "xs4all.space", + "yolasite.com", + "ybo.faith", + "yombo.me", + "homelink.one", + "ybo.party", + "ybo.review", + "ybo.science", + "ybo.trade", + "za.net", + "za.org", + "now.sh", +} + +var nodeLabels = [...]string{ + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "ac", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "ad", + "adac", + "ads", + "adult", + "ae", + "aeg", + "aero", + "aetna", + "af", + "afamilycompany", + "afl", + "africa", + "ag", + "agakhan", + "agency", + "ai", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "al", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "am", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "ao", + "aol", + "apartments", + "app", + "apple", + "aq", + "aquarelle", + "ar", + "arab", + "aramco", + "archi", + "army", + "arpa", + "art", + "arte", + "as", + "asda", + "asia", + "associates", + "at", + "athleta", + "attorney", + "au", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aw", + "aws", + "ax", + "axa", + "az", + "azure", + "ba", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bb", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "bd", + "be", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bf", + "bg", + "bh", + "bharti", + "bi", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "biz", + "bj", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bm", + "bms", + "bmw", + "bn", + "bnl", + "bnpparibas", + "bo", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "br", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "bs", + "bt", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bv", + "bw", + "by", + "bz", + "bzh", + "ca", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "cat", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "cc", + "cd", + "ceb", + "center", + "ceo", + "cern", + "cf", + "cfa", + "cfd", + "cg", + "ch", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "christmas", + "chrome", + "chrysler", + "church", + "ci", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "ck", + "cl", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "cm", + "cn", + "co", + "coach", + "codes", + "coffee", + "college", + "cologne", + "com", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "coop", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "cr", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cu", + "cuisinella", + "cv", + "cw", + "cx", + "cy", + "cymru", + "cyou", + "cz", + "dabur", + "dad", + "dance", + "data", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "de", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dj", + "dk", + "dm", + "dnp", + "do", + "docs", + "doctor", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dvr", + "dz", + "earth", + "eat", + "ec", + "eco", + "edeka", + "edu", + "education", + "ee", + "eg", + "email", + "emerck", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "er", + "ericsson", + "erni", + "es", + "esq", + "estate", + "esurance", + "et", + "etisalat", + "eu", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fi", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "fj", + "fk", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "fly", + "fm", + "fo", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "fr", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "ga", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gb", + "gbiz", + "gd", + "gdn", + "ge", + "gea", + "gent", + "genting", + "george", + "gf", + "gg", + "ggee", + "gh", + "gi", + "gift", + "gifts", + "gives", + "giving", + "gl", + "glade", + "glass", + "gle", + "global", + "globo", + "gm", + "gmail", + "gmbh", + "gmo", + "gmx", + "gn", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "gov", + "gp", + "gq", + "gr", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "grocery", + "group", + "gs", + "gt", + "gu", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "gw", + "gy", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hk", + "hkt", + "hm", + "hn", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "hospital", + "host", + "hosting", + "hot", + "hoteles", + "hotels", + "hotmail", + "house", + "how", + "hr", + "hsbc", + "ht", + "hu", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "id", + "ie", + "ieee", + "ifm", + "ikano", + "il", + "im", + "imamat", + "imdb", + "immo", + "immobilien", + "in", + "industries", + "infiniti", + "info", + "ing", + "ink", + "institute", + "insurance", + "insure", + "int", + "intel", + "international", + "intuit", + "investments", + "io", + "ipiranga", + "iq", + "ir", + "irish", + "is", + "iselect", + "ismaili", + "ist", + "istanbul", + "it", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "je", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jm", + "jmp", + "jnj", + "jo", + "jobs", + "joburg", + "jot", + "joy", + "jp", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "ke", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kg", + "kh", + "ki", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "km", + "kn", + "koeln", + "komatsu", + "kosher", + "kp", + "kpmg", + "kpn", + "kr", + "krd", + "kred", + "kuokgroup", + "kw", + "ky", + "kyoto", + "kz", + "la", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lb", + "lc", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "li", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "lk", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "lr", + "ls", + "lt", + "ltd", + "ltda", + "lu", + "lundbeck", + "lupin", + "luxe", + "luxury", + "lv", + "ly", + "ma", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "map", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mc", + "mckinsey", + "md", + "me", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "merckmsd", + "metlife", + "mg", + "mh", + "miami", + "microsoft", + "mil", + "mini", + "mint", + "mit", + "mitsubishi", + "mk", + "ml", + "mlb", + "mls", + "mm", + "mma", + "mn", + "mo", + "mobi", + "mobile", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "mp", + "mq", + "mr", + "ms", + "msd", + "mt", + "mtn", + "mtpc", + "mtr", + "mu", + "museum", + "mutual", + "mv", + "mw", + "mx", + "my", + "mz", + "na", + "nab", + "nadex", + "nagoya", + "name", + "nationwide", + "natura", + "navy", + "nba", + "nc", + "ne", + "nec", + "net", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nf", + "nfl", + "ng", + "ngo", + "nhk", + "ni", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nl", + "no", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "np", + "nr", + "nra", + "nrw", + "ntt", + "nu", + "nyc", + "nz", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "om", + "omega", + "one", + "ong", + "onion", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "org", + "organic", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "pa", + "page", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "pccw", + "pe", + "pet", + "pf", + "pfizer", + "pg", + "ph", + "pharmacy", + "phd", + "philips", + "phone", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "pk", + "pl", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pm", + "pn", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "post", + "pr", + "pramerica", + "praxi", + "press", + "prime", + "pro", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "ps", + "pt", + "pub", + "pw", + "pwc", + "py", + "qa", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "radio", + "raid", + "re", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "ro", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rs", + "rsvp", + "ru", + "rugby", + "ruhr", + "run", + "rw", + "rwe", + "ryukyu", + "sa", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sb", + "sbi", + "sbs", + "sc", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "sd", + "se", + "search", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "sg", + "sh", + "shangrila", + "sharp", + "shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shop", + "shopping", + "shouji", + "show", + "showtime", + "shriram", + "si", + "silk", + "sina", + "singles", + "site", + "sj", + "sk", + "ski", + "skin", + "sky", + "skype", + "sl", + "sling", + "sm", + "smart", + "smile", + "sn", + "sncf", + "so", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "sr", + "srl", + "srt", + "st", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "su", + "sucks", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "sv", + "swatch", + "swiftcover", + "swiss", + "sx", + "sy", + "sydney", + "symantec", + "systems", + "sz", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tc", + "tci", + "td", + "tdk", + "team", + "tech", + "technology", + "tel", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "tf", + "tg", + "th", + "thd", + "theater", + "theatre", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tj", + "tjmaxx", + "tjx", + "tk", + "tkmaxx", + "tl", + "tm", + "tmall", + "tn", + "to", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "tr", + "trade", + "trading", + "training", + "travel", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tt", + "tube", + "tui", + "tunes", + "tushu", + "tv", + "tvs", + "tw", + "tz", + "ua", + "ubank", + "ubs", + "uconnect", + "ug", + "uk", + "unicom", + "university", + "uno", + "uol", + "ups", + "us", + "uy", + "uz", + "va", + "vacations", + "vana", + "vanguard", + "vc", + "ve", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "vg", + "vi", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vn", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vu", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "wf", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "ws", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--2scrj9c", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3e0b707e", + "xn--3hcrj9c", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45br5cyl", + "xn--45brj9c", + "xn--45q11c", + "xn--4gbrim", + "xn--54b7fta0cc", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80ao21a", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--90a3ac", + "xn--90ae", + "xn--90ais", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--clchc0ea0b2g2a9gcd", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--d1alf", + "xn--e1a4c", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--fjq720a", + "xn--flw351e", + "xn--fpcrj9c3d", + "xn--fzc2c9e2c", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gecrj9c", + "xn--gk3at1e", + "xn--h2breg3eve", + "xn--h2brj9c", + "xn--h2brj9c8c", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--j1amh", + "xn--j6w193g", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kprw13d", + "xn--kpry57d", + "xn--kpu716f", + "xn--kput3i", + "xn--l1acc", + "xn--lgbbat1ad8j", + "xn--mgb2ddes", + "xn--mgb9awbf", + "xn--mgba3a3ejt", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbaam7a8h", + "xn--mgbab2bd", + "xn--mgbai9a5eva00b", + "xn--mgbai9azgqp6j", + "xn--mgbayh7gpa", + "xn--mgbb9fbpob", + "xn--mgbbh1a71e", + "xn--mgbc0a9azcg", + "xn--mgbca7dzdo", + "xn--mgberp4a5d4a87g", + "xn--mgberp4a5d4ar", + "xn--mgbgu82a", + "xn--mgbi4ecexp", + "xn--mgbpl2fh", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbt3dhd", + "xn--mgbtf8fl", + "xn--mgbtx2b", + "xn--mgbx4cd0ab", + "xn--mix082f", + "xn--mix891f", + "xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nnx388a", + "xn--node", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--o3cw4h", + "xn--ogbpf8fl", + "xn--p1acf", + "xn--p1ai", + "xn--pbt977c", + "xn--pgbs0dh", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--qxam", + "xn--rhqv96g", + "xn--rovu88b", + "xn--rvc1e0am3e", + "xn--s9brj9c", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--wgbh1c", + "xn--wgbl6a", + "xn--xhq521b", + "xn--xkc2al3hye2a", + "xn--xkc2dl3a5ee0h", + "xn--y9a3aq", + "xn--yfro4i67o", + "xn--ygbi2ammx", + "xn--zfr164b", + "xperia", + "xxx", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "ye", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yt", + "yun", + "za", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zm", + "zone", + "zuerich", + "zw", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "nom", + "ac", + "blogspot", + "co", + "gov", + "mil", + "net", + "nom", + "org", + "sch", + "accident-investigation", + "accident-prevention", + "aerobatic", + "aeroclub", + "aerodrome", + "agents", + "air-surveillance", + "air-traffic-control", + "aircraft", + "airline", + "airport", + "airtraffic", + "ambulance", + "amusement", + "association", + "author", + "ballooning", + "broker", + "caa", + "cargo", + "catering", + "certification", + "championship", + "charter", + "civilaviation", + "club", + "conference", + "consultant", + "consulting", + "control", + "council", + "crew", + "design", + "dgca", + "educator", + "emergency", + "engine", + "engineer", + "entertainment", + "equipment", + "exchange", + "express", + "federation", + "flight", + "freight", + "fuel", + "gliding", + "government", + "groundhandling", + "group", + "hanggliding", + "homebuilt", + "insurance", + "journal", + "journalist", + "leasing", + "logistics", + "magazine", + "maintenance", + "media", + "microlight", + "modelling", + "navigation", + "parachuting", + "paragliding", + "passenger-association", + "pilot", + "press", + "production", + "recreation", + "repbody", + "res", + "research", + "rotorcraft", + "safety", + "scientist", + "services", + "show", + "skydiving", + "software", + "student", + "trader", + "trading", + "trainer", + "union", + "workinggroup", + "works", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "net", + "nom", + "org", + "com", + "net", + "nom", + "off", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "blogspot", + "co", + "ed", + "gv", + "it", + "og", + "pb", + "com", + "edu", + "gob", + "gov", + "int", + "mil", + "musica", + "net", + "org", + "tur", + "blogspot", + "e164", + "in-addr", + "ip6", + "iris", + "uri", + "urn", + "gov", + "cloudns", + "12hp", + "2ix", + "4lima", + "ac", + "biz", + "co", + "futurecms", + "futurehosting", + "futuremailing", + "gv", + "info", + "lima-city", + "or", + "ortsinfo", + "priv", + "blogspot", + "ex", + "kunden", + "act", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "info", + "net", + "nsw", + "nt", + "org", + "oz", + "qld", + "sa", + "tas", + "vic", + "wa", + "blogspot", + "act", + "nsw", + "nt", + "qld", + "sa", + "tas", + "vic", + "wa", + "qld", + "sa", + "tas", + "vic", + "wa", + "com", + "biz", + "com", + "edu", + "gov", + "info", + "int", + "mil", + "name", + "net", + "org", + "pp", + "pro", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "net", + "org", + "store", + "tv", + "ac", + "blogspot", + "transurl", + "webhosting", + "gov", + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "a", + "b", + "barsy", + "blogspot", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "or", + "org", + "cloudns", + "dscloud", + "dyndns", + "for-better", + "for-more", + "for-some", + "for-the", + "mmafan", + "myftp", + "no-ip", + "selfip", + "webhop", + "asso", + "barreau", + "blogspot", + "gouv", + "com", + "edu", + "gov", + "net", + "org", + "academia", + "agro", + "arte", + "blog", + "bolivia", + "ciencia", + "com", + "cooperativa", + "democracia", + "deporte", + "ecologia", + "economia", + "edu", + "empresa", + "gob", + "indigena", + "industria", + "info", + "int", + "medicina", + "mil", + "movimiento", + "musica", + "natural", + "net", + "nombre", + "noticias", + "org", + "patria", + "plurinacional", + "politica", + "profesional", + "pueblo", + "revista", + "salud", + "tecnologia", + "tksat", + "transporte", + "tv", + "web", + "wiki", + "9guacu", + "abc", + "adm", + "adv", + "agr", + "aju", + "am", + "anani", + "aparecida", + "arq", + "art", + "ato", + "b", + "belem", + "bhz", + "bio", + "blog", + "bmd", + "boavista", + "bsb", + "campinagrande", + "campinas", + "caxias", + "cim", + "cng", + "cnt", + "com", + "contagem", + "coop", + "cri", + "cuiaba", + "curitiba", + "def", + "ecn", + "eco", + "edu", + "emp", + "eng", + "esp", + "etc", + "eti", + "far", + "feira", + "flog", + "floripa", + "fm", + "fnd", + "fortal", + "fot", + "foz", + "fst", + "g12", + "ggf", + "goiania", + "gov", + "gru", + "imb", + "ind", + "inf", + "jab", + "jampa", + "jdf", + "joinville", + "jor", + "jus", + "leg", + "lel", + "londrina", + "macapa", + "maceio", + "manaus", + "maringa", + "mat", + "med", + "mil", + "morena", + "mp", + "mus", + "natal", + "net", + "niteroi", + "nom", + "not", + "ntr", + "odo", + "org", + "osasco", + "palmas", + "poa", + "ppg", + "pro", + "psc", + "psi", + "pvh", + "qsl", + "radio", + "rec", + "recife", + "ribeirao", + "rio", + "riobranco", + "riopreto", + "salvador", + "sampa", + "santamaria", + "santoandre", + "saobernardo", + "saogonca", + "sjc", + "slg", + "slz", + "sorocaba", + "srv", + "taxi", + "teo", + "the", + "tmp", + "trd", + "tur", + "tv", + "udi", + "vet", + "vix", + "vlog", + "wiki", + "zlg", + "blogspot", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", + "com", + "edu", + "gov", + "net", + "org", + "we", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "com", + "gov", + "mil", + "nym", + "of", + "blogspot", + "com", + "edu", + "gov", + "net", + "nym", + "org", + "za", + "1password", + "ab", + "awdev", + "bc", + "blogspot", + "co", + "gc", + "mb", + "nb", + "nf", + "nl", + "no-ip", + "ns", + "nt", + "nu", + "on", + "pe", + "qc", + "sk", + "yk", + "cloudns", + "fantasyleague", + "ftpaccess", + "game-server", + "myphotos", + "scrapping", + "twmail", + "gov", + "blogspot", + "12hp", + "2ix", + "4lima", + "blogspot", + "gotdns", + "lima-city", + "square7", + "ac", + "asso", + "co", + "com", + "ed", + "edu", + "go", + "gouv", + "int", + "md", + "net", + "or", + "org", + "presse", + "xn--aroport-bya", + "www", + "blogspot", + "co", + "gob", + "gov", + "mil", + "nom", + "magentosite", + "sensiosite", + "statics", + "trafficplex", + "vapor", + "cloudns", + "co", + "com", + "gov", + "net", + "ac", + "ah", + "bj", + "com", + "cq", + "edu", + "fj", + "gd", + "gov", + "gs", + "gx", + "gz", + "ha", + "hb", + "he", + "hi", + "hk", + "hl", + "hn", + "jl", + "js", + "jx", + "ln", + "mil", + "mo", + "net", + "nm", + "nx", + "org", + "qh", + "sc", + "sd", + "sh", + "sn", + "sx", + "tj", + "tw", + "xj", + "xn--55qx5d", + "xn--io0a7i", + "xn--od0alg", + "xz", + "yn", + "zj", + "amazonaws", + "cn-north-1", + "compute", + "eb", + "elb", + "s3", + "cn-north-1", + "arts", + "com", + "edu", + "firm", + "gov", + "info", + "int", + "mil", + "net", + "nodum", + "nom", + "org", + "rec", + "web", + "blogspot", + "0emm", + "1kapp", + "1password", + "3utilities", + "4u", + "africa", + "alpha-myqnapcloud", + "amazonaws", + "appchizi", + "applinzi", + "appspot", + "ar", + "barsyonline", + "betainabox", + "bitballoon", + "blogdns", + "blogspot", + "blogsyte", + "bloxcms", + "bounty-full", + "bplaced", + "br", + "cechire", + "ciscofreak", + "cloudcontrolapp", + "cloudcontrolled", + "cn", + "co", + "codespot", + "damnserver", + "ddnsfree", + "ddnsgeek", + "ddnsking", + "de", + "dev-myqnapcloud", + "ditchyourip", + "dnsalias", + "dnsdojo", + "dnsiskinky", + "doesntexist", + "dontexist", + "doomdns", + "drayddns", + "dreamhosters", + "dsmynas", + "dyn-o-saur", + "dynalias", + "dyndns-at-home", + "dyndns-at-work", + "dyndns-blog", + "dyndns-free", + "dyndns-home", + "dyndns-ip", + "dyndns-mail", + "dyndns-office", + "dyndns-pics", + "dyndns-remote", + "dyndns-server", + "dyndns-web", + "dyndns-wiki", + "dyndns-work", + "dynns", + "elasticbeanstalk", + "est-a-la-maison", + "est-a-la-masion", + "est-le-patron", + "est-mon-blogueur", + "eu", + "evennode", + "familyds", + "fbsbx", + "firebaseapp", + "firewall-gateway", + "flynnhub", + "freebox-os", + "freeboxos", + "from-ak", + "from-al", + "from-ar", + "from-ca", + "from-ct", + "from-dc", + "from-de", + "from-fl", + "from-ga", + "from-hi", + "from-ia", + "from-id", + "from-il", + "from-in", + "from-ks", + "from-ky", + "from-ma", + "from-md", + "from-mi", + "from-mn", + "from-mo", + "from-ms", + "from-mt", + "from-nc", + "from-nd", + "from-ne", + "from-nh", + "from-nj", + "from-nm", + "from-nv", + "from-oh", + "from-ok", + "from-or", + "from-pa", + "from-pr", + "from-ri", + "from-sc", + "from-sd", + "from-tn", + "from-tx", + "from-ut", + "from-va", + "from-vt", + "from-wa", + "from-wi", + "from-wv", + "from-wy", + "gb", + "geekgalaxy", + "getmyip", + "giize", + "githubusercontent", + "gleeze", + "googleapis", + "googlecode", + "gotdns", + "gotpantheon", + "gr", + "health-carereform", + "herokuapp", + "herokussl", + "hk", + "hobby-site", + "homelinux", + "homesecuritymac", + "homesecuritypc", + "homeunix", + "hu", + "iamallama", + "is-a-anarchist", + "is-a-blogger", + "is-a-bookkeeper", + "is-a-bulls-fan", + "is-a-caterer", + "is-a-chef", + "is-a-conservative", + "is-a-cpa", + "is-a-cubicle-slave", + "is-a-democrat", + "is-a-designer", + "is-a-doctor", + "is-a-financialadvisor", + "is-a-geek", + "is-a-green", + "is-a-guru", + "is-a-hard-worker", + "is-a-hunter", + "is-a-landscaper", + "is-a-lawyer", + "is-a-liberal", + "is-a-libertarian", + "is-a-llama", + "is-a-musician", + "is-a-nascarfan", + "is-a-nurse", + "is-a-painter", + "is-a-personaltrainer", + "is-a-photographer", + "is-a-player", + "is-a-republican", + "is-a-rockstar", + "is-a-socialist", + "is-a-student", + "is-a-teacher", + "is-a-techie", + "is-a-therapist", + "is-an-accountant", + "is-an-actor", + "is-an-actress", + "is-an-anarchist", + "is-an-artist", + "is-an-engineer", + "is-an-entertainer", + "is-certified", + "is-gone", + "is-into-anime", + "is-into-cars", + "is-into-cartoons", + "is-into-games", + "is-leet", + "is-not-certified", + "is-slick", + "is-uberleet", + "is-with-theband", + "isa-geek", + "isa-hockeynut", + "issmarterthanyou", + "jdevcloud", + "joyent", + "jpn", + "kozow", + "kr", + "likes-pie", + "likescandy", + "logoip", + "loseyourip", + "meteorapp", + "mex", + "myactivedirectory", + "myasustor", + "mydrobo", + "myqnapcloud", + "mysecuritycamera", + "myshopblocks", + "mytuleap", + "myvnc", + "neat-url", + "net-freaks", + "netlify", + "nfshost", + "no", + "on-aptible", + "onthewifi", + "ooguy", + "operaunite", + "outsystemscloud", + "ownprovider", + "pagefrontapp", + "pagespeedmobilizer", + "pgfog", + "pixolino", + "point2this", + "prgmr", + "publishproxy", + "qa2", + "qc", + "quicksytes", + "quipelements", + "rackmaze", + "remotewd", + "rhcloud", + "ru", + "sa", + "saves-the-whales", + "scrysec", + "se", + "securitytactics", + "selfip", + "sells-for-less", + "sells-for-u", + "servebbs", + "servebeer", + "servecounterstrike", + "serveexchange", + "serveftp", + "servegame", + "servehalflife", + "servehttp", + "servehumour", + "serveirc", + "servemp3", + "servep2p", + "servepics", + "servequake", + "servesarcasm", + "simple-url", + "sinaapp", + "space-to-rent", + "stufftoread", + "teaches-yoga", + "temp-dns", + "theworkpc", + "townnews-staging", + "uk", + "unusualperson", + "us", + "uy", + "vipsinaapp", + "withgoogle", + "withyoutube", + "workisboring", + "wpdevcloud", + "writesthisblog", + "xenapponazure", + "yolasite", + "za", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "compute", + "compute-1", + "elb", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "s3", + "s3-ap-northeast-1", + "s3-ap-northeast-2", + "s3-ap-south-1", + "s3-ap-southeast-1", + "s3-ap-southeast-2", + "s3-ca-central-1", + "s3-eu-central-1", + "s3-eu-west-1", + "s3-eu-west-2", + "s3-eu-west-3", + "s3-external-1", + "s3-fips-us-gov-west-1", + "s3-sa-east-1", + "s3-us-east-2", + "s3-us-gov-west-1", + "s3-us-west-1", + "s3-us-west-2", + "s3-website-ap-northeast-1", + "s3-website-ap-southeast-1", + "s3-website-ap-southeast-2", + "s3-website-eu-west-1", + "s3-website-sa-east-1", + "s3-website-us-east-1", + "s3-website-us-west-1", + "s3-website-us-west-2", + "sa-east-1", + "us-east-1", + "us-east-2", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "alpha", + "beta", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-west-1", + "us-west-1", + "us-west-2", + "eu-1", + "eu-2", + "eu-3", + "eu-4", + "us-1", + "us-2", + "us-3", + "us-4", + "apps", + "cns", + "eu", + "xen", + "de", + "ac", + "co", + "ed", + "fi", + "go", + "or", + "sa", + "com", + "edu", + "gov", + "inf", + "net", + "org", + "blogspot", + "com", + "edu", + "net", + "org", + "ath", + "gov", + "info", + "ac", + "biz", + "com", + "ekloges", + "gov", + "ltd", + "name", + "net", + "org", + "parliament", + "press", + "pro", + "tm", + "blogspot", + "blogspot", + "co", + "e4", + "metacentrum", + "realm", + "cloud", + "custom", + "12hp", + "2ix", + "4lima", + "barsy", + "blogspot", + "bplaced", + "com", + "cosidns", + "dd-dns", + "ddnss", + "dnshome", + "dnsupdater", + "dray-dns", + "draydns", + "dyn-ip24", + "dyn-vpn", + "dynamisches-dns", + "dyndns1", + "dynvpn", + "firewall-gateway", + "fuettertdasnetz", + "git-repos", + "goip", + "home-webserver", + "internet-dns", + "isteingeek", + "istmein", + "keymachine", + "l-o-g-i-n", + "lcube-server", + "lebtimnetz", + "leitungsen", + "lima-city", + "logoip", + "mein-vigor", + "my-gateway", + "my-router", + "my-vigor", + "my-wan", + "myhome-server", + "spdns", + "square7", + "svn-repos", + "syno-ds", + "synology-diskstation", + "synology-ds", + "taifun-dns", + "traeumtgerade", + "dyn", + "dyn", + "dyndns", + "dyn", + "biz", + "blogspot", + "co", + "firm", + "reg", + "store", + "com", + "edu", + "gov", + "net", + "org", + "art", + "com", + "edu", + "gob", + "gov", + "mil", + "net", + "org", + "sld", + "web", + "art", + "asso", + "com", + "edu", + "gov", + "net", + "org", + "pol", + "com", + "edu", + "fin", + "gob", + "gov", + "info", + "k12", + "med", + "mil", + "net", + "org", + "pro", + "aip", + "com", + "edu", + "fie", + "gov", + "lib", + "med", + "org", + "pri", + "riik", + "blogspot", + "com", + "edu", + "eun", + "gov", + "mil", + "name", + "net", + "org", + "sci", + "blogspot", + "com", + "edu", + "gob", + "nom", + "org", + "blogspot", + "compute", + "biz", + "com", + "edu", + "gov", + "info", + "name", + "net", + "org", + "1password", + "barsy", + "cloudns", + "diskstation", + "mycd", + "spdns", + "transurl", + "wellbeingzone", + "party", + "user", + "ybo", + "storj", + "aland", + "blogspot", + "dy", + "iki", + "ptplus", + "aeroport", + "assedic", + "asso", + "avocat", + "avoues", + "blogspot", + "cci", + "chambagri", + "chirurgiens-dentistes", + "chirurgiens-dentistes-en-france", + "com", + "experts-comptables", + "fbx-os", + "fbxos", + "freebox-os", + "freeboxos", + "geometre-expert", + "gouv", + "greta", + "huissier-justice", + "medecin", + "nom", + "notaires", + "on-web", + "pharmacien", + "port", + "prd", + "presse", + "tm", + "veterinaire", + "nom", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "pvt", + "co", + "cya", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "org", + "com", + "edu", + "gov", + "ltd", + "mod", + "org", + "co", + "com", + "edu", + "net", + "nom", + "org", + "ac", + "com", + "edu", + "gov", + "net", + "org", + "cloud", + "asso", + "com", + "edu", + "mobi", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "net", + "nym", + "org", + "com", + "edu", + "gob", + "ind", + "mil", + "net", + "nom", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "idv", + "inc", + "ltd", + "net", + "org", + "xn--55qx5d", + "xn--ciqpn", + "xn--gmq050i", + "xn--gmqw5a", + "xn--io0a7i", + "xn--lcvr32d", + "xn--mk0axi", + "xn--mxtq1m", + "xn--od0alg", + "xn--od0aq3b", + "xn--tn0ag", + "xn--uc0atv", + "xn--uc0ay4a", + "xn--wcvs22d", + "xn--zf0avx", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "org", + "cloudaccess", + "freesite", + "opencraft", + "blogspot", + "com", + "from", + "iz", + "name", + "adult", + "art", + "asso", + "com", + "coop", + "edu", + "firm", + "gouv", + "info", + "med", + "net", + "org", + "perso", + "pol", + "pro", + "rel", + "shop", + "2000", + "agrar", + "blogspot", + "bolt", + "casino", + "city", + "co", + "erotica", + "erotika", + "film", + "forum", + "games", + "hotel", + "info", + "ingatlan", + "jogasz", + "konyvelo", + "lakas", + "media", + "news", + "org", + "priv", + "reklam", + "sex", + "shop", + "sport", + "suli", + "szex", + "tm", + "tozsde", + "utazas", + "video", + "ac", + "biz", + "co", + "desa", + "go", + "mil", + "my", + "net", + "or", + "sch", + "web", + "blogspot", + "blogspot", + "gov", + "ac", + "co", + "gov", + "idf", + "k12", + "muni", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "net", + "nom", + "org", + "ro", + "tt", + "tv", + "ltd", + "plc", + "ac", + "barsy", + "blogspot", + "cloudns", + "co", + "edu", + "firm", + "gen", + "gov", + "ind", + "mil", + "net", + "nic", + "org", + "res", + "barrel-of-knowledge", + "barrell-of-knowledge", + "cloudns", + "dvrcam", + "dynamic-dns", + "dyndns", + "for-our", + "groks-the", + "groks-this", + "here-for-more", + "ilovecollege", + "knowsitall", + "no-ip", + "nsupdate", + "selfip", + "v-info", + "webhop", + "eu", + "backplaneapp", + "boxfuse", + "browsersafetymark", + "com", + "dedyn", + "definima", + "drud", + "enonic", + "github", + "gitlab", + "hasura-app", + "hzc", + "lair", + "ngrok", + "nid", + "nodeart", + "nodum", + "pantheonsite", + "protonet", + "resindevice", + "resinstaging", + "s5y", + "sandcats", + "shiftedit", + "spacekit", + "stolos", + "thingdust", + "vaporcloud", + "wedeploy", + "customer", + "apps", + "stage", + "devices", + "dev", + "disrec", + "prod", + "testing", + "cust", + "cust", + "cust", + "cust", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "ac", + "co", + "gov", + "id", + "net", + "org", + "sch", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "blogspot", + "com", + "cupcake", + "edu", + "gov", + "int", + "net", + "org", + "abr", + "abruzzo", + "ag", + "agrigento", + "al", + "alessandria", + "alto-adige", + "altoadige", + "an", + "ancona", + "andria-barletta-trani", + "andria-trani-barletta", + "andriabarlettatrani", + "andriatranibarletta", + "ao", + "aosta", + "aosta-valley", + "aostavalley", + "aoste", + "ap", + "aq", + "aquila", + "ar", + "arezzo", + "ascoli-piceno", + "ascolipiceno", + "asti", + "at", + "av", + "avellino", + "ba", + "balsan", + "bari", + "barletta-trani-andria", + "barlettatraniandria", + "bas", + "basilicata", + "belluno", + "benevento", + "bergamo", + "bg", + "bi", + "biella", + "bl", + "blogspot", + "bn", + "bo", + "bologna", + "bolzano", + "bozen", + "br", + "brescia", + "brindisi", + "bs", + "bt", + "bz", + "ca", + "cagliari", + "cal", + "calabria", + "caltanissetta", + "cam", + "campania", + "campidano-medio", + "campidanomedio", + "campobasso", + "carbonia-iglesias", + "carboniaiglesias", + "carrara-massa", + "carraramassa", + "caserta", + "catania", + "catanzaro", + "cb", + "ce", + "cesena-forli", + "cesenaforli", + "ch", + "chieti", + "ci", + "cl", + "cn", + "co", + "como", + "cosenza", + "cr", + "cremona", + "crotone", + "cs", + "ct", + "cuneo", + "cz", + "dell-ogliastra", + "dellogliastra", + "edu", + "emilia-romagna", + "emiliaromagna", + "emr", + "en", + "enna", + "fc", + "fe", + "fermo", + "ferrara", + "fg", + "fi", + "firenze", + "florence", + "fm", + "foggia", + "forli-cesena", + "forlicesena", + "fr", + "friuli-v-giulia", + "friuli-ve-giulia", + "friuli-vegiulia", + "friuli-venezia-giulia", + "friuli-veneziagiulia", + "friuli-vgiulia", + "friuliv-giulia", + "friulive-giulia", + "friulivegiulia", + "friulivenezia-giulia", + "friuliveneziagiulia", + "friulivgiulia", + "frosinone", + "fvg", + "ge", + "genoa", + "genova", + "go", + "gorizia", + "gov", + "gr", + "grosseto", + "iglesias-carbonia", + "iglesiascarbonia", + "im", + "imperia", + "is", + "isernia", + "kr", + "la-spezia", + "laquila", + "laspezia", + "latina", + "laz", + "lazio", + "lc", + "le", + "lecce", + "lecco", + "li", + "lig", + "liguria", + "livorno", + "lo", + "lodi", + "lom", + "lombardia", + "lombardy", + "lt", + "lu", + "lucania", + "lucca", + "macerata", + "mantova", + "mar", + "marche", + "massa-carrara", + "massacarrara", + "matera", + "mb", + "mc", + "me", + "medio-campidano", + "mediocampidano", + "messina", + "mi", + "milan", + "milano", + "mn", + "mo", + "modena", + "mol", + "molise", + "monza", + "monza-brianza", + "monza-e-della-brianza", + "monzabrianza", + "monzaebrianza", + "monzaedellabrianza", + "ms", + "mt", + "na", + "naples", + "napoli", + "no", + "novara", + "nu", + "nuoro", + "og", + "ogliastra", + "olbia-tempio", + "olbiatempio", + "or", + "oristano", + "ot", + "pa", + "padova", + "padua", + "palermo", + "parma", + "pavia", + "pc", + "pd", + "pe", + "perugia", + "pesaro-urbino", + "pesarourbino", + "pescara", + "pg", + "pi", + "piacenza", + "piedmont", + "piemonte", + "pisa", + "pistoia", + "pmn", + "pn", + "po", + "pordenone", + "potenza", + "pr", + "prato", + "pt", + "pu", + "pug", + "puglia", + "pv", + "pz", + "ra", + "ragusa", + "ravenna", + "rc", + "re", + "reggio-calabria", + "reggio-emilia", + "reggiocalabria", + "reggioemilia", + "rg", + "ri", + "rieti", + "rimini", + "rm", + "rn", + "ro", + "roma", + "rome", + "rovigo", + "sa", + "salerno", + "sar", + "sardegna", + "sardinia", + "sassari", + "savona", + "si", + "sic", + "sicilia", + "sicily", + "siena", + "siracusa", + "so", + "sondrio", + "sp", + "sr", + "ss", + "suedtirol", + "sv", + "ta", + "taa", + "taranto", + "te", + "tempio-olbia", + "tempioolbia", + "teramo", + "terni", + "tn", + "to", + "torino", + "tos", + "toscana", + "tp", + "tr", + "trani-andria-barletta", + "trani-barletta-andria", + "traniandriabarletta", + "tranibarlettaandria", + "trapani", + "trentino", + "trentino-a-adige", + "trentino-aadige", + "trentino-alto-adige", + "trentino-altoadige", + "trentino-s-tirol", + "trentino-stirol", + "trentino-sud-tirol", + "trentino-sudtirol", + "trentino-sued-tirol", + "trentino-suedtirol", + "trentinoa-adige", + "trentinoaadige", + "trentinoalto-adige", + "trentinoaltoadige", + "trentinos-tirol", + "trentinostirol", + "trentinosud-tirol", + "trentinosudtirol", + "trentinosued-tirol", + "trentinosuedtirol", + "trento", + "treviso", + "trieste", + "ts", + "turin", + "tuscany", + "tv", + "ud", + "udine", + "umb", + "umbria", + "urbino-pesaro", + "urbinopesaro", + "va", + "val-d-aosta", + "val-daosta", + "vald-aosta", + "valdaosta", + "valle-aosta", + "valle-d-aosta", + "valle-daosta", + "valleaosta", + "valled-aosta", + "valledaosta", + "vallee-aoste", + "valleeaoste", + "vao", + "varese", + "vb", + "vc", + "vda", + "ve", + "ven", + "veneto", + "venezia", + "venice", + "verbania", + "vercelli", + "verona", + "vi", + "vibo-valentia", + "vibovalentia", + "vicenza", + "viterbo", + "vr", + "vs", + "vt", + "vv", + "co", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "sch", + "ac", + "ad", + "aichi", + "akita", + "aomori", + "blogspot", + "chiba", + "co", + "ed", + "ehime", + "fukui", + "fukuoka", + "fukushima", + "gifu", + "go", + "gr", + "gunma", + "hiroshima", + "hokkaido", + "hyogo", + "ibaraki", + "ishikawa", + "iwate", + "kagawa", + "kagoshima", + "kanagawa", + "kawasaki", + "kitakyushu", + "kobe", + "kochi", + "kumamoto", + "kyoto", + "lg", + "mie", + "miyagi", + "miyazaki", + "nagano", + "nagasaki", + "nagoya", + "nara", + "ne", + "niigata", + "oita", + "okayama", + "okinawa", + "or", + "osaka", + "saga", + "saitama", + "sapporo", + "sendai", + "shiga", + "shimane", + "shizuoka", + "tochigi", + "tokushima", + "tokyo", + "tottori", + "toyama", + "wakayama", + "xn--0trq7p7nn", + "xn--1ctwo", + "xn--1lqs03n", + "xn--1lqs71d", + "xn--2m4a15e", + "xn--32vp30h", + "xn--4it168d", + "xn--4it797k", + "xn--4pvxs", + "xn--5js045d", + "xn--5rtp49c", + "xn--5rtq34k", + "xn--6btw5a", + "xn--6orx2r", + "xn--7t0a264c", + "xn--8ltr62k", + "xn--8pvr4u", + "xn--c3s14m", + "xn--d5qv7z876c", + "xn--djrs72d6uy", + "xn--djty4k", + "xn--efvn9s", + "xn--ehqz56n", + "xn--elqq16h", + "xn--f6qx53a", + "xn--k7yn95e", + "xn--kbrq7o", + "xn--klt787d", + "xn--kltp7d", + "xn--kltx9a", + "xn--klty5x", + "xn--mkru45i", + "xn--nit225k", + "xn--ntso0iqx3a", + "xn--ntsq17g", + "xn--pssu33l", + "xn--qqqt11m", + "xn--rht27z", + "xn--rht3d", + "xn--rht61e", + "xn--rny31h", + "xn--tor131o", + "xn--uist22h", + "xn--uisz3g", + "xn--uuwu58a", + "xn--vgu402c", + "xn--zbx025d", + "yamagata", + "yamaguchi", + "yamanashi", + "yokohama", + "aisai", + "ama", + "anjo", + "asuke", + "chiryu", + "chita", + "fuso", + "gamagori", + "handa", + "hazu", + "hekinan", + "higashiura", + "ichinomiya", + "inazawa", + "inuyama", + "isshiki", + "iwakura", + "kanie", + "kariya", + "kasugai", + "kira", + "kiyosu", + "komaki", + "konan", + "kota", + "mihama", + "miyoshi", + "nishio", + "nisshin", + "obu", + "oguchi", + "oharu", + "okazaki", + "owariasahi", + "seto", + "shikatsu", + "shinshiro", + "shitara", + "tahara", + "takahama", + "tobishima", + "toei", + "togo", + "tokai", + "tokoname", + "toyoake", + "toyohashi", + "toyokawa", + "toyone", + "toyota", + "tsushima", + "yatomi", + "akita", + "daisen", + "fujisato", + "gojome", + "hachirogata", + "happou", + "higashinaruse", + "honjo", + "honjyo", + "ikawa", + "kamikoani", + "kamioka", + "katagami", + "kazuno", + "kitaakita", + "kosaka", + "kyowa", + "misato", + "mitane", + "moriyoshi", + "nikaho", + "noshiro", + "odate", + "oga", + "ogata", + "semboku", + "yokote", + "yurihonjo", + "aomori", + "gonohe", + "hachinohe", + "hashikami", + "hiranai", + "hirosaki", + "itayanagi", + "kuroishi", + "misawa", + "mutsu", + "nakadomari", + "noheji", + "oirase", + "owani", + "rokunohe", + "sannohe", + "shichinohe", + "shingo", + "takko", + "towada", + "tsugaru", + "tsuruta", + "abiko", + "asahi", + "chonan", + "chosei", + "choshi", + "chuo", + "funabashi", + "futtsu", + "hanamigawa", + "ichihara", + "ichikawa", + "ichinomiya", + "inzai", + "isumi", + "kamagaya", + "kamogawa", + "kashiwa", + "katori", + "katsuura", + "kimitsu", + "kisarazu", + "kozaki", + "kujukuri", + "kyonan", + "matsudo", + "midori", + "mihama", + "minamiboso", + "mobara", + "mutsuzawa", + "nagara", + "nagareyama", + "narashino", + "narita", + "noda", + "oamishirasato", + "omigawa", + "onjuku", + "otaki", + "sakae", + "sakura", + "shimofusa", + "shirako", + "shiroi", + "shisui", + "sodegaura", + "sosa", + "tako", + "tateyama", + "togane", + "tohnosho", + "tomisato", + "urayasu", + "yachimata", + "yachiyo", + "yokaichiba", + "yokoshibahikari", + "yotsukaido", + "ainan", + "honai", + "ikata", + "imabari", + "iyo", + "kamijima", + "kihoku", + "kumakogen", + "masaki", + "matsuno", + "matsuyama", + "namikata", + "niihama", + "ozu", + "saijo", + "seiyo", + "shikokuchuo", + "tobe", + "toon", + "uchiko", + "uwajima", + "yawatahama", + "echizen", + "eiheiji", + "fukui", + "ikeda", + "katsuyama", + "mihama", + "minamiechizen", + "obama", + "ohi", + "ono", + "sabae", + "sakai", + "takahama", + "tsuruga", + "wakasa", + "ashiya", + "buzen", + "chikugo", + "chikuho", + "chikujo", + "chikushino", + "chikuzen", + "chuo", + "dazaifu", + "fukuchi", + "hakata", + "higashi", + "hirokawa", + "hisayama", + "iizuka", + "inatsuki", + "kaho", + "kasuga", + "kasuya", + "kawara", + "keisen", + "koga", + "kurate", + "kurogi", + "kurume", + "minami", + "miyako", + "miyama", + "miyawaka", + "mizumaki", + "munakata", + "nakagawa", + "nakama", + "nishi", + "nogata", + "ogori", + "okagaki", + "okawa", + "oki", + "omuta", + "onga", + "onojo", + "oto", + "saigawa", + "sasaguri", + "shingu", + "shinyoshitomi", + "shonai", + "soeda", + "sue", + "tachiarai", + "tagawa", + "takata", + "toho", + "toyotsu", + "tsuiki", + "ukiha", + "umi", + "usui", + "yamada", + "yame", + "yanagawa", + "yukuhashi", + "aizubange", + "aizumisato", + "aizuwakamatsu", + "asakawa", + "bandai", + "date", + "fukushima", + "furudono", + "futaba", + "hanawa", + "higashi", + "hirata", + "hirono", + "iitate", + "inawashiro", + "ishikawa", + "iwaki", + "izumizaki", + "kagamiishi", + "kaneyama", + "kawamata", + "kitakata", + "kitashiobara", + "koori", + "koriyama", + "kunimi", + "miharu", + "mishima", + "namie", + "nango", + "nishiaizu", + "nishigo", + "okuma", + "omotego", + "ono", + "otama", + "samegawa", + "shimogo", + "shirakawa", + "showa", + "soma", + "sukagawa", + "taishin", + "tamakawa", + "tanagura", + "tenei", + "yabuki", + "yamato", + "yamatsuri", + "yanaizu", + "yugawa", + "anpachi", + "ena", + "gifu", + "ginan", + "godo", + "gujo", + "hashima", + "hichiso", + "hida", + "higashishirakawa", + "ibigawa", + "ikeda", + "kakamigahara", + "kani", + "kasahara", + "kasamatsu", + "kawaue", + "kitagata", + "mino", + "minokamo", + "mitake", + "mizunami", + "motosu", + "nakatsugawa", + "ogaki", + "sakahogi", + "seki", + "sekigahara", + "shirakawa", + "tajimi", + "takayama", + "tarui", + "toki", + "tomika", + "wanouchi", + "yamagata", + "yaotsu", + "yoro", + "annaka", + "chiyoda", + "fujioka", + "higashiagatsuma", + "isesaki", + "itakura", + "kanna", + "kanra", + "katashina", + "kawaba", + "kiryu", + "kusatsu", + "maebashi", + "meiwa", + "midori", + "minakami", + "naganohara", + "nakanojo", + "nanmoku", + "numata", + "oizumi", + "ora", + "ota", + "shibukawa", + "shimonita", + "shinto", + "showa", + "takasaki", + "takayama", + "tamamura", + "tatebayashi", + "tomioka", + "tsukiyono", + "tsumagoi", + "ueno", + "yoshioka", + "asaminami", + "daiwa", + "etajima", + "fuchu", + "fukuyama", + "hatsukaichi", + "higashihiroshima", + "hongo", + "jinsekikogen", + "kaita", + "kui", + "kumano", + "kure", + "mihara", + "miyoshi", + "naka", + "onomichi", + "osakikamijima", + "otake", + "saka", + "sera", + "seranishi", + "shinichi", + "shobara", + "takehara", + "abashiri", + "abira", + "aibetsu", + "akabira", + "akkeshi", + "asahikawa", + "ashibetsu", + "ashoro", + "assabu", + "atsuma", + "bibai", + "biei", + "bifuka", + "bihoro", + "biratori", + "chippubetsu", + "chitose", + "date", + "ebetsu", + "embetsu", + "eniwa", + "erimo", + "esan", + "esashi", + "fukagawa", + "fukushima", + "furano", + "furubira", + "haboro", + "hakodate", + "hamatonbetsu", + "hidaka", + "higashikagura", + "higashikawa", + "hiroo", + "hokuryu", + "hokuto", + "honbetsu", + "horokanai", + "horonobe", + "ikeda", + "imakane", + "ishikari", + "iwamizawa", + "iwanai", + "kamifurano", + "kamikawa", + "kamishihoro", + "kamisunagawa", + "kamoenai", + "kayabe", + "kembuchi", + "kikonai", + "kimobetsu", + "kitahiroshima", + "kitami", + "kiyosato", + "koshimizu", + "kunneppu", + "kuriyama", + "kuromatsunai", + "kushiro", + "kutchan", + "kyowa", + "mashike", + "matsumae", + "mikasa", + "minamifurano", + "mombetsu", + "moseushi", + "mukawa", + "muroran", + "naie", + "nakagawa", + "nakasatsunai", + "nakatombetsu", + "nanae", + "nanporo", + "nayoro", + "nemuro", + "niikappu", + "niki", + "nishiokoppe", + "noboribetsu", + "numata", + "obihiro", + "obira", + "oketo", + "okoppe", + "otaru", + "otobe", + "otofuke", + "otoineppu", + "oumu", + "ozora", + "pippu", + "rankoshi", + "rebun", + "rikubetsu", + "rishiri", + "rishirifuji", + "saroma", + "sarufutsu", + "shakotan", + "shari", + "shibecha", + "shibetsu", + "shikabe", + "shikaoi", + "shimamaki", + "shimizu", + "shimokawa", + "shinshinotsu", + "shintoku", + "shiranuka", + "shiraoi", + "shiriuchi", + "sobetsu", + "sunagawa", + "taiki", + "takasu", + "takikawa", + "takinoue", + "teshikaga", + "tobetsu", + "tohma", + "tomakomai", + "tomari", + "toya", + "toyako", + "toyotomi", + "toyoura", + "tsubetsu", + "tsukigata", + "urakawa", + "urausu", + "uryu", + "utashinai", + "wakkanai", + "wassamu", + "yakumo", + "yoichi", + "aioi", + "akashi", + "ako", + "amagasaki", + "aogaki", + "asago", + "ashiya", + "awaji", + "fukusaki", + "goshiki", + "harima", + "himeji", + "ichikawa", + "inagawa", + "itami", + "kakogawa", + "kamigori", + "kamikawa", + "kasai", + "kasuga", + "kawanishi", + "miki", + "minamiawaji", + "nishinomiya", + "nishiwaki", + "ono", + "sanda", + "sannan", + "sasayama", + "sayo", + "shingu", + "shinonsen", + "shiso", + "sumoto", + "taishi", + "taka", + "takarazuka", + "takasago", + "takino", + "tamba", + "tatsuno", + "toyooka", + "yabu", + "yashiro", + "yoka", + "yokawa", + "ami", + "asahi", + "bando", + "chikusei", + "daigo", + "fujishiro", + "hitachi", + "hitachinaka", + "hitachiomiya", + "hitachiota", + "ibaraki", + "ina", + "inashiki", + "itako", + "iwama", + "joso", + "kamisu", + "kasama", + "kashima", + "kasumigaura", + "koga", + "miho", + "mito", + "moriya", + "naka", + "namegata", + "oarai", + "ogawa", + "omitama", + "ryugasaki", + "sakai", + "sakuragawa", + "shimodate", + "shimotsuma", + "shirosato", + "sowa", + "suifu", + "takahagi", + "tamatsukuri", + "tokai", + "tomobe", + "tone", + "toride", + "tsuchiura", + "tsukuba", + "uchihara", + "ushiku", + "yachiyo", + "yamagata", + "yawara", + "yuki", + "anamizu", + "hakui", + "hakusan", + "kaga", + "kahoku", + "kanazawa", + "kawakita", + "komatsu", + "nakanoto", + "nanao", + "nomi", + "nonoichi", + "noto", + "shika", + "suzu", + "tsubata", + "tsurugi", + "uchinada", + "wajima", + "fudai", + "fujisawa", + "hanamaki", + "hiraizumi", + "hirono", + "ichinohe", + "ichinoseki", + "iwaizumi", + "iwate", + "joboji", + "kamaishi", + "kanegasaki", + "karumai", + "kawai", + "kitakami", + "kuji", + "kunohe", + "kuzumaki", + "miyako", + "mizusawa", + "morioka", + "ninohe", + "noda", + "ofunato", + "oshu", + "otsuchi", + "rikuzentakata", + "shiwa", + "shizukuishi", + "sumita", + "tanohata", + "tono", + "yahaba", + "yamada", + "ayagawa", + "higashikagawa", + "kanonji", + "kotohira", + "manno", + "marugame", + "mitoyo", + "naoshima", + "sanuki", + "tadotsu", + "takamatsu", + "tonosho", + "uchinomi", + "utazu", + "zentsuji", + "akune", + "amami", + "hioki", + "isa", + "isen", + "izumi", + "kagoshima", + "kanoya", + "kawanabe", + "kinko", + "kouyama", + "makurazaki", + "matsumoto", + "minamitane", + "nakatane", + "nishinoomote", + "satsumasendai", + "soo", + "tarumizu", + "yusui", + "aikawa", + "atsugi", + "ayase", + "chigasaki", + "ebina", + "fujisawa", + "hadano", + "hakone", + "hiratsuka", + "isehara", + "kaisei", + "kamakura", + "kiyokawa", + "matsuda", + "minamiashigara", + "miura", + "nakai", + "ninomiya", + "odawara", + "oi", + "oiso", + "sagamihara", + "samukawa", + "tsukui", + "yamakita", + "yamato", + "yokosuka", + "yugawara", + "zama", + "zushi", + "city", + "city", + "city", + "aki", + "geisei", + "hidaka", + "higashitsuno", + "ino", + "kagami", + "kami", + "kitagawa", + "kochi", + "mihara", + "motoyama", + "muroto", + "nahari", + "nakamura", + "nankoku", + "nishitosa", + "niyodogawa", + "ochi", + "okawa", + "otoyo", + "otsuki", + "sakawa", + "sukumo", + "susaki", + "tosa", + "tosashimizu", + "toyo", + "tsuno", + "umaji", + "yasuda", + "yusuhara", + "amakusa", + "arao", + "aso", + "choyo", + "gyokuto", + "kamiamakusa", + "kikuchi", + "kumamoto", + "mashiki", + "mifune", + "minamata", + "minamioguni", + "nagasu", + "nishihara", + "oguni", + "ozu", + "sumoto", + "takamori", + "uki", + "uto", + "yamaga", + "yamato", + "yatsushiro", + "ayabe", + "fukuchiyama", + "higashiyama", + "ide", + "ine", + "joyo", + "kameoka", + "kamo", + "kita", + "kizu", + "kumiyama", + "kyotamba", + "kyotanabe", + "kyotango", + "maizuru", + "minami", + "minamiyamashiro", + "miyazu", + "muko", + "nagaokakyo", + "nakagyo", + "nantan", + "oyamazaki", + "sakyo", + "seika", + "tanabe", + "uji", + "ujitawara", + "wazuka", + "yamashina", + "yawata", + "asahi", + "inabe", + "ise", + "kameyama", + "kawagoe", + "kiho", + "kisosaki", + "kiwa", + "komono", + "kumano", + "kuwana", + "matsusaka", + "meiwa", + "mihama", + "minamiise", + "misugi", + "miyama", + "nabari", + "shima", + "suzuka", + "tado", + "taiki", + "taki", + "tamaki", + "toba", + "tsu", + "udono", + "ureshino", + "watarai", + "yokkaichi", + "furukawa", + "higashimatsushima", + "ishinomaki", + "iwanuma", + "kakuda", + "kami", + "kawasaki", + "marumori", + "matsushima", + "minamisanriku", + "misato", + "murata", + "natori", + "ogawara", + "ohira", + "onagawa", + "osaki", + "rifu", + "semine", + "shibata", + "shichikashuku", + "shikama", + "shiogama", + "shiroishi", + "tagajo", + "taiwa", + "tome", + "tomiya", + "wakuya", + "watari", + "yamamoto", + "zao", + "aya", + "ebino", + "gokase", + "hyuga", + "kadogawa", + "kawaminami", + "kijo", + "kitagawa", + "kitakata", + "kitaura", + "kobayashi", + "kunitomi", + "kushima", + "mimata", + "miyakonojo", + "miyazaki", + "morotsuka", + "nichinan", + "nishimera", + "nobeoka", + "saito", + "shiiba", + "shintomi", + "takaharu", + "takanabe", + "takazaki", + "tsuno", + "achi", + "agematsu", + "anan", + "aoki", + "asahi", + "azumino", + "chikuhoku", + "chikuma", + "chino", + "fujimi", + "hakuba", + "hara", + "hiraya", + "iida", + "iijima", + "iiyama", + "iizuna", + "ikeda", + "ikusaka", + "ina", + "karuizawa", + "kawakami", + "kiso", + "kisofukushima", + "kitaaiki", + "komagane", + "komoro", + "matsukawa", + "matsumoto", + "miasa", + "minamiaiki", + "minamimaki", + "minamiminowa", + "minowa", + "miyada", + "miyota", + "mochizuki", + "nagano", + "nagawa", + "nagiso", + "nakagawa", + "nakano", + "nozawaonsen", + "obuse", + "ogawa", + "okaya", + "omachi", + "omi", + "ookuwa", + "ooshika", + "otaki", + "otari", + "sakae", + "sakaki", + "saku", + "sakuho", + "shimosuwa", + "shinanomachi", + "shiojiri", + "suwa", + "suzaka", + "takagi", + "takamori", + "takayama", + "tateshina", + "tatsuno", + "togakushi", + "togura", + "tomi", + "ueda", + "wada", + "yamagata", + "yamanouchi", + "yasaka", + "yasuoka", + "chijiwa", + "futsu", + "goto", + "hasami", + "hirado", + "iki", + "isahaya", + "kawatana", + "kuchinotsu", + "matsuura", + "nagasaki", + "obama", + "omura", + "oseto", + "saikai", + "sasebo", + "seihi", + "shimabara", + "shinkamigoto", + "togitsu", + "tsushima", + "unzen", + "city", + "ando", + "gose", + "heguri", + "higashiyoshino", + "ikaruga", + "ikoma", + "kamikitayama", + "kanmaki", + "kashiba", + "kashihara", + "katsuragi", + "kawai", + "kawakami", + "kawanishi", + "koryo", + "kurotaki", + "mitsue", + "miyake", + "nara", + "nosegawa", + "oji", + "ouda", + "oyodo", + "sakurai", + "sango", + "shimoichi", + "shimokitayama", + "shinjo", + "soni", + "takatori", + "tawaramoto", + "tenkawa", + "tenri", + "uda", + "yamatokoriyama", + "yamatotakada", + "yamazoe", + "yoshino", + "aga", + "agano", + "gosen", + "itoigawa", + "izumozaki", + "joetsu", + "kamo", + "kariwa", + "kashiwazaki", + "minamiuonuma", + "mitsuke", + "muika", + "murakami", + "myoko", + "nagaoka", + "niigata", + "ojiya", + "omi", + "sado", + "sanjo", + "seiro", + "seirou", + "sekikawa", + "shibata", + "tagami", + "tainai", + "tochio", + "tokamachi", + "tsubame", + "tsunan", + "uonuma", + "yahiko", + "yoita", + "yuzawa", + "beppu", + "bungoono", + "bungotakada", + "hasama", + "hiji", + "himeshima", + "hita", + "kamitsue", + "kokonoe", + "kuju", + "kunisaki", + "kusu", + "oita", + "saiki", + "taketa", + "tsukumi", + "usa", + "usuki", + "yufu", + "akaiwa", + "asakuchi", + "bizen", + "hayashima", + "ibara", + "kagamino", + "kasaoka", + "kibichuo", + "kumenan", + "kurashiki", + "maniwa", + "misaki", + "nagi", + "niimi", + "nishiawakura", + "okayama", + "satosho", + "setouchi", + "shinjo", + "shoo", + "soja", + "takahashi", + "tamano", + "tsuyama", + "wake", + "yakage", + "aguni", + "ginowan", + "ginoza", + "gushikami", + "haebaru", + "higashi", + "hirara", + "iheya", + "ishigaki", + "ishikawa", + "itoman", + "izena", + "kadena", + "kin", + "kitadaito", + "kitanakagusuku", + "kumejima", + "kunigami", + "minamidaito", + "motobu", + "nago", + "naha", + "nakagusuku", + "nakijin", + "nanjo", + "nishihara", + "ogimi", + "okinawa", + "onna", + "shimoji", + "taketomi", + "tarama", + "tokashiki", + "tomigusuku", + "tonaki", + "urasoe", + "uruma", + "yaese", + "yomitan", + "yonabaru", + "yonaguni", + "zamami", + "abeno", + "chihayaakasaka", + "chuo", + "daito", + "fujiidera", + "habikino", + "hannan", + "higashiosaka", + "higashisumiyoshi", + "higashiyodogawa", + "hirakata", + "ibaraki", + "ikeda", + "izumi", + "izumiotsu", + "izumisano", + "kadoma", + "kaizuka", + "kanan", + "kashiwara", + "katano", + "kawachinagano", + "kishiwada", + "kita", + "kumatori", + "matsubara", + "minato", + "minoh", + "misaki", + "moriguchi", + "neyagawa", + "nishi", + "nose", + "osakasayama", + "sakai", + "sayama", + "sennan", + "settsu", + "shijonawate", + "shimamoto", + "suita", + "tadaoka", + "taishi", + "tajiri", + "takaishi", + "takatsuki", + "tondabayashi", + "toyonaka", + "toyono", + "yao", + "ariake", + "arita", + "fukudomi", + "genkai", + "hamatama", + "hizen", + "imari", + "kamimine", + "kanzaki", + "karatsu", + "kashima", + "kitagata", + "kitahata", + "kiyama", + "kouhoku", + "kyuragi", + "nishiarita", + "ogi", + "omachi", + "ouchi", + "saga", + "shiroishi", + "taku", + "tara", + "tosu", + "yoshinogari", + "arakawa", + "asaka", + "chichibu", + "fujimi", + "fujimino", + "fukaya", + "hanno", + "hanyu", + "hasuda", + "hatogaya", + "hatoyama", + "hidaka", + "higashichichibu", + "higashimatsuyama", + "honjo", + "ina", + "iruma", + "iwatsuki", + "kamiizumi", + "kamikawa", + "kamisato", + "kasukabe", + "kawagoe", + "kawaguchi", + "kawajima", + "kazo", + "kitamoto", + "koshigaya", + "kounosu", + "kuki", + "kumagaya", + "matsubushi", + "minano", + "misato", + "miyashiro", + "miyoshi", + "moroyama", + "nagatoro", + "namegawa", + "niiza", + "ogano", + "ogawa", + "ogose", + "okegawa", + "omiya", + "otaki", + "ranzan", + "ryokami", + "saitama", + "sakado", + "satte", + "sayama", + "shiki", + "shiraoka", + "soka", + "sugito", + "toda", + "tokigawa", + "tokorozawa", + "tsurugashima", + "urawa", + "warabi", + "yashio", + "yokoze", + "yono", + "yorii", + "yoshida", + "yoshikawa", + "yoshimi", + "city", + "city", + "aisho", + "gamo", + "higashiomi", + "hikone", + "koka", + "konan", + "kosei", + "koto", + "kusatsu", + "maibara", + "moriyama", + "nagahama", + "nishiazai", + "notogawa", + "omihachiman", + "otsu", + "ritto", + "ryuoh", + "takashima", + "takatsuki", + "torahime", + "toyosato", + "yasu", + "akagi", + "ama", + "gotsu", + "hamada", + "higashiizumo", + "hikawa", + "hikimi", + "izumo", + "kakinoki", + "masuda", + "matsue", + "misato", + "nishinoshima", + "ohda", + "okinoshima", + "okuizumo", + "shimane", + "tamayu", + "tsuwano", + "unnan", + "yakumo", + "yasugi", + "yatsuka", + "arai", + "atami", + "fuji", + "fujieda", + "fujikawa", + "fujinomiya", + "fukuroi", + "gotemba", + "haibara", + "hamamatsu", + "higashiizu", + "ito", + "iwata", + "izu", + "izunokuni", + "kakegawa", + "kannami", + "kawanehon", + "kawazu", + "kikugawa", + "kosai", + "makinohara", + "matsuzaki", + "minamiizu", + "mishima", + "morimachi", + "nishiizu", + "numazu", + "omaezaki", + "shimada", + "shimizu", + "shimoda", + "shizuoka", + "susono", + "yaizu", + "yoshida", + "ashikaga", + "bato", + "haga", + "ichikai", + "iwafune", + "kaminokawa", + "kanuma", + "karasuyama", + "kuroiso", + "mashiko", + "mibu", + "moka", + "motegi", + "nasu", + "nasushiobara", + "nikko", + "nishikata", + "nogi", + "ohira", + "ohtawara", + "oyama", + "sakura", + "sano", + "shimotsuke", + "shioya", + "takanezawa", + "tochigi", + "tsuga", + "ujiie", + "utsunomiya", + "yaita", + "aizumi", + "anan", + "ichiba", + "itano", + "kainan", + "komatsushima", + "matsushige", + "mima", + "minami", + "miyoshi", + "mugi", + "nakagawa", + "naruto", + "sanagochi", + "shishikui", + "tokushima", + "wajiki", + "adachi", + "akiruno", + "akishima", + "aogashima", + "arakawa", + "bunkyo", + "chiyoda", + "chofu", + "chuo", + "edogawa", + "fuchu", + "fussa", + "hachijo", + "hachioji", + "hamura", + "higashikurume", + "higashimurayama", + "higashiyamato", + "hino", + "hinode", + "hinohara", + "inagi", + "itabashi", + "katsushika", + "kita", + "kiyose", + "kodaira", + "koganei", + "kokubunji", + "komae", + "koto", + "kouzushima", + "kunitachi", + "machida", + "meguro", + "minato", + "mitaka", + "mizuho", + "musashimurayama", + "musashino", + "nakano", + "nerima", + "ogasawara", + "okutama", + "ome", + "oshima", + "ota", + "setagaya", + "shibuya", + "shinagawa", + "shinjuku", + "suginami", + "sumida", + "tachikawa", + "taito", + "tama", + "toshima", + "chizu", + "hino", + "kawahara", + "koge", + "kotoura", + "misasa", + "nanbu", + "nichinan", + "sakaiminato", + "tottori", + "wakasa", + "yazu", + "yonago", + "asahi", + "fuchu", + "fukumitsu", + "funahashi", + "himi", + "imizu", + "inami", + "johana", + "kamiichi", + "kurobe", + "nakaniikawa", + "namerikawa", + "nanto", + "nyuzen", + "oyabe", + "taira", + "takaoka", + "tateyama", + "toga", + "tonami", + "toyama", + "unazuki", + "uozu", + "yamada", + "arida", + "aridagawa", + "gobo", + "hashimoto", + "hidaka", + "hirogawa", + "inami", + "iwade", + "kainan", + "kamitonda", + "katsuragi", + "kimino", + "kinokawa", + "kitayama", + "koya", + "koza", + "kozagawa", + "kudoyama", + "kushimoto", + "mihama", + "misato", + "nachikatsuura", + "shingu", + "shirahama", + "taiji", + "tanabe", + "wakayama", + "yuasa", + "yura", + "asahi", + "funagata", + "higashine", + "iide", + "kahoku", + "kaminoyama", + "kaneyama", + "kawanishi", + "mamurogawa", + "mikawa", + "murayama", + "nagai", + "nakayama", + "nanyo", + "nishikawa", + "obanazawa", + "oe", + "oguni", + "ohkura", + "oishida", + "sagae", + "sakata", + "sakegawa", + "shinjo", + "shirataka", + "shonai", + "takahata", + "tendo", + "tozawa", + "tsuruoka", + "yamagata", + "yamanobe", + "yonezawa", + "yuza", + "abu", + "hagi", + "hikari", + "hofu", + "iwakuni", + "kudamatsu", + "mitou", + "nagato", + "oshima", + "shimonoseki", + "shunan", + "tabuse", + "tokuyama", + "toyota", + "ube", + "yuu", + "chuo", + "doshi", + "fuefuki", + "fujikawa", + "fujikawaguchiko", + "fujiyoshida", + "hayakawa", + "hokuto", + "ichikawamisato", + "kai", + "kofu", + "koshu", + "kosuge", + "minami-alps", + "minobu", + "nakamichi", + "nanbu", + "narusawa", + "nirasaki", + "nishikatsura", + "oshino", + "otsuki", + "showa", + "tabayama", + "tsuru", + "uenohara", + "yamanakako", + "yamanashi", + "city", + "ac", + "co", + "go", + "info", + "me", + "mobi", + "ne", + "or", + "sc", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "ass", + "asso", + "com", + "coop", + "edu", + "gouv", + "gov", + "medecin", + "mil", + "nom", + "notaires", + "org", + "pharmaciens", + "prd", + "presse", + "tm", + "veterinaire", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "org", + "rep", + "tra", + "ac", + "blogspot", + "busan", + "chungbuk", + "chungnam", + "co", + "daegu", + "daejeon", + "es", + "gangwon", + "go", + "gwangju", + "gyeongbuk", + "gyeonggi", + "gyeongnam", + "hs", + "incheon", + "jeju", + "jeonbuk", + "jeonnam", + "kg", + "mil", + "ms", + "ne", + "or", + "pe", + "re", + "sc", + "seoul", + "ulsan", + "co", + "edu", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "nym", + "org", + "bnr", + "c", + "com", + "edu", + "gov", + "info", + "int", + "net", + "nym", + "org", + "per", + "static", + "dev", + "sites", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "oy", + "blogspot", + "nom", + "nym", + "cyon", + "mypep", + "ac", + "assn", + "com", + "edu", + "gov", + "grp", + "hotel", + "int", + "ltd", + "net", + "ngo", + "org", + "sch", + "soc", + "web", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "blogspot", + "gov", + "nym", + "blogspot", + "nym", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "mil", + "net", + "org", + "com", + "edu", + "gov", + "id", + "med", + "net", + "org", + "plc", + "sch", + "ac", + "co", + "gov", + "net", + "org", + "press", + "router", + "asso", + "tm", + "blogspot", + "ac", + "brasilia", + "c66", + "co", + "daplie", + "ddns", + "diskstation", + "dnsfor", + "dscloud", + "edu", + "filegear", + "gov", + "hopto", + "i234", + "its", + "loginto", + "myds", + "net", + "noip", + "nym", + "org", + "priv", + "synology", + "webhop", + "wedeploy", + "yombo", + "localhost", + "co", + "com", + "edu", + "gov", + "mil", + "nom", + "org", + "prd", + "tm", + "blogspot", + "com", + "edu", + "gov", + "inf", + "name", + "net", + "nom", + "org", + "com", + "edu", + "gouv", + "gov", + "net", + "org", + "presse", + "edu", + "gov", + "nyc", + "org", + "com", + "edu", + "gov", + "net", + "org", + "dscloud", + "blogspot", + "gov", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "gov", + "net", + "or", + "org", + "academy", + "agriculture", + "air", + "airguard", + "alabama", + "alaska", + "amber", + "ambulance", + "american", + "americana", + "americanantiques", + "americanart", + "amsterdam", + "and", + "annefrank", + "anthro", + "anthropology", + "antiques", + "aquarium", + "arboretum", + "archaeological", + "archaeology", + "architecture", + "art", + "artanddesign", + "artcenter", + "artdeco", + "arteducation", + "artgallery", + "arts", + "artsandcrafts", + "asmatart", + "assassination", + "assisi", + "association", + "astronomy", + "atlanta", + "austin", + "australia", + "automotive", + "aviation", + "axis", + "badajoz", + "baghdad", + "bahn", + "bale", + "baltimore", + "barcelona", + "baseball", + "basel", + "baths", + "bauern", + "beauxarts", + "beeldengeluid", + "bellevue", + "bergbau", + "berkeley", + "berlin", + "bern", + "bible", + "bilbao", + "bill", + "birdart", + "birthplace", + "bonn", + "boston", + "botanical", + "botanicalgarden", + "botanicgarden", + "botany", + "brandywinevalley", + "brasil", + "bristol", + "british", + "britishcolumbia", + "broadcast", + "brunel", + "brussel", + "brussels", + "bruxelles", + "building", + "burghof", + "bus", + "bushey", + "cadaques", + "california", + "cambridge", + "can", + "canada", + "capebreton", + "carrier", + "cartoonart", + "casadelamoneda", + "castle", + "castres", + "celtic", + "center", + "chattanooga", + "cheltenham", + "chesapeakebay", + "chicago", + "children", + "childrens", + "childrensgarden", + "chiropractic", + "chocolate", + "christiansburg", + "cincinnati", + "cinema", + "circus", + "civilisation", + "civilization", + "civilwar", + "clinton", + "clock", + "coal", + "coastaldefence", + "cody", + "coldwar", + "collection", + "colonialwilliamsburg", + "coloradoplateau", + "columbia", + "columbus", + "communication", + "communications", + "community", + "computer", + "computerhistory", + "contemporary", + "contemporaryart", + "convent", + "copenhagen", + "corporation", + "corvette", + "costume", + "countryestate", + "county", + "crafts", + "cranbrook", + "creation", + "cultural", + "culturalcenter", + "culture", + "cyber", + "cymru", + "dali", + "dallas", + "database", + "ddr", + "decorativearts", + "delaware", + "delmenhorst", + "denmark", + "depot", + "design", + "detroit", + "dinosaur", + "discovery", + "dolls", + "donostia", + "durham", + "eastafrica", + "eastcoast", + "education", + "educational", + "egyptian", + "eisenbahn", + "elburg", + "elvendrell", + "embroidery", + "encyclopedic", + "england", + "entomology", + "environment", + "environmentalconservation", + "epilepsy", + "essex", + "estate", + "ethnology", + "exeter", + "exhibition", + "family", + "farm", + "farmequipment", + "farmers", + "farmstead", + "field", + "figueres", + "filatelia", + "film", + "fineart", + "finearts", + "finland", + "flanders", + "florida", + "force", + "fortmissoula", + "fortworth", + "foundation", + "francaise", + "frankfurt", + "franziskaner", + "freemasonry", + "freiburg", + "fribourg", + "frog", + "fundacio", + "furniture", + "gallery", + "garden", + "gateway", + "geelvinck", + "gemological", + "geology", + "georgia", + "giessen", + "glas", + "glass", + "gorge", + "grandrapids", + "graz", + "guernsey", + "halloffame", + "hamburg", + "handson", + "harvestcelebration", + "hawaii", + "health", + "heimatunduhren", + "hellas", + "helsinki", + "hembygdsforbund", + "heritage", + "histoire", + "historical", + "historicalsociety", + "historichouses", + "historisch", + "historisches", + "history", + "historyofscience", + "horology", + "house", + "humanities", + "illustration", + "imageandsound", + "indian", + "indiana", + "indianapolis", + "indianmarket", + "intelligence", + "interactive", + "iraq", + "iron", + "isleofman", + "jamison", + "jefferson", + "jerusalem", + "jewelry", + "jewish", + "jewishart", + "jfk", + "journalism", + "judaica", + "judygarland", + "juedisches", + "juif", + "karate", + "karikatur", + "kids", + "koebenhavn", + "koeln", + "kunst", + "kunstsammlung", + "kunstunddesign", + "labor", + "labour", + "lajolla", + "lancashire", + "landes", + "lans", + "larsson", + "lewismiller", + "lincoln", + "linz", + "living", + "livinghistory", + "localhistory", + "london", + "losangeles", + "louvre", + "loyalist", + "lucerne", + "luxembourg", + "luzern", + "mad", + "madrid", + "mallorca", + "manchester", + "mansion", + "mansions", + "manx", + "marburg", + "maritime", + "maritimo", + "maryland", + "marylhurst", + "media", + "medical", + "medizinhistorisches", + "meeres", + "memorial", + "mesaverde", + "michigan", + "midatlantic", + "military", + "mill", + "miners", + "mining", + "minnesota", + "missile", + "missoula", + "modern", + "moma", + "money", + "monmouth", + "monticello", + "montreal", + "moscow", + "motorcycle", + "muenchen", + "muenster", + "mulhouse", + "muncie", + "museet", + "museumcenter", + "museumvereniging", + "music", + "national", + "nationalfirearms", + "nationalheritage", + "nativeamerican", + "naturalhistory", + "naturalhistorymuseum", + "naturalsciences", + "nature", + "naturhistorisches", + "natuurwetenschappen", + "naumburg", + "naval", + "nebraska", + "neues", + "newhampshire", + "newjersey", + "newmexico", + "newport", + "newspaper", + "newyork", + "niepce", + "norfolk", + "north", + "nrw", + "nuernberg", + "nuremberg", + "nyc", + "nyny", + "oceanographic", + "oceanographique", + "omaha", + "online", + "ontario", + "openair", + "oregon", + "oregontrail", + "otago", + "oxford", + "pacific", + "paderborn", + "palace", + "paleo", + "palmsprings", + "panama", + "paris", + "pasadena", + "pharmacy", + "philadelphia", + "philadelphiaarea", + "philately", + "phoenix", + "photography", + "pilots", + "pittsburgh", + "planetarium", + "plantation", + "plants", + "plaza", + "portal", + "portland", + "portlligat", + "posts-and-telecommunications", + "preservation", + "presidio", + "press", + "project", + "public", + "pubol", + "quebec", + "railroad", + "railway", + "research", + "resistance", + "riodejaneiro", + "rochester", + "rockart", + "roma", + "russia", + "saintlouis", + "salem", + "salvadordali", + "salzburg", + "sandiego", + "sanfrancisco", + "santabarbara", + "santacruz", + "santafe", + "saskatchewan", + "satx", + "savannahga", + "schlesisches", + "schoenbrunn", + "schokoladen", + "school", + "schweiz", + "science", + "science-fiction", + "scienceandhistory", + "scienceandindustry", + "sciencecenter", + "sciencecenters", + "sciencehistory", + "sciences", + "sciencesnaturelles", + "scotland", + "seaport", + "settlement", + "settlers", + "shell", + "sherbrooke", + "sibenik", + "silk", + "ski", + "skole", + "society", + "sologne", + "soundandvision", + "southcarolina", + "southwest", + "space", + "spy", + "square", + "stadt", + "stalbans", + "starnberg", + "state", + "stateofdelaware", + "station", + "steam", + "steiermark", + "stjohn", + "stockholm", + "stpetersburg", + "stuttgart", + "suisse", + "surgeonshall", + "surrey", + "svizzera", + "sweden", + "sydney", + "tank", + "tcm", + "technology", + "telekommunikation", + "television", + "texas", + "textile", + "theater", + "time", + "timekeeping", + "topology", + "torino", + "touch", + "town", + "transport", + "tree", + "trolley", + "trust", + "trustee", + "uhren", + "ulm", + "undersea", + "university", + "usa", + "usantiques", + "usarts", + "uscountryestate", + "usculture", + "usdecorativearts", + "usgarden", + "ushistory", + "ushuaia", + "uslivinghistory", + "utah", + "uvic", + "valley", + "vantaa", + "versailles", + "viking", + "village", + "virginia", + "virtual", + "virtuel", + "vlaanderen", + "volkenkunde", + "wales", + "wallonie", + "war", + "washingtondc", + "watch-and-clock", + "watchandclock", + "western", + "westfalen", + "whaling", + "wildlife", + "williamsburg", + "windmill", + "workshop", + "xn--9dbhblg6di", + "xn--comunicaes-v6a2o", + "xn--correios-e-telecomunicaes-ghc29a", + "xn--h1aegh", + "xn--lns-qla", + "york", + "yorkshire", + "yosemite", + "youth", + "zoological", + "zoology", + "aero", + "biz", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "mil", + "museum", + "name", + "net", + "org", + "pro", + "ac", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "int", + "museum", + "net", + "org", + "blogspot", + "com", + "edu", + "gob", + "net", + "nym", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "ac", + "adv", + "co", + "edu", + "gov", + "mil", + "net", + "org", + "ca", + "cc", + "co", + "com", + "dr", + "in", + "info", + "mobi", + "mx", + "name", + "or", + "org", + "pro", + "school", + "tv", + "us", + "ws", + "her", + "his", + "forgot", + "forgot", + "asso", + "nom", + "alwaysdata", + "at-band-camp", + "azure-mobile", + "azurewebsites", + "barsy", + "blogdns", + "boomla", + "bounceme", + "bplaced", + "broke-it", + "buyshouses", + "casacam", + "cdn77", + "cdn77-ssl", + "channelsdvr", + "cloudaccess", + "cloudapp", + "cloudfront", + "cloudfunctions", + "cryptonomic", + "ddns", + "debian", + "definima", + "dnsalias", + "dnsdojo", + "does-it", + "dontexist", + "dsmynas", + "dynalias", + "dynathome", + "dynu", + "dynv6", + "eating-organic", + "endofinternet", + "familyds", + "fastly", + "fastlylb", + "feste-ip", + "firewall-gateway", + "flynnhosting", + "from-az", + "from-co", + "from-la", + "from-ny", + "gb", + "gets-it", + "ham-radio-op", + "homeftp", + "homeip", + "homelinux", + "homeunix", + "hu", + "in", + "in-the-band", + "ipifony", + "is-a-chef", + "is-a-geek", + "isa-geek", + "jp", + "kicks-ass", + "knx-server", + "moonscale", + "mydissent", + "myeffect", + "myfritz", + "mymediapc", + "mypsx", + "mysecuritycamera", + "nhlfan", + "no-ip", + "office-on-the", + "pgafan", + "podzone", + "privatizehealthinsurance", + "rackmaze", + "redirectme", + "ru", + "scrapper-site", + "se", + "selfip", + "sells-it", + "servebbs", + "serveblog", + "serveftp", + "serveminecraft", + "square7", + "static-access", + "sytes", + "t3l3p0rt", + "thruhere", + "twmail", + "uk", + "webhop", + "za", + "r", + "freetls", + "map", + "prod", + "ssl", + "a", + "global", + "a", + "b", + "global", + "map", + "alces", + "arts", + "com", + "firm", + "info", + "net", + "other", + "per", + "rec", + "store", + "web", + "com", + "edu", + "gov", + "i", + "mil", + "mobi", + "name", + "net", + "org", + "sch", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gob", + "in", + "info", + "int", + "mil", + "net", + "nom", + "org", + "web", + "blogspot", + "bv", + "cistron", + "co", + "demon", + "hosting-cluster", + "transurl", + "virtueeldomein", + "aa", + "aarborte", + "aejrie", + "afjord", + "agdenes", + "ah", + "akershus", + "aknoluokta", + "akrehamn", + "al", + "alaheadju", + "alesund", + "algard", + "alstahaug", + "alta", + "alvdal", + "amli", + "amot", + "andasuolo", + "andebu", + "andoy", + "ardal", + "aremark", + "arendal", + "arna", + "aseral", + "asker", + "askim", + "askoy", + "askvoll", + "asnes", + "audnedaln", + "aukra", + "aure", + "aurland", + "aurskog-holand", + "austevoll", + "austrheim", + "averoy", + "badaddja", + "bahcavuotna", + "bahccavuotna", + "baidar", + "bajddar", + "balat", + "balestrand", + "ballangen", + "balsfjord", + "bamble", + "bardu", + "barum", + "batsfjord", + "bearalvahki", + "beardu", + "beiarn", + "berg", + "bergen", + "berlevag", + "bievat", + "bindal", + "birkenes", + "bjarkoy", + "bjerkreim", + "bjugn", + "blogspot", + "bodo", + "bokn", + "bomlo", + "bremanger", + "bronnoy", + "bronnoysund", + "brumunddal", + "bryne", + "bu", + "budejju", + "buskerud", + "bygland", + "bykle", + "cahcesuolo", + "co", + "davvenjarga", + "davvesiida", + "deatnu", + "dep", + "dielddanuorri", + "divtasvuodna", + "divttasvuotna", + "donna", + "dovre", + "drammen", + "drangedal", + "drobak", + "dyroy", + "egersund", + "eid", + "eidfjord", + "eidsberg", + "eidskog", + "eidsvoll", + "eigersund", + "elverum", + "enebakk", + "engerdal", + "etne", + "etnedal", + "evenassi", + "evenes", + "evje-og-hornnes", + "farsund", + "fauske", + "fedje", + "fet", + "fetsund", + "fhs", + "finnoy", + "fitjar", + "fjaler", + "fjell", + "fla", + "flakstad", + "flatanger", + "flekkefjord", + "flesberg", + "flora", + "floro", + "fm", + "folkebibl", + "folldal", + "forde", + "forsand", + "fosnes", + "frana", + "fredrikstad", + "frei", + "frogn", + "froland", + "frosta", + "froya", + "fuoisku", + "fuossko", + "fusa", + "fylkesbibl", + "fyresdal", + "gaivuotna", + "galsa", + "gamvik", + "gangaviika", + "gaular", + "gausdal", + "giehtavuoatna", + "gildeskal", + "giske", + "gjemnes", + "gjerdrum", + "gjerstad", + "gjesdal", + "gjovik", + "gloppen", + "gol", + "gran", + "grane", + "granvin", + "gratangen", + "grimstad", + "grong", + "grue", + "gulen", + "guovdageaidnu", + "ha", + "habmer", + "hadsel", + "hagebostad", + "halden", + "halsa", + "hamar", + "hamaroy", + "hammarfeasta", + "hammerfest", + "hapmir", + "haram", + "hareid", + "harstad", + "hasvik", + "hattfjelldal", + "haugesund", + "hedmark", + "hemne", + "hemnes", + "hemsedal", + "herad", + "hitra", + "hjartdal", + "hjelmeland", + "hl", + "hm", + "hobol", + "hof", + "hokksund", + "hol", + "hole", + "holmestrand", + "holtalen", + "honefoss", + "hordaland", + "hornindal", + "horten", + "hoyanger", + "hoylandet", + "hurdal", + "hurum", + "hvaler", + "hyllestad", + "ibestad", + "idrett", + "inderoy", + "iveland", + "ivgu", + "jan-mayen", + "jessheim", + "jevnaker", + "jolster", + "jondal", + "jorpeland", + "kafjord", + "karasjohka", + "karasjok", + "karlsoy", + "karmoy", + "kautokeino", + "kirkenes", + "klabu", + "klepp", + "kommune", + "kongsberg", + "kongsvinger", + "kopervik", + "kraanghke", + "kragero", + "kristiansand", + "kristiansund", + "krodsherad", + "krokstadelva", + "kvafjord", + "kvalsund", + "kvam", + "kvanangen", + "kvinesdal", + "kvinnherad", + "kviteseid", + "kvitsoy", + "laakesvuemie", + "lahppi", + "langevag", + "lardal", + "larvik", + "lavagis", + "lavangen", + "leangaviika", + "lebesby", + "leikanger", + "leirfjord", + "leirvik", + "leka", + "leksvik", + "lenvik", + "lerdal", + "lesja", + "levanger", + "lier", + "lierne", + "lillehammer", + "lillesand", + "lindas", + "lindesnes", + "loabat", + "lodingen", + "lom", + "loppa", + "lorenskog", + "loten", + "lund", + "lunner", + "luroy", + "luster", + "lyngdal", + "lyngen", + "malatvuopmi", + "malselv", + "malvik", + "mandal", + "marker", + "marnardal", + "masfjorden", + "masoy", + "matta-varjjat", + "meland", + "meldal", + "melhus", + "meloy", + "meraker", + "midsund", + "midtre-gauldal", + "mil", + "mjondalen", + "mo-i-rana", + "moareke", + "modalen", + "modum", + "molde", + "more-og-romsdal", + "mosjoen", + "moskenes", + "moss", + "mosvik", + "mr", + "muosat", + "museum", + "naamesjevuemie", + "namdalseid", + "namsos", + "namsskogan", + "nannestad", + "naroy", + "narviika", + "narvik", + "naustdal", + "navuotna", + "nedre-eiker", + "nesna", + "nesodden", + "nesoddtangen", + "nesseby", + "nesset", + "nissedal", + "nittedal", + "nl", + "nord-aurdal", + "nord-fron", + "nord-odal", + "norddal", + "nordkapp", + "nordland", + "nordre-land", + "nordreisa", + "nore-og-uvdal", + "notodden", + "notteroy", + "nt", + "odda", + "of", + "oksnes", + "ol", + "omasvuotna", + "oppdal", + "oppegard", + "orkanger", + "orkdal", + "orland", + "orskog", + "orsta", + "osen", + "oslo", + "osoyro", + "osteroy", + "ostfold", + "ostre-toten", + "overhalla", + "ovre-eiker", + "oyer", + "oygarden", + "oystre-slidre", + "porsanger", + "porsangu", + "porsgrunn", + "priv", + "rade", + "radoy", + "rahkkeravju", + "raholt", + "raisa", + "rakkestad", + "ralingen", + "rana", + "randaberg", + "rauma", + "rendalen", + "rennebu", + "rennesoy", + "rindal", + "ringebu", + "ringerike", + "ringsaker", + "risor", + "rissa", + "rl", + "roan", + "rodoy", + "rollag", + "romsa", + "romskog", + "roros", + "rost", + "royken", + "royrvik", + "ruovat", + "rygge", + "salangen", + "salat", + "saltdal", + "samnanger", + "sandefjord", + "sandnes", + "sandnessjoen", + "sandoy", + "sarpsborg", + "sauda", + "sauherad", + "sel", + "selbu", + "selje", + "seljord", + "sf", + "siellak", + "sigdal", + "siljan", + "sirdal", + "skanit", + "skanland", + "skaun", + "skedsmo", + "skedsmokorset", + "ski", + "skien", + "skierva", + "skiptvet", + "skjak", + "skjervoy", + "skodje", + "slattum", + "smola", + "snaase", + "snasa", + "snillfjord", + "snoasa", + "sogndal", + "sogne", + "sokndal", + "sola", + "solund", + "somna", + "sondre-land", + "songdalen", + "sor-aurdal", + "sor-fron", + "sor-odal", + "sor-varanger", + "sorfold", + "sorreisa", + "sortland", + "sorum", + "spjelkavik", + "spydeberg", + "st", + "stange", + "stat", + "stathelle", + "stavanger", + "stavern", + "steigen", + "steinkjer", + "stjordal", + "stjordalshalsen", + "stokke", + "stor-elvdal", + "stord", + "stordal", + "storfjord", + "strand", + "stranda", + "stryn", + "sula", + "suldal", + "sund", + "sunndal", + "surnadal", + "svalbard", + "sveio", + "svelvik", + "sykkylven", + "tana", + "tananger", + "telemark", + "time", + "tingvoll", + "tinn", + "tjeldsund", + "tjome", + "tm", + "tokke", + "tolga", + "tonsberg", + "torsken", + "tr", + "trana", + "tranby", + "tranoy", + "troandin", + "trogstad", + "tromsa", + "tromso", + "trondheim", + "trysil", + "tvedestrand", + "tydal", + "tynset", + "tysfjord", + "tysnes", + "tysvar", + "ullensaker", + "ullensvang", + "ulvik", + "unjarga", + "utsira", + "va", + "vaapste", + "vadso", + "vaga", + "vagan", + "vagsoy", + "vaksdal", + "valle", + "vang", + "vanylven", + "vardo", + "varggat", + "varoy", + "vefsn", + "vega", + "vegarshei", + "vennesla", + "verdal", + "verran", + "vestby", + "vestfold", + "vestnes", + "vestre-slidre", + "vestre-toten", + "vestvagoy", + "vevelstad", + "vf", + "vgs", + "vik", + "vikna", + "vindafjord", + "voagat", + "volda", + "voss", + "vossevangen", + "xn--andy-ira", + "xn--asky-ira", + "xn--aurskog-hland-jnb", + "xn--avery-yua", + "xn--bdddj-mrabd", + "xn--bearalvhki-y4a", + "xn--berlevg-jxa", + "xn--bhcavuotna-s4a", + "xn--bhccavuotna-k7a", + "xn--bidr-5nac", + "xn--bievt-0qa", + "xn--bjarky-fya", + "xn--bjddar-pta", + "xn--blt-elab", + "xn--bmlo-gra", + "xn--bod-2na", + "xn--brnny-wuac", + "xn--brnnysund-m8ac", + "xn--brum-voa", + "xn--btsfjord-9za", + "xn--davvenjrga-y4a", + "xn--dnna-gra", + "xn--drbak-wua", + "xn--dyry-ira", + "xn--eveni-0qa01ga", + "xn--finny-yua", + "xn--fjord-lra", + "xn--fl-zia", + "xn--flor-jra", + "xn--frde-gra", + "xn--frna-woa", + "xn--frya-hra", + "xn--ggaviika-8ya47h", + "xn--gildeskl-g0a", + "xn--givuotna-8ya", + "xn--gjvik-wua", + "xn--gls-elac", + "xn--h-2fa", + "xn--hbmer-xqa", + "xn--hcesuolo-7ya35b", + "xn--hgebostad-g3a", + "xn--hmmrfeasta-s4ac", + "xn--hnefoss-q1a", + "xn--hobl-ira", + "xn--holtlen-hxa", + "xn--hpmir-xqa", + "xn--hyanger-q1a", + "xn--hylandet-54a", + "xn--indery-fya", + "xn--jlster-bya", + "xn--jrpeland-54a", + "xn--karmy-yua", + "xn--kfjord-iua", + "xn--klbu-woa", + "xn--koluokta-7ya57h", + "xn--krager-gya", + "xn--kranghke-b0a", + "xn--krdsherad-m8a", + "xn--krehamn-dxa", + "xn--krjohka-hwab49j", + "xn--ksnes-uua", + "xn--kvfjord-nxa", + "xn--kvitsy-fya", + "xn--kvnangen-k0a", + "xn--l-1fa", + "xn--laheadju-7ya", + "xn--langevg-jxa", + "xn--ldingen-q1a", + "xn--leagaviika-52b", + "xn--lesund-hua", + "xn--lgrd-poac", + "xn--lhppi-xqa", + "xn--linds-pra", + "xn--loabt-0qa", + "xn--lrdal-sra", + "xn--lrenskog-54a", + "xn--lt-liac", + "xn--lten-gra", + "xn--lury-ira", + "xn--mely-ira", + "xn--merker-kua", + "xn--mjndalen-64a", + "xn--mlatvuopmi-s4a", + "xn--mli-tla", + "xn--mlselv-iua", + "xn--moreke-jua", + "xn--mosjen-eya", + "xn--mot-tla", + "xn--mre-og-romsdal-qqb", + "xn--msy-ula0h", + "xn--mtta-vrjjat-k7af", + "xn--muost-0qa", + "xn--nmesjevuemie-tcba", + "xn--nry-yla5g", + "xn--nttery-byae", + "xn--nvuotna-hwa", + "xn--oppegrd-ixa", + "xn--ostery-fya", + "xn--osyro-wua", + "xn--porsgu-sta26f", + "xn--rady-ira", + "xn--rdal-poa", + "xn--rde-ula", + "xn--rdy-0nab", + "xn--rennesy-v1a", + "xn--rhkkervju-01af", + "xn--rholt-mra", + "xn--risa-5na", + "xn--risr-ira", + "xn--rland-uua", + "xn--rlingen-mxa", + "xn--rmskog-bya", + "xn--rros-gra", + "xn--rskog-uua", + "xn--rst-0na", + "xn--rsta-fra", + "xn--ryken-vua", + "xn--ryrvik-bya", + "xn--s-1fa", + "xn--sandnessjen-ogb", + "xn--sandy-yua", + "xn--seral-lra", + "xn--sgne-gra", + "xn--skierv-uta", + "xn--skjervy-v1a", + "xn--skjk-soa", + "xn--sknit-yqa", + "xn--sknland-fxa", + "xn--slat-5na", + "xn--slt-elab", + "xn--smla-hra", + "xn--smna-gra", + "xn--snase-nra", + "xn--sndre-land-0cb", + "xn--snes-poa", + "xn--snsa-roa", + "xn--sr-aurdal-l8a", + "xn--sr-fron-q1a", + "xn--sr-odal-q1a", + "xn--sr-varanger-ggb", + "xn--srfold-bya", + "xn--srreisa-q1a", + "xn--srum-gra", + "xn--stfold-9xa", + "xn--stjrdal-s1a", + "xn--stjrdalshalsen-sqb", + "xn--stre-toten-zcb", + "xn--tjme-hra", + "xn--tnsberg-q1a", + "xn--trany-yua", + "xn--trgstad-r1a", + "xn--trna-woa", + "xn--troms-zua", + "xn--tysvr-vra", + "xn--unjrga-rta", + "xn--vads-jra", + "xn--vard-jra", + "xn--vegrshei-c0a", + "xn--vestvgy-ixa6o", + "xn--vg-yiab", + "xn--vgan-qoa", + "xn--vgsy-qoa0j", + "xn--vre-eiker-k8a", + "xn--vrggt-xqad", + "xn--vry-yla5g", + "xn--yer-zna", + "xn--ygarden-p1a", + "xn--ystre-slidre-ujb", + "gs", + "gs", + "nes", + "gs", + "nes", + "gs", + "os", + "valer", + "xn--vler-qoa", + "gs", + "gs", + "os", + "gs", + "heroy", + "sande", + "gs", + "gs", + "bo", + "heroy", + "xn--b-5ga", + "xn--hery-ira", + "gs", + "gs", + "gs", + "gs", + "valer", + "gs", + "gs", + "gs", + "gs", + "bo", + "xn--b-5ga", + "gs", + "gs", + "gs", + "sande", + "gs", + "sande", + "xn--hery-ira", + "xn--vler-qoa", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "merseine", + "mine", + "nom", + "shacknet", + "ac", + "co", + "cri", + "geek", + "gen", + "govt", + "health", + "iwi", + "kiwi", + "maori", + "mil", + "net", + "nym", + "org", + "parliament", + "school", + "xn--mori-qsa", + "blogspot", + "co", + "com", + "edu", + "gov", + "med", + "museum", + "net", + "org", + "pro", + "homelink", + "barsy", + "accesscam", + "ae", + "amune", + "blogdns", + "blogsite", + "bmoattachments", + "boldlygoingnowhere", + "cable-modem", + "camdvr", + "cdn77", + "cdn77-secure", + "certmgr", + "cloudns", + "collegefan", + "couchpotatofries", + "ddnss", + "diskstation", + "dnsalias", + "dnsdojo", + "doesntexist", + "dontexist", + "doomdns", + "dsmynas", + "duckdns", + "dvrdns", + "dynalias", + "dyndns", + "endofinternet", + "endoftheinternet", + "eu", + "familyds", + "fedorainfracloud", + "fedorapeople", + "fedoraproject", + "freeddns", + "from-me", + "game-host", + "gotdns", + "hepforge", + "hk", + "hobby-site", + "homedns", + "homeftp", + "homelinux", + "homeunix", + "hopto", + "is-a-bruinsfan", + "is-a-candidate", + "is-a-celticsfan", + "is-a-chef", + "is-a-geek", + "is-a-knight", + "is-a-linux-user", + "is-a-patsfan", + "is-a-soxfan", + "is-found", + "is-lost", + "is-saved", + "is-very-bad", + "is-very-evil", + "is-very-good", + "is-very-nice", + "is-very-sweet", + "isa-geek", + "js", + "kicks-ass", + "misconfused", + "mlbfan", + "my-firewall", + "myfirewall", + "myftp", + "mysecuritycamera", + "mywire", + "nflfan", + "no-ip", + "pimienta", + "podzone", + "poivron", + "potager", + "read-books", + "readmyblog", + "selfip", + "sellsyourhome", + "servebbs", + "serveftp", + "servegame", + "spdns", + "stuff-4-sale", + "sweetpepper", + "tunk", + "tuxfamily", + "twmail", + "ufcfan", + "us", + "webhop", + "webredirect", + "wmflabs", + "za", + "zapto", + "tele", + "c", + "rsc", + "origin", + "ssl", + "go", + "home", + "al", + "asso", + "at", + "au", + "be", + "bg", + "ca", + "cd", + "ch", + "cn", + "cy", + "cz", + "de", + "dk", + "edu", + "ee", + "es", + "fi", + "fr", + "gr", + "hr", + "hu", + "ie", + "il", + "in", + "int", + "is", + "it", + "jp", + "kr", + "lt", + "lu", + "lv", + "mc", + "me", + "mk", + "mt", + "my", + "net", + "ng", + "nl", + "no", + "nz", + "paris", + "pl", + "pt", + "q-a", + "ro", + "ru", + "se", + "si", + "sk", + "tr", + "uk", + "us", + "cloud", + "os", + "stg", + "app", + "os", + "app", + "nerdpol", + "abo", + "ac", + "com", + "edu", + "gob", + "ing", + "med", + "net", + "nom", + "org", + "sld", + "ybo", + "blogspot", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "nym", + "org", + "com", + "edu", + "org", + "com", + "edu", + "gov", + "i", + "mil", + "net", + "ngo", + "org", + "1337", + "biz", + "com", + "edu", + "fam", + "gob", + "gok", + "gon", + "gop", + "gos", + "gov", + "info", + "net", + "org", + "web", + "agro", + "aid", + "art", + "atm", + "augustow", + "auto", + "babia-gora", + "bedzin", + "beep", + "beskidy", + "bialowieza", + "bialystok", + "bielawa", + "bieszczady", + "biz", + "boleslawiec", + "bydgoszcz", + "bytom", + "cieszyn", + "co", + "com", + "czeladz", + "czest", + "dlugoleka", + "edu", + "elblag", + "elk", + "gda", + "gdansk", + "gdynia", + "gliwice", + "glogow", + "gmina", + "gniezno", + "gorlice", + "gov", + "grajewo", + "gsm", + "ilawa", + "info", + "jaworzno", + "jelenia-gora", + "jgora", + "kalisz", + "karpacz", + "kartuzy", + "kaszuby", + "katowice", + "kazimierz-dolny", + "kepno", + "ketrzyn", + "klodzko", + "kobierzyce", + "kolobrzeg", + "konin", + "konskowola", + "krakow", + "kutno", + "lapy", + "lebork", + "legnica", + "lezajsk", + "limanowa", + "lomza", + "lowicz", + "lubin", + "lukow", + "mail", + "malbork", + "malopolska", + "mazowsze", + "mazury", + "med", + "media", + "miasta", + "mielec", + "mielno", + "mil", + "mragowo", + "naklo", + "net", + "nieruchomosci", + "nom", + "nowaruda", + "nysa", + "olawa", + "olecko", + "olkusz", + "olsztyn", + "opoczno", + "opole", + "org", + "ostroda", + "ostroleka", + "ostrowiec", + "ostrowwlkp", + "pc", + "pila", + "pisz", + "podhale", + "podlasie", + "polkowice", + "pomorskie", + "pomorze", + "powiat", + "poznan", + "priv", + "prochowice", + "pruszkow", + "przeworsk", + "pulawy", + "radom", + "rawa-maz", + "realestate", + "rel", + "rybnik", + "rzeszow", + "sanok", + "sejny", + "sex", + "shop", + "sklep", + "skoczow", + "slask", + "slupsk", + "sopot", + "sos", + "sosnowiec", + "stalowa-wola", + "starachowice", + "stargard", + "suwalki", + "swidnica", + "swiebodzin", + "swinoujscie", + "szczecin", + "szczytno", + "szkola", + "targi", + "tarnobrzeg", + "tgory", + "tm", + "tourism", + "travel", + "turek", + "turystyka", + "tychy", + "ustka", + "walbrzych", + "warmia", + "warszawa", + "waw", + "wegrow", + "wielun", + "wlocl", + "wloclawek", + "wodzislaw", + "wolomin", + "wroc", + "wroclaw", + "zachpomor", + "zagan", + "zakopane", + "zarow", + "zgora", + "zgorzelec", + "ap", + "griw", + "ic", + "is", + "kmpsp", + "konsulat", + "kppsp", + "kwp", + "kwpsp", + "mup", + "mw", + "oirm", + "oum", + "pa", + "pinb", + "piw", + "po", + "psp", + "psse", + "pup", + "rzgw", + "sa", + "sdn", + "sko", + "so", + "sr", + "starostwo", + "ug", + "ugim", + "um", + "umig", + "upow", + "uppo", + "us", + "uw", + "uzs", + "wif", + "wiih", + "winb", + "wios", + "witd", + "wiw", + "wsa", + "wskr", + "wuoz", + "wzmiuw", + "zp", + "co", + "edu", + "gov", + "net", + "org", + "ac", + "biz", + "com", + "edu", + "est", + "gov", + "info", + "isla", + "name", + "net", + "org", + "pro", + "prof", + "aaa", + "aca", + "acct", + "avocat", + "bar", + "cloudns", + "cpa", + "eng", + "jur", + "law", + "med", + "recht", + "com", + "edu", + "gov", + "net", + "org", + "plo", + "sec", + "blogspot", + "com", + "edu", + "gov", + "int", + "net", + "nome", + "nym", + "org", + "publ", + "belau", + "cloudns", + "co", + "ed", + "go", + "ne", + "nom", + "or", + "com", + "coop", + "edu", + "gov", + "mil", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "nom", + "org", + "sch", + "asso", + "blogspot", + "com", + "nom", + "ybo", + "clan", + "arts", + "blogspot", + "com", + "firm", + "info", + "nom", + "nt", + "org", + "rec", + "shop", + "store", + "tm", + "www", + "lima-city", + "myddns", + "webspace", + "ac", + "blogspot", + "co", + "edu", + "gov", + "in", + "nom", + "org", + "ac", + "adygeya", + "bashkiria", + "bir", + "blogspot", + "cbg", + "cldmail", + "com", + "dagestan", + "edu", + "gov", + "grozny", + "int", + "kalmykia", + "kustanai", + "marine", + "mil", + "mordovia", + "msk", + "mytis", + "nalchik", + "net", + "nov", + "org", + "pp", + "pyatigorsk", + "spb", + "test", + "vladikavkaz", + "vladimir", + "hb", + "ac", + "co", + "com", + "edu", + "gouv", + "gov", + "int", + "mil", + "net", + "com", + "edu", + "gov", + "med", + "net", + "org", + "pub", + "sch", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "net", + "org", + "ybo", + "com", + "edu", + "gov", + "info", + "med", + "net", + "org", + "tv", + "a", + "ac", + "b", + "bd", + "blogspot", + "brand", + "c", + "com", + "d", + "e", + "f", + "fh", + "fhsk", + "fhv", + "g", + "h", + "i", + "k", + "komforb", + "kommunalforbund", + "komvux", + "l", + "lanbib", + "m", + "n", + "naturbruksgymn", + "o", + "org", + "p", + "parti", + "pp", + "press", + "r", + "s", + "t", + "tm", + "u", + "w", + "x", + "y", + "z", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "per", + "com", + "gov", + "hashbang", + "mil", + "net", + "now", + "org", + "platform", + "wedeploy", + "blogspot", + "nom", + "byen", + "cyon", + "platformsh", + "blogspot", + "nym", + "com", + "edu", + "gov", + "net", + "org", + "art", + "blogspot", + "com", + "edu", + "gouv", + "org", + "perso", + "univ", + "com", + "net", + "org", + "stackspace", + "uber", + "xs4all", + "co", + "com", + "consulado", + "edu", + "embaixada", + "gov", + "mil", + "net", + "org", + "principe", + "saotome", + "store", + "abkhazia", + "adygeya", + "aktyubinsk", + "arkhangelsk", + "armenia", + "ashgabad", + "azerbaijan", + "balashov", + "bashkiria", + "bryansk", + "bukhara", + "chimkent", + "dagestan", + "east-kazakhstan", + "exnet", + "georgia", + "grozny", + "ivanovo", + "jambyl", + "kalmykia", + "kaluga", + "karacol", + "karaganda", + "karelia", + "khakassia", + "krasnodar", + "kurgan", + "kustanai", + "lenug", + "mangyshlak", + "mordovia", + "msk", + "murmansk", + "nalchik", + "navoi", + "north-kazakhstan", + "nov", + "nym", + "obninsk", + "penza", + "pokrovsk", + "sochi", + "spb", + "tashkent", + "termez", + "togliatti", + "troitsk", + "tselinograd", + "tula", + "tuva", + "vladikavkaz", + "vladimir", + "vologda", + "barsy", + "com", + "edu", + "gob", + "org", + "red", + "gov", + "nym", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "knightpoint", + "ac", + "co", + "org", + "blogspot", + "ac", + "co", + "go", + "in", + "mi", + "net", + "or", + "ac", + "biz", + "co", + "com", + "edu", + "go", + "gov", + "int", + "mil", + "name", + "net", + "nic", + "org", + "test", + "web", + "gov", + "co", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "agrinet", + "com", + "defense", + "edunet", + "ens", + "fin", + "gov", + "ind", + "info", + "intl", + "mincom", + "nat", + "net", + "org", + "perso", + "rnrt", + "rns", + "rnu", + "tourism", + "turen", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "vpnplus", + "av", + "bbs", + "bel", + "biz", + "com", + "dr", + "edu", + "gen", + "gov", + "info", + "k12", + "kep", + "mil", + "name", + "nc", + "net", + "org", + "pol", + "tel", + "tv", + "web", + "blogspot", + "gov", + "ybo", + "aero", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "jobs", + "mobi", + "museum", + "name", + "net", + "org", + "pro", + "travel", + "better-than", + "dyndns", + "on-the-web", + "worse-than", + "blogspot", + "club", + "com", + "ebiz", + "edu", + "game", + "gov", + "idv", + "mil", + "net", + "nym", + "org", + "url", + "xn--czrw28b", + "xn--uc0atv", + "xn--zf0ao64a", + "mymailer", + "ac", + "co", + "go", + "hotel", + "info", + "me", + "mil", + "mobi", + "ne", + "or", + "sc", + "tv", + "biz", + "cc", + "cherkassy", + "cherkasy", + "chernigov", + "chernihiv", + "chernivtsi", + "chernovtsy", + "ck", + "cn", + "co", + "com", + "cr", + "crimea", + "cv", + "dn", + "dnepropetrovsk", + "dnipropetrovsk", + "dominic", + "donetsk", + "dp", + "edu", + "gov", + "if", + "in", + "inf", + "ivano-frankivsk", + "kh", + "kharkiv", + "kharkov", + "kherson", + "khmelnitskiy", + "khmelnytskyi", + "kiev", + "kirovograd", + "km", + "kr", + "krym", + "ks", + "kv", + "kyiv", + "lg", + "lt", + "ltd", + "lugansk", + "lutsk", + "lv", + "lviv", + "mk", + "mykolaiv", + "net", + "nikolaev", + "od", + "odesa", + "odessa", + "org", + "pl", + "poltava", + "pp", + "rivne", + "rovno", + "rv", + "sb", + "sebastopol", + "sevastopol", + "sm", + "sumy", + "te", + "ternopil", + "uz", + "uzhgorod", + "vinnica", + "vinnytsia", + "vn", + "volyn", + "yalta", + "zaporizhzhe", + "zaporizhzhia", + "zhitomir", + "zhytomyr", + "zp", + "zt", + "ac", + "blogspot", + "co", + "com", + "go", + "ne", + "nom", + "or", + "org", + "sc", + "ac", + "co", + "gov", + "ltd", + "me", + "net", + "nhs", + "org", + "plc", + "police", + "sch", + "blogspot", + "nh-serv", + "no-ip", + "wellbeingzone", + "homeoffice", + "service", + "ak", + "al", + "ar", + "as", + "az", + "ca", + "cloudns", + "co", + "ct", + "dc", + "de", + "dni", + "drud", + "fed", + "fl", + "ga", + "golffan", + "gu", + "hi", + "ia", + "id", + "il", + "in", + "is-by", + "isa", + "kids", + "ks", + "ky", + "la", + "land-4-sale", + "ma", + "md", + "me", + "mi", + "mn", + "mo", + "ms", + "mt", + "nc", + "nd", + "ne", + "nh", + "nj", + "nm", + "noip", + "nsn", + "nv", + "ny", + "oh", + "ok", + "or", + "pa", + "pointto", + "pr", + "ri", + "sc", + "sd", + "stuff-4-sale", + "tn", + "tx", + "ut", + "va", + "vi", + "vt", + "wa", + "wi", + "wv", + "wy", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "chtr", + "paroch", + "pvt", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "ann-arbor", + "cc", + "cog", + "dst", + "eaton", + "gen", + "k12", + "lib", + "mus", + "tec", + "washtenaw", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "cc", + "k12", + "lib", + "com", + "edu", + "gub", + "mil", + "net", + "nom", + "org", + "blogspot", + "co", + "com", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "arts", + "co", + "com", + "e12", + "edu", + "firm", + "gob", + "gov", + "info", + "int", + "mil", + "net", + "org", + "rec", + "store", + "tec", + "web", + "nom", + "co", + "com", + "k12", + "net", + "org", + "ac", + "biz", + "blogspot", + "com", + "edu", + "gov", + "health", + "info", + "int", + "name", + "net", + "org", + "pro", + "com", + "edu", + "net", + "org", + "advisor", + "com", + "dyndns", + "edu", + "gov", + "mypets", + "net", + "org", + "xn--80au", + "xn--90azh", + "xn--c1avg", + "xn--d1at", + "xn--o1ac", + "xn--o1ach", + "xn--12c1fe0br", + "xn--12cfi8ixb8l", + "xn--12co0c3b4eva", + "xn--h3cuzk1di", + "xn--m3ch0j3a", + "xn--o3cyx2a", + "blogsite", + "fhapp", + "ac", + "agric", + "alt", + "co", + "edu", + "gov", + "grondar", + "law", + "mil", + "net", + "ngo", + "nis", + "nom", + "org", + "school", + "tm", + "web", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "mil", + "net", + "org", + "sch", + "lima", + "triton", + "ac", + "co", + "gov", + "mil", + "org", +} diff --git a/vendor/golang.org/x/net/route/address.go b/vendor/golang.org/x/net/route/address.go new file mode 100644 index 0000000..e6bfa39 --- /dev/null +++ b/vendor/golang.org/x/net/route/address.go @@ -0,0 +1,425 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "runtime" + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// A LinkAddr represents a link-layer address. +type LinkAddr struct { + Index int // interface index when attached + Name string // interface name when attached + Addr []byte // link-layer address when attached +} + +// Family implements the Family method of Addr interface. +func (a *LinkAddr) Family() int { return sysAF_LINK } + +func (a *LinkAddr) lenAndSpace() (int, int) { + l := 8 + len(a.Name) + len(a.Addr) + return l, roundup(l) +} + +func (a *LinkAddr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + nlen, alen := len(a.Name), len(a.Addr) + if nlen > 255 || alen > 255 { + return 0, errInvalidAddr + } + b[0] = byte(l) + b[1] = sysAF_LINK + if a.Index > 0 { + nativeEndian.PutUint16(b[2:4], uint16(a.Index)) + } + data := b[8:] + if nlen > 0 { + b[5] = byte(nlen) + copy(data[:nlen], a.Addr) + data = data[nlen:] + } + if alen > 0 { + b[6] = byte(alen) + copy(data[:alen], a.Name) + data = data[alen:] + } + return ll, nil +} + +func parseLinkAddr(b []byte) (Addr, error) { + if len(b) < 8 { + return nil, errInvalidAddr + } + _, a, err := parseKernelLinkAddr(sysAF_LINK, b[4:]) + if err != nil { + return nil, err + } + a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4])) + return a, nil +} + +// parseKernelLinkAddr parses b as a link-layer address in +// conventional BSD kernel form. +func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) { + // The encoding looks like the following: + // +----------------------------+ + // | Type (1 octet) | + // +----------------------------+ + // | Name length (1 octet) | + // +----------------------------+ + // | Address length (1 octet) | + // +----------------------------+ + // | Selector length (1 octet) | + // +----------------------------+ + // | Data (variable) | + // +----------------------------+ + // + // On some platforms, all-bit-one of length field means "don't + // care". + nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) + if nlen == 0xff { + nlen = 0 + } + if alen == 0xff { + alen = 0 + } + if slen == 0xff { + slen = 0 + } + l := 4 + nlen + alen + slen + if len(b) < l { + return 0, nil, errInvalidAddr + } + data := b[4:] + var name string + var addr []byte + if nlen > 0 { + name = string(data[:nlen]) + data = data[nlen:] + } + if alen > 0 { + addr = data[:alen] + data = data[alen:] + } + return l, &LinkAddr{Name: name, Addr: addr}, nil +} + +// An Inet4Addr represents an internet address for IPv4. +type Inet4Addr struct { + IP [4]byte // IP address +} + +// Family implements the Family method of Addr interface. +func (a *Inet4Addr) Family() int { return sysAF_INET } + +func (a *Inet4Addr) lenAndSpace() (int, int) { + return sizeofSockaddrInet, roundup(sizeofSockaddrInet) +} + +func (a *Inet4Addr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + b[0] = byte(l) + b[1] = sysAF_INET + copy(b[4:8], a.IP[:]) + return ll, nil +} + +// An Inet6Addr represents an internet address for IPv6. +type Inet6Addr struct { + IP [16]byte // IP address + ZoneID int // zone identifier +} + +// Family implements the Family method of Addr interface. +func (a *Inet6Addr) Family() int { return sysAF_INET6 } + +func (a *Inet6Addr) lenAndSpace() (int, int) { + return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6) +} + +func (a *Inet6Addr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + b[0] = byte(l) + b[1] = sysAF_INET6 + copy(b[8:24], a.IP[:]) + if a.ZoneID > 0 { + nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID)) + } + return ll, nil +} + +// parseInetAddr parses b as an internet address for IPv4 or IPv6. +func parseInetAddr(af int, b []byte) (Addr, error) { + switch af { + case sysAF_INET: + if len(b) < sizeofSockaddrInet { + return nil, errInvalidAddr + } + a := &Inet4Addr{} + copy(a.IP[:], b[4:8]) + return a, nil + case sysAF_INET6: + if len(b) < sizeofSockaddrInet6 { + return nil, errInvalidAddr + } + a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))} + copy(a.IP[:], b[8:24]) + if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) { + // KAME based IPv6 protocol stack usually + // embeds the interface index in the + // interface-local or link-local address as + // the kernel-internal form. + id := int(bigEndian.Uint16(a.IP[2:4])) + if id != 0 { + a.ZoneID = id + a.IP[2], a.IP[3] = 0, 0 + } + } + return a, nil + default: + return nil, errInvalidAddr + } +} + +// parseKernelInetAddr parses b as an internet address in conventional +// BSD kernel form. +func parseKernelInetAddr(af int, b []byte) (int, Addr, error) { + // The encoding looks similar to the NLRI encoding. + // +----------------------------+ + // | Length (1 octet) | + // +----------------------------+ + // | Address prefix (variable) | + // +----------------------------+ + // + // The differences between the kernel form and the NLRI + // encoding are: + // + // - The length field of the kernel form indicates the prefix + // length in bytes, not in bits + // + // - In the kernel form, zero value of the length field + // doesn't mean 0.0.0.0/0 or ::/0 + // + // - The kernel form appends leading bytes to the prefix field + // to make the tuple to be conformed with + // the routing message boundary + l := int(b[0]) + if runtime.GOOS == "darwin" { + // On Darwn, an address in the kernel form is also + // used as a message filler. + if l == 0 || len(b) > roundup(l) { + l = roundup(l) + } + } else { + l = roundup(l) + } + if len(b) < l { + return 0, nil, errInvalidAddr + } + // Don't reorder case expressions. + // The case expressions for IPv6 must come first. + const ( + off4 = 4 // offset of in_addr + off6 = 8 // offset of in6_addr + ) + switch { + case b[0] == sizeofSockaddrInet6: + a := &Inet6Addr{} + copy(a.IP[:], b[off6:off6+16]) + return int(b[0]), a, nil + case af == sysAF_INET6: + a := &Inet6Addr{} + if l-1 < off6 { + copy(a.IP[:], b[1:l]) + } else { + copy(a.IP[:], b[l-off6:l]) + } + return int(b[0]), a, nil + case b[0] == sizeofSockaddrInet: + a := &Inet4Addr{} + copy(a.IP[:], b[off4:off4+4]) + return int(b[0]), a, nil + default: // an old fashion, AF_UNSPEC or unknown means AF_INET + a := &Inet4Addr{} + if l-1 < off4 { + copy(a.IP[:], b[1:l]) + } else { + copy(a.IP[:], b[l-off4:l]) + } + return int(b[0]), a, nil + } +} + +// A DefaultAddr represents an address of various operating +// system-specific features. +type DefaultAddr struct { + af int + Raw []byte // raw format of address +} + +// Family implements the Family method of Addr interface. +func (a *DefaultAddr) Family() int { return a.af } + +func (a *DefaultAddr) lenAndSpace() (int, int) { + l := len(a.Raw) + return l, roundup(l) +} + +func (a *DefaultAddr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + if l > 255 { + return 0, errInvalidAddr + } + b[1] = byte(l) + copy(b[:l], a.Raw) + return ll, nil +} + +func parseDefaultAddr(b []byte) (Addr, error) { + if len(b) < 2 || len(b) < int(b[0]) { + return nil, errInvalidAddr + } + a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]} + return a, nil +} + +func addrsSpace(as []Addr) int { + var l int + for _, a := range as { + switch a := a.(type) { + case *LinkAddr: + _, ll := a.lenAndSpace() + l += ll + case *Inet4Addr: + _, ll := a.lenAndSpace() + l += ll + case *Inet6Addr: + _, ll := a.lenAndSpace() + l += ll + case *DefaultAddr: + _, ll := a.lenAndSpace() + l += ll + } + } + return l +} + +// marshalAddrs marshals as and returns a bitmap indicating which +// address is stored in b. +func marshalAddrs(b []byte, as []Addr) (uint, error) { + var attrs uint + for i, a := range as { + switch a := a.(type) { + case *LinkAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet4Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet6Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *DefaultAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + } + } + return attrs, nil +} + +func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) { + var as [sysRTAX_MAX]Addr + af := int(sysAF_UNSPEC) + for i := uint(0); i < sysRTAX_MAX && len(b) >= roundup(0); i++ { + if attrs&(1<> 8) +} + +func (binaryLittleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (binaryLittleEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (binaryLittleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +type binaryBigEndian struct{} + +func (binaryBigEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[1]) | uint16(b[0])<<8 +} + +func (binaryBigEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (binaryBigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (binaryBigEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (binaryBigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} diff --git a/vendor/golang.org/x/net/route/defs_darwin.go b/vendor/golang.org/x/net/route/defs_darwin.go new file mode 100644 index 0000000..e771644 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_darwin.go @@ -0,0 +1,114 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STAT = C.NET_RT_STAT + sysNET_RT_TRASH = C.NET_RT_TRASH + sysNET_RT_IFLIST2 = C.NET_RT_IFLIST2 + sysNET_RT_DUMP2 = C.NET_RT_DUMP2 + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFINFO2 = C.RTM_IFINFO2 + sysRTM_NEWMADDR2 = C.RTM_NEWMADDR2 + sysRTM_GET2 = C.RTM_GET2 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDarwin15 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDarwin15 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDarwin15 = C.sizeof_struct_ifma_msghdr + sizeofIfMsghdr2Darwin15 = C.sizeof_struct_if_msghdr2 + sizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2 + sizeofIfDataDarwin15 = C.sizeof_struct_if_data + sizeofIfData64Darwin15 = C.sizeof_struct_if_data64 + + sizeofRtMsghdrDarwin15 = C.sizeof_struct_rt_msghdr + sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2 + sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_dragonfly.go b/vendor/golang.org/x/net/route/defs_dragonfly.go new file mode 100644 index 0000000..dd31de2 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_dragonfly.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B + sysCTL_LWKT = C.CTL_LWKT + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_MPLS1 = C.RTA_MPLS1 + sysRTA_MPLS2 = C.RTA_MPLS2 + sysRTA_MPLS3 = C.RTA_MPLS3 + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MPLS1 = C.RTAX_MPLS1 + sysRTAX_MPLS2 = C.RTAX_MPLS2 + sysRTAX_MPLS3 = C.RTAX_MPLS3 + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrDragonFlyBSD4 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_freebsd.go b/vendor/golang.org/x/net/route/defs_freebsd.go new file mode 100644 index 0000000..d95594d --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_freebsd.go @@ -0,0 +1,337 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include + +struct if_data_freebsd7 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd8 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd9 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd10 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_vhid; + u_char ifi_baudrate_pf; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + uint64_t ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd11 { + uint8_t ifi_type; + uint8_t ifi_physical; + uint8_t ifi_addrlen; + uint8_t ifi_hdrlen; + uint8_t ifi_link_state; + uint8_t ifi_vhid; + uint16_t ifi_datalen; + uint32_t ifi_mtu; + uint32_t ifi_metric; + uint64_t ifi_baudrate; + uint64_t ifi_ipackets; + uint64_t ifi_ierrors; + uint64_t ifi_opackets; + uint64_t ifi_oerrors; + uint64_t ifi_collisions; + uint64_t ifi_ibytes; + uint64_t ifi_obytes; + uint64_t ifi_imcasts; + uint64_t ifi_omcasts; + uint64_t ifi_iqdrops; + uint64_t ifi_oqdrops; + uint64_t ifi_noproto; + uint64_t ifi_hwassist; + union { + time_t tt; + uint64_t ph; + } __ifi_epoch; + union { + struct timeval tv; + struct { + uint64_t ph1; + uint64_t ph2; + } ph; + } __ifi_lastchange; +}; + +struct if_msghdr_freebsd7 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd7 ifm_data; +}; + +struct if_msghdr_freebsd8 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd8 ifm_data; +}; + +struct if_msghdr_freebsd9 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd9 ifm_data; +}; + +struct if_msghdr_freebsd10 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd10 ifm_data; +}; + +struct if_msghdr_freebsd11 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd11 ifm_data; +}; +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_IFMALIST = C.NET_RT_IFMALIST + sysNET_RT_IFLISTL = C.NET_RT_IFLISTL +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrlFreeBSD10 = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10 = C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10 = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10 = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7 = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8 = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9 = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7 = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8 = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9 = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11 + + sizeofIfMsghdrlFreeBSD10Emu = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10Emu = C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10Emu = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10Emu = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10Emu = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10Emu = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7Emu = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8Emu = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9Emu = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7Emu = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8Emu = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9Emu = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11 + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_netbsd.go b/vendor/golang.org/x/net/route/defs_netbsd.go new file mode 100644 index 0000000..b0abd54 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_netbsd.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_DDB = C.CTL_DDB + sysCTL_PROC = C.CTL_PROC + sysCTL_VENDOR = C.CTL_VENDOR + sysCTL_EMUL = C.CTL_EMUL + sysCTL_SECURITY = C.CTL_SECURITY + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + sysRTM_SETGATE = C.RTM_SETGATE + sysRTM_LLINFO_UPD = C.RTM_LLINFO_UPD + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_CHGADDR = C.RTM_CHGADDR + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_TAG = C.RTA_TAG + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_TAG = C.RTAX_TAG + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrNetBSD7 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrNetBSD7 = C.sizeof_struct_ifa_msghdr + sizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_openbsd.go b/vendor/golang.org/x/net/route/defs_openbsd.go new file mode 100644 index 0000000..173bb5d --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_openbsd.go @@ -0,0 +1,116 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STATS = C.NET_RT_STATS + sysNET_RT_TABLE = C.NET_RT_TABLE + sysNET_RT_IFNAMES = C.NET_RT_IFNAMES + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_FS = C.CTL_FS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_DDB = C.CTL_DDB + sysCTL_VFS = C.CTL_VFS + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_DESYNC = C.RTM_DESYNC + sysRTM_INVALIDATE = C.RTM_INVALIDATE + sysRTM_BFD = C.RTM_BFD + sysRTM_PROPOSAL = C.RTM_PROPOSAL + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_SRC = C.RTA_SRC + sysRTA_SRCMASK = C.RTA_SRCMASK + sysRTA_LABEL = C.RTA_LABEL + sysRTA_BFD = C.RTA_BFD + sysRTA_DNS = C.RTA_DNS + sysRTA_STATIC = C.RTA_STATIC + sysRTA_SEARCH = C.RTA_SEARCH + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_SRC = C.RTAX_SRC + sysRTAX_SRCMASK = C.RTAX_SRCMASK + sysRTAX_LABEL = C.RTAX_LABEL + sysRTAX_BFD = C.RTAX_BFD + sysRTAX_DNS = C.RTAX_DNS + sysRTAX_STATIC = C.RTAX_STATIC + sysRTAX_SEARCH = C.RTAX_SEARCH + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofRtMsghdr = C.sizeof_struct_rt_msghdr + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/interface.go b/vendor/golang.org/x/net/route/interface.go new file mode 100644 index 0000000..854906d --- /dev/null +++ b/vendor/golang.org/x/net/route/interface.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +// An InterfaceMessage represents an interface message. +type InterfaceMessage struct { + Version int // message version + Type int // message type + Flags int // interface flags + Index int // interface index + Name string // interface name + Addrs []Addr // addresses + + extOff int // offset of header extension + raw []byte // raw message +} + +// An InterfaceAddrMessage represents an interface address message. +type InterfaceAddrMessage struct { + Version int // message version + Type int // message type + Flags int // interface flags + Index int // interface index + Addrs []Addr // addresses + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceAddrMessage) Sys() []Sys { return nil } + +// An InterfaceMulticastAddrMessage represents an interface multicast +// address message. +type InterfaceMulticastAddrMessage struct { + Version int // message version + Type int // messsage type + Flags int // interface flags + Index int // interface index + Addrs []Addr // addresses + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMulticastAddrMessage) Sys() []Sys { return nil } + +// An InterfaceAnnounceMessage represents an interface announcement +// message. +type InterfaceAnnounceMessage struct { + Version int // message version + Type int // message type + Index int // interface index + Name string // interface name + What int // what type of announcement + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceAnnounceMessage) Sys() []Sys { return nil } diff --git a/vendor/golang.org/x/net/route/interface_announce.go b/vendor/golang.org/x/net/route/interface_announce.go new file mode 100644 index 0000000..520d657 --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_announce.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd netbsd + +package route + +func (w *wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAnnounceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Index: int(nativeEndian.Uint16(b[4:6])), + What: int(nativeEndian.Uint16(b[22:24])), + raw: b[:l], + } + for i := 0; i < 16; i++ { + if b[6+i] != 0 { + continue + } + m.Name = string(b[6 : 6+i]) + break + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_classic.go b/vendor/golang.org/x/net/route/interface_classic.go new file mode 100644 index 0000000..ac4e7a6 --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_classic.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly netbsd + +package route + +import "runtime" + +func (w *wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Addrs: make([]Addr, sysRTAX_MAX), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + extOff: w.extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[w.bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + raw: b[:l], + } + if runtime.GOOS == "netbsd" { + m.Index = int(nativeEndian.Uint16(b[16:18])) + } else { + m.Index = int(nativeEndian.Uint16(b[12:14])) + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_freebsd.go b/vendor/golang.org/x/net/route/interface_freebsd.go new file mode 100644 index 0000000..9f6f50c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_freebsd.go @@ -0,0 +1,78 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, error) { + var extOff, bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 20 { + return nil, errMessageTooShort + } + extOff = int(nativeEndian.Uint16(b[18:20])) + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + extOff = w.extOff + bodyOff = w.bodyOff + } + if len(b) < extOff || len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + Addrs: make([]Addr, sysRTAX_MAX), + extOff: extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message, error) { + var bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 24 { + return nil, errMessageTooShort + } + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + bodyOff = w.bodyOff + } + if len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_multicast.go b/vendor/golang.org/x/net/route/interface_multicast.go new file mode 100644 index 0000000..1e99a9c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_multicast.go @@ -0,0 +1,30 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd + +package route + +func (w *wireFormat) parseInterfaceMulticastAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceMulticastAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_openbsd.go b/vendor/golang.org/x/net/route/interface_openbsd.go new file mode 100644 index 0000000..e4a143c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_openbsd.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 32 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[12:16])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + Addrs: make([]Addr, sysRTAX_MAX), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + a, err := parseLinkAddr(b[ll:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 24 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + bodyOff := int(nativeEndian.Uint16(b[4:6])) + if len(b) < bodyOff { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[12:16])), + Index: int(nativeEndian.Uint16(b[6:8])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} + +func (*wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 26 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAnnounceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Index: int(nativeEndian.Uint16(b[6:8])), + What: int(nativeEndian.Uint16(b[8:10])), + raw: b[:l], + } + for i := 0; i < 16; i++ { + if b[10+i] != 0 { + continue + } + m.Name = string(b[10 : 10+i]) + break + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/message.go b/vendor/golang.org/x/net/route/message.go new file mode 100644 index 0000000..0fa7e09 --- /dev/null +++ b/vendor/golang.org/x/net/route/message.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +// A Message represents a routing message. +type Message interface { + // Sys returns operating system-specific information. + Sys() []Sys +} + +// A Sys reprensents operating system-specific information. +type Sys interface { + // SysType returns a type of operating system-specific + // information. + SysType() SysType +} + +// A SysType represents a type of operating system-specific +// information. +type SysType int + +const ( + SysMetrics SysType = iota + SysStats +) + +// ParseRIB parses b as a routing information base and returns a list +// of routing messages. +func ParseRIB(typ RIBType, b []byte) ([]Message, error) { + if !typ.parseable() { + return nil, errUnsupportedMessage + } + var msgs []Message + nmsgs, nskips := 0, 0 + for len(b) > 4 { + nmsgs++ + l := int(nativeEndian.Uint16(b[:2])) + if l == 0 { + return nil, errInvalidMessage + } + if len(b) < l { + return nil, errMessageTooShort + } + if b[2] != sysRTM_VERSION { + b = b[l:] + continue + } + if w, ok := wireFormats[int(b[3])]; !ok { + nskips++ + } else { + m, err := w.parse(typ, b) + if err != nil { + return nil, err + } + if m == nil { + nskips++ + } else { + msgs = append(msgs, m) + } + } + b = b[l:] + } + // We failed to parse any of the messages - version mismatch? + if nmsgs != len(msgs)+nskips { + return nil, errMessageMismatch + } + return msgs, nil +} diff --git a/vendor/golang.org/x/net/route/message_darwin_test.go b/vendor/golang.org/x/net/route/message_darwin_test.go new file mode 100644 index 0000000..316aa75 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_darwin_test.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "testing" + +func TestFetchAndParseRIBOnDarwin(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_FLAGS, sysNET_RT_DUMP2, sysNET_RT_IFLIST2} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/vendor/golang.org/x/net/route/message_freebsd_test.go b/vendor/golang.org/x/net/route/message_freebsd_test.go new file mode 100644 index 0000000..db4b567 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_freebsd_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "testing" + "unsafe" +) + +func TestFetchAndParseRIBOnFreeBSD(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_IFMALIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} + +func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) { + if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil { + t.Skip("NET_RT_IFLISTL not supported") + } + var p uintptr + if kernelAlign != int(unsafe.Sizeof(p)) { + t.Skip("NET_RT_IFLIST vs. NET_RT_IFLISTL doesn't work for 386 emulation on amd64") + } + + var tests = [2]struct { + typ RIBType + b []byte + msgs []Message + ss []string + }{ + {typ: sysNET_RT_IFLIST}, + {typ: sysNET_RT_IFLISTL}, + } + for i := range tests { + var lastErr error + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, tests[i].typ) + if err != nil { + lastErr = err + continue + } + tests[i].msgs = append(tests[i].msgs, rs...) + } + if len(tests[i].msgs) == 0 && lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + tests[i].ss, lastErr = msgs(tests[i].msgs).validate() + if lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + for _, s := range tests[i].ss { + t.Log(s) + } + } + for i := len(tests) - 1; i > 0; i-- { + if len(tests[i].ss) != len(tests[i-1].ss) { + t.Errorf("got %v; want %v", tests[i].ss, tests[i-1].ss) + continue + } + for j, s1 := range tests[i].ss { + s0 := tests[i-1].ss[j] + if s1 != s0 { + t.Errorf("got %s; want %s", s1, s0) + } + } + } +} diff --git a/vendor/golang.org/x/net/route/message_test.go b/vendor/golang.org/x/net/route/message_test.go new file mode 100644 index 0000000..e848dab --- /dev/null +++ b/vendor/golang.org/x/net/route/message_test.go @@ -0,0 +1,239 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "os" + "syscall" + "testing" + "time" +) + +func TestFetchAndParseRIB(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_DUMP, sysNET_RT_IFLIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(typ, s) + } + } +} + +var ( + rtmonSock int + rtmonErr error +) + +func init() { + // We need to keep rtmonSock alive to avoid treading on + // recycled socket descriptors. + rtmonSock, rtmonErr = syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) +} + +// TestMonitorAndParseRIB leaks a worker goroutine and a socket +// descriptor but that's intentional. +func TestMonitorAndParseRIB(t *testing.T) { + if testing.Short() || os.Getuid() != 0 { + t.Skip("must be root") + } + + if rtmonErr != nil { + t.Fatal(rtmonErr) + } + + // We suppose that using an IPv4 link-local address and the + // dot1Q ID for Token Ring and FDDI doesn't harm anyone. + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(1002); err != nil { + t.Skip(err) + } + if err := pv.setup(); err != nil { + t.Skip(err) + } + pv.teardown() + + go func() { + b := make([]byte, os.Getpagesize()) + for { + // There's no easy way to unblock this read + // call because the routing message exchange + // over routing socket is a connectionless + // message-oriented protocol, no control plane + // for signaling connectivity, and we cannot + // use the net package of standard library due + // to the lack of support for routing socket + // and circular dependency. + n, err := syscall.Read(rtmonSock, b) + if err != nil { + return + } + ms, err := ParseRIB(0, b[:n]) + if err != nil { + t.Error(err) + return + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(err) + return + } + for _, s := range ss { + t.Log(s) + } + } + }() + + for _, vid := range []int{1002, 1003, 1004, 1005} { + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(vid); err != nil { + t.Fatal(err) + } + if err := pv.setup(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + if err := pv.teardown(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + } +} + +func TestParseRIBWithFuzz(t *testing.T) { + for _, fuzz := range []string{ + "0\x00\x05\x050000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "0000000000000\x02000000" + + "00000000", + "\x02\x00\x05\f0000000000000000" + + "0\x0200000000000000", + "\x02\x00\x05\x100000000000000\x1200" + + "0\x00\xff\x00", + "\x02\x00\x05\f0000000000000000" + + "0\x12000\x00\x02\x0000", + "\x00\x00\x00\x01\x00", + "00000", + } { + for typ := RIBType(0); typ < 256; typ++ { + ParseRIB(typ, []byte(fuzz)) + } + } +} + +func TestRouteMessage(t *testing.T) { + s, err := syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) + if err != nil { + t.Fatal(err) + } + defer syscall.Close(s) + + var ms []RouteMessage + for _, af := range []int{sysAF_INET, sysAF_INET6} { + if _, err := fetchAndParseRIB(af, sysNET_RT_DUMP); err != nil { + t.Log(err) + continue + } + switch af { + case sysAF_INET: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet4Addr{}, + nil, + &Inet4Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + }, + }, + }...) + case sysAF_INET6: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet6Addr{}, + nil, + &Inet6Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + }, + }, + }...) + } + } + for i, m := range ms { + m.ID = uintptr(os.Getpid()) + m.Seq = i + 1 + wb, err := m.Marshal() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + if _, err := syscall.Write(s, wb); err != nil { + t.Fatalf("%v: %v", m, err) + } + rb := make([]byte, os.Getpagesize()) + n, err := syscall.Read(s, rb) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + rms, err := ParseRIB(0, rb[:n]) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, rm := range rms { + err := rm.(*RouteMessage).Err + if err != nil { + t.Errorf("%v: %v", m, err) + } + } + ss, err := msgs(rms).validate() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/vendor/golang.org/x/net/route/route.go b/vendor/golang.org/x/net/route/route.go new file mode 100644 index 0000000..081da0d --- /dev/null +++ b/vendor/golang.org/x/net/route/route.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +// Package route provides basic functions for the manipulation of +// packet routing facilities on BSD variants. +// +// The package supports any version of Darwin, any version of +// DragonFly BSD, FreeBSD 7 through 11, NetBSD 6 and above, and +// OpenBSD 5.6 and above. +package route + +import ( + "errors" + "os" + "syscall" +) + +var ( + errUnsupportedMessage = errors.New("unsupported message") + errMessageMismatch = errors.New("message mismatch") + errMessageTooShort = errors.New("message too short") + errInvalidMessage = errors.New("invalid message") + errInvalidAddr = errors.New("invalid address") + errShortBuffer = errors.New("short buffer") +) + +// A RouteMessage represents a message conveying an address prefix, a +// nexthop address and an output interface. +// +// Unlike other messages, this message can be used to query adjacency +// information for the given address prefix, to add a new route, and +// to delete or modify the existing route from the routing information +// base inside the kernel by writing and reading route messages on a +// routing socket. +// +// For the manipulation of routing information, the route message must +// contain appropriate fields that include: +// +// Version = +// Type = +// Flags = +// Index = +// ID = +// Seq = +// Addrs = +// +// The Type field specifies a type of manipulation, the Flags field +// specifies a class of target information and the Addrs field +// specifies target information like the following: +// +// route.RouteMessage{ +// Version: RTM_VERSION, +// Type: RTM_GET, +// Flags: RTF_UP | RTF_HOST, +// ID: uintptr(os.Getpid()), +// Seq: 1, +// Addrs: []route.Addrs{ +// RTAX_DST: &route.Inet4Addr{ ... }, +// RTAX_IFP: &route.LinkAddr{ ... }, +// RTAX_BRD: &route.Inet4Addr{ ... }, +// }, +// } +// +// The values for the above fields depend on the implementation of +// each operating system. +// +// The Err field on a response message contains an error value on the +// requested operation. If non-nil, the requested operation is failed. +type RouteMessage struct { + Version int // message version + Type int // message type + Flags int // route flags + Index int // interface index when atatched + ID uintptr // sender's identifier; usually process ID + Seq int // sequence number + Err error // error on requested operation + Addrs []Addr // addresses + + extOff int // offset of header extension + raw []byte // raw message +} + +// Marshal returns the binary encoding of m. +func (m *RouteMessage) Marshal() ([]byte, error) { + return m.marshal() +} + +// A RIBType reprensents a type of routing information base. +type RIBType int + +const ( + RIBTypeRoute RIBType = syscall.NET_RT_DUMP + RIBTypeInterface RIBType = syscall.NET_RT_IFLIST +) + +// FetchRIB fetches a routing information base from the operating +// system. +// +// The provided af must be an address family. +// +// The provided arg must be a RIBType-specific argument. +// When RIBType is related to routes, arg might be a set of route +// flags. When RIBType is related to network interfaces, arg might be +// an interface index or a set of interface flags. In most cases, zero +// means a wildcard. +func FetchRIB(af int, typ RIBType, arg int) ([]byte, error) { + mib := [6]int32{sysCTL_NET, sysAF_ROUTE, 0, int32(af), int32(typ), int32(arg)} + n := uintptr(0) + if err := sysctl(mib[:], nil, &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + if n == 0 { + return nil, nil + } + b := make([]byte, n) + if err := sysctl(mib[:], &b[0], &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + return b[:n], nil +} diff --git a/vendor/golang.org/x/net/route/route_classic.go b/vendor/golang.org/x/net/route/route_classic.go new file mode 100644 index 0000000..02fa688 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_classic.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package route + +import ( + "runtime" + "syscall" +) + +func (m *RouteMessage) marshal() ([]byte, error) { + w, ok := wireFormats[m.Type] + if !ok { + return nil, errUnsupportedMessage + } + l := w.bodyOff + addrsSpace(m.Addrs) + if runtime.GOOS == "darwin" { + // Fix stray pointer writes on macOS. + // See golang.org/issue/22456. + l += 1024 + } + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint32(b[8:12], uint32(m.Flags)) + nativeEndian.PutUint16(b[4:6], uint16(m.Index)) + nativeEndian.PutUint32(b[16:20], uint32(m.ID)) + nativeEndian.PutUint32(b[20:24], uint32(m.Seq)) + attrs, err := marshalAddrs(b[w.bodyOff:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[4:6])), + ID: uintptr(nativeEndian.Uint32(b[16:20])), + Seq: int(nativeEndian.Uint32(b[20:24])), + extOff: w.extOff, + raw: b[:l], + } + errno := syscall.Errno(nativeEndian.Uint32(b[28:32])) + if errno != 0 { + m.Err = errno + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_openbsd.go b/vendor/golang.org/x/net/route/route_openbsd.go new file mode 100644 index 0000000..daf2e90 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_openbsd.go @@ -0,0 +1,65 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "syscall" + +func (m *RouteMessage) marshal() ([]byte, error) { + l := sizeofRtMsghdr + addrsSpace(m.Addrs) + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint16(b[4:6], uint16(sizeofRtMsghdr)) + nativeEndian.PutUint32(b[16:20], uint32(m.Flags)) + nativeEndian.PutUint16(b[6:8], uint16(m.Index)) + nativeEndian.PutUint32(b[24:28], uint32(m.ID)) + nativeEndian.PutUint32(b[28:32], uint32(m.Seq)) + attrs, err := marshalAddrs(b[sizeofRtMsghdr:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < sizeofRtMsghdr { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + ID: uintptr(nativeEndian.Uint32(b[24:28])), + Seq: int(nativeEndian.Uint32(b[28:32])), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + errno := syscall.Errno(nativeEndian.Uint32(b[32:36])) + if errno != 0 { + m.Err = errno + } + as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:]) + if err != nil { + return nil, err + } + m.Addrs = as + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_test.go b/vendor/golang.org/x/net/route/route_test.go new file mode 100644 index 0000000..61bd174 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_test.go @@ -0,0 +1,390 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "fmt" + "os/exec" + "runtime" + "time" +) + +func (m *RouteMessage) String() string { + return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[12:16]))) +} + +func (m *InterfaceMessage) String() string { + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + return fmt.Sprintf("%s", attrs) +} + +func (m *InterfaceAddrMessage) String() string { + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + return fmt.Sprintf("%s", attrs) +} + +func (m *InterfaceMulticastAddrMessage) String() string { + return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[4:8]))) +} + +func (m *InterfaceAnnounceMessage) String() string { + what := "" + switch m.What { + case 0: + what = "arrival" + case 1: + what = "departure" + } + return fmt.Sprintf("(%d %s %s)", m.Index, m.Name, what) +} + +func (m *InterfaceMetrics) String() string { + return fmt.Sprintf("(type=%d mtu=%d)", m.Type, m.MTU) +} + +func (m *RouteMetrics) String() string { + return fmt.Sprintf("(pmtu=%d)", m.PathMTU) +} + +type addrAttrs uint + +var addrAttrNames = [...]string{ + "dst", + "gateway", + "netmask", + "genmask", + "ifp", + "ifa", + "author", + "brd", + "df:mpls1-n:tag-o:src", // mpls1 for dragonfly, tag for netbsd, src for openbsd + "df:mpls2-o:srcmask", // mpls2 for dragonfly, srcmask for openbsd + "df:mpls3-o:label", // mpls3 for dragonfly, label for openbsd + "o:bfd", // bfd for openbsd + "o:dns", // dns for openbsd + "o:static", // static for openbsd + "o:search", // search for openbsd +} + +func (attrs addrAttrs) String() string { + var s string + for i, name := range addrAttrNames { + if attrs&(1<" + } + return s +} + +type msgs []Message + +func (ms msgs) validate() ([]string, error) { + var ss []string + for _, m := range ms { + switch m := m.(type) { + case *RouteMessage: + if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[12:16]))); err != nil { + return nil, err + } + sys := m.Sys() + if sys == nil { + return nil, fmt.Errorf("no sys for %s", m.String()) + } + ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String()) + case *InterfaceMessage: + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + if err := addrs(m.Addrs).match(attrs); err != nil { + return nil, err + } + sys := m.Sys() + if sys == nil { + return nil, fmt.Errorf("no sys for %s", m.String()) + } + ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String()) + case *InterfaceAddrMessage: + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + if err := addrs(m.Addrs).match(attrs); err != nil { + return nil, err + } + ss = append(ss, m.String()+" "+addrs(m.Addrs).String()) + case *InterfaceMulticastAddrMessage: + if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[4:8]))); err != nil { + return nil, err + } + ss = append(ss, m.String()+" "+addrs(m.Addrs).String()) + case *InterfaceAnnounceMessage: + ss = append(ss, m.String()) + default: + ss = append(ss, fmt.Sprintf("%+v", m)) + } + } + return ss, nil +} + +type syss []Sys + +func (sys syss) String() string { + var s string + for _, sy := range sys { + switch sy := sy.(type) { + case *InterfaceMetrics: + if len(s) > 0 { + s += " " + } + s += sy.String() + case *RouteMetrics: + if len(s) > 0 { + s += " " + } + s += sy.String() + } + } + return s +} + +type addrFamily int + +func (af addrFamily) String() string { + switch af { + case sysAF_UNSPEC: + return "unspec" + case sysAF_LINK: + return "link" + case sysAF_INET: + return "inet4" + case sysAF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +const hexDigit = "0123456789abcdef" + +type llAddr []byte + +func (a llAddr) String() string { + if len(a) == 0 { + return "" + } + buf := make([]byte, 0, len(a)*3-1) + for i, b := range a { + if i > 0 { + buf = append(buf, ':') + } + buf = append(buf, hexDigit[b>>4]) + buf = append(buf, hexDigit[b&0xF]) + } + return string(buf) +} + +type ipAddr []byte + +func (a ipAddr) String() string { + if len(a) == 0 { + return "" + } + if len(a) == 4 { + return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) + } + if len(a) == 16 { + return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) + } + s := make([]byte, len(a)*2) + for i, tn := range a { + s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] + } + return string(s) +} + +func (a *LinkAddr) String() string { + name := a.Name + if name == "" { + name = "" + } + lla := llAddr(a.Addr).String() + if lla == "" { + lla = "" + } + return fmt.Sprintf("(%v %d %s %s)", addrFamily(a.Family()), a.Index, name, lla) +} + +func (a *Inet4Addr) String() string { + return fmt.Sprintf("(%v %v)", addrFamily(a.Family()), ipAddr(a.IP[:])) +} + +func (a *Inet6Addr) String() string { + return fmt.Sprintf("(%v %v %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.ZoneID) +} + +func (a *DefaultAddr) String() string { + return fmt.Sprintf("(%v %s)", addrFamily(a.Family()), ipAddr(a.Raw[2:]).String()) +} + +type addrs []Addr + +func (as addrs) String() string { + var s string + for _, a := range as { + if a == nil { + continue + } + if len(s) > 0 { + s += " " + } + switch a := a.(type) { + case *LinkAddr: + s += a.String() + case *Inet4Addr: + s += a.String() + case *Inet6Addr: + s += a.String() + case *DefaultAddr: + s += a.String() + } + } + if s == "" { + return "" + } + return s +} + +func (as addrs) match(attrs addrAttrs) error { + var ts addrAttrs + af := sysAF_UNSPEC + for i := range as { + if as[i] != nil { + ts |= 1 << uint(i) + } + switch as[i].(type) { + case *Inet4Addr: + if af == sysAF_UNSPEC { + af = sysAF_INET + } + if af != sysAF_INET { + return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af)) + } + case *Inet6Addr: + if af == sysAF_UNSPEC { + af = sysAF_INET6 + } + if af != sysAF_INET6 { + return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af)) + } + } + } + if ts != attrs && ts > attrs { + return fmt.Errorf("%v not included in %v", ts, attrs) + } + return nil +} + +func fetchAndParseRIB(af int, typ RIBType) ([]Message, error) { + var err error + var b []byte + for i := 0; i < 3; i++ { + if b, err = FetchRIB(af, typ, 0); err != nil { + time.Sleep(10 * time.Millisecond) + continue + } + break + } + if err != nil { + return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err) + } + ms, err := ParseRIB(typ, b) + if err != nil { + return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err) + } + return ms, nil +} + +// propVirtual is a proprietary virtual network interface. +type propVirtual struct { + name string + addr, mask string + setupCmds []*exec.Cmd + teardownCmds []*exec.Cmd +} + +func (pv *propVirtual) setup() error { + for _, cmd := range pv.setupCmds { + if err := cmd.Run(); err != nil { + pv.teardown() + return err + } + } + return nil +} + +func (pv *propVirtual) teardown() error { + for _, cmd := range pv.teardownCmds { + if err := cmd.Run(); err != nil { + return err + } + } + return nil +} + +func (pv *propVirtual) configure(suffix int) error { + if runtime.GOOS == "openbsd" { + pv.name = fmt.Sprintf("vether%d", suffix) + } else { + pv.name = fmt.Sprintf("vlan%d", suffix) + } + xname, err := exec.LookPath("ifconfig") + if err != nil { + return err + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "create"}, + }) + if runtime.GOOS == "netbsd" { + // NetBSD requires an underlying dot1Q-capable network + // interface. + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "vlan", fmt.Sprintf("%d", suffix&0xfff), "vlanif", "wm0"}, + }) + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "inet", pv.addr, "netmask", pv.mask}, + }) + pv.teardownCmds = append(pv.teardownCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "destroy"}, + }) + return nil +} diff --git a/vendor/golang.org/x/net/route/sys.go b/vendor/golang.org/x/net/route/sys.go new file mode 100644 index 0000000..3d0ee9b --- /dev/null +++ b/vendor/golang.org/x/net/route/sys.go @@ -0,0 +1,39 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "unsafe" + +var ( + nativeEndian binaryByteOrder + kernelAlign int + wireFormats map[int]*wireFormat +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } + kernelAlign, wireFormats = probeRoutingStack() +} + +func roundup(l int) int { + if l == 0 { + return kernelAlign + } + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} + +type wireFormat struct { + extOff int // offset of header extension + bodyOff int // offset of message body + parse func(RIBType, []byte) (Message, error) +} diff --git a/vendor/golang.org/x/net/route/sys_darwin.go b/vendor/golang.org/x/net/route/sys_darwin.go new file mode 100644 index 0000000..d2daf5c --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_darwin.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STAT, sysNET_RT_TRASH: + return false + default: + return true + } +} + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + rtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15} + rtm.parse = rtm.parseRouteMessage + rtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15} + rtm2.parse = rtm2.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15} + ifm.parse = ifm.parseInterfaceMessage + ifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15} + ifm2.parse = ifm2.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15} + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15} + ifmam2.parse = ifmam2.parseInterfaceMulticastAddrMessage + // Darwin kernels require 32-bit aligned access to routing facilities. + return 4, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFINFO2: ifm2, + sysRTM_NEWMADDR2: ifmam2, + sysRTM_GET2: rtm2, + } +} diff --git a/vendor/golang.org/x/net/route/sys_dragonfly.go b/vendor/golang.org/x/net/route/sys_dragonfly.go new file mode 100644 index 0000000..0c14bc2 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_dragonfly.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4} + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: sizeofIfAnnouncemsghdrDragonFlyBSD4} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/vendor/golang.org/x/net/route/sys_freebsd.go b/vendor/golang.org/x/net/route/sys_freebsd.go new file mode 100644 index 0000000..89ba1c4 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_freebsd.go @@ -0,0 +1,155 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "syscall" + "unsafe" +) + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + if kernelAlign == 8 { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } + } + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + wordSize := int(unsafe.Sizeof(p)) + align := int(unsafe.Sizeof(p)) + // In the case of kern.supported_archs="amd64 i386", we need + // to know the underlying kernel's architecture because the + // alignment for routing facilities are set at the build time + // of the kernel. + conf, _ := syscall.Sysctl("kern.conftxt") + for i, j := 0, 0; j < len(conf); j++ { + if conf[j] != '\n' { + continue + } + s := conf[i:j] + i = j + 1 + if len(s) > len("machine") && s[:len("machine")] == "machine" { + s = s[len("machine"):] + for k := 0; k < len(s); k++ { + if s[k] == ' ' || s[k] == '\t' { + s = s[1:] + } + break + } + if s == "amd64" { + align = 8 + } + break + } + } + var rtm, ifm, ifam, ifmam, ifanm *wireFormat + if align != wordSize { // 386 emulation on amd64 + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu} + } else { + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10} + } + rel, _ := syscall.SysctlUint32("kern.osreldate") + switch { + case rel < 800000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD7 + } + case 800000 <= rel && rel < 900000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD8 + } + case 900000 <= rel && rel < 1000000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD9 + } + case 1000000 <= rel && rel < 1100000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD10 + } + default: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD11 + } + } + rtm.parse = rtm.parseRouteMessage + ifm.parse = ifm.parseInterfaceMessage + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return align, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/vendor/golang.org/x/net/route/sys_netbsd.go b/vendor/golang.org/x/net/route/sys_netbsd.go new file mode 100644 index 0000000..02f71d5 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_netbsd.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// RouteMetrics represents route metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7} + ifam.parse = ifam.parseInterfaceAddrMessage + ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + // NetBSD 6 and above kernels require 64-bit aligned access to + // routing facilities. + return 8, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFANNOUNCE: ifanm, + sysRTM_IFINFO: ifm, + } +} diff --git a/vendor/golang.org/x/net/route/sys_openbsd.go b/vendor/golang.org/x/net/route/sys_openbsd.go new file mode 100644 index 0000000..c5674e8 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_openbsd.go @@ -0,0 +1,80 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STATS, sysNET_RT_TABLE: + return false + default: + return true + } +} + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[60:64])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[24]), + MTU: int(nativeEndian.Uint32(m.raw[28:32])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + rtm := &wireFormat{extOff: -1, bodyOff: -1} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: -1, bodyOff: -1} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: -1, bodyOff: -1} + ifam.parse = ifam.parseInterfaceAddrMessage + ifanm := &wireFormat{extOff: -1, bodyOff: -1} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_IFANNOUNCE: ifanm, + sysRTM_DESYNC: rtm, + } +} diff --git a/vendor/golang.org/x/net/route/syscall.go b/vendor/golang.org/x/net/route/syscall.go new file mode 100644 index 0000000..5f69ea6 --- /dev/null +++ b/vendor/golang.org/x/net/route/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "syscall" + "unsafe" +) + +var zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var p unsafe.Pointer + if len(mib) > 0 { + p = unsafe.Pointer(&mib[0]) + } else { + p = unsafe.Pointer(&zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), newlen) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/route/zsys_darwin.go b/vendor/golang.org/x/net/route/zsys_darwin.go new file mode 100644 index 0000000..4e2e1ab --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STAT = 0x4 + sysNET_RT_TRASH = 0x5 + sysNET_RT_IFLIST2 = 0x6 + sysNET_RT_DUMP2 = 0x7 + sysNET_RT_MAXID = 0xa +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_MAXID = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFINFO2 = 0x12 + sysRTM_NEWMADDR2 = 0x13 + sysRTM_GET2 = 0x14 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrDarwin15 = 0x70 + sizeofIfaMsghdrDarwin15 = 0x14 + sizeofIfmaMsghdrDarwin15 = 0x10 + sizeofIfMsghdr2Darwin15 = 0xa0 + sizeofIfmaMsghdr2Darwin15 = 0x14 + sizeofIfDataDarwin15 = 0x60 + sizeofIfData64Darwin15 = 0x80 + + sizeofRtMsghdrDarwin15 = 0x5c + sizeofRtMsghdr2Darwin15 = 0x5c + sizeofRtMetricsDarwin15 = 0x38 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_dragonfly.go b/vendor/golang.org/x/net/route/zsys_dragonfly.go new file mode 100644 index 0000000..719c88d --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_dragonfly.go @@ -0,0 +1,98 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_MAXID = 0x4 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 + sysCTL_LWKT = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x6 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_MPLS1 = 0x100 + sysRTA_MPLS2 = 0x200 + sysRTA_MPLS3 = 0x400 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MPLS1 = 0x8 + sysRTAX_MPLS2 = 0x9 + sysRTAX_MPLS3 = 0xa + sysRTAX_MAX = 0xb +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = 0xb0 + sizeofIfaMsghdrDragonFlyBSD4 = 0x14 + sizeofIfmaMsghdrDragonFlyBSD4 = 0x10 + sizeofIfAnnouncemsghdrDragonFlyBSD4 = 0x18 + + sizeofRtMsghdrDragonFlyBSD4 = 0x98 + sizeofRtMetricsDragonFlyBSD4 = 0x70 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_386.go b/vendor/golang.org/x/net/route/zsys_freebsd_386.go new file mode 100644 index 0000000..b03bc01 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_386.go @@ -0,0 +1,126 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x60 + sizeofIfMsghdrFreeBSD8 = 0x60 + sizeofIfMsghdrFreeBSD9 = 0x60 + sizeofIfMsghdrFreeBSD10 = 0x64 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x50 + sizeofIfDataFreeBSD8 = 0x50 + sizeofIfDataFreeBSD9 = 0x50 + sizeofIfDataFreeBSD10 = 0x54 + sizeofIfDataFreeBSD11 = 0x98 + + // MODIFIED BY HAND FOR 386 EMULATION ON AMD64 + // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go new file mode 100644 index 0000000..0b675b3 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0xb0 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0xb0 + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x98 + sizeofRtMetricsFreeBSD10 = 0x70 + + sizeofIfMsghdrFreeBSD7 = 0xa8 + sizeofIfMsghdrFreeBSD8 = 0xa8 + sizeofIfMsghdrFreeBSD9 = 0xa8 + sizeofIfMsghdrFreeBSD10 = 0xa8 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x98 + sizeofIfDataFreeBSD8 = 0x98 + sizeofIfDataFreeBSD9 = 0x98 + sizeofIfDataFreeBSD10 = 0x98 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go new file mode 100644 index 0000000..58f8ea1 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x70 + sizeofIfMsghdrFreeBSD8 = 0x70 + sizeofIfMsghdrFreeBSD9 = 0x70 + sizeofIfMsghdrFreeBSD10 = 0x70 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x60 + sizeofIfDataFreeBSD8 = 0x60 + sizeofIfDataFreeBSD9 = 0x60 + sizeofIfDataFreeBSD10 = 0x60 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0x68 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0x6c + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x5c + sizeofRtMetricsFreeBSD10Emu = 0x38 + + sizeofIfMsghdrFreeBSD7Emu = 0x70 + sizeofIfMsghdrFreeBSD8Emu = 0x70 + sizeofIfMsghdrFreeBSD9Emu = 0x70 + sizeofIfMsghdrFreeBSD10Emu = 0x70 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x60 + sizeofIfDataFreeBSD8Emu = 0x60 + sizeofIfDataFreeBSD9Emu = 0x60 + sizeofIfDataFreeBSD10Emu = 0x60 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_netbsd.go b/vendor/golang.org/x/net/route/zsys_netbsd.go new file mode 100644 index 0000000..e0df45e --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_netbsd.go @@ -0,0 +1,97 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x22 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x5 + sysNET_RT_MAXID = 0x6 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_DDB = 0x9 + sysCTL_PROC = 0xa + sysCTL_VENDOR = 0xb + sysCTL_EMUL = 0xc + sysCTL_SECURITY = 0xd + sysCTL_MAXID = 0xe +) + +const ( + sysRTM_VERSION = 0x4 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFANNOUNCE = 0x10 + sysRTM_IEEE80211 = 0x11 + sysRTM_SETGATE = 0x12 + sysRTM_LLINFO_UPD = 0x13 + sysRTM_IFINFO = 0x14 + sysRTM_CHGADDR = 0x15 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_TAG = 0x100 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_TAG = 0x8 + sysRTAX_MAX = 0x9 +) + +const ( + sizeofIfMsghdrNetBSD7 = 0x98 + sizeofIfaMsghdrNetBSD7 = 0x18 + sizeofIfAnnouncemsghdrNetBSD7 = 0x18 + + sizeofRtMsghdrNetBSD7 = 0x78 + sizeofRtMetricsNetBSD7 = 0x50 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_openbsd.go b/vendor/golang.org/x/net/route/zsys_openbsd.go new file mode 100644 index 0000000..db8c8ef --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_openbsd.go @@ -0,0 +1,101 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STATS = 0x4 + sysNET_RT_TABLE = 0x5 + sysNET_RT_IFNAMES = 0x6 + sysNET_RT_MAXID = 0x7 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_FS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_DDB = 0x9 + sysCTL_VFS = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_IFANNOUNCE = 0xf + sysRTM_DESYNC = 0x10 + sysRTM_INVALIDATE = 0x11 + sysRTM_BFD = 0x12 + sysRTM_PROPOSAL = 0x13 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_SRC = 0x100 + sysRTA_SRCMASK = 0x200 + sysRTA_LABEL = 0x400 + sysRTA_BFD = 0x800 + sysRTA_DNS = 0x1000 + sysRTA_STATIC = 0x2000 + sysRTA_SEARCH = 0x4000 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_SRC = 0x8 + sysRTAX_SRCMASK = 0x9 + sysRTAX_LABEL = 0xa + sysRTAX_BFD = 0xb + sysRTAX_DNS = 0xc + sysRTAX_STATIC = 0xd + sysRTAX_SEARCH = 0xe + sysRTAX_MAX = 0xf +) + +const ( + sizeofRtMsghdr = 0x60 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 0000000..c646a69 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

    /debug/events

    + +
  • + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
    {{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
    + +{{if $.EventLogs}} +


    +

    Family: {{$.Family}}

    + +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
    WhenElapsed
    {{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
    {{$el.Stack|trimSpace}}
    {{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
    +{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000..9bf4286 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. +const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
    Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
    +
    + +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
    [{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
    +`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/histogram_test.go b/vendor/golang.org/x/net/trace/histogram_test.go new file mode 100644 index 0000000..d384b93 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram_test.go @@ -0,0 +1,325 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "math" + "testing" +) + +type sumTest struct { + value int64 + sum int64 + sumOfSquares float64 + total int64 +} + +var sumTests = []sumTest{ + {100, 100, 10000, 1}, + {50, 150, 12500, 2}, + {50, 200, 15000, 3}, + {50, 250, 17500, 4}, +} + +type bucketingTest struct { + in int64 + log int + bucket int +} + +var bucketingTests = []bucketingTest{ + {0, 0, 0}, + {1, 1, 0}, + {2, 2, 1}, + {3, 2, 1}, + {4, 3, 2}, + {1000, 10, 9}, + {1023, 10, 9}, + {1024, 11, 10}, + {1000000, 20, 19}, +} + +type multiplyTest struct { + in int64 + ratio float64 + expectedSum int64 + expectedTotal int64 + expectedSumOfSquares float64 +} + +var multiplyTests = []multiplyTest{ + {15, 2.5, 37, 2, 562.5}, + {128, 4.6, 758, 13, 77953.9}, +} + +type percentileTest struct { + fraction float64 + expected int64 +} + +var percentileTests = []percentileTest{ + {0.25, 48}, + {0.5, 96}, + {0.6, 109}, + {0.75, 128}, + {0.90, 205}, + {0.95, 230}, + {0.99, 256}, +} + +func TestSum(t *testing.T) { + var h histogram + + for _, test := range sumTests { + h.addMeasurement(test.value) + sum := h.sum + if sum != test.sum { + t.Errorf("h.Sum = %v WANT: %v", sum, test.sum) + } + + sumOfSquares := h.sumOfSquares + if sumOfSquares != test.sumOfSquares { + t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares) + } + + total := h.total() + if total != test.total { + t.Errorf("h.Total = %v WANT: %v", total, test.total) + } + } +} + +func TestMultiply(t *testing.T) { + var h histogram + for i, test := range multiplyTests { + h.addMeasurement(test.in) + h.Multiply(test.ratio) + if h.sum != test.expectedSum { + t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum) + } + if h.total() != test.expectedTotal { + t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal) + } + if h.sumOfSquares != test.expectedSumOfSquares { + t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares) + } + } +} + +func TestBucketingFunctions(t *testing.T) { + for _, test := range bucketingTests { + log := log2(test.in) + if log != test.log { + t.Errorf("log2 = %v WANT: %v", log, test.log) + } + + bucket := getBucket(test.in) + if bucket != test.bucket { + t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket) + } + } +} + +func TestAverage(t *testing.T) { + a := new(histogram) + average := a.average() + if average != 0 { + t.Errorf("Average of empty histogram was %v WANT: 0", average) + } + + a.addMeasurement(1) + a.addMeasurement(1) + a.addMeasurement(3) + const expected = float64(5) / float64(3) + average = a.average() + + if !isApproximate(average, expected) { + t.Errorf("Average = %g WANT: %v", average, expected) + } +} + +func TestStandardDeviation(t *testing.T) { + a := new(histogram) + add(a, 10, 1<<4) + add(a, 10, 1<<5) + add(a, 10, 1<<6) + stdDev := a.standardDeviation() + const expected = 19.95 + + if !isApproximate(stdDev, expected) { + t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected) + } + + // No values + a = new(histogram) + stdDev = a.standardDeviation() + + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 1, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 10, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } +} + +func TestPercentileBoundary(t *testing.T) { + a := new(histogram) + add(a, 5, 1<<4) + add(a, 10, 1<<6) + add(a, 5, 1<<7) + + for _, test := range percentileTests { + percentile := a.percentileBoundary(test.fraction) + if percentile != test.expected { + t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected) + } + } +} + +func TestCopyFrom(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1} + + a.CopyFrom(&b) + + if a.String() != b.String() { + t.Errorf("a.String = %s WANT: %s", a.String(), b.String()) + } +} + +func TestClear(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + + a.Clear() + + expected := "0, 0.000000, 0, 0, []" + if a.String() != expected { + t.Errorf("a.String = %s WANT %s", a.String(), expected) + } +} + +func TestNew(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := "0, 0.000000, 0, 0, []" + if b.(*histogram).String() != expected { + t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected) + } +} + +func TestAdd(t *testing.T) { + // The tests here depend on the associativity of addMeasurement and Add. + // Add empty observation + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := a.String() + a.Add(b) + if a.String() != expected { + t.Errorf("a.String = %s WANT: %s", a.String(), expected) + } + + // Add same bucketed value, no new buckets + c := new(histogram) + d := new(histogram) + e := new(histogram) + c.addMeasurement(12) + d.addMeasurement(11) + e.addMeasurement(12) + e.addMeasurement(11) + c.Add(d) + if c.String() != e.String() { + t.Errorf("c.String = %s WANT: %s", c.String(), e.String()) + } + + // Add bucketed values + f := new(histogram) + g := new(histogram) + h := new(histogram) + f.addMeasurement(4) + f.addMeasurement(12) + f.addMeasurement(100) + g.addMeasurement(18) + g.addMeasurement(36) + g.addMeasurement(255) + h.addMeasurement(4) + h.addMeasurement(12) + h.addMeasurement(100) + h.addMeasurement(18) + h.addMeasurement(36) + h.addMeasurement(255) + f.Add(g) + if f.String() != h.String() { + t.Errorf("f.String = %q WANT: %q", f.String(), h.String()) + } + + // add buckets to no buckets + i := new(histogram) + j := new(histogram) + k := new(histogram) + j.addMeasurement(18) + j.addMeasurement(36) + j.addMeasurement(255) + k.addMeasurement(18) + k.addMeasurement(36) + k.addMeasurement(255) + i.Add(j) + if i.String() != k.String() { + t.Errorf("i.String = %q WANT: %q", i.String(), k.String()) + } + + // add buckets to single value (no overlap) + l := new(histogram) + m := new(histogram) + n := new(histogram) + l.addMeasurement(0) + m.addMeasurement(18) + m.addMeasurement(36) + m.addMeasurement(255) + n.addMeasurement(0) + n.addMeasurement(18) + n.addMeasurement(36) + n.addMeasurement(255) + l.Add(m) + if l.String() != n.String() { + t.Errorf("l.String = %q WANT: %q", l.String(), n.String()) + } + + // mixed order + o := new(histogram) + p := new(histogram) + o.addMeasurement(0) + o.addMeasurement(2) + o.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(2) + if o.String() != p.String() { + t.Errorf("o.String = %q WANT: %q", o.String(), p.String()) + } +} + +func add(h *histogram, times int, val int64) { + for i := 0; i < times; i++ { + h.addMeasurement(val) + } +} + +func isApproximate(x, y float64) bool { + return math.Abs(x-y) < 1e-2 +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000..a46ee0e --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1103 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. + spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

    /debug/requests

    +{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
    {{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
    +{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
    +

    Family: {{$.Family}}

    + +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

    Showing {{len $.Traces}} of {{$.Total}} traces.

    +{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
    + {{if $.Active}}Active{{else}}Completed{{end}} Requests +
    WhenElapsed (s)
    {{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
    {{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
    +{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

    Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

    +{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 0000000..d608191 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 0000000..df6e1fb --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_test.go b/vendor/golang.org/x/net/trace/trace_test.go new file mode 100644 index 0000000..bfd9dfe --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_test.go @@ -0,0 +1,178 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "net/http" + "reflect" + "testing" +) + +type s struct{} + +func (s) String() string { return "lazy string" } + +// TestReset checks whether all the fields are zeroed after reset. +func TestReset(t *testing.T) { + tr := New("foo", "bar") + tr.LazyLog(s{}, false) + tr.LazyPrintf("%d", 1) + tr.SetRecycler(func(_ interface{}) {}) + tr.SetTraceInfo(3, 4) + tr.SetMaxEvents(100) + tr.SetError() + tr.Finish() + + tr.(*trace).reset() + + if !reflect.DeepEqual(tr, new(trace)) { + t.Errorf("reset didn't clear all fields: %+v", tr) + } +} + +// TestResetLog checks whether all the fields are zeroed after reset. +func TestResetLog(t *testing.T) { + el := NewEventLog("foo", "bar") + el.Printf("message") + el.Errorf("error") + el.Finish() + + el.(*eventLog).reset() + + if !reflect.DeepEqual(el, new(eventLog)) { + t.Errorf("reset didn't clear all fields: %+v", el) + } +} + +func TestAuthRequest(t *testing.T) { + testCases := []struct { + host string + want bool + }{ + {host: "192.168.23.1", want: false}, + {host: "192.168.23.1:8080", want: false}, + {host: "malformed remote addr", want: false}, + {host: "localhost", want: true}, + {host: "localhost:8080", want: true}, + {host: "127.0.0.1", want: true}, + {host: "127.0.0.1:8080", want: true}, + {host: "::1", want: true}, + {host: "[::1]:8080", want: true}, + } + for _, tt := range testCases { + req := &http.Request{RemoteAddr: tt.host} + any, sensitive := AuthRequest(req) + if any != tt.want || sensitive != tt.want { + t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want) + } + } +} + +// TestParseTemplate checks that all templates used by this package are valid +// as they are parsed on first usage +func TestParseTemplate(t *testing.T) { + if tmpl := distTmpl(); tmpl == nil { + t.Error("invalid template returned from distTmpl()") + } + if tmpl := pageTmpl(); tmpl == nil { + t.Error("invalid template returned from pageTmpl()") + } + if tmpl := eventsTmpl(); tmpl == nil { + t.Error("invalid template returned from eventsTmpl()") + } +} + +func benchmarkTrace(b *testing.B, maxEvents, numEvents int) { + numSpans := (b.N + numEvents + 1) / numEvents + + for i := 0; i < numSpans; i++ { + tr := New("test", "test") + tr.SetMaxEvents(maxEvents) + for j := 0; j < numEvents; j++ { + tr.LazyPrintf("%d", j) + } + tr.Finish() + } +} + +func BenchmarkTrace_Default_2(b *testing.B) { + benchmarkTrace(b, 0, 2) +} + +func BenchmarkTrace_Default_10(b *testing.B) { + benchmarkTrace(b, 0, 10) +} + +func BenchmarkTrace_Default_100(b *testing.B) { + benchmarkTrace(b, 0, 100) +} + +func BenchmarkTrace_Default_1000(b *testing.B) { + benchmarkTrace(b, 0, 1000) +} + +func BenchmarkTrace_Default_10000(b *testing.B) { + benchmarkTrace(b, 0, 10000) +} + +func BenchmarkTrace_10_2(b *testing.B) { + benchmarkTrace(b, 10, 2) +} + +func BenchmarkTrace_10_10(b *testing.B) { + benchmarkTrace(b, 10, 10) +} + +func BenchmarkTrace_10_100(b *testing.B) { + benchmarkTrace(b, 10, 100) +} + +func BenchmarkTrace_10_1000(b *testing.B) { + benchmarkTrace(b, 10, 1000) +} + +func BenchmarkTrace_10_10000(b *testing.B) { + benchmarkTrace(b, 10, 10000) +} + +func BenchmarkTrace_100_2(b *testing.B) { + benchmarkTrace(b, 100, 2) +} + +func BenchmarkTrace_100_10(b *testing.B) { + benchmarkTrace(b, 100, 10) +} + +func BenchmarkTrace_100_100(b *testing.B) { + benchmarkTrace(b, 100, 100) +} + +func BenchmarkTrace_100_1000(b *testing.B) { + benchmarkTrace(b, 100, 1000) +} + +func BenchmarkTrace_100_10000(b *testing.B) { + benchmarkTrace(b, 100, 10000) +} + +func BenchmarkTrace_1000_2(b *testing.B) { + benchmarkTrace(b, 1000, 2) +} + +func BenchmarkTrace_1000_10(b *testing.B) { + benchmarkTrace(b, 1000, 10) +} + +func BenchmarkTrace_1000_100(b *testing.B) { + benchmarkTrace(b, 1000, 100) +} + +func BenchmarkTrace_1000_1000(b *testing.B) { + benchmarkTrace(b, 1000, 1000) +} + +func BenchmarkTrace_1000_10000(b *testing.B) { + benchmarkTrace(b, 1000, 10000) +} diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go new file mode 100644 index 0000000..748118d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file.go @@ -0,0 +1,796 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "io" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/net/context" +) + +// slashClean is equivalent to but slightly more efficient than +// path.Clean("/" + name). +func slashClean(name string) string { + if name == "" || name[0] != '/' { + name = "/" + name + } + return path.Clean(name) +} + +// A FileSystem implements access to a collection of named files. The elements +// in a file path are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +// +// Each method has the same semantics as the os package's function of the same +// name. +// +// Note that the os.Rename documentation says that "OS-specific restrictions +// might apply". In particular, whether or not renaming a file or directory +// overwriting another existing file or directory is an error is OS-dependent. +type FileSystem interface { + Mkdir(ctx context.Context, name string, perm os.FileMode) error + OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) + RemoveAll(ctx context.Context, name string) error + Rename(ctx context.Context, oldName, newName string) error + Stat(ctx context.Context, name string) (os.FileInfo, error) +} + +// A File is returned by a FileSystem's OpenFile method and can be served by a +// Handler. +// +// A File may optionally implement the DeadPropsHolder interface, if it can +// load and save dead properties. +type File interface { + http.File + io.Writer +} + +// A Dir implements FileSystem using the native file system restricted to a +// specific directory tree. +// +// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's +// string value is a filename on the native file system, not a URL, so it is +// separated by filepath.Separator, which isn't necessarily '/'. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) resolve(name string) string { + // This implementation is based on Dir.Open's code in the standard net/http package. + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return "" + } + dir := string(d) + if dir == "" { + dir = "." + } + return filepath.Join(dir, filepath.FromSlash(slashClean(name))) +} + +func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + return os.Mkdir(name, perm) +} + +func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + f, err := os.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return f, nil +} + +func (d Dir) RemoveAll(ctx context.Context, name string) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + if name == filepath.Clean(string(d)) { + // Prohibit removing the virtual root directory. + return os.ErrInvalid + } + return os.RemoveAll(name) +} + +func (d Dir) Rename(ctx context.Context, oldName, newName string) error { + if oldName = d.resolve(oldName); oldName == "" { + return os.ErrNotExist + } + if newName = d.resolve(newName); newName == "" { + return os.ErrNotExist + } + if root := filepath.Clean(string(d)); root == oldName || root == newName { + // Prohibit renaming from or to the virtual root directory. + return os.ErrInvalid + } + return os.Rename(oldName, newName) +} + +func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + return os.Stat(name) +} + +// NewMemFS returns a new in-memory FileSystem implementation. +func NewMemFS() FileSystem { + return &memFS{ + root: memFSNode{ + children: make(map[string]*memFSNode), + mode: 0660 | os.ModeDir, + modTime: time.Now(), + }, + } +} + +// A memFS implements FileSystem, storing all metadata and actual file data +// in-memory. No limits on filesystem size are used, so it is not recommended +// this be used where the clients are untrusted. +// +// Concurrent access is permitted. The tree structure is protected by a mutex, +// and each node's contents and metadata are protected by a per-node mutex. +// +// TODO: Enforce file permissions. +type memFS struct { + mu sync.Mutex + root memFSNode +} + +// TODO: clean up and rationalize the walk/find code. + +// walk walks the directory tree for the fullname, calling f at each step. If f +// returns an error, the walk will be aborted and return that same error. +// +// dir is the directory at that step, frag is the name fragment, and final is +// whether it is the final step. For example, walking "/foo/bar/x" will result +// in 3 calls to f: +// - "/", "foo", false +// - "/foo/", "bar", false +// - "/foo/bar/", "x", true +// The frag argument will be empty only if dir is the root node and the walk +// ends at that root node. +func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error { + original := fullname + fullname = slashClean(fullname) + + // Strip any leading "/"s to make fullname a relative path, as the walk + // starts at fs.root. + if fullname[0] == '/' { + fullname = fullname[1:] + } + dir := &fs.root + + for { + frag, remaining := fullname, "" + i := strings.IndexRune(fullname, '/') + final := i < 0 + if !final { + frag, remaining = fullname[:i], fullname[i+1:] + } + if frag == "" && dir != &fs.root { + panic("webdav: empty path fragment for a clean path") + } + if err := f(dir, frag, final); err != nil { + return &os.PathError{ + Op: op, + Path: original, + Err: err, + } + } + if final { + break + } + child := dir.children[frag] + if child == nil { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrNotExist, + } + } + if !child.mode.IsDir() { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrInvalid, + } + } + dir, fullname = child, remaining + } + return nil +} + +// find returns the parent of the named node and the relative name fragment +// from the parent to the child. For example, if finding "/foo/bar/baz" then +// parent will be the node for "/foo/bar" and frag will be "baz". +// +// If the fullname names the root node, then parent, frag and err will be zero. +// +// find returns an error if the parent does not already exist or the parent +// isn't a directory, but it will not return an error per se if the child does +// not already exist. The error returned is either nil or an *os.PathError +// whose Op is op. +func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) { + err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error { + if !final { + return nil + } + if frag0 != "" { + parent, frag = parent0, frag0 + } + return nil + }) + return parent, frag, err +} + +func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("mkdir", name) + if err != nil { + return err + } + if dir == nil { + // We can't create the root. + return os.ErrInvalid + } + if _, ok := dir.children[frag]; ok { + return os.ErrExist + } + dir.children[frag] = &memFSNode{ + children: make(map[string]*memFSNode), + mode: perm.Perm() | os.ModeDir, + modTime: time.Now(), + } + return nil +} + +func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("open", name) + if err != nil { + return nil, err + } + var n *memFSNode + if dir == nil { + // We're opening the root. + if flag&(os.O_WRONLY|os.O_RDWR) != 0 { + return nil, os.ErrPermission + } + n, frag = &fs.root, "/" + + } else { + n = dir.children[frag] + if flag&(os.O_SYNC|os.O_APPEND) != 0 { + // memFile doesn't support these flags yet. + return nil, os.ErrInvalid + } + if flag&os.O_CREATE != 0 { + if flag&os.O_EXCL != 0 && n != nil { + return nil, os.ErrExist + } + if n == nil { + n = &memFSNode{ + mode: perm.Perm(), + } + dir.children[frag] = n + } + } + if n == nil { + return nil, os.ErrNotExist + } + if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 { + n.mu.Lock() + n.data = nil + n.mu.Unlock() + } + } + + children := make([]os.FileInfo, 0, len(n.children)) + for cName, c := range n.children { + children = append(children, c.stat(cName)) + } + return &memFile{ + n: n, + nameSnapshot: frag, + childrenSnapshot: children, + }, nil +} + +func (fs *memFS) RemoveAll(ctx context.Context, name string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("remove", name) + if err != nil { + return err + } + if dir == nil { + // We can't remove the root. + return os.ErrInvalid + } + delete(dir.children, frag) + return nil +} + +func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + oldName = slashClean(oldName) + newName = slashClean(newName) + if oldName == newName { + return nil + } + if strings.HasPrefix(newName, oldName+"/") { + // We can't rename oldName to be a sub-directory of itself. + return os.ErrInvalid + } + + oDir, oFrag, err := fs.find("rename", oldName) + if err != nil { + return err + } + if oDir == nil { + // We can't rename from the root. + return os.ErrInvalid + } + + nDir, nFrag, err := fs.find("rename", newName) + if err != nil { + return err + } + if nDir == nil { + // We can't rename to the root. + return os.ErrInvalid + } + + oNode, ok := oDir.children[oFrag] + if !ok { + return os.ErrNotExist + } + if oNode.children != nil { + if nNode, ok := nDir.children[nFrag]; ok { + if nNode.children == nil { + return errNotADirectory + } + if len(nNode.children) != 0 { + return errDirectoryNotEmpty + } + } + } + delete(oDir.children, oFrag) + nDir.children[nFrag] = oNode + return nil +} + +func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("stat", name) + if err != nil { + return nil, err + } + if dir == nil { + // We're stat'ting the root. + return fs.root.stat("/"), nil + } + if n, ok := dir.children[frag]; ok { + return n.stat(path.Base(name)), nil + } + return nil, os.ErrNotExist +} + +// A memFSNode represents a single entry in the in-memory filesystem and also +// implements os.FileInfo. +type memFSNode struct { + // children is protected by memFS.mu. + children map[string]*memFSNode + + mu sync.Mutex + data []byte + mode os.FileMode + modTime time.Time + deadProps map[xml.Name]Property +} + +func (n *memFSNode) stat(name string) *memFileInfo { + n.mu.Lock() + defer n.mu.Unlock() + return &memFileInfo{ + name: name, + size: int64(len(n.data)), + mode: n.mode, + modTime: n.modTime, + } +} + +func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { + n.mu.Lock() + defer n.mu.Unlock() + if len(n.deadProps) == 0 { + return nil, nil + } + ret := make(map[xml.Name]Property, len(n.deadProps)) + for k, v := range n.deadProps { + ret[k] = v + } + return ret, nil +} + +func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { + n.mu.Lock() + defer n.mu.Unlock() + pstat := Propstat{Status: http.StatusOK} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + if patch.Remove { + delete(n.deadProps, p.XMLName) + continue + } + if n.deadProps == nil { + n.deadProps = map[xml.Name]Property{} + } + n.deadProps[p.XMLName] = p + } + } + return []Propstat{pstat}, nil +} + +type memFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (f *memFileInfo) Name() string { return f.name } +func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() os.FileMode { return f.mode } +func (f *memFileInfo) ModTime() time.Time { return f.modTime } +func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() } +func (f *memFileInfo) Sys() interface{} { return nil } + +// A memFile is a File implementation for a memFSNode. It is a per-file (not +// per-node) read/write position, and a snapshot of the memFS' tree structure +// (a node's name and children) for that node. +type memFile struct { + n *memFSNode + nameSnapshot string + childrenSnapshot []os.FileInfo + // pos is protected by n.mu. + pos int +} + +// A *memFile implements the optional DeadPropsHolder interface. +var _ DeadPropsHolder = (*memFile)(nil) + +func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } +func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } + +func (f *memFile) Close() error { + return nil +} + +func (f *memFile) Read(p []byte) (int, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos >= len(f.n.data) { + return 0, io.EOF + } + n := copy(p, f.n.data[f.pos:]) + f.pos += n + return n, nil +} + +func (f *memFile) Readdir(count int) ([]os.FileInfo, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if !f.n.mode.IsDir() { + return nil, os.ErrInvalid + } + old := f.pos + if old >= len(f.childrenSnapshot) { + // The os.File Readdir docs say that at the end of a directory, + // the error is io.EOF if count > 0 and nil if count <= 0. + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.pos += count + if f.pos > len(f.childrenSnapshot) { + f.pos = len(f.childrenSnapshot) + } + } else { + f.pos = len(f.childrenSnapshot) + old = 0 + } + return f.childrenSnapshot[old:f.pos], nil +} + +func (f *memFile) Seek(offset int64, whence int) (int64, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + npos := f.pos + // TODO: How to handle offsets greater than the size of system int? + switch whence { + case os.SEEK_SET: + npos = int(offset) + case os.SEEK_CUR: + npos += int(offset) + case os.SEEK_END: + npos = len(f.n.data) + int(offset) + default: + npos = -1 + } + if npos < 0 { + return 0, os.ErrInvalid + } + f.pos = npos + return int64(f.pos), nil +} + +func (f *memFile) Stat() (os.FileInfo, error) { + return f.n.stat(f.nameSnapshot), nil +} + +func (f *memFile) Write(p []byte) (int, error) { + lenp := len(p) + f.n.mu.Lock() + defer f.n.mu.Unlock() + + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos < len(f.n.data) { + n := copy(f.n.data[f.pos:], p) + f.pos += n + p = p[n:] + } else if f.pos > len(f.n.data) { + // Write permits the creation of holes, if we've seek'ed past the + // existing end of file. + if f.pos <= cap(f.n.data) { + oldLen := len(f.n.data) + f.n.data = f.n.data[:f.pos] + hole := f.n.data[oldLen:] + for i := range hole { + hole[i] = 0 + } + } else { + d := make([]byte, f.pos, f.pos+len(p)) + copy(d, f.n.data) + f.n.data = d + } + } + + if len(p) > 0 { + // We should only get here if f.pos == len(f.n.data). + f.n.data = append(f.n.data, p...) + f.pos = len(f.n.data) + } + f.n.modTime = time.Now() + return lenp, nil +} + +// moveFiles moves files and/or directories from src to dst. +// +// See section 9.9.4 for when various HTTP status codes apply. +func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) { + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if !os.IsNotExist(err) { + return http.StatusForbidden, err + } + created = true + } else if overwrite { + // Section 9.9.3 says that "If a resource exists at the destination + // and the Overwrite header is "T", then prior to performing the move, + // the server must perform a DELETE with "Depth: infinity" on the + // destination resource. + if err := fs.RemoveAll(ctx, dst); err != nil { + return http.StatusForbidden, err + } + } else { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.Rename(ctx, src, dst); err != nil { + return http.StatusForbidden, err + } + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +func copyProps(dst, src File) error { + d, ok := dst.(DeadPropsHolder) + if !ok { + return nil + } + s, ok := src.(DeadPropsHolder) + if !ok { + return nil + } + m, err := s.DeadProps() + if err != nil { + return err + } + props := make([]Property, 0, len(m)) + for _, prop := range m { + props = append(props, prop) + } + _, err = d.Patch([]Proppatch{{Props: props}}) + return err +} + +// copyFiles copies files and/or directories from src to dst. +// +// See section 9.8.5 for when various HTTP status codes apply. +func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { + if recursion == 1000 { + return http.StatusInternalServerError, errRecursionTooDeep + } + recursion++ + + // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ + // into /A/B/ could lead to infinite recursion if not handled correctly." + + srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + defer srcFile.Close() + srcStat, err := srcFile.Stat() + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + srcPerm := srcStat.Mode() & os.ModePerm + + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if os.IsNotExist(err) { + created = true + } else { + return http.StatusForbidden, err + } + } else { + if !overwrite { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) { + return http.StatusForbidden, err + } + } + + if srcStat.IsDir() { + if err := fs.Mkdir(ctx, dst, srcPerm); err != nil { + return http.StatusForbidden, err + } + if depth == infiniteDepth { + children, err := srcFile.Readdir(-1) + if err != nil { + return http.StatusForbidden, err + } + for _, c := range children { + name := c.Name() + s := path.Join(src, name) + d := path.Join(dst, name) + cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion) + if cErr != nil { + // TODO: MultiStatus. + return cStatus, cErr + } + } + } + + } else { + dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) + if err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusForbidden, err + + } + _, copyErr := io.Copy(dstFile, srcFile) + propsErr := copyProps(dstFile, srcFile) + closeErr := dstFile.Close() + if copyErr != nil { + return http.StatusInternalServerError, copyErr + } + if propsErr != nil { + return http.StatusInternalServerError, propsErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + } + + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +// walkFS traverses filesystem fs starting at name up to depth levels. +// +// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, +// walkFS calls walkFn. If a visited file system node is a directory and +// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. +func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // This implementation is based on Walk's code in the standard path/filepath package. + err := walkFn(name, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + if !info.IsDir() || depth == 0 { + return nil + } + if depth == 1 { + depth = 0 + } + + // Read directory names. + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return walkFn(name, info, err) + } + fileInfos, err := f.Readdir(0) + f.Close() + if err != nil { + return walkFn(name, info, err) + } + + for _, fileInfo := range fileInfos { + filename := path.Join(name, fileInfo.Name()) + fileInfo, err := fs.Stat(ctx, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.6.go b/vendor/golang.org/x/net/webdav/file_go1.6.go new file mode 100644 index 0000000..fa38770 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.6.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package webdav + +import ( + "net/http" + + "golang.org/x/net/context" +) + +func getContext(r *http.Request) context.Context { + return context.Background() +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.7.go b/vendor/golang.org/x/net/webdav/file_go1.7.go new file mode 100644 index 0000000..d1c3de8 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.7.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package webdav + +import ( + "context" + "net/http" +) + +func getContext(r *http.Request) context.Context { + return r.Context() +} diff --git a/vendor/golang.org/x/net/webdav/file_test.go b/vendor/golang.org/x/net/webdav/file_test.go new file mode 100644 index 0000000..bfd96e1 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_test.go @@ -0,0 +1,1184 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSlashClean(t *testing.T) { + testCases := []string{ + "", + ".", + "/", + "/./", + "//", + "//.", + "//a", + "/a", + "/a/b/c", + "/a//b/./../c/d/", + "a", + "a/b/c", + } + for _, tc := range testCases { + got := slashClean(tc) + want := path.Clean("/" + tc) + if got != want { + t.Errorf("tc=%q: got %q, want %q", tc, got, want) + } + } +} + +func TestDirResolve(t *testing.T) { + testCases := []struct { + dir, name, want string + }{ + {"/", "", "/"}, + {"/", "/", "/"}, + {"/", ".", "/"}, + {"/", "./a", "/a"}, + {"/", "..", "/"}, + {"/", "..", "/"}, + {"/", "../", "/"}, + {"/", "../.", "/"}, + {"/", "../a", "/a"}, + {"/", "../..", "/"}, + {"/", "../bar/a", "/bar/a"}, + {"/", "../baz/a", "/baz/a"}, + {"/", "...", "/..."}, + {"/", ".../a", "/.../a"}, + {"/", ".../..", "/"}, + {"/", "a", "/a"}, + {"/", "a/./b", "/a/b"}, + {"/", "a/../../b", "/b"}, + {"/", "a/../b", "/b"}, + {"/", "a/b", "/a/b"}, + {"/", "a/b/c/../../d", "/a/d"}, + {"/", "a/b/c/../../../d", "/d"}, + {"/", "a/b/c/../../../../d", "/d"}, + {"/", "a/b/c/d", "/a/b/c/d"}, + + {"/foo/bar", "", "/foo/bar"}, + {"/foo/bar", "/", "/foo/bar"}, + {"/foo/bar", ".", "/foo/bar"}, + {"/foo/bar", "./a", "/foo/bar/a"}, + {"/foo/bar", "..", "/foo/bar"}, + {"/foo/bar", "../", "/foo/bar"}, + {"/foo/bar", "../.", "/foo/bar"}, + {"/foo/bar", "../a", "/foo/bar/a"}, + {"/foo/bar", "../..", "/foo/bar"}, + {"/foo/bar", "../bar/a", "/foo/bar/bar/a"}, + {"/foo/bar", "../baz/a", "/foo/bar/baz/a"}, + {"/foo/bar", "...", "/foo/bar/..."}, + {"/foo/bar", ".../a", "/foo/bar/.../a"}, + {"/foo/bar", ".../..", "/foo/bar"}, + {"/foo/bar", "a", "/foo/bar/a"}, + {"/foo/bar", "a/./b", "/foo/bar/a/b"}, + {"/foo/bar", "a/../../b", "/foo/bar/b"}, + {"/foo/bar", "a/../b", "/foo/bar/b"}, + {"/foo/bar", "a/b", "/foo/bar/a/b"}, + {"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"}, + {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/../../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"}, + + {"/foo/bar/", "", "/foo/bar"}, + {"/foo/bar/", "/", "/foo/bar"}, + {"/foo/bar/", ".", "/foo/bar"}, + {"/foo/bar/", "./a", "/foo/bar/a"}, + {"/foo/bar/", "..", "/foo/bar"}, + + {"/foo//bar///", "", "/foo/bar"}, + {"/foo//bar///", "/", "/foo/bar"}, + {"/foo//bar///", ".", "/foo/bar"}, + {"/foo//bar///", "./a", "/foo/bar/a"}, + {"/foo//bar///", "..", "/foo/bar"}, + + {"/x/y/z", "ab/c\x00d/ef", ""}, + + {".", "", "."}, + {".", "/", "."}, + {".", ".", "."}, + {".", "./a", "a"}, + {".", "..", "."}, + {".", "..", "."}, + {".", "../", "."}, + {".", "../.", "."}, + {".", "../a", "a"}, + {".", "../..", "."}, + {".", "../bar/a", "bar/a"}, + {".", "../baz/a", "baz/a"}, + {".", "...", "..."}, + {".", ".../a", ".../a"}, + {".", ".../..", "."}, + {".", "a", "a"}, + {".", "a/./b", "a/b"}, + {".", "a/../../b", "b"}, + {".", "a/../b", "b"}, + {".", "a/b", "a/b"}, + {".", "a/b/c/../../d", "a/d"}, + {".", "a/b/c/../../../d", "d"}, + {".", "a/b/c/../../../../d", "d"}, + {".", "a/b/c/d", "a/b/c/d"}, + + {"", "", "."}, + {"", "/", "."}, + {"", ".", "."}, + {"", "./a", "a"}, + {"", "..", "."}, + } + + for _, tc := range testCases { + d := Dir(filepath.FromSlash(tc.dir)) + if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want { + t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want) + } + } +} + +func TestWalk(t *testing.T) { + type walkStep struct { + name, frag string + final bool + } + + testCases := []struct { + dir string + want []walkStep + }{ + {"", []walkStep{ + {"", "", true}, + }}, + {"/", []walkStep{ + {"", "", true}, + }}, + {"/a", []walkStep{ + {"", "a", true}, + }}, + {"/a/", []walkStep{ + {"", "a", true}, + }}, + {"/a/b", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/c", []walkStep{ + {"", "a", false}, + {"a", "b", false}, + {"b", "c", true}, + }}, + // The following test case is the one mentioned explicitly + // in the method description. + {"/foo/bar/x", []walkStep{ + {"", "foo", false}, + {"foo", "bar", false}, + {"bar", "x", true}, + }}, + } + + ctx := context.Background() + + for _, tc := range testCases { + fs := NewMemFS().(*memFS) + + parts := strings.Split(tc.dir, "/") + for p := 2; p < len(parts); p++ { + d := strings.Join(parts[:p], "/") + if err := fs.Mkdir(ctx, d, 0666); err != nil { + t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err) + } + } + + i, prevFrag := 0, "" + err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error { + got := walkStep{ + name: prevFrag, + frag: frag, + final: final, + } + want := tc.want[i] + + if got != want { + return fmt.Errorf("got %+v, want %+v", got, want) + } + i, prevFrag = i+1, frag + return nil + }) + if err != nil { + t.Errorf("tc.dir=%q: %v", tc.dir, err) + } + } +} + +// find appends to ss the names of the named file and its children. It is +// analogous to the Unix find command. +// +// The returned strings are not guaranteed to be in any particular order. +func find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) { + stat, err := fs.Stat(ctx, name) + if err != nil { + return nil, err + } + ss = append(ss, name) + if stat.IsDir() { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + return nil, err + } + for _, c := range children { + ss, err = find(ctx, ss, fs, path.Join(name, c.Name())) + if err != nil { + return nil, err + } + } + } + return ss, nil +} + +func testFS(t *testing.T, fs FileSystem) { + errStr := func(err error) string { + switch { + case os.IsExist(err): + return "errExist" + case os.IsNotExist(err): + return "errNotExist" + case err != nil: + return "err" + } + return "ok" + } + + // The non-"find" non-"stat" test cases should change the file system state. The + // indentation of the "find"s and "stat"s helps distinguish such test cases. + testCases := []string{ + " stat / want dir", + " stat /a want errNotExist", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + "create /a A want ok", + " stat /a want 1", + "create /d/e EEE want errNotExist", + "mk-dir /a want errExist", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + " stat /d want dir", + "create /d/e EEE want ok", + " stat /d/e want 3", + " find / /a /d /d/e", + "create /d/f FFFF want ok", + "create /d/g GGGGGGG want ok", + "mk-dir /d/m want ok", + "mk-dir /d/m want errExist", + "create /d/m/p PPPPP want ok", + " stat /d/e want 3", + " stat /d/f want 4", + " stat /d/g want 7", + " stat /d/h want errNotExist", + " stat /d/m want dir", + " stat /d/m/p want 5", + " find / /a /d /d/e /d/f /d/g /d/m /d/m/p", + "rm-all /d want ok", + " stat /a want 1", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + " stat /d/f want errNotExist", + " stat /d/g want errNotExist", + " stat /d/m want errNotExist", + " stat /d/m/p want errNotExist", + " find / /a", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + "create /d/f FFFF want ok", + "rm-all /d/f want ok", + "mk-dir /d/m want ok", + "rm-all /z want ok", + "rm-all / want err", + "create /b BB want ok", + " stat / want dir", + " stat /a want 1", + " stat /b want 2", + " stat /c want errNotExist", + " stat /d want dir", + " stat /d/m want dir", + " find / /a /b /d /d/m", + "move__ o=F /b /c want ok", + " stat /b want errNotExist", + " stat /c want 2", + " stat /d/m want dir", + " stat /d/n want errNotExist", + " find / /a /c /d /d/m", + "move__ o=F /d/m /d/n want ok", + "create /d/n/q QQQQ want ok", + " stat /d/m want errNotExist", + " stat /d/n want dir", + " stat /d/n/q want 4", + "move__ o=F /d /d/n/z want err", + "move__ o=T /c /d/n/q want ok", + " stat /c want errNotExist", + " stat /d/n/q want 2", + " find / /a /d /d/n /d/n/q", + "create /d/n/r RRRRR want ok", + "mk-dir /u want ok", + "mk-dir /u/v want ok", + "move__ o=F /d/n /u want errExist", + "create /t TTTTTT want ok", + "move__ o=F /d/n /t want errExist", + "rm-all /t want ok", + "move__ o=F /d/n /t want ok", + " stat /d want dir", + " stat /d/n want errNotExist", + " stat /d/n/r want errNotExist", + " stat /t want dir", + " stat /t/q want 2", + " stat /t/r want 5", + " find / /a /d /t /t/q /t/r /u /u/v", + "move__ o=F /t / want errExist", + "move__ o=T /t /u/v want ok", + " stat /u/v/r want 5", + "move__ o=F / /z want err", + " find / /a /d /u /u/v /u/v/q /u/v/r", + " stat /a want 1", + " stat /b want errNotExist", + " stat /c want errNotExist", + " stat /u/v/r want 5", + "copy__ o=F d=0 /a /b want ok", + "copy__ o=T d=0 /a /c want ok", + " stat /a want 1", + " stat /b want 1", + " stat /c want 1", + " stat /u/v/r want 5", + "copy__ o=F d=0 /u/v/r /b want errExist", + " stat /b want 1", + "copy__ o=T d=0 /u/v/r /b want ok", + " stat /a want 1", + " stat /b want 5", + " stat /u/v/r want 5", + "rm-all /a want ok", + "rm-all /b want ok", + "mk-dir /u/v/w want ok", + "create /u/v/w/s SSSSSSSS want ok", + " stat /d want dir", + " stat /d/x want errNotExist", + " stat /d/y want errNotExist", + " stat /u/v/r want 5", + " stat /u/v/w/s want 8", + " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s", + "copy__ o=T d=0 /u/v /d/x want ok", + "copy__ o=T d=∞ /u/v /d/y want ok", + "rm-all /u want ok", + " stat /d/x want dir", + " stat /d/x/q want errNotExist", + " stat /d/x/r want errNotExist", + " stat /d/x/w want errNotExist", + " stat /d/x/w/s want errNotExist", + " stat /d/y want dir", + " stat /d/y/q want 2", + " stat /d/y/r want 5", + " stat /d/y/w want dir", + " stat /d/y/w/s want 8", + " stat /u want errNotExist", + " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s", + "copy__ o=F d=∞ /d/y /d/x want errExist", + } + + ctx := context.Background() + + for i, tc := range testCases { + tc = strings.TrimSpace(tc) + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create": + parts := strings.Split(arg, " ") + if len(parts) != 4 || parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid write", i, tc) + } + f, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if got := errStr(opErr); got != parts[3] { + t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3]) + } + if f != nil { + if _, err := f.Write([]byte(parts[1])); err != nil { + t.Fatalf("test case #%d %q: Write: %v", i, tc, err) + } + if err := f.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + } + + case "find": + got, err := find(ctx, nil, fs, "/") + if err != nil { + t.Fatalf("test case #%d %q: find: %v", i, tc, err) + } + sort.Strings(got) + want := strings.Split(arg, " ") + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want) + } + + case "copy__", "mk-dir", "move__", "rm-all", "stat": + nParts := 3 + switch op { + case "copy__": + nParts = 6 + case "move__": + nParts = 5 + } + parts := strings.Split(arg, " ") + if len(parts) != nParts { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + + got, opErr := "", error(nil) + switch op { + case "copy__": + depth := 0 + if parts[1] == "d=∞" { + depth = infiniteDepth + } + _, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) + case "mk-dir": + opErr = fs.Mkdir(ctx, parts[0], 0777) + case "move__": + _, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == "o=T") + case "rm-all": + opErr = fs.RemoveAll(ctx, parts[0]) + case "stat": + var stat os.FileInfo + fileName := parts[0] + if stat, opErr = fs.Stat(ctx, fileName); opErr == nil { + if stat.IsDir() { + got = "dir" + } else { + got = strconv.Itoa(int(stat.Size())) + } + + if fileName == "/" { + // For a Dir FileSystem, the virtual file system root maps to a + // real file system name like "/tmp/webdav-test012345", which does + // not end with "/". We skip such cases. + } else if statName := stat.Name(); path.Base(fileName) != statName { + t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q", + i, tc, fileName, statName) + } + } + } + if got == "" { + got = errStr(opErr) + } + + if parts[len(parts)-2] != "want" { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + if want := parts[len(parts)-1]; got != want { + t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want) + } + } + } +} + +func TestDir(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skip("see golang.org/issue/12004") + case "plan9": + t.Skip("see golang.org/issue/11453") + } + + td, err := ioutil.TempDir("", "webdav-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + testFS(t, Dir(td)) +} + +func TestMemFS(t *testing.T) { + testFS(t, NewMemFS()) +} + +func TestMemFSRoot(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + for i := 0; i < 5; i++ { + stat, err := fs.Stat(ctx, "/") + if err != nil { + t.Fatalf("i=%d: Stat: %v", i, err) + } + if !stat.IsDir() { + t.Fatalf("i=%d: Stat.IsDir is false, want true", i) + } + + f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("i=%d: OpenFile: %v", i, err) + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + t.Fatalf("i=%d: Readdir: %v", i, err) + } + if len(children) != i { + t.Fatalf("i=%d: got %d children, want %d", i, len(children), i) + } + + if _, err := f.Write(make([]byte, 1)); err == nil { + t.Fatalf("i=%d: Write: got nil error, want non-nil", i) + } + + if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil { + t.Fatalf("i=%d: Mkdir: %v", i, err) + } + } +} + +func TestMemFileReaddir(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + if err := fs.Mkdir(ctx, "/foo", 0777); err != nil { + t.Fatalf("Mkdir: %v", err) + } + readdir := func(count int) ([]os.FileInfo, error) { + f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + return f.Readdir(count) + } + if got, err := readdir(-1); len(got) != 0 || err != nil { + t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, ", len(got), err) + } + if got, err := readdir(+1); len(got) != 0 || err != io.EOF { + t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err) + } +} + +func TestMemFile(t *testing.T) { + testCases := []string{ + "wantData ", + "wantSize 0", + "write abc", + "wantData abc", + "write de", + "wantData abcde", + "wantSize 5", + "write 5*x", + "write 4*y+2*z", + "write 3*st", + "wantData abcdexxxxxyyyyzzststst", + "wantSize 22", + "seek set 4 want 4", + "write EFG", + "wantData abcdEFGxxxyyyyzzststst", + "wantSize 22", + "seek set 2 want 2", + "read cdEF", + "read Gx", + "seek cur 0 want 8", + "seek cur 2 want 10", + "seek cur -1 want 9", + "write J", + "wantData abcdEFGxxJyyyyzzststst", + "wantSize 22", + "seek cur -4 want 6", + "write ghijk", + "wantData abcdEFghijkyyyzzststst", + "wantSize 22", + "read yyyz", + "seek cur 0 want 15", + "write ", + "seek cur 0 want 15", + "read ", + "seek cur 0 want 15", + "seek end -3 want 19", + "write ZZ", + "wantData abcdEFghijkyyyzzstsZZt", + "wantSize 22", + "write 4*A", + "wantData abcdEFghijkyyyzzstsZZAAAA", + "wantSize 25", + "seek end 0 want 25", + "seek end -5 want 20", + "read Z+4*A", + "write 5*B", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB", + "wantSize 30", + "seek end 10 want 40", + "write C", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C", + "wantSize 41", + "write D", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD", + "wantSize 42", + "seek set 43 want 43", + "write E", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E", + "wantSize 44", + "seek set 0 want 0", + "write 5*123456789_", + "wantData 123456789_123456789_123456789_123456789_123456789_", + "wantSize 50", + "seek cur 0 want 50", + "seek cur -99 want err", + } + + ctx := context.Background() + + const filename = "/foo" + fs := NewMemFS() + f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + // Expand an arg like "3*a+2*b" to "aaabb". + parts := strings.Split(arg, "+") + for j, part := range parts { + if k := strings.IndexByte(part, '*'); k >= 0 { + repeatCount, repeatStr := part[:k], part[k+1:] + n, err := strconv.Atoi(repeatCount) + if err != nil { + t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount) + } + parts[j] = strings.Repeat(repeatStr, n) + } + } + arg = strings.Join(parts, "") + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "read": + buf := make([]byte, len(arg)) + if _, err := io.ReadFull(f, buf); err != nil { + t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err) + } + if got := string(buf); got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + + case "seek": + parts := strings.Split(arg, " ") + if len(parts) != 4 { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + + whence := 0 + switch parts[0] { + default: + t.Fatalf("test case #%d %q: invalid seek whence", i, tc) + case "set": + whence = os.SEEK_SET + case "cur": + whence = os.SEEK_CUR + case "end": + whence = os.SEEK_END + } + offset, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1]) + } + + if parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + if parts[3] == "err" { + _, err := f.Seek(int64(offset), whence) + if err == nil { + t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc) + } + } else { + got, err := f.Seek(int64(offset), whence) + if err != nil { + t.Fatalf("test case #%d %q: Seek: %v", i, tc, err) + } + want, err := strconv.Atoi(parts[3]) + if err != nil { + t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3]) + } + if got != int64(want) { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + + case "write": + n, err := f.Write([]byte(arg)) + if err != nil { + t.Fatalf("test case #%d %q: write: %v", i, tc, err) + } + if n != len(arg) { + t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg)) + } + + case "wantData": + g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666) + if err != nil { + t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err) + } + gotBytes, err := ioutil.ReadAll(g) + if err != nil { + t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err) + } + for i, c := range gotBytes { + if c == '\x00' { + gotBytes[i] = '.' + } + } + got := string(gotBytes) + if got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + if err := g.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + + case "wantSize": + n, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg) + } + fi, err := fs.Stat(ctx, filename) + if err != nil { + t.Fatalf("test case #%d %q: Stat: %v", i, tc, err) + } + if got, want := fi.Size(), int64(n); got != want { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + } +} + +// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a +// memFile doesn't allocate a new buffer for each of those N times. Otherwise, +// calling io.Copy(aMemFile, src) is likely to have quadratic complexity. +func TestMemFileWriteAllocs(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("gccgo allocates here") + } + ctx := context.Background() + fs := NewMemFS() + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + a := testing.AllocsPerRun(100, func() { + f.Write(xxx) + }) + // AllocsPerRun returns an integral value, so we compare the rounded-down + // number to zero. + if a > 0 { + t.Fatalf("%v allocs per run, want 0", a) + } +} + +func BenchmarkMemFileWrite(b *testing.B) { + ctx := context.Background() + fs := NewMemFS() + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + b.Fatalf("OpenFile: %v", err) + } + for j := 0; j < 100; j++ { + f.Write(xxx) + } + if err := f.Close(); err != nil { + b.Fatalf("Close: %v", err) + } + if err := fs.RemoveAll(ctx, "/xxx"); err != nil { + b.Fatalf("RemoveAll: %v", err) + } + } +} + +func TestCopyMoveProps(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + create := func(name string) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + _, wErr := f.Write([]byte("contents")) + cErr := f.Close() + if wErr != nil { + return wErr + } + return cErr + } + patch := func(name string, patches ...Proppatch) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return err + } + _, pErr := f.(DeadPropsHolder).Patch(patches) + cErr := f.Close() + if pErr != nil { + return pErr + } + return cErr + } + props := func(name string) (map[xml.Name]Property, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return nil, err + } + m, pErr := f.(DeadPropsHolder).DeadProps() + cErr := f.Close() + if pErr != nil { + return nil, pErr + } + if cErr != nil { + return nil, cErr + } + return m, nil + } + + p0 := Property{ + XMLName: xml.Name{Space: "x:", Local: "boat"}, + InnerXML: []byte("pea-green"), + } + p1 := Property{ + XMLName: xml.Name{Space: "x:", Local: "ring"}, + InnerXML: []byte("1 shilling"), + } + p2 := Property{ + XMLName: xml.Name{Space: "x:", Local: "spoon"}, + InnerXML: []byte("runcible"), + } + p3 := Property{ + XMLName: xml.Name{Space: "x:", Local: "moon"}, + InnerXML: []byte("light"), + } + + if err := create("/src"); err != nil { + t.Fatalf("create /src: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { + t.Fatalf("patch /src +p0 +p1: %v", err) + } + if _, err := copyFiles(ctx, fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { + t.Fatalf("copyFiles /src /tmp: %v", err) + } + if _, err := moveFiles(ctx, fs, "/tmp", "/dst", true); err != nil { + t.Fatalf("moveFiles /tmp /dst: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { + t.Fatalf("patch /src -p0: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil { + t.Fatalf("patch /src +p2: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil { + t.Fatalf("patch /dst -p1: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil { + t.Fatalf("patch /dst +p3: %v", err) + } + + gotSrc, err := props("/src") + if err != nil { + t.Fatalf("props /src: %v", err) + } + wantSrc := map[xml.Name]Property{ + p1.XMLName: p1, + p2.XMLName: p2, + } + if !reflect.DeepEqual(gotSrc, wantSrc) { + t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc) + } + + gotDst, err := props("/dst") + if err != nil { + t.Fatalf("props /dst: %v", err) + } + wantDst := map[xml.Name]Property{ + p0.XMLName: p0, + p3.XMLName: p3, + } + if !reflect.DeepEqual(gotDst, wantDst) { + t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst) + } +} + +func TestWalkFS(t *testing.T) { + testCases := []struct { + desc string + buildfs []string + startAt string + depth int + walkFn filepath.WalkFunc + want []string + }{{ + "just root", + []string{}, + "/", + infiniteDepth, + nil, + []string{ + "/", + }, + }, { + "infinite walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + infiniteDepth, + nil, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/d", + "/e", + "/f", + }, + }, { + "infinite walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/a", + infiniteDepth, + nil, + []string{ + "/a", + "/a/b", + "/a/b/c", + "/a/d", + }, + }, { + "depth 1 walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + 1, + nil, + []string{ + "/", + "/a", + "/e", + "/f", + }, + }, { + "depth 1 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 1, + nil, + []string{ + "/a/b", + "/a/b/c", + "/a/b/g", + }, + }, { + "depth 0 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk from file", + []string{ + "mkdir /a", + "touch /a/b", + "touch /a/c", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk with skipped subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + "touch /a/b/z", + }, + "/", + infiniteDepth, + func(path string, info os.FileInfo, err error) error { + if path == "/a/b/g" { + return filepath.SkipDir + } + return nil + }, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/b/z", + }, + }} + ctx := context.Background() + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + var got []string + traceFn := func(path string, info os.FileInfo, err error) error { + if tc.walkFn != nil { + err = tc.walkFn(path, info, err) + if err != nil { + return err + } + } + got = append(got, path) + return nil + } + fi, err := fs.Stat(ctx, tc.startAt) + if err != nil { + t.Fatalf("%s: cannot stat: %v", tc.desc, err) + } + err = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn) + if err != nil { + t.Errorf("%s:\ngot error %v, want nil", tc.desc, err) + continue + } + sort.Strings(got) + sort.Strings(tc.want) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want) + continue + } + } +} + +func buildTestFS(buildfs []string) (FileSystem, error) { + // TODO: Could this be merged with the build logic in TestFS? + + ctx := context.Background() + fs := NewMemFS() + for _, b := range buildfs { + op := strings.Split(b, " ") + switch op[0] { + case "mkdir": + err := fs.Mkdir(ctx, op[1], os.ModeDir|0777) + if err != nil { + return nil, err + } + case "touch": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + f.Close() + case "write": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return nil, err + } + _, err = f.Write([]byte(op[2])) + f.Close() + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown file operation %q", op[0]) + } + } + return fs, nil +} diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go new file mode 100644 index 0000000..416e81c --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. +type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for a http.Request req. +func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Reader that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. + for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { + s = s[1:] + } + if len(s) == 0 { + return eofTokenType, "", "" + } + i := 0 +loop: + for ; i < len(s); i++ { + switch s[i] { + case '\t', ' ', '(', ')', '<', '>', '[', ']': + break loop + } + } + + if i != 0 { + tokenStr, remaining = s[:i], s[i:] + if tokenStr == "Not" { + return notTokenType, "", remaining + } + return strTokenType, tokenStr, remaining + } + + j := 0 + switch s[0] { + case '<': + j, tokenType = strings.IndexByte(s, '>'), angleTokenType + case '[': + j, tokenType = strings.IndexByte(s, ']'), squareTokenType + default: + return rune(s[0]), "", s[1:] + } + if j < 0 { + return errTokenType, "", "" + } + return tokenType, s[1:j], s[j+1:] +} diff --git a/vendor/golang.org/x/net/webdav/if_test.go b/vendor/golang.org/x/net/webdav/if_test.go new file mode 100644 index 0000000..aad61a4 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if_test.go @@ -0,0 +1,322 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseIfHeader(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + want ifHeader + }{{ + "bad: empty", + ``, + ifHeader{}, + }, { + "bad: no parens", + `foobar`, + ifHeader{}, + }, { + "bad: empty list #1", + `()`, + ifHeader{}, + }, { + "bad: empty list #2", + `(a) (b c) () (d)`, + ifHeader{}, + }, { + "bad: no list after resource #1", + ``, + ifHeader{}, + }, { + "bad: no list after resource #2", + ` (a)`, + ifHeader{}, + }, { + "bad: no list after resource #3", + ` (a) (b) `, + ifHeader{}, + }, { + "bad: no-tag-list followed by tagged-list", + `(a) (b) (c)`, + ifHeader{}, + }, { + "bad: unfinished list", + `(a`, + ifHeader{}, + }, { + "bad: unfinished ETag", + `([b`, + ifHeader{}, + }, { + "bad: unfinished Notted list", + `(Not a`, + ifHeader{}, + }, { + "bad: double Not", + `(Not Not a)`, + ifHeader{}, + }, { + "good: one list with a Token", + `(a)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }}, + }, + }, { + "good: one list with an ETag", + `([a])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + ETag: `a`, + }}, + }}, + }, + }, { + "good: one list with three Nots", + `(Not a Not b Not [d])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }, { + Not: true, + Token: `b`, + }, { + Not: true, + ETag: `d`, + }}, + }}, + }, + }, { + "good: two lists", + `(a) (b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Token: `b`, + }}, + }}, + }, + }, { + "good: two Notted lists", + `(Not a) (Not b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `b`, + }}, + }}, + }, + }, { + "section 7.5.1", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/users/f/fielding/index.html`, + conditions: []Condition{{ + Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`, + }}, + }}, + }, + }, { + "section 7.5.2 #1", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #2", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #3", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/member`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 9.9.6", + `() + ()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`, + }}, + }, { + conditions: []Condition{{ + Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`, + }}, + }}, + }, + }, { + "section 9.10.8", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`, + }}, + }}, + }, + }, { + "section 10.4.6", + `( + ["I am an ETag"]) + (["I am another ETag"])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `"I am an ETag"`, + }}, + }, { + conditions: []Condition{{ + ETag: `"I am another ETag"`, + }}, + }}, + }, + }, { + "section 10.4.7", + `(Not + )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`, + }}, + }}, + }, + }, { + "section 10.4.8", + `() + (Not )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `DAV:no-lock`, + }}, + }}, + }, + }, { + "section 10.4.9", + ` + ( + [W/"A weak ETag"]) (["strong ETag"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/resource1`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `W/"A weak ETag"`, + }}, + }, { + resourceTag: `/resource1`, + conditions: []Condition{{ + ETag: `"strong ETag"`, + }}, + }}, + }, + }, { + "section 10.4.10", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/specs/`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }}, + }, + }, { + "section 10.4.11 #1", + ` (["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + ETag: `"4217"`, + }}, + }}, + }, + }, { + "section 10.4.11 #2", + ` (Not ["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + Not: true, + ETag: `"4217"`, + }}, + }}, + }, + }} + + for _, tc := range testCases { + got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1)) + if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok { + t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok) + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want) + continue + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README new file mode 100644 index 0000000..89656f4 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/README @@ -0,0 +1,11 @@ +This is a fork of the encoding/xml package at ca1d6c4, the last commit before +https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name +space behavior" made late in the lead-up to the Go 1.5 release. + +The list of encoding/xml changes is at +https://go.googlesource.com/go/+log/master/src/encoding/xml + +This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is +released. + +See http://golang.org/issue/11841 diff --git a/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go new file mode 100644 index 0000000..a712843 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import "time" + +var atomValue = &Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Example Feed", + Link: []Link{{Href: "http://example.org/"}}, + Updated: ParseTime("2003-12-13T18:30:02Z"), + Author: Person{Name: "John Doe"}, + Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", + + Entry: []Entry{ + { + Title: "Atom-Powered Robots Run Amok", + Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}}, + Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a", + Updated: ParseTime("2003-12-13T18:30:02Z"), + Summary: NewText("Some text."), + }, + }, +} + +var atomXml = `` + + `` + + `Example Feed` + + `urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6` + + `` + + `John Doe` + + `` + + `Atom-Powered Robots Run Amok` + + `urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a` + + `` + + `2003-12-13T18:30:02Z` + + `` + + `Some text.` + + `` + + `` + +func ParseTime(str string) time.Time { + t, err := time.Parse(time.RFC3339, str) + if err != nil { + panic(err) + } + return t +} + +func NewText(text string) Text { + return Text{ + Body: text, + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go new file mode 100644 index 0000000..21b48de --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go @@ -0,0 +1,151 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml_test + +import ( + "encoding/xml" + "fmt" + "os" +) + +func ExampleMarshalIndent() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + os.Stdout.Write(output) + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +func ExampleEncoder() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + enc := xml.NewEncoder(os.Stdout) + enc.Indent(" ", " ") + if err := enc.Encode(v); err != nil { + fmt.Printf("error: %v\n", err) + } + + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +// This example demonstrates unmarshaling an XML excerpt into a value with +// some preset fields. Note that the Phone field isn't modified and that +// the XML element is ignored. Also, the Groups field is assigned +// considering the element path provided in its tag. +func ExampleUnmarshal() { + type Email struct { + Where string `xml:"where,attr"` + Addr string + } + type Address struct { + City, State string + } + type Result struct { + XMLName xml.Name `xml:"Person"` + Name string `xml:"FullName"` + Phone string + Email []Email + Groups []string `xml:"Group>Value"` + Address + } + v := Result{Name: "none", Phone: "none"} + + data := ` + + Grace R. Emlin + Example Inc. + + gre@example.com + + + gre@work.com + + + Friends + Squash + + Hanga Roa + Easter Island + + ` + err := xml.Unmarshal([]byte(data), &v) + if err != nil { + fmt.Printf("error: %v", err) + return + } + fmt.Printf("XMLName: %#v\n", v.XMLName) + fmt.Printf("Name: %q\n", v.Name) + fmt.Printf("Phone: %q\n", v.Phone) + fmt.Printf("Email: %v\n", v.Email) + fmt.Printf("Groups: %v\n", v.Groups) + fmt.Printf("Address: %v\n", v.Address) + // Output: + // XMLName: xml.Name{Space:"", Local:"Person"} + // Name: "Grace R. Emlin" + // Phone: "none" + // Email: [{home gre@example.com} {work gre@work.com}] + // Groups: [Friends Squash] + // Address: {Hanga Roa Easter Island} +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go new file mode 100644 index 0000000..cb82ec2 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go @@ -0,0 +1,1223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +const ( + // A generic XML header suitable for use with the output of Marshal. + // This is not automatically added to any output of this package, + // it is provided as a convenience. + Header = `` + "\n" +) + +// Marshal returns the XML encoding of v. +// +// Marshal handles an array or slice by marshalling each of the elements. +// Marshal handles a pointer by marshalling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshalling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML +// elements containing the data. +// +// The name for the XML elements is taken from, in order of preference: +// - the tag on the XMLName field, if the data is a struct +// - the value of the XMLName field of type xml.Name +// - the tag of the struct field used to obtain the data +// - the name of the struct field used to obtain the data +// - the name of the marshalled type +// +// The XML element for a struct contains marshalled elements for each of the +// exported fields of the struct, with these exceptions: +// - the XMLName field, described above, is omitted. +// - a field with tag "-" is omitted. +// - a field with tag "name,attr" becomes an attribute with +// the given name in the XML element. +// - a field with tag ",attr" becomes an attribute with the +// field name in the XML element. +// - a field with tag ",chardata" is written as character data, +// not as an XML element. +// - a field with tag ",innerxml" is written verbatim, not subject +// to the usual marshalling procedure. +// - a field with tag ",comment" is written as an XML comment, not +// subject to the usual marshalling procedure. It must not contain +// the "--" string within it. +// - a field with a tag including the "omitempty" option is omitted +// if the field value is empty. The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. +func (enc *Encoder) Encode(v interface{}) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) + if err != nil { + return err + } + return enc.p.Flush() +} + +// EncodeElement writes the XML encoding of v to the stream, +// using start as the outermost tag in the encoding. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// EncodeElement calls Flush before returning. +func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) + if err != nil { + return err + } + return enc.p.Flush() +} + +var ( + begComment = []byte("") + endProcInst = []byte("?>") + endDirective = []byte(">") +) + +// EncodeToken writes the given XML token to the stream. +// It returns an error if StartElement and EndElement tokens are not +// properly matched. +// +// EncodeToken does not call Flush, because usually it is part of a +// larger operation such as Encode or EncodeElement (or a custom +// Marshaler's MarshalXML invoked during those), and those will call +// Flush when finished. Callers that create an Encoder and then invoke +// EncodeToken directly, without using Encode or EncodeElement, need to +// call Flush when finished to ensure that the XML is written to the +// underlying writer. +// +// EncodeToken allows writing a ProcInst with Target set to "xml" only +// as the first token in the stream. +// +// When encoding a StartElement holding an XML namespace prefix +// declaration for a prefix that is not already declared, contained +// elements (including the StartElement itself) will use the declared +// prefix when encoding names with matching namespace URIs. +func (enc *Encoder) EncodeToken(t Token) error { + + p := &enc.p + switch t := t.(type) { + case StartElement: + if err := p.writeStart(&t); err != nil { + return err + } + case EndElement: + if err := p.writeEnd(t.Name); err != nil { + return err + } + case CharData: + escapeText(p, t, false) + case Comment: + if bytes.Contains(t, endComment) { + return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") + } + p.WriteString("") + return p.cachedWriteError() + case ProcInst: + // First token to be encoded which is also a ProcInst with target of xml + // is the xml declaration. The only ProcInst where target of xml is allowed. + if t.Target == "xml" && p.Buffered() != 0 { + return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") + } + if !isNameString(t.Target) { + return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") + } + if bytes.Contains(t.Inst, endProcInst) { + return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") + } + p.WriteString(" 0 { + p.WriteByte(' ') + p.Write(t.Inst) + } + p.WriteString("?>") + case Directive: + if !isValidDirective(t) { + return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") + } + p.WriteString("") + default: + return fmt.Errorf("xml: EncodeToken of invalid token type") + + } + return p.cachedWriteError() +} + +// isValidDirective reports whether dir is a valid directive text, +// meaning angle brackets are matched, ignoring comments and strings. +func isValidDirective(dir Directive) bool { + var ( + depth int + inquote uint8 + incomment bool + ) + for i, c := range dir { + switch { + case incomment: + if c == '>' { + if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { + incomment = false + } + } + // Just ignore anything in comment + case inquote != 0: + if c == inquote { + inquote = 0 + } + // Just ignore anything within quotes + case c == '\'' || c == '"': + inquote = c + case c == '<': + if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { + incomment = true + } else { + depth++ + } + case c == '>': + if depth == 0 { + return false + } + depth-- + } + } + return depth == 0 && inquote == 0 && !incomment +} + +// Flush flushes any buffered XML to the underlying writer. +// See the EncodeToken documentation for details about when it is necessary. +func (enc *Encoder) Flush() error { + return enc.p.Flush() +} + +type printer struct { + *bufio.Writer + encoder *Encoder + seq int + indent string + prefix string + depth int + indentedIn bool + putNewline bool + defaultNS string + attrNS map[string]string // map prefix -> name space + attrPrefix map[string]string // map name space -> prefix + prefixes []printerPrefix + tags []Name +} + +// printerPrefix holds a namespace undo record. +// When an element is popped, the prefix record +// is set back to the recorded URL. The empty +// prefix records the URL for the default name space. +// +// The start of an element is recorded with an element +// that has mark=true. +type printerPrefix struct { + prefix string + url string + mark bool +} + +func (p *printer) prefixForNS(url string, isAttr bool) string { + // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" + // and must be referred to that way. + // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", + // but users should not be trying to use that one directly - that's our job.) + if url == xmlURL { + return "xml" + } + if !isAttr && url == p.defaultNS { + // We can use the default name space. + return "" + } + return p.attrPrefix[url] +} + +// defineNS pushes any namespace definition found in the given attribute. +// If ignoreNonEmptyDefault is true, an xmlns="nonempty" +// attribute will be ignored. +func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error { + var prefix string + if attr.Name.Local == "xmlns" { + if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL { + return fmt.Errorf("xml: cannot redefine xmlns attribute prefix") + } + } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" { + prefix = attr.Name.Local + if attr.Value == "" { + // Technically, an empty XML namespace is allowed for an attribute. + // From http://www.w3.org/TR/xml-names11/#scoping-defaulting: + // + // The attribute value in a namespace declaration for a prefix may be + // empty. This has the effect, within the scope of the declaration, of removing + // any association of the prefix with a namespace name. + // + // However our namespace prefixes here are used only as hints. There's + // no need to respect the removal of a namespace prefix, so we ignore it. + return nil + } + } else { + // Ignore: it's not a namespace definition + return nil + } + if prefix == "" { + if attr.Value == p.defaultNS { + // No need for redefinition. + return nil + } + if attr.Value != "" && ignoreNonEmptyDefault { + // We have an xmlns="..." value but + // it can't define a name space in this context, + // probably because the element has an empty + // name space. In this case, we just ignore + // the name space declaration. + return nil + } + } else if _, ok := p.attrPrefix[attr.Value]; ok { + // There's already a prefix for the given name space, + // so use that. This prevents us from + // having two prefixes for the same name space + // so attrNS and attrPrefix can remain bijective. + return nil + } + p.pushPrefix(prefix, attr.Value) + return nil +} + +// createNSPrefix creates a name space prefix attribute +// to use for the given name space, defining a new prefix +// if necessary. +// If isAttr is true, the prefix is to be created for an attribute +// prefix, which means that the default name space cannot +// be used. +func (p *printer) createNSPrefix(url string, isAttr bool) { + if _, ok := p.attrPrefix[url]; ok { + // We already have a prefix for the given URL. + return + } + switch { + case !isAttr && url == p.defaultNS: + // We can use the default name space. + return + case url == "": + // The only way we can encode names in the empty + // name space is by using the default name space, + // so we must use that. + if p.defaultNS != "" { + // The default namespace is non-empty, so we + // need to set it to empty. + p.pushPrefix("", "") + } + return + case url == xmlURL: + return + } + // TODO If the URL is an existing prefix, we could + // use it as is. That would enable the + // marshaling of elements that had been unmarshaled + // and with a name space prefix that was not found. + // although technically it would be incorrect. + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.pushPrefix(prefix, url) +} + +// writeNamespaces writes xmlns attributes for all the +// namespace prefixes that have been defined in +// the current element. +func (p *printer) writeNamespaces() { + for i := len(p.prefixes) - 1; i >= 0; i-- { + prefix := p.prefixes[i] + if prefix.mark { + return + } + p.WriteString(" ") + if prefix.prefix == "" { + // Default name space. + p.WriteString(`xmlns="`) + } else { + p.WriteString("xmlns:") + p.WriteString(prefix.prefix) + p.WriteString(`="`) + } + EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) + p.WriteString(`"`) + } +} + +// pushPrefix pushes a new prefix on the prefix stack +// without checking to see if it is already defined. +func (p *printer) pushPrefix(prefix, url string) { + p.prefixes = append(p.prefixes, printerPrefix{ + prefix: prefix, + url: p.nsForPrefix(prefix), + }) + p.setAttrPrefix(prefix, url) +} + +// nsForPrefix returns the name space for the given +// prefix. Note that this is not valid for the +// empty attribute prefix, which always has an empty +// name space. +func (p *printer) nsForPrefix(prefix string) string { + if prefix == "" { + return p.defaultNS + } + return p.attrNS[prefix] +} + +// markPrefix marks the start of an element on the prefix +// stack. +func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, printerPrefix{ + mark: true, + }) +} + +// popPrefix pops all defined prefixes for the current +// element. +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix.mark { + break + } + p.setAttrPrefix(prefix.prefix, prefix.url) + } +} + +// setAttrPrefix sets an attribute name space prefix. +// If url is empty, the attribute is removed. +// If prefix is empty, the default name space is set. +func (p *printer) setAttrPrefix(prefix, url string) { + if prefix == "" { + p.defaultNS = url + return + } + if url == "" { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) + return + } + if p.attrPrefix == nil { + // Need to define a new name space. + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + // Remove any old prefix value. This is OK because we maintain a + // strict one-to-one mapping between prefix and URL (see + // defineNS) + delete(p.attrPrefix, p.attrNS[prefix]) + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. + // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + // explicitNS records whether the element's name space has been + // explicitly set (for example an XMLName field). + explicitNS := false + + if startTemplate != nil { + start.Name = startTemplate.Name + explicitNS = true + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + explicitNS = true + } + if start.Name.Local == "" && finfo != nil { + start.Name.Local = finfo.name + if finfo.xmlns != "" { + start.Name.Space = finfo.xmlns + explicitNS = true + } + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // defaultNS records the default name space as set by a xmlns="..." + // attribute. We don't set p.defaultNS because we want to let + // the attribute writing code (in p.defineNS) be solely responsible + // for maintaining that. + defaultNS := p.defaultNS + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + attr, err := p.fieldAttr(finfo, val) + if err != nil { + return err + } + if attr.Name.Local == "" { + continue + } + start.Attr = append(start.Attr, attr) + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + defaultNS = attr.Value + } + } + if !explicitNS { + // Historic behavior: elements use the default name space + // they are contained in by default. + start.Name.Space = defaultNS + } + // Historic behaviour: an element that's in a namespace sets + // the default namespace for all elements contained within it. + start.setDefaultNamespace() + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// fieldAttr returns the attribute of the given field. +// If the returned attribute has an empty Name.Local, +// it should not be used. +// The given value holds the value containing the field. +func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + return Attr{}, nil + } + if fv.Kind() == reflect.Interface && fv.IsNil() { + return Attr{}, nil + } + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + } + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + } + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + return Attr{}, nil + } + fv = fv.Elem() + } + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return Attr{}, err + } + if b != nil { + s = string(b) + } + return Attr{name, s}, nil +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + // Historic behaviour: elements use the name space of + // the element they are contained in by default. + if start.Name.Space == "" { + start.Name.Space = p.defaultNS + } + start.setDefaultNamespace() + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. +func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. +func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + // Define any name spaces explicitly declared in the attributes. + // We do this as a separate pass so that explicitly declared prefixes + // will take precedence over implicitly declared prefixes + // regardless of the order of the attributes. + ignoreNonEmptyDefault := start.Name.Space == "" + for _, attr := range start.Attr { + if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { + return err + } + } + // Define any new name spaces implied by the attributes. + for _, attr := range start.Attr { + name := attr.Name + // From http://www.w3.org/TR/xml-names11/#defaulting + // "Default namespace declarations do not apply directly + // to attribute names; the interpretation of unprefixed + // attributes is determined by the element on which they + // appear." + // This means we don't need to create a new namespace + // when an attribute name space is empty. + if name.Space != "" && !name.isNamespace() { + p.createNSPrefix(name.Space, true) + } + } + p.createNSPrefix(start.Name.Space, false) + + p.writeIndent(1) + p.WriteByte('<') + p.writeName(start.Name, false) + p.writeNamespaces() + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" || name.isNamespace() { + // Namespaces have already been written by writeNamespaces above. + continue + } + p.WriteByte(' ') + p.writeName(name, true) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +// writeName writes the given name. It assumes +// that p.createNSPrefix(name) has already been called. +func (p *printer) writeName(name Name, isAttr bool) { + if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" { + p.WriteString(prefix) + p.WriteByte(':') + } + p.WriteString(name.Local) +} + +func (p *printer) writeEnd(name Name) error { + if name.Local == "" { + return fmt.Errorf("xml: end tag with no name") + } + if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" { + return fmt.Errorf("xml: end tag without start tag", name.Local) + } + if top := p.tags[len(p.tags)-1]; top != name { + if top.Local != name.Local { + return fmt.Errorf("xml: end tag does not match start tag <%s>", name.Local, top.Local) + } + return fmt.Errorf("xml: end tag in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space) + } + p.tags = p.tags[:len(p.tags)-1] + + p.writeIndent(-1) + p.WriteByte('<') + p.WriteByte('/') + p.writeName(name, false) + p.WriteByte('>') + p.popPrefix() + return nil +} + +func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) { + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(val.Int(), 10), nil, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(val.Uint(), 10), nil, nil + case reflect.Float32, reflect.Float64: + return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil + case reflect.String: + return val.String(), nil, nil + case reflect.Bool: + return strconv.FormatBool(val.Bool()), nil, nil + case reflect.Array: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // [...]byte + var bytes []byte + if val.CanAddr() { + bytes = val.Slice(0, val.Len()).Bytes() + } else { + bytes = make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(bytes), val) + } + return "", bytes, nil + case reflect.Slice: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // []byte + return "", val.Bytes(), nil + } + return "", nil, &UnsupportedTypeError{typ} +} + +var ddBytes = []byte("--") + +func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { + s := parentStack{p: p} + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr != 0 { + continue + } + vf := finfo.value(val) + + // Dereference or skip nil pointer, interface values. + switch vf.Kind() { + case reflect.Ptr, reflect.Interface: + if !vf.IsNil() { + vf = vf.Elem() + } + } + + switch finfo.flags & fMode { + case fCharData: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { + data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + if vf.CanAddr() { + pv := vf.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + } + var scratch [64]byte + switch vf.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)) + case reflect.Float32, reflect.Float64: + Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())) + case reflect.Bool: + Escape(p, strconv.AppendBool(scratch[:0], vf.Bool())) + case reflect.String: + if err := EscapeText(p, []byte(vf.String())); err != nil { + return err + } + case reflect.Slice: + if elem, ok := vf.Interface().([]byte); ok { + if err := EscapeText(p, elem); err != nil { + return err + } + } + } + continue + + case fComment: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + k := vf.Kind() + if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { + return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) + } + if vf.Len() == 0 { + continue + } + p.writeIndent(0) + p.WriteString("" is invalid grammar. Make it "- -->" + p.WriteByte(' ') + } + p.WriteString("-->") + continue + + case fInnerXml: + iface := vf.Interface() + switch raw := iface.(type) { + case []byte: + p.Write(raw) + continue + case string: + p.WriteString(raw) + continue + } + + case fElement, fElement | fAny: + if err := s.setParents(finfo, vf); err != nil { + return err + } + } + if err := p.marshalValue(vf, finfo, nil); err != nil { + return err + } + } + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + return p.cachedWriteError() +} + +var noField fieldInfo + +// return the bufio Writer's cached write error +func (p *printer) cachedWriteError() error { + _, err := p.Write(nil) + return err +} + +func (p *printer) writeIndent(depthDelta int) { + if len(p.prefix) == 0 && len(p.indent) == 0 { + return + } + if depthDelta < 0 { + p.depth-- + if p.indentedIn { + p.indentedIn = false + return + } + p.indentedIn = false + } + if p.putNewline { + p.WriteByte('\n') + } else { + p.putNewline = true + } + if len(p.prefix) > 0 { + p.WriteString(p.prefix) + } + if len(p.indent) > 0 { + for i := 0; i < p.depth; i++ { + p.WriteString(p.indent) + } + } + if depthDelta > 0 { + p.depth++ + p.indentedIn = true + } +} + +type parentStack struct { + p *printer + xmlns string + parents []string +} + +// setParents sets the stack of current parents to those found in finfo. +// It only writes the start elements if vf holds a non-nil value. +// If finfo is &noField, it pops all elements. +func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error { + xmlns := s.p.defaultNS + if finfo.xmlns != "" { + xmlns = finfo.xmlns + } + commonParents := 0 + if xmlns == s.xmlns { + for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ { + if finfo.parents[commonParents] != s.parents[commonParents] { + break + } + } + } + // Pop off any parents that aren't in common with the previous field. + for i := len(s.parents) - 1; i >= commonParents; i-- { + if err := s.p.writeEnd(Name{ + Space: s.xmlns, + Local: s.parents[i], + }); err != nil { + return err + } + } + s.parents = finfo.parents + s.xmlns = xmlns + if commonParents >= len(s.parents) { + // No new elements to push. + return nil + } + if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() { + // The element is nil, so no need for the start elements. + s.parents = s.parents[:commonParents] + return nil + } + // Push any new parents required. + for _, name := range s.parents[commonParents:] { + start := &StartElement{ + Name: Name{ + Space: s.xmlns, + Local: name, + }, + } + // Set the default name space for parent elements + // to match what we do with other elements. + if s.xmlns != s.p.defaultNS { + start.setDefaultNamespace() + } + if err := s.p.writeStart(start); err != nil { + return err + } + } + return nil +} + +// A MarshalXMLError is returned when Marshal encounters a type +// that cannot be converted into XML. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "xml: unsupported type: " + e.Type.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go new file mode 100644 index 0000000..226cfd0 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go @@ -0,0 +1,1939 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type DriveType int + +const ( + HyperDrive DriveType = iota + ImprobabilityDrive +) + +type Passenger struct { + Name []string `xml:"name"` + Weight float32 `xml:"weight"` +} + +type Ship struct { + XMLName struct{} `xml:"spaceship"` + + Name string `xml:"name,attr"` + Pilot string `xml:"pilot,attr"` + Drive DriveType `xml:"drive"` + Age uint `xml:"age"` + Passenger []*Passenger `xml:"passenger"` + secret string +} + +type NamedType string + +type Port struct { + XMLName struct{} `xml:"port"` + Type string `xml:"type,attr,omitempty"` + Comment string `xml:",comment"` + Number string `xml:",chardata"` +} + +type Domain struct { + XMLName struct{} `xml:"domain"` + Country string `xml:",attr,omitempty"` + Name []byte `xml:",chardata"` + Comment []byte `xml:",comment"` +} + +type Book struct { + XMLName struct{} `xml:"book"` + Title string `xml:",chardata"` +} + +type Event struct { + XMLName struct{} `xml:"event"` + Year int `xml:",chardata"` +} + +type Movie struct { + XMLName struct{} `xml:"movie"` + Length uint `xml:",chardata"` +} + +type Pi struct { + XMLName struct{} `xml:"pi"` + Approximation float32 `xml:",chardata"` +} + +type Universe struct { + XMLName struct{} `xml:"universe"` + Visible float64 `xml:",chardata"` +} + +type Particle struct { + XMLName struct{} `xml:"particle"` + HasMass bool `xml:",chardata"` +} + +type Departure struct { + XMLName struct{} `xml:"departure"` + When time.Time `xml:",chardata"` +} + +type SecretAgent struct { + XMLName struct{} `xml:"agent"` + Handle string `xml:"handle,attr"` + Identity string + Obfuscate string `xml:",innerxml"` +} + +type NestedItems struct { + XMLName struct{} `xml:"result"` + Items []string `xml:">item"` + Item1 []string `xml:"Items>item1"` +} + +type NestedOrder struct { + XMLName struct{} `xml:"result"` + Field1 string `xml:"parent>c"` + Field2 string `xml:"parent>b"` + Field3 string `xml:"parent>a"` +} + +type MixedNested struct { + XMLName struct{} `xml:"result"` + A string `xml:"parent1>a"` + B string `xml:"b"` + C string `xml:"parent1>parent2>c"` + D string `xml:"parent1>d"` +} + +type NilTest struct { + A interface{} `xml:"parent1>parent2>a"` + B interface{} `xml:"parent1>b"` + C interface{} `xml:"parent1>parent2>c"` +} + +type Service struct { + XMLName struct{} `xml:"service"` + Domain *Domain `xml:"host>domain"` + Port *Port `xml:"host>port"` + Extra1 interface{} + Extra2 interface{} `xml:"host>extra2"` +} + +var nilStruct *Ship + +type EmbedA struct { + EmbedC + EmbedB EmbedB + FieldA string +} + +type EmbedB struct { + FieldB string + *EmbedC +} + +type EmbedC struct { + FieldA1 string `xml:"FieldA>A1"` + FieldA2 string `xml:"FieldA>A2"` + FieldB string + FieldC string +} + +type NameCasing struct { + XMLName struct{} `xml:"casing"` + Xy string + XY string + XyA string `xml:"Xy,attr"` + XYA string `xml:"XY,attr"` +} + +type NamePrecedence struct { + XMLName Name `xml:"Parent"` + FromTag XMLNameWithoutTag `xml:"InTag"` + FromNameVal XMLNameWithoutTag + FromNameTag XMLNameWithTag + InFieldName string +} + +type XMLNameWithTag struct { + XMLName Name `xml:"InXMLNameTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithNSTag struct { + XMLName Name `xml:"ns InXMLNameWithNSTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithoutTag struct { + XMLName Name + Value string `xml:",chardata"` +} + +type NameInField struct { + Foo Name `xml:"ns foo"` +} + +type AttrTest struct { + Int int `xml:",attr"` + Named int `xml:"int,attr"` + Float float64 `xml:",attr"` + Uint8 uint8 `xml:",attr"` + Bool bool `xml:",attr"` + Str string `xml:",attr"` + Bytes []byte `xml:",attr"` +} + +type OmitAttrTest struct { + Int int `xml:",attr,omitempty"` + Named int `xml:"int,attr,omitempty"` + Float float64 `xml:",attr,omitempty"` + Uint8 uint8 `xml:",attr,omitempty"` + Bool bool `xml:",attr,omitempty"` + Str string `xml:",attr,omitempty"` + Bytes []byte `xml:",attr,omitempty"` +} + +type OmitFieldTest struct { + Int int `xml:",omitempty"` + Named int `xml:"int,omitempty"` + Float float64 `xml:",omitempty"` + Uint8 uint8 `xml:",omitempty"` + Bool bool `xml:",omitempty"` + Str string `xml:",omitempty"` + Bytes []byte `xml:",omitempty"` + Ptr *PresenceTest `xml:",omitempty"` +} + +type AnyTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField AnyHolder `xml:",any"` +} + +type AnyOmitTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField *AnyHolder `xml:",any,omitempty"` +} + +type AnySliceTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField []AnyHolder `xml:",any"` +} + +type AnyHolder struct { + XMLName Name + XML string `xml:",innerxml"` +} + +type RecurseA struct { + A string + B *RecurseB +} + +type RecurseB struct { + A *RecurseA + B string +} + +type PresenceTest struct { + Exists *struct{} +} + +type IgnoreTest struct { + PublicSecret string `xml:"-"` +} + +type MyBytes []byte + +type Data struct { + Bytes []byte + Attr []byte `xml:",attr"` + Custom MyBytes +} + +type Plain struct { + V interface{} +} + +type MyInt int + +type EmbedInt struct { + MyInt +} + +type Strings struct { + X []string `xml:"A>B,omitempty"` +} + +type PointerFieldsTest struct { + XMLName Name `xml:"dummy"` + Name *string `xml:"name,attr"` + Age *uint `xml:"age,attr"` + Empty *string `xml:"empty,attr"` + Contents *string `xml:",chardata"` +} + +type ChardataEmptyTest struct { + XMLName Name `xml:"test"` + Contents *string `xml:",chardata"` +} + +type MyMarshalerTest struct { +} + +var _ Marshaler = (*MyMarshalerTest)(nil) + +func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { + e.EncodeToken(start) + e.EncodeToken(CharData([]byte("hello world"))) + e.EncodeToken(EndElement{start.Name}) + return nil +} + +type MyMarshalerAttrTest struct{} + +var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil) + +func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MyMarshalerValueAttrTest struct{} + +var _ MarshalerAttr = MyMarshalerValueAttrTest{} + +func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MarshalerStruct struct { + Foo MyMarshalerAttrTest `xml:",attr"` +} + +type MarshalerValueStruct struct { + Foo MyMarshalerValueAttrTest `xml:",attr"` +} + +type InnerStruct struct { + XMLName Name `xml:"testns outer"` +} + +type OuterStruct struct { + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterNamedStruct struct { + InnerStruct + XMLName Name `xml:"outerns test"` + IntAttr int `xml:"int,attr"` +} + +type OuterNamedOrderedStruct struct { + XMLName Name `xml:"outerns test"` + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterOuterStruct struct { + OuterStruct +} + +type NestedAndChardata struct { + AB []string `xml:"A>B"` + Chardata string `xml:",chardata"` +} + +type NestedAndComment struct { + AB []string `xml:"A>B"` + Comment string `xml:",comment"` +} + +type XMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body string +} + +type NamedXMLNSFieldStruct struct { + XMLName struct{} `xml:"testns test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type XMLNSFieldStructWithOmitEmpty struct { + Ns string `xml:"xmlns,attr,omitempty"` + Body string +} + +type NamedXMLNSFieldStructWithEmptyNamespace struct { + XMLName struct{} `xml:"test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type RecursiveXMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body *RecursiveXMLNSFieldStruct `xml:",omitempty"` + Text string `xml:",omitempty"` +} + +func ifaceptr(x interface{}) interface{} { + return &x +} + +var ( + nameAttr = "Sarah" + ageAttr = uint(12) + contentsAttr = "lorem ipsum" +) + +// Unless explicitly stated as such (or *Plain), all of the +// tests below are two-way tests. When introducing new tests, +// please try to make them two-way as well to ensure that +// marshalling and unmarshalling are as symmetrical as feasible. +var marshalTests = []struct { + Value interface{} + ExpectXML string + MarshalOnly bool + UnmarshalOnly bool +}{ + // Test nil marshals to nothing + {Value: nil, ExpectXML: ``, MarshalOnly: true}, + {Value: nilStruct, ExpectXML: ``, MarshalOnly: true}, + + // Test value types + {Value: &Plain{true}, ExpectXML: `true`}, + {Value: &Plain{false}, ExpectXML: `false`}, + {Value: &Plain{int(42)}, ExpectXML: `42`}, + {Value: &Plain{int8(42)}, ExpectXML: `42`}, + {Value: &Plain{int16(42)}, ExpectXML: `42`}, + {Value: &Plain{int32(42)}, ExpectXML: `42`}, + {Value: &Plain{uint(42)}, ExpectXML: `42`}, + {Value: &Plain{uint8(42)}, ExpectXML: `42`}, + {Value: &Plain{uint16(42)}, ExpectXML: `42`}, + {Value: &Plain{uint32(42)}, ExpectXML: `42`}, + {Value: &Plain{float32(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{float64(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `65501`}, + {Value: &Plain{"gopher"}, ExpectXML: `gopher`}, + {Value: &Plain{[]byte("gopher")}, ExpectXML: `gopher`}, + {Value: &Plain{""}, ExpectXML: `</>`}, + {Value: &Plain{[]byte("")}, ExpectXML: `</>`}, + {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `</>`}, + {Value: &Plain{NamedType("potato")}, ExpectXML: `potato`}, + {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `true`}, + + // Test time. + { + Value: &Plain{time.Unix(1e9, 123456789).UTC()}, + ExpectXML: `2001-09-09T01:46:40.123456789Z`, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A []byte field is only nil if the element was not found. + { + Value: &Data{}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + { + Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Check that []byte works, including named []byte types. + { + Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}}, + ExpectXML: `abcd`, + }, + + // Test innerxml + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + ExpectXML: `James Bond`, + MarshalOnly: true, + }, + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "James Bond", + }, + ExpectXML: `James Bond`, + UnmarshalOnly: true, + }, + + // Test structs + {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Type: ""}, ExpectXML: ``}, + {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `443`}, + {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `443`, MarshalOnly: true}, + {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `google.com&friends`}, + {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `google.com`}, + {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `Pride & Prejudice`}, + {Value: &Event{Year: -3114}, ExpectXML: `-3114`}, + {Value: &Movie{Length: 13440}, ExpectXML: `13440`}, + {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `3.1415927`}, + {Value: &Universe{Visible: 9.3e13}, ExpectXML: `9.3e+13`}, + {Value: &Particle{HasMass: true}, ExpectXML: `true`}, + {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `2013-01-09T00:15:00-09:00`}, + {Value: atomValue, ExpectXML: atomXml}, + { + Value: &Ship{ + Name: "Heart of Gold", + Pilot: "Computer", + Age: 1, + Drive: ImprobabilityDrive, + Passenger: []*Passenger{ + { + Name: []string{"Zaphod", "Beeblebrox"}, + Weight: 7.25, + }, + { + Name: []string{"Trisha", "McMillen"}, + Weight: 5.5, + }, + { + Name: []string{"Ford", "Prefect"}, + Weight: 7, + }, + { + Name: []string{"Arthur", "Dent"}, + Weight: 6.75, + }, + }, + }, + ExpectXML: `` + + `` + strconv.Itoa(int(ImprobabilityDrive)) + `` + + `1` + + `` + + `Zaphod` + + `Beeblebrox` + + `7.25` + + `` + + `` + + `Trisha` + + `McMillen` + + `5.5` + + `` + + `` + + `Ford` + + `Prefect` + + `7` + + `` + + `` + + `Arthur` + + `Dent` + + `6.75` + + `` + + ``, + }, + + // Test a>b + { + Value: &NestedItems{Items: nil, Item1: nil}, + ExpectXML: `` + + `` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{}, Item1: []string{}}, + ExpectXML: `` + + `` + + `` + + ``, + MarshalOnly: true, + }, + { + Value: &NestedItems{Items: nil, Item1: []string{"A"}}, + ExpectXML: `` + + `` + + `A` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil}, + ExpectXML: `` + + `` + + `A` + + `B` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}}, + ExpectXML: `` + + `` + + `A` + + `B` + + `C` + + `` + + ``, + }, + { + Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"}, + ExpectXML: `` + + `` + + `C` + + `B` + + `A` + + `` + + ``, + }, + { + Value: &NilTest{A: "A", B: nil, C: "C"}, + ExpectXML: `` + + `` + + `A` + + `C` + + `` + + ``, + MarshalOnly: true, // Uses interface{} + }, + { + Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"}, + ExpectXML: `` + + `A` + + `B` + + `` + + `C` + + `D` + + `` + + ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}}, + ExpectXML: `80`, + }, + { + Value: &Service{}, + ExpectXML: ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"}, + ExpectXML: `` + + `80` + + `A` + + `B` + + ``, + MarshalOnly: true, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"}, + ExpectXML: `` + + `80` + + `example` + + ``, + MarshalOnly: true, + }, + { + Value: &struct { + XMLName struct{} `xml:"space top"` + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + E1 string `xml:"x>e"` + }{ + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + E1: "e1", + }, + ExpectXML: `` + + `abc` + + `` + + `c1` + + `d1` + + `` + + `` + + `e1` + + `` + + ``, + }, + { + Value: &struct { + XMLName Name + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + }{ + XMLName: Name{ + Space: "space0", + Local: "top", + }, + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + }, + ExpectXML: `` + + `ab` + + `c` + + `` + + `c1` + + `d1` + + `` + + ``, + }, + { + Value: &struct { + XMLName struct{} `xml:"top"` + B string `xml:"space x>b"` + B1 string `xml:"space1 x>b"` + }{ + B: "b", + B1: "b1", + }, + ExpectXML: `` + + `b` + + `b1` + + ``, + }, + + // Test struct embedding + { + Value: &EmbedA{ + EmbedC: EmbedC{ + FieldA1: "", // Shadowed by A.A + FieldA2: "", // Shadowed by A.A + FieldB: "A.C.B", + FieldC: "A.C.C", + }, + EmbedB: EmbedB{ + FieldB: "A.B.B", + EmbedC: &EmbedC{ + FieldA1: "A.B.C.A1", + FieldA2: "A.B.C.A2", + FieldB: "", // Shadowed by A.B.B + FieldC: "A.B.C.C", + }, + }, + FieldA: "A.A", + }, + ExpectXML: `` + + `A.C.B` + + `A.C.C` + + `` + + `A.B.B` + + `` + + `A.B.C.A1` + + `A.B.C.A2` + + `` + + `A.B.C.C` + + `` + + `A.A` + + ``, + }, + + // Test that name casing matters + { + Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"}, + ExpectXML: `mixedupper`, + }, + + // Test the order in which the XML element name is chosen + { + Value: &NamePrecedence{ + FromTag: XMLNameWithoutTag{Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"}, + FromNameTag: XMLNameWithTag{Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + MarshalOnly: true, + }, + { + Value: &NamePrecedence{ + XMLName: Name{Local: "Parent"}, + FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"}, + FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + UnmarshalOnly: true, + }, + + // xml.Name works in a plain field as well. + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + }, + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Marshaling zero xml.Name uses the tag or field name. + { + Value: &NameInField{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // Test attributes + { + Value: &AttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &AttrTest{Bytes: []byte{}}, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{}, + ExpectXML: ``, + }, + + // pointer fields + { + Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr}, + ExpectXML: `lorem ipsum`, + MarshalOnly: true, + }, + + // empty chardata pointer field + { + Value: &ChardataEmptyTest{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // omitempty on fields + { + Value: &OmitFieldTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + Ptr: &PresenceTest{}, + }, + ExpectXML: `` + + `8` + + `9` + + `23.5` + + `255` + + `true` + + `str` + + `byt` + + `` + + ``, + }, + { + Value: &OmitFieldTest{}, + ExpectXML: ``, + }, + + // Test ",any" + { + ExpectXML: `knownunknown`, + Value: &AnyTest{ + Nested: "known", + AnyField: AnyHolder{ + XMLName: Name{Local: "other"}, + XML: "unknown", + }, + }, + }, + { + Value: &AnyTest{Nested: "known", + AnyField: AnyHolder{ + XML: "", + XMLName: Name{Local: "AnyField"}, + }, + }, + ExpectXML: `known`, + }, + { + ExpectXML: `b`, + Value: &AnyOmitTest{ + Nested: "b", + }, + }, + { + ExpectXML: `bei`, + Value: &AnySliceTest{ + Nested: "b", + AnyField: []AnyHolder{ + { + XMLName: Name{Local: "c"}, + XML: "e", + }, + { + XMLName: Name{Space: "f", Local: "g"}, + XML: "i", + }, + }, + }, + }, + { + ExpectXML: `b`, + Value: &AnySliceTest{ + Nested: "b", + }, + }, + + // Test recursive types. + { + Value: &RecurseA{ + A: "a1", + B: &RecurseB{ + A: &RecurseA{"a2", nil}, + B: "b1", + }, + }, + ExpectXML: `a1a2b1`, + }, + + // Test ignoring fields via "-" tag + { + ExpectXML: ``, + Value: &IgnoreTest{}, + }, + { + ExpectXML: ``, + Value: &IgnoreTest{PublicSecret: "can't tell"}, + MarshalOnly: true, + }, + { + ExpectXML: `ignore me`, + Value: &IgnoreTest{}, + UnmarshalOnly: true, + }, + + // Test escaping. + { + ExpectXML: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + Value: &AnyTest{ + Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + AnyField: AnyHolder{XMLName: Name{Local: "empty"}}, + }, + }, + { + ExpectXML: `newline: ; cr: ; tab: ;`, + Value: &AnyTest{ + Nested: "newline: \n; cr: \r; tab: \t;", + AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}}, + }, + }, + { + ExpectXML: "1\r2\r\n3\n\r4\n5", + Value: &AnyTest{ + Nested: "1\n2\n3\n\n4\n5", + }, + UnmarshalOnly: true, + }, + { + ExpectXML: `42`, + Value: &EmbedInt{ + MyInt: 42, + }, + }, + // Test omitempty with parent chain; see golang.org/issue/4168. + { + ExpectXML: ``, + Value: &Strings{}, + }, + // Custom marshalers. + { + ExpectXML: `hello world`, + Value: &MyMarshalerTest{}, + }, + { + ExpectXML: ``, + Value: &MarshalerStruct{}, + }, + { + ExpectXML: ``, + Value: &MarshalerValueStruct{}, + }, + { + ExpectXML: ``, + Value: &OuterStruct{IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}}, + }, + { + ExpectXML: `test`, + Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"}, + }, + { + ExpectXML: ``, + Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"}, + }, + { + // The xmlns attribute must be ignored because the + // element is in the empty namespace, so it's not possible + // to set the default namespace to something non-empty. + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"}, + MarshalOnly: true, + }, + { + ExpectXML: `hello world`, + Value: &RecursiveXMLNSFieldStruct{ + Ns: "foo", + Body: &RecursiveXMLNSFieldStruct{ + Text: "hello world", + }, + }, + }, +} + +func TestMarshal(t *testing.T) { + for idx, test := range marshalTests { + if test.UnmarshalOnly { + continue + } + data, err := Marshal(test.Value) + if err != nil { + t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + if strings.Contains(want, "\n") { + t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want) + } else { + t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want) + } + } + } +} + +type AttrParent struct { + X string `xml:"X>Y,attr"` +} + +type BadAttr struct { + Name []string `xml:"name,attr"` +} + +var marshalErrorTests = []struct { + Value interface{} + Err string + Kind reflect.Kind +}{ + { + Value: make(chan bool), + Err: "xml: unsupported type: chan bool", + Kind: reflect.Chan, + }, + { + Value: map[string]string{ + "question": "What do you get when you multiply six by nine?", + "answer": "42", + }, + Err: "xml: unsupported type: map[string]string", + Kind: reflect.Map, + }, + { + Value: map[*Ship]bool{nil: false}, + Err: "xml: unsupported type: map[*xml.Ship]bool", + Kind: reflect.Map, + }, + { + Value: &Domain{Comment: []byte("f--bar")}, + Err: `xml: comments must not contain "--"`, + }, + // Reject parent chain with attr, never worked; see golang.org/issue/5033. + { + Value: &AttrParent{}, + Err: `xml: X>Y chain not valid with attr flag`, + }, + { + Value: BadAttr{[]string{"X", "Y"}}, + Err: `xml: unsupported type: []string`, + }, +} + +var marshalIndentTests = []struct { + Value interface{} + Prefix string + Indent string + ExpectXML string +}{ + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + Prefix: "", + Indent: "\t", + ExpectXML: fmt.Sprintf("\n\tJames Bond\n"), + }, +} + +func TestMarshalErrors(t *testing.T) { + for idx, test := range marshalErrorTests { + data, err := Marshal(test.Value) + if err == nil { + t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err) + continue + } + if err.Error() != test.Err { + t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err) + } + if test.Kind != reflect.Invalid { + if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind { + t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind) + } + } + } +} + +// Do invertibility testing on the various structures that we test +func TestUnmarshal(t *testing.T) { + for i, test := range marshalTests { + if test.MarshalOnly { + continue + } + if _, ok := test.Value.(*Plain); ok { + continue + } + vt := reflect.TypeOf(test.Value) + dest := reflect.New(vt.Elem()).Interface() + err := Unmarshal([]byte(test.ExpectXML), dest) + + switch fix := dest.(type) { + case *Feed: + fix.Author.InnerXML = "" + for i := range fix.Entry { + fix.Entry[i].Author.InnerXML = "" + } + } + + if err != nil { + t.Errorf("#%d: unexpected error: %#v", i, err) + } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) { + t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want) + } + } +} + +func TestMarshalIndent(t *testing.T) { + for i, test := range marshalIndentTests { + data, err := MarshalIndent(test.Value, test.Prefix, test.Indent) + if err != nil { + t.Errorf("#%d: Error: %s", i, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want) + } + } +} + +type limitedBytesWriter struct { + w io.Writer + remain int // until writes fail +} + +func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) { + if lw.remain <= 0 { + println("error") + return 0, errors.New("write limit hit") + } + if len(p) > lw.remain { + p = p[:lw.remain] + n, _ = lw.w.Write(p) + lw.remain = 0 + return n, errors.New("write limit hit") + } + n, err = lw.w.Write(p) + lw.remain -= n + return n, err +} + +func TestMarshalWriteErrors(t *testing.T) { + var buf bytes.Buffer + const writeCap = 1024 + w := &limitedBytesWriter{&buf, writeCap} + enc := NewEncoder(w) + var err error + var i int + const n = 4000 + for i = 1; i <= n; i++ { + err = enc.Encode(&Passenger{ + Name: []string{"Alice", "Bob"}, + Weight: 5, + }) + if err != nil { + break + } + } + if err == nil { + t.Error("expected an error") + } + if i == n { + t.Errorf("expected to fail before the end") + } + if buf.Len() != writeCap { + t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap) + } +} + +func TestMarshalWriteIOErrors(t *testing.T) { + enc := NewEncoder(errWriter{}) + + expectErr := "unwritable" + err := enc.Encode(&Passenger{}) + if err == nil || err.Error() != expectErr { + t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr) + } +} + +func TestMarshalFlush(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(CharData("hello world")); err != nil { + t.Fatalf("enc.EncodeToken: %v", err) + } + if buf.Len() > 0 { + t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes()) + } + if err := enc.Flush(); err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if buf.String() != "hello world" { + t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world") + } +} + +var encodeElementTests = []struct { + desc string + value interface{} + start StartElement + expectXML string +}{{ + desc: "simple string", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + }, + expectXML: `hello`, +}, { + desc: "string with added attributes", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "x"}, + Value: "y", + }, { + Name: Name{Local: "foo"}, + Value: "bar", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element with default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element in name space with different default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "XMLMarshaler with start element with default name space", + value: &MyMarshalerTest{}, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello world`, +}} + +func TestEncodeElement(t *testing.T) { + for idx, test := range encodeElementTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + err := enc.EncodeElement(test.value, test.start) + if err != nil { + t.Fatalf("enc.EncodeElement: %v", err) + } + err = enc.Flush() + if err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if got, want := buf.String(), test.expectXML; got != want { + t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want) + } + } +} + +func BenchmarkMarshal(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Marshal(atomValue) + } +} + +func BenchmarkUnmarshal(b *testing.B) { + b.ReportAllocs() + xml := []byte(atomXml) + for i := 0; i < b.N; i++ { + Unmarshal(xml, &Feed{}) + } +} + +// golang.org/issue/6556 +func TestStructPointerMarshal(t *testing.T) { + type A struct { + XMLName string `xml:"a"` + B []interface{} + } + type C struct { + XMLName Name + Value string `xml:"value"` + } + + a := new(A) + a.B = append(a.B, &C{ + XMLName: Name{Local: "c"}, + Value: "x", + }) + + b, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + if x := string(b); x != "x" { + t.Fatal(x) + } + var v A + err = Unmarshal(b, &v) + if err != nil { + t.Fatal(err) + } +} + +var encodeTokenTests = []struct { + desc string + toks []Token + want string + err string +}{{ + desc: "start element with name space", + toks: []Token{ + StartElement{Name{"space", "local"}, nil}, + }, + want: ``, +}, { + desc: "start element with no name", + toks: []Token{ + StartElement{Name{"space", ""}, nil}, + }, + err: "xml: start tag with no name", +}, { + desc: "end element with no name", + toks: []Token{ + EndElement{Name{"space", ""}}, + }, + err: "xml: end tag with no name", +}, { + desc: "char data", + toks: []Token{ + CharData("foo"), + }, + want: `foo`, +}, { + desc: "char data with escaped chars", + toks: []Token{ + CharData(" \t\n"), + }, + want: " \n", +}, { + desc: "comment", + toks: []Token{ + Comment("foo"), + }, + want: ``, +}, { + desc: "comment with invalid content", + toks: []Token{ + Comment("foo-->"), + }, + err: "xml: EncodeToken of Comment containing --> marker", +}, { + desc: "proc instruction", + toks: []Token{ + ProcInst{"Target", []byte("Instruction")}, + }, + want: ``, +}, { + desc: "proc instruction with empty target", + toks: []Token{ + ProcInst{"", []byte("Instruction")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "proc instruction with bad content", + toks: []Token{ + ProcInst{"", []byte("Instruction?>")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "directive", + toks: []Token{ + Directive("foo"), + }, + want: ``, +}, { + desc: "more complex directive", + toks: []Token{ + Directive("DOCTYPE doc [ '> ]"), + }, + want: `'> ]>`, +}, { + desc: "directive instruction with bad name", + toks: []Token{ + Directive("foo>"), + }, + err: "xml: EncodeToken of Directive containing wrong < or > markers", +}, { + desc: "end tag without start tag", + toks: []Token{ + EndElement{Name{"foo", "bar"}}, + }, + err: "xml: end tag without start tag", +}, { + desc: "mismatching end tag local name", + toks: []Token{ + StartElement{Name{"", "foo"}, nil}, + EndElement{Name{"", "bar"}}, + }, + err: "xml: end tag does not match start tag ", + want: ``, +}, { + desc: "mismatching end tag namespace", + toks: []Token{ + StartElement{Name{"space", "foo"}, nil}, + EndElement{Name{"another", "foo"}}, + }, + err: "xml: end tag in namespace another does not match start tag in namespace space", + want: ``, +}, { + desc: "start element with explicit namespace", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "start element with explicit namespace and colliding prefix", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + {Name{"x", "bar"}, "other"}, + }}, + }, + want: ``, +}, { + desc: "start element using previously defined namespace", + toks: []Token{ + StartElement{Name{"", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"space", "x"}, "y"}, + }}, + }, + want: ``, +}, { + desc: "nested name space with same prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space1"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space2"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + EndElement{Name{"", "foo"}}, + EndElement{Name{"", "foo"}}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + }, + want: ``, +}, { + desc: "start element defining several prefixes for the same name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "a"}, "space"}, + {Name{"xmlns", "b"}, "space"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element redefines name space", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element creates alias for default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element defines default name space with existing prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element uses empty attribute name space when default ns defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "redefine xmlns", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"foo", "xmlns"}, "space"}, + }}, + }, + err: `xml: cannot redefine xmlns attribute prefix`, +}, { + desc: "xmlns with explicit name space #1", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xml", "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "xmlns with explicit name space #2", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{xmlURL, "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "empty name space declaration is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "foo"}, ""}, + }}, + }, + want: ``, +}, { + desc: "attribute with no name is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", ""}, "value"}, + }}, + }, + want: ``, +}, { + desc: "namespace URL with non-valid name", + toks: []Token{ + StartElement{Name{"/34", "foo"}, []Attr{ + {Name{"/34", "x"}, "value"}, + }}, + }, + want: `<_:foo xmlns:_="/34" _:x="value">`, +}, { + desc: "nested element resets default namespace to empty", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, ""}, + {Name{"", "x"}, "value"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element requires empty default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, nil}, + }, + want: ``, +}, { + desc: "attribute uses name space from xmlns", + toks: []Token{ + StartElement{Name{"some/space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + {Name{"some/space", "other"}, "other value"}, + }}, + }, + want: ``, +}, { + desc: "default name space should not be used by attributes", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"xmlns", "bar"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "default name space not used by attributes, not explicitly defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "impossible xmlns declaration", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "bar"}, []Attr{ + {Name{"space", "attr"}, "value"}, + }}, + }, + want: ``, +}} + +func TestEncodeToken(t *testing.T) { +loop: + for i, tt := range encodeTokenTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + var err error + for j, tok := range tt.toks { + err = enc.EncodeToken(tok) + if err != nil && j < len(tt.toks)-1 { + t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err) + continue loop + } + } + errorf := func(f string, a ...interface{}) { + t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...)) + } + switch { + case tt.err != "" && err == nil: + errorf(" expected error; got none") + continue + case tt.err == "" && err != nil: + errorf(" got error: %v", err) + continue + case tt.err != "" && err != nil && tt.err != err.Error(): + errorf(" error mismatch; got %v, want %v", err, tt.err) + continue + } + if err := enc.Flush(); err != nil { + errorf(" %v", err) + continue + } + if got := buf.String(); got != tt.want { + errorf("\ngot %v\nwant %v", got, tt.want) + continue + } + } +} + +func TestProcInstEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err) + } + + if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst") + } + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil { + t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token") + } +} + +func TestDecodeEncode(t *testing.T) { + var in, out bytes.Buffer + in.WriteString(` + + + +`) + dec := NewDecoder(&in) + enc := NewEncoder(&out) + for tok, err := dec.Token(); err == nil; tok, err = dec.Token() { + err = enc.EncodeToken(tok) + if err != nil { + t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err) + } + } +} + +// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race. +func TestRace9796(t *testing.T) { + type A struct{} + type B struct { + C []A `xml:"X>Y"` + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + Marshal(B{[]A{{}}}) + wg.Done() + }() + } + wg.Wait() +} + +func TestIsValidDirective(t *testing.T) { + testOK := []string{ + "<>", + "< < > >", + "' '>' >", + " ]>", + " '<' ' doc ANY> ]>", + ">>> a < comment --> [ ] >", + } + testKO := []string{ + "<", + ">", + "", + "< > > < < >", + " -->", + "", + "'", + "", + } + for _, s := range testOK { + if !isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be valid", s) + } + } + for _, s := range testKO { + if isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be invalid", s) + } + } +} + +// Issue 11719. EncodeToken used to silently eat tokens with an invalid type. +func TestSimpleUseOfEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: StartElement %s", err) + } + if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: EndElement %s", err) + } + if err := enc.EncodeToken(Universe{}); err == nil { + t.Errorf("enc.EncodeToken: invalid type not caught") + } + if err := enc.Flush(); err != nil { + t.Errorf("enc.Flush: %s", err) + } + if buf.Len() == 0 { + t.Errorf("enc.EncodeToken: empty buffer") + } + want := "" + if buf.String() != want { + t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String()) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go new file mode 100644 index 0000000..4089056 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read.go @@ -0,0 +1,692 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. + +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. +// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. +// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. +type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. +func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. + if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. + for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) + } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. + parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read_test.go b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go new file mode 100644 index 0000000..02f1e10 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go @@ -0,0 +1,744 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" +) + +// Stripped down Atom feed data structures. + +func TestUnmarshalFeed(t *testing.T) { + var f Feed + if err := Unmarshal([]byte(atomFeedString), &f); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(f, atomFeed) { + t.Fatalf("have %#v\nwant %#v", f, atomFeed) + } +} + +// hget http://codereview.appspot.com/rss/mine/rsc +const atomFeedString = ` + +Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub +2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + +type Feed struct { + XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated,attr"` + Author Person `xml:"author"` + Entry []Entry `xml:"entry"` +} + +type Entry struct { + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated"` + Author Person `xml:"author"` + Summary Text `xml:"summary"` +} + +type Link struct { + Rel string `xml:"rel,attr,omitempty"` + Href string `xml:"href,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr,omitempty"` + Body string `xml:",chardata"` +} + +var atomFeed = Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Code Review - My issues", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/"}, + {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, + }, + Id: "http://codereview.appspot.com/", + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "rietveld<>", + InnerXML: "rietveld<>", + }, + Entry: []Entry{ + { + Title: "rietveld: an attempt at pubsubhubbub\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, + }, + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", + Summary: Text{ + Type: "html", + Body: ` + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a <link rel="hub" href="hub-server"> tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can't quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed's actual URL in +the link rel="self", but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +`, + }, + }, + { + Title: "rietveld: correct tab handling\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, + }, + Updated: ParseTime("2009-10-03T23:02:17+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", + Summary: Text{ + Type: "html", + Body: ` + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn't know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + +`, + }, + }, + }, +} + +const pathTestString = ` + + 1 + + + A + + + B + + + C + D + + <_> + E + + + 2 + +` + +type PathTestItem struct { + Value string +} + +type PathTestA struct { + Items []PathTestItem `xml:">Item1"` + Before, After string +} + +type PathTestB struct { + Other []PathTestItem `xml:"Items>Item1"` + Before, After string +} + +type PathTestC struct { + Values1 []string `xml:"Items>Item1>Value"` + Values2 []string `xml:"Items>Item2>Value"` + Before, After string +} + +type PathTestSet struct { + Item1 []PathTestItem +} + +type PathTestD struct { + Other PathTestSet `xml:"Items"` + Before, After string +} + +type PathTestE struct { + Underline string `xml:"Items>_>Value"` + Before, After string +} + +var pathTests = []interface{}{ + &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"}, + &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"}, + &PathTestE{Underline: "E", Before: "1", After: "2"}, +} + +func TestUnmarshalPaths(t *testing.T) { + for _, pt := range pathTests { + v := reflect.New(reflect.TypeOf(pt).Elem()).Interface() + if err := Unmarshal([]byte(pathTestString), v); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(v, pt) { + t.Fatalf("have %#v\nwant %#v", v, pt) + } + } +} + +type BadPathTestA struct { + First string `xml:"items>item1"` + Other string `xml:"items>item2"` + Second string `xml:"items"` +} + +type BadPathTestB struct { + Other string `xml:"items>item2>value"` + First string `xml:"items>item1"` + Second string `xml:"items>item1>value"` +} + +type BadPathTestC struct { + First string + Second string `xml:"First"` +} + +type BadPathTestD struct { + BadPathEmbeddedA + BadPathEmbeddedB +} + +type BadPathEmbeddedA struct { + First string +} + +type BadPathEmbeddedB struct { + Second string `xml:"First"` +} + +var badPathTests = []struct { + v, e interface{} +}{ + {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}}, + {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}}, + {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}}, + {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}}, +} + +func TestUnmarshalBadPaths(t *testing.T) { + for _, tt := range badPathTests { + err := Unmarshal([]byte(pathTestString), tt.v) + if !reflect.DeepEqual(err, tt.e) { + t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e) + } + } +} + +const OK = "OK" +const withoutNameTypeData = ` + +` + +type TestThree struct { + XMLName Name `xml:"Test3"` + Attr string `xml:",attr"` +} + +func TestUnmarshalWithoutNameType(t *testing.T) { + var x TestThree + if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if x.Attr != OK { + t.Fatalf("have %v\nwant %v", x.Attr, OK) + } +} + +func TestUnmarshalAttr(t *testing.T) { + type ParamVal struct { + Int int `xml:"int,attr"` + } + + type ParamPtr struct { + Int *int `xml:"int,attr"` + } + + type ParamStringPtr struct { + Int *string `xml:"int,attr"` + } + + x := []byte(``) + + p1 := &ParamPtr{} + if err := Unmarshal(x, p1); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p1.Int == nil { + t.Fatalf("Unmarshal failed in to *int field") + } else if *p1.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) + } + + p2 := &ParamVal{} + if err := Unmarshal(x, p2); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p2.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) + } + + p3 := &ParamStringPtr{} + if err := Unmarshal(x, p3); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p3.Int == nil { + t.Fatalf("Unmarshal failed in to *string field") + } else if *p3.Int != "1" { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) + } +} + +type Tables struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table"` + FTable string `xml:"http://www.w3schools.com/furniture table"` +} + +var tables = []struct { + xml string + tab Tables + ns string +}{ + { + xml: `` + + `hello
    ` + + `world
    ` + + `
    `, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world
    ` + + `hello
    ` + + `
    `, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world` + + `hello` + + ``, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `bogus
    ` + + `
    `, + tab: Tables{}, + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{HTable: "only"}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{FTable: "only"}, + ns: "http://www.w3schools.com/furniture", + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNS(t *testing.T) { + for i, tt := range tables { + var dst Tables + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestRoundTrip(t *testing.T) { + // From issue 7535 + const s = `` + in := bytes.NewBufferString(s) + for i := 0; i < 10; i++ { + out := &bytes.Buffer{} + d := NewDecoder(in) + e := NewEncoder(out) + + for { + t, err := d.Token() + if err == io.EOF { + break + } + if err != nil { + fmt.Println("failed:", err) + return + } + e.EncodeToken(t) + } + e.Flush() + in = out + } + if got := in.String(); got != s { + t.Errorf("have: %q\nwant: %q\n", got, s) + } +} + +func TestMarshalNS(t *testing.T) { + dst := Tables{"hello", "world"} + data, err := Marshal(&dst) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `hello
    world
    ` + str := string(data) + if str != want { + t.Errorf("have: %q\nwant: %q\n", str, want) + } +} + +type TableAttrs struct { + TAttr TAttr +} + +type TAttr struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` + FTable string `xml:"http://www.w3schools.com/furniture table,attr"` + Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` + Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` + Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` + Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` + Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"` +} + +var tableAttrs = []struct { + xml string + tab TableAttrs + ns string +}{ + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + ns: "http://www.w3schools.com/furniture", + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: ``, + tab: TableAttrs{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNSAttr(t *testing.T) { + for i, tt := range tableAttrs { + var dst TableAttrs + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNSAttr(t *testing.T) { + src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} + data, err := Marshal(&src) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `` + str := string(data) + if str != want { + t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) + } + + var dst TableAttrs + if err := Unmarshal(data, &dst); err != nil { + t.Errorf("Unmarshal: %v", err) + } + + if dst != src { + t.Errorf("Unmarshal = %q, want %q", dst, src) + } +} + +type MyCharData struct { + body string +} + +func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { // found end of element + break + } + if err != nil { + return err + } + if char, ok := t.(CharData); ok { + m.body += string(char) + } + } + return nil +} + +var _ Unmarshaler = (*MyCharData)(nil) + +func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { + panic("must not call") +} + +type MyAttr struct { + attr string +} + +func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { + m.attr = attr.Value + return nil +} + +var _ UnmarshalerAttr = (*MyAttr)(nil) + +type MyStruct struct { + Data *MyCharData + Attr *MyAttr `xml:",attr"` + + Data2 MyCharData + Attr2 MyAttr `xml:",attr"` +} + +func TestUnmarshaler(t *testing.T) { + xml := ` + + hello world + howdy world + + ` + + var m MyStruct + if err := Unmarshal([]byte(xml), &m); err != nil { + t.Fatal(err) + } + + if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { + t.Errorf("m=%#+v\n", m) + } +} + +type Pea struct { + Cotelydon string +} + +type Pod struct { + Pea interface{} `xml:"Pea"` +} + +// https://golang.org/issue/6836 +func TestUnmarshalIntoInterface(t *testing.T) { + pod := new(Pod) + pod.Pea = new(Pea) + xml := `Green stuff` + err := Unmarshal([]byte(xml), pod) + if err != nil { + t.Fatalf("failed to unmarshal %q: %v", xml, err) + } + pea, ok := pod.Pea.(*Pea) + if !ok { + t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) + } + have, want := pea.Cotelydon, "Green stuff" + if have != want { + t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go new file mode 100644 index 0000000..fdde288 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. +func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. +func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. +// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go new file mode 100644 index 0000000..5b79cbe --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/xml.go @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. +// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. + for _, attr := range e.Attr { + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + return + } + } + e.Attr = append(e.Attr, Attr{ + Name: Name{ + Local: "xmlns", + }, + Value: e.Name.Space, + }) +} + +// An EndElement represents an XML end element. +type EndElement struct { + Name Name +} + +// A CharData represents XML character data (raw text), +// in which XML escape sequences have been replaced by +// the characters they represent. +type CharData []byte + +func makeCopy(b []byte) []byte { + b1 := make([]byte, len(b)) + copy(b1, b) + return b1 +} + +func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } + +// A Comment represents an XML comment of the form . +// The bytes do not include the comment markers. +type Comment []byte + +func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } + +// A ProcInst represents an XML processing instruction of the form +type ProcInst struct { + Target string + Inst []byte +} + +func (p ProcInst) Copy() ProcInst { + p.Inst = makeCopy(p.Inst) + return p +} + +// A Directive represents an XML directive of the form . +// The bytes do not include the markers. +type Directive []byte + +func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } + +// CopyToken returns a copy of a Token. +func CopyToken(t Token) Token { + switch v := t.(type) { + case CharData: + return v.Copy() + case Comment: + return v.Copy() + case Directive: + return v.Copy() + case ProcInst: + return v.Copy() + case StartElement: + return v.Copy() + } + return t +} + +// A Decoder represents an XML parser reading a particular input stream. +// The parser assumes that its input is encoded in UTF-8. +type Decoder struct { + // Strict defaults to true, enforcing the requirements + // of the XML specification. + // If set to false, the parser allows input containing common + // mistakes: + // * If an element is missing an end tag, the parser invents + // end tags as necessary to keep the return values from Token + // properly balanced. + // * In attribute values and character data, unknown or malformed + // character entities (sequences beginning with &) are left alone. + // + // Setting: + // + // d.Strict = false; + // d.AutoClose = HTMLAutoClose; + // d.Entity = HTMLEntity + // + // creates a parser that can handle typical HTML. + // + // Strict mode does not enforce the requirements of the XML name spaces TR. + // In particular it does not reject name space tags using undefined prefixes. + // Such tags are recorded with the unknown prefix as the name space URL. + Strict bool + + // When Strict == false, AutoClose indicates a set of elements to + // consider closed immediately after they are opened, regardless + // of whether an end element is present. + AutoClose []string + + // Entity can be used to map non-standard entity names to string replacements. + // The parser behaves as if these standard mappings are present in the map, + // regardless of the actual map content: + // + // "lt": "<", + // "gt": ">", + // "amp": "&", + // "apos": "'", + // "quot": `"`, + Entity map[string]string + + // CharsetReader, if non-nil, defines a function to generate + // charset-conversion readers, converting from the provided + // non-UTF-8 charset into UTF-8. If CharsetReader is nil or + // returns an error, parsing stops with an error. One of the + // the CharsetReader's result values must be non-nil. + CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // DefaultSpace sets the default name space used for unadorned tags, + // as if the entire XML stream were wrapped in an element containing + // the attribute xmlns="DefaultSpace". + DefaultSpace string + + r io.ByteReader + buf bytes.Buffer + saved *bytes.Buffer + stk *stack + free *stack + needClose bool + toClose Name + nextToken Token + nextByte int + ns map[string]string + err error + line int + offset int64 + unmarshalDepth int +} + +// NewDecoder creates a new XML parser reading from r. +// If r does not implement io.ByteReader, NewDecoder will +// do its own buffering. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + ns: make(map[string]string), + nextByte: -1, + line: 1, + Strict: true, + } + d.switchToReader(r) + return d +} + +// Token returns the next XML token in the input stream. +// At the end of the input stream, Token returns nil, io.EOF. +// +// Slices of bytes in the returned token data refer to the +// parser's internal buffer and remain valid only until the next +// call to Token. To acquire a copy of the bytes, call CopyToken +// or the token's Copy method. +// +// Token expands self-closing elements such as
    +// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. + // It might not be the top, because there might be stkNs + // entries above it. + start := d.stk + for start.kind != stkStart { + start = start.next + } + // The stkNs entries below a start are associated with that + // element too; skip over them. + for start.next != nil && start.next.kind == stkNs { + start = start.next + } + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.kind = stkEOF + s.next = start.next + start.next = s +} + +// Undo a pushEOF. +// The element must have been finished, so the EOF should be at the top of the stack. +func (d *Decoder) popEOF() bool { + if d.stk == nil || d.stk.kind != stkEOF { + return false + } + d.pop() + return true +} + +// Record that we are starting an element with the given name. +func (d *Decoder) pushElement(name Name) { + s := d.push(stkStart) + s.name = name +} + +// Record that we are changing the value of ns[local]. +// The old value is url, ok. +func (d *Decoder) pushNs(local string, url string, ok bool) { + s := d.push(stkNs) + s.name.Local = local + s.name.Space = url + s.ok = ok +} + +// Creates a SyntaxError with the current line number. +func (d *Decoder) syntaxError(msg string) error { + return &SyntaxError{Msg: msg, Line: d.line} +} + +// Record that we are ending an element with the given name. +// The name must match the record at the top of the stack, +// which must be a pushElement record. +// After popping the element, apply any undo records from +// the stack to restore the name translations that existed +// before we saw this element. +func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element ") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. + et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. +func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // ' { + d.err = d.syntaxError("invalid characters between ") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // ' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // ' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // . + data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: , , etc. + // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. + d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for ` + +var testEntity = map[string]string{"何": "What", "is-it": "is it?"} + +var rawTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"", "hello"}}, + CharData("\n "), + StartElement{Name{"", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"", "query"}}, + CharData("\n "), + StartElement{Name{"", "goodbye"}, []Attr{}}, + EndElement{Name{"", "goodbye"}}, + CharData("\n "), + StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"", "inner"}, []Attr{}}, + EndElement{Name{"", "inner"}}, + CharData("\n "), + EndElement{Name{"", "outer"}}, + CharData("\n "), + StartElement{Name{"tag", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"tag", "name"}}, + CharData("\n"), + EndElement{Name{"", "body"}}, + Comment(" missing final newline "), +} + +var cookedTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"ns2", "hello"}}, + CharData("\n "), + StartElement{Name{"ns2", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"ns2", "query"}}, + CharData("\n "), + StartElement{Name{"ns2", "goodbye"}, []Attr{}}, + EndElement{Name{"ns2", "goodbye"}}, + CharData("\n "), + StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"ns2", "inner"}, []Attr{}}, + EndElement{Name{"ns2", "inner"}}, + CharData("\n "), + EndElement{Name{"ns2", "outer"}}, + CharData("\n "), + StartElement{Name{"ns3", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"ns3", "name"}}, + CharData("\n"), + EndElement{Name{"ns2", "body"}}, + Comment(" missing final newline "), +} + +const testInputAltEncoding = ` + +VALUE` + +var rawTokensAltEncoding = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("value"), + EndElement{Name{"", "tag"}}, +} + +var xmlInput = []string{ + // unexpected EOF cases + "<", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "", + "", + " c;", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "cdata]]>", +} + +func TestRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + testRawToken(t, d, testInput, rawTokens) +} + +const nonStrictInput = ` +non&entity +&unknown;entity +{ +&#zzz; +&なまえ3; +<-gt; +&; +&0a; +` + +var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"} + +var nonStrictTokens = []Token{ + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("non&entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&unknown;entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("{"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&#zzz;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&なまえ3;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("<-gt;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&0a;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), +} + +func TestNonStrictRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(nonStrictInput)) + d.Strict = false + testRawToken(t, d, nonStrictInput, nonStrictTokens) +} + +type downCaser struct { + t *testing.T + r io.ByteReader +} + +func (d *downCaser) ReadByte() (c byte, err error) { + c, err = d.r.ReadByte() + if c >= 'A' && c <= 'Z' { + c += 'a' - 'A' + } + return +} + +func (d *downCaser) Read(p []byte) (int, error) { + d.t.Fatalf("unexpected Read call on downCaser reader") + panic("unreachable") +} + +func TestRawTokenAltEncoding(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) { + if charset != "x-testing-uppercase" { + t.Fatalf("unexpected charset %q", charset) + } + return &downCaser{t, input.(io.ByteReader)}, nil + } + testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) +} + +func TestRawTokenAltEncodingNoConverter(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + token, err := d.RawToken() + if token == nil { + t.Fatalf("expected a token on first RawToken call") + } + if err != nil { + t.Fatal(err) + } + token, err = d.RawToken() + if token != nil { + t.Errorf("expected a nil token; got %#v", token) + } + if err == nil { + t.Fatalf("expected an error on second RawToken call") + } + const encoding = "x-testing-uppercase" + if !strings.Contains(err.Error(), encoding) { + t.Errorf("expected error to contain %q; got error: %v", + encoding, err) + } +} + +func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { + lastEnd := int64(0) + for i, want := range rawTokens { + start := d.InputOffset() + have, err := d.RawToken() + end := d.InputOffset() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + var shave, swant string + if _, ok := have.(CharData); ok { + shave = fmt.Sprintf("CharData(%q)", have) + } else { + shave = fmt.Sprintf("%#v", have) + } + if _, ok := want.(CharData); ok { + swant = fmt.Sprintf("CharData(%q)", want) + } else { + swant = fmt.Sprintf("%#v", want) + } + t.Errorf("token %d = %s, want %s", i, shave, swant) + } + + // Check that InputOffset returned actual token. + switch { + case start < lastEnd: + t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) + case start >= end: + // Special case: EndElement can be synthesized. + if start == end && end == lastEnd { + break + } + t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) + case end > int64(len(raw)): + t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) + default: + text := raw[start:end] + if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { + t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) + } + } + lastEnd = end + } +} + +// Ensure that directives (specifically !DOCTYPE) include the complete +// text of any nested directives, noting that < and > do not change +// nesting depth if they are in single or double quotes. + +var nestedDirectivesInput = ` +]> +">]> +]> +'>]> +]> +'>]> +]> +` + +var nestedDirectivesTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE [">]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestNestedDirectives(t *testing.T) { + d := NewDecoder(strings.NewReader(nestedDirectivesInput)) + + for i, want := range nestedDirectivesTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + + for i, want := range cookedTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestSyntax(t *testing.T) { + for i := range xmlInput { + d := NewDecoder(strings.NewReader(xmlInput[i])) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if _, ok := err.(*SyntaxError); !ok { + t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i]) + } + } +} + +type allScalars struct { + True1 bool + True2 bool + False1 bool + False2 bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint int + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + String string + PtrString *string +} + +var all = allScalars{ + True1: true, + True2: true, + False1: false, + False2: false, + Int: 1, + Int8: -2, + Int16: 3, + Int32: -4, + Int64: 5, + Uint: 6, + Uint8: 7, + Uint16: 8, + Uint32: 9, + Uint64: 10, + Uintptr: 11, + Float32: 13.0, + Float64: 14.0, + String: "15", + PtrString: &sixteen, +} + +var sixteen = "16" + +const testScalarsInput = ` + true + 1 + false + 0 + 1 + -2 + 3 + -4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12.0 + 13.0 + 14.0 + 15 + 16 +` + +func TestAllScalars(t *testing.T) { + var a allScalars + err := Unmarshal([]byte(testScalarsInput), &a) + + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, all) { + t.Errorf("have %+v want %+v", a, all) + } +} + +type item struct { + Field_a string +} + +func TestIssue569(t *testing.T) { + data := `abcd` + var i item + err := Unmarshal([]byte(data), &i) + + if err != nil || i.Field_a != "abcd" { + t.Fatal("Expecting abcd") + } +} + +func TestUnquotedAttrs(t *testing.T) { + data := "" + d := NewDecoder(strings.NewReader(data)) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != "tag" { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != "azAZ09:-_" { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != "attr" { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } +} + +func TestValuelessAttrs(t *testing.T) { + tests := [][3]string{ + {"

    ", "p", "nowrap"}, + {"

    ", "p", "nowrap"}, + {"", "input", "checked"}, + {"", "input", "checked"}, + } + for _, test := range tests { + d := NewDecoder(strings.NewReader(test[0])) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != test[1] { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != test[2] { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != test[2] { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } + } +} + +func TestCopyTokenCharData(t *testing.T) { + data := []byte("same data") + var tok1 Token = CharData(data) + tok2 := CopyToken(tok1) + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) != CharData") + } + data[1] = 'o' + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestCopyTokenStartElement(t *testing.T) { + elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}} + var tok1 Token = elt + tok2 := CopyToken(tok1) + if tok1.(StartElement).Attr[0].Value != "en" { + t.Error("CopyToken overwrote Attr[0]") + } + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(StartElement) != StartElement") + } + tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"} + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestSyntaxErrorLineNum(t *testing.T) { + testInput := "

    Foo

    \n\n

    Bar\n" + d := NewDecoder(strings.NewReader(testInput)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Error("Expected SyntaxError.") + } + if synerr.Line != 3 { + t.Error("SyntaxError didn't have correct line number.") + } +} + +func TestTrailingRawToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.RawToken(); err == nil; _, err = d.RawToken() { + } + if err != io.EOF { + t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err) + } +} + +func TestTrailingToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +func TestEntityInsideCDATA(t *testing.T) { + input := `` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +var characterTests = []struct { + in string + err string +}{ + {"\x12", "illegal character code U+0012"}, + {"\x0b", "illegal character code U+000B"}, + {"\xef\xbf\xbe", "illegal character code U+FFFE"}, + {"\r\n\x07", "illegal character code U+0007"}, + {"what's up", "expected attribute name in element"}, + {"&abc\x01;", "invalid character entity &abc (no semicolon)"}, + {"&\x01;", "invalid character entity & (no semicolon)"}, + {"&\xef\xbf\xbe;", "invalid character entity &\uFFFE;"}, + {"&hello;", "invalid character entity &hello;"}, +} + +func TestDisallowedCharacters(t *testing.T) { + + for i, tt := range characterTests { + d := NewDecoder(strings.NewReader(tt.in)) + var err error + + for err == nil { + _, err = d.Token() + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err) + } + if synerr.Msg != tt.err { + t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg) + } + } +} + +type procInstEncodingTest struct { + expect, got string +} + +var procInstTests = []struct { + input string + expect [2]string +}{ + {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, + {`encoding="FOO" `, [2]string{"", "FOO"}}, +} + +func TestProcInstEncoding(t *testing.T) { + for _, test := range procInstTests { + if got := procInst("version", test.input); got != test.expect[0] { + t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0]) + } + if got := procInst("encoding", test.input); got != test.expect[1] { + t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1]) + } + } +} + +// Ensure that directives with comments include the complete +// text of any nested directives. + +var directivesWithCommentsInput = ` +]> +]> + --> --> []> +` + +var directivesWithCommentsTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestDirectivesWithComments(t *testing.T) { + d := NewDecoder(strings.NewReader(directivesWithCommentsInput)) + + for i, want := range directivesWithCommentsTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +// Writer whose Write method always returns an error. +type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } + +func TestEscapeTextIOErrors(t *testing.T) { + expectErr := "unwritable" + err := EscapeText(errWriter{}, []byte{'A'}) + + if err == nil || err.Error() != expectErr { + t.Errorf("have %v, want %v", err, expectErr) + } +} + +func TestEscapeTextInvalidChar(t *testing.T) { + input := []byte("A \x00 terminated string.") + expected := "A \uFFFD terminated string." + + buff := new(bytes.Buffer) + if err := EscapeText(buff, input); err != nil { + t.Fatalf("have %v, want nil", err) + } + text := buff.String() + + if text != expected { + t.Errorf("have %v, want %v", text, expected) + } +} + +func TestIssue5880(t *testing.T) { + type T []byte + data, err := Marshal(T{192, 168, 0, 1}) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !utf8.Valid(data) { + t.Errorf("Marshal generated invalid UTF-8: %x", data) + } +} diff --git a/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/golang.org/x/net/webdav/litmus_test_server.go new file mode 100644 index 0000000..514db5d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/litmus_test_server.go @@ -0,0 +1,94 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program is a server for the WebDAV 'litmus' compliance test at +http://www.webdav.org/neon/litmus/ +To run the test: + +go run litmus_test_server.go + +and separately, from the downloaded litmus-xxx directory: + +make URL=http://localhost:9999/ check +*/ +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/webdav" +) + +var port = flag.Int("port", 9999, "server port") + +func main() { + flag.Parse() + log.SetFlags(0) + h := &webdav.Handler{ + FileSystem: webdav.NewMemFS(), + LockSystem: webdav.NewMemLS(), + Logger: func(r *http.Request, err error) { + litmus := r.Header.Get("X-Litmus") + if len(litmus) > 19 { + litmus = litmus[:16] + "..." + } + + switch r.Method { + case "COPY", "MOVE": + dst := "" + if u, err := url.Parse(r.Header.Get("Destination")); err == nil { + dst = u.Path + } + o := r.Header.Get("Overwrite") + log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err) + default: + log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err) + } + }, + } + + // The next line would normally be: + // http.Handle("/", h) + // but we wrap that HTTP handler h to cater for a special case. + // + // The propfind_invalid2 litmus test case expects an empty namespace prefix + // declaration to be an error. The FAQ in the webdav litmus test says: + // + // "What does the "propfind_invalid2" test check for?... + // + // If a request was sent with an XML body which included an empty namespace + // prefix declaration (xmlns:ns1=""), then the server must reject that with + // a "400 Bad Request" response, as it is invalid according to the XML + // Namespace specification." + // + // On the other hand, the Go standard library's encoding/xml package + // accepts an empty xmlns namespace, as per the discussion at + // https://github.com/golang/go/issues/8068 + // + // Empty namespaces seem disallowed in the second (2006) edition of the XML + // standard, but allowed in a later edition. The grammar differs between + // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and + // http://www.w3.org/TR/REC-xml-names/#dt-prefix + // + // Thus, we assume that the propfind_invalid2 test is obsolete, and + // hard-code the 400 Bad Request response that the test expects. + http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" { + http.Error(w, "400 Bad Request", http.StatusBadRequest) + return + } + h.ServeHTTP(w, r) + })) + + addr := fmt.Sprintf(":%d", *port) + log.Printf("Serving %v", addr) + log.Fatal(http.ListenAndServe(addr, nil)) +} diff --git a/vendor/golang.org/x/net/webdav/lock.go b/vendor/golang.org/x/net/webdav/lock.go new file mode 100644 index 0000000..344ac5c --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock.go @@ -0,0 +1,445 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "container/heap" + "errors" + "strconv" + "strings" + "sync" + "time" +) + +var ( + // ErrConfirmationFailed is returned by a LockSystem's Confirm method. + ErrConfirmationFailed = errors.New("webdav: confirmation failed") + // ErrForbidden is returned by a LockSystem's Unlock method. + ErrForbidden = errors.New("webdav: forbidden") + // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods. + ErrLocked = errors.New("webdav: locked") + // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods. + ErrNoSuchLock = errors.New("webdav: no such lock") +) + +// Condition can match a WebDAV resource, based on a token or ETag. +// Exactly one of Token and ETag should be non-empty. +type Condition struct { + Not bool + Token string + ETag string +} + +// LockSystem manages access to a collection of named resources. The elements +// in a lock name are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +type LockSystem interface { + // Confirm confirms that the caller can claim all of the locks specified by + // the given conditions, and that holding the union of all of those locks + // gives exclusive access to all of the named resources. Up to two resources + // can be named. Empty names are ignored. + // + // Exactly one of release and err will be non-nil. If release is non-nil, + // all of the requested locks are held until release is called. Calling + // release does not unlock the lock, in the WebDAV UNLOCK sense, but once + // Confirm has confirmed that a lock claim is valid, that lock cannot be + // Confirmed again until it has been released. + // + // If Confirm returns ErrConfirmationFailed then the Handler will continue + // to try any other set of locks presented (a WebDAV HTTP request can + // present more than one set of locks). If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error) + + // Create creates a lock with the given depth, duration, owner and root + // (name). The depth will either be negative (meaning infinite) or zero. + // + // If Create returns ErrLocked then the Handler will write a "423 Locked" + // HTTP status. If it returns any other non-nil error, the Handler will + // write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + // + // The token returned identifies the created lock. It should be an absolute + // URI as defined by RFC 3986, Section 4.3. In particular, it should not + // contain whitespace. + Create(now time.Time, details LockDetails) (token string, err error) + + // Refresh refreshes the lock with the given token. + // + // If Refresh returns ErrLocked then the Handler will write a "423 Locked" + // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write + // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) + + // Unlock unlocks the lock with the given token. + // + // If Unlock returns ErrForbidden then the Handler will write a "403 + // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler + // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock + // then the Handler will write a "409 Conflict" HTTP Status. If it returns + // any other non-nil error, the Handler will write a "500 Internal Server + // Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for + // when to use each error. + Unlock(now time.Time, token string) error +} + +// LockDetails are a lock's metadata. +type LockDetails struct { + // Root is the root resource name being locked. For a zero-depth lock, the + // root is the only resource being locked. + Root string + // Duration is the lock timeout. A negative duration means infinite. + Duration time.Duration + // OwnerXML is the verbatim XML given in a LOCK HTTP request. + // + // TODO: does the "verbatim" nature play well with XML namespaces? + // Does the OwnerXML field need to have more structure? See + // https://codereview.appspot.com/175140043/#msg2 + OwnerXML string + // ZeroDepth is whether the lock has zero depth. If it does not have zero + // depth, it has infinite depth. + ZeroDepth bool +} + +// NewMemLS returns a new in-memory LockSystem. +func NewMemLS() LockSystem { + return &memLS{ + byName: make(map[string]*memLSNode), + byToken: make(map[string]*memLSNode), + gen: uint64(time.Now().Unix()), + } +} + +type memLS struct { + mu sync.Mutex + byName map[string]*memLSNode + byToken map[string]*memLSNode + gen uint64 + // byExpiry only contains those nodes whose LockDetails have a finite + // Duration and are yet to expire. + byExpiry byExpiry +} + +func (m *memLS) nextToken() string { + m.gen++ + return strconv.FormatUint(m.gen, 10) +} + +func (m *memLS) collectExpiredNodes(now time.Time) { + for len(m.byExpiry) > 0 { + if now.Before(m.byExpiry[0].expiry) { + break + } + m.remove(m.byExpiry[0]) + } +} + +func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + var n0, n1 *memLSNode + if name0 != "" { + if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil { + return nil, ErrConfirmationFailed + } + } + if name1 != "" { + if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil { + return nil, ErrConfirmationFailed + } + } + + // Don't hold the same node twice. + if n1 == n0 { + n1 = nil + } + + if n0 != nil { + m.hold(n0) + } + if n1 != nil { + m.hold(n1) + } + return func() { + m.mu.Lock() + defer m.mu.Unlock() + if n1 != nil { + m.unhold(n1) + } + if n0 != nil { + m.unhold(n0) + } + }, nil +} + +// lookup returns the node n that locks the named resource, provided that n +// matches at least one of the given conditions and that lock isn't held by +// another party. Otherwise, it returns nil. +// +// n may be a parent of the named resource, if n is an infinite depth lock. +func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) { + // TODO: support Condition.Not and Condition.ETag. + for _, c := range conditions { + n = m.byToken[c.Token] + if n == nil || n.held { + continue + } + if name == n.details.Root { + return n + } + if n.details.ZeroDepth { + continue + } + if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") { + return n + } + } + return nil +} + +func (m *memLS) hold(n *memLSNode) { + if n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = true + if n.details.Duration >= 0 && n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func (m *memLS) unhold(n *memLSNode) { + if !n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = false + if n.details.Duration >= 0 { + heap.Push(&m.byExpiry, n) + } +} + +func (m *memLS) Create(now time.Time, details LockDetails) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + details.Root = slashClean(details.Root) + + if !m.canCreate(details.Root, details.ZeroDepth) { + return "", ErrLocked + } + n := m.create(details.Root) + n.token = m.nextToken() + m.byToken[n.token] = n + n.details = details + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.token, nil +} + +func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return LockDetails{}, ErrNoSuchLock + } + if n.held { + return LockDetails{}, ErrLocked + } + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } + n.details.Duration = duration + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.details, nil +} + +func (m *memLS) Unlock(now time.Time, token string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return ErrNoSuchLock + } + if n.held { + return ErrLocked + } + m.remove(n) + return nil +} + +func (m *memLS) canCreate(name string, zeroDepth bool) bool { + return walkToRoot(name, func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + return true + } + if first { + if n.token != "" { + // The target node is already locked. + return false + } + if !zeroDepth { + // The requested lock depth is infinite, and the fact that n exists + // (n != nil) means that a descendent of the target node is locked. + return false + } + } else if n.token != "" && !n.details.ZeroDepth { + // An ancestor of the target node is locked with infinite depth. + return false + } + return true + }) +} + +func (m *memLS) create(name string) (ret *memLSNode) { + walkToRoot(name, func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + n = &memLSNode{ + details: LockDetails{ + Root: name0, + }, + byExpiryIndex: -1, + } + m.byName[name0] = n + } + n.refCount++ + if first { + ret = n + } + return true + }) + return ret +} + +func (m *memLS) remove(n *memLSNode) { + delete(m.byToken, n.token) + n.token = "" + walkToRoot(n.details.Root, func(name0 string, first bool) bool { + x := m.byName[name0] + x.refCount-- + if x.refCount == 0 { + delete(m.byName, name0) + } + return true + }) + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func walkToRoot(name string, f func(name0 string, first bool) bool) bool { + for first := true; ; first = false { + if !f(name, first) { + return false + } + if name == "/" { + break + } + name = name[:strings.LastIndex(name, "/")] + if name == "" { + name = "/" + } + } + return true +} + +type memLSNode struct { + // details are the lock metadata. Even if this node's name is not explicitly locked, + // details.Root will still equal the node's name. + details LockDetails + // token is the unique identifier for this node's lock. An empty token means that + // this node is not explicitly locked. + token string + // refCount is the number of self-or-descendent nodes that are explicitly locked. + refCount int + // expiry is when this node's lock expires. + expiry time.Time + // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1 + // if this node does not expire, or has expired. + byExpiryIndex int + // held is whether this node's lock is actively held by a Confirm call. + held bool +} + +type byExpiry []*memLSNode + +func (b *byExpiry) Len() int { + return len(*b) +} + +func (b *byExpiry) Less(i, j int) bool { + return (*b)[i].expiry.Before((*b)[j].expiry) +} + +func (b *byExpiry) Swap(i, j int) { + (*b)[i], (*b)[j] = (*b)[j], (*b)[i] + (*b)[i].byExpiryIndex = i + (*b)[j].byExpiryIndex = j +} + +func (b *byExpiry) Push(x interface{}) { + n := x.(*memLSNode) + n.byExpiryIndex = len(*b) + *b = append(*b, n) +} + +func (b *byExpiry) Pop() interface{} { + i := len(*b) - 1 + n := (*b)[i] + (*b)[i] = nil + n.byExpiryIndex = -1 + *b = (*b)[:i] + return n +} + +const infiniteTimeout = -1 + +// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is +// empty, an infiniteTimeout is returned. +func parseTimeout(s string) (time.Duration, error) { + if s == "" { + return infiniteTimeout, nil + } + if i := strings.IndexByte(s, ','); i >= 0 { + s = s[:i] + } + s = strings.TrimSpace(s) + if s == "Infinite" { + return infiniteTimeout, nil + } + const pre = "Second-" + if !strings.HasPrefix(s, pre) { + return 0, errInvalidTimeout + } + s = s[len(pre):] + if s == "" || s[0] < '0' || '9' < s[0] { + return 0, errInvalidTimeout + } + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || 1<<32-1 < n { + return 0, errInvalidTimeout + } + return time.Duration(n) * time.Second, nil +} diff --git a/vendor/golang.org/x/net/webdav/lock_test.go b/vendor/golang.org/x/net/webdav/lock_test.go new file mode 100644 index 0000000..5cf14cd --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock_test.go @@ -0,0 +1,731 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "fmt" + "math/rand" + "path" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +func TestWalkToRoot(t *testing.T) { + testCases := []struct { + name string + want []string + }{{ + "/a/b/c/d", + []string{ + "/a/b/c/d", + "/a/b/c", + "/a/b", + "/a", + "/", + }, + }, { + "/a", + []string{ + "/a", + "/", + }, + }, { + "/", + []string{ + "/", + }, + }} + + for _, tc := range testCases { + var got []string + if !walkToRoot(tc.name, func(name0 string, first bool) bool { + if first != (len(got) == 0) { + t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got)) + return false + } + got = append(got, name0) + return true + }) { + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want) + } + } +} + +var lockTestDurations = []time.Duration{ + infiniteTimeout, // infiniteTimeout means to never expire. + 0, // A zero duration means to expire immediately. + 100 * time.Hour, // A very large duration will not expire in these tests. +} + +// lockTestNames are the names of a set of mutually compatible locks. For each +// name fragment: +// - _ means no explicit lock. +// - i means an infinite-depth lock, +// - z means a zero-depth lock, +var lockTestNames = []string{ + "/_/_/_/_/z", + "/_/_/i", + "/_/z", + "/_/z/i", + "/_/z/z", + "/_/z/_/i", + "/_/z/_/z", + "/i", + "/z", + "/z/_/i", + "/z/_/z", +} + +func lockTestZeroDepth(name string) bool { + switch name[len(name)-1] { + case 'i': + return false + case 'z': + return true + } + panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name)) +} + +func TestMemLSCanCreate(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + for _, name := range lockTestNames { + _, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + } + + wantCanCreate := func(name string, zeroDepth bool) bool { + for _, n := range lockTestNames { + switch { + case n == name: + // An existing lock has the same name as the proposed lock. + return false + case strings.HasPrefix(n, name): + // An existing lock would be a child of the proposed lock, + // which conflicts if the proposed lock has infinite depth. + if !zeroDepth { + return false + } + case strings.HasPrefix(name, n): + // An existing lock would be an ancestor of the proposed lock, + // which conflicts if the ancestor has infinite depth. + if n[len(n)-1] == 'i' { + return false + } + } + } + return true + } + + var check func(int, string) + check = func(recursion int, name string) { + for _, zeroDepth := range []bool{false, true} { + got := m.canCreate(name, zeroDepth) + want := wantCanCreate(name, zeroDepth) + if got != want { + t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want) + } + } + if recursion == 6 { + return + } + if name != "/" { + name += "/" + } + for _, c := range "_iz" { + check(recursion+1, name+string(c)) + } + } + check(0, "/") +} + +func TestMemLSLookup(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + badToken := m.nextToken() + t.Logf("badToken=%q", badToken) + + for _, name := range lockTestNames { + token, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token) + } + + baseNames := append([]string{"/a", "/b/c"}, lockTestNames...) + for _, baseName := range baseNames { + for _, suffix := range []string{"", "/0", "/1/2/3"} { + name := baseName + suffix + + goodToken := "" + base := m.byName[baseName] + if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) { + goodToken = base.token + } + + for _, token := range []string{badToken, goodToken} { + if token == "" { + continue + } + + got := m.lookup(name, Condition{Token: token}) + want := base + if token == badToken { + want = nil + } + if got != want { + t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p", + name, token, token == badToken, got, want) + } + } + } + } +} + +func TestMemLSConfirm(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + alice, err := m.Create(now, LockDetails{ + Root: "/alice", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + tweedle, err := m.Create(now, LockDetails{ + Root: "/tweedle", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + + // Test a mismatch between name and condition. + _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (mismatch): inconsistent state: %v", err) + } + + // Test two names (that fall under the same lock) in the one Confirm call. + release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (twins): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (twins): inconsistent state: %v", err) + } + release() + if err := m.consistent(); err != nil { + t.Fatalf("release (twins): inconsistent state: %v", err) + } + + // Test the same two names in overlapping Confirm / release calls. + releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #0): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err) + } + + _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err) + } + + releaseDee() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #2): inconsistent state: %v", err) + } + + releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #3): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err) + } + + // Test that you can't unlock a held lock. + err = m.Unlock(now, tweedle) + if err != ErrLocked { + t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err) + } + + releaseDum() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #5): inconsistent state: %v", err) + } + + err = m.Unlock(now, tweedle) + if err != nil { + t.Fatalf("Unlock (sequence #6): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err) + } +} + +func TestMemLSNonCanonicalRoot(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + token, err := m.Create(now, LockDetails{ + Root: "/foo/./bar//", + Duration: 1 * time.Second, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + if err := m.Unlock(now, token); err != nil { + t.Fatalf("Unlock: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock: inconsistent state: %v", err) + } +} + +func TestMemLSExpiry(t *testing.T) { + m := NewMemLS().(*memLS) + testCases := []string{ + "setNow 0", + "create /a.5", + "want /a.5", + "create /c.6", + "want /a.5 /c.6", + "create /a/b.7", + "want /a.5 /a/b.7 /c.6", + "setNow 4", + "want /a.5 /a/b.7 /c.6", + "setNow 5", + "want /a/b.7 /c.6", + "setNow 6", + "want /a/b.7", + "setNow 7", + "want ", + "setNow 8", + "want ", + "create /a.12", + "create /b.13", + "create /c.15", + "create /a/d.16", + "want /a.12 /a/d.16 /b.13 /c.15", + "refresh /a.14", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 12", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 13", + "want /a.14 /a/d.16 /c.15", + "setNow 14", + "want /a/d.16 /c.15", + "refresh /a/d.20", + "refresh /c.20", + "want /a/d.20 /c.20", + "setNow 20", + "want ", + } + + tokens := map[string]string{} + zTime := time.Unix(0, 0) + now := zTime + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create", "refresh": + parts := strings.Split(arg, ".") + if len(parts) != 2 { + t.Fatalf("test case #%d %q: invalid create", i, tc) + } + root := parts[0] + d, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now) + + switch op { + case "create": + token, err := m.Create(now, LockDetails{ + Root: root, + Duration: dur, + ZeroDepth: true, + }) + if err != nil { + t.Fatalf("test case #%d %q: Create: %v", i, tc, err) + } + tokens[root] = token + + case "refresh": + token := tokens[root] + if token == "" { + t.Fatalf("test case #%d %q: no token for %q", i, tc, root) + } + got, err := m.Refresh(now, token, dur) + if err != nil { + t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err) + } + want := LockDetails{ + Root: root, + Duration: dur, + ZeroDepth: true, + } + if got != want { + t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want) + } + } + + case "setNow": + d, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + now = time.Unix(0, 0).Add(time.Duration(d) * time.Second) + + case "want": + m.mu.Lock() + m.collectExpiredNodes(now) + got := make([]string, 0, len(m.byToken)) + for _, n := range m.byToken { + got = append(got, fmt.Sprintf("%s.%d", + n.details.Root, n.expiry.Sub(zTime)/time.Second)) + } + m.mu.Unlock() + sort.Strings(got) + want := []string{} + if arg != "" { + want = strings.Split(arg, " ") + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want) + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err) + } + } +} + +func TestMemLS(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + rng := rand.New(rand.NewSource(0)) + tokens := map[string]string{} + nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0 + const N = 2000 + + for i := 0; i < N; i++ { + name := lockTestNames[rng.Intn(len(lockTestNames))] + duration := lockTestDurations[rng.Intn(len(lockTestDurations))] + confirmed, unlocked := false, false + + // If the name was already locked, we randomly confirm/release, refresh + // or unlock it. Otherwise, we create a lock. + token := tokens[name] + if token != "" { + switch rng.Intn(3) { + case 0: + confirmed = true + nConfirm++ + release, err := m.Confirm(now, name, "", Condition{Token: token}) + if err != nil { + t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err) + } + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + release() + + case 1: + nRefresh++ + if _, err := m.Refresh(now, token, duration); err != nil { + t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err) + } + + case 2: + unlocked = true + nUnlock++ + if err := m.Unlock(now, token); err != nil { + t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err) + } + } + + } else { + nCreate++ + var err error + token, err = m.Create(now, LockDetails{ + Root: name, + Duration: duration, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("iteration #%d: Create %q: %v", i, name, err) + } + } + + if !confirmed { + if duration == 0 || unlocked { + // A zero-duration lock should expire immediately and is + // effectively equivalent to being unlocked. + tokens[name] = "" + } else { + tokens[name] = token + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + } + + if nConfirm < N/10 { + t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10) + } + if nCreate < N/10 { + t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10) + } + if nRefresh < N/10 { + t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10) + } + if nUnlock < N/10 { + t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10) + } +} + +func (m *memLS) consistent() error { + m.mu.Lock() + defer m.mu.Unlock() + + // If m.byName is non-empty, then it must contain an entry for the root "/", + // and its refCount should equal the number of locked nodes. + if len(m.byName) > 0 { + n := m.byName["/"] + if n == nil { + return fmt.Errorf(`non-empty m.byName does not contain the root "/"`) + } + if n.refCount != len(m.byToken) { + return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken)) + } + } + + for name, n := range m.byName { + // The map keys should be consistent with the node's copy of the key. + if n.details.Root != name { + return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name) + } + + // A name must be clean, and start with a "/". + if len(name) == 0 || name[0] != '/' { + return fmt.Errorf(`node name %q does not start with "/"`, name) + } + if name != path.Clean(name) { + return fmt.Errorf(`node name %q is not clean`, name) + } + + // A node's refCount should be positive. + if n.refCount <= 0 { + return fmt.Errorf("non-positive refCount for node at name %q", name) + } + + // A node's refCount should be the number of self-or-descendents that + // are locked (i.e. have a non-empty token). + var list []string + for name0, n0 := range m.byName { + // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z', + // so strings.HasPrefix is equivalent to self-or-descendent name match. + // We don't have to worry about "/foo/bar" being a false positive match + // for "/foo/b". + if strings.HasPrefix(name0, name) && n0.token != "" { + list = append(list, name0) + } + } + if n.refCount != len(list) { + sort.Strings(list) + return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)", + name, n.refCount, list, len(list)) + } + + // A node n is in m.byToken if it has a non-empty token. + if n.token != "" { + if _, ok := m.byToken[n.token]; !ok { + return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token) + } + } + + // A node n is in m.byExpiry if it has a non-negative byExpiryIndex. + if n.byExpiryIndex >= 0 { + if n.byExpiryIndex >= len(m.byExpiry) { + return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry)) + } + if n != m.byExpiry[n.byExpiryIndex] { + return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex) + } + } + } + + for token, n := range m.byToken { + // The map keys should be consistent with the node's copy of the key. + if n.token != token { + return fmt.Errorf("node token %q != byToken map key %q", n.token, token) + } + + // Every node in m.byToken is in m.byName. + if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root) + } + } + + for i, n := range m.byExpiry { + // The slice indices should be consistent with the node's copy of the index. + if n.byExpiryIndex != i { + return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i) + } + + // Every node in m.byExpiry is in m.byName. + if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root) + } + + // No node in m.byExpiry should be held. + if n.held { + return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root) + } + } + return nil +} + +func TestParseTimeout(t *testing.T) { + testCases := []struct { + s string + want time.Duration + wantErr error + }{{ + "", + infiniteTimeout, + nil, + }, { + "Infinite", + infiniteTimeout, + nil, + }, { + "Infinitesimal", + 0, + errInvalidTimeout, + }, { + "infinite", + 0, + errInvalidTimeout, + }, { + "Second-0", + 0 * time.Second, + nil, + }, { + "Second-123", + 123 * time.Second, + nil, + }, { + " Second-456 ", + 456 * time.Second, + nil, + }, { + "Second-4100000000", + 4100000000 * time.Second, + nil, + }, { + "junk", + 0, + errInvalidTimeout, + }, { + "Second-", + 0, + errInvalidTimeout, + }, { + "Second--1", + 0, + errInvalidTimeout, + }, { + "Second--123", + 0, + errInvalidTimeout, + }, { + "Second-+123", + 0, + errInvalidTimeout, + }, { + "Second-0x123", + 0, + errInvalidTimeout, + }, { + "second-123", + 0, + errInvalidTimeout, + }, { + "Second-4294967295", + 4294967295 * time.Second, + nil, + }, { + // Section 10.7 says that "The timeout value for TimeType "Second" + // must not be greater than 2^32-1." + "Second-4294967296", + 0, + errInvalidTimeout, + }, { + // This test case comes from section 9.10.9 of the spec. It says, + // + // "In this request, the client has specified that it desires an + // infinite-length lock, if available, otherwise a timeout of 4.1 + // billion seconds, if available." + // + // The Go WebDAV package always supports infinite length locks, + // and ignores the fallback after the comma. + "Infinite, Second-4100000000", + infiniteTimeout, + nil, + }} + + for _, tc := range testCases { + got, gotErr := parseTimeout(tc.s) + if got != tc.want || gotErr != tc.wantErr { + t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr) + } + } +} diff --git a/vendor/golang.org/x/net/webdav/prop.go b/vendor/golang.org/x/net/webdav/prop.go new file mode 100644 index 0000000..e36a3b3 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop.go @@ -0,0 +1,418 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" + + "golang.org/x/net/context" +) + +// Proppatch describes a property update instruction as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH +type Proppatch struct { + // Remove specifies whether this patch removes properties. If it does not + // remove them, it sets them. + Remove bool + // Props contains the properties to be set or removed. + Props []Property +} + +// Propstat describes a XML propstat element as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +type Propstat struct { + // Props contains the properties for which Status applies. + Props []Property + + // Status defines the HTTP status code of the properties in Prop. + // Allowed values include, but are not limited to the WebDAV status + // code extensions for HTTP/1.1. + // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 + Status int + + // XMLError contains the XML representation of the optional error element. + // XML content within this field must not rely on any predefined + // namespace declarations or prefixes. If empty, the XML error element + // is omitted. + XMLError string + + // ResponseDescription contains the contents of the optional + // responsedescription field. If empty, the XML element is omitted. + ResponseDescription string +} + +// makePropstats returns a slice containing those of x and y whose Props slice +// is non-empty. If both are empty, it returns a slice containing an otherwise +// zero Propstat whose HTTP status code is 200 OK. +func makePropstats(x, y Propstat) []Propstat { + pstats := make([]Propstat, 0, 2) + if len(x.Props) != 0 { + pstats = append(pstats, x) + } + if len(y.Props) != 0 { + pstats = append(pstats, y) + } + if len(pstats) == 0 { + pstats = append(pstats, Propstat{ + Status: http.StatusOK, + }) + } + return pstats +} + +// DeadPropsHolder holds the dead properties of a resource. +// +// Dead properties are those properties that are explicitly defined. In +// comparison, live properties, such as DAV:getcontentlength, are implicitly +// defined by the underlying resource, and cannot be explicitly overridden or +// removed. See the Terminology section of +// http://www.webdav.org/specs/rfc4918.html#rfc.section.3 +// +// There is a whitelist of the names of live properties. This package handles +// all live properties, and will only pass non-whitelisted names to the Patch +// method of DeadPropsHolder implementations. +type DeadPropsHolder interface { + // DeadProps returns a copy of the dead properties held. + DeadProps() (map[xml.Name]Property, error) + + // Patch patches the dead properties held. + // + // Patching is atomic; either all or no patches succeed. It returns (nil, + // non-nil) if an internal server error occurred, otherwise the Propstats + // collectively contain one Property for each proposed patch Property. If + // all patches succeed, Patch returns a slice of length one and a Propstat + // element with a 200 OK HTTP status code. If none succeed, for reasons + // other than an internal server error, no Propstat has status 200 OK. + // + // For more details on when various HTTP status codes apply, see + // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status + Patch([]Proppatch) ([]Propstat, error) +} + +// liveProps contains all supported, protected DAV: properties. +var liveProps = map[xml.Name]struct { + // findFn implements the propfind function of this property. If nil, + // it indicates a hidden property. + findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error) + // dir is true if the property applies to directories. + dir bool +}{ + {Space: "DAV:", Local: "resourcetype"}: { + findFn: findResourceType, + dir: true, + }, + {Space: "DAV:", Local: "displayname"}: { + findFn: findDisplayName, + dir: true, + }, + {Space: "DAV:", Local: "getcontentlength"}: { + findFn: findContentLength, + dir: false, + }, + {Space: "DAV:", Local: "getlastmodified"}: { + findFn: findLastModified, + // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified + // suggests that getlastmodified should only apply to GETable + // resources, and this package does not support GET on directories. + // + // Nonetheless, some WebDAV clients expect child directories to be + // sortable by getlastmodified date, so this value is true, not false. + // See golang.org/issue/15334. + dir: true, + }, + {Space: "DAV:", Local: "creationdate"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontentlanguage"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontenttype"}: { + findFn: findContentType, + dir: false, + }, + {Space: "DAV:", Local: "getetag"}: { + findFn: findETag, + // findETag implements ETag as the concatenated hex values of a file's + // modification time and size. This is not a reliable synchronization + // mechanism for directories, so we do not advertise getetag for DAV + // collections. + dir: false, + }, + + // TODO: The lockdiscovery property requires LockSystem to list the + // active locks on a resource. + {Space: "DAV:", Local: "lockdiscovery"}: {}, + {Space: "DAV:", Local: "supportedlock"}: { + findFn: findSupportedLock, + dir: true, + }, +} + +// TODO(nigeltao) merge props and allprop? + +// Props returns the status of the properties named pnames for resource name. +// +// Each Propstat has a unique status and each property name will only be part +// of one Propstat element. +func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pstatOK := Propstat{Status: http.StatusOK} + pstatNotFound := Propstat{Status: http.StatusNotFound} + for _, pn := range pnames { + // If this file has dead properties, check if they contain pn. + if dp, ok := deadProps[pn]; ok { + pstatOK.Props = append(pstatOK.Props, dp) + continue + } + // Otherwise, it must either be a live property or we don't know it. + if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { + innerXML, err := prop.findFn(ctx, fs, ls, name, fi) + if err != nil { + return nil, err + } + pstatOK.Props = append(pstatOK.Props, Property{ + XMLName: pn, + InnerXML: []byte(innerXML), + }) + } else { + pstatNotFound.Props = append(pstatNotFound.Props, Property{ + XMLName: pn, + }) + } + } + return makePropstats(pstatOK, pstatNotFound), nil +} + +// Propnames returns the property names defined for resource name. +func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps)) + for pn, prop := range liveProps { + if prop.findFn != nil && (prop.dir || !isDir) { + pnames = append(pnames, pn) + } + } + for pn := range deadProps { + pnames = append(pnames, pn) + } + return pnames, nil +} + +// Allprop returns the properties defined for resource name and the properties +// named in include. +// +// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined +// within the RFC plus dead properties. Other live properties should only be +// returned if they are named in 'include'. +// +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND +func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) { + pnames, err := propnames(ctx, fs, ls, name) + if err != nil { + return nil, err + } + // Add names from include if they are not already covered in pnames. + nameset := make(map[xml.Name]bool) + for _, pn := range pnames { + nameset[pn] = true + } + for _, pn := range include { + if !nameset[pn] { + pnames = append(pnames, pn) + } + } + return props(ctx, fs, ls, name, pnames) +} + +// Patch patches the properties of resource name. The return values are +// constrained in the same manner as DeadPropsHolder.Patch. +func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) { + conflict := false +loop: + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + conflict = true + break loop + } + } + } + if conflict { + pstatForbidden := Propstat{ + Status: http.StatusForbidden, + XMLError: ``, + } + pstatFailedDep := Propstat{ + Status: StatusFailedDependency, + } + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName}) + } else { + pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName}) + } + } + } + return makePropstats(pstatForbidden, pstatFailedDep), nil + } + + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0) + if err != nil { + return nil, err + } + defer f.Close() + if dph, ok := f.(DeadPropsHolder); ok { + ret, err := dph.Patch(patches) + if err != nil { + return nil, err + } + // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that + // "The contents of the prop XML element must only list the names of + // properties to which the result in the status element applies." + for _, pstat := range ret { + for i, p := range pstat.Props { + pstat.Props[i] = Property{XMLName: p.XMLName} + } + } + return ret, nil + } + // The file doesn't implement the optional DeadPropsHolder interface, so + // all patches are forbidden. + pstat := Propstat{Status: http.StatusForbidden} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + } + } + return []Propstat{pstat}, nil +} + +func escapeXML(s string) string { + for i := 0; i < len(s); i++ { + // As an optimization, if s contains only ASCII letters, digits or a + // few special characters, the escaped value is s itself and we don't + // need to allocate a buffer and convert between string and []byte. + switch c := s[i]; { + case c == ' ' || c == '_' || + ('+' <= c && c <= '9') || // Digits as well as + , - . and / + ('A' <= c && c <= 'Z') || + ('a' <= c && c <= 'z'): + continue + } + // Otherwise, go through the full escaping process. + var buf bytes.Buffer + xml.EscapeText(&buf, []byte(s)) + return buf.String() + } + return s +} + +func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + return ``, nil + } + return "", nil +} + +func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if slashClean(name) == "/" { + // Hide the real name of a possibly prefixed root directory. + return "", nil + } + return escapeXML(fi.Name()), nil +} + +func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return strconv.FormatInt(fi.Size(), 10), nil +} + +func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return fi.ModTime().Format(http.TimeFormat), nil +} + +func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return "", err + } + defer f.Close() + // This implementation is based on serveContent's code in the standard net/http package. + ctype := mime.TypeByExtension(filepath.Ext(name)) + if ctype != "" { + return ctype, nil + } + // Read a chunk to decide between utf-8 text and binary. + var buf [512]byte + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return "", err + } + ctype = http.DetectContentType(buf[:n]) + // Rewind file. + _, err = f.Seek(0, os.SEEK_SET) + return ctype, err +} + +func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + // The Apache http 2.4 web server by default concatenates the + // modification time and size of a file. We replicate the heuristic + // with nanosecond granularity. + return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil +} + +func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return `` + + `` + + `` + + `` + + ``, nil +} diff --git a/vendor/golang.org/x/net/webdav/prop_test.go b/vendor/golang.org/x/net/webdav/prop_test.go new file mode 100644 index 0000000..57d0e82 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop_test.go @@ -0,0 +1,613 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "fmt" + "net/http" + "os" + "reflect" + "sort" + "testing" + + "golang.org/x/net/context" +) + +func TestMemPS(t *testing.T) { + ctx := context.Background() + // calcProps calculates the getlastmodified and getetag DAV: property + // values in pstats for resource name in file-system fs. + calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error { + fi, err := fs.Stat(ctx, name) + if err != nil { + return err + } + for _, pst := range pstats { + for i, p := range pst.Props { + switch p.XMLName { + case xml.Name{Space: "DAV:", Local: "getlastmodified"}: + p.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat)) + pst.Props[i] = p + case xml.Name{Space: "DAV:", Local: "getetag"}: + if fi.IsDir() { + continue + } + etag, err := findETag(ctx, fs, ls, name, fi) + if err != nil { + return err + } + p.InnerXML = []byte(etag) + pst.Props[i] = p + } + } + } + return nil + } + + const ( + lockEntry = `` + + `` + + `` + + `` + + `` + statForbiddenError = `` + ) + + type propOp struct { + op string + name string + pnames []xml.Name + patches []Proppatch + wantPnames []xml.Name + wantPropstats []Propstat + } + + testCases := []struct { + desc string + noDeadProps bool + buildfs []string + propOp []propOp + }{{ + desc: "propname", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propname", + name: "/dir", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "DAV:", Local: "getlastmodified"}, + }, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + }, + }}, + }, { + desc: "allprop dir and file", + buildfs: []string{"mkdir /dir", "write /file foobarbaz"}, + propOp: []propOp{{ + op: "allprop", + name: "/dir", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(``), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("dir"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + pnames: []xml.Name{ + {"DAV:", "resourcetype"}, + {"foo", "bar"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}}, { + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}}, + }, + }}, + }, { + desc: "propfind DAV:resourcetype", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(``), + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }}, + }}, + }}, + }, { + desc: "propfind unsupported DAV properties", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getcontentlanguage"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "creationdate"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "creationdate"}, + }}, + }}, + }}, + }, { + desc: "propfind getetag for files but not for directories", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getetag"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "getetag"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. + }}, + }}, + }}, + }, { + desc: "proppatch property on no-dead-properties file system", + buildfs: []string{"mkdir /dir"}, + noDeadProps: true, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }}, + }, { + desc: "proppatch dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + }}, + }, { + desc: "proppatch dead property with failed dependency", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }, { + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("xxx"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + }}, + }, { + Status: StatusFailedDependency, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "proppatch remove dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }, { + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }}, + }, { + desc: "propname with dead property", + buildfs: []string{"touch /file"}, + propOp: []propOp{{ + op: "proppatch", + name: "/file", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "foo", Local: "bar"}, + }, + }}, + }, { + desc: "proppatch remove unknown dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "bad: propfind unknown property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"foo:", "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo:", Local: "bar"}, + }}, + }}, + }}, + }} + + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + if tc.noDeadProps { + fs = noDeadPropsFS{fs} + } + ls := NewMemLS() + for _, op := range tc.propOp { + desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name) + if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil { + t.Fatalf("%s: calcProps: %v", desc, err) + } + + // Call property system. + var propstats []Propstat + switch op.op { + case "propname": + pnames, err := propnames(ctx, fs, ls, op.name) + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + sort.Sort(byXMLName(pnames)) + sort.Sort(byXMLName(op.wantPnames)) + if !reflect.DeepEqual(pnames, op.wantPnames) { + t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames) + } + continue + case "allprop": + propstats, err = allprop(ctx, fs, ls, op.name, op.pnames) + case "propfind": + propstats, err = props(ctx, fs, ls, op.name, op.pnames) + case "proppatch": + propstats, err = patch(ctx, fs, ls, op.name, op.patches) + default: + t.Fatalf("%s: %s not implemented", desc, op.op) + } + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + // Compare return values from allprop, propfind or proppatch. + for _, pst := range propstats { + sort.Sort(byPropname(pst.Props)) + } + for _, pst := range op.wantPropstats { + sort.Sort(byPropname(pst.Props)) + } + sort.Sort(byStatus(propstats)) + sort.Sort(byStatus(op.wantPropstats)) + if !reflect.DeepEqual(propstats, op.wantPropstats) { + t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats) + } + } + } +} + +func cmpXMLName(a, b xml.Name) bool { + if a.Space != b.Space { + return a.Space < b.Space + } + return a.Local < b.Local +} + +type byXMLName []xml.Name + +func (b byXMLName) Len() int { return len(b) } +func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) } + +type byPropname []Property + +func (b byPropname) Len() int { return len(b) } +func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) } + +type byStatus []Propstat + +func (b byStatus) Len() int { return len(b) } +func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status } + +type noDeadPropsFS struct { + FileSystem +} + +func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm) + if err != nil { + return nil, err + } + return noDeadPropsFile{f}, nil +} + +// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods +// provided by the underlying File implementation. +type noDeadPropsFile struct { + f File +} + +func (f noDeadPropsFile) Close() error { return f.f.Close() } +func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) } +func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) } +func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) } +func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() } +func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) } diff --git a/vendor/golang.org/x/net/webdav/webdav.go b/vendor/golang.org/x/net/webdav/webdav.go new file mode 100644 index 0000000..7b56687 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav.go @@ -0,0 +1,702 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package webdav provides a WebDAV server implementation. +package webdav // import "golang.org/x/net/webdav" + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" +) + +type Handler struct { + // Prefix is the URL path prefix to strip from WebDAV resource paths. + Prefix string + // FileSystem is the virtual file system. + FileSystem FileSystem + // LockSystem is the lock management system. + LockSystem LockSystem + // Logger is an optional error logger. If non-nil, it will be called + // for all HTTP requests. + Logger func(*http.Request, error) +} + +func (h *Handler) stripPrefix(p string) (string, int, error) { + if h.Prefix == "" { + return p, http.StatusOK, nil + } + if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) { + return r, http.StatusOK, nil + } + return p, http.StatusNotFound, errPrefixMismatch +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + status, err := http.StatusBadRequest, errUnsupportedMethod + if h.FileSystem == nil { + status, err = http.StatusInternalServerError, errNoFileSystem + } else if h.LockSystem == nil { + status, err = http.StatusInternalServerError, errNoLockSystem + } else { + switch r.Method { + case "OPTIONS": + status, err = h.handleOptions(w, r) + case "GET", "HEAD", "POST": + status, err = h.handleGetHeadPost(w, r) + case "DELETE": + status, err = h.handleDelete(w, r) + case "PUT": + status, err = h.handlePut(w, r) + case "MKCOL": + status, err = h.handleMkcol(w, r) + case "COPY", "MOVE": + status, err = h.handleCopyMove(w, r) + case "LOCK": + status, err = h.handleLock(w, r) + case "UNLOCK": + status, err = h.handleUnlock(w, r) + case "PROPFIND": + status, err = h.handlePropfind(w, r) + case "PROPPATCH": + status, err = h.handleProppatch(w, r) + } + } + + if status != 0 { + w.WriteHeader(status) + if status != http.StatusNoContent { + w.Write([]byte(StatusText(status))) + } + } + if h.Logger != nil { + h.Logger(r, err) + } +} + +func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) { + token, err = h.LockSystem.Create(now, LockDetails{ + Root: root, + Duration: infiniteTimeout, + ZeroDepth: true, + }) + if err != nil { + if err == ErrLocked { + return "", StatusLocked, err + } + return "", http.StatusInternalServerError, err + } + return token, 0, nil +} + +func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) { + hdr := r.Header.Get("If") + if hdr == "" { + // An empty If header means that the client hasn't previously created locks. + // Even if this client doesn't care about locks, we still need to check that + // the resources aren't locked by another client, so we create temporary + // locks that would conflict with another client's locks. These temporary + // locks are unlocked at the end of the HTTP request. + now, srcToken, dstToken := time.Now(), "", "" + if src != "" { + srcToken, status, err = h.lock(now, src) + if err != nil { + return nil, status, err + } + } + if dst != "" { + dstToken, status, err = h.lock(now, dst) + if err != nil { + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + return nil, status, err + } + } + + return func() { + if dstToken != "" { + h.LockSystem.Unlock(now, dstToken) + } + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + }, 0, nil + } + + ih, ok := parseIfHeader(hdr) + if !ok { + return nil, http.StatusBadRequest, errInvalidIfHeader + } + // ih is a disjunction (OR) of ifLists, so any ifList will do. + for _, l := range ih.lists { + lsrc := l.resourceTag + if lsrc == "" { + lsrc = src + } else { + u, err := url.Parse(lsrc) + if err != nil { + continue + } + if u.Host != r.Host { + continue + } + lsrc, status, err = h.stripPrefix(u.Path) + if err != nil { + return nil, status, err + } + } + release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...) + if err == ErrConfirmationFailed { + continue + } + if err != nil { + return nil, http.StatusInternalServerError, err + } + return release, 0, nil + } + // Section 10.4.1 says that "If this header is evaluated and all state lists + // fail, then the request must fail with a 412 (Precondition Failed) status." + // We follow the spec even though the cond_put_corrupt_token test case from + // the litmus test warns on seeing a 412 instead of a 423 (Locked). + return nil, http.StatusPreconditionFailed, ErrLocked +} + +func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := getContext(r) + allow := "OPTIONS, LOCK, PUT, MKCOL" + if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil { + if fi.IsDir() { + allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND" + } else { + allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT" + } + } + w.Header().Set("Allow", allow) + // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes + w.Header().Set("DAV", "1, 2") + // http://msdn.microsoft.com/en-au/library/cc250217.aspx + w.Header().Set("MS-Author-Via", "DAV") + return 0, nil +} + +func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + // TODO: check locks for read-only access?? + ctx := getContext(r) + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0) + if err != nil { + return http.StatusNotFound, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return http.StatusNotFound, err + } + if fi.IsDir() { + return http.StatusMethodNotAllowed, nil + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + // Let ServeContent determine the Content-Type header. + http.ServeContent(w, r, reqPath, fi.ModTime(), f) + return 0, nil +} + +func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + // TODO: return MultiStatus where appropriate. + + // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll + // returns nil (no error)." WebDAV semantics are that it should return a + // "404 Not Found". We therefore have to Stat before we RemoveAll. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil { + return http.StatusMethodNotAllowed, err + } + return http.StatusNoContent, nil +} + +func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' + // comments in http.checkEtag. + ctx := getContext(r) + + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return http.StatusNotFound, err + } + _, copyErr := io.Copy(f, r.Body) + fi, statErr := f.Stat() + closeErr := f.Close() + // TODO(rost): Returning 405 Method Not Allowed might not be appropriate. + if copyErr != nil { + return http.StatusMethodNotAllowed, copyErr + } + if statErr != nil { + return http.StatusMethodNotAllowed, statErr + } + if closeErr != nil { + return http.StatusMethodNotAllowed, closeErr + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + return http.StatusCreated, nil +} + +func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + if r.ContentLength > 0 { + return http.StatusUnsupportedMediaType, nil + } + if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusMethodNotAllowed, err + } + return http.StatusCreated, nil +} + +func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) { + hdr := r.Header.Get("Destination") + if hdr == "" { + return http.StatusBadRequest, errInvalidDestination + } + u, err := url.Parse(hdr) + if err != nil { + return http.StatusBadRequest, errInvalidDestination + } + if u.Host != r.Host { + return http.StatusBadGateway, errInvalidDestination + } + + src, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + + dst, status, err := h.stripPrefix(u.Path) + if err != nil { + return status, err + } + + if dst == "" { + return http.StatusBadGateway, errInvalidDestination + } + if dst == src { + return http.StatusForbidden, errDestinationEqualsSource + } + + ctx := getContext(r) + + if r.Method == "COPY" { + // Section 7.5.1 says that a COPY only needs to lock the destination, + // not both destination and source. Strictly speaking, this is racy, + // even though a COPY doesn't modify the source, if a concurrent + // operation modifies the source. However, the litmus test explicitly + // checks that COPYing a locked-by-another source is OK. + release, status, err := h.confirmLocks(r, "", dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.8.3 says that "The COPY method on a collection without a Depth + // header must act as if a Depth header with value "infinity" was included". + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.8.3 says that "A client may submit a Depth header on a + // COPY on a collection with a value of "0" or "infinity"." + return http.StatusBadRequest, errInvalidDepth + } + } + return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) + } + + release, status, err := h.confirmLocks(r, src, dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.9.2 says that "The MOVE method on a collection must act as if + // a "Depth: infinity" header was used on it. A client must not submit a + // Depth header on a MOVE on a collection with any value but "infinity"." + if hdr := r.Header.Get("Depth"); hdr != "" { + if parseDepth(hdr) != infiniteDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") +} + +func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) { + duration, err := parseTimeout(r.Header.Get("Timeout")) + if err != nil { + return http.StatusBadRequest, err + } + li, status, err := readLockInfo(r.Body) + if err != nil { + return status, err + } + + ctx := getContext(r) + token, ld, now, created := "", LockDetails{}, time.Now(), false + if li == (lockInfo{}) { + // An empty lockInfo means to refresh the lock. + ih, ok := parseIfHeader(r.Header.Get("If")) + if !ok { + return http.StatusBadRequest, errInvalidIfHeader + } + if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { + token = ih.lists[0].conditions[0].Token + } + if token == "" { + return http.StatusBadRequest, errInvalidLockToken + } + ld, err = h.LockSystem.Refresh(now, token, duration) + if err != nil { + if err == ErrNoSuchLock { + return http.StatusPreconditionFailed, err + } + return http.StatusInternalServerError, err + } + + } else { + // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request, + // then the request MUST act as if a "Depth:infinity" had been submitted." + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.10.3 says that "Values other than 0 or infinity must not be + // used with the Depth header on a LOCK method". + return http.StatusBadRequest, errInvalidDepth + } + } + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ld = LockDetails{ + Root: reqPath, + Duration: duration, + OwnerXML: li.Owner.InnerXML, + ZeroDepth: depth == 0, + } + token, err = h.LockSystem.Create(now, ld) + if err != nil { + if err == ErrLocked { + return StatusLocked, err + } + return http.StatusInternalServerError, err + } + defer func() { + if retErr != nil { + h.LockSystem.Unlock(now, token) + } + }() + + // Create the resource if it didn't previously exist. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + // TODO: detect missing intermediate dirs and return http.StatusConflict? + return http.StatusInternalServerError, err + } + f.Close() + created = true + } + + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We add angle brackets. + w.Header().Set("Lock-Token", "<"+token+">") + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + if created { + // This is "w.WriteHeader(http.StatusCreated)" and not "return + // http.StatusCreated, nil" because we write our own (XML) response to w + // and Handler.ServeHTTP would otherwise write "Created". + w.WriteHeader(http.StatusCreated) + } + writeLockInfo(w, token, ld) + return 0, nil +} + +func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) { + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We strip its angle brackets. + t := r.Header.Get("Lock-Token") + if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' { + return http.StatusBadRequest, errInvalidLockToken + } + t = t[1 : len(t)-1] + + switch err = h.LockSystem.Unlock(time.Now(), t); err { + case nil: + return http.StatusNoContent, err + case ErrForbidden: + return http.StatusForbidden, err + case ErrLocked: + return StatusLocked, err + case ErrNoSuchLock: + return http.StatusConflict, err + default: + return http.StatusInternalServerError, err + } +} + +func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := getContext(r) + fi, err := h.FileSystem.Stat(ctx, reqPath) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth == invalidDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + pf, status, err := readPropfind(r.Body) + if err != nil { + return status, err + } + + mw := multistatusWriter{w: w} + + walkFn := func(reqPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + var pstats []Propstat + if pf.Propname != nil { + pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath) + if err != nil { + return err + } + pstat := Propstat{Status: http.StatusOK} + for _, xmlname := range pnames { + pstat.Props = append(pstat.Props, Property{XMLName: xmlname}) + } + pstats = append(pstats, pstat) + } else if pf.Allprop != nil { + pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } else { + pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } + if err != nil { + return err + } + return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats)) + } + + walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn) + closeErr := mw.close() + if walkErr != nil { + return http.StatusInternalServerError, walkErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + patches, status, err := readProppatch(r.Body) + if err != nil { + return status, err + } + pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches) + if err != nil { + return http.StatusInternalServerError, err + } + mw := multistatusWriter{w: w} + writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats)) + closeErr := mw.close() + if writeErr != nil { + return http.StatusInternalServerError, writeErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func makePropstatResponse(href string, pstats []Propstat) *response { + resp := response{ + Href: []string{(&url.URL{Path: href}).EscapedPath()}, + Propstat: make([]propstat, 0, len(pstats)), + } + for _, p := range pstats { + var xmlErr *xmlError + if p.XMLError != "" { + xmlErr = &xmlError{InnerXML: []byte(p.XMLError)} + } + resp.Propstat = append(resp.Propstat, propstat{ + Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)), + Prop: p.Props, + ResponseDescription: p.ResponseDescription, + Error: xmlErr, + }) + } + return &resp +} + +const ( + infiniteDepth = -1 + invalidDepth = -2 +) + +// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and +// infiniteDepth. Parsing any other string returns invalidDepth. +// +// Different WebDAV methods have further constraints on valid depths: +// - PROPFIND has no further restrictions, as per section 9.1. +// - COPY accepts only "0" or "infinity", as per section 9.8.3. +// - MOVE accepts only "infinity", as per section 9.9.2. +// - LOCK accepts only "0" or "infinity", as per section 9.10.3. +// These constraints are enforced by the handleXxx methods. +func parseDepth(s string) int { + switch s { + case "0": + return 0 + case "1": + return 1 + case "infinity": + return infiniteDepth + } + return invalidDepth +} + +// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 +const ( + StatusMulti = 207 + StatusUnprocessableEntity = 422 + StatusLocked = 423 + StatusFailedDependency = 424 + StatusInsufficientStorage = 507 +) + +func StatusText(code int) string { + switch code { + case StatusMulti: + return "Multi-Status" + case StatusUnprocessableEntity: + return "Unprocessable Entity" + case StatusLocked: + return "Locked" + case StatusFailedDependency: + return "Failed Dependency" + case StatusInsufficientStorage: + return "Insufficient Storage" + } + return http.StatusText(code) +} + +var ( + errDestinationEqualsSource = errors.New("webdav: destination equals source") + errDirectoryNotEmpty = errors.New("webdav: directory not empty") + errInvalidDepth = errors.New("webdav: invalid depth") + errInvalidDestination = errors.New("webdav: invalid destination") + errInvalidIfHeader = errors.New("webdav: invalid If header") + errInvalidLockInfo = errors.New("webdav: invalid lock info") + errInvalidLockToken = errors.New("webdav: invalid lock token") + errInvalidPropfind = errors.New("webdav: invalid propfind") + errInvalidProppatch = errors.New("webdav: invalid proppatch") + errInvalidResponse = errors.New("webdav: invalid response") + errInvalidTimeout = errors.New("webdav: invalid timeout") + errNoFileSystem = errors.New("webdav: no file system") + errNoLockSystem = errors.New("webdav: no lock system") + errNotADirectory = errors.New("webdav: not a directory") + errPrefixMismatch = errors.New("webdav: prefix mismatch") + errRecursionTooDeep = errors.New("webdav: recursion too deep") + errUnsupportedLockInfo = errors.New("webdav: unsupported lock info") + errUnsupportedMethod = errors.New("webdav: unsupported method") +) diff --git a/vendor/golang.org/x/net/webdav/webdav_test.go b/vendor/golang.org/x/net/webdav/webdav_test.go new file mode 100644 index 0000000..25e0d54 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav_test.go @@ -0,0 +1,344 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "sort" + "strings" + "testing" + + "golang.org/x/net/context" +) + +// TODO: add tests to check XML responses with the expected prefix path +func TestPrefix(t *testing.T) { + const dst, blah = "Destination", "blah blah blah" + + // createLockBody comes from the example in Section 9.10.7. + const createLockBody = ` + + + + + http://example.org/~ejw/contact.html + + + ` + + do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) { + var bodyReader io.Reader + if body != "" { + bodyReader = strings.NewReader(body) + } + req, err := http.NewRequest(method, urlStr, bodyReader) + if err != nil { + return nil, err + } + for len(headers) >= 2 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != wantStatusCode { + return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) + } + return res.Header, nil + } + + prefixes := []string{ + "/", + "/a/", + "/a/b/", + "/a/b/c/", + } + ctx := context.Background() + for _, prefix := range prefixes { + fs := NewMemFS() + h := &Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + } + mux := http.NewServeMux() + if prefix != "/" { + h.Prefix = prefix + } + mux.Handle(prefix, h) + srv := httptest.NewServer(mux) + defer srv.Close() + + // The script is: + // MKCOL /a + // MKCOL /a/b + // PUT /a/b/c + // COPY /a/b/c /a/b/d + // MKCOL /a/b/e + // MOVE /a/b/d /a/b/e/f + // LOCK /a/b/e/g + // PUT /a/b/e/g + // which should yield the (possibly stripped) filenames /a/b/c, + // /a/b/e/f and /a/b/e/g, plus their parent directories. + + wantA := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusMovedPermanently, + "/a/b/": http.StatusNotFound, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil { + t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err) + continue + } + + wantB := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusMovedPermanently, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err) + continue + } + + wantC := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err) + continue + } + + wantD := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil { + t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err) + continue + } + + wantE := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err) + continue + } + + wantF := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil { + t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err) + continue + } + + var lockToken string + wantG := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil { + t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err) + continue + } else { + lockToken = h.Get("Lock-Token") + } + + ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken) + wantH := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err) + continue + } + + got, err := find(ctx, nil, fs, "/") + if err != nil { + t.Errorf("prefix=%-9q find: %v", prefix, err) + continue + } + sort.Strings(got) + want := map[string][]string{ + "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"}, + "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"}, + "/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"}, + "/a/b/c/": {"/"}, + }[prefix] + if !reflect.DeepEqual(got, want) { + t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want) + continue + } + } +} + +func TestEscapeXML(t *testing.T) { + // These test cases aren't exhaustive, and there is more than one way to + // escape e.g. a quot (as """ or """) or an apos. We presume that + // the encoding/xml package tests xml.EscapeText more thoroughly. This test + // here is just a sanity check for this package's escapeXML function, and + // its attempt to provide a fast path (and avoid a bytes.Buffer allocation) + // when escaping filenames is obviously a no-op. + testCases := map[string]string{ + "": "", + " ": " ", + "&": "&", + "*": "*", + "+": "+", + ",": ",", + "-": "-", + ".": ".", + "/": "/", + "0": "0", + "9": "9", + ":": ":", + "<": "<", + ">": ">", + "A": "A", + "_": "_", + "a": "a", + "~": "~", + "\u0201": "\u0201", + "&": "&amp;", + "foo&baz": "foo&<b/ar>baz", + } + + for in, want := range testCases { + if got := escapeXML(in); got != want { + t.Errorf("in=%q: got %q, want %q", in, got, want) + } + } +} + +func TestFilenameEscape(t *testing.T) { + hrefRe := regexp.MustCompile(`([^<]*)`) + displayNameRe := regexp.MustCompile(`([^<]*)`) + do := func(method, urlStr string) (string, string, error) { + req, err := http.NewRequest(method, urlStr, nil) + if err != nil { + return "", "", err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + hrefMatch := hrefRe.FindStringSubmatch(string(b)) + if len(hrefMatch) != 2 { + return "", "", errors.New("D:href not found") + } + displayNameMatch := displayNameRe.FindStringSubmatch(string(b)) + if len(displayNameMatch) != 2 { + return "", "", errors.New("D:displayname not found") + } + + return hrefMatch[1], displayNameMatch[1], nil + } + + testCases := []struct { + name, wantHref, wantDisplayName string + }{{ + name: `/foo%bar`, + wantHref: `/foo%25bar`, + wantDisplayName: `foo%bar`, + }, { + name: `/こんにちわ世界`, + wantHref: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`, + wantDisplayName: `こんにちわ世界`, + }, { + name: `/Program Files/`, + wantHref: `/Program%20Files`, + wantDisplayName: `Program Files`, + }, { + name: `/go+lang`, + wantHref: `/go+lang`, + wantDisplayName: `go+lang`, + }, { + name: `/go&lang`, + wantHref: `/go&lang`, + wantDisplayName: `go&lang`, + }, { + name: `/goexclusive"` + Shared *struct{} `xml:"lockscope>shared"` + Write *struct{} `xml:"locktype>write"` + Owner owner `xml:"owner"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner +type owner struct { + InnerXML string `xml:",innerxml"` +} + +func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { + c := &countingReader{r: r} + if err = ixml.NewDecoder(c).Decode(&li); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to refresh the lock. + // http://www.webdav.org/specs/rfc4918.html#refreshing-locks + return lockInfo{}, 0, nil + } + err = errInvalidLockInfo + } + return lockInfo{}, http.StatusBadRequest, err + } + // We only support exclusive (non-shared) write locks. In practice, these are + // the only types of locks that seem to matter. + if li.Exclusive == nil || li.Shared != nil || li.Write == nil { + return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo + } + return li, 0, nil +} + +type countingReader struct { + n int + r io.Reader +} + +func (c *countingReader) Read(p []byte) (int, error) { + n, err := c.r.Read(p) + c.n += n + return n, err +} + +func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) { + depth := "infinity" + if ld.ZeroDepth { + depth = "0" + } + timeout := ld.Duration / time.Second + return fmt.Fprintf(w, "\n"+ + "\n"+ + " \n"+ + " \n"+ + " %s\n"+ + " %s\n"+ + " Second-%d\n"+ + " %s\n"+ + " %s\n"+ + "", + depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root), + ) +} + +func escape(s string) string { + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '&', '\'', '<', '>': + b := bytes.NewBuffer(nil) + ixml.EscapeText(b, []byte(s)) + return b.String() + } + } + return s +} + +// Next returns the next token, if any, in the XML stream of d. +// RFC 4918 requires to ignore comments, processing instructions +// and directives. +// http://www.webdav.org/specs/rfc4918.html#property_values +// http://www.webdav.org/specs/rfc4918.html#xml-extensibility +func next(d *ixml.Decoder) (ixml.Token, error) { + for { + t, err := d.Token() + if err != nil { + return t, err + } + switch t.(type) { + case ixml.Comment, ixml.Directive, ixml.ProcInst: + continue + default: + return t, nil + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind) +type propfindProps []xml.Name + +// UnmarshalXML appends the property names enclosed within start to pn. +// +// It returns an error if start does not contain any properties or if +// properties contain values. Character data between properties is ignored. +func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + for { + t, err := next(d) + if err != nil { + return err + } + switch t.(type) { + case ixml.EndElement: + if len(*pn) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + name := t.(ixml.StartElement).Name + t, err = next(d) + if err != nil { + return err + } + if _, ok := t.(ixml.EndElement); !ok { + return fmt.Errorf("unexpected token %T", t) + } + *pn = append(*pn, xml.Name(name)) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind +type propfind struct { + XMLName ixml.Name `xml:"DAV: propfind"` + Allprop *struct{} `xml:"DAV: allprop"` + Propname *struct{} `xml:"DAV: propname"` + Prop propfindProps `xml:"DAV: prop"` + Include propfindProps `xml:"DAV: include"` +} + +func readPropfind(r io.Reader) (pf propfind, status int, err error) { + c := countingReader{r: r} + if err = ixml.NewDecoder(&c).Decode(&pf); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to propfind allprop. + // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND + return propfind{Allprop: new(struct{})}, 0, nil + } + err = errInvalidPropfind + } + return propfind{}, http.StatusBadRequest, err + } + + if pf.Allprop == nil && pf.Include != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Prop != nil && pf.Propname != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + return pf, 0, nil +} + +// Property represents a single DAV resource property as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties +type Property struct { + // XMLName is the fully qualified name that identifies this property. + XMLName xml.Name + + // Lang is an optional xml:lang attribute. + Lang string `xml:"xml:lang,attr,omitempty"` + + // InnerXML contains the XML representation of the property value. + // See http://www.webdav.org/specs/rfc4918.html#property_values + // + // Property values of complex type or mixed-content must have fully + // expanded XML namespaces or be self-contained with according + // XML namespace declarations. They must not rely on any XML + // namespace declarations within the scope of the XML document, + // even including the DAV: namespace. + InnerXML []byte `xml:",innerxml"` +} + +// ixmlProperty is the same as the Property type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlProperty struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error +// See multistatusWriter for the "D:" namespace prefix. +type xmlError struct { + XMLName ixml.Name `xml:"D:error"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +// See multistatusWriter for the "D:" namespace prefix. +type propstat struct { + Prop []Property `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// ixmlPropstat is the same as the propstat type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlPropstat struct { + Prop []ixmlProperty `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace +// before encoding. See multistatusWriter. +func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { + // Convert from a propstat to an ixmlPropstat. + ixmlPs := ixmlPropstat{ + Prop: make([]ixmlProperty, len(ps.Prop)), + Status: ps.Status, + Error: ps.Error, + ResponseDescription: ps.ResponseDescription, + } + for k, prop := range ps.Prop { + ixmlPs.Prop[k] = ixmlProperty{ + XMLName: ixml.Name(prop.XMLName), + Lang: prop.Lang, + InnerXML: prop.InnerXML, + } + } + + for k, prop := range ixmlPs.Prop { + if prop.XMLName.Space == "DAV:" { + prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ixmlPs.Prop[k] = prop + } + } + // Distinct type to avoid infinite recursion of MarshalXML. + type newpropstat ixmlPropstat + return e.EncodeElement(newpropstat(ixmlPs), start) +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response +// See multistatusWriter for the "D:" namespace prefix. +type response struct { + XMLName ixml.Name `xml:"D:response"` + Href []string `xml:"D:href"` + Propstat []propstat `xml:"D:propstat"` + Status string `xml:"D:status,omitempty"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MultistatusWriter marshals one or more Responses into a XML +// multistatus response. +// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus +// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as +// "DAV:" on this element, is prepended on the nested response, as well as on all +// its nested elements. All property names in the DAV: namespace are prefixed as +// well. This is because some versions of Mini-Redirector (on windows 7) ignore +// elements with a default namespace (no prefixed namespace). A less intrusive fix +// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177 +type multistatusWriter struct { + // ResponseDescription contains the optional responsedescription + // of the multistatus XML element. Only the latest content before + // close will be emitted. Empty response descriptions are not + // written. + responseDescription string + + w http.ResponseWriter + enc *ixml.Encoder +} + +// Write validates and emits a DAV response as part of a multistatus response +// element. +// +// It sets the HTTP status code of its underlying http.ResponseWriter to 207 +// (Multi-Status) and populates the Content-Type header. If r is the +// first, valid response to be written, Write prepends the XML representation +// of r with a multistatus tag. Callers must call close after the last response +// has been written. +func (w *multistatusWriter) write(r *response) error { + switch len(r.Href) { + case 0: + return errInvalidResponse + case 1: + if len(r.Propstat) > 0 != (r.Status == "") { + return errInvalidResponse + } + default: + if len(r.Propstat) > 0 || r.Status == "" { + return errInvalidResponse + } + } + err := w.writeHeader() + if err != nil { + return err + } + return w.enc.Encode(r) +} + +// writeHeader writes a XML multistatus start element on w's underlying +// http.ResponseWriter and returns the result of the write operation. +// After the first write attempt, writeHeader becomes a no-op. +func (w *multistatusWriter) writeHeader() error { + if w.enc != nil { + return nil + } + w.w.Header().Add("Content-Type", "text/xml; charset=utf-8") + w.w.WriteHeader(StatusMulti) + _, err := fmt.Fprintf(w.w, ``) + if err != nil { + return err + } + w.enc = ixml.NewEncoder(w.w) + return w.enc.EncodeToken(ixml.StartElement{ + Name: ixml.Name{ + Space: "DAV:", + Local: "multistatus", + }, + Attr: []ixml.Attr{{ + Name: ixml.Name{Space: "xmlns", Local: "D"}, + Value: "DAV:", + }}, + }) +} + +// Close completes the marshalling of the multistatus response. It returns +// an error if the multistatus response could not be completed. If both the +// return value and field enc of w are nil, then no multistatus response has +// been written. +func (w *multistatusWriter) close() error { + if w.enc == nil { + return nil + } + var end []ixml.Token + if w.responseDescription != "" { + name := ixml.Name{Space: "DAV:", Local: "responsedescription"} + end = append(end, + ixml.StartElement{Name: name}, + ixml.CharData(w.responseDescription), + ixml.EndElement{Name: name}, + ) + } + end = append(end, ixml.EndElement{ + Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, + }) + for _, t := range end { + err := w.enc.EncodeToken(t) + if err != nil { + return err + } + } + return w.enc.Flush() +} + +var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} + +func xmlLang(s ixml.StartElement, d string) string { + for _, attr := range s.Attr { + if attr.Name == xmlLangName { + return attr.Value + } + } + return d +} + +type xmlValue []byte + +func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + // The XML value of a property can be arbitrary, mixed-content XML. + // To make sure that the unmarshalled value contains all required + // namespaces, we encode all the property value XML tokens into a + // buffer. This forces the encoder to redeclare any used namespaces. + var b bytes.Buffer + e := ixml.NewEncoder(&b) + for { + t, err := next(d) + if err != nil { + return err + } + if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { + break + } + if err = e.EncodeToken(t); err != nil { + return err + } + } + err := e.Flush() + if err != nil { + return err + } + *v = b.Bytes() + return nil +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []Property + +// UnmarshalXML appends the property names and values enclosed within start +// to ps. +// +// An xml:lang attribute that is defined either on the DAV:prop or property +// name XML element is propagated to the property's Lang field. +// +// UnmarshalXML returns an error if start does not contain any properties or if +// property values contain syntactically incorrect XML. +func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + lang := xmlLang(start, "") + for { + t, err := next(d) + if err != nil { + return err + } + switch elem := t.(type) { + case ixml.EndElement: + if len(*ps) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + p := Property{ + XMLName: xml.Name(t.(ixml.StartElement).Name), + Lang: xmlLang(t.(ixml.StartElement), lang), + } + err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) + if err != nil { + return err + } + *ps = append(*ps, p) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove +type setRemove struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + Prop proppatchProps `xml:"DAV: prop"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate +type propertyupdate struct { + XMLName ixml.Name `xml:"DAV: propertyupdate"` + Lang string `xml:"xml:lang,attr,omitempty"` + SetRemove []setRemove `xml:",any"` +} + +func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { + var pu propertyupdate + if err = ixml.NewDecoder(r).Decode(&pu); err != nil { + return nil, http.StatusBadRequest, err + } + for _, op := range pu.SetRemove { + remove := false + switch op.XMLName { + case ixml.Name{Space: "DAV:", Local: "set"}: + // No-op. + case ixml.Name{Space: "DAV:", Local: "remove"}: + for _, p := range op.Prop { + if len(p.InnerXML) > 0 { + return nil, http.StatusBadRequest, errInvalidProppatch + } + } + remove = true + default: + return nil, http.StatusBadRequest, errInvalidProppatch + } + patches = append(patches, Proppatch{Remove: remove, Props: op.Prop}) + } + return patches, 0, nil +} diff --git a/vendor/golang.org/x/net/webdav/xml_test.go b/vendor/golang.org/x/net/webdav/xml_test.go new file mode 100644 index 0000000..a3d9e1e --- /dev/null +++ b/vendor/golang.org/x/net/webdav/xml_test.go @@ -0,0 +1,906 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" + + ixml "golang.org/x/net/webdav/internal/xml" +) + +func TestReadLockInfo(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + wantLI lockInfo + wantStatus int + }{{ + "bad: junk", + "xxx", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid owner XML", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " no end tag \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid UTF-8", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \xff \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #1", + "" + + "\n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #2", + "" + + "\n" + + " \n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "good: empty", + "", + lockInfo{}, + 0, + }, { + "good: plain-text owner", + "" + + "\n" + + " \n" + + " \n" + + " gopher\n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "gopher", + }, + }, + 0, + }, { + "section 9.10.7", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " http://example.org/~ejw/contact.html\n" + + " \n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "\n http://example.org/~ejw/contact.html\n ", + }, + }, + 0, + }} + + for _, tc := range testCases { + li, status, err := readLockInfo(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus { + t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v", + tc.desc, li, status, tc.wantLI, tc.wantStatus) + continue + } + } +} + +func TestReadPropfind(t *testing.T) { + testCases := []struct { + desc string + input string + wantPF propfind + wantStatus int + }{{ + desc: "propfind: propname", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: empty body means allprop", + input: "", + wantPF: propfind{ + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop followed by include", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: include followed by allprop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: prop with ignored comments", + input: "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored whitespace", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored mixed-content", + input: "" + + "\n" + + " foobar\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propname with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + " *boss*\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: bad: junk", + input: "xxx", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and allprop (section A.3)", + input: "" + + "\n" + + " " + + " " + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: allprop and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty propfind with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty prop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: prop with just chardata", + input: "" + + "\n" + + " foo\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: interrupted prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: malformed end element prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with chardata value", + input: "" + + "\n" + + " bar\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with whitespace value", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: include without allprop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pf, status, err := readPropfind(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus { + t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v", + tc.desc, pf, status, tc.wantPF, tc.wantStatus) + continue + } + } +} + +func TestMultistatusWriter(t *testing.T) { + ///The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + responses []response + respdesc string + writeHeader bool + wantXML string + wantCode int + wantErr error + }{{ + desc: "section 9.2.2 (failed dependency)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Authors", + }, + }}, + Status: "HTTP/1.1 424 Failed Dependency", + }, { + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Copyright-Owner", + }, + }}, + Status: "HTTP/1.1 409 Conflict", + }}, + ResponseDescription: "Copyright Owner cannot be deleted or altered.", + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 424 Failed Dependency` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 409 Conflict` + + ` ` + + ` Copyright Owner cannot be deleted or altered.` + + `` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.6.2 (lock-token-submitted)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Status: "HTTP/1.1 423 Locked", + Error: &xmlError{ + InnerXML: []byte(``), + }, + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` HTTP/1.1 423 Locked` + + ` ` + + ` ` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.1.3", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"}, + InnerXML: []byte(`` + + `` + + `Box type A` + + ``), + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"}, + InnerXML: []byte(`` + + `` + + `J.J. Johnson` + + ``), + }}, + Status: "HTTP/1.1 200 OK", + }, { + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"}, + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"}, + }}, + Status: "HTTP/1.1 403 Forbidden", + ResponseDescription: "The user does not have access to the DingALing property.", + }}, + }}, + respdesc: "There has been an access violation error.", + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` Box type A` + + ` J.J. Johnson` + + ` ` + + ` HTTP/1.1 200 OK` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 403 Forbidden` + + ` The user does not have access to the DingALing property.` + + ` ` + + ` ` + + ` There has been an access violation error.` + + ``, + wantCode: StatusMulti, + }, { + desc: "no response written", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "no response written (with description)", + respdesc: "too bad", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "empty multistatus with header", + writeHeader: true, + wantXML: ``, + wantCode: StatusMulti, + }, { + desc: "bad: no href", + responses: []response{{ + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and no status", + responses: []response{{ + Href: []string{"http://example.com/foo", "http://example.com/bar"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: one href and no propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: status with one href and propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + Status: "HTTP/1.1 200 OK", + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and propstat", + responses: []response{{ + Href: []string{ + "http://example.com/foo", + "http://example.com/bar", + }, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }} + + n := xmlNormalizer{omitWhitespace: true} +loop: + for _, tc := range testCases { + rec := httptest.NewRecorder() + w := multistatusWriter{w: rec, responseDescription: tc.respdesc} + if tc.writeHeader { + if err := w.writeHeader(); err != nil { + t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err) + continue + } + } + for _, r := range tc.responses { + if err := w.write(&r); err != nil { + if err != tc.wantErr { + t.Errorf("%s: got write error %v, want %v", + tc.desc, err, tc.wantErr) + } + continue loop + } + } + if err := w.close(); err != tc.wantErr { + t.Errorf("%s: got close error %v, want %v", + tc.desc, err, tc.wantErr) + continue + } + if rec.Code != tc.wantCode { + t.Errorf("%s: got HTTP status code %d, want %d\n", + tc.desc, rec.Code, tc.wantCode) + continue + } + gotXML := rec.Body.String() + eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML) + } + } +} + +func TestReadProppatch(t *testing.T) { + ppStr := func(pps []Proppatch) string { + var outer []string + for _, pp := range pps { + var inner []string + for _, p := range pp.Props { + inner = append(inner, fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}", + p.XMLName, p.Lang, p.InnerXML)) + } + outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}", + pp.Remove, strings.Join(inner, ", "))) + } + return "[" + strings.Join(outer, ", ") + "]" + } + + testCases := []struct { + desc string + input string + wantPP []Proppatch + wantStatus int + }{{ + desc: "proppatch: section 9.2 (with simple property value)", + input: `` + + `` + + `` + + ` ` + + ` somevalue` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"}, + "", + []byte(`somevalue`), + }}, + }, { + Remove: true, + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"}, + "", + nil, + }}, + }}, + }, { + desc: "proppatch: lang attribute on prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://example.com/ns", Local: "foo"}, + "en", + nil, + }}, + }}, + }, { + desc: "bad: remove with value", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` Jim Whitehead` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty propertyupdate", + input: `` + + `` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pp, status, err := readProppatch(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if status != tc.wantStatus { + t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus) + continue + } + if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus { + t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP)) + } + } +} + +func TestUnmarshalXMLValue(t *testing.T) { + testCases := []struct { + desc string + input string + wantVal string + }{{ + desc: "simple char data", + input: "foo", + wantVal: "foo", + }, { + desc: "empty element", + input: "", + wantVal: "", + }, { + desc: "preserve namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve root element namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve whitespace", + input: " \t ", + wantVal: " \t ", + }, { + desc: "preserve mixed content", + input: ` a `, + wantVal: ` a `, + }, { + desc: "section 9.2", + input: `` + + `` + + ` Jim Whitehead` + + ` Roy Fielding` + + ``, + wantVal: `` + + ` Jim Whitehead` + + ` Roy Fielding`, + }, { + desc: "section 4.3.1 (mixed content)", + input: `` + + `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of ]]>.` + + ` ` + + ``, + wantVal: `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of <RFC2518>.` + + ` `, + }} + + var n xmlNormalizer + for _, tc := range testCases { + d := ixml.NewDecoder(strings.NewReader(tc.input)) + var v xmlValue + if err := d.Decode(&v); err != nil { + t.Errorf("%s: got error %v, want nil", tc.desc, err) + continue + } + eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal) + } + } +} + +// xmlNormalizer normalizes XML. +type xmlNormalizer struct { + // omitWhitespace instructs to ignore whitespace between element tags. + omitWhitespace bool + // omitComments instructs to ignore XML comments. + omitComments bool +} + +// normalize writes the normalized XML content of r to w. It applies the +// following rules +// +// * Rename namespace prefixes according to an internal heuristic. +// * Remove unnecessary namespace declarations. +// * Sort attributes in XML start elements in lexical order of their +// fully qualified name. +// * Remove XML directives and processing instructions. +// * Remove CDATA between XML tags that only contains whitespace, if +// instructed to do so. +// * Remove comments, if instructed to do so. +// +func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { + d := ixml.NewDecoder(r) + e := ixml.NewEncoder(w) + for { + t, err := d.Token() + if err != nil { + if t == nil && err == io.EOF { + break + } + return err + } + switch val := t.(type) { + case ixml.Directive, ixml.ProcInst: + continue + case ixml.Comment: + if n.omitComments { + continue + } + case ixml.CharData: + if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { + continue + } + case ixml.StartElement: + start, _ := ixml.CopyToken(val).(ixml.StartElement) + attr := start.Attr[:0] + for _, a := range start.Attr { + if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { + continue + } + attr = append(attr, a) + } + sort.Sort(byName(attr)) + start.Attr = attr + t = start + } + err = e.EncodeToken(t) + if err != nil { + return err + } + } + return e.Flush() +} + +// equalXML tests for equality of the normalized XML contents of a and b. +func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { + var buf bytes.Buffer + if err := n.normalize(&buf, a); err != nil { + return false, err + } + normA := buf.String() + buf.Reset() + if err := n.normalize(&buf, b); err != nil { + return false, err + } + normB := buf.String() + return normA == normB, nil +} + +type byName []ixml.Attr + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { + if a[i].Name.Space != a[j].Name.Space { + return a[i].Name.Space < a[j].Name.Space + } + return a[i].Name.Local < a[j].Name.Local +} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 0000000..69a4ac7 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. +func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + client, err = dialWithDialer(dialer, config) + if err != nil { + goto Error + } + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 0000000..2dab943 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/vendor/golang.org/x/net/websocket/dial_test.go b/vendor/golang.org/x/net/websocket/dial_test.go new file mode 100644 index 0000000..aa03e30 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial_test.go @@ -0,0 +1,43 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "net/http/httptest" + "testing" + "time" +) + +// This test depend on Go 1.3+ because in earlier versions the Dialer won't be +// used in TLS connections and a timeout won't be triggered. +func TestDialConfigTLSWithDialer(t *testing.T) { + tlsServer := httptest.NewTLSServer(nil) + tlsServerAddr := tlsServer.Listener.Addr().String() + log.Print("Test TLS WebSocket server listening on ", tlsServerAddr) + defer tlsServer.Close() + config, _ := NewConfig(fmt.Sprintf("wss://%s/echo", tlsServerAddr), "http://localhost") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + config.TlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} diff --git a/vendor/golang.org/x/net/websocket/exampledial_test.go b/vendor/golang.org/x/net/websocket/exampledial_test.go new file mode 100644 index 0000000..72bb9d4 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/exampledial_test.go @@ -0,0 +1,31 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "fmt" + "log" + + "golang.org/x/net/websocket" +) + +// This example demonstrates a trivial client. +func ExampleDial() { + origin := "http://localhost/" + url := "ws://localhost:12345/ws" + ws, err := websocket.Dial(url, "", origin) + if err != nil { + log.Fatal(err) + } + if _, err := ws.Write([]byte("hello, world!\n")); err != nil { + log.Fatal(err) + } + var msg = make([]byte, 512) + var n int + if n, err = ws.Read(msg); err != nil { + log.Fatal(err) + } + fmt.Printf("Received: %s.\n", msg[:n]) +} diff --git a/vendor/golang.org/x/net/websocket/examplehandler_test.go b/vendor/golang.org/x/net/websocket/examplehandler_test.go new file mode 100644 index 0000000..f22a98f --- /dev/null +++ b/vendor/golang.org/x/net/websocket/examplehandler_test.go @@ -0,0 +1,26 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "io" + "net/http" + + "golang.org/x/net/websocket" +) + +// Echo the data received on the WebSocket. +func EchoServer(ws *websocket.Conn) { + io.Copy(ws, ws) +} + +// This example demonstrates a trivial echo server. +func ExampleHandler() { + http.Handle("/echo", websocket.Handler(EchoServer)) + err := http.ListenAndServe(":12345", nil) + if err != nil { + panic("ListenAndServe: " + err.Error()) + } +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 0000000..8cffdd1 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. + hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) + frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. + if frame.(*hybiFrameReader).header.MaskingKey != nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } + if header := frame.HeaderReader(); header != nil { + io.Copy(ioutil.Discard, header) + } + switch frame.PayloadType() { + case ContinuationFrame: + frame.(*hybiFrameReader).header.OpCode = handler.payloadType + case TextFrame, BinaryFrame: + handler.payloadType = frame.PayloadType() + case CloseFrame: + return nil, io.EOF + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + io.Copy(ioutil.Discard, frame) + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } + } + return nil, nil + } + return frame, nil +} + +func (handler *hybiFrameHandler) WriteClose(status int) (err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) + if err != nil { + return err + } + msg := make([]byte, 2) + binary.BigEndian.PutUint16(msg, uint16(status)) + _, err = w.Write(msg) + w.Close() + return err +} + +func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. +func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + if buf == nil { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + buf = bufio.NewReadWriter(br, bw) + } + ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, + frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, + frameWriterFactory: hybiFrameWriterFactory{ + buf.Writer, request == nil}, + PayloadType: TextFrame, + defaultCloseStatus: closeStatusNormal} + ws.frameHandler = &hybiFrameHandler{conn: ws} + return ws +} + +// generateMaskingKey generates a masking key for a frame. +func generateMaskingKey() (maskingKey []byte, err error) { + maskingKey = make([]byte, 4) + if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil { + return + } + return +} + +// generateNonce generates a nonce consisting of a randomly selected 16-byte +// value that has been base64-encoded. +func generateNonce() (nonce []byte) { + key := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + nonce = make([]byte, 24) + base64.StdEncoding.Encode(nonce, key) + return +} + +// removeZone removes IPv6 zone identifer from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + +// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of +// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. +func getNonceAccept(nonce []byte) (expected []byte, err error) { + h := sha1.New() + if _, err = h.Write(nonce); err != nil { + return + } + if _, err = h.Write([]byte(websocketGUID)); err != nil { + return + } + expected = make([]byte, 28) + base64.StdEncoding.Encode(expected, h.Sum(nil)) + return +} + +// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 +func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { + bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") + bw.WriteString("Upgrade: websocket\r\n") + bw.WriteString("Connection: Upgrade\r\n") + nonce := generateNonce() + if config.handshakeData != nil { + nonce = []byte(config.handshakeData["key"]) + } + bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") + bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") + + if config.Version != ProtocolVersionHybi13 { + return ErrBadProtocolVersion + } + + bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") + if len(config.Protocol) > 0 { + bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + err = config.Header.WriteSubset(bw, handshakeHeader) + if err != nil { + return err + } + + bw.WriteString("\r\n") + if err = bw.Flush(); err != nil { + return err + } + + resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) + if err != nil { + return err + } + if resp.StatusCode != 101 { + return ErrBadStatus + } + if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || + strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { + return ErrBadUpgrade + } + expectedAccept, err := getNonceAccept(nonce) + if err != nil { + return err + } + if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { + return ErrChallengeResponse + } + if resp.Header.Get("Sec-WebSocket-Extensions") != "" { + return ErrUnsupportedExtensions + } + offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") + if offeredProtocol != "" { + protocolMatched := false + for i := 0; i < len(config.Protocol); i++ { + if config.Protocol[i] == offeredProtocol { + protocolMatched = true + break + } + } + if !protocolMatched { + return ErrBadWebSocketProtocol + } + config.Protocol = []string{offeredProtocol} + } + + return nil +} + +// newHybiClientConn creates a client WebSocket connection after handshake. +func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { + return newHybiConn(config, buf, rwc, nil) +} + +// A HybiServerHandshaker performs a server handshake using hybi draft protocol. +type hybiServerHandshaker struct { + *Config + accept []byte +} + +func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { + c.Version = ProtocolVersionHybi13 + if req.Method != "GET" { + return http.StatusMethodNotAllowed, ErrBadRequestMethod + } + // HTTP version can be safely ignored. + + if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || + !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { + return http.StatusBadRequest, ErrNotWebSocket + } + + key := req.Header.Get("Sec-Websocket-Key") + if key == "" { + return http.StatusBadRequest, ErrChallengeResponse + } + version := req.Header.Get("Sec-Websocket-Version") + switch version { + case "13": + c.Version = ProtocolVersionHybi13 + default: + return http.StatusBadRequest, ErrBadWebSocketVersion + } + var scheme string + if req.TLS != nil { + scheme = "wss" + } else { + scheme = "ws" + } + c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) + if err != nil { + return http.StatusBadRequest, err + } + protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) + if protocol != "" { + protocols := strings.Split(protocol, ",") + for i := 0; i < len(protocols); i++ { + c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) + } + } + c.accept, err = getNonceAccept([]byte(key)) + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusSwitchingProtocols, nil +} + +// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. +func Origin(config *Config, req *http.Request) (*url.URL, error) { + var origin string + switch config.Version { + case ProtocolVersionHybi13: + origin = req.Header.Get("Origin") + } + if origin == "" { + return nil, nil + } + return url.ParseRequestURI(origin) +} + +func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { + if len(c.Protocol) > 0 { + if len(c.Protocol) != 1 { + // You need choose a Protocol in Handshake func in Server. + return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/hybi_test.go b/vendor/golang.org/x/net/websocket/hybi_test.go new file mode 100644 index 0000000..9504aa2 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi_test.go @@ -0,0 +1,608 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "testing" +) + +// Test the getNonceAccept function with values in +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 +func TestSecWebSocketAccept(t *testing.T) { + nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==") + expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") + accept, err := getNonceAccept(nonce) + if err != nil { + t.Errorf("getNonceAccept: returned error %v", err) + return + } + if !bytes.Equal(expected, accept) { + t.Errorf("getNonceAccept: expected %q got %q", expected, accept) + } +} + +func TestHybiClientHandshake(t *testing.T) { + type test struct { + url, host string + } + tests := []test{ + {"ws://server.example.com/chat", "server.example.com"}, + {"ws://127.0.0.1/chat", "127.0.0.1"}, + } + if _, err := url.ParseRequestURI("http://[fe80::1%25lo0]"); err == nil { + tests = append(tests, test{"ws://[fe80::1%25lo0]/chat", "[fe80::1]"}) + } + + for _, tt := range tests { + var b bytes.Buffer + bw := bufio.NewWriter(&b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + var config Config + config.Location, err = url.ParseRequestURI(tt.url) + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + if err := hybiClientHandshake(&config, br, bw); err != nil { + t.Fatal("handshake", err) + } + req, err := http.ReadRequest(bufio.NewReader(&b)) + if err != nil { + t.Fatal("read request", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %s", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %s", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %s", req.Proto) + } + if req.Host != tt.host { + t.Errorf("request host expected %s, but got %s", tt.host, req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf("%s expected %s, but got %v", k, v, req.Header.Get(k)) + } + } + } +} + +func TestHybiClientHandshakeWithHeader(t *testing.T) { + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + config := new(Config) + config.Location, err = url.ParseRequestURI("ws://server.example.com/chat") + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.Header = http.Header(make(map[string][]string)) + config.Header.Add("User-Agent", "test") + + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + err = hybiClientHandshake(config, br, bw) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + req, err := http.ReadRequest(bufio.NewReader(b)) + if err != nil { + t.Fatalf("read request: %v", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %q", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %q", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto) + } + if req.Host != "server.example.com" { + t.Errorf("request Host expected server.example.com, but got %v", req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + "User-Agent": "test", + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k))) + } + } +} + +func TestHybiServerHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + expectedProtocols := []string{"chat", "superchat"} + if fmt.Sprintf("%v", config.Protocol) != fmt.Sprintf("%v", expectedProtocols) { + t.Errorf("protocol expected %q but got %q", expectedProtocols, config.Protocol) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = config.Protocol[:1] + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeNoSubProtocol(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + if len(config.Protocol) != 0 { + t.Errorf("len(config.Protocol) expected 0, but got %q", len(config.Protocol)) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeHybiBadVersion(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Sec-WebSocket-Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 9 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != ErrBadWebSocketVersion { + t.Errorf("handshake expected err %q but got %q", ErrBadWebSocketVersion, err) + } + if code != http.StatusBadRequest { + t.Errorf("status expected %q but got %q", http.StatusBadRequest, code) + } +} + +func testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) { + b := bytes.NewBuffer([]byte{}) + frameWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false} + w, _ := frameWriterFactory.NewFrameWriter(TextFrame) + w.(*hybiFrameWriter).header = frameHeader + _, err := w.Write(testPayload) + w.Close() + if err != nil { + t.Errorf("Write error %q", err) + } + var expectedFrame []byte + expectedFrame = append(expectedFrame, testHeader...) + expectedFrame = append(expectedFrame, testMaskedPayload...) + if !bytes.Equal(expectedFrame, b.Bytes()) { + t.Errorf("frame expected %q got %q", expectedFrame, b.Bytes()) + } + frameReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)} + r, err := frameReaderFactory.NewFrameReader() + if err != nil { + t.Errorf("Read error %q", err) + } + if header := r.HeaderReader(); header == nil { + t.Errorf("no header") + } else { + actualHeader := make([]byte, r.Len()) + n, err := header.Read(actualHeader) + if err != nil { + t.Errorf("Read header error %q", err) + } else { + if n < len(testHeader) { + t.Errorf("header too short %q got %q", testHeader, actualHeader[:n]) + } + if !bytes.Equal(testHeader, actualHeader[:n]) { + t.Errorf("header expected %q got %q", testHeader, actualHeader[:n]) + } + } + } + if trailer := r.TrailerReader(); trailer != nil { + t.Errorf("unexpected trailer %q", trailer) + } + frame := r.(*hybiFrameReader) + if frameHeader.Fin != frame.header.Fin || + frameHeader.OpCode != frame.header.OpCode || + len(testPayload) != int(frame.header.Length) { + t.Errorf("mismatch %v (%d) vs %v", frameHeader, len(testPayload), frame) + } + payload := make([]byte, len(testPayload)) + _, err = r.Read(payload) + if err != nil && err != io.EOF { + t.Errorf("read %v", err) + } + if !bytes.Equal(testPayload, payload) { + t.Errorf("payload %q vs %q", testPayload, payload) + } +} + +func TestHybiShortTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader) +} + +func TestHybiShortMaskedTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame, + MaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}} + payload := []byte("hello") + maskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3} + header := []byte{0x81, 0x85} + header = append(header, frameHeader.MaskingKey...) + testHybiFrame(t, header, payload, maskedPayload, frameHeader) +} + +func TestHybiShortBinaryFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader) +} + +func TestHybiControlFrame(t *testing.T) { + payload := []byte("hello") + + frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame} + payload = []byte{0x03, 0xe8} // 1000 + testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader) +} + +func TestHybiLongFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := make([]byte, 126) + testHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader) + + payload = make([]byte, 65535) + testHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader) + + payload = make([]byte, 65536) + testHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, frameHeader) +} + +func TestHybiClientRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[2:7], msg[:n]) { + t.Errorf("read 1st frame %v, got %v", wireData[2:7], msg[:n]) + } + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[16:21], msg[:n]) { + t.Errorf("read 2nd frame %v, got %v", wireData[16:21], msg[:n]) + } + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiShortRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + step := 0 + pos := 0 + expectedPos := []int{2, 5, 16, 19} + expectedLen := []int{3, 2, 3, 2} + for { + msg := make([]byte, 3) + n, err := conn.Read(msg) + if step >= len(expectedPos) { + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } + return + } + pos = expectedPos[step] + endPos := pos + expectedLen[step] + if err != nil { + t.Errorf("read from %d, got error %q", pos, err) + return + } + if n != endPos-pos { + t.Errorf("read from %d, expect %d, got %d", pos, endPos-pos, n) + } + if !bytes.Equal(wireData[pos:endPos], msg[:n]) { + t.Errorf("read from %d, frame %v, got %v", pos, wireData[pos:endPos], msg[:n]) + } + step++ + } +} + +func TestHybiServerRead(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + 0x89, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello + 0x81, 0x85, 0xed, 0x83, 0xb4, 0x24, + 0x9a, 0xec, 0xc6, 0x48, 0x89, // world + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + + expected := [][]byte{[]byte("hello"), []byte("world")} + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[0], msg[:n]) { + t.Errorf("read 1st frame %q, got %q", expected[0], msg[:n]) + } + + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[1], msg[:n]) { + t.Errorf("read 2nd frame %q, got %q", expected[1], msg[:n]) + } + + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiServerReadWithoutMasking(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + // server MUST close the connection upon receiving a non-masked frame. + msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +func TestHybiClientReadWithMasking(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + // client MUST close the connection upon receiving a masked frame. + msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +// Test the hybiServerHandshaker supports firefox implementation and +// checks Connection request header include (but it's not necessary +// equal to) "upgrade" +func TestHybiServerFirefoxHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: keep-alive, upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = []string{"chat"} + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go new file mode 100644 index 0000000..0895dea --- /dev/null +++ b/vendor/golang.org/x/net/websocket/server.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "fmt" + "io" + "net/http" +) + +func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { + var hs serverHandshaker = &hybiServerHandshaker{Config: config} + code, err := hs.ReadHandshake(buf.Reader, req) + if err == ErrBadWebSocketVersion { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if err != nil { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if handshake != nil { + err = handshake(config, req) + if err != nil { + code = http.StatusForbidden + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + } + err = hs.AcceptHandshake(buf.Writer) + if err != nil { + code = http.StatusBadRequest + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + conn = hs.NewServerConn(buf, rwc, req) + return +} + +// Server represents a server of a WebSocket. +type Server struct { + // Config is a WebSocket configuration for new WebSocket connection. + Config + + // Handshake is an optional function in WebSocket handshake. + // For example, you can check, or don't check Origin header. + // Another example, you can select config.Protocol. + Handshake func(*Config, *http.Request) error + + // Handler handles a WebSocket connection. + Handler +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.serveWebSocket(w, req) +} + +func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { + rwc, buf, err := w.(http.Hijacker).Hijack() + if err != nil { + panic("Hijack failed: " + err.Error()) + } + // The server should abort the WebSocket connection if it finds + // the client did not send a handshake that matches with protocol + // specification. + defer rwc.Close() + conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) + if err != nil { + return + } + if conn == nil { + panic("unexpected nil conn") + } + s.Handler(conn) +} + +// Handler is a simple interface to a WebSocket browser client. +// It checks if Origin header is valid URL by default. +// You might want to verify websocket.Conn.Config().Origin in the func. +// If you use Server instead of Handler, you could call websocket.Origin and +// check the origin in your Handshake func. So, if you want to accept +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. +type Handler func(*Conn) + +func checkOrigin(config *Config, req *http.Request) (err error) { + config.Origin, err = Origin(config, req) + if err == nil && config.Origin == nil { + return fmt.Errorf("null origin") + } + return err +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s := Server{Handler: h, Handshake: checkOrigin} + s.serveWebSocket(w, req) +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go new file mode 100644 index 0000000..e242c89 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -0,0 +1,448 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements a client and server for the WebSocket protocol +// as specified in RFC 6455. +// +// This package currently lacks some features found in an alternative +// and more actively maintained WebSocket package: +// +// https://godoc.org/github.com/gorilla/websocket +// +package websocket // import "golang.org/x/net/websocket" + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +const ( + ProtocolVersionHybi13 = 13 + ProtocolVersionHybi = ProtocolVersionHybi13 + SupportedProtocolVersion = "13" + + ContinuationFrame = 0 + TextFrame = 1 + BinaryFrame = 2 + CloseFrame = 8 + PingFrame = 9 + PongFrame = 10 + UnknownFrame = 255 + + DefaultMaxPayloadBytes = 32 << 20 // 32MB +) + +// ProtocolError represents WebSocket protocol errors. +type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} + ErrBadScheme = &ProtocolError{"bad scheme"} + ErrBadStatus = &ProtocolError{"bad status"} + ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} + ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} + ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} + ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} + ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} + ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} + ErrBadFrame = &ProtocolError{"bad frame"} + ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} + ErrNotWebSocket = &ProtocolError{"not websocket protocol"} + ErrBadRequestMethod = &ProtocolError{"bad method"} + ErrNotSupported = &ProtocolError{"not supported"} +) + +// ErrFrameTooLarge is returned by Codec's Receive method if payload size +// exceeds limit set by Conn.MaxPayloadBytes +var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit") + +// Addr is an implementation of net.Addr for WebSocket. +type Addr struct { + *url.URL +} + +// Network returns the network type for a WebSocket, "websocket". +func (addr *Addr) Network() string { return "websocket" } + +// Config is a WebSocket configuration +type Config struct { + // A WebSocket server address. + Location *url.URL + + // A Websocket client origin. + Origin *url.URL + + // WebSocket subprotocols. + Protocol []string + + // WebSocket protocol version. + Version int + + // TLS config for secure WebSocket (wss). + TlsConfig *tls.Config + + // Additional header fields to be sent in WebSocket opening handshake. + Header http.Header + + // Dialer used when opening websocket connections. + Dialer *net.Dialer + + handshakeData map[string]string +} + +// serverHandshaker is an interface to handle WebSocket server side handshake. +type serverHandshaker interface { + // ReadHandshake reads handshake request message from client. + // Returns http response code and error if any. + ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) + + // AcceptHandshake accepts the client handshake request and sends + // handshake response back to client. + AcceptHandshake(buf *bufio.Writer) (err error) + + // NewServerConn creates a new WebSocket connection. + NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) +} + +// frameReader is an interface to read a WebSocket frame. +type frameReader interface { + // Reader is to read payload of the frame. + io.Reader + + // PayloadType returns payload type. + PayloadType() byte + + // HeaderReader returns a reader to read header of the frame. + HeaderReader() io.Reader + + // TrailerReader returns a reader to read trailer of the frame. + // If it returns nil, there is no trailer in the frame. + TrailerReader() io.Reader + + // Len returns total length of the frame, including header and trailer. + Len() int +} + +// frameReaderFactory is an interface to creates new frame reader. +type frameReaderFactory interface { + NewFrameReader() (r frameReader, err error) +} + +// frameWriter is an interface to write a WebSocket frame. +type frameWriter interface { + // Writer is to write payload of the frame. + io.WriteCloser +} + +// frameWriterFactory is an interface to create new frame writer. +type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. +func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. +func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. +func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. + +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. + +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/net/websocket/websocket_test.go b/vendor/golang.org/x/net/websocket/websocket_test.go new file mode 100644 index 0000000..2054ce8 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket_test.go @@ -0,0 +1,665 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +var serverAddr string +var once sync.Once + +func echoServer(ws *Conn) { + defer ws.Close() + io.Copy(ws, ws) +} + +type Count struct { + S string + N int +} + +func countServer(ws *Conn) { + defer ws.Close() + for { + var count Count + err := JSON.Receive(ws, &count) + if err != nil { + return + } + count.N++ + count.S = strings.Repeat(count.S, count.N) + err = JSON.Send(ws, count) + if err != nil { + return + } + } +} + +type testCtrlAndDataHandler struct { + hybiFrameHandler +} + +func (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) { + h.hybiFrameHandler.conn.wio.Lock() + defer h.hybiFrameHandler.conn.wio.Unlock() + w, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame) + if err != nil { + return 0, err + } + n, err := w.Write(b) + w.Close() + return n, err +} + +func ctrlAndDataServer(ws *Conn) { + defer ws.Close() + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + go func() { + for i := 0; ; i++ { + var b []byte + if i%2 != 0 { // with or without payload + b = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-SERVER", i)) + } + if _, err := h.WritePing(b); err != nil { + break + } + if _, err := h.WritePong(b); err != nil { // unsolicited pong + break + } + time.Sleep(10 * time.Millisecond) + } + }() + + b := make([]byte, 128) + for { + n, err := ws.Read(b) + if err != nil { + break + } + if _, err := ws.Write(b[:n]); err != nil { + break + } + } +} + +func subProtocolHandshake(config *Config, req *http.Request) error { + for _, proto := range config.Protocol { + if proto == "chat" { + config.Protocol = []string{proto} + return nil + } + } + return ErrBadWebSocketProtocol +} + +func subProtoServer(ws *Conn) { + for _, proto := range ws.Config().Protocol { + io.WriteString(ws, proto) + } +} + +func startServer() { + http.Handle("/echo", Handler(echoServer)) + http.Handle("/count", Handler(countServer)) + http.Handle("/ctrldata", Handler(ctrlAndDataServer)) + subproto := Server{ + Handshake: subProtocolHandshake, + Handler: Handler(subProtoServer), + } + http.Handle("/subproto", subproto) + server := httptest.NewServer(nil) + serverAddr = server.Listener.Addr().String() + log.Print("Test WebSocket server listening on ", serverAddr) +} + +func newConfig(t *testing.T, path string) *Config { + config, _ := NewConfig(fmt.Sprintf("ws://%s%s", serverAddr, path), "http://localhost") + return config +} + +func TestEcho(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var actual_msg = make([]byte, 512) + n, err := conn.Read(actual_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + actual_msg = actual_msg[0:n] + if !bytes.Equal(msg, actual_msg) { + t.Errorf("Echo: expected %q got %q", msg, actual_msg) + } + conn.Close() +} + +func TestAddr(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + ra := conn.RemoteAddr().String() + if !strings.HasPrefix(ra, "ws://") || !strings.HasSuffix(ra, "/echo") { + t.Errorf("Bad remote addr: %v", ra) + } + la := conn.LocalAddr().String() + if !strings.HasPrefix(la, "http://") { + t.Errorf("Bad local addr: %v", la) + } + conn.Close() +} + +func TestCount(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/count"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + var count Count + count.S = "hello" + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 1 { + t.Errorf("count: expected %d got %d", 1, count.N) + } + if count.S != "hello" { + t.Errorf("count: expected %q got %q", "hello", count.S) + } + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 2 { + t.Errorf("count: expected %d got %d", 2, count.N) + } + if count.S != "hellohello" { + t.Errorf("count: expected %q got %q", "hellohello", count.S) + } + conn.Close() +} + +func TestWithQuery(t *testing.T) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := newConfig(t, "/echo") + config.Location, err = url.ParseRequestURI(fmt.Sprintf("ws://%s/echo?q=v", serverAddr)) + if err != nil { + t.Fatal("location url", err) + } + + ws, err := NewClient(config, client) + if err != nil { + t.Errorf("WebSocket handshake: %v", err) + return + } + ws.Close() +} + +func testWithProtocol(t *testing.T, subproto []string) (string, error) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := newConfig(t, "/subproto") + config.Protocol = subproto + + ws, err := NewClient(config, client) + if err != nil { + return "", err + } + msg := make([]byte, 16) + n, err := ws.Read(msg) + if err != nil { + return "", err + } + ws.Close() + return string(msg[:n]), nil +} + +func TestWithProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithTwoProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"test", "chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithBadProtocol(t *testing.T) { + _, err := testWithProtocol(t, []string{"test"}) + if err != ErrBadStatus { + t.Errorf("SubProto: expected %v, got %v", ErrBadStatus, err) + } +} + +func TestHTTP(t *testing.T) { + once.Do(startServer) + + // If the client did not send a handshake that matches the protocol + // specification, the server MUST return an HTTP response with an + // appropriate error code (such as 400 Bad Request) + resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr)) + if err != nil { + t.Errorf("Get: error %#v", err) + return + } + if resp == nil { + t.Error("Get: resp is null") + return + } + if resp.StatusCode != http.StatusBadRequest { + t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode) + } +} + +func TestTrailingSpaces(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=955 + // The last runs of this create keys with trailing spaces that should not be + // generated by the client. + once.Do(startServer) + config := newConfig(t, "/echo") + for i := 0; i < 30; i++ { + // body + ws, err := DialConfig(config) + if err != nil { + t.Errorf("Dial #%d failed: %v", i, err) + break + } + ws.Close() + } +} + +func TestDialConfigBadVersion(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Version = 1234 + + _, err := DialConfig(config) + + if dialerr, ok := err.(*DialError); ok { + if dialerr.Err != ErrBadProtocolVersion { + t.Errorf("dial expected err %q but got %q", ErrBadProtocolVersion, dialerr.Err) + } + } +} + +func TestDialConfigWithDialer(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} + +func TestSmallBuffer(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=1145 + // Read should be able to handle reading a fragment of a frame. + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var small_msg = make([]byte, 8) + n, err := conn.Read(small_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + if !bytes.Equal(msg[:len(small_msg)], small_msg) { + t.Errorf("Echo: expected %q got %q", msg[:len(small_msg)], small_msg) + } + var second_msg = make([]byte, len(msg)) + n, err = conn.Read(second_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + second_msg = second_msg[0:n] + if !bytes.Equal(msg[len(small_msg):], second_msg) { + t.Errorf("Echo: expected %q got %q", msg[len(small_msg):], second_msg) + } + conn.Close() +} + +var parseAuthorityTests = []struct { + in *url.URL + out string +}{ + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com", + }, + "www.google.com:443", + }, + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com:443", + }, + "www.google.com:443", + }, + // some invalid ones for parseAuthority. parseAuthority doesn't + // concern itself with the scheme unless it actually knows about it + { + &url.URL{ + Scheme: "http", + Host: "www.google.com", + }, + "www.google.com", + }, + { + &url.URL{ + Scheme: "http", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "127.0.0.1", + }, + "127.0.0.1", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "www.google.com", + }, + "www.google.com", + }, +} + +func TestParseAuthority(t *testing.T) { + for _, tt := range parseAuthorityTests { + out := parseAuthority(tt.in) + if out != tt.out { + t.Errorf("got %v; want %v", out, tt.out) + } + } +} + +type closerConn struct { + net.Conn + closed int // count of the number of times Close was called +} + +func (c *closerConn) Close() error { + c.closed++ + return c.Conn.Close() +} + +func TestClose(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/11454") + } + + once.Do(startServer) + + conn, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + cc := closerConn{Conn: conn} + + client, err := NewClient(newConfig(t, "/echo"), &cc) + if err != nil { + t.Fatalf("WebSocket handshake: %v", err) + } + + // set the deadline to ten minutes ago, which will have expired by the time + // client.Close sends the close status frame. + conn.SetDeadline(time.Now().Add(-10 * time.Minute)) + + if err := client.Close(); err == nil { + t.Errorf("ws.Close(): expected error, got %v", err) + } + if cc.closed < 1 { + t.Fatalf("ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v", cc.closed) + } +} + +var originTests = []struct { + req *http.Request + origin *url.URL +}{ + { + req: &http.Request{ + Header: http.Header{ + "Origin": []string{"http://www.example.com"}, + }, + }, + origin: &url.URL{ + Scheme: "http", + Host: "www.example.com", + }, + }, + { + req: &http.Request{}, + }, +} + +func TestOrigin(t *testing.T) { + conf := newConfig(t, "/echo") + conf.Version = ProtocolVersionHybi13 + for i, tt := range originTests { + origin, err := Origin(conf, tt.req) + if err != nil { + t.Error(err) + continue + } + if !reflect.DeepEqual(origin, tt.origin) { + t.Errorf("#%d: got origin %v; want %v", i, origin, tt.origin) + continue + } + } +} + +func TestCtrlAndData(t *testing.T) { + once.Do(startServer) + + c, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal(err) + } + ws, err := NewClient(newConfig(t, "/ctrldata"), c) + if err != nil { + t.Fatal(err) + } + defer ws.Close() + + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + b := make([]byte, 128) + for i := 0; i < 2; i++ { + data := []byte(fmt.Sprintf("#%d-DATA-FRAME-FROM-CLIENT", i)) + if _, err := ws.Write(data); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var ctrl []byte + if i%2 != 0 { // with or without payload + ctrl = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-CLIENT", i)) + } + if _, err := h.WritePing(ctrl); err != nil { + t.Fatalf("#%d: %v", i, err) + } + n, err := ws.Read(b) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + if !bytes.Equal(b[:n], data) { + t.Fatalf("#%d: got %v; want %v", i, b[:n], data) + } + } +} + +func TestCodec_ReceiveLimited(t *testing.T) { + const limit = 2048 + var payloads [][]byte + for _, size := range []int{ + 1024, + 2048, + 4096, // receive of this message would be interrupted due to limit + 2048, // this one is to make sure next receive recovers discarding leftovers + } { + b := make([]byte, size) + rand.Read(b) + payloads = append(payloads, b) + } + handlerDone := make(chan struct{}) + limitedHandler := func(ws *Conn) { + defer close(handlerDone) + ws.MaxPayloadBytes = limit + defer ws.Close() + for i, p := range payloads { + t.Logf("payload #%d (size %d, exceeds limit: %v)", i, len(p), len(p) > limit) + var recv []byte + err := Message.Receive(ws, &recv) + switch err { + case nil: + case ErrFrameTooLarge: + if len(p) <= limit { + t.Fatalf("unexpected frame size limit: expected %d bytes of payload having limit at %d", len(p), limit) + } + continue + default: + t.Fatalf("unexpected error: %v (want either nil or ErrFrameTooLarge)", err) + } + if len(recv) > limit { + t.Fatalf("received %d bytes of payload having limit at %d", len(recv), limit) + } + if !bytes.Equal(p, recv) { + t.Fatalf("received payload differs:\ngot:\t%v\nwant:\t%v", recv, p) + } + } + } + server := httptest.NewServer(Handler(limitedHandler)) + defer server.CloseClientConnections() + defer server.Close() + addr := server.Listener.Addr().String() + ws, err := Dial("ws://"+addr+"/", "", "http://localhost/") + if err != nil { + t.Fatal(err) + } + defer ws.Close() + for i, p := range payloads { + if err := Message.Send(ws, p); err != nil { + t.Fatalf("payload #%d (size %d): %v", i, len(p), err) + } + } + <-handlerDone +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf.go b/vendor/golang.org/x/net/xsrftoken/xsrf.go new file mode 100644 index 0000000..bc861e1 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xsrftoken provides methods for generating and validating secure XSRF tokens. +package xsrftoken // import "golang.org/x/net/xsrftoken" + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" +) + +// Timeout is the duration for which XSRF tokens are valid. +// It is exported so clients may set cookie timeouts that match generated tokens. +const Timeout = 24 * time.Hour + +// clean sanitizes a string for inclusion in a token by replacing all ":"s. +func clean(s string) string { + return strings.Replace(s, ":", "_", -1) +} + +// Generate returns a URL-safe secure XSRF token that expires in 24 hours. +// +// key is a secret key for your application; it must be non-empty. +// userID is an optional unique identifier for the user. +// actionID is an optional action the user is taking (e.g. POSTing to a particular path). +func Generate(key, userID, actionID string) string { + return generateTokenAtTime(key, userID, actionID, time.Now()) +} + +// generateTokenAtTime is like Generate, but returns a token that expires 24 hours from now. +func generateTokenAtTime(key, userID, actionID string, now time.Time) string { + if len(key) == 0 { + panic("zero length xsrf secret key") + } + // Round time up and convert to milliseconds. + milliTime := (now.UnixNano() + 1e6 - 1) / 1e6 + + h := hmac.New(sha1.New, []byte(key)) + fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), milliTime) + + // Get the padded base64 string then removing the padding. + tok := string(h.Sum(nil)) + tok = base64.URLEncoding.EncodeToString([]byte(tok)) + tok = strings.TrimRight(tok, "=") + + return fmt.Sprintf("%s:%d", tok, milliTime) +} + +// Valid reports whether a token is a valid, unexpired token returned by Generate. +func Valid(token, key, userID, actionID string) bool { + return validTokenAtTime(token, key, userID, actionID, time.Now()) +} + +// validTokenAtTime reports whether a token is valid at the given time. +func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool { + if len(key) == 0 { + panic("zero length xsrf secret key") + } + // Extract the issue time of the token. + sep := strings.LastIndex(token, ":") + if sep < 0 { + return false + } + millis, err := strconv.ParseInt(token[sep+1:], 10, 64) + if err != nil { + return false + } + issueTime := time.Unix(0, millis*1e6) + + // Check that the token is not expired. + if now.Sub(issueTime) >= Timeout { + return false + } + + // Check that the token is not from the future. + // Allow 1 minute grace period in case the token is being verified on a + // machine whose clock is behind the machine that issued the token. + if issueTime.After(now.Add(1 * time.Minute)) { + return false + } + + expected := generateTokenAtTime(key, userID, actionID, issueTime) + + // Check that the token matches the expected value. + // Use constant time comparison to avoid timing attacks. + return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1 +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf_test.go b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go new file mode 100644 index 0000000..6c8e7d9 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xsrftoken + +import ( + "encoding/base64" + "testing" + "time" +) + +const ( + key = "quay" + userID = "12345678" + actionID = "POST /form" +) + +var ( + now = time.Now() + oneMinuteFromNow = now.Add(1 * time.Minute) +) + +func TestValidToken(t *testing.T) { + tok := generateTokenAtTime(key, userID, actionID, now) + if !validTokenAtTime(tok, key, userID, actionID, oneMinuteFromNow) { + t.Error("One second later: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) { + t.Error("Just before timeout: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute+1*time.Millisecond)) { + t.Error("One minute in the past: Expected token to be valid") + } +} + +// TestSeparatorReplacement tests that separators are being correctly substituted +func TestSeparatorReplacement(t *testing.T) { + tok := generateTokenAtTime("foo:bar", "baz", "wah", now) + tok2 := generateTokenAtTime("foo", "bar:baz", "wah", now) + if tok == tok2 { + t.Errorf("Expected generated tokens to be different") + } +} + +func TestInvalidToken(t *testing.T) { + invalidTokenTests := []struct { + name, key, userID, actionID string + t time.Time + }{ + {"Bad key", "foobar", userID, actionID, oneMinuteFromNow}, + {"Bad userID", key, "foobar", actionID, oneMinuteFromNow}, + {"Bad actionID", key, userID, "foobar", oneMinuteFromNow}, + {"Expired", key, userID, actionID, now.Add(Timeout + 1*time.Millisecond)}, + {"More than 1 minute from the future", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)}, + } + + tok := generateTokenAtTime(key, userID, actionID, now) + for _, itt := range invalidTokenTests { + if validTokenAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) { + t.Errorf("%v: Expected token to be invalid", itt.name) + } + } +} + +// TestValidateBadData primarily tests that no unexpected panics are triggered +// during parsing +func TestValidateBadData(t *testing.T) { + badDataTests := []struct { + name, tok string + }{ + {"Invalid Base64", "ASDab24(@)$*=="}, + {"No delimiter", base64.URLEncoding.EncodeToString([]byte("foobar12345678"))}, + {"Invalid time", base64.URLEncoding.EncodeToString([]byte("foobar:foobar"))}, + {"Wrong length", "1234" + generateTokenAtTime(key, userID, actionID, now)}, + } + + for _, bdt := range badDataTests { + if validTokenAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) { + t.Errorf("%v: Expected token to be invalid", bdt.name) + } + } +} diff --git a/vendor/golang.org/x/sys/.gitattributes b/vendor/golang.org/x/sys/.gitattributes new file mode 100644 index 0000000..d2f212e --- /dev/null +++ b/vendor/golang.org/x/sys/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/sys/.gitignore b/vendor/golang.org/x/sys/.gitignore new file mode 100644 index 0000000..8339fd6 --- /dev/null +++ b/vendor/golang.org/x/sys/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/sys/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTING.md b/vendor/golang.org/x/sys/CONTRIBUTING.md new file mode 100644 index 0000000..d0485e8 --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/sys/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sys/README.md b/vendor/golang.org/x/sys/README.md new file mode 100644 index 0000000..ef6c9e5 --- /dev/null +++ b/vendor/golang.org/x/sys/README.md @@ -0,0 +1,18 @@ +# sys + +This repository holds supplemental Go packages for low-level interactions with +the operating system. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/sys`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/sys`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the sys repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/sys:" in the +subject line, so it is easy to find. diff --git a/vendor/golang.org/x/sys/codereview.cfg b/vendor/golang.org/x/sys/codereview.cfg new file mode 100644 index 0000000..3f8b14b --- /dev/null +++ b/vendor/golang.org/x/sys/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/sys/plan9/asm.s b/vendor/golang.org/x/sys/plan9/asm.s new file mode 100644 index 0000000..06449eb --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/asm.s @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·use(SB),NOSPLIT,$0 + RET diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_386.s b/vendor/golang.org/x/sys/plan9/asm_plan9_386.s new file mode 100644 index 0000000..bc5cab1 --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/asm_plan9_386.s @@ -0,0 +1,30 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// +// System call support for 386, Plan 9 +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-32 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-44 + JMP syscall·Syscall6(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) + +TEXT ·seek(SB),NOSPLIT,$0-36 + JMP syscall·seek(SB) + +TEXT ·exit(SB),NOSPLIT,$4-4 + JMP syscall·exit(SB) diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s b/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s new file mode 100644 index 0000000..d3448e6 --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s @@ -0,0 +1,30 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// +// System call support for amd64, Plan 9 +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-64 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-88 + JMP syscall·Syscall6(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·seek(SB),NOSPLIT,$0-56 + JMP syscall·seek(SB) + +TEXT ·exit(SB),NOSPLIT,$8-8 + JMP syscall·exit(SB) diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s b/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s new file mode 100644 index 0000000..afb7c0a --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s @@ -0,0 +1,25 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// System call support for plan9 on arm + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-32 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-44 + JMP syscall·Syscall6(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) + +TEXT ·seek(SB),NOSPLIT,$0-36 + JMP syscall·exit(SB) diff --git a/vendor/golang.org/x/sys/plan9/const_plan9.go b/vendor/golang.org/x/sys/plan9/const_plan9.go new file mode 100644 index 0000000..b4e85a3 --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/const_plan9.go @@ -0,0 +1,70 @@ +package plan9 + +// Plan 9 Constants + +// Open modes +const ( + O_RDONLY = 0 + O_WRONLY = 1 + O_RDWR = 2 + O_TRUNC = 16 + O_CLOEXEC = 32 + O_EXCL = 0x1000 +) + +// Rfork flags +const ( + RFNAMEG = 1 << 0 + RFENVG = 1 << 1 + RFFDG = 1 << 2 + RFNOTEG = 1 << 3 + RFPROC = 1 << 4 + RFMEM = 1 << 5 + RFNOWAIT = 1 << 6 + RFCNAMEG = 1 << 10 + RFCENVG = 1 << 11 + RFCFDG = 1 << 12 + RFREND = 1 << 13 + RFNOMNT = 1 << 14 +) + +// Qid.Type bits +const ( + QTDIR = 0x80 + QTAPPEND = 0x40 + QTEXCL = 0x20 + QTMOUNT = 0x10 + QTAUTH = 0x08 + QTTMP = 0x04 + QTFILE = 0x00 +) + +// Dir.Mode bits +const ( + DMDIR = 0x80000000 + DMAPPEND = 0x40000000 + DMEXCL = 0x20000000 + DMMOUNT = 0x10000000 + DMAUTH = 0x08000000 + DMTMP = 0x04000000 + DMREAD = 0x4 + DMWRITE = 0x2 + DMEXEC = 0x1 +) + +const ( + STATMAX = 65535 + ERRMAX = 128 + STATFIXLEN = 49 +) + +// Mount and bind flags +const ( + MREPL = 0x0000 + MBEFORE = 0x0001 + MAFTER = 0x0002 + MORDER = 0x0003 + MCREATE = 0x0004 + MCACHE = 0x0010 + MMASK = 0x0017 +) diff --git a/vendor/golang.org/x/sys/plan9/dir_plan9.go b/vendor/golang.org/x/sys/plan9/dir_plan9.go new file mode 100644 index 0000000..0955e0c --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/dir_plan9.go @@ -0,0 +1,212 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Plan 9 directory marshalling. See intro(5). + +package plan9 + +import "errors" + +var ( + ErrShortStat = errors.New("stat buffer too short") + ErrBadStat = errors.New("malformed stat buffer") + ErrBadName = errors.New("bad character in file name") +) + +// A Qid represents a 9P server's unique identification for a file. +type Qid struct { + Path uint64 // the file server's unique identification for the file + Vers uint32 // version number for given Path + Type uint8 // the type of the file (plan9.QTDIR for example) +} + +// A Dir contains the metadata for a file. +type Dir struct { + // system-modified data + Type uint16 // server type + Dev uint32 // server subtype + + // file data + Qid Qid // unique id from server + Mode uint32 // permissions + Atime uint32 // last read time + Mtime uint32 // last write time + Length int64 // file length + Name string // last element of path + Uid string // owner name + Gid string // group name + Muid string // last modifier name +} + +var nullDir = Dir{ + Type: ^uint16(0), + Dev: ^uint32(0), + Qid: Qid{ + Path: ^uint64(0), + Vers: ^uint32(0), + Type: ^uint8(0), + }, + Mode: ^uint32(0), + Atime: ^uint32(0), + Mtime: ^uint32(0), + Length: ^int64(0), +} + +// Null assigns special "don't touch" values to members of d to +// avoid modifying them during plan9.Wstat. +func (d *Dir) Null() { *d = nullDir } + +// Marshal encodes a 9P stat message corresponding to d into b +// +// If there isn't enough space in b for a stat message, ErrShortStat is returned. +func (d *Dir) Marshal(b []byte) (n int, err error) { + n = STATFIXLEN + len(d.Name) + len(d.Uid) + len(d.Gid) + len(d.Muid) + if n > len(b) { + return n, ErrShortStat + } + + for _, c := range d.Name { + if c == '/' { + return n, ErrBadName + } + } + + b = pbit16(b, uint16(n)-2) + b = pbit16(b, d.Type) + b = pbit32(b, d.Dev) + b = pbit8(b, d.Qid.Type) + b = pbit32(b, d.Qid.Vers) + b = pbit64(b, d.Qid.Path) + b = pbit32(b, d.Mode) + b = pbit32(b, d.Atime) + b = pbit32(b, d.Mtime) + b = pbit64(b, uint64(d.Length)) + b = pstring(b, d.Name) + b = pstring(b, d.Uid) + b = pstring(b, d.Gid) + b = pstring(b, d.Muid) + + return n, nil +} + +// UnmarshalDir decodes a single 9P stat message from b and returns the resulting Dir. +// +// If b is too small to hold a valid stat message, ErrShortStat is returned. +// +// If the stat message itself is invalid, ErrBadStat is returned. +func UnmarshalDir(b []byte) (*Dir, error) { + if len(b) < STATFIXLEN { + return nil, ErrShortStat + } + size, buf := gbit16(b) + if len(b) != int(size)+2 { + return nil, ErrBadStat + } + b = buf + + var d Dir + d.Type, b = gbit16(b) + d.Dev, b = gbit32(b) + d.Qid.Type, b = gbit8(b) + d.Qid.Vers, b = gbit32(b) + d.Qid.Path, b = gbit64(b) + d.Mode, b = gbit32(b) + d.Atime, b = gbit32(b) + d.Mtime, b = gbit32(b) + + n, b := gbit64(b) + d.Length = int64(n) + + var ok bool + if d.Name, b, ok = gstring(b); !ok { + return nil, ErrBadStat + } + if d.Uid, b, ok = gstring(b); !ok { + return nil, ErrBadStat + } + if d.Gid, b, ok = gstring(b); !ok { + return nil, ErrBadStat + } + if d.Muid, b, ok = gstring(b); !ok { + return nil, ErrBadStat + } + + return &d, nil +} + +// pbit8 copies the 8-bit number v to b and returns the remaining slice of b. +func pbit8(b []byte, v uint8) []byte { + b[0] = byte(v) + return b[1:] +} + +// pbit16 copies the 16-bit number v to b in little-endian order and returns the remaining slice of b. +func pbit16(b []byte, v uint16) []byte { + b[0] = byte(v) + b[1] = byte(v >> 8) + return b[2:] +} + +// pbit32 copies the 32-bit number v to b in little-endian order and returns the remaining slice of b. +func pbit32(b []byte, v uint32) []byte { + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + return b[4:] +} + +// pbit64 copies the 64-bit number v to b in little-endian order and returns the remaining slice of b. +func pbit64(b []byte, v uint64) []byte { + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + b[4] = byte(v >> 32) + b[5] = byte(v >> 40) + b[6] = byte(v >> 48) + b[7] = byte(v >> 56) + return b[8:] +} + +// pstring copies the string s to b, prepending it with a 16-bit length in little-endian order, and +// returning the remaining slice of b.. +func pstring(b []byte, s string) []byte { + b = pbit16(b, uint16(len(s))) + n := copy(b, s) + return b[n:] +} + +// gbit8 reads an 8-bit number from b and returns it with the remaining slice of b. +func gbit8(b []byte) (uint8, []byte) { + return uint8(b[0]), b[1:] +} + +// gbit16 reads a 16-bit number in little-endian order from b and returns it with the remaining slice of b. +func gbit16(b []byte) (uint16, []byte) { + return uint16(b[0]) | uint16(b[1])<<8, b[2:] +} + +// gbit32 reads a 32-bit number in little-endian order from b and returns it with the remaining slice of b. +func gbit32(b []byte) (uint32, []byte) { + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24, b[4:] +} + +// gbit64 reads a 64-bit number in little-endian order from b and returns it with the remaining slice of b. +func gbit64(b []byte) (uint64, []byte) { + lo := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + hi := uint32(b[4]) | uint32(b[5])<<8 | uint32(b[6])<<16 | uint32(b[7])<<24 + return uint64(lo) | uint64(hi)<<32, b[8:] +} + +// gstring reads a string from b, prefixed with a 16-bit length in little-endian order. +// It returns the string with the remaining slice of b and a boolean. If the length is +// greater than the number of bytes in b, the boolean will be false. +func gstring(b []byte) (string, []byte, bool) { + n, b := gbit16(b) + if int(n) > len(b) { + return "", b, false + } + return string(b[:n]), b[n:], true +} diff --git a/vendor/golang.org/x/sys/plan9/env_plan9.go b/vendor/golang.org/x/sys/plan9/env_plan9.go new file mode 100644 index 0000000..8f19180 --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/env_plan9.go @@ -0,0 +1,31 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Plan 9 environment variables. + +package plan9 + +import ( + "syscall" +) + +func Getenv(key string) (value string, found bool) { + return syscall.Getenv(key) +} + +func Setenv(key, value string) error { + return syscall.Setenv(key, value) +} + +func Clearenv() { + syscall.Clearenv() +} + +func Environ() []string { + return syscall.Environ() +} + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/golang.org/x/sys/plan9/errors_plan9.go b/vendor/golang.org/x/sys/plan9/errors_plan9.go new file mode 100644 index 0000000..65fe74d --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/errors_plan9.go @@ -0,0 +1,50 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plan9 + +import "syscall" + +// Constants +const ( + // Invented values to support what package os expects. + O_CREAT = 0x02000 + O_APPEND = 0x00400 + O_NOCTTY = 0x00000 + O_NONBLOCK = 0x00000 + O_SYNC = 0x00000 + O_ASYNC = 0x00000 + + S_IFMT = 0x1f000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 +) + +// Errors +var ( + EINVAL = syscall.NewError("bad arg in system call") + ENOTDIR = syscall.NewError("not a directory") + EISDIR = syscall.NewError("file is a directory") + ENOENT = syscall.NewError("file does not exist") + EEXIST = syscall.NewError("file already exists") + EMFILE = syscall.NewError("no free file descriptors") + EIO = syscall.NewError("i/o error") + ENAMETOOLONG = syscall.NewError("file name too long") + EINTR = syscall.NewError("interrupted") + EPERM = syscall.NewError("permission denied") + EBUSY = syscall.NewError("no free devices") + ETIMEDOUT = syscall.NewError("connection timed out") + EPLAN9 = syscall.NewError("not supported by plan 9") + + // The following errors do not correspond to any + // Plan 9 system messages. Invented to support + // what package os and others expect. + EACCES = syscall.NewError("access permission denied") + EAFNOSUPPORT = syscall.NewError("address family not supported by protocol") +) diff --git a/vendor/golang.org/x/sys/plan9/mkall.sh b/vendor/golang.org/x/sys/plan9/mkall.sh new file mode 100755 index 0000000..9f73c60 --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/mkall.sh @@ -0,0 +1,138 @@ +#!/usr/bin/env bash +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# The plan9 package provides access to the raw system call +# interface of the underlying operating system. Porting Go to +# a new architecture/operating system combination requires +# some manual effort, though there are tools that automate +# much of the process. The auto-generated files have names +# beginning with z. +# +# This script runs or (given -n) prints suggested commands to generate z files +# for the current system. Running those commands is not automatic. +# This script is documentation more than anything else. +# +# * asm_${GOOS}_${GOARCH}.s +# +# This hand-written assembly file implements system call dispatch. +# There are three entry points: +# +# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); +# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr); +# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); +# +# The first and second are the standard ones; they differ only in +# how many arguments can be passed to the kernel. +# The third is for low-level use by the ForkExec wrapper; +# unlike the first two, it does not call into the scheduler to +# let it know that a system call is running. +# +# * syscall_${GOOS}.go +# +# This hand-written Go file implements system calls that need +# special handling and lists "//sys" comments giving prototypes +# for ones that can be auto-generated. Mksyscall reads those +# comments to generate the stubs. +# +# * syscall_${GOOS}_${GOARCH}.go +# +# Same as syscall_${GOOS}.go except that it contains code specific +# to ${GOOS} on one particular architecture. +# +# * types_${GOOS}.c +# +# This hand-written C file includes standard C headers and then +# creates typedef or enum names beginning with a dollar sign +# (use of $ in variable names is a gcc extension). The hardest +# part about preparing this file is figuring out which headers to +# include and which symbols need to be #defined to get the +# actual data structures that pass through to the kernel system calls. +# Some C libraries present alternate versions for binary compatibility +# and translate them on the way in and out of system calls, but +# there is almost always a #define that can get the real ones. +# See types_darwin.c and types_linux.c for examples. +# +# * zerror_${GOOS}_${GOARCH}.go +# +# This machine-generated file defines the system's error numbers, +# error strings, and signal numbers. The generator is "mkerrors.sh". +# Usually no arguments are needed, but mkerrors.sh will pass its +# arguments on to godefs. +# +# * zsyscall_${GOOS}_${GOARCH}.go +# +# Generated by mksyscall.pl; see syscall_${GOOS}.go above. +# +# * zsysnum_${GOOS}_${GOARCH}.go +# +# Generated by mksysnum_${GOOS}. +# +# * ztypes_${GOOS}_${GOARCH}.go +# +# Generated by godefs; see types_${GOOS}.c above. + +GOOSARCH="${GOOS}_${GOARCH}" + +# defaults +mksyscall="./mksyscall.pl" +mkerrors="./mkerrors.sh" +zerrors="zerrors_$GOOSARCH.go" +mksysctl="" +zsysctl="zsysctl_$GOOSARCH.go" +mksysnum= +mktypes= +run="sh" + +case "$1" in +-syscalls) + for i in zsyscall*go + do + sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i + rm _$i + done + exit 0 + ;; +-n) + run="cat" + shift +esac + +case "$#" in +0) + ;; +*) + echo 'usage: mkall.sh [-n]' 1>&2 + exit 2 +esac + +case "$GOOSARCH" in +_* | *_ | _) + echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 + exit 1 + ;; +plan9_386) + mkerrors= + mksyscall="./mksyscall.pl -l32 -plan9" + mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h" + mktypes="XXX" + ;; +*) + echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 + exit 1 + ;; +esac + +( + if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi + case "$GOOS" in + plan9) + syscall_goos="syscall_$GOOS.go" + if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos syscall_$GOOSARCH.go |gofmt >zsyscall_$GOOSARCH.go"; fi + ;; + esac + if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi + if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi + if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |gofmt >ztypes_$GOOSARCH.go"; fi +) | $run diff --git a/vendor/golang.org/x/sys/plan9/mkerrors.sh b/vendor/golang.org/x/sys/plan9/mkerrors.sh new file mode 100755 index 0000000..052c86d --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/mkerrors.sh @@ -0,0 +1,246 @@ +#!/usr/bin/env bash +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Generate Go code listing errors and other #defined constant +# values (ENAMETOOLONG etc.), by asking the preprocessor +# about the definitions. + +unset LANG +export LC_ALL=C +export LC_CTYPE=C + +CC=${CC:-gcc} + +uname=$(uname) + +includes=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +' + +ccflags="$@" + +# Write go tool cgo -godefs input. +( + echo package plan9 + echo + echo '/*' + indirect="includes_$(uname)" + echo "${!indirect} $includes" + echo '*/' + echo 'import "C"' + echo + echo 'const (' + + # The gcc command line prints all the #defines + # it encounters while processing the input + echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags | + awk ' + $1 != "#define" || $2 ~ /\(/ || $3 == "" {next} + + $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers + $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next} + $2 ~ /^(SCM_SRCRT)$/ {next} + $2 ~ /^(MAP_FAILED)$/ {next} + + $2 !~ /^ETH_/ && + $2 !~ /^EPROC_/ && + $2 !~ /^EQUIV_/ && + $2 !~ /^EXPR_/ && + $2 ~ /^E[A-Z0-9_]+$/ || + $2 ~ /^B[0-9_]+$/ || + $2 ~ /^V[A-Z0-9]+$/ || + $2 ~ /^CS[A-Z0-9]/ || + $2 ~ /^I(SIG|CANON|CRNL|EXTEN|MAXBEL|STRIP|UTF8)$/ || + $2 ~ /^IGN/ || + $2 ~ /^IX(ON|ANY|OFF)$/ || + $2 ~ /^IN(LCR|PCK)$/ || + $2 ~ /(^FLU?SH)|(FLU?SH$)/ || + $2 ~ /^C(LOCAL|READ)$/ || + $2 == "BRKINT" || + $2 == "HUPCL" || + $2 == "PENDIN" || + $2 == "TOSTOP" || + $2 ~ /^PAR/ || + $2 ~ /^SIG[^_]/ || + $2 ~ /^O[CNPFP][A-Z]+[^_][A-Z]+$/ || + $2 ~ /^IN_/ || + $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 == "ICMPV6_FILTER" || + $2 == "SOMAXCONN" || + $2 == "NAME_MAX" || + $2 == "IFNAMSIZ" || + $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ || + $2 ~ /^SYSCTL_VERS/ || + $2 ~ /^(MS|MNT)_/ || + $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || + $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ || + $2 ~ /^LINUX_REBOOT_CMD_/ || + $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || + $2 !~ "NLA_TYPE_MASK" && + $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ || + $2 ~ /^SIOC/ || + $2 ~ /^TIOC/ || + $2 !~ "RTF_BITS" && + $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || + $2 ~ /^BIOC/ || + $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || + $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ || + $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || + $2 ~ /^CLONE_[A-Z_]+/ || + $2 !~ /^(BPF_TIMEVAL)$/ && + $2 ~ /^(BPF|DLT)_/ || + $2 !~ "WMESGLEN" && + $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)} + $2 ~ /^__WCOREFLAG$/ {next} + $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} + + {next} + ' | sort + + echo ')' +) >_const.go + +# Pull out the error names for later. +errors=$( + echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' | + sort +) + +# Pull out the signal names for later. +signals=$( + echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | + egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | + sort +) + +# Again, writing regexps to a file. +echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' | + sort >_error.grep +echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | + egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | + sort >_signal.grep + +echo '// mkerrors.sh' "$@" +echo '// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT' +echo +go tool cgo -godefs -- "$@" _const.go >_error.out +cat _error.out | grep -vf _error.grep | grep -vf _signal.grep +echo +echo '// Errors' +echo 'const (' +cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= Errno(\1)/' +echo ')' + +echo +echo '// Signals' +echo 'const (' +cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= Signal(\1)/' +echo ')' + +# Run C program to print error and syscall strings. +( + echo -E " +#include +#include +#include +#include +#include +#include + +#define nelem(x) (sizeof(x)/sizeof((x)[0])) + +enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below + +int errors[] = { +" + for i in $errors + do + echo -E ' '$i, + done + + echo -E " +}; + +int signals[] = { +" + for i in $signals + do + echo -E ' '$i, + done + + # Use -E because on some systems bash builtin interprets \n itself. + echo -E ' +}; + +static int +intcmp(const void *a, const void *b) +{ + return *(int*)a - *(int*)b; +} + +int +main(void) +{ + int i, j, e; + char buf[1024], *p; + + printf("\n\n// Error table\n"); + printf("var errors = [...]string {\n"); + qsort(errors, nelem(errors), sizeof errors[0], intcmp); + for(i=0; i 0 && errors[i-1] == e) + continue; + strcpy(buf, strerror(e)); + // lowercase first letter: Bad -> bad, but STREAM -> STREAM. + if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) + buf[0] += a - A; + printf("\t%d: \"%s\",\n", e, buf); + } + printf("}\n\n"); + + printf("\n\n// Signal table\n"); + printf("var signals = [...]string {\n"); + qsort(signals, nelem(signals), sizeof signals[0], intcmp); + for(i=0; i 0 && signals[i-1] == e) + continue; + strcpy(buf, strsignal(e)); + // lowercase first letter: Bad -> bad, but STREAM -> STREAM. + if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) + buf[0] += a - A; + // cut trailing : number. + p = strrchr(buf, ":"[0]); + if(p) + *p = '\0'; + printf("\t%d: \"%s\",\n", e, buf); + } + printf("}\n\n"); + + return 0; +} + +' +) >_errors.c + +$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out diff --git a/vendor/golang.org/x/sys/plan9/mksyscall.pl b/vendor/golang.org/x/sys/plan9/mksyscall.pl new file mode 100755 index 0000000..ce8e1e4 --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/mksyscall.pl @@ -0,0 +1,319 @@ +#!/usr/bin/env perl +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# This program reads a file containing function prototypes +# (like syscall_plan9.go) and generates system call bodies. +# The prototypes are marked by lines beginning with "//sys" +# and read like func declarations if //sys is replaced by func, but: +# * The parameter lists must give a name for each argument. +# This includes return parameters. +# * The parameter lists must give a type for each argument: +# the (x, y, z int) shorthand is not allowed. +# * If the return parameter is an error number, it must be named errno. + +# A line beginning with //sysnb is like //sys, except that the +# goroutine will not be suspended during the execution of the system +# call. This must only be used for system calls which can never +# block, as otherwise the system call could cause all goroutines to +# hang. + +use strict; + +my $cmdline = "mksyscall.pl " . join(' ', @ARGV); +my $errors = 0; +my $_32bit = ""; +my $plan9 = 0; +my $openbsd = 0; +my $netbsd = 0; +my $dragonfly = 0; +my $nacl = 0; +my $arm = 0; # 64-bit value should use (even, odd)-pair + +if($ARGV[0] eq "-b32") { + $_32bit = "big-endian"; + shift; +} elsif($ARGV[0] eq "-l32") { + $_32bit = "little-endian"; + shift; +} +if($ARGV[0] eq "-plan9") { + $plan9 = 1; + shift; +} +if($ARGV[0] eq "-openbsd") { + $openbsd = 1; + shift; +} +if($ARGV[0] eq "-netbsd") { + $netbsd = 1; + shift; +} +if($ARGV[0] eq "-dragonfly") { + $dragonfly = 1; + shift; +} +if($ARGV[0] eq "-nacl") { + $nacl = 1; + shift; +} +if($ARGV[0] eq "-arm") { + $arm = 1; + shift; +} + +if($ARGV[0] =~ /^-/) { + print STDERR "usage: mksyscall.pl [-b32 | -l32] [file ...]\n"; + exit 1; +} + +sub parseparamlist($) { + my ($list) = @_; + $list =~ s/^\s*//; + $list =~ s/\s*$//; + if($list eq "") { + return (); + } + return split(/\s*,\s*/, $list); +} + +sub parseparam($) { + my ($p) = @_; + if($p !~ /^(\S*) (\S*)$/) { + print STDERR "$ARGV:$.: malformed parameter: $p\n"; + $errors = 1; + return ("xx", "int"); + } + return ($1, $2); +} + +my $text = ""; +while(<>) { + chomp; + s/\s+/ /g; + s/^\s+//; + s/\s+$//; + my $nonblock = /^\/\/sysnb /; + next if !/^\/\/sys / && !$nonblock; + + # Line must be of the form + # func Open(path string, mode int, perm int) (fd int, errno error) + # Split into name, in params, out params. + if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) { + print STDERR "$ARGV:$.: malformed //sys declaration\n"; + $errors = 1; + next; + } + my ($func, $in, $out, $sysname) = ($2, $3, $4, $5); + + # Split argument lists on comma. + my @in = parseparamlist($in); + my @out = parseparamlist($out); + + # Try in vain to keep people from editing this file. + # The theory is that they jump into the middle of the file + # without reading the header. + $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; + + # Go function header. + my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : ""; + $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; + + # Check if err return available + my $errvar = ""; + foreach my $p (@out) { + my ($name, $type) = parseparam($p); + if($type eq "error") { + $errvar = $name; + last; + } + } + + # Prepare arguments to Syscall. + my @args = (); + my @uses = (); + my $n = 0; + foreach my $p (@in) { + my ($name, $type) = parseparam($p); + if($type =~ /^\*/) { + push @args, "uintptr(unsafe.Pointer($name))"; + } elsif($type eq "string" && $errvar ne "") { + $text .= "\tvar _p$n *byte\n"; + $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n"; + $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + push @uses, "use(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type eq "string") { + print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; + $text .= "\tvar _p$n *byte\n"; + $text .= "\t_p$n, _ = BytePtrFromString($name)\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + push @uses, "use(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type =~ /^\[\](.*)/) { + # Convert slice into pointer, length. + # Have to be careful not to take address of &a[0] if len == 0: + # pass dummy pointer in that case. + # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). + $text .= "\tvar _p$n unsafe.Pointer\n"; + $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}"; + $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}"; + $text .= "\n"; + push @args, "uintptr(_p$n)", "uintptr(len($name))"; + $n++; + } elsif($type eq "int64" && ($openbsd || $netbsd)) { + push @args, "0"; + if($_32bit eq "big-endian") { + push @args, "uintptr($name>>32)", "uintptr($name)"; + } elsif($_32bit eq "little-endian") { + push @args, "uintptr($name)", "uintptr($name>>32)"; + } else { + push @args, "uintptr($name)"; + } + } elsif($type eq "int64" && $dragonfly) { + if ($func !~ /^extp(read|write)/i) { + push @args, "0"; + } + if($_32bit eq "big-endian") { + push @args, "uintptr($name>>32)", "uintptr($name)"; + } elsif($_32bit eq "little-endian") { + push @args, "uintptr($name)", "uintptr($name>>32)"; + } else { + push @args, "uintptr($name)"; + } + } elsif($type eq "int64" && $_32bit ne "") { + if(@args % 2 && $arm) { + # arm abi specifies 64-bit argument uses + # (even, odd) pair + push @args, "0" + } + if($_32bit eq "big-endian") { + push @args, "uintptr($name>>32)", "uintptr($name)"; + } else { + push @args, "uintptr($name)", "uintptr($name>>32)"; + } + } else { + push @args, "uintptr($name)"; + } + } + + # Determine which form to use; pad args with zeros. + my $asm = "Syscall"; + if ($nonblock) { + $asm = "RawSyscall"; + } + if(@args <= 3) { + while(@args < 3) { + push @args, "0"; + } + } elsif(@args <= 6) { + $asm .= "6"; + while(@args < 6) { + push @args, "0"; + } + } elsif(@args <= 9) { + $asm .= "9"; + while(@args < 9) { + push @args, "0"; + } + } else { + print STDERR "$ARGV:$.: too many arguments to system call\n"; + } + + # System call number. + if($sysname eq "") { + $sysname = "SYS_$func"; + $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar + $sysname =~ y/a-z/A-Z/; + if($nacl) { + $sysname =~ y/A-Z/a-z/; + } + } + + # Actual call. + my $args = join(', ', @args); + my $call = "$asm($sysname, $args)"; + + # Assign return values. + my $body = ""; + my @ret = ("_", "_", "_"); + my $do_errno = 0; + for(my $i=0; $i<@out; $i++) { + my $p = $out[$i]; + my ($name, $type) = parseparam($p); + my $reg = ""; + if($name eq "err" && !$plan9) { + $reg = "e1"; + $ret[2] = $reg; + $do_errno = 1; + } elsif($name eq "err" && $plan9) { + $ret[0] = "r0"; + $ret[2] = "e1"; + next; + } else { + $reg = sprintf("r%d", $i); + $ret[$i] = $reg; + } + if($type eq "bool") { + $reg = "$reg != 0"; + } + if($type eq "int64" && $_32bit ne "") { + # 64-bit number in r1:r0 or r0:r1. + if($i+2 > @out) { + print STDERR "$ARGV:$.: not enough registers for int64 return\n"; + } + if($_32bit eq "big-endian") { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); + } else { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); + } + $ret[$i] = sprintf("r%d", $i); + $ret[$i+1] = sprintf("r%d", $i+1); + } + if($reg ne "e1" || $plan9) { + $body .= "\t$name = $type($reg)\n"; + } + } + if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { + $text .= "\t$call\n"; + } else { + $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; + } + foreach my $use (@uses) { + $text .= "\t$use\n"; + } + $text .= $body; + + if ($plan9 && $ret[2] eq "e1") { + $text .= "\tif int32(r0) == -1 {\n"; + $text .= "\t\terr = e1\n"; + $text .= "\t}\n"; + } elsif ($do_errno) { + $text .= "\tif e1 != 0 {\n"; + $text .= "\t\terr = e1\n"; + $text .= "\t}\n"; + } + $text .= "\treturn\n"; + $text .= "}\n\n"; +} + +chomp $text; +chomp $text; + +if($errors) { + exit 1; +} + +print <= 10 { + buf[i] = byte(val%10 + '0') + i-- + val /= 10 + } + buf[i] = byte(val + '0') + return string(buf[i:]) +} diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go new file mode 100644 index 0000000..5046cfe --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -0,0 +1,74 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 + +// Package plan9 contains an interface to the low-level operating system +// primitives. OS details vary depending on the underlying system, and +// by default, godoc will display the OS-specific documentation for the current +// system. If you want godoc to display documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if +// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS +// to freebsd and $GOARCH to arm. +// The primary use of this package is inside other packages that provide a more +// portable interface to the system, such as "os", "time" and "net". Use +// those packages rather than this one if you can. +// For details of the functions and data types in this package consult +// the manuals for the appropriate operating system. +// These calls return err == nil to indicate success; otherwise +// err represents an operating system error describing the failure and +// holds a value of type syscall.ErrorString. +package plan9 // import "golang.org/x/sys/plan9" + +import "unsafe" + +// ByteSliceFromString returns a NUL-terminated slice of bytes +// containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, EINVAL). +func ByteSliceFromString(s string) ([]byte, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, EINVAL + } + } + a := make([]byte, len(s)+1) + copy(a, s) + return a, nil +} + +// BytePtrFromString returns a pointer to a NUL-terminated array of +// bytes containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, EINVAL). +func BytePtrFromString(s string) (*byte, error) { + a, err := ByteSliceFromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mksyscall.pl. +var _zero uintptr + +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +//go:noescape +func use(p unsafe.Pointer) diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go new file mode 100644 index 0000000..84e1471 --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go @@ -0,0 +1,349 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Plan 9 system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and +// wrap it in our own nicer implementation. + +package plan9 + +import ( + "bytes" + "syscall" + "unsafe" +) + +// A Note is a string describing a process note. +// It implements the os.Signal interface. +type Note string + +func (n Note) Signal() {} + +func (n Note) String() string { + return string(n) +} + +var ( + Stdin = 0 + Stdout = 1 + Stderr = 2 +) + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. +var SocketDisableIPv6 bool + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.ErrorString) +func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.ErrorString) +func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) +func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) + +func atoi(b []byte) (n uint) { + n = 0 + for i := 0; i < len(b); i++ { + n = n*10 + uint(b[i]-'0') + } + return +} + +func cstring(s []byte) string { + i := bytes.IndexByte(s, 0) + if i == -1 { + i = len(s) + } + return string(s[:i]) +} + +func errstr() string { + var buf [ERRMAX]byte + + RawSyscall(SYS_ERRSTR, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)), 0) + + buf[len(buf)-1] = 0 + return cstring(buf[:]) +} + +// Implemented in assembly to import from runtime. +func exit(code int) + +func Exit(code int) { exit(code) } + +func readnum(path string) (uint, error) { + var b [12]byte + + fd, e := Open(path, O_RDONLY) + if e != nil { + return 0, e + } + defer Close(fd) + + n, e := Pread(fd, b[:], 0) + + if e != nil { + return 0, e + } + + m := 0 + for ; m < n && b[m] == ' '; m++ { + } + + return atoi(b[m : n-1]), nil +} + +func Getpid() (pid int) { + n, _ := readnum("#c/pid") + return int(n) +} + +func Getppid() (ppid int) { + n, _ := readnum("#c/ppid") + return int(n) +} + +func Read(fd int, p []byte) (n int, err error) { + return Pread(fd, p, -1) +} + +func Write(fd int, p []byte) (n int, err error) { + return Pwrite(fd, p, -1) +} + +var ioSync int64 + +//sys fd2path(fd int, buf []byte) (err error) +func Fd2path(fd int) (path string, err error) { + var buf [512]byte + + e := fd2path(fd, buf[:]) + if e != nil { + return "", e + } + return cstring(buf[:]), nil +} + +//sys pipe(p *[2]int32) (err error) +func Pipe(p []int) (err error) { + if len(p) != 2 { + return syscall.ErrorString("bad arg in system call") + } + var pp [2]int32 + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +// Underlying system call writes to newoffset via pointer. +// Implemented in assembly to avoid allocation. +func seek(placeholder uintptr, fd int, offset int64, whence int) (newoffset int64, err string) + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + newoffset, e := seek(0, fd, offset, whence) + + if newoffset == -1 { + err = syscall.ErrorString(e) + } + return +} + +func Mkdir(path string, mode uint32) (err error) { + fd, err := Create(path, O_RDONLY, DMDIR|mode) + + if fd != -1 { + Close(fd) + } + + return +} + +type Waitmsg struct { + Pid int + Time [3]uint32 + Msg string +} + +func (w Waitmsg) Exited() bool { return true } +func (w Waitmsg) Signaled() bool { return false } + +func (w Waitmsg) ExitStatus() int { + if len(w.Msg) == 0 { + // a normal exit returns no message + return 0 + } + return 1 +} + +//sys await(s []byte) (n int, err error) +func Await(w *Waitmsg) (err error) { + var buf [512]byte + var f [5][]byte + + n, err := await(buf[:]) + + if err != nil || w == nil { + return + } + + nf := 0 + p := 0 + for i := 0; i < n && nf < len(f)-1; i++ { + if buf[i] == ' ' { + f[nf] = buf[p:i] + p = i + 1 + nf++ + } + } + f[nf] = buf[p:] + nf++ + + if nf != len(f) { + return syscall.ErrorString("invalid wait message") + } + w.Pid = int(atoi(f[0])) + w.Time[0] = uint32(atoi(f[1])) + w.Time[1] = uint32(atoi(f[2])) + w.Time[2] = uint32(atoi(f[3])) + w.Msg = cstring(f[4]) + if w.Msg == "''" { + // await() returns '' for no error + w.Msg = "" + } + return +} + +func Unmount(name, old string) (err error) { + fixwd() + oldp, err := BytePtrFromString(old) + if err != nil { + return err + } + oldptr := uintptr(unsafe.Pointer(oldp)) + + var r0 uintptr + var e syscall.ErrorString + + // bind(2) man page: If name is zero, everything bound or mounted upon old is unbound or unmounted. + if name == "" { + r0, _, e = Syscall(SYS_UNMOUNT, _zero, oldptr, 0) + } else { + namep, err := BytePtrFromString(name) + if err != nil { + return err + } + r0, _, e = Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(namep)), oldptr, 0) + } + + if int32(r0) == -1 { + err = e + } + return +} + +func Fchdir(fd int) (err error) { + path, err := Fd2path(fd) + + if err != nil { + return + } + + return Chdir(path) +} + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int32(nsec / 1e9) + return +} + +func nsec() int64 { + var scratch int64 + + r0, _, _ := Syscall(SYS_NSEC, uintptr(unsafe.Pointer(&scratch)), 0, 0) + // TODO(aram): remove hack after I fix _nsec in the pc64 kernel. + if r0 == 0 { + return scratch + } + return int64(r0) +} + +func Gettimeofday(tv *Timeval) error { + nsec := nsec() + *tv = NsecToTimeval(nsec) + return nil +} + +func Getpagesize() int { return 0x1000 } + +func Getegid() (egid int) { return -1 } +func Geteuid() (euid int) { return -1 } +func Getgid() (gid int) { return -1 } +func Getuid() (uid int) { return -1 } + +func Getgroups() (gids []int, err error) { + return make([]int, 0), nil +} + +//sys open(path string, mode int) (fd int, err error) +func Open(path string, mode int) (fd int, err error) { + fixwd() + return open(path, mode) +} + +//sys create(path string, mode int, perm uint32) (fd int, err error) +func Create(path string, mode int, perm uint32) (fd int, err error) { + fixwd() + return create(path, mode, perm) +} + +//sys remove(path string) (err error) +func Remove(path string) error { + fixwd() + return remove(path) +} + +//sys stat(path string, edir []byte) (n int, err error) +func Stat(path string, edir []byte) (n int, err error) { + fixwd() + return stat(path, edir) +} + +//sys bind(name string, old string, flag int) (err error) +func Bind(name string, old string, flag int) (err error) { + fixwd() + return bind(name, old, flag) +} + +//sys mount(fd int, afd int, old string, flag int, aname string) (err error) +func Mount(fd int, afd int, old string, flag int, aname string) (err error) { + fixwd() + return mount(fd, afd, old, flag, aname) +} + +//sys wstat(path string, edir []byte) (err error) +func Wstat(path string, edir []byte) (err error) { + fixwd() + return wstat(path, edir) +} + +//sys chdir(path string) (err error) +//sys Dup(oldfd int, newfd int) (fd int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys Close(fd int) (err error) +//sys Fstat(fd int, edir []byte) (n int, err error) +//sys Fwstat(fd int, edir []byte) (err error) diff --git a/vendor/golang.org/x/sys/plan9/syscall_test.go b/vendor/golang.org/x/sys/plan9/syscall_test.go new file mode 100644 index 0000000..8f829ba --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/syscall_test.go @@ -0,0 +1,33 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 + +package plan9_test + +import ( + "testing" + + "golang.org/x/sys/plan9" +) + +func testSetGetenv(t *testing.T, key, value string) { + err := plan9.Setenv(key, value) + if err != nil { + t.Fatalf("Setenv failed to set %q: %v", value, err) + } + newvalue, found := plan9.Getenv(key) + if !found { + t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value) + } + if newvalue != value { + t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value) + } +} + +func TestEnv(t *testing.T) { + testSetGetenv(t, "TESTENV", "AVALUE") + // make sure TESTENV gets set to "", not deleted + testSetGetenv(t, "TESTENV", "") +} diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go new file mode 100644 index 0000000..b35598a --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go @@ -0,0 +1,292 @@ +// mksyscall.pl -l32 -plan9 syscall_plan9.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package plan9 + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fd2path(fd int, buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]int32) (err error) { + r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func await(s []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(s) > 0 { + _p0 = unsafe.Pointer(&s[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func open(path string, mode int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func create(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func remove(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func stat(path string, edir []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(edir) > 0 { + _p1 = unsafe.Pointer(&edir[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(name string, old string, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(old) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(fd int, afd int, old string, flag int, aname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(old) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(aname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wstat(path string, edir []byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(edir) > 0 { + _p1 = unsafe.Pointer(&edir[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) + use(unsafe.Pointer(_p0)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int, newfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0) + fd = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, edir []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(edir) > 0 { + _p0 = unsafe.Pointer(&edir[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fwstat(fd int, edir []byte) (err error) { + var _p0 unsafe.Pointer + if len(edir) > 0 { + _p0 = unsafe.Pointer(&edir[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) + if int32(r0) == -1 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go new file mode 100644 index 0000000..b35598a --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go @@ -0,0 +1,292 @@ +// mksyscall.pl -l32 -plan9 syscall_plan9.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package plan9 + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fd2path(fd int, buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]int32) (err error) { + r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func await(s []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(s) > 0 { + _p0 = unsafe.Pointer(&s[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func open(path string, mode int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func create(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func remove(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func stat(path string, edir []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(edir) > 0 { + _p1 = unsafe.Pointer(&edir[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) + use(unsafe.Pointer(_p0)) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(name string, old string, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(old) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(fd int, afd int, old string, flag int, aname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(old) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(aname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wstat(path string, edir []byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(edir) > 0 { + _p1 = unsafe.Pointer(&edir[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) + use(unsafe.Pointer(_p0)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int, newfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0) + fd = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, edir []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(edir) > 0 { + _p0 = unsafe.Pointer(&edir[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fwstat(fd int, edir []byte) (err error) { + var _p0 unsafe.Pointer + if len(edir) > 0 { + _p0 = unsafe.Pointer(&edir[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) + if int32(r0) == -1 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go new file mode 100644 index 0000000..8dd8723 --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go @@ -0,0 +1,284 @@ +// mksyscall.pl -l32 -plan9 -tags plan9,arm syscall_plan9.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build plan9,arm + +package plan9 + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fd2path(fd int, buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]int32) (err error) { + r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func await(s []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(s) > 0 { + _p0 = unsafe.Pointer(&s[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func open(path string, mode int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + fd = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func create(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func remove(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func stat(path string, edir []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(edir) > 0 { + _p1 = unsafe.Pointer(&edir[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(name string, old string, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(old) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(fd int, afd int, old string, flag int, aname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(old) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(aname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wstat(path string, edir []byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(edir) > 0 { + _p1 = unsafe.Pointer(&edir[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int, newfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0) + fd = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, edir []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(edir) > 0 { + _p0 = unsafe.Pointer(&edir[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) + n = int(r0) + if int32(r0) == -1 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fwstat(fd int, edir []byte) (err error) { + var _p0 unsafe.Pointer + if len(edir) > 0 { + _p0 = unsafe.Pointer(&edir[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) + if int32(r0) == -1 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go b/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go new file mode 100644 index 0000000..22e8abd --- /dev/null +++ b/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go @@ -0,0 +1,49 @@ +// mksysnum_plan9.sh /opt/plan9/sys/src/libc/9syscall/sys.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +package plan9 + +const ( + SYS_SYSR1 = 0 + SYS_BIND = 2 + SYS_CHDIR = 3 + SYS_CLOSE = 4 + SYS_DUP = 5 + SYS_ALARM = 6 + SYS_EXEC = 7 + SYS_EXITS = 8 + SYS_FAUTH = 10 + SYS_SEGBRK = 12 + SYS_OPEN = 14 + SYS_OSEEK = 16 + SYS_SLEEP = 17 + SYS_RFORK = 19 + SYS_PIPE = 21 + SYS_CREATE = 22 + SYS_FD2PATH = 23 + SYS_BRK_ = 24 + SYS_REMOVE = 25 + SYS_NOTIFY = 28 + SYS_NOTED = 29 + SYS_SEGATTACH = 30 + SYS_SEGDETACH = 31 + SYS_SEGFREE = 32 + SYS_SEGFLUSH = 33 + SYS_RENDEZVOUS = 34 + SYS_UNMOUNT = 35 + SYS_SEMACQUIRE = 37 + SYS_SEMRELEASE = 38 + SYS_SEEK = 39 + SYS_FVERSION = 40 + SYS_ERRSTR = 41 + SYS_STAT = 42 + SYS_FSTAT = 43 + SYS_WSTAT = 44 + SYS_FWSTAT = 45 + SYS_MOUNT = 46 + SYS_AWAIT = 47 + SYS_PREAD = 50 + SYS_PWRITE = 51 + SYS_TSEMACQUIRE = 52 + SYS_NSEC = 53 +) diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore new file mode 100644 index 0000000..e3e0fc6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/.gitignore @@ -0,0 +1,2 @@ +_obj/ +unix.test diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md new file mode 100644 index 0000000..bc6f603 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/README.md @@ -0,0 +1,173 @@ +# Building `sys/unix` + +The sys/unix package provides access to the raw system call interface of the +underlying operating system. See: https://godoc.org/golang.org/x/sys/unix + +Porting Go to a new architecture/OS combination or adding syscalls, types, or +constants to an existing architecture/OS pair requires some manual effort; +however, there are tools that automate much of the process. + +## Build Systems + +There are currently two ways we generate the necessary files. We are currently +migrating the build system to use containers so the builds are reproducible. +This is being done on an OS-by-OS basis. Please update this documentation as +components of the build system change. + +### Old Build System (currently for `GOOS != "Linux" || GOARCH == "sparc64"`) + +The old build system generates the Go files based on the C header files +present on your system. This means that files +for a given GOOS/GOARCH pair must be generated on a system with that OS and +architecture. This also means that the generated code can differ from system +to system, based on differences in the header files. + +To avoid this, if you are using the old build system, only generate the Go +files on an installation with unmodified header files. It is also important to +keep track of which version of the OS the files were generated from (ex. +Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes +and have each OS upgrade correspond to a single change. + +To build the files for your current OS and architecture, make sure GOOS and +GOARCH are set correctly and run `mkall.sh`. This will generate the files for +your specific system. Running `mkall.sh -n` shows the commands that will be run. + +Requirements: bash, perl, go + +### New Build System (currently for `GOOS == "Linux" && GOARCH != "sparc64"`) + +The new build system uses a Docker container to generate the go files directly +from source checkouts of the kernel and various system libraries. This means +that on any platform that supports Docker, all the files using the new build +system can be generated at once, and generated files will not change based on +what the person running the scripts has installed on their computer. + +The OS specific files for the new build system are located in the `${GOOS}` +directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When +the kernel or system library updates, modify the Dockerfile at +`${GOOS}/Dockerfile` to checkout the new release of the source. + +To build all the files under the new build system, you must be on an amd64/Linux +system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will +then generate all of the files for all of the GOOS/GOARCH pairs in the new build +system. Running `mkall.sh -n` shows the commands that will be run. + +Requirements: bash, perl, go, docker + +## Component files + +This section describes the various files used in the code generation process. +It also contains instructions on how to modify these files to add a new +architecture/OS or to add additional syscalls, types, or constants. Note that +if you are using the new build system, the scripts cannot be called normally. +They must be called from within the docker container. + +### asm files + +The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system +call dispatch. There are three entry points: +``` + func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) + func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) + func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) +``` +The first and second are the standard ones; they differ only in how many +arguments can be passed to the kernel. The third is for low-level use by the +ForkExec wrapper. Unlike the first two, it does not call into the scheduler to +let it know that a system call is running. + +When porting Go to an new architecture/OS, this file must be implemented for +each GOOS/GOARCH pair. + +### mksysnum + +Mksysnum is a script located at `${GOOS}/mksysnum.pl` (or `mksysnum_${GOOS}.pl` +for the old system). This script takes in a list of header files containing the +syscall number declarations and parses them to produce the corresponding list of +Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated +constants. + +Adding new syscall numbers is mostly done by running the build on a sufficiently +new installation of the target OS (or updating the source checkouts for the +new build system). However, depending on the OS, you make need to update the +parsing in mksysnum. + +### mksyscall.pl + +The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are +hand-written Go files which implement system calls (for unix, the specific OS, +or the specific OS/Architecture pair respectively) that need special handling +and list `//sys` comments giving prototypes for ones that can be generated. + +The mksyscall.pl script takes the `//sys` and `//sysnb` comments and converts +them into syscalls. This requires the name of the prototype in the comment to +match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function +prototype can be exported (capitalized) or not. + +Adding a new syscall often just requires adding a new `//sys` function prototype +with the desired arguments and a capitalized name so it is exported. However, if +you want the interface to the syscall to be different, often one will make an +unexported `//sys` prototype, an then write a custom wrapper in +`syscall_${GOOS}.go`. + +### types files + +For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or +`types_${GOOS}.go` on the old system). This file includes standard C headers and +creates Go type aliases to the corresponding C types. The file is then fed +through godef to get the Go compatible definitions. Finally, the generated code +is fed though mkpost.go to format the code correctly and remove any hidden or +private identifiers. This cleaned-up code is written to +`ztypes_${GOOS}_${GOARCH}.go`. + +The hardest part about preparing this file is figuring out which headers to +include and which symbols need to be `#define`d to get the actual data +structures that pass through to the kernel system calls. Some C libraries +preset alternate versions for binary compatibility and translate them on the +way in and out of system calls, but there is almost always a `#define` that can +get the real ones. +See `types_darwin.go` and `linux/types.go` for examples. + +To add a new type, add in the necessary include statement at the top of the +file (if it is not already there) and add in a type alias line. Note that if +your type is significantly different on different architectures, you may need +some `#if/#elif` macros in your include statements. + +### mkerrors.sh + +This script is used to generate the system's various constants. This doesn't +just include the error numbers and error strings, but also the signal numbers +an a wide variety of miscellaneous constants. The constants come from the list +of include files in the `includes_${uname}` variable. A regex then picks out +the desired `#define` statements, and generates the corresponding Go constants. +The error numbers and strings are generated from `#include `, and the +signal numbers and strings are generated from `#include `. All of +these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program, +`_errors.c`, which prints out all the constants. + +To add a constant, add the header that includes it to the appropriate variable. +Then, edit the regex (if necessary) to match the desired constant. Avoid making +the regex too broad to avoid matching unintended constants. + + +## Generated files + +### `zerror_${GOOS}_${GOARCH}.go` + +A file containing all of the system's generated error numbers, error strings, +signal numbers, and constants. Generated by `mkerrors.sh` (see above). + +### `zsyscall_${GOOS}_${GOARCH}.go` + +A file containing all the generated syscalls for a specific GOOS and GOARCH. +Generated by `mksyscall.pl` (see above). + +### `zsysnum_${GOOS}_${GOARCH}.go` + +A list of numeric constants for all the syscall number of the specific GOOS +and GOARCH. Generated by mksysnum (see above). + +### `ztypes_${GOOS}_${GOARCH}.go` + +A file containing Go types for passing into (or returning from) syscalls. +Generated by godefs and the types file (see above). diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go new file mode 100644 index 0000000..72afe33 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -0,0 +1,124 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// CPU affinity functions + +package unix + +import ( + "unsafe" +) + +const cpuSetSize = _CPU_SETSIZE / _NCPUBITS + +// CPUSet represents a CPU affinity mask. +type CPUSet [cpuSetSize]cpuMask + +func schedAffinity(trap uintptr, pid int, set *CPUSet) error { + _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set))) + if e != 0 { + return errnoErr(e) + } + return nil +} + +// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid. +// If pid is 0 the calling thread is used. +func SchedGetaffinity(pid int, set *CPUSet) error { + return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set) +} + +// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid. +// If pid is 0 the calling thread is used. +func SchedSetaffinity(pid int, set *CPUSet) error { + return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set) +} + +// Zero clears the set s, so that it contains no CPUs. +func (s *CPUSet) Zero() { + for i := range s { + s[i] = 0 + } +} + +func cpuBitsIndex(cpu int) int { + return cpu / _NCPUBITS +} + +func cpuBitsMask(cpu int) cpuMask { + return cpuMask(1 << (uint(cpu) % _NCPUBITS)) +} + +// Set adds cpu to the set s. +func (s *CPUSet) Set(cpu int) { + i := cpuBitsIndex(cpu) + if i < len(s) { + s[i] |= cpuBitsMask(cpu) + } +} + +// Clear removes cpu from the set s. +func (s *CPUSet) Clear(cpu int) { + i := cpuBitsIndex(cpu) + if i < len(s) { + s[i] &^= cpuBitsMask(cpu) + } +} + +// IsSet reports whether cpu is in the set s. +func (s *CPUSet) IsSet(cpu int) bool { + i := cpuBitsIndex(cpu) + if i < len(s) { + return s[i]&cpuBitsMask(cpu) != 0 + } + return false +} + +// Count returns the number of CPUs in the set s. +func (s *CPUSet) Count() int { + c := 0 + for _, b := range s { + c += onesCount64(uint64(b)) + } + return c +} + +// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64. +// Once this package can require Go 1.9, we can delete this +// and update the caller to use bits.OnesCount64. +func onesCount64(x uint64) int { + const m0 = 0x5555555555555555 // 01010101 ... + const m1 = 0x3333333333333333 // 00110011 ... + const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... + const m3 = 0x00ff00ff00ff00ff // etc. + const m4 = 0x0000ffff0000ffff + + // Implementation: Parallel summing of adjacent bits. + // See "Hacker's Delight", Chap. 5: Counting Bits. + // The following pattern shows the general approach: + // + // x = x>>1&(m0&m) + x&(m0&m) + // x = x>>2&(m1&m) + x&(m1&m) + // x = x>>4&(m2&m) + x&(m2&m) + // x = x>>8&(m3&m) + x&(m3&m) + // x = x>>16&(m4&m) + x&(m4&m) + // x = x>>32&(m5&m) + x&(m5&m) + // return int(x) + // + // Masking (& operations) can be left away when there's no + // danger that a field's sum will carry over into the next + // field: Since the result cannot be > 64, 8 bits is enough + // and we can ignore the masks for the shifts by 8 and up. + // Per "Hacker's Delight", the first line can be simplified + // more, but it saves at best one instruction, so we leave + // it alone for clarity. + const m = 1<<64 - 1 + x = x>>1&(m0&m) + x&(m0&m) + x = x>>2&(m1&m) + x&(m1&m) + x = (x>>4 + x) & (m2 & m) + x += x >> 8 + x += x >> 16 + x += x >> 32 + return int(x) & (1<<7 - 1) +} diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s new file mode 100644 index 0000000..8a72783 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_darwin_386.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for 386, Darwin +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s new file mode 100644 index 0000000..6321421 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for AMD64, Darwin +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s new file mode 100644 index 0000000..333242d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s @@ -0,0 +1,30 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo +// +build arm,darwin + +#include "textflag.h" + +// +// System call support for ARM, Darwin +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + B syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + B syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s new file mode 100644 index 0000000..97e0174 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s @@ -0,0 +1,30 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo +// +build arm64,darwin + +#include "textflag.h" + +// +// System call support for AMD64, Darwin +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + B syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + B syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s new file mode 100644 index 0000000..d5ed672 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for AMD64, DragonFly +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-64 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-88 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-112 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-64 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s new file mode 100644 index 0000000..c9a0a26 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for 386, FreeBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s new file mode 100644 index 0000000..3517247 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for AMD64, FreeBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s new file mode 100644 index 0000000..9227c87 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s @@ -0,0 +1,29 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for ARM, FreeBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + B syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + B syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s new file mode 100644 index 0000000..448bebb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -0,0 +1,65 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for 386, Linux +// + +// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80 +// instead of the glibc-specific "CALL 0x10(GS)". +#define INVOKE_SYSCALL INT $0x80 + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-24 + CALL runtime·entersyscall(SB) + MOVL trap+0(FP), AX // syscall entry + MOVL a1+4(FP), BX + MOVL a2+8(FP), CX + MOVL a3+12(FP), DX + MOVL $0, SI + MOVL $0, DI + INVOKE_SYSCALL + MOVL AX, r1+16(FP) + MOVL DX, r2+20(FP) + CALL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24 + MOVL trap+0(FP), AX // syscall entry + MOVL a1+4(FP), BX + MOVL a2+8(FP), CX + MOVL a3+12(FP), DX + MOVL $0, SI + MOVL $0, DI + INVOKE_SYSCALL + MOVL AX, r1+16(FP) + MOVL DX, r2+20(FP) + RET + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) + +TEXT ·seek(SB),NOSPLIT,$0-28 + JMP syscall·seek(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s new file mode 100644 index 0000000..c6468a9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -0,0 +1,57 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for AMD64, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + CALL runtime·entersyscall(SB) + MOVQ a1+8(FP), DI + MOVQ a2+16(FP), SI + MOVQ a3+24(FP), DX + MOVQ $0, R10 + MOVQ $0, R8 + MOVQ $0, R9 + MOVQ trap+0(FP), AX // syscall entry + SYSCALL + MOVQ AX, r1+32(FP) + MOVQ DX, r2+40(FP) + CALL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVQ a1+8(FP), DI + MOVQ a2+16(FP), SI + MOVQ a3+24(FP), DX + MOVQ $0, R10 + MOVQ $0, R8 + MOVQ $0, R9 + MOVQ trap+0(FP), AX // syscall entry + SYSCALL + MOVQ AX, r1+32(FP) + MOVQ DX, r2+40(FP) + RET + +TEXT ·gettimeofday(SB),NOSPLIT,$0-16 + JMP syscall·gettimeofday(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s new file mode 100644 index 0000000..cf0f357 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -0,0 +1,56 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for arm, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + B syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-24 + BL runtime·entersyscall(SB) + MOVW trap+0(FP), R7 + MOVW a1+4(FP), R0 + MOVW a2+8(FP), R1 + MOVW a3+12(FP), R2 + MOVW $0, R3 + MOVW $0, R4 + MOVW $0, R5 + SWI $0 + MOVW R0, r1+16(FP) + MOVW $0, R0 + MOVW R0, r2+20(FP) + BL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + B syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24 + MOVW trap+0(FP), R7 // syscall entry + MOVW a1+4(FP), R0 + MOVW a2+8(FP), R1 + MOVW a3+12(FP), R2 + SWI $0 + MOVW R0, r1+16(FP) + MOVW $0, R0 + MOVW R0, r2+20(FP) + RET + +TEXT ·seek(SB),NOSPLIT,$0-28 + B syscall·seek(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s new file mode 100644 index 0000000..afe6fdf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -0,0 +1,52 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build arm64 +// +build !gccgo + +#include "textflag.h" + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + B syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R0 + MOVD a2+16(FP), R1 + MOVD a3+24(FP), R2 + MOVD $0, R3 + MOVD $0, R4 + MOVD $0, R5 + MOVD trap+0(FP), R8 // syscall entry + SVC + MOVD R0, r1+32(FP) // r1 + MOVD R1, r2+40(FP) // r2 + BL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + B syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVD a1+8(FP), R0 + MOVD a2+16(FP), R1 + MOVD a3+24(FP), R2 + MOVD $0, R3 + MOVD $0, R4 + MOVD $0, R5 + MOVD trap+0(FP), R8 // syscall entry + SVC + MOVD R0, r1+32(FP) + MOVD R1, r2+40(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s new file mode 100644 index 0000000..ab9d638 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -0,0 +1,56 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build mips64 mips64le +// +build !gccgo + +#include "textflag.h" + +// +// System calls for mips64, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + JAL runtime·entersyscall(SB) + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R2 // syscall entry + SYSCALL + MOVV R2, r1+32(FP) + MOVV R3, r2+40(FP) + JAL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R2 // syscall entry + SYSCALL + MOVV R2, r1+32(FP) + MOVV R3, r2+40(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s new file mode 100644 index 0000000..99e5399 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -0,0 +1,54 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build mips mipsle +// +build !gccgo + +#include "textflag.h" + +// +// System calls for mips, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + JMP syscall·Syscall9(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-24 + JAL runtime·entersyscall(SB) + MOVW a1+4(FP), R4 + MOVW a2+8(FP), R5 + MOVW a3+12(FP), R6 + MOVW R0, R7 + MOVW trap+0(FP), R2 // syscall entry + SYSCALL + MOVW R2, r1+16(FP) // r1 + MOVW R3, r2+20(FP) // r2 + JAL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24 + MOVW a1+4(FP), R4 + MOVW a2+8(FP), R5 + MOVW a3+12(FP), R6 + MOVW trap+0(FP), R2 // syscall entry + SYSCALL + MOVW R2, r1+16(FP) + MOVW R3, r2+20(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s new file mode 100644 index 0000000..649e587 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + BR syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + BR syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R3 + MOVD a2+16(FP), R4 + MOVD a3+24(FP), R5 + MOVD R0, R6 + MOVD R0, R7 + MOVD R0, R8 + MOVD trap+0(FP), R9 // syscall entry + SYSCALL R9 + MOVD R3, r1+32(FP) + MOVD R4, r2+40(FP) + BL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + BR syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + BR syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVD a1+8(FP), R3 + MOVD a2+16(FP), R4 + MOVD a3+24(FP), R5 + MOVD R0, R6 + MOVD R0, R7 + MOVD R0, R8 + MOVD trap+0(FP), R9 // syscall entry + SYSCALL R9 + MOVD R3, r1+32(FP) + MOVD R4, r2+40(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s new file mode 100644 index 0000000..a5a863c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -0,0 +1,56 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x +// +build linux +// +build !gccgo + +#include "textflag.h" + +// +// System calls for s390x, Linux +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + BR syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + BR syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R2 + MOVD a2+16(FP), R3 + MOVD a3+24(FP), R4 + MOVD $0, R5 + MOVD $0, R6 + MOVD $0, R7 + MOVD trap+0(FP), R1 // syscall entry + SYSCALL + MOVD R2, r1+32(FP) + MOVD R3, r2+40(FP) + BL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + BR syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + BR syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVD a1+8(FP), R2 + MOVD a2+16(FP), R3 + MOVD a3+24(FP), R4 + MOVD $0, R5 + MOVD $0, R6 + MOVD $0, R7 + MOVD trap+0(FP), R1 // syscall entry + SYSCALL + MOVD R2, r1+32(FP) + MOVD R3, r2+40(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s new file mode 100644 index 0000000..48bdcd7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for 386, NetBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s new file mode 100644 index 0000000..2ede05c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for AMD64, NetBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s new file mode 100644 index 0000000..e892857 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for ARM, NetBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + B syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + B syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s new file mode 100644 index 0000000..00576f3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for 386, OpenBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s new file mode 100644 index 0000000..790ef77 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for AMD64, OpenBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s new file mode 100644 index 0000000..469bfa1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s @@ -0,0 +1,29 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for ARM, OpenBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-28 + B syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-40 + B syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-52 + B syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 + B syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 + B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s new file mode 100644 index 0000000..ded8260 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go +// + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go new file mode 100644 index 0000000..6e32296 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bluetooth_linux.go @@ -0,0 +1,35 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Bluetooth sockets and messages + +package unix + +// Bluetooth Protocols +const ( + BTPROTO_L2CAP = 0 + BTPROTO_HCI = 1 + BTPROTO_SCO = 2 + BTPROTO_RFCOMM = 3 + BTPROTO_BNEP = 4 + BTPROTO_CMTP = 5 + BTPROTO_HIDP = 6 + BTPROTO_AVDTP = 7 +) + +const ( + HCI_CHANNEL_RAW = 0 + HCI_CHANNEL_USER = 1 + HCI_CHANNEL_MONITOR = 2 + HCI_CHANNEL_CONTROL = 3 +) + +// Socketoption Level +const ( + SOL_BLUETOOTH = 0x112 + SOL_HCI = 0x0 + SOL_L2CAP = 0x6 + SOL_RFCOMM = 0x12 + SOL_SCO = 0x11 +) diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go new file mode 100644 index 0000000..83b6bce --- /dev/null +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -0,0 +1,195 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd + +package unix + +import ( + errorspkg "errors" + "fmt" +) + +// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c + +const ( + // This is the version of CapRights this package understands. See C implementation for parallels. + capRightsGoVersion = CAP_RIGHTS_VERSION_00 + capArSizeMin = CAP_RIGHTS_VERSION_00 + 2 + capArSizeMax = capRightsGoVersion + 2 +) + +var ( + bit2idx = []int{ + -1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, + 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + } +) + +func capidxbit(right uint64) int { + return int((right >> 57) & 0x1f) +} + +func rightToIndex(right uint64) (int, error) { + idx := capidxbit(right) + if idx < 0 || idx >= len(bit2idx) { + return -2, fmt.Errorf("index for right 0x%x out of range", right) + } + return bit2idx[idx], nil +} + +func caprver(right uint64) int { + return int(right >> 62) +} + +func capver(rights *CapRights) int { + return caprver(rights.Rights[0]) +} + +func caparsize(rights *CapRights) int { + return capver(rights) + 2 +} + +// CapRightsSet sets the permissions in setrights in rights. +func CapRightsSet(rights *CapRights, setrights []uint64) error { + // This is essentially a copy of cap_rights_vset() + if capver(rights) != CAP_RIGHTS_VERSION_00 { + return fmt.Errorf("bad rights version %d", capver(rights)) + } + + n := caparsize(rights) + if n < capArSizeMin || n > capArSizeMax { + return errorspkg.New("bad rights size") + } + + for _, right := range setrights { + if caprver(right) != CAP_RIGHTS_VERSION_00 { + return errorspkg.New("bad right version") + } + i, err := rightToIndex(right) + if err != nil { + return err + } + if i >= n { + return errorspkg.New("index overflow") + } + if capidxbit(rights.Rights[i]) != capidxbit(right) { + return errorspkg.New("index mismatch") + } + rights.Rights[i] |= right + if capidxbit(rights.Rights[i]) != capidxbit(right) { + return errorspkg.New("index mismatch (after assign)") + } + } + + return nil +} + +// CapRightsClear clears the permissions in clearrights from rights. +func CapRightsClear(rights *CapRights, clearrights []uint64) error { + // This is essentially a copy of cap_rights_vclear() + if capver(rights) != CAP_RIGHTS_VERSION_00 { + return fmt.Errorf("bad rights version %d", capver(rights)) + } + + n := caparsize(rights) + if n < capArSizeMin || n > capArSizeMax { + return errorspkg.New("bad rights size") + } + + for _, right := range clearrights { + if caprver(right) != CAP_RIGHTS_VERSION_00 { + return errorspkg.New("bad right version") + } + i, err := rightToIndex(right) + if err != nil { + return err + } + if i >= n { + return errorspkg.New("index overflow") + } + if capidxbit(rights.Rights[i]) != capidxbit(right) { + return errorspkg.New("index mismatch") + } + rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF) + if capidxbit(rights.Rights[i]) != capidxbit(right) { + return errorspkg.New("index mismatch (after assign)") + } + } + + return nil +} + +// CapRightsIsSet checks whether all the permissions in setrights are present in rights. +func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) { + // This is essentially a copy of cap_rights_is_vset() + if capver(rights) != CAP_RIGHTS_VERSION_00 { + return false, fmt.Errorf("bad rights version %d", capver(rights)) + } + + n := caparsize(rights) + if n < capArSizeMin || n > capArSizeMax { + return false, errorspkg.New("bad rights size") + } + + for _, right := range setrights { + if caprver(right) != CAP_RIGHTS_VERSION_00 { + return false, errorspkg.New("bad right version") + } + i, err := rightToIndex(right) + if err != nil { + return false, err + } + if i >= n { + return false, errorspkg.New("index overflow") + } + if capidxbit(rights.Rights[i]) != capidxbit(right) { + return false, errorspkg.New("index mismatch") + } + if (rights.Rights[i] & right) != right { + return false, nil + } + } + + return true, nil +} + +func capright(idx uint64, bit uint64) uint64 { + return ((1 << (57 + idx)) | bit) +} + +// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights. +// See man cap_rights_init(3) and rights(4). +func CapRightsInit(rights []uint64) (*CapRights, error) { + var r CapRights + r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0) + r.Rights[1] = capright(1, 0) + + err := CapRightsSet(&r, rights) + if err != nil { + return nil, err + } + return &r, nil +} + +// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights. +// The capability rights on fd can never be increased by CapRightsLimit. +// See man cap_rights_limit(2) and rights(4). +func CapRightsLimit(fd uintptr, rights *CapRights) error { + return capRightsLimit(int(fd), rights) +} + +// CapRightsGet returns a CapRights structure containing the operations permitted on fd. +// See man cap_rights_get(3) and rights(4). +func CapRightsGet(fd uintptr) (*CapRights, error) { + r, err := CapRightsInit(nil) + if err != nil { + return nil, err + } + err = capRightsGet(capRightsGoVersion, int(fd), r) + if err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go new file mode 100644 index 0000000..a96f0eb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +const ( + R_OK = 0x4 + W_OK = 0x2 + X_OK = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/creds_test.go b/vendor/golang.org/x/sys/unix/creds_test.go new file mode 100644 index 0000000..6b292b1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/creds_test.go @@ -0,0 +1,152 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package unix_test + +import ( + "bytes" + "go/build" + "net" + "os" + "syscall" + "testing" + + "golang.org/x/sys/unix" +) + +// TestSCMCredentials tests the sending and receiving of credentials +// (PID, UID, GID) in an ancillary message between two UNIX +// sockets. The SO_PASSCRED socket option is enabled on the sending +// socket for this to work. +func TestSCMCredentials(t *testing.T) { + socketTypeTests := []struct { + socketType int + dataLen int + }{ + { + unix.SOCK_STREAM, + 1, + }, { + unix.SOCK_DGRAM, + 0, + }, + } + + for _, tt := range socketTypeTests { + if tt.socketType == unix.SOCK_DGRAM && !atLeast1p10() { + t.Log("skipping DGRAM test on pre-1.10") + continue + } + + fds, err := unix.Socketpair(unix.AF_LOCAL, tt.socketType, 0) + if err != nil { + t.Fatalf("Socketpair: %v", err) + } + defer unix.Close(fds[0]) + defer unix.Close(fds[1]) + + err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1) + if err != nil { + t.Fatalf("SetsockoptInt: %v", err) + } + + srvFile := os.NewFile(uintptr(fds[0]), "server") + defer srvFile.Close() + srv, err := net.FileConn(srvFile) + if err != nil { + t.Errorf("FileConn: %v", err) + return + } + defer srv.Close() + + cliFile := os.NewFile(uintptr(fds[1]), "client") + defer cliFile.Close() + cli, err := net.FileConn(cliFile) + if err != nil { + t.Errorf("FileConn: %v", err) + return + } + defer cli.Close() + + var ucred unix.Ucred + if os.Getuid() != 0 { + ucred.Pid = int32(os.Getpid()) + ucred.Uid = 0 + ucred.Gid = 0 + oob := unix.UnixCredentials(&ucred) + _, _, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) + if op, ok := err.(*net.OpError); ok { + err = op.Err + } + if sys, ok := err.(*os.SyscallError); ok { + err = sys.Err + } + if err != syscall.EPERM { + t.Fatalf("WriteMsgUnix failed with %v, want EPERM", err) + } + } + + ucred.Pid = int32(os.Getpid()) + ucred.Uid = uint32(os.Getuid()) + ucred.Gid = uint32(os.Getgid()) + oob := unix.UnixCredentials(&ucred) + + // On SOCK_STREAM, this is internally going to send a dummy byte + n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) + if err != nil { + t.Fatalf("WriteMsgUnix: %v", err) + } + if n != 0 { + t.Fatalf("WriteMsgUnix n = %d, want 0", n) + } + if oobn != len(oob) { + t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob)) + } + + oob2 := make([]byte, 10*len(oob)) + n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2) + if err != nil { + t.Fatalf("ReadMsgUnix: %v", err) + } + if flags != 0 { + t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags) + } + if n != tt.dataLen { + t.Fatalf("ReadMsgUnix n = %d, want %d", n, tt.dataLen) + } + if oobn2 != oobn { + // without SO_PASSCRED set on the socket, ReadMsgUnix will + // return zero oob bytes + t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn) + } + oob2 = oob2[:oobn2] + if !bytes.Equal(oob, oob2) { + t.Fatal("ReadMsgUnix oob bytes don't match") + } + + scm, err := unix.ParseSocketControlMessage(oob2) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + newUcred, err := unix.ParseUnixCredentials(&scm[0]) + if err != nil { + t.Fatalf("ParseUnixCredentials: %v", err) + } + if *newUcred != ucred { + t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred) + } + } +} + +// atLeast1p10 reports whether we are running on Go 1.10 or later. +func atLeast1p10() bool { + for _, ver := range build.Default.ReleaseTags { + if ver == "go1.10" { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go new file mode 100644 index 0000000..8d1dc0f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_darwin.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions to access/create device major and minor numbers matching the +// encoding used in Darwin's sys/types.h header. + +package unix + +// Major returns the major component of a Darwin device number. +func Major(dev uint64) uint32 { + return uint32((dev >> 24) & 0xff) +} + +// Minor returns the minor component of a Darwin device number. +func Minor(dev uint64) uint32 { + return uint32(dev & 0xffffff) +} + +// Mkdev returns a Darwin device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + return (uint64(major) << 24) | uint64(minor) +} diff --git a/vendor/golang.org/x/sys/unix/dev_darwin_test.go b/vendor/golang.org/x/sys/unix/dev_darwin_test.go new file mode 100644 index 0000000..bf1adf3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_darwin_test.go @@ -0,0 +1,51 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package unix_test + +import ( + "fmt" + "testing" + + "golang.org/x/sys/unix" +) + +func TestDevices(t *testing.T) { + testCases := []struct { + path string + major uint32 + minor uint32 + }{ + // Most of the device major/minor numbers on Darwin are + // dynamically generated by devfs. These are some well-known + // static numbers. + {"/dev/ttyp0", 4, 0}, + {"/dev/ttys0", 4, 48}, + {"/dev/ptyp0", 5, 0}, + {"/dev/ptyr0", 5, 32}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { + var stat unix.Stat_t + err := unix.Stat(tc.path, &stat) + if err != nil { + t.Errorf("failed to stat device: %v", err) + return + } + + dev := uint64(stat.Rdev) + if unix.Major(dev) != tc.major { + t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) + } + if unix.Minor(dev) != tc.minor { + t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) + } + if unix.Mkdev(tc.major, tc.minor) != dev { + t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) + } + }) + } +} diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go new file mode 100644 index 0000000..8502f20 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_dragonfly.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions to access/create device major and minor numbers matching the +// encoding used in Dragonfly's sys/types.h header. +// +// The information below is extracted and adapted from sys/types.h: +// +// Minor gives a cookie instead of an index since in order to avoid changing the +// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for +// devices that don't use them. + +package unix + +// Major returns the major component of a DragonFlyBSD device number. +func Major(dev uint64) uint32 { + return uint32((dev >> 8) & 0xff) +} + +// Minor returns the minor component of a DragonFlyBSD device number. +func Minor(dev uint64) uint32 { + return uint32(dev & 0xffff00ff) +} + +// Mkdev returns a DragonFlyBSD device number generated from the given major and +// minor components. +func Mkdev(major, minor uint32) uint64 { + return (uint64(major) << 8) | uint64(minor) +} diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go b/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go new file mode 100644 index 0000000..9add376 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go @@ -0,0 +1,50 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package unix_test + +import ( + "fmt" + "testing" + + "golang.org/x/sys/unix" +) + +func TestDevices(t *testing.T) { + testCases := []struct { + path string + major uint32 + minor uint32 + }{ + // Minor is a cookie instead of an index on DragonFlyBSD + {"/dev/null", 10, 0x00000002}, + {"/dev/random", 10, 0x00000003}, + {"/dev/urandom", 10, 0x00000004}, + {"/dev/zero", 10, 0x0000000c}, + {"/dev/bpf", 15, 0xffff00ff}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { + var stat unix.Stat_t + err := unix.Stat(tc.path, &stat) + if err != nil { + t.Errorf("failed to stat device: %v", err) + return + } + + dev := uint64(stat.Rdev) + if unix.Major(dev) != tc.major { + t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) + } + if unix.Minor(dev) != tc.minor { + t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) + } + if unix.Mkdev(tc.major, tc.minor) != dev { + t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) + } + }) + } +} diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go new file mode 100644 index 0000000..eba3b4b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_freebsd.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions to access/create device major and minor numbers matching the +// encoding used in FreeBSD's sys/types.h header. +// +// The information below is extracted and adapted from sys/types.h: +// +// Minor gives a cookie instead of an index since in order to avoid changing the +// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for +// devices that don't use them. + +package unix + +// Major returns the major component of a FreeBSD device number. +func Major(dev uint64) uint32 { + return uint32((dev >> 8) & 0xff) +} + +// Minor returns the minor component of a FreeBSD device number. +func Minor(dev uint64) uint32 { + return uint32(dev & 0xffff00ff) +} + +// Mkdev returns a FreeBSD device number generated from the given major and +// minor components. +func Mkdev(major, minor uint32) uint64 { + return (uint64(major) << 8) | uint64(minor) +} diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go new file mode 100644 index 0000000..d165d6f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_linux.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions to access/create device major and minor numbers matching the +// encoding used by the Linux kernel and glibc. +// +// The information below is extracted and adapted from bits/sysmacros.h in the +// glibc sources: +// +// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's +// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major +// number and m is a hex digit of the minor number. This is backward compatible +// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also +// backward compatible with the Linux kernel, which for some architectures uses +// 32-bit dev_t, encoded as mmmM MMmm. + +package unix + +// Major returns the major component of a Linux device number. +func Major(dev uint64) uint32 { + major := uint32((dev & 0x00000000000fff00) >> 8) + major |= uint32((dev & 0xfffff00000000000) >> 32) + return major +} + +// Minor returns the minor component of a Linux device number. +func Minor(dev uint64) uint32 { + minor := uint32((dev & 0x00000000000000ff) >> 0) + minor |= uint32((dev & 0x00000ffffff00000) >> 12) + return minor +} + +// Mkdev returns a Linux device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + dev := (uint64(major) & 0x00000fff) << 8 + dev |= (uint64(major) & 0xfffff000) << 32 + dev |= (uint64(minor) & 0x000000ff) << 0 + dev |= (uint64(minor) & 0xffffff00) << 12 + return dev +} diff --git a/vendor/golang.org/x/sys/unix/dev_linux_test.go b/vendor/golang.org/x/sys/unix/dev_linux_test.go new file mode 100644 index 0000000..2fd3ead --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_linux_test.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package unix_test + +import ( + "fmt" + "testing" + + "golang.org/x/sys/unix" +) + +func TestDevices(t *testing.T) { + testCases := []struct { + path string + major uint32 + minor uint32 + }{ + // well known major/minor numbers according to + // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/admin-guide/devices.txt + {"/dev/null", 1, 3}, + {"/dev/zero", 1, 5}, + {"/dev/random", 1, 8}, + {"/dev/full", 1, 7}, + {"/dev/urandom", 1, 9}, + {"/dev/tty", 5, 0}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { + var stat unix.Stat_t + err := unix.Stat(tc.path, &stat) + if err != nil { + t.Errorf("failed to stat device: %v", err) + return + } + + dev := uint64(stat.Rdev) + if unix.Major(dev) != tc.major { + t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) + } + if unix.Minor(dev) != tc.minor { + t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) + } + if unix.Mkdev(tc.major, tc.minor) != dev { + t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) + } + }) + + } +} diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go new file mode 100644 index 0000000..b4a203d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_netbsd.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions to access/create device major and minor numbers matching the +// encoding used in NetBSD's sys/types.h header. + +package unix + +// Major returns the major component of a NetBSD device number. +func Major(dev uint64) uint32 { + return uint32((dev & 0x000fff00) >> 8) +} + +// Minor returns the minor component of a NetBSD device number. +func Minor(dev uint64) uint32 { + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xfff00000) >> 12) + return minor +} + +// Mkdev returns a NetBSD device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + dev := (uint64(major) << 8) & 0x000fff00 + dev |= (uint64(minor) << 12) & 0xfff00000 + dev |= (uint64(minor) << 0) & 0x000000ff + return dev +} diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd_test.go b/vendor/golang.org/x/sys/unix/dev_netbsd_test.go new file mode 100644 index 0000000..441058a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_netbsd_test.go @@ -0,0 +1,50 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package unix_test + +import ( + "fmt" + "testing" + + "golang.org/x/sys/unix" +) + +func TestDevices(t *testing.T) { + testCases := []struct { + path string + major uint32 + minor uint32 + }{ + // well known major/minor numbers according to /dev/MAKEDEV on + // NetBSD 8.0 + {"/dev/null", 2, 2}, + {"/dev/zero", 2, 12}, + {"/dev/random", 46, 0}, + {"/dev/urandom", 46, 1}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { + var stat unix.Stat_t + err := unix.Stat(tc.path, &stat) + if err != nil { + t.Errorf("failed to stat device: %v", err) + return + } + + dev := uint64(stat.Rdev) + if unix.Major(dev) != tc.major { + t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) + } + if unix.Minor(dev) != tc.minor { + t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) + } + if unix.Mkdev(tc.major, tc.minor) != dev { + t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) + } + }) + } +} diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go new file mode 100644 index 0000000..f3430c4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_openbsd.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions to access/create device major and minor numbers matching the +// encoding used in OpenBSD's sys/types.h header. + +package unix + +// Major returns the major component of an OpenBSD device number. +func Major(dev uint64) uint32 { + return uint32((dev & 0x0000ff00) >> 8) +} + +// Minor returns the minor component of an OpenBSD device number. +func Minor(dev uint64) uint32 { + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xffff0000) >> 8) + return minor +} + +// Mkdev returns an OpenBSD device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + dev := (uint64(major) << 8) & 0x0000ff00 + dev |= (uint64(minor) << 8) & 0xffff0000 + dev |= (uint64(minor) << 0) & 0x000000ff + return dev +} diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd_test.go b/vendor/golang.org/x/sys/unix/dev_openbsd_test.go new file mode 100644 index 0000000..e6cb64f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_openbsd_test.go @@ -0,0 +1,54 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package unix_test + +import ( + "fmt" + "testing" + + "golang.org/x/sys/unix" +) + +func TestDevices(t *testing.T) { + testCases := []struct { + path string + major uint32 + minor uint32 + }{ + // well known major/minor numbers according to /dev/MAKEDEV on + // OpenBSD 6.0 + {"/dev/null", 2, 2}, + {"/dev/zero", 2, 12}, + {"/dev/ttyp0", 5, 0}, + {"/dev/ttyp1", 5, 1}, + {"/dev/random", 45, 0}, + {"/dev/srandom", 45, 1}, + {"/dev/urandom", 45, 2}, + {"/dev/arandom", 45, 3}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { + var stat unix.Stat_t + err := unix.Stat(tc.path, &stat) + if err != nil { + t.Errorf("failed to stat device: %v", err) + return + } + + dev := uint64(stat.Rdev) + if unix.Major(dev) != tc.major { + t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) + } + if unix.Minor(dev) != tc.minor { + t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) + } + if unix.Mkdev(tc.major, tc.minor) != dev { + t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) + } + }) + } +} diff --git a/vendor/golang.org/x/sys/unix/dev_solaris_test.go b/vendor/golang.org/x/sys/unix/dev_solaris_test.go new file mode 100644 index 0000000..656508c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_solaris_test.go @@ -0,0 +1,51 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package unix_test + +import ( + "fmt" + "testing" + + "golang.org/x/sys/unix" +) + +func TestDevices(t *testing.T) { + testCases := []struct { + path string + major uint32 + minor uint32 + }{ + // Well-known major/minor numbers on OpenSolaris according to + // /etc/name_to_major + {"/dev/zero", 134, 12}, + {"/dev/null", 134, 2}, + {"/dev/ptyp0", 172, 0}, + {"/dev/ttyp0", 175, 0}, + {"/dev/ttyp1", 175, 1}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { + var stat unix.Stat_t + err := unix.Stat(tc.path, &stat) + if err != nil { + t.Errorf("failed to stat device: %v", err) + return + } + + dev := uint64(stat.Rdev) + if unix.Major(dev) != tc.major { + t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) + } + if unix.Minor(dev) != tc.minor { + t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) + } + if unix.Mkdev(tc.major, tc.minor) != dev { + t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) + } + }) + } +} diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go new file mode 100644 index 0000000..95fd353 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -0,0 +1,17 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris + +package unix + +import "syscall" + +// ParseDirent parses up to max directory entries in buf, +// appending the names to names. It returns the number of +// bytes consumed from buf, the number of entries added +// to names, and the new names slice. +func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { + return syscall.ParseDirent(buf, max, names) +} diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go new file mode 100644 index 0000000..5e92690 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// +build ppc64 s390x mips mips64 + +package unix + +const isBigEndian = true diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go new file mode 100644 index 0000000..085df2d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le + +package unix + +const isBigEndian = false diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go new file mode 100644 index 0000000..706b3cd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -0,0 +1,31 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +// Unix environment variables. + +package unix + +import "syscall" + +func Getenv(key string) (value string, found bool) { + return syscall.Getenv(key) +} + +func Setenv(key, value string) error { + return syscall.Setenv(key, value) +} + +func Clearenv() { + syscall.Clearenv() +} + +func Environ() []string { + return syscall.Environ() +} + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go new file mode 100644 index 0000000..c56bc8b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go @@ -0,0 +1,227 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep +// them here for backwards compatibility. + +package unix + +const ( + IFF_SMART = 0x20 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BSC = 0x53 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_IPXIP = 0xf9 + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf6 + IFT_PFSYNC = 0xf7 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IPPROTO_MAXID = 0x34 + IPV6_FAITH = 0x1d + IP_FAITH = 0x16 + MAP_NORESERVE = 0x40 + MAP_RENAME = 0x20 + NET_RT_MAXID = 0x6 + RTF_PRCLONING = 0x10000 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + SIOCADDRT = 0x8030720a + SIOCALIFADDR = 0x8118691b + SIOCDELRT = 0x8030720b + SIOCDLIFADDR = 0x8118691d + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCSLIFPHYADDR = 0x8118694a +) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go new file mode 100644 index 0000000..3e97711 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go @@ -0,0 +1,227 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep +// them here for backwards compatibility. + +package unix + +const ( + IFF_SMART = 0x20 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BSC = 0x53 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_IPXIP = 0xf9 + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf6 + IFT_PFSYNC = 0xf7 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IPPROTO_MAXID = 0x34 + IPV6_FAITH = 0x1d + IP_FAITH = 0x16 + MAP_NORESERVE = 0x40 + MAP_RENAME = 0x20 + NET_RT_MAXID = 0x6 + RTF_PRCLONING = 0x10000 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + SIOCADDRT = 0x8040720a + SIOCALIFADDR = 0x8118691b + SIOCDELRT = 0x8040720b + SIOCDLIFADDR = 0x8118691d + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCSLIFPHYADDR = 0x8118694a +) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go new file mode 100644 index 0000000..856dca3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go @@ -0,0 +1,226 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +const ( + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BSC = 0x53 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf6 + IFT_PFSYNC = 0xf7 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + + // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go + IFF_SMART = 0x20 + IFT_FAITH = 0xf2 + IFT_IPXIP = 0xf9 + IPPROTO_MAXID = 0x34 + IPV6_FAITH = 0x1d + IP_FAITH = 0x16 + MAP_NORESERVE = 0x40 + MAP_RENAME = 0x20 + NET_RT_MAXID = 0x6 + RTF_PRCLONING = 0x10000 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + SIOCADDRT = 0x8030720a + SIOCALIFADDR = 0x8118691b + SIOCDELRT = 0x8030720b + SIOCDLIFADDR = 0x8118691d + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCSLIFPHYADDR = 0x8118694a +) diff --git a/vendor/golang.org/x/sys/unix/export_test.go b/vendor/golang.org/x/sys/unix/export_test.go new file mode 100644 index 0000000..e802469 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/export_test.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +var Itoa = itoa diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/flock.go new file mode 100644 index 0000000..2994ce7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/flock.go @@ -0,0 +1,22 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package unix + +import "unsafe" + +// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux +// systems by flock_linux_32bit.go to be SYS_FCNTL64. +var fcntl64Syscall uintptr = SYS_FCNTL + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk))) + if errno == 0 { + return nil + } + return errno +} diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go new file mode 100644 index 0000000..fc0e50e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go @@ -0,0 +1,13 @@ +// +build linux,386 linux,arm linux,mips linux,mipsle + +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +func init() { + // On 32-bit Linux systems, the fcntl syscall that matches Go's + // Flock_t type is SYS_FCNTL64, not SYS_FCNTL. + fcntl64Syscall = SYS_FCNTL64 +} diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go new file mode 100644 index 0000000..50062e3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -0,0 +1,61 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo + +package unix + +import "syscall" + +// We can't use the gc-syntax .s files for gccgo. On the plus side +// much of the functionality can be written directly in Go. + +//extern gccgoRealSyscallNoError +func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr) + +//extern gccgoRealSyscall +func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr) + +func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) { + syscall.Entersyscall() + r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) + syscall.Exitsyscall() + return r, 0 +} + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + syscall.Entersyscall() + r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) + syscall.Exitsyscall() + return r, 0, syscall.Errno(errno) +} + +func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) { + syscall.Entersyscall() + r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0) + syscall.Exitsyscall() + return r, 0, syscall.Errno(errno) +} + +func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) { + syscall.Entersyscall() + r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9) + syscall.Exitsyscall() + return r, 0, syscall.Errno(errno) +} + +func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) { + r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) + return r, 0 +} + +func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) + return r, 0, syscall.Errno(errno) +} + +func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) { + r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0) + return r, 0, syscall.Errno(errno) +} diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c new file mode 100644 index 0000000..24e96b1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -0,0 +1,47 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo + +#include +#include +#include + +#define _STRINGIFY2_(x) #x +#define _STRINGIFY_(x) _STRINGIFY2_(x) +#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__) + +// Call syscall from C code because the gccgo support for calling from +// Go to C does not support varargs functions. + +struct ret { + uintptr_t r; + uintptr_t err; +}; + +struct ret +gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) +{ + struct ret r; + + errno = 0; + r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9); + r.err = errno; + return r; +} + +uintptr_t +gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) +{ + return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9); +} + +// Define the use function in C so that it is not inlined. + +extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline)); + +void +use(void *p __attribute__ ((unused))) +{ +} diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go new file mode 100644 index 0000000..251a977 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -0,0 +1,20 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo,linux,amd64 + +package unix + +import "syscall" + +//extern gettimeofday +func realGettimeofday(*Timeval, *byte) int32 + +func gettimeofday(tv *Timeval) (err syscall.Errno) { + r := realGettimeofday(tv, nil) + if r < 0 { + return syscall.GetErrno() + } + return 0 +} diff --git a/vendor/golang.org/x/sys/unix/linux/Dockerfile b/vendor/golang.org/x/sys/unix/linux/Dockerfile new file mode 100644 index 0000000..c3b29ea --- /dev/null +++ b/vendor/golang.org/x/sys/unix/linux/Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:17.10 + +# Dependencies to get the git sources and go binaries +RUN apt-get update && apt-get install -y \ + curl \ + git \ + && rm -rf /var/lib/apt/lists/* + +# Get the git sources. If not cached, this takes O(5 minutes). +WORKDIR /git +RUN git config --global advice.detachedHead false +# Linux Kernel: Released 28 Jan 2018 +RUN git clone --branch v4.15 --depth 1 https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux +# GNU C library: Released 01 Feb 2018 (we should try to get a secure way to clone this) +RUN git clone --branch glibc-2.27 --depth 1 git://sourceware.org/git/glibc.git + +# Get Go 1.10 +ENV GOLANG_VERSION 1.10 +ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz +ENV GOLANG_DOWNLOAD_SHA256 b5a64335f1490277b585832d1f6c7f8c6c11206cba5cd3f771dcb87b98ad1a33 + +RUN curl -fsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz \ + && echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - \ + && tar -C /usr/local -xzf golang.tar.gz \ + && rm golang.tar.gz + +ENV PATH /usr/local/go/bin:$PATH + +# Linux and Glibc build dependencies +RUN apt-get update && apt-get install -y \ + bison gawk make python \ + gcc gcc-multilib \ + gettext texinfo \ + && rm -rf /var/lib/apt/lists/* +# Emulator and cross compilers +RUN apt-get update && apt-get install -y \ + qemu \ + gcc-aarch64-linux-gnu gcc-arm-linux-gnueabi \ + gcc-mips-linux-gnu gcc-mips64-linux-gnuabi64 \ + gcc-mips64el-linux-gnuabi64 gcc-mipsel-linux-gnu \ + gcc-powerpc64-linux-gnu gcc-powerpc64le-linux-gnu \ + gcc-s390x-linux-gnu gcc-sparc64-linux-gnu \ + && rm -rf /var/lib/apt/lists/* + +# Let the scripts know they are in the docker environment +ENV GOLANG_SYS_BUILD docker +WORKDIR /build +ENTRYPOINT ["go", "run", "linux/mkall.go", "/git/linux", "/git/glibc"] diff --git a/vendor/golang.org/x/sys/unix/linux/mkall.go b/vendor/golang.org/x/sys/unix/linux/mkall.go new file mode 100644 index 0000000..89b2fe8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/linux/mkall.go @@ -0,0 +1,482 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// linux/mkall.go - Generates all Linux zsysnum, zsyscall, zerror, and ztype +// files for all 11 linux architectures supported by the go compiler. See +// README.md for more information about the build system. + +// To run it you must have a git checkout of the Linux kernel and glibc. Once +// the appropriate sources are ready, the program is run as: +// go run linux/mkall.go + +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "unicode" +) + +// These will be paths to the appropriate source directories. +var LinuxDir string +var GlibcDir string + +const TempDir = "/tmp" +const IncludeDir = TempDir + "/include" // To hold our C headers +const BuildDir = TempDir + "/build" // To hold intermediate build files + +const GOOS = "linux" // Only for Linux targets +const BuildArch = "amd64" // Must be built on this architecture +const MinKernel = "2.6.23" // https://golang.org/doc/install#requirements + +type target struct { + GoArch string // Architecture name according to Go + LinuxArch string // Architecture name according to the Linux Kernel + GNUArch string // Architecture name according to GNU tools (https://wiki.debian.org/Multiarch/Tuples) + BigEndian bool // Default Little Endian + SignedChar bool // Is -fsigned-char needed (default no) + Bits int +} + +// List of the 11 Linux targets supported by the go compiler. sparc64 is not +// currently supported, though a port is in progress. +var targets = []target{ + { + GoArch: "386", + LinuxArch: "x86", + GNUArch: "i686-linux-gnu", // Note "i686" not "i386" + Bits: 32, + }, + { + GoArch: "amd64", + LinuxArch: "x86", + GNUArch: "x86_64-linux-gnu", + Bits: 64, + }, + { + GoArch: "arm64", + LinuxArch: "arm64", + GNUArch: "aarch64-linux-gnu", + SignedChar: true, + Bits: 64, + }, + { + GoArch: "arm", + LinuxArch: "arm", + GNUArch: "arm-linux-gnueabi", + Bits: 32, + }, + { + GoArch: "mips", + LinuxArch: "mips", + GNUArch: "mips-linux-gnu", + BigEndian: true, + Bits: 32, + }, + { + GoArch: "mipsle", + LinuxArch: "mips", + GNUArch: "mipsel-linux-gnu", + Bits: 32, + }, + { + GoArch: "mips64", + LinuxArch: "mips", + GNUArch: "mips64-linux-gnuabi64", + BigEndian: true, + Bits: 64, + }, + { + GoArch: "mips64le", + LinuxArch: "mips", + GNUArch: "mips64el-linux-gnuabi64", + Bits: 64, + }, + { + GoArch: "ppc64", + LinuxArch: "powerpc", + GNUArch: "powerpc64-linux-gnu", + BigEndian: true, + Bits: 64, + }, + { + GoArch: "ppc64le", + LinuxArch: "powerpc", + GNUArch: "powerpc64le-linux-gnu", + Bits: 64, + }, + { + GoArch: "s390x", + LinuxArch: "s390", + GNUArch: "s390x-linux-gnu", + BigEndian: true, + SignedChar: true, + Bits: 64, + }, + // { + // GoArch: "sparc64", + // LinuxArch: "sparc", + // GNUArch: "sparc64-linux-gnu", + // BigEndian: true, + // Bits: 64, + // }, +} + +// ptracePairs is a list of pairs of targets that can, in some cases, +// run each other's binaries. +var ptracePairs = []struct{ a1, a2 string }{ + {"386", "amd64"}, + {"arm", "arm64"}, + {"mips", "mips64"}, + {"mipsle", "mips64le"}, +} + +func main() { + if runtime.GOOS != GOOS || runtime.GOARCH != BuildArch { + fmt.Printf("Build system has GOOS_GOARCH = %s_%s, need %s_%s\n", + runtime.GOOS, runtime.GOARCH, GOOS, BuildArch) + return + } + + // Check that we are using the new build system if we should + if os.Getenv("GOLANG_SYS_BUILD") != "docker" { + fmt.Println("In the new build system, mkall.go should not be called directly.") + fmt.Println("See README.md") + return + } + + // Parse the command line options + if len(os.Args) != 3 { + fmt.Println("USAGE: go run linux/mkall.go ") + return + } + LinuxDir = os.Args[1] + GlibcDir = os.Args[2] + + for _, t := range targets { + fmt.Printf("----- GENERATING: %s -----\n", t.GoArch) + if err := t.generateFiles(); err != nil { + fmt.Printf("%v\n***** FAILURE: %s *****\n\n", err, t.GoArch) + } else { + fmt.Printf("----- SUCCESS: %s -----\n\n", t.GoArch) + } + } + + fmt.Printf("----- GENERATING ptrace pairs -----\n") + ok := true + for _, p := range ptracePairs { + if err := generatePtracePair(p.a1, p.a2); err != nil { + fmt.Printf("%v\n***** FAILURE: %s/%s *****\n\n", err, p.a1, p.a2) + ok = false + } + } + if ok { + fmt.Printf("----- SUCCESS ptrace pairs -----\n\n") + } +} + +// Makes an exec.Cmd with Stderr attached to os.Stderr +func makeCommand(name string, args ...string) *exec.Cmd { + cmd := exec.Command(name, args...) + cmd.Stderr = os.Stderr + return cmd +} + +// Runs the command, pipes output to a formatter, pipes that to an output file. +func (t *target) commandFormatOutput(formatter string, outputFile string, + name string, args ...string) (err error) { + mainCmd := makeCommand(name, args...) + + fmtCmd := makeCommand(formatter) + if formatter == "mkpost" { + fmtCmd = makeCommand("go", "run", "mkpost.go") + // Set GOARCH_TARGET so mkpost knows what GOARCH is.. + fmtCmd.Env = append(os.Environ(), "GOARCH_TARGET="+t.GoArch) + // Set GOARCH to host arch for mkpost, so it can run natively. + for i, s := range fmtCmd.Env { + if strings.HasPrefix(s, "GOARCH=") { + fmtCmd.Env[i] = "GOARCH=" + BuildArch + } + } + } + + // mainCmd | fmtCmd > outputFile + if fmtCmd.Stdin, err = mainCmd.StdoutPipe(); err != nil { + return + } + if fmtCmd.Stdout, err = os.Create(outputFile); err != nil { + return + } + + // Make sure the formatter eventually closes + if err = fmtCmd.Start(); err != nil { + return + } + defer func() { + fmtErr := fmtCmd.Wait() + if err == nil { + err = fmtErr + } + }() + + return mainCmd.Run() +} + +// Generates all the files for a Linux target +func (t *target) generateFiles() error { + // Setup environment variables + os.Setenv("GOOS", GOOS) + os.Setenv("GOARCH", t.GoArch) + + // Get appropriate compiler and emulator (unless on x86) + if t.LinuxArch != "x86" { + // Check/Setup cross compiler + compiler := t.GNUArch + "-gcc" + if _, err := exec.LookPath(compiler); err != nil { + return err + } + os.Setenv("CC", compiler) + + // Check/Setup emulator (usually first component of GNUArch) + qemuArchName := t.GNUArch[:strings.Index(t.GNUArch, "-")] + if t.LinuxArch == "powerpc" { + qemuArchName = t.GoArch + } + os.Setenv("GORUN", "qemu-"+qemuArchName) + } else { + os.Setenv("CC", "gcc") + } + + // Make the include directory and fill it with headers + if err := os.MkdirAll(IncludeDir, os.ModePerm); err != nil { + return err + } + defer os.RemoveAll(IncludeDir) + if err := t.makeHeaders(); err != nil { + return fmt.Errorf("could not make header files: %v", err) + } + fmt.Println("header files generated") + + // Make each of the four files + if err := t.makeZSysnumFile(); err != nil { + return fmt.Errorf("could not make zsysnum file: %v", err) + } + fmt.Println("zsysnum file generated") + + if err := t.makeZSyscallFile(); err != nil { + return fmt.Errorf("could not make zsyscall file: %v", err) + } + fmt.Println("zsyscall file generated") + + if err := t.makeZTypesFile(); err != nil { + return fmt.Errorf("could not make ztypes file: %v", err) + } + fmt.Println("ztypes file generated") + + if err := t.makeZErrorsFile(); err != nil { + return fmt.Errorf("could not make zerrors file: %v", err) + } + fmt.Println("zerrors file generated") + + return nil +} + +// Create the Linux and glibc headers in the include directory. +func (t *target) makeHeaders() error { + // Make the Linux headers we need for this architecture + linuxMake := makeCommand("make", "headers_install", "ARCH="+t.LinuxArch, "INSTALL_HDR_PATH="+TempDir) + linuxMake.Dir = LinuxDir + if err := linuxMake.Run(); err != nil { + return err + } + + // A Temporary build directory for glibc + if err := os.MkdirAll(BuildDir, os.ModePerm); err != nil { + return err + } + defer os.RemoveAll(BuildDir) + + // Make the glibc headers we need for this architecture + confScript := filepath.Join(GlibcDir, "configure") + glibcConf := makeCommand(confScript, "--prefix="+TempDir, "--host="+t.GNUArch, "--enable-kernel="+MinKernel) + glibcConf.Dir = BuildDir + if err := glibcConf.Run(); err != nil { + return err + } + glibcMake := makeCommand("make", "install-headers") + glibcMake.Dir = BuildDir + if err := glibcMake.Run(); err != nil { + return err + } + // We only need an empty stubs file + stubsFile := filepath.Join(IncludeDir, "gnu/stubs.h") + if file, err := os.Create(stubsFile); err != nil { + return err + } else { + file.Close() + } + + return nil +} + +// makes the zsysnum_linux_$GOARCH.go file +func (t *target) makeZSysnumFile() error { + zsysnumFile := fmt.Sprintf("zsysnum_linux_%s.go", t.GoArch) + unistdFile := filepath.Join(IncludeDir, "asm/unistd.h") + + args := append(t.cFlags(), unistdFile) + return t.commandFormatOutput("gofmt", zsysnumFile, "linux/mksysnum.pl", args...) +} + +// makes the zsyscall_linux_$GOARCH.go file +func (t *target) makeZSyscallFile() error { + zsyscallFile := fmt.Sprintf("zsyscall_linux_%s.go", t.GoArch) + // Find the correct architecture syscall file (might end with x.go) + archSyscallFile := fmt.Sprintf("syscall_linux_%s.go", t.GoArch) + if _, err := os.Stat(archSyscallFile); os.IsNotExist(err) { + shortArch := strings.TrimSuffix(t.GoArch, "le") + archSyscallFile = fmt.Sprintf("syscall_linux_%sx.go", shortArch) + } + + args := append(t.mksyscallFlags(), "-tags", "linux,"+t.GoArch, + "syscall_linux.go", archSyscallFile) + return t.commandFormatOutput("gofmt", zsyscallFile, "./mksyscall.pl", args...) +} + +// makes the zerrors_linux_$GOARCH.go file +func (t *target) makeZErrorsFile() error { + zerrorsFile := fmt.Sprintf("zerrors_linux_%s.go", t.GoArch) + + return t.commandFormatOutput("gofmt", zerrorsFile, "./mkerrors.sh", t.cFlags()...) +} + +// makes the ztypes_linux_$GOARCH.go file +func (t *target) makeZTypesFile() error { + ztypesFile := fmt.Sprintf("ztypes_linux_%s.go", t.GoArch) + + args := []string{"tool", "cgo", "-godefs", "--"} + args = append(args, t.cFlags()...) + args = append(args, "linux/types.go") + return t.commandFormatOutput("mkpost", ztypesFile, "go", args...) +} + +// Flags that should be given to gcc and cgo for this target +func (t *target) cFlags() []string { + // Compile statically to avoid cross-architecture dynamic linking. + flags := []string{"-Wall", "-Werror", "-static", "-I" + IncludeDir} + + // Architecture-specific flags + if t.SignedChar { + flags = append(flags, "-fsigned-char") + } + if t.LinuxArch == "x86" { + flags = append(flags, fmt.Sprintf("-m%d", t.Bits)) + } + + return flags +} + +// Flags that should be given to mksyscall for this target +func (t *target) mksyscallFlags() (flags []string) { + if t.Bits == 32 { + if t.BigEndian { + flags = append(flags, "-b32") + } else { + flags = append(flags, "-l32") + } + } + + // This flag menas a 64-bit value should use (even, odd)-pair. + if t.GoArch == "arm" || (t.LinuxArch == "mips" && t.Bits == 32) { + flags = append(flags, "-arm") + } + return +} + +// generatePtracePair takes a pair of GOARCH values that can run each +// other's binaries, such as 386 and amd64. It extracts the PtraceRegs +// type for each one. It writes a new file defining the types +// PtraceRegsArch1 and PtraceRegsArch2 and the corresponding functions +// Ptrace{Get,Set}Regs{arch1,arch2}. This permits debugging the other +// binary on a native system. +func generatePtracePair(arch1, arch2 string) error { + def1, err := ptraceDef(arch1) + if err != nil { + return err + } + def2, err := ptraceDef(arch2) + if err != nil { + return err + } + f, err := os.Create(fmt.Sprintf("zptrace%s_linux.go", arch1)) + if err != nil { + return err + } + buf := bufio.NewWriter(f) + fmt.Fprintf(buf, "// Code generated by linux/mkall.go generatePtracePair(%s, %s). DO NOT EDIT.\n", arch1, arch2) + fmt.Fprintf(buf, "\n") + fmt.Fprintf(buf, "// +build linux\n") + fmt.Fprintf(buf, "// +build %s %s\n", arch1, arch2) + fmt.Fprintf(buf, "\n") + fmt.Fprintf(buf, "package unix\n") + fmt.Fprintf(buf, "\n") + fmt.Fprintf(buf, "%s\n", `import "unsafe"`) + fmt.Fprintf(buf, "\n") + writeOnePtrace(buf, arch1, def1) + fmt.Fprintf(buf, "\n") + writeOnePtrace(buf, arch2, def2) + if err := buf.Flush(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + return nil +} + +// ptraceDef returns the definition of PtraceRegs for arch. +func ptraceDef(arch string) (string, error) { + filename := fmt.Sprintf("ztypes_linux_%s.go", arch) + data, err := ioutil.ReadFile(filename) + if err != nil { + return "", fmt.Errorf("reading %s: %v", filename, err) + } + start := bytes.Index(data, []byte("type PtraceRegs struct")) + if start < 0 { + return "", fmt.Errorf("%s: no definition of PtraceRegs", filename) + } + data = data[start:] + end := bytes.Index(data, []byte("\n}\n")) + if end < 0 { + return "", fmt.Errorf("%s: can't find end of PtraceRegs definition", filename) + } + return string(data[:end+2]), nil +} + +// writeOnePtrace writes out the ptrace definitions for arch. +func writeOnePtrace(w io.Writer, arch, def string) { + uarch := string(unicode.ToUpper(rune(arch[0]))) + arch[1:] + fmt.Fprintf(w, "// PtraceRegs%s is the registers used by %s binaries.\n", uarch, arch) + fmt.Fprintf(w, "%s\n", strings.Replace(def, "PtraceRegs", "PtraceRegs"+uarch, 1)) + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "// PtraceGetRegs%s fetches the registers used by %s binaries.\n", uarch, arch) + fmt.Fprintf(w, "func PtraceGetRegs%s(pid int, regsout *PtraceRegs%s) error {\n", uarch, uarch) + fmt.Fprintf(w, "\treturn ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))\n") + fmt.Fprintf(w, "}\n") + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "// PtraceSetRegs%s sets the registers used by %s binaries.\n", uarch, arch) + fmt.Fprintf(w, "func PtraceSetRegs%s(pid int, regs *PtraceRegs%s) error {\n", uarch, uarch) + fmt.Fprintf(w, "\treturn ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))\n") + fmt.Fprintf(w, "}\n") +} diff --git a/vendor/golang.org/x/sys/unix/linux/mksysnum.pl b/vendor/golang.org/x/sys/unix/linux/mksysnum.pl new file mode 100755 index 0000000..63fd800 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/linux/mksysnum.pl @@ -0,0 +1,85 @@ +#!/usr/bin/env perl +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +use strict; + +if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") { + print STDERR "GOARCH or GOOS not defined in environment\n"; + exit 1; +} + +# Check that we are using the new build system if we should +if($ENV{'GOLANG_SYS_BUILD'} ne "docker") { + print STDERR "In the new build system, mksysnum should not be called directly.\n"; + print STDERR "See README.md\n"; + exit 1; +} + +my $command = "$0 ". join(' ', @ARGV); + +print < 999){ + # ignore deprecated syscalls that are no longer implemented + # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/include/uapi/asm-generic/unistd.h?id=refs/heads/master#n716 + return; + } + $name =~ y/a-z/A-Z/; + $num = $num + $offset; + print " SYS_$name = $num;\n"; +} + +my $prev; +open(CC, "$ENV{'CC'} -E -dD @ARGV |") || die "can't run $ENV{'CC'}"; +while(){ + if(/^#define __NR_Linux\s+([0-9]+)/){ + # mips/mips64: extract offset + $offset = $1; + } + elsif(/^#define __NR(\w*)_SYSCALL_BASE\s+([0-9]+)/){ + # arm: extract offset + $offset = $1; + } + elsif(/^#define __NR_syscalls\s+/) { + # ignore redefinitions of __NR_syscalls + } + elsif(/^#define __NR_(\w*)Linux_syscalls\s+/) { + # mips/mips64: ignore definitions about the number of syscalls + } + elsif(/^#define __NR_(\w+)\s+([0-9]+)/){ + $prev = $2; + fmt($1, $2); + } + elsif(/^#define __NR3264_(\w+)\s+([0-9]+)/){ + $prev = $2; + fmt($1, $2); + } + elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){ + fmt($1, $prev+$2) + } + elsif(/^#define __NR_(\w+)\s+\(__NR_Linux \+ ([0-9]+)/){ + fmt($1, $2); + } + elsif(/^#define __NR_(\w+)\s+\(__NR_SYSCALL_BASE \+ ([0-9]+)/){ + fmt($1, $2); + } +} + +print < +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// On mips64, the glibc stat and kernel stat do not agree +#if (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64) + +// Use the stat defined by the kernel with a few modifications. These are: +// * The time fields (like st_atime and st_atimensec) use the timespec +// struct (like st_atim) for consitancy with the glibc fields. +// * The padding fields get different names to not break compatibility. +// * st_blocks is signed, again for compatibility. +struct stat { + unsigned int st_dev; + unsigned int st_pad1[3]; // Reserved for st_dev expansion + + unsigned long st_ino; + + mode_t st_mode; + __u32 st_nlink; + + uid_t st_uid; + gid_t st_gid; + + unsigned int st_rdev; + unsigned int st_pad2[3]; // Reserved for st_rdev expansion + + off_t st_size; + + // These are declared as speperate fields in the kernel. Here we use + // the timespec struct for consistancy with the other stat structs. + struct timespec st_atim; + struct timespec st_mtim; + struct timespec st_ctim; + + unsigned int st_blksize; + unsigned int st_pad4; + + long st_blocks; +}; + +// These are needed because we do not include fcntl.h or sys/types.h +#include +#include + +#else + +// Use the stat defined by glibc +#include +#include + +#endif + +// These are defined in linux/fcntl.h, but including it globally causes +// conflicts with fcntl.h +#ifndef AT_STATX_SYNC_TYPE +# define AT_STATX_SYNC_TYPE 0x6000 // Type of synchronisation required from statx() +#endif +#ifndef AT_STATX_SYNC_AS_STAT +# define AT_STATX_SYNC_AS_STAT 0x0000 // - Do whatever stat() does +#endif +#ifndef AT_STATX_FORCE_SYNC +# define AT_STATX_FORCE_SYNC 0x2000 // - Force the attributes to be sync'd with the server +#endif +#ifndef AT_STATX_DONT_SYNC +# define AT_STATX_DONT_SYNC 0x4000 // - Don't sync attributes with the server +#endif + +#ifdef TCSETS2 +// On systems that have "struct termios2" use this as type Termios. +typedef struct termios2 termios_t; +#else +typedef struct termios termios_t; +#endif + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_ll s5; + struct sockaddr_nl s6; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +// copied from /usr/include/bluetooth/hci.h +struct sockaddr_hci { + sa_family_t hci_family; + unsigned short hci_dev; + unsigned short hci_channel; +}; + +// copied from /usr/include/bluetooth/bluetooth.h +#define BDADDR_BREDR 0x00 +#define BDADDR_LE_PUBLIC 0x01 +#define BDADDR_LE_RANDOM 0x02 + +// copied from /usr/include/bluetooth/l2cap.h +struct sockaddr_l2 { + sa_family_t l2_family; + unsigned short l2_psm; + uint8_t l2_bdaddr[6]; + unsigned short l2_cid; + uint8_t l2_bdaddr_type; +}; + +// copied from /usr/include/linux/un.h +struct my_sockaddr_un { + sa_family_t sun_family; +#if defined(__ARM_EABI__) || defined(__powerpc64__) + // on ARM char is by default unsigned + signed char sun_path[108]; +#else + char sun_path[108]; +#endif +}; + +#ifdef __ARM_EABI__ +typedef struct user_regs PtraceRegs; +#elif defined(__aarch64__) +typedef struct user_pt_regs PtraceRegs; +#elif defined(__mips__) || defined(__powerpc64__) +typedef struct pt_regs PtraceRegs; +#elif defined(__s390x__) +typedef struct _user_regs_struct PtraceRegs; +#elif defined(__sparc__) +#include +typedef struct pt_regs PtraceRegs; +#else +typedef struct user_regs_struct PtraceRegs; +#endif + +#if defined(__s390x__) +typedef struct _user_psw_struct ptracePsw; +typedef struct _user_fpregs_struct ptraceFpregs; +typedef struct _user_per_struct ptracePer; +#else +typedef struct {} ptracePsw; +typedef struct {} ptraceFpregs; +typedef struct {} ptracePer; +#endif + +// The real epoll_event is a union, and godefs doesn't handle it well. +struct my_epoll_event { + uint32_t events; +#if defined(__ARM_EABI__) || defined(__aarch64__) || (defined(__mips__) && _MIPS_SIM == _ABIO32) + // padding is not specified in linux/eventpoll.h but added to conform to the + // alignment requirements of EABI + int32_t padFd; +#elif defined(__powerpc64__) || defined(__s390x__) || defined(__sparc__) + int32_t _padFd; +#endif + int32_t fd; + int32_t pad; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timex C.struct_timex + +type Time_t C.time_t + +type Tms C.struct_tms + +type Utimbuf C.struct_utimbuf + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type StatxTimestamp C.struct_statx_timestamp + +type Statx_t C.struct_statx + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +type Flock_t C.struct_flock + +// Filesystem Encryption + +type FscryptPolicy C.struct_fscrypt_policy + +type FscryptKey C.struct_fscrypt_key + +// Structure for Keyctl + +type KeyctlDHParams C.struct_keyctl_dh_params + +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_my_sockaddr_un + +type RawSockaddrLinklayer C.struct_sockaddr_ll + +type RawSockaddrNetlink C.struct_sockaddr_nl + +type RawSockaddrHCI C.struct_sockaddr_hci + +type RawSockaddrL2 C.struct_sockaddr_l2 + +type RawSockaddrCAN C.struct_sockaddr_can + +type RawSockaddrALG C.struct_sockaddr_alg + +type RawSockaddrVM C.struct_sockaddr_vm + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPMreqn C.struct_ip_mreqn + +type IPv6Mreq C.struct_ipv6_mreq + +type PacketMreq C.struct_packet_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet4Pktinfo C.struct_in_pktinfo + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +type Ucred C.struct_ucred + +type TCPInfo C.struct_tcp_info + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrLinklayer = C.sizeof_struct_sockaddr_ll + SizeofSockaddrNetlink = C.sizeof_struct_sockaddr_nl + SizeofSockaddrHCI = C.sizeof_struct_sockaddr_hci + SizeofSockaddrL2 = C.sizeof_struct_sockaddr_l2 + SizeofSockaddrCAN = C.sizeof_struct_sockaddr_can + SizeofSockaddrALG = C.sizeof_struct_sockaddr_alg + SizeofSockaddrVM = C.sizeof_struct_sockaddr_vm + SizeofLinger = C.sizeof_struct_linger + SizeofIovec = C.sizeof_struct_iovec + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPMreqn = C.sizeof_struct_ip_mreqn + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofPacketMreq = C.sizeof_struct_packet_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + SizeofUcred = C.sizeof_struct_ucred + SizeofTCPInfo = C.sizeof_struct_tcp_info +) + +// Netlink routing and interface messages + +const ( + IFA_UNSPEC = C.IFA_UNSPEC + IFA_ADDRESS = C.IFA_ADDRESS + IFA_LOCAL = C.IFA_LOCAL + IFA_LABEL = C.IFA_LABEL + IFA_BROADCAST = C.IFA_BROADCAST + IFA_ANYCAST = C.IFA_ANYCAST + IFA_CACHEINFO = C.IFA_CACHEINFO + IFA_MULTICAST = C.IFA_MULTICAST + IFLA_UNSPEC = C.IFLA_UNSPEC + IFLA_ADDRESS = C.IFLA_ADDRESS + IFLA_BROADCAST = C.IFLA_BROADCAST + IFLA_IFNAME = C.IFLA_IFNAME + IFLA_MTU = C.IFLA_MTU + IFLA_LINK = C.IFLA_LINK + IFLA_QDISC = C.IFLA_QDISC + IFLA_STATS = C.IFLA_STATS + IFLA_COST = C.IFLA_COST + IFLA_PRIORITY = C.IFLA_PRIORITY + IFLA_MASTER = C.IFLA_MASTER + IFLA_WIRELESS = C.IFLA_WIRELESS + IFLA_PROTINFO = C.IFLA_PROTINFO + IFLA_TXQLEN = C.IFLA_TXQLEN + IFLA_MAP = C.IFLA_MAP + IFLA_WEIGHT = C.IFLA_WEIGHT + IFLA_OPERSTATE = C.IFLA_OPERSTATE + IFLA_LINKMODE = C.IFLA_LINKMODE + IFLA_LINKINFO = C.IFLA_LINKINFO + IFLA_NET_NS_PID = C.IFLA_NET_NS_PID + IFLA_IFALIAS = C.IFLA_IFALIAS + IFLA_NUM_VF = C.IFLA_NUM_VF + IFLA_VFINFO_LIST = C.IFLA_VFINFO_LIST + IFLA_STATS64 = C.IFLA_STATS64 + IFLA_VF_PORTS = C.IFLA_VF_PORTS + IFLA_PORT_SELF = C.IFLA_PORT_SELF + IFLA_AF_SPEC = C.IFLA_AF_SPEC + IFLA_GROUP = C.IFLA_GROUP + IFLA_NET_NS_FD = C.IFLA_NET_NS_FD + IFLA_EXT_MASK = C.IFLA_EXT_MASK + IFLA_PROMISCUITY = C.IFLA_PROMISCUITY + IFLA_NUM_TX_QUEUES = C.IFLA_NUM_TX_QUEUES + IFLA_NUM_RX_QUEUES = C.IFLA_NUM_RX_QUEUES + IFLA_CARRIER = C.IFLA_CARRIER + IFLA_PHYS_PORT_ID = C.IFLA_PHYS_PORT_ID + IFLA_CARRIER_CHANGES = C.IFLA_CARRIER_CHANGES + IFLA_PHYS_SWITCH_ID = C.IFLA_PHYS_SWITCH_ID + IFLA_LINK_NETNSID = C.IFLA_LINK_NETNSID + IFLA_PHYS_PORT_NAME = C.IFLA_PHYS_PORT_NAME + IFLA_PROTO_DOWN = C.IFLA_PROTO_DOWN + IFLA_GSO_MAX_SEGS = C.IFLA_GSO_MAX_SEGS + IFLA_GSO_MAX_SIZE = C.IFLA_GSO_MAX_SIZE + IFLA_PAD = C.IFLA_PAD + IFLA_XDP = C.IFLA_XDP + IFLA_EVENT = C.IFLA_EVENT + IFLA_NEW_NETNSID = C.IFLA_NEW_NETNSID + IFLA_IF_NETNSID = C.IFLA_IF_NETNSID + IFLA_MAX = C.IFLA_MAX + RT_SCOPE_UNIVERSE = C.RT_SCOPE_UNIVERSE + RT_SCOPE_SITE = C.RT_SCOPE_SITE + RT_SCOPE_LINK = C.RT_SCOPE_LINK + RT_SCOPE_HOST = C.RT_SCOPE_HOST + RT_SCOPE_NOWHERE = C.RT_SCOPE_NOWHERE + RT_TABLE_UNSPEC = C.RT_TABLE_UNSPEC + RT_TABLE_COMPAT = C.RT_TABLE_COMPAT + RT_TABLE_DEFAULT = C.RT_TABLE_DEFAULT + RT_TABLE_MAIN = C.RT_TABLE_MAIN + RT_TABLE_LOCAL = C.RT_TABLE_LOCAL + RT_TABLE_MAX = C.RT_TABLE_MAX + RTA_UNSPEC = C.RTA_UNSPEC + RTA_DST = C.RTA_DST + RTA_SRC = C.RTA_SRC + RTA_IIF = C.RTA_IIF + RTA_OIF = C.RTA_OIF + RTA_GATEWAY = C.RTA_GATEWAY + RTA_PRIORITY = C.RTA_PRIORITY + RTA_PREFSRC = C.RTA_PREFSRC + RTA_METRICS = C.RTA_METRICS + RTA_MULTIPATH = C.RTA_MULTIPATH + RTA_FLOW = C.RTA_FLOW + RTA_CACHEINFO = C.RTA_CACHEINFO + RTA_TABLE = C.RTA_TABLE + RTN_UNSPEC = C.RTN_UNSPEC + RTN_UNICAST = C.RTN_UNICAST + RTN_LOCAL = C.RTN_LOCAL + RTN_BROADCAST = C.RTN_BROADCAST + RTN_ANYCAST = C.RTN_ANYCAST + RTN_MULTICAST = C.RTN_MULTICAST + RTN_BLACKHOLE = C.RTN_BLACKHOLE + RTN_UNREACHABLE = C.RTN_UNREACHABLE + RTN_PROHIBIT = C.RTN_PROHIBIT + RTN_THROW = C.RTN_THROW + RTN_NAT = C.RTN_NAT + RTN_XRESOLVE = C.RTN_XRESOLVE + RTNLGRP_NONE = C.RTNLGRP_NONE + RTNLGRP_LINK = C.RTNLGRP_LINK + RTNLGRP_NOTIFY = C.RTNLGRP_NOTIFY + RTNLGRP_NEIGH = C.RTNLGRP_NEIGH + RTNLGRP_TC = C.RTNLGRP_TC + RTNLGRP_IPV4_IFADDR = C.RTNLGRP_IPV4_IFADDR + RTNLGRP_IPV4_MROUTE = C.RTNLGRP_IPV4_MROUTE + RTNLGRP_IPV4_ROUTE = C.RTNLGRP_IPV4_ROUTE + RTNLGRP_IPV4_RULE = C.RTNLGRP_IPV4_RULE + RTNLGRP_IPV6_IFADDR = C.RTNLGRP_IPV6_IFADDR + RTNLGRP_IPV6_MROUTE = C.RTNLGRP_IPV6_MROUTE + RTNLGRP_IPV6_ROUTE = C.RTNLGRP_IPV6_ROUTE + RTNLGRP_IPV6_IFINFO = C.RTNLGRP_IPV6_IFINFO + RTNLGRP_IPV6_PREFIX = C.RTNLGRP_IPV6_PREFIX + RTNLGRP_IPV6_RULE = C.RTNLGRP_IPV6_RULE + RTNLGRP_ND_USEROPT = C.RTNLGRP_ND_USEROPT + SizeofNlMsghdr = C.sizeof_struct_nlmsghdr + SizeofNlMsgerr = C.sizeof_struct_nlmsgerr + SizeofRtGenmsg = C.sizeof_struct_rtgenmsg + SizeofNlAttr = C.sizeof_struct_nlattr + SizeofRtAttr = C.sizeof_struct_rtattr + SizeofIfInfomsg = C.sizeof_struct_ifinfomsg + SizeofIfAddrmsg = C.sizeof_struct_ifaddrmsg + SizeofRtMsg = C.sizeof_struct_rtmsg + SizeofRtNexthop = C.sizeof_struct_rtnexthop +) + +type NlMsghdr C.struct_nlmsghdr + +type NlMsgerr C.struct_nlmsgerr + +type RtGenmsg C.struct_rtgenmsg + +type NlAttr C.struct_nlattr + +type RtAttr C.struct_rtattr + +type IfInfomsg C.struct_ifinfomsg + +type IfAddrmsg C.struct_ifaddrmsg + +type RtMsg C.struct_rtmsg + +type RtNexthop C.struct_rtnexthop + +// Linux socket filter + +const ( + SizeofSockFilter = C.sizeof_struct_sock_filter + SizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type SockFilter C.struct_sock_filter + +type SockFprog C.struct_sock_fprog + +// Inotify + +type InotifyEvent C.struct_inotify_event + +const SizeofInotifyEvent = C.sizeof_struct_inotify_event + +// Ptrace + +// Register structures +type PtraceRegs C.PtraceRegs + +// Structures contained in PtraceRegs on s390x (exported by mkpost.go) +type PtracePsw C.ptracePsw + +type PtraceFpregs C.ptraceFpregs + +type PtracePer C.ptracePer + +// Misc + +type FdSet C.fd_set + +type Sysinfo_t C.struct_sysinfo + +type Utsname C.struct_utsname + +type Ustat_t C.struct_ustat + +type EpollEvent C.struct_my_epoll_event + +const ( + AT_EMPTY_PATH = C.AT_EMPTY_PATH + AT_FDCWD = C.AT_FDCWD + AT_NO_AUTOMOUNT = C.AT_NO_AUTOMOUNT + AT_REMOVEDIR = C.AT_REMOVEDIR + + AT_STATX_SYNC_AS_STAT = C.AT_STATX_SYNC_AS_STAT + AT_STATX_FORCE_SYNC = C.AT_STATX_FORCE_SYNC + AT_STATX_DONT_SYNC = C.AT_STATX_DONT_SYNC + + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +type PollFd C.struct_pollfd + +const ( + POLLIN = C.POLLIN + POLLPRI = C.POLLPRI + POLLOUT = C.POLLOUT + POLLRDHUP = C.POLLRDHUP + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLNVAL = C.POLLNVAL +) + +type Sigset_t C.sigset_t + +const RNDGETENTCNT = C.RNDGETENTCNT + +const PERF_IOC_FLAG_GROUP = C.PERF_IOC_FLAG_GROUP + +// Terminal handling + +type Termios C.termios_t + +type Winsize C.struct_winsize + +// Taskstats and cgroup stats. + +type Taskstats C.struct_taskstats + +const ( + TASKSTATS_CMD_UNSPEC = C.TASKSTATS_CMD_UNSPEC + TASKSTATS_CMD_GET = C.TASKSTATS_CMD_GET + TASKSTATS_CMD_NEW = C.TASKSTATS_CMD_NEW + TASKSTATS_TYPE_UNSPEC = C.TASKSTATS_TYPE_UNSPEC + TASKSTATS_TYPE_PID = C.TASKSTATS_TYPE_PID + TASKSTATS_TYPE_TGID = C.TASKSTATS_TYPE_TGID + TASKSTATS_TYPE_STATS = C.TASKSTATS_TYPE_STATS + TASKSTATS_TYPE_AGGR_PID = C.TASKSTATS_TYPE_AGGR_PID + TASKSTATS_TYPE_AGGR_TGID = C.TASKSTATS_TYPE_AGGR_TGID + TASKSTATS_TYPE_NULL = C.TASKSTATS_TYPE_NULL + TASKSTATS_CMD_ATTR_UNSPEC = C.TASKSTATS_CMD_ATTR_UNSPEC + TASKSTATS_CMD_ATTR_PID = C.TASKSTATS_CMD_ATTR_PID + TASKSTATS_CMD_ATTR_TGID = C.TASKSTATS_CMD_ATTR_TGID + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = C.TASKSTATS_CMD_ATTR_REGISTER_CPUMASK + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = C.TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK +) + +type CGroupStats C.struct_cgroupstats + +const ( + CGROUPSTATS_CMD_UNSPEC = C.__TASKSTATS_CMD_MAX + CGROUPSTATS_CMD_GET = C.CGROUPSTATS_CMD_GET + CGROUPSTATS_CMD_NEW = C.CGROUPSTATS_CMD_NEW + CGROUPSTATS_TYPE_UNSPEC = C.CGROUPSTATS_TYPE_UNSPEC + CGROUPSTATS_TYPE_CGROUP_STATS = C.CGROUPSTATS_TYPE_CGROUP_STATS + CGROUPSTATS_CMD_ATTR_UNSPEC = C.CGROUPSTATS_CMD_ATTR_UNSPEC + CGROUPSTATS_CMD_ATTR_FD = C.CGROUPSTATS_CMD_ATTR_FD +) + +// Generic netlink + +type Genlmsghdr C.struct_genlmsghdr + +const ( + CTRL_CMD_UNSPEC = C.CTRL_CMD_UNSPEC + CTRL_CMD_NEWFAMILY = C.CTRL_CMD_NEWFAMILY + CTRL_CMD_DELFAMILY = C.CTRL_CMD_DELFAMILY + CTRL_CMD_GETFAMILY = C.CTRL_CMD_GETFAMILY + CTRL_CMD_NEWOPS = C.CTRL_CMD_NEWOPS + CTRL_CMD_DELOPS = C.CTRL_CMD_DELOPS + CTRL_CMD_GETOPS = C.CTRL_CMD_GETOPS + CTRL_CMD_NEWMCAST_GRP = C.CTRL_CMD_NEWMCAST_GRP + CTRL_CMD_DELMCAST_GRP = C.CTRL_CMD_DELMCAST_GRP + CTRL_CMD_GETMCAST_GRP = C.CTRL_CMD_GETMCAST_GRP + CTRL_ATTR_UNSPEC = C.CTRL_ATTR_UNSPEC + CTRL_ATTR_FAMILY_ID = C.CTRL_ATTR_FAMILY_ID + CTRL_ATTR_FAMILY_NAME = C.CTRL_ATTR_FAMILY_NAME + CTRL_ATTR_VERSION = C.CTRL_ATTR_VERSION + CTRL_ATTR_HDRSIZE = C.CTRL_ATTR_HDRSIZE + CTRL_ATTR_MAXATTR = C.CTRL_ATTR_MAXATTR + CTRL_ATTR_OPS = C.CTRL_ATTR_OPS + CTRL_ATTR_MCAST_GROUPS = C.CTRL_ATTR_MCAST_GROUPS + CTRL_ATTR_OP_UNSPEC = C.CTRL_ATTR_OP_UNSPEC + CTRL_ATTR_OP_ID = C.CTRL_ATTR_OP_ID + CTRL_ATTR_OP_FLAGS = C.CTRL_ATTR_OP_FLAGS + CTRL_ATTR_MCAST_GRP_UNSPEC = C.CTRL_ATTR_MCAST_GRP_UNSPEC + CTRL_ATTR_MCAST_GRP_NAME = C.CTRL_ATTR_MCAST_GRP_NAME + CTRL_ATTR_MCAST_GRP_ID = C.CTRL_ATTR_MCAST_GRP_ID +) + +// CPU affinity + +type cpuMask C.__cpu_mask + +const ( + _CPU_SETSIZE = C.__CPU_SETSIZE + _NCPUBITS = C.__NCPUBITS +) + +// Bluetooth + +const ( + BDADDR_BREDR = C.BDADDR_BREDR + BDADDR_LE_PUBLIC = C.BDADDR_LE_PUBLIC + BDADDR_LE_RANDOM = C.BDADDR_LE_RANDOM +) diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh new file mode 100755 index 0000000..1715122 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -0,0 +1,188 @@ +#!/usr/bin/env bash +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# This script runs or (given -n) prints suggested commands to generate files for +# the Architecture/OS specified by the GOARCH and GOOS environment variables. +# See README.md for more information about how the build system works. + +GOOSARCH="${GOOS}_${GOARCH}" + +# defaults +mksyscall="./mksyscall.pl" +mkerrors="./mkerrors.sh" +zerrors="zerrors_$GOOSARCH.go" +mksysctl="" +zsysctl="zsysctl_$GOOSARCH.go" +mksysnum= +mktypes= +run="sh" +cmd="" + +case "$1" in +-syscalls) + for i in zsyscall*go + do + # Run the command line that appears in the first line + # of the generated file to regenerate it. + sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i + rm _$i + done + exit 0 + ;; +-n) + run="cat" + cmd="echo" + shift +esac + +case "$#" in +0) + ;; +*) + echo 'usage: mkall.sh [-n]' 1>&2 + exit 2 +esac + +if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then + # Use then new build system + # Files generated through docker (use $cmd so you can Ctl-C the build or run) + $cmd docker build --tag generate:$GOOS $GOOS + $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS + exit +fi + +GOOSARCH_in=syscall_$GOOSARCH.go +case "$GOOSARCH" in +_* | *_ | _) + echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 + exit 1 + ;; +darwin_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32" + mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +darwin_amd64) + mkerrors="$mkerrors -m64" + mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +darwin_arm) + mkerrors="$mkerrors" + mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +darwin_arm64) + mkerrors="$mkerrors -m64" + mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +dragonfly_amd64) + mkerrors="$mkerrors -m64" + mksyscall="./mksyscall.pl -dragonfly" + mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +freebsd_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32" + mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +freebsd_amd64) + mkerrors="$mkerrors -m64" + mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +freebsd_arm) + mkerrors="$mkerrors" + mksyscall="./mksyscall.pl -l32 -arm" + mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +linux_sparc64) + GOOSARCH_in=syscall_linux_sparc64.go + unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h + mkerrors="$mkerrors -m64" + mksysnum="./mksysnum_linux.pl $unistd_h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +netbsd_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32 -netbsd" + mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +netbsd_amd64) + mkerrors="$mkerrors -m64" + mksyscall="./mksyscall.pl -netbsd" + mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +netbsd_arm) + mkerrors="$mkerrors" + mksyscall="./mksyscall.pl -l32 -netbsd -arm" + mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +openbsd_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32 -openbsd" + mksysctl="./mksysctl_openbsd.pl" + mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +openbsd_amd64) + mkerrors="$mkerrors -m64" + mksyscall="./mksyscall.pl -openbsd" + mksysctl="./mksysctl_openbsd.pl" + mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +openbsd_arm) + mkerrors="$mkerrors" + mksyscall="./mksyscall.pl -l32 -openbsd -arm" + mksysctl="./mksysctl_openbsd.pl" + mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +solaris_amd64) + mksyscall="./mksyscall_solaris.pl" + mkerrors="$mkerrors -m64" + mksysnum= + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; +*) + echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 + exit 1 + ;; +esac + +( + if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi + case "$GOOS" in + *) + syscall_goos="syscall_$GOOS.go" + case "$GOOS" in + darwin | dragonfly | freebsd | netbsd | openbsd) + syscall_goos="syscall_bsd.go $syscall_goos" + ;; + esac + if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi + ;; + esac + if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi + if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi + if [ -n "$mktypes" ]; then + echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; + fi +) | $run diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh new file mode 100755 index 0000000..3b5e2c0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -0,0 +1,581 @@ +#!/usr/bin/env bash +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Generate Go code listing errors and other #defined constant +# values (ENAMETOOLONG etc.), by asking the preprocessor +# about the definitions. + +unset LANG +export LC_ALL=C +export LC_CTYPE=C + +if test -z "$GOARCH" -o -z "$GOOS"; then + echo 1>&2 "GOARCH or GOOS not defined in environment" + exit 1 +fi + +# Check that we are using the new build system if we should +if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then + if [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then + echo 1>&2 "In the new build system, mkerrors should not be called directly." + echo 1>&2 "See README.md" + exit 1 + fi +fi + +CC=${CC:-cc} + +if [[ "$GOOS" = "solaris" ]]; then + # Assumes GNU versions of utilities in PATH. + export PATH=/usr/gnu/bin:$PATH +fi + +uname=$(uname) + +includes_Darwin=' +#define _DARWIN_C_SOURCE +#define KERNEL +#define _DARWIN_USE_64_BIT_INODE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +' + +includes_DragonFly=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +' + +includes_FreeBSD=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if __FreeBSD__ >= 10 +#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10 +#undef SIOCAIFADDR +#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data +#undef SIOCSIFPHYADDR +#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data +#endif +' + +includes_Linux=' +#define _LARGEFILE_SOURCE +#define _LARGEFILE64_SOURCE +#ifndef __LP64__ +#define _FILE_OFFSET_BITS 64 +#endif +#define _GNU_SOURCE + +// is broken on powerpc64, as it fails to include definitions of +// these structures. We just include them copied from . +#if defined(__powerpc__) +struct sgttyb { + char sg_ispeed; + char sg_ospeed; + char sg_erase; + char sg_kill; + short sg_flags; +}; + +struct tchars { + char t_intrc; + char t_quitc; + char t_startc; + char t_stopc; + char t_eofc; + char t_brkc; +}; + +struct ltchars { + char t_suspc; + char t_dsuspc; + char t_rprntc; + char t_flushc; + char t_werasc; + char t_lnextc; +}; +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef MSG_FASTOPEN +#define MSG_FASTOPEN 0x20000000 +#endif + +#ifndef PTRACE_GETREGS +#define PTRACE_GETREGS 0xc +#endif + +#ifndef PTRACE_SETREGS +#define PTRACE_SETREGS 0xd +#endif + +#ifndef SOL_NETLINK +#define SOL_NETLINK 270 +#endif + +#ifdef SOL_BLUETOOTH +// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h +// but it is already in bluetooth_linux.go +#undef SOL_BLUETOOTH +#endif + +// Certain constants are missing from the fs/crypto UAPI +#define FS_KEY_DESC_PREFIX "fscrypt:" +#define FS_KEY_DESC_PREFIX_SIZE 8 +#define FS_MAX_KEY_SIZE 64 +' + +includes_NetBSD=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Needed since refers to it... +#define schedppq 1 +' + +includes_OpenBSD=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// We keep some constants not supported in OpenBSD 5.5 and beyond for +// the promise of compatibility. +#define EMUL_ENABLED 0x1 +#define EMUL_NATIVE 0x2 +#define IPV6_FAITH 0x1d +#define IPV6_OPTIONS 0x1 +#define IPV6_RTHDR_STRICT 0x1 +#define IPV6_SOCKOPT_RESERVED1 0x3 +#define SIOCGIFGENERIC 0xc020693a +#define SIOCSIFGENERIC 0x80206939 +#define WALTSIG 0x4 +' + +includes_SunOS=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +' + + +includes=' +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +' +ccflags="$@" + +# Write go tool cgo -godefs input. +( + echo package unix + echo + echo '/*' + indirect="includes_$(uname)" + echo "${!indirect} $includes" + echo '*/' + echo 'import "C"' + echo 'import "syscall"' + echo + echo 'const (' + + # The gcc command line prints all the #defines + # it encounters while processing the input + echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags | + awk ' + $1 != "#define" || $2 ~ /\(/ || $3 == "" {next} + + $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers + $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next} + $2 ~ /^(SCM_SRCRT)$/ {next} + $2 ~ /^(MAP_FAILED)$/ {next} + $2 ~ /^ELF_.*$/ {next}# contains ELF_ARCH, etc. + + $2 ~ /^EXTATTR_NAMESPACE_NAMES/ || + $2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next} + + $2 !~ /^ETH_/ && + $2 !~ /^EPROC_/ && + $2 !~ /^EQUIV_/ && + $2 !~ /^EXPR_/ && + $2 ~ /^E[A-Z0-9_]+$/ || + $2 ~ /^B[0-9_]+$/ || + $2 ~ /^(OLD|NEW)DEV$/ || + $2 == "BOTHER" || + $2 ~ /^CI?BAUD(EX)?$/ || + $2 == "IBSHIFT" || + $2 ~ /^V[A-Z0-9]+$/ || + $2 ~ /^CS[A-Z0-9]/ || + $2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ || + $2 ~ /^IGN/ || + $2 ~ /^IX(ON|ANY|OFF)$/ || + $2 ~ /^IN(LCR|PCK)$/ || + $2 !~ "X86_CR3_PCID_NOFLUSH" && + $2 ~ /(^FLU?SH)|(FLU?SH$)/ || + $2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ || + $2 == "BRKINT" || + $2 == "HUPCL" || + $2 == "PENDIN" || + $2 == "TOSTOP" || + $2 == "XCASE" || + $2 == "ALTWERASE" || + $2 == "NOKERNINFO" || + $2 ~ /^PAR/ || + $2 ~ /^SIG[^_]/ || + $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || + $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || + $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^O?XTABS$/ || + $2 ~ /^TC[IO](ON|OFF)$/ || + $2 ~ /^IN_/ || + $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 ~ /^FALLOC_/ || + $2 == "ICMPV6_FILTER" || + $2 == "SOMAXCONN" || + $2 == "NAME_MAX" || + $2 == "IFNAMSIZ" || + $2 ~ /^CTL_(HW|KERN|MAXNAME|NET|QUERY)$/ || + $2 ~ /^KERN_(HOSTNAME|OS(RELEASE|TYPE)|VERSION)$/ || + $2 ~ /^HW_MACHINE$/ || + $2 ~ /^SYSCTL_VERS/ || + $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || + $2 ~ /^(O|F|E?FD|NAME|S|PTRACE|PT)_/ || + $2 ~ /^LINUX_REBOOT_CMD_/ || + $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || + $2 !~ "NLA_TYPE_MASK" && + $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ || + $2 ~ /^SIOC/ || + $2 ~ /^TIOC/ || + $2 ~ /^TCGET/ || + $2 ~ /^TCSET/ || + $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ || + $2 !~ "RTF_BITS" && + $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || + $2 ~ /^BIOC/ || + $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || + $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || + $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || + $2 ~ /^CLONE_[A-Z_]+/ || + $2 !~ /^(BPF_TIMEVAL)$/ && + $2 ~ /^(BPF|DLT)_/ || + $2 ~ /^CLOCK_/ || + $2 ~ /^CAN_/ || + $2 ~ /^CAP_/ || + $2 ~ /^ALG_/ || + $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE|IOC_(GET|SET)_ENCRYPTION)/ || + $2 ~ /^GRND_/ || + $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || + $2 ~ /^KEYCTL_/ || + $2 ~ /^PERF_EVENT_IOC_/ || + $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SPLICE_/ || + $2 ~ /^(VM|VMADDR)_/ || + $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(TASKSTATS|TS)_/ || + $2 ~ /^CGROUPSTATS_/ || + $2 ~ /^GENL_/ || + $2 ~ /^STATX_/ || + $2 ~ /^UTIME_/ || + $2 ~ /^XATTR_(CREATE|REPLACE)/ || + $2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ || + $2 ~ /^FSOPT_/ || + $2 ~ /^WDIOC_/ || + $2 !~ "WMESGLEN" && + $2 ~ /^W[A-Z0-9]+$/ || + $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} + $2 ~ /^__WCOREFLAG$/ {next} + $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} + + {next} + ' | sort + + echo ')' +) >_const.go + +# Pull out the error names for later. +errors=$( + echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' | + sort +) + +# Pull out the signal names for later. +signals=$( + echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | + egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | + sort +) + +# Again, writing regexps to a file. +echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' | + sort >_error.grep +echo '#include ' | $CC -x c - -E -dM $ccflags | + awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | + egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | + sort >_signal.grep + +echo '// mkerrors.sh' "$@" +echo '// Code generated by the command above; see README.md. DO NOT EDIT.' +echo +echo "// +build ${GOARCH},${GOOS}" +echo +go tool cgo -godefs -- "$@" _const.go >_error.out +cat _error.out | grep -vf _error.grep | grep -vf _signal.grep +echo +echo '// Errors' +echo 'const (' +cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/' +echo ')' + +echo +echo '// Signals' +echo 'const (' +cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/' +echo ')' + +# Run C program to print error and syscall strings. +( + echo -E " +#include +#include +#include +#include +#include +#include + +#define nelem(x) (sizeof(x)/sizeof((x)[0])) + +enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below + +int errors[] = { +" + for i in $errors + do + echo -E ' '$i, + done + + echo -E " +}; + +int signals[] = { +" + for i in $signals + do + echo -E ' '$i, + done + + # Use -E because on some systems bash builtin interprets \n itself. + echo -E ' +}; + +static int +intcmp(const void *a, const void *b) +{ + return *(int*)a - *(int*)b; +} + +int +main(void) +{ + int i, e; + char buf[1024], *p; + + printf("\n\n// Error table\n"); + printf("var errors = [...]string {\n"); + qsort(errors, nelem(errors), sizeof errors[0], intcmp); + for(i=0; i 0 && errors[i-1] == e) + continue; + strcpy(buf, strerror(e)); + // lowercase first letter: Bad -> bad, but STREAM -> STREAM. + if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) + buf[0] += a - A; + printf("\t%d: \"%s\",\n", e, buf); + } + printf("}\n\n"); + + printf("\n\n// Signal table\n"); + printf("var signals = [...]string {\n"); + qsort(signals, nelem(signals), sizeof signals[0], intcmp); + for(i=0; i 0 && signals[i-1] == e) + continue; + strcpy(buf, strsignal(e)); + // lowercase first letter: Bad -> bad, but STREAM -> STREAM. + if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) + buf[0] += a - A; + // cut trailing : number. + p = strrchr(buf, ":"[0]); + if(p) + *p = '\0'; + printf("\t%d: \"%s\",\n", e, buf); + } + printf("}\n\n"); + + return 0; +} + +' +) >_errors.c + +$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go new file mode 100644 index 0000000..23590bd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mkpost.go @@ -0,0 +1,97 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// mkpost processes the output of cgo -godefs to +// modify the generated types. It is used to clean up +// the sys API in an architecture specific manner. +// +// mkpost is run after cgo -godefs; see README.md. +package main + +import ( + "bytes" + "fmt" + "go/format" + "io/ioutil" + "log" + "os" + "regexp" +) + +func main() { + // Get the OS and architecture (using GOARCH_TARGET if it exists) + goos := os.Getenv("GOOS") + goarch := os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + // Check that we are using the new build system if we should be. + if goos == "linux" && goarch != "sparc64" { + if os.Getenv("GOLANG_SYS_BUILD") != "docker" { + os.Stderr.WriteString("In the new build system, mkpost should not be called directly.\n") + os.Stderr.WriteString("See README.md\n") + os.Exit(1) + } + } + + b, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Fatal(err) + } + + // If we have empty Ptrace structs, we should delete them. Only s390x emits + // nonempty Ptrace structs. + ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) + b = ptraceRexexp.ReplaceAll(b, nil) + + // Replace the control_regs union with a blank identifier for now. + controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) + b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) + + // Remove fields that are added by glibc + // Note that this is unstable as the identifers are private. + removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) + b = removeFieldsRegex.ReplaceAll(b, []byte("_")) + + // Convert [65]int8 to [65]byte in Utsname members to simplify + // conversion to string; see golang.org/issue/20753 + convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) + b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) + + // Remove spare fields (e.g. in Statx_t) + spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) + b = spareFieldsRegex.ReplaceAll(b, []byte("_")) + + // Remove cgo padding fields + removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) + b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) + + // We refuse to export private fields on s390x + if goarch == "s390x" && goos == "linux" { + // Remove padding, hidden, or unused fields + removeFieldsRegex = regexp.MustCompile(`\bX_\S+`) + b = removeFieldsRegex.ReplaceAll(b, []byte("_")) + } + + // Remove the first line of warning from cgo + b = b[bytes.IndexByte(b, '\n')+1:] + // Modify the command in the header to include: + // mkpost, our own warning, and a build tag. + replacement := fmt.Sprintf(`$1 | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s,%s`, goarch, goos) + cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) + b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) + + // gofmt + b, err = format.Source(b) + if err != nil { + log.Fatal(err) + } + + os.Stdout.Write(b) +} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl new file mode 100755 index 0000000..1f6b926 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall.pl @@ -0,0 +1,341 @@ +#!/usr/bin/env perl +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# This program reads a file containing function prototypes +# (like syscall_darwin.go) and generates system call bodies. +# The prototypes are marked by lines beginning with "//sys" +# and read like func declarations if //sys is replaced by func, but: +# * The parameter lists must give a name for each argument. +# This includes return parameters. +# * The parameter lists must give a type for each argument: +# the (x, y, z int) shorthand is not allowed. +# * If the return parameter is an error number, it must be named errno. + +# A line beginning with //sysnb is like //sys, except that the +# goroutine will not be suspended during the execution of the system +# call. This must only be used for system calls which can never +# block, as otherwise the system call could cause all goroutines to +# hang. + +use strict; + +my $cmdline = "mksyscall.pl " . join(' ', @ARGV); +my $errors = 0; +my $_32bit = ""; +my $plan9 = 0; +my $openbsd = 0; +my $netbsd = 0; +my $dragonfly = 0; +my $arm = 0; # 64-bit value should use (even, odd)-pair +my $tags = ""; # build tags + +if($ARGV[0] eq "-b32") { + $_32bit = "big-endian"; + shift; +} elsif($ARGV[0] eq "-l32") { + $_32bit = "little-endian"; + shift; +} +if($ARGV[0] eq "-plan9") { + $plan9 = 1; + shift; +} +if($ARGV[0] eq "-openbsd") { + $openbsd = 1; + shift; +} +if($ARGV[0] eq "-netbsd") { + $netbsd = 1; + shift; +} +if($ARGV[0] eq "-dragonfly") { + $dragonfly = 1; + shift; +} +if($ARGV[0] eq "-arm") { + $arm = 1; + shift; +} +if($ARGV[0] eq "-tags") { + shift; + $tags = $ARGV[0]; + shift; +} + +if($ARGV[0] =~ /^-/) { + print STDERR "usage: mksyscall.pl [-b32 | -l32] [-tags x,y] [file ...]\n"; + exit 1; +} + +# Check that we are using the new build system if we should +if($ENV{'GOOS'} eq "linux" && $ENV{'GOARCH'} ne "sparc64") { + if($ENV{'GOLANG_SYS_BUILD'} ne "docker") { + print STDERR "In the new build system, mksyscall should not be called directly.\n"; + print STDERR "See README.md\n"; + exit 1; + } +} + + +sub parseparamlist($) { + my ($list) = @_; + $list =~ s/^\s*//; + $list =~ s/\s*$//; + if($list eq "") { + return (); + } + return split(/\s*,\s*/, $list); +} + +sub parseparam($) { + my ($p) = @_; + if($p !~ /^(\S*) (\S*)$/) { + print STDERR "$ARGV:$.: malformed parameter: $p\n"; + $errors = 1; + return ("xx", "int"); + } + return ($1, $2); +} + +my $text = ""; +while(<>) { + chomp; + s/\s+/ /g; + s/^\s+//; + s/\s+$//; + my $nonblock = /^\/\/sysnb /; + next if !/^\/\/sys / && !$nonblock; + + # Line must be of the form + # func Open(path string, mode int, perm int) (fd int, errno error) + # Split into name, in params, out params. + if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) { + print STDERR "$ARGV:$.: malformed //sys declaration\n"; + $errors = 1; + next; + } + my ($func, $in, $out, $sysname) = ($2, $3, $4, $5); + + # Split argument lists on comma. + my @in = parseparamlist($in); + my @out = parseparamlist($out); + + # Try in vain to keep people from editing this file. + # The theory is that they jump into the middle of the file + # without reading the header. + $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; + + # Go function header. + my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : ""; + $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; + + # Check if err return available + my $errvar = ""; + foreach my $p (@out) { + my ($name, $type) = parseparam($p); + if($type eq "error") { + $errvar = $name; + last; + } + } + + # Prepare arguments to Syscall. + my @args = (); + my $n = 0; + foreach my $p (@in) { + my ($name, $type) = parseparam($p); + if($type =~ /^\*/) { + push @args, "uintptr(unsafe.Pointer($name))"; + } elsif($type eq "string" && $errvar ne "") { + $text .= "\tvar _p$n *byte\n"; + $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n"; + $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type eq "string") { + print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; + $text .= "\tvar _p$n *byte\n"; + $text .= "\t_p$n, _ = BytePtrFromString($name)\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type =~ /^\[\](.*)/) { + # Convert slice into pointer, length. + # Have to be careful not to take address of &a[0] if len == 0: + # pass dummy pointer in that case. + # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). + $text .= "\tvar _p$n unsafe.Pointer\n"; + $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}"; + $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}"; + $text .= "\n"; + push @args, "uintptr(_p$n)", "uintptr(len($name))"; + $n++; + } elsif($type eq "int64" && ($openbsd || $netbsd)) { + push @args, "0"; + if($_32bit eq "big-endian") { + push @args, "uintptr($name>>32)", "uintptr($name)"; + } elsif($_32bit eq "little-endian") { + push @args, "uintptr($name)", "uintptr($name>>32)"; + } else { + push @args, "uintptr($name)"; + } + } elsif($type eq "int64" && $dragonfly) { + if ($func !~ /^extp(read|write)/i) { + push @args, "0"; + } + if($_32bit eq "big-endian") { + push @args, "uintptr($name>>32)", "uintptr($name)"; + } elsif($_32bit eq "little-endian") { + push @args, "uintptr($name)", "uintptr($name>>32)"; + } else { + push @args, "uintptr($name)"; + } + } elsif($type eq "int64" && $_32bit ne "") { + if(@args % 2 && $arm) { + # arm abi specifies 64-bit argument uses + # (even, odd) pair + push @args, "0" + } + if($_32bit eq "big-endian") { + push @args, "uintptr($name>>32)", "uintptr($name)"; + } else { + push @args, "uintptr($name)", "uintptr($name>>32)"; + } + } else { + push @args, "uintptr($name)"; + } + } + + # Determine which form to use; pad args with zeros. + my $asm = "Syscall"; + if ($nonblock) { + if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { + $asm = "RawSyscallNoError"; + } else { + $asm = "RawSyscall"; + } + } else { + if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { + $asm = "SyscallNoError"; + } + } + if(@args <= 3) { + while(@args < 3) { + push @args, "0"; + } + } elsif(@args <= 6) { + $asm .= "6"; + while(@args < 6) { + push @args, "0"; + } + } elsif(@args <= 9) { + $asm .= "9"; + while(@args < 9) { + push @args, "0"; + } + } else { + print STDERR "$ARGV:$.: too many arguments to system call\n"; + } + + # System call number. + if($sysname eq "") { + $sysname = "SYS_$func"; + $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar + $sysname =~ y/a-z/A-Z/; + } + + # Actual call. + my $args = join(', ', @args); + my $call = "$asm($sysname, $args)"; + + # Assign return values. + my $body = ""; + my @ret = ("_", "_", "_"); + my $do_errno = 0; + for(my $i=0; $i<@out; $i++) { + my $p = $out[$i]; + my ($name, $type) = parseparam($p); + my $reg = ""; + if($name eq "err" && !$plan9) { + $reg = "e1"; + $ret[2] = $reg; + $do_errno = 1; + } elsif($name eq "err" && $plan9) { + $ret[0] = "r0"; + $ret[2] = "e1"; + next; + } else { + $reg = sprintf("r%d", $i); + $ret[$i] = $reg; + } + if($type eq "bool") { + $reg = "$reg != 0"; + } + if($type eq "int64" && $_32bit ne "") { + # 64-bit number in r1:r0 or r0:r1. + if($i+2 > @out) { + print STDERR "$ARGV:$.: not enough registers for int64 return\n"; + } + if($_32bit eq "big-endian") { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); + } else { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); + } + $ret[$i] = sprintf("r%d", $i); + $ret[$i+1] = sprintf("r%d", $i+1); + } + if($reg ne "e1" || $plan9) { + $body .= "\t$name = $type($reg)\n"; + } + } + if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { + $text .= "\t$call\n"; + } else { + if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { + # raw syscall without error on Linux, see golang.org/issue/22924 + $text .= "\t$ret[0], $ret[1] := $call\n"; + } else { + $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; + } + } + $text .= $body; + + if ($plan9 && $ret[2] eq "e1") { + $text .= "\tif int32(r0) == -1 {\n"; + $text .= "\t\terr = e1\n"; + $text .= "\t}\n"; + } elsif ($do_errno) { + $text .= "\tif e1 != 0 {\n"; + $text .= "\t\terr = errnoErr(e1)\n"; + $text .= "\t}\n"; + } + $text .= "\treturn\n"; + $text .= "}\n\n"; +} + +chomp $text; +chomp $text; + +if($errors) { + exit 1; +} + +print <) { + chomp; + s/\s+/ /g; + s/^\s+//; + s/\s+$//; + $package = $1 if !$package && /^package (\S+)$/; + my $nonblock = /^\/\/sysnb /; + next if !/^\/\/sys / && !$nonblock; + + # Line must be of the form + # func Open(path string, mode int, perm int) (fd int, err error) + # Split into name, in params, out params. + if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) { + print STDERR "$ARGV:$.: malformed //sys declaration\n"; + $errors = 1; + next; + } + my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6); + + # Split argument lists on comma. + my @in = parseparamlist($in); + my @out = parseparamlist($out); + + # So file name. + if($modname eq "") { + $modname = "libc"; + } + + # System call name. + if($sysname eq "") { + $sysname = "$func"; + } + + # System call pointer variable name. + my $sysvarname = "proc$sysname"; + + my $strconvfunc = "BytePtrFromString"; + my $strconvtype = "*byte"; + + $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. + + # Runtime import of function to allow cross-platform builds. + $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n"; + # Link symbol to proc address variable. + $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n"; + # Library proc address variable. + push @vars, $sysvarname; + + # Go function header. + $out = join(', ', @out); + if($out ne "") { + $out = " ($out)"; + } + if($text ne "") { + $text .= "\n" + } + $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out; + + # Check if err return available + my $errvar = ""; + foreach my $p (@out) { + my ($name, $type) = parseparam($p); + if($type eq "error") { + $errvar = $name; + last; + } + } + + # Prepare arguments to Syscall. + my @args = (); + my $n = 0; + foreach my $p (@in) { + my ($name, $type) = parseparam($p); + if($type =~ /^\*/) { + push @args, "uintptr(unsafe.Pointer($name))"; + } elsif($type eq "string" && $errvar ne "") { + $text .= "\tvar _p$n $strconvtype\n"; + $text .= "\t_p$n, $errvar = $strconvfunc($name)\n"; + $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type eq "string") { + print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; + $text .= "\tvar _p$n $strconvtype\n"; + $text .= "\t_p$n, _ = $strconvfunc($name)\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))"; + $n++; + } elsif($type =~ /^\[\](.*)/) { + # Convert slice into pointer, length. + # Have to be careful not to take address of &a[0] if len == 0: + # pass nil in that case. + $text .= "\tvar _p$n *$1\n"; + $text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n"; + push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))"; + $n++; + } elsif($type eq "int64" && $_32bit ne "") { + if($_32bit eq "big-endian") { + push @args, "uintptr($name >> 32)", "uintptr($name)"; + } else { + push @args, "uintptr($name)", "uintptr($name >> 32)"; + } + } elsif($type eq "bool") { + $text .= "\tvar _p$n uint32\n"; + $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n"; + push @args, "uintptr(_p$n)"; + $n++; + } else { + push @args, "uintptr($name)"; + } + } + my $nargs = @args; + + # Determine which form to use; pad args with zeros. + my $asm = "sysvicall6"; + if ($nonblock) { + $asm = "rawSysvicall6"; + } + if(@args <= 6) { + while(@args < 6) { + push @args, "0"; + } + } else { + print STDERR "$ARGV:$.: too many arguments to system call\n"; + } + + # Actual call. + my $args = join(', ', @args); + my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)"; + + # Assign return values. + my $body = ""; + my $failexpr = ""; + my @ret = ("_", "_", "_"); + my @pout= (); + my $do_errno = 0; + for(my $i=0; $i<@out; $i++) { + my $p = $out[$i]; + my ($name, $type) = parseparam($p); + my $reg = ""; + if($name eq "err") { + $reg = "e1"; + $ret[2] = $reg; + $do_errno = 1; + } else { + $reg = sprintf("r%d", $i); + $ret[$i] = $reg; + } + if($type eq "bool") { + $reg = "$reg != 0"; + } + if($type eq "int64" && $_32bit ne "") { + # 64-bit number in r1:r0 or r0:r1. + if($i+2 > @out) { + print STDERR "$ARGV:$.: not enough registers for int64 return\n"; + } + if($_32bit eq "big-endian") { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); + } else { + $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); + } + $ret[$i] = sprintf("r%d", $i); + $ret[$i+1] = sprintf("r%d", $i+1); + } + if($reg ne "e1") { + $body .= "\t$name = $type($reg)\n"; + } + } + if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { + $text .= "\t$call\n"; + } else { + $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; + } + $text .= $body; + + if ($do_errno) { + $text .= "\tif e1 != 0 {\n"; + $text .= "\t\terr = e1\n"; + $text .= "\t}\n"; + } + $text .= "\treturn\n"; + $text .= "}\n"; +} + +if($errors) { + exit 1; +} + +print < "net.inet", + "net.inet.ipproto" => "net.inet", + "net.inet6.ipv6proto" => "net.inet6", + "net.inet6.ipv6" => "net.inet6.ip6", + "net.inet.icmpv6" => "net.inet6.icmp6", + "net.inet6.divert6" => "net.inet6.divert", + "net.inet6.tcp6" => "net.inet.tcp", + "net.inet6.udp6" => "net.inet.udp", + "mpls" => "net.mpls", + "swpenc" => "vm.swapencrypt" +); + +# Node mappings +my %node_map = ( + "net.inet.ip.ifq" => "net.ifq", + "net.inet.pfsync" => "net.pfsync", + "net.mpls.ifq" => "net.ifq" +); + +my $ctlname; +my %mib = (); +my %sysctl = (); +my $node; + +sub debug() { + print STDERR "$_[0]\n" if $debug; +} + +# Walk the MIB and build a sysctl name to OID mapping. +sub build_sysctl() { + my ($node, $name, $oid) = @_; + my %node = %{$node}; + my @oid = @{$oid}; + + foreach my $key (sort keys %node) { + my @node = @{$node{$key}}; + my $nodename = $name.($name ne '' ? '.' : '').$key; + my @nodeoid = (@oid, $node[0]); + if ($node[1] eq 'CTLTYPE_NODE') { + if (exists $node_map{$nodename}) { + $node = \%mib; + $ctlname = $node_map{$nodename}; + foreach my $part (split /\./, $ctlname) { + $node = \%{@{$$node{$part}}[2]}; + } + } else { + $node = $node[2]; + } + &build_sysctl($node, $nodename, \@nodeoid); + } elsif ($node[1] ne '') { + $sysctl{$nodename} = \@nodeoid; + } + } +} + +foreach my $ctl (@ctls) { + $ctls{$ctl} = $ctl; +} + +# Build MIB +foreach my $header (@headers) { + &debug("Processing $header..."); + open HEADER, "/usr/include/$header" || + print STDERR "Failed to open $header\n"; + while (

    ) { + if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ || + $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ || + $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) { + if ($1 eq 'CTL_NAMES') { + # Top level. + $node = \%mib; + } else { + # Node. + my $nodename = lc($2); + if ($header =~ /^netinet\//) { + $ctlname = "net.inet.$nodename"; + } elsif ($header =~ /^netinet6\//) { + $ctlname = "net.inet6.$nodename"; + } elsif ($header =~ /^net\//) { + $ctlname = "net.$nodename"; + } else { + $ctlname = "$nodename"; + $ctlname =~ s/^(fs|net|kern)_/$1\./; + } + if (exists $ctl_map{$ctlname}) { + $ctlname = $ctl_map{$ctlname}; + } + if (not exists $ctls{$ctlname}) { + &debug("Ignoring $ctlname..."); + next; + } + + # Walk down from the top of the MIB. + $node = \%mib; + foreach my $part (split /\./, $ctlname) { + if (not exists $$node{$part}) { + &debug("Missing node $part"); + $$node{$part} = [ 0, '', {} ]; + } + $node = \%{@{$$node{$part}}[2]}; + } + } + + # Populate current node with entries. + my $i = -1; + while (defined($_) && $_ !~ /^}/) { + $_ =
    ; + $i++ if $_ =~ /{.*}/; + next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/; + $$node{$1} = [ $i, $2, {} ]; + } + } + } + close HEADER; +} + +&build_sysctl(\%mib, "", []); + +print <){ + if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){ + my $name = $1; + my $num = $2; + $name =~ y/a-z/A-Z/; + print " SYS_$name = $num;" + } +} + +print <){ + if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){ + my $num = $1; + my $proto = $2; + my $name = "SYS_$3"; + $name =~ y/a-z/A-Z/; + + # There are multiple entries for enosys and nosys, so comment them out. + if($name =~ /^SYS_E?NOSYS$/){ + $name = "// $name"; + } + if($name eq 'SYS_SYS_EXIT'){ + $name = 'SYS_EXIT'; + } + + print " $name = $num; // $proto\n"; + } +} + +print <){ + if(/^([0-9]+)\s+\S+\s+STD\s+({ \S+\s+(\w+).*)$/){ + my $num = $1; + my $proto = $2; + my $name = "SYS_$3"; + $name =~ y/a-z/A-Z/; + + # There are multiple entries for enosys and nosys, so comment them out. + if($name =~ /^SYS_E?NOSYS$/){ + $name = "// $name"; + } + if($name eq 'SYS_SYS_EXIT'){ + $name = 'SYS_EXIT'; + } + + print " $name = $num; // $proto\n"; + } +} + +print <){ + if($line =~ /^(.*)\\$/) { + # Handle continuation + $line = $1; + $_ =~ s/^\s+//; + $line .= $_; + } else { + # New line + $line = $_; + } + next if $line =~ /\\$/; + if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) { + my $num = $1; + my $proto = $6; + my $compat = $8; + my $name = "$7_$9"; + + $name = "$7_$11" if $11 ne ''; + $name =~ y/a-z/A-Z/; + + if($compat eq '' || $compat eq '13' || $compat eq '30' || $compat eq '50') { + print " $name = $num; // $proto\n"; + } + } +} + +print <){ + if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){ + my $num = $1; + my $proto = $3; + my $name = $4; + $name =~ y/a-z/A-Z/; + + # There are multiple entries for enosys and nosys, so comment them out. + if($name =~ /^SYS_E?NOSYS$/){ + $name = "// $name"; + } + if($name eq 'SYS_SYS_EXIT'){ + $name = 'SYS_EXIT'; + } + + print " $name = $num; // $proto\n"; + } +} + +print < uint64(len(b)) { + return nil, nil, EINVAL + } + return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil +} + +// UnixRights encodes a set of open file descriptors into a socket +// control message for sending to another process. +func UnixRights(fds ...int) []byte { + datalen := len(fds) * 4 + b := make([]byte, CmsgSpace(datalen)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_SOCKET + h.Type = SCM_RIGHTS + h.SetLen(CmsgLen(datalen)) + data := cmsgData(h) + for _, fd := range fds { + *(*int32)(data) = int32(fd) + data = unsafe.Pointer(uintptr(data) + 4) + } + return b +} + +// ParseUnixRights decodes a socket control message that contains an +// integer array of open file descriptors from another process. +func ParseUnixRights(m *SocketControlMessage) ([]int, error) { + if m.Header.Level != SOL_SOCKET { + return nil, EINVAL + } + if m.Header.Type != SCM_RIGHTS { + return nil, EINVAL + } + fds := make([]int, len(m.Data)>>2) + for i, j := 0, 0; i < len(m.Data); i += 4 { + fds[j] = int(*(*int32)(unsafe.Pointer(&m.Data[i]))) + j++ + } + return fds, nil +} diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go new file mode 100644 index 0000000..35ed664 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/str.go @@ -0,0 +1,26 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +func itoa(val int) string { // do it here rather than with fmt to avoid dependency + if val < 0 { + return "-" + uitoa(uint(-val)) + } + return uitoa(uint(val)) +} + +func uitoa(val uint) string { + var buf [32]byte // big enough for int64 + i := len(buf) - 1 + for val >= 10 { + buf[i] = byte(val%10 + '0') + i-- + val /= 10 + } + buf[i] = byte(val + '0') + return string(buf[i:]) +} diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go new file mode 100644 index 0000000..857d2a4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -0,0 +1,51 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +// Package unix contains an interface to the low-level operating system +// primitives. OS details vary depending on the underlying system, and +// by default, godoc will display OS-specific documentation for the current +// system. If you want godoc to display OS documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if +// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS +// to freebsd and $GOARCH to arm. +// The primary use of this package is inside other packages that provide a more +// portable interface to the system, such as "os", "time" and "net". Use +// those packages rather than this one if you can. +// For details of the functions and data types in this package consult +// the manuals for the appropriate operating system. +// These calls return err == nil to indicate success; otherwise +// err represents an operating system error describing the failure and +// holds a value of type syscall.Errno. +package unix // import "golang.org/x/sys/unix" + +// ByteSliceFromString returns a NUL-terminated slice of bytes +// containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, EINVAL). +func ByteSliceFromString(s string) ([]byte, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, EINVAL + } + } + a := make([]byte, len(s)+1) + copy(a, s) + return a, nil +} + +// BytePtrFromString returns a pointer to a NUL-terminated array of +// bytes containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, EINVAL). +func BytePtrFromString(s string) (*byte, error) { + a, err := ByteSliceFromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mkunix.pl. +var _zero uintptr diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go new file mode 100644 index 0000000..d3903ed --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -0,0 +1,665 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +// BSD system call wrappers shared by *BSD based systems +// including OS X (Darwin) and FreeBSD. Like the other +// syscall_*.go files it is compiled as Go code but also +// used as input to mksyscall which parses the //sys +// lines and generates system call stubs. + +package unix + +import ( + "runtime" + "syscall" + "unsafe" +) + +/* + * Wrapped + */ + +//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error) +//sysnb setgroups(ngid int, gid *_Gid_t) (err error) + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + if err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Sanity check group count. Max is 16 on BSD. + if n < 0 || n > 1000 { + return nil, EINVAL + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if err != nil { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +func ReadDirent(fd int, buf []byte) (n int, err error) { + // Final argument is (basep *uintptr) and the syscall doesn't take nil. + // 64 bits should be enough. (32 bits isn't even on 386). Since the + // actual system call is getdirentries64, 64 is a good guess. + // TODO(rsc): Can we use a single global basep for all calls? + var base = (*uintptr)(unsafe.Pointer(new(uint64))) + return Getdirentries(fd, buf, base) +} + +// Wait status is 7 bits at bottom, either 0 (exited), +// 0x7F (stopped), or a signal number that caused an exit. +// The 0x80 bit is whether there was a core dump. +// An extra number (exit code, signal causing a stop) +// is in the high bits. + +type WaitStatus uint32 + +const ( + mask = 0x7F + core = 0x80 + shift = 8 + + exited = 0 + stopped = 0x7F +) + +func (w WaitStatus) Exited() bool { return w&mask == exited } + +func (w WaitStatus) ExitStatus() int { + if w&mask != exited { + return -1 + } + return int(w >> shift) +} + +func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != 0 } + +func (w WaitStatus) Signal() syscall.Signal { + sig := syscall.Signal(w & mask) + if sig == stopped || sig == 0 { + return -1 + } + return sig +} + +func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } + +func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP } + +func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP } + +func (w WaitStatus) StopSignal() syscall.Signal { + if !w.Stopped() { + return -1 + } + return syscall.Signal(w>>shift) & 0xFF +} + +func (w WaitStatus) TrapCause() int { return -1 } + +//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) + +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + var status _C_int + wpid, err = wait4(pid, &status, options, rusage) + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return +} + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys Shutdown(s int, how int) (err error) + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet4 + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet6 + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n >= len(sa.raw.Path) || n == 0 { + return nil, 0, EINVAL + } + sa.raw.Len = byte(3 + n) // 2 for Family, Len; 1 for NUL + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Index == 0 { + return nil, 0, EINVAL + } + sa.raw.Len = sa.Len + sa.raw.Family = AF_LINK + sa.raw.Index = sa.Index + sa.raw.Type = sa.Type + sa.raw.Nlen = sa.Nlen + sa.raw.Alen = sa.Alen + sa.raw.Slen = sa.Slen + for i := 0; i < len(sa.raw.Data); i++ { + sa.raw.Data[i] = sa.Data[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil +} + +func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_LINK: + pp := (*RawSockaddrDatalink)(unsafe.Pointer(rsa)) + sa := new(SockaddrDatalink) + sa.Len = pp.Len + sa.Family = pp.Family + sa.Index = pp.Index + sa.Type = pp.Type + sa.Nlen = pp.Nlen + sa.Alen = pp.Alen + sa.Slen = pp.Slen + for i := 0; i < len(sa.Data); i++ { + sa.Data[i] = pp.Data[i] + } + return sa, nil + + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + if pp.Len < 2 || pp.Len > SizeofSockaddrUnix { + return nil, EINVAL + } + sa := new(SockaddrUnix) + + // Some BSDs include the trailing NUL in the length, whereas + // others do not. Work around this by subtracting the leading + // family and len. The path is then scanned to see if a NUL + // terminator still exists within the length. + n := int(pp.Len) - 2 // subtract leading Family, Len + for i := 0; i < n; i++ { + if pp.Path[i] == 0 { + // found early NUL; assume Len included the NUL + // or was overestimating. + n = i + break + } + } + bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if err != nil { + return + } + if runtime.GOOS == "darwin" && len == 0 { + // Accepted socket has no address. + // This is likely due to a bug in xnu kernels, + // where instead of ECONNABORTED error socket + // is accepted, but has no address. + Close(nfd) + return 0, nil, ECONNABORTED + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + // TODO(jsing): DragonFly has a "bug" (see issue 3349), which should be + // reported upstream. + if runtime.GOOS == "dragonfly" && rsa.Addr.Family == AF_UNSPEC && rsa.Addr.Len == 0 { + rsa.Addr.Family = AF_UNIX + rsa.Addr.Len = SizeofSockaddrUnix + } + return anyToSockaddr(&rsa) +} + +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) + +func GetsockoptByte(fd, level, opt int) (value byte, err error) { + var n byte + vallen := _Socklen(1) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + +func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + return value, err +} + +func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { + var value IPMreq + vallen := _Socklen(SizeofIPMreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { + var value IPv6Mreq + vallen := _Socklen(SizeofIPv6Mreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { + var value IPv6MTUInfo + vallen := _Socklen(SizeofIPv6MTUInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { + var value ICMPv6Filter + vallen := _Socklen(SizeofICMPv6Filter) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +// GetsockoptString returns the string value of the socket option opt for the +// socket associated with fd at the given socket level. +func GetsockoptString(fd, level, opt int) (string, error) { + buf := make([]byte, 256) + vallen := _Socklen(len(buf)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + if err != nil { + return "", err + } + return string(buf[:vallen-1]), nil +} + +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); err != nil { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(&rsa) + } + return +} + +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // send at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +//sys kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) + +func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err error) { + var change, event unsafe.Pointer + if len(changes) > 0 { + change = unsafe.Pointer(&changes[0]) + } + if len(events) > 0 { + event = unsafe.Pointer(&events[0]) + } + return kevent(kq, change, len(changes), event, len(events), timeout) +} + +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + +// sysctlmib translates name to mib number and appends any additional args. +func sysctlmib(name string, args ...int) ([]_C_int, error) { + // Translate name to mib number. + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + for _, a := range args { + mib = append(mib, _C_int(a)) + } + + return mib, nil +} + +func Sysctl(name string) (string, error) { + return SysctlArgs(name) +} + +func SysctlArgs(name string, args ...int) (string, error) { + buf, err := SysctlRaw(name, args...) + if err != nil { + return "", err + } + n := len(buf) + + // Throw away terminating NUL. + if n > 0 && buf[n-1] == '\x00' { + n-- + } + return string(buf[0:n]), nil +} + +func SysctlUint32(name string) (uint32, error) { + return SysctlUint32Args(name) +} + +func SysctlUint32Args(name string, args ...int) (uint32, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return 0, err + } + + n := uintptr(4) + buf := make([]byte, 4) + if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return 0, err + } + if n != 4 { + return 0, EIO + } + return *(*uint32)(unsafe.Pointer(&buf[0])), nil +} + +func SysctlUint64(name string, args ...int) (uint64, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return 0, err + } + + n := uintptr(8) + buf := make([]byte, 8) + if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return 0, err + } + if n != 8 { + return 0, EIO + } + return *(*uint64)(unsafe.Pointer(&buf[0])), nil +} + +func SysctlRaw(name string, args ...int) ([]byte, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return nil, err + } + + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Read into buffer of that size. + buf := make([]byte, n) + if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return nil, err + } + + // The actual call may return less than the original reported required + // size so ensure we deal with that. + return buf[:n], nil +} + +//sys utimes(path string, timeval *[2]Timeval) (err error) + +func Utimes(path string, tv []Timeval) error { + if tv == nil { + return utimes(path, nil) + } + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func UtimesNano(path string, ts []Timespec) error { + if ts == nil { + err := utimensat(AT_FDCWD, path, nil, 0) + if err != ENOSYS { + return err + } + return utimes(path, nil) + } + if len(ts) != 2 { + return EINVAL + } + // Darwin setattrlist can set nanosecond timestamps + err := setattrlistTimes(path, ts, 0) + if err != ENOSYS { + return err + } + err = utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) + if err != ENOSYS { + return err + } + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := [2]Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + err := setattrlistTimes(path, ts, flags) + if err != ENOSYS { + return err + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +//sys futimes(fd int, timeval *[2]Timeval) (err error) + +func Futimes(fd int, tv []Timeval) error { + if tv == nil { + return futimes(fd, nil) + } + if len(tv) != 2 { + return EINVAL + } + return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +//sys fcntl(fd int, cmd int, arg int) (val int, err error) + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + +// TODO: wrap +// Acct(name nil-string) (err error) +// Gethostuuid(uuid *byte, timeout *Timespec) (err error) +// Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error) + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + +//sys Madvise(b []byte, behav int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Msync(b []byte, flags int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd_test.go b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go new file mode 100644 index 0000000..6c4e2ac --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package unix_test + +import ( + "os/exec" + "runtime" + "testing" + "time" + + "golang.org/x/sys/unix" +) + +const MNT_WAIT = 1 +const MNT_NOWAIT = 2 + +func TestGetfsstat(t *testing.T) { + const flags = MNT_NOWAIT // see golang.org/issue/16937 + n, err := unix.Getfsstat(nil, flags) + if err != nil { + t.Fatal(err) + } + + data := make([]unix.Statfs_t, n) + n2, err := unix.Getfsstat(data, flags) + if err != nil { + t.Fatal(err) + } + if n != n2 { + t.Errorf("Getfsstat(nil) = %d, but subsequent Getfsstat(slice) = %d", n, n2) + } + for i, stat := range data { + if stat == (unix.Statfs_t{}) { + t.Errorf("index %v is an empty Statfs_t struct", i) + } + } + if t.Failed() { + for i, stat := range data[:n2] { + t.Logf("data[%v] = %+v", i, stat) + } + mount, err := exec.Command("mount").CombinedOutput() + if err != nil { + t.Logf("mount: %v\n%s", err, mount) + } else { + t.Logf("mount: %s", mount) + } + } +} + +func TestSelect(t *testing.T) { + err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0}) + if err != nil { + t.Fatalf("Select: %v", err) + } + + dur := 250 * time.Millisecond + tv := unix.NsecToTimeval(int64(dur)) + start := time.Now() + err = unix.Select(0, nil, nil, nil, &tv) + took := time.Since(start) + if err != nil { + t.Fatalf("Select: %v", err) + } + + // On some BSDs the actual timeout might also be slightly less than the requested. + // Add an acceptable margin to avoid flaky tests. + if took < dur*2/3 { + t.Errorf("Select: timeout should have been at least %v, got %v", dur, took) + } +} + +func TestSysctlRaw(t *testing.T) { + if runtime.GOOS == "openbsd" { + t.Skip("kern.proc.pid does not exist on OpenBSD") + } + + _, err := unix.SysctlRaw("kern.proc.pid", unix.Getpid()) + if err != nil { + t.Fatal(err) + } +} + +func TestSysctlUint32(t *testing.T) { + maxproc, err := unix.SysctlUint32("kern.maxproc") + if err != nil { + t.Fatal(err) + } + t.Logf("kern.maxproc: %v", maxproc) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go new file mode 100644 index 0000000..006e21f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -0,0 +1,602 @@ +// Copyright 2009,2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Darwin system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_bsd.go or syscall_unix.go. + +package unix + +import ( + errorspkg "errors" + "syscall" + "unsafe" +) + +const ImplementsGetwd = true + +func Getwd() (string, error) { + buf := make([]byte, 2048) + attrs, err := getAttrList(".", attrList{CommonAttr: attrCmnFullpath}, buf, 0) + if err == nil && len(attrs) == 1 && len(attrs[0]) >= 2 { + wd := string(attrs[0]) + // Sanity check that it's an absolute path and ends + // in a null byte, which we then strip. + if wd[0] == '/' && wd[len(wd)-1] == 0 { + return wd[:len(wd)-1], nil + } + } + // If pkg/os/getwd.go gets ENOTSUP, it will fall back to the + // slow algorithm. + return "", ENOTSUP +} + +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. +type SockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 + raw RawSockaddrDatalink +} + +// Translate "kern.hostname" to []_C_int{0,1,2,3}. +func nametomib(name string) (mib []_C_int, err error) { + const siz = unsafe.Sizeof(mib[0]) + + // NOTE(rsc): It seems strange to set the buffer to have + // size CTL_MAXNAME+2 but use only CTL_MAXNAME + // as the size. I don't know why the +2 is here, but the + // kernel uses +2 for its own implementation of this function. + // I am scared that if we don't include the +2 here, the kernel + // will silently write 2 words farther than we specify + // and we'll get memory corruption. + var buf [CTL_MAXNAME + 2]_C_int + n := uintptr(CTL_MAXNAME) * siz + + p := (*byte)(unsafe.Pointer(&buf[0])) + bytes, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + + // Magic sysctl: "setting" 0.3 to a string name + // lets you read back the array of integers form. + if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil { + return nil, err + } + return buf[0 : n/siz], nil +} + +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } +func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } + +const ( + attrBitMapCount = 5 + attrCmnFullpath = 0x08000000 +) + +type attrList struct { + bitmapCount uint16 + _ uint16 + CommonAttr uint32 + VolAttr uint32 + DirAttr uint32 + FileAttr uint32 + Forkattr uint32 +} + +func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (attrs [][]byte, err error) { + if len(attrBuf) < 4 { + return nil, errorspkg.New("attrBuf too small") + } + attrList.bitmapCount = attrBitMapCount + + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return nil, err + } + + _, _, e1 := Syscall6( + SYS_GETATTRLIST, + uintptr(unsafe.Pointer(_p0)), + uintptr(unsafe.Pointer(&attrList)), + uintptr(unsafe.Pointer(&attrBuf[0])), + uintptr(len(attrBuf)), + uintptr(options), + 0, + ) + if e1 != 0 { + return nil, e1 + } + size := *(*uint32)(unsafe.Pointer(&attrBuf[0])) + + // dat is the section of attrBuf that contains valid data, + // without the 4 byte length header. All attribute offsets + // are relative to dat. + dat := attrBuf + if int(size) < len(attrBuf) { + dat = dat[:size] + } + dat = dat[4:] // remove length prefix + + for i := uint32(0); int(i) < len(dat); { + header := dat[i:] + if len(header) < 8 { + return attrs, errorspkg.New("truncated attribute header") + } + datOff := *(*int32)(unsafe.Pointer(&header[0])) + attrLen := *(*uint32)(unsafe.Pointer(&header[4])) + if datOff < 0 || uint32(datOff)+attrLen > uint32(len(dat)) { + return attrs, errorspkg.New("truncated results; attrBuf too small") + } + end := uint32(datOff) + attrLen + attrs = append(attrs, dat[datOff:end]) + i = end + if r := i % 4; r != 0 { + i += (4 - r) + } + } + return +} + +//sysnb pipe() (r int, w int, err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + p[0], p[1], err = pipe() + return +} + +func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func setattrlistTimes(path string, times []Timespec, flags int) error { + _p0, err := BytePtrFromString(path) + if err != nil { + return err + } + + var attrList attrList + attrList.bitmapCount = ATTR_BIT_MAP_COUNT + attrList.CommonAttr = ATTR_CMN_MODTIME | ATTR_CMN_ACCTIME + + // order is mtime, atime: the opposite of Chtimes + attributes := [2]Timespec{times[1], times[0]} + options := 0 + if flags&AT_SYMLINK_NOFOLLOW != 0 { + options |= FSOPT_NOFOLLOW + } + _, _, e1 := Syscall6( + SYS_SETATTRLIST, + uintptr(unsafe.Pointer(_p0)), + uintptr(unsafe.Pointer(&attrList)), + uintptr(unsafe.Pointer(&attributes)), + uintptr(unsafe.Sizeof(attributes)), + uintptr(options), + 0, + ) + if e1 != 0 { + return e1 + } + return nil +} + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { + // Darwin doesn't support SYS_UTIMENSAT + return ENOSYS +} + +/* + * Wrapped + */ + +//sys kill(pid int, signum int, posix int) (err error) + +func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } + +//sys ioctl(fd int, req uint, arg uintptr) (err error) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermios(fd int, req uint, value *Termios) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func Uname(uname *Utsname) error { + mib := []_C_int{CTL_KERN, KERN_OSTYPE} + n := unsafe.Sizeof(uname.Sysname) + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_HOSTNAME} + n = unsafe.Sizeof(uname.Nodename) + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_OSRELEASE} + n = unsafe.Sizeof(uname.Release) + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_VERSION} + n = unsafe.Sizeof(uname.Version) + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + return err + } + + // The version might have newlines or tabs in it, convert them to + // spaces. + for i, b := range uname.Version { + if b == '\n' || b == '\t' { + if i == len(uname.Version)-1 { + uname.Version[i] = 0 + } else { + uname.Version[i] = ' ' + } + } + } + + mib = []_C_int{CTL_HW, HW_MACHINE} + n = unsafe.Sizeof(uname.Machine) + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + return err + } + + return nil +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exchangedata(path1 string, path2 string, options int) (err error) +//sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 +//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 +//sys Getdtablesize() (size int) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Getuid() (uid int) +//sysnb Issetugid() (tainted bool) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Renameat(fromfd int, from string, tofd int, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Setlogin(name string) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sys Setprivexec(flag int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 +//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 +//sys Symlink(path string, link string) (err error) +//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Undelete(path string) (err error) +//sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE + +/* + * Unimplemented + */ +// Profil +// Sigaction +// Sigprocmask +// Getlogin +// Sigpending +// Sigaltstack +// Ioctl +// Reboot +// Execve +// Vfork +// Sbrk +// Sstk +// Ovadvise +// Mincore +// Setitimer +// Swapon +// Select +// Sigsuspend +// Readv +// Writev +// Nfssvc +// Getfh +// Quotactl +// Mount +// Csops +// Waitid +// Add_profil +// Kdebug_trace +// Sigreturn +// Atsocket +// Kqueue_from_portset_np +// Kqueue_portset +// Getattrlist +// Setattrlist +// Getdirentriesattr +// Searchfs +// Delete +// Copyfile +// Watchevent +// Waitevent +// Modwatch +// Getxattr +// Fgetxattr +// Setxattr +// Fsetxattr +// Removexattr +// Fremovexattr +// Listxattr +// Flistxattr +// Fsctl +// Initgroups +// Posix_spawn +// Nfsclnt +// Fhopen +// Minherit +// Semsys +// Msgsys +// Shmsys +// Semctl +// Semget +// Semop +// Msgctl +// Msgget +// Msgsnd +// Msgrcv +// Shmat +// Shmctl +// Shmdt +// Shmget +// Shm_open +// Shm_unlink +// Sem_open +// Sem_close +// Sem_unlink +// Sem_wait +// Sem_trywait +// Sem_post +// Sem_getvalue +// Sem_init +// Sem_destroy +// Open_extended +// Umask_extended +// Stat_extended +// Lstat_extended +// Fstat_extended +// Chmod_extended +// Fchmod_extended +// Access_extended +// Settid +// Gettid +// Setsgroups +// Getsgroups +// Setwgroups +// Getwgroups +// Mkfifo_extended +// Mkdir_extended +// Identitysvc +// Shared_region_check_np +// Shared_region_map_np +// __pthread_mutex_destroy +// __pthread_mutex_init +// __pthread_mutex_lock +// __pthread_mutex_trylock +// __pthread_mutex_unlock +// __pthread_cond_init +// __pthread_cond_destroy +// __pthread_cond_broadcast +// __pthread_cond_signal +// Setsid_with_pid +// __pthread_cond_timedwait +// Aio_fsync +// Aio_return +// Aio_suspend +// Aio_cancel +// Aio_error +// Aio_read +// Aio_write +// Lio_listio +// __pthread_cond_wait +// Iopolicysys +// __pthread_kill +// __pthread_sigmask +// __sigwait +// __disable_threadsignal +// __pthread_markcancel +// __pthread_canceled +// __semwait_signal +// Proc_info +// sendfile +// Stat64_extended +// Lstat64_extended +// Fstat64_extended +// __pthread_chdir +// __pthread_fchdir +// Audit +// Auditon +// Getauid +// Setauid +// Getaudit +// Setaudit +// Getaudit_addr +// Setaudit_addr +// Auditctl +// Bsdthread_create +// Bsdthread_terminate +// Stack_snapshot +// Bsdthread_register +// Workq_open +// Workq_ops +// __mac_execve +// __mac_syscall +// __mac_get_file +// __mac_set_file +// __mac_get_link +// __mac_set_link +// __mac_get_proc +// __mac_set_proc +// __mac_get_fd +// __mac_set_fd +// __mac_get_pid +// __mac_get_lcid +// __mac_get_lctx +// __mac_set_lctx +// Setlcid +// Read_nocancel +// Write_nocancel +// Open_nocancel +// Close_nocancel +// Wait4_nocancel +// Recvmsg_nocancel +// Sendmsg_nocancel +// Recvfrom_nocancel +// Accept_nocancel +// Fcntl_nocancel +// Select_nocancel +// Fsync_nocancel +// Connect_nocancel +// Sigsuspend_nocancel +// Readv_nocancel +// Writev_nocancel +// Sendto_nocancel +// Pread_nocancel +// Pwrite_nocancel +// Waitid_nocancel +// Poll_nocancel +// Msgsnd_nocancel +// Msgrcv_nocancel +// Sem_wait_nocancel +// Aio_suspend_nocancel +// __sigwait_nocancel +// __semwait_signal_nocancel +// __mac_mount +// __mac_get_mount +// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go new file mode 100644 index 0000000..b3ac109 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -0,0 +1,68 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} +} + +//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) +func Gettimeofday(tv *Timeval) (err error) { + // The tv passed to gettimeofday must be non-nil + // but is otherwise unused. The answers come back + // in the two registers. + sec, usec, err := gettimeofday(tv) + tv.Sec = int32(sec) + tv.Usec = int32(usec) + return err +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var length = uint64(count) + + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0) + + written = int(length) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of darwin/386 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go new file mode 100644 index 0000000..7521944 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -0,0 +1,68 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) +func Gettimeofday(tv *Timeval) (err error) { + // The tv passed to gettimeofday must be non-nil + // but is otherwise unused. The answers come back + // in the two registers. + sec, usec, err := gettimeofday(tv) + tv.Sec = sec + tv.Usec = usec + return err +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var length = uint64(count) + + _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0) + + written = int(length) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of darwin/amd64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go new file mode 100644 index 0000000..faae207 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -0,0 +1,66 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} +} + +//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) +func Gettimeofday(tv *Timeval) (err error) { + // The tv passed to gettimeofday must be non-nil + // but is otherwise unused. The answers come back + // in the two registers. + sec, usec, err := gettimeofday(tv) + tv.Sec = int32(sec) + tv.Usec = int32(usec) + return err +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var length = uint64(count) + + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0) + + written = int(length) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of darwin/arm the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go new file mode 100644 index 0000000..d6d9628 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -0,0 +1,68 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64,darwin + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) +func Gettimeofday(tv *Timeval) (err error) { + // The tv passed to gettimeofday must be non-nil + // but is otherwise unused. The answers come back + // in the two registers. + sec, usec, err := gettimeofday(tv) + tv.Sec = sec + tv.Usec = usec + return err +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var length = uint64(count) + + _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0) + + written = int(length) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of darwin/arm64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go new file mode 100644 index 0000000..32869af --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -0,0 +1,522 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// DragonFly BSD system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_bsd.go or syscall_unix.go. + +package unix + +import "unsafe" + +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. +type SockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 + Rcf uint16 + Route [16]uint16 + raw RawSockaddrDatalink +} + +// Translate "kern.hostname" to []_C_int{0,1,2,3}. +func nametomib(name string) (mib []_C_int, err error) { + const siz = unsafe.Sizeof(mib[0]) + + // NOTE(rsc): It seems strange to set the buffer to have + // size CTL_MAXNAME+2 but use only CTL_MAXNAME + // as the size. I don't know why the +2 is here, but the + // kernel uses +2 for its own implementation of this function. + // I am scared that if we don't include the +2 here, the kernel + // will silently write 2 words farther than we specify + // and we'll get memory corruption. + var buf [CTL_MAXNAME + 2]_C_int + n := uintptr(CTL_MAXNAME) * siz + + p := (*byte)(unsafe.Pointer(&buf[0])) + bytes, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + + // Magic sysctl: "setting" 0.3 to a string name + // lets you read back the array of integers form. + if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil { + return nil, err + } + return buf[0 : n/siz], nil +} + +//sysnb pipe() (r int, w int, err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + p[0], p[1], err = pipe() + return +} + +//sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) +func Pread(fd int, p []byte, offset int64) (n int, err error) { + return extpread(fd, p, 0, offset) +} + +//sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + return extpwrite(fd, p, 0, offset) +} + +func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + +func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func setattrlistTimes(path string, times []Timespec, flags int) error { + // used on Darwin for UtimesNano + return ENOSYS +} + +//sys ioctl(fd int, req uint, arg uintptr) (err error) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermios(fd int, req uint, value *Termios) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { + err := sysctl(mib, old, oldlen, nil, 0) + if err != nil { + // Utsname members on Dragonfly are only 32 bytes and + // the syscall returns ENOMEM in case the actual value + // is longer. + if err == ENOMEM { + err = nil + } + } + return err +} + +func Uname(uname *Utsname) error { + mib := []_C_int{CTL_KERN, KERN_OSTYPE} + n := unsafe.Sizeof(uname.Sysname) + if err := sysctlUname(mib, &uname.Sysname[0], &n); err != nil { + return err + } + uname.Sysname[unsafe.Sizeof(uname.Sysname)-1] = 0 + + mib = []_C_int{CTL_KERN, KERN_HOSTNAME} + n = unsafe.Sizeof(uname.Nodename) + if err := sysctlUname(mib, &uname.Nodename[0], &n); err != nil { + return err + } + uname.Nodename[unsafe.Sizeof(uname.Nodename)-1] = 0 + + mib = []_C_int{CTL_KERN, KERN_OSRELEASE} + n = unsafe.Sizeof(uname.Release) + if err := sysctlUname(mib, &uname.Release[0], &n); err != nil { + return err + } + uname.Release[unsafe.Sizeof(uname.Release)-1] = 0 + + mib = []_C_int{CTL_KERN, KERN_VERSION} + n = unsafe.Sizeof(uname.Version) + if err := sysctlUname(mib, &uname.Version[0], &n); err != nil { + return err + } + + // The version might have newlines or tabs in it, convert them to + // spaces. + for i, b := range uname.Version { + if b == '\n' || b == '\t' { + if i == len(uname.Version)-1 { + uname.Version[i] = 0 + } else { + uname.Version[i] = ' ' + } + } + } + + mib = []_C_int{CTL_HW, HW_MACHINE} + n = unsafe.Sizeof(uname.Machine) + if err := sysctlUname(mib, &uname.Machine[0], &n); err != nil { + return err + } + uname.Machine[unsafe.Sizeof(uname.Machine)-1] = 0 + + return nil +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys Getdtablesize() (size int) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Issetugid() (tainted bool) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Setlogin(name string) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Undelete(path string) (err error) +//sys Unlink(path string) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE +//sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) +//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) + +/* + * Unimplemented + * TODO(jsing): Update this list for DragonFly. + */ +// Profil +// Sigaction +// Sigprocmask +// Getlogin +// Sigpending +// Sigaltstack +// Reboot +// Execve +// Vfork +// Sbrk +// Sstk +// Ovadvise +// Mincore +// Setitimer +// Swapon +// Select +// Sigsuspend +// Readv +// Writev +// Nfssvc +// Getfh +// Quotactl +// Mount +// Csops +// Waitid +// Add_profil +// Kdebug_trace +// Sigreturn +// Atsocket +// Kqueue_from_portset_np +// Kqueue_portset +// Getattrlist +// Setattrlist +// Getdirentriesattr +// Searchfs +// Delete +// Copyfile +// Watchevent +// Waitevent +// Modwatch +// Getxattr +// Fgetxattr +// Setxattr +// Fsetxattr +// Removexattr +// Fremovexattr +// Listxattr +// Flistxattr +// Fsctl +// Initgroups +// Posix_spawn +// Nfsclnt +// Fhopen +// Minherit +// Semsys +// Msgsys +// Shmsys +// Semctl +// Semget +// Semop +// Msgctl +// Msgget +// Msgsnd +// Msgrcv +// Shmat +// Shmctl +// Shmdt +// Shmget +// Shm_open +// Shm_unlink +// Sem_open +// Sem_close +// Sem_unlink +// Sem_wait +// Sem_trywait +// Sem_post +// Sem_getvalue +// Sem_init +// Sem_destroy +// Open_extended +// Umask_extended +// Stat_extended +// Lstat_extended +// Fstat_extended +// Chmod_extended +// Fchmod_extended +// Access_extended +// Settid +// Gettid +// Setsgroups +// Getsgroups +// Setwgroups +// Getwgroups +// Mkfifo_extended +// Mkdir_extended +// Identitysvc +// Shared_region_check_np +// Shared_region_map_np +// __pthread_mutex_destroy +// __pthread_mutex_init +// __pthread_mutex_lock +// __pthread_mutex_trylock +// __pthread_mutex_unlock +// __pthread_cond_init +// __pthread_cond_destroy +// __pthread_cond_broadcast +// __pthread_cond_signal +// Setsid_with_pid +// __pthread_cond_timedwait +// Aio_fsync +// Aio_return +// Aio_suspend +// Aio_cancel +// Aio_error +// Aio_read +// Aio_write +// Lio_listio +// __pthread_cond_wait +// Iopolicysys +// __pthread_kill +// __pthread_sigmask +// __sigwait +// __disable_threadsignal +// __pthread_markcancel +// __pthread_canceled +// __semwait_signal +// Proc_info +// Stat64_extended +// Lstat64_extended +// Fstat64_extended +// __pthread_chdir +// __pthread_fchdir +// Audit +// Auditon +// Getauid +// Setauid +// Getaudit +// Setaudit +// Getaudit_addr +// Setaudit_addr +// Auditctl +// Bsdthread_create +// Bsdthread_terminate +// Stack_snapshot +// Bsdthread_register +// Workq_open +// Workq_ops +// __mac_execve +// __mac_syscall +// __mac_get_file +// __mac_set_file +// __mac_get_link +// __mac_set_link +// __mac_get_proc +// __mac_set_proc +// __mac_get_fd +// __mac_set_fd +// __mac_get_pid +// __mac_get_lcid +// __mac_get_lctx +// __mac_set_lctx +// Setlcid +// Read_nocancel +// Write_nocancel +// Open_nocancel +// Close_nocancel +// Wait4_nocancel +// Recvmsg_nocancel +// Sendmsg_nocancel +// Recvfrom_nocancel +// Accept_nocancel +// Fcntl_nocancel +// Select_nocancel +// Fsync_nocancel +// Connect_nocancel +// Sigsuspend_nocancel +// Readv_nocancel +// Writev_nocancel +// Sendto_nocancel +// Pread_nocancel +// Pwrite_nocancel +// Waitid_nocancel +// Msgsnd_nocancel +// Msgrcv_nocancel +// Sem_wait_nocancel +// Aio_suspend_nocancel +// __sigwait_nocancel +// __semwait_signal_nocancel +// __mac_mount +// __mac_get_mount +// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go new file mode 100644 index 0000000..9babb31 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -0,0 +1,52 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,dragonfly + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go new file mode 100644 index 0000000..7986985 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -0,0 +1,760 @@ +// Copyright 2009,2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// FreeBSD system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_bsd.go or syscall_unix.go. + +package unix + +import "unsafe" + +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. +type SockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 + raw RawSockaddrDatalink +} + +// Translate "kern.hostname" to []_C_int{0,1,2,3}. +func nametomib(name string) (mib []_C_int, err error) { + const siz = unsafe.Sizeof(mib[0]) + + // NOTE(rsc): It seems strange to set the buffer to have + // size CTL_MAXNAME+2 but use only CTL_MAXNAME + // as the size. I don't know why the +2 is here, but the + // kernel uses +2 for its own implementation of this function. + // I am scared that if we don't include the +2 here, the kernel + // will silently write 2 words farther than we specify + // and we'll get memory corruption. + var buf [CTL_MAXNAME + 2]_C_int + n := uintptr(CTL_MAXNAME) * siz + + p := (*byte)(unsafe.Pointer(&buf[0])) + bytes, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + + // Magic sysctl: "setting" 0.3 to a string name + // lets you read back the array of integers form. + if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil { + return nil, err + } + return buf[0 : n/siz], nil +} + +//sysnb pipe() (r int, w int, err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + p[0], p[1], err = pipe() + return +} + +func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { + var value IPMreqn + vallen := _Socklen(SizeofIPMreqn) + errno := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, errno +} + +func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) +} + +func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + +func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func setattrlistTimes(path string, times []Timespec, flags int) error { + // used on Darwin for UtimesNano + return ENOSYS +} + +// Derive extattr namespace and attribute name + +func xattrnamespace(fullattr string) (ns int, attr string, err error) { + s := -1 + for idx, val := range fullattr { + if val == '.' { + s = idx + break + } + } + + if s == -1 { + return -1, "", ENOATTR + } + + namespace := fullattr[0:s] + attr = fullattr[s+1:] + + switch namespace { + case "user": + return EXTATTR_NAMESPACE_USER, attr, nil + case "system": + return EXTATTR_NAMESPACE_SYSTEM, attr, nil + default: + return -1, "", ENOATTR + } +} + +func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) { + if len(dest) > idx { + return unsafe.Pointer(&dest[idx]) + } else { + return unsafe.Pointer(_zero) + } +} + +// FreeBSD implements its own syscalls to handle extended attributes + +func Getxattr(file string, attr string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsize := len(dest) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return -1, err + } + + return ExtattrGetFile(file, nsid, a, uintptr(d), destsize) +} + +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsize := len(dest) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return -1, err + } + + return ExtattrGetFd(fd, nsid, a, uintptr(d), destsize) +} + +func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsize := len(dest) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return -1, err + } + + return ExtattrGetLink(link, nsid, a, uintptr(d), destsize) +} + +// flags are unused on FreeBSD + +func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { + d := unsafe.Pointer(&data[0]) + datasiz := len(data) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + _, err = ExtattrSetFd(fd, nsid, a, uintptr(d), datasiz) + return +} + +func Setxattr(file string, attr string, data []byte, flags int) (err error) { + d := unsafe.Pointer(&data[0]) + datasiz := len(data) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + _, err = ExtattrSetFile(file, nsid, a, uintptr(d), datasiz) + return +} + +func Lsetxattr(link string, attr string, data []byte, flags int) (err error) { + d := unsafe.Pointer(&data[0]) + datasiz := len(data) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + _, err = ExtattrSetLink(link, nsid, a, uintptr(d), datasiz) + return +} + +func Removexattr(file string, attr string) (err error) { + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + err = ExtattrDeleteFile(file, nsid, a) + return +} + +func Fremovexattr(fd int, attr string) (err error) { + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + err = ExtattrDeleteFd(fd, nsid, a) + return +} + +func Lremovexattr(link string, attr string) (err error) { + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + err = ExtattrDeleteLink(link, nsid, a) + return +} + +func Listxattr(file string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + // FreeBSD won't allow you to list xattrs from multiple namespaces + s := 0 + for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { + stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + + /* Errors accessing system attrs are ignored so that + * we can implement the Linux-like behavior of omitting errors that + * we don't have read permissions on + * + * Linux will still error if we ask for user attributes on a file that + * we don't have read permissions on, so don't ignore those errors + */ + if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } else if e != nil { + return s, e + } + + s += stmp + destsiz -= s + if destsiz < 0 { + destsiz = 0 + } + d = initxattrdest(dest, s) + } + + return s, nil +} + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s := 0 + for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { + stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) + if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } else if e != nil { + return s, e + } + + s += stmp + destsiz -= s + if destsiz < 0 { + destsiz = 0 + } + d = initxattrdest(dest, s) + } + + return s, nil +} + +func Llistxattr(link string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s := 0 + for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { + stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) + if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } else if e != nil { + return s, e + } + + s += stmp + destsiz -= s + if destsiz < 0 { + destsiz = 0 + } + d = initxattrdest(dest, s) + } + + return s, nil +} + +//sys ioctl(fd int, req uint, arg uintptr) (err error) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermios(fd int, req uint, value *Termios) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func Uname(uname *Utsname) error { + mib := []_C_int{CTL_KERN, KERN_OSTYPE} + n := unsafe.Sizeof(uname.Sysname) + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_HOSTNAME} + n = unsafe.Sizeof(uname.Nodename) + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_OSRELEASE} + n = unsafe.Sizeof(uname.Release) + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_VERSION} + n = unsafe.Sizeof(uname.Version) + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + return err + } + + // The version might have newlines or tabs in it, convert them to + // spaces. + for i, b := range uname.Version { + if b == '\n' || b == '\t' { + if i == len(uname.Version)-1 { + uname.Version[i] = 0 + } else { + uname.Version[i] = ' ' + } + } + } + + mib = []_C_int{CTL_HW, HW_MACHINE} + n = unsafe.Sizeof(uname.Machine) + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + return err + } + + return nil +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys CapEnter() (err error) +//sys capRightsGet(version int, fd int, rightsp *CapRights) (err error) = SYS___CAP_RIGHTS_GET +//sys capRightsLimit(fd int, rightsp *CapRights) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exit(code int) +//sys ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) +//sys ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) +//sys ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) +//sys ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Getdents(fd int, buf []byte) (n int, err error) +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys Getdtablesize() (size int) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Issetugid() (tainted bool) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Renameat(fromfd int, from string, tofd int, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Setlogin(name string) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Undelete(path string) (err error) +//sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE +//sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) +//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) + +/* + * Unimplemented + */ +// Profil +// Sigaction +// Sigprocmask +// Getlogin +// Sigpending +// Sigaltstack +// Ioctl +// Reboot +// Execve +// Vfork +// Sbrk +// Sstk +// Ovadvise +// Mincore +// Setitimer +// Swapon +// Select +// Sigsuspend +// Readv +// Writev +// Nfssvc +// Getfh +// Quotactl +// Mount +// Csops +// Waitid +// Add_profil +// Kdebug_trace +// Sigreturn +// Atsocket +// Kqueue_from_portset_np +// Kqueue_portset +// Getattrlist +// Setattrlist +// Getdirentriesattr +// Searchfs +// Delete +// Copyfile +// Watchevent +// Waitevent +// Modwatch +// Getxattr +// Fgetxattr +// Setxattr +// Fsetxattr +// Removexattr +// Fremovexattr +// Listxattr +// Flistxattr +// Fsctl +// Initgroups +// Posix_spawn +// Nfsclnt +// Fhopen +// Minherit +// Semsys +// Msgsys +// Shmsys +// Semctl +// Semget +// Semop +// Msgctl +// Msgget +// Msgsnd +// Msgrcv +// Shmat +// Shmctl +// Shmdt +// Shmget +// Shm_open +// Shm_unlink +// Sem_open +// Sem_close +// Sem_unlink +// Sem_wait +// Sem_trywait +// Sem_post +// Sem_getvalue +// Sem_init +// Sem_destroy +// Open_extended +// Umask_extended +// Stat_extended +// Lstat_extended +// Fstat_extended +// Chmod_extended +// Fchmod_extended +// Access_extended +// Settid +// Gettid +// Setsgroups +// Getsgroups +// Setwgroups +// Getwgroups +// Mkfifo_extended +// Mkdir_extended +// Identitysvc +// Shared_region_check_np +// Shared_region_map_np +// __pthread_mutex_destroy +// __pthread_mutex_init +// __pthread_mutex_lock +// __pthread_mutex_trylock +// __pthread_mutex_unlock +// __pthread_cond_init +// __pthread_cond_destroy +// __pthread_cond_broadcast +// __pthread_cond_signal +// Setsid_with_pid +// __pthread_cond_timedwait +// Aio_fsync +// Aio_return +// Aio_suspend +// Aio_cancel +// Aio_error +// Aio_read +// Aio_write +// Lio_listio +// __pthread_cond_wait +// Iopolicysys +// __pthread_kill +// __pthread_sigmask +// __sigwait +// __disable_threadsignal +// __pthread_markcancel +// __pthread_canceled +// __semwait_signal +// Proc_info +// Stat64_extended +// Lstat64_extended +// Fstat64_extended +// __pthread_chdir +// __pthread_fchdir +// Audit +// Auditon +// Getauid +// Setauid +// Getaudit +// Setaudit +// Getaudit_addr +// Setaudit_addr +// Auditctl +// Bsdthread_create +// Bsdthread_terminate +// Stack_snapshot +// Bsdthread_register +// Workq_open +// Workq_ops +// __mac_execve +// __mac_syscall +// __mac_get_file +// __mac_set_file +// __mac_get_link +// __mac_set_link +// __mac_get_proc +// __mac_set_proc +// __mac_get_fd +// __mac_set_fd +// __mac_get_pid +// __mac_get_lcid +// __mac_get_lctx +// __mac_set_lctx +// Setlcid +// Read_nocancel +// Write_nocancel +// Open_nocancel +// Close_nocancel +// Wait4_nocancel +// Recvmsg_nocancel +// Sendmsg_nocancel +// Recvfrom_nocancel +// Accept_nocancel +// Fcntl_nocancel +// Select_nocancel +// Fsync_nocancel +// Connect_nocancel +// Sigsuspend_nocancel +// Readv_nocancel +// Writev_nocancel +// Sendto_nocancel +// Pread_nocancel +// Pwrite_nocancel +// Waitid_nocancel +// Poll_nocancel +// Msgsnd_nocancel +// Msgrcv_nocancel +// Sem_wait_nocancel +// Aio_suspend_nocancel +// __sigwait_nocancel +// __semwait_signal_nocancel +// __mac_mount +// __mac_get_mount +// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go new file mode 100644 index 0000000..21e0395 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -0,0 +1,52 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go new file mode 100644 index 0000000..9c945a6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -0,0 +1,52 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go new file mode 100644 index 0000000..5cd6243 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -0,0 +1,52 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go new file mode 100644 index 0000000..654439e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go @@ -0,0 +1,297 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd + +package unix_test + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "testing" + + "golang.org/x/sys/unix" +) + +func TestSysctlUint64(t *testing.T) { + _, err := unix.SysctlUint64("vm.swap_total") + if err != nil { + t.Fatal(err) + } +} + +// FIXME: Infrastructure for launching tests in subprocesses stolen from openbsd_test.go - refactor? +// testCmd generates a proper command that, when executed, runs the test +// corresponding to the given key. + +type testProc struct { + fn func() // should always exit instead of returning + arg func(t *testing.T) string // generate argument for test + cleanup func(arg string) error // for instance, delete coredumps from testing pledge + success bool // whether zero-exit means success or failure +} + +var ( + testProcs = map[string]testProc{} + procName = "" + procArg = "" +) + +const ( + optName = "sys-unix-internal-procname" + optArg = "sys-unix-internal-arg" +) + +func init() { + flag.StringVar(&procName, optName, "", "internal use only") + flag.StringVar(&procArg, optArg, "", "internal use only") + +} + +func testCmd(procName string, procArg string) (*exec.Cmd, error) { + exe, err := filepath.Abs(os.Args[0]) + if err != nil { + return nil, err + } + cmd := exec.Command(exe, "-"+optName+"="+procName, "-"+optArg+"="+procArg) + cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr + return cmd, nil +} + +// ExitsCorrectly is a comprehensive, one-line-of-use wrapper for testing +// a testProc with a key. +func ExitsCorrectly(t *testing.T, procName string) { + s := testProcs[procName] + arg := "-" + if s.arg != nil { + arg = s.arg(t) + } + c, err := testCmd(procName, arg) + defer func(arg string) { + if err := s.cleanup(arg); err != nil { + t.Fatalf("Failed to run cleanup for %s %s %#v", procName, err, err) + } + }(arg) + if err != nil { + t.Fatalf("Failed to construct command for %s", procName) + } + if (c.Run() == nil) != s.success { + result := "succeed" + if !s.success { + result = "fail" + } + t.Fatalf("Process did not %s when it was supposed to", result) + } +} + +func TestMain(m *testing.M) { + flag.Parse() + if procName != "" { + t := testProcs[procName] + t.fn() + os.Stderr.WriteString("test function did not exit\n") + if t.success { + os.Exit(1) + } else { + os.Exit(0) + } + } + os.Exit(m.Run()) +} + +// end of infrastructure + +const testfile = "gocapmodetest" +const testfile2 = testfile + "2" + +func CapEnterTest() { + _, err := os.OpenFile(path.Join(procArg, testfile), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + panic(fmt.Sprintf("OpenFile: %s", err)) + } + + err = unix.CapEnter() + if err != nil { + panic(fmt.Sprintf("CapEnter: %s", err)) + } + + _, err = os.OpenFile(path.Join(procArg, testfile2), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err == nil { + panic("OpenFile works!") + } + if err.(*os.PathError).Err != unix.ECAPMODE { + panic(fmt.Sprintf("OpenFile failed wrong: %s %#v", err, err)) + } + os.Exit(0) +} + +func makeTempDir(t *testing.T) string { + d, err := ioutil.TempDir("", "go_openat_test") + if err != nil { + t.Fatalf("TempDir failed: %s", err) + } + return d +} + +func removeTempDir(arg string) error { + err := os.RemoveAll(arg) + if err != nil && err.(*os.PathError).Err == unix.ENOENT { + return nil + } + return err +} + +func init() { + testProcs["cap_enter"] = testProc{ + CapEnterTest, + makeTempDir, + removeTempDir, + true, + } +} + +func TestCapEnter(t *testing.T) { + if runtime.GOARCH != "amd64" { + t.Skipf("skipping test on %s", runtime.GOARCH) + } + ExitsCorrectly(t, "cap_enter") +} + +func OpenatTest() { + f, err := os.Open(procArg) + if err != nil { + panic(err) + } + + err = unix.CapEnter() + if err != nil { + panic(fmt.Sprintf("CapEnter: %s", err)) + } + + fxx, err := unix.Openat(int(f.Fd()), "xx", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + panic(err) + } + unix.Close(fxx) + + // The right to open BASE/xx is not ambient + _, err = os.OpenFile(procArg+"/xx", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err == nil { + panic("OpenFile succeeded") + } + if err.(*os.PathError).Err != unix.ECAPMODE { + panic(fmt.Sprintf("OpenFile failed wrong: %s %#v", err, err)) + } + + // Can't make a new directory either + err = os.Mkdir(procArg+"2", 0777) + if err == nil { + panic("MKdir succeeded") + } + if err.(*os.PathError).Err != unix.ECAPMODE { + panic(fmt.Sprintf("Mkdir failed wrong: %s %#v", err, err)) + } + + // Remove all caps except read and lookup. + r, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_LOOKUP}) + if err != nil { + panic(fmt.Sprintf("CapRightsInit failed: %s %#v", err, err)) + } + err = unix.CapRightsLimit(f.Fd(), r) + if err != nil { + panic(fmt.Sprintf("CapRightsLimit failed: %s %#v", err, err)) + } + + // Check we can get the rights back again + r, err = unix.CapRightsGet(f.Fd()) + if err != nil { + panic(fmt.Sprintf("CapRightsGet failed: %s %#v", err, err)) + } + b, err := unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_LOOKUP}) + if err != nil { + panic(fmt.Sprintf("CapRightsIsSet failed: %s %#v", err, err)) + } + if !b { + panic(fmt.Sprintf("Unexpected rights")) + } + b, err = unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_LOOKUP, unix.CAP_WRITE}) + if err != nil { + panic(fmt.Sprintf("CapRightsIsSet failed: %s %#v", err, err)) + } + if b { + panic(fmt.Sprintf("Unexpected rights (2)")) + } + + // Can no longer create a file + _, err = unix.Openat(int(f.Fd()), "xx2", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err == nil { + panic("Openat succeeded") + } + if err != unix.ENOTCAPABLE { + panic(fmt.Sprintf("OpenFileAt failed wrong: %s %#v", err, err)) + } + + // But can read an existing one + _, err = unix.Openat(int(f.Fd()), "xx", os.O_RDONLY, 0666) + if err != nil { + panic(fmt.Sprintf("Openat failed: %s %#v", err, err)) + } + + os.Exit(0) +} + +func init() { + testProcs["openat"] = testProc{ + OpenatTest, + makeTempDir, + removeTempDir, + true, + } +} + +func TestOpenat(t *testing.T) { + if runtime.GOARCH != "amd64" { + t.Skipf("skipping test on %s", runtime.GOARCH) + } + ExitsCorrectly(t, "openat") +} + +func TestCapRightsSetAndClear(t *testing.T) { + r, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_WRITE, unix.CAP_PDWAIT}) + if err != nil { + t.Fatalf("CapRightsInit failed: %s", err) + } + + err = unix.CapRightsSet(r, []uint64{unix.CAP_EVENT, unix.CAP_LISTEN}) + if err != nil { + t.Fatalf("CapRightsSet failed: %s", err) + } + + b, err := unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_WRITE, unix.CAP_PDWAIT, unix.CAP_EVENT, unix.CAP_LISTEN}) + if err != nil { + t.Fatalf("CapRightsIsSet failed: %s", err) + } + if !b { + t.Fatalf("Wrong rights set") + } + + err = unix.CapRightsClear(r, []uint64{unix.CAP_READ, unix.CAP_PDWAIT}) + if err != nil { + t.Fatalf("CapRightsClear failed: %s", err) + } + + b, err = unix.CapRightsIsSet(r, []uint64{unix.CAP_WRITE, unix.CAP_EVENT, unix.CAP_LISTEN}) + if err != nil { + t.Fatalf("CapRightsIsSet failed: %s", err) + } + if !b { + t.Fatalf("Wrong rights set") + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go new file mode 100644 index 0000000..76cf81f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -0,0 +1,1503 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Linux system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and +// wrap it in our own nicer implementation. + +package unix + +import ( + "syscall" + "unsafe" +) + +/* + * Wrapped + */ + +func Access(path string, mode uint32) (err error) { + return Faccessat(AT_FDCWD, path, mode, 0) +} + +func Chmod(path string, mode uint32) (err error) { + return Fchmodat(AT_FDCWD, path, mode, 0) +} + +func Chown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, 0) +} + +func Creat(path string, mode uint32) (fd int, err error) { + return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) +} + +//sys fchmodat(dirfd int, path string, mode uint32) (err error) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior + // and check the flags. Otherwise the mode would be applied to the symlink + // destination which is not what the user expects. + if flags&^AT_SYMLINK_NOFOLLOW != 0 { + return EINVAL + } else if flags&AT_SYMLINK_NOFOLLOW != 0 { + return EOPNOTSUPP + } + return fchmodat(dirfd, path, mode) +} + +//sys ioctl(fd int, req uint, arg uintptr) (err error) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermios(fd int, req uint, value *Termios) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) + +func Link(oldpath string, newpath string) (err error) { + return Linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0) +} + +func Mkdir(path string, mode uint32) (err error) { + return Mkdirat(AT_FDCWD, path, mode) +} + +func Mknod(path string, mode uint32, dev int) (err error) { + return Mknodat(AT_FDCWD, path, mode, dev) +} + +func Open(path string, mode int, perm uint32) (fd int, err error) { + return openat(AT_FDCWD, path, mode|O_LARGEFILE, perm) +} + +//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + return openat(dirfd, path, flags|O_LARGEFILE, mode) +} + +//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) + +func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + if len(fds) == 0 { + return ppoll(nil, 0, timeout, sigmask) + } + return ppoll(&fds[0], len(fds), timeout, sigmask) +} + +//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) + +func Readlink(path string, buf []byte) (n int, err error) { + return Readlinkat(AT_FDCWD, path, buf) +} + +func Rename(oldpath string, newpath string) (err error) { + return Renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath) +} + +func Rmdir(path string) error { + return Unlinkat(AT_FDCWD, path, AT_REMOVEDIR) +} + +//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) + +func Symlink(oldpath string, newpath string) (err error) { + return Symlinkat(oldpath, AT_FDCWD, newpath) +} + +func Unlink(path string) error { + return Unlinkat(AT_FDCWD, path, 0) +} + +//sys Unlinkat(dirfd int, path string, flags int) (err error) + +//sys utimes(path string, times *[2]Timeval) (err error) + +func Utimes(path string, tv []Timeval) error { + if tv == nil { + err := utimensat(AT_FDCWD, path, nil, 0) + if err != ENOSYS { + return err + } + return utimes(path, nil) + } + if len(tv) != 2 { + return EINVAL + } + var ts [2]Timespec + ts[0] = NsecToTimespec(TimevalToNsec(tv[0])) + ts[1] = NsecToTimespec(TimevalToNsec(tv[1])) + err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) + if err != ENOSYS { + return err + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) + +func UtimesNano(path string, ts []Timespec) error { + if ts == nil { + err := utimensat(AT_FDCWD, path, nil, 0) + if err != ENOSYS { + return err + } + return utimes(path, nil) + } + if len(ts) != 2 { + return EINVAL + } + err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) + if err != ENOSYS { + return err + } + // If the utimensat syscall isn't available (utimensat was added to Linux + // in 2.6.22, Released, 8 July 2007) then fall back to utimes + var tv [2]Timeval + for i := 0; i < 2; i++ { + tv[i] = NsecToTimeval(TimespecToNsec(ts[i])) + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +//sys futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) + +func Futimesat(dirfd int, path string, tv []Timeval) error { + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + if tv == nil { + return futimesat(dirfd, pathp, nil) + } + if len(tv) != 2 { + return EINVAL + } + return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func Futimes(fd int, tv []Timeval) (err error) { + // Believe it or not, this is the best we can do on Linux + // (and is what glibc does). + return Utimes("/proc/self/fd/"+itoa(fd), tv) +} + +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) + +func Getwd() (wd string, err error) { + var buf [PathMax]byte + n, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + // Getcwd returns the number of bytes written to buf, including the NUL. + if n < 1 || n > len(buf) || buf[n-1] != 0 { + return "", EINVAL + } + return string(buf[0 : n-1]), nil +} + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + if err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Sanity check group count. Max is 1<<16 on Linux. + if n < 0 || n > 1<<20 { + return nil, EINVAL + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if err != nil { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +type WaitStatus uint32 + +// Wait status is 7 bits at bottom, either 0 (exited), +// 0x7F (stopped), or a signal number that caused an exit. +// The 0x80 bit is whether there was a core dump. +// An extra number (exit code, signal causing a stop) +// is in the high bits. At least that's the idea. +// There are various irregularities. For example, the +// "continued" status is 0xFFFF, distinguishing itself +// from stopped via the core dump bit. + +const ( + mask = 0x7F + core = 0x80 + exited = 0x00 + stopped = 0x7F + shift = 8 +) + +func (w WaitStatus) Exited() bool { return w&mask == exited } + +func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited } + +func (w WaitStatus) Stopped() bool { return w&0xFF == stopped } + +func (w WaitStatus) Continued() bool { return w == 0xFFFF } + +func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } + +func (w WaitStatus) ExitStatus() int { + if !w.Exited() { + return -1 + } + return int(w>>shift) & 0xFF +} + +func (w WaitStatus) Signal() syscall.Signal { + if !w.Signaled() { + return -1 + } + return syscall.Signal(w & mask) +} + +func (w WaitStatus) StopSignal() syscall.Signal { + if !w.Stopped() { + return -1 + } + return syscall.Signal(w>>shift) & 0xFF +} + +func (w WaitStatus) TrapCause() int { + if w.StopSignal() != SIGTRAP { + return -1 + } + return int(w>>shift) >> 8 +} + +//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) + +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + var status _C_int + wpid, err = wait4(pid, &status, options, rusage) + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return +} + +func Mkfifo(path string, mode uint32) error { + return Mknod(path, mode|S_IFIFO, 0) +} + +func Mkfifoat(dirfd int, path string, mode uint32) error { + return Mknodat(dirfd, path, mode|S_IFIFO, 0) +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n >= len(sa.raw.Path) { + return nil, 0, EINVAL + } + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + // length is family (uint16), name, NUL. + sl := _Socklen(2) + if n > 0 { + sl += _Socklen(n) + 1 + } + if sa.raw.Path[0] == '@' { + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- + } + + return unsafe.Pointer(&sa.raw), sl, nil +} + +// SockaddrLinklayer implements the Sockaddr interface for AF_PACKET type sockets. +type SockaddrLinklayer struct { + Protocol uint16 + Ifindex int + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]byte + raw RawSockaddrLinklayer +} + +func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff { + return nil, 0, EINVAL + } + sa.raw.Family = AF_PACKET + sa.raw.Protocol = sa.Protocol + sa.raw.Ifindex = int32(sa.Ifindex) + sa.raw.Hatype = sa.Hatype + sa.raw.Pkttype = sa.Pkttype + sa.raw.Halen = sa.Halen + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil +} + +// SockaddrNetlink implements the Sockaddr interface for AF_NETLINK type sockets. +type SockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 + raw RawSockaddrNetlink +} + +func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_NETLINK + sa.raw.Pad = sa.Pad + sa.raw.Pid = sa.Pid + sa.raw.Groups = sa.Groups + return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil +} + +// SockaddrHCI implements the Sockaddr interface for AF_BLUETOOTH type sockets +// using the HCI protocol. +type SockaddrHCI struct { + Dev uint16 + Channel uint16 + raw RawSockaddrHCI +} + +func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_BLUETOOTH + sa.raw.Dev = sa.Dev + sa.raw.Channel = sa.Channel + return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil +} + +// SockaddrL2 implements the Sockaddr interface for AF_BLUETOOTH type sockets +// using the L2CAP protocol. +type SockaddrL2 struct { + PSM uint16 + CID uint16 + Addr [6]uint8 + AddrType uint8 + raw RawSockaddrL2 +} + +func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_BLUETOOTH + psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm)) + psm[0] = byte(sa.PSM) + psm[1] = byte(sa.PSM >> 8) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i] + } + cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid)) + cid[0] = byte(sa.CID) + cid[1] = byte(sa.CID >> 8) + sa.raw.Bdaddr_type = sa.AddrType + return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil +} + +// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets. +// The RxID and TxID fields are used for transport protocol addressing in +// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with +// zero values for CAN_RAW and CAN_BCM sockets as they have no meaning. +// +// The SockaddrCAN struct must be bound to the socket file descriptor +// using Bind before the CAN socket can be used. +// +// // Read one raw CAN frame +// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) +// addr := &SockaddrCAN{Ifindex: index} +// Bind(fd, addr) +// frame := make([]byte, 16) +// Read(fd, frame) +// +// The full SocketCAN documentation can be found in the linux kernel +// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt +type SockaddrCAN struct { + Ifindex int + RxID uint32 + TxID uint32 + raw RawSockaddrCAN +} + +func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff { + return nil, 0, EINVAL + } + sa.raw.Family = AF_CAN + sa.raw.Ifindex = int32(sa.Ifindex) + rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) + for i := 0; i < 4; i++ { + sa.raw.Addr[i] = rx[i] + } + tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) + for i := 0; i < 4; i++ { + sa.raw.Addr[i+4] = tx[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil +} + +// SockaddrALG implements the Sockaddr interface for AF_ALG type sockets. +// SockaddrALG enables userspace access to the Linux kernel's cryptography +// subsystem. The Type and Name fields specify which type of hash or cipher +// should be used with a given socket. +// +// To create a file descriptor that provides access to a hash or cipher, both +// Bind and Accept must be used. Once the setup process is complete, input +// data can be written to the socket, processed by the kernel, and then read +// back as hash output or ciphertext. +// +// Here is an example of using an AF_ALG socket with SHA1 hashing. +// The initial socket setup process is as follows: +// +// // Open a socket to perform SHA1 hashing. +// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) +// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} +// unix.Bind(fd, addr) +// // Note: unix.Accept does not work at this time; must invoke accept() +// // manually using unix.Syscall. +// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) +// +// Once a file descriptor has been returned from Accept, it may be used to +// perform SHA1 hashing. The descriptor is not safe for concurrent use, but +// may be re-used repeatedly with subsequent Write and Read operations. +// +// When hashing a small byte slice or string, a single Write and Read may +// be used: +// +// // Assume hashfd is already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash an input string and read the results. Each Write discards +// // previous hash state. Read always reads the current state. +// b := make([]byte, 20) +// for i := 0; i < 2; i++ { +// io.WriteString(hash, "Hello, world.") +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// } +// // Output: +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// +// For hashing larger byte slices, or byte streams such as those read from +// a file or socket, use Sendto with MSG_MORE to instruct the kernel to update +// the hash digest instead of creating a new one for a given chunk and finalizing it. +// +// // Assume hashfd and addr are already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash the contents of a file. +// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") +// b := make([]byte, 4096) +// for { +// n, err := f.Read(b) +// if err == io.EOF { +// break +// } +// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) +// } +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 +// +// For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html. +type SockaddrALG struct { + Type string + Name string + Feature uint32 + Mask uint32 + raw RawSockaddrALG +} + +func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { + // Leave room for NUL byte terminator. + if len(sa.Type) > 13 { + return nil, 0, EINVAL + } + if len(sa.Name) > 63 { + return nil, 0, EINVAL + } + + sa.raw.Family = AF_ALG + sa.raw.Feat = sa.Feature + sa.raw.Mask = sa.Mask + + typ, err := ByteSliceFromString(sa.Type) + if err != nil { + return nil, 0, err + } + name, err := ByteSliceFromString(sa.Name) + if err != nil { + return nil, 0, err + } + + copy(sa.raw.Type[:], typ) + copy(sa.raw.Name[:], name) + + return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil +} + +// SockaddrVM implements the Sockaddr interface for AF_VSOCK type sockets. +// SockaddrVM provides access to Linux VM sockets: a mechanism that enables +// bidirectional communication between a hypervisor and its guest virtual +// machines. +type SockaddrVM struct { + // CID and Port specify a context ID and port address for a VM socket. + // Guests have a unique CID, and hosts may have a well-known CID of: + // - VMADDR_CID_HYPERVISOR: refers to the hypervisor process. + // - VMADDR_CID_HOST: refers to other processes on the host. + CID uint32 + Port uint32 + raw RawSockaddrVM +} + +func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_VSOCK + sa.raw.Port = sa.Port + sa.raw.Cid = sa.CID + + return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil +} + +func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_NETLINK: + pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa)) + sa := new(SockaddrNetlink) + sa.Family = pp.Family + sa.Pad = pp.Pad + sa.Pid = pp.Pid + sa.Groups = pp.Groups + return sa, nil + + case AF_PACKET: + pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa)) + sa := new(SockaddrLinklayer) + sa.Protocol = pp.Protocol + sa.Ifindex = int(pp.Ifindex) + sa.Hatype = pp.Hatype + sa.Pkttype = pp.Pkttype + sa.Halen = pp.Halen + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + if pp.Path[0] == 0 { + // "Abstract" Unix domain socket. + // Rewrite leading NUL as @ for textual display. + // (This is the standard convention.) + // Not friendly to overwrite in place, + // but the callers below don't care. + pp.Path[0] = '@' + } + + // Assume path ends at NUL. + // This is not technically the Linux semantics for + // abstract Unix domain sockets--they are supposed + // to be uninterpreted fixed-size binary blobs--but + // everyone uses this convention. + n := 0 + for n < len(pp.Path) && pp.Path[n] != 0 { + n++ + } + bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_VSOCK: + pp := (*RawSockaddrVM)(unsafe.Pointer(rsa)) + sa := &SockaddrVM{ + CID: pp.Cid, + Port: pp.Port, + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if err != nil { + return + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(&rsa) +} + +func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + return value, err +} + +func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { + var value IPMreq + vallen := _Socklen(SizeofIPMreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { + var value IPMreqn + vallen := _Socklen(SizeofIPMreqn) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { + var value IPv6Mreq + vallen := _Socklen(SizeofIPv6Mreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { + var value IPv6MTUInfo + vallen := _Socklen(SizeofIPv6MTUInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { + var value ICMPv6Filter + vallen := _Socklen(SizeofICMPv6Filter) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptUcred(fd, level, opt int) (*Ucred, error) { + var value Ucred + vallen := _Socklen(SizeofUcred) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { + var value TCPInfo + vallen := _Socklen(SizeofTCPInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +// GetsockoptString returns the string value of the socket option opt for the +// socket associated with fd at the given socket level. +func GetsockoptString(fd, level, opt int) (string, error) { + buf := make([]byte, 256) + vallen := _Socklen(len(buf)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + if err != nil { + if err == ERANGE { + buf = make([]byte, vallen) + err = getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + } + if err != nil { + return "", err + } + } + return string(buf[:vallen-1]), nil +} + +func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) +} + +// Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) + +// KeyctlInt calls keyctl commands in which each argument is an int. +// These commands are KEYCTL_REVOKE, KEYCTL_CHOWN, KEYCTL_CLEAR, KEYCTL_LINK, +// KEYCTL_UNLINK, KEYCTL_NEGATE, KEYCTL_SET_REQKEY_KEYRING, KEYCTL_SET_TIMEOUT, +// KEYCTL_ASSUME_AUTHORITY, KEYCTL_SESSION_TO_PARENT, KEYCTL_REJECT, +// KEYCTL_INVALIDATE, and KEYCTL_GET_PERSISTENT. +//sys KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) = SYS_KEYCTL + +// KeyctlBuffer calls keyctl commands in which the third and fourth +// arguments are a buffer and its length, respectively. +// These commands are KEYCTL_UPDATE, KEYCTL_READ, and KEYCTL_INSTANTIATE. +//sys KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) = SYS_KEYCTL + +// KeyctlString calls keyctl commands which return a string. +// These commands are KEYCTL_DESCRIBE and KEYCTL_GET_SECURITY. +func KeyctlString(cmd int, id int) (string, error) { + // We must loop as the string data may change in between the syscalls. + // We could allocate a large buffer here to reduce the chance that the + // syscall needs to be called twice; however, this is unnecessary as + // the performance loss is negligible. + var buffer []byte + for { + // Try to fill the buffer with data + length, err := KeyctlBuffer(cmd, id, buffer, 0) + if err != nil { + return "", err + } + + // Check if the data was written + if length <= len(buffer) { + // Exclude the null terminator + return string(buffer[:length-1]), nil + } + + // Make a bigger buffer if needed + buffer = make([]byte, length) + } +} + +// Keyctl commands with special signatures. + +// KeyctlGetKeyringID implements the KEYCTL_GET_KEYRING_ID command. +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_get_keyring_ID.3.html +func KeyctlGetKeyringID(id int, create bool) (ringid int, err error) { + createInt := 0 + if create { + createInt = 1 + } + return KeyctlInt(KEYCTL_GET_KEYRING_ID, id, createInt, 0, 0) +} + +// KeyctlSetperm implements the KEYCTL_SETPERM command. The perm value is the +// key handle permission mask as described in the "keyctl setperm" section of +// http://man7.org/linux/man-pages/man1/keyctl.1.html. +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_setperm.3.html +func KeyctlSetperm(id int, perm uint32) error { + _, err := KeyctlInt(KEYCTL_SETPERM, id, int(perm), 0, 0) + return err +} + +//sys keyctlJoin(cmd int, arg2 string) (ret int, err error) = SYS_KEYCTL + +// KeyctlJoinSessionKeyring implements the KEYCTL_JOIN_SESSION_KEYRING command. +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_join_session_keyring.3.html +func KeyctlJoinSessionKeyring(name string) (ringid int, err error) { + return keyctlJoin(KEYCTL_JOIN_SESSION_KEYRING, name) +} + +//sys keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) = SYS_KEYCTL + +// KeyctlSearch implements the KEYCTL_SEARCH command. +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_search.3.html +func KeyctlSearch(ringid int, keyType, description string, destRingid int) (id int, err error) { + return keyctlSearch(KEYCTL_SEARCH, ringid, keyType, description, destRingid) +} + +//sys keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) = SYS_KEYCTL + +// KeyctlInstantiateIOV implements the KEYCTL_INSTANTIATE_IOV command. This +// command is similar to KEYCTL_INSTANTIATE, except that the payload is a slice +// of Iovec (each of which represents a buffer) instead of a single buffer. +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_instantiate_iov.3.html +func KeyctlInstantiateIOV(id int, payload []Iovec, ringid int) error { + return keyctlIOV(KEYCTL_INSTANTIATE_IOV, id, payload, ringid) +} + +//sys keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) = SYS_KEYCTL + +// KeyctlDHCompute implements the KEYCTL_DH_COMPUTE command. This command +// computes a Diffie-Hellman shared secret based on the provide params. The +// secret is written to the provided buffer and the returned size is the number +// of bytes written (returning an error if there is insufficient space in the +// buffer). If a nil buffer is passed in, this function returns the minimum +// buffer length needed to store the appropriate data. Note that this differs +// from KEYCTL_READ's behavior which always returns the requested payload size. +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_dh_compute.3.html +func KeyctlDHCompute(params *KeyctlDHParams, buffer []byte) (size int, err error) { + return keyctlDH(KEYCTL_DH_COMPUTE, params, buffer) +} + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var iov Iovec + if len(p) > 0 { + iov.Base = &p[0] + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + var sockType int + sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) + if err != nil { + return + } + // receive at least one normal byte + if sockType != SOCK_DGRAM && len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = &oob[0] + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); err != nil { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(&rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + var err error + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(ptr) + msg.Namelen = uint32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = &p[0] + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + var sockType int + sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) + if err != nil { + return 0, err + } + // send at least one normal byte + if sockType != SOCK_DGRAM && len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = &oob[0] + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +// BindToDevice binds the socket associated with fd to device. +func BindToDevice(fd int, device string) (err error) { + return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device) +} + +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + +func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) { + // The peek requests are machine-size oriented, so we wrap it + // to retrieve arbitrary-length data. + + // The ptrace syscall differs from glibc's ptrace. + // Peeks returns the word in *data, not as the return value. + + var buf [sizeofPtr]byte + + // Leading edge. PEEKTEXT/PEEKDATA don't require aligned + // access (PEEKUSER warns that it might), but if we don't + // align our reads, we might straddle an unmapped page + // boundary and not get the bytes leading up to the page + // boundary. + n := 0 + if addr%sizeofPtr != 0 { + err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + if err != nil { + return 0, err + } + n += copy(out, buf[addr%sizeofPtr:]) + out = out[n:] + } + + // Remainder. + for len(out) > 0 { + // We use an internal buffer to guarantee alignment. + // It's not documented if this is necessary, but we're paranoid. + err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + if err != nil { + return n, err + } + copied := copy(out, buf[0:]) + n += copied + out = out[copied:] + } + + return n, nil +} + +func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) { + return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out) +} + +func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { + return ptracePeek(PTRACE_PEEKDATA, pid, addr, out) +} + +func PtracePeekUser(pid int, addr uintptr, out []byte) (count int, err error) { + return ptracePeek(PTRACE_PEEKUSR, pid, addr, out) +} + +func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) { + // As for ptracePeek, we need to align our accesses to deal + // with the possibility of straddling an invalid page. + + // Leading edge. + n := 0 + if addr%sizeofPtr != 0 { + var buf [sizeofPtr]byte + err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + if err != nil { + return 0, err + } + n += copy(buf[addr%sizeofPtr:], data) + word := *((*uintptr)(unsafe.Pointer(&buf[0]))) + err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word) + if err != nil { + return 0, err + } + data = data[n:] + } + + // Interior. + for len(data) > sizeofPtr { + word := *((*uintptr)(unsafe.Pointer(&data[0]))) + err = ptrace(pokeReq, pid, addr+uintptr(n), word) + if err != nil { + return n, err + } + n += sizeofPtr + data = data[sizeofPtr:] + } + + // Trailing edge. + if len(data) > 0 { + var buf [sizeofPtr]byte + err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + if err != nil { + return n, err + } + copy(buf[0:], data) + word := *((*uintptr)(unsafe.Pointer(&buf[0]))) + err = ptrace(pokeReq, pid, addr+uintptr(n), word) + if err != nil { + return n, err + } + n += len(data) + } + + return n, nil +} + +func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { + return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data) +} + +func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) { + return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data) +} + +func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) { + return ptracePoke(PTRACE_POKEUSR, PTRACE_PEEKUSR, pid, addr, data) +} + +func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +func PtraceSetOptions(pid int, options int) (err error) { + return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options)) +} + +func PtraceGetEventMsg(pid int) (msg uint, err error) { + var data _C_long + err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data))) + msg = uint(data) + return +} + +func PtraceCont(pid int, signal int) (err error) { + return ptrace(PTRACE_CONT, pid, 0, uintptr(signal)) +} + +func PtraceSyscall(pid int, signal int) (err error) { + return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal)) +} + +func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) } + +func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) } + +func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) } + +//sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) + +func Reboot(cmd int) (err error) { + return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "") +} + +func ReadDirent(fd int, buf []byte) (n int, err error) { + return Getdents(fd, buf) +} + +//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) + +func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + // Certain file systems get rather angry and EINVAL if you give + // them an empty string of data, rather than NULL. + if data == "" { + return mount(source, target, fstype, flags, nil) + } + datap, err := BytePtrFromString(data) + if err != nil { + return err + } + return mount(source, target, fstype, flags, datap) +} + +// Sendto +// Recvfrom +// Socketpair + +/* + * Direct access + */ +//sys Acct(path string) (err error) +//sys AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) +//sys Adjtimex(buf *Timex) (state int, err error) +//sys Chdir(path string) (err error) +//sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys Close(fd int) (err error) +//sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) +//sys Dup(oldfd int) (fd int, err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sysnb EpollCreate1(flag int) (fd int, err error) +//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) +//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD2 +//sys Exit(code int) = SYS_EXIT_GROUP +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) +//sys Fchdir(fd int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Fdatasync(fd int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fsync(fd int) (err error) +//sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 +//sysnb Getpgid(pid int) (pgid int, err error) + +func Getpgrp() (pid int) { + pid, _ = Getpgid(0) + return +} + +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sys Getrandom(buf []byte, flags int) (n int, err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettid() (tid int) +//sys Getxattr(path string, attr string, dest []byte) (sz int, err error) +//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) +//sysnb InotifyInit1(flags int) (fd int, err error) +//sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) +//sysnb Kill(pid int, sig syscall.Signal) (err error) +//sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG +//sys Lgetxattr(path string, attr string, dest []byte) (sz int, err error) +//sys Listxattr(path string, dest []byte) (sz int, err error) +//sys Llistxattr(path string, dest []byte) (sz int, err error) +//sys Lremovexattr(path string, attr string) (err error) +//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT +//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) +//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 +//sys read(fd int, p []byte) (n int, err error) +//sys Removexattr(path string, attr string) (err error) +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) +//sys RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) +//sys Setdomainname(p []byte) (err error) +//sys Sethostname(p []byte) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tv *Timeval) (err error) +//sys Setns(fd int, nstype int) (err error) + +// issue 1435. +// On linux Setuid and Setgid only affects the current thread, not the process. +// This does not match what most callers expect so we must return an error +// here rather than letting the caller think that the call succeeded. + +func Setuid(uid int) (err error) { + return EOPNOTSUPP +} + +func Setgid(uid int) (err error) { + return EOPNOTSUPP +} + +//sys Setpriority(which int, who int, prio int) (err error) +//sys Setxattr(path string, attr string, data []byte, flags int) (err error) +//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) +//sys Sync() +//sys Syncfs(fd int) (err error) +//sysnb Sysinfo(info *Sysinfo_t) (err error) +//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) +//sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) +//sysnb Times(tms *Tms) (ticks uintptr, err error) +//sysnb Umask(mask int) (oldmask int) +//sysnb Uname(buf *Utsname) (err error) +//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2 +//sys Unshare(flags int) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys exitThread(code int) (err error) = SYS_EXIT +//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ +//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE + +// mmap varies by architecture; see syscall_linux_*.go. +//sys munmap(addr uintptr, length uintptr) (err error) + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + +//sys Madvise(b []byte, advice int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Msync(b []byte, flags int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) + +// Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd, +// using the specified flags. +func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { + n, _, errno := Syscall6( + SYS_VMSPLICE, + uintptr(fd), + uintptr(unsafe.Pointer(&iovs[0])), + uintptr(len(iovs)), + uintptr(flags), + 0, + 0, + ) + if errno != 0 { + return 0, syscall.Errno(errno) + } + + return int(n), nil +} + +/* + * Unimplemented + */ +// AfsSyscall +// Alarm +// ArchPrctl +// Brk +// Capget +// Capset +// ClockGetres +// ClockNanosleep +// ClockSettime +// Clone +// CreateModule +// DeleteModule +// EpollCtlOld +// EpollPwait +// EpollWaitOld +// Execve +// Fgetxattr +// Flistxattr +// Fork +// Fremovexattr +// Fsetxattr +// Futex +// GetKernelSyms +// GetMempolicy +// GetRobustList +// GetThreadArea +// Getitimer +// Getpmsg +// IoCancel +// IoDestroy +// IoGetevents +// IoSetup +// IoSubmit +// IoprioGet +// IoprioSet +// KexecLoad +// LookupDcookie +// Mbind +// MigratePages +// Mincore +// ModifyLdt +// Mount +// MovePages +// MqGetsetattr +// MqNotify +// MqOpen +// MqTimedreceive +// MqTimedsend +// MqUnlink +// Mremap +// Msgctl +// Msgget +// Msgrcv +// Msgsnd +// Nfsservctl +// Personality +// Pselect6 +// Ptrace +// Putpmsg +// QueryModule +// Quotactl +// Readahead +// Readv +// RemapFilePages +// RestartSyscall +// RtSigaction +// RtSigpending +// RtSigprocmask +// RtSigqueueinfo +// RtSigreturn +// RtSigsuspend +// RtSigtimedwait +// SchedGetPriorityMax +// SchedGetPriorityMin +// SchedGetparam +// SchedGetscheduler +// SchedRrGetInterval +// SchedSetparam +// SchedYield +// Security +// Semctl +// Semget +// Semop +// Semtimedop +// SetMempolicy +// SetRobustList +// SetThreadArea +// SetTidAddress +// Shmat +// Shmctl +// Shmdt +// Shmget +// Sigaltstack +// Signalfd +// Swapoff +// Swapon +// Sysfs +// TimerCreate +// TimerDelete +// TimerGetoverrun +// TimerGettime +// TimerSettime +// Timerfd +// Tkill (obsolete) +// Tuxcall +// Umount2 +// Uselib +// Utimensat +// Vfork +// Vhangup +// Vserver +// Waitid +// _Sysctl diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go new file mode 100644 index 0000000..bb8e4fb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -0,0 +1,391 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(rsc): Rewrite all nn(SP) references into name+(nn-8)(FP) +// so that go vet can check that they are correct. + +// +build 386,linux + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +// 64-bit file system and 32-bit uid calls +// (386 default is 32-bit file system and 16-bit uid). +//sys Dup2(oldfd int, newfd int) (err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 +//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 +//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 +//sysnb Getegid() (egid int) = SYS_GETEGID32 +//sysnb Geteuid() (euid int) = SYS_GETEUID32 +//sysnb Getgid() (gid int) = SYS_GETGID32 +//sysnb Getuid() (uid int) = SYS_GETUID32 +//sysnb InotifyInit() (fd int, err error) +//sys Ioperm(from int, num int, on int) (err error) +//sys Iopl(level int) (err error) +//sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 +//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 +//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32 +//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 +//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 +//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) +//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32 +//sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32 +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT + +//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Pause() (err error) + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + page := uintptr(offset / 4096) + if offset != int64(page)*4096 { + return 0, EINVAL + } + return mmap2(addr, length, prot, flags, fd, page) +} + +type rlimit32 struct { + Cur uint32 + Max uint32 +} + +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT + +const rlimInf32 = ^uint32(0) +const rlimInf64 = ^uint64(0) + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + err = getrlimit(resource, &rl) + if err != nil { + return + } + + if rl.Cur == rlimInf32 { + rlim.Cur = rlimInf64 + } else { + rlim.Cur = uint64(rl.Cur) + } + + if rl.Max == rlimInf32 { + rlim.Max = rlimInf64 + } else { + rlim.Max = uint64(rl.Max) + } + return +} + +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + if rlim.Cur == rlimInf64 { + rl.Cur = rlimInf32 + } else if rlim.Cur < uint64(rlimInf32) { + rl.Cur = uint32(rlim.Cur) + } else { + return EINVAL + } + if rlim.Max == rlimInf64 { + rl.Max = rlimInf32 + } else if rlim.Max < uint64(rlimInf32) { + rl.Max = uint32(rlim.Max) + } else { + return EINVAL + } + + return setrlimit(resource, &rl) +} + +// Underlying system call writes to newoffset via pointer. +// Implemented in assembly to avoid allocation. +func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + newoffset, errno := seek(fd, offset, whence) + if errno != 0 { + return 0, errno + } + return newoffset, nil +} + +// Vsyscalls on amd64. +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) + +//sys Utime(path string, buf *Utimbuf) (err error) + +// On x86 Linux, all the socket calls go through an extra indirection, +// I think because the 5-register system call interface can't handle +// the 6-argument calls like sendto and recvfrom. Instead the +// arguments to the underlying system call are the number below +// and a pointer to an array of uintptr. We hide the pointer in the +// socketcall assembly to avoid allocation on every system call. + +const ( + // see linux/net.h + _SOCKET = 1 + _BIND = 2 + _CONNECT = 3 + _LISTEN = 4 + _ACCEPT = 5 + _GETSOCKNAME = 6 + _GETPEERNAME = 7 + _SOCKETPAIR = 8 + _SEND = 9 + _RECV = 10 + _SENDTO = 11 + _RECVFROM = 12 + _SHUTDOWN = 13 + _SETSOCKOPT = 14 + _GETSOCKOPT = 15 + _SENDMSG = 16 + _RECVMSG = 17 + _ACCEPT4 = 18 + _RECVMMSG = 19 + _SENDMMSG = 20 +) + +func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno) +func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno) + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + if e != 0 { + err = e + } + return +} + +func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, e := rawsocketcall(_GETSOCKNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, e := rawsocketcall(_GETPEERNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) { + _, e := rawsocketcall(_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd)), 0, 0) + if e != 0 { + err = e + } + return +} + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, e := socketcall(_BIND, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, e := socketcall(_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func socket(domain int, typ int, proto int) (fd int, err error) { + fd, e := rawsocketcall(_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, e := socketcall(_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e != 0 { + err = e + } + return +} + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, e := socketcall(_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen, 0) + if e != 0 { + err = e + } + return +} + +func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var base uintptr + if len(p) > 0 { + base = uintptr(unsafe.Pointer(&p[0])) + } + n, e := socketcall(_RECVFROM, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + if e != 0 { + err = e + } + return +} + +func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var base uintptr + if len(p) > 0 { + base = uintptr(unsafe.Pointer(&p[0])) + } + _, e := socketcall(_SENDTO, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e != 0 { + err = e + } + return +} + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + n, e := socketcall(_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + n, e := socketcall(_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func Listen(s int, n int) (err error) { + _, e := socketcall(_LISTEN, uintptr(s), uintptr(n), 0, 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func Shutdown(s, how int) (err error) { + _, e := socketcall(_SHUTDOWN, uintptr(s), uintptr(how), 0, 0, 0, 0) + if e != 0 { + err = e + } + return +} + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func Statfs(path string, buf *Statfs_t) (err error) { + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func (r *PtraceRegs) PC() uint64 { return uint64(uint32(r.Eip)) } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Eip = int32(pc) } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go new file mode 100644 index 0000000..53d38a5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -0,0 +1,144 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,linux + +package unix + +//sys Dup2(oldfd int, newfd int) (err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Getuid() (uid int) +//sysnb InotifyInit() (fd int, err error) +//sys Ioperm(from int, num int, on int) (err error) +//sys Iopl(level int) (err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +func Gettimeofday(tv *Timeval) (err error) { + errno := gettimeofday(tv) + if errno != 0 { + return errno + } + return nil +} + +func Time(t *Time_t) (tt Time_t, err error) { + var tv Timeval + errno := gettimeofday(&tv) + if errno != 0 { + return 0, errno + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +//sys Utime(path string, buf *Utimbuf) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +func (r *PtraceRegs) PC() uint64 { return r.Rip } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Rip = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go new file mode 100644 index 0000000..21a4946 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,linux +// +build !gccgo + +package unix + +import "syscall" + +//go:noescape +func gettimeofday(tv *Timeval) (err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go new file mode 100644 index 0000000..c59f858 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -0,0 +1,255 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,linux + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} +} + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, 0) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +// Underlying system call writes to newoffset via pointer. +// Implemented in assembly to avoid allocation. +func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + newoffset, errno := seek(fd, offset, whence) + if errno != 0 { + return 0, errno + } + return newoffset, nil +} + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32 +//sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32 +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) + +// 64-bit file system and 32-bit uid calls +// (16-bit uid calls are not always supported in newer kernels) +//sys Dup2(oldfd int, newfd int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 +//sysnb Getegid() (egid int) = SYS_GETEGID32 +//sysnb Geteuid() (euid int) = SYS_GETEUID32 +//sysnb Getgid() (gid int) = SYS_GETGID32 +//sysnb Getuid() (uid int) = SYS_GETUID32 +//sysnb InotifyInit() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT +//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 +//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32 +//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 +//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 +//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) +//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 + +// Vsyscalls on amd64. +//sysnb Gettimeofday(tv *Timeval) (err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Pause() (err error) + +func Time(t *Time_t) (Time_t, error) { + var tv Timeval + err := Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func Utime(path string, buf *Utimbuf) error { + tv := []Timeval{ + {Sec: buf.Actime}, + {Sec: buf.Modtime}, + } + return Utimes(path, tv) +} + +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_ARM_FADVISE64_64, uintptr(fd), uintptr(advice), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func Statfs(path string, buf *Statfs_t) (err error) { + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + page := uintptr(offset / 4096) + if offset != int64(page)*4096 { + return 0, EINVAL + } + return mmap2(addr, length, prot, flags, fd, page) +} + +type rlimit32 struct { + Cur uint32 + Max uint32 +} + +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT + +const rlimInf32 = ^uint32(0) +const rlimInf64 = ^uint64(0) + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + err = getrlimit(resource, &rl) + if err != nil { + return + } + + if rl.Cur == rlimInf32 { + rlim.Cur = rlimInf64 + } else { + rlim.Cur = uint64(rl.Cur) + } + + if rl.Max == rlimInf32 { + rlim.Max = rlimInf64 + } else { + rlim.Max = uint64(rl.Max) + } + return +} + +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + if rlim.Cur == rlimInf64 { + rl.Cur = rlimInf32 + } else if rlim.Cur < uint64(rlimInf32) { + rl.Cur = uint32(rlim.Cur) + } else { + return EINVAL + } + if rlim.Max == rlimInf64 { + rl.Max = rlimInf32 + } else if rlim.Max < uint64(rlimInf32) { + rl.Max = uint32(rlim.Max) + } else { + return EINVAL + } + + return setrlimit(resource, &rl) +} + +func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go new file mode 100644 index 0000000..c464783 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -0,0 +1,189 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64,linux + +package unix + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Getuid() (uid int) +//sys Listen(s int, n int) (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +func Stat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) +} + +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func Time(t *Time_t) (Time_t, error) { + var tv Timeval + err := Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func Utime(path string, buf *Utimbuf) error { + tv := []Timeval{ + {Sec: buf.Actime}, + {Sec: buf.Modtime}, + } + return Utimes(path, tv) +} + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, 0) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +func (r *PtraceRegs) PC() uint64 { return r.Pc } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +func InotifyInit() (fd int, err error) { + return InotifyInit1(0) +} + +func Dup2(oldfd int, newfd int) (err error) { + return Dup3(oldfd, newfd, 0) +} + +func Pause() (err error) { + _, _, e1 := Syscall6(SYS_PPOLL, 0, 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove +// these when the deprecated syscalls that the syscall package relies on +// are removed. +const ( + SYS_GETPGRP = 1060 + SYS_UTIMES = 1037 + SYS_FUTIMESAT = 1066 + SYS_PAUSE = 1061 + SYS_USTAT = 1070 + SYS_UTIME = 1063 + SYS_LCHOWN = 1032 + SYS_TIME = 1062 + SYS_EPOLL_CREATE = 1042 + SYS_EPOLL_WAIT = 1069 +) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var ts *Timespec + if timeout >= 0 { + ts = new(Timespec) + *ts = NsecToTimespec(int64(timeout) * 1e6) + } + if len(fds) == 0 { + return ppoll(nil, 0, ts, nil) + } + return ppoll(&fds[0], len(fds), ts, nil) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go new file mode 100644 index 0000000..c26e6ec --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!gccgo + +package unix + +// SyscallNoError may be used instead of Syscall for syscalls that don't fail. +func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) + +// RawSyscallNoError may be used instead of RawSyscall for syscalls that don't +// fail. +func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go new file mode 100644 index 0000000..df9c123 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build gccgo +// +build 386 arm + +package unix + +import ( + "syscall" + "unsafe" +) + +func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) { + offsetLow := uint32(offset & 0xffffffff) + offsetHigh := uint32((offset >> 32) & 0xffffffff) + _, _, err = Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offsetHigh), uintptr(offsetLow), uintptr(unsafe.Pointer(&newoffset)), uintptr(whence), 0) + return newoffset, err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go new file mode 100644 index 0000000..15a69cb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -0,0 +1,209 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build mips64 mips64le + +package unix + +//sys Dup2(oldfd int, newfd int) (err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Getuid() (uid int) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func Time(t *Time_t) (tt Time_t, err error) { + var tv Timeval + err = Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +//sys Utime(path string, buf *Utimbuf) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, 0) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +func Ioperm(from int, num int, on int) (err error) { + return ENOSYS +} + +func Iopl(level int) (err error) { + return ENOSYS +} + +type stat_t struct { + Dev uint32 + Pad0 [3]int32 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Pad1 [3]uint32 + Size int64 + Atime uint32 + Atime_nsec uint32 + Mtime uint32 + Mtime_nsec uint32 + Ctime uint32 + Ctime_nsec uint32 + Blksize uint32 + Pad2 uint32 + Blocks int64 +} + +//sys fstat(fd int, st *stat_t) (err error) +//sys lstat(path string, st *stat_t) (err error) +//sys stat(path string, st *stat_t) (err error) + +func Fstat(fd int, s *Stat_t) (err error) { + st := &stat_t{} + err = fstat(fd, st) + fillStat_t(s, st) + return +} + +func Lstat(path string, s *Stat_t) (err error) { + st := &stat_t{} + err = lstat(path, st) + fillStat_t(s, st) + return +} + +func Stat(path string, s *Stat_t) (err error) { + st := &stat_t{} + err = stat(path, st) + fillStat_t(s, st) + return +} + +func fillStat_t(s *Stat_t, st *stat_t) { + s.Dev = st.Dev + s.Ino = st.Ino + s.Mode = st.Mode + s.Nlink = st.Nlink + s.Uid = st.Uid + s.Gid = st.Gid + s.Rdev = st.Rdev + s.Size = st.Size + s.Atim = Timespec{int64(st.Atime), int64(st.Atime_nsec)} + s.Mtim = Timespec{int64(st.Mtime), int64(st.Mtime_nsec)} + s.Ctim = Timespec{int64(st.Ctime), int64(st.Ctime_nsec)} + s.Blksize = st.Blksize + s.Blocks = st.Blocks +} + +func (r *PtraceRegs) PC() uint64 { return r.Epc } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go new file mode 100644 index 0000000..40b8e4f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -0,0 +1,231 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build mips mipsle + +package unix + +import ( + "syscall" + "unsafe" +) + +func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//sys Dup2(oldfd int, newfd int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) + +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) + +//sysnb InotifyInit() (fd int, err error) +//sys Ioperm(from int, num int, on int) (err error) +//sys Iopl(level int) (err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) + +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 +//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 + +//sys Utime(path string, buf *Utimbuf) (err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Pause() (err error) + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = errnoErr(e) + } + return +} + +func Statfs(path string, buf *Statfs_t) (err error) { + p, err := BytePtrFromString(path) + if err != nil { + return err + } + _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(p)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = errnoErr(e) + } + return +} + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + _, _, e := Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offset>>32), uintptr(offset), uintptr(unsafe.Pointer(&off)), uintptr(whence), 0) + if e != 0 { + err = errnoErr(e) + } + return +} + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, 0) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + page := uintptr(offset / 4096) + if offset != int64(page)*4096 { + return 0, EINVAL + } + return mmap2(addr, length, prot, flags, fd, page) +} + +const rlimInf32 = ^uint32(0) +const rlimInf64 = ^uint64(0) + +type rlimit32 struct { + Cur uint32 + Max uint32 +} + +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + err = getrlimit(resource, &rl) + if err != nil { + return + } + + if rl.Cur == rlimInf32 { + rlim.Cur = rlimInf64 + } else { + rlim.Cur = uint64(rl.Cur) + } + + if rl.Max == rlimInf32 { + rlim.Max = rlimInf64 + } else { + rlim.Max = uint64(rl.Max) + } + return +} + +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + if rlim.Cur == rlimInf64 { + rl.Cur = rlimInf32 + } else if rlim.Cur < uint64(rlimInf32) { + rl.Cur = uint32(rlim.Cur) + } else { + return EINVAL + } + if rlim.Max == rlimInf64 { + rl.Max = rlimInf32 + } else if rlim.Max < uint64(rlimInf32) { + rl.Max = uint32(rlim.Max) + } else { + return EINVAL + } + + return setrlimit(resource, &rl) +} + +func (r *PtraceRegs) PC() uint64 { return r.Epc } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go new file mode 100644 index 0000000..17c9116 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -0,0 +1,127 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le + +package unix + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Dup2(oldfd int, newfd int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_UGETRLIMIT +//sysnb Getuid() (uid int) +//sysnb InotifyInit() (fd int, err error) +//sys Ioperm(from int, num int, on int) (err error) +//sys Iopl(level int) (err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE2 +//sys Truncate(path string, length int64) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) + +//sys Utime(path string, buf *Utimbuf) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func (r *PtraceRegs) PC() uint64 { return r.Nip } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Nip = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go new file mode 100644 index 0000000..c0d86e7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -0,0 +1,320 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,linux + +package unix + +import ( + "unsafe" +) + +//sys Dup2(oldfd int, newfd int) (err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Getuid() (uid int) +//sysnb InotifyInit() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func Time(t *Time_t) (tt Time_t, err error) { + var tv Timeval + err = Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +//sys Utime(path string, buf *Utimbuf) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, 0) // pipe2 is the same as pipe when flags are set to 0. + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +func Ioperm(from int, num int, on int) (err error) { + return ENOSYS +} + +func Iopl(level int) (err error) { + return ENOSYS +} + +func (r *PtraceRegs) PC() uint64 { return r.Psw.Addr } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Psw.Addr = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +// Linux on s390x uses the old mmap interface, which requires arguments to be passed in a struct. +// mmap2 also requires arguments to be passed in a struct; it is currently not exposed in . +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + mmap_args := [6]uintptr{addr, length, uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)} + r0, _, e1 := Syscall(SYS_MMAP, uintptr(unsafe.Pointer(&mmap_args[0])), 0, 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// On s390x Linux, all the socket calls go through an extra indirection. +// The arguments to the underlying system call (SYS_SOCKETCALL) are the +// number below and a pointer to an array of uintptr. +const ( + // see linux/net.h + netSocket = 1 + netBind = 2 + netConnect = 3 + netListen = 4 + netAccept = 5 + netGetSockName = 6 + netGetPeerName = 7 + netSocketPair = 8 + netSend = 9 + netRecv = 10 + netSendTo = 11 + netRecvFrom = 12 + netShutdown = 13 + netSetSockOpt = 14 + netGetSockOpt = 15 + netSendMsg = 16 + netRecvMsg = 17 + netAccept4 = 18 + netRecvMMsg = 19 + netSendMMsg = 20 +) + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (int, error) { + args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))} + fd, _, err := Syscall(SYS_SOCKETCALL, netAccept, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return 0, err + } + return int(fd), nil +} + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (int, error) { + args := [4]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)} + fd, _, err := Syscall(SYS_SOCKETCALL, netAccept4, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return 0, err + } + return int(fd), nil +} + +func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) error { + args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))} + _, _, err := RawSyscall(SYS_SOCKETCALL, netGetSockName, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) error { + args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))} + _, _, err := RawSyscall(SYS_SOCKETCALL, netGetPeerName, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +func socketpair(domain int, typ int, flags int, fd *[2]int32) error { + args := [4]uintptr{uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd))} + _, _, err := RawSyscall(SYS_SOCKETCALL, netSocketPair, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) error { + args := [3]uintptr{uintptr(s), uintptr(addr), uintptr(addrlen)} + _, _, err := Syscall(SYS_SOCKETCALL, netBind, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) error { + args := [3]uintptr{uintptr(s), uintptr(addr), uintptr(addrlen)} + _, _, err := Syscall(SYS_SOCKETCALL, netConnect, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +func socket(domain int, typ int, proto int) (int, error) { + args := [3]uintptr{uintptr(domain), uintptr(typ), uintptr(proto)} + fd, _, err := RawSyscall(SYS_SOCKETCALL, netSocket, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return 0, err + } + return int(fd), nil +} + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) error { + args := [5]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))} + _, _, err := Syscall(SYS_SOCKETCALL, netGetSockOpt, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error { + args := [4]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val)} + _, _, err := Syscall(SYS_SOCKETCALL, netSetSockOpt, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (int, error) { + var base uintptr + if len(p) > 0 { + base = uintptr(unsafe.Pointer(&p[0])) + } + args := [6]uintptr{uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))} + n, _, err := Syscall(SYS_SOCKETCALL, netRecvFrom, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return 0, err + } + return int(n), nil +} + +func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) error { + var base uintptr + if len(p) > 0 { + base = uintptr(unsafe.Pointer(&p[0])) + } + args := [6]uintptr{uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen)} + _, _, err := Syscall(SYS_SOCKETCALL, netSendTo, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +func recvmsg(s int, msg *Msghdr, flags int) (int, error) { + args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)} + n, _, err := Syscall(SYS_SOCKETCALL, netRecvMsg, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return 0, err + } + return int(n), nil +} + +func sendmsg(s int, msg *Msghdr, flags int) (int, error) { + args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)} + n, _, err := Syscall(SYS_SOCKETCALL, netSendMsg, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return 0, err + } + return int(n), nil +} + +func Listen(s int, n int) error { + args := [2]uintptr{uintptr(s), uintptr(n)} + _, _, err := Syscall(SYS_SOCKETCALL, netListen, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +func Shutdown(s, how int) error { + args := [2]uintptr{uintptr(s), uintptr(how)} + _, _, err := Syscall(SYS_SOCKETCALL, netShutdown, uintptr(unsafe.Pointer(&args)), 0) + if err != 0 { + return err + } + return nil +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go new file mode 100644 index 0000000..a00f992 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -0,0 +1,143 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build sparc64,linux + +package unix + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Dup2(oldfd int, newfd int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Getuid() (uid int) +//sysnb InotifyInit() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +func Ioperm(from int, num int, on int) (err error) { + return ENOSYS +} + +func Iopl(level int) (err error) { + return ENOSYS +} + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func Time(t *Time_t) (tt Time_t, err error) { + var tv Timeval + err = Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +//sys Utime(path string, buf *Utimbuf) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +func (r *PtraceRegs) PC() uint64 { return r.Tpc } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Tpc = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go new file mode 100644 index 0000000..423f004 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_test.go @@ -0,0 +1,402 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package unix_test + +import ( + "os" + "runtime" + "runtime/debug" + "testing" + "time" + + "golang.org/x/sys/unix" +) + +func TestFchmodat(t *testing.T) { + defer chtmpdir(t)() + + touch(t, "file1") + err := os.Symlink("file1", "symlink1") + if err != nil { + t.Fatal(err) + } + + err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, 0) + if err != nil { + t.Fatalf("Fchmodat: unexpected error: %v", err) + } + + fi, err := os.Stat("file1") + if err != nil { + t.Fatal(err) + } + + if fi.Mode() != 0444 { + t.Errorf("Fchmodat: failed to change mode: expected %v, got %v", 0444, fi.Mode()) + } + + err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, unix.AT_SYMLINK_NOFOLLOW) + if err != unix.EOPNOTSUPP { + t.Fatalf("Fchmodat: unexpected error: %v, expected EOPNOTSUPP", err) + } +} + +func TestIoctlGetInt(t *testing.T) { + f, err := os.Open("/dev/random") + if err != nil { + t.Fatalf("failed to open device: %v", err) + } + defer f.Close() + + v, err := unix.IoctlGetInt(int(f.Fd()), unix.RNDGETENTCNT) + if err != nil { + t.Fatalf("failed to perform ioctl: %v", err) + } + + t.Logf("%d bits of entropy available", v) +} + +func TestPpoll(t *testing.T) { + f, cleanup := mktmpfifo(t) + defer cleanup() + + const timeout = 100 * time.Millisecond + + ok := make(chan bool, 1) + go func() { + select { + case <-time.After(10 * timeout): + t.Errorf("Ppoll: failed to timeout after %d", 10*timeout) + case <-ok: + } + }() + + fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}} + timeoutTs := unix.NsecToTimespec(int64(timeout)) + n, err := unix.Ppoll(fds, &timeoutTs, nil) + ok <- true + if err != nil { + t.Errorf("Ppoll: unexpected error: %v", err) + return + } + if n != 0 { + t.Errorf("Ppoll: wrong number of events: got %v, expected %v", n, 0) + return + } +} + +func TestTime(t *testing.T) { + var ut unix.Time_t + ut2, err := unix.Time(&ut) + if err != nil { + t.Fatalf("Time: %v", err) + } + if ut != ut2 { + t.Errorf("Time: return value %v should be equal to argument %v", ut2, ut) + } + + var now time.Time + + for i := 0; i < 10; i++ { + ut, err = unix.Time(nil) + if err != nil { + t.Fatalf("Time: %v", err) + } + + now = time.Now() + + if int64(ut) == now.Unix() { + return + } + } + + t.Errorf("Time: return value %v should be nearly equal to time.Now().Unix() %v", ut, now.Unix()) +} + +func TestUtime(t *testing.T) { + defer chtmpdir(t)() + + touch(t, "file1") + + buf := &unix.Utimbuf{ + Modtime: 12345, + } + + err := unix.Utime("file1", buf) + if err != nil { + t.Fatalf("Utime: %v", err) + } + + fi, err := os.Stat("file1") + if err != nil { + t.Fatal(err) + } + + if fi.ModTime().Unix() != 12345 { + t.Errorf("Utime: failed to change modtime: expected %v, got %v", 12345, fi.ModTime().Unix()) + } +} + +func TestUtimesNanoAt(t *testing.T) { + defer chtmpdir(t)() + + symlink := "symlink1" + os.Remove(symlink) + err := os.Symlink("nonexisting", symlink) + if err != nil { + t.Fatal(err) + } + + ts := []unix.Timespec{ + {Sec: 1111, Nsec: 2222}, + {Sec: 3333, Nsec: 4444}, + } + err = unix.UtimesNanoAt(unix.AT_FDCWD, symlink, ts, unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + t.Fatalf("UtimesNanoAt: %v", err) + } + + var st unix.Stat_t + err = unix.Lstat(symlink, &st) + if err != nil { + t.Fatalf("Lstat: %v", err) + } + if st.Atim != ts[0] { + t.Errorf("UtimesNanoAt: wrong atime: %v", st.Atim) + } + if st.Mtim != ts[1] { + t.Errorf("UtimesNanoAt: wrong mtime: %v", st.Mtim) + } +} + +func TestRlimitAs(t *testing.T) { + // disable GC during to avoid flaky test + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + var rlim unix.Rlimit + err := unix.Getrlimit(unix.RLIMIT_AS, &rlim) + if err != nil { + t.Fatalf("Getrlimit: %v", err) + } + var zero unix.Rlimit + if zero == rlim { + t.Fatalf("Getrlimit: got zero value %#v", rlim) + } + set := rlim + set.Cur = uint64(unix.Getpagesize()) + err = unix.Setrlimit(unix.RLIMIT_AS, &set) + if err != nil { + t.Fatalf("Setrlimit: set failed: %#v %v", set, err) + } + + // RLIMIT_AS was set to the page size, so mmap()'ing twice the page size + // should fail. See 'man 2 getrlimit'. + _, err = unix.Mmap(-1, 0, 2*unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE) + if err == nil { + t.Fatal("Mmap: unexpectedly suceeded after setting RLIMIT_AS") + } + + err = unix.Setrlimit(unix.RLIMIT_AS, &rlim) + if err != nil { + t.Fatalf("Setrlimit: restore failed: %#v %v", rlim, err) + } + + b, err := unix.Mmap(-1, 0, 2*unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE) + if err != nil { + t.Fatalf("Mmap: %v", err) + } + err = unix.Munmap(b) + if err != nil { + t.Fatalf("Munmap: %v", err) + } +} + +func TestSelect(t *testing.T) { + _, err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0}) + if err != nil { + t.Fatalf("Select: %v", err) + } + + dur := 150 * time.Millisecond + tv := unix.NsecToTimeval(int64(dur)) + start := time.Now() + _, err = unix.Select(0, nil, nil, nil, &tv) + took := time.Since(start) + if err != nil { + t.Fatalf("Select: %v", err) + } + + if took < dur { + t.Errorf("Select: timeout should have been at least %v, got %v", dur, took) + } +} + +func TestPselect(t *testing.T) { + _, err := unix.Pselect(0, nil, nil, nil, &unix.Timespec{Sec: 0, Nsec: 0}, nil) + if err != nil { + t.Fatalf("Pselect: %v", err) + } + + dur := 2500 * time.Microsecond + ts := unix.NsecToTimespec(int64(dur)) + start := time.Now() + _, err = unix.Pselect(0, nil, nil, nil, &ts, nil) + took := time.Since(start) + if err != nil { + t.Fatalf("Pselect: %v", err) + } + + if took < dur { + t.Errorf("Pselect: timeout should have been at least %v, got %v", dur, took) + } +} + +func TestSchedSetaffinity(t *testing.T) { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + var oldMask unix.CPUSet + err := unix.SchedGetaffinity(0, &oldMask) + if err != nil { + t.Fatalf("SchedGetaffinity: %v", err) + } + + var newMask unix.CPUSet + newMask.Zero() + if newMask.Count() != 0 { + t.Errorf("CpuZero: didn't zero CPU set: %v", newMask) + } + cpu := 1 + newMask.Set(cpu) + if newMask.Count() != 1 || !newMask.IsSet(cpu) { + t.Errorf("CpuSet: didn't set CPU %d in set: %v", cpu, newMask) + } + cpu = 5 + newMask.Set(cpu) + if newMask.Count() != 2 || !newMask.IsSet(cpu) { + t.Errorf("CpuSet: didn't set CPU %d in set: %v", cpu, newMask) + } + newMask.Clear(cpu) + if newMask.Count() != 1 || newMask.IsSet(cpu) { + t.Errorf("CpuClr: didn't clear CPU %d in set: %v", cpu, newMask) + } + + if runtime.NumCPU() < 2 { + t.Skip("skipping setaffinity tests on single CPU system") + } + + err = unix.SchedSetaffinity(0, &newMask) + if err != nil { + t.Fatalf("SchedSetaffinity: %v", err) + } + + var gotMask unix.CPUSet + err = unix.SchedGetaffinity(0, &gotMask) + if err != nil { + t.Fatalf("SchedGetaffinity: %v", err) + } + + if gotMask != newMask { + t.Errorf("SchedSetaffinity: returned affinity mask does not match set affinity mask") + } + + // Restore old mask so it doesn't affect successive tests + err = unix.SchedSetaffinity(0, &oldMask) + if err != nil { + t.Fatalf("SchedSetaffinity: %v", err) + } +} + +func TestStatx(t *testing.T) { + var stx unix.Statx_t + err := unix.Statx(unix.AT_FDCWD, ".", 0, 0, &stx) + if err == unix.ENOSYS { + t.Skip("statx syscall is not available, skipping test") + } else if err != nil { + t.Fatalf("Statx: %v", err) + } + + defer chtmpdir(t)() + touch(t, "file1") + + var st unix.Stat_t + err = unix.Stat("file1", &st) + if err != nil { + t.Fatalf("Stat: %v", err) + } + + flags := unix.AT_STATX_SYNC_AS_STAT + err = unix.Statx(unix.AT_FDCWD, "file1", flags, unix.STATX_ALL, &stx) + if err != nil { + t.Fatalf("Statx: %v", err) + } + + if uint32(stx.Mode) != st.Mode { + t.Errorf("Statx: returned stat mode does not match Stat") + } + + atime := unix.StatxTimestamp{Sec: int64(st.Atim.Sec), Nsec: uint32(st.Atim.Nsec)} + ctime := unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)} + mtime := unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)} + + if stx.Atime != atime { + t.Errorf("Statx: returned stat atime does not match Stat") + } + if stx.Ctime != ctime { + t.Errorf("Statx: returned stat ctime does not match Stat") + } + if stx.Mtime != mtime { + t.Errorf("Statx: returned stat mtime does not match Stat") + } + + err = os.Symlink("file1", "symlink1") + if err != nil { + t.Fatal(err) + } + + err = unix.Lstat("symlink1", &st) + if err != nil { + t.Fatalf("Lstat: %v", err) + } + + err = unix.Statx(unix.AT_FDCWD, "symlink1", flags, unix.STATX_BASIC_STATS, &stx) + if err != nil { + t.Fatalf("Statx: %v", err) + } + + // follow symlink, expect a regulat file + if stx.Mode&unix.S_IFREG == 0 { + t.Errorf("Statx: didn't follow symlink") + } + + err = unix.Statx(unix.AT_FDCWD, "symlink1", flags|unix.AT_SYMLINK_NOFOLLOW, unix.STATX_ALL, &stx) + if err != nil { + t.Fatalf("Statx: %v", err) + } + + // follow symlink, expect a symlink + if stx.Mode&unix.S_IFLNK == 0 { + t.Errorf("Statx: unexpectedly followed symlink") + } + if uint32(stx.Mode) != st.Mode { + t.Errorf("Statx: returned stat mode does not match Lstat") + } + + atime = unix.StatxTimestamp{Sec: int64(st.Atim.Sec), Nsec: uint32(st.Atim.Nsec)} + ctime = unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)} + mtime = unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)} + + if stx.Atime != atime { + t.Errorf("Statx: returned stat atime does not match Lstat") + } + if stx.Ctime != ctime { + t.Errorf("Statx: returned stat ctime does not match Lstat") + } + if stx.Mtime != mtime { + t.Errorf("Statx: returned stat mtime does not match Lstat") + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go new file mode 100644 index 0000000..9a95076 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -0,0 +1,566 @@ +// Copyright 2009,2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// NetBSD system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_bsd.go or syscall_unix.go. + +package unix + +import ( + "syscall" + "unsafe" +) + +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. +type SockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 + raw RawSockaddrDatalink +} + +func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func sysctlNodes(mib []_C_int) (nodes []Sysctlnode, err error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, CTL_QUERY) + qnode := Sysctlnode{Flags: SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err = sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes = make([]Sysctlnode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err = sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) (mib []_C_int, err error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + // Discover the nodes and construct the MIB OID. + for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, _C_int(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, EINVAL + } + } + + return mib, nil +} + +//sysnb pipe() (fd1 int, fd2 int, err error) +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + p[0], p[1], err = pipe() + return +} + +//sys getdents(fd int, buf []byte) (n int, err error) +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return getdents(fd, buf) +} + +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + +// TODO +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + return -1, ENOSYS +} + +func setattrlistTimes(path string, times []Timespec, flags int) error { + // used on Darwin for UtimesNano + return ENOSYS +} + +//sys ioctl(fd int, req uint, arg uintptr) (err error) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermios(fd int, req uint, value *Termios) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func Uname(uname *Utsname) error { + mib := []_C_int{CTL_KERN, KERN_OSTYPE} + n := unsafe.Sizeof(uname.Sysname) + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_HOSTNAME} + n = unsafe.Sizeof(uname.Nodename) + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_OSRELEASE} + n = unsafe.Sizeof(uname.Release) + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_VERSION} + n = unsafe.Sizeof(uname.Version) + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + return err + } + + // The version might have newlines or tabs in it, convert them to + // spaces. + for i, b := range uname.Version { + if b == '\n' || b == '\t' { + if i == len(uname.Version)-1 { + uname.Version[i] = 0 + } else { + uname.Version[i] = ' ' + } + } + } + + mib = []_C_int{CTL_HW, HW_MACHINE} + n = unsafe.Sizeof(uname.Machine) + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + return err + } + + return nil +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Issetugid() (tainted bool) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Unlink(path string) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE +//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) + +/* + * Unimplemented + */ +// ____semctl13 +// __clone +// __fhopen40 +// __fhstat40 +// __fhstatvfs140 +// __fstat30 +// __getcwd +// __getfh30 +// __getlogin +// __lstat30 +// __mount50 +// __msgctl13 +// __msync13 +// __ntp_gettime30 +// __posix_chown +// __posix_fadvise50 +// __posix_fchown +// __posix_lchown +// __posix_rename +// __setlogin +// __shmctl13 +// __sigaction_sigtramp +// __sigaltstack14 +// __sigpending14 +// __sigprocmask14 +// __sigsuspend14 +// __sigtimedwait +// __stat30 +// __syscall +// __vfork14 +// _ksem_close +// _ksem_destroy +// _ksem_getvalue +// _ksem_init +// _ksem_open +// _ksem_post +// _ksem_trywait +// _ksem_unlink +// _ksem_wait +// _lwp_continue +// _lwp_create +// _lwp_ctl +// _lwp_detach +// _lwp_exit +// _lwp_getname +// _lwp_getprivate +// _lwp_kill +// _lwp_park +// _lwp_self +// _lwp_setname +// _lwp_setprivate +// _lwp_suspend +// _lwp_unpark +// _lwp_unpark_all +// _lwp_wait +// _lwp_wakeup +// _pset_bind +// _sched_getaffinity +// _sched_getparam +// _sched_setaffinity +// _sched_setparam +// acct +// aio_cancel +// aio_error +// aio_fsync +// aio_read +// aio_return +// aio_suspend +// aio_write +// break +// clock_getres +// clock_gettime +// clock_settime +// compat_09_ogetdomainname +// compat_09_osetdomainname +// compat_09_ouname +// compat_10_omsgsys +// compat_10_osemsys +// compat_10_oshmsys +// compat_12_fstat12 +// compat_12_getdirentries +// compat_12_lstat12 +// compat_12_msync +// compat_12_oreboot +// compat_12_oswapon +// compat_12_stat12 +// compat_13_sigaction13 +// compat_13_sigaltstack13 +// compat_13_sigpending13 +// compat_13_sigprocmask13 +// compat_13_sigreturn13 +// compat_13_sigsuspend13 +// compat_14___semctl +// compat_14_msgctl +// compat_14_shmctl +// compat_16___sigaction14 +// compat_16___sigreturn14 +// compat_20_fhstatfs +// compat_20_fstatfs +// compat_20_getfsstat +// compat_20_statfs +// compat_30___fhstat30 +// compat_30___fstat13 +// compat_30___lstat13 +// compat_30___stat13 +// compat_30_fhopen +// compat_30_fhstat +// compat_30_fhstatvfs1 +// compat_30_getdents +// compat_30_getfh +// compat_30_ntp_gettime +// compat_30_socket +// compat_40_mount +// compat_43_fstat43 +// compat_43_lstat43 +// compat_43_oaccept +// compat_43_ocreat +// compat_43_oftruncate +// compat_43_ogetdirentries +// compat_43_ogetdtablesize +// compat_43_ogethostid +// compat_43_ogethostname +// compat_43_ogetkerninfo +// compat_43_ogetpagesize +// compat_43_ogetpeername +// compat_43_ogetrlimit +// compat_43_ogetsockname +// compat_43_okillpg +// compat_43_olseek +// compat_43_ommap +// compat_43_oquota +// compat_43_orecv +// compat_43_orecvfrom +// compat_43_orecvmsg +// compat_43_osend +// compat_43_osendmsg +// compat_43_osethostid +// compat_43_osethostname +// compat_43_osetrlimit +// compat_43_osigblock +// compat_43_osigsetmask +// compat_43_osigstack +// compat_43_osigvec +// compat_43_otruncate +// compat_43_owait +// compat_43_stat43 +// execve +// extattr_delete_fd +// extattr_delete_file +// extattr_delete_link +// extattr_get_fd +// extattr_get_file +// extattr_get_link +// extattr_list_fd +// extattr_list_file +// extattr_list_link +// extattr_set_fd +// extattr_set_file +// extattr_set_link +// extattrctl +// fchroot +// fdatasync +// fgetxattr +// fktrace +// flistxattr +// fork +// fremovexattr +// fsetxattr +// fstatvfs1 +// fsync_range +// getcontext +// getitimer +// getvfsstat +// getxattr +// ktrace +// lchflags +// lchmod +// lfs_bmapv +// lfs_markv +// lfs_segclean +// lfs_segwait +// lgetxattr +// lio_listio +// listxattr +// llistxattr +// lremovexattr +// lseek +// lsetxattr +// lutimes +// madvise +// mincore +// minherit +// modctl +// mq_close +// mq_getattr +// mq_notify +// mq_open +// mq_receive +// mq_send +// mq_setattr +// mq_timedreceive +// mq_timedsend +// mq_unlink +// mremap +// msgget +// msgrcv +// msgsnd +// nfssvc +// ntp_adjtime +// pmc_control +// pmc_get_info +// pollts +// preadv +// profil +// pselect +// pset_assign +// pset_create +// pset_destroy +// ptrace +// pwritev +// quotactl +// rasctl +// readv +// reboot +// removexattr +// sa_enable +// sa_preempt +// sa_register +// sa_setconcurrency +// sa_stacks +// sa_yield +// sbrk +// sched_yield +// semconfig +// semget +// semop +// setcontext +// setitimer +// setxattr +// shmat +// shmdt +// shmget +// sstk +// statvfs1 +// swapctl +// sysarch +// syscall +// timer_create +// timer_delete +// timer_getoverrun +// timer_gettime +// timer_settime +// undelete +// utrace +// uuidgen +// vadvise +// vfork +// writev diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go new file mode 100644 index 0000000..24f74e5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -0,0 +1,33 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,netbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = uint32(mode) + k.Flags = uint32(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go new file mode 100644 index 0000000..6878bf7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -0,0 +1,33 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,netbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = uint32(mode) + k.Flags = uint32(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go new file mode 100644 index 0000000..dbbfcf7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -0,0 +1,33 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,netbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = uint32(mode) + k.Flags = uint32(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go new file mode 100644 index 0000000..cef3d41 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -0,0 +1,366 @@ +// Copyright 2009,2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenBSD system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_bsd.go or syscall_unix.go. + +package unix + +import ( + "sort" + "syscall" + "unsafe" +) + +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. +type SockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 + raw RawSockaddrDatalink +} + +func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func nametomib(name string) (mib []_C_int, err error) { + i := sort.Search(len(sysctlMib), func(i int) bool { + return sysctlMib[i].ctlname >= name + }) + if i < len(sysctlMib) && sysctlMib[i].ctlname == name { + return sysctlMib[i].ctloid, nil + } + return nil, EINVAL +} + +//sysnb pipe(p *[2]_C_int) (err error) +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys getdents(fd int, buf []byte) (n int, err error) +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return getdents(fd, buf) +} + +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + +// TODO +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + return -1, ENOSYS +} + +func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func setattrlistTimes(path string, times []Timespec, flags int) error { + // used on Darwin for UtimesNano + return ENOSYS +} + +//sys ioctl(fd int, req uint, arg uintptr) (err error) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermios(fd int, req uint, value *Termios) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func Uname(uname *Utsname) error { + mib := []_C_int{CTL_KERN, KERN_OSTYPE} + n := unsafe.Sizeof(uname.Sysname) + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_HOSTNAME} + n = unsafe.Sizeof(uname.Nodename) + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_OSRELEASE} + n = unsafe.Sizeof(uname.Release) + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_VERSION} + n = unsafe.Sizeof(uname.Version) + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + return err + } + + // The version might have newlines or tabs in it, convert them to + // spaces. + for i, b := range uname.Version { + if b == '\n' || b == '\t' { + if i == len(uname.Version)-1 { + uname.Version[i] = 0 + } else { + uname.Version[i] = ' ' + } + } + } + + mib = []_C_int{CTL_HW, HW_MACHINE} + n = unsafe.Sizeof(uname.Machine) + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + return err + } + + return nil +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chflags(path string, flags int) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(from int, to int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchflags(fd int, flags int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgrp int) +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Issetugid() (tainted bool) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Kqueue() (fd int, err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Revoke(path string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Setlogin(name string) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tp *Timeval) (err error) +//sysnb Setuid(uid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Unlink(path string) (err error) +//sys Unmount(path string, flags int) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE +//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) + +/* + * Unimplemented + */ +// __getcwd +// __semctl +// __syscall +// __sysctl +// adjfreq +// break +// clock_getres +// clock_gettime +// clock_settime +// closefrom +// execve +// faccessat +// fchmodat +// fchownat +// fcntl +// fhopen +// fhstat +// fhstatfs +// fork +// fstatat +// futimens +// getfh +// getgid +// getitimer +// getlogin +// getresgid +// getresuid +// getrtable +// getthrid +// ktrace +// lfs_bmapv +// lfs_markv +// lfs_segclean +// lfs_segwait +// linkat +// mincore +// minherit +// mkdirat +// mkfifoat +// mknodat +// mount +// mquery +// msgctl +// msgget +// msgrcv +// msgsnd +// nfssvc +// nnpfspioctl +// openat +// preadv +// profil +// pwritev +// quotactl +// readlinkat +// readv +// reboot +// renameat +// rfork +// sched_yield +// semget +// semop +// setgroups +// setitimer +// setrtable +// setsockopt +// shmat +// shmctl +// shmdt +// shmget +// sigaction +// sigaltstack +// sigpending +// sigprocmask +// sigreturn +// sigsuspend +// symlinkat +// sysarch +// syscall +// threxit +// thrsigdivert +// thrsleep +// thrwakeup +// unlinkat +// vfork +// writev diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go new file mode 100644 index 0000000..994964a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -0,0 +1,33 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go new file mode 100644 index 0000000..649e67f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -0,0 +1,33 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go new file mode 100644 index 0000000..59844f5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go new file mode 100644 index 0000000..f4d2a34 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -0,0 +1,718 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Solaris system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and wrap +// it in our own nicer implementation, either here or in +// syscall_solaris.go or syscall_unix.go. + +package unix + +import ( + "syscall" + "unsafe" +) + +// Implemented in runtime/syscall_solaris.go. +type syscallFunc uintptr + +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. +type SockaddrDatalink struct { + Family uint16 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [244]int8 + raw RawSockaddrDatalink +} + +//sysnb pipe(p *[2]_C_int) (n int, err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + n, err := pipe(&pp) + if n != 0 { + return err + } + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return nil +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n >= len(sa.raw.Path) { + return nil, 0, EINVAL + } + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + // length is family (uint16), name, NUL. + sl := _Socklen(2) + if n > 0 { + sl += _Socklen(n) + 1 + } + if sa.raw.Path[0] == '@' { + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- + } + + return unsafe.Pointer(&sa.raw), sl, nil +} + +//sys getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getsockname + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(&rsa) +} + +// GetsockoptString returns the string value of the socket option opt for the +// socket associated with fd at the given socket level. +func GetsockoptString(fd, level, opt int) (string, error) { + buf := make([]byte, 256) + vallen := _Socklen(len(buf)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + if err != nil { + return "", err + } + return string(buf[:vallen-1]), nil +} + +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) + +func Getwd() (wd string, err error) { + var buf [PathMax]byte + // Getcwd will return an error if it failed for any reason. + _, err = Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + +/* + * Wrapped + */ + +//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error) +//sysnb setgroups(ngid int, gid *_Gid_t) (err error) + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + // Check for error and sanity check group count. Newer versions of + // Solaris allow up to 1024 (NGROUPS_MAX). + if n < 0 || n > 1024 { + if err != nil { + return nil, err + } + return nil, EINVAL + } else if n == 0 { + return nil, nil + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if n == -1 { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +func ReadDirent(fd int, buf []byte) (n int, err error) { + // Final argument is (basep *uintptr) and the syscall doesn't take nil. + // TODO(rsc): Can we use a single global basep for all calls? + return Getdents(fd, buf, new(uintptr)) +} + +// Wait status is 7 bits at bottom, either 0 (exited), +// 0x7F (stopped), or a signal number that caused an exit. +// The 0x80 bit is whether there was a core dump. +// An extra number (exit code, signal causing a stop) +// is in the high bits. + +type WaitStatus uint32 + +const ( + mask = 0x7F + core = 0x80 + shift = 8 + + exited = 0 + stopped = 0x7F +) + +func (w WaitStatus) Exited() bool { return w&mask == exited } + +func (w WaitStatus) ExitStatus() int { + if w&mask != exited { + return -1 + } + return int(w >> shift) +} + +func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != 0 } + +func (w WaitStatus) Signal() syscall.Signal { + sig := syscall.Signal(w & mask) + if sig == stopped || sig == 0 { + return -1 + } + return sig +} + +func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } + +func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP } + +func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP } + +func (w WaitStatus) StopSignal() syscall.Signal { + if !w.Stopped() { + return -1 + } + return syscall.Signal(w>>shift) & 0xFF +} + +func (w WaitStatus) TrapCause() int { return -1 } + +//sys wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, err error) + +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (int, error) { + var status _C_int + rpid, err := wait4(int32(pid), &status, options, rusage) + wpid := int(rpid) + if wpid == -1 { + return wpid, err + } + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return wpid, nil +} + +//sys gethostname(buf []byte) (n int, err error) + +func Gethostname() (name string, err error) { + var buf [MaxHostNameLen]byte + n, err := gethostname(buf[:]) + if n != 0 { + return "", err + } + n = clen(buf[:]) + if n < 1 { + return "", EFAULT + } + return string(buf[:n]), nil +} + +//sys utimes(path string, times *[2]Timeval) (err error) + +func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +//sys utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) + +func UtimesNano(path string, ts []Timespec) error { + if ts == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +//sys fcntl(fd int, cmd int, arg int) (val int, err error) + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0) + if e1 != 0 { + return e1 + } + return nil +} + +//sys futimesat(fildes int, path *byte, times *[2]Timeval) (err error) + +func Futimesat(dirfd int, path string, tv []Timeval) error { + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + if tv == nil { + return futimesat(dirfd, pathp, nil) + } + if len(tv) != 2 { + return EINVAL + } + return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +// Solaris doesn't have an futimes function because it allows NULL to be +// specified as the path for futimesat. However, Go doesn't like +// NULL-style string interfaces, so this simple wrapper is provided. +func Futimes(fd int, tv []Timeval) error { + if tv == nil { + return futimesat(fd, nil, nil) + } + if len(tv) != 2 { + return EINVAL + } + return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + // Assume path ends at NUL. + // This is not technically the Solaris semantics for + // abstract Unix domain sockets -- they are supposed + // to be uninterpreted fixed-size binary blobs -- but + // everyone uses this convention. + n := 0 + for n < len(pp.Path) && pp.Path[n] != 0 { + n++ + } + bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = libsocket.accept + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if nfd == -1 { + return + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_recvmsg + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var iov Iovec + if len(p) > 0 { + iov.Base = (*int8)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy int8 + if len(oob) > 0 { + // receive at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Accrightslen = int32(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); n == -1 { + return + } + oobn = int(msg.Accrightslen) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(&rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_sendmsg + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = (*int8)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy int8 + if len(oob) > 0 { + // send at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Accrightslen = int32(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +//sys acct(path *byte) (err error) + +func Acct(path string) (err error) { + if len(path) == 0 { + // Assume caller wants to disable accounting. + return acct(nil) + } + + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + return acct(pathp) +} + +//sys __makedev(version int, major uint, minor uint) (val uint64) + +func Mkdev(major, minor uint32) uint64 { + return __makedev(NEWDEV, uint(major), uint(minor)) +} + +//sys __major(version int, dev uint64) (val uint) + +func Major(dev uint64) uint32 { + return uint32(__major(NEWDEV, dev)) +} + +//sys __minor(version int, dev uint64) (val uint) + +func Minor(dev uint64) uint32 { + return uint32(__minor(NEWDEV, dev)) +} + +/* + * Expose the ioctl function + */ + +//sys ioctl(fd int, req uint, arg uintptr) (err error) + +func IoctlSetInt(fd int, req uint, value int) (err error) { + return ioctl(fd, req, uintptr(value)) +} + +func IoctlSetWinsize(fd int, req uint, value *Winsize) (err error) { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermios(fd int, req uint, value *Termios) (err error) { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlSetTermio(fd int, req uint, value *Termio) (err error) { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermio(fd int, req uint) (*Termio, error) { + var value Termio + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + +/* + * Exposed directly + */ +//sys Access(path string, mode uint32) (err error) +//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) +//sys Chdir(path string) (err error) +//sys Chmod(path string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Creat(path string, mode uint32) (fd int, err error) +//sys Dup(fd int) (nfd int, err error) +//sys Dup2(oldfd int, newfd int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys Fdatasync(fd int) (err error) +//sys Flock(fd int, how int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) +//sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) +//sysnb Getgid() (gid int) +//sysnb Getpid() (pid int) +//sysnb Getpgid(pid int) (pgid int, err error) +//sysnb Getpgrp() (pgid int, err error) +//sys Geteuid() (euid int) +//sys Getegid() (egid int) +//sys Getppid() (ppid int) +//sys Getpriority(which int, who int) (n int, err error) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Getuid() (uid int) +//sys Kill(pid int, signum syscall.Signal) (err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Madvise(b []byte, advice int) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mkfifoat(dirfd int, path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Msync(b []byte, flags int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) +//sys Pathconf(path string, name int) (val int, err error) +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Rename(from string, to string) (err error) +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err error) +//sysnb Setgid(gid int) (err error) +//sys Sethostname(p []byte) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Setuid(uid int) (err error) +//sys Shutdown(s int, how int) (err error) = libsocket.shutdown +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statvfs(path string, vfsstat *Statvfs_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Sync() (err error) +//sysnb Times(tms *Tms) (ticks uintptr, err error) +//sys Truncate(path string, length int64) (err error) +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Umask(mask int) (oldmask int) +//sysnb Uname(buf *Utsname) (err error) +//sys Unmount(target string, flags int) (err error) = libc.umount +//sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) +//sys Utime(path string, buf *Utimbuf) (err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_bind +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_connect +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_sendto +//sys socket(domain int, typ int, proto int) (fd int, err error) = libsocket.__xnet_socket +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.__xnet_socketpair +//sys write(fd int, p []byte) (n int, err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) = libsocket.__xnet_getsockopt +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getpeername +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go new file mode 100644 index 0000000..9d4e7a6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -0,0 +1,28 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,solaris + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO(aram): implement this, see issue 5847. + panic("unimplemented") +} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_test.go b/vendor/golang.org/x/sys/unix/syscall_solaris_test.go new file mode 100644 index 0000000..57dba88 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_test.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package unix_test + +import ( + "os/exec" + "testing" + "time" + + "golang.org/x/sys/unix" +) + +func TestSelect(t *testing.T) { + err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0}) + if err != nil { + t.Fatalf("Select: %v", err) + } + + dur := 150 * time.Millisecond + tv := unix.NsecToTimeval(int64(dur)) + start := time.Now() + err = unix.Select(0, nil, nil, nil, &tv) + took := time.Since(start) + if err != nil { + t.Fatalf("Select: %v", err) + } + + if took < dur { + t.Errorf("Select: timeout should have been at least %v, got %v", dur, took) + } +} + +func TestStatvfs(t *testing.T) { + if err := unix.Statvfs("", nil); err == nil { + t.Fatal(`Statvfs("") expected failure`) + } + + statvfs := unix.Statvfs_t{} + if err := unix.Statvfs("/", &statvfs); err != nil { + t.Errorf(`Statvfs("/") failed: %v`, err) + } + + if t.Failed() { + mount, err := exec.Command("mount").CombinedOutput() + if err != nil { + t.Logf("mount: %v\n%s", err, mount) + } else { + t.Logf("mount: %s", mount) + } + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_test.go b/vendor/golang.org/x/sys/unix/syscall_test.go new file mode 100644 index 0000000..a8eef7c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_test.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix_test + +import ( + "fmt" + "testing" + + "golang.org/x/sys/unix" +) + +func testSetGetenv(t *testing.T, key, value string) { + err := unix.Setenv(key, value) + if err != nil { + t.Fatalf("Setenv failed to set %q: %v", value, err) + } + newvalue, found := unix.Getenv(key) + if !found { + t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value) + } + if newvalue != value { + t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value) + } +} + +func TestEnv(t *testing.T) { + testSetGetenv(t, "TESTENV", "AVALUE") + // make sure TESTENV gets set to "", not deleted + testSetGetenv(t, "TESTENV", "") +} + +func TestItoa(t *testing.T) { + // Make most negative integer: 0x8000... + i := 1 + for i<<1 != 0 { + i <<= 1 + } + if i >= 0 { + t.Fatal("bad math") + } + s := unix.Itoa(i) + f := fmt.Sprint(i) + if s != f { + t.Fatalf("itoa(%d) = %s, want %s", i, s, f) + } +} + +func TestUname(t *testing.T) { + var utsname unix.Utsname + err := unix.Uname(&utsname) + if err != nil { + t.Fatalf("Uname: %v", err) + } + + t.Logf("OS: %s/%s %s", utsname.Sysname[:], utsname.Machine[:], utsname.Release[:]) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go new file mode 100644 index 0000000..80b05a4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -0,0 +1,307 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +import ( + "bytes" + "runtime" + "sync" + "syscall" + "unsafe" +) + +var ( + Stdin = 0 + Stdout = 1 + Stderr = 2 +) + +const ( + darwin64Bit = runtime.GOOS == "darwin" && sizeofPtr == 8 + dragonfly64Bit = runtime.GOOS == "dragonfly" && sizeofPtr == 8 + netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4 + solaris64Bit = runtime.GOOS == "solaris" && sizeofPtr == 8 +) + +// Do the interface allocations only once for common +// Errno values. +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + } + return e +} + +// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte. +func clen(n []byte) int { + i := bytes.IndexByte(n, 0) + if i == -1 { + i = len(n) + } + return i +} + +// Mmap manager, for use by operating system-specific implementations. + +type mmapper struct { + sync.Mutex + active map[*byte][]byte // active mappings; key is last byte in mapping + mmap func(addr, length uintptr, prot, flags, fd int, offset int64) (uintptr, error) + munmap func(addr uintptr, length uintptr) error +} + +func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + if length <= 0 { + return nil, EINVAL + } + + // Map the requested memory. + addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) + if errno != nil { + return nil, errno + } + + // Slice memory layout + var sl = struct { + addr uintptr + len int + cap int + }{addr, length, length} + + // Use unsafe to turn sl into a []byte. + b := *(*[]byte)(unsafe.Pointer(&sl)) + + // Register mapping in m and return it. + p := &b[cap(b)-1] + m.Lock() + defer m.Unlock() + m.active[p] = b + return b, nil +} + +func (m *mmapper) Munmap(data []byte) (err error) { + if len(data) == 0 || len(data) != cap(data) { + return EINVAL + } + + // Find the base of the mapping. + p := &data[cap(data)-1] + m.Lock() + defer m.Unlock() + b := m.active[p] + if b == nil || &b[0] != &data[0] { + return EINVAL + } + + // Unmap the memory and update m. + if errno := m.munmap(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))); errno != nil { + return errno + } + delete(m.active, p) + return nil +} + +func Read(fd int, p []byte) (n int, err error) { + n, err = read(fd, p) + if raceenabled { + if n > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), n) + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } + } + return +} + +func Write(fd int, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = write(fd, p) + if raceenabled && n > 0 { + raceReadRange(unsafe.Pointer(&p[0]), n) + } + return +} + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. +var SocketDisableIPv6 bool + +// Sockaddr represents a socket address. +type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs +} + +// SockaddrInet4 implements the Sockaddr interface for AF_INET type sockets. +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +// SockaddrInet6 implements the Sockaddr interface for AF_INET6 type sockets. +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +// SockaddrUnix implements the Sockaddr interface for AF_UNIX type sockets. +type SockaddrUnix struct { + Name string + raw RawSockaddrUnix +} + +func Bind(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getpeername(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getpeername(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(&rsa) +} + +func GetsockoptInt(fd, level, opt int) (value int, err error) { + var n int32 + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return int(n), err +} + +func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if n, err = recvfrom(fd, p, flags, &rsa, &len); err != nil { + return + } + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(&rsa) + } + return +} + +func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) { + ptr, n, err := to.sockaddr() + if err != nil { + return err + } + return sendto(fd, p, flags, ptr, n) +} + +func SetsockoptByte(fd, level, opt int, value byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value), 1) +} + +func SetsockoptInt(fd, level, opt int, value int) (err error) { + var n = int32(value) + return setsockopt(fd, level, opt, unsafe.Pointer(&n), 4) +} + +func SetsockoptInet4Addr(fd, level, opt int, value [4]byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value[0]), 4) +} + +func SetsockoptIPMreq(fd, level, opt int, mreq *IPMreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPMreq) +} + +func SetsockoptIPv6Mreq(fd, level, opt int, mreq *IPv6Mreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPv6Mreq) +} + +func SetsockoptICMPv6Filter(fd, level, opt int, filter *ICMPv6Filter) error { + return setsockopt(fd, level, opt, unsafe.Pointer(filter), SizeofICMPv6Filter) +} + +func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(l), SizeofLinger) +} + +func SetsockoptString(fd, level, opt int, s string) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&[]byte(s)[0]), uintptr(len(s))) +} + +func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv)) +} + +func Socket(domain, typ, proto int) (fd int, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return -1, EAFNOSUPPORT + } + fd, err = socket(domain, typ, proto) + return +} + +func Socketpair(domain, typ, proto int) (fd [2]int, err error) { + var fdx [2]int32 + err = socketpair(domain, typ, proto, &fdx) + if err == nil { + fd[0] = int(fdx[0]) + fd[1] = int(fdx[1]) + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +var ioSync int64 + +func CloseOnExec(fd int) { fcntl(fd, F_SETFD, FD_CLOEXEC) } + +func SetNonblock(fd int, nonblocking bool) (err error) { + flag, err := fcntl(fd, F_GETFL, 0) + if err != nil { + return err + } + if nonblocking { + flag |= O_NONBLOCK + } else { + flag &= ^O_NONBLOCK + } + _, err = fcntl(fd, F_SETFL, flag) + return err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go new file mode 100644 index 0000000..4cb8e8e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build !gccgo + +package unix + +import "syscall" + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) +func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) +func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) +func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_test.go b/vendor/golang.org/x/sys/unix/syscall_unix_test.go new file mode 100644 index 0000000..c337887 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_unix_test.go @@ -0,0 +1,521 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix_test + +import ( + "flag" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + "time" + + "golang.org/x/sys/unix" +) + +// Tests that below functions, structures and constants are consistent +// on all Unix-like systems. +func _() { + // program scheduling priority functions and constants + var ( + _ func(int, int, int) error = unix.Setpriority + _ func(int, int) (int, error) = unix.Getpriority + ) + const ( + _ int = unix.PRIO_USER + _ int = unix.PRIO_PROCESS + _ int = unix.PRIO_PGRP + ) + + // termios constants + const ( + _ int = unix.TCIFLUSH + _ int = unix.TCIOFLUSH + _ int = unix.TCOFLUSH + ) + + // fcntl file locking structure and constants + var ( + _ = unix.Flock_t{ + Type: int16(0), + Whence: int16(0), + Start: int64(0), + Len: int64(0), + Pid: int32(0), + } + ) + const ( + _ = unix.F_GETLK + _ = unix.F_SETLK + _ = unix.F_SETLKW + ) +} + +// TestFcntlFlock tests whether the file locking structure matches +// the calling convention of each kernel. +func TestFcntlFlock(t *testing.T) { + name := filepath.Join(os.TempDir(), "TestFcntlFlock") + fd, err := unix.Open(name, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0) + if err != nil { + t.Fatalf("Open failed: %v", err) + } + defer unix.Unlink(name) + defer unix.Close(fd) + flock := unix.Flock_t{ + Type: unix.F_RDLCK, + Start: 0, Len: 0, Whence: 1, + } + if err := unix.FcntlFlock(uintptr(fd), unix.F_GETLK, &flock); err != nil { + t.Fatalf("FcntlFlock failed: %v", err) + } +} + +// TestPassFD tests passing a file descriptor over a Unix socket. +// +// This test involved both a parent and child process. The parent +// process is invoked as a normal test, with "go test", which then +// runs the child process by running the current test binary with args +// "-test.run=^TestPassFD$" and an environment variable used to signal +// that the test should become the child process instead. +func TestPassFD(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + passFDChild() + return + } + + tempDir, err := ioutil.TempDir("", "TestPassFD") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("Socketpair: %v", err) + } + defer unix.Close(fds[0]) + defer unix.Close(fds[1]) + writeFile := os.NewFile(uintptr(fds[0]), "child-writes") + readFile := os.NewFile(uintptr(fds[1]), "parent-reads") + defer writeFile.Close() + defer readFile.Close() + + cmd := exec.Command(os.Args[0], "-test.run=^TestPassFD$", "--", tempDir) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + if lp := os.Getenv("LD_LIBRARY_PATH"); lp != "" { + cmd.Env = append(cmd.Env, "LD_LIBRARY_PATH="+lp) + } + cmd.ExtraFiles = []*os.File{writeFile} + + out, err := cmd.CombinedOutput() + if len(out) > 0 || err != nil { + t.Fatalf("child process: %q, %v", out, err) + } + + c, err := net.FileConn(readFile) + if err != nil { + t.Fatalf("FileConn: %v", err) + } + defer c.Close() + + uc, ok := c.(*net.UnixConn) + if !ok { + t.Fatalf("unexpected FileConn type; expected UnixConn, got %T", c) + } + + buf := make([]byte, 32) // expect 1 byte + oob := make([]byte, 32) // expect 24 bytes + closeUnix := time.AfterFunc(5*time.Second, func() { + t.Logf("timeout reading from unix socket") + uc.Close() + }) + _, oobn, _, _, err := uc.ReadMsgUnix(buf, oob) + if err != nil { + t.Fatalf("ReadMsgUnix: %v", err) + } + closeUnix.Stop() + + scms, err := unix.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + if len(scms) != 1 { + t.Fatalf("expected 1 SocketControlMessage; got scms = %#v", scms) + } + scm := scms[0] + gotFds, err := unix.ParseUnixRights(&scm) + if err != nil { + t.Fatalf("unix.ParseUnixRights: %v", err) + } + if len(gotFds) != 1 { + t.Fatalf("wanted 1 fd; got %#v", gotFds) + } + + f := os.NewFile(uintptr(gotFds[0]), "fd-from-child") + defer f.Close() + + got, err := ioutil.ReadAll(f) + want := "Hello from child process!\n" + if string(got) != want { + t.Errorf("child process ReadAll: %q, %v; want %q", got, err, want) + } +} + +// passFDChild is the child process used by TestPassFD. +func passFDChild() { + defer os.Exit(0) + + // Look for our fd. It should be fd 3, but we work around an fd leak + // bug here (http://golang.org/issue/2603) to let it be elsewhere. + var uc *net.UnixConn + for fd := uintptr(3); fd <= 10; fd++ { + f := os.NewFile(fd, "unix-conn") + var ok bool + netc, _ := net.FileConn(f) + uc, ok = netc.(*net.UnixConn) + if ok { + break + } + } + if uc == nil { + fmt.Println("failed to find unix fd") + return + } + + // Make a file f to send to our parent process on uc. + // We make it in tempDir, which our parent will clean up. + flag.Parse() + tempDir := flag.Arg(0) + f, err := ioutil.TempFile(tempDir, "") + if err != nil { + fmt.Printf("TempFile: %v", err) + return + } + + f.Write([]byte("Hello from child process!\n")) + f.Seek(0, 0) + + rights := unix.UnixRights(int(f.Fd())) + dummyByte := []byte("x") + n, oobn, err := uc.WriteMsgUnix(dummyByte, rights, nil) + if err != nil { + fmt.Printf("WriteMsgUnix: %v", err) + return + } + if n != 1 || oobn != len(rights) { + fmt.Printf("WriteMsgUnix = %d, %d; want 1, %d", n, oobn, len(rights)) + return + } +} + +// TestUnixRightsRoundtrip tests that UnixRights, ParseSocketControlMessage, +// and ParseUnixRights are able to successfully round-trip lists of file descriptors. +func TestUnixRightsRoundtrip(t *testing.T) { + testCases := [...][][]int{ + {{42}}, + {{1, 2}}, + {{3, 4, 5}}, + {{}}, + {{1, 2}, {3, 4, 5}, {}, {7}}, + } + for _, testCase := range testCases { + b := []byte{} + var n int + for _, fds := range testCase { + // Last assignment to n wins + n = len(b) + unix.CmsgLen(4*len(fds)) + b = append(b, unix.UnixRights(fds...)...) + } + // Truncate b + b = b[:n] + + scms, err := unix.ParseSocketControlMessage(b) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + if len(scms) != len(testCase) { + t.Fatalf("expected %v SocketControlMessage; got scms = %#v", len(testCase), scms) + } + for i, scm := range scms { + gotFds, err := unix.ParseUnixRights(&scm) + if err != nil { + t.Fatalf("ParseUnixRights: %v", err) + } + wantFds := testCase[i] + if len(gotFds) != len(wantFds) { + t.Fatalf("expected %v fds, got %#v", len(wantFds), gotFds) + } + for j, fd := range gotFds { + if fd != wantFds[j] { + t.Fatalf("expected fd %v, got %v", wantFds[j], fd) + } + } + } + } +} + +func TestRlimit(t *testing.T) { + var rlimit, zero unix.Rlimit + err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit) + if err != nil { + t.Fatalf("Getrlimit: save failed: %v", err) + } + if zero == rlimit { + t.Fatalf("Getrlimit: save failed: got zero value %#v", rlimit) + } + set := rlimit + set.Cur = set.Max - 1 + err = unix.Setrlimit(unix.RLIMIT_NOFILE, &set) + if err != nil { + t.Fatalf("Setrlimit: set failed: %#v %v", set, err) + } + var get unix.Rlimit + err = unix.Getrlimit(unix.RLIMIT_NOFILE, &get) + if err != nil { + t.Fatalf("Getrlimit: get failed: %v", err) + } + set = rlimit + set.Cur = set.Max - 1 + if set != get { + // Seems like Darwin requires some privilege to + // increase the soft limit of rlimit sandbox, though + // Setrlimit never reports an error. + switch runtime.GOOS { + case "darwin": + default: + t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get) + } + } + err = unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit) + if err != nil { + t.Fatalf("Setrlimit: restore failed: %#v %v", rlimit, err) + } +} + +func TestSeekFailure(t *testing.T) { + _, err := unix.Seek(-1, 0, 0) + if err == nil { + t.Fatalf("Seek(-1, 0, 0) did not fail") + } + str := err.Error() // used to crash on Linux + t.Logf("Seek: %v", str) + if str == "" { + t.Fatalf("Seek(-1, 0, 0) return error with empty message") + } +} + +func TestDup(t *testing.T) { + file, err := ioutil.TempFile("", "TestDup") + if err != nil { + t.Fatalf("Tempfile failed: %v", err) + } + defer os.Remove(file.Name()) + defer file.Close() + f := int(file.Fd()) + + newFd, err := unix.Dup(f) + if err != nil { + t.Fatalf("Dup: %v", err) + } + + err = unix.Dup2(newFd, newFd+1) + if err != nil { + t.Fatalf("Dup2: %v", err) + } + + b1 := []byte("Test123") + b2 := make([]byte, 7) + _, err = unix.Write(newFd+1, b1) + if err != nil { + t.Fatalf("Write to dup2 fd failed: %v", err) + } + _, err = unix.Seek(f, 0, 0) + if err != nil { + t.Fatalf("Seek failed: %v", err) + } + _, err = unix.Read(f, b2) + if err != nil { + t.Fatalf("Read back failed: %v", err) + } + if string(b1) != string(b2) { + t.Errorf("Dup: stdout write not in file, expected %v, got %v", string(b1), string(b2)) + } +} + +func TestPoll(t *testing.T) { + f, cleanup := mktmpfifo(t) + defer cleanup() + + const timeout = 100 + + ok := make(chan bool, 1) + go func() { + select { + case <-time.After(10 * timeout * time.Millisecond): + t.Errorf("Poll: failed to timeout after %d milliseconds", 10*timeout) + case <-ok: + } + }() + + fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}} + n, err := unix.Poll(fds, timeout) + ok <- true + if err != nil { + t.Errorf("Poll: unexpected error: %v", err) + return + } + if n != 0 { + t.Errorf("Poll: wrong number of events: got %v, expected %v", n, 0) + return + } +} + +func TestGetwd(t *testing.T) { + fd, err := os.Open(".") + if err != nil { + t.Fatalf("Open .: %s", err) + } + defer fd.Close() + // These are chosen carefully not to be symlinks on a Mac + // (unlike, say, /var, /etc) + dirs := []string{"/", "/usr/bin"} + if runtime.GOOS == "darwin" { + switch runtime.GOARCH { + case "arm", "arm64": + d1, err := ioutil.TempDir("", "d1") + if err != nil { + t.Fatalf("TempDir: %v", err) + } + d2, err := ioutil.TempDir("", "d2") + if err != nil { + t.Fatalf("TempDir: %v", err) + } + dirs = []string{d1, d2} + } + } + oldwd := os.Getenv("PWD") + for _, d := range dirs { + err = os.Chdir(d) + if err != nil { + t.Fatalf("Chdir: %v", err) + } + pwd, err := unix.Getwd() + if err != nil { + t.Fatalf("Getwd in %s: %s", d, err) + } + os.Setenv("PWD", oldwd) + err = fd.Chdir() + if err != nil { + // We changed the current directory and cannot go back. + // Don't let the tests continue; they'll scribble + // all over some other directory. + fmt.Fprintf(os.Stderr, "fchdir back to dot failed: %s\n", err) + os.Exit(1) + } + if pwd != d { + t.Fatalf("Getwd returned %q want %q", pwd, d) + } + } +} + +func TestFstatat(t *testing.T) { + defer chtmpdir(t)() + + touch(t, "file1") + + var st1 unix.Stat_t + err := unix.Stat("file1", &st1) + if err != nil { + t.Fatalf("Stat: %v", err) + } + + var st2 unix.Stat_t + err = unix.Fstatat(unix.AT_FDCWD, "file1", &st2, 0) + if err != nil { + t.Fatalf("Fstatat: %v", err) + } + + if st1 != st2 { + t.Errorf("Fstatat: returned stat does not match Stat") + } + + err = os.Symlink("file1", "symlink1") + if err != nil { + t.Fatal(err) + } + + err = unix.Lstat("symlink1", &st1) + if err != nil { + t.Fatalf("Lstat: %v", err) + } + + err = unix.Fstatat(unix.AT_FDCWD, "symlink1", &st2, unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + t.Fatalf("Fstatat: %v", err) + } + + if st1 != st2 { + t.Errorf("Fstatat: returned stat does not match Lstat") + } +} + +// mktmpfifo creates a temporary FIFO and provides a cleanup function. +func mktmpfifo(t *testing.T) (*os.File, func()) { + err := unix.Mkfifo("fifo", 0666) + if err != nil { + t.Fatalf("mktmpfifo: failed to create FIFO: %v", err) + } + + f, err := os.OpenFile("fifo", os.O_RDWR, 0666) + if err != nil { + os.Remove("fifo") + t.Fatalf("mktmpfifo: failed to open FIFO: %v", err) + } + + return f, func() { + f.Close() + os.Remove("fifo") + } +} + +// utilities taken from os/os_test.go + +func touch(t *testing.T, name string) { + f, err := os.Create(name) + if err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } +} + +// chtmpdir changes the working directory to a new temporary directory and +// provides a cleanup function. Used when PWD is read-only. +func chtmpdir(t *testing.T) func() { + oldwd, err := os.Getwd() + if err != nil { + t.Fatalf("chtmpdir: %v", err) + } + d, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatalf("chtmpdir: %v", err) + } + if err := os.Chdir(d); err != nil { + t.Fatalf("chtmpdir: %v", err) + } + return func() { + if err := os.Chdir(oldwd); err != nil { + t.Fatalf("chtmpdir: %v", err) + } + os.RemoveAll(d) + } +} diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go new file mode 100644 index 0000000..47b9011 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -0,0 +1,82 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +import "time" + +// TimespecToNsec converts a Timespec value into a number of +// nanoseconds since the Unix epoch. +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +// NsecToTimespec takes a number of nanoseconds since the Unix epoch +// and returns the corresponding Timespec value. +func NsecToTimespec(nsec int64) Timespec { + sec := nsec / 1e9 + nsec = nsec % 1e9 + if nsec < 0 { + nsec += 1e9 + sec-- + } + return setTimespec(sec, nsec) +} + +// TimeToTimespec converts t into a Timespec. +// On some 32-bit systems the range of valid Timespec values are smaller +// than that of time.Time values. So if t is out of the valid range of +// Timespec, it returns a zero Timespec and ERANGE. +func TimeToTimespec(t time.Time) (Timespec, error) { + sec := t.Unix() + nsec := int64(t.Nanosecond()) + ts := setTimespec(sec, nsec) + + // Currently all targets have either int32 or int64 for Timespec.Sec. + // If there were a new target with floating point type for it, we have + // to consider the rounding error. + if int64(ts.Sec) != sec { + return Timespec{}, ERANGE + } + return ts, nil +} + +// TimevalToNsec converts a Timeval value into a number of nanoseconds +// since the Unix epoch. +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +// NsecToTimeval takes a number of nanoseconds since the Unix epoch +// and returns the corresponding Timeval value. +func NsecToTimeval(nsec int64) Timeval { + nsec += 999 // round up to microsecond + usec := nsec % 1e9 / 1e3 + sec := nsec / 1e9 + if usec < 0 { + usec += 1e6 + sec-- + } + return setTimeval(sec, usec) +} + +// Unix returns ts as the number of seconds and nanoseconds elapsed since the +// Unix epoch. +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +// Unix returns tv as the number of seconds and nanoseconds elapsed since the +// Unix epoch. +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +// Nano returns ts as the number of nanoseconds elapsed since the Unix epoch. +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +// Nano returns tv as the number of nanoseconds elapsed since the Unix epoch. +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/unix/timestruct_test.go b/vendor/golang.org/x/sys/unix/timestruct_test.go new file mode 100644 index 0000000..4215f46 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/timestruct_test.go @@ -0,0 +1,54 @@ +// Copyright 2017 The Go Authors. All right reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix_test + +import ( + "testing" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +func TestTimeToTimespec(t *testing.T) { + timeTests := []struct { + time time.Time + valid bool + }{ + {time.Unix(0, 0), true}, + {time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), true}, + {time.Date(2262, time.December, 31, 23, 0, 0, 0, time.UTC), false}, + {time.Unix(0x7FFFFFFF, 0), true}, + {time.Unix(0x80000000, 0), false}, + {time.Unix(0x7FFFFFFF, 1000000000), false}, + {time.Unix(0x7FFFFFFF, 999999999), true}, + {time.Unix(-0x80000000, 0), true}, + {time.Unix(-0x80000001, 0), false}, + {time.Date(2038, time.January, 19, 3, 14, 7, 0, time.UTC), true}, + {time.Date(2038, time.January, 19, 3, 14, 8, 0, time.UTC), false}, + {time.Date(1901, time.December, 13, 20, 45, 52, 0, time.UTC), true}, + {time.Date(1901, time.December, 13, 20, 45, 51, 0, time.UTC), false}, + } + + // Currently all targets have either int32 or int64 for Timespec.Sec. + // If there were a new target with unsigned or floating point type for + // it, this test must be adjusted. + have64BitTime := (unsafe.Sizeof(unix.Timespec{}.Sec) == 8) + for _, tt := range timeTests { + ts, err := unix.TimeToTimespec(tt.time) + tt.valid = tt.valid || have64BitTime + if tt.valid && err != nil { + t.Errorf("TimeToTimespec(%v): %v", tt.time, err) + } + if err == nil { + tstime := time.Unix(int64(ts.Sec), int64(ts.Nsec)) + if !tstime.Equal(tt.time) { + t.Errorf("TimeToTimespec(%v) is the time %v", tt.time, tstime) + } + } + } +} diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go new file mode 100644 index 0000000..46b9908 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_darwin.go @@ -0,0 +1,277 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define __DARWIN_UNIX03 0 +#define KERNEL +#define _DARWIN_USE_64_BIT_INODE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat64 + +type Statfs_t C.struct_statfs64 + +type Flock_t C.struct_flock + +type Fstore_t C.struct_fstore + +type Radvisory_t C.struct_radvisory + +type Fbootstraptransfer_t C.struct_fbootstraptransfer + +type Log2phys_t C.struct_log2phys + +type Fsid C.struct_fsid + +type Dirent C.struct_dirent + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet4Pktinfo C.struct_in_pktinfo + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfmaMsghdr2 C.struct_ifma_msghdr2 + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// uname + +type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go new file mode 100644 index 0000000..0c63304 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go @@ -0,0 +1,280 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( // Directory mode bits + S_IFMT = C.S_IFMT + S_IFIFO = C.S_IFIFO + S_IFCHR = C.S_IFCHR + S_IFDIR = C.S_IFDIR + S_IFBLK = C.S_IFBLK + S_IFREG = C.S_IFREG + S_IFLNK = C.S_IFLNK + S_IFSOCK = C.S_IFSOCK + S_ISUID = C.S_ISUID + S_ISGID = C.S_ISGID + S_ISVTX = C.S_ISVTX + S_IRUSR = C.S_IRUSR + S_IWUSR = C.S_IWUSR + S_IXUSR = C.S_IXUSR +) + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.struct_fsid + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Uname + +type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go new file mode 100644 index 0000000..4eb02cd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_freebsd.go @@ -0,0 +1,402 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +// This structure is a duplicate of stat on FreeBSD 8-STABLE. +// See /usr/include/sys/stat.h. +struct stat8 { +#undef st_atimespec st_atim +#undef st_mtimespec st_mtim +#undef st_ctimespec st_ctim +#undef st_birthtimespec st_birthtim + __dev_t st_dev; + ino_t st_ino; + mode_t st_mode; + nlink_t st_nlink; + uid_t st_uid; + gid_t st_gid; + __dev_t st_rdev; +#if __BSD_VISIBLE + struct timespec st_atimespec; + struct timespec st_mtimespec; + struct timespec st_ctimespec; +#else + time_t st_atime; + long __st_atimensec; + time_t st_mtime; + long __st_mtimensec; + time_t st_ctime; + long __st_ctimensec; +#endif + off_t st_size; + blkcnt_t st_blocks; + blksize_t st_blksize; + fflags_t st_flags; + __uint32_t st_gen; + __int32_t st_lspare; +#if __BSD_VISIBLE + struct timespec st_birthtimespec; + unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec)); + unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec)); +#else + time_t st_birthtime; + long st_birthtimensec; + unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec)); + unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec)); +#endif +}; + +// This structure is a duplicate of if_data on FreeBSD 8-STABLE. +// See /usr/include/net/if.h. +struct if_data8 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; +// FIXME: these are now unions, so maybe need to change definitions? +#undef ifi_epoch + time_t ifi_epoch; +#undef ifi_lastchange + struct timeval ifi_lastchange; +}; + +// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. +// See /usr/include/net/if.h. +struct if_msghdr8 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data8 ifm_data; +}; +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( // Directory mode bits + S_IFMT = C.S_IFMT + S_IFIFO = C.S_IFIFO + S_IFCHR = C.S_IFCHR + S_IFDIR = C.S_IFDIR + S_IFBLK = C.S_IFBLK + S_IFREG = C.S_IFREG + S_IFLNK = C.S_IFLNK + S_IFSOCK = C.S_IFSOCK + S_ISUID = C.S_ISUID + S_ISGID = C.S_ISGID + S_ISVTX = C.S_ISVTX + S_IRUSR = C.S_IRUSR + S_IWUSR = C.S_IWUSR + S_IXUSR = C.S_IXUSR +) + +type Stat_t C.struct_stat8 + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.struct_fsid + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPMreqn C.struct_ip_mreqn + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPMreqn = C.sizeof_struct_ip_mreqn + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + sizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 + sizeofIfData = C.sizeof_struct_if_data + SizeofIfData = C.sizeof_struct_if_data8 + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type ifMsghdr C.struct_if_msghdr + +type IfMsghdr C.struct_if_msghdr8 + +type ifData C.struct_if_data + +type IfData C.struct_if_data8 + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr + SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfZbuf C.struct_bpf_zbuf + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfZbufHeader C.struct_bpf_zbuf_header + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLINIGNEOF = C.POLLINIGNEOF + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Capabilities + +type CapRights C.struct_cap_rights + +// Uname + +type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go new file mode 100644 index 0000000..10aa9b3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -0,0 +1,270 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +type Mclpool C.struct_mclpool + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfTimeval C.struct_bpf_timeval + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Sysctl + +type Sysctlnode C.struct_sysctlnode + +// Uname + +type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go new file mode 100644 index 0000000..649e559 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -0,0 +1,282 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( // Directory mode bits + S_IFMT = C.S_IFMT + S_IFIFO = C.S_IFIFO + S_IFCHR = C.S_IFCHR + S_IFDIR = C.S_IFDIR + S_IFBLK = C.S_IFBLK + S_IFREG = C.S_IFREG + S_IFLNK = C.S_IFLNK + S_IFSOCK = C.S_IFSOCK + S_ISUID = C.S_ISUID + S_ISGID = C.S_ISGID + S_ISVTX = C.S_ISVTX + S_IRUSR = C.S_IRUSR + S_IWUSR = C.S_IWUSR + S_IXUSR = C.S_IXUSR +) + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +type Mclpool C.struct_mclpool + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfTimeval C.struct_bpf_timeval + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Uname + +type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go new file mode 100644 index 0000000..f777155 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_solaris.go @@ -0,0 +1,283 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +// These defines ensure that builds done on newer versions of Solaris are +// backwards-compatible with older versions of Solaris and +// OpenSolaris-based derivatives. +#define __USE_SUNOS_SOCKETS__ // msghdr +#define __USE_LEGACY_PROTOTYPES__ // iovec +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX + MaxHostNameLen = C.MAXHOSTNAMELEN +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +type Tms C.struct_tms + +type Utimbuf C.struct_utimbuf + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( // Directory mode bits + S_IFMT = C.S_IFMT + S_IFIFO = C.S_IFIFO + S_IFCHR = C.S_IFCHR + S_IFDIR = C.S_IFDIR + S_IFBLK = C.S_IFBLK + S_IFREG = C.S_IFREG + S_IFLNK = C.S_IFLNK + S_IFSOCK = C.S_IFSOCK + S_ISUID = C.S_ISUID + S_ISGID = C.S_ISGID + S_ISVTX = C.S_ISVTX + S_IRUSR = C.S_IRUSR + S_IWUSR = C.S_IWUSR + S_IXUSR = C.S_IXUSR +) + +type Stat_t C.struct_stat + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +// Filesystems + +type _Fsblkcnt_t C.fsblkcnt_t + +type Statvfs_t C.struct_statvfs + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Select + +type FdSet C.fd_set + +// Misc + +type Utsname C.struct_utsname + +type Ustat_t C.struct_ustat + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_EACCESS = C.AT_EACCESS +) + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfTimeval C.struct_bpf_timeval + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +type Termio C.struct_termio + +type Winsize C.struct_winsize + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go new file mode 100644 index 0000000..dcba884 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -0,0 +1,1769 @@ +// mkerrors.sh -m32 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,darwin + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x28 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + ALTWERASE = 0x200 + ATTR_BIT_MAP_COUNT = 0x5 + ATTR_CMN_ACCESSMASK = 0x20000 + ATTR_CMN_ACCTIME = 0x1000 + ATTR_CMN_ADDEDTIME = 0x10000000 + ATTR_CMN_BKUPTIME = 0x2000 + ATTR_CMN_CHGTIME = 0x800 + ATTR_CMN_CRTIME = 0x200 + ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 + ATTR_CMN_DEVID = 0x2 + ATTR_CMN_DOCUMENT_ID = 0x100000 + ATTR_CMN_ERROR = 0x20000000 + ATTR_CMN_EXTENDED_SECURITY = 0x400000 + ATTR_CMN_FILEID = 0x2000000 + ATTR_CMN_FLAGS = 0x40000 + ATTR_CMN_FNDRINFO = 0x4000 + ATTR_CMN_FSID = 0x4 + ATTR_CMN_FULLPATH = 0x8000000 + ATTR_CMN_GEN_COUNT = 0x80000 + ATTR_CMN_GRPID = 0x10000 + ATTR_CMN_GRPUUID = 0x1000000 + ATTR_CMN_MODTIME = 0x400 + ATTR_CMN_NAME = 0x1 + ATTR_CMN_NAMEDATTRCOUNT = 0x80000 + ATTR_CMN_NAMEDATTRLIST = 0x100000 + ATTR_CMN_OBJID = 0x20 + ATTR_CMN_OBJPERMANENTID = 0x40 + ATTR_CMN_OBJTAG = 0x10 + ATTR_CMN_OBJTYPE = 0x8 + ATTR_CMN_OWNERID = 0x8000 + ATTR_CMN_PARENTID = 0x4000000 + ATTR_CMN_PAROBJID = 0x80 + ATTR_CMN_RETURNED_ATTRS = 0x80000000 + ATTR_CMN_SCRIPT = 0x100 + ATTR_CMN_SETMASK = 0x41c7ff00 + ATTR_CMN_USERACCESS = 0x200000 + ATTR_CMN_UUID = 0x800000 + ATTR_CMN_VALIDMASK = 0xffffffff + ATTR_CMN_VOLSETMASK = 0x6700 + ATTR_FILE_ALLOCSIZE = 0x4 + ATTR_FILE_CLUMPSIZE = 0x10 + ATTR_FILE_DATAALLOCSIZE = 0x400 + ATTR_FILE_DATAEXTENTS = 0x800 + ATTR_FILE_DATALENGTH = 0x200 + ATTR_FILE_DEVTYPE = 0x20 + ATTR_FILE_FILETYPE = 0x40 + ATTR_FILE_FORKCOUNT = 0x80 + ATTR_FILE_FORKLIST = 0x100 + ATTR_FILE_IOBLOCKSIZE = 0x8 + ATTR_FILE_LINKCOUNT = 0x1 + ATTR_FILE_RSRCALLOCSIZE = 0x2000 + ATTR_FILE_RSRCEXTENTS = 0x4000 + ATTR_FILE_RSRCLENGTH = 0x1000 + ATTR_FILE_SETMASK = 0x20 + ATTR_FILE_TOTALSIZE = 0x2 + ATTR_FILE_VALIDMASK = 0x37ff + ATTR_VOL_ALLOCATIONCLUMP = 0x40 + ATTR_VOL_ATTRIBUTES = 0x40000000 + ATTR_VOL_CAPABILITIES = 0x20000 + ATTR_VOL_DIRCOUNT = 0x400 + ATTR_VOL_ENCODINGSUSED = 0x10000 + ATTR_VOL_FILECOUNT = 0x200 + ATTR_VOL_FSTYPE = 0x1 + ATTR_VOL_INFO = 0x80000000 + ATTR_VOL_IOBLOCKSIZE = 0x80 + ATTR_VOL_MAXOBJCOUNT = 0x800 + ATTR_VOL_MINALLOCATION = 0x20 + ATTR_VOL_MOUNTEDDEVICE = 0x8000 + ATTR_VOL_MOUNTFLAGS = 0x4000 + ATTR_VOL_MOUNTPOINT = 0x1000 + ATTR_VOL_NAME = 0x2000 + ATTR_VOL_OBJCOUNT = 0x100 + ATTR_VOL_QUOTA_SIZE = 0x10000000 + ATTR_VOL_RESERVED_SIZE = 0x20000000 + ATTR_VOL_SETMASK = 0x80002000 + ATTR_VOL_SIGNATURE = 0x2 + ATTR_VOL_SIZE = 0x4 + ATTR_VOL_SPACEAVAIL = 0x10 + ATTR_VOL_SPACEFREE = 0x8 + ATTR_VOL_UUID = 0x40000 + ATTR_VOL_VALIDMASK = 0xf007ffff + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4008426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80084267 + BIOCSETFNR = 0x8008427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8008426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_MONOTONIC_RAW_APPROX = 0x5 + CLOCK_PROCESS_CPUTIME_ID = 0xc + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x10 + CLOCK_UPTIME_RAW = 0x8 + CLOCK_UPTIME_RAW_APPROX = 0x9 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0xf + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xf + EVFILT_THREADMARKER = 0xf + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DISPATCH2 = 0x180 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EV_UDATA_SPECIFIC = 0x100 + EV_VANISHED = 0x200 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FSOPT_ATTR_CMN_EXTENDED = 0x20 + FSOPT_NOFOLLOW = 0x1 + FSOPT_NOINMEMUPDATE = 0x2 + FSOPT_PACK_INVAL_ATTRS = 0x8 + FSOPT_REPORT_FULLSIZE = 0x4 + F_ADDFILESIGS = 0x3d + F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_RETURN = 0x61 + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_BARRIERFSYNC = 0x55 + F_CHECK_LV = 0x62 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_PUNCHHOLE = 0x63 + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_TRIM_ACTIVE_FILE = 0x64 + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOW_ECN_MASK = 0x300 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x1b + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESILIENT_CODESIGN = 0x2000 + MAP_RESILIENT_MEDIA = 0x4000 + MAP_SHARED = 0x1 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x400000 + MNT_CMDFLAGS = 0xf0000 + MNT_CPROTECT = 0x80 + MNT_DEFWRITE = 0x2000000 + MNT_DONTBROWSE = 0x100000 + MNT_DOVOLFS = 0x8000 + MNT_DWAIT = 0x4 + MNT_EXPORTED = 0x100 + MNT_FORCE = 0x80000 + MNT_IGNORE_OWNERSHIP = 0x200000 + MNT_JOURNALED = 0x800000 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NOATIME = 0x10000000 + MNT_NOBLOCK = 0x20000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOUSERXATTR = 0x1000000 + MNT_NOWAIT = 0x2 + MNT_QUARANTINE = 0x400 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNKNOWNPERMISSIONS = 0x200000 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x17f0f5ff + MNT_WAIT = 0x1 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 + MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xa + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_FUNLOCK = 0x100 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MACH_CONTINUOUS_TIME = 0x80 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DP_GETRAWUNENCRYPTED = 0x2 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_POPUP = 0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE = 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc01c697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc0086924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6981 + SIOCRSLVMULTI = 0xc008693b + SIOCSDRVSPEC = 0x801c697b + SIOCSETVLAN = 0x8020697e + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NETSVC_MARKING_LEVEL = 0x1119 + SO_NET_SERVICE_TYPE = 0x1116 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0x4 + TABDLY = 0xc04 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_CONNECTION_INFO = 0x106 + TCP_ENABLE_ECN = 0x104 + TCP_FASTOPEN = 0x105 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40087458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40087459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_LOADAVG = 0x2 + VM_MACHFACTOR = 0x4 + VM_MAXID = 0x6 + VM_METER = 0x1 + VM_SWAPUSAGE = 0x5 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x20 + WSTOPPED = 0x8 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADARCH = syscall.Errno(0x56) + EBADEXEC = syscall.Errno(0x55) + EBADF = syscall.Errno(0x9) + EBADMACHO = syscall.Errno(0x58) + EBADMSG = syscall.Errno(0x5e) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x59) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDEVERR = syscall.Errno(0x53) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x5a) + EILSEQ = syscall.Errno(0x5c) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x6a) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5f) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x60) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x61) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5b) + ENOPOLICY = syscall.Errno(0x67) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x62) + ENOSTR = syscall.Errno(0x63) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x68) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x66) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x69) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x64) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + EPWROFF = syscall.Errno(0x52) + EQFULL = syscall.Errno(0x6a) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHLIBVERS = syscall.Errno(0x57) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x65) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "resource busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "device power is off", + 83: "device error", + 84: "value too large to be stored in data type", + 85: "bad executable (or shared library)", + 86: "bad CPU type in executable", + 87: "shared library version mismatch", + 88: "malformed Mach-o file", + 89: "operation canceled", + 90: "identifier removed", + 91: "no message of desired type", + 92: "illegal byte sequence", + 93: "attribute not found", + 94: "bad message", + 95: "EMULTIHOP (Reserved)", + 96: "no message available on STREAM", + 97: "ENOLINK (Reserved)", + 98: "no STREAM resources", + 99: "not a STREAM", + 100: "protocol error", + 101: "STREAM ioctl timeout", + 102: "operation not supported on socket", + 103: "policy not found", + 104: "state not recoverable", + 105: "previous owner died", + 106: "interface output queue is full", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go new file mode 100644 index 0000000..1a51c96 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -0,0 +1,1769 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,darwin + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x28 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + ALTWERASE = 0x200 + ATTR_BIT_MAP_COUNT = 0x5 + ATTR_CMN_ACCESSMASK = 0x20000 + ATTR_CMN_ACCTIME = 0x1000 + ATTR_CMN_ADDEDTIME = 0x10000000 + ATTR_CMN_BKUPTIME = 0x2000 + ATTR_CMN_CHGTIME = 0x800 + ATTR_CMN_CRTIME = 0x200 + ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 + ATTR_CMN_DEVID = 0x2 + ATTR_CMN_DOCUMENT_ID = 0x100000 + ATTR_CMN_ERROR = 0x20000000 + ATTR_CMN_EXTENDED_SECURITY = 0x400000 + ATTR_CMN_FILEID = 0x2000000 + ATTR_CMN_FLAGS = 0x40000 + ATTR_CMN_FNDRINFO = 0x4000 + ATTR_CMN_FSID = 0x4 + ATTR_CMN_FULLPATH = 0x8000000 + ATTR_CMN_GEN_COUNT = 0x80000 + ATTR_CMN_GRPID = 0x10000 + ATTR_CMN_GRPUUID = 0x1000000 + ATTR_CMN_MODTIME = 0x400 + ATTR_CMN_NAME = 0x1 + ATTR_CMN_NAMEDATTRCOUNT = 0x80000 + ATTR_CMN_NAMEDATTRLIST = 0x100000 + ATTR_CMN_OBJID = 0x20 + ATTR_CMN_OBJPERMANENTID = 0x40 + ATTR_CMN_OBJTAG = 0x10 + ATTR_CMN_OBJTYPE = 0x8 + ATTR_CMN_OWNERID = 0x8000 + ATTR_CMN_PARENTID = 0x4000000 + ATTR_CMN_PAROBJID = 0x80 + ATTR_CMN_RETURNED_ATTRS = 0x80000000 + ATTR_CMN_SCRIPT = 0x100 + ATTR_CMN_SETMASK = 0x41c7ff00 + ATTR_CMN_USERACCESS = 0x200000 + ATTR_CMN_UUID = 0x800000 + ATTR_CMN_VALIDMASK = 0xffffffff + ATTR_CMN_VOLSETMASK = 0x6700 + ATTR_FILE_ALLOCSIZE = 0x4 + ATTR_FILE_CLUMPSIZE = 0x10 + ATTR_FILE_DATAALLOCSIZE = 0x400 + ATTR_FILE_DATAEXTENTS = 0x800 + ATTR_FILE_DATALENGTH = 0x200 + ATTR_FILE_DEVTYPE = 0x20 + ATTR_FILE_FILETYPE = 0x40 + ATTR_FILE_FORKCOUNT = 0x80 + ATTR_FILE_FORKLIST = 0x100 + ATTR_FILE_IOBLOCKSIZE = 0x8 + ATTR_FILE_LINKCOUNT = 0x1 + ATTR_FILE_RSRCALLOCSIZE = 0x2000 + ATTR_FILE_RSRCEXTENTS = 0x4000 + ATTR_FILE_RSRCLENGTH = 0x1000 + ATTR_FILE_SETMASK = 0x20 + ATTR_FILE_TOTALSIZE = 0x2 + ATTR_FILE_VALIDMASK = 0x37ff + ATTR_VOL_ALLOCATIONCLUMP = 0x40 + ATTR_VOL_ATTRIBUTES = 0x40000000 + ATTR_VOL_CAPABILITIES = 0x20000 + ATTR_VOL_DIRCOUNT = 0x400 + ATTR_VOL_ENCODINGSUSED = 0x10000 + ATTR_VOL_FILECOUNT = 0x200 + ATTR_VOL_FSTYPE = 0x1 + ATTR_VOL_INFO = 0x80000000 + ATTR_VOL_IOBLOCKSIZE = 0x80 + ATTR_VOL_MAXOBJCOUNT = 0x800 + ATTR_VOL_MINALLOCATION = 0x20 + ATTR_VOL_MOUNTEDDEVICE = 0x8000 + ATTR_VOL_MOUNTFLAGS = 0x4000 + ATTR_VOL_MOUNTPOINT = 0x1000 + ATTR_VOL_NAME = 0x2000 + ATTR_VOL_OBJCOUNT = 0x100 + ATTR_VOL_QUOTA_SIZE = 0x10000000 + ATTR_VOL_RESERVED_SIZE = 0x20000000 + ATTR_VOL_SETMASK = 0x80002000 + ATTR_VOL_SIGNATURE = 0x2 + ATTR_VOL_SIZE = 0x4 + ATTR_VOL_SPACEAVAIL = 0x10 + ATTR_VOL_SPACEFREE = 0x8 + ATTR_VOL_UUID = 0x40000 + ATTR_VOL_VALIDMASK = 0xf007ffff + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x8010427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_MONOTONIC_RAW_APPROX = 0x5 + CLOCK_PROCESS_CPUTIME_ID = 0xc + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x10 + CLOCK_UPTIME_RAW = 0x8 + CLOCK_UPTIME_RAW_APPROX = 0x9 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0xf + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xf + EVFILT_THREADMARKER = 0xf + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DISPATCH2 = 0x180 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EV_UDATA_SPECIFIC = 0x100 + EV_VANISHED = 0x200 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FSOPT_ATTR_CMN_EXTENDED = 0x20 + FSOPT_NOFOLLOW = 0x1 + FSOPT_NOINMEMUPDATE = 0x2 + FSOPT_PACK_INVAL_ATTRS = 0x8 + FSOPT_REPORT_FULLSIZE = 0x4 + F_ADDFILESIGS = 0x3d + F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_RETURN = 0x61 + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_BARRIERFSYNC = 0x55 + F_CHECK_LV = 0x62 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_PUNCHHOLE = 0x63 + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_TRIM_ACTIVE_FILE = 0x64 + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOW_ECN_MASK = 0x300 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x1b + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESILIENT_CODESIGN = 0x2000 + MAP_RESILIENT_MEDIA = 0x4000 + MAP_SHARED = 0x1 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x400000 + MNT_CMDFLAGS = 0xf0000 + MNT_CPROTECT = 0x80 + MNT_DEFWRITE = 0x2000000 + MNT_DONTBROWSE = 0x100000 + MNT_DOVOLFS = 0x8000 + MNT_DWAIT = 0x4 + MNT_EXPORTED = 0x100 + MNT_FORCE = 0x80000 + MNT_IGNORE_OWNERSHIP = 0x200000 + MNT_JOURNALED = 0x800000 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NOATIME = 0x10000000 + MNT_NOBLOCK = 0x20000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOUSERXATTR = 0x1000000 + MNT_NOWAIT = 0x2 + MNT_QUARANTINE = 0x400 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNKNOWNPERMISSIONS = 0x200000 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x17f0f5ff + MNT_WAIT = 0x1 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 + MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xa + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_FUNLOCK = 0x100 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MACH_CONTINUOUS_TIME = 0x80 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DP_GETRAWUNENCRYPTED = 0x2 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_POPUP = 0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE = 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc028697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc00c6924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc02c6938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106981 + SIOCRSLVMULTI = 0xc010693b + SIOCSDRVSPEC = 0x8028697b + SIOCSETVLAN = 0x8020697e + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NETSVC_MARKING_LEVEL = 0x1119 + SO_NET_SERVICE_TYPE = 0x1116 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0x4 + TABDLY = 0xc04 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_CONNECTION_INFO = 0x106 + TCP_ENABLE_ECN = 0x104 + TCP_FASTOPEN = 0x105 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x40487413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x80487414 + TIOCSETAF = 0x80487416 + TIOCSETAW = 0x80487415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_LOADAVG = 0x2 + VM_MACHFACTOR = 0x4 + VM_MAXID = 0x6 + VM_METER = 0x1 + VM_SWAPUSAGE = 0x5 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x40 + WSTOPPED = 0x8 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADARCH = syscall.Errno(0x56) + EBADEXEC = syscall.Errno(0x55) + EBADF = syscall.Errno(0x9) + EBADMACHO = syscall.Errno(0x58) + EBADMSG = syscall.Errno(0x5e) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x59) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDEVERR = syscall.Errno(0x53) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x5a) + EILSEQ = syscall.Errno(0x5c) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x6a) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5f) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x60) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x61) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5b) + ENOPOLICY = syscall.Errno(0x67) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x62) + ENOSTR = syscall.Errno(0x63) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x68) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x66) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x69) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x64) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + EPWROFF = syscall.Errno(0x52) + EQFULL = syscall.Errno(0x6a) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHLIBVERS = syscall.Errno(0x57) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x65) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "resource busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "device power is off", + 83: "device error", + 84: "value too large to be stored in data type", + 85: "bad executable (or shared library)", + 86: "bad CPU type in executable", + 87: "shared library version mismatch", + 88: "malformed Mach-o file", + 89: "operation canceled", + 90: "identifier removed", + 91: "no message of desired type", + 92: "illegal byte sequence", + 93: "attribute not found", + 94: "bad message", + 95: "EMULTIHOP (Reserved)", + 96: "no message available on STREAM", + 97: "ENOLINK (Reserved)", + 98: "no STREAM resources", + 99: "not a STREAM", + 100: "protocol error", + 101: "STREAM ioctl timeout", + 102: "operation not supported on socket", + 103: "policy not found", + 104: "state not recoverable", + 105: "previous owner died", + 106: "interface output queue is full", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go new file mode 100644 index 0000000..fa135b1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -0,0 +1,1769 @@ +// mkerrors.sh +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,darwin + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x28 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + ALTWERASE = 0x200 + ATTR_BIT_MAP_COUNT = 0x5 + ATTR_CMN_ACCESSMASK = 0x20000 + ATTR_CMN_ACCTIME = 0x1000 + ATTR_CMN_ADDEDTIME = 0x10000000 + ATTR_CMN_BKUPTIME = 0x2000 + ATTR_CMN_CHGTIME = 0x800 + ATTR_CMN_CRTIME = 0x200 + ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 + ATTR_CMN_DEVID = 0x2 + ATTR_CMN_DOCUMENT_ID = 0x100000 + ATTR_CMN_ERROR = 0x20000000 + ATTR_CMN_EXTENDED_SECURITY = 0x400000 + ATTR_CMN_FILEID = 0x2000000 + ATTR_CMN_FLAGS = 0x40000 + ATTR_CMN_FNDRINFO = 0x4000 + ATTR_CMN_FSID = 0x4 + ATTR_CMN_FULLPATH = 0x8000000 + ATTR_CMN_GEN_COUNT = 0x80000 + ATTR_CMN_GRPID = 0x10000 + ATTR_CMN_GRPUUID = 0x1000000 + ATTR_CMN_MODTIME = 0x400 + ATTR_CMN_NAME = 0x1 + ATTR_CMN_NAMEDATTRCOUNT = 0x80000 + ATTR_CMN_NAMEDATTRLIST = 0x100000 + ATTR_CMN_OBJID = 0x20 + ATTR_CMN_OBJPERMANENTID = 0x40 + ATTR_CMN_OBJTAG = 0x10 + ATTR_CMN_OBJTYPE = 0x8 + ATTR_CMN_OWNERID = 0x8000 + ATTR_CMN_PARENTID = 0x4000000 + ATTR_CMN_PAROBJID = 0x80 + ATTR_CMN_RETURNED_ATTRS = 0x80000000 + ATTR_CMN_SCRIPT = 0x100 + ATTR_CMN_SETMASK = 0x41c7ff00 + ATTR_CMN_USERACCESS = 0x200000 + ATTR_CMN_UUID = 0x800000 + ATTR_CMN_VALIDMASK = 0xffffffff + ATTR_CMN_VOLSETMASK = 0x6700 + ATTR_FILE_ALLOCSIZE = 0x4 + ATTR_FILE_CLUMPSIZE = 0x10 + ATTR_FILE_DATAALLOCSIZE = 0x400 + ATTR_FILE_DATAEXTENTS = 0x800 + ATTR_FILE_DATALENGTH = 0x200 + ATTR_FILE_DEVTYPE = 0x20 + ATTR_FILE_FILETYPE = 0x40 + ATTR_FILE_FORKCOUNT = 0x80 + ATTR_FILE_FORKLIST = 0x100 + ATTR_FILE_IOBLOCKSIZE = 0x8 + ATTR_FILE_LINKCOUNT = 0x1 + ATTR_FILE_RSRCALLOCSIZE = 0x2000 + ATTR_FILE_RSRCEXTENTS = 0x4000 + ATTR_FILE_RSRCLENGTH = 0x1000 + ATTR_FILE_SETMASK = 0x20 + ATTR_FILE_TOTALSIZE = 0x2 + ATTR_FILE_VALIDMASK = 0x37ff + ATTR_VOL_ALLOCATIONCLUMP = 0x40 + ATTR_VOL_ATTRIBUTES = 0x40000000 + ATTR_VOL_CAPABILITIES = 0x20000 + ATTR_VOL_DIRCOUNT = 0x400 + ATTR_VOL_ENCODINGSUSED = 0x10000 + ATTR_VOL_FILECOUNT = 0x200 + ATTR_VOL_FSTYPE = 0x1 + ATTR_VOL_INFO = 0x80000000 + ATTR_VOL_IOBLOCKSIZE = 0x80 + ATTR_VOL_MAXOBJCOUNT = 0x800 + ATTR_VOL_MINALLOCATION = 0x20 + ATTR_VOL_MOUNTEDDEVICE = 0x8000 + ATTR_VOL_MOUNTFLAGS = 0x4000 + ATTR_VOL_MOUNTPOINT = 0x1000 + ATTR_VOL_NAME = 0x2000 + ATTR_VOL_OBJCOUNT = 0x100 + ATTR_VOL_QUOTA_SIZE = 0x10000000 + ATTR_VOL_RESERVED_SIZE = 0x20000000 + ATTR_VOL_SETMASK = 0x80002000 + ATTR_VOL_SIGNATURE = 0x2 + ATTR_VOL_SIZE = 0x4 + ATTR_VOL_SPACEAVAIL = 0x10 + ATTR_VOL_SPACEFREE = 0x8 + ATTR_VOL_UUID = 0x40000 + ATTR_VOL_VALIDMASK = 0xf007ffff + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x8010427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_MONOTONIC_RAW_APPROX = 0x5 + CLOCK_PROCESS_CPUTIME_ID = 0xc + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x10 + CLOCK_UPTIME_RAW = 0x8 + CLOCK_UPTIME_RAW_APPROX = 0x9 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0xf + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xf + EVFILT_THREADMARKER = 0xf + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DISPATCH2 = 0x180 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EV_UDATA_SPECIFIC = 0x100 + EV_VANISHED = 0x200 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FSOPT_ATTR_CMN_EXTENDED = 0x20 + FSOPT_NOFOLLOW = 0x1 + FSOPT_NOINMEMUPDATE = 0x2 + FSOPT_PACK_INVAL_ATTRS = 0x8 + FSOPT_REPORT_FULLSIZE = 0x4 + F_ADDFILESIGS = 0x3d + F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_RETURN = 0x61 + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_BARRIERFSYNC = 0x55 + F_CHECK_LV = 0x62 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_PUNCHHOLE = 0x63 + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_TRIM_ACTIVE_FILE = 0x64 + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOW_ECN_MASK = 0x300 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x1b + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESILIENT_CODESIGN = 0x2000 + MAP_RESILIENT_MEDIA = 0x4000 + MAP_SHARED = 0x1 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x400000 + MNT_CMDFLAGS = 0xf0000 + MNT_CPROTECT = 0x80 + MNT_DEFWRITE = 0x2000000 + MNT_DONTBROWSE = 0x100000 + MNT_DOVOLFS = 0x8000 + MNT_DWAIT = 0x4 + MNT_EXPORTED = 0x100 + MNT_FORCE = 0x80000 + MNT_IGNORE_OWNERSHIP = 0x200000 + MNT_JOURNALED = 0x800000 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NOATIME = 0x10000000 + MNT_NOBLOCK = 0x20000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOUSERXATTR = 0x1000000 + MNT_NOWAIT = 0x2 + MNT_QUARANTINE = 0x400 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNKNOWNPERMISSIONS = 0x200000 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x17f0f5ff + MNT_WAIT = 0x1 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 + MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xa + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_FUNLOCK = 0x100 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MACH_CONTINUOUS_TIME = 0x80 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DP_GETRAWUNENCRYPTED = 0x2 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_POPUP = 0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE = 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc028697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc00c6924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc02c6938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106981 + SIOCRSLVMULTI = 0xc010693b + SIOCSDRVSPEC = 0x8028697b + SIOCSETVLAN = 0x8020697e + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NETSVC_MARKING_LEVEL = 0x1119 + SO_NET_SERVICE_TYPE = 0x1116 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0x4 + TABDLY = 0xc04 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_CONNECTION_INFO = 0x106 + TCP_ENABLE_ECN = 0x104 + TCP_FASTOPEN = 0x105 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x40487413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x80487414 + TIOCSETAF = 0x80487416 + TIOCSETAW = 0x80487415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_LOADAVG = 0x2 + VM_MACHFACTOR = 0x4 + VM_MAXID = 0x6 + VM_METER = 0x1 + VM_SWAPUSAGE = 0x5 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x40 + WSTOPPED = 0x8 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADARCH = syscall.Errno(0x56) + EBADEXEC = syscall.Errno(0x55) + EBADF = syscall.Errno(0x9) + EBADMACHO = syscall.Errno(0x58) + EBADMSG = syscall.Errno(0x5e) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x59) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDEVERR = syscall.Errno(0x53) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x5a) + EILSEQ = syscall.Errno(0x5c) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x6a) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5f) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x60) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x61) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5b) + ENOPOLICY = syscall.Errno(0x67) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x62) + ENOSTR = syscall.Errno(0x63) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x68) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x66) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x69) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x64) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + EPWROFF = syscall.Errno(0x52) + EQFULL = syscall.Errno(0x6a) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHLIBVERS = syscall.Errno(0x57) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x65) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "resource busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "device power is off", + 83: "device error", + 84: "value too large to be stored in data type", + 85: "bad executable (or shared library)", + 86: "bad CPU type in executable", + 87: "shared library version mismatch", + 88: "malformed Mach-o file", + 89: "operation canceled", + 90: "identifier removed", + 91: "no message of desired type", + 92: "illegal byte sequence", + 93: "attribute not found", + 94: "bad message", + 95: "EMULTIHOP (Reserved)", + 96: "no message available on STREAM", + 97: "ENOLINK (Reserved)", + 98: "no STREAM resources", + 99: "not a STREAM", + 100: "protocol error", + 101: "STREAM ioctl timeout", + 102: "operation not supported on socket", + 103: "policy not found", + 104: "state not recoverable", + 105: "previous owner died", + 106: "interface output queue is full", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go new file mode 100644 index 0000000..6419c65 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -0,0 +1,1769 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,darwin + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x28 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + ALTWERASE = 0x200 + ATTR_BIT_MAP_COUNT = 0x5 + ATTR_CMN_ACCESSMASK = 0x20000 + ATTR_CMN_ACCTIME = 0x1000 + ATTR_CMN_ADDEDTIME = 0x10000000 + ATTR_CMN_BKUPTIME = 0x2000 + ATTR_CMN_CHGTIME = 0x800 + ATTR_CMN_CRTIME = 0x200 + ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 + ATTR_CMN_DEVID = 0x2 + ATTR_CMN_DOCUMENT_ID = 0x100000 + ATTR_CMN_ERROR = 0x20000000 + ATTR_CMN_EXTENDED_SECURITY = 0x400000 + ATTR_CMN_FILEID = 0x2000000 + ATTR_CMN_FLAGS = 0x40000 + ATTR_CMN_FNDRINFO = 0x4000 + ATTR_CMN_FSID = 0x4 + ATTR_CMN_FULLPATH = 0x8000000 + ATTR_CMN_GEN_COUNT = 0x80000 + ATTR_CMN_GRPID = 0x10000 + ATTR_CMN_GRPUUID = 0x1000000 + ATTR_CMN_MODTIME = 0x400 + ATTR_CMN_NAME = 0x1 + ATTR_CMN_NAMEDATTRCOUNT = 0x80000 + ATTR_CMN_NAMEDATTRLIST = 0x100000 + ATTR_CMN_OBJID = 0x20 + ATTR_CMN_OBJPERMANENTID = 0x40 + ATTR_CMN_OBJTAG = 0x10 + ATTR_CMN_OBJTYPE = 0x8 + ATTR_CMN_OWNERID = 0x8000 + ATTR_CMN_PARENTID = 0x4000000 + ATTR_CMN_PAROBJID = 0x80 + ATTR_CMN_RETURNED_ATTRS = 0x80000000 + ATTR_CMN_SCRIPT = 0x100 + ATTR_CMN_SETMASK = 0x41c7ff00 + ATTR_CMN_USERACCESS = 0x200000 + ATTR_CMN_UUID = 0x800000 + ATTR_CMN_VALIDMASK = 0xffffffff + ATTR_CMN_VOLSETMASK = 0x6700 + ATTR_FILE_ALLOCSIZE = 0x4 + ATTR_FILE_CLUMPSIZE = 0x10 + ATTR_FILE_DATAALLOCSIZE = 0x400 + ATTR_FILE_DATAEXTENTS = 0x800 + ATTR_FILE_DATALENGTH = 0x200 + ATTR_FILE_DEVTYPE = 0x20 + ATTR_FILE_FILETYPE = 0x40 + ATTR_FILE_FORKCOUNT = 0x80 + ATTR_FILE_FORKLIST = 0x100 + ATTR_FILE_IOBLOCKSIZE = 0x8 + ATTR_FILE_LINKCOUNT = 0x1 + ATTR_FILE_RSRCALLOCSIZE = 0x2000 + ATTR_FILE_RSRCEXTENTS = 0x4000 + ATTR_FILE_RSRCLENGTH = 0x1000 + ATTR_FILE_SETMASK = 0x20 + ATTR_FILE_TOTALSIZE = 0x2 + ATTR_FILE_VALIDMASK = 0x37ff + ATTR_VOL_ALLOCATIONCLUMP = 0x40 + ATTR_VOL_ATTRIBUTES = 0x40000000 + ATTR_VOL_CAPABILITIES = 0x20000 + ATTR_VOL_DIRCOUNT = 0x400 + ATTR_VOL_ENCODINGSUSED = 0x10000 + ATTR_VOL_FILECOUNT = 0x200 + ATTR_VOL_FSTYPE = 0x1 + ATTR_VOL_INFO = 0x80000000 + ATTR_VOL_IOBLOCKSIZE = 0x80 + ATTR_VOL_MAXOBJCOUNT = 0x800 + ATTR_VOL_MINALLOCATION = 0x20 + ATTR_VOL_MOUNTEDDEVICE = 0x8000 + ATTR_VOL_MOUNTFLAGS = 0x4000 + ATTR_VOL_MOUNTPOINT = 0x1000 + ATTR_VOL_NAME = 0x2000 + ATTR_VOL_OBJCOUNT = 0x100 + ATTR_VOL_QUOTA_SIZE = 0x10000000 + ATTR_VOL_RESERVED_SIZE = 0x20000000 + ATTR_VOL_SETMASK = 0x80002000 + ATTR_VOL_SIGNATURE = 0x2 + ATTR_VOL_SIZE = 0x4 + ATTR_VOL_SPACEAVAIL = 0x10 + ATTR_VOL_SPACEFREE = 0x8 + ATTR_VOL_UUID = 0x40000 + ATTR_VOL_VALIDMASK = 0xf007ffff + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x8010427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_MONOTONIC_RAW_APPROX = 0x5 + CLOCK_PROCESS_CPUTIME_ID = 0xc + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x10 + CLOCK_UPTIME_RAW = 0x8 + CLOCK_UPTIME_RAW_APPROX = 0x9 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0xf + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xf + EVFILT_THREADMARKER = 0xf + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DISPATCH2 = 0x180 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EV_UDATA_SPECIFIC = 0x100 + EV_VANISHED = 0x200 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FSOPT_ATTR_CMN_EXTENDED = 0x20 + FSOPT_NOFOLLOW = 0x1 + FSOPT_NOINMEMUPDATE = 0x2 + FSOPT_PACK_INVAL_ATTRS = 0x8 + FSOPT_REPORT_FULLSIZE = 0x4 + F_ADDFILESIGS = 0x3d + F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_RETURN = 0x61 + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_BARRIERFSYNC = 0x55 + F_CHECK_LV = 0x62 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_PUNCHHOLE = 0x63 + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_TRIM_ACTIVE_FILE = 0x64 + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOW_ECN_MASK = 0x300 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x1b + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESILIENT_CODESIGN = 0x2000 + MAP_RESILIENT_MEDIA = 0x4000 + MAP_SHARED = 0x1 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x400000 + MNT_CMDFLAGS = 0xf0000 + MNT_CPROTECT = 0x80 + MNT_DEFWRITE = 0x2000000 + MNT_DONTBROWSE = 0x100000 + MNT_DOVOLFS = 0x8000 + MNT_DWAIT = 0x4 + MNT_EXPORTED = 0x100 + MNT_FORCE = 0x80000 + MNT_IGNORE_OWNERSHIP = 0x200000 + MNT_JOURNALED = 0x800000 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NOATIME = 0x10000000 + MNT_NOBLOCK = 0x20000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOUSERXATTR = 0x1000000 + MNT_NOWAIT = 0x2 + MNT_QUARANTINE = 0x400 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNKNOWNPERMISSIONS = 0x200000 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x17f0f5ff + MNT_WAIT = 0x1 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 + MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xa + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_FUNLOCK = 0x100 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MACH_CONTINUOUS_TIME = 0x80 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DP_GETRAWUNENCRYPTED = 0x2 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_POPUP = 0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE = 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc028697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc00c6924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc02c6938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106981 + SIOCRSLVMULTI = 0xc010693b + SIOCSDRVSPEC = 0x8028697b + SIOCSETVLAN = 0x8020697e + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NETSVC_MARKING_LEVEL = 0x1119 + SO_NET_SERVICE_TYPE = 0x1116 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0x4 + TABDLY = 0xc04 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_CONNECTION_INFO = 0x106 + TCP_ENABLE_ECN = 0x104 + TCP_FASTOPEN = 0x105 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x40487413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x80487414 + TIOCSETAF = 0x80487416 + TIOCSETAW = 0x80487415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_LOADAVG = 0x2 + VM_MACHFACTOR = 0x4 + VM_MAXID = 0x6 + VM_METER = 0x1 + VM_SWAPUSAGE = 0x5 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x40 + WSTOPPED = 0x8 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADARCH = syscall.Errno(0x56) + EBADEXEC = syscall.Errno(0x55) + EBADF = syscall.Errno(0x9) + EBADMACHO = syscall.Errno(0x58) + EBADMSG = syscall.Errno(0x5e) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x59) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDEVERR = syscall.Errno(0x53) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x5a) + EILSEQ = syscall.Errno(0x5c) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x6a) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5f) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x60) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x61) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5b) + ENOPOLICY = syscall.Errno(0x67) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x62) + ENOSTR = syscall.Errno(0x63) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x68) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x66) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x69) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x64) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + EPWROFF = syscall.Errno(0x52) + EQFULL = syscall.Errno(0x6a) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHLIBVERS = syscall.Errno(0x57) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x65) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "resource busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "device power is off", + 83: "device error", + 84: "value too large to be stored in data type", + 85: "bad executable (or shared library)", + 86: "bad CPU type in executable", + 87: "shared library version mismatch", + 88: "malformed Mach-o file", + 89: "operation canceled", + 90: "identifier removed", + 91: "no message of desired type", + 92: "illegal byte sequence", + 93: "attribute not found", + 94: "bad message", + 95: "EMULTIHOP (Reserved)", + 96: "no message available on STREAM", + 97: "ENOLINK (Reserved)", + 98: "no STREAM resources", + 99: "not a STREAM", + 100: "protocol error", + 101: "STREAM ioctl timeout", + 102: "operation not supported on socket", + 103: "policy not found", + 104: "state not recoverable", + 105: "previous owner died", + 106: "interface output queue is full", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go new file mode 100644 index 0000000..474441b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -0,0 +1,1578 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,dragonfly + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x21 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x23 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x22 + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8010427b + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DEFAULTBUFSIZE = 0x1000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MAX_CLONES = 0x80 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_REDBACK_SMARTEDGE = 0x20 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DBF = 0xf + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0x8 + EVFILT_FS = -0xa + EVFILT_MARKER = 0xf + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xa + EVFILT_TIMER = -0x7 + EVFILT_USER = -0x9 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_NODATA = 0x1000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTEXIT_LWP = 0x10000 + EXTEXIT_PROC = 0x0 + EXTEXIT_SETINT = 0x1 + EXTEXIT_SIMPLE = 0x0 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x118e72 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NPOLLING = 0x100000 + IFF_OACTIVE = 0x400 + IFF_OACTIVE_COMPAT = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_POLLING = 0x10000 + IFF_POLLING_COMPAT = 0x10000 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SMART = 0x20 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xf3 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SKIP = 0x39 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UNKNOWN = 0x102 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHLIM = 0x28 + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PKTOPTIONS = 0x34 + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_RESETLOG = 0x37 + IP_FW_X = 0x31 + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CONTROL_END = 0xb + MADV_CONTROL_START = 0xa + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_INVAL = 0xa + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SETMAP = 0xb + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_NOCORE = 0x20000 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_NOSYNC = 0x800 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_SIZEALIGN = 0x40000 + MAP_STACK = 0x400 + MAP_TRYFIXED = 0x10000 + MAP_VPAGETABLE = 0x2000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CMSG_CLOEXEC = 0x1000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FBLOCKING = 0x10000 + MSG_FMASK = 0xffff0000 + MSG_FNONBLOCKING = 0x20000 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_SYNC = 0x800 + MSG_TRUNC = 0x10 + MSG_UNUSED09 = 0x200 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_MAXID = 0x4 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x20000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x8000000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FAPPEND = 0x100000 + O_FASYNCWRITE = 0x800000 + O_FBLOCKING = 0x40000 + O_FMASK = 0xfc0000 + O_FNONBLOCKING = 0x80000 + O_FOFFSET = 0x200000 + O_FSYNC = 0x80 + O_FSYNCWRITE = 0x400000 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0xb + RTAX_MPLS1 = 0x8 + RTAX_MPLS2 = 0x9 + RTAX_MPLS3 = 0xa + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_MPLS1 = 0x100 + RTA_MPLS2 = 0x200 + RTA_MPLS3 = 0x400 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPLSOPS = 0x1000000 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x6 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_IWCAPSEGS = 0x400 + RTV_IWMAXSEGS = 0x200 + RTV_MSL = 0x100 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8040720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691b + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8040720b + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc0206926 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPOLLCPU = 0xc020697e + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFTSOLEN = 0xc0206980 + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFPOLLCPU = 0x8020697d + SIOCSIFTSOLEN = 0x8020697f + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_CPUHINT = 0x1030 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDSPACE = 0x100a + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_FASTKEEP = 0x80 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x20 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0x100 + TCP_MIN_WINSHIFT = 0x5 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_SIGNATURE_ENABLE = 0x10 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCISPTMASTER = 0x20007455 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VCHECKPT = 0x13 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_BCACHE_SIZE_MAX = 0x0 + VM_SWZONE_SIZE_MAX = 0x4000000000 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EASYNC = syscall.Errno(0x63) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x63) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEDIUM = syscall.Errno(0x5d) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUNUSED94 = syscall.Errno(0x5e) + EUNUSED95 = syscall.Errno(0x5f) + EUNUSED96 = syscall.Errno(0x60) + EUNUSED97 = syscall.Errno(0x61) + EUNUSED98 = syscall.Errno(0x62) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCKPT = syscall.Signal(0x21) + SIGCKPTEXIT = syscall.Signal(0x22) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "no medium found", + 94: "unknown error: 94", + 95: "unknown error: 95", + 96: "unknown error: 96", + 97: "unknown error: 97", + 98: "unknown error: 98", + 99: "unknown error: 99", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "thread Scheduler", + 33: "checkPoint", + 34: "checkPointExit", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go new file mode 100644 index 0000000..a8b0587 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -0,0 +1,1756 @@ +// mkerrors.sh -m32 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,freebsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR02 = 0x2b + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + ALTWERASE = 0x200 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4004427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4008426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x400c4280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80084267 + BIOCSETFNR = 0x80084282 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8008427b + BIOCSETZBUF = 0x800c4281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8008426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + CAP_ACCEPT = 0x200000020000000 + CAP_ACL_CHECK = 0x400000000010000 + CAP_ACL_DELETE = 0x400000000020000 + CAP_ACL_GET = 0x400000000040000 + CAP_ACL_SET = 0x400000000080000 + CAP_ALL0 = 0x20007ffffffffff + CAP_ALL1 = 0x4000000001fffff + CAP_BIND = 0x200000040000000 + CAP_BINDAT = 0x200008000000400 + CAP_CHFLAGSAT = 0x200000000001400 + CAP_CONNECT = 0x200000080000000 + CAP_CONNECTAT = 0x200010000000400 + CAP_CREATE = 0x200000000000040 + CAP_EVENT = 0x400000000000020 + CAP_EXTATTR_DELETE = 0x400000000001000 + CAP_EXTATTR_GET = 0x400000000002000 + CAP_EXTATTR_LIST = 0x400000000004000 + CAP_EXTATTR_SET = 0x400000000008000 + CAP_FCHDIR = 0x200000000000800 + CAP_FCHFLAGS = 0x200000000001000 + CAP_FCHMOD = 0x200000000002000 + CAP_FCHMODAT = 0x200000000002400 + CAP_FCHOWN = 0x200000000004000 + CAP_FCHOWNAT = 0x200000000004400 + CAP_FCNTL = 0x200000000008000 + CAP_FCNTL_ALL = 0x78 + CAP_FCNTL_GETFL = 0x8 + CAP_FCNTL_GETOWN = 0x20 + CAP_FCNTL_SETFL = 0x10 + CAP_FCNTL_SETOWN = 0x40 + CAP_FEXECVE = 0x200000000000080 + CAP_FLOCK = 0x200000000010000 + CAP_FPATHCONF = 0x200000000020000 + CAP_FSCK = 0x200000000040000 + CAP_FSTAT = 0x200000000080000 + CAP_FSTATAT = 0x200000000080400 + CAP_FSTATFS = 0x200000000100000 + CAP_FSYNC = 0x200000000000100 + CAP_FTRUNCATE = 0x200000000000200 + CAP_FUTIMES = 0x200000000200000 + CAP_FUTIMESAT = 0x200000000200400 + CAP_GETPEERNAME = 0x200000100000000 + CAP_GETSOCKNAME = 0x200000200000000 + CAP_GETSOCKOPT = 0x200000400000000 + CAP_IOCTL = 0x400000000000080 + CAP_IOCTLS_ALL = 0x7fffffff + CAP_KQUEUE = 0x400000000100040 + CAP_KQUEUE_CHANGE = 0x400000000100000 + CAP_KQUEUE_EVENT = 0x400000000000040 + CAP_LINKAT_SOURCE = 0x200020000000400 + CAP_LINKAT_TARGET = 0x200000000400400 + CAP_LISTEN = 0x200000800000000 + CAP_LOOKUP = 0x200000000000400 + CAP_MAC_GET = 0x400000000000001 + CAP_MAC_SET = 0x400000000000002 + CAP_MKDIRAT = 0x200000000800400 + CAP_MKFIFOAT = 0x200000001000400 + CAP_MKNODAT = 0x200000002000400 + CAP_MMAP = 0x200000000000010 + CAP_MMAP_R = 0x20000000000001d + CAP_MMAP_RW = 0x20000000000001f + CAP_MMAP_RWX = 0x20000000000003f + CAP_MMAP_RX = 0x20000000000003d + CAP_MMAP_W = 0x20000000000001e + CAP_MMAP_WX = 0x20000000000003e + CAP_MMAP_X = 0x20000000000003c + CAP_PDGETPID = 0x400000000000200 + CAP_PDKILL = 0x400000000000800 + CAP_PDWAIT = 0x400000000000400 + CAP_PEELOFF = 0x200001000000000 + CAP_POLL_EVENT = 0x400000000000020 + CAP_PREAD = 0x20000000000000d + CAP_PWRITE = 0x20000000000000e + CAP_READ = 0x200000000000001 + CAP_RECV = 0x200000000000001 + CAP_RENAMEAT_SOURCE = 0x200000004000400 + CAP_RENAMEAT_TARGET = 0x200040000000400 + CAP_RIGHTS_VERSION = 0x0 + CAP_RIGHTS_VERSION_00 = 0x0 + CAP_SEEK = 0x20000000000000c + CAP_SEEK_TELL = 0x200000000000004 + CAP_SEM_GETVALUE = 0x400000000000004 + CAP_SEM_POST = 0x400000000000008 + CAP_SEM_WAIT = 0x400000000000010 + CAP_SEND = 0x200000000000002 + CAP_SETSOCKOPT = 0x200002000000000 + CAP_SHUTDOWN = 0x200004000000000 + CAP_SOCK_CLIENT = 0x200007780000003 + CAP_SOCK_SERVER = 0x200007f60000003 + CAP_SYMLINKAT = 0x200000008000400 + CAP_TTYHOOK = 0x400000000000100 + CAP_UNLINKAT = 0x200000010000400 + CAP_UNUSED0_44 = 0x200080000000000 + CAP_UNUSED0_57 = 0x300000000000000 + CAP_UNUSED1_22 = 0x400000000200000 + CAP_UNUSED1_57 = 0x500000000000000 + CAP_WRITE = 0x200000000000002 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_EPON = 0x103 + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x104 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PKTAP = 0x102 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa + DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_PROCDESC = -0x8 + EVFILT_READ = -0x1 + EVFILT_SENDFILE = -0xc + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xc + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_FLAG2 = 0x4000 + EV_FORCEONESHOT = 0x100 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f52 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_IEEE1394 = 0x90 + IFT_INFINIBAND = 0xc7 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_PPP = 0x17 + IFT_PROPVIRTUAL = 0x35 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SEP = 0x21 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDMULTI = 0x41 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FLOWID = 0x43 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOWTYPE = 0x44 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVFLOWID = 0x46 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRSSBUCKETID = 0x47 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RSSBUCKETID = 0x45 + IPV6_RSS_LISTEN_BUCKET = 0x42 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BINDMULTI = 0x19 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FLOWID = 0x5a + IP_FLOWTYPE = 0x5b + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVFLOWID = 0x5d + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRSSBUCKETID = 0x5e + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSSBUCKETID = 0x5c + IP_RSS_LISTEN_BUCKET = 0x1a + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RESERVED0020 = 0x20 + MAP_RESERVED0040 = 0x40 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0xd0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0x2d8d0807e + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0x3fef0ffff + MNT_WAIT = 0x1 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x80000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_CLOSE = 0x100 + NOTE_CLOSE_WRITE = 0x200 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FILE_POLL = 0x2 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_OPEN = 0x80 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_READ = 0x400 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_VERIFY = 0x200000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FIXEDMTU = 0x80000 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_RNH_LOCKED = 0x40000000 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_BLACKHOLE = 0x40 + RT_CACHING_CONTEXT = 0x1 + RT_DEFAULT_FIB = 0x0 + RT_HAS_GW = 0x80 + RT_HAS_HEADER = 0x10 + RT_HAS_HEADER_BIT = 0x4 + RT_L2_ME = 0x4 + RT_L2_ME_BIT = 0x2 + RT_LLE_CACHE = 0x100 + RT_MAY_LOOP = 0x8 + RT_MAY_LOOP_BIT = 0x3 + RT_NORTREF = 0x2 + RT_REJECT = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80246987 + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80246989 + SIOCDIFPHYADDR = 0x80206949 + SIOCGDRVSPEC = 0xc01c697b + SIOCGETSGCNT = 0xc0147210 + SIOCGETVIFCNT = 0xc014720f + SIOCGHIWAT = 0x40047301 + SIOCGI2C = 0xc020693d + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0086924 + SIOCGIFDESCR = 0xc020692a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFGROUP = 0xc0246988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFXMEDIA = 0xc028698b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCGTUNFIB = 0xc020695e + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSDRVSPEC = 0x801c697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSTUNFIB = 0x8020695f + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_CA_NAME_MAX = 0x10 + TCP_CCALGOOPT = 0x41 + TCP_CONGESTION = 0x40 + TCP_FASTOPEN = 0x401 + TCP_FUNCTION_BLK = 0x2000 + TCP_FUNCTION_NAME_LEN_MAX = 0x20 + TCP_INFO = 0x20 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_PCAP_IN = 0x1000 + TCP_PCAP_OUT = 0x800 + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40087459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "capabilities insufficient", + 94: "not permitted in capability mode", + 95: "state not recoverable", + 96: "previous owner died", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "unknown signal", + 33: "unknown signal", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go new file mode 100644 index 0000000..cf5f012 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -0,0 +1,1757 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,freebsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR02 = 0x2b + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + ALTWERASE = 0x200 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4008427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x40184280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x80104282 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8010427b + BIOCSETZBUF = 0x80184281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + CAP_ACCEPT = 0x200000020000000 + CAP_ACL_CHECK = 0x400000000010000 + CAP_ACL_DELETE = 0x400000000020000 + CAP_ACL_GET = 0x400000000040000 + CAP_ACL_SET = 0x400000000080000 + CAP_ALL0 = 0x20007ffffffffff + CAP_ALL1 = 0x4000000001fffff + CAP_BIND = 0x200000040000000 + CAP_BINDAT = 0x200008000000400 + CAP_CHFLAGSAT = 0x200000000001400 + CAP_CONNECT = 0x200000080000000 + CAP_CONNECTAT = 0x200010000000400 + CAP_CREATE = 0x200000000000040 + CAP_EVENT = 0x400000000000020 + CAP_EXTATTR_DELETE = 0x400000000001000 + CAP_EXTATTR_GET = 0x400000000002000 + CAP_EXTATTR_LIST = 0x400000000004000 + CAP_EXTATTR_SET = 0x400000000008000 + CAP_FCHDIR = 0x200000000000800 + CAP_FCHFLAGS = 0x200000000001000 + CAP_FCHMOD = 0x200000000002000 + CAP_FCHMODAT = 0x200000000002400 + CAP_FCHOWN = 0x200000000004000 + CAP_FCHOWNAT = 0x200000000004400 + CAP_FCNTL = 0x200000000008000 + CAP_FCNTL_ALL = 0x78 + CAP_FCNTL_GETFL = 0x8 + CAP_FCNTL_GETOWN = 0x20 + CAP_FCNTL_SETFL = 0x10 + CAP_FCNTL_SETOWN = 0x40 + CAP_FEXECVE = 0x200000000000080 + CAP_FLOCK = 0x200000000010000 + CAP_FPATHCONF = 0x200000000020000 + CAP_FSCK = 0x200000000040000 + CAP_FSTAT = 0x200000000080000 + CAP_FSTATAT = 0x200000000080400 + CAP_FSTATFS = 0x200000000100000 + CAP_FSYNC = 0x200000000000100 + CAP_FTRUNCATE = 0x200000000000200 + CAP_FUTIMES = 0x200000000200000 + CAP_FUTIMESAT = 0x200000000200400 + CAP_GETPEERNAME = 0x200000100000000 + CAP_GETSOCKNAME = 0x200000200000000 + CAP_GETSOCKOPT = 0x200000400000000 + CAP_IOCTL = 0x400000000000080 + CAP_IOCTLS_ALL = 0x7fffffffffffffff + CAP_KQUEUE = 0x400000000100040 + CAP_KQUEUE_CHANGE = 0x400000000100000 + CAP_KQUEUE_EVENT = 0x400000000000040 + CAP_LINKAT_SOURCE = 0x200020000000400 + CAP_LINKAT_TARGET = 0x200000000400400 + CAP_LISTEN = 0x200000800000000 + CAP_LOOKUP = 0x200000000000400 + CAP_MAC_GET = 0x400000000000001 + CAP_MAC_SET = 0x400000000000002 + CAP_MKDIRAT = 0x200000000800400 + CAP_MKFIFOAT = 0x200000001000400 + CAP_MKNODAT = 0x200000002000400 + CAP_MMAP = 0x200000000000010 + CAP_MMAP_R = 0x20000000000001d + CAP_MMAP_RW = 0x20000000000001f + CAP_MMAP_RWX = 0x20000000000003f + CAP_MMAP_RX = 0x20000000000003d + CAP_MMAP_W = 0x20000000000001e + CAP_MMAP_WX = 0x20000000000003e + CAP_MMAP_X = 0x20000000000003c + CAP_PDGETPID = 0x400000000000200 + CAP_PDKILL = 0x400000000000800 + CAP_PDWAIT = 0x400000000000400 + CAP_PEELOFF = 0x200001000000000 + CAP_POLL_EVENT = 0x400000000000020 + CAP_PREAD = 0x20000000000000d + CAP_PWRITE = 0x20000000000000e + CAP_READ = 0x200000000000001 + CAP_RECV = 0x200000000000001 + CAP_RENAMEAT_SOURCE = 0x200000004000400 + CAP_RENAMEAT_TARGET = 0x200040000000400 + CAP_RIGHTS_VERSION = 0x0 + CAP_RIGHTS_VERSION_00 = 0x0 + CAP_SEEK = 0x20000000000000c + CAP_SEEK_TELL = 0x200000000000004 + CAP_SEM_GETVALUE = 0x400000000000004 + CAP_SEM_POST = 0x400000000000008 + CAP_SEM_WAIT = 0x400000000000010 + CAP_SEND = 0x200000000000002 + CAP_SETSOCKOPT = 0x200002000000000 + CAP_SHUTDOWN = 0x200004000000000 + CAP_SOCK_CLIENT = 0x200007780000003 + CAP_SOCK_SERVER = 0x200007f60000003 + CAP_SYMLINKAT = 0x200000008000400 + CAP_TTYHOOK = 0x400000000000100 + CAP_UNLINKAT = 0x200000010000400 + CAP_UNUSED0_44 = 0x200080000000000 + CAP_UNUSED0_57 = 0x300000000000000 + CAP_UNUSED1_22 = 0x400000000200000 + CAP_UNUSED1_57 = 0x500000000000000 + CAP_WRITE = 0x200000000000002 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_EPON = 0x103 + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x104 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PKTAP = 0x102 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa + DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_PROCDESC = -0x8 + EVFILT_READ = -0x1 + EVFILT_SENDFILE = -0xc + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xc + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_FLAG2 = 0x4000 + EV_FORCEONESHOT = 0x100 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f52 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_IEEE1394 = 0x90 + IFT_INFINIBAND = 0xc7 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_PPP = 0x17 + IFT_PROPVIRTUAL = 0x35 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SEP = 0x21 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDMULTI = 0x41 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FLOWID = 0x43 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOWTYPE = 0x44 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVFLOWID = 0x46 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRSSBUCKETID = 0x47 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RSSBUCKETID = 0x45 + IPV6_RSS_LISTEN_BUCKET = 0x42 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BINDMULTI = 0x19 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FLOWID = 0x5a + IP_FLOWTYPE = 0x5b + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVFLOWID = 0x5d + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRSSBUCKETID = 0x5e + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSSBUCKETID = 0x5c + IP_RSS_LISTEN_BUCKET = 0x1a + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80000 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RESERVED0020 = 0x20 + MAP_RESERVED0040 = 0x40 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0xd0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0x2d8d0807e + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0x3fef0ffff + MNT_WAIT = 0x1 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x80000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_CLOSE = 0x100 + NOTE_CLOSE_WRITE = 0x200 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FILE_POLL = 0x2 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_OPEN = 0x80 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_READ = 0x400 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_VERIFY = 0x200000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FIXEDMTU = 0x80000 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_RNH_LOCKED = 0x40000000 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_BLACKHOLE = 0x40 + RT_CACHING_CONTEXT = 0x1 + RT_DEFAULT_FIB = 0x0 + RT_HAS_GW = 0x80 + RT_HAS_HEADER = 0x10 + RT_HAS_HEADER_BIT = 0x4 + RT_L2_ME = 0x4 + RT_L2_ME_BIT = 0x2 + RT_LLE_CACHE = 0x100 + RT_MAY_LOOP = 0x8 + RT_MAY_LOOP_BIT = 0x3 + RT_NORTREF = 0x2 + RT_REJECT = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGI2C = 0xc020693d + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDESCR = 0xc020692a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFXMEDIA = 0xc030698b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCGTUNFIB = 0xc020695e + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSTUNFIB = 0x8020695f + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_CA_NAME_MAX = 0x10 + TCP_CCALGOOPT = 0x41 + TCP_CONGESTION = 0x40 + TCP_FASTOPEN = 0x401 + TCP_FUNCTION_BLK = 0x2000 + TCP_FUNCTION_NAME_LEN_MAX = 0x20 + TCP_INFO = 0x20 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_PCAP_IN = 0x1000 + TCP_PCAP_OUT = 0x800 + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "capabilities insufficient", + 94: "not permitted in capability mode", + 95: "state not recoverable", + 96: "previous owner died", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "unknown signal", + 33: "unknown signal", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go new file mode 100644 index 0000000..9bbb90a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -0,0 +1,1765 @@ +// mkerrors.sh +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,freebsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR02 = 0x2b + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + ALTWERASE = 0x200 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4004427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x400c4280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80084267 + BIOCSETFNR = 0x80084282 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8008427b + BIOCSETZBUF = 0x800c4281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + CAP_ACCEPT = 0x200000020000000 + CAP_ACL_CHECK = 0x400000000010000 + CAP_ACL_DELETE = 0x400000000020000 + CAP_ACL_GET = 0x400000000040000 + CAP_ACL_SET = 0x400000000080000 + CAP_ALL0 = 0x20007ffffffffff + CAP_ALL1 = 0x4000000001fffff + CAP_BIND = 0x200000040000000 + CAP_BINDAT = 0x200008000000400 + CAP_CHFLAGSAT = 0x200000000001400 + CAP_CONNECT = 0x200000080000000 + CAP_CONNECTAT = 0x200010000000400 + CAP_CREATE = 0x200000000000040 + CAP_EVENT = 0x400000000000020 + CAP_EXTATTR_DELETE = 0x400000000001000 + CAP_EXTATTR_GET = 0x400000000002000 + CAP_EXTATTR_LIST = 0x400000000004000 + CAP_EXTATTR_SET = 0x400000000008000 + CAP_FCHDIR = 0x200000000000800 + CAP_FCHFLAGS = 0x200000000001000 + CAP_FCHMOD = 0x200000000002000 + CAP_FCHMODAT = 0x200000000002400 + CAP_FCHOWN = 0x200000000004000 + CAP_FCHOWNAT = 0x200000000004400 + CAP_FCNTL = 0x200000000008000 + CAP_FCNTL_ALL = 0x78 + CAP_FCNTL_GETFL = 0x8 + CAP_FCNTL_GETOWN = 0x20 + CAP_FCNTL_SETFL = 0x10 + CAP_FCNTL_SETOWN = 0x40 + CAP_FEXECVE = 0x200000000000080 + CAP_FLOCK = 0x200000000010000 + CAP_FPATHCONF = 0x200000000020000 + CAP_FSCK = 0x200000000040000 + CAP_FSTAT = 0x200000000080000 + CAP_FSTATAT = 0x200000000080400 + CAP_FSTATFS = 0x200000000100000 + CAP_FSYNC = 0x200000000000100 + CAP_FTRUNCATE = 0x200000000000200 + CAP_FUTIMES = 0x200000000200000 + CAP_FUTIMESAT = 0x200000000200400 + CAP_GETPEERNAME = 0x200000100000000 + CAP_GETSOCKNAME = 0x200000200000000 + CAP_GETSOCKOPT = 0x200000400000000 + CAP_IOCTL = 0x400000000000080 + CAP_IOCTLS_ALL = 0x7fffffff + CAP_KQUEUE = 0x400000000100040 + CAP_KQUEUE_CHANGE = 0x400000000100000 + CAP_KQUEUE_EVENT = 0x400000000000040 + CAP_LINKAT_SOURCE = 0x200020000000400 + CAP_LINKAT_TARGET = 0x200000000400400 + CAP_LISTEN = 0x200000800000000 + CAP_LOOKUP = 0x200000000000400 + CAP_MAC_GET = 0x400000000000001 + CAP_MAC_SET = 0x400000000000002 + CAP_MKDIRAT = 0x200000000800400 + CAP_MKFIFOAT = 0x200000001000400 + CAP_MKNODAT = 0x200000002000400 + CAP_MMAP = 0x200000000000010 + CAP_MMAP_R = 0x20000000000001d + CAP_MMAP_RW = 0x20000000000001f + CAP_MMAP_RWX = 0x20000000000003f + CAP_MMAP_RX = 0x20000000000003d + CAP_MMAP_W = 0x20000000000001e + CAP_MMAP_WX = 0x20000000000003e + CAP_MMAP_X = 0x20000000000003c + CAP_PDGETPID = 0x400000000000200 + CAP_PDKILL = 0x400000000000800 + CAP_PDWAIT = 0x400000000000400 + CAP_PEELOFF = 0x200001000000000 + CAP_POLL_EVENT = 0x400000000000020 + CAP_PREAD = 0x20000000000000d + CAP_PWRITE = 0x20000000000000e + CAP_READ = 0x200000000000001 + CAP_RECV = 0x200000000000001 + CAP_RENAMEAT_SOURCE = 0x200000004000400 + CAP_RENAMEAT_TARGET = 0x200040000000400 + CAP_RIGHTS_VERSION = 0x0 + CAP_RIGHTS_VERSION_00 = 0x0 + CAP_SEEK = 0x20000000000000c + CAP_SEEK_TELL = 0x200000000000004 + CAP_SEM_GETVALUE = 0x400000000000004 + CAP_SEM_POST = 0x400000000000008 + CAP_SEM_WAIT = 0x400000000000010 + CAP_SEND = 0x200000000000002 + CAP_SETSOCKOPT = 0x200002000000000 + CAP_SHUTDOWN = 0x200004000000000 + CAP_SOCK_CLIENT = 0x200007780000003 + CAP_SOCK_SERVER = 0x200007f60000003 + CAP_SYMLINKAT = 0x200000008000400 + CAP_TTYHOOK = 0x400000000000100 + CAP_UNLINKAT = 0x200000010000400 + CAP_UNUSED0_44 = 0x200080000000000 + CAP_UNUSED0_57 = 0x300000000000000 + CAP_UNUSED1_22 = 0x400000000200000 + CAP_UNUSED1_57 = 0x500000000000000 + CAP_WRITE = 0x200000000000002 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_EPON = 0x103 + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x109 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PKTAP = 0x102 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 + DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa + DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_FREEBSD = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WATTSTOPPER_DLM = 0x107 + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_PROCDESC = -0x8 + EVFILT_READ = -0x1 + EVFILT_SENDFILE = -0xc + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xc + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_FLAG2 = 0x4000 + EV_FORCEONESHOT = 0x100 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f52 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_IEEE1394 = 0x90 + IFT_INFINIBAND = 0xc7 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_PPP = 0x17 + IFT_PROPVIRTUAL = 0x35 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SEP = 0x21 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDMULTI = 0x41 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FLOWID = 0x43 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOWTYPE = 0x44 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVFLOWID = 0x46 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRSSBUCKETID = 0x47 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RSSBUCKETID = 0x45 + IPV6_RSS_LISTEN_BUCKET = 0x42 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BINDMULTI = 0x19 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FLOWID = 0x5a + IP_FLOWTYPE = 0x5b + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVFLOWID = 0x5d + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRSSBUCKETID = 0x5e + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSSBUCKETID = 0x5c + IP_RSS_LISTEN_BUCKET = 0x1a + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RESERVED0020 = 0x20 + MAP_RESERVED0040 = 0x40 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0xd0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0x2d8d0807e + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0x3fef0ffff + MNT_WAIT = 0x1 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x80000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_CLOSE = 0x100 + NOTE_CLOSE_WRITE = 0x200 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FILE_POLL = 0x2 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_OPEN = 0x80 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_READ = 0x400 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_VERIFY = 0x200000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FIXEDMTU = 0x80000 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_RNH_LOCKED = 0x40000000 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_BLACKHOLE = 0x40 + RT_CACHING_CONTEXT = 0x1 + RT_DEFAULT_FIB = 0x0 + RT_HAS_GW = 0x80 + RT_HAS_HEADER = 0x10 + RT_HAS_HEADER_BIT = 0x4 + RT_L2_ME = 0x4 + RT_L2_ME_BIT = 0x2 + RT_LLE_CACHE = 0x100 + RT_MAY_LOOP = 0x8 + RT_MAY_LOOP_BIT = 0x3 + RT_NORTREF = 0x2 + RT_REJECT = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80246987 + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80246989 + SIOCDIFPHYADDR = 0x80206949 + SIOCGDRVSPEC = 0xc01c697b + SIOCGETSGCNT = 0xc0147210 + SIOCGETVIFCNT = 0xc014720f + SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e + SIOCGI2C = 0xc020693d + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0086924 + SIOCGIFDESCR = 0xc020692a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFGROUP = 0xc0246988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFXMEDIA = 0xc028698b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCGTUNFIB = 0xc020695e + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSDRVSPEC = 0x801c697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSTUNFIB = 0x8020695f + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_CA_NAME_MAX = 0x10 + TCP_CCALGOOPT = 0x41 + TCP_CONGESTION = 0x40 + TCP_FASTOPEN = 0x401 + TCP_FUNCTION_BLK = 0x2000 + TCP_FUNCTION_NAME_LEN_MAX = 0x20 + TCP_INFO = 0x20 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_PCAP_IN = 0x1000 + TCP_PCAP_OUT = 0x800 + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "capabilities insufficient", + 94: "not permitted in capability mode", + 95: "state not recoverable", + 96: "previous owner died", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "unknown signal", + 33: "unknown signal", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go new file mode 100644 index 0000000..fa06374 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -0,0 +1,2298 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include -m32 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BLKBSZGET = 0x80041270 + BLKBSZSET = 0x40041271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80041272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0xc + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0xd + F_SETLK64 = 0xd + F_SETLKW = 0xe + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_32BIT = 0x40 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x8000 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPXREGS = 0x12 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPXREGS = 0x13 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SINGLEBLOCK = 0x21 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x8 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x400854d5 + TUNDETACHFILTER = 0x400854d6 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x800854db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETDEBUG = 0x400454c9 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x6 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x20 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go new file mode 100644 index 0000000..eb2a22f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -0,0 +1,2299 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_32BIT = 0x40 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ARCH_PRCTL = 0x1e + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPXREGS = 0x12 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPXREGS = 0x13 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SINGLEBLOCK = 0x21 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x8 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETDEBUG = 0x400454c9 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x6 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go new file mode 100644 index 0000000..37d212c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -0,0 +1,2306 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BLKBSZGET = 0x80041270 + BLKBSZSET = 0x40041271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80041272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0xc + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0xd + F_SETLK64 = 0xd + F_SETLKW = 0xe + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x20000 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETCRUNCHREGS = 0x19 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFDPIC = 0x1f + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 + PTRACE_GETFPREGS = 0xe + PTRACE_GETHBPREGS = 0x1d + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GETVFPREGS = 0x1b + PTRACE_GETWMMXREGS = 0x12 + PTRACE_GET_THREAD_AREA = 0x16 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETCRUNCHREGS = 0x1a + PTRACE_SETFPREGS = 0xf + PTRACE_SETHBPREGS = 0x1e + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SETVFPREGS = 0x1c + PTRACE_SETWMMXREGS = 0x13 + PTRACE_SET_SYSCALL = 0x17 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + PT_DATA_ADDR = 0x10004 + PT_TEXT_ADDR = 0x10000 + PT_TEXT_END_ADDR = 0x10008 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x8 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x400854d5 + TUNDETACHFILTER = 0x400854d6 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x800854db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETDEBUG = 0x400454c9 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x6 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x20 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go new file mode 100644 index 0000000..51d84a3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -0,0 +1,2289 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ESR_MAGIC = 0x45535201 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + EXTRA_MAGIC = 0x45585401 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x8 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETDEBUG = 0x400454c9 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x6 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go new file mode 100644 index 0000000..8aec95d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -0,0 +1,2308 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BLKBSZGET = 0x40041270 + BLKBSZSET = 0x80041271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40041272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x21 + F_GETLK64 = 0x21 + F_GETOWN = 0x17 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x22 + F_SETLK64 = 0x22 + F_SETLKW = 0x23 + F_SETLKW64 = 0x23 + F_SETOWN = 0x18 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x100 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x80 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x800 + MAP_SHARED = 0x1 + MAP_STACK = 0x40000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x2000 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT = 0x4 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETFPREGS = 0xf + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x6 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x9 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x40047307 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x40047309 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x80047308 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x1 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x80 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x2 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0xffff + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1e + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x1008 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x800854d5 + TUNDETACHFILTER = 0x800854d6 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x400854db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETDEBUG = 0x800454c9 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x4 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x20 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x7d) + EADDRNOTAVAIL = syscall.Errno(0x7e) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x7c) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x95) + EBADE = syscall.Errno(0x32) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x51) + EBADMSG = syscall.Errno(0x4d) + EBADR = syscall.Errno(0x33) + EBADRQC = syscall.Errno(0x36) + EBADSLT = syscall.Errno(0x37) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x9e) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x25) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x82) + ECONNREFUSED = syscall.Errno(0x92) + ECONNRESET = syscall.Errno(0x83) + EDEADLK = syscall.Errno(0x2d) + EDEADLOCK = syscall.Errno(0x38) + EDESTADDRREQ = syscall.Errno(0x60) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x46d) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x93) + EHOSTUNREACH = syscall.Errno(0x94) + EHWPOISON = syscall.Errno(0xa8) + EIDRM = syscall.Errno(0x24) + EILSEQ = syscall.Errno(0x58) + EINIT = syscall.Errno(0x8d) + EINPROGRESS = syscall.Errno(0x96) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x85) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x8b) + EKEYEXPIRED = syscall.Errno(0xa2) + EKEYREJECTED = syscall.Errno(0xa4) + EKEYREVOKED = syscall.Errno(0xa3) + EL2HLT = syscall.Errno(0x2c) + EL2NSYNC = syscall.Errno(0x26) + EL3HLT = syscall.Errno(0x27) + EL3RST = syscall.Errno(0x28) + ELIBACC = syscall.Errno(0x53) + ELIBBAD = syscall.Errno(0x54) + ELIBEXEC = syscall.Errno(0x57) + ELIBMAX = syscall.Errno(0x56) + ELIBSCN = syscall.Errno(0x55) + ELNRNG = syscall.Errno(0x29) + ELOOP = syscall.Errno(0x5a) + EMEDIUMTYPE = syscall.Errno(0xa0) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x61) + EMULTIHOP = syscall.Errno(0x4a) + ENAMETOOLONG = syscall.Errno(0x4e) + ENAVAIL = syscall.Errno(0x8a) + ENETDOWN = syscall.Errno(0x7f) + ENETRESET = syscall.Errno(0x81) + ENETUNREACH = syscall.Errno(0x80) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x35) + ENOBUFS = syscall.Errno(0x84) + ENOCSI = syscall.Errno(0x2b) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0xa1) + ENOLCK = syscall.Errno(0x2e) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x9f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x23) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x63) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x59) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x86) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x5d) + ENOTNAM = syscall.Errno(0x89) + ENOTRECOVERABLE = syscall.Errno(0xa6) + ENOTSOCK = syscall.Errno(0x5f) + ENOTSUP = syscall.Errno(0x7a) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x50) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x7a) + EOVERFLOW = syscall.Errno(0x4f) + EOWNERDEAD = syscall.Errno(0xa5) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x7b) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x78) + EPROTOTYPE = syscall.Errno(0x62) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x52) + EREMDEV = syscall.Errno(0x8e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x8c) + ERESTART = syscall.Errno(0x5b) + ERFKILL = syscall.Errno(0xa7) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x8f) + ESOCKTNOSUPPORT = syscall.Errno(0x79) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x97) + ESTRPIPE = syscall.Errno(0x5c) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x91) + ETOOMANYREFS = syscall.Errno(0x90) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x87) + EUNATCH = syscall.Errno(0x2a) + EUSERS = syscall.Errno(0x5e) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x34) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x12) + SIGCLD = syscall.Signal(0x12) + SIGCONT = syscall.Signal(0x19) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x16) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x16) + SIGPROF = syscall.Signal(0x1d) + SIGPWR = syscall.Signal(0x13) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x17) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x18) + SIGTTIN = syscall.Signal(0x1a) + SIGTTOU = syscall.Signal(0x1b) + SIGURG = syscall.Signal(0x15) + SIGUSR1 = syscall.Signal(0x10) + SIGUSR2 = syscall.Signal(0x11) + SIGVTALRM = syscall.Signal(0x1c) + SIGWINCH = syscall.Signal(0x14) + SIGXCPU = syscall.Signal(0x1e) + SIGXFSZ = syscall.Signal(0x1f) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "no message of desired type", + 36: "identifier removed", + 37: "channel number out of range", + 38: "level 2 not synchronized", + 39: "level 3 halted", + 40: "level 3 reset", + 41: "link number out of range", + 42: "protocol driver not attached", + 43: "no CSI structure available", + 44: "level 2 halted", + 45: "resource deadlock avoided", + 46: "no locks available", + 50: "invalid exchange", + 51: "invalid request descriptor", + 52: "exchange full", + 53: "no anode", + 54: "invalid request code", + 55: "invalid slot", + 56: "file locking deadlock error", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 73: "RFS specific error", + 74: "multihop attempted", + 77: "bad message", + 78: "file name too long", + 79: "value too large for defined data type", + 80: "name not unique on network", + 81: "file descriptor in bad state", + 82: "remote address changed", + 83: "can not access a needed shared library", + 84: "accessing a corrupted shared library", + 85: ".lib section in a.out corrupted", + 86: "attempting to link in too many shared libraries", + 87: "cannot exec a shared library directly", + 88: "invalid or incomplete multibyte or wide character", + 89: "function not implemented", + 90: "too many levels of symbolic links", + 91: "interrupted system call should be restarted", + 92: "streams pipe error", + 93: "directory not empty", + 94: "too many users", + 95: "socket operation on non-socket", + 96: "destination address required", + 97: "message too long", + 98: "protocol wrong type for socket", + 99: "protocol not available", + 120: "protocol not supported", + 121: "socket type not supported", + 122: "operation not supported", + 123: "protocol family not supported", + 124: "address family not supported by protocol", + 125: "address already in use", + 126: "cannot assign requested address", + 127: "network is down", + 128: "network is unreachable", + 129: "network dropped connection on reset", + 130: "software caused connection abort", + 131: "connection reset by peer", + 132: "no buffer space available", + 133: "transport endpoint is already connected", + 134: "transport endpoint is not connected", + 135: "structure needs cleaning", + 137: "not a XENIX named type file", + 138: "no XENIX semaphores available", + 139: "is a named type file", + 140: "remote I/O error", + 141: "unknown error 141", + 142: "unknown error 142", + 143: "cannot send after transport endpoint shutdown", + 144: "too many references: cannot splice", + 145: "connection timed out", + 146: "connection refused", + 147: "host is down", + 148: "no route to host", + 149: "operation already in progress", + 150: "operation now in progress", + 151: "stale file handle", + 158: "operation canceled", + 159: "no medium found", + 160: "wrong medium type", + 161: "required key not available", + 162: "key has expired", + 163: "key has been revoked", + 164: "key was rejected by service", + 165: "owner died", + 166: "state not recoverable", + 167: "operation not possible due to RF-kill", + 168: "memory page has hardware error", + 1133: "disk quota exceeded", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "user defined signal 1", + 17: "user defined signal 2", + 18: "child exited", + 19: "power failure", + 20: "window changed", + 21: "urgent I/O condition", + 22: "I/O possible", + 23: "stopped (signal)", + 24: "stopped", + 25: "continued", + 26: "stopped (tty input)", + 27: "stopped (tty output)", + 28: "virtual timer expired", + 29: "profiling timer expired", + 30: "CPU time limit exceeded", + 31: "file size limit exceeded", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go new file mode 100644 index 0000000..423f48a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -0,0 +1,2308 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0xe + F_GETLK64 = 0xe + F_GETOWN = 0x17 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x18 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x100 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x80 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x800 + MAP_SHARED = 0x1 + MAP_STACK = 0x40000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x0 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT = 0x4 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETFPREGS = 0xf + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x6 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x9 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x40047307 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x40047309 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x80047308 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x1 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x80 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x2 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0xffff + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1e + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x1008 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETDEBUG = 0x800454c9 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x4 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x7d) + EADDRNOTAVAIL = syscall.Errno(0x7e) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x7c) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x95) + EBADE = syscall.Errno(0x32) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x51) + EBADMSG = syscall.Errno(0x4d) + EBADR = syscall.Errno(0x33) + EBADRQC = syscall.Errno(0x36) + EBADSLT = syscall.Errno(0x37) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x9e) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x25) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x82) + ECONNREFUSED = syscall.Errno(0x92) + ECONNRESET = syscall.Errno(0x83) + EDEADLK = syscall.Errno(0x2d) + EDEADLOCK = syscall.Errno(0x38) + EDESTADDRREQ = syscall.Errno(0x60) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x46d) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x93) + EHOSTUNREACH = syscall.Errno(0x94) + EHWPOISON = syscall.Errno(0xa8) + EIDRM = syscall.Errno(0x24) + EILSEQ = syscall.Errno(0x58) + EINIT = syscall.Errno(0x8d) + EINPROGRESS = syscall.Errno(0x96) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x85) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x8b) + EKEYEXPIRED = syscall.Errno(0xa2) + EKEYREJECTED = syscall.Errno(0xa4) + EKEYREVOKED = syscall.Errno(0xa3) + EL2HLT = syscall.Errno(0x2c) + EL2NSYNC = syscall.Errno(0x26) + EL3HLT = syscall.Errno(0x27) + EL3RST = syscall.Errno(0x28) + ELIBACC = syscall.Errno(0x53) + ELIBBAD = syscall.Errno(0x54) + ELIBEXEC = syscall.Errno(0x57) + ELIBMAX = syscall.Errno(0x56) + ELIBSCN = syscall.Errno(0x55) + ELNRNG = syscall.Errno(0x29) + ELOOP = syscall.Errno(0x5a) + EMEDIUMTYPE = syscall.Errno(0xa0) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x61) + EMULTIHOP = syscall.Errno(0x4a) + ENAMETOOLONG = syscall.Errno(0x4e) + ENAVAIL = syscall.Errno(0x8a) + ENETDOWN = syscall.Errno(0x7f) + ENETRESET = syscall.Errno(0x81) + ENETUNREACH = syscall.Errno(0x80) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x35) + ENOBUFS = syscall.Errno(0x84) + ENOCSI = syscall.Errno(0x2b) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0xa1) + ENOLCK = syscall.Errno(0x2e) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x9f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x23) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x63) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x59) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x86) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x5d) + ENOTNAM = syscall.Errno(0x89) + ENOTRECOVERABLE = syscall.Errno(0xa6) + ENOTSOCK = syscall.Errno(0x5f) + ENOTSUP = syscall.Errno(0x7a) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x50) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x7a) + EOVERFLOW = syscall.Errno(0x4f) + EOWNERDEAD = syscall.Errno(0xa5) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x7b) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x78) + EPROTOTYPE = syscall.Errno(0x62) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x52) + EREMDEV = syscall.Errno(0x8e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x8c) + ERESTART = syscall.Errno(0x5b) + ERFKILL = syscall.Errno(0xa7) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x8f) + ESOCKTNOSUPPORT = syscall.Errno(0x79) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x97) + ESTRPIPE = syscall.Errno(0x5c) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x91) + ETOOMANYREFS = syscall.Errno(0x90) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x87) + EUNATCH = syscall.Errno(0x2a) + EUSERS = syscall.Errno(0x5e) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x34) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x12) + SIGCLD = syscall.Signal(0x12) + SIGCONT = syscall.Signal(0x19) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x16) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x16) + SIGPROF = syscall.Signal(0x1d) + SIGPWR = syscall.Signal(0x13) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x17) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x18) + SIGTTIN = syscall.Signal(0x1a) + SIGTTOU = syscall.Signal(0x1b) + SIGURG = syscall.Signal(0x15) + SIGUSR1 = syscall.Signal(0x10) + SIGUSR2 = syscall.Signal(0x11) + SIGVTALRM = syscall.Signal(0x1c) + SIGWINCH = syscall.Signal(0x14) + SIGXCPU = syscall.Signal(0x1e) + SIGXFSZ = syscall.Signal(0x1f) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "no message of desired type", + 36: "identifier removed", + 37: "channel number out of range", + 38: "level 2 not synchronized", + 39: "level 3 halted", + 40: "level 3 reset", + 41: "link number out of range", + 42: "protocol driver not attached", + 43: "no CSI structure available", + 44: "level 2 halted", + 45: "resource deadlock avoided", + 46: "no locks available", + 50: "invalid exchange", + 51: "invalid request descriptor", + 52: "exchange full", + 53: "no anode", + 54: "invalid request code", + 55: "invalid slot", + 56: "file locking deadlock error", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 73: "RFS specific error", + 74: "multihop attempted", + 77: "bad message", + 78: "file name too long", + 79: "value too large for defined data type", + 80: "name not unique on network", + 81: "file descriptor in bad state", + 82: "remote address changed", + 83: "can not access a needed shared library", + 84: "accessing a corrupted shared library", + 85: ".lib section in a.out corrupted", + 86: "attempting to link in too many shared libraries", + 87: "cannot exec a shared library directly", + 88: "invalid or incomplete multibyte or wide character", + 89: "function not implemented", + 90: "too many levels of symbolic links", + 91: "interrupted system call should be restarted", + 92: "streams pipe error", + 93: "directory not empty", + 94: "too many users", + 95: "socket operation on non-socket", + 96: "destination address required", + 97: "message too long", + 98: "protocol wrong type for socket", + 99: "protocol not available", + 120: "protocol not supported", + 121: "socket type not supported", + 122: "operation not supported", + 123: "protocol family not supported", + 124: "address family not supported by protocol", + 125: "address already in use", + 126: "cannot assign requested address", + 127: "network is down", + 128: "network is unreachable", + 129: "network dropped connection on reset", + 130: "software caused connection abort", + 131: "connection reset by peer", + 132: "no buffer space available", + 133: "transport endpoint is already connected", + 134: "transport endpoint is not connected", + 135: "structure needs cleaning", + 137: "not a XENIX named type file", + 138: "no XENIX semaphores available", + 139: "is a named type file", + 140: "remote I/O error", + 141: "unknown error 141", + 142: "unknown error 142", + 143: "cannot send after transport endpoint shutdown", + 144: "too many references: cannot splice", + 145: "connection timed out", + 146: "connection refused", + 147: "host is down", + 148: "no route to host", + 149: "operation already in progress", + 150: "operation now in progress", + 151: "stale file handle", + 158: "operation canceled", + 159: "no medium found", + 160: "wrong medium type", + 161: "required key not available", + 162: "key has expired", + 163: "key has been revoked", + 164: "key was rejected by service", + 165: "owner died", + 166: "state not recoverable", + 167: "operation not possible due to RF-kill", + 168: "memory page has hardware error", + 1133: "disk quota exceeded", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "user defined signal 1", + 17: "user defined signal 2", + 18: "child exited", + 19: "power failure", + 20: "window changed", + 21: "urgent I/O condition", + 22: "I/O possible", + 23: "stopped (signal)", + 24: "stopped", + 25: "continued", + 26: "stopped (tty input)", + 27: "stopped (tty output)", + 28: "virtual timer expired", + 29: "profiling timer expired", + 30: "CPU time limit exceeded", + 31: "file size limit exceeded", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go new file mode 100644 index 0000000..5e40607 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -0,0 +1,2308 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64le,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0xe + F_GETLK64 = 0xe + F_GETOWN = 0x17 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x18 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x100 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x80 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x800 + MAP_SHARED = 0x1 + MAP_STACK = 0x40000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x0 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT = 0x4 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETFPREGS = 0xf + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x6 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x9 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x40047307 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x40047309 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x80047308 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x1 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x80 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x2 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0xffff + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1e + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x1008 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETDEBUG = 0x800454c9 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x4 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x7d) + EADDRNOTAVAIL = syscall.Errno(0x7e) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x7c) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x95) + EBADE = syscall.Errno(0x32) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x51) + EBADMSG = syscall.Errno(0x4d) + EBADR = syscall.Errno(0x33) + EBADRQC = syscall.Errno(0x36) + EBADSLT = syscall.Errno(0x37) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x9e) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x25) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x82) + ECONNREFUSED = syscall.Errno(0x92) + ECONNRESET = syscall.Errno(0x83) + EDEADLK = syscall.Errno(0x2d) + EDEADLOCK = syscall.Errno(0x38) + EDESTADDRREQ = syscall.Errno(0x60) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x46d) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x93) + EHOSTUNREACH = syscall.Errno(0x94) + EHWPOISON = syscall.Errno(0xa8) + EIDRM = syscall.Errno(0x24) + EILSEQ = syscall.Errno(0x58) + EINIT = syscall.Errno(0x8d) + EINPROGRESS = syscall.Errno(0x96) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x85) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x8b) + EKEYEXPIRED = syscall.Errno(0xa2) + EKEYREJECTED = syscall.Errno(0xa4) + EKEYREVOKED = syscall.Errno(0xa3) + EL2HLT = syscall.Errno(0x2c) + EL2NSYNC = syscall.Errno(0x26) + EL3HLT = syscall.Errno(0x27) + EL3RST = syscall.Errno(0x28) + ELIBACC = syscall.Errno(0x53) + ELIBBAD = syscall.Errno(0x54) + ELIBEXEC = syscall.Errno(0x57) + ELIBMAX = syscall.Errno(0x56) + ELIBSCN = syscall.Errno(0x55) + ELNRNG = syscall.Errno(0x29) + ELOOP = syscall.Errno(0x5a) + EMEDIUMTYPE = syscall.Errno(0xa0) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x61) + EMULTIHOP = syscall.Errno(0x4a) + ENAMETOOLONG = syscall.Errno(0x4e) + ENAVAIL = syscall.Errno(0x8a) + ENETDOWN = syscall.Errno(0x7f) + ENETRESET = syscall.Errno(0x81) + ENETUNREACH = syscall.Errno(0x80) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x35) + ENOBUFS = syscall.Errno(0x84) + ENOCSI = syscall.Errno(0x2b) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0xa1) + ENOLCK = syscall.Errno(0x2e) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x9f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x23) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x63) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x59) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x86) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x5d) + ENOTNAM = syscall.Errno(0x89) + ENOTRECOVERABLE = syscall.Errno(0xa6) + ENOTSOCK = syscall.Errno(0x5f) + ENOTSUP = syscall.Errno(0x7a) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x50) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x7a) + EOVERFLOW = syscall.Errno(0x4f) + EOWNERDEAD = syscall.Errno(0xa5) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x7b) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x78) + EPROTOTYPE = syscall.Errno(0x62) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x52) + EREMDEV = syscall.Errno(0x8e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x8c) + ERESTART = syscall.Errno(0x5b) + ERFKILL = syscall.Errno(0xa7) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x8f) + ESOCKTNOSUPPORT = syscall.Errno(0x79) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x97) + ESTRPIPE = syscall.Errno(0x5c) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x91) + ETOOMANYREFS = syscall.Errno(0x90) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x87) + EUNATCH = syscall.Errno(0x2a) + EUSERS = syscall.Errno(0x5e) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x34) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x12) + SIGCLD = syscall.Signal(0x12) + SIGCONT = syscall.Signal(0x19) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x16) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x16) + SIGPROF = syscall.Signal(0x1d) + SIGPWR = syscall.Signal(0x13) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x17) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x18) + SIGTTIN = syscall.Signal(0x1a) + SIGTTOU = syscall.Signal(0x1b) + SIGURG = syscall.Signal(0x15) + SIGUSR1 = syscall.Signal(0x10) + SIGUSR2 = syscall.Signal(0x11) + SIGVTALRM = syscall.Signal(0x1c) + SIGWINCH = syscall.Signal(0x14) + SIGXCPU = syscall.Signal(0x1e) + SIGXFSZ = syscall.Signal(0x1f) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "no message of desired type", + 36: "identifier removed", + 37: "channel number out of range", + 38: "level 2 not synchronized", + 39: "level 3 halted", + 40: "level 3 reset", + 41: "link number out of range", + 42: "protocol driver not attached", + 43: "no CSI structure available", + 44: "level 2 halted", + 45: "resource deadlock avoided", + 46: "no locks available", + 50: "invalid exchange", + 51: "invalid request descriptor", + 52: "exchange full", + 53: "no anode", + 54: "invalid request code", + 55: "invalid slot", + 56: "file locking deadlock error", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 73: "RFS specific error", + 74: "multihop attempted", + 77: "bad message", + 78: "file name too long", + 79: "value too large for defined data type", + 80: "name not unique on network", + 81: "file descriptor in bad state", + 82: "remote address changed", + 83: "can not access a needed shared library", + 84: "accessing a corrupted shared library", + 85: ".lib section in a.out corrupted", + 86: "attempting to link in too many shared libraries", + 87: "cannot exec a shared library directly", + 88: "invalid or incomplete multibyte or wide character", + 89: "function not implemented", + 90: "too many levels of symbolic links", + 91: "interrupted system call should be restarted", + 92: "streams pipe error", + 93: "directory not empty", + 94: "too many users", + 95: "socket operation on non-socket", + 96: "destination address required", + 97: "message too long", + 98: "protocol wrong type for socket", + 99: "protocol not available", + 120: "protocol not supported", + 121: "socket type not supported", + 122: "operation not supported", + 123: "protocol family not supported", + 124: "address family not supported by protocol", + 125: "address already in use", + 126: "cannot assign requested address", + 127: "network is down", + 128: "network is unreachable", + 129: "network dropped connection on reset", + 130: "software caused connection abort", + 131: "connection reset by peer", + 132: "no buffer space available", + 133: "transport endpoint is already connected", + 134: "transport endpoint is not connected", + 135: "structure needs cleaning", + 137: "not a XENIX named type file", + 138: "no XENIX semaphores available", + 139: "is a named type file", + 140: "remote I/O error", + 141: "unknown error 141", + 142: "unknown error 142", + 143: "cannot send after transport endpoint shutdown", + 144: "too many references: cannot splice", + 145: "connection timed out", + 146: "connection refused", + 147: "host is down", + 148: "no route to host", + 149: "operation already in progress", + 150: "operation now in progress", + 151: "stale file handle", + 158: "operation canceled", + 159: "no medium found", + 160: "wrong medium type", + 161: "required key not available", + 162: "key has expired", + 163: "key has been revoked", + 164: "key was rejected by service", + 165: "owner died", + 166: "state not recoverable", + 167: "operation not possible due to RF-kill", + 168: "memory page has hardware error", + 1133: "disk quota exceeded", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "user defined signal 1", + 17: "user defined signal 2", + 18: "child exited", + 19: "power failure", + 20: "window changed", + 21: "urgent I/O condition", + 22: "I/O possible", + 23: "stopped (signal)", + 24: "stopped", + 25: "continued", + 26: "stopped (tty input)", + 27: "stopped (tty output)", + 28: "virtual timer expired", + 29: "profiling timer expired", + 30: "CPU time limit exceeded", + 31: "file size limit exceeded", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go new file mode 100644 index 0000000..b9b9d63 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -0,0 +1,2308 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mipsle,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BLKBSZGET = 0x40041270 + BLKBSZSET = 0x80041271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40041272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x21 + F_GETLK64 = 0x21 + F_GETOWN = 0x17 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x22 + F_SETLK64 = 0x22 + F_SETLKW = 0x23 + F_SETLKW64 = 0x23 + F_SETOWN = 0x18 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x100 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x80 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x800 + MAP_SHARED = 0x1 + MAP_STACK = 0x40000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x2000 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT = 0x4 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETFPREGS = 0xf + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + RLIMIT_AS = 0x6 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x9 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x40047307 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x40047309 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x80047308 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x1 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x80 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x2 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0xffff + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1e + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x1008 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x800854d5 + TUNDETACHFILTER = 0x800854d6 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x400854db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETDEBUG = 0x800454c9 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x4 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x20 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x7d) + EADDRNOTAVAIL = syscall.Errno(0x7e) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x7c) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x95) + EBADE = syscall.Errno(0x32) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x51) + EBADMSG = syscall.Errno(0x4d) + EBADR = syscall.Errno(0x33) + EBADRQC = syscall.Errno(0x36) + EBADSLT = syscall.Errno(0x37) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x9e) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x25) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x82) + ECONNREFUSED = syscall.Errno(0x92) + ECONNRESET = syscall.Errno(0x83) + EDEADLK = syscall.Errno(0x2d) + EDEADLOCK = syscall.Errno(0x38) + EDESTADDRREQ = syscall.Errno(0x60) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x46d) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x93) + EHOSTUNREACH = syscall.Errno(0x94) + EHWPOISON = syscall.Errno(0xa8) + EIDRM = syscall.Errno(0x24) + EILSEQ = syscall.Errno(0x58) + EINIT = syscall.Errno(0x8d) + EINPROGRESS = syscall.Errno(0x96) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x85) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x8b) + EKEYEXPIRED = syscall.Errno(0xa2) + EKEYREJECTED = syscall.Errno(0xa4) + EKEYREVOKED = syscall.Errno(0xa3) + EL2HLT = syscall.Errno(0x2c) + EL2NSYNC = syscall.Errno(0x26) + EL3HLT = syscall.Errno(0x27) + EL3RST = syscall.Errno(0x28) + ELIBACC = syscall.Errno(0x53) + ELIBBAD = syscall.Errno(0x54) + ELIBEXEC = syscall.Errno(0x57) + ELIBMAX = syscall.Errno(0x56) + ELIBSCN = syscall.Errno(0x55) + ELNRNG = syscall.Errno(0x29) + ELOOP = syscall.Errno(0x5a) + EMEDIUMTYPE = syscall.Errno(0xa0) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x61) + EMULTIHOP = syscall.Errno(0x4a) + ENAMETOOLONG = syscall.Errno(0x4e) + ENAVAIL = syscall.Errno(0x8a) + ENETDOWN = syscall.Errno(0x7f) + ENETRESET = syscall.Errno(0x81) + ENETUNREACH = syscall.Errno(0x80) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x35) + ENOBUFS = syscall.Errno(0x84) + ENOCSI = syscall.Errno(0x2b) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0xa1) + ENOLCK = syscall.Errno(0x2e) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x9f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x23) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x63) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x59) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x86) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x5d) + ENOTNAM = syscall.Errno(0x89) + ENOTRECOVERABLE = syscall.Errno(0xa6) + ENOTSOCK = syscall.Errno(0x5f) + ENOTSUP = syscall.Errno(0x7a) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x50) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x7a) + EOVERFLOW = syscall.Errno(0x4f) + EOWNERDEAD = syscall.Errno(0xa5) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x7b) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x78) + EPROTOTYPE = syscall.Errno(0x62) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x52) + EREMDEV = syscall.Errno(0x8e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x8c) + ERESTART = syscall.Errno(0x5b) + ERFKILL = syscall.Errno(0xa7) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x8f) + ESOCKTNOSUPPORT = syscall.Errno(0x79) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x97) + ESTRPIPE = syscall.Errno(0x5c) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x91) + ETOOMANYREFS = syscall.Errno(0x90) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x87) + EUNATCH = syscall.Errno(0x2a) + EUSERS = syscall.Errno(0x5e) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x34) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x12) + SIGCLD = syscall.Signal(0x12) + SIGCONT = syscall.Signal(0x19) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x16) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x16) + SIGPROF = syscall.Signal(0x1d) + SIGPWR = syscall.Signal(0x13) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x17) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x18) + SIGTTIN = syscall.Signal(0x1a) + SIGTTOU = syscall.Signal(0x1b) + SIGURG = syscall.Signal(0x15) + SIGUSR1 = syscall.Signal(0x10) + SIGUSR2 = syscall.Signal(0x11) + SIGVTALRM = syscall.Signal(0x1c) + SIGWINCH = syscall.Signal(0x14) + SIGXCPU = syscall.Signal(0x1e) + SIGXFSZ = syscall.Signal(0x1f) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "no message of desired type", + 36: "identifier removed", + 37: "channel number out of range", + 38: "level 2 not synchronized", + 39: "level 3 halted", + 40: "level 3 reset", + 41: "link number out of range", + 42: "protocol driver not attached", + 43: "no CSI structure available", + 44: "level 2 halted", + 45: "resource deadlock avoided", + 46: "no locks available", + 50: "invalid exchange", + 51: "invalid request descriptor", + 52: "exchange full", + 53: "no anode", + 54: "invalid request code", + 55: "invalid slot", + 56: "file locking deadlock error", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 73: "RFS specific error", + 74: "multihop attempted", + 77: "bad message", + 78: "file name too long", + 79: "value too large for defined data type", + 80: "name not unique on network", + 81: "file descriptor in bad state", + 82: "remote address changed", + 83: "can not access a needed shared library", + 84: "accessing a corrupted shared library", + 85: ".lib section in a.out corrupted", + 86: "attempting to link in too many shared libraries", + 87: "cannot exec a shared library directly", + 88: "invalid or incomplete multibyte or wide character", + 89: "function not implemented", + 90: "too many levels of symbolic links", + 91: "interrupted system call should be restarted", + 92: "streams pipe error", + 93: "directory not empty", + 94: "too many users", + 95: "socket operation on non-socket", + 96: "destination address required", + 97: "message too long", + 98: "protocol wrong type for socket", + 99: "protocol not available", + 120: "protocol not supported", + 121: "socket type not supported", + 122: "operation not supported", + 123: "protocol family not supported", + 124: "address family not supported by protocol", + 125: "address already in use", + 126: "cannot assign requested address", + 127: "network is down", + 128: "network is unreachable", + 129: "network dropped connection on reset", + 130: "software caused connection abort", + 131: "connection reset by peer", + 132: "no buffer space available", + 133: "transport endpoint is already connected", + 134: "transport endpoint is not connected", + 135: "structure needs cleaning", + 137: "not a XENIX named type file", + 138: "no XENIX semaphores available", + 139: "is a named type file", + 140: "remote I/O error", + 141: "unknown error 141", + 142: "unknown error 142", + 143: "cannot send after transport endpoint shutdown", + 144: "too many references: cannot splice", + 145: "connection timed out", + 146: "connection refused", + 147: "host is down", + 148: "no route to host", + 149: "operation already in progress", + 150: "operation now in progress", + 151: "stale file handle", + 158: "operation canceled", + 159: "no medium found", + 160: "wrong medium type", + 161: "required key not available", + 162: "key has expired", + 163: "key has been revoked", + 164: "key was rejected by service", + 165: "owner died", + 166: "state not recoverable", + 167: "operation not possible due to RF-kill", + 168: "memory page has hardware error", + 1133: "disk quota exceeded", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "user defined signal 1", + 17: "user defined signal 2", + 18: "child exited", + 19: "power failure", + 20: "window changed", + 21: "urgent I/O condition", + 22: "I/O possible", + 23: "stopped (signal)", + 24: "stopped", + 25: "continued", + 26: "stopped (tty input)", + 27: "stopped (tty output)", + 28: "virtual timer expired", + 29: "profiling timer expired", + 30: "CPU time limit exceeded", + 31: "file size limit exceeded", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go new file mode 100644 index 0000000..509418e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -0,0 +1,2361 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build ppc64,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x17 + B110 = 0x3 + B115200 = 0x11 + B1152000 = 0x18 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x19 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x1a + B230400 = 0x12 + B2400 = 0xb + B2500000 = 0x1b + B300 = 0x7 + B3000000 = 0x1c + B3500000 = 0x1d + B38400 = 0xf + B4000000 = 0x1e + B460800 = 0x13 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x14 + B57600 = 0x10 + B576000 = 0x15 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x16 + B9600 = 0xd + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1f + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0xff + CBAUDEX = 0x0 + CFLUSH = 0xf + CIBAUD = 0xff0000 + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIGNAL = 0xff + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0xd + F_SETLKW = 0x7 + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x4000 + IBSHIFT = 0x10 + ICANON = 0x100 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x400 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x80 + ISTRIP = 0x20 + IUCLC = 0x1000 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x80 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x300 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80000000 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x4 + ONLCR = 0x2 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x20000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x1000 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_SAO = 0x10 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETEVRREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS = 0xc + PTRACE_GETREGS64 = 0x16 + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GETVRREGS = 0x12 + PTRACE_GETVSRREGS = 0x1b + PTRACE_GET_DEBUGREG = 0x19 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETEVRREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGS64 = 0x17 + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SETVRREGS = 0x13 + PTRACE_SETVSRREGS = 0x1c + PTRACE_SET_DEBUGREG = 0x1a + PTRACE_SINGLEBLOCK = 0x100 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + PT_CCR = 0x26 + PT_CTR = 0x23 + PT_DAR = 0x29 + PT_DSCR = 0x2c + PT_DSISR = 0x2a + PT_FPR0 = 0x30 + PT_FPSCR = 0x50 + PT_LNK = 0x24 + PT_MSR = 0x21 + PT_NIP = 0x20 + PT_ORIG_R3 = 0x22 + PT_R0 = 0x0 + PT_R1 = 0x1 + PT_R10 = 0xa + PT_R11 = 0xb + PT_R12 = 0xc + PT_R13 = 0xd + PT_R14 = 0xe + PT_R15 = 0xf + PT_R16 = 0x10 + PT_R17 = 0x11 + PT_R18 = 0x12 + PT_R19 = 0x13 + PT_R2 = 0x2 + PT_R20 = 0x14 + PT_R21 = 0x15 + PT_R22 = 0x16 + PT_R23 = 0x17 + PT_R24 = 0x18 + PT_R25 = 0x19 + PT_R26 = 0x1a + PT_R27 = 0x1b + PT_R28 = 0x1c + PT_R29 = 0x1d + PT_R3 = 0x3 + PT_R30 = 0x1e + PT_R31 = 0x1f + PT_R4 = 0x4 + PT_R5 = 0x5 + PT_R6 = 0x6 + PT_R7 = 0x7 + PT_R8 = 0x8 + PT_R9 = 0x9 + PT_REGS_COUNT = 0x2c + PT_RESULT = 0x2b + PT_SOFTE = 0x27 + PT_TRAP = 0x28 + PT_VR0 = 0x52 + PT_VRSAVE = 0x94 + PT_VSCR = 0x93 + PT_VSR0 = 0x96 + PT_VSR31 = 0xd4 + PT_XER = 0x25 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x8 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x14 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x15 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x10 + SO_RCVTIMEO = 0x12 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x11 + SO_SNDTIMEO = 0x13 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x2000741f + TCGETA = 0x40147417 + TCGETS = 0x402c7413 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x2000741d + TCSBRKP = 0x5425 + TCSETA = 0x80147418 + TCSETAF = 0x8014741c + TCSETAW = 0x80147419 + TCSETS = 0x802c7414 + TCSETSF = 0x802c7416 + TCSETSW = 0x802c7415 + TCXONC = 0x2000741e + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x40045432 + TIOCGETC = 0x40067412 + TIOCGETD = 0x5424 + TIOCGETP = 0x40067408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_LOOP = 0x8000 + TIOCM_OUT1 = 0x2000 + TIOCM_OUT2 = 0x4000 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETC = 0x80067411 + TIOCSETD = 0x5423 + TIOCSETN = 0x8006740a + TIOCSETP = 0x80067409 + TIOCSIG = 0x80045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSLTC = 0x80067475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTART = 0x2000746e + TIOCSTI = 0x5412 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x400000 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETDEBUG = 0x800454c9 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0x10 + VEOF = 0x4 + VEOL = 0x6 + VEOL2 = 0x8 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x5 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xb + VSTART = 0xd + VSTOP = 0xe + VSUSP = 0xc + VSWTC = 0x9 + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x7 + VWERASE = 0xa + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4000 + XTABS = 0xc00 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x3a) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 58: "file locking deadlock error", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go new file mode 100644 index 0000000..26afbb8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -0,0 +1,2361 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build ppc64le,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x17 + B110 = 0x3 + B115200 = 0x11 + B1152000 = 0x18 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x19 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x1a + B230400 = 0x12 + B2400 = 0xb + B2500000 = 0x1b + B300 = 0x7 + B3000000 = 0x1c + B3500000 = 0x1d + B38400 = 0xf + B4000000 = 0x1e + B460800 = 0x13 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x14 + B57600 = 0x10 + B576000 = 0x15 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x16 + B9600 = 0xd + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1f + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0xff + CBAUDEX = 0x0 + CFLUSH = 0xf + CIBAUD = 0xff0000 + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIGNAL = 0xff + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0xd + F_SETLKW = 0x7 + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x4000 + IBSHIFT = 0x10 + ICANON = 0x100 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x400 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x80 + ISTRIP = 0x20 + IUCLC = 0x1000 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x80 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x300 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80000000 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x4 + ONLCR = 0x2 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x20000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x1000 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_SAO = 0x10 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETEVRREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS = 0xc + PTRACE_GETREGS64 = 0x16 + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GETVRREGS = 0x12 + PTRACE_GETVSRREGS = 0x1b + PTRACE_GET_DEBUGREG = 0x19 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETEVRREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGS64 = 0x17 + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SETVRREGS = 0x13 + PTRACE_SETVSRREGS = 0x1c + PTRACE_SET_DEBUGREG = 0x1a + PTRACE_SINGLEBLOCK = 0x100 + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + PT_CCR = 0x26 + PT_CTR = 0x23 + PT_DAR = 0x29 + PT_DSCR = 0x2c + PT_DSISR = 0x2a + PT_FPR0 = 0x30 + PT_FPSCR = 0x50 + PT_LNK = 0x24 + PT_MSR = 0x21 + PT_NIP = 0x20 + PT_ORIG_R3 = 0x22 + PT_R0 = 0x0 + PT_R1 = 0x1 + PT_R10 = 0xa + PT_R11 = 0xb + PT_R12 = 0xc + PT_R13 = 0xd + PT_R14 = 0xe + PT_R15 = 0xf + PT_R16 = 0x10 + PT_R17 = 0x11 + PT_R18 = 0x12 + PT_R19 = 0x13 + PT_R2 = 0x2 + PT_R20 = 0x14 + PT_R21 = 0x15 + PT_R22 = 0x16 + PT_R23 = 0x17 + PT_R24 = 0x18 + PT_R25 = 0x19 + PT_R26 = 0x1a + PT_R27 = 0x1b + PT_R28 = 0x1c + PT_R29 = 0x1d + PT_R3 = 0x3 + PT_R30 = 0x1e + PT_R31 = 0x1f + PT_R4 = 0x4 + PT_R5 = 0x5 + PT_R6 = 0x6 + PT_R7 = 0x7 + PT_R8 = 0x8 + PT_R9 = 0x9 + PT_REGS_COUNT = 0x2c + PT_RESULT = 0x2b + PT_SOFTE = 0x27 + PT_TRAP = 0x28 + PT_VR0 = 0x52 + PT_VRSAVE = 0x94 + PT_VSCR = 0x93 + PT_VSR0 = 0x96 + PT_VSR31 = 0xd4 + PT_XER = 0x25 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x8 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x14 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x15 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x10 + SO_RCVTIMEO = 0x12 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x11 + SO_SNDTIMEO = 0x13 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x2000741f + TCGETA = 0x40147417 + TCGETS = 0x402c7413 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x2000741d + TCSBRKP = 0x5425 + TCSETA = 0x80147418 + TCSETAF = 0x8014741c + TCSETAW = 0x80147419 + TCSETS = 0x802c7414 + TCSETSF = 0x802c7416 + TCSETSW = 0x802c7415 + TCXONC = 0x2000741e + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x40045432 + TIOCGETC = 0x40067412 + TIOCGETD = 0x5424 + TIOCGETP = 0x40067408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_LOOP = 0x8000 + TIOCM_OUT1 = 0x2000 + TIOCM_OUT2 = 0x4000 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETC = 0x80067411 + TIOCSETD = 0x5423 + TIOCSETN = 0x8006740a + TIOCSETP = 0x80067409 + TIOCSIG = 0x80045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSLTC = 0x80067475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTART = 0x2000746e + TIOCSTI = 0x5412 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x400000 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETDEBUG = 0x800454c9 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0x10 + VEOF = 0x4 + VEOL = 0x6 + VEOL2 = 0x8 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x5 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xb + VSTART = 0xd + VSTOP = 0xe + VSUSP = 0xc + VSWTC = 0x9 + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x7 + VWERASE = 0xa + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4000 + XTABS = 0xc00 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x3a) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 58: "file locking deadlock error", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go new file mode 100644 index 0000000..eeb9941 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -0,0 +1,2360 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build s390x,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EFD_SEMAPHORE = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_DISABLE_TE = 0x5010 + PTRACE_ENABLE_TE = 0x5009 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_LAST_BREAK = 0x5006 + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKDATA_AREA = 0x5003 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKTEXT_AREA = 0x5002 + PTRACE_PEEKUSR = 0x3 + PTRACE_PEEKUSR_AREA = 0x5000 + PTRACE_PEEK_SYSTEM_CALL = 0x5007 + PTRACE_POKEDATA = 0x5 + PTRACE_POKEDATA_AREA = 0x5005 + PTRACE_POKETEXT = 0x4 + PTRACE_POKETEXT_AREA = 0x5004 + PTRACE_POKEUSR = 0x6 + PTRACE_POKEUSR_AREA = 0x5001 + PTRACE_POKE_SYSTEM_CALL = 0x5008 + PTRACE_PROT = 0x15 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SINGLEBLOCK = 0xc + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TE_ABORT_RAND = 0x5011 + PTRACE_TRACEME = 0x0 + PT_ACR0 = 0x90 + PT_ACR1 = 0x94 + PT_ACR10 = 0xb8 + PT_ACR11 = 0xbc + PT_ACR12 = 0xc0 + PT_ACR13 = 0xc4 + PT_ACR14 = 0xc8 + PT_ACR15 = 0xcc + PT_ACR2 = 0x98 + PT_ACR3 = 0x9c + PT_ACR4 = 0xa0 + PT_ACR5 = 0xa4 + PT_ACR6 = 0xa8 + PT_ACR7 = 0xac + PT_ACR8 = 0xb0 + PT_ACR9 = 0xb4 + PT_CR_10 = 0x168 + PT_CR_11 = 0x170 + PT_CR_9 = 0x160 + PT_ENDREGS = 0x1af + PT_FPC = 0xd8 + PT_FPR0 = 0xe0 + PT_FPR1 = 0xe8 + PT_FPR10 = 0x130 + PT_FPR11 = 0x138 + PT_FPR12 = 0x140 + PT_FPR13 = 0x148 + PT_FPR14 = 0x150 + PT_FPR15 = 0x158 + PT_FPR2 = 0xf0 + PT_FPR3 = 0xf8 + PT_FPR4 = 0x100 + PT_FPR5 = 0x108 + PT_FPR6 = 0x110 + PT_FPR7 = 0x118 + PT_FPR8 = 0x120 + PT_FPR9 = 0x128 + PT_GPR0 = 0x10 + PT_GPR1 = 0x18 + PT_GPR10 = 0x60 + PT_GPR11 = 0x68 + PT_GPR12 = 0x70 + PT_GPR13 = 0x78 + PT_GPR14 = 0x80 + PT_GPR15 = 0x88 + PT_GPR2 = 0x20 + PT_GPR3 = 0x28 + PT_GPR4 = 0x30 + PT_GPR5 = 0x38 + PT_GPR6 = 0x40 + PT_GPR7 = 0x48 + PT_GPR8 = 0x50 + PT_GPR9 = 0x58 + PT_IEEE_IP = 0x1a8 + PT_LASTOFF = 0x1a8 + PT_ORIGGPR2 = 0xd0 + PT_PSWADDR = 0x8 + PT_PSWMASK = 0x0 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x8 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1a + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETDEBUG = 0x400454c9 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x6 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not recoverable", + 132: "operation not possible due to RF-kill", + 133: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "stack fault", + 17: "child exited", + 18: "continued", + 19: "stopped (signal)", + 20: "stopped", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "urgent I/O condition", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "I/O possible", + 30: "power failure", + 31: "bad system call", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go new file mode 100644 index 0000000..95de199 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -0,0 +1,2142 @@ +// mkerrors.sh -m64 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build sparc64,linux + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_X25 = 0x10f + ASI_LEON_DFLUSH = 0x11 + ASI_LEON_IFLUSH = 0x10 + ASI_LEON_MMUFLUSH = 0x18 + B0 = 0x0 + B1000000 = 0x100c + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x100d + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100e + B153600 = 0x1006 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100f + B230400 = 0x1003 + B2400 = 0xb + B300 = 0x7 + B307200 = 0x1007 + B38400 = 0xf + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x100a + B57600 = 0x1001 + B576000 = 0x100b + B600 = 0x8 + B614400 = 0x1008 + B75 = 0x2 + B76800 = 0x1005 + B921600 = 0x1009 + B9600 = 0xd + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EMT_TAGOVF = 0x1 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x400000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x7 + F_GETLK64 = 0x7 + F_GETOWN = 0x5 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x8 + F_SETLK64 = 0x8 + F_SETLKW = 0x9 + F_SETLKW64 = 0x9 + F_SETOWN = 0x6 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x3 + F_WRLCK = 0x2 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x8 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x400000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x4000 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_UNICAST_HOPS = 0x10 + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GROWSDOWN = 0x200 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x100 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_STACK = 0x20000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + NAME_MAX = 0xff + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x400000 + O_CREAT = 0x200 + O_DIRECT = 0x100000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x2000 + O_EXCL = 0x800 + O_FSYNC = 0x802000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x4004 + O_NOATIME = 0x200000 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x4000 + O_PATH = 0x1000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x802000 + O_SYNC = 0x802000 + O_TMPFILE = 0x2010000 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = -0x1 + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETFPAREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPREGS64 = 0x19 + PTRACE_GETREGS = 0xc + PTRACE_GETREGS64 = 0x16 + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_READDATA = 0x10 + PTRACE_READTEXT = 0x12 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SEIZE = 0x4206 + PTRACE_SETFPAREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPREGS64 = 0x1a + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGS64 = 0x17 + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SINGLESTEP = 0x9 + PTRACE_SPARC_DETACH = 0xb + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + PTRACE_WRITEDATA = 0x11 + PTRACE_WRITETEXT = 0x13 + PT_FP = 0x48 + PT_G0 = 0x10 + PT_G1 = 0x14 + PT_G2 = 0x18 + PT_G3 = 0x1c + PT_G4 = 0x20 + PT_G5 = 0x24 + PT_G6 = 0x28 + PT_G7 = 0x2c + PT_I0 = 0x30 + PT_I1 = 0x34 + PT_I2 = 0x38 + PT_I3 = 0x3c + PT_I4 = 0x40 + PT_I5 = 0x44 + PT_I6 = 0x48 + PT_I7 = 0x4c + PT_NPC = 0x8 + PT_PC = 0x4 + PT_PSR = 0x0 + PT_REGS_MAGIC = 0x57ac6c00 + PT_TNPC = 0x90 + PT_TPC = 0x88 + PT_TSTATE = 0x80 + PT_V9_FP = 0x70 + PT_V9_G0 = 0x0 + PT_V9_G1 = 0x8 + PT_V9_G2 = 0x10 + PT_V9_G3 = 0x18 + PT_V9_G4 = 0x20 + PT_V9_G5 = 0x28 + PT_V9_G6 = 0x30 + PT_V9_G7 = 0x38 + PT_V9_I0 = 0x40 + PT_V9_I1 = 0x48 + PT_V9_I2 = 0x50 + PT_V9_I3 = 0x58 + PT_V9_I4 = 0x60 + PT_V9_I5 = 0x68 + PT_V9_I6 = 0x70 + PT_V9_I7 = 0x78 + PT_V9_MAGIC = 0x9c + PT_V9_TNPC = 0x90 + PT_V9_TPC = 0x88 + PT_V9_TSTATE = 0x80 + PT_V9_Y = 0x98 + PT_WIM = 0x10 + PT_Y = 0xc + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x6 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = -0x1 + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x10 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x18 + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x5f + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x11 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_GATED = 0x8 + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x23 + SCM_TIMESTAMPNS = 0x21 + SCM_WIFI_STATUS = 0x25 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SOCK_CLOEXEC = 0x400000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x4000 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0xffff + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x8000 + SO_ATTACH_BPF = 0x34 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x35 + SO_ATTACH_REUSEPORT_EBPF = 0x36 + SO_BINDTODEVICE = 0xd + SO_BPF_EXTENSIONS = 0x32 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0x400 + SO_BUSY_POLL = 0x30 + SO_CNX_ADVICE = 0x37 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x33 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x28 + SO_MARK = 0x22 + SO_MAX_PACING_RATE = 0x31 + SO_NOFCS = 0x27 + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x2 + SO_PASSSEC = 0x1f + SO_PEEK_OFF = 0x26 + SO_PEERCRED = 0x40 + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1e + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x100b + SO_RCVLOWAT = 0x800 + SO_RCVTIMEO = 0x2000 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x24 + SO_SECURITY_AUTHENTICATION = 0x5001 + SO_SECURITY_ENCRYPTION_NETWORK = 0x5004 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x5002 + SO_SELECT_ERR_QUEUE = 0x29 + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x100a + SO_SNDLOWAT = 0x1000 + SO_SNDTIMEO = 0x4000 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x23 + SO_TIMESTAMPNS = 0x21 + SO_TYPE = 0x1008 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x25 + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x20005407 + TCGETA = 0x40125401 + TCGETS = 0x40245408 + TCGETS2 = 0x402c540c + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x20005405 + TCSBRKP = 0x5425 + TCSETA = 0x80125402 + TCSETAF = 0x80125404 + TCSETAW = 0x80125403 + TCSETS = 0x80245409 + TCSETS2 = 0x802c540d + TCSETSF = 0x8024540b + TCSETSF2 = 0x802c540f + TCSETSW = 0x8024540a + TCSETSW2 = 0x802c540e + TCXONC = 0x20005406 + TIOCCBRK = 0x2000747a + TIOCCONS = 0x20007424 + TIOCEXCL = 0x2000740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x40047400 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x40047483 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40047486 + TIOCGRS485 = 0x40205441 + TIOCGSERIAL = 0x541e + TIOCGSID = 0x40047485 + TIOCGSOFTCAR = 0x40047464 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMIWAIT = 0x545c + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_LOOP = 0x8000 + TIOCM_OUT1 = 0x2000 + TIOCM_OUT2 = 0x4000 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007484 + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x80047401 + TIOCSIG = 0x80047488 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x80047482 + TIOCSPTLCK = 0x80047487 + TIOCSRS485 = 0xc0205442 + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x80047465 + TIOCSTART = 0x2000746e + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x20005437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETDEBUG = 0x800454c9 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + VDISCARD = 0xd + VDSUSP = 0xb + VEOF = 0x4 + VEOL = 0x5 + VEOL2 = 0x6 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x4 + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WRAP = 0x20000 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XCASE = 0x4 + XTABS = 0x1800 + __TIOCFLUSH = 0x80047410 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EADV = syscall.Errno(0x53) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x25) + EBADE = syscall.Errno(0x66) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x5d) + EBADMSG = syscall.Errno(0x4c) + EBADR = syscall.Errno(0x67) + EBADRQC = syscall.Errno(0x6a) + EBADSLT = syscall.Errno(0x6b) + EBFONT = syscall.Errno(0x6d) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7f) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x5e) + ECOMM = syscall.Errno(0x55) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0x4e) + EDEADLOCK = syscall.Errno(0x6c) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EHWPOISON = syscall.Errno(0x87) + EIDRM = syscall.Errno(0x4d) + EILSEQ = syscall.Errno(0x7a) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x81) + EKEYREJECTED = syscall.Errno(0x83) + EKEYREVOKED = syscall.Errno(0x82) + EL2HLT = syscall.Errno(0x65) + EL2NSYNC = syscall.Errno(0x5f) + EL3HLT = syscall.Errno(0x60) + EL3RST = syscall.Errno(0x61) + ELIBACC = syscall.Errno(0x72) + ELIBBAD = syscall.Errno(0x70) + ELIBEXEC = syscall.Errno(0x6e) + ELIBMAX = syscall.Errno(0x7b) + ELIBSCN = syscall.Errno(0x7c) + ELNRNG = syscall.Errno(0x62) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x7e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x57) + ENAMETOOLONG = syscall.Errno(0x3f) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x69) + ENOBUFS = syscall.Errno(0x37) + ENOCSI = syscall.Errno(0x64) + ENODATA = syscall.Errno(0x6f) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x80) + ENOLCK = syscall.Errno(0x4f) + ENOLINK = syscall.Errno(0x52) + ENOMEDIUM = syscall.Errno(0x7d) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x4b) + ENONET = syscall.Errno(0x50) + ENOPKG = syscall.Errno(0x71) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x4a) + ENOSTR = syscall.Errno(0x48) + ENOSYS = syscall.Errno(0x5a) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x85) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x73) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x5c) + EOWNERDEAD = syscall.Errno(0x84) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROTO = syscall.Errno(0x56) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x59) + EREMOTE = syscall.Errno(0x47) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x74) + ERFKILL = syscall.Errno(0x86) + EROFS = syscall.Errno(0x1e) + ERREMOTE = syscall.Errno(0x51) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x54) + ESTALE = syscall.Errno(0x46) + ESTRPIPE = syscall.Errno(0x5b) + ETIME = syscall.Errno(0x49) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x63) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x68) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLOST = syscall.Signal(0x1d) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x17) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1d) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol", + 48: "address already in use", + 49: "cannot assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "transport endpoint is already connected", + 57: "transport endpoint is not connected", + 58: "cannot send after transport endpoint shutdown", + 59: "too many references: cannot splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disk quota exceeded", + 70: "stale file handle", + 71: "object is remote", + 72: "device not a stream", + 73: "timer expired", + 74: "out of streams resources", + 75: "no message of desired type", + 76: "bad message", + 77: "identifier removed", + 78: "resource deadlock avoided", + 79: "no locks available", + 80: "machine is not on the network", + 81: "unknown error 81", + 82: "link has been severed", + 83: "advertise error", + 84: "srmount error", + 85: "communication error on send", + 86: "protocol error", + 87: "multihop attempted", + 88: "RFS specific error", + 89: "remote address changed", + 90: "function not implemented", + 91: "streams pipe error", + 92: "value too large for defined data type", + 93: "file descriptor in bad state", + 94: "channel number out of range", + 95: "level 2 not synchronized", + 96: "level 3 halted", + 97: "level 3 reset", + 98: "link number out of range", + 99: "protocol driver not attached", + 100: "no CSI structure available", + 101: "level 2 halted", + 102: "invalid exchange", + 103: "invalid request descriptor", + 104: "exchange full", + 105: "no anode", + 106: "invalid request code", + 107: "invalid slot", + 108: "file locking deadlock error", + 109: "bad font file format", + 110: "cannot exec a shared library directly", + 111: "no data available", + 112: "accessing a corrupted shared library", + 113: "package not installed", + 114: "can not access a needed shared library", + 115: "name not unique on network", + 116: "interrupted system call should be restarted", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "invalid or incomplete multibyte or wide character", + 123: "attempting to link in too many shared libraries", + 124: ".lib section in a.out corrupted", + 125: "no medium found", + 126: "wrong medium type", + 127: "operation canceled", + 128: "required key not available", + 129: "key has expired", + 130: "key has been revoked", + 131: "key was rejected by service", + 132: "owner died", + 133: "state not recoverable", + 134: "operation not possible due to RF-kill", + 135: "memory page has hardware error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "CPU time limit exceeded", + 25: "file size limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window changed", + 29: "resource lost", + 30: "user defined signal 1", + 31: "user defined signal 2", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go new file mode 100644 index 0000000..1612b66 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -0,0 +1,1719 @@ +// mkerrors.sh -m32 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,netbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x1c + AF_BLUETOOTH = 0x1f + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x20 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x23 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OROUTE = 0x11 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x22 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ARCNET = 0x7 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_STRIP = 0x17 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427d + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084277 + BIOCGETIF = 0x4090426b + BIOCGFEEDBACK = 0x4004427c + BIOCGHDRCMPLT = 0x40044274 + BIOCGRTIMEOUT = 0x400c427b + BIOCGSEESENT = 0x40044278 + BIOCGSTATS = 0x4080426f + BIOCGSTATSOLD = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044276 + BIOCSETF = 0x80084267 + BIOCSETIF = 0x8090426c + BIOCSFEEDBACK = 0x8004427d + BIOCSHDRCMPLT = 0x80044275 + BIOCSRTIMEOUT = 0x800c427a + BIOCSSEESENT = 0x80044279 + BIOCSTCPF = 0x80084272 + BIOCSUDPF = 0x80084273 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALIGNMENT32 = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DFLTBUFSIZE = 0x100000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x1000000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLONE_CSIGNAL = 0xff + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_PID = 0x1000 + CLONE_PTRACE = 0x2000 + CLONE_SIGHAND = 0x800 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + CTL_QUERY = -0x2 + DIOCBSFLUSH = 0x20006478 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HDLC = 0x10 + DLT_HHDLC = 0x79 + DLT_HIPPI = 0xf + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RAWAF_MASK = 0x2240000 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMUL_LINUX = 0x1 + EMUL_LINUX32 = 0x5 + EMUL_MAXID = 0x6 + EN_SW_CTL_INF = 0x1000 + EN_SW_CTL_PREC = 0x300 + EN_SW_CTL_ROUND = 0xc00 + EN_SW_DATACHAIN = 0x80 + EN_SW_DENORM = 0x2 + EN_SW_INVOP = 0x1 + EN_SW_OVERFLOW = 0x8 + EN_SW_PRECLOSS = 0x20 + EN_SW_UNDERFLOW = 0x10 + EN_SW_ZERODIV = 0x4 + ETHERCAP_JUMBO_MTU = 0x4 + ETHERCAP_VLAN_HWTAGGING = 0x2 + ETHERCAP_VLAN_MTU = 0x1 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERMTU_JUMBO = 0x2328 + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOWPROTOCOLS = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_LEN = 0x5ee + ETHER_MAX_LEN_JUMBO = 0x233a + ETHER_MIN_LEN = 0x40 + ETHER_PPPOE_ENCAP_LEN = 0x8 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = 0x2 + EVFILT_PROC = 0x4 + EVFILT_READ = 0x0 + EVFILT_SIGNAL = 0x5 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = 0x6 + EVFILT_VNODE = 0x3 + EVFILT_WRITE = 0x1 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x100 + FLUSHO = 0x800000 + F_CLOSEM = 0xa + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xc + F_FSCTL = -0x80000000 + F_FSDIRMASK = 0x70000000 + F_FSIN = 0x10000000 + F_FSINOUT = 0x30000000 + F_FSOUT = 0x20000000 + F_FSPRIV = 0x8000 + F_FSVOID = 0x40000000 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETNOSIGPIPE = 0xd + F_GETOWN = 0x5 + F_MAXFD = 0xb + F_OK = 0x0 + F_PARAM_MASK = 0xfff + F_PARAM_MAX = 0xfff + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETNOSIGPIPE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8f52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IPV6_ICMP = 0x3a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MOBILE = 0x37 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_VRRP = 0x70 + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_EF = 0x8000 + IP_ERRORMTU = 0x15 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x16 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINFRAGSIZE = 0x45 + IP_MINTTL = 0x18 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x17 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ALIGNMENT_16MB = 0x18000000 + MAP_ALIGNMENT_1TB = 0x28000000 + MAP_ALIGNMENT_256TB = 0x30000000 + MAP_ALIGNMENT_4GB = 0x20000000 + MAP_ALIGNMENT_64KB = 0x10000000 + MAP_ALIGNMENT_64PB = 0x38000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DEFAULT = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_STACK = 0x2000 + MAP_TRYFIXED = 0x400 + MAP_WIRED = 0x800 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CONTROLMBUF = 0x2000000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_IOVUSRSPACE = 0x4000000 + MSG_LENUSRSPACE = 0x8000000 + MSG_MCAST = 0x200 + MSG_NAMEMBUF = 0x1000000 + MSG_NBIO = 0x1000 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_USERFLAGS = 0xffffff + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x4 + NAME_MAX = 0x1ff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x5 + NET_RT_MAXID = 0x6 + NET_RT_OIFLIST = 0x4 + NET_RT_OOIFLIST = 0x3 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFIOGETBMAP = 0xc004667a + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_ALT_IO = 0x40000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x400000 + O_CREAT = 0x200 + O_DIRECT = 0x80000 + O_DIRECTORY = 0x200000 + O_DSYNC = 0x10000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_NOSIGPIPE = 0x1000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x20000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PRI_IOFLUSH = 0x7c + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x9 + RTAX_NETMASK = 0x2 + RTAX_TAG = 0x8 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTA_TAG = 0x100 + RTF_ANNOUNCE = 0x20000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x2000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_REJECT = 0x8 + RTF_SRC = 0x10000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_CHGADDR = 0x15 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x11 + RTM_IFANNOUNCE = 0x10 + RTM_IFINFO = 0x14 + RTM_LLINFO_UPD = 0x13 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OIFINFO = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_OOIFINFO = 0xe + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_SETGATE = 0x12 + RTM_VERSION = 0x4 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x4 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x8 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80906931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691c + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80906932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80906919 + SIOCDIFPHYADDR = 0x80906949 + SIOCDLIFADDR = 0x8118691e + SIOCGDRVSPEC = 0xc01c697b + SIOCGETPFSYNC = 0xc09069f8 + SIOCGETSGCNT = 0xc0147534 + SIOCGETVIFCNT = 0xc0147533 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0906921 + SIOCGIFADDRPREF = 0xc0946920 + SIOCGIFALIAS = 0xc040691b + SIOCGIFBRDADDR = 0xc0906923 + SIOCGIFCAP = 0xc0206976 + SIOCGIFCONF = 0xc0086926 + SIOCGIFDATA = 0xc0946985 + SIOCGIFDLT = 0xc0906977 + SIOCGIFDSTADDR = 0xc0906922 + SIOCGIFFLAGS = 0xc0906911 + SIOCGIFGENERIC = 0xc090693a + SIOCGIFMEDIA = 0xc0286936 + SIOCGIFMETRIC = 0xc0906917 + SIOCGIFMTU = 0xc090697e + SIOCGIFNETMASK = 0xc0906925 + SIOCGIFPDSTADDR = 0xc0906948 + SIOCGIFPSRCADDR = 0xc0906947 + SIOCGLIFADDR = 0xc118691d + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLINKSTR = 0xc01c6987 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGVH = 0xc0906983 + SIOCIFCREATE = 0x8090697a + SIOCIFDESTROY = 0x80906979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCINITIFADDR = 0xc0446984 + SIOCSDRVSPEC = 0x801c697b + SIOCSETPFSYNC = 0x809069f7 + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8090690c + SIOCSIFADDRPREF = 0x8094691f + SIOCSIFBRDADDR = 0x80906913 + SIOCSIFCAP = 0x80206975 + SIOCSIFDSTADDR = 0x8090690e + SIOCSIFFLAGS = 0x80906910 + SIOCSIFGENERIC = 0x80906939 + SIOCSIFMEDIA = 0xc0906935 + SIOCSIFMETRIC = 0x80906918 + SIOCSIFMTU = 0x8090697f + SIOCSIFNETMASK = 0x80906916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLINKSTR = 0x801c6988 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSVH = 0xc0906982 + SIOCZIFDATA = 0xc0946986 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_FLAGS_MASK = 0xf0000000 + SOCK_NONBLOCK = 0x20000000 + SOCK_NOSIGPIPE = 0x40000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOHEADER = 0x100a + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_OVERFLOWED = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x100c + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x100b + SO_TIMESTAMP = 0x2000 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SYSCTL_VERSION = 0x1000000 + SYSCTL_VERS_0 = 0x0 + SYSCTL_VERS_1 = 0x1000000 + SYSCTL_VERS_MASK = 0xff000000 + S_ARCH1 = 0x10000 + S_ARCH2 = 0x20000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + S_LOGIN_SET = 0x1 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CONGCTL = 0x20 + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x3 + TCP_KEEPINIT = 0x7 + TCP_KEEPINTVL = 0x5 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x400c7458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CDTRCTS = 0x10 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGLINED = 0x40207442 + TIOCGPGRP = 0x40047477 + TIOCGQSIZE = 0x40047481 + TIOCGRANTPT = 0x20007447 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMGET = 0x40287446 + TIOCPTSNAME = 0x40287448 + TIOCRCVFRAME = 0x80047445 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x2000745f + TIOCSLINED = 0x80207443 + TIOCSPGRP = 0x80047476 + TIOCSQSIZE = 0x80047480 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCXMTFRAME = 0x80047444 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALL = 0x8 + WALLSIG = 0x8 + WALTSIG = 0x4 + WCLONE = 0x4 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WNOWAIT = 0x10000 + WNOZOMBIE = 0x20000 + WOPTSCHECKED = 0x40000 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x58) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x57) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x55) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5e) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x59) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x5a) + ENOSTR = syscall.Errno(0x5b) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x56) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x60) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x5c) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x20) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large or too small", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol option not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "illegal byte sequence", + 86: "not supported", + 87: "operation Canceled", + 88: "bad or Corrupt message", + 89: "no message available", + 90: "no STREAM resources", + 91: "not a STREAM", + 92: "STREAM ioctl timeout", + 93: "attribute not found", + 94: "multihop attempted", + 95: "link has been severed", + 96: "protocol error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "power fail/restart", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go new file mode 100644 index 0000000..c994ab6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -0,0 +1,1709 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,netbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x1c + AF_BLUETOOTH = 0x1f + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x20 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x23 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OROUTE = 0x11 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x22 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ARCNET = 0x7 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_STRIP = 0x17 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427d + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104277 + BIOCGETIF = 0x4090426b + BIOCGFEEDBACK = 0x4004427c + BIOCGHDRCMPLT = 0x40044274 + BIOCGRTIMEOUT = 0x4010427b + BIOCGSEESENT = 0x40044278 + BIOCGSTATS = 0x4080426f + BIOCGSTATSOLD = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044276 + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8090426c + BIOCSFEEDBACK = 0x8004427d + BIOCSHDRCMPLT = 0x80044275 + BIOCSRTIMEOUT = 0x8010427a + BIOCSSEESENT = 0x80044279 + BIOCSTCPF = 0x80104272 + BIOCSUDPF = 0x80104273 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALIGNMENT32 = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DFLTBUFSIZE = 0x100000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x1000000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLONE_CSIGNAL = 0xff + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_PID = 0x1000 + CLONE_PTRACE = 0x2000 + CLONE_SIGHAND = 0x800 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + CTL_QUERY = -0x2 + DIOCBSFLUSH = 0x20006478 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HDLC = 0x10 + DLT_HHDLC = 0x79 + DLT_HIPPI = 0xf + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RAWAF_MASK = 0x2240000 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMUL_LINUX = 0x1 + EMUL_LINUX32 = 0x5 + EMUL_MAXID = 0x6 + ETHERCAP_JUMBO_MTU = 0x4 + ETHERCAP_VLAN_HWTAGGING = 0x2 + ETHERCAP_VLAN_MTU = 0x1 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERMTU_JUMBO = 0x2328 + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOWPROTOCOLS = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_LEN = 0x5ee + ETHER_MAX_LEN_JUMBO = 0x233a + ETHER_MIN_LEN = 0x40 + ETHER_PPPOE_ENCAP_LEN = 0x8 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = 0x2 + EVFILT_PROC = 0x4 + EVFILT_READ = 0x0 + EVFILT_SIGNAL = 0x5 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = 0x6 + EVFILT_VNODE = 0x3 + EVFILT_WRITE = 0x1 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x100 + FLUSHO = 0x800000 + F_CLOSEM = 0xa + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xc + F_FSCTL = -0x80000000 + F_FSDIRMASK = 0x70000000 + F_FSIN = 0x10000000 + F_FSINOUT = 0x30000000 + F_FSOUT = 0x20000000 + F_FSPRIV = 0x8000 + F_FSVOID = 0x40000000 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETNOSIGPIPE = 0xd + F_GETOWN = 0x5 + F_MAXFD = 0xb + F_OK = 0x0 + F_PARAM_MASK = 0xfff + F_PARAM_MAX = 0xfff + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETNOSIGPIPE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8f52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IPV6_ICMP = 0x3a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MOBILE = 0x37 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_VRRP = 0x70 + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_EF = 0x8000 + IP_ERRORMTU = 0x15 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x16 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINFRAGSIZE = 0x45 + IP_MINTTL = 0x18 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x17 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ALIGNMENT_16MB = 0x18000000 + MAP_ALIGNMENT_1TB = 0x28000000 + MAP_ALIGNMENT_256TB = 0x30000000 + MAP_ALIGNMENT_4GB = 0x20000000 + MAP_ALIGNMENT_64KB = 0x10000000 + MAP_ALIGNMENT_64PB = 0x38000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DEFAULT = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_STACK = 0x2000 + MAP_TRYFIXED = 0x400 + MAP_WIRED = 0x800 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CONTROLMBUF = 0x2000000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_IOVUSRSPACE = 0x4000000 + MSG_LENUSRSPACE = 0x8000000 + MSG_MCAST = 0x200 + MSG_NAMEMBUF = 0x1000000 + MSG_NBIO = 0x1000 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_USERFLAGS = 0xffffff + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x4 + NAME_MAX = 0x1ff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x5 + NET_RT_MAXID = 0x6 + NET_RT_OIFLIST = 0x4 + NET_RT_OOIFLIST = 0x3 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFIOGETBMAP = 0xc004667a + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_ALT_IO = 0x40000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x400000 + O_CREAT = 0x200 + O_DIRECT = 0x80000 + O_DIRECTORY = 0x200000 + O_DSYNC = 0x10000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_NOSIGPIPE = 0x1000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x20000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PRI_IOFLUSH = 0x7c + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x9 + RTAX_NETMASK = 0x2 + RTAX_TAG = 0x8 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTA_TAG = 0x100 + RTF_ANNOUNCE = 0x20000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x2000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_REJECT = 0x8 + RTF_SRC = 0x10000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_CHGADDR = 0x15 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x11 + RTM_IFANNOUNCE = 0x10 + RTM_IFINFO = 0x14 + RTM_LLINFO_UPD = 0x13 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OIFINFO = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_OOIFINFO = 0xe + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_SETGATE = 0x12 + RTM_VERSION = 0x4 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x4 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x8 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80906931 + SIOCADDRT = 0x8038720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691c + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80906932 + SIOCDELRT = 0x8038720b + SIOCDIFADDR = 0x80906919 + SIOCDIFPHYADDR = 0x80906949 + SIOCDLIFADDR = 0x8118691e + SIOCGDRVSPEC = 0xc028697b + SIOCGETPFSYNC = 0xc09069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0906921 + SIOCGIFADDRPREF = 0xc0986920 + SIOCGIFALIAS = 0xc040691b + SIOCGIFBRDADDR = 0xc0906923 + SIOCGIFCAP = 0xc0206976 + SIOCGIFCONF = 0xc0106926 + SIOCGIFDATA = 0xc0986985 + SIOCGIFDLT = 0xc0906977 + SIOCGIFDSTADDR = 0xc0906922 + SIOCGIFFLAGS = 0xc0906911 + SIOCGIFGENERIC = 0xc090693a + SIOCGIFMEDIA = 0xc0306936 + SIOCGIFMETRIC = 0xc0906917 + SIOCGIFMTU = 0xc090697e + SIOCGIFNETMASK = 0xc0906925 + SIOCGIFPDSTADDR = 0xc0906948 + SIOCGIFPSRCADDR = 0xc0906947 + SIOCGLIFADDR = 0xc118691d + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLINKSTR = 0xc0286987 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGVH = 0xc0906983 + SIOCIFCREATE = 0x8090697a + SIOCIFDESTROY = 0x80906979 + SIOCIFGCLONERS = 0xc0106978 + SIOCINITIFADDR = 0xc0706984 + SIOCSDRVSPEC = 0x8028697b + SIOCSETPFSYNC = 0x809069f7 + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8090690c + SIOCSIFADDRPREF = 0x8098691f + SIOCSIFBRDADDR = 0x80906913 + SIOCSIFCAP = 0x80206975 + SIOCSIFDSTADDR = 0x8090690e + SIOCSIFFLAGS = 0x80906910 + SIOCSIFGENERIC = 0x80906939 + SIOCSIFMEDIA = 0xc0906935 + SIOCSIFMETRIC = 0x80906918 + SIOCSIFMTU = 0x8090697f + SIOCSIFNETMASK = 0x80906916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLINKSTR = 0x80286988 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSVH = 0xc0906982 + SIOCZIFDATA = 0xc0986986 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_FLAGS_MASK = 0xf0000000 + SOCK_NONBLOCK = 0x20000000 + SOCK_NOSIGPIPE = 0x40000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOHEADER = 0x100a + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_OVERFLOWED = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x100c + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x100b + SO_TIMESTAMP = 0x2000 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SYSCTL_VERSION = 0x1000000 + SYSCTL_VERS_0 = 0x0 + SYSCTL_VERS_1 = 0x1000000 + SYSCTL_VERS_MASK = 0xff000000 + S_ARCH1 = 0x10000 + S_ARCH2 = 0x20000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + S_LOGIN_SET = 0x1 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CONGCTL = 0x20 + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x3 + TCP_KEEPINIT = 0x7 + TCP_KEEPINTVL = 0x5 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CDTRCTS = 0x10 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGLINED = 0x40207442 + TIOCGPGRP = 0x40047477 + TIOCGQSIZE = 0x40047481 + TIOCGRANTPT = 0x20007447 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMGET = 0x40287446 + TIOCPTSNAME = 0x40287448 + TIOCRCVFRAME = 0x80087445 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x2000745f + TIOCSLINED = 0x80207443 + TIOCSPGRP = 0x80047476 + TIOCSQSIZE = 0x80047480 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCXMTFRAME = 0x80087444 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALL = 0x8 + WALLSIG = 0x8 + WALTSIG = 0x4 + WCLONE = 0x4 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WNOWAIT = 0x10000 + WNOZOMBIE = 0x20000 + WOPTSCHECKED = 0x40000 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x58) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x57) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x55) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5e) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x59) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x5a) + ENOSTR = syscall.Errno(0x5b) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x56) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x60) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x5c) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x20) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large or too small", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol option not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "illegal byte sequence", + 86: "not supported", + 87: "operation Canceled", + 88: "bad or Corrupt message", + 89: "no message available", + 90: "no STREAM resources", + 91: "not a STREAM", + 92: "STREAM ioctl timeout", + 93: "attribute not found", + 94: "multihop attempted", + 95: "link has been severed", + 96: "protocol error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "power fail/restart", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go new file mode 100644 index 0000000..a8f9efe --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -0,0 +1,1698 @@ +// mkerrors.sh -marm +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,netbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -marm _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x1c + AF_BLUETOOTH = 0x1f + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x20 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x23 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OROUTE = 0x11 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x22 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ARCNET = 0x7 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_STRIP = 0x17 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427d + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084277 + BIOCGETIF = 0x4090426b + BIOCGFEEDBACK = 0x4004427c + BIOCGHDRCMPLT = 0x40044274 + BIOCGRTIMEOUT = 0x400c427b + BIOCGSEESENT = 0x40044278 + BIOCGSTATS = 0x4080426f + BIOCGSTATSOLD = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044276 + BIOCSETF = 0x80084267 + BIOCSETIF = 0x8090426c + BIOCSFEEDBACK = 0x8004427d + BIOCSHDRCMPLT = 0x80044275 + BIOCSRTIMEOUT = 0x800c427a + BIOCSSEESENT = 0x80044279 + BIOCSTCPF = 0x80084272 + BIOCSUDPF = 0x80084273 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALIGNMENT32 = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DFLTBUFSIZE = 0x100000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x1000000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + CTL_QUERY = -0x2 + DIOCBSFLUSH = 0x20006478 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HDLC = 0x10 + DLT_HHDLC = 0x79 + DLT_HIPPI = 0xf + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RAWAF_MASK = 0x2240000 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMUL_LINUX = 0x1 + EMUL_LINUX32 = 0x5 + EMUL_MAXID = 0x6 + ETHERCAP_JUMBO_MTU = 0x4 + ETHERCAP_VLAN_HWTAGGING = 0x2 + ETHERCAP_VLAN_MTU = 0x1 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERMTU_JUMBO = 0x2328 + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOWPROTOCOLS = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_LEN = 0x5ee + ETHER_MAX_LEN_JUMBO = 0x233a + ETHER_MIN_LEN = 0x40 + ETHER_PPPOE_ENCAP_LEN = 0x8 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = 0x2 + EVFILT_PROC = 0x4 + EVFILT_READ = 0x0 + EVFILT_SIGNAL = 0x5 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = 0x6 + EVFILT_VNODE = 0x3 + EVFILT_WRITE = 0x1 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x100 + FLUSHO = 0x800000 + F_CLOSEM = 0xa + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xc + F_FSCTL = -0x80000000 + F_FSDIRMASK = 0x70000000 + F_FSIN = 0x10000000 + F_FSINOUT = 0x30000000 + F_FSOUT = 0x20000000 + F_FSPRIV = 0x8000 + F_FSVOID = 0x40000000 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETNOSIGPIPE = 0xd + F_GETOWN = 0x5 + F_MAXFD = 0xb + F_OK = 0x0 + F_PARAM_MASK = 0xfff + F_PARAM_MAX = 0xfff + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETNOSIGPIPE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8f52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IPV6_ICMP = 0x3a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MOBILE = 0x37 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_VRRP = 0x70 + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_EF = 0x8000 + IP_ERRORMTU = 0x15 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x16 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINFRAGSIZE = 0x45 + IP_MINTTL = 0x18 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x17 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ALIGNMENT_16MB = 0x18000000 + MAP_ALIGNMENT_1TB = 0x28000000 + MAP_ALIGNMENT_256TB = 0x30000000 + MAP_ALIGNMENT_4GB = 0x20000000 + MAP_ALIGNMENT_64KB = 0x10000000 + MAP_ALIGNMENT_64PB = 0x38000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DEFAULT = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_STACK = 0x2000 + MAP_TRYFIXED = 0x400 + MAP_WIRED = 0x800 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CONTROLMBUF = 0x2000000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_IOVUSRSPACE = 0x4000000 + MSG_LENUSRSPACE = 0x8000000 + MSG_MCAST = 0x200 + MSG_NAMEMBUF = 0x1000000 + MSG_NBIO = 0x1000 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_USERFLAGS = 0xffffff + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x4 + NAME_MAX = 0x1ff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x5 + NET_RT_MAXID = 0x6 + NET_RT_OIFLIST = 0x4 + NET_RT_OOIFLIST = 0x3 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFIOGETBMAP = 0xc004667a + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_ALT_IO = 0x40000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x400000 + O_CREAT = 0x200 + O_DIRECT = 0x80000 + O_DIRECTORY = 0x200000 + O_DSYNC = 0x10000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_NOSIGPIPE = 0x1000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x20000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PRI_IOFLUSH = 0x7c + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x9 + RTAX_NETMASK = 0x2 + RTAX_TAG = 0x8 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTA_TAG = 0x100 + RTF_ANNOUNCE = 0x20000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x2000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_REJECT = 0x8 + RTF_SRC = 0x10000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_CHGADDR = 0x15 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x11 + RTM_IFANNOUNCE = 0x10 + RTM_IFINFO = 0x14 + RTM_LLINFO_UPD = 0x13 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OIFINFO = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_OOIFINFO = 0xe + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_SETGATE = 0x12 + RTM_VERSION = 0x4 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x4 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x8 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80906931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691c + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80906932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80906919 + SIOCDIFPHYADDR = 0x80906949 + SIOCDLIFADDR = 0x8118691e + SIOCGDRVSPEC = 0xc01c697b + SIOCGETPFSYNC = 0xc09069f8 + SIOCGETSGCNT = 0xc0147534 + SIOCGETVIFCNT = 0xc0147533 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0906921 + SIOCGIFADDRPREF = 0xc0946920 + SIOCGIFALIAS = 0xc040691b + SIOCGIFBRDADDR = 0xc0906923 + SIOCGIFCAP = 0xc0206976 + SIOCGIFCONF = 0xc0086926 + SIOCGIFDATA = 0xc0946985 + SIOCGIFDLT = 0xc0906977 + SIOCGIFDSTADDR = 0xc0906922 + SIOCGIFFLAGS = 0xc0906911 + SIOCGIFGENERIC = 0xc090693a + SIOCGIFMEDIA = 0xc0286936 + SIOCGIFMETRIC = 0xc0906917 + SIOCGIFMTU = 0xc090697e + SIOCGIFNETMASK = 0xc0906925 + SIOCGIFPDSTADDR = 0xc0906948 + SIOCGIFPSRCADDR = 0xc0906947 + SIOCGLIFADDR = 0xc118691d + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLINKSTR = 0xc01c6987 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGVH = 0xc0906983 + SIOCIFCREATE = 0x8090697a + SIOCIFDESTROY = 0x80906979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCINITIFADDR = 0xc0446984 + SIOCSDRVSPEC = 0x801c697b + SIOCSETPFSYNC = 0x809069f7 + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8090690c + SIOCSIFADDRPREF = 0x8094691f + SIOCSIFBRDADDR = 0x80906913 + SIOCSIFCAP = 0x80206975 + SIOCSIFDSTADDR = 0x8090690e + SIOCSIFFLAGS = 0x80906910 + SIOCSIFGENERIC = 0x80906939 + SIOCSIFMEDIA = 0xc0906935 + SIOCSIFMETRIC = 0x80906918 + SIOCSIFMTU = 0x8090697f + SIOCSIFNETMASK = 0x80906916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLINKSTR = 0x801c6988 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSVH = 0xc0906982 + SIOCZIFDATA = 0xc0946986 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_FLAGS_MASK = 0xf0000000 + SOCK_NONBLOCK = 0x20000000 + SOCK_NOSIGPIPE = 0x40000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOHEADER = 0x100a + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_OVERFLOWED = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x100c + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x100b + SO_TIMESTAMP = 0x2000 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SYSCTL_VERSION = 0x1000000 + SYSCTL_VERS_0 = 0x0 + SYSCTL_VERS_1 = 0x1000000 + SYSCTL_VERS_MASK = 0xff000000 + S_ARCH1 = 0x10000 + S_ARCH2 = 0x20000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CONGCTL = 0x20 + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x3 + TCP_KEEPINIT = 0x7 + TCP_KEEPINTVL = 0x5 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x400c7458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CDTRCTS = 0x10 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGLINED = 0x40207442 + TIOCGPGRP = 0x40047477 + TIOCGQSIZE = 0x40047481 + TIOCGRANTPT = 0x20007447 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMGET = 0x48087446 + TIOCPTSNAME = 0x48087448 + TIOCRCVFRAME = 0x80047445 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x2000745f + TIOCSLINED = 0x80207443 + TIOCSPGRP = 0x80047476 + TIOCSQSIZE = 0x80047480 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCXMTFRAME = 0x80047444 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALL = 0x8 + WALLSIG = 0x8 + WALTSIG = 0x4 + WCLONE = 0x4 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WNOWAIT = 0x10000 + WNOZOMBIE = 0x20000 + WOPTSCHECKED = 0x40000 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x58) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x57) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x55) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x60) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5e) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x5d) + ENOBUFS = syscall.Errno(0x37) + ENODATA = syscall.Errno(0x59) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5f) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x5a) + ENOSTR = syscall.Errno(0x5b) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x56) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x60) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIME = syscall.Errno(0x5c) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x20) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large or too small", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol option not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "illegal byte sequence", + 86: "not supported", + 87: "operation Canceled", + 88: "bad or Corrupt message", + 89: "no message available", + 90: "no STREAM resources", + 91: "not a STREAM", + 92: "STREAM ioctl timeout", + 93: "attribute not found", + 94: "multihop attempted", + 95: "link has been severed", + 96: "protocol error", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "power fail/restart", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go new file mode 100644 index 0000000..04e4f33 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -0,0 +1,1591 @@ +// mkerrors.sh -m32 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,openbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc008427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x400c426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80084267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80084277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x800c426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCOSFPFLUSH = 0x2000444e + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DIVERT_INIT = 0x2 + IPPROTO_DIVERT_RESP = 0x1 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DIVERTFL = 0x1022 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_COPY = 0x4 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0x1ff7 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_TRYFIXED = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_BCAST = 0x100 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_MAXID = 0x6 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_MASK = 0x3ff000 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xb + RTAX_NETMASK = 0x2 + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTF_ANNOUNCE = 0x4000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x10f808 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_SOURCE = 0x20000 + RTF_STATIC = 0x800 + RTF_TUNNEL = 0x100000 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80246987 + SIOCALIFADDR = 0x8218691c + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8054693c + SIOCBRDGADDS = 0x80546941 + SIOCBRDGARL = 0x806e694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8054693d + SIOCBRDGDELS = 0x80546942 + SIOCBRDGFLUSH = 0x80546948 + SIOCBRDGFRL = 0x806e694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc03c6958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc028694f + SIOCBRDGGSIFS = 0xc054693c + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0546942 + SIOCBRDGRTS = 0xc0186943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80546955 + SIOCBRDGSIFFLGS = 0x8054693f + SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80246989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8218691e + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0147534 + SIOCGETVIFCNT = 0xc0147533 + SIOCGETVLAN = 0xc0206990 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0086924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc024698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFGROUP = 0xc0246988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFMEDIA = 0xc0286936 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFADDR = 0xc218691d + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGVH = 0xc02069f6 + SIOCGVNETID = 0xc02069a7 + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8024698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFMEDIA = 0xc0206935 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFTIMESLOT = 0x80206985 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSSPPPPARAMS = 0x80206993 + SIOCSVH = 0xc02069f5 + SIOCSVNETID = 0x802069a6 + SOCK_DGRAM = 0x2 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_NSTATES = 0xb + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x400c745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5b) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "IPsec processing failure", + 83: "attribute not found", + 84: "illegal byte sequence", + 85: "no medium found", + 86: "wrong medium type", + 87: "value too large to be stored in data type", + 88: "operation canceled", + 89: "identifier removed", + 90: "no message of desired type", + 91: "not supported", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "thread AST", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go new file mode 100644 index 0000000..c80ff98 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -0,0 +1,1590 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,openbsd + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCOSFPFLUSH = 0x2000444e + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DIVERT_INIT = 0x2 + IPPROTO_DIVERT_RESP = 0x1 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DIVERTFL = 0x1022 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_COPY = 0x4 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0x1ff7 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_DONATE_COPY = 0x3 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_TRYFIXED = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_BCAST = 0x100 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_MAXID = 0x6 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xb + RTAX_NETMASK = 0x2 + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTF_ANNOUNCE = 0x4000 + RTF_BLACKHOLE = 0x1000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x10f808 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_SOURCE = 0x20000 + RTF_STATIC = 0x800 + RTF_TUNNEL = 0x100000 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCALIFADDR = 0x8218691c + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8058693c + SIOCBRDGADDS = 0x80586941 + SIOCBRDGARL = 0x806e694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8058693d + SIOCBRDGDELS = 0x80586942 + SIOCBRDGFLUSH = 0x80586948 + SIOCBRDGFRL = 0x806e694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc058693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGSIFS = 0xc058693c + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0586942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80586955 + SIOCBRDGSIFFLGS = 0x8058693f + SIOCBRDGSIFPRIO = 0x80586954 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8218691e + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFMEDIA = 0xc0306936 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFADDR = 0xc218691d + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGVH = 0xc02069f6 + SIOCGVNETID = 0xc02069a7 + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFMEDIA = 0xc0206935 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFTIMESLOT = 0x80206985 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSSPPPPARAMS = 0x80206993 + SIOCSVH = 0xc02069f5 + SIOCSVNETID = 0x802069a6 + SOCK_DGRAM = 0x2 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_NSTATES = 0xb + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5b) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "IPsec processing failure", + 83: "attribute not found", + 84: "illegal byte sequence", + 85: "no medium found", + 86: "wrong medium type", + 87: "value too large to be stored in data type", + 88: "operation canceled", + 89: "identifier removed", + 90: "no message of desired type", + 91: "not supported", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "thread AST", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go new file mode 100644 index 0000000..4c32049 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -0,0 +1,1593 @@ +// mkerrors.sh +// Code generated by the command above; see README.md. DO NOT EDIT. + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- _const.go + +// +build arm,openbsd + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc008427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x400c426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80084267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80084277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x800c426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCOSFPFLUSH = 0x2000444e + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x7 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFA_ROUTE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DIVERT_INIT = 0x2 + IPPROTO_DIVERT_RESP = 0x1 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DIVERTFL = 0x1022 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0x3ff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_MAXID = 0x6 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xb + RTAX_NETMASK = 0x2 + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTF_ANNOUNCE = 0x4000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x70f808 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80246987 + SIOCALIFADDR = 0x8218691c + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8054693c + SIOCBRDGADDS = 0x80546941 + SIOCBRDGARL = 0x806e694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8054693d + SIOCBRDGDELS = 0x80546942 + SIOCBRDGFLUSH = 0x80546948 + SIOCBRDGFRL = 0x806e694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc03c6958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc028694f + SIOCBRDGGSIFS = 0xc054693c + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0546942 + SIOCBRDGRTS = 0xc0186943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80546955 + SIOCBRDGSIFFLGS = 0x8054693f + SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80246989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8218691e + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0147534 + SIOCGETVIFCNT = 0xc0147533 + SIOCGETVLAN = 0xc0206990 + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0086924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc024698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFGROUP = 0xc0246988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFMEDIA = 0xc0286936 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFADDR = 0xc218691d + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGVH = 0xc02069f6 + SIOCGVNETID = 0xc02069a7 + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8024698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFMEDIA = 0xc0206935 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFTIMESLOT = 0x80206985 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSSPPPPARAMS = 0x80206993 + SIOCSVH = 0xc02069f5 + SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_NSTATES = 0xb + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x400c745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x80047465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5b) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "connection timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "IPsec processing failure", + 83: "attribute not found", + 84: "illegal byte sequence", + 85: "no medium found", + 86: "wrong medium type", + 87: "value too large to be stored in data type", + 88: "operation canceled", + 89: "identifier removed", + 90: "no message of desired type", + 91: "not supported", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "thread AST", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go new file mode 100644 index 0000000..09eedb0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -0,0 +1,1489 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,solaris + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_802 = 0x12 + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_ECMA = 0x8 + AF_FILE = 0x1 + AF_GOSIP = 0x16 + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1a + AF_INET_OFFLOAD = 0x1e + AF_IPX = 0x17 + AF_KEY = 0x1b + AF_LAT = 0xe + AF_LINK = 0x19 + AF_LOCAL = 0x1 + AF_MAX = 0x20 + AF_NBS = 0x7 + AF_NCA = 0x1c + AF_NIT = 0x11 + AF_NS = 0x6 + AF_OSI = 0x13 + AF_OSINET = 0x15 + AF_PACKET = 0x20 + AF_POLICY = 0x1d + AF_PUP = 0x4 + AF_ROUTE = 0x18 + AF_SNA = 0xb + AF_TRILL = 0x1f + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_X25 = 0x14 + ARPHRD_ARCNET = 0x7 + ARPHRD_ATM = 0x10 + ARPHRD_AX25 = 0x3 + ARPHRD_CHAOS = 0x5 + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_FC = 0x12 + ARPHRD_FRAME = 0xf + ARPHRD_HDLC = 0x11 + ARPHRD_IB = 0x20 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IPATM = 0x13 + ARPHRD_METRICOM = 0x17 + ARPHRD_TUNNEL = 0x1f + B0 = 0x0 + B110 = 0x3 + B115200 = 0x12 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B153600 = 0x13 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B230400 = 0x14 + B2400 = 0xb + B300 = 0x7 + B307200 = 0x15 + B38400 = 0xf + B460800 = 0x16 + B4800 = 0xc + B50 = 0x1 + B57600 = 0x10 + B600 = 0x8 + B75 = 0x2 + B76800 = 0x11 + B921600 = 0x17 + B9600 = 0xd + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = -0x3fefbd89 + BIOCGDLTLIST32 = -0x3ff7bd89 + BIOCGETIF = 0x4020426b + BIOCGETLIF = 0x4078426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRTIMEOUT = 0x4010427b + BIOCGRTIMEOUT32 = 0x4008427b + BIOCGSEESENT = 0x40044278 + BIOCGSTATS = 0x4080426f + BIOCGSTATSOLD = 0x4008426f + BIOCIMMEDIATE = -0x7ffbbd90 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = -0x3ffbbd9a + BIOCSDLT = -0x7ffbbd8a + BIOCSETF = -0x7fefbd99 + BIOCSETF32 = -0x7ff7bd99 + BIOCSETIF = -0x7fdfbd94 + BIOCSETLIF = -0x7f87bd94 + BIOCSHDRCMPLT = -0x7ffbbd8b + BIOCSRTIMEOUT = -0x7fefbd86 + BIOCSRTIMEOUT32 = -0x7ff7bd86 + BIOCSSEESENT = -0x7ffbbd87 + BIOCSTCPF = -0x7fefbd8e + BIOCSUDPF = -0x7fefbd8d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DFLTBUFSIZE = 0x100000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x1000000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0xf + CFLUSH = 0xf + CIBAUD = 0xf0000 + CLOCAL = 0x800 + CLOCK_HIGHRES = 0x4 + CLOCK_LEVEL = 0xa + CLOCK_MONOTONIC = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x5 + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x3 + CLOCK_THREAD_CPUTIME_ID = 0x2 + CLOCK_VIRTUAL = 0x1 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + CSWTCH = 0x1a + DLT_AIRONET_HEADER = 0x78 + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_BACNET_MS_TP = 0xa5 + DLT_CHAOS = 0x5 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FDDI = 0xa + DLT_FRELAY = 0x6b + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_HDLC = 0x10 + DLT_HHDLC = 0x79 + DLT_HIPPI = 0xf + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xa2 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_PPPD = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAW = 0xc + DLT_RAWAF_MASK = 0x2240000 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EMPTY_SET = 0x0 + EMT_CPCOVF = 0x1 + EQUALITY_CHECK = 0x0 + EXTA = 0xe + EXTB = 0xf + FD_CLOEXEC = 0x1 + FD_NFDBITS = 0x40 + FD_SETSIZE = 0x10000 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHALL = 0x1 + FLUSHDATA = 0x0 + FLUSHO = 0x2000 + F_ALLOCSP = 0xa + F_ALLOCSP64 = 0xa + F_BADFD = 0x2e + F_BLKSIZE = 0x13 + F_BLOCKS = 0x12 + F_CHKFL = 0x8 + F_COMPAT = 0x8 + F_DUP2FD = 0x9 + F_DUP2FD_CLOEXEC = 0x24 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x25 + F_FLOCK = 0x35 + F_FLOCK64 = 0x35 + F_FLOCKW = 0x36 + F_FLOCKW64 = 0x36 + F_FREESP = 0xb + F_FREESP64 = 0xb + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xe + F_GETLK64 = 0xe + F_GETOWN = 0x17 + F_GETXFL = 0x2d + F_HASREMOTELOCKS = 0x1a + F_ISSTREAM = 0xd + F_MANDDNY = 0x10 + F_MDACC = 0x20 + F_NODNY = 0x0 + F_NPRIV = 0x10 + F_OFD_GETLK = 0x2f + F_OFD_GETLK64 = 0x2f + F_OFD_SETLK = 0x30 + F_OFD_SETLK64 = 0x30 + F_OFD_SETLKW = 0x31 + F_OFD_SETLKW64 = 0x31 + F_PRIV = 0xf + F_QUOTACTL = 0x11 + F_RDACC = 0x1 + F_RDDNY = 0x1 + F_RDLCK = 0x1 + F_REVOKE = 0x19 + F_RMACC = 0x4 + F_RMDNY = 0x4 + F_RWACC = 0x3 + F_RWDNY = 0x3 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLK64_NBMAND = 0x2a + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETLK_NBMAND = 0x2a + F_SETOWN = 0x18 + F_SHARE = 0x28 + F_SHARE_NBMAND = 0x2b + F_UNLCK = 0x3 + F_UNLKSYS = 0x4 + F_UNSHARE = 0x29 + F_WRACC = 0x2 + F_WRDNY = 0x2 + F_WRLCK = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFF_ADDRCONF = 0x80000 + IFF_ALLMULTI = 0x200 + IFF_ANYCAST = 0x400000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x7f203003b5a + IFF_COS_ENABLED = 0x200000000 + IFF_DEBUG = 0x4 + IFF_DEPRECATED = 0x40000 + IFF_DHCPRUNNING = 0x4000 + IFF_DUPLICATE = 0x4000000000 + IFF_FAILED = 0x10000000 + IFF_FIXEDMTU = 0x1000000000 + IFF_INACTIVE = 0x40000000 + IFF_INTELLIGENT = 0x400 + IFF_IPMP = 0x8000000000 + IFF_IPMP_CANTCHANGE = 0x10000000 + IFF_IPMP_INVALID = 0x1ec200080 + IFF_IPV4 = 0x1000000 + IFF_IPV6 = 0x2000000 + IFF_L3PROTECT = 0x40000000000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x800 + IFF_MULTI_BCAST = 0x1000 + IFF_NOACCEPT = 0x4000000 + IFF_NOARP = 0x80 + IFF_NOFAILOVER = 0x8000000 + IFF_NOLINKLOCAL = 0x20000000000 + IFF_NOLOCAL = 0x20000 + IFF_NONUD = 0x200000 + IFF_NORTEXCH = 0x800000 + IFF_NOTRAILERS = 0x20 + IFF_NOXMIT = 0x10000 + IFF_OFFLINE = 0x80000000 + IFF_POINTOPOINT = 0x10 + IFF_PREFERRED = 0x400000000 + IFF_PRIVATE = 0x8000 + IFF_PROMISC = 0x100 + IFF_ROUTER = 0x100000 + IFF_RUNNING = 0x40 + IFF_STANDBY = 0x20000000 + IFF_TEMPORARY = 0x800000000 + IFF_UNNUMBERED = 0x2000 + IFF_UP = 0x1 + IFF_VIRTUAL = 0x2000000000 + IFF_VRRP = 0x10000000000 + IFF_XRESOLV = 0x100000000 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_6TO4 = 0xca + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IB = 0xc7 + IFT_IPV4 = 0xc8 + IFT_IPV6 = 0xc9 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_AUTOCONF_MASK = 0xffff0000 + IN_AUTOCONF_NET = 0xa9fe0000 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_CLASSE_NET = 0xffffffff + IN_LOOPBACKNET = 0x7f + IN_PRIVATE12_MASK = 0xfff00000 + IN_PRIVATE12_NET = 0xac100000 + IN_PRIVATE16_MASK = 0xffff0000 + IN_PRIVATE16_NET = 0xc0a80000 + IN_PRIVATE8_MASK = 0xff000000 + IN_PRIVATE8_NET = 0xa000000 + IPPROTO_AH = 0x33 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x4 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_HELLO = 0x3f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_ND = 0x4d + IPPROTO_NONE = 0x3b + IPPROTO_OSPF = 0x59 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_UDP = 0x11 + IPV6_ADD_MEMBERSHIP = 0x9 + IPV6_BOUND_IF = 0x41 + IPV6_CHECKSUM = 0x18 + IPV6_DONTFRAG = 0x21 + IPV6_DROP_MEMBERSHIP = 0xa + IPV6_DSTOPTS = 0xf + IPV6_FLOWINFO_FLOWLABEL = 0xffff0f00 + IPV6_FLOWINFO_TCLASS = 0xf00f + IPV6_HOPLIMIT = 0xc + IPV6_HOPOPTS = 0xe + IPV6_JOIN_GROUP = 0x9 + IPV6_LEAVE_GROUP = 0xa + IPV6_MULTICAST_HOPS = 0x7 + IPV6_MULTICAST_IF = 0x6 + IPV6_MULTICAST_LOOP = 0x8 + IPV6_NEXTHOP = 0xd + IPV6_PAD1_OPT = 0x0 + IPV6_PATHMTU = 0x25 + IPV6_PKTINFO = 0xb + IPV6_PREFER_SRC_CGA = 0x20 + IPV6_PREFER_SRC_CGADEFAULT = 0x10 + IPV6_PREFER_SRC_CGAMASK = 0x30 + IPV6_PREFER_SRC_COA = 0x2 + IPV6_PREFER_SRC_DEFAULT = 0x15 + IPV6_PREFER_SRC_HOME = 0x1 + IPV6_PREFER_SRC_MASK = 0x3f + IPV6_PREFER_SRC_MIPDEFAULT = 0x1 + IPV6_PREFER_SRC_MIPMASK = 0x3 + IPV6_PREFER_SRC_NONCGA = 0x10 + IPV6_PREFER_SRC_PUBLIC = 0x4 + IPV6_PREFER_SRC_TMP = 0x8 + IPV6_PREFER_SRC_TMPDEFAULT = 0x4 + IPV6_PREFER_SRC_TMPMASK = 0xc + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x13 + IPV6_RECVHOPOPTS = 0x14 + IPV6_RECVPATHMTU = 0x24 + IPV6_RECVPKTINFO = 0x12 + IPV6_RECVRTHDR = 0x16 + IPV6_RECVRTHDRDSTOPTS = 0x17 + IPV6_RECVTCLASS = 0x19 + IPV6_RTHDR = 0x10 + IPV6_RTHDRDSTOPTS = 0x11 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SEC_OPT = 0x22 + IPV6_SRC_PREFERENCES = 0x23 + IPV6_TCLASS = 0x26 + IPV6_UNICAST_HOPS = 0x5 + IPV6_UNSPEC_SRC = 0x42 + IPV6_USE_MIN_MTU = 0x20 + IPV6_V6ONLY = 0x27 + IP_ADD_MEMBERSHIP = 0x13 + IP_ADD_SOURCE_MEMBERSHIP = 0x17 + IP_BLOCK_SOURCE = 0x15 + IP_BOUND_IF = 0x41 + IP_BROADCAST = 0x106 + IP_BROADCAST_TTL = 0x43 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DHCPINIT_IF = 0x45 + IP_DONTFRAG = 0x1b + IP_DONTROUTE = 0x105 + IP_DROP_MEMBERSHIP = 0x14 + IP_DROP_SOURCE_MEMBERSHIP = 0x18 + IP_HDRINCL = 0x2 + IP_MAXPACKET = 0xffff + IP_MF = 0x2000 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x10 + IP_MULTICAST_LOOP = 0x12 + IP_MULTICAST_TTL = 0x11 + IP_NEXTHOP = 0x19 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x9 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVSLLA = 0xa + IP_RECVTTL = 0xb + IP_RETOPTS = 0x8 + IP_REUSEADDR = 0x104 + IP_SEC_OPT = 0x22 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x16 + IP_UNSPEC_SRC = 0x42 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x200 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_ACCESS_DEFAULT = 0x6 + MADV_ACCESS_LWP = 0x7 + MADV_ACCESS_MANY = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NORMAL = 0x0 + MADV_PURGE = 0x9 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80 + MAP_ALIGN = 0x200 + MAP_ANON = 0x100 + MAP_ANONYMOUS = 0x100 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_INITDATA = 0x800 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_TEXT = 0x400 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CTRUNC = 0x10 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_DUPCTRL = 0x800 + MSG_EOR = 0x8 + MSG_MAXIOVLEN = 0x10 + MSG_NOTIFICATION = 0x100 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x20 + MSG_WAITALL = 0x40 + MSG_XPG4_2 = 0x8000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_OLDSYNC = 0x0 + MS_SYNC = 0x4 + M_FLUSH = 0x86 + NAME_MAX = 0xff + NEWDEV = 0x1 + NL0 = 0x0 + NL1 = 0x100 + NLDLY = 0x100 + NOFLSH = 0x80 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + OLDDEV = 0x0 + ONBITSMAJOR = 0x7 + ONBITSMINOR = 0x8 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPENFAIL = -0x1 + OPOST = 0x1 + O_ACCMODE = 0x600003 + O_APPEND = 0x8 + O_CLOEXEC = 0x800000 + O_CREAT = 0x100 + O_DSYNC = 0x40 + O_EXCL = 0x400 + O_EXEC = 0x400000 + O_LARGEFILE = 0x2000 + O_NDELAY = 0x4 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NOLINKS = 0x40000 + O_NONBLOCK = 0x80 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x8000 + O_SEARCH = 0x200000 + O_SIOCGIFCONF = -0x3ff796ec + O_SIOCGLIFCONF = -0x3fef9688 + O_SYNC = 0x10 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + O_XATTR = 0x4000 + PARENB = 0x100 + PAREXT = 0x100000 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0x6 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = -0x3 + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x9 + RTAX_NETMASK = 0x2 + RTAX_SRC = 0x8 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTA_NUMBITS = 0x9 + RTA_SRC = 0x100 + RTF_BLACKHOLE = 0x1000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INDIRECT = 0x40000 + RTF_KERNEL = 0x80000 + RTF_LLINFO = 0x400 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_MULTIRT = 0x10000 + RTF_PRIVATE = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_REJECT = 0x8 + RTF_SETSRC = 0x20000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTF_ZONE = 0x100000 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_CHGADDR = 0xf + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_FREEADDR = 0x10 + RTM_GET = 0x4 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_VERSION = 0x3 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_AWARE = 0x1 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_RIGHTS = 0x1010 + SCM_TIMESTAMP = 0x1013 + SCM_UCRED = 0x1012 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIG2STR_MAX = 0x20 + SIOCADDMULTI = -0x7fdf96cf + SIOCADDRT = -0x7fcf8df6 + SIOCATMARK = 0x40047307 + SIOCDARP = -0x7fdb96e0 + SIOCDELMULTI = -0x7fdf96ce + SIOCDELRT = -0x7fcf8df5 + SIOCDXARP = -0x7fff9658 + SIOCGARP = -0x3fdb96e1 + SIOCGDSTINFO = -0x3fff965c + SIOCGENADDR = -0x3fdf96ab + SIOCGENPSTATS = -0x3fdf96c7 + SIOCGETLSGCNT = -0x3fef8deb + SIOCGETNAME = 0x40107334 + SIOCGETPEER = 0x40107335 + SIOCGETPROP = -0x3fff8f44 + SIOCGETSGCNT = -0x3feb8deb + SIOCGETSYNC = -0x3fdf96d3 + SIOCGETVIFCNT = -0x3feb8dec + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = -0x3fdf96f3 + SIOCGIFBRDADDR = -0x3fdf96e9 + SIOCGIFCONF = -0x3ff796a4 + SIOCGIFDSTADDR = -0x3fdf96f1 + SIOCGIFFLAGS = -0x3fdf96ef + SIOCGIFHWADDR = -0x3fdf9647 + SIOCGIFINDEX = -0x3fdf96a6 + SIOCGIFMEM = -0x3fdf96ed + SIOCGIFMETRIC = -0x3fdf96e5 + SIOCGIFMTU = -0x3fdf96ea + SIOCGIFMUXID = -0x3fdf96a8 + SIOCGIFNETMASK = -0x3fdf96e7 + SIOCGIFNUM = 0x40046957 + SIOCGIP6ADDRPOLICY = -0x3fff965e + SIOCGIPMSFILTER = -0x3ffb964c + SIOCGLIFADDR = -0x3f87968f + SIOCGLIFBINDING = -0x3f879666 + SIOCGLIFBRDADDR = -0x3f879685 + SIOCGLIFCONF = -0x3fef965b + SIOCGLIFDADSTATE = -0x3f879642 + SIOCGLIFDSTADDR = -0x3f87968d + SIOCGLIFFLAGS = -0x3f87968b + SIOCGLIFGROUPINFO = -0x3f4b9663 + SIOCGLIFGROUPNAME = -0x3f879664 + SIOCGLIFHWADDR = -0x3f879640 + SIOCGLIFINDEX = -0x3f87967b + SIOCGLIFLNKINFO = -0x3f879674 + SIOCGLIFMETRIC = -0x3f879681 + SIOCGLIFMTU = -0x3f879686 + SIOCGLIFMUXID = -0x3f87967d + SIOCGLIFNETMASK = -0x3f879683 + SIOCGLIFNUM = -0x3ff3967e + SIOCGLIFSRCOF = -0x3fef964f + SIOCGLIFSUBNET = -0x3f879676 + SIOCGLIFTOKEN = -0x3f879678 + SIOCGLIFUSESRC = -0x3f879651 + SIOCGLIFZONE = -0x3f879656 + SIOCGLOWAT = 0x40047303 + SIOCGMSFILTER = -0x3ffb964e + SIOCGPGRP = 0x40047309 + SIOCGSTAMP = -0x3fef9646 + SIOCGXARP = -0x3fff9659 + SIOCIFDETACH = -0x7fdf96c8 + SIOCILB = -0x3ffb9645 + SIOCLIFADDIF = -0x3f879691 + SIOCLIFDELND = -0x7f879673 + SIOCLIFGETND = -0x3f879672 + SIOCLIFREMOVEIF = -0x7f879692 + SIOCLIFSETND = -0x7f879671 + SIOCLOWER = -0x7fdf96d7 + SIOCSARP = -0x7fdb96e2 + SIOCSCTPGOPT = -0x3fef9653 + SIOCSCTPPEELOFF = -0x3ffb9652 + SIOCSCTPSOPT = -0x7fef9654 + SIOCSENABLESDP = -0x3ffb9649 + SIOCSETPROP = -0x7ffb8f43 + SIOCSETSYNC = -0x7fdf96d4 + SIOCSHIWAT = -0x7ffb8d00 + SIOCSIFADDR = -0x7fdf96f4 + SIOCSIFBRDADDR = -0x7fdf96e8 + SIOCSIFDSTADDR = -0x7fdf96f2 + SIOCSIFFLAGS = -0x7fdf96f0 + SIOCSIFINDEX = -0x7fdf96a5 + SIOCSIFMEM = -0x7fdf96ee + SIOCSIFMETRIC = -0x7fdf96e4 + SIOCSIFMTU = -0x7fdf96eb + SIOCSIFMUXID = -0x7fdf96a7 + SIOCSIFNAME = -0x7fdf96b7 + SIOCSIFNETMASK = -0x7fdf96e6 + SIOCSIP6ADDRPOLICY = -0x7fff965d + SIOCSIPMSFILTER = -0x7ffb964b + SIOCSLGETREQ = -0x3fdf96b9 + SIOCSLIFADDR = -0x7f879690 + SIOCSLIFBRDADDR = -0x7f879684 + SIOCSLIFDSTADDR = -0x7f87968e + SIOCSLIFFLAGS = -0x7f87968c + SIOCSLIFGROUPNAME = -0x7f879665 + SIOCSLIFINDEX = -0x7f87967a + SIOCSLIFLNKINFO = -0x7f879675 + SIOCSLIFMETRIC = -0x7f879680 + SIOCSLIFMTU = -0x7f879687 + SIOCSLIFMUXID = -0x7f87967c + SIOCSLIFNAME = -0x3f87967f + SIOCSLIFNETMASK = -0x7f879682 + SIOCSLIFPREFIX = -0x3f879641 + SIOCSLIFSUBNET = -0x7f879677 + SIOCSLIFTOKEN = -0x7f879679 + SIOCSLIFUSESRC = -0x7f879650 + SIOCSLIFZONE = -0x7f879655 + SIOCSLOWAT = -0x7ffb8cfe + SIOCSLSTAT = -0x7fdf96b8 + SIOCSMSFILTER = -0x7ffb964d + SIOCSPGRP = -0x7ffb8cf8 + SIOCSPROMISC = -0x7ffb96d0 + SIOCSQPTR = -0x3ffb9648 + SIOCSSDSTATS = -0x3fdf96d2 + SIOCSSESTATS = -0x3fdf96d1 + SIOCSXARP = -0x7fff965a + SIOCTMYADDR = -0x3ff79670 + SIOCTMYSITE = -0x3ff7966e + SIOCTONLINK = -0x3ff7966f + SIOCUPPER = -0x7fdf96d8 + SIOCX25RCV = -0x3fdf96c4 + SIOCX25TBL = -0x3fdf96c3 + SIOCX25XMT = -0x3fdf96c5 + SIOCXPROTO = 0x20007337 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NDELAY = 0x200000 + SOCK_NONBLOCK = 0x100000 + SOCK_RAW = 0x4 + SOCK_RDM = 0x5 + SOCK_SEQPACKET = 0x6 + SOCK_STREAM = 0x2 + SOCK_TYPE_MASK = 0xffff + SOL_FILTER = 0xfffc + SOL_PACKET = 0xfffd + SOL_ROUTE = 0xfffe + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ALL = 0x3f + SO_ALLZONES = 0x1014 + SO_ANON_MLP = 0x100a + SO_ATTACH_FILTER = 0x40000001 + SO_BAND = 0x4000 + SO_BROADCAST = 0x20 + SO_COPYOPT = 0x80000 + SO_DEBUG = 0x1 + SO_DELIM = 0x8000 + SO_DETACH_FILTER = 0x40000002 + SO_DGRAM_ERRIND = 0x200 + SO_DOMAIN = 0x100c + SO_DONTLINGER = -0x81 + SO_DONTROUTE = 0x10 + SO_ERROPT = 0x40000 + SO_ERROR = 0x1007 + SO_EXCLBIND = 0x1015 + SO_HIWAT = 0x10 + SO_ISNTTY = 0x800 + SO_ISTTY = 0x400 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOWAT = 0x20 + SO_MAC_EXEMPT = 0x100b + SO_MAC_IMPLICIT = 0x1016 + SO_MAXBLK = 0x100000 + SO_MAXPSZ = 0x8 + SO_MINPSZ = 0x4 + SO_MREADOFF = 0x80 + SO_MREADON = 0x40 + SO_NDELOFF = 0x200 + SO_NDELON = 0x100 + SO_NODELIM = 0x10000 + SO_OOBINLINE = 0x100 + SO_PROTOTYPE = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVPSH = 0x100d + SO_RCVTIMEO = 0x1006 + SO_READOPT = 0x1 + SO_RECVUCRED = 0x400 + SO_REUSEADDR = 0x4 + SO_SECATTR = 0x1011 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_STRHOLD = 0x20000 + SO_TAIL = 0x200000 + SO_TIMESTAMP = 0x1013 + SO_TONSTOP = 0x2000 + SO_TOSTOP = 0x1000 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_VRRP = 0x1017 + SO_WROFF = 0x2 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_ABORT_THRESHOLD = 0x11 + TCP_ANONPRIVBIND = 0x20 + TCP_CONN_ABORT_THRESHOLD = 0x13 + TCP_CONN_NOTIFY_THRESHOLD = 0x12 + TCP_CORK = 0x18 + TCP_EXCLBIND = 0x21 + TCP_INIT_CWND = 0x15 + TCP_KEEPALIVE = 0x8 + TCP_KEEPALIVE_ABORT_THRESHOLD = 0x17 + TCP_KEEPALIVE_THRESHOLD = 0x16 + TCP_KEEPCNT = 0x23 + TCP_KEEPIDLE = 0x22 + TCP_KEEPINTVL = 0x24 + TCP_LINGER2 = 0x1c + TCP_MAXSEG = 0x2 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOTIFY_THRESHOLD = 0x10 + TCP_RECVDSTADDR = 0x14 + TCP_RTO_INITIAL = 0x19 + TCP_RTO_MAX = 0x1b + TCP_RTO_MIN = 0x1a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETSF = 0x5410 + TCSETSW = 0x540f + TCXONC = 0x5406 + TIOC = 0x5400 + TIOCCBRK = 0x747a + TIOCCDTR = 0x7478 + TIOCCILOOP = 0x746c + TIOCEXCL = 0x740d + TIOCFLUSH = 0x7410 + TIOCGETC = 0x7412 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x7414 + TIOCGPPS = 0x547d + TIOCGPPSEV = 0x547f + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5469 + TIOCGWINSZ = 0x5468 + TIOCHPCL = 0x7402 + TIOCKBOF = 0x5409 + TIOCKBON = 0x5408 + TIOCLBIC = 0x747e + TIOCLBIS = 0x747f + TIOCLGET = 0x747c + TIOCLSET = 0x747d + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMSET = 0x741a + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x7471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7473 + TIOCREMOTE = 0x741e + TIOCSBRK = 0x747b + TIOCSCTTY = 0x7484 + TIOCSDTR = 0x7479 + TIOCSETC = 0x7411 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIGNAL = 0x741f + TIOCSILOOP = 0x746d + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x7415 + TIOCSPPS = 0x547e + TIOCSSOFTCAR = 0x546a + TIOCSTART = 0x746e + TIOCSTI = 0x7417 + TIOCSTOP = 0x746f + TIOCSWINSZ = 0x5467 + TOSTOP = 0x100 + VCEOF = 0x8 + VCEOL = 0x9 + VDISCARD = 0xd + VDSUSP = 0xb + VEOF = 0x4 + VEOL = 0x5 + VEOL2 = 0x6 + VERASE = 0x2 + VERASE2 = 0x11 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMIN = 0x4 + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTATUS = 0x10 + VSTOP = 0x9 + VSUSP = 0xa + VSWTCH = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WCONTFLG = 0xffff + WCONTINUED = 0x8 + WCOREFLG = 0x80 + WEXITED = 0x1 + WNOHANG = 0x40 + WNOWAIT = 0x80 + WOPTMASK = 0xcf + WRAP = 0x20000 + WSIGMASK = 0x7f + WSTOPFLG = 0x7f + WSTOPPED = 0x4 + WTRAPPED = 0x2 + WUNTRACED = 0x4 + XCASE = 0x4 + XTABS = 0x1800 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x7d) + EADDRNOTAVAIL = syscall.Errno(0x7e) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x7c) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x95) + EBADE = syscall.Errno(0x32) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x51) + EBADMSG = syscall.Errno(0x4d) + EBADR = syscall.Errno(0x33) + EBADRQC = syscall.Errno(0x36) + EBADSLT = syscall.Errno(0x37) + EBFONT = syscall.Errno(0x39) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x2f) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x25) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x82) + ECONNREFUSED = syscall.Errno(0x92) + ECONNRESET = syscall.Errno(0x83) + EDEADLK = syscall.Errno(0x2d) + EDEADLOCK = syscall.Errno(0x38) + EDESTADDRREQ = syscall.Errno(0x60) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x31) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x93) + EHOSTUNREACH = syscall.Errno(0x94) + EIDRM = syscall.Errno(0x24) + EILSEQ = syscall.Errno(0x58) + EINPROGRESS = syscall.Errno(0x96) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x85) + EISDIR = syscall.Errno(0x15) + EL2HLT = syscall.Errno(0x2c) + EL2NSYNC = syscall.Errno(0x26) + EL3HLT = syscall.Errno(0x27) + EL3RST = syscall.Errno(0x28) + ELIBACC = syscall.Errno(0x53) + ELIBBAD = syscall.Errno(0x54) + ELIBEXEC = syscall.Errno(0x57) + ELIBMAX = syscall.Errno(0x56) + ELIBSCN = syscall.Errno(0x55) + ELNRNG = syscall.Errno(0x29) + ELOCKUNMAPPED = syscall.Errno(0x48) + ELOOP = syscall.Errno(0x5a) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x61) + EMULTIHOP = syscall.Errno(0x4a) + ENAMETOOLONG = syscall.Errno(0x4e) + ENETDOWN = syscall.Errno(0x7f) + ENETRESET = syscall.Errno(0x81) + ENETUNREACH = syscall.Errno(0x80) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x35) + ENOBUFS = syscall.Errno(0x84) + ENOCSI = syscall.Errno(0x2b) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x2e) + ENOLINK = syscall.Errno(0x43) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x23) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x63) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x59) + ENOTACTIVE = syscall.Errno(0x49) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x86) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x5d) + ENOTRECOVERABLE = syscall.Errno(0x3b) + ENOTSOCK = syscall.Errno(0x5f) + ENOTSUP = syscall.Errno(0x30) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x50) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x7a) + EOVERFLOW = syscall.Errno(0x4f) + EOWNERDEAD = syscall.Errno(0x3a) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x7b) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x78) + EPROTOTYPE = syscall.Errno(0x62) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x52) + EREMOTE = syscall.Errno(0x42) + ERESTART = syscall.Errno(0x5b) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x8f) + ESOCKTNOSUPPORT = syscall.Errno(0x79) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x97) + ESTRPIPE = syscall.Errno(0x5c) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x91) + ETOOMANYREFS = syscall.Errno(0x90) + ETXTBSY = syscall.Errno(0x1a) + EUNATCH = syscall.Errno(0x2a) + EUSERS = syscall.Errno(0x5e) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x34) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCANCEL = syscall.Signal(0x24) + SIGCHLD = syscall.Signal(0x12) + SIGCLD = syscall.Signal(0x12) + SIGCONT = syscall.Signal(0x19) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGFREEZE = syscall.Signal(0x22) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x29) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x16) + SIGIOT = syscall.Signal(0x6) + SIGJVM1 = syscall.Signal(0x27) + SIGJVM2 = syscall.Signal(0x28) + SIGKILL = syscall.Signal(0x9) + SIGLOST = syscall.Signal(0x25) + SIGLWP = syscall.Signal(0x21) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x16) + SIGPROF = syscall.Signal(0x1d) + SIGPWR = syscall.Signal(0x13) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x17) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHAW = syscall.Signal(0x23) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x18) + SIGTTIN = syscall.Signal(0x1a) + SIGTTOU = syscall.Signal(0x1b) + SIGURG = syscall.Signal(0x15) + SIGUSR1 = syscall.Signal(0x10) + SIGUSR2 = syscall.Signal(0x11) + SIGVTALRM = syscall.Signal(0x1c) + SIGWAITING = syscall.Signal(0x20) + SIGWINCH = syscall.Signal(0x14) + SIGXCPU = syscall.Signal(0x1e) + SIGXFSZ = syscall.Signal(0x1f) + SIGXRES = syscall.Signal(0x26) +) + +// Error table +var errors = [...]string{ + 1: "not owner", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "I/O error", + 6: "no such device or address", + 7: "arg list too long", + 8: "exec format error", + 9: "bad file number", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "not enough space", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "file table overflow", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "argument out of domain", + 34: "result too large", + 35: "no message of desired type", + 36: "identifier removed", + 37: "channel number out of range", + 38: "level 2 not synchronized", + 39: "level 3 halted", + 40: "level 3 reset", + 41: "link number out of range", + 42: "protocol driver not attached", + 43: "no CSI structure available", + 44: "level 2 halted", + 45: "deadlock situation detected/avoided", + 46: "no record locks available", + 47: "operation canceled", + 48: "operation not supported", + 49: "disc quota exceeded", + 50: "bad exchange descriptor", + 51: "bad request descriptor", + 52: "message tables full", + 53: "anode table overflow", + 54: "bad request code", + 55: "invalid slot", + 56: "file locking deadlock", + 57: "bad font file format", + 58: "owner of the lock died", + 59: "lock is not recoverable", + 60: "not a stream device", + 61: "no data available", + 62: "timer expired", + 63: "out of stream resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "locked lock was unmapped ", + 73: "facility is not active", + 74: "multihop attempted", + 77: "not a data message", + 78: "file name too long", + 79: "value too large for defined data type", + 80: "name not unique on network", + 81: "file descriptor in bad state", + 82: "remote address changed", + 83: "can not access a needed shared library", + 84: "accessing a corrupted shared library", + 85: ".lib section in a.out corrupted", + 86: "attempting to link in more shared libraries than system limit", + 87: "can not exec a shared library directly", + 88: "illegal byte sequence", + 89: "operation not applicable", + 90: "number of symbolic links encountered during path name traversal exceeds MAXSYMLINKS", + 91: "error 91", + 92: "error 92", + 93: "directory not empty", + 94: "too many users", + 95: "socket operation on non-socket", + 96: "destination address required", + 97: "message too long", + 98: "protocol wrong type for socket", + 99: "option not supported by protocol", + 120: "protocol not supported", + 121: "socket type not supported", + 122: "operation not supported on transport endpoint", + 123: "protocol family not supported", + 124: "address family not supported by protocol family", + 125: "address already in use", + 126: "cannot assign requested address", + 127: "network is down", + 128: "network is unreachable", + 129: "network dropped connection because of reset", + 130: "software caused connection abort", + 131: "connection reset by peer", + 132: "no buffer space available", + 133: "transport endpoint is already connected", + 134: "transport endpoint is not connected", + 143: "cannot send after socket shutdown", + 144: "too many references: cannot splice", + 145: "connection timed out", + 146: "connection refused", + 147: "host is down", + 148: "no route to host", + 149: "operation already in progress", + 150: "operation now in progress", + 151: "stale NFS file handle", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal Instruction", + 5: "trace/Breakpoint Trap", + 6: "abort", + 7: "emulation Trap", + 8: "arithmetic Exception", + 9: "killed", + 10: "bus Error", + 11: "segmentation Fault", + 12: "bad System Call", + 13: "broken Pipe", + 14: "alarm Clock", + 15: "terminated", + 16: "user Signal 1", + 17: "user Signal 2", + 18: "child Status Changed", + 19: "power-Fail/Restart", + 20: "window Size Change", + 21: "urgent Socket Condition", + 22: "pollable Event", + 23: "stopped (signal)", + 24: "stopped (user)", + 25: "continued", + 26: "stopped (tty input)", + 27: "stopped (tty output)", + 28: "virtual Timer Expired", + 29: "profiling Timer Expired", + 30: "cpu Limit Exceeded", + 31: "file Size Limit Exceeded", + 32: "no runnable lwp", + 33: "inter-lwp signal", + 34: "checkpoint Freeze", + 35: "checkpoint Thaw", + 36: "thread Cancellation", + 37: "resource Lost", + 38: "resource Control Exceeded", + 39: "reserved for JVM 1", + 40: "reserved for JVM 2", + 41: "information Request", +} diff --git a/vendor/golang.org/x/sys/unix/zptrace386_linux.go b/vendor/golang.org/x/sys/unix/zptrace386_linux.go new file mode 100644 index 0000000..2d21c49 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace386_linux.go @@ -0,0 +1,80 @@ +// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT. + +// +build linux +// +build 386 amd64 + +package unix + +import "unsafe" + +// PtraceRegs386 is the registers used by 386 binaries. +type PtraceRegs386 struct { + Ebx int32 + Ecx int32 + Edx int32 + Esi int32 + Edi int32 + Ebp int32 + Eax int32 + Xds int32 + Xes int32 + Xfs int32 + Xgs int32 + Orig_eax int32 + Eip int32 + Xcs int32 + Eflags int32 + Esp int32 + Xss int32 +} + +// PtraceGetRegs386 fetches the registers used by 386 binaries. +func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegs386 sets the registers used by 386 binaries. +func PtraceSetRegs386(pid int, regs *PtraceRegs386) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsAmd64 is the registers used by amd64 binaries. +type PtraceRegsAmd64 struct { + R15 uint64 + R14 uint64 + R13 uint64 + R12 uint64 + Rbp uint64 + Rbx uint64 + R11 uint64 + R10 uint64 + R9 uint64 + R8 uint64 + Rax uint64 + Rcx uint64 + Rdx uint64 + Rsi uint64 + Rdi uint64 + Orig_rax uint64 + Rip uint64 + Cs uint64 + Eflags uint64 + Rsp uint64 + Ss uint64 + Fs_base uint64 + Gs_base uint64 + Ds uint64 + Es uint64 + Fs uint64 + Gs uint64 +} + +// PtraceGetRegsAmd64 fetches the registers used by amd64 binaries. +func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsAmd64 sets the registers used by amd64 binaries. +func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracearm_linux.go b/vendor/golang.org/x/sys/unix/zptracearm_linux.go new file mode 100644 index 0000000..faf23bb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptracearm_linux.go @@ -0,0 +1,41 @@ +// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT. + +// +build linux +// +build arm arm64 + +package unix + +import "unsafe" + +// PtraceRegsArm is the registers used by arm binaries. +type PtraceRegsArm struct { + Uregs [18]uint32 +} + +// PtraceGetRegsArm fetches the registers used by arm binaries. +func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsArm sets the registers used by arm binaries. +func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsArm64 is the registers used by arm64 binaries. +type PtraceRegsArm64 struct { + Regs [31]uint64 + Sp uint64 + Pc uint64 + Pstate uint64 +} + +// PtraceGetRegsArm64 fetches the registers used by arm64 binaries. +func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsArm64 sets the registers used by arm64 binaries. +func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracemips_linux.go b/vendor/golang.org/x/sys/unix/zptracemips_linux.go new file mode 100644 index 0000000..c431131 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptracemips_linux.go @@ -0,0 +1,50 @@ +// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT. + +// +build linux +// +build mips mips64 + +package unix + +import "unsafe" + +// PtraceRegsMips is the registers used by mips binaries. +type PtraceRegsMips struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips fetches the registers used by mips binaries. +func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips sets the registers used by mips binaries. +func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsMips64 is the registers used by mips64 binaries. +type PtraceRegsMips64 struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips64 fetches the registers used by mips64 binaries. +func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips64 sets the registers used by mips64 binaries. +func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go new file mode 100644 index 0000000..dc3d6d3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go @@ -0,0 +1,50 @@ +// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT. + +// +build linux +// +build mipsle mips64le + +package unix + +import "unsafe" + +// PtraceRegsMipsle is the registers used by mipsle binaries. +type PtraceRegsMipsle struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMipsle fetches the registers used by mipsle binaries. +func PtraceGetRegsMipsle(pid int, regsout *PtraceRegsMipsle) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMipsle sets the registers used by mipsle binaries. +func PtraceSetRegsMipsle(pid int, regs *PtraceRegsMipsle) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsMips64le is the registers used by mips64le binaries. +type PtraceRegsMips64le struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips64le fetches the registers used by mips64le binaries. +func PtraceGetRegsMips64le(pid int, regsout *PtraceRegsMips64le) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips64le sets the registers used by mips64le binaries. +func PtraceSetRegsMips64le(pid int, regs *PtraceRegsMips64le) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go new file mode 100644 index 0000000..4c9f727 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -0,0 +1,1635 @@ +// mksyscall.pl -l32 -tags darwin,386 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build darwin,386 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kill(pid int, signum int, posix int) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exchangedata(path1 string, path2 string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path1) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(path2) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setprivexec(flag int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { + r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + sec = int32(r0) + usec = int32(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go new file mode 100644 index 0000000..2562377 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -0,0 +1,1635 @@ +// mksyscall.pl -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build darwin,amd64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kill(pid int, signum int, posix int) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exchangedata(path1 string, path2 string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path1) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(path2) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setprivexec(flag int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { + r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + sec = int64(r0) + usec = int32(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go new file mode 100644 index 0000000..4ae787e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -0,0 +1,1635 @@ +// mksyscall.pl -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build darwin,arm + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kill(pid int, signum int, posix int) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exchangedata(path1 string, path2 string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path1) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(path2) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setprivexec(flag int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { + r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + sec = int32(r0) + usec = int32(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go new file mode 100644 index 0000000..14ed688 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -0,0 +1,1635 @@ +// mksyscall.pl -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build darwin,arm64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kill(pid int, signum int, posix int) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exchangedata(path1 string, path2 string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path1) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(path2) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setprivexec(flag int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { + r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + sec = int64(r0) + usec = int32(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go new file mode 100644 index 0000000..0d3b26d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -0,0 +1,1493 @@ +// mksyscall.pl -dragonfly -tags dragonfly,amd64 syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build dragonfly,amd64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func extpread(fd int, p []byte, flags int, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EXTPREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EXTPWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go new file mode 100644 index 0000000..a86434a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -0,0 +1,1937 @@ +// mksyscall.pl -l32 -tags freebsd,386 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build freebsd,386 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CapEnter() (err error) { + _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsLimit(fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go new file mode 100644 index 0000000..040e2f7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -0,0 +1,1937 @@ +// mksyscall.pl -tags freebsd,amd64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_amd64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build freebsd,amd64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CapEnter() (err error) { + _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsLimit(fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go new file mode 100644 index 0000000..cddc5e8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -0,0 +1,1937 @@ +// mksyscall.pl -l32 -arm -tags freebsd,arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build freebsd,arm + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CapEnter() (err error) { + _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsLimit(fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go new file mode 100644 index 0000000..ef9602c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -0,0 +1,1994 @@ +// mksyscall.pl -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,386 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64_64, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN32, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE64, uintptr(fd), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID32, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID32, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID32, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID32, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN32, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go new file mode 100644 index 0000000..63054b3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -0,0 +1,2187 @@ +// mksyscall.pl -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,amd64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NEWFSTATAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go new file mode 100644 index 0000000..8b10ee1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -0,0 +1,2096 @@ +// mksyscall.pl -l32 -arm -tags linux,arm syscall_linux.go syscall_linux_arm.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,arm + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN32, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID32, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID32, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID32, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID32, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN32, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE64, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_UGETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go new file mode 100644 index 0000000..8f276d6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -0,0 +1,2044 @@ +// mksyscall.pl -tags linux,arm64 syscall_linux.go syscall_linux_arm64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,arm64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go new file mode 100644 index 0000000..61169b3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -0,0 +1,2152 @@ +// mksyscall.pl -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,mips + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(int64(r0)<<32 | int64(r1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE64, uintptr(fd), 0, uintptr(length>>32), uintptr(length), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset>>32), uintptr(offset)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset>>32), uintptr(offset)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(int64(r0)<<32 | int64(r1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall9(SYS_SYNC_FILE_RANGE, uintptr(fd), 0, uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length>>32), uintptr(length), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go new file mode 100644 index 0000000..4cb59b4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -0,0 +1,2135 @@ +// mksyscall.pl -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,mips64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NEWFSTATAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, st *stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(st)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, st *stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func stat(path string, st *stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go new file mode 100644 index 0000000..0b547ae --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -0,0 +1,2135 @@ +// mksyscall.pl -tags linux,mips64le syscall_linux.go syscall_linux_mips64x.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,mips64le + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NEWFSTATAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, st *stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(st)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, st *stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func stat(path string, st *stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go new file mode 100644 index 0000000..cd94d3a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -0,0 +1,2152 @@ +// mksyscall.pl -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,mipsle + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE64, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall9(SYS_SYNC_FILE_RANGE, uintptr(fd), 0, uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go new file mode 100644 index 0000000..cdad555 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -0,0 +1,2198 @@ +// mksyscall.pl -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,ppc64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NEWFSTATAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_UGETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go new file mode 100644 index 0000000..38f4e44 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -0,0 +1,2198 @@ +// mksyscall.pl -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,ppc64le + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NEWFSTATAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_UGETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go new file mode 100644 index 0000000..c443baf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -0,0 +1,1978 @@ +// mksyscall.pl -tags linux,s390x syscall_linux.go syscall_linux_s390x.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,s390x + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NEWFSTATAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go new file mode 100644 index 0000000..2dd9843 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -0,0 +1,1833 @@ +// mksyscall.pl -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build linux,sparc64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + Syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go new file mode 100644 index 0000000..384b2c6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -0,0 +1,1399 @@ +// mksyscall.pl -l32 -netbsd -tags netbsd,386 syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build netbsd,386 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (fd1 int, fd2 int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + fd1 = int(r0) + fd2 = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go new file mode 100644 index 0000000..98ad07f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -0,0 +1,1399 @@ +// mksyscall.pl -netbsd -tags netbsd,amd64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build netbsd,amd64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (fd1 int, fd2 int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + fd1 = int(r0) + fd2 = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go new file mode 100644 index 0000000..d9f3d64 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -0,0 +1,1399 @@ +// mksyscall.pl -l32 -netbsd -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build netbsd,arm + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe() (fd1 int, fd2 int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + fd1 = int(r0) + fd2 = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go new file mode 100644 index 0000000..91e38bd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -0,0 +1,1457 @@ +// mksyscall.pl -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build openbsd,386 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go new file mode 100644 index 0000000..eebf200 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -0,0 +1,1457 @@ +// mksyscall.pl -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build openbsd,amd64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go new file mode 100644 index 0000000..6a7fe67 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -0,0 +1,1457 @@ +// mksyscall.pl -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build openbsd,arm + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go new file mode 100644 index 0000000..5e88619 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -0,0 +1,1669 @@ +// mksyscall_solaris.pl -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build solaris,amd64 + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_pipe pipe "libc.so" +//go:cgo_import_dynamic libc_getsockname getsockname "libsocket.so" +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" +//go:cgo_import_dynamic libc_gethostname gethostname "libc.so" +//go:cgo_import_dynamic libc_utimes utimes "libc.so" +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" +//go:cgo_import_dynamic libc_futimesat futimesat "libc.so" +//go:cgo_import_dynamic libc_accept accept "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" +//go:cgo_import_dynamic libc_acct acct "libc.so" +//go:cgo_import_dynamic libc___makedev __makedev "libc.so" +//go:cgo_import_dynamic libc___major __major "libc.so" +//go:cgo_import_dynamic libc___minor __minor "libc.so" +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" +//go:cgo_import_dynamic libc_poll poll "libc.so" +//go:cgo_import_dynamic libc_access access "libc.so" +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" +//go:cgo_import_dynamic libc_chdir chdir "libc.so" +//go:cgo_import_dynamic libc_chmod chmod "libc.so" +//go:cgo_import_dynamic libc_chown chown "libc.so" +//go:cgo_import_dynamic libc_chroot chroot "libc.so" +//go:cgo_import_dynamic libc_close close "libc.so" +//go:cgo_import_dynamic libc_creat creat "libc.so" +//go:cgo_import_dynamic libc_dup dup "libc.so" +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" +//go:cgo_import_dynamic libc_exit exit "libc.so" +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" +//go:cgo_import_dynamic libc_fchown fchown "libc.so" +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" +//go:cgo_import_dynamic libc_fdatasync fdatasync "libc.so" +//go:cgo_import_dynamic libc_flock flock "libc.so" +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" +//go:cgo_import_dynamic libc_fstat fstat "libc.so" +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" +//go:cgo_import_dynamic libc_fstatvfs fstatvfs "libc.so" +//go:cgo_import_dynamic libc_getdents getdents "libc.so" +//go:cgo_import_dynamic libc_getgid getgid "libc.so" +//go:cgo_import_dynamic libc_getpid getpid "libc.so" +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" +//go:cgo_import_dynamic libc_getegid getegid "libc.so" +//go:cgo_import_dynamic libc_getppid getppid "libc.so" +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" +//go:cgo_import_dynamic libc_getuid getuid "libc.so" +//go:cgo_import_dynamic libc_kill kill "libc.so" +//go:cgo_import_dynamic libc_lchown lchown "libc.so" +//go:cgo_import_dynamic libc_link link "libc.so" +//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc_lstat lstat "libc.so" +//go:cgo_import_dynamic libc_madvise madvise "libc.so" +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" +//go:cgo_import_dynamic libc_mknod mknod "libc.so" +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" +//go:cgo_import_dynamic libc_mlock mlock "libc.so" +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" +//go:cgo_import_dynamic libc_msync msync "libc.so" +//go:cgo_import_dynamic libc_munlock munlock "libc.so" +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" +//go:cgo_import_dynamic libc_open open "libc.so" +//go:cgo_import_dynamic libc_openat openat "libc.so" +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" +//go:cgo_import_dynamic libc_pause pause "libc.so" +//go:cgo_import_dynamic libc_pread pread "libc.so" +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" +//go:cgo_import_dynamic libc_read read "libc.so" +//go:cgo_import_dynamic libc_readlink readlink "libc.so" +//go:cgo_import_dynamic libc_rename rename "libc.so" +//go:cgo_import_dynamic libc_renameat renameat "libc.so" +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" +//go:cgo_import_dynamic libc_lseek lseek "libc.so" +//go:cgo_import_dynamic libc_select select "libc.so" +//go:cgo_import_dynamic libc_setegid setegid "libc.so" +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" +//go:cgo_import_dynamic libc_setgid setgid "libc.so" +//go:cgo_import_dynamic libc_sethostname sethostname "libc.so" +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" +//go:cgo_import_dynamic libc_setregid setregid "libc.so" +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" +//go:cgo_import_dynamic libc_setsid setsid "libc.so" +//go:cgo_import_dynamic libc_setuid setuid "libc.so" +//go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" +//go:cgo_import_dynamic libc_stat stat "libc.so" +//go:cgo_import_dynamic libc_statvfs statvfs "libc.so" +//go:cgo_import_dynamic libc_symlink symlink "libc.so" +//go:cgo_import_dynamic libc_sync sync "libc.so" +//go:cgo_import_dynamic libc_times times "libc.so" +//go:cgo_import_dynamic libc_truncate truncate "libc.so" +//go:cgo_import_dynamic libc_fsync fsync "libc.so" +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" +//go:cgo_import_dynamic libc_umask umask "libc.so" +//go:cgo_import_dynamic libc_uname uname "libc.so" +//go:cgo_import_dynamic libc_umount umount "libc.so" +//go:cgo_import_dynamic libc_unlink unlink "libc.so" +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" +//go:cgo_import_dynamic libc_ustat ustat "libc.so" +//go:cgo_import_dynamic libc_utime utime "libc.so" +//go:cgo_import_dynamic libc___xnet_bind __xnet_bind "libsocket.so" +//go:cgo_import_dynamic libc___xnet_connect __xnet_connect "libsocket.so" +//go:cgo_import_dynamic libc_mmap mmap "libc.so" +//go:cgo_import_dynamic libc_munmap munmap "libc.so" +//go:cgo_import_dynamic libc___xnet_sendto __xnet_sendto "libsocket.so" +//go:cgo_import_dynamic libc___xnet_socket __xnet_socket "libsocket.so" +//go:cgo_import_dynamic libc___xnet_socketpair __xnet_socketpair "libsocket.so" +//go:cgo_import_dynamic libc_write write "libc.so" +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" + +//go:linkname procpipe libc_pipe +//go:linkname procgetsockname libc_getsockname +//go:linkname procGetcwd libc_getcwd +//go:linkname procgetgroups libc_getgroups +//go:linkname procsetgroups libc_setgroups +//go:linkname procwait4 libc_wait4 +//go:linkname procgethostname libc_gethostname +//go:linkname procutimes libc_utimes +//go:linkname procutimensat libc_utimensat +//go:linkname procfcntl libc_fcntl +//go:linkname procfutimesat libc_futimesat +//go:linkname procaccept libc_accept +//go:linkname proc__xnet_recvmsg libc___xnet_recvmsg +//go:linkname proc__xnet_sendmsg libc___xnet_sendmsg +//go:linkname procacct libc_acct +//go:linkname proc__makedev libc___makedev +//go:linkname proc__major libc___major +//go:linkname proc__minor libc___minor +//go:linkname procioctl libc_ioctl +//go:linkname procpoll libc_poll +//go:linkname procAccess libc_access +//go:linkname procAdjtime libc_adjtime +//go:linkname procChdir libc_chdir +//go:linkname procChmod libc_chmod +//go:linkname procChown libc_chown +//go:linkname procChroot libc_chroot +//go:linkname procClose libc_close +//go:linkname procCreat libc_creat +//go:linkname procDup libc_dup +//go:linkname procDup2 libc_dup2 +//go:linkname procExit libc_exit +//go:linkname procFchdir libc_fchdir +//go:linkname procFchmod libc_fchmod +//go:linkname procFchmodat libc_fchmodat +//go:linkname procFchown libc_fchown +//go:linkname procFchownat libc_fchownat +//go:linkname procFdatasync libc_fdatasync +//go:linkname procFlock libc_flock +//go:linkname procFpathconf libc_fpathconf +//go:linkname procFstat libc_fstat +//go:linkname procFstatat libc_fstatat +//go:linkname procFstatvfs libc_fstatvfs +//go:linkname procGetdents libc_getdents +//go:linkname procGetgid libc_getgid +//go:linkname procGetpid libc_getpid +//go:linkname procGetpgid libc_getpgid +//go:linkname procGetpgrp libc_getpgrp +//go:linkname procGeteuid libc_geteuid +//go:linkname procGetegid libc_getegid +//go:linkname procGetppid libc_getppid +//go:linkname procGetpriority libc_getpriority +//go:linkname procGetrlimit libc_getrlimit +//go:linkname procGetrusage libc_getrusage +//go:linkname procGettimeofday libc_gettimeofday +//go:linkname procGetuid libc_getuid +//go:linkname procKill libc_kill +//go:linkname procLchown libc_lchown +//go:linkname procLink libc_link +//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname procLstat libc_lstat +//go:linkname procMadvise libc_madvise +//go:linkname procMkdir libc_mkdir +//go:linkname procMkdirat libc_mkdirat +//go:linkname procMkfifo libc_mkfifo +//go:linkname procMkfifoat libc_mkfifoat +//go:linkname procMknod libc_mknod +//go:linkname procMknodat libc_mknodat +//go:linkname procMlock libc_mlock +//go:linkname procMlockall libc_mlockall +//go:linkname procMprotect libc_mprotect +//go:linkname procMsync libc_msync +//go:linkname procMunlock libc_munlock +//go:linkname procMunlockall libc_munlockall +//go:linkname procNanosleep libc_nanosleep +//go:linkname procOpen libc_open +//go:linkname procOpenat libc_openat +//go:linkname procPathconf libc_pathconf +//go:linkname procPause libc_pause +//go:linkname procPread libc_pread +//go:linkname procPwrite libc_pwrite +//go:linkname procread libc_read +//go:linkname procReadlink libc_readlink +//go:linkname procRename libc_rename +//go:linkname procRenameat libc_renameat +//go:linkname procRmdir libc_rmdir +//go:linkname proclseek libc_lseek +//go:linkname procSelect libc_select +//go:linkname procSetegid libc_setegid +//go:linkname procSeteuid libc_seteuid +//go:linkname procSetgid libc_setgid +//go:linkname procSethostname libc_sethostname +//go:linkname procSetpgid libc_setpgid +//go:linkname procSetpriority libc_setpriority +//go:linkname procSetregid libc_setregid +//go:linkname procSetreuid libc_setreuid +//go:linkname procSetrlimit libc_setrlimit +//go:linkname procSetsid libc_setsid +//go:linkname procSetuid libc_setuid +//go:linkname procshutdown libc_shutdown +//go:linkname procStat libc_stat +//go:linkname procStatvfs libc_statvfs +//go:linkname procSymlink libc_symlink +//go:linkname procSync libc_sync +//go:linkname procTimes libc_times +//go:linkname procTruncate libc_truncate +//go:linkname procFsync libc_fsync +//go:linkname procFtruncate libc_ftruncate +//go:linkname procUmask libc_umask +//go:linkname procUname libc_uname +//go:linkname procumount libc_umount +//go:linkname procUnlink libc_unlink +//go:linkname procUnlinkat libc_unlinkat +//go:linkname procUstat libc_ustat +//go:linkname procUtime libc_utime +//go:linkname proc__xnet_bind libc___xnet_bind +//go:linkname proc__xnet_connect libc___xnet_connect +//go:linkname procmmap libc_mmap +//go:linkname procmunmap libc_munmap +//go:linkname proc__xnet_sendto libc___xnet_sendto +//go:linkname proc__xnet_socket libc___xnet_socket +//go:linkname proc__xnet_socketpair libc___xnet_socketpair +//go:linkname procwrite libc_write +//go:linkname proc__xnet_getsockopt libc___xnet_getsockopt +//go:linkname procgetpeername libc_getpeername +//go:linkname procsetsockopt libc_setsockopt +//go:linkname procrecvfrom libc_recvfrom + +var ( + procpipe, + procgetsockname, + procGetcwd, + procgetgroups, + procsetgroups, + procwait4, + procgethostname, + procutimes, + procutimensat, + procfcntl, + procfutimesat, + procaccept, + proc__xnet_recvmsg, + proc__xnet_sendmsg, + procacct, + proc__makedev, + proc__major, + proc__minor, + procioctl, + procpoll, + procAccess, + procAdjtime, + procChdir, + procChmod, + procChown, + procChroot, + procClose, + procCreat, + procDup, + procDup2, + procExit, + procFchdir, + procFchmod, + procFchmodat, + procFchown, + procFchownat, + procFdatasync, + procFlock, + procFpathconf, + procFstat, + procFstatat, + procFstatvfs, + procGetdents, + procGetgid, + procGetpid, + procGetpgid, + procGetpgrp, + procGeteuid, + procGetegid, + procGetppid, + procGetpriority, + procGetrlimit, + procGetrusage, + procGettimeofday, + procGetuid, + procKill, + procLchown, + procLink, + proc__xnet_llisten, + procLstat, + procMadvise, + procMkdir, + procMkdirat, + procMkfifo, + procMkfifoat, + procMknod, + procMknodat, + procMlock, + procMlockall, + procMprotect, + procMsync, + procMunlock, + procMunlockall, + procNanosleep, + procOpen, + procOpenat, + procPathconf, + procPause, + procPread, + procPwrite, + procread, + procReadlink, + procRename, + procRenameat, + procRmdir, + proclseek, + procSelect, + procSetegid, + procSeteuid, + procSetgid, + procSethostname, + procSetpgid, + procSetpriority, + procSetregid, + procSetreuid, + procSetrlimit, + procSetsid, + procSetuid, + procshutdown, + procStat, + procStatvfs, + procSymlink, + procSync, + procTimes, + procTruncate, + procFsync, + procFtruncate, + procUmask, + procUname, + procumount, + procUnlink, + procUnlinkat, + procUstat, + procUtime, + proc__xnet_bind, + proc__xnet_connect, + procmmap, + procmunmap, + proc__xnet_sendto, + proc__xnet_socket, + proc__xnet_socketpair, + procwrite, + proc__xnet_getsockopt, + procgetpeername, + procsetsockopt, + procrecvfrom syscallFunc +) + +func pipe(p *[2]_C_int) (n int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Getcwd(buf []byte) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int32(r0) + if e1 != 0 { + err = e1 + } + return +} + +func gethostname(buf []byte) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + val = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func acct(path *byte) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func __makedev(version int, major uint, minor uint) (val uint64) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__makedev)), 3, uintptr(version), uintptr(major), uintptr(minor), 0, 0, 0) + val = uint64(r0) + return +} + +func __major(version int, dev uint64) (val uint) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__major)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) + val = uint(r0) + return +} + +func __minor(version int, dev uint64) (val uint) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__minor)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) + val = uint(r0) + return +} + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAccess)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChroot)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Close(fd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Creat(path string, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0) + nfd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Exit(code int) { + sysvicall6(uintptr(unsafe.Pointer(&procExit)), 1, uintptr(code), 0, 0, 0, 0, 0) + return +} + +func Fchdir(fd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fdatasync(fd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Flock(fd int, how int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, uintptr(fd), uintptr(how), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) + val = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetdents)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Getgid() (gid int) { + r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetgid)), 0, 0, 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +func Getpid() (pid int) { + r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpid)), 0, 0, 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + pgid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Getpgrp() (pgid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0) + pgid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Geteuid() (euid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGeteuid)), 0, 0, 0, 0, 0, 0, 0) + euid = int(r0) + return +} + +func Getegid() (egid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGetegid)), 0, 0, 0, 0, 0, 0, 0) + egid = int(r0) + return +} + +func Getppid() (ppid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGetppid)), 0, 0, 0, 0, 0, 0, 0) + ppid = int(r0) + return +} + +func Getpriority(which int, who int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Getuid() (uid int) { + r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetuid)), 0, 0, 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Listen(s int, backlog int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Madvise(b []byte, advice int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMadvise)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(advice), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknod)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mlock(b []byte) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mlockall(flags int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Mprotect(b []byte, prot int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Msync(b []byte, flags int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Munlock(b []byte) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Munlockall() (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpen)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPathconf)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0, 0, 0, 0) + val = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Pause() (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func read(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + if len(buf) > 0 { + _p1 = &buf[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procReadlink)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setegid(egid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Seteuid(euid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setgid(gid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Sethostname(p []byte) (err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Setsid() (pid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Setuid(uid int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Shutdown(s int, how int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Statvfs(path string, vfsstat *Statvfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStatvfs)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSymlink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Sync() (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procTruncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Fsync(fd int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Umask(mask int) (oldmask int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procUmask)), 1, uintptr(mask), 0, 0, 0, 0, 0) + oldmask = int(r0) + return +} + +func Uname(buf *Utsname) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = e1 + } + return +} + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendto)), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = e1 + } + return +} + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func write(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = e1 + } + return +} + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = e1 + } + return +} + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go new file mode 100644 index 0000000..83bb935 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -0,0 +1,270 @@ +// mksysctl_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.arandom", []_C_int{1, 37}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.random", []_C_int{1, 31}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.maxptys", []_C_int{1, 44, 6}}, + {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.userasymcrypto", []_C_int{1, 60}}, + {"kern.usercrypto", []_C_int{1, 52}}, + {"kern.usermount", []_C_int{1, 30}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.vnode", []_C_int{1, 13}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, + {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, + {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, + {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go new file mode 100644 index 0000000..83bb935 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -0,0 +1,270 @@ +// mksysctl_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.arandom", []_C_int{1, 37}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.random", []_C_int{1, 31}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.maxptys", []_C_int{1, 44, 6}}, + {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.userasymcrypto", []_C_int{1, 60}}, + {"kern.usercrypto", []_C_int{1, 52}}, + {"kern.usermount", []_C_int{1, 30}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.vnode", []_C_int{1, 13}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, + {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, + {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, + {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go new file mode 100644 index 0000000..83bb935 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -0,0 +1,270 @@ +// mksysctl_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.arandom", []_C_int{1, 37}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.random", []_C_int{1, 31}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.maxptys", []_C_int{1, 44, 6}}, + {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.userasymcrypto", []_C_int{1, 60}}, + {"kern.usercrypto", []_C_int{1, 52}}, + {"kern.usermount", []_C_int{1, 30}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.vnode", []_C_int{1, 13}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, + {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, + {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, + {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go new file mode 100644 index 0000000..d1d36da --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go @@ -0,0 +1,436 @@ +// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk/usr/include/sys/syscall.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,darwin + +package unix + +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TYPEFILTER = 177 + SYS_KDEBUG_TRACE_STRING = 178 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_THREAD_SELFCOUNTS = 186 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS_KEVENT_QOS = 374 + SYS_KEVENT_ID = 375 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS_PSELECT = 394 + SYS_PSELECT_NOCANCEL = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_USRCTL = 445 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_CLONEFILEAT = 462 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAMEATX_NP = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_NETAGENT_TRIGGER = 490 + SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 + SYS_MICROSTACKSHOT = 492 + SYS_GRAB_PGO_DATA = 493 + SYS_PERSONA = 494 + SYS_WORK_INTERVAL_CTL = 499 + SYS_GETENTROPY = 500 + SYS_NECP_OPEN = 501 + SYS_NECP_CLIENT_ACTION = 502 + SYS___NEXUS_OPEN = 503 + SYS___NEXUS_REGISTER = 504 + SYS___NEXUS_DEREGISTER = 505 + SYS___NEXUS_CREATE = 506 + SYS___NEXUS_DESTROY = 507 + SYS___NEXUS_GET_OPT = 508 + SYS___NEXUS_SET_OPT = 509 + SYS___CHANNEL_OPEN = 510 + SYS___CHANNEL_GET_INFO = 511 + SYS___CHANNEL_SYNC = 512 + SYS___CHANNEL_GET_OPT = 513 + SYS___CHANNEL_SET_OPT = 514 + SYS_ULOCK_WAIT = 515 + SYS_ULOCK_WAKE = 516 + SYS_FCLONEFILEAT = 517 + SYS_FS_SNAPSHOT = 518 + SYS_TERMINATE_WITH_PAYLOAD = 520 + SYS_ABORT_WITH_PAYLOAD = 521 + SYS_NECP_SESSION_OPEN = 522 + SYS_NECP_SESSION_ACTION = 523 + SYS_SETATTRLISTAT = 524 + SYS_NET_QOS_GUIDELINE = 525 + SYS_FMOUNT = 526 + SYS_NTP_ADJTIME = 527 + SYS_NTP_GETTIME = 528 + SYS_OS_FAULT_WITH_PAYLOAD = 529 + SYS_MAXSYSCALL = 530 + SYS_INVALID = 63 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go new file mode 100644 index 0000000..e35de41 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -0,0 +1,436 @@ +// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk/usr/include/sys/syscall.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,darwin + +package unix + +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TYPEFILTER = 177 + SYS_KDEBUG_TRACE_STRING = 178 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_THREAD_SELFCOUNTS = 186 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS_KEVENT_QOS = 374 + SYS_KEVENT_ID = 375 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS_PSELECT = 394 + SYS_PSELECT_NOCANCEL = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_USRCTL = 445 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_CLONEFILEAT = 462 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAMEATX_NP = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_NETAGENT_TRIGGER = 490 + SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 + SYS_MICROSTACKSHOT = 492 + SYS_GRAB_PGO_DATA = 493 + SYS_PERSONA = 494 + SYS_WORK_INTERVAL_CTL = 499 + SYS_GETENTROPY = 500 + SYS_NECP_OPEN = 501 + SYS_NECP_CLIENT_ACTION = 502 + SYS___NEXUS_OPEN = 503 + SYS___NEXUS_REGISTER = 504 + SYS___NEXUS_DEREGISTER = 505 + SYS___NEXUS_CREATE = 506 + SYS___NEXUS_DESTROY = 507 + SYS___NEXUS_GET_OPT = 508 + SYS___NEXUS_SET_OPT = 509 + SYS___CHANNEL_OPEN = 510 + SYS___CHANNEL_GET_INFO = 511 + SYS___CHANNEL_SYNC = 512 + SYS___CHANNEL_GET_OPT = 513 + SYS___CHANNEL_SET_OPT = 514 + SYS_ULOCK_WAIT = 515 + SYS_ULOCK_WAKE = 516 + SYS_FCLONEFILEAT = 517 + SYS_FS_SNAPSHOT = 518 + SYS_TERMINATE_WITH_PAYLOAD = 520 + SYS_ABORT_WITH_PAYLOAD = 521 + SYS_NECP_SESSION_OPEN = 522 + SYS_NECP_SESSION_ACTION = 523 + SYS_SETATTRLISTAT = 524 + SYS_NET_QOS_GUIDELINE = 525 + SYS_FMOUNT = 526 + SYS_NTP_ADJTIME = 527 + SYS_NTP_GETTIME = 528 + SYS_OS_FAULT_WITH_PAYLOAD = 529 + SYS_MAXSYSCALL = 530 + SYS_INVALID = 63 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go new file mode 100644 index 0000000..f2df27d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go @@ -0,0 +1,436 @@ +// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,darwin + +package unix + +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TYPEFILTER = 177 + SYS_KDEBUG_TRACE_STRING = 178 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_THREAD_SELFCOUNTS = 186 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS_KEVENT_QOS = 374 + SYS_KEVENT_ID = 375 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS_PSELECT = 394 + SYS_PSELECT_NOCANCEL = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_USRCTL = 445 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_CLONEFILEAT = 462 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAMEATX_NP = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_NETAGENT_TRIGGER = 490 + SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 + SYS_MICROSTACKSHOT = 492 + SYS_GRAB_PGO_DATA = 493 + SYS_PERSONA = 494 + SYS_WORK_INTERVAL_CTL = 499 + SYS_GETENTROPY = 500 + SYS_NECP_OPEN = 501 + SYS_NECP_CLIENT_ACTION = 502 + SYS___NEXUS_OPEN = 503 + SYS___NEXUS_REGISTER = 504 + SYS___NEXUS_DEREGISTER = 505 + SYS___NEXUS_CREATE = 506 + SYS___NEXUS_DESTROY = 507 + SYS___NEXUS_GET_OPT = 508 + SYS___NEXUS_SET_OPT = 509 + SYS___CHANNEL_OPEN = 510 + SYS___CHANNEL_GET_INFO = 511 + SYS___CHANNEL_SYNC = 512 + SYS___CHANNEL_GET_OPT = 513 + SYS___CHANNEL_SET_OPT = 514 + SYS_ULOCK_WAIT = 515 + SYS_ULOCK_WAKE = 516 + SYS_FCLONEFILEAT = 517 + SYS_FS_SNAPSHOT = 518 + SYS_TERMINATE_WITH_PAYLOAD = 520 + SYS_ABORT_WITH_PAYLOAD = 521 + SYS_NECP_SESSION_OPEN = 522 + SYS_NECP_SESSION_ACTION = 523 + SYS_SETATTRLISTAT = 524 + SYS_NET_QOS_GUIDELINE = 525 + SYS_FMOUNT = 526 + SYS_NTP_ADJTIME = 527 + SYS_NTP_GETTIME = 528 + SYS_OS_FAULT_WITH_PAYLOAD = 529 + SYS_MAXSYSCALL = 530 + SYS_INVALID = 63 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go new file mode 100644 index 0000000..9694630 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -0,0 +1,436 @@ +// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,darwin + +package unix + +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TYPEFILTER = 177 + SYS_KDEBUG_TRACE_STRING = 178 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_THREAD_SELFCOUNTS = 186 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS_KEVENT_QOS = 374 + SYS_KEVENT_ID = 375 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS_PSELECT = 394 + SYS_PSELECT_NOCANCEL = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_USRCTL = 445 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_CLONEFILEAT = 462 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAMEATX_NP = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_NETAGENT_TRIGGER = 490 + SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 + SYS_MICROSTACKSHOT = 492 + SYS_GRAB_PGO_DATA = 493 + SYS_PERSONA = 494 + SYS_WORK_INTERVAL_CTL = 499 + SYS_GETENTROPY = 500 + SYS_NECP_OPEN = 501 + SYS_NECP_CLIENT_ACTION = 502 + SYS___NEXUS_OPEN = 503 + SYS___NEXUS_REGISTER = 504 + SYS___NEXUS_DEREGISTER = 505 + SYS___NEXUS_CREATE = 506 + SYS___NEXUS_DESTROY = 507 + SYS___NEXUS_GET_OPT = 508 + SYS___NEXUS_SET_OPT = 509 + SYS___CHANNEL_OPEN = 510 + SYS___CHANNEL_GET_INFO = 511 + SYS___CHANNEL_SYNC = 512 + SYS___CHANNEL_GET_OPT = 513 + SYS___CHANNEL_SET_OPT = 514 + SYS_ULOCK_WAIT = 515 + SYS_ULOCK_WAKE = 516 + SYS_FCLONEFILEAT = 517 + SYS_FS_SNAPSHOT = 518 + SYS_TERMINATE_WITH_PAYLOAD = 520 + SYS_ABORT_WITH_PAYLOAD = 521 + SYS_NECP_SESSION_OPEN = 522 + SYS_NECP_SESSION_ACTION = 523 + SYS_SETATTRLISTAT = 524 + SYS_NET_QOS_GUIDELINE = 525 + SYS_FMOUNT = 526 + SYS_NTP_ADJTIME = 527 + SYS_NTP_GETTIME = 528 + SYS_OS_FAULT_WITH_PAYLOAD = 529 + SYS_MAXSYSCALL = 530 + SYS_INVALID = 63 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go new file mode 100644 index 0000000..b2c9ef8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -0,0 +1,315 @@ +// mksysnum_dragonfly.pl +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,dragonfly + +package unix + +const ( + // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int + SYS_EXIT = 1 // { void exit(int rval); } + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_GETFSSTAT = 18 // { int getfsstat(struct statfs *buf, long bufsize, \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, caddr_t msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, \ + SYS_ACCEPT = 30 // { int accept(int s, caddr_t name, int *anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, caddr_t asa, int *alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, caddr_t asa, int *alen); } + SYS_ACCESS = 33 // { int access(char *path, int flags); } + SYS_CHFLAGS = 34 // { int chflags(char *path, int flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, int flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { int readlink(char *path, char *buf, int count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } + SYS_VFORK = 66 // { pid_t vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(int from, int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_STATFS = 157 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 158 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } + SYS_GETDOMAINNAME = 162 // { int getdomainname(char *domainname, int len); } + SYS_SETDOMAINNAME = 163 // { int setdomainname(char *domainname, int len); } + SYS_UNAME = 164 // { int uname(struct utsname *name); } + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_EXTPREAD = 173 // { ssize_t extpread(int fd, void *buf, \ + SYS_EXTPWRITE = 174 // { ssize_t extpwrite(int fd, const void *buf, \ + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_MMAP = 197 // { caddr_t mmap(caddr_t addr, size_t len, int prot, \ + // SYS_NOSYS = 198; // { int nosys(void); } __syscall __syscall_args int + SYS_LSEEK = 199 // { off_t lseek(int fd, int pad, off_t offset, \ + SYS_TRUNCATE = 200 // { int truncate(char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int ftruncate(int fd, int pad, off_t length); } + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS___SEMCTL = 220 // { int __semctl(int semid, int semnum, int cmd, \ + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \ + SYS_MSGCTL = 224 // { int msgctl(int msqid, int cmd, \ + SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, \ + SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, \ + SYS_SHMAT = 228 // { caddr_t shmat(int shmid, const void *shmaddr, \ + SYS_SHMCTL = 229 // { int shmctl(int shmid, int cmd, \ + SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } + SYS_EXTPREADV = 289 // { ssize_t extpreadv(int fd, struct iovec *iovp, \ + SYS_EXTPWRITEV = 290 // { ssize_t extpwritev(int fd, struct iovec *iovp,\ + SYS_FHSTATFS = 297 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } + SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_AIO_READ = 318 // { int aio_read(struct aiocb *aiocbp); } + SYS_AIO_WRITE = 319 // { int aio_write(struct aiocb *aiocbp); } + SYS_LIO_LISTIO = 320 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(u_char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); } + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); } + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGACTION = 342 // { int sigaction(int sig, const struct sigaction *act, \ + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGRETURN = 344 // { int sigreturn(ucontext_t *sigcntxp); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set,\ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set,\ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); } + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { int extattr_set_file(const char *path, \ + SYS_EXTATTR_GET_FILE = 357 // { int extattr_get_file(const char *path, \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } + SYS_LCHFLAGS = 391 // { int lchflags(char *path, int flags); } + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, \ + SYS_VARSYM_SET = 450 // { int varsym_set(int level, const char *name, const char *data); } + SYS_VARSYM_GET = 451 // { int varsym_get(int mask, const char *wild, char *buf, int bufsize); } + SYS_VARSYM_LIST = 452 // { int varsym_list(int level, char *buf, int maxsize, int *marker); } + SYS_EXEC_SYS_REGISTER = 465 // { int exec_sys_register(void *entry); } + SYS_EXEC_SYS_UNREGISTER = 466 // { int exec_sys_unregister(int id); } + SYS_SYS_CHECKPOINT = 467 // { int sys_checkpoint(int type, int fd, pid_t pid, int retval); } + SYS_MOUNTCTL = 468 // { int mountctl(const char *path, int op, int fd, const void *ctl, int ctllen, void *buf, int buflen); } + SYS_UMTX_SLEEP = 469 // { int umtx_sleep(volatile const int *ptr, int value, int timeout); } + SYS_UMTX_WAKEUP = 470 // { int umtx_wakeup(volatile const int *ptr, int count); } + SYS_JAIL_ATTACH = 471 // { int jail_attach(int jid); } + SYS_SET_TLS_AREA = 472 // { int set_tls_area(int which, struct tls_info *info, size_t infosize); } + SYS_GET_TLS_AREA = 473 // { int get_tls_area(int which, struct tls_info *info, size_t infosize); } + SYS_CLOSEFROM = 474 // { int closefrom(int fd); } + SYS_STAT = 475 // { int stat(const char *path, struct stat *ub); } + SYS_FSTAT = 476 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 477 // { int lstat(const char *path, struct stat *ub); } + SYS_FHSTAT = 478 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 479 // { int getdirentries(int fd, char *buf, u_int count, \ + SYS_GETDENTS = 480 // { int getdents(int fd, char *buf, size_t count); } + SYS_USCHED_SET = 481 // { int usched_set(pid_t pid, int cmd, void *data, \ + SYS_EXTACCEPT = 482 // { int extaccept(int s, int flags, caddr_t name, int *anamelen); } + SYS_EXTCONNECT = 483 // { int extconnect(int s, int flags, caddr_t name, int namelen); } + SYS_MCONTROL = 485 // { int mcontrol(void *addr, size_t len, int behav, off_t value); } + SYS_VMSPACE_CREATE = 486 // { int vmspace_create(void *id, int type, void *data); } + SYS_VMSPACE_DESTROY = 487 // { int vmspace_destroy(void *id); } + SYS_VMSPACE_CTL = 488 // { int vmspace_ctl(void *id, int cmd, \ + SYS_VMSPACE_MMAP = 489 // { int vmspace_mmap(void *id, void *addr, size_t len, \ + SYS_VMSPACE_MUNMAP = 490 // { int vmspace_munmap(void *id, void *addr, \ + SYS_VMSPACE_MCONTROL = 491 // { int vmspace_mcontrol(void *id, void *addr, \ + SYS_VMSPACE_PREAD = 492 // { ssize_t vmspace_pread(void *id, void *buf, \ + SYS_VMSPACE_PWRITE = 493 // { ssize_t vmspace_pwrite(void *id, const void *buf, \ + SYS_EXTEXIT = 494 // { void extexit(int how, int status, void *addr); } + SYS_LWP_CREATE = 495 // { int lwp_create(struct lwp_params *params); } + SYS_LWP_GETTID = 496 // { lwpid_t lwp_gettid(void); } + SYS_LWP_KILL = 497 // { int lwp_kill(pid_t pid, lwpid_t tid, int signum); } + SYS_LWP_RTPRIO = 498 // { int lwp_rtprio(int function, pid_t pid, lwpid_t tid, struct rtprio *rtp); } + SYS_PSELECT = 499 // { int pselect(int nd, fd_set *in, fd_set *ou, \ + SYS_STATVFS = 500 // { int statvfs(const char *path, struct statvfs *buf); } + SYS_FSTATVFS = 501 // { int fstatvfs(int fd, struct statvfs *buf); } + SYS_FHSTATVFS = 502 // { int fhstatvfs(const struct fhandle *u_fhp, struct statvfs *buf); } + SYS_GETVFSSTAT = 503 // { int getvfsstat(struct statfs *buf, \ + SYS_OPENAT = 504 // { int openat(int fd, char *path, int flags, int mode); } + SYS_FSTATAT = 505 // { int fstatat(int fd, char *path, \ + SYS_FCHMODAT = 506 // { int fchmodat(int fd, char *path, int mode, \ + SYS_FCHOWNAT = 507 // { int fchownat(int fd, char *path, int uid, int gid, \ + SYS_UNLINKAT = 508 // { int unlinkat(int fd, char *path, int flags); } + SYS_FACCESSAT = 509 // { int faccessat(int fd, char *path, int amode, \ + SYS_MQ_OPEN = 510 // { mqd_t mq_open(const char * name, int oflag, \ + SYS_MQ_CLOSE = 511 // { int mq_close(mqd_t mqdes); } + SYS_MQ_UNLINK = 512 // { int mq_unlink(const char *name); } + SYS_MQ_GETATTR = 513 // { int mq_getattr(mqd_t mqdes, \ + SYS_MQ_SETATTR = 514 // { int mq_setattr(mqd_t mqdes, \ + SYS_MQ_NOTIFY = 515 // { int mq_notify(mqd_t mqdes, \ + SYS_MQ_SEND = 516 // { int mq_send(mqd_t mqdes, const char *msg_ptr, \ + SYS_MQ_RECEIVE = 517 // { ssize_t mq_receive(mqd_t mqdes, char *msg_ptr, \ + SYS_MQ_TIMEDSEND = 518 // { int mq_timedsend(mqd_t mqdes, \ + SYS_MQ_TIMEDRECEIVE = 519 // { ssize_t mq_timedreceive(mqd_t mqdes, \ + SYS_IOPRIO_SET = 520 // { int ioprio_set(int which, int who, int prio); } + SYS_IOPRIO_GET = 521 // { int ioprio_get(int which, int who); } + SYS_CHROOT_KERNEL = 522 // { int chroot_kernel(char *path); } + SYS_RENAMEAT = 523 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_MKDIRAT = 524 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 525 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 526 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_READLINKAT = 527 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_SYMLINKAT = 528 // { int symlinkat(char *path1, int fd, char *path2); } + SYS_SWAPOFF = 529 // { int swapoff(char *name); } + SYS_VQUOTACTL = 530 // { int vquotactl(const char *path, \ + SYS_LINKAT = 531 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_EACCESS = 532 // { int eaccess(char *path, int flags); } + SYS_LPATHCONF = 533 // { int lpathconf(char *path, int name); } + SYS_VMM_GUEST_CTL = 534 // { int vmm_guest_ctl(int op, struct vmm_guest_options *options); } + SYS_VMM_GUEST_SYNC_ADDR = 535 // { int vmm_guest_sync_addr(long *dstaddr, long *srcaddr); } + SYS_PROCCTL = 536 // { int procctl(idtype_t idtype, id_t id, int cmd, void *data); } + SYS_CHFLAGSAT = 537 // { int chflagsat(int fd, const char *path, int flags, int atflags);} + SYS_PIPE2 = 538 // { int pipe2(int *fildes, int flags); } + SYS_UTIMENSAT = 539 // { int utimensat(int fd, const char *path, const struct timespec *ts, int flags); } + SYS_FUTIMENS = 540 // { int futimens(int fd, const struct timespec *ts); } + SYS_ACCEPT4 = 541 // { int accept4(int s, caddr_t name, int *anamelen, int flags); } + SYS_LWP_SETNAME = 542 // { int lwp_setname(lwpid_t tid, const char *name); } + SYS_PPOLL = 543 // { int ppoll(struct pollfd *fds, u_int nfds, \ + SYS_LWP_SETAFFINITY = 544 // { int lwp_setaffinity(pid_t pid, lwpid_t tid, const cpumask_t *mask); } + SYS_LWP_GETAFFINITY = 545 // { int lwp_getaffinity(pid_t pid, lwpid_t tid, cpumask_t *mask); } + SYS_LWP_CREATE2 = 546 // { int lwp_create2(struct lwp_params *params, const cpumask_t *mask); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go new file mode 100644 index 0000000..b64a812 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -0,0 +1,353 @@ +// mksysnum_freebsd.pl +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,freebsd + +package unix + +const ( + // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_LGETFH = 160 // { int lgetfh(char *fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ + SYS_FUTIMENS = 546 // { int futimens(int fd, \ + SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go new file mode 100644 index 0000000..81722ac --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -0,0 +1,353 @@ +// mksysnum_freebsd.pl +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,freebsd + +package unix + +const ( + // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_LGETFH = 160 // { int lgetfh(char *fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ + SYS_FUTIMENS = 546 // { int futimens(int fd, \ + SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go new file mode 100644 index 0000000..4488314 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -0,0 +1,353 @@ +// mksysnum_freebsd.pl +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,freebsd + +package unix + +const ( + // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_LGETFH = 160 // { int lgetfh(char *fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ + SYS_FUTIMENS = 546 // { int futimens(int fd, \ + SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go new file mode 100644 index 0000000..95ab129 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -0,0 +1,390 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86OLD = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_VM86 = 166 + SYS_QUERY_MODULE = 167 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_GETPMSG = 188 + SYS_PUTPMSG = 189 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_PIVOT_ROOT = 217 + SYS_MINCORE = 218 + SYS_MADVISE = 219 + SYS_GETDENTS64 = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_SET_THREAD_AREA = 243 + SYS_GET_THREAD_AREA = 244 + SYS_IO_SETUP = 245 + SYS_IO_DESTROY = 246 + SYS_IO_GETEVENTS = 247 + SYS_IO_SUBMIT = 248 + SYS_IO_CANCEL = 249 + SYS_FADVISE64 = 250 + SYS_EXIT_GROUP = 252 + SYS_LOOKUP_DCOOKIE = 253 + SYS_EPOLL_CREATE = 254 + SYS_EPOLL_CTL = 255 + SYS_EPOLL_WAIT = 256 + SYS_REMAP_FILE_PAGES = 257 + SYS_SET_TID_ADDRESS = 258 + SYS_TIMER_CREATE = 259 + SYS_TIMER_SETTIME = 260 + SYS_TIMER_GETTIME = 261 + SYS_TIMER_GETOVERRUN = 262 + SYS_TIMER_DELETE = 263 + SYS_CLOCK_SETTIME = 264 + SYS_CLOCK_GETTIME = 265 + SYS_CLOCK_GETRES = 266 + SYS_CLOCK_NANOSLEEP = 267 + SYS_STATFS64 = 268 + SYS_FSTATFS64 = 269 + SYS_TGKILL = 270 + SYS_UTIMES = 271 + SYS_FADVISE64_64 = 272 + SYS_VSERVER = 273 + SYS_MBIND = 274 + SYS_GET_MEMPOLICY = 275 + SYS_SET_MEMPOLICY = 276 + SYS_MQ_OPEN = 277 + SYS_MQ_UNLINK = 278 + SYS_MQ_TIMEDSEND = 279 + SYS_MQ_TIMEDRECEIVE = 280 + SYS_MQ_NOTIFY = 281 + SYS_MQ_GETSETATTR = 282 + SYS_KEXEC_LOAD = 283 + SYS_WAITID = 284 + SYS_ADD_KEY = 286 + SYS_REQUEST_KEY = 287 + SYS_KEYCTL = 288 + SYS_IOPRIO_SET = 289 + SYS_IOPRIO_GET = 290 + SYS_INOTIFY_INIT = 291 + SYS_INOTIFY_ADD_WATCH = 292 + SYS_INOTIFY_RM_WATCH = 293 + SYS_MIGRATE_PAGES = 294 + SYS_OPENAT = 295 + SYS_MKDIRAT = 296 + SYS_MKNODAT = 297 + SYS_FCHOWNAT = 298 + SYS_FUTIMESAT = 299 + SYS_FSTATAT64 = 300 + SYS_UNLINKAT = 301 + SYS_RENAMEAT = 302 + SYS_LINKAT = 303 + SYS_SYMLINKAT = 304 + SYS_READLINKAT = 305 + SYS_FCHMODAT = 306 + SYS_FACCESSAT = 307 + SYS_PSELECT6 = 308 + SYS_PPOLL = 309 + SYS_UNSHARE = 310 + SYS_SET_ROBUST_LIST = 311 + SYS_GET_ROBUST_LIST = 312 + SYS_SPLICE = 313 + SYS_SYNC_FILE_RANGE = 314 + SYS_TEE = 315 + SYS_VMSPLICE = 316 + SYS_MOVE_PAGES = 317 + SYS_GETCPU = 318 + SYS_EPOLL_PWAIT = 319 + SYS_UTIMENSAT = 320 + SYS_SIGNALFD = 321 + SYS_TIMERFD_CREATE = 322 + SYS_EVENTFD = 323 + SYS_FALLOCATE = 324 + SYS_TIMERFD_SETTIME = 325 + SYS_TIMERFD_GETTIME = 326 + SYS_SIGNALFD4 = 327 + SYS_EVENTFD2 = 328 + SYS_EPOLL_CREATE1 = 329 + SYS_DUP3 = 330 + SYS_PIPE2 = 331 + SYS_INOTIFY_INIT1 = 332 + SYS_PREADV = 333 + SYS_PWRITEV = 334 + SYS_RT_TGSIGQUEUEINFO = 335 + SYS_PERF_EVENT_OPEN = 336 + SYS_RECVMMSG = 337 + SYS_FANOTIFY_INIT = 338 + SYS_FANOTIFY_MARK = 339 + SYS_PRLIMIT64 = 340 + SYS_NAME_TO_HANDLE_AT = 341 + SYS_OPEN_BY_HANDLE_AT = 342 + SYS_CLOCK_ADJTIME = 343 + SYS_SYNCFS = 344 + SYS_SENDMMSG = 345 + SYS_SETNS = 346 + SYS_PROCESS_VM_READV = 347 + SYS_PROCESS_VM_WRITEV = 348 + SYS_KCMP = 349 + SYS_FINIT_MODULE = 350 + SYS_SCHED_SETATTR = 351 + SYS_SCHED_GETATTR = 352 + SYS_RENAMEAT2 = 353 + SYS_SECCOMP = 354 + SYS_GETRANDOM = 355 + SYS_MEMFD_CREATE = 356 + SYS_BPF = 357 + SYS_EXECVEAT = 358 + SYS_SOCKET = 359 + SYS_SOCKETPAIR = 360 + SYS_BIND = 361 + SYS_CONNECT = 362 + SYS_LISTEN = 363 + SYS_ACCEPT4 = 364 + SYS_GETSOCKOPT = 365 + SYS_SETSOCKOPT = 366 + SYS_GETSOCKNAME = 367 + SYS_GETPEERNAME = 368 + SYS_SENDTO = 369 + SYS_SENDMSG = 370 + SYS_RECVFROM = 371 + SYS_RECVMSG = 372 + SYS_SHUTDOWN = 373 + SYS_USERFAULTFD = 374 + SYS_MEMBARRIER = 375 + SYS_MLOCK2 = 376 + SYS_COPY_FILE_RANGE = 377 + SYS_PREADV2 = 378 + SYS_PWRITEV2 = 379 + SYS_PKEY_MPROTECT = 380 + SYS_PKEY_ALLOC = 381 + SYS_PKEY_FREE = 382 + SYS_STATX = 383 + SYS_ARCH_PRCTL = 384 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go new file mode 100644 index 0000000..c5dabf2 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -0,0 +1,342 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,linux + +package unix + +const ( + SYS_READ = 0 + SYS_WRITE = 1 + SYS_OPEN = 2 + SYS_CLOSE = 3 + SYS_STAT = 4 + SYS_FSTAT = 5 + SYS_LSTAT = 6 + SYS_POLL = 7 + SYS_LSEEK = 8 + SYS_MMAP = 9 + SYS_MPROTECT = 10 + SYS_MUNMAP = 11 + SYS_BRK = 12 + SYS_RT_SIGACTION = 13 + SYS_RT_SIGPROCMASK = 14 + SYS_RT_SIGRETURN = 15 + SYS_IOCTL = 16 + SYS_PREAD64 = 17 + SYS_PWRITE64 = 18 + SYS_READV = 19 + SYS_WRITEV = 20 + SYS_ACCESS = 21 + SYS_PIPE = 22 + SYS_SELECT = 23 + SYS_SCHED_YIELD = 24 + SYS_MREMAP = 25 + SYS_MSYNC = 26 + SYS_MINCORE = 27 + SYS_MADVISE = 28 + SYS_SHMGET = 29 + SYS_SHMAT = 30 + SYS_SHMCTL = 31 + SYS_DUP = 32 + SYS_DUP2 = 33 + SYS_PAUSE = 34 + SYS_NANOSLEEP = 35 + SYS_GETITIMER = 36 + SYS_ALARM = 37 + SYS_SETITIMER = 38 + SYS_GETPID = 39 + SYS_SENDFILE = 40 + SYS_SOCKET = 41 + SYS_CONNECT = 42 + SYS_ACCEPT = 43 + SYS_SENDTO = 44 + SYS_RECVFROM = 45 + SYS_SENDMSG = 46 + SYS_RECVMSG = 47 + SYS_SHUTDOWN = 48 + SYS_BIND = 49 + SYS_LISTEN = 50 + SYS_GETSOCKNAME = 51 + SYS_GETPEERNAME = 52 + SYS_SOCKETPAIR = 53 + SYS_SETSOCKOPT = 54 + SYS_GETSOCKOPT = 55 + SYS_CLONE = 56 + SYS_FORK = 57 + SYS_VFORK = 58 + SYS_EXECVE = 59 + SYS_EXIT = 60 + SYS_WAIT4 = 61 + SYS_KILL = 62 + SYS_UNAME = 63 + SYS_SEMGET = 64 + SYS_SEMOP = 65 + SYS_SEMCTL = 66 + SYS_SHMDT = 67 + SYS_MSGGET = 68 + SYS_MSGSND = 69 + SYS_MSGRCV = 70 + SYS_MSGCTL = 71 + SYS_FCNTL = 72 + SYS_FLOCK = 73 + SYS_FSYNC = 74 + SYS_FDATASYNC = 75 + SYS_TRUNCATE = 76 + SYS_FTRUNCATE = 77 + SYS_GETDENTS = 78 + SYS_GETCWD = 79 + SYS_CHDIR = 80 + SYS_FCHDIR = 81 + SYS_RENAME = 82 + SYS_MKDIR = 83 + SYS_RMDIR = 84 + SYS_CREAT = 85 + SYS_LINK = 86 + SYS_UNLINK = 87 + SYS_SYMLINK = 88 + SYS_READLINK = 89 + SYS_CHMOD = 90 + SYS_FCHMOD = 91 + SYS_CHOWN = 92 + SYS_FCHOWN = 93 + SYS_LCHOWN = 94 + SYS_UMASK = 95 + SYS_GETTIMEOFDAY = 96 + SYS_GETRLIMIT = 97 + SYS_GETRUSAGE = 98 + SYS_SYSINFO = 99 + SYS_TIMES = 100 + SYS_PTRACE = 101 + SYS_GETUID = 102 + SYS_SYSLOG = 103 + SYS_GETGID = 104 + SYS_SETUID = 105 + SYS_SETGID = 106 + SYS_GETEUID = 107 + SYS_GETEGID = 108 + SYS_SETPGID = 109 + SYS_GETPPID = 110 + SYS_GETPGRP = 111 + SYS_SETSID = 112 + SYS_SETREUID = 113 + SYS_SETREGID = 114 + SYS_GETGROUPS = 115 + SYS_SETGROUPS = 116 + SYS_SETRESUID = 117 + SYS_GETRESUID = 118 + SYS_SETRESGID = 119 + SYS_GETRESGID = 120 + SYS_GETPGID = 121 + SYS_SETFSUID = 122 + SYS_SETFSGID = 123 + SYS_GETSID = 124 + SYS_CAPGET = 125 + SYS_CAPSET = 126 + SYS_RT_SIGPENDING = 127 + SYS_RT_SIGTIMEDWAIT = 128 + SYS_RT_SIGQUEUEINFO = 129 + SYS_RT_SIGSUSPEND = 130 + SYS_SIGALTSTACK = 131 + SYS_UTIME = 132 + SYS_MKNOD = 133 + SYS_USELIB = 134 + SYS_PERSONALITY = 135 + SYS_USTAT = 136 + SYS_STATFS = 137 + SYS_FSTATFS = 138 + SYS_SYSFS = 139 + SYS_GETPRIORITY = 140 + SYS_SETPRIORITY = 141 + SYS_SCHED_SETPARAM = 142 + SYS_SCHED_GETPARAM = 143 + SYS_SCHED_SETSCHEDULER = 144 + SYS_SCHED_GETSCHEDULER = 145 + SYS_SCHED_GET_PRIORITY_MAX = 146 + SYS_SCHED_GET_PRIORITY_MIN = 147 + SYS_SCHED_RR_GET_INTERVAL = 148 + SYS_MLOCK = 149 + SYS_MUNLOCK = 150 + SYS_MLOCKALL = 151 + SYS_MUNLOCKALL = 152 + SYS_VHANGUP = 153 + SYS_MODIFY_LDT = 154 + SYS_PIVOT_ROOT = 155 + SYS__SYSCTL = 156 + SYS_PRCTL = 157 + SYS_ARCH_PRCTL = 158 + SYS_ADJTIMEX = 159 + SYS_SETRLIMIT = 160 + SYS_CHROOT = 161 + SYS_SYNC = 162 + SYS_ACCT = 163 + SYS_SETTIMEOFDAY = 164 + SYS_MOUNT = 165 + SYS_UMOUNT2 = 166 + SYS_SWAPON = 167 + SYS_SWAPOFF = 168 + SYS_REBOOT = 169 + SYS_SETHOSTNAME = 170 + SYS_SETDOMAINNAME = 171 + SYS_IOPL = 172 + SYS_IOPERM = 173 + SYS_CREATE_MODULE = 174 + SYS_INIT_MODULE = 175 + SYS_DELETE_MODULE = 176 + SYS_GET_KERNEL_SYMS = 177 + SYS_QUERY_MODULE = 178 + SYS_QUOTACTL = 179 + SYS_NFSSERVCTL = 180 + SYS_GETPMSG = 181 + SYS_PUTPMSG = 182 + SYS_AFS_SYSCALL = 183 + SYS_TUXCALL = 184 + SYS_SECURITY = 185 + SYS_GETTID = 186 + SYS_READAHEAD = 187 + SYS_SETXATTR = 188 + SYS_LSETXATTR = 189 + SYS_FSETXATTR = 190 + SYS_GETXATTR = 191 + SYS_LGETXATTR = 192 + SYS_FGETXATTR = 193 + SYS_LISTXATTR = 194 + SYS_LLISTXATTR = 195 + SYS_FLISTXATTR = 196 + SYS_REMOVEXATTR = 197 + SYS_LREMOVEXATTR = 198 + SYS_FREMOVEXATTR = 199 + SYS_TKILL = 200 + SYS_TIME = 201 + SYS_FUTEX = 202 + SYS_SCHED_SETAFFINITY = 203 + SYS_SCHED_GETAFFINITY = 204 + SYS_SET_THREAD_AREA = 205 + SYS_IO_SETUP = 206 + SYS_IO_DESTROY = 207 + SYS_IO_GETEVENTS = 208 + SYS_IO_SUBMIT = 209 + SYS_IO_CANCEL = 210 + SYS_GET_THREAD_AREA = 211 + SYS_LOOKUP_DCOOKIE = 212 + SYS_EPOLL_CREATE = 213 + SYS_EPOLL_CTL_OLD = 214 + SYS_EPOLL_WAIT_OLD = 215 + SYS_REMAP_FILE_PAGES = 216 + SYS_GETDENTS64 = 217 + SYS_SET_TID_ADDRESS = 218 + SYS_RESTART_SYSCALL = 219 + SYS_SEMTIMEDOP = 220 + SYS_FADVISE64 = 221 + SYS_TIMER_CREATE = 222 + SYS_TIMER_SETTIME = 223 + SYS_TIMER_GETTIME = 224 + SYS_TIMER_GETOVERRUN = 225 + SYS_TIMER_DELETE = 226 + SYS_CLOCK_SETTIME = 227 + SYS_CLOCK_GETTIME = 228 + SYS_CLOCK_GETRES = 229 + SYS_CLOCK_NANOSLEEP = 230 + SYS_EXIT_GROUP = 231 + SYS_EPOLL_WAIT = 232 + SYS_EPOLL_CTL = 233 + SYS_TGKILL = 234 + SYS_UTIMES = 235 + SYS_VSERVER = 236 + SYS_MBIND = 237 + SYS_SET_MEMPOLICY = 238 + SYS_GET_MEMPOLICY = 239 + SYS_MQ_OPEN = 240 + SYS_MQ_UNLINK = 241 + SYS_MQ_TIMEDSEND = 242 + SYS_MQ_TIMEDRECEIVE = 243 + SYS_MQ_NOTIFY = 244 + SYS_MQ_GETSETATTR = 245 + SYS_KEXEC_LOAD = 246 + SYS_WAITID = 247 + SYS_ADD_KEY = 248 + SYS_REQUEST_KEY = 249 + SYS_KEYCTL = 250 + SYS_IOPRIO_SET = 251 + SYS_IOPRIO_GET = 252 + SYS_INOTIFY_INIT = 253 + SYS_INOTIFY_ADD_WATCH = 254 + SYS_INOTIFY_RM_WATCH = 255 + SYS_MIGRATE_PAGES = 256 + SYS_OPENAT = 257 + SYS_MKDIRAT = 258 + SYS_MKNODAT = 259 + SYS_FCHOWNAT = 260 + SYS_FUTIMESAT = 261 + SYS_NEWFSTATAT = 262 + SYS_UNLINKAT = 263 + SYS_RENAMEAT = 264 + SYS_LINKAT = 265 + SYS_SYMLINKAT = 266 + SYS_READLINKAT = 267 + SYS_FCHMODAT = 268 + SYS_FACCESSAT = 269 + SYS_PSELECT6 = 270 + SYS_PPOLL = 271 + SYS_UNSHARE = 272 + SYS_SET_ROBUST_LIST = 273 + SYS_GET_ROBUST_LIST = 274 + SYS_SPLICE = 275 + SYS_TEE = 276 + SYS_SYNC_FILE_RANGE = 277 + SYS_VMSPLICE = 278 + SYS_MOVE_PAGES = 279 + SYS_UTIMENSAT = 280 + SYS_EPOLL_PWAIT = 281 + SYS_SIGNALFD = 282 + SYS_TIMERFD_CREATE = 283 + SYS_EVENTFD = 284 + SYS_FALLOCATE = 285 + SYS_TIMERFD_SETTIME = 286 + SYS_TIMERFD_GETTIME = 287 + SYS_ACCEPT4 = 288 + SYS_SIGNALFD4 = 289 + SYS_EVENTFD2 = 290 + SYS_EPOLL_CREATE1 = 291 + SYS_DUP3 = 292 + SYS_PIPE2 = 293 + SYS_INOTIFY_INIT1 = 294 + SYS_PREADV = 295 + SYS_PWRITEV = 296 + SYS_RT_TGSIGQUEUEINFO = 297 + SYS_PERF_EVENT_OPEN = 298 + SYS_RECVMMSG = 299 + SYS_FANOTIFY_INIT = 300 + SYS_FANOTIFY_MARK = 301 + SYS_PRLIMIT64 = 302 + SYS_NAME_TO_HANDLE_AT = 303 + SYS_OPEN_BY_HANDLE_AT = 304 + SYS_CLOCK_ADJTIME = 305 + SYS_SYNCFS = 306 + SYS_SENDMMSG = 307 + SYS_SETNS = 308 + SYS_GETCPU = 309 + SYS_PROCESS_VM_READV = 310 + SYS_PROCESS_VM_WRITEV = 311 + SYS_KCMP = 312 + SYS_FINIT_MODULE = 313 + SYS_SCHED_SETATTR = 314 + SYS_SCHED_GETATTR = 315 + SYS_RENAMEAT2 = 316 + SYS_SECCOMP = 317 + SYS_GETRANDOM = 318 + SYS_MEMFD_CREATE = 319 + SYS_KEXEC_FILE_LOAD = 320 + SYS_BPF = 321 + SYS_EXECVEAT = 322 + SYS_USERFAULTFD = 323 + SYS_MEMBARRIER = 324 + SYS_MLOCK2 = 325 + SYS_COPY_FILE_RANGE = 326 + SYS_PREADV2 = 327 + SYS_PWRITEV2 = 328 + SYS_PKEY_MPROTECT = 329 + SYS_PKEY_ALLOC = 330 + SYS_PKEY_FREE = 331 + SYS_STATX = 332 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go new file mode 100644 index 0000000..ab7fa5f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -0,0 +1,362 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_PTRACE = 26 + SYS_PAUSE = 29 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_SETPGID = 57 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SYMLINK = 83 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_VHANGUP = 111 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_GETDENTS64 = 217 + SYS_PIVOT_ROOT = 218 + SYS_MINCORE = 219 + SYS_MADVISE = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_IO_SETUP = 243 + SYS_IO_DESTROY = 244 + SYS_IO_GETEVENTS = 245 + SYS_IO_SUBMIT = 246 + SYS_IO_CANCEL = 247 + SYS_EXIT_GROUP = 248 + SYS_LOOKUP_DCOOKIE = 249 + SYS_EPOLL_CREATE = 250 + SYS_EPOLL_CTL = 251 + SYS_EPOLL_WAIT = 252 + SYS_REMAP_FILE_PAGES = 253 + SYS_SET_TID_ADDRESS = 256 + SYS_TIMER_CREATE = 257 + SYS_TIMER_SETTIME = 258 + SYS_TIMER_GETTIME = 259 + SYS_TIMER_GETOVERRUN = 260 + SYS_TIMER_DELETE = 261 + SYS_CLOCK_SETTIME = 262 + SYS_CLOCK_GETTIME = 263 + SYS_CLOCK_GETRES = 264 + SYS_CLOCK_NANOSLEEP = 265 + SYS_STATFS64 = 266 + SYS_FSTATFS64 = 267 + SYS_TGKILL = 268 + SYS_UTIMES = 269 + SYS_ARM_FADVISE64_64 = 270 + SYS_PCICONFIG_IOBASE = 271 + SYS_PCICONFIG_READ = 272 + SYS_PCICONFIG_WRITE = 273 + SYS_MQ_OPEN = 274 + SYS_MQ_UNLINK = 275 + SYS_MQ_TIMEDSEND = 276 + SYS_MQ_TIMEDRECEIVE = 277 + SYS_MQ_NOTIFY = 278 + SYS_MQ_GETSETATTR = 279 + SYS_WAITID = 280 + SYS_SOCKET = 281 + SYS_BIND = 282 + SYS_CONNECT = 283 + SYS_LISTEN = 284 + SYS_ACCEPT = 285 + SYS_GETSOCKNAME = 286 + SYS_GETPEERNAME = 287 + SYS_SOCKETPAIR = 288 + SYS_SEND = 289 + SYS_SENDTO = 290 + SYS_RECV = 291 + SYS_RECVFROM = 292 + SYS_SHUTDOWN = 293 + SYS_SETSOCKOPT = 294 + SYS_GETSOCKOPT = 295 + SYS_SENDMSG = 296 + SYS_RECVMSG = 297 + SYS_SEMOP = 298 + SYS_SEMGET = 299 + SYS_SEMCTL = 300 + SYS_MSGSND = 301 + SYS_MSGRCV = 302 + SYS_MSGGET = 303 + SYS_MSGCTL = 304 + SYS_SHMAT = 305 + SYS_SHMDT = 306 + SYS_SHMGET = 307 + SYS_SHMCTL = 308 + SYS_ADD_KEY = 309 + SYS_REQUEST_KEY = 310 + SYS_KEYCTL = 311 + SYS_SEMTIMEDOP = 312 + SYS_VSERVER = 313 + SYS_IOPRIO_SET = 314 + SYS_IOPRIO_GET = 315 + SYS_INOTIFY_INIT = 316 + SYS_INOTIFY_ADD_WATCH = 317 + SYS_INOTIFY_RM_WATCH = 318 + SYS_MBIND = 319 + SYS_GET_MEMPOLICY = 320 + SYS_SET_MEMPOLICY = 321 + SYS_OPENAT = 322 + SYS_MKDIRAT = 323 + SYS_MKNODAT = 324 + SYS_FCHOWNAT = 325 + SYS_FUTIMESAT = 326 + SYS_FSTATAT64 = 327 + SYS_UNLINKAT = 328 + SYS_RENAMEAT = 329 + SYS_LINKAT = 330 + SYS_SYMLINKAT = 331 + SYS_READLINKAT = 332 + SYS_FCHMODAT = 333 + SYS_FACCESSAT = 334 + SYS_PSELECT6 = 335 + SYS_PPOLL = 336 + SYS_UNSHARE = 337 + SYS_SET_ROBUST_LIST = 338 + SYS_GET_ROBUST_LIST = 339 + SYS_SPLICE = 340 + SYS_ARM_SYNC_FILE_RANGE = 341 + SYS_TEE = 342 + SYS_VMSPLICE = 343 + SYS_MOVE_PAGES = 344 + SYS_GETCPU = 345 + SYS_EPOLL_PWAIT = 346 + SYS_KEXEC_LOAD = 347 + SYS_UTIMENSAT = 348 + SYS_SIGNALFD = 349 + SYS_TIMERFD_CREATE = 350 + SYS_EVENTFD = 351 + SYS_FALLOCATE = 352 + SYS_TIMERFD_SETTIME = 353 + SYS_TIMERFD_GETTIME = 354 + SYS_SIGNALFD4 = 355 + SYS_EVENTFD2 = 356 + SYS_EPOLL_CREATE1 = 357 + SYS_DUP3 = 358 + SYS_PIPE2 = 359 + SYS_INOTIFY_INIT1 = 360 + SYS_PREADV = 361 + SYS_PWRITEV = 362 + SYS_RT_TGSIGQUEUEINFO = 363 + SYS_PERF_EVENT_OPEN = 364 + SYS_RECVMMSG = 365 + SYS_ACCEPT4 = 366 + SYS_FANOTIFY_INIT = 367 + SYS_FANOTIFY_MARK = 368 + SYS_PRLIMIT64 = 369 + SYS_NAME_TO_HANDLE_AT = 370 + SYS_OPEN_BY_HANDLE_AT = 371 + SYS_CLOCK_ADJTIME = 372 + SYS_SYNCFS = 373 + SYS_SENDMMSG = 374 + SYS_SETNS = 375 + SYS_PROCESS_VM_READV = 376 + SYS_PROCESS_VM_WRITEV = 377 + SYS_KCMP = 378 + SYS_FINIT_MODULE = 379 + SYS_SCHED_SETATTR = 380 + SYS_SCHED_GETATTR = 381 + SYS_RENAMEAT2 = 382 + SYS_SECCOMP = 383 + SYS_GETRANDOM = 384 + SYS_MEMFD_CREATE = 385 + SYS_BPF = 386 + SYS_EXECVEAT = 387 + SYS_USERFAULTFD = 388 + SYS_MEMBARRIER = 389 + SYS_MLOCK2 = 390 + SYS_COPY_FILE_RANGE = 391 + SYS_PREADV2 = 392 + SYS_PWRITEV2 = 393 + SYS_PKEY_MPROTECT = 394 + SYS_PKEY_ALLOC = 395 + SYS_PKEY_FREE = 396 + SYS_STATX = 397 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go new file mode 100644 index 0000000..b1c6b4b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -0,0 +1,286 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,linux + +package unix + +const ( + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_RENAMEAT = 38 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go new file mode 100644 index 0000000..2e9aa7a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -0,0 +1,375 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips,linux + +package unix + +const ( + SYS_SYSCALL = 4000 + SYS_EXIT = 4001 + SYS_FORK = 4002 + SYS_READ = 4003 + SYS_WRITE = 4004 + SYS_OPEN = 4005 + SYS_CLOSE = 4006 + SYS_WAITPID = 4007 + SYS_CREAT = 4008 + SYS_LINK = 4009 + SYS_UNLINK = 4010 + SYS_EXECVE = 4011 + SYS_CHDIR = 4012 + SYS_TIME = 4013 + SYS_MKNOD = 4014 + SYS_CHMOD = 4015 + SYS_LCHOWN = 4016 + SYS_BREAK = 4017 + SYS_UNUSED18 = 4018 + SYS_LSEEK = 4019 + SYS_GETPID = 4020 + SYS_MOUNT = 4021 + SYS_UMOUNT = 4022 + SYS_SETUID = 4023 + SYS_GETUID = 4024 + SYS_STIME = 4025 + SYS_PTRACE = 4026 + SYS_ALARM = 4027 + SYS_UNUSED28 = 4028 + SYS_PAUSE = 4029 + SYS_UTIME = 4030 + SYS_STTY = 4031 + SYS_GTTY = 4032 + SYS_ACCESS = 4033 + SYS_NICE = 4034 + SYS_FTIME = 4035 + SYS_SYNC = 4036 + SYS_KILL = 4037 + SYS_RENAME = 4038 + SYS_MKDIR = 4039 + SYS_RMDIR = 4040 + SYS_DUP = 4041 + SYS_PIPE = 4042 + SYS_TIMES = 4043 + SYS_PROF = 4044 + SYS_BRK = 4045 + SYS_SETGID = 4046 + SYS_GETGID = 4047 + SYS_SIGNAL = 4048 + SYS_GETEUID = 4049 + SYS_GETEGID = 4050 + SYS_ACCT = 4051 + SYS_UMOUNT2 = 4052 + SYS_LOCK = 4053 + SYS_IOCTL = 4054 + SYS_FCNTL = 4055 + SYS_MPX = 4056 + SYS_SETPGID = 4057 + SYS_ULIMIT = 4058 + SYS_UNUSED59 = 4059 + SYS_UMASK = 4060 + SYS_CHROOT = 4061 + SYS_USTAT = 4062 + SYS_DUP2 = 4063 + SYS_GETPPID = 4064 + SYS_GETPGRP = 4065 + SYS_SETSID = 4066 + SYS_SIGACTION = 4067 + SYS_SGETMASK = 4068 + SYS_SSETMASK = 4069 + SYS_SETREUID = 4070 + SYS_SETREGID = 4071 + SYS_SIGSUSPEND = 4072 + SYS_SIGPENDING = 4073 + SYS_SETHOSTNAME = 4074 + SYS_SETRLIMIT = 4075 + SYS_GETRLIMIT = 4076 + SYS_GETRUSAGE = 4077 + SYS_GETTIMEOFDAY = 4078 + SYS_SETTIMEOFDAY = 4079 + SYS_GETGROUPS = 4080 + SYS_SETGROUPS = 4081 + SYS_RESERVED82 = 4082 + SYS_SYMLINK = 4083 + SYS_UNUSED84 = 4084 + SYS_READLINK = 4085 + SYS_USELIB = 4086 + SYS_SWAPON = 4087 + SYS_REBOOT = 4088 + SYS_READDIR = 4089 + SYS_MMAP = 4090 + SYS_MUNMAP = 4091 + SYS_TRUNCATE = 4092 + SYS_FTRUNCATE = 4093 + SYS_FCHMOD = 4094 + SYS_FCHOWN = 4095 + SYS_GETPRIORITY = 4096 + SYS_SETPRIORITY = 4097 + SYS_PROFIL = 4098 + SYS_STATFS = 4099 + SYS_FSTATFS = 4100 + SYS_IOPERM = 4101 + SYS_SOCKETCALL = 4102 + SYS_SYSLOG = 4103 + SYS_SETITIMER = 4104 + SYS_GETITIMER = 4105 + SYS_STAT = 4106 + SYS_LSTAT = 4107 + SYS_FSTAT = 4108 + SYS_UNUSED109 = 4109 + SYS_IOPL = 4110 + SYS_VHANGUP = 4111 + SYS_IDLE = 4112 + SYS_VM86 = 4113 + SYS_WAIT4 = 4114 + SYS_SWAPOFF = 4115 + SYS_SYSINFO = 4116 + SYS_IPC = 4117 + SYS_FSYNC = 4118 + SYS_SIGRETURN = 4119 + SYS_CLONE = 4120 + SYS_SETDOMAINNAME = 4121 + SYS_UNAME = 4122 + SYS_MODIFY_LDT = 4123 + SYS_ADJTIMEX = 4124 + SYS_MPROTECT = 4125 + SYS_SIGPROCMASK = 4126 + SYS_CREATE_MODULE = 4127 + SYS_INIT_MODULE = 4128 + SYS_DELETE_MODULE = 4129 + SYS_GET_KERNEL_SYMS = 4130 + SYS_QUOTACTL = 4131 + SYS_GETPGID = 4132 + SYS_FCHDIR = 4133 + SYS_BDFLUSH = 4134 + SYS_SYSFS = 4135 + SYS_PERSONALITY = 4136 + SYS_AFS_SYSCALL = 4137 + SYS_SETFSUID = 4138 + SYS_SETFSGID = 4139 + SYS__LLSEEK = 4140 + SYS_GETDENTS = 4141 + SYS__NEWSELECT = 4142 + SYS_FLOCK = 4143 + SYS_MSYNC = 4144 + SYS_READV = 4145 + SYS_WRITEV = 4146 + SYS_CACHEFLUSH = 4147 + SYS_CACHECTL = 4148 + SYS_SYSMIPS = 4149 + SYS_UNUSED150 = 4150 + SYS_GETSID = 4151 + SYS_FDATASYNC = 4152 + SYS__SYSCTL = 4153 + SYS_MLOCK = 4154 + SYS_MUNLOCK = 4155 + SYS_MLOCKALL = 4156 + SYS_MUNLOCKALL = 4157 + SYS_SCHED_SETPARAM = 4158 + SYS_SCHED_GETPARAM = 4159 + SYS_SCHED_SETSCHEDULER = 4160 + SYS_SCHED_GETSCHEDULER = 4161 + SYS_SCHED_YIELD = 4162 + SYS_SCHED_GET_PRIORITY_MAX = 4163 + SYS_SCHED_GET_PRIORITY_MIN = 4164 + SYS_SCHED_RR_GET_INTERVAL = 4165 + SYS_NANOSLEEP = 4166 + SYS_MREMAP = 4167 + SYS_ACCEPT = 4168 + SYS_BIND = 4169 + SYS_CONNECT = 4170 + SYS_GETPEERNAME = 4171 + SYS_GETSOCKNAME = 4172 + SYS_GETSOCKOPT = 4173 + SYS_LISTEN = 4174 + SYS_RECV = 4175 + SYS_RECVFROM = 4176 + SYS_RECVMSG = 4177 + SYS_SEND = 4178 + SYS_SENDMSG = 4179 + SYS_SENDTO = 4180 + SYS_SETSOCKOPT = 4181 + SYS_SHUTDOWN = 4182 + SYS_SOCKET = 4183 + SYS_SOCKETPAIR = 4184 + SYS_SETRESUID = 4185 + SYS_GETRESUID = 4186 + SYS_QUERY_MODULE = 4187 + SYS_POLL = 4188 + SYS_NFSSERVCTL = 4189 + SYS_SETRESGID = 4190 + SYS_GETRESGID = 4191 + SYS_PRCTL = 4192 + SYS_RT_SIGRETURN = 4193 + SYS_RT_SIGACTION = 4194 + SYS_RT_SIGPROCMASK = 4195 + SYS_RT_SIGPENDING = 4196 + SYS_RT_SIGTIMEDWAIT = 4197 + SYS_RT_SIGQUEUEINFO = 4198 + SYS_RT_SIGSUSPEND = 4199 + SYS_PREAD64 = 4200 + SYS_PWRITE64 = 4201 + SYS_CHOWN = 4202 + SYS_GETCWD = 4203 + SYS_CAPGET = 4204 + SYS_CAPSET = 4205 + SYS_SIGALTSTACK = 4206 + SYS_SENDFILE = 4207 + SYS_GETPMSG = 4208 + SYS_PUTPMSG = 4209 + SYS_MMAP2 = 4210 + SYS_TRUNCATE64 = 4211 + SYS_FTRUNCATE64 = 4212 + SYS_STAT64 = 4213 + SYS_LSTAT64 = 4214 + SYS_FSTAT64 = 4215 + SYS_PIVOT_ROOT = 4216 + SYS_MINCORE = 4217 + SYS_MADVISE = 4218 + SYS_GETDENTS64 = 4219 + SYS_FCNTL64 = 4220 + SYS_RESERVED221 = 4221 + SYS_GETTID = 4222 + SYS_READAHEAD = 4223 + SYS_SETXATTR = 4224 + SYS_LSETXATTR = 4225 + SYS_FSETXATTR = 4226 + SYS_GETXATTR = 4227 + SYS_LGETXATTR = 4228 + SYS_FGETXATTR = 4229 + SYS_LISTXATTR = 4230 + SYS_LLISTXATTR = 4231 + SYS_FLISTXATTR = 4232 + SYS_REMOVEXATTR = 4233 + SYS_LREMOVEXATTR = 4234 + SYS_FREMOVEXATTR = 4235 + SYS_TKILL = 4236 + SYS_SENDFILE64 = 4237 + SYS_FUTEX = 4238 + SYS_SCHED_SETAFFINITY = 4239 + SYS_SCHED_GETAFFINITY = 4240 + SYS_IO_SETUP = 4241 + SYS_IO_DESTROY = 4242 + SYS_IO_GETEVENTS = 4243 + SYS_IO_SUBMIT = 4244 + SYS_IO_CANCEL = 4245 + SYS_EXIT_GROUP = 4246 + SYS_LOOKUP_DCOOKIE = 4247 + SYS_EPOLL_CREATE = 4248 + SYS_EPOLL_CTL = 4249 + SYS_EPOLL_WAIT = 4250 + SYS_REMAP_FILE_PAGES = 4251 + SYS_SET_TID_ADDRESS = 4252 + SYS_RESTART_SYSCALL = 4253 + SYS_FADVISE64 = 4254 + SYS_STATFS64 = 4255 + SYS_FSTATFS64 = 4256 + SYS_TIMER_CREATE = 4257 + SYS_TIMER_SETTIME = 4258 + SYS_TIMER_GETTIME = 4259 + SYS_TIMER_GETOVERRUN = 4260 + SYS_TIMER_DELETE = 4261 + SYS_CLOCK_SETTIME = 4262 + SYS_CLOCK_GETTIME = 4263 + SYS_CLOCK_GETRES = 4264 + SYS_CLOCK_NANOSLEEP = 4265 + SYS_TGKILL = 4266 + SYS_UTIMES = 4267 + SYS_MBIND = 4268 + SYS_GET_MEMPOLICY = 4269 + SYS_SET_MEMPOLICY = 4270 + SYS_MQ_OPEN = 4271 + SYS_MQ_UNLINK = 4272 + SYS_MQ_TIMEDSEND = 4273 + SYS_MQ_TIMEDRECEIVE = 4274 + SYS_MQ_NOTIFY = 4275 + SYS_MQ_GETSETATTR = 4276 + SYS_VSERVER = 4277 + SYS_WAITID = 4278 + SYS_ADD_KEY = 4280 + SYS_REQUEST_KEY = 4281 + SYS_KEYCTL = 4282 + SYS_SET_THREAD_AREA = 4283 + SYS_INOTIFY_INIT = 4284 + SYS_INOTIFY_ADD_WATCH = 4285 + SYS_INOTIFY_RM_WATCH = 4286 + SYS_MIGRATE_PAGES = 4287 + SYS_OPENAT = 4288 + SYS_MKDIRAT = 4289 + SYS_MKNODAT = 4290 + SYS_FCHOWNAT = 4291 + SYS_FUTIMESAT = 4292 + SYS_FSTATAT64 = 4293 + SYS_UNLINKAT = 4294 + SYS_RENAMEAT = 4295 + SYS_LINKAT = 4296 + SYS_SYMLINKAT = 4297 + SYS_READLINKAT = 4298 + SYS_FCHMODAT = 4299 + SYS_FACCESSAT = 4300 + SYS_PSELECT6 = 4301 + SYS_PPOLL = 4302 + SYS_UNSHARE = 4303 + SYS_SPLICE = 4304 + SYS_SYNC_FILE_RANGE = 4305 + SYS_TEE = 4306 + SYS_VMSPLICE = 4307 + SYS_MOVE_PAGES = 4308 + SYS_SET_ROBUST_LIST = 4309 + SYS_GET_ROBUST_LIST = 4310 + SYS_KEXEC_LOAD = 4311 + SYS_GETCPU = 4312 + SYS_EPOLL_PWAIT = 4313 + SYS_IOPRIO_SET = 4314 + SYS_IOPRIO_GET = 4315 + SYS_UTIMENSAT = 4316 + SYS_SIGNALFD = 4317 + SYS_TIMERFD = 4318 + SYS_EVENTFD = 4319 + SYS_FALLOCATE = 4320 + SYS_TIMERFD_CREATE = 4321 + SYS_TIMERFD_GETTIME = 4322 + SYS_TIMERFD_SETTIME = 4323 + SYS_SIGNALFD4 = 4324 + SYS_EVENTFD2 = 4325 + SYS_EPOLL_CREATE1 = 4326 + SYS_DUP3 = 4327 + SYS_PIPE2 = 4328 + SYS_INOTIFY_INIT1 = 4329 + SYS_PREADV = 4330 + SYS_PWRITEV = 4331 + SYS_RT_TGSIGQUEUEINFO = 4332 + SYS_PERF_EVENT_OPEN = 4333 + SYS_ACCEPT4 = 4334 + SYS_RECVMMSG = 4335 + SYS_FANOTIFY_INIT = 4336 + SYS_FANOTIFY_MARK = 4337 + SYS_PRLIMIT64 = 4338 + SYS_NAME_TO_HANDLE_AT = 4339 + SYS_OPEN_BY_HANDLE_AT = 4340 + SYS_CLOCK_ADJTIME = 4341 + SYS_SYNCFS = 4342 + SYS_SENDMMSG = 4343 + SYS_SETNS = 4344 + SYS_PROCESS_VM_READV = 4345 + SYS_PROCESS_VM_WRITEV = 4346 + SYS_KCMP = 4347 + SYS_FINIT_MODULE = 4348 + SYS_SCHED_SETATTR = 4349 + SYS_SCHED_GETATTR = 4350 + SYS_RENAMEAT2 = 4351 + SYS_SECCOMP = 4352 + SYS_GETRANDOM = 4353 + SYS_MEMFD_CREATE = 4354 + SYS_BPF = 4355 + SYS_EXECVEAT = 4356 + SYS_USERFAULTFD = 4357 + SYS_MEMBARRIER = 4358 + SYS_MLOCK2 = 4359 + SYS_COPY_FILE_RANGE = 4360 + SYS_PREADV2 = 4361 + SYS_PWRITEV2 = 4362 + SYS_PKEY_MPROTECT = 4363 + SYS_PKEY_ALLOC = 4364 + SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go new file mode 100644 index 0000000..9282763 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -0,0 +1,335 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64,linux + +package unix + +const ( + SYS_READ = 5000 + SYS_WRITE = 5001 + SYS_OPEN = 5002 + SYS_CLOSE = 5003 + SYS_STAT = 5004 + SYS_FSTAT = 5005 + SYS_LSTAT = 5006 + SYS_POLL = 5007 + SYS_LSEEK = 5008 + SYS_MMAP = 5009 + SYS_MPROTECT = 5010 + SYS_MUNMAP = 5011 + SYS_BRK = 5012 + SYS_RT_SIGACTION = 5013 + SYS_RT_SIGPROCMASK = 5014 + SYS_IOCTL = 5015 + SYS_PREAD64 = 5016 + SYS_PWRITE64 = 5017 + SYS_READV = 5018 + SYS_WRITEV = 5019 + SYS_ACCESS = 5020 + SYS_PIPE = 5021 + SYS__NEWSELECT = 5022 + SYS_SCHED_YIELD = 5023 + SYS_MREMAP = 5024 + SYS_MSYNC = 5025 + SYS_MINCORE = 5026 + SYS_MADVISE = 5027 + SYS_SHMGET = 5028 + SYS_SHMAT = 5029 + SYS_SHMCTL = 5030 + SYS_DUP = 5031 + SYS_DUP2 = 5032 + SYS_PAUSE = 5033 + SYS_NANOSLEEP = 5034 + SYS_GETITIMER = 5035 + SYS_SETITIMER = 5036 + SYS_ALARM = 5037 + SYS_GETPID = 5038 + SYS_SENDFILE = 5039 + SYS_SOCKET = 5040 + SYS_CONNECT = 5041 + SYS_ACCEPT = 5042 + SYS_SENDTO = 5043 + SYS_RECVFROM = 5044 + SYS_SENDMSG = 5045 + SYS_RECVMSG = 5046 + SYS_SHUTDOWN = 5047 + SYS_BIND = 5048 + SYS_LISTEN = 5049 + SYS_GETSOCKNAME = 5050 + SYS_GETPEERNAME = 5051 + SYS_SOCKETPAIR = 5052 + SYS_SETSOCKOPT = 5053 + SYS_GETSOCKOPT = 5054 + SYS_CLONE = 5055 + SYS_FORK = 5056 + SYS_EXECVE = 5057 + SYS_EXIT = 5058 + SYS_WAIT4 = 5059 + SYS_KILL = 5060 + SYS_UNAME = 5061 + SYS_SEMGET = 5062 + SYS_SEMOP = 5063 + SYS_SEMCTL = 5064 + SYS_SHMDT = 5065 + SYS_MSGGET = 5066 + SYS_MSGSND = 5067 + SYS_MSGRCV = 5068 + SYS_MSGCTL = 5069 + SYS_FCNTL = 5070 + SYS_FLOCK = 5071 + SYS_FSYNC = 5072 + SYS_FDATASYNC = 5073 + SYS_TRUNCATE = 5074 + SYS_FTRUNCATE = 5075 + SYS_GETDENTS = 5076 + SYS_GETCWD = 5077 + SYS_CHDIR = 5078 + SYS_FCHDIR = 5079 + SYS_RENAME = 5080 + SYS_MKDIR = 5081 + SYS_RMDIR = 5082 + SYS_CREAT = 5083 + SYS_LINK = 5084 + SYS_UNLINK = 5085 + SYS_SYMLINK = 5086 + SYS_READLINK = 5087 + SYS_CHMOD = 5088 + SYS_FCHMOD = 5089 + SYS_CHOWN = 5090 + SYS_FCHOWN = 5091 + SYS_LCHOWN = 5092 + SYS_UMASK = 5093 + SYS_GETTIMEOFDAY = 5094 + SYS_GETRLIMIT = 5095 + SYS_GETRUSAGE = 5096 + SYS_SYSINFO = 5097 + SYS_TIMES = 5098 + SYS_PTRACE = 5099 + SYS_GETUID = 5100 + SYS_SYSLOG = 5101 + SYS_GETGID = 5102 + SYS_SETUID = 5103 + SYS_SETGID = 5104 + SYS_GETEUID = 5105 + SYS_GETEGID = 5106 + SYS_SETPGID = 5107 + SYS_GETPPID = 5108 + SYS_GETPGRP = 5109 + SYS_SETSID = 5110 + SYS_SETREUID = 5111 + SYS_SETREGID = 5112 + SYS_GETGROUPS = 5113 + SYS_SETGROUPS = 5114 + SYS_SETRESUID = 5115 + SYS_GETRESUID = 5116 + SYS_SETRESGID = 5117 + SYS_GETRESGID = 5118 + SYS_GETPGID = 5119 + SYS_SETFSUID = 5120 + SYS_SETFSGID = 5121 + SYS_GETSID = 5122 + SYS_CAPGET = 5123 + SYS_CAPSET = 5124 + SYS_RT_SIGPENDING = 5125 + SYS_RT_SIGTIMEDWAIT = 5126 + SYS_RT_SIGQUEUEINFO = 5127 + SYS_RT_SIGSUSPEND = 5128 + SYS_SIGALTSTACK = 5129 + SYS_UTIME = 5130 + SYS_MKNOD = 5131 + SYS_PERSONALITY = 5132 + SYS_USTAT = 5133 + SYS_STATFS = 5134 + SYS_FSTATFS = 5135 + SYS_SYSFS = 5136 + SYS_GETPRIORITY = 5137 + SYS_SETPRIORITY = 5138 + SYS_SCHED_SETPARAM = 5139 + SYS_SCHED_GETPARAM = 5140 + SYS_SCHED_SETSCHEDULER = 5141 + SYS_SCHED_GETSCHEDULER = 5142 + SYS_SCHED_GET_PRIORITY_MAX = 5143 + SYS_SCHED_GET_PRIORITY_MIN = 5144 + SYS_SCHED_RR_GET_INTERVAL = 5145 + SYS_MLOCK = 5146 + SYS_MUNLOCK = 5147 + SYS_MLOCKALL = 5148 + SYS_MUNLOCKALL = 5149 + SYS_VHANGUP = 5150 + SYS_PIVOT_ROOT = 5151 + SYS__SYSCTL = 5152 + SYS_PRCTL = 5153 + SYS_ADJTIMEX = 5154 + SYS_SETRLIMIT = 5155 + SYS_CHROOT = 5156 + SYS_SYNC = 5157 + SYS_ACCT = 5158 + SYS_SETTIMEOFDAY = 5159 + SYS_MOUNT = 5160 + SYS_UMOUNT2 = 5161 + SYS_SWAPON = 5162 + SYS_SWAPOFF = 5163 + SYS_REBOOT = 5164 + SYS_SETHOSTNAME = 5165 + SYS_SETDOMAINNAME = 5166 + SYS_CREATE_MODULE = 5167 + SYS_INIT_MODULE = 5168 + SYS_DELETE_MODULE = 5169 + SYS_GET_KERNEL_SYMS = 5170 + SYS_QUERY_MODULE = 5171 + SYS_QUOTACTL = 5172 + SYS_NFSSERVCTL = 5173 + SYS_GETPMSG = 5174 + SYS_PUTPMSG = 5175 + SYS_AFS_SYSCALL = 5176 + SYS_RESERVED177 = 5177 + SYS_GETTID = 5178 + SYS_READAHEAD = 5179 + SYS_SETXATTR = 5180 + SYS_LSETXATTR = 5181 + SYS_FSETXATTR = 5182 + SYS_GETXATTR = 5183 + SYS_LGETXATTR = 5184 + SYS_FGETXATTR = 5185 + SYS_LISTXATTR = 5186 + SYS_LLISTXATTR = 5187 + SYS_FLISTXATTR = 5188 + SYS_REMOVEXATTR = 5189 + SYS_LREMOVEXATTR = 5190 + SYS_FREMOVEXATTR = 5191 + SYS_TKILL = 5192 + SYS_RESERVED193 = 5193 + SYS_FUTEX = 5194 + SYS_SCHED_SETAFFINITY = 5195 + SYS_SCHED_GETAFFINITY = 5196 + SYS_CACHEFLUSH = 5197 + SYS_CACHECTL = 5198 + SYS_SYSMIPS = 5199 + SYS_IO_SETUP = 5200 + SYS_IO_DESTROY = 5201 + SYS_IO_GETEVENTS = 5202 + SYS_IO_SUBMIT = 5203 + SYS_IO_CANCEL = 5204 + SYS_EXIT_GROUP = 5205 + SYS_LOOKUP_DCOOKIE = 5206 + SYS_EPOLL_CREATE = 5207 + SYS_EPOLL_CTL = 5208 + SYS_EPOLL_WAIT = 5209 + SYS_REMAP_FILE_PAGES = 5210 + SYS_RT_SIGRETURN = 5211 + SYS_SET_TID_ADDRESS = 5212 + SYS_RESTART_SYSCALL = 5213 + SYS_SEMTIMEDOP = 5214 + SYS_FADVISE64 = 5215 + SYS_TIMER_CREATE = 5216 + SYS_TIMER_SETTIME = 5217 + SYS_TIMER_GETTIME = 5218 + SYS_TIMER_GETOVERRUN = 5219 + SYS_TIMER_DELETE = 5220 + SYS_CLOCK_SETTIME = 5221 + SYS_CLOCK_GETTIME = 5222 + SYS_CLOCK_GETRES = 5223 + SYS_CLOCK_NANOSLEEP = 5224 + SYS_TGKILL = 5225 + SYS_UTIMES = 5226 + SYS_MBIND = 5227 + SYS_GET_MEMPOLICY = 5228 + SYS_SET_MEMPOLICY = 5229 + SYS_MQ_OPEN = 5230 + SYS_MQ_UNLINK = 5231 + SYS_MQ_TIMEDSEND = 5232 + SYS_MQ_TIMEDRECEIVE = 5233 + SYS_MQ_NOTIFY = 5234 + SYS_MQ_GETSETATTR = 5235 + SYS_VSERVER = 5236 + SYS_WAITID = 5237 + SYS_ADD_KEY = 5239 + SYS_REQUEST_KEY = 5240 + SYS_KEYCTL = 5241 + SYS_SET_THREAD_AREA = 5242 + SYS_INOTIFY_INIT = 5243 + SYS_INOTIFY_ADD_WATCH = 5244 + SYS_INOTIFY_RM_WATCH = 5245 + SYS_MIGRATE_PAGES = 5246 + SYS_OPENAT = 5247 + SYS_MKDIRAT = 5248 + SYS_MKNODAT = 5249 + SYS_FCHOWNAT = 5250 + SYS_FUTIMESAT = 5251 + SYS_NEWFSTATAT = 5252 + SYS_UNLINKAT = 5253 + SYS_RENAMEAT = 5254 + SYS_LINKAT = 5255 + SYS_SYMLINKAT = 5256 + SYS_READLINKAT = 5257 + SYS_FCHMODAT = 5258 + SYS_FACCESSAT = 5259 + SYS_PSELECT6 = 5260 + SYS_PPOLL = 5261 + SYS_UNSHARE = 5262 + SYS_SPLICE = 5263 + SYS_SYNC_FILE_RANGE = 5264 + SYS_TEE = 5265 + SYS_VMSPLICE = 5266 + SYS_MOVE_PAGES = 5267 + SYS_SET_ROBUST_LIST = 5268 + SYS_GET_ROBUST_LIST = 5269 + SYS_KEXEC_LOAD = 5270 + SYS_GETCPU = 5271 + SYS_EPOLL_PWAIT = 5272 + SYS_IOPRIO_SET = 5273 + SYS_IOPRIO_GET = 5274 + SYS_UTIMENSAT = 5275 + SYS_SIGNALFD = 5276 + SYS_TIMERFD = 5277 + SYS_EVENTFD = 5278 + SYS_FALLOCATE = 5279 + SYS_TIMERFD_CREATE = 5280 + SYS_TIMERFD_GETTIME = 5281 + SYS_TIMERFD_SETTIME = 5282 + SYS_SIGNALFD4 = 5283 + SYS_EVENTFD2 = 5284 + SYS_EPOLL_CREATE1 = 5285 + SYS_DUP3 = 5286 + SYS_PIPE2 = 5287 + SYS_INOTIFY_INIT1 = 5288 + SYS_PREADV = 5289 + SYS_PWRITEV = 5290 + SYS_RT_TGSIGQUEUEINFO = 5291 + SYS_PERF_EVENT_OPEN = 5292 + SYS_ACCEPT4 = 5293 + SYS_RECVMMSG = 5294 + SYS_FANOTIFY_INIT = 5295 + SYS_FANOTIFY_MARK = 5296 + SYS_PRLIMIT64 = 5297 + SYS_NAME_TO_HANDLE_AT = 5298 + SYS_OPEN_BY_HANDLE_AT = 5299 + SYS_CLOCK_ADJTIME = 5300 + SYS_SYNCFS = 5301 + SYS_SENDMMSG = 5302 + SYS_SETNS = 5303 + SYS_PROCESS_VM_READV = 5304 + SYS_PROCESS_VM_WRITEV = 5305 + SYS_KCMP = 5306 + SYS_FINIT_MODULE = 5307 + SYS_GETDENTS64 = 5308 + SYS_SCHED_SETATTR = 5309 + SYS_SCHED_GETATTR = 5310 + SYS_RENAMEAT2 = 5311 + SYS_SECCOMP = 5312 + SYS_GETRANDOM = 5313 + SYS_MEMFD_CREATE = 5314 + SYS_BPF = 5315 + SYS_EXECVEAT = 5316 + SYS_USERFAULTFD = 5317 + SYS_MEMBARRIER = 5318 + SYS_MLOCK2 = 5319 + SYS_COPY_FILE_RANGE = 5320 + SYS_PREADV2 = 5321 + SYS_PWRITEV2 = 5322 + SYS_PKEY_MPROTECT = 5323 + SYS_PKEY_ALLOC = 5324 + SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go new file mode 100644 index 0000000..45bd3fd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -0,0 +1,335 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64le,linux + +package unix + +const ( + SYS_READ = 5000 + SYS_WRITE = 5001 + SYS_OPEN = 5002 + SYS_CLOSE = 5003 + SYS_STAT = 5004 + SYS_FSTAT = 5005 + SYS_LSTAT = 5006 + SYS_POLL = 5007 + SYS_LSEEK = 5008 + SYS_MMAP = 5009 + SYS_MPROTECT = 5010 + SYS_MUNMAP = 5011 + SYS_BRK = 5012 + SYS_RT_SIGACTION = 5013 + SYS_RT_SIGPROCMASK = 5014 + SYS_IOCTL = 5015 + SYS_PREAD64 = 5016 + SYS_PWRITE64 = 5017 + SYS_READV = 5018 + SYS_WRITEV = 5019 + SYS_ACCESS = 5020 + SYS_PIPE = 5021 + SYS__NEWSELECT = 5022 + SYS_SCHED_YIELD = 5023 + SYS_MREMAP = 5024 + SYS_MSYNC = 5025 + SYS_MINCORE = 5026 + SYS_MADVISE = 5027 + SYS_SHMGET = 5028 + SYS_SHMAT = 5029 + SYS_SHMCTL = 5030 + SYS_DUP = 5031 + SYS_DUP2 = 5032 + SYS_PAUSE = 5033 + SYS_NANOSLEEP = 5034 + SYS_GETITIMER = 5035 + SYS_SETITIMER = 5036 + SYS_ALARM = 5037 + SYS_GETPID = 5038 + SYS_SENDFILE = 5039 + SYS_SOCKET = 5040 + SYS_CONNECT = 5041 + SYS_ACCEPT = 5042 + SYS_SENDTO = 5043 + SYS_RECVFROM = 5044 + SYS_SENDMSG = 5045 + SYS_RECVMSG = 5046 + SYS_SHUTDOWN = 5047 + SYS_BIND = 5048 + SYS_LISTEN = 5049 + SYS_GETSOCKNAME = 5050 + SYS_GETPEERNAME = 5051 + SYS_SOCKETPAIR = 5052 + SYS_SETSOCKOPT = 5053 + SYS_GETSOCKOPT = 5054 + SYS_CLONE = 5055 + SYS_FORK = 5056 + SYS_EXECVE = 5057 + SYS_EXIT = 5058 + SYS_WAIT4 = 5059 + SYS_KILL = 5060 + SYS_UNAME = 5061 + SYS_SEMGET = 5062 + SYS_SEMOP = 5063 + SYS_SEMCTL = 5064 + SYS_SHMDT = 5065 + SYS_MSGGET = 5066 + SYS_MSGSND = 5067 + SYS_MSGRCV = 5068 + SYS_MSGCTL = 5069 + SYS_FCNTL = 5070 + SYS_FLOCK = 5071 + SYS_FSYNC = 5072 + SYS_FDATASYNC = 5073 + SYS_TRUNCATE = 5074 + SYS_FTRUNCATE = 5075 + SYS_GETDENTS = 5076 + SYS_GETCWD = 5077 + SYS_CHDIR = 5078 + SYS_FCHDIR = 5079 + SYS_RENAME = 5080 + SYS_MKDIR = 5081 + SYS_RMDIR = 5082 + SYS_CREAT = 5083 + SYS_LINK = 5084 + SYS_UNLINK = 5085 + SYS_SYMLINK = 5086 + SYS_READLINK = 5087 + SYS_CHMOD = 5088 + SYS_FCHMOD = 5089 + SYS_CHOWN = 5090 + SYS_FCHOWN = 5091 + SYS_LCHOWN = 5092 + SYS_UMASK = 5093 + SYS_GETTIMEOFDAY = 5094 + SYS_GETRLIMIT = 5095 + SYS_GETRUSAGE = 5096 + SYS_SYSINFO = 5097 + SYS_TIMES = 5098 + SYS_PTRACE = 5099 + SYS_GETUID = 5100 + SYS_SYSLOG = 5101 + SYS_GETGID = 5102 + SYS_SETUID = 5103 + SYS_SETGID = 5104 + SYS_GETEUID = 5105 + SYS_GETEGID = 5106 + SYS_SETPGID = 5107 + SYS_GETPPID = 5108 + SYS_GETPGRP = 5109 + SYS_SETSID = 5110 + SYS_SETREUID = 5111 + SYS_SETREGID = 5112 + SYS_GETGROUPS = 5113 + SYS_SETGROUPS = 5114 + SYS_SETRESUID = 5115 + SYS_GETRESUID = 5116 + SYS_SETRESGID = 5117 + SYS_GETRESGID = 5118 + SYS_GETPGID = 5119 + SYS_SETFSUID = 5120 + SYS_SETFSGID = 5121 + SYS_GETSID = 5122 + SYS_CAPGET = 5123 + SYS_CAPSET = 5124 + SYS_RT_SIGPENDING = 5125 + SYS_RT_SIGTIMEDWAIT = 5126 + SYS_RT_SIGQUEUEINFO = 5127 + SYS_RT_SIGSUSPEND = 5128 + SYS_SIGALTSTACK = 5129 + SYS_UTIME = 5130 + SYS_MKNOD = 5131 + SYS_PERSONALITY = 5132 + SYS_USTAT = 5133 + SYS_STATFS = 5134 + SYS_FSTATFS = 5135 + SYS_SYSFS = 5136 + SYS_GETPRIORITY = 5137 + SYS_SETPRIORITY = 5138 + SYS_SCHED_SETPARAM = 5139 + SYS_SCHED_GETPARAM = 5140 + SYS_SCHED_SETSCHEDULER = 5141 + SYS_SCHED_GETSCHEDULER = 5142 + SYS_SCHED_GET_PRIORITY_MAX = 5143 + SYS_SCHED_GET_PRIORITY_MIN = 5144 + SYS_SCHED_RR_GET_INTERVAL = 5145 + SYS_MLOCK = 5146 + SYS_MUNLOCK = 5147 + SYS_MLOCKALL = 5148 + SYS_MUNLOCKALL = 5149 + SYS_VHANGUP = 5150 + SYS_PIVOT_ROOT = 5151 + SYS__SYSCTL = 5152 + SYS_PRCTL = 5153 + SYS_ADJTIMEX = 5154 + SYS_SETRLIMIT = 5155 + SYS_CHROOT = 5156 + SYS_SYNC = 5157 + SYS_ACCT = 5158 + SYS_SETTIMEOFDAY = 5159 + SYS_MOUNT = 5160 + SYS_UMOUNT2 = 5161 + SYS_SWAPON = 5162 + SYS_SWAPOFF = 5163 + SYS_REBOOT = 5164 + SYS_SETHOSTNAME = 5165 + SYS_SETDOMAINNAME = 5166 + SYS_CREATE_MODULE = 5167 + SYS_INIT_MODULE = 5168 + SYS_DELETE_MODULE = 5169 + SYS_GET_KERNEL_SYMS = 5170 + SYS_QUERY_MODULE = 5171 + SYS_QUOTACTL = 5172 + SYS_NFSSERVCTL = 5173 + SYS_GETPMSG = 5174 + SYS_PUTPMSG = 5175 + SYS_AFS_SYSCALL = 5176 + SYS_RESERVED177 = 5177 + SYS_GETTID = 5178 + SYS_READAHEAD = 5179 + SYS_SETXATTR = 5180 + SYS_LSETXATTR = 5181 + SYS_FSETXATTR = 5182 + SYS_GETXATTR = 5183 + SYS_LGETXATTR = 5184 + SYS_FGETXATTR = 5185 + SYS_LISTXATTR = 5186 + SYS_LLISTXATTR = 5187 + SYS_FLISTXATTR = 5188 + SYS_REMOVEXATTR = 5189 + SYS_LREMOVEXATTR = 5190 + SYS_FREMOVEXATTR = 5191 + SYS_TKILL = 5192 + SYS_RESERVED193 = 5193 + SYS_FUTEX = 5194 + SYS_SCHED_SETAFFINITY = 5195 + SYS_SCHED_GETAFFINITY = 5196 + SYS_CACHEFLUSH = 5197 + SYS_CACHECTL = 5198 + SYS_SYSMIPS = 5199 + SYS_IO_SETUP = 5200 + SYS_IO_DESTROY = 5201 + SYS_IO_GETEVENTS = 5202 + SYS_IO_SUBMIT = 5203 + SYS_IO_CANCEL = 5204 + SYS_EXIT_GROUP = 5205 + SYS_LOOKUP_DCOOKIE = 5206 + SYS_EPOLL_CREATE = 5207 + SYS_EPOLL_CTL = 5208 + SYS_EPOLL_WAIT = 5209 + SYS_REMAP_FILE_PAGES = 5210 + SYS_RT_SIGRETURN = 5211 + SYS_SET_TID_ADDRESS = 5212 + SYS_RESTART_SYSCALL = 5213 + SYS_SEMTIMEDOP = 5214 + SYS_FADVISE64 = 5215 + SYS_TIMER_CREATE = 5216 + SYS_TIMER_SETTIME = 5217 + SYS_TIMER_GETTIME = 5218 + SYS_TIMER_GETOVERRUN = 5219 + SYS_TIMER_DELETE = 5220 + SYS_CLOCK_SETTIME = 5221 + SYS_CLOCK_GETTIME = 5222 + SYS_CLOCK_GETRES = 5223 + SYS_CLOCK_NANOSLEEP = 5224 + SYS_TGKILL = 5225 + SYS_UTIMES = 5226 + SYS_MBIND = 5227 + SYS_GET_MEMPOLICY = 5228 + SYS_SET_MEMPOLICY = 5229 + SYS_MQ_OPEN = 5230 + SYS_MQ_UNLINK = 5231 + SYS_MQ_TIMEDSEND = 5232 + SYS_MQ_TIMEDRECEIVE = 5233 + SYS_MQ_NOTIFY = 5234 + SYS_MQ_GETSETATTR = 5235 + SYS_VSERVER = 5236 + SYS_WAITID = 5237 + SYS_ADD_KEY = 5239 + SYS_REQUEST_KEY = 5240 + SYS_KEYCTL = 5241 + SYS_SET_THREAD_AREA = 5242 + SYS_INOTIFY_INIT = 5243 + SYS_INOTIFY_ADD_WATCH = 5244 + SYS_INOTIFY_RM_WATCH = 5245 + SYS_MIGRATE_PAGES = 5246 + SYS_OPENAT = 5247 + SYS_MKDIRAT = 5248 + SYS_MKNODAT = 5249 + SYS_FCHOWNAT = 5250 + SYS_FUTIMESAT = 5251 + SYS_NEWFSTATAT = 5252 + SYS_UNLINKAT = 5253 + SYS_RENAMEAT = 5254 + SYS_LINKAT = 5255 + SYS_SYMLINKAT = 5256 + SYS_READLINKAT = 5257 + SYS_FCHMODAT = 5258 + SYS_FACCESSAT = 5259 + SYS_PSELECT6 = 5260 + SYS_PPOLL = 5261 + SYS_UNSHARE = 5262 + SYS_SPLICE = 5263 + SYS_SYNC_FILE_RANGE = 5264 + SYS_TEE = 5265 + SYS_VMSPLICE = 5266 + SYS_MOVE_PAGES = 5267 + SYS_SET_ROBUST_LIST = 5268 + SYS_GET_ROBUST_LIST = 5269 + SYS_KEXEC_LOAD = 5270 + SYS_GETCPU = 5271 + SYS_EPOLL_PWAIT = 5272 + SYS_IOPRIO_SET = 5273 + SYS_IOPRIO_GET = 5274 + SYS_UTIMENSAT = 5275 + SYS_SIGNALFD = 5276 + SYS_TIMERFD = 5277 + SYS_EVENTFD = 5278 + SYS_FALLOCATE = 5279 + SYS_TIMERFD_CREATE = 5280 + SYS_TIMERFD_GETTIME = 5281 + SYS_TIMERFD_SETTIME = 5282 + SYS_SIGNALFD4 = 5283 + SYS_EVENTFD2 = 5284 + SYS_EPOLL_CREATE1 = 5285 + SYS_DUP3 = 5286 + SYS_PIPE2 = 5287 + SYS_INOTIFY_INIT1 = 5288 + SYS_PREADV = 5289 + SYS_PWRITEV = 5290 + SYS_RT_TGSIGQUEUEINFO = 5291 + SYS_PERF_EVENT_OPEN = 5292 + SYS_ACCEPT4 = 5293 + SYS_RECVMMSG = 5294 + SYS_FANOTIFY_INIT = 5295 + SYS_FANOTIFY_MARK = 5296 + SYS_PRLIMIT64 = 5297 + SYS_NAME_TO_HANDLE_AT = 5298 + SYS_OPEN_BY_HANDLE_AT = 5299 + SYS_CLOCK_ADJTIME = 5300 + SYS_SYNCFS = 5301 + SYS_SENDMMSG = 5302 + SYS_SETNS = 5303 + SYS_PROCESS_VM_READV = 5304 + SYS_PROCESS_VM_WRITEV = 5305 + SYS_KCMP = 5306 + SYS_FINIT_MODULE = 5307 + SYS_GETDENTS64 = 5308 + SYS_SCHED_SETATTR = 5309 + SYS_SCHED_GETATTR = 5310 + SYS_RENAMEAT2 = 5311 + SYS_SECCOMP = 5312 + SYS_GETRANDOM = 5313 + SYS_MEMFD_CREATE = 5314 + SYS_BPF = 5315 + SYS_EXECVEAT = 5316 + SYS_USERFAULTFD = 5317 + SYS_MEMBARRIER = 5318 + SYS_MLOCK2 = 5319 + SYS_COPY_FILE_RANGE = 5320 + SYS_PREADV2 = 5321 + SYS_PWRITEV2 = 5322 + SYS_PKEY_MPROTECT = 5323 + SYS_PKEY_ALLOC = 5324 + SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go new file mode 100644 index 0000000..62ccac4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -0,0 +1,375 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mipsle,linux + +package unix + +const ( + SYS_SYSCALL = 4000 + SYS_EXIT = 4001 + SYS_FORK = 4002 + SYS_READ = 4003 + SYS_WRITE = 4004 + SYS_OPEN = 4005 + SYS_CLOSE = 4006 + SYS_WAITPID = 4007 + SYS_CREAT = 4008 + SYS_LINK = 4009 + SYS_UNLINK = 4010 + SYS_EXECVE = 4011 + SYS_CHDIR = 4012 + SYS_TIME = 4013 + SYS_MKNOD = 4014 + SYS_CHMOD = 4015 + SYS_LCHOWN = 4016 + SYS_BREAK = 4017 + SYS_UNUSED18 = 4018 + SYS_LSEEK = 4019 + SYS_GETPID = 4020 + SYS_MOUNT = 4021 + SYS_UMOUNT = 4022 + SYS_SETUID = 4023 + SYS_GETUID = 4024 + SYS_STIME = 4025 + SYS_PTRACE = 4026 + SYS_ALARM = 4027 + SYS_UNUSED28 = 4028 + SYS_PAUSE = 4029 + SYS_UTIME = 4030 + SYS_STTY = 4031 + SYS_GTTY = 4032 + SYS_ACCESS = 4033 + SYS_NICE = 4034 + SYS_FTIME = 4035 + SYS_SYNC = 4036 + SYS_KILL = 4037 + SYS_RENAME = 4038 + SYS_MKDIR = 4039 + SYS_RMDIR = 4040 + SYS_DUP = 4041 + SYS_PIPE = 4042 + SYS_TIMES = 4043 + SYS_PROF = 4044 + SYS_BRK = 4045 + SYS_SETGID = 4046 + SYS_GETGID = 4047 + SYS_SIGNAL = 4048 + SYS_GETEUID = 4049 + SYS_GETEGID = 4050 + SYS_ACCT = 4051 + SYS_UMOUNT2 = 4052 + SYS_LOCK = 4053 + SYS_IOCTL = 4054 + SYS_FCNTL = 4055 + SYS_MPX = 4056 + SYS_SETPGID = 4057 + SYS_ULIMIT = 4058 + SYS_UNUSED59 = 4059 + SYS_UMASK = 4060 + SYS_CHROOT = 4061 + SYS_USTAT = 4062 + SYS_DUP2 = 4063 + SYS_GETPPID = 4064 + SYS_GETPGRP = 4065 + SYS_SETSID = 4066 + SYS_SIGACTION = 4067 + SYS_SGETMASK = 4068 + SYS_SSETMASK = 4069 + SYS_SETREUID = 4070 + SYS_SETREGID = 4071 + SYS_SIGSUSPEND = 4072 + SYS_SIGPENDING = 4073 + SYS_SETHOSTNAME = 4074 + SYS_SETRLIMIT = 4075 + SYS_GETRLIMIT = 4076 + SYS_GETRUSAGE = 4077 + SYS_GETTIMEOFDAY = 4078 + SYS_SETTIMEOFDAY = 4079 + SYS_GETGROUPS = 4080 + SYS_SETGROUPS = 4081 + SYS_RESERVED82 = 4082 + SYS_SYMLINK = 4083 + SYS_UNUSED84 = 4084 + SYS_READLINK = 4085 + SYS_USELIB = 4086 + SYS_SWAPON = 4087 + SYS_REBOOT = 4088 + SYS_READDIR = 4089 + SYS_MMAP = 4090 + SYS_MUNMAP = 4091 + SYS_TRUNCATE = 4092 + SYS_FTRUNCATE = 4093 + SYS_FCHMOD = 4094 + SYS_FCHOWN = 4095 + SYS_GETPRIORITY = 4096 + SYS_SETPRIORITY = 4097 + SYS_PROFIL = 4098 + SYS_STATFS = 4099 + SYS_FSTATFS = 4100 + SYS_IOPERM = 4101 + SYS_SOCKETCALL = 4102 + SYS_SYSLOG = 4103 + SYS_SETITIMER = 4104 + SYS_GETITIMER = 4105 + SYS_STAT = 4106 + SYS_LSTAT = 4107 + SYS_FSTAT = 4108 + SYS_UNUSED109 = 4109 + SYS_IOPL = 4110 + SYS_VHANGUP = 4111 + SYS_IDLE = 4112 + SYS_VM86 = 4113 + SYS_WAIT4 = 4114 + SYS_SWAPOFF = 4115 + SYS_SYSINFO = 4116 + SYS_IPC = 4117 + SYS_FSYNC = 4118 + SYS_SIGRETURN = 4119 + SYS_CLONE = 4120 + SYS_SETDOMAINNAME = 4121 + SYS_UNAME = 4122 + SYS_MODIFY_LDT = 4123 + SYS_ADJTIMEX = 4124 + SYS_MPROTECT = 4125 + SYS_SIGPROCMASK = 4126 + SYS_CREATE_MODULE = 4127 + SYS_INIT_MODULE = 4128 + SYS_DELETE_MODULE = 4129 + SYS_GET_KERNEL_SYMS = 4130 + SYS_QUOTACTL = 4131 + SYS_GETPGID = 4132 + SYS_FCHDIR = 4133 + SYS_BDFLUSH = 4134 + SYS_SYSFS = 4135 + SYS_PERSONALITY = 4136 + SYS_AFS_SYSCALL = 4137 + SYS_SETFSUID = 4138 + SYS_SETFSGID = 4139 + SYS__LLSEEK = 4140 + SYS_GETDENTS = 4141 + SYS__NEWSELECT = 4142 + SYS_FLOCK = 4143 + SYS_MSYNC = 4144 + SYS_READV = 4145 + SYS_WRITEV = 4146 + SYS_CACHEFLUSH = 4147 + SYS_CACHECTL = 4148 + SYS_SYSMIPS = 4149 + SYS_UNUSED150 = 4150 + SYS_GETSID = 4151 + SYS_FDATASYNC = 4152 + SYS__SYSCTL = 4153 + SYS_MLOCK = 4154 + SYS_MUNLOCK = 4155 + SYS_MLOCKALL = 4156 + SYS_MUNLOCKALL = 4157 + SYS_SCHED_SETPARAM = 4158 + SYS_SCHED_GETPARAM = 4159 + SYS_SCHED_SETSCHEDULER = 4160 + SYS_SCHED_GETSCHEDULER = 4161 + SYS_SCHED_YIELD = 4162 + SYS_SCHED_GET_PRIORITY_MAX = 4163 + SYS_SCHED_GET_PRIORITY_MIN = 4164 + SYS_SCHED_RR_GET_INTERVAL = 4165 + SYS_NANOSLEEP = 4166 + SYS_MREMAP = 4167 + SYS_ACCEPT = 4168 + SYS_BIND = 4169 + SYS_CONNECT = 4170 + SYS_GETPEERNAME = 4171 + SYS_GETSOCKNAME = 4172 + SYS_GETSOCKOPT = 4173 + SYS_LISTEN = 4174 + SYS_RECV = 4175 + SYS_RECVFROM = 4176 + SYS_RECVMSG = 4177 + SYS_SEND = 4178 + SYS_SENDMSG = 4179 + SYS_SENDTO = 4180 + SYS_SETSOCKOPT = 4181 + SYS_SHUTDOWN = 4182 + SYS_SOCKET = 4183 + SYS_SOCKETPAIR = 4184 + SYS_SETRESUID = 4185 + SYS_GETRESUID = 4186 + SYS_QUERY_MODULE = 4187 + SYS_POLL = 4188 + SYS_NFSSERVCTL = 4189 + SYS_SETRESGID = 4190 + SYS_GETRESGID = 4191 + SYS_PRCTL = 4192 + SYS_RT_SIGRETURN = 4193 + SYS_RT_SIGACTION = 4194 + SYS_RT_SIGPROCMASK = 4195 + SYS_RT_SIGPENDING = 4196 + SYS_RT_SIGTIMEDWAIT = 4197 + SYS_RT_SIGQUEUEINFO = 4198 + SYS_RT_SIGSUSPEND = 4199 + SYS_PREAD64 = 4200 + SYS_PWRITE64 = 4201 + SYS_CHOWN = 4202 + SYS_GETCWD = 4203 + SYS_CAPGET = 4204 + SYS_CAPSET = 4205 + SYS_SIGALTSTACK = 4206 + SYS_SENDFILE = 4207 + SYS_GETPMSG = 4208 + SYS_PUTPMSG = 4209 + SYS_MMAP2 = 4210 + SYS_TRUNCATE64 = 4211 + SYS_FTRUNCATE64 = 4212 + SYS_STAT64 = 4213 + SYS_LSTAT64 = 4214 + SYS_FSTAT64 = 4215 + SYS_PIVOT_ROOT = 4216 + SYS_MINCORE = 4217 + SYS_MADVISE = 4218 + SYS_GETDENTS64 = 4219 + SYS_FCNTL64 = 4220 + SYS_RESERVED221 = 4221 + SYS_GETTID = 4222 + SYS_READAHEAD = 4223 + SYS_SETXATTR = 4224 + SYS_LSETXATTR = 4225 + SYS_FSETXATTR = 4226 + SYS_GETXATTR = 4227 + SYS_LGETXATTR = 4228 + SYS_FGETXATTR = 4229 + SYS_LISTXATTR = 4230 + SYS_LLISTXATTR = 4231 + SYS_FLISTXATTR = 4232 + SYS_REMOVEXATTR = 4233 + SYS_LREMOVEXATTR = 4234 + SYS_FREMOVEXATTR = 4235 + SYS_TKILL = 4236 + SYS_SENDFILE64 = 4237 + SYS_FUTEX = 4238 + SYS_SCHED_SETAFFINITY = 4239 + SYS_SCHED_GETAFFINITY = 4240 + SYS_IO_SETUP = 4241 + SYS_IO_DESTROY = 4242 + SYS_IO_GETEVENTS = 4243 + SYS_IO_SUBMIT = 4244 + SYS_IO_CANCEL = 4245 + SYS_EXIT_GROUP = 4246 + SYS_LOOKUP_DCOOKIE = 4247 + SYS_EPOLL_CREATE = 4248 + SYS_EPOLL_CTL = 4249 + SYS_EPOLL_WAIT = 4250 + SYS_REMAP_FILE_PAGES = 4251 + SYS_SET_TID_ADDRESS = 4252 + SYS_RESTART_SYSCALL = 4253 + SYS_FADVISE64 = 4254 + SYS_STATFS64 = 4255 + SYS_FSTATFS64 = 4256 + SYS_TIMER_CREATE = 4257 + SYS_TIMER_SETTIME = 4258 + SYS_TIMER_GETTIME = 4259 + SYS_TIMER_GETOVERRUN = 4260 + SYS_TIMER_DELETE = 4261 + SYS_CLOCK_SETTIME = 4262 + SYS_CLOCK_GETTIME = 4263 + SYS_CLOCK_GETRES = 4264 + SYS_CLOCK_NANOSLEEP = 4265 + SYS_TGKILL = 4266 + SYS_UTIMES = 4267 + SYS_MBIND = 4268 + SYS_GET_MEMPOLICY = 4269 + SYS_SET_MEMPOLICY = 4270 + SYS_MQ_OPEN = 4271 + SYS_MQ_UNLINK = 4272 + SYS_MQ_TIMEDSEND = 4273 + SYS_MQ_TIMEDRECEIVE = 4274 + SYS_MQ_NOTIFY = 4275 + SYS_MQ_GETSETATTR = 4276 + SYS_VSERVER = 4277 + SYS_WAITID = 4278 + SYS_ADD_KEY = 4280 + SYS_REQUEST_KEY = 4281 + SYS_KEYCTL = 4282 + SYS_SET_THREAD_AREA = 4283 + SYS_INOTIFY_INIT = 4284 + SYS_INOTIFY_ADD_WATCH = 4285 + SYS_INOTIFY_RM_WATCH = 4286 + SYS_MIGRATE_PAGES = 4287 + SYS_OPENAT = 4288 + SYS_MKDIRAT = 4289 + SYS_MKNODAT = 4290 + SYS_FCHOWNAT = 4291 + SYS_FUTIMESAT = 4292 + SYS_FSTATAT64 = 4293 + SYS_UNLINKAT = 4294 + SYS_RENAMEAT = 4295 + SYS_LINKAT = 4296 + SYS_SYMLINKAT = 4297 + SYS_READLINKAT = 4298 + SYS_FCHMODAT = 4299 + SYS_FACCESSAT = 4300 + SYS_PSELECT6 = 4301 + SYS_PPOLL = 4302 + SYS_UNSHARE = 4303 + SYS_SPLICE = 4304 + SYS_SYNC_FILE_RANGE = 4305 + SYS_TEE = 4306 + SYS_VMSPLICE = 4307 + SYS_MOVE_PAGES = 4308 + SYS_SET_ROBUST_LIST = 4309 + SYS_GET_ROBUST_LIST = 4310 + SYS_KEXEC_LOAD = 4311 + SYS_GETCPU = 4312 + SYS_EPOLL_PWAIT = 4313 + SYS_IOPRIO_SET = 4314 + SYS_IOPRIO_GET = 4315 + SYS_UTIMENSAT = 4316 + SYS_SIGNALFD = 4317 + SYS_TIMERFD = 4318 + SYS_EVENTFD = 4319 + SYS_FALLOCATE = 4320 + SYS_TIMERFD_CREATE = 4321 + SYS_TIMERFD_GETTIME = 4322 + SYS_TIMERFD_SETTIME = 4323 + SYS_SIGNALFD4 = 4324 + SYS_EVENTFD2 = 4325 + SYS_EPOLL_CREATE1 = 4326 + SYS_DUP3 = 4327 + SYS_PIPE2 = 4328 + SYS_INOTIFY_INIT1 = 4329 + SYS_PREADV = 4330 + SYS_PWRITEV = 4331 + SYS_RT_TGSIGQUEUEINFO = 4332 + SYS_PERF_EVENT_OPEN = 4333 + SYS_ACCEPT4 = 4334 + SYS_RECVMMSG = 4335 + SYS_FANOTIFY_INIT = 4336 + SYS_FANOTIFY_MARK = 4337 + SYS_PRLIMIT64 = 4338 + SYS_NAME_TO_HANDLE_AT = 4339 + SYS_OPEN_BY_HANDLE_AT = 4340 + SYS_CLOCK_ADJTIME = 4341 + SYS_SYNCFS = 4342 + SYS_SENDMMSG = 4343 + SYS_SETNS = 4344 + SYS_PROCESS_VM_READV = 4345 + SYS_PROCESS_VM_WRITEV = 4346 + SYS_KCMP = 4347 + SYS_FINIT_MODULE = 4348 + SYS_SCHED_SETATTR = 4349 + SYS_SCHED_GETATTR = 4350 + SYS_RENAMEAT2 = 4351 + SYS_SECCOMP = 4352 + SYS_GETRANDOM = 4353 + SYS_MEMFD_CREATE = 4354 + SYS_BPF = 4355 + SYS_EXECVEAT = 4356 + SYS_USERFAULTFD = 4357 + SYS_MEMBARRIER = 4358 + SYS_MLOCK2 = 4359 + SYS_COPY_FILE_RANGE = 4360 + SYS_PREADV2 = 4361 + SYS_PWRITEV2 = 4362 + SYS_PKEY_MPROTECT = 4363 + SYS_PKEY_ALLOC = 4364 + SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go new file mode 100644 index 0000000..dfe5dab --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -0,0 +1,370 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build ppc64,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go new file mode 100644 index 0000000..eca97f7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -0,0 +1,370 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build ppc64le,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go new file mode 100644 index 0000000..8bf50c8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -0,0 +1,334 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build s390x,linux + +package unix + +const ( + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_RESTART_SYSCALL = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_BRK = 45 + SYS_SIGNAL = 48 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_SETPGID = 57 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_SYMLINK = 83 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_LOOKUP_DCOOKIE = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_GETDENTS = 141 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_QUERY_MODULE = 167 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_GETPMSG = 188 + SYS_PUTPMSG = 189 + SYS_VFORK = 190 + SYS_PIVOT_ROOT = 217 + SYS_MINCORE = 218 + SYS_MADVISE = 219 + SYS_GETDENTS64 = 220 + SYS_READAHEAD = 222 + SYS_SETXATTR = 224 + SYS_LSETXATTR = 225 + SYS_FSETXATTR = 226 + SYS_GETXATTR = 227 + SYS_LGETXATTR = 228 + SYS_FGETXATTR = 229 + SYS_LISTXATTR = 230 + SYS_LLISTXATTR = 231 + SYS_FLISTXATTR = 232 + SYS_REMOVEXATTR = 233 + SYS_LREMOVEXATTR = 234 + SYS_FREMOVEXATTR = 235 + SYS_GETTID = 236 + SYS_TKILL = 237 + SYS_FUTEX = 238 + SYS_SCHED_SETAFFINITY = 239 + SYS_SCHED_GETAFFINITY = 240 + SYS_TGKILL = 241 + SYS_IO_SETUP = 243 + SYS_IO_DESTROY = 244 + SYS_IO_GETEVENTS = 245 + SYS_IO_SUBMIT = 246 + SYS_IO_CANCEL = 247 + SYS_EXIT_GROUP = 248 + SYS_EPOLL_CREATE = 249 + SYS_EPOLL_CTL = 250 + SYS_EPOLL_WAIT = 251 + SYS_SET_TID_ADDRESS = 252 + SYS_FADVISE64 = 253 + SYS_TIMER_CREATE = 254 + SYS_TIMER_SETTIME = 255 + SYS_TIMER_GETTIME = 256 + SYS_TIMER_GETOVERRUN = 257 + SYS_TIMER_DELETE = 258 + SYS_CLOCK_SETTIME = 259 + SYS_CLOCK_GETTIME = 260 + SYS_CLOCK_GETRES = 261 + SYS_CLOCK_NANOSLEEP = 262 + SYS_STATFS64 = 265 + SYS_FSTATFS64 = 266 + SYS_REMAP_FILE_PAGES = 267 + SYS_MBIND = 268 + SYS_GET_MEMPOLICY = 269 + SYS_SET_MEMPOLICY = 270 + SYS_MQ_OPEN = 271 + SYS_MQ_UNLINK = 272 + SYS_MQ_TIMEDSEND = 273 + SYS_MQ_TIMEDRECEIVE = 274 + SYS_MQ_NOTIFY = 275 + SYS_MQ_GETSETATTR = 276 + SYS_KEXEC_LOAD = 277 + SYS_ADD_KEY = 278 + SYS_REQUEST_KEY = 279 + SYS_KEYCTL = 280 + SYS_WAITID = 281 + SYS_IOPRIO_SET = 282 + SYS_IOPRIO_GET = 283 + SYS_INOTIFY_INIT = 284 + SYS_INOTIFY_ADD_WATCH = 285 + SYS_INOTIFY_RM_WATCH = 286 + SYS_MIGRATE_PAGES = 287 + SYS_OPENAT = 288 + SYS_MKDIRAT = 289 + SYS_MKNODAT = 290 + SYS_FCHOWNAT = 291 + SYS_FUTIMESAT = 292 + SYS_UNLINKAT = 294 + SYS_RENAMEAT = 295 + SYS_LINKAT = 296 + SYS_SYMLINKAT = 297 + SYS_READLINKAT = 298 + SYS_FCHMODAT = 299 + SYS_FACCESSAT = 300 + SYS_PSELECT6 = 301 + SYS_PPOLL = 302 + SYS_UNSHARE = 303 + SYS_SET_ROBUST_LIST = 304 + SYS_GET_ROBUST_LIST = 305 + SYS_SPLICE = 306 + SYS_SYNC_FILE_RANGE = 307 + SYS_TEE = 308 + SYS_VMSPLICE = 309 + SYS_MOVE_PAGES = 310 + SYS_GETCPU = 311 + SYS_EPOLL_PWAIT = 312 + SYS_UTIMES = 313 + SYS_FALLOCATE = 314 + SYS_UTIMENSAT = 315 + SYS_SIGNALFD = 316 + SYS_TIMERFD = 317 + SYS_EVENTFD = 318 + SYS_TIMERFD_CREATE = 319 + SYS_TIMERFD_SETTIME = 320 + SYS_TIMERFD_GETTIME = 321 + SYS_SIGNALFD4 = 322 + SYS_EVENTFD2 = 323 + SYS_INOTIFY_INIT1 = 324 + SYS_PIPE2 = 325 + SYS_DUP3 = 326 + SYS_EPOLL_CREATE1 = 327 + SYS_PREADV = 328 + SYS_PWRITEV = 329 + SYS_RT_TGSIGQUEUEINFO = 330 + SYS_PERF_EVENT_OPEN = 331 + SYS_FANOTIFY_INIT = 332 + SYS_FANOTIFY_MARK = 333 + SYS_PRLIMIT64 = 334 + SYS_NAME_TO_HANDLE_AT = 335 + SYS_OPEN_BY_HANDLE_AT = 336 + SYS_CLOCK_ADJTIME = 337 + SYS_SYNCFS = 338 + SYS_SETNS = 339 + SYS_PROCESS_VM_READV = 340 + SYS_PROCESS_VM_WRITEV = 341 + SYS_S390_RUNTIME_INSTR = 342 + SYS_KCMP = 343 + SYS_FINIT_MODULE = 344 + SYS_SCHED_SETATTR = 345 + SYS_SCHED_GETATTR = 346 + SYS_RENAMEAT2 = 347 + SYS_SECCOMP = 348 + SYS_GETRANDOM = 349 + SYS_MEMFD_CREATE = 350 + SYS_BPF = 351 + SYS_S390_PCI_MMIO_WRITE = 352 + SYS_S390_PCI_MMIO_READ = 353 + SYS_EXECVEAT = 354 + SYS_USERFAULTFD = 355 + SYS_MEMBARRIER = 356 + SYS_RECVMMSG = 357 + SYS_SENDMMSG = 358 + SYS_SOCKET = 359 + SYS_SOCKETPAIR = 360 + SYS_BIND = 361 + SYS_CONNECT = 362 + SYS_LISTEN = 363 + SYS_ACCEPT4 = 364 + SYS_GETSOCKOPT = 365 + SYS_SETSOCKOPT = 366 + SYS_GETSOCKNAME = 367 + SYS_GETPEERNAME = 368 + SYS_SENDTO = 369 + SYS_SENDMSG = 370 + SYS_RECVFROM = 371 + SYS_RECVMSG = 372 + SYS_SHUTDOWN = 373 + SYS_MLOCK2 = 374 + SYS_COPY_FILE_RANGE = 375 + SYS_PREADV2 = 376 + SYS_PWRITEV2 = 377 + SYS_S390_GUARDED_STORAGE = 378 + SYS_STATX = 379 + SYS_S390_STHYI = 380 + SYS_SELECT = 142 + SYS_GETRLIMIT = 191 + SYS_LCHOWN = 198 + SYS_GETUID = 199 + SYS_GETGID = 200 + SYS_GETEUID = 201 + SYS_GETEGID = 202 + SYS_SETREUID = 203 + SYS_SETREGID = 204 + SYS_GETGROUPS = 205 + SYS_SETGROUPS = 206 + SYS_FCHOWN = 207 + SYS_SETRESUID = 208 + SYS_GETRESUID = 209 + SYS_SETRESGID = 210 + SYS_GETRESGID = 211 + SYS_CHOWN = 212 + SYS_SETUID = 213 + SYS_SETGID = 214 + SYS_SETFSUID = 215 + SYS_SETFSGID = 216 + SYS_NEWFSTATAT = 293 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go new file mode 100644 index 0000000..c9c129d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -0,0 +1,348 @@ +// mksysnum_linux.pl -Ilinux/usr/include -m64 -D__arch64__ linux/usr/include/asm/unistd.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build sparc64,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECV = 11 + SYS_CHDIR = 12 + SYS_CHOWN = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BRK = 17 + SYS_PERFCTR = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_CAPGET = 21 + SYS_CAPSET = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_VMSPLICE = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_SIGALTSTACK = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_STAT = 38 + SYS_SENDFILE = 39 + SYS_LSTAT = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_UMOUNT2 = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_MEMORY_ORDERING = 52 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_FSTAT = 62 + SYS_FSTAT64 = 63 + SYS_GETPAGESIZE = 64 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_MMAP = 71 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_VHANGUP = 76 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_SETHOSTNAME = 88 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_ACCEPT = 99 + SYS_GETPRIORITY = 100 + SYS_RT_SIGRETURN = 101 + SYS_RT_SIGACTION = 102 + SYS_RT_SIGPROCMASK = 103 + SYS_RT_SIGPENDING = 104 + SYS_RT_SIGTIMEDWAIT = 105 + SYS_RT_SIGQUEUEINFO = 106 + SYS_RT_SIGSUSPEND = 107 + SYS_SETRESUID = 108 + SYS_GETRESUID = 109 + SYS_SETRESGID = 110 + SYS_GETRESGID = 111 + SYS_RECVMSG = 113 + SYS_SENDMSG = 114 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_GETCWD = 119 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_RECVFROM = 125 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_TRUNCATE = 129 + SYS_FTRUNCATE = 130 + SYS_FLOCK = 131 + SYS_LSTAT64 = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_STAT64 = 139 + SYS_SENDFILE64 = 140 + SYS_GETPEERNAME = 141 + SYS_FUTEX = 142 + SYS_GETTID = 143 + SYS_GETRLIMIT = 144 + SYS_SETRLIMIT = 145 + SYS_PIVOT_ROOT = 146 + SYS_PRCTL = 147 + SYS_PCICONFIG_READ = 148 + SYS_PCICONFIG_WRITE = 149 + SYS_GETSOCKNAME = 150 + SYS_INOTIFY_INIT = 151 + SYS_INOTIFY_ADD_WATCH = 152 + SYS_POLL = 153 + SYS_GETDENTS64 = 154 + SYS_INOTIFY_RM_WATCH = 156 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UMOUNT = 159 + SYS_SCHED_SET_AFFINITY = 160 + SYS_SCHED_GET_AFFINITY = 161 + SYS_GETDOMAINNAME = 162 + SYS_SETDOMAINNAME = 163 + SYS_UTRAP_INSTALL = 164 + SYS_QUOTACTL = 165 + SYS_SET_TID_ADDRESS = 166 + SYS_MOUNT = 167 + SYS_USTAT = 168 + SYS_SETXATTR = 169 + SYS_LSETXATTR = 170 + SYS_FSETXATTR = 171 + SYS_GETXATTR = 172 + SYS_LGETXATTR = 173 + SYS_GETDENTS = 174 + SYS_SETSID = 175 + SYS_FCHDIR = 176 + SYS_FGETXATTR = 177 + SYS_LISTXATTR = 178 + SYS_LLISTXATTR = 179 + SYS_FLISTXATTR = 180 + SYS_REMOVEXATTR = 181 + SYS_LREMOVEXATTR = 182 + SYS_SIGPENDING = 183 + SYS_QUERY_MODULE = 184 + SYS_SETPGID = 185 + SYS_FREMOVEXATTR = 186 + SYS_TKILL = 187 + SYS_EXIT_GROUP = 188 + SYS_UNAME = 189 + SYS_INIT_MODULE = 190 + SYS_PERSONALITY = 191 + SYS_REMAP_FILE_PAGES = 192 + SYS_EPOLL_CREATE = 193 + SYS_EPOLL_CTL = 194 + SYS_EPOLL_WAIT = 195 + SYS_IOPRIO_SET = 196 + SYS_GETPPID = 197 + SYS_SIGACTION = 198 + SYS_SGETMASK = 199 + SYS_SSETMASK = 200 + SYS_SIGSUSPEND = 201 + SYS_OLDLSTAT = 202 + SYS_USELIB = 203 + SYS_READDIR = 204 + SYS_READAHEAD = 205 + SYS_SOCKETCALL = 206 + SYS_SYSLOG = 207 + SYS_LOOKUP_DCOOKIE = 208 + SYS_FADVISE64 = 209 + SYS_FADVISE64_64 = 210 + SYS_TGKILL = 211 + SYS_WAITPID = 212 + SYS_SWAPOFF = 213 + SYS_SYSINFO = 214 + SYS_IPC = 215 + SYS_SIGRETURN = 216 + SYS_CLONE = 217 + SYS_IOPRIO_GET = 218 + SYS_ADJTIMEX = 219 + SYS_SIGPROCMASK = 220 + SYS_CREATE_MODULE = 221 + SYS_DELETE_MODULE = 222 + SYS_GET_KERNEL_SYMS = 223 + SYS_GETPGID = 224 + SYS_BDFLUSH = 225 + SYS_SYSFS = 226 + SYS_AFS_SYSCALL = 227 + SYS_SETFSUID = 228 + SYS_SETFSGID = 229 + SYS__NEWSELECT = 230 + SYS_SPLICE = 232 + SYS_STIME = 233 + SYS_STATFS64 = 234 + SYS_FSTATFS64 = 235 + SYS__LLSEEK = 236 + SYS_MLOCK = 237 + SYS_MUNLOCK = 238 + SYS_MLOCKALL = 239 + SYS_MUNLOCKALL = 240 + SYS_SCHED_SETPARAM = 241 + SYS_SCHED_GETPARAM = 242 + SYS_SCHED_SETSCHEDULER = 243 + SYS_SCHED_GETSCHEDULER = 244 + SYS_SCHED_YIELD = 245 + SYS_SCHED_GET_PRIORITY_MAX = 246 + SYS_SCHED_GET_PRIORITY_MIN = 247 + SYS_SCHED_RR_GET_INTERVAL = 248 + SYS_NANOSLEEP = 249 + SYS_MREMAP = 250 + SYS__SYSCTL = 251 + SYS_GETSID = 252 + SYS_FDATASYNC = 253 + SYS_NFSSERVCTL = 254 + SYS_SYNC_FILE_RANGE = 255 + SYS_CLOCK_SETTIME = 256 + SYS_CLOCK_GETTIME = 257 + SYS_CLOCK_GETRES = 258 + SYS_CLOCK_NANOSLEEP = 259 + SYS_SCHED_GETAFFINITY = 260 + SYS_SCHED_SETAFFINITY = 261 + SYS_TIMER_SETTIME = 262 + SYS_TIMER_GETTIME = 263 + SYS_TIMER_GETOVERRUN = 264 + SYS_TIMER_DELETE = 265 + SYS_TIMER_CREATE = 266 + SYS_IO_SETUP = 268 + SYS_IO_DESTROY = 269 + SYS_IO_SUBMIT = 270 + SYS_IO_CANCEL = 271 + SYS_IO_GETEVENTS = 272 + SYS_MQ_OPEN = 273 + SYS_MQ_UNLINK = 274 + SYS_MQ_TIMEDSEND = 275 + SYS_MQ_TIMEDRECEIVE = 276 + SYS_MQ_NOTIFY = 277 + SYS_MQ_GETSETATTR = 278 + SYS_WAITID = 279 + SYS_TEE = 280 + SYS_ADD_KEY = 281 + SYS_REQUEST_KEY = 282 + SYS_KEYCTL = 283 + SYS_OPENAT = 284 + SYS_MKDIRAT = 285 + SYS_MKNODAT = 286 + SYS_FCHOWNAT = 287 + SYS_FUTIMESAT = 288 + SYS_FSTATAT64 = 289 + SYS_UNLINKAT = 290 + SYS_RENAMEAT = 291 + SYS_LINKAT = 292 + SYS_SYMLINKAT = 293 + SYS_READLINKAT = 294 + SYS_FCHMODAT = 295 + SYS_FACCESSAT = 296 + SYS_PSELECT6 = 297 + SYS_PPOLL = 298 + SYS_UNSHARE = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_GET_ROBUST_LIST = 301 + SYS_MIGRATE_PAGES = 302 + SYS_MBIND = 303 + SYS_GET_MEMPOLICY = 304 + SYS_SET_MEMPOLICY = 305 + SYS_KEXEC_LOAD = 306 + SYS_MOVE_PAGES = 307 + SYS_GETCPU = 308 + SYS_EPOLL_PWAIT = 309 + SYS_UTIMENSAT = 310 + SYS_SIGNALFD = 311 + SYS_TIMERFD_CREATE = 312 + SYS_EVENTFD = 313 + SYS_FALLOCATE = 314 + SYS_TIMERFD_SETTIME = 315 + SYS_TIMERFD_GETTIME = 316 + SYS_SIGNALFD4 = 317 + SYS_EVENTFD2 = 318 + SYS_EPOLL_CREATE1 = 319 + SYS_DUP3 = 320 + SYS_PIPE2 = 321 + SYS_INOTIFY_INIT1 = 322 + SYS_ACCEPT4 = 323 + SYS_PREADV = 324 + SYS_PWRITEV = 325 + SYS_RT_TGSIGQUEUEINFO = 326 + SYS_PERF_EVENT_OPEN = 327 + SYS_RECVMMSG = 328 + SYS_FANOTIFY_INIT = 329 + SYS_FANOTIFY_MARK = 330 + SYS_PRLIMIT64 = 331 + SYS_NAME_TO_HANDLE_AT = 332 + SYS_OPEN_BY_HANDLE_AT = 333 + SYS_CLOCK_ADJTIME = 334 + SYS_SYNCFS = 335 + SYS_SENDMMSG = 336 + SYS_SETNS = 337 + SYS_PROCESS_VM_READV = 338 + SYS_PROCESS_VM_WRITEV = 339 + SYS_KERN_FEATURES = 340 + SYS_KCMP = 341 + SYS_FINIT_MODULE = 342 + SYS_SCHED_SETATTR = 343 + SYS_SCHED_GETATTR = 344 + SYS_RENAMEAT2 = 345 + SYS_SECCOMP = 346 + SYS_GETRANDOM = 347 + SYS_MEMFD_CREATE = 348 + SYS_BPF = 349 + SYS_EXECVEAT = 350 + SYS_MEMBARRIER = 351 + SYS_USERFAULTFD = 352 + SYS_BIND = 353 + SYS_LISTEN = 354 + SYS_SETSOCKOPT = 355 + SYS_MLOCK2 = 356 + SYS_COPY_FILE_RANGE = 357 + SYS_PREADV2 = 358 + SYS_PWRITEV2 = 359 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go new file mode 100644 index 0000000..8afda9c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -0,0 +1,274 @@ +// mksysnum_netbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build 386,netbsd + +package unix + +const ( + SYS_EXIT = 1 // { void|sys||exit(int rval); } + SYS_FORK = 2 // { int|sys||fork(void); } + SYS_READ = 3 // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t|sys||write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int|sys||open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int|sys||close(int fd); } + SYS_LINK = 9 // { int|sys||link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int|sys||unlink(const char *path); } + SYS_CHDIR = 12 // { int|sys||chdir(const char *path); } + SYS_FCHDIR = 13 // { int|sys||fchdir(int fd); } + SYS_CHMOD = 15 // { int|sys||chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int|sys||chown(const char *path, uid_t uid, gid_t gid); } + SYS_BREAK = 17 // { int|sys||obreak(char *nsize); } + SYS_GETPID = 20 // { pid_t|sys||getpid_with_ppid(void); } + SYS_UNMOUNT = 22 // { int|sys||unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int|sys||setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t|sys||getuid_with_euid(void); } + SYS_GETEUID = 25 // { uid_t|sys||geteuid(void); } + SYS_PTRACE = 26 // { int|sys||ptrace(int req, pid_t pid, void *addr, int data); } + SYS_RECVMSG = 27 // { ssize_t|sys||recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t|sys||sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t|sys||recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int|sys||accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int|sys||getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int|sys||getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int|sys||access(const char *path, int flags); } + SYS_CHFLAGS = 34 // { int|sys||chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int|sys||fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { void|sys||sync(void); } + SYS_KILL = 37 // { int|sys||kill(pid_t pid, int signum); } + SYS_GETPPID = 39 // { pid_t|sys||getppid(void); } + SYS_DUP = 41 // { int|sys||dup(int fd); } + SYS_PIPE = 42 // { int|sys||pipe(void); } + SYS_GETEGID = 43 // { gid_t|sys||getegid(void); } + SYS_PROFIL = 44 // { int|sys||profil(char *samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int|sys||ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_GETGID = 47 // { gid_t|sys||getgid_with_egid(void); } + SYS___GETLOGIN = 49 // { int|sys||__getlogin(char *namebuf, size_t namelen); } + SYS___SETLOGIN = 50 // { int|sys||__setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int|sys||acct(const char *path); } + SYS_IOCTL = 54 // { int|sys||ioctl(int fd, u_long com, ... void *data); } + SYS_REVOKE = 56 // { int|sys||revoke(const char *path); } + SYS_SYMLINK = 57 // { int|sys||symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t|sys||readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int|sys||execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t|sys||umask(mode_t newmask); } + SYS_CHROOT = 61 // { int|sys||chroot(const char *path); } + SYS_VFORK = 66 // { int|sys||vfork(void); } + SYS_SBRK = 69 // { int|sys||sbrk(intptr_t incr); } + SYS_SSTK = 70 // { int|sys||sstk(int incr); } + SYS_VADVISE = 72 // { int|sys||ovadvise(int anom); } + SYS_MUNMAP = 73 // { int|sys||munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int|sys||mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int|sys||madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int|sys||mincore(void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int|sys||getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int|sys||setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int|sys||getpgrp(void); } + SYS_SETPGID = 82 // { int|sys||setpgid(pid_t pid, pid_t pgid); } + SYS_DUP2 = 90 // { int|sys||dup2(int from, int to); } + SYS_FCNTL = 92 // { int|sys||fcntl(int fd, int cmd, ... void *arg); } + SYS_FSYNC = 95 // { int|sys||fsync(int fd); } + SYS_SETPRIORITY = 96 // { int|sys||setpriority(int which, id_t who, int prio); } + SYS_CONNECT = 98 // { int|sys||connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETPRIORITY = 100 // { int|sys||getpriority(int which, id_t who); } + SYS_BIND = 104 // { int|sys||bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int|sys||setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int|sys||listen(int s, int backlog); } + SYS_GETSOCKOPT = 118 // { int|sys||getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_READV = 120 // { ssize_t|sys||readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t|sys||writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_FCHOWN = 123 // { int|sys||fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int|sys||fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int|sys||setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int|sys||setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int|sys||rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int|sys||flock(int fd, int how); } + SYS_MKFIFO = 132 // { int|sys||mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t|sys||sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int|sys||shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int|sys||socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int|sys||mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int|sys||rmdir(const char *path); } + SYS_SETSID = 147 // { int|sys||setsid(void); } + SYS_SYSARCH = 165 // { int|sys||sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t|sys||pread(int fd, void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_PWRITE = 174 // { ssize_t|sys||pwrite(int fd, const void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_NTP_ADJTIME = 176 // { int|sys||ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int|sys||setgid(gid_t gid); } + SYS_SETEGID = 182 // { int|sys||setegid(gid_t egid); } + SYS_SETEUID = 183 // { int|sys||seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long|sys||pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long|sys||fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int|sys||getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int|sys||setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *|sys||mmap(void *addr, size_t len, int prot, int flags, int fd, long PAD, off_t pos); } + SYS_LSEEK = 199 // { off_t|sys||lseek(int fd, int PAD, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int|sys||truncate(const char *path, int PAD, off_t length); } + SYS_FTRUNCATE = 201 // { int|sys||ftruncate(int fd, int PAD, off_t length); } + SYS___SYSCTL = 202 // { int|sys||__sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, const void *new, size_t newlen); } + SYS_MLOCK = 203 // { int|sys||mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int|sys||munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int|sys||undelete(const char *path); } + SYS_GETPGID = 207 // { pid_t|sys||getpgid(pid_t pid); } + SYS_REBOOT = 208 // { int|sys||reboot(int opt, char *bootstr); } + SYS_POLL = 209 // { int|sys||poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int|sys||semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int|sys||semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_SEMCONFIG = 223 // { int|sys||semconfig(int flag); } + SYS_MSGGET = 225 // { int|sys||msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int|sys||msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t|sys||msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *|sys||shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int|sys||shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int|sys||shmget(key_t key, size_t size, int shmflg); } + SYS_TIMER_CREATE = 235 // { int|sys||timer_create(clockid_t clock_id, struct sigevent *evp, timer_t *timerid); } + SYS_TIMER_DELETE = 236 // { int|sys||timer_delete(timer_t timerid); } + SYS_TIMER_GETOVERRUN = 239 // { int|sys||timer_getoverrun(timer_t timerid); } + SYS_FDATASYNC = 241 // { int|sys||fdatasync(int fd); } + SYS_MLOCKALL = 242 // { int|sys||mlockall(int flags); } + SYS_MUNLOCKALL = 243 // { int|sys||munlockall(void); } + SYS_SIGQUEUEINFO = 245 // { int|sys||sigqueueinfo(pid_t pid, const siginfo_t *info); } + SYS_MODCTL = 246 // { int|sys||modctl(int cmd, void *arg); } + SYS___POSIX_RENAME = 270 // { int|sys||__posix_rename(const char *from, const char *to); } + SYS_SWAPCTL = 271 // { int|sys||swapctl(int cmd, void *arg, int misc); } + SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); } + SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); } + SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); } + SYS_MSYNC = 277 // { int|sys|13|msync(void *addr, size_t len, int flags); } + SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); } + SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); } + SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 286 // { pid_t|sys||getsid(pid_t pid); } + SYS___CLONE = 287 // { pid_t|sys||__clone(int flags, void *stack); } + SYS_FKTRACE = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); } + SYS_PREADV = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t|sys||pwritev(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); } + SYS___GETCWD = 296 // { int|sys||__getcwd(char *bufp, size_t length); } + SYS_FCHROOT = 297 // { int|sys||fchroot(int fd); } + SYS_LCHFLAGS = 304 // { int|sys||lchflags(const char *path, u_long flags); } + SYS_ISSETUGID = 305 // { int|sys||issetugid(void); } + SYS_UTRACE = 306 // { int|sys||utrace(const char *label, void *addr, size_t len); } + SYS_GETCONTEXT = 307 // { int|sys||getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 308 // { int|sys||setcontext(const struct __ucontext *ucp); } + SYS__LWP_CREATE = 309 // { int|sys||_lwp_create(const struct __ucontext *ucp, u_long flags, lwpid_t *new_lwp); } + SYS__LWP_EXIT = 310 // { int|sys||_lwp_exit(void); } + SYS__LWP_SELF = 311 // { lwpid_t|sys||_lwp_self(void); } + SYS__LWP_WAIT = 312 // { int|sys||_lwp_wait(lwpid_t wait_for, lwpid_t *departed); } + SYS__LWP_SUSPEND = 313 // { int|sys||_lwp_suspend(lwpid_t target); } + SYS__LWP_CONTINUE = 314 // { int|sys||_lwp_continue(lwpid_t target); } + SYS__LWP_WAKEUP = 315 // { int|sys||_lwp_wakeup(lwpid_t target); } + SYS__LWP_GETPRIVATE = 316 // { void *|sys||_lwp_getprivate(void); } + SYS__LWP_SETPRIVATE = 317 // { void|sys||_lwp_setprivate(void *ptr); } + SYS__LWP_KILL = 318 // { int|sys||_lwp_kill(lwpid_t target, int signo); } + SYS__LWP_DETACH = 319 // { int|sys||_lwp_detach(lwpid_t target); } + SYS__LWP_UNPARK = 321 // { int|sys||_lwp_unpark(lwpid_t target, const void *hint); } + SYS__LWP_UNPARK_ALL = 322 // { ssize_t|sys||_lwp_unpark_all(const lwpid_t *targets, size_t ntargets, const void *hint); } + SYS__LWP_SETNAME = 323 // { int|sys||_lwp_setname(lwpid_t target, const char *name); } + SYS__LWP_GETNAME = 324 // { int|sys||_lwp_getname(lwpid_t target, char *name, size_t len); } + SYS__LWP_CTL = 325 // { int|sys||_lwp_ctl(int features, struct lwpctl **address); } + SYS___SIGACTION_SIGTRAMP = 340 // { int|sys||__sigaction_sigtramp(int signum, const struct sigaction *nsa, struct sigaction *osa, const void *tramp, int vers); } + SYS_PMC_GET_INFO = 341 // { int|sys||pmc_get_info(int ctr, int op, void *args); } + SYS_PMC_CONTROL = 342 // { int|sys||pmc_control(int ctr, int op, void *args); } + SYS_RASCTL = 343 // { int|sys||rasctl(void *addr, size_t len, int op); } + SYS_KQUEUE = 344 // { int|sys||kqueue(void); } + SYS__SCHED_SETPARAM = 346 // { int|sys||_sched_setparam(pid_t pid, lwpid_t lid, int policy, const struct sched_param *params); } + SYS__SCHED_GETPARAM = 347 // { int|sys||_sched_getparam(pid_t pid, lwpid_t lid, int *policy, struct sched_param *params); } + SYS__SCHED_SETAFFINITY = 348 // { int|sys||_sched_setaffinity(pid_t pid, lwpid_t lid, size_t size, const cpuset_t *cpuset); } + SYS__SCHED_GETAFFINITY = 349 // { int|sys||_sched_getaffinity(pid_t pid, lwpid_t lid, size_t size, cpuset_t *cpuset); } + SYS_SCHED_YIELD = 350 // { int|sys||sched_yield(void); } + SYS_FSYNC_RANGE = 354 // { int|sys||fsync_range(int fd, int flags, off_t start, off_t length); } + SYS_UUIDGEN = 355 // { int|sys||uuidgen(struct uuid *store, int count); } + SYS_GETVFSSTAT = 356 // { int|sys||getvfsstat(struct statvfs *buf, size_t bufsize, int flags); } + SYS_STATVFS1 = 357 // { int|sys||statvfs1(const char *path, struct statvfs *buf, int flags); } + SYS_FSTATVFS1 = 358 // { int|sys||fstatvfs1(int fd, struct statvfs *buf, int flags); } + SYS_EXTATTRCTL = 360 // { int|sys||extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 361 // { int|sys||extattr_set_file(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 362 // { ssize_t|sys||extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 363 // { int|sys||extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FD = 364 // { int|sys||extattr_set_fd(int fd, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 365 // { ssize_t|sys||extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 366 // { int|sys||extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 367 // { int|sys||extattr_set_link(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 368 // { ssize_t|sys||extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 369 // { int|sys||extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_LIST_FD = 370 // { ssize_t|sys||extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 371 // { ssize_t|sys||extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 372 // { ssize_t|sys||extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_SETXATTR = 375 // { int|sys||setxattr(const char *path, const char *name, const void *value, size_t size, int flags); } + SYS_LSETXATTR = 376 // { int|sys||lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags); } + SYS_FSETXATTR = 377 // { int|sys||fsetxattr(int fd, const char *name, const void *value, size_t size, int flags); } + SYS_GETXATTR = 378 // { int|sys||getxattr(const char *path, const char *name, void *value, size_t size); } + SYS_LGETXATTR = 379 // { int|sys||lgetxattr(const char *path, const char *name, void *value, size_t size); } + SYS_FGETXATTR = 380 // { int|sys||fgetxattr(int fd, const char *name, void *value, size_t size); } + SYS_LISTXATTR = 381 // { int|sys||listxattr(const char *path, char *list, size_t size); } + SYS_LLISTXATTR = 382 // { int|sys||llistxattr(const char *path, char *list, size_t size); } + SYS_FLISTXATTR = 383 // { int|sys||flistxattr(int fd, char *list, size_t size); } + SYS_REMOVEXATTR = 384 // { int|sys||removexattr(const char *path, const char *name); } + SYS_LREMOVEXATTR = 385 // { int|sys||lremovexattr(const char *path, const char *name); } + SYS_FREMOVEXATTR = 386 // { int|sys||fremovexattr(int fd, const char *name); } + SYS_GETDENTS = 390 // { int|sys|30|getdents(int fd, char *buf, size_t count); } + SYS_SOCKET = 394 // { int|sys|30|socket(int domain, int type, int protocol); } + SYS_GETFH = 395 // { int|sys|30|getfh(const char *fname, void *fhp, size_t *fh_size); } + SYS_MOUNT = 410 // { int|sys|50|mount(const char *type, const char *path, int flags, void *data, size_t data_len); } + SYS_MREMAP = 411 // { void *|sys||mremap(void *old_address, size_t old_size, void *new_address, size_t new_size, int flags); } + SYS_PSET_CREATE = 412 // { int|sys||pset_create(psetid_t *psid); } + SYS_PSET_DESTROY = 413 // { int|sys||pset_destroy(psetid_t psid); } + SYS_PSET_ASSIGN = 414 // { int|sys||pset_assign(psetid_t psid, cpuid_t cpuid, psetid_t *opsid); } + SYS__PSET_BIND = 415 // { int|sys||_pset_bind(idtype_t idtype, id_t first_id, id_t second_id, psetid_t psid, psetid_t *opsid); } + SYS_POSIX_FADVISE = 416 // { int|sys|50|posix_fadvise(int fd, int PAD, off_t offset, off_t len, int advice); } + SYS_SELECT = 417 // { int|sys|50|select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_GETTIMEOFDAY = 418 // { int|sys|50|gettimeofday(struct timeval *tp, void *tzp); } + SYS_SETTIMEOFDAY = 419 // { int|sys|50|settimeofday(const struct timeval *tv, const void *tzp); } + SYS_UTIMES = 420 // { int|sys|50|utimes(const char *path, const struct timeval *tptr); } + SYS_ADJTIME = 421 // { int|sys|50|adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_FUTIMES = 423 // { int|sys|50|futimes(int fd, const struct timeval *tptr); } + SYS_LUTIMES = 424 // { int|sys|50|lutimes(const char *path, const struct timeval *tptr); } + SYS_SETITIMER = 425 // { int|sys|50|setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 426 // { int|sys|50|getitimer(int which, struct itimerval *itv); } + SYS_CLOCK_GETTIME = 427 // { int|sys|50|clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 428 // { int|sys|50|clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 429 // { int|sys|50|clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_NANOSLEEP = 430 // { int|sys|50|nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS___SIGTIMEDWAIT = 431 // { int|sys|50|__sigtimedwait(const sigset_t *set, siginfo_t *info, struct timespec *timeout); } + SYS__LWP_PARK = 434 // { int|sys|50|_lwp_park(const struct timespec *ts, lwpid_t unpark, const void *hint, const void *unparkhint); } + SYS_KEVENT = 435 // { int|sys|50|kevent(int fd, const struct kevent *changelist, size_t nchanges, struct kevent *eventlist, size_t nevents, const struct timespec *timeout); } + SYS_PSELECT = 436 // { int|sys|50|pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_POLLTS = 437 // { int|sys|50|pollts(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_STAT = 439 // { int|sys|50|stat(const char *path, struct stat *ub); } + SYS_FSTAT = 440 // { int|sys|50|fstat(int fd, struct stat *sb); } + SYS_LSTAT = 441 // { int|sys|50|lstat(const char *path, struct stat *ub); } + SYS___SEMCTL = 442 // { int|sys|50|__semctl(int semid, int semnum, int cmd, ... union __semun *arg); } + SYS_SHMCTL = 443 // { int|sys|50|shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 444 // { int|sys|50|msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_GETRUSAGE = 445 // { int|sys|50|getrusage(int who, struct rusage *rusage); } + SYS_TIMER_SETTIME = 446 // { int|sys|50|timer_settime(timer_t timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_TIMER_GETTIME = 447 // { int|sys|50|timer_gettime(timer_t timerid, struct itimerspec *value); } + SYS_NTP_GETTIME = 448 // { int|sys|50|ntp_gettime(struct ntptimeval *ntvp); } + SYS_WAIT4 = 449 // { int|sys|50|wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_MKNOD = 450 // { int|sys|50|mknod(const char *path, mode_t mode, dev_t dev); } + SYS_FHSTAT = 451 // { int|sys|50|fhstat(const void *fhp, size_t fh_size, struct stat *sb); } + SYS_PIPE2 = 453 // { int|sys||pipe2(int *fildes, int flags); } + SYS_DUP3 = 454 // { int|sys||dup3(int from, int to, int flags); } + SYS_KQUEUE1 = 455 // { int|sys||kqueue1(int flags); } + SYS_PACCEPT = 456 // { int|sys||paccept(int s, struct sockaddr *name, socklen_t *anamelen, const sigset_t *mask, int flags); } + SYS_LINKAT = 457 // { int|sys||linkat(int fd1, const char *name1, int fd2, const char *name2, int flags); } + SYS_RENAMEAT = 458 // { int|sys||renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_MKFIFOAT = 459 // { int|sys||mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 460 // { int|sys||mknodat(int fd, const char *path, mode_t mode, uint32_t dev); } + SYS_MKDIRAT = 461 // { int|sys||mkdirat(int fd, const char *path, mode_t mode); } + SYS_FACCESSAT = 462 // { int|sys||faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 463 // { int|sys||fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 464 // { int|sys||fchownat(int fd, const char *path, uid_t owner, gid_t group, int flag); } + SYS_FEXECVE = 465 // { int|sys||fexecve(int fd, char * const *argp, char * const *envp); } + SYS_FSTATAT = 466 // { int|sys||fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_UTIMENSAT = 467 // { int|sys||utimensat(int fd, const char *path, const struct timespec *tptr, int flag); } + SYS_OPENAT = 468 // { int|sys||openat(int fd, const char *path, int oflags, ... mode_t mode); } + SYS_READLINKAT = 469 // { int|sys||readlinkat(int fd, const char *path, char *buf, size_t bufsize); } + SYS_SYMLINKAT = 470 // { int|sys||symlinkat(const char *path1, int fd, const char *path2); } + SYS_UNLINKAT = 471 // { int|sys||unlinkat(int fd, const char *path, int flag); } + SYS_FUTIMENS = 472 // { int|sys||futimens(int fd, const struct timespec *tptr); } + SYS___QUOTACTL = 473 // { int|sys||__quotactl(const char *path, struct quotactl_args *args); } + SYS_POSIX_SPAWN = 474 // { int|sys||posix_spawn(pid_t *pid, const char *path, const struct posix_spawn_file_actions *file_actions, const struct posix_spawnattr *attrp, char *const *argv, char *const *envp); } + SYS_RECVMMSG = 475 // { int|sys||recvmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); } + SYS_SENDMMSG = 476 // { int|sys||sendmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go new file mode 100644 index 0000000..aea8dbe --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -0,0 +1,274 @@ +// mksysnum_netbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build amd64,netbsd + +package unix + +const ( + SYS_EXIT = 1 // { void|sys||exit(int rval); } + SYS_FORK = 2 // { int|sys||fork(void); } + SYS_READ = 3 // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t|sys||write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int|sys||open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int|sys||close(int fd); } + SYS_LINK = 9 // { int|sys||link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int|sys||unlink(const char *path); } + SYS_CHDIR = 12 // { int|sys||chdir(const char *path); } + SYS_FCHDIR = 13 // { int|sys||fchdir(int fd); } + SYS_CHMOD = 15 // { int|sys||chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int|sys||chown(const char *path, uid_t uid, gid_t gid); } + SYS_BREAK = 17 // { int|sys||obreak(char *nsize); } + SYS_GETPID = 20 // { pid_t|sys||getpid_with_ppid(void); } + SYS_UNMOUNT = 22 // { int|sys||unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int|sys||setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t|sys||getuid_with_euid(void); } + SYS_GETEUID = 25 // { uid_t|sys||geteuid(void); } + SYS_PTRACE = 26 // { int|sys||ptrace(int req, pid_t pid, void *addr, int data); } + SYS_RECVMSG = 27 // { ssize_t|sys||recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t|sys||sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t|sys||recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int|sys||accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int|sys||getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int|sys||getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int|sys||access(const char *path, int flags); } + SYS_CHFLAGS = 34 // { int|sys||chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int|sys||fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { void|sys||sync(void); } + SYS_KILL = 37 // { int|sys||kill(pid_t pid, int signum); } + SYS_GETPPID = 39 // { pid_t|sys||getppid(void); } + SYS_DUP = 41 // { int|sys||dup(int fd); } + SYS_PIPE = 42 // { int|sys||pipe(void); } + SYS_GETEGID = 43 // { gid_t|sys||getegid(void); } + SYS_PROFIL = 44 // { int|sys||profil(char *samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int|sys||ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_GETGID = 47 // { gid_t|sys||getgid_with_egid(void); } + SYS___GETLOGIN = 49 // { int|sys||__getlogin(char *namebuf, size_t namelen); } + SYS___SETLOGIN = 50 // { int|sys||__setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int|sys||acct(const char *path); } + SYS_IOCTL = 54 // { int|sys||ioctl(int fd, u_long com, ... void *data); } + SYS_REVOKE = 56 // { int|sys||revoke(const char *path); } + SYS_SYMLINK = 57 // { int|sys||symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t|sys||readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int|sys||execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t|sys||umask(mode_t newmask); } + SYS_CHROOT = 61 // { int|sys||chroot(const char *path); } + SYS_VFORK = 66 // { int|sys||vfork(void); } + SYS_SBRK = 69 // { int|sys||sbrk(intptr_t incr); } + SYS_SSTK = 70 // { int|sys||sstk(int incr); } + SYS_VADVISE = 72 // { int|sys||ovadvise(int anom); } + SYS_MUNMAP = 73 // { int|sys||munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int|sys||mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int|sys||madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int|sys||mincore(void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int|sys||getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int|sys||setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int|sys||getpgrp(void); } + SYS_SETPGID = 82 // { int|sys||setpgid(pid_t pid, pid_t pgid); } + SYS_DUP2 = 90 // { int|sys||dup2(int from, int to); } + SYS_FCNTL = 92 // { int|sys||fcntl(int fd, int cmd, ... void *arg); } + SYS_FSYNC = 95 // { int|sys||fsync(int fd); } + SYS_SETPRIORITY = 96 // { int|sys||setpriority(int which, id_t who, int prio); } + SYS_CONNECT = 98 // { int|sys||connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETPRIORITY = 100 // { int|sys||getpriority(int which, id_t who); } + SYS_BIND = 104 // { int|sys||bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int|sys||setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int|sys||listen(int s, int backlog); } + SYS_GETSOCKOPT = 118 // { int|sys||getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_READV = 120 // { ssize_t|sys||readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t|sys||writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_FCHOWN = 123 // { int|sys||fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int|sys||fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int|sys||setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int|sys||setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int|sys||rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int|sys||flock(int fd, int how); } + SYS_MKFIFO = 132 // { int|sys||mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t|sys||sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int|sys||shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int|sys||socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int|sys||mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int|sys||rmdir(const char *path); } + SYS_SETSID = 147 // { int|sys||setsid(void); } + SYS_SYSARCH = 165 // { int|sys||sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t|sys||pread(int fd, void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_PWRITE = 174 // { ssize_t|sys||pwrite(int fd, const void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_NTP_ADJTIME = 176 // { int|sys||ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int|sys||setgid(gid_t gid); } + SYS_SETEGID = 182 // { int|sys||setegid(gid_t egid); } + SYS_SETEUID = 183 // { int|sys||seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long|sys||pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long|sys||fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int|sys||getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int|sys||setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *|sys||mmap(void *addr, size_t len, int prot, int flags, int fd, long PAD, off_t pos); } + SYS_LSEEK = 199 // { off_t|sys||lseek(int fd, int PAD, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int|sys||truncate(const char *path, int PAD, off_t length); } + SYS_FTRUNCATE = 201 // { int|sys||ftruncate(int fd, int PAD, off_t length); } + SYS___SYSCTL = 202 // { int|sys||__sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, const void *new, size_t newlen); } + SYS_MLOCK = 203 // { int|sys||mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int|sys||munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int|sys||undelete(const char *path); } + SYS_GETPGID = 207 // { pid_t|sys||getpgid(pid_t pid); } + SYS_REBOOT = 208 // { int|sys||reboot(int opt, char *bootstr); } + SYS_POLL = 209 // { int|sys||poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int|sys||semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int|sys||semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_SEMCONFIG = 223 // { int|sys||semconfig(int flag); } + SYS_MSGGET = 225 // { int|sys||msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int|sys||msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t|sys||msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *|sys||shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int|sys||shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int|sys||shmget(key_t key, size_t size, int shmflg); } + SYS_TIMER_CREATE = 235 // { int|sys||timer_create(clockid_t clock_id, struct sigevent *evp, timer_t *timerid); } + SYS_TIMER_DELETE = 236 // { int|sys||timer_delete(timer_t timerid); } + SYS_TIMER_GETOVERRUN = 239 // { int|sys||timer_getoverrun(timer_t timerid); } + SYS_FDATASYNC = 241 // { int|sys||fdatasync(int fd); } + SYS_MLOCKALL = 242 // { int|sys||mlockall(int flags); } + SYS_MUNLOCKALL = 243 // { int|sys||munlockall(void); } + SYS_SIGQUEUEINFO = 245 // { int|sys||sigqueueinfo(pid_t pid, const siginfo_t *info); } + SYS_MODCTL = 246 // { int|sys||modctl(int cmd, void *arg); } + SYS___POSIX_RENAME = 270 // { int|sys||__posix_rename(const char *from, const char *to); } + SYS_SWAPCTL = 271 // { int|sys||swapctl(int cmd, void *arg, int misc); } + SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); } + SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); } + SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); } + SYS_MSYNC = 277 // { int|sys|13|msync(void *addr, size_t len, int flags); } + SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); } + SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); } + SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 286 // { pid_t|sys||getsid(pid_t pid); } + SYS___CLONE = 287 // { pid_t|sys||__clone(int flags, void *stack); } + SYS_FKTRACE = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); } + SYS_PREADV = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t|sys||pwritev(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); } + SYS___GETCWD = 296 // { int|sys||__getcwd(char *bufp, size_t length); } + SYS_FCHROOT = 297 // { int|sys||fchroot(int fd); } + SYS_LCHFLAGS = 304 // { int|sys||lchflags(const char *path, u_long flags); } + SYS_ISSETUGID = 305 // { int|sys||issetugid(void); } + SYS_UTRACE = 306 // { int|sys||utrace(const char *label, void *addr, size_t len); } + SYS_GETCONTEXT = 307 // { int|sys||getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 308 // { int|sys||setcontext(const struct __ucontext *ucp); } + SYS__LWP_CREATE = 309 // { int|sys||_lwp_create(const struct __ucontext *ucp, u_long flags, lwpid_t *new_lwp); } + SYS__LWP_EXIT = 310 // { int|sys||_lwp_exit(void); } + SYS__LWP_SELF = 311 // { lwpid_t|sys||_lwp_self(void); } + SYS__LWP_WAIT = 312 // { int|sys||_lwp_wait(lwpid_t wait_for, lwpid_t *departed); } + SYS__LWP_SUSPEND = 313 // { int|sys||_lwp_suspend(lwpid_t target); } + SYS__LWP_CONTINUE = 314 // { int|sys||_lwp_continue(lwpid_t target); } + SYS__LWP_WAKEUP = 315 // { int|sys||_lwp_wakeup(lwpid_t target); } + SYS__LWP_GETPRIVATE = 316 // { void *|sys||_lwp_getprivate(void); } + SYS__LWP_SETPRIVATE = 317 // { void|sys||_lwp_setprivate(void *ptr); } + SYS__LWP_KILL = 318 // { int|sys||_lwp_kill(lwpid_t target, int signo); } + SYS__LWP_DETACH = 319 // { int|sys||_lwp_detach(lwpid_t target); } + SYS__LWP_UNPARK = 321 // { int|sys||_lwp_unpark(lwpid_t target, const void *hint); } + SYS__LWP_UNPARK_ALL = 322 // { ssize_t|sys||_lwp_unpark_all(const lwpid_t *targets, size_t ntargets, const void *hint); } + SYS__LWP_SETNAME = 323 // { int|sys||_lwp_setname(lwpid_t target, const char *name); } + SYS__LWP_GETNAME = 324 // { int|sys||_lwp_getname(lwpid_t target, char *name, size_t len); } + SYS__LWP_CTL = 325 // { int|sys||_lwp_ctl(int features, struct lwpctl **address); } + SYS___SIGACTION_SIGTRAMP = 340 // { int|sys||__sigaction_sigtramp(int signum, const struct sigaction *nsa, struct sigaction *osa, const void *tramp, int vers); } + SYS_PMC_GET_INFO = 341 // { int|sys||pmc_get_info(int ctr, int op, void *args); } + SYS_PMC_CONTROL = 342 // { int|sys||pmc_control(int ctr, int op, void *args); } + SYS_RASCTL = 343 // { int|sys||rasctl(void *addr, size_t len, int op); } + SYS_KQUEUE = 344 // { int|sys||kqueue(void); } + SYS__SCHED_SETPARAM = 346 // { int|sys||_sched_setparam(pid_t pid, lwpid_t lid, int policy, const struct sched_param *params); } + SYS__SCHED_GETPARAM = 347 // { int|sys||_sched_getparam(pid_t pid, lwpid_t lid, int *policy, struct sched_param *params); } + SYS__SCHED_SETAFFINITY = 348 // { int|sys||_sched_setaffinity(pid_t pid, lwpid_t lid, size_t size, const cpuset_t *cpuset); } + SYS__SCHED_GETAFFINITY = 349 // { int|sys||_sched_getaffinity(pid_t pid, lwpid_t lid, size_t size, cpuset_t *cpuset); } + SYS_SCHED_YIELD = 350 // { int|sys||sched_yield(void); } + SYS_FSYNC_RANGE = 354 // { int|sys||fsync_range(int fd, int flags, off_t start, off_t length); } + SYS_UUIDGEN = 355 // { int|sys||uuidgen(struct uuid *store, int count); } + SYS_GETVFSSTAT = 356 // { int|sys||getvfsstat(struct statvfs *buf, size_t bufsize, int flags); } + SYS_STATVFS1 = 357 // { int|sys||statvfs1(const char *path, struct statvfs *buf, int flags); } + SYS_FSTATVFS1 = 358 // { int|sys||fstatvfs1(int fd, struct statvfs *buf, int flags); } + SYS_EXTATTRCTL = 360 // { int|sys||extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 361 // { int|sys||extattr_set_file(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 362 // { ssize_t|sys||extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 363 // { int|sys||extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FD = 364 // { int|sys||extattr_set_fd(int fd, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 365 // { ssize_t|sys||extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 366 // { int|sys||extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 367 // { int|sys||extattr_set_link(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 368 // { ssize_t|sys||extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 369 // { int|sys||extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_LIST_FD = 370 // { ssize_t|sys||extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 371 // { ssize_t|sys||extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 372 // { ssize_t|sys||extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_SETXATTR = 375 // { int|sys||setxattr(const char *path, const char *name, const void *value, size_t size, int flags); } + SYS_LSETXATTR = 376 // { int|sys||lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags); } + SYS_FSETXATTR = 377 // { int|sys||fsetxattr(int fd, const char *name, const void *value, size_t size, int flags); } + SYS_GETXATTR = 378 // { int|sys||getxattr(const char *path, const char *name, void *value, size_t size); } + SYS_LGETXATTR = 379 // { int|sys||lgetxattr(const char *path, const char *name, void *value, size_t size); } + SYS_FGETXATTR = 380 // { int|sys||fgetxattr(int fd, const char *name, void *value, size_t size); } + SYS_LISTXATTR = 381 // { int|sys||listxattr(const char *path, char *list, size_t size); } + SYS_LLISTXATTR = 382 // { int|sys||llistxattr(const char *path, char *list, size_t size); } + SYS_FLISTXATTR = 383 // { int|sys||flistxattr(int fd, char *list, size_t size); } + SYS_REMOVEXATTR = 384 // { int|sys||removexattr(const char *path, const char *name); } + SYS_LREMOVEXATTR = 385 // { int|sys||lremovexattr(const char *path, const char *name); } + SYS_FREMOVEXATTR = 386 // { int|sys||fremovexattr(int fd, const char *name); } + SYS_GETDENTS = 390 // { int|sys|30|getdents(int fd, char *buf, size_t count); } + SYS_SOCKET = 394 // { int|sys|30|socket(int domain, int type, int protocol); } + SYS_GETFH = 395 // { int|sys|30|getfh(const char *fname, void *fhp, size_t *fh_size); } + SYS_MOUNT = 410 // { int|sys|50|mount(const char *type, const char *path, int flags, void *data, size_t data_len); } + SYS_MREMAP = 411 // { void *|sys||mremap(void *old_address, size_t old_size, void *new_address, size_t new_size, int flags); } + SYS_PSET_CREATE = 412 // { int|sys||pset_create(psetid_t *psid); } + SYS_PSET_DESTROY = 413 // { int|sys||pset_destroy(psetid_t psid); } + SYS_PSET_ASSIGN = 414 // { int|sys||pset_assign(psetid_t psid, cpuid_t cpuid, psetid_t *opsid); } + SYS__PSET_BIND = 415 // { int|sys||_pset_bind(idtype_t idtype, id_t first_id, id_t second_id, psetid_t psid, psetid_t *opsid); } + SYS_POSIX_FADVISE = 416 // { int|sys|50|posix_fadvise(int fd, int PAD, off_t offset, off_t len, int advice); } + SYS_SELECT = 417 // { int|sys|50|select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_GETTIMEOFDAY = 418 // { int|sys|50|gettimeofday(struct timeval *tp, void *tzp); } + SYS_SETTIMEOFDAY = 419 // { int|sys|50|settimeofday(const struct timeval *tv, const void *tzp); } + SYS_UTIMES = 420 // { int|sys|50|utimes(const char *path, const struct timeval *tptr); } + SYS_ADJTIME = 421 // { int|sys|50|adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_FUTIMES = 423 // { int|sys|50|futimes(int fd, const struct timeval *tptr); } + SYS_LUTIMES = 424 // { int|sys|50|lutimes(const char *path, const struct timeval *tptr); } + SYS_SETITIMER = 425 // { int|sys|50|setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 426 // { int|sys|50|getitimer(int which, struct itimerval *itv); } + SYS_CLOCK_GETTIME = 427 // { int|sys|50|clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 428 // { int|sys|50|clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 429 // { int|sys|50|clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_NANOSLEEP = 430 // { int|sys|50|nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS___SIGTIMEDWAIT = 431 // { int|sys|50|__sigtimedwait(const sigset_t *set, siginfo_t *info, struct timespec *timeout); } + SYS__LWP_PARK = 434 // { int|sys|50|_lwp_park(const struct timespec *ts, lwpid_t unpark, const void *hint, const void *unparkhint); } + SYS_KEVENT = 435 // { int|sys|50|kevent(int fd, const struct kevent *changelist, size_t nchanges, struct kevent *eventlist, size_t nevents, const struct timespec *timeout); } + SYS_PSELECT = 436 // { int|sys|50|pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_POLLTS = 437 // { int|sys|50|pollts(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_STAT = 439 // { int|sys|50|stat(const char *path, struct stat *ub); } + SYS_FSTAT = 440 // { int|sys|50|fstat(int fd, struct stat *sb); } + SYS_LSTAT = 441 // { int|sys|50|lstat(const char *path, struct stat *ub); } + SYS___SEMCTL = 442 // { int|sys|50|__semctl(int semid, int semnum, int cmd, ... union __semun *arg); } + SYS_SHMCTL = 443 // { int|sys|50|shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 444 // { int|sys|50|msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_GETRUSAGE = 445 // { int|sys|50|getrusage(int who, struct rusage *rusage); } + SYS_TIMER_SETTIME = 446 // { int|sys|50|timer_settime(timer_t timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_TIMER_GETTIME = 447 // { int|sys|50|timer_gettime(timer_t timerid, struct itimerspec *value); } + SYS_NTP_GETTIME = 448 // { int|sys|50|ntp_gettime(struct ntptimeval *ntvp); } + SYS_WAIT4 = 449 // { int|sys|50|wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_MKNOD = 450 // { int|sys|50|mknod(const char *path, mode_t mode, dev_t dev); } + SYS_FHSTAT = 451 // { int|sys|50|fhstat(const void *fhp, size_t fh_size, struct stat *sb); } + SYS_PIPE2 = 453 // { int|sys||pipe2(int *fildes, int flags); } + SYS_DUP3 = 454 // { int|sys||dup3(int from, int to, int flags); } + SYS_KQUEUE1 = 455 // { int|sys||kqueue1(int flags); } + SYS_PACCEPT = 456 // { int|sys||paccept(int s, struct sockaddr *name, socklen_t *anamelen, const sigset_t *mask, int flags); } + SYS_LINKAT = 457 // { int|sys||linkat(int fd1, const char *name1, int fd2, const char *name2, int flags); } + SYS_RENAMEAT = 458 // { int|sys||renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_MKFIFOAT = 459 // { int|sys||mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 460 // { int|sys||mknodat(int fd, const char *path, mode_t mode, uint32_t dev); } + SYS_MKDIRAT = 461 // { int|sys||mkdirat(int fd, const char *path, mode_t mode); } + SYS_FACCESSAT = 462 // { int|sys||faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 463 // { int|sys||fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 464 // { int|sys||fchownat(int fd, const char *path, uid_t owner, gid_t group, int flag); } + SYS_FEXECVE = 465 // { int|sys||fexecve(int fd, char * const *argp, char * const *envp); } + SYS_FSTATAT = 466 // { int|sys||fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_UTIMENSAT = 467 // { int|sys||utimensat(int fd, const char *path, const struct timespec *tptr, int flag); } + SYS_OPENAT = 468 // { int|sys||openat(int fd, const char *path, int oflags, ... mode_t mode); } + SYS_READLINKAT = 469 // { int|sys||readlinkat(int fd, const char *path, char *buf, size_t bufsize); } + SYS_SYMLINKAT = 470 // { int|sys||symlinkat(const char *path1, int fd, const char *path2); } + SYS_UNLINKAT = 471 // { int|sys||unlinkat(int fd, const char *path, int flag); } + SYS_FUTIMENS = 472 // { int|sys||futimens(int fd, const struct timespec *tptr); } + SYS___QUOTACTL = 473 // { int|sys||__quotactl(const char *path, struct quotactl_args *args); } + SYS_POSIX_SPAWN = 474 // { int|sys||posix_spawn(pid_t *pid, const char *path, const struct posix_spawn_file_actions *file_actions, const struct posix_spawnattr *attrp, char *const *argv, char *const *envp); } + SYS_RECVMMSG = 475 // { int|sys||recvmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); } + SYS_SENDMMSG = 476 // { int|sys||sendmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go new file mode 100644 index 0000000..c6158a7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -0,0 +1,274 @@ +// mksysnum_netbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build arm,netbsd + +package unix + +const ( + SYS_EXIT = 1 // { void|sys||exit(int rval); } + SYS_FORK = 2 // { int|sys||fork(void); } + SYS_READ = 3 // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t|sys||write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int|sys||open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int|sys||close(int fd); } + SYS_LINK = 9 // { int|sys||link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int|sys||unlink(const char *path); } + SYS_CHDIR = 12 // { int|sys||chdir(const char *path); } + SYS_FCHDIR = 13 // { int|sys||fchdir(int fd); } + SYS_CHMOD = 15 // { int|sys||chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int|sys||chown(const char *path, uid_t uid, gid_t gid); } + SYS_BREAK = 17 // { int|sys||obreak(char *nsize); } + SYS_GETPID = 20 // { pid_t|sys||getpid_with_ppid(void); } + SYS_UNMOUNT = 22 // { int|sys||unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int|sys||setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t|sys||getuid_with_euid(void); } + SYS_GETEUID = 25 // { uid_t|sys||geteuid(void); } + SYS_PTRACE = 26 // { int|sys||ptrace(int req, pid_t pid, void *addr, int data); } + SYS_RECVMSG = 27 // { ssize_t|sys||recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t|sys||sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t|sys||recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int|sys||accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int|sys||getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int|sys||getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int|sys||access(const char *path, int flags); } + SYS_CHFLAGS = 34 // { int|sys||chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int|sys||fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { void|sys||sync(void); } + SYS_KILL = 37 // { int|sys||kill(pid_t pid, int signum); } + SYS_GETPPID = 39 // { pid_t|sys||getppid(void); } + SYS_DUP = 41 // { int|sys||dup(int fd); } + SYS_PIPE = 42 // { int|sys||pipe(void); } + SYS_GETEGID = 43 // { gid_t|sys||getegid(void); } + SYS_PROFIL = 44 // { int|sys||profil(char *samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int|sys||ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_GETGID = 47 // { gid_t|sys||getgid_with_egid(void); } + SYS___GETLOGIN = 49 // { int|sys||__getlogin(char *namebuf, size_t namelen); } + SYS___SETLOGIN = 50 // { int|sys||__setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int|sys||acct(const char *path); } + SYS_IOCTL = 54 // { int|sys||ioctl(int fd, u_long com, ... void *data); } + SYS_REVOKE = 56 // { int|sys||revoke(const char *path); } + SYS_SYMLINK = 57 // { int|sys||symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t|sys||readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int|sys||execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t|sys||umask(mode_t newmask); } + SYS_CHROOT = 61 // { int|sys||chroot(const char *path); } + SYS_VFORK = 66 // { int|sys||vfork(void); } + SYS_SBRK = 69 // { int|sys||sbrk(intptr_t incr); } + SYS_SSTK = 70 // { int|sys||sstk(int incr); } + SYS_VADVISE = 72 // { int|sys||ovadvise(int anom); } + SYS_MUNMAP = 73 // { int|sys||munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int|sys||mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int|sys||madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int|sys||mincore(void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int|sys||getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int|sys||setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int|sys||getpgrp(void); } + SYS_SETPGID = 82 // { int|sys||setpgid(pid_t pid, pid_t pgid); } + SYS_DUP2 = 90 // { int|sys||dup2(int from, int to); } + SYS_FCNTL = 92 // { int|sys||fcntl(int fd, int cmd, ... void *arg); } + SYS_FSYNC = 95 // { int|sys||fsync(int fd); } + SYS_SETPRIORITY = 96 // { int|sys||setpriority(int which, id_t who, int prio); } + SYS_CONNECT = 98 // { int|sys||connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETPRIORITY = 100 // { int|sys||getpriority(int which, id_t who); } + SYS_BIND = 104 // { int|sys||bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int|sys||setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int|sys||listen(int s, int backlog); } + SYS_GETSOCKOPT = 118 // { int|sys||getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_READV = 120 // { ssize_t|sys||readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t|sys||writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_FCHOWN = 123 // { int|sys||fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int|sys||fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int|sys||setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int|sys||setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int|sys||rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int|sys||flock(int fd, int how); } + SYS_MKFIFO = 132 // { int|sys||mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t|sys||sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int|sys||shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int|sys||socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int|sys||mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int|sys||rmdir(const char *path); } + SYS_SETSID = 147 // { int|sys||setsid(void); } + SYS_SYSARCH = 165 // { int|sys||sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t|sys||pread(int fd, void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_PWRITE = 174 // { ssize_t|sys||pwrite(int fd, const void *buf, size_t nbyte, int PAD, off_t offset); } + SYS_NTP_ADJTIME = 176 // { int|sys||ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int|sys||setgid(gid_t gid); } + SYS_SETEGID = 182 // { int|sys||setegid(gid_t egid); } + SYS_SETEUID = 183 // { int|sys||seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long|sys||pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long|sys||fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int|sys||getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int|sys||setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *|sys||mmap(void *addr, size_t len, int prot, int flags, int fd, long PAD, off_t pos); } + SYS_LSEEK = 199 // { off_t|sys||lseek(int fd, int PAD, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int|sys||truncate(const char *path, int PAD, off_t length); } + SYS_FTRUNCATE = 201 // { int|sys||ftruncate(int fd, int PAD, off_t length); } + SYS___SYSCTL = 202 // { int|sys||__sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, const void *new, size_t newlen); } + SYS_MLOCK = 203 // { int|sys||mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int|sys||munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int|sys||undelete(const char *path); } + SYS_GETPGID = 207 // { pid_t|sys||getpgid(pid_t pid); } + SYS_REBOOT = 208 // { int|sys||reboot(int opt, char *bootstr); } + SYS_POLL = 209 // { int|sys||poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int|sys||semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int|sys||semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_SEMCONFIG = 223 // { int|sys||semconfig(int flag); } + SYS_MSGGET = 225 // { int|sys||msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int|sys||msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t|sys||msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *|sys||shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int|sys||shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int|sys||shmget(key_t key, size_t size, int shmflg); } + SYS_TIMER_CREATE = 235 // { int|sys||timer_create(clockid_t clock_id, struct sigevent *evp, timer_t *timerid); } + SYS_TIMER_DELETE = 236 // { int|sys||timer_delete(timer_t timerid); } + SYS_TIMER_GETOVERRUN = 239 // { int|sys||timer_getoverrun(timer_t timerid); } + SYS_FDATASYNC = 241 // { int|sys||fdatasync(int fd); } + SYS_MLOCKALL = 242 // { int|sys||mlockall(int flags); } + SYS_MUNLOCKALL = 243 // { int|sys||munlockall(void); } + SYS_SIGQUEUEINFO = 245 // { int|sys||sigqueueinfo(pid_t pid, const siginfo_t *info); } + SYS_MODCTL = 246 // { int|sys||modctl(int cmd, void *arg); } + SYS___POSIX_RENAME = 270 // { int|sys||__posix_rename(const char *from, const char *to); } + SYS_SWAPCTL = 271 // { int|sys||swapctl(int cmd, void *arg, int misc); } + SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); } + SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); } + SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); } + SYS_MSYNC = 277 // { int|sys|13|msync(void *addr, size_t len, int flags); } + SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); } + SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); } + SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 286 // { pid_t|sys||getsid(pid_t pid); } + SYS___CLONE = 287 // { pid_t|sys||__clone(int flags, void *stack); } + SYS_FKTRACE = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); } + SYS_PREADV = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t|sys||pwritev(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); } + SYS___GETCWD = 296 // { int|sys||__getcwd(char *bufp, size_t length); } + SYS_FCHROOT = 297 // { int|sys||fchroot(int fd); } + SYS_LCHFLAGS = 304 // { int|sys||lchflags(const char *path, u_long flags); } + SYS_ISSETUGID = 305 // { int|sys||issetugid(void); } + SYS_UTRACE = 306 // { int|sys||utrace(const char *label, void *addr, size_t len); } + SYS_GETCONTEXT = 307 // { int|sys||getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 308 // { int|sys||setcontext(const struct __ucontext *ucp); } + SYS__LWP_CREATE = 309 // { int|sys||_lwp_create(const struct __ucontext *ucp, u_long flags, lwpid_t *new_lwp); } + SYS__LWP_EXIT = 310 // { int|sys||_lwp_exit(void); } + SYS__LWP_SELF = 311 // { lwpid_t|sys||_lwp_self(void); } + SYS__LWP_WAIT = 312 // { int|sys||_lwp_wait(lwpid_t wait_for, lwpid_t *departed); } + SYS__LWP_SUSPEND = 313 // { int|sys||_lwp_suspend(lwpid_t target); } + SYS__LWP_CONTINUE = 314 // { int|sys||_lwp_continue(lwpid_t target); } + SYS__LWP_WAKEUP = 315 // { int|sys||_lwp_wakeup(lwpid_t target); } + SYS__LWP_GETPRIVATE = 316 // { void *|sys||_lwp_getprivate(void); } + SYS__LWP_SETPRIVATE = 317 // { void|sys||_lwp_setprivate(void *ptr); } + SYS__LWP_KILL = 318 // { int|sys||_lwp_kill(lwpid_t target, int signo); } + SYS__LWP_DETACH = 319 // { int|sys||_lwp_detach(lwpid_t target); } + SYS__LWP_UNPARK = 321 // { int|sys||_lwp_unpark(lwpid_t target, const void *hint); } + SYS__LWP_UNPARK_ALL = 322 // { ssize_t|sys||_lwp_unpark_all(const lwpid_t *targets, size_t ntargets, const void *hint); } + SYS__LWP_SETNAME = 323 // { int|sys||_lwp_setname(lwpid_t target, const char *name); } + SYS__LWP_GETNAME = 324 // { int|sys||_lwp_getname(lwpid_t target, char *name, size_t len); } + SYS__LWP_CTL = 325 // { int|sys||_lwp_ctl(int features, struct lwpctl **address); } + SYS___SIGACTION_SIGTRAMP = 340 // { int|sys||__sigaction_sigtramp(int signum, const struct sigaction *nsa, struct sigaction *osa, const void *tramp, int vers); } + SYS_PMC_GET_INFO = 341 // { int|sys||pmc_get_info(int ctr, int op, void *args); } + SYS_PMC_CONTROL = 342 // { int|sys||pmc_control(int ctr, int op, void *args); } + SYS_RASCTL = 343 // { int|sys||rasctl(void *addr, size_t len, int op); } + SYS_KQUEUE = 344 // { int|sys||kqueue(void); } + SYS__SCHED_SETPARAM = 346 // { int|sys||_sched_setparam(pid_t pid, lwpid_t lid, int policy, const struct sched_param *params); } + SYS__SCHED_GETPARAM = 347 // { int|sys||_sched_getparam(pid_t pid, lwpid_t lid, int *policy, struct sched_param *params); } + SYS__SCHED_SETAFFINITY = 348 // { int|sys||_sched_setaffinity(pid_t pid, lwpid_t lid, size_t size, const cpuset_t *cpuset); } + SYS__SCHED_GETAFFINITY = 349 // { int|sys||_sched_getaffinity(pid_t pid, lwpid_t lid, size_t size, cpuset_t *cpuset); } + SYS_SCHED_YIELD = 350 // { int|sys||sched_yield(void); } + SYS_FSYNC_RANGE = 354 // { int|sys||fsync_range(int fd, int flags, off_t start, off_t length); } + SYS_UUIDGEN = 355 // { int|sys||uuidgen(struct uuid *store, int count); } + SYS_GETVFSSTAT = 356 // { int|sys||getvfsstat(struct statvfs *buf, size_t bufsize, int flags); } + SYS_STATVFS1 = 357 // { int|sys||statvfs1(const char *path, struct statvfs *buf, int flags); } + SYS_FSTATVFS1 = 358 // { int|sys||fstatvfs1(int fd, struct statvfs *buf, int flags); } + SYS_EXTATTRCTL = 360 // { int|sys||extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 361 // { int|sys||extattr_set_file(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 362 // { ssize_t|sys||extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 363 // { int|sys||extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FD = 364 // { int|sys||extattr_set_fd(int fd, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 365 // { ssize_t|sys||extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 366 // { int|sys||extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 367 // { int|sys||extattr_set_link(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 368 // { ssize_t|sys||extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 369 // { int|sys||extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_LIST_FD = 370 // { ssize_t|sys||extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 371 // { ssize_t|sys||extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 372 // { ssize_t|sys||extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_SETXATTR = 375 // { int|sys||setxattr(const char *path, const char *name, const void *value, size_t size, int flags); } + SYS_LSETXATTR = 376 // { int|sys||lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags); } + SYS_FSETXATTR = 377 // { int|sys||fsetxattr(int fd, const char *name, const void *value, size_t size, int flags); } + SYS_GETXATTR = 378 // { int|sys||getxattr(const char *path, const char *name, void *value, size_t size); } + SYS_LGETXATTR = 379 // { int|sys||lgetxattr(const char *path, const char *name, void *value, size_t size); } + SYS_FGETXATTR = 380 // { int|sys||fgetxattr(int fd, const char *name, void *value, size_t size); } + SYS_LISTXATTR = 381 // { int|sys||listxattr(const char *path, char *list, size_t size); } + SYS_LLISTXATTR = 382 // { int|sys||llistxattr(const char *path, char *list, size_t size); } + SYS_FLISTXATTR = 383 // { int|sys||flistxattr(int fd, char *list, size_t size); } + SYS_REMOVEXATTR = 384 // { int|sys||removexattr(const char *path, const char *name); } + SYS_LREMOVEXATTR = 385 // { int|sys||lremovexattr(const char *path, const char *name); } + SYS_FREMOVEXATTR = 386 // { int|sys||fremovexattr(int fd, const char *name); } + SYS_GETDENTS = 390 // { int|sys|30|getdents(int fd, char *buf, size_t count); } + SYS_SOCKET = 394 // { int|sys|30|socket(int domain, int type, int protocol); } + SYS_GETFH = 395 // { int|sys|30|getfh(const char *fname, void *fhp, size_t *fh_size); } + SYS_MOUNT = 410 // { int|sys|50|mount(const char *type, const char *path, int flags, void *data, size_t data_len); } + SYS_MREMAP = 411 // { void *|sys||mremap(void *old_address, size_t old_size, void *new_address, size_t new_size, int flags); } + SYS_PSET_CREATE = 412 // { int|sys||pset_create(psetid_t *psid); } + SYS_PSET_DESTROY = 413 // { int|sys||pset_destroy(psetid_t psid); } + SYS_PSET_ASSIGN = 414 // { int|sys||pset_assign(psetid_t psid, cpuid_t cpuid, psetid_t *opsid); } + SYS__PSET_BIND = 415 // { int|sys||_pset_bind(idtype_t idtype, id_t first_id, id_t second_id, psetid_t psid, psetid_t *opsid); } + SYS_POSIX_FADVISE = 416 // { int|sys|50|posix_fadvise(int fd, int PAD, off_t offset, off_t len, int advice); } + SYS_SELECT = 417 // { int|sys|50|select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_GETTIMEOFDAY = 418 // { int|sys|50|gettimeofday(struct timeval *tp, void *tzp); } + SYS_SETTIMEOFDAY = 419 // { int|sys|50|settimeofday(const struct timeval *tv, const void *tzp); } + SYS_UTIMES = 420 // { int|sys|50|utimes(const char *path, const struct timeval *tptr); } + SYS_ADJTIME = 421 // { int|sys|50|adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_FUTIMES = 423 // { int|sys|50|futimes(int fd, const struct timeval *tptr); } + SYS_LUTIMES = 424 // { int|sys|50|lutimes(const char *path, const struct timeval *tptr); } + SYS_SETITIMER = 425 // { int|sys|50|setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 426 // { int|sys|50|getitimer(int which, struct itimerval *itv); } + SYS_CLOCK_GETTIME = 427 // { int|sys|50|clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 428 // { int|sys|50|clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 429 // { int|sys|50|clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_NANOSLEEP = 430 // { int|sys|50|nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS___SIGTIMEDWAIT = 431 // { int|sys|50|__sigtimedwait(const sigset_t *set, siginfo_t *info, struct timespec *timeout); } + SYS__LWP_PARK = 434 // { int|sys|50|_lwp_park(const struct timespec *ts, lwpid_t unpark, const void *hint, const void *unparkhint); } + SYS_KEVENT = 435 // { int|sys|50|kevent(int fd, const struct kevent *changelist, size_t nchanges, struct kevent *eventlist, size_t nevents, const struct timespec *timeout); } + SYS_PSELECT = 436 // { int|sys|50|pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_POLLTS = 437 // { int|sys|50|pollts(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_STAT = 439 // { int|sys|50|stat(const char *path, struct stat *ub); } + SYS_FSTAT = 440 // { int|sys|50|fstat(int fd, struct stat *sb); } + SYS_LSTAT = 441 // { int|sys|50|lstat(const char *path, struct stat *ub); } + SYS___SEMCTL = 442 // { int|sys|50|__semctl(int semid, int semnum, int cmd, ... union __semun *arg); } + SYS_SHMCTL = 443 // { int|sys|50|shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 444 // { int|sys|50|msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_GETRUSAGE = 445 // { int|sys|50|getrusage(int who, struct rusage *rusage); } + SYS_TIMER_SETTIME = 446 // { int|sys|50|timer_settime(timer_t timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_TIMER_GETTIME = 447 // { int|sys|50|timer_gettime(timer_t timerid, struct itimerspec *value); } + SYS_NTP_GETTIME = 448 // { int|sys|50|ntp_gettime(struct ntptimeval *ntvp); } + SYS_WAIT4 = 449 // { int|sys|50|wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_MKNOD = 450 // { int|sys|50|mknod(const char *path, mode_t mode, dev_t dev); } + SYS_FHSTAT = 451 // { int|sys|50|fhstat(const void *fhp, size_t fh_size, struct stat *sb); } + SYS_PIPE2 = 453 // { int|sys||pipe2(int *fildes, int flags); } + SYS_DUP3 = 454 // { int|sys||dup3(int from, int to, int flags); } + SYS_KQUEUE1 = 455 // { int|sys||kqueue1(int flags); } + SYS_PACCEPT = 456 // { int|sys||paccept(int s, struct sockaddr *name, socklen_t *anamelen, const sigset_t *mask, int flags); } + SYS_LINKAT = 457 // { int|sys||linkat(int fd1, const char *name1, int fd2, const char *name2, int flags); } + SYS_RENAMEAT = 458 // { int|sys||renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_MKFIFOAT = 459 // { int|sys||mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 460 // { int|sys||mknodat(int fd, const char *path, mode_t mode, uint32_t dev); } + SYS_MKDIRAT = 461 // { int|sys||mkdirat(int fd, const char *path, mode_t mode); } + SYS_FACCESSAT = 462 // { int|sys||faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 463 // { int|sys||fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 464 // { int|sys||fchownat(int fd, const char *path, uid_t owner, gid_t group, int flag); } + SYS_FEXECVE = 465 // { int|sys||fexecve(int fd, char * const *argp, char * const *envp); } + SYS_FSTATAT = 466 // { int|sys||fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_UTIMENSAT = 467 // { int|sys||utimensat(int fd, const char *path, const struct timespec *tptr, int flag); } + SYS_OPENAT = 468 // { int|sys||openat(int fd, const char *path, int oflags, ... mode_t mode); } + SYS_READLINKAT = 469 // { int|sys||readlinkat(int fd, const char *path, char *buf, size_t bufsize); } + SYS_SYMLINKAT = 470 // { int|sys||symlinkat(const char *path1, int fd, const char *path2); } + SYS_UNLINKAT = 471 // { int|sys||unlinkat(int fd, const char *path, int flag); } + SYS_FUTIMENS = 472 // { int|sys||futimens(int fd, const struct timespec *tptr); } + SYS___QUOTACTL = 473 // { int|sys||__quotactl(const char *path, struct quotactl_args *args); } + SYS_POSIX_SPAWN = 474 // { int|sys||posix_spawn(pid_t *pid, const char *path, const struct posix_spawn_file_actions *file_actions, const struct posix_spawnattr *attrp, char *const *argv, char *const *envp); } + SYS_RECVMMSG = 475 // { int|sys||recvmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); } + SYS_SENDMMSG = 476 // { int|sys||sendmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go new file mode 100644 index 0000000..3e8ce2a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -0,0 +1,207 @@ +// mksysnum_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build 386,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int sys_open(const char *path, \ + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, \ + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, \ + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, \ + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, \ + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, \ + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, \ + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, \ + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, \ + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, \ + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, \ + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, \ + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, \ + SYS_ACCESS = 33 // { int sys_access(const char *path, int flags); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_KILL = 37 // { int sys_kill(int pid, int signum); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, \ + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, \ + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, \ + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_GETLOGIN = 49 // { int sys_getlogin(char *namebuf, u_int namelen); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, \ + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, \ + SYS_READLINK = 58 // { int sys_readlink(const char *path, char *buf, \ + SYS_EXECVE = 59 // { int sys_execve(const char *path, \ + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, \ + SYS_STATFS = 63 // { int sys_statfs(const char *path, \ + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, \ + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, \ + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, \ + SYS_SETITIMER = 69 // { int sys_setitimer(int which, \ + SYS_GETITIMER = 70 // { int sys_getitimer(int which, \ + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, \ + SYS_KEVENT = 72 // { int sys_kevent(int fd, \ + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, \ + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, \ + SYS_UTIMES = 76 // { int sys_utimes(const char *path, \ + SYS_FUTIMES = 77 // { int sys_futimes(int fd, \ + SYS_MINCORE = 78 // { int sys_mincore(void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, \ + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, \ + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, int pgid); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, \ + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, \ + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, \ + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, \ + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, \ + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, \ + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, \ + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, \ + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, \ + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \ + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { ssize_t sys_readv(int fd, \ + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, \ + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, \ + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, \ + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, \ + SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, \ + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, \ + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, \ + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, \ + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, \ + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, \ + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, \ + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS___SYSCTL = 202 // { int sys___sysctl(const int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, \ + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, \ + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, \ + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, \ + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, \ + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, \ + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, \ + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, \ + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, \ + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, \ + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, \ + SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, \ + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, \ + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, \ + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, \ + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, \ + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, \ + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, \ + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, \ + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, \ + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, \ + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, \ + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, \ + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, \ + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, \ + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, \ + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, \ + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, \ + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, \ + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, \ + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, \ + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, \ + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, \ + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go new file mode 100644 index 0000000..bd28146 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -0,0 +1,207 @@ +// mksysnum_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build amd64,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int sys_open(const char *path, \ + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, \ + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, \ + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, \ + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, \ + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, \ + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, \ + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, \ + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, \ + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, \ + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, \ + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, \ + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, \ + SYS_ACCESS = 33 // { int sys_access(const char *path, int flags); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_KILL = 37 // { int sys_kill(int pid, int signum); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, \ + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, \ + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, \ + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_GETLOGIN = 49 // { int sys_getlogin(char *namebuf, u_int namelen); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, \ + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, \ + SYS_READLINK = 58 // { int sys_readlink(const char *path, char *buf, \ + SYS_EXECVE = 59 // { int sys_execve(const char *path, \ + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, \ + SYS_STATFS = 63 // { int sys_statfs(const char *path, \ + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, \ + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, \ + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, \ + SYS_SETITIMER = 69 // { int sys_setitimer(int which, \ + SYS_GETITIMER = 70 // { int sys_getitimer(int which, \ + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, \ + SYS_KEVENT = 72 // { int sys_kevent(int fd, \ + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, \ + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, \ + SYS_UTIMES = 76 // { int sys_utimes(const char *path, \ + SYS_FUTIMES = 77 // { int sys_futimes(int fd, \ + SYS_MINCORE = 78 // { int sys_mincore(void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, \ + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, \ + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, int pgid); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, \ + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, \ + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, \ + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, \ + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, \ + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, \ + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, \ + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, \ + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, \ + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \ + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { ssize_t sys_readv(int fd, \ + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, \ + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, \ + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, \ + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, \ + SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, \ + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, \ + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, \ + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, \ + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, \ + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, \ + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, \ + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS___SYSCTL = 202 // { int sys___sysctl(const int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, \ + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, \ + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, \ + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, \ + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, \ + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, \ + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, \ + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, \ + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, \ + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, \ + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, \ + SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, \ + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, \ + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, \ + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, \ + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, \ + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, \ + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, \ + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, \ + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, \ + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, \ + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, \ + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, \ + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, \ + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, \ + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, \ + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, \ + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, \ + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, \ + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, \ + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, \ + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, \ + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, \ + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go new file mode 100644 index 0000000..32653e5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -0,0 +1,213 @@ +// mksysnum_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +// +build arm,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int sys_open(const char *path, \ + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, \ + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, \ + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, \ + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, \ + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, \ + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, \ + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, \ + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, \ + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, \ + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, \ + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, \ + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, \ + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, \ + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, \ + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, \ + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_GETLOGIN = 49 // { int sys_getlogin(char *namebuf, u_int namelen); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, \ + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, \ + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, \ + SYS_EXECVE = 59 // { int sys_execve(const char *path, \ + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, \ + SYS_STATFS = 63 // { int sys_statfs(const char *path, \ + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, \ + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, \ + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, \ + SYS_SETITIMER = 69 // { int sys_setitimer(int which, \ + SYS_GETITIMER = 70 // { int sys_getitimer(int which, \ + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, \ + SYS_KEVENT = 72 // { int sys_kevent(int fd, \ + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, \ + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, \ + SYS_UTIMES = 76 // { int sys_utimes(const char *path, \ + SYS_FUTIMES = 77 // { int sys_futimes(int fd, \ + SYS_MINCORE = 78 // { int sys_mincore(void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, \ + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, \ + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_SENDSYSLOG = 83 // { int sys_sendsyslog(const void *buf, size_t nbyte); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, \ + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, \ + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, \ + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, \ + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, \ + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, \ + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, \ + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, \ + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, \ + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, \ + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, \ + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \ + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { ssize_t sys_readv(int fd, \ + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, \ + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, \ + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, \ + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, \ + SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, \ + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, \ + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, \ + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, \ + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, \ + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, \ + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, \ + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS___SYSCTL = 202 // { int sys___sysctl(const int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, \ + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, \ + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, \ + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, \ + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, \ + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, \ + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, \ + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, \ + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, \ + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, \ + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, \ + SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, \ + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, \ + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, \ + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, \ + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, \ + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, \ + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, \ + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, \ + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, \ + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, \ + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, \ + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, \ + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, \ + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, \ + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, \ + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, \ + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, \ + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, \ + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, \ + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, \ + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, \ + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, \ + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go new file mode 100644 index 0000000..bc4bc89 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -0,0 +1,489 @@ +// cgo -godefs types_darwin.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,darwin + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timeval32 struct{} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 +} + +type Statfs_t struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Fstore_t struct { + Flags uint32 + Posmode int32 + Offset int64 + Length int64 + Bytesalloc int64 +} + +type Radvisory_t struct { + Offset int64 + Count int32 +} + +type Fbootstraptransfer_t struct { + Offset int64 + Length uint32 + Buffer *byte +} + +type Log2phys_t struct { + Flags uint32 + Contigbytes int64 + Devoffset int64 +} + +type Fsid struct { + Val [2]int32 +} + +type Dirent struct { + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + Pad_cgo_0 [3]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int32 + Udata *byte +} + +type FdSet struct { + Bits [32]int32 +} + +const ( + SizeofIfMsghdr = 0x70 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfmaMsghdr2 = 0x14 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval + Unused2 uint32 + Hwassist uint32 + Reserved1 uint32 + Reserved2 uint32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfmaMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Refcount int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire int32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Filler [4]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x2 + AT_REMOVEDIR = 0x80 + AT_SYMLINK_FOLLOW = 0x40 + AT_SYMLINK_NOFOLLOW = 0x20 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go new file mode 100644 index 0000000..d8abcab --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -0,0 +1,499 @@ +// cgo -godefs types_darwin.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,darwin + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Timeval32 struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Pad_cgo_0 [4]byte + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 +} + +type Statfs_t struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Fstore_t struct { + Flags uint32 + Posmode int32 + Offset int64 + Length int64 + Bytesalloc int64 +} + +type Radvisory_t struct { + Offset int64 + Count int32 + Pad_cgo_0 [4]byte +} + +type Fbootstraptransfer_t struct { + Offset int64 + Length uint64 + Buffer *byte +} + +type Log2phys_t struct { + Flags uint32 + Pad_cgo_0 [8]byte + Pad_cgo_1 [8]byte +} + +type Fsid struct { + Val [2]int32 +} + +type Dirent struct { + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + Pad_cgo_0 [3]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]int32 +} + +const ( + SizeofIfMsghdr = 0x70 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfmaMsghdr2 = 0x14 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 + Unused2 uint32 + Hwassist uint32 + Reserved1 uint32 + Reserved2 uint32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfmaMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Refcount int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire int32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Filler [4]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval32 + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]uint8 + Pad_cgo_0 [4]byte + Ispeed uint64 + Ospeed uint64 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x2 + AT_REMOVEDIR = 0x80 + AT_SYMLINK_FOLLOW = 0x40 + AT_SYMLINK_NOFOLLOW = 0x20 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go new file mode 100644 index 0000000..9749c9f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -0,0 +1,490 @@ +// NOTE: cgo can't generate struct Stat_t and struct Statfs_t yet +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +// +build arm,darwin + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timeval32 [0]byte + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 +} + +type Statfs_t struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Fstore_t struct { + Flags uint32 + Posmode int32 + Offset int64 + Length int64 + Bytesalloc int64 +} + +type Radvisory_t struct { + Offset int64 + Count int32 +} + +type Fbootstraptransfer_t struct { + Offset int64 + Length uint32 + Buffer *byte +} + +type Log2phys_t struct { + Flags uint32 + Contigbytes int64 + Devoffset int64 +} + +type Fsid struct { + Val [2]int32 +} + +type Dirent struct { + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + Pad_cgo_0 [3]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int32 + Udata *byte +} + +type FdSet struct { + Bits [32]int32 +} + +const ( + SizeofIfMsghdr = 0x70 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfmaMsghdr2 = 0x14 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval + Unused2 uint32 + Hwassist uint32 + Reserved1 uint32 + Reserved2 uint32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfmaMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Refcount int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire int32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Filler [4]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x2 + AT_REMOVEDIR = 0x80 + AT_SYMLINK_FOLLOW = 0x40 + AT_SYMLINK_NOFOLLOW = 0x20 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go new file mode 100644 index 0000000..810b0bd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -0,0 +1,499 @@ +// cgo -godefs types_darwin.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,darwin + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Timeval32 struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Pad_cgo_0 [4]byte + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 +} + +type Statfs_t struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Fstore_t struct { + Flags uint32 + Posmode int32 + Offset int64 + Length int64 + Bytesalloc int64 +} + +type Radvisory_t struct { + Offset int64 + Count int32 + Pad_cgo_0 [4]byte +} + +type Fbootstraptransfer_t struct { + Offset int64 + Length uint64 + Buffer *byte +} + +type Log2phys_t struct { + Flags uint32 + Pad_cgo_0 [8]byte + Pad_cgo_1 [8]byte +} + +type Fsid struct { + Val [2]int32 +} + +type Dirent struct { + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + Pad_cgo_0 [3]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]int32 +} + +const ( + SizeofIfMsghdr = 0x70 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfmaMsghdr2 = 0x14 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 + Unused2 uint32 + Hwassist uint32 + Reserved1 uint32 + Reserved2 uint32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfmaMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Refcount int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire int32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Filler [4]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval32 + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]uint8 + Pad_cgo_0 [4]byte + Ispeed uint64 + Ospeed uint64 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x2 + AT_REMOVEDIR = 0x80 + AT_SYMLINK_FOLLOW = 0x40 + AT_SYMLINK_NOFOLLOW = 0x20 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go new file mode 100644 index 0000000..315a553 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -0,0 +1,486 @@ +// cgo -godefs types_dragonfly.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,dragonfly + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Ino uint64 + Nlink uint32 + Dev uint32 + Mode uint16 + Padding1 uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare1 int64 + Qspare2 int64 +} + +type Statfs_t struct { + Spare2 int64 + Bsize int64 + Iosize int64 + Blocks int64 + Bfree int64 + Bavail int64 + Files int64 + Ffree int64 + Fsid Fsid + Owner uint32 + Type int32 + Flags int32 + _ [4]byte + Syncwrites int64 + Asyncwrites int64 + Fstypename [16]int8 + Mntonname [80]int8 + Syncreads int64 + Asyncreads int64 + Spares1 int16 + Mntfromname [80]int8 + Spares2 int16 + _ [4]byte + Spare [2]int64 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Namlen uint16 + Type uint8 + Unused1 uint8 + Unused2 uint32 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 + Rcf uint16 + Route [16]uint16 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen int32 + _ [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [16]uint64 +} + +const ( + SizeofIfMsghdr = 0xb0 + SizeofIfData = 0xa0 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x98 + SizeofRtMetrics = 0x70 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + _ [2]byte + Mtu uint64 + Metric uint64 + Link_state uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Oqdrops uint64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint64 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Pksent uint64 + Expire uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Recvpipe uint64 + Hopcount uint64 + Mssopt uint16 + Pad uint16 + _ [4]byte + Msl uint64 + Iwmaxsegs uint64 + Iwcapsegs uint64 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + _ [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = 0xfffafdcd + AT_SYMLINK_NOFOLLOW = 0x1 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Utsname struct { + Sysname [32]byte + Nodename [32]byte + Release [32]byte + Version [32]byte + Machine [32]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go new file mode 100644 index 0000000..878a21a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -0,0 +1,553 @@ +// cgo -godefs types_freebsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,freebsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtimespec Timespec + Pad_cgo_0 [8]byte +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 +} + +type Dirent struct { + Fileno uint32 + Reclen uint16 + Type uint8 + Namlen uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int32 + Udata *byte +} + +type FdSet struct { + X__fds_bits [32]uint32 +} + +const ( + sizeofIfMsghdr = 0xa8 + SizeofIfMsghdr = 0x60 + sizeofIfData = 0x98 + SizeofIfData = 0x50 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + X__ifi_epoch [8]byte + X__ifi_lastchange [16]byte +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Hwassist uint32 + Epoch int32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Weight uint32 + Filler [3]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0xc + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + X_bzh_pad [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_REMOVEDIR = 0x800 + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type CapRights struct { + Rights [2]uint64 +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go new file mode 100644 index 0000000..8408af1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -0,0 +1,556 @@ +// cgo -godefs types_freebsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,freebsd + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtimespec Timespec +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + Pad_cgo_0 [4]byte +} + +type Dirent struct { + Fileno uint32 + Reclen uint16 + Type uint8 + Namlen uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + X__fds_bits [16]uint64 +} + +const ( + sizeofIfMsghdr = 0xa8 + SizeofIfMsghdr = 0xa8 + sizeofIfData = 0x98 + SizeofIfData = 0x98 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x98 + SizeofRtMetrics = 0x70 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + X__ifi_epoch [8]byte + X__ifi_lastchange [16]byte +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Epoch int64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint64 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Expire uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Pksent uint64 + Weight uint64 + Filler [3]uint64 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0x18 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint64 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [6]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + X_bzh_pad [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_REMOVEDIR = 0x800 + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type CapRights struct { + Rights [2]uint64 +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go new file mode 100644 index 0000000..4b2d9a4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -0,0 +1,556 @@ +// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,freebsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 + Pad_cgo_0 [4]byte +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtimespec Timespec +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + Pad_cgo_0 [4]byte +} + +type Dirent struct { + Fileno uint32 + Reclen uint16 + Type uint8 + Namlen uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int32 + Udata *byte +} + +type FdSet struct { + X__fds_bits [32]uint32 +} + +const ( + sizeofIfMsghdr = 0xa8 + SizeofIfMsghdr = 0x70 + sizeofIfData = 0x98 + SizeofIfData = 0x60 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x5c + SizeofRtMetrics = 0x38 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + X__ifi_epoch [8]byte + X__ifi_lastchange [16]byte +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Hwassist uint32 + Pad_cgo_0 [4]byte + Epoch int64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 + Weight uint32 + Filler [3]uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0xc + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [6]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + X_bzh_pad [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_REMOVEDIR = 0x800 + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type CapRights struct { + Rights [2]uint64 +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go new file mode 100644 index 0000000..f9a9935 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -0,0 +1,897 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,linux + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timex struct { + Modes uint32 + Offset int32 + Freq int32 + Maxerror int32 + Esterror int32 + Status int32 + Constant int32 + Precision int32 + Tolerance int32 + Time Timeval + Tick int32 + Ppsfreq int32 + Jitter int32 + Shift int32 + Stabil int32 + Jitcnt int32 + Calcnt int32 + Errcnt int32 + Stbcnt int32 + Tai int32 + _ [44]byte +} + +type Time_t int32 + +type Tms struct { + Utime int32 + Stime int32 + Cutime int32 + Cstime int32 +} + +type Utimbuf struct { + Actime int32 + Modtime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + X__pad1 uint16 + _ [2]byte + X__st_ino uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + X__pad2 uint16 + _ [2]byte + Size int64 + Blksize int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Ino uint64 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int32 + Frsize int32 + Flags int32 + Spare [4]int32 +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [1]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x8 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [2]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Ebx int32 + Ecx int32 + Edx int32 + Esi int32 + Edi int32 + Ebp int32 + Eax int32 + Xds int32 + Xes int32 + Xfs int32 + Xgs int32 + Orig_eax int32 + Eip int32 + Xcs int32 + Eflags int32 + Esp int32 + Xss int32 +} + +type FdSet struct { + Bits [32]int32 +} + +type Sysinfo_t struct { + Uptime int32 + Loads [3]uint32 + Totalram uint32 + Freeram uint32 + Sharedram uint32 + Bufferram uint32 + Totalswap uint32 + Freeswap uint32 + Procs uint16 + Pad uint16 + Totalhigh uint32 + Freehigh uint32 + Unit uint32 + X_f [8]int8 +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint32 + Fname [6]int8 + Fpack [6]int8 +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [32]uint32 +} + +const RNDGETENTCNT = 0x80045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go new file mode 100644 index 0000000..4df7088 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -0,0 +1,915 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,linux + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + _ [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + _ [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + _ [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + X__pad0 int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [3]int64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + R15 uint64 + R14 uint64 + R13 uint64 + R12 uint64 + Rbp uint64 + Rbx uint64 + R11 uint64 + R10 uint64 + R9 uint64 + R8 uint64 + Rax uint64 + Rcx uint64 + Rdx uint64 + Rsi uint64 + Rdi uint64 + Orig_rax uint64 + Rip uint64 + Cs uint64 + Eflags uint64 + Rsp uint64 + Ss uint64 + Fs_base uint64 + Gs_base uint64 + Ds uint64 + Es uint64 + Fs uint64 + Gs uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + _ [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]int8 + _ [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [16]uint64 +} + +const RNDGETENTCNT = 0x80045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go new file mode 100644 index 0000000..a181469 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -0,0 +1,886 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,linux + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timex struct { + Modes uint32 + Offset int32 + Freq int32 + Maxerror int32 + Esterror int32 + Status int32 + Constant int32 + Precision int32 + Tolerance int32 + Time Timeval + Tick int32 + Ppsfreq int32 + Jitter int32 + Shift int32 + Stabil int32 + Jitcnt int32 + Calcnt int32 + Errcnt int32 + Stbcnt int32 + Tai int32 + _ [44]byte +} + +type Time_t int32 + +type Tms struct { + Utime int32 + Stime int32 + Cutime int32 + Cstime int32 +} + +type Utimbuf struct { + Actime int32 + Modtime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + X__pad1 uint16 + _ [2]byte + X__st_ino uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + X__pad2 uint16 + _ [6]byte + Size int64 + Blksize int32 + _ [4]byte + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Ino uint64 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int32 + Frsize int32 + Flags int32 + Spare [4]int32 + _ [4]byte +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x8 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [2]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Uregs [18]uint32 +} + +type FdSet struct { + Bits [32]int32 +} + +type Sysinfo_t struct { + Uptime int32 + Loads [3]uint32 + Totalram uint32 + Freeram uint32 + Sharedram uint32 + Bufferram uint32 + Totalswap uint32 + Freeswap uint32 + Procs uint16 + Pad uint16 + Totalhigh uint32 + Freehigh uint32 + Unit uint32 + X_f [8]uint8 +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint32 + Fname [6]uint8 + Fpack [6]uint8 +} + +type EpollEvent struct { + Events uint32 + PadFd int32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [32]uint32 +} + +const RNDGETENTCNT = 0x80045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]uint8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go new file mode 100644 index 0000000..cff50c3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -0,0 +1,894 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,linux + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + _ [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + _ [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + _ [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + X__pad1 uint64 + Size int64 + Blksize int32 + X__pad2 int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [2]int32 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Regs [31]uint64 + Sp uint64 + Pc uint64 + Pstate uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + _ [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]int8 + _ [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + PadFd int32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [16]uint64 +} + +const RNDGETENTCNT = 0x80045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go new file mode 100644 index 0000000..87d0a8b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -0,0 +1,891 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips,linux + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timex struct { + Modes uint32 + Offset int32 + Freq int32 + Maxerror int32 + Esterror int32 + Status int32 + Constant int32 + Precision int32 + Tolerance int32 + Time Timeval + Tick int32 + Ppsfreq int32 + Jitter int32 + Shift int32 + Stabil int32 + Jitcnt int32 + Calcnt int32 + Errcnt int32 + Stbcnt int32 + Tai int32 + _ [44]byte +} + +type Time_t int32 + +type Tms struct { + Utime int32 + Stime int32 + Cutime int32 + Cstime int32 +} + +type Utimbuf struct { + Actime int32 + Modtime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint32 + Pad1 [3]int32 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Pad2 [3]int32 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int32 + Pad4 int32 + Blocks int64 + Pad5 [14]int32 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Frsize int32 + _ [4]byte + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int32 + Flags int32 + Spare [5]int32 + _ [4]byte +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x8 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [2]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +type FdSet struct { + Bits [32]int32 +} + +type Sysinfo_t struct { + Uptime int32 + Loads [3]uint32 + Totalram uint32 + Freeram uint32 + Sharedram uint32 + Bufferram uint32 + Totalswap uint32 + Freeswap uint32 + Procs uint16 + Pad uint16 + Totalhigh uint32 + Freehigh uint32 + Unit uint32 + X_f [8]int8 +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint32 + Fname [6]int8 + Fpack [6]int8 +} + +type EpollEvent struct { + Events uint32 + PadFd int32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [32]uint32 +} + +const RNDGETENTCNT = 0x40045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [23]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go new file mode 100644 index 0000000..cf4e2bd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -0,0 +1,896 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64,linux + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + _ [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + _ [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + _ [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint32 + Pad1 [3]uint32 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Pad2 [3]uint32 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize uint32 + Pad4 uint32 + Blocks int64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Frsize int64 + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int64 + Flags int64 + Spare [5]int64 +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + _ [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]int8 + _ [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [16]uint64 +} + +const RNDGETENTCNT = 0x40045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [23]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go new file mode 100644 index 0000000..b8da482 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -0,0 +1,896 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64le,linux + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + _ [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + _ [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + _ [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint32 + Pad1 [3]uint32 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Pad2 [3]uint32 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize uint32 + Pad4 uint32 + Blocks int64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Frsize int64 + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int64 + Flags int64 + Spare [5]int64 +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + _ [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]int8 + _ [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [16]uint64 +} + +const RNDGETENTCNT = 0x40045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [23]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go new file mode 100644 index 0000000..7106b51 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -0,0 +1,891 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mipsle,linux + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timex struct { + Modes uint32 + Offset int32 + Freq int32 + Maxerror int32 + Esterror int32 + Status int32 + Constant int32 + Precision int32 + Tolerance int32 + Time Timeval + Tick int32 + Ppsfreq int32 + Jitter int32 + Shift int32 + Stabil int32 + Jitcnt int32 + Calcnt int32 + Errcnt int32 + Stbcnt int32 + Tai int32 + _ [44]byte +} + +type Time_t int32 + +type Tms struct { + Utime int32 + Stime int32 + Cutime int32 + Cstime int32 +} + +type Utimbuf struct { + Actime int32 + Modtime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint32 + Pad1 [3]int32 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Pad2 [3]int32 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int32 + Pad4 int32 + Blocks int64 + Pad5 [14]int32 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Frsize int32 + _ [4]byte + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int32 + Flags int32 + Spare [5]int32 + _ [4]byte +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x8 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [2]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +type FdSet struct { + Bits [32]int32 +} + +type Sysinfo_t struct { + Uptime int32 + Loads [3]uint32 + Totalram uint32 + Freeram uint32 + Sharedram uint32 + Bufferram uint32 + Totalswap uint32 + Freeswap uint32 + Procs uint16 + Pad uint16 + Totalhigh uint32 + Freehigh uint32 + Unit uint32 + X_f [8]int8 +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint32 + Fname [6]int8 + Fpack [6]int8 +} + +type EpollEvent struct { + Events uint32 + PadFd int32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [32]uint32 +} + +const RNDGETENTCNT = 0x40045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [23]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go new file mode 100644 index 0000000..319071c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -0,0 +1,904 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build ppc64,linux + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + _ [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + _ [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + _ [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + X__pad2 int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ uint64 + _ uint64 + _ uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Gpr [32]uint64 + Nip uint64 + Msr uint64 + Orig_gpr3 uint64 + Ctr uint64 + Link uint64 + Xer uint64 + Ccr uint64 + Softe uint64 + Trap uint64 + Dar uint64 + Dsisr uint64 + Result uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + _ [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]uint8 + _ [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]uint8 + Fpack [6]uint8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + X_padFd int32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [16]uint64 +} + +const RNDGETENTCNT = 0x40045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + Line uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]uint8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go new file mode 100644 index 0000000..ef00ed4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -0,0 +1,904 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build ppc64le,linux + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + _ [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + _ [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + _ [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + X__pad2 int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ uint64 + _ uint64 + _ uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Gpr [32]uint64 + Nip uint64 + Msr uint64 + Orig_gpr3 uint64 + Ctr uint64 + Link uint64 + Xer uint64 + Ccr uint64 + Softe uint64 + Trap uint64 + Dar uint64 + Dsisr uint64 + Result uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + _ [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]uint8 + _ [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]uint8 + Fpack [6]uint8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + X_padFd int32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [16]uint64 +} + +const RNDGETENTCNT = 0x40045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + Line uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]uint8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go new file mode 100644 index 0000000..e9ee497 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -0,0 +1,921 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build s390x,linux + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + _ [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + _ [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + _ [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + _ int32 + Rdev uint64 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int64 + Blocks int64 + _ [3]int64 +} + +type Statfs_t struct { + Type uint32 + Bsize uint32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen uint32 + Frsize uint32 + Flags uint32 + Spare [4]uint32 + _ [4]byte +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + _ int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Fsid struct { + _ [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x6 + FADV_NOREUSE = 0x7 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Psw PtracePsw + Gprs [16]uint64 + Acrs [16]uint32 + Orig_gpr2 uint64 + Fp_regs PtraceFpregs + Per_info PtracePer + Ieee_instruction_pointer uint64 +} + +type PtracePsw struct { + Mask uint64 + Addr uint64 +} + +type PtraceFpregs struct { + Fpc uint32 + _ [4]byte + Fprs [16]float64 +} + +type PtracePer struct { + _ [0]uint64 + _ [24]byte + _ [8]byte + Starting_addr uint64 + Ending_addr uint64 + Perc_atmid uint16 + _ [6]byte + Address uint64 + Access_id uint8 + _ [7]byte +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + _ [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + _ [0]int8 + _ [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + _ [16]uint64 +} + +const RNDGETENTCNT = 0x80045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go new file mode 100644 index 0000000..8e7384b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -0,0 +1,690 @@ +// +build sparc64,linux +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go | go run mkpost.go + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Timex struct { + Modes uint32 + Pad_cgo_0 [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Pad_cgo_1 [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Pad_cgo_2 [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + Pad_cgo_3 [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + X__pad1 uint16 + Pad_cgo_0 [6]byte + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + X__pad2 uint16 + Pad_cgo_1 [6]byte + Size int64 + Blksize int64 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + X__glibc_reserved4 uint64 + X__glibc_reserved5 uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + Pad_cgo_0 [5]byte +} + +type Fsid struct { + X__val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + Pad_cgo_0 [4]byte + Start int64 + Len int64 + Pid int32 + X__glibc_reserved int16 + Pad_cgo_1 [2]byte +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrCAN struct { + Family uint16 + Pad_cgo_0 [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Pad_cgo_0 [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + X__ifi_pad uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Regs [16]uint64 + Tstate uint64 + Tpc uint64 + Tnpc uint64 + Y uint32 + Magic uint32 +} + +type ptracePsw struct { +} + +type ptraceFpregs struct { +} + +type ptracePer struct { +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Pad_cgo_0 [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + X_f [0]int8 + Pad_cgo_1 [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + Pad_cgo_0 [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + Pad_cgo_1 [4]byte +} + +type EpollEvent struct { + Events uint32 + X_padFd int32 + Fd int32 + Pad int32 +} + +const ( + AT_FDCWD = -0x64 + AT_REMOVEDIR = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x800 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + X__val [16]uint64 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go new file mode 100644 index 0000000..da70faa --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -0,0 +1,439 @@ +// cgo -godefs types_netbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,netbsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 +} + +type Timeval struct { + Sec int64 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Mode uint32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 +} + +type Statfs_t [0]byte + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [512]int8 + Pad_cgo_0 [3]byte +} + +type Fsid struct { + X__fsid_val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter uint32 + Flags uint32 + Fflags uint32 + Data int64 + Udata int32 +} + +type FdSet struct { + Bits [8]uint32 +} + +const ( + SizeofIfMsghdr = 0x98 + SizeofIfData = 0x84 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x78 + SizeofRtMetrics = 0x50 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData + Pad_cgo_1 [4]byte +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Pad_cgo_0 [1]byte + Link_state int32 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Lastchange Timespec +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Metric int32 + Index uint16 + Pad_cgo_0 [6]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits int32 + Pad_cgo_1 [4]byte + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Expire int64 + Pksent int64 +} + +type Mclpool [0]byte + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x80 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint64 + Drop uint64 + Capt uint64 + Padding [13]uint64 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfTimeval struct { + Sec int32 + Usec int32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sysctlnode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + X__rsvd uint32 + Un [16]byte + X_sysctl_size [8]byte + X_sysctl_func [8]byte + X_sysctl_parent [8]byte + X_sysctl_desc [8]byte +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go new file mode 100644 index 0000000..0963ab8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -0,0 +1,446 @@ +// cgo -godefs types_netbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,netbsd + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Mode uint32 + Pad_cgo_0 [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Pad_cgo_1 [4]byte + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + Pad_cgo_2 [4]byte +} + +type Statfs_t [0]byte + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [512]int8 + Pad_cgo_0 [3]byte +} + +type Fsid struct { + X__fsid_val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter uint32 + Flags uint32 + Fflags uint32 + Pad_cgo_0 [4]byte + Data int64 + Udata int64 +} + +type FdSet struct { + Bits [8]uint32 +} + +const ( + SizeofIfMsghdr = 0x98 + SizeofIfData = 0x88 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x78 + SizeofRtMetrics = 0x50 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Pad_cgo_0 [1]byte + Link_state int32 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Lastchange Timespec +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Metric int32 + Index uint16 + Pad_cgo_0 [6]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits int32 + Pad_cgo_1 [4]byte + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Expire int64 + Pksent int64 +} + +type Mclpool [0]byte + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x80 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint64 + Drop uint64 + Capt uint64 + Padding [13]uint64 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [6]byte +} + +type BpfTimeval struct { + Sec int64 + Usec int64 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sysctlnode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + X__rsvd uint32 + Un [16]byte + X_sysctl_size [8]byte + X_sysctl_func [8]byte + X_sysctl_parent [8]byte + X_sysctl_desc [8]byte +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go new file mode 100644 index 0000000..211f641 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -0,0 +1,444 @@ +// cgo -godefs types_netbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,netbsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 + Pad_cgo_0 [4]byte +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Mode uint32 + Pad_cgo_0 [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Pad_cgo_1 [4]byte + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + Pad_cgo_2 [4]byte +} + +type Statfs_t [0]byte + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [512]int8 + Pad_cgo_0 [3]byte +} + +type Fsid struct { + X__fsid_val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [12]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter uint32 + Flags uint32 + Fflags uint32 + Data int64 + Udata int32 + Pad_cgo_0 [4]byte +} + +type FdSet struct { + Bits [8]uint32 +} + +const ( + SizeofIfMsghdr = 0x98 + SizeofIfData = 0x88 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x78 + SizeofRtMetrics = 0x50 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Pad_cgo_0 [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Pad_cgo_0 [1]byte + Link_state int32 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Lastchange Timespec +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Metric int32 + Index uint16 + Pad_cgo_0 [6]byte +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Pad_cgo_0 [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits int32 + Pad_cgo_1 [4]byte + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Expire int64 + Pksent int64 +} + +type Mclpool [0]byte + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x80 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint64 + Drop uint64 + Capt uint64 + Padding [13]uint64 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfTimeval struct { + Sec int32 + Usec int32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sysctlnode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + X__rsvd uint32 + Un [16]byte + X_sysctl_size [8]byte + X_sysctl_func [8]byte + X_sysctl_parent [8]byte + X_sysctl_desc [8]byte +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go new file mode 100644 index 0000000..d5a2d75 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -0,0 +1,484 @@ +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,openbsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 +} + +type Timeval struct { + Sec int64 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + X__st_birthtim Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + Pad_cgo_0 [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + X__d_padding [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xec + SizeofIfData = 0xd4 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Pad uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval + Mclpool [7]Mclpool +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct { + Grown int32 + Alive uint16 + Hwm uint16 + Cwm uint16 + Lwm uint16 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x2 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go new file mode 100644 index 0000000..d531410 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -0,0 +1,491 @@ +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,openbsd + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Pad_cgo_0 [4]byte + X__st_birthtim Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + Pad_cgo_0 [4]byte + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + Pad_cgo_1 [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + X__d_padding [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xf8 + SizeofIfData = 0xe0 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Pad uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Capabilities uint32 + Pad_cgo_0 [4]byte + Lastchange Timeval + Mclpool [7]Mclpool + Pad_cgo_1 [4]byte +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct { + Grown int32 + Alive uint16 + Hwm uint16 + Cwm uint16 + Lwm uint16 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Pad_cgo_0 [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x2 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go new file mode 100644 index 0000000..e35b13b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -0,0 +1,477 @@ +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,openbsd + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 +} + +type Timeval struct { + Sec int64 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + X__st_birthtim Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]uint8 + F_mntonname [90]uint8 + F_mntfromname [90]uint8 + F_mntfromspec [90]uint8 + Pad_cgo_0 [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + X__d_padding [4]uint8 + Name [256]uint8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0x98 + SizeofIfData = 0x80 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Pad uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]uint8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x2 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go new file mode 100644 index 0000000..2248598 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -0,0 +1,459 @@ +// cgo -godefs types_solaris.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,solaris + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x400 + MaxHostNameLen = 0x100 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timeval32 struct { + Sec int32 + Usec int32 +} + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +const ( + S_IFMT = 0xf000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int32 + _ [4]byte + Blocks int64 + Fstype [16]int8 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Sysid int32 + Pid int32 + Pad [4]int64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Name [1]int8 + _ [5]byte +} + +type _Fsblkcnt_t uint64 + +type Statvfs_t struct { + Bsize uint64 + Frsize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fsid uint64 + Basetype [16]int8 + Flag uint64 + Namemax uint64 + Fstr [32]int8 +} + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrDatalink struct { + Family uint16 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [244]int8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [236]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *int8 + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen int32 + _ [4]byte + Accrights *int8 + Accrightslen int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + X__icmp6_filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x20 + SizeofSockaddrAny = 0xfc + SizeofSockaddrUnix = 0x6e + SizeofSockaddrDatalink = 0xfc + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x24 + SizeofICMPv6Filter = 0x20 +) + +type FdSet struct { + Bits [1024]int64 +} + +type Utsname struct { + Sysname [257]byte + Nodename [257]byte + Release [257]byte + Version [257]byte + Machine [257]byte +} + +type Ustat_t struct { + Tfree int64 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +const ( + AT_FDCWD = 0xffd19553 + AT_SYMLINK_NOFOLLOW = 0x1000 + AT_SYMLINK_FOLLOW = 0x2000 + AT_REMOVEDIR = 0x1 + AT_EACCESS = 0x4 +) + +const ( + SizeofIfMsghdr = 0x54 + SizeofIfData = 0x44 + SizeofIfaMsghdr = 0x14 + SizeofRtMsghdr = 0x4c + SizeofRtMetrics = 0x28 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + _ [1]byte + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Lastchange Timeval32 +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint32 + Mtu uint32 + Hopcount uint32 + Expire uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pksent uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x80 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint64 + Drop uint64 + Capt uint64 + Padding [13]uint64 +} + +type BpfProgram struct { + Len uint32 + _ [4]byte + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfTimeval struct { + Sec int32 + Usec int32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + _ [1]byte +} + +type Termio struct { + Iflag uint16 + Oflag uint16 + Cflag uint16 + Lflag uint16 + Line int8 + Cc [8]uint8 + _ [1]byte +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/windows/asm_windows_386.s b/vendor/golang.org/x/sys/windows/asm_windows_386.s new file mode 100644 index 0000000..1c20dd2 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/asm_windows_386.s @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +// System calls for 386, Windows are implemented in runtime/syscall_windows.goc +// + +TEXT ·getprocaddress(SB), 7, $0-8 + JMP syscall·getprocaddress(SB) + +TEXT ·loadlibrary(SB), 7, $0-4 + JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/asm_windows_amd64.s b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s new file mode 100644 index 0000000..4d025ab --- /dev/null +++ b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +// System calls for amd64, Windows are implemented in runtime/syscall_windows.goc +// + +TEXT ·getprocaddress(SB), 7, $0-32 + JMP syscall·getprocaddress(SB) + +TEXT ·loadlibrary(SB), 7, $0-8 + JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go new file mode 100644 index 0000000..e92c05b --- /dev/null +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -0,0 +1,378 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +// DLLError describes reasons for DLL load failures. +type DLLError struct { + Err error + ObjName string + Msg string +} + +func (e *DLLError) Error() string { return e.Msg } + +// Implemented in runtime/syscall_windows.goc; we provide jumps to them in our assembly file. +func loadlibrary(filename *uint16) (handle uintptr, err syscall.Errno) +func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err syscall.Errno) + +// A DLL implements access to a single DLL. +type DLL struct { + Name string + Handle Handle +} + +// LoadDLL loads DLL file into memory. +// +// Warning: using LoadDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use LazyDLL +// with System set to true, or use LoadLibraryEx directly. +func LoadDLL(name string) (dll *DLL, err error) { + namep, err := UTF16PtrFromString(name) + if err != nil { + return nil, err + } + h, e := loadlibrary(namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to load " + name + ": " + e.Error(), + } + } + d := &DLL{ + Name: name, + Handle: Handle(h), + } + return d, nil +} + +// MustLoadDLL is like LoadDLL but panics if load operation failes. +func MustLoadDLL(name string) *DLL { + d, e := LoadDLL(name) + if e != nil { + panic(e) + } + return d +} + +// FindProc searches DLL d for procedure named name and returns *Proc +// if found. It returns an error if search fails. +func (d *DLL) FindProc(name string) (proc *Proc, err error) { + namep, err := BytePtrFromString(name) + if err != nil { + return nil, err + } + a, e := getprocaddress(uintptr(d.Handle), namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), + } + } + p := &Proc{ + Dll: d, + Name: name, + addr: a, + } + return p, nil +} + +// MustFindProc is like FindProc but panics if search fails. +func (d *DLL) MustFindProc(name string) *Proc { + p, e := d.FindProc(name) + if e != nil { + panic(e) + } + return p +} + +// Release unloads DLL d from memory. +func (d *DLL) Release() (err error) { + return FreeLibrary(d.Handle) +} + +// A Proc implements access to a procedure inside a DLL. +type Proc struct { + Dll *DLL + Name string + addr uintptr +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +func (p *Proc) Addr() uintptr { + return p.addr +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// are supplied. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno. +func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + switch len(a) { + case 0: + return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0) + case 1: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0) + case 2: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0) + case 3: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2]) + case 4: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0) + case 5: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0) + case 6: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5]) + case 7: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0) + case 8: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0) + case 9: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]) + case 10: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0) + case 11: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0) + case 12: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11]) + case 13: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0) + case 14: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) + case 15: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) + default: + panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") + } +} + +// A LazyDLL implements access to a single DLL. +// It will delay the load of the DLL until the first +// call to its Handle method or to one of its +// LazyProc's Addr method. +type LazyDLL struct { + Name string + + // System determines whether the DLL must be loaded from the + // Windows System directory, bypassing the normal DLL search + // path. + System bool + + mu sync.Mutex + dll *DLL // non nil once DLL is loaded +} + +// Load loads DLL file d.Name into memory. It returns an error if fails. +// Load will not try to load DLL, if it is already loaded into memory. +func (d *LazyDLL) Load() error { + // Non-racy version of: + // if d.dll != nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil { + return nil + } + d.mu.Lock() + defer d.mu.Unlock() + if d.dll != nil { + return nil + } + + // kernel32.dll is special, since it's where LoadLibraryEx comes from. + // The kernel already special-cases its name, so it's always + // loaded from system32. + var dll *DLL + var err error + if d.Name == "kernel32.dll" { + dll, err = LoadDLL(d.Name) + } else { + dll, err = loadLibraryEx(d.Name, d.System) + } + if err != nil { + return err + } + + // Non-racy version of: + // d.dll = dll + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll)) + return nil +} + +// mustLoad is like Load but panics if search fails. +func (d *LazyDLL) mustLoad() { + e := d.Load() + if e != nil { + panic(e) + } +} + +// Handle returns d's module handle. +func (d *LazyDLL) Handle() uintptr { + d.mustLoad() + return uintptr(d.dll.Handle) +} + +// NewProc returns a LazyProc for accessing the named procedure in the DLL d. +func (d *LazyDLL) NewProc(name string) *LazyProc { + return &LazyProc{l: d, Name: name} +} + +// NewLazyDLL creates new LazyDLL associated with DLL file. +func NewLazyDLL(name string) *LazyDLL { + return &LazyDLL{Name: name} +} + +// NewLazySystemDLL is like NewLazyDLL, but will only +// search Windows System directory for the DLL if name is +// a base name (like "advapi32.dll"). +func NewLazySystemDLL(name string) *LazyDLL { + return &LazyDLL{Name: name, System: true} +} + +// A LazyProc implements access to a procedure inside a LazyDLL. +// It delays the lookup until the Addr method is called. +type LazyProc struct { + Name string + + mu sync.Mutex + l *LazyDLL + proc *Proc +} + +// Find searches DLL for procedure named p.Name. It returns +// an error if search fails. Find will not search procedure, +// if it is already found and loaded into memory. +func (p *LazyProc) Find() error { + // Non-racy version of: + // if p.proc == nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil { + p.mu.Lock() + defer p.mu.Unlock() + if p.proc == nil { + e := p.l.Load() + if e != nil { + return e + } + proc, e := p.l.dll.FindProc(p.Name) + if e != nil { + return e + } + // Non-racy version of: + // p.proc = proc + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc)) + } + } + return nil +} + +// mustFind is like Find but panics if search fails. +func (p *LazyProc) mustFind() { + e := p.Find() + if e != nil { + panic(e) + } +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +// It will panic if the procedure cannot be found. +func (p *LazyProc) Addr() uintptr { + p.mustFind() + return p.proc.Addr() +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// are supplied. It will also panic if the procedure cannot be found. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno. +func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + p.mustFind() + return p.proc.Call(a...) +} + +var canDoSearchSystem32Once struct { + sync.Once + v bool +} + +func initCanDoSearchSystem32() { + // https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says: + // "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows + // Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on + // systems that have KB2533623 installed. To determine whether the + // flags are available, use GetProcAddress to get the address of the + // AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories + // function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_* + // flags can be used with LoadLibraryEx." + canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil) +} + +func canDoSearchSystem32() bool { + canDoSearchSystem32Once.Do(initCanDoSearchSystem32) + return canDoSearchSystem32Once.v +} + +func isBaseName(name string) bool { + for _, c := range name { + if c == ':' || c == '/' || c == '\\' { + return false + } + } + return true +} + +// loadLibraryEx wraps the Windows LoadLibraryEx function. +// +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx +// +// If name is not an absolute path, LoadLibraryEx searches for the DLL +// in a variety of automatic locations unless constrained by flags. +// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx +func loadLibraryEx(name string, system bool) (*DLL, error) { + loadDLL := name + var flags uintptr + if system { + if canDoSearchSystem32() { + const LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800 + flags = LOAD_LIBRARY_SEARCH_SYSTEM32 + } else if isBaseName(name) { + // WindowsXP or unpatched Windows machine + // trying to load "foo.dll" out of the system + // folder, but LoadLibraryEx doesn't support + // that yet on their system, so emulate it. + windir, _ := Getenv("WINDIR") // old var; apparently works on XP + if windir == "" { + return nil, errString("%WINDIR% not defined") + } + loadDLL = windir + "\\System32\\" + name + } + } + h, err := LoadLibraryEx(loadDLL, 0, flags) + if err != nil { + return nil, err + } + return &DLL{Name: name, Handle: h}, nil +} + +type errString string + +func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go new file mode 100644 index 0000000..bdc71e2 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows environment variables. + +package windows + +import "syscall" + +func Getenv(key string) (value string, found bool) { + return syscall.Getenv(key) +} + +func Setenv(key, value string) error { + return syscall.Setenv(key, value) +} + +func Clearenv() { + syscall.Clearenv() +} + +func Environ() []string { + return syscall.Environ() +} + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go new file mode 100644 index 0000000..40af946 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +const ( + EVENTLOG_SUCCESS = 0 + EVENTLOG_ERROR_TYPE = 1 + EVENTLOG_WARNING_TYPE = 2 + EVENTLOG_INFORMATION_TYPE = 4 + EVENTLOG_AUDIT_SUCCESS = 8 + EVENTLOG_AUDIT_FAILURE = 16 +) + +//sys RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW +//sys DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource +//sys ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go new file mode 100644 index 0000000..3606c3a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -0,0 +1,97 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fork, exec, wait, etc. + +package windows + +// EscapeArg rewrites command line argument s as prescribed +// in http://msdn.microsoft.com/en-us/library/ms880421. +// This function returns "" (2 double quotes) if s is empty. +// Alternatively, these transformations are done: +// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. +func EscapeArg(s string) string { + if len(s) == 0 { + return "\"\"" + } + n := len(s) + hasSpace := false + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '\\': + n++ + case ' ', '\t': + hasSpace = true + } + } + if hasSpace { + n += 2 + } + if n == len(s) { + return s + } + + qs := make([]byte, n) + j := 0 + if hasSpace { + qs[j] = '"' + j++ + } + slashes := 0 + for i := 0; i < len(s); i++ { + switch s[i] { + default: + slashes = 0 + qs[j] = s[i] + case '\\': + slashes++ + qs[j] = s[i] + case '"': + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '\\' + j++ + qs[j] = s[i] + } + j++ + } + if hasSpace { + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '"' + j++ + } + return string(qs[:j]) +} + +func CloseOnExec(fd Handle) { + SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) +} + +// FullPath retrieves the full path of the specified file. +func FullPath(name string) (path string, err error) { + p, err := UTF16PtrFromString(name) + if err != nil { + return "", err + } + n := uint32(100) + for { + buf := make([]uint16, n) + n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil) + if err != nil { + return "", err + } + if n <= uint32(len(buf)) { + return UTF16ToString(buf[:n]), nil + } + } +} diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go new file mode 100644 index 0000000..f80a420 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +const ( + MEM_COMMIT = 0x00001000 + MEM_RESERVE = 0x00002000 + MEM_DECOMMIT = 0x00004000 + MEM_RELEASE = 0x00008000 + MEM_RESET = 0x00080000 + MEM_TOP_DOWN = 0x00100000 + MEM_WRITE_WATCH = 0x00200000 + MEM_PHYSICAL = 0x00400000 + MEM_RESET_UNDO = 0x01000000 + MEM_LARGE_PAGES = 0x20000000 + + PAGE_NOACCESS = 0x01 + PAGE_READONLY = 0x02 + PAGE_READWRITE = 0x04 + PAGE_WRITECOPY = 0x08 + PAGE_EXECUTE_READ = 0x20 + PAGE_EXECUTE_READWRITE = 0x40 + PAGE_EXECUTE_WRITECOPY = 0x80 +) diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go new file mode 100644 index 0000000..fb7db0e --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -0,0 +1,7 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go new file mode 100644 index 0000000..a74e3e2 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race.go @@ -0,0 +1,30 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,race + +package windows + +import ( + "runtime" + "unsafe" +) + +const raceenabled = true + +func raceAcquire(addr unsafe.Pointer) { + runtime.RaceAcquire(addr) +} + +func raceReleaseMerge(addr unsafe.Pointer) { + runtime.RaceReleaseMerge(addr) +} + +func raceReadRange(addr unsafe.Pointer, len int) { + runtime.RaceReadRange(addr, len) +} + +func raceWriteRange(addr unsafe.Pointer, len int) { + runtime.RaceWriteRange(addr, len) +} diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go new file mode 100644 index 0000000..e44a3cb --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -0,0 +1,25 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!race + +package windows + +import ( + "unsafe" +) + +const raceenabled = false + +func raceAcquire(addr unsafe.Pointer) { +} + +func raceReleaseMerge(addr unsafe.Pointer) { +} + +func raceReadRange(addr unsafe.Pointer, len int) { +} + +func raceWriteRange(addr unsafe.Pointer, len int) { +} diff --git a/vendor/golang.org/x/sys/windows/registry/export_test.go b/vendor/golang.org/x/sys/windows/registry/export_test.go new file mode 100644 index 0000000..8badf6f --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/export_test.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package registry + +func (k Key) SetValue(name string, valtype uint32, data []byte) error { + return k.setValue(name, valtype, data) +} diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 0000000..c256483 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,198 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +// +package registry + +import ( + "io" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 0000000..0ac95ff --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package registry + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/registry_test.go b/vendor/golang.org/x/sys/windows/registry/registry_test.go new file mode 100644 index 0000000..2f4dd69 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/registry_test.go @@ -0,0 +1,756 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package registry_test + +import ( + "bytes" + "crypto/rand" + "os" + "syscall" + "testing" + "time" + "unsafe" + + "golang.org/x/sys/windows/registry" +) + +func randKeyName(prefix string) string { + const numbers = "0123456789" + buf := make([]byte, 10) + rand.Read(buf) + for i, b := range buf { + buf[i] = numbers[b%byte(len(numbers))] + } + return prefix + string(buf) +} + +func TestReadSubKeyNames(t *testing.T) { + k, err := registry.OpenKey(registry.CLASSES_ROOT, "TypeLib", registry.ENUMERATE_SUB_KEYS) + if err != nil { + t.Fatal(err) + } + defer k.Close() + + names, err := k.ReadSubKeyNames(-1) + if err != nil { + t.Fatal(err) + } + var foundStdOle bool + for _, name := range names { + // Every PC has "stdole 2.0 OLE Automation" library installed. + if name == "{00020430-0000-0000-C000-000000000046}" { + foundStdOle = true + } + } + if !foundStdOle { + t.Fatal("could not find stdole 2.0 OLE Automation") + } +} + +func TestCreateOpenDeleteKey(t *testing.T) { + k, err := registry.OpenKey(registry.CURRENT_USER, "Software", registry.QUERY_VALUE) + if err != nil { + t.Fatal(err) + } + defer k.Close() + + testKName := randKeyName("TestCreateOpenDeleteKey_") + + testK, exist, err := registry.CreateKey(k, testKName, registry.CREATE_SUB_KEY) + if err != nil { + t.Fatal(err) + } + defer testK.Close() + + if exist { + t.Fatalf("key %q already exists", testKName) + } + + testKAgain, exist, err := registry.CreateKey(k, testKName, registry.CREATE_SUB_KEY) + if err != nil { + t.Fatal(err) + } + defer testKAgain.Close() + + if !exist { + t.Fatalf("key %q should already exist", testKName) + } + + testKOpened, err := registry.OpenKey(k, testKName, registry.ENUMERATE_SUB_KEYS) + if err != nil { + t.Fatal(err) + } + defer testKOpened.Close() + + err = registry.DeleteKey(k, testKName) + if err != nil { + t.Fatal(err) + } + + testKOpenedAgain, err := registry.OpenKey(k, testKName, registry.ENUMERATE_SUB_KEYS) + if err == nil { + defer testKOpenedAgain.Close() + t.Fatalf("key %q should already been deleted", testKName) + } + if err != registry.ErrNotExist { + t.Fatalf(`unexpected error ("not exist" expected): %v`, err) + } +} + +func equalStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false + } + if a == nil { + return true + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +type ValueTest struct { + Type uint32 + Name string + Value interface{} + WillFail bool +} + +var ValueTests = []ValueTest{ + {Type: registry.SZ, Name: "String1", Value: ""}, + {Type: registry.SZ, Name: "String2", Value: "\000", WillFail: true}, + {Type: registry.SZ, Name: "String3", Value: "Hello World"}, + {Type: registry.SZ, Name: "String4", Value: "Hello World\000", WillFail: true}, + {Type: registry.EXPAND_SZ, Name: "ExpString1", Value: ""}, + {Type: registry.EXPAND_SZ, Name: "ExpString2", Value: "\000", WillFail: true}, + {Type: registry.EXPAND_SZ, Name: "ExpString3", Value: "Hello World"}, + {Type: registry.EXPAND_SZ, Name: "ExpString4", Value: "Hello\000World", WillFail: true}, + {Type: registry.EXPAND_SZ, Name: "ExpString5", Value: "%PATH%"}, + {Type: registry.EXPAND_SZ, Name: "ExpString6", Value: "%NO_SUCH_VARIABLE%"}, + {Type: registry.EXPAND_SZ, Name: "ExpString7", Value: "%PATH%;."}, + {Type: registry.BINARY, Name: "Binary1", Value: []byte{}}, + {Type: registry.BINARY, Name: "Binary2", Value: []byte{1, 2, 3}}, + {Type: registry.BINARY, Name: "Binary3", Value: []byte{3, 2, 1, 0, 1, 2, 3}}, + {Type: registry.DWORD, Name: "Dword1", Value: uint64(0)}, + {Type: registry.DWORD, Name: "Dword2", Value: uint64(1)}, + {Type: registry.DWORD, Name: "Dword3", Value: uint64(0xff)}, + {Type: registry.DWORD, Name: "Dword4", Value: uint64(0xffff)}, + {Type: registry.QWORD, Name: "Qword1", Value: uint64(0)}, + {Type: registry.QWORD, Name: "Qword2", Value: uint64(1)}, + {Type: registry.QWORD, Name: "Qword3", Value: uint64(0xff)}, + {Type: registry.QWORD, Name: "Qword4", Value: uint64(0xffff)}, + {Type: registry.QWORD, Name: "Qword5", Value: uint64(0xffffff)}, + {Type: registry.QWORD, Name: "Qword6", Value: uint64(0xffffffff)}, + {Type: registry.MULTI_SZ, Name: "MultiString1", Value: []string{"a", "b", "c"}}, + {Type: registry.MULTI_SZ, Name: "MultiString2", Value: []string{"abc", "", "cba"}}, + {Type: registry.MULTI_SZ, Name: "MultiString3", Value: []string{""}}, + {Type: registry.MULTI_SZ, Name: "MultiString4", Value: []string{"abcdef"}}, + {Type: registry.MULTI_SZ, Name: "MultiString5", Value: []string{"\000"}, WillFail: true}, + {Type: registry.MULTI_SZ, Name: "MultiString6", Value: []string{"a\000b"}, WillFail: true}, + {Type: registry.MULTI_SZ, Name: "MultiString7", Value: []string{"ab", "\000", "cd"}, WillFail: true}, + {Type: registry.MULTI_SZ, Name: "MultiString8", Value: []string{"\000", "cd"}, WillFail: true}, + {Type: registry.MULTI_SZ, Name: "MultiString9", Value: []string{"ab", "\000"}, WillFail: true}, +} + +func setValues(t *testing.T, k registry.Key) { + for _, test := range ValueTests { + var err error + switch test.Type { + case registry.SZ: + err = k.SetStringValue(test.Name, test.Value.(string)) + case registry.EXPAND_SZ: + err = k.SetExpandStringValue(test.Name, test.Value.(string)) + case registry.MULTI_SZ: + err = k.SetStringsValue(test.Name, test.Value.([]string)) + case registry.BINARY: + err = k.SetBinaryValue(test.Name, test.Value.([]byte)) + case registry.DWORD: + err = k.SetDWordValue(test.Name, uint32(test.Value.(uint64))) + case registry.QWORD: + err = k.SetQWordValue(test.Name, test.Value.(uint64)) + default: + t.Fatalf("unsupported type %d for %s value", test.Type, test.Name) + } + if test.WillFail { + if err == nil { + t.Fatalf("setting %s value %q should fail, but succeeded", test.Name, test.Value) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} + +func enumerateValues(t *testing.T, k registry.Key) { + names, err := k.ReadValueNames(-1) + if err != nil { + t.Error(err) + return + } + haveNames := make(map[string]bool) + for _, n := range names { + haveNames[n] = false + } + for _, test := range ValueTests { + wantFound := !test.WillFail + _, haveFound := haveNames[test.Name] + if wantFound && !haveFound { + t.Errorf("value %s is not found while enumerating", test.Name) + } + if haveFound && !wantFound { + t.Errorf("value %s is found while enumerating, but expected to fail", test.Name) + } + if haveFound { + delete(haveNames, test.Name) + } + } + for n, v := range haveNames { + t.Errorf("value %s (%v) is found while enumerating, but has not been cretaed", n, v) + } +} + +func testErrNotExist(t *testing.T, name string, err error) { + if err == nil { + t.Errorf("%s value should not exist", name) + return + } + if err != registry.ErrNotExist { + t.Errorf("reading %s value should return 'not exist' error, but got: %s", name, err) + return + } +} + +func testErrUnexpectedType(t *testing.T, test ValueTest, gottype uint32, err error) { + if err == nil { + t.Errorf("GetXValue(%q) should not succeed", test.Name) + return + } + if err != registry.ErrUnexpectedType { + t.Errorf("reading %s value should return 'unexpected key value type' error, but got: %s", test.Name, err) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } +} + +func testGetStringValue(t *testing.T, k registry.Key, test ValueTest) { + got, gottype, err := k.GetStringValue(test.Name) + if err != nil { + t.Errorf("GetStringValue(%s) failed: %v", test.Name, err) + return + } + if got != test.Value { + t.Errorf("want %s value %q, got %q", test.Name, test.Value, got) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } + if gottype == registry.EXPAND_SZ { + _, err = registry.ExpandString(got) + if err != nil { + t.Errorf("ExpandString(%s) failed: %v", got, err) + return + } + } +} + +func testGetIntegerValue(t *testing.T, k registry.Key, test ValueTest) { + got, gottype, err := k.GetIntegerValue(test.Name) + if err != nil { + t.Errorf("GetIntegerValue(%s) failed: %v", test.Name, err) + return + } + if got != test.Value.(uint64) { + t.Errorf("want %s value %v, got %v", test.Name, test.Value, got) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } +} + +func testGetBinaryValue(t *testing.T, k registry.Key, test ValueTest) { + got, gottype, err := k.GetBinaryValue(test.Name) + if err != nil { + t.Errorf("GetBinaryValue(%s) failed: %v", test.Name, err) + return + } + if !bytes.Equal(got, test.Value.([]byte)) { + t.Errorf("want %s value %v, got %v", test.Name, test.Value, got) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } +} + +func testGetStringsValue(t *testing.T, k registry.Key, test ValueTest) { + got, gottype, err := k.GetStringsValue(test.Name) + if err != nil { + t.Errorf("GetStringsValue(%s) failed: %v", test.Name, err) + return + } + if !equalStringSlice(got, test.Value.([]string)) { + t.Errorf("want %s value %#v, got %#v", test.Name, test.Value, got) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } +} + +func testGetValue(t *testing.T, k registry.Key, test ValueTest, size int) { + if size <= 0 { + return + } + // read data with no buffer + gotsize, gottype, err := k.GetValue(test.Name, nil) + if err != nil { + t.Errorf("GetValue(%s, [%d]byte) failed: %v", test.Name, size, err) + return + } + if gotsize != size { + t.Errorf("want %s value size of %d, got %v", test.Name, size, gotsize) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } + // read data with short buffer + gotsize, gottype, err = k.GetValue(test.Name, make([]byte, size-1)) + if err == nil { + t.Errorf("GetValue(%s, [%d]byte) should fail, but succeeded", test.Name, size-1) + return + } + if err != registry.ErrShortBuffer { + t.Errorf("reading %s value should return 'short buffer' error, but got: %s", test.Name, err) + return + } + if gotsize != size { + t.Errorf("want %s value size of %d, got %v", test.Name, size, gotsize) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } + // read full data + gotsize, gottype, err = k.GetValue(test.Name, make([]byte, size)) + if err != nil { + t.Errorf("GetValue(%s, [%d]byte) failed: %v", test.Name, size, err) + return + } + if gotsize != size { + t.Errorf("want %s value size of %d, got %v", test.Name, size, gotsize) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } + // check GetValue returns ErrNotExist as required + _, _, err = k.GetValue(test.Name+"_not_there", make([]byte, size)) + if err == nil { + t.Errorf("GetValue(%q) should not succeed", test.Name) + return + } + if err != registry.ErrNotExist { + t.Errorf("GetValue(%q) should return 'not exist' error, but got: %s", test.Name, err) + return + } +} + +func testValues(t *testing.T, k registry.Key) { + for _, test := range ValueTests { + switch test.Type { + case registry.SZ, registry.EXPAND_SZ: + if test.WillFail { + _, _, err := k.GetStringValue(test.Name) + testErrNotExist(t, test.Name, err) + } else { + testGetStringValue(t, k, test) + _, gottype, err := k.GetIntegerValue(test.Name) + testErrUnexpectedType(t, test, gottype, err) + // Size of utf16 string in bytes is not perfect, + // but correct for current test values. + // Size also includes terminating 0. + testGetValue(t, k, test, (len(test.Value.(string))+1)*2) + } + _, _, err := k.GetStringValue(test.Name + "_string_not_created") + testErrNotExist(t, test.Name+"_string_not_created", err) + case registry.DWORD, registry.QWORD: + testGetIntegerValue(t, k, test) + _, gottype, err := k.GetBinaryValue(test.Name) + testErrUnexpectedType(t, test, gottype, err) + _, _, err = k.GetIntegerValue(test.Name + "_int_not_created") + testErrNotExist(t, test.Name+"_int_not_created", err) + size := 8 + if test.Type == registry.DWORD { + size = 4 + } + testGetValue(t, k, test, size) + case registry.BINARY: + testGetBinaryValue(t, k, test) + _, gottype, err := k.GetStringsValue(test.Name) + testErrUnexpectedType(t, test, gottype, err) + _, _, err = k.GetBinaryValue(test.Name + "_byte_not_created") + testErrNotExist(t, test.Name+"_byte_not_created", err) + testGetValue(t, k, test, len(test.Value.([]byte))) + case registry.MULTI_SZ: + if test.WillFail { + _, _, err := k.GetStringsValue(test.Name) + testErrNotExist(t, test.Name, err) + } else { + testGetStringsValue(t, k, test) + _, gottype, err := k.GetStringValue(test.Name) + testErrUnexpectedType(t, test, gottype, err) + size := 0 + for _, s := range test.Value.([]string) { + size += len(s) + 1 // nil terminated + } + size += 1 // extra nil at the end + size *= 2 // count bytes, not uint16 + testGetValue(t, k, test, size) + } + _, _, err := k.GetStringsValue(test.Name + "_strings_not_created") + testErrNotExist(t, test.Name+"_strings_not_created", err) + default: + t.Errorf("unsupported type %d for %s value", test.Type, test.Name) + continue + } + } +} + +func testStat(t *testing.T, k registry.Key) { + subk, _, err := registry.CreateKey(k, "subkey", registry.CREATE_SUB_KEY) + if err != nil { + t.Error(err) + return + } + defer subk.Close() + + defer registry.DeleteKey(k, "subkey") + + ki, err := k.Stat() + if err != nil { + t.Error(err) + return + } + if ki.SubKeyCount != 1 { + t.Error("key must have 1 subkey") + } + if ki.MaxSubKeyLen != 6 { + t.Error("key max subkey name length must be 6") + } + if ki.ValueCount != 24 { + t.Errorf("key must have 24 values, but is %d", ki.ValueCount) + } + if ki.MaxValueNameLen != 12 { + t.Errorf("key max value name length must be 10, but is %d", ki.MaxValueNameLen) + } + if ki.MaxValueLen != 38 { + t.Errorf("key max value length must be 38, but is %d", ki.MaxValueLen) + } + if mt, ct := ki.ModTime(), time.Now(); ct.Sub(mt) > 100*time.Millisecond { + t.Errorf("key mod time is not close to current time: mtime=%v current=%v delta=%v", mt, ct, ct.Sub(mt)) + } +} + +func deleteValues(t *testing.T, k registry.Key) { + for _, test := range ValueTests { + if test.WillFail { + continue + } + err := k.DeleteValue(test.Name) + if err != nil { + t.Error(err) + continue + } + } + names, err := k.ReadValueNames(-1) + if err != nil { + t.Error(err) + return + } + if len(names) != 0 { + t.Errorf("some values remain after deletion: %v", names) + } +} + +func TestValues(t *testing.T) { + softwareK, err := registry.OpenKey(registry.CURRENT_USER, "Software", registry.QUERY_VALUE) + if err != nil { + t.Fatal(err) + } + defer softwareK.Close() + + testKName := randKeyName("TestValues_") + + k, exist, err := registry.CreateKey(softwareK, testKName, registry.CREATE_SUB_KEY|registry.QUERY_VALUE|registry.SET_VALUE) + if err != nil { + t.Fatal(err) + } + defer k.Close() + + if exist { + t.Fatalf("key %q already exists", testKName) + } + + defer registry.DeleteKey(softwareK, testKName) + + setValues(t, k) + + enumerateValues(t, k) + + testValues(t, k) + + testStat(t, k) + + deleteValues(t, k) +} + +func walkKey(t *testing.T, k registry.Key, kname string) { + names, err := k.ReadValueNames(-1) + if err != nil { + t.Fatalf("reading value names of %s failed: %v", kname, err) + } + for _, name := range names { + _, valtype, err := k.GetValue(name, nil) + if err != nil { + t.Fatalf("reading value type of %s of %s failed: %v", name, kname, err) + } + switch valtype { + case registry.NONE: + case registry.SZ: + _, _, err := k.GetStringValue(name) + if err != nil { + t.Error(err) + } + case registry.EXPAND_SZ: + s, _, err := k.GetStringValue(name) + if err != nil { + t.Error(err) + } + _, err = registry.ExpandString(s) + if err != nil { + t.Error(err) + } + case registry.DWORD, registry.QWORD: + _, _, err := k.GetIntegerValue(name) + if err != nil { + t.Error(err) + } + case registry.BINARY: + _, _, err := k.GetBinaryValue(name) + if err != nil { + t.Error(err) + } + case registry.MULTI_SZ: + _, _, err := k.GetStringsValue(name) + if err != nil { + t.Error(err) + } + case registry.FULL_RESOURCE_DESCRIPTOR, registry.RESOURCE_LIST, registry.RESOURCE_REQUIREMENTS_LIST: + // TODO: not implemented + default: + t.Fatalf("value type %d of %s of %s failed: %v", valtype, name, kname, err) + } + } + + names, err = k.ReadSubKeyNames(-1) + if err != nil { + t.Fatalf("reading sub-keys of %s failed: %v", kname, err) + } + for _, name := range names { + func() { + subk, err := registry.OpenKey(k, name, registry.ENUMERATE_SUB_KEYS|registry.QUERY_VALUE) + if err != nil { + if err == syscall.ERROR_ACCESS_DENIED { + // ignore error, if we are not allowed to access this key + return + } + t.Fatalf("opening sub-keys %s of %s failed: %v", name, kname, err) + } + defer subk.Close() + + walkKey(t, subk, kname+`\`+name) + }() + } +} + +func TestWalkFullRegistry(t *testing.T) { + if testing.Short() { + t.Skip("skipping long running test in short mode") + } + walkKey(t, registry.CLASSES_ROOT, "CLASSES_ROOT") + walkKey(t, registry.CURRENT_USER, "CURRENT_USER") + walkKey(t, registry.LOCAL_MACHINE, "LOCAL_MACHINE") + walkKey(t, registry.USERS, "USERS") + walkKey(t, registry.CURRENT_CONFIG, "CURRENT_CONFIG") +} + +func TestExpandString(t *testing.T) { + got, err := registry.ExpandString("%PATH%") + if err != nil { + t.Fatal(err) + } + want := os.Getenv("PATH") + if got != want { + t.Errorf("want %q string expanded, got %q", want, got) + } +} + +func TestInvalidValues(t *testing.T) { + softwareK, err := registry.OpenKey(registry.CURRENT_USER, "Software", registry.QUERY_VALUE) + if err != nil { + t.Fatal(err) + } + defer softwareK.Close() + + testKName := randKeyName("TestInvalidValues_") + + k, exist, err := registry.CreateKey(softwareK, testKName, registry.CREATE_SUB_KEY|registry.QUERY_VALUE|registry.SET_VALUE) + if err != nil { + t.Fatal(err) + } + defer k.Close() + + if exist { + t.Fatalf("key %q already exists", testKName) + } + + defer registry.DeleteKey(softwareK, testKName) + + var tests = []struct { + Type uint32 + Name string + Data []byte + }{ + {registry.DWORD, "Dword1", nil}, + {registry.DWORD, "Dword2", []byte{1, 2, 3}}, + {registry.QWORD, "Qword1", nil}, + {registry.QWORD, "Qword2", []byte{1, 2, 3}}, + {registry.QWORD, "Qword3", []byte{1, 2, 3, 4, 5, 6, 7}}, + {registry.MULTI_SZ, "MultiString1", nil}, + {registry.MULTI_SZ, "MultiString2", []byte{0}}, + {registry.MULTI_SZ, "MultiString3", []byte{'a', 'b', 0}}, + {registry.MULTI_SZ, "MultiString4", []byte{'a', 0, 0, 'b', 0}}, + {registry.MULTI_SZ, "MultiString5", []byte{'a', 0, 0}}, + } + + for _, test := range tests { + err := k.SetValue(test.Name, test.Type, test.Data) + if err != nil { + t.Fatalf("SetValue for %q failed: %v", test.Name, err) + } + } + + for _, test := range tests { + switch test.Type { + case registry.DWORD, registry.QWORD: + value, valType, err := k.GetIntegerValue(test.Name) + if err == nil { + t.Errorf("GetIntegerValue(%q) succeeded. Returns type=%d value=%v", test.Name, valType, value) + } + case registry.MULTI_SZ: + value, valType, err := k.GetStringsValue(test.Name) + if err == nil { + if len(value) != 0 { + t.Errorf("GetStringsValue(%q) succeeded. Returns type=%d value=%v", test.Name, valType, value) + } + } + default: + t.Errorf("unsupported type %d for %s value", test.Type, test.Name) + } + } +} + +func TestGetMUIStringValue(t *testing.T) { + if err := registry.LoadRegLoadMUIString(); err != nil { + t.Skip("regLoadMUIString not supported; skipping") + } + if err := procGetDynamicTimeZoneInformation.Find(); err != nil { + t.Skipf("%s not supported; skipping", procGetDynamicTimeZoneInformation.Name) + } + var dtzi DynamicTimezoneinformation + if _, err := GetDynamicTimeZoneInformation(&dtzi); err != nil { + t.Fatal(err) + } + tzKeyName := syscall.UTF16ToString(dtzi.TimeZoneKeyName[:]) + timezoneK, err := registry.OpenKey(registry.LOCAL_MACHINE, + `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones\`+tzKeyName, registry.READ) + if err != nil { + t.Fatal(err) + } + defer timezoneK.Close() + + type testType struct { + name string + want string + } + var tests = []testType{ + {"MUI_Std", syscall.UTF16ToString(dtzi.StandardName[:])}, + } + if dtzi.DynamicDaylightTimeDisabled == 0 { + tests = append(tests, testType{"MUI_Dlt", syscall.UTF16ToString(dtzi.DaylightName[:])}) + } + + for _, test := range tests { + got, err := timezoneK.GetMUIStringValue(test.name) + if err != nil { + t.Error("GetMUIStringValue:", err) + } + + if got != test.want { + t.Errorf("GetMUIStringValue: %s: Got %q, want %q", test.name, got, test.want) + } + } +} + +type DynamicTimezoneinformation struct { + Bias int32 + StandardName [32]uint16 + StandardDate syscall.Systemtime + StandardBias int32 + DaylightName [32]uint16 + DaylightDate syscall.Systemtime + DaylightBias int32 + TimeZoneKeyName [128]uint16 + DynamicDaylightTimeDisabled uint8 +} + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32") + + procGetDynamicTimeZoneInformation = kernel32DLL.NewProc("GetDynamicTimeZoneInformation") +) + +func GetDynamicTimeZoneInformation(dtzi *DynamicTimezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetDynamicTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(dtzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 0000000..e66643c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 0000000..71d4e15 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,384 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. +func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (date []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. + + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + u := (*[1 << 29]uint16)(unsafe.Pointer(&r[0]))[:] + return syscall.UTF16ToString(u), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + return uint64(*(*uint32)(unsafe.Pointer(&data[0]))), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + return uint64(*(*uint64)(unsafe.Pointer(&data[0]))), QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 0000000..ceebdd7 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,120 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go new file mode 100644 index 0000000..f1ec5dc --- /dev/null +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -0,0 +1,476 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "syscall" + "unsafe" +) + +const ( + STANDARD_RIGHTS_REQUIRED = 0xf0000 + STANDARD_RIGHTS_READ = 0x20000 + STANDARD_RIGHTS_WRITE = 0x20000 + STANDARD_RIGHTS_EXECUTE = 0x20000 + STANDARD_RIGHTS_ALL = 0x1F0000 +) + +const ( + NameUnknown = 0 + NameFullyQualifiedDN = 1 + NameSamCompatible = 2 + NameDisplay = 3 + NameUniqueId = 6 + NameCanonical = 7 + NameUserPrincipal = 8 + NameCanonicalEx = 9 + NameServicePrincipal = 10 + NameDnsDomain = 12 +) + +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx +//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW +//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW + +// TranslateAccountName converts a directory service +// object name from one format to another. +func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) { + u, e := UTF16PtrFromString(username) + if e != nil { + return "", e + } + n := uint32(50) + for { + b := make([]uint16, n) + e = TranslateName(u, from, to, &b[0], &n) + if e == nil { + return UTF16ToString(b[:n]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +const ( + // do not reorder + NetSetupUnknownStatus = iota + NetSetupUnjoined + NetSetupWorkgroupName + NetSetupDomainName +) + +type UserInfo10 struct { + Name *uint16 + Comment *uint16 + UsrComment *uint16 + FullName *uint16 +} + +//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo +//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation +//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree + +const ( + // do not reorder + SidTypeUser = 1 + iota + SidTypeGroup + SidTypeDomain + SidTypeAlias + SidTypeWellKnownGroup + SidTypeDeletedAccount + SidTypeInvalid + SidTypeUnknown + SidTypeComputer + SidTypeLabel +) + +type SidIdentifierAuthority struct { + Value [6]byte +} + +var ( + SECURITY_NULL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}} + SECURITY_WORLD_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}} + SECURITY_LOCAL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}} + SECURITY_CREATOR_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}} + SECURITY_NON_UNIQUE_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}} + SECURITY_NT_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}} + SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}} +) + +const ( + SECURITY_NULL_RID = 0 + SECURITY_WORLD_RID = 0 + SECURITY_LOCAL_RID = 0 + SECURITY_CREATOR_OWNER_RID = 0 + SECURITY_CREATOR_GROUP_RID = 1 + SECURITY_DIALUP_RID = 1 + SECURITY_NETWORK_RID = 2 + SECURITY_BATCH_RID = 3 + SECURITY_INTERACTIVE_RID = 4 + SECURITY_LOGON_IDS_RID = 5 + SECURITY_SERVICE_RID = 6 + SECURITY_LOCAL_SYSTEM_RID = 18 + SECURITY_BUILTIN_DOMAIN_RID = 32 + SECURITY_PRINCIPAL_SELF_RID = 10 + SECURITY_CREATOR_OWNER_SERVER_RID = 0x2 + SECURITY_CREATOR_GROUP_SERVER_RID = 0x3 + SECURITY_LOGON_IDS_RID_COUNT = 0x3 + SECURITY_ANONYMOUS_LOGON_RID = 0x7 + SECURITY_PROXY_RID = 0x8 + SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9 + SECURITY_SERVER_LOGON_RID = SECURITY_ENTERPRISE_CONTROLLERS_RID + SECURITY_AUTHENTICATED_USER_RID = 0xb + SECURITY_RESTRICTED_CODE_RID = 0xc + SECURITY_NT_NON_UNIQUE_RID = 0x15 +) + +// Predefined domain-relative RIDs for local groups. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx +const ( + DOMAIN_ALIAS_RID_ADMINS = 0x220 + DOMAIN_ALIAS_RID_USERS = 0x221 + DOMAIN_ALIAS_RID_GUESTS = 0x222 + DOMAIN_ALIAS_RID_POWER_USERS = 0x223 + DOMAIN_ALIAS_RID_ACCOUNT_OPS = 0x224 + DOMAIN_ALIAS_RID_SYSTEM_OPS = 0x225 + DOMAIN_ALIAS_RID_PRINT_OPS = 0x226 + DOMAIN_ALIAS_RID_BACKUP_OPS = 0x227 + DOMAIN_ALIAS_RID_REPLICATOR = 0x228 + DOMAIN_ALIAS_RID_RAS_SERVERS = 0x229 + DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a + DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b + DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c + DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d + DOMAIN_ALIAS_RID_MONITORING_USERS = 0X22e + DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f + DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 + DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 + DOMAIN_ALIAS_RID_DCOM_USERS = 0x232 + DOMAIN_ALIAS_RID_IUSERS = 0x238 + DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 0x239 + DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 0x23b + DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c + DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 0x23d + DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP = 0x23e +) + +//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW +//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW +//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW +//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid +//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid +//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid +//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid +//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid + +// The security identifier (SID) structure is a variable-length +// structure used to uniquely identify users or groups. +type SID struct{} + +// StringToSid converts a string-format security identifier +// sid into a valid, functional sid. +func StringToSid(s string) (*SID, error) { + var sid *SID + p, e := UTF16PtrFromString(s) + if e != nil { + return nil, e + } + e = ConvertStringSidToSid(p, &sid) + if e != nil { + return nil, e + } + defer LocalFree((Handle)(unsafe.Pointer(sid))) + return sid.Copy() +} + +// LookupSID retrieves a security identifier sid for the account +// and the name of the domain on which the account was found. +// System specify target computer to search. +func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { + if len(account) == 0 { + return nil, "", 0, syscall.EINVAL + } + acc, e := UTF16PtrFromString(account) + if e != nil { + return nil, "", 0, e + } + var sys *uint16 + if len(system) > 0 { + sys, e = UTF16PtrFromString(system) + if e != nil { + return nil, "", 0, e + } + } + n := uint32(50) + dn := uint32(50) + for { + b := make([]byte, n) + db := make([]uint16, dn) + sid = (*SID)(unsafe.Pointer(&b[0])) + e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType) + if e == nil { + return sid, UTF16ToString(db), accType, nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return nil, "", 0, e + } + if n <= uint32(len(b)) { + return nil, "", 0, e + } + } +} + +// String converts sid to a string format +// suitable for display, storage, or transmission. +func (sid *SID) String() (string, error) { + var s *uint16 + e := ConvertSidToStringSid(sid, &s) + if e != nil { + return "", e + } + defer LocalFree((Handle)(unsafe.Pointer(s))) + return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil +} + +// Len returns the length, in bytes, of a valid security identifier sid. +func (sid *SID) Len() int { + return int(GetLengthSid(sid)) +} + +// Copy creates a duplicate of security identifier sid. +func (sid *SID) Copy() (*SID, error) { + b := make([]byte, sid.Len()) + sid2 := (*SID)(unsafe.Pointer(&b[0])) + e := CopySid(uint32(len(b)), sid2, sid) + if e != nil { + return nil, e + } + return sid2, nil +} + +// LookupAccount retrieves the name of the account for this sid +// and the name of the first domain on which this sid is found. +// System specify target computer to search for. +func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { + var sys *uint16 + if len(system) > 0 { + sys, err = UTF16PtrFromString(system) + if err != nil { + return "", "", 0, err + } + } + n := uint32(50) + dn := uint32(50) + for { + b := make([]uint16, n) + db := make([]uint16, dn) + e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType) + if e == nil { + return UTF16ToString(b), UTF16ToString(db), accType, nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", "", 0, e + } + if n <= uint32(len(b)) { + return "", "", 0, e + } + } +} + +const ( + // do not reorder + TOKEN_ASSIGN_PRIMARY = 1 << iota + TOKEN_DUPLICATE + TOKEN_IMPERSONATE + TOKEN_QUERY + TOKEN_QUERY_SOURCE + TOKEN_ADJUST_PRIVILEGES + TOKEN_ADJUST_GROUPS + TOKEN_ADJUST_DEFAULT + + TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | + TOKEN_ASSIGN_PRIMARY | + TOKEN_DUPLICATE | + TOKEN_IMPERSONATE | + TOKEN_QUERY | + TOKEN_QUERY_SOURCE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT + TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY + TOKEN_WRITE = STANDARD_RIGHTS_WRITE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT + TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE +) + +const ( + // do not reorder + TokenUser = 1 + iota + TokenGroups + TokenPrivileges + TokenOwner + TokenPrimaryGroup + TokenDefaultDacl + TokenSource + TokenType + TokenImpersonationLevel + TokenStatistics + TokenRestrictedSids + TokenSessionId + TokenGroupsAndPrivileges + TokenSessionReference + TokenSandBoxInert + TokenAuditPolicy + TokenOrigin + TokenElevationType + TokenLinkedToken + TokenElevation + TokenHasRestrictions + TokenAccessInformation + TokenVirtualizationAllowed + TokenVirtualizationEnabled + TokenIntegrityLevel + TokenUIAccess + TokenMandatoryPolicy + TokenLogonSid + MaxTokenInfoClass +) + +type SIDAndAttributes struct { + Sid *SID + Attributes uint32 +} + +type Tokenuser struct { + User SIDAndAttributes +} + +type Tokenprimarygroup struct { + PrimaryGroup *SID +} + +type Tokengroups struct { + GroupCount uint32 + Groups [1]SIDAndAttributes +} + +// Authorization Functions +//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership +//sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken +//sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation +//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW + +// An access token contains the security information for a logon session. +// The system creates an access token when a user logs on, and every +// process executed on behalf of the user has a copy of the token. +// The token identifies the user, the user's groups, and the user's +// privileges. The system uses the token to control access to securable +// objects and to control the ability of the user to perform various +// system-related operations on the local computer. +type Token Handle + +// OpenCurrentProcessToken opens the access token +// associated with current process. +func OpenCurrentProcessToken() (Token, error) { + p, e := GetCurrentProcess() + if e != nil { + return 0, e + } + var t Token + e = OpenProcessToken(p, TOKEN_QUERY, &t) + if e != nil { + return 0, e + } + return t, nil +} + +// Close releases access to access token. +func (t Token) Close() error { + return CloseHandle(Handle(t)) +} + +// getInfo retrieves a specified type of information about an access token. +func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) { + n := uint32(initSize) + for { + b := make([]byte, n) + e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) + if e == nil { + return unsafe.Pointer(&b[0]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return nil, e + } + if n <= uint32(len(b)) { + return nil, e + } + } +} + +// GetTokenUser retrieves access token t user account information. +func (t Token) GetTokenUser() (*Tokenuser, error) { + i, e := t.getInfo(TokenUser, 50) + if e != nil { + return nil, e + } + return (*Tokenuser)(i), nil +} + +// GetTokenGroups retrieves group accounts associated with access token t. +func (t Token) GetTokenGroups() (*Tokengroups, error) { + i, e := t.getInfo(TokenGroups, 50) + if e != nil { + return nil, e + } + return (*Tokengroups)(i), nil +} + +// GetTokenPrimaryGroup retrieves access token t primary group information. +// A pointer to a SID structure representing a group that will become +// the primary group of any objects created by a process using this access token. +func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) { + i, e := t.getInfo(TokenPrimaryGroup, 50) + if e != nil { + return nil, e + } + return (*Tokenprimarygroup)(i), nil +} + +// GetUserProfileDirectory retrieves path to the +// root directory of the access token t user's profile. +func (t Token) GetUserProfileDirectory() (string, error) { + n := uint32(100) + for { + b := make([]uint16, n) + e := GetUserProfileDirectory(t, &b[0], &n) + if e == nil { + return UTF16ToString(b), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +// IsMember reports whether the access token t is a member of the provided SID. +func (t Token) IsMember(sid *SID) (bool, error) { + var b int32 + if e := checkTokenMembership(t, sid, &b); e != nil { + return false, e + } + return b != 0, nil +} diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go new file mode 100644 index 0000000..a500dd7 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/service.go @@ -0,0 +1,164 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +const ( + SC_MANAGER_CONNECT = 1 + SC_MANAGER_CREATE_SERVICE = 2 + SC_MANAGER_ENUMERATE_SERVICE = 4 + SC_MANAGER_LOCK = 8 + SC_MANAGER_QUERY_LOCK_STATUS = 16 + SC_MANAGER_MODIFY_BOOT_CONFIG = 32 + SC_MANAGER_ALL_ACCESS = 0xf003f +) + +//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW + +const ( + SERVICE_KERNEL_DRIVER = 1 + SERVICE_FILE_SYSTEM_DRIVER = 2 + SERVICE_ADAPTER = 4 + SERVICE_RECOGNIZER_DRIVER = 8 + SERVICE_WIN32_OWN_PROCESS = 16 + SERVICE_WIN32_SHARE_PROCESS = 32 + SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS + SERVICE_INTERACTIVE_PROCESS = 256 + SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER + SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS + + SERVICE_BOOT_START = 0 + SERVICE_SYSTEM_START = 1 + SERVICE_AUTO_START = 2 + SERVICE_DEMAND_START = 3 + SERVICE_DISABLED = 4 + + SERVICE_ERROR_IGNORE = 0 + SERVICE_ERROR_NORMAL = 1 + SERVICE_ERROR_SEVERE = 2 + SERVICE_ERROR_CRITICAL = 3 + + SC_STATUS_PROCESS_INFO = 0 + + SERVICE_STOPPED = 1 + SERVICE_START_PENDING = 2 + SERVICE_STOP_PENDING = 3 + SERVICE_RUNNING = 4 + SERVICE_CONTINUE_PENDING = 5 + SERVICE_PAUSE_PENDING = 6 + SERVICE_PAUSED = 7 + SERVICE_NO_CHANGE = 0xffffffff + + SERVICE_ACCEPT_STOP = 1 + SERVICE_ACCEPT_PAUSE_CONTINUE = 2 + SERVICE_ACCEPT_SHUTDOWN = 4 + SERVICE_ACCEPT_PARAMCHANGE = 8 + SERVICE_ACCEPT_NETBINDCHANGE = 16 + SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 + SERVICE_ACCEPT_POWEREVENT = 64 + SERVICE_ACCEPT_SESSIONCHANGE = 128 + + SERVICE_CONTROL_STOP = 1 + SERVICE_CONTROL_PAUSE = 2 + SERVICE_CONTROL_CONTINUE = 3 + SERVICE_CONTROL_INTERROGATE = 4 + SERVICE_CONTROL_SHUTDOWN = 5 + SERVICE_CONTROL_PARAMCHANGE = 6 + SERVICE_CONTROL_NETBINDADD = 7 + SERVICE_CONTROL_NETBINDREMOVE = 8 + SERVICE_CONTROL_NETBINDENABLE = 9 + SERVICE_CONTROL_NETBINDDISABLE = 10 + SERVICE_CONTROL_DEVICEEVENT = 11 + SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 + SERVICE_CONTROL_POWEREVENT = 13 + SERVICE_CONTROL_SESSIONCHANGE = 14 + + SERVICE_ACTIVE = 1 + SERVICE_INACTIVE = 2 + SERVICE_STATE_ALL = 3 + + SERVICE_QUERY_CONFIG = 1 + SERVICE_CHANGE_CONFIG = 2 + SERVICE_QUERY_STATUS = 4 + SERVICE_ENUMERATE_DEPENDENTS = 8 + SERVICE_START = 16 + SERVICE_STOP = 32 + SERVICE_PAUSE_CONTINUE = 64 + SERVICE_INTERROGATE = 128 + SERVICE_USER_DEFINED_CONTROL = 256 + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 + SERVICE_CONFIG_DESCRIPTION = 1 + SERVICE_CONFIG_FAILURE_ACTIONS = 2 + + NO_ERROR = 0 + + SC_ENUM_PROCESS_INFO = 0 +) + +type SERVICE_STATUS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 +} + +type SERVICE_TABLE_ENTRY struct { + ServiceName *uint16 + ServiceProc uintptr +} + +type QUERY_SERVICE_CONFIG struct { + ServiceType uint32 + StartType uint32 + ErrorControl uint32 + BinaryPathName *uint16 + LoadOrderGroup *uint16 + TagId uint32 + Dependencies *uint16 + ServiceStartName *uint16 + DisplayName *uint16 +} + +type SERVICE_DESCRIPTION struct { + Description *uint16 +} + +type SERVICE_STATUS_PROCESS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 + ProcessId uint32 + ServiceFlags uint32 +} + +type ENUM_SERVICE_STATUS_PROCESS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatusProcess SERVICE_STATUS_PROCESS +} + +//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle +//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW +//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW +//sys DeleteService(service Handle) (err error) = advapi32.DeleteService +//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW +//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService +//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW +//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus +//sys ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = advapi32.ChangeServiceConfigW +//sys QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW +//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W +//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W +//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go new file mode 100644 index 0000000..917cc2a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/str.go @@ -0,0 +1,22 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +func itoa(val int) string { // do it here rather than with fmt to avoid dependency + if val < 0 { + return "-" + itoa(-val) + } + var buf [32]byte // big enough for int64 + i := len(buf) - 1 + for val >= 10 { + buf[i] = byte(val%10 + '0') + i-- + val /= 10 + } + buf[i] = byte(val + '0') + return string(buf[i:]) +} diff --git a/vendor/golang.org/x/sys/windows/svc/debug/log.go b/vendor/golang.org/x/sys/windows/svc/debug/log.go new file mode 100644 index 0000000..e51ab42 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/debug/log.go @@ -0,0 +1,56 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package debug + +import ( + "os" + "strconv" +) + +// Log interface allows different log implementations to be used. +type Log interface { + Close() error + Info(eid uint32, msg string) error + Warning(eid uint32, msg string) error + Error(eid uint32, msg string) error +} + +// ConsoleLog provides access to the console. +type ConsoleLog struct { + Name string +} + +// New creates new ConsoleLog. +func New(source string) *ConsoleLog { + return &ConsoleLog{Name: source} +} + +// Close closes console log l. +func (l *ConsoleLog) Close() error { + return nil +} + +func (l *ConsoleLog) report(kind string, eid uint32, msg string) error { + s := l.Name + "." + kind + "(" + strconv.Itoa(int(eid)) + "): " + msg + "\n" + _, err := os.Stdout.Write([]byte(s)) + return err +} + +// Info writes an information event msg with event id eid to the console l. +func (l *ConsoleLog) Info(eid uint32, msg string) error { + return l.report("info", eid, msg) +} + +// Warning writes an warning event msg with event id eid to the console l. +func (l *ConsoleLog) Warning(eid uint32, msg string) error { + return l.report("warn", eid, msg) +} + +// Error writes an error event msg with event id eid to the console l. +func (l *ConsoleLog) Error(eid uint32, msg string) error { + return l.report("error", eid, msg) +} diff --git a/vendor/golang.org/x/sys/windows/svc/debug/service.go b/vendor/golang.org/x/sys/windows/svc/debug/service.go new file mode 100644 index 0000000..123df98 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/debug/service.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package debug provides facilities to execute svc.Handler on console. +// +package debug + +import ( + "os" + "os/signal" + "syscall" + + "golang.org/x/sys/windows/svc" +) + +// Run executes service name by calling appropriate handler function. +// The process is running on console, unlike real service. Use Ctrl+C to +// send "Stop" command to your service. +func Run(name string, handler svc.Handler) error { + cmds := make(chan svc.ChangeRequest) + changes := make(chan svc.Status) + + sig := make(chan os.Signal) + signal.Notify(sig) + + go func() { + status := svc.Status{State: svc.Stopped} + for { + select { + case <-sig: + cmds <- svc.ChangeRequest{svc.Stop, 0, 0, status} + case status = <-changes: + } + } + }() + + _, errno := handler.Execute([]string{name}, cmds, changes) + if errno != 0 { + return syscall.Errno(errno) + } + return nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/event.go b/vendor/golang.org/x/sys/windows/svc/event.go new file mode 100644 index 0000000..0508e22 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/event.go @@ -0,0 +1,48 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package svc + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// event represents auto-reset, initially non-signaled Windows event. +// It is used to communicate between go and asm parts of this package. +type event struct { + h windows.Handle +} + +func newEvent() (*event, error) { + h, err := windows.CreateEvent(nil, 0, 0, nil) + if err != nil { + return nil, err + } + return &event{h: h}, nil +} + +func (e *event) Close() error { + return windows.CloseHandle(e.h) +} + +func (e *event) Set() error { + return windows.SetEvent(e.h) +} + +func (e *event) Wait() error { + s, err := windows.WaitForSingleObject(e.h, windows.INFINITE) + switch s { + case windows.WAIT_OBJECT_0: + break + case windows.WAIT_FAILED: + return err + default: + return errors.New("unexpected result from WaitForSingleObject") + } + return nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/eventlog/install.go b/vendor/golang.org/x/sys/windows/svc/eventlog/install.go new file mode 100644 index 0000000..c76a376 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/eventlog/install.go @@ -0,0 +1,80 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package eventlog + +import ( + "errors" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" +) + +const ( + // Log levels. + Info = windows.EVENTLOG_INFORMATION_TYPE + Warning = windows.EVENTLOG_WARNING_TYPE + Error = windows.EVENTLOG_ERROR_TYPE +) + +const addKeyName = `SYSTEM\CurrentControlSet\Services\EventLog\Application` + +// Install modifies PC registry to allow logging with an event source src. +// It adds all required keys and values to the event log registry key. +// Install uses msgFile as the event message file. If useExpandKey is true, +// the event message file is installed as REG_EXPAND_SZ value, +// otherwise as REG_SZ. Use bitwise of log.Error, log.Warning and +// log.Info to specify events supported by the new event source. +func Install(src, msgFile string, useExpandKey bool, eventsSupported uint32) error { + appkey, err := registry.OpenKey(registry.LOCAL_MACHINE, addKeyName, registry.CREATE_SUB_KEY) + if err != nil { + return err + } + defer appkey.Close() + + sk, alreadyExist, err := registry.CreateKey(appkey, src, registry.SET_VALUE) + if err != nil { + return err + } + defer sk.Close() + if alreadyExist { + return errors.New(addKeyName + `\` + src + " registry key already exists") + } + + err = sk.SetDWordValue("CustomSource", 1) + if err != nil { + return err + } + if useExpandKey { + err = sk.SetExpandStringValue("EventMessageFile", msgFile) + } else { + err = sk.SetStringValue("EventMessageFile", msgFile) + } + if err != nil { + return err + } + err = sk.SetDWordValue("TypesSupported", eventsSupported) + if err != nil { + return err + } + return nil +} + +// InstallAsEventCreate is the same as Install, but uses +// %SystemRoot%\System32\EventCreate.exe as the event message file. +func InstallAsEventCreate(src string, eventsSupported uint32) error { + return Install(src, "%SystemRoot%\\System32\\EventCreate.exe", true, eventsSupported) +} + +// Remove deletes all registry elements installed by the correspondent Install. +func Remove(src string) error { + appkey, err := registry.OpenKey(registry.LOCAL_MACHINE, addKeyName, registry.SET_VALUE) + if err != nil { + return err + } + defer appkey.Close() + return registry.DeleteKey(appkey, src) +} diff --git a/vendor/golang.org/x/sys/windows/svc/eventlog/log.go b/vendor/golang.org/x/sys/windows/svc/eventlog/log.go new file mode 100644 index 0000000..46e5153 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/eventlog/log.go @@ -0,0 +1,70 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package eventlog implements access to Windows event log. +// +package eventlog + +import ( + "errors" + "syscall" + + "golang.org/x/sys/windows" +) + +// Log provides access to the system log. +type Log struct { + Handle windows.Handle +} + +// Open retrieves a handle to the specified event log. +func Open(source string) (*Log, error) { + return OpenRemote("", source) +} + +// OpenRemote does the same as Open, but on different computer host. +func OpenRemote(host, source string) (*Log, error) { + if source == "" { + return nil, errors.New("Specify event log source") + } + var s *uint16 + if host != "" { + s = syscall.StringToUTF16Ptr(host) + } + h, err := windows.RegisterEventSource(s, syscall.StringToUTF16Ptr(source)) + if err != nil { + return nil, err + } + return &Log{Handle: h}, nil +} + +// Close closes event log l. +func (l *Log) Close() error { + return windows.DeregisterEventSource(l.Handle) +} + +func (l *Log) report(etype uint16, eid uint32, msg string) error { + ss := []*uint16{syscall.StringToUTF16Ptr(msg)} + return windows.ReportEvent(l.Handle, etype, 0, eid, 0, 1, 0, &ss[0], nil) +} + +// Info writes an information event msg with event id eid to the end of event log l. +// When EventCreate.exe is used, eid must be between 1 and 1000. +func (l *Log) Info(eid uint32, msg string) error { + return l.report(windows.EVENTLOG_INFORMATION_TYPE, eid, msg) +} + +// Warning writes an warning event msg with event id eid to the end of event log l. +// When EventCreate.exe is used, eid must be between 1 and 1000. +func (l *Log) Warning(eid uint32, msg string) error { + return l.report(windows.EVENTLOG_WARNING_TYPE, eid, msg) +} + +// Error writes an error event msg with event id eid to the end of event log l. +// When EventCreate.exe is used, eid must be between 1 and 1000. +func (l *Log) Error(eid uint32, msg string) error { + return l.report(windows.EVENTLOG_ERROR_TYPE, eid, msg) +} diff --git a/vendor/golang.org/x/sys/windows/svc/eventlog/log_test.go b/vendor/golang.org/x/sys/windows/svc/eventlog/log_test.go new file mode 100644 index 0000000..6fbbd4a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/eventlog/log_test.go @@ -0,0 +1,51 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package eventlog_test + +import ( + "testing" + + "golang.org/x/sys/windows/svc/eventlog" +) + +func TestLog(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode - it modifies system logs") + } + + const name = "mylog" + const supports = eventlog.Error | eventlog.Warning | eventlog.Info + err := eventlog.InstallAsEventCreate(name, supports) + if err != nil { + t.Fatalf("Install failed: %s", err) + } + defer func() { + err = eventlog.Remove(name) + if err != nil { + t.Fatalf("Remove failed: %s", err) + } + }() + + l, err := eventlog.Open(name) + if err != nil { + t.Fatalf("Open failed: %s", err) + } + defer l.Close() + + err = l.Info(1, "info") + if err != nil { + t.Fatalf("Info failed: %s", err) + } + err = l.Warning(2, "warning") + if err != nil { + t.Fatalf("Warning failed: %s", err) + } + err = l.Error(3, "error") + if err != nil { + t.Fatalf("Error failed: %s", err) + } +} diff --git a/vendor/golang.org/x/sys/windows/svc/example/beep.go b/vendor/golang.org/x/sys/windows/svc/example/beep.go new file mode 100644 index 0000000..dcf2340 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/example/beep.go @@ -0,0 +1,22 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package main + +import ( + "syscall" +) + +// BUG(brainman): MessageBeep Windows api is broken on Windows 7, +// so this example does not beep when runs as service on Windows 7. + +var ( + beepFunc = syscall.MustLoadDLL("user32.dll").MustFindProc("MessageBeep") +) + +func beep() { + beepFunc.Call(0xffffffff) +} diff --git a/vendor/golang.org/x/sys/windows/svc/example/install.go b/vendor/golang.org/x/sys/windows/svc/example/install.go new file mode 100644 index 0000000..39cb00d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/example/install.go @@ -0,0 +1,92 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package main + +import ( + "fmt" + "os" + "path/filepath" + + "golang.org/x/sys/windows/svc/eventlog" + "golang.org/x/sys/windows/svc/mgr" +) + +func exePath() (string, error) { + prog := os.Args[0] + p, err := filepath.Abs(prog) + if err != nil { + return "", err + } + fi, err := os.Stat(p) + if err == nil { + if !fi.Mode().IsDir() { + return p, nil + } + err = fmt.Errorf("%s is directory", p) + } + if filepath.Ext(p) == "" { + p += ".exe" + fi, err := os.Stat(p) + if err == nil { + if !fi.Mode().IsDir() { + return p, nil + } + err = fmt.Errorf("%s is directory", p) + } + } + return "", err +} + +func installService(name, desc string) error { + exepath, err := exePath() + if err != nil { + return err + } + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + s, err := m.OpenService(name) + if err == nil { + s.Close() + return fmt.Errorf("service %s already exists", name) + } + s, err = m.CreateService(name, exepath, mgr.Config{DisplayName: desc}, "is", "auto-started") + if err != nil { + return err + } + defer s.Close() + err = eventlog.InstallAsEventCreate(name, eventlog.Error|eventlog.Warning|eventlog.Info) + if err != nil { + s.Delete() + return fmt.Errorf("SetupEventLogSource() failed: %s", err) + } + return nil +} + +func removeService(name string) error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + s, err := m.OpenService(name) + if err != nil { + return fmt.Errorf("service %s is not installed", name) + } + defer s.Close() + err = s.Delete() + if err != nil { + return err + } + err = eventlog.Remove(name) + if err != nil { + return fmt.Errorf("RemoveEventLogSource() failed: %s", err) + } + return nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/example/main.go b/vendor/golang.org/x/sys/windows/svc/example/main.go new file mode 100644 index 0000000..dc96c08 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/example/main.go @@ -0,0 +1,76 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Example service program that beeps. +// +// The program demonstrates how to create Windows service and +// install / remove it on a computer. It also shows how to +// stop / start / pause / continue any service, and how to +// write to event log. It also shows how to use debug +// facilities available in debug package. +// +package main + +import ( + "fmt" + "log" + "os" + "strings" + + "golang.org/x/sys/windows/svc" +) + +func usage(errmsg string) { + fmt.Fprintf(os.Stderr, + "%s\n\n"+ + "usage: %s \n"+ + " where is one of\n"+ + " install, remove, debug, start, stop, pause or continue.\n", + errmsg, os.Args[0]) + os.Exit(2) +} + +func main() { + const svcName = "myservice" + + isIntSess, err := svc.IsAnInteractiveSession() + if err != nil { + log.Fatalf("failed to determine if we are running in an interactive session: %v", err) + } + if !isIntSess { + runService(svcName, false) + return + } + + if len(os.Args) < 2 { + usage("no command specified") + } + + cmd := strings.ToLower(os.Args[1]) + switch cmd { + case "debug": + runService(svcName, true) + return + case "install": + err = installService(svcName, "my service") + case "remove": + err = removeService(svcName) + case "start": + err = startService(svcName) + case "stop": + err = controlService(svcName, svc.Stop, svc.Stopped) + case "pause": + err = controlService(svcName, svc.Pause, svc.Paused) + case "continue": + err = controlService(svcName, svc.Continue, svc.Running) + default: + usage(fmt.Sprintf("invalid command %s", cmd)) + } + if err != nil { + log.Fatalf("failed to %s %s: %v", cmd, svcName, err) + } + return +} diff --git a/vendor/golang.org/x/sys/windows/svc/example/manage.go b/vendor/golang.org/x/sys/windows/svc/example/manage.go new file mode 100644 index 0000000..782dbd9 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/example/manage.go @@ -0,0 +1,62 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package main + +import ( + "fmt" + "time" + + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/mgr" +) + +func startService(name string) error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + s, err := m.OpenService(name) + if err != nil { + return fmt.Errorf("could not access service: %v", err) + } + defer s.Close() + err = s.Start("is", "manual-started") + if err != nil { + return fmt.Errorf("could not start service: %v", err) + } + return nil +} + +func controlService(name string, c svc.Cmd, to svc.State) error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + s, err := m.OpenService(name) + if err != nil { + return fmt.Errorf("could not access service: %v", err) + } + defer s.Close() + status, err := s.Control(c) + if err != nil { + return fmt.Errorf("could not send control=%d: %v", c, err) + } + timeout := time.Now().Add(10 * time.Second) + for status.State != to { + if timeout.Before(time.Now()) { + return fmt.Errorf("timeout waiting for service to go to state=%d", to) + } + time.Sleep(300 * time.Millisecond) + status, err = s.Query() + if err != nil { + return fmt.Errorf("could not retrieve service status: %v", err) + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/example/service.go b/vendor/golang.org/x/sys/windows/svc/example/service.go new file mode 100644 index 0000000..237e809 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/example/service.go @@ -0,0 +1,82 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package main + +import ( + "fmt" + "time" + + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/debug" + "golang.org/x/sys/windows/svc/eventlog" +) + +var elog debug.Log + +type myservice struct{} + +func (m *myservice) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) { + const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown | svc.AcceptPauseAndContinue + changes <- svc.Status{State: svc.StartPending} + fasttick := time.Tick(500 * time.Millisecond) + slowtick := time.Tick(2 * time.Second) + tick := fasttick + changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted} +loop: + for { + select { + case <-tick: + beep() + elog.Info(1, "beep") + case c := <-r: + switch c.Cmd { + case svc.Interrogate: + changes <- c.CurrentStatus + // Testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4 + time.Sleep(100 * time.Millisecond) + changes <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + break loop + case svc.Pause: + changes <- svc.Status{State: svc.Paused, Accepts: cmdsAccepted} + tick = slowtick + case svc.Continue: + changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted} + tick = fasttick + default: + elog.Error(1, fmt.Sprintf("unexpected control request #%d", c)) + } + } + } + changes <- svc.Status{State: svc.StopPending} + return +} + +func runService(name string, isDebug bool) { + var err error + if isDebug { + elog = debug.New(name) + } else { + elog, err = eventlog.Open(name) + if err != nil { + return + } + } + defer elog.Close() + + elog.Info(1, fmt.Sprintf("starting %s service", name)) + run := svc.Run + if isDebug { + run = debug.Run + } + err = run(name, &myservice{}) + if err != nil { + elog.Error(1, fmt.Sprintf("%s service failed: %v", name, err)) + return + } + elog.Info(1, fmt.Sprintf("%s service stopped", name)) +} diff --git a/vendor/golang.org/x/sys/windows/svc/go12.c b/vendor/golang.org/x/sys/windows/svc/go12.c new file mode 100644 index 0000000..6f1be1f --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/go12.c @@ -0,0 +1,24 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows +// +build !go1.3 + +// copied from pkg/runtime +typedef unsigned int uint32; +typedef unsigned long long int uint64; +#ifdef _64BIT +typedef uint64 uintptr; +#else +typedef uint32 uintptr; +#endif + +// from sys_386.s or sys_amd64.s +void ·servicemain(void); + +void +·getServiceMain(uintptr *r) +{ + *r = (uintptr)·servicemain; +} diff --git a/vendor/golang.org/x/sys/windows/svc/go12.go b/vendor/golang.org/x/sys/windows/svc/go12.go new file mode 100644 index 0000000..cd8b913 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/go12.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows +// +build !go1.3 + +package svc + +// from go12.c +func getServiceMain(r *uintptr) diff --git a/vendor/golang.org/x/sys/windows/svc/go13.go b/vendor/golang.org/x/sys/windows/svc/go13.go new file mode 100644 index 0000000..9d7f3ce --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/go13.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows +// +build go1.3 + +package svc + +import "unsafe" + +const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const + +// Should be a built-in for unsafe.Pointer? +func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + x) +} + +// funcPC returns the entry PC of the function f. +// It assumes that f is a func value. Otherwise the behavior is undefined. +func funcPC(f interface{}) uintptr { + return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize)) +} + +// from sys_386.s and sys_amd64.s +func servicectlhandler(ctl uint32) uintptr +func servicemain(argc uint32, argv **uint16) + +func getServiceMain(r *uintptr) { + *r = funcPC(servicemain) +} diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/config.go b/vendor/golang.org/x/sys/windows/svc/mgr/config.go new file mode 100644 index 0000000..0a6edba --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/mgr/config.go @@ -0,0 +1,139 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package mgr + +import ( + "syscall" + "unicode/utf16" + "unsafe" + + "golang.org/x/sys/windows" +) + +const ( + // Service start types. + StartManual = windows.SERVICE_DEMAND_START // the service must be started manually + StartAutomatic = windows.SERVICE_AUTO_START // the service will start by itself whenever the computer reboots + StartDisabled = windows.SERVICE_DISABLED // the service cannot be started + + // The severity of the error, and action taken, + // if this service fails to start. + ErrorCritical = windows.SERVICE_ERROR_CRITICAL + ErrorIgnore = windows.SERVICE_ERROR_IGNORE + ErrorNormal = windows.SERVICE_ERROR_NORMAL + ErrorSevere = windows.SERVICE_ERROR_SEVERE +) + +// TODO(brainman): Password is not returned by windows.QueryServiceConfig, not sure how to get it. + +type Config struct { + ServiceType uint32 + StartType uint32 + ErrorControl uint32 + BinaryPathName string // fully qualified path to the service binary file, can also include arguments for an auto-start service + LoadOrderGroup string + TagId uint32 + Dependencies []string + ServiceStartName string // name of the account under which the service should run + DisplayName string + Password string + Description string +} + +func toString(p *uint16) string { + if p == nil { + return "" + } + return syscall.UTF16ToString((*[4096]uint16)(unsafe.Pointer(p))[:]) +} + +func toStringSlice(ps *uint16) []string { + if ps == nil { + return nil + } + r := make([]string, 0) + for from, i, p := 0, 0, (*[1 << 24]uint16)(unsafe.Pointer(ps)); true; i++ { + if p[i] == 0 { + // empty string marks the end + if i <= from { + break + } + r = append(r, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return r +} + +// Config retrieves service s configuration paramteres. +func (s *Service) Config() (Config, error) { + var p *windows.QUERY_SERVICE_CONFIG + n := uint32(1024) + for { + b := make([]byte, n) + p = (*windows.QUERY_SERVICE_CONFIG)(unsafe.Pointer(&b[0])) + err := windows.QueryServiceConfig(s.Handle, p, n, &n) + if err == nil { + break + } + if err.(syscall.Errno) != syscall.ERROR_INSUFFICIENT_BUFFER { + return Config{}, err + } + if n <= uint32(len(b)) { + return Config{}, err + } + } + + var p2 *windows.SERVICE_DESCRIPTION + n = uint32(1024) + for { + b := make([]byte, n) + p2 = (*windows.SERVICE_DESCRIPTION)(unsafe.Pointer(&b[0])) + err := windows.QueryServiceConfig2(s.Handle, + windows.SERVICE_CONFIG_DESCRIPTION, &b[0], n, &n) + if err == nil { + break + } + if err.(syscall.Errno) != syscall.ERROR_INSUFFICIENT_BUFFER { + return Config{}, err + } + if n <= uint32(len(b)) { + return Config{}, err + } + } + + return Config{ + ServiceType: p.ServiceType, + StartType: p.StartType, + ErrorControl: p.ErrorControl, + BinaryPathName: toString(p.BinaryPathName), + LoadOrderGroup: toString(p.LoadOrderGroup), + TagId: p.TagId, + Dependencies: toStringSlice(p.Dependencies), + ServiceStartName: toString(p.ServiceStartName), + DisplayName: toString(p.DisplayName), + Description: toString(p2.Description), + }, nil +} + +func updateDescription(handle windows.Handle, desc string) error { + d := windows.SERVICE_DESCRIPTION{toPtr(desc)} + return windows.ChangeServiceConfig2(handle, + windows.SERVICE_CONFIG_DESCRIPTION, (*byte)(unsafe.Pointer(&d))) +} + +// UpdateConfig updates service s configuration parameters. +func (s *Service) UpdateConfig(c Config) error { + err := windows.ChangeServiceConfig(s.Handle, c.ServiceType, c.StartType, + c.ErrorControl, toPtr(c.BinaryPathName), toPtr(c.LoadOrderGroup), + nil, toStringBlock(c.Dependencies), toPtr(c.ServiceStartName), + toPtr(c.Password), toPtr(c.DisplayName)) + if err != nil { + return err + } + return updateDescription(s.Handle, c.Description) +} diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go new file mode 100644 index 0000000..76965b5 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package mgr can be used to manage Windows service programs. +// It can be used to install and remove them. It can also start, +// stop and pause them. The package can query / change current +// service state and config parameters. +// +package mgr + +import ( + "syscall" + "unicode/utf16" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Mgr is used to manage Windows service. +type Mgr struct { + Handle windows.Handle +} + +// Connect establishes a connection to the service control manager. +func Connect() (*Mgr, error) { + return ConnectRemote("") +} + +// ConnectRemote establishes a connection to the +// service control manager on computer named host. +func ConnectRemote(host string) (*Mgr, error) { + var s *uint16 + if host != "" { + s = syscall.StringToUTF16Ptr(host) + } + h, err := windows.OpenSCManager(s, nil, windows.SC_MANAGER_ALL_ACCESS) + if err != nil { + return nil, err + } + return &Mgr{Handle: h}, nil +} + +// Disconnect closes connection to the service control manager m. +func (m *Mgr) Disconnect() error { + return windows.CloseServiceHandle(m.Handle) +} + +func toPtr(s string) *uint16 { + if len(s) == 0 { + return nil + } + return syscall.StringToUTF16Ptr(s) +} + +// toStringBlock terminates strings in ss with 0, and then +// concatenates them together. It also adds extra 0 at the end. +func toStringBlock(ss []string) *uint16 { + if len(ss) == 0 { + return nil + } + t := "" + for _, s := range ss { + if s != "" { + t += s + "\x00" + } + } + if t == "" { + return nil + } + t += "\x00" + return &utf16.Encode([]rune(t))[0] +} + +// CreateService installs new service name on the system. +// The service will be executed by running exepath binary. +// Use config c to specify service parameters. +// Any args will be passed as command-line arguments when +// the service is started; these arguments are distinct from +// the arguments passed to Service.Start or via the "Start +// parameters" field in the service's Properties dialog box. +func (m *Mgr) CreateService(name, exepath string, c Config, args ...string) (*Service, error) { + if c.StartType == 0 { + c.StartType = StartManual + } + if c.ErrorControl == 0 { + c.ErrorControl = ErrorNormal + } + if c.ServiceType == 0 { + c.ServiceType = windows.SERVICE_WIN32_OWN_PROCESS + } + s := syscall.EscapeArg(exepath) + for _, v := range args { + s += " " + syscall.EscapeArg(v) + } + h, err := windows.CreateService(m.Handle, toPtr(name), toPtr(c.DisplayName), + windows.SERVICE_ALL_ACCESS, c.ServiceType, + c.StartType, c.ErrorControl, toPtr(s), toPtr(c.LoadOrderGroup), + nil, toStringBlock(c.Dependencies), toPtr(c.ServiceStartName), toPtr(c.Password)) + if err != nil { + return nil, err + } + if c.Description != "" { + err = updateDescription(h, c.Description) + if err != nil { + return nil, err + } + } + return &Service{Name: name, Handle: h}, nil +} + +// OpenService retrieves access to service name, so it can +// be interrogated and controlled. +func (m *Mgr) OpenService(name string) (*Service, error) { + h, err := windows.OpenService(m.Handle, syscall.StringToUTF16Ptr(name), windows.SERVICE_ALL_ACCESS) + if err != nil { + return nil, err + } + return &Service{Name: name, Handle: h}, nil +} + +// ListServices enumerates services in the specified +// service control manager database m. +// If the caller does not have the SERVICE_QUERY_STATUS +// access right to a service, the service is silently +// omitted from the list of services returned. +func (m *Mgr) ListServices() ([]string, error) { + var err error + var bytesNeeded, servicesReturned uint32 + var buf []byte + for { + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + err = windows.EnumServicesStatusEx(m.Handle, windows.SC_ENUM_PROCESS_INFO, + windows.SERVICE_WIN32, windows.SERVICE_STATE_ALL, + p, uint32(len(buf)), &bytesNeeded, &servicesReturned, nil, nil) + if err == nil { + break + } + if err != syscall.ERROR_MORE_DATA { + return nil, err + } + if bytesNeeded <= uint32(len(buf)) { + return nil, err + } + buf = make([]byte, bytesNeeded) + } + if servicesReturned == 0 { + return nil, nil + } + services := (*[1 << 20]windows.ENUM_SERVICE_STATUS_PROCESS)(unsafe.Pointer(&buf[0]))[:servicesReturned] + var names []string + for _, s := range services { + name := syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(s.ServiceName))[:]) + names = append(names, name) + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go b/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go new file mode 100644 index 0000000..1569a22 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go @@ -0,0 +1,169 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package mgr_test + +import ( + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "testing" + "time" + + "golang.org/x/sys/windows/svc/mgr" +) + +func TestOpenLanManServer(t *testing.T) { + m, err := mgr.Connect() + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERROR_ACCESS_DENIED { + t.Skip("Skipping test: we don't have rights to manage services.") + } + t.Fatalf("SCM connection failed: %s", err) + } + defer m.Disconnect() + + s, err := m.OpenService("LanmanServer") + if err != nil { + t.Fatalf("OpenService(lanmanserver) failed: %s", err) + } + defer s.Close() + + _, err = s.Config() + if err != nil { + t.Fatalf("Config failed: %s", err) + } +} + +func install(t *testing.T, m *mgr.Mgr, name, exepath string, c mgr.Config) { + // Sometimes it takes a while for the service to get + // removed after previous test run. + for i := 0; ; i++ { + s, err := m.OpenService(name) + if err != nil { + break + } + s.Close() + + if i > 10 { + t.Fatalf("service %s already exists", name) + } + time.Sleep(300 * time.Millisecond) + } + + s, err := m.CreateService(name, exepath, c) + if err != nil { + t.Fatalf("CreateService(%s) failed: %v", name, err) + } + defer s.Close() +} + +func depString(d []string) string { + if len(d) == 0 { + return "" + } + for i := range d { + d[i] = strings.ToLower(d[i]) + } + ss := sort.StringSlice(d) + ss.Sort() + return strings.Join([]string(ss), " ") +} + +func testConfig(t *testing.T, s *mgr.Service, should mgr.Config) mgr.Config { + is, err := s.Config() + if err != nil { + t.Fatalf("Config failed: %s", err) + } + if should.DisplayName != is.DisplayName { + t.Fatalf("config mismatch: DisplayName is %q, but should have %q", is.DisplayName, should.DisplayName) + } + if should.StartType != is.StartType { + t.Fatalf("config mismatch: StartType is %v, but should have %v", is.StartType, should.StartType) + } + if should.Description != is.Description { + t.Fatalf("config mismatch: Description is %q, but should have %q", is.Description, should.Description) + } + if depString(should.Dependencies) != depString(is.Dependencies) { + t.Fatalf("config mismatch: Dependencies is %v, but should have %v", is.Dependencies, should.Dependencies) + } + return is +} + +func remove(t *testing.T, s *mgr.Service) { + err := s.Delete() + if err != nil { + t.Fatalf("Delete failed: %s", err) + } +} + +func TestMyService(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode - it modifies system services") + } + + const name = "myservice" + + m, err := mgr.Connect() + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERROR_ACCESS_DENIED { + t.Skip("Skipping test: we don't have rights to manage services.") + } + t.Fatalf("SCM connection failed: %s", err) + } + defer m.Disconnect() + + c := mgr.Config{ + StartType: mgr.StartDisabled, + DisplayName: "my service", + Description: "my service is just a test", + Dependencies: []string{"LanmanServer", "W32Time"}, + } + + exename := os.Args[0] + exepath, err := filepath.Abs(exename) + if err != nil { + t.Fatalf("filepath.Abs(%s) failed: %s", exename, err) + } + + install(t, m, name, exepath, c) + + s, err := m.OpenService(name) + if err != nil { + t.Fatalf("service %s is not installed", name) + } + defer s.Close() + + c.BinaryPathName = exepath + c = testConfig(t, s, c) + + c.StartType = mgr.StartManual + err = s.UpdateConfig(c) + if err != nil { + t.Fatalf("UpdateConfig failed: %v", err) + } + + testConfig(t, s, c) + + svcnames, err := m.ListServices() + if err != nil { + t.Fatalf("ListServices failed: %v", err) + } + var myserviceIsInstalled bool + for _, sn := range svcnames { + if sn == name { + myserviceIsInstalled = true + break + } + } + if !myserviceIsInstalled { + t.Errorf("ListServices failed to find %q service", name) + } + + remove(t, s) +} diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/service.go b/vendor/golang.org/x/sys/windows/svc/mgr/service.go new file mode 100644 index 0000000..fdc46af --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/mgr/service.go @@ -0,0 +1,72 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package mgr + +import ( + "syscall" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc" +) + +// TODO(brainman): Use EnumDependentServices to enumerate dependent services. + +// Service is used to access Windows service. +type Service struct { + Name string + Handle windows.Handle +} + +// Delete marks service s for deletion from the service control manager database. +func (s *Service) Delete() error { + return windows.DeleteService(s.Handle) +} + +// Close relinquish access to the service s. +func (s *Service) Close() error { + return windows.CloseServiceHandle(s.Handle) +} + +// Start starts service s. +// args will be passed to svc.Handler.Execute. +func (s *Service) Start(args ...string) error { + var p **uint16 + if len(args) > 0 { + vs := make([]*uint16, len(args)) + for i := range vs { + vs[i] = syscall.StringToUTF16Ptr(args[i]) + } + p = &vs[0] + } + return windows.StartService(s.Handle, uint32(len(args)), p) +} + +// Control sends state change request c to the servce s. +func (s *Service) Control(c svc.Cmd) (svc.Status, error) { + var t windows.SERVICE_STATUS + err := windows.ControlService(s.Handle, uint32(c), &t) + if err != nil { + return svc.Status{}, err + } + return svc.Status{ + State: svc.State(t.CurrentState), + Accepts: svc.Accepted(t.ControlsAccepted), + }, nil +} + +// Query returns current status of service s. +func (s *Service) Query() (svc.Status, error) { + var t windows.SERVICE_STATUS + err := windows.QueryServiceStatus(s.Handle, &t) + if err != nil { + return svc.Status{}, err + } + return svc.Status{ + State: svc.State(t.CurrentState), + Accepts: svc.Accepted(t.ControlsAccepted), + }, nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/security.go b/vendor/golang.org/x/sys/windows/svc/security.go new file mode 100644 index 0000000..6fbc923 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/security.go @@ -0,0 +1,62 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package svc + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +func allocSid(subAuth0 uint32) (*windows.SID, error) { + var sid *windows.SID + err := windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, + 1, subAuth0, 0, 0, 0, 0, 0, 0, 0, &sid) + if err != nil { + return nil, err + } + return sid, nil +} + +// IsAnInteractiveSession determines if calling process is running interactively. +// It queries the process token for membership in the Interactive group. +// http://stackoverflow.com/questions/2668851/how-do-i-detect-that-my-application-is-running-as-service-or-in-an-interactive-s +func IsAnInteractiveSession() (bool, error) { + interSid, err := allocSid(windows.SECURITY_INTERACTIVE_RID) + if err != nil { + return false, err + } + defer windows.FreeSid(interSid) + + serviceSid, err := allocSid(windows.SECURITY_SERVICE_RID) + if err != nil { + return false, err + } + defer windows.FreeSid(serviceSid) + + t, err := windows.OpenCurrentProcessToken() + if err != nil { + return false, err + } + defer t.Close() + + gs, err := t.GetTokenGroups() + if err != nil { + return false, err + } + p := unsafe.Pointer(&gs.Groups[0]) + groups := (*[2 << 20]windows.SIDAndAttributes)(p)[:gs.GroupCount] + for _, g := range groups { + if windows.EqualSid(g.Sid, interSid) { + return true, nil + } + if windows.EqualSid(g.Sid, serviceSid) { + return false, nil + } + } + return false, nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/golang.org/x/sys/windows/svc/service.go new file mode 100644 index 0000000..903cba3 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/service.go @@ -0,0 +1,363 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package svc provides everything required to build Windows service. +// +package svc + +import ( + "errors" + "runtime" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// State describes service execution state (Stopped, Running and so on). +type State uint32 + +const ( + Stopped = State(windows.SERVICE_STOPPED) + StartPending = State(windows.SERVICE_START_PENDING) + StopPending = State(windows.SERVICE_STOP_PENDING) + Running = State(windows.SERVICE_RUNNING) + ContinuePending = State(windows.SERVICE_CONTINUE_PENDING) + PausePending = State(windows.SERVICE_PAUSE_PENDING) + Paused = State(windows.SERVICE_PAUSED) +) + +// Cmd represents service state change request. It is sent to a service +// by the service manager, and should be actioned upon by the service. +type Cmd uint32 + +const ( + Stop = Cmd(windows.SERVICE_CONTROL_STOP) + Pause = Cmd(windows.SERVICE_CONTROL_PAUSE) + Continue = Cmd(windows.SERVICE_CONTROL_CONTINUE) + Interrogate = Cmd(windows.SERVICE_CONTROL_INTERROGATE) + Shutdown = Cmd(windows.SERVICE_CONTROL_SHUTDOWN) + ParamChange = Cmd(windows.SERVICE_CONTROL_PARAMCHANGE) + NetBindAdd = Cmd(windows.SERVICE_CONTROL_NETBINDADD) + NetBindRemove = Cmd(windows.SERVICE_CONTROL_NETBINDREMOVE) + NetBindEnable = Cmd(windows.SERVICE_CONTROL_NETBINDENABLE) + NetBindDisable = Cmd(windows.SERVICE_CONTROL_NETBINDDISABLE) + DeviceEvent = Cmd(windows.SERVICE_CONTROL_DEVICEEVENT) + HardwareProfileChange = Cmd(windows.SERVICE_CONTROL_HARDWAREPROFILECHANGE) + PowerEvent = Cmd(windows.SERVICE_CONTROL_POWEREVENT) + SessionChange = Cmd(windows.SERVICE_CONTROL_SESSIONCHANGE) +) + +// Accepted is used to describe commands accepted by the service. +// Note that Interrogate is always accepted. +type Accepted uint32 + +const ( + AcceptStop = Accepted(windows.SERVICE_ACCEPT_STOP) + AcceptShutdown = Accepted(windows.SERVICE_ACCEPT_SHUTDOWN) + AcceptPauseAndContinue = Accepted(windows.SERVICE_ACCEPT_PAUSE_CONTINUE) + AcceptParamChange = Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE) + AcceptNetBindChange = Accepted(windows.SERVICE_ACCEPT_NETBINDCHANGE) + AcceptHardwareProfileChange = Accepted(windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE) + AcceptPowerEvent = Accepted(windows.SERVICE_ACCEPT_POWEREVENT) + AcceptSessionChange = Accepted(windows.SERVICE_ACCEPT_SESSIONCHANGE) +) + +// Status combines State and Accepted commands to fully describe running service. +type Status struct { + State State + Accepts Accepted + CheckPoint uint32 // used to report progress during a lengthy operation + WaitHint uint32 // estimated time required for a pending operation, in milliseconds +} + +// ChangeRequest is sent to the service Handler to request service status change. +type ChangeRequest struct { + Cmd Cmd + EventType uint32 + EventData uintptr + CurrentStatus Status +} + +// Handler is the interface that must be implemented to build Windows service. +type Handler interface { + + // Execute will be called by the package code at the start of + // the service, and the service will exit once Execute completes. + // Inside Execute you must read service change requests from r and + // act accordingly. You must keep service control manager up to date + // about state of your service by writing into s as required. + // args contains service name followed by argument strings passed + // to the service. + // You can provide service exit code in exitCode return parameter, + // with 0 being "no error". You can also indicate if exit code, + // if any, is service specific or not by using svcSpecificEC + // parameter. + Execute(args []string, r <-chan ChangeRequest, s chan<- Status) (svcSpecificEC bool, exitCode uint32) +} + +var ( + // These are used by asm code. + goWaitsH uintptr + cWaitsH uintptr + ssHandle uintptr + sName *uint16 + sArgc uintptr + sArgv **uint16 + ctlHandlerExProc uintptr + cSetEvent uintptr + cWaitForSingleObject uintptr + cRegisterServiceCtrlHandlerExW uintptr +) + +func init() { + k := syscall.MustLoadDLL("kernel32.dll") + cSetEvent = k.MustFindProc("SetEvent").Addr() + cWaitForSingleObject = k.MustFindProc("WaitForSingleObject").Addr() + a := syscall.MustLoadDLL("advapi32.dll") + cRegisterServiceCtrlHandlerExW = a.MustFindProc("RegisterServiceCtrlHandlerExW").Addr() +} + +// The HandlerEx prototype also has a context pointer but since we don't use +// it at start-up time we don't have to pass it over either. +type ctlEvent struct { + cmd Cmd + eventType uint32 + eventData uintptr + errno uint32 +} + +// service provides access to windows service api. +type service struct { + name string + h windows.Handle + cWaits *event + goWaits *event + c chan ctlEvent + handler Handler +} + +func newService(name string, handler Handler) (*service, error) { + var s service + var err error + s.name = name + s.c = make(chan ctlEvent) + s.handler = handler + s.cWaits, err = newEvent() + if err != nil { + return nil, err + } + s.goWaits, err = newEvent() + if err != nil { + s.cWaits.Close() + return nil, err + } + return &s, nil +} + +func (s *service) close() error { + s.cWaits.Close() + s.goWaits.Close() + return nil +} + +type exitCode struct { + isSvcSpecific bool + errno uint32 +} + +func (s *service) updateStatus(status *Status, ec *exitCode) error { + if s.h == 0 { + return errors.New("updateStatus with no service status handle") + } + var t windows.SERVICE_STATUS + t.ServiceType = windows.SERVICE_WIN32_OWN_PROCESS + t.CurrentState = uint32(status.State) + if status.Accepts&AcceptStop != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_STOP + } + if status.Accepts&AcceptShutdown != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_SHUTDOWN + } + if status.Accepts&AcceptPauseAndContinue != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_PAUSE_CONTINUE + } + if status.Accepts&AcceptParamChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_PARAMCHANGE + } + if status.Accepts&AcceptNetBindChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_NETBINDCHANGE + } + if status.Accepts&AcceptHardwareProfileChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE + } + if status.Accepts&AcceptPowerEvent != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_POWEREVENT + } + if status.Accepts&AcceptSessionChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_SESSIONCHANGE + } + if ec.errno == 0 { + t.Win32ExitCode = windows.NO_ERROR + t.ServiceSpecificExitCode = windows.NO_ERROR + } else if ec.isSvcSpecific { + t.Win32ExitCode = uint32(windows.ERROR_SERVICE_SPECIFIC_ERROR) + t.ServiceSpecificExitCode = ec.errno + } else { + t.Win32ExitCode = ec.errno + t.ServiceSpecificExitCode = windows.NO_ERROR + } + t.CheckPoint = status.CheckPoint + t.WaitHint = status.WaitHint + return windows.SetServiceStatus(s.h, &t) +} + +const ( + sysErrSetServiceStatusFailed = uint32(syscall.APPLICATION_ERROR) + iota + sysErrNewThreadInCallback +) + +func (s *service) run() { + s.goWaits.Wait() + s.h = windows.Handle(ssHandle) + argv := (*[100]*int16)(unsafe.Pointer(sArgv))[:sArgc] + args := make([]string, len(argv)) + for i, a := range argv { + args[i] = syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(a))[:]) + } + + cmdsToHandler := make(chan ChangeRequest) + changesFromHandler := make(chan Status) + exitFromHandler := make(chan exitCode) + + go func() { + ss, errno := s.handler.Execute(args, cmdsToHandler, changesFromHandler) + exitFromHandler <- exitCode{ss, errno} + }() + + status := Status{State: Stopped} + ec := exitCode{isSvcSpecific: true, errno: 0} + var outch chan ChangeRequest + inch := s.c + var cmd Cmd + var evtype uint32 + var evdata uintptr +loop: + for { + select { + case r := <-inch: + if r.errno != 0 { + ec.errno = r.errno + break loop + } + inch = nil + outch = cmdsToHandler + cmd = r.cmd + evtype = r.eventType + evdata = r.eventData + case outch <- ChangeRequest{cmd, evtype, evdata, status}: + inch = s.c + outch = nil + case c := <-changesFromHandler: + err := s.updateStatus(&c, &ec) + if err != nil { + // best suitable error number + ec.errno = sysErrSetServiceStatusFailed + if err2, ok := err.(syscall.Errno); ok { + ec.errno = uint32(err2) + } + break loop + } + status = c + case ec = <-exitFromHandler: + break loop + } + } + + s.updateStatus(&Status{State: Stopped}, &ec) + s.cWaits.Set() +} + +func newCallback(fn interface{}) (cb uintptr, err error) { + defer func() { + r := recover() + if r == nil { + return + } + cb = 0 + switch v := r.(type) { + case string: + err = errors.New(v) + case error: + err = v + default: + err = errors.New("unexpected panic in syscall.NewCallback") + } + }() + return syscall.NewCallback(fn), nil +} + +// BUG(brainman): There is no mechanism to run multiple services +// inside one single executable. Perhaps, it can be overcome by +// using RegisterServiceCtrlHandlerEx Windows api. + +// Run executes service name by calling appropriate handler function. +func Run(name string, handler Handler) error { + runtime.LockOSThread() + + tid := windows.GetCurrentThreadId() + + s, err := newService(name, handler) + if err != nil { + return err + } + + ctlHandler := func(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr { + e := ctlEvent{cmd: Cmd(ctl), eventType: evtype, eventData: evdata} + // We assume that this callback function is running on + // the same thread as Run. Nowhere in MS documentation + // I could find statement to guarantee that. So putting + // check here to verify, otherwise things will go bad + // quickly, if ignored. + i := windows.GetCurrentThreadId() + if i != tid { + e.errno = sysErrNewThreadInCallback + } + s.c <- e + // Always return NO_ERROR (0) for now. + return 0 + } + + var svcmain uintptr + getServiceMain(&svcmain) + t := []windows.SERVICE_TABLE_ENTRY{ + {syscall.StringToUTF16Ptr(s.name), svcmain}, + {nil, 0}, + } + + goWaitsH = uintptr(s.goWaits.h) + cWaitsH = uintptr(s.cWaits.h) + sName = t[0].ServiceName + ctlHandlerExProc, err = newCallback(ctlHandler) + if err != nil { + return err + } + + go s.run() + + err = windows.StartServiceCtrlDispatcher(&t[0]) + if err != nil { + return err + } + return nil +} + +// StatusHandle returns service status handle. It is safe to call this function +// from inside the Handler.Execute because then it is guaranteed to be set. +// This code will have to change once multiple services are possible per process. +func StatusHandle() windows.Handle { + return windows.Handle(ssHandle) +} diff --git a/vendor/golang.org/x/sys/windows/svc/svc_test.go b/vendor/golang.org/x/sys/windows/svc/svc_test.go new file mode 100644 index 0000000..da7ec66 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/svc_test.go @@ -0,0 +1,118 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package svc_test + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/mgr" +) + +func getState(t *testing.T, s *mgr.Service) svc.State { + status, err := s.Query() + if err != nil { + t.Fatalf("Query(%s) failed: %s", s.Name, err) + } + return status.State +} + +func testState(t *testing.T, s *mgr.Service, want svc.State) { + have := getState(t, s) + if have != want { + t.Fatalf("%s state is=%d want=%d", s.Name, have, want) + } +} + +func waitState(t *testing.T, s *mgr.Service, want svc.State) { + for i := 0; ; i++ { + have := getState(t, s) + if have == want { + return + } + if i > 10 { + t.Fatalf("%s state is=%d, waiting timeout", s.Name, have) + } + time.Sleep(300 * time.Millisecond) + } +} + +func TestExample(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode - it modifies system services") + } + + const name = "myservice" + + m, err := mgr.Connect() + if err != nil { + t.Fatalf("SCM connection failed: %s", err) + } + defer m.Disconnect() + + dir, err := ioutil.TempDir("", "svc") + if err != nil { + t.Fatalf("failed to create temp directory: %v", err) + } + defer os.RemoveAll(dir) + + exepath := filepath.Join(dir, "a.exe") + o, err := exec.Command("go", "build", "-o", exepath, "golang.org/x/sys/windows/svc/example").CombinedOutput() + if err != nil { + t.Fatalf("failed to build service program: %v\n%v", err, string(o)) + } + + s, err := m.OpenService(name) + if err == nil { + err = s.Delete() + if err != nil { + s.Close() + t.Fatalf("Delete failed: %s", err) + } + s.Close() + } + s, err = m.CreateService(name, exepath, mgr.Config{DisplayName: "my service"}, "is", "auto-started") + if err != nil { + t.Fatalf("CreateService(%s) failed: %v", name, err) + } + defer s.Close() + + testState(t, s, svc.Stopped) + err = s.Start("is", "manual-started") + if err != nil { + t.Fatalf("Start(%s) failed: %s", s.Name, err) + } + waitState(t, s, svc.Running) + time.Sleep(1 * time.Second) + + // testing deadlock from issues 4. + _, err = s.Control(svc.Interrogate) + if err != nil { + t.Fatalf("Control(%s) failed: %s", s.Name, err) + } + _, err = s.Control(svc.Interrogate) + if err != nil { + t.Fatalf("Control(%s) failed: %s", s.Name, err) + } + time.Sleep(1 * time.Second) + + _, err = s.Control(svc.Stop) + if err != nil { + t.Fatalf("Control(%s) failed: %s", s.Name, err) + } + waitState(t, s, svc.Stopped) + + err = s.Delete() + if err != nil { + t.Fatalf("Delete failed: %s", err) + } +} diff --git a/vendor/golang.org/x/sys/windows/svc/sys_386.s b/vendor/golang.org/x/sys/windows/svc/sys_386.s new file mode 100644 index 0000000..2c82a9d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/sys_386.s @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// func servicemain(argc uint32, argv **uint16) +TEXT ·servicemain(SB),7,$0 + MOVL argc+0(FP), AX + MOVL AX, ·sArgc(SB) + MOVL argv+4(FP), AX + MOVL AX, ·sArgv(SB) + + PUSHL BP + PUSHL BX + PUSHL SI + PUSHL DI + + SUBL $12, SP + + MOVL ·sName(SB), AX + MOVL AX, (SP) + MOVL $·servicectlhandler(SB), AX + MOVL AX, 4(SP) + MOVL $0, 8(SP) + MOVL ·cRegisterServiceCtrlHandlerExW(SB), AX + MOVL SP, BP + CALL AX + MOVL BP, SP + CMPL AX, $0 + JE exit + MOVL AX, ·ssHandle(SB) + + MOVL ·goWaitsH(SB), AX + MOVL AX, (SP) + MOVL ·cSetEvent(SB), AX + MOVL SP, BP + CALL AX + MOVL BP, SP + + MOVL ·cWaitsH(SB), AX + MOVL AX, (SP) + MOVL $-1, AX + MOVL AX, 4(SP) + MOVL ·cWaitForSingleObject(SB), AX + MOVL SP, BP + CALL AX + MOVL BP, SP + +exit: + ADDL $12, SP + + POPL DI + POPL SI + POPL BX + POPL BP + + MOVL 0(SP), CX + ADDL $12, SP + JMP CX + +// I do not know why, but this seems to be the only way to call +// ctlHandlerProc on Windows 7. + +// func servicectlhandler(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr { +TEXT ·servicectlhandler(SB),7,$0 + MOVL ·ctlHandlerExProc(SB), CX + JMP CX diff --git a/vendor/golang.org/x/sys/windows/svc/sys_amd64.s b/vendor/golang.org/x/sys/windows/svc/sys_amd64.s new file mode 100644 index 0000000..06b4259 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/sys_amd64.s @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// func servicemain(argc uint32, argv **uint16) +TEXT ·servicemain(SB),7,$0 + MOVL CX, ·sArgc(SB) + MOVL DX, ·sArgv(SB) + + SUBQ $32, SP // stack for the first 4 syscall params + + MOVQ ·sName(SB), CX + MOVQ $·servicectlhandler(SB), DX + // BUG(pastarmovj): Figure out a way to pass in context in R8. + MOVQ ·cRegisterServiceCtrlHandlerExW(SB), AX + CALL AX + CMPQ AX, $0 + JE exit + MOVQ AX, ·ssHandle(SB) + + MOVQ ·goWaitsH(SB), CX + MOVQ ·cSetEvent(SB), AX + CALL AX + + MOVQ ·cWaitsH(SB), CX + MOVQ $4294967295, DX + MOVQ ·cWaitForSingleObject(SB), AX + CALL AX + +exit: + ADDQ $32, SP + RET + +// I do not know why, but this seems to be the only way to call +// ctlHandlerProc on Windows 7. + +// func ·servicectlhandler(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr { +TEXT ·servicectlhandler(SB),7,$0 + MOVQ ·ctlHandlerExProc(SB), AX + JMP AX diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go new file mode 100644 index 0000000..b07bc23 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -0,0 +1,71 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package windows contains an interface to the low-level operating system +// primitives. OS details vary depending on the underlying system, and +// by default, godoc will display the OS-specific documentation for the current +// system. If you want godoc to display syscall documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if +// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS +// to freebsd and $GOARCH to arm. +// The primary use of this package is inside other packages that provide a more +// portable interface to the system, such as "os", "time" and "net". Use +// those packages rather than this one if you can. +// For details of the functions and data types in this package consult +// the manuals for the appropriate operating system. +// These calls return err == nil to indicate success; otherwise +// err represents an operating system error describing the failure and +// holds a value of type syscall.Errno. +package windows // import "golang.org/x/sys/windows" + +import ( + "syscall" +) + +// ByteSliceFromString returns a NUL-terminated slice of bytes +// containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func ByteSliceFromString(s string) ([]byte, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, syscall.EINVAL + } + } + a := make([]byte, len(s)+1) + copy(a, s) + return a, nil +} + +// BytePtrFromString returns a pointer to a NUL-terminated array of +// bytes containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func BytePtrFromString(s string) (*byte, error) { + a, err := ByteSliceFromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mksyscall.pl. +var _zero uintptr + +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/windows/syscall_test.go b/vendor/golang.org/x/sys/windows/syscall_test.go new file mode 100644 index 0000000..d7009e4 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_test.go @@ -0,0 +1,53 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows_test + +import ( + "syscall" + "testing" + + "golang.org/x/sys/windows" +) + +func testSetGetenv(t *testing.T, key, value string) { + err := windows.Setenv(key, value) + if err != nil { + t.Fatalf("Setenv failed to set %q: %v", value, err) + } + newvalue, found := windows.Getenv(key) + if !found { + t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value) + } + if newvalue != value { + t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value) + } +} + +func TestEnv(t *testing.T) { + testSetGetenv(t, "TESTENV", "AVALUE") + // make sure TESTENV gets set to "", not deleted + testSetGetenv(t, "TESTENV", "") +} + +func TestGetProcAddressByOrdinal(t *testing.T) { + // Attempt calling shlwapi.dll:IsOS, resolving it by ordinal, as + // suggested in + // https://msdn.microsoft.com/en-us/library/windows/desktop/bb773795.aspx + h, err := windows.LoadLibrary("shlwapi.dll") + if err != nil { + t.Fatalf("Failed to load shlwapi.dll: %s", err) + } + procIsOS, err := windows.GetProcAddressByOrdinal(h, 437) + if err != nil { + t.Fatalf("Could not find shlwapi.dll:IsOS by ordinal: %s", err) + } + const OS_NT = 1 + r, _, _ := syscall.Syscall(procIsOS, 1, OS_NT, 0, 0) + if r == 0 { + t.Error("shlwapi.dll:IsOS(OS_NT) returned 0, expected non-zero value") + } +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go new file mode 100644 index 0000000..1e9f4bb --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -0,0 +1,1153 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows system calls. + +package windows + +import ( + errorspkg "errors" + "sync" + "syscall" + "unicode/utf16" + "unsafe" +) + +type Handle uintptr + +const ( + InvalidHandle = ^Handle(0) + + // Flags for DefineDosDevice. + DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 + DDD_NO_BROADCAST_SYSTEM = 0x00000008 + DDD_RAW_TARGET_PATH = 0x00000001 + DDD_REMOVE_DEFINITION = 0x00000002 + + // Return values for GetDriveType. + DRIVE_UNKNOWN = 0 + DRIVE_NO_ROOT_DIR = 1 + DRIVE_REMOVABLE = 2 + DRIVE_FIXED = 3 + DRIVE_REMOTE = 4 + DRIVE_CDROM = 5 + DRIVE_RAMDISK = 6 + + // File system flags from GetVolumeInformation and GetVolumeInformationByHandle. + FILE_CASE_SENSITIVE_SEARCH = 0x00000001 + FILE_CASE_PRESERVED_NAMES = 0x00000002 + FILE_FILE_COMPRESSION = 0x00000010 + FILE_DAX_VOLUME = 0x20000000 + FILE_NAMED_STREAMS = 0x00040000 + FILE_PERSISTENT_ACLS = 0x00000008 + FILE_READ_ONLY_VOLUME = 0x00080000 + FILE_SEQUENTIAL_WRITE_ONCE = 0x00100000 + FILE_SUPPORTS_ENCRYPTION = 0x00020000 + FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000 + FILE_SUPPORTS_HARD_LINKS = 0x00400000 + FILE_SUPPORTS_OBJECT_IDS = 0x00010000 + FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 + FILE_SUPPORTS_REPARSE_POINTS = 0x00000080 + FILE_SUPPORTS_SPARSE_FILES = 0x00000040 + FILE_SUPPORTS_TRANSACTIONS = 0x00200000 + FILE_SUPPORTS_USN_JOURNAL = 0x02000000 + FILE_UNICODE_ON_DISK = 0x00000004 + FILE_VOLUME_IS_COMPRESSED = 0x00008000 + FILE_VOLUME_QUOTAS = 0x00000020 +) + +// StringToUTF16 is deprecated. Use UTF16FromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16(s string) []uint16 { + a, err := UTF16FromString(s) + if err != nil { + panic("windows: string with NUL passed to StringToUTF16") + } + return a +} + +// UTF16FromString returns the UTF-16 encoding of the UTF-8 string +// s, with a terminating NUL added. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func UTF16FromString(s string) ([]uint16, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, syscall.EINVAL + } + } + return utf16.Encode([]rune(s + "\x00")), nil +} + +// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, +// with a terminating NUL removed. +func UTF16ToString(s []uint16) string { + for i, v := range s { + if v == 0 { + s = s[0:i] + break + } + } + return string(utf16.Decode(s)) +} + +// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] } + +// UTF16PtrFromString returns pointer to the UTF-16 encoding of +// the UTF-8 string s, with a terminating NUL added. If s +// contains a NUL byte at any location, it returns (nil, syscall.EINVAL). +func UTF16PtrFromString(s string) (*uint16, error) { + a, err := UTF16FromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +func Getpagesize() int { return 4096 } + +// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. +// This is useful when interoperating with Windows code requiring callbacks. +func NewCallback(fn interface{}) uintptr { + return syscall.NewCallback(fn) +} + +// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. +// This is useful when interoperating with Windows code requiring callbacks. +func NewCallbackCDecl(fn interface{}) uintptr { + return syscall.NewCallbackCDecl(fn) +} + +// windows api calls + +//sys GetLastError() (lasterr error) +//sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW +//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW +//sys FreeLibrary(handle Handle) (err error) +//sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) +//sys GetVersion() (ver uint32, err error) +//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW +//sys ExitProcess(exitcode uint32) +//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] +//sys CloseHandle(handle Handle) (err error) +//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] +//sys SetStdHandle(stdhandle uint32, handle Handle) (err error) +//sys findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW +//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW +//sys FindClose(handle Handle) (err error) +//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) +//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW +//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW +//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW +//sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW +//sys DeleteFile(path *uint16) (err error) = DeleteFileW +//sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW +//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW +//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW +//sys SetEndOfFile(handle Handle) (err error) +//sys GetSystemTimeAsFileTime(time *Filetime) +//sys GetSystemTimePreciseAsFileTime(time *Filetime) +//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] +//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) +//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) +//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) +//sys CancelIo(s Handle) (err error) +//sys CancelIoEx(s Handle, o *Overlapped) (err error) +//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW +//sys OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) +//sys TerminateProcess(handle Handle, exitcode uint32) (err error) +//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) +//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys GetCurrentProcess() (pseudoHandle Handle, err error) +//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) +//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) +//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] +//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW +//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) +//sys GetFileType(filehandle Handle) (n uint32, err error) +//sys CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = advapi32.CryptAcquireContextW +//sys CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext +//sys CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom +//sys GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW +//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW +//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW +//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) +//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW +//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW +//sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW +//sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW +//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) +//sys FlushFileBuffers(handle Handle) (err error) +//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW +//sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW +//sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW +//sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) +//sys UnmapViewOfFile(addr uintptr) (err error) +//sys FlushViewOfFile(addr uintptr, length uintptr) (err error) +//sys VirtualLock(addr uintptr, length uintptr) (err error) +//sys VirtualUnlock(addr uintptr, length uintptr) (err error) +//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc +//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree +//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect +//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile +//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW +//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW +//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) [failretval==InvalidHandle] = crypt32.CertOpenStore +//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore +//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore +//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore +//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain +//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain +//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext +//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext +//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy +//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW +//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey +//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW +//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW +//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW +//sys getCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode +//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode +//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo +//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW +//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot +//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW +//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW +//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW +//sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW +//sys GetCurrentThreadId() (id uint32) +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW +//sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW +//sys SetEvent(event Handle) (err error) = kernel32.SetEvent +//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent +//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent + +// Volume Management Functions +//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW +//sys DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW +//sys FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW +//sys FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW +//sys FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW +//sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW +//sys FindVolumeClose(findVolume Handle) (err error) +//sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) +//sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW +//sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] +//sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW +//sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW +//sys GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW +//sys GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW +//sys GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW +//sys GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW +//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW +//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW +//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW + +// syscall interface implementation for other packages + +// GetProcAddressByOrdinal retrieves the address of the exported +// function from module by ordinal. +func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) + proc = uintptr(r0) + if proc == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Exit(code int) { ExitProcess(uint32(code)) } + +func makeInheritSa() *SecurityAttributes { + var sa SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func Open(path string, mode int, perm uint32) (fd Handle, err error) { + if len(path) == 0 { + return InvalidHandle, ERROR_FILE_NOT_FOUND + } + pathp, err := UTF16PtrFromString(path) + if err != nil { + return InvalidHandle, err + } + var access uint32 + switch mode & (O_RDONLY | O_WRONLY | O_RDWR) { + case O_RDONLY: + access = GENERIC_READ + case O_WRONLY: + access = GENERIC_WRITE + case O_RDWR: + access = GENERIC_READ | GENERIC_WRITE + } + if mode&O_CREAT != 0 { + access |= GENERIC_WRITE + } + if mode&O_APPEND != 0 { + access &^= GENERIC_WRITE + access |= FILE_APPEND_DATA + } + sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE) + var sa *SecurityAttributes + if mode&O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL): + createmode = CREATE_NEW + case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC): + createmode = CREATE_ALWAYS + case mode&O_CREAT == O_CREAT: + createmode = OPEN_ALWAYS + case mode&O_TRUNC == O_TRUNC: + createmode = TRUNCATE_EXISTING + default: + createmode = OPEN_EXISTING + } + h, e := CreateFile(pathp, access, sharemode, sa, createmode, FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +func Read(fd Handle, p []byte) (n int, err error) { + var done uint32 + e := ReadFile(fd, p, &done, nil) + if e != nil { + if e == ERROR_BROKEN_PIPE { + // NOTE(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin + return 0, nil + } + return 0, e + } + if raceenabled { + if done > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), int(done)) + } + raceAcquire(unsafe.Pointer(&ioSync)) + } + return int(done), nil +} + +func Write(fd Handle, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + var done uint32 + e := WriteFile(fd, p, &done, nil) + if e != nil { + return 0, e + } + if raceenabled && done > 0 { + raceReadRange(unsafe.Pointer(&p[0]), int(done)) + } + return int(done), nil +} + +var ioSync int64 + +func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { + var w uint32 + switch whence { + case 0: + w = FILE_BEGIN + case 1: + w = FILE_CURRENT + case 2: + w = FILE_END + } + hi := int32(offset >> 32) + lo := int32(offset) + // use GetFileType to check pipe, pipe can't do seek + ft, _ := GetFileType(fd) + if ft == FILE_TYPE_PIPE { + return 0, syscall.EPIPE + } + rlo, e := SetFilePointer(fd, lo, &hi, w) + if e != nil { + return 0, e + } + return int64(hi)<<32 + int64(rlo), nil +} + +func Close(fd Handle) (err error) { + return CloseHandle(fd) +} + +var ( + Stdin = getStdHandle(STD_INPUT_HANDLE) + Stdout = getStdHandle(STD_OUTPUT_HANDLE) + Stderr = getStdHandle(STD_ERROR_HANDLE) +) + +func getStdHandle(stdhandle uint32) (fd Handle) { + r, _ := GetStdHandle(stdhandle) + CloseOnExec(r) + return r +} + +const ImplementsGetwd = true + +func Getwd() (wd string, err error) { + b := make([]uint16, 300) + n, e := GetCurrentDirectory(uint32(len(b)), &b[0]) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func Chdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return SetCurrentDirectory(pathp) +} + +func Mkdir(path string, mode uint32) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return CreateDirectory(pathp, nil) +} + +func Rmdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return RemoveDirectory(pathp) +} + +func Unlink(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return DeleteFile(pathp) +} + +func Rename(oldpath, newpath string) (err error) { + from, err := UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := UTF16PtrFromString(newpath) + if err != nil { + return err + } + return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) +} + +func ComputerName() (name string, err error) { + var n uint32 = MAX_COMPUTERNAME_LENGTH + 1 + b := make([]uint16, n) + e := GetComputerName(&b[0], &n) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func Ftruncate(fd Handle, length int64) (err error) { + curoffset, e := Seek(fd, 0, 1) + if e != nil { + return e + } + defer Seek(fd, curoffset, 0) + _, e = Seek(fd, length, 0) + if e != nil { + return e + } + e = SetEndOfFile(fd) + if e != nil { + return e + } + return nil +} + +func Gettimeofday(tv *Timeval) (err error) { + var ft Filetime + GetSystemTimeAsFileTime(&ft) + *tv = NsecToTimeval(ft.Nanoseconds()) + return nil +} + +func Pipe(p []Handle) (err error) { + if len(p) != 2 { + return syscall.EINVAL + } + var r, w Handle + e := CreatePipe(&r, &w, makeInheritSa(), 0) + if e != nil { + return e + } + p[0] = r + p[1] = w + return nil +} + +func Utimes(path string, tv []Timeval) (err error) { + if len(tv) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(tv[0].Nanoseconds()) + w := NsecToFiletime(tv[1].Nanoseconds()) + return SetFileTime(h, nil, &a, &w) +} + +func UtimesNano(path string, ts []Timespec) (err error) { + if len(ts) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(TimespecToNsec(ts[0])) + w := NsecToFiletime(TimespecToNsec(ts[1])) + return SetFileTime(h, nil, &a, &w) +} + +func Fsync(fd Handle) (err error) { + return FlushFileBuffers(fd) +} + +func Chmod(path string, mode uint32) (err error) { + if mode == 0 { + return syscall.EINVAL + } + p, e := UTF16PtrFromString(path) + if e != nil { + return e + } + attrs, e := GetFileAttributes(p) + if e != nil { + return e + } + if mode&S_IWRITE != 0 { + attrs &^= FILE_ATTRIBUTE_READONLY + } else { + attrs |= FILE_ATTRIBUTE_READONLY + } + return SetFileAttributes(p, attrs) +} + +func LoadGetSystemTimePreciseAsFileTime() error { + return procGetSystemTimePreciseAsFileTime.Find() +} + +func LoadCancelIoEx() error { + return procCancelIoEx.Find() +} + +func LoadSetFileCompletionNotificationModes() error { + return procSetFileCompletionNotificationModes.Find() +} + +// net api calls + +const socket_error = uintptr(^uint32(0)) + +//sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup +//sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup +//sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket +//sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt +//sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt +//sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind +//sys connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect +//sys getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname +//sys getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername +//sys listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen +//sys shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown +//sys Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket +//sys AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx +//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs +//sys WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv +//sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend +//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom +//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo +//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname +//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname +//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs +//sys GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname +//sys DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W +//sys DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree +//sys DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W +//sys GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW +//sys FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW +//sys GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry +//sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo +//sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes +//sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses +//sys GetACP() (acp uint32) = kernel32.GetACP +//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. +var SocketDisableIPv6 bool + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs +} + +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type SockaddrUnix struct { + Name string +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { + // TODO(brainman): implement SockaddrUnix.sockaddr() + return nil, 0, syscall.EWINDOWS +} + +func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_UNIX: + return nil, syscall.EWINDOWS + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, syscall.EAFNOSUPPORT +} + +func Socket(domain, typ, proto int) (fd Handle, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return InvalidHandle, syscall.EAFNOSUPPORT + } + return socket(int32(domain), int32(typ), int32(proto)) +} + +func SetsockoptInt(fd Handle, level, opt int, value int) (err error) { + v := int32(value) + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))) +} + +func Bind(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getsockname(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getsockname(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Getpeername(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getpeername(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Listen(s Handle, n int) (err error) { + return listen(s, int32(n)) +} + +func Shutdown(fd Handle, how int) (err error) { + return shutdown(fd, int32(how)) +} + +func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { + rsa, l, err := to.sockaddr() + if err != nil { + return err + } + return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) +} + +func LoadGetAddrInfo() error { + return procGetAddrInfoW.Find() +} + +var connectExFunc struct { + once sync.Once + addr uintptr + err error +} + +func LoadConnectEx() error { + connectExFunc.once.Do(func() { + var s Handle + s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) + if connectExFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + connectExFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_CONNECTEX)), + uint32(unsafe.Sizeof(WSAID_CONNECTEX)), + (*byte)(unsafe.Pointer(&connectExFunc.addr)), + uint32(unsafe.Sizeof(connectExFunc.addr)), + &n, nil, 0) + }) + return connectExFunc.err +} + +func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error { + err := LoadConnectEx() + if err != nil { + return errorspkg.New("failed to find ConnectEx: " + err.Error()) + } + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +var sendRecvMsgFunc struct { + once sync.Once + sendAddr uintptr + recvAddr uintptr + err error +} + +func loadWSASendRecvMsg() error { + sendRecvMsgFunc.once.Do(func() { + var s Handle + s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP) + if sendRecvMsgFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), + uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), + &n, nil, 0) + if sendRecvMsgFunc.err != nil { + return + } + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), + uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), + &n, nil, 0) + }) + return sendRecvMsgFunc.err +} + +func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +// Invented structures to support what package os expects. +type Rusage struct { + CreationTime Filetime + ExitTime Filetime + KernelTime Filetime + UserTime Filetime +} + +type WaitStatus struct { + ExitCode uint32 +} + +func (w WaitStatus) Exited() bool { return true } + +func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) } + +func (w WaitStatus) Signal() Signal { return -1 } + +func (w WaitStatus) CoreDump() bool { return false } + +func (w WaitStatus) Stopped() bool { return false } + +func (w WaitStatus) Continued() bool { return false } + +func (w WaitStatus) StopSignal() Signal { return -1 } + +func (w WaitStatus) Signaled() bool { return false } + +func (w WaitStatus) TrapCause() int { return -1 } + +// Timespec is an invented structure on Windows, but here for +// consistency with the corresponding package for other operating systems. +type Timespec struct { + Sec int64 + Nsec int64 +} + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +// TODO(brainman): fix all needed for net + +func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS } +func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { + return 0, nil, syscall.EWINDOWS +} +func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { return syscall.EWINDOWS } +func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } + +// The Linger struct is wrong but we only noticed after Go 1. +// sysLinger is the real system call structure. + +// BUG(brainman): The definition of Linger is not appropriate for direct use +// with Setsockopt and Getsockopt. +// Use SetsockoptLinger instead. + +type Linger struct { + Onoff int32 + Linger int32 +} + +type sysLinger struct { + Onoff uint16 + Linger uint16 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +func GetsockoptInt(fd Handle, level, opt int) (int, error) { return -1, syscall.EWINDOWS } + +func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { + sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys))) +} + +func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) +} +func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) +} +func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { + return syscall.EWINDOWS +} + +func Getpid() (pid int) { return int(getCurrentProcessId()) } + +func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { + // NOTE(rsc): The Win32finddata struct is wrong for the system call: + // the two paths are each one uint16 short. Use the correct struct, + // a win32finddata1, and then copy the results out. + // There is no loss of expressivity here, because the final + // uint16, if it is used, is supposed to be a NUL, and Go doesn't need that. + // For Go 1.1, we might avoid the allocation of win32finddata1 here + // by adding a final Bug [2]uint16 field to the struct and then + // adjusting the fields in the result directly. + var data1 win32finddata1 + handle, err = findFirstFile1(name, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func FindNextFile(handle Handle, data *Win32finddata) (err error) { + var data1 win32finddata1 + err = findNextFile1(handle, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func getProcessEntry(pid int) (*ProcessEntry32, error) { + snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer CloseHandle(snapshot) + var procEntry ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +func Getppid() (ppid int) { + pe, err := getProcessEntry(Getpid()) + if err != nil { + return -1 + } + return int(pe.ParentProcessID) +} + +// TODO(brainman): fix all needed for os +func Fchdir(fd Handle) (err error) { return syscall.EWINDOWS } +func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS } +func Symlink(path, link string) (err error) { return syscall.EWINDOWS } + +func Fchmod(fd Handle, mode uint32) (err error) { return syscall.EWINDOWS } +func Chown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Fchown(fd Handle, uid int, gid int) (err error) { return syscall.EWINDOWS } + +func Getuid() (uid int) { return -1 } +func Geteuid() (euid int) { return -1 } +func Getgid() (gid int) { return -1 } +func Getegid() (egid int) { return -1 } +func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } + +type Signal int + +func (s Signal) Signal() {} + +func (s Signal) String() string { + if 0 <= s && int(s) < len(signals) { + str := signals[s] + if str != "" { + return str + } + } + return "signal " + itoa(int(s)) +} + +func LoadCreateSymbolicLink() error { + return procCreateSymbolicLinkW.Find() +} + +// Readlink returns the destination of the named symbolic link. +func Readlink(path string, buf []byte) (n int, err error) { + fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer CloseHandle(fd) + + rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + case IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows_test.go b/vendor/golang.org/x/sys/windows/syscall_windows_test.go new file mode 100644 index 0000000..9c7133c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_windows_test.go @@ -0,0 +1,107 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" + "unsafe" + + "golang.org/x/sys/windows" +) + +func TestWin32finddata(t *testing.T) { + dir, err := ioutil.TempDir("", "go-build") + if err != nil { + t.Fatalf("failed to create temp directory: %v", err) + } + defer os.RemoveAll(dir) + + path := filepath.Join(dir, "long_name.and_extension") + f, err := os.Create(path) + if err != nil { + t.Fatalf("failed to create %v: %v", path, err) + } + f.Close() + + type X struct { + fd windows.Win32finddata + got byte + pad [10]byte // to protect ourselves + + } + var want byte = 2 // it is unlikely to have this character in the filename + x := X{got: want} + + pathp, _ := windows.UTF16PtrFromString(path) + h, err := windows.FindFirstFile(pathp, &(x.fd)) + if err != nil { + t.Fatalf("FindFirstFile failed: %v", err) + } + err = windows.FindClose(h) + if err != nil { + t.Fatalf("FindClose failed: %v", err) + } + + if x.got != want { + t.Fatalf("memory corruption: want=%d got=%d", want, x.got) + } +} + +func TestFormatMessage(t *testing.T) { + dll := windows.MustLoadDLL("pdh.dll") + + pdhOpenQuery := func(datasrc *uint16, userdata uint32, query *windows.Handle) (errno uintptr) { + r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhOpenQueryW").Addr(), 3, uintptr(unsafe.Pointer(datasrc)), uintptr(userdata), uintptr(unsafe.Pointer(query))) + return r0 + } + + pdhCloseQuery := func(query windows.Handle) (errno uintptr) { + r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhCloseQuery").Addr(), 1, uintptr(query), 0, 0) + return r0 + } + + var q windows.Handle + name, err := windows.UTF16PtrFromString("no_such_source") + if err != nil { + t.Fatal(err) + } + errno := pdhOpenQuery(name, 0, &q) + if errno == 0 { + pdhCloseQuery(q) + t.Fatal("PdhOpenQuery succeeded, but expected to fail.") + } + + const flags uint32 = syscall.FORMAT_MESSAGE_FROM_HMODULE | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS + buf := make([]uint16, 300) + _, err = windows.FormatMessage(flags, uintptr(dll.Handle), uint32(errno), 0, buf, nil) + if err != nil { + t.Fatalf("FormatMessage for handle=%x and errno=%x failed: %v", dll.Handle, errno, err) + } +} + +func abort(funcname string, err error) { + panic(funcname + " failed: " + err.Error()) +} + +func ExampleLoadLibrary() { + h, err := windows.LoadLibrary("kernel32.dll") + if err != nil { + abort("LoadLibrary", err) + } + defer windows.FreeLibrary(h) + proc, err := windows.GetProcAddress(h, "GetVersion") + if err != nil { + abort("GetProcAddress", err) + } + r, _, _ := syscall.Syscall(uintptr(proc), 0, 0, 0, 0) + major := byte(r) + minor := uint8(r >> 8) + build := uint16(r >> 16) + print("windows version ", major, ".", minor, " (Build ", build, ")\n") +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go new file mode 100644 index 0000000..52c2037 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -0,0 +1,1333 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import "syscall" + +const ( + // Windows errors. + ERROR_FILE_NOT_FOUND syscall.Errno = 2 + ERROR_PATH_NOT_FOUND syscall.Errno = 3 + ERROR_ACCESS_DENIED syscall.Errno = 5 + ERROR_NO_MORE_FILES syscall.Errno = 18 + ERROR_HANDLE_EOF syscall.Errno = 38 + ERROR_NETNAME_DELETED syscall.Errno = 64 + ERROR_FILE_EXISTS syscall.Errno = 80 + ERROR_BROKEN_PIPE syscall.Errno = 109 + ERROR_BUFFER_OVERFLOW syscall.Errno = 111 + ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 + ERROR_MOD_NOT_FOUND syscall.Errno = 126 + ERROR_PROC_NOT_FOUND syscall.Errno = 127 + ERROR_ALREADY_EXISTS syscall.Errno = 183 + ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 + ERROR_MORE_DATA syscall.Errno = 234 + ERROR_OPERATION_ABORTED syscall.Errno = 995 + ERROR_IO_PENDING syscall.Errno = 997 + ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066 + ERROR_NOT_FOUND syscall.Errno = 1168 + ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 + WSAEACCES syscall.Errno = 10013 + WSAEMSGSIZE syscall.Errno = 10040 + WSAECONNRESET syscall.Errno = 10054 +) + +const ( + // Invented values to support what package os expects. + O_RDONLY = 0x00000 + O_WRONLY = 0x00001 + O_RDWR = 0x00002 + O_CREAT = 0x00040 + O_EXCL = 0x00080 + O_NOCTTY = 0x00100 + O_TRUNC = 0x00200 + O_NONBLOCK = 0x00800 + O_APPEND = 0x00400 + O_SYNC = 0x01000 + O_ASYNC = 0x02000 + O_CLOEXEC = 0x80000 +) + +const ( + // More invented values for signals + SIGHUP = Signal(0x1) + SIGINT = Signal(0x2) + SIGQUIT = Signal(0x3) + SIGILL = Signal(0x4) + SIGTRAP = Signal(0x5) + SIGABRT = Signal(0x6) + SIGBUS = Signal(0x7) + SIGFPE = Signal(0x8) + SIGKILL = Signal(0x9) + SIGSEGV = Signal(0xb) + SIGPIPE = Signal(0xd) + SIGALRM = Signal(0xe) + SIGTERM = Signal(0xf) +) + +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", +} + +const ( + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + GENERIC_EXECUTE = 0x20000000 + GENERIC_ALL = 0x10000000 + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_APPEND_DATA = 0x00000004 + FILE_WRITE_ATTRIBUTES = 0x00000100 + + FILE_SHARE_READ = 0x00000001 + FILE_SHARE_WRITE = 0x00000002 + FILE_SHARE_DELETE = 0x00000004 + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + + INVALID_FILE_ATTRIBUTES = 0xffffffff + + CREATE_NEW = 1 + CREATE_ALWAYS = 2 + OPEN_EXISTING = 3 + OPEN_ALWAYS = 4 + TRUNCATE_EXISTING = 5 + + FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 + FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 + FILE_FLAG_OVERLAPPED = 0x40000000 + + HANDLE_FLAG_INHERIT = 0x00000001 + STARTF_USESTDHANDLES = 0x00000100 + STARTF_USESHOWWINDOW = 0x00000001 + DUPLICATE_CLOSE_SOURCE = 0x00000001 + DUPLICATE_SAME_ACCESS = 0x00000002 + + STD_INPUT_HANDLE = -10 & (1<<32 - 1) + STD_OUTPUT_HANDLE = -11 & (1<<32 - 1) + STD_ERROR_HANDLE = -12 & (1<<32 - 1) + + FILE_BEGIN = 0 + FILE_CURRENT = 1 + FILE_END = 2 + + LANG_ENGLISH = 0x09 + SUBLANG_ENGLISH_US = 0x01 + + FORMAT_MESSAGE_ALLOCATE_BUFFER = 256 + FORMAT_MESSAGE_IGNORE_INSERTS = 512 + FORMAT_MESSAGE_FROM_STRING = 1024 + FORMAT_MESSAGE_FROM_HMODULE = 2048 + FORMAT_MESSAGE_FROM_SYSTEM = 4096 + FORMAT_MESSAGE_ARGUMENT_ARRAY = 8192 + FORMAT_MESSAGE_MAX_WIDTH_MASK = 255 + + MAX_PATH = 260 + MAX_LONG_PATH = 32768 + + MAX_COMPUTERNAME_LENGTH = 15 + + TIME_ZONE_ID_UNKNOWN = 0 + TIME_ZONE_ID_STANDARD = 1 + + TIME_ZONE_ID_DAYLIGHT = 2 + IGNORE = 0 + INFINITE = 0xffffffff + + WAIT_TIMEOUT = 258 + WAIT_ABANDONED = 0x00000080 + WAIT_OBJECT_0 = 0x00000000 + WAIT_FAILED = 0xFFFFFFFF + + PROCESS_TERMINATE = 1 + PROCESS_QUERY_INFORMATION = 0x00000400 + SYNCHRONIZE = 0x00100000 + + FILE_MAP_COPY = 0x01 + FILE_MAP_WRITE = 0x02 + FILE_MAP_READ = 0x04 + FILE_MAP_EXECUTE = 0x20 + + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + + // Windows reserves errors >= 1<<29 for application use. + APPLICATION_ERROR = 1 << 29 +) + +const ( + // Process creation flags. + CREATE_BREAKAWAY_FROM_JOB = 0x01000000 + CREATE_DEFAULT_ERROR_MODE = 0x04000000 + CREATE_NEW_CONSOLE = 0x00000010 + CREATE_NEW_PROCESS_GROUP = 0x00000200 + CREATE_NO_WINDOW = 0x08000000 + CREATE_PROTECTED_PROCESS = 0x00040000 + CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000 + CREATE_SEPARATE_WOW_VDM = 0x00000800 + CREATE_SHARED_WOW_VDM = 0x00001000 + CREATE_SUSPENDED = 0x00000004 + CREATE_UNICODE_ENVIRONMENT = 0x00000400 + DEBUG_ONLY_THIS_PROCESS = 0x00000002 + DEBUG_PROCESS = 0x00000001 + DETACHED_PROCESS = 0x00000008 + EXTENDED_STARTUPINFO_PRESENT = 0x00080000 + INHERIT_PARENT_AFFINITY = 0x00010000 +) + +const ( + // flags for CreateToolhelp32Snapshot + TH32CS_SNAPHEAPLIST = 0x01 + TH32CS_SNAPPROCESS = 0x02 + TH32CS_SNAPTHREAD = 0x04 + TH32CS_SNAPMODULE = 0x08 + TH32CS_SNAPMODULE32 = 0x10 + TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD + TH32CS_INHERIT = 0x80000000 +) + +const ( + // filters for ReadDirectoryChangesW + FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 + FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 + FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 + FILE_NOTIFY_CHANGE_SIZE = 0x008 + FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 + FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 + FILE_NOTIFY_CHANGE_CREATION = 0x040 + FILE_NOTIFY_CHANGE_SECURITY = 0x100 +) + +const ( + // do not reorder + FILE_ACTION_ADDED = iota + 1 + FILE_ACTION_REMOVED + FILE_ACTION_MODIFIED + FILE_ACTION_RENAMED_OLD_NAME + FILE_ACTION_RENAMED_NEW_NAME +) + +const ( + // wincrypt.h + PROV_RSA_FULL = 1 + PROV_RSA_SIG = 2 + PROV_DSS = 3 + PROV_FORTEZZA = 4 + PROV_MS_EXCHANGE = 5 + PROV_SSL = 6 + PROV_RSA_SCHANNEL = 12 + PROV_DSS_DH = 13 + PROV_EC_ECDSA_SIG = 14 + PROV_EC_ECNRA_SIG = 15 + PROV_EC_ECDSA_FULL = 16 + PROV_EC_ECNRA_FULL = 17 + PROV_DH_SCHANNEL = 18 + PROV_SPYRUS_LYNKS = 20 + PROV_RNG = 21 + PROV_INTEL_SEC = 22 + PROV_REPLACE_OWF = 23 + PROV_RSA_AES = 24 + CRYPT_VERIFYCONTEXT = 0xF0000000 + CRYPT_NEWKEYSET = 0x00000008 + CRYPT_DELETEKEYSET = 0x00000010 + CRYPT_MACHINE_KEYSET = 0x00000020 + CRYPT_SILENT = 0x00000040 + CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080 + + USAGE_MATCH_TYPE_AND = 0 + USAGE_MATCH_TYPE_OR = 1 + + X509_ASN_ENCODING = 0x00000001 + PKCS_7_ASN_ENCODING = 0x00010000 + + CERT_STORE_PROV_MEMORY = 2 + + CERT_STORE_ADD_ALWAYS = 4 + + CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004 + + CERT_TRUST_NO_ERROR = 0x00000000 + CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001 + CERT_TRUST_IS_REVOKED = 0x00000004 + CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008 + CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010 + CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020 + CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040 + CERT_TRUST_IS_CYCLIC = 0x00000080 + CERT_TRUST_INVALID_EXTENSION = 0x00000100 + CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200 + CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400 + CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800 + CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000 + CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000 + CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000 + CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000 + CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000 + CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000 + CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000 + CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000 + + CERT_CHAIN_POLICY_BASE = 1 + CERT_CHAIN_POLICY_AUTHENTICODE = 2 + CERT_CHAIN_POLICY_AUTHENTICODE_TS = 3 + CERT_CHAIN_POLICY_SSL = 4 + CERT_CHAIN_POLICY_BASIC_CONSTRAINTS = 5 + CERT_CHAIN_POLICY_NT_AUTH = 6 + CERT_CHAIN_POLICY_MICROSOFT_ROOT = 7 + CERT_CHAIN_POLICY_EV = 8 + + CERT_E_EXPIRED = 0x800B0101 + CERT_E_ROLE = 0x800B0103 + CERT_E_PURPOSE = 0x800B0106 + CERT_E_UNTRUSTEDROOT = 0x800B0109 + CERT_E_CN_NO_MATCH = 0x800B010F + + AUTHTYPE_CLIENT = 1 + AUTHTYPE_SERVER = 2 +) + +var ( + OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") + OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") + OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00") +) + +// Invented values to support what package os expects. +type Timeval struct { + Sec int32 + Usec int32 +} + +func (tv *Timeval) Nanoseconds() int64 { + return (int64(tv.Sec)*1e6 + int64(tv.Usec)) * 1e3 +} + +func NsecToTimeval(nsec int64) (tv Timeval) { + tv.Sec = int32(nsec / 1e9) + tv.Usec = int32(nsec % 1e9 / 1e3) + return +} + +type SecurityAttributes struct { + Length uint32 + SecurityDescriptor uintptr + InheritHandle uint32 +} + +type Overlapped struct { + Internal uintptr + InternalHigh uintptr + Offset uint32 + OffsetHigh uint32 + HEvent Handle +} + +type FileNotifyInformation struct { + NextEntryOffset uint32 + Action uint32 + FileNameLength uint32 + FileName uint16 +} + +type Filetime struct { + LowDateTime uint32 + HighDateTime uint32 +} + +// Nanoseconds returns Filetime ft in nanoseconds +// since Epoch (00:00:00 UTC, January 1, 1970). +func (ft *Filetime) Nanoseconds() int64 { + // 100-nanosecond intervals since January 1, 1601 + nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) + // change starting time to the Epoch (00:00:00 UTC, January 1, 1970) + nsec -= 116444736000000000 + // convert into nanoseconds + nsec *= 100 + return nsec +} + +func NsecToFiletime(nsec int64) (ft Filetime) { + // convert into 100-nanosecond + nsec /= 100 + // change starting time to January 1, 1601 + nsec += 116444736000000000 + // split into high / low + ft.LowDateTime = uint32(nsec & 0xffffffff) + ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff) + return ft +} + +type Win32finddata struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH - 1]uint16 + AlternateFileName [13]uint16 +} + +// This is the actual system call structure. +// Win32finddata is what we committed to in Go 1. +type win32finddata1 struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH]uint16 + AlternateFileName [14]uint16 +} + +func copyFindData(dst *Win32finddata, src *win32finddata1) { + dst.FileAttributes = src.FileAttributes + dst.CreationTime = src.CreationTime + dst.LastAccessTime = src.LastAccessTime + dst.LastWriteTime = src.LastWriteTime + dst.FileSizeHigh = src.FileSizeHigh + dst.FileSizeLow = src.FileSizeLow + dst.Reserved0 = src.Reserved0 + dst.Reserved1 = src.Reserved1 + + // The src is 1 element bigger than dst, but it must be NUL. + copy(dst.FileName[:], src.FileName[:]) + copy(dst.AlternateFileName[:], src.AlternateFileName[:]) +} + +type ByHandleFileInformation struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + VolumeSerialNumber uint32 + FileSizeHigh uint32 + FileSizeLow uint32 + NumberOfLinks uint32 + FileIndexHigh uint32 + FileIndexLow uint32 +} + +const ( + GetFileExInfoStandard = 0 + GetFileExMaxInfoLevel = 1 +) + +type Win32FileAttributeData struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 +} + +// ShowWindow constants +const ( + // winuser.h + SW_HIDE = 0 + SW_NORMAL = 1 + SW_SHOWNORMAL = 1 + SW_SHOWMINIMIZED = 2 + SW_SHOWMAXIMIZED = 3 + SW_MAXIMIZE = 3 + SW_SHOWNOACTIVATE = 4 + SW_SHOW = 5 + SW_MINIMIZE = 6 + SW_SHOWMINNOACTIVE = 7 + SW_SHOWNA = 8 + SW_RESTORE = 9 + SW_SHOWDEFAULT = 10 + SW_FORCEMINIMIZE = 11 +) + +type StartupInfo struct { + Cb uint32 + _ *uint16 + Desktop *uint16 + Title *uint16 + X uint32 + Y uint32 + XSize uint32 + YSize uint32 + XCountChars uint32 + YCountChars uint32 + FillAttribute uint32 + Flags uint32 + ShowWindow uint16 + _ uint16 + _ *byte + StdInput Handle + StdOutput Handle + StdErr Handle +} + +type ProcessInformation struct { + Process Handle + Thread Handle + ProcessId uint32 + ThreadId uint32 +} + +type ProcessEntry32 struct { + Size uint32 + Usage uint32 + ProcessID uint32 + DefaultHeapID uintptr + ModuleID uint32 + Threads uint32 + ParentProcessID uint32 + PriClassBase int32 + Flags uint32 + ExeFile [MAX_PATH]uint16 +} + +type Systemtime struct { + Year uint16 + Month uint16 + DayOfWeek uint16 + Day uint16 + Hour uint16 + Minute uint16 + Second uint16 + Milliseconds uint16 +} + +type Timezoneinformation struct { + Bias int32 + StandardName [32]uint16 + StandardDate Systemtime + StandardBias int32 + DaylightName [32]uint16 + DaylightDate Systemtime + DaylightBias int32 +} + +// Socket related. + +const ( + AF_UNSPEC = 0 + AF_UNIX = 1 + AF_INET = 2 + AF_INET6 = 23 + AF_NETBIOS = 17 + + SOCK_STREAM = 1 + SOCK_DGRAM = 2 + SOCK_RAW = 3 + SOCK_SEQPACKET = 5 + + IPPROTO_IP = 0 + IPPROTO_IPV6 = 0x29 + IPPROTO_TCP = 6 + IPPROTO_UDP = 17 + + SOL_SOCKET = 0xffff + SO_REUSEADDR = 4 + SO_KEEPALIVE = 8 + SO_DONTROUTE = 16 + SO_BROADCAST = 32 + SO_LINGER = 128 + SO_RCVBUF = 0x1002 + SO_SNDBUF = 0x1001 + SO_UPDATE_ACCEPT_CONTEXT = 0x700b + SO_UPDATE_CONNECT_CONTEXT = 0x7010 + + IOC_OUT = 0x40000000 + IOC_IN = 0x80000000 + IOC_VENDOR = 0x18000000 + IOC_INOUT = IOC_IN | IOC_OUT + IOC_WS2 = 0x08000000 + SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 + SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 + SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + + // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 + + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_LOOP = 0xb + IP_ADD_MEMBERSHIP = 0xc + IP_DROP_MEMBERSHIP = 0xd + + IPV6_V6ONLY = 0x1b + IPV6_UNICAST_HOPS = 0x4 + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_LOOP = 0xb + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_DONTROUTE = 0x4 + MSG_WAITALL = 0x8 + + MSG_TRUNC = 0x0100 + MSG_CTRUNC = 0x0200 + MSG_BCAST = 0x0400 + MSG_MCAST = 0x0800 + + SOMAXCONN = 0x7fffffff + + TCP_NODELAY = 1 + + SHUT_RD = 0 + SHUT_WR = 1 + SHUT_RDWR = 2 + + WSADESCRIPTION_LEN = 256 + WSASYS_STATUS_LEN = 128 +) + +type WSABuf struct { + Len uint32 + Buf *byte +} + +type WSAMsg struct { + Name *syscall.RawSockaddrAny + Namelen int32 + Buffers *WSABuf + BufferCount uint32 + Control WSABuf + Flags uint32 +} + +// Invented values to support what package os expects. +const ( + S_IFMT = 0x1f000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +const ( + FILE_TYPE_CHAR = 0x0002 + FILE_TYPE_DISK = 0x0001 + FILE_TYPE_PIPE = 0x0003 + FILE_TYPE_REMOTE = 0x8000 + FILE_TYPE_UNKNOWN = 0x0000 +) + +type Hostent struct { + Name *byte + Aliases **byte + AddrType uint16 + Length uint16 + AddrList **byte +} + +type Protoent struct { + Name *byte + Aliases **byte + Proto uint16 +} + +const ( + DNS_TYPE_A = 0x0001 + DNS_TYPE_NS = 0x0002 + DNS_TYPE_MD = 0x0003 + DNS_TYPE_MF = 0x0004 + DNS_TYPE_CNAME = 0x0005 + DNS_TYPE_SOA = 0x0006 + DNS_TYPE_MB = 0x0007 + DNS_TYPE_MG = 0x0008 + DNS_TYPE_MR = 0x0009 + DNS_TYPE_NULL = 0x000a + DNS_TYPE_WKS = 0x000b + DNS_TYPE_PTR = 0x000c + DNS_TYPE_HINFO = 0x000d + DNS_TYPE_MINFO = 0x000e + DNS_TYPE_MX = 0x000f + DNS_TYPE_TEXT = 0x0010 + DNS_TYPE_RP = 0x0011 + DNS_TYPE_AFSDB = 0x0012 + DNS_TYPE_X25 = 0x0013 + DNS_TYPE_ISDN = 0x0014 + DNS_TYPE_RT = 0x0015 + DNS_TYPE_NSAP = 0x0016 + DNS_TYPE_NSAPPTR = 0x0017 + DNS_TYPE_SIG = 0x0018 + DNS_TYPE_KEY = 0x0019 + DNS_TYPE_PX = 0x001a + DNS_TYPE_GPOS = 0x001b + DNS_TYPE_AAAA = 0x001c + DNS_TYPE_LOC = 0x001d + DNS_TYPE_NXT = 0x001e + DNS_TYPE_EID = 0x001f + DNS_TYPE_NIMLOC = 0x0020 + DNS_TYPE_SRV = 0x0021 + DNS_TYPE_ATMA = 0x0022 + DNS_TYPE_NAPTR = 0x0023 + DNS_TYPE_KX = 0x0024 + DNS_TYPE_CERT = 0x0025 + DNS_TYPE_A6 = 0x0026 + DNS_TYPE_DNAME = 0x0027 + DNS_TYPE_SINK = 0x0028 + DNS_TYPE_OPT = 0x0029 + DNS_TYPE_DS = 0x002B + DNS_TYPE_RRSIG = 0x002E + DNS_TYPE_NSEC = 0x002F + DNS_TYPE_DNSKEY = 0x0030 + DNS_TYPE_DHCID = 0x0031 + DNS_TYPE_UINFO = 0x0064 + DNS_TYPE_UID = 0x0065 + DNS_TYPE_GID = 0x0066 + DNS_TYPE_UNSPEC = 0x0067 + DNS_TYPE_ADDRS = 0x00f8 + DNS_TYPE_TKEY = 0x00f9 + DNS_TYPE_TSIG = 0x00fa + DNS_TYPE_IXFR = 0x00fb + DNS_TYPE_AXFR = 0x00fc + DNS_TYPE_MAILB = 0x00fd + DNS_TYPE_MAILA = 0x00fe + DNS_TYPE_ALL = 0x00ff + DNS_TYPE_ANY = 0x00ff + DNS_TYPE_WINS = 0xff01 + DNS_TYPE_WINSR = 0xff02 + DNS_TYPE_NBSTAT = 0xff01 +) + +const ( + DNS_INFO_NO_RECORDS = 0x251D +) + +const ( + // flags inside DNSRecord.Dw + DnsSectionQuestion = 0x0000 + DnsSectionAnswer = 0x0001 + DnsSectionAuthority = 0x0002 + DnsSectionAdditional = 0x0003 +) + +type DNSSRVData struct { + Target *uint16 + Priority uint16 + Weight uint16 + Port uint16 + Pad uint16 +} + +type DNSPTRData struct { + Host *uint16 +} + +type DNSMXData struct { + NameExchange *uint16 + Preference uint16 + Pad uint16 +} + +type DNSTXTData struct { + StringCount uint16 + StringArray [1]*uint16 +} + +type DNSRecord struct { + Next *DNSRecord + Name *uint16 + Type uint16 + Length uint16 + Dw uint32 + Ttl uint32 + Reserved uint32 + Data [40]byte +} + +const ( + TF_DISCONNECT = 1 + TF_REUSE_SOCKET = 2 + TF_WRITE_BEHIND = 4 + TF_USE_DEFAULT_WORKER = 0 + TF_USE_SYSTEM_THREAD = 16 + TF_USE_KERNEL_APC = 32 +) + +type TransmitFileBuffers struct { + Head uintptr + HeadLength uint32 + Tail uintptr + TailLength uint32 +} + +const ( + IFF_UP = 1 + IFF_BROADCAST = 2 + IFF_LOOPBACK = 4 + IFF_POINTTOPOINT = 8 + IFF_MULTICAST = 16 +) + +const SIO_GET_INTERFACE_LIST = 0x4004747F + +// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old. +// will be fixed to change variable type as suitable. + +type SockaddrGen [24]byte + +type InterfaceInfo struct { + Flags uint32 + Address SockaddrGen + BroadcastAddress SockaddrGen + Netmask SockaddrGen +} + +type IpAddressString struct { + String [16]byte +} + +type IpMaskString IpAddressString + +type IpAddrString struct { + Next *IpAddrString + IpAddress IpAddressString + IpMask IpMaskString + Context uint32 +} + +const MAX_ADAPTER_NAME_LENGTH = 256 +const MAX_ADAPTER_DESCRIPTION_LENGTH = 128 +const MAX_ADAPTER_ADDRESS_LENGTH = 8 + +type IpAdapterInfo struct { + Next *IpAdapterInfo + ComboIndex uint32 + AdapterName [MAX_ADAPTER_NAME_LENGTH + 4]byte + Description [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte + AddressLength uint32 + Address [MAX_ADAPTER_ADDRESS_LENGTH]byte + Index uint32 + Type uint32 + DhcpEnabled uint32 + CurrentIpAddress *IpAddrString + IpAddressList IpAddrString + GatewayList IpAddrString + DhcpServer IpAddrString + HaveWins bool + PrimaryWinsServer IpAddrString + SecondaryWinsServer IpAddrString + LeaseObtained int64 + LeaseExpires int64 +} + +const MAXLEN_PHYSADDR = 8 +const MAX_INTERFACE_NAME_LEN = 256 +const MAXLEN_IFDESCR = 256 + +type MibIfRow struct { + Name [MAX_INTERFACE_NAME_LEN]uint16 + Index uint32 + Type uint32 + Mtu uint32 + Speed uint32 + PhysAddrLen uint32 + PhysAddr [MAXLEN_PHYSADDR]byte + AdminStatus uint32 + OperStatus uint32 + LastChange uint32 + InOctets uint32 + InUcastPkts uint32 + InNUcastPkts uint32 + InDiscards uint32 + InErrors uint32 + InUnknownProtos uint32 + OutOctets uint32 + OutUcastPkts uint32 + OutNUcastPkts uint32 + OutDiscards uint32 + OutErrors uint32 + OutQLen uint32 + DescrLen uint32 + Descr [MAXLEN_IFDESCR]byte +} + +type CertContext struct { + EncodingType uint32 + EncodedCert *byte + Length uint32 + CertInfo uintptr + Store Handle +} + +type CertChainContext struct { + Size uint32 + TrustStatus CertTrustStatus + ChainCount uint32 + Chains **CertSimpleChain + LowerQualityChainCount uint32 + LowerQualityChains **CertChainContext + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertSimpleChain struct { + Size uint32 + TrustStatus CertTrustStatus + NumElements uint32 + Elements **CertChainElement + TrustListInfo uintptr + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertChainElement struct { + Size uint32 + CertContext *CertContext + TrustStatus CertTrustStatus + RevocationInfo *CertRevocationInfo + IssuanceUsage *CertEnhKeyUsage + ApplicationUsage *CertEnhKeyUsage + ExtendedErrorInfo *uint16 +} + +type CertRevocationInfo struct { + Size uint32 + RevocationResult uint32 + RevocationOid *byte + OidSpecificInfo uintptr + HasFreshnessTime uint32 + FreshnessTime uint32 + CrlInfo uintptr // *CertRevocationCrlInfo +} + +type CertTrustStatus struct { + ErrorStatus uint32 + InfoStatus uint32 +} + +type CertUsageMatch struct { + Type uint32 + Usage CertEnhKeyUsage +} + +type CertEnhKeyUsage struct { + Length uint32 + UsageIdentifiers **byte +} + +type CertChainPara struct { + Size uint32 + RequestedUsage CertUsageMatch + RequstedIssuancePolicy CertUsageMatch + URLRetrievalTimeout uint32 + CheckRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 + CacheResync *Filetime +} + +type CertChainPolicyPara struct { + Size uint32 + Flags uint32 + ExtraPolicyPara uintptr +} + +type SSLExtraCertChainPolicyPara struct { + Size uint32 + AuthType uint32 + Checks uint32 + ServerName *uint16 +} + +type CertChainPolicyStatus struct { + Size uint32 + Error uint32 + ChainIndex uint32 + ElementIndex uint32 + ExtraPolicyStatus uintptr +} + +const ( + // do not reorder + HKEY_CLASSES_ROOT = 0x80000000 + iota + HKEY_CURRENT_USER + HKEY_LOCAL_MACHINE + HKEY_USERS + HKEY_PERFORMANCE_DATA + HKEY_CURRENT_CONFIG + HKEY_DYN_DATA + + KEY_QUERY_VALUE = 1 + KEY_SET_VALUE = 2 + KEY_CREATE_SUB_KEY = 4 + KEY_ENUMERATE_SUB_KEYS = 8 + KEY_NOTIFY = 16 + KEY_CREATE_LINK = 32 + KEY_WRITE = 0x20006 + KEY_EXECUTE = 0x20019 + KEY_READ = 0x20019 + KEY_WOW64_64KEY = 0x0100 + KEY_WOW64_32KEY = 0x0200 + KEY_ALL_ACCESS = 0xf003f +) + +const ( + // do not reorder + REG_NONE = iota + REG_SZ + REG_EXPAND_SZ + REG_BINARY + REG_DWORD_LITTLE_ENDIAN + REG_DWORD_BIG_ENDIAN + REG_LINK + REG_MULTI_SZ + REG_RESOURCE_LIST + REG_FULL_RESOURCE_DESCRIPTOR + REG_RESOURCE_REQUIREMENTS_LIST + REG_QWORD_LITTLE_ENDIAN + REG_DWORD = REG_DWORD_LITTLE_ENDIAN + REG_QWORD = REG_QWORD_LITTLE_ENDIAN +) + +type AddrinfoW struct { + Flags int32 + Family int32 + Socktype int32 + Protocol int32 + Addrlen uintptr + Canonname *uint16 + Addr uintptr + Next *AddrinfoW +} + +const ( + AI_PASSIVE = 1 + AI_CANONNAME = 2 + AI_NUMERICHOST = 4 +) + +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +var WSAID_CONNECTEX = GUID{ + 0x25a207b9, + 0xddf3, + 0x4660, + [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, +} + +var WSAID_WSASENDMSG = GUID{ + 0xa441e712, + 0x754f, + 0x43ca, + [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, +} + +var WSAID_WSARECVMSG = GUID{ + 0xf689d7c8, + 0x6f1f, + 0x436b, + [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, +} + +const ( + FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + FILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +const ( + WSAPROTOCOL_LEN = 255 + MAX_PROTOCOL_CHAIN = 7 + BASE_PROTOCOL = 1 + LAYERED_PROTOCOL = 0 + + XP1_CONNECTIONLESS = 0x00000001 + XP1_GUARANTEED_DELIVERY = 0x00000002 + XP1_GUARANTEED_ORDER = 0x00000004 + XP1_MESSAGE_ORIENTED = 0x00000008 + XP1_PSEUDO_STREAM = 0x00000010 + XP1_GRACEFUL_CLOSE = 0x00000020 + XP1_EXPEDITED_DATA = 0x00000040 + XP1_CONNECT_DATA = 0x00000080 + XP1_DISCONNECT_DATA = 0x00000100 + XP1_SUPPORT_BROADCAST = 0x00000200 + XP1_SUPPORT_MULTIPOINT = 0x00000400 + XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800 + XP1_MULTIPOINT_DATA_PLANE = 0x00001000 + XP1_QOS_SUPPORTED = 0x00002000 + XP1_UNI_SEND = 0x00008000 + XP1_UNI_RECV = 0x00010000 + XP1_IFS_HANDLES = 0x00020000 + XP1_PARTIAL_MESSAGE = 0x00040000 + XP1_SAN_SUPPORT_SDP = 0x00080000 + + PFL_MULTIPLE_PROTO_ENTRIES = 0x00000001 + PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002 + PFL_HIDDEN = 0x00000004 + PFL_MATCHES_PROTOCOL_ZERO = 0x00000008 + PFL_NETWORKDIRECT_PROVIDER = 0x00000010 +) + +type WSAProtocolInfo struct { + ServiceFlags1 uint32 + ServiceFlags2 uint32 + ServiceFlags3 uint32 + ServiceFlags4 uint32 + ProviderFlags uint32 + ProviderId GUID + CatalogEntryId uint32 + ProtocolChain WSAProtocolChain + Version int32 + AddressFamily int32 + MaxSockAddr int32 + MinSockAddr int32 + SocketType int32 + Protocol int32 + ProtocolMaxOffset int32 + NetworkByteOrder int32 + SecurityScheme int32 + MessageSize uint32 + ProviderReserved uint32 + ProtocolName [WSAPROTOCOL_LEN + 1]uint16 +} + +type WSAProtocolChain struct { + ChainLen int32 + ChainEntries [MAX_PROTOCOL_CHAIN]uint32 +} + +type TCPKeepalive struct { + OnOff uint32 + Time uint32 + Interval uint32 +} + +type symbolicLinkReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + Flags uint32 + PathBuffer [1]uint16 +} + +type mountPointReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + + // GenericReparseBuffer + reparseBuffer byte +} + +const ( + FSCTL_GET_REPARSE_POINT = 0x900A8 + MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 + IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + IO_REPARSE_TAG_SYMLINK = 0xA000000C + SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 +) + +const ( + ComputerNameNetBIOS = 0 + ComputerNameDnsHostname = 1 + ComputerNameDnsDomain = 2 + ComputerNameDnsFullyQualified = 3 + ComputerNamePhysicalNetBIOS = 4 + ComputerNamePhysicalDnsHostname = 5 + ComputerNamePhysicalDnsDomain = 6 + ComputerNamePhysicalDnsFullyQualified = 7 + ComputerNameMax = 8 +) + +const ( + MOVEFILE_REPLACE_EXISTING = 0x1 + MOVEFILE_COPY_ALLOWED = 0x2 + MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 + MOVEFILE_WRITE_THROUGH = 0x8 + MOVEFILE_CREATE_HARDLINK = 0x10 + MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 +) + +const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 + +const ( + IF_TYPE_OTHER = 1 + IF_TYPE_ETHERNET_CSMACD = 6 + IF_TYPE_ISO88025_TOKENRING = 9 + IF_TYPE_PPP = 23 + IF_TYPE_SOFTWARE_LOOPBACK = 24 + IF_TYPE_ATM = 37 + IF_TYPE_IEEE80211 = 71 + IF_TYPE_TUNNEL = 131 + IF_TYPE_IEEE1394 = 144 +) + +type SocketAddress struct { + Sockaddr *syscall.RawSockaddrAny + SockaddrLength int32 +} + +type IpAdapterUnicastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterUnicastAddress + Address SocketAddress + PrefixOrigin int32 + SuffixOrigin int32 + DadState int32 + ValidLifetime uint32 + PreferredLifetime uint32 + LeaseLifetime uint32 + OnLinkPrefixLength uint8 +} + +type IpAdapterAnycastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterAnycastAddress + Address SocketAddress +} + +type IpAdapterMulticastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterMulticastAddress + Address SocketAddress +} + +type IpAdapterDnsServerAdapter struct { + Length uint32 + Reserved uint32 + Next *IpAdapterDnsServerAdapter + Address SocketAddress +} + +type IpAdapterPrefix struct { + Length uint32 + Flags uint32 + Next *IpAdapterPrefix + Address SocketAddress + PrefixLength uint32 +} + +type IpAdapterAddresses struct { + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + /* more fields might be present here. */ +} + +const ( + IfOperStatusUp = 1 + IfOperStatusDown = 2 + IfOperStatusTesting = 3 + IfOperStatusUnknown = 4 + IfOperStatusDormant = 5 + IfOperStatusNotPresent = 6 + IfOperStatusLowerLayerDown = 7 +) + +// Console related constants used for the mode parameter to SetConsoleMode. See +// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. + +const ( + ENABLE_PROCESSED_INPUT = 0x1 + ENABLE_LINE_INPUT = 0x2 + ENABLE_ECHO_INPUT = 0x4 + ENABLE_WINDOW_INPUT = 0x8 + ENABLE_MOUSE_INPUT = 0x10 + ENABLE_INSERT_MODE = 0x20 + ENABLE_QUICK_EDIT_MODE = 0x40 + ENABLE_EXTENDED_FLAGS = 0x80 + ENABLE_AUTO_POSITION = 0x100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200 + + ENABLE_PROCESSED_OUTPUT = 0x1 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x2 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 + DISABLE_NEWLINE_AUTO_RETURN = 0x8 + ENABLE_LVB_GRID_WORLDWIDE = 0x10 +) + +type Coord struct { + X int16 + Y int16 +} + +type SmallRect struct { + Left int16 + Top int16 + Right int16 + Bottom int16 +} + +// Used with GetConsoleScreenBuffer to retreive information about a console +// screen buffer. See +// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str +// for details. + +type ConsoleScreenBufferInfo struct { + Size Coord + CursorPosition Coord + Attributes uint16 + Window SmallRect + MaximumWindowSize Coord +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go new file mode 100644 index 0000000..fe0ddd0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_386.go @@ -0,0 +1,22 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte +} + +type Servent struct { + Name *byte + Aliases **byte + Port uint16 + Proto *byte +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go new file mode 100644 index 0000000..7e154c2 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go @@ -0,0 +1,22 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte +} + +type Servent struct { + Name *byte + Aliases **byte + Proto *byte + Port uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go new file mode 100644 index 0000000..c7b3b15 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -0,0 +1,2687 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package windows + +import ( + "syscall" + "unsafe" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = NewLazySystemDLL("advapi32.dll") + modkernel32 = NewLazySystemDLL("kernel32.dll") + modshell32 = NewLazySystemDLL("shell32.dll") + modmswsock = NewLazySystemDLL("mswsock.dll") + modcrypt32 = NewLazySystemDLL("crypt32.dll") + modws2_32 = NewLazySystemDLL("ws2_32.dll") + moddnsapi = NewLazySystemDLL("dnsapi.dll") + modiphlpapi = NewLazySystemDLL("iphlpapi.dll") + modsecur32 = NewLazySystemDLL("secur32.dll") + modnetapi32 = NewLazySystemDLL("netapi32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") + + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procCreateServiceW = modadvapi32.NewProc("CreateServiceW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procDeleteService = modadvapi32.NewProc("DeleteService") + procStartServiceW = modadvapi32.NewProc("StartServiceW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procControlService = modadvapi32.NewProc("ControlService") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") + procGetLastError = modkernel32.NewProc("GetLastError") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetVersion = modkernel32.NewProc("GetVersion") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procExitProcess = modkernel32.NewProc("ExitProcess") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procReadFile = modkernel32.NewProc("ReadFile") + procWriteFile = modkernel32.NewProc("WriteFile") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") + procFindClose = modkernel32.NewProc("FindClose") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procGetFileType = modkernel32.NewProc("GetFileType") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") + procCertCloseStore = modcrypt32.NewProc("CertCloseStore") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") + procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateEventExW = modkernel32.NewProc("CreateEventExW") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") + procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") + procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") + procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") + procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") + procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procsocket = modws2_32.NewProc("socket") + procsetsockopt = modws2_32.NewProc("setsockopt") + procgetsockopt = modws2_32.NewProc("getsockopt") + procbind = modws2_32.NewProc("bind") + procconnect = modws2_32.NewProc("connect") + procgetsockname = modws2_32.NewProc("getsockname") + procgetpeername = modws2_32.NewProc("getpeername") + proclisten = modws2_32.NewProc("listen") + procshutdown = modws2_32.NewProc("shutdown") + procclosesocket = modws2_32.NewProc("closesocket") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procWSARecv = modws2_32.NewProc("WSARecv") + procWSASend = modws2_32.NewProc("WSASend") + procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASendTo = modws2_32.NewProc("WSASendTo") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procntohs = modws2_32.NewProc("ntohs") + procgetprotobyname = modws2_32.NewProc("getprotobyname") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetACP = modkernel32.NewProc("GetACP") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procCopySid = modadvapi32.NewProc("CopySid") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procEqualSid = modadvapi32.NewProc("EqualSid") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") +) + +func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeregisterEventSource(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CloseServiceHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteService(service Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLastError() (lasterr error) { + r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + if r0 != 0 { + lasterr = syscall.Errno(r0) + } + return +} + +func LoadLibrary(libname string) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibrary(_p0) +} + +func _LoadLibrary(libname *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibraryEx(_p0, zero, flags) +} + +func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeLibrary(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(procname) + if err != nil { + return + } + return _GetProcAddress(module, _p0) +} + +func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + proc = uintptr(r0) + if proc == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVersion() (ver uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + ver = uint32(r0) + if ver == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { + var _p0 *uint16 + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ExitProcess(exitcode uint32) { + syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + return +} + +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + newlowoffset = uint32(r0) + if newlowoffset == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CloseHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetStdHandle(stdhandle uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetStdHandle(stdhandle uint32, handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func findNextFile1(handle Handle, data *win32finddata1) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindClose(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetCurrentDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { + r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RemoveDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteFile(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFile(from *uint16, to *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetComputerName(buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEndOfFile(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetSystemTimeAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetSystemTimePreciseAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CancelIo(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CancelIoEx(s Handle, o *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { + var _p0 uint32 + if inheritHandles { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(da), uintptr(_p0), uintptr(pid)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TerminateProcess(handle Handle, exitcode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetStartupInfo(startupInfo *StartupInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentProcess() (pseudoHandle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentProcess.Addr(), 0, 0, 0, 0) + pseudoHandle = Handle(r0) + if pseudoHandle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { + var _p0 uint32 + if bInheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { + r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + event = uint32(r0) + if event == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { + r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetEnvironmentStrings() (envs *uint16, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + envs = (*uint16)(unsafe.Pointer(r0)) + if envs == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeEnvironmentStrings(envs *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileAttributes(name *uint16) (attrs uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + attrs = uint32(r0) + if attrs == INVALID_FILE_ATTRIBUTES { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFileAttributes(name *uint16, attrs uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCommandLine() (cmd *uint16) { + r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + cmd = (*uint16)(unsafe.Pointer(r0)) + return +} + +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + if argv == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LocalFree(hmem Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + handle = Handle(r0) + if handle != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FlushFileBuffers(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + addr = uintptr(r0) + if addr == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func UnmapViewOfFile(addr uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FlushViewOfFile(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualLock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualUnlock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + var _p0 uint32 + if watchSubTree { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + store = Handle(r0) + if store == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { + r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertCloseStore(store Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { + r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertFreeCertificateChain(ctx *CertChainContext) { + syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + return +} + +func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertFreeCertificateContext(ctx *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { + r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegCloseKey(key Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func getCurrentProcessId() (pid uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + pid = uint32(r0) + return +} + +func GetConsoleMode(console Handle, mode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetConsoleMode(console Handle, mode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentThreadId() (id uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + id = uint32(r0) + return +} + +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func PulseEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindVolumeClose(findVolume Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetDriveType(rootPathName *uint16) (driveType uint32) { + r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + driveType = uint32(r0) + return +} + +func GetLogicalDrives() (drivesBitMask uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + drivesBitMask = uint32(r0) + if drivesBitMask == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { + r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func WSACleanup() (err error) { + r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { + r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { + r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func listen(s Handle, backlog int32) (err error) { + r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func shutdown(s Handle, how int32) (err error) { + r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Closesocket(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { + syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + return +} + +func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetHostByName(name string) (h *Hostent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetHostByName(_p0) +} + +func _GetHostByName(name *byte) (h *Hostent, err error) { + r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + h = (*Hostent)(unsafe.Pointer(r0)) + if h == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetServByName(name string, proto string) (s *Servent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(proto) + if err != nil { + return + } + return _GetServByName(_p0, _p1) +} + +func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { + r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + s = (*Servent)(unsafe.Pointer(r0)) + if s == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Ntohs(netshort uint16) (u uint16) { + r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + u = uint16(r0) + return +} + +func GetProtoByName(name string) (p *Protoent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetProtoByName(_p0) +} + +func _GetProtoByName(name *byte) (p *Protoent, err error) { + r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + p = (*Protoent)(unsafe.Pointer(r0)) + if p == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + var _p0 *uint16 + _p0, status = syscall.UTF16PtrFromString(name) + if status != nil { + return + } + return _DnsQuery(_p0, qtype, options, extra, qrs, pr) +} + +func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func DnsRecordListFree(rl *DNSRecord, freetype uint32) { + syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + return +} + +func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { + r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + same = r0 != 0 + return +} + +func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { + r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func FreeAddrInfoW(addrinfo *AddrinfoW) { + syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + return +} + +func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { + r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + n = int32(r0) + if n == -1 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetApiBufferFree(buf *byte) (neterr error) { + r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLengthSid(sid *SID) (len uint32) { + r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + len = uint32(r0) + return +} + +func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeSid(sid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { + r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + isEqual = r0 != 0 + return +} + +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenProcessToken(h Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(h), uintptr(access), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(t), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 0000000..9f55693 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - 1.9 + - tip + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 0000000..8da58fb --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 0000000..b50c6e8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 0000000..1f7e87e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 0000000..e4e56e2 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,775 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + good = d.unmarshal(n.alias, out) + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. + out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/decode_test.go b/vendor/gopkg.in/yaml.v2/decode_test.go new file mode 100644 index 0000000..9269f12 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode_test.go @@ -0,0 +1,1326 @@ +package yaml_test + +import ( + "errors" + "io" + "math" + "reflect" + "strings" + "time" + + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + (*struct{})(nil), + }, + { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. + { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. + + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "bin: -0b1000000000000000000000000000000000000000000000000000000000000000", + map[string]interface{}{"bin": -9223372036854775808}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. + { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: [1, 2]", + &struct{ A [2]int }{[2]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, + }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. + { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!float 0", + map[string]interface{}{"v": float64(0)}, + }, { + "v: !!float -1", + map[string]interface{}{"v": float64(-1)}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Non-specific tag (Issue #75) + { + "v: ! test", + map[string]interface{}{"v": "test"}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]*string{"foo": nil}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Support for ~ + { + "foo: ~", + map[string]*string{"foo": nil}, + }, { + "foo: ~", + map[string]string{"foo": ""}, + }, { + "foo: ~", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": "+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + // issue #295 (allow scalars with colons in flow mappings and sequences) + { + "a: {b: https://github.com/go-yaml/yaml}", + map[string]interface{}{"a": map[interface{}]interface{}{ + "b": "https://github.com/go-yaml/yaml", + }}, + }, + { + "a: [https://github.com/go-yaml/yaml]", + map[string]interface{}{"a": []interface{}{"https://github.com/go-yaml/yaml"}}, + }, + + // Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]textUnmarshaler{"a": textUnmarshaler{S: "1.2.3.4"}}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]textUnmarshaler{"a": textUnmarshaler{"2015-02-24T18:19:39Z"}}, + }, + + // Timestamps + { + // Date only. + "a: 2015-01-01\n", + map[string]time.Time{"a": time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, + }, + { + // RFC3339 + "a: 2015-02-24T18:19:39.12Z\n", + map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, .12e9, time.UTC)}, + }, + { + // RFC3339 with short dates. + "a: 2015-2-3T3:4:5Z", + map[string]time.Time{"a": time.Date(2015, 2, 3, 3, 4, 5, 0, time.UTC)}, + }, + { + // ISO8601 lower case t + "a: 2015-02-24t18:19:39Z\n", + map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)}, + }, + { + // space separate, no time zone + "a: 2015-02-24 18:19:39\n", + map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)}, + }, + // Some cases not currently handled. Uncomment these when + // the code is fixed. + // { + // // space separated with time zone + // "a: 2001-12-14 21:59:43.10 -5", + // map[string]interface{}{"a": time.Date(2001, 12, 14, 21, 59, 43, .1e9, time.UTC)}, + // }, + // { + // // arbitrary whitespace between fields + // "a: 2001-12-14 \t\t \t21:59:43.10 \t Z", + // map[string]interface{}{"a": time.Date(2001, 12, 14, 21, 59, 43, .1e9, time.UTC)}, + // }, + { + // explicit string tag + "a: !!str 2015-01-01", + map[string]interface{}{"a": "2015-01-01"}, + }, + { + // explicit timestamp tag on quoted string + "a: !!timestamp \"2015-01-01\"", + map[string]time.Time{"a": time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, + }, + { + // explicit timestamp tag on unquoted string + "a: !!timestamp 2015-01-01", + map[string]time.Time{"a": time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, + }, + { + // quoted string that's a valid timestamp + "a: \"2015-01-01\"", + map[string]interface{}{"a": "2015-01-01"}, + }, + { + // explicit timestamp tag into interface. + "a: !!timestamp \"2015-01-01\"", + map[string]interface{}{"a": "2015-01-01"}, + }, + { + // implicit timestamp tag into interface. + "a: 2015-01-01", + map[string]interface{}{"a": "2015-01-01"}, + }, + + // Encode empty lists as zero-length slices. + { + "a: []", + &struct{ A []int }{[]int{}}, + }, + + // UTF-16-LE + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00", + M{"ñoño": "very yes"}, + }, + // UTF-16-LE with surrogate. + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00", + M{"ñoño": "very yes 🟔"}, + }, + + // UTF-16-BE + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n", + M{"ñoño": "very yes"}, + }, + // UTF-16-BE with surrogate. + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n", + M{"ñoño": "very yes 🟔"}, + }, + + // YAML Float regex shouldn't match this + { + "a: 123456e1\n", + M{"a": "123456e1"}, + }, { + "a: 123456E1\n", + M{"a": "123456E1"}, + }, + // yaml-test-suite 3GZX: Spec Example 7.1. Alias Nodes + { + "First occurrence: &anchor Foo\nSecond occurrence: *anchor\nOverride anchor: &anchor Bar\nReuse anchor: *anchor\n", + map[interface{}]interface{}{ + "Reuse anchor": "Bar", + "First occurrence": "Foo", + "Second occurrence": "Foo", + "Override anchor": "Bar", + }, + }, + // Single document with garbage following it. + { + "---\nhello\n...\n}not yaml", + "hello", + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for i, item := range unmarshalTests { + c.Logf("test %d: %q", i, item.data) + t := reflect.ValueOf(item.value).Type() + value := reflect.New(t) + err := yaml.Unmarshal([]byte(item.data), value.Interface()) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + c.Assert(value.Elem().Interface(), DeepEquals, item.value, Commentf("error: %v", err)) + } +} + +// TODO(v3): This test should also work when unmarshaling onto an interface{}. +func (s *S) TestUnmarshalFullTimestamp(c *C) { + // Full timestamp in same format as encoded. This is confirmed to be + // properly decoded by Python as a timestamp as well. + var str = "2015-02-24T18:19:39.123456789-03:00" + var t time.Time + err := yaml.Unmarshal([]byte(str), &t) + c.Assert(err, IsNil) + c.Assert(t, Equals, time.Date(2015, 2, 24, 18, 19, 39, 123456789, t.Location())) + c.Assert(t.In(time.UTC), Equals, time.Date(2015, 2, 24, 21, 19, 39, 123456789, time.UTC)) +} + +func (s *S) TestDecoderSingleDocument(c *C) { + // Test that Decoder.Decode works as expected on + // all the unmarshal tests. + for i, item := range unmarshalTests { + c.Logf("test %d: %q", i, item.data) + if item.data == "" { + // Behaviour differs when there's no YAML. + continue + } + t := reflect.ValueOf(item.value).Type() + value := reflect.New(t) + err := yaml.NewDecoder(strings.NewReader(item.data)).Decode(value.Interface()) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + c.Assert(value.Elem().Interface(), DeepEquals, item.value) + } +} + +var decoderTests = []struct { + data string + values []interface{} +}{{ + "", + nil, +}, { + "a: b", + []interface{}{ + map[interface{}]interface{}{"a": "b"}, + }, +}, { + "---\na: b\n...\n", + []interface{}{ + map[interface{}]interface{}{"a": "b"}, + }, +}, { + "---\n'hello'\n...\n---\ngoodbye\n...\n", + []interface{}{ + "hello", + "goodbye", + }, +}} + +func (s *S) TestDecoder(c *C) { + for i, item := range decoderTests { + c.Logf("test %d: %q", i, item.data) + var values []interface{} + dec := yaml.NewDecoder(strings.NewReader(item.data)) + for { + var value interface{} + err := dec.Decode(&value) + if err == io.EOF { + break + } + c.Assert(err, IsNil) + values = append(values, value) + } + c.Assert(values, DeepEquals, item.values) + } +} + +type errReader struct{} + +func (errReader) Read([]byte) (int, error) { + return 0, errors.New("some read error") +} + +func (s *S) TestDecoderReadError(c *C) { + err := yaml.NewDecoder(errReader{}).Decode(&struct{}{}) + c.Assert(err, ErrorMatches, `yaml: input error: some read error`) +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a:\n- b: *,", "yaml: line 2: did not find expected alphabetic or numeric character"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, + {"b: *a\na: &a {c: 1}", `yaml: unknown anchor 'a' referenced`}, + {"%TAG !%79! tag:yaml.org,2002:\n---\nv: !%79!int '1'", "yaml: did not find expected whitespace"}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for i, item := range unmarshalErrorTests { + c.Logf("test %d: %q", i, item.data) + var value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +func (s *S) TestDecoderErrors(c *C) { + for _, item := range unmarshalErrorTests { + var value interface{} + err := yaml.NewDecoder(strings.NewReader(item.data)).Decode(&value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, + {`_: ""`, "!!str", ""}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok := unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct { + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler struct{} + +var failingErr = errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = []int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 10 + label: center/big + +mergeOne: + # Merge one map + << : *CENTER + "r": 10 + label: center/big + +mergeMultiple: + # Merge multiple maps + << : [ *CENTER, *BIG ] + label: center/big + +override: + # Override + << : [ *BIG, *LEFT, *SMALL ] + "x": 1 + label: center/big + +shortTag: + # Explicit short merge tag + !!merge "<<" : [ *CENTER, *BIG ] + label: center/big + +longTag: + # Explicit merge long tag + ! "<<" : [ *CENTER, *BIG ] + label: center/big + +inlineMap: + # Inlined map + << : {"x": 1, "y": 2, "r": 10} + label: center/big + +inlineSequenceMap: + # Inlined map in sequence + << : [ *CENTER, {"r": 10} ] + label: center/big +` + +func (s *S) TestMerge(c *C) { + var want = map[interface{}]interface{}{ + "x": 1, + "y": 2, + "r": 10, + "label": "center/big", + } + + var m map[interface{}]interface{} + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) + } +} + +func (s *S) TestMergeStruct(c *C) { + type Data struct { + X, Y, R int + Label string + } + want := Data{1, 2, 10, "center/big"} + + var m map[string]Data + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, Equals, want, Commentf("test %q failed", name)) + } +} + +var unmarshalNullTests = []func() interface{}{ + func() interface{} { var v interface{}; v = "v"; return &v }, + func() interface{} { var s = "s"; return &s }, + func() interface{} { var s = "s"; sptr := &s; return &sptr }, + func() interface{} { var i = 1; return &i }, + func() interface{} { var i = 1; iptr := &i; return &iptr }, + func() interface{} { m := map[string]int{"s": 1}; return &m }, + func() interface{} { m := map[string]int{"s": 1}; return m }, +} + +func (s *S) TestUnmarshalNull(c *C) { + for _, test := range unmarshalNullTests { + item := test() + zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() + err := yaml.Unmarshal([]byte("null"), item) + c.Assert(err, IsNil) + if reflect.TypeOf(item).Kind() == reflect.Map { + c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) + } else { + c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) + } + } +} + +func (s *S) TestUnmarshalSliceOnPreset(c *C) { + // Issue #48. + v := struct{ A []int }{[]int{1}} + yaml.Unmarshal([]byte("a: [2]"), &v) + c.Assert(v.A, DeepEquals, []int{2}) +} + +var unmarshalStrictTests = []struct { + data string + value interface{} + error string +}{{ + data: "a: 1\nc: 2\n", + value: struct{ A, B int }{A: 1}, + error: `yaml: unmarshal errors:\n line 2: field c not found in type struct { A int; B int }`, +}, { + data: "a: 1\nb: 2\na: 3\n", + value: struct{ A, B int }{A: 3, B: 2}, + error: `yaml: unmarshal errors:\n line 3: field a already set in type struct { A int; B int }`, +}, { + data: "c: 3\na: 1\nb: 2\nc: 4\n", + value: struct { + A int + inlineB `yaml:",inline"` + }{ + A: 1, + inlineB: inlineB{ + B: 2, + inlineC: inlineC{ + C: 4, + }, + }, + }, + error: `yaml: unmarshal errors:\n line 4: field c already set in type struct { A int; yaml_test.inlineB "yaml:\\",inline\\"" }`, +}, { + data: "c: 0\na: 1\nb: 2\nc: 1\n", + value: struct { + A int + inlineB `yaml:",inline"` + }{ + A: 1, + inlineB: inlineB{ + B: 2, + inlineC: inlineC{ + C: 1, + }, + }, + }, + error: `yaml: unmarshal errors:\n line 4: field c already set in type struct { A int; yaml_test.inlineB "yaml:\\",inline\\"" }`, +}, { + data: "c: 1\na: 1\nb: 2\nc: 3\n", + value: struct { + A int + M map[string]interface{} `yaml:",inline"` + }{ + A: 1, + M: map[string]interface{}{ + "b": 2, + "c": 3, + }, + }, + error: `yaml: unmarshal errors:\n line 4: key "c" already set in map`, +}, { + data: "a: 1\n9: 2\nnull: 3\n9: 4", + value: map[interface{}]interface{}{ + "a": 1, + nil: 3, + 9: 4, + }, + error: `yaml: unmarshal errors:\n line 4: key 9 already set in map`, +}} + +func (s *S) TestUnmarshalStrict(c *C) { + for i, item := range unmarshalStrictTests { + c.Logf("test %d: %q", i, item.data) + // First test that normal Unmarshal unmarshals to the expected value. + t := reflect.ValueOf(item.value).Type() + value := reflect.New(t) + err := yaml.Unmarshal([]byte(item.data), value.Interface()) + c.Assert(err, Equals, nil) + c.Assert(value.Elem().Interface(), DeepEquals, item.value) + + // Then test that UnmarshalStrict fails on the same thing. + t = reflect.ValueOf(item.value).Type() + value = reflect.New(t) + err = yaml.UnmarshalStrict([]byte(item.data), value.Interface()) + c.Assert(err, ErrorMatches, item.error) + } +} + +type textUnmarshaler struct { + S string +} + +func (t *textUnmarshaler) UnmarshalText(s []byte) error { + t.S = string(s) + return nil +} + +func (s *S) TestFuzzCrashers(c *C) { + cases := []string{ + // runtime error: index out of range + "\"\\0\\\r\n", + + // should not happen + " 0: [\n] 0", + "? ? \"\n\" 0", + " - {\n000}0", + "0:\n 0: [0\n] 0", + " - \"\n000\"0", + " - \"\n000\"\"", + "0:\n - {\n000}0", + "0:\n - \"\n000\"0", + "0:\n - \"\n000\"\"", + + // runtime error: index out of range + " \ufeff\n", + "? \ufeff\n", + "? \ufeff:\n", + "0: \ufeff\n", + "? \ufeff: \ufeff\n", + } + for _, data := range cases { + var v interface{} + _ = yaml.Unmarshal([]byte(data), &v) + } +} + +//var data []byte +//func init() { +// var err error +// data, err = ioutil.ReadFile("/tmp/file.yaml") +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkUnmarshal(c *C) { +// var err error +// for i := 0; i < c.N; i++ { +// var v map[string]interface{} +// err = yaml.Unmarshal(data, &v) +// } +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkMarshal(c *C) { +// var v map[string]interface{} +// yaml.Unmarshal(data, &v) +// c.ResetTimer() +// for i := 0; i < c.N; i++ { +// yaml.Marshal(&v) +// } +//} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 0000000..a1c2cc5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 0000000..a14435e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,362 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. + case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/encode_test.go b/vendor/gopkg.in/yaml.v2/encode_test.go new file mode 100644 index 0000000..f0911a7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode_test.go @@ -0,0 +1,595 @@ +package yaml_test + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "net" + "os" + + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + (*marshalerType)(nil), + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float32(0.99)}, + "v: 0.99\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. + { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct{ A [2]int }{[2]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, { + &struct { + A float64 "a,omitempty" + B float64 "b,omitempty" + }{1, 0}, + "a: 1\n", + }, + { + &struct { + T1 time.Time "t1,omitempty" + T2 time.Time "t2,omitempty" + T3 *time.Time "t3,omitempty" + T4 *time.Time "t4,omitempty" + }{ + T2: time.Date(2018, 1, 9, 10, 40, 47, 0, time.UTC), + T4: newTime(time.Date(2098, 1, 9, 10, 40, 47, 0, time.UTC)), + }, + "t2: 2018-01-09T10:40:47Z\nt4: 2098-01-09T10:40:47Z\n", + }, + // Nil interface that implements Marshaler. + { + map[string]yaml.Marshaler{ + "a": nil, + }, + "a: null\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. + { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + // time.Time gets a timestamp tag. + { + map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)}, + "a: 2015-02-24T18:19:39Z\n", + }, + { + map[string]*time.Time{"a": newTime(time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC))}, + "a: 2015-02-24T18:19:39Z\n", + }, + { + // This is confirmed to be properly decoded in Python (libyaml) without a timestamp tag. + map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 123456789, time.FixedZone("FOO", -3*60*60))}, + "a: 2015-02-24T18:19:39.123456789-03:00\n", + }, + // Ensure timestamp-like strings are quoted. + { + map[string]string{"a": "2015-02-24T18:19:39Z"}, + "a: \"2015-02-24T18:19:39Z\"\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). + { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, + + // Containing hash mark ('#') in string should be quoted + { + map[string]string{"a": "Hello #comment"}, + "a: 'Hello #comment'\n", + }, + { + map[string]string{"a": "你好 #comment"}, + "a: '你好 #comment'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for i, item := range marshalTests { + c.Logf("test %d: %q", i, item.data) + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +func (s *S) TestEncoderSingleDocument(c *C) { + for i, item := range marshalTests { + c.Logf("test %d. %q", i, item.data) + var buf bytes.Buffer + enc := yaml.NewEncoder(&buf) + err := enc.Encode(item.value) + c.Assert(err, Equals, nil) + err = enc.Close() + c.Assert(err, Equals, nil) + c.Assert(buf.String(), Equals, item.data) + } +} + +func (s *S) TestEncoderMultipleDocuments(c *C) { + var buf bytes.Buffer + enc := yaml.NewEncoder(&buf) + err := enc.Encode(map[string]string{"a": "b"}) + c.Assert(err, Equals, nil) + err = enc.Encode(map[string]string{"c": "d"}) + c.Assert(err, Equals, nil) + err = enc.Close() + c.Assert(err, Equals, nil) + c.Assert(buf.String(), Equals, "a: b\n---\nc: d\n") +} + +func (s *S) TestEncoderWriteError(c *C) { + enc := yaml.NewEncoder(errorWriter{}) + err := enc.Encode(map[string]string{"a": "b"}) + c.Assert(err, ErrorMatches, `yaml: write error: some write error`) // Data not flushed yet +} + +type errorWriter struct{} + +func (errorWriter) Write([]byte) (int, error) { + return 0, fmt.Errorf("some write error") +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/0001", + "a/002", + "a/3", + "a/10", + "a/11", + "a/0012", + "a/100", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d7", + "d7abc", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, "\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) + } + last = index + } +} + +func newTime(t time.Time) *time.Time { + return &t +} diff --git a/vendor/gopkg.in/yaml.v2/example_embedded_test.go b/vendor/gopkg.in/yaml.v2/example_embedded_test.go new file mode 100644 index 0000000..171c093 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/example_embedded_test.go @@ -0,0 +1,41 @@ +package yaml_test + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +// An example showing how to unmarshal embedded +// structs from YAML. + +type StructA struct { + A string `yaml:"a"` +} + +type StructB struct { + // Embedded structs are not treated as embedded in YAML by default. To do that, + // add the ",inline" annotation below + StructA `yaml:",inline"` + B string `yaml:"b"` +} + +var data = ` +a: a string from struct A +b: a string from struct B +` + +func ExampleUnmarshal_embedded() { + var b StructB + + err := yaml.Unmarshal([]byte(data), &b) + if err != nil { + log.Fatalf("cannot unmarshal data: %v", err) + } + fmt.Println(b.A) + fmt.Println(b.B) + // Output: + // a string from struct A + // a string from struct B +} diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod new file mode 100644 index 0000000..1934e87 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v2" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 0000000..81d05df --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) + break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 0000000..7c1f5fa --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 0000000..6c151db --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 0000000..077fd1d --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2696 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. + if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 0000000..4c45e66 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/suite_test.go b/vendor/gopkg.in/yaml.v2/suite_test.go new file mode 100644 index 0000000..c5cf1ed --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/suite_test.go @@ -0,0 +1,12 @@ +package yaml_test + +import ( + . "gopkg.in/check.v1" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 0000000..a2dde60 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 0000000..de85aa4 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,466 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decorder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 0000000..e25cee5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,738 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 0000000..8110ce3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +}